merge stable/linux-4.19.y into mtk-v4.19

Signed-off-by: Fabien Parent <fparent@baylibre.com>
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
index b404d59..161e63a 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
@@ -10,8 +10,12 @@
 	- "mediatek,mt2712-apmixedsys", "syscon"
 	- "mediatek,mt6797-apmixedsys"
 	- "mediatek,mt7622-apmixedsys"
+	- "mediatek,mt7623-apmixedsys", "mediatek,mt2701-apmixedsys"
+	- "mediatek,mt7629-apmixedsys"
 	- "mediatek,mt8135-apmixedsys"
 	- "mediatek,mt8173-apmixedsys"
+	- "mediatek,mt8183-apmixedsys", "syscon"
+	- "mediatek,mt8516-apmixedsys"
 - #clock-cells: Must be 1
 
 The apmixedsys controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
index 34a69ba..07c9d81 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
@@ -8,6 +8,9 @@
 - compatible: Should be one of:
 	- "mediatek,mt2701-audsys", "syscon"
 	- "mediatek,mt7622-audsys", "syscon"
+	- "mediatek,mt7623-audsys", "mediatek,mt2701-audsys", "syscon"
+	- "mediatek,mt8183-audiosys", "syscon"
+	- "mediatek,mt8516-audsys", "syscon"
 - #clock-cells: Must be 1
 
 The AUDSYS controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt
index 4010e37..149567a 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt
@@ -8,6 +8,7 @@
 - compatible: Should be:
 	- "mediatek,mt2701-bdpsys", "syscon"
 	- "mediatek,mt2712-bdpsys", "syscon"
+	- "mediatek,mt7623-bdpsys", "mediatek,mt2701-bdpsys", "syscon"
 - #clock-cells: Must be 1
 
 The bdpsys controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt
new file mode 100644
index 0000000..d8930f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt
@@ -0,0 +1,22 @@
+MediaTek CAMSYS controller
+============================
+
+The MediaTek camsys controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be one of:
+	- "mediatek,mt8183-camsys", "syscon"
+- #clock-cells: Must be 1
+
+The camsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+camsys: camsys@1a000000 {
+	compatible = "mediatek,mt8183-camsys", "syscon";
+	reg = <0 0x1a000000 0 0x1000>;
+	#clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt
index 8f5335b..6b7e806 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt
@@ -8,6 +8,8 @@
 - compatible: Should be:
 	- "mediatek,mt2701-ethsys", "syscon"
 	- "mediatek,mt7622-ethsys", "syscon"
+	- "mediatek,mt7623-ethsys", "mediatek,mt2701-ethsys", "syscon"
+	- "mediatek,mt7629-ethsys", "syscon"
 - #clock-cells: Must be 1
 - #reset-cells: Must be 1
 
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,hifsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,hifsys.txt
index f5629d6..323905a 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,hifsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,hifsys.txt
@@ -9,6 +9,7 @@
 - compatible: Should be:
 	- "mediatek,mt2701-hifsys", "syscon"
 	- "mediatek,mt7622-hifsys", "syscon"
+	- "mediatek,mt7623-hifsys", "mediatek,mt2701-hifsys", "syscon"
 - #clock-cells: Must be 1
 
 The hifsys controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
index 868bd51..e3bc4a1 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
@@ -9,7 +9,9 @@
 	- "mediatek,mt2701-imgsys", "syscon"
 	- "mediatek,mt2712-imgsys", "syscon"
 	- "mediatek,mt6797-imgsys", "syscon"
+	- "mediatek,mt7623-imgsys", "mediatek,mt2701-imgsys", "syscon"
 	- "mediatek,mt8173-imgsys", "syscon"
+	- "mediatek,mt8183-imgsys", "syscon"
 - #clock-cells: Must be 1
 
 The imgsys controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
index 566f153..a909139 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
@@ -11,8 +11,12 @@
 	- "mediatek,mt2712-infracfg", "syscon"
 	- "mediatek,mt6797-infracfg", "syscon"
 	- "mediatek,mt7622-infracfg", "syscon"
+	- "mediatek,mt7623-infracfg", "mediatek,mt2701-infracfg", "syscon"
+	- "mediatek,mt7629-infracfg", "syscon"
 	- "mediatek,mt8135-infracfg", "syscon"
 	- "mediatek,mt8173-infracfg", "syscon"
+	- "mediatek,mt8183-infracfg", "syscon"
+	- "mediatek,mt8516-infracfg", "syscon"
 - #clock-cells: Must be 1
 - #reset-cells: Must be 1
 
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt
new file mode 100644
index 0000000..aabc8c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt
@@ -0,0 +1,43 @@
+MediaTek IPU controller
+============================
+
+The MediaTek IPU controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be one of:
+	- "mediatek,mt8183-ipu_conn", "syscon"
+	- "mediatek,mt8183-ipu_adl", "syscon"
+	- "mediatek,mt8183-ipu_core0", "syscon"
+	- "mediatek,mt8183-ipu_core1", "syscon"
+- #clock-cells: Must be 1
+
+The ipu controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+ipu_conn: syscon@19000000 {
+	compatible = "mediatek,mt8183-ipu_conn", "syscon";
+	reg = <0 0x19000000 0 0x1000>;
+	#clock-cells = <1>;
+};
+
+ipu_adl: syscon@19010000 {
+	compatible = "mediatek,mt8183-ipu_adl", "syscon";
+	reg = <0 0x19010000 0 0x1000>;
+	#clock-cells = <1>;
+};
+
+ipu_core0: syscon@19180000 {
+	compatible = "mediatek,mt8183-ipu_core0", "syscon";
+	reg = <0 0x19180000 0 0x1000>;
+	#clock-cells = <1>;
+};
+
+ipu_core1: syscon@19280000 {
+	compatible = "mediatek,mt8183-ipu_core1", "syscon";
+	reg = <0 0x19280000 0 0x1000>;
+	#clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt
index b8fb03f..2b882b7 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt
@@ -7,6 +7,7 @@
 
 - compatible: Should be one of:
 	- "mediatek,mt2712-mcucfg", "syscon"
+	- "mediatek,mt8183-mcucfg", "syscon"
 - #clock-cells: Must be 1
 
 The mcucfg controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt
index 859e67b..72787e7 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt
@@ -7,6 +7,7 @@
 
 - compatible: Should be one of:
 	- "mediatek,mt2712-mfgcfg", "syscon"
+	- "mediatek,mt8183-mfgcfg", "syscon"
 - #clock-cells: Must be 1
 
 The mfgcfg controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
index 4eb8bbe..545eab7 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
@@ -9,7 +9,9 @@
 	- "mediatek,mt2701-mmsys", "syscon"
 	- "mediatek,mt2712-mmsys", "syscon"
 	- "mediatek,mt6797-mmsys", "syscon"
+	- "mediatek,mt7623-mmsys", "mediatek,mt2701-mmsys", "syscon"
 	- "mediatek,mt8173-mmsys", "syscon"
+	- "mediatek,mt8183-mmsys", "syscon"
 - #clock-cells: Must be 1
 
 The mmsys controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pciesys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pciesys.txt
index 7fe5dc6..d179a61 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pciesys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pciesys.txt
@@ -7,6 +7,7 @@
 
 - compatible: Should be:
 	- "mediatek,mt7622-pciesys", "syscon"
+	- "mediatek,mt7629-pciesys", "syscon"
 - #clock-cells: Must be 1
 - #reset-cells: Must be 1
 
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt
index fb58ca8..4c7e478 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt
@@ -10,6 +10,8 @@
 	- "mediatek,mt2701-pericfg", "syscon"
 	- "mediatek,mt2712-pericfg", "syscon"
 	- "mediatek,mt7622-pericfg", "syscon"
+	- "mediatek,mt7623-pericfg", "mediatek,mt2701-pericfg", "syscon"
+	- "mediatek,mt7629-pericfg", "syscon"
 	- "mediatek,mt8135-pericfg", "syscon"
 	- "mediatek,mt8173-pericfg", "syscon"
 - #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt
index d113b8e..30cb645 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt
@@ -7,6 +7,7 @@
 
 - compatible: Should be:
 	- "mediatek,mt7622-sgmiisys", "syscon"
+	- "mediatek,mt7629-sgmiisys", "syscon"
 - #clock-cells: Must be 1
 
 The SGMIISYS controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ssusbsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ssusbsys.txt
index b8184da..7cb02c93 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ssusbsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ssusbsys.txt
@@ -7,6 +7,7 @@
 
 - compatible: Should be:
 	- "mediatek,mt7622-ssusbsys", "syscon"
+	- "mediatek,mt7629-ssusbsys", "syscon"
 - #clock-cells: Must be 1
 - #reset-cells: Must be 1
 
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
index 24014a7..a023b83 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
@@ -10,8 +10,12 @@
 	- "mediatek,mt2712-topckgen", "syscon"
 	- "mediatek,mt6797-topckgen"
 	- "mediatek,mt7622-topckgen"
+	- "mediatek,mt7623-topckgen", "mediatek,mt2701-topckgen"
+	- "mediatek,mt7629-topckgen"
 	- "mediatek,mt8135-topckgen"
 	- "mediatek,mt8173-topckgen"
+	- "mediatek,mt8183-topckgen", "syscon"
+	- "mediatek,mt8516-topckgen"
 - #clock-cells: Must be 1
 
 The topckgen controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
index ea40d05..57176bb 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
@@ -9,7 +9,9 @@
 	- "mediatek,mt2701-vdecsys", "syscon"
 	- "mediatek,mt2712-vdecsys", "syscon"
 	- "mediatek,mt6797-vdecsys", "syscon"
+	- "mediatek,mt7623-vdecsys", "mediatek,mt2701-vdecsys", "syscon"
 	- "mediatek,mt8173-vdecsys", "syscon"
+	- "mediatek,mt8183-vdecsys", "syscon"
 - #clock-cells: Must be 1
 
 The vdecsys controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
index 8515453..c9faa62 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
@@ -9,6 +9,7 @@
 	- "mediatek,mt2712-vencsys", "syscon"
 	- "mediatek,mt6797-vencsys", "syscon"
 	- "mediatek,mt8173-vencsys", "syscon"
+	- "mediatek,mt8183-vencsys", "syscon"
 - #clock-cells: Must be 1
 
 The vencsys controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.txt b/Documentation/devicetree/bindings/connector/usb-connector.txt
index 8855bfc..c55f1cf 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.txt
+++ b/Documentation/devicetree/bindings/connector/usb-connector.txt
@@ -15,6 +15,20 @@
 - type: size of the connector, should be specified in case of USB-A, USB-B
   non-fullsize connectors: "mini", "micro".
 
+Optional properties for usb-b-connector:
+- id-gpios: an input GPIO for the USB ID pin.
+- vbus-gpios: an input GPIO for the USB VBUS pin, used to detect the presence
+  of VBUS 5V.
+  see gpio/gpio.txt.
+- vbus-supply: a phandle to the regulator for USB VBUS, needed when host
+  mode or dual-role mode is supported.
+  In particular, if an output GPIO is used to control the VBUS regulator,
+  it should be modeled as a fixed regulator.
+  see regulator/fixed-regulator.yaml
+- pinctrl-names : an optional pinctrl state named "default"
+- pinctrl-0 : pin control group
+  see pinctrl/pinctrl-bindings.txt
+
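+Example of a usb-b-connector node (a minimal sketch; the GPIO numbers and the
+regulator phandle are illustrative assumptions, not taken from a specific
+board):
+
+usb_connector: connector {
+	compatible = "usb-b-connector";
+	type = "micro";
+	id-gpios = <&pio 41 GPIO_ACTIVE_HIGH>;
+	vbus-gpios = <&pio 42 GPIO_ACTIVE_HIGH>;
+	vbus-supply = <&usb_vbus_reg>;
+};
+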
 Optional properties for usb-c-connector:
 - power-role: should be one of "source", "sink" or "dual"(DRP) if typec
   connector has power support.
diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it66121.txt b/Documentation/devicetree/bindings/display/bridge/ite,it66121.txt
new file mode 100644
index 0000000..0e1e302
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/ite,it66121.txt
@@ -0,0 +1,37 @@
+ITE it66121 HDMI bridge bindings
+
+Required properties:
+	- compatible : "ite,it66121".
+	- reg : I2C address, use 0x4c or 0x4d
+	- vcn33-supply : Digital Supply Voltage (3.3V)
+	- vcn18-supply : I/O Supply Voltage (1.8V)
+	- vrf12-supply : Analog Supply Voltage (1.2V)
+	- interrupts: interrupt specifier of INT pin
+	- reset-gpios: gpio specifier of RESET pin (active low)
+	- pclk-dual-edge: boolean; pixel data is latched on both edges of the
+			  pixel clock
+	- video interfaces: The device node can contain one video interface
+			    port node for the HDMI encoder according to [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+
+	it66121hdmitx: it66121hdmitx@4c {
+		compatible = "ite,it66121";
+		pinctrl-names = "default";
+		pinctrl-0 = <&ite_pins_default>;
+		vcn33-supply = <&mt6358_vcn33_wifi_reg>;
+		vcn18-supply = <&mt6358_vcn18_reg>;
+		vrf12-supply = <&mt6358_vrf12_reg>;
+		reset-gpios = <&pio 160 GPIO_ACTIVE_LOW>;
+		interrupt-parent = <&pio>;
+		interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+		reg = <0x4c>;
+		pclk-dual-edge;
+
+		port {
+			it66121_in: endpoint {
+				remote-endpoint = <&dpi_out>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index 8469de5..8a9c5b9 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -27,19 +27,23 @@
 
 Required properties (all function blocks):
 - compatible: "mediatek,<chip>-disp-<function>", one of
-	"mediatek,<chip>-disp-ovl"   - overlay (4 layers, blending, csc)
-	"mediatek,<chip>-disp-rdma"  - read DMA / line buffer
-	"mediatek,<chip>-disp-wdma"  - write DMA
-	"mediatek,<chip>-disp-color" - color processor
-	"mediatek,<chip>-disp-aal"   - adaptive ambient light controller
-	"mediatek,<chip>-disp-gamma" - gamma correction
-	"mediatek,<chip>-disp-merge" - merge streams from two RDMA sources
-	"mediatek,<chip>-disp-split" - split stream to two encoders
-	"mediatek,<chip>-disp-ufoe"  - data compression engine
-	"mediatek,<chip>-dsi"        - DSI controller, see mediatek,dsi.txt
-	"mediatek,<chip>-dpi"        - DPI controller, see mediatek,dpi.txt
-	"mediatek,<chip>-disp-mutex" - display mutex
-	"mediatek,<chip>-disp-od"    - overdrive
+	"mediatek,<chip>-disp-ovl"   		- overlay (4 layers, blending, csc)
+	"mediatek,<chip>-disp-ovl-2l"           - overlay (2 layers, blending, csc)
+	"mediatek,<chip>-disp-rdma"  		- read DMA / line buffer
+	"mediatek,<chip>-disp-rdma1"            - function is same with RDMA, fifo size is different
+	"mediatek,<chip>-disp-wdma"  		- write DMA
+	"mediatek,<chip>-disp-ccorr"            - color correction
+	"mediatek,<chip>-disp-color" 		- color processor
+	"mediatek,<chip>-disp-dither"           - dither
+	"mediatek,<chip>-disp-aal"   		- adaptive ambient light controller
+	"mediatek,<chip>-disp-gamma" 		- gamma correction
+	"mediatek,<chip>-disp-merge" 		- merge streams from two RDMA sources
+	"mediatek,<chip>-disp-split" 		- split stream to two encoders
+	"mediatek,<chip>-disp-ufoe"  		- data compression engine
+	"mediatek,<chip>-dsi"        		- DSI controller, see mediatek,dsi.txt
+	"mediatek,<chip>-dpi"        		- DPI controller, see mediatek,dpi.txt
+	"mediatek,<chip>-disp-mutex" 		- display mutex
+	"mediatek,<chip>-disp-od"    		- overdrive
   the supported chips are mt2701, mt2712 and mt8173.
 - reg: Physical base address and length of the function block register space
 - interrupts: The interrupt signal from the function block (required, except for
@@ -49,6 +53,7 @@
   For most function blocks this is just a single clock input. Only the DSI and
   DPI controller nodes have multiple clock inputs. These are documented in
   mediatek,dsi.txt and mediatek,dpi.txt, respectively.
+  For the MT8183 mutex, the hardware is always free-running and has no
+  clock control.
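+  As an illustration, an MT8183 mutex node can then simply omit the clocks
+  property (a minimal sketch; the register address and interrupt below are
+  assumptions):
+	mutex: mutex@14016000 {
+		compatible = "mediatek,mt8183-disp-mutex";
+		reg = <0 0x14016000 0 0x1000>;
+		interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_LOW>;
+	};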
 
 Required properties (DMA function blocks):
 - compatible: Should be one of
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,display.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,display.txt
new file mode 100644
index 0000000..951d2a8
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,display.txt
@@ -0,0 +1,21 @@
+MediaTek Display Device
+============================
+
+The MediaTek Display Device provides power control to the system.
+
+Required Properties:
+
+- compatible: Should be one of:
+	- "mediatek,mt8183-display"
+
+The Display Device power names are defined in
+include/dt-bindings/power/mt*-power.h
+
+
+Example:
+
+display_components: dispsys@14000000 {
+	compatible = "mediatek,mt8183-display";
+	reg = <0 0x14000000 0 0x1000>;
+	power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+};
\ No newline at end of file
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
index fadf327..a19a6cc 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
@@ -7,7 +7,7 @@
 
 Required properties:
 - compatible: "mediatek,<chip>-dsi"
-  the supported chips are mt2701 and mt8173.
+  the supported chips are mt2701, mt8173 and mt8183.
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clocks
@@ -26,7 +26,7 @@
 
 Required properties:
 - compatible: "mediatek,<chip>-mipi-tx"
-  the supported chips are mt2701 and mt8173.
+  the supported chips are mt2701, mt8173 and mt8183.
 - reg: Physical base address and length of the controller's registers
 - clocks: PLL reference clock
 - clock-output-names: name of the output clock line to the DSI encoder
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
index 18a2cde..3a3cd88 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
@@ -1,56 +1,147 @@
-ARM Mali Midgard GPU
-====================
+#
+# (C) COPYRIGHT 2013-2019 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+* ARM Mali Midgard devices
+
 
 Required properties:
 
-- compatible :
-  * Must contain one of the following:
-    + "arm,mali-t604"
-    + "arm,mali-t624"
-    + "arm,mali-t628"
-    + "arm,mali-t720"
-    + "arm,mali-t760"
-    + "arm,mali-t820"
-    + "arm,mali-t830"
-    + "arm,mali-t860"
-    + "arm,mali-t880"
-  * which must be preceded by one of the following vendor specifics:
-    + "amlogic,meson-gxm-mali"
-    + "rockchip,rk3288-mali"
-    + "rockchip,rk3399-mali"
-
+- compatible : Should be "arm,mali<chip>", followed by progressively less
+specific strings formed by replacing trailing digits with 'x' (down to
+"arm,malit<Major>xx"), and must end with "arm,mali-midgard"; the last entry
+is not optional.
 - reg : Physical base address of the device and length of the register area.
-
-- interrupts : Contains the three IRQ lines required by Mali Midgard devices.
-
+- interrupts : Contains the three IRQ lines required by T-6xx devices.
 - interrupt-names : Contains the names of IRQ resources in the order they were
-  provided in the interrupts property. Must contain: "job", "mmu", "gpu".
+provided in the interrupts property. Must contain: "JOB", "MMU", "GPU".
 
+Optional:
 
-Optional properties:
+- clocks : One or more pairs of phandle to clock and clock specifier
+           for the Mali device. The order is important: the first clock
+           shall correspond to the "clk_mali" source, while the second clock
+           (which is optional) shall correspond to the "shadercores" source.
+- clock-names : Shall be set to: "clk_mali", "shadercores".
+- mali-supply : Phandle to the top level regulator for the Mali device.
+                Refer to
+Documentation/devicetree/bindings/regulator/regulator.txt for details.
+- shadercores-supply : Phandle to shader cores regulator for the Mali device.
+                       This is optional.
+- operating-points-v2 : Refer to Documentation/devicetree/bindings/power/mali-opp.txt
+for details.
+- quirks_jm : Used to write to the JM_CONFIG register or equivalent.
+	  Should be used with care. Options passed here are used to override
+	  certain default behavior. Note: This will override the
+	  'idvs-group-size' devicetree field and the module param
+	  'corestack_driver_control'; therefore, if 'quirks_jm' is used, the
+	  'idvs-group-size' and 'corestack_driver_control' values should be
+	  incorporated into 'quirks_jm'.
+- quirks_sc : Used to write to the SHADER_CONFIG register.
+	  Should be used with care. Options passed here are used to override
+	  certain default behavior.
+- quirks_tiler : Used to write to the TILER_CONFIG register.
+	  Should be used with care. Options passed here are used to
+	  disable or override certain default behavior.
+- quirks_mmu : Used to write to the L2_CONFIG register.
+	  Should be used with care. Options passed here are used to
+	  disable or override certain default behavior.
+- power_model : Sets the power model parameters. Defined power models include:
+	  "mali-simple-power-model", "mali-g51-power-model", "mali-g52-power-model",
+	  "mali-g52_r1-power-model", "mali-g71-power-model", "mali-g72-power-model",
+	  "mali-g76-power-model", "mali-g77-power-model", "mali-tnax-power-model"
+	  and "mali-tbex-power-model".
+	- mali-simple-power-model: this model derives the GPU power usage based
+	  on the GPU voltage scaled by the system temperature. Note: it was
+	  designed for the Juno platform, and may not be suitable for others.
+		- compatible: Should be "arm,mali-simple-power-model"
+		- dynamic-coefficient: Coefficient, in pW/(Hz V^2), which is
+		  multiplied by v^2*f to calculate the dynamic power consumption.
+		- static-coefficient: Coefficient, in uW/V^3, which is
+		  multiplied by v^3 to calculate the static power consumption.
+		- ts: An array containing coefficients for the temperature
+		  scaling factor. This is used to scale the static power by a
+		  factor of tsf/1000000,
+		  where tsf = ts[3]*T^3 + ts[2]*T^2 + ts[1]*T + ts[0],
+		  and T = temperature in degrees.
+		- thermal-zone: A string identifying the thermal zone used for
+		  the GPU
+		- temp-poll-interval-ms: the interval at which the system
+		  temperature is polled
+	- mali-g*-power-model(s): unless stated otherwise, these models derive
+	  the GPU power usage based on performance counters, so they are more
+	  accurate.
+		- compatible: Should be, as examples, "arm,mali-g51-power-model" /
+		  "arm,mali-g72-power-model".
+		- scale: the dynamic power calculated by the power model is
+		  multiplied by a factor of 'scale'. This value should be
+		  chosen to match a particular implementation.
+		- min_sample_cycles: Fall back to the simple power model if the
+		  number of GPU cycles for a given counter dump is less than
+		  'min_sample_cycles'. The default value of this should suffice.
+	* Note: when IPA is used, two separate power models (simple and counter-based)
+	  are used at different points so care should be taken to configure
+	  both power models in the device tree (specifically dynamic-coefficient,
+	  static-coefficient and scale) to best match the platform.
+- system-coherency : Sets the coherency protocol to be used for coherent
+		     accesses made from the GPU.
+		     If not set then no coherency is used.
+	- 0  : ACE-Lite
+	- 1  : ACE
+	- 31 : No coherency
+- ipa-model : Sets the IPA model to be used for power management. GPU probe will fail if the
+	      model is not found in the registered models list. If no model is specified here,
+	      a gpu-id based model is picked if available, otherwise the default model is used.
+	- mali-simple-power-model: Default model used on Mali GPUs.
+- protected-mode-switcher : Phandle to a device implementing protected mode switching functionality.
+Refer to Documentation/devicetree/bindings/arm/smc-protected-mode-switcher.txt for one implementation.
+-  idvs-group-size : Override the IDVS group size value. Tasks are sent to
+		     cores in groups of N + 1, so e.g. 0xF means 16 tasks.
+		     Valid values are between 0 and 0x3F (inclusive).
+-  l2-size : Override the L2 cache size on GPUs that support it
+-  l2-hash : Override the L2 hash function on GPUs that support it
 
-- clocks : Phandle to clock for the Mali Midgard device.
+Example for a Mali GPU with 1 clock and no regulators:
 
-- mali-supply : Phandle to regulator for the Mali device. Refer to
-  Documentation/devicetree/bindings/regulator/regulator.txt for details.
+gpu@fc010000 {
+	compatible = "arm,malit602", "arm,malit60x", "arm,malit6xx", "arm,mali-midgard";
+	reg = <0xfc010000 0x4000>;
+	interrupts = <0 36 4>, <0 37 4>, <0 38 4>;
+	interrupt-names = "JOB", "MMU", "GPU";
 
-- operating-points-v2 : Refer to Documentation/devicetree/bindings/opp/opp.txt
-  for details.
-
-
-Example for a Mali-T760:
-
-gpu@ffa30000 {
-	compatible = "rockchip,rk3288-mali", "arm,mali-t760";
-	reg = <0xffa30000 0x10000>;
-	interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
-		     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
-		     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
-	interrupt-names = "job", "mmu", "gpu";
-	clocks = <&cru ACLK_GPU>;
-	mali-supply = <&vdd_gpu>;
+	clocks = <&pclk_mali>;
+	clock-names = "clk_mali";
+	mali-supply = <&vdd_mali>;
 	operating-points-v2 = <&gpu_opp_table>;
-	power-domains = <&power RK3288_PD_GPU>;
+	power_model@0 {
+		compatible = "arm,mali-simple-power-model";
+		static-coefficient = <2427750>;
+		dynamic-coefficient = <4687>;
+		ts = <20000 2000 (-20) 2>;
+		thermal-zone = "gpu";
+	};
+	power_model@1 {
+		compatible = "arm,mali-g71-power-model";
+		scale = <5>;
+	};
+
+	idvs-group-size = <0x7>;
+	l2-size = /bits/ 8 <0x10>;
+	l2-hash = /bits/ 8 <0x04>;
 };
 
 gpu_opp_table: opp_table0 {
@@ -85,3 +176,41 @@
 		opp-microvolt = <912500>;
 	};
 };
+
+Example for a Mali GPU with 2 clocks and 2 regulators:
+
+gpu: gpu@6e000000 {
+	compatible = "arm,mali-midgard";
+	reg = <0x0 0x6e000000 0x0 0x200000>;
+	interrupts = <0 168 4>, <0 168 4>, <0 168 4>;
+	interrupt-names = "JOB", "MMU", "GPU";
+	clocks = <&clk_mali 0>, <&clk_mali 1>;
+	clock-names = "clk_mali", "shadercores";
+	mali-supply = <&supply0_3v3>;
+	shadercores-supply = <&supply1_3v3>;
+	system-coherency = <31>;
+	operating-points-v2 = <&gpu_opp_table>;
+};
+
+gpu_opp_table: opp_table0 {
+	compatible = "operating-points-v2", "operating-points-v2-mali";
+
+	opp@0 {
+		opp-hz = /bits/ 64 <50000000>;
+		opp-hz-real = /bits/ 64 <50000000>, /bits/ 64 <45000000>;
+		opp-microvolt = <820000>, <800000>;
+		opp-core-mask = /bits/ 64 <0xf>;
+	};
+	opp@1 {
+		opp-hz = /bits/ 64 <40000000>;
+		opp-hz-real = /bits/ 64 <40000000>, /bits/ 64 <35000000>;
+		opp-microvolt = <720000>, <700000>;
+		opp-core-mask = /bits/ 64 <0x7>;
+	};
+	opp@2 {
+		opp-hz = /bits/ 64 <30000000>;
+		opp-hz-real = /bits/ 64 <30000000>, /bits/ 64 <25000000>;
+		opp-microvolt = <620000>, <700000>;
+		opp-core-mask = /bits/ 64 <0x3>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt b/Documentation/devicetree/bindings/i2c/i2c-mtk.txt
index e199695..aac7b56 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mtk.txt
@@ -10,7 +10,9 @@
       "mediatek,mt6589-i2c": for MediaTek MT6589
       "mediatek,mt7622-i2c": for MediaTek MT7622
       "mediatek,mt7623-i2c", "mediatek,mt6577-i2c": for MediaTek MT7623
+      "mediatek,mt7629-i2c", "mediatek,mt2712-i2c": for MediaTek MT7629
       "mediatek,mt8173-i2c": for MediaTek MT8173
+      "mediatek,mt8516-i2c", "mediatek,mt2712-i2c": for MediaTek MT8516
   - reg: physical base address of the controller and dma base, length of memory
     mapped region.
   - interrupts: interrupt number to the cpu.
diff --git a/Documentation/devicetree/bindings/input/mtk-pmic-keys.txt b/Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
index 2888d07..858f78e 100644
--- a/Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
+++ b/Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
@@ -1,15 +1,18 @@
-MediaTek MT6397/MT6323 PMIC Keys Device Driver
+MediaTek MT6397/MT6392/MT6323 PMIC Keys Device Driver
 
-There are two key functions provided by MT6397/MT6323 PMIC, pwrkey
+There are two key functions provided by MT6397/MT6392/MT6323 PMIC, pwrkey
 and homekey. The key functions are defined as the subnode of the function
 node provided by MT6397/MT6323 PMIC that is being defined as one kind
 of Muti-Function Device (MFD)
 
-For MT6397/MT6323 MFD bindings see:
+For MT6397/MT6392/MT6323 MFD bindings see:
 Documentation/devicetree/bindings/mfd/mt6397.txt
 
 Required properties:
-- compatible: "mediatek,mt6397-keys" or "mediatek,mt6323-keys"
+- compatible: Should be one of:
+	- "mediatek,mt6397-keys"
+	- "mediatek,mt6392-keys"
+	- "mediatek,mt6323-keys"
 - linux,keycodes: See Documentation/devicetree/bindings/input/keys.txt
 
 Optional Properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
index f7e95c5..4f3ae385 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
@@ -3,6 +3,7 @@
 Required properties:
 
  - compatible		: Should be "goodix,gt1151"
+				 or "goodix,gt5688"
 				 or "goodix,gt911"
 				 or "goodix,gt9110"
 				 or "goodix,gt912"
@@ -19,6 +20,9 @@
 			  interrupt gpio pin as output to reset the device.
  - reset-gpios		: GPIO pin used for reset
 
+ - AVDD28-supply	: Analog power supply regulator on AVDD28 pin
+ - VDDIO-supply		: GPIO power supply regulator on VDDIO pin
+
  - touchscreen-inverted-x  : X axis is inverted (boolean)
  - touchscreen-inverted-y  : Y axis is inverted (boolean)
  - touchscreen-swapped-x-y : X and Y axis are swapped (boolean)
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
index 33a98eb..0e312fe 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
@@ -1,14 +1,18 @@
-+Mediatek MT65xx/MT67xx/MT81xx sysirq
+MediaTek sysirq
 
-Mediatek SOCs sysirq support controllable irq inverter for each GIC SPI
+MediaTek SoCs' sysirq supports a controllable IRQ inverter for each GIC SPI
 interrupt.
 
 Required properties:
 - compatible: should be
+	"mediatek,mt8516-sysirq", "mediatek,mt6577-sysirq": for MT8516
+	"mediatek,mt8183-sysirq", "mediatek,mt6577-sysirq": for MT8183
 	"mediatek,mt8173-sysirq", "mediatek,mt6577-sysirq": for MT8173
 	"mediatek,mt8135-sysirq", "mediatek,mt6577-sysirq": for MT8135
 	"mediatek,mt8127-sysirq", "mediatek,mt6577-sysirq": for MT8127
 	"mediatek,mt7622-sysirq", "mediatek,mt6577-sysirq": for MT7622
+	"mediatek,mt7623-sysirq", "mediatek,mt6577-sysirq": for MT7623
+	"mediatek,mt7629-sysirq", "mediatek,mt6577-sysirq": for MT7629
 	"mediatek,mt6795-sysirq", "mediatek,mt6577-sysirq": for MT6795
 	"mediatek,mt6797-sysirq", "mediatek,mt6577-sysirq": for MT6797
 	"mediatek,mt6765-sysirq", "mediatek,mt6577-sysirq": for MT6765
diff --git a/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt b/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
index df5db73..b409657 100644
--- a/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
@@ -11,10 +11,23 @@
                |
               m4u (Multimedia Memory Management Unit)
                |
+          +--------+
+          |        |
+      gals0-rx   gals1-rx    (Global Async Local Sync rx)
+          |        |
+          |        |
+      gals0-tx   gals1-tx    (Global Async Local Sync tx)
+          |        |          Some SoCs may have GALS.
+          +--------+
+               |
            SMI Common(Smart Multimedia Interface Common)
                |
        +----------------+-------
        |                |
+       |             gals-rx        There may be GALS in some larbs.
+       |                |
+       |                |
+       |             gals-tx
        |                |
    SMI larb0        SMI larb1   ... SoCs have several SMI local arbiter(larb).
    (display)         (vdec)
@@ -36,24 +49,35 @@
 like display, video decode, and camera. And there are different ports
 in each larb. Take a example, There are many ports like MC, PP, VLD in the
 video decode local arbiter, all these ports are according to the video HW.
+  In some SoCs, there may be a GALS(Global Async Local Sync) module between
+smi-common and m4u, and an additional GALS module between smi-larb and
+smi-common. GALS can be seen as an "asynchronous fifo" which helps
+synchronize modules running at different clock frequencies.
 
 Required properties:
 - compatible : must be one of the following string:
 	"mediatek,mt2701-m4u" for mt2701 which uses generation one m4u HW.
 	"mediatek,mt2712-m4u" for mt2712 which uses generation two m4u HW.
 	"mediatek,mt8173-m4u" for mt8173 which uses generation two m4u HW.
+	"mediatek,mt8183-m4u" for mt8183 which uses generation two m4u HW.
 - reg : m4u register base and size.
 - interrupts : the interrupt of m4u.
 - clocks : must contain one entry for each clock-names.
-- clock-names : must be "bclk", It is the block clock of m4u.
+- clock-names : Only 1 optional clock:
+  - "bclk": the block clock of m4u.
+  Here is the list of SoCs that require this "bclk":
+  - mt2701, mt2712, mt7623 and mt8173.
+  Note that if there is no "bclk", the m4u uses the EMI clock, which is always
+  enabled before the kernel starts.
 - mediatek,larbs : List of phandle to the local arbiters in the current Socs.
 	Refer to bindings/memory-controllers/mediatek,smi-larb.txt. It must sort
 	according to the local arbiter index, like larb0, larb1, larb2...
 - iommu-cells : must be 1. This is the mtk_m4u_id according to the HW.
 	Specifies the mtk_m4u_id as defined in
 	dt-binding/memory/mt2701-larb-port.h for mt2701,
-	dt-binding/memory/mt2712-larb-port.h for mt2712, and
-	dt-binding/memory/mt8173-larb-port.h for mt8173.
+	dt-binding/memory/mt2712-larb-port.h for mt2712,
+	dt-binding/memory/mt8173-larb-port.h for mt8173, and
+	dt-binding/memory/mt8183-larb-port.h for mt8183.
 
 Example:
 	iommu: iommu@10205000 {
diff --git a/Documentation/devicetree/bindings/leds/leds-apa102.txt b/Documentation/devicetree/bindings/leds/leds-apa102.txt
new file mode 100644
index 0000000..c2bebd9
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-apa102.txt
@@ -0,0 +1,66 @@
+Shiji Lighting - apa102 LED driver
+-------------------------------------------------
+
+This LED is a three color RGB LED with 32 levels of brightness adjustment that can
+be cascaded so that multiple LEDs can be set with a single command.
+
+Required properties:
+- compatible: "shiji,apa102"
+
+Property rules described in Documentation/devicetree/bindings/spi/spi-bus.txt
+apply. In particular, "reg" and "spi-max-frequency" properties must be given.
+
+LED sub-node properties:
+- reg :
+	the LED number. Must be contiguously allocated between 0 and the number of LEDs minus 1.
+- label :
+	see Documentation/devicetree/bindings/leds/common.txt
+- linux,default-trigger : (optional)
+	see Documentation/devicetree/bindings/leds/common.txt
+
+Example
+-------
+
+led-controller@0 {
+	compatible = "shiji,apa102";
+	reg = <0>;
+	spi-max-frequency = <1000000>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	led@0 {
+		reg = <0>;
+		label = "led1";
+	};
+
+	led@1 {
+		reg = <1>;
+		label = "led2";
+	};
+
+	led@2 {
+		reg = <2>;
+		label = "led3";
+	};
+
+	led@3 {
+		reg = <3>;
+		label = "led4";
+	};
+
+	led@4 {
+		reg = <4>;
+		label = "led5";
+	};
+
+	led@5 {
+		reg = <5>;
+		label = "led6";
+	};
+
+	led@6 {
+		reg = <6>;
+		label = "led7";
+	};
+
+	...
+};
diff --git a/Documentation/devicetree/bindings/mailbox/mtk-gce.txt b/Documentation/devicetree/bindings/mailbox/mtk-gce.txt
index 7d72b21..d48282d 100644
--- a/Documentation/devicetree/bindings/mailbox/mtk-gce.txt
+++ b/Documentation/devicetree/bindings/mailbox/mtk-gce.txt
@@ -9,7 +9,7 @@
 mailbox.txt for generic information about mailbox device-tree bindings.
 
 Required properties:
-- compatible: Must be "mediatek,mt8173-gce"
+- compatible: can be "mediatek,mt8173-gce" or "mediatek,mt8183-gce"
 - reg: Address range of the GCE unit
 - interrupts: The interrupt signal from the GCE block
 - clock: Clocks according to the common clock binding
@@ -21,15 +21,24 @@
 	priority: Priority of GCE thread.
 	atomic_exec: GCE processing continuous packets of commands in atomic
 		way.
+- #subsys-cells: Should be 3.
+	<&phandle subsys_number start_offset size>
+	phandle: Label name of a gce node.
+	subsys_number: specify the sub-system id corresponding to the
+		       register address.
+	start_offset: the start offset of the register range that GCE can
+		      access.
+	size: the total size of the register range that GCE can access.
 
 Required properties for a client device:
 - mboxes: Client use mailbox to communicate with GCE, it should have this
   property and list of phandle, mailbox specifiers.
-- mediatek,gce-subsys: u32, specify the sub-system id which is corresponding
-  to the register address.
+Optional properties for a client device:
+- mediatek,gce-client-reg: Specify the sub-system id corresponding to the
+  register address; it should have this property and a list of phandle,
+  sub-system specifiers.
 
-Some vaules of properties are defined in 'dt-bindings/gce/mt8173-gce.h'. Such as
-sub-system ids, thread priority, event ids.
+Some values of properties are defined in 'dt-bindings/gce/mt8173-gce.h'
+or 'dt-bindings/gce/mt8183-gce.h', such as sub-system ids, thread priority and
+event ids.
 
 Example:
 
@@ -39,8 +48,8 @@
 		interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_LOW>;
 		clocks = <&infracfg CLK_INFRA_GCE>;
 		clock-names = "gce";
-		thread-num = CMDQ_THR_MAX_COUNT;
 		#mbox-cells = <3>;
+		#subsys-cells = <3>;
 	};
 
 Example for a client device:
@@ -49,9 +58,9 @@
 		compatible = "mediatek,mt8173-mmsys";
 		mboxes = <&gce 0 CMDQ_THR_PRIO_LOWEST 1>,
 			 <&gce 1 CMDQ_THR_PRIO_LOWEST 1>;
-		mediatek,gce-subsys = <SUBSYS_1400XXXX>;
 		mutex-event-eof = <CMDQ_EVENT_MUTEX0_STREAM_EOF
 				CMDQ_EVENT_MUTEX1_STREAM_EOF>;
-
+		mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x3000 0x1000>,
+					  <&gce SUBSYS_1401XXXX 0x2000 0x100>;
 		...
 	};
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt
index 615abdd..2add09b 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt
@@ -2,9 +2,10 @@
 
 The hardware block diagram please check bindings/iommu/mediatek,iommu.txt
 
-Mediatek SMI have two generations of HW architecture, mt2712 and mt8173 use
-the second generation of SMI HW while mt2701 uses the first generation HW of
-SMI.
+Mediatek SMI has two generations of HW architecture; the list below shows
+which generation each SoC uses:
+generation 1: mt2701 and mt7623.
+generation 2: mt2712, mt8173 and mt8183.
 
 There's slight differences between the two SMI, for generation 2, the
 register which control the iommu port is at each larb's register base. But
@@ -18,6 +19,7 @@
 	"mediatek,mt2701-smi-common"
 	"mediatek,mt2712-smi-common"
 	"mediatek,mt8173-smi-common"
+	"mediatek,mt8183-smi-common", "syscon"
 - reg : the register and size of the SMI block.
 - power-domains : a phandle to the power domain of this local arbiter.
 - clocks : Must contain an entry for each entry in clock-names.
@@ -29,6 +31,10 @@
 	    They may be the same if both source clocks are the same.
   - "async" : asynchronous clock, it help transform the smi clock into the emi
 	      clock domain, this clock is only needed by generation 1 smi HW.
+  and these 2 optional clocks for generation 2 smi HW:
+  - "gals0": the path0 clock of GALS(Global Async Local Sync).
+  - "gals1": the path1 clock of GALS(Global Async Local Sync).
+  Here is the list of SoCs that have this GALS: mt8183.
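+  As a sketch for mt8183 (the clock macros below are assumptions taken from
+  dt-bindings/clock/mt8183-clk.h, shown only to illustrate the clock-names
+  order):
+	clocks = <&mmsys CLK_MM_SMI_COMMON>, <&mmsys CLK_MM_SMI_COMMON>,
+		 <&mmsys CLK_MM_GALS_COMM0>, <&mmsys CLK_MM_GALS_COMM1>;
+	clock-names = "apb", "smi", "gals0", "gals1";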
 
 Example:
 	smi_common: smi@14022000 {
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
index 083155c..2b9500a 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
@@ -7,6 +7,7 @@
 		"mediatek,mt2701-smi-larb"
 		"mediatek,mt2712-smi-larb"
 		"mediatek,mt8173-smi-larb"
+		"mediatek,mt8183-smi-larb"
 - reg : the register and size of this local arbiter.
 - mediatek,smi : a phandle to the smi_common node.
 - power-domains : a phandle to the power domain of this local arbiter.
@@ -15,6 +16,9 @@
   - "apb" : Advanced Peripheral Bus clock, It's the clock for setting
 	    the register.
   - "smi" : It's the clock for transfer data and command.
+  and this optional clock name:
+  - "gals": the clock for GALS(Global Async Local Sync).
+  Here is the list of SoCs that have this GALS: mt8183.
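+  As a sketch for an mt8183 larb (the clock macros below are assumptions
+  taken from dt-bindings/clock/mt8183-clk.h):
+	clocks = <&mmsys CLK_MM_SMI_LARB0>, <&mmsys CLK_MM_SMI_LARB0>,
+		 <&mmsys CLK_MM_GALS_CCU2MM>;
+	clock-names = "apb", "smi", "gals";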
 
 Required property for mt2701 and mt2712:
 - mediatek,larb-id :the hardware id of this larb.
diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
index 0ebd08a..9de25c8 100644
--- a/Documentation/devicetree/bindings/mfd/mt6397.txt
+++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
@@ -17,22 +17,29 @@
 This document describes the binding for MFD device and its sub module.
 
 Required properties:
-compatible: "mediatek,mt6397" or "mediatek,mt6323"
+compatible:
+	"mediatek,mt6323" for PMIC MT6323
+	"mediatek,mt6358" for PMIC MT6358
+	"mediatek,mt6397" for PMIC MT6397
 
 Optional subnodes:
 
 - rtc
 	Required properties:
-		- compatible: "mediatek,mt6397-rtc"
+		- compatible: "mediatek,mt6397-rtc" or "mediatek,mt6358-rtc"
 - regulators
 	Required properties:
 		- compatible: "mediatek,mt6397-regulator"
 	see Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
+		- compatible: "mediatek,mt6358-regulator"
+	see Documentation/devicetree/bindings/regulator/mt6358-regulator.txt
+		- compatible: "mediatek,mt6392-regulator"
+	see Documentation/devicetree/bindings/regulator/mt6392-regulator.txt
 		- compatible: "mediatek,mt6323-regulator"
 	see Documentation/devicetree/bindings/regulator/mt6323-regulator.txt
 - codec
 	Required properties:
-		- compatible: "mediatek,mt6397-codec"
+		- compatible: "mediatek,mt6397-codec" or "mediatek,mt6358-sound"
 - clk
 	Required properties:
 		- compatible: "mediatek,mt6397-clk"
@@ -43,7 +50,10 @@
 
 - keys
 	Required properties:
-		- compatible: "mediatek,mt6397-keys" or "mediatek,mt6323-keys"
+		- compatible: Should be one of:
+			- "mediatek,mt6397-keys"
+			- "mediatek,mt6392-keys"
+			- "mediatek,mt6323-keys"
 	see Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
 
 Example:
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
index f33467a..a698c96 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
@@ -10,10 +10,13 @@
 - compatible: value should be either of the following.
 	"mediatek,mt8135-mmc": for mmc host ip compatible with mt8135
 	"mediatek,mt8173-mmc": for mmc host ip compatible with mt8173
+	"mediatek,mt8183-mmc": for mmc host ip compatible with mt8183
+	"mediatek,mt8516-mmc": for mmc host ip compatible with mt8516
 	"mediatek,mt2701-mmc": for mmc host ip compatible with mt2701
 	"mediatek,mt2712-mmc": for mmc host ip compatible with mt2712
 	"mediatek,mt7622-mmc": for MT7622 SoC
 	"mediatek,mt7623-mmc", "mediatek,mt2701-mmc": for MT7623 SoC
+	"mediatek,mt7620-mmc", for MT7621 SoC (and others)
 
 - reg: physical base address of the controller and length
 - interrupts: Should contain MSDC interrupt number
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
index e7d6f81..205be98 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
@@ -11,6 +11,7 @@
 	"mediatek,mt8127-pinctrl", compatible with mt8127 pinctrl.
 	"mediatek,mt8135-pinctrl", compatible with mt8135 pinctrl.
 	"mediatek,mt8173-pinctrl", compatible with mt8173 pinctrl.
+	"mediatek,mt8516-pinctrl", compatible with mt8516 pinctrl.
 - pins-are-numbered: Specify the subnodes are using numbered pinmux to
   specify pins.
 - gpio-controller : Marks the device node as a gpio controller.
diff --git a/Documentation/devicetree/bindings/power/mali-opp.txt b/Documentation/devicetree/bindings/power/mali-opp.txt
new file mode 100644
index 0000000..49ed773
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/mali-opp.txt
@@ -0,0 +1,202 @@
+#
+# (C) COPYRIGHT 2017, 2019 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+* ARM Mali Midgard OPP
+
+* OPP Table Node
+
+This describes the OPPs belonging to a device. This node can have the following
+properties:
+
+Required properties:
+- compatible: Allow OPPs to express their compatibility. It should be:
+  "operating-points-v2", "operating-points-v2-mali".
+
+- OPP nodes: One or more OPP nodes describing voltage-current-frequency
+  combinations. Their name isn't significant but their phandle can be used to
+  reference an OPP.
+
+* OPP Node
+
+This defines voltage-current-frequency combinations along with other related
+properties.
+
+Required properties:
+- opp-hz: Nominal frequency in Hz, expressed as a 64-bit big-endian integer.
+  This should be treated as a relative performance measurement, taking both GPU
+  frequency and core mask into account.
+
+Optional properties:
+- opp-hz-real: List of one or two real frequencies in Hz, expressed as 64-bit
+  big-endian integers. They shall correspond to the clocks declared under
+  the Mali device node, and follow the same order.
+
+- opp-core-mask: Shader core mask. If neither this nor opp-core-count is
+  present then all shader cores will be used for this OPP.
+
+- opp-core-count: Number of cores to use for this OPP. If this is present then
+  the driver will build a core mask using the available core mask provided by
+  the GPU hardware.
+
+  If neither this nor opp-core-mask is present then all shader cores will be
+  used for this OPP.
+
+  If both this and opp-core-mask are present then opp-core-mask is ignored.
+
+- opp-microvolt: List of one or two voltages in micro Volts. They shall correspond
+  to the regulators declared under the Mali device node, and follow the order:
+  "toplevel", "shadercores".
+
+  A single regulator's voltage is specified with an array of size one or three.
+  Single entry is for target voltage and three entries are for <target min max>
+  voltages.
+
+  Entries for multiple regulators must be present in the same order as
+  regulators are specified in device's DT node.
+
+- opp-microvolt-<name>: Named opp-microvolt property. This is exactly similar to
+  the above opp-microvolt property, but allows multiple voltage ranges to be
+  provided for the same OPP. At runtime, the platform can pick a <name> and
+  matching opp-microvolt-<name> property will be enabled for all OPPs. If the
+  platform doesn't pick a specific <name> or the <name> doesn't match with any
+  opp-microvolt-<name> properties, then opp-microvolt property shall be used, if
+  present.
+
+- opp-microamp: The maximum current drawn by the device in microamperes
+  considering system specific parameters (such as transients, process, aging,
+  maximum operating temperature range etc.) as necessary. This may be used to
+  set the most efficient regulator operating mode.
+
+  Should only be set if opp-microvolt is set for the OPP.
+
+  Entries for multiple regulators must be present in the same order as
+  regulators are specified in device's DT node. If this property isn't required
+  for some regulators, then it should be marked as zero for them. If it isn't
+  required for any regulator, then this property need not be present.
+
+- opp-microamp-<name>: Named opp-microamp property. Similar to
+  opp-microvolt-<name> property, but for microamp instead.
+
+- clock-latency-ns: Specifies the maximum possible transition latency (in
+  nanoseconds) for switching to this OPP from any other OPP.
+
+- turbo-mode: Marks the OPP to be used only for turbo modes. Turbo mode is
+  available on some platforms, where the device can run over its operating
+  frequency for a short duration of time limited by the device's power, current
+  and thermal limits.
+
+- opp-suspend: Marks the OPP to be used during device suspend. Only one OPP in
+  the table should have this.
+
+- opp-mali-errata-1485982: Marks the OPP to be selected for suspend clock.
+  This will be effective only if MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE is
+  enabled. It needs to be placed in any OPP that has proper suspend clock for
+  the HW workaround.
+
+- opp-supported-hw: This enables us to select only a subset of OPPs from the
+  larger OPP table, based on what version of the hardware we are running on. We
+  still can't have multiple nodes with the same opp-hz value in OPP table.
+
+  It's a user-defined array containing a hierarchy of hardware version numbers,
+  supported by the OPP. For example: a platform with hierarchy of three levels
+  of versions (A, B and C), this field should be like <X Y Z>, where X
+  corresponds to Version hierarchy A, Y corresponds to version hierarchy B and Z
+  corresponds to version hierarchy C.
+
+  Each level of hierarchy is represented by a 32 bit value, and so there can be
+  only 32 different supported versions per hierarchy, i.e. 1 bit per version. A
+  value of 0xFFFFFFFF will enable the OPP for all versions for that hierarchy
+  level. And a value of 0x00000000 will disable the OPP completely, and so we
+  never want that to happen.
+
+  If 32 values aren't sufficient for a version hierarchy, then that version
+  hierarchy can be contained in multiple 32 bit values, i.e. <X Y Z1 Z2>; in the
+  above example, Z1 & Z2 refer to the version hierarchy Z. A short
+  illustrative example follows this property list.
+
+- status: Marks the node enabled/disabled.
+
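+For instance (an illustrative sketch, not taken from a real platform), an OPP
+limited to version bit 0 of a two-level hierarchy could carry:
+
+	opp-supported-hw = <0x1 0xFFFFFFFF>;
+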
+Example for a Juno with 1 clock and 1 regulator:
+
+gpu_opp_table: opp_table0 {
+	compatible = "operating-points-v2", "operating-points-v2-mali";
+
+	opp@112500000 {
+		opp-hz = /bits/ 64 <112500000>;
+		opp-hz-real = /bits/ 64 <450000000>;
+		opp-microvolt = <820000>;
+		opp-core-mask = /bits/ 64 <0x1>;
+		opp-suspend;
+		opp-mali-errata-1485982;
+	};
+	opp@225000000 {
+		opp-hz = /bits/ 64 <225000000>;
+		opp-hz-real = /bits/ 64 <450000000>;
+		opp-microvolt = <820000>;
+		opp-core-count = <2>;
+	};
+	opp@450000000 {
+		opp-hz = /bits/ 64 <450000000>;
+		opp-hz-real = /bits/ 64 <450000000>;
+		opp-microvolt = <820000>;
+		opp-core-mask = /bits/ 64 <0xf>;
+	};
+	opp@487500000 {
+		opp-hz = /bits/ 64 <487500000>;
+		opp-microvolt = <825000>;
+	};
+	opp@525000000 {
+		opp-hz = /bits/ 64 <525000000>;
+		opp-microvolt = <850000>;
+	};
+	opp@562500000 {
+		opp-hz = /bits/ 64 <562500000>;
+		opp-microvolt = <875000>;
+	};
+	opp@600000000 {
+		opp-hz = /bits/ 64 <600000000>;
+		opp-microvolt = <900000>;
+	};
+};
+
+Example for a Juno with 2 clocks and 2 regulators:
+
+gpu_opp_table: opp_table0 {
+	compatible = "operating-points-v2", "operating-points-v2-mali";
+
+	opp@0 {
+		opp-hz = /bits/ 64 <50000000>;
+		opp-hz-real = /bits/ 64 <50000000>, /bits/ 64 <45000000>;
+		opp-microvolt = <820000>, <800000>;
+		opp-core-mask = /bits/ 64 <0xf>;
+	};
+	opp@1 {
+		opp-hz = /bits/ 64 <40000000>;
+		opp-hz-real = /bits/ 64 <40000000>, /bits/ 64 <35000000>;
+		opp-microvolt = <720000>, <700000>;
+		opp-core-mask = /bits/ 64 <0x7>;
+	};
+	opp@2 {
+		opp-hz = /bits/ 64 <30000000>;
+		opp-hz-real = /bits/ 64 <30000000>, /bits/ 64 <25000000>;
+		opp-microvolt = <620000>, <700000>;
+		opp-core-mask = /bits/ 64 <0x3>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/power/mtk-svs.txt b/Documentation/devicetree/bindings/power/mtk-svs.txt
new file mode 100644
index 0000000..6a71992
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/mtk-svs.txt
@@ -0,0 +1,88 @@
+* Mediatek Smart Voltage Scaling (MTK SVS)
+
+This describes the device tree binding for the MTK SVS controller (bank),
+which helps provide optimized CPU/GPU/CCI voltages. This device also needs
+thermal data to calculate the thermal slope, so it can accurately compensate
+the voltages when the temperature changes.
+
+Required properties:
+- compatible:
+  - "mediatek,mt8183-svs" : For MT8183 family of SoCs
+- reg: Address range of the MTK SVS controller.
+- interrupts: IRQ for the MTK SVS controller.
+- clocks, clock-names: Clocks needed for the SVS controller. Required
+                       clocks are:
+		       "main_clk": Main clock needed for register access
+- nvmem-cells: Phandle to the calibration data provided by an nvmem device.
+- nvmem-cell-names: Should be "svs-calibration-data" and "calibration-data"
+
+Subnodes:
+- svs_cpu_little: SVS bank device node of little CPU
+  compatible: "mediatek,mt8183-svs-cpu-little"
+  operating-points-v2: OPP table hooked by SVS little CPU bank.
+		       SVS will optimize the voltage part of this OPP table.
+  vcpu-little-supply: PMIC buck of little CPU
+- svs_cpu_big: SVS bank device node of big CPU
+  compatible: "mediatek,mt8183-svs-cpu-big"
+  operating-points-v2: OPP table hooked by SVS big CPU bank.
+		       SVS will optimize the voltage part of this OPP table.
+  vcpu-big-supply: PMIC buck of big CPU
+- svs_cci: SVS bank device node of CCI
+  compatible: "mediatek,mt8183-svs-cci"
+  operating-points-v2: OPP table hooked by SVS CCI bank.
+		       SVS will optimize the voltage part of this OPP table.
+  vcci-supply: PMIC buck of CCI
+- svs_gpu: SVS bank device node of GPU
+  compatible: "mediatek,mt8183-svs-gpu"
+  operating-points-v2: OPP table hooked by SVS GPU bank.
+		       SVS will optimize the voltage part of this OPP table.
+  vgpu-supply: PMIC buck of GPU
+
+Example:
+
+	svs: svs@1100b000 {
+		compatible = "mediatek,mt8183-svs";
+		reg = <0 0x1100b000 0 0x1000>;
+		interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_LOW 0>;
+		clocks = <&infracfg CLK_INFRA_THERM>;
+		clock-names = "main_clk";
+		nvmem-cells = <&svs_calibration>, <&thermal_calibration>;
+		nvmem-cell-names = "svs-calibration-data", "calibration-data";
+
+		svs_cpu_little: svs_cpu_little {
+			compatible = "mediatek,mt8183-svs-cpu-little";
+			operating-points-v2 = <&cluster0_opp>;
+		};
+
+		svs_cpu_big: svs_cpu_big {
+			compatible = "mediatek,mt8183-svs-cpu-big";
+			operating-points-v2 = <&cluster1_opp>;
+		};
+
+		svs_cci: svs_cci {
+			compatible = "mediatek,mt8183-svs-cci";
+			operating-points-v2 = <&cci_opp>;
+		};
+
+		svs_gpu: svs_gpu {
+			compatible = "mediatek,mt8183-svs-gpu";
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_MFG_2D>;
+			operating-points-v2 = <&gpu_opp_table>;
+		};
+	};
+
+	&svs_cpu_little {
+		vcpu-little-supply = <&mt6358_vproc12_reg>;
+	};
+
+	&svs_cpu_big {
+		vcpu-big-supply = <&mt6358_vproc11_reg>;
+	};
+
+	&svs_cci {
+		vcci-supply = <&mt6358_vproc12_reg>;
+	};
+
+	&svs_gpu {
+		vgpu-supply = <&mt6358_vgpu_reg>;
+	};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
index 991728c..6c36d20 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
@@ -6,6 +6,8 @@
    - "mediatek,mt7622-pwm": found on mt7622 SoC.
    - "mediatek,mt7623-pwm": found on mt7623 SoC.
    - "mediatek,mt7628-pwm": found on mt7628 SoC.
+   - "mediatek,mt8183-pwm": found on mt8183 SoC.
+   - "mediatek,mt8516-pwm": found on mt8516 SoC.
  - reg: physical base address and length of the controller's registers.
  - #pwm-cells: must be 2. See pwm.txt in this directory for a description of
    the cell format.
diff --git a/Documentation/devicetree/bindings/regulator/mt6358-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6358-regulator.txt
new file mode 100644
index 0000000..9a90a92
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mt6358-regulator.txt
@@ -0,0 +1,358 @@
+MediaTek MT6358 Regulator
+
+All voltage regulators provided by the MT6358 PMIC are described as the
+subnodes of the MT6358 regulators node. Each regulator is named according
+to its regulator type, buck_<name> and ldo_<name>. Each of these nodes is
+defined using the standard binding for regulators at
+Documentation/devicetree/bindings/regulator/regulator.txt.
+
+The valid names for regulators are:
+BUCK:
+  buck_vdram1, buck_vcore, buck_vpa, buck_vproc11, buck_vproc12, buck_vgpu,
+  buck_vs2, buck_vmodem, buck_vs1
+LDO:
+  ldo_vdram2, ldo_vsim1, ldo_vibr, ldo_vrf12, ldo_vio18, ldo_vusb, ldo_vcamio,
+  ldo_vcamd, ldo_vcn18, ldo_vfe28, ldo_vsram_proc11, ldo_vcn28, ldo_vsram_others,
+  ldo_vsram_gpu, ldo_vxo22, ldo_vefuse, ldo_vaux18, ldo_vmch, ldo_vbif28,
+  ldo_vsram_proc12, ldo_vcama1, ldo_vemc, ldo_vio28, ldo_va12, ldo_vrf18,
+  ldo_vcn33_bt, ldo_vcn33_wifi, ldo_vcama2, ldo_vmc, ldo_vldo28, ldo_vaud28,
+  ldo_vsim2
+
+Example:
+
+	pmic {
+		compatible = "mediatek,mt6358";
+
+		mt6358regulator: mt6358regulator {
+			compatible = "mediatek,mt6358-regulator";
+
+			mt6358_vdram1_reg: buck_vdram1 {
+				regulator-compatible = "buck_vdram1";
+				regulator-name = "vdram1";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <2087500>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <0>;
+				regulator-always-on;
+			};
+
+			mt6358_vcore_reg: buck_vcore {
+				regulator-name = "vcore";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <200>;
+				regulator-always-on;
+			};
+
+			mt6358_vpa_reg: buck_vpa {
+				regulator-name = "vpa";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <3650000>;
+				regulator-ramp-delay = <50000>;
+				regulator-enable-ramp-delay = <250>;
+			};
+
+			mt6358_vproc11_reg: buck_vproc11 {
+				regulator-name = "vproc11";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <200>;
+				regulator-always-on;
+			};
+
+			mt6358_vproc12_reg: buck_vproc12 {
+				regulator-name = "vproc12";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <200>;
+				regulator-always-on;
+			};
+
+			mt6358_vgpu_reg: buck_vgpu {
+				regulator-name = "vgpu";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <200>;
+			};
+
+			mt6358_vs2_reg: buck_vs2 {
+				regulator-name = "vs2";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <2087500>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <0>;
+				regulator-always-on;
+			};
+
+			mt6358_vmodem_reg: buck_vmodem {
+				regulator-name = "vmodem";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <900>;
+				regulator-always-on;
+			};
+
+			mt6358_vs1_reg: buck_vs1 {
+				regulator-name = "vs1";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <2587500>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <0>;
+				regulator-always-on;
+			};
+
+			mt6358_vdram2_reg: ldo_vdram2 {
+				regulator-name = "vdram2";
+				regulator-min-microvolt = <600000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <3300>;
+			};
+
+			mt6358_vsim1_reg: ldo_vsim1 {
+				regulator-name = "vsim1";
+				regulator-min-microvolt = <1700000>;
+				regulator-max-microvolt = <3100000>;
+				regulator-enable-ramp-delay = <540>;
+			};
+
+			mt6358_vibr_reg: ldo_vibr {
+				regulator-name = "vibr";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <60>;
+			};
+
+			mt6358_vrf12_reg: ldo_vrf12 {
+				compatible = "regulator-fixed";
+				regulator-name = "vrf12";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			mt6358_vio18_reg: ldo_vio18 {
+				compatible = "regulator-fixed";
+				regulator-name = "vio18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <2700>;
+				regulator-always-on;
+			};
+
+			mt6358_vusb_reg: ldo_vusb {
+				regulator-name = "vusb";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3100000>;
+				regulator-enable-ramp-delay = <270>;
+				regulator-always-on;
+			};
+
+			mt6358_vcamio_reg: ldo_vcamio {
+				compatible = "regulator-fixed";
+				regulator-name = "vcamio";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vcamd_reg: ldo_vcamd {
+				regulator-name = "vcamd";
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vcn18_reg: ldo_vcn18 {
+				compatible = "regulator-fixed";
+				regulator-name = "vcn18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vfe28_reg: ldo_vfe28 {
+				compatible = "regulator-fixed";
+				regulator-name = "vfe28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vsram_proc11_reg: ldo_vsram_proc11 {
+				regulator-name = "vsram_proc11";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <240>;
+				regulator-always-on;
+			};
+
+			mt6358_vcn28_reg: ldo_vcn28 {
+				compatible = "regulator-fixed";
+				regulator-name = "vcn28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vsram_others_reg: ldo_vsram_others {
+				regulator-name = "vsram_others";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <240>;
+				regulator-always-on;
+			};
+
+			mt6358_vsram_gpu_reg: ldo_vsram_gpu {
+				regulator-name = "vsram_gpu";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <240>;
+			};
+
+			mt6358_vxo22_reg: ldo_vxo22 {
+				compatible = "regulator-fixed";
+				regulator-name = "vxo22";
+				regulator-min-microvolt = <2200000>;
+				regulator-max-microvolt = <2200000>;
+				regulator-enable-ramp-delay = <120>;
+				regulator-always-on;
+			};
+
+			mt6358_vefuse_reg: ldo_vefuse {
+				regulator-name = "vefuse";
+				regulator-min-microvolt = <1700000>;
+				regulator-max-microvolt = <1900000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vaux18_reg: ldo_vaux18 {
+				compatible = "regulator-fixed";
+				regulator-name = "vaux18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vmch_reg: ldo_vmch {
+				regulator-name = "vmch";
+				regulator-min-microvolt = <2900000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <60>;
+			};
+
+			mt6358_vbif28_reg: ldo_vbif28 {
+				compatible = "regulator-fixed";
+				regulator-name = "vbif28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vsram_proc12_reg: ldo_vsram_proc12 {
+				regulator-name = "vsram_proc12";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <240>;
+				regulator-always-on;
+			};
+
+			mt6358_vcama1_reg: ldo_vcama1 {
+				regulator-name = "vcama1";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vemc_reg: ldo_vemc {
+				regulator-name = "vemc";
+				regulator-min-microvolt = <2900000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <60>;
+				regulator-always-on;
+			};
+
+			mt6358_vio28_reg: ldo_vio28 {
+				compatible = "regulator-fixed";
+				regulator-name = "vio28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_va12_reg: ldo_va12 {
+				compatible = "regulator-fixed";
+				regulator-name = "va12";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-enable-ramp-delay = <270>;
+				regulator-always-on;
+			};
+
+			mt6358_vrf18_reg: ldo_vrf18 {
+				compatible = "regulator-fixed";
+				regulator-name = "vrf18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			mt6358_vcn33_bt_reg: ldo_vcn33_bt {
+				regulator-name = "vcn33_bt";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3500000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vcn33_wifi_reg: ldo_vcn33_wifi {
+				regulator-name = "vcn33_wifi";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3500000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vcama2_reg: ldo_vcama2 {
+				regulator-name = "vcama2";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vmc_reg: ldo_vmc {
+				regulator-name = "vmc";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <60>;
+			};
+
+			mt6358_vldo28_reg: ldo_vldo28 {
+				regulator-name = "vldo28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vaud28_reg: ldo_vaud28 {
+				compatible = "regulator-fixed";
+				regulator-name = "vaud28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vsim2_reg: ldo_vsim2 {
+				regulator-name = "vsim2";
+				regulator-min-microvolt = <1700000>;
+				regulator-max-microvolt = <3100000>;
+				regulator-enable-ramp-delay = <540>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/regulator/mt6392-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6392-regulator.txt
new file mode 100644
index 0000000..d03c070
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mt6392-regulator.txt
@@ -0,0 +1,220 @@
+MediaTek MT6392 Regulator
+
+Required properties:
+- compatible: "mediatek,mt6392-regulator"
+- Subnodes: list of regulators provided by this controller. Each regulator is
+  named according to its regulator type, buck_<name> and ldo_<name>. Each of
+  these nodes is defined using the standard binding for regulators at
+  Documentation/devicetree/bindings/regulator/regulator.txt.
+
+The valid names for regulators are:
+BUCK:
+  buck_vproc, buck_vsys, buck_vcore
+LDO:
+  ldo_vxo22, ldo_vaud22, ldo_vcama, ldo_vaud28, ldo_vadc18, ldo_vcn35,
+  ldo_vio28, ldo_vusb, ldo_vmc, ldo_vmch, ldo_vemc3v3, ldo_vgp1, ldo_vgp2,
+  ldo_vcn18, ldo_vcamaf, ldo_vm, ldo_vio18, ldo_vcamd, ldo_vcamio, ldo_vm25,
+  ldo_vefuse
+
+Example:
+	pmic {
+		compatible = "mediatek,mt6392", "mediatek,mt6323";
+		mediatek,system-power-controller;
+
+		regulator {
+			compatible = "mediatek,mt6392-regulator";
+
+			mt6392_vproc_reg: buck-vproc {
+				regulator-name = "buck_vproc";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vsys_reg: buck-vsys {
+				regulator-name = "buck_vsys";
+				regulator-min-microvolt = <1400000>;
+				regulator-max-microvolt = <2987500>;
+				regulator-ramp-delay = <25000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vcore_reg: buck-vcore {
+				regulator-name = "buck_vcore";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vxo22_reg: ldo-vxo22 {
+				regulator-name = "ldo_vxo22";
+				regulator-min-microvolt = <2200000>;
+				regulator-max-microvolt = <2200000>;
+				regulator-enable-ramp-delay = <110>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vaud22_reg: ldo-vaud22 {
+				regulator-name = "ldo_vaud22";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <2200000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vcama_reg: ldo-vcama {
+				regulator-name = "ldo_vcama";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vaud28_reg: ldo-vaud28 {
+				regulator-name = "ldo_vaud28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vadc18_reg: ldo-vadc18 {
+				regulator-name = "ldo_vadc18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vcn35_reg: ldo-vcn35 {
+				regulator-name = "ldo_vcn35";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3600000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vio28_reg: ldo-vio28 {
+				regulator-name = "ldo_vio28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vusb_reg: ldo-vusb {
+				regulator-name = "ldo_vusb";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vmc_reg: ldo-vmc {
+				regulator-name = "ldo_vmc";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-boot-on;
+			};
+
+			mt6392_vmch_reg: ldo-vmch {
+				regulator-name = "ldo_vmch";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-boot-on;
+			};
+
+			mt6392_vemc3v3_reg: ldo-vemc3v3 {
+				regulator-name = "ldo_vemc3v3";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-boot-on;
+			};
+
+			mt6392_vgp1_reg: ldo-vgp1 {
+				regulator-name = "ldo_vgp1";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vgp2_reg: ldo-vgp2 {
+				regulator-name = "ldo_vgp2";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vcn18_reg: ldo-vcn18 {
+				regulator-name = "ldo_vcn18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vcamaf_reg: ldo-vcamaf {
+				regulator-name = "ldo_vcamaf";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vm_reg: ldo-vm {
+				regulator-name = "ldo_vm";
+				regulator-min-microvolt = <1240000>;
+				regulator-max-microvolt = <1390000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vio18_reg: ldo-vio18 {
+				regulator-name = "ldo_vio18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vcamd_reg: ldo-vcamd {
+				regulator-name = "ldo_vcamd";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vcamio_reg: ldo-vcamio {
+				regulator-name = "ldo_vcamio";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vm25_reg: ldo-vm25 {
+				regulator-name = "ldo_vm25";
+				regulator-min-microvolt = <2500000>;
+				regulator-max-microvolt = <2500000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vefuse_reg: ldo-vefuse {
+				regulator-name = "ldo_vefuse";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <2000000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/rng/mtk-rng.txt b/Documentation/devicetree/bindings/rng/mtk-rng.txt
index 366b99b..7c77023 100644
--- a/Documentation/devicetree/bindings/rng/mtk-rng.txt
+++ b/Documentation/devicetree/bindings/rng/mtk-rng.txt
@@ -5,6 +5,7 @@
 - compatible	    : Should be
 			"mediatek,mt7622-rng", 	"mediatek,mt7623-rng" : for MT7622
 			"mediatek,mt7623-rng" : for MT7623
+			"mediatek,mt8516-rng", "mediatek,mt7623-rng" : for MT8516
 - clocks	    : list of clock specifiers, corresponding to
 		      entries in clock-names property;
 - clock-names	    : Should contain "rng" entries;
diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt
index 742cb47..ca7e9dc 100644
--- a/Documentation/devicetree/bindings/serial/mtk-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt
@@ -1,4 +1,4 @@
-* Mediatek Universal Asynchronous Receiver/Transmitter (UART)
+* MediaTek Universal Asynchronous Receiver/Transmitter (UART)
 
 Required properties:
 - compatible should contain:
@@ -13,9 +13,11 @@
   * "mediatek,mt6797-uart" for MT6797 compatible UARTS
   * "mediatek,mt7622-uart" for MT7622 compatible UARTS
   * "mediatek,mt7623-uart" for MT7623 compatible UARTS
+  * "mediatek,mt7629-uart" for MT7629 compatible UARTS
   * "mediatek,mt8127-uart" for MT8127 compatible UARTS
   * "mediatek,mt8135-uart" for MT8135 compatible UARTS
   * "mediatek,mt8173-uart" for MT8173 compatible UARTS
+  * "mediatek,mt8516-uart" for MT8516 compatible UARTS
   * "mediatek,mt6577-uart" for MT6577 and all of the above
 
 - reg: The base address of the UART register bank.
diff --git a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
index f9987c3..7a32404 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
+++ b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
@@ -19,10 +19,13 @@
 Required properties in pwrap device node.
 - compatible:
 	"mediatek,mt2701-pwrap" for MT2701/7623 SoCs
+	"mediatek,mt6765-pwrap" for MT6765 SoCs
 	"mediatek,mt6797-pwrap" for MT6797 SoCs
 	"mediatek,mt7622-pwrap" for MT7622 SoCs
 	"mediatek,mt8135-pwrap" for MT8135 SoCs
 	"mediatek,mt8173-pwrap" for MT8173 SoCs
+	"mediatek,mt8183-pwrap" for MT8183 SoCs
+	"mediatek,mt8516-pwrap" for MT8516 SoCs
 - interrupts: IRQ for pwrap in SOC
 - reg-names: Must include the following entries:
   "pwrap": Main registers base
diff --git a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
index d6fe16f..6bb8482 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
+++ b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
@@ -14,6 +14,7 @@
 - include/dt-bindings/power/mt2701-power.h
 - include/dt-bindings/power/mt2712-power.h
 - include/dt-bindings/power/mt7622-power.h
+- include/dt-bindings/power/mt8183-power.h
 
 Required properties:
 - compatible: Should be one of:
@@ -23,19 +24,33 @@
 	- "mediatek,mt7622-scpsys"
 	- "mediatek,mt7623-scpsys", "mediatek,mt2701-scpsys": For MT7623 SoC
 	- "mediatek,mt7623a-scpsys": For MT7623A SoC
+	- "mediatek,mt7629-scpsys", "mediatek,mt7622-scpsys": For MT7629 SoC
 	- "mediatek,mt8173-scpsys"
+	- "mediatek,mt8183-scpsys"
 - #power-domain-cells: Must be 1
 - reg: Address range of the SCPSYS unit
 - infracfg: must contain a phandle to the infracfg controller
 - clock, clock-names: clocks according to the common clock binding.
                       These are clocks which hardware needs to be
                       enabled before enabling certain power domains.
+                      Clocks of the new type "BASIC" belong to the category
+                      above, while clocks of the new type "SUBSYS" need to be
+                      enabled before releasing bus protection.
 	Required clocks for MT2701 or MT7623: "mm", "mfg", "ethif"
 	Required clocks for MT2712: "mm", "mfg", "venc", "jpgdec", "audio", "vdec"
 	Required clocks for MT6797: "mm", "mfg", "vdec"
-	Required clocks for MT7622: "hif_sel"
+	Required clocks for MT7622 or MT7629: "hif_sel"
 	Required clocks for MT7622A: "ethif"
 	Required clocks for MT8173: "mm", "mfg", "venc", "venc_lt"
+	Required clocks for MT8183: BASIC: "audio", "mfg", "mm", "cam", "isp",
+					   "vpu", "vpu1", "vpu2", "vpu3"
+				    SUBSYS: "mm-0", "mm-1", "mm-2", "mm-3",
+					    "mm-4", "mm-5", "mm-6", "mm-7",
+					    "mm-8", "mm-9", "isp-0", "isp-1",
+					    "cam-0", "cam-1", "cam-2", "cam-3",
+					    "cam-4", "cam-5", "cam-6", "vpu-0",
+					    "vpu-1", "vpu-2", "vpu-3", "vpu-4",
+					    "vpu-5"
 
 Optional properties:
 - vdec-supply: Power supply for the vdec power domain
diff --git a/Documentation/devicetree/bindings/sound/mt8183-mt6358.txt b/Documentation/devicetree/bindings/sound/mt8183-mt6358.txt
new file mode 100644
index 0000000..75b19f3
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mt8183-mt6358.txt
@@ -0,0 +1,12 @@
+MT8183 with MT6358 CODEC
+
+Required properties:
+- compatible: "mediatek,mt8183-mt6358-sound"
+- mediatek,platform: the phandle of MT8183 ASoC platform
+
+Example:
+
+	sound {
+		compatible = "mediatek,mt8183-mt6358-sound";
+		mediatek,platform = <&afe>;
+	};
diff --git a/Documentation/devicetree/bindings/sound/mt8516-afe-pcm.txt b/Documentation/devicetree/bindings/sound/mt8516-afe-pcm.txt
new file mode 100644
index 0000000..c5fb3c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mt8516-afe-pcm.txt
@@ -0,0 +1,28 @@
+MediaTek AFE PCM controller for MT8516
+
+Required properties:
+- compatible:  "mediatek,mt8516-audio"
+- interrupts: should contain AFE interrupt
+- clocks: Must contain an entry for each entry in clock-names
+- clock-names: should have these clock names:
+		"top_pdn_audio",
+		"aud_dac_clk",
+		"aud_dac_predis_clk",
+		"aud_adc_clk";
+
+Example:
+
+	afe: mt8516-afe-pcm@11140000 {
+		compatible = "mediatek,mt8516-audio";
+		reg = <0 0x11140000 0 0x1000>;
+		interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&topckgen CLK_TOP_AUDIO>,
+			 <&audiotop CLK_AUD_DAC>,
+			 <&audiotop CLK_AUD_DAC_PREDIS>,
+			 <&audiotop CLK_AUD_ADC>;
+		clock-names = "top_pdn_audio",
+			      "aud_dac_clk",
+			      "aud_dac_predis_clk",
+			      "aud_adc_clk";
+	};
diff --git a/Documentation/devicetree/bindings/sound/tlv320adc3101.txt b/Documentation/devicetree/bindings/sound/tlv320adc3101.txt
new file mode 100644
index 0000000..b6ba6fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tlv320adc3101.txt
@@ -0,0 +1,40 @@
+Texas Instruments - tlv320adc3101 Codec module
+
+The tlv320adc3101 is controlled through an I2C serial control bus.
+
+Required properties:
+ - compatible: "ti,tlv320adc3101"
+ - reg: I2C slave address
+ - *-supply: Required supply regulators are:
+    "iov-supply" - digital IO power supply
+    "dv-supply" - digital core power supply
+    "av-supply" - analog core power supply
+ - rst-gpio: Reset-GPIO phandle with args as described in gpio/gpio.txt
+ - left-pin-select: default left ADC input selection for left PGA. Values are
+   defined in sound/soc/codecs/tlv320adc3101.h
+ - right-pin-select: default right ADC input selection for right PGA. Values
+   are defined in sound/soc/codecs/tlv320adc3101.h
+
+Example:
+
+codec: tlv320adc3101@18 {
+		compatible = "ti,tlv320adc3101";
+		sound-name-prefix = "U18_";
+		reg = <0x18>;
+		rst-gpio = <&pio 34 0>;
+		left-pin-select = <1>;
+		right-pin-select = <1>;
+
+		av-supply  = <&supply_3v3>;
+		dv-supply  = <&supply_3v3>;
+		iov-supply = <&supply_1v8>;
+};
diff --git a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
index 18d4d01..97ac288 100644
--- a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
+++ b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
@@ -16,6 +16,7 @@
 	* "mediatek,mt8127-timer" for MT8127 compatible timers (GPT)
 	* "mediatek,mt8135-timer" for MT8135 compatible timers (GPT)
 	* "mediatek,mt8173-timer" for MT8173 compatible timers (GPT)
+	* "mediatek,mt8516-timer" for MT8516 compatible timers (GPT)
 	* "mediatek,mt6577-timer" for MT6577 and all above compatible timers (GPT)
 	* "mediatek,mt6765-timer" for MT6765 compatible timers (SYST)
 - reg: Should contain location and length for timer register.
diff --git a/Documentation/devicetree/bindings/usb/generic.txt b/Documentation/devicetree/bindings/usb/generic.txt
index 0a74ab8..cf5a1ad 100644
--- a/Documentation/devicetree/bindings/usb/generic.txt
+++ b/Documentation/devicetree/bindings/usb/generic.txt
@@ -30,6 +30,10 @@
 			optional for OTG device.
  - adp-disable: tells OTG controllers we want to disable OTG ADP, ADP is
 			optional for OTG device.
+ - usb-role-switch: boolean, indicates that the device is capable of assigning
+			the USB data role (USB host or USB device) for a given
+			USB connector, such as Type-C or Type-B (micro).
+			See connector/usb-connector.txt.
 
 This is an attribute to a USB controller such as:
 
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
index 266c2d9..91c0704 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
@@ -29,6 +29,7 @@
 	"sys_ck": controller clock used by normal mode,
 	the following ones are optional:
 	"ref_ck": reference clock used by low power mode etc,
+	"xhci_ck": controller clock,
 	"mcu_ck": mcu_bus clock for register access,
 	"dma_ck": dma_bus clock for data transfer by DMA
 
@@ -100,7 +101,7 @@
  - clocks : a list of phandle + clock-specifier pairs, one for each
 	entry in clock-names
  - clock-names : must contain "sys_ck", and the following ones are optional:
-	"ref_ck", "mcu_ck" and "dma_ck"
+	"ref_ck", "xhci_ck", "mcu_ck" and "dma_ck"
 
 Optional properties:
  - vbus-supply : reference to the VBUS regulator;
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
index 3382b5c..1a73534 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
@@ -16,7 +16,7 @@
 	entry in clock-names
  - clock-names : must contain "sys_ck" for clock of controller,
 	the following clocks are optional:
-	"ref_ck", "mcu_ck" and "dam_ck";
+	"ref_ck", "mcu_ck" and "dma_ck";
  - phys : see usb-hcd.txt in the current directory
  - dr_mode : should be one of "host", "peripheral" or "otg",
 	refer to usb/generic.txt
@@ -28,8 +28,13 @@
 	parent's address space
  - extcon : external connector for vbus and idpin changes detection, needed
 	when supports dual-role mode.
+	It is kept for backward compatibility and not allowed for new
+	bindings; use the "usb-role-switch" property instead.
  - vbus-supply : reference to the VBUS regulator, needed when supports
 	dual-role mode.
+	It is kept for backward compatibility and not allowed for new
+	bindings; put the supply into a usb-connector node instead.
+	See connector/usb-connector.txt.
  - pinctrl-names : a pinctrl state named "default" is optional, and need be
 	defined if auto drd switch is enabled, that means the property dr_mode
 	is set as "otg", and meanwhile the property "mediatek,enable-manual-drd"
@@ -39,6 +44,8 @@
 
  - maximum-speed : valid arguments are "super-speed", "high-speed" and
 	"full-speed"; refer to usb/generic.txt
+ - usb-role-switch : use the USB Role Switch framework rather than extcon to
+	support dual-role switching; see usb/generic.txt.
  - enable-manual-drd : supports manual dual-role switch via debugfs; usually
 	used when receptacle is TYPE-A and also wants to support dual-role
 	mode.
@@ -52,6 +59,8 @@
 		- 2 : used by mt2712 etc
  - mediatek,u3p-dis-msk : mask to disable u3ports, bit0 for u3port0,
 	bit1 for u3port1, ... etc;
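+	For instance (an illustrative value, not taken from the original
+	binding), mediatek,u3p-dis-msk = <0x3> would disable u3port0 and
+	u3port1.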
+ - mediatek,force-vbus : boolean, indicates that the controller cannot detect
+	Vbus because no Vbus pin exists.
 
 additionally the properties from usb-hcd.txt (in the current directory) are
 supported.
@@ -61,6 +70,9 @@
 if host mode is enabled. The DT binding details of xhci can be found in:
 Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
 
+A port subnode should be added when the "usb-role-switch" property is used;
+	see graph.txt and the sketch below.
+
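+As a hedged sketch only (the endpoint labels are illustrative and not part of
+this binding), the controller node could then contain:
+
+	usb-role-switch;
+	port {
+		mtu3_role_sw: endpoint {
+			remote-endpoint = <&typec_endpoint>;
+		};
+	};
+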
 Example:
 ssusb: usb@11271000 {
 	compatible = "mediatek,mt8173-mtu3";
diff --git a/Documentation/devicetree/bindings/usb/mediatek,musb.txt b/Documentation/devicetree/bindings/usb/mediatek,musb.txt
new file mode 100644
index 0000000..7434299
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/mediatek,musb.txt
@@ -0,0 +1,55 @@
+MediaTek musb DRD/OTG controller
+-------------------------------------------
+
+Required properties:
+ - compatible      : should be one of:
+                     "mediatek,mt-2701"
+                     ...
+                     followed by "mediatek,mtk-musb"
+ - reg             : specifies physical base address and size of
+                     the registers
+ - interrupts      : interrupt used by musb controller
+ - interrupt-names : must be "mc"
+ - phys            : PHY specifier for the OTG phy
+ - dr_mode         : should be one of "host", "peripheral" or "otg",
+                     refer to usb/generic.txt
+ - clocks          : a list of phandle + clock-specifier pairs, one for
+                     each entry in clock-names
+ - clock-names     : must contain "main", "mcu", "univpll"
+                     for clocks of controller
+
+Optional properties:
+ - power-domains   : a phandle to USB power domain node to control USB's
+                     MTCMOS
+
+Required child nodes:
+ usb connector node as defined in bindings/connector/usb-connector.txt
+Optional properties of the connector node:
+ - id-gpios        : input GPIO for USB ID pin.
+ - vbus-gpios      : input GPIO for USB VBUS pin.
+ - vbus-supply     : reference to the VBUS regulator, needed when dual-role
+                     mode is supported
+
+Example:
+
+usb2: usb@11200000 {
+	compatible = "mediatek,mt2701-musb",
+		     "mediatek,mtk-musb";
+	reg = <0 0x11200000 0 0x1000>;
+	interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
+	interrupt-names = "mc";
+	phys = <&u2port2 PHY_TYPE_USB2>;
+	dr_mode = "otg";
+	clocks = <&pericfg CLK_PERI_USB0>,
+		 <&pericfg CLK_PERI_USB0_MCU>,
+		 <&pericfg CLK_PERI_USB_SLV>;
+	clock-names = "main","mcu","univpll";
+	power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
+	connector {
+		compatible = "linux,typeb-conn-gpio", "usb-b-connector";
+		label = "micro-USB";
+		type = "micro";
+		id-gpios = <&pio 44 GPIO_ACTIVE_HIGH>;
+		vbus-supply = <&usb_vbus>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/usb/usb-conn-gpio.txt b/Documentation/devicetree/bindings/usb/usb-conn-gpio.txt
new file mode 100644
index 0000000..3d05ae5
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/usb-conn-gpio.txt
@@ -0,0 +1,30 @@
+USB GPIO Based Connection Detection
+
+This is typically used to switch the dual-role mode based on the USB ID pin
+connected to an input GPIO, and also to enable/disable device mode based on
+the USB Vbus pin connected to an input GPIO.
+
+Required properties:
+- compatible : should include "gpio-usb-b-connector" and "usb-b-connector".
+- id-gpios, vbus-gpios : input GPIOs; at least one of them must be present,
+	and both may be present as well.
+	see connector/usb-connector.txt
+
+Optional properties:
+- vbus-supply : may be present when dual-role mode is supported.
+	see connector/usb-connector.txt
+
+Sub-nodes:
+	- port : may be present;
+		see graph.txt
+
+Example:
+
+&mtu3 {
+	connector {
+		compatible = "gpio-usb-b-connector", "usb-b-connector";
+		type = "micro";
+		id-gpios = <&pio 12 GPIO_ACTIVE_HIGH>;
+		vbus-supply = <&usb_p0_vbus>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 2c3fc51..1acd423 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -184,6 +184,7 @@
 isee	ISEE 2007 S.L.
 isil	Intersil
 issi	Integrated Silicon Solutions Inc.
+ite	ITE Tech Inc.
 itead	ITEAD Intelligent Systems Co.Ltd
 iwave  iWave Systems Technologies Pvt. Ltd.
 jdi	Japan Display Inc.
@@ -336,6 +337,7 @@
 sgd	Solomon Goldentek Display Corporation
 sgx	SGX Sensortech
 sharp	Sharp Corporation
+shiji	Shiji Lighting
 shimafuji	Shimafuji Electric, Inc.
 si-en	Si-En Technology Ltd.
 sifive	SiFive, Inc.
diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
index 859dee1..fd380eb 100644
--- a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
@@ -8,6 +8,8 @@
 	"mediatek,mt6797-wdt", "mediatek,mt6589-wdt": for MT6797
 	"mediatek,mt7622-wdt", "mediatek,mt6589-wdt": for MT7622
 	"mediatek,mt7623-wdt", "mediatek,mt6589-wdt": for MT7623
+	"mediatek,mt7629-wdt", "mediatek,mt6589-wdt": for MT7629
+	"mediatek,mt8516-wdt", "mediatek,mt6589-wdt": for MT8516
 
 - reg : Specifies base physical address and size of the registers.
 
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 43681ca..5a2d8c7 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -235,6 +235,7 @@
 
 CLOCK
   devm_clk_get()
+  devm_clk_get_optional()
   devm_clk_put()
   devm_clk_hw_register()
   devm_of_clk_add_hw_provider()
diff --git a/arch/arm/boot/dts/mt2701-evb.dts b/arch/arm/boot/dts/mt2701-evb.dts
index be0edb3..e856939 100644
--- a/arch/arm/boot/dts/mt2701-evb.dts
+++ b/arch/arm/boot/dts/mt2701-evb.dts
@@ -6,6 +6,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "mt2701.dtsi"
 
 / {
@@ -60,6 +61,15 @@
 		>;
 		default-brightness-level = <9>;
 	};
+
+	usb_vbus: regulator-usb-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&pio 45 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
 };
 
 &auxadc {
@@ -229,3 +239,14 @@
 &uart0 {
 	status = "okay";
 };
+
+&usb2 {
+	status = "okay";
+	connector {
+		compatible = "linux,typeb-conn-gpio", "usb-b-connector";
+		label = "micro-USB";
+		type = "micro";
+		id-gpios = <&pio 44 GPIO_ACTIVE_HIGH>;
+		vbus-supply = <&usb_vbus>;
+	};
+};
diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
index 180377e..a6b1434 100644
--- a/arch/arm/boot/dts/mt2701.dtsi
+++ b/arch/arm/boot/dts/mt2701.dtsi
@@ -670,6 +670,39 @@
 		};
 	};
 
+	usb2: usb@11200000 {
+		compatible = "mediatek,mt2701-musb",
+			     "mediatek,mtk-musb";
+		reg = <0 0x11200000 0 0x1000>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "mc";
+		phys = <&u2port2 PHY_TYPE_USB2>;
+		dr_mode = "otg";
+		clocks = <&pericfg CLK_PERI_USB0>,
+			 <&pericfg CLK_PERI_USB0_MCU>,
+			 <&pericfg CLK_PERI_USB_SLV>;
+		clock-names = "main","mcu","univpll";
+		power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
+		status = "disabled";
+	};
+
+	u2phy0: usb-phy@11210000 {
+		compatible = "mediatek,generic-tphy-v1";
+		reg = <0 0x11210000 0 0x0800>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		status = "okay";
+
+		u2port2: usb-phy@11210800 {
+			reg = <0 0x11210800 0 0x0100>;
+			clocks = <&topckgen CLK_TOP_USB_PHY48M>;
+			clock-names = "ref";
+			#phy-cells = <1>;
+			status = "okay";
+		};
+	};
+
 	ethsys: syscon@1b000000 {
 		compatible = "mediatek,mt2701-ethsys", "syscon";
 		reg = <0 0x1b000000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 5b7fd6a..509111f 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -5,4 +5,8 @@
 dtb-$(CONFIG_ARCH_MEDIATEK) += mt6797-evb.dtb
 dtb-$(CONFIG_ARCH_MEDIATEK) += mt6797-x20-dev.dtb
 dtb-$(CONFIG_ARCH_MEDIATEK) += mt7622-rfb1.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8167-coral.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8167-pumpkin.dtb
 dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-evb.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8516-pumpkin.dtb
diff --git a/arch/arm64/boot/dts/mediatek/mt6358.dtsi b/arch/arm64/boot/dts/mediatek/mt6358.dtsi
new file mode 100644
index 0000000..b97f3f1
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt6358.dtsi
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+&pwrap {
+	pmic: mt6358 {
+		compatible = "mediatek,mt6358";
+		interrupt-controller;
+		interrupt-parent = <&pio>;
+		interrupts = <182 IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <2>;
+
+		mt6358codec: mt6358codec {
+			compatible = "mediatek,mt6358-sound";
+		};
+
+		mt6358rtc: mt6358rtc {
+			compatible = "mediatek,mt6358-rtc";
+		};
+
+		mt6358regulator: mt6358regulator {
+			compatible = "mediatek,mt6358-regulator";
+
+			mt6358_vdram1_reg: buck_vdram1 {
+				regulator-compatible = "buck_vdram1";
+				regulator-name = "vdram1";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <2087500>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <0>;
+				regulator-always-on;
+				regulator-allowed-modes = <0 1>;
+			};
+
+			mt6358_vcore_reg: buck_vcore {
+				regulator-name = "vcore";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <200>;
+				regulator-always-on;
+				regulator-allowed-modes = <0 1>;
+			};
+
+			mt6358_vpa_reg: buck_vpa {
+				regulator-name = "vpa";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <3650000>;
+				regulator-ramp-delay = <50000>;
+				regulator-enable-ramp-delay = <250>;
+				regulator-allowed-modes = <0 1>;
+			};
+
+			mt6358_vproc11_reg: buck_vproc11 {
+				regulator-name = "vproc11";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <200>;
+				regulator-always-on;
+				regulator-allowed-modes = <0 1>;
+			};
+
+			mt6358_vproc12_reg: buck_vproc12 {
+				regulator-name = "vproc12";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <200>;
+				regulator-always-on;
+				regulator-allowed-modes = <0 1>;
+			};
+
+			mt6358_vgpu_reg: buck_vgpu {
+				regulator-name = "vgpu";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <200>;
+				regulator-allowed-modes = <0 1>;
+			};
+
+			mt6358_vs2_reg: buck_vs2 {
+				regulator-name = "vs2";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <2087500>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <0>;
+				regulator-always-on;
+			};
+
+			mt6358_vmodem_reg: buck_vmodem {
+				regulator-name = "vmodem";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <900>;
+				regulator-always-on;
+				regulator-allowed-modes = <0 1>;
+			};
+
+			mt6358_vs1_reg: buck_vs1 {
+				regulator-name = "vs1";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <2587500>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <0>;
+				regulator-always-on;
+			};
+
+			mt6358_vdram2_reg: ldo_vdram2 {
+				regulator-name = "vdram2";
+				regulator-min-microvolt = <600000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <3300>;
+			};
+
+			mt6358_vsim1_reg: ldo_vsim1 {
+				regulator-name = "vsim1";
+				regulator-min-microvolt = <1700000>;
+				regulator-max-microvolt = <3100000>;
+				regulator-enable-ramp-delay = <540>;
+			};
+
+			mt6358_vibr_reg: ldo_vibr {
+				regulator-name = "vibr";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <60>;
+			};
+
+			mt6358_vrf12_reg: ldo_vrf12 {
+				compatible = "regulator-fixed";
+				regulator-name = "vrf12";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			mt6358_vio18_reg: ldo_vio18 {
+				compatible = "regulator-fixed";
+				regulator-name = "vio18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <2700>;
+				regulator-always-on;
+			};
+
+			mt6358_vusb_reg: ldo_vusb {
+				regulator-name = "vusb";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3100000>;
+				regulator-enable-ramp-delay = <270>;
+				regulator-always-on;
+			};
+
+			mt6358_vcamio_reg: ldo_vcamio {
+				compatible = "regulator-fixed";
+				regulator-name = "vcamio";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vcamd_reg: ldo_vcamd {
+				regulator-name = "vcamd";
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vcn18_reg: ldo_vcn18 {
+				compatible = "regulator-fixed";
+				regulator-name = "vcn18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vfe28_reg: ldo_vfe28 {
+				compatible = "regulator-fixed";
+				regulator-name = "vfe28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vsram_proc11_reg: ldo_vsram_proc11 {
+				regulator-name = "vsram_proc11";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <240>;
+				regulator-always-on;
+			};
+
+			mt6358_vcn28_reg: ldo_vcn28 {
+				compatible = "regulator-fixed";
+				regulator-name = "vcn28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vsram_others_reg: ldo_vsram_others {
+				regulator-name = "vsram_others";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <240>;
+				regulator-always-on;
+			};
+
+			mt6358_vsram_gpu_reg: ldo_vsram_gpu {
+				regulator-name = "vsram_gpu";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <240>;
+			};
+
+			mt6358_vxo22_reg: ldo_vxo22 {
+				compatible = "regulator-fixed";
+				regulator-name = "vxo22";
+				regulator-min-microvolt = <2200000>;
+				regulator-max-microvolt = <2200000>;
+				regulator-enable-ramp-delay = <120>;
+				regulator-always-on;
+			};
+
+			mt6358_vefuse_reg: ldo_vefuse {
+				regulator-name = "vefuse";
+				regulator-min-microvolt = <1700000>;
+				regulator-max-microvolt = <1900000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vaux18_reg: ldo_vaux18 {
+				compatible = "regulator-fixed";
+				regulator-name = "vaux18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vmch_reg: ldo_vmch {
+				regulator-name = "vmch";
+				regulator-min-microvolt = <2900000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <60>;
+			};
+
+			mt6358_vbif28_reg: ldo_vbif28 {
+				compatible = "regulator-fixed";
+				regulator-name = "vbif28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vsram_proc12_reg: ldo_vsram_proc12 {
+				regulator-name = "vsram_proc12";
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1293750>;
+				regulator-ramp-delay = <6250>;
+				regulator-enable-ramp-delay = <240>;
+				regulator-always-on;
+			};
+
+			mt6358_vcama1_reg: ldo_vcama1 {
+				regulator-name = "vcama1";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vemc_reg: ldo_vemc {
+				regulator-name = "vemc";
+				regulator-min-microvolt = <2900000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <60>;
+				regulator-always-on;
+			};
+
+			mt6358_vio28_reg: ldo_vio28 {
+				compatible = "regulator-fixed";
+				regulator-name = "vio28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_va12_reg: ldo_va12 {
+				compatible = "regulator-fixed";
+				regulator-name = "va12";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-enable-ramp-delay = <270>;
+				regulator-always-on;
+			};
+
+			mt6358_vrf18_reg: ldo_vrf18 {
+				compatible = "regulator-fixed";
+				regulator-name = "vrf18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			mt6358_vcn33_bt_reg: ldo_vcn33_bt {
+				regulator-name = "vcn33_bt";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3500000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vcn33_wifi_reg: ldo_vcn33_wifi {
+				regulator-name = "vcn33_wifi";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3500000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vcama2_reg: ldo_vcama2 {
+				regulator-name = "vcama2";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vmc_reg: ldo_vmc {
+				regulator-name = "vmc";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <60>;
+			};
+
+			mt6358_vldo28_reg: ldo_vldo28 {
+				regulator-name = "vldo28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vaud28_reg: ldo_vaud28 {
+				compatible = "regulator-fixed";
+				regulator-name = "vaud28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <270>;
+			};
+
+			mt6358_vsim2_reg: ldo_vsim2 {
+				regulator-name = "vsim2";
+				regulator-min-microvolt = <1700000>;
+				regulator-max-microvolt = <3100000>;
+				regulator-enable-ramp-delay = <540>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt6392.dtsi b/arch/arm64/boot/dts/mediatek/mt6392.dtsi
new file mode 100644
index 0000000..aed1d5b
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt6392.dtsi
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#include <dt-bindings/input/input.h>
+
+&pwrap {
+	mt6392_pmic: pmic {
+		compatible = "mediatek,mt6392", "mediatek,mt6323";
+		mediatek,system-power-controller;
+
+		regulators {
+			compatible = "mediatek,mt6392-regulator";
+
+			mt6392_vproc_reg: buck-vproc {
+				regulator-name = "buck_vproc";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vsys_reg: buck-vsys {
+				regulator-name = "buck_vsys";
+				regulator-min-microvolt = <1400000>;
+				regulator-max-microvolt = <2987500>;
+				regulator-ramp-delay = <25000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vcore_reg: buck-vcore {
+				regulator-name = "buck_vcore";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vxo22_reg: ldo-vxo22 {
+				regulator-name = "ldo_vxo22";
+				regulator-min-microvolt = <2200000>;
+				regulator-max-microvolt = <2200000>;
+				regulator-enable-ramp-delay = <110>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vaud22_reg: ldo-vaud22 {
+				regulator-name = "ldo_vaud22";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <2200000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vcama_reg: ldo-vcama {
+				regulator-name = "ldo_vcama";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vaud28_reg: ldo-vaud28 {
+				regulator-name = "ldo_vaud28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vadc18_reg: ldo-vadc18 {
+				regulator-name = "ldo_vadc18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vcn35_reg: ldo-vcn35 {
+				regulator-name = "ldo_vcn35";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3600000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vio28_reg: ldo-vio28 {
+				regulator-name = "ldo_vio28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vusb_reg: ldo-vusb {
+				regulator-name = "ldo_vusb";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vmc_reg: ldo-vmc {
+				regulator-name = "ldo_vmc";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-boot-on;
+			};
+
+			mt6392_vmch_reg: ldo-vmch {
+				regulator-name = "ldo_vmch";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-boot-on;
+			};
+
+			mt6392_vemc3v3_reg: ldo-vemc3v3 {
+				regulator-name = "ldo_vemc3v3";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-boot-on;
+			};
+
+			mt6392_vgp1_reg: ldo-vgp1 {
+				regulator-name = "ldo_vgp1";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vgp2_reg: ldo-vgp2 {
+				regulator-name = "ldo_vgp2";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vcn18_reg: ldo-vcn18 {
+				regulator-name = "ldo_vcn18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vcamaf_reg: ldo-vcamaf {
+				regulator-name = "ldo_vcamaf";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vm_reg: ldo-vm {
+				regulator-name = "ldo_vm";
+				regulator-min-microvolt = <1240000>;
+				regulator-max-microvolt = <1390000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vio18_reg: ldo-vio18 {
+				regulator-name = "ldo_vio18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <264>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			mt6392_vcamd_reg: ldo-vcamd {
+				regulator-name = "ldo_vcamd";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vcamio_reg: ldo-vcamio {
+				regulator-name = "ldo_vcamio";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vm25_reg: ldo-vm25 {
+				regulator-name = "ldo_vm25";
+				regulator-min-microvolt = <2500000>;
+				regulator-max-microvolt = <2500000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+
+			mt6392_vefuse_reg: ldo-vefuse {
+				regulator-name = "ldo_vefuse";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <2000000>;
+				regulator-enable-ramp-delay = <264>;
+			};
+		};
+
+		mt6392keys: mt6392keys {
+			compatible = "mediatek,mt6392-keys";
+
+			power-key {
+				label = "power";
+				linux,keycodes = <KEY_POWER>;
+				wakeup-source;
+			};
+
+			home-key {
+				label = "home";
+				linux,keycodes = <KEY_HOME>;
+				wakeup-source;
+			};
+		};
+
+		mt6392rtc: mt6392rtc {
+			compatible = "mediatek,mt6392-rtc", "mediatek,mt6397-rtc";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8167-coral.dts b/arch/arm64/boot/dts/mediatek/mt8167-coral.dts
new file mode 100644
index 0000000..3e6a4db
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8167-coral.dts
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+
+#include "mt8167.dtsi"
+#include "mt6392.dtsi"
+
+/ {
+	model = "Google Coral MT8167";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:921600n8";
+	};
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x40000000>;
+	};
+
+	firmware {
+		optee: optee@4fd00000 {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
+};
+
+&cpu0 {
+	proc-supply = <&mt6392_vproc_reg>;
+};
+
+&cpu1 {
+	proc-supply = <&mt6392_vproc_reg>;
+};
+
+&cpu2 {
+	proc-supply = <&mt6392_vproc_reg>;
+};
+
+&cpu3 {
+	proc-supply = <&mt6392_vproc_reg>;
+};
+
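+/* PMIC interrupt on GPIO28 (PWRAP_INT); the PMIC acts as a nested irqchip. */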
+&mt6392_pmic {
+	interrupt-parent = <&pio>;
+	interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&pio {
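+	/* eMMC (MSDC0) pads: eight data lines plus CMD, CLK and RSTB */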
+	mmc0_pins_default: mmc0default {
+		pins_cmd_dat {
+			pinmux = <MT8167_PIN_120_MSDC0_DAT0__FUNC_MSDC0_DAT0>,
+				 <MT8167_PIN_119_MSDC0_DAT1__FUNC_MSDC0_DAT1>,
+				 <MT8167_PIN_118_MSDC0_DAT2__FUNC_MSDC0_DAT2>,
+				 <MT8167_PIN_117_MSDC0_DAT3__FUNC_MSDC0_DAT3>,
+				 <MT8167_PIN_113_MSDC0_DAT4__FUNC_MSDC0_DAT4>,
+				 <MT8167_PIN_112_MSDC0_DAT5__FUNC_MSDC0_DAT5>,
+				 <MT8167_PIN_111_MSDC0_DAT6__FUNC_MSDC0_DAT6>,
+				 <MT8167_PIN_110_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
+				 <MT8167_PIN_115_MSDC0_CMD__FUNC_MSDC0_CMD>;
+			input-enable;
+			bias-pull-up;
+		};
+
+		pins_clk {
+			pinmux = <MT8167_PIN_116_MSDC0_CLK__FUNC_MSDC0_CLK>;
+			bias-pull-down;
+		};
+
+		pins_rst {
+			pinmux = <MT8167_PIN_114_MSDC0_RSTB__FUNC_MSDC0_RSTB>;
+			bias-pull-up;
+		};
+	};
+
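+	/* HS200 ("state_uhs") pads: stronger drive, R1R0 pull selection */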
+	mmc0_pins_uhs: mmc0@0 {
+		pins_cmd_dat {
+			pinmux = <MT8167_PIN_120_MSDC0_DAT0__FUNC_MSDC0_DAT0>,
+				 <MT8167_PIN_119_MSDC0_DAT1__FUNC_MSDC0_DAT1>,
+				 <MT8167_PIN_118_MSDC0_DAT2__FUNC_MSDC0_DAT2>,
+				 <MT8167_PIN_117_MSDC0_DAT3__FUNC_MSDC0_DAT3>,
+				 <MT8167_PIN_113_MSDC0_DAT4__FUNC_MSDC0_DAT4>,
+				 <MT8167_PIN_112_MSDC0_DAT5__FUNC_MSDC0_DAT5>,
+				 <MT8167_PIN_111_MSDC0_DAT6__FUNC_MSDC0_DAT6>,
+				 <MT8167_PIN_110_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
+				 <MT8167_PIN_115_MSDC0_CMD__FUNC_MSDC0_CMD>;
+			input-enable;
+			drive-strength = <MTK_DRIVE_6mA>;
+			bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+		};
+
+		pins_clk {
+			pinmux = <MT8167_PIN_116_MSDC0_CLK__FUNC_MSDC0_CLK>;
+			drive-strength = <MTK_DRIVE_8mA>;
+			bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+		};
+
+		pins_rst {
+			pinmux = <MT8167_PIN_114_MSDC0_RSTB__FUNC_MSDC0_RSTB>;
+			bias-pull-up;
+		};
+	};
+
+	hdmi_pins_default: hdmi_pins_default {
+	};
+
+	hdmi_pins_hpd: hdmi_pins_hpd {
+		pins_cmd_dat {
+			pinmux = <MT8167_PIN_122_HTPLG__FUNC_HTPLG>;
+			bias-pull-down;
+		};
+	};
+};
+
+&mmc0 {
+	pinctrl-names = "default", "state_uhs";
+	pinctrl-0 = <&mmc0_pins_default>;
+	pinctrl-1 = <&mmc0_pins_uhs>;
+	status = "okay";
+	bus-width = <8>;
+	max-frequency = <200000000>;
+	cap-mmc-highspeed;
+	mmc-hs200-1_8v;
+	cap-mmc-hw-reset;
+	vmmc-supply = <&mt6392_vemc3v3_reg>;
+	vqmmc-supply = <&mt6392_vio18_reg>;
+	non-removable;
+};
+
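+/* HDMI path: DPI1 feeds the HDMI encoder; DDC uses the HDMI I2C bus. */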
+&dpi1 {
+	status = "okay";
+	ddc-i2c-bus = <&hdmiddc>;
+
+	port {
+		hdmi_connector_in: endpoint {
+			remote-endpoint = <&hdmi_out>;
+		};
+	};
+};
+
+&hdmi_phy {
+	status = "okay";
+};
+
+&cec {
+	status = "okay";
+};
+
+&hdmi {
+	pinctrl-names = "default", "hdmi_hpd";
+	pinctrl-0 = <&hdmi_pins_default>;
+	pinctrl-1 = <&hdmi_pins_hpd>;
+	status = "okay";
+
+	ports {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port@1 {
+			reg = <1>;
+
+			hdmi_out: endpoint {
+				remote-endpoint = <&hdmi_connector_in>;
+			};
+		};
+	};
+};
+
+&usb0 {
+	status = "okay";
+	dr_mode = "peripheral";
+
+	usb_con: connector {
+		compatible = "usb-c-connector";
+		label = "USB-C";
+	};
+};
+
+&usb0_phy {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8167-pinfunc.h b/arch/arm64/boot/dts/mediatek/mt8167-pinfunc.h
new file mode 100644
index 0000000..241adff
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8167-pinfunc.h
@@ -0,0 +1,757 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DTS_MT8167_PINFUNC_H
+#define __DTS_MT8167_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
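+/*
+ * Each macro encodes a pin and one of its function modes as
+ * MTK_PIN_NO(pin) | mode; mode 0 selects the plain GPIO function.
+ */
+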
+#define MT8167_PIN_0_EINT0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT8167_PIN_0_EINT0__FUNC_PWM_B (MTK_PIN_NO(0) | 1)
+#define MT8167_PIN_0_EINT0__FUNC_DPI_CK (MTK_PIN_NO(0) | 2)
+#define MT8167_PIN_0_EINT0__FUNC_I2S2_BCK (MTK_PIN_NO(0) | 3)
+#define MT8167_PIN_0_EINT0__FUNC_EXT_TXD0 (MTK_PIN_NO(0) | 4)
+#define MT8167_PIN_0_EINT0__FUNC_SQICS (MTK_PIN_NO(0) | 6)
+#define MT8167_PIN_0_EINT0__FUNC_DBG_MON_A_6 (MTK_PIN_NO(0) | 7)
+
+#define MT8167_PIN_1_EINT1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT8167_PIN_1_EINT1__FUNC_PWM_C (MTK_PIN_NO(1) | 1)
+#define MT8167_PIN_1_EINT1__FUNC_DPI_D12 (MTK_PIN_NO(1) | 2)
+#define MT8167_PIN_1_EINT1__FUNC_I2S2_DI (MTK_PIN_NO(1) | 3)
+#define MT8167_PIN_1_EINT1__FUNC_EXT_TXD1 (MTK_PIN_NO(1) | 4)
+#define MT8167_PIN_1_EINT1__FUNC_CONN_MCU_TDO (MTK_PIN_NO(1) | 5)
+#define MT8167_PIN_1_EINT1__FUNC_SQISO (MTK_PIN_NO(1) | 6)
+#define MT8167_PIN_1_EINT1__FUNC_DBG_MON_A_7 (MTK_PIN_NO(1) | 7)
+
+#define MT8167_PIN_2_EINT2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT8167_PIN_2_EINT2__FUNC_CLKM0 (MTK_PIN_NO(2) | 1)
+#define MT8167_PIN_2_EINT2__FUNC_DPI_D13 (MTK_PIN_NO(2) | 2)
+#define MT8167_PIN_2_EINT2__FUNC_I2S2_LRCK (MTK_PIN_NO(2) | 3)
+#define MT8167_PIN_2_EINT2__FUNC_EXT_TXD2 (MTK_PIN_NO(2) | 4)
+#define MT8167_PIN_2_EINT2__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(2) | 5)
+#define MT8167_PIN_2_EINT2__FUNC_SQISI (MTK_PIN_NO(2) | 6)
+#define MT8167_PIN_2_EINT2__FUNC_DBG_MON_A_8 (MTK_PIN_NO(2) | 7)
+
+#define MT8167_PIN_3_EINT3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT8167_PIN_3_EINT3__FUNC_CLKM1 (MTK_PIN_NO(3) | 1)
+#define MT8167_PIN_3_EINT3__FUNC_DPI_D14 (MTK_PIN_NO(3) | 2)
+#define MT8167_PIN_3_EINT3__FUNC_SPI_MI (MTK_PIN_NO(3) | 3)
+#define MT8167_PIN_3_EINT3__FUNC_EXT_TXD3 (MTK_PIN_NO(3) | 4)
+#define MT8167_PIN_3_EINT3__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(3) | 5)
+#define MT8167_PIN_3_EINT3__FUNC_SQIWP (MTK_PIN_NO(3) | 6)
+#define MT8167_PIN_3_EINT3__FUNC_DBG_MON_A_9 (MTK_PIN_NO(3) | 7)
+
+#define MT8167_PIN_4_EINT4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT8167_PIN_4_EINT4__FUNC_CLKM2 (MTK_PIN_NO(4) | 1)
+#define MT8167_PIN_4_EINT4__FUNC_DPI_D15 (MTK_PIN_NO(4) | 2)
+#define MT8167_PIN_4_EINT4__FUNC_SPI_MO (MTK_PIN_NO(4) | 3)
+#define MT8167_PIN_4_EINT4__FUNC_EXT_TXC (MTK_PIN_NO(4) | 4)
+#define MT8167_PIN_4_EINT4__FUNC_CONN_MCU_TCK (MTK_PIN_NO(4) | 5)
+#define MT8167_PIN_4_EINT4__FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(4) | 6)
+#define MT8167_PIN_4_EINT4__FUNC_DBG_MON_A_10 (MTK_PIN_NO(4) | 7)
+
+#define MT8167_PIN_5_EINT5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT8167_PIN_5_EINT5__FUNC_UCTS2 (MTK_PIN_NO(5) | 1)
+#define MT8167_PIN_5_EINT5__FUNC_DPI_D16 (MTK_PIN_NO(5) | 2)
+#define MT8167_PIN_5_EINT5__FUNC_SPI_CSB (MTK_PIN_NO(5) | 3)
+#define MT8167_PIN_5_EINT5__FUNC_EXT_RXER (MTK_PIN_NO(5) | 4)
+#define MT8167_PIN_5_EINT5__FUNC_CONN_MCU_TDI (MTK_PIN_NO(5) | 5)
+#define MT8167_PIN_5_EINT5__FUNC_CONN_TEST_CK (MTK_PIN_NO(5) | 6)
+#define MT8167_PIN_5_EINT5__FUNC_DBG_MON_A_11 (MTK_PIN_NO(5) | 7)
+
+#define MT8167_PIN_6_EINT6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT8167_PIN_6_EINT6__FUNC_URTS2 (MTK_PIN_NO(6) | 1)
+#define MT8167_PIN_6_EINT6__FUNC_DPI_D17 (MTK_PIN_NO(6) | 2)
+#define MT8167_PIN_6_EINT6__FUNC_SPI_CLK (MTK_PIN_NO(6) | 3)
+#define MT8167_PIN_6_EINT6__FUNC_EXT_RXC (MTK_PIN_NO(6) | 4)
+#define MT8167_PIN_6_EINT6__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(6) | 5)
+#define MT8167_PIN_6_EINT6__FUNC_MM_TEST_CK (MTK_PIN_NO(6) | 6)
+#define MT8167_PIN_6_EINT6__FUNC_DBG_MON_A_12 (MTK_PIN_NO(6) | 7)
+
+#define MT8167_PIN_7_EINT7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT8167_PIN_7_EINT7__FUNC_SQIRST (MTK_PIN_NO(7) | 1)
+#define MT8167_PIN_7_EINT7__FUNC_DPI_D6 (MTK_PIN_NO(7) | 2)
+#define MT8167_PIN_7_EINT7__FUNC_SDA1_0 (MTK_PIN_NO(7) | 3)
+#define MT8167_PIN_7_EINT7__FUNC_EXT_RXDV (MTK_PIN_NO(7) | 4)
+#define MT8167_PIN_7_EINT7__FUNC_CONN_MCU_TMS (MTK_PIN_NO(7) | 5)
+#define MT8167_PIN_7_EINT7__FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(7) | 6)
+#define MT8167_PIN_7_EINT7__FUNC_DBG_MON_A_13 (MTK_PIN_NO(7) | 7)
+
+#define MT8167_PIN_8_EINT8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT8167_PIN_8_EINT8__FUNC_SQICK (MTK_PIN_NO(8) | 1)
+#define MT8167_PIN_8_EINT8__FUNC_CLKM3 (MTK_PIN_NO(8) | 2)
+#define MT8167_PIN_8_EINT8__FUNC_SCL1_0 (MTK_PIN_NO(8) | 3)
+#define MT8167_PIN_8_EINT8__FUNC_EXT_RXD0 (MTK_PIN_NO(8) | 4)
+#define MT8167_PIN_8_EINT8__FUNC_ANT_SEL0 (MTK_PIN_NO(8) | 5)
+#define MT8167_PIN_8_EINT8__FUNC_DPI_D7 (MTK_PIN_NO(8) | 6)
+#define MT8167_PIN_8_EINT8__FUNC_DBG_MON_A_14 (MTK_PIN_NO(8) | 7)
+
+#define MT8167_PIN_9_EINT9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT8167_PIN_9_EINT9__FUNC_CLKM4 (MTK_PIN_NO(9) | 1)
+#define MT8167_PIN_9_EINT9__FUNC_SDA2_0 (MTK_PIN_NO(9) | 2)
+#define MT8167_PIN_9_EINT9__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(9) | 3)
+#define MT8167_PIN_9_EINT9__FUNC_EXT_RXD1 (MTK_PIN_NO(9) | 4)
+#define MT8167_PIN_9_EINT9__FUNC_ANT_SEL1 (MTK_PIN_NO(9) | 5)
+#define MT8167_PIN_9_EINT9__FUNC_DPI_D8 (MTK_PIN_NO(9) | 6)
+#define MT8167_PIN_9_EINT9__FUNC_DBG_MON_A_15 (MTK_PIN_NO(9) | 7)
+
+#define MT8167_PIN_10_EINT10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT8167_PIN_10_EINT10__FUNC_CLKM5 (MTK_PIN_NO(10) | 1)
+#define MT8167_PIN_10_EINT10__FUNC_SCL2_0 (MTK_PIN_NO(10) | 2)
+#define MT8167_PIN_10_EINT10__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(10) | 3)
+#define MT8167_PIN_10_EINT10__FUNC_EXT_RXD2 (MTK_PIN_NO(10) | 4)
+#define MT8167_PIN_10_EINT10__FUNC_ANT_SEL2 (MTK_PIN_NO(10) | 5)
+#define MT8167_PIN_10_EINT10__FUNC_DPI_D9 (MTK_PIN_NO(10) | 6)
+#define MT8167_PIN_10_EINT10__FUNC_DBG_MON_A_16 (MTK_PIN_NO(10) | 7)
+
+#define MT8167_PIN_11_EINT11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT8167_PIN_11_EINT11__FUNC_CLKM4 (MTK_PIN_NO(11) | 1)
+#define MT8167_PIN_11_EINT11__FUNC_PWM_C (MTK_PIN_NO(11) | 2)
+#define MT8167_PIN_11_EINT11__FUNC_CONN_TEST_CK (MTK_PIN_NO(11) | 3)
+#define MT8167_PIN_11_EINT11__FUNC_ANT_SEL3 (MTK_PIN_NO(11) | 4)
+#define MT8167_PIN_11_EINT11__FUNC_DPI_D10 (MTK_PIN_NO(11) | 5)
+#define MT8167_PIN_11_EINT11__FUNC_EXT_RXD3 (MTK_PIN_NO(11) | 6)
+#define MT8167_PIN_11_EINT11__FUNC_DBG_MON_A_17 (MTK_PIN_NO(11) | 7)
+
+#define MT8167_PIN_12_EINT12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT8167_PIN_12_EINT12__FUNC_CLKM5 (MTK_PIN_NO(12) | 1)
+#define MT8167_PIN_12_EINT12__FUNC_PWM_A (MTK_PIN_NO(12) | 2)
+#define MT8167_PIN_12_EINT12__FUNC_SPDIF_OUT (MTK_PIN_NO(12) | 3)
+#define MT8167_PIN_12_EINT12__FUNC_ANT_SEL4 (MTK_PIN_NO(12) | 4)
+#define MT8167_PIN_12_EINT12__FUNC_DPI_D11 (MTK_PIN_NO(12) | 5)
+#define MT8167_PIN_12_EINT12__FUNC_EXT_TXEN (MTK_PIN_NO(12) | 6)
+#define MT8167_PIN_12_EINT12__FUNC_DBG_MON_A_18 (MTK_PIN_NO(12) | 7)
+
+#define MT8167_PIN_13_EINT13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT8167_PIN_13_EINT13__FUNC_TSF_IN (MTK_PIN_NO(13) | 3)
+#define MT8167_PIN_13_EINT13__FUNC_ANT_SEL5 (MTK_PIN_NO(13) | 4)
+#define MT8167_PIN_13_EINT13__FUNC_DPI_D0 (MTK_PIN_NO(13) | 5)
+#define MT8167_PIN_13_EINT13__FUNC_SPDIF_IN (MTK_PIN_NO(13) | 6)
+#define MT8167_PIN_13_EINT13__FUNC_DBG_MON_A_19 (MTK_PIN_NO(13) | 7)
+
+#define MT8167_PIN_14_EINT14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT8167_PIN_14_EINT14__FUNC_I2S_8CH_DO1 (MTK_PIN_NO(14) | 2)
+#define MT8167_PIN_14_EINT14__FUNC_TDM_RX_MCK (MTK_PIN_NO(14) | 3)
+#define MT8167_PIN_14_EINT14__FUNC_ANT_SEL1 (MTK_PIN_NO(14) | 4)
+#define MT8167_PIN_14_EINT14__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(14) | 5)
+#define MT8167_PIN_14_EINT14__FUNC_NCLE (MTK_PIN_NO(14) | 6)
+#define MT8167_PIN_14_EINT14__FUNC_DBG_MON_B_8 (MTK_PIN_NO(14) | 7)
+
+#define MT8167_PIN_15_EINT15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT8167_PIN_15_EINT15__FUNC_I2S_8CH_LRCK (MTK_PIN_NO(15) | 2)
+#define MT8167_PIN_15_EINT15__FUNC_TDM_RX_BCK (MTK_PIN_NO(15) | 3)
+#define MT8167_PIN_15_EINT15__FUNC_ANT_SEL2 (MTK_PIN_NO(15) | 4)
+#define MT8167_PIN_15_EINT15__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(15) | 5)
+#define MT8167_PIN_15_EINT15__FUNC_NCEB1 (MTK_PIN_NO(15) | 6)
+#define MT8167_PIN_15_EINT15__FUNC_DBG_MON_B_9 (MTK_PIN_NO(15) | 7)
+
+#define MT8167_PIN_16_EINT16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define MT8167_PIN_16_EINT16__FUNC_I2S_8CH_BCK (MTK_PIN_NO(16) | 2)
+#define MT8167_PIN_16_EINT16__FUNC_TDM_RX_LRCK (MTK_PIN_NO(16) | 3)
+#define MT8167_PIN_16_EINT16__FUNC_ANT_SEL3 (MTK_PIN_NO(16) | 4)
+#define MT8167_PIN_16_EINT16__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(16) | 5)
+#define MT8167_PIN_16_EINT16__FUNC_NCEB0 (MTK_PIN_NO(16) | 6)
+#define MT8167_PIN_16_EINT16__FUNC_DBG_MON_B_10 (MTK_PIN_NO(16) | 7)
+
+#define MT8167_PIN_17_EINT17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define MT8167_PIN_17_EINT17__FUNC_I2S_8CH_MCK (MTK_PIN_NO(17) | 2)
+#define MT8167_PIN_17_EINT17__FUNC_TDM_RX_DI (MTK_PIN_NO(17) | 3)
+#define MT8167_PIN_17_EINT17__FUNC_IDDIG (MTK_PIN_NO(17) | 4)
+#define MT8167_PIN_17_EINT17__FUNC_ANT_SEL4 (MTK_PIN_NO(17) | 5)
+#define MT8167_PIN_17_EINT17__FUNC_NREB (MTK_PIN_NO(17) | 6)
+#define MT8167_PIN_17_EINT17__FUNC_DBG_MON_B_11 (MTK_PIN_NO(17) | 7)
+
+#define MT8167_PIN_18_EINT18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT8167_PIN_18_EINT18__FUNC_USB_DRVVBUS (MTK_PIN_NO(18) | 2)
+#define MT8167_PIN_18_EINT18__FUNC_I2S3_LRCK (MTK_PIN_NO(18) | 3)
+#define MT8167_PIN_18_EINT18__FUNC_CLKM1 (MTK_PIN_NO(18) | 4)
+#define MT8167_PIN_18_EINT18__FUNC_ANT_SEL3 (MTK_PIN_NO(18) | 5)
+#define MT8167_PIN_18_EINT18__FUNC_I2S2_BCK (MTK_PIN_NO(18) | 6)
+#define MT8167_PIN_18_EINT18__FUNC_DBG_MON_A_20 (MTK_PIN_NO(18) | 7)
+
+#define MT8167_PIN_19_EINT19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT8167_PIN_19_EINT19__FUNC_UCTS1 (MTK_PIN_NO(19) | 1)
+#define MT8167_PIN_19_EINT19__FUNC_IDDIG (MTK_PIN_NO(19) | 2)
+#define MT8167_PIN_19_EINT19__FUNC_I2S3_BCK (MTK_PIN_NO(19) | 3)
+#define MT8167_PIN_19_EINT19__FUNC_CLKM2 (MTK_PIN_NO(19) | 4)
+#define MT8167_PIN_19_EINT19__FUNC_ANT_SEL4 (MTK_PIN_NO(19) | 5)
+#define MT8167_PIN_19_EINT19__FUNC_I2S2_DI (MTK_PIN_NO(19) | 6)
+#define MT8167_PIN_19_EINT19__FUNC_DBG_MON_A_21 (MTK_PIN_NO(19) | 7)
+
+#define MT8167_PIN_20_EINT20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT8167_PIN_20_EINT20__FUNC_URTS1 (MTK_PIN_NO(20) | 1)
+#define MT8167_PIN_20_EINT20__FUNC_I2S3_DO (MTK_PIN_NO(20) | 3)
+#define MT8167_PIN_20_EINT20__FUNC_CLKM3 (MTK_PIN_NO(20) | 4)
+#define MT8167_PIN_20_EINT20__FUNC_ANT_SEL5 (MTK_PIN_NO(20) | 5)
+#define MT8167_PIN_20_EINT20__FUNC_I2S2_LRCK (MTK_PIN_NO(20) | 6)
+#define MT8167_PIN_20_EINT20__FUNC_DBG_MON_A_22 (MTK_PIN_NO(20) | 7)
+
+#define MT8167_PIN_21_EINT21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT8167_PIN_21_EINT21__FUNC_NRNB (MTK_PIN_NO(21) | 1)
+#define MT8167_PIN_21_EINT21__FUNC_ANT_SEL0 (MTK_PIN_NO(21) | 2)
+#define MT8167_PIN_21_EINT21__FUNC_I2S_8CH_DO4 (MTK_PIN_NO(21) | 3)
+#define MT8167_PIN_21_EINT21__FUNC_DBG_MON_B_31 (MTK_PIN_NO(21) | 7)
+
+#define MT8167_PIN_22_EINT22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT8167_PIN_22_EINT22__FUNC_I2S_8CH_DO2 (MTK_PIN_NO(22) | 2)
+#define MT8167_PIN_22_EINT22__FUNC_TSF_IN (MTK_PIN_NO(22) | 3)
+#define MT8167_PIN_22_EINT22__FUNC_USB_DRVVBUS (MTK_PIN_NO(22) | 4)
+#define MT8167_PIN_22_EINT22__FUNC_SPDIF_OUT (MTK_PIN_NO(22) | 5)
+#define MT8167_PIN_22_EINT22__FUNC_NRE_C (MTK_PIN_NO(22) | 6)
+#define MT8167_PIN_22_EINT22__FUNC_DBG_MON_B_12 (MTK_PIN_NO(22) | 7)
+
+#define MT8167_PIN_23_EINT23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT8167_PIN_23_EINT23__FUNC_I2S_8CH_DO3 (MTK_PIN_NO(23) | 2)
+#define MT8167_PIN_23_EINT23__FUNC_CLKM0 (MTK_PIN_NO(23) | 3)
+#define MT8167_PIN_23_EINT23__FUNC_IR (MTK_PIN_NO(23) | 4)
+#define MT8167_PIN_23_EINT23__FUNC_SPDIF_IN (MTK_PIN_NO(23) | 5)
+#define MT8167_PIN_23_EINT23__FUNC_NDQS_C (MTK_PIN_NO(23) | 6)
+#define MT8167_PIN_23_EINT23__FUNC_DBG_MON_B_13 (MTK_PIN_NO(23) | 7)
+
+#define MT8167_PIN_24_EINT24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT8167_PIN_24_EINT24__FUNC_DPI_D20 (MTK_PIN_NO(24) | 1)
+#define MT8167_PIN_24_EINT24__FUNC_DPI_DE (MTK_PIN_NO(24) | 2)
+#define MT8167_PIN_24_EINT24__FUNC_ANT_SEL1 (MTK_PIN_NO(24) | 3)
+#define MT8167_PIN_24_EINT24__FUNC_UCTS2 (MTK_PIN_NO(24) | 4)
+#define MT8167_PIN_24_EINT24__FUNC_PWM_A (MTK_PIN_NO(24) | 5)
+#define MT8167_PIN_24_EINT24__FUNC_I2S0_MCK (MTK_PIN_NO(24) | 6)
+#define MT8167_PIN_24_EINT24__FUNC_DBG_MON_A_0 (MTK_PIN_NO(24) | 7)
+
+#define MT8167_PIN_25_EINT25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT8167_PIN_25_EINT25__FUNC_DPI_D19 (MTK_PIN_NO(25) | 1)
+#define MT8167_PIN_25_EINT25__FUNC_DPI_VSYNC (MTK_PIN_NO(25) | 2)
+#define MT8167_PIN_25_EINT25__FUNC_ANT_SEL0 (MTK_PIN_NO(25) | 3)
+#define MT8167_PIN_25_EINT25__FUNC_URTS2 (MTK_PIN_NO(25) | 4)
+#define MT8167_PIN_25_EINT25__FUNC_PWM_B (MTK_PIN_NO(25) | 5)
+#define MT8167_PIN_25_EINT25__FUNC_I2S_8CH_MCK (MTK_PIN_NO(25) | 6)
+#define MT8167_PIN_25_EINT25__FUNC_DBG_MON_A_1 (MTK_PIN_NO(25) | 7)
+
+#define MT8167_PIN_26_PWRAP_SPI0_MI__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT8167_PIN_26_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(26) | 1)
+#define MT8167_PIN_26_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(26) | 2)
+
+#define MT8167_PIN_27_PWRAP_SPI0_MO__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT8167_PIN_27_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(27) | 1)
+#define MT8167_PIN_27_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(27) | 2)
+
+#define MT8167_PIN_28_PWRAP_INT__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT8167_PIN_28_PWRAP_INT__FUNC_I2S0_MCK (MTK_PIN_NO(28) | 1)
+#define MT8167_PIN_28_PWRAP_INT__FUNC_I2S_8CH_MCK (MTK_PIN_NO(28) | 4)
+#define MT8167_PIN_28_PWRAP_INT__FUNC_I2S2_MCK (MTK_PIN_NO(28) | 5)
+#define MT8167_PIN_28_PWRAP_INT__FUNC_I2S3_MCK (MTK_PIN_NO(28) | 6)
+
+#define MT8167_PIN_29_PWRAP_SPI0_CK__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT8167_PIN_29_PWRAP_SPI0_CK__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(29) | 1)
+
+#define MT8167_PIN_30_PWRAP_SPI0_CSN__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define MT8167_PIN_30_PWRAP_SPI0_CSN__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(30) | 1)
+
+#define MT8167_PIN_31_RTC32K_CK__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define MT8167_PIN_31_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(31) | 1)
+
+#define MT8167_PIN_32_WATCHDOG__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define MT8167_PIN_32_WATCHDOG__FUNC_WATCHDOG (MTK_PIN_NO(32) | 1)
+
+#define MT8167_PIN_33_SRCLKENA__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT8167_PIN_33_SRCLKENA__FUNC_SRCLKENA0 (MTK_PIN_NO(33) | 1)
+
+#define MT8167_PIN_34_URXD2__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT8167_PIN_34_URXD2__FUNC_URXD2 (MTK_PIN_NO(34) | 1)
+#define MT8167_PIN_34_URXD2__FUNC_DPI_D5 (MTK_PIN_NO(34) | 2)
+#define MT8167_PIN_34_URXD2__FUNC_UTXD2 (MTK_PIN_NO(34) | 3)
+#define MT8167_PIN_34_URXD2__FUNC_DBG_SCL (MTK_PIN_NO(34) | 4)
+#define MT8167_PIN_34_URXD2__FUNC_I2S2_MCK (MTK_PIN_NO(34) | 6)
+#define MT8167_PIN_34_URXD2__FUNC_DBG_MON_B_0 (MTK_PIN_NO(34) | 7)
+
+#define MT8167_PIN_35_UTXD2__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT8167_PIN_35_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(35) | 1)
+#define MT8167_PIN_35_UTXD2__FUNC_DPI_HSYNC (MTK_PIN_NO(35) | 2)
+#define MT8167_PIN_35_UTXD2__FUNC_URXD2 (MTK_PIN_NO(35) | 3)
+#define MT8167_PIN_35_UTXD2__FUNC_DBG_SDA (MTK_PIN_NO(35) | 4)
+#define MT8167_PIN_35_UTXD2__FUNC_DPI_D18 (MTK_PIN_NO(35) | 5)
+#define MT8167_PIN_35_UTXD2__FUNC_I2S3_MCK (MTK_PIN_NO(35) | 6)
+#define MT8167_PIN_35_UTXD2__FUNC_DBG_MON_B_1 (MTK_PIN_NO(35) | 7)
+
+#define MT8167_PIN_36_MRG_CLK__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT8167_PIN_36_MRG_CLK__FUNC_MRG_CLK (MTK_PIN_NO(36) | 1)
+#define MT8167_PIN_36_MRG_CLK__FUNC_DPI_D4 (MTK_PIN_NO(36) | 2)
+#define MT8167_PIN_36_MRG_CLK__FUNC_I2S0_BCK (MTK_PIN_NO(36) | 3)
+#define MT8167_PIN_36_MRG_CLK__FUNC_I2S3_BCK (MTK_PIN_NO(36) | 4)
+#define MT8167_PIN_36_MRG_CLK__FUNC_PCM0_CLK (MTK_PIN_NO(36) | 5)
+#define MT8167_PIN_36_MRG_CLK__FUNC_IR (MTK_PIN_NO(36) | 6)
+#define MT8167_PIN_36_MRG_CLK__FUNC_DBG_MON_A_2 (MTK_PIN_NO(36) | 7)
+
+#define MT8167_PIN_37_MRG_SYNC__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT8167_PIN_37_MRG_SYNC__FUNC_MRG_SYNC (MTK_PIN_NO(37) | 1)
+#define MT8167_PIN_37_MRG_SYNC__FUNC_DPI_D3 (MTK_PIN_NO(37) | 2)
+#define MT8167_PIN_37_MRG_SYNC__FUNC_I2S0_LRCK (MTK_PIN_NO(37) | 3)
+#define MT8167_PIN_37_MRG_SYNC__FUNC_I2S3_LRCK (MTK_PIN_NO(37) | 4)
+#define MT8167_PIN_37_MRG_SYNC__FUNC_PCM0_SYNC (MTK_PIN_NO(37) | 5)
+#define MT8167_PIN_37_MRG_SYNC__FUNC_EXT_COL (MTK_PIN_NO(37) | 6)
+#define MT8167_PIN_37_MRG_SYNC__FUNC_DBG_MON_A_3 (MTK_PIN_NO(37) | 7)
+
+#define MT8167_PIN_38_MRG_DI__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT8167_PIN_38_MRG_DI__FUNC_MRG_DI (MTK_PIN_NO(38) | 1)
+#define MT8167_PIN_38_MRG_DI__FUNC_DPI_D1 (MTK_PIN_NO(38) | 2)
+#define MT8167_PIN_38_MRG_DI__FUNC_I2S0_DI (MTK_PIN_NO(38) | 3)
+#define MT8167_PIN_38_MRG_DI__FUNC_I2S3_DO (MTK_PIN_NO(38) | 4)
+#define MT8167_PIN_38_MRG_DI__FUNC_PCM0_DI (MTK_PIN_NO(38) | 5)
+#define MT8167_PIN_38_MRG_DI__FUNC_EXT_MDIO (MTK_PIN_NO(38) | 6)
+#define MT8167_PIN_38_MRG_DI__FUNC_DBG_MON_A_4 (MTK_PIN_NO(38) | 7)
+
+#define MT8167_PIN_39_MRG_DO__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT8167_PIN_39_MRG_DO__FUNC_MRG_DO (MTK_PIN_NO(39) | 1)
+#define MT8167_PIN_39_MRG_DO__FUNC_DPI_D2 (MTK_PIN_NO(39) | 2)
+#define MT8167_PIN_39_MRG_DO__FUNC_I2S0_MCK (MTK_PIN_NO(39) | 3)
+#define MT8167_PIN_39_MRG_DO__FUNC_I2S3_MCK (MTK_PIN_NO(39) | 4)
+#define MT8167_PIN_39_MRG_DO__FUNC_PCM0_DO (MTK_PIN_NO(39) | 5)
+#define MT8167_PIN_39_MRG_DO__FUNC_EXT_MDC (MTK_PIN_NO(39) | 6)
+#define MT8167_PIN_39_MRG_DO__FUNC_DBG_MON_A_5 (MTK_PIN_NO(39) | 7)
+
+#define MT8167_PIN_40_KPROW0__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT8167_PIN_40_KPROW0__FUNC_KPROW0 (MTK_PIN_NO(40) | 1)
+#define MT8167_PIN_40_KPROW0__FUNC_IMG_TEST_CK (MTK_PIN_NO(40) | 4)
+#define MT8167_PIN_40_KPROW0__FUNC_DBG_MON_B_4 (MTK_PIN_NO(40) | 7)
+
+#define MT8167_PIN_41_KPROW1__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT8167_PIN_41_KPROW1__FUNC_KPROW1 (MTK_PIN_NO(41) | 1)
+#define MT8167_PIN_41_KPROW1__FUNC_IDDIG (MTK_PIN_NO(41) | 2)
+#define MT8167_PIN_41_KPROW1__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(41) | 3)
+#define MT8167_PIN_41_KPROW1__FUNC_MFG_TEST_CK (MTK_PIN_NO(41) | 4)
+#define MT8167_PIN_41_KPROW1__FUNC_DBG_MON_B_5 (MTK_PIN_NO(41) | 7)
+
+#define MT8167_PIN_42_KPCOL0__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT8167_PIN_42_KPCOL0__FUNC_KPCOL0 (MTK_PIN_NO(42) | 1)
+#define MT8167_PIN_42_KPCOL0__FUNC_DBG_MON_B_6 (MTK_PIN_NO(42) | 7)
+
+#define MT8167_PIN_43_KPCOL1__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT8167_PIN_43_KPCOL1__FUNC_KPCOL1 (MTK_PIN_NO(43) | 1)
+#define MT8167_PIN_43_KPCOL1__FUNC_USB_DRVVBUS (MTK_PIN_NO(43) | 2)
+#define MT8167_PIN_43_KPCOL1__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(43) | 3)
+#define MT8167_PIN_43_KPCOL1__FUNC_TSF_IN (MTK_PIN_NO(43) | 4)
+#define MT8167_PIN_43_KPCOL1__FUNC_DFD_NTRST_XI (MTK_PIN_NO(43) | 5)
+#define MT8167_PIN_43_KPCOL1__FUNC_UDI_NTRST_XI (MTK_PIN_NO(43) | 6)
+#define MT8167_PIN_43_KPCOL1__FUNC_DBG_MON_B_7 (MTK_PIN_NO(43) | 7)
+
+#define MT8167_PIN_44_JTMS__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT8167_PIN_44_JTMS__FUNC_JTMS (MTK_PIN_NO(44) | 1)
+#define MT8167_PIN_44_JTMS__FUNC_CONN_MCU_TMS (MTK_PIN_NO(44) | 2)
+#define MT8167_PIN_44_JTMS__FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(44) | 3)
+#define MT8167_PIN_44_JTMS__FUNC_GPUDFD_TMS_XI (MTK_PIN_NO(44) | 4)
+#define MT8167_PIN_44_JTMS__FUNC_DFD_TMS_XI (MTK_PIN_NO(44) | 5)
+#define MT8167_PIN_44_JTMS__FUNC_UDI_TMS_XI (MTK_PIN_NO(44) | 6)
+
+#define MT8167_PIN_45_JTCK__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT8167_PIN_45_JTCK__FUNC_JTCK (MTK_PIN_NO(45) | 1)
+#define MT8167_PIN_45_JTCK__FUNC_CONN_MCU_TCK (MTK_PIN_NO(45) | 2)
+#define MT8167_PIN_45_JTCK__FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(45) | 3)
+#define MT8167_PIN_45_JTCK__FUNC_GPUDFD_TCK_XI (MTK_PIN_NO(45) | 4)
+#define MT8167_PIN_45_JTCK__FUNC_DFD_TCK_XI (MTK_PIN_NO(45) | 5)
+#define MT8167_PIN_45_JTCK__FUNC_UDI_TCK_XI (MTK_PIN_NO(45) | 6)
+
+#define MT8167_PIN_46_JTDI__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT8167_PIN_46_JTDI__FUNC_JTDI (MTK_PIN_NO(46) | 1)
+#define MT8167_PIN_46_JTDI__FUNC_CONN_MCU_TDI (MTK_PIN_NO(46) | 2)
+#define MT8167_PIN_46_JTDI__FUNC_GPUDFD_TDI_XI (MTK_PIN_NO(46) | 4)
+#define MT8167_PIN_46_JTDI__FUNC_DFD_TDI_XI (MTK_PIN_NO(46) | 5)
+#define MT8167_PIN_46_JTDI__FUNC_UDI_TDI_XI (MTK_PIN_NO(46) | 6)
+
+#define MT8167_PIN_47_JTDO__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT8167_PIN_47_JTDO__FUNC_JTDO (MTK_PIN_NO(47) | 1)
+#define MT8167_PIN_47_JTDO__FUNC_CONN_MCU_TDO (MTK_PIN_NO(47) | 2)
+#define MT8167_PIN_47_JTDO__FUNC_GPUDFD_TDO (MTK_PIN_NO(47) | 4)
+#define MT8167_PIN_47_JTDO__FUNC_DFD_TDO (MTK_PIN_NO(47) | 5)
+#define MT8167_PIN_47_JTDO__FUNC_UDI_TDO (MTK_PIN_NO(47) | 6)
+
+#define MT8167_PIN_48_SPI_CS__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT8167_PIN_48_SPI_CS__FUNC_SPI_CSB (MTK_PIN_NO(48) | 1)
+#define MT8167_PIN_48_SPI_CS__FUNC_I2S0_DI (MTK_PIN_NO(48) | 3)
+#define MT8167_PIN_48_SPI_CS__FUNC_I2S2_BCK (MTK_PIN_NO(48) | 4)
+#define MT8167_PIN_48_SPI_CS__FUNC_DBG_MON_A_23 (MTK_PIN_NO(48) | 7)
+
+#define MT8167_PIN_49_SPI_CK__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT8167_PIN_49_SPI_CK__FUNC_SPI_CLK (MTK_PIN_NO(49) | 1)
+#define MT8167_PIN_49_SPI_CK__FUNC_I2S0_LRCK (MTK_PIN_NO(49) | 3)
+#define MT8167_PIN_49_SPI_CK__FUNC_I2S2_DI (MTK_PIN_NO(49) | 4)
+#define MT8167_PIN_49_SPI_CK__FUNC_DBG_MON_A_24 (MTK_PIN_NO(49) | 7)
+
+#define MT8167_PIN_50_SPI_MI__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define MT8167_PIN_50_SPI_MI__FUNC_SPI_MI (MTK_PIN_NO(50) | 1)
+#define MT8167_PIN_50_SPI_MI__FUNC_SPI_MO (MTK_PIN_NO(50) | 2)
+#define MT8167_PIN_50_SPI_MI__FUNC_I2S0_BCK (MTK_PIN_NO(50) | 3)
+#define MT8167_PIN_50_SPI_MI__FUNC_I2S2_LRCK (MTK_PIN_NO(50) | 4)
+#define MT8167_PIN_50_SPI_MI__FUNC_DBG_MON_A_25 (MTK_PIN_NO(50) | 7)
+
+#define MT8167_PIN_51_SPI_MO__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define MT8167_PIN_51_SPI_MO__FUNC_SPI_MO (MTK_PIN_NO(51) | 1)
+#define MT8167_PIN_51_SPI_MO__FUNC_SPI_MI (MTK_PIN_NO(51) | 2)
+#define MT8167_PIN_51_SPI_MO__FUNC_I2S0_MCK (MTK_PIN_NO(51) | 3)
+#define MT8167_PIN_51_SPI_MO__FUNC_I2S2_MCK (MTK_PIN_NO(51) | 4)
+#define MT8167_PIN_51_SPI_MO__FUNC_DBG_MON_A_26 (MTK_PIN_NO(51) | 7)
+
+#define MT8167_PIN_52_SDA1__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define MT8167_PIN_52_SDA1__FUNC_SDA1_0 (MTK_PIN_NO(52) | 1)
+
+#define MT8167_PIN_53_SCL1__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT8167_PIN_53_SCL1__FUNC_SCL1_0 (MTK_PIN_NO(53) | 1)
+
+#define MT8167_PIN_54_DISP_PWM__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT8167_PIN_54_DISP_PWM__FUNC_DISP_PWM (MTK_PIN_NO(54) | 1)
+#define MT8167_PIN_54_DISP_PWM__FUNC_PWM_B (MTK_PIN_NO(54) | 2)
+#define MT8167_PIN_54_DISP_PWM__FUNC_DBG_MON_B_2 (MTK_PIN_NO(54) | 7)
+
+#define MT8167_PIN_55_I2S_DATA_IN__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT8167_PIN_55_I2S_DATA_IN__FUNC_I2S0_DI (MTK_PIN_NO(55) | 1)
+#define MT8167_PIN_55_I2S_DATA_IN__FUNC_UCTS0 (MTK_PIN_NO(55) | 2)
+#define MT8167_PIN_55_I2S_DATA_IN__FUNC_I2S3_DO (MTK_PIN_NO(55) | 3)
+#define MT8167_PIN_55_I2S_DATA_IN__FUNC_I2S_8CH_DO1 (MTK_PIN_NO(55) | 4)
+#define MT8167_PIN_55_I2S_DATA_IN__FUNC_PWM_A (MTK_PIN_NO(55) | 5)
+#define MT8167_PIN_55_I2S_DATA_IN__FUNC_I2S2_BCK (MTK_PIN_NO(55) | 6)
+#define MT8167_PIN_55_I2S_DATA_IN__FUNC_DBG_MON_A_28 (MTK_PIN_NO(55) | 7)
+
+#define MT8167_PIN_56_I2S_LRCK__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT8167_PIN_56_I2S_LRCK__FUNC_I2S0_LRCK (MTK_PIN_NO(56) | 1)
+#define MT8167_PIN_56_I2S_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(56) | 3)
+#define MT8167_PIN_56_I2S_LRCK__FUNC_I2S_8CH_LRCK (MTK_PIN_NO(56) | 4)
+#define MT8167_PIN_56_I2S_LRCK__FUNC_PWM_B (MTK_PIN_NO(56) | 5)
+#define MT8167_PIN_56_I2S_LRCK__FUNC_I2S2_DI (MTK_PIN_NO(56) | 6)
+#define MT8167_PIN_56_I2S_LRCK__FUNC_DBG_MON_A_29 (MTK_PIN_NO(56) | 7)
+
+#define MT8167_PIN_57_I2S_BCK__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define MT8167_PIN_57_I2S_BCK__FUNC_I2S0_BCK (MTK_PIN_NO(57) | 1)
+#define MT8167_PIN_57_I2S_BCK__FUNC_URTS0 (MTK_PIN_NO(57) | 2)
+#define MT8167_PIN_57_I2S_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(57) | 3)
+#define MT8167_PIN_57_I2S_BCK__FUNC_I2S_8CH_BCK (MTK_PIN_NO(57) | 4)
+#define MT8167_PIN_57_I2S_BCK__FUNC_PWM_C (MTK_PIN_NO(57) | 5)
+#define MT8167_PIN_57_I2S_BCK__FUNC_I2S2_LRCK (MTK_PIN_NO(57) | 6)
+#define MT8167_PIN_57_I2S_BCK__FUNC_DBG_MON_A_30 (MTK_PIN_NO(57) | 7)
+
+#define MT8167_PIN_58_SDA0__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define MT8167_PIN_58_SDA0__FUNC_SDA0_0 (MTK_PIN_NO(58) | 1)
+
+#define MT8167_PIN_59_SCL0__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define MT8167_PIN_59_SCL0__FUNC_SCL0_0 (MTK_PIN_NO(59) | 1)
+
+#define MT8167_PIN_60_SDA2__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define MT8167_PIN_60_SDA2__FUNC_SDA2_0 (MTK_PIN_NO(60) | 1)
+#define MT8167_PIN_60_SDA2__FUNC_PWM_B (MTK_PIN_NO(60) | 2)
+
+#define MT8167_PIN_61_SCL2__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define MT8167_PIN_61_SCL2__FUNC_SCL2_0 (MTK_PIN_NO(61) | 1)
+#define MT8167_PIN_61_SCL2__FUNC_PWM_C (MTK_PIN_NO(61) | 2)
+
+#define MT8167_PIN_62_URXD0__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define MT8167_PIN_62_URXD0__FUNC_URXD0 (MTK_PIN_NO(62) | 1)
+#define MT8167_PIN_62_URXD0__FUNC_UTXD0 (MTK_PIN_NO(62) | 2)
+
+#define MT8167_PIN_63_UTXD0__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define MT8167_PIN_63_UTXD0__FUNC_UTXD0 (MTK_PIN_NO(63) | 1)
+#define MT8167_PIN_63_UTXD0__FUNC_URXD0 (MTK_PIN_NO(63) | 2)
+
+#define MT8167_PIN_64_URXD1__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define MT8167_PIN_64_URXD1__FUNC_URXD1 (MTK_PIN_NO(64) | 1)
+#define MT8167_PIN_64_URXD1__FUNC_UTXD1 (MTK_PIN_NO(64) | 2)
+#define MT8167_PIN_64_URXD1__FUNC_DBG_MON_A_27 (MTK_PIN_NO(64) | 7)
+
+#define MT8167_PIN_65_UTXD1__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define MT8167_PIN_65_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(65) | 1)
+#define MT8167_PIN_65_UTXD1__FUNC_URXD1 (MTK_PIN_NO(65) | 2)
+#define MT8167_PIN_65_UTXD1__FUNC_DBG_MON_A_31 (MTK_PIN_NO(65) | 7)
+
+#define MT8167_PIN_66_LCM_RST__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define MT8167_PIN_66_LCM_RST__FUNC_LCM_RST (MTK_PIN_NO(66) | 1)
+#define MT8167_PIN_66_LCM_RST__FUNC_I2S0_MCK (MTK_PIN_NO(66) | 3)
+#define MT8167_PIN_66_LCM_RST__FUNC_DBG_MON_B_3 (MTK_PIN_NO(66) | 7)
+
+#define MT8167_PIN_67_DSI_TE__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define MT8167_PIN_67_DSI_TE__FUNC_DSI_TE (MTK_PIN_NO(67) | 1)
+#define MT8167_PIN_67_DSI_TE__FUNC_I2S_8CH_MCK (MTK_PIN_NO(67) | 3)
+#define MT8167_PIN_67_DSI_TE__FUNC_DBG_MON_B_14 (MTK_PIN_NO(67) | 7)
+
+#define MT8167_PIN_68_MSDC2_CMD__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define MT8167_PIN_68_MSDC2_CMD__FUNC_MSDC2_CMD (MTK_PIN_NO(68) | 1)
+#define MT8167_PIN_68_MSDC2_CMD__FUNC_I2S_8CH_DO4 (MTK_PIN_NO(68) | 2)
+#define MT8167_PIN_68_MSDC2_CMD__FUNC_SDA1_0 (MTK_PIN_NO(68) | 3)
+#define MT8167_PIN_68_MSDC2_CMD__FUNC_USB_SDA (MTK_PIN_NO(68) | 5)
+#define MT8167_PIN_68_MSDC2_CMD__FUNC_I2S3_BCK (MTK_PIN_NO(68) | 6)
+#define MT8167_PIN_68_MSDC2_CMD__FUNC_DBG_MON_B_15 (MTK_PIN_NO(68) | 7)
+
+#define MT8167_PIN_69_MSDC2_CLK__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define MT8167_PIN_69_MSDC2_CLK__FUNC_MSDC2_CLK (MTK_PIN_NO(69) | 1)
+#define MT8167_PIN_69_MSDC2_CLK__FUNC_I2S_8CH_DO3 (MTK_PIN_NO(69) | 2)
+#define MT8167_PIN_69_MSDC2_CLK__FUNC_SCL1_0 (MTK_PIN_NO(69) | 3)
+#define MT8167_PIN_69_MSDC2_CLK__FUNC_DPI_D21 (MTK_PIN_NO(69) | 4)
+#define MT8167_PIN_69_MSDC2_CLK__FUNC_USB_SCL (MTK_PIN_NO(69) | 5)
+#define MT8167_PIN_69_MSDC2_CLK__FUNC_I2S3_LRCK (MTK_PIN_NO(69) | 6)
+#define MT8167_PIN_69_MSDC2_CLK__FUNC_DBG_MON_B_16 (MTK_PIN_NO(69) | 7)
+
+#define MT8167_PIN_70_MSDC2_DAT0__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define MT8167_PIN_70_MSDC2_DAT0__FUNC_MSDC2_DAT0 (MTK_PIN_NO(70) | 1)
+#define MT8167_PIN_70_MSDC2_DAT0__FUNC_I2S_8CH_DO2 (MTK_PIN_NO(70) | 2)
+#define MT8167_PIN_70_MSDC2_DAT0__FUNC_DPI_D22 (MTK_PIN_NO(70) | 4)
+#define MT8167_PIN_70_MSDC2_DAT0__FUNC_UTXD0 (MTK_PIN_NO(70) | 5)
+#define MT8167_PIN_70_MSDC2_DAT0__FUNC_I2S3_DO (MTK_PIN_NO(70) | 6)
+#define MT8167_PIN_70_MSDC2_DAT0__FUNC_DBG_MON_B_17 (MTK_PIN_NO(70) | 7)
+
+#define MT8167_PIN_71_MSDC2_DAT1__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define MT8167_PIN_71_MSDC2_DAT1__FUNC_MSDC2_DAT1 (MTK_PIN_NO(71) | 1)
+#define MT8167_PIN_71_MSDC2_DAT1__FUNC_I2S_8CH_DO1 (MTK_PIN_NO(71) | 2)
+#define MT8167_PIN_71_MSDC2_DAT1__FUNC_PWM_A (MTK_PIN_NO(71) | 3)
+#define MT8167_PIN_71_MSDC2_DAT1__FUNC_I2S3_MCK (MTK_PIN_NO(71) | 4)
+#define MT8167_PIN_71_MSDC2_DAT1__FUNC_URXD0 (MTK_PIN_NO(71) | 5)
+#define MT8167_PIN_71_MSDC2_DAT1__FUNC_PWM_B (MTK_PIN_NO(71) | 6)
+#define MT8167_PIN_71_MSDC2_DAT1__FUNC_DBG_MON_B_18 (MTK_PIN_NO(71) | 7)
+
+#define MT8167_PIN_72_MSDC2_DAT2__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define MT8167_PIN_72_MSDC2_DAT2__FUNC_MSDC2_DAT2 (MTK_PIN_NO(72) | 1)
+#define MT8167_PIN_72_MSDC2_DAT2__FUNC_I2S_8CH_LRCK (MTK_PIN_NO(72) | 2)
+#define MT8167_PIN_72_MSDC2_DAT2__FUNC_SDA2_0 (MTK_PIN_NO(72) | 3)
+#define MT8167_PIN_72_MSDC2_DAT2__FUNC_DPI_D23 (MTK_PIN_NO(72) | 4)
+#define MT8167_PIN_72_MSDC2_DAT2__FUNC_UTXD1 (MTK_PIN_NO(72) | 5)
+#define MT8167_PIN_72_MSDC2_DAT2__FUNC_PWM_C (MTK_PIN_NO(72) | 6)
+#define MT8167_PIN_72_MSDC2_DAT2__FUNC_DBG_MON_B_19 (MTK_PIN_NO(72) | 7)
+
+#define MT8167_PIN_73_MSDC2_DAT3__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define MT8167_PIN_73_MSDC2_DAT3__FUNC_MSDC2_DAT3 (MTK_PIN_NO(73) | 1)
+#define MT8167_PIN_73_MSDC2_DAT3__FUNC_I2S_8CH_BCK (MTK_PIN_NO(73) | 2)
+#define MT8167_PIN_73_MSDC2_DAT3__FUNC_SCL2_0 (MTK_PIN_NO(73) | 3)
+#define MT8167_PIN_73_MSDC2_DAT3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(73) | 4)
+#define MT8167_PIN_73_MSDC2_DAT3__FUNC_URXD1 (MTK_PIN_NO(73) | 5)
+#define MT8167_PIN_73_MSDC2_DAT3__FUNC_PWM_A (MTK_PIN_NO(73) | 6)
+#define MT8167_PIN_73_MSDC2_DAT3__FUNC_DBG_MON_B_20 (MTK_PIN_NO(73) | 7)
+
+#define MT8167_PIN_74_TDN3__FUNC_GPI74 (MTK_PIN_NO(74) | 0)
+#define MT8167_PIN_74_TDN3__FUNC_TDN3 (MTK_PIN_NO(74) | 1)
+
+#define MT8167_PIN_75_TDP3__FUNC_GPI75 (MTK_PIN_NO(75) | 0)
+#define MT8167_PIN_75_TDP3__FUNC_TDP3 (MTK_PIN_NO(75) | 1)
+
+#define MT8167_PIN_76_TDN2__FUNC_GPI76 (MTK_PIN_NO(76) | 0)
+#define MT8167_PIN_76_TDN2__FUNC_TDN2 (MTK_PIN_NO(76) | 1)
+
+#define MT8167_PIN_77_TDP2__FUNC_GPI77 (MTK_PIN_NO(77) | 0)
+#define MT8167_PIN_77_TDP2__FUNC_TDP2 (MTK_PIN_NO(77) | 1)
+
+#define MT8167_PIN_78_TCN__FUNC_GPI78 (MTK_PIN_NO(78) | 0)
+#define MT8167_PIN_78_TCN__FUNC_TCN (MTK_PIN_NO(78) | 1)
+
+#define MT8167_PIN_79_TCP__FUNC_GPI79 (MTK_PIN_NO(79) | 0)
+#define MT8167_PIN_79_TCP__FUNC_TCP (MTK_PIN_NO(79) | 1)
+
+#define MT8167_PIN_80_TDN1__FUNC_GPI80 (MTK_PIN_NO(80) | 0)
+#define MT8167_PIN_80_TDN1__FUNC_TDN1 (MTK_PIN_NO(80) | 1)
+
+#define MT8167_PIN_81_TDP1__FUNC_GPI81 (MTK_PIN_NO(81) | 0)
+#define MT8167_PIN_81_TDP1__FUNC_TDP1 (MTK_PIN_NO(81) | 1)
+
+#define MT8167_PIN_82_TDN0__FUNC_GPI82 (MTK_PIN_NO(82) | 0)
+#define MT8167_PIN_82_TDN0__FUNC_TDN0 (MTK_PIN_NO(82) | 1)
+
+#define MT8167_PIN_83_TDP0__FUNC_GPI83 (MTK_PIN_NO(83) | 0)
+#define MT8167_PIN_83_TDP0__FUNC_TDP0 (MTK_PIN_NO(83) | 1)
+
+#define MT8167_PIN_84_RDN0__FUNC_GPI84 (MTK_PIN_NO(84) | 0)
+#define MT8167_PIN_84_RDN0__FUNC_RDN0 (MTK_PIN_NO(84) | 1)
+
+#define MT8167_PIN_85_RDP0__FUNC_GPI85 (MTK_PIN_NO(85) | 0)
+#define MT8167_PIN_85_RDP0__FUNC_RDP0 (MTK_PIN_NO(85) | 1)
+
+#define MT8167_PIN_86_RDN1__FUNC_GPI86 (MTK_PIN_NO(86) | 0)
+#define MT8167_PIN_86_RDN1__FUNC_RDN1 (MTK_PIN_NO(86) | 1)
+
+#define MT8167_PIN_87_RDP1__FUNC_GPI87 (MTK_PIN_NO(87) | 0)
+#define MT8167_PIN_87_RDP1__FUNC_RDP1 (MTK_PIN_NO(87) | 1)
+
+#define MT8167_PIN_88_RCN__FUNC_GPI88 (MTK_PIN_NO(88) | 0)
+#define MT8167_PIN_88_RCN__FUNC_RCN (MTK_PIN_NO(88) | 1)
+
+#define MT8167_PIN_89_RCP__FUNC_GPI89 (MTK_PIN_NO(89) | 0)
+#define MT8167_PIN_89_RCP__FUNC_RCP (MTK_PIN_NO(89) | 1)
+
+#define MT8167_PIN_90_RDN2__FUNC_GPI90 (MTK_PIN_NO(90) | 0)
+#define MT8167_PIN_90_RDN2__FUNC_RDN2 (MTK_PIN_NO(90) | 1)
+#define MT8167_PIN_90_RDN2__FUNC_CMDAT8 (MTK_PIN_NO(90) | 2)
+
+#define MT8167_PIN_91_RDP2__FUNC_GPI91 (MTK_PIN_NO(91) | 0)
+#define MT8167_PIN_91_RDP2__FUNC_RDP2 (MTK_PIN_NO(91) | 1)
+#define MT8167_PIN_91_RDP2__FUNC_CMDAT9 (MTK_PIN_NO(91) | 2)
+
+#define MT8167_PIN_92_RDN3__FUNC_GPI92 (MTK_PIN_NO(92) | 0)
+#define MT8167_PIN_92_RDN3__FUNC_RDN3 (MTK_PIN_NO(92) | 1)
+#define MT8167_PIN_92_RDN3__FUNC_CMDAT4 (MTK_PIN_NO(92) | 2)
+
+#define MT8167_PIN_93_RDP3__FUNC_GPI93 (MTK_PIN_NO(93) | 0)
+#define MT8167_PIN_93_RDP3__FUNC_RDP3 (MTK_PIN_NO(93) | 1)
+#define MT8167_PIN_93_RDP3__FUNC_CMDAT5 (MTK_PIN_NO(93) | 2)
+
+#define MT8167_PIN_94_RCN_A__FUNC_GPI94 (MTK_PIN_NO(94) | 0)
+#define MT8167_PIN_94_RCN_A__FUNC_RCN_A (MTK_PIN_NO(94) | 1)
+#define MT8167_PIN_94_RCN_A__FUNC_CMDAT6 (MTK_PIN_NO(94) | 2)
+
+#define MT8167_PIN_95_RCP_A__FUNC_GPI95 (MTK_PIN_NO(95) | 0)
+#define MT8167_PIN_95_RCP_A__FUNC_RCP_A (MTK_PIN_NO(95) | 1)
+#define MT8167_PIN_95_RCP_A__FUNC_CMDAT7 (MTK_PIN_NO(95) | 2)
+
+#define MT8167_PIN_96_RDN1_A__FUNC_GPI96 (MTK_PIN_NO(96) | 0)
+#define MT8167_PIN_96_RDN1_A__FUNC_RDN1_A (MTK_PIN_NO(96) | 1)
+#define MT8167_PIN_96_RDN1_A__FUNC_CMDAT2 (MTK_PIN_NO(96) | 2)
+#define MT8167_PIN_96_RDN1_A__FUNC_CMCSD2 (MTK_PIN_NO(96) | 3)
+
+#define MT8167_PIN_97_RDP1_A__FUNC_GPI97 (MTK_PIN_NO(97) | 0)
+#define MT8167_PIN_97_RDP1_A__FUNC_RDP1_A (MTK_PIN_NO(97) | 1)
+#define MT8167_PIN_97_RDP1_A__FUNC_CMDAT3 (MTK_PIN_NO(97) | 2)
+#define MT8167_PIN_97_RDP1_A__FUNC_CMCSD3 (MTK_PIN_NO(97) | 3)
+
+#define MT8167_PIN_98_RDN0_A__FUNC_GPI98 (MTK_PIN_NO(98) | 0)
+#define MT8167_PIN_98_RDN0_A__FUNC_RDN0_A (MTK_PIN_NO(98) | 1)
+#define MT8167_PIN_98_RDN0_A__FUNC_CMHSYNC (MTK_PIN_NO(98) | 2)
+
+#define MT8167_PIN_99_RDP0_A__FUNC_GPI99 (MTK_PIN_NO(99) | 0)
+#define MT8167_PIN_99_RDP0_A__FUNC_RDP0_A (MTK_PIN_NO(99) | 1)
+#define MT8167_PIN_99_RDP0_A__FUNC_CMVSYNC (MTK_PIN_NO(99) | 2)
+
+#define MT8167_PIN_100_CMDAT0__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define MT8167_PIN_100_CMDAT0__FUNC_CMDAT0 (MTK_PIN_NO(100) | 1)
+#define MT8167_PIN_100_CMDAT0__FUNC_CMCSD0 (MTK_PIN_NO(100) | 2)
+#define MT8167_PIN_100_CMDAT0__FUNC_ANT_SEL2 (MTK_PIN_NO(100) | 3)
+#define MT8167_PIN_100_CMDAT0__FUNC_TDM_RX_MCK (MTK_PIN_NO(100) | 5)
+#define MT8167_PIN_100_CMDAT0__FUNC_DBG_MON_B_21 (MTK_PIN_NO(100) | 7)
+
+#define MT8167_PIN_101_CMDAT1__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define MT8167_PIN_101_CMDAT1__FUNC_CMDAT1 (MTK_PIN_NO(101) | 1)
+#define MT8167_PIN_101_CMDAT1__FUNC_CMCSD1 (MTK_PIN_NO(101) | 2)
+#define MT8167_PIN_101_CMDAT1__FUNC_ANT_SEL3 (MTK_PIN_NO(101) | 3)
+#define MT8167_PIN_101_CMDAT1__FUNC_CMFLASH (MTK_PIN_NO(101) | 4)
+#define MT8167_PIN_101_CMDAT1__FUNC_TDM_RX_BCK (MTK_PIN_NO(101) | 5)
+#define MT8167_PIN_101_CMDAT1__FUNC_DBG_MON_B_22 (MTK_PIN_NO(101) | 7)
+
+#define MT8167_PIN_102_CMMCLK__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define MT8167_PIN_102_CMMCLK__FUNC_CMMCLK (MTK_PIN_NO(102) | 1)
+#define MT8167_PIN_102_CMMCLK__FUNC_ANT_SEL4 (MTK_PIN_NO(102) | 3)
+#define MT8167_PIN_102_CMMCLK__FUNC_TDM_RX_LRCK (MTK_PIN_NO(102) | 5)
+#define MT8167_PIN_102_CMMCLK__FUNC_DBG_MON_B_23 (MTK_PIN_NO(102) | 7)
+
+#define MT8167_PIN_103_CMPCLK__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define MT8167_PIN_103_CMPCLK__FUNC_CMPCLK (MTK_PIN_NO(103) | 1)
+#define MT8167_PIN_103_CMPCLK__FUNC_CMCSK (MTK_PIN_NO(103) | 2)
+#define MT8167_PIN_103_CMPCLK__FUNC_ANT_SEL5 (MTK_PIN_NO(103) | 3)
+#define MT8167_PIN_103_CMPCLK__FUNC_TDM_RX_DI (MTK_PIN_NO(103) | 5)
+#define MT8167_PIN_103_CMPCLK__FUNC_DBG_MON_B_24 (MTK_PIN_NO(103) | 7)
+
+#define MT8167_PIN_104_MSDC1_CMD__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define MT8167_PIN_104_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(104) | 1)
+#define MT8167_PIN_104_MSDC1_CMD__FUNC_SQICS (MTK_PIN_NO(104) | 4)
+#define MT8167_PIN_104_MSDC1_CMD__FUNC_DBG_MON_B_25 (MTK_PIN_NO(104) | 7)
+
+#define MT8167_PIN_105_MSDC1_CLK__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT8167_PIN_105_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(105) | 1)
+#define MT8167_PIN_105_MSDC1_CLK__FUNC_UDI_NTRST_XI (MTK_PIN_NO(105) | 2)
+#define MT8167_PIN_105_MSDC1_CLK__FUNC_DFD_NTRST_XI (MTK_PIN_NO(105) | 3)
+#define MT8167_PIN_105_MSDC1_CLK__FUNC_SQISO (MTK_PIN_NO(105) | 4)
+#define MT8167_PIN_105_MSDC1_CLK__FUNC_GPUEJ_NTRST_XI (MTK_PIN_NO(105) | 5)
+#define MT8167_PIN_105_MSDC1_CLK__FUNC_DBG_MON_B_26 (MTK_PIN_NO(105) | 7)
+
+#define MT8167_PIN_106_MSDC1_DAT0__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT8167_PIN_106_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(106) | 1)
+#define MT8167_PIN_106_MSDC1_DAT0__FUNC_UDI_TMS_XI (MTK_PIN_NO(106) | 2)
+#define MT8167_PIN_106_MSDC1_DAT0__FUNC_DFD_TMS_XI (MTK_PIN_NO(106) | 3)
+#define MT8167_PIN_106_MSDC1_DAT0__FUNC_SQISI (MTK_PIN_NO(106) | 4)
+#define MT8167_PIN_106_MSDC1_DAT0__FUNC_GPUEJ_TMS_XI (MTK_PIN_NO(106) | 5)
+#define MT8167_PIN_106_MSDC1_DAT0__FUNC_DBG_MON_B_27 (MTK_PIN_NO(106) | 7)
+
+#define MT8167_PIN_107_MSDC1_DAT1__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT8167_PIN_107_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(107) | 1)
+#define MT8167_PIN_107_MSDC1_DAT1__FUNC_UDI_TCK_XI (MTK_PIN_NO(107) | 2)
+#define MT8167_PIN_107_MSDC1_DAT1__FUNC_DFD_TCK_XI (MTK_PIN_NO(107) | 3)
+#define MT8167_PIN_107_MSDC1_DAT1__FUNC_SQIWP (MTK_PIN_NO(107) | 4)
+#define MT8167_PIN_107_MSDC1_DAT1__FUNC_GPUEJ_TCK_XI (MTK_PIN_NO(107) | 5)
+#define MT8167_PIN_107_MSDC1_DAT1__FUNC_DBG_MON_B_28 (MTK_PIN_NO(107) | 7)
+
+#define MT8167_PIN_108_MSDC1_DAT2__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT8167_PIN_108_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(108) | 1)
+#define MT8167_PIN_108_MSDC1_DAT2__FUNC_UDI_TDI_XI (MTK_PIN_NO(108) | 2)
+#define MT8167_PIN_108_MSDC1_DAT2__FUNC_DFD_TDI_XI (MTK_PIN_NO(108) | 3)
+#define MT8167_PIN_108_MSDC1_DAT2__FUNC_SQIRST (MTK_PIN_NO(108) | 4)
+#define MT8167_PIN_108_MSDC1_DAT2__FUNC_GPUEJ_TDI_XI (MTK_PIN_NO(108) | 5)
+#define MT8167_PIN_108_MSDC1_DAT2__FUNC_DBG_MON_B_29 (MTK_PIN_NO(108) | 7)
+
+#define MT8167_PIN_109_MSDC1_DAT3__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT8167_PIN_109_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(109) | 1)
+#define MT8167_PIN_109_MSDC1_DAT3__FUNC_UDI_TDO (MTK_PIN_NO(109) | 2)
+#define MT8167_PIN_109_MSDC1_DAT3__FUNC_DFD_TDO (MTK_PIN_NO(109) | 3)
+#define MT8167_PIN_109_MSDC1_DAT3__FUNC_SQICK (MTK_PIN_NO(109) | 4)
+#define MT8167_PIN_109_MSDC1_DAT3__FUNC_GPUEJ_TDO (MTK_PIN_NO(109) | 5)
+#define MT8167_PIN_109_MSDC1_DAT3__FUNC_DBG_MON_B_30 (MTK_PIN_NO(109) | 7)
+
+#define MT8167_PIN_110_MSDC0_DAT7__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT8167_PIN_110_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(110) | 1)
+#define MT8167_PIN_110_MSDC0_DAT7__FUNC_NLD7 (MTK_PIN_NO(110) | 4)
+
+#define MT8167_PIN_111_MSDC0_DAT6__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT8167_PIN_111_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(111) | 1)
+#define MT8167_PIN_111_MSDC0_DAT6__FUNC_NLD6 (MTK_PIN_NO(111) | 4)
+
+#define MT8167_PIN_112_MSDC0_DAT5__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT8167_PIN_112_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(112) | 1)
+#define MT8167_PIN_112_MSDC0_DAT5__FUNC_NLD4 (MTK_PIN_NO(112) | 4)
+
+#define MT8167_PIN_113_MSDC0_DAT4__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT8167_PIN_113_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(113) | 1)
+#define MT8167_PIN_113_MSDC0_DAT4__FUNC_NLD3 (MTK_PIN_NO(113) | 4)
+
+#define MT8167_PIN_114_MSDC0_RSTB__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT8167_PIN_114_MSDC0_RSTB__FUNC_MSDC0_RSTB (MTK_PIN_NO(114) | 1)
+#define MT8167_PIN_114_MSDC0_RSTB__FUNC_NLD0 (MTK_PIN_NO(114) | 4)
+
+#define MT8167_PIN_115_MSDC0_CMD__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT8167_PIN_115_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(115) | 1)
+#define MT8167_PIN_115_MSDC0_CMD__FUNC_NALE (MTK_PIN_NO(115) | 4)
+
+#define MT8167_PIN_116_MSDC0_CLK__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT8167_PIN_116_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(116) | 1)
+#define MT8167_PIN_116_MSDC0_CLK__FUNC_NWEB (MTK_PIN_NO(116) | 4)
+
+#define MT8167_PIN_117_MSDC0_DAT3__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT8167_PIN_117_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(117) | 1)
+#define MT8167_PIN_117_MSDC0_DAT3__FUNC_NLD1 (MTK_PIN_NO(117) | 4)
+
+#define MT8167_PIN_118_MSDC0_DAT2__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT8167_PIN_118_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(118) | 1)
+#define MT8167_PIN_118_MSDC0_DAT2__FUNC_NLD5 (MTK_PIN_NO(118) | 4)
+
+#define MT8167_PIN_119_MSDC0_DAT1__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT8167_PIN_119_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(119) | 1)
+#define MT8167_PIN_119_MSDC0_DAT1__FUNC_NLD8 (MTK_PIN_NO(119) | 4)
+
+#define MT8167_PIN_120_MSDC0_DAT0__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT8167_PIN_120_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(120) | 1)
+#define MT8167_PIN_120_MSDC0_DAT0__FUNC_WATCHDOG (MTK_PIN_NO(120) | 4)
+#define MT8167_PIN_120_MSDC0_DAT0__FUNC_NLD2 (MTK_PIN_NO(120) | 5)
+
+#define MT8167_PIN_121_CEC__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define MT8167_PIN_121_CEC__FUNC_CEC (MTK_PIN_NO(121) | 1)
+
+#define MT8167_PIN_122_HTPLG__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define MT8167_PIN_122_HTPLG__FUNC_HTPLG (MTK_PIN_NO(122) | 1)
+
+#define MT8167_PIN_123_HDMISCK__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define MT8167_PIN_123_HDMISCK__FUNC_HDMISCK (MTK_PIN_NO(123) | 1)
+
+#define MT8167_PIN_124_HDMISD__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define MT8167_PIN_124_HDMISD__FUNC_HDMISD (MTK_PIN_NO(124) | 1)
+
+#endif				/* __DTS_MT8167_PINFUNC_H */
diff --git a/arch/arm64/boot/dts/mediatek/mt8167-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8167-pumpkin.dts
new file mode 100644
index 0000000..bb2ef00
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8167-pumpkin.dts
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+/dts-v1/;
+
+#include "mt8167.dtsi"
+#include "pumpkin-common.dtsi"
+#include "pumpkin-emmc-common.dtsi"
+
+/ {
+	model = "Pumpkin MT8167";
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x80000000>;
+	};
+};
+
+&dpi1 {
+	status = "okay";
+	ddc-i2c-bus = <&hdmiddc>;
+
+	port {
+		hdmi_connector_in: endpoint {
+			remote-endpoint = <&hdmi_out>;
+		};
+	};
+};
+
+&hdmi_phy {
+	status = "okay";
+};
+
+&cec {
+	status = "okay";
+};
+
+&hdmi {
+	pinctrl-names = "default", "hdmi_hpd";
+	pinctrl-0 = <&hdmi_pins_default>;
+	pinctrl-1 = <&hdmi_pins_hpd>;
+	status = "okay";
+
+	ports {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port@1 {
+			reg = <1>;
+
+			hdmi_out: endpoint {
+				remote-endpoint = <&hdmi_connector_in>;
+			};
+		};
+	};
+};
+
+&pio {
+	hdmi_pins_default: hdmi_pins_default {
+	};
+
+	hdmi_pins_hpd: hdmi_pins_hpd {
+		pins_cmd_dat {
+			pinmux = <MT8167_PIN_122_HTPLG__FUNC_HTPLG>;
+			bias-pull-down;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8167.dtsi b/arch/arm64/boot/dts/mediatek/mt8167.dtsi
new file mode 100644
index 0000000..5e68a36
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8167.dtsi
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <dt-bindings/clock/mt8167-clk.h>
+#include <dt-bindings/power/mt8167-power.h>
+#include <dt-bindings/memory/mt8167-larb-port.h>
+
+#include "mt8167-pinfunc.h"
+
+#include "mt8516.dtsi"
+
+/ {
+	compatible = "mediatek,mt8167";
+
+	aliases {
+		dpi1 = &dpi1;
+		rdma1 = &rdma1;
+
+		ovl0 = &ovl0;
+		color0 = &color;
+		ccorr0 = &ccorr;
+		aal0 = &aal;
+		dither0 = &dither;
+		rdma0 = &rdma0;
+		dsi0 = &dsi;
+
+		pwm0 = &disp_pwm;
+
+		mdp_rdma0 = &mdp_rdma;
+		mdp_rsz0 = &mdp_rsz0;
+		mdp_rsz1 = &mdp_rsz1;
+		mdp_tdshp0 = &mdp_tdshp;
+		mdp_wdma0 = &mdp_wdma;
+		mdp_wrot0 = &mdp_wrot;
+	};
+
+	soc {
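+		/* scpsys: power-domain provider (MFG, MM, VDEC, ...) */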
+		scpsys: scpsys@10006000 {
+			compatible = "mediatek,mt8516-scpsys", "mediatek,mt8167-scpsys", "syscon";
+			#power-domain-cells = <1>;
+			reg = <0 0x10006000 0 0x1000>;
+			interrupts =	<GIC_SPI 128 IRQ_TYPE_LEVEL_LOW>,
+					<GIC_SPI 129 IRQ_TYPE_LEVEL_LOW>,
+					<GIC_SPI 130 IRQ_TYPE_LEVEL_LOW>,
+					<GIC_SPI 131 IRQ_TYPE_LEVEL_LOW>;
+			infracfg = <&infracfg>;
+			clocks = <&topckgen CLK_TOP_RG_AXI_MFG>,
+				 <&topckgen CLK_TOP_RG_SLOW_MFG>,
+				 <&topckgen CLK_TOP_SMI_MM>,
+				 <&topckgen CLK_TOP_RG_VDEC>;
+			clock-names = "axi_mfg", "mfg", "mm", "vdec";
+			mediatek,pwrap-regmap = <&pwrap>;
+		};
+
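+		/*
+		 * mmsys and mmsys2 are two clock providers backed by the
+		 * same MMSYS_CONFIG register space.
+		 */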
+		mmsys: mmsys@14000000 {
+			compatible = "mediatek,mt8167-mmsys", "syscon";
+			reg = <0 0x14000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		mmsys2: mmsys2@14000000 {
+			compatible = "mediatek,mt8167-mmsys2", "syscon";
+			reg = <0 0x14000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		imgsys: imgsys@15000000 {
+			compatible = "mediatek,mt8167-imgsys", "syscon";
+			reg = <0 0x15000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		vdecsys: syscon@16000000 {
+			compatible = "mediatek,mt8167-vdecsys", "syscon";
+			reg = <0 0x16000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		mutex: mutex@14015000 {
+			compatible = "mediatek,mt8167-disp-mutex";
+			reg = <0 0x14015000 0 0x1000>;
+			interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+		};
+
+		iommu: m4u@10203000 {
+			cell-index = <0>;
+			compatible = "mediatek,mt8167-m4u";
+			reg = <0 0x10203000 0 0x1000>;
+			mediatek,larbs = <&larb0 &larb1 &larb2>;
+			interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_LOW>;
+			#iommu-cells = <1>;
+		};
+
+		mfg: clark@13000000 {
+			compatible = "mediatek,mt8167-clark";
+			reg = <0 0x13000000 0 0x80000>, <0 0x13ffe000 0 0x1000>;
+			interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_LOW>;
+			interrupt-names = "RGX";
+			power-domains =  <&scpsys MT8167_POWER_DOMAIN_MFG>;
+			clocks = <&topckgen CLK_TOP_MFG_MM_SEL>,
+				 <&topckgen CLK_TOP_AXI_MFG_IN_SEL>,
+				 <&topckgen CLK_TOP_SLOW_MFG_SEL>,
+				 <&topckgen CLK_TOP_RG_SLOW_MFG>,
+				 <&topckgen CLK_TOP_RG_AXI_MFG>,
+				 <&topckgen CLK_TOP_MFG_MM>,
+				 <&topckgen CLK_TOP_CLK26M>,
+				 <&topckgen CLK_TOP_UNIVPLL_D24>,
+				 <&topckgen CLK_TOP_MAINPLL_D11>,
+				 <&topckgen CLK_TOP_CSW_MUX_MFG_SEL>;
+			clock-names = "mfg_mm_in_sel",
+				      "mfg_axi_in_sel",
+				      "mfg_slow_in_sel",
+				      "top_slow",
+				      "top_axi",
+				      "top_mm",
+				      "slow_clk26m",
+				      "bus_univpll_d24",
+				      "bus_mainpll_d11",
+				      "engine_csw_mux";
+			clock-frequency = <400000000>;
+		};
+
+		mfgcfg: mfgcfg@13ffe000 {
+			compatible = "mediatek,mt8167-mfgcfg", "syscon";
+			reg = <0 0x13ffe000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		mfg_async: mfgsys-async {
+			compatible = "mediatek,mt8167-mfg-async";
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MFG_ASYNC>;
+		};
+
+		mfg_2d: mfgsys-2d {
+			compatible = "mediatek,mt8167-mfg-2d";
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MFG_2D>;
+		};
+
+		smi_common: smi@14017000 {
+			compatible = "mediatek,mt8167-smi-common";
+			reg = <0 0x14017000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_SMI_COMMON>,
+				 <&mmsys CLK_MM_SMI_COMMON>;
+			clock-names = "apb", "smi";
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_DISP>;
+		};
+
+		dpi1: dpi1@14019000 {
+			compatible = "mediatek,mt8167-dpi", "hdmi-connector";
+			reg = <0 0x14019000 0 0x1000>;
+			interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			clocks = <&mmsys CLK_MM_DPI1_PXL>,
+				 <&mmsys CLK_MM_DPI1_ENGINE>,
+				 <&apmixedsys CLK_APMIXED_TVDPLL>,
+				 <&topckgen CLK_TOP_DPI1_MM_SEL>,
+				 <&topckgen CLK_TOP_TVDPLL_D2>,
+				 <&topckgen CLK_TOP_TVDPLL_D4>,
+				 <&topckgen CLK_TOP_TVDPLL_D8>,
+				 <&topckgen CLK_TOP_TVDPLL_D16>;
+			clock-names = "pixel", "engine", "pll", "dpi_sel",
+				      "tvd_d2", "tvd_d4", "tvd_d8", "tvd_d16";
+			status = "disabled";
+		};
+
+		hdmiddc: i2c@11011000 {
+			compatible = "mediatek,mt8167-hdmi-ddc",
+				     "mediatek,mt8173-hdmi-ddc";
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>;
+			reg = <0 0x11011000 0 0x10>;
+			clocks = <&topckgen CLK_TOP_26M_HDMI_SIFM>;
+			clock-names = "ddc-i2c";
+		};
+
+		hdmi_phy: hdmi-phy@10018300 {
+			compatible = "mediatek,mt8167-hdmi-phy";
+			reg = <0 0x10018300 0 0x20>;
+			clocks = <&apmixedsys CLK_APMIXED_HDMI_REF>;
+			clock-names = "pll_ref";
+			clock-output-names = "hdmtx_dig_cts";
+			mediatek,ibias = <0xa>;
+			mediatek,ibias_up = <0x1c>;
+			#clock-cells = <0>;
+			#phy-cells = <0>;
+			status = "disabled";
+		};
+
+		hdmi: hdmi@1401b000 {
+			compatible = "mediatek,mt8167-hdmi";
+			reg = <0 0x1401b000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_HDMI_PXL>,
+				 <&mmsys CLK_MM_HDMI_PLL>,
+				 <&mmsys CLK_MM_HDMI_ADSP_BCK>,
+				 <&mmsys CLK_MM_HDMI_SPDIF>;
+			clock-names = "pixel", "pll", "bclk", "spdif";
+			pinctrl-names = "default";
+			pinctrl-0 = <&hdmi_pin>;
+			phys = <&hdmi_phy>;
+			phy-names = "hdmi";
+			mediatek,syscon-hdmi = <&mmsys 0x100>;
+			cec = <&cec>;
+			ddc-i2c-bus = <&hdmiddc>;
+			status = "disabled";
+		};
+
+		cec: cec@1001a000 {
+			compatible = "mediatek,mt8167-cec",
+				     "mediatek,mt8173-cec";
+			reg = <0 0x1001a000 0 0xbc>;
+			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_26M_CEC>;
+			gpio-base = <&syscfg_pctl_a>;
+			status = "disabled";
+		};
+
+		larb0: larb@14016000 {
+			compatible = "mediatek,mt8167-smi-larb";
+			reg = <0 0x14016000 0 0x1000>;
+			mediatek,smi = <&smi_common>;
+			mediatek,larbid = <0>;
+			clocks = <&mmsys CLK_MM_SMI_LARB0>,
+				 <&mmsys CLK_MM_SMI_LARB0>;
+			clock-names = "apb", "smi";
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_DISP>;
+		};
+
+		larb1: larb@15001000 {
+			compatible = "mediatek,mt8167-smi-larb";
+			reg = <0 0x15001000 0 0x1000>;
+			mediatek,smi = <&smi_common>;
+			mediatek,larbid = <1>;
+			clocks = <&imgsys CLK_IMG_LARB1_SMI>,
+				 <&imgsys CLK_IMG_LARB1_SMI>;
+			clock-names = "apb", "smi";
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_ISP>;
+		};
+
+		larb2: larb@16010000 {
+			compatible = "mediatek,mt8167-smi-larb";
+			reg = <0 0x16010000 0 0x1000>;
+			mediatek,smi = <&smi_common>;
+			mediatek,larbid = <2>;
+			clocks = <&vdecsys CLK_VDEC_CKEN>,
+				 <&vdecsys CLK_VDEC_LARB1_CKEN>;
+			clock-names = "apb", "smi";
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_VDEC>;
+		};
+
+		rdma1: rdma1@1400a000 {
+			compatible = "mediatek,mt2701-disp-rdma";
+			reg = <0 0x1400a000 0 0x1000>;
+			interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			clocks = <&mmsys CLK_MM_DISP_RDMA1>;
+			iommus = <&iommu M4U_PORT_DISP_RDMA1>;
+			mediatek,larb = <&larb0>;
+		};
+
+		disp_pwm: disp_pwm@1100f000 {
+			compatible = "mediatek,mt8167-disp-pwm";
+			reg = <0 0x1100f000 0 0x1000>;
+			#pwm-cells = <2>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			clocks = <&topckgen CLK_TOP_PWM_SEL>,
+				 <&topckgen CLK_TOP_PWM_MM>,
+				 <&mmsys CLK_MM_DISP_PWM_26M>,
+				 <&mmsys CLK_MM_DISP_PWM_MM>;
+			clock-names = "pwm_sel",
+				      "pwm_mm",
+				      "main",
+				      "mm";
+			status = "disabled";
+		};
+
+		dsi: dsi@14012000 {
+			compatible = "mediatek,mt8167-dsi";
+			reg = <0 0x14012000 0 0x1000>;
+			interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			clocks = <&mmsys CLK_MM_DSI_ENGINE>,
+				 <&mmsys CLK_MM_DSI_DIGITAL>,
+				 <&topckgen CLK_TOP_MIPI_26M_DBG>,
+				 <&mipi_tx>;
+			clock-names = "engine", "digital", "mipi26mdbg", "hs";
+			phys = <&mipi_tx>;
+			phy-names = "dphy";
+			status = "disabled";
+		};
+
+		mipi_tx: mipi_dphy@14018000 {
+			compatible = "mediatek,mt8167-mipi-tx";
+			reg = <0 0x14018000 0 0x90>;
+			clocks = <&clk26m>;
+			clock-output-names = "mipi_tx0_pll";
+			#clock-cells = <0>;
+			#phy-cells = <0>;
+			status = "disabled";
+		};
+
+		ovl0: disp_ovl0@14007000 {
+			compatible = "mediatek,mt8167-disp-ovl";
+			reg = <0 0x14007000 0 0x1000>;
+			interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			clocks = <&mmsys CLK_MM_DISP_OVL0>;
+			iommus = <&iommu M4U_PORT_DISP_OVL0>;
+			mediatek,larb = <&larb0>;
+		};
+
+		rdma0: disp_rdma0@14009000 {
+			compatible = "mediatek,mt2701-disp-rdma";
+			reg = <0 0x14009000 0 0x1000>;
+			interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			clocks = <&mmsys CLK_MM_DISP_RDMA0>;
+			iommus = <&iommu M4U_PORT_DISP_RDMA0>;
+			mediatek,larb = <&larb0>;
+		};
+
+		color: disp_color@1400c000 {
+			compatible = "mediatek,mt8167-disp-color";
+			reg = <0 0x1400c000 0 0x1000>;
+			interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			clocks = <&mmsys CLK_MM_DISP_COLOR>;
+		};
+
+		ccorr: disp_ccorr@1400d000 {
+			compatible = "mediatek,mt8167-disp-ccorr";
+			reg = <0 0x1400d000 0 0x1000>;
+			interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			clocks = <&mmsys CLK_MM_DISP_CCORR>;
+		};
+
+		aal: disp_aal@1400e000 {
+			compatible = "mediatek,mt8167-disp-aal";
+			reg = <0 0x1400e000 0 0x1000>;
+			interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			clocks = <&mmsys CLK_MM_DISP_AAL>;
+		};
+
+		gamma: disp_gamma@1400f000 {
+			compatible = "mediatek,mt8167-disp-gamma";
+			reg = <0 0x1400f000 0 0x1000>;
+			interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			clocks = <&mmsys CLK_MM_DISP_GAMMA>;
+		};
+
+		dither: disp_dither@14010000 {
+			compatible = "mediatek,mt8167-disp-dither";
+			reg = <0 0x14010000 0 0x1000>;
+			interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			clocks = <&mmsys CLK_MM_DISP_DITHER>;
+		};
+
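+		/*
+		 * GCE (Global Command Engine) mailbox: display and MDP
+		 * drivers queue register writes through its channels
+		 * instead of programming the hardware directly.
+		 */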
+		gce: gce@1020a000 {
+			compatible = "mediatek,mt8167-gce",
+				     "mediatek,mt8173-gce";
+			reg = <0 0x1020a000 0 0x900>;
+			interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_GCE>;
+			clock-names = "gce";
+
+			#mbox-cells = <3>;
+			#subsys-cells = <3>;
+		};
+
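+		/*
+		 * VCU (Video Codec Unit): downstream firmware interface used
+		 * by the vcodec and MDP drivers; vcu@1 is register-less and
+		 * only provides the MDP firmware instance.
+		 */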
+		vcu: vcu@0 {
+			compatible = "mediatek,mt8167-vcu";
+			mediatek,vcuid = <0>;
+			mediatek,vcuname = "vpu";
+			reg = <0 0x16000000 0 0x40000>,	/* VDEC_BASE */
+			      <0 0x15009000 0 0x1000>,	/* VENC_BASE */
+			      <0 0x19002000 0 0x1000>;	/* VENC_LT_BASE */
+			iommus = <&iommu M4U_PORT_HW_VDEC_MC_EXT>;
+		};
+
+		mdp_vcu: vcu@1 {
+			compatible = "mediatek,mt8167-vcu";
+			mediatek,vcuid = <1>;
+			mediatek,vcuname = "vpu1";
+			iommus = <&iommu M4U_PORT_HW_VDEC_MC_EXT>;
+		};
+
+		mdp_rdma: rdma@14001000 {
+			compatible = "mediatek,mt8167-mdp-rdma",
+				     "mediatek,mt8167-mdp";
+			reg = <0 0x14001000 0 0x1000>;
+			mediatek,mdpid = <0>;
+			clocks = <&mmsys CLK_MM_MDP_RDMA>,
+				 <&mmsys CLK_MM_MDP_RDMA>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			iommus = <&iommu M4U_PORT_MDP_RDMA>;
+			mediatek,larb = <&larb0>;
+			mediatek,vpu = <&mdp_vcu>;
+			mediatek,gce = <&gce>;
+			mboxes = <&gce 0 0 1>;
+		};
+
+		mdp_rsz0: rsz@14002000 {
+			compatible = "mediatek,mt8167-mdp-rsz";
+			reg = <0 0x14002000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_RSZ0>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+		};
+
+		mdp_rsz1: rsz@14003000 {
+			compatible = "mediatek,mt8167-mdp-rsz";
+			reg = <0 0x14003000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_RSZ1>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+		};
+
+		mdp_wdma: wdma@14004000 {
+			compatible = "mediatek,mt8167-mdp-wdma";
+			reg = <0 0x14004000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_WDMA>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			iommus = <&iommu M4U_PORT_MDP_WDMA>;
+			mediatek,larb = <&larb0>;
+		};
+
+		mdp_wrot: wrot@14005000 {
+			compatible = "mediatek,mt8167-mdp-wrot";
+			reg = <0 0x14005000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_WROT>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+			iommus = <&iommu M4U_PORT_MDP_WROT>;
+			mediatek,larb = <&larb0>;
+		};
+
+		mdp_tdshp: tdshp@14006000 {
+			compatible = "mediatek,mt8167-mdp-tdshp";
+			reg = <0 0x14006000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_TDSHP>;
+			power-domains = <&scpsys MT8167_POWER_DOMAIN_MM>;
+		};
+	};
+};
+
+&topckgen {
+	compatible = "mediatek,mt8167-topckgen",
+		     "mediatek,mt8516-topckgen", "syscon";
+	reg = <0 0x10000000 0 0x1000>;
+	#clock-cells = <1>;
+};
+
+&infracfg {
+	compatible = "mediatek,mt8167-infracfg",
+		     "mediatek,mt8516-infracfg", "syscon";
+	reg = <0 0x10001000 0 0x1000>;
+	#clock-cells = <1>;
+};
+
+&apmixedsys {
+	compatible = "mediatek,mt8167-apmixedsys",
+		     "mediatek,mt8516-apmixedsys", "syscon";
+	reg = <0 0x10018000 0 0x710>;
+	#clock-cells = <1>;
+};
+
+&syscfg_pctl_a {
+	compatible = "mediatek,mt8167-pctl-a-syscfg",
+		     "mediatek,mt8516-pctl-a-syscfg", "syscon";
+	reg = <0 0x10005000 0 0x1000>;
+};
+
+&pio {
+	compatible = "mediatek,mt8167-pinctrl", "mediatek,mt8516-pinctrl";
+	reg = <0 0x1000b000 0 0x1000>;
+	mediatek,pctl-regmap = <&syscfg_pctl_a>;
+	pins-are-numbered;
+	gpio-controller;
+	#gpio-cells = <2>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+	interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+
+	hdmi_pin: htplg {
+		pins_cmd_dat {
+			pinmux = <MT8167_PIN_122_HTPLG__FUNC_HTPLG>;
+			input-enable;
+			bias-pull-down;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
new file mode 100644
index 0000000..f120159
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Ben Ho <ben.ho@mediatek.com>
+ *	   Erin Lo <erin.lo@mediatek.com>
+ */
+
+/dts-v1/;
+#include "mt8183.dtsi"
+#include "mt6358.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	model = "MediaTek MT8183 evaluation board";
+	compatible = "mediatek,mt8183-evb", "mediatek,mt8183";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x80000000>;
+	};
+
+	chosen {
+		stdout-path = "serial0:921600n8";
+	};
+
+	firmware {
+		optee: optee@4fd00000 {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
+
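+	/*
+	 * pwm-backlight with two brightness-levels endpoints:
+	 * num-interpolated-steps expands them into a linear 0-1023 ramp,
+	 * of which level 576 is the boot-time default.
+	 */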
+	backlight_lcd0: backlight_lcd0 {
+		compatible = "pwm-backlight";
+		pwms = <&pwm0 0 500000>;
+		power-supply = <&bl_pp5000>;
+		brightness-levels = <0 1023>;
+		num-interpolated-steps = <1023>;
+		default-brightness-level = <576>;
+		status = "okay";
+	};
+
+	bl_pp5000: regulator@1 {
+		compatible = "regulator-fixed";
+		regulator-name = "bl_pp5000";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	usb_p0_vbus: regulator@2 {
+		compatible = "regulator-fixed";
+		regulator-name = "vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&pio 42 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	sound {
+		compatible = "mediatek,mt8183-mt6358-sound";
+		mediatek,platform = <&afe>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&aud_pins>;
+		status = "okay";
+	};
+
+	vddio_touch_reg: vddio_touch_reg {
+		compatible = "regulator-fixed";
+		regulator-name = "vddio_touch";
+		gpio = <&pio 153 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	ntc@0 {
+		compatible = "murata,ncp03wf104";
+		pullup-uv = <1800000>;
+		pullup-ohm = <390000>;
+		pulldown-ohm = <0>;
+		io-channels = <&auxadc 0>;
+	};
+};
+
+&auxadc {
+	status = "okay";
+};
+
+&pio {
+	mmc0_pins_default: mmc0default {
+		pins_cmd_dat {
+			pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
+				 <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
+				 <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
+				 <PINMUX_GPIO132__FUNC_MSDC0_DAT3>,
+				 <PINMUX_GPIO126__FUNC_MSDC0_DAT4>,
+				 <PINMUX_GPIO129__FUNC_MSDC0_DAT5>,
+				 <PINMUX_GPIO127__FUNC_MSDC0_DAT6>,
+				 <PINMUX_GPIO130__FUNC_MSDC0_DAT7>,
+				 <PINMUX_GPIO122__FUNC_MSDC0_CMD>;
+			input-enable;
+			bias-pull-up;
+		};
+
+		pins_clk {
+			pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
+			bias-pull-down;
+		};
+
+		pins_rst {
+			pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
+			bias-pull-up;
+		};
+	};
+
+	mmc0_pins_uhs: mmc0@0 {
+		pins_cmd_dat {
+			pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
+				 <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
+				 <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
+				 <PINMUX_GPIO132__FUNC_MSDC0_DAT3>,
+				 <PINMUX_GPIO126__FUNC_MSDC0_DAT4>,
+				 <PINMUX_GPIO129__FUNC_MSDC0_DAT5>,
+				 <PINMUX_GPIO127__FUNC_MSDC0_DAT6>,
+				 <PINMUX_GPIO130__FUNC_MSDC0_DAT7>,
+				 <PINMUX_GPIO122__FUNC_MSDC0_CMD>;
+			input-enable;
+			drive-strength = <MTK_DRIVE_10mA>;
+			bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+		};
+
+		pins_clk {
+			pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
+			drive-strength = <MTK_DRIVE_10mA>;
+			bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+		};
+
+		pins_ds {
+			pinmux = <PINMUX_GPIO131__FUNC_MSDC0_DSL>;
+			drive-strength = <MTK_DRIVE_10mA>;
+			bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+		};
+
+		pins_rst {
+			pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
+			drive-strength = <MTK_DRIVE_10mA>;
+			bias-pull-up;
+		};
+	};
+
+	spi_pins_0: spi0 {
+		pins_spi {
+			pinmux = <PINMUX_GPIO85__FUNC_SPI0_MI>,
+				 <PINMUX_GPIO86__FUNC_SPI0_CSB>,
+				 <PINMUX_GPIO87__FUNC_SPI0_MO>,
+				 <PINMUX_GPIO88__FUNC_SPI0_CLK>;
+			bias-disable;
+		};
+	};
+
+	spi_pins_1: spi1 {
+		pins_spi {
+			pinmux = <PINMUX_GPIO161__FUNC_SPI1_A_MI>,
+				 <PINMUX_GPIO162__FUNC_SPI1_A_CSB>,
+				 <PINMUX_GPIO163__FUNC_SPI1_A_MO>,
+				 <PINMUX_GPIO164__FUNC_SPI1_A_CLK>;
+			bias-disable;
+		};
+	};
+
+	spi_pins_2: spi2 {
+		pins_spi {
+			pinmux = <PINMUX_GPIO0__FUNC_SPI2_CSB>,
+				 <PINMUX_GPIO1__FUNC_SPI2_MO>,
+				 <PINMUX_GPIO2__FUNC_SPI2_CLK>,
+				 <PINMUX_GPIO94__FUNC_SPI2_MI>;
+			bias-disable;
+		};
+	};
+
+	panel_pins_default: panel_pins_default {
+		panel_reset {
+			pinmux = <PINMUX_GPIO45__FUNC_GPIO45>;
+			output-low;
+			bias-pull-up;
+		};
+	};
+
+	pwm0_pin_default: pwm0_pin_default {
+		disp_pwm {
+			pinmux = <PINMUX_GPIO43__FUNC_DISP_PWM>;
+		};
+	};
+
+	pwm1_pin_default: pwm1_pin_default {
+		pwm {
+			pinmux = <PINMUX_GPIO90__FUNC_PWM_A>;
+		};
+	};
+
+	aud_pins: audiopins {
+		pins_bus {
+			pinmux = <PINMUX_GPIO136__FUNC_AUD_CLK_MOSI>,
+				<PINMUX_GPIO137__FUNC_AUD_SYNC_MOSI>,
+				<PINMUX_GPIO138__FUNC_AUD_DAT_MOSI0>,
+				<PINMUX_GPIO139__FUNC_AUD_DAT_MOSI1>,
+				<PINMUX_GPIO140__FUNC_AUD_CLK_MISO>,
+				<PINMUX_GPIO141__FUNC_AUD_SYNC_MISO>,
+				<PINMUX_GPIO142__FUNC_AUD_DAT_MISO0>,
+				<PINMUX_GPIO143__FUNC_AUD_DAT_MISO1>;
+		};
+	};
+
+	mmc1_pins_default: mmc1default {
+		pins_cmd_dat {
+			pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
+				 <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
+				 <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
+				 <PINMUX_GPIO33__FUNC_MSDC1_DAT2>,
+				 <PINMUX_GPIO30__FUNC_MSDC1_DAT3>;
+			input-enable;
+			bias-pull-up;
+		};
+
+		pins_clk {
+			pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
+			input-enable;
+			bias-pull-down;
+		};
+
+		pins_pmu {
+			pinmux = <PINMUX_GPIO178__FUNC_GPIO178>,
+				 <PINMUX_GPIO166__FUNC_GPIO166>;
+			output-high;
+		};
+	};
+
+	mmc1_pins_uhs: mmc1@0 {
+		pins_cmd_dat {
+			pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
+				 <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
+				 <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
+				 <PINMUX_GPIO33__FUNC_MSDC1_DAT2>,
+				 <PINMUX_GPIO30__FUNC_MSDC1_DAT3>;
+			drive-strength = <MTK_DRIVE_6mA>;
+			input-enable;
+			bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+		};
+
+		pins_clk {
+			pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
+			drive-strength = <MTK_DRIVE_6mA>;
+			bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+			input-enable;
+		};
+	};
+
+	i2c0_pins: i2c0 {
+		pins_cmd_dat {
+			pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
+				 <PINMUX_GPIO83__FUNC_SCL0>;
+			bias-pull-up;
+		};
+	};
+
+	i2c6_pins: i2c6 {
+		pins_cmd_dat {
+			pinmux = <PINMUX_GPIO113__FUNC_SCL6>,
+				 <PINMUX_GPIO114__FUNC_SDA6>;
+			bias-pull-up;
+		};
+	};
+
+	ctp_pins_default: ctp_pins_default {
+		pins_eint_ctp {
+			pinmux = <PINMUX_GPIO176__FUNC_GPIO176>;
+			input-enable;
+			bias-disable;
+		};
+
+		pins_rst_ctp {
+			pinmux = <PINMUX_GPIO177__FUNC_GPIO177>;
+			output-low;
+		};
+	};
+
+	ite_pins_default: ite_pins_default {
+		pins_rst_ite {
+			pinmux = <PINMUX_GPIO160__FUNC_GPIO160>;
+			output-high;
+		};
+	};
+
+	dpi_pin_func: dpi_pin_func {
+		pins_cmd_dat {
+			pinmux = <PINMUX_GPIO12__FUNC_I2S5_BCK>,
+				<PINMUX_GPIO46__FUNC_I2S5_LRCK>,
+				<PINMUX_GPIO47__FUNC_I2S5_DO>,
+				<PINMUX_GPIO13__FUNC_DBPI_D0>,
+				<PINMUX_GPIO14__FUNC_DBPI_D1>,
+				<PINMUX_GPIO15__FUNC_DBPI_D2>,
+				<PINMUX_GPIO16__FUNC_DBPI_D3>,
+				<PINMUX_GPIO17__FUNC_DBPI_D4>,
+				<PINMUX_GPIO18__FUNC_DBPI_D5>,
+				<PINMUX_GPIO19__FUNC_DBPI_D6>,
+				<PINMUX_GPIO20__FUNC_DBPI_D7>,
+				<PINMUX_GPIO21__FUNC_DBPI_D8>,
+				<PINMUX_GPIO22__FUNC_DBPI_D9>,
+				<PINMUX_GPIO23__FUNC_DBPI_D10>,
+				<PINMUX_GPIO24__FUNC_DBPI_D11>,
+				<PINMUX_GPIO25__FUNC_DBPI_HSYNC>,
+				<PINMUX_GPIO26__FUNC_DBPI_VSYNC>,
+				<PINMUX_GPIO27__FUNC_DBPI_DE>,
+				<PINMUX_GPIO28__FUNC_DBPI_CK>;
+		};
+	};
+
+	dpi_pin_gpio: dpi_pin_gpio {
+		pins_cmd_dat {
+			pinmux = <PINMUX_GPIO12__FUNC_GPIO12>,
+				<PINMUX_GPIO46__FUNC_GPIO46>,
+				<PINMUX_GPIO47__FUNC_GPIO47>,
+				<PINMUX_GPIO13__FUNC_GPIO13>,
+				<PINMUX_GPIO14__FUNC_GPIO14>,
+				<PINMUX_GPIO15__FUNC_GPIO15>,
+				<PINMUX_GPIO16__FUNC_GPIO16>,
+				<PINMUX_GPIO17__FUNC_GPIO17>,
+				<PINMUX_GPIO18__FUNC_GPIO18>,
+				<PINMUX_GPIO19__FUNC_GPIO19>,
+				<PINMUX_GPIO20__FUNC_GPIO20>,
+				<PINMUX_GPIO21__FUNC_GPIO21>,
+				<PINMUX_GPIO22__FUNC_GPIO22>,
+				<PINMUX_GPIO23__FUNC_GPIO23>,
+				<PINMUX_GPIO24__FUNC_GPIO24>,
+				<PINMUX_GPIO25__FUNC_GPIO25>,
+				<PINMUX_GPIO26__FUNC_GPIO26>,
+				<PINMUX_GPIO27__FUNC_GPIO27>,
+				<PINMUX_GPIO28__FUNC_GPIO28>;
+		};
+	};
+
+	usb_pins: usb_pins {
+		pins_usb {
+			pinmux = <PINMUX_GPIO42__FUNC_USB_DRVVBUS>;
+		};
+		pins_iddig {
+			pinmux = <PINMUX_GPIO41__FUNC_IDDIG>;
+			mediatek,pull-up-adv = <3>;
+		};
+	};
+};
+
+&spi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi_pins_0>;
+	mediatek,pad-select = <0>;
+	status = "okay";
+};
+
+&spi1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi_pins_1>;
+	mediatek,pad-select = <0>;
+	status = "okay";
+};
+
+&spi2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi_pins_2>;
+	mediatek,pad-select = <0>;
+	status = "okay";
+};
+
+&cci {
+	proc-supply = <&mt6358_vproc12_reg>;
+};
+
+&cpu0 {
+	proc-supply = <&mt6358_vproc12_reg>;
+};
+
+&cpu1 {
+	proc-supply = <&mt6358_vproc12_reg>;
+};
+
+&cpu2 {
+	proc-supply = <&mt6358_vproc12_reg>;
+};
+
+&cpu3 {
+	proc-supply = <&mt6358_vproc12_reg>;
+};
+
+&cpu4 {
+	proc-supply = <&mt6358_vproc11_reg>;
+};
+
+&cpu5 {
+	proc-supply = <&mt6358_vproc11_reg>;
+};
+
+&cpu6 {
+	proc-supply = <&mt6358_vproc11_reg>;
+};
+
+&cpu7 {
+	proc-supply = <&mt6358_vproc11_reg>;
+};
+
+&svs_cpu_little {
+	vcpu-little-supply = <&mt6358_vproc12_reg>;
+};
+
+&svs_cpu_big {
+	vcpu-big-supply = <&mt6358_vproc11_reg>;
+};
+
+&svs_cci {
+	vcci-supply = <&mt6358_vproc12_reg>;
+};
+
+&svs_gpu {
+	vgpu-supply = <&mt6358_vgpu_reg>;
+};
+
+&scpsys {
+	mfg-supply = <&mt6358_vgpu_reg>;
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&gpu {
+	supply-names = "mali", "shadercores";
+	mali-supply = <&mt6358_vgpu_reg>;
+	shadercores-supply = <&mt6358_vsram_gpu_reg>;
+	operating-points-v2 = <&gpu_opp_table>;
+	power_model@0 {
+		compatible = "arm,mali-simple-power-model";
+		static-coefficient = <2427750>;
+		dynamic-coefficient = <4687>;
+		ts = <20000 2000 (-20) 2>;
+		thermal-zone = "cpu_thermal";
+	};
+	power_model@1 {
+		compatible = "arm,mali-g72-power-model";
+		scale = <15000>;
+	};
+};
+
+&mmc0 {
+	status = "okay";
+	pinctrl-names = "default", "state_uhs";
+	pinctrl-0 = <&mmc0_pins_default>;
+	pinctrl-1 = <&mmc0_pins_uhs>;
+	bus-width = <8>;
+	max-frequency = <200000000>;
+	cap-mmc-highspeed;
+	mmc-hs200-1_8v;
+	mmc-hs400-1_8v;
+	cap-mmc-hw-reset;
+	no-sdio;
+	no-sd;
+	hs400-ds-delay = <0x12814>;
+	vmmc-supply = <&mt6358_vemc_reg>;
+	vqmmc-supply = <&mt6358_vio18_reg>;
+	assigned-clocks = <&topckgen CLK_TOP_MUX_MSDC50_0>;
+	assigned-clock-parents = <&topckgen CLK_TOP_MSDCPLL_CK>;
+	non-removable;
+};
+
+&mipi_tx0 {
+	status = "okay";
+};
+
+&pwm0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&pwm0_pin_default>;
+};
+
+&pwm1 {
+	status = "okay";
+	pinctrl-0 = <&pwm1_pin_default>;
+	pinctrl-names = "default";
+};
+
+&dpi0 {
+	dpi_dual_edge;
+	dpi_pin_mode_swap;
+	pinctrl-names = "gpiomode", "dpimode";
+	pinctrl-0 = <&dpi_pin_gpio>;
+	pinctrl-1 = <&dpi_pin_func>;
+	status = "okay";
+	port {
+		dpi_out: endpoint {
+			remote-endpoint = <&it66121_in>;
+		};
+	};
+};
+
+&dsi0 {
+	status = "okay";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	panel: panel@0 {
+		compatible = "sharp,nt35532";
+		reg = <0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&panel_pins_default>;
+		backlight = <&backlight_lcd0>;
+		reset-gpios = <&pio 45 0>;
+		pwr-gpios = <&pio 158 0>;
+		pwr2-gpios = <&pio 159 0>;
+		status = "okay";
+		port {
+			panel_in: endpoint {
+				remote-endpoint = <&dsi_out>;
+			};
+		};
+	};
+
+	ports {
+		port {
+			dsi_out: endpoint {
+				remote-endpoint = <&panel_in>;
+			};
+		};
+	};
+};
+
+&ssusb {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb_pins>;
+	mediatek,force-vbus;
+	usb-role-switch;
+	maximum-speed = "high-speed";
+	dr_mode = "otg";
+	vusb33-supply = <&mt6358_vusb_reg>;
+	vbus-supply = <&usb_p0_vbus>;
+	status = "okay";
+	connector {
+		compatible = "gpio-usb-b-connector", "usb-b-connector";
+		type = "mini";
+		id-gpios = <&pio 41 GPIO_ACTIVE_HIGH>;
+		vbus-supply = <&usb_p0_vbus>;
+	};
+};
+
+&usb_host {
+	vusb33-supply = <&mt6358_vusb_reg>;
+	vbus-supply = <&usb_p0_vbus>;
+	status = "okay";
+};
+
+&mt6358codec {
+	Avdd-supply = <&mt6358_vaud28_reg>;
+};
+
+&mmc1 {
+	status = "okay";
+	pinctrl-names = "default", "state_uhs";
+	pinctrl-0 = <&mmc1_pins_default>;
+	pinctrl-1 = <&mmc1_pins_uhs>;
+	bus-width = <4>;
+	max-frequency = <200000000>;
+	cap-sd-highspeed;
+	sd-uhs-sdr50;
+	sd-uhs-sdr104;
+	cap-sdio-irq;
+	no-mmc;
+	no-sd;
+	vmmc-supply = <&mt6358_vmch_reg>;
+	vqmmc-supply = <&mt6358_vmc_reg>;
+	keep-power-in-suspend;
+	enable-sdio-wakeup;
+	non-removable;
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins>;
+	status = "okay";
+	clock-frequency = <400000>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	gt5688@5d {
+		compatible = "goodix,gt5688";
+		reg = <0x5d>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ctp_pins_default>;
+		interrupt-parent = <&pio>;
+		interrupts = <189 IRQ_TYPE_EDGE_FALLING>;
+		irq-gpios = <&pio 176 GPIO_ACTIVE_HIGH>;
+		reset-gpios = <&pio 177 GPIO_ACTIVE_HIGH>;
+		AVDD28-supply = <&mt6358_vldo28_reg>;
+		VDDIO-supply = <&vddio_touch_reg>;
+	};
+};
+
+&i2c6 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c6_pins>;
+	status = "okay";
+	clock-frequency = <400000>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	it66121hdmitx: it66121hdmitx@4c {
+		compatible = "ite,it66121";
+		pinctrl-names = "default";
+		pinctrl-0 = <&ite_pins_default>;
+		vcn33-supply = <&mt6358_vcn33_wifi_reg>;
+		vcn18-supply = <&mt6358_vcn18_reg>;
+		vrf12-supply = <&mt6358_vrf12_reg>;
+		reset-gpios = <&pio 160 GPIO_ACTIVE_LOW>;
+		interrupt-parent = <&pio>;
+		interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+		reg = <0x4c>;
+		pclk-dual-edge;
+
+		port {
+			it66121_in: endpoint {
+				remote-endpoint = <&dpi_out>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pinfunc.h b/arch/arm64/boot/dts/mediatek/mt8183-pinfunc.h
new file mode 100644
index 0000000..6221cd7
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-pinfunc.h
@@ -0,0 +1,1120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#ifndef __MT8183_PINFUNC_H
+#define __MT8183_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
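+/*
+ * Each PINMUX_GPIOn__FUNC_* macro packs the pin number together with the
+ * mux function index selected for it; function 0 is always plain GPIO and
+ * functions 1-7 select the alternate signals listed below.
+ */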
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_MRG_SYNC (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_PCM0_SYNC (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_SRCLKENAI0 (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_SCP_SPI2_CS (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_I2S3_MCK (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_SPI2_CSB (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_MRG_CLK (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_PCM0_CLK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_CLKM3 (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_SCP_SPI2_MO (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_I2S3_BCK (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_SPI2_MO (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_MRG_DO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_PCM0_DO (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_SCL6 (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_SCP_SPI2_CK (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_I2S3_LRCK (MTK_PIN_NO(2) | 6)
+#define PINMUX_GPIO2__FUNC_SPI2_CLK (MTK_PIN_NO(2) | 7)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_MRG_DI (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_PCM0_DI (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_SDA6 (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_TDM_MCK (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_I2S3_DO (MTK_PIN_NO(3) | 6)
+#define PINMUX_GPIO3__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(3) | 7)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_PWM_B (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S0_MCK (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_MD_URXD1 (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_TDM_BCK (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_DAP_MD32_SWD (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_PWM_C (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S0_BCK (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_SSPM_URXD_AO (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_MD_UTXD1 (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_TDM_LRCK (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_PWM_A (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S0_LRCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_IDDIG (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_MD_URXD0 (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_TDM_DATA0 (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 6)
+#define PINMUX_GPIO6__FUNC_CMFLASH (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_SPI1_B_MI (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S0_DI (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_USB_DRVVBUS (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_MD_UTXD0 (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_TDM_DATA1 (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 6)
+#define PINMUX_GPIO7__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_SPI1_B_CSB (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_ANT_SEL3 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SCL7 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_TDM_DATA2 (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_MD_INT0 (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_SPI1_B_MO (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_ANT_SEL4 (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_CMMCLK2 (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_DBG_MON_B10 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_SPI1_B_CLK (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_ANT_SEL5 (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_CMMCLK3 (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_TDM_DATA3 (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_DBG_MON_B11 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_TP_URXD1_AO (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_IDDIG (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_SCL6 (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_UCTS1 (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_UCTS0 (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_SRCLKENAI1 (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_I2S5_MCK (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_TP_UTXD1_AO (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_USB_DRVVBUS (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_SDA6 (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_URTS1 (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_URTS0 (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_I2S2_DI2 (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_I2S5_BCK (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_DBPI_D0 (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_SPI5_MI (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_PCM0_SYNC (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_MD_URXD0 (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_ANT_SEL3 (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_I2S0_MCK (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_DBG_MON_B15 (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_DBPI_D1 (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_SPI5_CSB (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_PCM0_CLK (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_MD_UTXD0 (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_ANT_SEL4 (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_I2S0_BCK (MTK_PIN_NO(14) | 6)
+#define PINMUX_GPIO14__FUNC_DBG_MON_B16 (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_DBPI_D2 (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_SPI5_MO (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_PCM0_DO (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_MD_URXD1 (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_ANT_SEL5 (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_I2S0_LRCK (MTK_PIN_NO(15) | 6)
+#define PINMUX_GPIO15__FUNC_DBG_MON_B17 (MTK_PIN_NO(15) | 7)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_DBPI_D3 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_SPI5_CLK (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_PCM0_DI (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_MD_UTXD1 (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_ANT_SEL6 (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_I2S0_DI (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_DBG_MON_B23 (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_DBPI_D4 (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_SPI4_MI (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_MD_INT0 (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_ANT_SEL7 (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_I2S3_MCK (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_DBG_MON_A1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_DBPI_D5 (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SPI4_CSB (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_MD_INT0 (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_I2S3_BCK (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_DBG_MON_A2 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_DBPI_D6 (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SPI4_MO (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_CONN_MCU_TDO (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_URXD1 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_I2S3_LRCK (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_DBG_MON_A3 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_DBPI_D7 (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SPI4_CLK (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_UTXD1 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_I2S3_DO (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_DBG_MON_A19 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_DBPI_D8 (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_SPI3_MI (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_CONN_MCU_TMS (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_DAP_MD32_SWD (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_I2S2_MCK (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_DBG_MON_B5 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_DBPI_D9 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_SPI3_CSB (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_CONN_MCU_TCK (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_I2S2_BCK (MTK_PIN_NO(22) | 6)
+#define PINMUX_GPIO22__FUNC_DBG_MON_B6 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_DBPI_D10 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_SPI3_MO (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_CONN_MCU_TDI (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_UCTS1 (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_I2S2_LRCK (MTK_PIN_NO(23) | 6)
+#define PINMUX_GPIO23__FUNC_DBG_MON_B7 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_DBPI_D11 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_SPI3_CLK (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_SRCLKENAI0 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_URTS1 (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_IO_JTAG_TCK (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_I2S2_DI (MTK_PIN_NO(24) | 6)
+#define PINMUX_GPIO24__FUNC_DBG_MON_B31 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_DBPI_HSYNC (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_ANT_SEL0 (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SCL6 (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_KPCOL2 (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_IO_JTAG_TMS (MTK_PIN_NO(25) | 5)
+#define PINMUX_GPIO25__FUNC_I2S1_MCK (MTK_PIN_NO(25) | 6)
+#define PINMUX_GPIO25__FUNC_DBG_MON_B0 (MTK_PIN_NO(25) | 7)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_DBPI_VSYNC (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_ANT_SEL1 (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_SDA6 (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_KPROW2 (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_IO_JTAG_TDI (MTK_PIN_NO(26) | 5)
+#define PINMUX_GPIO26__FUNC_I2S1_BCK (MTK_PIN_NO(26) | 6)
+#define PINMUX_GPIO26__FUNC_DBG_MON_B1 (MTK_PIN_NO(26) | 7)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_DBPI_DE (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_ANT_SEL2 (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_SCL7 (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_DMIC_CLK (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_IO_JTAG_TDO (MTK_PIN_NO(27) | 5)
+#define PINMUX_GPIO27__FUNC_I2S1_LRCK (MTK_PIN_NO(27) | 6)
+#define PINMUX_GPIO27__FUNC_DBG_MON_B9 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_DBPI_CK (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_SDA7 (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_DMIC_DAT (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(28) | 5)
+#define PINMUX_GPIO28__FUNC_I2S1_DO (MTK_PIN_NO(28) | 6)
+#define PINMUX_GPIO28__FUNC_DBG_MON_B32 (MTK_PIN_NO(28) | 7)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_MSDC1_CLK (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_IO_JTAG_TCK (MTK_PIN_NO(29) | 2)
+#define PINMUX_GPIO29__FUNC_UDI_TCK (MTK_PIN_NO(29) | 3)
+#define PINMUX_GPIO29__FUNC_CONN_DSP_JCK (MTK_PIN_NO(29) | 4)
+#define PINMUX_GPIO29__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(29) | 5)
+#define PINMUX_GPIO29__FUNC_PCM1_CLK (MTK_PIN_NO(29) | 6)
+#define PINMUX_GPIO29__FUNC_DBG_MON_A6 (MTK_PIN_NO(29) | 7)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_MSDC1_DAT3 (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_DAP_MD32_SWD (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(30) | 5)
+#define PINMUX_GPIO30__FUNC_PCM1_DI (MTK_PIN_NO(30) | 6)
+#define PINMUX_GPIO30__FUNC_DBG_MON_A7 (MTK_PIN_NO(30) | 7)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_MSDC1_CMD (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_IO_JTAG_TMS (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_UDI_TMS (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_CONN_DSP_JMS (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(31) | 5)
+#define PINMUX_GPIO31__FUNC_PCM1_SYNC (MTK_PIN_NO(31) | 6)
+#define PINMUX_GPIO31__FUNC_DBG_MON_A8 (MTK_PIN_NO(31) | 7)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_MSDC1_DAT0 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_IO_JTAG_TDI (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_UDI_TDI (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_CONN_DSP_JDI (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(32) | 5)
+#define PINMUX_GPIO32__FUNC_PCM1_DO0 (MTK_PIN_NO(32) | 6)
+#define PINMUX_GPIO32__FUNC_DBG_MON_A9 (MTK_PIN_NO(32) | 7)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_MSDC1_DAT2 (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_UDI_NTRST (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(33) | 5)
+#define PINMUX_GPIO33__FUNC_PCM1_DO2 (MTK_PIN_NO(33) | 6)
+#define PINMUX_GPIO33__FUNC_DBG_MON_A10 (MTK_PIN_NO(33) | 7)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_MSDC1_DAT1 (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_IO_JTAG_TDO (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_UDI_TDO (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_CONN_DSP_JDO (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(34) | 5)
+#define PINMUX_GPIO34__FUNC_PCM1_DO1 (MTK_PIN_NO(34) | 6)
+#define PINMUX_GPIO34__FUNC_DBG_MON_A11 (MTK_PIN_NO(34) | 7)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_CCU_JTAG_TDO (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_CONN_DSP_JMS (MTK_PIN_NO(35) | 6)
+#define PINMUX_GPIO35__FUNC_DBG_MON_A28 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_CCU_JTAG_TMS (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_DBG_MON_A29 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_CCU_JTAG_TDI (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_CONN_DSP_JDO (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_DBG_MON_A30 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_DBG_MON_A20 (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_CCU_JTAG_TCK (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_CONN_DSP_JCK (MTK_PIN_NO(39) | 6)
+#define PINMUX_GPIO39__FUNC_DBG_MON_A31 (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_CCU_JTAG_TRST (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_CONN_DSP_JDI (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_DBG_MON_A32 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_IDDIG (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_URXD1 (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_UCTS0 (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_DMIC_CLK (MTK_PIN_NO(41) | 6)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_USB_DRVVBUS (MTK_PIN_NO(42) | 1)
+#define PINMUX_GPIO42__FUNC_UTXD1 (MTK_PIN_NO(42) | 2)
+#define PINMUX_GPIO42__FUNC_URTS0 (MTK_PIN_NO(42) | 3)
+#define PINMUX_GPIO42__FUNC_SSPM_URXD_AO (MTK_PIN_NO(42) | 4)
+#define PINMUX_GPIO42__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(42) | 5)
+#define PINMUX_GPIO42__FUNC_DMIC_DAT (MTK_PIN_NO(42) | 6)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_DISP_PWM (MTK_PIN_NO(43) | 1)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_DSI_TE (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_LCM_RST (MTK_PIN_NO(45) | 1)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_URXD1 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_UCTS1 (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_CCU_UTXD_AO (MTK_PIN_NO(46) | 4)
+#define PINMUX_GPIO46__FUNC_TP_UCTS1_AO (MTK_PIN_NO(46) | 5)
+#define PINMUX_GPIO46__FUNC_IDDIG (MTK_PIN_NO(46) | 6)
+#define PINMUX_GPIO46__FUNC_I2S5_LRCK (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_UTXD1 (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_URTS1 (MTK_PIN_NO(47) | 3)
+#define PINMUX_GPIO47__FUNC_CCU_URXD_AO (MTK_PIN_NO(47) | 4)
+#define PINMUX_GPIO47__FUNC_TP_URTS1_AO (MTK_PIN_NO(47) | 5)
+#define PINMUX_GPIO47__FUNC_USB_DRVVBUS (MTK_PIN_NO(47) | 6)
+#define PINMUX_GPIO47__FUNC_I2S5_DO (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_SCL5 (MTK_PIN_NO(48) | 1)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_SDA5 (MTK_PIN_NO(49) | 1)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_SCL3 (MTK_PIN_NO(50) | 1)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_SDA3 (MTK_PIN_NO(51) | 1)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_BPI_ANT2 (MTK_PIN_NO(52) | 1)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_BPI_ANT0 (MTK_PIN_NO(53) | 1)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_BPI_OLAT1 (MTK_PIN_NO(54) | 1)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_BPI_BUS8 (MTK_PIN_NO(55) | 1)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_BPI_BUS9 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_SCL_6306 (MTK_PIN_NO(56) | 2)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_BPI_BUS10 (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_SDA_6306 (MTK_PIN_NO(57) | 2)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_SPM_BSI_D2 (MTK_PIN_NO(58) | 2)
+#define PINMUX_GPIO58__FUNC_PWM_B (MTK_PIN_NO(58) | 3)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_SPM_BSI_D1 (MTK_PIN_NO(59) | 2)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_SPM_BSI_D0 (MTK_PIN_NO(60) | 2)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_MIPI1_SDATA (MTK_PIN_NO(61) | 1)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_MIPI1_SCLK (MTK_PIN_NO(62) | 1)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_MIPI0_SDATA (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_MIPI0_SCLK (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_MIPI3_SDATA (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_BPI_OLAT2 (MTK_PIN_NO(65) | 2)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_MIPI3_SCLK (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_BPI_OLAT3 (MTK_PIN_NO(66) | 2)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_MIPI2_SDATA (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_MIPI2_SCLK (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_BPI_BUS7 (MTK_PIN_NO(69) | 1)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_BPI_BUS6 (MTK_PIN_NO(70) | 1)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_BPI_BUS5 (MTK_PIN_NO(71) | 1)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_BPI_BUS4 (MTK_PIN_NO(72) | 1)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_BPI_BUS3 (MTK_PIN_NO(73) | 1)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_BPI_BUS2 (MTK_PIN_NO(74) | 1)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_BPI_BUS1 (MTK_PIN_NO(75) | 1)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_BPI_BUS0 (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_BPI_ANT1 (MTK_PIN_NO(77) | 1)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_BPI_OLAT0 (MTK_PIN_NO(78) | 1)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_PA_VM1 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_MIPI4_SDATA (MTK_PIN_NO(79) | 2)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_PA_VM0 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_MIPI4_SCLK (MTK_PIN_NO(80) | 2)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_SDA1 (MTK_PIN_NO(81) | 1)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_SDA0 (MTK_PIN_NO(82) | 1)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_SCL0 (MTK_PIN_NO(83) | 1)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_SCL1 (MTK_PIN_NO(84) | 1)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_SPI0_MI (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_SCP_SPI0_MI (MTK_PIN_NO(85) | 2)
+#define PINMUX_GPIO85__FUNC_CLKM3 (MTK_PIN_NO(85) | 3)
+#define PINMUX_GPIO85__FUNC_I2S1_BCK (MTK_PIN_NO(85) | 4)
+#define PINMUX_GPIO85__FUNC_MFG_DFD_JTAG_TDO (MTK_PIN_NO(85) | 5)
+#define PINMUX_GPIO85__FUNC_DFD_TDO (MTK_PIN_NO(85) | 6)
+#define PINMUX_GPIO85__FUNC_JTDO_SEL1 (MTK_PIN_NO(85) | 7)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_SPI0_CSB (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_SCP_SPI0_CS (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_CLKM0 (MTK_PIN_NO(86) | 3)
+#define PINMUX_GPIO86__FUNC_I2S1_LRCK (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_MFG_DFD_JTAG_TMS (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_DFD_TMS (MTK_PIN_NO(86) | 6)
+#define PINMUX_GPIO86__FUNC_JTMS_SEL1 (MTK_PIN_NO(86) | 7)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_SPI0_MO (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_SCP_SPI0_MO (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_SDA1 (MTK_PIN_NO(87) | 3)
+#define PINMUX_GPIO87__FUNC_I2S1_DO (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_MFG_DFD_JTAG_TDI (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_DFD_TDI (MTK_PIN_NO(87) | 6)
+#define PINMUX_GPIO87__FUNC_JTDI_SEL1 (MTK_PIN_NO(87) | 7)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_SPI0_CLK (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_SCP_SPI0_CK (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_SCL1 (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_I2S1_MCK (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_MFG_DFD_JTAG_TCK (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_DFD_TCK_XI (MTK_PIN_NO(88) | 6)
+#define PINMUX_GPIO88__FUNC_JTCK_SEL1 (MTK_PIN_NO(88) | 7)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_SRCLKENAI0 (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_PWM_C (MTK_PIN_NO(89) | 2)
+#define PINMUX_GPIO89__FUNC_I2S5_BCK (MTK_PIN_NO(89) | 3)
+#define PINMUX_GPIO89__FUNC_ANT_SEL6 (MTK_PIN_NO(89) | 4)
+#define PINMUX_GPIO89__FUNC_SDA8 (MTK_PIN_NO(89) | 5)
+#define PINMUX_GPIO89__FUNC_CMVREF0 (MTK_PIN_NO(89) | 6)
+#define PINMUX_GPIO89__FUNC_DBG_MON_A21 (MTK_PIN_NO(89) | 7)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_PWM_A (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_CMMCLK2 (MTK_PIN_NO(90) | 2)
+#define PINMUX_GPIO90__FUNC_I2S5_LRCK (MTK_PIN_NO(90) | 3)
+#define PINMUX_GPIO90__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_SCL8 (MTK_PIN_NO(90) | 5)
+#define PINMUX_GPIO90__FUNC_PTA_RXD (MTK_PIN_NO(90) | 6)
+#define PINMUX_GPIO90__FUNC_DBG_MON_A22 (MTK_PIN_NO(90) | 7)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_KPROW1 (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_PWM_B (MTK_PIN_NO(91) | 2)
+#define PINMUX_GPIO91__FUNC_I2S5_DO (MTK_PIN_NO(91) | 3)
+#define PINMUX_GPIO91__FUNC_ANT_SEL7 (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_CMMCLK3 (MTK_PIN_NO(91) | 5)
+#define PINMUX_GPIO91__FUNC_PTA_TXD (MTK_PIN_NO(91) | 6)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_KPROW0 (MTK_PIN_NO(92) | 1)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_KPCOL0 (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_DBG_MON_B27 (MTK_PIN_NO(93) | 7)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_KPCOL1 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_I2S2_DI2 (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_I2S5_MCK (MTK_PIN_NO(94) | 3)
+#define PINMUX_GPIO94__FUNC_CMMCLK2 (MTK_PIN_NO(94) | 4)
+#define PINMUX_GPIO94__FUNC_SCP_SPI2_MI (MTK_PIN_NO(94) | 5)
+#define PINMUX_GPIO94__FUNC_SRCLKENAI1 (MTK_PIN_NO(94) | 6)
+#define PINMUX_GPIO94__FUNC_SPI2_MI (MTK_PIN_NO(94) | 7)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_URXD0 (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_UTXD0 (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_MD_URXD0 (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_MD_URXD1 (MTK_PIN_NO(95) | 4)
+#define PINMUX_GPIO95__FUNC_SSPM_URXD_AO (MTK_PIN_NO(95) | 5)
+#define PINMUX_GPIO95__FUNC_CCU_URXD_AO (MTK_PIN_NO(95) | 6)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_UTXD0 (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_URXD0 (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_MD_UTXD0 (MTK_PIN_NO(96) | 3)
+#define PINMUX_GPIO96__FUNC_MD_UTXD1 (MTK_PIN_NO(96) | 4)
+#define PINMUX_GPIO96__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(96) | 5)
+#define PINMUX_GPIO96__FUNC_CCU_UTXD_AO (MTK_PIN_NO(96) | 6)
+#define PINMUX_GPIO96__FUNC_DBG_MON_B2 (MTK_PIN_NO(96) | 7)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_UCTS0 (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_I2S2_MCK (MTK_PIN_NO(97) | 2)
+#define PINMUX_GPIO97__FUNC_IDDIG (MTK_PIN_NO(97) | 3)
+#define PINMUX_GPIO97__FUNC_CONN_MCU_TDO (MTK_PIN_NO(97) | 4)
+#define PINMUX_GPIO97__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(97) | 5)
+#define PINMUX_GPIO97__FUNC_IO_JTAG_TDO (MTK_PIN_NO(97) | 6)
+#define PINMUX_GPIO97__FUNC_DBG_MON_B3 (MTK_PIN_NO(97) | 7)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_URTS0 (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_I2S2_BCK (MTK_PIN_NO(98) | 2)
+#define PINMUX_GPIO98__FUNC_USB_DRVVBUS (MTK_PIN_NO(98) | 3)
+#define PINMUX_GPIO98__FUNC_CONN_MCU_TMS (MTK_PIN_NO(98) | 4)
+#define PINMUX_GPIO98__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(98) | 5)
+#define PINMUX_GPIO98__FUNC_IO_JTAG_TMS (MTK_PIN_NO(98) | 6)
+#define PINMUX_GPIO98__FUNC_DBG_MON_B4 (MTK_PIN_NO(98) | 7)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_CMMCLK0 (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(99) | 4)
+#define PINMUX_GPIO99__FUNC_DBG_MON_B28 (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_CMMCLK1 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_PWM_C (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(100) | 4)
+#define PINMUX_GPIO100__FUNC_DBG_MON_B29 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_CLKM2 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_I2S2_LRCK (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_CMVREF1 (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_CONN_MCU_TCK (MTK_PIN_NO(101) | 4)
+#define PINMUX_GPIO101__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_IO_JTAG_TCK (MTK_PIN_NO(101) | 6)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_CLKM1 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_I2S2_DI (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_CONN_MCU_TDI (MTK_PIN_NO(102) | 4)
+#define PINMUX_GPIO102__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_IO_JTAG_TDI (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_DBG_MON_B8 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_SCL2 (MTK_PIN_NO(103) | 1)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_SDA2 (MTK_PIN_NO(104) | 1)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_SCL4 (MTK_PIN_NO(105) | 1)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_SDA4 (MTK_PIN_NO(106) | 1)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DMIC_CLK (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_ANT_SEL0 (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_CLKM0 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_SDA7 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(107) | 5)
+#define PINMUX_GPIO107__FUNC_PWM_A (MTK_PIN_NO(107) | 6)
+#define PINMUX_GPIO107__FUNC_DBG_MON_B12 (MTK_PIN_NO(107) | 7)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_CMMCLK2 (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_ANT_SEL1 (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_CLKM1 (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_SCL8 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_DAP_MD32_SWD (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_PWM_B (MTK_PIN_NO(108) | 6)
+#define PINMUX_GPIO108__FUNC_DBG_MON_B13 (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_DMIC_DAT (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_ANT_SEL2 (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_CLKM2 (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_SDA8 (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_PWM_C (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_DBG_MON_B14 (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_SCL7 (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_ANT_SEL0 (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_TP_URXD1_AO (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_USB_DRVVBUS (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_SRCLKENAI1 (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_KPCOL2 (MTK_PIN_NO(110) | 6)
+#define PINMUX_GPIO110__FUNC_URXD1 (MTK_PIN_NO(110) | 7)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_CMMCLK3 (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_ANT_SEL1 (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_SRCLKENAI0 (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(111) | 7)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_SDA7 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_ANT_SEL2 (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_TP_UTXD1_AO (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_IDDIG (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_AGPS_SYNC (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_KPROW2 (MTK_PIN_NO(112) | 6)
+#define PINMUX_GPIO112__FUNC_UTXD1 (MTK_PIN_NO(112) | 7)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_CONN_TOP_CLK (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_SCL6 (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_AUXIF_CLK0 (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_TP_UCTS1_AO (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_CONN_TOP_DATA (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_SDA6 (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_AUXIF_ST0 (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_TP_URTS1_AO (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_CONN_BT_CLK (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_UTXD1 (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_PTA_TXD (MTK_PIN_NO(115) | 3)
+#define PINMUX_GPIO115__FUNC_AUXIF_CLK1 (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_DAP_MD32_SWD (MTK_PIN_NO(115) | 5)
+#define PINMUX_GPIO115__FUNC_TP_UTXD1_AO (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_CONN_BT_DATA (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_AUXIF_ST1 (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(116) | 5)
+#define PINMUX_GPIO116__FUNC_TP_URXD2_AO (MTK_PIN_NO(116) | 6)
+#define PINMUX_GPIO116__FUNC_DBG_MON_A0 (MTK_PIN_NO(116) | 7)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_CONN_WF_HB0 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_TP_UTXD2_AO (MTK_PIN_NO(117) | 6)
+#define PINMUX_GPIO117__FUNC_DBG_MON_A4 (MTK_PIN_NO(117) | 7)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_CONN_WF_HB1 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_SSPM_URXD_AO (MTK_PIN_NO(118) | 5)
+#define PINMUX_GPIO118__FUNC_TP_UCTS2_AO (MTK_PIN_NO(118) | 6)
+#define PINMUX_GPIO118__FUNC_DBG_MON_A5 (MTK_PIN_NO(118) | 7)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_CONN_WF_HB2 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(119) | 5)
+#define PINMUX_GPIO119__FUNC_TP_URTS2_AO (MTK_PIN_NO(119) | 6)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_CONN_WB_PTA (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_CCU_URXD_AO (MTK_PIN_NO(120) | 5)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_CONN_HRST_B (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_URXD1 (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_PTA_RXD (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_CCU_UTXD_AO (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_TP_URXD1_AO (MTK_PIN_NO(121) | 6)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_MSDC0_CMD (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_SSPM_URXD2_AO (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_ANT_SEL1 (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_DBG_MON_A12 (MTK_PIN_NO(122) | 7)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_MSDC0_DAT0 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_ANT_SEL0 (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_DBG_MON_A13 (MTK_PIN_NO(123) | 7)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_MSDC0_CLK (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_DBG_MON_A14 (MTK_PIN_NO(124) | 7)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_MSDC0_DAT2 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_MRG_CLK (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_DBG_MON_A15 (MTK_PIN_NO(125) | 7)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_MSDC0_DAT4 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_ANT_SEL5 (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(126) | 6)
+#define PINMUX_GPIO126__FUNC_DBG_MON_A16 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_MSDC0_DAT6 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_ANT_SEL4 (MTK_PIN_NO(127) | 3)
+#define PINMUX_GPIO127__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(127) | 6)
+#define PINMUX_GPIO127__FUNC_DBG_MON_A17 (MTK_PIN_NO(127) | 7)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_MSDC0_DAT1 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_ANT_SEL2 (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_DBG_MON_A18 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_MSDC0_DAT5 (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_ANT_SEL3 (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_DBG_MON_A23 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_MSDC0_DAT7 (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_MRG_DO (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_DBG_MON_A24 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_MSDC0_DSL (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_MRG_SYNC (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_DBG_MON_A25 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_MSDC0_DAT3 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_MRG_DI (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_DBG_MON_A26 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_MSDC0_RSTB (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_AGPS_SYNC (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_DBG_MON_A27 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_RTC32K_CK (MTK_PIN_NO(134) | 1)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_WATCHDOG (MTK_PIN_NO(135) | 1)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_AUD_CLK_MISO (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_I2S1_MCK (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(136) | 6)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_I2S1_BCK (MTK_PIN_NO(137) | 3)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_I2S1_LRCK (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_DBG_MON_B24 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_I2S1_DO (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(139) | 6)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_AUD_CLK_MISO (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_I2S0_MCK (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(140) | 6)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_I2S0_BCK (MTK_PIN_NO(141) | 3)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_I2S0_LRCK (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_VOW_DAT_MISO (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_DBG_MON_B25 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_I2S0_DI (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_VOW_CLK_MISO (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(143) | 6)
+#define PINMUX_GPIO143__FUNC_DBG_MON_B26 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(144) | 2)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(145) | 1)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(146) | 2)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(147) | 1)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_SRCLKENA0 (MTK_PIN_NO(148) | 1)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_SRCLKENA1 (MTK_PIN_NO(149) | 1)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_PWM_A (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_CMFLASH (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_CLKM0 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_DBG_MON_B30 (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_PWM_B (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_CMVREF0 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_CLKM1 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_DBG_MON_B20 (MTK_PIN_NO(151) | 7)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_PWM_C (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_CMFLASH (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_CLKM2 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_DBG_MON_B21 (MTK_PIN_NO(152) | 7)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_PWM_A (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_CMVREF0 (MTK_PIN_NO(153) | 2)
+#define PINMUX_GPIO153__FUNC_CLKM3 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_DBG_MON_B22 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(154) | 2)
+#define PINMUX_GPIO154__FUNC_DBG_MON_B18 (MTK_PIN_NO(154) | 7)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_ANT_SEL0 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_CMVREF1 (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_ANT_SEL1 (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_SRCLKENAI0 (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_SCL6 (MTK_PIN_NO(156) | 3)
+#define PINMUX_GPIO156__FUNC_KPCOL2 (MTK_PIN_NO(156) | 4)
+#define PINMUX_GPIO156__FUNC_IDDIG (MTK_PIN_NO(156) | 5)
+#define PINMUX_GPIO156__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(156) | 7)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_ANT_SEL2 (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_SRCLKENAI1 (MTK_PIN_NO(157) | 2)
+#define PINMUX_GPIO157__FUNC_SDA6 (MTK_PIN_NO(157) | 3)
+#define PINMUX_GPIO157__FUNC_KPROW2 (MTK_PIN_NO(157) | 4)
+#define PINMUX_GPIO157__FUNC_USB_DRVVBUS (MTK_PIN_NO(157) | 5)
+#define PINMUX_GPIO157__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(157) | 7)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_ANT_SEL3 (MTK_PIN_NO(158) | 1)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_ANT_SEL4 (MTK_PIN_NO(159) | 1)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_ANT_SEL5 (MTK_PIN_NO(160) | 1)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_SPI1_A_MI (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_SCP_SPI1_MI (MTK_PIN_NO(161) | 2)
+#define PINMUX_GPIO161__FUNC_IDDIG (MTK_PIN_NO(161) | 3)
+#define PINMUX_GPIO161__FUNC_ANT_SEL6 (MTK_PIN_NO(161) | 4)
+#define PINMUX_GPIO161__FUNC_KPCOL2 (MTK_PIN_NO(161) | 5)
+#define PINMUX_GPIO161__FUNC_PTA_RXD (MTK_PIN_NO(161) | 6)
+#define PINMUX_GPIO161__FUNC_DBG_MON_B19 (MTK_PIN_NO(161) | 7)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_SPI1_A_CSB (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_SCP_SPI1_CS (MTK_PIN_NO(162) | 2)
+#define PINMUX_GPIO162__FUNC_USB_DRVVBUS (MTK_PIN_NO(162) | 3)
+#define PINMUX_GPIO162__FUNC_ANT_SEL5 (MTK_PIN_NO(162) | 4)
+#define PINMUX_GPIO162__FUNC_KPROW2 (MTK_PIN_NO(162) | 5)
+#define PINMUX_GPIO162__FUNC_PTA_TXD (MTK_PIN_NO(162) | 6)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_SPI1_A_MO (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_SCP_SPI1_MO (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_SDA1 (MTK_PIN_NO(163) | 3)
+#define PINMUX_GPIO163__FUNC_ANT_SEL4 (MTK_PIN_NO(163) | 4)
+#define PINMUX_GPIO163__FUNC_CMMCLK2 (MTK_PIN_NO(163) | 5)
+#define PINMUX_GPIO163__FUNC_DMIC_CLK (MTK_PIN_NO(163) | 6)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_SPI1_A_CLK (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_SCP_SPI1_CK (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_SCL1 (MTK_PIN_NO(164) | 3)
+#define PINMUX_GPIO164__FUNC_ANT_SEL3 (MTK_PIN_NO(164) | 4)
+#define PINMUX_GPIO164__FUNC_CMMCLK3 (MTK_PIN_NO(164) | 5)
+#define PINMUX_GPIO164__FUNC_DMIC_DAT (MTK_PIN_NO(164) | 6)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_PWM_B (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_CMMCLK2 (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(165) | 3)
+#define PINMUX_GPIO165__FUNC_TDM_MCK_2ND (MTK_PIN_NO(165) | 6)
+#define PINMUX_GPIO165__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_ANT_SEL6 (MTK_PIN_NO(166) | 1)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_RFIC0_BSI_EN (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_SPM_BSI_EN (MTK_PIN_NO(167) | 2)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_SPM_BSI_CK (MTK_PIN_NO(168) | 2)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_PWM_C (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_CMMCLK3 (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_CMVREF1 (MTK_PIN_NO(169) | 3)
+#define PINMUX_GPIO169__FUNC_ANT_SEL7 (MTK_PIN_NO(169) | 4)
+#define PINMUX_GPIO169__FUNC_AGPS_SYNC (MTK_PIN_NO(169) | 5)
+#define PINMUX_GPIO169__FUNC_TDM_BCK_2ND (MTK_PIN_NO(169) | 6)
+#define PINMUX_GPIO169__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(169) | 7)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_I2S1_BCK (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_I2S3_BCK (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_SCL7 (MTK_PIN_NO(170) | 3)
+#define PINMUX_GPIO170__FUNC_I2S5_BCK (MTK_PIN_NO(170) | 4)
+#define PINMUX_GPIO170__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(170) | 5)
+#define PINMUX_GPIO170__FUNC_TDM_LRCK_2ND (MTK_PIN_NO(170) | 6)
+#define PINMUX_GPIO170__FUNC_ANT_SEL3 (MTK_PIN_NO(170) | 7)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_I2S1_LRCK (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_I2S3_LRCK (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_SDA7 (MTK_PIN_NO(171) | 3)
+#define PINMUX_GPIO171__FUNC_I2S5_LRCK (MTK_PIN_NO(171) | 4)
+#define PINMUX_GPIO171__FUNC_URXD1 (MTK_PIN_NO(171) | 5)
+#define PINMUX_GPIO171__FUNC_TDM_DATA0_2ND (MTK_PIN_NO(171) | 6)
+#define PINMUX_GPIO171__FUNC_ANT_SEL4 (MTK_PIN_NO(171) | 7)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_I2S1_DO (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_I2S3_DO (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_SCL8 (MTK_PIN_NO(172) | 3)
+#define PINMUX_GPIO172__FUNC_I2S5_DO (MTK_PIN_NO(172) | 4)
+#define PINMUX_GPIO172__FUNC_UTXD1 (MTK_PIN_NO(172) | 5)
+#define PINMUX_GPIO172__FUNC_TDM_DATA1_2ND (MTK_PIN_NO(172) | 6)
+#define PINMUX_GPIO172__FUNC_ANT_SEL5 (MTK_PIN_NO(172) | 7)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_I2S1_MCK (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_I2S3_MCK (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_SDA8 (MTK_PIN_NO(173) | 3)
+#define PINMUX_GPIO173__FUNC_I2S5_MCK (MTK_PIN_NO(173) | 4)
+#define PINMUX_GPIO173__FUNC_UCTS0 (MTK_PIN_NO(173) | 5)
+#define PINMUX_GPIO173__FUNC_TDM_DATA2_2ND (MTK_PIN_NO(173) | 6)
+#define PINMUX_GPIO173__FUNC_ANT_SEL6 (MTK_PIN_NO(173) | 7)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_I2S2_DI (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_I2S0_DI (MTK_PIN_NO(174) | 2)
+#define PINMUX_GPIO174__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(174) | 3)
+#define PINMUX_GPIO174__FUNC_I2S2_DI2 (MTK_PIN_NO(174) | 4)
+#define PINMUX_GPIO174__FUNC_URTS0 (MTK_PIN_NO(174) | 5)
+#define PINMUX_GPIO174__FUNC_TDM_DATA3_2ND (MTK_PIN_NO(174) | 6)
+#define PINMUX_GPIO174__FUNC_ANT_SEL7 (MTK_PIN_NO(174) | 7)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_ANT_SEL7 (MTK_PIN_NO(175) | 1)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+
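+/*
+ * Usage sketch (hypothetical board fragment, names illustrative): a
+ * pinctrl group in a board .dts picks one of the function macros
+ * above, e.g. to route pin 99 to its camera clock function:
+ *
+ *	cam_pins: cam-pins {
+ *		pins-cmmclk {
+ *			pinmux = <PINMUX_GPIO99__FUNC_CMMCLK0>;
+ *		};
+ *	};
+ */
+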
+#endif /* __MT8183_PINFUNC_H */
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
new file mode 100644
index 0000000..2b3c86fa
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -0,0 +1,1745 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Ben Ho <ben.ho@mediatek.com>
+ *	   Erin Lo <erin.lo@mediatek.com>
+ */
+
+#include <dt-bindings/clock/mt8183-clk.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "mt8183-pinfunc.h"
+#include <dt-bindings/reset-controller/mt8183-resets.h>
+#include <dt-bindings/memory/mt8183-larb-port.h>
+#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/power/mt8183-power.h>
+#include <dt-bindings/gce/mt8183-gce.h>
+#include <dt-bindings/phy/phy.h>
+
+/ {
+	compatible = "mediatek,mt8183";
+	interrupt-parent = <&sysirq>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	aliases {
+		ovl0 = &ovl0;
+		ovl_2l0 = &ovl_2l0;
+		ovl_2l1 = &ovl_2l1;
+		rdma0 = &rdma0;
+		rdma1 = &rdma1;
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/* 192 KiB (0x30000) reserved for ARM Trusted Firmware (BL31) */
+		bl31_secmon_reserved: secmon@54600000 {
+			no-map;
+			reg = <0 0x54600000 0x0 0x30000>;
+		};
+	};
+
+	cluster0_opp: opp_table0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+		opp00 {
+			opp-hz = /bits/ 64 <793000000>;
+			opp-microvolt = <650000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <910000000>;
+			opp-microvolt = <687500>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <1014000000>;
+			opp-microvolt = <718750>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <1131000000>;
+			opp-microvolt = <756250>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <1248000000>;
+			opp-microvolt = <800000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1326000000>;
+			opp-microvolt = <818750>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1417000000>;
+			opp-microvolt = <850000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <1508000000>;
+			opp-microvolt = <868750>;
+		};
+		opp08 {
+			opp-hz = /bits/ 64 <1586000000>;
+			opp-microvolt = <893750>;
+		};
+		opp09 {
+			opp-hz = /bits/ 64 <1625000000>;
+			opp-microvolt = <906250>;
+		};
+		opp10 {
+			opp-hz = /bits/ 64 <1677000000>;
+			opp-microvolt = <931250>;
+		};
+		opp11 {
+			opp-hz = /bits/ 64 <1716000000>;
+			opp-microvolt = <943750>;
+		};
+		opp12 {
+			opp-hz = /bits/ 64 <1781000000>;
+			opp-microvolt = <975000>;
+		};
+		opp13 {
+			opp-hz = /bits/ 64 <1846000000>;
+			opp-microvolt = <1000000>;
+		};
+		opp14 {
+			opp-hz = /bits/ 64 <1924000000>;
+			opp-microvolt = <1025000>;
+		};
+		opp15 {
+			opp-hz = /bits/ 64 <1989000000>;
+			opp-microvolt = <1050000>;
+		};
+	};
+
+	cluster1_opp: opp_table1 {
+		compatible = "operating-points-v2";
+		opp-shared;
+		opp00 {
+			opp-hz = /bits/ 64 <793000000>;
+			opp-microvolt = <700000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <910000000>;
+			opp-microvolt = <725000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <1014000000>;
+			opp-microvolt = <750000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <1131000000>;
+			opp-microvolt = <775000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <1248000000>;
+			opp-microvolt = <800000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1326000000>;
+			opp-microvolt = <825000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1417000000>;
+			opp-microvolt = <850000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <1508000000>;
+			opp-microvolt = <875000>;
+		};
+		opp08 {
+			opp-hz = /bits/ 64 <1586000000>;
+			opp-microvolt = <900000>;
+		};
+		opp09 {
+			opp-hz = /bits/ 64 <1625000000>;
+			opp-microvolt = <912500>;
+		};
+		opp10 {
+			opp-hz = /bits/ 64 <1677000000>;
+			opp-microvolt = <931250>;
+		};
+		opp11 {
+			opp-hz = /bits/ 64 <1716000000>;
+			opp-microvolt = <950000>;
+		};
+		opp12 {
+			opp-hz = /bits/ 64 <1781000000>;
+			opp-microvolt = <975000>;
+		};
+		opp13 {
+			opp-hz = /bits/ 64 <1846000000>;
+			opp-microvolt = <1000000>;
+		};
+		opp14 {
+			opp-hz = /bits/ 64 <1924000000>;
+			opp-microvolt = <1025000>;
+		};
+		opp15 {
+			opp-hz = /bits/ 64 <1989000000>;
+			opp-microvolt = <1050000>;
+		};
+	};
+
+	cci_opp: opp_table2 {
+		compatible = "operating-points-v2";
+		opp-shared;
+		opp00 {
+			opp-hz = /bits/ 64 <273000000>;
+			opp-microvolt = <650000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <338000000>;
+			opp-microvolt = <687500>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <403000000>;
+			opp-microvolt = <718750>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <463000000>;
+			opp-microvolt = <756250>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <546000000>;
+			opp-microvolt = <800000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <624000000>;
+			opp-microvolt = <818750>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <689000000>;
+			opp-microvolt = <850000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <767000000>;
+			opp-microvolt = <868750>;
+		};
+		opp08 {
+			opp-hz = /bits/ 64 <845000000>;
+			opp-microvolt = <893750>;
+		};
+		opp09 {
+			opp-hz = /bits/ 64 <871000000>;
+			opp-microvolt = <906250>;
+		};
+		opp10 {
+			opp-hz = /bits/ 64 <923000000>;
+			opp-microvolt = <931250>;
+		};
+		opp11 {
+			opp-hz = /bits/ 64 <962000000>;
+			opp-microvolt = <943750>;
+		};
+		opp12 {
+			opp-hz = /bits/ 64 <1027000000>;
+			opp-microvolt = <975000>;
+		};
+		opp13 {
+			opp-hz = /bits/ 64 <1092000000>;
+			opp-microvolt = <1000000>;
+		};
+		opp14 {
+			opp-hz = /bits/ 64 <1144000000>;
+			opp-microvolt = <1025000>;
+		};
+		opp15 {
+			opp-hz = /bits/ 64 <1196000000>;
+			opp-microvolt = <1050000>;
+		};
+	};
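+
+	/*
+	 * Each oppNN entry above pairs a clock rate (opp-hz) with the
+	 * supply voltage (opp-microvolt) needed to sustain it, and
+	 * opp-shared makes the table cluster-wide: every CPU that
+	 * references it switches rate together. A board that cannot
+	 * supply the top voltage could drop an entry (sketch,
+	 * hypothetical override):
+	 *
+	 *	&cluster1_opp {
+	 *		/delete-node/ opp15;
+	 *	};
+	 */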
+
+	cci: cci {
+		compatible = "mediatek,mt8183-cci";
+		clocks = <&apmixedsys CLK_APMIXED_CCIPLL>;
+		clock-names = "cci_clock";
+		operating-points-v2 = <&cci_opp>;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&cpu0>;
+				};
+				core1 {
+					cpu = <&cpu1>;
+				};
+				core2 {
+					cpu = <&cpu2>;
+				};
+				core3 {
+					cpu = <&cpu3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&cpu4>;
+				};
+				core1 {
+					cpu = <&cpu5>;
+				};
+				core2 {
+					cpu = <&cpu6>;
+				};
+				core3 {
+					cpu = <&cpu7>;
+				};
+			};
+		};
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			reg = <0x000>;
+			enable-method = "psci";
+			capacity-dmips-mhz = <741>;
+			clocks = <&mcucfg CLK_MCU_MP0_SEL>,
+				 <&topckgen CLK_TOP_ARMPLL_DIV_PLL1>;
+			clock-names = "cpu", "intermediate";
+			operating-points-v2 = <&cluster0_opp>;
+			dynamic-power-coefficient = <84>;
+			#cooling-cells = <2>;
+		};
+
+		cpu1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			reg = <0x001>;
+			enable-method = "psci";
+			capacity-dmips-mhz = <741>;
+			clocks = <&mcucfg CLK_MCU_MP0_SEL>,
+				 <&topckgen CLK_TOP_ARMPLL_DIV_PLL1>;
+			clock-names = "cpu", "intermediate";
+			operating-points-v2 = <&cluster0_opp>;
+			dynamic-power-coefficient = <84>;
+			#cooling-cells = <2>;
+		};
+
+		cpu2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			reg = <0x002>;
+			enable-method = "psci";
+			capacity-dmips-mhz = <741>;
+			clocks = <&mcucfg CLK_MCU_MP0_SEL>,
+				 <&topckgen CLK_TOP_ARMPLL_DIV_PLL1>;
+			clock-names = "cpu", "intermediate";
+			operating-points-v2 = <&cluster0_opp>;
+			dynamic-power-coefficient = <84>;
+			#cooling-cells = <2>;
+		};
+
+		cpu3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			reg = <0x003>;
+			enable-method = "psci";
+			capacity-dmips-mhz = <741>;
+			clocks = <&mcucfg CLK_MCU_MP0_SEL>,
+				 <&topckgen CLK_TOP_ARMPLL_DIV_PLL1>;
+			clock-names = "cpu", "intermediate";
+			operating-points-v2 = <&cluster0_opp>;
+			dynamic-power-coefficient = <84>;
+			#cooling-cells = <2>;
+		};
+
+		cpu4: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a73";
+			reg = <0x100>;
+			enable-method = "psci";
+			capacity-dmips-mhz = <1024>;
+			clocks = <&mcucfg CLK_MCU_MP2_SEL>,
+				 <&topckgen CLK_TOP_ARMPLL_DIV_PLL1>;
+			clock-names = "cpu", "intermediate";
+			operating-points-v2 = <&cluster1_opp>;
+			dynamic-power-coefficient = <211>;
+			#cooling-cells = <2>;
+		};
+
+		cpu5: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a73";
+			reg = <0x101>;
+			enable-method = "psci";
+			capacity-dmips-mhz = <1024>;
+			clocks = <&mcucfg CLK_MCU_MP2_SEL>,
+				 <&topckgen CLK_TOP_ARMPLL_DIV_PLL1>;
+			clock-names = "cpu", "intermediate";
+			operating-points-v2 = <&cluster1_opp>;
+			dynamic-power-coefficient = <211>;
+			#cooling-cells = <2>;
+		};
+
+		cpu6: cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a73";
+			reg = <0x102>;
+			enable-method = "psci";
+			capacity-dmips-mhz = <1024>;
+			clocks = <&mcucfg CLK_MCU_MP2_SEL>,
+				 <&topckgen CLK_TOP_ARMPLL_DIV_PLL1>;
+			clock-names = "cpu", "intermediate";
+			operating-points-v2 = <&cluster1_opp>;
+			dynamic-power-coefficient = <211>;
+			#cooling-cells = <2>;
+		};
+
+		cpu7: cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a73";
+			reg = <0x103>;
+			enable-method = "psci";
+			capacity-dmips-mhz = <1024>;
+			clocks = <&mcucfg CLK_MCU_MP2_SEL>,
+				 <&topckgen CLK_TOP_ARMPLL_DIV_PLL1>;
+			clock-names = "cpu", "intermediate";
+			operating-points-v2 = <&cluster1_opp>;
+			dynamic-power-coefficient = <211>;
+			#cooling-cells = <2>;
+		};
+	};
+
+	pmu-a53 {
+		compatible = "arm,cortex-a53-pmu";
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW &ppi_cluster0>;
+	};
+
+	pmu-a73 {
+		compatible = "arm,cortex-a73-pmu";
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW &ppi_cluster1>;
+	};
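+
+	/*
+	 * The fourth interrupt cell in the two PMU nodes above is a PPI
+	 * partition phandle; the GIC below sets #interrupt-cells = <4>
+	 * and defines ppi_cluster0/ppi_cluster1, so each PMU's PPI 7 is
+	 * delivered only to the CPUs of its own cluster.
+	 */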
+
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+
+	clk26m: oscillator {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <26000000>;
+		clock-output-names = "clk26m";
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW 0>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW 0>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW 0>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW 0>;
+	};
+
+	soc {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		compatible = "simple-bus";
+		ranges;
+
+		watchdog: watchdog@10007000 {
+			compatible = "mediatek,mt8183-wdt",
+				      "mediatek,mt6589-wdt";
+			reg = <0 0x10007000 0 0x100>;
+		};
+
+		soc_data: soc_data@8000000 {
+			compatible = "mediatek,mt8183-efuse",
+				     "mediatek,efuse";
+			reg = <0 0x08000000 0 0x0010>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			status = "disabled";
+		};
+
+		gic: interrupt-controller@c000000 {
+			compatible = "arm,gic-v3";
+			#interrupt-cells = <4>;
+			interrupt-parent = <&gic>;
+			interrupt-controller;
+			reg = <0 0x0c000000 0 0x40000>,  /* GICD */
+			      <0 0x0c100000 0 0x200000>, /* GICR */
+			      <0 0x0c400000 0 0x2000>,   /* GICC */
+			      <0 0x0c410000 0 0x1000>,   /* GICH */
+			      <0 0x0c420000 0 0x2000>;   /* GICV */
+
+			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
+			ppi-partitions {
+				ppi_cluster0: interrupt-partition-0 {
+					affinity = <&cpu0 &cpu1 &cpu2 &cpu3>;
+				};
+				ppi_cluster1: interrupt-partition-1 {
+					affinity = <&cpu4 &cpu5 &cpu6 &cpu7>;
+				};
+			};
+		};
+
+		mcucfg: syscon@c530000 {
+			compatible = "mediatek,mt8183-mcucfg", "syscon";
+			reg = <0 0x0c530000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		sysirq: interrupt-controller@c530a80 {
+			compatible = "mediatek,mt8183-sysirq",
+				     "mediatek,mt6577-sysirq";
+			interrupt-controller;
+			#interrupt-cells = <3>;
+			interrupt-parent = <&gic>;
+			reg = <0 0x0c530a80 0 0x50>;
+		};
+
+		topckgen: syscon@10000000 {
+			compatible = "mediatek,mt8183-topckgen", "syscon";
+			reg = <0 0x10000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		infracfg: syscon@10001000 {
+			compatible = "mediatek,mt8183-infracfg", "syscon";
+			reg = <0 0x10001000 0 0x1000>;
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+		};
+
+		pio: pinctrl@10005000 {
+			compatible = "mediatek,mt8183-pinctrl";
+			reg = <0 0x10005000 0 0x1000>,
+			      <0 0x11f20000 0 0x1000>,
+			      <0 0x11e80000 0 0x1000>,
+			      <0 0x11e70000 0 0x1000>,
+			      <0 0x11e90000 0 0x1000>,
+			      <0 0x11d30000 0 0x1000>,
+			      <0 0x11d20000 0 0x1000>,
+			      <0 0x11c50000 0 0x1000>,
+			      <0 0x11f30000 0 0x1000>,
+			      <0 0x1000b000 0 0x1000>;
+			reg-names = "iocfg0", "iocfg1", "iocfg2",
+				    "iocfg3", "iocfg4", "iocfg5",
+				    "iocfg6", "iocfg7", "iocfg8",
+				    "eint";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pio 0 0 192>;
+			interrupt-controller;
+			interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+			#interrupt-cells = <2>;
+		};
+
+		scpsys: syscon@10006000 {
+			compatible = "mediatek,mt8183-scpsys", "syscon";
+			#power-domain-cells = <1>;
+			reg = <0 0x10006000 0 0x1000>;
+			clocks = <&topckgen CLK_TOP_MUX_AUD_INTBUS>,
+				 <&infracfg CLK_INFRA_AUDIO>,
+				 <&infracfg CLK_INFRA_AUDIO_26M_BCLK>,
+				 <&topckgen CLK_TOP_MUX_MFG>,
+				 <&topckgen CLK_TOP_MUX_MM>,
+				 <&topckgen CLK_TOP_MUX_CAM>,
+				 <&topckgen CLK_TOP_MUX_IMG>,
+				 <&topckgen CLK_TOP_MUX_IPU_IF>,
+				 <&topckgen CLK_TOP_MUX_DSP>,
+				 <&topckgen CLK_TOP_MUX_DSP1>,
+				 <&topckgen CLK_TOP_MUX_DSP2>,
+				 <&mmsys CLK_MM_SMI_COMMON>,
+				 <&mmsys CLK_MM_SMI_LARB0>,
+				 <&mmsys CLK_MM_SMI_LARB1>,
+				 <&mmsys CLK_MM_GALS_COMM0>,
+				 <&mmsys CLK_MM_GALS_COMM1>,
+				 <&mmsys CLK_MM_GALS_CCU2MM>,
+				 <&mmsys CLK_MM_GALS_IPU12MM>,
+				 <&mmsys CLK_MM_GALS_IMG2MM>,
+				 <&mmsys CLK_MM_GALS_CAM2MM>,
+				 <&mmsys CLK_MM_GALS_IPU2MM>,
+				 <&imgsys CLK_IMG_LARB5>,
+				 <&imgsys CLK_IMG_LARB2>,
+				 <&camsys CLK_CAM_LARB6>,
+				 <&camsys CLK_CAM_LARB3>,
+				 <&camsys CLK_CAM_SENINF>,
+				 <&camsys CLK_CAM_CAMSV0>,
+				 <&camsys CLK_CAM_CAMSV1>,
+				 <&camsys CLK_CAM_CAMSV2>,
+				 <&camsys CLK_CAM_CCU>,
+				 <&ipu_conn CLK_IPU_CONN_IPU>,
+				 <&ipu_conn CLK_IPU_CONN_AHB>,
+				 <&ipu_conn CLK_IPU_CONN_AXI>,
+				 <&ipu_conn CLK_IPU_CONN_ISP>,
+				 <&ipu_conn CLK_IPU_CONN_CAM_ADL>,
+				 <&ipu_conn CLK_IPU_CONN_IMG_ADL>;
+			clock-names = "audio", "audio1", "audio2",
+				      "mfg", "mm", "cam",
+				      "isp", "vpu", "vpu1",
+				      "vpu2", "vpu3", "mm-0",
+				      "mm-1", "mm-2", "mm-3",
+				      "mm-4", "mm-5", "mm-6",
+				      "mm-7", "mm-8", "mm-9",
+				      "isp-0", "isp-1", "cam-0",
+				      "cam-1", "cam-2", "cam-3",
+				      "cam-4", "cam-5", "cam-6",
+				      "vpu-0", "vpu-1", "vpu-2",
+				      "vpu-3", "vpu-4", "vpu-5";
+			infracfg = <&infracfg>;
+			smi_comm = <&smi_common>;
+		};
+
+		apmixedsys: syscon@1000c000 {
+			compatible = "mediatek,mt8183-apmixedsys", "syscon";
+			reg = <0 0x1000c000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		pwrap: pwrap@1000d000 {
+			compatible = "mediatek,mt8183-pwrap";
+			reg = <0 0x1000d000 0 0x1000>;
+			reg-names = "pwrap";
+			interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&topckgen CLK_TOP_MUX_PMICSPI>,
+				 <&infracfg CLK_INFRA_PMIC_AP>;
+			clock-names = "spi", "wrap";
+		};
+
+		auxadc: auxadc@11001000 {
+			compatible = "mediatek,mt8183-auxadc",
+				     "mediatek,mt8173-auxadc";
+			reg = <0 0x11001000 0 0x1000>;
+			clocks = <&infracfg CLK_INFRA_AUXADC>;
+			clock-names = "main";
+			#io-channel-cells = <1>;
+			status = "disabled";
+		};
+
+		iommu: iommu@10205000 {
+			compatible = "mediatek,mt8183-m4u";
+			reg = <0 0x10205000 0 0x1000>;
+			interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_LOW>;
+			mediatek,larbs = <&larb0 &larb1 &larb2 &larb3
+					  &larb4 &larb5 &larb6>;
+			#iommu-cells = <1>;
+		};
+
+		gce: gce@10238000 {
+			compatible = "mediatek,mt8183-gce";
+			reg = <0 0x10238000 0 0x4000>;
+			interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_LOW>;
+			#mbox-cells = <3>;
+			#subsys-cells = <3>;
+			clocks = <&infracfg CLK_INFRA_GCE>;
+			clock-names = "gce";
+		};
+
+		uart0: serial@11002000 {
+			compatible = "mediatek,mt8183-uart",
+				     "mediatek,mt6577-uart";
+			reg = <0 0x11002000 0 0x1000>;
+			interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&clk26m>, <&infracfg CLK_INFRA_UART0>;
+			clock-names = "baud", "bus";
+			status = "disabled";
+		};
+
+		uart1: serial@11003000 {
+			compatible = "mediatek,mt8183-uart",
+				     "mediatek,mt6577-uart";
+			reg = <0 0x11003000 0 0x1000>;
+			interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&clk26m>, <&infracfg CLK_INFRA_UART1>;
+			clock-names = "baud", "bus";
+			status = "disabled";
+		};
+
+		uart2: serial@11004000 {
+			compatible = "mediatek,mt8183-uart",
+				     "mediatek,mt6577-uart";
+			reg = <0 0x11004000 0 0x1000>;
+			interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&clk26m>, <&infracfg CLK_INFRA_UART2>;
+			clock-names = "baud", "bus";
+			status = "disabled";
+		};
+
+		svs: svs@1100b000 {
+			compatible = "mediatek,mt8183-svs";
+			reg = <0 0x1100b000 0 0x1000>;
+			interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_THERM>;
+			clock-names = "main_clk";
+			nvmem-cells = <&svs_calibration>,
+				      <&thermal_calibration>;
+			nvmem-cell-names = "svs-calibration-data",
+					   "calibration-data";
+
+			svs_cpu_little: svs_cpu_little {
+				compatible = "mediatek,mt8183-svs-cpu-little";
+				operating-points-v2 = <&cluster0_opp>;
+			};
+
+			svs_cpu_big: svs_cpu_big {
+				compatible = "mediatek,mt8183-svs-cpu-big";
+				operating-points-v2 = <&cluster1_opp>;
+			};
+
+			svs_cci: svs_cci {
+				compatible = "mediatek,mt8183-svs-cci";
+				operating-points-v2 = <&cci_opp>;
+			};
+
+			svs_gpu: svs_gpu {
+				compatible = "mediatek,mt8183-svs-gpu";
+				power-domains = <&scpsys MT8183_POWER_DOMAIN_MFG_2D>;
+				operating-points-v2 = <&gpu_opp_table>;
+			};
+		};
+
+		spi0: spi@1100a000 {
+			compatible = "mediatek,mt8183-spi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0 0x1100a000 0 0x1000>;
+			interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_SYSPLL_D5_D2>,
+				 <&topckgen CLK_TOP_MUX_SPI>,
+				 <&infracfg CLK_INFRA_SPI0>;
+			clock-names = "parent-clk", "sel-clk", "spi-clk";
+			status = "disabled";
+		};
+
+		spi1: spi@11010000 {
+			compatible = "mediatek,mt8183-spi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0 0x11010000 0 0x1000>;
+			interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_SYSPLL_D5_D2>,
+				 <&topckgen CLK_TOP_MUX_SPI>,
+				 <&infracfg CLK_INFRA_SPI1>;
+			clock-names = "parent-clk", "sel-clk", "spi-clk";
+			status = "disabled";
+		};
+
+		spi2: spi@11012000 {
+			compatible = "mediatek,mt8183-spi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0 0x11012000 0 0x1000>;
+			interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_SYSPLL_D5_D2>,
+				 <&topckgen CLK_TOP_MUX_SPI>,
+				 <&infracfg CLK_INFRA_SPI2>;
+			clock-names = "parent-clk", "sel-clk", "spi-clk";
+			status = "disabled";
+		};
+
+		spi3: spi@11013000 {
+			compatible = "mediatek,mt8183-spi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0 0x11013000 0 0x1000>;
+			interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_SYSPLL_D5_D2>,
+				 <&topckgen CLK_TOP_MUX_SPI>,
+				 <&infracfg CLK_INFRA_SPI3>;
+			clock-names = "parent-clk", "sel-clk", "spi-clk";
+			status = "disabled";
+		};
+
+		spi4: spi@11018000 {
+			compatible = "mediatek,mt8183-spi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0 0x11018000 0 0x1000>;
+			interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_SYSPLL_D5_D2>,
+				 <&topckgen CLK_TOP_MUX_SPI>,
+				 <&infracfg CLK_INFRA_SPI4>;
+			clock-names = "parent-clk", "sel-clk", "spi-clk";
+			status = "disabled";
+		};
+
+		spi5: spi@11019000 {
+			compatible = "mediatek,mt8183-spi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0 0x11019000 0 0x1000>;
+			interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_SYSPLL_D5_D2>,
+				 <&topckgen CLK_TOP_MUX_SPI>,
+				 <&infracfg CLK_INFRA_SPI5>;
+			clock-names = "parent-clk", "sel-clk", "spi-clk";
+			status = "disabled";
+		};
+
+		thermal: thermal@1100b000 {
+			#thermal-sensor-cells = <1>;
+			compatible = "mediatek,mt8183-thermal";
+			reg = <0 0x1100b000 0 0x1000>;
+			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_THERM>,
+				 <&infracfg CLK_INFRA_AUXADC>;
+			clock-names = "therm", "auxadc";
+			resets = <&infracfg MT8183_INFRACFG_AO_THERM_SW_RST>;
+			#reset-cells = <1>;
+			mediatek,auxadc = <&auxadc>;
+			mediatek,apmixedsys = <&apmixedsys>;
+			mediatek,hw-reset-temp = <117000>;
+			nvmem-cells = <&thermal_calibration>;
+			nvmem-cell-names = "calibration-data";
+		};
+
+		thermal-zones {
+			cpu_thermal: cpu_thermal {
+				polling-delay-passive = <100>;
+				polling-delay = <500>;
+				thermal-sensors = <&thermal 0>;
+				sustainable-power = <5000>;
+
+				trips {
+					threshold: trip-point@0 {
+						temperature = <68000>;
+						hysteresis = <2000>;
+						type = "passive";
+					};
+
+					target: trip-point@1 {
+						temperature = <85000>;
+						hysteresis = <2000>;
+						type = "passive";
+					};
+
+					cpu_crit: cpu-crit {
+						temperature = <115000>;
+						hysteresis = <2000>;
+						type = "critical";
+					};
+				};
+
+				cooling-maps {
+					map0 {
+						trip = <&target>;
+						cooling-device = <&cpu0
+							THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>,
+								 <&cpu1
+							THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>,
+								 <&cpu2
+							THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>,
+								 <&cpu3
+							THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>;
+						contribution = <3072>;
+					};
+					map1 {
+						trip = <&target>;
+						cooling-device = <&cpu4
+							THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>,
+								 <&cpu5
+							THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>,
+								 <&cpu6
+							THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>,
+								 <&cpu7
+							THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>;
+						contribution = <1024>;
+					};
+				};
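+
+				/*
+				 * The contribution values weight the
+				 * governor's power split 3:1 towards the
+				 * little cluster (3072 vs 1024); with
+				 * sustainable-power set, this is typically
+				 * consumed by the power-allocator governor.
+				 */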
+			};
+
+			/* tzts1 ~ tzts6 need neither polling nor thermal throttling */
+
+			tzts1: tzts1 {
+				polling-delay-passive = <0>;
+				polling-delay = <0>;
+				thermal-sensors = <&thermal 1>;
+				sustainable-power = <5000>;
+				trips {};
+				cooling-maps {};
+			};
+
+			tzts2: tzts2 {
+				polling-delay-passive = <0>;
+				polling-delay = <0>;
+				thermal-sensors = <&thermal 2>;
+				sustainable-power = <5000>;
+				trips {};
+				cooling-maps {};
+			};
+
+			tzts3: tzts3 {
+				polling-delay-passive = <0>;
+				polling-delay = <0>;
+				thermal-sensors = <&thermal 3>;
+				sustainable-power = <5000>;
+				trips {};
+				cooling-maps {};
+			};
+
+			tzts4: tzts4 {
+				polling-delay-passive = <0>;
+				polling-delay = <0>;
+				thermal-sensors = <&thermal 4>;
+				sustainable-power = <5000>;
+				trips {};
+				cooling-maps {};
+			};
+
+			tzts5: tzts5 {
+				polling-delay-passive = <0>;
+				polling-delay = <0>;
+				thermal-sensors = <&thermal 5>;
+				sustainable-power = <5000>;
+				trips {};
+				cooling-maps {};
+			};
+
+			tztsABB: tztsABB {
+				polling-delay-passive = <0>;
+				polling-delay = <0>;
+				thermal-sensors = <&thermal 6>;
+				sustainable-power = <5000>;
+				trips {};
+				cooling-maps {};
+			};
+		};
+
+		audiosys: syscon@11220000 {
+			compatible = "mediatek,mt8183-audiosys", "syscon";
+			reg = <0 0x11220000 0 0x1000>;
+			#clock-cells = <1>;
+			afe: mt8183-afe-pcm {
+				compatible = "mediatek,mt8183-audio";
+				interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_LOW>;
+				power-domains =
+					<&scpsys MT8183_POWER_DOMAIN_AUDIO>;
+				clocks = <&audiosys CLK_AUDIO_AFE>,
+					 <&audiosys CLK_AUDIO_DAC>,
+					 <&audiosys CLK_AUDIO_DAC_PREDIS>,
+					 <&audiosys CLK_AUDIO_ADC>,
+					 <&audiosys CLK_AUDIO_PDN_ADDA6_ADC>,
+					 <&audiosys CLK_AUDIO_22M>,
+					 <&audiosys CLK_AUDIO_24M>,
+					 <&audiosys CLK_AUDIO_APLL_TUNER>,
+					 <&audiosys CLK_AUDIO_APLL2_TUNER>,
+					 <&audiosys CLK_AUDIO_I2S1>,
+					 <&audiosys CLK_AUDIO_I2S2>,
+					 <&audiosys CLK_AUDIO_I2S3>,
+					 <&audiosys CLK_AUDIO_I2S4>,
+					 <&audiosys CLK_AUDIO_TDM>,
+					 <&audiosys CLK_AUDIO_TML>,
+					 <&infracfg CLK_INFRA_AUDIO>,
+					 <&infracfg CLK_INFRA_AUDIO_26M_BCLK>,
+					 <&topckgen CLK_TOP_MUX_AUDIO>,
+					 <&topckgen CLK_TOP_MUX_AUD_INTBUS>,
+					 <&topckgen CLK_TOP_SYSPLL_D2_D4>,
+					 <&topckgen CLK_TOP_MUX_AUD_1>,
+					 <&topckgen CLK_TOP_APLL1_CK>,
+					 <&topckgen CLK_TOP_MUX_AUD_2>,
+					 <&topckgen CLK_TOP_APLL2_CK>,
+					 <&topckgen CLK_TOP_MUX_AUD_ENG1>,
+					 <&topckgen CLK_TOP_APLL1_D8>,
+					 <&topckgen CLK_TOP_MUX_AUD_ENG2>,
+					 <&topckgen CLK_TOP_APLL2_D8>,
+					 <&topckgen CLK_TOP_MUX_APLL_I2S0>,
+					 <&topckgen CLK_TOP_MUX_APLL_I2S1>,
+					 <&topckgen CLK_TOP_MUX_APLL_I2S2>,
+					 <&topckgen CLK_TOP_MUX_APLL_I2S3>,
+					 <&topckgen CLK_TOP_MUX_APLL_I2S4>,
+					 <&topckgen CLK_TOP_MUX_APLL_I2S5>,
+					 <&topckgen CLK_TOP_APLL12_DIV0>,
+					 <&topckgen CLK_TOP_APLL12_DIV1>,
+					 <&topckgen CLK_TOP_APLL12_DIV2>,
+					 <&topckgen CLK_TOP_APLL12_DIV3>,
+					 <&topckgen CLK_TOP_APLL12_DIV4>,
+					 <&topckgen CLK_TOP_APLL12_DIVB>,
+					 /*<&topckgen CLK_TOP_APLL12_DIV5>,*/
+					 <&clk26m>;
+				clock-names = "aud_afe_clk",
+						  "aud_dac_clk",
+						  "aud_dac_predis_clk",
+						  "aud_adc_clk",
+						  "aud_adc_adda6_clk",
+						  "aud_apll22m_clk",
+						  "aud_apll24m_clk",
+						  "aud_apll1_tuner_clk",
+						  "aud_apll2_tuner_clk",
+						  "aud_i2s1_bclk_sw",
+						  "aud_i2s2_bclk_sw",
+						  "aud_i2s3_bclk_sw",
+						  "aud_i2s4_bclk_sw",
+						  "aud_tdm_clk",
+						  "aud_tml_clk",
+						  "aud_infra_clk",
+						  "mtkaif_26m_clk",
+						  "top_mux_audio",
+						  "top_mux_aud_intbus",
+						  "top_syspll_d2_d4",
+						  "top_mux_aud_1",
+						  "top_apll1_ck",
+						  "top_mux_aud_2",
+						  "top_apll2_ck",
+						  "top_mux_aud_eng1",
+						  "top_apll1_d8",
+						  "top_mux_aud_eng2",
+						  "top_apll2_d8",
+						  "top_i2s0_m_sel",
+						  "top_i2s1_m_sel",
+						  "top_i2s2_m_sel",
+						  "top_i2s3_m_sel",
+						  "top_i2s4_m_sel",
+						  "top_i2s5_m_sel",
+						  "top_apll12_div0",
+						  "top_apll12_div1",
+						  "top_apll12_div2",
+						  "top_apll12_div3",
+						  "top_apll12_div4",
+						  "top_apll12_divb",
+						  /*"top_apll12_div5",*/
+						  "top_clk26m_clk";
+			};
+		};
+
+		efuse: efuse@11f10000 {
+			compatible = "mediatek,mt8183-efuse",
+				     "mediatek,efuse";
+			reg = <0 0x11f10000 0 0x1000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			svs_calibration: calib@580 {
+				reg = <0x580 0x64>;
+			};
+			thermal_calibration: calib@180 {
+				reg = <0x180 0xc>;
+			};
+		};
+
+		mfgcfg: syscon@13000000 {
+			compatible = "mediatek,mt8183-mfgcfg", "syscon";
+			reg = <0 0x13000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		gpu: mali@13040000 {
+			compatible = "mediatek,mt8183-mali", "arm,mali-bifrost", "arm,mali-midgard";
+			reg = <0 0x13040000 0 0x4000>;
+			interrupts =
+				<GIC_SPI 280 IRQ_TYPE_LEVEL_LOW>,
+				<GIC_SPI 279 IRQ_TYPE_LEVEL_LOW>,
+				<GIC_SPI 278 IRQ_TYPE_LEVEL_LOW>;
+			interrupt-names = "JOB", "MMU", "GPU";
+
+			/*
+			 * Note: the properties below are not part of the
+			 * upstream binding.
+			 */
+			clocks =
+				<&topckgen CLK_TOP_MFGPLL_CK>,
+				<&topckgen CLK_TOP_MUX_MFG>,
+				<&clk26m>,
+				<&mfgcfg CLK_MFG_BG3D>;
+			clock-names =
+				"clk_main_parent",
+				"clk_mux",
+				"clk_sub_parent",
+				"subsys_mfg_cg";
+
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_MFG_CORE0>;
+
+			#cooling-cells = <2>;
+			cooling-min-level = <0>;
+			cooling-max-level = <15>;
+		};
+
+		gpu_core1: mali_gpu_core1 {
+			compatible = "mediatek,gpu_core1";
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_MFG_CORE1>;
+		};
+
+		gpu_core2: mali_gpu_core2 {
+			compatible = "mediatek,gpu_core2";
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_MFG_2D>;
+		};
+
+		gpu_opp_table: opp_table0 {
+			/*
+			 * Note: the "operating-points-v2-mali" compatible and
+			 * the opp-core-mask property are not part of the
+			 * upstream binding.
+			 */
+
+			compatible = "operating-points-v2", "operating-points-v2-mali";
+			opp-shared;
+
+			opp-300000000 {
+				opp-hz = /bits/ 64 <300000000>;
+				opp-microvolt = <625000>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-320000000 {
+				opp-hz = /bits/ 64 <320000000>;
+				opp-microvolt = <631250>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-340000000 {
+				opp-hz = /bits/ 64 <340000000>;
+				opp-microvolt = <637500>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-360000000 {
+				opp-hz = /bits/ 64 <360000000>;
+				opp-microvolt = <643750>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-380000000 {
+				opp-hz = /bits/ 64 <380000000>;
+				opp-microvolt = <650000>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-400000000 {
+				opp-hz = /bits/ 64 <400000000>;
+				opp-microvolt = <656250>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-420000000 {
+				opp-hz = /bits/ 64 <420000000>;
+				opp-microvolt = <662500>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-460000000 {
+				opp-hz = /bits/ 64 <460000000>;
+				opp-microvolt = <675000>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-500000000 {
+				opp-hz = /bits/ 64 <500000000>;
+				opp-microvolt = <687500>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-540000000 {
+				opp-hz = /bits/ 64 <540000000>;
+				opp-microvolt = <700000>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-580000000 {
+				opp-hz = /bits/ 64 <580000000>;
+				opp-microvolt = <712500>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-620000000 {
+				opp-hz = /bits/ 64 <620000000>;
+				opp-microvolt = <725000>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-653000000 {
+				opp-hz = /bits/ 64 <653000000>;
+				opp-microvolt = <743750>, /* Supply 0 */
+						<850000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-698000000 {
+				opp-hz = /bits/ 64 <698000000>;
+				opp-microvolt = <768750>, /* Supply 0 */
+						<868750>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-743000000 {
+				opp-hz = /bits/ 64 <743000000>;
+				opp-microvolt = <793750>, /* Supply 0 */
+						<893750>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+
+			opp-800000000 {
+				opp-hz = /bits/ 64 <800000000>;
+				opp-microvolt = <825000>, /* Supply 0 */
+						<925000>; /* Supply 1 */
+				opp-core-mask = /bits/ 64 <0xf>;
+			};
+		};
+
+		mmsys: syscon@14000000 {
+			compatible = "mediatek,mt8183-mmsys", "syscon";
+			reg = <0 0x14000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		display_components: dispsys@14000000 {
+			compatible = "mediatek,mt8183-display";
+			reg = <0 0x14000000 0 0x1000>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+		};
+
+		ovl0: ovl@14008000 {
+			compatible = "mediatek,mt8183-disp-ovl";
+			reg = <0 0x14008000 0 0x1000>;
+			interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			clocks = <&mmsys CLK_MM_DISP_OVL0>;
+			iommus = <&iommu M4U_PORT_DISP_OVL0>;
+			mediatek,larb = <&larb0>;
+		};
+
+		ovl_2l0: ovl@14009000 {
+			compatible = "mediatek,mt8183-disp-ovl-2l";
+			reg = <0 0x14009000 0 0x1000>;
+			interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			clocks = <&mmsys CLK_MM_DISP_OVL0_2L>;
+			iommus = <&iommu M4U_PORT_DISP_2L_OVL0_LARB0>;
+			mediatek,larb = <&larb0>;
+		};
+
+		ovl_2l1: ovl@1400a000 {
+			compatible = "mediatek,mt8183-disp-ovl-2l";
+			reg = <0 0x1400a000 0 0x1000>;
+			interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			clocks = <&mmsys CLK_MM_DISP_OVL1_2L>;
+			iommus = <&iommu M4U_PORT_DISP_2L_OVL1_LARB0>;
+			mediatek,larb = <&larb0>;
+		};
+
+		rdma0: rdma@1400b000 {
+			compatible = "mediatek,mt8183-disp-rdma";
+			reg = <0 0x1400b000 0 0x1000>;
+			interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			clocks = <&mmsys CLK_MM_DISP_RDMA0>;
+			iommus = <&iommu M4U_PORT_DISP_RDMA0>;
+			mediatek,larb = <&larb0>;
+		};
+
+		rdma1: rdma@1400c000 {
+			compatible = "mediatek,mt8183-disp-rdma1";
+			reg = <0 0x1400c000 0 0x1000>;
+			interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			clocks = <&mmsys CLK_MM_DISP_RDMA1>;
+			iommus = <&iommu M4U_PORT_DISP_RDMA1>;
+			mediatek,larb = <&larb0>;
+		};
+
+		color0: color@1400e000 {
+			compatible = "mediatek,mt8183-disp-color",
+				     "mediatek,mt8173-disp-color";
+			reg = <0 0x1400e000 0 0x1000>;
+			interrupts = <GIC_SPI 231 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			clocks = <&mmsys CLK_MM_DISP_COLOR0>;
+		};
+
+		ccorr0: ccorr@1400f000 {
+			compatible = "mediatek,mt8183-disp-ccorr";
+			reg = <0 0x1400f000 0 0x1000>;
+			interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			clocks = <&mmsys CLK_MM_DISP_CCORR0>;
+		};
+
+		aal0: aal@14010000 {
+			compatible = "mediatek,mt8183-disp-aal",
+				     "mediatek,mt8173-disp-aal";
+			reg = <0 0x14010000 0 0x1000>;
+			interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			clocks = <&mmsys CLK_MM_DISP_AAL0>;
+		};
+
+		gamma0: gamma@14011000 {
+			compatible = "mediatek,mt8183-disp-gamma",
+				     "mediatek,mt8173-disp-gamma";
+			reg = <0 0x14011000 0 0x1000>;
+			interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			clocks = <&mmsys CLK_MM_DISP_GAMMA0>;
+		};
+
+		dither0: dither@14012000 {
+			compatible = "mediatek,mt8183-disp-dither";
+			reg = <0 0x14012000 0 0x1000>;
+			interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			clocks = <&mmsys CLK_MM_DISP_DITHER0>;
+		};
+
+		mutex: mutex@14016000 {
+			compatible = "mediatek,mt8183-disp-mutex";
+			reg = <0 0x14016000 0 0x1000>;
+			interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+		};
+
+		larb0: larb@14017000 {
+			compatible = "mediatek,mt8183-smi-larb";
+			reg = <0 0x14017000 0 0x1000>;
+			mediatek,smi = <&smi_common>;
+			clocks = <&mmsys CLK_MM_SMI_LARB0>,
+				 <&mmsys CLK_MM_SMI_LARB0>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			clock-names = "apb", "smi";
+		};
+
+		smi_common: smi@14019000 {
+			compatible = "mediatek,mt8183-smi-common", "syscon";
+			reg = <0 0x14019000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_SMI_COMMON>,
+				 <&mmsys CLK_MM_SMI_COMMON>,
+				 <&mmsys CLK_MM_GALS_COMM0>,
+				 <&mmsys CLK_MM_GALS_COMM1>;
+			clock-names = "apb", "smi", "gals0", "gals1";
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+		};
+
+		imgsys: syscon@15020000 {
+			compatible = "mediatek,mt8183-imgsys", "syscon";
+			reg = <0 0x15020000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		larb5: larb@15021000 {
+			compatible = "mediatek,mt8183-smi-larb";
+			reg = <0 0x15021000 0 0x1000>;
+			mediatek,smi = <&smi_common>;
+			clocks = <&imgsys CLK_IMG_LARB5>, <&imgsys CLK_IMG_LARB5>,
+				 <&mmsys CLK_MM_GALS_IMG2MM>;
+			clock-names = "apb", "smi", "gals";
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_ISP>;
+		};
+
+		larb2: larb@1502f000 {
+			compatible = "mediatek,mt8183-smi-larb";
+			reg = <0 0x1502f000 0 0x1000>;
+			mediatek,smi = <&smi_common>;
+			clocks = <&imgsys CLK_IMG_LARB2>, <&imgsys CLK_IMG_LARB2>,
+				 <&mmsys CLK_MM_GALS_IPU2MM>;
+			clock-names = "apb", "smi", "gals";
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_ISP>;
+		};
+
+		vdecsys: syscon@16000000 {
+			compatible = "mediatek,mt8183-vdecsys", "syscon";
+			reg = <0 0x16000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		larb1: larb@16010000 {
+			compatible = "mediatek,mt8183-smi-larb";
+			reg = <0 0x16010000 0 0x1000>;
+			mediatek,smi = <&smi_common>;
+			clocks = <&vdecsys CLK_VDEC_VDEC>, <&vdecsys CLK_VDEC_LARB1>;
+			clock-names = "apb", "smi";
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_VDEC>;
+		};
+
+		vencsys: syscon@17000000 {
+			compatible = "mediatek,mt8183-vencsys", "syscon";
+			reg = <0 0x17000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		larb4: larb@17010000 {
+			compatible = "mediatek,mt8183-smi-larb";
+			reg = <0 0x17010000 0 0x1000>;
+			mediatek,smi = <&smi_common>;
+			clocks = <&vencsys CLK_VENC_LARB>,
+				 <&vencsys CLK_VENC_LARB>;
+			clock-names = "apb", "smi";
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_VENC>;
+		};
+
+		ipu_conn: syscon@19000000 {
+			compatible = "mediatek,mt8183-ipu_conn", "syscon";
+			reg = <0 0x19000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		ipu_adl: syscon@19010000 {
+			compatible = "mediatek,mt8183-ipu_adl", "syscon";
+			reg = <0 0x19010000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		ipu_core0: syscon@19180000 {
+			compatible = "mediatek,mt8183-ipu_core0", "syscon";
+			reg = <0 0x19180000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		ipu_core1: syscon@19280000 {
+			compatible = "mediatek,mt8183-ipu_core1", "syscon";
+			reg = <0 0x19280000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		camsys: syscon@1a000000 {
+			compatible = "mediatek,mt8183-camsys", "syscon";
+			reg = <0 0x1a000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		larb6: larb@1a001000 {
+			compatible = "mediatek,mt8183-smi-larb";
+			reg = <0 0x1a001000 0 0x1000>;
+			mediatek,smi = <&smi_common>;
+			clocks = <&camsys CLK_CAM_LARB6>, <&camsys CLK_CAM_LARB6>,
+				 <&mmsys CLK_MM_GALS_CAM2MM>;
+			clock-names = "apb", "smi", "gals";
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_CAM>;
+		};
+
+		larb3: larb@1a002000 {
+			compatible = "mediatek,mt8183-smi-larb";
+			reg = <0 0x1a002000 0 0x1000>;
+			mediatek,smi = <&smi_common>;
+			clocks = <&camsys CLK_CAM_LARB3>, <&camsys CLK_CAM_LARB3>,
+				 <&mmsys CLK_MM_GALS_IPU12MM>;
+			clock-names = "apb", "smi", "gals";
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_CAM>;
+		};
+
+		mmc0: mmc@11230000 {
+			compatible = "mediatek,mt8183-mmc";
+			reg = <0 0x11230000 0 0x1000>,
+			      <0 0x11f50000 0 0x1000>;
+			interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_MUX_MSDC50_0>,
+				 <&infracfg CLK_INFRA_MSDC0>,
+				 <&infracfg CLK_INFRA_MSDC0_SCK>;
+			clock-names = "source", "hclk", "source_cg";
+			status = "disabled";
+		};
+
+		mmc1: mmc@11240000 {
+			compatible = "mediatek,mt8183-mmc";
+			reg = <0 0x11240000 0 0x1000>,
+			      <0 0x11e10000 0 0x1000>;
+			interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_MUX_MSDC30_1>,
+				 <&infracfg CLK_INFRA_MSDC1>,
+				 <&infracfg CLK_INFRA_MSDC1_SCK>;
+			clock-names = "source", "hclk", "source_cg";
+			status = "disabled";
+		};
+
+		pwm0: pwm@1100e000 {
+			compatible = "mediatek,mt8183-disp-pwm";
+			reg = <0 0x1100e000 0 0x1000>;
+			interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			#pwm-cells = <2>;
+			clocks = <&topckgen CLK_TOP_MUX_DISP_PWM>,
+					<&infracfg CLK_INFRA_DISP_PWM>;
+			clock-names = "main", "mm";
+		};
+
+		pwm1: pwm@11006000 {
+			compatible = "mediatek,mt8183-pwm";
+			reg = <0 0x11006000 0 0x1000>;
+			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_PWM>,
+				 <&infracfg CLK_INFRA_PWM_HCLK>,
+				 <&infracfg CLK_INFRA_PWM1>,
+				 <&infracfg CLK_INFRA_PWM2>,
+				 <&infracfg CLK_INFRA_PWM3>,
+				 <&infracfg CLK_INFRA_PWM4>;
+			clock-names = "top", "main", "pwm1", "pwm2", "pwm3",
+				      "pwm4";
+		};
+
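+		/* MIPI TX D-PHY; its PLL output also feeds dsi0 as the "hs" clock */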
+		mipi_tx0: mipi-dphy@11e50000 {
+			compatible = "mediatek,mt8183-mipi-tx";
+			reg = <0 0x11e50000 0 0x1000>;
+			clocks = <&apmixedsys CLK_APMIXED_MIPID0_26M>;
+			clock-names = "ref_clk";
+			#clock-cells = <0>;
+			#phy-cells = <0>;
+			clock-output-names = "mipi_tx0_pll";
+		};
+
+		dsi0: dsi@14014000 {
+			compatible = "mediatek,mt8183-dsi";
+			reg = <0 0x14014000 0 0x1000>;
+			interrupts = <GIC_SPI 236 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			mediatek,syscon-dsi = <&mmsys 0x140>;
+			clocks = <&mmsys CLK_MM_DSI0_MM>,
+				<&mmsys CLK_MM_DSI0_IF>,
+				<&mipi_tx0>;
+			clock-names = "engine", "digital", "hs";
+			phys = <&mipi_tx0>;
+			phy-names = "dphy";
+		};
+
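+		/* T-PHY with one USB2 port and one USB3 port as subnodes */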
+		u3phy: usb-phy@11f40000 {
+			compatible = "mediatek,generic-tphy-v2";
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			status = "okay";
+
+			u2port0: usb-phy@11f40000 {
+				reg = <0 0x11f40000 0 0x700>;
+				clocks = <&clk26m>;
+				clock-names = "ref";
+				#phy-cells = <1>;
+				status = "okay";
+			};
+
+			u3port0: usb-phy@11f40700 {
+				reg = <0 0x11f40700 0 0x900>;
+				clocks = <&clk26m>;
+				clock-names = "ref";
+				#phy-cells = <1>;
+				status = "okay";
+			};
+		};
+
+		ssusb: usb@11201000 {
+			compatible ="mediatek,mt8183-mtu3", "mediatek,mtu3";
+			reg = <0 0x11201000 0 0x2e00>,
+			      <0 0x11203e00 0 0x0100>;
+			reg-names = "mac", "ippc";
+			interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>;
+			phys = <&u2port0 PHY_TYPE_USB2>;
+			clocks = <&infracfg CLK_INFRA_UNIPRO_SCK>,
+				 <&infracfg CLK_INFRA_USB>;
+			clock-names = "sys_ck", "ref_ck";
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			status = "disabled";
+
+			usb_host: xhci@11200000 {
+				compatible = "mediatek,mtk-xhci";
+				reg = <0 0x11200000 0 0x1000>;
+				reg-names = "mac";
+				interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_LOW>;
+				clocks = <&infracfg CLK_INFRA_UNIPRO_SCK>,
+					 <&infracfg CLK_INFRA_USB>;
+				clock-names = "sys_ck", "ref_ck";
+				status = "disabled";
+			};
+		};
+
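+		/*
+		 * For each I2C controller the first reg range is the
+		 * controller itself, the second its AP-DMA register set.
+		 */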
+		i2c0: i2c@11007000 {
+			compatible = "mediatek,mt8183-i2c";
+			reg = <0 0x11007000 0 0x1000>,
+				<0 0x11000080 0 0x80>;
+			interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_I2C0>,
+				 <&infracfg CLK_INFRA_AP_DMA>;
+			clock-names = "main", "dma";
+			clock-div = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c1: i2c@11011000 {
+			compatible = "mediatek,mt8183-i2c";
+			reg = <0 0x11011000 0 0x1000>,
+				<0 0x11000480 0 0x80>;
+			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_I2C4>,
+				 <&infracfg CLK_INFRA_AP_DMA>;
+			clock-names = "main", "dma";
+			clock-div = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c2: i2c@11009000 {
+			compatible = "mediatek,mt8183-i2c";
+			reg = <0 0x11009000 0 0x1000>,
+				<0 0x11000280 0 0x80>;
+			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_I2C2>,
+				 <&infracfg CLK_INFRA_AP_DMA>,
+				 <&infracfg CLK_INFRA_I2C2_ARBITER>;
+			clock-names = "main", "dma", "arb";
+			clock-div = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c3: i2c@1100f000 {
+			compatible = "mediatek,mt8183-i2c";
+			reg = <0 0x1100f000 0 0x1000>,
+				<0 0x11000400 0 0x80>;
+			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_I2C3>,
+				 <&infracfg CLK_INFRA_AP_DMA>;
+			clock-names = "main", "dma";
+			clock-div = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c4: i2c@11008000 {
+			compatible = "mediatek,mt8183-i2c";
+			id = <4>;
+			reg = <0 0x11008000 0 0x1000>,
+				<0 0x11000100 0 0x80>;
+			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_I2C1>,
+				 <&infracfg CLK_INFRA_AP_DMA>,
+				 <&infracfg CLK_INFRA_I2C1_ARBITER>;
+			clock-names = "main", "dma","arb";
+			clock-div = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c5: i2c@11016000 {
+			compatible = "mediatek,mt8183-i2c";
+			reg = <0 0x11016000 0 0x1000>,
+				<0 0x11000500 0 0x80>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_I2C5>,
+				 <&infracfg CLK_INFRA_AP_DMA>,
+				 <&infracfg CLK_INFRA_I2C5_ARBITER>;
+			clock-names = "main", "dma", "arb";
+			clock-div = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c6: i2c@11005000 {
+			compatible = "mediatek,mt8183-i2c";
+			reg = <0 0x11005000 0 0x1000>,
+				<0 0x11000600 0 0x80>;
+			interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_I2C6>,
+				 <&infracfg CLK_INFRA_AP_DMA>;
+			clock-names = "main", "dma";
+			clock-div = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c7: i2c@1101a000 {
+			compatible = "mediatek,mt8183-i2c";
+			reg = <0 0x1101a000 0 0x1000>,
+				<0 0x11000680 0 0x80>;
+			interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_I2C7>,
+				 <&infracfg CLK_INFRA_AP_DMA>;
+			clock-names = "main", "dma";
+			clock-div = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c8: i2c@1101b000 {
+			compatible = "mediatek,mt8183-i2c";
+			reg = <0 0x1101b000 0 0x1000>,
+				<0 0x11000700 0 0x80>;
+			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_I2C8>,
+				 <&infracfg CLK_INFRA_AP_DMA>;
+			clock-names = "main", "dma";
+			clock-div = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c9: i2c@11014000 {
+			compatible = "mediatek,mt8183-i2c";
+			reg = <0 0x11014000 0 0x1000>,
+				<0 0x11000180 0 0x80>;
+			interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_I2C1_IMM>,
+				 <&infracfg CLK_INFRA_AP_DMA>,
+				 <&infracfg CLK_INFRA_I2C1_ARBITER>;
+			clock-names = "main", "dma", "arb";
+			clock-div = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c10: i2c@11015000 {
+			compatible = "mediatek,mt8183-i2c";
+			reg = <0 0x11015000 0 0x1000>,
+				<0 0x11000300 0 0x80>;
+			interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_I2C2_IMM>,
+				 <&infracfg CLK_INFRA_AP_DMA>,
+				 <&infracfg CLK_INFRA_I2C2_ARBITER>;
+			clock-names = "main", "dma", "arb";
+			clock-div = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c11: i2c@11017000 {
+			compatible = "mediatek,mt8183-i2c";
+			reg = <0 0x11017000 0 0x1000>,
+				<0 0x11000580 0 0x80>;
+			interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&infracfg CLK_INFRA_I2C5_IMM>,
+				 <&infracfg CLK_INFRA_AP_DMA>,
+				 <&infracfg CLK_INFRA_I2C5_ARBITER>;
+			clock-names = "main", "dma", "arb";
+			clock-div = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		dpi0: dpi@14015000 {
+			compatible = "mediatek,mt8183-dpi";
+			reg = <0 0x14015000 0 0x1000>;
+			interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_LOW>;
+			power-domains = <&scpsys MT8183_POWER_DOMAIN_DISP>;
+			clocks = <&mmsys CLK_MM_DPI_IF>,
+				 <&mmsys CLK_MM_DPI_MM>,
+				 <&apmixedsys CLK_APMIXED_TVDPLL>;
+			clock-names = "pixel", "engine", "pll";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8516-pinfunc.h b/arch/arm64/boot/dts/mediatek/mt8516-pinfunc.h
new file mode 100644
index 0000000..73339bb
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8516-pinfunc.h
@@ -0,0 +1,663 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ */
+#ifndef __DTS_MT8516_PINFUNC_H
+#define __DTS_MT8516_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
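+/*
+ * Each macro encodes a pad and one of its mux functions:
+ * MTK_PIN_NO(x) is ((x) << 8), so a value is (pin number << 8) | mux mode.
+ */
+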
+#define MT8516_PIN_0_EINT0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT8516_PIN_0_EINT0__FUNC_PWM_B (MTK_PIN_NO(0) | 1)
+#define MT8516_PIN_0_EINT0__FUNC_I2S2_BCK (MTK_PIN_NO(0) | 3)
+#define MT8516_PIN_0_EINT0__FUNC_EXT_TXD0 (MTK_PIN_NO(0) | 4)
+#define MT8516_PIN_0_EINT0__FUNC_SQICS (MTK_PIN_NO(0) | 6)
+#define MT8516_PIN_0_EINT0__FUNC_DBG_MON_A_6 (MTK_PIN_NO(0) | 7)
+
+#define MT8516_PIN_1_EINT1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT8516_PIN_1_EINT1__FUNC_PWM_C (MTK_PIN_NO(1) | 1)
+#define MT8516_PIN_1_EINT1__FUNC_I2S2_DI (MTK_PIN_NO(1) | 3)
+#define MT8516_PIN_1_EINT1__FUNC_EXT_TXD1 (MTK_PIN_NO(1) | 4)
+#define MT8516_PIN_1_EINT1__FUNC_CONN_MCU_TDO (MTK_PIN_NO(1) | 5)
+#define MT8516_PIN_1_EINT1__FUNC_SQISO (MTK_PIN_NO(1) | 6)
+#define MT8516_PIN_1_EINT1__FUNC_DBG_MON_A_7 (MTK_PIN_NO(1) | 7)
+
+#define MT8516_PIN_2_EINT2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT8516_PIN_2_EINT2__FUNC_CLKM0 (MTK_PIN_NO(2) | 1)
+#define MT8516_PIN_2_EINT2__FUNC_I2S2_LRCK (MTK_PIN_NO(2) | 3)
+#define MT8516_PIN_2_EINT2__FUNC_EXT_TXD2 (MTK_PIN_NO(2) | 4)
+#define MT8516_PIN_2_EINT2__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(2) | 5)
+#define MT8516_PIN_2_EINT2__FUNC_SQISI (MTK_PIN_NO(2) | 6)
+#define MT8516_PIN_2_EINT2__FUNC_DBG_MON_A_8 (MTK_PIN_NO(2) | 7)
+
+#define MT8516_PIN_3_EINT3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT8516_PIN_3_EINT3__FUNC_CLKM1 (MTK_PIN_NO(3) | 1)
+#define MT8516_PIN_3_EINT3__FUNC_SPI_MI (MTK_PIN_NO(3) | 3)
+#define MT8516_PIN_3_EINT3__FUNC_EXT_TXD3 (MTK_PIN_NO(3) | 4)
+#define MT8516_PIN_3_EINT3__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(3) | 5)
+#define MT8516_PIN_3_EINT3__FUNC_SQIWP (MTK_PIN_NO(3) | 6)
+#define MT8516_PIN_3_EINT3__FUNC_DBG_MON_A_9 (MTK_PIN_NO(3) | 7)
+
+#define MT8516_PIN_4_EINT4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT8516_PIN_4_EINT4__FUNC_CLKM2 (MTK_PIN_NO(4) | 1)
+#define MT8516_PIN_4_EINT4__FUNC_SPI_MO (MTK_PIN_NO(4) | 3)
+#define MT8516_PIN_4_EINT4__FUNC_EXT_TXC (MTK_PIN_NO(4) | 4)
+#define MT8516_PIN_4_EINT4__FUNC_CONN_MCU_TCK (MTK_PIN_NO(4) | 5)
+#define MT8516_PIN_4_EINT4__FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(4) | 6)
+#define MT8516_PIN_4_EINT4__FUNC_DBG_MON_A_10 (MTK_PIN_NO(4) | 7)
+
+#define MT8516_PIN_5_EINT5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT8516_PIN_5_EINT5__FUNC_UCTS2 (MTK_PIN_NO(5) | 1)
+#define MT8516_PIN_5_EINT5__FUNC_SPI_CSB (MTK_PIN_NO(5) | 3)
+#define MT8516_PIN_5_EINT5__FUNC_EXT_RXER (MTK_PIN_NO(5) | 4)
+#define MT8516_PIN_5_EINT5__FUNC_CONN_MCU_TDI (MTK_PIN_NO(5) | 5)
+#define MT8516_PIN_5_EINT5__FUNC_CONN_TEST_CK (MTK_PIN_NO(5) | 6)
+#define MT8516_PIN_5_EINT5__FUNC_DBG_MON_A_11 (MTK_PIN_NO(5) | 7)
+
+#define MT8516_PIN_6_EINT6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT8516_PIN_6_EINT6__FUNC_URTS2 (MTK_PIN_NO(6) | 1)
+#define MT8516_PIN_6_EINT6__FUNC_SPI_CLK (MTK_PIN_NO(6) | 3)
+#define MT8516_PIN_6_EINT6__FUNC_EXT_RXC (MTK_PIN_NO(6) | 4)
+#define MT8516_PIN_6_EINT6__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(6) | 5)
+#define MT8516_PIN_6_EINT6__FUNC_DBG_MON_A_12 (MTK_PIN_NO(6) | 7)
+
+#define MT8516_PIN_7_EINT7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT8516_PIN_7_EINT7__FUNC_SQIRST (MTK_PIN_NO(7) | 1)
+#define MT8516_PIN_7_EINT7__FUNC_SDA1_0 (MTK_PIN_NO(7) | 3)
+#define MT8516_PIN_7_EINT7__FUNC_EXT_RXDV (MTK_PIN_NO(7) | 4)
+#define MT8516_PIN_7_EINT7__FUNC_CONN_MCU_TMS (MTK_PIN_NO(7) | 5)
+#define MT8516_PIN_7_EINT7__FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(7) | 6)
+#define MT8516_PIN_7_EINT7__FUNC_DBG_MON_A_13 (MTK_PIN_NO(7) | 7)
+
+#define MT8516_PIN_8_EINT8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT8516_PIN_8_EINT8__FUNC_SQICK (MTK_PIN_NO(8) | 1)
+#define MT8516_PIN_8_EINT8__FUNC_CLKM3 (MTK_PIN_NO(8) | 2)
+#define MT8516_PIN_8_EINT8__FUNC_SCL1_0 (MTK_PIN_NO(8) | 3)
+#define MT8516_PIN_8_EINT8__FUNC_EXT_RXD0 (MTK_PIN_NO(8) | 4)
+#define MT8516_PIN_8_EINT8__FUNC_ANT_SEL0 (MTK_PIN_NO(8) | 5)
+#define MT8516_PIN_8_EINT8__FUNC_DBG_MON_A_14 (MTK_PIN_NO(8) | 7)
+
+#define MT8516_PIN_9_EINT9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT8516_PIN_9_EINT9__FUNC_CLKM4 (MTK_PIN_NO(9) | 1)
+#define MT8516_PIN_9_EINT9__FUNC_SDA2_0 (MTK_PIN_NO(9) | 2)
+#define MT8516_PIN_9_EINT9__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(9) | 3)
+#define MT8516_PIN_9_EINT9__FUNC_EXT_RXD1 (MTK_PIN_NO(9) | 4)
+#define MT8516_PIN_9_EINT9__FUNC_ANT_SEL1 (MTK_PIN_NO(9) | 5)
+#define MT8516_PIN_9_EINT9__FUNC_DBG_MON_A_15 (MTK_PIN_NO(9) | 7)
+
+#define MT8516_PIN_10_EINT10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT8516_PIN_10_EINT10__FUNC_CLKM5 (MTK_PIN_NO(10) | 1)
+#define MT8516_PIN_10_EINT10__FUNC_SCL2_0 (MTK_PIN_NO(10) | 2)
+#define MT8516_PIN_10_EINT10__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(10) | 3)
+#define MT8516_PIN_10_EINT10__FUNC_EXT_RXD2 (MTK_PIN_NO(10) | 4)
+#define MT8516_PIN_10_EINT10__FUNC_ANT_SEL2 (MTK_PIN_NO(10) | 5)
+#define MT8516_PIN_10_EINT10__FUNC_DBG_MON_A_16 (MTK_PIN_NO(10) | 7)
+
+#define MT8516_PIN_11_EINT11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT8516_PIN_11_EINT11__FUNC_CLKM4 (MTK_PIN_NO(11) | 1)
+#define MT8516_PIN_11_EINT11__FUNC_PWM_C (MTK_PIN_NO(11) | 2)
+#define MT8516_PIN_11_EINT11__FUNC_CONN_TEST_CK (MTK_PIN_NO(11) | 3)
+#define MT8516_PIN_11_EINT11__FUNC_ANT_SEL3 (MTK_PIN_NO(11) | 4)
+#define MT8516_PIN_11_EINT11__FUNC_EXT_RXD3 (MTK_PIN_NO(11) | 6)
+#define MT8516_PIN_11_EINT11__FUNC_DBG_MON_A_17 (MTK_PIN_NO(11) | 7)
+
+#define MT8516_PIN_12_EINT12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT8516_PIN_12_EINT12__FUNC_CLKM5 (MTK_PIN_NO(12) | 1)
+#define MT8516_PIN_12_EINT12__FUNC_PWM_A (MTK_PIN_NO(12) | 2)
+#define MT8516_PIN_12_EINT12__FUNC_SPDIF_OUT (MTK_PIN_NO(12) | 3)
+#define MT8516_PIN_12_EINT12__FUNC_ANT_SEL4 (MTK_PIN_NO(12) | 4)
+#define MT8516_PIN_12_EINT12__FUNC_EXT_TXEN (MTK_PIN_NO(12) | 6)
+#define MT8516_PIN_12_EINT12__FUNC_DBG_MON_A_18 (MTK_PIN_NO(12) | 7)
+
+#define MT8516_PIN_13_EINT13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT8516_PIN_13_EINT13__FUNC_TSF_IN (MTK_PIN_NO(13) | 3)
+#define MT8516_PIN_13_EINT13__FUNC_ANT_SEL5 (MTK_PIN_NO(13) | 4)
+#define MT8516_PIN_13_EINT13__FUNC_SPDIF_IN (MTK_PIN_NO(13) | 6)
+#define MT8516_PIN_13_EINT13__FUNC_DBG_MON_A_19 (MTK_PIN_NO(13) | 7)
+
+#define MT8516_PIN_14_EINT14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT8516_PIN_14_EINT14__FUNC_I2S_8CH_DO1 (MTK_PIN_NO(14) | 2)
+#define MT8516_PIN_14_EINT14__FUNC_TDM_RX_MCK (MTK_PIN_NO(14) | 3)
+#define MT8516_PIN_14_EINT14__FUNC_ANT_SEL1 (MTK_PIN_NO(14) | 4)
+#define MT8516_PIN_14_EINT14__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(14) | 5)
+#define MT8516_PIN_14_EINT14__FUNC_NCLE (MTK_PIN_NO(14) | 6)
+#define MT8516_PIN_14_EINT14__FUNC_DBG_MON_B_8 (MTK_PIN_NO(14) | 7)
+
+#define MT8516_PIN_15_EINT15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT8516_PIN_15_EINT15__FUNC_I2S_8CH_LRCK (MTK_PIN_NO(15) | 2)
+#define MT8516_PIN_15_EINT15__FUNC_TDM_RX_BCK (MTK_PIN_NO(15) | 3)
+#define MT8516_PIN_15_EINT15__FUNC_ANT_SEL2 (MTK_PIN_NO(15) | 4)
+#define MT8516_PIN_15_EINT15__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(15) | 5)
+#define MT8516_PIN_15_EINT15__FUNC_NCEB1 (MTK_PIN_NO(15) | 6)
+#define MT8516_PIN_15_EINT15__FUNC_DBG_MON_B_9 (MTK_PIN_NO(15) | 7)
+
+#define MT8516_PIN_16_EINT16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define MT8516_PIN_16_EINT16__FUNC_I2S_8CH_BCK (MTK_PIN_NO(16) | 2)
+#define MT8516_PIN_16_EINT16__FUNC_TDM_RX_LRCK (MTK_PIN_NO(16) | 3)
+#define MT8516_PIN_16_EINT16__FUNC_ANT_SEL3 (MTK_PIN_NO(16) | 4)
+#define MT8516_PIN_16_EINT16__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(16) | 5)
+#define MT8516_PIN_16_EINT16__FUNC_NCEB0 (MTK_PIN_NO(16) | 6)
+#define MT8516_PIN_16_EINT16__FUNC_DBG_MON_B_10 (MTK_PIN_NO(16) | 7)
+
+#define MT8516_PIN_17_EINT17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define MT8516_PIN_17_EINT17__FUNC_I2S_8CH_MCK (MTK_PIN_NO(17) | 2)
+#define MT8516_PIN_17_EINT17__FUNC_TDM_RX_DI (MTK_PIN_NO(17) | 3)
+#define MT8516_PIN_17_EINT17__FUNC_IDDIG (MTK_PIN_NO(17) | 4)
+#define MT8516_PIN_17_EINT17__FUNC_ANT_SEL4 (MTK_PIN_NO(17) | 5)
+#define MT8516_PIN_17_EINT17__FUNC_NREB (MTK_PIN_NO(17) | 6)
+#define MT8516_PIN_17_EINT17__FUNC_DBG_MON_B_11 (MTK_PIN_NO(17) | 7)
+
+#define MT8516_PIN_18_EINT18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT8516_PIN_18_EINT18__FUNC_USB_DRVVBUS (MTK_PIN_NO(18) | 2)
+#define MT8516_PIN_18_EINT18__FUNC_I2S3_LRCK (MTK_PIN_NO(18) | 3)
+#define MT8516_PIN_18_EINT18__FUNC_CLKM1 (MTK_PIN_NO(18) | 4)
+#define MT8516_PIN_18_EINT18__FUNC_ANT_SEL3 (MTK_PIN_NO(18) | 5)
+#define MT8516_PIN_18_EINT18__FUNC_I2S2_BCK (MTK_PIN_NO(18) | 6)
+#define MT8516_PIN_18_EINT18__FUNC_DBG_MON_A_20 (MTK_PIN_NO(18) | 7)
+
+#define MT8516_PIN_19_EINT19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT8516_PIN_19_EINT19__FUNC_UCTS1 (MTK_PIN_NO(19) | 1)
+#define MT8516_PIN_19_EINT19__FUNC_IDDIG (MTK_PIN_NO(19) | 2)
+#define MT8516_PIN_19_EINT19__FUNC_I2S3_BCK (MTK_PIN_NO(19) | 3)
+#define MT8516_PIN_19_EINT19__FUNC_CLKM2 (MTK_PIN_NO(19) | 4)
+#define MT8516_PIN_19_EINT19__FUNC_ANT_SEL4 (MTK_PIN_NO(19) | 5)
+#define MT8516_PIN_19_EINT19__FUNC_I2S2_DI (MTK_PIN_NO(19) | 6)
+#define MT8516_PIN_19_EINT19__FUNC_DBG_MON_A_21 (MTK_PIN_NO(19) | 7)
+
+#define MT8516_PIN_20_EINT20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT8516_PIN_20_EINT20__FUNC_URTS1 (MTK_PIN_NO(20) | 1)
+#define MT8516_PIN_20_EINT20__FUNC_I2S3_DO (MTK_PIN_NO(20) | 3)
+#define MT8516_PIN_20_EINT20__FUNC_CLKM3 (MTK_PIN_NO(20) | 4)
+#define MT8516_PIN_20_EINT20__FUNC_ANT_SEL5 (MTK_PIN_NO(20) | 5)
+#define MT8516_PIN_20_EINT20__FUNC_I2S2_LRCK (MTK_PIN_NO(20) | 6)
+#define MT8516_PIN_20_EINT20__FUNC_DBG_MON_A_22 (MTK_PIN_NO(20) | 7)
+
+#define MT8516_PIN_21_EINT21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT8516_PIN_21_EINT21__FUNC_NRNB (MTK_PIN_NO(21) | 1)
+#define MT8516_PIN_21_EINT21__FUNC_ANT_SEL0 (MTK_PIN_NO(21) | 2)
+#define MT8516_PIN_21_EINT21__FUNC_I2S_8CH_DO4 (MTK_PIN_NO(21) | 3)
+#define MT8516_PIN_21_EINT21__FUNC_DBG_MON_B_31 (MTK_PIN_NO(21) | 7)
+
+#define MT8516_PIN_22_EINT22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT8516_PIN_22_EINT22__FUNC_I2S_8CH_DO2 (MTK_PIN_NO(22) | 2)
+#define MT8516_PIN_22_EINT22__FUNC_TSF_IN (MTK_PIN_NO(22) | 3)
+#define MT8516_PIN_22_EINT22__FUNC_USB_DRVVBUS (MTK_PIN_NO(22) | 4)
+#define MT8516_PIN_22_EINT22__FUNC_SPDIF_OUT (MTK_PIN_NO(22) | 5)
+#define MT8516_PIN_22_EINT22__FUNC_NRE_C (MTK_PIN_NO(22) | 6)
+#define MT8516_PIN_22_EINT22__FUNC_DBG_MON_B_12 (MTK_PIN_NO(22) | 7)
+
+#define MT8516_PIN_23_EINT23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT8516_PIN_23_EINT23__FUNC_I2S_8CH_DO3 (MTK_PIN_NO(23) | 2)
+#define MT8516_PIN_23_EINT23__FUNC_CLKM0 (MTK_PIN_NO(23) | 3)
+#define MT8516_PIN_23_EINT23__FUNC_IR (MTK_PIN_NO(23) | 4)
+#define MT8516_PIN_23_EINT23__FUNC_SPDIF_IN (MTK_PIN_NO(23) | 5)
+#define MT8516_PIN_23_EINT23__FUNC_NDQS_C (MTK_PIN_NO(23) | 6)
+#define MT8516_PIN_23_EINT23__FUNC_DBG_MON_B_13 (MTK_PIN_NO(23) | 7)
+
+#define MT8516_PIN_24_EINT24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT8516_PIN_24_EINT24__FUNC_ANT_SEL1 (MTK_PIN_NO(24) | 3)
+#define MT8516_PIN_24_EINT24__FUNC_UCTS2 (MTK_PIN_NO(24) | 4)
+#define MT8516_PIN_24_EINT24__FUNC_PWM_A (MTK_PIN_NO(24) | 5)
+#define MT8516_PIN_24_EINT24__FUNC_I2S0_MCK (MTK_PIN_NO(24) | 6)
+#define MT8516_PIN_24_EINT24__FUNC_DBG_MON_A_0 (MTK_PIN_NO(24) | 7)
+
+#define MT8516_PIN_25_EINT25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT8516_PIN_25_EINT25__FUNC_ANT_SEL0 (MTK_PIN_NO(25) | 3)
+#define MT8516_PIN_25_EINT25__FUNC_URTS2 (MTK_PIN_NO(25) | 4)
+#define MT8516_PIN_25_EINT25__FUNC_PWM_B (MTK_PIN_NO(25) | 5)
+#define MT8516_PIN_25_EINT25__FUNC_I2S_8CH_MCK (MTK_PIN_NO(25) | 6)
+#define MT8516_PIN_25_EINT25__FUNC_DBG_MON_A_1 (MTK_PIN_NO(25) | 7)
+
+#define MT8516_PIN_26_PWRAP_SPI0_MI__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT8516_PIN_26_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(26) | 1)
+#define MT8516_PIN_26_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(26) | 2)
+
+#define MT8516_PIN_27_PWRAP_SPI0_MO__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT8516_PIN_27_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(27) | 1)
+#define MT8516_PIN_27_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(27) | 2)
+
+#define MT8516_PIN_28_PWRAP_INT__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT8516_PIN_28_PWRAP_INT__FUNC_I2S0_MCK (MTK_PIN_NO(28) | 1)
+#define MT8516_PIN_28_PWRAP_INT__FUNC_I2S_8CH_MCK (MTK_PIN_NO(28) | 4)
+#define MT8516_PIN_28_PWRAP_INT__FUNC_I2S2_MCK (MTK_PIN_NO(28) | 5)
+#define MT8516_PIN_28_PWRAP_INT__FUNC_I2S3_MCK (MTK_PIN_NO(28) | 6)
+
+#define MT8516_PIN_29_PWRAP_SPI0_CK__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT8516_PIN_29_PWRAP_SPI0_CK__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(29) | 1)
+
+#define MT8516_PIN_30_PWRAP_SPI0_CSN__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define MT8516_PIN_30_PWRAP_SPI0_CSN__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(30) | 1)
+
+#define MT8516_PIN_31_RTC32K_CK__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define MT8516_PIN_31_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(31) | 1)
+
+#define MT8516_PIN_32_WATCHDOG__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define MT8516_PIN_32_WATCHDOG__FUNC_WATCHDOG (MTK_PIN_NO(32) | 1)
+
+#define MT8516_PIN_33_SRCLKENA__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT8516_PIN_33_SRCLKENA__FUNC_SRCLKENA0 (MTK_PIN_NO(33) | 1)
+
+#define MT8516_PIN_34_URXD2__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT8516_PIN_34_URXD2__FUNC_URXD2 (MTK_PIN_NO(34) | 1)
+#define MT8516_PIN_34_URXD2__FUNC_UTXD2 (MTK_PIN_NO(34) | 3)
+#define MT8516_PIN_34_URXD2__FUNC_DBG_SCL (MTK_PIN_NO(34) | 4)
+#define MT8516_PIN_34_URXD2__FUNC_I2S2_MCK (MTK_PIN_NO(34) | 6)
+#define MT8516_PIN_34_URXD2__FUNC_DBG_MON_B_0 (MTK_PIN_NO(34) | 7)
+
+#define MT8516_PIN_35_UTXD2__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT8516_PIN_35_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(35) | 1)
+#define MT8516_PIN_35_UTXD2__FUNC_URXD2 (MTK_PIN_NO(35) | 3)
+#define MT8516_PIN_35_UTXD2__FUNC_DBG_SDA (MTK_PIN_NO(35) | 4)
+#define MT8516_PIN_35_UTXD2__FUNC_I2S3_MCK (MTK_PIN_NO(35) | 6)
+#define MT8516_PIN_35_UTXD2__FUNC_DBG_MON_B_1 (MTK_PIN_NO(35) | 7)
+
+#define MT8516_PIN_36_MRG_CLK__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT8516_PIN_36_MRG_CLK__FUNC_MRG_CLK (MTK_PIN_NO(36) | 1)
+#define MT8516_PIN_36_MRG_CLK__FUNC_I2S0_BCK (MTK_PIN_NO(36) | 3)
+#define MT8516_PIN_36_MRG_CLK__FUNC_I2S3_BCK (MTK_PIN_NO(36) | 4)
+#define MT8516_PIN_36_MRG_CLK__FUNC_PCM0_CLK (MTK_PIN_NO(36) | 5)
+#define MT8516_PIN_36_MRG_CLK__FUNC_IR (MTK_PIN_NO(36) | 6)
+#define MT8516_PIN_36_MRG_CLK__FUNC_DBG_MON_A_2 (MTK_PIN_NO(36) | 7)
+
+#define MT8516_PIN_37_MRG_SYNC__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT8516_PIN_37_MRG_SYNC__FUNC_MRG_SYNC (MTK_PIN_NO(37) | 1)
+#define MT8516_PIN_37_MRG_SYNC__FUNC_I2S0_LRCK (MTK_PIN_NO(37) | 3)
+#define MT8516_PIN_37_MRG_SYNC__FUNC_I2S3_LRCK (MTK_PIN_NO(37) | 4)
+#define MT8516_PIN_37_MRG_SYNC__FUNC_PCM0_SYNC (MTK_PIN_NO(37) | 5)
+#define MT8516_PIN_37_MRG_SYNC__FUNC_EXT_COL (MTK_PIN_NO(37) | 6)
+#define MT8516_PIN_37_MRG_SYNC__FUNC_DBG_MON_A_3 (MTK_PIN_NO(37) | 7)
+
+#define MT8516_PIN_38_MRG_DI__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT8516_PIN_38_MRG_DI__FUNC_MRG_DI (MTK_PIN_NO(38) | 1)
+#define MT8516_PIN_38_MRG_DI__FUNC_I2S0_DI (MTK_PIN_NO(38) | 3)
+#define MT8516_PIN_38_MRG_DI__FUNC_I2S3_DO (MTK_PIN_NO(38) | 4)
+#define MT8516_PIN_38_MRG_DI__FUNC_PCM0_DI (MTK_PIN_NO(38) | 5)
+#define MT8516_PIN_38_MRG_DI__FUNC_EXT_MDIO (MTK_PIN_NO(38) | 6)
+#define MT8516_PIN_38_MRG_DI__FUNC_DBG_MON_A_4 (MTK_PIN_NO(38) | 7)
+
+#define MT8516_PIN_39_MRG_DO__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT8516_PIN_39_MRG_DO__FUNC_MRG_DO (MTK_PIN_NO(39) | 1)
+#define MT8516_PIN_39_MRG_DO__FUNC_I2S0_MCK (MTK_PIN_NO(39) | 3)
+#define MT8516_PIN_39_MRG_DO__FUNC_I2S3_MCK (MTK_PIN_NO(39) | 4)
+#define MT8516_PIN_39_MRG_DO__FUNC_PCM0_DO (MTK_PIN_NO(39) | 5)
+#define MT8516_PIN_39_MRG_DO__FUNC_EXT_MDC (MTK_PIN_NO(39) | 6)
+#define MT8516_PIN_39_MRG_DO__FUNC_DBG_MON_A_5 (MTK_PIN_NO(39) | 7)
+
+#define MT8516_PIN_40_KPROW0__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT8516_PIN_40_KPROW0__FUNC_KPROW0 (MTK_PIN_NO(40) | 1)
+#define MT8516_PIN_40_KPROW0__FUNC_DBG_MON_B_4 (MTK_PIN_NO(40) | 7)
+
+#define MT8516_PIN_41_KPROW1__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT8516_PIN_41_KPROW1__FUNC_KPROW1 (MTK_PIN_NO(41) | 1)
+#define MT8516_PIN_41_KPROW1__FUNC_IDDIG (MTK_PIN_NO(41) | 2)
+#define MT8516_PIN_41_KPROW1__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(41) | 3)
+#define MT8516_PIN_41_KPROW1__FUNC_DBG_MON_B_5 (MTK_PIN_NO(41) | 7)
+
+#define MT8516_PIN_42_KPCOL0__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT8516_PIN_42_KPCOL0__FUNC_KPCOL0 (MTK_PIN_NO(42) | 1)
+#define MT8516_PIN_42_KPCOL0__FUNC_DBG_MON_B_6 (MTK_PIN_NO(42) | 7)
+
+#define MT8516_PIN_43_KPCOL1__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT8516_PIN_43_KPCOL1__FUNC_KPCOL1 (MTK_PIN_NO(43) | 1)
+#define MT8516_PIN_43_KPCOL1__FUNC_USB_DRVVBUS (MTK_PIN_NO(43) | 2)
+#define MT8516_PIN_43_KPCOL1__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(43) | 3)
+#define MT8516_PIN_43_KPCOL1__FUNC_TSF_IN (MTK_PIN_NO(43) | 4)
+#define MT8516_PIN_43_KPCOL1__FUNC_DBG_MON_B_7 (MTK_PIN_NO(43) | 7)
+
+#define MT8516_PIN_44_JTMS__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT8516_PIN_44_JTMS__FUNC_JTMS (MTK_PIN_NO(44) | 1)
+#define MT8516_PIN_44_JTMS__FUNC_CONN_MCU_TMS (MTK_PIN_NO(44) | 2)
+#define MT8516_PIN_44_JTMS__FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(44) | 3)
+#define MT8516_PIN_44_JTMS__FUNC_DFD_TMS_XI (MTK_PIN_NO(44) | 5)
+#define MT8516_PIN_44_JTMS__FUNC_UDI_TMS_XI (MTK_PIN_NO(44) | 6)
+
+#define MT8516_PIN_45_JTCK__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT8516_PIN_45_JTCK__FUNC_JTCK (MTK_PIN_NO(45) | 1)
+#define MT8516_PIN_45_JTCK__FUNC_CONN_MCU_TCK (MTK_PIN_NO(45) | 2)
+#define MT8516_PIN_45_JTCK__FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(45) | 3)
+
+#define MT8516_PIN_46_JTDI__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT8516_PIN_46_JTDI__FUNC_JTDI (MTK_PIN_NO(46) | 1)
+#define MT8516_PIN_46_JTDI__FUNC_CONN_MCU_TDI (MTK_PIN_NO(46) | 2)
+
+#define MT8516_PIN_47_JTDO__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT8516_PIN_47_JTDO__FUNC_JTDO (MTK_PIN_NO(47) | 1)
+#define MT8516_PIN_47_JTDO__FUNC_CONN_MCU_TDO (MTK_PIN_NO(47) | 2)
+
+#define MT8516_PIN_48_SPI_CS__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT8516_PIN_48_SPI_CS__FUNC_SPI_CSB (MTK_PIN_NO(48) | 1)
+#define MT8516_PIN_48_SPI_CS__FUNC_I2S0_DI (MTK_PIN_NO(48) | 3)
+#define MT8516_PIN_48_SPI_CS__FUNC_I2S2_BCK (MTK_PIN_NO(48) | 4)
+#define MT8516_PIN_48_SPI_CS__FUNC_DBG_MON_A_23 (MTK_PIN_NO(48) | 7)
+
+#define MT8516_PIN_49_SPI_CK__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT8516_PIN_49_SPI_CK__FUNC_SPI_CLK (MTK_PIN_NO(49) | 1)
+#define MT8516_PIN_49_SPI_CK__FUNC_I2S0_LRCK (MTK_PIN_NO(49) | 3)
+#define MT8516_PIN_49_SPI_CK__FUNC_I2S2_DI (MTK_PIN_NO(49) | 4)
+#define MT8516_PIN_49_SPI_CK__FUNC_DBG_MON_A_24 (MTK_PIN_NO(49) | 7)
+
+#define MT8516_PIN_50_SPI_MI__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define MT8516_PIN_50_SPI_MI__FUNC_SPI_MI (MTK_PIN_NO(50) | 1)
+#define MT8516_PIN_50_SPI_MI__FUNC_SPI_MO (MTK_PIN_NO(50) | 2)
+#define MT8516_PIN_50_SPI_MI__FUNC_I2S0_BCK (MTK_PIN_NO(50) | 3)
+#define MT8516_PIN_50_SPI_MI__FUNC_I2S2_LRCK (MTK_PIN_NO(50) | 4)
+#define MT8516_PIN_50_SPI_MI__FUNC_DBG_MON_A_25 (MTK_PIN_NO(50) | 7)
+
+#define MT8516_PIN_51_SPI_MO__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define MT8516_PIN_51_SPI_MO__FUNC_SPI_MO (MTK_PIN_NO(51) | 1)
+#define MT8516_PIN_51_SPI_MO__FUNC_SPI_MI (MTK_PIN_NO(51) | 2)
+#define MT8516_PIN_51_SPI_MO__FUNC_I2S0_MCK (MTK_PIN_NO(51) | 3)
+#define MT8516_PIN_51_SPI_MO__FUNC_I2S2_MCK (MTK_PIN_NO(51) | 4)
+#define MT8516_PIN_51_SPI_MO__FUNC_DBG_MON_A_26 (MTK_PIN_NO(51) | 7)
+
+#define MT8516_PIN_52_SDA1__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define MT8516_PIN_52_SDA1__FUNC_SDA1_0 (MTK_PIN_NO(52) | 1)
+
+#define MT8516_PIN_53_SCL1__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT8516_PIN_53_SCL1__FUNC_SCL1_0 (MTK_PIN_NO(53) | 1)
+
+#define MT8516_PIN_54_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT8516_PIN_54_GPIO54__FUNC_PWM_B (MTK_PIN_NO(54) | 2)
+#define MT8516_PIN_54_GPIO54__FUNC_DBG_MON_B_2 (MTK_PIN_NO(54) | 7)
+
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_I2S0_DI (MTK_PIN_NO(55) | 1)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_UCTS0 (MTK_PIN_NO(55) | 2)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_I2S3_DO (MTK_PIN_NO(55) | 3)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_I2S_8CH_DO1 (MTK_PIN_NO(55) | 4)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_PWM_A (MTK_PIN_NO(55) | 5)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_I2S2_BCK (MTK_PIN_NO(55) | 6)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_DBG_MON_A_28 (MTK_PIN_NO(55) | 7)
+
+#define MT8516_PIN_56_I2S_LRCK__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT8516_PIN_56_I2S_LRCK__FUNC_I2S0_LRCK (MTK_PIN_NO(56) | 1)
+#define MT8516_PIN_56_I2S_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(56) | 3)
+#define MT8516_PIN_56_I2S_LRCK__FUNC_I2S_8CH_LRCK (MTK_PIN_NO(56) | 4)
+#define MT8516_PIN_56_I2S_LRCK__FUNC_PWM_B (MTK_PIN_NO(56) | 5)
+#define MT8516_PIN_56_I2S_LRCK__FUNC_I2S2_DI (MTK_PIN_NO(56) | 6)
+#define MT8516_PIN_56_I2S_LRCK__FUNC_DBG_MON_A_29 (MTK_PIN_NO(56) | 7)
+
+#define MT8516_PIN_57_I2S_BCK__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define MT8516_PIN_57_I2S_BCK__FUNC_I2S0_BCK (MTK_PIN_NO(57) | 1)
+#define MT8516_PIN_57_I2S_BCK__FUNC_URTS0 (MTK_PIN_NO(57) | 2)
+#define MT8516_PIN_57_I2S_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(57) | 3)
+#define MT8516_PIN_57_I2S_BCK__FUNC_I2S_8CH_BCK (MTK_PIN_NO(57) | 4)
+#define MT8516_PIN_57_I2S_BCK__FUNC_PWM_C (MTK_PIN_NO(57) | 5)
+#define MT8516_PIN_57_I2S_BCK__FUNC_I2S2_LRCK (MTK_PIN_NO(57) | 6)
+#define MT8516_PIN_57_I2S_BCK__FUNC_DBG_MON_A_30 (MTK_PIN_NO(57) | 7)
+
+#define MT8516_PIN_58_SDA0__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define MT8516_PIN_58_SDA0__FUNC_SDA0_0 (MTK_PIN_NO(58) | 1)
+
+#define MT8516_PIN_59_SCL0__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define MT8516_PIN_59_SCL0__FUNC_SCL0_0 (MTK_PIN_NO(59) | 1)
+
+#define MT8516_PIN_60_SDA2__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define MT8516_PIN_60_SDA2__FUNC_SDA2_0 (MTK_PIN_NO(60) | 1)
+#define MT8516_PIN_60_SDA2__FUNC_PWM_B (MTK_PIN_NO(60) | 2)
+
+#define MT8516_PIN_61_SCL2__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define MT8516_PIN_61_SCL2__FUNC_SCL2_0 (MTK_PIN_NO(61) | 1)
+#define MT8516_PIN_61_SCL2__FUNC_PWM_C (MTK_PIN_NO(61) | 2)
+
+#define MT8516_PIN_62_URXD0__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define MT8516_PIN_62_URXD0__FUNC_URXD0 (MTK_PIN_NO(62) | 1)
+#define MT8516_PIN_62_URXD0__FUNC_UTXD0 (MTK_PIN_NO(62) | 2)
+
+#define MT8516_PIN_63_UTXD0__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define MT8516_PIN_63_UTXD0__FUNC_UTXD0 (MTK_PIN_NO(63) | 1)
+#define MT8516_PIN_63_UTXD0__FUNC_URXD0 (MTK_PIN_NO(63) | 2)
+
+#define MT8516_PIN_64_URXD1__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define MT8516_PIN_64_URXD1__FUNC_URXD1 (MTK_PIN_NO(64) | 1)
+#define MT8516_PIN_64_URXD1__FUNC_UTXD1 (MTK_PIN_NO(64) | 2)
+#define MT8516_PIN_64_URXD1__FUNC_DBG_MON_A_27 (MTK_PIN_NO(64) | 7)
+
+#define MT8516_PIN_65_UTXD1__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define MT8516_PIN_65_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(65) | 1)
+#define MT8516_PIN_65_UTXD1__FUNC_URXD1 (MTK_PIN_NO(65) | 2)
+#define MT8516_PIN_65_UTXD1__FUNC_DBG_MON_A_31 (MTK_PIN_NO(65) | 7)
+
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_MSDC2_CMD (MTK_PIN_NO(68) | 1)
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_I2S_8CH_DO4 (MTK_PIN_NO(68) | 2)
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_SDA1_0 (MTK_PIN_NO(68) | 3)
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_USB_SDA (MTK_PIN_NO(68) | 5)
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_I2S3_BCK (MTK_PIN_NO(68) | 6)
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_DBG_MON_B_15 (MTK_PIN_NO(68) | 7)
+
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_MSDC2_CLK (MTK_PIN_NO(69) | 1)
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_I2S_8CH_DO3 (MTK_PIN_NO(69) | 2)
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_SCL1_0 (MTK_PIN_NO(69) | 3)
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_USB_SCL (MTK_PIN_NO(69) | 5)
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_I2S3_LRCK (MTK_PIN_NO(69) | 6)
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_DBG_MON_B_16 (MTK_PIN_NO(69) | 7)
+
+#define MT8516_PIN_70_MSDC2_DAT0__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define MT8516_PIN_70_MSDC2_DAT0__FUNC_MSDC2_DAT0 (MTK_PIN_NO(70) | 1)
+#define MT8516_PIN_70_MSDC2_DAT0__FUNC_I2S_8CH_DO2 (MTK_PIN_NO(70) | 2)
+#define MT8516_PIN_70_MSDC2_DAT0__FUNC_UTXD0 (MTK_PIN_NO(70) | 5)
+#define MT8516_PIN_70_MSDC2_DAT0__FUNC_I2S3_DO (MTK_PIN_NO(70) | 6)
+#define MT8516_PIN_70_MSDC2_DAT0__FUNC_DBG_MON_B_17 (MTK_PIN_NO(70) | 7)
+
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_MSDC2_DAT1 (MTK_PIN_NO(71) | 1)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_I2S_8CH_DO1 (MTK_PIN_NO(71) | 2)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_PWM_A (MTK_PIN_NO(71) | 3)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_I2S3_MCK (MTK_PIN_NO(71) | 4)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_URXD0 (MTK_PIN_NO(71) | 5)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_PWM_B (MTK_PIN_NO(71) | 6)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_DBG_MON_B_18 (MTK_PIN_NO(71) | 7)
+
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_MSDC2_DAT2 (MTK_PIN_NO(72) | 1)
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_I2S_8CH_LRCK (MTK_PIN_NO(72) | 2)
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_SDA2_0 (MTK_PIN_NO(72) | 3)
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_UTXD1 (MTK_PIN_NO(72) | 5)
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_PWM_C (MTK_PIN_NO(72) | 6)
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_DBG_MON_B_19 (MTK_PIN_NO(72) | 7)
+
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_MSDC2_DAT3 (MTK_PIN_NO(73) | 1)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_I2S_8CH_BCK (MTK_PIN_NO(73) | 2)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_SCL2_0 (MTK_PIN_NO(73) | 3)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(73) | 4)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_URXD1 (MTK_PIN_NO(73) | 5)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_PWM_A (MTK_PIN_NO(73) | 6)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_DBG_MON_B_20 (MTK_PIN_NO(73) | 7)
+
+#define MT8516_PIN_74_TDN3__FUNC_GPI74 (MTK_PIN_NO(74) | 0)
+#define MT8516_PIN_74_TDN3__FUNC_TDN3 (MTK_PIN_NO(74) | 1)
+
+#define MT8516_PIN_75_TDP3__FUNC_GPI75 (MTK_PIN_NO(75) | 0)
+#define MT8516_PIN_75_TDP3__FUNC_TDP3 (MTK_PIN_NO(75) | 1)
+
+#define MT8516_PIN_76_TDN2__FUNC_GPI76 (MTK_PIN_NO(76) | 0)
+#define MT8516_PIN_76_TDN2__FUNC_TDN2 (MTK_PIN_NO(76) | 1)
+
+#define MT8516_PIN_77_TDP2__FUNC_GPI77 (MTK_PIN_NO(77) | 0)
+#define MT8516_PIN_77_TDP2__FUNC_TDP2 (MTK_PIN_NO(77) | 1)
+
+#define MT8516_PIN_78_TCN__FUNC_GPI78 (MTK_PIN_NO(78) | 0)
+#define MT8516_PIN_78_TCN__FUNC_TCN (MTK_PIN_NO(78) | 1)
+
+#define MT8516_PIN_79_TCP__FUNC_GPI79 (MTK_PIN_NO(79) | 0)
+#define MT8516_PIN_79_TCP__FUNC_TCP (MTK_PIN_NO(79) | 1)
+
+#define MT8516_PIN_80_TDN1__FUNC_GPI80 (MTK_PIN_NO(80) | 0)
+#define MT8516_PIN_80_TDN1__FUNC_TDN1 (MTK_PIN_NO(80) | 1)
+
+#define MT8516_PIN_81_TDP1__FUNC_GPI81 (MTK_PIN_NO(81) | 0)
+#define MT8516_PIN_81_TDP1__FUNC_TDP1 (MTK_PIN_NO(81) | 1)
+
+#define MT8516_PIN_82_TDN0__FUNC_GPI82 (MTK_PIN_NO(82) | 0)
+#define MT8516_PIN_82_TDN0__FUNC_TDN0 (MTK_PIN_NO(82) | 1)
+
+#define MT8516_PIN_83_TDP0__FUNC_GPI83 (MTK_PIN_NO(83) | 0)
+#define MT8516_PIN_83_TDP0__FUNC_TDP0 (MTK_PIN_NO(83) | 1)
+
+#define MT8516_PIN_84_RDN0__FUNC_GPI84 (MTK_PIN_NO(84) | 0)
+#define MT8516_PIN_84_RDN0__FUNC_RDN0 (MTK_PIN_NO(84) | 1)
+
+#define MT8516_PIN_85_RDP0__FUNC_GPI85 (MTK_PIN_NO(85) | 0)
+#define MT8516_PIN_85_RDP0__FUNC_RDP0 (MTK_PIN_NO(85) | 1)
+
+#define MT8516_PIN_86_RDN1__FUNC_GPI86 (MTK_PIN_NO(86) | 0)
+#define MT8516_PIN_86_RDN1__FUNC_RDN1 (MTK_PIN_NO(86) | 1)
+
+#define MT8516_PIN_87_RDP1__FUNC_GPI87 (MTK_PIN_NO(87) | 0)
+#define MT8516_PIN_87_RDP1__FUNC_RDP1 (MTK_PIN_NO(87) | 1)
+
+#define MT8516_PIN_88_RCN__FUNC_GPI88 (MTK_PIN_NO(88) | 0)
+#define MT8516_PIN_88_RCN__FUNC_RCN (MTK_PIN_NO(88) | 1)
+
+#define MT8516_PIN_89_RCP__FUNC_GPI89 (MTK_PIN_NO(89) | 0)
+#define MT8516_PIN_89_RCP__FUNC_RCP (MTK_PIN_NO(89) | 1)
+
+#define MT8516_PIN_90_RDN2__FUNC_GPI90 (MTK_PIN_NO(90) | 0)
+#define MT8516_PIN_90_RDN2__FUNC_RDN2 (MTK_PIN_NO(90) | 1)
+#define MT8516_PIN_90_RDN2__FUNC_CMDAT8 (MTK_PIN_NO(90) | 2)
+
+#define MT8516_PIN_91_RDP2__FUNC_GPI91 (MTK_PIN_NO(91) | 0)
+#define MT8516_PIN_91_RDP2__FUNC_RDP2 (MTK_PIN_NO(91) | 1)
+#define MT8516_PIN_91_RDP2__FUNC_CMDAT9 (MTK_PIN_NO(91) | 2)
+
+#define MT8516_PIN_92_RDN3__FUNC_GPI92 (MTK_PIN_NO(92) | 0)
+#define MT8516_PIN_92_RDN3__FUNC_RDN3 (MTK_PIN_NO(92) | 1)
+#define MT8516_PIN_92_RDN3__FUNC_CMDAT4 (MTK_PIN_NO(92) | 2)
+
+#define MT8516_PIN_93_RDP3__FUNC_GPI93 (MTK_PIN_NO(93) | 0)
+#define MT8516_PIN_93_RDP3__FUNC_RDP3 (MTK_PIN_NO(93) | 1)
+#define MT8516_PIN_93_RDP3__FUNC_CMDAT5 (MTK_PIN_NO(93) | 2)
+
+#define MT8516_PIN_94_RCN_A__FUNC_GPI94 (MTK_PIN_NO(94) | 0)
+#define MT8516_PIN_94_RCN_A__FUNC_RCN_A (MTK_PIN_NO(94) | 1)
+#define MT8516_PIN_94_RCN_A__FUNC_CMDAT6 (MTK_PIN_NO(94) | 2)
+
+#define MT8516_PIN_95_RCP_A__FUNC_GPI95 (MTK_PIN_NO(95) | 0)
+#define MT8516_PIN_95_RCP_A__FUNC_RCP_A (MTK_PIN_NO(95) | 1)
+#define MT8516_PIN_95_RCP_A__FUNC_CMDAT7 (MTK_PIN_NO(95) | 2)
+
+#define MT8516_PIN_96_RDN1_A__FUNC_GPI96 (MTK_PIN_NO(96) | 0)
+#define MT8516_PIN_96_RDN1_A__FUNC_RDN1_A (MTK_PIN_NO(96) | 1)
+#define MT8516_PIN_96_RDN1_A__FUNC_CMDAT2 (MTK_PIN_NO(96) | 2)
+#define MT8516_PIN_96_RDN1_A__FUNC_CMCSD2 (MTK_PIN_NO(96) | 3)
+
+#define MT8516_PIN_97_RDP1_A__FUNC_GPI97 (MTK_PIN_NO(97) | 0)
+#define MT8516_PIN_97_RDP1_A__FUNC_RDP1_A (MTK_PIN_NO(97) | 1)
+#define MT8516_PIN_97_RDP1_A__FUNC_CMDAT3 (MTK_PIN_NO(97) | 2)
+#define MT8516_PIN_97_RDP1_A__FUNC_CMCSD3 (MTK_PIN_NO(97) | 3)
+
+#define MT8516_PIN_98_RDN0_A__FUNC_GPI98 (MTK_PIN_NO(98) | 0)
+#define MT8516_PIN_98_RDN0_A__FUNC_RDN0_A (MTK_PIN_NO(98) | 1)
+#define MT8516_PIN_98_RDN0_A__FUNC_CMHSYNC (MTK_PIN_NO(98) | 2)
+
+#define MT8516_PIN_99_RDP0_A__FUNC_GPI99 (MTK_PIN_NO(99) | 0)
+#define MT8516_PIN_99_RDP0_A__FUNC_RDP0_A (MTK_PIN_NO(99) | 1)
+#define MT8516_PIN_99_RDP0_A__FUNC_CMVSYNC (MTK_PIN_NO(99) | 2)
+
+#define MT8516_PIN_100_CMDAT0__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define MT8516_PIN_100_CMDAT0__FUNC_CMDAT0 (MTK_PIN_NO(100) | 1)
+#define MT8516_PIN_100_CMDAT0__FUNC_CMCSD0 (MTK_PIN_NO(100) | 2)
+#define MT8516_PIN_100_CMDAT0__FUNC_ANT_SEL2 (MTK_PIN_NO(100) | 3)
+#define MT8516_PIN_100_CMDAT0__FUNC_TDM_RX_MCK (MTK_PIN_NO(100) | 5)
+#define MT8516_PIN_100_CMDAT0__FUNC_DBG_MON_B_21 (MTK_PIN_NO(100) | 7)
+
+#define MT8516_PIN_101_CMDAT1__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define MT8516_PIN_101_CMDAT1__FUNC_CMDAT1 (MTK_PIN_NO(101) | 1)
+#define MT8516_PIN_101_CMDAT1__FUNC_CMCSD1 (MTK_PIN_NO(101) | 2)
+#define MT8516_PIN_101_CMDAT1__FUNC_ANT_SEL3 (MTK_PIN_NO(101) | 3)
+#define MT8516_PIN_101_CMDAT1__FUNC_CMFLASH (MTK_PIN_NO(101) | 4)
+#define MT8516_PIN_101_CMDAT1__FUNC_TDM_RX_BCK (MTK_PIN_NO(101) | 5)
+#define MT8516_PIN_101_CMDAT1__FUNC_DBG_MON_B_22 (MTK_PIN_NO(101) | 7)
+
+#define MT8516_PIN_102_CMMCLK__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define MT8516_PIN_102_CMMCLK__FUNC_CMMCLK (MTK_PIN_NO(102) | 1)
+#define MT8516_PIN_102_CMMCLK__FUNC_ANT_SEL4 (MTK_PIN_NO(102) | 3)
+#define MT8516_PIN_102_CMMCLK__FUNC_TDM_RX_LRCK (MTK_PIN_NO(102) | 5)
+#define MT8516_PIN_102_CMMCLK__FUNC_DBG_MON_B_23 (MTK_PIN_NO(102) | 7)
+
+#define MT8516_PIN_103_CMPCLK__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define MT8516_PIN_103_CMPCLK__FUNC_CMPCLK (MTK_PIN_NO(103) | 1)
+#define MT8516_PIN_103_CMPCLK__FUNC_CMCSK (MTK_PIN_NO(103) | 2)
+#define MT8516_PIN_103_CMPCLK__FUNC_ANT_SEL5 (MTK_PIN_NO(103) | 3)
+#define MT8516_PIN_103_CMPCLK__FUNC_TDM_RX_DI (MTK_PIN_NO(103) | 5)
+#define MT8516_PIN_103_CMPCLK__FUNC_DBG_MON_B_24 (MTK_PIN_NO(103) | 7)
+
+#define MT8516_PIN_104_MSDC1_CMD__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define MT8516_PIN_104_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(104) | 1)
+#define MT8516_PIN_104_MSDC1_CMD__FUNC_SQICS (MTK_PIN_NO(104) | 4)
+#define MT8516_PIN_104_MSDC1_CMD__FUNC_DBG_MON_B_25 (MTK_PIN_NO(104) | 7)
+
+#define MT8516_PIN_105_MSDC1_CLK__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT8516_PIN_105_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(105) | 1)
+#define MT8516_PIN_105_MSDC1_CLK__FUNC_SQISO (MTK_PIN_NO(105) | 4)
+#define MT8516_PIN_105_MSDC1_CLK__FUNC_DBG_MON_B_26 (MTK_PIN_NO(105) | 7)
+
+#define MT8516_PIN_106_MSDC1_DAT0__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT8516_PIN_106_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(106) | 1)
+#define MT8516_PIN_106_MSDC1_DAT0__FUNC_SQISI (MTK_PIN_NO(106) | 4)
+#define MT8516_PIN_106_MSDC1_DAT0__FUNC_DBG_MON_B_27 (MTK_PIN_NO(106) | 7)
+
+#define MT8516_PIN_107_MSDC1_DAT1__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT8516_PIN_107_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(107) | 1)
+#define MT8516_PIN_107_MSDC1_DAT1__FUNC_SQIWP (MTK_PIN_NO(107) | 4)
+#define MT8516_PIN_107_MSDC1_DAT1__FUNC_DBG_MON_B_28 (MTK_PIN_NO(107) | 7)
+
+#define MT8516_PIN_108_MSDC1_DAT2__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT8516_PIN_108_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(108) | 1)
+#define MT8516_PIN_108_MSDC1_DAT2__FUNC_SQIRST (MTK_PIN_NO(108) | 4)
+#define MT8516_PIN_108_MSDC1_DAT2__FUNC_DBG_MON_B_29 (MTK_PIN_NO(108) | 7)
+
+#define MT8516_PIN_109_MSDC1_DAT3__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT8516_PIN_109_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(109) | 1)
+#define MT8516_PIN_109_MSDC1_DAT3__FUNC_SQICK (MTK_PIN_NO(109) | 4)
+#define MT8516_PIN_109_MSDC1_DAT3__FUNC_DBG_MON_B_30 (MTK_PIN_NO(109) | 7)
+
+#define MT8516_PIN_110_MSDC0_DAT7__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT8516_PIN_110_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(110) | 1)
+#define MT8516_PIN_110_MSDC0_DAT7__FUNC_NLD7 (MTK_PIN_NO(110) | 4)
+
+#define MT8516_PIN_111_MSDC0_DAT6__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT8516_PIN_111_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(111) | 1)
+#define MT8516_PIN_111_MSDC0_DAT6__FUNC_NLD6 (MTK_PIN_NO(111) | 4)
+
+#define MT8516_PIN_112_MSDC0_DAT5__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT8516_PIN_112_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(112) | 1)
+#define MT8516_PIN_112_MSDC0_DAT5__FUNC_NLD4 (MTK_PIN_NO(112) | 4)
+
+#define MT8516_PIN_113_MSDC0_DAT4__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT8516_PIN_113_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(113) | 1)
+#define MT8516_PIN_113_MSDC0_DAT4__FUNC_NLD3 (MTK_PIN_NO(113) | 4)
+
+#define MT8516_PIN_114_MSDC0_RSTB__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT8516_PIN_114_MSDC0_RSTB__FUNC_MSDC0_RSTB (MTK_PIN_NO(114) | 1)
+#define MT8516_PIN_114_MSDC0_RSTB__FUNC_NLD0 (MTK_PIN_NO(114) | 4)
+
+#define MT8516_PIN_115_MSDC0_CMD__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT8516_PIN_115_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(115) | 1)
+#define MT8516_PIN_115_MSDC0_CMD__FUNC_NALE (MTK_PIN_NO(115) | 4)
+
+#define MT8516_PIN_116_MSDC0_CLK__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT8516_PIN_116_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(116) | 1)
+#define MT8516_PIN_116_MSDC0_CLK__FUNC_NWEB (MTK_PIN_NO(116) | 4)
+
+#define MT8516_PIN_117_MSDC0_DAT3__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT8516_PIN_117_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(117) | 1)
+#define MT8516_PIN_117_MSDC0_DAT3__FUNC_NLD1 (MTK_PIN_NO(117) | 4)
+
+#define MT8516_PIN_118_MSDC0_DAT2__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT8516_PIN_118_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(118) | 1)
+#define MT8516_PIN_118_MSDC0_DAT2__FUNC_NLD5 (MTK_PIN_NO(118) | 4)
+
+#define MT8516_PIN_119_MSDC0_DAT1__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT8516_PIN_119_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(119) | 1)
+#define MT8516_PIN_119_MSDC0_DAT1__FUNC_NLD8 (MTK_PIN_NO(119) | 4)
+
+#define MT8516_PIN_120_MSDC0_DAT0__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT8516_PIN_120_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(120) | 1)
+#define MT8516_PIN_120_MSDC0_DAT0__FUNC_WATCHDOG (MTK_PIN_NO(120) | 4)
+#define MT8516_PIN_120_MSDC0_DAT0__FUNC_NLD2 (MTK_PIN_NO(120) | 5)
+
+#endif /* __DTS_MT8516_PINFUNC_H */
diff --git a/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts
new file mode 100644
index 0000000..4bf46e5
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+/dts-v1/;
+
+#include "mt8516.dtsi"
+#include "pumpkin-common.dtsi"
+#include "pumpkin-emmc-common.dtsi"
+
+/ {
+	model = "Pumpkin MT8516";
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x40000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8516.dtsi b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
new file mode 100644
index 0000000..1aa0152
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <dt-bindings/clock/mt8516-clk.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/phy/phy.h>
+
+#include "mt8516-pinfunc.h"
+
+/ {
+	compatible = "mediatek,mt8516";
+	interrupt-parent = <&sysirq>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
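+	/* DVFS operating points shared by all four Cortex-A35 CPUs */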
+	cluster0_opp: opp_table0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+		opp-598000000 {
+			opp-hz = /bits/ 64 <598000000>;
+			opp-microvolt = <1150000>;
+		};
+		opp-747500000 {
+			opp-hz = /bits/ 64 <747500000>;
+			opp-microvolt = <1150000>;
+		};
+		opp-1040000000 {
+			opp-hz = /bits/ 64 <1040000000>;
+			opp-microvolt = <1200000>;
+		};
+		opp-1196000000 {
+			opp-hz = /bits/ 64 <1196000000>;
+			opp-microvolt = <1250000>;
+		};
+		opp-1300000000 {
+			opp-hz = /bits/ 64 <1300000000>;
+			opp-microvolt = <1300000>;
+		};
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a35";
+			reg = <0x0>;
+			enable-method = "psci";
+			cpu-idle-states = <&CLUSTER_SLEEP_0 &CLUSTER_SLEEP_0>,
+				<&CPU_SLEEP_0_0 &CPU_SLEEP_0_0 &CPU_SLEEP_0_0>;
+			clocks = <&infracfg CLK_IFR_MUX1_SEL>,
+				 <&topckgen CLK_TOP_MAINPLL_D2>,
+				 <&apmixedsys CLK_APMIXED_ARMPLL>;
+			clock-names = "cpu", "intermediate", "armpll";
+			operating-points-v2 = <&cluster0_opp>;
+		};
+
+		cpu1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a35";
+			reg = <0x1>;
+			enable-method = "psci";
+			cpu-idle-states = <&CLUSTER_SLEEP_0 &CLUSTER_SLEEP_0>,
+				<&CPU_SLEEP_0_0 &CPU_SLEEP_0_0 &CPU_SLEEP_0_0>;
+			clocks = <&infracfg CLK_IFR_MUX1_SEL>,
+				 <&topckgen CLK_TOP_MAINPLL_D2>,
+				 <&apmixedsys CLK_APMIXED_ARMPLL>;
+			clock-names = "cpu", "intermediate", "armpll";
+			operating-points-v2 = <&cluster0_opp>;
+		};
+
+		cpu2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a35";
+			reg = <0x2>;
+			enable-method = "psci";
+			cpu-idle-states = <&CLUSTER_SLEEP_0 &CLUSTER_SLEEP_0>,
+				<&CPU_SLEEP_0_0 &CPU_SLEEP_0_0 &CPU_SLEEP_0_0>;
+			clocks = <&infracfg CLK_IFR_MUX1_SEL>,
+				 <&topckgen CLK_TOP_MAINPLL_D2>,
+				 <&apmixedsys CLK_APMIXED_ARMPLL>;
+			clock-names = "cpu", "intermediate", "armpll";
+			operating-points-v2 = <&cluster0_opp>;
+		};
+
+		cpu3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a35";
+			reg = <0x3>;
+			enable-method = "psci";
+			cpu-idle-states = <&CLUSTER_SLEEP_0 &CLUSTER_SLEEP_0>,
+				<&CPU_SLEEP_0_0 &CPU_SLEEP_0_0 &CPU_SLEEP_0_0>;
+			clocks = <&infracfg CLK_IFR_MUX1_SEL>,
+				 <&topckgen CLK_TOP_MAINPLL_D2>,
+				 <&apmixedsys CLK_APMIXED_ARMPLL>;
+			clock-names = "cpu", "intermediate", "armpll";
+			operating-points-v2 = <&cluster0_opp>;
+		};
+
+		idle-states {
+			entry-method = "psci";
+
+			CPU_SLEEP_0_0: cpu-sleep-0-0 {
+				compatible = "arm,idle-state";
+				entry-latency-us = <600>;
+				exit-latency-us = <600>;
+				min-residency-us = <1200>;
+				arm,psci-suspend-param = <0x0010000>;
+			};
+
+			CLUSTER_SLEEP_0: cluster-sleep-0 {
+				compatible = "arm,idle-state";
+				entry-latency-us = <800>;
+				exit-latency-us = <1000>;
+				min-residency-us = <2000>;
+				arm,psci-suspend-param = <0x2010000>;
+			};
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
+		method      = "smc";
+	};
+
+	clk26m: clk26m {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <26000000>;
+		clock-output-names = "clk26m";
+	};
+
+	clk32k: clk32k {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32000>;
+		clock-output-names = "clk32k";
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/* 128 KiB reserved for ARM Trusted Firmware (BL31) */
+		bl31_secmon_reserved: secmon@43000000 {
+			no-map;
+			reg = <0 0x43000000 0 0x20000>;
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_PPI 13
+			     (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14
+			     (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11
+			     (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10
+			     (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	pmu {
+		compatible = "arm,armv8-pmuv3";
+		interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 5 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 6 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+	};
+
+	soc {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		compatible = "simple-bus";
+		ranges;
+
+		topckgen: topckgen@10000000 {
+			compatible = "mediatek,mt8516-topckgen", "syscon";
+			reg = <0 0x10000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		infracfg: infracfg@10001000 {
+			compatible = "mediatek,mt8516-infracfg", "syscon";
+			reg = <0 0x10001000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		apmixedsys: apmixedsys@10018000 {
+			compatible = "mediatek,mt8516-apmixedsys", "syscon";
+			reg = <0 0x10018000 0 0x710>;
+			#clock-cells = <1>;
+		};
+
+		toprgu: toprgu@10007000 {
+			compatible = "mediatek,mt8516-wdt",
+				     "mediatek,mt6589-wdt";
+			reg = <0 0x10007000 0 0x1000>;
+			interrupts = <GIC_SPI 198 IRQ_TYPE_EDGE_FALLING>;
+			#reset-cells = <1>;
+		};
+
+		timer: timer@10008000 {
+			compatible = "mediatek,mt8516-timer",
+				     "mediatek,mt6577-timer";
+			reg = <0 0x10008000 0 0x1000>;
+			interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_CLK26M_D2>,
+				 <&clk32k>,
+				 <&topckgen CLK_TOP_APXGPT>;
+			clock-names = "clk13m", "clk32k", "bus";
+		};
+
+		syscfg_pctl_a: syscfg_pctl_a@10005000 {
+			compatible = "mediatek,mt8516-pctl-a-syscfg", "syscon";
+			reg = <0 0x10005000 0 0x1000>;
+		};
+
+		pio: pinctrl@1000b000 {
+			compatible = "mediatek,mt8516-pinctrl";
+			reg = <0 0x1000b000 0 0x1000>;
+			mediatek,pctl-regmap = <&syscfg_pctl_a>;
+			pins-are-numbered;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
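+		/* PMIC wrapper: the SoC-side SPI interface to the PMIC */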
+		pwrap: pwrap@1000f000 {
+			compatible = "mediatek,mt8516-pwrap";
+			reg = <0 0x1000f000 0 0x1000>;
+			reg-names = "pwrap";
+			interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_PMICWRAP_26M>,
+				 <&topckgen CLK_TOP_PMICWRAP_AP>;
+			clock-names = "spi", "wrap";
+		};
+
+		sysirq: intpol-controller@10200620 {
+			compatible = "mediatek,mt8516-sysirq",
+				     "mediatek,mt6577-sysirq";
+			interrupt-controller;
+			#interrupt-cells = <3>;
+			interrupt-parent = <&gic>;
+			reg = <0 0x10200620 0 0x20>;
+		};
+
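+		/*
+		 * GIC-400: the four reg ranges are the distributor, CPU
+		 * interface, virtual interface control and virtual CPU
+		 * interface.
+		 */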
+		gic: interrupt-controller@10310000 {
+			compatible = "arm,gic-400";
+			#interrupt-cells = <3>;
+			interrupt-parent = <&gic>;
+			interrupt-controller;
+			reg = <0 0x10310000 0 0x1000>,
+			      <0 0x10320000 0 0x1000>,
+			      <0 0x10340000 0 0x2000>,
+			      <0 0x10360000 0 0x2000>;
+			interrupts = <GIC_PPI 9
+				(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+		};
+
+		uart0: serial@11005000 {
+			compatible = "mediatek,mt8516-uart",
+				     "mediatek,mt6577-uart";
+			reg = <0 0x11005000 0 0x1000>;
+			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_UART0_SEL>,
+				 <&topckgen CLK_TOP_UART0>;
+			clock-names = "baud","bus";
+			status = "disabled";
+		};
+
+		uart1: serial@11006000 {
+			compatible = "mediatek,mt8516-uart",
+				     "mediatek,mt6577-uart";
+			reg = <0 0x11006000 0 0x1000>;
+			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_UART1_SEL>,
+				 <&topckgen CLK_TOP_UART1>;
+			clock-names = "baud","bus";
+			status = "disabled";
+		};
+
+		uart2: serial@11007000 {
+			compatible = "mediatek,mt8516-uart",
+				     "mediatek,mt6577-uart";
+			reg = <0 0x11007000 0 0x1000>;
+			interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_UART2_SEL>,
+				 <&topckgen CLK_TOP_UART2>;
+			clock-names = "baud","bus";
+			status = "disabled";
+		};
+
+		i2c0: i2c@11009000 {
+			compatible = "mediatek,mt8516-i2c",
+				     "mediatek,mt2712-i2c";
+			reg = <0 0x11009000 0 0x90>,
+			      <0 0x11000180 0 0x80>;
+			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_AHB_INFRA_D2>,
+				 <&infracfg CLK_IFR_I2C0_SEL>,
+				 <&topckgen CLK_TOP_I2C0>,
+				 <&topckgen CLK_TOP_APDMA>;
+			clock-names = "main-source",
+				      "main-sel",
+				      "main",
+				      "dma";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c1: i2c@1100a000 {
+			compatible = "mediatek,mt8516-i2c",
+				     "mediatek,mt2712-i2c";
+			reg = <0 0x1100a000 0 0x90>,
+			      <0 0x11000200 0 0x80>;
+			interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_AHB_INFRA_D2>,
+				 <&infracfg CLK_IFR_I2C1_SEL>,
+				 <&topckgen CLK_TOP_I2C1>,
+				 <&topckgen CLK_TOP_APDMA>;
+			clock-names = "main-source",
+				      "main-sel",
+				      "main",
+				      "dma";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c2: i2c@1100b000 {
+			compatible = "mediatek,mt8516-i2c",
+				     "mediatek,mt2712-i2c";
+			reg = <0 0x1100b000 0 0x90>,
+			      <0 0x11000280 0 0x80>;
+			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_AHB_INFRA_D2>,
+				 <&infracfg CLK_IFR_I2C2_SEL>,
+				 <&topckgen CLK_TOP_I2C2>,
+				 <&topckgen CLK_TOP_APDMA>;
+			clock-names = "main-source",
+				      "main-sel",
+				      "main",
+				      "dma";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		spi: spi@1100c000 {
+			compatible = "mediatek,mt8516-spi",
+				     "mediatek,mt2712-spi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0 0x1100c000 0 0x1000>;
+			interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_UNIVPLL_D12>,
+				 <&topckgen CLK_TOP_SPI_SEL>,
+				 <&topckgen CLK_TOP_SPI>;
+			clock-names = "parent-clk", "sel-clk", "spi-clk";
+			status = "disabled";
+		};
+
+		mmc0: mmc@11120000 {
+			compatible = "mediatek,mt8516-mmc";
+			reg = <0 0x11120000 0 0x1000>;
+			interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_MSDC0>,
+				 <&topckgen CLK_TOP_AHB_INFRA_SEL>,
+				 <&topckgen CLK_TOP_MSDC0_INFRA>;
+			clock-names = "source", "hclk", "source_cg";
+			status = "disabled";
+		};
+
+		mmc1: mmc@11130000 {
+			compatible = "mediatek,mt8516-mmc";
+			reg = <0 0x11130000 0 0x1000>;
+			interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_MSDC1>,
+				 <&topckgen CLK_TOP_AHB_INFRA_SEL>,
+				 <&topckgen CLK_TOP_MSDC1_INFRA>;
+			clock-names = "source", "hclk", "source_cg";
+			status = "disabled";
+		};
+
+		mmc2: mmc@11170000 {
+			compatible = "mediatek,mt8516-mmc";
+			reg = <0 0x11170000 0 0x1000>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_MSDC2>,
+				 <&topckgen CLK_TOP_RG_MSDC2>,
+				 <&topckgen CLK_TOP_MSDC2_INFRA>;
+			clock-names = "source", "hclk", "source_cg";
+			status = "disabled";
+		};
+
+		ethernet: ethernet@11180000 {
+			compatible = "mediatek,mt8516-ethernet";
+			reg = <0 0x11180000 0 0x1000>,
+			      <0 0x10003400 0 0x1000>;
+			interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_RG_ETH>,
+				 <&topckgen CLK_TOP_66M_ETH>,
+				 <&topckgen CLK_TOP_133M_ETH>;
+			clock-names = "core", "reg", "trans";
+			status = "disabled";
+		};
+
+		usb0: usb@11100000 {
+			compatible = "mediatek,mt8516-musb",
+				     "mediatek,mtk-musb";
+			reg = <0 0x11100000 0 0x1000>;
+			interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>;
+			interrupt-names = "mc";
+			phys = <&usb0_port PHY_TYPE_USB2>;
+			clocks = <&topckgen CLK_TOP_USB>,
+				 <&topckgen CLK_TOP_USBIF>,
+				 <&topckgen CLK_TOP_USB_1P>;
+			clock-names = "main", "mcu", "univpll";
+			status = "disabled";
+		};
+
+		usb0_phy: usb@11110000 {
+			compatible = "mediatek,generic-tphy-v1";
+			reg = <0 0x11110000 0 0x800>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			status = "disabled";
+
+			usb0_port: usb-phy@11110800 {
+				reg = <0 0x11110800 0 0x100>;
+				clocks = <&topckgen CLK_TOP_USB_PHY48M>;
+				clock-names = "ref";
+				#phy-cells = <1>;
+			};
+		};
+
+		usb1: usb@11190000 {
+			compatible = "mediatek,mt8167-usb11";
+			cell-index = <1>;
+			reg = <0 0x11190000 0 0x10000>,
+			      <0 0x11110000 0 0x10000>;
+			interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_LOW>;
+			mode = <2>;
+			multipoint = <1>;
+			dyn_fifo = <1>;
+			soft_con = <1>;
+			dma = <1>;
+			num_eps = <8>;
+			dma_channels = <4>;
+			status = "disabled";
+		};
+
+		afe: audio-controller@11140000 {
+			compatible = "mediatek,mt8167-afe-pcm";
+			#sound-dai-cells = <0>;
+			reg = <0 0x11140000 0 0x1000>,
+			      <0 0x11141000 0 0x9000>;
+			interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_AUDIO>,
+				 <&topckgen CLK_TOP_APLL12_DIV0>,
+				 <&topckgen CLK_TOP_APLL12_DIV1>,
+				 <&topckgen CLK_TOP_APLL12_DIV2>,
+				 <&topckgen CLK_TOP_APLL12_DIV3>,
+				 <&topckgen CLK_TOP_APLL12_DIV4>,
+				 <&topckgen CLK_TOP_APLL12_DIV4B>,
+				 <&topckgen CLK_TOP_APLL12_DIV5>,
+				 <&topckgen CLK_TOP_APLL12_DIV5B>,
+				 <&topckgen CLK_TOP_APLL12_DIV6>,
+				 <&topckgen CLK_TOP_RG_AUD_SPDIF_IN>,
+				 <&topckgen CLK_TOP_RG_AUD_ENGEN1>,
+				 <&topckgen CLK_TOP_RG_AUD_ENGEN2>,
+				 <&topckgen CLK_TOP_RG_AUD1>,
+				 <&topckgen CLK_TOP_RG_AUD2>,
+				 <&topckgen CLK_TOP_AUD_I2S0_M_SEL>,
+				 <&topckgen CLK_TOP_AUD_I2S1_M_SEL>,
+				 <&topckgen CLK_TOP_AUD_I2S2_M_SEL>,
+				 <&topckgen CLK_TOP_AUD_I2S3_M_SEL>,
+				 <&topckgen CLK_TOP_AUD_I2S4_M_SEL>,
+				 <&topckgen CLK_TOP_AUD_I2S5_M_SEL>,
+				 <&topckgen CLK_TOP_AUD_SPDIF_B_SEL>,
+				 <&topckgen CLK_TOP_AUD_SPDIFIN_SEL>,
+				 <&topckgen CLK_TOP_UNIVPLL_D2>;
+			clock-names = "top_pdn_audio",
+				"apll12_div0",
+				"apll12_div1",
+				"apll12_div2",
+				"apll12_div3",
+				"apll12_div4",
+				"apll12_div4b",
+				"apll12_div5",
+				"apll12_div5b",
+				"apll12_div6",
+				"spdif_in",
+				"engen1",
+				"engen2",
+				"aud1",
+				"aud2",
+				"i2s0_m_sel",
+				"i2s1_m_sel",
+				"i2s2_m_sel",
+				"i2s3_m_sel",
+				"i2s4_m_sel",
+				"i2s5_m_sel",
+				"spdif_b_sel",
+				"spdifin_sel",
+				"univpll_div2";
+			assigned-clocks = <&topckgen CLK_TOP_AUD1_SEL>,
+				<&topckgen CLK_TOP_AUD2_SEL>,
+				<&topckgen CLK_TOP_AUD_ENGEN1_SEL>,
+				<&topckgen CLK_TOP_AUD_ENGEN2_SEL>;
+			assigned-clock-parents = <&topckgen CLK_TOP_APLL1>,
+				<&topckgen CLK_TOP_APLL2>,
+				<&topckgen CLK_TOP_RG_APLL1_D8_EN>,
+				<&topckgen CLK_TOP_RG_APLL2_D8_EN>;
+		};
+
+		xo: xo@10210000 {
+			compatible = "mediatek,mt8167-xo";
+			reg = <0 0x10210000 0 0x1000>;
+			default_capid = <0x00>;
+			clocks = <&topckgen CLK_TOP_BSI>,
+				 <&topckgen CLK_TOP_RG_BSI>,
+				 <&topckgen CLK_TOP_BSI_SEL>,
+				 <&topckgen CLK_TOP_CLK26M>;
+			clock-names = "bsi",
+				      "rgbsi",
+				      "bsisel",
+				      "clk26m";
+		};
+
+		pwm: pwm@11008000 {
+			compatible = "mediatek,mt8516-pwm";
+			reg = <0 0x11008000 0 0x1000>;
+			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&topckgen CLK_TOP_PWM>,
+				 <&topckgen CLK_TOP_PWM_B>,
+				 <&topckgen CLK_TOP_PWM1_FB>,
+				 <&topckgen CLK_TOP_PWM2_FB>,
+				 <&topckgen CLK_TOP_PWM3_FB>,
+				 <&topckgen CLK_TOP_PWM4_FB>,
+				 <&topckgen CLK_TOP_PWM5_FB>;
+			clock-names = "top", "main", "pwm1", "pwm2", "pwm3",
+				      "pwm4", "pwm5";
+		};
+
+		rng: rng@1020c000 {
+			compatible = "mediatek,mt8516-rng",
+				     "mediatek,mt7623-rng";
+			reg = <0 0x1020c000 0 0x100>;
+			clocks = <&topckgen CLK_TOP_TRNG>;
+			clock-names = "rng";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
new file mode 100644
index 0000000..e70d593
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+#include "mt6392.dtsi"
+
+/ {
+	aliases {
+		ethernet0 = &ethernet;
+		ethernet1 = &wifi;
+		ethernet2 = &bluetooth;
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:921600n8";
+	};
+
+	firmware {
+		optee: optee@4fd00000 {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		input-name = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&gpio_keys_default>;
+
+		volume-up {
+			gpios = <&pio 42 GPIO_ACTIVE_LOW>;
+			label = "volume_up";
+			linux,code = <115>;
+			wakeup-source;
+			debounce-interval = <15>;
+		};
+
+		volume-down {
+			gpios = <&pio 43 GPIO_ACTIVE_LOW>;
+			label = "volume_down";
+			linux,code = <114>;
+			wakeup-source;
+			debounce-interval = <15>;
+		};
+	};
+
+	mt8167_audio_codec: mt8167_audio_codec {
+		compatible = "mediatek,mt8167-codec";
+		clocks = <&topckgen CLK_TOP_AUDIO>;
+		clock-names = "bus";
+		mediatek,afe-regmap = <&afe>;
+		mediatek,apmixedsys-regmap = <&apmixedsys>;
+		mediatek,pwrap-regmap = <&pwrap>;
+		mediatek,speaker-mode = <0>; /* 0(CLASSD) 1(CLASSAB) */
+		mediatek,dmic-wire-mode = <1>; /* 0(ONE_WIRE) 1(TWO_WIRE) */
+	};
+
+	sound: sound {
+		compatible = "mediatek,mt8516-soc-pumpkin";
+		mediatek,platform = <&afe>;
+		mediatek,audio-codec = <&mt8167_audio_codec>;
+		status = "okay";
+	};
+};
+
+&cpu0 {
+	proc-supply = <&mt6392_vproc_reg>;
+};
+
+&cpu1 {
+	proc-supply = <&mt6392_vproc_reg>;
+};
+
+&cpu2 {
+	proc-supply = <&mt6392_vproc_reg>;
+};
+
+&cpu3 {
+	proc-supply = <&mt6392_vproc_reg>;
+};
+
+&i2c0 {
+	clock-div = <2>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins_a>;
+	status = "okay";
+
+	tca6416: gpio@20 {
+		compatible = "ti,tca6416";
+		reg = <0x20>;
+		rst-gpio = <&pio 65 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&tca6416_pins>;
+
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		eint20_mux_sel0 {
+			gpio-hog;
+			gpios = <0 0>;
+			input;
+			line-name = "eint20_mux_sel0";
+		};
+
+		expcon_mux_sel1 {
+			gpio-hog;
+			gpios = <1 0>;
+			input;
+			line-name = "expcon_mux_sel1";
+		};
+
+		mrg_di_mux_sel2 {
+			gpio-hog;
+			gpios = <2 0>;
+			input;
+			line-name = "mrg_di_mux_sel2";
+		};
+
+		sd_sdio_mux_sel3 {
+			gpio-hog;
+			gpios = <3 0>;
+			input;
+			line-name = "sd_sdio_mux_sel3";
+		};
+
+		sd_sdio_mux_ctrl7 {
+			gpio-hog;
+			gpios = <7 0>;
+			output-low;
+			line-name = "sd_sdio_mux_ctrl7";
+		};
+
+		hw_id0 {
+			gpio-hog;
+			gpios = <8 0>;
+			input;
+			line-name = "hw_id0";
+		};
+
+		hw_id1 {
+			gpio-hog;
+			gpios = <9 0>;
+			input;
+			line-name = "hw_id1";
+		};
+
+		hw_id2 {
+			gpio-hog;
+			gpios = <10 0>;
+			input;
+			line-name = "hw_id2";
+		};
+
+		fg_int_n {
+			gpio-hog;
+			gpios = <11 0>;
+			input;
+			line-name = "fg_int_n";
+		};
+
+		usba_pwr_en {
+			gpio-hog;
+			gpios = <12 0>;
+			output-high;
+			line-name = "usba_pwr_en";
+		};
+
+		wifi_3v3_pg {
+			gpio-hog;
+			gpios = <13 0>;
+			input;
+			line-name = "wifi_3v3_pg";
+		};
+
+		cam_rst {
+			gpio-hog;
+			gpios = <14 0>;
+			output-low;
+			line-name = "cam_rst";
+		};
+
+		cam_pwdn {
+			gpio-hog;
+			gpios = <15 0>;
+			output-low;
+			line-name = "cam_pwdn";
+		};
+	};
+};
+
+&i2c2 {
+	clock-div = <2>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c2_pins_a>;
+	status = "okay";
+};
+
+&mt6392_pmic {
+	interrupt-parent = <&pio>;
+	interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&pio {
+	pinctrl-names = "default";
+	pinctrl-0 = <&state_default>;
+
+	state_default: pinconf_default {
+	};
+
+	gpio_keys_default: gpiodefault {
+		pins_cmd_dat {
+			pinmux = <MT8516_PIN_42_KPCOL0__FUNC_GPIO42>,
+				 <MT8516_PIN_43_KPCOL1__FUNC_GPIO43>;
+			bias-pull-up;
+			input-enable;
+		};
+	};
+
+	i2c0_pins_a: i2c0@0 {
+		pins1 {
+			pinmux = <MT8516_PIN_58_SDA0__FUNC_SDA0_0>,
+				 <MT8516_PIN_59_SCL0__FUNC_SCL0_0>;
+			bias-disable;
+		};
+	};
+
+	tca6416_pins: pinmux_tca6416_pins {
+		gpio_mux_rst_n_pin {
+			pinmux = <MT8516_PIN_65_UTXD1__FUNC_GPIO65>;
+			output-high;
+		};
+
+		gpio_mux_int_n_pin {
+			pinmux = <MT8516_PIN_64_URXD1__FUNC_GPIO64>;
+			input-enable;
+			bias-pull-up;
+		};
+	};
+
+	mmc1_pins_default: mmc1default {
+		pins_cmd_dat {
+			pinmux = <MT8516_PIN_106_MSDC1_DAT0__FUNC_MSDC1_DAT0>,
+				 <MT8516_PIN_107_MSDC1_DAT1__FUNC_MSDC1_DAT1>,
+				 <MT8516_PIN_108_MSDC1_DAT2__FUNC_MSDC1_DAT2>,
+				 <MT8516_PIN_109_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
+				 <MT8516_PIN_104_MSDC1_CMD__FUNC_MSDC1_CMD>;
+			input-enable;
+			drive-strength = <MTK_DRIVE_6mA>;
+			bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+		};
+
+		pins_clk {
+			pinmux = <MT8516_PIN_105_MSDC1_CLK__FUNC_MSDC1_CLK>;
+			drive-strength = <MTK_DRIVE_6mA>;
+			bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+		};
+
+		pins_wifi_pwr_en {
+			pinmux = <MT8516_PIN_41_KPROW1__FUNC_GPIO41>;
+			output-low;
+		};
+
+		pins_pmu_en {
+			pinmux = <MT8516_PIN_40_KPROW0__FUNC_GPIO40>;
+			output-low;
+		};
+	};
+
+	mmc1_pins_uhs: mmc1@0 {
+		pins_cmd_dat {
+			pinmux = <MT8516_PIN_106_MSDC1_DAT0__FUNC_MSDC1_DAT0>,
+				 <MT8516_PIN_107_MSDC1_DAT1__FUNC_MSDC1_DAT1>,
+				 <MT8516_PIN_108_MSDC1_DAT2__FUNC_MSDC1_DAT2>,
+				 <MT8516_PIN_109_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
+				 <MT8516_PIN_104_MSDC1_CMD__FUNC_MSDC1_CMD>;
+			input-enable;
+			drive-strength = <MTK_DRIVE_6mA>;
+			bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+		};
+
+		pins_clk {
+			pinmux = <MT8516_PIN_105_MSDC1_CLK__FUNC_MSDC1_CLK>;
+			drive-strength = <MTK_DRIVE_8mA>;
+			bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+		};
+
+		pins_wifi_pwr_en {
+			pinmux = <MT8516_PIN_41_KPROW1__FUNC_GPIO41>;
+			output-high;
+		};
+
+		pins_pmu_en {
+			pinmux = <MT8516_PIN_40_KPROW0__FUNC_GPIO40>;
+			output-high;
+		};
+	};
+
+	mmc2_pins_default: mmc2default {
+		pins_cmd_dat {
+			pinmux = <MT8516_PIN_70_MSDC2_DAT0__FUNC_MSDC2_DAT0>,
+				 <MT8516_PIN_71_MSDC2_DAT1__FUNC_MSDC2_DAT1>,
+				 <MT8516_PIN_72_MSDC2_DAT2__FUNC_MSDC2_DAT2>,
+				 <MT8516_PIN_73_MSDC2_DAT3__FUNC_MSDC2_DAT3>,
+				 <MT8516_PIN_68_MSDC2_CMD__FUNC_MSDC2_CMD>;
+			input-enable;
+			bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+		};
+
+		pins_clk {
+			pinmux = <MT8516_PIN_69_MSDC2_CLK__FUNC_MSDC2_CLK>;
+			bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+		};
+
+		pin_cd {
+			pinmux = <MT8516_PIN_4_EINT4__FUNC_GPIO4>;
+			bias-pull-up;
+		};
+	};
+
+	mmc2_pins_uhs: mmc2@0 {
+		pins_cmd_dat {
+			pinmux = <MT8516_PIN_70_MSDC2_DAT0__FUNC_MSDC2_DAT0>,
+				 <MT8516_PIN_71_MSDC2_DAT1__FUNC_MSDC2_DAT1>,
+				 <MT8516_PIN_72_MSDC2_DAT2__FUNC_MSDC2_DAT2>,
+				 <MT8516_PIN_73_MSDC2_DAT3__FUNC_MSDC2_DAT3>,
+				 <MT8516_PIN_68_MSDC2_CMD__FUNC_MSDC2_CMD>;
+			input-enable;
+			drive-strength = <MTK_DRIVE_6mA>;
+			bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+		};
+
+		pins_clk {
+			pinmux = <MT8516_PIN_69_MSDC2_CLK__FUNC_MSDC2_CLK>;
+			drive-strength = <MTK_DRIVE_8mA>;
+			bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+		};
+	};
+
+	ethernet_pins_default: ethernet {
+		pins_ethernet {
+			pinmux = <MT8516_PIN_0_EINT0__FUNC_EXT_TXD0>,
+				 <MT8516_PIN_1_EINT1__FUNC_EXT_TXD1>,
+				 <MT8516_PIN_5_EINT5__FUNC_EXT_RXER>,
+				 <MT8516_PIN_6_EINT6__FUNC_EXT_RXC>,
+				 <MT8516_PIN_7_EINT7__FUNC_EXT_RXDV>,
+				 <MT8516_PIN_8_EINT8__FUNC_EXT_RXD0>,
+				 <MT8516_PIN_9_EINT9__FUNC_EXT_RXD1>,
+				 <MT8516_PIN_12_EINT12__FUNC_EXT_TXEN>,
+				 <MT8516_PIN_38_MRG_DI__FUNC_EXT_MDIO>,
+				 <MT8516_PIN_39_MRG_DO__FUNC_EXT_MDC>;
+		};
+	};
+
+	usb1_default_pins: usb1_pins {
+	};
+
+	spi_pins_a: spi@0 {
+		pins1 {
+			pinmux = <MT8516_PIN_49_SPI_CK__FUNC_SPI_CLK>,
+				 <MT8516_PIN_51_SPI_MO__FUNC_SPI_MO>;
+			bias-disable;
+		};
+	};
+
+	i2c2_pins_a: i2c2@0 {
+		pins1 {
+			pinmux = <MT8516_PIN_60_SDA2__FUNC_SDA2_0>,
+				 <MT8516_PIN_61_SCL2__FUNC_SCL2_0>;
+			bias-disable;
+		};
+	};
+};
+
+&mmc1 {
+	pinctrl-names = "default", "state_uhs";
+	pinctrl-0 = <&mmc1_pins_default>;
+	pinctrl-1 = <&mmc1_pins_uhs>;
+	bus-width = <4>;
+	max-frequency = <200000000>;
+	cap-sd-highspeed;
+	sd-uhs-sdr50;
+	sd-uhs-sdr104;
+	keep-power-in-suspend;
+	enable-sdio-wakeup;
+	cap-sdio-irq;
+	vmmc-supply = <&mt6392_vemc3v3_reg>;
+	vqmmc-supply = <&mt6392_vio18_reg>;
+	non-removable;
+	status = "okay";
+
+	mt7668 {
+		wifi: mt7668-wifi@0 {
+			compatible = "mediatek,mt8516-wifi",
+				     "mediatek,mt7668-wifi";
+			mac-address = [00 00 00 00 00 00];
+		};
+
+		bluetooth: mt7668-bluetooth@0 {
+			compatible = "mediatek,mt8516-bluetooth",
+				     "mediatek,mt7668-bluetooth";
+			mac-address = [00 00 00 00 00 00];
+		};
+	};
+};
+
+&mmc2 {
+	pinctrl-names = "default", "state_uhs";
+	pinctrl-0 = <&mmc2_pins_default>;
+	pinctrl-1 = <&mmc2_pins_uhs>;
+	cd-gpios = <&pio 4 GPIO_ACTIVE_HIGH>;
+	bus-width = <4>;
+	max-frequency = <200000000>;
+	cap-sd-highspeed;
+	sd-uhs-sdr50;
+	sd-uhs-sdr104;
+	vmmc-supply = <&mt6392_vmch_reg>;
+	vqmmc-supply = <&mt6392_vio18_reg>;
+//	vqmmc-supply = <&mt6392_vmc_reg>;
+	status = "okay";
+};
+
+&ethernet {
+	pinctrl-names = "default";
+	pinctrl-0 = <&ethernet_pins_default>;
+	eth-gpios = <&pio 13 GPIO_ACTIVE_HIGH>;
+	eth-regulator-supply = <&mt6392_vmch_reg>;
+	mac-address = [00 00 00 00 00 00];
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+	dr_mode = "peripheral";
+
+	usb_con: connector {
+		compatible = "usb-c-connector";
+		label = "USB-C";
+	};
+};
+
+&usb0_phy {
+	status = "okay";
+};
+
+&usb1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb1_default_pins>;
+	status = "okay";
+};
+
+&afe {
+	/* 0(HDMI) 1(I2S) 2(TDM) */
+	mediatek,tdm-out-mode = <1>;
+	/* 0(IRQ1) 1(IRQ2) 4(IRQ7) */
+	mediatek,awb-irq-mode = <4>;
+	/* 0(Separated Mode) 1(Shared Mode) */
+	mediatek,i2s-clock-modes = <1 1>;
+};
+
+&spi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi_pins_a>;
+};
diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-emmc-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-emmc-common.dtsi
new file mode 100644
index 0000000..6044c8c
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/pumpkin-emmc-common.dtsi
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 BayLibre, SAS.
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+&mmc0 {
+	pinctrl-names = "default", "state_uhs";
+	pinctrl-0 = <&mmc0_pins_default>;
+	pinctrl-1 = <&mmc0_pins_uhs>;
+	status = "okay";
+	bus-width = <8>;
+	max-frequency = <200000000>;
+	cap-mmc-highspeed;
+	mmc-hs200-1_8v;
+	cap-mmc-hw-reset;
+	vmmc-supply = <&mt6392_vemc3v3_reg>;
+	vqmmc-supply = <&mt6392_vio18_reg>;
+	non-removable;
+};
+
+&pio {
+	mmc0_pins_default: mmc0default {
+		pins_cmd_dat {
+			pinmux = <MT8516_PIN_120_MSDC0_DAT0__FUNC_MSDC0_DAT0>,
+				<MT8516_PIN_119_MSDC0_DAT1__FUNC_MSDC0_DAT1>,
+				<MT8516_PIN_118_MSDC0_DAT2__FUNC_MSDC0_DAT2>,
+				<MT8516_PIN_117_MSDC0_DAT3__FUNC_MSDC0_DAT3>,
+				<MT8516_PIN_113_MSDC0_DAT4__FUNC_MSDC0_DAT4>,
+				<MT8516_PIN_112_MSDC0_DAT5__FUNC_MSDC0_DAT5>,
+				<MT8516_PIN_111_MSDC0_DAT6__FUNC_MSDC0_DAT6>,
+				<MT8516_PIN_110_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
+				<MT8516_PIN_115_MSDC0_CMD__FUNC_MSDC0_CMD>;
+			input-enable;
+			bias-pull-up;
+		};
+
+		pins_clk {
+			pinmux = <MT8516_PIN_116_MSDC0_CLK__FUNC_MSDC0_CLK>;
+			bias-pull-down;
+		};
+
+		pins_rst {
+			pinmux = <MT8516_PIN_114_MSDC0_RSTB__FUNC_MSDC0_RSTB>;
+			bias-pull-up;
+		};
+	};
+
+	mmc0_pins_uhs: mmc0@0 {
+		pins_cmd_dat {
+			pinmux = <MT8516_PIN_120_MSDC0_DAT0__FUNC_MSDC0_DAT0>,
+				<MT8516_PIN_119_MSDC0_DAT1__FUNC_MSDC0_DAT1>,
+				<MT8516_PIN_118_MSDC0_DAT2__FUNC_MSDC0_DAT2>,
+				<MT8516_PIN_117_MSDC0_DAT3__FUNC_MSDC0_DAT3>,
+				<MT8516_PIN_113_MSDC0_DAT4__FUNC_MSDC0_DAT4>,
+				<MT8516_PIN_112_MSDC0_DAT5__FUNC_MSDC0_DAT5>,
+				<MT8516_PIN_111_MSDC0_DAT6__FUNC_MSDC0_DAT6>,
+				<MT8516_PIN_110_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
+				<MT8516_PIN_115_MSDC0_CMD__FUNC_MSDC0_CMD>;
+			input-enable;
+			drive-strength = <MTK_DRIVE_6mA>;
+			bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+		};
+
+		pins_clk {
+			pinmux = <MT8516_PIN_116_MSDC0_CLK__FUNC_MSDC0_CLK>;
+			drive-strength = <MTK_DRIVE_8mA>;
+			bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+		};
+
+		pins_rst {
+			pinmux = <MT8516_PIN_114_MSDC0_RSTB__FUNC_MSDC0_RSTB>;
+			bias-pull-up;
+		};
+	};
+};
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 928fc15..e1ea419 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -3386,3 +3386,26 @@
 	dev->of_node_reused = true;
 }
 EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
+
+int device_match_name(struct device *dev, const void *name)
+{
+	return sysfs_streq(dev_name(dev), name);
+}
+EXPORT_SYMBOL_GPL(device_match_name);
+
+int device_match_of_node(struct device *dev, const void *np)
+{
+	return dev->of_node == np;
+}
+EXPORT_SYMBOL_GPL(device_match_of_node);
+
+int device_match_fwnode(struct device *dev, const void *fwnode)
+{
+	return dev_fwnode(dev) == fwnode;
+}
+EXPORT_SYMBOL_GPL(device_match_fwnode);
+
+int device_match_devt(struct device *dev, const void *pdevt)
+{
+	return dev->devt == *(dev_t *)pdevt;
+}
+EXPORT_SYMBOL_GPL(device_match_devt);
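+
+/*
+ * Usage sketch (illustrative only; "example-dev" is a made-up name): these
+ * helpers are generic match callbacks for driver-core lookups, e.g. with the
+ * later bus_find_device() variant that takes a const match argument:
+ *
+ *	dev = bus_find_device(&platform_bus_type, NULL, "example-dev",
+ *			      device_match_name);
+ *
+ * The 4.19 bus_find_device() still takes a non-const callback, so callers
+ * may need a small wrapper or cast until the core lookup helpers are
+ * converted.
+ */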
diff --git a/drivers/base/devcon.c b/drivers/base/devcon.c
index d427e80..0bc6337 100644
--- a/drivers/base/devcon.c
+++ b/drivers/base/devcon.c
@@ -7,10 +7,84 @@
  */
 
 #include <linux/device.h>
+#include <linux/property.h>
 
 static DEFINE_MUTEX(devcon_lock);
 static LIST_HEAD(devcon_list);
 
+static void *
+fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
+			  void *data, devcon_match_fn_t match)
+{
+	struct device_connection con = { .id = con_id };
+	struct fwnode_handle *ep;
+	void *ret;
+
+	fwnode_graph_for_each_endpoint(fwnode, ep) {
+		con.fwnode = fwnode_graph_get_remote_port_parent(ep);
+		if (!fwnode_device_is_available(con.fwnode))
+			continue;
+
+		ret = match(&con, -1, data);
+		fwnode_handle_put(con.fwnode);
+		if (ret) {
+			fwnode_handle_put(ep);
+			return ret;
+		}
+	}
+	return NULL;
+}
+
+static void *
+fwnode_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
+		    void *data, devcon_match_fn_t match)
+{
+	struct device_connection con = { };
+	void *ret;
+	int i;
+
+	for (i = 0; ; i++) {
+		con.fwnode = fwnode_find_reference(fwnode, con_id, i);
+		if (IS_ERR(con.fwnode))
+			break;
+
+		ret = match(&con, -1, data);
+		fwnode_handle_put(con.fwnode);
+		if (ret)
+			return ret;
+	}
+
+	return NULL;
+}
+
+/**
+ * fwnode_connection_find_match - Find connection from a device node
+ * @fwnode: Device node with the connection
+ * @con_id: Identifier for the connection
+ * @data: Data for the match function
+ * @match: Function to check and convert the connection description
+ *
+ * Find a connection with unique identifier @con_id between @fwnode and another
+ * device node. @match will be used to convert the connection description to
+ * data the caller is expecting to be returned.
+ */
+void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
+				  const char *con_id, void *data,
+				  devcon_match_fn_t match)
+{
+	void *ret;
+
+	if (!fwnode || !match)
+		return NULL;
+
+	ret = fwnode_graph_devcon_match(fwnode, con_id, data, match);
+	if (ret)
+		return ret;
+
+	return fwnode_devcon_match(fwnode, con_id, data, match);
+}
+EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
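+
+/*
+ * Caller sketch (all "foo" names are hypothetical): a match callback
+ * typically converts the matched fwnode into the handle the subsystem
+ * returns to its consumers:
+ *
+ *	static void *foo_match(struct device_connection *con, int ep,
+ *			       void *data)
+ *	{
+ *		return fwnode_to_foo_handle(con->fwnode);
+ *	}
+ *
+ *	handle = fwnode_connection_find_match(dev_fwnode(dev), "foo-switch",
+ *					      NULL, foo_match);
+ */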
+
 /**
  * device_connection_find_match - Find physical connection to a device
  * @dev: Device with the connection
@@ -23,10 +97,9 @@
  * caller is expecting to be returned.
  */
 void *device_connection_find_match(struct device *dev, const char *con_id,
-			       void *data,
-			       void *(*match)(struct device_connection *con,
-					      int ep, void *data))
+				   void *data, devcon_match_fn_t match)
 {
+	struct fwnode_handle *fwnode = dev_fwnode(dev);
 	const char *devname = dev_name(dev);
 	struct device_connection *con;
 	void *ret = NULL;
@@ -35,6 +108,10 @@
 	if (!match)
 		return NULL;
 
+	ret = fwnode_connection_find_match(fwnode, con_id, data, match);
+	if (ret)
+		return ret;
+
 	mutex_lock(&devcon_lock);
 
 	list_for_each_entry(con, &devcon_list, list) {
@@ -75,12 +152,36 @@
 	NULL,
 };
 
+static int device_fwnode_match(struct device *dev, void *fwnode)
+{
+	return dev_fwnode(dev) == fwnode;
+}
+
+static void *device_connection_fwnode_match(struct device_connection *con)
+{
+	struct bus_type *bus;
+	struct device *dev;
+
+	for (bus = generic_match_buses[0]; bus; bus++) {
+		dev = bus_find_device(bus, NULL, (void *)con->fwnode,
+				      device_fwnode_match);
+		if (dev && !strncmp(dev_name(dev), con->id, strlen(con->id)))
+			return dev;
+
+		put_device(dev);
+	}
+	return NULL;
+}
+
 /* This tries to find the device from the most common bus types by name. */
 static void *generic_match(struct device_connection *con, int ep, void *data)
 {
 	struct bus_type *bus;
 	struct device *dev;
 
+	if (con->fwnode)
+		return device_connection_fwnode_match(con);
+
 	for (bus = generic_match_buses[0]; bus; bus++) {
 		dev = bus_find_device_by_name(bus, NULL, con->endpoint[ep]);
 		if (dev)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 240ab52..52be8c8 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -946,6 +946,30 @@
 }
 
 /**
+ * fwnode_find_reference - Find named reference to a fwnode_handle
+ * @fwnode: Firmware node where to look for the reference
+ * @name: The name of the reference
+ * @index: Index of the reference
+ *
+ * @index can be used when the named reference holds a table of references.
+ *
+ * Returns a pointer to the referenced fwnode, or ERR_PTR. The caller is
+ * responsible for calling fwnode_handle_put() on the returned fwnode pointer.
+ */
+struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
+					    const char *name,
+					    unsigned int index)
+{
+	struct fwnode_reference_args args;
+	int ret;
+
+	ret = fwnode_property_get_reference_args(fwnode, name, NULL, 0, index,
+						 &args);
+	return ret ? ERR_PTR(ret) : args.fwnode;
+}
+EXPORT_SYMBOL_GPL(fwnode_find_reference);
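+
+/*
+ * Example (sketch; the "companion" property name is made up): look up the
+ * first fwnode referenced by a named property, then drop the reference:
+ *
+ *	struct fwnode_handle *ref;
+ *
+ *	ref = fwnode_find_reference(dev_fwnode(dev), "companion", 0);
+ *	if (!IS_ERR(ref)) {
+ *		...use ref...
+ *		fwnode_handle_put(ref);
+ *	}
+ */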
+
+/**
  * device_remove_properties - Remove properties from a device object.
  * @dev: Device whose properties to remove.
  *
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index d854e26..a062389 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -34,6 +34,17 @@
 }
 EXPORT_SYMBOL(devm_clk_get);
 
+struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+{
+	struct clk *clk = devm_clk_get(dev, id);
+
+	if (clk == ERR_PTR(-ENOENT))
+		return NULL;
+
+	return clk;
+}
+EXPORT_SYMBOL(devm_clk_get_optional);
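+
+/*
+ * Example (sketch; the "bus" clock name is hypothetical): behaves like
+ * devm_clk_get() except that an absent clock yields NULL:
+ *
+ *	clk = devm_clk_get_optional(&pdev->dev, "bus");
+ *	if (IS_ERR(clk))
+ *		return PTR_ERR(clk);
+ *
+ * clk_prepare_enable(NULL) and friends succeed as no-ops, so no NULL check
+ * is needed before using the clock.
+ */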
+
 struct clk_bulk_devres {
 	struct clk_bulk_data *clks;
 	int num_clks;
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 3dd1dab..42403bc 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -186,6 +186,14 @@
 	---help---
 	  This driver supports MediaTek MT8135 clocks.
 
+config COMMON_CLK_MT8167
+	bool "Clock driver for MediaTek MT8167"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	select COMMON_CLK_MEDIATEK
+	default ARCH_MEDIATEK
+	help
+	  This driver supports MediaTek MT8167 clocks.
+
 config COMMON_CLK_MT8173
 	bool "Clock driver for MediaTek MT8173"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
@@ -193,4 +201,93 @@
 	default ARCH_MEDIATEK
 	---help---
 	  This driver supports MediaTek MT8173 clocks.
+
+config COMMON_CLK_MT8183
+	bool "Clock driver for MediaTek MT8183"
+	depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+	select COMMON_CLK_MEDIATEK
+	default ARCH_MEDIATEK && ARM64
+	help
+	  This driver supports MediaTek MT8183 basic clocks.
+
+config COMMON_CLK_MT8183_AUDIOSYS
+	bool "Clock driver for MediaTek MT8183 audiosys"
+	depends on COMMON_CLK_MT8183
+	help
+	  This driver supports MediaTek MT8183 audiosys clocks.
+
+config COMMON_CLK_MT8183_CAMSYS
+	bool "Clock driver for MediaTek MT8183 camsys"
+	depends on COMMON_CLK_MT8183
+	help
+	  This driver supports MediaTek MT8183 camsys clocks.
+
+config COMMON_CLK_MT8183_IMGSYS
+	bool "Clock driver for MediaTek MT8183 imgsys"
+	depends on COMMON_CLK_MT8183
+	help
+	  This driver supports MediaTek MT8183 imgsys clocks.
+
+config COMMON_CLK_MT8183_IPU_CORE0
+	bool "Clock driver for MediaTek MT8183 ipu_core0"
+	depends on COMMON_CLK_MT8183
+	help
+	  This driver supports MediaTek MT8183 ipu_core0 clocks.
+
+config COMMON_CLK_MT8183_IPU_CORE1
+	bool "Clock driver for MediaTek MT8183 ipu_core1"
+	depends on COMMON_CLK_MT8183
+	help
+	  This driver supports MediaTek MT8183 ipu_core1 clocks.
+
+config COMMON_CLK_MT8183_IPU_ADL
+	bool "Clock driver for MediaTek MT8183 ipu_adl"
+	depends on COMMON_CLK_MT8183
+	help
+	  This driver supports MediaTek MT8183 ipu_adl clocks.
+
+config COMMON_CLK_MT8183_IPU_CONN
+	bool "Clock driver for MediaTek MT8183 ipu_conn"
+	depends on COMMON_CLK_MT8183
+	help
+	  This driver supports MediaTek MT8183 ipu_conn clocks.
+
+config COMMON_CLK_MT8183_MFGCFG
+	bool "Clock driver for MediaTek MT8183 mfgcfg"
+	depends on COMMON_CLK_MT8183
+	help
+	  This driver supports MediaTek MT8183 mfgcfg clocks.
+
+config COMMON_CLK_MT8183_MMSYS
+	bool "Clock driver for MediaTek MT8183 mmsys"
+	depends on COMMON_CLK_MT8183
+	help
+	  This driver supports MediaTek MT8183 mmsys clocks.
+
+config COMMON_CLK_MT8183_VDECSYS
+	bool "Clock driver for MediaTek MT8183 vdecsys"
+	depends on COMMON_CLK_MT8183
+	help
+	  This driver supports MediaTek MT8183 vdecsys clocks.
+
+config COMMON_CLK_MT8183_VENCSYS
+	bool "Clock driver for MediaTek MT8183 vencsys"
+	depends on COMMON_CLK_MT8183
+	help
+	  This driver supports MediaTek MT8183 vencsys clocks.
+
+config COMMON_CLK_MT8516
+	bool "Clock driver for MediaTek MT8516"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	select COMMON_CLK_MEDIATEK
+	default ARCH_MEDIATEK
+	help
+	  This driver supports MediaTek MT8516 clocks.
+
+config COMMON_CLK_MT8516_AUDSYS
+	bool "Clock driver for MediaTek MT8516 audsys"
+	depends on COMMON_CLK_MT8516
+	help
+	  This driver supports MediaTek MT8516 audsys clocks.
+
 endmenu
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 844b55d..01b5821 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o
+obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o clk-mux.o
+
 obj-$(CONFIG_COMMON_CLK_MT6797) += clk-mt6797.o
 obj-$(CONFIG_COMMON_CLK_MT6797_IMGSYS) += clk-mt6797-img.o
 obj-$(CONFIG_COMMON_CLK_MT6797_MMSYS) += clk-mt6797-mm.o
@@ -27,4 +28,19 @@
 obj-$(CONFIG_COMMON_CLK_MT7622_HIFSYS) += clk-mt7622-hif.o
 obj-$(CONFIG_COMMON_CLK_MT7622_AUDSYS) += clk-mt7622-aud.o
 obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
+obj-$(CONFIG_COMMON_CLK_MT8167) += clk-mt8167.o
 obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
+obj-$(CONFIG_COMMON_CLK_MT8183) += clk-mt8183.o
+obj-$(CONFIG_COMMON_CLK_MT8183_AUDIOSYS) += clk-mt8183-audio.o
+obj-$(CONFIG_COMMON_CLK_MT8183_CAMSYS) += clk-mt8183-cam.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IMGSYS) += clk-mt8183-img.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IPU_CORE0) += clk-mt8183-ipu0.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IPU_CORE1) += clk-mt8183-ipu1.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IPU_ADL) += clk-mt8183-ipu_adl.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IPU_CONN) += clk-mt8183-ipu_conn.o
+obj-$(CONFIG_COMMON_CLK_MT8183_MFGCFG) += clk-mt8183-mfgcfg.o
+obj-$(CONFIG_COMMON_CLK_MT8183_MMSYS) += clk-mt8183-mm.o
+obj-$(CONFIG_COMMON_CLK_MT8183_VDECSYS) += clk-mt8183-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8183_VENCSYS) += clk-mt8183-venc.o
+obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516.o
+obj-$(CONFIG_COMMON_CLK_MT8516_AUDSYS) += clk-mt8516-aud.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 934bf0e..85daf82 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -157,7 +157,8 @@
 		int clr_ofs,
 		int sta_ofs,
 		u8 bit,
-		const struct clk_ops *ops)
+		const struct clk_ops *ops,
+		unsigned long flags)
 {
 	struct mtk_clk_gate *cg;
 	struct clk *clk;
@@ -168,7 +169,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	init.name = name;
-	init.flags = CLK_SET_RATE_PARENT;
+	init.flags = flags | CLK_SET_RATE_PARENT;
 	init.parent_names = parent_name ? &parent_name : NULL;
 	init.num_parents = parent_name ? 1 : 0;
 	init.ops = ops;
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 72ef89b..ab24016 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -47,6 +47,21 @@
 		int clr_ofs,
 		int sta_ofs,
 		u8 bit,
-		const struct clk_ops *ops);
+		const struct clk_ops *ops,
+		unsigned long flags);
+
+#define GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift,	\
+			_ops, _flags) {				\
+		.id = _id,					\
+		.name = _name,					\
+		.parent_name = _parent,				\
+		.regs = _regs,					\
+		.shift = _shift,				\
+		.ops = _ops,					\
+		.flags = _flags,				\
+	}
+
+#define GATE_MTK(_id, _name, _parent, _regs, _shift, _ops)		\
+	GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, 0)
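+
+/*
+ * Example (sketch; the identifiers are hypothetical): the new flags argument
+ * lets a SoC driver declare a gate that must never be disabled:
+ *
+ *	GATE_MTK_FLAGS(CLK_FOO_BUS, "foo_bus", "axi_sel", &foo_cg_regs, 3,
+ *		       &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL)
+ */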
 
 #endif /* __DRV_CLK_GATE_H */
diff --git a/drivers/clk/mediatek/clk-mt8167.c b/drivers/clk/mediatek/clk-mt8167.c
new file mode 100644
index 0000000..bf966f9
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8167.c
@@ -0,0 +1,1457 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8167-clk.h>
+
+static DEFINE_SPINLOCK(mt8167_clk_lock);
+
+static const struct mtk_fixed_clk fixed_clks[] __initconst = {
+	FIXED_CLK(CLK_TOP_CLK_NULL, "clk_null", NULL, 0),
+	FIXED_CLK(CLK_TOP_I2S_INFRA_BCK, "i2s_infra_bck", "clk_null", 26000000),
+	FIXED_CLK(CLK_TOP_MEMPLL, "mempll", "clk26m", 800000000),
+	FIXED_CLK(CLK_TOP_DSI0_LNTC_DSICK, "dsi0_lntc_dsick", "clk26m", 75000000),
+	FIXED_CLK(CLK_TOP_VPLL_DPIX, "vpll_dpix", "clk26m", 75000000),
+	FIXED_CLK(CLK_TOP_LVDSTX_CLKDIG_CTS, "lvdstx_dig_cts", "clk26m", 52500000),
+};
+
+static const struct mtk_fixed_factor top_divs[] __initconst = {
+	FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "mempll", 1, 1),
+	FACTOR(CLK_TOP_MAINPLL_D2, "mainpll_d2", "mainpll", 1, 2),
+	FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+	FACTOR(CLK_TOP_MAINPLL_D8, "mainpll_d8", "mainpll", 1, 8),
+	FACTOR(CLK_TOP_MAINPLL_D16, "mainpll_d16", "mainpll", 1, 16),
+	FACTOR(CLK_TOP_MAINPLL_D11, "mainpll_d11", "mainpll", 1, 11),
+	FACTOR(CLK_TOP_MAINPLL_D22, "mainpll_d22", "mainpll", 1, 22),
+	FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+	FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+	FACTOR(CLK_TOP_MAINPLL_D12, "mainpll_d12", "mainpll", 1, 12),
+	FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+	FACTOR(CLK_TOP_MAINPLL_D10, "mainpll_d10", "mainpll", 1, 10),
+	FACTOR(CLK_TOP_MAINPLL_D20, "mainpll_d20", "mainpll", 1, 20),
+	FACTOR(CLK_TOP_MAINPLL_D40, "mainpll_d40", "mainpll", 1, 40),
+	FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+	FACTOR(CLK_TOP_MAINPLL_D14, "mainpll_d14", "mainpll", 1, 14),
+	FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+	FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+	FACTOR(CLK_TOP_UNIVPLL_D8, "univpll_d8", "univpll", 1, 8),
+	FACTOR(CLK_TOP_UNIVPLL_D16, "univpll_d16", "univpll", 1, 16),
+	FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+	FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+	FACTOR(CLK_TOP_UNIVPLL_D12, "univpll_d12", "univpll", 1, 12),
+	FACTOR(CLK_TOP_UNIVPLL_D24, "univpll_d24", "univpll", 1, 24),
+	FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+	FACTOR(CLK_TOP_UNIVPLL_D20, "univpll_d20", "univpll", 1, 20),
+	FACTOR(CLK_TOP_MMPLL380M, "mmpll380m", "mmpll", 1, 1),
+	FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+	FACTOR(CLK_TOP_MMPLL_200M, "mmpll_200m", "mmpll", 1, 3),
+	FACTOR(CLK_TOP_LVDSPLL, "lvdspll_ck", "lvdspll", 1, 1),
+	FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll", 1, 2),
+	FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll", 1, 4),
+	FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll", 1, 8),
+	FACTOR(CLK_TOP_USB_PHY48M, "usb_phy48m_ck", "univpll", 1, 26),
+	FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+	FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1, 2),
+	FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "rg_apll1_d2_en", 1, 2),
+	FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "rg_apll1_d4_en", 1, 2),
+	FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+	FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1, 2),
+	FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "rg_apll2_d2_en", 1, 2),
+	FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "rg_apll2_d4_en", 1, 2),
+	FACTOR(CLK_TOP_CLK26M, "clk26m_ck", "clk26m", 1, 1),
+	FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "clk26m", 1, 2),
+	FACTOR(CLK_TOP_MIPI_26M, "mipi_26m", "clk26m", 1, 1),
+	FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll", 1, 1),
+	FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1, 2),
+	FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_ck", 1, 4),
+	FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_ck", 1, 8),
+	FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll_ck", 1, 16),
+	FACTOR(CLK_TOP_AHB_INFRA_D2, "ahb_infra_d2", "ahb_infra_sel", 1, 2),
+	FACTOR(CLK_TOP_NFI1X, "nfi1x_ck", "nfi2x_pad_sel", 1, 2),
+	FACTOR(CLK_TOP_ETH_D2, "eth_d2_ck", "eth_sel", 1, 2),
+};
+
+static const char * const uart0_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d24"
+};
+
+static const char * const gfmux_emi1x_parents[] __initconst = {
+	"clk26m_ck",
+	"dmpll_ck"
+};
+
+static const char * const emi_ddrphy_parents[] __initconst = {
+	"gfmux_emi1x_sel",
+	"gfmux_emi1x_sel"
+};
+
+static const char * const ahb_infra_parents[] __initconst = {
+	"clk_null",
+	"clk26m_ck",
+	"mainpll_d11",
+	"clk_null",
+	"mainpll_d12",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"mainpll_d10"
+};
+
+static const char * const csw_mux_mfg_parents[] __initconst = {
+	"clk_null",
+	"clk_null",
+	"univpll_d3",
+	"univpll_d2",
+	"clk26m_ck",
+	"mainpll_d4",
+	"univpll_d24",
+	"mmpll380m"
+};
+
+static const char * const msdc0_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d6",
+	"mainpll_d8",
+	"univpll_d8",
+	"mainpll_d16",
+	"mmpll_200m",
+	"mainpll_d12",
+	"mmpll_d2"
+};
+
+static const char * const camtg_mm_parents[] __initconst = {
+	"clk_null",
+	"clk26m_ck",
+	"usb_phy48m_ck",
+	"clk_null",
+	"univpll_d6"
+};
+
+static const char * const pwm_mm_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d12"
+};
+
+static const char * const uart1_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d24"
+};
+
+static const char * const msdc1_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d6",
+	"mainpll_d8",
+	"univpll_d8",
+	"mainpll_d16",
+	"mmpll_200m",
+	"mainpll_d12",
+	"mmpll_d2"
+};
+
+static const char * const spm_52m_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d24"
+};
+
+static const char * const pmicspi_parents[] __initconst = {
+	"univpll_d20",
+	"usb_phy48m_ck",
+	"univpll_d16",
+	"clk26m_ck"
+};
+
+static const char * const qaxi_aud26m_parents[] __initconst = {
+	"clk26m_ck",
+	"ahb_infra_sel"
+};
+
+static const char * const aud_intbus_parents[] __initconst = {
+	"clk_null",
+	"clk26m_ck",
+	"mainpll_d22",
+	"clk_null",
+	"mainpll_d11"
+};
+
+static const char * const nfi2x_pad_parents[] __initconst = {
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk26m_ck",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"mainpll_d12",
+	"mainpll_d8",
+	"clk_null",
+	"mainpll_d6",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"mainpll_d4",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"mainpll_d10",
+	"mainpll_d7",
+	"clk_null",
+	"mainpll_d5"
+};
+
+static const char * const nfi1x_pad_parents[] __initconst = {
+	"ahb_infra_sel",
+	"nfi1x_ck"
+};
+
+static const char * const mfg_mm_parents[] __initconst = {
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"csw_mux_mfg_sel",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"mainpll_d3",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"mainpll_d5",
+	"mainpll_d7",
+	"clk_null",
+	"mainpll_d14"
+};
+
+static const char * const ddrphycfg_parents[] __initconst = {
+	"clk26m_ck",
+	"mainpll_d16"
+};
+
+static const char * const smi_mm_parents[] __initconst = {
+	"clk26m_ck",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"univpll_d4",
+	"mainpll_d7",
+	"clk_null",
+	"mainpll_d14"
+};
+
+static const char * const usb_78m_parents[] __initconst = {
+	"clk_null",
+	"clk26m_ck",
+	"univpll_d16",
+	"clk_null",
+	"mainpll_d20"
+};
+
+static const char * const scam_mm_parents[] __initconst = {
+	"clk_null",
+	"clk26m_ck",
+	"mainpll_d14",
+	"clk_null",
+	"mainpll_d12"
+};
+
+static const char * const spinor_parents[] __initconst = {
+	"clk26m_d2",
+	"clk26m_ck",
+	"mainpll_d40",
+	"univpll_d24",
+	"univpll_d20",
+	"mainpll_d20",
+	"mainpll_d16",
+	"univpll_d12"
+};
+
+static const char * const msdc2_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d6",
+	"mainpll_d8",
+	"univpll_d8",
+	"mainpll_d16",
+	"mmpll_200m",
+	"mainpll_d12",
+	"mmpll_d2"
+};
+
+static const char * const eth_parents[] __initconst = {
+	"clk26m_ck",
+	"mainpll_d40",
+	"univpll_d24",
+	"univpll_d20",
+	"mainpll_d20"
+};
+
+static const char * const vdec_mm_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d4",
+	"mainpll_d4",
+	"univpll_d5",
+	"univpll_d6",
+	"mainpll_d6"
+};
+
+static const char * const dpi0_mm_parents[] __initconst = {
+	"clk26m_ck",
+	"lvdspll_ck",
+	"lvdspll_d2",
+	"lvdspll_d4",
+	"lvdspll_d8"
+};
+
+static const char * const dpi1_mm_parents[] __initconst = {
+	"clk26m_ck",
+	"tvdpll_d2",
+	"tvdpll_d4",
+	"tvdpll_d8",
+	"tvdpll_d16"
+};
+
+static const char * const axi_mfg_in_parents_e1[] __initconst = {
+	"clk26m_ck",
+	"gfmux_emi1x_sel",
+	"univpll_d24",
+	"mmpll380m"
+};
+
+static const char * const axi_mfg_in_parents[] __initconst = {
+	"clk26m_ck",
+	"mainpll_d11",
+	"univpll_d24",
+	"mmpll380m"
+};
+
+static const char * const slow_mfg_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d12",
+	"univpll_d24"
+};
+
+static const char * const aud1_parents[] __initconst = {
+	"clk26m_ck",
+	"apll1_ck"
+};
+
+static const char * const aud2_parents[] __initconst = {
+	"clk26m_ck",
+	"apll2_ck"
+};
+
+static const char * const aud_engen1_parents[] __initconst = {
+	"clk26m_ck",
+	"rg_apll1_d2_en",
+	"rg_apll1_d4_en",
+	"rg_apll1_d8_en"
+};
+
+static const char * const aud_engen2_parents[] __initconst = {
+	"clk26m_ck",
+	"rg_apll2_d2_en",
+	"rg_apll2_d4_en",
+	"rg_apll2_d8_en"
+};
+
+static const char * const i2c_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d20",
+	"univpll_d16",
+	"univpll_d12"
+};
+
+static const char * const aud_i2s0_m_parents[] __initconst = {
+	"rg_aud1",
+	"rg_aud2"
+};
+
+static const char * const pwm_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d12"
+};
+
+static const char * const spi_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d12",
+	"univpll_d8",
+	"univpll_d6"
+};
+
+static const char * const aud_spdifin_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d2"
+};
+
+static const char * const uart2_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d24"
+};
+
+static const char * const bsi_parents[] __initconst = {
+	"clk26m_ck",
+	"mainpll_d10",
+	"mainpll_d12",
+	"mainpll_d20"
+};
+
+static const char * const dbg_atclk_parents[] __initconst = {
+	"clk_null",
+	"clk26m_ck",
+	"mainpll_d5",
+	"clk_null",
+	"univpll_d5"
+};
+
+static const char * const csw_nfiecc_parents[] __initconst = {
+	"clk_null",
+	"mainpll_d7",
+	"mainpll_d6",
+	"clk_null",
+	"mainpll_d5"
+};
+
+static const char * const nfiecc_parents[] __initconst = {
+	"clk_null",
+	"nfi2x_pad_sel",
+	"mainpll_d4",
+	"clk_null",
+	"csw_nfiecc_sel"
+};
+
+static struct mtk_composite top_muxes[] __initdata = {
+	/* CLK_MUX_SEL0 */
+	MUX(CLK_TOP_UART0_SEL, "uart0_sel", uart0_parents,
+		0x000, 0, 1),
+	MUX(CLK_TOP_GFMUX_EMI1X_SEL, "gfmux_emi1x_sel", gfmux_emi1x_parents,
+		0x000, 1, 1),
+	MUX(CLK_TOP_EMI_DDRPHY_SEL, "emi_ddrphy_sel", emi_ddrphy_parents,
+		0x000, 2, 1),
+	MUX(CLK_TOP_AHB_INFRA_SEL, "ahb_infra_sel", ahb_infra_parents,
+		0x000, 4, 4),
+	MUX(CLK_TOP_CSW_MUX_MFG_SEL, "csw_mux_mfg_sel", csw_mux_mfg_parents,
+		0x000, 8, 3),
+	MUX(CLK_TOP_MSDC0_SEL, "msdc0_sel", msdc0_parents,
+		0x000, 11, 3),
+	MUX(CLK_TOP_CAMTG_MM_SEL, "camtg_mm_sel", camtg_mm_parents,
+		0x000, 15, 3),
+	MUX(CLK_TOP_PWM_MM_SEL, "pwm_mm_sel", pwm_mm_parents,
+		0x000, 18, 1),
+	MUX(CLK_TOP_UART1_SEL, "uart1_sel", uart1_parents,
+		0x000, 19, 1),
+	MUX(CLK_TOP_MSDC1_SEL, "msdc1_sel", msdc1_parents,
+		0x000, 20, 3),
+	MUX(CLK_TOP_SPM_52M_SEL, "spm_52m_sel", spm_52m_parents,
+		0x000, 23, 1),
+	MUX(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents,
+		0x000, 24, 2),
+	MUX(CLK_TOP_QAXI_AUD26M_SEL, "qaxi_aud26m_sel", qaxi_aud26m_parents,
+		0x000, 26, 1),
+	MUX(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+		0x000, 27, 3),
+	/* CLK_MUX_SEL1 */
+	MUX(CLK_TOP_NFI2X_PAD_SEL, "nfi2x_pad_sel", nfi2x_pad_parents,
+		0x004, 0, 7),
+	MUX(CLK_TOP_NFI1X_PAD_SEL, "nfi1x_pad_sel", nfi1x_pad_parents,
+		0x004, 7, 1),
+	MUX(CLK_TOP_MFG_MM_SEL, "mfg_mm_sel", mfg_mm_parents,
+		0x004, 8, 6),
+	MUX(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+		0x004, 15, 1),
+	MUX(CLK_TOP_SMI_MM_SEL, "smi_mm_sel", smi_mm_parents,
+		0x004, 16, 4),
+	MUX(CLK_TOP_USB_78M_SEL, "usb_78m_sel", usb_78m_parents,
+		0x004, 20, 3),
+	MUX(CLK_TOP_SCAM_MM_SEL, "scam_mm_sel", scam_mm_parents,
+		0x004, 23, 3),
+	/* CLK_MUX_SEL8 */
+	MUX(CLK_TOP_SPINOR_SEL, "spinor_sel", spinor_parents,
+		0x040, 0, 3),
+	MUX(CLK_TOP_MSDC2_SEL, "msdc2_sel", msdc2_parents,
+		0x040, 3, 3),
+	MUX(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
+		0x040, 6, 3),
+	MUX(CLK_TOP_VDEC_MM_SEL, "vdec_mm_sel", vdec_mm_parents,
+		0x040, 9, 3),
+	MUX(CLK_TOP_DPI0_MM_SEL, "dpi0_mm_sel", dpi0_mm_parents,
+		0x040, 12, 3),
+	MUX(CLK_TOP_DPI1_MM_SEL, "dpi1_mm_sel", dpi1_mm_parents,
+		0x040, 15, 3),
+	MUX(CLK_TOP_AXI_MFG_IN_SEL, "axi_mfg_in_sel", axi_mfg_in_parents,
+		0x040, 18, 2),
+	MUX(CLK_TOP_SLOW_MFG_SEL, "slow_mfg_sel", slow_mfg_parents,
+		0x040, 20, 2),
+	MUX(CLK_TOP_AUD1_SEL, "aud1_sel", aud1_parents,
+		0x040, 22, 1),
+	MUX(CLK_TOP_AUD2_SEL, "aud2_sel", aud2_parents,
+		0x040, 23, 1),
+	MUX(CLK_TOP_AUD_ENGEN1_SEL, "aud_engen1_sel", aud_engen1_parents,
+		0x040, 24, 2),
+	MUX(CLK_TOP_AUD_ENGEN2_SEL, "aud_engen2_sel", aud_engen2_parents,
+		0x040, 26, 2),
+	MUX(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents,
+		0x040, 28, 2),
+	/* CLK_SEL_9 */
+	MUX(CLK_TOP_AUD_I2S0_M_SEL, "aud_i2s0_m_sel", aud_i2s0_m_parents,
+		0x044, 12, 1),
+	MUX(CLK_TOP_AUD_I2S1_M_SEL, "aud_i2s1_m_sel", aud_i2s0_m_parents,
+		0x044, 13, 1),
+	MUX(CLK_TOP_AUD_I2S2_M_SEL, "aud_i2s2_m_sel", aud_i2s0_m_parents,
+		0x044, 14, 1),
+	MUX(CLK_TOP_AUD_I2S3_M_SEL, "aud_i2s3_m_sel", aud_i2s0_m_parents,
+		0x044, 15, 1),
+	MUX(CLK_TOP_AUD_I2S4_M_SEL, "aud_i2s4_m_sel", aud_i2s0_m_parents,
+		0x044, 16, 1),
+	MUX(CLK_TOP_AUD_I2S5_M_SEL, "aud_i2s5_m_sel", aud_i2s0_m_parents,
+		0x044, 17, 1),
+	MUX(CLK_TOP_AUD_SPDIF_B_SEL, "aud_spdif_b_sel", aud_i2s0_m_parents,
+		0x044, 18, 1),
+	/* CLK_MUX_SEL13 */
+	MUX(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents,
+		0x07c, 0, 1),
+	MUX(CLK_TOP_SPI_SEL, "spi_sel", spi_parents,
+		0x07c, 1, 2),
+	MUX(CLK_TOP_AUD_SPDIFIN_SEL, "aud_spdifin_sel", aud_spdifin_parents,
+		0x07c, 3, 1),
+	MUX(CLK_TOP_UART2_SEL, "uart2_sel", uart2_parents,
+		0x07c, 4, 1),
+	MUX(CLK_TOP_BSI_SEL, "bsi_sel", bsi_parents,
+		0x07c, 5, 2),
+	MUX(CLK_TOP_DBG_ATCLK_SEL, "dbg_atclk_sel", dbg_atclk_parents,
+		0x07c, 7, 3),
+	MUX(CLK_TOP_CSW_NFIECC_SEL, "csw_nfiecc_sel", csw_nfiecc_parents,
+		0x07c, 10, 3),
+	MUX(CLK_TOP_NFIECC_SEL, "nfiecc_sel", nfiecc_parents,
+		0x07c, 13, 3),
+};
+
+static const char * const ifr_mux1_parents[] __initconst = {
+	"clk26m_ck",
+	"armpll",
+	"univpll",
+	"mainpll_d2"
+};
+
+static const char * const ifr_eth_25m_parents[] __initconst = {
+	"eth_d2_ck",
+	"rg_eth"
+};
+
+static const char * const ifr_i2c0_parents[] __initconst = {
+	"ahb_infra_d2",
+	"rg_i2c"
+};
+
+static const struct mtk_composite ifr_muxes[] __initconst = {
+	MUX(CLK_IFR_MUX1_SEL, "ifr_mux1_sel", ifr_mux1_parents, 0x000,
+		2, 2),
+	MUX(CLK_IFR_ETH_25M_SEL, "ifr_eth_25m_sel", ifr_eth_25m_parents, 0x080,
+		0, 1),
+	MUX(CLK_IFR_I2C0_SEL, "ifr_i2c0_sel", ifr_i2c0_parents, 0x080,
+		1, 1),
+	MUX(CLK_IFR_I2C1_SEL, "ifr_i2c1_sel", ifr_i2c0_parents, 0x080,
+		2, 1),
+	MUX(CLK_IFR_I2C2_SEL, "ifr_i2c2_sel", ifr_i2c0_parents, 0x080,
+		3, 1),
+};
+
+#define DIV_ADJ(_id, _name, _parent, _reg, _shift, _width) {	\
+		.id = _id,					\
+		.name = _name,					\
+		.parent_name = _parent,				\
+		.div_reg = _reg,				\
+		.div_shift = _shift,				\
+		.div_width = _width,				\
+}
+
+#define DIV_ADJ_FLAG(_id, _name, _parent, _reg, _shift, _width, _flag) {	\
+		.id = _id,					\
+		.name = _name,					\
+		.parent_name = _parent,				\
+		.div_reg = _reg,				\
+		.div_shift = _shift,				\
+		.div_width = _width,				\
+		.clk_divider_flags = _flag,				\
+}
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV0, "apll12_ck_div0", "aud_i2s0_m_sel",
+		0x0048, 0, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV1, "apll12_ck_div1", "aud_i2s1_m_sel",
+		0x0048, 8, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV2, "apll12_ck_div2", "aud_i2s2_m_sel",
+		0x0048, 16, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV3, "apll12_ck_div3", "aud_i2s3_m_sel",
+		0x0048, 24, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV4, "apll12_ck_div4", "aud_i2s4_m_sel",
+		0x004c, 0, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV4B, "apll12_ck_div4b", "apll12_div4",
+		0x004c, 8, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV5, "apll12_ck_div5", "aud_i2s5_m_sel",
+		0x004c, 16, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV5B, "apll12_ck_div5b", "apll12_div5",
+		0x004c, 24, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV6, "apll12_ck_div6", "aud_spdif_b_sel",
+		0x0078, 0, 8),
+};
+
+static const struct mtk_clk_divider apmixed_adj_divs[] = {
+	DIV_ADJ_FLAG(CLK_APMIXED_HDMI_REF, "hdmi_ref", "tvdpll",
+		0x1c4, 24, 3, CLK_DIVIDER_POWER_OF_TWO | CLK_DIVIDER_READ_ONLY),
+};
+
+static const struct mtk_gate_regs top0_cg_regs = {
+	.set_ofs = 0x50,
+	.clr_ofs = 0x80,
+	.sta_ofs = 0x20,
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+	.set_ofs = 0x54,
+	.clr_ofs = 0x84,
+	.sta_ofs = 0x24,
+};
+
+static const struct mtk_gate_regs top2_cg_regs = {
+	.set_ofs = 0x6c,
+	.clr_ofs = 0x9c,
+	.sta_ofs = 0x3c,
+};
+
+static const struct mtk_gate_regs top3_cg_regs = {
+	.set_ofs = 0xa0,
+	.clr_ofs = 0xb0,
+	.sta_ofs = 0x70,
+};
+
+static const struct mtk_gate_regs top4_cg_regs = {
+	.set_ofs = 0xa4,
+	.clr_ofs = 0xb4,
+	.sta_ofs = 0x74,
+};
+
+static const struct mtk_gate_regs top5_cg_regs = {
+	.set_ofs = 0x44,
+	.clr_ofs = 0x44,
+	.sta_ofs = 0x44,
+};
+
+#define GATE_TOP0(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top0_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr,	\
+	}
+
+#define GATE_TOP0_I(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top0_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+	}
+
+#define GATE_TOP1(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top1_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr,	\
+	}
+
+#define GATE_TOP2(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top2_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr,	\
+	}
+
+#define GATE_TOP2_I(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top2_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+	}
+
+#define GATE_TOP3(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top3_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr,	\
+	}
+
+#define GATE_TOP4_I(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top4_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+	}
+
+#define GATE_TOP5(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top5_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_no_setclr,	\
+	}
+
+static const struct mtk_gate top_clks[] __initconst = {
+	/* TOP0 */
+	GATE_TOP0(CLK_TOP_PWM_MM, "pwm_mm", "pwm_mm_sel",
+		0),
+	GATE_TOP0(CLK_TOP_CAM_MM, "cam_mm", "camtg_mm_sel",
+		1),
+	GATE_TOP0(CLK_TOP_MFG_MM, "mfg_mm", "mfg_mm_sel",
+		2),
+	GATE_TOP0(CLK_TOP_SPM_52M, "spm_52m", "spm_52m_sel",
+		3),
+	GATE_TOP0_I(CLK_TOP_MIPI_26M_DBG, "mipi_26m_dbg", "mipi_26m",
+		4),
+	GATE_TOP0(CLK_TOP_SCAM_MM, "scam_mm", "scam_mm_sel",
+		5),
+	GATE_TOP0(CLK_TOP_SMI_MM, "smi_mm", "smi_mm_sel",
+		9),
+	/* TOP1 */
+	GATE_TOP1(CLK_TOP_THEM, "them", "ahb_infra_sel",
+		1),
+	GATE_TOP1(CLK_TOP_APDMA, "apdma", "ahb_infra_sel",
+		2),
+	GATE_TOP1(CLK_TOP_I2C0, "i2c0", "ifr_i2c0_sel",
+		3),
+	GATE_TOP1(CLK_TOP_I2C1, "i2c1", "ifr_i2c1_sel",
+		4),
+	GATE_TOP1(CLK_TOP_AUXADC1, "auxadc1", "ahb_infra_sel",
+		5),
+	GATE_TOP1(CLK_TOP_NFI, "nfi", "nfi1x_pad_sel",
+		6),
+	GATE_TOP1(CLK_TOP_NFIECC, "nfiecc", "rg_nfiecc",
+		7),
+	GATE_TOP1(CLK_TOP_DEBUGSYS, "debugsys", "rg_dbg_atclk",
+		8),
+	GATE_TOP1(CLK_TOP_PWM, "pwm", "ahb_infra_sel",
+		9),
+	GATE_TOP1(CLK_TOP_UART0, "uart0", "uart0_sel",
+		10),
+	GATE_TOP1(CLK_TOP_UART1, "uart1", "uart1_sel",
+		11),
+	GATE_TOP1(CLK_TOP_BTIF, "btif", "ahb_infra_sel",
+		12),
+	GATE_TOP1(CLK_TOP_USB, "usb", "usb_78m",
+		13),
+	GATE_TOP1(CLK_TOP_FLASHIF_26M, "flashif_26m", "clk26m_ck",
+		14),
+	GATE_TOP1(CLK_TOP_AUXADC2, "auxadc2", "ahb_infra_sel",
+		15),
+	GATE_TOP1(CLK_TOP_I2C2, "i2c2", "ifr_i2c2_sel",
+		16),
+	GATE_TOP1(CLK_TOP_MSDC0, "msdc0", "msdc0_sel",
+		17),
+	GATE_TOP1(CLK_TOP_MSDC1, "msdc1", "msdc1_sel",
+		18),
+	GATE_TOP1(CLK_TOP_NFI2X, "nfi2x", "nfi2x_pad_sel",
+		19),
+	GATE_TOP1(CLK_TOP_PMICWRAP_AP, "pwrap_ap", "clk26m_ck",
+		20),
+	GATE_TOP1(CLK_TOP_SEJ, "sej", "ahb_infra_sel",
+		21),
+	GATE_TOP1(CLK_TOP_MEMSLP_DLYER, "memslp_dlyer", "clk26m_ck",
+		22),
+	GATE_TOP1(CLK_TOP_SPI, "spi", "spi_sel",
+		23),
+	GATE_TOP1(CLK_TOP_APXGPT, "apxgpt", "clk26m_ck",
+		24),
+	GATE_TOP1(CLK_TOP_AUDIO, "audio", "clk26m_ck",
+		25),
+	GATE_TOP1(CLK_TOP_PMICWRAP_MD, "pwrap_md", "clk26m_ck",
+		27),
+	GATE_TOP1(CLK_TOP_PMICWRAP_CONN, "pwrap_conn", "clk26m_ck",
+		28),
+	GATE_TOP1(CLK_TOP_PMICWRAP_26M, "pwrap_26m", "clk26m_ck",
+		29),
+	GATE_TOP1(CLK_TOP_AUX_ADC, "aux_adc", "clk26m_ck",
+		30),
+	GATE_TOP1(CLK_TOP_AUX_TP, "aux_tp", "clk26m_ck",
+		31),
+	/* TOP2 */
+	GATE_TOP2(CLK_TOP_MSDC2, "msdc2", "ahb_infra_sel",
+		0),
+	GATE_TOP2(CLK_TOP_RBIST, "rbist", "univpll_d12",
+		1),
+	GATE_TOP2(CLK_TOP_NFI_BUS, "nfi_bus", "ahb_infra_sel",
+		2),
+	GATE_TOP2(CLK_TOP_GCE, "gce", "ahb_infra_sel",
+		4),
+	GATE_TOP2(CLK_TOP_TRNG, "trng", "ahb_infra_sel",
+		5),
+	GATE_TOP2(CLK_TOP_SEJ_13M, "sej_13m", "clk26m_ck",
+		6),
+	GATE_TOP2(CLK_TOP_AES, "aes", "ahb_infra_sel",
+		7),
+	GATE_TOP2(CLK_TOP_PWM_B, "pwm_b", "rg_pwm_infra",
+		8),
+	GATE_TOP2(CLK_TOP_PWM1_FB, "pwm1_fb", "rg_pwm_infra",
+		9),
+	GATE_TOP2(CLK_TOP_PWM2_FB, "pwm2_fb", "rg_pwm_infra",
+		10),
+	GATE_TOP2(CLK_TOP_PWM3_FB, "pwm3_fb", "rg_pwm_infra",
+		11),
+	GATE_TOP2(CLK_TOP_PWM4_FB, "pwm4_fb", "rg_pwm_infra",
+		12),
+	GATE_TOP2(CLK_TOP_PWM5_FB, "pwm5_fb", "rg_pwm_infra",
+		13),
+	GATE_TOP2(CLK_TOP_USB_1P, "usb_1p", "usb_78m",
+		14),
+	GATE_TOP2(CLK_TOP_FLASHIF_FREERUN, "flashif_freerun", "ahb_infra_sel",
+		15),
+	GATE_TOP2(CLK_TOP_26M_HDMI_SIFM, "hdmi_sifm_26m", "clk26m_ck",
+		16),
+	GATE_TOP2(CLK_TOP_26M_CEC, "cec_26m", "clk26m_ck",
+		17),
+	GATE_TOP2(CLK_TOP_32K_CEC, "cec_32k", "clk32k",
+		18),
+	GATE_TOP2(CLK_TOP_66M_ETH, "eth_66m", "ahb_infra_d2",
+		19),
+	GATE_TOP2(CLK_TOP_133M_ETH, "eth_133m", "ahb_infra_sel",
+		20),
+	GATE_TOP2(CLK_TOP_FETH_25M, "feth_25m", "ifr_eth_25m_sel",
+		21),
+	GATE_TOP2(CLK_TOP_FETH_50M, "feth_50m", "rg_eth",
+		22),
+	GATE_TOP2(CLK_TOP_FLASHIF_AXI, "flashif_axi", "ahb_infra_sel",
+		23),
+	GATE_TOP2(CLK_TOP_USBIF, "usbif", "ahb_infra_sel",
+		24),
+	GATE_TOP2(CLK_TOP_UART2, "uart2", "rg_uart2",
+		25),
+	GATE_TOP2(CLK_TOP_BSI, "bsi", "ahb_infra_sel",
+		26),
+	GATE_TOP2(CLK_TOP_GCPU_B, "gcpu_b", "ahb_infra_sel",
+		27),
+	GATE_TOP2_I(CLK_TOP_MSDC0_INFRA, "msdc0_infra", "msdc0",
+		28),
+	GATE_TOP2_I(CLK_TOP_MSDC1_INFRA, "msdc1_infra", "msdc1",
+		29),
+	GATE_TOP2_I(CLK_TOP_MSDC2_INFRA, "msdc2_infra", "rg_msdc2",
+		30),
+	GATE_TOP2(CLK_TOP_USB_78M, "usb_78m", "usb_78m_sel",
+		31),
+	/* TOP3 */
+	GATE_TOP3(CLK_TOP_RG_SPINOR, "rg_spinor", "spinor_sel",
+		0),
+	GATE_TOP3(CLK_TOP_RG_MSDC2, "rg_msdc2", "msdc2_sel",
+		1),
+	GATE_TOP3(CLK_TOP_RG_ETH, "rg_eth", "eth_sel",
+		2),
+	GATE_TOP3(CLK_TOP_RG_VDEC, "rg_vdec", "vdec_mm_sel",
+		3),
+	GATE_TOP3(CLK_TOP_RG_FDPI0, "rg_fdpi0", "dpi0_mm_sel",
+		4),
+	GATE_TOP3(CLK_TOP_RG_FDPI1, "rg_fdpi1", "dpi1_mm_sel",
+		5),
+	GATE_TOP3(CLK_TOP_RG_AXI_MFG, "rg_axi_mfg", "axi_mfg_in_sel",
+		6),
+	GATE_TOP3(CLK_TOP_RG_SLOW_MFG, "rg_slow_mfg", "slow_mfg_sel",
+		7),
+	GATE_TOP3(CLK_TOP_RG_AUD1, "rg_aud1", "aud1_sel",
+		8),
+	GATE_TOP3(CLK_TOP_RG_AUD2, "rg_aud2", "aud2_sel",
+		9),
+	GATE_TOP3(CLK_TOP_RG_AUD_ENGEN1, "rg_aud_engen1", "aud_engen1_sel",
+		10),
+	GATE_TOP3(CLK_TOP_RG_AUD_ENGEN2, "rg_aud_engen2", "aud_engen2_sel",
+		11),
+	GATE_TOP3(CLK_TOP_RG_I2C, "rg_i2c", "i2c_sel",
+		12),
+	GATE_TOP3(CLK_TOP_RG_PWM_INFRA, "rg_pwm_infra", "pwm_sel",
+		13),
+	GATE_TOP3(CLK_TOP_RG_AUD_SPDIF_IN, "rg_aud_spdif_in", "aud_spdifin_sel",
+		14),
+	GATE_TOP3(CLK_TOP_RG_UART2, "rg_uart2", "uart2_sel",
+		15),
+	GATE_TOP3(CLK_TOP_RG_BSI, "rg_bsi", "bsi_sel",
+		16),
+	GATE_TOP3(CLK_TOP_RG_DBG_ATCLK, "rg_dbg_atclk", "dbg_atclk_sel",
+		17),
+	GATE_TOP3(CLK_TOP_RG_NFIECC, "rg_nfiecc", "nfiecc_sel",
+		18),
+	/* TOP4 */
+	GATE_TOP4_I(CLK_TOP_RG_APLL1_D2_EN, "rg_apll1_d2_en", "apll1_d2",
+		8),
+	GATE_TOP4_I(CLK_TOP_RG_APLL1_D4_EN, "rg_apll1_d4_en", "apll1_d4",
+		9),
+	GATE_TOP4_I(CLK_TOP_RG_APLL1_D8_EN, "rg_apll1_d8_en", "apll1_d8",
+		10),
+	GATE_TOP4_I(CLK_TOP_RG_APLL2_D2_EN, "rg_apll2_d2_en", "apll2_d2",
+		11),
+	GATE_TOP4_I(CLK_TOP_RG_APLL2_D4_EN, "rg_apll2_d4_en", "apll2_d4",
+		12),
+	GATE_TOP4_I(CLK_TOP_RG_APLL2_D8_EN, "rg_apll2_d8_en", "apll2_d8",
+		13),
+	/* TOP5 */
+	GATE_TOP5(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll12_ck_div0",
+		0),
+	GATE_TOP5(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll12_ck_div1",
+		1),
+	GATE_TOP5(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll12_ck_div2",
+		2),
+	GATE_TOP5(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll12_ck_div3",
+		3),
+	GATE_TOP5(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll12_ck_div4",
+		4),
+	GATE_TOP5(CLK_TOP_APLL12_DIV4B, "apll12_div4b", "apll12_ck_div4b",
+		5),
+	GATE_TOP5(CLK_TOP_APLL12_DIV5, "apll12_div5", "apll12_ck_div5",
+		6),
+	GATE_TOP5(CLK_TOP_APLL12_DIV5B, "apll12_div5b", "apll12_ck_div5b",
+		7),
+	GATE_TOP5(CLK_TOP_APLL12_DIV6, "apll12_div6", "apll12_ck_div6",
+		8),
+};
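+
+/*
+ * All three offsets below point at the same word: the audio block
+ * appears to have a single control register with no set/clr
+ * companions, so its gates need read-modify-write (no_setclr) ops.
+ */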
+static const struct mtk_gate_regs aud_cg_regs = {
+	.set_ofs = 0x0,
+	.clr_ofs = 0x0,
+	.sta_ofs = 0x0,
+};
+
+#define GATE_AUD(_id, _name, _parent, _shift) {	\
+		.id = _id,			\
+		.name = _name,			\
+		.parent_name = _parent,		\
+		.regs = &aud_cg_regs,		\
+		.shift = _shift,		\
+		.ops = &mtk_clk_gate_ops_no_setclr,	\
+	}
+
+static const struct mtk_gate aud_clks[] __initconst = {
+	GATE_AUD(CLK_AUD_AFE, "aud_afe", "clk26m_ck", 2),
+	GATE_AUD(CLK_AUD_I2S, "aud_i2s", "i2s_infra_bck", 6),
+	GATE_AUD(CLK_AUD_22M, "aud_22m", "rg_aud_engen1", 8),
+	GATE_AUD(CLK_AUD_24M, "aud_24m", "rg_aud_engen2", 9),
+	GATE_AUD(CLK_AUD_INTDIR, "aud_intdir", "rg_aud_spdif_in", 15),
+	GATE_AUD(CLK_AUD_APLL2_TUNER, "aud_apll2_tuner", "rg_aud_engen2", 18),
+	GATE_AUD(CLK_AUD_APLL_TUNER, "aud_apll_tuner", "rg_aud_engen1", 19),
+	GATE_AUD(CLK_AUD_HDMI, "aud_hdmi", "apll12_div4", 20),
+	GATE_AUD(CLK_AUD_SPDF, "aud_spdf", "apll12_div6", 21),
+	GATE_AUD(CLK_AUD_ADC, "aud_adc", "aud_afe", 24),
+	GATE_AUD(CLK_AUD_DAC, "aud_dac", "aud_afe", 25),
+	GATE_AUD(CLK_AUD_DAC_PREDIS, "aud_dac_predis", "aud_afe", 26),
+	GATE_AUD(CLK_AUD_TML, "aud_tml", "aud_afe", 27),
+};
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+	.set_ofs = 0x4,
+	.clr_ofs = 0x8,
+	.sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) {		\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &mfg_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr,	\
+	}
+
+static const struct mtk_gate mfg_clks[] __initconst = {
+	GATE_MFG(CLK_MFG_BAXI, "mfg_baxi", "ahb_infra_sel", 0),
+	GATE_MFG(CLK_MFG_BMEM, "mfg_bmem", "gfmux_emi1x_sel", 1),
+	GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_mm", 2),
+	GATE_MFG(CLK_MFG_B26M, "mfg_b26m", "clk26m_ck", 3),
+};
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+	.set_ofs = 0x104,
+	.clr_ofs = 0x108,
+	.sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+	.set_ofs = 0x114,
+	.clr_ofs = 0x118,
+	.sta_ofs = 0x110,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) {		\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &mm0_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr,	\
+	}
+
+#define GATE_MM1(_id, _name, _parent, _shift) {		\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &mm1_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr,	\
+	}
+
+static const struct mtk_gate mm_clks[] __initconst = {
+	/* MM0 */
+	GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "smi_mm", 0),
+	GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "smi_mm", 1),
+	GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "smi_mm", 2),
+	GATE_MM0(CLK_MM_MDP_RDMA, "mm_mdp_rdma", "smi_mm", 3),
+	GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "smi_mm", 4),
+	GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "smi_mm", 5),
+	GATE_MM0(CLK_MM_MDP_TDSHP, "mm_mdp_tdshp", "smi_mm", 6),
+	GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "smi_mm", 7),
+	GATE_MM0(CLK_MM_MDP_WROT, "mm_mdp_wrot", "smi_mm", 8),
+	GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "smi_mm", 9),
+	GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "smi_mm", 10),
+	GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "smi_mm", 11),
+	GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "smi_mm", 12),
+	GATE_MM0(CLK_MM_DISP_WDMA, "mm_disp_wdma", "smi_mm", 13),
+	GATE_MM0(CLK_MM_DISP_COLOR, "mm_disp_color", "smi_mm", 14),
+	GATE_MM0(CLK_MM_DISP_CCORR, "mm_disp_ccorr", "smi_mm", 15),
+	GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "smi_mm", 16),
+	GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "smi_mm", 17),
+	GATE_MM0(CLK_MM_DISP_DITHER, "mm_disp_dither", "smi_mm", 18),
+	GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "smi_mm", 19),
+	/* MM1 */
+	GATE_MM1(CLK_MM_DISP_PWM_MM, "mm_disp_pwm_mm", "smi_mm", 0),
+	GATE_MM1(CLK_MM_DISP_PWM_26M, "mm_disp_pwm_26m", "smi_mm", 1),
+	GATE_MM1(CLK_MM_DSI_ENGINE, "mm_dsi_engine", "smi_mm", 2),
+	GATE_MM1(CLK_MM_DSI_DIGITAL, "mm_dsi_digital", "dsi0_lntc_dsick", 3),
+	GATE_MM1(CLK_MM_DPI0_ENGINE, "mm_dpi0_engine", "smi_mm", 4),
+	GATE_MM1(CLK_MM_DPI0_PXL, "mm_dpi0_pxl", "rg_fdpi0", 5),
+	GATE_MM1(CLK_MM_LVDS_PXL, "mm_lvds_pxl", "vpll_dpix", 14),
+	GATE_MM1(CLK_MM_LVDS_CTS, "mm_lvds_cts", "lvdstx_dig_cts", 15),
+	GATE_MM1(CLK_MM_DPI1_ENGINE, "mm_dpi1_engine", "smi_mm", 16),
+	GATE_MM1(CLK_MM_DPI1_PXL, "mm_dpi1_pxl", "rg_fdpi1", 17),
+	GATE_MM1(CLK_MM_HDMI_PXL, "mm_hdmi_pxl", "rg_fdpi1", 18),
+	GATE_MM1(CLK_MM_HDMI_SPDIF, "mm_hdmi_spdif", "apll12_div6", 19),
+	GATE_MM1(CLK_MM_HDMI_ADSP_BCK, "mm_hdmi_adsp_b", "apll12_div4b", 20),
+	GATE_MM1(CLK_MM_HDMI_PLL, "mm_hdmi_pll", "hdmtx_dig_cts", 21),
+};
+
+static const struct mtk_gate_regs img_cg_regs = {
+	.set_ofs = 0x4,
+	.clr_ofs = 0x8,
+	.sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) {		\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &img_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr,	\
+	}
+
+static const struct mtk_gate img_clks[] __initconst = {
+	GATE_IMG(CLK_IMG_LARB1_SMI, "img_larb1_smi", "smi_mm", 0),
+	GATE_IMG(CLK_IMG_CAM_SMI, "img_cam_smi", "smi_mm", 5),
+	GATE_IMG(CLK_IMG_CAM_CAM, "img_cam_cam", "smi_mm", 6),
+	GATE_IMG(CLK_IMG_SEN_TG, "img_sen_tg", "cam_mm", 7),
+	GATE_IMG(CLK_IMG_SEN_CAM, "img_sen_cam", "smi_mm", 8),
+	GATE_IMG(CLK_IMG_VENC, "img_venc", "smi_mm", 9),
+};
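+
+/*
+ * VDEC status is read back from the SET word (sta_ofs == set_ofs) and
+ * the bits are enable bits, hence the inverted setclr ops below.
+ */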
+static const struct mtk_gate_regs vdec0_cg_regs = {
+	.set_ofs = 0x0,
+	.clr_ofs = 0x4,
+	.sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+	.set_ofs = 0x8,
+	.clr_ofs = 0xc,
+	.sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0_I(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &vdec0_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+	}
+
+#define GATE_VDEC1_I(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &vdec1_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+	}
+
+static const struct mtk_gate vdec_clks[] __initconst = {
+	/* VDEC0 */
+	GATE_VDEC0_I(CLK_VDEC_CKEN, "vdec_cken", "rg_vdec", 0),
+	/* VDEC1 */
+	GATE_VDEC1_I(CLK_VDEC_LARB1_CKEN, "vdec_larb1_cken", "smi_mm", 0),
+};
+
+static void __init mtk_topckgen_init(struct device_node *node)
+{
+	struct clk_onecell_data *clk_data;
+	int r, i;
+	void __iomem *base;
+
+	base = of_iomap(node, 0);
+	if (!base) {
+		pr_err("%s(): ioremap failed\n", __func__);
+		return;
+	}
+
+	/*
+	 * FIXME: mt_get_chip_sw_ver() is not available in this tree, so
+	 * assume an E1 chip (CHIP_SW_VER_01) and always install the E1
+	 * parent list for the axi_mfg_in mux.
+	 */
+	for (i = 0; i < ARRAY_SIZE(top_muxes); i++) {
+		struct mtk_composite *t = &top_muxes[i];
+
+		if (t->id == CLK_TOP_AXI_MFG_IN_SEL)
+			t->parent_names = axi_mfg_in_parents_e1;
+	}
+
+	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+	mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+		clk_data);
+	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data);
+
+	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+		&mt8167_clk_lock, clk_data);
+	mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+				base, &mt8167_clk_lock, clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+	if (r)
+		pr_err("%s(): could not register clock provider: %d\n",
+			__func__, r);
+}
+CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8167-topckgen", mtk_topckgen_init);
+
+static void __init mtk_infracfg_init(struct device_node *node)
+{
+	struct clk_onecell_data *clk_data;
+	int r;
+	void __iomem *base;
+
+	base = of_iomap(node, 0);
+	if (!base) {
+		pr_err("%s(): ioremap failed\n", __func__);
+		return;
+	}
+
+	clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
+
+	mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
+		&mt8167_clk_lock, clk_data);
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+	if (r)
+		pr_err("%s(): could not register clock provider: %d\n",
+			__func__, r);
+}
+CLK_OF_DECLARE(mtk_infracfg, "mediatek,mt8167-infracfg", mtk_infracfg_init);
+
+/* FIXME: modify FMAX */
+#define MT8167_PLL_FMAX		(2500UL * MHZ)
+
+#define CON0_MT8167_RST_BAR	BIT(27)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,	\
+			_pd_reg, _pd_shift, _tuner_reg, _pcw_reg,	\
+			_pcw_shift, _div_table) {			\
+		.id = _id,						\
+		.name = _name,						\
+		.reg = _reg,						\
+		.pwr_reg = _pwr_reg,					\
+		.en_mask = _en_mask,					\
+		.flags = _flags,					\
+		.rst_bar_mask = CON0_MT8167_RST_BAR,			\
+		.fmax = MT8167_PLL_FMAX,				\
+		.pcwbits = _pcwbits,					\
+		.pd_reg = _pd_reg,					\
+		.pd_shift = _pd_shift,					\
+		.tuner_reg = _tuner_reg,				\
+		.pcw_reg = _pcw_reg,					\
+		.pcw_shift = _pcw_shift,				\
+		.div_table = _div_table,				\
+	}
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,	\
+			_pd_reg, _pd_shift, _tuner_reg, _pcw_reg,	\
+			_pcw_shift)					\
+		PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+			_pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+			NULL)
+
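+/*
+ * mmpll post-divider table: each entry gives the highest output rate
+ * served with the PLL output divided by 2^div (e.g. requests below
+ * 1 GHz and above 604.5 MHz use div = 1).
+ */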
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+	{ .div = 0, .freq = MT8167_PLL_FMAX },
+	{ .div = 1, .freq = 1000000000 },
+	{ .div = 2, .freq = 604500000 },
+	{ .div = 3, .freq = 253500000 },
+	{ .div = 4, .freq = 126750000 },
+	{ } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+	/* FIXME: need to fix flags/div_table/tuner_reg/table */
+	PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0x00000001, 0,
+		21, 0x0104, 24, 0, 0x0104, 0),
+	PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0x00000001,
+		HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0),
+	PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000001,
+		HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0),
+	PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0x00000001, 0,
+		21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table),
+	PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0x00000001, 0,
+		31, 0x0180, 1, 0x0194, 0x0184, 0),
+	PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0x00000001, 0,
+		31, 0x01A0, 1, 0x01B4, 0x01A4, 0),
+	PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x01C0, 0x01D0, 0x00000001, 0,
+		21, 0x01C4, 24, 0, 0x01C4, 0),
+	PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x01E0, 0x01F0, 0x00000001, 0,
+		21, 0x01E4, 24, 0, 0x01E4, 0),
+};
+
+static void __init mtk_apmixedsys_init(struct device_node *node)
+{
+	struct clk_onecell_data *clk_data;
+	void __iomem *base;
+	int r;
+
+	base = of_iomap(node, 0);
+	if (!base) {
+		pr_err("%s(): ioremap failed\n", __func__);
+		return;
+	}
+
+	clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+
+	/* FIXME: add code for APMIXEDSYS */
+	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+	mtk_clk_register_dividers(apmixed_adj_divs, ARRAY_SIZE(apmixed_adj_divs),
+		base, &mt8167_clk_lock, clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+	if (r)
+		pr_err("%s(): could not register clock provider: %d\n",
+			__func__, r);
+}
+CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8167-apmixedsys",
+		mtk_apmixedsys_init);
+
+static void __init mtk_audiotop_init(struct device_node *node)
+{
+	struct clk_onecell_data *clk_data;
+	int r;
+
+	clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
+
+	mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+	if (r)
+		pr_err("%s(): could not register clock provider: %d\n",
+			__func__, r);
+}
+CLK_OF_DECLARE(mtk_audiotop, "mediatek,mt8167-audiotop", mtk_audiotop_init);
+
+static void __init mtk_mfgcfg_init(struct device_node *node)
+{
+	struct clk_onecell_data *clk_data;
+	int r;
+
+	clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
+
+	mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks), clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+	if (r)
+		pr_err("%s(): could not register clock provider: %d\n",
+			__func__, r);
+}
+CLK_OF_DECLARE(mtk_mfgcfg, "mediatek,mt8167-mfgcfg", mtk_mfgcfg_init);
+
+static void __init mtk_mmsys_init(struct device_node *node)
+{
+	struct clk_onecell_data *clk_data;
+	int r;
+
+	clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+
+	mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+	if (r)
+		pr_err("%s(): could not register clock provider: %d\n",
+			__func__, r);
+}
+CLK_OF_DECLARE(mtk_mmsys, "mediatek,mt8167-mmsys", mtk_mmsys_init);
+
+static void __init mtk_imgsys_init(struct device_node *node)
+{
+	struct clk_onecell_data *clk_data;
+	int r;
+
+	clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+
+	mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+	if (r)
+		pr_err("%s(): could not register clock provider: %d\n",
+			__func__, r);
+}
+CLK_OF_DECLARE(mtk_imgsys, "mediatek,mt8167-imgsys", mtk_imgsys_init);
+
+static void __init mtk_vdecsys_init(struct device_node *node)
+{
+	struct clk_onecell_data *clk_data;
+	int r;
+
+	clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
+
+	mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+	if (r)
+		pr_err("%s(): could not register clock provider: %d\n",
+			__func__, r);
+}
+CLK_OF_DECLARE(mtk_vdecsys, "mediatek,mt8167-vdecsys", mtk_vdecsys_init);
diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c
new file mode 100644
index 0000000..c874501
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-audio.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs audio0_cg_regs = {
+	.set_ofs = 0x0,
+	.clr_ofs = 0x0,
+	.sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs audio1_cg_regs = {
+	.set_ofs = 0x4,
+	.clr_ofs = 0x4,
+	.sta_ofs = 0x4,
+};
+
+#define GATE_AUDIO0(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_no_setclr)
+
+#define GATE_AUDIO1(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate audio_clks[] = {
+	/* AUDIO0 */
+	GATE_AUDIO0(CLK_AUDIO_AFE, "aud_afe", "audio_sel",
+		2),
+	GATE_AUDIO0(CLK_AUDIO_22M, "aud_22m", "aud_eng1_sel",
+		8),
+	GATE_AUDIO0(CLK_AUDIO_24M, "aud_24m", "aud_eng2_sel",
+		9),
+	GATE_AUDIO0(CLK_AUDIO_APLL2_TUNER, "aud_apll2_tuner", "aud_eng2_sel",
+		18),
+	GATE_AUDIO0(CLK_AUDIO_APLL_TUNER, "aud_apll_tuner", "aud_eng1_sel",
+		19),
+	GATE_AUDIO0(CLK_AUDIO_TDM, "aud_tdm", "apll12_divb",
+		20),
+	GATE_AUDIO0(CLK_AUDIO_ADC, "aud_adc", "audio_sel",
+		24),
+	GATE_AUDIO0(CLK_AUDIO_DAC, "aud_dac", "audio_sel",
+		25),
+	GATE_AUDIO0(CLK_AUDIO_DAC_PREDIS, "aud_dac_predis", "audio_sel",
+		26),
+	GATE_AUDIO0(CLK_AUDIO_TML, "aud_tml", "audio_sel",
+		27),
+	/* AUDIO1 */
+	GATE_AUDIO1(CLK_AUDIO_I2S1, "aud_i2s1", "audio_sel",
+		4),
+	GATE_AUDIO1(CLK_AUDIO_I2S2, "aud_i2s2", "audio_sel",
+		5),
+	GATE_AUDIO1(CLK_AUDIO_I2S3, "aud_i2s3", "audio_sel",
+		6),
+	GATE_AUDIO1(CLK_AUDIO_I2S4, "aud_i2s4", "audio_sel",
+		7),
+	GATE_AUDIO1(CLK_AUDIO_PDN_ADDA6_ADC, "aud_pdn_adda6_adc", "audio_sel",
+		20),
+};
+
+static int clk_mt8183_audio_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	int r;
+	struct device_node *node = pdev->dev.of_node;
+
+	clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
+
+	mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
+			clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+	if (r)
+		return r;
+
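+	/*
+	 * The audsys node is expected to carry child nodes for the audio
+	 * hardware itself; populate them only after the clock provider is
+	 * registered, and drop the provider again if that fails.
+	 */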
+	r = devm_of_platform_populate(&pdev->dev);
+	if (r)
+		of_clk_del_provider(node);
+
+	return r;
+}
+
+static const struct of_device_id of_match_clk_mt8183_audio[] = {
+	{ .compatible = "mediatek,mt8183-audiosys", },
+	{}
+};
+
+static struct platform_driver clk_mt8183_audio_drv = {
+	.probe = clk_mt8183_audio_probe,
+	.driver = {
+		.name = "clk-mt8183-audio",
+		.of_match_table = of_match_clk_mt8183_audio,
+	},
+};
+
+builtin_platform_driver(clk_mt8183_audio_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c
new file mode 100644
index 0000000..8643802
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-cam.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs cam_cg_regs = {
+	.set_ofs = 0x4,
+	.clr_ofs = 0x8,
+	.sta_ofs = 0x0,
+};
+
+#define GATE_CAM(_id, _name, _parent, _shift)			\
+	GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate cam_clks[] = {
+	GATE_CAM(CLK_CAM_LARB6, "cam_larb6", "cam_sel", 0),
+	GATE_CAM(CLK_CAM_DFP_VAD, "cam_dfp_vad", "cam_sel", 1),
+	GATE_CAM(CLK_CAM_LARB3, "cam_larb3", "cam_sel", 2),
+	GATE_CAM(CLK_CAM_CAM, "cam_cam", "cam_sel", 6),
+	GATE_CAM(CLK_CAM_CAMTG, "cam_camtg", "cam_sel", 7),
+	GATE_CAM(CLK_CAM_SENINF, "cam_seninf", "cam_sel", 8),
+	GATE_CAM(CLK_CAM_CAMSV0, "cam_camsv0", "cam_sel", 9),
+	GATE_CAM(CLK_CAM_CAMSV1, "cam_camsv1", "cam_sel", 10),
+	GATE_CAM(CLK_CAM_CAMSV2, "cam_camsv2", "cam_sel", 11),
+	GATE_CAM(CLK_CAM_CCU, "cam_ccu", "cam_sel", 12),
+};
+
+static int clk_mt8183_cam_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+
+	clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
+
+	mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks),
+			clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_cam[] = {
+	{ .compatible = "mediatek,mt8183-camsys", },
+	{}
+};
+
+static struct platform_driver clk_mt8183_cam_drv = {
+	.probe = clk_mt8183_cam_probe,
+	.driver = {
+		.name = "clk-mt8183-cam",
+		.of_match_table = of_match_clk_mt8183_cam,
+	},
+};
+
+builtin_platform_driver(clk_mt8183_cam_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c
new file mode 100644
index 0000000..470d676
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-img.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+	.set_ofs = 0x4,
+	.clr_ofs = 0x8,
+	.sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift)			\
+	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate img_clks[] = {
+	GATE_IMG(CLK_IMG_LARB5, "img_larb5", "img_sel", 0),
+	GATE_IMG(CLK_IMG_LARB2, "img_larb2", "img_sel", 1),
+	GATE_IMG(CLK_IMG_DIP, "img_dip", "img_sel", 2),
+	GATE_IMG(CLK_IMG_FDVT, "img_fdvt", "img_sel", 3),
+	GATE_IMG(CLK_IMG_DPE, "img_dpe", "img_sel", 4),
+	GATE_IMG(CLK_IMG_RSC, "img_rsc", "img_sel", 5),
+	GATE_IMG(CLK_IMG_MFB, "img_mfb", "img_sel", 6),
+	GATE_IMG(CLK_IMG_WPE_A, "img_wpe_a", "img_sel", 7),
+	GATE_IMG(CLK_IMG_WPE_B, "img_wpe_b", "img_sel", 8),
+	GATE_IMG(CLK_IMG_OWE, "img_owe", "img_sel", 9),
+};
+
+static int clk_mt8183_img_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+
+	clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+
+	mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
+			clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_img[] = {
+	{ .compatible = "mediatek,mt8183-imgsys", },
+	{}
+};
+
+static struct platform_driver clk_mt8183_img_drv = {
+	.probe = clk_mt8183_img_probe,
+	.driver = {
+		.name = "clk-mt8183-img",
+		.of_match_table = of_match_clk_mt8183_img,
+	},
+};
+
+builtin_platform_driver(clk_mt8183_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c
new file mode 100644
index 0000000..c5cb76f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs ipu_core0_cg_regs = {
+	.set_ofs = 0x4,
+	.clr_ofs = 0x8,
+	.sta_ofs = 0x0,
+};
+
+#define GATE_IPU_CORE0(_id, _name, _parent, _shift)			\
+	GATE_MTK(_id, _name, _parent, &ipu_core0_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate ipu_core0_clks[] = {
+	GATE_IPU_CORE0(CLK_IPU_CORE0_JTAG, "ipu_core0_jtag", "dsp_sel", 0),
+	GATE_IPU_CORE0(CLK_IPU_CORE0_AXI, "ipu_core0_axi", "dsp_sel", 1),
+	GATE_IPU_CORE0(CLK_IPU_CORE0_IPU, "ipu_core0_ipu", "dsp_sel", 2),
+};
+
+static int clk_mt8183_ipu_core0_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+
+	clk_data = mtk_alloc_clk_data(CLK_IPU_CORE0_NR_CLK);
+
+	mtk_clk_register_gates(node, ipu_core0_clks, ARRAY_SIZE(ipu_core0_clks),
+			clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_ipu_core0[] = {
+	{ .compatible = "mediatek,mt8183-ipu_core0", },
+	{}
+};
+
+static struct platform_driver clk_mt8183_ipu_core0_drv = {
+	.probe = clk_mt8183_ipu_core0_probe,
+	.driver = {
+		.name = "clk-mt8183-ipu_core0",
+		.of_match_table = of_match_clk_mt8183_ipu_core0,
+	},
+};
+
+builtin_platform_driver(clk_mt8183_ipu_core0_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c
new file mode 100644
index 0000000..8fd5fe0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs ipu_core1_cg_regs = {
+	.set_ofs = 0x4,
+	.clr_ofs = 0x8,
+	.sta_ofs = 0x0,
+};
+
+#define GATE_IPU_CORE1(_id, _name, _parent, _shift)			\
+	GATE_MTK(_id, _name, _parent, &ipu_core1_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate ipu_core1_clks[] = {
+	GATE_IPU_CORE1(CLK_IPU_CORE1_JTAG, "ipu_core1_jtag", "dsp_sel", 0),
+	GATE_IPU_CORE1(CLK_IPU_CORE1_AXI, "ipu_core1_axi", "dsp_sel", 1),
+	GATE_IPU_CORE1(CLK_IPU_CORE1_IPU, "ipu_core1_ipu", "dsp_sel", 2),
+};
+
+static int clk_mt8183_ipu_core1_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+
+	clk_data = mtk_alloc_clk_data(CLK_IPU_CORE1_NR_CLK);
+
+	mtk_clk_register_gates(node, ipu_core1_clks, ARRAY_SIZE(ipu_core1_clks),
+			clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_ipu_core1[] = {
+	{ .compatible = "mediatek,mt8183-ipu_core1", },
+	{}
+};
+
+static struct platform_driver clk_mt8183_ipu_core1_drv = {
+	.probe = clk_mt8183_ipu_core1_probe,
+	.driver = {
+		.name = "clk-mt8183-ipu_core1",
+		.of_match_table = of_match_clk_mt8183_ipu_core1,
+	},
+};
+
+builtin_platform_driver(clk_mt8183_ipu_core1_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
new file mode 100644
index 0000000..3f37d0e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
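+/*
+ * Single control word at offset 0x204 with no set/clr companions; the
+ * gate bit is an enable bit, so the read-modify-write inverted ops are
+ * used.
+ */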
+static const struct mtk_gate_regs ipu_adl_cg_regs = {
+	.set_ofs = 0x204,
+	.clr_ofs = 0x204,
+	.sta_ofs = 0x204,
+};
+
+#define GATE_IPU_ADL_I(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &ipu_adl_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate ipu_adl_clks[] = {
+	GATE_IPU_ADL_I(CLK_IPU_ADL_CABGEN, "ipu_adl_cabgen", "dsp_sel", 24),
+};
+
+static int clk_mt8183_ipu_adl_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+
+	clk_data = mtk_alloc_clk_data(CLK_IPU_ADL_NR_CLK);
+
+	mtk_clk_register_gates(node, ipu_adl_clks, ARRAY_SIZE(ipu_adl_clks),
+			clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_ipu_adl[] = {
+	{ .compatible = "mediatek,mt8183-ipu_adl", },
+	{}
+};
+
+static struct platform_driver clk_mt8183_ipu_adl_drv = {
+	.probe = clk_mt8183_ipu_adl_probe,
+	.driver = {
+		.name = "clk-mt8183-ipu_adl",
+		.of_match_table = of_match_clk_mt8183_ipu_adl,
+	},
+};
+
+builtin_platform_driver(clk_mt8183_ipu_adl_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
new file mode 100644
index 0000000..7e0eef7
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
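+/*
+ * ipu_conn mixes register layouts: the first bank has dedicated
+ * set/clr companions, while the apb/axi banks are single words that
+ * are updated read-modify-write, some with inverted (enable-bit)
+ * polarity.
+ */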
+static const struct mtk_gate_regs ipu_conn_cg_regs = {
+	.set_ofs = 0x4,
+	.clr_ofs = 0x8,
+	.sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs ipu_conn_apb_cg_regs = {
+	.set_ofs = 0x10,
+	.clr_ofs = 0x10,
+	.sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs ipu_conn_axi_cg_regs = {
+	.set_ofs = 0x18,
+	.clr_ofs = 0x18,
+	.sta_ofs = 0x18,
+};
+
+static const struct mtk_gate_regs ipu_conn_axi1_cg_regs = {
+	.set_ofs = 0x1c,
+	.clr_ofs = 0x1c,
+	.sta_ofs = 0x1c,
+};
+
+static const struct mtk_gate_regs ipu_conn_axi2_cg_regs = {
+	.set_ofs = 0x20,
+	.clr_ofs = 0x20,
+	.sta_ofs = 0x20,
+};
+
+#define GATE_IPU_CONN(_id, _name, _parent, _shift)			\
+	GATE_MTK(_id, _name, _parent, &ipu_conn_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr)
+
+#define GATE_IPU_CONN_APB(_id, _name, _parent, _shift)			\
+	GATE_MTK(_id, _name, _parent, &ipu_conn_apb_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_no_setclr)
+
+#define GATE_IPU_CONN_AXI_I(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &ipu_conn_axi_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_IPU_CONN_AXI1_I(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &ipu_conn_axi1_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_IPU_CONN_AXI2_I(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &ipu_conn_axi2_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate ipu_conn_clks[] = {
+	GATE_IPU_CONN(CLK_IPU_CONN_IPU,
+		"ipu_conn_ipu", "dsp_sel", 0),
+	GATE_IPU_CONN(CLK_IPU_CONN_AHB,
+		"ipu_conn_ahb", "dsp_sel", 1),
+	GATE_IPU_CONN(CLK_IPU_CONN_AXI,
+		"ipu_conn_axi", "dsp_sel", 2),
+	GATE_IPU_CONN(CLK_IPU_CONN_ISP,
+		"ipu_conn_isp", "dsp_sel", 3),
+	GATE_IPU_CONN(CLK_IPU_CONN_CAM_ADL,
+		"ipu_conn_cam_adl", "dsp_sel", 4),
+	GATE_IPU_CONN(CLK_IPU_CONN_IMG_ADL,
+		"ipu_conn_img_adl", "dsp_sel", 5),
+	GATE_IPU_CONN_APB(CLK_IPU_CONN_DAP_RX,
+		"ipu_conn_dap_rx", "dsp1_sel", 0),
+	GATE_IPU_CONN_APB(CLK_IPU_CONN_APB2AXI,
+		"ipu_conn_apb2axi", "dsp1_sel", 3),
+	GATE_IPU_CONN_APB(CLK_IPU_CONN_APB2AHB,
+		"ipu_conn_apb2ahb", "dsp1_sel", 20),
+	GATE_IPU_CONN_AXI_I(CLK_IPU_CONN_IPU_CAB1TO2,
+		"ipu_conn_ipu_cab1to2", "dsp1_sel", 6),
+	GATE_IPU_CONN_AXI_I(CLK_IPU_CONN_IPU1_CAB1TO2,
+		"ipu_conn_ipu1_cab1to2", "dsp1_sel", 13),
+	GATE_IPU_CONN_AXI_I(CLK_IPU_CONN_IPU2_CAB1TO2,
+		"ipu_conn_ipu2_cab1to2", "dsp1_sel", 20),
+	GATE_IPU_CONN_AXI1_I(CLK_IPU_CONN_CAB3TO3,
+		"ipu_conn_cab3to3", "dsp1_sel", 0),
+	GATE_IPU_CONN_AXI2_I(CLK_IPU_CONN_CAB2TO1,
+		"ipu_conn_cab2to1", "dsp1_sel", 14),
+	GATE_IPU_CONN_AXI2_I(CLK_IPU_CONN_CAB3TO1_SLICE,
+		"ipu_conn_cab3to1_slice", "dsp1_sel", 17),
+};
+
+static int clk_mt8183_ipu_conn_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+
+	clk_data = mtk_alloc_clk_data(CLK_IPU_CONN_NR_CLK);
+
+	mtk_clk_register_gates(node, ipu_conn_clks, ARRAY_SIZE(ipu_conn_clks),
+			clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_ipu_conn[] = {
+	{ .compatible = "mediatek,mt8183-ipu_conn", },
+	{}
+};
+
+static struct platform_driver clk_mt8183_ipu_conn_drv = {
+	.probe = clk_mt8183_ipu_conn_probe,
+	.driver = {
+		.name = "clk-mt8183-ipu_conn",
+		.of_match_table = of_match_clk_mt8183_ipu_conn,
+	},
+};
+
+builtin_platform_driver(clk_mt8183_ipu_conn_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
new file mode 100644
index 0000000..99a6b02
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+	.set_ofs = 0x4,
+	.clr_ofs = 0x8,
+	.sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift)			\
+	GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mfg_clks[] = {
+	GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0)
+};
+
+static int clk_mt8183_mfg_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+
+	clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
+
+	mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks),
+			clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_mfg[] = {
+	{ .compatible = "mediatek,mt8183-mfgcfg", },
+	{}
+};
+
+static struct platform_driver clk_mt8183_mfg_drv = {
+	.probe = clk_mt8183_mfg_probe,
+	.driver = {
+		.name = "clk-mt8183-mfg",
+		.of_match_table = of_match_clk_mt8183_mfg,
+	},
+};
+
+builtin_platform_driver(clk_mt8183_mfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
new file mode 100644
index 0000000..720c696
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-mm.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+	.set_ofs = 0x104,
+	.clr_ofs = 0x108,
+	.sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+	.set_ofs = 0x114,
+	.clr_ofs = 0x118,
+	.sta_ofs = 0x110,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift)			\
+	GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr)
+
+#define GATE_MM1(_id, _name, _parent, _shift)			\
+	GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mm_clks[] = {
+	/* MM0 */
+	GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+	GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+	GATE_MM0(CLK_MM_SMI_LARB1, "mm_smi_larb1", "mm_sel", 2),
+	GATE_MM0(CLK_MM_GALS_COMM0, "mm_gals_comm0", "mm_sel", 3),
+	GATE_MM0(CLK_MM_GALS_COMM1, "mm_gals_comm1", "mm_sel", 4),
+	GATE_MM0(CLK_MM_GALS_CCU2MM, "mm_gals_ccu2mm", "mm_sel", 5),
+	GATE_MM0(CLK_MM_GALS_IPU12MM, "mm_gals_ipu12mm", "mm_sel", 6),
+	GATE_MM0(CLK_MM_GALS_IMG2MM, "mm_gals_img2mm", "mm_sel", 7),
+	GATE_MM0(CLK_MM_GALS_CAM2MM, "mm_gals_cam2mm", "mm_sel", 8),
+	GATE_MM0(CLK_MM_GALS_IPU2MM, "mm_gals_ipu2mm", "mm_sel", 9),
+	GATE_MM0(CLK_MM_MDP_DL_TXCK, "mm_mdp_dl_txck", "mm_sel", 10),
+	GATE_MM0(CLK_MM_IPU_DL_TXCK, "mm_ipu_dl_txck", "mm_sel", 11),
+	GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 12),
+	GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 13),
+	GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 14),
+	GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 15),
+	GATE_MM0(CLK_MM_MDP_TDSHP, "mm_mdp_tdshp", "mm_sel", 16),
+	GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 17),
+	GATE_MM0(CLK_MM_MDP_WDMA0, "mm_mdp_wdma0", "mm_sel", 18),
+	GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 19),
+	GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 20),
+	GATE_MM0(CLK_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "mm_sel", 21),
+	GATE_MM0(CLK_MM_DISP_OVL1_2L, "mm_disp_ovl1_2l", "mm_sel", 22),
+	GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 23),
+	GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 24),
+	GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 25),
+	GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 26),
+	GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "mm_sel", 27),
+	GATE_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "mm_sel", 28),
+	GATE_MM0(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "mm_sel", 29),
+	GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "mm_sel", 30),
+	GATE_MM0(CLK_MM_DISP_SPLIT, "mm_disp_split", "mm_sel", 31),
+	/* MM1 */
+	GATE_MM1(CLK_MM_DSI0_MM, "mm_dsi0_mm", "mm_sel", 0),
+	GATE_MM1(CLK_MM_DSI0_IF, "mm_dsi0_if", "mm_sel", 1),
+	GATE_MM1(CLK_MM_DPI_MM, "mm_dpi_mm", "mm_sel", 2),
+	GATE_MM1(CLK_MM_DPI_IF, "mm_dpi_if", "dpi0_sel", 3),
+	GATE_MM1(CLK_MM_FAKE_ENG2, "mm_fake_eng2", "mm_sel", 4),
+	GATE_MM1(CLK_MM_MDP_DL_RX, "mm_mdp_dl_rx", "mm_sel", 5),
+	GATE_MM1(CLK_MM_IPU_DL_RX, "mm_ipu_dl_rx", "mm_sel", 6),
+	GATE_MM1(CLK_MM_26M, "mm_26m", "f_f26m_ck", 7),
+	GATE_MM1(CLK_MM_MMSYS_R2Y, "mm_mmsys_r2y", "mm_sel", 8),
+	GATE_MM1(CLK_MM_DISP_RSZ, "mm_disp_rsz", "mm_sel", 9),
+	GATE_MM1(CLK_MM_MDP_AAL, "mm_mdp_aal", "mm_sel", 10),
+	GATE_MM1(CLK_MM_MDP_CCORR, "mm_mdp_ccorr", "mm_sel", 11),
+	GATE_MM1(CLK_MM_DBI_MM, "mm_dbi_mm", "mm_sel", 12),
+	GATE_MM1(CLK_MM_DBI_IF, "mm_dbi_if", "dpi0_sel", 13),
+};
+
+static int clk_mt8183_mm_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+
+	clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+
+	mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+			clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_mm[] = {
+	{ .compatible = "mediatek,mt8183-mmsys", },
+	{}
+};
+
+static struct platform_driver clk_mt8183_mm_drv = {
+	.probe = clk_mt8183_mm_probe,
+	.driver = {
+		.name = "clk-mt8183-mm",
+		.of_match_table = of_match_clk_mt8183_mm,
+	},
+};
+
+builtin_platform_driver(clk_mt8183_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c
new file mode 100644
index 0000000..6250fd1
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-vdec.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+	.set_ofs = 0x0,
+	.clr_ofs = 0x4,
+	.sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+	.set_ofs = 0x8,
+	.clr_ofs = 0xc,
+	.sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0_I(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC1_I(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate vdec_clks[] = {
+	/* VDEC0 */
+	GATE_VDEC0_I(CLK_VDEC_VDEC, "vdec_vdec", "mm_sel", 0),
+	/* VDEC1 */
+	GATE_VDEC1_I(CLK_VDEC_LARB1, "vdec_larb1", "mm_sel", 0),
+};
+
+static int clk_mt8183_vdec_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+
+	clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
+
+	mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
+			clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_vdec[] = {
+	{ .compatible = "mediatek,mt8183-vdecsys", },
+	{}
+};
+
+static struct platform_driver clk_mt8183_vdec_drv = {
+	.probe = clk_mt8183_vdec_probe,
+	.driver = {
+		.name = "clk-mt8183-vdec",
+		.of_match_table = of_match_clk_mt8183_vdec,
+	},
+};
+
+builtin_platform_driver(clk_mt8183_vdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c
new file mode 100644
index 0000000..6678ef0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-venc.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs venc_cg_regs = {
+	.set_ofs = 0x4,
+	.clr_ofs = 0x8,
+	.sta_ofs = 0x0,
+};
+
+#define GATE_VENC_I(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc_clks[] = {
+	GATE_VENC_I(CLK_VENC_LARB, "venc_larb",
+		"mm_sel", 0),
+	GATE_VENC_I(CLK_VENC_VENC, "venc_venc",
+		"mm_sel", 4),
+	GATE_VENC_I(CLK_VENC_JPGENC, "venc_jpgenc",
+		"mm_sel", 8),
+};
+
+static int clk_mt8183_venc_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+
+	clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
+
+	mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
+			clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_venc[] = {
+	{ .compatible = "mediatek,mt8183-vencsys", },
+	{}
+};
+
+static struct platform_driver clk_mt8183_venc_drv = {
+	.probe = clk_mt8183_venc_probe,
+	.driver = {
+		.name = "clk-mt8183-venc",
+		.of_match_table = of_match_clk_mt8183_venc,
+	},
+};
+
+builtin_platform_driver(clk_mt8183_venc_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
new file mode 100644
index 0000000..7ead438
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -0,0 +1,1298 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+/* Infra global controller reset set register */
+#define INFRA_RST0_SET_OFFSET		0x120
+
+static DEFINE_SPINLOCK(mt8183_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+	FIXED_CLK(CLK_TOP_CLK26M, "f_f26m_ck", "clk26m", 26000000),
+	FIXED_CLK(CLK_TOP_ULPOSC, "osc", NULL, 250000),
+	FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
+};
+
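+/*
+ * FACTOR(id, name, parent, mult, div) registers a fixed-factor clock
+ * running at parent * mult / div; the _d<N> entries below are plain
+ * dividers (mult == 1).
+ */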
+static const struct mtk_fixed_factor top_divs[] = {
+	FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
+		2),
+	FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
+		2),
+	FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
+		1),
+	FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "syspll_ck", 1,
+		2),
+	FACTOR(CLK_TOP_SYSPLL_D2_D2, "syspll_d2_d2", "syspll_d2", 1,
+		2),
+	FACTOR(CLK_TOP_SYSPLL_D2_D4, "syspll_d2_d4", "syspll_d2", 1,
+		4),
+	FACTOR(CLK_TOP_SYSPLL_D2_D8, "syspll_d2_d8", "syspll_d2", 1,
+		8),
+	FACTOR(CLK_TOP_SYSPLL_D2_D16, "syspll_d2_d16", "syspll_d2", 1,
+		16),
+	FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1,
+		3),
+	FACTOR(CLK_TOP_SYSPLL_D3_D2, "syspll_d3_d2", "syspll_d3", 1,
+		2),
+	FACTOR(CLK_TOP_SYSPLL_D3_D4, "syspll_d3_d4", "syspll_d3", 1,
+		4),
+	FACTOR(CLK_TOP_SYSPLL_D3_D8, "syspll_d3_d8", "syspll_d3", 1,
+		8),
+	FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1,
+		5),
+	FACTOR(CLK_TOP_SYSPLL_D5_D2, "syspll_d5_d2", "syspll_d5", 1,
+		2),
+	FACTOR(CLK_TOP_SYSPLL_D5_D4, "syspll_d5_d4", "syspll_d5", 1,
+		4),
+	FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1,
+		7),
+	FACTOR(CLK_TOP_SYSPLL_D7_D2, "syspll_d7_d2", "syspll_d7", 1,
+		2),
+	FACTOR(CLK_TOP_SYSPLL_D7_D4, "syspll_d7_d4", "syspll_d7", 1,
+		4),
+	FACTOR(CLK_TOP_UNIVPLL_CK, "univpll_ck", "univpll", 1,
+		1),
+	FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll_ck", 1,
+		2),
+	FACTOR(CLK_TOP_UNIVPLL_D2_D2, "univpll_d2_d2", "univpll_d2", 1,
+		2),
+	FACTOR(CLK_TOP_UNIVPLL_D2_D4, "univpll_d2_d4", "univpll_d2", 1,
+		4),
+	FACTOR(CLK_TOP_UNIVPLL_D2_D8, "univpll_d2_d8", "univpll_d2", 1,
+		8),
+	FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1,
+		3),
+	FACTOR(CLK_TOP_UNIVPLL_D3_D2, "univpll_d3_d2", "univpll_d3", 1,
+		2),
+	FACTOR(CLK_TOP_UNIVPLL_D3_D4, "univpll_d3_d4", "univpll_d3", 1,
+		4),
+	FACTOR(CLK_TOP_UNIVPLL_D3_D8, "univpll_d3_d8", "univpll_d3", 1,
+		8),
+	FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1,
+		5),
+	FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1,
+		2),
+	FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1,
+		4),
+	FACTOR(CLK_TOP_UNIVPLL_D5_D8, "univpll_d5_d8", "univpll_d5", 1,
+		8),
+	FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1,
+		7),
+	FACTOR(CLK_TOP_UNIVP_192M_CK, "univ_192m_ck", "univpll_192m", 1,
+		1),
+	FACTOR(CLK_TOP_UNIVP_192M_D2, "univ_192m_d2", "univ_192m_ck", 1,
+		2),
+	FACTOR(CLK_TOP_UNIVP_192M_D4, "univ_192m_d4", "univ_192m_ck", 1,
+		4),
+	FACTOR(CLK_TOP_UNIVP_192M_D8, "univ_192m_d8", "univ_192m_ck", 1,
+		8),
+	FACTOR(CLK_TOP_UNIVP_192M_D16, "univ_192m_d16", "univ_192m_ck", 1,
+		16),
+	FACTOR(CLK_TOP_UNIVP_192M_D32, "univ_192m_d32", "univ_192m_ck", 1,
+		32),
+	FACTOR(CLK_TOP_APLL1_CK, "apll1_ck", "apll1", 1,
+		1),
+	FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1", 1,
+		2),
+	FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1", 1,
+		4),
+	FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1", 1,
+		8),
+	FACTOR(CLK_TOP_APLL2_CK, "apll2_ck", "apll2", 1,
+		1),
+	FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2", 1,
+		2),
+	FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1,
+		4),
+	FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2", 1,
+		8),
+	FACTOR(CLK_TOP_TVDPLL_CK, "tvdpll_ck", "tvdpll", 1,
+		1),
+	FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1,
+		2),
+	FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1,
+		4),
+	FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll", 1,
+		8),
+	FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll", 1,
+		16),
+	FACTOR(CLK_TOP_MMPLL_CK, "mmpll_ck", "mmpll", 1,
+		1),
+	FACTOR(CLK_TOP_MMPLL_D4, "mmpll_d4", "mmpll", 1,
+		4),
+	FACTOR(CLK_TOP_MMPLL_D4_D2, "mmpll_d4_d2", "mmpll_d4", 1,
+		2),
+	FACTOR(CLK_TOP_MMPLL_D4_D4, "mmpll_d4_d4", "mmpll_d4", 1,
+		4),
+	FACTOR(CLK_TOP_MMPLL_D5, "mmpll_d5", "mmpll", 1,
+		5),
+	FACTOR(CLK_TOP_MMPLL_D5_D2, "mmpll_d5_d2", "mmpll_d5", 1,
+		2),
+	FACTOR(CLK_TOP_MMPLL_D5_D4, "mmpll_d5_d4", "mmpll_d5", 1,
+		4),
+	FACTOR(CLK_TOP_MMPLL_D6, "mmpll_d6", "mmpll", 1,
+		6),
+	FACTOR(CLK_TOP_MMPLL_D7, "mmpll_d7", "mmpll", 1,
+		7),
+	FACTOR(CLK_TOP_MFGPLL_CK, "mfgpll_ck", "mfgpll", 1,
+		1),
+	FACTOR(CLK_TOP_MSDCPLL_CK, "msdcpll_ck", "msdcpll", 1,
+		1),
+	FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1,
+		2),
+	FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1,
+		4),
+	FACTOR(CLK_TOP_MSDCPLL_D8, "msdcpll_d8", "msdcpll", 1,
+		8),
+	FACTOR(CLK_TOP_MSDCPLL_D16, "msdcpll_d16", "msdcpll", 1,
+		16),
+	FACTOR(CLK_TOP_AD_OSC_CK, "ad_osc_ck", "osc", 1,
+		1),
+	FACTOR(CLK_TOP_OSC_D2, "osc_d2", "osc", 1,
+		2),
+	FACTOR(CLK_TOP_OSC_D4, "osc_d4", "osc", 1,
+		4),
+	FACTOR(CLK_TOP_OSC_D8, "osc_d8", "osc", 1,
+		8),
+	FACTOR(CLK_TOP_OSC_D16, "osc_d16", "osc", 1,
+		16),
+	FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1,
+		2),
+	FACTOR(CLK_TOP_UNIVPLL_D3_D16, "univpll_d3_d16", "univpll_d3", 1,
+		16),
+};
+
+static const char * const axi_parents[] = {
+	"clk26m",
+	"syspll_d2_d4",
+	"syspll_d7",
+	"osc_d4"
+};
+
+static const char * const mm_parents[] = {
+	"clk26m",
+	"mmpll_d7",
+	"syspll_d3",
+	"univpll_d2_d2",
+	"syspll_d2_d2",
+	"syspll_d3_d2"
+};
+
+static const char * const img_parents[] = {
+	"clk26m",
+	"mmpll_d6",
+	"univpll_d3",
+	"syspll_d3",
+	"univpll_d2_d2",
+	"syspll_d2_d2",
+	"univpll_d3_d2",
+	"syspll_d3_d2"
+};
+
+static const char * const cam_parents[] = {
+	"clk26m",
+	"syspll_d2",
+	"mmpll_d6",
+	"syspll_d3",
+	"mmpll_d7",
+	"univpll_d3",
+	"univpll_d2_d2",
+	"syspll_d2_d2",
+	"syspll_d3_d2",
+	"univpll_d3_d2"
+};
+
+static const char * const dsp_parents[] = {
+	"clk26m",
+	"mmpll_d6",
+	"mmpll_d7",
+	"univpll_d3",
+	"syspll_d3",
+	"univpll_d2_d2",
+	"syspll_d2_d2",
+	"univpll_d3_d2",
+	"syspll_d3_d2"
+};
+
+static const char * const dsp1_parents[] = {
+	"clk26m",
+	"mmpll_d6",
+	"mmpll_d7",
+	"univpll_d3",
+	"syspll_d3",
+	"univpll_d2_d2",
+	"syspll_d2_d2",
+	"univpll_d3_d2",
+	"syspll_d3_d2"
+};
+
+static const char * const dsp2_parents[] = {
+	"clk26m",
+	"mmpll_d6",
+	"mmpll_d7",
+	"univpll_d3",
+	"syspll_d3",
+	"univpll_d2_d2",
+	"syspll_d2_d2",
+	"univpll_d3_d2",
+	"syspll_d3_d2"
+};
+
+static const char * const ipu_if_parents[] = {
+	"clk26m",
+	"mmpll_d6",
+	"mmpll_d7",
+	"univpll_d3",
+	"syspll_d3",
+	"univpll_d2_d2",
+	"syspll_d2_d2",
+	"univpll_d3_d2",
+	"syspll_d3_d2"
+};
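+/*
+ * dsp_sel, dsp1_sel, dsp2_sel and ipu_if_sel happen to share the same
+ * parent list; separate tables are kept so that each mux owns its own
+ * array, as elsewhere in this file.
+ */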
+
+static const char * const mfg_parents[] = {
+	"clk26m",
+	"mfgpll_ck",
+	"univpll_d3",
+	"syspll_d3"
+};
+
+static const char * const f52m_mfg_parents[] = {
+	"clk26m",
+	"univpll_d3_d2",
+	"univpll_d3_d4",
+	"univpll_d3_d8"
+};
+
+static const char * const camtg_parents[] = {
+	"clk26m",
+	"univ_192m_d8",
+	"univpll_d3_d8",
+	"univ_192m_d4",
+	"univpll_d3_d16",
+	"csw_f26m_ck_d2",
+	"univ_192m_d16",
+	"univ_192m_d32"
+};
+
+static const char * const camtg2_parents[] = {
+	"clk26m",
+	"univ_192m_d8",
+	"univpll_d3_d8",
+	"univ_192m_d4",
+	"univpll_d3_d16",
+	"csw_f26m_ck_d2",
+	"univ_192m_d16",
+	"univ_192m_d32"
+};
+
+static const char * const camtg3_parents[] = {
+	"clk26m",
+	"univ_192m_d8",
+	"univpll_d3_d8",
+	"univ_192m_d4",
+	"univpll_d3_d16",
+	"csw_f26m_ck_d2",
+	"univ_192m_d16",
+	"univ_192m_d32"
+};
+
+static const char * const camtg4_parents[] = {
+	"clk26m",
+	"univ_192m_d8",
+	"univpll_d3_d8",
+	"univ_192m_d4",
+	"univpll_d3_d16",
+	"csw_f26m_ck_d2",
+	"univ_192m_d16",
+	"univ_192m_d32"
+};
+
+static const char * const uart_parents[] = {
+	"clk26m",
+	"univpll_d3_d8"
+};
+
+static const char * const spi_parents[] = {
+	"clk26m",
+	"syspll_d5_d2",
+	"syspll_d3_d4",
+	"msdcpll_d4"
+};
+
+static const char * const msdc50_hclk_parents[] = {
+	"clk26m",
+	"syspll_d2_d2",
+	"syspll_d3_d2"
+};
+
+static const char * const msdc50_0_parents[] = {
+	"clk26m",
+	"msdcpll_ck",
+	"msdcpll_d2",
+	"univpll_d2_d4",
+	"syspll_d3_d2",
+	"univpll_d2_d2"
+};
+
+static const char * const msdc30_1_parents[] = {
+	"clk26m",
+	"univpll_d3_d2",
+	"syspll_d3_d2",
+	"syspll_d7",
+	"msdcpll_d2"
+};
+
+static const char * const msdc30_2_parents[] = {
+	"clk26m",
+	"univpll_d3_d2",
+	"syspll_d3_d2",
+	"syspll_d7",
+	"msdcpll_d2"
+};
+
+static const char * const audio_parents[] = {
+	"clk26m",
+	"syspll_d5_d4",
+	"syspll_d7_d4",
+	"syspll_d2_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+	"clk26m",
+	"syspll_d2_d4",
+	"syspll_d7_d2"
+};
+
+static const char * const pmicspi_parents[] = {
+	"clk26m",
+	"syspll_d2_d8",
+	"osc_d8"
+};
+
+static const char * const fpwrap_ulposc_parents[] = {
+	"clk26m",
+	"osc_d16",
+	"osc_d4",
+	"osc_d8"
+};
+
+static const char * const atb_parents[] = {
+	"clk26m",
+	"syspll_d2_d2",
+	"syspll_d5"
+};
+
+static const char * const sspm_parents[] = {
+	"clk26m",
+	"univpll_d2_d4",
+	"syspll_d2_d2",
+	"univpll_d2_d2",
+	"syspll_d3"
+};
+
+static const char * const dpi0_parents[] = {
+	"clk26m",
+	"tvdpll_d2",
+	"tvdpll_d4",
+	"tvdpll_d8",
+	"tvdpll_d16",
+	"univpll_d5_d2",
+	"univpll_d3_d4",
+	"syspll_d3_d4",
+	"univpll_d3_d8"
+};
+
+static const char * const scam_parents[] = {
+	"clk26m",
+	"syspll_d5_d2"
+};
+
+static const char * const disppwm_parents[] = {
+	"clk26m",
+	"univpll_d3_d4",
+	"osc_d2",
+	"osc_d4",
+	"osc_d16"
+};
+
+static const char * const usb_top_parents[] = {
+	"clk26m",
+	"univpll_d5_d4",
+	"univpll_d3_d4",
+	"univpll_d5_d2"
+};
+
+static const char * const ssusb_top_xhci_parents[] = {
+	"clk26m",
+	"univpll_d5_d4",
+	"univpll_d3_d4",
+	"univpll_d5_d2"
+};
+
+static const char * const spm_parents[] = {
+	"clk26m",
+	"syspll_d2_d8"
+};
+
+static const char * const i2c_parents[] = {
+	"clk26m",
+	"syspll_d2_d8",
+	"univpll_d5_d2"
+};
+
+static const char * const scp_parents[] = {
+	"clk26m",
+	"univpll_d2_d8",
+	"syspll_d5",
+	"syspll_d2_d2",
+	"univpll_d2_d2",
+	"syspll_d3",
+	"univpll_d3"
+};
+
+static const char * const seninf_parents[] = {
+	"clk26m",
+	"univpll_d2_d2",
+	"univpll_d3_d2",
+	"univpll_d2_d4"
+};
+
+static const char * const dxcc_parents[] = {
+	"clk26m",
+	"syspll_d2_d2",
+	"syspll_d2_d4",
+	"syspll_d2_d8"
+};
+
+static const char * const aud_engen1_parents[] = {
+	"clk26m",
+	"apll1_d2",
+	"apll1_d4",
+	"apll1_d8"
+};
+
+static const char * const aud_engen2_parents[] = {
+	"clk26m",
+	"apll2_d2",
+	"apll2_d4",
+	"apll2_d8"
+};
+
+static const char * const faes_ufsfde_parents[] = {
+	"clk26m",
+	"syspll_d2",
+	"syspll_d2_d2",
+	"syspll_d3",
+	"syspll_d2_d4",
+	"univpll_d3"
+};
+
+static const char * const fufs_parents[] = {
+	"clk26m",
+	"syspll_d2_d4",
+	"syspll_d2_d8",
+	"syspll_d2_d16"
+};
+
+static const char * const aud_1_parents[] = {
+	"clk26m",
+	"apll1_ck"
+};
+
+static const char * const aud_2_parents[] = {
+	"clk26m",
+	"apll2_ck"
+};
+
+/*
+ * CRITICAL CLOCKS:
+ * axi_sel is the main bus clock of the whole SoC.
+ * spm_sel is the clock of the always-on co-processor.
+ */
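+/*
+ * MUX_GATE_CLR_SET_UPD(id, name, parents, mux_ofs, set_ofs, clr_ofs,
+ * shift, width, gate_bit, upd_ofs, upd_bit): each mux has set/clr
+ * mirrors of its CLK_CFG word, an on-the-fly gate bit in the same word
+ * and an update-toggle bit in a CLK_CFG_UPDATE word (0x004/0x008 here).
+ */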
+static const struct mtk_mux top_muxes[] = {
+	/* CLK_CFG_0 */
+	MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_AXI, "axi_sel",
+		axi_parents, 0x40,
+		0x44, 0x48, 0, 2, 7, 0x004, 0, CLK_IS_CRITICAL),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MM, "mm_sel",
+		mm_parents, 0x40,
+		0x44, 0x48, 8, 3, 15, 0x004, 1),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_IMG, "img_sel",
+		img_parents, 0x40,
+		0x44, 0x48, 16, 3, 23, 0x004, 2),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAM, "cam_sel",
+		cam_parents, 0x40,
+		0x44, 0x48, 24, 4, 31, 0x004, 3),
+	/* CLK_CFG_1 */
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP, "dsp_sel",
+		dsp_parents, 0x50,
+		0x54, 0x58, 0, 4, 7, 0x004, 4),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP1, "dsp1_sel",
+		dsp1_parents, 0x50,
+		0x54, 0x58, 8, 4, 15, 0x004, 5),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP2, "dsp2_sel",
+		dsp2_parents, 0x50,
+		0x54, 0x58, 16, 4, 23, 0x004, 6),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_IPU_IF, "ipu_if_sel",
+		ipu_if_parents, 0x50,
+		0x54, 0x58, 24, 4, 31, 0x004, 7),
+	/* CLK_CFG_2 */
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MFG, "mfg_sel",
+		mfg_parents, 0x60,
+		0x64, 0x68, 0, 2, 7, 0x004, 8),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_F52M_MFG, "f52m_mfg_sel",
+		f52m_mfg_parents, 0x60,
+		0x64, 0x68, 8, 2, 15, 0x004, 9),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG, "camtg_sel",
+		camtg_parents, 0x60,
+		0x64, 0x68, 16, 3, 23, 0x004, 10),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG2, "camtg2_sel",
+		camtg2_parents, 0x60,
+		0x64, 0x68, 24, 3, 31, 0x004, 11),
+	/* CLK_CFG_3 */
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG3, "camtg3_sel",
+		camtg3_parents, 0x70,
+		0x74, 0x78, 0, 3, 7, 0x004, 12),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG4, "camtg4_sel",
+		camtg4_parents, 0x70,
+		0x74, 0x78, 8, 3, 15, 0x004, 13),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_UART, "uart_sel",
+		uart_parents, 0x70,
+		0x74, 0x78, 16, 1, 23, 0x004, 14),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SPI, "spi_sel",
+		spi_parents, 0x70,
+		0x74, 0x78, 24, 2, 31, 0x004, 15),
+	/* CLK_CFG_4 */
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC50_0_HCLK, "msdc50_hclk_sel",
+		msdc50_hclk_parents, 0x80,
+		0x84, 0x88, 0, 2, 7, 0x004, 16),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC50_0, "msdc50_0_sel",
+		msdc50_0_parents, 0x80,
+		0x84, 0x88, 8, 3, 15, 0x004, 17),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC30_1, "msdc30_1_sel",
+		msdc30_1_parents, 0x80,
+		0x84, 0x88, 16, 3, 23, 0x004, 18),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC30_2, "msdc30_2_sel",
+		msdc30_2_parents, 0x80,
+		0x84, 0x88, 24, 3, 31, 0x004, 19),
+	/* CLK_CFG_5 */
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUDIO, "audio_sel",
+		audio_parents, 0x90,
+		0x94, 0x98, 0, 2, 7, 0x004, 20),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_INTBUS, "aud_intbus_sel",
+		aud_intbus_parents, 0x90,
+		0x94, 0x98, 8, 2, 15, 0x004, 21),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_PMICSPI, "pmicspi_sel",
+		pmicspi_parents, 0x90,
+		0x94, 0x98, 16, 2, 23, 0x004, 22),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FPWRAP_ULPOSC, "fpwrap_ulposc_sel",
+		fpwrap_ulposc_parents, 0x90,
+		0x94, 0x98, 24, 2, 31, 0x004, 23),
+	/* CLK_CFG_6 */
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_ATB, "atb_sel",
+		atb_parents, 0xa0,
+		0xa4, 0xa8, 0, 2, 7, 0x004, 24),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SSPM, "sspm_sel",
+		sspm_parents, 0xa0,
+		0xa4, 0xa8, 8, 3, 15, 0x004, 25),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DPI0, "dpi0_sel",
+		dpi0_parents, 0xa0,
+		0xa4, 0xa8, 16, 4, 23, 0x004, 26),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCAM, "scam_sel",
+		scam_parents, 0xa0,
+		0xa4, 0xa8, 24, 1, 31, 0x004, 27),
+	/* CLK_CFG_7 */
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DISP_PWM, "disppwm_sel",
+		disppwm_parents, 0xb0,
+		0xb4, 0xb8, 0, 3, 7, 0x004, 28),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_USB_TOP, "usb_top_sel",
+		usb_top_parents, 0xb0,
+		0xb4, 0xb8, 8, 2, 15, 0x004, 29),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SSUSB_TOP_XHCI, "ssusb_top_xhci_sel",
+		ssusb_top_xhci_parents, 0xb0,
+		0xb4, 0xb8, 16, 2, 23, 0x004, 30),
+	MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_SPM, "spm_sel",
+		spm_parents, 0xb0,
+		0xb4, 0xb8, 24, 1, 31, 0x008, 0, CLK_IS_CRITICAL),
+	/* CLK_CFG_8 */
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_I2C, "i2c_sel",
+		i2c_parents, 0xc0,
+		0xc4, 0xc8, 0, 2, 7, 0x008, 1),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCP, "scp_sel",
+		scp_parents, 0xc0,
+		0xc4, 0xc8, 8, 3, 15, 0x008, 2),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SENINF, "seninf_sel",
+		seninf_parents, 0xc0,
+		0xc4, 0xc8, 16, 2, 23, 0x008, 3),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DXCC, "dxcc_sel",
+		dxcc_parents, 0xc0,
+		0xc4, 0xc8, 24, 2, 31, 0x008, 4),
+	/* CLK_CFG_9 */
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_ENG1, "aud_eng1_sel",
+		aud_engen1_parents, 0xd0,
+		0xd4, 0xd8, 0, 2, 7, 0x008, 5),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_ENG2, "aud_eng2_sel",
+		aud_engen2_parents, 0xd0,
+		0xd4, 0xd8, 8, 2, 15, 0x008, 6),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FAES_UFSFDE, "faes_ufsfde_sel",
+		faes_ufsfde_parents, 0xd0,
+		0xd4, 0xd8, 16, 3, 23, 0x008, 7),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FUFS, "fufs_sel",
+		fufs_parents, 0xd0,
+		0xd4, 0xd8, 24, 2, 31, 0x008, 8),
+	/* CLK_CFG_10 */
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_1, "aud_1_sel",
+		aud_1_parents, 0xe0,
+		0xe4, 0xe8, 0, 1, 7, 0x008, 9),
+	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_2, "aud_2_sel",
+		aud_2_parents, 0xe0,
+		0xe4, 0xe8, 8, 1, 15, 0x008, 10),
+};
+
+static const char * const apll_i2s0_parents[] = {
+	"aud_1_sel",
+	"aud_2_sel"
+};
+
+static const char * const apll_i2s1_parents[] = {
+	"aud_1_sel",
+	"aud_2_sel"
+};
+
+static const char * const apll_i2s2_parents[] = {
+	"aud_1_sel",
+	"aud_2_sel"
+};
+
+static const char * const apll_i2s3_parents[] = {
+	"aud_1_sel",
+	"aud_2_sel"
+};
+
+static const char * const apll_i2s4_parents[] = {
+	"aud_1_sel",
+	"aud_2_sel"
+};
+
+static const char * const apll_i2s5_parents[] = {
+	"aud_1_sel",
+	"aud_2_sel"
+};
+
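+/* All six apll_i2s*_sel muxes pick between the same two parents, aud_1_sel and aud_2_sel. */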
+static struct mtk_composite top_aud_muxes[] = {
+	MUX(CLK_TOP_MUX_APLL_I2S0, "apll_i2s0_sel", apll_i2s0_parents,
+		0x320, 8, 1),
+	MUX(CLK_TOP_MUX_APLL_I2S1, "apll_i2s1_sel", apll_i2s1_parents,
+		0x320, 9, 1),
+	MUX(CLK_TOP_MUX_APLL_I2S2, "apll_i2s2_sel", apll_i2s2_parents,
+		0x320, 10, 1),
+	MUX(CLK_TOP_MUX_APLL_I2S3, "apll_i2s3_sel", apll_i2s3_parents,
+		0x320, 11, 1),
+	MUX(CLK_TOP_MUX_APLL_I2S4, "apll_i2s4_sel", apll_i2s4_parents,
+		0x320, 12, 1),
+	MUX(CLK_TOP_MUX_APLL_I2S5, "apll_i2s5_sel", apll_i2s5_parents,
+		0x328, 20, 1),
+};
+
+static const char * const mcu_mp0_parents[] = {
+	"clk26m",
+	"armpll_ll",
+	"armpll_div_pll1",
+	"armpll_div_pll2"
+};
+
+static const char * const mcu_mp2_parents[] = {
+	"clk26m",
+	"armpll_l",
+	"armpll_div_pll1",
+	"armpll_div_pll2"
+};
+
+static const char * const mcu_bus_parents[] = {
+	"clk26m",
+	"ccipll",
+	"armpll_div_pll1",
+	"armpll_div_pll2"
+};
+
+static struct mtk_composite mcu_muxes[] = {
+	/* mp0_pll_divider_cfg */
+	MUX(CLK_MCU_MP0_SEL, "mcu_mp0_sel", mcu_mp0_parents, 0x7A0, 9, 2),
+	/* mp2_pll_divider_cfg */
+	MUX(CLK_MCU_MP2_SEL, "mcu_mp2_sel", mcu_mp2_parents, 0x7A8, 9, 2),
+	/* bus_pll_divider_cfg */
+	MUX(CLK_MCU_BUS_SEL, "mcu_bus_sel", mcu_bus_parents, 0x7C0, 9, 2),
+};
+
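+/*
+ * DIV_GATE() arguments: gate register and gate bit, then the divider
+ * register, divider field width and divider field shift.
+ */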
+static struct mtk_composite top_aud_divs[] = {
+	DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll_i2s0_sel",
+		0x320, 2, 0x324, 8, 0),
+	DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll_i2s1_sel",
+		0x320, 3, 0x324, 8, 8),
+	DIV_GATE(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll_i2s2_sel",
+		0x320, 4, 0x324, 8, 16),
+	DIV_GATE(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll_i2s3_sel",
+		0x320, 5, 0x324, 8, 24),
+	DIV_GATE(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll_i2s4_sel",
+		0x320, 6, 0x328, 8, 0),
+	DIV_GATE(CLK_TOP_APLL12_DIVB, "apll12_divb", "apll12_div4",
+		0x320, 7, 0x328, 8, 8),
+};
+
+static const struct mtk_gate_regs top_cg_regs = {
+	.set_ofs = 0x104,
+	.clr_ofs = 0x104,
+	.sta_ofs = 0x104,
+};
+
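+/*
+ * set/clr/sta above all map to the same register: this bank has no
+ * dedicated set/clear registers, so its gates go through the
+ * read-modify-write "no_setclr" ops with inverted polarity.
+ */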
+#define GATE_TOP(_id, _name, _parent, _shift)			\
+	GATE_MTK(_id, _name, _parent, &top_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate top_clks[] = {
+	/* TOP */
+	GATE_TOP(CLK_TOP_ARMPLL_DIV_PLL1, "armpll_div_pll1", "mainpll", 4),
+	GATE_TOP(CLK_TOP_ARMPLL_DIV_PLL2, "armpll_div_pll2", "univpll", 5),
+};
+
+static const struct mtk_gate_regs infra0_cg_regs = {
+	.set_ofs = 0x80,
+	.clr_ofs = 0x84,
+	.sta_ofs = 0x90,
+};
+
+static const struct mtk_gate_regs infra1_cg_regs = {
+	.set_ofs = 0x88,
+	.clr_ofs = 0x8c,
+	.sta_ofs = 0x94,
+};
+
+static const struct mtk_gate_regs infra2_cg_regs = {
+	.set_ofs = 0xa4,
+	.clr_ofs = 0xa8,
+	.sta_ofs = 0xac,
+};
+
+static const struct mtk_gate_regs infra3_cg_regs = {
+	.set_ofs = 0xc0,
+	.clr_ofs = 0xc4,
+	.sta_ofs = 0xc8,
+};
+
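+/*
+ * The four INFRA banks do have dedicated set/clear registers, so their
+ * gates use the write-only "setclr" ops instead.
+ */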
+#define GATE_INFRA0(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA1(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA2(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA3(_id, _name, _parent, _shift)		\
+	GATE_MTK(_id, _name, _parent, &infra3_cg_regs, _shift,	\
+		&mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate infra_clks[] = {
+	/* INFRA0 */
+	GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr",
+		"axi_sel", 0),
+	GATE_INFRA0(CLK_INFRA_PMIC_AP, "infra_pmic_ap",
+		"axi_sel", 1),
+	GATE_INFRA0(CLK_INFRA_PMIC_MD, "infra_pmic_md",
+		"axi_sel", 2),
+	GATE_INFRA0(CLK_INFRA_PMIC_CONN, "infra_pmic_conn",
+		"axi_sel", 3),
+	GATE_INFRA0(CLK_INFRA_SCPSYS, "infra_scp",
+		"scp_sel", 4),
+	GATE_INFRA0(CLK_INFRA_SEJ, "infra_sej",
+		"f_f26m_ck", 5),
+	GATE_INFRA0(CLK_INFRA_APXGPT, "infra_apxgpt",
+		"axi_sel", 6),
+	GATE_INFRA0(CLK_INFRA_ICUSB, "infra_icusb",
+		"axi_sel", 8),
+	GATE_INFRA0(CLK_INFRA_GCE, "infra_gce",
+		"axi_sel", 9),
+	GATE_INFRA0(CLK_INFRA_THERM, "infra_therm",
+		"axi_sel", 10),
+	GATE_INFRA0(CLK_INFRA_I2C0, "infra_i2c0",
+		"i2c_sel", 11),
+	GATE_INFRA0(CLK_INFRA_I2C1, "infra_i2c1",
+		"i2c_sel", 12),
+	GATE_INFRA0(CLK_INFRA_I2C2, "infra_i2c2",
+		"i2c_sel", 13),
+	GATE_INFRA0(CLK_INFRA_I2C3, "infra_i2c3",
+		"i2c_sel", 14),
+	GATE_INFRA0(CLK_INFRA_PWM_HCLK, "infra_pwm_hclk",
+		"axi_sel", 15),
+	GATE_INFRA0(CLK_INFRA_PWM1, "infra_pwm1",
+		"i2c_sel", 16),
+	GATE_INFRA0(CLK_INFRA_PWM2, "infra_pwm2",
+		"i2c_sel", 17),
+	GATE_INFRA0(CLK_INFRA_PWM3, "infra_pwm3",
+		"i2c_sel", 18),
+	GATE_INFRA0(CLK_INFRA_PWM4, "infra_pwm4",
+		"i2c_sel", 19),
+	GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
+		"i2c_sel", 21),
+	GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
+		"uart_sel", 22),
+	GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
+		"uart_sel", 23),
+	GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",
+		"uart_sel", 24),
+	GATE_INFRA0(CLK_INFRA_UART3, "infra_uart3",
+		"uart_sel", 25),
+	GATE_INFRA0(CLK_INFRA_GCE_26M, "infra_gce_26m",
+		"axi_sel", 27),
+	GATE_INFRA0(CLK_INFRA_CQ_DMA_FPC, "infra_cqdma_fpc",
+		"axi_sel", 28),
+	GATE_INFRA0(CLK_INFRA_BTIF, "infra_btif",
+		"axi_sel", 31),
+	/* INFRA1 */
+	GATE_INFRA1(CLK_INFRA_SPI0, "infra_spi0",
+		"spi_sel", 1),
+	GATE_INFRA1(CLK_INFRA_MSDC0, "infra_msdc0",
+		"msdc50_hclk_sel", 2),
+	GATE_INFRA1(CLK_INFRA_MSDC1, "infra_msdc1",
+		"axi_sel", 4),
+	GATE_INFRA1(CLK_INFRA_MSDC2, "infra_msdc2",
+		"axi_sel", 5),
+	GATE_INFRA1(CLK_INFRA_MSDC0_SCK, "infra_msdc0_sck",
+		"msdc50_0_sel", 6),
+	GATE_INFRA1(CLK_INFRA_DVFSRC, "infra_dvfsrc",
+		"f_f26m_ck", 7),
+	GATE_INFRA1(CLK_INFRA_GCPU, "infra_gcpu",
+		"axi_sel", 8),
+	GATE_INFRA1(CLK_INFRA_TRNG, "infra_trng",
+		"axi_sel", 9),
+	GATE_INFRA1(CLK_INFRA_AUXADC, "infra_auxadc",
+		"f_f26m_ck", 10),
+	GATE_INFRA1(CLK_INFRA_CPUM, "infra_cpum",
+		"axi_sel", 11),
+	GATE_INFRA1(CLK_INFRA_CCIF1_AP, "infra_ccif1_ap",
+		"axi_sel", 12),
+	GATE_INFRA1(CLK_INFRA_CCIF1_MD, "infra_ccif1_md",
+		"axi_sel", 13),
+	GATE_INFRA1(CLK_INFRA_AUXADC_MD, "infra_auxadc_md",
+		"f_f26m_ck", 14),
+	GATE_INFRA1(CLK_INFRA_MSDC1_SCK, "infra_msdc1_sck",
+		"msdc30_1_sel", 16),
+	GATE_INFRA1(CLK_INFRA_MSDC2_SCK, "infra_msdc2_sck",
+		"msdc30_2_sel", 17),
+	GATE_INFRA1(CLK_INFRA_AP_DMA, "infra_apdma",
+		"axi_sel", 18),
+	GATE_INFRA1(CLK_INFRA_XIU, "infra_xiu",
+		"axi_sel", 19),
+	GATE_INFRA1(CLK_INFRA_DEVICE_APC, "infra_device_apc",
+		"axi_sel", 20),
+	GATE_INFRA1(CLK_INFRA_CCIF_AP, "infra_ccif_ap",
+		"axi_sel", 23),
+	GATE_INFRA1(CLK_INFRA_DEBUGSYS, "infra_debugsys",
+		"axi_sel", 24),
+	GATE_INFRA1(CLK_INFRA_AUDIO, "infra_audio",
+		"axi_sel", 25),
+	GATE_INFRA1(CLK_INFRA_CCIF_MD, "infra_ccif_md",
+		"axi_sel", 26),
+	GATE_INFRA1(CLK_INFRA_DXCC_SEC_CORE, "infra_dxcc_sec_core",
+		"dxcc_sel", 27),
+	GATE_INFRA1(CLK_INFRA_DXCC_AO, "infra_dxcc_ao",
+		"dxcc_sel", 28),
+	GATE_INFRA1(CLK_INFRA_DEVMPU_BCLK, "infra_devmpu_bclk",
+		"axi_sel", 30),
+	GATE_INFRA1(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m",
+		"f_f26m_ck", 31),
+	/* INFRA2 */
+	GATE_INFRA2(CLK_INFRA_IRTX, "infra_irtx",
+		"f_f26m_ck", 0),
+	GATE_INFRA2(CLK_INFRA_USB, "infra_usb",
+		"usb_top_sel", 1),
+	GATE_INFRA2(CLK_INFRA_DISP_PWM, "infra_disppwm",
+		"axi_sel", 2),
+	GATE_INFRA2(CLK_INFRA_CLDMA_BCLK, "infra_cldma_bclk",
+		"axi_sel", 3),
+	GATE_INFRA2(CLK_INFRA_AUDIO_26M_BCLK, "infra_audio_26m_bclk",
+		"f_f26m_ck", 4),
+	GATE_INFRA2(CLK_INFRA_SPI1, "infra_spi1",
+		"spi_sel", 6),
+	GATE_INFRA2(CLK_INFRA_I2C4, "infra_i2c4",
+		"i2c_sel", 7),
+	GATE_INFRA2(CLK_INFRA_MODEM_TEMP_SHARE, "infra_md_tmp_share",
+		"f_f26m_ck", 8),
+	GATE_INFRA2(CLK_INFRA_SPI2, "infra_spi2",
+		"spi_sel", 9),
+	GATE_INFRA2(CLK_INFRA_SPI3, "infra_spi3",
+		"spi_sel", 10),
+	GATE_INFRA2(CLK_INFRA_UNIPRO_SCK, "infra_unipro_sck",
+		"ssusb_top_xhci_sel", 11),
+	GATE_INFRA2(CLK_INFRA_UNIPRO_TICK, "infra_unipro_tick",
+		"fufs_sel", 12),
+	GATE_INFRA2(CLK_INFRA_UFS_MP_SAP_BCLK, "infra_ufs_mp_sap_bck",
+		"fufs_sel", 13),
+	GATE_INFRA2(CLK_INFRA_MD32_BCLK, "infra_md32_bclk",
+		"axi_sel", 14),
+	GATE_INFRA2(CLK_INFRA_SSPM, "infra_sspm",
+		"sspm_sel", 15),
+	GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist",
+		"axi_sel", 16),
+	GATE_INFRA2(CLK_INFRA_SSPM_BUS_HCLK, "infra_sspm_bus_hclk",
+		"axi_sel", 17),
+	GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5",
+		"i2c_sel", 18),
+	GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter",
+		"i2c_sel", 19),
+	GATE_INFRA2(CLK_INFRA_I2C5_IMM, "infra_i2c5_imm",
+		"i2c_sel", 20),
+	GATE_INFRA2(CLK_INFRA_I2C1_ARBITER, "infra_i2c1_arbiter",
+		"i2c_sel", 21),
+	GATE_INFRA2(CLK_INFRA_I2C1_IMM, "infra_i2c1_imm",
+		"i2c_sel", 22),
+	GATE_INFRA2(CLK_INFRA_I2C2_ARBITER, "infra_i2c2_arbiter",
+		"i2c_sel", 23),
+	GATE_INFRA2(CLK_INFRA_I2C2_IMM, "infra_i2c2_imm",
+		"i2c_sel", 24),
+	GATE_INFRA2(CLK_INFRA_SPI4, "infra_spi4",
+		"spi_sel", 25),
+	GATE_INFRA2(CLK_INFRA_SPI5, "infra_spi5",
+		"spi_sel", 26),
+	GATE_INFRA2(CLK_INFRA_CQ_DMA, "infra_cqdma",
+		"axi_sel", 27),
+	GATE_INFRA2(CLK_INFRA_UFS, "infra_ufs",
+		"fufs_sel", 28),
+	GATE_INFRA2(CLK_INFRA_AES_UFSFDE, "infra_aes_ufsfde",
+		"faes_ufsfde_sel", 29),
+	GATE_INFRA2(CLK_INFRA_UFS_TICK, "infra_ufs_tick",
+		"fufs_sel", 30),
+	/* INFRA3 */
+	GATE_INFRA3(CLK_INFRA_MSDC0_SELF, "infra_msdc0_self",
+		"msdc50_0_sel", 0),
+	GATE_INFRA3(CLK_INFRA_MSDC1_SELF, "infra_msdc1_self",
+		"msdc50_0_sel", 1),
+	GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self",
+		"msdc50_0_sel", 2),
+	GATE_INFRA3(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self",
+		"f_f26m_ck", 3),
+	GATE_INFRA3(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self",
+		"f_f26m_ck", 4),
+	GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi",
+		"axi_sel", 5),
+	GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6",
+		"i2c_sel", 6),
+	GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0",
+		"msdc50_hclk_sel", 7),
+	GATE_INFRA3(CLK_INFRA_MD_MSDC0, "infra_md_msdc0",
+		"msdc50_hclk_sel", 8),
+	GATE_INFRA3(CLK_INFRA_CCIF2_AP, "infra_ccif2_ap",
+		"axi_sel", 16),
+	GATE_INFRA3(CLK_INFRA_CCIF2_MD, "infra_ccif2_md",
+		"axi_sel", 17),
+	GATE_INFRA3(CLK_INFRA_CCIF3_AP, "infra_ccif3_ap",
+		"axi_sel", 18),
+	GATE_INFRA3(CLK_INFRA_CCIF3_MD, "infra_ccif3_md",
+		"axi_sel", 19),
+	GATE_INFRA3(CLK_INFRA_SEJ_F13M, "infra_sej_f13m",
+		"f_f26m_ck", 20),
+	GATE_INFRA3(CLK_INFRA_AES_BCLK, "infra_aes_bclk",
+		"axi_sel", 21),
+	GATE_INFRA3(CLK_INFRA_I2C7, "infra_i2c7",
+		"i2c_sel", 22),
+	GATE_INFRA3(CLK_INFRA_I2C8, "infra_i2c8",
+		"i2c_sel", 23),
+	GATE_INFRA3(CLK_INFRA_FBIST2FPC, "infra_fbist2fpc",
+		"msdc50_0_sel", 24),
+};
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+	.set_ofs = 0x20,
+	.clr_ofs = 0x20,
+	.sta_ofs = 0x20,
+};
+
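+/*
+ * Like the TOP gates, the APMIXED gates live in a single register and
+ * use the inverted read-modify-write ops.
+ */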
+#define GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, _flags)	\
+	GATE_MTK_FLAGS(_id, _name, _parent, &apmixed_cg_regs,		\
+		_shift, &mtk_clk_gate_ops_no_setclr_inv, _flags)
+
+#define GATE_APMIXED(_id, _name, _parent, _shift)	\
+	GATE_APMIXED_FLAGS(_id, _name, _parent, _shift,	0)
+
+/*
+ * CRITICAL CLOCK:
+ * apmixed_appll26m is the top-most clock gate for all PLLs.
+ */
+static const struct mtk_gate apmixed_clks[] = {
+	/* APMIXED */
+	GATE_APMIXED(CLK_APMIXED_SSUSB_26M, "apmixed_ssusb26m",
+		"f_f26m_ck", 4),
+	GATE_APMIXED_FLAGS(CLK_APMIXED_APPLL_26M, "apmixed_appll26m",
+		"f_f26m_ck", 5, CLK_IS_CRITICAL),
+	GATE_APMIXED(CLK_APMIXED_MIPIC0_26M, "apmixed_mipic026m",
+		"f_f26m_ck", 6),
+	GATE_APMIXED(CLK_APMIXED_MDPLLGP_26M, "apmixed_mdpll26m",
+		"f_f26m_ck", 7),
+	GATE_APMIXED(CLK_APMIXED_MMSYS_26M, "apmixed_mmsys26m",
+		"f_f26m_ck", 8),
+	GATE_APMIXED(CLK_APMIXED_UFS_26M, "apmixed_ufs26m",
+		"f_f26m_ck", 9),
+	GATE_APMIXED(CLK_APMIXED_MIPIC1_26M, "apmixed_mipic126m",
+		"f_f26m_ck", 11),
+	GATE_APMIXED(CLK_APMIXED_MEMPLL_26M, "apmixed_mempll26m",
+		"f_f26m_ck", 13),
+	GATE_APMIXED(CLK_APMIXED_CLKSQ_LVPLL_26M, "apmixed_lvpll26m",
+		"f_f26m_ck", 14),
+	GATE_APMIXED(CLK_APMIXED_MIPID0_26M, "apmixed_mipid026m",
+		"f_f26m_ck", 16),
+	GATE_APMIXED(CLK_APMIXED_MIPID1_26M, "apmixed_mipid126m",
+		"f_f26m_ck", 17),
+};
+
+#define MT8183_PLL_FMAX		(3800UL * MHZ)
+#define MT8183_PLL_FMIN		(1500UL * MHZ)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags,		\
+			_rst_bar_mask, _pcwbits, _pcwibits, _pd_reg,	\
+			_pd_shift, _tuner_reg, _tuner_en_reg,		\
+			_tuner_en_bit, _pcw_reg, _pcw_shift,		\
+			_pcw_chg_reg, _div_table) {			\
+		.id = _id,						\
+		.name = _name,						\
+		.reg = _reg,						\
+		.pwr_reg = _pwr_reg,					\
+		.en_mask = _en_mask,					\
+		.flags = _flags,					\
+		.rst_bar_mask = _rst_bar_mask,				\
+		.fmax = MT8183_PLL_FMAX,				\
+		.fmin = MT8183_PLL_FMIN,				\
+		.pcwbits = _pcwbits,					\
+		.pcwibits = _pcwibits,					\
+		.pd_reg = _pd_reg,					\
+		.pd_shift = _pd_shift,					\
+		.tuner_reg = _tuner_reg,				\
+		.tuner_en_reg = _tuner_en_reg,				\
+		.tuner_en_bit = _tuner_en_bit,				\
+		.pcw_reg = _pcw_reg,					\
+		.pcw_shift = _pcw_shift,				\
+		.pcw_chg_reg = _pcw_chg_reg,				\
+		.div_table = _div_table,				\
+	}
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags,		\
+			_rst_bar_mask, _pcwbits, _pcwibits, _pd_reg,	\
+			_pd_shift, _tuner_reg, _tuner_en_reg,		\
+			_tuner_en_bit, _pcw_reg, _pcw_shift,		\
+			_pcw_chg_reg)					\
+		PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags,	\
+			_rst_bar_mask, _pcwbits, _pcwibits, _pd_reg,	\
+			_pd_shift, _tuner_reg, _tuner_en_reg,		\
+			_tuner_en_bit, _pcw_reg, _pcw_shift,		\
+			_pcw_chg_reg, NULL)
+
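+/*
+ * Each entry in the div tables below pairs a post-divider step (2^div)
+ * with the highest output frequency that step is used for; each step
+ * down the table halves the range.
+ */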
+static const struct mtk_pll_div_table armpll_div_table[] = {
+	{ .div = 0, .freq = MT8183_PLL_FMAX },
+	{ .div = 1, .freq = 1500 * MHZ },
+	{ .div = 2, .freq = 750 * MHZ },
+	{ .div = 3, .freq = 375 * MHZ },
+	{ .div = 4, .freq = 187500000 },
+	{ } /* sentinel */
+};
+
+static const struct mtk_pll_div_table mfgpll_div_table[] = {
+	{ .div = 0, .freq = MT8183_PLL_FMAX },
+	{ .div = 1, .freq = 1600 * MHZ },
+	{ .div = 2, .freq = 800 * MHZ },
+	{ .div = 3, .freq = 400 * MHZ },
+	{ .div = 4, .freq = 200 * MHZ },
+	{ } /* sentinel */
+};
+
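+/*
+ * armpll_ll, armpll_l and ccipll clock the CPU clusters and the CCI
+ * interconnect; PLL_AO marks them critical so they are never shut down.
+ */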
+static const struct mtk_pll_data plls[] = {
+	PLL_B(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, 0x00000001,
+		HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0204, 24, 0x0, 0x0, 0,
+		0x0204, 0, 0, armpll_div_table),
+	PLL_B(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x0210, 0x021C, 0x00000001,
+		HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0214, 24, 0x0, 0x0, 0,
+		0x0214, 0, 0, armpll_div_table),
+	PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x0290, 0x029C, 0x00000001,
+		HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0294, 24, 0x0, 0x0, 0,
+		0x0294, 0, 0),
+	PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0x00000001,
+		HAVE_RST_BAR, BIT(24), 22, 8, 0x0224, 24, 0x0, 0x0, 0,
+		0x0224, 0, 0),
+	PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0230, 0x023C, 0x00000001,
+		HAVE_RST_BAR, BIT(24), 22, 8, 0x0234, 24, 0x0, 0x0, 0,
+		0x0234, 0, 0),
+	PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0x00000001,
+		0, 0, 22, 8, 0x0244, 24, 0x0, 0x0, 0, 0x0244, 0, 0,
+		mfgpll_div_table),
+	PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0x00000001,
+		0, 0, 22, 8, 0x0254, 24, 0x0, 0x0, 0, 0x0254, 0, 0),
+	PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0260, 0x026C, 0x00000001,
+		0, 0, 22, 8, 0x0264, 24, 0x0, 0x0, 0, 0x0264, 0, 0),
+	PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0270, 0x027C, 0x00000001,
+		HAVE_RST_BAR, BIT(23), 22, 8, 0x0274, 24, 0x0, 0x0, 0,
+		0x0274, 0, 0),
+	PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0x00000001,
+		0, 0, 32, 8, 0x02A0, 1, 0x02A8, 0x0014, 0, 0x02A4, 0, 0x02A0),
+	PLL(CLK_APMIXED_APLL2, "apll2", 0x02b4, 0x02c4, 0x00000001,
+		0, 0, 32, 8, 0x02B4, 1, 0x02BC, 0x0014, 1, 0x02B8, 0, 0x02B4),
+};
+
+static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+
+	clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+
+	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+	mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks),
+		clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int clk_mt8183_top_probe(struct platform_device *pdev)
+{
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	void __iomem *base;
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+	mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+		clk_data);
+
+	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+
+	mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
+		node, &mt8183_clk_lock, clk_data);
+
+	mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
+		base, &mt8183_clk_lock, clk_data);
+
+	mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
+		base, &mt8183_clk_lock, clk_data);
+
+	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+		clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int clk_mt8183_infra_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+	int r;
+
+	clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+
+	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+		clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+	if (r) {
+		dev_err(&pdev->dev,
+			"%s(): could not register clock provider: %d\n",
+			__func__, r);
+		return r;
+	}
+
+	mtk_register_reset_controller_set_clr(node, 4, INFRA_RST0_SET_OFFSET);
+
+	return r;
+}
+
+static int clk_mt8183_mcu_probe(struct platform_device *pdev)
+{
+	struct clk_onecell_data *clk_data;
+	struct device_node *node = pdev->dev.of_node;
+	void __iomem *base;
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
+
+	mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
+			&mt8183_clk_lock, clk_data);
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183[] = {
+	{
+		.compatible = "mediatek,mt8183-apmixedsys",
+		.data = clk_mt8183_apmixed_probe,
+	}, {
+		.compatible = "mediatek,mt8183-topckgen",
+		.data = clk_mt8183_top_probe,
+	}, {
+		.compatible = "mediatek,mt8183-infracfg",
+		.data = clk_mt8183_infra_probe,
+	}, {
+		.compatible = "mediatek,mt8183-mcucfg",
+		.data = clk_mt8183_mcu_probe,
+	}, {
+		/* sentinel */
+	}
+};
+
+static int clk_mt8183_probe(struct platform_device *pdev)
+{
+	int (*clk_probe)(struct platform_device *pdev);
+	int r;
+
+	clk_probe = of_device_get_match_data(&pdev->dev);
+	if (!clk_probe)
+		return -EINVAL;
+
+	r = clk_probe(pdev);
+	if (r)
+		dev_err(&pdev->dev,
+			"could not register clock provider: %s: %d\n",
+			pdev->name, r);
+
+	return r;
+}
+
+static struct platform_driver clk_mt8183_drv = {
+	.probe = clk_mt8183_probe,
+	.driver = {
+		.name = "clk-mt8183",
+		.of_match_table = of_match_clk_mt8183,
+	},
+};
+
+static int __init clk_mt8183_init(void)
+{
+	return platform_driver_register(&clk_mt8183_drv);
+}
+
+arch_initcall(clk_mt8183_init);
diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c
new file mode 100644
index 0000000..6ab3a06
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8516-aud.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *         Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8516-clk.h>
+
+static const struct mtk_gate_regs aud_cg_regs = {
+	.set_ofs = 0x0,
+	.clr_ofs = 0x0,
+	.sta_ofs = 0x0,
+};
+
+#define GATE_AUD(_id, _name, _parent, _shift) {	\
+		.id = _id,			\
+		.name = _name,			\
+		.parent_name = _parent,		\
+		.regs = &aud_cg_regs,		\
+		.shift = _shift,		\
+		.ops = &mtk_clk_gate_ops_no_setclr,		\
+	}
+
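+/*
+ * All audsys gates sit in a single register at offset 0 (set/clr/sta
+ * are identical above), hence the read-modify-write "no_setclr" ops.
+ */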
+static const struct mtk_gate aud_clks[] __initconst = {
+	GATE_AUD(CLK_AUD_AFE, "aud_afe", "clk26m_ck", 2),
+	GATE_AUD(CLK_AUD_I2S, "aud_i2s", "i2s_infra_bck", 6),
+	GATE_AUD(CLK_AUD_22M, "aud_22m", "rg_aud_engen1", 8),
+	GATE_AUD(CLK_AUD_24M, "aud_24m", "rg_aud_engen2", 9),
+	GATE_AUD(CLK_AUD_INTDIR, "aud_intdir", "rg_aud_spdif_in", 15),
+	GATE_AUD(CLK_AUD_APLL2_TUNER, "aud_apll2_tuner", "rg_aud_engen2", 18),
+	GATE_AUD(CLK_AUD_APLL_TUNER, "aud_apll_tuner", "rg_aud_engen1", 19),
+	GATE_AUD(CLK_AUD_HDMI, "aud_hdmi", "apll12_div4", 20),
+	GATE_AUD(CLK_AUD_SPDF, "aud_spdf", "apll12_div6", 21),
+	GATE_AUD(CLK_AUD_ADC, "aud_adc", "aud_afe", 24),
+	GATE_AUD(CLK_AUD_DAC, "aud_dac", "aud_afe", 25),
+	GATE_AUD(CLK_AUD_DAC_PREDIS, "aud_dac_predis", "aud_afe", 26),
+	GATE_AUD(CLK_AUD_TML, "aud_tml", "aud_afe", 27),
+};
+
+static void __init mtk_audsys_init(struct device_node *node)
+{
+	struct clk_onecell_data *clk_data;
+	int r;
+
+	clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
+
+	mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+	if (r)
+		pr_err("%s(): could not register clock provider: %d\n",
+			__func__, r);
+}
+CLK_OF_DECLARE(mtk_audsys, "mediatek,mt8516-audsys", mtk_audsys_init);
diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
new file mode 100644
index 0000000..26fe43c
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8516.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *         Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8516-clk.h>
+
+static DEFINE_SPINLOCK(mt8516_clk_lock);
+
+static const struct mtk_fixed_clk fixed_clks[] __initconst = {
+	FIXED_CLK(CLK_TOP_CLK_NULL, "clk_null", NULL, 0),
+	FIXED_CLK(CLK_TOP_I2S_INFRA_BCK, "i2s_infra_bck", "clk_null", 26000000),
+	FIXED_CLK(CLK_TOP_MEMPLL, "mempll", "clk26m", 800000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] __initconst = {
+	FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "mempll", 1, 1),
+	FACTOR(CLK_TOP_MAINPLL_D2, "mainpll_d2", "mainpll", 1, 2),
+	FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+	FACTOR(CLK_TOP_MAINPLL_D8, "mainpll_d8", "mainpll", 1, 8),
+	FACTOR(CLK_TOP_MAINPLL_D16, "mainpll_d16", "mainpll", 1, 16),
+	FACTOR(CLK_TOP_MAINPLL_D11, "mainpll_d11", "mainpll", 1, 11),
+	FACTOR(CLK_TOP_MAINPLL_D22, "mainpll_d22", "mainpll", 1, 22),
+	FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+	FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+	FACTOR(CLK_TOP_MAINPLL_D12, "mainpll_d12", "mainpll", 1, 12),
+	FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+	FACTOR(CLK_TOP_MAINPLL_D10, "mainpll_d10", "mainpll", 1, 10),
+	FACTOR(CLK_TOP_MAINPLL_D20, "mainpll_d20", "mainpll", 1, 20),
+	FACTOR(CLK_TOP_MAINPLL_D40, "mainpll_d40", "mainpll", 1, 40),
+	FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+	FACTOR(CLK_TOP_MAINPLL_D14, "mainpll_d14", "mainpll", 1, 14),
+	FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+	FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+	FACTOR(CLK_TOP_UNIVPLL_D8, "univpll_d8", "univpll", 1, 8),
+	FACTOR(CLK_TOP_UNIVPLL_D16, "univpll_d16", "univpll", 1, 16),
+	FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+	FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+	FACTOR(CLK_TOP_UNIVPLL_D12, "univpll_d12", "univpll", 1, 12),
+	FACTOR(CLK_TOP_UNIVPLL_D24, "univpll_d24", "univpll", 1, 24),
+	FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+	FACTOR(CLK_TOP_UNIVPLL_D20, "univpll_d20", "univpll", 1, 20),
+	FACTOR(CLK_TOP_MMPLL380M, "mmpll380m", "mmpll", 1, 1),
+	FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+	FACTOR(CLK_TOP_MMPLL_200M, "mmpll_200m", "mmpll", 1, 3),
+	FACTOR(CLK_TOP_USB_PHY48M, "usb_phy48m_ck", "univpll", 1, 26),
+	FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+	FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1, 2),
+	FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "rg_apll1_d2_en", 1, 2),
+	FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "rg_apll1_d4_en", 1, 2),
+	FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+	FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1, 2),
+	FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "rg_apll2_d2_en", 1, 2),
+	FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "rg_apll2_d4_en", 1, 2),
+	FACTOR(CLK_TOP_CLK26M, "clk26m_ck", "clk26m", 1, 1),
+	FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "clk26m", 1, 2),
+	FACTOR(CLK_TOP_AHB_INFRA_D2, "ahb_infra_d2", "ahb_infra_sel", 1, 2),
+	FACTOR(CLK_TOP_NFI1X, "nfi1x_ck", "nfi2x_pad_sel", 1, 2),
+	FACTOR(CLK_TOP_ETH_D2, "eth_d2_ck", "eth_sel", 1, 2),
+};
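+
+/*
+ * Note the chaining above: apll1_d4/apll1_d8 (and the apll2 equivalents)
+ * take the rg_apll*_d*_en gates from TOP4 below as parents, so each
+ * divide-by-two stage can be gated individually.
+ */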
+
+static const char * const uart0_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d24"
+};
+
+static const char * const ahb_infra_parents[] __initconst = {
+	"clk_null",
+	"clk26m_ck",
+	"mainpll_d11",
+	"clk_null",
+	"mainpll_d12",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"mainpll_d10"
+};
+
+static const char * const msdc0_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d6",
+	"mainpll_d8",
+	"univpll_d8",
+	"mainpll_d16",
+	"mmpll_200m",
+	"mainpll_d12",
+	"mmpll_d2"
+};
+
+static const char * const uart1_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d24"
+};
+
+static const char * const msdc1_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d6",
+	"mainpll_d8",
+	"univpll_d8",
+	"mainpll_d16",
+	"mmpll_200m",
+	"mainpll_d12",
+	"mmpll_d2"
+};
+
+static const char * const pmicspi_parents[] __initconst = {
+	"univpll_d20",
+	"usb_phy48m_ck",
+	"univpll_d16",
+	"clk26m_ck"
+};
+
+static const char * const qaxi_aud26m_parents[] __initconst = {
+	"clk26m_ck",
+	"ahb_infra_sel"
+};
+
+static const char * const aud_intbus_parents[] __initconst = {
+	"clk_null",
+	"clk26m_ck",
+	"mainpll_d22",
+	"clk_null",
+	"mainpll_d11"
+};
+
+static const char * const nfi2x_pad_parents[] __initconst = {
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk26m_ck",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"mainpll_d12",
+	"mainpll_d8",
+	"clk_null",
+	"mainpll_d6",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"mainpll_d4",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"clk_null",
+	"mainpll_d10",
+	"mainpll_d7",
+	"clk_null",
+	"mainpll_d5"
+};
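+
+/*
+ * The "clk_null" entries above are placeholders: the nfi2x_pad selector
+ * field is 7 bits wide and most of its encodings have no real parent,
+ * so the table keeps the parent index aligned with the hardware values.
+ */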
+
+static const char * const nfi1x_pad_parents[] __initconst = {
+	"ahb_infra_sel",
+	"nfi1x_ck"
+};
+
+static const char * const ddrphycfg_parents[] __initconst = {
+	"clk26m_ck",
+	"mainpll_d16"
+};
+
+static const char * const usb_78m_parents[] __initconst = {
+	"clk_null",
+	"clk26m_ck",
+	"univpll_d16",
+	"clk_null",
+	"mainpll_d20"
+};
+
+static const char * const spinor_parents[] __initconst = {
+	"clk26m_d2",
+	"clk26m_ck",
+	"mainpll_d40",
+	"univpll_d24",
+	"univpll_d20",
+	"mainpll_d20",
+	"mainpll_d16",
+	"univpll_d12"
+};
+
+static const char * const msdc2_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d6",
+	"mainpll_d8",
+	"univpll_d8",
+	"mainpll_d16",
+	"mmpll_200m",
+	"mainpll_d12",
+	"mmpll_d2"
+};
+
+static const char * const eth_parents[] __initconst = {
+	"clk26m_ck",
+	"mainpll_d40",
+	"univpll_d24",
+	"univpll_d20",
+	"mainpll_d20"
+};
+
+static const char * const aud1_parents[] __initconst = {
+	"clk26m_ck",
+	"apll1_ck"
+};
+
+static const char * const aud2_parents[] __initconst = {
+	"clk26m_ck",
+	"apll2_ck"
+};
+
+static const char * const aud_engen1_parents[] __initconst = {
+	"clk26m_ck",
+	"rg_apll1_d2_en",
+	"rg_apll1_d4_en",
+	"rg_apll1_d8_en"
+};
+
+static const char * const aud_engen2_parents[] __initconst = {
+	"clk26m_ck",
+	"rg_apll2_d2_en",
+	"rg_apll2_d4_en",
+	"rg_apll2_d8_en"
+};
+
+static const char * const i2c_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d20",
+	"univpll_d16",
+	"univpll_d12"
+};
+
+static const char * const aud_i2s0_m_parents[] __initconst = {
+	"rg_aud1",
+	"rg_aud2"
+};
+
+static const char * const pwm_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d12"
+};
+
+static const char * const spi_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d12",
+	"univpll_d8",
+	"univpll_d6"
+};
+
+static const char * const aud_spdifin_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d2"
+};
+
+static const char * const uart2_parents[] __initconst = {
+	"clk26m_ck",
+	"univpll_d24"
+};
+
+static const char * const bsi_parents[] __initconst = {
+	"clk26m_ck",
+	"mainpll_d10",
+	"mainpll_d12",
+	"mainpll_d20"
+};
+
+static const char * const dbg_atclk_parents[] __initconst = {
+	"clk_null",
+	"clk26m_ck",
+	"mainpll_d5",
+	"clk_null",
+	"univpll_d5"
+};
+
+static const char * const csw_nfiecc_parents[] __initconst = {
+	"clk_null",
+	"mainpll_d7",
+	"mainpll_d6",
+	"clk_null",
+	"mainpll_d5"
+};
+
+static const char * const nfiecc_parents[] __initconst = {
+	"clk_null",
+	"nfi2x_pad_sel",
+	"mainpll_d4",
+	"clk_null",
+	"csw_nfiecc_sel"
+};
+
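+/* MUX() arguments: selector register, field shift, field width. */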
+static struct mtk_composite top_muxes[] __initdata = {
+	/* CLK_MUX_SEL0 */
+	MUX(CLK_TOP_UART0_SEL, "uart0_sel", uart0_parents,
+		0x000, 0, 1),
+	MUX(CLK_TOP_AHB_INFRA_SEL, "ahb_infra_sel", ahb_infra_parents,
+		0x000, 4, 4),
+	MUX(CLK_TOP_MSDC0_SEL, "msdc0_sel", msdc0_parents,
+		0x000, 11, 3),
+	MUX(CLK_TOP_UART1_SEL, "uart1_sel", uart1_parents,
+		0x000, 19, 1),
+	MUX(CLK_TOP_MSDC1_SEL, "msdc1_sel", msdc1_parents,
+		0x000, 20, 3),
+	MUX(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents,
+		0x000, 24, 2),
+	MUX(CLK_TOP_QAXI_AUD26M_SEL, "qaxi_aud26m_sel", qaxi_aud26m_parents,
+		0x000, 26, 1),
+	MUX(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+		0x000, 27, 3),
+	/* CLK_MUX_SEL1 */
+	MUX(CLK_TOP_NFI2X_PAD_SEL, "nfi2x_pad_sel", nfi2x_pad_parents,
+		0x004, 0, 7),
+	MUX(CLK_TOP_NFI1X_PAD_SEL, "nfi1x_pad_sel", nfi1x_pad_parents,
+		0x004, 7, 1),
+	MUX(CLK_TOP_USB_78M_SEL, "usb_78m_sel", usb_78m_parents,
+		0x004, 20, 3),
+	/* CLK_MUX_SEL8 */
+	MUX(CLK_TOP_SPINOR_SEL, "spinor_sel", spinor_parents,
+		0x040, 0, 3),
+	MUX(CLK_TOP_MSDC2_SEL, "msdc2_sel", msdc2_parents,
+		0x040, 3, 3),
+	MUX(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
+		0x040, 6, 3),
+	MUX(CLK_TOP_AUD1_SEL, "aud1_sel", aud1_parents,
+		0x040, 22, 1),
+	MUX(CLK_TOP_AUD2_SEL, "aud2_sel", aud2_parents,
+		0x040, 23, 1),
+	MUX(CLK_TOP_AUD_ENGEN1_SEL, "aud_engen1_sel", aud_engen1_parents,
+		0x040, 24, 2),
+	MUX(CLK_TOP_AUD_ENGEN2_SEL, "aud_engen2_sel", aud_engen2_parents,
+		0x040, 26, 2),
+	MUX(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents,
+		0x040, 28, 2),
+	/* CLK_SEL_9 */
+	MUX(CLK_TOP_AUD_I2S0_M_SEL, "aud_i2s0_m_sel", aud_i2s0_m_parents,
+		0x044, 12, 1),
+	MUX(CLK_TOP_AUD_I2S1_M_SEL, "aud_i2s1_m_sel", aud_i2s0_m_parents,
+		0x044, 13, 1),
+	MUX(CLK_TOP_AUD_I2S2_M_SEL, "aud_i2s2_m_sel", aud_i2s0_m_parents,
+		0x044, 14, 1),
+	MUX(CLK_TOP_AUD_I2S3_M_SEL, "aud_i2s3_m_sel", aud_i2s0_m_parents,
+		0x044, 15, 1),
+	MUX(CLK_TOP_AUD_I2S4_M_SEL, "aud_i2s4_m_sel", aud_i2s0_m_parents,
+		0x044, 16, 1),
+	MUX(CLK_TOP_AUD_I2S5_M_SEL, "aud_i2s5_m_sel", aud_i2s0_m_parents,
+		0x044, 17, 1),
+	MUX(CLK_TOP_AUD_SPDIF_B_SEL, "aud_spdif_b_sel", aud_i2s0_m_parents,
+		0x044, 18, 1),
+	/* CLK_MUX_SEL13 */
+	MUX(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents,
+		0x07c, 0, 1),
+	MUX(CLK_TOP_SPI_SEL, "spi_sel", spi_parents,
+		0x07c, 1, 2),
+	MUX(CLK_TOP_AUD_SPDIFIN_SEL, "aud_spdifin_sel", aud_spdifin_parents,
+		0x07c, 3, 1),
+	MUX(CLK_TOP_UART2_SEL, "uart2_sel", uart2_parents,
+		0x07c, 4, 1),
+	MUX(CLK_TOP_BSI_SEL, "bsi_sel", bsi_parents,
+		0x07c, 5, 2),
+	MUX(CLK_TOP_DBG_ATCLK_SEL, "dbg_atclk_sel", dbg_atclk_parents,
+		0x07c, 7, 3),
+	MUX(CLK_TOP_CSW_NFIECC_SEL, "csw_nfiecc_sel", csw_nfiecc_parents,
+		0x07c, 10, 3),
+	MUX(CLK_TOP_NFIECC_SEL, "nfiecc_sel", nfiecc_parents,
+		0x07c, 13, 3),
+};
+
+static const char * const ifr_mux1_parents[] __initconst = {
+	"clk26m_ck",
+	"armpll",
+	"univpll",
+	"mainpll_d2"
+};
+
+static const char * const ifr_eth_25m_parents[] __initconst = {
+	"eth_d2_ck",
+	"rg_eth"
+};
+
+static const char * const ifr_i2c0_parents[] __initconst = {
+	"ahb_infra_d2",
+	"rg_i2c"
+};
+
+static const struct mtk_composite ifr_muxes[] __initconst = {
+	MUX(CLK_IFR_MUX1_SEL, "ifr_mux1_sel", ifr_mux1_parents, 0x000,
+		2, 2),
+	MUX(CLK_IFR_ETH_25M_SEL, "ifr_eth_25m_sel", ifr_eth_25m_parents, 0x080,
+		0, 1),
+	MUX(CLK_IFR_I2C0_SEL, "ifr_i2c0_sel", ifr_i2c0_parents, 0x080,
+		1, 1),
+	MUX(CLK_IFR_I2C1_SEL, "ifr_i2c1_sel", ifr_i2c0_parents, 0x080,
+		2, 1),
+	MUX(CLK_IFR_I2C2_SEL, "ifr_i2c2_sel", ifr_i2c0_parents, 0x080,
+		3, 1),
+};
+
+#define DIV_ADJ(_id, _name, _parent, _reg, _shift, _width) {	\
+		.id = _id,					\
+		.name = _name,					\
+		.parent_name = _parent,				\
+		.div_reg = _reg,				\
+		.div_shift = _shift,				\
+		.div_width = _width,				\
+}
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV0, "apll12_ck_div0", "aud_i2s0_m_sel",
+		0x0048, 0, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV1, "apll12_ck_div1", "aud_i2s1_m_sel",
+		0x0048, 8, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV2, "apll12_ck_div2", "aud_i2s2_m_sel",
+		0x0048, 16, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV3, "apll12_ck_div3", "aud_i2s3_m_sel",
+		0x0048, 24, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV4, "apll12_ck_div4", "aud_i2s4_m_sel",
+		0x004c, 0, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV4B, "apll12_ck_div4b", "apll12_div4",
+		0x004c, 8, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV5, "apll12_ck_div5", "aud_i2s5_m_sel",
+		0x004c, 16, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV5B, "apll12_ck_div5b", "apll12_div5",
+		0x004c, 24, 8),
+	DIV_ADJ(CLK_TOP_APLL12_CK_DIV6, "apll12_ck_div6", "aud_spdif_b_sel",
+		0x0078, 0, 8),
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+	.set_ofs = 0x54,
+	.clr_ofs = 0x84,
+	.sta_ofs = 0x24,
+};
+
+static const struct mtk_gate_regs top2_cg_regs = {
+	.set_ofs = 0x6c,
+	.clr_ofs = 0x9c,
+	.sta_ofs = 0x3c,
+};
+
+static const struct mtk_gate_regs top3_cg_regs = {
+	.set_ofs = 0xa0,
+	.clr_ofs = 0xb0,
+	.sta_ofs = 0x70,
+};
+
+static const struct mtk_gate_regs top4_cg_regs = {
+	.set_ofs = 0xa4,
+	.clr_ofs = 0xb4,
+	.sta_ofs = 0x74,
+};
+
+static const struct mtk_gate_regs top5_cg_regs = {
+	.set_ofs = 0x44,
+	.clr_ofs = 0x44,
+	.sta_ofs = 0x44,
+};
+
+#define GATE_TOP1(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top1_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr,	\
+	}
+
+#define GATE_TOP2(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top2_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr,	\
+	}
+
+#define GATE_TOP2_I(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top2_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+	}
+
+#define GATE_TOP3(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top3_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr,	\
+	}
+
+#define GATE_TOP4_I(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top4_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+	}
+
+#define GATE_TOP5(_id, _name, _parent, _shift) {	\
+		.id = _id,				\
+		.name = _name,				\
+		.parent_name = _parent,			\
+		.regs = &top5_cg_regs,			\
+		.shift = _shift,			\
+		.ops = &mtk_clk_gate_ops_no_setclr,	\
+	}
+
+static const struct mtk_gate top_clks[] __initconst = {
+	/* TOP1 */
+	GATE_TOP1(CLK_TOP_THEM, "them", "ahb_infra_sel", 1),
+	GATE_TOP1(CLK_TOP_APDMA, "apdma", "ahb_infra_sel", 2),
+	GATE_TOP1(CLK_TOP_I2C0, "i2c0", "ifr_i2c0_sel", 3),
+	GATE_TOP1(CLK_TOP_I2C1, "i2c1", "ifr_i2c1_sel", 4),
+	GATE_TOP1(CLK_TOP_AUXADC1, "auxadc1", "ahb_infra_sel", 5),
+	GATE_TOP1(CLK_TOP_NFI, "nfi", "nfi1x_pad_sel", 6),
+	GATE_TOP1(CLK_TOP_NFIECC, "nfiecc", "rg_nfiecc", 7),
+	GATE_TOP1(CLK_TOP_DEBUGSYS, "debugsys", "rg_dbg_atclk", 8),
+	GATE_TOP1(CLK_TOP_PWM, "pwm", "ahb_infra_sel", 9),
+	GATE_TOP1(CLK_TOP_UART0, "uart0", "uart0_sel", 10),
+	GATE_TOP1(CLK_TOP_UART1, "uart1", "uart1_sel", 11),
+	GATE_TOP1(CLK_TOP_BTIF, "btif", "ahb_infra_sel", 12),
+	GATE_TOP1(CLK_TOP_USB, "usb", "usb_78m", 13),
+	GATE_TOP1(CLK_TOP_FLASHIF_26M, "flashif_26m", "clk26m_ck", 14),
+	GATE_TOP1(CLK_TOP_AUXADC2, "auxadc2", "ahb_infra_sel", 15),
+	GATE_TOP1(CLK_TOP_I2C2, "i2c2", "ifr_i2c2_sel", 16),
+	GATE_TOP1(CLK_TOP_MSDC0, "msdc0", "msdc0_sel", 17),
+	GATE_TOP1(CLK_TOP_MSDC1, "msdc1", "msdc1_sel", 18),
+	GATE_TOP1(CLK_TOP_NFI2X, "nfi2x", "nfi2x_pad_sel", 19),
+	GATE_TOP1(CLK_TOP_PMICWRAP_AP, "pwrap_ap", "clk26m_ck", 20),
+	GATE_TOP1(CLK_TOP_SEJ, "sej", "ahb_infra_sel", 21),
+	GATE_TOP1(CLK_TOP_MEMSLP_DLYER, "memslp_dlyer", "clk26m_ck", 22),
+	GATE_TOP1(CLK_TOP_SPI, "spi", "spi_sel", 23),
+	GATE_TOP1(CLK_TOP_APXGPT, "apxgpt", "clk26m_ck", 24),
+	GATE_TOP1(CLK_TOP_AUDIO, "audio", "clk26m_ck", 25),
+	GATE_TOP1(CLK_TOP_PMICWRAP_MD, "pwrap_md", "clk26m_ck", 27),
+	GATE_TOP1(CLK_TOP_PMICWRAP_CONN, "pwrap_conn", "clk26m_ck", 28),
+	GATE_TOP1(CLK_TOP_PMICWRAP_26M, "pwrap_26m", "clk26m_ck", 29),
+	GATE_TOP1(CLK_TOP_AUX_ADC, "aux_adc", "clk26m_ck", 30),
+	GATE_TOP1(CLK_TOP_AUX_TP, "aux_tp", "clk26m_ck", 31),
+	/* TOP2 */
+	GATE_TOP2(CLK_TOP_MSDC2, "msdc2", "ahb_infra_sel", 0),
+	GATE_TOP2(CLK_TOP_RBIST, "rbist", "univpll_d12", 1),
+	GATE_TOP2(CLK_TOP_NFI_BUS, "nfi_bus", "ahb_infra_sel", 2),
+	GATE_TOP2(CLK_TOP_GCE, "gce", "ahb_infra_sel", 4),
+	GATE_TOP2(CLK_TOP_TRNG, "trng", "ahb_infra_sel", 5),
+	GATE_TOP2(CLK_TOP_SEJ_13M, "sej_13m", "clk26m_ck", 6),
+	GATE_TOP2(CLK_TOP_AES, "aes", "ahb_infra_sel", 7),
+	GATE_TOP2(CLK_TOP_PWM_B, "pwm_b", "rg_pwm_infra", 8),
+	GATE_TOP2(CLK_TOP_PWM1_FB, "pwm1_fb", "rg_pwm_infra", 9),
+	GATE_TOP2(CLK_TOP_PWM2_FB, "pwm2_fb", "rg_pwm_infra", 10),
+	GATE_TOP2(CLK_TOP_PWM3_FB, "pwm3_fb", "rg_pwm_infra", 11),
+	GATE_TOP2(CLK_TOP_PWM4_FB, "pwm4_fb", "rg_pwm_infra", 12),
+	GATE_TOP2(CLK_TOP_PWM5_FB, "pwm5_fb", "rg_pwm_infra", 13),
+	GATE_TOP2(CLK_TOP_USB_1P, "usb_1p", "usb_78m", 14),
+	GATE_TOP2(CLK_TOP_FLASHIF_FREERUN, "flashif_freerun", "ahb_infra_sel",
+		15),
+	GATE_TOP2(CLK_TOP_66M_ETH, "eth_66m", "ahb_infra_d2", 19),
+	GATE_TOP2(CLK_TOP_133M_ETH, "eth_133m", "ahb_infra_sel", 20),
+	GATE_TOP2(CLK_TOP_FETH_25M, "feth_25m", "ifr_eth_25m_sel", 21),
+	GATE_TOP2(CLK_TOP_FETH_50M, "feth_50m", "rg_eth", 22),
+	GATE_TOP2(CLK_TOP_FLASHIF_AXI, "flashif_axi", "ahb_infra_sel", 23),
+	GATE_TOP2(CLK_TOP_USBIF, "usbif", "ahb_infra_sel", 24),
+	GATE_TOP2(CLK_TOP_UART2, "uart2", "rg_uart2", 25),
+	GATE_TOP2(CLK_TOP_BSI, "bsi", "ahb_infra_sel", 26),
+	GATE_TOP2_I(CLK_TOP_MSDC0_INFRA, "msdc0_infra", "msdc0", 28),
+	GATE_TOP2_I(CLK_TOP_MSDC1_INFRA, "msdc1_infra", "msdc1", 29),
+	GATE_TOP2_I(CLK_TOP_MSDC2_INFRA, "msdc2_infra", "rg_msdc2", 30),
+	GATE_TOP2(CLK_TOP_USB_78M, "usb_78m", "usb_78m_sel", 31),
+	/* TOP3 */
+	GATE_TOP3(CLK_TOP_RG_SPINOR, "rg_spinor", "spinor_sel", 0),
+	GATE_TOP3(CLK_TOP_RG_MSDC2, "rg_msdc2", "msdc2_sel", 1),
+	GATE_TOP3(CLK_TOP_RG_ETH, "rg_eth", "eth_sel", 2),
+	GATE_TOP3(CLK_TOP_RG_AUD1, "rg_aud1", "aud1_sel", 8),
+	GATE_TOP3(CLK_TOP_RG_AUD2, "rg_aud2", "aud2_sel", 9),
+	GATE_TOP3(CLK_TOP_RG_AUD_ENGEN1, "rg_aud_engen1", "aud_engen1_sel", 10),
+	GATE_TOP3(CLK_TOP_RG_AUD_ENGEN2, "rg_aud_engen2", "aud_engen2_sel", 11),
+	GATE_TOP3(CLK_TOP_RG_I2C, "rg_i2c", "i2c_sel", 12),
+	GATE_TOP3(CLK_TOP_RG_PWM_INFRA, "rg_pwm_infra", "pwm_sel", 13),
+	GATE_TOP3(CLK_TOP_RG_AUD_SPDIF_IN, "rg_aud_spdif_in", "aud_spdifin_sel",
+		14),
+	GATE_TOP3(CLK_TOP_RG_UART2, "rg_uart2", "uart2_sel", 15),
+	GATE_TOP3(CLK_TOP_RG_BSI, "rg_bsi", "bsi_sel", 16),
+	GATE_TOP3(CLK_TOP_RG_DBG_ATCLK, "rg_dbg_atclk", "dbg_atclk_sel", 17),
+	GATE_TOP3(CLK_TOP_RG_NFIECC, "rg_nfiecc", "nfiecc_sel", 18),
+	/* TOP4 */
+	GATE_TOP4_I(CLK_TOP_RG_APLL1_D2_EN, "rg_apll1_d2_en", "apll1_d2", 8),
+	GATE_TOP4_I(CLK_TOP_RG_APLL1_D4_EN, "rg_apll1_d4_en", "apll1_d4", 9),
+	GATE_TOP4_I(CLK_TOP_RG_APLL1_D8_EN, "rg_apll1_d8_en", "apll1_d8", 10),
+	GATE_TOP4_I(CLK_TOP_RG_APLL2_D2_EN, "rg_apll2_d2_en", "apll2_d2", 11),
+	GATE_TOP4_I(CLK_TOP_RG_APLL2_D4_EN, "rg_apll2_d4_en", "apll2_d4", 12),
+	GATE_TOP4_I(CLK_TOP_RG_APLL2_D8_EN, "rg_apll2_d8_en", "apll2_d8", 13),
+	/* TOP5 */
+	GATE_TOP5(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll12_ck_div0", 0),
+	GATE_TOP5(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll12_ck_div1", 1),
+	GATE_TOP5(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll12_ck_div2", 2),
+	GATE_TOP5(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll12_ck_div3", 3),
+	GATE_TOP5(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll12_ck_div4", 4),
+	GATE_TOP5(CLK_TOP_APLL12_DIV4B, "apll12_div4b", "apll12_ck_div4b", 5),
+	GATE_TOP5(CLK_TOP_APLL12_DIV5, "apll12_div5", "apll12_ck_div5", 6),
+	GATE_TOP5(CLK_TOP_APLL12_DIV5B, "apll12_div5b", "apll12_ck_div5b", 7),
+	GATE_TOP5(CLK_TOP_APLL12_DIV6, "apll12_div6", "apll12_ck_div6", 8),
+};
+
+static void __init mtk_topckgen_init(struct device_node *node)
+{
+	struct clk_onecell_data *clk_data;
+	int r;
+	void __iomem *base;
+
+	base = of_iomap(node, 0);
+	if (!base) {
+		pr_err("%s(): ioremap failed\n", __func__);
+		return;
+	}
+
+	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+	mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+				    clk_data);
+	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data);
+
+	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+		&mt8516_clk_lock, clk_data);
+	mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+				base, &mt8516_clk_lock, clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+	if (r)
+		pr_err("%s(): could not register clock provider: %d\n",
+			__func__, r);
+}
+CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8516-topckgen", mtk_topckgen_init);
+
+static void __init mtk_infracfg_init(struct device_node *node)
+{
+	struct clk_onecell_data *clk_data;
+	int r;
+	void __iomem *base;
+
+	base = of_iomap(node, 0);
+	if (!base) {
+		pr_err("%s(): ioremap failed\n", __func__);
+		return;
+	}
+
+	clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
+
+	mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
+		&mt8516_clk_lock, clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+	if (r)
+		pr_err("%s(): could not register clock provider: %d\n",
+			__func__, r);
+}
+CLK_OF_DECLARE(mtk_infracfg, "mediatek,mt8516-infracfg", mtk_infracfg_init);
+
+#define MT8516_PLL_FMAX		(1502UL * MHZ)
+
+#define CON0_MT8516_RST_BAR	BIT(27)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,	\
+			_pd_reg, _pd_shift, _tuner_reg, _pcw_reg,	\
+			_pcw_shift, _div_table) {			\
+		.id = _id,						\
+		.name = _name,						\
+		.reg = _reg,						\
+		.pwr_reg = _pwr_reg,					\
+		.en_mask = _en_mask,					\
+		.flags = _flags,					\
+		.rst_bar_mask = CON0_MT8516_RST_BAR,			\
+		.fmax = MT8516_PLL_FMAX,				\
+		.pcwbits = _pcwbits,					\
+		.pd_reg = _pd_reg,					\
+		.pd_shift = _pd_shift,					\
+		.tuner_reg = _tuner_reg,				\
+		.pcw_reg = _pcw_reg,					\
+		.pcw_shift = _pcw_shift,				\
+		.div_table = _div_table,				\
+	}
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,	\
+			_pd_reg, _pd_shift, _tuner_reg, _pcw_reg,	\
+			_pcw_shift)					\
+		PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+			_pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+			NULL)
+
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+	{ .div = 0, .freq = MT8516_PLL_FMAX },
+	{ .div = 1, .freq = 1000000000 },
+	{ .div = 2, .freq = 604500000 },
+	{ .div = 3, .freq = 253500000 },
+	{ .div = 4, .freq = 126750000 },
+	{ } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+	PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0x00000001, 0,
+		21, 0x0104, 24, 0, 0x0104, 0),
+	PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0x00000001,
+		HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0),
+	PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000001,
+		HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0),
+	PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0x00000001, 0,
+		21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table),
+	PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0x00000001, 0,
+		31, 0x0180, 1, 0x0194, 0x0184, 0),
+	PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0x00000001, 0,
+		31, 0x01A0, 1, 0x01B4, 0x01A4, 0),
+};
+
+static void __init mtk_apmixedsys_init(struct device_node *node)
+{
+	struct clk_onecell_data *clk_data;
+	void __iomem *base;
+	int r;
+
+	base = of_iomap(node, 0);
+	if (!base) {
+		pr_err("%s(): ioremap failed\n", __func__);
+		return;
+	}
+
+	clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+
+	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+	if (r)
+		pr_err("%s(): could not register clock provider: %d\n",
+			__func__, r);
+}
+CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8516-apmixedsys",
+		mtk_apmixedsys_init);
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 9c0ae42..ef41041 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -130,7 +130,7 @@
 				gate->regs->set_ofs,
 				gate->regs->clr_ofs,
 				gate->regs->sta_ofs,
-				gate->shift, gate->ops);
+				gate->shift, gate->ops, gate->flags);
 
 		if (IS_ERR(clk)) {
 			pr_err("Failed to register clk %s: %ld\n",
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index f83c2bb..f8a9746 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -158,6 +158,7 @@
 	const struct mtk_gate_regs *regs;
 	int shift;
 	const struct clk_ops *ops;
+	unsigned long flags;
 };
 
 int mtk_clk_register_gates(struct device_node *node,
@@ -214,10 +215,13 @@
 	unsigned int flags;
 	const struct clk_ops *ops;
 	u32 rst_bar_mask;
+	unsigned long fmin;
 	unsigned long fmax;
 	int pcwbits;
+	int pcwibits;
 	uint32_t pcw_reg;
 	int pcw_shift;
+	uint32_t pcw_chg_reg;
 	const struct mtk_pll_div_table *div_table;
 	const char *parent_name;
 };
@@ -232,4 +236,7 @@
 void mtk_register_reset_controller(struct device_node *np,
 			unsigned int num_regs, int regofs);
 
+void mtk_register_reset_controller_set_clr(struct device_node *np,
+	unsigned int num_regs, int regofs);
+
 #endif /* __DRV_CLK_MTK_H */
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
new file mode 100644
index 0000000..76f9cd0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
+{
+	return container_of(hw, struct mtk_clk_mux, hw);
+}
+
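+/*
+ * The gate bit is active-low: clearing it ungates the mux output, as
+ * mtk_clk_mux_is_enabled() below confirms by treating a cleared bit as
+ * enabled.
+ */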
+static int mtk_clk_mux_enable(struct clk_hw *hw)
+{
+	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+	u32 mask = BIT(mux->data->gate_shift);
+
+	return regmap_update_bits(mux->regmap, mux->data->mux_ofs,
+			mask, ~mask);
+}
+
+static void mtk_clk_mux_disable(struct clk_hw *hw)
+{
+	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+	u32 mask = BIT(mux->data->gate_shift);
+
+	regmap_update_bits(mux->regmap, mux->data->mux_ofs, mask, mask);
+}
+
+static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
+{
+	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+
+	return regmap_write(mux->regmap, mux->data->clr_ofs,
+			BIT(mux->data->gate_shift));
+}
+
+static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
+{
+	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+
+	regmap_write(mux->regmap, mux->data->set_ofs,
+			BIT(mux->data->gate_shift));
+}
+
+static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
+{
+	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+	u32 val;
+
+	regmap_read(mux->regmap, mux->data->mux_ofs, &val);
+
+	return (val & BIT(mux->data->gate_shift)) == 0;
+}
+
+static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
+{
+	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+	u32 mask = GENMASK(mux->data->mux_width - 1, 0);
+	u32 val;
+
+	regmap_read(mux->regmap, mux->data->mux_ofs, &val);
+	val = (val >> mux->data->mux_shift) & mask;
+
+	return val;
+}
+
+static int mtk_clk_mux_set_parent_lock(struct clk_hw *hw, u8 index)
+{
+	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+	u32 mask = GENMASK(mux->data->mux_width - 1, 0);
+	unsigned long flags = 0;
+
+	if (mux->lock)
+		spin_lock_irqsave(mux->lock, flags);
+	else
+		__acquire(mux->lock);
+
+	regmap_update_bits(mux->regmap, mux->data->mux_ofs,
+		mask << mux->data->mux_shift,
+		index << mux->data->mux_shift);
+
+	if (mux->lock)
+		spin_unlock_irqrestore(mux->lock, flags);
+	else
+		__release(mux->lock);
+
+	return 0;
+}
+
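+/*
+ * Reprogram the selector through the dedicated clear/set registers and,
+ * when the mux has an update bit (upd_shift >= 0), kick it so the new
+ * selection takes effect. Registers are only written when the selector
+ * actually changes.
+ */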
+static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
+{
+	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+	u32 mask = GENMASK(mux->data->mux_width - 1, 0);
+	u32 val, orig;
+	unsigned long flags = 0;
+
+	if (mux->lock)
+		spin_lock_irqsave(mux->lock, flags);
+	else
+		__acquire(mux->lock);
+
+	regmap_read(mux->regmap, mux->data->mux_ofs, &orig);
+	val = (orig & ~(mask << mux->data->mux_shift))
+			| (index << mux->data->mux_shift);
+
+	if (val != orig) {
+		regmap_write(mux->regmap, mux->data->clr_ofs,
+				mask << mux->data->mux_shift);
+		regmap_write(mux->regmap, mux->data->set_ofs,
+				index << mux->data->mux_shift);
+
+		if (mux->data->upd_shift >= 0)
+			regmap_write(mux->regmap, mux->data->upd_ofs,
+					BIT(mux->data->upd_shift));
+	}
+
+	if (mux->lock)
+		spin_unlock_irqrestore(mux->lock, flags);
+	else
+		__release(mux->lock);
+
+	return 0;
+}
+
+const struct clk_ops mtk_mux_ops = {
+	.get_parent = mtk_clk_mux_get_parent,
+	.set_parent = mtk_clk_mux_set_parent_lock,
+};
+
+const struct clk_ops mtk_mux_clr_set_upd_ops = {
+	.get_parent = mtk_clk_mux_get_parent,
+	.set_parent = mtk_clk_mux_set_parent_setclr_lock,
+};
+
+const struct clk_ops mtk_mux_gate_ops = {
+	.enable = mtk_clk_mux_enable,
+	.disable = mtk_clk_mux_disable,
+	.is_enabled = mtk_clk_mux_is_enabled,
+	.get_parent = mtk_clk_mux_get_parent,
+	.set_parent = mtk_clk_mux_set_parent_lock,
+};
+
+const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
+	.enable = mtk_clk_mux_enable_setclr,
+	.disable = mtk_clk_mux_disable_setclr,
+	.is_enabled = mtk_clk_mux_is_enabled,
+	.get_parent = mtk_clk_mux_get_parent,
+	.set_parent = mtk_clk_mux_set_parent_setclr_lock,
+};
+
+struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
+				 struct regmap *regmap,
+				 spinlock_t *lock)
+{
+	struct mtk_clk_mux *clk_mux;
+	struct clk_init_data init;
+	struct clk *clk;
+
+	clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL);
+	if (!clk_mux)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = mux->name;
+	init.flags = mux->flags | CLK_SET_RATE_PARENT;
+	init.parent_names = mux->parent_names;
+	init.num_parents = mux->num_parents;
+	init.ops = mux->ops;
+
+	clk_mux->regmap = regmap;
+	clk_mux->data = mux;
+	clk_mux->lock = lock;
+	clk_mux->hw.init = &init;
+
+	clk = clk_register(NULL, &clk_mux->hw);
+	if (IS_ERR(clk))
+		kfree(clk_mux);
+
+	return clk;
+}
+
+int mtk_clk_register_muxes(const struct mtk_mux *muxes,
+			   int num, struct device_node *node,
+			   spinlock_t *lock,
+			   struct clk_onecell_data *clk_data)
+{
+	struct regmap *regmap;
+	struct clk *clk;
+	int i;
+
+	regmap = syscon_node_to_regmap(node);
+	if (IS_ERR(regmap)) {
+		pr_err("Cannot find regmap for %pOF: %ld\n", node,
+		       PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
+	for (i = 0; i < num; i++) {
+		const struct mtk_mux *mux = &muxes[i];
+
+		if (IS_ERR_OR_NULL(clk_data->clks[mux->id])) {
+			clk = mtk_clk_register_mux(mux, regmap, lock);
+
+			if (IS_ERR(clk)) {
+				pr_err("Failed to register clk %s: %ld\n",
+				       mux->name, PTR_ERR(clk));
+				continue;
+			}
+
+			clk_data->clks[mux->id] = clk;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
new file mode 100644
index 0000000..f5625f4
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#ifndef __DRV_CLK_MTK_MUX_H
+#define __DRV_CLK_MTK_MUX_H
+
+#include <linux/clk-provider.h>
+
+struct mtk_clk_mux {
+	struct clk_hw hw;
+	struct regmap *regmap;
+	const struct mtk_mux *data;
+	spinlock_t *lock;
+};
+
+struct mtk_mux {
+	int id;
+	const char *name;
+	const char * const *parent_names;
+	unsigned int flags;
+
+	u32 mux_ofs;
+	u32 set_ofs;
+	u32 clr_ofs;
+	u32 upd_ofs;
+
+	u8 mux_shift;
+	u8 mux_width;
+	u8 gate_shift;
+	s8 upd_shift;
+
+	const struct clk_ops *ops;
+
+	signed char num_parents;
+};
+
+extern const struct clk_ops mtk_mux_ops;
+extern const struct clk_ops mtk_mux_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_ops;
+extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
+
+#define GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs,		\
+			_mux_set_ofs, _mux_clr_ofs, _shift, _width,	\
+			_gate, _upd_ofs, _upd, _flags, _ops) {		\
+		.id = _id,						\
+		.name = _name,						\
+		.mux_ofs = _mux_ofs,					\
+		.set_ofs = _mux_set_ofs,				\
+		.clr_ofs = _mux_clr_ofs,				\
+		.upd_ofs = _upd_ofs,					\
+		.mux_shift = _shift,					\
+		.mux_width = _width,					\
+		.gate_shift = _gate,					\
+		.upd_shift = _upd,					\
+		.parent_names = _parents,				\
+		.num_parents = ARRAY_SIZE(_parents),			\
+		.flags = _flags,					\
+		.ops = &_ops,						\
+	}
+
+#define MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs,	\
+			_mux_set_ofs, _mux_clr_ofs, _shift, _width,	\
+			_gate, _upd_ofs, _upd, _flags)			\
+		GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs,	\
+			_mux_set_ofs, _mux_clr_ofs, _shift, _width,	\
+			_gate, _upd_ofs, _upd, _flags,			\
+			mtk_mux_gate_clr_set_upd_ops)
+
+#define MUX_GATE_CLR_SET_UPD(_id, _name, _parents, _mux_ofs,		\
+			_mux_set_ofs, _mux_clr_ofs, _shift, _width,	\
+			_gate, _upd_ofs, _upd)				\
+		MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents,	\
+			_mux_ofs, _mux_set_ofs, _mux_clr_ofs, _shift,	\
+			_width, _gate, _upd_ofs, _upd,			\
+			CLK_SET_RATE_PARENT)
+
+struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
+				 struct regmap *regmap,
+				 spinlock_t *lock);
+
+int mtk_clk_register_muxes(const struct mtk_mux *muxes,
+			   int num, struct device_node *node,
+			   spinlock_t *lock,
+			   struct clk_onecell_data *clk_data);
+
+#endif /* __DRV_CLK_MTK_MUX_H */
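
For context, a SoC clock driver consumes the helpers above by describing
each mux once in a table and registering the whole table at init time. A
minimal sketch follows; the clock ID, parent names, register offsets and
lock are illustrative placeholders, not values from a real MediaTek
datasheet:

static DEFINE_SPINLOCK(example_clk_lock);

static const char * const axi_parents[] = {
	"clk26m",
	"syspll_d7"
};

static const struct mtk_mux top_muxes[] = {
	/* mux field at 0x040, set/clr shadows at 0x044/0x048, upd bit 0 */
	MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AXI, "axi_sel", axi_parents,
			     0x040, 0x044, 0x048, 0, 2, 7, 0x004, 0),
};

/* in the clock provider's init path, clk_data from mtk_alloc_clk_data() */
mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
		       &example_clk_lock, clk_data);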
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index 18842d6..8d556fc 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -27,11 +27,13 @@
 #define CON0_BASE_EN		BIT(0)
 #define CON0_PWR_ON		BIT(0)
 #define CON0_ISO_EN		BIT(1)
-#define CON0_PCW_CHG		BIT(31)
+#define PCW_CHG_MASK		BIT(31)
 
 #define AUDPLL_TUNER_EN		BIT(31)
 
 #define POSTDIV_MASK		0x7
+
+/* default of 7 integer bits; can be overridden with pcwibits. */
 #define INTEGER_BITS		7
 
 /*
@@ -49,6 +51,7 @@
 	void __iomem	*tuner_addr;
 	void __iomem	*tuner_en_addr;
 	void __iomem	*pcw_addr;
+	void __iomem	*pcw_chg_addr;
 	const struct mtk_pll_data *data;
 };
 
@@ -68,12 +71,15 @@
 		u32 pcw, int postdiv)
 {
 	int pcwbits = pll->data->pcwbits;
-	int pcwfbits;
+	int pcwfbits = 0;
+	int ibits;
 	u64 vco;
 	u8 c = 0;
 
 	/* The fractional part of the PLL divider. */
-	pcwfbits = pcwbits > INTEGER_BITS ? pcwbits - INTEGER_BITS : 0;
+	ibits = pll->data->pcwibits ? pll->data->pcwibits : INTEGER_BITS;
+	if (pcwbits > ibits)
+		pcwfbits = pcwbits - ibits;
 
 	vco = (u64)fin * pcw;
 
@@ -117,10 +123,7 @@
 static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
 		int postdiv)
 {
-	u32 con1, val;
-	int pll_en;
-
-	pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN;
+	u32 chg, val;
 
 	/* disable tuner */
 	__mtk_pll_tuner_disable(pll);
@@ -141,21 +144,15 @@
 			pll->data->pcw_shift);
 	val |= pcw << pll->data->pcw_shift;
 	writel(val, pll->pcw_addr);
-
-	con1 = readl(pll->base_addr + REG_CON1);
-
-	if (pll_en)
-		con1 |= CON0_PCW_CHG;
-
-	writel(con1, pll->base_addr + REG_CON1);
+	chg = readl(pll->pcw_chg_addr) | PCW_CHG_MASK;
+	writel(chg, pll->pcw_chg_addr);
 	if (pll->tuner_addr)
-		writel(con1 + 1, pll->tuner_addr);
+		writel(val + 1, pll->tuner_addr);
 
 	/* restore tuner_en */
 	__mtk_pll_tuner_enable(pll);
 
-	if (pll_en)
-		udelay(20);
+	udelay(20);
 }
 
 /*
@@ -170,9 +167,10 @@
 static void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv,
 		u32 freq, u32 fin)
 {
-	unsigned long fmin = 1000 * MHZ;
+	unsigned long fmin = pll->data->fmin ? pll->data->fmin : (1000 * MHZ);
 	const struct mtk_pll_div_table *div_table = pll->data->div_table;
 	u64 _pcw;
+	int ibits;
 	u32 val;
 
 	if (freq > pll->data->fmax)
@@ -196,7 +194,8 @@
 	}
 
 	/* _pcw = freq * postdiv / fin * 2^pcwfbits */
-	_pcw = ((u64)freq << val) << (pll->data->pcwbits - INTEGER_BITS);
+	ibits = pll->data->pcwibits ? pll->data->pcwibits : INTEGER_BITS;
+	_pcw = ((u64)freq << val) << (pll->data->pcwbits - ibits);
 	do_div(_pcw, fin);
 
 	*pcw = (u32)_pcw;
@@ -322,6 +321,10 @@
 	pll->pwr_addr = base + data->pwr_reg;
 	pll->pd_addr = base + data->pd_reg;
 	pll->pcw_addr = base + data->pcw_reg;
+	if (data->pcw_chg_reg)
+		pll->pcw_chg_addr = base + data->pcw_chg_reg;
+	else
+		pll->pcw_chg_addr = pll->base_addr + REG_CON1;
 	if (data->tuner_reg)
 		pll->tuner_addr = base + data->tuner_reg;
 	if (data->tuner_en_reg)
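
To make the PCW arithmetic above concrete with made-up numbers: with
fin = 26 MHz, pcwbits = 22 and the default of 7 integer bits, the
fractional width is pcwfbits = 22 - 7 = 15. Requesting 1500 MHz at
postdiv = 1 (val = 0) gives _pcw = (1500000000 << 15) / 26000000, which
truncates to 1890461; recomputing the rate from that PCW yields
26 MHz * 1890461 / 2^15 ≈ 1499999573 Hz, so the quantisation error
stays below fin / 2^pcwfbits ≈ 793 Hz.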
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index d3551d5..6061ff8 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -27,6 +27,24 @@
 	struct reset_controller_dev rcdev;
 };
 
+static int mtk_reset_assert_set_clr(struct reset_controller_dev *rcdev,
+	unsigned long id)
+{
+	struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
+	unsigned int reg = data->regofs + ((id / 32) << 4);
+
+	return regmap_write(data->regmap, reg, BIT(id % 32));
+}
+
+static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev,
+	unsigned long id)
+{
+	struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
+	unsigned int reg = data->regofs + ((id / 32) << 4) + 0x4;
+
+	return regmap_write(data->regmap, reg, BIT(id % 32));
+}
+
 static int mtk_reset_assert(struct reset_controller_dev *rcdev,
 			      unsigned long id)
 {
@@ -57,14 +75,32 @@
 	return mtk_reset_deassert(rcdev, id);
 }
 
+static int mtk_reset_set_clr(struct reset_controller_dev *rcdev,
+	unsigned long id)
+{
+	int ret;
+
+	ret = mtk_reset_assert_set_clr(rcdev, id);
+	if (ret)
+		return ret;
+	return mtk_reset_deassert_set_clr(rcdev, id);
+}
+
 static const struct reset_control_ops mtk_reset_ops = {
 	.assert = mtk_reset_assert,
 	.deassert = mtk_reset_deassert,
 	.reset = mtk_reset,
 };
 
-void mtk_register_reset_controller(struct device_node *np,
-			unsigned int num_regs, int regofs)
+static const struct reset_control_ops mtk_reset_ops_set_clr = {
+	.assert = mtk_reset_assert_set_clr,
+	.deassert = mtk_reset_deassert_set_clr,
+	.reset = mtk_reset_set_clr,
+};
+
+static void mtk_register_reset_controller_common(struct device_node *np,
+			unsigned int num_regs, int regofs,
+			const struct reset_control_ops *reset_ops)
 {
 	struct mtk_reset *data;
 	int ret;
@@ -85,7 +121,7 @@
 	data->regofs = regofs;
 	data->rcdev.owner = THIS_MODULE;
 	data->rcdev.nr_resets = num_regs * 32;
-	data->rcdev.ops = &mtk_reset_ops;
+	data->rcdev.ops = reset_ops;
 	data->rcdev.of_node = np;
 
 	ret = reset_controller_register(&data->rcdev);
@@ -95,3 +131,17 @@
 		return;
 	}
 }
+
+void mtk_register_reset_controller(struct device_node *np,
+	unsigned int num_regs, int regofs)
+{
+	mtk_register_reset_controller_common(np, num_regs, regofs,
+		&mtk_reset_ops);
+}
+
+void mtk_register_reset_controller_set_clr(struct device_node *np,
+	unsigned int num_regs, int regofs)
+{
+	mtk_register_reset_controller_common(np, num_regs, regofs,
+		&mtk_reset_ops_set_clr);
+}
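
To make the set/clr register layout concrete: for reset line id = 35,
the bank index is 35 / 32 = 1, so with an illustrative regofs of 0x120
the assert path writes BIT(35 % 32) = BIT(3) to the set register at
0x120 + (1 << 4) = 0x130, and the deassert path writes the same bit to
the clear register at 0x134. Real offsets come from each SoC's
datasheet.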
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
index 8e7894a..5ec1046 100644
--- a/drivers/clocksource/timer-mediatek.c
+++ b/drivers/clocksource/timer-mediatek.c
@@ -18,6 +18,7 @@
 
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
+#include <linux/clk.h>
 #include <linux/clockchips.h>
 #include <linux/clocksource.h>
 #include <linux/interrupt.h>
@@ -287,6 +288,7 @@
 
 static int __init mtk_gpt_init(struct device_node *node)
 {
+	struct clk *clk_bus;
 	int ret;
 
 	to.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
@@ -309,6 +311,10 @@
 	gpt_sched_reg = timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC);
 	sched_clock_register(mtk_gpt_read_sched_clock, 32, timer_of_rate(&to));
 
+	clk_bus = of_clk_get_by_name(node, "bus");
+	if (!IS_ERR(clk_bus))
+		clk_prepare_enable(clk_bus);
+
 	/* Configure clock event */
 	mtk_gpt_setup(&to, TIMER_CLK_EVT, GPT_CTRL_OP_REPEAT);
 	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
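
For reference, the optional "bus" clock consumed above comes from the
timer's devicetree node. A hedged sketch, where the unit address,
interrupt, phandles and the first clock's name are placeholders (only
the "bus" name is taken from the code):

timer@10008000 {
	compatible = "mediatek,mt6577-timer";
	reg = <0x10008000 0x80>;
	interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_LOW>;
	clocks = <&system_clk>, <&bus_clk>;
	clock-names = "system-clk", "bus";
};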
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index eb8920d..c893f312 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -548,9 +548,11 @@
 	{ .compatible = "mediatek,mt2712", },
 	{ .compatible = "mediatek,mt7622", },
 	{ .compatible = "mediatek,mt7623", },
+	{ .compatible = "mediatek,mt8167", },
 	{ .compatible = "mediatek,mt817x", },
 	{ .compatible = "mediatek,mt8173", },
 	{ .compatible = "mediatek,mt8176", },
+	{ .compatible = "mediatek,mt8516", },
 
 	{ }
 };
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 3f06934..242c337 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -176,7 +176,7 @@
 out_free_priv:
 	kfree(priv);
 out_free_opp:
-	dev_pm_opp_cpumask_remove_table(policy->cpus);
+	dev_pm_opp_remove_all_dynamic(cpu_dev);
 
 	return ret;
 }
@@ -188,7 +188,7 @@
 	cpufreq_cooling_unregister(priv->cdev);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
 	kfree(priv);
-	dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 87a98ec..9944973 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -177,7 +177,7 @@
 out_free_priv:
 	kfree(priv);
 out_free_opp:
-	dev_pm_opp_cpumask_remove_table(policy->cpus);
+	dev_pm_opp_remove_all_dynamic(cpu_dev);
 
 	return ret;
 }
@@ -190,7 +190,7 @@
 	clk_put(priv->clk);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
 	kfree(priv);
-	dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
 
 	return 0;
 }
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index e9ed439..66386b4 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -2,5 +2,5 @@
 # taken to initialize them in the correct order. Link order is the only way
 # to ensure this currently.
 obj-$(CONFIG_TEGRA_HOST1X)	+= host1x/
-obj-y			+= drm/ vga/
+obj-y			+= drm/ vga/ arm/
 obj-$(CONFIG_IMX_IPUV3_CORE)	+= ipu-v3/
diff --git a/drivers/gpu/arm/Kbuild b/drivers/gpu/arm/Kbuild
new file mode 100644
index 0000000..1a6fa3c9
--- /dev/null
+++ b/drivers/gpu/arm/Kbuild
@@ -0,0 +1,23 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+obj-$(CONFIG_MALI_MIDGARD) += midgard/
diff --git a/drivers/gpu/arm/Kconfig b/drivers/gpu/arm/Kconfig
new file mode 100644
index 0000000..693b86f
--- /dev/null
+++ b/drivers/gpu/arm/Kconfig
@@ -0,0 +1,25 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+menu "ARM GPU Configuration"
+source "drivers/gpu/arm/midgard/Kconfig"
+endmenu
diff --git a/drivers/gpu/arm/midgard/Kbuild b/drivers/gpu/arm/midgard/Kbuild
new file mode 100644
index 0000000..4d0b557
--- /dev/null
+++ b/drivers/gpu/arm/midgard/Kbuild
@@ -0,0 +1,172 @@
+#
+# (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+# Driver version string which is returned to userspace via an ioctl
+MALI_RELEASE_NAME ?= "r20p0-01rel0"
+
+# Paths required for build
+KBASE_PATH = $(src)
+KBASE_PLATFORM_PATH = $(KBASE_PATH)/platform_dummy
+UMP_PATH = $(src)/../../../base
+
+# Set up defaults if not defined by build system
+MALI_CUSTOMER_RELEASE ?= 1
+MALI_USE_CSF ?= 0
+MALI_UNIT_TEST ?= 0
+MALI_KERNEL_TEST_API ?= 0
+MALI_COVERAGE ?= 0
+CONFIG_MALI_PLATFORM_NAME ?= "devicetree"
+
+# Set up our defines, which will be passed to gcc
+DEFINES = \
+	-DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \
+	-DMALI_USE_CSF=$(MALI_USE_CSF) \
+	-DMALI_KERNEL_TEST_API=$(MALI_KERNEL_TEST_API) \
+	-DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
+	-DMALI_COVERAGE=$(MALI_COVERAGE) \
+	-DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\"
+
+ifeq ($(KBUILD_EXTMOD),)
+# in-tree
+DEFINES +=-DMALI_KBASE_PLATFORM_PATH=../../$(src)/platform/$(CONFIG_MALI_PLATFORM_NAME)
+else
+# out-of-tree
+DEFINES +=-DMALI_KBASE_PLATFORM_PATH=$(src)/platform/$(CONFIG_MALI_PLATFORM_NAME)
+endif
+
+DEFINES += -I$(srctree)/drivers/staging/android
+
+DEFINES += -DMALI_KBASE_BUILD
+
+# Use our defines when compiling
+ccflags-y += $(DEFINES) -I$(KBASE_PATH)   -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
+subdir-ccflags-y += $(DEFINES) -I$(KBASE_PATH)   -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
+
+SRC := \
+	mali_kbase_device.c \
+	mali_kbase_cache_policy.c \
+	mali_kbase_mem.c \
+	mali_kbase_mem_pool_group.c \
+	mali_kbase_mmu.c \
+	mali_kbase_native_mgm.c \
+	mali_kbase_ctx_sched.c \
+	mali_kbase_jd.c \
+	mali_kbase_jd_debugfs.c \
+	mali_kbase_jm.c \
+	mali_kbase_gpuprops.c \
+	mali_kbase_js.c \
+	mali_kbase_js_ctx_attr.c \
+	mali_kbase_event.c \
+	mali_kbase_context.c \
+	mali_kbase_pm.c \
+	mali_kbase_config.c \
+	mali_kbase_vinstr.c \
+	mali_kbase_hwcnt.c \
+	mali_kbase_hwcnt_backend_gpu.c \
+	mali_kbase_hwcnt_gpu.c \
+	mali_kbase_hwcnt_legacy.c \
+	mali_kbase_hwcnt_types.c \
+	mali_kbase_hwcnt_virtualizer.c \
+	mali_kbase_softjobs.c \
+	mali_kbase_10969_workaround.c \
+	mali_kbase_hw.c \
+	mali_kbase_debug.c \
+	mali_kbase_gpu_memory_debugfs.c \
+	mali_kbase_mem_linux.c \
+	mali_kbase_core_linux.c \
+	mali_kbase_mem_profile_debugfs.c \
+	mali_kbase_mmu_mode_lpae.c \
+	mali_kbase_mmu_mode_aarch64.c \
+	mali_kbase_disjoint_events.c \
+	mali_kbase_debug_mem_view.c \
+	mali_kbase_debug_job_fault.c \
+	mali_kbase_smc.c \
+	mali_kbase_mem_pool.c \
+	mali_kbase_mem_pool_debugfs.c \
+	mali_kbase_debugfs_helper.c \
+	mali_kbase_timeline.c \
+	mali_kbase_timeline_io.c \
+	mali_kbase_tlstream.c \
+	mali_kbase_tracepoints.c \
+	mali_kbase_strings.c \
+	mali_kbase_as_fault_debugfs.c \
+	mali_kbase_regs_history_debugfs.c \
+	thirdparty/mali_kbase_mmap.c
+
+
+ifeq ($(CONFIG_MALI_CINSTR_GWT),y)
+	SRC += mali_kbase_gwt.c
+endif
+
+ifeq ($(MALI_UNIT_TEST),1)
+	SRC += mali_kbase_timeline_test.c
+endif
+
+ifeq ($(MALI_CUSTOMER_RELEASE),0)
+	SRC += mali_kbase_regs_dump_debugfs.c
+endif
+
+
+ccflags-y += -I$(KBASE_PATH)
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI_MIDGARD) += mali_kbase.o
+
+# Tell the Linux build system to enable building of our .c files
+mali_kbase-y := $(SRC:.c=.o)
+
+# Kconfig passes in the name with quotes for in-tree builds - remove them.
+platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_NAME))
+MALI_PLATFORM_DIR := platform/$(platform_name)
+ccflags-y += -I$(src)/$(MALI_PLATFORM_DIR)
+include $(src)/$(MALI_PLATFORM_DIR)/Kbuild
+
+ifeq ($(CONFIG_MALI_DEVFREQ),y)
+  ifeq ($(CONFIG_DEVFREQ_THERMAL),y)
+    include $(src)/ipa/Kbuild
+  endif
+endif
+
+ifeq ($(MALI_USE_CSF),1)
+	include $(src)/csf/Kbuild
+endif
+
+mali_kbase-$(CONFIG_MALI_DMA_FENCE) += \
+	mali_kbase_dma_fence.o \
+	mali_kbase_fence.o
+mali_kbase-$(CONFIG_SYNC) += \
+	mali_kbase_sync_android.o \
+	mali_kbase_sync_common.o
+mali_kbase-$(CONFIG_SYNC_FILE) += \
+	mali_kbase_sync_file.o \
+	mali_kbase_sync_common.o \
+	mali_kbase_fence.o
+
+include  $(src)/backend/gpu/Kbuild
+mali_kbase-y += $(BACKEND:.c=.o)
+
+
+ccflags-y += -I$(src)/backend/gpu
+subdir-ccflags-y += -I$(src)/backend/gpu
+
+# For kutf and mali_kutf_irq_latency_test
+obj-$(CONFIG_MALI_KUTF) += tests/
diff --git a/drivers/gpu/arm/midgard/Kconfig b/drivers/gpu/arm/midgard/Kconfig
new file mode 100644
index 0000000..ca3e29c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/Kconfig
@@ -0,0 +1,283 @@
+#
+# (C) COPYRIGHT 2012-2019 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+menuconfig MALI_MIDGARD
+	tristate "Mali Midgard series support"
+	select GPU_TRACEPOINTS if ANDROID
+	select DMA_SHARED_BUFFER
+	default n
+	help
+	  Enable this option to build support for an ARM Mali Midgard GPU.
+
+	  To compile this driver as a module, choose M here:
+	  this will generate a single module, called mali_kbase.
+
+config MALI_GATOR_SUPPORT
+	bool "Enable Streamline tracing support"
+	depends on MALI_MIDGARD
+	default y
+	help
+	  Enables kbase tracing used by the Arm Streamline Performance Analyzer.
+	  The tracepoints are used to derive GPU activity charts in Streamline.
+
+config MALI_MIDGARD_DVFS
+	bool "Enable legacy DVFS"
+	depends on MALI_MIDGARD && !MALI_DEVFREQ
+	default n
+	help
+	  Choose this option to enable legacy DVFS in the Mali Midgard DDK.
+
+config MALI_MIDGARD_ENABLE_TRACE
+	bool "Enable kbase tracing"
+	depends on MALI_MIDGARD
+	default n
+	help
+	  Enables tracing in kbase. The trace log is available through
+	  the "mali_trace" debugfs file when CONFIG_DEBUG_FS is enabled.
+
+config MALI_DEVFREQ
+	bool "devfreq support for Mali"
+	depends on MALI_MIDGARD && PM_DEVFREQ
+	default y
+	help
+	  Support devfreq for Mali.
+
+	  Using the devfreq framework and, by default, the simpleondemand
+	  governor, the frequency of Mali will be dynamically selected from the
+	  available OPPs.
+
+config MALI_DMA_FENCE
+	bool "DMA_BUF fence support for Mali"
+	depends on MALI_MIDGARD
+	default n
+	help
+	  Support DMA_BUF fences for Mali.
+
+	  This option should only be enabled if the Linux Kernel has built in
+	  support for DMA_BUF fences.
+
+config MALI_PLATFORM_NAME
+	depends on MALI_MIDGARD
+	string "Platform name"
+	default "devicetree"
+	help
+	  Enter the name of the desired platform configuration directory to
+	  include in the build. 'platform/$(MALI_PLATFORM_NAME)/Kbuild' must
+	  exist.
+
+# MALI_EXPERT configuration options
+
+menuconfig MALI_EXPERT
+	depends on MALI_MIDGARD
+	bool "Enable Expert Settings"
+	default n
+	help
+	  Enabling this option and modifying the default settings may produce a driver with performance or
+	  other limitations.
+
+config MALI_CORESTACK
+	bool "Support controlling power to the GPU core stack"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Enabling this feature on supported GPUs will let the driver power
+	  the GPU core stack on and off independently, without involving the
+	  Power Domain Controller. This should only be enabled on platforms
+	  for which integration of the PDC with the Mali GPU is known to be
+	  problematic. This feature is currently only supported on t-Six and
+	  t-HEx GPUs.
+
+	  If unsure, say N.
+
+config MALI_PLATFORM_POWER_DOWN_ONLY
+	bool "Support disabling the power down of individual cores"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Enabling this feature will let the driver avoid power down of the
+	  shader cores, the tiler, and the L2 cache.
+	  The entire GPU would be powered down at once through the platform
+	  specific code.
+	  This may be required for certain platform configurations only.
+	  This also limits the available power policies.
+
+	  If unsure, say N.
+
+config MALI_DEBUG
+	bool "Debug build"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Select this option for increased checking and reporting of errors.
+
+config MALI_FENCE_DEBUG
+	bool "Debug sync fence usage"
+	depends on MALI_MIDGARD && MALI_EXPERT && (SYNC || SYNC_FILE)
+	default y if MALI_DEBUG
+	help
+	  Select this option to enable additional checking and reporting on the
+	  use of sync fences in the Mali driver.
+
+	  This will add a 3s timeout to all sync fence waits in the Mali
+	  driver, so that when work for Mali has been waiting on a sync fence
+	  for a long time a debug message will be printed, detailing what fence
+	  is causing the block, and which dependent Mali atoms are blocked as a
+	  result of this.
+
+	  The timeout can be changed at runtime through the js_soft_timeout
+	  device attribute, where the timeout is specified in milliseconds.
+
+config MALI_NO_MALI
+	bool "No Mali"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  This can be used to test the driver in a simulated environment where
+	  the hardware is not physically present. If the hardware is physically
+	  present it will not be used. This allows the majority of the driver
+	  to be tested without actual hardware, and can also be used for
+	  software benchmarking. All calls to the simulated hardware complete
+	  immediately, as if the hardware had completed the task.
+
+config MALI_REAL_HW
+	def_bool !MALI_NO_MALI
+
+config MALI_ERROR_INJECT
+	bool "Error injection"
+	depends on MALI_MIDGARD && MALI_EXPERT && MALI_NO_MALI
+	default n
+	help
+	  Enables insertion of errors to test module failure and recovery mechanisms.
+
+config MALI_SYSTEM_TRACE
+	bool "Enable system event tracing support"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Choose this option to enable system trace events for each
+	  kbase event. This is typically used for debugging but has
+	  minimal overhead when not in use. Enable only if you know what
+	  you are doing.
+
+config MALI_2MB_ALLOC
+	bool "Attempt to allocate 2MB pages"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Rather than allocating all GPU memory page-by-page, attempt to
+	  allocate 2MB pages from the kernel. This reduces TLB pressure and
+	  helps to prevent memory fragmentation.
+
+	  If in doubt, say N
+
+config MALI_PWRSOFT_765
+	bool "PWRSOFT-765 ticket"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  PWRSOFT-765 fixes devfreq cooling device issues. The fix was merged
+	  in kernel v4.10; if it has been backported into an older kernel,
+	  this option must be selected manually.
+
+	  If using kernel >= v4.10, say N. Otherwise, if the devfreq cooling
+	  changes have been backported, say Y to avoid compilation errors.
+
+config MALI_MEMORY_FULLY_BACKED
+	bool "Memory fully physically-backed"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  This option enables full physical backing of all virtual
+	  memory allocations in the kernel. Notice that this build
+	  option only affects allocations of grow-on-GPU-page-fault
+	  memory.
+
+config MALI_DMA_BUF_MAP_ON_DEMAND
+	bool "Map imported dma-bufs on demand"
+	depends on MALI_MIDGARD
+	default n
+	help
+	  This option causes kbase to set up the GPU mapping of an imported
+	  dma-buf only when it is needed to run atoms. This is the legacy
+	  behaviour.
+
+	  This is intended for testing and the option will get removed in the
+	  future.
+
+config MALI_DMA_BUF_LEGACY_COMPAT
+	bool "Enable legacy compatibility cache flush on dma-buf map"
+	depends on MALI_MIDGARD && !MALI_DMA_BUF_MAP_ON_DEMAND
+	default y
+	help
+	  This option enables compatibility with legacy dma-buf mapping
+	  behaviour, in which the dma-buf is mapped on import. It adds cache
+	  maintenance, including a cache flush, at the points where
+	  MALI_DMA_BUF_MAP_ON_DEMAND would otherwise do the mapping.
+
+config MALI_HW_ERRATA_1485982_NOT_AFFECTED
+	bool "Disable workaround for BASE_HW_ISSUE_GPU2017_1336"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  This option disables the default workaround for GPU2017-1336. The
+	  workaround keeps the L2 cache powered up except for powerdown and reset.
+
+	  The workaround introduces a limitation that will prevent the running of
+	  protected mode content on fully coherent platforms, as the switch to IO
+	  coherency mode requires the L2 to be turned off.
+
+config MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE
+	bool "Use alternative workaround for BASE_HW_ISSUE_GPU2017_1336"
+	depends on MALI_MIDGARD && MALI_EXPERT && !MALI_HW_ERRATA_1485982_NOT_AFFECTED
+	default n
+	help
+	  This option uses an alternative workaround for GPU2017-1336: the
+	  GPU clock is lowered to a platform-specific, known-good frequency
+	  before powering down the L2 cache. The frequency can be specified
+	  in the device tree using the opp-mali-errata-1485982 property;
+	  otherwise the slowest clock will be selected.
+
+# Instrumentation options.
+
+config MALI_JOB_DUMP
+	bool "Enable system level support needed for job dumping"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Choose this option to enable system level support needed for
+	  job dumping. This is typically used for instrumentation but has
+	  minimal overhead when not in use. Enable only if you know what
+	  you are doing.
+
+config MALI_PRFCNT_SET_SECONDARY
+	bool "Use secondary set of performance counters"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Select this option to use secondary set of performance counters. Kernel
+	  features that depend on an access to the primary set of counters may
+	  become unavailable. Enabling this option will prevent power management
+	  from working optimally and may cause instrumentation tools to return
+	  bogus results.
+
+	  If unsure, say N.
+
+source "drivers/gpu/arm/midgard/platform/Kconfig"
+source "drivers/gpu/arm/midgard/tests/Kconfig"
diff --git a/drivers/gpu/arm/midgard/Makefile b/drivers/gpu/arm/midgard/Makefile
new file mode 100644
index 0000000..53a12094
--- /dev/null
+++ b/drivers/gpu/arm/midgard/Makefile
@@ -0,0 +1,38 @@
+#
+# (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+KDIR ?= /lib/modules/$(shell uname -r)/build
+
+BUSLOG_PATH_RELATIVE = $(CURDIR)/../../../..
+KBASE_PATH_RELATIVE = $(CURDIR)
+
+ifeq ($(CONFIG_MALI_BUSLOG),y)
+#Add bus logger symbols
+EXTRA_SYMBOLS += $(BUSLOG_PATH_RELATIVE)/drivers/base/bus_logger/Module.symvers
+endif
+
+# we get the symbols from modules using KBUILD_EXTRA_SYMBOLS to prevent warnings about unknown functions
+all:
+	$(MAKE) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../include -I$(CURDIR)/../../../../tests/include $(SCONS_CFLAGS)" $(SCONS_CONFIGS) KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules
+
+clean:
+	$(MAKE) -C $(KDIR) M=$(CURDIR) clean
diff --git a/drivers/gpu/arm/midgard/Makefile.kbase b/drivers/gpu/arm/midgard/Makefile.kbase
new file mode 100644
index 0000000..6b0f81e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/Makefile.kbase
@@ -0,0 +1,23 @@
+#
+# (C) COPYRIGHT 2010, 2013, 2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+EXTRA_CFLAGS += -I$(ROOT) -I$(KBASE_PATH) -I$(KBASE_PATH)/platform_$(PLATFORM)
+
diff --git a/drivers/gpu/arm/midgard/Mconfig b/drivers/gpu/arm/midgard/Mconfig
new file mode 100644
index 0000000..27e0d63
--- /dev/null
+++ b/drivers/gpu/arm/midgard/Mconfig
@@ -0,0 +1,271 @@
+#
+# (C) COPYRIGHT 2012-2019 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA  02110-1301, USA.
+#
+#
+
+
+menuconfig MALI_MIDGARD
+	bool "Mali Midgard series support"
+	default y
+	help
+	  Enable this option to build support for an ARM Mali Midgard GPU.
+
+	  To compile this driver as a module, choose M here:
+	  this will generate a single module, called mali_kbase.
+
+config MALI_GATOR_SUPPORT
+	bool "Enable Streamline tracing support"
+	depends on MALI_MIDGARD && !BACKEND_USER
+	default y
+	help
+	  Enables kbase tracing used by the Arm Streamline Performance Analyzer.
+	  The tracepoints are used to derive GPU activity charts in Streamline.
+
+config MALI_MIDGARD_DVFS
+	bool "Enable legacy DVFS"
+	depends on MALI_MIDGARD && !MALI_DEVFREQ
+	default n
+	help
+	  Choose this option to enable legacy DVFS in the Mali Midgard DDK.
+
+config MALI_MIDGARD_ENABLE_TRACE
+	bool "Enable kbase tracing"
+	depends on MALI_MIDGARD
+	default n
+	help
+	  Enables tracing in kbase. The trace log is available through
+	  the "mali_trace" debugfs file when CONFIG_DEBUG_FS is enabled.
+
+config MALI_DEVFREQ
+	bool "devfreq support for Mali"
+	depends on MALI_MIDGARD
+	default y if PLATFORM_JUNO
+	default y if PLATFORM_CUSTOM
+	help
+	  Support devfreq for Mali.
+
+	  Using the devfreq framework and, by default, the simpleondemand
+	  governor, the frequency of Mali will be dynamically selected from the
+	  available OPPs.
+
+config MALI_DMA_FENCE
+	bool "DMA_BUF fence support for Mali"
+	depends on MALI_MIDGARD
+	default n
+	help
+	  Support DMA_BUF fences for Mali.
+
+	  This option should only be enabled if the Linux Kernel has built in
+	  support for DMA_BUF fences.
+
+config MALI_PLATFORM_NAME
+	depends on MALI_MIDGARD
+	string "Platform name"
+	default "hisilicon" if PLATFORM_HIKEY960
+	default "hisilicon" if PLATFORM_HIKEY970
+	default "devicetree"
+	help
+	  Enter the name of the desired platform configuration directory to
+	  include in the build. 'platform/$(MALI_PLATFORM_NAME)/Kbuild' must
+	  exist.
+
+	  When PLATFORM_CUSTOM is set, this needs to be set manually to
+	  pick up the desired platform files.
+
+# MALI_EXPERT configuration options
+
+menuconfig MALI_EXPERT
+	depends on MALI_MIDGARD
+	bool "Enable Expert Settings"
+	default y
+	help
+	  Enabling this option and modifying the default settings may produce a driver with performance or
+	  other limitations.
+
+config MALI_CORESTACK
+	bool "Support controlling power to the GPU core stack"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Enabling this feature on supported GPUs will let the driver power
+	  the GPU core stack on and off independently, without involving the
+	  Power Domain Controller. This should only be enabled on platforms
+	  for which integration of the PDC with the Mali GPU is known to be
+	  problematic. This feature is currently only supported on t-Six and
+	  t-HEx GPUs.
+
+	  If unsure, say N.
+
+config MALI_PLATFORM_POWER_DOWN_ONLY
+	bool "Support disabling the power down of individual cores"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Enabling this feature will let the driver avoid power down of the
+	  shader cores, the tiler, and the L2 cache.
+	  The entire GPU would be powered down at once through the platform
+	  specific code.
+	  This may be required for certain platform configurations only.
+	  This also limits the available power policies.
+
+	  If unsure, say N.
+
+config MALI_DEBUG
+	bool "Debug build"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default y if DEBUG
+	default n
+	help
+	  Select this option for increased checking and reporting of errors.
+
+config MALI_FENCE_DEBUG
+	bool "Debug sync fence usage"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default y if MALI_DEBUG
+	help
+	  Select this option to enable additional checking and reporting on the
+	  use of sync fences in the Mali driver.
+
+	  This will add a 3s timeout to all sync fence waits in the Mali
+	  driver, so that when work for Mali has been waiting on a sync fence
+	  for a long time a debug message will be printed, detailing what fence
+	  is causing the block, and which dependent Mali atoms are blocked as a
+	  result of this.
+
+	  The timeout can be changed at runtime through the js_soft_timeout
+	  device attribute, where the timeout is specified in milliseconds.
+
+choice
+	prompt "Error injection level"
+	default MALI_ERROR_INJECT_NONE
+	help
+	  Enables insertion of errors to test module failure and recovery mechanisms.
+
+config MALI_ERROR_INJECT_NONE
+	bool "disabled"
+	help
+	  Error injection is disabled.
+
+config MALI_ERROR_INJECT_TRACK_LIST
+	bool "error track list"
+	depends on MALI_MIDGARD && MALI_EXPERT && NO_MALI
+	help
+	  Errors to inject are pre-configured by the user.
+
+config MALI_ERROR_INJECT_RANDOM
+	bool "random error injection"
+	depends on MALI_MIDGARD && MALI_EXPERT && NO_MALI
+	help
+	  Injected errors are random, rather than user-driven.
+
+endchoice
+
+config MALI_ERROR_INJECT_ON
+	string
+	default "0" if MALI_ERROR_INJECT_NONE
+	default "1" if MALI_ERROR_INJECT_TRACK_LIST
+	default "2" if MALI_ERROR_INJECT_RANDOM
+
+config MALI_ERROR_INJECT
+	bool
+	default y if !MALI_ERROR_INJECT_NONE
+
+config MALI_SYSTEM_TRACE
+	bool "Enable system event tracing support"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Choose this option to enable system trace events for each
+	  kbase event. This is typically used for debugging but has
+	  minimal overhead when not in use. Enable only if you know what
+	  you are doing.
+
+config MALI_2MB_ALLOC
+	bool "Attempt to allocate 2MB pages"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Rather than allocating all GPU memory page-by-page, attempt to
+	  allocate 2MB pages from the kernel. This reduces TLB pressure and
+	  helps to prevent memory fragmentation.
+
+	  If in doubt, say N
+
+config MALI_PWRSOFT_765
+	bool "PWRSOFT-765 ticket"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  PWRSOFT-765 fixes devfreq cooling device issues. However, the fixes
+	  are not merged in the mainline kernel yet, so this define guards
+	  those parts of the code.
+
+config MALI_MEMORY_FULLY_BACKED
+	bool "Memory fully physically-backed"
+	default n
+	help
+	  This option enables full backing of all virtual memory allocations
+	  for the kernel. This only affects grow-on-GPU-page-fault memory.
+
+config MALI_DMA_BUF_MAP_ON_DEMAND
+	bool "Map imported dma-bufs on demand"
+	depends on MALI_MIDGARD
+	default n
+	default y if !DMA_BUF_SYNC_IOCTL_SUPPORTED
+	help
+	  This option causes kbase to set up the GPU mapping of an imported
+	  dma-buf only when it is needed to run atoms. This is the legacy
+	  behaviour.
+
+config MALI_DMA_BUF_LEGACY_COMPAT
+	bool "Enable legacy compatibility cache flush on dma-buf map"
+	depends on MALI_MIDGARD && !MALI_DMA_BUF_MAP_ON_DEMAND
+	default y
+	help
+	  This option enables compatibility with legacy dma-buf mapping
+	  behaviour, in which the dma-buf is mapped on import. It adds cache
+	  maintenance, including a cache flush, at the points where
+	  MALI_DMA_BUF_MAP_ON_DEMAND would otherwise do the mapping.
+
+config MALI_REAL_HW
+	bool
+	default y
+	default n if NO_MALI
+
+config MALI_HW_ERRATA_1485982_NOT_AFFECTED
+	bool "Disable workaround for BASE_HW_ISSUE_GPU2017_1336"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	default y if PLATFORM_JUNO
+	help
+	  This option disables the default workaround for GPU2017-1336. The
+	  workaround keeps the L2 cache powered up except for powerdown and reset.
+
+	  The workaround introduces a limitation that will prevent the running of
+	  protected mode content on fully coherent platforms, as the switch to IO
+	  coherency mode requires the L2 to be turned off.
+
+config MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE
+	bool "Use alternative workaround for BASE_HW_ISSUE_GPU2017_1336"
+	depends on MALI_MIDGARD && MALI_EXPERT && !MALI_HW_ERRATA_1485982_NOT_AFFECTED
+	default n
+	help
+	  This option uses an alternative workaround for GPU2017-1336: the
+	  GPU clock is lowered to a platform-specific, known-good frequency
+	  before powering down the L2 cache. The frequency can be specified
+	  in the device tree using the opp-mali-errata-1485982 property;
+	  otherwise the slowest clock will be selected.
+
+# Instrumentation options.
+
+# config MALI_JOB_DUMP exists in the Kernel Kconfig but is configured using CINSTR_JOB_DUMP in Mconfig.
+# config MALI_PRFCNT_SET_SECONDARY exists in the Kernel Kconfig but is configured using CINSTR_SECONDARY_HWC in Mconfig.
+
+source "kernel/drivers/gpu/arm/midgard/tests/Mconfig"
diff --git a/drivers/gpu/arm/midgard/backend/gpu/Kbuild b/drivers/gpu/arm/midgard/backend/gpu/Kbuild
new file mode 100644
index 0000000..2414d51
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/Kbuild
@@ -0,0 +1,61 @@
+#
+# (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+BACKEND += \
+	backend/gpu/mali_kbase_cache_policy_backend.c \
+	backend/gpu/mali_kbase_device_hw.c \
+	backend/gpu/mali_kbase_gpu.c \
+	backend/gpu/mali_kbase_gpuprops_backend.c \
+	backend/gpu/mali_kbase_debug_job_fault_backend.c \
+	backend/gpu/mali_kbase_irq_linux.c \
+	backend/gpu/mali_kbase_instr_backend.c \
+	backend/gpu/mali_kbase_jm_as.c \
+	backend/gpu/mali_kbase_jm_hw.c \
+	backend/gpu/mali_kbase_jm_rb.c \
+	backend/gpu/mali_kbase_js_backend.c \
+	backend/gpu/mali_kbase_mmu_hw_direct.c \
+	backend/gpu/mali_kbase_pm_backend.c \
+	backend/gpu/mali_kbase_pm_driver.c \
+	backend/gpu/mali_kbase_pm_metrics.c \
+	backend/gpu/mali_kbase_pm_ca.c \
+	backend/gpu/mali_kbase_pm_always_on.c \
+	backend/gpu/mali_kbase_pm_coarse_demand.c \
+	backend/gpu/mali_kbase_pm_policy.c \
+	backend/gpu/mali_kbase_time.c \
+	backend/gpu/mali_kbase_l2_mmu_config.c
+
+ifeq ($(MALI_CUSTOMER_RELEASE),0)
+BACKEND += \
+	backend/gpu/mali_kbase_pm_always_on_demand.c
+endif
+
+ifeq ($(CONFIG_MALI_DEVFREQ),y)
+BACKEND += \
+	backend/gpu/mali_kbase_devfreq.c
+endif
+
+ifeq ($(CONFIG_MALI_NO_MALI),y)
+	# Dummy model
+	BACKEND += backend/gpu/mali_kbase_model_dummy.c
+	BACKEND += backend/gpu/mali_kbase_model_linux.c
+	# HW error simulation
+	BACKEND += backend/gpu/mali_kbase_model_error_generator.c
+endif
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h
new file mode 100644
index 0000000..4a61f96
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h
@@ -0,0 +1,31 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Backend specific configuration
+ */
+
+#ifndef _KBASE_BACKEND_CONFIG_H_
+#define _KBASE_BACKEND_CONFIG_H_
+
+#endif /* _KBASE_BACKEND_CONFIG_H_ */
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
new file mode 100644
index 0000000..7378bfd
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
@@ -0,0 +1,34 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "backend/gpu/mali_kbase_cache_policy_backend.h"
+#include <backend/gpu/mali_kbase_device_internal.h>
+
+void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
+		u32 mode)
+{
+	kbdev->current_gpu_coherency_mode = mode;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG))
+		kbase_reg_write(kbdev, COHERENCY_ENABLE, mode);
+}
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h
new file mode 100644
index 0000000..f78ada7
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#ifndef _KBASE_CACHE_POLICY_BACKEND_H_
+#define _KBASE_CACHE_POLICY_BACKEND_H_
+
+#include "mali_kbase.h"
+#include "mali_base_kernel.h"
+
+/**
+ * kbase_cache_set_coherency_mode() - Sets the system coherency mode
+ *			in the GPU.
+ * @kbdev:	Device pointer
+ * @mode:	Coherency mode. COHERENCY_ACE/ACE_LITE
+ */
+void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
+		u32 mode);
+
#endif				/* _KBASE_CACHE_POLICY_BACKEND_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c
new file mode 100644
index 0000000..450f6e7
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c
@@ -0,0 +1,162 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include "mali_kbase_debug_job_fault.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+/* GPU_CONTROL_REG(r) */
+static int gpu_control_reg_snapshot[] = {
+	GPU_ID,
+	SHADER_READY_LO,
+	SHADER_READY_HI,
+	TILER_READY_LO,
+	TILER_READY_HI,
+	L2_READY_LO,
+	L2_READY_HI
+};
+
+/* JOB_CONTROL_REG(r) */
+static int job_control_reg_snapshot[] = {
+	JOB_IRQ_MASK,
+	JOB_IRQ_STATUS
+};
+
+/* JOB_SLOT_REG(n,r) */
+static int job_slot_reg_snapshot[] = {
+	JS_HEAD_LO,
+	JS_HEAD_HI,
+	JS_TAIL_LO,
+	JS_TAIL_HI,
+	JS_AFFINITY_LO,
+	JS_AFFINITY_HI,
+	JS_CONFIG,
+	JS_STATUS,
+	JS_HEAD_NEXT_LO,
+	JS_HEAD_NEXT_HI,
+	JS_AFFINITY_NEXT_LO,
+	JS_AFFINITY_NEXT_HI,
+	JS_CONFIG_NEXT
+};
+
+/* MMU_REG(r) */
+static int mmu_reg_snapshot[] = {
+	MMU_IRQ_MASK,
+	MMU_IRQ_STATUS
+};
+
+/* MMU_AS_REG(n,r) */
+static int as_reg_snapshot[] = {
+	AS_TRANSTAB_LO,
+	AS_TRANSTAB_HI,
+	AS_MEMATTR_LO,
+	AS_MEMATTR_HI,
+	AS_FAULTSTATUS,
+	AS_FAULTADDRESS_LO,
+	AS_FAULTADDRESS_HI,
+	AS_STATUS
+};
+
+bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx,
+		int reg_range)
+{
+	int i, j;
+	int offset = 0;
+	int slot_number;
+	int as_number;
+
+	if (kctx->reg_dump == NULL)
+		return false;
+
+	slot_number = kctx->kbdev->gpu_props.num_job_slots;
+	as_number = kctx->kbdev->gpu_props.num_address_spaces;
+
+	/* get the GPU control registers */
+	for (i = 0; i < sizeof(gpu_control_reg_snapshot)/4; i++) {
+		kctx->reg_dump[offset] =
+				GPU_CONTROL_REG(gpu_control_reg_snapshot[i]);
+		offset += 2;
+	}
+
+	/* get the Job control registers */
+	for (i = 0; i < sizeof(job_control_reg_snapshot)/4; i++) {
+		kctx->reg_dump[offset] =
+				JOB_CONTROL_REG(job_control_reg_snapshot[i]);
+		offset += 2;
+	}
+
+	/* get the Job Slot registers */
+	for (j = 0; j < slot_number; j++) {
+		for (i = 0; i < sizeof(job_slot_reg_snapshot)/4; i++) {
+			kctx->reg_dump[offset] =
+			JOB_SLOT_REG(j, job_slot_reg_snapshot[i]);
+			offset += 2;
+		}
+	}
+
+	/* get the MMU registers */
+	for (i = 0; i < sizeof(mmu_reg_snapshot)/4; i++) {
+		kctx->reg_dump[offset] = MMU_REG(mmu_reg_snapshot[i]);
+		offset += 2;
+	}
+
+	/* get the Address space registers */
+	for (j = 0; j < as_number; j++) {
+		for (i = 0; i < sizeof(as_reg_snapshot)/4; i++) {
+			kctx->reg_dump[offset] =
+					MMU_AS_REG(j, as_reg_snapshot[i]);
+			offset += 2;
+		}
+	}
+
+	WARN_ON(offset >= (reg_range*2/4));
+
+	/* set the termination flag */
+	kctx->reg_dump[offset] = REGISTER_DUMP_TERMINATION_FLAG;
+	kctx->reg_dump[offset + 1] = REGISTER_DUMP_TERMINATION_FLAG;
+
+	dev_dbg(kctx->kbdev->dev, "kbase_job_fault_reg_snapshot_init:%d\n",
+			offset);
+
+	return true;
+}
+
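+/*
+ * The snapshot buffer holds (address, value) pairs: the init function
+ * above stores register addresses at even offsets, and the function
+ * below reads each register into the adjacent odd offset, stopping at
+ * REGISTER_DUMP_TERMINATION_FLAG.
+ */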
+bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx)
+{
+	int offset = 0;
+
+	if (kctx->reg_dump == NULL)
+		return false;
+
+	while (kctx->reg_dump[offset] != REGISTER_DUMP_TERMINATION_FLAG) {
+		kctx->reg_dump[offset+1] =
+				kbase_reg_read(kctx->kbdev,
+						kctx->reg_dump[offset]);
+		offset += 2;
+	}
+	return true;
+}
+
+
+#endif
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
new file mode 100644
index 0000000..5c17297
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
@@ -0,0 +1,734 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_tracepoints.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+#include <linux/pm_opp.h>
+#else /* Linux >= 3.13 */
+/* In 3.13 the OPP include header file, types, and functions were all
+ * renamed. Use the old filename for the include, and define the new names to
+ * the old, when an old kernel is detected.
+ */
+#include <linux/opp.h>
+#define dev_pm_opp opp
+#define dev_pm_opp_get_voltage opp_get_voltage
+#define dev_pm_opp_get_opp_count opp_get_opp_count
+#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil
+#define dev_pm_opp_find_freq_floor opp_find_freq_floor
+#endif /* Linux >= 3.13 */
+
+/**
+ * opp_translate - Translate nominal OPP frequency from devicetree into real
+ *                 frequency and core mask
+ * @kbdev:     Device pointer
+ * @freq:      Nominal frequency
+ * @core_mask: Pointer to u64 to store core mask to
+ * @freqs:     Pointer to array of frequencies
+ * @volts:     Pointer to array of voltages
+ *
+ * This function will only perform translation if an operating-points-v2-mali
+ * table is present in devicetree. If one is not present then it will return an
+ * untranslated frequency and all cores enabled.
+ */
+static void opp_translate(struct kbase_device *kbdev, unsigned long freq,
+	u64 *core_mask, unsigned long *freqs, unsigned long *volts)
+{
+	unsigned int i;
+
+	for (i = 0; i < kbdev->num_opps; i++) {
+		if (kbdev->devfreq_table[i].opp_freq == freq) {
+			unsigned int j;
+
+			*core_mask = kbdev->devfreq_table[i].core_mask;
+			for (j = 0; j < kbdev->nr_clocks; j++) {
+				freqs[j] =
+					kbdev->devfreq_table[i].real_freqs[j];
+				volts[j] =
+					kbdev->devfreq_table[i].opp_volts[j];
+			}
+
+			break;
+		}
+	}
+
+	/* If no matching OPP was found, return all cores enabled
+	 * and the nominal frequency.
+	 */
+	if (i == kbdev->num_opps) {
+		*core_mask = kbdev->gpu_props.props.raw_props.shader_present;
+		for (i = 0; i < kbdev->nr_clocks; i++)
+			freqs[i] = freq;
+	}
+}
+
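+/*
+ * Illustrative example (values are placeholders): given a devfreq_table
+ * entry with .opp_freq = 500000000, .core_mask = 0x3,
+ * .real_freqs[0] = 500000000 and .opp_volts[0] = 800000, a nominal
+ * frequency of 500 MHz yields that core mask, frequency and voltage. A
+ * nominal frequency with no table entry falls through to "all shader
+ * cores present, nominal frequency, voltages untouched".
+ */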
+static int
+kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
+{
+	struct kbase_device *kbdev = dev_get_drvdata(dev);
+	struct dev_pm_opp *opp;
+	unsigned long nominal_freq;
+	unsigned long freqs[BASE_MAX_NR_CLOCKS_REGULATORS] = {0};
+	unsigned long volts[BASE_MAX_NR_CLOCKS_REGULATORS] = {0};
+	unsigned int i;
+	u64 core_mask;
+
+	nominal_freq = *target_freq;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	rcu_read_lock();
+#endif
+	opp = devfreq_recommended_opp(dev, &nominal_freq, flags);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	rcu_read_unlock();
+#endif
+	if (IS_ERR_OR_NULL(opp)) {
+		dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp));
+		return PTR_ERR(opp);
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+	dev_pm_opp_put(opp);
+#endif
+
+	/*
+	 * Only update if there is a change of frequency
+	 */
+	if (kbdev->current_nominal_freq == nominal_freq) {
+		*target_freq = nominal_freq;
+		return 0;
+	}
+
+	opp_translate(kbdev, nominal_freq, &core_mask, freqs, volts);
+
+#ifdef CONFIG_REGULATOR
+	/* Regulators and clocks work in pairs: every clock has a regulator,
+	 * and we never expect to have more regulators than clocks.
+	 *
+	 * We always need to increase the voltage before increasing
+	 * the frequency of a regulator/clock pair, otherwise the clock
+	 * wouldn't have enough power to perform the transition.
+	 *
+	 * It's always safer to decrease the frequency before decreasing
+	 * voltage of a regulator/clock pair, otherwise the clock could have
+	 * problems operating if it is deprived of the necessary power
+	 * to sustain its current frequency (even if that happens for a short
+	 * transition interval).
+	 */
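+	/* For example (illustrative numbers only, not taken from any OPP
+	 * table): moving from 400 MHz @ 0.75 V to 800 MHz @ 0.90 V raises
+	 * the regulator to 0.90 V first and only then sets the 800 MHz rate;
+	 * moving back down sets 400 MHz first and lowers the voltage after.
+	 */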
+	for (i = 0; i < kbdev->nr_clocks; i++) {
+		if (kbdev->regulators[i] &&
+				kbdev->current_voltages[i] != volts[i] &&
+				kbdev->current_freqs[i] < freqs[i]) {
+			int err;
+
+			err = regulator_set_voltage(kbdev->regulators[i],
+				volts[i], volts[i]);
+			if (!err) {
+				kbdev->current_voltages[i] = volts[i];
+			} else {
+				dev_err(dev, "Failed to increase voltage (%d) (target %lu)\n",
+					err, volts[i]);
+				return err;
+			}
+		}
+	}
+#endif
+
+	for (i = 0; i < kbdev->nr_clocks; i++) {
+		if (kbdev->clocks[i]) {
+			int err;
+
+			err = clk_set_rate(kbdev->clocks[i], freqs[i]);
+			if (!err) {
+				kbdev->current_freqs[i] = freqs[i];
+			} else {
+				dev_err(dev, "Failed to set clock %lu (target %lu)\n",
+					freqs[i], *target_freq);
+				return err;
+			}
+		}
+	}
+
+#ifdef CONFIG_REGULATOR
+	for (i = 0; i < kbdev->nr_clocks; i++) {
+		if (kbdev->regulators[i] &&
+				kbdev->current_voltages[i] != volts[i] &&
+				kbdev->current_freqs[i] > freqs[i]) {
+			int err;
+
+			err = regulator_set_voltage(kbdev->regulators[i],
+				volts[i], volts[i]);
+			if (!err) {
+				kbdev->current_voltages[i] = volts[i];
+			} else {
+				dev_err(dev, "Failed to decrease voltage (%d) (target %lu)\n",
+					err, volts[i]);
+				return err;
+			}
+		}
+	}
+#endif
+
+	kbase_devfreq_set_core_mask(kbdev, core_mask);
+
+	*target_freq = nominal_freq;
+	kbdev->current_nominal_freq = nominal_freq;
+	kbdev->current_core_mask = core_mask;
+
+	KBASE_TLSTREAM_AUX_DEVFREQ_TARGET(kbdev, (u64)nominal_freq);
+
+	return 0;
+}
+
+void kbase_devfreq_force_freq(struct kbase_device *kbdev, unsigned long freq)
+{
+	unsigned long target_freq = freq;
+
+	kbase_devfreq_target(kbdev->dev, &target_freq, 0);
+}
+
+static int
+kbase_devfreq_cur_freq(struct device *dev, unsigned long *freq)
+{
+	struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+	*freq = kbdev->current_nominal_freq;
+
+	return 0;
+}
+
+static int
+kbase_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
+{
+	struct kbase_device *kbdev = dev_get_drvdata(dev);
+	struct kbasep_pm_metrics diff;
+
+	kbase_pm_get_dvfs_metrics(kbdev, &kbdev->last_devfreq_metrics, &diff);
+
+	stat->busy_time = diff.time_busy;
+	stat->total_time = diff.time_busy + diff.time_idle;
+	stat->current_frequency = kbdev->current_nominal_freq;
+	stat->private_data = NULL;
+
+	return 0;
+}
+
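+/* A note on how the status above is consumed (this describes the generic
+ * devfreq simple_ondemand governor, not kbase-specific code): the governor
+ * derives load as busy_time / total_time, so e.g. 7 ms busy in a 10 ms
+ * window reads as 70% utilisation and may trigger a frequency increase.
+ */
+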
+static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev,
+		struct devfreq_dev_profile *dp)
+{
+	int count;
+	int i = 0;
+	unsigned long freq;
+	struct dev_pm_opp *opp;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	rcu_read_lock();
+#endif
+	count = dev_pm_opp_get_opp_count(kbdev->dev);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	rcu_read_unlock();
+#endif
+	if (count < 0)
+		return count;
+
+	dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]),
+				GFP_KERNEL);
+	if (!dp->freq_table)
+		return -ENOMEM;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	rcu_read_lock();
+#endif
+	for (i = 0, freq = ULONG_MAX; i < count; i++, freq--) {
+		opp = dev_pm_opp_find_freq_floor(kbdev->dev, &freq);
+		if (IS_ERR(opp))
+			break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+		dev_pm_opp_put(opp);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
+
+		dp->freq_table[i] = freq;
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	rcu_read_unlock();
+#endif
+
+	if (count != i)
+		dev_warn(kbdev->dev, "Unable to enumerate all OPPs (%d != %d)\n",
+				count, i);
+
+	dp->max_state = i;
+
+	/* Use the lowest clock as the suspend clock; this may be overridden
+	 * by an OPP marked with 'opp-mali-errata-1485982'.
+	 */
+	if (kbdev->pm.backend.gpu_clock_slow_down_wa) {
+		freq = 0;
+		opp = dev_pm_opp_find_freq_ceil(kbdev->dev, &freq);
+		if (IS_ERR(opp)) {
+			dev_err(kbdev->dev, "failed to find slowest clock");
+			return 0;
+		}
+		dev_info(kbdev->dev, "suspend clock %lu from slowest", freq);
+		kbdev->pm.backend.gpu_clock_suspend_freq = freq;
+	}
+
+	return 0;
+}
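+
+/* Worked example of the enumeration above (made-up frequencies): with OPPs
+ * at 200, 400 and 800 MHz, starting from ULONG_MAX and repeatedly taking
+ * the floor (then decrementing) yields a descending table:
+ *
+ *	dp->freq_table[] = { 800000000, 400000000, 200000000 };
+ */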
+
+static void kbase_devfreq_term_freq_table(struct kbase_device *kbdev)
+{
+	struct devfreq_dev_profile *dp = &kbdev->devfreq_profile;
+
+	kfree(dp->freq_table);
+}
+
+static void kbase_devfreq_term_core_mask_table(struct kbase_device *kbdev)
+{
+	kfree(kbdev->devfreq_table);
+}
+
+static void kbase_devfreq_exit(struct device *dev)
+{
+	struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+	kbase_devfreq_term_freq_table(kbdev);
+}
+
+static void kbasep_devfreq_read_suspend_clock(struct kbase_device *kbdev,
+		struct device_node *node)
+{
+	u64 freq = 0;
+	int err = 0;
+
+	/* Only the OPP entry marked with 'opp-mali-errata-1485982' provides
+	 * the suspend clock; skip all other nodes.
+	 */
+	if (!of_property_read_bool(node, "opp-mali-errata-1485982"))
+		return;
+
+	/* In kbase devfreq, the clock is read from 'opp-hz' and translated
+	 * into the actual clock by opp_translate.
+	 *
+	 * In customer DVFS, the clock is read from 'opp-hz-real' for the
+	 * clock driver; if 'opp-hz-real' does not exist, 'opp-hz' is read
+	 * instead.
+	 */
+	if (IS_ENABLED(CONFIG_MALI_DEVFREQ)) {
+		err = of_property_read_u64(node, "opp-hz", &freq);
+	} else {
+		if (of_property_read_u64(node, "opp-hz-real", &freq))
+			err = of_property_read_u64(node, "opp-hz", &freq);
+	}
+
+	if (WARN_ON(err || !freq))
+		return;
+
+	kbdev->pm.backend.gpu_clock_suspend_freq = freq;
+	dev_info(kbdev->dev,
+		"suspend clock %llu by opp-mali-errata-1485982", freq);
+}
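+
+/* A minimal devicetree sketch of the OPP entry the helper above looks for
+ * (the property names are the ones parsed above; the values are invented):
+ *
+ *	opp-200000000 {
+ *		opp-hz = /bits/ 64 <200000000>;
+ *		opp-hz-real = /bits/ 64 <199900000>;
+ *		opp-microvolt = <750000>;
+ *		opp-mali-errata-1485982;
+ *	};
+ */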
+
+static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
+{
+#if KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE || !defined(CONFIG_OF)
+	/* OPP table initialization requires at least the capability to get
+	 * regulators and clocks from the device tree, as well as parsing
+	 * arrays of unsigned integer values.
+	 *
+	 * The whole initialization process shall simply be skipped if the
+	 * minimum capability is not available.
+	 */
+	return 0;
+#else
+	struct device_node *opp_node = of_parse_phandle(kbdev->dev->of_node,
+			"operating-points-v2", 0);
+	struct device_node *node;
+	int i = 0;
+	int count;
+	u64 shader_present = kbdev->gpu_props.props.raw_props.shader_present;
+
+	if (!opp_node)
+		return 0;
+	if (!of_device_is_compatible(opp_node, "operating-points-v2-mali"))
+		return 0;
+
+	count = dev_pm_opp_get_opp_count(kbdev->dev);
+	if (count < 0)
+		return count;
+
+	kbdev->devfreq_table = kmalloc_array(count,
+			sizeof(struct kbase_devfreq_opp), GFP_KERNEL);
+	if (!kbdev->devfreq_table)
+		return -ENOMEM;
+
+	for_each_available_child_of_node(opp_node, node) {
+		const void *core_count_p;
+		u64 core_mask, opp_freq,
+			real_freqs[BASE_MAX_NR_CLOCKS_REGULATORS];
+		int err;
+#ifdef CONFIG_REGULATOR
+		u32 opp_volts[BASE_MAX_NR_CLOCKS_REGULATORS];
+#endif
+
+		/* Read suspend clock from opp table */
+		if (kbdev->pm.backend.gpu_clock_slow_down_wa)
+			kbasep_devfreq_read_suspend_clock(kbdev, node);
+
+		err = of_property_read_u64(node, "opp-hz", &opp_freq);
+		if (err) {
+			dev_warn(kbdev->dev, "Failed to read opp-hz property with error %d\n",
+					err);
+			continue;
+		}
+
+#if BASE_MAX_NR_CLOCKS_REGULATORS > 1
+		err = of_property_read_u64_array(node, "opp-hz-real",
+				real_freqs, kbdev->nr_clocks);
+#else
+		WARN_ON(kbdev->nr_clocks != 1);
+		err = of_property_read_u64(node, "opp-hz-real", real_freqs);
+#endif
+		if (err < 0) {
+			dev_warn(kbdev->dev, "Failed to read opp-hz-real property with error %d\n",
+					err);
+			continue;
+		}
+#ifdef CONFIG_REGULATOR
+		err = of_property_read_u32_array(node,
+			"opp-microvolt", opp_volts, kbdev->nr_regulators);
+		if (err < 0) {
+			dev_warn(kbdev->dev, "Failed to read opp-microvolt property with error %d\n",
+					err);
+			continue;
+		}
+#endif
+
+		if (of_property_read_u64(node, "opp-core-mask", &core_mask))
+			core_mask = shader_present;
+		if (core_mask != shader_present &&
+				(kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11056) ||
+				 corestack_driver_control ||
+				 platform_power_down_only)) {
+			dev_warn(kbdev->dev, "Ignoring OPP %llu - Dynamic Core Scaling not supported on this GPU\n",
+					opp_freq);
+			continue;
+		}
+
+		core_count_p = of_get_property(node, "opp-core-count", NULL);
+		if (core_count_p) {
+			u64 remaining_core_mask =
+				kbdev->gpu_props.props.raw_props.shader_present;
+			int core_count = be32_to_cpup(core_count_p);
+
+			core_mask = 0;
+
+			for (; core_count > 0; core_count--) {
+				int core = ffs(remaining_core_mask);
+
+				if (!core) {
+					dev_err(kbdev->dev, "OPP has more cores than GPU\n");
+					return -ENODEV;
+				}
+
+				core_mask |= (1ull << (core-1));
+				remaining_core_mask &= ~(1ull << (core-1));
+			}
+		}
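+		/* Worked example (hypothetical GPU): with shader_present =
+		 * 0xf and opp-core-count = 2, the loop above picks the two
+		 * lowest set bits, giving core_mask = 0x3.
+		 */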
+
+		if (!core_mask) {
+			dev_err(kbdev->dev, "OPP has invalid core mask of 0\n");
+			return -ENODEV;
+		}
+
+		kbdev->devfreq_table[i].opp_freq = opp_freq;
+		kbdev->devfreq_table[i].core_mask = core_mask;
+		if (kbdev->nr_clocks > 0) {
+			int j;
+
+			for (j = 0; j < kbdev->nr_clocks; j++)
+				kbdev->devfreq_table[i].real_freqs[j] =
+					real_freqs[j];
+		}
+#ifdef CONFIG_REGULATOR
+		if (kbdev->nr_regulators > 0) {
+			int j;
+
+			for (j = 0; j < kbdev->nr_regulators; j++)
+				kbdev->devfreq_table[i].opp_volts[j] =
+						opp_volts[j];
+		}
+#endif
+
+		dev_info(kbdev->dev, "OPP %d : opp_freq=%llu core_mask=%llx\n",
+				i, opp_freq, core_mask);
+
+		i++;
+	}
+
+	kbdev->num_opps = i;
+
+	return 0;
+#endif /* KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE || !defined(CONFIG_OF) */
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+
+static const char *kbase_devfreq_req_type_name(enum kbase_devfreq_work_type type)
+{
+	const char *p;
+
+	switch (type) {
+	case DEVFREQ_WORK_NONE:
+		p = "devfreq_none";
+		break;
+	case DEVFREQ_WORK_SUSPEND:
+		p = "devfreq_suspend";
+		break;
+	case DEVFREQ_WORK_RESUME:
+		p = "devfreq_resume";
+		break;
+	default:
+		p = "Unknown devfreq_type";
+	}
+	return p;
+}
+
+static void kbase_devfreq_suspend_resume_worker(struct work_struct *work)
+{
+	struct kbase_devfreq_queue_info *info = container_of(work,
+			struct kbase_devfreq_queue_info, work);
+	struct kbase_device *kbdev = container_of(info, struct kbase_device,
+			devfreq_queue);
+	unsigned long flags;
+	enum kbase_devfreq_work_type type, acted_type;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	type = kbdev->devfreq_queue.req_type;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	acted_type = kbdev->devfreq_queue.acted_type;
+	dev_dbg(kbdev->dev, "Worker handles queued req: %s (acted: %s)\n",
+		kbase_devfreq_req_type_name(type),
+		kbase_devfreq_req_type_name(acted_type));
+	switch (type) {
+	case DEVFREQ_WORK_SUSPEND:
+	case DEVFREQ_WORK_RESUME:
+		if (type != acted_type) {
+			if (type == DEVFREQ_WORK_RESUME)
+				devfreq_resume_device(kbdev->devfreq);
+			else
+				devfreq_suspend_device(kbdev->devfreq);
+			dev_dbg(kbdev->dev, "Devfreq transition occurred: %s => %s\n",
+				kbase_devfreq_req_type_name(acted_type),
+				kbase_devfreq_req_type_name(type));
+			kbdev->devfreq_queue.acted_type = type;
+		}
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
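+
+/* Example of the coalescing behaviour above: if a resume is enqueued and
+ * then a suspend is enqueued before the worker runs, the worker only sees
+ * the latest req_type; and when req_type already matches acted_type the
+ * request is a no-op, so redundant suspend/resume calls into devfreq are
+ * avoided.
+ */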
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+void kbase_devfreq_enqueue_work(struct kbase_device *kbdev,
+				       enum kbase_devfreq_work_type work_type)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+	unsigned long flags;
+
+	WARN_ON(work_type == DEVFREQ_WORK_NONE);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbdev->devfreq_queue.req_type = work_type;
+	queue_work(kbdev->devfreq_queue.workq, &kbdev->devfreq_queue.work);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	dev_dbg(kbdev->dev, "Enqueuing devfreq req: %s\n",
+		kbase_devfreq_req_type_name(work_type));
+#endif
+}
+
+static int kbase_devfreq_work_init(struct kbase_device *kbdev)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+	kbdev->devfreq_queue.req_type = DEVFREQ_WORK_NONE;
+	kbdev->devfreq_queue.acted_type = DEVFREQ_WORK_RESUME;
+
+	kbdev->devfreq_queue.workq = alloc_ordered_workqueue("devfreq_workq", 0);
+	if (!kbdev->devfreq_queue.workq)
+		return -ENOMEM;
+
+	INIT_WORK(&kbdev->devfreq_queue.work,
+			kbase_devfreq_suspend_resume_worker);
+#endif
+	return 0;
+}
+
+static void kbase_devfreq_work_term(struct kbase_device *kbdev)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+	destroy_workqueue(kbdev->devfreq_queue.workq);
+#endif
+}
+
+int kbase_devfreq_init(struct kbase_device *kbdev)
+{
+	struct devfreq_dev_profile *dp;
+	int err;
+	unsigned int i;
+
+	if (kbdev->nr_clocks == 0) {
+		dev_err(kbdev->dev, "Clock not available for devfreq\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < kbdev->nr_clocks; i++) {
+		if (kbdev->clocks[i])
+			kbdev->current_freqs[i] =
+				clk_get_rate(kbdev->clocks[i]);
+		else
+			kbdev->current_freqs[i] = 0;
+	}
+	kbdev->current_nominal_freq = kbdev->current_freqs[0];
+
+	dp = &kbdev->devfreq_profile;
+
+	dp->initial_freq = kbdev->current_freqs[0];
+	dp->polling_ms = 100;
+	dp->target = kbase_devfreq_target;
+	dp->get_dev_status = kbase_devfreq_status;
+	dp->get_cur_freq = kbase_devfreq_cur_freq;
+	dp->exit = kbase_devfreq_exit;
+
+	if (kbase_devfreq_init_freq_table(kbdev, dp))
+		return -EFAULT;
+
+	if (dp->max_state > 0) {
+		/* Record the maximum frequency possible */
+		kbdev->gpu_props.props.core_props.gpu_freq_khz_max =
+			dp->freq_table[0] / 1000;
+	}
+
+	err = kbase_devfreq_init_core_mask_table(kbdev);
+	if (err) {
+		kbase_devfreq_term_freq_table(kbdev);
+		return err;
+	}
+
+	/* Initialise devfreq suspend/resume workqueue */
+	err = kbase_devfreq_work_init(kbdev);
+	if (err) {
+		kbase_devfreq_term_freq_table(kbdev);
+		dev_err(kbdev->dev, "Devfreq initialization failed");
+		return err;
+	}
+
+	kbdev->devfreq = devfreq_add_device(kbdev->dev, dp,
+				"simple_ondemand", NULL);
+	if (IS_ERR(kbdev->devfreq)) {
+		err = PTR_ERR(kbdev->devfreq);
+		goto add_device_failed;
+	}
+
+	/* devfreq_add_device only copies a few of kbdev->dev's fields, so
+	 * set drvdata explicitly so IPA models can access kbdev. */
+	dev_set_drvdata(&kbdev->devfreq->dev, kbdev);
+
+	err = devfreq_register_opp_notifier(kbdev->dev, kbdev->devfreq);
+	if (err) {
+		dev_err(kbdev->dev,
+			"Failed to register OPP notifier (%d)\n", err);
+		goto opp_notifier_failed;
+	}
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+	err = kbase_ipa_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "IPA initialization failed\n");
+		goto cooling_failed;
+	}
+
+	kbdev->devfreq_cooling = of_devfreq_cooling_register_power(
+			kbdev->dev->of_node,
+			kbdev->devfreq,
+			&kbase_ipa_power_model_ops);
+	if (IS_ERR_OR_NULL(kbdev->devfreq_cooling)) {
+		err = PTR_ERR(kbdev->devfreq_cooling);
+		dev_err(kbdev->dev,
+			"Failed to register cooling device (%d)\n",
+			err);
+		goto cooling_failed;
+	}
+#endif
+
+	return 0;
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+cooling_failed:
+	devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
+#endif /* CONFIG_DEVFREQ_THERMAL */
+opp_notifier_failed:
+	if (devfreq_remove_device(kbdev->devfreq))
+		dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
+	else
+		kbdev->devfreq = NULL;
+add_device_failed:
+	kbase_devfreq_work_term(kbdev);
+
+	kbase_devfreq_term_freq_table(kbdev);
+
+	return err;
+}
+
+void kbase_devfreq_term(struct kbase_device *kbdev)
+{
+	int err;
+
+	dev_dbg(kbdev->dev, "Term Mali devfreq\n");
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+	if (kbdev->devfreq_cooling)
+		devfreq_cooling_unregister(kbdev->devfreq_cooling);
+
+	kbase_ipa_term(kbdev);
+#endif
+
+	devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
+
+	err = devfreq_remove_device(kbdev->devfreq);
+	if (err)
+		dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
+	else
+		kbdev->devfreq = NULL;
+
+	kbase_devfreq_term_core_mask_table(kbdev);
+
+	kbase_devfreq_work_term(kbdev);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h
new file mode 100644
index 0000000..8c976b2
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h
@@ -0,0 +1,46 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _BASE_DEVFREQ_H_
+#define _BASE_DEVFREQ_H_
+
+int kbase_devfreq_init(struct kbase_device *kbdev);
+
+void kbase_devfreq_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_devfreq_force_freq - Set GPU frequency on L2 power on/off.
+ * @kbdev:      Device pointer
+ * @freq:       GPU frequency in Hz to be set when
+ *              MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE is enabled
+ */
+void kbase_devfreq_force_freq(struct kbase_device *kbdev, unsigned long freq);
+
+/**
+ * kbase_devfreq_enqueue_work - Enqueue a work item for suspend/resume devfreq.
+ * @kbdev:      Device pointer
+ * @work_type:  The type of the devfreq work item, i.e. suspend or resume
+ */
+void kbase_devfreq_enqueue_work(struct kbase_device *kbdev,
+				enum kbase_devfreq_work_type work_type);
+
+#endif /* _BASE_DEVFREQ_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
new file mode 100644
index 0000000..c470a97
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
@@ -0,0 +1,369 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016, 2018-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_instr_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_mmu_hw_direct.h>
+#include <mali_kbase_reset_gpu.h>
+
+#if !defined(CONFIG_MALI_NO_MALI)
+
+
+#ifdef CONFIG_DEBUG_FS
+
+int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size)
+{
+	struct kbase_io_access *old_buf;
+	struct kbase_io_access *new_buf;
+	unsigned long flags;
+
+	if (!new_size)
+		goto out_err; /* The new size must not be 0 */
+
+	new_buf = vmalloc(new_size * sizeof(*h->buf));
+	if (!new_buf)
+		goto out_err;
+
+	spin_lock_irqsave(&h->lock, flags);
+
+	old_buf = h->buf;
+
+	/* Note: we won't bother with copying the old data over. The dumping
+	 * logic wouldn't work properly as it relies on 'count' both as a
+	 * counter and as an index to the buffer which would have changed with
+	 * the new array. This is a corner case that we don't need to support.
+	 */
+	h->count = 0;
+	h->size = new_size;
+	h->buf = new_buf;
+
+	spin_unlock_irqrestore(&h->lock, flags);
+
+	vfree(old_buf);
+
+	return 0;
+
+out_err:
+	return -1;
+}
+
+
+int kbase_io_history_init(struct kbase_io_history *h, u16 n)
+{
+	h->enabled = false;
+	spin_lock_init(&h->lock);
+	h->count = 0;
+	h->size = 0;
+	h->buf = NULL;
+	if (kbase_io_history_resize(h, n))
+		return -1;
+
+	return 0;
+}
+
+
+void kbase_io_history_term(struct kbase_io_history *h)
+{
+	vfree(h->buf);
+	h->buf = NULL;
+}
+
+
+/* kbase_io_history_add - add new entry to the register access history
+ *
+ * @h: Pointer to the history data structure
+ * @addr: Register address
+ * @value: The value that is either read from or written to the register
+ * @write: 1 if it's a register write, 0 if it's a read
+ */
+static void kbase_io_history_add(struct kbase_io_history *h,
+		void __iomem const *addr, u32 value, u8 write)
+{
+	struct kbase_io_access *io;
+	unsigned long flags;
+
+	spin_lock_irqsave(&h->lock, flags);
+
+	io = &h->buf[h->count % h->size];
+	io->addr = (uintptr_t)addr | write;
+	io->value = value;
+	++h->count;
+	/* If count overflows, move the index by the buffer size so the entire
+	 * buffer will still be dumped later */
+	if (unlikely(!h->count))
+		h->count = h->size;
+
+	spin_unlock_irqrestore(&h->lock, flags);
+}
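+
+/* Worked example of the ring buffer above (illustrative numbers): with
+ * h->size = 16 and h->count = 18, a new access lands in slot 18 % 16 = 2,
+ * and the low bit of io->addr records the direction:
+ *
+ *	io->addr = (uintptr_t)addr | 1;		// a register write
+ *	addr = (void *)(io->addr & ~0x1);	// recover the address
+ *
+ * which is why kbase_io_history_dump() masks bit 0 back out.
+ */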
+
+
+void kbase_io_history_dump(struct kbase_device *kbdev)
+{
+	struct kbase_io_history *const h = &kbdev->io_history;
+	u16 i;
+	size_t iters;
+	unsigned long flags;
+
+	if (likely(!h->enabled))
+		return;
+
+	spin_lock_irqsave(&h->lock, flags);
+
+	dev_err(kbdev->dev, "Register IO History:");
+	iters = (h->size > h->count) ? h->count : h->size;
+	dev_err(kbdev->dev, "Last %zu register accesses of %zu total:\n", iters,
+			h->count);
+	for (i = 0; i < iters; ++i) {
+		struct kbase_io_access *io =
+			&h->buf[(h->count - iters + i) % h->size];
+		char const access = (io->addr & 1) ? 'w' : 'r';
+
+		dev_err(kbdev->dev, "%6i: %c: reg 0x%p val %08x\n", i, access,
+				(void *)(io->addr & ~0x1), io->value);
+	}
+
+	spin_unlock_irqrestore(&h->lock, flags);
+}
+
+
+#endif /* CONFIG_DEBUG_FS */
+
+
+void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value)
+{
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+	KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
+
+	writel(value, kbdev->reg + offset);
+
+#ifdef CONFIG_DEBUG_FS
+	if (unlikely(kbdev->io_history.enabled))
+		kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
+				value, 1);
+#endif /* CONFIG_DEBUG_FS */
+	dev_dbg(kbdev->dev, "w: reg %08x val %08x", offset, value);
+}
+
+KBASE_EXPORT_TEST_API(kbase_reg_write);
+
+u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset)
+{
+	u32 val;
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+	KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
+
+	val = readl(kbdev->reg + offset);
+
+#ifdef CONFIG_DEBUG_FS
+	if (unlikely(kbdev->io_history.enabled))
+		kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
+				val, 0);
+#endif /* CONFIG_DEBUG_FS */
+	dev_dbg(kbdev->dev, "r: reg %08x val %08x", offset, val);
+
+	return val;
+}
+
+KBASE_EXPORT_TEST_API(kbase_reg_read);
+#endif /* !defined(CONFIG_MALI_NO_MALI) */
+
+/**
+ * kbase_report_gpu_fault - Report a GPU fault.
+ * @kbdev:    Kbase device pointer
+ * @multiple: Zero if only GPU_FAULT was raised, non-zero if MULTIPLE_GPU_FAULTS
+ *            was also set
+ *
+ * This function is called from the interrupt handler when a GPU fault occurs.
+ * It reports the details of the fault using dev_warn().
+ */
+static void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple)
+{
+	u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	u32 status = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(GPU_FAULTSTATUS));
+	u64 address = (u64) kbase_reg_read(kbdev,
+			GPU_CONTROL_REG(GPU_FAULTADDRESS_HI)) << 32;
+
+	address |= kbase_reg_read(kbdev,
+			GPU_CONTROL_REG(GPU_FAULTADDRESS_LO));
+
+	if ((gpu_id & GPU_ID2_PRODUCT_MODEL) != GPU_ID2_PRODUCT_TULX) {
+		dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx",
+			status,
+			kbase_exception_name(kbdev, status & 0xFF),
+			address);
+		if (multiple)
+			dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
+	}
+}
+
+static bool kbase_gpu_fault_interrupt(struct kbase_device *kbdev, int multiple)
+{
+	kbase_report_gpu_fault(kbdev, multiple);
+	return false;
+}
+
+void kbase_gpu_start_cache_clean_nolock(struct kbase_device *kbdev)
+{
+	u32 irq_mask;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (kbdev->cache_clean_in_progress) {
+		/* If this is called while another clean is in progress, we
+		 * can't rely on the current one to flush any new changes in
+		 * the cache. Instead, trigger another cache clean immediately
+		 * after this one finishes.
+		 */
+		kbdev->cache_clean_queued = true;
+		return;
+	}
+
+	/* Enable interrupt */
+	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+				irq_mask | CLEAN_CACHES_COMPLETED);
+
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+					GPU_COMMAND_CLEAN_INV_CACHES);
+
+	kbdev->cache_clean_in_progress = true;
+}
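+
+/* Behavioural sketch of the queueing above: a second clean requested while
+ * one is in flight only sets cache_clean_queued; when the
+ * CLEAN_CACHES_COMPLETED interrupt fires, kbase_clean_caches_done()
+ * re-issues GPU_COMMAND_CLEAN_INV_CACHES instead of signalling completion,
+ * so no line dirtied during the first clean can be missed.
+ */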
+
+void kbase_gpu_start_cache_clean(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_gpu_start_cache_clean_nolock(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_gpu_cache_clean_wait_complete(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbdev->cache_clean_queued = false;
+	kbdev->cache_clean_in_progress = false;
+	wake_up(&kbdev->cache_clean_wait);
+}
+
+static void kbase_clean_caches_done(struct kbase_device *kbdev)
+{
+	u32 irq_mask;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (kbdev->cache_clean_queued) {
+		kbdev->cache_clean_queued = false;
+
+		KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+				GPU_COMMAND_CLEAN_INV_CACHES);
+	} else {
+		/* Disable interrupt */
+		irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+				irq_mask & ~CLEAN_CACHES_COMPLETED);
+
+		kbase_gpu_cache_clean_wait_complete(kbdev);
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_gpu_wait_cache_clean(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	while (kbdev->cache_clean_in_progress) {
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		wait_event_interruptible(kbdev->cache_clean_wait,
+				!kbdev->cache_clean_in_progress);
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
+{
+	bool clear_gpu_fault = false;
+
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val);
+	if (val & GPU_FAULT)
+		clear_gpu_fault = kbase_gpu_fault_interrupt(kbdev,
+					val & MULTIPLE_GPU_FAULTS);
+
+	if (val & RESET_COMPLETED)
+		kbase_pm_reset_done(kbdev);
+
+	if (val & PRFCNT_SAMPLE_COMPLETED)
+		kbase_instr_hwcnt_sample_done(kbdev);
+
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, val);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val);
+
+	/* kbase_pm_check_transitions (called by kbase_pm_power_changed) must
+	 * be called after the IRQ has been cleared. This is because it might
+	 * trigger further power transitions and we don't want to miss the
+	 * interrupt raised to notify us that these further transitions have
+	 * finished. The same applies to kbase_clean_caches_done() - if another
+	 * clean was queued, it might trigger another clean, which might
+	 * generate another interrupt which shouldn't be missed.
+	 */
+
+	if (val & CLEAN_CACHES_COMPLETED)
+		kbase_clean_caches_done(kbdev);
+
+	if (val & POWER_CHANGED_ALL) {
+		kbase_pm_power_changed(kbdev);
+	} else if (val & CLEAN_CACHES_COMPLETED) {
+		/* When 'platform_power_down_only' is enabled, the L2 cache is
+		 * not powered down, but flushed before the GPU power down
+		 * (which is done by the platform code). So the L2 state machine
+		 * requests a cache flush. And when that flush completes, the L2
+		 * state machine needs to be re-invoked to proceed with the GPU
+		 * power down.
+		 * If cache line evict messages can be lost when shader cores
+		 * power down then we need to flush the L2 cache before powering
+		 * down cores. When the flush completes, the shaders' state
+		 * machine needs to be re-invoked to proceed with powering down
+		 * cores.
+		 */
+		if (platform_power_down_only ||
+				kbdev->pm.backend.l2_always_on ||
+				kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_921))
+			kbase_pm_power_changed(kbdev);
+	}
+
+
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h
new file mode 100644
index 0000000..c62f1e5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h
@@ -0,0 +1,99 @@
+/*
+ *
+ * (C) COPYRIGHT 2014,2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Backend-specific HW access device APIs
+ */
+
+#ifndef _KBASE_DEVICE_INTERNAL_H_
+#define _KBASE_DEVICE_INTERNAL_H_
+
+/**
+ * kbase_reg_write - write to GPU register
+ * @kbdev:  Kbase device pointer
+ * @offset: Offset of register
+ * @value:  Value to write
+ *
+ * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false).
+ */
+void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value);
+
+/**
+ * kbase_reg_read - read from GPU register
+ * @kbdev:  Kbase device pointer
+ * @offset: Offset of register
+ *
+ * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false).
+ *
+ * Return: Value in desired register
+ */
+u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset);
+
+/**
+ * kbase_gpu_start_cache_clean - Start a cache clean
+ * @kbdev: Kbase device
+ *
+ * Issue a cache clean and invalidate command to hardware. This function will
+ * take hwaccess_lock.
+ */
+void kbase_gpu_start_cache_clean(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpu_start_cache_clean_nolock - Start a cache clean
+ * @kbdev: Kbase device
+ *
+ * Issue a cache clean and invalidate command to hardware. hwaccess_lock
+ * must be held by the caller.
+ */
+void kbase_gpu_start_cache_clean_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpu_wait_cache_clean - Wait for cache cleaning to finish
+ * @kbdev: Kbase device
+ *
+ * This function will take hwaccess_lock, and may sleep.
+ */
+void kbase_gpu_wait_cache_clean(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpu_cache_clean_wait_complete - Called when cache cleaning has
+ *                                       finished; also called after a GPU
+ *                                       reset.
+ * @kbdev: Kbase device
+ *
+ * Caller must hold the hwaccess_lock.
+ */
+void kbase_gpu_cache_clean_wait_complete(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpu_interrupt - GPU interrupt handler
+ * @kbdev: Kbase device pointer
+ * @val:   The value of the GPU IRQ status register which triggered the call
+ *
+ * This function is called from the interrupt handler when a GPU irq is to be
+ * handled.
+ */
+void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val);
+
+#endif /* _KBASE_DEVICE_INTERNAL_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c
new file mode 100644
index 0000000..9745df63
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c
@@ -0,0 +1,162 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register-based HW access backend APIs
+ */
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_backend.h>
+#include <mali_kbase_reset_gpu.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+int kbase_backend_early_init(struct kbase_device *kbdev)
+{
+	int err;
+
+	err = kbasep_platform_device_init(kbdev);
+	if (err)
+		return err;
+
+	err = kbase_pm_runtime_init(kbdev);
+	if (err)
+		goto fail_runtime_pm;
+
+	/* Ensure we can access the GPU registers */
+	kbase_pm_register_access_enable(kbdev);
+
+	/* Find out GPU properties based on the GPU feature registers */
+	kbase_gpuprops_set(kbdev);
+
+	/* We're done accessing the GPU registers for now. */
+	kbase_pm_register_access_disable(kbdev);
+
+	err = kbase_install_interrupts(kbdev);
+	if (err)
+		goto fail_interrupts;
+
+	return 0;
+
+fail_interrupts:
+	kbase_pm_runtime_term(kbdev);
+fail_runtime_pm:
+	kbasep_platform_device_term(kbdev);
+
+	return err;
+}
+
+void kbase_backend_early_term(struct kbase_device *kbdev)
+{
+	kbase_release_interrupts(kbdev);
+	kbase_pm_runtime_term(kbdev);
+	kbasep_platform_device_term(kbdev);
+}
+
+int kbase_backend_late_init(struct kbase_device *kbdev)
+{
+	int err;
+
+	err = kbase_hwaccess_pm_init(kbdev);
+	if (err)
+		return err;
+
+	err = kbase_reset_gpu_init(kbdev);
+	if (err)
+		goto fail_reset_gpu_init;
+
+	err = kbase_hwaccess_pm_powerup(kbdev, PM_HW_ISSUES_DETECT);
+	if (err)
+		goto fail_pm_powerup;
+
+	err = kbase_backend_timer_init(kbdev);
+	if (err)
+		goto fail_timer;
+
+#ifdef CONFIG_MALI_DEBUG
+#ifndef CONFIG_MALI_NO_MALI
+	if (kbasep_common_test_interrupt_handlers(kbdev) != 0) {
+		dev_err(kbdev->dev, "Interrupt assignment check failed.\n");
+		err = -EINVAL;
+		goto fail_interrupt_test;
+	}
+#endif /* !CONFIG_MALI_NO_MALI */
+#endif /* CONFIG_MALI_DEBUG */
+
+	err = kbase_job_slot_init(kbdev);
+	if (err)
+		goto fail_job_slot;
+
+	/* Initialise devfreq.
+	 * Devfreq needs backend_timer_init() to have completed before it can
+	 * finish its own initialisation, and it must also catch the first
+	 * occurrence of the runtime_suspend callback to stay coherent with
+	 * backend power management; hence it is placed before
+	 * kbase_pm_context_idle().
+	 */
+	err = kbase_backend_devfreq_init(kbdev);
+	if (err)
+		goto fail_devfreq_init;
+
+	/* Idle the GPU and/or cores, if the policy wants it to */
+	kbase_pm_context_idle(kbdev);
+
+	/* Update gpuprops with L2_FEATURES if applicable */
+	kbase_gpuprops_update_l2_features(kbdev);
+
+	init_waitqueue_head(&kbdev->hwaccess.backend.reset_wait);
+
+	return 0;
+
+fail_devfreq_init:
+	kbase_job_slot_term(kbdev);
+fail_job_slot:
+
+#ifdef CONFIG_MALI_DEBUG
+#ifndef CONFIG_MALI_NO_MALI
+fail_interrupt_test:
+#endif /* !CONFIG_MALI_NO_MALI */
+#endif /* CONFIG_MALI_DEBUG */
+
+	kbase_backend_timer_term(kbdev);
+fail_timer:
+	kbase_hwaccess_pm_halt(kbdev);
+fail_pm_powerup:
+	kbase_reset_gpu_term(kbdev);
+fail_reset_gpu_init:
+	kbase_hwaccess_pm_term(kbdev);
+
+	return err;
+}
+
+void kbase_backend_late_term(struct kbase_device *kbdev)
+{
+	kbase_backend_devfreq_term(kbdev);
+	kbase_job_slot_halt(kbdev);
+	kbase_job_slot_term(kbdev);
+	kbase_backend_timer_term(kbdev);
+	kbase_hwaccess_pm_halt(kbdev);
+	kbase_reset_gpu_term(kbdev);
+	kbase_hwaccess_pm_term(kbdev);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
new file mode 100644
index 0000000..29018b2
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
@@ -0,0 +1,125 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Base kernel property query backend APIs
+ */
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <mali_kbase_hwaccess_gpuprops.h>
+
+void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
+					struct kbase_gpuprops_regdump *regdump)
+{
+	int i;
+
+	/* Fill regdump with the content of the relevant registers */
+	regdump->gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
+
+	regdump->l2_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(L2_FEATURES));
+	regdump->core_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(CORE_FEATURES));
+	regdump->tiler_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(TILER_FEATURES));
+	regdump->mem_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(MEM_FEATURES));
+	regdump->mmu_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(MMU_FEATURES));
+	regdump->as_present = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(AS_PRESENT));
+	regdump->js_present = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(JS_PRESENT));
+
+	for (i = 0; i < GPU_MAX_JOB_SLOTS; i++)
+		regdump->js_features[i] = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(JS_FEATURES_REG(i)));
+
+	for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+		regdump->texture_features[i] = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(TEXTURE_FEATURES_REG(i)));
+
+	regdump->thread_max_threads = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(THREAD_MAX_THREADS));
+	regdump->thread_max_workgroup_size = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(THREAD_MAX_WORKGROUP_SIZE));
+	regdump->thread_max_barrier_size = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(THREAD_MAX_BARRIER_SIZE));
+	regdump->thread_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(THREAD_FEATURES));
+	regdump->thread_tls_alloc = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(THREAD_TLS_ALLOC));
+
+	regdump->shader_present_lo = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(SHADER_PRESENT_LO));
+	regdump->shader_present_hi = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(SHADER_PRESENT_HI));
+
+	regdump->tiler_present_lo = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(TILER_PRESENT_LO));
+	regdump->tiler_present_hi = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(TILER_PRESENT_HI));
+
+	regdump->l2_present_lo = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(L2_PRESENT_LO));
+	regdump->l2_present_hi = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(L2_PRESENT_HI));
+
+	regdump->stack_present_lo = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(STACK_PRESENT_LO));
+	regdump->stack_present_hi = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(STACK_PRESENT_HI));
+}
+
+void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
+					struct kbase_gpuprops_regdump *regdump)
+{
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG)) {
+		/* Ensure we can access the GPU registers */
+		kbase_pm_register_access_enable(kbdev);
+
+		regdump->coherency_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(COHERENCY_FEATURES));
+
+		/* We're done accessing the GPU registers for now. */
+		kbase_pm_register_access_disable(kbdev);
+	} else {
+		/* Pre COHERENCY_FEATURES we only supported ACE_LITE */
+		regdump->coherency_features =
+				COHERENCY_FEATURE_BIT(COHERENCY_NONE) |
+				COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
+	}
+}
+
+void kbase_backend_gpuprops_get_l2_features(struct kbase_device *kbdev,
+					struct kbase_gpuprops_regdump *regdump)
+{
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_L2_CONFIG)) {
+		regdump->l2_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(L2_FEATURES));
+	}
+}
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
new file mode 100644
index 0000000..1d18326
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
@@ -0,0 +1,394 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * GPU backend instrumentation APIs.
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_hwaccess_instr.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_instr_internal.h>
+
+int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
+					struct kbase_context *kctx,
+					struct kbase_instr_hwcnt_enable *enable)
+{
+	unsigned long flags;
+	int err = -EINVAL;
+	u32 irq_mask;
+	u32 prfcnt_config;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* The dump buffer must be non-NULL and 2 KB aligned */
+	if ((enable->dump_buffer == 0ULL) || (enable->dump_buffer & (2048 - 1)))
+		goto out_err;
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) {
+		/* Instrumentation is already enabled */
+		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+		goto out_err;
+	}
+
+	/* Enable interrupt */
+	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask |
+						PRFCNT_SAMPLE_COMPLETED);
+
+	/* In use, this context is the owner */
+	kbdev->hwcnt.kctx = kctx;
+	/* Remember the dump address so we can reprogram it later */
+	kbdev->hwcnt.addr = enable->dump_buffer;
+	kbdev->hwcnt.addr_bytes = enable->dump_buffer_bytes;
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+	/* Configure */
+	prfcnt_config = kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT;
+	if (enable->use_secondary)
+		prfcnt_config |= 1 << PRFCNT_CONFIG_SETSELECT_SHIFT;
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
+			prfcnt_config | PRFCNT_CONFIG_MODE_OFF);
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
+					enable->dump_buffer & 0xFFFFFFFF);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
+					enable->dump_buffer >> 32);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),
+					enable->jm_bm);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),
+					enable->shader_bm);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),
+					enable->mmu_l2_bm);
+	/* Due to PRLAM-8186 we need to disable the Tiler before we enable the
+	 * HW counter dump. */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0);
+	else
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
+							enable->tiler_bm);
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
+			prfcnt_config | PRFCNT_CONFIG_MODE_MANUAL);
+
+	/* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump
+	 */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
+							enable->tiler_bm);
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+	kbdev->hwcnt.backend.triggered = 1;
+	wake_up(&kbdev->hwcnt.backend.wait);
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+	err = 0;
+
+	dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx);
+	return err;
+ out_err:
+	return err;
+}
+
+int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx)
+{
+	unsigned long flags, pm_flags;
+	int err = -EINVAL;
+	u32 irq_mask;
+	struct kbase_device *kbdev = kctx->kbdev;
+
+	while (1) {
+		spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DISABLED) {
+			/* Instrumentation is not enabled */
+			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+			goto out;
+		}
+
+		if (kbdev->hwcnt.kctx != kctx) {
+			/* Instrumentation has been set up for another context */
+			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+			goto out;
+		}
+
+		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE)
+			break;
+
+		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+
+		/* Ongoing dump/setup - wait for its completion */
+		wait_event(kbdev->hwcnt.backend.wait,
+					kbdev->hwcnt.backend.triggered != 0);
+	}
+
+	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;
+	kbdev->hwcnt.backend.triggered = 0;
+
+	/* Disable interrupt */
+	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+				irq_mask & ~PRFCNT_SAMPLE_COMPLETED);
+
+	/* Disable the counters */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0);
+
+	kbdev->hwcnt.kctx = NULL;
+	kbdev->hwcnt.addr = 0ULL;
+	kbdev->hwcnt.addr_bytes = 0ULL;
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+
+	dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p",
+									kctx);
+
+	err = 0;
+
+ out:
+	return err;
+}
+
+int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
+{
+	unsigned long flags;
+	int err = -EINVAL;
+	struct kbase_device *kbdev = kctx->kbdev;
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	if (kbdev->hwcnt.kctx != kctx) {
+		/* The instrumentation has been set up for another context */
+		goto unlock;
+	}
+
+	if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_IDLE) {
+		/* HW counters are disabled or another dump is ongoing, or we're
+		 * resetting */
+		goto unlock;
+	}
+
+	kbdev->hwcnt.backend.triggered = 0;
+
+	/* Mark that we're dumping - the PF handler can signal that we faulted
+	 */
+	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DUMPING;
+
+	/* Reconfigure the dump address */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
+					kbdev->hwcnt.addr & 0xFFFFFFFF);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
+					kbdev->hwcnt.addr >> 32);
+
+	/* Start dumping */
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_SAMPLE, NULL, NULL,
+					kbdev->hwcnt.addr, 0);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+					GPU_COMMAND_PRFCNT_SAMPLE);
+
+	dev_dbg(kbdev->dev, "HW counters dump requested for context %p", kctx);
+
+	err = 0;
+
+ unlock:
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+	return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_request_dump);
+
+bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx,
+						bool * const success)
+{
+	unsigned long flags;
+	bool complete = false;
+	struct kbase_device *kbdev = kctx->kbdev;
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE) {
+		*success = true;
+		complete = true;
+	} else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+		*success = false;
+		complete = true;
+		kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+	return complete;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump_complete);
+
+void kbasep_cache_clean_worker(struct work_struct *data)
+{
+	struct kbase_device *kbdev;
+	unsigned long flags, pm_flags;
+
+	kbdev = container_of(data, struct kbase_device,
+						hwcnt.backend.cache_clean_work);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	/* Clean and invalidate the caches so we're sure the MMU tables for
+	 * the dump buffer are valid.
+	 */
+	KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+					KBASE_INSTR_STATE_REQUEST_CLEAN);
+	kbase_gpu_start_cache_clean_nolock(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+
+	kbase_gpu_wait_cache_clean(kbdev);
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+	KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+					KBASE_INSTR_STATE_REQUEST_CLEAN);
+	/* All finished and idle */
+	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+	kbdev->hwcnt.backend.triggered = 1;
+	wake_up(&kbdev->hwcnt.backend.wait);
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+}
+
+void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+		kbdev->hwcnt.backend.triggered = 1;
+		wake_up(&kbdev->hwcnt.backend.wait);
+	} else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING) {
+		if (kbdev->mmu_mode->flags & KBASE_MMU_MODE_HAS_NON_CACHEABLE) {
+			/* All finished and idle */
+			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+			kbdev->hwcnt.backend.triggered = 1;
+			wake_up(&kbdev->hwcnt.backend.wait);
+		} else {
+			int ret;
+			/* Always clean and invalidate the cache after a successful dump
+			 */
+			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
+			ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq,
+						&kbdev->hwcnt.backend.cache_clean_work);
+			KBASE_DEBUG_ASSERT(ret);
+		}
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+}
+
+int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	unsigned long flags;
+	int err;
+
+	/* Wait for dump & cache clean to complete */
+	wait_event(kbdev->hwcnt.backend.wait,
+					kbdev->hwcnt.backend.triggered != 0);
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+		err = -EINVAL;
+		kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+	} else {
+		/* Dump done */
+		KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+							KBASE_INSTR_STATE_IDLE);
+		err = 0;
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+	return err;
+}
+
+int kbase_instr_hwcnt_clear(struct kbase_context *kctx)
+{
+	unsigned long flags;
+	int err = -EINVAL;
+	struct kbase_device *kbdev = kctx->kbdev;
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	/* Check it's the context previously set up and we're not already
+	 * dumping */
+	if (kbdev->hwcnt.kctx != kctx || kbdev->hwcnt.backend.state !=
+							KBASE_INSTR_STATE_IDLE)
+		goto out;
+
+	/* Clear the counters */
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_CLEAR, NULL, NULL, 0u, 0);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+						GPU_COMMAND_PRFCNT_CLEAR);
+
+	err = 0;
+
+out:
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+	return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_clear);
+
+int kbase_instr_backend_init(struct kbase_device *kbdev)
+{
+	int ret = 0;
+
+	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;
+
+	init_waitqueue_head(&kbdev->hwcnt.backend.wait);
+	INIT_WORK(&kbdev->hwcnt.backend.cache_clean_work,
+						kbasep_cache_clean_worker);
+	kbdev->hwcnt.backend.triggered = 0;
+
+	kbdev->hwcnt.backend.cache_clean_wq =
+			alloc_workqueue("Mali cache cleaning workqueue", 0, 1);
+	if (!kbdev->hwcnt.backend.cache_clean_wq)
+		ret = -ENOMEM;
+
+	return ret;
+}
+
+void kbase_instr_backend_term(struct kbase_device *kbdev)
+{
+	destroy_workqueue(kbdev->hwcnt.backend.cache_clean_wq);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h
new file mode 100644
index 0000000..c9fb759
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h
@@ -0,0 +1,57 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2016, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Backend-specific instrumentation definitions
+ */
+
+#ifndef _KBASE_INSTR_DEFS_H_
+#define _KBASE_INSTR_DEFS_H_
+
+/*
+ * Instrumentation State Machine States
+ */
+enum kbase_instr_state {
+	/* State where instrumentation is not active */
+	KBASE_INSTR_STATE_DISABLED = 0,
+	/* State machine is active and ready for a command. */
+	KBASE_INSTR_STATE_IDLE,
+	/* Hardware is currently dumping a frame. */
+	KBASE_INSTR_STATE_DUMPING,
+	/* We've requested a clean to occur on a workqueue */
+	KBASE_INSTR_STATE_REQUEST_CLEAN,
+	/* An error has occurred during DUMPING (page fault). */
+	KBASE_INSTR_STATE_FAULT
+};
+
+/* Structure used for instrumentation and HW counters dumping */
+struct kbase_instr_backend {
+	wait_queue_head_t wait;
+	int triggered;
+
+	enum kbase_instr_state state;
+	struct workqueue_struct *cache_clean_wq;
+	struct work_struct  cache_clean_work;
+};
+
+#endif /* _KBASE_INSTR_DEFS_H_ */
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h
new file mode 100644
index 0000000..2254b9f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h
@@ -0,0 +1,44 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Backend-specific HW access instrumentation APIs
+ */
+
+#ifndef _KBASE_INSTR_INTERNAL_H_
+#define _KBASE_INSTR_INTERNAL_H_
+
+/**
+ * kbasep_cache_clean_worker() - Workqueue for handling cache cleaning
+ * @data: a &struct work_struct
+ */
+void kbasep_cache_clean_worker(struct work_struct *data);
+
+/**
+ * kbase_instr_hwcnt_sample_done() - Dump complete interrupt received
+ * @kbdev: Kbase device
+ */
+void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev);
+
+#endif /* _KBASE_INSTR_INTERNAL_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h
new file mode 100644
index 0000000..ca3c048
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h
@@ -0,0 +1,44 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Backend specific IRQ APIs
+ */
+
+#ifndef _KBASE_IRQ_INTERNAL_H_
+#define _KBASE_IRQ_INTERNAL_H_
+
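+/**
+ * kbase_install_interrupts - Request the JOB, MMU and GPU IRQs
+ * @kbdev: The kbase device
+ *
+ * Return: 0 on success, error code otherwise
+ */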
+int kbase_install_interrupts(struct kbase_device *kbdev);
+
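+/**
+ * kbase_release_interrupts - Free the IRQs previously requested by
+ *                            kbase_install_interrupts()
+ * @kbdev: The kbase device
+ */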
+void kbase_release_interrupts(struct kbase_device *kbdev);
+
+/**
+ * kbase_synchronize_irqs - Ensure that all IRQ handlers have completed
+ *                          execution
+ * @kbdev: The kbase device
+ */
+void kbase_synchronize_irqs(struct kbase_device *kbdev);
+
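+/**
+ * kbasep_common_test_interrupt_handlers - Check that the JOB and MMU IRQs
+ *                                         raised by the GPU reach the CPU
+ * @kbdev: The kbase device
+ *
+ * Only implemented for CONFIG_MALI_DEBUG builds.
+ *
+ * Return: 0 on success, error code otherwise
+ */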
+int kbasep_common_test_interrupt_handlers(
+					struct kbase_device * const kbdev);
+
+#endif /* _KBASE_IRQ_INTERNAL_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
new file mode 100644
index 0000000..fa3d2cc
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
@@ -0,0 +1,474 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016,2018-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+
+#include <linux/interrupt.h>
+
+#if !defined(CONFIG_MALI_NO_MALI)
+
+/* GPU IRQ Tags */
+#define	JOB_IRQ_TAG	0
+#define MMU_IRQ_TAG	1
+#define GPU_IRQ_TAG	2
+
+static void *kbase_tag(void *ptr, u32 tag)
+{
+	return (void *)(((uintptr_t) ptr) | tag);
+}
+
+static void *kbase_untag(void *ptr)
+{
+	return (void *)(((uintptr_t) ptr) & ~3);
+}
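+/*
+ * Illustrative note (not part of the original sources): the tag survives a
+ * round trip because struct kbase_device allocations are at least 4-byte
+ * aligned, leaving the two low bits of the pointer free to carry the tag:
+ *
+ *	void *cookie = kbase_tag(kbdev, MMU_IRQ_TAG);
+ *	struct kbase_device *dev = kbase_untag(cookie);	(dev == kbdev)
+ *	u32 tag = (u32)((uintptr_t)cookie & 3);		(tag == MMU_IRQ_TAG)
+ */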
+
+static irqreturn_t kbase_job_irq_handler(int irq, void *data)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev = kbase_untag(data);
+	u32 val;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (!kbdev->pm.backend.gpu_powered) {
+		/* GPU is turned off - IRQ is not for us */
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return IRQ_NONE;
+	}
+
+	val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS));
+
+#ifdef CONFIG_MALI_DEBUG
+	if (!kbdev->pm.backend.driver_ready_for_irqs)
+		dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+				__func__, irq, val);
+#endif /* CONFIG_MALI_DEBUG */
+
+	if (!val) {
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return IRQ_NONE;
+	}
+
+	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+	kbase_job_done(kbdev, val);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+KBASE_EXPORT_TEST_API(kbase_job_irq_handler);
+
+static irqreturn_t kbase_mmu_irq_handler(int irq, void *data)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev = kbase_untag(data);
+	u32 val;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (!kbdev->pm.backend.gpu_powered) {
+		/* GPU is turned off - IRQ is not for us */
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return IRQ_NONE;
+	}
+
+	atomic_inc(&kbdev->faults_pending);
+
+	val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS));
+
+#ifdef CONFIG_MALI_DEBUG
+	if (!kbdev->pm.backend.driver_ready_for_irqs)
+		dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+				__func__, irq, val);
+#endif /* CONFIG_MALI_DEBUG */
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (!val) {
+		atomic_dec(&kbdev->faults_pending);
+		return IRQ_NONE;
+	}
+
+	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+	kbase_mmu_interrupt(kbdev, val);
+
+	atomic_dec(&kbdev->faults_pending);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev = kbase_untag(data);
+	u32 val;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (!kbdev->pm.backend.gpu_powered) {
+		/* GPU is turned off - IRQ is not for us */
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return IRQ_NONE;
+	}
+
+	val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS));
+
+#ifdef CONFIG_MALI_DEBUG
+	if (!kbdev->pm.backend.driver_ready_for_irqs)
+		dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+				__func__, irq, val);
+#endif /* CONFIG_MALI_DEBUG */
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (!val)
+		return IRQ_NONE;
+
+	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+	kbase_gpu_interrupt(kbdev, val);
+
+	return IRQ_HANDLED;
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_irq_handler);
+
+static irq_handler_t kbase_handler_table[] = {
+	[JOB_IRQ_TAG] = kbase_job_irq_handler,
+	[MMU_IRQ_TAG] = kbase_mmu_irq_handler,
+	[GPU_IRQ_TAG] = kbase_gpu_irq_handler,
+};
+
+#ifdef CONFIG_MALI_DEBUG
+#define  JOB_IRQ_HANDLER JOB_IRQ_TAG
+#define  MMU_IRQ_HANDLER MMU_IRQ_TAG
+#define  GPU_IRQ_HANDLER GPU_IRQ_TAG
+
+/**
+ * kbase_set_custom_irq_handler - Set a custom IRQ handler
+ * @kbdev: Device for which the handler is to be registered
+ * @custom_handler: Handler to be registered
+ * @irq_type: Interrupt type
+ *
+ * Registers the given interrupt handler for the requested interrupt type.
+ * If no custom handler is specified, the default handler is registered.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
+					irq_handler_t custom_handler,
+					int irq_type)
+{
+	int result = 0;
+	irq_handler_t requested_irq_handler = NULL;
+
+	KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) &&
+						(GPU_IRQ_HANDLER >= irq_type));
+
+	/* Release previous handler */
+	if (kbdev->irqs[irq_type].irq)
+		free_irq(kbdev->irqs[irq_type].irq, kbase_tag(kbdev, irq_type));
+
+	requested_irq_handler = custom_handler ? custom_handler :
+						kbase_handler_table[irq_type];
+
+	if (request_irq(kbdev->irqs[irq_type].irq,
+			requested_irq_handler,
+			kbdev->irqs[irq_type].flags | IRQF_SHARED,
+			dev_name(kbdev->dev), kbase_tag(kbdev, irq_type))) {
+		result = -EINVAL;
+		dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
+					kbdev->irqs[irq_type].irq, irq_type);
+#ifdef CONFIG_SPARSE_IRQ
+		dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
+#endif /* CONFIG_SPARSE_IRQ */
+	}
+
+	return result;
+}
+
+KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler);
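+/*
+ * Illustrative usage sketch (debug builds only; my_test_handler is a
+ * hypothetical irq_handler_t, not defined here): install a custom handler
+ * for the MMU IRQ, then pass NULL to restore the default handler from
+ * kbase_handler_table:
+ *
+ *	err = kbase_set_custom_irq_handler(kbdev, my_test_handler,
+ *					   MMU_IRQ_HANDLER);
+ *	...
+ *	err = kbase_set_custom_irq_handler(kbdev, NULL, MMU_IRQ_HANDLER);
+ */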
+
+/* Test correct interrupt assignment and reception by the CPU */
+struct kbasep_irq_test {
+	struct hrtimer timer;
+	wait_queue_head_t wait;
+	int triggered;
+	u32 timeout;
+};
+
+static struct kbasep_irq_test kbasep_irq_test_data;
+
+#define IRQ_TEST_TIMEOUT    500
+
+static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev = kbase_untag(data);
+	u32 val;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (!kbdev->pm.backend.gpu_powered) {
+		/* GPU is turned off - IRQ is not for us */
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return IRQ_NONE;
+	}
+
+	val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS));
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (!val)
+		return IRQ_NONE;
+
+	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+	kbasep_irq_test_data.triggered = 1;
+	wake_up(&kbasep_irq_test_data.wait);
+
+	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev = kbase_untag(data);
+	u32 val;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (!kbdev->pm.backend.gpu_powered) {
+		/* GPU is turned off - IRQ is not for us */
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return IRQ_NONE;
+	}
+
+	val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS));
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (!val)
+		return IRQ_NONE;
+
+	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+	kbasep_irq_test_data.triggered = 1;
+	wake_up(&kbasep_irq_test_data.wait);
+
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val);
+
+	return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer)
+{
+	struct kbasep_irq_test *test_data = container_of(timer,
+						struct kbasep_irq_test, timer);
+
+	test_data->timeout = 1;
+	test_data->triggered = 1;
+	wake_up(&test_data->wait);
+	return HRTIMER_NORESTART;
+}
+
+static int kbasep_common_test_interrupt(
+				struct kbase_device * const kbdev, u32 tag)
+{
+	int err = 0;
+	irq_handler_t test_handler;
+
+	u32 old_mask_val;
+	u16 mask_offset;
+	u16 rawstat_offset;
+
+	switch (tag) {
+	case JOB_IRQ_TAG:
+		test_handler = kbase_job_irq_test_handler;
+		rawstat_offset = JOB_CONTROL_REG(JOB_IRQ_RAWSTAT);
+		mask_offset = JOB_CONTROL_REG(JOB_IRQ_MASK);
+		break;
+	case MMU_IRQ_TAG:
+		test_handler = kbase_mmu_irq_test_handler;
+		rawstat_offset = MMU_REG(MMU_IRQ_RAWSTAT);
+		mask_offset = MMU_REG(MMU_IRQ_MASK);
+		break;
+	case GPU_IRQ_TAG:
+		/* already tested by pm_driver - bail out */
+	default:
+		return 0;
+	}
+
+	/* store old mask */
+	old_mask_val = kbase_reg_read(kbdev, mask_offset);
+	/* mask interrupts */
+	kbase_reg_write(kbdev, mask_offset, 0x0);
+
+	if (kbdev->irqs[tag].irq) {
+		/* release original handler and install test handler */
+		if (kbase_set_custom_irq_handler(kbdev, test_handler, tag) != 0) {
+			err = -EINVAL;
+		} else {
+			kbasep_irq_test_data.timeout = 0;
+			hrtimer_init(&kbasep_irq_test_data.timer,
+					CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+			kbasep_irq_test_data.timer.function =
+						kbasep_test_interrupt_timeout;
+
+			/* trigger interrupt */
+			kbase_reg_write(kbdev, mask_offset, 0x1);
+			kbase_reg_write(kbdev, rawstat_offset, 0x1);
+
+			hrtimer_start(&kbasep_irq_test_data.timer,
+					HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT),
+					HRTIMER_MODE_REL);
+
+			wait_event(kbasep_irq_test_data.wait,
+					kbasep_irq_test_data.triggered != 0);
+
+			if (kbasep_irq_test_data.timeout != 0) {
+				dev_err(kbdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n",
+						kbdev->irqs[tag].irq, tag);
+				err = -EINVAL;
+			} else {
+				dev_dbg(kbdev->dev, "Interrupt %d (index %d) reached CPU.\n",
+						kbdev->irqs[tag].irq, tag);
+			}
+
+			hrtimer_cancel(&kbasep_irq_test_data.timer);
+			kbasep_irq_test_data.triggered = 0;
+
+			/* mask interrupts */
+			kbase_reg_write(kbdev, mask_offset, 0x0);
+
+			/* release test handler */
+			free_irq(kbdev->irqs[tag].irq, kbase_tag(kbdev, tag));
+		}
+
+		/* restore original interrupt */
+		if (request_irq(kbdev->irqs[tag].irq, kbase_handler_table[tag],
+				kbdev->irqs[tag].flags | IRQF_SHARED,
+				dev_name(kbdev->dev), kbase_tag(kbdev, tag))) {
+			dev_err(kbdev->dev, "Can't restore original interrupt %d (index %d)\n",
+						kbdev->irqs[tag].irq, tag);
+			err = -EINVAL;
+		}
+	}
+	/* restore old mask */
+	kbase_reg_write(kbdev, mask_offset, old_mask_val);
+
+	return err;
+}
+
+int kbasep_common_test_interrupt_handlers(
+					struct kbase_device * const kbdev)
+{
+	int err;
+
+	init_waitqueue_head(&kbasep_irq_test_data.wait);
+	kbasep_irq_test_data.triggered = 0;
+
+	/* A suspend won't happen during startup/insmod */
+	kbase_pm_context_active(kbdev);
+
+	err = kbasep_common_test_interrupt(kbdev, JOB_IRQ_TAG);
+	if (err) {
+		dev_err(kbdev->dev, "Interrupt JOB_IRQ didn't reach CPU. Check interrupt assignments.\n");
+		goto out;
+	}
+
+	err = kbasep_common_test_interrupt(kbdev, MMU_IRQ_TAG);
+	if (err) {
+		dev_err(kbdev->dev, "Interrupt MMU_IRQ didn't reach CPU. Check interrupt assignments.\n");
+		goto out;
+	}
+
+	dev_dbg(kbdev->dev, "Interrupts are correctly assigned.\n");
+
+ out:
+	kbase_pm_context_idle(kbdev);
+
+	return err;
+}
+#endif /* CONFIG_MALI_DEBUG */
+
+int kbase_install_interrupts(struct kbase_device *kbdev)
+{
+	u32 nr = ARRAY_SIZE(kbase_handler_table);
+	int err;
+	u32 i;
+
+	for (i = 0; i < nr; i++) {
+		err = request_irq(kbdev->irqs[i].irq, kbase_handler_table[i],
+				kbdev->irqs[i].flags | IRQF_SHARED,
+				dev_name(kbdev->dev),
+				kbase_tag(kbdev, i));
+		if (err) {
+			dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
+							kbdev->irqs[i].irq, i);
+#ifdef CONFIG_SPARSE_IRQ
+			dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
+#endif /* CONFIG_SPARSE_IRQ */
+			goto release;
+		}
+	}
+
+	return 0;
+
+ release:
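+	/* Unwind: i is the index of the request_irq() call that failed, so
+	 * free only the IRQs that were successfully requested before it. */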
+	while (i-- > 0)
+		free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));
+
+	return err;
+}
+
+void kbase_release_interrupts(struct kbase_device *kbdev)
+{
+	u32 nr = ARRAY_SIZE(kbase_handler_table);
+	u32 i;
+
+	for (i = 0; i < nr; i++) {
+		if (kbdev->irqs[i].irq)
+			free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));
+	}
+}
+
+void kbase_synchronize_irqs(struct kbase_device *kbdev)
+{
+	u32 nr = ARRAY_SIZE(kbase_handler_table);
+	u32 i;
+
+	for (i = 0; i < nr; i++) {
+		if (kbdev->irqs[i].irq)
+			synchronize_irq(kbdev->irqs[i].irq);
+	}
+}
+
+KBASE_EXPORT_TEST_API(kbase_synchronize_irqs);
+
+#endif /* !defined(CONFIG_MALI_NO_MALI) */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c
new file mode 100644
index 0000000..c8153ba
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c
@@ -0,0 +1,244 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register backend context / address space management
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
+
+/**
+ * assign_and_activate_kctx_addr_space - Assign an AS to a context
+ * @kbdev: Kbase device
+ * @kctx: Kbase context
+ * @current_as: Address Space to assign
+ *
+ * Assign an Address Space (AS) to a context, and add the context to the Policy.
+ *
+ * This includes:
+ *   setting up the global runpool_irq structure and the context on the AS,
+ *   activating the MMU on the AS,
+ *   allowing jobs to be submitted on the AS.
+ *
+ * Context:
+ *   kbasep_js_kctx_info.jsctx_mutex held,
+ *   kbasep_js_device_data.runpool_mutex held,
+ *   AS transaction mutex held,
+ *   Runpool IRQ lock held
+ */
+static void assign_and_activate_kctx_addr_space(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						struct kbase_as *current_as)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	lockdep_assert_held(&js_devdata->runpool_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* Attribute handling */
+	kbasep_js_ctx_attr_runpool_retain_ctx(kbdev, kctx);
+
+	/* Allow it to run jobs */
+	kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+	kbase_js_runpool_inc_context_count(kbdev, kctx);
+}
+
+bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						int js)
+{
+	int i;
+
+	if (kbdev->hwaccess.active_kctx[js] == kctx) {
+		/* Context is already active */
+		return true;
+	}
+
+	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+		if (kbdev->as_to_kctx[i] == kctx) {
+			/* Context already has ASID - mark as active */
+			return true;
+		}
+	}
+
+	/* Context does not have address space assigned */
+	return false;
+}
+
+void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
+						struct kbase_context *kctx)
+{
+	int as_nr = kctx->as_nr;
+
+	if (as_nr == KBASEP_AS_NR_INVALID) {
+		WARN(1, "Attempting to release context without ASID\n");
+		return;
+	}
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (atomic_read(&kctx->refcount) != 1) {
+		WARN(1, "Attempting to release active ASID\n");
+		return;
+	}
+
+	kbasep_js_clear_submit_allowed(&kbdev->js_data, kctx);
+
+	kbase_ctx_sched_release_ctx(kctx);
+	kbase_js_runpool_dec_context_count(kbdev, kctx);
+}
+
+void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
+						struct kbase_context *kctx)
+{
+}
+
+int kbase_backend_find_and_release_free_address_space(
+		struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	unsigned long flags;
+	int i;
+
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_lock(&js_devdata->runpool_mutex);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+		struct kbasep_js_kctx_info *as_js_kctx_info;
+		struct kbase_context *as_kctx;
+
+		as_kctx = kbdev->as_to_kctx[i];
+		as_js_kctx_info = &as_kctx->jctx.sched_info;
+
+		/* Don't release privileged or active contexts, or contexts with
+		 * jobs running.
+		 * Note that a context will have at least 1 reference (which
+		 * was previously taken by kbasep_js_schedule_ctx()) until
+		 * descheduled.
+		 */
+		if (as_kctx && !kbase_ctx_flag(as_kctx, KCTX_PRIVILEGED) &&
+			atomic_read(&as_kctx->refcount) == 1) {
+			if (!kbasep_js_runpool_retain_ctx_nolock(kbdev,
+								as_kctx)) {
+				WARN(1, "Failed to retain active context\n");
+
+				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+						flags);
+				mutex_unlock(&js_devdata->runpool_mutex);
+				mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+				return KBASEP_AS_NR_INVALID;
+			}
+
+			kbasep_js_clear_submit_allowed(js_devdata, as_kctx);
+
+			/* Drop and retake locks to take the jsctx_mutex on the
+			 * context we're about to release without violating lock
+			 * ordering
+			 */
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+			mutex_unlock(&js_devdata->runpool_mutex);
+			mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+
+			/* Release context from address space */
+			mutex_lock(&as_js_kctx_info->ctx.jsctx_mutex);
+			mutex_lock(&js_devdata->runpool_mutex);
+
+			kbasep_js_runpool_release_ctx_nolock(kbdev, as_kctx);
+
+			if (!kbase_ctx_flag(as_kctx, KCTX_SCHEDULED)) {
+				kbasep_js_runpool_requeue_or_kill_ctx(kbdev,
+								as_kctx,
+								true);
+
+				mutex_unlock(&js_devdata->runpool_mutex);
+				mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
+
+				return i;
+			}
+
+			/* Context was retained while locks were dropped,
+			 * continue looking for free AS */
+
+			mutex_unlock(&js_devdata->runpool_mutex);
+			mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
+
+			mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+			mutex_lock(&js_devdata->runpool_mutex);
+			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		}
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	mutex_unlock(&js_devdata->runpool_mutex);
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+	return KBASEP_AS_NR_INVALID;
+}
+
+bool kbase_backend_use_ctx(struct kbase_device *kbdev,
+				struct kbase_context *kctx,
+				int as_nr)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbase_as *new_address_space = NULL;
+	int js;
+
+	js_devdata = &kbdev->js_data;
+
+	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+		if (kbdev->hwaccess.active_kctx[js] == kctx) {
+			WARN(1, "Context is already scheduled in\n");
+			return false;
+		}
+	}
+
+	new_address_space = &kbdev->as[as_nr];
+
+	lockdep_assert_held(&js_devdata->runpool_mutex);
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	assign_and_activate_kctx_addr_space(kbdev, kctx, new_address_space);
+
+	if (kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) {
+		/* We need to retain it to keep the corresponding address space
+		 */
+		kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+	}
+
+	return true;
+}
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
new file mode 100644
index 0000000..b4d2ae1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
@@ -0,0 +1,116 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register-based HW access backend specific definitions
+ */
+
+#ifndef _KBASE_HWACCESS_GPU_DEFS_H_
+#define _KBASE_HWACCESS_GPU_DEFS_H_
+
+/* SLOT_RB_SIZE must be < 256 */
+#define SLOT_RB_SIZE 2
+#define SLOT_RB_MASK (SLOT_RB_SIZE - 1)
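+/*
+ * Informative sketch (an assumption based on the mask above, which only
+ * works while SLOT_RB_SIZE is a power of two): the read/write indices are
+ * free-running u8 values, so an entry is addressed by masking rather than
+ * by modulo, e.g.
+ *
+ *	entry = &rb->entries[rb->read_idx & SLOT_RB_MASK];
+ *
+ * Free-running u8 indices wrap at 256, hence the size limit noted above.
+ */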
+
+/**
+ * struct rb_entry - Ringbuffer entry
+ * @katom:	Atom associated with this entry
+ */
+struct rb_entry {
+	struct kbase_jd_atom *katom;
+};
+
+/**
+ * struct slot_rb - Slot ringbuffer
+ * @entries:		Ringbuffer entries
+ * @last_context:	The last context to submit a job on this slot
+ * @read_idx:		Current read index of buffer
+ * @write_idx:		Current write index of buffer
+ * @job_chain_flag:	Flag used to implement jobchain disambiguation
+ */
+struct slot_rb {
+	struct rb_entry entries[SLOT_RB_SIZE];
+
+	struct kbase_context *last_context;
+
+	u8 read_idx;
+	u8 write_idx;
+
+	u8 job_chain_flag;
+};
+
+/**
+ * struct kbase_backend_data - GPU backend specific data for HW access layer
+ * @slot_rb:			Slot ringbuffers
+ * @rmu_workaround_flag:	When PRLAM-8987 is present, this flag determines
+ *				whether slots 0/1 or slot 2 are currently being
+ *				pulled from
+ * @scheduling_timer:		The timer tick used for rescheduling jobs
+ * @timer_running:		Is the timer running? The runpool_mutex must be
+ *				held whilst modifying this.
+ * @suspend_timer:              Is the timer suspended? Set when a suspend
+ *                              occurs and cleared on resume. The runpool_mutex
+ *                              must be held whilst modifying this.
+ * @reset_gpu:			Set to a KBASE_RESET_xxx value (see comments)
+ * @reset_workq:		Work queue for performing the reset
+ * @reset_work:			Work item for performing the reset
+ * @reset_wait:			Wait event signalled when the reset is complete
+ * @reset_timer:		Timeout for soft-stops before the reset
+ * @timeouts_updated:           Have timeout values just been updated?
+ *
+ * The hwaccess_lock (a spinlock) must be held when accessing this structure
+ */
+struct kbase_backend_data {
+	struct slot_rb slot_rb[BASE_JM_MAX_NR_SLOTS];
+
+	bool rmu_workaround_flag;
+
+	struct hrtimer scheduling_timer;
+
+	bool timer_running;
+	bool suspend_timer;
+
+	atomic_t reset_gpu;
+
+/* The GPU reset isn't pending */
+#define KBASE_RESET_GPU_NOT_PENDING     0
+/* kbase_prepare_to_reset_gpu has been called */
+#define KBASE_RESET_GPU_PREPARED        1
+/* kbase_reset_gpu has been called - the reset will now definitely happen
+ * within the timeout period */
+#define KBASE_RESET_GPU_COMMITTED       2
+/* The GPU reset process is currently occurring (timeout has expired or
+ * kbasep_try_reset_gpu_early was called) */
+#define KBASE_RESET_GPU_HAPPENING       3
+/* Reset the GPU silently, used when resetting the GPU as part of normal
+ * behavior (e.g. when exiting protected mode). */
+#define KBASE_RESET_GPU_SILENT          4
+	struct workqueue_struct *reset_workq;
+	struct work_struct reset_work;
+	wait_queue_head_t reset_wait;
+	struct hrtimer reset_timer;
+
+	bool timeouts_updated;
+};
+
+#endif /* _KBASE_HWACCESS_GPU_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
new file mode 100644
index 0000000..d4f96c8
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
@@ -0,0 +1,1471 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Base kernel job manager APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_config.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_tracepoints.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_reset_gpu.h>
+#include <mali_kbase_ctx_sched.h>
+#include <mali_kbase_hwcnt_context.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+
+#define beenthere(kctx, f, a...) \
+			dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
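+/*
+ * Illustrative use (hypothetical message and argument): expands to a
+ * dev_dbg() call prefixed with the current function name, e.g.
+ *
+ *	beenthere(kctx, " zapping atom %p", katom);
+ */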
+
+static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev);
+
+static inline int kbasep_jm_is_js_free(struct kbase_device *kbdev, int js,
+						struct kbase_context *kctx)
+{
+	return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT));
+}
+
+static u64 kbase_job_write_affinity(struct kbase_device *kbdev,
+				base_jd_core_req core_req,
+				int js)
+{
+	u64 affinity;
+
+	if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) ==
+			BASE_JD_REQ_T) {
+		/* Tiler-only atom */
+		/* If the hardware supports XAFFINITY then we'll only enable
+		 * the tiler (which is the default so this is a no-op),
+		 * otherwise enable shader core 0.
+		 */
+		if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
+			affinity = 1;
+		else
+			affinity = 0;
+	} else if ((core_req & (BASE_JD_REQ_COHERENT_GROUP |
+			BASE_JD_REQ_SPECIFIC_COHERENT_GROUP))) {
+		unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
+		struct mali_base_gpu_coherent_group_info *coherency_info =
+			&kbdev->gpu_props.props.coherency_info;
+
+		affinity = kbdev->pm.backend.shaders_avail &
+				kbdev->pm.debug_core_mask[js];
+
+		/* JS2 on a dual core group system targets core group 1. All
+		 * other cases target core group 0.
+		 */
+		if (js == 2 && num_core_groups > 1)
+			affinity &= coherency_info->group[1].core_mask;
+		else
+			affinity &= coherency_info->group[0].core_mask;
+	} else {
+		/* Use all cores */
+		affinity = kbdev->pm.backend.shaders_avail &
+				kbdev->pm.debug_core_mask[js];
+	}
+
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO),
+					affinity & 0xFFFFFFFF);
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_HI),
+					affinity >> 32);
+
+	return affinity;
+}
+
+void kbase_job_hw_submit(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom,
+				int js)
+{
+	struct kbase_context *kctx;
+	u32 cfg;
+	u64 jc_head = katom->jc;
+	u64 affinity;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+	KBASE_DEBUG_ASSERT(katom);
+
+	kctx = katom->kctx;
+
+	/* Command register must be available */
+	KBASE_DEBUG_ASSERT(kbasep_jm_is_js_free(kbdev, js, kctx));
+
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO),
+						jc_head & 0xFFFFFFFF);
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI),
+						jc_head >> 32);
+
+	affinity = kbase_job_write_affinity(kbdev, katom->core_req, js);
+
+	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
+	 * start */
+	cfg = kctx->as_nr;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION) &&
+			!(kbdev->serialize_jobs & KBASE_SERIALIZE_RESET))
+		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;
+
+	if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_START))
+		cfg |= JS_CONFIG_START_FLUSH_NO_ACTION;
+	else
+		cfg |= JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE;
+
+	if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_END) &&
+			!(kbdev->serialize_jobs & KBASE_SERIALIZE_RESET))
+		cfg |= JS_CONFIG_END_FLUSH_NO_ACTION;
+	else if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_CLEAN_ONLY_SAFE))
+		cfg |= JS_CONFIG_END_FLUSH_CLEAN;
+	else
+		cfg |= JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10649))
+		cfg |= JS_CONFIG_START_MMU;
+
+	cfg |= JS_CONFIG_THREAD_PRI(8);
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE) &&
+		(katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED))
+		cfg |= JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK;
+
+	if (kbase_hw_has_feature(kbdev,
+				BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+		if (!kbdev->hwaccess.backend.slot_rb[js].job_chain_flag) {
+			cfg |= JS_CONFIG_JOB_CHAIN_FLAG;
+			katom->atom_flags |= KBASE_KATOM_FLAGS_JOBCHAIN;
+			kbdev->hwaccess.backend.slot_rb[js].job_chain_flag =
+								true;
+		} else {
+			katom->atom_flags &= ~KBASE_KATOM_FLAGS_JOBCHAIN;
+			kbdev->hwaccess.backend.slot_rb[js].job_chain_flag =
+								false;
+		}
+	}
+
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_CONFIG_NEXT), cfg);
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
+		kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_FLUSH_ID_NEXT),
+				katom->flush_id);
+
+	/* Write an approximate start timestamp.
+	 * It's approximate because there might be a job in the HEAD register.
+	 */
+	katom->start_timestamp = ktime_get();
+
+	/* GO ! */
+	dev_dbg(kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx",
+				katom, kctx, js, jc_head);
+
+	KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_SUBMIT, kctx, katom, jc_head, js,
+							(u32)affinity);
+
+	KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, kctx,
+		js, kbase_jd_atom_id(kctx, katom), TL_JS_EVENT_START);
+
+	KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG(kbdev, katom, jc_head,
+			affinity, cfg);
+	KBASE_TLSTREAM_TL_RET_CTX_LPU(
+		kbdev,
+		kctx,
+		&kbdev->gpu_props.props.raw_props.js_features[
+			katom->slot_nr]);
+	KBASE_TLSTREAM_TL_RET_ATOM_AS(kbdev, katom, &kbdev->as[kctx->as_nr]);
+	KBASE_TLSTREAM_TL_RET_ATOM_LPU(
+			kbdev,
+			katom,
+			&kbdev->gpu_props.props.raw_props.js_features[js],
+			"ctx_nr,atom_nr");
+#ifdef CONFIG_GPU_TRACEPOINTS
+	if (!kbase_backend_nr_atoms_submitted(kbdev, js)) {
+		/* If this is the only job on the slot, trace it as starting */
+		char js_string[16];
+
+		trace_gpu_sched_switch(
+				kbasep_make_job_slot_string(js, js_string,
+						sizeof(js_string)),
+				ktime_to_ns(katom->start_timestamp),
+				(u32)katom->kctx->id, 0, katom->work_id);
+		kbdev->hwaccess.backend.slot_rb[js].last_context = katom->kctx;
+	}
+#endif
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
+						JS_COMMAND_START);
+}
+
+/**
+ * kbasep_job_slot_update_head_start_timestamp - Update timestamp
+ * @kbdev: kbase device
+ * @js: job slot
+ * @end_timestamp: Timestamp of the completion IRQ for the finished jobs
+ *
+ * Update the start_timestamp of the job currently in the HEAD, based on the
+ * fact that we got an IRQ for the previous set of completed jobs.
+ *
+ * The estimate also takes into account the time the job was submitted, to
+ * work out the best estimate (which might still result in an over-estimate of
+ * the time spent)
+ */
+static void kbasep_job_slot_update_head_start_timestamp(
+						struct kbase_device *kbdev,
+						int js,
+						ktime_t end_timestamp)
+{
+	ktime_t timestamp_diff;
+	struct kbase_jd_atom *katom;
+
+	/* Checking the HEAD position for the job slot */
+	katom = kbase_gpu_inspect(kbdev, js, 0);
+	if (katom != NULL) {
+		timestamp_diff = ktime_sub(end_timestamp,
+				katom->start_timestamp);
+		if (ktime_to_ns(timestamp_diff) >= 0) {
+			/* Only update the timestamp if it's a better estimate
+			 * than what's currently stored. This is because our
+			 * estimate that accounts for the throttle time may be
+			 * too much of an overestimate */
+			katom->start_timestamp = end_timestamp;
+		}
+	}
+}
+
+/**
+ * kbasep_trace_tl_event_lpu_softstop - Call event_lpu_softstop timeline
+ * tracepoint
+ * @kbdev: kbase device
+ * @js: job slot
+ *
+ * Make a tracepoint call to the instrumentation module informing that a
+ * soft-stop happened on the given LPU (job slot).
+ */
+static void kbasep_trace_tl_event_lpu_softstop(struct kbase_device *kbdev,
+					int js)
+{
+	KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP(
+		kbdev,
+		&kbdev->gpu_props.props.raw_props.js_features[js]);
+}
+
+void kbase_job_done(struct kbase_device *kbdev, u32 done)
+{
+	int i;
+	u32 count = 0;
+	ktime_t end_timestamp;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	KBASE_TRACE_ADD(kbdev, JM_IRQ, NULL, NULL, 0, done);
+
+	end_timestamp = ktime_get();
+
+	while (done) {
+		u32 failed = done >> 16;
+
+		/* treat failed slots as finished slots */
+		u32 finished = (done & 0xFFFF) | failed;
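+		/* e.g. done == 0x00010002: slot 1 completed and slot 0
+		 * failed; the failed slot is treated as finished as well. */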
+
+		/* Note: This is inherently unfair, as we always check
+		 * for lower numbered interrupts before the higher
+		 * numbered ones. */
+		i = ffs(finished) - 1;
+		KBASE_DEBUG_ASSERT(i >= 0);
+
+		do {
+			int nr_done;
+			u32 active;
+			u32 completion_code = BASE_JD_EVENT_DONE;/* assume OK */
+			u64 job_tail = 0;
+
+			if (failed & (1u << i)) {
+				/* read out the job slot status code if the job
+				 * slot reported failure */
+				completion_code = kbase_reg_read(kbdev,
+					JOB_SLOT_REG(i, JS_STATUS));
+
+				if (completion_code == BASE_JD_EVENT_STOPPED) {
+					KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(
+						kbdev, NULL,
+						i, 0, TL_JS_EVENT_SOFT_STOP);
+
+					kbasep_trace_tl_event_lpu_softstop(
+						kbdev, i);
+
+					/* Soft-stopped job - read the value of
+					 * JS<n>_TAIL so that the job chain can
+					 * be resumed */
+					job_tail = (u64)kbase_reg_read(kbdev,
+						JOB_SLOT_REG(i, JS_TAIL_LO)) |
+						((u64)kbase_reg_read(kbdev,
+						JOB_SLOT_REG(i, JS_TAIL_HI))
+						 << 32);
+				} else if (completion_code ==
+						BASE_JD_EVENT_NOT_STARTED) {
+					/* PRLAM-10673 can cause a TERMINATED
+					 * job to come back as NOT_STARTED, but
+					 * the error interrupt helps us detect
+					 * it */
+					completion_code =
+						BASE_JD_EVENT_TERMINATED;
+				}
+
+				kbase_gpu_irq_evict(kbdev, i, completion_code);
+
+				/* Some jobs that encounter a BUS FAULT may result in corrupted
+				 * state causing future jobs to hang. Reset GPU before
+				 * allowing any other jobs on the slot to continue. */
+				if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_3076)) {
+					if (completion_code == BASE_JD_EVENT_JOB_BUS_FAULT) {
+						if (kbase_prepare_to_reset_gpu_locked(kbdev))
+							kbase_reset_gpu_locked(kbdev);
+					}
+				}
+			}
+
+			kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR),
+					done & ((1 << i) | (1 << (i + 16))));
+			active = kbase_reg_read(kbdev,
+					JOB_CONTROL_REG(JOB_IRQ_JS_STATE));
+
+			if (((active >> i) & 1) == 0 &&
+					(((done >> (i + 16)) & 1) == 0)) {
+				/* There is a potential race we must work
+				 * around:
+				 *
+				 *  1. A job slot has a job in both current and
+				 *     next registers
+				 *  2. The job in current completes
+				 *     successfully, the IRQ handler reads
+				 *     RAWSTAT and calls this function with the
+				 *     relevant bit set in "done"
+				 *  3. The job in the next registers becomes the
+				 *     current job on the GPU
+				 *  4. Sometime before the JOB_IRQ_CLEAR line
+				 *     above the job on the GPU _fails_
+				 *  5. The IRQ_CLEAR clears the done bit but not
+				 *     the failed bit. This atomically sets
+				 *     JOB_IRQ_JS_STATE. However since both jobs
+				 *     have now completed the relevant bits for
+				 *     the slot are set to 0.
+				 *
+				 * If we now did nothing then we'd incorrectly
+				 * assume that _both_ jobs had completed
+				 * successfully (since we haven't yet observed
+				 * the fail bit being set in RAWSTAT).
+				 *
+				 * So at this point if there are no active jobs
+				 * left we check to see if RAWSTAT has a failure
+				 * bit set for the job slot. If it does we know
+				 * that there has been a new failure that we
+				 * didn't previously know about, so we make sure
+				 * that we record this in active (but we wait
+				 * for the next loop to deal with it).
+				 *
+				 * If we were handling a job failure (i.e. done
+				 * has the relevant high bit set) then we know
+				 * that the value read back from
+				 * JOB_IRQ_JS_STATE is the correct number of
+				 * remaining jobs because the failed job will
+				 * have prevented any further jobs from starting
+				 * execution.
+				 */
+				u32 rawstat = kbase_reg_read(kbdev,
+					JOB_CONTROL_REG(JOB_IRQ_RAWSTAT));
+
+				if ((rawstat >> (i + 16)) & 1) {
+					/* There is a failed job that we've
+					 * missed - add it back to active */
+					active |= (1u << i);
+				}
+			}
+
+			dev_dbg(kbdev->dev, "Job ended with status 0x%08X\n",
+							completion_code);
+
+			nr_done = kbase_backend_nr_atoms_submitted(kbdev, i);
+			nr_done -= (active >> i) & 1;
+			nr_done -= (active >> (i + 16)) & 1;
+
+			if (nr_done <= 0) {
+				dev_warn(kbdev->dev, "Spurious interrupt on slot %d",
+									i);
+
+				goto spurious;
+			}
+
+			count += nr_done;
+
+			while (nr_done) {
+				if (nr_done == 1) {
+					kbase_gpu_complete_hw(kbdev, i,
+								completion_code,
+								job_tail,
+								&end_timestamp);
+					kbase_jm_try_kick_all(kbdev);
+				} else {
+					/* More than one job has completed.
+					 * Since this is not the last job being
+					 * reported this time it must have
+					 * passed. This is because the hardware
+					 * will not allow further jobs in a job
+					 * slot to complete until the failed job
+					 * is cleared from the IRQ status.
+					 */
+					kbase_gpu_complete_hw(kbdev, i,
+							BASE_JD_EVENT_DONE,
+							0,
+							&end_timestamp);
+				}
+				nr_done--;
+			}
+ spurious:
+			done = kbase_reg_read(kbdev,
+					JOB_CONTROL_REG(JOB_IRQ_RAWSTAT));
+
+			if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10883)) {
+				/* Workaround for missing interrupt caused by
+				 * PRLAM-10883 */
+				if (((active >> i) & 1) && (0 ==
+						kbase_reg_read(kbdev,
+							JOB_SLOT_REG(i,
+							JS_STATUS)))) {
+					/* Force job slot to be processed again
+					 */
+					done |= (1u << i);
+				}
+			}
+
+			failed = done >> 16;
+			finished = (done & 0xFFFF) | failed;
+			if (done)
+				end_timestamp = ktime_get();
+		} while (finished & (1 << i));
+
+		kbasep_job_slot_update_head_start_timestamp(kbdev, i,
+								end_timestamp);
+	}
+
+	if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+						KBASE_RESET_GPU_COMMITTED) {
+		/* If we're trying to reset the GPU then we might be able to do
+		 * it early (without waiting for a timeout) because some jobs
+		 * have completed
+		 */
+		kbasep_try_reset_gpu_early_locked(kbdev);
+	}
+	KBASE_TRACE_ADD(kbdev, JM_IRQ_END, NULL, NULL, 0, count);
+}
+
+static bool kbasep_soft_stop_allowed(struct kbase_device *kbdev,
+					struct kbase_jd_atom *katom)
+{
+	bool soft_stops_allowed = true;
+
+	if (kbase_jd_katom_is_protected(katom)) {
+		soft_stops_allowed = false;
+	} else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) {
+		if ((katom->core_req & BASE_JD_REQ_T) != 0)
+			soft_stops_allowed = false;
+	}
+	return soft_stops_allowed;
+}
+
+static bool kbasep_hard_stop_allowed(struct kbase_device *kbdev,
+						base_jd_core_req core_reqs)
+{
+	bool hard_stops_allowed = true;
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8394)) {
+		if ((core_reqs & BASE_JD_REQ_T) != 0)
+			hard_stops_allowed = false;
+	}
+	return hard_stops_allowed;
+}
+
+void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
+					int js,
+					u32 action,
+					base_jd_core_req core_reqs,
+					struct kbase_jd_atom *target_katom)
+{
+#if KBASE_TRACE_ENABLE
+	u32 status_reg_before;
+	u64 job_in_head_before;
+	u32 status_reg_after;
+
+	KBASE_DEBUG_ASSERT(!(action & (~JS_COMMAND_MASK)));
+
+	/* Check the head pointer */
+	job_in_head_before = ((u64) kbase_reg_read(kbdev,
+					JOB_SLOT_REG(js, JS_HEAD_LO)))
+			| (((u64) kbase_reg_read(kbdev,
+					JOB_SLOT_REG(js, JS_HEAD_HI)))
+									<< 32);
+	status_reg_before = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS));
+#endif
+
+	if (action == JS_COMMAND_SOFT_STOP) {
+		bool soft_stop_allowed = kbasep_soft_stop_allowed(kbdev,
+								target_katom);
+
+		if (!soft_stop_allowed) {
+#ifdef CONFIG_MALI_DEBUG
+			dev_dbg(kbdev->dev,
+					"Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X",
+					(unsigned int)core_reqs);
+#endif				/* CONFIG_MALI_DEBUG */
+			return;
+		}
+
+		/* We are about to issue a soft stop, so mark the atom as having
+		 * been soft stopped */
+		target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED;
+
+		/* Mark the point where we issue the soft-stop command */
+		KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE(kbdev, target_katom);
+
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
+			int i;
+
+			for (i = 0;
+			     i < kbase_backend_nr_atoms_submitted(kbdev, js);
+			     i++) {
+				struct kbase_jd_atom *katom;
+
+				katom = kbase_gpu_inspect(kbdev, js, i);
+
+				KBASE_DEBUG_ASSERT(katom);
+
+				/* For HW_ISSUE_8316, only 'bad' jobs attacking
+				 * the system can cause this issue: normally,
+				 * all memory should be allocated in multiples
+				 * of 4 pages, and growable memory should be
+				 * changed size in multiples of 4 pages.
+				 *
+				 * Whilst such 'bad' jobs can be cleared by a
+				 * GPU reset, the locking up of a uTLB entry
+				 * caused by the bad job could also stall other
+				 * ASs, meaning that other ASs' jobs don't
+				 * complete in the 'grace' period before the
+				 * reset. We don't want to lose other ASs' jobs
+				 * when they would normally complete fine, so we
+				 * must 'poke' the MMU regularly to help other
+				 * ASs complete */
+				kbase_as_poking_timer_retain_atom(
+						kbdev, katom->kctx, katom);
+			}
+		}
+
+		if (kbase_hw_has_feature(
+				kbdev,
+				BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+			action = (target_katom->atom_flags &
+					KBASE_KATOM_FLAGS_JOBCHAIN) ?
+				JS_COMMAND_SOFT_STOP_1 :
+				JS_COMMAND_SOFT_STOP_0;
+		}
+	} else if (action == JS_COMMAND_HARD_STOP) {
+		bool hard_stop_allowed = kbasep_hard_stop_allowed(kbdev,
+								core_reqs);
+
+		if (!hard_stop_allowed) {
+			/* Jobs can be hard-stopped for the following reasons:
+			 *  * CFS decides the job has been running too long (and
+			 *    soft-stop has not occurred). In this case the GPU
+			 *    will be reset by CFS if the job remains on the
+			 *    GPU.
+			 *
+			 *  * The context is destroyed, kbase_jd_zap_context
+			 *    will attempt to hard-stop the job. However it also
+			 *    has a watchdog which will cause the GPU to be
+			 *    reset if the job remains on the GPU.
+			 *
+			 *  * An (unhandled) MMU fault occurred. As long as
+			 *    BASE_HW_ISSUE_8245 is defined then the GPU will be
+			 *    reset.
+			 *
+			 * All three cases result in the GPU being reset if the
+			 * hard-stop fails, so it is safe to just return and
+			 * ignore the hard-stop request.
+			 */
+			dev_warn(kbdev->dev,
+					"Attempt made to hard-stop a job that cannot be hard-stopped. core_reqs = 0x%X",
+					(unsigned int)core_reqs);
+			return;
+		}
+		target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_HARD_STOPPED;
+
+		if (kbase_hw_has_feature(
+				kbdev,
+				BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+			action = (target_katom->atom_flags &
+					KBASE_KATOM_FLAGS_JOBCHAIN) ?
+				JS_COMMAND_HARD_STOP_1 :
+				JS_COMMAND_HARD_STOP_0;
+		}
+	}
+
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND), action);
+
+#if KBASE_TRACE_ENABLE
+	status_reg_after = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS));
+	if (status_reg_after == BASE_JD_EVENT_ACTIVE) {
+		struct kbase_jd_atom *head;
+		struct kbase_context *head_kctx;
+
+		head = kbase_gpu_inspect(kbdev, js, 0);
+		head_kctx = head->kctx;
+
+		if (status_reg_before == BASE_JD_EVENT_ACTIVE)
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, head_kctx,
+						head, job_in_head_before, js);
+		else
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
+						0, js);
+
+		switch (action) {
+		case JS_COMMAND_SOFT_STOP:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, head_kctx,
+							head, head->jc, js);
+			break;
+		case JS_COMMAND_SOFT_STOP_0:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, head_kctx,
+							head, head->jc, js);
+			break;
+		case JS_COMMAND_SOFT_STOP_1:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, head_kctx,
+							head, head->jc, js);
+			break;
+		case JS_COMMAND_HARD_STOP:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, head_kctx,
+							head, head->jc, js);
+			break;
+		case JS_COMMAND_HARD_STOP_0:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, head_kctx,
+							head, head->jc, js);
+			break;
+		case JS_COMMAND_HARD_STOP_1:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, head_kctx,
+							head, head->jc, js);
+			break;
+		default:
+			BUG();
+			break;
+		}
+	} else {
+		if (status_reg_before == BASE_JD_EVENT_ACTIVE)
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
+							job_in_head_before, js);
+		else
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
+							0, js);
+
+		switch (action) {
+		case JS_COMMAND_SOFT_STOP:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, NULL, NULL, 0,
+							js);
+			break;
+		case JS_COMMAND_SOFT_STOP_0:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, NULL, NULL,
+							0, js);
+			break;
+		case JS_COMMAND_SOFT_STOP_1:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, NULL, NULL,
+							0, js);
+			break;
+		case JS_COMMAND_HARD_STOP:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, NULL, NULL, 0,
+							js);
+			break;
+		case JS_COMMAND_HARD_STOP_0:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, NULL, NULL,
+							0, js);
+			break;
+		case JS_COMMAND_HARD_STOP_1:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, NULL, NULL,
+							0, js);
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+#endif
+}
+
+void kbase_backend_jm_kill_running_jobs_from_kctx(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+		kbase_job_slot_hardstop(kctx, i, NULL);
+}
+
+void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
+				struct kbase_jd_atom *target_katom)
+{
+	struct kbase_device *kbdev;
+	int js = target_katom->slot_nr;
+	int priority = target_katom->sched_priority;
+	int i;
+	bool stop_sent = false;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	kbdev = kctx->kbdev;
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) {
+		struct kbase_jd_atom *katom;
+
+		katom = kbase_gpu_inspect(kbdev, js, i);
+		if (!katom)
+			continue;
+
+		if ((kbdev->js_ctx_scheduling_mode ==
+			KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE) &&
+				(katom->kctx != kctx))
+			continue;
+
+		if (katom->sched_priority > priority) {
+			if (!stop_sent)
+				KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED(
+						kbdev,
+						target_katom);
+
+			kbase_job_slot_softstop(kbdev, js, katom);
+			stop_sent = true;
+		}
+	}
+}
+
+void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	unsigned long timeout = msecs_to_jiffies(ZAP_TIMEOUT);
+
+	timeout = wait_event_timeout(kctx->jctx.zero_jobs_wait,
+			kctx->jctx.job_nr == 0, timeout);
+
+	if (timeout != 0)
+		timeout = wait_event_timeout(
+			kctx->jctx.sched_info.ctx.is_scheduled_wait,
+			!kbase_ctx_flag(kctx, KCTX_SCHEDULED),
+			timeout);
+
+	/* Neither wait timed out; all done! */
+	if (timeout != 0)
+		goto exit;
+
+	if (kbase_prepare_to_reset_gpu(kbdev)) {
+		dev_err(kbdev->dev,
+			"Issuing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n",
+			ZAP_TIMEOUT);
+		kbase_reset_gpu(kbdev);
+	}
+
+	/* Wait for the reset to complete */
+	kbase_reset_gpu_wait(kbdev);
+exit:
+	dev_dbg(kbdev->dev, "Zap: Finished Context %p", kctx);
+
+	/* Ensure that the signallers of the waitqs have finished */
+	mutex_lock(&kctx->jctx.lock);
+	mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	mutex_unlock(&kctx->jctx.lock);
+}
+
+u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev)
+{
+	u32 flush_id = 0;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION)) {
+		mutex_lock(&kbdev->pm.lock);
+		if (kbdev->pm.backend.gpu_powered)
+			flush_id = kbase_reg_read(kbdev,
+					GPU_CONTROL_REG(LATEST_FLUSH));
+		mutex_unlock(&kbdev->pm.lock);
+	}
+
+	return flush_id;
+}
+
+int kbase_job_slot_init(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_job_slot_init);
+
+void kbase_job_slot_halt(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+void kbase_job_slot_term(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbase_job_slot_term);
+
+/**
+ * kbasep_check_for_afbc_on_slot() - Check whether AFBC is in use on this slot
+ * @kbdev: kbase device pointer
+ * @kctx:  context to check against
+ * @js:	   slot to check
+ * @target_katom: An atom to check, or NULL if all atoms from @kctx on
+ *                slot @js should be checked
+ *
+ * These checks are based upon parameters that would normally be passed to
+ * kbase_job_slot_hardstop().
+ *
+ * In the event of @target_katom being NULL, this will check the last jobs that
+ * are likely to be running on the slot to see whether a) they belong to @kctx
+ * (and so would be stopped) and b) they use AFBC.
+ *
+ * In that case, it is guaranteed that a job currently executing on the HW with
+ * AFBC will be detected. However, this is a conservative check because it may
+ * also detect jobs that have just completed.
+ *
+ * Return: true when hard-stop _might_ stop an AFBC atom, else false.
+ */
+static bool kbasep_check_for_afbc_on_slot(struct kbase_device *kbdev,
+		struct kbase_context *kctx, int js,
+		struct kbase_jd_atom *target_katom)
+{
+	bool ret = false;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* When we have an atom the decision can be made straight away. */
+	if (target_katom)
+		return !!(target_katom->core_req & BASE_JD_REQ_FS_AFBC);
+
+	/* Otherwise, we must check the hardware to see if it has atoms from
+	 * this context with AFBC. */
+	for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) {
+		struct kbase_jd_atom *katom;
+
+		katom = kbase_gpu_inspect(kbdev, js, i);
+		if (!katom)
+			continue;
+
+		/* Ignore atoms from other contexts, they won't be stopped when
+		 * we use this for checking if we should hard-stop them */
+		if (katom->kctx != kctx)
+			continue;
+
+		/* An atom on this slot and this context: check for AFBC */
+		if (katom->core_req & BASE_JD_REQ_FS_AFBC) {
+			ret = true;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * kbase_job_slot_softstop_swflags - Soft-stop a job with flags
+ * @kbdev:         The kbase device
+ * @js:            The job slot to soft-stop
+ * @target_katom:  The job that should be soft-stopped (or NULL for any job)
+ * @sw_flags:      Flags to pass in about the soft-stop
+ *
+ * Context:
+ *   The job slot lock must be held when calling this function.
+ *   The job slot must not already be in the process of being soft-stopped.
+ *
+ * Soft-stop the specified job slot, with extra information about the stop
+ *
+ * Where possible any job in the next register is evicted before the soft-stop.
+ */
+void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
+			struct kbase_jd_atom *target_katom, u32 sw_flags)
+{
+	KBASE_DEBUG_ASSERT(!(sw_flags & JS_COMMAND_MASK));
+	kbase_backend_soft_hard_stop_slot(kbdev, NULL, js, target_katom,
+			JS_COMMAND_SOFT_STOP | sw_flags);
+}
+
+/**
+ * kbase_job_slot_softstop - Soft-stop the specified job slot
+ * @kbdev:         The kbase device
+ * @js:            The job slot to soft-stop
+ * @target_katom:  The job that should be soft-stopped (or NULL for any job)
+ *
+ * Context:
+ *   The job slot lock must be held when calling this function.
+ *   The job slot must not already be in the process of being soft-stopped.
+ *
+ * Where possible any job in the next register is evicted before the soft-stop.
+ */
+void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
+				struct kbase_jd_atom *target_katom)
+{
+	kbase_job_slot_softstop_swflags(kbdev, js, target_katom, 0u);
+}
+
+/**
+ * kbase_job_slot_hardstop - Hard-stop the specified job slot
+ * @kctx:         The kbase context that contains the job(s) that should
+ *                be hard-stopped
+ * @js:           The job slot to hard-stop
+ * @target_katom: The job that should be hard-stopped (or NULL for all
+ *                jobs from the context)
+ *
+ * Context:
+ *   The job slot lock must be held when calling this function.
+ */
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+				struct kbase_jd_atom *target_katom)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	bool stopped;
+	/* We make the check for AFBC before evicting/stopping atoms.  Note
+	 * that no other thread can modify the slots whilst we have the
+	 * hwaccess_lock. */
+	int needs_workaround_for_afbc =
+			kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3542)
+			&& kbasep_check_for_afbc_on_slot(kbdev, kctx, js,
+					 target_katom);
+
+	stopped = kbase_backend_soft_hard_stop_slot(kbdev, kctx, js,
+							target_katom,
+							JS_COMMAND_HARD_STOP);
+	if (stopped && (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_8401) ||
+			kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_9510) ||
+			needs_workaround_for_afbc)) {
+		/* MIDBASE-2916: if a fragment job with AFBC encoding is
+		 * hard-stopped, also do a soft reset in order to clear the
+		 * GPU status. The workaround for HW issue 8401 has an issue
+		 * of its own, so after hard-stopping just reset the GPU. This
+		 * will ensure that the jobs leave the GPU. */
+		if (kbase_prepare_to_reset_gpu_locked(kbdev)) {
+			dev_err(kbdev->dev, "Issueing GPU soft-reset after hard stopping due to hardware issue");
+			kbase_reset_gpu_locked(kbdev);
+		}
+	}
+}
+
+/**
+ * kbase_job_check_enter_disjoint - potentially enter disjoint mode
+ * @kbdev: kbase device
+ * @action: the event which has occurred
+ * @core_reqs: core requirements of the atom
+ * @target_katom: the atom which is being affected
+ *
+ * For a certain soft/hard-stop action, work out whether to enter disjoint
+ * state.
+ *
+ * This does not register multiple disjoint events if the atom has already
+ * started a disjoint period.
+ *
+ * @core_reqs can be supplied as 0 if the atom had not started on the hardware
+ * (and so a 'real' soft/hard-stop was not required, but it still interrupted
+ * flow, perhaps on another context)
+ *
+ * kbase_job_check_leave_disjoint() should be used to end the disjoint
+ * state when the soft/hard-stop action is complete
+ */
+void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
+		base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom)
+{
+	u32 hw_action = action & JS_COMMAND_MASK;
+
+	/* For hard-stop, don't enter if hard-stop not allowed */
+	if (hw_action == JS_COMMAND_HARD_STOP &&
+			!kbasep_hard_stop_allowed(kbdev, core_reqs))
+		return;
+
+	/* For soft-stop, don't enter if soft-stop not allowed, or isn't
+	 * causing disjoint */
+	if (hw_action == JS_COMMAND_SOFT_STOP &&
+			!(kbasep_soft_stop_allowed(kbdev, target_katom) &&
+			  (action & JS_COMMAND_SW_CAUSES_DISJOINT)))
+		return;
+
+	/* Nothing to do if already logged disjoint state on this atom */
+	if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT)
+		return;
+
+	target_katom->atom_flags |= KBASE_KATOM_FLAG_IN_DISJOINT;
+	kbase_disjoint_state_up(kbdev);
+}
+
+/**
+ * kbase_job_check_leave_disjoint - potentially leave disjoint state
+ * @kbdev: kbase device
+ * @target_katom: atom which is finishing
+ *
+ * Work out whether to leave disjoint state when finishing an atom for which
+ * kbase_job_check_enter_disjoint() previously entered disjoint state.
+ */
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+		struct kbase_jd_atom *target_katom)
+{
+	if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT) {
+		target_katom->atom_flags &= ~KBASE_KATOM_FLAG_IN_DISJOINT;
+		kbase_disjoint_state_down(kbdev);
+	}
+}
+
+static void kbase_debug_dump_registers(struct kbase_device *kbdev)
+{
+	int i;
+
+	kbase_io_history_dump(kbdev);
+
+	dev_err(kbdev->dev, "Register state:");
+	dev_err(kbdev->dev, "  GPU_IRQ_RAWSTAT=0x%08x GPU_STATUS=0x%08x",
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)),
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS)));
+	dev_err(kbdev->dev, "  JOB_IRQ_RAWSTAT=0x%08x JOB_IRQ_JS_STATE=0x%08x",
+		kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_RAWSTAT)),
+		kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_JS_STATE)));
+	for (i = 0; i < 3; i++) {
+		dev_err(kbdev->dev, "  JS%d_STATUS=0x%08x      JS%d_HEAD_LO=0x%08x",
+			i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_STATUS)),
+			i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_HEAD_LO)));
+	}
+	dev_err(kbdev->dev, "  MMU_IRQ_RAWSTAT=0x%08x GPU_FAULTSTATUS=0x%08x",
+		kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_RAWSTAT)),
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS)));
+	dev_err(kbdev->dev, "  GPU_IRQ_MASK=0x%08x    JOB_IRQ_MASK=0x%08x     MMU_IRQ_MASK=0x%08x",
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK)),
+		kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK)),
+		kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK)));
+	dev_err(kbdev->dev, "  PWR_OVERRIDE0=0x%08x   PWR_OVERRIDE1=0x%08x",
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE0)),
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1)));
+	dev_err(kbdev->dev, "  SHADER_CONFIG=0x%08x   L2_MMU_CONFIG=0x%08x",
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_CONFIG)),
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG)));
+	dev_err(kbdev->dev, "  TILER_CONFIG=0x%08x    JM_CONFIG=0x%08x",
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(TILER_CONFIG)),
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(JM_CONFIG)));
+}
+
+static void kbasep_reset_timeout_worker(struct work_struct *data)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev;
+	ktime_t end_timestamp = ktime_get();
+	struct kbasep_js_device_data *js_devdata;
+	bool silent = false;
+	u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+
+	KBASE_DEBUG_ASSERT(data);
+
+	kbdev = container_of(data, struct kbase_device,
+						hwaccess.backend.reset_work);
+
+	KBASE_DEBUG_ASSERT(kbdev);
+	js_devdata = &kbdev->js_data;
+
+	if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+			KBASE_RESET_GPU_SILENT)
+		silent = true;
+
+	KBASE_TRACE_ADD(kbdev, JM_BEGIN_RESET_WORKER, NULL, NULL, 0u, 0);
+
+	/* Disable GPU hardware counters.
+	 * This call will block until counters are disabled.
+	 */
+	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+	/* Make sure the timer has completed - this cannot be done from
+	 * interrupt context, so it cannot be done within
+	 * kbasep_try_reset_gpu_early. */
+	hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer);
+
+	if (kbase_pm_context_active_handle_suspend(kbdev,
+				KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+		/* This would re-activate the GPU. Since it's already idle,
+		 * there's no need to reset it */
+		atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+						KBASE_RESET_GPU_NOT_PENDING);
+		kbase_disjoint_state_down(kbdev);
+		wake_up(&kbdev->hwaccess.backend.reset_wait);
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return;
+	}
+
+	KBASE_DEBUG_ASSERT(kbdev->irq_reset_flush == false);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	spin_lock(&kbdev->mmu_mask_change);
+	kbase_pm_reset_start_locked(kbdev);
+
+	/* We're about to flush out the IRQs and their bottom halves */
+	kbdev->irq_reset_flush = true;
+
+	/* Disable IRQs to prevent IRQ handlers from kicking in after releasing
+	 * the spinlock; this also clears any outstanding interrupts */
+	kbase_pm_disable_interrupts_nolock(kbdev);
+
+	spin_unlock(&kbdev->mmu_mask_change);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* Ensure that any IRQ handlers have finished.
+	 * This must be done without holding any locks the IRQ handlers will
+	 * take */
+	kbase_synchronize_irqs(kbdev);
+
+	/* Flush out any in-flight work items */
+	kbase_flush_mmu_wqs(kbdev);
+
+	/* The flush has completed so reset the active indicator */
+	kbdev->irq_reset_flush = false;
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8463)) {
+		/* Ensure that L2 is not transitioning when we send the reset
+		 * command */
+		while (--max_loops && kbase_pm_get_trans_cores(kbdev,
+				KBASE_PM_CORE_L2))
+			;
+
+		WARN(!max_loops, "L2 power transition timed out while trying to reset\n");
+	}
+
+	mutex_lock(&kbdev->pm.lock);
+	/* We hold the pm lock, so there ought to be a current policy */
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.pm_current_policy);
+
+	/* All slots have been soft-stopped and we've waited
+	 * SOFT_STOP_RESET_TIMEOUT for the slots to clear. At this point we
+	 * assume that anything that is still left on the GPU is stuck there
+	 * and we'll kill it when we reset the GPU */
+
+	if (!silent)
+		dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)",
+								RESET_TIMEOUT);
+
+	/* Output the state of some interesting registers to help in the
+	 * debugging of GPU resets */
+	if (!silent)
+		kbase_debug_dump_registers(kbdev);
+
+	/* Complete any jobs that were still on the GPU */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbdev->protected_mode = false;
+	if (!kbdev->pm.backend.protected_entry_transition_override)
+		kbase_backend_reset(kbdev, &end_timestamp);
+	kbase_pm_metrics_update(kbdev, NULL);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* Reset the GPU */
+	kbase_pm_init_hw(kbdev, 0);
+
+	mutex_unlock(&kbdev->pm.lock);
+
+	mutex_lock(&js_devdata->runpool_mutex);
+
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_ctx_sched_restore_all_as(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	kbase_pm_enable_interrupts(kbdev);
+
+	kbase_disjoint_state_down(kbdev);
+
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	mutex_lock(&kbdev->pm.lock);
+
+	kbase_pm_reset_complete(kbdev);
+
+	/* Find out what cores are required now */
+	kbase_pm_update_cores_state(kbdev);
+
+	/* Synchronously request and wait for those cores, because if
+	 * instrumentation is enabled it would need them immediately. */
+	kbase_pm_wait_for_desired_state(kbdev);
+
+	mutex_unlock(&kbdev->pm.lock);
+
+	atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+						KBASE_RESET_GPU_NOT_PENDING);
+
+	wake_up(&kbdev->hwaccess.backend.reset_wait);
+	if (!silent)
+		dev_err(kbdev->dev, "Reset complete");
+
+	/* Try submitting some jobs to restart processing */
+	KBASE_TRACE_ADD(kbdev, JM_SUBMIT_AFTER_RESET, NULL, NULL, 0u, 0);
+	kbase_js_sched_all(kbdev);
+
+	/* Process any pending slot updates */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_backend_slot_update(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	kbase_pm_context_idle(kbdev);
+
+	/* Re-enable GPU hardware counters */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	KBASE_TRACE_ADD(kbdev, JM_END_RESET_WORKER, NULL, NULL, 0u, 0);
+}
+
+static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer)
+{
+	struct kbase_device *kbdev = container_of(timer, struct kbase_device,
+						hwaccess.backend.reset_timer);
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	/* Reset still pending? */
+	if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+			KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) ==
+						KBASE_RESET_GPU_COMMITTED)
+		queue_work(kbdev->hwaccess.backend.reset_workq,
+					&kbdev->hwaccess.backend.reset_work);
+
+	return HRTIMER_NORESTART;
+}
+
+/*
+ * If all jobs are evicted from the GPU then we can reset the GPU
+ * immediately instead of waiting for the timeout to elapse
+ */
+
+static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev)
+{
+	int i;
+	int pending_jobs = 0;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	/* Count the number of jobs */
+	for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+		pending_jobs += kbase_backend_nr_atoms_submitted(kbdev, i);
+
+	if (pending_jobs > 0) {
+		/* There are still jobs on the GPU - wait */
+		return;
+	}
+
+	/* To prevent reading incorrect registers when dumping a failed job,
+	 * skip the early reset.
+	 */
+	if (atomic_read(&kbdev->job_fault_debug) > 0)
+		return;
+
+	/* Check that the reset has been committed to (i.e. kbase_reset_gpu has
+	 * been called), and that no other thread beat this thread to starting
+	 * the reset */
+	if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+			KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) !=
+						KBASE_RESET_GPU_COMMITTED) {
+		/* Reset has already occurred */
+		return;
+	}
+
+	queue_work(kbdev->hwaccess.backend.reset_workq,
+					&kbdev->hwaccess.backend.reset_work);
+}
+
+static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbasep_try_reset_gpu_early_locked(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+/**
+ * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU
+ * @kbdev: kbase device
+ *
+ * This function just soft-stops all the slots to ensure that as many jobs as
+ * possible are saved.
+ *
+ * Return:
+ *   The function returns a boolean which should be interpreted as follows:
+ *   true - Prepared for reset, kbase_reset_gpu_locked should be called.
+ *   false - Another thread is performing a reset, kbase_reset_gpu_locked
+ *   should not be called.
+ */
+bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev)
+{
+	int i;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+						KBASE_RESET_GPU_NOT_PENDING,
+						KBASE_RESET_GPU_PREPARED) !=
+						KBASE_RESET_GPU_NOT_PENDING) {
+		/* Some other thread is already resetting the GPU */
+		return false;
+	}
+
+	kbase_disjoint_state_up(kbdev);
+
+	for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+		kbase_job_slot_softstop(kbdev, i, NULL);
+
+	return true;
+}
+
+bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	ret = kbase_prepare_to_reset_gpu_locked(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return ret;
+}
+KBASE_EXPORT_TEST_API(kbase_prepare_to_reset_gpu);
+
+/*
+ * This function should be called after kbase_prepare_to_reset_gpu if it
+ * returns true. It should never be called without a corresponding call to
+ * kbase_prepare_to_reset_gpu.
+ *
+ * After this function is called (or not called if kbase_prepare_to_reset_gpu
+ * returned false), the caller should wait for
+ * kbdev->hwaccess.backend.reset_wait to be signalled to know when the reset
+ * has completed.
+ */
+void kbase_reset_gpu(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	/* Note this is an assert/atomic_set because it is a software issue for
+	 * a race to be occurring here */
+	KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+						KBASE_RESET_GPU_PREPARED);
+	atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+						KBASE_RESET_GPU_COMMITTED);
+
+	dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (upto %d ms) for all jobs to complete soft-stop\n",
+			kbdev->reset_timeout_ms);
+
+	hrtimer_start(&kbdev->hwaccess.backend.reset_timer,
+			HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms),
+			HRTIMER_MODE_REL);
+
+	/* Try resetting early */
+	kbasep_try_reset_gpu_early(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbase_reset_gpu);
+
+void kbase_reset_gpu_locked(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	/* Note this is an assert/atomic_set because it is a software issue for
+	 * a race to be occurring here */
+	KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+						KBASE_RESET_GPU_PREPARED);
+	atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+						KBASE_RESET_GPU_COMMITTED);
+
+	dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (upto %d ms) for all jobs to complete soft-stop\n",
+			kbdev->reset_timeout_ms);
+	hrtimer_start(&kbdev->hwaccess.backend.reset_timer,
+			HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms),
+			HRTIMER_MODE_REL);
+
+	/* Try resetting early */
+	kbasep_try_reset_gpu_early_locked(kbdev);
+}
+
+int kbase_reset_gpu_silent(struct kbase_device *kbdev)
+{
+	if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+						KBASE_RESET_GPU_NOT_PENDING,
+						KBASE_RESET_GPU_SILENT) !=
+						KBASE_RESET_GPU_NOT_PENDING) {
+		/* Some other thread is already resetting the GPU */
+		return -EAGAIN;
+	}
+
+	kbase_disjoint_state_up(kbdev);
+
+	queue_work(kbdev->hwaccess.backend.reset_workq,
+			&kbdev->hwaccess.backend.reset_work);
+
+	return 0;
+}
+
+bool kbase_reset_gpu_is_active(struct kbase_device *kbdev)
+{
+	if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+			KBASE_RESET_GPU_NOT_PENDING)
+		return false;
+
+	return true;
+}
+
+int kbase_reset_gpu_wait(struct kbase_device *kbdev)
+{
+	wait_event(kbdev->hwaccess.backend.reset_wait,
+			atomic_read(&kbdev->hwaccess.backend.reset_gpu)
+			== KBASE_RESET_GPU_NOT_PENDING);
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_reset_gpu_wait);
+
+int kbase_reset_gpu_init(struct kbase_device *kbdev)
+{
+	kbdev->hwaccess.backend.reset_workq = alloc_workqueue(
+						"Mali reset workqueue", 0, 1);
+	if (kbdev->hwaccess.backend.reset_workq == NULL)
+		return -ENOMEM;
+
+	INIT_WORK(&kbdev->hwaccess.backend.reset_work,
+						kbasep_reset_timeout_worker);
+
+	hrtimer_init(&kbdev->hwaccess.backend.reset_timer, CLOCK_MONOTONIC,
+							HRTIMER_MODE_REL);
+	kbdev->hwaccess.backend.reset_timer.function =
+						kbasep_reset_timer_callback;
+
+	return 0;
+}
+
+void kbase_reset_gpu_term(struct kbase_device *kbdev)
+{
+	destroy_workqueue(kbdev->hwaccess.backend.reset_workq);
+}
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
new file mode 100644
index 0000000..452ddee
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
@@ -0,0 +1,169 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Job Manager backend-specific low-level APIs.
+ */
+
+#ifndef _KBASE_JM_HWACCESS_H_
+#define _KBASE_JM_HWACCESS_H_
+
+#include <mali_kbase_hw.h>
+#include <mali_kbase_debug.h>
+#include <linux/atomic.h>
+
+#include <backend/gpu/mali_kbase_jm_rb.h>
+
+/**
+ * kbase_job_submit_nolock() - Submit a job to a certain job-slot
+ * @kbdev:	Device pointer
+ * @katom:	Atom to submit
+ * @js:		Job slot to submit on
+ *
+ * The caller must check that kbasep_jm_is_submit_slots_free() returns true
+ * before calling this.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ */
+void kbase_job_submit_nolock(struct kbase_device *kbdev,
+					struct kbase_jd_atom *katom, int js);
+
+/**
+ * kbase_job_done_slot() - Complete the head job on a particular job-slot
+ * @kbdev:		Device pointer
+ * @s:			Job slot
+ * @completion_code:	Completion code of job reported by GPU
+ * @job_tail:		Job tail address reported by GPU
+ * @end_timestamp:	Timestamp of job completion
+ */
+void kbase_job_done_slot(struct kbase_device *kbdev, int s, u32 completion_code,
+					u64 job_tail, ktime_t *end_timestamp);
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+static inline char *kbasep_make_job_slot_string(int js, char *js_string,
+						size_t js_size)
+{
+	snprintf(js_string, js_size, "job_slot_%i", js);
+	return js_string;
+}
+#endif
+
+/**
+ * kbase_job_hw_submit() - Submit a job to the GPU
+ * @kbdev:	Device pointer
+ * @katom:	Atom to submit
+ * @js:		Job slot to submit on
+ *
+ * The caller must check that kbasep_jm_is_submit_slots_free() returns true
+ * before calling this.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ */
+void kbase_job_hw_submit(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom,
+				int js);
+
+/**
+ * kbasep_job_slot_soft_or_hard_stop_do_action() - Perform a soft or hard stop
+ *						   on the specified atom
+ * @kbdev:		Device pointer
+ * @js:			Job slot to stop on
+ * @action:		The action to perform, either JSn_COMMAND_HARD_STOP or
+ *			JSn_COMMAND_SOFT_STOP
+ * @core_reqs:		Core requirements of atom to stop
+ * @target_katom:	Atom to stop
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ */
+void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
+					int js,
+					u32 action,
+					base_jd_core_req core_reqs,
+					struct kbase_jd_atom *target_katom);
+
+/**
+ * kbase_backend_soft_hard_stop_slot() - Soft or hard stop jobs on a given job
+ *					 slot belonging to a given context.
+ * @kbdev:	Device pointer
+ * @kctx:	Context pointer. May be NULL
+ * @js:		Job slot to stop
+ * @katom:	Specific atom to stop. May be NULL
+ * @action:	The action to perform, either JSn_COMMAND_HARD_STOP or
+ *		JSn_COMMAND_SOFT_STOP
+ *
+ * If no context is provided then all jobs on the slot will be soft or hard
+ * stopped.
+ *
+ * If a katom is provided then only that specific atom will be stopped. In this
+ * case the kctx parameter is ignored.
+ *
+ * Jobs that are on the slot but are not yet on the GPU will be unpulled and
+ * returned to the job scheduler.
+ *
+ * Return: true if an atom was stopped, false otherwise
+ */
+bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+					struct kbase_context *kctx,
+					int js,
+					struct kbase_jd_atom *katom,
+					u32 action);
+
+/**
+ * kbase_job_slot_init - Initialise job slot framework
+ * @kbdev: Device pointer
+ *
+ * Called on driver initialisation
+ *
+ * Return: 0 on success
+ */
+int kbase_job_slot_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_job_slot_halt - Halt the job slot framework
+ * @kbdev: Device pointer
+ *
+ * Should prevent any further job slot processing
+ */
+void kbase_job_slot_halt(struct kbase_device *kbdev);
+
+/**
+ * kbase_job_slot_term - Terminate job slot framework
+ * @kbdev: Device pointer
+ *
+ * Called on driver termination
+ */
+void kbase_job_slot_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpu_cache_clean - Cause a GPU cache clean & flush
+ * @kbdev: Device pointer
+ *
+ * Caller must not be in IRQ context
+ */
+void kbase_gpu_cache_clean(struct kbase_device *kbdev);
+
+#endif /* _KBASE_JM_HWACCESS_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
new file mode 100644
index 0000000..55440b8
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
@@ -0,0 +1,1750 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register-based HW access backend specific APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_jm.h>
+#include <mali_kbase_js.h>
+#include <mali_kbase_tracepoints.h>
+#include <mali_kbase_hwcnt_context.h>
+#include <mali_kbase_10969_workaround.h>
+#include <mali_kbase_reset_gpu.h>
+#include <backend/gpu/mali_kbase_cache_policy_backend.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+/* Return whether the specified ringbuffer is empty. HW access lock must be
+ * held */
+#define SLOT_RB_EMPTY(rb)   ((rb)->write_idx == (rb)->read_idx)
+/* Return number of atoms currently in the specified ringbuffer. HW access lock
+ * must be held */
+#define SLOT_RB_ENTRIES(rb) ((int)(s8)((rb)->write_idx - (rb)->read_idx))
+
+static void kbase_gpu_release_atom(struct kbase_device *kbdev,
+					struct kbase_jd_atom *katom,
+					ktime_t *end_timestamp);
+
+/**
+ * kbase_gpu_enqueue_atom - Enqueue an atom in the HW access ringbuffer
+ * @kbdev: Device pointer
+ * @katom: Atom to enqueue
+ *
+ * Context: Caller must hold the HW access lock
+ */
+static void kbase_gpu_enqueue_atom(struct kbase_device *kbdev,
+					struct kbase_jd_atom *katom)
+{
+	struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[katom->slot_nr];
+
+	WARN_ON(SLOT_RB_ENTRIES(rb) >= SLOT_RB_SIZE);
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	rb->entries[rb->write_idx & SLOT_RB_MASK].katom = katom;
+	rb->write_idx++;
+
+	katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED;
+}
+
+/**
+ * kbase_gpu_dequeue_atom - Remove an atom from the HW access ringbuffer, once
+ * it has been completed
+ * @kbdev:         Device pointer
+ * @js:            Job slot to remove atom from
+ * @end_timestamp: Pointer to timestamp of atom completion. May be NULL, in
+ *                 which case current time will be used.
+ *
+ * Context: Caller must hold the HW access lock
+ *
+ * Return: Atom removed from ringbuffer
+ */
+static struct kbase_jd_atom *kbase_gpu_dequeue_atom(struct kbase_device *kbdev,
+						int js,
+						ktime_t *end_timestamp)
+{
+	struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
+	struct kbase_jd_atom *katom;
+
+	if (SLOT_RB_EMPTY(rb)) {
+		WARN(1, "GPU ringbuffer unexpectedly empty\n");
+		return NULL;
+	}
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	katom = rb->entries[rb->read_idx & SLOT_RB_MASK].katom;
+
+	kbase_gpu_release_atom(kbdev, katom, end_timestamp);
+
+	rb->read_idx++;
+
+	katom->gpu_rb_state = KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB;
+
+	return katom;
+}
+
+struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js,
+					int idx)
+{
+	struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if ((SLOT_RB_ENTRIES(rb) - 1) < idx)
+		return NULL; /* idx out of range */
+
+	return rb->entries[(rb->read_idx + idx) & SLOT_RB_MASK].katom;
+}
+
+struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
+					int js)
+{
+	struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
+
+	if (SLOT_RB_EMPTY(rb))
+		return NULL;
+
+	return rb->entries[(rb->write_idx - 1) & SLOT_RB_MASK].katom;
+}
+
+/**
+ * kbase_gpu_atoms_submitted - Inspect whether a slot has any atoms currently
+ * on the GPU
+ * @kbdev:  Device pointer
+ * @js:     Job slot to inspect
+ *
+ * Return: true if there are atoms on the GPU for slot js,
+ *         false otherwise
+ */
+static bool kbase_gpu_atoms_submitted(struct kbase_device *kbdev, int js)
+{
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (i = 0; i < SLOT_RB_SIZE; i++) {
+		struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+		if (!katom)
+			return false;
+		if (katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED ||
+				katom->gpu_rb_state == KBASE_ATOM_GPU_RB_READY)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * kbase_gpu_atoms_submitted_any() - Inspect whether there are any atoms
+ * currently on the GPU
+ * @kbdev:  Device pointer
+ *
+ * Return: true if there are any atoms on the GPU, false otherwise
+ */
+static bool kbase_gpu_atoms_submitted_any(struct kbase_device *kbdev)
+{
+	int js;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+		for (i = 0; i < SLOT_RB_SIZE; i++) {
+			struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+			if (katom && katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED)
+				return true;
+		}
+	}
+	return false;
+}
+
+int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js)
+{
+	int nr = 0;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (i = 0; i < SLOT_RB_SIZE; i++) {
+		struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+		if (katom && (katom->gpu_rb_state ==
+						KBASE_ATOM_GPU_RB_SUBMITTED))
+			nr++;
+	}
+
+	return nr;
+}
+
+int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js)
+{
+	int nr = 0;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (i = 0; i < SLOT_RB_SIZE; i++) {
+		if (kbase_gpu_inspect(kbdev, js, i))
+			nr++;
+	}
+
+	return nr;
+}
+
+static int kbase_gpu_nr_atoms_on_slot_min(struct kbase_device *kbdev, int js,
+				enum kbase_atom_gpu_rb_state min_rb_state)
+{
+	int nr = 0;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (i = 0; i < SLOT_RB_SIZE; i++) {
+		struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+		if (katom && (katom->gpu_rb_state >= min_rb_state))
+			nr++;
+	}
+
+	return nr;
+}
+
+/**
+ * check_secure_atom - Check if the given atom is in the given secure state and
+ *                     has a ringbuffer state of at least
+ *                     KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @katom:  Atom pointer
+ * @secure: Desired secure state
+ *
+ * Return: true if atom is in the given state, false otherwise
+ */
+static bool check_secure_atom(struct kbase_jd_atom *katom, bool secure)
+{
+	if (katom->gpu_rb_state >=
+			KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION &&
+			((kbase_jd_katom_is_protected(katom) && secure) ||
+			(!kbase_jd_katom_is_protected(katom) && !secure)))
+		return true;
+
+	return false;
+}
+
+/**
+ * kbase_gpu_check_secure_atoms - Check if there are any atoms in the given
+ *                                secure state in the ringbuffers of at least
+ *                                state
+ *                                KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @kbdev:  Device pointer
+ * @secure: Desired secure state
+ *
+ * Return: true if any atoms are in the given state, false otherwise
+ */
+static bool kbase_gpu_check_secure_atoms(struct kbase_device *kbdev,
+		bool secure)
+{
+	int js, i;
+
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+		for (i = 0; i < SLOT_RB_SIZE; i++) {
+			struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
+					js, i);
+
+			if (katom) {
+				if (check_secure_atom(katom, secure))
+					return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+int kbase_backend_slot_free(struct kbase_device *kbdev, int js)
+{
+	if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) !=
+						KBASE_RESET_GPU_NOT_PENDING) {
+		/* The GPU is being reset - so prevent submission */
+		return 0;
+	}
+
+	return SLOT_RB_SIZE - kbase_backend_nr_atoms_on_slot(kbdev, js);
+}
+
+
+static void kbase_gpu_release_atom(struct kbase_device *kbdev,
+					struct kbase_jd_atom *katom,
+					ktime_t *end_timestamp)
+{
+	struct kbase_context *kctx = katom->kctx;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	switch (katom->gpu_rb_state) {
+	case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB:
+		/* Should be impossible */
+		WARN(1, "Attempting to release atom not in ringbuffer\n");
+		break;
+
+	case KBASE_ATOM_GPU_RB_SUBMITTED:
+		/* Inform power management at start/finish of atom so it can
+		 * update its GPU utilisation metrics. Mark atom as not
+		 * submitted beforehand. */
+		katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
+		kbase_pm_metrics_update(kbdev, end_timestamp);
+
+		if (katom->core_req & BASE_JD_REQ_PERMON)
+			kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
+
+		KBASE_TLSTREAM_TL_NRET_ATOM_LPU(kbdev, katom,
+			&kbdev->gpu_props.props.raw_props.js_features
+				[katom->slot_nr]);
+		KBASE_TLSTREAM_TL_NRET_ATOM_AS(kbdev, katom, &kbdev->as[kctx->as_nr]);
+		KBASE_TLSTREAM_TL_NRET_CTX_LPU(kbdev, kctx,
+			&kbdev->gpu_props.props.raw_props.js_features
+				[katom->slot_nr]);
+		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+	case KBASE_ATOM_GPU_RB_READY:
+		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+	case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
+		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+	case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
+		break;
+
+	case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION:
+		if (kbase_jd_katom_is_protected(katom) &&
+				(katom->protected_state.enter !=
+				KBASE_ATOM_ENTER_PROTECTED_CHECK) &&
+				(katom->protected_state.enter !=
+				KBASE_ATOM_ENTER_PROTECTED_HWCNT)) {
+			kbase_pm_protected_override_disable(kbdev);
+			kbase_pm_update_cores_state_nolock(kbdev);
+		}
+		if (kbase_jd_katom_is_protected(katom) &&
+				(katom->protected_state.enter ==
+				KBASE_ATOM_ENTER_PROTECTED_IDLE_L2))
+			kbase_pm_protected_entry_override_disable(kbdev);
+		if (!kbase_jd_katom_is_protected(katom) &&
+				(katom->protected_state.exit !=
+				KBASE_ATOM_EXIT_PROTECTED_CHECK) &&
+				(katom->protected_state.exit !=
+				KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT)) {
+			kbase_pm_protected_override_disable(kbdev);
+			kbase_pm_update_cores_state_nolock(kbdev);
+		}
+
+		if (katom->protected_state.enter !=
+				KBASE_ATOM_ENTER_PROTECTED_CHECK ||
+				katom->protected_state.exit !=
+				KBASE_ATOM_EXIT_PROTECTED_CHECK)
+			kbdev->protected_mode_transition = false;
+		/* If the atom has suspended hwcnt but has not yet entered
+		 * protected mode, then resume hwcnt now. If the GPU is now in
+		 * protected mode then hwcnt will be resumed by GPU reset so
+		 * don't resume it here.
+		 */
+		if (kbase_jd_katom_is_protected(katom) &&
+				((katom->protected_state.enter ==
+				KBASE_ATOM_ENTER_PROTECTED_IDLE_L2) ||
+				 (katom->protected_state.enter ==
+				KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY))) {
+			WARN_ON(!kbdev->protected_mode_hwcnt_disabled);
+			kbdev->protected_mode_hwcnt_desired = true;
+			if (kbdev->protected_mode_hwcnt_disabled) {
+				kbase_hwcnt_context_enable(
+					kbdev->hwcnt_gpu_ctx);
+				kbdev->protected_mode_hwcnt_disabled = false;
+			}
+		}
+
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
+			if (katom->atom_flags &
+					KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT) {
+				kbase_pm_protected_l2_override(kbdev, false);
+				katom->atom_flags &=
+					~KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
+			}
+		}
+
+		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+	case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV:
+		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+	case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
+		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+	case KBASE_ATOM_GPU_RB_RETURN_TO_JS:
+		break;
+	}
+
+	katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED;
+	katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+}
+
+static void kbase_gpu_mark_atom_for_return(struct kbase_device *kbdev,
+						struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbase_gpu_release_atom(kbdev, katom, NULL);
+	katom->gpu_rb_state = KBASE_ATOM_GPU_RB_RETURN_TO_JS;
+}
+
+static inline bool kbase_gpu_rmu_workaround(struct kbase_device *kbdev, int js)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+	bool slot_busy[3];
+
+	if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+		return true;
+	slot_busy[0] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 0,
+					KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
+	slot_busy[1] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 1,
+					KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
+	slot_busy[2] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 2,
+					KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
+
+	if ((js == 2 && !(slot_busy[0] || slot_busy[1])) ||
+		(js != 2 && !slot_busy[2]))
+		return true;
+
+	/* Don't submit slot 2 atom while GPU has jobs on slots 0/1 */
+	if (js == 2 && (kbase_gpu_atoms_submitted(kbdev, 0) ||
+			kbase_gpu_atoms_submitted(kbdev, 1) ||
+			backend->rmu_workaround_flag))
+		return false;
+
+	/* Don't submit slot 0/1 atom while GPU has jobs on slot 2 */
+	if (js != 2 && (kbase_gpu_atoms_submitted(kbdev, 2) ||
+			!backend->rmu_workaround_flag))
+		return false;
+
+	backend->rmu_workaround_flag = !backend->rmu_workaround_flag;
+
+	return true;
+}
+
+/**
+ * other_slots_busy - Determine if any job slots other than @js are currently
+ *                    running atoms
+ * @kbdev: Device pointer
+ * @js:    Job slot
+ *
+ * Return: true if any slots other than @js are busy, false otherwise
+ */
+static inline bool other_slots_busy(struct kbase_device *kbdev, int js)
+{
+	int slot;
+
+	for (slot = 0; slot < kbdev->gpu_props.num_job_slots; slot++) {
+		if (slot == js)
+			continue;
+
+		if (kbase_gpu_nr_atoms_on_slot_min(kbdev, slot,
+				KBASE_ATOM_GPU_RB_SUBMITTED))
+			return true;
+	}
+
+	return false;
+}
+
+static inline bool kbase_gpu_in_protected_mode(struct kbase_device *kbdev)
+{
+	return kbdev->protected_mode;
+}
+
+static void kbase_gpu_disable_coherent(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/*
+	 * When entering into protected mode, we must ensure that the
+	 * GPU is not operating in coherent mode as well. This is to
+	 * ensure that no protected memory can be leaked.
+	 */
+	if (kbdev->system_coherency == COHERENCY_ACE)
+		kbase_cache_set_coherency_mode(kbdev, COHERENCY_ACE_LITE);
+}
+
+static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev)
+{
+	int err = -EINVAL;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ONCE(!kbdev->protected_ops,
+			"Cannot enter protected mode: protected callbacks not specified.\n");
+
+	if (kbdev->protected_ops) {
+		/* Switch GPU to protected mode */
+		err = kbdev->protected_ops->protected_mode_enable(
+				kbdev->protected_dev);
+
+		if (err) {
+			dev_warn(kbdev->dev, "Failed to enable protected mode: %d\n",
+					err);
+		} else {
+			kbdev->protected_mode = true;
+			kbase_ipa_protection_mode_switch_event(kbdev);
+		}
+	}
+
+	return err;
+}
+
+static int kbase_gpu_protected_mode_reset(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ONCE(!kbdev->protected_ops,
+			"Cannot exit protected mode: protected callbacks not specified.\n");
+
+	if (!kbdev->protected_ops)
+		return -EINVAL;
+
+	/* The protected mode disable callback will be called as part of reset
+	 */
+	return kbase_reset_gpu_silent(kbdev);
+}
+
+static int kbase_jm_protected_entry(struct kbase_device *kbdev,
+				struct kbase_jd_atom **katom, int idx, int js)
+{
+	int err = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	err = kbase_gpu_protected_mode_enter(kbdev);
+
+	/*
+	 * Regardless of the result of the call above, we are no longer
+	 * transitioning the GPU.
+	 */
+
+	kbdev->protected_mode_transition = false;
+	kbase_pm_protected_override_disable(kbdev);
+	kbase_pm_update_cores_state_nolock(kbdev);
+
+	KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(kbdev, kbdev);
+	if (err) {
+		/*
+		 * Failed to switch into protected mode, resume
+		 * GPU hwcnt and fail atom.
+		 */
+		WARN_ON(!kbdev->protected_mode_hwcnt_disabled);
+		kbdev->protected_mode_hwcnt_desired = true;
+		if (kbdev->protected_mode_hwcnt_disabled) {
+			kbase_hwcnt_context_enable(
+				kbdev->hwcnt_gpu_ctx);
+			kbdev->protected_mode_hwcnt_disabled = false;
+		}
+
+		katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
+		kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
+		/*
+		 * Only return if head atom or previous atom
+		 * already removed - as atoms must be returned
+		 * in order.
+		 */
+		if (idx == 0 || katom[0]->gpu_rb_state ==
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+			kbase_gpu_dequeue_atom(kbdev, js, NULL);
+			kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+		}
+
+		return -EINVAL;
+	}
+
+	/*
+	 * Protected mode sanity checks.
+	 */
+	KBASE_DEBUG_ASSERT_MSG(
+			kbase_jd_katom_is_protected(katom[idx]) ==
+			kbase_gpu_in_protected_mode(kbdev),
+			"Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+			kbase_jd_katom_is_protected(katom[idx]),
+			kbase_gpu_in_protected_mode(kbdev));
+	katom[idx]->gpu_rb_state =
+			KBASE_ATOM_GPU_RB_READY;
+
+	return err;
+}
+
+static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
+		struct kbase_jd_atom **katom, int idx, int js)
+{
+	int err = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	switch (katom[idx]->protected_state.enter) {
+	case KBASE_ATOM_ENTER_PROTECTED_CHECK:
+		KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START(kbdev, kbdev);
+		/* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV
+		 * should ensure that we are not already transitioning, and that
+		 * there are no atoms currently on the GPU. */
+		WARN_ON(kbdev->protected_mode_transition);
+		WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
+		/* If hwcnt is disabled, it means we didn't clean up correctly
+		 * during last exit from protected mode.
+		 */
+		WARN_ON(kbdev->protected_mode_hwcnt_disabled);
+
+		katom[idx]->protected_state.enter =
+			KBASE_ATOM_ENTER_PROTECTED_HWCNT;
+
+		kbdev->protected_mode_transition = true;
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+	case KBASE_ATOM_ENTER_PROTECTED_HWCNT:
+		/* See if we can get away with disabling hwcnt atomically */
+		kbdev->protected_mode_hwcnt_desired = false;
+		if (!kbdev->protected_mode_hwcnt_disabled) {
+			if (kbase_hwcnt_context_disable_atomic(
+				kbdev->hwcnt_gpu_ctx))
+				kbdev->protected_mode_hwcnt_disabled = true;
+		}
+
+		/* We couldn't disable atomically, so kick off a worker */
+		if (!kbdev->protected_mode_hwcnt_disabled) {
+#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+			queue_work(system_wq,
+				&kbdev->protected_mode_hwcnt_disable_work);
+#else
+			queue_work(system_highpri_wq,
+				&kbdev->protected_mode_hwcnt_disable_work);
+#endif
+			return -EAGAIN;
+		}
+
+		/* Once this point is reached, the GPU must either be switched
+		 * to protected mode or hwcnt re-enabled. */
+
+		if (kbase_pm_protected_entry_override_enable(kbdev))
+			return -EAGAIN;
+
+		/*
+		 * Not in correct mode, begin protected mode switch.
+		 * Entering protected mode requires us to power down the L2,
+		 * and drop out of fully coherent mode.
+		 */
+		katom[idx]->protected_state.enter =
+			KBASE_ATOM_ENTER_PROTECTED_IDLE_L2;
+
+		kbase_pm_protected_override_enable(kbdev);
+		/*
+		 * Only if the GPU reset hasn't been initiated, there is a need
+		 * to invoke the state machine to explicitly power down the
+		 * shader cores and L2.
+		 */
+		if (!kbdev->pm.backend.protected_entry_transition_override)
+			kbase_pm_update_cores_state_nolock(kbdev);
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+	case KBASE_ATOM_ENTER_PROTECTED_IDLE_L2:
+		/* Avoid unnecessary waiting on non-ACE platforms. */
+		if (kbdev->system_coherency == COHERENCY_ACE) {
+			if (kbdev->pm.backend.l2_always_on) {
+				/*
+				 * If the GPU reset hasn't completed, then L2
+				 * could still be powered up.
+				 */
+				if (kbase_reset_gpu_is_active(kbdev))
+					return -EAGAIN;
+			}
+
+			if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
+				kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
+				/*
+				 * The L2 is still powered, wait for all the users to
+				 * finish with it before doing the actual reset.
+				 */
+				return -EAGAIN;
+			}
+		}
+
+		katom[idx]->protected_state.enter =
+			KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY;
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+	case KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY:
+		/*
+		 * When entering into protected mode, we must ensure that the
+		 * GPU is not operating in coherent mode as well. This is to
+		 * ensure that no protected memory can be leaked.
+		 */
+		kbase_gpu_disable_coherent(kbdev);
+
+		kbase_pm_protected_entry_override_disable(kbdev);
+
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
+			/*
+			 * Power on L2 caches; this will also result in the
+			 * correct value written to coherency enable register.
+			 */
+			kbase_pm_protected_l2_override(kbdev, true);
+
+			/*
+			 * Set the flag on the atom that additional
+			 * L2 references are taken.
+			 */
+			katom[idx]->atom_flags |=
+					KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
+		}
+
+		katom[idx]->protected_state.enter =
+			KBASE_ATOM_ENTER_PROTECTED_FINISHED;
+
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234))
+			return -EAGAIN;
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+	case KBASE_ATOM_ENTER_PROTECTED_FINISHED:
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
+			/*
+			 * Check that L2 caches are powered and, if so,
+			 * enter protected mode.
+			 */
+			if (kbdev->pm.backend.l2_state == KBASE_L2_ON) {
+				/*
+				 * Remove additional L2 reference and reset
+				 * the atom flag which denotes it.
+				 */
+				if (katom[idx]->atom_flags &
+					KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT) {
+					kbase_pm_protected_l2_override(kbdev,
+							false);
+					katom[idx]->atom_flags &=
+						~KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
+				}
+
+				err = kbase_jm_protected_entry(kbdev, katom, idx, js);
+
+				if (err)
+					return err;
+			} else {
+				/*
+				 * still waiting for L2 caches to power up
+				 */
+				return -EAGAIN;
+			}
+		} else {
+			err = kbase_jm_protected_entry(kbdev, katom, idx, js);
+
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
+		struct kbase_jd_atom **katom, int idx, int js)
+{
+	int err = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	switch (katom[idx]->protected_state.exit) {
+	case KBASE_ATOM_EXIT_PROTECTED_CHECK:
+		KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START(kbdev, kbdev);
+		/* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV
+		 * should ensure that we are not already transitioning, and that
+		 * there are no atoms currently on the GPU. */
+		WARN_ON(kbdev->protected_mode_transition);
+		WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
+
+		/*
+		 * Exiting protected mode requires a reset, but first the L2
+		 * needs to be powered down to ensure it's not active when the
+		 * reset is issued.
+		 */
+		katom[idx]->protected_state.exit =
+				KBASE_ATOM_EXIT_PROTECTED_IDLE_L2;
+
+		kbdev->protected_mode_transition = true;
+		kbase_pm_protected_override_enable(kbdev);
+		kbase_pm_update_cores_state_nolock(kbdev);
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+	case KBASE_ATOM_EXIT_PROTECTED_IDLE_L2:
+		if (kbdev->pm.backend.l2_state != KBASE_L2_OFF) {
+			/*
+			 * The L2 is still powered, wait for all the users to
+			 * finish with it before doing the actual reset.
+			 */
+			return -EAGAIN;
+		}
+		katom[idx]->protected_state.exit =
+				KBASE_ATOM_EXIT_PROTECTED_RESET;
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+	case KBASE_ATOM_EXIT_PROTECTED_RESET:
+		/* Issue the reset to the GPU */
+		err = kbase_gpu_protected_mode_reset(kbdev);
+
+		if (err == -EAGAIN)
+			return -EAGAIN;
+
+		if (err) {
+			kbdev->protected_mode_transition = false;
+			kbase_pm_protected_override_disable(kbdev);
+
+			/* Failed to exit protected mode, fail atom */
+			katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
+			kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
+			/* Only return if head atom or previous atom
+			 * already removed - as atoms must be returned
+			 * in order */
+			if (idx == 0 || katom[0]->gpu_rb_state ==
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+				kbase_gpu_dequeue_atom(kbdev, js, NULL);
+				kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+			}
+
+			/* If we're exiting from protected mode, hwcnt must have
+			 * been disabled during entry.
+			 */
+			WARN_ON(!kbdev->protected_mode_hwcnt_disabled);
+			kbdev->protected_mode_hwcnt_desired = true;
+			if (kbdev->protected_mode_hwcnt_disabled) {
+				kbase_hwcnt_context_enable(
+					kbdev->hwcnt_gpu_ctx);
+				kbdev->protected_mode_hwcnt_disabled = false;
+			}
+
+			return -EINVAL;
+		}
+
+		katom[idx]->protected_state.exit =
+				KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT;
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+	case KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT:
+		/* A GPU reset is issued when exiting protected mode. Once the
+		 * reset is done all atoms' state will also be reset. For this
+		 * reason, if the atom is still in this state we can safely
+		 * say that the reset has not completed i.e., we have not
+		 * finished exiting protected mode yet.
+		 */
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+void kbase_backend_slot_update(struct kbase_device *kbdev)
+{
+	int js;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (kbase_reset_gpu_is_active(kbdev))
+		return;
+
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+		struct kbase_jd_atom *katom[2];
+		int idx;
+
+		katom[0] = kbase_gpu_inspect(kbdev, js, 0);
+		katom[1] = kbase_gpu_inspect(kbdev, js, 1);
+		WARN_ON(katom[1] && !katom[0]);
+
+		for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+			bool cores_ready;
+			int ret;
+
+			if (!katom[idx])
+				continue;
+
+			switch (katom[idx]->gpu_rb_state) {
+			case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB:
+				/* Should be impossible */
+				WARN(1, "Attempting to update atom not in ringbuffer\n");
+				break;
+
+			case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
+				if (katom[idx]->atom_flags &
+						KBASE_KATOM_FLAG_X_DEP_BLOCKED)
+					break;
+
+				katom[idx]->gpu_rb_state =
+				KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV;
+
+			/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+			case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV:
+				if (kbase_gpu_check_secure_atoms(kbdev,
+						!kbase_jd_katom_is_protected(
+						katom[idx])))
+					break;
+
+				if ((idx == 1) && (kbase_jd_katom_is_protected(
+								katom[0]) !=
+						kbase_jd_katom_is_protected(
+								katom[1])))
+					break;
+
+				if (kbdev->protected_mode_transition)
+					break;
+
+				katom[idx]->gpu_rb_state =
+					KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION;
+
+			/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+			case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION:
+
+				/*
+				 * Exiting protected mode must be done before
+				 * the references on the cores are taken, as
+				 * a power-down of the L2 is required, which
+				 * can't happen after the references for this
+				 * atom are taken.
+				 */
+
+				if (!kbase_gpu_in_protected_mode(kbdev) &&
+					kbase_jd_katom_is_protected(katom[idx])) {
+					/* Atom needs to transition into protected mode. */
+					ret = kbase_jm_enter_protected_mode(kbdev,
+							katom, idx, js);
+					if (ret)
+						break;
+				} else if (kbase_gpu_in_protected_mode(kbdev) &&
+					!kbase_jd_katom_is_protected(katom[idx])) {
+					/* Atom needs to transition out of protected mode. */
+					ret = kbase_jm_exit_protected_mode(kbdev,
+							katom, idx, js);
+					if (ret)
+						break;
+				}
+				katom[idx]->protected_state.exit =
+						KBASE_ATOM_EXIT_PROTECTED_CHECK;
+
+				/* Atom needs no protected mode transition. */
+
+				katom[idx]->gpu_rb_state =
+					KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE;
+
+			/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+			case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
+				if (katom[idx]->will_fail_event_code) {
+					kbase_gpu_mark_atom_for_return(kbdev,
+							katom[idx]);
+					/* Set EVENT_DONE so this atom will be
+					 * completed, not unpulled. */
+					katom[idx]->event_code =
+						BASE_JD_EVENT_DONE;
+					/* Only return if head atom or previous
+					 * atom already removed - as atoms must
+					 * be returned in order. */
+					if (idx == 0 ||	katom[0]->gpu_rb_state ==
+							KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+						kbase_gpu_dequeue_atom(kbdev, js, NULL);
+						kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+					}
+					break;
+				}
+
+				cores_ready = kbase_pm_cores_requested(kbdev,
+						true);
+
+				if (katom[idx]->event_code ==
+						BASE_JD_EVENT_PM_EVENT) {
+					katom[idx]->gpu_rb_state =
+						KBASE_ATOM_GPU_RB_RETURN_TO_JS;
+					break;
+				}
+
+				if (!cores_ready)
+					break;
+
+				katom[idx]->gpu_rb_state =
+					KBASE_ATOM_GPU_RB_WAITING_AFFINITY;
+
+			/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+			case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
+				if (!kbase_gpu_rmu_workaround(kbdev, js))
+					break;
+
+				katom[idx]->gpu_rb_state =
+					KBASE_ATOM_GPU_RB_READY;
+
+			/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+			case KBASE_ATOM_GPU_RB_READY:
+
+				if (idx == 1) {
+					/* Only submit if head atom or previous
+					 * atom already submitted */
+					if ((katom[0]->gpu_rb_state !=
+						KBASE_ATOM_GPU_RB_SUBMITTED &&
+						katom[0]->gpu_rb_state !=
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB))
+						break;
+
+					/* If intra-slot serialization in use
+					 * then don't submit atom to NEXT slot
+					 */
+					if (kbdev->serialize_jobs &
+						KBASE_SERIALIZE_INTRA_SLOT)
+						break;
+				}
+
+				/* If inter-slot serialization in use then don't
+				 * submit atom if any other slots are in use */
+				if ((kbdev->serialize_jobs &
+						KBASE_SERIALIZE_INTER_SLOT) &&
+						other_slots_busy(kbdev, js))
+					break;
+
+				if ((kbdev->serialize_jobs &
+						KBASE_SERIALIZE_RESET) &&
+						kbase_reset_gpu_is_active(kbdev))
+					break;
+
+				/* Check if this job needs the cycle counter
+				 * enabled before submission */
+				if (katom[idx]->core_req & BASE_JD_REQ_PERMON)
+					kbase_pm_request_gpu_cycle_counter_l2_is_on(
+									kbdev);
+
+				kbase_job_hw_submit(kbdev, katom[idx], js);
+				katom[idx]->gpu_rb_state =
+						KBASE_ATOM_GPU_RB_SUBMITTED;
+
+				/* Inform power management at start/finish of
+				 * atom so it can update its GPU utilisation
+				 * metrics. */
+				kbase_pm_metrics_update(kbdev,
+						&katom[idx]->start_timestamp);
+
+			/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+			case KBASE_ATOM_GPU_RB_SUBMITTED:
+				/* Atom submitted to HW, nothing else to do */
+				break;
+
+			case KBASE_ATOM_GPU_RB_RETURN_TO_JS:
+				/* Only return if head atom or previous atom
+				 * already removed - as atoms must be returned
+				 * in order */
+				if (idx == 0 || katom[0]->gpu_rb_state ==
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+					kbase_gpu_dequeue_atom(kbdev, js, NULL);
+					kbase_jm_return_atom_to_js(kbdev,
+								katom[idx]);
+				}
+				break;
+			}
+		}
+	}
+
+	/* Warn if PRLAM-8987 affinity restrictions are violated */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+		WARN_ON((kbase_gpu_atoms_submitted(kbdev, 0) ||
+			kbase_gpu_atoms_submitted(kbdev, 1)) &&
+			kbase_gpu_atoms_submitted(kbdev, 2));
+}
+
+
+void kbase_backend_run_atom(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	kbase_gpu_enqueue_atom(kbdev, katom);
+	kbase_backend_slot_update(kbdev);
+}
+
+#define HAS_DEP(katom) ((katom)->pre_dep || ((katom)->atom_flags & \
+	(KBASE_KATOM_FLAG_X_DEP_BLOCKED | KBASE_KATOM_FLAG_FAIL_BLOCKER)))
+
+bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js,
+				u32 completion_code)
+{
+	struct kbase_jd_atom *katom;
+	struct kbase_jd_atom *next_katom;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	katom = kbase_gpu_inspect(kbdev, js, 0);
+	next_katom = kbase_gpu_inspect(kbdev, js, 1);
+
+	if (next_katom && katom->kctx == next_katom->kctx &&
+		next_katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED &&
+		(HAS_DEP(next_katom) || next_katom->sched_priority ==
+				katom->sched_priority) &&
+		(kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO))
+									!= 0 ||
+		kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI))
+									!= 0)) {
+		kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
+				JS_COMMAND_NOP);
+		next_katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
+
+		if (completion_code == BASE_JD_EVENT_STOPPED) {
+			KBASE_TLSTREAM_TL_NRET_ATOM_LPU(kbdev, next_katom,
+				&kbdev->gpu_props.props.raw_props.js_features
+					[next_katom->slot_nr]);
+			KBASE_TLSTREAM_TL_NRET_ATOM_AS(kbdev, next_katom, &kbdev->as
+					[next_katom->kctx->as_nr]);
+			KBASE_TLSTREAM_TL_NRET_CTX_LPU(kbdev, next_katom->kctx,
+				&kbdev->gpu_props.props.raw_props.js_features
+					[next_katom->slot_nr]);
+		}
+
+		if (next_katom->core_req & BASE_JD_REQ_PERMON)
+			kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
+
+		return true;
+	}
+
+	return false;
+}
+
+void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+				u32 completion_code,
+				u64 job_tail,
+				ktime_t *end_timestamp)
+{
+	struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0);
+	struct kbase_context *kctx = katom->kctx;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/*
+	 * When a hard-stop is followed close after a soft-stop, the completion
+	 * code may be set to STOPPED, even though the job is terminated
+	 */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8438)) {
+		if (completion_code == BASE_JD_EVENT_STOPPED &&
+				(katom->atom_flags &
+				KBASE_KATOM_FLAG_BEEN_HARD_STOPPED)) {
+			completion_code = BASE_JD_EVENT_TERMINATED;
+		}
+	}
+
+	if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6787) || (katom->core_req &
+					BASE_JD_REQ_SKIP_CACHE_END)) &&
+			completion_code != BASE_JD_EVENT_DONE &&
+			!(completion_code & BASE_JD_SW_EVENT)) {
+		/* When a job chain fails, on a T60x or when
+		 * BASE_JD_REQ_SKIP_CACHE_END is set, the GPU cache is not
+		 * flushed. To prevent future evictions causing possible memory
+		 * corruption we need to flush the cache manually before any
+		 * affected memory gets reused. */
+		katom->need_cache_flush_cores_retained = true;
+	} else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) {
+		if (kbdev->gpu_props.num_core_groups > 1 &&
+				katom->device_nr >= 1) {
+			dev_info(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
+			katom->need_cache_flush_cores_retained = true;
+		}
+	}
+
+	katom = kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
+
+	if (completion_code == BASE_JD_EVENT_STOPPED) {
+		struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
+									0);
+
+		/*
+		 * Dequeue next atom from ringbuffers on same slot if required.
+		 * This atom will already have been removed from the NEXT
+		 * registers by kbase_gpu_soft_hard_stop_slot(), to ensure that
+		 * the atoms on this slot are returned in the correct order.
+		 */
+		if (next_katom && katom->kctx == next_katom->kctx &&
+				next_katom->sched_priority ==
+				katom->sched_priority) {
+			WARN_ON(next_katom->gpu_rb_state ==
+					KBASE_ATOM_GPU_RB_SUBMITTED);
+			kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
+			kbase_jm_return_atom_to_js(kbdev, next_katom);
+		}
+	} else if (completion_code != BASE_JD_EVENT_DONE) {
+		struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+		int i;
+
+		if (!kbase_ctx_flag(katom->kctx, KCTX_DYING))
+			dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)",
+					js, completion_code,
+					kbase_exception_name
+					(kbdev,
+					completion_code));
+
+#if KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR != 0
+		KBASE_TRACE_DUMP(kbdev);
+#endif
+		kbasep_js_clear_submit_allowed(js_devdata, katom->kctx);
+
+		/*
+		 * Remove all atoms on the same context from ringbuffers. This
+		 * will not remove atoms that are already on the GPU, as these
+		 * are guaranteed not to have fail dependencies on the failed
+		 * atom.
+		 */
+		for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) {
+			struct kbase_jd_atom *katom_idx0 =
+						kbase_gpu_inspect(kbdev, i, 0);
+			struct kbase_jd_atom *katom_idx1 =
+						kbase_gpu_inspect(kbdev, i, 1);
+
+			if (katom_idx0 && katom_idx0->kctx == katom->kctx &&
+					HAS_DEP(katom_idx0) &&
+					katom_idx0->gpu_rb_state !=
+					KBASE_ATOM_GPU_RB_SUBMITTED) {
+				/* Dequeue katom_idx0 from ringbuffer */
+				kbase_gpu_dequeue_atom(kbdev, i, end_timestamp);
+
+				if (katom_idx1 &&
+						katom_idx1->kctx == katom->kctx
+						&& HAS_DEP(katom_idx1) &&
+						katom_idx1->gpu_rb_state !=
+						KBASE_ATOM_GPU_RB_SUBMITTED) {
+					/* Dequeue katom_idx1 from ringbuffer */
+					kbase_gpu_dequeue_atom(kbdev, i,
+							end_timestamp);
+
+					katom_idx1->event_code =
+							BASE_JD_EVENT_STOPPED;
+					kbase_jm_return_atom_to_js(kbdev,
+								katom_idx1);
+				}
+				katom_idx0->event_code = BASE_JD_EVENT_STOPPED;
+				kbase_jm_return_atom_to_js(kbdev, katom_idx0);
+
+			} else if (katom_idx1 &&
+					katom_idx1->kctx == katom->kctx &&
+					HAS_DEP(katom_idx1) &&
+					katom_idx1->gpu_rb_state !=
+					KBASE_ATOM_GPU_RB_SUBMITTED) {
+				/* Can not dequeue this atom yet - will be
+				 * dequeued when atom at idx0 completes */
+				katom_idx1->event_code = BASE_JD_EVENT_STOPPED;
+				kbase_gpu_mark_atom_for_return(kbdev,
+								katom_idx1);
+			}
+		}
+	}
+
+	KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_JOB_DONE, kctx, katom, katom->jc,
+					js, completion_code);
+
+	if (job_tail != 0 && job_tail != katom->jc) {
+		/* Some of the job has been executed, so we update the job chain
+		 * address to where we should resume from */
+		katom->jc = job_tail;
+		KBASE_TRACE_ADD_SLOT(kbdev, JM_UPDATE_HEAD, katom->kctx,
+					katom, job_tail, js);
+	}
+
+	/* Only update the event code for jobs that weren't cancelled */
+	if (katom->event_code != BASE_JD_EVENT_JOB_CANCELLED)
+		katom->event_code = (base_jd_event_code)completion_code;
+
+	/* Complete the job, and start new ones
+	 *
+	 * Also defer remaining work onto the workqueue:
+	 * - Re-queue Soft-stopped jobs
+	 * - For any other jobs, queue the job back into the dependency system
+	 * - Schedule out the parent context if necessary, and schedule a new
+	 *   one in.
+	 */
+#ifdef CONFIG_GPU_TRACEPOINTS
+	{
+		/* The atom in the HEAD */
+		struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
+									0);
+
+		if (next_katom && next_katom->gpu_rb_state ==
+						KBASE_ATOM_GPU_RB_SUBMITTED) {
+			char js_string[16];
+
+			trace_gpu_sched_switch(kbasep_make_job_slot_string(js,
+							js_string,
+							sizeof(js_string)),
+						ktime_to_ns(*end_timestamp),
+						(u32)next_katom->kctx->id, 0,
+						next_katom->work_id);
+			kbdev->hwaccess.backend.slot_rb[js].last_context =
+							next_katom->kctx;
+		} else {
+			char js_string[16];
+
+			trace_gpu_sched_switch(kbasep_make_job_slot_string(js,
+							js_string,
+							sizeof(js_string)),
+						ktime_to_ns(ktime_get()), 0, 0,
+						0);
+			kbdev->hwaccess.backend.slot_rb[js].last_context =
+								NULL;
+		}
+	}
+#endif
+
+	if (kbdev->serialize_jobs & KBASE_SERIALIZE_RESET)
+		kbase_reset_gpu_silent(kbdev);
+
+	if (completion_code == BASE_JD_EVENT_STOPPED)
+		katom = kbase_jm_return_atom_to_js(kbdev, katom);
+	else
+		katom = kbase_jm_complete(kbdev, katom, end_timestamp);
+
+	if (katom) {
+		/* Cross-slot dependency has now become runnable. Try to submit
+		 * it. */
+
+		/* Check if there are lower priority jobs to soft stop */
+		kbase_job_slot_ctx_priority_check_locked(kctx, katom);
+
+		kbase_jm_try_kick(kbdev, 1 << katom->slot_nr);
+	}
+
+	/* Job completion may have unblocked other atoms. Try to update all job
+	 * slots */
+	kbase_backend_slot_update(kbdev);
+}
+
+void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp)
+{
+	int js;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* Reset should always take the GPU out of protected mode */
+	WARN_ON(kbase_gpu_in_protected_mode(kbdev));
+
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+		int atom_idx = 0;
+		int idx;
+
+		for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+			struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
+					js, atom_idx);
+			bool keep_in_jm_rb = false;
+
+			if (!katom)
+				break;
+			if (katom->protected_state.exit ==
+			    KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT) {
+				/* protected mode sanity checks */
+				KBASE_DEBUG_ASSERT_MSG(
+					kbase_jd_katom_is_protected(katom) == kbase_gpu_in_protected_mode(kbdev),
+					"Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+					kbase_jd_katom_is_protected(katom), kbase_gpu_in_protected_mode(kbdev));
+				KBASE_DEBUG_ASSERT_MSG(
+					(kbase_jd_katom_is_protected(katom) && js == 0) ||
+					!kbase_jd_katom_is_protected(katom),
+					"Protected atom on JS%d not supported", js);
+			}
+			if ((katom->gpu_rb_state < KBASE_ATOM_GPU_RB_SUBMITTED) &&
+			    !kbase_ctx_flag(katom->kctx, KCTX_DYING))
+				keep_in_jm_rb = true;
+
+			kbase_gpu_release_atom(kbdev, katom, NULL);
+
+			/*
+			 * If the atom wasn't on HW when the reset was issued
+			 * then leave it in the RB and next time we're kicked
+			 * it will be processed again from the starting state.
+			 */
+			if (keep_in_jm_rb) {
+				katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+				/* As the atom was not removed, increment the
+				 * index so that we read the correct atom in the
+				 * next iteration. */
+				atom_idx++;
+				continue;
+			}
+
+			/*
+			 * The atom was on the HW when the reset was issued
+			 * all we can do is fail the atom.
+			 */
+			kbase_gpu_dequeue_atom(kbdev, js, NULL);
+			katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+			kbase_jm_complete(kbdev, katom, end_timestamp);
+		}
+	}
+
+	/* Re-enable GPU hardware counters if we're resetting from protected
+	 * mode.
+	 */
+	kbdev->protected_mode_hwcnt_desired = true;
+	if (kbdev->protected_mode_hwcnt_disabled) {
+		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+		kbdev->protected_mode_hwcnt_disabled = false;
+
+		KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(kbdev, kbdev);
+	}
+
+	kbdev->protected_mode_transition = false;
+	kbase_pm_protected_override_disable(kbdev);
+}
+
+static inline void kbase_gpu_stop_atom(struct kbase_device *kbdev,
+					int js,
+					struct kbase_jd_atom *katom,
+					u32 action)
+{
+	u32 hw_action = action & JS_COMMAND_MASK;
+
+	kbase_job_check_enter_disjoint(kbdev, action, katom->core_req, katom);
+	kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, hw_action,
+							katom->core_req, katom);
+	katom->kctx->blocked_js[js][katom->sched_priority] = true;
+}
+
+static inline void kbase_gpu_remove_atom(struct kbase_device *kbdev,
+						struct kbase_jd_atom *katom,
+						u32 action,
+						bool disjoint)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
+	kbase_gpu_mark_atom_for_return(kbdev, katom);
+	katom->kctx->blocked_js[katom->slot_nr][katom->sched_priority] = true;
+
+	if (disjoint)
+		kbase_job_check_enter_disjoint(kbdev, action, katom->core_req,
+									katom);
+}
+
+static int should_stop_x_dep_slot(struct kbase_jd_atom *katom)
+{
+	if (katom->x_post_dep) {
+		struct kbase_jd_atom *dep_atom = katom->x_post_dep;
+
+		if (dep_atom->gpu_rb_state !=
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB &&
+			dep_atom->gpu_rb_state !=
+					KBASE_ATOM_GPU_RB_RETURN_TO_JS)
+			return dep_atom->slot_nr;
+	}
+	return -1;
+}
+
+bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+					struct kbase_context *kctx,
+					int js,
+					struct kbase_jd_atom *katom,
+					u32 action)
+{
+	struct kbase_jd_atom *katom_idx0;
+	struct kbase_jd_atom *katom_idx1;
+
+	bool katom_idx0_valid, katom_idx1_valid;
+
+	bool ret = false;
+
+	int stop_x_dep_idx0 = -1, stop_x_dep_idx1 = -1;
+	int prio_idx0 = 0, prio_idx1 = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	katom_idx0 = kbase_gpu_inspect(kbdev, js, 0);
+	katom_idx1 = kbase_gpu_inspect(kbdev, js, 1);
+
+	if (katom_idx0)
+		prio_idx0 = katom_idx0->sched_priority;
+	if (katom_idx1)
+		prio_idx1 = katom_idx1->sched_priority;
+
+	if (katom) {
+		katom_idx0_valid = (katom_idx0 == katom);
+		/* If idx0 is to be removed and idx1 is on the same context,
+		 * then idx1 must also be removed otherwise the atoms might be
+		 * returned out of order */
+		if (katom_idx1)
+			katom_idx1_valid = (katom_idx1 == katom) ||
+						(katom_idx0_valid &&
+							(katom_idx0->kctx ==
+							katom_idx1->kctx));
+		else
+			katom_idx1_valid = false;
+	} else {
+		katom_idx0_valid = (katom_idx0 &&
+				(!kctx || katom_idx0->kctx == kctx));
+		katom_idx1_valid = (katom_idx1 &&
+				(!kctx || katom_idx1->kctx == kctx) &&
+				prio_idx0 == prio_idx1);
+	}
+
+	if (katom_idx0_valid)
+		stop_x_dep_idx0 = should_stop_x_dep_slot(katom_idx0);
+	if (katom_idx1_valid)
+		stop_x_dep_idx1 = should_stop_x_dep_slot(katom_idx1);
+
+	if (katom_idx0_valid) {
+		if (katom_idx0->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) {
+			/* Simple case - just dequeue and return */
+			kbase_gpu_dequeue_atom(kbdev, js, NULL);
+			if (katom_idx1_valid) {
+				kbase_gpu_dequeue_atom(kbdev, js, NULL);
+				katom_idx1->event_code =
+						BASE_JD_EVENT_REMOVED_FROM_NEXT;
+				kbase_jm_return_atom_to_js(kbdev, katom_idx1);
+				katom_idx1->kctx->blocked_js[js][prio_idx1] =
+						true;
+			}
+
+			katom_idx0->event_code =
+						BASE_JD_EVENT_REMOVED_FROM_NEXT;
+			kbase_jm_return_atom_to_js(kbdev, katom_idx0);
+			katom_idx0->kctx->blocked_js[js][prio_idx0] = true;
+		} else {
+			/* katom_idx0 is on GPU */
+			if (katom_idx1_valid && katom_idx1->gpu_rb_state ==
+						KBASE_ATOM_GPU_RB_SUBMITTED) {
+				/* katom_idx0 and katom_idx1 are on GPU */
+
+				if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+						JS_COMMAND_NEXT)) == 0) {
+					/* idx0 has already completed - stop
+					 * idx1 (known to be valid here) */
+					kbase_gpu_stop_atom(kbdev, js,
+							katom_idx1, action);
+					ret = true;
+				} else {
+					/* idx1 is in NEXT registers - attempt
+					 * to remove */
+					kbase_reg_write(kbdev,
+							JOB_SLOT_REG(js,
+							JS_COMMAND_NEXT),
+							JS_COMMAND_NOP);
+
+					if (kbase_reg_read(kbdev,
+							JOB_SLOT_REG(js,
+							JS_HEAD_NEXT_LO))
+									!= 0 ||
+						kbase_reg_read(kbdev,
+							JOB_SLOT_REG(js,
+							JS_HEAD_NEXT_HI))
+									!= 0) {
+						/* idx1 removed successfully,
+						 * will be handled in IRQ */
+						kbase_gpu_remove_atom(kbdev,
+								katom_idx1,
+								action, true);
+						stop_x_dep_idx1 =
+					should_stop_x_dep_slot(katom_idx1);
+
+						/* stop idx0 if still on GPU */
+						kbase_gpu_stop_atom(kbdev, js,
+								katom_idx0,
+								action);
+						ret = true;
+					} else {
+						/* idx0 has already completed,
+						 * stop idx1 if needed */
+						kbase_gpu_stop_atom(kbdev, js,
+								katom_idx1,
+								action);
+						ret = true;
+					}
+				}
+			} else if (katom_idx1_valid) {
+				/* idx1 not on GPU but must be dequeued */
+
+				/* idx1 will be handled in IRQ */
+				kbase_gpu_remove_atom(kbdev, katom_idx1, action,
+									false);
+				/* stop idx0 */
+				/* This will be repeated for anything removed
+				 * from the next registers, since their normal
+				 * flow was also interrupted, and this function
+				 * might not enter disjoint state e.g. if we
+				 * don't actually do a hard stop on the head
+				 * atom */
+				kbase_gpu_stop_atom(kbdev, js, katom_idx0,
+									action);
+				ret = true;
+			} else {
+				/* no atom in idx1 */
+				/* just stop idx0 */
+				kbase_gpu_stop_atom(kbdev, js, katom_idx0,
+									action);
+				ret = true;
+			}
+		}
+	} else if (katom_idx1_valid) {
+		if (katom_idx1->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) {
+			/* Mark for return */
+			/* idx1 will be returned once idx0 completes */
+			kbase_gpu_remove_atom(kbdev, katom_idx1, action,
+									false);
+		} else {
+			/* idx1 is on GPU */
+			if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+						JS_COMMAND_NEXT)) == 0) {
+				/* idx0 has already completed - stop idx1 */
+				kbase_gpu_stop_atom(kbdev, js, katom_idx1,
+									action);
+				ret = true;
+			} else {
+				/* idx1 is in NEXT registers - attempt to
+				 * remove */
+				kbase_reg_write(kbdev, JOB_SLOT_REG(js,
+							JS_COMMAND_NEXT),
+							JS_COMMAND_NOP);
+
+				if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+						JS_HEAD_NEXT_LO)) != 0 ||
+				    kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+						JS_HEAD_NEXT_HI)) != 0) {
+					/* idx1 removed successfully, will be
+					 * handled in IRQ once idx0 completes */
+					kbase_gpu_remove_atom(kbdev, katom_idx1,
+									action,
+									false);
+				} else {
+					/* idx0 has already completed - stop
+					 * idx1 */
+					kbase_gpu_stop_atom(kbdev, js,
+								katom_idx1,
+								action);
+					ret = true;
+				}
+			}
+		}
+	}
+
+
+	if (stop_x_dep_idx0 != -1)
+		kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx0,
+								NULL, action);
+
+	if (stop_x_dep_idx1 != -1)
+		kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx1,
+								NULL, action);
+
+	return ret;
+}
+
+void kbase_backend_cache_clean(struct kbase_device *kbdev,
+		struct kbase_jd_atom *katom)
+{
+	if (katom->need_cache_flush_cores_retained) {
+		kbase_gpu_start_cache_clean(kbdev);
+		kbase_gpu_wait_cache_clean(kbdev);
+
+		katom->need_cache_flush_cores_retained = false;
+	}
+}
+
+void kbase_backend_complete_wq(struct kbase_device *kbdev,
+						struct kbase_jd_atom *katom)
+{
+	/*
+	 * If cache flush required due to HW workaround then perform the flush
+	 * now
+	 */
+	kbase_backend_cache_clean(kbdev, katom);
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10969)            &&
+	    (katom->core_req & BASE_JD_REQ_FS)                        &&
+	    katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT       &&
+	    (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) &&
+	    !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)) {
+		dev_dbg(kbdev->dev, "Soft-stopped fragment shader job got a TILE_RANGE_FAULT. Possible HW issue, trying SW workaround\n");
+		if (kbasep_10969_workaround_clamp_coordinates(katom)) {
+			/* The job had a TILE_RANGE_FAULT after it was
+			 * soft-stopped. Due to a HW issue we try to execute
+			 * the job again.
+			 */
+			dev_dbg(kbdev->dev,
+				"Clamping has been executed, try to rerun the job\n"
+			);
+			katom->event_code = BASE_JD_EVENT_STOPPED;
+			katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
+		}
+	}
+}
+
+void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
+		base_jd_core_req core_req)
+{
+	if (!kbdev->pm.active_count) {
+		mutex_lock(&kbdev->js_data.runpool_mutex);
+		mutex_lock(&kbdev->pm.lock);
+		kbase_pm_update_active(kbdev);
+		mutex_unlock(&kbdev->pm.lock);
+		mutex_unlock(&kbdev->js_data.runpool_mutex);
+	}
+}
+
+void kbase_gpu_dump_slots(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	int js;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	dev_info(kbdev->dev, "kbase_gpu_dump_slots:\n");
+
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+		int idx;
+
+		for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+			struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
+									js,
+									idx);
+
+			if (katom)
+				dev_info(kbdev->dev,
+				"  js%d idx%d : katom=%p gpu_rb_state=%d\n",
+				js, idx, katom, katom->gpu_rb_state);
+			else
+				dev_info(kbdev->dev, "  js%d idx%d : empty\n",
+								js, idx);
+		}
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
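+
+/*
+ * Example output from kbase_gpu_dump_slots() (illustrative values only):
+ *
+ *	kbase_gpu_dump_slots:
+ *	  js0 idx0 : katom=ffffff80a1b2c3d0 gpu_rb_state=6
+ *	  js0 idx1 : empty
+ *	  js1 idx0 : empty
+ *	  js1 idx1 : empty
+ */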
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h
new file mode 100644
index 0000000..c3b9f2d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h
@@ -0,0 +1,83 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register-based HW access backend specific APIs
+ */
+
+#ifndef _KBASE_HWACCESS_GPU_H_
+#define _KBASE_HWACCESS_GPU_H_
+
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+/**
+ * kbase_gpu_irq_evict - Evict an atom from a NEXT slot
+ *
+ * @kbdev:           Device pointer
+ * @js:              Job slot to evict from
+ * @completion_code: Event code from job that was run.
+ *
+ * Evict the atom in the NEXT slot for the specified job slot. This function is
+ * called from the job complete IRQ handler when the previous job has failed.
+ *
+ * Return: true if job evicted from NEXT registers, false otherwise
+ */
+bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js,
+				u32 completion_code);
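+
+/*
+ * Illustrative call pattern (a sketch only, not the actual IRQ handler):
+ *
+ *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *	if (completion_code != BASE_JD_EVENT_DONE &&
+ *	    kbase_gpu_irq_evict(kbdev, js, completion_code))
+ *		dev_dbg(kbdev->dev, "evicted NEXT atom on slot %d", js);
+ *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ */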
+
+/**
+ * kbase_gpu_complete_hw - Complete an atom on job slot js
+ *
+ * @kbdev:           Device pointer
+ * @js:              Job slot that has completed
+ * @completion_code: Event code from job that has completed
+ * @job_tail:        The tail address from the hardware if the job has partially
+ *                   completed
+ * @end_timestamp:   Time of completion
+ */
+void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+				u32 completion_code,
+				u64 job_tail,
+				ktime_t *end_timestamp);
+
+/**
+ * kbase_gpu_inspect - Inspect the contents of the HW access ringbuffer
+ *
+ * @kbdev:  Device pointer
+ * @js:     Job slot to inspect
+ * @idx:    Index into ringbuffer. 0 is the job currently running on
+ *          the slot, 1 is the job waiting, all other values are invalid.
+ * Return:  The atom at that position in the ringbuffer
+ *          or NULL if no atom present
+ */
+struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js,
+					int idx);
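+
+/*
+ * Example (a sketch; the caller must hold hwaccess_lock):
+ *
+ *	struct kbase_jd_atom *head = kbase_gpu_inspect(kbdev, js, 0);
+ *	struct kbase_jd_atom *next = kbase_gpu_inspect(kbdev, js, 1);
+ *
+ *	if (head && !next)
+ *		dev_dbg(kbdev->dev, "slot %d has a free NEXT entry", js);
+ */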
+
+/**
+ * kbase_gpu_dump_slots - Print the contents of the slot ringbuffers
+ *
+ * @kbdev:  Device pointer
+ */
+void kbase_gpu_dump_slots(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HWACCESS_GPU_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
new file mode 100644
index 0000000..1ffaa23
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
@@ -0,0 +1,353 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register-based HW access backend specific job scheduler APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_reset_gpu.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+
+/*
+ * Hold the runpool_mutex for this
+ */
+static inline bool timer_callback_should_run(struct kbase_device *kbdev)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+	s8 nr_running_ctxs;
+
+	lockdep_assert_held(&kbdev->js_data.runpool_mutex);
+
+	/* Timer must stop if we are suspending */
+	if (backend->suspend_timer)
+		return false;
+
+	/* nr_contexts_pullable is updated with the runpool_mutex. However, the
+	 * locking in the caller gives us a barrier that ensures
+	 * nr_contexts_pullable is up-to-date for reading */
+	nr_running_ctxs = atomic_read(&kbdev->js_data.nr_contexts_runnable);
+
+#ifdef CONFIG_MALI_DEBUG
+	if (kbdev->js_data.softstop_always) {
+		/* Debug support for allowing soft-stop on a single context */
+		return true;
+	}
+#endif				/* CONFIG_MALI_DEBUG */
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9435)) {
+		/* Timeouts would have to be 4x longer (due to micro-
+		 * architectural design) to support OpenCL conformance tests, so
+		 * only run the timer when there's:
+		 * - 2 or more CL contexts
+		 * - 1 or more GLES contexts
+		 *
+		 * NOTE: A context that has both Compute and Non-Compute jobs
+		 * is treated as an OpenCL context (hence, we don't check
+		 * KBASEP_JS_CTX_ATTR_NON_COMPUTE).
+		 */
+		{
+			s8 nr_compute_ctxs =
+				kbasep_js_ctx_attr_count_on_runpool(kbdev,
+						KBASEP_JS_CTX_ATTR_COMPUTE);
+			s8 nr_noncompute_ctxs = nr_running_ctxs -
+							nr_compute_ctxs;
+
+			return (bool) (nr_compute_ctxs >= 2 ||
+							nr_noncompute_ctxs > 0);
+		}
+	} else {
+		/* Run the timer callback whenever you have at least 1 context
+		 */
+		return (bool) (nr_running_ctxs > 0);
+	}
+}
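+
+/*
+ * Worked example for the BASE_HW_ISSUE_9435 branch above (hypothetical
+ * counts): with 3 runnable contexts of which 1 is a compute (OpenCL)
+ * context, nr_compute_ctxs = 1 and nr_noncompute_ctxs = 2, so the
+ * function returns true because at least one GLES context is present.
+ */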
+
+static enum hrtimer_restart timer_callback(struct hrtimer *timer)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev;
+	struct kbasep_js_device_data *js_devdata;
+	struct kbase_backend_data *backend;
+	int s;
+	bool reset_needed = false;
+
+	KBASE_DEBUG_ASSERT(timer != NULL);
+
+	backend = container_of(timer, struct kbase_backend_data,
+							scheduling_timer);
+	kbdev = container_of(backend, struct kbase_device, hwaccess.backend);
+	js_devdata = &kbdev->js_data;
+
+	/* Loop through the slots */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	for (s = 0; s < kbdev->gpu_props.num_job_slots; s++) {
+		struct kbase_jd_atom *atom = NULL;
+
+		if (kbase_backend_nr_atoms_on_slot(kbdev, s) > 0) {
+			atom = kbase_gpu_inspect(kbdev, s, 0);
+			KBASE_DEBUG_ASSERT(atom != NULL);
+		}
+
+		if (atom != NULL) {
+			/* The current version of the model doesn't support
+			 * Soft-Stop */
+			if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) {
+				u32 ticks = atom->ticks++;
+
+#if !defined(CONFIG_MALI_JOB_DUMP) && !defined(CONFIG_MALI_VECTOR_DUMP)
+				u32 soft_stop_ticks, hard_stop_ticks,
+								gpu_reset_ticks;
+				if (atom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+					soft_stop_ticks =
+						js_devdata->soft_stop_ticks_cl;
+					hard_stop_ticks =
+						js_devdata->hard_stop_ticks_cl;
+					gpu_reset_ticks =
+						js_devdata->gpu_reset_ticks_cl;
+				} else {
+					soft_stop_ticks =
+						js_devdata->soft_stop_ticks;
+					hard_stop_ticks =
+						js_devdata->hard_stop_ticks_ss;
+					gpu_reset_ticks =
+						js_devdata->gpu_reset_ticks_ss;
+				}
+
+				/* If timeouts have been changed then ensure
+				 * that atom tick count is not greater than the
+				 * new soft_stop timeout. This ensures that
+				 * atoms do not miss any of the timeouts due to
+				 * races between this worker and the thread
+				 * changing the timeouts. */
+				if (backend->timeouts_updated &&
+						ticks > soft_stop_ticks)
+					ticks = atom->ticks = soft_stop_ticks;
+
+				/* Job is Soft-Stoppable */
+				if (ticks == soft_stop_ticks) {
+					/* Job has been scheduled for at least
+					 * js_devdata->soft_stop_ticks ticks.
+					 * Soft stop the slot so we can run
+					 * other jobs.
+					 */
+#if !KBASE_DISABLE_SCHEDULING_SOFT_STOPS
+					int disjoint_threshold =
+		KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD;
+					u32 softstop_flags = 0u;
+
+					dev_dbg(kbdev->dev, "Soft-stop");
+					/* nr_user_contexts_running is updated
+					 * with the runpool_mutex, but we can't
+					 * take that here.
+					 *
+					 * However, if it's about to be
+					 * increased then the new context can't
+					 * run any jobs until they take the
+					 * hwaccess_lock, so it's OK to observe
+					 * the older value.
+					 *
+					 * Similarly, if it's about to be
+					 * decreased, the last job from another
+					 * context has already finished, so it's
+					 * not too bad that we observe the older
+					 * value and register a disjoint event
+					 * when we try soft-stopping */
+					if (js_devdata->nr_user_contexts_running
+							>= disjoint_threshold)
+						softstop_flags |=
+						JS_COMMAND_SW_CAUSES_DISJOINT;
+
+					kbase_job_slot_softstop_swflags(kbdev,
+						s, atom, softstop_flags);
+#endif
+				} else if (ticks == hard_stop_ticks) {
+					/* Job has been scheduled for at least
+					 * js_devdata->hard_stop_ticks_ss ticks.
+					 * It should have been soft-stopped by
+					 * now. Hard stop the slot.
+					 */
+#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
+					int ms =
+						js_devdata->scheduling_period_ns
+								/ 1000000u;
+					dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)",
+							(unsigned long)ticks,
+							(unsigned long)ms);
+					kbase_job_slot_hardstop(atom->kctx, s,
+									atom);
+#endif
+				} else if (ticks == gpu_reset_ticks) {
+					/* Job has been scheduled for at least
+					 * js_devdata->gpu_reset_ticks_ss ticks.
+					 * It should have left the GPU by now.
+					 * Signal that the GPU needs to be
+					 * reset.
+					 */
+					reset_needed = true;
+				}
+#else				/* !CONFIG_MALI_JOB_DUMP */
+				/* NOTE: During CONFIG_MALI_JOB_DUMP, we use
+				 * the alternate timeouts, which makes the hard-
+				 * stop and GPU reset timeout much longer. We
+				 * also ensure that we don't soft-stop at all.
+				 */
+				if (ticks == js_devdata->soft_stop_ticks) {
+					/* Job has been scheduled for at least
+					 * js_devdata->soft_stop_ticks. We do
+					 * not soft-stop during
+					 * CONFIG_MALI_JOB_DUMP, however.
+					 */
+					dev_dbg(kbdev->dev, "Soft-stop");
+				} else if (ticks ==
+					js_devdata->hard_stop_ticks_dumping) {
+					/* Job has been scheduled for at least
+					 * js_devdata->hard_stop_ticks_dumping
+					 * ticks. Hard stop the slot.
+					 */
+#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
+					int ms =
+						js_devdata->scheduling_period_ns
+								/ 1000000u;
+					dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)",
+							(unsigned long)ticks,
+							(unsigned long)ms);
+					kbase_job_slot_hardstop(atom->kctx, s,
+									atom);
+#endif
+				} else if (ticks ==
+					js_devdata->gpu_reset_ticks_dumping) {
+					/* Job has been scheduled for at least
+					 * js_devdata->gpu_reset_ticks_dumping
+					 * ticks. It should have left the GPU by
+					 * now. Signal that the GPU needs to be
+					 * reset.
+					 */
+					reset_needed = true;
+				}
+#endif				/* !CONFIG_MALI_JOB_DUMP */
+			}
+		}
+	}
+	if (reset_needed) {
+		dev_err(kbdev->dev, "JS: Job has been on the GPU for too long (JS_RESET_TICKS_SS/DUMPING timeout hit). Issuing GPU soft-reset to resolve.");
+
+		if (kbase_prepare_to_reset_gpu_locked(kbdev))
+			kbase_reset_gpu_locked(kbdev);
+	}
+	/* The timer is re-issued if there are contexts in the run-pool */
+
+	if (backend->timer_running)
+		hrtimer_start(&backend->scheduling_timer,
+			HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
+			HRTIMER_MODE_REL);
+
+	backend->timeouts_updated = false;
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return HRTIMER_NORESTART;
+}
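+
+/*
+ * Illustrative escalation timeline for timer_callback() (the tick values
+ * are hypothetical; the real thresholds come from js_devdata): with
+ * soft_stop_ticks = 1, hard_stop_ticks_ss = 50 and gpu_reset_ticks_ss = 55,
+ * an atom still on a slot after 1 timer tick is soft-stopped, after 50
+ * ticks it is hard-stopped, and after 55 ticks a GPU reset is requested.
+ */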
+
+void kbase_backend_ctx_count_changed(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+	unsigned long flags;
+
+	lockdep_assert_held(&js_devdata->runpool_mutex);
+
+	if (!timer_callback_should_run(kbdev)) {
+		/* Take spinlock to force synchronisation with timer */
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		backend->timer_running = false;
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		/* From now on, the return value of timer_callback_should_run()
+		 * will also cause the timer not to requeue itself. Its return
+		 * value cannot change, because it depends on variables updated
+		 * with the runpool_mutex held, which the caller of this must
+		 * also hold */
+		hrtimer_cancel(&backend->scheduling_timer);
+	}
+
+	if (timer_callback_should_run(kbdev) && !backend->timer_running) {
+		/* Take spinlock to force synchronisation with timer */
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		backend->timer_running = true;
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		hrtimer_start(&backend->scheduling_timer,
+			HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
+							HRTIMER_MODE_REL);
+
+		KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u,
+									0u);
+	}
+}
+
+int kbase_backend_timer_init(struct kbase_device *kbdev)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+	hrtimer_init(&backend->scheduling_timer, CLOCK_MONOTONIC,
+							HRTIMER_MODE_REL);
+	backend->scheduling_timer.function = timer_callback;
+
+	backend->timer_running = false;
+
+	return 0;
+}
+
+void kbase_backend_timer_term(struct kbase_device *kbdev)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+	hrtimer_cancel(&backend->scheduling_timer);
+}
+
+void kbase_backend_timer_suspend(struct kbase_device *kbdev)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+	backend->suspend_timer = true;
+
+	kbase_backend_ctx_count_changed(kbdev);
+}
+
+void kbase_backend_timer_resume(struct kbase_device *kbdev)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+	backend->suspend_timer = false;
+
+	kbase_backend_ctx_count_changed(kbdev);
+}
+
+void kbase_backend_timeouts_changed(struct kbase_device *kbdev)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+	backend->timeouts_updated = true;
+}
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h
new file mode 100644
index 0000000..6576e55
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h
@@ -0,0 +1,74 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register-based HW access backend specific job scheduler APIs
+ */
+
+#ifndef _KBASE_JS_BACKEND_H_
+#define _KBASE_JS_BACKEND_H_
+
+/**
+ * kbase_backend_timer_init() - Initialise the JS scheduling timer
+ * @kbdev:	Device pointer
+ *
+ * This function should be called at driver initialisation
+ *
+ * Return: 0 on success
+ */
+int kbase_backend_timer_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_term() - Terminate the JS scheduling timer
+ * @kbdev:	Device pointer
+ *
+ * This function should be called at driver termination
+ */
+void kbase_backend_timer_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_suspend - Suspend is happening, stop the JS scheduling
+ *                               timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called on suspend, after the active count has reached
+ * zero. This is required as the timer may have been started on job submission
+ * to the job scheduler, but before jobs are submitted to the GPU.
+ *
+ * Caller must hold runpool_mutex.
+ */
+void kbase_backend_timer_suspend(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_resume - Resume is happening, re-evaluate the JS
+ *                              scheduling timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called on resume. Note that it is not guaranteed to
+ * re-start the timer, only to evaluate whether it should be re-started.
+ *
+ * Caller must hold runpool_mutex.
+ */
+void kbase_backend_timer_resume(struct kbase_device *kbdev);
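+
+/*
+ * Typical suspend/resume pairing (a sketch only; the real call sites live
+ * in the PM backend and must hold runpool_mutex as documented above):
+ *
+ *	mutex_lock(&kbdev->js_data.runpool_mutex);
+ *	kbase_backend_timer_suspend(kbdev);
+ *	mutex_unlock(&kbdev->js_data.runpool_mutex);
+ *	...
+ *	mutex_lock(&kbdev->js_data.runpool_mutex);
+ *	kbase_backend_timer_resume(kbdev);
+ *	mutex_unlock(&kbdev->js_data.runpool_mutex);
+ */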
+
+#endif /* _KBASE_JS_BACKEND_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.c
new file mode 100644
index 0000000..916916d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_bits.h>
+#include <mali_kbase_config_defaults.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include "mali_kbase_l2_mmu_config.h"
+
+/**
+ * struct l2_mmu_config_limit_region
+ *
+ * @value:    The default value to load into the L2_MMU_CONFIG register
+ * @mask:     The shifted mask of the field in the L2_MMU_CONFIG register
+ * @shift:    The shift of where the field starts in the L2_MMU_CONFIG
+ *            register. This must equal the position of the lowest set
+ *            bit in @mask.
+ */
+struct l2_mmu_config_limit_region {
+	u32 value, mask, shift;
+};
+
+/**
+ * struct l2_mmu_config_limit
+ *
+ * @product_model:    The GPU for which this entry applies
+ * @read:             Values for the read limit field
+ * @write:            Values for the write limit field
+ */
+struct l2_mmu_config_limit {
+	u32 product_model;
+	struct l2_mmu_config_limit_region read;
+	struct l2_mmu_config_limit_region write;
+};
+
+/*
+ * Zero represents no limit
+ *
+ * For LBEX, TBEX, TTRX and TNAX:
+ *   The value represents the number of outstanding reads (6 bits) or writes (5 bits)
+ *
+ * For all other GPUs it is a fraction; see mali_kbase_config_defaults.h
+ */
+static const struct l2_mmu_config_limit limits[] = {
+	 /* GPU                       read                  write            */
+	 {GPU_ID2_PRODUCT_LBEX, {0, GENMASK(10, 5), 5}, {0, GENMASK(16, 12), 12} },
+	 {GPU_ID2_PRODUCT_TBEX, {0, GENMASK(10, 5), 5}, {0, GENMASK(16, 12), 12} },
+	 {GPU_ID2_PRODUCT_TTRX, {0, GENMASK(12, 7), 7}, {0, GENMASK(17, 13), 13} },
+	 {GPU_ID2_PRODUCT_TNAX, {0, GENMASK(12, 7), 7}, {0, GENMASK(17, 13), 13} },
+	 {GPU_ID2_PRODUCT_TGOX,
+	   {KBASE_3BIT_AID_32, GENMASK(14, 12), 12},
+	   {KBASE_3BIT_AID_32, GENMASK(17, 15), 15} },
+	 {GPU_ID2_PRODUCT_TNOX,
+	   {KBASE_3BIT_AID_32, GENMASK(14, 12), 12},
+	   {KBASE_3BIT_AID_32, GENMASK(17, 15), 15} },
+};
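+
+/*
+ * Reading a row (TTRX as an example): the read limit field occupies bits
+ * 12..7 of L2_MMU_CONFIG (mask GENMASK(12, 7), shift 7) and its default
+ * value 0 means "no limit"; the write limit sits in bits 17..13.
+ */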
+
+void kbase_set_mmu_quirks(struct kbase_device *kbdev)
+{
+	/* All older GPUs had 2 bits for both fields; this is the default */
+	struct l2_mmu_config_limit limit = {
+		  0, /* Any GPU not in the limits array defined above */
+		 {KBASE_AID_32, GENMASK(25, 24), 24},
+		 {KBASE_AID_32, GENMASK(27, 26), 26}
+		};
+	u32 product_model, gpu_id;
+	u32 mmu_config;
+	int i;
+
+	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	product_model = gpu_id & GPU_ID2_PRODUCT_MODEL;
+
+	/* Limit the GPU bus bandwidth if the platform needs this. */
+	for (i = 0; i < ARRAY_SIZE(limits); i++) {
+		if (product_model == limits[i].product_model) {
+			limit = limits[i];
+			break;
+		}
+	}
+
+	mmu_config = kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG));
+
+	mmu_config &= ~(limit.read.mask | limit.write.mask);
+	/* Can't use FIELD_PREP() macro here as the mask isn't constant */
+	mmu_config |= (limit.read.value << limit.read.shift) |
+		      (limit.write.value << limit.write.shift);
+
+	kbdev->hw_quirks_mmu = mmu_config;
+
+	if (kbdev->system_coherency == COHERENCY_ACE) {
+		/* Allow memory configuration disparity to be ignored,
+		 * we optimize the use of shared memory and thus we
+		 * expect some disparity in the memory configuration.
+		 */
+		kbdev->hw_quirks_mmu |= L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY;
+	}
+}
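+
+/*
+ * Worked example (TNOX, reading the table above): the read field is
+ * cleared with ~GENMASK(14, 12) and then programmed to
+ * KBASE_3BIT_AID_32 << 12; the write field is handled the same way at
+ * bits 17..15. Any product not listed falls back to the 2-bit
+ * KBASE_AID_32 defaults at bits 25..24 (read) and 27..26 (write).
+ */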
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.h
new file mode 100644
index 0000000..25636ee
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.h
@@ -0,0 +1,44 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_L2_MMU_CONFIG_H_
+#define _KBASE_L2_MMU_CONFIG_H_
+/**
+ * kbase_set_mmu_quirks - Set the hw_quirks_mmu field of kbdev
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Use this function to initialise the hw_quirks_mmu field, for instance to set
+ * the MAX_READS and MAX_WRITES to sane defaults for each GPU.
+ */
+void kbase_set_mmu_quirks(struct kbase_device *kbdev);
+
+#endif /* _KBASE_L2_MMU_CONFIG_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c
new file mode 100644
index 0000000..77e0b78
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c
@@ -0,0 +1,395 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/bitops.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_mem.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_tracepoints.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <mali_kbase_as_fault_debugfs.h>
+
+static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
+		u32 num_pages)
+{
+	u64 region;
+
+	/* can't lock a zero sized range */
+	KBASE_DEBUG_ASSERT(num_pages);
+
+	region = pfn << PAGE_SHIFT;
+	/*
+	 * fls returns (given the ASSERT above):
+	 * 1 .. 32
+	 *
+	 * 10 + fls(num_pages)
+	 * results in the range (11 .. 42)
+	 */
+
+	/* Gracefully handle num_pages being zero in release builds, where
+	 * the KBASE_DEBUG_ASSERT above compiles out */
+	if (0 == num_pages) {
+		region |= 11;
+	} else {
+		u8 region_width;
+
+		region_width = 10 + fls(num_pages);
+		if (num_pages != (1ul << (region_width - 11))) {
+			/* not pow2, so must go up to the next pow2 */
+			region_width += 1;
+		}
+		KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
+		KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE);
+		region |= region_width;
+	}
+
+	return region;
+}
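+
+/*
+ * Worked example: for num_pages = 1000, fls(1000) = 10, so region_width
+ * starts at 20; 1000 is not a power of two (1000 != 1 << 9), so
+ * region_width is bumped to 21 and encoded into the low bits of the
+ * returned lock address.
+ */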
+
+static int wait_ready(struct kbase_device *kbdev,
+		unsigned int as_nr)
+{
+	unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
+	u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
+
+	/* Wait for the MMU status to indicate there is no active command, in
+	 * case one is pending. Do not log remaining register accesses. */
+	while (--max_loops && (val & AS_STATUS_AS_ACTIVE))
+		val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
+
+	if (max_loops == 0) {
+		dev_err(kbdev->dev, "AS_ACTIVE bit stuck, might be caused by slow/unstable GPU clock or possible faulty FPGA connector\n");
+		return -1;
+	}
+
+	/* If the wait loop was entered, log the last value read. */
+	if (KBASE_AS_INACTIVE_MAX_LOOPS - 1 > max_loops)
+		kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
+
+	return 0;
+}
+
+static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd)
+{
+	int status;
+
+	/* write AS_COMMAND when MMU is ready to accept another command */
+	status = wait_ready(kbdev, as_nr);
+	if (status == 0)
+		kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd);
+
+	return status;
+}
+
+static void validate_protected_page_fault(struct kbase_device *kbdev)
+{
+	/* GPUs which support (native) protected mode shall not report page
+	 * fault addresses unless they have protected debug mode and protected
+	 * debug mode is turned on */
+	u32 protected_debug_mode = 0;
+
+	if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE))
+		return;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
+		protected_debug_mode = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(GPU_STATUS)) & GPU_DBGEN;
+	}
+
+	if (!protected_debug_mode) {
+		/* fault_addr should never be reported in protected mode.
+		 * However, we just continue by printing an error message */
+		dev_err(kbdev->dev, "Fault address reported in protected mode\n");
+	}
+}
+
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
+{
+	const int num_as = 16;
+	const int busfault_shift = MMU_PAGE_FAULT_FLAGS;
+	const int pf_shift = 0;
+	const unsigned long as_bit_mask = (1UL << num_as) - 1;
+	unsigned long flags;
+	u32 new_mask;
+	u32 tmp;
+
+	/* bus faults */
+	u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
+	/* page faults (note: Ignore ASes with both pf and bf) */
+	u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits;
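+
+	/*
+	 * Decoding example (hypothetical value, assuming MMU_PAGE_FAULT_FLAGS
+	 * is 16): irq_stat = 0x00010001 means AS0 raised both a bus fault and
+	 * a page fault, so bf_bits = 0x0001 and pf_bits = 0x0000 - the bus
+	 * fault takes precedence for that address space.
+	 */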
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+	/* remember current mask */
+	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+	new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
+	/* mask interrupts for now */
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0);
+	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+
+	while (bf_bits | pf_bits) {
+		struct kbase_as *as;
+		int as_no;
+		struct kbase_context *kctx;
+		struct kbase_fault *fault;
+
+		/*
+		 * the while logic ensures we have a bit set, no need to check
+		 * for not-found here
+		 */
+		as_no = ffs(bf_bits | pf_bits) - 1;
+		as = &kbdev->as[as_no];
+
+		/* find the fault type */
+		if (bf_bits & (1 << as_no))
+			fault = &as->bf_data;
+		else
+			fault = &as->pf_data;
+
+		/*
+		 * Refcount the kctx ASAP - it shouldn't disappear anyway, since
+		 * Bus/Page faults _should_ only occur whilst jobs are running,
+		 * and a job causing the Bus/Page fault shouldn't complete until
+		 * the MMU is updated
+		 */
+		kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);
+
+		/* find faulting address */
+		fault->addr = kbase_reg_read(kbdev, MMU_AS_REG(as_no,
+				AS_FAULTADDRESS_HI));
+		fault->addr <<= 32;
+		fault->addr |= kbase_reg_read(kbdev, MMU_AS_REG(as_no,
+				AS_FAULTADDRESS_LO));
+		/* Mark the fault protected or not */
+		fault->protected_mode = kbdev->protected_mode;
+
+		if (kbdev->protected_mode && fault->addr) {
+			/* check if address reporting is allowed */
+			validate_protected_page_fault(kbdev);
+		}
+
+		/* report the fault to debugfs */
+		kbase_as_fault_debugfs_new(kbdev, as_no);
+
+		/* record the fault status */
+		fault->status = kbase_reg_read(kbdev, MMU_AS_REG(as_no,
+				AS_FAULTSTATUS));
+
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
+			fault->extra_addr = kbase_reg_read(kbdev,
+					MMU_AS_REG(as_no, AS_FAULTEXTRA_HI));
+			fault->extra_addr <<= 32;
+			fault->extra_addr |= kbase_reg_read(kbdev,
+					MMU_AS_REG(as_no, AS_FAULTEXTRA_LO));
+		}
+
+		if (kbase_as_has_bus_fault(as, fault)) {
+			/* Mark bus fault as handled.
+			 * Note that a bus fault is processed first in case
+			 * where both a bus fault and page fault occur.
+			 */
+			bf_bits &= ~(1UL << as_no);
+
+			/* remove the queued BF (and PF) from the mask */
+			new_mask &= ~(MMU_BUS_ERROR(as_no) |
+					MMU_PAGE_FAULT(as_no));
+		} else {
+			/* Mark page fault as handled */
+			pf_bits &= ~(1UL << as_no);
+
+			/* remove the queued PF from the mask */
+			new_mask &= ~MMU_PAGE_FAULT(as_no);
+		}
+
+		/* Process the interrupt for this address space */
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		kbase_mmu_interrupt_process(kbdev, kctx, as, fault);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	}
+
+	/* reenable interrupts */
+	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+	tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
+	new_mask |= tmp;
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask);
+	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
+
+void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
+{
+	struct kbase_mmu_setup *current_setup = &as->current_setup;
+	u64 transcfg = 0;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
+		transcfg = current_setup->transcfg;
+
+		/* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK */
+		/* Clear PTW_MEMATTR bits */
+		transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
+		/* Enable correct PTW_MEMATTR bits */
+		transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;
+		/* Ensure page-tables reads use read-allocate cache-policy in
+		 * the L2
+		 */
+		transcfg |= AS_TRANSCFG_R_ALLOCATE;
+
+		if (kbdev->system_coherency == COHERENCY_ACE) {
+			/* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable) */
+			/* Clear PTW_SH bits */
+			transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
+			/* Enable correct PTW_SH bits */
+			transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
+		}
+
+		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
+				transcfg);
+		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
+				(transcfg >> 32) & 0xFFFFFFFFUL);
+	} else {
+		if (kbdev->system_coherency == COHERENCY_ACE)
+			current_setup->transtab |= AS_TRANSTAB_LPAE_SHARE_OUTER;
+	}
+
+	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
+			current_setup->transtab & 0xFFFFFFFFUL);
+	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
+			(current_setup->transtab >> 32) & 0xFFFFFFFFUL);
+
+	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
+			current_setup->memattr & 0xFFFFFFFFUL);
+	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
+			(current_setup->memattr >> 32) & 0xFFFFFFFFUL);
+
+	KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(kbdev, as,
+			current_setup->transtab,
+			current_setup->memattr,
+			transcfg);
+
+	write_cmd(kbdev, as->number, AS_COMMAND_UPDATE);
+}
+
+int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
+		u64 vpfn, u32 nr, u32 op,
+		unsigned int handling_irq)
+{
+	int ret;
+
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+
+	if (op == AS_COMMAND_UNLOCK) {
+		/* Unlock doesn't require a lock first */
+		ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
+	} else {
+		u64 lock_addr = lock_region(kbdev, vpfn, nr);
+
+		/* Lock the region that needs to be updated */
+		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO),
+				lock_addr & 0xFFFFFFFFUL);
+		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI),
+				(lock_addr >> 32) & 0xFFFFFFFFUL);
+		write_cmd(kbdev, as->number, AS_COMMAND_LOCK);
+
+		/* Run the MMU operation */
+		write_cmd(kbdev, as->number, op);
+
+		/* Wait for the flush to complete */
+		ret = wait_ready(kbdev, as->number);
+
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
+			/* Issue an UNLOCK command to ensure that valid page
+			   tables are re-read by the GPU after an update.
+			   Note that the FLUSH command should perform all the
+			   actions necessary; however, the bus logs show that
+			   if multiple page faults occur within an 8-page
+			   region, the MMU does not always re-read the updated
+			   page table entries for later faults, or reads them
+			   only partially, and it subsequently raises the page
+			   fault IRQ for the same addresses. The UNLOCK ensures
+			   that the MMU cache is flushed, so updates can be
+			   re-read. As the region is now unlocked, we need to
+			   issue 2 UNLOCK commands in order to flush the
+			   MMU/uTLB, see PRLAM-8812.
+			 */
+			write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
+			write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
+		}
+	}
+
+	return ret;
+}
+
+void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
+		enum kbase_mmu_fault_type type)
+{
+	unsigned long flags;
+	u32 pf_bf_mask;
+
+	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+
+	/*
+	 * A reset is in-flight and we're flushing the IRQ + bottom half
+	 * so don't update anything as it could race with the reset code.
+	 */
+	if (kbdev->irq_reset_flush)
+		goto unlock;
+
+	/* Clear the page (and bus fault IRQ as well in case one occurred) */
+	pf_bf_mask = MMU_PAGE_FAULT(as->number);
+	if (type == KBASE_MMU_FAULT_TYPE_BUS ||
+			type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
+		pf_bf_mask |= MMU_BUS_ERROR(as->number);
+
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask);
+
+unlock:
+	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
+
+void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
+		enum kbase_mmu_fault_type type)
+{
+	unsigned long flags;
+	u32 irq_mask;
+
+	/* Enable the page fault IRQ (and bus fault IRQ as well in case one
+	 * occurred) */
+	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+
+	/*
+	 * A reset is in-flight and we're flushing the IRQ + bottom half
+	 * so don't update anything as it could race with the reset code.
+	 */
+	if (kbdev->irq_reset_flush)
+		goto unlock;
+
+	irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK)) |
+			MMU_PAGE_FAULT(as->number);
+
+	if (type == KBASE_MMU_FAULT_TYPE_BUS ||
+			type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
+		irq_mask |= MMU_BUS_ERROR(as->number);
+
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask);
+
+unlock:
+	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.h
new file mode 100644
index 0000000..0a3fa7e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.h
@@ -0,0 +1,62 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Interface file for the direct implementation for MMU hardware access
+ *
+ * Direct MMU hardware interface
+ *
+ * This module provides the interface(s) that are required by the direct
+ * register access implementation of the MMU hardware interface
+ */
+
+#ifndef _KBASE_MMU_HW_DIRECT_H_
+#define _KBASE_MMU_HW_DIRECT_H_
+
+#include <mali_kbase_defs.h>
+
+/**
+ * kbase_mmu_interrupt - Process an MMU interrupt.
+ *
+ * Process the MMU interrupt that was reported by the &kbase_device.
+ *
+ * @kbdev:       Pointer to the kbase device for which the interrupt happened.
+ * @irq_stat:    Value of the MMU_IRQ_STATUS register.
+ */
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
+
+/**
+ * kbase_mmu_bus_fault_interrupt - Process a bus fault interrupt.
+ *
+ * Process the bus fault interrupt that was reported for a particular GPU
+ * address space.
+ *
+ * @kbdev:       Pointer to the kbase device for which bus fault was reported.
+ * @status:      Value of the GPU_FAULTSTATUS register.
+ * @as_nr:       GPU address space for which the bus fault occurred.
+ *
+ * Return: zero if the operation was successful, non-zero otherwise.
+ */
+int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev,
+		u32 status, u32 as_nr);
+
+#endif	/* _KBASE_MMU_HW_DIRECT_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c
new file mode 100644
index 0000000..51a10a2
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c
@@ -0,0 +1,68 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * "Always on" power management policy
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static bool always_on_shaders_needed(struct kbase_device *kbdev)
+{
+	return true;
+}
+
+static bool always_on_get_core_active(struct kbase_device *kbdev)
+{
+	return true;
+}
+
+static void always_on_init(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+static void always_on_term(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+/*
+ * The struct kbase_pm_policy structure for the "always on" power policy.
+ *
+ * This is the static structure that defines the "always on" power policy's
+ * callbacks and name.
+ */
+const struct kbase_pm_policy kbase_pm_always_on_policy_ops = {
+	"always_on",			/* name */
+	always_on_init,			/* init */
+	always_on_term,			/* term */
+	always_on_shaders_needed,	/* shaders_needed */
+	always_on_get_core_active,	/* get_core_active */
+	0u,				/* flags */
+	KBASE_PM_POLICY_ID_ALWAYS_ON,	/* id */
+};
+
+KBASE_EXPORT_TEST_API(kbase_pm_always_on_policy_ops);
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h
new file mode 100644
index 0000000..e7927cf
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h
@@ -0,0 +1,81 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * "Always on" power management policy
+ */
+
+#ifndef MALI_KBASE_PM_ALWAYS_ON_H
+#define MALI_KBASE_PM_ALWAYS_ON_H
+
+/**
+ * DOC:
+ * The "Always on" power management policy has the following
+ * characteristics:
+ *
+ * - When KBase indicates that the GPU will be powered up, but we don't yet
+ *   know which Job Chains are to be run:
+ *    Shader Cores are powered up, regardless of whether or not they will be
+ *    needed later.
+ *
+ * - When KBase indicates that Shader Cores are needed to submit the currently
+ *   queued Job Chains:
+ *    Shader Cores are kept powered, regardless of whether or not they will be
+ *    needed.
+ *
+ * - When KBase indicates that the GPU need not be powered:
+ *    The Shader Cores are kept powered, regardless of whether or not they will
+ *    be needed. The GPU itself is also kept powered, even though it is not
+ *    needed.
+ *
+ * This policy is automatically overridden during system suspend: the desired
+ * core state is ignored, and the cores are forced off regardless of what the
+ * policy requests. After resuming from suspend, new changes to the desired
+ * core state made by the policy are honored.
+ *
+ * Note:
+ *
+ * - KBase indicates the GPU will be powered up when it has a User Process that
+ *   has just started to submit Job Chains.
+ *
+ * - KBase indicates the GPU need not be powered when all the Job Chains from
+ *   User Processes have finished, and it is waiting for a User Process to
+ *   submit some more Job Chains.
+ */
+
+/**
+ * struct kbasep_pm_policy_always_on - Private struct for policy instance data
+ * @dummy: unused dummy variable
+ *
+ * This contains data that is private to the particular power policy that is
+ * active.
+ */
+struct kbasep_pm_policy_always_on {
+	int dummy;
+};
+
+extern const struct kbase_pm_policy kbase_pm_always_on_policy_ops;
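+
+/*
+ * Illustrative usage (an assumption about the surrounding driver, with a
+ * platform-dependent device node name): the active policy can usually be
+ * inspected and changed from user space through the power_policy sysfs
+ * attribute, e.g.
+ *
+ *   # cat /sys/class/misc/mali0/device/power_policy
+ *   [coarse_demand] always_on
+ *   # echo always_on > /sys/class/misc/mali0/device/power_policy
+ */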
+
+#endif /* MALI_KBASE_PM_ALWAYS_ON_H */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
new file mode 100644
index 0000000..0faf677
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
@@ -0,0 +1,692 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * GPU backend implementation of base kernel power management APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_config_defaults.h>
+
+#include <mali_kbase_pm.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_hwcnt_context.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_devfreq.h>
+
+static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
+static void kbase_pm_hwcnt_disable_worker(struct work_struct *data);
+static void kbase_pm_gpu_clock_control_worker(struct work_struct *data);
+
+int kbase_pm_runtime_init(struct kbase_device *kbdev)
+{
+	struct kbase_pm_callback_conf *callbacks;
+
+	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+	if (callbacks) {
+		kbdev->pm.backend.callback_power_on =
+					callbacks->power_on_callback;
+		kbdev->pm.backend.callback_power_off =
+					callbacks->power_off_callback;
+		kbdev->pm.backend.callback_power_suspend =
+					callbacks->power_suspend_callback;
+		kbdev->pm.backend.callback_power_resume =
+					callbacks->power_resume_callback;
+		kbdev->pm.callback_power_runtime_init =
+					callbacks->power_runtime_init_callback;
+		kbdev->pm.callback_power_runtime_term =
+					callbacks->power_runtime_term_callback;
+		kbdev->pm.backend.callback_power_runtime_on =
+					callbacks->power_runtime_on_callback;
+		kbdev->pm.backend.callback_power_runtime_off =
+					callbacks->power_runtime_off_callback;
+		kbdev->pm.backend.callback_power_runtime_idle =
+					callbacks->power_runtime_idle_callback;
+
+		if (callbacks->power_runtime_init_callback)
+			return callbacks->power_runtime_init_callback(kbdev);
+		else
+			return 0;
+	}
+
+	kbdev->pm.backend.callback_power_on = NULL;
+	kbdev->pm.backend.callback_power_off = NULL;
+	kbdev->pm.backend.callback_power_suspend = NULL;
+	kbdev->pm.backend.callback_power_resume = NULL;
+	kbdev->pm.callback_power_runtime_init = NULL;
+	kbdev->pm.callback_power_runtime_term = NULL;
+	kbdev->pm.backend.callback_power_runtime_on = NULL;
+	kbdev->pm.backend.callback_power_runtime_off = NULL;
+	kbdev->pm.backend.callback_power_runtime_idle = NULL;
+
+	return 0;
+}
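+
+/*
+ * Illustrative platform glue (a sketch, not part of this file; the callback
+ * names are assumptions): the POWER_MANAGEMENT_CALLBACKS table consumed
+ * above is conventionally provided by the platform's
+ * mali_kbase_config_platform.h, e.g.
+ *
+ *   static struct kbase_pm_callback_conf pm_callbacks = {
+ *       .power_on_callback  = pm_callback_power_on,
+ *       .power_off_callback = pm_callback_power_off,
+ *   };
+ *   #define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+ */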
+
+void kbase_pm_runtime_term(struct kbase_device *kbdev)
+{
+	if (kbdev->pm.callback_power_runtime_term)
+		kbdev->pm.callback_power_runtime_term(kbdev);
+}
+
+void kbase_pm_register_access_enable(struct kbase_device *kbdev)
+{
+	struct kbase_pm_callback_conf *callbacks;
+
+	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+
+	if (callbacks)
+		callbacks->power_on_callback(kbdev);
+
+	kbdev->pm.backend.gpu_powered = true;
+}
+
+void kbase_pm_register_access_disable(struct kbase_device *kbdev)
+{
+	struct kbase_pm_callback_conf *callbacks;
+
+	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+
+	if (callbacks)
+		callbacks->power_off_callback(kbdev);
+
+	kbdev->pm.backend.gpu_powered = false;
+}
+
+int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
+{
+	int ret = 0;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	mutex_init(&kbdev->pm.lock);
+
+	kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue(
+			"kbase_pm_poweroff_wait", WQ_HIGHPRI | WQ_UNBOUND, 1);
+	if (!kbdev->pm.backend.gpu_poweroff_wait_wq)
+		return -ENOMEM;
+
+	INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work,
+			kbase_pm_gpu_poweroff_wait_wq);
+
+	kbdev->pm.backend.ca_cores_enabled = ~0ull;
+	kbdev->pm.backend.gpu_powered = false;
+	kbdev->pm.suspending = false;
+#ifdef CONFIG_MALI_DEBUG
+	kbdev->pm.backend.driver_ready_for_irqs = false;
+#endif /* CONFIG_MALI_DEBUG */
+	init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);
+
+	/* Initialise the metrics subsystem */
+	ret = kbasep_pm_metrics_init(kbdev);
+	if (ret)
+		return ret;
+
+	init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
+	kbdev->pm.backend.reset_done = false;
+
+	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
+	kbdev->pm.active_count = 0;
+
+	spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
+
+	init_waitqueue_head(&kbdev->pm.backend.poweroff_wait);
+
+	if (kbase_pm_ca_init(kbdev) != 0)
+		goto workq_fail;
+
+	if (kbase_pm_policy_init(kbdev) != 0)
+		goto pm_policy_fail;
+
+	if (kbase_pm_state_machine_init(kbdev) != 0)
+		goto pm_state_machine_fail;
+
+	kbdev->pm.backend.hwcnt_desired = false;
+	kbdev->pm.backend.hwcnt_disabled = true;
+	INIT_WORK(&kbdev->pm.backend.hwcnt_disable_work,
+		kbase_pm_hwcnt_disable_worker);
+	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+	/* This feature can be enabled at runtime via a module parameter
+	 * passed at insmod time; when set, it overrides all the workarounds
+	 * configured below.
+	 */
+	if (platform_power_down_only) {
+		kbdev->pm.backend.gpu_clock_slow_down_wa = false;
+		kbdev->pm.backend.l2_always_on = false;
+
+		return 0;
+	}
+
+	if (IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED)) {
+		kbdev->pm.backend.l2_always_on = false;
+		kbdev->pm.backend.gpu_clock_slow_down_wa = false;
+
+		return 0;
+	}
+
+	/* WA1: L2 always_on for GPUs being affected by GPU2017-1336 */
+	if (!IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE)) {
+		kbdev->pm.backend.gpu_clock_slow_down_wa = false;
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336))
+			kbdev->pm.backend.l2_always_on = true;
+		else
+			kbdev->pm.backend.l2_always_on = false;
+
+		return 0;
+	}
+
+	/* WA3: Clock slow down for GPUs being affected by GPU2017-1336 */
+	kbdev->pm.backend.l2_always_on = false;
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336)) {
+		kbdev->pm.backend.gpu_clock_slow_down_wa = true;
+		kbdev->pm.backend.gpu_clock_suspend_freq = 0;
+		kbdev->pm.backend.gpu_clock_slow_down_desired = true;
+		kbdev->pm.backend.gpu_clock_slowed_down = false;
+		INIT_WORK(&kbdev->pm.backend.gpu_clock_control_work,
+			kbase_pm_gpu_clock_control_worker);
+	} else {
+		kbdev->pm.backend.gpu_clock_slow_down_wa = false;
+	}
+
+	return 0;
+
+pm_state_machine_fail:
+	kbase_pm_policy_term(kbdev);
+pm_policy_fail:
+	kbase_pm_ca_term(kbdev);
+workq_fail:
+	kbasep_pm_metrics_term(kbdev);
+	return -EINVAL;
+}
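+
+/*
+ * Summary of the workaround selection in kbase_hwaccess_pm_init() above
+ * (derived from the code, for readability):
+ *
+ *   platform_power_down_only                            -> no workaround
+ *   CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED          -> no workaround
+ *   !CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE
+ *       and GPU2017-1336 present                        -> WA1: L2 always on
+ *   otherwise, GPU2017-1336 present                     -> WA3: slow the GPU
+ *                                                          clock around L2
+ *                                                          power transitions
+ */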
+
+void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
+{
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	/* Turn clocks and interrupts on - no-op if we haven't done a previous
+	 * kbase_pm_clock_off() */
+	kbase_pm_clock_on(kbdev, is_resume);
+
+	if (!is_resume) {
+		unsigned long flags;
+
+		/* Force update of L2 state - if we have abandoned a power off
+		 * then this may be required to power the L2 back on.
+		 */
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		kbase_pm_update_state(kbdev);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	}
+
+	/* Update core status as required by the policy */
+	kbase_pm_update_cores_state(kbdev);
+
+	/* NOTE: We don't wait to reach the desired state, since running atoms
+	 * will wait for that state to be reached anyway */
+}
+
+static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
+{
+	struct kbase_device *kbdev = container_of(data, struct kbase_device,
+			pm.backend.gpu_poweroff_wait_work);
+	struct kbase_pm_device_data *pm = &kbdev->pm;
+	struct kbase_pm_backend_data *backend = &pm->backend;
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	unsigned long flags;
+
+	if (!platform_power_down_only) {
+		/* Wait for power transitions to complete. We do this with no
+		 * locks held so that we don't deadlock with any pending
+		 * workqueues.
+		 */
+		kbase_pm_wait_for_desired_state(kbdev);
+	}
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+
+	if (!backend->poweron_required) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		WARN_ON(backend->shaders_state !=
+					KBASE_SHADERS_OFF_CORESTACK_OFF ||
+			backend->l2_state != KBASE_L2_OFF);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+		/* Disable interrupts and turn the clock off */
+		if (!kbase_pm_clock_off(kbdev, backend->poweroff_is_suspend)) {
+			/*
+			 * Page/bus faults are pending, must drop locks to
+			 * process.  Interrupts are disabled so no more faults
+			 * should be generated at this point.
+			 */
+			mutex_unlock(&kbdev->pm.lock);
+			mutex_unlock(&js_devdata->runpool_mutex);
+			kbase_flush_mmu_wqs(kbdev);
+			mutex_lock(&js_devdata->runpool_mutex);
+			mutex_lock(&kbdev->pm.lock);
+
+			/* Turn off clock now that faults have been handled. We
+			 * dropped locks so poweron_required may have changed -
+			 * power back on if this is the case (effectively only
+			 * re-enabling of the interrupts would be done in this
+			 * case, as the clocks to GPU were not withdrawn yet).
+			 */
+			if (backend->poweron_required)
+				kbase_pm_clock_on(kbdev, false);
+			else
+				WARN_ON(!kbase_pm_clock_off(kbdev,
+						backend->poweroff_is_suspend));
+		}
+	}
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	backend->poweroff_wait_in_progress = false;
+	if (backend->poweron_required) {
+		backend->poweron_required = false;
+		kbdev->pm.backend.l2_desired = true;
+		kbase_pm_update_state(kbdev);
+		kbase_pm_update_cores_state_nolock(kbdev);
+		kbase_backend_slot_update(kbdev);
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	wake_up(&kbdev->pm.backend.poweroff_wait);
+}
+
+static void kbase_pm_l2_clock_slow(struct kbase_device *kbdev)
+{
+#if defined(CONFIG_MALI_MIDGARD_DVFS)
+	struct clk *clk = kbdev->clocks[0];
+#endif
+
+	if (!kbdev->pm.backend.gpu_clock_slow_down_wa)
+		return;
+
+	/* No suspend clock is specified */
+	if (WARN_ON_ONCE(!kbdev->pm.backend.gpu_clock_suspend_freq))
+		return;
+
+#if defined(CONFIG_MALI_DEVFREQ)
+
+	/* Suspend devfreq */
+	devfreq_suspend_device(kbdev->devfreq);
+
+	/* Keep the current freq to restore it upon resume */
+	kbdev->previous_frequency = kbdev->current_nominal_freq;
+
+	/* Slow down GPU clock to the suspend clock */
+	kbase_devfreq_force_freq(kbdev,
+			kbdev->pm.backend.gpu_clock_suspend_freq);
+
+#elif defined(CONFIG_MALI_MIDGARD_DVFS) /* CONFIG_MALI_DEVFREQ */
+
+	if (WARN_ON_ONCE(!clk))
+		return;
+
+	/* Stop the metrics gathering framework */
+	if (kbase_pm_metrics_is_active(kbdev))
+		kbase_pm_metrics_stop(kbdev);
+
+	/* Keep the current freq to restore it upon resume */
+	kbdev->previous_frequency = clk_get_rate(clk);
+
+	/* Slow down GPU clock to the suspend clock */
+	if (WARN_ON_ONCE(clk_set_rate(clk,
+				kbdev->pm.backend.gpu_clock_suspend_freq)))
+		dev_err(kbdev->dev, "Failed to set suspend freq\n");
+
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+}
+
+static void kbase_pm_l2_clock_normalize(struct kbase_device *kbdev)
+{
+#if defined(CONFIG_MALI_MIDGARD_DVFS)
+	struct clk *clk = kbdev->clocks[0];
+#endif
+
+	if (!kbdev->pm.backend.gpu_clock_slow_down_wa)
+		return;
+
+#if defined(CONFIG_MALI_DEVFREQ)
+
+	/* Restore GPU clock to the previous one */
+	kbase_devfreq_force_freq(kbdev, kbdev->previous_frequency);
+
+	/* Resume devfreq */
+	devfreq_resume_device(kbdev->devfreq);
+
+#elif defined(CONFIG_MALI_MIDGARD_DVFS) /* CONFIG_MALI_DEVFREQ */
+
+	if (WARN_ON_ONCE(!clk))
+		return;
+
+	/* Restore GPU clock */
+	if (WARN_ON_ONCE(clk_set_rate(clk, kbdev->previous_frequency)))
+		dev_err(kbdev->dev, "Failed to restore freq (%lu)\n",
+			kbdev->previous_frequency);
+
+	/* Restart the metrics gathering framework */
+	kbase_pm_metrics_start(kbdev);
+
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+}
+
+static void kbase_pm_gpu_clock_control_worker(struct work_struct *data)
+{
+	struct kbase_device *kbdev = container_of(data, struct kbase_device,
+			pm.backend.gpu_clock_control_work);
+	struct kbase_pm_device_data *pm = &kbdev->pm;
+	struct kbase_pm_backend_data *backend = &pm->backend;
+	unsigned long flags;
+	bool slow_down = false, normalize = false;
+
+	/* Determine if GPU clock control is required */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	if (!backend->gpu_clock_slowed_down &&
+			backend->gpu_clock_slow_down_desired) {
+		slow_down = true;
+		backend->gpu_clock_slowed_down = true;
+	} else if (backend->gpu_clock_slowed_down &&
+			!backend->gpu_clock_slow_down_desired) {
+		normalize = true;
+		backend->gpu_clock_slowed_down = false;
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* Control GPU clock according to the request of the L2 state machine.
+	 * The GPU clock needs to be lowered for safe L2 power down
+	 * and restored to previous speed at L2 power up.
+	 */
+	if (slow_down)
+		kbase_pm_l2_clock_slow(kbdev);
+	else if (normalize)
+		kbase_pm_l2_clock_normalize(kbdev);
+
+	/* Tell the L2 state machine to transition to the next state */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_pm_update_state(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+static void kbase_pm_hwcnt_disable_worker(struct work_struct *data)
+{
+	struct kbase_device *kbdev = container_of(data, struct kbase_device,
+			pm.backend.hwcnt_disable_work);
+	struct kbase_pm_device_data *pm = &kbdev->pm;
+	struct kbase_pm_backend_data *backend = &pm->backend;
+	unsigned long flags;
+
+	bool do_disable;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	do_disable = !backend->hwcnt_desired && !backend->hwcnt_disabled;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (!do_disable)
+		return;
+
+	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	do_disable = !backend->hwcnt_desired && !backend->hwcnt_disabled;
+
+	if (do_disable) {
+		/* PM state did not change while we were doing the disable,
+		 * so commit the work we just performed and continue the state
+		 * machine.
+		 */
+		backend->hwcnt_disabled = true;
+		kbase_pm_update_state(kbdev);
+		kbase_backend_slot_update(kbdev);
+	} else {
+		/* PM state was updated while we were doing the disable,
+		 * so we need to undo the disable we just performed.
+		 */
+		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
+{
+	unsigned long flags;
+
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (!kbdev->pm.backend.gpu_powered)
+		goto unlock_hwaccess;
+
+	if (kbdev->pm.backend.poweroff_wait_in_progress)
+		goto unlock_hwaccess;
+
+	/* Force all cores off */
+	kbdev->pm.backend.shaders_desired = false;
+	kbdev->pm.backend.l2_desired = false;
+
+	kbdev->pm.backend.poweroff_wait_in_progress = true;
+	kbdev->pm.backend.poweroff_is_suspend = is_suspend;
+	kbdev->pm.backend.invoke_poweroff_wait_wq_when_l2_off = true;
+
+	/* l2_desired being false should cause the state machine to
+	 * start powering off the L2. When it actually is powered off,
+	 * the interrupt handler will call kbase_pm_l2_update_state()
+	 * again, which will trigger the kbase_pm_gpu_poweroff_wait_wq.
+	 * Callers of this function will need to wait on poweroff_wait.
+	 */
+	kbase_pm_update_state(kbdev);
+
+unlock_hwaccess:
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
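+/*
+ * Note: despite its name, this helper returns true once the power-off wait
+ * is *no longer* in progress; that is exactly the condition that
+ * kbase_pm_wait_for_poweroff_complete() below sleeps on.
+ */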
+static bool is_poweroff_in_progress(struct kbase_device *kbdev)
+{
+	bool ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	ret = !kbdev->pm.backend.poweroff_wait_in_progress;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return ret;
+}
+
+void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev)
+{
+	wait_event_killable(kbdev->pm.backend.poweroff_wait,
+			is_poweroff_in_progress(kbdev));
+}
+KBASE_EXPORT_TEST_API(kbase_pm_wait_for_poweroff_complete);
+
+int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
+		unsigned int flags)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	unsigned long irq_flags;
+	int ret;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+
+	/* A suspend won't happen during startup/insmod */
+	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+
+	/* Power up the GPU, don't enable IRQs as we are not ready to receive
+	 * them. */
+	ret = kbase_pm_init_hw(kbdev, flags);
+	if (ret) {
+		mutex_unlock(&kbdev->pm.lock);
+		mutex_unlock(&js_devdata->runpool_mutex);
+		return ret;
+	}
+
+	kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
+			kbdev->pm.debug_core_mask[1] =
+			kbdev->pm.debug_core_mask[2] =
+			kbdev->gpu_props.props.raw_props.shader_present;
+
+	/* Pretend the GPU is active to prevent a power policy turning the GPU
+	 * cores off */
+	kbdev->pm.active_count = 1;
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+								irq_flags);
+	/* Ensure cycle counter is off */
+	kbdev->pm.backend.gpu_cycle_counter_requests = 0;
+	spin_unlock_irqrestore(
+			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+								irq_flags);
+
+	/* We are ready to receive IRQs now that the power policy is set up,
+	 * so enable them. */
+#ifdef CONFIG_MALI_DEBUG
+	kbdev->pm.backend.driver_ready_for_irqs = true;
+#endif
+	kbase_pm_enable_interrupts(kbdev);
+
+	/* Turn on the GPU and any cores needed by the policy */
+	kbase_pm_do_poweron(kbdev, false);
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	return 0;
+}
+
+void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	mutex_lock(&kbdev->pm.lock);
+	kbase_pm_do_poweroff(kbdev, false);
+	mutex_unlock(&kbdev->pm.lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);
+
+void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);
+
+	cancel_work_sync(&kbdev->pm.backend.hwcnt_disable_work);
+
+	if (kbdev->pm.backend.hwcnt_disabled) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	}
+
+	/* Free any resources the policy allocated */
+	kbase_pm_state_machine_term(kbdev);
+	kbase_pm_policy_term(kbdev);
+	kbase_pm_ca_term(kbdev);
+
+	/* Shut down the metrics subsystem */
+	kbasep_pm_metrics_term(kbdev);
+
+	destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
+}
+
+void kbase_pm_power_changed(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_pm_update_state(kbdev);
+
+	kbase_backend_slot_update(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
+		u64 new_core_mask_js0, u64 new_core_mask_js1,
+		u64 new_core_mask_js2)
+{
+	kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
+	kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
+	kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
+	kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
+			new_core_mask_js2;
+
+	kbase_pm_update_cores_state_nolock(kbdev);
+}
+
+void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
+{
+	kbase_pm_update_active(kbdev);
+}
+
+void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
+{
+	kbase_pm_update_active(kbdev);
+}
+
+void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+	/* Force power off the GPU and all cores (regardless of policy), only
+	 * after the PM active count reaches zero (otherwise, we risk turning it
+	 * off prematurely) */
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+
+	kbase_pm_do_poweroff(kbdev, true);
+
+	kbase_backend_timer_suspend(kbdev);
+
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	kbase_pm_wait_for_poweroff_complete(kbdev);
+}
+
+void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+
+	kbdev->pm.suspending = false;
+	kbase_pm_do_poweron(kbdev, true);
+
+	kbase_backend_timer_resume(kbdev);
+
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c
new file mode 100644
index 0000000..41f6429
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c
@@ -0,0 +1,106 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Base kernel core availability APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#ifdef CONFIG_MALI_NO_MALI
+#include <backend/gpu/mali_kbase_model_dummy.h>
+#endif
+
+int kbase_pm_ca_init(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+	struct kbase_pm_backend_data *pm_backend = &kbdev->pm.backend;
+
+	if (kbdev->current_core_mask)
+		pm_backend->ca_cores_enabled = kbdev->current_core_mask;
+	else
+		pm_backend->ca_cores_enabled =
+				kbdev->gpu_props.props.raw_props.shader_present;
+#endif
+
+	return 0;
+}
+
+void kbase_pm_ca_term(struct kbase_device *kbdev)
+{
+}
+
+#ifdef CONFIG_MALI_DEVFREQ
+void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
+{
+	struct kbase_pm_backend_data *pm_backend = &kbdev->pm.backend;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (!(core_mask & kbdev->pm.debug_core_mask_all)) {
+		dev_err(kbdev->dev, "OPP core mask 0x%llX does not intersect with debug mask 0x%llX\n",
+				core_mask, kbdev->pm.debug_core_mask_all);
+		goto unlock;
+	}
+
+	pm_backend->ca_cores_enabled = core_mask;
+
+	kbase_pm_update_state(kbdev);
+
+unlock:
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	dev_dbg(kbdev->dev, "Devfreq policy: new core mask=%llX\n",
+			pm_backend->ca_cores_enabled);
+}
+#endif
+
+u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+	struct kbase_pm_backend_data *pm_backend = &kbdev->pm.backend;
+#endif
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+#ifdef CONFIG_MALI_DEVFREQ
+	return pm_backend->ca_cores_enabled & kbdev->pm.debug_core_mask_all;
+#else
+	return kbdev->gpu_props.props.raw_props.shader_present &
+			kbdev->pm.debug_core_mask_all;
+#endif
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_ca_get_core_mask);
+
+u64 kbase_pm_ca_get_instr_core_mask(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+#ifdef CONFIG_MALI_NO_MALI
+	return (((1ull) << KBASE_DUMMY_MODEL_MAX_SHADER_CORES) - 1);
+#else
+	return kbdev->pm.backend.pm_shaders_core_mask;
+#endif
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h
new file mode 100644
index 0000000..5423e96
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h
@@ -0,0 +1,89 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Base kernel core availability APIs
+ */
+
+#ifndef _KBASE_PM_CA_H_
+#define _KBASE_PM_CA_H_
+
+/**
+ * kbase_pm_ca_init - Initialize core availability framework
+ *
+ * Must be called before calling any other core availability function
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 if the core availability framework was successfully initialized,
+ *         -errno otherwise
+ */
+int kbase_pm_ca_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_term - Terminate core availability framework
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_ca_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_get_core_mask - Get currently available shaders core mask
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Returns a mask of the currently available shader cores.
+ * Calls into the core availability policy.
+ *
+ * Return: The bit mask of available cores
+ */
+u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_update_core_status - Update core status
+ *
+ * @kbdev:               The kbase device structure for the device (must be
+ *                       a valid pointer)
+ * @cores_ready:         The bit mask of cores ready for job submission
+ * @cores_transitioning: The bit mask of cores that are transitioning power
+ *                       state
+ *
+ * Update the core availability policy with the current core power status.
+ *
+ * Calls into the core availability policy.
+ */
+void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready,
+						u64 cores_transitioning);
+
+/**
+ * kbase_pm_ca_get_instr_core_mask - Get the PM state sync-ed shaders core mask
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Returns a mask of the PM state synchronised shader cores for arranging
+ * HW performance counter dumps.
+ *
+ * Return: The bit mask of PM state synchronised cores
+ */
+u64 kbase_pm_ca_get_instr_core_mask(struct kbase_device *kbdev);
+
+#endif /* _KBASE_PM_CA_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h
new file mode 100644
index 0000000..f67ec65
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * A core availability policy for use with devfreq, where core masks are
+ * associated with OPPs.
+ */
+
+#ifndef MALI_KBASE_PM_CA_DEVFREQ_H
+#define MALI_KBASE_PM_CA_DEVFREQ_H
+
+/**
+ * struct kbasep_pm_ca_policy_devfreq - Private structure for devfreq ca policy
+ *
+ * This contains data that is private to the devfreq core availability
+ * policy.
+ *
+ * @cores_desired: Cores that the policy wants to be available
+ * @cores_enabled: Cores that the policy is currently returning as available
+ * @cores_used: Cores currently powered or transitioning
+ */
+struct kbasep_pm_ca_policy_devfreq {
+	u64 cores_desired;
+	u64 cores_enabled;
+	u64 cores_used;
+};
+
+extern const struct kbase_pm_ca_policy kbase_pm_ca_devfreq_policy_ops;
+
+/**
+ * kbase_devfreq_set_core_mask - Set core mask for policy to use
+ * @kbdev: Device pointer
+ * @core_mask: New core mask
+ *
+ * The new core mask will have immediate effect if the GPU is powered, or will
+ * take effect when it is next powered on.
+ */
+void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask);
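+
+/*
+ * Illustrative device-tree fragment (an assumption about the platform
+ * integration): core masks are typically attached to OPP entries, from
+ * which devfreq derives the mask passed to kbase_devfreq_set_core_mask(),
+ * e.g.
+ *
+ *   opp-400000000 {
+ *       opp-hz = /bits/ 64 <400000000>;
+ *       opp-core-mask = /bits/ 64 <0xf>;
+ *   };
+ */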
+
+#endif /* MALI_KBASE_PM_CA_DEVFREQ_H */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
new file mode 100644
index 0000000..e90c44d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
@@ -0,0 +1,67 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * "Coarse Demand" power management policy
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static bool coarse_demand_shaders_needed(struct kbase_device *kbdev)
+{
+	return kbase_pm_is_active(kbdev);
+}
+
+static bool coarse_demand_get_core_active(struct kbase_device *kbdev)
+{
+	return kbase_pm_is_active(kbdev);
+}
+
+static void coarse_demand_init(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+static void coarse_demand_term(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+/* The struct kbase_pm_policy structure for the coarse demand power policy.
+ *
+ * This is the static structure that defines the coarse demand power policy's
+ * callbacks and name.
+ */
+const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops = {
+	"coarse_demand",			/* name */
+	coarse_demand_init,			/* init */
+	coarse_demand_term,			/* term */
+	coarse_demand_shaders_needed,		/* shaders_needed */
+	coarse_demand_get_core_active,		/* get_core_active */
+	0u,					/* flags */
+	KBASE_PM_POLICY_ID_COARSE_DEMAND,	/* id */
+};
+
+KBASE_EXPORT_TEST_API(kbase_pm_coarse_demand_policy_ops);
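+
+/*
+ * Note (an assumption about the surrounding driver, not established by this
+ * file alone): coarse_demand is listed first among the policy IDs and is
+ * typically the default policy selected at initialisation time.
+ */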
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h
new file mode 100644
index 0000000..304e5d7
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h
@@ -0,0 +1,69 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * "Coarse Demand" power management policy
+ */
+
+#ifndef MALI_KBASE_PM_COARSE_DEMAND_H
+#define MALI_KBASE_PM_COARSE_DEMAND_H
+
+/**
+ * DOC:
+ * The "Coarse" demand power management policy has the following
+ * characteristics:
+ * - When KBase indicates that the GPU will be powered up, but we don't yet
+ *   know which Job Chains are to be run:
+ *  - Shader Cores are powered up, regardless of whether or not they will be
+ *    needed later.
+ * - When KBase indicates that Shader Cores are needed to submit the currently
+ *   queued Job Chains:
+ *  - Shader Cores are kept powered, regardless of whether or not they will
+ *    be needed.
+ * - When KBase indicates that the GPU need not be powered:
+ *  - The Shader Cores are powered off, and the GPU itself is powered off too.
+ *
+ * Note:
+ * - KBase indicates the GPU will be powered up when it has a User Process that
+ *   has just started to submit Job Chains.
+ * - KBase indicates the GPU need not be powered when all the Job Chains from
+ *   User Processes have finished, and it is waiting for a User Process to
+ *   submit some more Job Chains.
+ */
+
+/**
+ * struct kbasep_pm_policy_coarse_demand - Private structure for coarse demand
+ *                                         policy
+ *
+ * This contains data that is private to the coarse demand power policy.
+ *
+ * @dummy: Dummy member - no state needed
+ */
+struct kbasep_pm_policy_coarse_demand {
+	int dummy;
+};
+
+extern const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops;
+
+#endif /* MALI_KBASE_PM_COARSE_DEMAND_H */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
new file mode 100644
index 0000000..d7dc63a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
@@ -0,0 +1,507 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Backend-specific Power Manager definitions
+ */
+
+#ifndef _KBASE_PM_HWACCESS_DEFS_H_
+#define _KBASE_PM_HWACCESS_DEFS_H_
+
+#include "mali_kbase_pm_always_on.h"
+#include "mali_kbase_pm_coarse_demand.h"
+#if !MALI_CUSTOMER_RELEASE
+#include "mali_kbase_pm_always_on_demand.h"
+#endif
+
+/* Forward declarations - see mali_kbase.h */
+struct kbase_device;
+struct kbase_jd_atom;
+
+/* Maximum number of PM policies that may be active on a device. */
+#define KBASE_PM_MAX_NUM_POLICIES (10)
+
+/**
+ * enum kbase_pm_core_type - The types of core in a GPU.
+ *
+ * These enumerated values are used in calls to
+ * - kbase_pm_get_present_cores()
+ * - kbase_pm_get_active_cores()
+ * - kbase_pm_get_trans_cores()
+ * - kbase_pm_get_ready_cores().
+ *
+ * They specify which type of core should be acted on.  These values are set in
+ * a manner that allows the core_type_to_reg() function to be simpler and more
+ * efficient.
+ *
+ * @KBASE_PM_CORE_L2: The L2 cache
+ * @KBASE_PM_CORE_SHADER: Shader cores
+ * @KBASE_PM_CORE_TILER: Tiler cores
+ * @KBASE_PM_CORE_STACK: Core stacks
+ */
+enum kbase_pm_core_type {
+	KBASE_PM_CORE_L2 = L2_PRESENT_LO,
+	KBASE_PM_CORE_SHADER = SHADER_PRESENT_LO,
+	KBASE_PM_CORE_TILER = TILER_PRESENT_LO,
+	KBASE_PM_CORE_STACK = STACK_PRESENT_LO
+};
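+
+/*
+ * Example (illustrative): because each enumerator equals the _PRESENT_LO
+ * register offset of its core type, core_type_to_reg() can derive related
+ * registers by adding a constant action offset to the enum value instead of
+ * switching on the core type.
+ */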
+
+/**
+ * enum kbase_l2_core_state - The states used for the L2 cache & tiler power
+ *                            state machine.
+ *
+ * @KBASE_L2_OFF: The L2 cache and tiler are off
+ * @KBASE_L2_PEND_ON: The L2 cache and tiler are powering on
+ * @KBASE_L2_RESTORE_CLOCKS: The GPU clock is restored. Conditionally used.
+ * @KBASE_L2_ON_HWCNT_ENABLE: The L2 cache and tiler are on, and hwcnt is being
+ *                            enabled
+ * @KBASE_L2_ON: The L2 cache and tiler are on, and hwcnt is enabled
+ * @KBASE_L2_ON_HWCNT_DISABLE: The L2 cache and tiler are on, and hwcnt is being
+ *                             disabled
+ * @KBASE_L2_SLOW_DOWN_CLOCKS: The GPU clock is set to an appropriate or the
+ *                             lowest available frequency. Conditionally used.
+ * @KBASE_L2_POWER_DOWN: The L2 cache and tiler are about to be powered off
+ * @KBASE_L2_PEND_OFF: The L2 cache and tiler are powering off
+ * @KBASE_L2_RESET_WAIT: The GPU is resetting, L2 cache and tiler power state
+ *                       are unknown
+ */
+enum kbase_l2_core_state {
+#define KBASEP_L2_STATE(n) KBASE_L2_ ## n,
+#include "mali_kbase_pm_l2_states.h"
+#undef KBASEP_L2_STATE
+};
+
+/**
+ * enum kbase_shader_core_state - The states used for the shaders' state machine.
+ *
+ * @KBASE_SHADERS_OFF_CORESTACK_OFF: The shaders and core stacks are off
+ * @KBASE_SHADERS_OFF_CORESTACK_PEND_ON: The shaders are off, core stacks have
+ *                                       been requested to power on and hwcnt
+ *                                       is being disabled
+ * @KBASE_SHADERS_PEND_ON_CORESTACK_ON: Core stacks are on, shaders have been
+ *                                      requested to power on.
+ * @KBASE_SHADERS_ON_CORESTACK_ON: The shaders and core stacks are on, and
+ *                                 hwcnt is already enabled.
+ * @KBASE_SHADERS_ON_CORESTACK_ON_RECHECK: The shaders and core stacks
+ *                                      are on, hwcnt is disabled, and checks
+ *                                      are made whether to power down or
+ *                                      re-enable hwcnt.
+ * @KBASE_SHADERS_WAIT_OFF_CORESTACK_ON: The shaders have been requested to
+ *                                       power off, but they remain on for the
+ *                                       duration of the hysteresis timer
+ * @KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON: The hysteresis timer has expired
+ * @KBASE_SHADERS_L2_FLUSHING_CORESTACK_ON: The core stacks are on and the
+ *                                          level 2 cache is being flushed.
+ * @KBASE_SHADERS_READY_OFF_CORESTACK_ON: The core stacks are on and the shaders
+ *                                        are ready to be powered off.
+ * @KBASE_SHADERS_PEND_OFF_CORESTACK_ON: The core stacks are on, and the shaders
+ *                                       have been requested to power off
+ * @KBASE_SHADERS_OFF_CORESTACK_PEND_OFF: The shaders are off, and the core stacks
+ *                                        have been requested to power off
+ * @KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF: Shaders and corestacks are
+ *                                                  off, but the tick timer
+ *                                                  cancellation is still
+ *                                                  pending.
+ * @KBASE_SHADERS_RESET_WAIT: The GPU is resetting, shader and core stack power
+ *                            states are unknown
+ */
+enum kbase_shader_core_state {
+#define KBASEP_SHADER_STATE(n) KBASE_SHADERS_ ## n,
+#include "mali_kbase_pm_shader_states.h"
+#undef KBASEP_SHADER_STATE
+};
+
+/**
+ * struct kbasep_pm_metrics - Metrics data collected for use by the power
+ *                            management framework.
+ *
+ *  @time_busy: number of ns the GPU was busy executing jobs since the
+ *          @time_period_start timestamp.
+ *  @time_idle: number of ns the GPU was not executing jobs since the
+ *          @time_period_start timestamp.
+ *  @busy_cl: number of ns the GPU was busy executing CL jobs. Note that
+ *           if two CL jobs were active for 400ns, this value would be updated
+ *           with 800.
+ *  @busy_gl: number of ns the GPU was busy executing GL jobs. Note that
+ *           if two GL jobs were active for 400ns, this value would be updated
+ *           with 800.
+ */
+struct kbasep_pm_metrics {
+	u32 time_busy;
+	u32 time_idle;
+	u32 busy_cl[2];
+	u32 busy_gl;
+};
+
+/**
+ * struct kbasep_pm_metrics_state - State required to collect the metrics in
+ *                                  struct kbasep_pm_metrics
+ *  @time_period_start: time at which busy/idle measurements started
+ *  @gpu_active: true when the GPU is executing jobs, false when not. Updated
+ *           when the job scheduler informs us that a job is submitted to or
+ *           removed from a GPU slot.
+ *  @active_cl_ctx: number of CL jobs active on the GPU. Array is per-device.
+ *  @active_gl_ctx: number of GL jobs active on the GPU. Array is per-slot.
+ *  @lock: spinlock protecting the kbasep_pm_metrics_data structure
+ *  @platform_data: pointer to data controlled by platform specific code
+ *  @kbdev: pointer to kbase device for which metrics are collected
+ *  @values: The current values of the power management metrics. The
+ *           kbase_pm_get_dvfs_metrics() function is used to compare these
+ *           current values with the saved values from a previous invocation.
+ *  @timer: timer to regularly make DVFS decisions based on the power
+ *           management metrics.
+ *  @timer_active: boolean indicating @timer is running
+ *  @dvfs_last: values of the PM metrics from the last DVFS tick
+ *  @dvfs_diff: difference between the current and previous PM metrics.
+ */
+struct kbasep_pm_metrics_state {
+	ktime_t time_period_start;
+	bool gpu_active;
+	u32 active_cl_ctx[2];
+	u32 active_gl_ctx[3];
+	spinlock_t lock;
+
+	void *platform_data;
+	struct kbase_device *kbdev;
+
+	struct kbasep_pm_metrics values;
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	struct hrtimer timer;
+	bool timer_active;
+	struct kbasep_pm_metrics dvfs_last;
+	struct kbasep_pm_metrics dvfs_diff;
+#endif
+};
+
+/**
+ * struct kbasep_pm_tick_timer_state - State for the shader hysteresis timer
+ * @wq: Work queue on which to wait for the timer to stop
+ * @work: Work item which cancels the timer
+ * @timer: Timer for powering off the shader cores
+ * @configured_interval: Period of GPU poweroff timer
+ * @configured_ticks: User-configured number of ticks to wait after the shader
+ *                    power down request is received before turning off the cores
+ * @remaining_ticks: Number of remaining timer ticks until shaders are powered off
+ * @cancel_queued: True if the cancellation work item has been queued. This is
+ *                 required to ensure that it is not queued twice, e.g. after
+ *                 a reset, which could cause the timer to be incorrectly
+ *                 cancelled later by a delayed workitem.
+ * @needed: Whether the timer should restart itself
+ */
+struct kbasep_pm_tick_timer_state {
+	struct workqueue_struct *wq;
+	struct work_struct work;
+	struct hrtimer timer;
+
+	ktime_t configured_interval;
+	unsigned int configured_ticks;
+	unsigned int remaining_ticks;
+
+	bool cancel_queued;
+	bool needed;
+};
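+
+/*
+ * Worked example (illustrative): with configured_interval = 100 ms and
+ * configured_ticks = 2, shader cores remain powered for roughly 200 ms
+ * after the power-down request before the state machine may turn them off.
+ */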
+
+union kbase_pm_policy_data {
+	struct kbasep_pm_policy_always_on always_on;
+	struct kbasep_pm_policy_coarse_demand coarse_demand;
+#if !MALI_CUSTOMER_RELEASE
+	struct kbasep_pm_policy_always_on_demand always_on_demand;
+#endif
+};
+
+/**
+ * struct kbase_pm_backend_data - Data stored per device for power management.
+ *
+ * This structure contains data for the power management framework. There is one
+ * instance of this structure per device in the system.
+ *
+ * @pm_current_policy: The policy that is currently actively controlling the
+ *                     power state.
+ * @pm_policy_data:    Private data for current PM policy
+ * @reset_done:        Flag set when a reset is complete
+ * @reset_done_wait:   Wait queue to wait for changes to @reset_done
+ * @gpu_cycle_counter_requests: The reference count of active gpu cycle counter
+ *                              users
+ * @gpu_cycle_counter_requests_lock: Lock to protect @gpu_cycle_counter_requests
+ * @gpu_in_desired_state_wait: Wait queue set when the GPU is in the desired
+ *                             state according to the L2 and shader power state
+ *                             machines
+ * @gpu_powered:       Set to true when the GPU is powered and register
+ *                     accesses are possible, false otherwise. Access to this
+ *                     variable should be protected by: both the hwaccess_lock
+ *                     spinlock and the pm.lock mutex for writes; or at least
+ *                     one of either lock for reads.
+ * @pm_shaders_core_mask: Shader PM state synchronised shaders core mask. It
+ *                     holds the cores enabled in a hardware counters dump,
+ *                     and may differ from @shaders_avail when under different
+ *                     states and transitions.
+ * @cg1_disabled:      Set if the policy wants to keep the second core group
+ *                     powered off
+ * @driver_ready_for_irqs: Debug state indicating whether sufficient
+ *                         initialization of the driver has occurred to handle
+ *                         IRQs
+ * @metrics:           Structure to hold metrics for the GPU
+ * @shader_tick_timer: Structure to hold the shader poweroff tick timer state
+ * @poweroff_wait_in_progress: true if a wait for GPU power off is in progress.
+ *                             hwaccess_lock must be held when accessing
+ * @invoke_poweroff_wait_wq_when_l2_off: flag indicating that the L2 power state
+ *                                       machine should invoke the poweroff
+ *                                       worker after the L2 has turned off.
+ * @poweron_required: true if a GPU power on is required. Should only be set
+ *                    when poweroff_wait_in_progress is true, and therefore the
+ *                    GPU cannot immediately be powered on. pm.lock must be
+ *                    held when accessing
+ * @poweroff_is_suspend: true if the GPU is being powered off due to a suspend
+ *                       request. pm.lock must be held when accessing
+ * @gpu_poweroff_wait_wq: workqueue for waiting for GPU to power off
+ * @gpu_poweroff_wait_work: work item for use with @gpu_poweroff_wait_wq
+ * @poweroff_wait: waitqueue for waiting for @gpu_poweroff_wait_work to complete
+ * @callback_power_on: Callback when the GPU needs to be turned on. See
+ *                     &struct kbase_pm_callback_conf
+ * @callback_power_off: Callback when the GPU may be turned off. See
+ *                     &struct kbase_pm_callback_conf
+ * @callback_power_suspend: Callback when a suspend occurs and the GPU needs to
+ *                          be turned off. See &struct kbase_pm_callback_conf
+ * @callback_power_resume: Callback when a resume occurs and the GPU needs to
+ *                          be turned on. See &struct kbase_pm_callback_conf
+ * @callback_power_runtime_on: Callback when the GPU needs to be turned on. See
+ *                             &struct kbase_pm_callback_conf
+ * @callback_power_runtime_off: Callback when the GPU may be turned off. See
+ *                              &struct kbase_pm_callback_conf
+ * @callback_power_runtime_idle: Optional callback when the GPU may be idle. See
+ *                              &struct kbase_pm_callback_conf
+ * @ca_cores_enabled: Cores that are currently available
+ * @l2_state:     The current state of the L2 cache state machine. See
+ *                &enum kbase_l2_core_state
+ * @l2_desired:   True if the L2 cache should be powered on by the L2 cache state
+ *                machine
+ * @l2_always_on: If true, disable powering down of l2 cache.
+ * @shaders_state: The current state of the shader state machine.
+ * @shaders_avail: This is updated by the state machine when it is in a state
+ *                 where it can handle changes to the core availability. This
+ *                 is internal to the shader state machine and should *not* be
+ *                 modified elsewhere.
+ * @shaders_desired: True if the PM active count or power policy requires the
+ *                   shader cores to be on. This is used as an input to the
+ *                   shader power state machine.  The current state of the
+ *                   cores may be different, but there should be transitions in
+ *                   progress that will eventually achieve this state (assuming
+ *                   that the policy doesn't change its mind in the mean time).
+ * @in_reset: True if a GPU is resetting and normal power manager operation is
+ *            suspended
+ * @protected_entry_transition_override : True if GPU reset is being used
+ *                                  before entering the protected mode and so
+ *                                  the reset handling behaviour is being
+ *                                  overridden.
+ * @protected_transition_override : True if a protected mode transition is in
+ *                                  progress and is overriding power manager
+ *                                  behaviour.
+ * @protected_l2_override : Non-zero if the L2 cache is required during a
+ *                          protected mode transition. Has no effect if not
+ *                          transitioning.
+ * @hwcnt_desired: True if we want GPU hardware counters to be enabled.
+ * @hwcnt_disabled: True if GPU hardware counters are not enabled.
+ * @hwcnt_disable_work: Work item to disable GPU hardware counters, used if
+ *                      atomic disable is not possible.
+ * @gpu_clock_suspend_freq: Clock from the 'opp-mali-errata-1485982' entry in
+ *                          the opp table, used for a safe L2 power cycle.
+ *                          If no opp-mali-errata-1485982 entry is specified,
+ *                          the slowest clock will be used.
+ * @gpu_clock_slow_down_wa: If true, slow down GPU clock during L2 power cycle.
+ * @gpu_clock_slow_down_desired: True if we want a lower GPU clock for a
+ *                             safe L2 power cycle, false if we want the GPU
+ *                             clock restored to its normal speed. This is
+ *                             updated only in the L2 state machine,
+ *                             kbase_pm_l2_update_state.
+ * @gpu_clock_slowed_down: During an L2 power cycle, true if the GPU clock is
+ *                         set to a lower frequency for safe L2 power down,
+ *                         false once the GPU clock has been restored to its
+ *                         previous speed. This is updated only in the work
+ *                         function, kbase_pm_gpu_clock_control_worker.
+ * @gpu_clock_control_work: work item to set GPU clock during L2 power cycle
+ *                          using gpu_clock_control
+ *
+ * Note:
+ * During an IRQ, @pm_current_policy can be NULL when the policy is being
+ * changed with kbase_pm_set_policy(). The change is protected under
+ * kbase_device.pm.power_change_lock. Direct access to this from IRQ context
+ * must therefore check for NULL. If NULL, then kbase_pm_set_policy() will
+ * re-issue the policy functions that would have been done under IRQ.
+ */
+struct kbase_pm_backend_data {
+	const struct kbase_pm_policy *pm_current_policy;
+	union kbase_pm_policy_data pm_policy_data;
+	bool reset_done;
+	wait_queue_head_t reset_done_wait;
+	int gpu_cycle_counter_requests;
+	spinlock_t gpu_cycle_counter_requests_lock;
+
+	wait_queue_head_t gpu_in_desired_state_wait;
+
+	bool gpu_powered;
+
+	u64 pm_shaders_core_mask;
+
+	bool cg1_disabled;
+
+#ifdef CONFIG_MALI_DEBUG
+	bool driver_ready_for_irqs;
+#endif /* CONFIG_MALI_DEBUG */
+
+	struct kbasep_pm_metrics_state metrics;
+
+	struct kbasep_pm_tick_timer_state shader_tick_timer;
+
+	bool poweroff_wait_in_progress;
+	bool invoke_poweroff_wait_wq_when_l2_off;
+	bool poweron_required;
+	bool poweroff_is_suspend;
+
+	struct workqueue_struct *gpu_poweroff_wait_wq;
+	struct work_struct gpu_poweroff_wait_work;
+
+	wait_queue_head_t poweroff_wait;
+
+	int (*callback_power_on)(struct kbase_device *kbdev);
+	void (*callback_power_off)(struct kbase_device *kbdev);
+	void (*callback_power_suspend)(struct kbase_device *kbdev);
+	void (*callback_power_resume)(struct kbase_device *kbdev);
+	int (*callback_power_runtime_on)(struct kbase_device *kbdev);
+	void (*callback_power_runtime_off)(struct kbase_device *kbdev);
+	int (*callback_power_runtime_idle)(struct kbase_device *kbdev);
+
+	u64 ca_cores_enabled;
+
+	enum kbase_l2_core_state l2_state;
+	enum kbase_shader_core_state shaders_state;
+	u64 shaders_avail;
+	bool l2_desired;
+	bool l2_always_on;
+	bool shaders_desired;
+
+	bool in_reset;
+
+	bool protected_entry_transition_override;
+	bool protected_transition_override;
+	int protected_l2_override;
+
+	bool hwcnt_desired;
+	bool hwcnt_disabled;
+	struct work_struct hwcnt_disable_work;
+
+	u64 gpu_clock_suspend_freq;
+	bool gpu_clock_slow_down_wa;
+	bool gpu_clock_slow_down_desired;
+	bool gpu_clock_slowed_down;
+	struct work_struct gpu_clock_control_work;
+};
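+
+/*
+ * Illustrative sketch (not compiled in): how IRQ-context code is expected
+ * to guard against pm_current_policy being NULL while a policy change is
+ * in flight, per the note above. The helper name is hypothetical.
+ */
+#if 0
+static bool example_policy_core_active(struct kbase_device *kbdev)
+{
+	const struct kbase_pm_policy *policy =
+		kbdev->pm.backend.pm_current_policy;
+
+	if (!policy)
+		return false; /* kbase_pm_set_policy() will re-issue this */
+
+	return policy->get_core_active(kbdev);
+}
+#endif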
+
+
+/* List of policy IDs */
+enum kbase_pm_policy_id {
+	KBASE_PM_POLICY_ID_COARSE_DEMAND,
+#if !MALI_CUSTOMER_RELEASE
+	KBASE_PM_POLICY_ID_ALWAYS_ON_DEMAND,
+#endif
+	KBASE_PM_POLICY_ID_ALWAYS_ON,
+	KBASE_PM_POLICY_ID_DEMAND
+};
+
+typedef u32 kbase_pm_policy_flags;
+
+#define KBASE_PM_POLICY_FLAG_DISABLED_WITH_POWER_DOWN_ONLY (1u)
+
+/**
+ * struct kbase_pm_policy - Power policy structure.
+ *
+ * Each power policy exposes a (static) instance of this structure which
+ * contains function pointers to the policy's methods.
+ *
+ * @name:               The name of this policy
+ * @init:               Function called when the policy is selected
+ * @term:               Function called when the policy is unselected
+ * @shaders_needed:     Function called to find out if shader cores are needed
+ * @get_core_active:    Function called to get the current overall GPU power
+ *                      state
+ * @flags:              Field indicating flags for this policy
+ * @id:                 Field indicating an ID for this policy. This is not
+ *                      necessarily the same as its index in the list returned
+ *                      by kbase_pm_list_policies().
+ *                      It is used purely for debugging.
+ */
+struct kbase_pm_policy {
+	char *name;
+
+	/**
+	 * Function called when the policy is selected
+	 *
+	 * This should initialize the kbdev->pm.pm_policy_data structure. It
+	 * should not attempt to make any changes to hardware state.
+	 *
+	 * It is undefined what state the cores are in when the function is
+	 * called.
+	 *
+	 * @kbdev: The kbase device structure for the device (must be a
+	 *         valid pointer)
+	 */
+	void (*init)(struct kbase_device *kbdev);
+
+	/**
+	 * Function called when the policy is unselected.
+	 *
+	 * @kbdev: The kbase device structure for the device (must be a
+	 *         valid pointer)
+	 */
+	void (*term)(struct kbase_device *kbdev);
+
+	/**
+	 * Function called to find out if shader cores are needed
+	 *
+	 * This needs to at least satisfy kbdev->pm.backend.shaders_desired,
+	 * and so must never return false when shaders_desired is true.
+	 *
+	 * @kbdev: The kbase device structure for the device (must be a
+	 *         valid pointer)
+	 *
+	 * Return: true if shader cores are needed, false otherwise
+	 */
+	bool (*shaders_needed)(struct kbase_device *kbdev);
+
+	/**
+	 * Function called to get the current overall GPU power state
+	 *
+	 * This function must meet or exceed the requirements for power
+	 * indicated by kbase_pm_is_active().
+	 *
+	 * @kbdev: The kbase device structure for the device (must be a
+	 *         valid pointer)
+	 *
+	 * Return: true if the GPU should be powered, false otherwise
+	 */
+	bool (*get_core_active)(struct kbase_device *kbdev);
+
+	kbase_pm_policy_flags flags;
+	enum kbase_pm_policy_id id;
+};
+
+#endif /* _KBASE_PM_HWACCESS_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c
new file mode 100644
index 0000000..01727d6
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c
@@ -0,0 +1,68 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * A simple demand based power management policy
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static bool demand_shaders_needed(struct kbase_device *kbdev)
+{
+	return (kbdev->shader_needed_cnt > 0);
+}
+
+static bool demand_get_core_active(struct kbase_device *kbdev)
+{
+	return kbase_pm_is_active(kbdev);
+}
+
+static void demand_init(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+static void demand_term(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+/*
+ * The struct kbase_pm_policy structure for the demand power policy.
+ *
+ * This is the static structure that defines the demand power policy's callback
+ * and name.
+ */
+const struct kbase_pm_policy kbase_pm_demand_policy_ops = {
+	"demand",			/* name */
+	demand_init,			/* init */
+	demand_term,			/* term */
+	demand_shaders_needed,		/* shaders_needed */
+	demand_get_core_active,		/* get_core_active */
+	0u,				/* flags */
+	KBASE_PM_POLICY_ID_DEMAND,	/* id */
+};
+
+KBASE_EXPORT_TEST_API(kbase_pm_demand_policy_ops);
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h
new file mode 100644
index 0000000..4b05e6d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h
@@ -0,0 +1,69 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * A simple demand based power management policy
+ */
+
+#ifndef MALI_KBASE_PM_DEMAND_H
+#define MALI_KBASE_PM_DEMAND_H
+
+/**
+ * DOC: Demand power management policy
+ *
+ * The demand power management policy has the following characteristics:
+ * - When KBase indicates that the GPU will be powered up, but we don't yet
+ *   know which Job Chains are to be run:
+ *  - The Shader Cores are not powered up
+ *
+ * - When KBase indicates that Shader Cores are needed to submit the currently
+ *   queued Job Chains:
+ *  - Shader Cores are powered up
+ *
+ * - When KBase indicates that the GPU need not be powered:
+ *  - The Shader Cores are powered off, and the GPU itself is powered off too.
+ *
+ * Note:
+ * - KBase indicates the GPU will be powered up when it has a User Process that
+ *   has just started to submit Job Chains.
+ *
+ * - KBase indicates the GPU need not be powered when all the Job Chains from
+ *   User Processes have finished, and it is waiting for a User Process to
+ *   submit some more Job Chains.
+ */
+
+/**
+ * struct kbasep_pm_policy_demand - Private structure for policy instance data
+ *
+ * @dummy: No state is needed, a dummy variable
+ *
+ * This contains data that is private to the demand power policy.
+ */
+struct kbasep_pm_policy_demand {
+	int dummy;
+};
+
+extern const struct kbase_pm_policy kbase_pm_demand_policy_ops;
+
+#endif /* MALI_KBASE_PM_DEMAND_H */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
new file mode 100644
index 0000000..92d3818
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
@@ -0,0 +1,2156 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Base kernel Power Management hardware control
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_tracepoints.h>
+#include <mali_kbase_pm.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_kbase_smc.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_reset_gpu.h>
+#include <mali_kbase_ctx_sched.h>
+#include <mali_kbase_hwcnt_context.h>
+#include <backend/gpu/mali_kbase_cache_policy_backend.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_l2_mmu_config.h>
+
+#include <linux/of.h>
+
+#ifdef CONFIG_MALI_CORESTACK
+bool corestack_driver_control = true;
+#else
+bool corestack_driver_control; /* Default value of 0/false */
+#endif
+module_param(corestack_driver_control, bool, 0444);
+MODULE_PARM_DESC(corestack_driver_control,
+		"Let the driver power on/off the GPU core stack independently "
+		"without involving the Power Domain Controller. This should "
+		"only be enabled on platforms for which integration of the PDC "
+		"to the Mali GPU is known to be problematic.");
+KBASE_EXPORT_TEST_API(corestack_driver_control);
+
+#ifdef CONFIG_MALI_PLATFORM_POWER_DOWN_ONLY
+bool platform_power_down_only = true;
+#else
+bool platform_power_down_only; /* Default value of 0/false */
+#endif
+module_param(platform_power_down_only, bool, 0444);
+MODULE_PARM_DESC(platform_power_down_only,
+		"Disable power down of individual cores.");
+
+/**
+ * enum kbasep_pm_action - Actions that can be performed on a core.
+ *
+ * This enumeration is private to the file. Its values are set to allow the
+ * core_type_to_reg() function, which decodes this enumeration, to be simpler
+ * and more efficient.
+ *
+ * @ACTION_PRESENT: The cores that are present
+ * @ACTION_READY: The cores that are ready
+ * @ACTION_PWRON: Power on the cores specified
+ * @ACTION_PWROFF: Power off the cores specified
+ * @ACTION_PWRTRANS: The cores that are transitioning
+ * @ACTION_PWRACTIVE: The cores that are active
+ */
+enum kbasep_pm_action {
+	ACTION_PRESENT = 0,
+	ACTION_READY = (SHADER_READY_LO - SHADER_PRESENT_LO),
+	ACTION_PWRON = (SHADER_PWRON_LO - SHADER_PRESENT_LO),
+	ACTION_PWROFF = (SHADER_PWROFF_LO - SHADER_PRESENT_LO),
+	ACTION_PWRTRANS = (SHADER_PWRTRANS_LO - SHADER_PRESENT_LO),
+	ACTION_PWRACTIVE = (SHADER_PWRACTIVE_LO - SHADER_PRESENT_LO)
+};
+
+static u64 kbase_pm_get_state(
+		struct kbase_device *kbdev,
+		enum kbase_pm_core_type core_type,
+		enum kbasep_pm_action action);
+
+static bool kbase_pm_is_l2_desired(struct kbase_device *kbdev)
+{
+	if (kbdev->pm.backend.protected_entry_transition_override)
+		return false;
+
+	if (kbdev->pm.backend.protected_transition_override &&
+			kbdev->pm.backend.protected_l2_override)
+		return true;
+
+	if (kbdev->pm.backend.protected_transition_override &&
+			!kbdev->pm.backend.shaders_desired)
+		return false;
+
+	return kbdev->pm.backend.l2_desired;
+}
+
+void kbase_pm_protected_override_enable(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbdev->pm.backend.protected_transition_override = true;
+}
+void kbase_pm_protected_override_disable(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbdev->pm.backend.protected_transition_override = false;
+}
+
+int kbase_pm_protected_entry_override_enable(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ON(!kbdev->protected_mode_transition);
+
+	if (kbdev->pm.backend.l2_always_on &&
+	    (kbdev->system_coherency == COHERENCY_ACE)) {
+		WARN_ON(kbdev->pm.backend.protected_entry_transition_override);
+
+		/*
+		 * If there is already a GPU reset pending then wait for it to
+		 * complete before initiating a special reset for protected
+		 * mode entry.
+		 */
+		if (kbase_reset_gpu_silent(kbdev))
+			return -EAGAIN;
+
+		kbdev->pm.backend.protected_entry_transition_override = true;
+	}
+
+	return 0;
+}
+
+void kbase_pm_protected_entry_override_disable(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ON(!kbdev->protected_mode_transition);
+
+	if (kbdev->pm.backend.l2_always_on &&
+	    (kbdev->system_coherency == COHERENCY_ACE)) {
+		WARN_ON(!kbdev->pm.backend.protected_entry_transition_override);
+
+		kbdev->pm.backend.protected_entry_transition_override = false;
+	}
+}
+
+void kbase_pm_protected_l2_override(struct kbase_device *kbdev, bool override)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (override) {
+		kbdev->pm.backend.protected_l2_override++;
+		WARN_ON(kbdev->pm.backend.protected_l2_override <= 0);
+	} else {
+		kbdev->pm.backend.protected_l2_override--;
+		WARN_ON(kbdev->pm.backend.protected_l2_override < 0);
+	}
+
+	kbase_pm_update_state(kbdev);
+}
+
+/**
+ * core_type_to_reg - Decode a core type and action to a register.
+ *
+ * Given a core type (defined by kbase_pm_core_type) and an action (defined
+ * by kbasep_pm_action) this function will return the register offset that
+ * will perform the action on the core type. The register returned is the _LO
+ * register and an offset must be applied to use the _HI register.
+ *
+ * @core_type: The type of core
+ * @action:    The type of action
+ *
+ * Return: The register offset of the _LO register that performs an action of
+ * type @action on a core of type @core_type.
+ */
+static u32 core_type_to_reg(enum kbase_pm_core_type core_type,
+						enum kbasep_pm_action action)
+{
+	if (corestack_driver_control) {
+		if (core_type == KBASE_PM_CORE_STACK) {
+			switch (action) {
+			case ACTION_PRESENT:
+				return STACK_PRESENT_LO;
+			case ACTION_READY:
+				return STACK_READY_LO;
+			case ACTION_PWRON:
+				return STACK_PWRON_LO;
+			case ACTION_PWROFF:
+				return STACK_PWROFF_LO;
+			case ACTION_PWRTRANS:
+				return STACK_PWRTRANS_LO;
+			default:
+				WARN(1, "Invalid action for core type\n");
+			}
+		}
+	}
+
+	return (u32)core_type + (u32)action;
+}
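+
+/*
+ * Worked example (illustrative, on the assumption that each
+ * kbase_pm_core_type value is that core type's _PRESENT_LO register offset,
+ * which is what the addition above relies on): since each kbasep_pm_action
+ * is a delta from _PRESENT_LO, the sum selects the right register, e.g.
+ *
+ *   core_type_to_reg(KBASE_PM_CORE_SHADER, ACTION_READY)
+ *     == SHADER_PRESENT_LO + (SHADER_READY_LO - SHADER_PRESENT_LO)
+ *     == SHADER_READY_LO
+ */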
+
+#ifdef CONFIG_ARM64
+static void mali_cci_flush_l2(struct kbase_device *kbdev)
+{
+	const u32 mask = CLEAN_CACHES_COMPLETED | RESET_COMPLETED;
+	u32 loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+	u32 raw;
+
+	/*
+	 * Note that we don't take the cache flush mutex here, since
+	 * we expect to be the last user of the L2; all other L2 users
+	 * would have dropped their references to initiate L2 power
+	 * down, and L2 power down is the only valid place for this
+	 * to be called from.
+	 */
+
+	kbase_reg_write(kbdev,
+			GPU_CONTROL_REG(GPU_COMMAND),
+			GPU_COMMAND_CLEAN_INV_CACHES);
+
+	raw = kbase_reg_read(kbdev,
+		GPU_CONTROL_REG(GPU_IRQ_RAWSTAT));
+
+	/* Wait for the cache flush to complete before continuing; exit on
+	 * GPU reset or loop expiry.
+	 */
+	while (((raw & mask) == 0) && --loops) {
+		raw = kbase_reg_read(kbdev,
+					GPU_CONTROL_REG(GPU_IRQ_RAWSTAT));
+	}
+}
+#endif
+
+/**
+ * kbase_pm_invoke - Invokes an action on a core set
+ *
+ * This function performs the action given by @action on a set of cores of a
+ * type given by @core_type. It is a static function used by
+ * kbase_pm_transition_core_type()
+ *
+ * @kbdev:     The kbase device structure of the device
+ * @core_type: The type of core that the action should be performed on
+ * @cores:     A bit mask of cores to perform the action on (low 32 bits)
+ * @action:    The action to perform on the cores
+ */
+static void kbase_pm_invoke(struct kbase_device *kbdev,
+					enum kbase_pm_core_type core_type,
+					u64 cores,
+					enum kbasep_pm_action action)
+{
+	u32 reg;
+	u32 lo = cores & 0xFFFFFFFF;
+	u32 hi = (cores >> 32) & 0xFFFFFFFF;
+
+	/* When 'platform_power_down_only' is enabled, no core type should be
+	 * turned off individually.
+	 */
+	KBASE_DEBUG_ASSERT(!(action == ACTION_PWROFF &&
+			platform_power_down_only));
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	reg = core_type_to_reg(core_type, action);
+
+	KBASE_DEBUG_ASSERT(reg);
+
+	if (cores) {
+		u64 state = kbase_pm_get_state(kbdev, core_type, ACTION_READY);
+
+		if (action == ACTION_PWRON)
+			state |= cores;
+		else if (action == ACTION_PWROFF)
+			state &= ~cores;
+		KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, state);
+	}
+
+	/* Tracing */
+	if (cores) {
+		if (action == ACTION_PWRON)
+			switch (core_type) {
+			case KBASE_PM_CORE_SHADER:
+				KBASE_TRACE_ADD(kbdev, PM_PWRON, NULL, NULL, 0u,
+									lo);
+				break;
+			case KBASE_PM_CORE_TILER:
+				KBASE_TRACE_ADD(kbdev, PM_PWRON_TILER, NULL,
+								NULL, 0u, lo);
+				break;
+			case KBASE_PM_CORE_L2:
+				KBASE_TRACE_ADD(kbdev, PM_PWRON_L2, NULL, NULL,
+									0u, lo);
+				break;
+			default:
+				break;
+			}
+		else if (action == ACTION_PWROFF)
+			switch (core_type) {
+			case KBASE_PM_CORE_SHADER:
+				KBASE_TRACE_ADD(kbdev, PM_PWROFF, NULL, NULL,
+									0u, lo);
+				break;
+			case KBASE_PM_CORE_TILER:
+				KBASE_TRACE_ADD(kbdev, PM_PWROFF_TILER, NULL,
+								NULL, 0u, lo);
+				break;
+			case KBASE_PM_CORE_L2:
+				KBASE_TRACE_ADD(kbdev, PM_PWROFF_L2, NULL, NULL,
+									0u, lo);
+				/* disable snoops before L2 is turned off */
+				kbase_pm_cache_snoop_disable(kbdev);
+				break;
+			default:
+				break;
+			}
+	}
+
+	if (lo != 0)
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo);
+
+	if (hi != 0)
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi);
+}
+
+/**
+ * kbase_pm_get_state - Get information about a core set
+ *
+ * This function gets information (chosen by @action) about a set of cores of
+ * a type given by @core_type. It is a static function used by
+ * kbase_pm_get_active_cores(), kbase_pm_get_trans_cores() and
+ * kbase_pm_get_ready_cores().
+ *
+ * @kbdev:     The kbase device structure of the device
+ * @core_type: The type of core that should be queried
+ * @action:    The property of the cores to query
+ *
+ * Return: A bit mask specifying the state of the cores
+ */
+static u64 kbase_pm_get_state(struct kbase_device *kbdev,
+					enum kbase_pm_core_type core_type,
+					enum kbasep_pm_action action)
+{
+	u32 reg;
+	u32 lo, hi;
+
+	reg = core_type_to_reg(core_type, action);
+
+	KBASE_DEBUG_ASSERT(reg);
+
+	lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg));
+	hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4));
+
+	return (((u64) hi) << 32) | ((u64) lo);
+}
+
+/**
+ * kbase_pm_get_present_cores - Get the cores that are present
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of the cores that are present
+ */
+u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	switch (type) {
+	case KBASE_PM_CORE_L2:
+		return kbdev->gpu_props.props.raw_props.l2_present;
+	case KBASE_PM_CORE_SHADER:
+		return kbdev->gpu_props.props.raw_props.shader_present;
+	case KBASE_PM_CORE_TILER:
+		return kbdev->gpu_props.props.raw_props.tiler_present;
+	case KBASE_PM_CORE_STACK:
+		return kbdev->gpu_props.props.raw_props.stack_present;
+	default:
+		break;
+	}
+	KBASE_DEBUG_ASSERT(0);
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_present_cores);
+
+/**
+ * kbase_pm_get_active_cores - Get the cores that are "active"
+ *                             (busy processing work)
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of cores that are active
+ */
+u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type)
+{
+	return kbase_pm_get_state(kbdev, type, ACTION_PWRACTIVE);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_active_cores);
+
+/**
+ * kbase_pm_get_trans_cores - Get the cores that are transitioning between
+ *                            power states
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of cores that are transitioning
+ */
+u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type)
+{
+	return kbase_pm_get_state(kbdev, type, ACTION_PWRTRANS);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_trans_cores);
+
+/**
+ * kbase_pm_get_ready_cores - Get the cores that are powered on
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of cores that are ready (powered on)
+ */
+u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type)
+{
+	u64 result;
+
+	result = kbase_pm_get_state(kbdev, type, ACTION_READY);
+
+	switch (type) {
+	case KBASE_PM_CORE_SHADER:
+		KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED, NULL, NULL, 0u,
+								(u32) result);
+		break;
+	case KBASE_PM_CORE_TILER:
+		KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_TILER, NULL, NULL, 0u,
+								(u32) result);
+		break;
+	case KBASE_PM_CORE_L2:
+		KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_L2, NULL, NULL, 0u,
+								(u32) result);
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_ready_cores);
+
+static void kbase_pm_trigger_hwcnt_disable(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* See if we can get away with disabling hwcnt
+	 * atomically, otherwise kick off a worker.
+	 */
+	if (kbase_hwcnt_context_disable_atomic(kbdev->hwcnt_gpu_ctx)) {
+		backend->hwcnt_disabled = true;
+	} else {
+#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+		queue_work(system_wq,
+			&backend->hwcnt_disable_work);
+#else
+		queue_work(system_highpri_wq,
+			&backend->hwcnt_disable_work);
+#endif
+	}
+}
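+
+/*
+ * Note: when the atomic disable cannot be performed,
+ * backend->hwcnt_disabled stays false and the disable is deferred to
+ * hwcnt_disable_work, so the state machines below must re-check
+ * hwcnt_disabled on a later pass rather than assume the disable completed
+ * synchronously.
+ */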
+
+static void kbase_pm_l2_config_override(struct kbase_device *kbdev)
+{
+	u32 val;
+
+	/*
+	 * Skip if it is not supported
+	 */
+	if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_L2_CONFIG))
+		return;
+
+	/*
+	 * Skip if size and hash are not given explicitly,
+	 * which means default values are used.
+	 */
+	if ((kbdev->l2_size_override == 0) && (kbdev->l2_hash_override == 0))
+		return;
+
+	val = kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_CONFIG));
+
+	if (kbdev->l2_size_override) {
+		val &= ~L2_CONFIG_SIZE_MASK;
+		val |= (kbdev->l2_size_override << L2_CONFIG_SIZE_SHIFT);
+	}
+
+	if (kbdev->l2_hash_override) {
+		val &= ~L2_CONFIG_HASH_MASK;
+		val |= (kbdev->l2_hash_override << L2_CONFIG_HASH_SHIFT);
+	}
+
+	dev_dbg(kbdev->dev, "Program 0x%x to L2_CONFIG\n", val);
+
+	/* Write L2_CONFIG to override */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_CONFIG), val);
+}
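+
+/*
+ * Worked example (illustrative values): if l2_size_override is 0x7, the
+ * read-modify-write above clears L2_CONFIG_SIZE_MASK in the current
+ * L2_CONFIG value and ORs in (0x7 << L2_CONFIG_SIZE_SHIFT), leaving the
+ * hash field untouched unless l2_hash_override is also non-zero.
+ */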
+
+static void kbase_pm_control_gpu_clock(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *const backend = &kbdev->pm.backend;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	queue_work(system_wq, &backend->gpu_clock_control_work);
+}
+
+static const char *kbase_l2_core_state_to_string(enum kbase_l2_core_state state)
+{
+	const char *const strings[] = {
+#define KBASEP_L2_STATE(n) #n,
+#include "mali_kbase_pm_l2_states.h"
+#undef KBASEP_L2_STATE
+	};
+	if (WARN_ON((size_t)state >= ARRAY_SIZE(strings)))
+		return "Bad level 2 cache state";
+	else
+		return strings[state];
+}
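+
+/*
+ * Minimal sketch of the X-macro pattern used here and for the shader
+ * states below (illustrative; the real lists live in
+ * mali_kbase_pm_l2_states.h and mali_kbase_pm_shader_states.h). The same
+ * header is expanded once for the enum and once for the name table, so the
+ * two can never drift apart.
+ */
+#if 0
+/* example_states.h */
+KBASEP_L2_STATE(OFF)
+KBASEP_L2_STATE(ON)
+
+/* consumer */
+enum example_l2_state {
+#define KBASEP_L2_STATE(n) EXAMPLE_L2_##n,
+#include "example_states.h"
+#undef KBASEP_L2_STATE
+};
+#endif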
+
+static u64 kbase_pm_l2_update_state(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+	u64 l2_present = kbdev->gpu_props.props.raw_props.l2_present;
+	u64 tiler_present = kbdev->gpu_props.props.raw_props.tiler_present;
+	enum kbase_l2_core_state prev_state;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	do {
+		/* Get current state */
+		u64 l2_trans = kbase_pm_get_trans_cores(kbdev,
+				KBASE_PM_CORE_L2);
+		u64 l2_ready = kbase_pm_get_ready_cores(kbdev,
+				KBASE_PM_CORE_L2);
+		u64 tiler_trans = kbase_pm_get_trans_cores(kbdev,
+				KBASE_PM_CORE_TILER);
+		u64 tiler_ready = kbase_pm_get_ready_cores(kbdev,
+				KBASE_PM_CORE_TILER);
+
+		/* mask off ready from trans in case transitions finished
+		 * between the register reads
+		 */
+		l2_trans &= ~l2_ready;
+		tiler_trans &= ~tiler_ready;
+
+		prev_state = backend->l2_state;
+
+		switch (backend->l2_state) {
+		case KBASE_L2_OFF:
+			if (kbase_pm_is_l2_desired(kbdev)) {
+				/*
+				 * Set the desired config for L2 before powering
+				 * it on
+				 */
+				kbase_pm_l2_config_override(kbdev);
+
+				/* L2 is required, power on.  Powering on the
+				 * tiler will also power the first L2 cache.
+				 */
+				kbase_pm_invoke(kbdev, KBASE_PM_CORE_TILER,
+						tiler_present, ACTION_PWRON);
+
+				/* If we have more than one L2 cache then we
+				 * must power them on explicitly.
+				 */
+				if (l2_present != 1)
+					kbase_pm_invoke(kbdev, KBASE_PM_CORE_L2,
+							l2_present & ~1,
+							ACTION_PWRON);
+				backend->l2_state = KBASE_L2_PEND_ON;
+			}
+			break;
+
+		case KBASE_L2_PEND_ON:
+			if (!l2_trans && l2_ready == l2_present && !tiler_trans
+					&& tiler_ready == tiler_present) {
+				KBASE_TRACE_ADD(kbdev,
+						PM_CORES_CHANGE_AVAILABLE_TILER,
+						NULL, NULL, 0u,
+						(u32)tiler_ready);
+				/*
+				 * Ensure snoops are enabled after L2 is powered
+				 * up. Note that kbase keeps track of the snoop
+				 * state, so safe to repeatedly call.
+				 */
+				kbase_pm_cache_snoop_enable(kbdev);
+
+				/* With the L2 enabled, we can now enable
+				 * hardware counters.
+				 */
+				if (kbdev->pm.backend.gpu_clock_slow_down_wa)
+					backend->l2_state =
+						KBASE_L2_RESTORE_CLOCKS;
+				else
+					backend->l2_state =
+						KBASE_L2_ON_HWCNT_ENABLE;
+
+				/* Now that the L2 is on, the shaders can start
+				 * powering on if they're required. The obvious
+				 * way to do this would be to call
+				 * kbase_pm_shaders_update_state() here.
+				 * However, that would make the two state
+				 * machines mutually recursive, as the opposite
+				 * would be needed for powering down. Instead,
+				 * callers of this function should use the
+				 * kbase_pm_update_state() wrapper, which will
+				 * call the shader state machine immediately
+				 * after the L2 (for power up), or
+				 * automatically re-invoke the L2 state machine
+				 * when the shaders power down.
+				 */
+			}
+			break;
+
+		case KBASE_L2_RESTORE_CLOCKS:
+			/* We assume that only GPUs affected by
+			 * BASE_HW_ISSUE_GPU2017_1336 enter this state
+			 */
+			WARN_ON_ONCE(!kbdev->pm.backend.gpu_clock_slow_down_wa);
+
+			/* If the L2 is not needed, we must make sure that any
+			 * previously issued work to restore the GPU clock is
+			 * cancelled. To do so, move to the
+			 * KBASE_L2_SLOW_DOWN_CLOCKS state.
+			 */
+			if (!kbase_pm_is_l2_desired(kbdev)) {
+				backend->l2_state = KBASE_L2_SLOW_DOWN_CLOCKS;
+				break;
+			}
+
+			backend->gpu_clock_slow_down_desired = false;
+			if (backend->gpu_clock_slowed_down)
+				kbase_pm_control_gpu_clock(kbdev);
+			else
+				backend->l2_state = KBASE_L2_ON_HWCNT_ENABLE;
+			break;
+
+		case KBASE_L2_ON_HWCNT_ENABLE:
+			backend->hwcnt_desired = true;
+			if (backend->hwcnt_disabled) {
+				kbase_hwcnt_context_enable(
+					kbdev->hwcnt_gpu_ctx);
+				backend->hwcnt_disabled = false;
+			}
+			backend->l2_state = KBASE_L2_ON;
+			break;
+
+		case KBASE_L2_ON:
+			if (!kbase_pm_is_l2_desired(kbdev)) {
+				/* Do not power off L2 until the shaders and
+				 * core stacks are off.
+				 */
+				if (backend->shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF)
+					break;
+
+				/* We need to make sure hardware counters are
+				 * disabled before powering down the L2, to
+				 * prevent loss of data.
+				 *
+				 * We waited until after the cores were powered
+				 * down to prevent ping-ponging between hwcnt
+				 * enabled and disabled, which would have
+				 * happened if userspace submitted more work
+				 * while we were trying to power down.
+				 */
+				backend->l2_state = KBASE_L2_ON_HWCNT_DISABLE;
+			}
+			break;
+
+		case KBASE_L2_ON_HWCNT_DISABLE:
+			/* If the L2 became desired while we were waiting on the
+			 * worker to do the actual hwcnt disable (which might
+			 * happen if some work was submitted immediately after
+			 * the shaders powered off), then we need to early-out
+			 * of this state and re-enable hwcnt.
+			 *
+			 * If we get lucky, the hwcnt disable might not have
+			 * actually started yet, and the logic in the hwcnt
+			 * enable state will prevent the worker from
+			 * performing the disable entirely, preventing loss of
+			 * any hardware counter data.
+			 *
+			 * If the hwcnt disable has started, then we'll lose
+			 * a tiny amount of hardware counter data between the
+			 * disable and the re-enable occurring.
+			 *
+			 * This loss of data is preferable to the alternative,
+			 * which is to block the shader cores from doing any
+			 * work until we're sure hwcnt has been re-enabled.
+			 */
+			if (kbase_pm_is_l2_desired(kbdev)) {
+				backend->l2_state = KBASE_L2_ON_HWCNT_ENABLE;
+				break;
+			}
+
+			backend->hwcnt_desired = false;
+			if (!backend->hwcnt_disabled) {
+				kbase_pm_trigger_hwcnt_disable(kbdev);
+			}
+
+			if (backend->hwcnt_disabled) {
+				if (kbdev->pm.backend.gpu_clock_slow_down_wa)
+					backend->l2_state =
+						KBASE_L2_SLOW_DOWN_CLOCKS;
+				else
+					backend->l2_state = KBASE_L2_POWER_DOWN;
+			}
+			break;
+
+		case KBASE_L2_SLOW_DOWN_CLOCKS:
+			/* We assume that only GPUs affected by
+			 * BASE_HW_ISSUE_GPU2017_1336 enter this state
+			 */
+			WARN_ON_ONCE(!kbdev->pm.backend.gpu_clock_slow_down_wa);
+
+			/* The L2 needs to be powered up, and we must make sure
+			 * that any previously issued work to slow down the
+			 * GPU clock is cancelled. To do so, move to the
+			 * KBASE_L2_RESTORE_CLOCKS state.
+			 */
+			if (kbase_pm_is_l2_desired(kbdev)) {
+				backend->l2_state = KBASE_L2_RESTORE_CLOCKS;
+				break;
+			}
+
+			backend->gpu_clock_slow_down_desired = true;
+			if (!backend->gpu_clock_slowed_down)
+				kbase_pm_control_gpu_clock(kbdev);
+			else
+				backend->l2_state = KBASE_L2_POWER_DOWN;
+
+			break;
+
+		case KBASE_L2_POWER_DOWN:
+			if (!platform_power_down_only && !backend->l2_always_on)
+				/* Powering off the L2 will also power off the
+				 * tiler.
+				 */
+				kbase_pm_invoke(kbdev, KBASE_PM_CORE_L2,
+						l2_present,
+						ACTION_PWROFF);
+			else
+				/* If L2 cache is powered then we must flush it
+				 * before we power off the GPU. Normally this
+				 * would have been handled when the L2 was
+				 * powered off.
+				 */
+				kbase_gpu_start_cache_clean_nolock(
+						kbdev);
+
+			KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
+					NULL, NULL, 0u, 0u);
+
+			backend->l2_state = KBASE_L2_PEND_OFF;
+			break;
+
+		case KBASE_L2_PEND_OFF:
+			if (!platform_power_down_only && !backend->l2_always_on) {
+				/* We only need to check the L2 here - if the L2
+				 * is off then the tiler is definitely also off.
+				 */
+				if (!l2_trans && !l2_ready)
+					/* L2 is now powered off */
+					backend->l2_state = KBASE_L2_OFF;
+			} else {
+				if (!kbdev->cache_clean_in_progress)
+					backend->l2_state = KBASE_L2_OFF;
+			}
+			break;
+
+		case KBASE_L2_RESET_WAIT:
+			/* Reset complete  */
+			if (!backend->in_reset)
+				backend->l2_state = KBASE_L2_OFF;
+			break;
+
+		default:
+			WARN(1, "Invalid state in l2_state: %d",
+					backend->l2_state);
+		}
+
+		if (backend->l2_state != prev_state)
+			dev_dbg(kbdev->dev, "L2 state transition: %s to %s\n",
+				kbase_l2_core_state_to_string(prev_state),
+				kbase_l2_core_state_to_string(
+					backend->l2_state));
+
+	} while (backend->l2_state != prev_state);
+
+	if (kbdev->pm.backend.invoke_poweroff_wait_wq_when_l2_off &&
+			backend->l2_state == KBASE_L2_OFF) {
+		kbdev->pm.backend.invoke_poweroff_wait_wq_when_l2_off = false;
+		queue_work(kbdev->pm.backend.gpu_poweroff_wait_wq,
+				&kbdev->pm.backend.gpu_poweroff_wait_work);
+	}
+
+	if (backend->l2_state == KBASE_L2_ON)
+		return l2_present;
+	return 0;
+}
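+
+/*
+ * Informal summary of the states above: the usual power-up path is
+ * OFF -> PEND_ON -> [RESTORE_CLOCKS ->] ON_HWCNT_ENABLE -> ON, and the
+ * power-down path is ON -> ON_HWCNT_DISABLE -> [SLOW_DOWN_CLOCKS ->]
+ * POWER_DOWN -> PEND_OFF -> OFF, with the bracketed states only used when
+ * the GPU2017-1336 clock workaround is active. The do/while loop keeps
+ * re-evaluating until the state stops changing, so several transitions can
+ * be taken in a single call.
+ */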
+
+static void shader_poweroff_timer_stop_callback(struct work_struct *data)
+{
+	unsigned long flags;
+	struct kbasep_pm_tick_timer_state *stt = container_of(data,
+			struct kbasep_pm_tick_timer_state, work);
+	struct kbase_device *kbdev = container_of(stt, struct kbase_device,
+			pm.backend.shader_tick_timer);
+
+	hrtimer_cancel(&stt->timer);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	stt->cancel_queued = false;
+	if (kbdev->pm.backend.gpu_powered)
+		kbase_pm_update_state(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+/**
+ * shader_poweroff_timer_queue_cancel - cancel the shader poweroff tick timer
+ * @kbdev:      pointer to kbase device
+ *
+ * Synchronization between the shader state machine and the timer thread is
+ * difficult. This is because situations may arise where the state machine
+ * wants to start the timer, but the callback is already running, and has
+ * already passed the point at which it checks whether it is required, and so
+ * cancels itself, even though the state machine may have just tried to call
+ * hrtimer_start.
+ *
+ * This cannot be stopped by holding hwaccess_lock in the timer thread,
+ * because there are still infinitesimally small sections at the start and end
+ * of the callback where the lock is not held.
+ *
+ * Instead, a new state is added to the shader state machine,
+ * KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF. This is used to guarantee
+ * that when the shaders are switched off, the timer has definitely been
+ * cancelled. As a result, when KBASE_SHADERS_ON_CORESTACK_ON is left and the
+ * timer is started, it is guaranteed that either the timer is already running
+ * (from an availability change or cancelled timer), or hrtimer_start will
+ * succeed. It is critical to avoid ending up in
+ * KBASE_SHADERS_WAIT_OFF_CORESTACK_ON without the timer running, or it could
+ * hang there forever.
+ */
+static void shader_poweroff_timer_queue_cancel(struct kbase_device *kbdev)
+{
+	struct kbasep_pm_tick_timer_state *stt =
+			&kbdev->pm.backend.shader_tick_timer;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	stt->needed = false;
+
+	if (hrtimer_active(&stt->timer) && !stt->cancel_queued) {
+		stt->cancel_queued = true;
+		queue_work(stt->wq, &stt->work);
+	}
+}
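+
+/*
+ * Informal timeline of the race described above:
+ *   1. the state machine queues stt->work to cancel the timer;
+ *   2. the callback is already running and has passed its stt->needed
+ *      check, so it will not restart itself;
+ *   3. the state machine later wants the timer again and calls
+ *      hrtimer_start().
+ * The TIMER_PEND_OFF state guarantees the cancel has fully completed
+ * before the shaders are considered off, so step 3 can never race with an
+ * in-flight cancel and strand KBASE_SHADERS_WAIT_OFF_CORESTACK_ON with no
+ * running timer.
+ */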
+
+static const char *kbase_shader_core_state_to_string(
+	enum kbase_shader_core_state state)
+{
+	const char *const strings[] = {
+#define KBASEP_SHADER_STATE(n) #n,
+#include "mali_kbase_pm_shader_states.h"
+#undef KBASEP_SHADER_STATE
+	};
+	if (WARN_ON((size_t)state >= ARRAY_SIZE(strings)))
+		return "Bad shader core state";
+	else
+		return strings[state];
+}
+
+static void kbase_pm_shaders_update_state(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+	struct kbasep_pm_tick_timer_state *stt =
+			&kbdev->pm.backend.shader_tick_timer;
+	enum kbase_shader_core_state prev_state;
+	u64 stacks_avail = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (corestack_driver_control)
+		/* Always power on all the corestacks. Disabling certain
+		 * corestacks when their respective shaders are not in the
+		 * available bitmap is not currently supported.
+		 */
+		stacks_avail = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_STACK);
+
+	do {
+		u64 shaders_trans = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_SHADER);
+		u64 shaders_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
+		u64 stacks_trans = 0;
+		u64 stacks_ready = 0;
+
+		if (corestack_driver_control) {
+			stacks_trans = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_STACK);
+			stacks_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_STACK);
+		}
+
+		/* mask off ready from trans in case transitions finished
+		 * between the register reads
+		 */
+		shaders_trans &= ~shaders_ready;
+		stacks_trans &= ~stacks_ready;
+
+		prev_state = backend->shaders_state;
+
+		switch (backend->shaders_state) {
+		case KBASE_SHADERS_OFF_CORESTACK_OFF:
+			/* Ignore changes to the shader core availability
+			 * except at certain points where we can handle it,
+			 * i.e. off and SHADERS_ON_CORESTACK_ON.
+			 */
+			backend->shaders_avail = kbase_pm_ca_get_core_mask(kbdev);
+			backend->pm_shaders_core_mask = 0;
+
+			if (backend->shaders_desired &&
+				backend->l2_state == KBASE_L2_ON) {
+				if (backend->hwcnt_desired &&
+					!backend->hwcnt_disabled) {
+					/* Trigger a hwcounter dump */
+					backend->hwcnt_desired = false;
+					kbase_pm_trigger_hwcnt_disable(kbdev);
+				}
+
+				if (backend->hwcnt_disabled) {
+					if (corestack_driver_control) {
+						kbase_pm_invoke(kbdev,
+							KBASE_PM_CORE_STACK,
+							stacks_avail,
+							ACTION_PWRON);
+					}
+					backend->shaders_state =
+						KBASE_SHADERS_OFF_CORESTACK_PEND_ON;
+				}
+			}
+			break;
+
+		case KBASE_SHADERS_OFF_CORESTACK_PEND_ON:
+			if (!stacks_trans && stacks_ready == stacks_avail) {
+				kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
+						backend->shaders_avail, ACTION_PWRON);
+
+				backend->shaders_state = KBASE_SHADERS_PEND_ON_CORESTACK_ON;
+			}
+			break;
+
+		case KBASE_SHADERS_PEND_ON_CORESTACK_ON:
+			if (!shaders_trans && shaders_ready == backend->shaders_avail) {
+				KBASE_TRACE_ADD(kbdev,
+						PM_CORES_CHANGE_AVAILABLE,
+						NULL, NULL, 0u, (u32)shaders_ready);
+				backend->pm_shaders_core_mask = shaders_ready;
+				backend->hwcnt_desired = true;
+				if (backend->hwcnt_disabled) {
+					kbase_hwcnt_context_enable(
+						kbdev->hwcnt_gpu_ctx);
+					backend->hwcnt_disabled = false;
+				}
+				backend->shaders_state = KBASE_SHADERS_ON_CORESTACK_ON;
+			}
+			break;
+
+		case KBASE_SHADERS_ON_CORESTACK_ON:
+			backend->shaders_avail = kbase_pm_ca_get_core_mask(kbdev);
+
+			/* If the shaders need to change state, trigger a
+			 * counter dump */
+			if (!backend->shaders_desired ||
+				(backend->shaders_avail & ~shaders_ready)) {
+				backend->hwcnt_desired = false;
+				if (!backend->hwcnt_disabled)
+					kbase_pm_trigger_hwcnt_disable(kbdev);
+				backend->shaders_state =
+					KBASE_SHADERS_ON_CORESTACK_ON_RECHECK;
+			}
+			break;
+
+		case KBASE_SHADERS_ON_CORESTACK_ON_RECHECK:
+			backend->shaders_avail =
+				kbase_pm_ca_get_core_mask(kbdev);
+
+			if (!backend->hwcnt_disabled) {
+				/* Wait for the hwcnt disable to complete */
+				;
+			} else if (!backend->shaders_desired) {
+				if (kbdev->pm.backend.protected_transition_override ||
+						!stt->configured_ticks ||
+						WARN_ON(stt->cancel_queued)) {
+					backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
+				} else {
+					stt->remaining_ticks = stt->configured_ticks;
+					stt->needed = true;
+
+					/* The shader hysteresis timer is not
+					 * done the obvious way, which would be
+					 * to start an hrtimer when the shader
+					 * power off is requested. Instead,
+					 * use a 'tick' timer, and set the
+					 * remaining number of ticks on a power
+					 * off request.  This avoids the
+					 * latency of starting, then
+					 * immediately cancelling an hrtimer
+					 * when the shaders are re-requested
+					 * before the timeout expires.
+					 */
+					if (!hrtimer_active(&stt->timer))
+						hrtimer_start(&stt->timer,
+								stt->configured_interval,
+								HRTIMER_MODE_REL);
+
+					backend->shaders_state = KBASE_SHADERS_WAIT_OFF_CORESTACK_ON;
+				}
+			} else {
+				if (backend->shaders_avail & ~shaders_ready) {
+					backend->shaders_avail |= shaders_ready;
+
+					kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
+							backend->shaders_avail & ~shaders_ready,
+							ACTION_PWRON);
+				}
+				backend->shaders_state = KBASE_SHADERS_PEND_ON_CORESTACK_ON;
+			}
+			break;
+
+		case KBASE_SHADERS_WAIT_OFF_CORESTACK_ON:
+			if (WARN_ON(!hrtimer_active(&stt->timer))) {
+				stt->remaining_ticks = 0;
+				backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
+			}
+
+			if (backend->shaders_desired) {
+				stt->remaining_ticks = 0;
+				backend->shaders_state = KBASE_SHADERS_ON_CORESTACK_ON_RECHECK;
+			} else if (stt->remaining_ticks == 0) {
+				backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
+			}
+			break;
+
+		case KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON:
+			shader_poweroff_timer_queue_cancel(kbdev);
+			if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_921)) {
+				kbase_gpu_start_cache_clean_nolock(kbdev);
+				backend->shaders_state =
+					KBASE_SHADERS_L2_FLUSHING_CORESTACK_ON;
+			} else {
+				backend->shaders_state =
+					KBASE_SHADERS_READY_OFF_CORESTACK_ON;
+			}
+			break;
+
+		case KBASE_SHADERS_L2_FLUSHING_CORESTACK_ON:
+			if (!kbdev->cache_clean_in_progress)
+				backend->shaders_state =
+					KBASE_SHADERS_READY_OFF_CORESTACK_ON;
+
+			break;
+
+		case KBASE_SHADERS_READY_OFF_CORESTACK_ON:
+			if (!platform_power_down_only)
+				kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
+						shaders_ready, ACTION_PWROFF);
+
+			KBASE_TRACE_ADD(kbdev,
+					PM_CORES_CHANGE_AVAILABLE,
+					NULL, NULL, 0u, 0u);
+
+			backend->shaders_state = KBASE_SHADERS_PEND_OFF_CORESTACK_ON;
+			break;
+
+		case KBASE_SHADERS_PEND_OFF_CORESTACK_ON:
+			if ((!shaders_trans && !shaders_ready) || platform_power_down_only) {
+				if (corestack_driver_control && !platform_power_down_only)
+					kbase_pm_invoke(kbdev, KBASE_PM_CORE_STACK,
+							stacks_avail, ACTION_PWROFF);
+
+				backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_PEND_OFF;
+			}
+			break;
+
+		case KBASE_SHADERS_OFF_CORESTACK_PEND_OFF:
+			if ((!stacks_trans && !stacks_ready) ||
+				platform_power_down_only) {
+				/* Once powered off, re-enable the hwcnt */
+				backend->pm_shaders_core_mask = 0;
+				backend->hwcnt_desired = true;
+				if (backend->hwcnt_disabled) {
+					kbase_hwcnt_context_enable(
+						kbdev->hwcnt_gpu_ctx);
+					backend->hwcnt_disabled = false;
+				}
+				backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF;
+			}
+			break;
+
+		case KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF:
+			if (!hrtimer_active(&stt->timer) && !stt->cancel_queued)
+				backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_OFF;
+			break;
+
+		case KBASE_SHADERS_RESET_WAIT:
+			/* Reset complete */
+			if (!backend->in_reset)
+				backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF;
+			break;
+		}
+
+		if (backend->shaders_state != prev_state)
+			dev_dbg(kbdev->dev, "Shader state transition: %s to %s\n",
+				kbase_shader_core_state_to_string(prev_state),
+				kbase_shader_core_state_to_string(
+					backend->shaders_state));
+
+	} while (backend->shaders_state != prev_state);
+}
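+
+/*
+ * Informal summary of the shader states above: power-up runs
+ * OFF_CORESTACK_OFF -> OFF_CORESTACK_PEND_ON -> PEND_ON_CORESTACK_ON ->
+ * ON_CORESTACK_ON. Power-down goes through ON_CORESTACK_ON_RECHECK and,
+ * unless hysteresis is disabled, WAIT_OFF_CORESTACK_ON while the tick
+ * timer runs down, then WAIT_FINISHED_CORESTACK_ON ->
+ * [L2_FLUSHING_CORESTACK_ON ->] READY_OFF_CORESTACK_ON ->
+ * PEND_OFF_CORESTACK_ON -> OFF_CORESTACK_PEND_OFF ->
+ * OFF_CORESTACK_OFF_TIMER_PEND_OFF, the last state being the handshake
+ * that proves the tick timer is fully cancelled.
+ */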
+
+static bool kbase_pm_is_in_desired_state_nolock(struct kbase_device *kbdev)
+{
+	bool in_desired_state = true;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (kbase_pm_is_l2_desired(kbdev) &&
+			kbdev->pm.backend.l2_state != KBASE_L2_ON)
+		in_desired_state = false;
+	else if (!kbase_pm_is_l2_desired(kbdev) &&
+			kbdev->pm.backend.l2_state != KBASE_L2_OFF)
+		in_desired_state = false;
+
+	if (kbdev->pm.backend.shaders_desired &&
+			kbdev->pm.backend.shaders_state != KBASE_SHADERS_ON_CORESTACK_ON)
+		in_desired_state = false;
+	else if (!kbdev->pm.backend.shaders_desired &&
+			kbdev->pm.backend.shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF)
+		in_desired_state = false;
+
+	return in_desired_state;
+}
+
+static bool kbase_pm_is_in_desired_state(struct kbase_device *kbdev)
+{
+	bool in_desired_state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	in_desired_state = kbase_pm_is_in_desired_state_nolock(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return in_desired_state;
+}
+
+static bool kbase_pm_is_in_desired_state_with_l2_powered(
+		struct kbase_device *kbdev)
+{
+	bool in_desired_state = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	if (kbase_pm_is_in_desired_state_nolock(kbdev) &&
+			(kbdev->pm.backend.l2_state == KBASE_L2_ON))
+		in_desired_state = true;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return in_desired_state;
+}
+
+static void kbase_pm_trace_power_state(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	KBASE_TLSTREAM_AUX_PM_STATE(
+			kbdev,
+			KBASE_PM_CORE_L2,
+			kbase_pm_get_ready_cores(
+				kbdev, KBASE_PM_CORE_L2));
+	KBASE_TLSTREAM_AUX_PM_STATE(
+			kbdev,
+			KBASE_PM_CORE_SHADER,
+			kbase_pm_get_ready_cores(
+				kbdev, KBASE_PM_CORE_SHADER));
+	KBASE_TLSTREAM_AUX_PM_STATE(
+			kbdev,
+			KBASE_PM_CORE_TILER,
+			kbase_pm_get_ready_cores(
+				kbdev,
+				KBASE_PM_CORE_TILER));
+
+	if (corestack_driver_control)
+		KBASE_TLSTREAM_AUX_PM_STATE(
+				kbdev,
+				KBASE_PM_CORE_STACK,
+				kbase_pm_get_ready_cores(
+					kbdev,
+					KBASE_PM_CORE_STACK));
+}
+
+void kbase_pm_update_state(struct kbase_device *kbdev)
+{
+	enum kbase_shader_core_state prev_shaders_state =
+			kbdev->pm.backend.shaders_state;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!kbdev->pm.backend.gpu_powered)
+		return; /* Do nothing if the GPU is off */
+
+	kbase_pm_l2_update_state(kbdev);
+	kbase_pm_shaders_update_state(kbdev);
+
+	/* If the shaders just turned off, re-invoke the L2 state machine, in
+	 * case it was waiting for the shaders to turn off before powering down
+	 * the L2.
+	 */
+	if (prev_shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF &&
+			kbdev->pm.backend.shaders_state == KBASE_SHADERS_OFF_CORESTACK_OFF)
+		kbase_pm_l2_update_state(kbdev);
+
+	if (kbase_pm_is_in_desired_state_nolock(kbdev)) {
+		KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL, NULL,
+				true, kbdev->pm.backend.shaders_avail);
+
+		kbase_pm_trace_power_state(kbdev);
+
+		KBASE_TRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, NULL, 0u, 0);
+		wake_up(&kbdev->pm.backend.gpu_in_desired_state_wait);
+	}
+}
+
+static enum hrtimer_restart
+shader_tick_timer_callback(struct hrtimer *timer)
+{
+	struct kbasep_pm_tick_timer_state *stt = container_of(timer,
+			struct kbasep_pm_tick_timer_state, timer);
+	struct kbase_device *kbdev = container_of(stt, struct kbase_device,
+			pm.backend.shader_tick_timer);
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+	unsigned long flags;
+	enum hrtimer_restart restart = HRTIMER_NORESTART;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (stt->remaining_ticks &&
+			backend->shaders_state == KBASE_SHADERS_WAIT_OFF_CORESTACK_ON) {
+		stt->remaining_ticks--;
+
+		/* If the remaining ticks just changed from 1 to 0, invoke the
+		 * PM state machine to power off the shader cores.
+		 */
+		if (!stt->remaining_ticks && !backend->shaders_desired)
+			kbase_pm_update_state(kbdev);
+	}
+
+	if (stt->needed) {
+		hrtimer_forward_now(timer, stt->configured_interval);
+		restart = HRTIMER_RESTART;
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return restart;
+}
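+
+/*
+ * Note: this is the standard periodic-hrtimer idiom. Per-tick work is done
+ * under hwaccess_lock, then the callback either forwards itself by one
+ * configured interval and returns HRTIMER_RESTART, or returns
+ * HRTIMER_NORESTART to let the timer lapse once stt->needed is cleared.
+ */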
+
+int kbase_pm_state_machine_init(struct kbase_device *kbdev)
+{
+	struct kbasep_pm_tick_timer_state *stt = &kbdev->pm.backend.shader_tick_timer;
+
+	stt->wq = alloc_workqueue("kbase_pm_shader_poweroff", WQ_HIGHPRI | WQ_UNBOUND, 1);
+	if (!stt->wq)
+		return -ENOMEM;
+
+	INIT_WORK(&stt->work, shader_poweroff_timer_stop_callback);
+
+	stt->needed = false;
+	hrtimer_init(&stt->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	stt->timer.function = shader_tick_timer_callback;
+	stt->configured_interval = HR_TIMER_DELAY_NSEC(DEFAULT_PM_GPU_POWEROFF_TICK_NS);
+	stt->configured_ticks = DEFAULT_PM_POWEROFF_TICK_SHADER;
+
+	return 0;
+}
+
+void kbase_pm_state_machine_term(struct kbase_device *kbdev)
+{
+	hrtimer_cancel(&kbdev->pm.backend.shader_tick_timer.timer);
+	destroy_workqueue(kbdev->pm.backend.shader_tick_timer.wq);
+}
+
+void kbase_pm_reset_start_locked(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	backend->in_reset = true;
+	backend->l2_state = KBASE_L2_RESET_WAIT;
+	backend->shaders_state = KBASE_SHADERS_RESET_WAIT;
+
+	/* We're in a reset, so hwcnt will have been synchronously disabled by
+	 * this function's caller as part of the reset process. We therefore
+	 * know that any call to kbase_hwcnt_context_disable_atomic, if
+	 * required to sync the hwcnt refcount with our internal state, is
+	 * guaranteed to succeed.
+	 */
+	backend->hwcnt_desired = false;
+	if (!backend->hwcnt_disabled) {
+		WARN_ON(!kbase_hwcnt_context_disable_atomic(
+			kbdev->hwcnt_gpu_ctx));
+		backend->hwcnt_disabled = true;
+	}
+
+	shader_poweroff_timer_queue_cancel(kbdev);
+}
+
+void kbase_pm_reset_complete(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+	unsigned long flags;
+
+	WARN_ON(!kbase_reset_gpu_is_active(kbdev));
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	/* As GPU has just been reset, that results in implicit flush of L2
+	 * cache, can safely mark the pending cache flush operation (if there
+	 * was any) as complete and unblock the waiter.
+	 * No work can be submitted whilst GPU reset is ongoing.
+	 */
+	kbase_gpu_cache_clean_wait_complete(kbdev);
+	backend->in_reset = false;
+	kbase_pm_update_state(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+/* Timeout for kbase_pm_wait_for_desired_state when wait_event_killable has
+ * aborted due to a fatal signal. If the time spent waiting has exceeded this
+ * threshold then there is most likely a hardware issue.
+ */
+#define PM_TIMEOUT (5*HZ) /* 5s */
+
+static void kbase_pm_timed_out(struct kbase_device *kbdev)
+{
+	dev_err(kbdev->dev, "Power transition timed out unexpectedly\n");
+	dev_err(kbdev->dev, "Desired state :\n");
+	dev_err(kbdev->dev, "\tShader=%016llx\n",
+			kbdev->pm.backend.shaders_desired ? kbdev->pm.backend.shaders_avail : 0);
+	dev_err(kbdev->dev, "Current state :\n");
+	dev_err(kbdev->dev, "\tShader=%08x%08x\n",
+			kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(SHADER_READY_HI)),
+			kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(SHADER_READY_LO)));
+	dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
+			kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(TILER_READY_HI)),
+			kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(TILER_READY_LO)));
+	dev_err(kbdev->dev, "\tL2    =%08x%08x\n",
+			kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(L2_READY_HI)),
+			kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(L2_READY_LO)));
+	dev_err(kbdev->dev, "Cores transitioning :\n");
+	dev_err(kbdev->dev, "\tShader=%08x%08x\n",
+			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+					SHADER_PWRTRANS_HI)),
+			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+					SHADER_PWRTRANS_LO)));
+	dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
+			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+					TILER_PWRTRANS_HI)),
+			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+					TILER_PWRTRANS_LO)));
+	dev_err(kbdev->dev, "\tL2    =%08x%08x\n",
+			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+					L2_PWRTRANS_HI)),
+			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+					L2_PWRTRANS_LO)));
+
+	dev_err(kbdev->dev, "Sending reset to GPU - all running jobs will be lost\n");
+	if (kbase_prepare_to_reset_gpu(kbdev))
+		kbase_reset_gpu(kbdev);
+}
+
+void kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	unsigned long timeout;
+	int err;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_pm_update_state(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	timeout = jiffies + PM_TIMEOUT;
+
+	/* Wait for cores */
+	err = wait_event_killable(kbdev->pm.backend.gpu_in_desired_state_wait,
+			kbase_pm_is_in_desired_state_with_l2_powered(kbdev));
+
+	if (err < 0 && time_after(jiffies, timeout))
+		kbase_pm_timed_out(kbdev);
+}
+
+void kbase_pm_wait_for_desired_state(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	unsigned long timeout;
+	int err;
+
+	/* Let the state machine latch the most recent desired state. */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_pm_update_state(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	timeout = jiffies + PM_TIMEOUT;
+
+	/* Wait for cores */
+	err = wait_event_killable(kbdev->pm.backend.gpu_in_desired_state_wait,
+			kbase_pm_is_in_desired_state(kbdev));
+
+	if (err < 0 && time_after(jiffies, timeout))
+		kbase_pm_timed_out(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbase_pm_wait_for_desired_state);
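+
+/*
+ * Note on the pattern above: wait_event_killable() only returns a negative
+ * errno when a fatal signal interrupts the wait, so the time_after() test
+ * distinguishes a quick kill (treated as a normal abort) from one that
+ * arrived after more than PM_TIMEOUT, which is assumed to be a stuck power
+ * transition and triggers a GPU reset via kbase_pm_timed_out().
+ */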
+
+void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	/* Clear all interrupts, and unmask them all. */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF);
+	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF);
+
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF);
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts);
+
+void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	/* Mask all interrupts, and clear them all. */
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL);
+	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0);
+	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF);
+
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0);
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF);
+}
+
+void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_pm_disable_interrupts_nolock(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts);
+
+/*
+ * pmu layout:
+ * 0x0000: PMU TAG (RO) (0xCAFECAFE)
+ * 0x0004: PMU VERSION ID (RO) (0x00000000)
+ * 0x0008: CLOCK ENABLE (RW) (31:1 SBZ, 0 CLOCK STATE)
+ */
+void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
+{
+	bool reset_required = is_resume;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	lockdep_assert_held(&kbdev->js_data.runpool_mutex);
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	if (kbdev->pm.backend.gpu_powered) {
+		/* Already turned on */
+		if (kbdev->poweroff_pending)
+			kbase_pm_enable_interrupts(kbdev);
+		kbdev->poweroff_pending = false;
+		KBASE_DEBUG_ASSERT(!is_resume);
+		return;
+	}
+
+	kbdev->poweroff_pending = false;
+
+	KBASE_TRACE_ADD(kbdev, PM_GPU_ON, NULL, NULL, 0u, 0u);
+
+	if (is_resume && kbdev->pm.backend.callback_power_resume) {
+		kbdev->pm.backend.callback_power_resume(kbdev);
+		return;
+	} else if (kbdev->pm.backend.callback_power_on) {
+		reset_required = kbdev->pm.backend.callback_power_on(kbdev);
+	}
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbdev->pm.backend.gpu_powered = true;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (reset_required) {
+		/* GPU state was lost, reset GPU to ensure it is in a
+		 * consistent state */
+		kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS);
+	}
+
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_ctx_sched_restore_all_as(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	/* Enable the interrupts */
+	kbase_pm_enable_interrupts(kbdev);
+
+	/* Turn on the L2 caches */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbdev->pm.backend.l2_desired = true;
+	kbase_pm_update_state(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_clock_on);
+
+bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend)
+{
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	/* ASSERT that the cores should now be unavailable. No lock needed. */
+	WARN_ON(kbdev->pm.backend.shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF);
+
+	kbdev->poweroff_pending = true;
+
+	if (!kbdev->pm.backend.gpu_powered) {
+		/* Already turned off */
+		if (is_suspend && kbdev->pm.backend.callback_power_suspend)
+			kbdev->pm.backend.callback_power_suspend(kbdev);
+		return true;
+	}
+
+	KBASE_TRACE_ADD(kbdev, PM_GPU_OFF, NULL, NULL, 0u, 0u);
+
+	/* Disable interrupts. This also clears any outstanding interrupts */
+	kbase_pm_disable_interrupts(kbdev);
+	/* Ensure that any IRQ handlers have finished */
+	kbase_synchronize_irqs(kbdev);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (atomic_read(&kbdev->faults_pending)) {
+		/* Page/bus faults are still being processed. The GPU can not
+		 * be powered off until they have completed */
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return false;
+	}
+
+	kbase_pm_cache_snoop_disable(kbdev);
+
+	/* The GPU power may be turned off from this point */
+	kbdev->pm.backend.gpu_powered = false;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (is_suspend && kbdev->pm.backend.callback_power_suspend)
+		kbdev->pm.backend.callback_power_suspend(kbdev);
+	else if (kbdev->pm.backend.callback_power_off)
+		kbdev->pm.backend.callback_power_off(kbdev);
+	return true;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_clock_off);
+
+struct kbasep_reset_timeout_data {
+	struct hrtimer timer;
+	bool timed_out;
+	struct kbase_device *kbdev;
+};
+
+void kbase_pm_reset_done(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	kbdev->pm.backend.reset_done = true;
+	wake_up(&kbdev->pm.backend.reset_done_wait);
+}
+
+/**
+ * kbase_pm_wait_for_reset - Wait for a reset to happen
+ *
+ * Wait for the %RESET_COMPLETED IRQ to occur, then reset the waiting state.
+ *
+ * @kbdev: Kbase device
+ */
+static void kbase_pm_wait_for_reset(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	wait_event(kbdev->pm.backend.reset_done_wait,
+						(kbdev->pm.backend.reset_done));
+	kbdev->pm.backend.reset_done = false;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_reset_done);
+
+static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer)
+{
+	struct kbasep_reset_timeout_data *rtdata =
+		container_of(timer, struct kbasep_reset_timeout_data, timer);
+
+	rtdata->timed_out = true;
+
+	/* Set the wait queue to wake up kbase_pm_init_hw even though the reset
+	 * hasn't completed */
+	kbase_pm_reset_done(rtdata->kbdev);
+
+	return HRTIMER_NORESTART;
+}
+
+static void kbase_set_jm_quirks(struct kbase_device *kbdev, const u32 prod_id)
+{
+	kbdev->hw_quirks_jm = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(JM_CONFIG));
+	if (GPU_ID2_MODEL_MATCH_VALUE(prod_id) == GPU_ID2_PRODUCT_TMIX) {
+		/* Only for tMIx */
+		u32 coherency_features;
+
+		coherency_features = kbase_reg_read(kbdev,
+					GPU_CONTROL_REG(COHERENCY_FEATURES));
+
+		/* (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
+		 * documented for tMIx so force correct value here.
+		 */
+		if (coherency_features ==
+				COHERENCY_FEATURE_BIT(COHERENCY_ACE)) {
+			kbdev->hw_quirks_jm |= (COHERENCY_ACE_LITE |
+					COHERENCY_ACE) <<
+					JM_FORCE_COHERENCY_FEATURES_SHIFT;
+		}
+	}
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_IDVS_GROUP_SIZE)) {
+		int default_idvs_group_size = 0xF;
+		u32 tmp;
+
+		if (of_property_read_u32(kbdev->dev->of_node,
+					"idvs-group-size", &tmp))
+			tmp = default_idvs_group_size;
+
+		if (tmp > JM_MAX_IDVS_GROUP_SIZE) {
+			dev_err(kbdev->dev,
+				"idvs-group-size of %u is too large. Maximum value is %d",
+				tmp, JM_MAX_IDVS_GROUP_SIZE);
+			tmp = default_idvs_group_size;
+		}
+
+		kbdev->hw_quirks_jm |= tmp << JM_IDVS_GROUP_SIZE_SHIFT;
+	}
+
+#define MANUAL_POWER_CONTROL ((u32)(1 << 8))
+	if (corestack_driver_control)
+		kbdev->hw_quirks_jm |= MANUAL_POWER_CONTROL;
+}
+
+static void kbase_set_sc_quirks(struct kbase_device *kbdev, const u32 prod_id)
+{
+	kbdev->hw_quirks_sc = kbase_reg_read(kbdev,
+					GPU_CONTROL_REG(SHADER_CONFIG));
+
+	/* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE.
+	 * See PRLAM-8443 and needed due to MIDGLES-3539.
+	 * See PRLAM-11035.
+	 */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443) ||
+			kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11035))
+		kbdev->hw_quirks_sc |= SC_LS_PAUSEBUFFER_DISABLE;
+
+	/* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD.
+	 * See PRLAM-10327.
+	 */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10327))
+		kbdev->hw_quirks_sc |= SC_SDC_DISABLE_OQ_DISCARD;
+
+	/* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS.
+	 * See PRLAM-10797.
+	 */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10797))
+		kbdev->hw_quirks_sc |= SC_ENABLE_TEXGRD_FLAGS;
+
+	if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPUCORE_1619)) {
+		if (prod_id < 0x750 || prod_id == 0x6956) /* T60x, T62x, T72x */
+			kbdev->hw_quirks_sc |= SC_LS_ATTR_CHECK_DISABLE;
+		else if (prod_id >= 0x750 && prod_id <= 0x880) /* T76x, T8xx */
+			kbdev->hw_quirks_sc |= SC_LS_ALLOW_ATTR_TYPES;
+	}
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_2968_TTRX_3162))
+		kbdev->hw_quirks_sc |= SC_VAR_ALGORITHM;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_TLS_HASHING))
+		kbdev->hw_quirks_sc |= SC_TLS_HASH_ENABLE;
+}
+
+static void kbase_set_tiler_quirks(struct kbase_device *kbdev)
+{
+	kbdev->hw_quirks_tiler = kbase_reg_read(kbdev,
+					GPU_CONTROL_REG(TILER_CONFIG));
+	/* Set tiler clock gate override if required */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953))
+		kbdev->hw_quirks_tiler |= TC_CLOCK_GATE_OVERRIDE;
+}
+
+static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
+{
+	struct device_node *np = kbdev->dev->of_node;
+	const u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
+				GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+	kbdev->hw_quirks_jm = 0;
+	kbdev->hw_quirks_sc = 0;
+	kbdev->hw_quirks_tiler = 0;
+	kbdev->hw_quirks_mmu = 0;
+
+	if (!of_property_read_u32(np, "quirks_jm",
+				&kbdev->hw_quirks_jm)) {
+		dev_info(kbdev->dev,
+			"Found quirks_jm = [0x%x] in Devicetree\n",
+			kbdev->hw_quirks_jm);
+	} else {
+		kbase_set_jm_quirks(kbdev, prod_id);
+	}
+
+	if (!of_property_read_u32(np, "quirks_sc",
+				&kbdev->hw_quirks_sc)) {
+		dev_info(kbdev->dev,
+			"Found quirks_sc = [0x%x] in Devicetree\n",
+			kbdev->hw_quirks_sc);
+	} else {
+		kbase_set_sc_quirks(kbdev, prod_id);
+	}
+
+	if (!of_property_read_u32(np, "quirks_tiler",
+				&kbdev->hw_quirks_tiler)) {
+		dev_info(kbdev->dev,
+			"Found quirks_tiler = [0x%x] in Devicetree\n",
+			kbdev->hw_quirks_tiler);
+	} else {
+		kbase_set_tiler_quirks(kbdev);
+	}
+
+	if (!of_property_read_u32(np, "quirks_mmu",
+				&kbdev->hw_quirks_mmu)) {
+		dev_info(kbdev->dev,
+			"Found quirks_mmu = [0x%x] in Devicetree\n",
+			kbdev->hw_quirks_mmu);
+	} else {
+		kbase_set_mmu_quirks(kbdev);
+	}
+}
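+
+/*
+ * For reference, a hypothetical devicetree fragment (the property names
+ * follow the of_property_read_u32() lookups above; no binding document is
+ * added by this patch). Note that "idvs-group-size" is only consulted when
+ * quirks_jm is absent, since kbase_set_jm_quirks() is skipped otherwise:
+ *
+ *	gpu@13000000 {
+ *		...
+ *		quirks_tiler = <0x00000010>;
+ *		idvs-group-size = <0x7>;
+ *	};
+ */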
+
+static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
+{
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_CONFIG),
+			kbdev->hw_quirks_sc);
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_CONFIG),
+			kbdev->hw_quirks_tiler);
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG),
+			kbdev->hw_quirks_mmu);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(JM_CONFIG),
+			kbdev->hw_quirks_jm);
+}
+
+void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
+{
+	if ((kbdev->current_gpu_coherency_mode == COHERENCY_ACE) &&
+		!kbdev->cci_snoop_enabled) {
+#ifdef CONFIG_ARM64
+		if (kbdev->snoop_enable_smc != 0)
+			kbase_invoke_smc_fid(kbdev->snoop_enable_smc, 0, 0, 0);
+#endif /* CONFIG_ARM64 */
+		dev_dbg(kbdev->dev, "MALI - CCI Snoops - Enabled\n");
+		kbdev->cci_snoop_enabled = true;
+	}
+}
+
+void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
+{
+	if (kbdev->cci_snoop_enabled) {
+#ifdef CONFIG_ARM64
+		if (kbdev->snoop_disable_smc != 0) {
+			mali_cci_flush_l2(kbdev);
+			kbase_invoke_smc_fid(kbdev->snoop_disable_smc, 0, 0, 0);
+		}
+#endif /* CONFIG_ARM64 */
+		dev_dbg(kbdev->dev, "MALI - CCI Snoops Disabled\n");
+		kbdev->cci_snoop_enabled = false;
+	}
+}
+
+static void reenable_protected_mode_hwcnt(struct kbase_device *kbdev)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+	kbdev->protected_mode_hwcnt_desired = true;
+	if (kbdev->protected_mode_hwcnt_disabled) {
+		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+		kbdev->protected_mode_hwcnt_disabled = false;
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+}
+
+static int kbase_pm_do_reset(struct kbase_device *kbdev)
+{
+	struct kbasep_reset_timeout_data rtdata;
+
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
+
+	KBASE_TLSTREAM_JD_GPU_SOFT_RESET(kbdev, kbdev);
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+						GPU_COMMAND_SOFT_RESET);
+
+	/* Unmask the reset complete interrupt only */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), RESET_COMPLETED);
+
+	/* Initialize a structure for tracking the status of the reset */
+	rtdata.kbdev = kbdev;
+	rtdata.timed_out = false;
+
+	/* Create a timer to use as a timeout on the reset */
+	hrtimer_init_on_stack(&rtdata.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rtdata.timer.function = kbasep_reset_timeout;
+
+	hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
+							HRTIMER_MODE_REL);
+
+	/* Wait for the RESET_COMPLETED interrupt to be raised */
+	kbase_pm_wait_for_reset(kbdev);
+
+	if (!rtdata.timed_out) {
+		/* GPU has been reset */
+		hrtimer_cancel(&rtdata.timer);
+		destroy_hrtimer_on_stack(&rtdata.timer);
+		return 0;
+	}
+
+	/* No interrupt has been received - check if the RAWSTAT register says
+	 * the reset has completed */
+	if (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)) &
+							RESET_COMPLETED) {
+		/* The interrupt is set in the RAWSTAT; this suggests that the
+		 * interrupts are not getting to the CPU */
+		dev_err(kbdev->dev, "Reset interrupt didn't reach CPU. Check interrupt assignments.\n");
+		/* If interrupts aren't working we can't continue. */
+		destroy_hrtimer_on_stack(&rtdata.timer);
+		return -EINVAL;
+	}
+
+	/* The GPU doesn't seem to be responding to the reset so try a hard
+	 * reset */
+	dev_err(kbdev->dev, "Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n",
+								RESET_TIMEOUT);
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+						GPU_COMMAND_HARD_RESET);
+
+	/* Restart the timer to wait for the hard reset to complete */
+	rtdata.timed_out = false;
+
+	hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
+							HRTIMER_MODE_REL);
+
+	/* Wait for the RESET_COMPLETED interrupt to be raised */
+	kbase_pm_wait_for_reset(kbdev);
+
+	if (!rtdata.timed_out) {
+		/* GPU has been reset */
+		hrtimer_cancel(&rtdata.timer);
+		destroy_hrtimer_on_stack(&rtdata.timer);
+		return 0;
+	}
+
+	destroy_hrtimer_on_stack(&rtdata.timer);
+
+	dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n",
+								RESET_TIMEOUT);
+
+	return -EINVAL;
+}
+
+static int kbasep_protected_mode_enable(struct protected_mode_device *pdev)
+{
+	struct kbase_device *kbdev = pdev->data;
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+		GPU_COMMAND_SET_PROTECTED_MODE);
+	return 0;
+}
+
+static int kbasep_protected_mode_disable(struct protected_mode_device *pdev)
+{
+	struct kbase_device *kbdev = pdev->data;
+
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	return kbase_pm_do_reset(kbdev);
+}
+
+struct protected_mode_ops kbase_native_protected_ops = {
+	.protected_mode_enable = kbasep_protected_mode_enable,
+	.protected_mode_disable = kbasep_protected_mode_disable
+};
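+
+/* Note: with these native ops, leaving protected mode is implemented as a
+ * full GPU soft/hard reset via kbase_pm_do_reset() above.
+ */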
+
+int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
+{
+	unsigned long irq_flags;
+	int err;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	/* Ensure the clock is on before attempting to access the hardware */
+	if (!kbdev->pm.backend.gpu_powered) {
+		if (kbdev->pm.backend.callback_power_on)
+			kbdev->pm.backend.callback_power_on(kbdev);
+
+		kbdev->pm.backend.gpu_powered = true;
+	}
+
+	/* Ensure interrupts are off to begin with, this also clears any
+	 * outstanding interrupts */
+	kbase_pm_disable_interrupts(kbdev);
+	/* Ensure cache snoops are disabled before reset. */
+	kbase_pm_cache_snoop_disable(kbdev);
+	/* Prepare for the soft-reset */
+	kbdev->pm.backend.reset_done = false;
+
+	/* The cores should be made unavailable due to the reset */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+	if (kbdev->pm.backend.shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF)
+		KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
+				NULL, 0u, (u32)0u);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+
+	/* Soft reset the GPU */
+	if (kbdev->protected_mode_support)
+		err = kbdev->protected_ops->protected_mode_disable(
+				kbdev->protected_dev);
+	else
+		err = kbase_pm_do_reset(kbdev);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+	kbdev->protected_mode = false;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+
+	if (err)
+		goto exit;
+
+	if (flags & PM_HW_ISSUES_DETECT)
+		kbase_pm_hw_issues_detect(kbdev);
+
+	kbase_pm_hw_issues_apply(kbdev);
+	kbase_cache_set_coherency_mode(kbdev, kbdev->system_coherency);
+
+	/* Sanity check protected mode was left after reset */
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
+		u32 gpu_status = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(GPU_STATUS));
+
+		WARN_ON(gpu_status & GPU_STATUS_PROTECTED_MODE_ACTIVE);
+	}
+
+	/* If the cycle counter was in use, re-enable it. PM_ENABLE_IRQS will
+	 * only be unset in the flags when called from kbase_pm_powerup() */
+	if (kbdev->pm.backend.gpu_cycle_counter_requests &&
+						(flags & PM_ENABLE_IRQS)) {
+		kbase_pm_enable_interrupts(kbdev);
+
+		/* Re-enable the counters if we need to */
+		spin_lock_irqsave(
+			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+								irq_flags);
+		if (kbdev->pm.backend.gpu_cycle_counter_requests)
+			kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+					GPU_COMMAND_CYCLE_COUNT_START);
+		spin_unlock_irqrestore(
+			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+								irq_flags);
+
+		kbase_pm_disable_interrupts(kbdev);
+	}
+
+	if (flags & PM_ENABLE_IRQS)
+		kbase_pm_enable_interrupts(kbdev);
+
+exit:
+	if (!kbdev->pm.backend.protected_entry_transition_override) {
+		/* Re-enable GPU hardware counters if we're resetting from
+		 * protected mode.
+		 */
+		reenable_protected_mode_hwcnt(kbdev);
+	}
+
+	return err;
+}
+
+/**
+ * kbase_pm_request_gpu_cycle_counter_do_request - Request cycle counters
+ *
+ * Increase the count of cycle counter users and turn the cycle counters on if
+ * they were previously off
+ *
+ * This function is designed to be called by
+ * kbase_pm_request_gpu_cycle_counter() or
+ * kbase_pm_request_gpu_cycle_counter_l2_is_on() only
+ *
+ * When this function is called the l2 cache must be on - i.e., the GPU must be
+ * on.
+ *
+ * @kbdev:     The kbase device structure of the device
+ */
+static void
+kbase_pm_request_gpu_cycle_counter_do_request(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+									flags);
+
+	++kbdev->pm.backend.gpu_cycle_counter_requests;
+
+	if (kbdev->pm.backend.gpu_cycle_counter_requests == 1)
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+					GPU_COMMAND_CYCLE_COUNT_START);
+
+	spin_unlock_irqrestore(
+			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+									flags);
+}
+
+void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
+								INT_MAX);
+
+	kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter);
+
+void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
+								INT_MAX);
+
+	kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter_l2_is_on);
+
+void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+									flags);
+
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests > 0);
+
+	--kbdev->pm.backend.gpu_cycle_counter_requests;
+
+	if (kbdev->pm.backend.gpu_cycle_counter_requests == 0)
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+					GPU_COMMAND_CYCLE_COUNT_STOP);
+
+	spin_unlock_irqrestore(
+			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+									flags);
+}
+
+void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_release_gpu_cycle_counter);
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
new file mode 100644
index 0000000..6ca6a71
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
@@ -0,0 +1,671 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Power management API definitions used internally by GPU backend
+ */
+
+#ifndef _KBASE_BACKEND_PM_INTERNAL_H_
+#define _KBASE_BACKEND_PM_INTERNAL_H_
+
+#include <mali_kbase_hwaccess_pm.h>
+
+#include "mali_kbase_pm_ca.h"
+#include "mali_kbase_pm_policy.h"
+
+
+/**
+ * kbase_pm_dev_idle - The GPU is idle.
+ *
+ * The OS may choose to turn off idle devices
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_dev_idle(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_dev_activate - The GPU is active.
+ *
+ * The OS should avoid opportunistically turning off the GPU while it is active
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_dev_activate(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_get_present_cores - Get details of the cores that are present in
+ *                              the device.
+ *
+ * This function can be called by the active power policy to return a bitmask
+ * of the cores (of a specified type) present in the GPU device.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid
+ *         pointer)
+ * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of cores present
+ */
+u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_get_active_cores - Get details of the cores that are currently
+ *                             active in the device.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) that are actively processing work (i.e.
+ * turned on *and* busy).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of active cores
+ */
+u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_get_trans_cores - Get details of the cores that are currently
+ *                            transitioning between power states.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) that are currently transitioning between
+ * power states.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of transitioning cores
+ */
+u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_get_ready_cores - Get details of the cores that are currently
+ *                            powered and ready for jobs.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) that are powered and ready for jobs (they may
+ * or may not be currently executing jobs).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of ready cores
+ */
+u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_clock_on - Turn the clock for the device on, and enable device
+ *                     interrupts.
+ *
+ * This function can be used by a power policy to turn the clock for the GPU on.
+ * It should be modified during integration to perform the necessary actions to
+ * ensure that the GPU is fully powered and clocked.
+ *
+ * @kbdev:     The kbase device structure for the device (must be a valid
+ *             pointer)
+ * @is_resume: true if clock on due to resume after suspend, false otherwise
+ */
+void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume);
+
+/**
+ * kbase_pm_clock_off - Disable device interrupts, and turn the clock for the
+ *                      device off.
+ *
+ * This function can be used by a power policy to turn the clock for the GPU
+ * off. It should be modified during integration to perform the necessary
+ * actions to turn the clock off (if this is possible in the integration).
+ *
+ * @kbdev:      The kbase device structure for the device (must be a valid
+ *              pointer)
+ * @is_suspend: true if clock off due to suspend, false otherwise
+ *
+ * Return: true  if clock was turned off, or
+ *         false if clock can not be turned off due to pending page/bus fault
+ *               workers. Caller must flush MMU workqueues and retry
+ */
+bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend);
+
+/**
+ * kbase_pm_enable_interrupts - Enable interrupts on the device.
+ *
+ * Interrupts are also enabled after a call to kbase_pm_clock_on().
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_enable_interrupts(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_disable_interrupts - Disable interrupts on the device.
+ *
+ * This prevents delivery of Power Management interrupts to the CPU so that
+ * kbase_pm_update_state() will not be called from the IRQ handler
+ * until kbase_pm_enable_interrupts() or kbase_pm_clock_on() is called.
+ *
+ * Interrupts are also disabled after a call to kbase_pm_clock_off().
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_disable_interrupts(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_disable_interrupts_nolock - Version of kbase_pm_disable_interrupts()
+ *                                      that does not take the hwaccess_lock
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_init_hw - Initialize the hardware.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @flags: Flags specifying the type of PM init
+ *
+ * This function checks the GPU ID register to ensure that the GPU is supported
+ * by the driver and performs a reset on the device so that it is in a known
+ * state before the device is used.
+ *
+ * Return: 0 if the device is supported and successfully reset.
+ */
+int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags);
+
+/**
+ * kbase_pm_reset_done - The GPU has been reset successfully.
+ *
+ * This function must be called by the GPU interrupt handler when the
+ * RESET_COMPLETED bit is set. It signals to the power management initialization
+ * code that the GPU has been successfully reset.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_reset_done(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_wait_for_desired_state - Wait for the desired power state to be
+ *                                   reached
+ *
+ * Wait for the L2 and shader power state machines to reach the states
+ * corresponding to the values of 'l2_desired' and 'shaders_desired'.
+ *
+ * The usual use-case for this is to ensure cores are 'READY' after performing
+ * a GPU Reset.
+ *
+ * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
+ * because this function will take that lock itself.
+ *
+ * NOTE: This may not wait until the correct state is reached if there is a
+ * power off in progress. To correctly wait for the desired state the caller
+ * must ensure that this is not the case by, for example, calling
+ * kbase_pm_wait_for_poweroff_complete()
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_wait_for_l2_powered - Wait for the L2 cache to be powered on
+ *
+ * Wait for the L2 to be powered on, and for the L2 and shader state machines to
+ * stabilise by reaching the states corresponding to the values of 'l2_desired'
+ * and 'shaders_desired'.
+ *
+ * kbdev->pm.active_count must be non-zero when calling this function.
+ *
+ * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
+ * because this function will take that lock itself.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_cores_state_nolock - Variant of kbase_pm_update_cores_state()
+ *                                      where the caller must hold
+ *                                      kbase_device.pm.power_change_lock
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_state - Update the L2 and shader power state machines
+ * @kbdev: Device pointer
+ */
+void kbase_pm_update_state(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_state_machine_init - Initialize the state machines, primarily the
+ *                               shader poweroff timer
+ * @kbdev: Device pointer
+ *
+ * Return: 0 on success, error code on error
+ */
+int kbase_pm_state_machine_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_state_machine_term - Clean up the PM state machines' data
+ * @kbdev: Device pointer
+ */
+void kbase_pm_state_machine_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_cores_state - Update the desired state of shader cores from
+ *                               the Power Policy, and begin any power
+ *                               transitions.
+ *
+ * This function will update the desired_xx_state members of
+ * struct kbase_pm_device_data by calling into the current Power Policy. It will
+ * then begin power transitions to make the hardware achieve the desired shader
+ * core state.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_update_cores_state(struct kbase_device *kbdev);
+
+/**
+ * kbasep_pm_metrics_init - Initialize the metrics gathering framework.
+ *
+ * This must be called before other metric gathering APIs are called.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 on success, error code on error
+ */
+int kbasep_pm_metrics_init(struct kbase_device *kbdev);
+
+/**
+ * kbasep_pm_metrics_term - Terminate the metrics gathering framework.
+ *
+ * This must be called when metric gathering is no longer required. It is an
+ * error to call any metrics gathering function (other than
+ * kbasep_pm_metrics_init()) after calling this function.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbasep_pm_metrics_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_report_vsync - Function to be called by the frame buffer driver to
+ *                         update the vsync metric.
+ *
+ * This function should be called by the frame buffer driver to update whether
+ * the system is hitting the vsync target or not. buffer_updated should be true
+ * if the vsync corresponded with a new frame being displayed, otherwise it
+ * should be false. This function does not need to be called every vsync, but
+ * only when the value of @buffer_updated differs from a previous call.
+ *
+ * @kbdev:          The kbase device structure for the device (must be a
+ *                  valid pointer)
+ * @buffer_updated: True if the buffer has been updated on this VSync,
+ *                  false otherwise
+ */
+void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated);
+
+/**
+ * kbase_pm_get_dvfs_action - Determine whether the DVFS system should change
+ *                            the clock speed of the GPU.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function should be called regularly by the DVFS system to check whether
+ * the clock speed of the GPU needs updating.
+ */
+void kbase_pm_get_dvfs_action(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_request_gpu_cycle_counter - Mark that the GPU cycle counter is
+ *                                      needed
+ *
+ * If the caller is the first caller then the GPU cycle counters will be
+ * enabled along with the l2 cache.
+ *
+ * The GPU must be powered when calling this function (i.e.
+ * kbase_pm_context_active() must have been called).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_request_gpu_cycle_counter_l2_is_on - Mark GPU cycle counter is
+ *                                               needed (l2 cache already on)
+ *
+ * This is a version of the above function
+ * (kbase_pm_request_gpu_cycle_counter()) suitable for being called when the
+ * l2 cache is known to be on and assured to remain on until the subsequent
+ * call to kbase_pm_release_gpu_cycle_counter(), such as when a job is
+ * submitted. It does not sleep and can be called from atomic functions.
+ *
+ * The GPU must be powered when calling this function (i.e.
+ * kbase_pm_context_active() must have been called) and the l2 cache must be
+ * powered on.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_release_gpu_cycle_counter - Mark that the GPU cycle counter is no
+ *                                      longer in use
+ *
+ * If the caller is the last caller then the GPU cycle counters will be
+ * disabled. A request must have been made before a call to this.
+ *
+ * Caller must not hold the hwaccess_lock, as it will be taken in this function.
+ * If the caller is already holding this lock then
+ * kbase_pm_release_gpu_cycle_counter_nolock() must be used instead.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_release_gpu_cycle_counter_nolock - Version of kbase_pm_release_gpu_cycle_counter()
+ *                                             that does not take hwaccess_lock
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev);
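+
+/*
+ * Usage sketch for the cycle-counter request/release calls above (an
+ * illustration, not code lifted from a caller): requests must be balanced by
+ * releases, and the GPU must stay powered across the whole window:
+ *
+ *	kbase_pm_request_gpu_cycle_counter(kbdev);
+ *	... read CYCLE_COUNT_LO / CYCLE_COUNT_HI ...
+ *	kbase_pm_release_gpu_cycle_counter(kbdev);
+ */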
+
+/**
+ * kbase_pm_wait_for_poweroff_complete - Wait for the poweroff workqueue to
+ *                                       complete
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_runtime_init - Initialize runtime-pm for Mali GPU platform device
+ *
+ * Setup the power management callbacks and initialize/enable the runtime-pm
+ * for the Mali GPU platform device, using the callback function. This must be
+ * called before the kbase_pm_register_access_enable() function.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 on success, error code on error
+ */
+int kbase_pm_runtime_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_runtime_term - Disable runtime-pm for Mali GPU platform device
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_runtime_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_register_access_enable - Enable access to GPU registers
+ *
+ * Enables access to the GPU registers before power management has powered up
+ * the GPU with kbase_pm_powerup().
+ *
+ * This results in the power management callbacks provided in the driver
+ * configuration to get called to turn on power and/or clocks to the GPU. See
+ * kbase_pm_callback_conf.
+ *
+ * This should only be used before power management is powered up with
+ * kbase_pm_powerup()
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_register_access_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_register_access_disable - Disable early register access
+ *
+ * Disables access to the GPU registers enabled earlier by a call to
+ * kbase_pm_register_access_enable().
+ *
+ * This results in the power management callbacks provided in the driver
+ * configuration to get called to turn off power and/or clocks to the GPU. See
+ * kbase_pm_callback_conf
+ *
+ * This should only be used before power management is powered up with
+ * kbase_pm_powerup()
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_register_access_disable(struct kbase_device *kbdev);
+
+/* NOTE: kbase_pm_is_suspending is in mali_kbase.h, because it is an inline
+ * function */
+
+/**
+ * kbase_pm_metrics_is_active - Check if the power management metrics
+ *                              collection is active.
+ *
+ * Note that this returns whether the power management metrics collection was
+ * active at the time of calling; the metrics collection enable may change
+ * state after the call returns.
+ *
+ * The caller must handle the consequence that the state may have changed.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * Return: true if metrics collection was active else false.
+ */
+bool kbase_pm_metrics_is_active(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_do_poweron - Power on the GPU, and any cores that are requested.
+ *
+ * @kbdev:     The kbase device structure for the device (must be a valid
+ *             pointer)
+ * @is_resume: true if power on due to resume after suspend,
+ *             false otherwise
+ */
+void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume);
+
+/**
+ * kbase_pm_do_poweroff - Power off the GPU, and any cores that have been
+ *                        requested.
+ *
+ * @kbdev:      The kbase device structure for the device (must be a valid
+ *              pointer)
+ * @is_suspend: true if power off due to suspend,
+ *              false otherwise
+ */
+void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend);
+
+#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
+void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
+			       struct kbasep_pm_metrics *last,
+			       struct kbasep_pm_metrics *diff);
+#endif /* defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS) */
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+
+/**
+ * kbase_platform_dvfs_event - Report utilisation to DVFS code
+ *
+ * Function provided by platform specific code when DVFS is enabled to allow
+ * the power management metrics system to report utilisation.
+ *
+ * @kbdev:         The kbase device structure for the device (must be a
+ *                 valid pointer)
+ * @utilisation:   The current calculated utilisation by the metrics system.
+ * @util_gl_share: The current calculated gl share of utilisation.
+ * @util_cl_share: The current calculated cl share of utilisation per core
+ *                 group.
+ * Return:         0 on failure, non-zero on success.
+ */
+int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
+	u32 util_gl_share, u32 util_cl_share[2]);
+#endif
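+
+/*
+ * A minimal platform-side sketch of kbase_platform_dvfs_event() (an
+ * assumption for illustration: this platform merely logs the figures and
+ * reports success; a real implementation would feed them to its governor):
+ *
+ *	int kbase_platform_dvfs_event(struct kbase_device *kbdev,
+ *			u32 utilisation, u32 util_gl_share,
+ *			u32 util_cl_share[2])
+ *	{
+ *		dev_dbg(kbdev->dev, "util %u%% (gl %u%%, cl %u%%/%u%%)\n",
+ *			utilisation, util_gl_share,
+ *			util_cl_share[0], util_cl_share[1]);
+ *		return 1;
+ *	}
+ */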
+
+void kbase_pm_power_changed(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_metrics_update - Inform the metrics system that an atom is either
+ *                           about to be run or has just completed.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @now:   Pointer to the timestamp of the change, or NULL to use current time
+ *
+ * Caller must hold hwaccess_lock
+ */
+void kbase_pm_metrics_update(struct kbase_device *kbdev,
+				ktime_t *now);
+
+/**
+ * kbase_pm_cache_snoop_enable - Allow CPU snoops on the GPU
+ * If the GPU does not have coherency this is a no-op
+ * @kbdev:	Device pointer
+ *
+ * This function should be called after L2 power up.
+ */
+void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_cache_snoop_disable - Prevent CPU snoops on the GPU
+ * If the GPU does not have coherency this is a no-op
+ * @kbdev:	Device pointer
+ *
+ * This function should be called before L2 power off.
+ */
+void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev);
+
+#ifdef CONFIG_MALI_DEVFREQ
+/**
+ * kbase_devfreq_set_core_mask - Set devfreq core mask
+ * @kbdev:     Device pointer
+ * @core_mask: New core mask
+ *
+ * This function is used by devfreq to change the available core mask as
+ * required by Dynamic Core Scaling.
+ */
+void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask);
+#endif
+
+/**
+ * kbase_pm_reset_start_locked - Signal that GPU reset has started
+ * @kbdev: Device pointer
+ *
+ * Normal power management operation will be suspended until the reset has
+ * completed.
+ *
+ * Caller must hold hwaccess_lock.
+ */
+void kbase_pm_reset_start_locked(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_reset_complete - Signal that GPU reset has completed
+ * @kbdev: Device pointer
+ *
+ * Normal power management operation will be resumed. The power manager will
+ * re-evaluate what cores are needed and power on or off as required.
+ */
+void kbase_pm_reset_complete(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_protected_override_enable - Enable the protected mode override
+ * @kbdev: Device pointer
+ *
+ * When the protected mode override is enabled, all shader cores are requested
+ * to power down, and the L2 power state can be controlled by
+ * kbase_pm_protected_l2_override().
+ *
+ * Caller must hold hwaccess_lock.
+ */
+void kbase_pm_protected_override_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_protected_override_disable - Disable the protected mode override
+ * @kbdev: Device pointer
+ *
+ * Caller must hold hwaccess_lock.
+ */
+void kbase_pm_protected_override_disable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_protected_l2_override - Control the protected mode L2 override
+ * @kbdev: Device pointer
+ * @override: true to enable the override, false to disable
+ *
+ * When the driver is transitioning in or out of protected mode, the L2 cache is
+ * forced to power off. This can be overridden to force the L2 cache to power
+ * on. This is required to change coherency settings on some GPUs.
+ */
+void kbase_pm_protected_l2_override(struct kbase_device *kbdev, bool override);
+
+/**
+ * kbase_pm_protected_entry_override_enable - Enable the protected mode entry
+ *                                            override
+ * @kbdev: Device pointer
+ *
+ * Initiate a GPU reset and enable the protected mode entry override flag if
+ * the l2_always_on workaround is enabled and the platform is fully coherent.
+ * If a GPU reset is already ongoing then the protected mode entry override
+ * flag will not be enabled and the function will have to be called again.
+ *
+ * When the protected mode entry override flag is enabled to power down the L2
+ * via GPU reset, the GPU reset handling behaviour changes. For example, the
+ * call to kbase_backend_reset() is skipped, HW counters are not re-enabled,
+ * and the L2 isn't powered up again after the reset.
+ * This is needed only as a workaround for a HW issue where an explicit power
+ * down of the L2 causes a glitch. To enter protected mode on fully coherent
+ * platforms the L2 needs to be powered down to switch to IO coherency mode,
+ * so a GPU reset is used to power down the L2 and avoid the glitch. Hence,
+ * this function does nothing on systems where the glitch issue isn't present.
+ *
+ * Caller must hold hwaccess_lock. Should be only called during the transition
+ * to enter protected mode.
+ *
+ * Return: -EAGAIN if a GPU reset was required for the glitch workaround but
+ * was already ongoing, otherwise 0.
+ */
+int kbase_pm_protected_entry_override_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_protected_entry_override_disable - Disable the protected mode entry
+ *                                             override
+ * @kbdev: Device pointer
+ *
+ * This shall be called once the L2 has powered down and the switch to IO
+ * coherency mode has been made. As with
+ * kbase_pm_protected_entry_override_enable(), this function does nothing on
+ * systems where the glitch issue isn't present.
+ *
+ * Caller must hold hwaccess_lock. Should be only called during the transition
+ * to enter protected mode.
+ */
+void kbase_pm_protected_entry_override_disable(struct kbase_device *kbdev);
+
+/* If true, the driver should explicitly control corestack power management,
+ * instead of relying on the Power Domain Controller.
+ */
+extern bool corestack_driver_control;
+
+/* If true, disable powering-down of individual cores, and just power-down at
+ * the top-level using platform-specific code.
+ * If false, use the expected behaviour of controlling the individual cores
+ * from within the driver.
+ */
+extern bool platform_power_down_only;
+
+#endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_l2_states.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_l2_states.h
new file mode 100644
index 0000000..12cb051
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_l2_states.h
@@ -0,0 +1,38 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Backend-specific Power Manager level 2 cache state definitions.
+ * The function-like macro KBASEP_L2_STATE() must be defined before including
+ * this header file. This header file can be included multiple times in the
+ * same compilation unit with different definitions of KBASEP_L2_STATE().
+ */
+KBASEP_L2_STATE(OFF)
+KBASEP_L2_STATE(PEND_ON)
+KBASEP_L2_STATE(RESTORE_CLOCKS)
+KBASEP_L2_STATE(ON_HWCNT_ENABLE)
+KBASEP_L2_STATE(ON)
+KBASEP_L2_STATE(ON_HWCNT_DISABLE)
+KBASEP_L2_STATE(SLOW_DOWN_CLOCKS)
+KBASEP_L2_STATE(POWER_DOWN)
+KBASEP_L2_STATE(PEND_OFF)
+KBASEP_L2_STATE(RESET_WAIT)
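+
+/*
+ * Consumer sketch (hedged; the actual consumer lives elsewhere in the
+ * driver): an enum of L2 states can be generated by defining the
+ * function-like macro before including this header, e.g.:
+ *
+ *	#define KBASEP_L2_STATE(n) KBASE_L2_ ## n,
+ *	enum kbase_l2_core_state {
+ *	#include "mali_kbase_pm_l2_states.h"
+ *	};
+ *	#undef KBASEP_L2_STATE
+ */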
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c
new file mode 100644
index 0000000..ae494b0
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c
@@ -0,0 +1,314 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Metrics for power management
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_jm_rb.h>
+#include <backend/gpu/mali_kbase_pm_defs.h>
+
+/* When VSync is being hit aim for utilisation between 70-90% */
+#define KBASE_PM_VSYNC_MIN_UTILISATION          70
+#define KBASE_PM_VSYNC_MAX_UTILISATION          90
+/* Otherwise aim for 10-40% */
+#define KBASE_PM_NO_VSYNC_MIN_UTILISATION       10
+#define KBASE_PM_NO_VSYNC_MAX_UTILISATION       40
+
+/* Shift used for kbasep_pm_metrics_data.time_busy/idle - units of (1 << 8) ns
+ * This gives a maximum period between samples of 2^(32+8)/100 ns = slightly
+ * under 11s. Exceeding this will cause overflow */
+#define KBASE_PM_TIME_SHIFT			8
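+
+/* Worked check of the bound above: a u32 counting (1 << 8) ns units wraps at
+ * 2^(32+8) ns (~1100 s); the extra /100 keeps the "100 * time_busy"
+ * percentage arithmetic in kbase_pm_get_dvfs_action() within a u32, which is
+ * where the ~11 s sampling limit comes from.
+ */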
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
+{
+	unsigned long flags;
+	struct kbasep_pm_metrics_state *metrics;
+
+	KBASE_DEBUG_ASSERT(timer != NULL);
+
+	metrics = container_of(timer, struct kbasep_pm_metrics_state, timer);
+	kbase_pm_get_dvfs_action(metrics->kbdev);
+
+	spin_lock_irqsave(&metrics->lock, flags);
+
+	if (metrics->timer_active)
+		hrtimer_start(timer,
+			HR_TIMER_DELAY_MSEC(metrics->kbdev->pm.dvfs_period),
+			HRTIMER_MODE_REL);
+
+	spin_unlock_irqrestore(&metrics->lock, flags);
+
+	return HRTIMER_NORESTART;
+}
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+int kbasep_pm_metrics_init(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	kbdev->pm.backend.metrics.kbdev = kbdev;
+
+	kbdev->pm.backend.metrics.time_period_start = ktime_get();
+	kbdev->pm.backend.metrics.gpu_active = false;
+	kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
+	kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
+	kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
+	kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
+	kbdev->pm.backend.metrics.active_gl_ctx[2] = 0;
+
+	kbdev->pm.backend.metrics.values.time_busy = 0;
+	kbdev->pm.backend.metrics.values.time_idle = 0;
+	kbdev->pm.backend.metrics.values.busy_cl[0] = 0;
+	kbdev->pm.backend.metrics.values.busy_cl[1] = 0;
+	kbdev->pm.backend.metrics.values.busy_gl = 0;
+
+	spin_lock_init(&kbdev->pm.backend.metrics.lock);
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	hrtimer_init(&kbdev->pm.backend.metrics.timer, CLOCK_MONOTONIC,
+							HRTIMER_MODE_REL);
+	kbdev->pm.backend.metrics.timer.function = dvfs_callback;
+
+	kbase_pm_metrics_start(kbdev);
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbasep_pm_metrics_init);
+
+void kbasep_pm_metrics_term(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+	kbdev->pm.backend.metrics.timer_active = false;
+	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+
+	hrtimer_cancel(&kbdev->pm.backend.metrics.timer);
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+}
+
+KBASE_EXPORT_TEST_API(kbasep_pm_metrics_term);
+
+/* caller needs to hold kbdev->pm.backend.metrics.lock before calling this
+ * function
+ */
+static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
+								ktime_t now)
+{
+	ktime_t diff;
+
+	lockdep_assert_held(&kbdev->pm.backend.metrics.lock);
+
+	diff = ktime_sub(now, kbdev->pm.backend.metrics.time_period_start);
+	if (ktime_to_ns(diff) < 0)
+		return;
+
+	if (kbdev->pm.backend.metrics.gpu_active) {
+		u32 ns_time = (u32) (ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT);
+
+		kbdev->pm.backend.metrics.values.time_busy += ns_time;
+		if (kbdev->pm.backend.metrics.active_cl_ctx[0])
+			kbdev->pm.backend.metrics.values.busy_cl[0] += ns_time;
+		if (kbdev->pm.backend.metrics.active_cl_ctx[1])
+			kbdev->pm.backend.metrics.values.busy_cl[1] += ns_time;
+		if (kbdev->pm.backend.metrics.active_gl_ctx[0])
+			kbdev->pm.backend.metrics.values.busy_gl += ns_time;
+		if (kbdev->pm.backend.metrics.active_gl_ctx[1])
+			kbdev->pm.backend.metrics.values.busy_gl += ns_time;
+		if (kbdev->pm.backend.metrics.active_gl_ctx[2])
+			kbdev->pm.backend.metrics.values.busy_gl += ns_time;
+	} else {
+		kbdev->pm.backend.metrics.values.time_idle += (u32) (ktime_to_ns(diff)
+							>> KBASE_PM_TIME_SHIFT);
+	}
+
+	kbdev->pm.backend.metrics.time_period_start = now;
+}
+
+#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
+void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
+			       struct kbasep_pm_metrics *last,
+			       struct kbasep_pm_metrics *diff)
+{
+	struct kbasep_pm_metrics *cur = &kbdev->pm.backend.metrics.values;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+	kbase_pm_get_dvfs_utilisation_calc(kbdev, ktime_get());
+
+	memset(diff, 0, sizeof(*diff));
+	diff->time_busy = cur->time_busy - last->time_busy;
+	diff->time_idle = cur->time_idle - last->time_idle;
+	diff->busy_cl[0] = cur->busy_cl[0] - last->busy_cl[0];
+	diff->busy_cl[1] = cur->busy_cl[1] - last->busy_cl[1];
+	diff->busy_gl = cur->busy_gl - last->busy_gl;
+
+	*last = *cur;
+
+	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+}
+KBASE_EXPORT_TEST_API(kbase_pm_get_dvfs_metrics);
+#endif
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+void kbase_pm_get_dvfs_action(struct kbase_device *kbdev)
+{
+	int utilisation, util_gl_share;
+	int util_cl_share[2];
+	int busy;
+	struct kbasep_pm_metrics *diff;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	diff = &kbdev->pm.backend.metrics.dvfs_diff;
+
+	kbase_pm_get_dvfs_metrics(kbdev, &kbdev->pm.backend.metrics.dvfs_last,
+				  diff);
+
+	utilisation = (100 * diff->time_busy) /
+			max(diff->time_busy + diff->time_idle, 1u);
+
+	busy = max(diff->busy_gl + diff->busy_cl[0] + diff->busy_cl[1], 1u);
+	util_gl_share = (100 * diff->busy_gl) / busy;
+	util_cl_share[0] = (100 * diff->busy_cl[0]) / busy;
+	util_cl_share[1] = (100 * diff->busy_cl[1]) / busy;
+
+	kbase_platform_dvfs_event(kbdev, utilisation, util_gl_share,
+				  util_cl_share);
+}
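+
+/* Worked example of the arithmetic above (illustrative figures): with
+ * diff->time_busy = 300 and diff->time_idle = 700, utilisation is
+ * 100 * 300 / 1000 = 30. The gl/cl shares are then each pipeline's busy
+ * time as a percentage of the total busy time, not of the whole period.
+ */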
+
+bool kbase_pm_metrics_is_active(struct kbase_device *kbdev)
+{
+	bool isactive;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+	isactive = kbdev->pm.backend.metrics.timer_active;
+	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+
+	return isactive;
+}
+KBASE_EXPORT_TEST_API(kbase_pm_metrics_is_active);
+
+void kbase_pm_metrics_start(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+	kbdev->pm.backend.metrics.timer_active = true;
+	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+	hrtimer_start(&kbdev->pm.backend.metrics.timer,
+			HR_TIMER_DELAY_MSEC(kbdev->pm.dvfs_period),
+			HRTIMER_MODE_REL);
+}
+
+void kbase_pm_metrics_stop(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+	kbdev->pm.backend.metrics.timer_active = false;
+	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+	hrtimer_cancel(&kbdev->pm.backend.metrics.timer);
+}
+
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+/**
+ * kbase_pm_metrics_active_calc - Update PM active counts based on currently
+ *                                running atoms
+ * @kbdev: Device pointer
+ *
+ * The caller must hold kbdev->pm.backend.metrics.lock
+ */
+static void kbase_pm_metrics_active_calc(struct kbase_device *kbdev)
+{
+	int js;
+
+	lockdep_assert_held(&kbdev->pm.backend.metrics.lock);
+
+	kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
+	kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
+	kbdev->pm.backend.metrics.active_gl_ctx[2] = 0;
+	kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
+	kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
+	kbdev->pm.backend.metrics.gpu_active = false;
+
+	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+		struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0);
+
+		/* Head atom may have just completed, so if it isn't running
+		 * then try the next atom */
+		if (katom && katom->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED)
+			katom = kbase_gpu_inspect(kbdev, js, 1);
+
+		if (katom && katom->gpu_rb_state ==
+				KBASE_ATOM_GPU_RB_SUBMITTED) {
+			if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+				int device_nr = (katom->core_req &
+					BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)
+						? katom->device_nr : 0;
+				if (!WARN_ON(device_nr >= 2))
+					kbdev->pm.backend.metrics.
+						active_cl_ctx[device_nr] = 1;
+			} else {
+				kbdev->pm.backend.metrics.active_gl_ctx[js] = 1;
+			}
+			kbdev->pm.backend.metrics.gpu_active = true;
+		}
+	}
+}
+
+/* called when job is submitted to or removed from a GPU slot */
+void kbase_pm_metrics_update(struct kbase_device *kbdev, ktime_t *timestamp)
+{
+	unsigned long flags;
+	ktime_t now;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+
+	if (!timestamp) {
+		now = ktime_get();
+		timestamp = &now;
+	}
+
+	/* Track how long CL and/or GL jobs have been busy for */
+	kbase_pm_get_dvfs_utilisation_calc(kbdev, *timestamp);
+
+	kbase_pm_metrics_active_calc(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c
new file mode 100644
index 0000000..3152424
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c
@@ -0,0 +1,253 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Power policy API implementations
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_pm.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+static const struct kbase_pm_policy *const all_policy_list[] = {
+#ifdef CONFIG_MALI_NO_MALI
+	&kbase_pm_always_on_policy_ops,
+	&kbase_pm_coarse_demand_policy_ops,
+#if !MALI_CUSTOMER_RELEASE
+	&kbase_pm_always_on_demand_policy_ops,
+#endif
+#else				/* CONFIG_MALI_NO_MALI */
+	&kbase_pm_coarse_demand_policy_ops,
+#if !MALI_CUSTOMER_RELEASE
+	&kbase_pm_always_on_demand_policy_ops,
+#endif
+	&kbase_pm_always_on_policy_ops
+#endif /* CONFIG_MALI_NO_MALI */
+};
+
+static void generate_filtered_policy_list(struct kbase_device *kbdev)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(all_policy_list); ++i) {
+		const struct kbase_pm_policy *pol = all_policy_list[i];
+
+		BUILD_BUG_ON(ARRAY_SIZE(all_policy_list) >
+			KBASE_PM_MAX_NUM_POLICIES);
+		if (platform_power_down_only &&
+				(pol->flags & KBASE_PM_POLICY_FLAG_DISABLED_WITH_POWER_DOWN_ONLY))
+			continue;
+
+		kbdev->policy_list[kbdev->policy_count++] = pol;
+	}
+}
+
+int kbase_pm_policy_init(struct kbase_device *kbdev)
+{
+	generate_filtered_policy_list(kbdev);
+	if (kbdev->policy_count == 0)
+		return -EINVAL;
+
+	kbdev->pm.backend.pm_current_policy = kbdev->policy_list[0];
+	kbdev->pm.backend.pm_current_policy->init(kbdev);
+
+	return 0;
+}
+
+void kbase_pm_policy_term(struct kbase_device *kbdev)
+{
+	kbdev->pm.backend.pm_current_policy->term(kbdev);
+}
+
+void kbase_pm_update_active(struct kbase_device *kbdev)
+{
+	struct kbase_pm_device_data *pm = &kbdev->pm;
+	struct kbase_pm_backend_data *backend = &pm->backend;
+	unsigned long flags;
+	bool active;
+
+	lockdep_assert_held(&pm->lock);
+
+	/* pm_current_policy will never be NULL while pm.lock is held */
+	KBASE_DEBUG_ASSERT(backend->pm_current_policy);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	active = backend->pm_current_policy->get_core_active(kbdev);
+	WARN((kbase_pm_is_active(kbdev) && !active),
+		"GPU is active but policy '%s' is indicating that it can be powered off",
+		kbdev->pm.backend.pm_current_policy->name);
+
+	if (active) {
+		/* Power on the GPU and any cores requested by the policy */
+		if (!pm->backend.invoke_poweroff_wait_wq_when_l2_off &&
+				pm->backend.poweroff_wait_in_progress) {
+			KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+			pm->backend.poweron_required = true;
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		} else {
+			/* Cancel the invocation of
+			 * kbase_pm_gpu_poweroff_wait_wq() from the L2 state
+			 * machine. This is safe - if
+			 * invoke_poweroff_wait_wq_when_l2_off is true, then
+			 * the poweroff work hasn't even been queued yet,
+			 * meaning we can go straight to powering on.
+			 */
+			pm->backend.invoke_poweroff_wait_wq_when_l2_off = false;
+			pm->backend.poweroff_wait_in_progress = false;
+			pm->backend.l2_desired = true;
+
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+			kbase_pm_do_poweron(kbdev, false);
+		}
+	} else {
+		/* It is an error for the power policy to power off the GPU
+		 * when there are contexts active */
+		KBASE_DEBUG_ASSERT(pm->active_count == 0);
+
+		/* Request power off */
+		if (pm->backend.gpu_powered) {
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+			/* Power off the GPU immediately */
+			kbase_pm_do_poweroff(kbdev, false);
+		} else {
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		}
+	}
+}
+
+void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
+{
+	bool shaders_desired;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (kbdev->pm.backend.pm_current_policy == NULL)
+		return;
+	if (kbdev->pm.backend.poweroff_wait_in_progress)
+		return;
+
+	if (kbdev->pm.backend.protected_transition_override)
+		/* We are trying to change in/out of protected mode - force all
+		 * cores off so that the L2 powers down */
+		shaders_desired = false;
+	else
+		shaders_desired = kbdev->pm.backend.pm_current_policy->shaders_needed(kbdev);
+
+	if (kbdev->pm.backend.shaders_desired != shaders_desired) {
+		KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, NULL, 0u,
+				(u32)kbdev->pm.backend.shaders_desired);
+
+		kbdev->pm.backend.shaders_desired = shaders_desired;
+		kbase_pm_update_state(kbdev);
+	}
+}
+
+void kbase_pm_update_cores_state(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	kbase_pm_update_cores_state_nolock(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+int kbase_pm_list_policies(struct kbase_device *kbdev,
+	const struct kbase_pm_policy * const **list)
+{
+	WARN_ON(kbdev->policy_count == 0);
+	if (list)
+		*list = kbdev->policy_list;
+
+	return kbdev->policy_count;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_list_policies);
+
+const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	return kbdev->pm.backend.pm_current_policy;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_policy);
+
+void kbase_pm_set_policy(struct kbase_device *kbdev,
+				const struct kbase_pm_policy *new_policy)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	const struct kbase_pm_policy *old_policy;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(new_policy != NULL);
+
+	KBASE_TRACE_ADD(kbdev, PM_SET_POLICY, NULL, NULL, 0u, new_policy->id);
+
+	/* During a policy change we pretend the GPU is active */
+	/* A suspend won't happen here, because we're in a syscall from a
+	 * userspace thread */
+	kbase_pm_context_active(kbdev);
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+
+	/* Remove the policy to prevent IRQ handlers from working on it */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	old_policy = kbdev->pm.backend.pm_current_policy;
+	kbdev->pm.backend.pm_current_policy = NULL;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u,
+								old_policy->id);
+	if (old_policy->term)
+		old_policy->term(kbdev);
+
+	KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u,
+								new_policy->id);
+	if (new_policy->init)
+		new_policy->init(kbdev);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbdev->pm.backend.pm_current_policy = new_policy;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* If any core power state changes were previously attempted, but
+	 * couldn't be made because the policy was changing (current_policy was
+	 * NULL), then re-try them here. */
+	kbase_pm_update_active(kbdev);
+	kbase_pm_update_cores_state(kbdev);
+
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	/* Now the policy change is finished, we release our fake context active
+	 * reference */
+	kbase_pm_context_idle(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_set_policy);
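+
+/*
+ * Illustrative caller sketch (an assumption modelled on a sysfs store
+ * handler, not code from this file): select a policy by name from the
+ * filtered list and install it. The name "coarse_demand" is only an example.
+ *
+ *	const struct kbase_pm_policy *const *policies;
+ *	int i, n = kbase_pm_list_policies(kbdev, &policies);
+ *
+ *	for (i = 0; i < n; i++)
+ *		if (!strcmp(policies[i]->name, "coarse_demand"))
+ *			kbase_pm_set_policy(kbdev, policies[i]);
+ */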
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h
new file mode 100644
index 0000000..966fce7
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h
@@ -0,0 +1,109 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Power policy API definitions
+ */
+
+#ifndef _KBASE_PM_POLICY_H_
+#define _KBASE_PM_POLICY_H_
+
+/**
+ * kbase_pm_policy_init - Initialize power policy framework
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Must be called before calling any other policy function
+ *
+ * Return: 0 if the power policy framework was successfully
+ *         initialized, -errno otherwise.
+ */
+int kbase_pm_policy_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_policy_term - Terminate power policy framework
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_policy_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_active - Update the active power state of the GPU
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Calls into the current power policy
+ */
+void kbase_pm_update_active(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_cores - Update the desired core state of the GPU
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Calls into the current power policy
+ */
+void kbase_pm_update_cores(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_cores_requested - Check that a power request has been locked into
+ *                            the HW.
+ * @kbdev:           Kbase device
+ * @shader_required: true if shaders are required
+ *
+ * Called by the scheduler to check if a power on request has been locked into
+ * the HW.
+ *
+ * Note that there is no guarantee that the cores are actually ready; however,
+ * once the request has been locked into the HW it is safe to submit work,
+ * since the HW will wait for the transition to ready.
+ *
+ * A reference must first be taken prior to making this call.
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * Return: true if the request to the HW was successfully made, or false if
+ *         the request is still pending.
+ */
+static inline bool kbase_pm_cores_requested(struct kbase_device *kbdev,
+		bool shader_required)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* If the L2 & tiler are not on or pending, then the tiler is not yet
+	 * available, and shaders are definitely not powered.
+	 */
+	if (kbdev->pm.backend.l2_state != KBASE_L2_PEND_ON &&
+			kbdev->pm.backend.l2_state != KBASE_L2_ON &&
+			kbdev->pm.backend.l2_state != KBASE_L2_ON_HWCNT_ENABLE)
+		return false;
+
+	if (shader_required &&
+			kbdev->pm.backend.shaders_state != KBASE_SHADERS_PEND_ON_CORESTACK_ON &&
+			kbdev->pm.backend.shaders_state != KBASE_SHADERS_ON_CORESTACK_ON &&
+			kbdev->pm.backend.shaders_state != KBASE_SHADERS_ON_CORESTACK_ON_RECHECK)
+		return false;
+
+	return true;
+}
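+
+/*
+ * Illustrative scheduler-side sketch (an assumption; submit_job_to_slot() is
+ * a hypothetical helper, not a driver function): submission is gated on the
+ * power request being locked into the HW.
+ *
+ *	lockdep_assert_held(&kbdev->hwaccess_lock);
+ *	if (kbase_pm_cores_requested(kbdev, true))
+ *		submit_job_to_slot(kbdev, js);
+ */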
+
+#endif /* _KBASE_PM_POLICY_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_shader_states.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_shader_states.h
new file mode 100644
index 0000000..3f89eb5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_shader_states.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Backend-specific Power Manager shader core state definitions.
+ * The function-like macro KBASEP_SHADER_STATE() must be defined before
+ * including this header file. This header file can be included multiple
+ * times in the same compilation unit with different definitions of
+ * KBASEP_SHADER_STATE().
+ */
+KBASEP_SHADER_STATE(OFF_CORESTACK_OFF)
+KBASEP_SHADER_STATE(OFF_CORESTACK_PEND_ON)
+KBASEP_SHADER_STATE(PEND_ON_CORESTACK_ON)
+KBASEP_SHADER_STATE(ON_CORESTACK_ON)
+KBASEP_SHADER_STATE(ON_CORESTACK_ON_RECHECK)
+KBASEP_SHADER_STATE(WAIT_OFF_CORESTACK_ON)
+KBASEP_SHADER_STATE(WAIT_FINISHED_CORESTACK_ON)
+KBASEP_SHADER_STATE(L2_FLUSHING_CORESTACK_ON)
+KBASEP_SHADER_STATE(READY_OFF_CORESTACK_ON)
+KBASEP_SHADER_STATE(PEND_OFF_CORESTACK_ON)
+KBASEP_SHADER_STATE(OFF_CORESTACK_PEND_OFF)
+KBASEP_SHADER_STATE(OFF_CORESTACK_OFF_TIMER_PEND_OFF)
+KBASEP_SHADER_STATE(RESET_WAIT)
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c
new file mode 100644
index 0000000..0e17dc0
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c
@@ -0,0 +1,111 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016,2018-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
+				u64 *system_time, struct timespec *ts)
+{
+	u32 hi1, hi2;
+
+	kbase_pm_request_gpu_cycle_counter(kbdev);
+
+	if (cycle_counter) {
+		/* Read hi, lo, hi to ensure a coherent u64 */
+		do {
+			hi1 = kbase_reg_read(kbdev,
+					     GPU_CONTROL_REG(CYCLE_COUNT_HI));
+			*cycle_counter = kbase_reg_read(kbdev,
+					     GPU_CONTROL_REG(CYCLE_COUNT_LO));
+			hi2 = kbase_reg_read(kbdev,
+					     GPU_CONTROL_REG(CYCLE_COUNT_HI));
+		} while (hi1 != hi2);
+		*cycle_counter |= (((u64) hi1) << 32);
+	}
+
+	if (system_time) {
+		/* Read hi, lo, hi to ensure a coherent u64 */
+		do {
+			hi1 = kbase_reg_read(kbdev,
+					     GPU_CONTROL_REG(TIMESTAMP_HI));
+			*system_time = kbase_reg_read(kbdev,
+					     GPU_CONTROL_REG(TIMESTAMP_LO));
+			hi2 = kbase_reg_read(kbdev,
+					     GPU_CONTROL_REG(TIMESTAMP_HI));
+		} while (hi1 != hi2);
+		*system_time |= (((u64) hi1) << 32);
+	}
+
+	/* Record the CPU's idea of current time */
+	if (ts != NULL)
+		getrawmonotonic(ts);
+
+	kbase_pm_release_gpu_cycle_counter(kbdev);
+}
+
+/**
+ * kbase_wait_write_flush -  Wait for GPU write flush
+ * @kbdev: Kbase device
+ *
+ * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
+ * its write buffer.
+ *
+ * Only in use for BASE_HW_ISSUE_6367
+ *
+ * Note: if GPU resets occur then the counters are reset to zero, so the delay
+ * may not be as expected.
+ */
+#ifndef CONFIG_MALI_NO_MALI
+void kbase_wait_write_flush(struct kbase_device *kbdev)
+{
+	u32 base_count = 0;
+
+	/*
+	 * The caller must be holding onto the kctx or the call is from
+	 * userspace.
+	 */
+	kbase_pm_context_active(kbdev);
+	kbase_pm_request_gpu_cycle_counter(kbdev);
+
+	while (true) {
+		u32 new_count;
+
+		new_count = kbase_reg_read(kbdev,
+					GPU_CONTROL_REG(CYCLE_COUNT_LO));
+		/* First time around, just store the count. */
+		if (base_count == 0) {
+			base_count = new_count;
+			continue;
+		}
+
+		/* No need to handle wrapping, unsigned maths works for this. */
+		if ((new_count - base_count) > 1000)
+			break;
+	}
+
+	kbase_pm_release_gpu_cycle_counter(kbdev);
+	kbase_pm_context_idle(kbdev);
+}
+#endif				/* CONFIG_MALI_NO_MALI */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h
new file mode 100644
index 0000000..ece70092
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h
@@ -0,0 +1,57 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_BACKEND_TIME_H_
+#define _KBASE_BACKEND_TIME_H_
+
+/**
+ * kbase_backend_get_gpu_time() - Get current GPU time
+ * @kbdev:		Device pointer
+ * @cycle_counter:	Pointer to u64 to store cycle counter in
+ * @system_time:	Pointer to u64 to store system time in
+ * @ts:			Pointer to struct timespec to store current monotonic
+ *			time in
+ */
+void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
+				u64 *system_time, struct timespec *ts);
+
+/**
+ * kbase_wait_write_flush() -  Wait for GPU write flush
+ * @kbdev:	Kbase device
+ *
+ * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
+ * its write buffer.
+ *
+ * If GPU resets occur then the counters are reset to zero, so the delay may
+ * not be as expected.
+ *
+ * This function is only in use for BASE_HW_ISSUE_6367
+ */
+#ifdef CONFIG_MALI_NO_MALI
+static inline void kbase_wait_write_flush(struct kbase_device *kbdev)
+{
+}
+#else
+void kbase_wait_write_flush(struct kbase_device *kbdev);
+#endif
+
+#endif /* _KBASE_BACKEND_TIME_H_ */
diff --git a/drivers/gpu/arm/midgard/build.bp b/drivers/gpu/arm/midgard/build.bp
new file mode 100644
index 0000000..5e6fdfc
--- /dev/null
+++ b/drivers/gpu/arm/midgard/build.bp
@@ -0,0 +1,134 @@
+/*
+ * Copyright:
+ * ----------------------------------------------------------------------------
+ * This confidential and proprietary software may be used only as authorized
+ * by a licensing agreement from ARM Limited.
+ *      (C) COPYRIGHT 2017-2019 ARM Limited, ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorized copies and
+ * copies may only be made to the extent permitted by a licensing agreement
+ * from ARM Limited.
+ * ----------------------------------------------------------------------------
+ */
+
+/* Kernel-side tests may include mali_kbase's headers. Therefore any config
+ * options which affect the sizes of any structs (e.g. adding extra members)
+ * must be included in these defaults, so that the structs are consistent in
+ * both mali_kbase and the test modules. */
+bob_defaults {
+    name: "mali_kbase_shared_config_defaults",
+    no_mali: {
+        kbuild_options: ["CONFIG_MALI_NO_MALI=y"],
+    },
+    mali_real_hw: {
+        kbuild_options: ["CONFIG_MALI_REAL_HW=y"],
+    },
+    mali_devfreq: {
+        kbuild_options: ["CONFIG_MALI_DEVFREQ=y"],
+    },
+    mali_midgard_dvfs: {
+        kbuild_options: ["CONFIG_MALI_MIDGARD_DVFS=y"],
+    },
+    mali_debug: {
+        kbuild_options: ["CONFIG_MALI_DEBUG=y"],
+    },
+    buslog: {
+        kbuild_options: ["CONFIG_MALI_BUSLOG=y"],
+    },
+    cinstr_job_dump: {
+        kbuild_options: ["CONFIG_MALI_JOB_DUMP=y"],
+    },
+    cinstr_vector_dump: {
+        kbuild_options: ["CONFIG_MALI_VECTOR_DUMP=y"],
+    },
+    cinstr_gwt: {
+        kbuild_options: ["CONFIG_MALI_CINSTR_GWT=y"],
+    },
+    mali_gator_support: {
+        kbuild_options: ["CONFIG_MALI_GATOR_SUPPORT=y"],
+    },
+    mali_system_trace: {
+        kbuild_options: ["CONFIG_MALI_SYSTEM_TRACE=y"],
+    },
+    mali_pwrsoft_765: {
+        kbuild_options: ["CONFIG_MALI_PWRSOFT_765=y"],
+    },
+    mali_memory_fully_backed: {
+        kbuild_options: ["CONFIG_MALI_MEMORY_FULLY_BACKED=y"],
+    },
+    mali_dma_buf_map_on_demand: {
+        kbuild_options: ["CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND=y"],
+    },
+    mali_dma_buf_legacy_compat: {
+        kbuild_options: ["CONFIG_MALI_DMA_BUF_LEGACY_COMPAT=y"],
+    },
+    kbuild_options: [
+        "MALI_UNIT_TEST={{.unit_test_code}}",
+        "MALI_CUSTOMER_RELEASE={{.release}}",
+        "MALI_USE_CSF={{.gpu_has_csf}}",
+        "MALI_KERNEL_TEST_API={{.debug}}",
+    ],
+    defaults: ["kernel_defaults"],
+}
+
+bob_kernel_module {
+    name: "mali_kbase",
+    srcs: [
+        "*.c",
+        "*.h",
+        "Kbuild",
+        "backend/gpu/*.c",
+        "backend/gpu/*.h",
+        "backend/gpu/Kbuild",
+        "ipa/*.c",
+        "ipa/*.h",
+        "ipa/Kbuild",
+        "platform/*.h",
+        "platform/*/*.c",
+        "platform/*/*.h",
+        "platform/*/Kbuild",
+        "thirdparty/*.c",
+    ],
+    kbuild_options: [
+        "CONFIG_MALI_KUTF=n",
+        "CONFIG_MALI_MIDGARD=m",
+        "CONFIG_MALI_NO_MALI_DEFAULT_GPU={{.gpu}}",
+        "CONFIG_MALI_PLATFORM_NAME={{.mali_platform_name}}",
+    ],
+    buslog: {
+        extra_symbols: [
+            "bus_logger",
+        ],
+    },
+    mali_corestack: {
+        kbuild_options: ["CONFIG_MALI_CORESTACK=y"],
+    },
+    mali_platform_power_down_only: {
+        kbuild_options: ["CONFIG_MALI_PLATFORM_POWER_DOWN_ONLY=y"],
+    },
+    mali_error_inject: {
+        kbuild_options: ["CONFIG_MALI_ERROR_INJECT=y"],
+    },
+    mali_error_inject_random: {
+        kbuild_options: ["CONFIG_MALI_ERROR_INJECT_RANDOM=y"],
+    },
+    cinstr_secondary_hwc: {
+        kbuild_options: ["CONFIG_MALI_PRFCNT_SET_SECONDARY=y"],
+    },
+    mali_2mb_alloc: {
+        kbuild_options: ["CONFIG_MALI_2MB_ALLOC=y"],
+    },
+    mali_hw_errata_1485982_not_affected: {
+        kbuild_options: ["CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED=y"],
+    },
+    mali_hw_errata_1485982_use_clock_alternative: {
+        kbuild_options: ["CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE=y"],
+    },
+    gpu_has_csf: {
+        srcs: [
+            "csf/*.c",
+            "csf/*.h",
+            "csf/Kbuild",
+        ],
+    },
+    defaults: ["mali_kbase_shared_config_defaults"],
+}
diff --git a/drivers/gpu/arm/midgard/docs/Doxyfile b/drivers/gpu/arm/midgard/docs/Doxyfile
new file mode 100644
index 0000000..6498dcb
--- /dev/null
+++ b/drivers/gpu/arm/midgard/docs/Doxyfile
@@ -0,0 +1,132 @@
+#
+# (C) COPYRIGHT 2011-2013, 2015, 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+##############################################################################
+
+# This file contains per-module Doxygen configuration. Please do not add
+# extra settings to this file without consulting all stakeholders, as they
+# may override project-wide settings.
+#
+# Additionally, when defining aliases, macros, sections etc, use the module
+# name as a prefix e.g. gles_my_alias.
+
+##############################################################################
+
+@INCLUDE = ../../bldsys/Doxyfile_common
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT                  += ../../kernel/drivers/gpu/arm/midgard/
+
+##############################################################################
+# Everything below here is optional, and in most cases not required
+##############################################################################
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES                +=
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS       +=
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS          +=
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+EXCLUDE                += ../../kernel/drivers/gpu/arm/midgard/platform ../../kernel/drivers/gpu/arm/midgard/platform_dummy ../../kernel/drivers/gpu/arm/midgard/scripts ../../kernel/drivers/gpu/arm/midgard/tests ../../kernel/drivers/gpu/arm/midgard/Makefile ../../kernel/drivers/gpu/arm/midgard/Makefile.kbase ../../kernel/drivers/gpu/arm/midgard/Kbuild ../../kernel/drivers/gpu/arm/midgard/Kconfig ../../kernel/drivers/gpu/arm/midgard/sconscript ../../kernel/drivers/gpu/arm/midgard/docs ../../kernel/drivers/gpu/arm/midgard/mali_uk.h
+
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       +=
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS        +=
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH           +=
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain image that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH             +=
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH           +=
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED             +=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED      +=
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS           += ../../kernel/drivers/gpu/arm/midgard/docs
+
diff --git a/drivers/gpu/arm/midgard/docs/policy_operation_diagram.dot b/drivers/gpu/arm/midgard/docs/policy_operation_diagram.dot
new file mode 100644
index 0000000..a15b558
--- /dev/null
+++ b/drivers/gpu/arm/midgard/docs/policy_operation_diagram.dot
@@ -0,0 +1,117 @@
+/*
+ *
+ * (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+digraph policy_objects_diagram {
+	rankdir=LR;
+	size="12,8";
+	compound=true;
+
+	node [ shape = box ];
+
+	subgraph cluster_policy_queues {
+		low_queue [ shape=record label = "LowP | {<ql>ctx_lo | ... | <qm>ctx_i | ... | <qr>ctx_hi}" ];
+		queues_middle_sep [ label="" shape=plaintext width=0 height=0 ];
+
+		rt_queue [ shape=record label = "RT | {<ql>ctx_lo | ... | <qm>ctx_j | ... | <qr>ctx_hi}" ];
+
+		label = "Policy's Queue(s)";
+	}
+
+	call_enqueue [ shape=plaintext label="enqueue_ctx()" ];
+
+	{
+		rank=same;
+		ordering=out;
+		call_dequeue [ shape=plaintext label="dequeue_head_ctx()\n+ runpool_add_ctx()" ];
+		call_ctxfinish [ shape=plaintext label="runpool_remove_ctx()" ];
+
+		call_ctxdone [ shape=plaintext label="don't requeue;\n/* ctx has no more jobs */" ];
+	}
+
+	subgraph cluster_runpool {
+
+		as0 [ width=2 height = 0.25 label="AS0: Job_1, ..., Job_n" ];
+		as1 [ width=2 height = 0.25 label="AS1: Job_1, ..., Job_m" ];
+		as2 [ width=2 height = 0.25 label="AS2: Job_1, ..., Job_p" ];
+		as3 [ width=2 height = 0.25 label="AS3: Job_1, ..., Job_q" ];
+
+		label = "Policy's Run Pool";
+	}
+
+	{
+		rank=same;
+		call_jdequeue [ shape=plaintext label="dequeue_job()" ];
+		sstop_dotfixup [ shape=plaintext label="" width=0 height=0 ];
+	}
+
+	{
+		rank=same;
+		ordering=out;
+		sstop [ shape=ellipse label="SS-Timer expires" ]
+		jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ];
+
+		irq [ label="IRQ" shape=ellipse ];
+
+		job_finish [ shape=plaintext label="don't requeue;\n/* job done */" ];
+	}
+
+	hstop [ shape=ellipse label="HS-Timer expires" ]
+
+	/*
+	 * Edges
+	 */
+
+	call_enqueue -> queues_middle_sep [ lhead=cluster_policy_queues ];
+
+	low_queue:qr -> call_dequeue:w;
+	rt_queue:qr -> call_dequeue:w;
+
+	call_dequeue -> as1 [lhead=cluster_runpool];
+
+	as1->call_jdequeue         [ltail=cluster_runpool];
+	call_jdequeue->jobslots:0;
+	call_jdequeue->sstop_dotfixup [ arrowhead=none];
+	sstop_dotfixup->sstop      [label="Spawn SS-Timer"];
+	sstop->jobslots            [label="SoftStop"];
+	sstop->hstop               [label="Spawn HS-Timer"];
+	hstop->jobslots:ne            [label="HardStop"];
+
+
+	as3->call_ctxfinish:ne [ ltail=cluster_runpool ];
+	call_ctxfinish:sw->rt_queue:qm [ lhead=cluster_policy_queues label="enqueue_ctx()\n/* ctx still has jobs */" ];
+
+	call_ctxfinish->call_ctxdone [constraint=false];
+
+	call_ctxdone->call_enqueue [weight=0.1 labeldistance=20.0 labelangle=0.0 taillabel="Job submitted to the ctx" style=dotted constraint=false];
+
+
+	{
+	jobslots->irq   [constraint=false];
+
+	irq->job_finish [constraint=false];
+	}
+
+	irq->as2  [lhead=cluster_runpool label="requeue_job()\n/* timeslice expired */" ];
+
+}
diff --git a/drivers/gpu/arm/midgard/docs/policy_overview.dot b/drivers/gpu/arm/midgard/docs/policy_overview.dot
new file mode 100644
index 0000000..6b87335
--- /dev/null
+++ b/drivers/gpu/arm/midgard/docs/policy_overview.dot
@@ -0,0 +1,68 @@
+/*
+ *
+ * (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+digraph policy_objects_diagram {
+	rankdir=LR
+	size="6,6"
+	compound=true;
+
+	node [ shape = box ];
+
+	call_enqueue [ shape=plaintext label="enqueue ctx" ];
+
+
+	policy_queue [ label="Policy's Queue" ];
+
+	{
+		rank=same;
+		runpool [ label="Policy's Run Pool" ];
+
+		ctx_finish [ label="ctx finished" ];
+	}
+
+	{
+		rank=same;
+		jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ];
+
+		job_finish [ label="Job finished" ];
+	}
+
+
+
+	/*
+	 * Edges
+	 */
+
+	call_enqueue -> policy_queue;
+
+	policy_queue->runpool [label="dequeue ctx" weight=0.1];
+	runpool->policy_queue [label="requeue ctx" weight=0.1];
+
+	runpool->ctx_finish [ style=dotted ];
+
+	runpool->jobslots  [label="dequeue job" weight=0.1];
+	jobslots->runpool  [label="requeue job" weight=0.1];
+
+	jobslots->job_finish [ style=dotted ];
+}
diff --git a/drivers/gpu/arm/midgard/ipa/Kbuild b/drivers/gpu/arm/midgard/ipa/Kbuild
new file mode 100644
index 0000000..3d9cf80
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/Kbuild
@@ -0,0 +1,28 @@
+#
+# (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+mali_kbase-y += \
+	ipa/mali_kbase_ipa_simple.o \
+	ipa/mali_kbase_ipa.o \
+	ipa/mali_kbase_ipa_vinstr_g7x.o \
+	ipa/mali_kbase_ipa_vinstr_common.o
+
+mali_kbase-$(CONFIG_DEBUG_FS) += ipa/mali_kbase_ipa_debugfs.o
\ No newline at end of file
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c
new file mode 100644
index 0000000..9b75f0d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c
@@ -0,0 +1,669 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+#include <linux/thermal.h>
+#include <linux/devfreq_cooling.h>
+#include <linux/of.h>
+#include "mali_kbase.h"
+#include "mali_kbase_ipa.h"
+#include "mali_kbase_ipa_debugfs.h"
+#include "mali_kbase_ipa_simple.h"
+#include "backend/gpu/mali_kbase_pm_internal.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+#include <linux/pm_opp.h>
+#else
+#include <linux/opp.h>
+#define dev_pm_opp_find_freq_exact opp_find_freq_exact
+#define dev_pm_opp_get_voltage opp_get_voltage
+#define dev_pm_opp opp
+#endif
+
+#define KBASE_IPA_FALLBACK_MODEL_NAME "mali-simple-power-model"
+
+static const struct kbase_ipa_model_ops *kbase_ipa_all_model_ops[] = {
+	&kbase_simple_ipa_model_ops,
+	&kbase_g71_ipa_model_ops,
+	&kbase_g72_ipa_model_ops,
+	&kbase_g76_ipa_model_ops,
+	&kbase_g52_ipa_model_ops,
+	&kbase_g52_r1_ipa_model_ops,
+	&kbase_g51_ipa_model_ops,
+	&kbase_g77_ipa_model_ops,
+	&kbase_tnax_ipa_model_ops,
+	&kbase_tbex_ipa_model_ops
+};
+
+int kbase_ipa_model_recalculate(struct kbase_ipa_model *model)
+{
+	int err = 0;
+
+	lockdep_assert_held(&model->kbdev->ipa.lock);
+
+	if (model->ops->recalculate) {
+		err = model->ops->recalculate(model);
+		if (err) {
+			dev_err(model->kbdev->dev,
+				"recalculation of power model %s returned error %d\n",
+				model->ops->name, err);
+		}
+	}
+
+	return err;
+}
+
+const struct kbase_ipa_model_ops *kbase_ipa_model_ops_find(struct kbase_device *kbdev,
+							    const char *name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(kbase_ipa_all_model_ops); ++i) {
+		const struct kbase_ipa_model_ops *ops = kbase_ipa_all_model_ops[i];
+
+		if (!strcmp(ops->name, name))
+			return ops;
+	}
+
+	dev_err(kbdev->dev, "power model \'%s\' not found\n", name);
+
+	return NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_model_ops_find);
+
+const char *kbase_ipa_model_name_from_id(u32 gpu_id)
+{
+	const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
+			GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+	switch (GPU_ID2_MODEL_MATCH_VALUE(prod_id)) {
+	case GPU_ID2_PRODUCT_TMIX:
+		return "mali-g71-power-model";
+	case GPU_ID2_PRODUCT_THEX:
+		return "mali-g72-power-model";
+	case GPU_ID2_PRODUCT_TNOX:
+		return "mali-g76-power-model";
+	case GPU_ID2_PRODUCT_TSIX:
+		return "mali-g51-power-model";
+	case GPU_ID2_PRODUCT_TGOX:
+		if ((gpu_id & GPU_ID2_VERSION_MAJOR) ==
+				(0 << GPU_ID2_VERSION_MAJOR_SHIFT))
+			/* g52 aliased to g76 power-model's ops */
+			return "mali-g52-power-model";
+		else
+			return "mali-g52_r1-power-model";
+	case GPU_ID2_PRODUCT_TNAX:
+		return "mali-tnax-power-model";
+	case GPU_ID2_PRODUCT_TTRX:
+		return "mali-g77-power-model";
+	case GPU_ID2_PRODUCT_TBEX:
+		return "mali-tbex-power-model";
+	default:
+		return KBASE_IPA_FALLBACK_MODEL_NAME;
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_model_name_from_id);
+
+static struct device_node *get_model_dt_node(struct kbase_ipa_model *model)
+{
+	struct device_node *model_dt_node;
+	char compat_string[64];
+
+	snprintf(compat_string, sizeof(compat_string), "arm,%s",
+		 model->ops->name);
+
+	/* of_find_compatible_node() will call of_node_put() on the root node,
+	 * so take a reference on it first.
+	 */
+	of_node_get(model->kbdev->dev->of_node);
+	model_dt_node = of_find_compatible_node(model->kbdev->dev->of_node,
+						NULL, compat_string);
+	if (!model_dt_node && !model->missing_dt_node_warning) {
+		dev_warn(model->kbdev->dev,
+			 "Couldn't find power_model DT node matching \'%s\'\n",
+			 compat_string);
+		model->missing_dt_node_warning = true;
+	}
+
+	return model_dt_node;
+}
+
+int kbase_ipa_model_add_param_s32(struct kbase_ipa_model *model,
+				  const char *name, s32 *addr,
+				  size_t num_elems, bool dt_required)
+{
+	int err, i;
+	struct device_node *model_dt_node = get_model_dt_node(model);
+	char *origin;
+
+	err = of_property_read_u32_array(model_dt_node, name, addr, num_elems);
+	/* We're done with model_dt_node now, so drop the reference taken in
+	 * get_model_dt_node()/of_find_compatible_node().
+	 */
+	of_node_put(model_dt_node);
+
+	if (err && dt_required) {
+		memset(addr, 0, sizeof(s32) * num_elems);
+		dev_warn(model->kbdev->dev,
+			 "Error %d, no DT entry: %s.%s = %zu*[0]\n",
+			 err, model->ops->name, name, num_elems);
+		origin = "zero";
+	} else if (err && !dt_required) {
+		origin = "default";
+	} else /* !err */ {
+		origin = "DT";
+	}
+
+	/* Create a unique debugfs entry for each element */
+	for (i = 0; i < num_elems; ++i) {
+		char elem_name[32];
+
+		if (num_elems == 1)
+			snprintf(elem_name, sizeof(elem_name), "%s", name);
+		else
+			snprintf(elem_name, sizeof(elem_name), "%s.%d",
+				name, i);
+
+		dev_dbg(model->kbdev->dev, "%s.%s = %d (%s)\n",
+			model->ops->name, elem_name, addr[i], origin);
+
+		err = kbase_ipa_model_param_add(model, elem_name,
+						&addr[i], sizeof(s32),
+						PARAM_TYPE_S32);
+		if (err)
+			goto exit;
+	}
+exit:
+	return err;
+}
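+
+/*
+ * Illustrative call sketch (an assumption about a model's init(); the
+ * model_data layout is hypothetical and "static-coefficient" is only an
+ * example property name, looked up in the model's "arm,<model-name>" DT
+ * node as described above):
+ *
+ *	err = kbase_ipa_model_add_param_s32(model, "static-coefficient",
+ *					    &model_data->static_coefficient,
+ *					    1, true);
+ */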
+
+int kbase_ipa_model_add_param_string(struct kbase_ipa_model *model,
+				     const char *name, char *addr,
+				     size_t size, bool dt_required)
+{
+	int err;
+	struct device_node *model_dt_node = get_model_dt_node(model);
+	const char *string_prop_value;
+	char *origin;
+
+	err = of_property_read_string(model_dt_node, name,
+				      &string_prop_value);
+
+	/* We're done with model_dt_node now, so drop the reference taken in
+	 * get_model_dt_node()/of_find_compatible_node().
+	 */
+	of_node_put(model_dt_node);
+
+	if (err && dt_required) {
+		strncpy(addr, "", size - 1);
+		dev_warn(model->kbdev->dev,
+			 "Error %d, no DT entry: %s.%s = \'%s\'\n",
+			 err, model->ops->name, name, addr);
+		err = 0;
+		origin = "zero";
+	} else if (err && !dt_required) {
+		origin = "default";
+	} else /* !err */ {
+		strncpy(addr, string_prop_value, size - 1);
+		origin = "DT";
+	}
+
+	addr[size - 1] = '\0';
+
+	/* Log addr rather than string_prop_value: the latter is not set on
+	 * the error paths, whereas addr is always NUL-terminated here. */
+	dev_dbg(model->kbdev->dev, "%s.%s = \'%s\' (%s)\n",
+		model->ops->name, name, addr, origin);
+
+	err = kbase_ipa_model_param_add(model, name, addr, size,
+					PARAM_TYPE_STRING);
+	return err;
+}
+
+void kbase_ipa_term_model(struct kbase_ipa_model *model)
+{
+	if (!model)
+		return;
+
+	lockdep_assert_held(&model->kbdev->ipa.lock);
+
+	if (model->ops->term)
+		model->ops->term(model);
+
+	kbase_ipa_model_param_free_all(model);
+
+	kfree(model);
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_term_model);
+
+struct kbase_ipa_model *kbase_ipa_init_model(struct kbase_device *kbdev,
+					     const struct kbase_ipa_model_ops *ops)
+{
+	struct kbase_ipa_model *model;
+	int err;
+
+	lockdep_assert_held(&kbdev->ipa.lock);
+
+	if (!ops || !ops->name)
+		return NULL;
+
+	model = kzalloc(sizeof(struct kbase_ipa_model), GFP_KERNEL);
+	if (!model)
+		return NULL;
+
+	model->kbdev = kbdev;
+	model->ops = ops;
+	INIT_LIST_HEAD(&model->params);
+
+	err = model->ops->init(model);
+	if (err) {
+		dev_err(kbdev->dev,
+			"init of power model \'%s\' returned error %d\n",
+			ops->name, err);
+		kfree(model);
+		return NULL;
+	}
+
+	err = kbase_ipa_model_recalculate(model);
+	if (err) {
+		kbase_ipa_term_model(model);
+		return NULL;
+	}
+
+	return model;
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_init_model);
+
+static void kbase_ipa_term_locked(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->ipa.lock);
+
+	/* Clean up the models */
+	if (kbdev->ipa.configured_model != kbdev->ipa.fallback_model)
+		kbase_ipa_term_model(kbdev->ipa.configured_model);
+	kbase_ipa_term_model(kbdev->ipa.fallback_model);
+
+	kbdev->ipa.configured_model = NULL;
+	kbdev->ipa.fallback_model = NULL;
+}
+
+int kbase_ipa_init(struct kbase_device *kbdev)
+{
+	const char *model_name;
+	const struct kbase_ipa_model_ops *ops;
+	struct kbase_ipa_model *default_model = NULL;
+	int err;
+
+	mutex_init(&kbdev->ipa.lock);
+	/*
+	 * Lock during init to avoid warnings from lockdep_assert_held (there
+	 * shouldn't be any concurrent access yet).
+	 */
+	mutex_lock(&kbdev->ipa.lock);
+
+	/* The simple IPA model must *always* be present. */
+	ops = kbase_ipa_model_ops_find(kbdev, KBASE_IPA_FALLBACK_MODEL_NAME);
+
+	default_model = kbase_ipa_init_model(kbdev, ops);
+	if (!default_model) {
+		err = -EINVAL;
+		goto end;
+	}
+
+	kbdev->ipa.fallback_model = default_model;
+	err = of_property_read_string(kbdev->dev->of_node,
+				      "ipa-model",
+				      &model_name);
+	if (err) {
+		/* Attempt to load a match from GPU-ID */
+		u32 gpu_id;
+
+		gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+		model_name = kbase_ipa_model_name_from_id(gpu_id);
+		dev_dbg(kbdev->dev,
+			"Inferring model from GPU ID 0x%x: \'%s\'\n",
+			gpu_id, model_name);
+		err = 0;
+	} else {
+		dev_dbg(kbdev->dev,
+			"Using ipa-model parameter from DT: \'%s\'\n",
+			model_name);
+	}
+
+	if (strcmp(KBASE_IPA_FALLBACK_MODEL_NAME, model_name) != 0) {
+		ops = kbase_ipa_model_ops_find(kbdev, model_name);
+		kbdev->ipa.configured_model = kbase_ipa_init_model(kbdev, ops);
+		if (!kbdev->ipa.configured_model) {
+			dev_warn(kbdev->dev,
+				"Failed to initialize ipa-model: \'%s\'\n"
+				"Falling back on default model\n",
+				model_name);
+			kbdev->ipa.configured_model = default_model;
+		}
+	} else {
+		kbdev->ipa.configured_model = default_model;
+	}
+
+end:
+	if (err)
+		kbase_ipa_term_locked(kbdev);
+	else
+		dev_info(kbdev->dev,
+			 "Using configured power model %s, and fallback %s\n",
+			 kbdev->ipa.configured_model->ops->name,
+			 kbdev->ipa.fallback_model->ops->name);
+
+	mutex_unlock(&kbdev->ipa.lock);
+	return err;
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_init);
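+
+/*
+ * Illustrative DT fragment (an assumption; the node name and address are made
+ * up) selecting a model explicitly via the "ipa-model" property read above,
+ * instead of inferring one from the GPU ID:
+ *
+ *	gpu@13040000 {
+ *		...
+ *		ipa-model = "mali-g72-power-model";
+ *	};
+ */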
+
+void kbase_ipa_term(struct kbase_device *kbdev)
+{
+	mutex_lock(&kbdev->ipa.lock);
+	kbase_ipa_term_locked(kbdev);
+	mutex_unlock(&kbdev->ipa.lock);
+
+	mutex_destroy(&kbdev->ipa.lock);
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_term);
+
+/**
+ * kbase_scale_dynamic_power() - Scale a dynamic power coefficient to an OPP
+ * @c:		Dynamic model coefficient, in pW/(Hz V^2). Should be in range
+ *		0 < c < 2^26 to prevent overflow.
+ * @freq:	Frequency, in Hz. Range: 2^23 < freq < 2^30 (~8MHz to ~1GHz)
+ * @voltage:	Voltage, in mV. Range: 2^9 < voltage < 2^13 (~0.5V to ~8V)
+ *
+ * Keep a record of the approximate range of each value at every stage of the
+ * calculation, to ensure we don't overflow. This makes heavy use of the
+ * approximations 1000 = 2^10 and 1000000 = 2^20, but does the actual
+ * calculations in decimal for increased accuracy.
+ *
+ * Return: Power consumption, in mW. Range: 0 < p < 2^13 (0W to ~8W)
+ */
+static u32 kbase_scale_dynamic_power(const u32 c, const u32 freq,
+				     const u32 voltage)
+{
+	/* Range: 2^8 < v2 < 2^16 m(V^2) */
+	const u32 v2 = (voltage * voltage) / 1000;
+
+	/* Range: 2^3 < f_MHz < 2^10 MHz */
+	const u32 f_MHz = freq / 1000000;
+
+	/* Range: 2^11 < v2f_big < 2^26 kHz V^2 */
+	const u32 v2f_big = v2 * f_MHz;
+
+	/* Range: 2^1 < v2f < 2^16 MHz V^2 */
+	const u32 v2f = v2f_big / 1000;
+
+	/* Range (working backwards from next line): 0 < v2fc < 2^23 uW.
+	 * Must be < 2^42 to avoid overflowing the return value. */
+	const u64 v2fc = (u64) c * (u64) v2f;
+
+	/* Range: 0 < v2fc / 1000 < 2^13 mW */
+	return div_u64(v2fc, 1000);
+}
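+
+/*
+ * Worked example for kbase_scale_dynamic_power() (illustrative numbers, not a
+ * real coefficient): c = 6250 pW/(Hz V^2), freq = 500000000 Hz (500 MHz),
+ * voltage = 800 mV:
+ *
+ *	v2      = (800 * 800) / 1000  = 640
+ *	f_MHz   = 500000000 / 1000000 = 500
+ *	v2f_big = 640 * 500           = 320000
+ *	v2f     = 320000 / 1000       = 320
+ *	v2fc    = 6250 * 320          = 2000000 uW
+ *	return  = 2000000 / 1000      = 2000 mW (2 W)
+ */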
+
+/**
+ * kbase_scale_static_power() - Scale a static power coefficient to an OPP
+ * @c:		Static model coefficient, in uW/V^3. Should be in range
+ *		0 < c < 2^32 to prevent overflow.
+ * @voltage:	Voltage, in mV. Range: 2^9 < voltage < 2^13 (~0.5V to ~8V)
+ *
+ * Return: Power consumption, in mW. Range: 0 < p < 2^13 (0W to ~8W)
+ */
+u32 kbase_scale_static_power(const u32 c, const u32 voltage)
+{
+	/* Range: 2^8 < v2 < 2^16 m(V^2) */
+	const u32 v2 = (voltage * voltage) / 1000;
+
+	/* Range: 2^17 < v3_big < 2^29 m(V^2) mV */
+	const u32 v3_big = v2 * voltage;
+
+	/* Range: 2^7 < v3 < 2^19 m(V^3) */
+	const u32 v3 = v3_big / 1000;
+
+	/*
+	 * Range (working backwards from next line): 0 < v3c_big < 2^33 nW.
+	 * The result should be < 2^52 to avoid overflowing the return value.
+	 */
+	const u64 v3c_big = (u64) c * (u64) v3;
+
+	/* Range: 0 < v3c_big / 1000000 < 2^13 mW */
+	return div_u64(v3c_big, 1000000);
+}
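+
+/*
+ * Worked example for kbase_scale_static_power() (illustrative numbers, not a
+ * real coefficient): c = 1000000 uW/V^3 (1 W/V^3), voltage = 800 mV:
+ *
+ *	v2      = (800 * 800) / 1000  = 640
+ *	v3_big  = 640 * 800           = 512000
+ *	v3      = 512000 / 1000       = 512
+ *	v3c_big = 1000000 * 512       = 512000000 nW
+ *	return  = 512000000 / 1000000 = 512 mW
+ */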
+
+void kbase_ipa_protection_mode_switch_event(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* Record the event of GPU entering protected mode. */
+	kbdev->ipa_protection_mode_switched = true;
+}
+
+static struct kbase_ipa_model *get_current_model(struct kbase_device *kbdev)
+{
+	struct kbase_ipa_model *model;
+	unsigned long flags;
+
+	lockdep_assert_held(&kbdev->ipa.lock);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (kbdev->ipa_protection_mode_switched ||
+			kbdev->ipa.force_fallback_model)
+		model = kbdev->ipa.fallback_model;
+	else
+		model = kbdev->ipa.configured_model;
+
+	/*
+	 * Whether or not the GPU was in protected mode earlier, the event
+	 * can now be reset (provided the GPU is not currently in protected
+	 * mode) so that the configured model is used for the next sample.
+	 */
+	if (!kbdev->protected_mode)
+		kbdev->ipa_protection_mode_switched = false;
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return model;
+}
+
+static u32 get_static_power_locked(struct kbase_device *kbdev,
+				   struct kbase_ipa_model *model,
+				   unsigned long voltage)
+{
+	u32 power = 0;
+	int err;
+	u32 power_coeff;
+
+	lockdep_assert_held(&model->kbdev->ipa.lock);
+
+	if (!model->ops->get_static_coeff)
+		model = kbdev->ipa.fallback_model;
+
+	if (model->ops->get_static_coeff) {
+		err = model->ops->get_static_coeff(model, &power_coeff);
+		if (!err)
+			power = kbase_scale_static_power(power_coeff,
+							 (u32) voltage);
+	}
+
+	return power;
+}
+
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static unsigned long kbase_get_static_power(struct devfreq *df,
+					    unsigned long voltage)
+#else
+static unsigned long kbase_get_static_power(unsigned long voltage)
+#endif
+{
+	struct kbase_ipa_model *model;
+	u32 power = 0;
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+	struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
+#else
+	struct kbase_device *kbdev = kbase_find_device(-1);
+#endif
+
+	if (!kbdev)
+		return 0ul;
+
+	mutex_lock(&kbdev->ipa.lock);
+
+	model = get_current_model(kbdev);
+	power = get_static_power_locked(kbdev, model, voltage);
+
+	mutex_unlock(&kbdev->ipa.lock);
+
+#if !(defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	kbase_release_device(kbdev);
+#endif
+
+	return power;
+}
+
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static unsigned long kbase_get_dynamic_power(struct devfreq *df,
+					     unsigned long freq,
+					     unsigned long voltage)
+#else
+static unsigned long kbase_get_dynamic_power(unsigned long freq,
+					     unsigned long voltage)
+#endif
+{
+	struct kbase_ipa_model *model;
+	u32 power_coeff = 0, power = 0;
+	int err = 0;
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+	struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
+#else
+	struct kbase_device *kbdev = kbase_find_device(-1);
+#endif
+
+	if (!kbdev)
+		return 0ul;
+
+	mutex_lock(&kbdev->ipa.lock);
+
+	model = kbdev->ipa.fallback_model;
+
+	err = model->ops->get_dynamic_coeff(model, &power_coeff);
+
+	if (!err)
+		power = kbase_scale_dynamic_power(power_coeff, freq, voltage);
+	else
+		dev_err_ratelimited(kbdev->dev,
+				    "Model %s returned error code %d\n",
+				    model->ops->name, err);
+
+	mutex_unlock(&kbdev->ipa.lock);
+
+#if !(defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	kbase_release_device(kbdev);
+#endif
+
+	return power;
+}
+
+int kbase_get_real_power_locked(struct kbase_device *kbdev, u32 *power,
+				unsigned long freq,
+				unsigned long voltage)
+{
+	struct kbase_ipa_model *model;
+	u32 power_coeff = 0;
+	int err = 0;
+	struct kbasep_pm_metrics diff;
+	u64 total_time;
+
+	lockdep_assert_held(&kbdev->ipa.lock);
+
+	kbase_pm_get_dvfs_metrics(kbdev, &kbdev->ipa.last_metrics, &diff);
+
+	model = get_current_model(kbdev);
+
+	err = model->ops->get_dynamic_coeff(model, &power_coeff);
+
+	/* If the counter model returns an error (e.g. switching back to
+	 * protected mode and failing to read counters, or a counter sample
+	 * with too few cycles), revert to the fallback model.
+	 */
+	if (err && model != kbdev->ipa.fallback_model) {
+		model = kbdev->ipa.fallback_model;
+		err = model->ops->get_dynamic_coeff(model, &power_coeff);
+	}
+
+	if (err)
+		return err;
+
+	*power = kbase_scale_dynamic_power(power_coeff, freq, voltage);
+
+	/* time_busy / total_time cannot be >1, so assigning the 64-bit
+	 * result of div_u64 to *power cannot overflow.
+	 */
+	total_time = diff.time_busy + (u64) diff.time_idle;
+	*power = div_u64(*power * (u64) diff.time_busy,
+			 max(total_time, 1ull));
+
+	*power += get_static_power_locked(kbdev, model, voltage);
+
+	return err;
+}
+KBASE_EXPORT_TEST_API(kbase_get_real_power_locked);
+
+int kbase_get_real_power(struct devfreq *df, u32 *power,
+				unsigned long freq,
+				unsigned long voltage)
+{
+	int ret;
+	struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	mutex_lock(&kbdev->ipa.lock);
+	ret = kbase_get_real_power_locked(kbdev, power, freq, voltage);
+	mutex_unlock(&kbdev->ipa.lock);
+
+	return ret;
+}
+KBASE_EXPORT_TEST_API(kbase_get_real_power);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+struct devfreq_cooling_ops kbase_ipa_power_model_ops = {
+#else
+struct devfreq_cooling_power kbase_ipa_power_model_ops = {
+#endif
+	.get_static_power = &kbase_get_static_power,
+	.get_dynamic_power = &kbase_get_dynamic_power,
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+	.get_real_power = &kbase_get_real_power,
+#endif
+};
+KBASE_EXPORT_TEST_API(kbase_ipa_power_model_ops);
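+
+/*
+ * Illustrative registration sketch (an assumption: the call below is the
+ * mainline devfreq cooling API for kernels where kbase_ipa_power_model_ops is
+ * a struct devfreq_cooling_power; the surrounding code is hypothetical):
+ *
+ *	struct thermal_cooling_device *cdev =
+ *		of_devfreq_cooling_register_power(kbdev->dev->of_node,
+ *						  kbdev->devfreq,
+ *						  &kbase_ipa_power_model_ops);
+ */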
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h
new file mode 100644
index 0000000..92aace9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h
@@ -0,0 +1,253 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_IPA_H_
+#define _KBASE_IPA_H_
+
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+
+struct devfreq;
+
+/**
+ * struct kbase_ipa_model - Object describing a particular IPA model.
+ * @kbdev:                    pointer to kbase device
+ * @model_data:               opaque pointer to model specific data, accessed
+ *                            only by model specific methods.
+ * @ops:                      pointer to object containing model specific methods.
+ * @params:                   head of the list of debugfs params added for model
+ * @missing_dt_node_warning:  flag to limit the matching power model DT not found
+ *                            warning to once.
+ */
+struct kbase_ipa_model {
+	struct kbase_device *kbdev;
+	void *model_data;
+	const struct kbase_ipa_model_ops *ops;
+	struct list_head params;
+	bool missing_dt_node_warning;
+};
+
+/**
+ * kbase_ipa_model_add_param_s32 - Add an integer model parameter
+ * @model:	pointer to IPA model
+ * @name:	name of corresponding debugfs entry
+ * @addr:	address where the value is stored
+ * @num_elems:	number of elements (1 if not an array)
+ * @dt_required: if false, a corresponding devicetree entry is not required,
+ *		 and the current value will be used. If true and the entry is
+ *		 missing, a warning is output and the data is zeroed
+ *
+ * Return: 0 on success, or an error code
+ */
+int kbase_ipa_model_add_param_s32(struct kbase_ipa_model *model,
+				  const char *name, s32 *addr,
+				  size_t num_elems, bool dt_required);
+
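+/* Illustrative usage (hypothetical parameter name and storage): a model's
+ * init() might register a tunable so that it can be set from the devicetree
+ * and tweaked through debugfs:
+ *
+ *	err = kbase_ipa_model_add_param_s32(model, "my-coefficient",
+ *					    &model_data->my_coefficient,
+ *					    1, false);
+ */
+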
+/**
+ * kbase_ipa_model_add_param_string - Add a string model parameter
+ * @model:	pointer to IPA model
+ * @name:	name of corresponding debugfs entry
+ * @addr:	address where the value is stored
+ * @size:	size, in bytes, of the value storage (so the maximum string
+ *		length is size - 1)
+ * @dt_required: if false, a corresponding devicetree entry is not required,
+ *		 and the current value will be used. If true and the entry is
+ *		 missing, a warning is output and the data is zeroed
+ *
+ * Return: 0 on success, or an error code
+ */
+int kbase_ipa_model_add_param_string(struct kbase_ipa_model *model,
+				     const char *name, char *addr,
+				     size_t size, bool dt_required);
+
+struct kbase_ipa_model_ops {
+	char *name;
+	/* The init, recalculate and term ops on the default model are always
+	 * called. The ops of all other models are only invoked if that model
+	 * is selected in the device tree; otherwise the model is never
+	 * initialized. Any additional resources acquired by a model in
+	 * init() must be released in term().
+	 */
+	int (*init)(struct kbase_ipa_model *model);
+	/* Called immediately after init(), or when a parameter is changed, so
+	 * that any coefficients derived from model parameters can be
+	 * recalculated. */
+	int (*recalculate)(struct kbase_ipa_model *model);
+	void (*term)(struct kbase_ipa_model *model);
+	/*
+	 * get_dynamic_coeff() - calculate dynamic power coefficient
+	 * @model:		pointer to model
+	 * @coeffp:		pointer to return value location
+	 *
+	 * Calculate a dynamic power coefficient, with units pW/(Hz V^2), which
+	 * is then scaled by the IPA framework according to the current OPP's
+	 * frequency and voltage.
+	 *
+	 * Return: 0 on success, or an error code.
+	 */
+	int (*get_dynamic_coeff)(struct kbase_ipa_model *model, u32 *coeffp);
+	/*
+	 * get_static_coeff() - calculate static power coefficient
+	 * @model:		pointer to model
+	 * @coeffp:		pointer to return value location
+	 *
+	 * Calculate a static power coefficient, with units uW/(V^3), which is
+	 * scaled by the IPA framework according to the current OPP's voltage.
+	 *
+	 * Return: 0 on success, or an error code.
+	 */
+	int (*get_static_coeff)(struct kbase_ipa_model *model, u32 *coeffp);
+};
+
+/**
+ * kbase_ipa_init - Initialize the IPA feature
+ * @kbdev:      pointer to kbase device
+ *
+ * The simple IPA power model is initialized as a fallback model; if that
+ * initialization fails then IPA is not used.
+ * The name of the IPA model to use is read from the device tree, via the
+ * property string "ipa-model". If that IPA model is supported then it is
+ * initialized, but if its initialization fails then the simple power model
+ * is used instead.
+ *
+ * Return: 0 on success, negative errno on error
+ */
+int kbase_ipa_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_ipa_term - Terminate the IPA feature
+ * @kbdev:      pointer to kbase device
+ *
+ * Both simple IPA power model and model retrieved from device tree are
+ * terminated.
+ */
+void kbase_ipa_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_ipa_model_recalculate - Recalculate the model coefficients
+ * @model:      pointer to the IPA model object, already initialized
+ *
+ * This must be called immediately after the model has been initialized,
+ * or when a model parameter has changed, so that any coefficients derived
+ * from the parameters can be recalculated.
+ * It is a wrapper for the model-specific recalculate() method.
+ *
+ * Return: 0 on success, negative errno on error
+ */
+int kbase_ipa_model_recalculate(struct kbase_ipa_model *model);
+
+/**
+ * kbase_ipa_model_ops_find - Lookup an IPA model using its name
+ * @kbdev:      pointer to kbase device
+ * @name:       name of model to lookup
+ *
+ * Return: Pointer to model's 'ops' structure, or NULL if the lookup failed.
+ */
+const struct kbase_ipa_model_ops *kbase_ipa_model_ops_find(struct kbase_device *kbdev,
+							   const char *name);
+
+/**
+ * kbase_ipa_model_name_from_id - Find the best model for a given GPU ID
+ * @gpu_id:     GPU ID of GPU the model will be used for
+ *
+ * Return: The name of the appropriate counter-based model, or the name of the
+ *         fallback model if no counter model exists.
+ */
+const char *kbase_ipa_model_name_from_id(u32 gpu_id);
+
+/**
+ * kbase_ipa_init_model - Initialize the particular IPA model
+ * @kbdev:      pointer to kbase device
+ * @ops:        pointer to object containing model specific methods.
+ *
+ * Initialize the model corresponding to the @ops pointer passed.
+ * The init() method specified in @ops is called.
+ *
+ * Return: pointer to kbase_ipa_model on success, NULL on error
+ */
+struct kbase_ipa_model *kbase_ipa_init_model(struct kbase_device *kbdev,
+					     const struct kbase_ipa_model_ops *ops);
+/**
+ * kbase_ipa_term_model - Terminate the particular IPA model
+ * @model:      pointer to the IPA model object, already initialized
+ *
+ * Terminate the model, using the term() method.
+ * Model-specific parameters are freed.
+ */
+void kbase_ipa_term_model(struct kbase_ipa_model *model);
+
+/**
+ * kbase_ipa_protection_mode_switch_event - Inform IPA of the GPU's entry into
+ *                                          protected mode
+ * @kbdev:      pointer to kbase device
+ *
+ * Makes IPA aware of the GPU switching to protected mode.
+ */
+void kbase_ipa_protection_mode_switch_event(struct kbase_device *kbdev);
+
+extern const struct kbase_ipa_model_ops kbase_g71_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_g72_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_g76_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_g52_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_g52_r1_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_g51_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_g77_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_tnax_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_tbex_ipa_model_ops;
+
+/**
+ * kbase_get_real_power() - get the real power consumption of the GPU
+ * @df: dynamic voltage and frequency scaling information for the GPU.
+ * @power: where to store the power consumption, in mW.
+ * @freq: a frequency, in HZ.
+ * @voltage: a voltage, in mV.
+ *
+ * The returned value incorporates both static and dynamic power consumption.
+ *
+ * Return: 0 on success, or an error code.
+ */
+int kbase_get_real_power(struct devfreq *df, u32 *power,
+				unsigned long freq,
+				unsigned long voltage);
+
+#if MALI_UNIT_TEST
+/* Called by kbase_get_real_power() to invoke the power models.
+ * Must be called with kbdev->ipa.lock held.
+ * This function is only exposed for use by unit tests.
+ */
+int kbase_get_real_power_locked(struct kbase_device *kbdev, u32 *power,
+				unsigned long freq,
+				unsigned long voltage);
+#endif /* MALI_UNIT_TEST */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+extern struct devfreq_cooling_ops kbase_ipa_power_model_ops;
+#else
+extern struct devfreq_cooling_power kbase_ipa_power_model_ops;
+#endif
+
+#else /* !(defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */
+
+static inline void kbase_ipa_protection_mode_switch_event(struct kbase_device *kbdev)
+{ }
+
+#endif /* (defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */
+
+#endif
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c
new file mode 100644
index 0000000..30a3b7d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c
@@ -0,0 +1,322 @@
+/*
+ *
+ * (C) COPYRIGHT 2017-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include "mali_kbase.h"
+#include "mali_kbase_ipa.h"
+#include "mali_kbase_ipa_debugfs.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+#define DEFINE_DEBUGFS_ATTRIBUTE DEFINE_SIMPLE_ATTRIBUTE
+#endif
+
+struct kbase_ipa_model_param {
+	char *name;
+	union {
+		void *voidp;
+		s32 *s32p;
+		char *str;
+	} addr;
+	size_t size;
+	enum kbase_ipa_model_param_type type;
+	struct kbase_ipa_model *model;
+	struct list_head link;
+};
+
+static int param_int_get(void *data, u64 *val)
+{
+	struct kbase_ipa_model_param *param = data;
+
+	mutex_lock(&param->model->kbdev->ipa.lock);
+	*(s64 *) val = *param->addr.s32p;
+	mutex_unlock(&param->model->kbdev->ipa.lock);
+
+	return 0;
+}
+
+static int param_int_set(void *data, u64 val)
+{
+	struct kbase_ipa_model_param *param = data;
+	struct kbase_ipa_model *model = param->model;
+	s64 sval = (s64) val;
+	s32 old_val;
+	int err = 0;
+
+	if (sval < S32_MIN || sval > S32_MAX)
+		return -ERANGE;
+
+	mutex_lock(&param->model->kbdev->ipa.lock);
+	old_val = *param->addr.s32p;
+	*param->addr.s32p = val;
+	err = kbase_ipa_model_recalculate(model);
+	if (err < 0)
+		*param->addr.s32p = old_val;
+	mutex_unlock(&param->model->kbdev->ipa.lock);
+
+	return err;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_s32, param_int_get, param_int_set, "%lld\n");
+
+static ssize_t param_string_get(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct kbase_ipa_model_param *param = file->private_data;
+	ssize_t ret;
+	size_t len;
+
+	mutex_lock(&param->model->kbdev->ipa.lock);
+	len = strnlen(param->addr.str, param->size - 1) + 1;
+	ret = simple_read_from_buffer(user_buf, count, ppos,
+				      param->addr.str, len);
+	mutex_unlock(&param->model->kbdev->ipa.lock);
+
+	return ret;
+}
+
+static ssize_t param_string_set(struct file *file, const char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct kbase_ipa_model_param *param = file->private_data;
+	struct kbase_ipa_model *model = param->model;
+	char *old_str = NULL;
+	ssize_t ret = count;
+	size_t buf_size;
+	int err;
+
+	mutex_lock(&model->kbdev->ipa.lock);
+
+	if (count > param->size) {
+		ret = -EINVAL;
+		goto end;
+	}
+
+	old_str = kstrndup(param->addr.str, param->size, GFP_KERNEL);
+	if (!old_str) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	buf_size = min(param->size - 1, count);
+	if (copy_from_user(param->addr.str, user_buf, buf_size)) {
+		ret = -EFAULT;
+		goto end;
+	}
+
+	param->addr.str[buf_size] = '\0';
+
+	err = kbase_ipa_model_recalculate(model);
+	if (err < 0) {
+		ret = err;
+		strlcpy(param->addr.str, old_str, param->size);
+	}
+
+end:
+	kfree(old_str);
+	mutex_unlock(&model->kbdev->ipa.lock);
+
+	return ret;
+}
+
+static const struct file_operations fops_string = {
+	.owner = THIS_MODULE,
+	.read = param_string_get,
+	.write = param_string_set,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+int kbase_ipa_model_param_add(struct kbase_ipa_model *model, const char *name,
+			      void *addr, size_t size,
+			      enum kbase_ipa_model_param_type type)
+{
+	struct kbase_ipa_model_param *param;
+
+	param = kzalloc(sizeof(*param), GFP_KERNEL);
+
+	if (!param)
+		return -ENOMEM;
+
+	/* 'name' is stack-allocated for array elements, so copy it into
+	 * heap-allocated storage */
+	param->name = kstrdup(name, GFP_KERNEL);
+
+	if (!param->name) {
+		kfree(param);
+		return -ENOMEM;
+	}
+
+	param->addr.voidp = addr;
+	param->size = size;
+	param->type = type;
+	param->model = model;
+
+	list_add(&param->link, &model->params);
+
+	return 0;
+}
+
+void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model)
+{
+	struct kbase_ipa_model_param *param_p, *param_n;
+
+	list_for_each_entry_safe(param_p, param_n, &model->params, link) {
+		list_del(&param_p->link);
+		kfree(param_p->name);
+		kfree(param_p);
+	}
+}
+
+static int force_fallback_model_get(void *data, u64 *val)
+{
+	struct kbase_device *kbdev = data;
+
+	mutex_lock(&kbdev->ipa.lock);
+	*val = kbdev->ipa.force_fallback_model;
+	mutex_unlock(&kbdev->ipa.lock);
+
+	return 0;
+}
+
+static int force_fallback_model_set(void *data, u64 val)
+{
+	struct kbase_device *kbdev = data;
+
+	mutex_lock(&kbdev->ipa.lock);
+	kbdev->ipa.force_fallback_model = (val ? true : false);
+	mutex_unlock(&kbdev->ipa.lock);
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(force_fallback_model,
+		force_fallback_model_get,
+		force_fallback_model_set,
+		"%llu\n");
+
+static int current_power_get(void *data, u64 *val)
+{
+	struct kbase_device *kbdev = data;
+	struct devfreq *df = kbdev->devfreq;
+	u32 power;
+
+	kbase_pm_context_active(kbdev);
+	/* The current model assumes that there's no more than one voltage
+	 * regulator currently available in the system.
+	 */
+	kbase_get_real_power(df, &power,
+		kbdev->current_nominal_freq,
+		(kbdev->current_voltages[0] / 1000));
+	kbase_pm_context_idle(kbdev);
+
+	*val = power;
+
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(current_power, current_power_get, NULL, "%llu\n");
+
+static void kbase_ipa_model_debugfs_init(struct kbase_ipa_model *model)
+{
+	struct list_head *it;
+	struct dentry *dir;
+
+	lockdep_assert_held(&model->kbdev->ipa.lock);
+
+	dir = debugfs_create_dir(model->ops->name,
+				 model->kbdev->mali_debugfs_directory);
+
+	if (!dir) {
+		dev_err(model->kbdev->dev,
+			"Couldn't create mali debugfs %s directory",
+			model->ops->name);
+		return;
+	}
+
+	list_for_each(it, &model->params) {
+		struct kbase_ipa_model_param *param =
+				list_entry(it,
+					   struct kbase_ipa_model_param,
+					   link);
+		const struct file_operations *fops = NULL;
+
+		switch (param->type) {
+		case PARAM_TYPE_S32:
+			fops = &fops_s32;
+			break;
+		case PARAM_TYPE_STRING:
+			fops = &fops_string;
+			break;
+		}
+
+		if (unlikely(!fops)) {
+			dev_err(model->kbdev->dev,
+				"Type not set for %s parameter %s\n",
+				model->ops->name, param->name);
+		} else {
+			debugfs_create_file(param->name, 0644,
+					    dir, param, fops);
+		}
+	}
+}
+
+void kbase_ipa_model_param_set_s32(struct kbase_ipa_model *model,
+	const char *name, s32 val)
+{
+	struct kbase_ipa_model_param *param;
+
+	mutex_lock(&model->kbdev->ipa.lock);
+
+	list_for_each_entry(param, &model->params, link) {
+		if (!strcmp(param->name, name)) {
+			if (param->type == PARAM_TYPE_S32) {
+				*param->addr.s32p = val;
+			} else {
+				dev_err(model->kbdev->dev,
+					"Wrong type for %s parameter %s\n",
+					model->ops->name, param->name);
+			}
+			break;
+		}
+	}
+
+	mutex_unlock(&model->kbdev->ipa.lock);
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_model_param_set_s32);
+
+void kbase_ipa_debugfs_init(struct kbase_device *kbdev)
+{
+	mutex_lock(&kbdev->ipa.lock);
+
+	if (kbdev->ipa.configured_model != kbdev->ipa.fallback_model)
+		kbase_ipa_model_debugfs_init(kbdev->ipa.configured_model);
+	kbase_ipa_model_debugfs_init(kbdev->ipa.fallback_model);
+
+	debugfs_create_file("ipa_current_power", 0444,
+		kbdev->mali_debugfs_directory, kbdev, &current_power);
+	debugfs_create_file("ipa_force_fallback_model", 0644,
+		kbdev->mali_debugfs_directory, kbdev, &force_fallback_model);
+
+	mutex_unlock(&kbdev->ipa.lock);
+}
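+
+/* For reference, with a hypothetical debugfs mount point and device name,
+ * the entries created above could be exercised from user space like:
+ *
+ *	cat /sys/kernel/debug/mali0/ipa_current_power
+ *	echo 1 > /sys/kernel/debug/mali0/ipa_force_fallback_model
+ */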
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.h
new file mode 100644
index 0000000..a983d9c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.h
@@ -0,0 +1,68 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_IPA_DEBUGFS_H_
+#define _KBASE_IPA_DEBUGFS_H_
+
+enum kbase_ipa_model_param_type {
+	PARAM_TYPE_S32 = 1,
+	PARAM_TYPE_STRING,
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+void kbase_ipa_debugfs_init(struct kbase_device *kbdev);
+int kbase_ipa_model_param_add(struct kbase_ipa_model *model, const char *name,
+			      void *addr, size_t size,
+			      enum kbase_ipa_model_param_type type);
+void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model);
+
+/**
+ * kbase_ipa_model_param_set_s32 - Set an integer model parameter
+ *
+ * @model:	pointer to IPA model
+ * @name:	name of corresponding debugfs entry
+ * @val:	new value of the parameter
+ *
+ * This function is only exposed for use by unit tests running in
+ * kernel space. Normally it is expected that parameter values will
+ * instead be set via debugfs.
+ */
+void kbase_ipa_model_param_set_s32(struct kbase_ipa_model *model,
+	const char *name, s32 val);
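+
+/* e.g. a unit test might override the simple model's static coefficient
+ * (parameter name as registered by that model):
+ *
+ *	kbase_ipa_model_param_set_s32(model, "static-coefficient", 2000000);
+ */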
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline int kbase_ipa_model_param_add(struct kbase_ipa_model *model,
+					    const char *name, void *addr,
+					    size_t size,
+					    enum kbase_ipa_model_param_type type)
+{
+	return 0;
+}
+
+static inline void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model)
+{ }
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _KBASE_IPA_DEBUGFS_H_ */
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c
new file mode 100644
index 0000000..852559e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c
@@ -0,0 +1,351 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <uapi/linux/thermal.h>
+#include <linux/thermal.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include "mali_kbase.h"
+#include "mali_kbase_defs.h"
+#include "mali_kbase_ipa_simple.h"
+#include "mali_kbase_ipa_debugfs.h"
+
+#if MALI_UNIT_TEST
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+static unsigned long dummy_temp;
+
+static int kbase_simple_power_model_get_dummy_temp(
+	struct thermal_zone_device *tz,
+	unsigned long *temp)
+{
+	*temp = READ_ONCE(dummy_temp);
+	return 0;
+}
+
+#else
+static int dummy_temp;
+
+static int kbase_simple_power_model_get_dummy_temp(
+	struct thermal_zone_device *tz,
+	int *temp)
+{
+	*temp = READ_ONCE(dummy_temp);
+	return 0;
+}
+#endif
+
+/* Intercept calls to the kernel function using a macro */
+#ifdef thermal_zone_get_temp
+#undef thermal_zone_get_temp
+#endif
+#define thermal_zone_get_temp(tz, temp) \
+	kbase_simple_power_model_get_dummy_temp(tz, temp)
+
+void kbase_simple_power_model_set_dummy_temp(int temp)
+{
+	WRITE_ONCE(dummy_temp, temp);
+}
+KBASE_EXPORT_TEST_API(kbase_simple_power_model_set_dummy_temp);
+
+#endif /* MALI_UNIT_TEST */
+
+/*
+ * This model is primarily designed for the Juno platform. It may not be
+ * suitable for other platforms. The additional resources in this model
+ * should preferably be minimal, as this model is rarely used when a dynamic
+ * model is available.
+ */
+
+/**
+ * struct kbase_ipa_model_simple_data - IPA context per device
+ * @dynamic_coefficient: dynamic coefficient of the model
+ * @static_coefficient:  static coefficient of the model
+ * @ts:                  Thermal scaling coefficients of the model
+ * @tz_name:             Thermal zone name
+ * @gpu_tz:              thermal zone device
+ * @poll_temperature_thread: Handle for temperature polling thread
+ * @current_temperature: Most recent value of polled temperature
+ * @temperature_poll_interval_ms: How often temperature should be checked, in ms
+ */
+struct kbase_ipa_model_simple_data {
+	u32 dynamic_coefficient;
+	u32 static_coefficient;
+	s32 ts[4];
+	char tz_name[THERMAL_NAME_LENGTH];
+	struct thermal_zone_device *gpu_tz;
+	struct task_struct *poll_temperature_thread;
+	int current_temperature;
+	int temperature_poll_interval_ms;
+};
+#define FALLBACK_STATIC_TEMPERATURE 55000
+
+/**
+ * calculate_temp_scaling_factor() - Calculate temperature scaling coefficient
+ * @ts:		Signed coefficients, in order t^0 to t^3, with units Deg^-N
+ * @t:		Temperature, in mDeg C. Range: -2^17 < t < 2^17
+ *
+ * Scale the temperature according to a cubic polynomial whose coefficients are
+ * provided in the device tree. The result is used to scale the static power
+ * coefficient, where 1000000 means no change.
+ *
+ * Return: Temperature scaling factor. Range 0 <= ret <= 10,000,000.
+ */
+static u32 calculate_temp_scaling_factor(s32 ts[4], s64 t)
+{
+	/* Range: -2^24 < t2 < 2^24 m(Deg^2) */
+	const s64 t2 = div_s64((t * t), 1000);
+
+	/* Range: -2^31 < t3 < 2^31 m(Deg^3) */
+	const s64 t3 = div_s64((t * t2), 1000);
+
+	/*
+	 * Sum the parts. t^[1-3] are in m(Deg^N), but the coefficients are in
+	 * Deg^-N, so we need to multiply the last coefficient by 1000.
+	 * Range: -2^63 < res_big < 2^63
+	 */
+	const s64 res_big = ts[3] * t3    /* +/- 2^62 */
+			  + ts[2] * t2    /* +/- 2^55 */
+			  + ts[1] * t     /* +/- 2^48 */
+			  + ts[0] * (s64)1000; /* +/- 2^41 */
+
+	/* Range: -2^60 < res_unclamped < 2^60 */
+	s64 res_unclamped = div_s64(res_big, 1000);
+
+	/* Clamp to range of 0x to 10x the static power */
+	return clamp(res_unclamped, (s64) 0, (s64) 10000000);
+}
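+
+/* Worked example (hypothetical coefficients): with ts = {1500000, -20, 0, 0}
+ * and t = 50000 (50 degrees C), res_big = 1500000 * 1000 + (-20) * 50000
+ * = 1499000000, so the function returns 1499000, i.e. the static power is
+ * scaled by ~1.5x.
+ */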
+
+/* We can't call thermal_zone_get_temp() directly in model_static_coeff(),
+ * because we don't know if tz->lock is held in the same thread. So poll it in
+ * a separate thread to get around this. */
+static int poll_temperature(void *data)
+{
+	struct kbase_ipa_model_simple_data *model_data =
+			(struct kbase_ipa_model_simple_data *) data;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+	unsigned long temp;
+#else
+	int temp;
+#endif
+
+	while (!kthread_should_stop()) {
+		struct thermal_zone_device *tz = READ_ONCE(model_data->gpu_tz);
+
+		if (tz) {
+			int ret;
+
+			ret = thermal_zone_get_temp(tz, &temp);
+			if (ret) {
+				pr_warn_ratelimited("Error reading temperature for gpu thermal zone: %d\n",
+						    ret);
+				temp = FALLBACK_STATIC_TEMPERATURE;
+			}
+		} else {
+			temp = FALLBACK_STATIC_TEMPERATURE;
+		}
+
+		WRITE_ONCE(model_data->current_temperature, temp);
+
+		msleep_interruptible(READ_ONCE(model_data->temperature_poll_interval_ms));
+	}
+
+	return 0;
+}
+
+static int model_static_coeff(struct kbase_ipa_model *model, u32 *coeffp)
+{
+	u32 temp_scaling_factor;
+	struct kbase_ipa_model_simple_data *model_data =
+		(struct kbase_ipa_model_simple_data *) model->model_data;
+	u64 coeff_big;
+	int temp;
+
+	temp = READ_ONCE(model_data->current_temperature);
+
+	/* Range: 0 <= temp_scaling_factor < 2^24 */
+	temp_scaling_factor = calculate_temp_scaling_factor(model_data->ts,
+							    temp);
+
+	/*
+	 * Range: 0 <= coeff_big < 2^52 to avoid overflowing *coeffp. This
+	 * means static_coefficient must be in range
+	 * 0 <= static_coefficient < 2^28.
+	 */
+	coeff_big = (u64) model_data->static_coefficient * (u64) temp_scaling_factor;
+	*coeffp = div_u64(coeff_big, 1000000);
+
+	return 0;
+}
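+
+/* Continuing the example above: with a hypothetical static_coefficient of
+ * 2000000 and temp_scaling_factor = 1499000, coeff_big = 2998000000000 and
+ * *coeffp = 2998000 uW/(V^3), which the IPA framework then scales according
+ * to the current OPP's voltage.
+ */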
+
+static int model_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp)
+{
+	struct kbase_ipa_model_simple_data *model_data =
+		(struct kbase_ipa_model_simple_data *) model->model_data;
+
+	*coeffp = model_data->dynamic_coefficient;
+
+	return 0;
+}
+
+static int add_params(struct kbase_ipa_model *model)
+{
+	int err = 0;
+	struct kbase_ipa_model_simple_data *model_data =
+			(struct kbase_ipa_model_simple_data *)model->model_data;
+
+	err = kbase_ipa_model_add_param_s32(model, "static-coefficient",
+					    &model_data->static_coefficient,
+					    1, true);
+	if (err)
+		goto end;
+
+	err = kbase_ipa_model_add_param_s32(model, "dynamic-coefficient",
+					    &model_data->dynamic_coefficient,
+					    1, true);
+	if (err)
+		goto end;
+
+	err = kbase_ipa_model_add_param_s32(model, "ts",
+					    model_data->ts, 4, true);
+	if (err)
+		goto end;
+
+	err = kbase_ipa_model_add_param_string(model, "thermal-zone",
+					       model_data->tz_name,
+					       sizeof(model_data->tz_name), true);
+	if (err)
+		goto end;
+
+	model_data->temperature_poll_interval_ms = 200;
+	err = kbase_ipa_model_add_param_s32(model, "temp-poll-interval-ms",
+					    &model_data->temperature_poll_interval_ms,
+					    1, false);
+
+end:
+	return err;
+}
+
+static int kbase_simple_power_model_init(struct kbase_ipa_model *model)
+{
+	int err;
+	struct kbase_ipa_model_simple_data *model_data;
+
+	model_data = kzalloc(sizeof(struct kbase_ipa_model_simple_data),
+			     GFP_KERNEL);
+	if (!model_data)
+		return -ENOMEM;
+
+	model->model_data = (void *) model_data;
+
+	model_data->current_temperature = FALLBACK_STATIC_TEMPERATURE;
+	model_data->poll_temperature_thread = kthread_run(poll_temperature,
+							  (void *) model_data,
+							  "mali-simple-power-model-temp-poll");
+	if (IS_ERR(model_data->poll_temperature_thread)) {
+		err = PTR_ERR(model_data->poll_temperature_thread);
+		kfree(model_data);
+		return err;
+	}
+
+	err = add_params(model);
+	if (err) {
+		kbase_ipa_model_param_free_all(model);
+		kthread_stop(model_data->poll_temperature_thread);
+		kfree(model_data);
+	}
+
+	return err;
+}
+
+static int kbase_simple_power_model_recalculate(struct kbase_ipa_model *model)
+{
+	struct kbase_ipa_model_simple_data *model_data =
+			(struct kbase_ipa_model_simple_data *)model->model_data;
+	struct thermal_zone_device *tz;
+
+	lockdep_assert_held(&model->kbdev->ipa.lock);
+
+	if (!strnlen(model_data->tz_name, sizeof(model_data->tz_name))) {
+		model_data->gpu_tz = NULL;
+	} else {
+		char tz_name[THERMAL_NAME_LENGTH];
+
+		strlcpy(tz_name, model_data->tz_name, sizeof(tz_name));
+
+		/* Release ipa.lock so that thermal_list_lock is not acquired
+		 * with ipa.lock held, thereby avoid lock ordering violation
+		 * lockdep warning. The warning comes as a chain of locks
+		 * ipa.lock --> thermal_list_lock --> tz->lock gets formed
+		 * on registering devfreq cooling device when probe method
+		 * of mali platform driver is invoked.
+		 */
+		mutex_unlock(&model->kbdev->ipa.lock);
+		tz = thermal_zone_get_zone_by_name(tz_name);
+		mutex_lock(&model->kbdev->ipa.lock);
+
+		if (IS_ERR_OR_NULL(tz)) {
+			pr_warn_ratelimited("Error %ld getting thermal zone \'%s\', not yet ready?\n",
+					    PTR_ERR(tz), tz_name);
+			return -EPROBE_DEFER;
+		}
+
+		/* Check if another thread raced against us & updated the
+		 * thermal zone name string. Update the gpu_tz pointer only if
+		 * the name string did not change whilst we retrieved the new
+		 * thermal_zone_device pointer, otherwise model_data->tz_name &
+		 * model_data->gpu_tz would become inconsistent with each other.
+		 * The below check will succeed only for the thread which last
+		 * updated the name string.
+		 */
+		if (strncmp(tz_name, model_data->tz_name, sizeof(tz_name)) == 0)
+			model_data->gpu_tz = tz;
+	}
+
+	return 0;
+}
+
+static void kbase_simple_power_model_term(struct kbase_ipa_model *model)
+{
+	struct kbase_ipa_model_simple_data *model_data =
+			(struct kbase_ipa_model_simple_data *)model->model_data;
+
+	kthread_stop(model_data->poll_temperature_thread);
+
+	kfree(model_data);
+}
+
+struct kbase_ipa_model_ops kbase_simple_ipa_model_ops = {
+		.name = "mali-simple-power-model",
+		.init = &kbase_simple_power_model_init,
+		.recalculate = &kbase_simple_power_model_recalculate,
+		.term = &kbase_simple_power_model_term,
+		.get_dynamic_coeff = &model_dynamic_coeff,
+		.get_static_coeff = &model_static_coeff,
+};
+KBASE_EXPORT_TEST_API(kbase_simple_ipa_model_ops);
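+
+/* A devicetree node can select this model explicitly, e.g. (illustrative):
+ *
+ *	ipa-model = "mali-simple-power-model";
+ */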
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.h
new file mode 100644
index 0000000..fed67d5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_IPA_SIMPLE_H_
+#define _KBASE_IPA_SIMPLE_H_
+
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+
+extern struct kbase_ipa_model_ops kbase_simple_ipa_model_ops;
+
+#if MALI_UNIT_TEST
+/**
+ * kbase_simple_power_model_set_dummy_temp() - set a dummy temperature value
+ * @temp: Temperature of the thermal zone, in millidegrees Celsius.
+ *
+ * This is only intended for use in unit tests, to ensure that the temperature
+ * values used by the simple power model are predictable. Deterministic
+ * behavior is necessary to allow validation of the static power values
+ * computed by this model.
+ */
+void kbase_simple_power_model_set_dummy_temp(int temp);
+#endif /* MALI_UNIT_TEST */
+
+#endif /* (defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */
+
+#endif /* _KBASE_IPA_SIMPLE_H_ */
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c
new file mode 100644
index 0000000..9fae8f1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c
@@ -0,0 +1,346 @@
+/*
+ *
+ * (C) COPYRIGHT 2017-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_ipa_vinstr_common.h"
+#include "mali_kbase_ipa_debugfs.h"
+
+#define DEFAULT_SCALING_FACTOR 5
+
+/* If the value of GPU_ACTIVE is below this, use the simple model
+ * instead, to avoid extrapolating small amounts of counter data across
+ * large sample periods.
+ */
+#define DEFAULT_MIN_SAMPLE_CYCLES 10000
+
+/**
+ * read_hwcnt() - read a counter value
+ * @model_data:		pointer to model data
+ * @offset:		offset, in bytes, into vinstr buffer
+ *
+ * Return: A 32-bit counter value. Range: 0 < value < 2^27 (worst case would be
+ * incrementing every cycle over a ~100ms sample period at a high frequency,
+ * e.g. 1 GHz: 2^30 * 0.1 seconds ~= 2^27).
+ */
+static inline u32 kbase_ipa_read_hwcnt(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	u32 offset)
+{
+	u8 *p = (u8 *)model_data->dump_buf.dump_buf;
+
+	return *(u32 *)&p[offset];
+}
+
+static inline s64 kbase_ipa_add_saturate(s64 a, s64 b)
+{
+	s64 rtn;
+
+	if (a > 0 && (S64_MAX - a) < b)
+		rtn = S64_MAX;
+	else if (a < 0 && (S64_MIN - a) > b)
+		rtn = S64_MIN;
+	else
+		rtn = a + b;
+
+	return rtn;
+}
+
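+/* e.g. kbase_ipa_add_saturate(S64_MAX - 1, 5) saturates to S64_MAX instead
+ * of wrapping around.
+ */
+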
+s64 kbase_ipa_sum_all_shader_cores(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff, u32 counter)
+{
+	struct kbase_device *kbdev = model_data->kbdev;
+	u64 core_mask;
+	u32 base = 0;
+	s64 ret = 0;
+
+	core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+	while (core_mask != 0ull) {
+		if ((core_mask & 1ull) != 0ull) {
+			/* 0 < counter_value < 2^27 */
+			u32 counter_value = kbase_ipa_read_hwcnt(model_data,
+						       base + counter);
+
+			/* 0 < ret < 2^27 * max_num_cores = 2^32 */
+			ret = kbase_ipa_add_saturate(ret, counter_value);
+		}
+		base += KBASE_IPA_NR_BYTES_PER_BLOCK;
+		core_mask >>= 1;
+	}
+
+	/* Range: -2^54 < ret * coeff < 2^54 */
+	return ret * coeff;
+}
+
+s64 kbase_ipa_sum_all_memsys_blocks(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff, u32 counter)
+{
+	struct kbase_device *kbdev = model_data->kbdev;
+	const u32 num_blocks = kbdev->gpu_props.props.l2_props.num_l2_slices;
+	u32 base = 0;
+	s64 ret = 0;
+	u32 i;
+
+	for (i = 0; i < num_blocks; i++) {
+		/* 0 < counter_value < 2^27 */
+		u32 counter_value = kbase_ipa_read_hwcnt(model_data,
+					       base + counter);
+
+		/* 0 < ret < 2^27 * max_num_memsys_blocks = 2^29 */
+		ret = kbase_ipa_add_saturate(ret, counter_value);
+		base += KBASE_IPA_NR_BYTES_PER_BLOCK;
+	}
+
+	/* Range: -2^51 < ret * coeff < 2^51 */
+	return ret * coeff;
+}
+
+s64 kbase_ipa_single_counter(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff, u32 counter)
+{
+	/* Range: 0 < counter_value < 2^27 */
+	const u32 counter_value = kbase_ipa_read_hwcnt(model_data, counter);
+
+	/* Range: -2^49 < ret < 2^49 */
+	return counter_value * (s64) coeff;
+}
+
+int kbase_ipa_attach_vinstr(struct kbase_ipa_model_vinstr_data *model_data)
+{
+	int errcode;
+	struct kbase_device *kbdev = model_data->kbdev;
+	struct kbase_hwcnt_virtualizer *hvirt = kbdev->hwcnt_gpu_virt;
+	struct kbase_hwcnt_enable_map enable_map;
+	const struct kbase_hwcnt_metadata *metadata =
+		kbase_hwcnt_virtualizer_metadata(hvirt);
+
+	if (!metadata)
+		return -1;
+
+	errcode = kbase_hwcnt_enable_map_alloc(metadata, &enable_map);
+	if (errcode) {
+		dev_err(kbdev->dev, "Failed to allocate IPA enable map");
+		return errcode;
+	}
+
+	kbase_hwcnt_enable_map_enable_all(&enable_map);
+
+	errcode = kbase_hwcnt_virtualizer_client_create(
+		hvirt, &enable_map, &model_data->hvirt_cli);
+	kbase_hwcnt_enable_map_free(&enable_map);
+	if (errcode) {
+		dev_err(kbdev->dev, "Failed to register IPA with virtualizer");
+		model_data->hvirt_cli = NULL;
+		return errcode;
+	}
+
+	errcode = kbase_hwcnt_dump_buffer_alloc(
+		metadata, &model_data->dump_buf);
+	if (errcode) {
+		dev_err(kbdev->dev, "Failed to allocate IPA dump buffer");
+		kbase_hwcnt_virtualizer_client_destroy(model_data->hvirt_cli);
+		model_data->hvirt_cli = NULL;
+		return errcode;
+	}
+
+	return 0;
+}
+
+void kbase_ipa_detach_vinstr(struct kbase_ipa_model_vinstr_data *model_data)
+{
+	if (model_data->hvirt_cli) {
+		kbase_hwcnt_virtualizer_client_destroy(model_data->hvirt_cli);
+		kbase_hwcnt_dump_buffer_free(&model_data->dump_buf);
+		model_data->hvirt_cli = NULL;
+	}
+}
+
+int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp)
+{
+	struct kbase_ipa_model_vinstr_data *model_data =
+			(struct kbase_ipa_model_vinstr_data *)model->model_data;
+	s64 energy = 0;
+	size_t i;
+	u64 coeff = 0, coeff_mul = 0;
+	u64 start_ts_ns, end_ts_ns;
+	u32 active_cycles;
+	int err = 0;
+
+	err = kbase_hwcnt_virtualizer_client_dump(model_data->hvirt_cli,
+		&start_ts_ns, &end_ts_ns, &model_data->dump_buf);
+	if (err)
+		goto err0;
+
+	/* Range: 0 (GPU not used at all), to the max sampling interval, say
+	 * 1s, * max GPU frequency (GPU 100% utilized).
+	 * 0 <= active_cycles <= 1 * ~2GHz
+	 * 0 <= active_cycles < 2^31
+	 */
+	active_cycles = model_data->get_active_cycles(model_data);
+
+	if (active_cycles < (u32) max(model_data->min_sample_cycles, 0)) {
+		err = -ENODATA;
+		goto err0;
+	}
+
+	/* Range: 1 <= active_cycles < 2^31 */
+	active_cycles = max(1u, active_cycles);
+
+	/* Range of 'energy' is +/- 2^54 * number of IPA groups (~8), so around
+	 * -2^57 < energy < 2^57
+	 */
+	for (i = 0; i < model_data->groups_def_num; i++) {
+		const struct kbase_ipa_group *group = &model_data->groups_def[i];
+		s32 coeff = model_data->group_values[i];
+		s64 group_energy = group->op(model_data, coeff,
+					     group->counter_block_offset);
+
+		energy = kbase_ipa_add_saturate(energy, group_energy);
+	}
+
+	/* Range: 0 <= coeff < 2^57 */
+	if (energy > 0)
+		coeff = energy;
+
+	/* Range: 0 <= coeff < 2^57 (because active_cycles >= 1). However, this
+	 * can be constrained further: Counter values can only be increased by
+	 * a theoretical maximum of about 64k per clock cycle. Beyond this,
+	 * we'd have to sample every 1ms to avoid them overflowing at the
+	 * lowest clock frequency (say 100MHz). Therefore, we can write the
+	 * range of 'coeff' in terms of active_cycles:
+	 *
+	 * coeff = SUM(coeffN * counterN * num_cores_for_counterN)
+	 * coeff <= SUM(coeffN * counterN) * max_num_cores
+	 * coeff <= num_IPA_groups * max_coeff * max_counter * max_num_cores
+	 *       (substitute max_counter = 2^16 * active_cycles)
+	 * coeff <= num_IPA_groups * max_coeff * 2^16 * active_cycles * max_num_cores
+	 * coeff <=    2^3         *    2^22   * 2^16 * active_cycles * 2^5
+	 * coeff <= 2^46 * active_cycles
+	 *
+	 * So after the division: 0 <= coeff <= 2^46
+	 */
+	coeff = div_u64(coeff, active_cycles);
+
+	/* Not all models were derived at the same reference voltage. Voltage
+	 * scaling is done by multiplying by V^2, so we need to *divide* by
+	 * Vref^2 here.
+	 * Range: 0 <= coeff <= 2^49
+	 */
+	coeff = div_u64(coeff * 1000, max(model_data->reference_voltage, 1));
+	/* Range: 0 <= coeff <= 2^52 */
+	coeff = div_u64(coeff * 1000, max(model_data->reference_voltage, 1));
+
+	/* Scale by user-specified integer factor.
+	 * Range: 0 <= coeff_mul < 2^57
+	 */
+	coeff_mul = coeff * model_data->scaling_factor;
+
+	/* The power models have results with units
+	 * mW/(MHz V^2), i.e. nW/(Hz V^2). With precision of 1/1000000, this
+	 * becomes fW/(Hz V^2), which are the units of coeff_mul. However,
+	 * kbase_scale_dynamic_power() expects units of pW/(Hz V^2), so divide
+	 * by 1000.
+	 * Range: 0 <= coeff_mul < 2^47
+	 */
+	coeff_mul = div_u64(coeff_mul, 1000u);
+
+err0:
+	/* Clamp to a sensible range - 2^16 gives about 14W at 400MHz/750mV */
+	*coeffp = clamp(coeff_mul, (u64) 0, (u64) 1 << 16);
+	return err;
+}
+
+int kbase_ipa_vinstr_common_model_init(struct kbase_ipa_model *model,
+				       const struct kbase_ipa_group *ipa_groups_def,
+				       size_t ipa_group_size,
+				       kbase_ipa_get_active_cycles_callback get_active_cycles,
+				       s32 reference_voltage)
+{
+	int err = 0;
+	size_t i;
+	struct kbase_ipa_model_vinstr_data *model_data;
+
+	if (!model || !ipa_groups_def || !ipa_group_size || !get_active_cycles)
+		return -EINVAL;
+
+	model_data = kzalloc(sizeof(*model_data), GFP_KERNEL);
+	if (!model_data)
+		return -ENOMEM;
+
+	model_data->kbdev = model->kbdev;
+	model_data->groups_def = ipa_groups_def;
+	model_data->groups_def_num = ipa_group_size;
+	model_data->get_active_cycles = get_active_cycles;
+
+	model->model_data = (void *) model_data;
+
+	for (i = 0; i < model_data->groups_def_num; ++i) {
+		const struct kbase_ipa_group *group = &model_data->groups_def[i];
+
+		model_data->group_values[i] = group->default_value;
+		err = kbase_ipa_model_add_param_s32(model, group->name,
+					&model_data->group_values[i],
+					1, false);
+		if (err)
+			goto exit;
+	}
+
+	model_data->scaling_factor = DEFAULT_SCALING_FACTOR;
+	err = kbase_ipa_model_add_param_s32(model, "scale",
+					    &model_data->scaling_factor,
+					    1, false);
+	if (err)
+		goto exit;
+
+	model_data->min_sample_cycles = DEFAULT_MIN_SAMPLE_CYCLES;
+	err = kbase_ipa_model_add_param_s32(model, "min_sample_cycles",
+					    &model_data->min_sample_cycles,
+					    1, false);
+	if (err)
+		goto exit;
+
+	model_data->reference_voltage = reference_voltage;
+	err = kbase_ipa_model_add_param_s32(model, "reference_voltage",
+					    &model_data->reference_voltage,
+					    1, false);
+	if (err)
+		goto exit;
+
+	err = kbase_ipa_attach_vinstr(model_data);
+
+exit:
+	if (err) {
+		kbase_ipa_model_param_free_all(model);
+		kfree(model_data);
+	}
+	return err;
+}
+
+void kbase_ipa_vinstr_common_model_term(struct kbase_ipa_model *model)
+{
+	struct kbase_ipa_model_vinstr_data *model_data =
+			(struct kbase_ipa_model_vinstr_data *)model->model_data;
+
+	kbase_ipa_detach_vinstr(model_data);
+	kfree(model_data);
+}
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h
new file mode 100644
index 0000000..46e3cd4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h
@@ -0,0 +1,217 @@
+/*
+ *
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_IPA_VINSTR_COMMON_H_
+#define _KBASE_IPA_VINSTR_COMMON_H_
+
+#include "mali_kbase.h"
+#include "mali_kbase_hwcnt_virtualizer.h"
+#include "mali_kbase_hwcnt_types.h"
+
+/* Maximum number of IPA groups for an IPA model. */
+#define KBASE_IPA_MAX_GROUP_DEF_NUM  16
+
+/* Number of bytes per hardware counter in a vinstr_buffer. */
+#define KBASE_IPA_NR_BYTES_PER_CNT    4
+
+/* Number of hardware counters per block in a vinstr_buffer. */
+#define KBASE_IPA_NR_CNT_PER_BLOCK   64
+
+/* Number of bytes per block in a vinstr_buffer. */
+#define KBASE_IPA_NR_BYTES_PER_BLOCK \
+	(KBASE_IPA_NR_CNT_PER_BLOCK * KBASE_IPA_NR_BYTES_PER_CNT)
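+/* i.e. 64 counters * 4 bytes = 256 bytes per block with the values above. */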
+
+struct kbase_ipa_model_vinstr_data;
+
+typedef u32 (*kbase_ipa_get_active_cycles_callback)(struct kbase_ipa_model_vinstr_data *);
+
+/**
+ * struct kbase_ipa_model_vinstr_data - IPA context per device
+ * @kbdev:               pointer to kbase device
+ * @groups_def:          Array of IPA groups.
+ * @groups_def_num:      Number of elements in the array of IPA groups.
+ * @get_active_cycles:   Callback to return number of active cycles during
+ *                       counter sample period
+ * @hvirt_cli:           hardware counter virtualizer client handle
+ * @dump_buf:            buffer to dump hardware counters onto
+ * @reference_voltage:   voltage, in mV, of the operating point used when
+ *                       deriving the power model coefficients. Range approx
+ *                       0.1V - 5V (~= 8V): 2^7 <= reference_voltage <= 2^13
+ * @scaling_factor:      User-specified power scaling factor. This is an
+ *                       integer, which is multiplied by the power coefficient
+ *                       just before OPP scaling.
+ *                       Range approx 0-32: 0 < scaling_factor < 2^5
+ * @min_sample_cycles:   If the value of the GPU_ACTIVE counter (the number of
+ *                       cycles the GPU was working) is less than
+ *                       min_sample_cycles, the counter model will return an
+ *                       error, causing the IPA framework to approximate using
+ *                       the cached simple model results instead. This may be
+ *                       more accurate than extrapolating using a very small
+ *                       counter dump.
+ */
+struct kbase_ipa_model_vinstr_data {
+	struct kbase_device *kbdev;
+	s32 group_values[KBASE_IPA_MAX_GROUP_DEF_NUM];
+	const struct kbase_ipa_group *groups_def;
+	size_t groups_def_num;
+	kbase_ipa_get_active_cycles_callback get_active_cycles;
+	struct kbase_hwcnt_virtualizer_client *hvirt_cli;
+	struct kbase_hwcnt_dump_buffer dump_buf;
+	s32 reference_voltage;
+	s32 scaling_factor;
+	s32 min_sample_cycles;
+};
+
+/**
+ * struct kbase_ipa_group - represents a single IPA group
+ * @name:               name of the IPA group
+ * @default_value:      default value of coefficient for IPA group.
+ *                      Coefficients are interpreted as fractions where the
+ *                      denominator is 1000000.
+ * @op:                 which operation to be performed on the counter values
+ * @counter_block_offset:  block offset in bytes of the counter used to calculate energy for IPA group
+ */
+struct kbase_ipa_group {
+	const char *name;
+	s32 default_value;
+	s64 (*op)(struct kbase_ipa_model_vinstr_data *, s32, u32);
+	u32 counter_block_offset;
+};
+
+/**
+ * kbase_ipa_sum_all_shader_cores() - sum a counter over all cores
+ * @model_data:		pointer to model data
+ * @coeff:		model coefficient. Unity is ~2^20, so range approx
+ *			+/- 4.0: -2^22 < coeff < 2^22
+ * @counter:		offset in bytes of the counter used to calculate energy
+ *			for IPA group
+ *
+ * Calculate energy estimation based on hardware counter `counter'
+ * across all shader cores.
+ *
+ * Return: Sum of counter values. Range: -2^54 < ret < 2^54
+ */
+s64 kbase_ipa_sum_all_shader_cores(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff, u32 counter);
+
+/**
+ * kbase_ipa_sum_all_memsys_blocks() - sum a counter over all mem system blocks
+ * @model_data:		pointer to model data
+ * @coeff:		model coefficient. Unity is ~2^20, so range approx
+ *			+/- 4.0: -2^22 < coeff < 2^22
+ * @counter:		offset in bytes of the counter used to calculate energy
+ *			for IPA group
+ *
+ * Calculate energy estimation based on hardware counter `counter' across all
+ * memory system blocks.
+ *
+ * Return: Sum of counter values. Range: -2^51 < ret < 2^51
+ */
+s64 kbase_ipa_sum_all_memsys_blocks(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff, u32 counter);
+
+/**
+ * kbase_ipa_single_counter() - sum a single counter
+ * @model_data:		pointer to model data
+ * @coeff:		model coefficient. Unity is ~2^20, so range approx
+ *			+/- 4.0: -2^22 < coeff < 2^22
+ * @counter:		offset in bytes of the counter used to calculate energy
+ *			for IPA group
+ *
+ * Calculate energy estimation based on hardware counter `counter'.
+ *
+ * Return: Counter value. Range: -2^49 < ret < 2^49
+ */
+s64 kbase_ipa_single_counter(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff, u32 counter);
+
+/**
+ * kbase_ipa_attach_vinstr() - attach a vinstr_buffer to an IPA model.
+ * @model_data:		pointer to model data
+ *
+ * Attach a vinstr_buffer to an IPA model. The vinstr_buffer
+ * allows access to the hardware counters used to calculate
+ * energy consumption.
+ *
+ * Return: 0 on success, or an error code.
+ */
+int kbase_ipa_attach_vinstr(struct kbase_ipa_model_vinstr_data *model_data);
+
+/**
+ * kbase_ipa_detach_vinstr() - detach a vinstr_buffer from an IPA model.
+ * @model_data:		pointer to model data
+ *
+ * Detach a vinstr_buffer from an IPA model.
+ */
+void kbase_ipa_detach_vinstr(struct kbase_ipa_model_vinstr_data *model_data);
+
+/**
+ * kbase_ipa_vinstr_dynamic_coeff() - calculate dynamic power based on HW counters
+ * @model:		pointer to instantiated model
+ * @coeffp:		pointer to location where calculated power, in
+ *			pW/(Hz V^2), is stored.
+ *
+ * This is a GPU-agnostic implementation of the get_dynamic_coeff()
+ * function of an IPA model. It relies on the model being populated
+ * with GPU-specific attributes at initialization time.
+ *
+ * Return: 0 on success, or an error code.
+ */
+int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp);
+
+/**
+ * kbase_ipa_vinstr_common_model_init() - initialize ipa power model
+ * @model:		ipa power model to initialize
+ * @ipa_groups_def:	array of ipa groups which sets coefficients for
+ *			the corresponding counters used in the ipa model
+ * @ipa_group_size:     number of elements in the array @ipa_groups_def
+ * @get_active_cycles:  callback to return the number of cycles the GPU was
+ *			active during the counter sample period.
+ * @reference_voltage:  voltage, in mV, of the operating point used when
+ *                      deriving the power model coefficients.
+ *
+ * This initialization function performs initialization steps common
+ * for ipa models based on counter values. In each call, the model
+ * passes its specific coefficient values per ipa counter group via
+ * @ipa_groups_def array.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int kbase_ipa_vinstr_common_model_init(struct kbase_ipa_model *model,
+				       const struct kbase_ipa_group *ipa_groups_def,
+				       size_t ipa_group_size,
+				       kbase_ipa_get_active_cycles_callback get_active_cycles,
+				       s32 reference_voltage);
+
+/**
+ * kbase_ipa_vinstr_common_model_term() - terminate ipa power model
+ * @model: ipa power model to terminate
+ *
+ * This function performs all necessary steps to terminate ipa power model
+ * including clean up of resources allocated to hold model data.
+ */
+void kbase_ipa_vinstr_common_model_term(struct kbase_ipa_model *model);
+
+#endif /* _KBASE_IPA_VINSTR_COMMON_H_ */
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g7x.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g7x.c
new file mode 100644
index 0000000..270b75e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g7x.c
@@ -0,0 +1,456 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+#include <linux/thermal.h>
+
+#include "mali_kbase_ipa_vinstr_common.h"
+#include "mali_kbase.h"
+
+
+/* Performance counter blocks base offsets */
+#define JM_BASE             (0 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define TILER_BASE          (1 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define MEMSYS_BASE         (2 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+
+/* JM counter block offsets */
+#define JM_GPU_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT *  6)
+
+/* Tiler counter block offsets */
+#define TILER_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 45)
+
+/* MEMSYS counter block offsets */
+#define MEMSYS_L2_ANY_LOOKUP (KBASE_IPA_NR_BYTES_PER_CNT * 25)
+
+/* SC counter block offsets */
+#define SC_FRAG_ACTIVE             (KBASE_IPA_NR_BYTES_PER_CNT *  4)
+#define SC_EXEC_CORE_ACTIVE        (KBASE_IPA_NR_BYTES_PER_CNT * 26)
+#define SC_EXEC_INSTR_FMA          (KBASE_IPA_NR_BYTES_PER_CNT * 27)
+#define SC_EXEC_INSTR_COUNT        (KBASE_IPA_NR_BYTES_PER_CNT * 28)
+#define SC_EXEC_INSTR_MSG          (KBASE_IPA_NR_BYTES_PER_CNT * 30)
+#define SC_TEX_FILT_NUM_OPERATIONS (KBASE_IPA_NR_BYTES_PER_CNT * 39)
+#define SC_TEX_COORD_ISSUE         (KBASE_IPA_NR_BYTES_PER_CNT * 40)
+#define SC_TEX_TFCH_NUM_OPERATIONS (KBASE_IPA_NR_BYTES_PER_CNT * 42)
+#define SC_VARY_INSTR              (KBASE_IPA_NR_BYTES_PER_CNT * 49)
+#define SC_VARY_SLOT_32            (KBASE_IPA_NR_BYTES_PER_CNT * 50)
+#define SC_VARY_SLOT_16            (KBASE_IPA_NR_BYTES_PER_CNT * 51)
+#define SC_BEATS_RD_LSC            (KBASE_IPA_NR_BYTES_PER_CNT * 56)
+#define SC_BEATS_WR_LSC            (KBASE_IPA_NR_BYTES_PER_CNT * 61)
+#define SC_BEATS_WR_TIB            (KBASE_IPA_NR_BYTES_PER_CNT * 62)
+
+/**
+ * get_jm_counter() - get performance counter offset inside the Job Manager block
+ * @model_data:            pointer to GPU model data.
+ * @counter_block_offset:  offset in bytes of the performance counter inside the Job Manager block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g7x_power_model_get_jm_counter(struct kbase_ipa_model_vinstr_data *model_data,
+						u32 counter_block_offset)
+{
+	return JM_BASE + counter_block_offset;
+}
+
+/**
+ * get_memsys_counter() - get performance counter offset inside the Memory System block
+ * @model_data:            pointer to GPU model data.
+ * @counter_block_offset:  offset in bytes of the performance counter inside the (first) Memory System block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g7x_power_model_get_memsys_counter(struct kbase_ipa_model_vinstr_data *model_data,
+						    u32 counter_block_offset)
+{
+	/* The base address of Memory System performance counters is always the same, although their number
+	 * may vary based on the number of cores. For the moment it's ok to return a constant.
+	 */
+	return MEMSYS_BASE + counter_block_offset;
+}
+
+/**
+ * get_sc_counter() - get performance counter offset inside the Shader Cores block
+ * @model_data:            pointer to GPU model data.
+ * @counter_block_offset:  offset in bytes of the performance counter inside the (first) Shader Cores block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g7x_power_model_get_sc_counter(struct kbase_ipa_model_vinstr_data *model_data,
+						u32 counter_block_offset)
+{
+	const u32 sc_base = MEMSYS_BASE +
+		(model_data->kbdev->gpu_props.props.l2_props.num_l2_slices *
+		 KBASE_IPA_NR_BYTES_PER_BLOCK);
+
+	return sc_base + counter_block_offset;
+}
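+
+/* Illustrative example, not part of the original sources: on a GPU with a
+ * single L2 slice (num_l2_slices == 1), sc_base evaluates to
+ * MEMSYS_BASE + 1 * KBASE_IPA_NR_BYTES_PER_BLOCK, i.e. the Shader Cores
+ * blocks start immediately after the one Memory System block.
+ */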
+
+/**
+ * kbase_g7x_sum_all_memsys_blocks() - calculate energy for a Memory System counter over all blocks.
+ * @model_data:   pointer to GPU model data.
+ * @coeff:        default value of coefficient for IPA group.
+ * @offset:       offset in bytes of the counter inside the block it belongs to.
+ *
+ * Return: Energy estimation for the counter, summed over all Memory System blocks.
+ */
+static s64 kbase_g7x_sum_all_memsys_blocks(
+		struct kbase_ipa_model_vinstr_data *model_data,
+		s32 coeff,
+		u32 offset)
+{
+	u32 counter;
+
+	counter = kbase_g7x_power_model_get_memsys_counter(model_data, offset);
+	return kbase_ipa_sum_all_memsys_blocks(model_data, coeff, counter);
+}
+
+/**
+ * kbase_g7x_sum_all_shader_cores() - calculate energy for a Shader Cores performance counter for all cores.
+ * @model_data:            pointer to GPU model data.
+ * @coeff:                 default value of coefficient for IPA group.
+ * @counter_block_offset:  offset in bytes of the counter inside the block it belongs to.
+ *
+ * Return: Energy estimation for a Shader Cores performance counter for all cores.
+ */
+static s64 kbase_g7x_sum_all_shader_cores(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff,
+	u32 counter_block_offset)
+{
+	u32 counter;
+
+	counter = kbase_g7x_power_model_get_sc_counter(model_data,
+						       counter_block_offset);
+	return kbase_ipa_sum_all_shader_cores(model_data, coeff, counter);
+}
+
+/**
+ * kbase_g7x_jm_single_counter() - calculate energy for a single Job Manager performance counter.
+ * @model_data:            pointer to GPU model data.
+ * @coeff:                 default value of coefficient for IPA group.
+ * @counter_block_offset:  offset in bytes of the counter inside the block it belongs to.
+ *
+ * Return: Energy estimation for a single Job Manager performance counter.
+ */
+static s64 kbase_g7x_jm_single_counter(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff,
+	u32 counter_block_offset)
+{
+	u32 counter;
+
+	counter = kbase_g7x_power_model_get_jm_counter(model_data,
+						     counter_block_offset);
+	return kbase_ipa_single_counter(model_data, coeff, counter);
+}
+
+/**
+ * kbase_g7x_get_active_cycles() - return the GPU_ACTIVE counter
+ * @model_data:            pointer to GPU model data.
+ *
+ * Return: the number of cycles the GPU was active during the counter sampling
+ * period.
+ */
+static u32 kbase_g7x_get_active_cycles(
+	struct kbase_ipa_model_vinstr_data *model_data)
+{
+	u32 counter = kbase_g7x_power_model_get_jm_counter(model_data, JM_GPU_ACTIVE);
+
+	/* Counters are only 32-bit, so we can safely multiply by 1 then cast
+	 * the 64-bit result back to a u32.
+	 */
+	return kbase_ipa_single_counter(model_data, 1, counter);
+}
+
+/* Table of IPA group definitions.
+ *
+ * For each IPA group, this table defines the performance counter to sample
+ * and the function used to access it (iterating over multiple blocks where
+ * the counter exists per block) when calculating the energy estimate.
+ */
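+
+/* A hedged sketch (not from the original sources) of how the common vinstr
+ * model in mali_kbase_ipa_vinstr_common.c is assumed to consume these
+ * tables:
+ *
+ *	energy = 0;
+ *	for each group g in the table:
+ *		energy += g->op(model_data, coeff[g], g->counter_block_offset);
+ *
+ * with coeff[g] defaulting to g->default_value, and the result normalised
+ * by the active cycles from kbase_g7x_get_active_cycles() and by the
+ * reference voltage passed to kbase_ipa_vinstr_common_model_init(). Note
+ * that a group may carry a negative coefficient (e.g. tile_wb below),
+ * reducing the overall estimate.
+ */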
+
+static const struct kbase_ipa_group ipa_groups_def_g71[] = {
+	{
+		.name = "l2_access",
+		.default_value = 526300,
+		.op = kbase_g7x_sum_all_memsys_blocks,
+		.counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+	},
+	{
+		.name = "exec_instr_count",
+		.default_value = 301100,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_COUNT,
+	},
+	{
+		.name = "tex_issue",
+		.default_value = 197400,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_TEX_COORD_ISSUE,
+	},
+	{
+		.name = "tile_wb",
+		.default_value = -156400,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_BEATS_WR_TIB,
+	},
+	{
+		.name = "gpu_active",
+		.default_value = 115800,
+		.op = kbase_g7x_jm_single_counter,
+		.counter_block_offset = JM_GPU_ACTIVE,
+	},
+};
+
+static const struct kbase_ipa_group ipa_groups_def_g72[] = {
+	{
+		.name = "l2_access",
+		.default_value = 393000,
+		.op = kbase_g7x_sum_all_memsys_blocks,
+		.counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+	},
+	{
+		.name = "exec_instr_count",
+		.default_value = 227000,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_COUNT,
+	},
+	{
+		.name = "tex_issue",
+		.default_value = 181900,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_TEX_COORD_ISSUE,
+	},
+	{
+		.name = "tile_wb",
+		.default_value = -120200,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_BEATS_WR_TIB,
+	},
+	{
+		.name = "gpu_active",
+		.default_value = 133100,
+		.op = kbase_g7x_jm_single_counter,
+		.counter_block_offset = JM_GPU_ACTIVE,
+	},
+};
+
+static const struct kbase_ipa_group ipa_groups_def_g76[] = {
+	{
+		.name = "gpu_active",
+		.default_value = 122000,
+		.op = kbase_g7x_jm_single_counter,
+		.counter_block_offset = JM_GPU_ACTIVE,
+	},
+	{
+		.name = "exec_instr_count",
+		.default_value = 488900,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_COUNT,
+	},
+	{
+		.name = "vary_instr",
+		.default_value = 212100,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_VARY_INSTR,
+	},
+	{
+		.name = "tex_tfch_num_operations",
+		.default_value = 288000,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_TEX_TFCH_NUM_OPERATIONS,
+	},
+	{
+		.name = "l2_access",
+		.default_value = 378100,
+		.op = kbase_g7x_sum_all_memsys_blocks,
+		.counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+	},
+};
+
+static const struct kbase_ipa_group ipa_groups_def_g52_r1[] = {
+	{
+		.name = "gpu_active",
+		.default_value = 224200,
+		.op = kbase_g7x_jm_single_counter,
+		.counter_block_offset = JM_GPU_ACTIVE,
+	},
+	{
+		.name = "exec_instr_count",
+		.default_value = 384700,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_COUNT,
+	},
+	{
+		.name = "vary_instr",
+		.default_value = 271900,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_VARY_INSTR,
+	},
+	{
+		.name = "tex_tfch_num_operations",
+		.default_value = 477700,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_TEX_TFCH_NUM_OPERATIONS,
+	},
+	{
+		.name = "l2_access",
+		.default_value = 551400,
+		.op = kbase_g7x_sum_all_memsys_blocks,
+		.counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+	},
+};
+
+static const struct kbase_ipa_group ipa_groups_def_g51[] = {
+	{
+		.name = "gpu_active",
+		.default_value = 201400,
+		.op = kbase_g7x_jm_single_counter,
+		.counter_block_offset = JM_GPU_ACTIVE,
+	},
+	{
+		.name = "exec_instr_count",
+		.default_value = 392700,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_COUNT,
+	},
+	{
+		.name = "vary_instr",
+		.default_value = 274000,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_VARY_INSTR,
+	},
+	{
+		.name = "tex_tfch_num_operations",
+		.default_value = 528000,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_TEX_TFCH_NUM_OPERATIONS,
+	},
+	{
+		.name = "l2_access",
+		.default_value = 506400,
+		.op = kbase_g7x_sum_all_memsys_blocks,
+		.counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+	},
+};
+
+static const struct kbase_ipa_group ipa_groups_def_g77[] = {
+	{
+		.name = "l2_access",
+		.default_value = 710800,
+		.op = kbase_g7x_sum_all_memsys_blocks,
+		.counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+	},
+	{
+		.name = "exec_instr_msg",
+		.default_value = 2375300,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_MSG,
+	},
+	{
+		.name = "exec_instr_fma",
+		.default_value = 656100,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_FMA,
+	},
+	{
+		.name = "tex_filt_num_operations",
+		.default_value = 318800,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_TEX_FILT_NUM_OPERATIONS,
+	},
+	{
+		.name = "gpu_active",
+		.default_value = 172800,
+		.op = kbase_g7x_jm_single_counter,
+		.counter_block_offset = JM_GPU_ACTIVE,
+	},
+};
+
+static const struct kbase_ipa_group ipa_groups_def_tbex[] = {
+	{
+		.name = "l2_access",
+		.default_value = 599800,
+		.op = kbase_g7x_sum_all_memsys_blocks,
+		.counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+	},
+	{
+		.name = "exec_instr_msg",
+		.default_value = 1830200,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_MSG,
+	},
+	{
+		.name = "exec_instr_fma",
+		.default_value = 407300,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_FMA,
+	},
+	{
+		.name = "tex_filt_num_operations",
+		.default_value = 224500,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_TEX_FILT_NUM_OPERATIONS,
+	},
+	{
+		.name = "gpu_active",
+		.default_value = 153800,
+		.op = kbase_g7x_jm_single_counter,
+		.counter_block_offset = JM_GPU_ACTIVE,
+	},
+};
+
+#define IPA_POWER_MODEL_OPS(gpu, init_token) \
+	const struct kbase_ipa_model_ops kbase_ ## gpu ## _ipa_model_ops = { \
+		.name = "mali-" #gpu "-power-model", \
+		.init = kbase_ ## init_token ## _power_model_init, \
+		.term = kbase_ipa_vinstr_common_model_term, \
+		.get_dynamic_coeff = kbase_ipa_vinstr_dynamic_coeff, \
+	}; \
+	KBASE_EXPORT_TEST_API(kbase_ ## gpu ## _ipa_model_ops)
+
+#define STANDARD_POWER_MODEL(gpu, reference_voltage) \
+	static int kbase_ ## gpu ## _power_model_init(\
+			struct kbase_ipa_model *model) \
+	{ \
+		BUILD_BUG_ON(ARRAY_SIZE(ipa_groups_def_ ## gpu) > \
+				KBASE_IPA_MAX_GROUP_DEF_NUM); \
+		return kbase_ipa_vinstr_common_model_init(model, \
+				ipa_groups_def_ ## gpu, \
+				ARRAY_SIZE(ipa_groups_def_ ## gpu), \
+				kbase_g7x_get_active_cycles, \
+				(reference_voltage)); \
+	} \
+	IPA_POWER_MODEL_OPS(gpu, gpu)
+
+#define ALIAS_POWER_MODEL(gpu, as_gpu) \
+	IPA_POWER_MODEL_OPS(gpu, as_gpu)
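+
+/* Illustrative expansion, not part of the original sources: for example,
+ * STANDARD_POWER_MODEL(g71, 800) below defines kbase_g71_power_model_init()
+ * around ipa_groups_def_g71 with a reference voltage of 800 (millivolts,
+ * assuming the convention of kbase_ipa_vinstr_common_model_init), and emits
+ * kbase_g71_ipa_model_ops named "mali-g71-power-model". Likewise,
+ * ALIAS_POWER_MODEL(g52, g76) emits kbase_g52_ipa_model_ops
+ * ("mali-g52-power-model") whose .init is kbase_g76_power_model_init,
+ * reusing the g76 group table.
+ */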
+
+STANDARD_POWER_MODEL(g71, 800);
+STANDARD_POWER_MODEL(g72, 800);
+STANDARD_POWER_MODEL(g76, 800);
+STANDARD_POWER_MODEL(g52_r1, 1000);
+STANDARD_POWER_MODEL(g51, 1000);
+STANDARD_POWER_MODEL(g77, 1000);
+STANDARD_POWER_MODEL(tbex, 1000);
+
+/* g52 is an alias of g76 (TNOX) for IPA */
+ALIAS_POWER_MODEL(g52, g76);
+/* tnax is an alias of g77 (TTRX) for IPA */
+ALIAS_POWER_MODEL(tnax, g77);
diff --git a/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h b/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h
new file mode 100644
index 0000000..3d24972
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h
@@ -0,0 +1,476 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* AUTOMATICALLY GENERATED FILE. If you want to amend the issues/features,
+ * please update base/tools/hwconfig_generator/hwc_{issues,features}.py
+ * For more information see base/tools/hwconfig_generator/README
+ */
+
+#ifndef _BASE_HWCONFIG_FEATURES_H_
+#define _BASE_HWCONFIG_FEATURES_H_
+
+enum base_hw_feature {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
+	BASE_HW_FEATURE_IMAGES_IN_FRAGMENT_SHADERS,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_V4,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_TLS_HASHING,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_L2_CONFIG,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_generic[] = {
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tMIx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tHEx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tSIx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tDVx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tNOx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_TLS_HASHING,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tGOx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_TLS_HASHING,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tTRx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tNAx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tBEx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_L2_CONFIG,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tULx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_L2_CONFIG,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tDUx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_L2_CONFIG,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tODx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_L2_CONFIG,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tIDx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_L2_CONFIG,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tVAx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_L2_CONFIG,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_END
+};
+
+#endif /* _BASE_HWCONFIG_FEATURES_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h b/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h
new file mode 100644
index 0000000..7448608
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h
@@ -0,0 +1,565 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* AUTOMATICALLY GENERATED FILE. If you want to amend the issues/features,
+ * please update base/tools/hwconfig_generator/hwc_{issues,features}.py
+ * For more information see base/tools/hwconfig_generator/README
+ */
+
+#ifndef _BASE_HWCONFIG_ISSUES_H_
+#define _BASE_HWCONFIG_ISSUES_H_
+
+enum base_hw_issue {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_6367,
+	BASE_HW_ISSUE_6398,
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_6787,
+	BASE_HW_ISSUE_7027,
+	BASE_HW_ISSUE_7144,
+	BASE_HW_ISSUE_7304,
+	BASE_HW_ISSUE_8073,
+	BASE_HW_ISSUE_8186,
+	BASE_HW_ISSUE_8215,
+	BASE_HW_ISSUE_8245,
+	BASE_HW_ISSUE_8250,
+	BASE_HW_ISSUE_8260,
+	BASE_HW_ISSUE_8280,
+	BASE_HW_ISSUE_8316,
+	BASE_HW_ISSUE_8381,
+	BASE_HW_ISSUE_8394,
+	BASE_HW_ISSUE_8401,
+	BASE_HW_ISSUE_8408,
+	BASE_HW_ISSUE_8443,
+	BASE_HW_ISSUE_8456,
+	BASE_HW_ISSUE_8564,
+	BASE_HW_ISSUE_8634,
+	BASE_HW_ISSUE_8778,
+	BASE_HW_ISSUE_8791,
+	BASE_HW_ISSUE_8833,
+	BASE_HW_ISSUE_8879,
+	BASE_HW_ISSUE_8896,
+	BASE_HW_ISSUE_8975,
+	BASE_HW_ISSUE_8986,
+	BASE_HW_ISSUE_8987,
+	BASE_HW_ISSUE_9010,
+	BASE_HW_ISSUE_9418,
+	BASE_HW_ISSUE_9423,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_9510,
+	BASE_HW_ISSUE_9566,
+	BASE_HW_ISSUE_9630,
+	BASE_HW_ISSUE_10127,
+	BASE_HW_ISSUE_10327,
+	BASE_HW_ISSUE_10410,
+	BASE_HW_ISSUE_10471,
+	BASE_HW_ISSUE_10472,
+	BASE_HW_ISSUE_10487,
+	BASE_HW_ISSUE_10607,
+	BASE_HW_ISSUE_10632,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10676,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_10684,
+	BASE_HW_ISSUE_10797,
+	BASE_HW_ISSUE_10817,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10931,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_10959,
+	BASE_HW_ISSUE_10969,
+	BASE_HW_ISSUE_10984,
+	BASE_HW_ISSUE_10995,
+	BASE_HW_ISSUE_11012,
+	BASE_HW_ISSUE_11035,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_11056,
+	BASE_HW_ISSUE_T720_1386,
+	BASE_HW_ISSUE_T76X_26,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3542,
+	BASE_HW_ISSUE_T76X_3556,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_T83X_817,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_7940,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TMIX_8138,
+	BASE_HW_ISSUE_TMIX_8206,
+	BASE_HW_ISSUE_TMIX_8343,
+	BASE_HW_ISSUE_TMIX_8463,
+	BASE_HW_ISSUE_TMIX_8456,
+	GPUCORE_1619,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_TNOX_1194,
+	BASE_HW_ISSUE_TGOX_R1_1234,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_TSIX_1792,
+	BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+	BASE_HW_ISSUE_TTRX_3076,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_generic[] = {
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tMIx_r0p0_05dev0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TMIX_8138,
+	BASE_HW_ISSUE_TMIX_8206,
+	BASE_HW_ISSUE_TMIX_8343,
+	BASE_HW_ISSUE_TMIX_8463,
+	BASE_HW_ISSUE_TMIX_8456,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tMIx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_7940,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TMIX_8138,
+	BASE_HW_ISSUE_TMIX_8206,
+	BASE_HW_ISSUE_TMIX_8343,
+	BASE_HW_ISSUE_TMIX_8463,
+	BASE_HW_ISSUE_TMIX_8456,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tMIx_r0p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_7940,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TMIX_8138,
+	BASE_HW_ISSUE_TMIX_8206,
+	BASE_HW_ISSUE_TMIX_8343,
+	BASE_HW_ISSUE_TMIX_8463,
+	BASE_HW_ISSUE_TMIX_8456,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tMIx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_7940,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TMIX_8138,
+	BASE_HW_ISSUE_TMIX_8206,
+	BASE_HW_ISSUE_TMIX_8343,
+	BASE_HW_ISSUE_TMIX_8456,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tHEx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tHEx_r0p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tHEx_r0p2[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tHEx_r0p3[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tHEx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tSIx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TSIX_1792,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tSIx_r0p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TSIX_1792,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tSIx_r1p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tSIx_r1p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tSIx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tDVx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tDVx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tNOx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TNOX_1194,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tNOx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tGOx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TNOX_1194,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tGOx_r1p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TGOX_R1_1234,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tGOx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tTRx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+	BASE_HW_ISSUE_TTRX_3076,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tTRx_r0p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+	BASE_HW_ISSUE_TTRX_3076,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tTRx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tNAx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+	BASE_HW_ISSUE_TTRX_3076,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tNAx_r0p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+	BASE_HW_ISSUE_TTRX_3076,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_GPU2017_1336,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tNAx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tBEx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tBEx_r1p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_TTRX_2968_TTRX_3162,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tBEx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tULx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tULx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tDUx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_TTRX_921,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tDUx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tODx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tODx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tIDx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tIDx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tVAx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tVAx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+#endif /* _BASE_HWCONFIG_ISSUES_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_base_kernel.h b/drivers/gpu/arm/midgard/mali_base_kernel.h
new file mode 100644
index 0000000..a8ab408
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_base_kernel.h
@@ -0,0 +1,1792 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Base structures shared with the kernel.
+ */
+
+#ifndef _BASE_KERNEL_H_
+#define _BASE_KERNEL_H_
+
+typedef struct base_mem_handle {
+	struct {
+		u64 handle;
+	} basep;
+} base_mem_handle;
+
+#include "mali_base_mem_priv.h"
+#include "mali_midg_coherency.h"
+#include "mali_kbase_gpu_id.h"
+
+/*
+ * Dependency handling is kept private for now. It may be exposed if the
+ * number of semaphores is ever made a configurable option.
+ */
+#define BASE_JD_ATOM_COUNT              256
+
+/* Set/reset values for a software event */
+#define BASE_JD_SOFT_EVENT_SET             ((unsigned char)1)
+#define BASE_JD_SOFT_EVENT_RESET           ((unsigned char)0)
+
+#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 4
+
+#define BASE_MAX_COHERENT_GROUPS 16
+
+#if defined CDBG_ASSERT
+#define LOCAL_ASSERT CDBG_ASSERT
+#elif defined KBASE_DEBUG_ASSERT
+#define LOCAL_ASSERT KBASE_DEBUG_ASSERT
+#else
+#error assert macro not defined!
+#endif
+
+#if defined(PAGE_MASK) && defined(PAGE_SHIFT)
+#define LOCAL_PAGE_SHIFT PAGE_SHIFT
+#define LOCAL_PAGE_LSB ~PAGE_MASK
+#else
+#include <osu/mali_osu.h>
+
+#if defined OSU_CONFIG_CPU_PAGE_SIZE_LOG2
+#define LOCAL_PAGE_SHIFT OSU_CONFIG_CPU_PAGE_SIZE_LOG2
+#define LOCAL_PAGE_LSB ((1ul << OSU_CONFIG_CPU_PAGE_SIZE_LOG2) - 1)
+#else
+#error Failed to find page size
+#endif
+#endif
+
+/**
+ * @addtogroup base_user_api User-side Base APIs
+ * @{
+ */
+
+/**
+ * @addtogroup base_user_api_memory User-side Base Memory APIs
+ * @{
+ */
+
+/* Physical memory group ID for normal usage.
+ */
+#define BASE_MEM_GROUP_DEFAULT (0)
+
+/* Number of physical memory groups.
+ */
+#define BASE_MEM_GROUP_COUNT (16)
+
+/**
+ * typedef base_mem_alloc_flags - Memory allocation, access/hint flags.
+ *
+ * A combination of MEM_PROT/MEM_HINT flags must be passed to each allocator
+ * in order to determine the best cache policy. Some combinations are
+ * invalid, e.g. MEM_PROT_CPU_WR | MEM_HINT_CPU_RD, which would describe
+ * a region that is write-only on the CPU side yet heavily read by the
+ * CPU.
+ * Other flags are only meaningful to a particular allocator.
+ * More flags can be added to this list, as long as they don't clash
+ * (see BASE_MEM_FLAGS_NR_BITS for the number of the first free bit).
+ */
+typedef u32 base_mem_alloc_flags;
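+
+/* Illustrative example, not part of the original sources: a buffer that
+ * the GPU writes and the CPU reads back could be requested with
+ *
+ *	base_mem_alloc_flags flags =
+ *		BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_WR;
+ *
+ * using the flag bits defined below.
+ */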
+
+/* Memory allocation, access/hint flags.
+ *
+ * See base_mem_alloc_flags.
+ */
+
+/* IN */
+/* Read access CPU side
+ */
+#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0)
+
+/* Write access CPU side
+ */
+#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1)
+
+/* Read access GPU side
+ */
+#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2)
+
+/* Write access GPU side
+ */
+#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3)
+
+/* Execute allowed on the GPU side
+ */
+#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4)
+
+/* Will be permanently mapped in kernel space.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASEP_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5)
+
+/* The allocation will completely reside within the same 4GB chunk in the GPU
+ * virtual space.
+ * Since this flag is primarily required only for the TLS memory which will
+ * not be used to contain executable code and also not used for Tiler heap,
+ * it can't be used along with BASE_MEM_PROT_GPU_EX and TILER_ALIGN_TOP flags.
+ */
+#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6)
+
+/* Userspace is not allowed to free this memory.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7)
+
+#define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8)
+
+/* Grow backing store on GPU Page Fault
+ */
+#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9)
+
+/* Page coherence Outer shareable, if available
+ */
+#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10)
+
+/* Page coherence Inner shareable
+ */
+#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11)
+
+/* Should be cached on the CPU
+ */
+#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12)
+
+/* IN/OUT */
+/* Must have same VA on both the GPU and the CPU
+ */
+#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13)
+
+/* OUT */
+/* Must call mmap to acquire a GPU address for the alloc
+ */
+#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14)
+
+/* IN */
+/* Page coherence Outer shareable, required.
+ */
+#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15)
+
+/* Protected memory
+ */
+#define BASE_MEM_PROTECTED ((base_mem_alloc_flags)1 << 16)
+
+/* Not needed physical memory
+ */
+#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17)
+
+/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the
+ * addresses to be the same
+ */
+#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18)
+
+/**
+ * Bit 19 is reserved.
+ *
+ * Do not remove; use the next unreserved bit for new flags.
+ */
+#define BASE_MEM_RESERVED_BIT_19 ((base_mem_alloc_flags)1 << 19)
+#define BASE_MEM_MAYBE_RESERVED_BIT_19 BASE_MEM_RESERVED_BIT_19
+
+/**
+ * Memory starting from the end of the initial commit is aligned to 'extent'
+ * pages, where 'extent' must be a power of 2 and no more than
+ * BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES
+ */
+#define BASE_MEM_TILER_ALIGN_TOP ((base_mem_alloc_flags)1 << 20)
+
+/* Should be uncached on the GPU; this will only work for GPUs using the AARCH64 MMU mode.
+ * Some components within the GPU might only be able to access memory that is
+ * GPU cacheable. Refer to the specific GPU implementation for more details.
+ * The 3 shareability flags will be ignored for GPU uncached memory.
+ * If used while importing USER_BUFFER type memory, then the import will fail
+ * if the memory is not aligned to GPU and CPU cache line width.
+ */
+#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21)
+
+/*
+ * Bits [22:25] for group_id (0~15).
+ *
+ * base_mem_group_id_set() should be used to pack a memory group ID into a
+ * base_mem_alloc_flags value instead of accessing the bits directly.
+ * base_mem_group_id_get() should be used to extract the memory group ID from
+ * a base_mem_alloc_flags value.
+ */
+#define BASEP_MEM_GROUP_ID_SHIFT 22
+#define BASE_MEM_GROUP_ID_MASK \
+	((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT)
+
+/**
+ * Number of bits used as flags for base memory management
+ *
+ * Must be kept in sync with the base_mem_alloc_flags flags
+ */
+#define BASE_MEM_FLAGS_NR_BITS 26
+
+/* A mask for all output bits, excluding IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP
+
+/* A mask for all input bits, including IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_INPUT_MASK \
+	(((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)
+
+/**
+ * base_mem_group_id_get() - Get group ID from flags
+ * @flags: Flags to pass to base_mem_alloc
+ *
+ * This inline function extracts the encoded group ID from flags
+ * and converts it into a numeric value (0-15).
+ *
+ * Return: group ID (0-15) extracted from the parameter
+ */
+static inline int base_mem_group_id_get(base_mem_alloc_flags flags)
+{
+	LOCAL_ASSERT((flags & ~BASE_MEM_FLAGS_INPUT_MASK) == 0);
+	return (int)((flags & BASE_MEM_GROUP_ID_MASK) >>
+			BASEP_MEM_GROUP_ID_SHIFT);
+}
+
+/**
+ * base_mem_group_id_set() - Set group ID into base_mem_alloc_flags
+ * @id: group ID (0-15) to encode
+ *
+ * This inline function encodes a specific group ID into base_mem_alloc_flags.
+ * Parameter 'id' must be in the range 0 to 15.
+ *
+ * Return: base_mem_alloc_flags with the group ID (id) encoded
+ *
+ * The return value can be combined with other flags against base_mem_alloc
+ * to identify a specific memory group.
+ */
+static inline base_mem_alloc_flags base_mem_group_id_set(int id)
+{
+	LOCAL_ASSERT(id >= 0);
+	LOCAL_ASSERT(id < BASE_MEM_GROUP_COUNT);
+
+	return ((base_mem_alloc_flags)id << BASEP_MEM_GROUP_ID_SHIFT) &
+		BASE_MEM_GROUP_ID_MASK;
+}
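+
+/* Illustrative example, not part of the original sources: combining a
+ * memory group with other allocation flags (group 3 is an arbitrary
+ * choice):
+ *
+ *	base_mem_alloc_flags flags = BASE_MEM_PROT_CPU_RD |
+ *		BASE_MEM_PROT_GPU_WR | base_mem_group_id_set(3);
+ *
+ * after which base_mem_group_id_get(flags) returns 3.
+ */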
+
+/* A mask for all the flags which are modifiable via the base_mem_set_flags
+ * interface.
+ */
+#define BASE_MEM_FLAGS_MODIFIABLE \
+	(BASE_MEM_DONT_NEED | BASE_MEM_COHERENT_SYSTEM | \
+	 BASE_MEM_COHERENT_LOCAL)
+
+
+/* A mask of all currently reserved flags
+ */
+#define BASE_MEM_FLAGS_RESERVED \
+	(BASE_MEM_RESERVED_BIT_8 | BASE_MEM_MAYBE_RESERVED_BIT_19)
+
+/* A mask of all the flags which are only valid for allocations within kbase,
+ * and may not be passed from user space.
+ */
+#define BASEP_MEM_FLAGS_KERNEL_ONLY \
+	(BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE)
+
+/* A mask of all the flags that can be returned via the base_mem_get_flags()
+ * interface.
+ */
+#define BASE_MEM_FLAGS_QUERYABLE \
+	(BASE_MEM_FLAGS_INPUT_MASK & ~(BASE_MEM_SAME_VA | \
+		BASE_MEM_COHERENT_SYSTEM_REQUIRED | BASE_MEM_DONT_NEED | \
+		BASE_MEM_IMPORT_SHARED | BASE_MEM_FLAGS_RESERVED | \
+		BASEP_MEM_FLAGS_KERNEL_ONLY))
+
+/**
+ * enum base_mem_import_type - Memory types supported by @a base_mem_import
+ *
+ * @BASE_MEM_IMPORT_TYPE_INVALID: Invalid type
+ * @BASE_MEM_IMPORT_TYPE_UMM: UMM import. Handle type is a file descriptor (int)
+ * @BASE_MEM_IMPORT_TYPE_USER_BUFFER: User buffer import. Handle is a
+ * base_mem_import_user_buffer
+ *
+ * Each type defines what the supported handle type is.
+ *
+ * If any new type is added here, ARM must be contacted
+ * to allocate a numeric value for it.
+ * Do not just add a new type without synchronizing with ARM,
+ * as future releases from ARM might include other new types
+ * that could clash with your custom types.
+ */
+typedef enum base_mem_import_type {
+	BASE_MEM_IMPORT_TYPE_INVALID = 0,
+	/**
+	 * Import type with value 1 is deprecated.
+	 */
+	BASE_MEM_IMPORT_TYPE_UMM = 2,
+	BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3
+} base_mem_import_type;
+
+/**
+ * struct base_mem_import_user_buffer - Handle of an imported user buffer
+ *
+ * @ptr:	address of imported user buffer
+ * @length:	length of imported user buffer in bytes
+ *
+ * This structure is used to represent a handle of an imported user buffer.
+ */
+
+struct base_mem_import_user_buffer {
+	u64 ptr;
+	u64 length;
+};
+
+/**
+ * @brief Invalid memory handle.
+ *
+ * Return value from functions returning @ref base_mem_handle on error.
+ *
+ * @warning @ref base_mem_handle_new_invalid must be used instead of this macro
+ *          in C++ code or other situations where compound literals cannot be used.
+ */
+#define BASE_MEM_INVALID_HANDLE ((base_mem_handle) { {BASEP_MEM_INVALID_HANDLE} })
+
+/**
+ * @brief Special write-alloc memory handle.
+ *
+ * A special handle is used to represent a region where a special page is mapped
+ * with a write-alloc cache setup, typically used when the write result of the
+ * GPU isn't needed, but the GPU must write anyway.
+ *
+ * @warning @ref base_mem_handle_new_write_alloc must be used instead of this macro
+ *          in C++ code or other situations where compound literals cannot be used.
+ */
+#define BASE_MEM_WRITE_ALLOC_PAGES_HANDLE ((base_mem_handle) { {BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE} })
+
+#define BASEP_MEM_INVALID_HANDLE               (0ull  << 12)
+#define BASE_MEM_MMU_DUMP_HANDLE               (1ull  << 12)
+#define BASE_MEM_TRACE_BUFFER_HANDLE           (2ull  << 12)
+#define BASE_MEM_MAP_TRACKING_HANDLE           (3ull  << 12)
+#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE     (4ull  << 12)
+/* handles 5..47 (each << PAGE_SHIFT) are reserved for future special handles */
+#define BASE_MEM_COOKIE_BASE                   (64ul  << 12)
+#define BASE_MEM_FIRST_FREE_ADDRESS            ((BITS_PER_LONG << 12) + \
+						BASE_MEM_COOKIE_BASE)
+
+/* Mask to detect 4GB boundary alignment */
+#define BASE_MEM_MASK_4GB  0xfffff000UL
+/* Mask to detect 4GB boundary (in page units) alignment */
+#define BASE_MEM_PFN_MASK_4GB  (BASE_MEM_MASK_4GB >> LOCAL_PAGE_SHIFT)
+
+/**
+ * Limit on the 'extent' parameter for an allocation with the
+ * BASE_MEM_TILER_ALIGN_TOP flag set
+ *
+ * This is the same as the maximum limit for a Buffer Descriptor's chunk size
+ */
+#define BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES_LOG2 \
+		(21u - (LOCAL_PAGE_SHIFT))
+#define BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES \
+		(1ull << (BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES_LOG2))
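+
+/* Illustrative arithmetic, not part of the original sources: with 4 KiB
+ * pages (LOCAL_PAGE_SHIFT == 12), BASE_MEM_PFN_MASK_4GB is
+ * 0xfffff000 >> 12 == 0xfffff, and the TILER_ALIGN_TOP limit is
+ * 1 << (21 - 12) == 512 pages, i.e. a maximum extent of 2 MiB.
+ */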
+
+/* Bit mask of cookies used for memory allocation setup */
+#define KBASE_COOKIE_MASK  ~1UL /* bit 0 is reserved */
+
+/* Maximum size allowed in a single KBASE_IOCTL_MEM_ALLOC call */
+#define KBASE_MEM_ALLOC_MAX_SIZE ((8ull << 30) >> PAGE_SHIFT) /* 8 GB */
+
+
+/**
+ * @addtogroup base_user_api_memory_defered User-side Base Deferred Memory Coherency APIs
+ * @{
+ */
+
+/**
+ * @brief A basic memory operation (sync-set).
+ *
+ * The content of this structure is private, and should only be used
+ * by the accessors.
+ */
+typedef struct base_syncset {
+	struct basep_syncset basep_sset;
+} base_syncset;
+
+/** @} end group base_user_api_memory_defered */
+
+/**
+ * Handle to represent an imported memory object.
+ * A simple opaque handle to imported memory; it can't be used
+ * with anything but base_external_resource_init to bind to an atom.
+ */
+typedef struct base_import_handle {
+	struct {
+		u64 handle;
+	} basep;
+} base_import_handle;
+
+/** @} end group base_user_api_memory */
+
+/**
+ * @addtogroup base_user_api_job_dispatch User-side Base Job Dispatcher APIs
+ * @{
+ */
+
+typedef int platform_fence_type;
+#define INVALID_PLATFORM_FENCE ((platform_fence_type)-1)
+
+/**
+ * Base stream handle.
+ *
+ * References an underlying base stream object.
+ */
+typedef struct base_stream {
+	struct {
+		int fd;
+	} basep;
+} base_stream;
+
+/**
+ * Base fence handle.
+ *
+ * References an underlying base fence object.
+ */
+typedef struct base_fence {
+	struct {
+		int fd;
+		int stream_fd;
+	} basep;
+} base_fence;
+
+/**
+ * @brief Per-job data
+ *
+ * This structure is used to store per-job data, and is completely unused
+ * by the Base driver. It can be used to store things such as a callback
+ * function pointer or data needed to handle job completion. It is
+ * guaranteed to be untouched by the Base driver.
+ */
+typedef struct base_jd_udata {
+	u64 blob[2];	 /**< per-job data array */
+} base_jd_udata;
+
+/**
+ * @brief Memory aliasing info
+ *
+ * Describes a memory handle to be aliased.
+ * A subset of the handle can be chosen for aliasing, given an offset and a
+ * length.
+ * A special handle BASE_MEM_WRITE_ALLOC_PAGES_HANDLE is used to represent a
+ * region where a special page is mapped with a write-alloc cache setup,
+ * typically used when the write result of the GPU isn't needed, but the GPU
+ * must write anyway.
+ *
+ * Offset and length are specified in pages.
+ * Offset must be within the size of the handle.
+ * Offset+length must not overrun the size of the handle.
+ *
+ * @handle Handle to alias, can be BASE_MEM_WRITE_ALLOC_PAGES_HANDLE
+ * @offset Offset within the handle to start aliasing from, in pages.
+ *         Not used with BASE_MEM_WRITE_ALLOC_PAGES_HANDLE.
+ * @length Length to alias, in pages. For BASE_MEM_WRITE_ALLOC_PAGES_HANDLE
+ *         specifies the number of times the special page is needed.
+ */
+struct base_mem_aliasing_info {
+	base_mem_handle handle;
+	u64 offset;
+	u64 length;
+};
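+
+/*
+ * Usage sketch (illustrative only; 'handle' and the page counts below are
+ * assumptions): alias 16 pages starting 4 pages into an existing handle,
+ * keeping offset + length within the size of the handle:
+ *
+ *   struct base_mem_aliasing_info ai = {
+ *           .handle = handle,
+ *           .offset = 4,
+ *           .length = 16,
+ *   };
+ *
+ * For BASE_MEM_WRITE_ALLOC_PAGES_HANDLE, offset is ignored and length gives
+ * the number of times the special page is needed:
+ *
+ *   struct base_mem_aliasing_info wa = {
+ *           .handle = BASE_MEM_WRITE_ALLOC_PAGES_HANDLE,
+ *           .length = 16,
+ *   };
+ */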
+
+/**
+ * Similar to BASE_MEM_TILER_ALIGN_TOP, memory starting from the end of the
+ * initial commit is aligned to 'extent' pages, where 'extent' must be a power
+ * of 2 and no more than BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES
+ */
+#define BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP  (1 << 0)
+
+/**
+ * struct base_jit_alloc_info - Structure which describes a JIT allocation
+ *                              request.
+ * @gpu_alloc_addr:             The GPU virtual address to write the JIT
+ *                              allocated GPU virtual address to.
+ * @va_pages:                   The minimum number of virtual pages required.
+ * @commit_pages:               The minimum number of physical pages which
+ *                              should back the allocation.
+ * @extent:                     Granularity of physical pages to grow the
+ *                              allocation by during a fault.
+ * @id:                         Unique ID provided by the caller, this is used
+ *                              to pair allocation and free requests.
+ *                              Zero is not a valid value.
+ * @bin_id:                     The JIT allocation bin, used in conjunction with
+ *                              @max_allocations to limit the number of each
+ *                              type of JIT allocation.
+ * @max_allocations:            The maximum number of allocations allowed within
+ *                              the bin specified by @bin_id. Should be the same
+ *                              for all JIT allocations within the same bin.
+ * @flags:                      flags specifying the special requirements for
+ *                              the JIT allocation.
+ * @padding:                    Expansion space - should be initialised to zero
+ * @usage_id:                   A hint about which allocation should be reused.
+ *                              The kernel should attempt to use a previous
+ *                              allocation with the same usage_id
+ */
+struct base_jit_alloc_info {
+	u64 gpu_alloc_addr;
+	u64 va_pages;
+	u64 commit_pages;
+	u64 extent;
+	u8 id;
+	u8 bin_id;
+	u8 max_allocations;
+	u8 flags;
+	u8 padding[2];
+	u16 usage_id;
+};
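+
+/*
+ * Illustrative sketch (all values below are assumptions): a minimal JIT
+ * request for 64 virtual pages, initially backed by 16 physical pages and
+ * grown 4 pages at a time on fault. The id must be non-zero and must not
+ * be reused until released via BASE_JD_REQ_SOFT_JIT_FREE:
+ *
+ *   struct base_jit_alloc_info info = {
+ *           .gpu_alloc_addr = result_gpu_va,
+ *           .va_pages = 64,
+ *           .commit_pages = 16,
+ *           .extent = 4,
+ *           .id = 1,
+ *   };
+ *
+ * where result_gpu_va is a GPU virtual address that the address of the JIT
+ * allocation will be written to.
+ */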
+
+/**
+ * @brief Job dependency type.
+ *
+ * A flags field will be inserted into the atom structure to specify whether a
+ * dependency is a data or ordering dependency (by putting it before/after
+ * 'core_req' in the structure it should be possible to add it without
+ * changing the structure size).
+ * When the flag is set for a particular dependency, signalling that it is an
+ * ordering-only dependency, errors will not be propagated.
+ */
+typedef u8 base_jd_dep_type;
+
+
+#define BASE_JD_DEP_TYPE_INVALID  (0)       /**< Invalid dependency */
+#define BASE_JD_DEP_TYPE_DATA     (1U << 0) /**< Data dependency */
+#define BASE_JD_DEP_TYPE_ORDER    (1U << 1) /**< Order dependency */
+
+/**
+ * @brief Job chain hardware requirements.
+ *
+ * A job chain must specify what GPU features it needs to allow the
+ * driver to schedule the job correctly. Failing to specify the
+ * correct settings can (and will) cause an early job termination. Multiple
+ * values can be ORed together to specify multiple requirements.
+ * A special case is ::BASE_JD_REQ_DEP, which is used to express complex
+ * dependencies and does not execute anything on the hardware.
+ */
+typedef u32 base_jd_core_req;
+
+/* Requirements that come from the HW */
+
+/**
+ * No requirement, dependency only
+ */
+#define BASE_JD_REQ_DEP ((base_jd_core_req)0)
+
+/**
+ * Requires fragment shaders
+ */
+#define BASE_JD_REQ_FS  ((base_jd_core_req)1 << 0)
+
+/**
+ * Requires compute shaders
+ * This covers any of the following Midgard Job types:
+ * - Vertex Shader Job
+ * - Geometry Shader Job
+ * - An actual Compute Shader Job
+ *
+ * Compare this with @ref BASE_JD_REQ_ONLY_COMPUTE, which specifies that the
+ * job is specifically just the "Compute Shader" job type, and not the "Vertex
+ * Shader" nor the "Geometry Shader" job type.
+ */
+#define BASE_JD_REQ_CS  ((base_jd_core_req)1 << 1)
+#define BASE_JD_REQ_T   ((base_jd_core_req)1 << 2)   /**< Requires tiling */
+#define BASE_JD_REQ_CF  ((base_jd_core_req)1 << 3)   /**< Requires cache flushes */
+#define BASE_JD_REQ_V   ((base_jd_core_req)1 << 4)   /**< Requires value writeback */
+
+/* SW-only requirements - the HW does not expose these as part of the job slot capabilities */
+
+/* Requires fragment job with AFBC encoding */
+#define BASE_JD_REQ_FS_AFBC  ((base_jd_core_req)1 << 13)
+
+/**
+ * SW-only requirement: coalesce completion events.
+ * If this bit is set then completion of this atom will not cause an event to
+ * be sent to userspace, whether successful or not; completion events will be
+ * deferred until an atom completes which does not have this bit set.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES.
+ */
+#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5)
+
+/**
+ * SW Only requirement: the job chain requires a coherent core group. We don't
+ * mind which coherent core group is used.
+ */
+#define BASE_JD_REQ_COHERENT_GROUP  ((base_jd_core_req)1 << 6)
+
+/**
+ * SW Only requirement: The performance counters should be enabled only when
+ * they are needed, to reduce power consumption.
+ */
+
+#define BASE_JD_REQ_PERMON               ((base_jd_core_req)1 << 7)
+
+/**
+ * SW Only requirement: External resources are referenced by this atom.
+ * When external resources are referenced, no syncsets can be bundled with the
+ * atom; they should instead be part of NULL jobs inserted into the dependency
+ * tree. The first pre_dep object must be configured for the external
+ * resources to use; the second pre_dep object can be used to create other
+ * dependencies.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE and
+ * BASE_JD_REQ_SOFT_EVENT_WAIT.
+ */
+#define BASE_JD_REQ_EXTERNAL_RESOURCES   ((base_jd_core_req)1 << 8)
+
+/**
+ * SW Only requirement: Software defined job. Jobs with this bit set will not be submitted
+ * to the hardware but will cause some action to happen within the driver
+ */
+#define BASE_JD_REQ_SOFT_JOB        ((base_jd_core_req)1 << 9)
+
+#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME      (BASE_JD_REQ_SOFT_JOB | 0x1)
+#define BASE_JD_REQ_SOFT_FENCE_TRIGGER          (BASE_JD_REQ_SOFT_JOB | 0x2)
+#define BASE_JD_REQ_SOFT_FENCE_WAIT             (BASE_JD_REQ_SOFT_JOB | 0x3)
+
+/* 0x4 RESERVED for now */
+
+/**
+ * SW only requirement: event wait/trigger job.
+ *
+ * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set.
+ * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the
+ *   other waiting jobs. It completes immediately.
+ * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it
+ *   possible for other jobs to wait upon. It completes immediately.
+ */
+#define BASE_JD_REQ_SOFT_EVENT_WAIT             (BASE_JD_REQ_SOFT_JOB | 0x5)
+#define BASE_JD_REQ_SOFT_EVENT_SET              (BASE_JD_REQ_SOFT_JOB | 0x6)
+#define BASE_JD_REQ_SOFT_EVENT_RESET            (BASE_JD_REQ_SOFT_JOB | 0x7)
+
+#define BASE_JD_REQ_SOFT_DEBUG_COPY             (BASE_JD_REQ_SOFT_JOB | 0x8)
+
+/**
+ * SW only requirement: Just In Time allocation
+ *
+ * This job requests one or more JIT allocations through a list
+ * of @base_jit_alloc_info structures which is passed via the jc element of
+ * the atom. The number of @base_jit_alloc_info structures present in the
+ * list is passed via the nr_extres element of the atom.
+ *
+ * It should be noted that the id entry in @base_jit_alloc_info must not
+ * be reused until it has been released via @BASE_JD_REQ_SOFT_JIT_FREE.
+ *
+ * Should this soft job fail, it is expected that a @BASE_JD_REQ_SOFT_JIT_FREE
+ * soft job is still made to free the JIT allocation.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_ALLOC              (BASE_JD_REQ_SOFT_JOB | 0x9)
+/**
+ * SW only requirement: Just In Time free
+ *
+ * This job requests that one or more JIT allocations created by
+ * @BASE_JD_REQ_SOFT_JIT_ALLOC be freed. The ID list of the JIT
+ * allocations is passed via the jc element of the atom.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_FREE               (BASE_JD_REQ_SOFT_JOB | 0xa)
+
+/**
+ * SW only requirement: Map external resource
+ *
+ * This job requests that external resource(s) be mapped once the
+ * dependencies of the job have been satisfied. The list of external
+ * resources is passed via the jc element of the atom, which is a pointer
+ * to a @base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_MAP            (BASE_JD_REQ_SOFT_JOB | 0xb)
+/**
+ * SW only requirement: Unmap external resource
+ *
+ * This job requests that external resource(s) be unmapped once the
+ * dependencies of the job have been satisfied. The list of external
+ * resources is passed via the jc element of the atom, which is a pointer
+ * to a @base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP          (BASE_JD_REQ_SOFT_JOB | 0xc)
+
+/**
+ * HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders)
+ *
+ * This indicates that the Job Chain contains Midgard Jobs of the 'Compute Shaders' type.
+ *
+ * In contrast to @ref BASE_JD_REQ_CS, this does \b not indicate that the Job
+ * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs.
+ */
+#define BASE_JD_REQ_ONLY_COMPUTE    ((base_jd_core_req)1 << 10)
+
+/**
+ * HW Requirement: Use the base_jd_atom::device_nr field to specify a
+ * particular core group
+ *
+ * If both @ref BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag takes priority
+ *
+ * This is only guaranteed to work for @ref BASE_JD_REQ_ONLY_COMPUTE atoms.
+ *
+ * If the core availability policy is keeping the required core group turned off, then
+ * the job will fail with a @ref BASE_JD_EVENT_PM_EVENT error code.
+ */
+#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11)
+
+/**
+ * SW Flag: If this bit is set then the successful completion of this atom
+ * will not cause an event to be sent to userspace
+ */
+#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE   ((base_jd_core_req)1 << 12)
+
+/**
+ * SW Flag: If this bit is set then completion of this atom will not cause an
+ * event to be sent to userspace, whether successful or not.
+ */
+#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14)
+
+/**
+ * SW Flag: Skip GPU cache clean and invalidation before starting a GPU job.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job starts which does not have this bit set or a job completes
+ * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use if
+ * the CPU may have written to memory addressed by the job since the last job
+ * without this bit set was submitted.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15)
+
+/**
+ * SW Flag: Skip GPU cache clean and invalidation after a GPU job completes.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job completes which does not have this bit set or a job starts
+ * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_START bit set. Do not use if
+ * the CPU may read from or partially overwrite memory addressed by the job
+ * before the next job without this bit set completes.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16)
+
+/**
+ * Request the atom be executed on a specific job slot.
+ *
+ * When this flag is specified, it takes precedence over any existing job slot
+ * selection logic.
+ */
+#define BASE_JD_REQ_JOB_SLOT ((base_jd_core_req)1 << 17)
+
+/**
+ * These requirement bits are currently unused in base_jd_core_req
+ */
+#define BASEP_JD_REQ_RESERVED \
+	(~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \
+	BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \
+	BASE_JD_REQ_EVENT_COALESCE | \
+	BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \
+	BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \
+	BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END | \
+	BASE_JD_REQ_JOB_SLOT))
+
+/**
+ * Mask of all bits in base_jd_core_req that control the type of the atom.
+ *
+ * This allows dependency only atoms to have flags set
+ */
+#define BASE_JD_REQ_ATOM_TYPE \
+	(BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \
+	BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE)
+
+/**
+ * Mask of all bits in base_jd_core_req that control the type of a soft job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f)
+
+/*
+ * Returns non-zero value if core requirements passed define a soft job or
+ * a dependency only job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \
+	((core_req & BASE_JD_REQ_SOFT_JOB) || \
+	(core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)
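+
+/*
+ * Usage sketch: the masks above let callers classify a core_req value
+ * ('req' and the handler functions are illustrative, not part of this
+ * header):
+ *
+ *   if (BASE_JD_REQ_SOFT_JOB_OR_DEP(req)) {
+ *           if ((req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
+ *                           BASE_JD_REQ_SOFT_FENCE_WAIT)
+ *                   handle_fence_wait();
+ *           else if ((req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)
+ *                   handle_dependency_only();
+ *   }
+ *
+ * Neither case executes anything on the hardware.
+ */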
+
+/*
+ * Base Atom priority
+ *
+ * Only certain priority levels are actually implemented, as specified by the
+ * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority
+ * level that is not one of those defined below.
+ *
+ * Priority levels only affect scheduling after the atoms have had dependencies
+ * resolved. For example, a low priority atom that has had its dependencies
+ * resolved might run before a higher priority atom that has not had its
+ * dependencies resolved.
+ *
+ * In general, fragment atoms do not affect non-fragment atoms with
+ * lower priorities, and vice versa. One exception is that there is only one
+ * priority value for each context. So a high-priority (e.g.) fragment atom
+ * could increase its context priority, causing its non-fragment atoms to also
+ * be scheduled sooner.
+ *
+ * The atoms are scheduled as follows with respect to their priorities:
+ * - Let atoms 'X' and 'Y' be for the same job slot and have their
+ *   dependencies resolved, with atom 'X' having a higher priority than 'Y'
+ * - If atom 'Y' is currently running on the HW, then it is interrupted to
+ *   allow atom 'X' to run soon after
+ * - If instead neither atom 'Y' nor atom 'X' are running, then when choosing
+ *   the next atom to run, atom 'X' will always be chosen instead of atom 'Y'
+ * - Any two atoms that have the same priority could run in any order with
+ *   respect to each other. That is, there is no ordering constraint between
+ *   atoms of the same priority.
+ *
+ * The sysfs file 'js_ctx_scheduling_mode' is used to control how atoms are
+ * scheduled between contexts. The default value, 0, will cause higher-priority
+ * atoms to be scheduled first, regardless of their context. The value 1 will
+ * use a round-robin algorithm when deciding which context's atoms to schedule
+ * next, so higher-priority atoms can only preempt lower priority atoms within
+ * the same context. See KBASE_JS_SYSTEM_PRIORITY_MODE and
+ * KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE for more details.
+ */
+typedef u8 base_jd_prio;
+
+/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_MEDIUM  ((base_jd_prio)0)
+/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and
+ * BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_HIGH    ((base_jd_prio)1)
+/* Low atom priority. */
+#define BASE_JD_PRIO_LOW     ((base_jd_prio)2)
+
+/* Count of the number of priority levels. This itself is not a valid
+ * base_jd_prio setting */
+#define BASE_JD_NR_PRIO_LEVELS 3
+
+enum kbase_jd_atom_state {
+	/** Atom is not used */
+	KBASE_JD_ATOM_STATE_UNUSED,
+	/** Atom is queued in JD */
+	KBASE_JD_ATOM_STATE_QUEUED,
+	/** Atom has been given to JS (is runnable/running) */
+	KBASE_JD_ATOM_STATE_IN_JS,
+	/** Atom has been completed, but not yet handed back to job dispatcher
+	 *  for dependency resolution */
+	KBASE_JD_ATOM_STATE_HW_COMPLETED,
+	/** Atom has been completed, but not yet handed back to userspace */
+	KBASE_JD_ATOM_STATE_COMPLETED
+};
+
+typedef u8 base_atom_id; /**< Type big enough to store an atom number in */
+
+struct base_dependency {
+	base_atom_id  atom_id;               /**< An atom number */
+	base_jd_dep_type dependency_type;    /**< Dependency type */
+};
+
+/* This structure has changed since UK 10.2, for which base_jd_core_req was a
+ * u16 value. In order to keep the size of the structure the same, the padding
+ * field has been adjusted accordingly and a core_req field of u32 type (to
+ * which the UK 10.3 base_jd_core_req definitions apply) has been added at the
+ * end of the structure. The place in the structure previously occupied by the
+ * u16 core_req is kept, but renamed to compat_core_req, and as such it can be
+ * used in the ioctl call for job submission as long as UK 10.2 legacy is
+ * supported. Once this support ends, the field can be left for possible
+ * future use. */
+typedef struct base_jd_atom_v2 {
+	u64 jc;			    /**< job-chain GPU address */
+	struct base_jd_udata udata;		    /**< user data */
+	u64 extres_list;	    /**< list of external resources */
+	u16 nr_extres;			    /**< nr of external resources or JIT allocations */
+	u16 compat_core_req;	            /**< core requirements which correspond to the legacy support for UK 10.2 */
+	struct base_dependency pre_dep[2];  /**< pre-dependencies; one needs to use the SETTER function to assign this field,
+	which is done in order to reduce the possibility of improper assignment of a dependency field */
+	base_atom_id atom_number;	    /**< unique number to identify the atom */
+	base_jd_prio prio;                  /**< Atom priority. Refer to @ref base_jd_prio for more details */
+	u8 device_nr;			    /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
+	u8 jobslot;			    /**< Job slot to use when BASE_JD_REQ_JOB_SLOT is specified */
+	base_jd_core_req core_req;          /**< core requirements */
+} base_jd_atom_v2;
+
+typedef enum base_external_resource_access {
+	BASE_EXT_RES_ACCESS_SHARED,
+	BASE_EXT_RES_ACCESS_EXCLUSIVE
+} base_external_resource_access;
+
+typedef struct base_external_resource {
+	u64 ext_resource;
+} base_external_resource;
+
+
+/**
+ * The maximum number of external resources which can be mapped/unmapped
+ * in a single request.
+ */
+#define BASE_EXT_RES_COUNT_MAX 10
+
+/**
+ * struct base_external_resource_list - Structure which describes a list of
+ *                                      external resources.
+ * @count:                              The number of resources.
+ * @ext_res:                            Array of external resources which is
+ *                                      sized at allocation time.
+ */
+struct base_external_resource_list {
+	u64 count;
+	struct base_external_resource ext_res[1];
+};
+
+struct base_jd_debug_copy_buffer {
+	u64 address;
+	u64 size;
+	struct base_external_resource extres;
+};
+
+/**
+ * @brief Setter for a dependency structure
+ *
+ * @param[in] dep          The kbase jd atom dependency to be initialized.
+ * @param     id           The atom_id to be assigned.
+ * @param     dep_type     The dep_type to be assigned.
+ *
+ */
+static inline void base_jd_atom_dep_set(struct base_dependency *dep,
+		base_atom_id id, base_jd_dep_type dep_type)
+{
+	LOCAL_ASSERT(dep != NULL);
+
+	/*
+	 * Make sure we don't set disallowed combinations
+	 * of atom_id/dependency_type.
+	 */
+	LOCAL_ASSERT((id == 0 && dep_type == BASE_JD_DEP_TYPE_INVALID) ||
+			(id > 0 && dep_type != BASE_JD_DEP_TYPE_INVALID));
+
+	dep->atom_id = id;
+	dep->dependency_type = dep_type;
+}
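+
+/*
+ * Usage sketch ('atom' and the chosen id are illustrative): make an atom
+ * order-depend on atom number 5 and mark the second slot unused. Note that
+ * id 0 is only valid together with BASE_JD_DEP_TYPE_INVALID:
+ *
+ *   base_jd_atom_dep_set(&atom.pre_dep[0], 5, BASE_JD_DEP_TYPE_ORDER);
+ *   base_jd_atom_dep_set(&atom.pre_dep[1], 0, BASE_JD_DEP_TYPE_INVALID);
+ */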
+
+/**
+ * @brief Make a copy of a dependency structure
+ *
+ * @param[in,out] dep          The kbase jd atom dependency to be written.
+ * @param[in]     from         The dependency to make a copy from.
+ *
+ */
+static inline void base_jd_atom_dep_copy(struct base_dependency *dep,
+		const struct base_dependency *from)
+{
+	LOCAL_ASSERT(dep != NULL);
+
+	base_jd_atom_dep_set(dep, from->atom_id, from->dependency_type);
+}
+
+/**
+ * @brief Soft-atom fence trigger setup.
+ *
+ * Sets up an atom to be a SW-only atom signaling a fence
+ * when it reaches the run state.
+ *
+ * Using the existing base dependency system the fence can
+ * be set to trigger when a GPU job has finished.
+ *
+ * The base fence object must not be terminated until the atom
+ * has been submitted to @ref base_jd_submit and @ref base_jd_submit
+ * has returned.
+ *
+ * @a fence must be a valid fence set up with @a base_fence_init.
+ * Calling this function with an uninitialized fence results in undefined
+ * behavior.
+ *
+ * @param[out] atom A pre-allocated atom to configure as a fence trigger SW atom
+ * @param[in] fence The base fence object to trigger.
+ *
+ * @pre @p fence must reference a @ref base_fence successfully initialized by
+ *      calling @ref base_fence_init.
+ * @pre @p fence was @e not initialized by calling @ref base_fence_import, nor
+ *      is it associated with a fence-trigger job that was already submitted
+ *      by calling @ref base_jd_submit.
+ * @post @p atom can be submitted by calling @ref base_jd_submit.
+ */
+static inline void base_jd_fence_trigger_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
+{
+	LOCAL_ASSERT(atom);
+	LOCAL_ASSERT(fence);
+	LOCAL_ASSERT(fence->basep.fd == INVALID_PLATFORM_FENCE);
+	LOCAL_ASSERT(fence->basep.stream_fd >= 0);
+	atom->jc = (uintptr_t) fence;
+	atom->core_req = BASE_JD_REQ_SOFT_FENCE_TRIGGER;
+}
+
+/**
+ * @brief Soft-atom fence wait setup.
+ *
+ * Sets up an atom to be a SW-only atom waiting on a fence.
+ * When the fence becomes triggered the atom becomes runnable
+ * and completes immediately.
+ *
+ * Using the existing base dependency system the fence can
+ * be set to block a GPU job until it has been triggered.
+ *
+ * The base fence object must not be terminated until the atom
+ * has been submitted to @ref base_jd_submit and
+ * @ref base_jd_submit has returned.
+ *
+ * @param[out] atom A pre-allocated atom to configure as a fence wait SW atom
+ * @param[in] fence The base fence object to wait on
+ *
+ * @pre @p fence must reference a @ref base_fence successfully initialized by
+ *      calling @ref base_fence_import, or it must be associated with a
+ *      fence-trigger job that was already submitted by calling
+ *      @ref base_jd_submit.
+ * @post @p atom can be submitted by calling @ref base_jd_submit.
+ */
+static inline void base_jd_fence_wait_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
+{
+	LOCAL_ASSERT(atom);
+	LOCAL_ASSERT(fence);
+	LOCAL_ASSERT(fence->basep.fd >= 0);
+	atom->jc = (uintptr_t) fence;
+	atom->core_req = BASE_JD_REQ_SOFT_FENCE_WAIT;
+}
+
+/**
+ * @brief External resource info initialization.
+ *
+ * Sets up an external resource object to reference
+ * a memory allocation and the type of access requested.
+ *
+ * @param[in] res     The resource object to initialize
+ * @param     handle  The handle to the imported memory object, must be
+ *                    obtained by calling @ref base_mem_as_import_handle().
+ * @param     access  The type of access requested
+ */
+static inline void base_external_resource_init(struct base_external_resource *res, struct base_import_handle handle, base_external_resource_access access)
+{
+	u64 address;
+
+	address = handle.basep.handle;
+
+	LOCAL_ASSERT(res != NULL);
+	LOCAL_ASSERT(0 == (address & LOCAL_PAGE_LSB));
+	LOCAL_ASSERT(access == BASE_EXT_RES_ACCESS_SHARED || access == BASE_EXT_RES_ACCESS_EXCLUSIVE);
+
+	res->ext_resource = address | (access & LOCAL_PAGE_LSB);
+}
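+
+/*
+ * The access type is packed into the low bits of the page-aligned handle
+ * address, so a hypothetical decode helper (the inverse of the packing
+ * above, not part of this header) could look like:
+ *
+ *   static inline base_external_resource_access
+ *   basep_external_resource_access_get(const struct base_external_resource *res)
+ *   {
+ *           return (base_external_resource_access)
+ *                           (res->ext_resource & LOCAL_PAGE_LSB);
+ *   }
+ */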
+
+/**
+ * @brief Job chain event code bits
+ * Defines the bits used to create ::base_jd_event_code
+ */
+enum {
+	BASE_JD_SW_EVENT_KERNEL = (1u << 15), /**< Kernel side event */
+	BASE_JD_SW_EVENT = (1u << 14), /**< SW defined event */
+	BASE_JD_SW_EVENT_SUCCESS = (1u << 13), /**< Event indicates success (SW events only) */
+	BASE_JD_SW_EVENT_JOB = (0u << 11), /**< Job related event */
+	BASE_JD_SW_EVENT_BAG = (1u << 11), /**< Bag related event */
+	BASE_JD_SW_EVENT_INFO = (2u << 11), /**< Misc/info event */
+	BASE_JD_SW_EVENT_RESERVED = (3u << 11),	/**< Reserved event type */
+	BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11)	    /**< Mask to extract the type from an event code */
+};
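+
+/*
+ * Illustrative sketch: classifying an event code with the bits above
+ * ('code' and the handler functions are hypothetical):
+ *
+ *   if (!(code & BASE_JD_SW_EVENT))
+ *           handle_hw_event(code);
+ *   else if ((code & BASE_JD_SW_EVENT_TYPE_MASK) == BASE_JD_SW_EVENT_JOB)
+ *           handle_sw_job_event(code);
+ *
+ * with (code & BASE_JD_SW_EVENT_SUCCESS) distinguishing SW success events
+ * from SW errors.
+ */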
+
+/**
+ * @brief Job chain event codes
+ *
+ * HW and low-level SW events are represented by event codes.
+ * The status of jobs which succeeded are also represented by
+ * an event code (see ::BASE_JD_EVENT_DONE).
+ * Events are usually reported as part of a ::base_jd_event.
+ *
+ * The event codes are encoded in the following way:
+ * @li 10:0  - subtype
+ * @li 12:11 - type
+ * @li 13    - SW success (only valid if the SW bit is set)
+ * @li 14    - SW event (HW event if not set)
+ * @li 15    - Kernel event (should never be seen in userspace)
+ *
+ * Events are split up into ranges as follows:
+ * - BASE_JD_EVENT_RANGE_\<description\>_START
+ * - BASE_JD_EVENT_RANGE_\<description\>_END
+ *
+ * \a code is in \<description\>'s range when:
+ * - <tt>BASE_JD_EVENT_RANGE_\<description\>_START <= code < BASE_JD_EVENT_RANGE_\<description\>_END </tt>
+ *
+ * Ranges can be asserted for adjacency by testing that the END of the previous
+ * is equal to the START of the next. This is useful for optimizing some tests
+ * for range.
+ *
+ * A limitation is that the last member of this enum must explicitly be handled
+ * (with an assert-unreachable statement) in switch statements that use
+ * variables of this type. Otherwise, the compiler warns that we have not
+ * handled that enum value.
+ */
+typedef enum base_jd_event_code {
+	/* HW defined exceptions */
+
+	/** Start of HW Non-fault status codes
+	 *
+	 * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault,
+	 * because the job was hard-stopped
+	 */
+	BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0,
+
+	/* non-fatal exceptions */
+	BASE_JD_EVENT_NOT_STARTED = 0x00, /**< Can't be seen by userspace, treated as 'previous job done' */
+	BASE_JD_EVENT_DONE = 0x01,
+	BASE_JD_EVENT_STOPPED = 0x03,	  /**< Can't be seen by userspace, becomes TERMINATED, DONE or JOB_CANCELLED */
+	BASE_JD_EVENT_TERMINATED = 0x04,  /**< This is actually a fault status code - the job was hard stopped */
+	BASE_JD_EVENT_ACTIVE = 0x08,	  /**< Can't be seen by userspace, jobs only returned on complete/fail/cancel */
+
+	/** End of HW Non-fault status codes
+	 *
+	 * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault,
+	 * because the job was hard-stopped
+	 */
+	BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40,
+
+	/** Start of HW fault and SW Error status codes */
+	BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40,
+
+	/* job exceptions */
+	BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40,
+	BASE_JD_EVENT_JOB_POWER_FAULT = 0x41,
+	BASE_JD_EVENT_JOB_READ_FAULT = 0x42,
+	BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43,
+	BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44,
+	BASE_JD_EVENT_JOB_BUS_FAULT = 0x48,
+	BASE_JD_EVENT_INSTR_INVALID_PC = 0x50,
+	BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51,
+	BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52,
+	BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53,
+	BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54,
+	BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55,
+	BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56,
+	BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58,
+	BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59,
+	BASE_JD_EVENT_STATE_FAULT = 0x5A,
+	BASE_JD_EVENT_OUT_OF_MEMORY = 0x60,
+	BASE_JD_EVENT_UNKNOWN = 0x7F,
+
+	/* GPU exceptions */
+	BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80,
+	BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88,
+
+	/* MMU exceptions */
+	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1,
+	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2,
+	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3,
+	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4,
+	BASE_JD_EVENT_PERMISSION_FAULT = 0xC8,
+	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1,
+	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2,
+	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3,
+	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4,
+	BASE_JD_EVENT_ACCESS_FLAG = 0xD8,
+
+	/* SW defined exceptions */
+	BASE_JD_EVENT_MEM_GROWTH_FAILED	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
+	BASE_JD_EVENT_TIMED_OUT		= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
+	BASE_JD_EVENT_JOB_CANCELLED	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
+	BASE_JD_EVENT_JOB_INVALID	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
+	BASE_JD_EVENT_PM_EVENT		= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
+
+	BASE_JD_EVENT_BAG_INVALID	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,
+
+	/** End of HW fault and SW Error status codes */
+	BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+	/** Start of SW Success status codes */
+	BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | 0x000,
+
+	BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000,
+	BASE_JD_EVENT_BAG_DONE = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_BAG | 0x000,
+	BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000,
+
+	/** End of SW Success status codes */
+	BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+	/** Start of Kernel-only status codes. Such codes are never returned to user-space */
+	BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | 0x000,
+	BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000,
+
+	/** End of Kernel-only status codes. */
+	BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF
+} base_jd_event_code;
+
+/**
+ * @brief Event reporting structure
+ *
+ * This structure is used by the kernel driver to report information
+ * about GPU events. These can either be HW-specific events or low-level
+ * SW events, such as job-chain completion.
+ *
+ * The event code contains an event type field which can be extracted
+ * by ANDing with ::BASE_JD_SW_EVENT_TYPE_MASK.
+ *
+ * Based on the event type base_jd_event::data holds:
+ * @li ::BASE_JD_SW_EVENT_JOB : the offset in the ring-buffer for the completed
+ * job-chain
+ * @li ::BASE_JD_SW_EVENT_BAG : The address of the ::base_jd_bag that has
+ * been completed (i.e. all contained job-chains have been completed).
+ * @li ::BASE_JD_SW_EVENT_INFO : base_jd_event::data not used
+ */
+typedef struct base_jd_event_v2 {
+	base_jd_event_code event_code;  /**< event code */
+	base_atom_id atom_number;       /**< the atom number that has completed */
+	struct base_jd_udata udata;     /**< user data */
+} base_jd_event_v2;
+
+/**
+ * @brief Structure for BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME jobs.
+ *
+ * This structure is stored into the memory pointed to by the @c jc field
+ * of @ref base_jd_atom.
+ *
+ * It must not occupy the same CPU cache line(s) as any neighboring data.
+ * This is to avoid cases where access to pages containing the structure
+ * is shared between cached and un-cached memory regions, which would
+ * cause memory corruption.
+ */
+typedef struct base_dump_cpu_gpu_counters {
+	u64 system_time;
+	u64 cycle_counter;
+	u64 sec;
+	u32 usec;
+	u8 padding[36];
+} base_dump_cpu_gpu_counters;
+
+/** @} end group base_user_api_job_dispatch */
+
+#define GPU_MAX_JOB_SLOTS 16
+
+/**
+ * @page page_base_user_api_gpuprops User-side Base GPU Property Query API
+ *
+ * The User-side Base GPU Property Query API encapsulates two
+ * sub-modules:
+ *
+ * - @ref base_user_api_gpuprops_dyn "Dynamic GPU Properties"
+ * - @ref base_plat_config_gpuprops "Base Platform Config GPU Properties"
+ *
+ * There is a related third module outside of Base, which is owned by the MIDG
+ * module:
+ * - @ref gpu_props_static "Midgard Compile-time GPU Properties"
+ *
+ * Base only deals with properties that vary between different Midgard
+ * implementations - the Dynamic GPU properties and the Platform Config
+ * properties.
+ *
+ * For properties that are constant for the Midgard Architecture, refer to the
+ * MIDG module. However, we will discuss their relevance here <b>just to
+ * provide background information.</b>
+ *
+ * @section sec_base_user_api_gpuprops_about About the GPU Properties in Base and MIDG modules
+ *
+ * The compile-time properties (Platform Config, Midgard Compile-time
+ * properties) are exposed as pre-processor macros.
+ *
+ * Complementing the compile-time properties are the Dynamic GPU
+ * Properties, which act as a conduit for the Midgard Configuration
+ * Discovery.
+ *
+ * In general, the dynamic properties are present to verify that the platform
+ * has been configured correctly with the right set of Platform Config
+ * Compile-time Properties.
+ *
+ * As a consistent guide across the entire DDK, the choice for dynamic or
+ * compile-time should consider the following, in order:
+ * -# Can the code be written so that it doesn't need to know the
+ * implementation limits at all?
+ * -# If you need the limits, get the information from the Dynamic Property
+ * lookup. This should be done once as you fetch the context, and then cached
+ * as part of the context data structure, so it's cheap to access.
+ * -# If there's a clear and arguable inefficiency in using Dynamic Properties,
+ * then use a Compile-Time Property (Platform Config, or Midgard Compile-time
+ * property). Examples of where this might be sensible follow:
+ *  - Part of a critical inner-loop
+ *  - Frequent re-use throughout the driver, causing significant extra load
+ * instructions or control flow that would be worthwhile optimizing out.
+ *
+ * We cannot provide an exhaustive set of examples, neither can we provide a
+ * rule for every possible situation. Use common sense, and think about: what
+ * the rest of the driver will be doing; how the compiler might represent the
+ * value if it is a compile-time constant; whether an OEM shipping multiple
+ * devices would benefit much more from a single DDK binary, instead of
+ * insignificant micro-optimizations.
+ *
+ * @section sec_base_user_api_gpuprops_dyn Dynamic GPU Properties
+ *
+ * Dynamic GPU properties are presented in two sets:
+ * -# the commonly used properties in @ref base_gpu_props, which have been
+ * unpacked from GPU register bitfields.
+ * -# The full set of raw, unprocessed properties in @ref gpu_raw_gpu_props
+ * (also a member of @ref base_gpu_props). All of these are presented in
+ * the packed form, as presented by the GPU registers themselves.
+ *
+ * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
+ * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
+ * behaving differently?". In this case, all information about the
+ * configuration is potentially useful, but it <b>does not need to be processed
+ * by the driver</b>. Instead, the raw registers can be processed by the Mali
+ * Tools software on the host PC.
+ *
+ * The properties returned extend the Midgard Configuration Discovery
+ * registers. For example, GPU clock speed is not specified in the Midgard
+ * Architecture, but is <b>necessary for OpenCL's clGetDeviceInfo() function</b>.
+ *
+ * The GPU properties are obtained by a call to
+ * base_get_gpu_props(). This simply returns a pointer to a const
+ * base_gpu_props structure. It is constant for the life of a base
+ * context. Multiple calls to base_get_gpu_props() to a base context
+ * return the same pointer to a constant structure. This avoids cache pollution
+ * of the common data.
+ *
+ * This pointer must not be freed, because it does not point to the start of a
+ * region allocated by the memory allocator; instead, just close the @ref
+ * base_context.
+ *
+ *
+ * @section sec_base_user_api_gpuprops_kernel Kernel Operation
+ *
+ * During Base Context Create time, user-side makes a single kernel call:
+ * - A call to fill user memory with GPU information structures
+ *
+ * The kernel side will fill the entire provided, processed @ref base_gpu_props
+ * structure, because this information is required on both the
+ * user and kernel sides; it does not make sense to decode it twice.
+ *
+ * Coherency groups must be derived from the bitmasks, but this can be done
+ * kernel side, and just once at kernel startup: Coherency groups must already
+ * be known kernel-side, to support chains that specify a 'Only Coherent Group'
+ * SW requirement, or 'Only Coherent Group with Tiler' SW requirement.
+ *
+ * @section sec_base_user_api_gpuprops_cocalc Coherency Group calculation
+ * Creation of the coherent group data is done at device-driver startup, and so
+ * is one-time. This will most likely involve a loop with CLZ, shifting, and
+ * bit clearing on the L2_PRESENT mask, depending on whether the
+ * system is L2 Coherent. The number of shader cores is computed by a
+ * population count, since faulty cores may be disabled during production,
+ * producing a non-contiguous mask.
+ *
+ * The memory requirements for this algorithm can be determined either by a u64
+ * population count on the L2_PRESENT mask (a LUT helper is already
+ * required for the above), or by the simple assumption that there can be no
+ * more than 16 coherent groups, since core groups are typically 4 cores.
+ */
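+
+/*
+ * A minimal sketch of the derivation described above (hypothetical code;
+ * the real computation lives kernel-side): walk the L2_PRESENT mask, one
+ * group per set bit, counting cores with a population count:
+ *
+ *   u64 l2 = raw_props.l2_present;
+ *   unsigned int ngroups = 0;
+ *
+ *   while (l2 && ngroups < BASE_MAX_COHERENT_GROUPS) {
+ *           u64 lowest = l2 & -l2;
+ *           u64 core_mask = cores_under_l2_slice(lowest);
+ *
+ *           groups[ngroups].core_mask = core_mask;
+ *           groups[ngroups].num_cores = hweight64(core_mask);
+ *           ngroups++;
+ *           l2 &= ~lowest;
+ *   }
+ *
+ * where l2 & -l2 isolates the lowest set bit, cores_under_l2_slice() stands
+ * in for the shifting/masking step, and hweight64() is the kernel's 64-bit
+ * population count.
+ */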
+
+/**
+ * @addtogroup base_user_api_gpuprops User-side Base GPU Property Query APIs
+ * @{
+ */
+
+/**
+ * @addtogroup base_user_api_gpuprops_dyn Dynamic HW Properties
+ * @{
+ */
+
+#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 4
+
+#define BASE_MAX_COHERENT_GROUPS 16
+
+struct mali_base_gpu_core_props {
+	/**
+	 * Product specific value.
+	 */
+	u32 product_id;
+
+	/**
+	 * Status of the GPU release.
+	 * No defined values, but starts at 0 and increases by one for each
+	 * release status (alpha, beta, EAC, etc.).
+	 * 4 bit values (0-15).
+	 */
+	u16 version_status;
+
+	/**
+	 * Minor release number of the GPU. "P" part of an "RnPn" release number.
+	 * 8 bit values (0-255).
+	 */
+	u16 minor_revision;
+
+	/**
+	 * Major release number of the GPU. "R" part of an "RnPn" release number.
+	 * 4 bit values (0-15).
+	 */
+	u16 major_revision;
+
+	u16 padding;
+
+	/**
+	 * The maximum GPU frequency. Reported to applications by
+	 * clGetDeviceInfo().
+	 */
+	u32 gpu_freq_khz_max;
+
+	/**
+	 * Size of the shader program counter, in bits.
+	 */
+	u32 log2_program_counter_size;
+
+	/**
+	 * TEXTURE_FEATURES_x registers, as exposed by the GPU. This is a
+	 * bitpattern where a set bit indicates that the format is supported.
+	 *
+	 * Before using a texture format, it is recommended that the corresponding
+	 * bit be checked.
+	 */
+	u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+
+	/**
+	 * Theoretical maximum memory available to the GPU. It is unlikely that a
+	 * client will be able to allocate all of this memory for their own
+	 * purposes, but this at least provides an upper bound on the memory
+	 * available to the GPU.
+	 *
+	 * This is required for OpenCL's clGetDeviceInfo() call when
+	 * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL GPU devices. The
+	 * client will not be expecting to allocate anywhere near this value.
+	 */
+	u64 gpu_available_memory_size;
+
+	/**
+	 * The number of execution engines.
+	 */
+	u8 num_exec_engines;
+};
+
+/**
+ * L2 cache properties.
+ *
+ * More information is possible, but associativity and bus width are not
+ * required by upper-level APIs.
+ */
+struct mali_base_gpu_l2_cache_props {
+	u8 log2_line_size;
+	u8 log2_cache_size;
+	u8 num_l2_slices; /* Number of L2C slices. 1 or higher */
+	u8 padding[5];
+};
+
+struct mali_base_gpu_tiler_props {
+	u32 bin_size_bytes;	/* Max is 4*2^15 */
+	u32 max_active_levels;	/* Max is 2^15 */
+};
+
+/**
+ * GPU threading system details.
+ */
+struct mali_base_gpu_thread_props {
+	u32 max_threads;            /* Max. number of threads per core */
+	u32 max_workgroup_size;     /* Max. number of threads per workgroup */
+	u32 max_barrier_size;       /* Max. number of threads that can synchronize on a simple barrier */
+	u16 max_registers;          /* Total size [1..65535] of the register file available per core. */
+	u8  max_task_queue;         /* Max. tasks [1..255] which may be sent to a core before it becomes blocked. */
+	u8  max_thread_group_split; /* Max. allowed value [1..15] of the Thread Group Split field. */
+	u8  impl_tech;              /* 0 = Not specified, 1 = Silicon, 2 = FPGA, 3 = SW Model/Emulation */
+	u8  padding[3];
+	u32 tls_alloc;              /* Number of threads per core that TLS must
+				     * be allocated for
+				     */
+};
+
+/**
+ * @brief descriptor for a coherent group
+ *
+ * \c core_mask exposes all cores in that coherent group, and \c num_cores
+ * provides a cached population-count for that mask.
+ *
+ * @note Whilst all cores are exposed in the mask, not all may be available to
+ * the application, depending on the Kernel Power policy.
+ *
+ * @note If u64s must be 8-byte aligned, then this structure has 32 bits of wastage.
+ */
+struct mali_base_gpu_coherent_group {
+	u64 core_mask;	       /**< Core restriction mask required for the group */
+	u16 num_cores;	       /**< Number of cores in the group */
+	u16 padding[3];
+};
+
+/**
+ * @brief Coherency group information
+ *
+ * Note that the sizes of the members could be reduced. However, the \c group
+ * member might be 8-byte aligned to ensure the u64 core_mask is 8-byte
+ * aligned, thus leading to wastage if the other members sizes were reduced.
+ *
+ * The groups are sorted by core mask. The core masks are non-repeating and do
+ * not intersect.
+ */
+struct mali_base_gpu_coherent_group_info {
+	u32 num_groups;
+
+	/**
+	 * Number of core groups (coherent or not) in the GPU. Equivalent to the number of L2 Caches.
+	 *
+	 * The GPU Counter dumping writes 2048 bytes per core group, regardless of
+	 * whether the core groups are coherent or not. Hence this member is needed
+	 * to calculate how much memory is required for dumping.
+	 *
+	 * @note Do not use it to work out how many valid elements are in the
+	 * group[] member. Use num_groups instead.
+	 */
+	u32 num_core_groups;
+
+	/**
+	 * Coherency features of the memory, accessed by @ref gpu_mem_features
+	 * methods
+	 */
+	u32 coherency;
+
+	u32 padding;
+
+	/**
+	 * Descriptors of coherent groups
+	 */
+	struct mali_base_gpu_coherent_group group[BASE_MAX_COHERENT_GROUPS];
+};
+
+/**
+ * A complete description of the GPU's Hardware Configuration Discovery
+ * registers.
+ *
+ * The information is presented inefficiently for access. For frequent access,
+ * the values should be better expressed in an unpacked form in the
+ * base_gpu_props structure.
+ *
+ * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
+ * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
+ * behaving differently?". In this case, all information about the
+ * configuration is potentially useful, but it <b>does not need to be processed
+ * by the driver</b>. Instead, the raw registers can be processed by the Mali
+ * Tools software on the host PC.
+ *
+ */
+struct gpu_raw_gpu_props {
+	u64 shader_present;
+	u64 tiler_present;
+	u64 l2_present;
+	u64 stack_present;
+
+	u32 l2_features;
+	u32 core_features;
+	u32 mem_features;
+	u32 mmu_features;
+
+	u32 as_present;
+
+	u32 js_present;
+	u32 js_features[GPU_MAX_JOB_SLOTS];
+	u32 tiler_features;
+	u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+
+	u32 gpu_id;
+
+	u32 thread_max_threads;
+	u32 thread_max_workgroup_size;
+	u32 thread_max_barrier_size;
+	u32 thread_features;
+
+	/*
+	 * Note: This is the _selected_ coherency mode rather than the
+	 * available modes as exposed in the coherency_features register.
+	 */
+	u32 coherency_mode;
+
+	u32 thread_tls_alloc;
+};
+
+/**
+ * Return structure for base_get_gpu_props().
+ *
+ * NOTE: the raw_props member in this data structure contains the register
+ * values from which the value of the other members are derived. The derived
+ * members exist to allow for efficient access and/or shielding the details
+ * of the layout of the registers.
+ *
+ */
+typedef struct base_gpu_props {
+	struct mali_base_gpu_core_props core_props;
+	struct mali_base_gpu_l2_cache_props l2_props;
+	u64 unused_1; /* keep for backwards compatibility */
+	struct mali_base_gpu_tiler_props tiler_props;
+	struct mali_base_gpu_thread_props thread_props;
+
+	/** This member is large, likely to be 128 bytes */
+	struct gpu_raw_gpu_props raw_props;
+
+	/** This must be last member of the structure */
+	struct mali_base_gpu_coherent_group_info coherency_info;
+} base_gpu_props;
+
+/** @} end group base_user_api_gpuprops_dyn */
+
+/** @} end group base_user_api_gpuprops */
+
+/**
+ * @addtogroup base_user_api_core User-side Base core APIs
+ * @{
+ */
+
+/**
+ * Flags to pass to ::base_context_init.
+ * Flags can be ORed together to enable multiple things.
+ *
+ * These share the same space as BASEP_CONTEXT_FLAG_*, and so must
+ * not collide with them.
+ */
+typedef u32 base_context_create_flags;
+
+/** No flags set */
+#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0)
+
+/** Base context is embedded in a cctx object (flag used for CINSTR
+ * software counter macros)
+ */
+#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0)
+
+/** Base context is a 'System Monitor' context for Hardware counters.
+ *
+ * One important side effect of this is that job submission is disabled.
+ */
+#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED \
+	((base_context_create_flags)1 << 1)
+
+
+/* Bit-shift used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_SHIFT (3)
+
+/* Bitmask used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \
+	((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
+
+/* Bitpattern describing the base_context_create_flags that can be
+ * passed to the kernel
+ */
+#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \
+	(BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | \
+	 BASEP_CONTEXT_MMU_GROUP_ID_MASK)
+
+/* Bitpattern describing the ::base_context_create_flags that can be
+ * passed to base_context_init()
+ */
+#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \
+	(BASE_CONTEXT_CCTX_EMBEDDED | BASEP_CONTEXT_CREATE_KERNEL_FLAGS)
+
+/*
+ * Private flags used on the base context
+ *
+ * These start at bit 31, and run down to zero.
+ *
+ * They share the same space as @ref base_context_create_flags, and so must
+ * not collide with them.
+ */
+/** Private flag tracking whether job descriptor dumping is disabled */
+#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED \
+	((base_context_create_flags)(1 << 31))
+
+/**
+ * base_context_mmu_group_id_set - Encode a memory group ID in
+ *                                 base_context_create_flags
+ *
+ * Memory allocated for GPU page tables will come from the specified group.
+ *
+ * @group_id: Physical memory group ID. Range is 0..(BASE_MEM_GROUP_COUNT-1).
+ *
+ * Return: Bitmask of flags to pass to base_context_init.
+ */
+static inline base_context_create_flags base_context_mmu_group_id_set(
+	int const group_id)
+{
+	LOCAL_ASSERT(group_id >= 0);
+	LOCAL_ASSERT(group_id < BASE_MEM_GROUP_COUNT);
+	return BASEP_CONTEXT_MMU_GROUP_ID_MASK &
+		((base_context_create_flags)group_id <<
+		BASEP_CONTEXT_MMU_GROUP_ID_SHIFT);
+}
+
+/**
+ * base_context_mmu_group_id_get - Decode a memory group ID from
+ *                                 base_context_create_flags
+ *
+ * Memory allocated for GPU page tables will come from the returned group.
+ *
+ * @flags: Bitmask of flags to pass to base_context_init.
+ *
+ * Return: Physical memory group ID. Valid range is 0..(BASE_MEM_GROUP_COUNT-1).
+ */
+static inline int base_context_mmu_group_id_get(
+	base_context_create_flags const flags)
+{
+	LOCAL_ASSERT(flags == (flags & BASEP_CONTEXT_CREATE_ALLOWED_FLAGS));
+	return (int)((flags & BASEP_CONTEXT_MMU_GROUP_ID_MASK) >>
+			BASEP_CONTEXT_MMU_GROUP_ID_SHIFT);
+}
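+
+/*
+ * Usage sketch: encode a memory group ID into the creation flags and
+ * recover it again (group 2 is an arbitrary example value):
+ *
+ *   base_context_create_flags flags =
+ *           BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED |
+ *           base_context_mmu_group_id_set(2);
+ *
+ *   LOCAL_ASSERT(base_context_mmu_group_id_get(flags) == 2);
+ */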
+
+/** @} end group base_user_api_core */
+
+/** @} end group base_user_api */
+
+/**
+ * @addtogroup base_plat_config_gpuprops Base Platform Config GPU Properties
+ * @{
+ *
+ * C Pre-processor macros are exposed here to do with Platform
+ * Config.
+ *
+ * These include:
+ * - GPU Properties that are constant on a particular Midgard Family
+ * Implementation e.g. Maximum samples per pixel on Mali-T600.
+ * - General platform config for the GPU, such as the GPU major and minor
+ * revision.
+ */
+
+/** @} end group base_plat_config_gpuprops */
+
+/**
+ * @addtogroup base_api Base APIs
+ * @{
+ */
+
+/** @} end group base_api */
+
+/* Enable additional tracepoints for latency measurements (TL_ATOM_READY,
+ * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST) */
+#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0)
+
+/* Indicate that job dumping is enabled. This could affect certain timers
+ * to account for the performance impact. */
+#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1)
+
+#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
+		BASE_TLSTREAM_JOB_DUMPING_ENABLED)
+
+/**
+ * A number of bit flags are defined for requesting cpu_gpu_timeinfo. These
+ * flags are also used, where applicable, for specifying which fields
+ * are valid following the request operation.
+ */
+
+/* For monotonic (counter) timefield */
+#define BASE_TIMEINFO_MONOTONIC_FLAG (1UL << 0)
+/* For system wide timestamp */
+#define BASE_TIMEINFO_TIMESTAMP_FLAG (1UL << 1)
+/* For GPU cycle counter */
+#define BASE_TIMEINFO_CYCLE_COUNTER_FLAG (1UL << 2)
+
+#define BASE_TIMEREQUEST_ALLOWED_FLAGS (\
+		BASE_TIMEINFO_MONOTONIC_FLAG | \
+		BASE_TIMEINFO_TIMESTAMP_FLAG | \
+		BASE_TIMEINFO_CYCLE_COUNTER_FLAG)
+
+
+#endif				/* _BASE_KERNEL_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_base_mem_priv.h b/drivers/gpu/arm/midgard/mali_base_mem_priv.h
new file mode 100644
index 0000000..52c8a4f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_base_mem_priv.h
@@ -0,0 +1,57 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#ifndef _BASE_MEM_PRIV_H_
+#define _BASE_MEM_PRIV_H_
+
+#define BASE_SYNCSET_OP_MSYNC	(1U << 0)
+#define BASE_SYNCSET_OP_CSYNC	(1U << 1)
+
+/*
+ * This structure describe a basic memory coherency operation.
+ * It can either be:
+ * @li a sync from CPU to Memory:
+ *	- type = ::BASE_SYNCSET_OP_MSYNC
+ *	- mem_handle = a handle to the memory object on which the operation
+ *	  is taking place
+ *	- user_addr = the address of the range to be synced
+ *	- size = the amount of data to be synced, in bytes
+ *	- offset is ignored.
+ * @li a sync from Memory to CPU:
+ *	- type = ::BASE_SYNCSET_OP_CSYNC
+ *	- mem_handle = a handle to the memory object on which the operation
+ *	  is taking place
+ *	- user_addr = the address of the range to be synced
+ *	- size = the amount of data to be synced, in bytes.
+ *	- offset is ignored.
+ */
+struct basep_syncset {
+	base_mem_handle mem_handle;
+	u64 user_addr;
+	u64 size;
+	u8 type;
+	u8 padding[7];
+};
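+
+/*
+ * Illustrative sketch ('handle' and 'addr' are assumptions): a CPU-to-Memory
+ * sync of 4096 bytes starting at user address 'addr' would be described as:
+ *
+ *   struct basep_syncset sset = {
+ *           .mem_handle = handle,
+ *           .user_addr = (u64)(uintptr_t)addr,
+ *           .size = 4096,
+ *           .type = BASE_SYNCSET_OP_MSYNC,
+ *   };
+ */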
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_base_vendor_specific_func.h b/drivers/gpu/arm/midgard/mali_base_vendor_specific_func.h
new file mode 100644
index 0000000..5e8add8
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_base_vendor_specific_func.h
@@ -0,0 +1,29 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#ifndef _BASE_VENDOR_SPEC_FUNC_H_
+#define _BASE_VENDOR_SPEC_FUNC_H_
+
+int kbase_get_vendor_specific_cpu_clock_speed(u32 * const);
+
+#endif	/*_BASE_VENDOR_SPEC_FUNC_H_*/
diff --git a/drivers/gpu/arm/midgard/mali_kbase.h b/drivers/gpu/arm/midgard/mali_kbase.h
new file mode 100644
index 0000000..1ab785e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase.h
@@ -0,0 +1,735 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#ifndef _KBASE_H_
+#define _KBASE_H_
+
+#include <mali_malisw.h>
+
+#include <mali_kbase_debug.h>
+
+#include <linux/atomic.h>
+#include <linux/highmem.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+#include <linux/sched/mm.h>
+#endif
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "mali_base_kernel.h"
+#include <mali_kbase_linux.h>
+
+/*
+ * Include mali_kbase_defs.h first as this provides types needed by other local
+ * header files.
+ */
+#include "mali_kbase_defs.h"
+
+#include "mali_kbase_context.h"
+#include "mali_kbase_strings.h"
+#include "mali_kbase_mem_lowlevel.h"
+#include "mali_kbase_js.h"
+#include "mali_kbase_utility.h"
+#include "mali_kbase_mem.h"
+#include "mali_kbase_gpu_memory_debugfs.h"
+#include "mali_kbase_mem_profile_debugfs.h"
+#include "mali_kbase_debug_job_fault.h"
+#include "mali_kbase_jd_debugfs.h"
+#include "mali_kbase_gpuprops.h"
+#include "mali_kbase_jm.h"
+#include "mali_kbase_ioctl.h"
+
+#include "ipa/mali_kbase_ipa.h"
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+#include <trace/events/gpu.h>
+#endif
+
+
+#ifndef u64_to_user_ptr
+/* Introduced in Linux v4.6 */
+#define u64_to_user_ptr(x) ((void __user *)(uintptr_t)x)
+#endif
+
+
+/* Physical memory group ID for a special page which can alias several regions.
+ */
+#define KBASE_MEM_GROUP_SINK BASE_MEM_GROUP_DEFAULT
+
+/*
+ * Kernel-side Base (KBase) APIs
+ */
+
+struct kbase_device *kbase_device_alloc(void);
+/*
+ * Note: the configuration attributes member of kbdev needs to have
+ * been set up before calling kbase_device_init
+ */
+
+/*
+ * API to acquire the device list semaphore and return a pointer
+ * to the device list head
+ */
+const struct list_head *kbase_dev_list_get(void);
+/* API to release the device list semaphore */
+void kbase_dev_list_put(const struct list_head *dev_list);
+
+int kbase_device_init(struct kbase_device * const kbdev);
+void kbase_device_term(struct kbase_device *kbdev);
+void kbase_device_free(struct kbase_device *kbdev);
+int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);
+
+/* Needed for gator integration and for reporting vsync information */
+struct kbase_device *kbase_find_device(int minor);
+void kbase_release_device(struct kbase_device *kbdev);
+
+/**
+ * kbase_context_get_unmapped_area() - get an address range which is currently
+ *                                     unmapped.
+ * @kctx: A kernel base context (which has its own GPU address space).
+ * @addr: CPU mapped address (set to 0 since MAP_FIXED mapping is not allowed,
+ *        as the Mali GPU driver decides the mapping).
+ * @len: Length of the address range.
+ * @pgoff: Page offset within the GPU address space of the kbase context.
+ * @flags: Flags for the allocation.
+ *
+ * Finds the unmapped address range which satisfies requirements specific to
+ * GPU and those provided by the call parameters.
+ *
+ * 1) Requirement for allocations greater than 2MB:
+ * - alignment offset is set to 2MB and the alignment mask to 2MB decremented
+ * by 1.
+ *
+ * 2) Requirements imposed for the shader memory alignment:
+ * - alignment is decided by the number of GPU pc bits which can be read from
+ * GPU properties of the device associated with this kbase context; alignment
+ * offset is set to this value in bytes and the alignment mask to the offset
+ * decremented by 1.
+ * - allocations must not be at 4GB boundaries. Such cases are indicated
+ * by the flag KBASE_REG_GPU_NX not being set (check the flags of the kbase
+ * region). 4GB boundaries can be checked against @ref BASE_MEM_MASK_4GB.
+ *
+ * 3) Requirements imposed for tiler memory alignment, cases indicated by
+ * the flag @ref KBASE_REG_TILER_ALIGN_TOP (check the flags of the kbase
+ * region):
+ * - alignment offset is set to the difference between the kbase region
+ * extent (converted from the original value in pages to bytes) and the kbase
+ * region initial_commit (also converted from the original value in pages to
+ * bytes); alignment mask is set to the kbase region extent in bytes and
+ * decremented by 1.
+ *
+ * Return: if successful, address of the unmapped area aligned as required;
+ *         error code (negative) in case of failure.
+ */
+unsigned long kbase_context_get_unmapped_area(struct kbase_context *kctx,
+		const unsigned long addr, const unsigned long len,
+		const unsigned long pgoff, const unsigned long flags);
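+
+/*
+ * Illustrative sketch of requirement 1 above: for an allocation larger than
+ * 2MB the alignment pair would be derived as
+ *
+ *	align_offset = SZ_2M;
+ *	align_mask   = SZ_2M - 1;
+ *
+ * (the variable names are hypothetical; SZ_2M is the standard kernel size
+ * constant from linux/sizes.h).
+ */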
+
+int kbase_jd_init(struct kbase_context *kctx);
+void kbase_jd_exit(struct kbase_context *kctx);
+
+/**
+ * kbase_jd_submit - Submit atoms to the job dispatcher
+ *
+ * @kctx: The kbase context to submit to
+ * @user_addr: The address in user space of the struct base_jd_atom_v2 array
+ * @nr_atoms: The number of atoms in the array
+ * @stride: sizeof(struct base_jd_atom_v2)
+ * @uk6_atom: true if the atoms are legacy atoms (struct base_jd_atom_v2_uk6)
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_jd_submit(struct kbase_context *kctx,
+		void __user *user_addr, u32 nr_atoms, u32 stride,
+		bool uk6_atom);
+
+/**
+ * kbase_jd_done_worker - Handle a job completion
+ * @data: a &struct work_struct
+ *
+ * This function requeues the job from the runpool (if it was soft-stopped or
+ * removed from NEXT registers).
+ *
+ * Removes it from the system if it finished/failed/was cancelled.
+ *
+ * Resolves dependencies to add dependent jobs to the context, potentially
+ * starting them if necessary (which may add more references to the context)
+ *
+ * Releases the reference to the context from the no-longer-running job.
+ *
+ * Handles retrying submission outside of IRQ context if it failed from within
+ * IRQ context.
+ */
+void kbase_jd_done_worker(struct work_struct *data);
+
+void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
+		kbasep_js_atom_done_code done_code);
+void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
+void kbase_jd_zap_context(struct kbase_context *kctx);
+bool jd_done_nolock(struct kbase_jd_atom *katom,
+		struct list_head *completed_jobs_ctx);
+void kbase_jd_free_external_resources(struct kbase_jd_atom *katom);
+bool jd_submit_atom(struct kbase_context *kctx,
+			 const struct base_jd_atom_v2 *user_atom,
+			 struct kbase_jd_atom *katom);
+void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_job_done - Process completed jobs from job interrupt
+ * @kbdev: Pointer to the kbase device.
+ * @done: Bitmask of done or failed jobs, from JOB_IRQ_STAT register
+ *
+ * This function processes the completed, or failed, jobs from the GPU job
+ * slots, for the bits set in the @done bitmask.
+ *
+ * The hwaccess_lock must be held when calling this function.
+ */
+void kbase_job_done(struct kbase_device *kbdev, u32 done);
+
+/**
+ * kbase_job_slot_ctx_priority_check_locked(): - Check for lower priority atoms
+ *                                               and soft stop them
+ * @kctx: Pointer to context to check.
+ * @katom: Pointer to priority atom.
+ *
+ * Atoms from @kctx on the same job slot as @katom, which have lower priority
+ * than @katom will be soft stopped and put back in the queue, so that atoms
+ * with higher priority can run.
+ *
+ * The hwaccess_lock must be held when calling this function.
+ */
+void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
+				struct kbase_jd_atom *katom);
+
+void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
+		struct kbase_jd_atom *target_katom);
+void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
+		struct kbase_jd_atom *target_katom, u32 sw_flags);
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+		struct kbase_jd_atom *target_katom);
+void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
+		base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom);
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+		struct kbase_jd_atom *target_katom);
+
+void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
+int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
+int kbase_event_pending(struct kbase_context *ctx);
+int kbase_event_init(struct kbase_context *kctx);
+void kbase_event_close(struct kbase_context *kctx);
+void kbase_event_cleanup(struct kbase_context *kctx);
+void kbase_event_wakeup(struct kbase_context *kctx);
+
+/**
+ * kbasep_jit_alloc_validate() - Validate the JIT allocation info.
+ *
+ * @kctx:	Pointer to the kbase context within which the JIT
+ *		allocation is to be validated.
+ * @info:	Pointer to struct @base_jit_alloc_info
+ *			which is to be validated.
+ * Return: 0 if the JIT allocation info is valid; negative error code otherwise
+ */
+int kbasep_jit_alloc_validate(struct kbase_context *kctx,
+					struct base_jit_alloc_info *info);
+/**
+ * kbase_free_user_buffer() - Free memory allocated for struct
+ *		@kbase_debug_copy_buffer.
+ *
+ * @buffer:	Pointer to the memory location allocated for the object
+ *		of the type struct @kbase_debug_copy_buffer.
+ */
+static inline void kbase_free_user_buffer(
+		struct kbase_debug_copy_buffer *buffer)
+{
+	struct page **pages = buffer->extres_pages;
+	int nr_pages = buffer->nr_extres_pages;
+
+	if (pages) {
+		int i;
+
+		for (i = 0; i < nr_pages; i++) {
+			struct page *pg = pages[i];
+
+			if (pg)
+				put_page(pg);
+		}
+		kfree(pages);
+	}
+}
+
+/**
+ * kbase_mem_copy_from_extres_page() - Copy pages from external resources.
+ *
+ * @kctx:		kbase context within which the copying is to take place.
+ * @extres_page:	Pointer to the page which corresponds to the external
+ *			resource from which the copying will take place.
+ * @pages:		Pointer to the pages to which the content is to be
+ *			copied from the provided external resources.
+ * @nr_pages:		Number of pages to copy.
+ * @target_page_nr:	Number of target pages which will be used for copying.
+ * @offset:		Offset into the target pages from which the copying
+ *			is to be performed.
+ * @to_copy:		Size of the chunk to be copied, in bytes.
+ */
+void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
+		void *extres_page, struct page **pages, unsigned int nr_pages,
+		unsigned int *target_page_nr, size_t offset, size_t *to_copy);
+/**
+ * kbase_mem_copy_from_extres() - Copy from external resources.
+ *
+ * @kctx:	kbase context within which the copying is to take place.
+ * @buf_data:	Pointer to the information about external resources:
+ *		pages pertaining to the external resource, number of
+ *		pages to copy.
+ */
+int kbase_mem_copy_from_extres(struct kbase_context *kctx,
+		struct kbase_debug_copy_buffer *buf_data);
+int kbase_process_soft_job(struct kbase_jd_atom *katom);
+int kbase_prepare_soft_job(struct kbase_jd_atom *katom);
+void kbase_finish_soft_job(struct kbase_jd_atom *katom);
+void kbase_cancel_soft_job(struct kbase_jd_atom *katom);
+void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev);
+void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom);
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom);
+#endif
+int kbase_soft_event_update(struct kbase_context *kctx,
+			    u64 event,
+			    unsigned char new_status);
+
+void kbasep_soft_job_timeout_worker(struct timer_list *timer);
+void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);
+
+void kbasep_as_do_poke(struct work_struct *work);
+
+/** Returns the name associated with a Mali exception code
+ *
+ * This function is called from the interrupt handler when a GPU fault occurs.
+ * It reports the details of the fault using KBASE_DEBUG_PRINT_WARN.
+ *
+ * @param[in] kbdev     The kbase device that the GPU fault occurred from.
+ * @param[in] exception_code  exception code
+ * @return name associated with the exception code
+ */
+const char *kbase_exception_name(struct kbase_device *kbdev,
+		u32 exception_code);
+
+/**
+ * Check whether a system suspend is in progress, or whether the system has
+ * already been suspended.
+ *
+ * The caller should ensure that either kbdev->pm.active_count_lock is held, or
+ * a dmb was executed recently (to ensure the value read is up-to-date).
+ * However, without a lock the value could change afterwards.
+ *
+ * @return false if a suspend is not in progress
+ * @return !=false otherwise
+ */
+static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev)
+{
+	return kbdev->pm.suspending;
+}
+
+/**
+ * kbase_pm_is_active - Determine whether the GPU is active
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This takes into account whether there is an active context reference.
+ *
+ * Return: true if the GPU is active, false otherwise
+ */
+static inline bool kbase_pm_is_active(struct kbase_device *kbdev)
+{
+	return kbdev->pm.active_count > 0;
+}
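+
+/*
+ * Example (illustrative only): a caller can combine the two helpers above to
+ * skip non-essential work while the device is suspending or fully idle:
+ *
+ *	if (kbase_pm_is_suspending(kbdev) || !kbase_pm_is_active(kbdev))
+ *		return;
+ */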
+
+/**
+ * kbase_pm_metrics_start - Start the utilization metrics timer
+ * @kbdev: Pointer to the kbase device for which to start the utilization
+ *         metrics calculation thread.
+ *
+ * Start the timer that drives the metrics calculation, runs the custom DVFS.
+ */
+void kbase_pm_metrics_start(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_metrics_stop - Stop the utilization metrics timer
+ * @kbdev: Pointer to the kbase device for which to stop the utilization
+ *         metrics calculation thread.
+ *
+ * Stop the timer that drives the metrics calculation, runs the custom DVFS.
+ */
+void kbase_pm_metrics_stop(struct kbase_device *kbdev);
+
+/**
+ * Return the atom's ID, as was originally supplied by userspace in
+ * base_jd_atom_v2::atom_number
+ */
+static inline int kbase_jd_atom_id(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	int result;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(katom);
+	KBASE_DEBUG_ASSERT(katom->kctx == kctx);
+
+	result = katom - &kctx->jctx.atoms[0];
+	KBASE_DEBUG_ASSERT(result >= 0 && result < BASE_JD_ATOM_COUNT);
+	return result;
+}
+
+/**
+ * kbase_jd_atom_from_id - Return the atom structure for the given atom ID
+ * @kctx: Context pointer
+ * @id:   ID of atom to retrieve
+ *
+ * Return: Pointer to struct kbase_jd_atom associated with the supplied ID
+ */
+static inline struct kbase_jd_atom *kbase_jd_atom_from_id(
+		struct kbase_context *kctx, int id)
+{
+	return &kctx->jctx.atoms[id];
+}
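+
+/*
+ * Example (illustrative only): the two helpers above are inverses of each
+ * other, so an atom can be converted to its ID and back:
+ *
+ *	int id = kbase_jd_atom_id(kctx, katom);
+ *
+ *	KBASE_DEBUG_ASSERT(kbase_jd_atom_from_id(kctx, id) == katom);
+ */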
+
+/**
+ * Initialize the disjoint state
+ *
+ * The disjoint event count and state are both set to zero.
+ *
+ * Disjoint functions usage:
+ *
+ * The disjoint event count should be incremented whenever a disjoint event occurs.
+ *
+ * There are several cases which are regarded as disjoint behavior. Rather than just incrementing
+ * the counter during disjoint events, we also increment it when jobs may be affected
+ * by what the GPU is currently doing. To facilitate this we have the concept of disjoint state.
+ *
+ * Disjoint state is entered during GPU reset. Increasing the disjoint state also increases
+ * the count of disjoint events.
+ *
+ * The disjoint state is then used to increase the count of disjoint events during job submission
+ * and job completion. Any atom submitted or completed while the disjoint state is greater than
+ * zero is regarded as a disjoint event.
+ *
+ * The disjoint event counter is also incremented immediately whenever a job is soft stopped
+ * and during context creation.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_init(struct kbase_device *kbdev);
+
+/**
+ * Increase the count of disjoint events
+ * called when a disjoint event has happened
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_event(struct kbase_device *kbdev);
+
+/**
+ * Increase the count of disjoint events only if the GPU is in a disjoint state
+ *
+ * This should be called when something happens which could be disjoint if the GPU
+ * is in a disjoint state. The state refcount keeps track of this.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_event_potential(struct kbase_device *kbdev);
+
+/**
+ * Returns the count of disjoint events
+ *
+ * @param kbdev The kbase device
+ * @return the count of disjoint events
+ */
+u32 kbase_disjoint_event_get(struct kbase_device *kbdev);
+
+/**
+ * Increment the refcount state indicating that the GPU is in a disjoint state.
+ *
+ * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
+ * Once the disjoint state has ended, @ref kbase_disjoint_state_down
+ * should be called.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_state_up(struct kbase_device *kbdev);
+
+/**
+ * Decrement the refcount state
+ *
+ * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
+ *
+ * Called after @ref kbase_disjoint_state_up once the disjoint state is over.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_state_down(struct kbase_device *kbdev);
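+
+/*
+ * Example (illustrative sketch): bracketing a GPU reset with the disjoint
+ * state helpers, so that any atom submitted or completed while the reset is
+ * in progress is counted as a disjoint event:
+ *
+ *	kbase_disjoint_state_up(kbdev);
+ *	... perform the GPU reset ...
+ *	kbase_disjoint_state_down(kbdev);
+ */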
+
+/**
+ * If a job is soft-stopped and the number of contexts is >= this value,
+ * it is reported as a disjoint event.
+ */
+#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2
+
+#if !defined(UINT64_MAX)
+	#define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
+#endif
+
+#if KBASE_TRACE_ENABLE
+void kbasep_trace_debugfs_init(struct kbase_device *kbdev);
+
+#ifndef CONFIG_MALI_SYSTEM_TRACE
+/** Add trace values about a job-slot
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) \
+	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+			KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, 0)
+
+/** Add trace values about a job-slot, with info
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \
+	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+			KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, info_val)
+
+/** Add trace values about a ctx refcount
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) \
+	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+			KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, 0)
+/** Add trace values about a ctx refcount, and info
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \
+	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+			KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, info_val)
+
+/** Add trace values (no slot or refcount)
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)     \
+	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+			0, 0, 0, info_val)
+
+/** Clear the trace */
+#define KBASE_TRACE_CLEAR(kbdev) \
+	kbasep_trace_clear(kbdev)
+
+/** Dump the slot trace */
+#define KBASE_TRACE_DUMP(kbdev) \
+	kbasep_trace_dump(kbdev)
+
+/** PRIVATE - do not use directly. Use KBASE_TRACE_ADD() instead */
+void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val);
+/** PRIVATE - do not use directly. Use KBASE_TRACE_CLEAR() instead */
+void kbasep_trace_clear(struct kbase_device *kbdev);
+#else /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
+/* Dispatch kbase trace events as system trace events */
+#include <mali_linux_kbase_trace.h>
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
+	trace_mali_##code(jobslot, 0)
+
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
+	trace_mali_##code(jobslot, info_val)
+
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
+	trace_mali_##code(refcount, 0)
+
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
+	trace_mali_##code(refcount, info_val)
+
+#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)\
+	trace_mali_##code(gpu_addr, info_val)
+
+#define KBASE_TRACE_CLEAR(kbdev)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(0);\
+	} while (0)
+#define KBASE_TRACE_DUMP(kbdev)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#endif /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
+#else
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(gpu_addr);\
+		CSTD_UNUSED(jobslot);\
+	} while (0)
+
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(gpu_addr);\
+		CSTD_UNUSED(jobslot);\
+		CSTD_UNUSED(info_val);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(gpu_addr);\
+		CSTD_UNUSED(refcount);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(gpu_addr);\
+		CSTD_UNUSED(refcount);\
+		CSTD_UNUSED(info_val);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#define KBASE_TRACE_ADD(kbdev, code, subcode, ctx, katom, val)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(subcode);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(val);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#define KBASE_TRACE_CLEAR(kbdev)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(0);\
+	} while (0)
+#define KBASE_TRACE_DUMP(kbdev)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(0);\
+	} while (0)
+#endif /* KBASE_TRACE_ENABLE */
+/** PRIVATE - do not use directly. Use KBASE_TRACE_DUMP() instead */
+void kbasep_trace_dump(struct kbase_device *kbdev);
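+
+/*
+ * Example (illustrative only; assumes a trace code such as CORE_CTX_DESTROY
+ * is present in the trace code list): with KBASE_TRACE_ENABLE set, a trace
+ * point can be recorded and the ringbuffer later dumped to the kernel log:
+ *
+ *	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, NULL, NULL, 0u, 0u);
+ *	KBASE_TRACE_DUMP(kbdev);
+ */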
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
+
+/* kbase_io_history_init - initialize data struct for register access history
+ *
+ * @h The register history to initialize
+ * @n The number of register accesses that the buffer could hold
+ *
+ * @return 0 if successfully initialized, failure otherwise
+ */
+int kbase_io_history_init(struct kbase_io_history *h, u16 n);
+
+/* kbase_io_history_term - uninit all resources for the register access history
+ *
+ * @h The register history to terminate
+ */
+void kbase_io_history_term(struct kbase_io_history *h);
+
+/* kbase_io_history_dump - print the register history to the kernel ring buffer
+ *
+ * @kbdev Pointer to kbase_device containing the register history to dump
+ */
+void kbase_io_history_dump(struct kbase_device *kbdev);
+
+/**
+ * kbase_io_history_resize - resize the register access history buffer.
+ *
+ * @h: Pointer to a valid register history to resize
+ * @new_size: Number of accesses the buffer could hold
+ *
+ * A successful resize will clear all recent register accesses.
+ * If resizing fails for any reason (e.g., could not allocate memory, invalid
+ * buffer size) then the original buffer will be kept intact.
+ *
+ * Return: 0 if the buffer was resized, failure otherwise
+ */
+int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size);
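+
+/*
+ * Example (illustrative sketch) of the register history lifecycle; note that
+ * kbase_io_history_init() returns 0 on success:
+ *
+ *	struct kbase_io_history h;
+ *
+ *	if (!kbase_io_history_init(&h, 256)) {
+ *		kbase_io_history_resize(&h, 512);
+ *		kbase_io_history_term(&h);
+ *	}
+ */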
+
+#else /* CONFIG_DEBUG_FS */
+
+#define kbase_io_history_init(...) ((int)0)
+
+#define kbase_io_history_term CSTD_NOP
+
+#define kbase_io_history_dump CSTD_NOP
+
+#define kbase_io_history_resize CSTD_NOP
+
+#endif /* CONFIG_DEBUG_FS */
+
+
+#endif /* _KBASE_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
new file mode 100644
index 0000000..118511a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
@@ -0,0 +1,209 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2015,2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <mali_kbase.h>
+#include <mali_kbase_10969_workaround.h>
+
+/* Mask of X and Y coordinates for the coordinate words in the descriptors */
+#define X_COORDINATE_MASK 0x00000FFF
+#define Y_COORDINATE_MASK 0x0FFF0000
+/* Max number of words needed from the fragment shader job descriptor */
+#define JOB_HEADER_SIZE_IN_WORDS 10
+#define JOB_HEADER_SIZE (JOB_HEADER_SIZE_IN_WORDS*sizeof(u32))
+
+/* Word 0: Status Word */
+#define JOB_DESC_STATUS_WORD 0
+/* Word 1: Restart Index */
+#define JOB_DESC_RESTART_INDEX_WORD 1
+/* Word 2: Fault address low word */
+#define JOB_DESC_FAULT_ADDR_LOW_WORD 2
+/* Word 8: Minimum Tile Coordinates */
+#define FRAG_JOB_DESC_MIN_TILE_COORD_WORD 8
+/* Word 9: Maximum Tile Coordinates */
+#define FRAG_JOB_DESC_MAX_TILE_COORD_WORD 9
+
+int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom)
+{
+	struct device *dev = katom->kctx->kbdev->dev;
+	u32   clamped = 0;
+	struct kbase_va_region *region;
+	struct tagged_addr *page_array;
+	u64 page_index;
+	u32 offset = katom->jc & (~PAGE_MASK);
+	u32 *page_1 = NULL;
+	u32 *page_2 = NULL;
+	u32   job_header[JOB_HEADER_SIZE_IN_WORDS];
+	void *dst = job_header;
+	u32 minX, minY, maxX, maxY;
+	u32 restartX, restartY;
+	struct page *p;
+	u32 copy_size;
+
+	dev_warn(dev, "Called TILE_RANGE_FAULT workaround clamping function.\n");
+	if (!(katom->core_req & BASE_JD_REQ_FS))
+		return 0;
+
+	kbase_gpu_vm_lock(katom->kctx);
+	region = kbase_region_tracker_find_region_enclosing_address(katom->kctx,
+			katom->jc);
+	if (kbase_is_region_invalid_or_free(region))
+		goto out_unlock;
+
+	page_array = kbase_get_cpu_phy_pages(region);
+	if (!page_array)
+		goto out_unlock;
+
+	page_index = (katom->jc >> PAGE_SHIFT) - region->start_pfn;
+
+	p = as_page(page_array[page_index]);
+
+	/* We need the first 10 words of the fragment shader job descriptor.
+	 * We need to check that offset + 10 words is less than the page
+	 * size; otherwise we also need to map the next page.
+	 * copy_size equals JOB_HEADER_SIZE when the whole descriptor lies
+	 * within the page, and is smaller otherwise.
+	 */
+	copy_size = MIN(PAGE_SIZE - offset, JOB_HEADER_SIZE);
+
+	page_1 = kmap_atomic(p);
+
+	/* page_1 is a u32 pointer, offset is expressed in bytes */
+	page_1 += offset>>2;
+
+	kbase_sync_single_for_cpu(katom->kctx->kbdev,
+			kbase_dma_addr(p) + offset,
+			copy_size, DMA_BIDIRECTIONAL);
+
+	memcpy(dst, page_1, copy_size);
+
+	/* The data needed overflows the page boundary, so we need to
+	 * map the subsequent page. */
+	if (copy_size < JOB_HEADER_SIZE) {
+		p = as_page(page_array[page_index + 1]);
+		page_2 = kmap_atomic(p);
+
+		kbase_sync_single_for_cpu(katom->kctx->kbdev,
+				kbase_dma_addr(p),
+				JOB_HEADER_SIZE - copy_size, DMA_BIDIRECTIONAL);
+
+		memcpy(dst + copy_size, page_2, JOB_HEADER_SIZE - copy_size);
+	}
+
+	/* We managed to correctly map one or two pages (in case of overflow) */
+	/* Get Bounding Box data and restart index from fault address low word */
+	minX = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & X_COORDINATE_MASK;
+	minY = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & Y_COORDINATE_MASK;
+	maxX = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & X_COORDINATE_MASK;
+	maxY = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & Y_COORDINATE_MASK;
+	restartX = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & X_COORDINATE_MASK;
+	restartY = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & Y_COORDINATE_MASK;
+
+	dev_warn(dev, "Before Clamping:\n"
+			"Jobstatus: %08x\n"
+			"restartIdx: %08x\n"
+			"Fault_addr_low: %08x\n"
+			"minCoordsX: %08x minCoordsY: %08x\n"
+			"maxCoordsX: %08x maxCoordsY: %08x\n",
+			job_header[JOB_DESC_STATUS_WORD],
+			job_header[JOB_DESC_RESTART_INDEX_WORD],
+			job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
+			minX, minY,
+			maxX, maxY);
+
+	/* Set the restart index to the one which generated the fault */
+	job_header[JOB_DESC_RESTART_INDEX_WORD] =
+			job_header[JOB_DESC_FAULT_ADDR_LOW_WORD];
+
+	if (restartX < minX) {
+		job_header[JOB_DESC_RESTART_INDEX_WORD] = (minX) | restartY;
+		dev_warn(dev,
+			"Clamping restart X index to minimum. %08x clamped to %08x\n",
+			restartX, minX);
+		clamped =  1;
+	}
+	if (restartY < minY) {
+		job_header[JOB_DESC_RESTART_INDEX_WORD] = (minY) | restartX;
+		dev_warn(dev,
+			"Clamping restart Y index to minimum. %08x clamped to %08x\n",
+			restartY, minY);
+		clamped =  1;
+	}
+	if (restartX > maxX) {
+		job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxX) | restartY;
+		dev_warn(dev,
+			"Clamping restart X index to maximum. %08x clamped to %08x\n",
+			restartX, maxX);
+		clamped =  1;
+	}
+	if (restartY > maxY) {
+		job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxY) | restartX;
+		dev_warn(dev,
+			"Clamping restart Y index to maximum. %08x clamped to %08x\n",
+			restartY, maxY);
+		clamped =  1;
+	}
+
+	if (clamped) {
+		/* Reset the fault address low word
+		 * and set the job status to STOPPED */
+		job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] = 0x0;
+		job_header[JOB_DESC_STATUS_WORD] = BASE_JD_EVENT_STOPPED;
+		dev_warn(dev, "After Clamping:\n"
+				"Jobstatus: %08x\n"
+				"restartIdx: %08x\n"
+				"Fault_addr_low: %08x\n"
+				"minCoordsX: %08x minCoordsY: %08x\n"
+				"maxCoordsX: %08x maxCoordsY: %08x\n",
+				job_header[JOB_DESC_STATUS_WORD],
+				job_header[JOB_DESC_RESTART_INDEX_WORD],
+				job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
+				minX, minY,
+				maxX, maxY);
+
+		/* Flush CPU cache to update memory for future GPU reads */
+		memcpy(page_1, dst, copy_size);
+		p = as_page(page_array[page_index]);
+
+		kbase_sync_single_for_device(katom->kctx->kbdev,
+				kbase_dma_addr(p) + offset,
+				copy_size, DMA_TO_DEVICE);
+
+		if (copy_size < JOB_HEADER_SIZE) {
+			memcpy(page_2, dst + copy_size,
+					JOB_HEADER_SIZE - copy_size);
+			p = as_page(page_array[page_index + 1]);
+
+			kbase_sync_single_for_device(katom->kctx->kbdev,
+					kbase_dma_addr(p),
+					JOB_HEADER_SIZE - copy_size,
+					DMA_TO_DEVICE);
+		}
+	}
+	if (copy_size < JOB_HEADER_SIZE)
+		kunmap_atomic(page_2);
+
+	kunmap_atomic(page_1);
+
+out_unlock:
+	kbase_gpu_vm_unlock(katom->kctx);
+	return clamped;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h
new file mode 100644
index 0000000..379a05a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2014, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_10969_WORKAROUND_
+#define _KBASE_10969_WORKAROUND_
+
+/**
+ * kbasep_10969_workaround_clamp_coordinates - Apply the WA to clamp the restart indices
+ * @katom: atom representing the fragment job for which the WA has to be applied
+ *
+ * This workaround is used to solve an HW issue with single iterator GPUs.
+ * If a fragment job is soft-stopped on the edge of its bounding box, it can happen
+ * that the restart index is out of bounds and the rerun causes a tile range
+ * fault. If this happens we try to clamp the restart index to a correct value.
+ */
+int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom);
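+
+/*
+ * Example (illustrative only): a caller in the job exception path might use
+ * the non-zero return value to decide that the clamped job can be re-run:
+ *
+ *	if (kbasep_10969_workaround_clamp_coordinates(katom))
+ *		... resubmit the fragment job with the clamped restart index ...
+ */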
+
+#endif /* _KBASE_10969_WORKAROUND_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c
new file mode 100644
index 0000000..6f638cc
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c
@@ -0,0 +1,112 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/debugfs.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_as_fault_debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_DEBUG
+
+static int kbase_as_fault_read(struct seq_file *sfile, void *data)
+{
+	uintptr_t as_no = (uintptr_t) sfile->private;
+
+	struct list_head *entry;
+	const struct list_head *kbdev_list;
+	struct kbase_device *kbdev = NULL;
+
+	kbdev_list = kbase_dev_list_get();
+
+	list_for_each(entry, kbdev_list) {
+		kbdev = list_entry(entry, struct kbase_device, entry);
+
+		if (kbdev->debugfs_as_read_bitmap & (1ULL << as_no)) {
+
+			/* don't show this one again until another fault occurs */
+			kbdev->debugfs_as_read_bitmap &= ~(1ULL << as_no);
+
+			/* output the last page fault addr */
+			seq_printf(sfile, "%llu\n",
+				   (u64) kbdev->as[as_no].pf_data.addr);
+		}
+
+	}
+
+	kbase_dev_list_put(kbdev_list);
+
+	return 0;
+}
+
+static int kbase_as_fault_debugfs_open(struct inode *in, struct file *file)
+{
+	return single_open(file, kbase_as_fault_read, in->i_private);
+}
+
+static const struct file_operations as_fault_fops = {
+	.owner = THIS_MODULE,
+	.open = kbase_as_fault_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ *  Initialize debugfs entry for each address space
+ */
+void kbase_as_fault_debugfs_init(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_DEBUG
+	uint i;
+	char as_name[64];
+	struct dentry *debugfs_directory;
+
+	kbdev->debugfs_as_read_bitmap = 0ULL;
+
+	KBASE_DEBUG_ASSERT(kbdev->nr_hw_address_spaces);
+	KBASE_DEBUG_ASSERT(sizeof(kbdev->as[0].pf_data.addr) == sizeof(u64));
+
+	debugfs_directory = debugfs_create_dir("address_spaces",
+					       kbdev->mali_debugfs_directory);
+
+	if (debugfs_directory) {
+		for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+			snprintf(as_name, ARRAY_SIZE(as_name), "as%u", i);
+			debugfs_create_file(as_name, S_IRUGO,
+					    debugfs_directory,
+					    (void *)(uintptr_t)i,
+					    &as_fault_fops);
+		}
+	} else {
+		dev_warn(kbdev->dev,
+			 "unable to create address_spaces debugfs directory");
+	}
+
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* CONFIG_DEBUG_FS */
+	return;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.h
new file mode 100644
index 0000000..496d8b1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_AS_FAULT_DEBUG_FS_H
+#define _KBASE_AS_FAULT_DEBUG_FS_H
+
+/**
+ * kbase_as_fault_debugfs_init() - Add debugfs files for reporting page faults
+ *
+ * @kbdev: Pointer to kbase_device
+ */
+void kbase_as_fault_debugfs_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_as_fault_debugfs_new() - make the last fault available on debugfs
+ *
+ * @kbdev: Pointer to kbase_device
+ * @as_no: The address space the fault occurred on
+ */
+static inline void
+kbase_as_fault_debugfs_new(struct kbase_device *kbdev, int as_no)
+{
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_DEBUG
+	kbdev->debugfs_as_read_bitmap |= (1ULL << as_no);
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* CONFIG_DEBUG_FS */
+	return;
+}
+
+#endif  /*_KBASE_AS_FAULT_DEBUG_FS_H*/
diff --git a/drivers/gpu/arm/midgard/mali_kbase_bits.h b/drivers/gpu/arm/midgard/mali_kbase_bits.h
new file mode 100644
index 0000000..2c11093
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_bits.h
@@ -0,0 +1,41 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_BITS_H_
+#define _KBASE_BITS_H_
+
+#include <linux/version.h>
+
+#if (KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE)
+#include <linux/bits.h>
+#else
+#include <linux/bitops.h>
+#endif
+
+#endif /* _KBASE_BITS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c
new file mode 100644
index 0000000..27a03cf
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c
@@ -0,0 +1,67 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Cache Policy API.
+ */
+
+#include "mali_kbase_cache_policy.h"
+
+/*
+ * The output flags should be a combination of the following values:
+ * KBASE_REG_CPU_CACHED: CPU cache should be enabled
+ * KBASE_REG_GPU_CACHED: GPU cache should be enabled
+ *
+ * NOTE: Some components within the GPU might only be able to access memory
+ * that is KBASE_REG_GPU_CACHED. Refer to the specific GPU implementation for
+ * more details.
+ */
+u32 kbase_cache_enabled(u32 flags, u32 nr_pages)
+{
+	u32 cache_flags = 0;
+
+	CSTD_UNUSED(nr_pages);
+
+	if (!(flags & BASE_MEM_UNCACHED_GPU))
+		cache_flags |= KBASE_REG_GPU_CACHED;
+
+	if (flags & BASE_MEM_CACHED_CPU)
+		cache_flags |= KBASE_REG_CPU_CACHED;
+
+	return cache_flags;
+}
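+
+/*
+ * Example (illustrative only): a region created without BASE_MEM_UNCACHED_GPU
+ * and with BASE_MEM_CACHED_CPU gets both caches enabled:
+ *
+ *	u32 f = kbase_cache_enabled(BASE_MEM_CACHED_CPU, 1);
+ *
+ * which yields f == (KBASE_REG_GPU_CACHED | KBASE_REG_CPU_CACHED).
+ */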
+
+
+void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	dma_sync_single_for_device(kbdev->dev, handle, size, dir);
+}
+
+
+void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	dma_sync_single_for_cpu(kbdev->dev, handle, size, dir);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_cache_policy.h b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.h
new file mode 100644
index 0000000..8a1e529
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Cache Policy API.
+ */
+
+#ifndef _KBASE_CACHE_POLICY_H_
+#define _KBASE_CACHE_POLICY_H_
+
+#include "mali_kbase.h"
+#include "mali_base_kernel.h"
+
+/**
+ * kbase_cache_enabled - Choose the cache policy for a specific region
+ * @flags:    flags describing attributes of the region
+ * @nr_pages: total number of pages (backed or not) for the region
+ *
+ * Tells whether the CPU and GPU caches should be enabled or not for a specific
+ * region.
+ * This function can be modified to customize the cache policy depending on the
+ * flags and size of the region.
+ *
+ * Return: a combination of %KBASE_REG_CPU_CACHED and %KBASE_REG_GPU_CACHED
+ *         depending on the cache policy
+ */
+u32 kbase_cache_enabled(u32 flags, u32 nr_pages);
+
+#endif				/* _KBASE_CACHE_POLICY_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_config.c b/drivers/gpu/arm/midgard/mali_kbase_config.c
new file mode 100644
index 0000000..ce7070d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_config.c
@@ -0,0 +1,48 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015,2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config_defaults.h>
+
+int kbasep_platform_device_init(struct kbase_device *kbdev)
+{
+	struct kbase_platform_funcs_conf *platform_funcs_p;
+
+	platform_funcs_p = (struct kbase_platform_funcs_conf *)PLATFORM_FUNCS;
+	if (platform_funcs_p && platform_funcs_p->platform_init_func)
+		return platform_funcs_p->platform_init_func(kbdev);
+
+	return 0;
+}
+
+void kbasep_platform_device_term(struct kbase_device *kbdev)
+{
+	struct kbase_platform_funcs_conf *platform_funcs_p;
+
+	platform_funcs_p = (struct kbase_platform_funcs_conf *)PLATFORM_FUNCS;
+	if (platform_funcs_p && platform_funcs_p->platform_term_func)
+		platform_funcs_p->platform_term_func(kbdev);
+}
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_config.h b/drivers/gpu/arm/midgard/mali_kbase_config.h
new file mode 100644
index 0000000..1637fcb
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_config.h
@@ -0,0 +1,299 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_config.h
+ * Configuration API and Attributes for KBase
+ */
+
+#ifndef _KBASE_CONFIG_H_
+#define _KBASE_CONFIG_H_
+
+#include <linux/mm.h>
+#include <mali_malisw.h>
+#include <mali_kbase_backend_config.h>
+#include <linux/rbtree.h>
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_config Configuration API and Attributes
+ * @{
+ */
+
+/* Forward declaration of struct kbase_device */
+struct kbase_device;
+
+/**
+ * kbase_platform_funcs_conf - Specifies platform init/term function pointers
+ *
+ * Specifies the functions pointers for platform specific initialization and
+ * termination. By default no functions are required. No additional platform
+ * specific control is necessary.
+ */
+struct kbase_platform_funcs_conf {
+	/**
+	 * platform_init_func - platform specific init function pointer
+	 * @kbdev - kbase_device pointer
+	 *
+	 * Returns 0 on success, negative error code otherwise.
+	 *
+	 * Function pointer for platform specific initialization or NULL if no
+	 * initialization function is required. At this point the GPU is
+	 * not active and its power and clocks are in an unknown (platform
+	 * specific) state, as kbase doesn't yet have control of power and clocks.
+	 *
+	 * The platform specific private pointer kbase_device::platform_context
+	 * can be accessed (and possibly initialized) in here.
+	 */
+	int (*platform_init_func)(struct kbase_device *kbdev);
+	/**
+	 * platform_term_func - platform specific termination function pointer
+	 * @kbdev - kbase_device pointer
+	 *
+	 * Function pointer for platform specific termination or NULL if no
+	 * termination function is required. At this point the GPU will be
+	 * idle but still powered and clocked.
+	 *
+	 * The platform specific private pointer kbase_device::platform_context
+	 * can be accessed (and possibly terminated) in here.
+	 */
+	void (*platform_term_func)(struct kbase_device *kbdev);
+};
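+
+/*
+ * Example (illustrative sketch; my_platform_init/my_platform_term are
+ * hypothetical): a platform wires its hooks up as follows and exposes the
+ * structure through the PLATFORM_FUNCS configuration attribute:
+ *
+ *	static struct kbase_platform_funcs_conf my_platform_funcs = {
+ *		.platform_init_func = &my_platform_init,
+ *		.platform_term_func = &my_platform_term,
+ *	};
+ */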
+
+/*
+ * @brief Specifies the callbacks for power management
+ *
+ * By default no callbacks will be made and the GPU must not be powered off.
+ */
+struct kbase_pm_callback_conf {
+	/** Callback for when the GPU is idle and the power to it can be switched off.
+	 *
+	 * The system integrator can decide whether to either do nothing, just switch off
+	 * the clocks to the GPU, or to completely power down the GPU.
+	 * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the
+	 * platform \em callback's responsibility to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
+	 */
+	void (*power_off_callback)(struct kbase_device *kbdev);
+
+	/** Callback for when the GPU is about to become active and power must be supplied.
+	 *
+	 * This function must not return until the GPU is powered and clocked sufficiently for register access to
+	 * succeed.  The return value specifies whether the GPU was powered down since the call to power_off_callback.
+	 * If the GPU state has been lost then this function must return 1, otherwise it should return 0.
+	 * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the
+	 * platform \em callbacks responsibility to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
+	 *
+	 * The return value of the first call to this function is ignored.
+	 *
+	 * @return 1 if the GPU state may have been lost, 0 otherwise.
+	 */
+	int (*power_on_callback)(struct kbase_device *kbdev);
+
+	/** Callback for when the system is requesting a suspend and GPU power
+	 * must be switched off.
+	 *
+	 * Note that if this callback is present, then this may be called
+	 * without a preceding call to power_off_callback. Therefore this
+	 * callback must be able to take any action that might otherwise happen
+	 * in power_off_callback.
+	 *
+	 * The platform specific private pointer kbase_device::platform_context
+	 * can be accessed and modified in here. It is the platform \em
+	 * callback's responsibility to initialize and terminate this pointer if
+	 * used (see @ref kbase_platform_funcs_conf).
+	 */
+	void (*power_suspend_callback)(struct kbase_device *kbdev);
+
+	/** Callback for when the system is resuming from a suspend and GPU
+	 * power must be switched on.
+	 *
+	 * Note that if this callback is present, then this may be called
+	 * without a following call to power_on_callback. Therefore this
+	 * callback must be able to take any action that might otherwise happen
+	 * in power_on_callback.
+	 *
+	 * The platform specific private pointer kbase_device::platform_context
+	 * can be accessed and modified in here. It is the platform \em
+	 * callback's responsibility to initialize and terminate this pointer if
+	 * used (see @ref kbase_platform_funcs_conf).
+	 */
+	void (*power_resume_callback)(struct kbase_device *kbdev);
+
+	/** Callback for handling runtime power management initialization.
+	 *
+	 * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback
+	 * will become active from calls made to the OS from within this function.
+	 * The runtime calls can be triggered by calls from @ref power_off_callback and @ref power_on_callback.
+	 * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+	 *
+	 * @return 0 on success, else int error code.
+	 */
+	 int (*power_runtime_init_callback)(struct kbase_device *kbdev);
+
+	/** Callback for handling runtime power management termination.
+	 *
+	 * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback
+	 * should no longer be called by the OS on completion of this function.
+	 * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+	 */
+	void (*power_runtime_term_callback)(struct kbase_device *kbdev);
+
+	/** Callback for runtime power-off power management callback
+	 *
+	 * For linux this callback will be called by the kernel runtime_suspend callback.
+	 * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+	 */
+	void (*power_runtime_off_callback)(struct kbase_device *kbdev);
+
+	/** Callback for runtime power-on power management callback
+	 *
+	 * For linux this callback will be called by the kernel runtime_resume callback.
+	 * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+	 *
+	 * @return 0 on success, else OS error code.
+	 */
+	int (*power_runtime_on_callback)(struct kbase_device *kbdev);
+
+	/*
+	 * Optional callback for checking if GPU can be suspended when idle
+	 *
+	 * This callback will be called by the runtime power management core
+	 * when the reference count goes to 0 to provide notification that the
+	 * GPU now seems idle.
+	 *
+	 * If this callback finds that the GPU can't be powered off, or handles
+	 * suspend by powering off directly or queueing up a power off, a
+	 * non-zero value must be returned to prevent the runtime PM core from
+	 * also triggering a suspend.
+	 *
+	 * Returning 0 will cause the runtime PM core to conduct a regular
+	 * autosuspend.
+	 *
+	 * This callback is optional and if not provided regular autosuspend
+	 * will be triggered.
+	 *
+	 * Note: The Linux kernel must have CONFIG_PM_RUNTIME enabled to use
+	 * this feature.
+	 *
+	 * Return 0 if the GPU can be suspended, a positive value if it cannot be
+	 * suspended by runtime PM, else an OS error code.
+	 */
+	int (*power_runtime_idle_callback)(struct kbase_device *kbdev);
+};
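+
+/*
+ * Example (illustrative sketch; the my_gpu_* helpers are hypothetical): a
+ * minimal configuration that only toggles GPU power and leaves all runtime
+ * PM callbacks unset:
+ *
+ *	static struct kbase_pm_callback_conf my_pm_callbacks = {
+ *		.power_off_callback = &my_gpu_power_off,
+ *		.power_on_callback  = &my_gpu_power_on,
+ *	};
+ */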
+
+#ifdef CONFIG_OF
+struct kbase_platform_config {
+};
+#else
+
+/*
+ * @brief Specifies start and end of I/O memory region.
+ */
+struct kbase_io_memory_region {
+	u64 start;
+	u64 end;
+};
+
+/*
+ * @brief Specifies I/O related resources like IRQs and memory region for I/O operations.
+ */
+struct kbase_io_resources {
+	u32                      job_irq_number;
+	u32                      mmu_irq_number;
+	u32                      gpu_irq_number;
+	struct kbase_io_memory_region io_memory_region;
+};
+
+struct kbase_platform_config {
+	const struct kbase_io_resources *io_resources;
+};
+
+#endif /* CONFIG_OF */
+
+/**
+ * @brief Gets the pointer to platform config.
+ *
+ * @return Pointer to the platform config
+ */
+struct kbase_platform_config *kbase_get_platform_config(void);
+
+/**
+ * kbasep_platform_device_init: - Platform specific call to initialize hardware
+ * @kbdev: kbase device pointer
+ *
+ * Function calls a platform defined routine if specified in the configuration
+ * attributes.  The routine can initialize any hardware and context state that
+ * is required for the GPU block to function.
+ *
+ * Return: 0 if no errors have been found in the config.
+ *         Negative error code otherwise.
+ */
+int kbasep_platform_device_init(struct kbase_device *kbdev);
+
+/**
+ * kbasep_platform_device_term - Platform specific call to terminate hardware
+ * @kbdev: Kbase device pointer
+ *
+ * Function calls a platform defined routine if specified in the configuration
+ * attributes. The routine can destroy any platform specific context state and
+ * shut down any hardware functionality that is outside of the Power Management
+ * callbacks.
+ *
+ */
+void kbasep_platform_device_term(struct kbase_device *kbdev);
+
+#ifndef CONFIG_OF
+/**
+ * kbase_platform_register - Register a platform device for the GPU
+ *
+ * This can be used to register a platform device on systems where device tree
+ * is not enabled and the platform initialisation code in the kernel doesn't
+ * create the GPU device. Where possible device tree should be used instead.
+ *
+ * Return: 0 for success, any other fail causes module initialisation to fail
+ */
+int kbase_platform_register(void);
+
+/**
+ * kbase_platform_unregister - Unregister a fake platform device
+ *
+ * Unregister the platform device created with kbase_platform_register()
+ */
+void kbase_platform_unregister(void);
+#endif
+
+	  /** @} *//* end group kbase_config */
+	  /** @} *//* end group base_kbase_api */
+	  /** @} *//* end group base_api */
+
+#endif				/* _KBASE_CONFIG_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h b/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h
new file mode 100644
index 0000000..447e059
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h
@@ -0,0 +1,217 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file mali_kbase_config_defaults.h
+ *
+ * Default values for configuration settings
+ *
+ */
+
+#ifndef _KBASE_CONFIG_DEFAULTS_H_
+#define _KBASE_CONFIG_DEFAULTS_H_
+
+/* Include mandatory definitions per platform */
+#include <mali_kbase_config_platform.h>
+
+/**
+ * Boolean indicating whether the driver is configured to be secure at
+ * the cost of a potential loss of performance.
+ *
+ * This currently affects only r0p0-15dev0 HW and earlier.
+ *
+ * On r0p0-15dev0 HW and earlier, there are tradeoffs between security and
+ * performance:
+ *
+ * - When this is set to true, the driver remains fully secure,
+ *   but potentially loses performance compared with setting this to
+ *   false.
+ * - When set to false, the driver is open to certain security
+ *   attacks.
+ *
+ * From r0p0-00rel0 onwards, there is no security loss by setting this to
+ * false, and no performance loss by setting it to true.
+ */
+#define DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE false
+
+enum {
+	/**
+	 * Use unrestricted Address ID width on the AXI bus.
+	 */
+	KBASE_AID_32 = 0x0,
+
+	/**
+	 * Restrict GPU to a half of maximum Address ID count.
+	 * This will reduce performance, but reduce bus load due to GPU.
+	 */
+	KBASE_AID_16 = 0x3,
+
+	/**
+	 * Restrict GPU to a quarter of maximum Address ID count.
+	 * This will reduce performance, but reduce bus load due to GPU.
+	 */
+	KBASE_AID_8  = 0x2,
+
+	/**
+	 * Restrict GPU to an eighth of maximum Address ID count.
+	 * This will reduce performance, but reduce bus load due to GPU.
+	 */
+	KBASE_AID_4  = 0x1
+};
+
+enum {
+	/**
+	 * Use unrestricted Address ID width on the AXI bus.
+	 * Restricting ID width will reduce performance & bus load due to GPU.
+	 */
+	KBASE_3BIT_AID_32 = 0x0,
+
+	/* Restrict GPU to 7/8 of maximum Address ID count. */
+	KBASE_3BIT_AID_28 = 0x1,
+
+	/* Restrict GPU to 3/4 of maximum Address ID count. */
+	KBASE_3BIT_AID_24 = 0x2,
+
+	/* Restrict GPU to 5/8 of maximum Address ID count. */
+	KBASE_3BIT_AID_20 = 0x3,
+
+	/* Restrict GPU to 1/2 of maximum Address ID count.  */
+	KBASE_3BIT_AID_16 = 0x4,
+
+	/* Restrict GPU to 3/8 of maximum Address ID count. */
+	KBASE_3BIT_AID_12 = 0x5,
+
+	/* Restrict GPU to 1/4 of maximum Address ID count. */
+	KBASE_3BIT_AID_8  = 0x6,
+
+	/* Restrict GPU to 1/8 of maximum Address ID count. */
+	KBASE_3BIT_AID_4  = 0x7
+};
+
+/**
+ * Default period for DVFS sampling
+ */
+#define DEFAULT_PM_DVFS_PERIOD 100 /* 100ms */
+
+/**
+ * Power Management poweroff tick granularity. This is in nanoseconds to
+ * allow HR timer support.
+ *
+ * On each scheduling tick, the power manager core may decide to:
+ * -# Power off one or more shader cores
+ * -# Power off the entire GPU
+ */
+#define DEFAULT_PM_GPU_POWEROFF_TICK_NS (400000) /* 400us */
+
+/**
+ * Power Manager number of ticks before shader cores are powered off
+ */
+#define DEFAULT_PM_POWEROFF_TICK_SHADER (2) /* 400-800us */
+
+/**
+ * Default scheduling tick granularity
+ */
+#define DEFAULT_JS_SCHEDULING_PERIOD_NS    (100000000u) /* 100ms */
+
+/**
+ * Default minimum number of scheduling ticks before jobs are soft-stopped.
+ *
+ * This defines the time-slice for a job (which may be different from that of a
+ * context)
+ */
+#define DEFAULT_JS_SOFT_STOP_TICKS       (1) /* 100ms-200ms */
+
+/**
+ * Default minimum number of scheduling ticks before CL jobs are soft-stopped.
+ */
+#define DEFAULT_JS_SOFT_STOP_TICKS_CL    (1) /* 100ms-200ms */
+
+/**
+ * Default minimum number of scheduling ticks before jobs are hard-stopped
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_SS    (50) /* 5s */
+#define DEFAULT_JS_HARD_STOP_TICKS_SS_8408  (300) /* 30s */
+
+/**
+ * Default minimum number of scheduling ticks before CL jobs are hard-stopped.
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_CL    (50) /* 5s */
+
+/**
+ * Default minimum number of scheduling ticks before jobs are hard-stopped
+ * during dumping
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_DUMPING   (15000) /* 1500s */
+
+/**
+ * Default timeout for some software jobs, after which the software event wait
+ * jobs will be cancelled.
+ */
+#define DEFAULT_JS_SOFT_JOB_TIMEOUT (3000) /* 3s */
+
+/**
+ * Default minimum number of scheduling ticks before the GPU is reset to clear a
+ * "stuck" job
+ */
+#define DEFAULT_JS_RESET_TICKS_SS           (55) /* 5.5s */
+#define DEFAULT_JS_RESET_TICKS_SS_8408     (450) /* 45s */
+
+/**
+ * Default minimum number of scheduling ticks before the GPU is reset to clear a
+ * "stuck" CL job.
+ */
+#define DEFAULT_JS_RESET_TICKS_CL        (55) /* 5.5s */
+
+/**
+ * Default minimum number of scheduling ticks before the GPU is reset to clear a
+ * "stuck" job during dumping.
+ */
+#define DEFAULT_JS_RESET_TICKS_DUMPING   (15020) /* 1502s */
+
+/**
+ * Default number of milliseconds given for other jobs on the GPU to be
+ * soft-stopped when the GPU needs to be reset.
+ */
+#define DEFAULT_RESET_TIMEOUT_MS (3000) /* 3s */
+
+/**
+ * Default timeslice that a context is scheduled in for, in nanoseconds.
+ *
+ * When a context has used up this amount of time across its jobs, it is
+ * scheduled out to let another run.
+ *
+ * @note the resolution is nanoseconds (ns) here, because that's the format
+ * often used by the OS.
+ */
+#define DEFAULT_JS_CTX_TIMESLICE_NS (50000000) /* 50ms */
+
+/**
+ * Maximum frequency (in kHz) at which the GPU can be clocked. For some
+ * platforms
+ * this isn't available, so we simply define a dummy value here. If devfreq
+ * is enabled the value will be read from there, otherwise this should be
+ * overridden by defining GPU_FREQ_KHZ_MAX in the platform file.
+ */
+#define DEFAULT_GPU_FREQ_KHZ_MAX (5000)
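+
+/*
+ * Example (illustrative): a platform without devfreq can override the dummy
+ * maximum in its mali_kbase_config_platform.h, for instance for a
+ * hypothetical 800 MHz part:
+ *
+ *   #define GPU_FREQ_KHZ_MAX (800000)
+ */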
+
+#endif /* _KBASE_CONFIG_DEFAULTS_H_ */
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_context.c b/drivers/gpu/arm/midgard/mali_kbase_context.c
new file mode 100644
index 0000000..e72ce70
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_context.c
@@ -0,0 +1,332 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Base kernel context APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_dma_fence.h>
+#include <mali_kbase_ctx_sched.h>
+#include <mali_kbase_mem_pool_group.h>
+#include <mali_kbase_tracepoints.h>
+
+struct kbase_context *
+kbase_create_context(struct kbase_device *kbdev, bool is_compat,
+	base_context_create_flags const flags,
+	unsigned long const api_version,
+	struct file *const filp)
+{
+	struct kbase_context *kctx;
+	int err;
+	struct page *p;
+	struct kbasep_js_kctx_info *js_kctx_info = NULL;
+	unsigned long irq_flags = 0;
+
+	if (WARN_ON(!kbdev))
+		goto out;
+
+	/* Validate flags */
+	if (WARN_ON(flags != (flags & BASEP_CONTEXT_CREATE_KERNEL_FLAGS)))
+		goto out;
+
+	/* zero-initialized, as a lot of code assumes it's zeroed out on create */
+	kctx = vzalloc(sizeof(*kctx));
+
+	if (!kctx)
+		goto out;
+
+	/* creating a context is considered a disjoint event */
+	kbase_disjoint_event(kbdev);
+
+	kctx->kbdev = kbdev;
+	kctx->as_nr = KBASEP_AS_NR_INVALID;
+	atomic_set(&kctx->refcount, 0);
+	if (is_compat)
+		kbase_ctx_flag_set(kctx, KCTX_COMPAT);
+#if defined(CONFIG_64BIT)
+	else
+		kbase_ctx_flag_set(kctx, KCTX_FORCE_SAME_VA);
+#endif /* defined(CONFIG_64BIT) */
+
+	spin_lock_init(&kctx->mm_update_lock);
+	kctx->process_mm = NULL;
+	atomic_set(&kctx->nonmapped_pages, 0);
+	atomic_set(&kctx->permanent_mapped_pages, 0);
+	kctx->slots_pullable = 0;
+	kctx->tgid = current->tgid;
+	kctx->pid = current->pid;
+
+	err = kbase_mem_pool_group_init(&kctx->mem_pools, kbdev,
+		&kbdev->mem_pool_defaults, &kbdev->mem_pools);
+	if (err)
+		goto free_kctx;
+
+	err = kbase_mem_evictable_init(kctx);
+	if (err)
+		goto free_both_pools;
+
+	atomic_set(&kctx->used_pages, 0);
+
+	err = kbase_jd_init(kctx);
+	if (err)
+		goto deinit_evictable;
+
+	err = kbasep_js_kctx_init(kctx);
+	if (err)
+		goto free_jd;	/* safe to call kbasep_js_kctx_term in this case */
+
+	err = kbase_event_init(kctx);
+	if (err)
+		goto free_jd;
+
+	atomic_set(&kctx->drain_pending, 0);
+
+	mutex_init(&kctx->reg_lock);
+
+	spin_lock_init(&kctx->mem_partials_lock);
+	INIT_LIST_HEAD(&kctx->mem_partials);
+
+	INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
+	spin_lock_init(&kctx->waiting_soft_jobs_lock);
+	err = kbase_dma_fence_init(kctx);
+	if (err)
+		goto free_event;
+
+	err = kbase_mmu_init(kbdev, &kctx->mmu, kctx,
+		base_context_mmu_group_id_get(flags));
+	if (err)
+		goto term_dma_fence;
+
+	p = kbase_mem_alloc_page(
+		&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK]);
+	if (!p)
+		goto no_sink_page;
+	kctx->aliasing_sink_page = as_tagged(page_to_phys(p));
+
+	init_waitqueue_head(&kctx->event_queue);
+
+	kctx->cookies = KBASE_COOKIE_MASK;
+
+	/* Make sure page 0 is not used... */
+	err = kbase_region_tracker_init(kctx);
+	if (err)
+		goto no_region_tracker;
+
+	err = kbase_sticky_resource_init(kctx);
+	if (err)
+		goto no_sticky;
+
+	err = kbase_jit_init(kctx);
+	if (err)
+		goto no_jit;
+
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+	atomic_set(&kctx->jctx.work_id, 0);
+#endif
+
+	kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;
+
+	mutex_init(&kctx->legacy_hwcnt_lock);
+
+	kbase_timer_setup(&kctx->soft_job_timeout,
+			  kbasep_soft_job_timeout_worker);
+
+	mutex_lock(&kbdev->kctx_list_lock);
+	list_add(&kctx->kctx_list_link, &kbdev->kctx_list);
+	KBASE_TLSTREAM_TL_NEW_CTX(kbdev, kctx, kctx->id, (u32)(kctx->tgid));
+	mutex_unlock(&kbdev->kctx_list_lock);
+
+	kctx->api_version = api_version;
+	kctx->filp = filp;
+
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+
+	/* Translate the flags */
+	if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
+		kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
+
+	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+	return kctx;
+
+no_jit:
+	kbase_gpu_vm_lock(kctx);
+	kbase_sticky_resource_term(kctx);
+	kbase_gpu_vm_unlock(kctx);
+no_sticky:
+	kbase_region_tracker_term(kctx);
+no_region_tracker:
+	kbase_mem_pool_free(
+		&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK], p, false);
+no_sink_page:
+	kbase_mmu_term(kbdev, &kctx->mmu);
+term_dma_fence:
+	kbase_dma_fence_term(kctx);
+free_event:
+	kbase_event_cleanup(kctx);
+free_jd:
+	/* Safe to call even when not initialized (assuming kctx was sufficiently zeroed) */
+	kbasep_js_kctx_term(kctx);
+	kbase_jd_exit(kctx);
+deinit_evictable:
+	kbase_mem_evictable_deinit(kctx);
+free_both_pools:
+	kbase_mem_pool_group_term(&kctx->mem_pools);
+free_kctx:
+	vfree(kctx);
+out:
+	return NULL;
+}
+KBASE_EXPORT_SYMBOL(kbase_create_context);
+
+static void kbase_reg_pending_dtor(struct kbase_device *kbdev,
+		struct kbase_va_region *reg)
+{
+	dev_dbg(kbdev->dev, "Freeing pending unmapped region\n");
+	kbase_mem_phy_alloc_put(reg->cpu_alloc);
+	kbase_mem_phy_alloc_put(reg->gpu_alloc);
+	kfree(reg);
+}
+
+void kbase_destroy_context(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev;
+	int pages;
+	unsigned long pending_regions_to_clean;
+	unsigned long flags;
+	struct page *p;
+
+	if (WARN_ON(!kctx))
+		return;
+
+	kbdev = kctx->kbdev;
+	if (WARN_ON(!kbdev))
+		return;
+
+	mutex_lock(&kbdev->kctx_list_lock);
+	KBASE_TLSTREAM_TL_DEL_CTX(kbdev, kctx);
+	list_del(&kctx->kctx_list_link);
+	mutex_unlock(&kbdev->kctx_list_lock);
+
+	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);
+
+	/* Ensure the core is powered up for the destroy process */
+	/* A suspend won't happen here, because we're in a syscall from a userspace
+	 * thread. */
+	kbase_pm_context_active(kbdev);
+
+	kbase_mem_pool_group_mark_dying(&kctx->mem_pools);
+
+	kbase_jd_zap_context(kctx);
+
+	/* We have already waited for the jobs to complete (and hereafter there
+	 * can be no more submissions for the context). However the wait could
+	 * have timed out and there could still be work items in flight that
+	 * would do the completion processing of jobs.
+	 * kbase_jd_exit() will destroy the 'job_done_wq'. Destroying the wq
+	 * will cause it to drain and implicitly wait for those work items to
+	 * complete.
+	 */
+	kbase_jd_exit(kctx);
+
+#ifdef CONFIG_DEBUG_FS
+	/* Removing the rest of the debugfs entries here as we want to keep the
+	 * atom debugfs interface alive until all atoms have completed. This
+	 * is useful for debugging hung contexts. */
+	debugfs_remove_recursive(kctx->kctx_dentry);
+	kbase_debug_job_fault_context_term(kctx);
+#endif
+
+	kbase_event_cleanup(kctx);
+
+
+	/*
+	 * JIT must be terminated before the code below as it must be called
+	 * without the region lock being held.
+	 * The code above ensures no new JIT allocations can be made by the
+	 * time we get to this point of context teardown.
+	 */
+	kbase_jit_term(kctx);
+
+	kbase_gpu_vm_lock(kctx);
+
+	kbase_sticky_resource_term(kctx);
+
+	/* drop the aliasing sink page now that it can't be mapped anymore */
+	p = as_page(kctx->aliasing_sink_page);
+	kbase_mem_pool_free(&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK],
+		p, false);
+
+	/* free pending region setups */
+	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
+	while (pending_regions_to_clean) {
+		unsigned int cookie = __ffs(pending_regions_to_clean);
+
+		BUG_ON(!kctx->pending_regions[cookie]);
+
+		kbase_reg_pending_dtor(kbdev, kctx->pending_regions[cookie]);
+
+		kctx->pending_regions[cookie] = NULL;
+		pending_regions_to_clean &= ~(1UL << cookie);
+	}
+
+	kbase_region_tracker_term(kctx);
+	kbase_gpu_vm_unlock(kctx);
+
+	/* Safe to call even when not initialized (assuming kctx was sufficiently zeroed) */
+	kbasep_js_kctx_term(kctx);
+
+	kbase_dma_fence_term(kctx);
+
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
+	kbase_ctx_sched_remove_ctx(kctx);
+	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	kbase_mmu_term(kbdev, &kctx->mmu);
+
+	pages = atomic_read(&kctx->used_pages);
+	if (pages != 0)
+		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
+
+	kbase_mem_evictable_deinit(kctx);
+
+	kbase_mem_pool_group_term(&kctx->mem_pools);
+
+	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
+
+	vfree(kctx);
+
+	kbase_pm_context_idle(kbdev);
+}
+KBASE_EXPORT_SYMBOL(kbase_destroy_context);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_context.h b/drivers/gpu/arm/midgard/mali_kbase_context.h
new file mode 100644
index 0000000..5037b4e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_context.h
@@ -0,0 +1,125 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017, 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_CONTEXT_H_
+#define _KBASE_CONTEXT_H_
+
+#include <linux/atomic.h>
+
+/**
+ * kbase_create_context() - Create a kernel base context.
+ *
+ * @kbdev:       Object representing an instance of GPU platform device,
+ *               allocated from the probe method of the Mali driver.
+ * @is_compat:   Force creation of a 32-bit context
+ * @flags:       Flags to set, which shall be any combination of
+ *               BASEP_CONTEXT_CREATE_KERNEL_FLAGS.
+ * @api_version: Application program interface version, as encoded in
+ *               a single integer by the KBASE_API_VERSION macro.
+ * @filp:        Pointer to the struct file corresponding to device file
+ *               /dev/malixx instance, passed to the file's open method.
+ *
+ * Up to one context can be created for each client that opens the device file
+ * /dev/malixx. Context creation is deferred until a special ioctl() system call
+ * is made on the device file. Each context has its own GPU address space.
+ *
+ * Return: new kbase context or NULL on failure
+ */
+struct kbase_context *
+kbase_create_context(struct kbase_device *kbdev, bool is_compat,
+	base_context_create_flags const flags,
+	unsigned long api_version,
+	struct file *filp);
+
+/**
+ * kbase_destroy_context - Destroy a kernel base context.
+ * @kctx: Context to destroy
+ *
+ * Will release all outstanding regions.
+ */
+void kbase_destroy_context(struct kbase_context *kctx);
+
+/**
+ * kbase_ctx_flag - Check if @flag is set on @kctx
+ * @kctx: Pointer to kbase context to check
+ * @flag: Flag to check
+ *
+ * Return: true if @flag is set on @kctx, false if not.
+ */
+static inline bool kbase_ctx_flag(struct kbase_context *kctx,
+				      enum kbase_context_flags flag)
+{
+	return atomic_read(&kctx->flags) & flag;
+}
+
+/**
+ * kbase_ctx_flag_clear - Clear @flag on @kctx
+ * @kctx: Pointer to kbase context
+ * @flag: Flag to clear
+ *
+ * Clear the @flag on @kctx. This is done atomically, so other flags being
+ * cleared or set at the same time will be safe.
+ *
+ * Some flags have locking requirements, check the documentation for the
+ * respective flags.
+ */
+static inline void kbase_ctx_flag_clear(struct kbase_context *kctx,
+					enum kbase_context_flags flag)
+{
+#if KERNEL_VERSION(4, 3, 0) > LINUX_VERSION_CODE
+	/*
+	 * Earlier kernel versions don't have atomic_andnot() or
+	 * atomic_and(). atomic_clear_mask() was only available on some
+	 * architectures and was removed from arm and arm64 in v3.13.
+	 *
+	 * Use a compare-exchange loop to clear the flag on pre-4.3 kernels;
+	 * from 4.3 onwards atomic_andnot() is available.
+	 */
+	int old, new;
+
+	do {
+		old = atomic_read(&kctx->flags);
+		new = old & ~flag;
+	} while (atomic_cmpxchg(&kctx->flags, old, new) != old);
+#else
+	atomic_andnot(flag, &kctx->flags);
+#endif
+}
+
+/**
+ * kbase_ctx_flag_set - Set @flag on @kctx
+ * @kctx: Pointer to kbase context
+ * @flag: Flag to set
+ *
+ * Set the @flag on @kctx. This is done atomically, so other flags being
+ * cleared or set at the same time will be safe.
+ *
+ * Some flags have locking requirements, check the documentation for the
+ * respective flags.
+ */
+static inline void kbase_ctx_flag_set(struct kbase_context *kctx,
+				      enum kbase_context_flags flag)
+{
+	atomic_or(flag, &kctx->flags);
+}
+#endif /* _KBASE_CONTEXT_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_core_linux.c b/drivers/gpu/arm/midgard/mali_kbase_core_linux.c
new file mode 100644
index 0000000..57acbf9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_core_linux.c
@@ -0,0 +1,4631 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_gator.h>
+#include <mali_kbase_mem_linux.h>
+#ifdef CONFIG_MALI_DEVFREQ
+#include <linux/devfreq.h>
+#include <backend/gpu/mali_kbase_devfreq.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <ipa/mali_kbase_ipa_debugfs.h>
+#endif /* CONFIG_DEVFREQ_THERMAL */
+#endif /* CONFIG_MALI_DEVFREQ */
+#ifdef CONFIG_MALI_NO_MALI
+#include "mali_kbase_model_linux.h"
+#include <backend/gpu/mali_kbase_model_dummy.h>
+#endif /* CONFIG_MALI_NO_MALI */
+#include "mali_kbase_mem_profile_debugfs_buf_size.h"
+#include "mali_kbase_debug_mem_view.h"
+#include "mali_kbase_mem.h"
+#include "mali_kbase_mem_pool_debugfs.h"
+#include "mali_kbase_debugfs_helper.h"
+#if !MALI_CUSTOMER_RELEASE
+#include "mali_kbase_regs_dump_debugfs.h"
+#endif /* !MALI_CUSTOMER_RELEASE */
+#include "mali_kbase_regs_history_debugfs.h"
+#include <mali_kbase_hwaccess_backend.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
+#include <mali_kbase_reset_gpu.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include "mali_kbase_ioctl.h"
+#include "mali_kbase_hwcnt_context.h"
+#include "mali_kbase_hwcnt_virtualizer.h"
+#include "mali_kbase_hwcnt_legacy.h"
+#include "mali_kbase_vinstr.h"
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+#include "mali_kbase_gwt.h"
+#endif
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/semaphore.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/compat.h>	/* is_compat_task/in_compat_syscall */
+#include <linux/mman.h>
+#include <linux/version.h>
+#include <mali_kbase_hw.h>
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#include <mali_kbase_sync.h>
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+
+#include <mali_kbase_config.h>
+
+
+#if (KERNEL_VERSION(3, 13, 0) <= LINUX_VERSION_CODE)
+#include <linux/pm_opp.h>
+#else
+#include <linux/opp.h>
+#endif
+
+#include <linux/pm_runtime.h>
+
+#include <mali_kbase_timeline.h>
+
+#include <mali_kbase_as_fault_debugfs.h>
+
+/* GPU IRQ Tags */
+#define JOB_IRQ_TAG	0
+#define MMU_IRQ_TAG	1
+#define GPU_IRQ_TAG	2
+
+static int kbase_dev_nr;
+
+static DEFINE_MUTEX(kbase_dev_list_lock);
+static LIST_HEAD(kbase_dev_list);
+
+#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
+
+/**
+ * kbase_file_new - Create an object representing a device file
+ *
+ * @kbdev:  An instance of the GPU platform device, allocated from the probe
+ *          method of the driver.
+ * @filp:   Pointer to the struct file corresponding to device file
+ *          /dev/malixx instance, passed to the file's open method.
+ *
+ * In its initial state, the device file has no context (i.e. no GPU
+ * address space) and no API version number. Both must be assigned before
+ * kbase_file_get_kctx_if_setup_complete() can be used successfully.
+ *
+ * Return: Address of an object representing a simulated device file, or NULL
+ *         on failure.
+ */
+static struct kbase_file *kbase_file_new(struct kbase_device *const kbdev,
+	struct file *const filp)
+{
+	struct kbase_file *const kfile = kmalloc(sizeof(*kfile), GFP_KERNEL);
+
+	if (kfile) {
+		kfile->kbdev = kbdev;
+		kfile->filp = filp;
+		kfile->kctx = NULL;
+		kfile->api_version = 0;
+		atomic_set(&kfile->setup_state, KBASE_FILE_NEED_VSN);
+	}
+	return kfile;
+}
+
+/**
+ * kbase_file_set_api_version - Set the application programmer interface version
+ *
+ * @kfile:  A device file created by kbase_file_new()
+ * @major:  Major version number (must not exceed 12 bits)
+ * @minor:  Minor version number (must not exceed 12 bits)
+ *
+ * An application programmer interface (API) version must be specified
+ * before calling kbase_file_create_kctx(), otherwise an error is returned.
+ *
+ * If a version number was already set for the given @kfile (or is in the
+ * process of being set by another thread) then an error is returned.
+ *
+ * Return: 0 if successful, otherwise a negative error code.
+ */
+static int kbase_file_set_api_version(struct kbase_file *const kfile,
+	u16 const major, u16 const minor)
+{
+	if (WARN_ON(!kfile))
+		return -EINVAL;
+
+	/* setup pending, try to signal that we'll do the setup,
+	 * if setup was already in progress, fail this call
+	 */
+	if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_VSN,
+		KBASE_FILE_VSN_IN_PROGRESS) != KBASE_FILE_NEED_VSN)
+		return -EPERM;
+
+	/* save the proposed version number for later use */
+	kfile->api_version = KBASE_API_VERSION(major, minor);
+
+	atomic_set(&kfile->setup_state, KBASE_FILE_NEED_CTX);
+	return 0;
+}
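+
+/*
+ * The setup_state field drives a small per-file state machine (a sketch
+ * inferred from the code in this file):
+ *
+ *   KBASE_FILE_NEED_VSN --> KBASE_FILE_VSN_IN_PROGRESS --> KBASE_FILE_NEED_CTX
+ *   KBASE_FILE_NEED_CTX --> KBASE_FILE_CTX_IN_PROGRESS --> KBASE_FILE_COMPLETE
+ *
+ * The atomic_cmpxchg() calls ensure that only one thread performs each
+ * transition; a concurrent caller that loses the race gets -EPERM.
+ */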
+
+/**
+ * kbase_file_get_api_version - Get the application programmer interface version
+ *
+ * @kfile:  A device file created by kbase_file_new()
+ *
+ * Return: The version number (encoded with KBASE_API_VERSION) or 0 if none has
+ *         been set.
+ */
+static unsigned long kbase_file_get_api_version(struct kbase_file *const kfile)
+{
+	if (WARN_ON(!kfile))
+		return 0;
+
+	if (atomic_read(&kfile->setup_state) < KBASE_FILE_NEED_CTX)
+		return 0;
+
+	return kfile->api_version;
+}
+
+/**
+ * kbase_file_create_kctx - Create a kernel base context
+ *
+ * @kfile:  A device file created by kbase_file_new()
+ * @flags:  Flags to set, which can be any combination of
+ *          BASEP_CONTEXT_CREATE_KERNEL_FLAGS.
+ *
+ * This creates a new context for the GPU platform device instance that was
+ * specified when kbase_file_new() was called. Each context has its own GPU
+ * address space. If a context was already created for the given @kfile (or is
+ * in the process of being created for it by another thread) then an error is
+ * returned.
+ *
+ * An API version number must have been set by kbase_file_set_api_version()
+ * before calling this function, otherwise an error is returned.
+ *
+ * Return: 0 if a new context was created, otherwise a negative error code.
+ */
+static int kbase_file_create_kctx(struct kbase_file *kfile,
+	base_context_create_flags flags);
+
+/**
+ * kbase_file_get_kctx_if_setup_complete - Get a kernel base context
+ *                                         pointer from a device file
+ *
+ * @kfile: A device file created by kbase_file_new()
+ *
+ * This function returns NULL if no context has been created for the given
+ * @kfile. This makes it safe to use in circumstances where the order of
+ * initialization cannot be enforced, but only if the caller checks the
+ * return value.
+ *
+ * Return: Address of the kernel base context associated with the @kfile, or
+ *         NULL if no context exists.
+ */
+static struct kbase_context *kbase_file_get_kctx_if_setup_complete(
+	struct kbase_file *const kfile)
+{
+	if (WARN_ON(!kfile) ||
+		atomic_read(&kfile->setup_state) != KBASE_FILE_COMPLETE ||
+		WARN_ON(!kfile->kctx))
+		return NULL;
+
+	return kfile->kctx;
+}
+
+/**
+ * kbase_file_delete - Destroy an object representing a device file
+ *
+ * @kfile: A device file created by kbase_file_new()
+ *
+ * If any context was created for the @kfile then it is destroyed.
+ */
+static void kbase_file_delete(struct kbase_file *const kfile)
+{
+	struct kbase_device *kbdev = NULL;
+
+	if (WARN_ON(!kfile))
+		return;
+
+	kfile->filp->private_data = NULL;
+	kbdev = kfile->kbdev;
+
+	if (atomic_read(&kfile->setup_state) == KBASE_FILE_COMPLETE) {
+		struct kbase_context *kctx = kfile->kctx;
+
+#ifdef CONFIG_DEBUG_FS
+		kbasep_mem_profile_debugfs_remove(kctx);
+#endif
+
+		mutex_lock(&kctx->legacy_hwcnt_lock);
+		/* If this client was performing hardware counter dumping and
+		 * did not explicitly detach itself, destroy it now
+		 */
+		kbase_hwcnt_legacy_client_destroy(kctx->legacy_hwcnt_cli);
+		kctx->legacy_hwcnt_cli = NULL;
+		mutex_unlock(&kctx->legacy_hwcnt_lock);
+
+		kbase_destroy_context(kctx);
+
+		dev_dbg(kbdev->dev, "deleted base context\n");
+	}
+
+	kbase_release_device(kbdev);
+
+	kfree(kfile);
+}
+
+static int kbase_api_handshake(struct kbase_file *kfile,
+			       struct kbase_ioctl_version_check *version)
+{
+	int err = 0;
+
+	switch (version->major) {
+	case BASE_UK_VERSION_MAJOR:
+		/* set minor to be the lowest common */
+		version->minor = min_t(int, BASE_UK_VERSION_MINOR,
+				       (int)version->minor);
+		break;
+	default:
+		/* We return our actual version regardless of whether it
+		 * matches the version requested by userspace -
+		 * userspace can bail if it can't handle this
+		 * version
+		 */
+		version->major = BASE_UK_VERSION_MAJOR;
+		version->minor = BASE_UK_VERSION_MINOR;
+		break;
+	}
+
+	/* save the proposed version number for later use */
+	err = kbase_file_set_api_version(kfile, version->major, version->minor);
+	if (unlikely(err))
+		return err;
+
+	/* For backward compatibility, we may need to create the context before
+	 * the flags have been set. Originally it was created on file open
+	 * (with job submission disabled) but we don't support that usage.
+	 */
+	if (kbase_file_get_api_version(kfile) < KBASE_API_VERSION(11, 15))
+		err = kbase_file_create_kctx(kfile,
+			BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED);
+
+	return err;
+}
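+
+/*
+ * Example (illustrative userspace side of the handshake; error handling
+ * omitted, and the CLIENT_UK_VERSION_* constants stand for whatever version
+ * the client library was built against):
+ *
+ *   struct kbase_ioctl_version_check vc = {
+ *           .major = CLIENT_UK_VERSION_MAJOR,
+ *           .minor = CLIENT_UK_VERSION_MINOR,
+ *   };
+ *   ioctl(fd, KBASE_IOCTL_VERSION_CHECK, &vc);
+ *   (vc now holds the version agreed with the kernel side)
+ */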
+
+/**
+ * enum mali_error - Mali error codes shared with userspace
+ *
+ * This is a subset of the common Mali errors that can be returned to userspace.
+ * Values of matching user and kernel space enumerators MUST be the same.
+ * MALI_ERROR_NONE is guaranteed to be 0.
+ *
+ * @MALI_ERROR_NONE: Success
+ * @MALI_ERROR_OUT_OF_GPU_MEMORY: Not used in the kernel driver
+ * @MALI_ERROR_OUT_OF_MEMORY: Memory allocation failure
+ * @MALI_ERROR_FUNCTION_FAILED: Generic error code
+ */
+enum mali_error {
+	MALI_ERROR_NONE = 0,
+	MALI_ERROR_OUT_OF_GPU_MEMORY,
+	MALI_ERROR_OUT_OF_MEMORY,
+	MALI_ERROR_FUNCTION_FAILED,
+};
+
+enum {
+	inited_mem = (1u << 0),
+	inited_js = (1u << 1),
+	/* Bit number 2 was earlier assigned to the runtime-pm initialization
+	 * stage (which has been merged with the backend_early stage).
+	 */
+#ifdef CONFIG_MALI_DEVFREQ
+	inited_devfreq = (1u << 3),
+#endif /* CONFIG_MALI_DEVFREQ */
+	inited_tlstream = (1u << 4),
+	inited_backend_early = (1u << 5),
+	inited_hwcnt_gpu_iface = (1u << 6),
+	inited_hwcnt_gpu_ctx = (1u << 7),
+	inited_hwcnt_gpu_virt = (1u << 8),
+	inited_vinstr = (1u << 9),
+	inited_backend_late = (1u << 10),
+	inited_device = (1u << 11),
+	inited_job_fault = (1u << 13),
+	inited_sysfs_group = (1u << 14),
+	inited_misc_register = (1u << 15),
+	inited_get_device = (1u << 16),
+	inited_dev_list = (1u << 17),
+	inited_debugfs = (1u << 18),
+	inited_gpu_device = (1u << 19),
+	inited_registers_map = (1u << 20),
+	inited_io_history = (1u << 21),
+	inited_power_control = (1u << 22),
+	inited_buslogger = (1u << 23),
+	inited_protected = (1u << 24),
+	inited_ctx_sched = (1u << 25)
+};
+
+static struct kbase_device *to_kbase_device(struct device *dev)
+{
+	return dev_get_drvdata(dev);
+}
+
+static int assign_irqs(struct platform_device *pdev)
+{
+	struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+	int i;
+
+	if (!kbdev)
+		return -ENODEV;
+
+	/* 3 IRQ resources */
+	for (i = 0; i < 3; i++) {
+		struct resource *irq_res;
+		int irqtag;
+
+		irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+		if (!irq_res) {
+			dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
+			return -ENOENT;
+		}
+
+#ifdef CONFIG_OF
+		if (!strncmp(irq_res->name, "JOB", 4)) {
+			irqtag = JOB_IRQ_TAG;
+		} else if (!strncmp(irq_res->name, "MMU", 4)) {
+			irqtag = MMU_IRQ_TAG;
+		} else if (!strncmp(irq_res->name, "GPU", 4)) {
+			irqtag = GPU_IRQ_TAG;
+		} else {
+			dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
+				irq_res->name);
+			return -EINVAL;
+		}
+#else
+		irqtag = i;
+#endif /* CONFIG_OF */
+		kbdev->irqs[irqtag].irq = irq_res->start;
+		kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK;
+	}
+
+	return 0;
+}
+
+/*
+ * API to acquire device list mutex and
+ * return pointer to the device list head
+ */
+const struct list_head *kbase_dev_list_get(void)
+{
+	mutex_lock(&kbase_dev_list_lock);
+	return &kbase_dev_list;
+}
+KBASE_EXPORT_TEST_API(kbase_dev_list_get);
+
+/* API to release the device list mutex */
+void kbase_dev_list_put(const struct list_head *dev_list)
+{
+	mutex_unlock(&kbase_dev_list_lock);
+}
+KBASE_EXPORT_TEST_API(kbase_dev_list_put);
+
+/* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
+struct kbase_device *kbase_find_device(int minor)
+{
+	struct kbase_device *kbdev = NULL;
+	struct list_head *entry;
+	const struct list_head *dev_list = kbase_dev_list_get();
+
+	list_for_each(entry, dev_list) {
+		struct kbase_device *tmp;
+
+		tmp = list_entry(entry, struct kbase_device, entry);
+		if (tmp->mdev.minor == minor || minor == -1) {
+			kbdev = tmp;
+			get_device(kbdev->dev);
+			break;
+		}
+	}
+	kbase_dev_list_put(dev_list);
+
+	return kbdev;
+}
+EXPORT_SYMBOL(kbase_find_device);
+
+void kbase_release_device(struct kbase_device *kbdev)
+{
+	put_device(kbdev->dev);
+}
+EXPORT_SYMBOL(kbase_release_device);
+
+#ifdef CONFIG_DEBUG_FS
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && \
+		!(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 28) && \
+		LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+/*
+ * Kernel versions before v4.6 don't have kstrtobool_from_user(), except
+ * longterm 4.4.y, which had it added in 4.4.28
+ */
+static int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
+{
+	char buf[4];
+
+	count = min(count, sizeof(buf) - 1);
+
+	if (copy_from_user(buf, s, count))
+		return -EFAULT;
+	buf[count] = '\0';
+
+	return strtobool(buf, res);
+}
+#endif
+
+static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf, size_t size, loff_t *off)
+{
+	struct kbase_context *kctx = f->private_data;
+	int err;
+	bool value;
+
+	err = kstrtobool_from_user(ubuf, size, &value);
+	if (err)
+		return err;
+
+	if (value)
+		kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
+	else
+		kbase_ctx_flag_clear(kctx, KCTX_INFINITE_CACHE);
+
+	return size;
+}
+
+static ssize_t read_ctx_infinite_cache(struct file *f, char __user *ubuf, size_t size, loff_t *off)
+{
+	struct kbase_context *kctx = f->private_data;
+	char buf[32];
+	int count;
+	bool value;
+
+	value = kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE);
+
+	count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
+
+	return simple_read_from_buffer(ubuf, size, off, buf, count);
+}
+
+static const struct file_operations kbase_infinite_cache_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = write_ctx_infinite_cache,
+	.read = read_ctx_infinite_cache,
+};
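+
+/*
+ * Usage example (illustrative; "mali0" and the debugfs mount point may
+ * differ per system, and the per-context directory is named <tgid>_<id>
+ * as set up in kbase_file_create_kctx() below):
+ *
+ *   echo Y > /sys/kernel/debug/mali0/ctx/<tgid>_<id>/infinite_cache
+ *   cat /sys/kernel/debug/mali0/ctx/<tgid>_<id>/infinite_cache
+ */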
+
+static ssize_t write_ctx_force_same_va(struct file *f, const char __user *ubuf,
+		size_t size, loff_t *off)
+{
+	struct kbase_context *kctx = f->private_data;
+	int err;
+	bool value;
+
+	err = kstrtobool_from_user(ubuf, size, &value);
+	if (err)
+		return err;
+
+	if (value) {
+#if defined(CONFIG_64BIT)
+		/* 32-bit clients cannot force SAME_VA */
+		if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+			return -EINVAL;
+		kbase_ctx_flag_set(kctx, KCTX_FORCE_SAME_VA);
+#else /* defined(CONFIG_64BIT) */
+		/* 32-bit clients cannot force SAME_VA */
+		return -EINVAL;
+#endif /* defined(CONFIG_64BIT) */
+	} else {
+		kbase_ctx_flag_clear(kctx, KCTX_FORCE_SAME_VA);
+	}
+
+	return size;
+}
+
+static ssize_t read_ctx_force_same_va(struct file *f, char __user *ubuf,
+		size_t size, loff_t *off)
+{
+	struct kbase_context *kctx = f->private_data;
+	char buf[32];
+	int count;
+	bool value;
+
+	value = kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA);
+
+	count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
+
+	return simple_read_from_buffer(ubuf, size, off, buf, count);
+}
+
+static const struct file_operations kbase_force_same_va_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = write_ctx_force_same_va,
+	.read = read_ctx_force_same_va,
+};
+#endif /* CONFIG_DEBUG_FS */
+
+static int kbase_file_create_kctx(struct kbase_file *const kfile,
+	base_context_create_flags const flags)
+{
+	struct kbase_device *kbdev = NULL;
+	struct kbase_context *kctx = NULL;
+#ifdef CONFIG_DEBUG_FS
+	char kctx_name[64];
+#endif
+
+	if (WARN_ON(!kfile))
+		return -EINVAL;
+
+	/* setup pending, try to signal that we'll do the setup,
+	 * if setup was already in progress, fail this call
+	 */
+	if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_CTX,
+		KBASE_FILE_CTX_IN_PROGRESS) != KBASE_FILE_NEED_CTX)
+		return -EPERM;
+
+	kbdev = kfile->kbdev;
+
+#if (KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE)
+	kctx = kbase_create_context(kbdev, in_compat_syscall(),
+		flags, kfile->api_version, kfile->filp);
+#else
+	kctx = kbase_create_context(kbdev, is_compat_task(),
+		flags, kfile->api_version, kfile->filp);
+#endif /* (KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE) */
+
+	/* if context creation failed (e.g. bad flags), the file stays stuck in setup mode */
+	if (!kctx)
+		return -ENOMEM;
+
+	if (kbdev->infinite_cache_active_default)
+		kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
+
+#ifdef CONFIG_DEBUG_FS
+	snprintf(kctx_name, sizeof(kctx_name), "%d_%d", kctx->tgid, kctx->id);
+
+	kctx->kctx_dentry = debugfs_create_dir(kctx_name,
+			kbdev->debugfs_ctx_directory);
+
+	if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
+		/* we don't treat this as a fail - just warn about it */
+		dev_warn(kbdev->dev, "couldn't create debugfs dir for kctx\n");
+	} else {
+		debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry,
+				kctx, &kbase_infinite_cache_fops);
+		debugfs_create_file("force_same_va", 0600,
+				kctx->kctx_dentry, kctx,
+				&kbase_force_same_va_fops);
+
+		mutex_init(&kctx->mem_profile_lock);
+
+		kbasep_jd_debugfs_ctx_init(kctx);
+		kbase_debug_mem_view_init(kctx);
+
+		kbase_debug_job_fault_context_init(kctx);
+
+		kbase_mem_pool_debugfs_init(kctx->kctx_dentry, kctx);
+
+		kbase_jit_debugfs_init(kctx);
+	}
+#endif /* CONFIG_DEBUG_FS */
+
+	dev_dbg(kbdev->dev, "created base context\n");
+
+	kfile->kctx = kctx;
+	atomic_set(&kfile->setup_state, KBASE_FILE_COMPLETE);
+
+	return 0;
+}
+
+static int kbase_open(struct inode *inode, struct file *filp)
+{
+	struct kbase_device *kbdev = NULL;
+	struct kbase_file *kfile;
+	int ret = 0;
+
+	kbdev = kbase_find_device(iminor(inode));
+
+	if (!kbdev)
+		return -ENODEV;
+
+	kfile = kbase_file_new(kbdev, filp);
+	if (!kfile) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	filp->private_data = kfile;
+	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+
+	return 0;
+
+ out:
+	kbase_release_device(kbdev);
+	return ret;
+}
+
+static int kbase_release(struct inode *inode, struct file *filp)
+{
+	struct kbase_file *const kfile = filp->private_data;
+
+	kbase_file_delete(kfile);
+	return 0;
+}
+
+static int kbase_api_set_flags(struct kbase_file *kfile,
+		struct kbase_ioctl_set_flags *flags)
+{
+	int err = 0;
+	unsigned long const api_version = kbase_file_get_api_version(kfile);
+	struct kbase_context *kctx = NULL;
+
+	/* Validate flags */
+	if (flags->create_flags !=
+		(flags->create_flags & BASEP_CONTEXT_CREATE_KERNEL_FLAGS))
+		return -EINVAL;
+
+	/* For backward compatibility, the context may have been created before
+	 * the flags were set.
+	 */
+	if (api_version >= KBASE_API_VERSION(11, 15)) {
+		err = kbase_file_create_kctx(kfile, flags->create_flags);
+	} else {
+		struct kbasep_js_kctx_info *js_kctx_info = NULL;
+		unsigned long irq_flags = 0;
+
+		/* If setup is incomplete (e.g. because the API version
+		 * wasn't set) then we have to give up.
+		 */
+		kctx = kbase_file_get_kctx_if_setup_complete(kfile);
+		if (unlikely(!kctx))
+			return -EPERM;
+
+		js_kctx_info = &kctx->jctx.sched_info;
+		mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+		spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+
+		/* Translate the flags */
+		if ((flags->create_flags &
+			BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
+			kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
+
+		spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	}
+
+	return err;
+}
+
+static int kbase_api_job_submit(struct kbase_context *kctx,
+		struct kbase_ioctl_job_submit *submit)
+{
+	return kbase_jd_submit(kctx, u64_to_user_ptr(submit->addr),
+			submit->nr_atoms,
+			submit->stride, false);
+}
+
+static int kbase_api_get_gpuprops(struct kbase_context *kctx,
+		struct kbase_ioctl_get_gpuprops *get_props)
+{
+	struct kbase_gpu_props *kprops = &kctx->kbdev->gpu_props;
+	int err;
+
+	if (get_props->flags != 0) {
+		dev_err(kctx->kbdev->dev, "Unsupported flags to get_gpuprops");
+		return -EINVAL;
+	}
+
+	if (get_props->size == 0)
+		return kprops->prop_buffer_size;
+	if (get_props->size < kprops->prop_buffer_size)
+		return -EINVAL;
+
+	err = copy_to_user(u64_to_user_ptr(get_props->buffer),
+			kprops->prop_buffer,
+			kprops->prop_buffer_size);
+	if (err)
+		return -EFAULT;
+	return kprops->prop_buffer_size;
+}
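+
+/*
+ * Example (illustrative userspace pattern for the two-phase property query
+ * above; error handling omitted):
+ *
+ *   struct kbase_ioctl_get_gpuprops gp = { 0 };
+ *   int size = ioctl(fd, KBASE_IOCTL_GET_GPUPROPS, &gp); (size query)
+ *   void *buf = malloc(size);
+ *   gp.buffer = (uintptr_t)buf;
+ *   gp.size = size;
+ *   ioctl(fd, KBASE_IOCTL_GET_GPUPROPS, &gp);            (fills buf)
+ */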
+
+static int kbase_api_post_term(struct kbase_context *kctx)
+{
+	kbase_event_close(kctx);
+	return 0;
+}
+
+static int kbase_api_mem_alloc(struct kbase_context *kctx,
+		union kbase_ioctl_mem_alloc *alloc)
+{
+	struct kbase_va_region *reg;
+	u64 flags = alloc->in.flags;
+	u64 gpu_va;
+
+	rcu_read_lock();
+	/* Don't allow memory allocation until user space has set up the
+	 * tracking page (which sets kctx->process_mm). Also catches when we've
+	 * forked.
+	 */
+	if (rcu_dereference(kctx->process_mm) != current->mm) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	rcu_read_unlock();
+
+	if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
+		return -ENOMEM;
+
+	/* Force SAME_VA if a 64-bit client.
+	 * The only exception is GPU-executable memory if an EXEC_VA zone
+	 * has been initialized. In that case, GPU-executable memory may
+	 * or may not be SAME_VA.
+	 */
+	if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) &&
+			kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA)) {
+		if (!(flags & BASE_MEM_PROT_GPU_EX) || !kbase_has_exec_va_zone(kctx))
+			flags |= BASE_MEM_SAME_VA;
+	}
+
+
+	reg = kbase_mem_alloc(kctx, alloc->in.va_pages,
+			alloc->in.commit_pages,
+			alloc->in.extent,
+			&flags, &gpu_va);
+
+	if (!reg)
+		return -ENOMEM;
+
+	alloc->out.flags = flags;
+	alloc->out.gpu_va = gpu_va;
+
+	return 0;
+}
+
+static int kbase_api_mem_query(struct kbase_context *kctx,
+		union kbase_ioctl_mem_query *query)
+{
+	return kbase_mem_query(kctx, query->in.gpu_addr,
+			query->in.query, &query->out.value);
+}
+
+static int kbase_api_mem_free(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_free *free)
+{
+	return kbase_mem_free(kctx, free->gpu_addr);
+}
+
+static int kbase_api_hwcnt_reader_setup(struct kbase_context *kctx,
+		struct kbase_ioctl_hwcnt_reader_setup *setup)
+{
+	return kbase_vinstr_hwcnt_reader_setup(kctx->kbdev->vinstr_ctx, setup);
+}
+
+static int kbase_api_hwcnt_enable(struct kbase_context *kctx,
+		struct kbase_ioctl_hwcnt_enable *enable)
+{
+	int ret;
+
+	mutex_lock(&kctx->legacy_hwcnt_lock);
+	if (enable->dump_buffer != 0) {
+		/* Non-zero dump buffer, so user wants to create the client */
+		if (kctx->legacy_hwcnt_cli == NULL) {
+			ret = kbase_hwcnt_legacy_client_create(
+				kctx->kbdev->hwcnt_gpu_virt,
+				enable,
+				&kctx->legacy_hwcnt_cli);
+		} else {
+			/* This context already has a client */
+			ret = -EBUSY;
+		}
+	} else {
+		/* Zero dump buffer, so user wants to destroy the client */
+		if (kctx->legacy_hwcnt_cli != NULL) {
+			kbase_hwcnt_legacy_client_destroy(
+				kctx->legacy_hwcnt_cli);
+			kctx->legacy_hwcnt_cli = NULL;
+			ret = 0;
+		} else {
+			/* This context has no client to destroy */
+			ret = -EINVAL;
+		}
+	}
+	mutex_unlock(&kctx->legacy_hwcnt_lock);
+
+	return ret;
+}
+
+static int kbase_api_hwcnt_dump(struct kbase_context *kctx)
+{
+	int ret;
+
+	mutex_lock(&kctx->legacy_hwcnt_lock);
+	ret = kbase_hwcnt_legacy_client_dump(kctx->legacy_hwcnt_cli);
+	mutex_unlock(&kctx->legacy_hwcnt_lock);
+
+	return ret;
+}
+
+static int kbase_api_hwcnt_clear(struct kbase_context *kctx)
+{
+	int ret;
+
+	mutex_lock(&kctx->legacy_hwcnt_lock);
+	ret = kbase_hwcnt_legacy_client_clear(kctx->legacy_hwcnt_cli);
+	mutex_unlock(&kctx->legacy_hwcnt_lock);
+
+	return ret;
+}
+
+static int kbase_api_get_cpu_gpu_timeinfo(struct kbase_context *kctx,
+		union kbase_ioctl_get_cpu_gpu_timeinfo *timeinfo)
+{
+	u32 flags = timeinfo->in.request_flags;
+	struct timespec ts;
+	u64 timestamp;
+	u64 cycle_cnt;
+
+	kbase_pm_context_active(kctx->kbdev);
+
+	kbase_backend_get_gpu_time(kctx->kbdev,
+		(flags & BASE_TIMEINFO_CYCLE_COUNTER_FLAG) ? &cycle_cnt : NULL,
+		(flags & BASE_TIMEINFO_TIMESTAMP_FLAG) ? &timestamp : NULL,
+		(flags & BASE_TIMEINFO_MONOTONIC_FLAG) ? &ts : NULL);
+
+	if (flags & BASE_TIMEINFO_TIMESTAMP_FLAG)
+		timeinfo->out.timestamp = timestamp;
+
+	if (flags & BASE_TIMEINFO_CYCLE_COUNTER_FLAG)
+		timeinfo->out.cycle_counter = cycle_cnt;
+
+	if (flags & BASE_TIMEINFO_MONOTONIC_FLAG) {
+		timeinfo->out.sec = ts.tv_sec;
+		timeinfo->out.nsec = ts.tv_nsec;
+	}
+
+	kbase_pm_context_idle(kctx->kbdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_MALI_NO_MALI
+static int kbase_api_hwcnt_set(struct kbase_context *kctx,
+		struct kbase_ioctl_hwcnt_values *values)
+{
+	gpu_model_set_dummy_prfcnt_sample(
+			(u32 __user *)(uintptr_t)values->data,
+			values->size);
+
+	return 0;
+}
+#endif
+
+static int kbase_api_disjoint_query(struct kbase_context *kctx,
+		struct kbase_ioctl_disjoint_query *query)
+{
+	query->counter = kbase_disjoint_event_get(kctx->kbdev);
+
+	return 0;
+}
+
+static int kbase_api_get_ddk_version(struct kbase_context *kctx,
+		struct kbase_ioctl_get_ddk_version *version)
+{
+	int ret;
+	int len = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
+
+	if (version->version_buffer == 0)
+		return len;
+
+	if (version->size < len)
+		return -EOVERFLOW;
+
+	ret = copy_to_user(u64_to_user_ptr(version->version_buffer),
+			KERNEL_SIDE_DDK_VERSION_STRING,
+			sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
+
+	if (ret)
+		return -EFAULT;
+
+	return len;
+}
+
+/* Defaults for legacy JIT init ioctl */
+#define DEFAULT_MAX_JIT_ALLOCATIONS 255
+#define JIT_LEGACY_TRIM_LEVEL (0) /* No trimming */
+
+static int kbase_api_mem_jit_init_old(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_jit_init_old *jit_init)
+{
+	kctx->jit_version = 1;
+
+	return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,
+			DEFAULT_MAX_JIT_ALLOCATIONS,
+			JIT_LEGACY_TRIM_LEVEL, BASE_MEM_GROUP_DEFAULT);
+}
+
+static int kbase_api_mem_jit_init(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_jit_init *jit_init)
+{
+	int i;
+
+	kctx->jit_version = 2;
+
+	for (i = 0; i < sizeof(jit_init->padding); i++) {
+		/* Ensure all padding bytes are 0 for potential future
+		 * extension
+		 */
+		if (jit_init->padding[i])
+			return -EINVAL;
+	}
+
+	return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,
+			jit_init->max_allocations, jit_init->trim_level,
+			jit_init->group_id);
+}
+
+static int kbase_api_mem_exec_init(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_exec_init *exec_init)
+{
+	return kbase_region_tracker_init_exec(kctx, exec_init->va_pages);
+}
+
+static int kbase_api_mem_sync(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_sync *sync)
+{
+	struct basep_syncset sset = {
+		.mem_handle.basep.handle = sync->handle,
+		.user_addr = sync->user_addr,
+		.size = sync->size,
+		.type = sync->type
+	};
+
+	return kbase_sync_now(kctx, &sset);
+}
+
+static int kbase_api_mem_find_cpu_offset(struct kbase_context *kctx,
+		union kbase_ioctl_mem_find_cpu_offset *find)
+{
+	return kbasep_find_enclosing_cpu_mapping_offset(
+			kctx,
+			find->in.cpu_addr,
+			find->in.size,
+			&find->out.offset);
+}
+
+static int kbase_api_mem_find_gpu_start_and_offset(struct kbase_context *kctx,
+		union kbase_ioctl_mem_find_gpu_start_and_offset *find)
+{
+	return kbasep_find_enclosing_gpu_mapping_start_and_offset(
+			kctx,
+			find->in.gpu_addr,
+			find->in.size,
+			&find->out.start,
+			&find->out.offset);
+}
+
+static int kbase_api_get_context_id(struct kbase_context *kctx,
+		struct kbase_ioctl_get_context_id *info)
+{
+	info->id = kctx->id;
+
+	return 0;
+}
+
+static int kbase_api_tlstream_acquire(struct kbase_context *kctx,
+		struct kbase_ioctl_tlstream_acquire *acquire)
+{
+	return kbase_timeline_io_acquire(kctx->kbdev, acquire->flags);
+}
+
+static int kbase_api_tlstream_flush(struct kbase_context *kctx)
+{
+	kbase_timeline_streams_flush(kctx->kbdev->timeline);
+
+	return 0;
+}
+
+static int kbase_api_mem_commit(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_commit *commit)
+{
+	return kbase_mem_commit(kctx, commit->gpu_addr, commit->pages);
+}
+
+static int kbase_api_mem_alias(struct kbase_context *kctx,
+		union kbase_ioctl_mem_alias *alias)
+{
+	struct base_mem_aliasing_info *ai;
+	u64 flags;
+	int err;
+
+	if (alias->in.nents == 0 || alias->in.nents > 2048)
+		return -EINVAL;
+
+	if (alias->in.stride > (U64_MAX / 2048))
+		return -EINVAL;
+
+	ai = vmalloc(sizeof(*ai) * alias->in.nents);
+	if (!ai)
+		return -ENOMEM;
+
+	err = copy_from_user(ai,
+			u64_to_user_ptr(alias->in.aliasing_info),
+			sizeof(*ai) * alias->in.nents);
+	if (err) {
+		vfree(ai);
+		return -EFAULT;
+	}
+
+	flags = alias->in.flags;
+	if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY) {
+		vfree(ai);
+		return -EINVAL;
+	}
+
+	alias->out.gpu_va = kbase_mem_alias(kctx, &flags,
+			alias->in.stride, alias->in.nents,
+			ai, &alias->out.va_pages);
+
+	alias->out.flags = flags;
+
+	vfree(ai);
+
+	if (alias->out.gpu_va == 0)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int kbase_api_mem_import(struct kbase_context *kctx,
+		union kbase_ioctl_mem_import *import)
+{
+	int ret;
+	u64 flags = import->in.flags;
+
+	if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
+		return -ENOMEM;
+
+	ret = kbase_mem_import(kctx,
+			import->in.type,
+			u64_to_user_ptr(import->in.phandle),
+			import->in.padding,
+			&import->out.gpu_va,
+			&import->out.va_pages,
+			&flags);
+
+	import->out.flags = flags;
+
+	return ret;
+}
+
+static int kbase_api_mem_flags_change(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_flags_change *change)
+{
+	if (change->flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
+		return -ENOMEM;
+
+	return kbase_mem_flags_change(kctx, change->gpu_va,
+			change->flags, change->mask);
+}
+
+static int kbase_api_stream_create(struct kbase_context *kctx,
+		struct kbase_ioctl_stream_create *stream)
+{
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	int fd, ret;
+
+	/* Name must be NULL-terminated and padded with NULLs, so check last
+	 * character is NULL
+	 */
+	if (stream->name[sizeof(stream->name)-1] != 0)
+		return -EINVAL;
+
+	ret = kbase_sync_fence_stream_create(stream->name, &fd);
+
+	if (ret)
+		return ret;
+	return fd;
+#else
+	return -ENOENT;
+#endif
+}
+
+static int kbase_api_fence_validate(struct kbase_context *kctx,
+		struct kbase_ioctl_fence_validate *validate)
+{
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	return kbase_sync_fence_validate(validate->fd);
+#else
+	return -ENOENT;
+#endif
+}
+
+static int kbase_api_mem_profile_add(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_profile_add *data)
+{
+	char *buf;
+	int err;
+
+	if (data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
+		dev_err(kctx->kbdev->dev, "mem_profile_add: buffer too big\n");
+		return -EINVAL;
+	}
+
+	buf = kmalloc(data->len, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf))
+		return -ENOMEM;
+
+	err = copy_from_user(buf, u64_to_user_ptr(data->buffer),
+			data->len);
+	if (err) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	return kbasep_mem_profile_debugfs_insert(kctx, buf, data->len);
+}
+
+static int kbase_api_soft_event_update(struct kbase_context *kctx,
+		struct kbase_ioctl_soft_event_update *update)
+{
+	if (update->flags != 0)
+		return -EINVAL;
+
+	return kbase_soft_event_update(kctx, update->event, update->new_status);
+}
+
+static int kbase_api_sticky_resource_map(struct kbase_context *kctx,
+		struct kbase_ioctl_sticky_resource_map *map)
+{
+	int ret;
+	u64 i;
+	u64 gpu_addr[BASE_EXT_RES_COUNT_MAX];
+
+	if (!map->count || map->count > BASE_EXT_RES_COUNT_MAX)
+		return -EOVERFLOW;
+
+	ret = copy_from_user(gpu_addr, u64_to_user_ptr(map->address),
+			sizeof(u64) * map->count);
+
+	if (ret != 0)
+		return -EFAULT;
+
+	kbase_gpu_vm_lock(kctx);
+
+	for (i = 0; i < map->count; i++) {
+		if (!kbase_sticky_resource_acquire(kctx, gpu_addr[i])) {
+			/* Invalid resource */
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	if (ret != 0) {
+		while (i > 0) {
+			i--;
+			kbase_sticky_resource_release(kctx, NULL, gpu_addr[i]);
+		}
+	}
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return ret;
+}
+
+static int kbase_api_sticky_resource_unmap(struct kbase_context *kctx,
+		struct kbase_ioctl_sticky_resource_unmap *unmap)
+{
+	int ret;
+	u64 i;
+	u64 gpu_addr[BASE_EXT_RES_COUNT_MAX];
+
+	if (!unmap->count || unmap->count > BASE_EXT_RES_COUNT_MAX)
+		return -EOVERFLOW;
+
+	ret = copy_from_user(gpu_addr, u64_to_user_ptr(unmap->address),
+			sizeof(u64) * unmap->count);
+
+	if (ret != 0)
+		return -EFAULT;
+
+	kbase_gpu_vm_lock(kctx);
+
+	for (i = 0; i < unmap->count; i++) {
+		if (!kbase_sticky_resource_release(kctx, NULL, gpu_addr[i])) {
+			/* Invalid resource, but we keep going anyway */
+			ret = -EINVAL;
+		}
+	}
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return ret;
+}
+
+#if MALI_UNIT_TEST
+static int kbase_api_tlstream_test(struct kbase_context *kctx,
+		struct kbase_ioctl_tlstream_test *test)
+{
+	kbase_timeline_test(
+			kctx->kbdev,
+			test->tpw_count,
+			test->msg_delay,
+			test->msg_count,
+			test->aux_msg);
+
+	return 0;
+}
+
+static int kbase_api_tlstream_stats(struct kbase_context *kctx,
+		struct kbase_ioctl_tlstream_stats *stats)
+{
+	kbase_timeline_stats(kctx->kbdev->timeline,
+			&stats->bytes_collected,
+			&stats->bytes_generated);
+
+	return 0;
+}
+#endif /* MALI_UNIT_TEST */
+
+
+#define KBASE_HANDLE_IOCTL(cmd, function, arg)    \
+	do {                                          \
+		BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE); \
+		return function(arg);                     \
+	} while (0)
+
+#define KBASE_HANDLE_IOCTL_IN(cmd, function, type, arg)    \
+	do {                                                   \
+		type param;                                        \
+		int err;                                           \
+		BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_WRITE);         \
+		BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd));     \
+		err = copy_from_user(&param, uarg, sizeof(param)); \
+		if (err)                                           \
+			return -EFAULT;                                \
+		return function(arg, &param);                      \
+	} while (0)
+
+#define KBASE_HANDLE_IOCTL_OUT(cmd, function, type, arg)   \
+	do {                                                   \
+		type param;                                        \
+		int ret, err;                                      \
+		BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_READ);          \
+		BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd));     \
+		memset(&param, 0, sizeof(param));                  \
+		ret = function(arg, &param);                       \
+		err = copy_to_user(uarg, &param, sizeof(param));   \
+		if (err)                                           \
+			return -EFAULT;                                \
+		return ret;                                        \
+	} while (0)
+
+#define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type, arg)     \
+	do {                                                       \
+		type param;                                            \
+		int ret, err;                                          \
+		BUILD_BUG_ON(_IOC_DIR(cmd) != (_IOC_WRITE|_IOC_READ)); \
+		BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd));         \
+		err = copy_from_user(&param, uarg, sizeof(param));     \
+		if (err)                                               \
+			return -EFAULT;                                    \
+		ret = function(arg, &param);                           \
+		err = copy_to_user(uarg, &param, sizeof(param));       \
+		if (err)                                               \
+			return -EFAULT;                                    \
+		return ret;                                            \
+	} while (0)
+
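+/*
+ * Each KBASE_HANDLE_IOCTL* macro expands to a 'return' from the enclosing
+ * function, so the 'break' statements that follow them in kbase_ioctl()
+ * below are never reached; they merely terminate the case labels. The
+ * BUILD_BUG_ON() checks verify at compile time that each command's
+ * direction and payload size match the handler's parameter type.
+ */
+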
+static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct kbase_file *const kfile = filp->private_data;
+	struct kbase_context *kctx = NULL;
+	struct kbase_device *kbdev = kfile->kbdev;
+	void __user *uarg = (void __user *)arg;
+
+	/* Only these ioctls are available until setup is complete */
+	switch (cmd) {
+	case KBASE_IOCTL_VERSION_CHECK:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK,
+				kbase_api_handshake,
+				struct kbase_ioctl_version_check,
+				kfile);
+		break;
+
+	case KBASE_IOCTL_SET_FLAGS:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_FLAGS,
+				kbase_api_set_flags,
+				struct kbase_ioctl_set_flags,
+				kfile);
+		break;
+	}
+
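+	/* Any other command requires a context, i.e. setup must be complete. */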
+	kctx = kbase_file_get_kctx_if_setup_complete(kfile);
+	if (unlikely(!kctx))
+		return -EPERM;
+
+	/* Normal ioctls */
+	switch (cmd) {
+	case KBASE_IOCTL_JOB_SUBMIT:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_JOB_SUBMIT,
+				kbase_api_job_submit,
+				struct kbase_ioctl_job_submit,
+				kctx);
+		break;
+	case KBASE_IOCTL_GET_GPUPROPS:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_GPUPROPS,
+				kbase_api_get_gpuprops,
+				struct kbase_ioctl_get_gpuprops,
+				kctx);
+		break;
+	case KBASE_IOCTL_POST_TERM:
+		KBASE_HANDLE_IOCTL(KBASE_IOCTL_POST_TERM,
+				kbase_api_post_term,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_ALLOC:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC,
+				kbase_api_mem_alloc,
+				union kbase_ioctl_mem_alloc,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_QUERY:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY,
+				kbase_api_mem_query,
+				union kbase_ioctl_mem_query,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_FREE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FREE,
+				kbase_api_mem_free,
+				struct kbase_ioctl_mem_free,
+				kctx);
+		break;
+	case KBASE_IOCTL_DISJOINT_QUERY:
+		KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_DISJOINT_QUERY,
+				kbase_api_disjoint_query,
+				struct kbase_ioctl_disjoint_query,
+				kctx);
+		break;
+	case KBASE_IOCTL_GET_DDK_VERSION:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_DDK_VERSION,
+				kbase_api_get_ddk_version,
+				struct kbase_ioctl_get_ddk_version,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_JIT_INIT_OLD:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT_OLD,
+				kbase_api_mem_jit_init_old,
+				struct kbase_ioctl_mem_jit_init_old,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_JIT_INIT:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT,
+				kbase_api_mem_jit_init,
+				struct kbase_ioctl_mem_jit_init,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_EXEC_INIT:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_EXEC_INIT,
+				kbase_api_mem_exec_init,
+				struct kbase_ioctl_mem_exec_init,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_SYNC:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_SYNC,
+				kbase_api_mem_sync,
+				struct kbase_ioctl_mem_sync,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_FIND_CPU_OFFSET:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_CPU_OFFSET,
+				kbase_api_mem_find_cpu_offset,
+				union kbase_ioctl_mem_find_cpu_offset,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET,
+				kbase_api_mem_find_gpu_start_and_offset,
+				union kbase_ioctl_mem_find_gpu_start_and_offset,
+				kctx);
+		break;
+	case KBASE_IOCTL_GET_CONTEXT_ID:
+		KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_GET_CONTEXT_ID,
+				kbase_api_get_context_id,
+				struct kbase_ioctl_get_context_id,
+				kctx);
+		break;
+	case KBASE_IOCTL_TLSTREAM_ACQUIRE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_ACQUIRE,
+				kbase_api_tlstream_acquire,
+				struct kbase_ioctl_tlstream_acquire,
+				kctx);
+		break;
+	case KBASE_IOCTL_TLSTREAM_FLUSH:
+		KBASE_HANDLE_IOCTL(KBASE_IOCTL_TLSTREAM_FLUSH,
+				kbase_api_tlstream_flush,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_COMMIT:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_COMMIT,
+				kbase_api_mem_commit,
+				struct kbase_ioctl_mem_commit,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_ALIAS:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALIAS,
+				kbase_api_mem_alias,
+				union kbase_ioctl_mem_alias,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_IMPORT:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_IMPORT,
+				kbase_api_mem_import,
+				union kbase_ioctl_mem_import,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_FLAGS_CHANGE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FLAGS_CHANGE,
+				kbase_api_mem_flags_change,
+				struct kbase_ioctl_mem_flags_change,
+				kctx);
+		break;
+	case KBASE_IOCTL_STREAM_CREATE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STREAM_CREATE,
+				kbase_api_stream_create,
+				struct kbase_ioctl_stream_create,
+				kctx);
+		break;
+	case KBASE_IOCTL_FENCE_VALIDATE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FENCE_VALIDATE,
+				kbase_api_fence_validate,
+				struct kbase_ioctl_fence_validate,
+				kctx);
+		break;
+	case KBASE_IOCTL_MEM_PROFILE_ADD:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_PROFILE_ADD,
+				kbase_api_mem_profile_add,
+				struct kbase_ioctl_mem_profile_add,
+				kctx);
+		break;
+	case KBASE_IOCTL_SOFT_EVENT_UPDATE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SOFT_EVENT_UPDATE,
+				kbase_api_soft_event_update,
+				struct kbase_ioctl_soft_event_update,
+				kctx);
+		break;
+	case KBASE_IOCTL_STICKY_RESOURCE_MAP:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_MAP,
+				kbase_api_sticky_resource_map,
+				struct kbase_ioctl_sticky_resource_map,
+				kctx);
+		break;
+	case KBASE_IOCTL_STICKY_RESOURCE_UNMAP:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_UNMAP,
+				kbase_api_sticky_resource_unmap,
+				struct kbase_ioctl_sticky_resource_unmap,
+				kctx);
+		break;
+
+	/* Instrumentation. */
+	case KBASE_IOCTL_HWCNT_READER_SETUP:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_READER_SETUP,
+				kbase_api_hwcnt_reader_setup,
+				struct kbase_ioctl_hwcnt_reader_setup,
+				kctx);
+		break;
+	case KBASE_IOCTL_HWCNT_ENABLE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_ENABLE,
+				kbase_api_hwcnt_enable,
+				struct kbase_ioctl_hwcnt_enable,
+				kctx);
+		break;
+	case KBASE_IOCTL_HWCNT_DUMP:
+		KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_DUMP,
+				kbase_api_hwcnt_dump,
+				kctx);
+		break;
+	case KBASE_IOCTL_HWCNT_CLEAR:
+		KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_CLEAR,
+				kbase_api_hwcnt_clear,
+				kctx);
+		break;
+	case KBASE_IOCTL_GET_CPU_GPU_TIMEINFO:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_GET_CPU_GPU_TIMEINFO,
+				kbase_api_get_cpu_gpu_timeinfo,
+				union kbase_ioctl_get_cpu_gpu_timeinfo,
+				kctx);
+		break;
+#ifdef CONFIG_MALI_NO_MALI
+	case KBASE_IOCTL_HWCNT_SET:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_SET,
+				kbase_api_hwcnt_set,
+				struct kbase_ioctl_hwcnt_values,
+				kctx);
+		break;
+#endif
+#ifdef CONFIG_MALI_CINSTR_GWT
+	case KBASE_IOCTL_CINSTR_GWT_START:
+		KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_START,
+				kbase_gpu_gwt_start,
+				kctx);
+		break;
+	case KBASE_IOCTL_CINSTR_GWT_STOP:
+		KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_STOP,
+				kbase_gpu_gwt_stop,
+				kctx);
+		break;
+	case KBASE_IOCTL_CINSTR_GWT_DUMP:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CINSTR_GWT_DUMP,
+				kbase_gpu_gwt_dump,
+				union kbase_ioctl_cinstr_gwt_dump,
+				kctx);
+		break;
+#endif
+#if MALI_UNIT_TEST
+	case KBASE_IOCTL_TLSTREAM_TEST:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_TEST,
+				kbase_api_tlstream_test,
+				struct kbase_ioctl_tlstream_test,
+				kctx);
+		break;
+	case KBASE_IOCTL_TLSTREAM_STATS:
+		KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_TLSTREAM_STATS,
+				kbase_api_tlstream_stats,
+				struct kbase_ioctl_tlstream_stats,
+				kctx);
+		break;
+#endif
+	}
+
+	dev_warn(kbdev->dev, "Unknown ioctl 0x%x nr:%d\n", cmd, _IOC_NR(cmd));
+
+	return -ENOIOCTLCMD;
+}
+
+static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+	struct kbase_file *const kfile = filp->private_data;
+	struct kbase_context *const kctx =
+		kbase_file_get_kctx_if_setup_complete(kfile);
+	struct base_jd_event_v2 uevent;
+	int out_count = 0;
+
+	if (unlikely(!kctx))
+		return -EPERM;
+
+	if (count < sizeof(uevent))
+		return -ENOBUFS;
+
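+	/* Copy out as many queued events as fit in the user buffer. Block for
+	 * the first event unless O_NONBLOCK was given; once at least one event
+	 * has been copied out, return instead of blocking again.
+	 */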
+	do {
+		while (kbase_event_dequeue(kctx, &uevent)) {
+			if (out_count > 0)
+				goto out;
+
+			if (filp->f_flags & O_NONBLOCK)
+				return -EAGAIN;
+
+			if (wait_event_interruptible(kctx->event_queue,
+					kbase_event_pending(kctx)) != 0)
+				return -ERESTARTSYS;
+		}
+		if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
+			if (out_count == 0)
+				return -EPIPE;
+			goto out;
+		}
+
+		if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
+			return -EFAULT;
+
+		buf += sizeof(uevent);
+		out_count++;
+		count -= sizeof(uevent);
+	} while (count >= sizeof(uevent));
+
+ out:
+	return out_count * sizeof(uevent);
+}
+
+static unsigned int kbase_poll(struct file *filp, poll_table *wait)
+{
+	struct kbase_file *const kfile = filp->private_data;
+	struct kbase_context *const kctx =
+		kbase_file_get_kctx_if_setup_complete(kfile);
+
+	if (unlikely(!kctx))
+		return POLLERR;
+
+	poll_wait(filp, &kctx->event_queue, wait);
+	if (kbase_event_pending(kctx))
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+void kbase_event_wakeup(struct kbase_context *kctx)
+{
+	KBASE_DEBUG_ASSERT(kctx);
+
+	wake_up_interruptible(&kctx->event_queue);
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_wakeup);
+
+static int kbase_mmap(struct file *const filp, struct vm_area_struct *const vma)
+{
+	struct kbase_file *const kfile = filp->private_data;
+	struct kbase_context *const kctx =
+		kbase_file_get_kctx_if_setup_complete(kfile);
+
+	if (unlikely(!kctx))
+		return -EPERM;
+
+	return kbase_context_mmap(kctx, vma);
+}
+
+static int kbase_check_flags(int flags)
+{
+	/* Enforce that the driver keeps the O_CLOEXEC flag so that the file
+	 * descriptor is not inherited across an execve() call.
+	 */
+	if (0 == (flags & O_CLOEXEC))
+		return -EINVAL;
+
+	return 0;
+}
+
+static unsigned long kbase_get_unmapped_area(struct file *const filp,
+		const unsigned long addr, const unsigned long len,
+		const unsigned long pgoff, const unsigned long flags)
+{
+	struct kbase_file *const kfile = filp->private_data;
+	struct kbase_context *const kctx =
+		kbase_file_get_kctx_if_setup_complete(kfile);
+
+	if (unlikely(!kctx))
+		return -EPERM;
+
+	return kbase_context_get_unmapped_area(kctx, addr, len, pgoff, flags);
+}
+
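+/*
+ * The same handler serves both native and compat ioctls: the kbase ioctl
+ * payloads are declared with fixed-size, explicitly sized fields, so no
+ * 32-bit translation layer is needed.
+ */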
+static const struct file_operations kbase_fops = {
+	.owner = THIS_MODULE,
+	.open = kbase_open,
+	.release = kbase_release,
+	.read = kbase_read,
+	.poll = kbase_poll,
+	.unlocked_ioctl = kbase_ioctl,
+	.compat_ioctl = kbase_ioctl,
+	.mmap = kbase_mmap,
+	.check_flags = kbase_check_flags,
+	.get_unmapped_area = kbase_get_unmapped_area,
+};
+
+/**
+ * show_policy - Show callback for the power_policy sysfs file.
+ *
+ * This function is called to get the contents of the power_policy sysfs
+ * file. This is a list of the available policies with the currently active one
+ * surrounded by square brackets.
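+ * For example (the set of available policies depends on the kernel build):
+ *
+ *     [coarse_demand] always_on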
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
+{
+	struct kbase_device *kbdev;
+	const struct kbase_pm_policy *current_policy;
+	const struct kbase_pm_policy *const *policy_list;
+	int policy_count;
+	int i;
+	ssize_t ret = 0;
+
+	kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	current_policy = kbase_pm_get_policy(kbdev);
+
+	policy_count = kbase_pm_list_policies(kbdev, &policy_list);
+
+	for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
+		if (policy_list[i] == current_policy)
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
+		else
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
+	}
+
+	if (ret < PAGE_SIZE - 1) {
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	} else {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/**
+ * set_policy - Store callback for the power_policy sysfs file.
+ *
+ * This function is called when the power_policy sysfs file is written to.
+ * It matches the requested policy against the available policies and if a
+ * matching policy is found calls kbase_pm_set_policy() to change the
+ * policy.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The value written to the sysfs file
+ * @count:	The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	const struct kbase_pm_policy *new_policy = NULL;
+	const struct kbase_pm_policy *const *policy_list;
+	int policy_count;
+	int i;
+
+	kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	policy_count = kbase_pm_list_policies(kbdev, &policy_list);
+
+	for (i = 0; i < policy_count; i++) {
+		if (sysfs_streq(policy_list[i]->name, buf)) {
+			new_policy = policy_list[i];
+			break;
+		}
+	}
+
+	if (!new_policy) {
+		dev_err(dev, "power_policy: policy not found\n");
+		return -EINVAL;
+	}
+
+	kbase_pm_set_policy(kbdev, new_policy);
+
+	return count;
+}
+
+/*
+ * The sysfs file power_policy.
+ *
+ * This is used for obtaining information about the available policies,
+ * determining which policy is currently active, and changing the active
+ * policy.
+ */
+static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
+
+/**
+ * show_core_mask - Show callback for the core_mask sysfs file.
+ *
+ * This function is called to get the contents of the core_mask sysfs file.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret = 0;
+
+	kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"Current core mask (JS0) : 0x%llX\n",
+			kbdev->pm.debug_core_mask[0]);
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"Current core mask (JS1) : 0x%llX\n",
+			kbdev->pm.debug_core_mask[1]);
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"Current core mask (JS2) : 0x%llX\n",
+			kbdev->pm.debug_core_mask[2]);
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"Available core mask : 0x%llX\n",
+			kbdev->gpu_props.props.raw_props.shader_present);
+
+	return ret;
+}
+
+/**
+ * set_core_mask - Store callback for the core_mask sysfs file.
+ *
+ * This function is called when the core_mask sysfs file is written to.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The value written to the sysfs file
+ * @count:	The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	u64 new_core_mask[3];
+	int items, i;
+	ssize_t err = count;
+	unsigned long flags;
+	u64 shader_present, group0_core_mask;
+
+	kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	items = sscanf(buf, "%llx %llx %llx",
+			&new_core_mask[0], &new_core_mask[1],
+			&new_core_mask[2]);
+
+	if (items != 1 && items != 3) {
+		dev_err(kbdev->dev, "Couldn't process core mask write operation.\n"
+			"Use format <core_mask>\n"
+			"or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
+		err = -EINVAL;
+		goto end;
+	}
+
+	if (items == 1)
+		new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	shader_present = kbdev->gpu_props.props.raw_props.shader_present;
+	group0_core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+
+	for (i = 0; i < 3; ++i) {
+		if ((new_core_mask[i] & shader_present) != new_core_mask[i]) {
+			dev_err(dev, "Invalid core mask 0x%llX for JS %d: Includes non-existent cores (present = 0x%llX)",
+					new_core_mask[i], i, shader_present);
+			err = -EINVAL;
+			goto unlock;
+
+		} else if (!(new_core_mask[i] & shader_present & kbdev->pm.backend.ca_cores_enabled)) {
+			dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with currently available cores (present = 0x%llX, CA enabled = 0x%llX)\n",
+					new_core_mask[i], i,
+					kbdev->gpu_props.props.raw_props.shader_present,
+					kbdev->pm.backend.ca_cores_enabled);
+			err = -EINVAL;
+			goto unlock;
+
+		} else if (!(new_core_mask[i] & group0_core_mask)) {
+			dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with group 0 core mask 0x%llX\n",
+					new_core_mask[i], i, group0_core_mask);
+			err = -EINVAL;
+			goto unlock;
+		}
+	}
+
+	if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
+			kbdev->pm.debug_core_mask[1] !=
+					new_core_mask[1] ||
+			kbdev->pm.debug_core_mask[2] !=
+					new_core_mask[2]) {
+
+		kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
+				new_core_mask[1], new_core_mask[2]);
+	}
+
+unlock:
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+end:
+	return err;
+}
+
+/*
+ * The sysfs file core_mask.
+ *
+ * This is used to restrict shader core availability for debugging purposes.
+ * Reading it will show the current core mask and the mask of cores available.
+ * Writing to it will set the current core mask.
+ */
+static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
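+
+/*
+ * Example usage (the sysfs path is illustrative and platform dependent):
+ *
+ *     echo 0xf > core_mask           # one mask applied to JS0, JS1 and JS2
+ *     echo 0xf 0x3 0x3 > core_mask   # individual masks for JS0/JS1/JS2
+ */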
+
+/**
+ * set_soft_job_timeout - Store callback for the soft_job_timeout sysfs
+ * file.
+ *
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The value written to the sysfs file.
+ * @count: The number of bytes written to the sysfs file.
+ *
+ * This allows setting the timeout for software jobs. Waiting soft event wait
+ * jobs will be cancelled after this period expires, while soft fence wait jobs
+ * will print debug information if the fence debug feature is enabled.
+ *
+ * This is expressed in milliseconds.
+ *
+ * Return: count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_soft_job_timeout(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int soft_job_timeout_ms;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) ||
+	    (soft_job_timeout_ms <= 0))
+		return -EINVAL;
+
+	atomic_set(&kbdev->js_data.soft_job_timeout_ms,
+		   soft_job_timeout_ms);
+
+	return count;
+}
+
+/**
+ * show_soft_job_timeout - Show callback for the soft_job_timeout sysfs
+ * file.
+ *
+ * This will return the timeout for the software jobs.
+ *
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer for the sysfs file contents.
+ *
+ * Return: The number of bytes output to buf.
+ */
+static ssize_t show_soft_job_timeout(struct device *dev,
+				       struct device_attribute *attr,
+				       char * const buf)
+{
+	struct kbase_device *kbdev;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n",
+			 atomic_read(&kbdev->js_data.soft_job_timeout_ms));
+}
+
+static DEVICE_ATTR(soft_job_timeout, S_IRUGO | S_IWUSR,
+		   show_soft_job_timeout, set_soft_job_timeout);
+
+static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
+				int default_ticks, u32 old_ticks)
+{
+	if (timeout_ms > 0) {
+		u64 ticks = timeout_ms * 1000000ULL;
+		do_div(ticks, kbdev->js_data.scheduling_period_ns);
+		if (!ticks)
+			return 1;
+		return ticks;
+	} else if (timeout_ms < 0) {
+		return default_ticks;
+	} else {
+		return old_ticks;
+	}
+}
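+
+/*
+ * Worked example: with a 100 ms scheduling period
+ * (scheduling_period_ns == 100000000), a 500 ms timeout converts to
+ * 500 * 1000000 / 100000000 = 5 ticks. Any positive timeout yields at
+ * least one tick, a negative timeout selects the default tick count, and
+ * zero keeps the previous value.
+ */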
+
+/**
+ * set_js_timeouts - Store callback for the js_timeouts sysfs file.
+ *
+ * This function is called when the js_timeouts sysfs file is written to. The
+ * file contains eight values separated by whitespace. The values are
+ * basically the same as %JS_SOFT_STOP_TICKS, %JS_SOFT_STOP_TICKS_CL,
+ * %JS_HARD_STOP_TICKS_SS, %JS_HARD_STOP_TICKS_CL,
+ * %JS_HARD_STOP_TICKS_DUMPING, %JS_RESET_TICKS_SS, %JS_RESET_TICKS_CL,
+ * %JS_RESET_TICKS_DUMPING configuration values (in that order), with the
+ * difference that the js_timeouts values are expressed in MILLISECONDS.
+ *
+ * The js_timeouts sysfs file allows the current values in use by the job
+ * scheduler to be overridden. Note that a value needs to be other than 0
+ * for it to override the current job scheduler value.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The value written to the sysfs file
+ * @count:	The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int items;
+	long js_soft_stop_ms;
+	long js_soft_stop_ms_cl;
+	long js_hard_stop_ms_ss;
+	long js_hard_stop_ms_cl;
+	long js_hard_stop_ms_dumping;
+	long js_reset_ms_ss;
+	long js_reset_ms_cl;
+	long js_reset_ms_dumping;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
+			&js_soft_stop_ms, &js_soft_stop_ms_cl,
+			&js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
+			&js_hard_stop_ms_dumping, &js_reset_ms_ss,
+			&js_reset_ms_cl, &js_reset_ms_dumping);
+
+	if (items == 8) {
+		struct kbasep_js_device_data *js_data = &kbdev->js_data;
+		unsigned long flags;
+
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+#define UPDATE_TIMEOUT(ticks_name, ms_name, default) do {\
+	js_data->ticks_name = timeout_ms_to_ticks(kbdev, ms_name, \
+			default, js_data->ticks_name); \
+	dev_dbg(kbdev->dev, "Overriding " #ticks_name \
+			" with %lu ticks (%lu ms)\n", \
+			(unsigned long)js_data->ticks_name, \
+			ms_name); \
+	} while (0)
+
+		UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms,
+				DEFAULT_JS_SOFT_STOP_TICKS);
+		UPDATE_TIMEOUT(soft_stop_ticks_cl, js_soft_stop_ms_cl,
+				DEFAULT_JS_SOFT_STOP_TICKS_CL);
+		UPDATE_TIMEOUT(hard_stop_ticks_ss, js_hard_stop_ms_ss,
+				kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
+				DEFAULT_JS_HARD_STOP_TICKS_SS_8408 :
+				DEFAULT_JS_HARD_STOP_TICKS_SS);
+		UPDATE_TIMEOUT(hard_stop_ticks_cl, js_hard_stop_ms_cl,
+				DEFAULT_JS_HARD_STOP_TICKS_CL);
+		UPDATE_TIMEOUT(hard_stop_ticks_dumping,
+				js_hard_stop_ms_dumping,
+				DEFAULT_JS_HARD_STOP_TICKS_DUMPING);
+		UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss,
+				kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
+				DEFAULT_JS_RESET_TICKS_SS_8408 :
+				DEFAULT_JS_RESET_TICKS_SS);
+		UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl,
+				DEFAULT_JS_RESET_TICKS_CL);
+		UPDATE_TIMEOUT(gpu_reset_ticks_dumping, js_reset_ms_dumping,
+				DEFAULT_JS_RESET_TICKS_DUMPING);
+
+		kbase_js_set_timeouts(kbdev);
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+		return count;
+	}
+
+	dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
+			"Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
+			"Write 0 for no change, -1 to restore default timeout\n");
+	return -EINVAL;
+}
+
+static unsigned long get_js_timeout_in_ms(
+		u32 scheduling_period_ns,
+		u32 ticks)
+{
+	u64 ms = (u64)ticks * scheduling_period_ns;
+
+	do_div(ms, 1000000UL);
+	return ms;
+}
+
+/**
+ * show_js_timeouts - Show callback for the js_timeouts sysfs file.
+ *
+ * This function is called to get the contents of the js_timeouts sysfs
+ * file. It returns the last set values written to the js_timeouts sysfs file.
+ * If the file has not been written to yet, the values returned are the
+ * settings currently in use.
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret;
+	unsigned long js_soft_stop_ms;
+	unsigned long js_soft_stop_ms_cl;
+	unsigned long js_hard_stop_ms_ss;
+	unsigned long js_hard_stop_ms_cl;
+	unsigned long js_hard_stop_ms_dumping;
+	unsigned long js_reset_ms_ss;
+	unsigned long js_reset_ms_cl;
+	unsigned long js_reset_ms_dumping;
+	u32 scheduling_period_ns;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
+
+#define GET_TIMEOUT(name) get_js_timeout_in_ms(\
+		scheduling_period_ns, \
+		kbdev->js_data.name)
+
+	js_soft_stop_ms = GET_TIMEOUT(soft_stop_ticks);
+	js_soft_stop_ms_cl = GET_TIMEOUT(soft_stop_ticks_cl);
+	js_hard_stop_ms_ss = GET_TIMEOUT(hard_stop_ticks_ss);
+	js_hard_stop_ms_cl = GET_TIMEOUT(hard_stop_ticks_cl);
+	js_hard_stop_ms_dumping = GET_TIMEOUT(hard_stop_ticks_dumping);
+	js_reset_ms_ss = GET_TIMEOUT(gpu_reset_ticks_ss);
+	js_reset_ms_cl = GET_TIMEOUT(gpu_reset_ticks_cl);
+	js_reset_ms_dumping = GET_TIMEOUT(gpu_reset_ticks_dumping);
+
+#undef GET_TIMEOUT
+
+	ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
+			js_soft_stop_ms, js_soft_stop_ms_cl,
+			js_hard_stop_ms_ss, js_hard_stop_ms_cl,
+			js_hard_stop_ms_dumping, js_reset_ms_ss,
+			js_reset_ms_cl, js_reset_ms_dumping);
+
+	if (ret >= PAGE_SIZE) {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/*
+ * The sysfs file js_timeouts.
+ *
+ * This is used to override the current job scheduler values for
+ * JS_SOFT_STOP_TICKS_SS
+ * JS_SOFT_STOP_TICKS_CL
+ * JS_HARD_STOP_TICKS_SS
+ * JS_HARD_STOP_TICKS_CL
+ * JS_HARD_STOP_TICKS_DUMPING
+ * JS_RESET_TICKS_SS
+ * JS_RESET_TICKS_CL
+ * JS_RESET_TICKS_DUMPING.
+ */
+static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
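+
+/*
+ * Example (illustrative): restore the default soft-stop timeout while
+ * leaving the other seven timeouts unchanged:
+ *
+ *     echo "-1 0 0 0 0 0 0 0" > js_timeouts
+ */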
+
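+/*
+ * Rescale a tick count when the scheduling period changes so that the
+ * absolute timeout (old_period * old_ticks nanoseconds) is preserved as
+ * closely as possible, never dropping below one tick.
+ */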
+static u32 get_new_js_timeout(
+		u32 old_period,
+		u32 old_ticks,
+		u32 new_scheduling_period_ns)
+{
+	u64 ticks = (u64)old_period * (u64)old_ticks;
+
+	do_div(ticks, new_scheduling_period_ns);
+	return ticks ? ticks : 1;
+}
+
+/**
+ * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
+ *                            file
+ * @dev:   The device the sysfs file is for
+ * @attr:  The attributes of the sysfs file
+ * @buf:   The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the js_scheduling_period sysfs file is written
+ * to. It checks the data written, and if valid updates the js_scheduling_period
+ * value
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_js_scheduling_period(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int ret;
+	unsigned int js_scheduling_period;
+	u32 new_scheduling_period_ns;
+	u32 old_period;
+	struct kbasep_js_device_data *js_data;
+	unsigned long flags;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	js_data = &kbdev->js_data;
+
+	ret = kstrtouint(buf, 0, &js_scheduling_period);
+	if (ret || !js_scheduling_period) {
+		dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
+				"Use format <js_scheduling_period_ms>\n");
+		return -EINVAL;
+	}
+
+	new_scheduling_period_ns = js_scheduling_period * 1000000;
+
+	/* Update scheduling timeouts */
+	mutex_lock(&js_data->runpool_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	/* If no contexts have been scheduled since js_timeouts was last written
+	 * to, the new timeouts might not have been latched yet. So check if an
+	 * update is pending and use the new values if necessary.
+	 */
+
+	/* Use previous 'new' scheduling period as a base if present. */
+	old_period = js_data->scheduling_period_ns;
+
+#define SET_TIMEOUT(name) \
+		(js_data->name = get_new_js_timeout(\
+				old_period, \
+				kbdev->js_data.name, \
+				new_scheduling_period_ns))
+
+	SET_TIMEOUT(soft_stop_ticks);
+	SET_TIMEOUT(soft_stop_ticks_cl);
+	SET_TIMEOUT(hard_stop_ticks_ss);
+	SET_TIMEOUT(hard_stop_ticks_cl);
+	SET_TIMEOUT(hard_stop_ticks_dumping);
+	SET_TIMEOUT(gpu_reset_ticks_ss);
+	SET_TIMEOUT(gpu_reset_ticks_cl);
+	SET_TIMEOUT(gpu_reset_ticks_dumping);
+
+#undef SET_TIMEOUT
+
+	js_data->scheduling_period_ns = new_scheduling_period_ns;
+
+	kbase_js_set_timeouts(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&js_data->runpool_mutex);
+
+	dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
+			js_scheduling_period);
+
+	return count;
+}
+
+/**
+ * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
+ *                             entry.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the scheduling period.
+ *
+ * This function is called to get the current period used for the JS scheduling
+ * period.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_js_scheduling_period(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	u32 period;
+	ssize_t ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	period = kbdev->js_data.scheduling_period_ns;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n",
+			period / 1000000);
+
+	return ret;
+}
+
+static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
+		show_js_scheduling_period, set_js_scheduling_period);
+
+
+#ifdef CONFIG_MALI_DEBUG
+static ssize_t set_js_softstop_always(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int ret;
+	int softstop_always;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = kstrtoint(buf, 0, &softstop_always);
+	if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
+		dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
+				"Use format <soft_stop_always>\n");
+		return -EINVAL;
+	}
+
+	kbdev->js_data.softstop_always = (bool) softstop_always;
+	dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
+			(kbdev->js_data.softstop_always) ?
+			"Enabled" : "Disabled");
+	return count;
+}
+
+static ssize_t show_js_softstop_always(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
+
+	if (ret >= PAGE_SIZE) {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/*
+ * By default, soft-stops are disabled when only a single context is present.
+ * The ability to enable soft-stop when only a single context is present can be
+ * used for debug and unit-testing purposes.
+ * (See the CL t6xx_stress_1 unit-test as an example where this feature is used.)
+ */
+static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
+#endif /* CONFIG_MALI_DEBUG */
+
+#ifdef CONFIG_MALI_DEBUG
+typedef void (kbasep_debug_command_func) (struct kbase_device *);
+
+enum kbasep_debug_command_code {
+	KBASEP_DEBUG_COMMAND_DUMPTRACE,
+
+	/* This must be the last enum */
+	KBASEP_DEBUG_COMMAND_COUNT
+};
+
+struct kbasep_debug_command {
+	char *str;
+	kbasep_debug_command_func *func;
+};
+
+/* Debug commands supported by the driver */
+static const struct kbasep_debug_command debug_commands[] = {
+	{
+		.str = "dumptrace",
+		.func = &kbasep_trace_dump,
+	}
+};
+
+/**
+ * show_debug - Show callback for the debug_command sysfs file.
+ *
+ * This function is called to get the contents of the debug_command sysfs
+ * file. This is a list of the available debug commands, separated by newlines.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	int i;
+	ssize_t ret = 0;
+
+	kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
+
+	if (ret >= PAGE_SIZE) {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/**
+ * issue_debug - Store callback for the debug_command sysfs file.
+ *
+ * This function is called when the debug_command sysfs file is written to.
+ * It matches the requested command against the available commands, and if
+ * a matching command is found calls the associated function from
+ * @debug_commands to issue the command.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The value written to the sysfs file
+ * @count:	The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int i;
+
+	kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
+		if (sysfs_streq(debug_commands[i].str, buf)) {
+			debug_commands[i].func(kbdev);
+			return count;
+		}
+	}
+
+	/* Debug Command not found */
+	dev_err(dev, "debug_command: command not known\n");
+	return -EINVAL;
+}
+
+/* The sysfs file debug_command.
+ *
+ * This is used to issue general debug commands to the device driver.
+ * Reading it will produce a list of debug commands, separated by newlines.
+ * Writing to it with one of those commands will issue said command.
+ */
+static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
+#endif /* CONFIG_MALI_DEBUG */
+
+/**
+ * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer to receive the GPU information.
+ *
+ * This function is called to get a description of the present Mali
+ * GPU via the gpuinfo sysfs entry.  This includes the GPU family, the
+ * number of cores, the hardware version and the raw product id.  For
+ * example
+ *
+ *    Mali-T60x 4 cores r0p0 0x6956
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t kbase_show_gpuinfo(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	static const struct gpu_product_id_name {
+		unsigned id;
+		char *name;
+	} gpu_product_id_names[] = {
+		{ .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G71" },
+		{ .id = GPU_ID2_PRODUCT_THEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G72" },
+		{ .id = GPU_ID2_PRODUCT_TSIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G51" },
+		{ .id = GPU_ID2_PRODUCT_TNOX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G76" },
+		{ .id = GPU_ID2_PRODUCT_TDVX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G31" },
+		{ .id = GPU_ID2_PRODUCT_TGOX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G52" },
+		{ .id = GPU_ID2_PRODUCT_TTRX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G77" },
+		{ .id = GPU_ID2_PRODUCT_TBEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-TBEX" },
+		{ .id = GPU_ID2_PRODUCT_LBEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-LBEX" },
+		{ .id = GPU_ID2_PRODUCT_TNAX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-TNAX" },
+		{ .id = GPU_ID2_PRODUCT_TODX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-TODX" },
+		{ .id = GPU_ID2_PRODUCT_LODX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-LODX" },
+	};
+	const char *product_name = "(Unknown Mali GPU)";
+	struct kbase_device *kbdev;
+	u32 gpu_id;
+	unsigned product_id, product_id_mask;
+	unsigned i;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+	product_id_mask = GPU_ID2_PRODUCT_MODEL >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+	for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
+		const struct gpu_product_id_name *p = &gpu_product_id_names[i];
+
+		if ((p->id & product_id_mask) ==
+		    (product_id & product_id_mask)) {
+			product_name = p->name;
+			break;
+		}
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%s %d cores r%dp%d 0x%04X\n",
+		product_name, kbdev->gpu_props.num_cores,
+		(gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
+		(gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
+		product_id);
+}
+static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
+
+/**
+ * set_dvfs_period - Store callback for the dvfs_period sysfs file.
+ * @dev:   The device this sysfs file is for
+ * @attr:  The attributes of the sysfs file
+ * @buf:   The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the dvfs_period sysfs file is written to. It
+ * checks the data written, and if valid updates the DVFS period variable,
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_dvfs_period(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int ret;
+	int dvfs_period;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = kstrtoint(buf, 0, &dvfs_period);
+	if (ret || dvfs_period <= 0) {
+		dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
+				"Use format <dvfs_period_ms>\n");
+		return -EINVAL;
+	}
+
+	kbdev->pm.dvfs_period = dvfs_period;
+	dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
+
+	return count;
+}
+
+/**
+ * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the DVFS period.
+ *
+ * This function is called to get the current period used for the DVFS sample
+ * timer.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_dvfs_period(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
+
+	return ret;
+}
+
+static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
+		set_dvfs_period);
+
+/**
+ * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
+ * @dev:   The device this sysfs file is for
+ * @attr:  The attributes of the sysfs file
+ * @buf:   The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the pm_poweroff sysfs file is written to.
+ *
+ * This file contains three values separated by whitespace. The values
+ * are gpu_poweroff_time (the period of the poweroff timer, in ns),
+ * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
+ * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
+ * ticks before the GPU is powered off), in that order.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_pm_poweroff(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	struct kbasep_pm_tick_timer_state *stt;
+	int items;
+	u64 gpu_poweroff_time;
+	unsigned int poweroff_shader_ticks, poweroff_gpu_ticks;
+	unsigned long flags;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
+			&poweroff_shader_ticks,
+			&poweroff_gpu_ticks);
+	if (items != 3) {
+		dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
+				"Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	stt = &kbdev->pm.backend.shader_tick_timer;
+	stt->configured_interval = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
+	stt->configured_ticks = poweroff_shader_ticks;
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (poweroff_gpu_ticks != 0)
+		dev_warn(kbdev->dev, "Separate GPU poweroff delay no longer supported.\n");
+
+	return count;
+}
+
+/**
+ * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the poweroff settings.
+ *
+ * This function is called to get the current poweroff timer settings, in the
+ * same three-value format accepted on write.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_pm_poweroff(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	struct kbasep_pm_tick_timer_state *stt;
+	ssize_t ret;
+	unsigned long flags;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	stt = &kbdev->pm.backend.shader_tick_timer;
+	ret = scnprintf(buf, PAGE_SIZE, "%llu %u 0\n",
+			ktime_to_ns(stt->configured_interval),
+			stt->configured_ticks);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return ret;
+}
+
+static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
+		set_pm_poweroff);
+
+/**
+ * set_reset_timeout - Store callback for the reset_timeout sysfs file.
+ * @dev:   The device this sysfs file is for
+ * @attr:  The attributes of the sysfs file
+ * @buf:   The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the reset_timeout sysfs file is written to. It
+ * checks the data written, and if valid updates the reset timeout.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_reset_timeout(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int ret;
+	int reset_timeout;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = kstrtoint(buf, 0, &reset_timeout);
+	if (ret || reset_timeout <= 0) {
+		dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
+				"Use format <reset_timeout_ms>\n");
+		return -EINVAL;
+	}
+
+	kbdev->reset_timeout_ms = reset_timeout;
+	dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
+
+	return count;
+}
+
+/**
+ * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the reset timeout.
+ *
+ * This function is called to get the current reset timeout.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_reset_timeout(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
+
+	return ret;
+}
+
+static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
+		set_reset_timeout);
+
+
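+/*
+ * The mem_pool_size and mem_pool_max_size files below expose, per memory
+ * group, the current and maximum number of small (order-0) pages held in
+ * the device memory pools; writing a list of values resizes the pools or
+ * their limits accordingly.
+ */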
+static ssize_t show_mem_pool_size(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *const kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
+		kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
+		kbase_mem_pool_debugfs_size);
+}
+
+static ssize_t set_mem_pool_size(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *const kbdev = to_kbase_device(dev);
+	int err;
+
+	if (!kbdev)
+		return -ENODEV;
+
+	err = kbase_debugfs_helper_set_attr_from_string(buf,
+		kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
+		kbase_mem_pool_debugfs_trim);
+
+	return err ? err : count;
+}
+
+static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
+		set_mem_pool_size);
+
+static ssize_t show_mem_pool_max_size(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *const kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
+		kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
+		kbase_mem_pool_debugfs_max_size);
+}
+
+static ssize_t set_mem_pool_max_size(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *const kbdev = to_kbase_device(dev);
+	int err;
+
+	if (!kbdev)
+		return -ENODEV;
+
+	err = kbase_debugfs_helper_set_attr_from_string(buf,
+		kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
+		kbase_mem_pool_debugfs_set_max_size);
+
+	return err ? err : count;
+}
+
+static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
+		set_mem_pool_max_size);
+
+/**
+ * show_lp_mem_pool_size - Show size of the large memory pages pool.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the pool size.
+ *
+ * This function is called to get the number of large memory pages which
+ * currently populate the kbdev pool.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_lp_mem_pool_size(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *const kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
+		kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
+		kbase_mem_pool_debugfs_size);
+}
+
+/**
+ * set_lp_mem_pool_size - Set size of the large memory pages pool.
+ * @dev:   The device this sysfs file is for.
+ * @attr:  The attributes of the sysfs file.
+ * @buf:   The value written to the sysfs file.
+ * @count: The number of bytes written to the sysfs file.
+ *
+ * This function is called to set the number of large memory pages which should
+ * populate the kbdev pool. This may cause existing pages to be removed from
+ * the pool, or new pages to be created and then added to the pool.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_lp_mem_pool_size(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *const kbdev = to_kbase_device(dev);
+	int err;
+
+	if (!kbdev)
+		return -ENODEV;
+
+	err = kbase_debugfs_helper_set_attr_from_string(buf,
+		kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
+		kbase_mem_pool_debugfs_trim);
+
+	return err ? err : count;
+}
+
+static DEVICE_ATTR(lp_mem_pool_size, S_IRUGO | S_IWUSR, show_lp_mem_pool_size,
+		set_lp_mem_pool_size);
+
+/**
+ * show_lp_mem_pool_max_size - Show maximum size of the large memory pages pool.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the pool size.
+ *
+ * This function is called to get the maximum number of large memory pages
+ * that the kbdev pool can possibly contain.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_lp_mem_pool_max_size(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *const kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
+		kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
+		kbase_mem_pool_debugfs_max_size);
+}
+
+/**
+ * set_lp_mem_pool_max_size - Set maximum size of the large memory pages pool.
+ * @dev:   The device this sysfs file is for.
+ * @attr:  The attributes of the sysfs file.
+ * @buf:   The value written to the sysfs file.
+ * @count: The number of bytes written to the sysfs file.
+ *
+ * This function is called to set the maximum number of large memory pages
+ * that the kbdev pool can possibly contain.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_lp_mem_pool_max_size(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *const kbdev = to_kbase_device(dev);
+	int err;
+
+	if (!kbdev)
+		return -ENODEV;
+
+	err = kbase_debugfs_helper_set_attr_from_string(buf,
+		kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
+		kbase_mem_pool_debugfs_set_max_size);
+
+	return err ? err : count;
+}
+
+static DEVICE_ATTR(lp_mem_pool_max_size, S_IRUGO | S_IWUSR, show_lp_mem_pool_max_size,
+		set_lp_mem_pool_max_size);
+
+/**
+ * show_js_ctx_scheduling_mode - Show callback for js_ctx_scheduling_mode sysfs
+ *                               entry.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the context scheduling mode information.
+ *
+ * This function is called to get the context scheduling mode being used by JS.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_js_ctx_scheduling_mode(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", kbdev->js_ctx_scheduling_mode);
+}
+
+/**
+ * set_js_ctx_scheduling_mode - Set callback for js_ctx_scheduling_mode sysfs
+ *                              entry.
+ * @dev:   The device this sysfs file is for.
+ * @attr:  The attributes of the sysfs file.
+ * @buf:   The value written to the sysfs file.
+ * @count: The number of bytes written to the sysfs file.
+ *
+ * This function is called when the js_ctx_scheduling_mode sysfs file is written
+ * to. It checks the data written, and if valid updates the ctx scheduling
+ * mode used by JS.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_js_ctx_scheduling_mode(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_context *kctx;
+	u32 new_js_ctx_scheduling_mode;
+	struct kbase_device *kbdev;
+	unsigned long flags;
+	int ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = kstrtouint(buf, 0, &new_js_ctx_scheduling_mode);
+	if (ret || new_js_ctx_scheduling_mode >= KBASE_JS_PRIORITY_MODE_COUNT) {
+		dev_err(kbdev->dev, "Couldn't process js_ctx_scheduling_mode write operation.\n"
+				"Use format <js_ctx_scheduling_mode>\n");
+		return -EINVAL;
+	}
+
+	if (new_js_ctx_scheduling_mode == kbdev->js_ctx_scheduling_mode)
+		return count;
+
+	mutex_lock(&kbdev->kctx_list_lock);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	/* Update the context priority mode */
+	kbdev->js_ctx_scheduling_mode = new_js_ctx_scheduling_mode;
+
+	/* Adjust priority of all the contexts as per the new mode */
+	list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link)
+		kbase_js_update_ctx_priority(kctx);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->kctx_list_lock);
+
+	dev_dbg(kbdev->dev, "JS ctx scheduling mode: %u\n", new_js_ctx_scheduling_mode);
+
+	return count;
+}
+
+static DEVICE_ATTR(js_ctx_scheduling_mode, S_IRUGO | S_IWUSR,
+		show_js_ctx_scheduling_mode,
+		set_js_ctx_scheduling_mode);
+
+#ifdef MALI_KBASE_BUILD
+#ifdef CONFIG_DEBUG_FS
+
+/* Number of entries in serialize_jobs_settings[] */
+#define NR_SERIALIZE_JOBS_SETTINGS 5
+/* Maximum string length in serialize_jobs_settings[].name */
+#define MAX_SERIALIZE_JOBS_NAME_LEN 16
+
+static struct
+{
+	char *name;
+	u8 setting;
+} serialize_jobs_settings[NR_SERIALIZE_JOBS_SETTINGS] = {
+	{"none", 0},
+	{"intra-slot", KBASE_SERIALIZE_INTRA_SLOT},
+	{"inter-slot", KBASE_SERIALIZE_INTER_SLOT},
+	{"full", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT},
+	{"full-reset", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT |
+			KBASE_SERIALIZE_RESET}
+};
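+
+/*
+ * The settings above are cumulative combinations of the KBASE_SERIALIZE_*
+ * flags: "full" serializes jobs both within and across job slots, and
+ * "full-reset" additionally resets the GPU between jobs.
+ */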
+
+/**
+ * kbasep_serialize_jobs_seq_show - Show callback for the serialize_jobs debugfs
+ *                                  file
+ * @sfile: seq_file pointer
+ * @data:  Private callback data
+ *
+ * This function is called to get the contents of the serialize_jobs debugfs
+ * file. This is a list of the available settings with the currently active one
+ * surrounded by square brackets.
+ *
+ * Return: 0 on success, or an error code on error
+ */
+static int kbasep_serialize_jobs_seq_show(struct seq_file *sfile, void *data)
+{
+	struct kbase_device *kbdev = sfile->private;
+	int i;
+
+	CSTD_UNUSED(data);
+
+	for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
+		if (kbdev->serialize_jobs == serialize_jobs_settings[i].setting)
+			seq_printf(sfile, "[%s] ",
+					serialize_jobs_settings[i].name);
+		else
+			seq_printf(sfile, "%s ",
+					serialize_jobs_settings[i].name);
+	}
+
+	seq_puts(sfile, "\n");
+
+	return 0;
+}
+
+/**
+ * kbasep_serialize_jobs_debugfs_write - Store callback for the serialize_jobs
+ *                                       debugfs file.
+ * @file:  File pointer
+ * @ubuf:  User buffer containing data to store
+ * @count: Number of bytes in user buffer
+ * @ppos:  File position
+ *
+ * This function is called when the serialize_jobs debugfs file is written to.
+ * It matches the requested setting against the available settings and if a
+ * matching setting is found updates kbdev->serialize_jobs.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t kbasep_serialize_jobs_debugfs_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct kbase_device *kbdev = s->private;
+	char buf[MAX_SERIALIZE_JOBS_NAME_LEN];
+	int i;
+	bool valid = false;
+
+	CSTD_UNUSED(ppos);
+
+	count = min_t(size_t, sizeof(buf) - 1, count);
+	if (copy_from_user(buf, ubuf, count))
+		return -EFAULT;
+
+	buf[count] = 0;
+
+	for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
+		if (sysfs_streq(serialize_jobs_settings[i].name, buf)) {
+			kbdev->serialize_jobs =
+					serialize_jobs_settings[i].setting;
+			valid = true;
+			break;
+		}
+	}
+
+	if (!valid) {
+		dev_err(kbdev->dev, "serialize_jobs: invalid setting\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+/**
+ * kbasep_serialize_jobs_debugfs_open - Open callback for the serialize_jobs
+ *                                     debugfs file
+ * @in:   inode pointer
+ * @file: file pointer
+ *
+ * Return: Zero on success, error code on failure
+ */
+static int kbasep_serialize_jobs_debugfs_open(struct inode *in,
+		struct file *file)
+{
+	return single_open(file, kbasep_serialize_jobs_seq_show, in->i_private);
+}
+
+static const struct file_operations kbasep_serialize_jobs_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = kbasep_serialize_jobs_debugfs_open,
+	.read = seq_read,
+	.write = kbasep_serialize_jobs_debugfs_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+#endif /* CONFIG_DEBUG_FS */
+#endif /* MALI_KBASE_BUILD */
+
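+/*
+ * Worker used to disable hardware counters before entering protected mode.
+ * The desired state is sampled again under hwaccess_lock after the
+ * (potentially sleeping) disable call, so a concurrent change is detected
+ * and the disable is undone rather than committed.
+ */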
+static void kbasep_protected_mode_hwcnt_disable_worker(struct work_struct *data)
+{
+	struct kbase_device *kbdev = container_of(data, struct kbase_device,
+		protected_mode_hwcnt_disable_work);
+	unsigned long flags;
+	bool do_disable;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	do_disable = !kbdev->protected_mode_hwcnt_desired &&
+		!kbdev->protected_mode_hwcnt_disabled;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (!do_disable)
+		return;
+
+	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	do_disable = !kbdev->protected_mode_hwcnt_desired &&
+		!kbdev->protected_mode_hwcnt_disabled;
+
+	if (do_disable) {
+		/* Protected mode state did not change while we were doing the
+		 * disable, so commit the work we just performed and continue
+		 * the state machine.
+		 */
+		kbdev->protected_mode_hwcnt_disabled = true;
+		kbase_backend_slot_update(kbdev);
+	} else {
+		/* Protected mode state was updated while we were doing the
+		 * disable, so we need to undo the disable we just performed.
+		 */
+		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+static int kbasep_protected_mode_init(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_OF
+	struct device_node *protected_node;
+	struct platform_device *pdev;
+	struct protected_mode_device *protected_dev;
+#endif
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
+		/* Use native protected ops */
+		kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev),
+				GFP_KERNEL);
+		if (!kbdev->protected_dev)
+			return -ENOMEM;
+		kbdev->protected_dev->data = kbdev;
+		kbdev->protected_ops = &kbase_native_protected_ops;
+		kbdev->protected_mode_support = true;
+		INIT_WORK(&kbdev->protected_mode_hwcnt_disable_work,
+			kbasep_protected_mode_hwcnt_disable_worker);
+		kbdev->protected_mode_hwcnt_desired = true;
+		kbdev->protected_mode_hwcnt_disabled = false;
+		return 0;
+	}
+
+	kbdev->protected_mode_support = false;
+
+#ifdef CONFIG_OF
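+	/* No native support: look for an external protected mode switcher in
+	 * the device tree. "secure-mode-switcher" is an alternative name for
+	 * the same property. */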
+	protected_node = of_parse_phandle(kbdev->dev->of_node,
+			"protected-mode-switcher", 0);
+
+	if (!protected_node)
+		protected_node = of_parse_phandle(kbdev->dev->of_node,
+				"secure-mode-switcher", 0);
+
+	if (!protected_node) {
+		/* If protected_node cannot be looked up then we assume
+		 * protected mode is not supported on this platform. */
+		dev_info(kbdev->dev, "Protected mode not available\n");
+		return 0;
+	}
+
+	pdev = of_find_device_by_node(protected_node);
+	if (!pdev)
+		return -EINVAL;
+
+	protected_dev = platform_get_drvdata(pdev);
+	if (!protected_dev)
+		return -EPROBE_DEFER;
+
+	kbdev->protected_ops = &protected_dev->ops;
+	kbdev->protected_dev = protected_dev;
+
+	if (kbdev->protected_ops) {
+		int err;
+
+		/* Make sure protected mode is disabled on startup */
+		mutex_lock(&kbdev->pm.lock);
+		err = kbdev->protected_ops->protected_mode_disable(
+				kbdev->protected_dev);
+		mutex_unlock(&kbdev->pm.lock);
+
+		/* protected_mode_disable() returns -EINVAL if not supported */
+		kbdev->protected_mode_support = (err != -EINVAL);
+	}
+#endif
+	return 0;
+}
+
+static void kbasep_protected_mode_term(struct kbase_device *kbdev)
+{
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
+		cancel_work_sync(&kbdev->protected_mode_hwcnt_disable_work);
+		kfree(kbdev->protected_dev);
+	}
+}
+
+#ifdef CONFIG_MALI_NO_MALI
+static int kbase_common_reg_map(struct kbase_device *kbdev)
+{
+	return 0;
+}
+static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
+{
+}
+#else /* CONFIG_MALI_NO_MALI */
+static int kbase_common_reg_map(struct kbase_device *kbdev)
+{
+	int err = 0;
+
+	if (!request_mem_region(kbdev->reg_start, kbdev->reg_size,
+				dev_name(kbdev->dev))) {
+		dev_err(kbdev->dev, "Register window unavailable\n");
+		err = -EIO;
+		goto out_region;
+	}
+
+	kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
+	if (!kbdev->reg) {
+		dev_err(kbdev->dev, "Can't remap register window\n");
+		err = -EINVAL;
+		goto out_ioremap;
+	}
+
+	return err;
+
+ out_ioremap:
+	release_mem_region(kbdev->reg_start, kbdev->reg_size);
+ out_region:
+	return err;
+}
+
+static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
+{
+	if (kbdev->reg) {
+		iounmap(kbdev->reg);
+		release_mem_region(kbdev->reg_start, kbdev->reg_size);
+		kbdev->reg = NULL;
+		kbdev->reg_start = 0;
+		kbdev->reg_size = 0;
+	}
+}
+#endif /* CONFIG_MALI_NO_MALI */
+
+static int registers_map(struct kbase_device * const kbdev)
+{
+	/* The first memory resource is the physical address of the GPU
+	 * registers. */
+	struct platform_device *pdev = to_platform_device(kbdev->dev);
+	struct resource *reg_res;
+	int err;
+
+	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!reg_res) {
+		dev_err(kbdev->dev, "Invalid register resource\n");
+		return -ENOENT;
+	}
+
+	kbdev->reg_start = reg_res->start;
+	kbdev->reg_size = resource_size(reg_res);
+
+	err = kbase_common_reg_map(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Failed to map registers\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static void registers_unmap(struct kbase_device *kbdev)
+{
+	kbase_common_reg_unmap(kbdev);
+}
+
+static int power_control_init(struct platform_device *pdev)
+{
+#if KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE || !defined(CONFIG_OF)
+	/* Power control initialization requires at least the capability to get
+	 * regulators and clocks from the device tree, as well as parsing
+	 * arrays of unsigned integer values.
+	 *
+	 * The whole initialization process shall simply be skipped if the
+	 * minimum capability is not available.
+	 */
+	return 0;
+#else
+	struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+	int err = 0;
+	unsigned int i;
+#if defined(CONFIG_REGULATOR)
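+	/* Each name is looked up as a "<name>-supply" property in the device
+	 * tree node of the GPU. */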
+	static const char *regulator_names[] = {
+		"mali", "shadercores"
+	};
+	BUILD_BUG_ON(ARRAY_SIZE(regulator_names) < BASE_MAX_NR_CLOCKS_REGULATORS);
+#endif /* CONFIG_REGULATOR */
+
+	if (!kbdev)
+		return -ENODEV;
+
+#if defined(CONFIG_REGULATOR)
+	/* Since the error code EPROBE_DEFER causes the entire probing
+	 * procedure to be restarted from scratch at a later time,
+	 * all regulators will be released before returning.
+	 *
+	 * Any other error is ignored and the driver will continue
+	 * operating with a partial initialization of regulators.
+	 */
+	for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
+		kbdev->regulators[i] = regulator_get_optional(kbdev->dev,
+			regulator_names[i]);
+		if (IS_ERR_OR_NULL(kbdev->regulators[i])) {
+			err = PTR_ERR(kbdev->regulators[i]);
+			kbdev->regulators[i] = NULL;
+			break;
+		}
+	}
+	if (err == -EPROBE_DEFER) {
+		while ((i > 0) && (i < BASE_MAX_NR_CLOCKS_REGULATORS))
+			regulator_put(kbdev->regulators[--i]);
+		return err;
+	}
+
+	kbdev->nr_regulators = i;
+	dev_dbg(&pdev->dev, "Regulators probed: %u\n", kbdev->nr_regulators);
+#endif
+
+	/* Having more clocks than regulators is acceptable, while the
+	 * opposite shall not happen.
+	 *
+	 * Since the error code EPROBE_DEFER causes the entire probing
+	 * procedure to be restarted from scratch at a later time,
+	 * all clocks and regulators will be released before returning.
+	 *
+	 * Any other error is ignored and the driver will continue
+	 * operating with a partial initialization of clocks.
+	 */
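+	/* of_clk_get() fetches clocks by index from the device tree node's
+	 * "clocks" property; the loop stops at the first index that fails. */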
+	for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
+		kbdev->clocks[i] = of_clk_get(kbdev->dev->of_node, i);
+		if (IS_ERR_OR_NULL(kbdev->clocks[i])) {
+			err = PTR_ERR(kbdev->clocks[i]);
+			kbdev->clocks[i] = NULL;
+			break;
+		}
+
+		err = clk_prepare_enable(kbdev->clocks[i]);
+		if (err) {
+			dev_err(kbdev->dev,
+				"Failed to prepare and enable clock (%d)\n",
+				err);
+			clk_put(kbdev->clocks[i]);
+			/* Clear the stale pointer so that power_control_term()
+			 * does not release this clock a second time. */
+			kbdev->clocks[i] = NULL;
+			break;
+		}
+	}
+	if (err == -EPROBE_DEFER) {
+		while ((i > 0) && (i < BASE_MAX_NR_CLOCKS_REGULATORS)) {
+			clk_disable_unprepare(kbdev->clocks[--i]);
+			clk_put(kbdev->clocks[i]);
+		}
+		goto clocks_probe_defer;
+	}
+
+	kbdev->nr_clocks = i;
+	dev_dbg(&pdev->dev, "Clocks probed: %u\n", kbdev->nr_clocks);
+
+	/* Any error in parsing the OPP table from the device file
+	 * shall be ignored. The fact that the table may be absent or wrong
+	 * on the device tree of the platform shouldn't prevent the driver
+	 * from completing its initialization.
+	 */
+#if (KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE && \
+	!defined(LSK_OPPV2_BACKPORT))
+	err = of_init_opp_table(kbdev->dev);
+	CSTD_UNUSED(err);
+#else
+
+#if defined(CONFIG_PM_OPP)
+#if ((KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE) && \
+	defined(CONFIG_REGULATOR))
+	if (kbdev->nr_regulators > 0) {
+		kbdev->opp_table = dev_pm_opp_set_regulators(kbdev->dev,
+			regulator_names, BASE_MAX_NR_CLOCKS_REGULATORS);
+	}
+#endif /* (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE */
+	err = dev_pm_opp_of_add_table(kbdev->dev);
+	CSTD_UNUSED(err);
+#endif /* CONFIG_PM_OPP */
+
+#endif /* KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE */
+	return 0;
+
+clocks_probe_defer:
+#if defined(CONFIG_REGULATOR)
+	for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++)
+		regulator_put(kbdev->regulators[i]);
+#endif
+	return err;
+#endif /* KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE */
+}
+
+static void power_control_term(struct kbase_device *kbdev)
+{
+	unsigned int i;
+
+#if (KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE && \
+	!defined(LSK_OPPV2_BACKPORT))
+#if KERNEL_VERSION(3, 19, 0) <= LINUX_VERSION_CODE
+	of_free_opp_table(kbdev->dev);
+#endif
+#else
+
+#if defined(CONFIG_PM_OPP)
+	dev_pm_opp_of_remove_table(kbdev->dev);
+#if ((KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE) && \
+	defined(CONFIG_REGULATOR))
+	if (!IS_ERR_OR_NULL(kbdev->opp_table))
+		dev_pm_opp_put_regulators(kbdev->opp_table);
+#endif /* (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE */
+#endif /* CONFIG_PM_OPP */
+
+#endif /* KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE */
+
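+	/* The clocks array is populated contiguously from index 0, so stop at
+	 * the first NULL entry. */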
+	for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
+		if (kbdev->clocks[i]) {
+			if (__clk_is_enabled(kbdev->clocks[i]))
+				clk_disable_unprepare(kbdev->clocks[i]);
+			clk_put(kbdev->clocks[i]);
+			kbdev->clocks[i] = NULL;
+		} else
+			break;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+			&& defined(CONFIG_REGULATOR)
+	for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
+		if (kbdev->regulators[i]) {
+			regulator_put(kbdev->regulators[i]);
+			kbdev->regulators[i] = NULL;
+		}
+	}
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+}
+
+#ifdef MALI_KBASE_BUILD
+#ifdef CONFIG_DEBUG_FS
+
+static void trigger_quirks_reload(struct kbase_device *kbdev)
+{
+	kbase_pm_context_active(kbdev);
+	if (kbase_prepare_to_reset_gpu(kbdev))
+		kbase_reset_gpu(kbdev);
+	kbase_pm_context_idle(kbdev);
+}
+
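+/* Generate debugfs get/set accessors (and a file_operations instance) for a
+ * kbdev->hw_quirks_<type> field; a write triggers a GPU reset so that the
+ * new quirk value takes effect. */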
+#define MAKE_QUIRK_ACCESSORS(type) \
+static int type##_quirks_set(void *data, u64 val) \
+{ \
+	struct kbase_device *kbdev; \
+	kbdev = (struct kbase_device *)data; \
+	kbdev->hw_quirks_##type = (u32)val; \
+	trigger_quirks_reload(kbdev); \
+	return 0;\
+} \
+\
+static int type##_quirks_get(void *data, u64 *val) \
+{ \
+	struct kbase_device *kbdev;\
+	kbdev = (struct kbase_device *)data;\
+	*val = kbdev->hw_quirks_##type;\
+	return 0;\
+} \
+DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
+		type##_quirks_set, "%llu\n")
+
+MAKE_QUIRK_ACCESSORS(sc);
+MAKE_QUIRK_ACCESSORS(tiler);
+MAKE_QUIRK_ACCESSORS(mmu);
+MAKE_QUIRK_ACCESSORS(jm);
+
+/**
+ * debugfs_protected_debug_mode_read - "protected_debug_mode" debugfs read
+ * @file: File object to read is for
+ * @buf:  User buffer to populate with data
+ * @len:  Length of user buffer
+ * @ppos: Offset within file object
+ *
+ * Retrieves the current status of protected debug mode
+ * (0 = disabled, 1 = enabled)
+ *
+ * Return: Number of bytes added to user buffer
+ */
+static ssize_t debugfs_protected_debug_mode_read(struct file *file,
+				char __user *buf, size_t len, loff_t *ppos)
+{
+	struct kbase_device *kbdev = (struct kbase_device *)file->private_data;
+	u32 gpu_status;
+	ssize_t ret_val;
+
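+	/* GPU registers can only be read while the GPU is powered, so take a
+	 * PM context reference around the register access. */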
+	kbase_pm_context_active(kbdev);
+	gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS));
+	kbase_pm_context_idle(kbdev);
+
+	if (gpu_status & GPU_DBGEN)
+		ret_val = simple_read_from_buffer(buf, len, ppos, "1\n", 2);
+	else
+		ret_val = simple_read_from_buffer(buf, len, ppos, "0\n", 2);
+
+	return ret_val;
+}
+
+/*
+ * fops_protected_debug_mode - "protected_debug_mode" debugfs fops
+ *
+ * Contains the file operations for the "protected_debug_mode" debugfs file
+ */
+static const struct file_operations fops_protected_debug_mode = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = debugfs_protected_debug_mode_read,
+	.llseek = default_llseek,
+};
+
+static int kbase_device_debugfs_mem_pool_max_size_show(struct seq_file *sfile,
+	void *data)
+{
+	CSTD_UNUSED(data);
+	return kbase_debugfs_helper_seq_read(sfile,
+		MEMORY_GROUP_MANAGER_NR_GROUPS,
+		kbase_mem_pool_config_debugfs_max_size);
+}
+
+static ssize_t kbase_device_debugfs_mem_pool_max_size_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int err = 0;
+
+	CSTD_UNUSED(ppos);
+	err = kbase_debugfs_helper_seq_write(file, ubuf, count,
+		MEMORY_GROUP_MANAGER_NR_GROUPS,
+		kbase_mem_pool_config_debugfs_set_max_size);
+
+	return err ? err : count;
+}
+
+static int kbase_device_debugfs_mem_pool_max_size_open(struct inode *in,
+	struct file *file)
+{
+	return single_open(file, kbase_device_debugfs_mem_pool_max_size_show,
+		in->i_private);
+}
+
+static const struct file_operations
+	kbase_device_debugfs_mem_pool_max_size_fops = {
+	.owner = THIS_MODULE,
+	.open = kbase_device_debugfs_mem_pool_max_size_open,
+	.read = seq_read,
+	.write = kbase_device_debugfs_mem_pool_max_size_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int kbase_device_debugfs_init(struct kbase_device *kbdev)
+{
+	struct dentry *debugfs_ctx_defaults_directory;
+	int err;
+
+	kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
+			NULL);
+	if (!kbdev->mali_debugfs_directory) {
+		dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
+			kbdev->mali_debugfs_directory);
+	if (!kbdev->debugfs_ctx_directory) {
+		dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
+			kbdev->debugfs_ctx_directory);
+	if (!debugfs_ctx_defaults_directory) {
+		dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+#if !MALI_CUSTOMER_RELEASE
+	kbasep_regs_dump_debugfs_init(kbdev);
+#endif /* !MALI_CUSTOMER_RELEASE */
+	kbasep_regs_history_debugfs_init(kbdev);
+
+	kbase_debug_job_fault_debugfs_init(kbdev);
+	kbasep_gpu_memory_debugfs_init(kbdev);
+	kbase_as_fault_debugfs_init(kbdev);
+	/* fops_* variables created by invocations of macro
+	 * MAKE_QUIRK_ACCESSORS() above. */
+	debugfs_create_file("quirks_sc", 0644,
+			kbdev->mali_debugfs_directory, kbdev,
+			&fops_sc_quirks);
+	debugfs_create_file("quirks_tiler", 0644,
+			kbdev->mali_debugfs_directory, kbdev,
+			&fops_tiler_quirks);
+	debugfs_create_file("quirks_mmu", 0644,
+			kbdev->mali_debugfs_directory, kbdev,
+			&fops_mmu_quirks);
+	debugfs_create_file("quirks_jm", 0644,
+			kbdev->mali_debugfs_directory, kbdev,
+			&fops_jm_quirks);
+
+	debugfs_create_bool("infinite_cache", 0644,
+			debugfs_ctx_defaults_directory,
+			&kbdev->infinite_cache_active_default);
+
+	debugfs_create_file("mem_pool_max_size", 0644,
+			debugfs_ctx_defaults_directory,
+			&kbdev->mem_pool_defaults.small,
+			&kbase_device_debugfs_mem_pool_max_size_fops);
+
+	debugfs_create_file("lp_mem_pool_max_size", 0644,
+			debugfs_ctx_defaults_directory,
+			&kbdev->mem_pool_defaults.large,
+			&kbase_device_debugfs_mem_pool_max_size_fops);
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
+		debugfs_create_file("protected_debug_mode", S_IRUGO,
+				kbdev->mali_debugfs_directory, kbdev,
+				&fops_protected_debug_mode);
+	}
+
+#if KBASE_TRACE_ENABLE
+	kbasep_trace_debugfs_init(kbdev);
+#endif /* KBASE_TRACE_ENABLE */
+
+#ifdef CONFIG_MALI_DEVFREQ
+#ifdef CONFIG_DEVFREQ_THERMAL
+	if (kbdev->inited_subsys & inited_devfreq)
+		kbase_ipa_debugfs_init(kbdev);
+#endif /* CONFIG_DEVFREQ_THERMAL */
+#endif /* CONFIG_MALI_DEVFREQ */
+
+	debugfs_create_file("serialize_jobs", S_IRUGO | S_IWUSR,
+			kbdev->mali_debugfs_directory, kbdev,
+			&kbasep_serialize_jobs_debugfs_fops);
+
+	return 0;
+
+out:
+	debugfs_remove_recursive(kbdev->mali_debugfs_directory);
+	return err;
+}
+
+static void kbase_device_debugfs_term(struct kbase_device *kbdev)
+{
+	debugfs_remove_recursive(kbdev->mali_debugfs_directory);
+}
+
+#else /* CONFIG_DEBUG_FS */
+static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
+{
+	return 0;
+}
+
+static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
+#endif /* CONFIG_DEBUG_FS */
+#endif /* MALI_KBASE_BUILD */
+
+static void kbase_device_coherency_init(struct kbase_device *kbdev,
+		unsigned int prod_id)
+{
+#ifdef CONFIG_OF
+	u32 supported_coherency_bitmap =
+		kbdev->gpu_props.props.raw_props.coherency_mode;
+	const void *coherency_override_dts;
+	u32 override_coherency;
+
+	/* Only for tMIx:
+	 * (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
+	 * documented for tMIx so force correct value here.
+	 */
+	if (GPU_ID2_MODEL_MATCH_VALUE(prod_id) ==
+			GPU_ID2_PRODUCT_TMIX)
+		if (supported_coherency_bitmap ==
+				COHERENCY_FEATURE_BIT(COHERENCY_ACE))
+			supported_coherency_bitmap |=
+				COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
+
+#endif /* CONFIG_OF */
+
+	kbdev->system_coherency = COHERENCY_NONE;
+
+	/* device tree may override the coherency */
+#ifdef CONFIG_OF
+	coherency_override_dts = of_get_property(kbdev->dev->of_node,
+						"system-coherency",
+						NULL);
+	if (coherency_override_dts) {
+
+		override_coherency = be32_to_cpup(coherency_override_dts);
+
+		if ((override_coherency <= COHERENCY_NONE) &&
+			(supported_coherency_bitmap &
+			 COHERENCY_FEATURE_BIT(override_coherency))) {
+
+			kbdev->system_coherency = override_coherency;
+
+			dev_info(kbdev->dev,
+				"Using coherency mode %u set from dtb",
+				override_coherency);
+		} else
+			dev_warn(kbdev->dev,
+				"Ignoring unsupported coherency mode %u set from dtb",
+				override_coherency);
+	}
+
+#endif /* CONFIG_OF */
+
+	kbdev->gpu_props.props.raw_props.coherency_mode =
+		kbdev->system_coherency;
+}
+
+#ifdef CONFIG_MALI_BUSLOG
+
+/* Callback used by the kbase bus logger client, to initiate a GPU reset
+ * when the bus log is restarted.  GPU reset is used as reference point
+ * in HW bus log analyses.
+ */
+static void kbase_logging_started_cb(void *data)
+{
+	struct kbase_device *kbdev = (struct kbase_device *)data;
+
+	if (kbase_prepare_to_reset_gpu(kbdev))
+		kbase_reset_gpu(kbdev);
+	dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
+}
+#endif
+
+static struct attribute *kbase_attrs[] = {
+#ifdef CONFIG_MALI_DEBUG
+	&dev_attr_debug_command.attr,
+	&dev_attr_js_softstop_always.attr,
+#endif
+	&dev_attr_js_timeouts.attr,
+	&dev_attr_soft_job_timeout.attr,
+	&dev_attr_gpuinfo.attr,
+	&dev_attr_dvfs_period.attr,
+	&dev_attr_pm_poweroff.attr,
+	&dev_attr_reset_timeout.attr,
+	&dev_attr_js_scheduling_period.attr,
+	&dev_attr_power_policy.attr,
+	&dev_attr_core_mask.attr,
+	&dev_attr_mem_pool_size.attr,
+	&dev_attr_mem_pool_max_size.attr,
+	&dev_attr_lp_mem_pool_size.attr,
+	&dev_attr_lp_mem_pool_max_size.attr,
+	&dev_attr_js_ctx_scheduling_mode.attr,
+	NULL
+};
+
+static const struct attribute_group kbase_attr_group = {
+	.attrs = kbase_attrs,
+};
+
+static int kbase_platform_device_remove(struct platform_device *pdev)
+{
+	struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+	const struct list_head *dev_list;
+
+	if (!kbdev)
+		return -ENODEV;
+
+	kfree(kbdev->gpu_props.prop_buffer);
+
+#ifdef CONFIG_MALI_BUSLOG
+	if (kbdev->inited_subsys & inited_buslogger) {
+		bl_core_client_unregister(kbdev->buslogger);
+		kbdev->inited_subsys &= ~inited_buslogger;
+	}
+#endif
+
+	if (kbdev->inited_subsys & inited_dev_list) {
+		dev_list = kbase_dev_list_get();
+		list_del(&kbdev->entry);
+		kbase_dev_list_put(dev_list);
+		kbdev->inited_subsys &= ~inited_dev_list;
+	}
+
+	if (kbdev->inited_subsys & inited_misc_register) {
+		misc_deregister(&kbdev->mdev);
+		kbdev->inited_subsys &= ~inited_misc_register;
+	}
+
+	if (kbdev->inited_subsys & inited_sysfs_group) {
+		sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
+		kbdev->inited_subsys &= ~inited_sysfs_group;
+	}
+
+	if (kbdev->inited_subsys & inited_get_device) {
+		put_device(kbdev->dev);
+		kbdev->inited_subsys &= ~inited_get_device;
+	}
+
+#ifdef MALI_KBASE_BUILD
+	if (kbdev->inited_subsys & inited_debugfs) {
+		kbase_device_debugfs_term(kbdev);
+		kbdev->inited_subsys &= ~inited_debugfs;
+	}
+#endif
+
+	if (kbdev->inited_subsys & inited_job_fault) {
+		kbase_debug_job_fault_dev_term(kbdev);
+		kbdev->inited_subsys &= ~inited_job_fault;
+	}
+
+	if (kbdev->inited_subsys & inited_backend_late) {
+		kbase_backend_late_term(kbdev);
+		kbdev->inited_subsys &= ~inited_backend_late;
+	}
+
+	if (kbdev->inited_subsys & inited_vinstr) {
+		kbase_vinstr_term(kbdev->vinstr_ctx);
+		kbdev->inited_subsys &= ~inited_vinstr;
+	}
+
+	if (kbdev->inited_subsys & inited_hwcnt_gpu_virt) {
+		kbase_hwcnt_virtualizer_term(kbdev->hwcnt_gpu_virt);
+		kbdev->inited_subsys &= ~inited_hwcnt_gpu_virt;
+	}
+
+	if (kbdev->inited_subsys & inited_hwcnt_gpu_ctx) {
+		kbase_hwcnt_context_term(kbdev->hwcnt_gpu_ctx);
+		kbdev->inited_subsys &= ~inited_hwcnt_gpu_ctx;
+	}
+
+	if (kbdev->inited_subsys & inited_hwcnt_gpu_iface) {
+		kbase_hwcnt_backend_gpu_destroy(&kbdev->hwcnt_gpu_iface);
+		kbdev->inited_subsys &= ~inited_hwcnt_gpu_iface;
+	}
+
+	if (kbdev->inited_subsys & inited_tlstream) {
+		kbase_timeline_term(kbdev->timeline);
+		kbdev->inited_subsys &= ~inited_tlstream;
+	}
+
+	/* Bring job and mem sys to a halt before we continue termination */
+
+	if (kbdev->inited_subsys & inited_js)
+		kbasep_js_devdata_halt(kbdev);
+
+	if (kbdev->inited_subsys & inited_mem)
+		kbase_mem_halt(kbdev);
+
+	if (kbdev->inited_subsys & inited_protected) {
+		kbasep_protected_mode_term(kbdev);
+		kbdev->inited_subsys &= ~inited_protected;
+	}
+
+	if (kbdev->inited_subsys & inited_js) {
+		kbasep_js_devdata_term(kbdev);
+		kbdev->inited_subsys &= ~inited_js;
+	}
+
+	if (kbdev->inited_subsys & inited_mem) {
+		kbase_mem_term(kbdev);
+		kbdev->inited_subsys &= ~inited_mem;
+	}
+
+	if (kbdev->inited_subsys & inited_ctx_sched) {
+		kbase_ctx_sched_term(kbdev);
+		kbdev->inited_subsys &= ~inited_ctx_sched;
+	}
+
+	if (kbdev->inited_subsys & inited_device) {
+		kbase_device_term(kbdev);
+		kbdev->inited_subsys &= ~inited_device;
+	}
+
+	if (kbdev->inited_subsys & inited_backend_early) {
+		kbase_backend_early_term(kbdev);
+		kbdev->inited_subsys &= ~inited_backend_early;
+	}
+
+	if (kbdev->inited_subsys & inited_io_history) {
+		kbase_io_history_term(&kbdev->io_history);
+		kbdev->inited_subsys &= ~inited_io_history;
+	}
+
+	if (kbdev->inited_subsys & inited_power_control) {
+		power_control_term(kbdev);
+		kbdev->inited_subsys &= ~inited_power_control;
+	}
+
+	if (kbdev->inited_subsys & inited_registers_map) {
+		registers_unmap(kbdev);
+		kbdev->inited_subsys &= ~inited_registers_map;
+	}
+
+#ifdef CONFIG_MALI_NO_MALI
+	if (kbdev->inited_subsys & inited_gpu_device) {
+		gpu_device_destroy(kbdev);
+		kbdev->inited_subsys &= ~inited_gpu_device;
+	}
+#endif /* CONFIG_MALI_NO_MALI */
+
+	if (kbdev->inited_subsys != 0)
+		dev_err(kbdev->dev, "Missing sub system termination\n");
+
+	kbase_device_free(kbdev);
+
+	return 0;
+}
+
+void kbase_backend_devfreq_term(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+	if (kbdev->inited_subsys & inited_devfreq) {
+		kbase_devfreq_term(kbdev);
+		kbdev->inited_subsys &= ~inited_devfreq;
+	}
+#endif
+}
+
+int kbase_backend_devfreq_init(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+	/* Devfreq uses hardware counters, so it must be initialized after the
+	 * hardware counter subsystem. */
+	int err = kbase_devfreq_init(kbdev);
+
+	if (!err)
+		kbdev->inited_subsys |= inited_devfreq;
+	else
+		dev_err(kbdev->dev, "Continuing without devfreq\n");
+#endif /* CONFIG_MALI_DEVFREQ */
+	return 0;
+}
+
+/* Number of register-access entries in the history buffer allocated at
+ * initialization time. The buffer size can be changed later via debugfs. */
+#define KBASEP_DEFAULT_REGISTER_HISTORY_SIZE ((u16)512)
+
+static int kbase_platform_device_probe(struct platform_device *pdev)
+{
+	struct kbase_device *kbdev;
+	struct mali_base_gpu_core_props *core_props;
+	u32 gpu_id;
+	unsigned int prod_id;
+	const struct list_head *dev_list;
+	int err = 0;
+
+	kbdev = kbase_device_alloc();
+	if (!kbdev) {
+		dev_err(&pdev->dev, "Allocate device failed\n");
+		kbase_platform_device_remove(pdev);
+		return -ENOMEM;
+	}
+
+	kbdev->dev = &pdev->dev;
+	dev_set_drvdata(kbdev->dev, kbdev);
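+
+	/* From this point on, each failure path calls
+	 * kbase_platform_device_remove(), which tears down exactly the
+	 * subsystems recorded in kbdev->inited_subsys so far. */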
+
+#ifdef CONFIG_MALI_NO_MALI
+	err = gpu_device_create(kbdev);
+	if (err) {
+		dev_err(&pdev->dev, "Dummy model initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_gpu_device;
+#endif /* CONFIG_MALI_NO_MALI */
+
+	err = assign_irqs(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "IRQ search failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+
+	err = registers_map(kbdev);
+	if (err) {
+		dev_err(&pdev->dev, "Register map failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_registers_map;
+
+	err = power_control_init(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Power control initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_power_control;
+
+	err = kbase_io_history_init(&kbdev->io_history,
+			KBASEP_DEFAULT_REGISTER_HISTORY_SIZE);
+	if (err) {
+		dev_err(&pdev->dev, "Register access history initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return -ENOMEM;
+	}
+	kbdev->inited_subsys |= inited_io_history;
+
+	err = kbase_backend_early_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Early backend initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_backend_early;
+
+	scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
+			kbase_dev_nr);
+	kbdev->id = kbase_dev_nr;
+
+	kbase_disjoint_init(kbdev);
+
+	/* Obtain the maximum configured GPU frequency. If devfreq is enabled,
+	 * this will be overridden by the highest operating point found.
+	 */
+	core_props = &(kbdev->gpu_props.props.core_props);
+#ifdef GPU_FREQ_KHZ_MAX
+	core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
+#else
+	core_props->gpu_freq_khz_max = DEFAULT_GPU_FREQ_KHZ_MAX;
+#endif
+
+	err = kbase_device_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Device initialization failed (%d)\n", err);
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_device;
+
+	err = kbase_ctx_sched_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Context scheduler initialization failed (%d)\n",
+				err);
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_ctx_sched;
+
+	err = kbase_mem_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_mem;
+
+	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
+	prod_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
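+	/* prod_id is the product ID field of the GPU_ID register value,
+	 * cached in gpu_props during device initialization. */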
+
+	kbase_device_coherency_init(kbdev, prod_id);
+
+	err = kbasep_protected_mode_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Protected mode subsystem initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_protected;
+
+	dev_list = kbase_dev_list_get();
+	list_add(&kbdev->entry, &kbase_dev_list);
+	kbase_dev_list_put(dev_list);
+	kbdev->inited_subsys |= inited_dev_list;
+
+	err = kbasep_js_devdata_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Job JS devdata initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_js;
+
+	atomic_set(&kbdev->timeline_is_enabled, 0);
+	err = kbase_timeline_init(&kbdev->timeline, &kbdev->timeline_is_enabled);
+	if (err) {
+		dev_err(kbdev->dev, "Timeline stream initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_tlstream;
+
+	err = kbase_hwcnt_backend_gpu_create(kbdev, &kbdev->hwcnt_gpu_iface);
+	if (err) {
+		dev_err(kbdev->dev, "GPU hwcnt backend creation failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_hwcnt_gpu_iface;
+
+	err = kbase_hwcnt_context_init(&kbdev->hwcnt_gpu_iface,
+		&kbdev->hwcnt_gpu_ctx);
+	if (err) {
+		dev_err(kbdev->dev,
+			"GPU hwcnt context initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_hwcnt_gpu_ctx;
+
+	err = kbase_hwcnt_virtualizer_init(
+		kbdev->hwcnt_gpu_ctx,
+		KBASE_HWCNT_GPU_VIRTUALIZER_DUMP_THRESHOLD_NS,
+		&kbdev->hwcnt_gpu_virt);
+	if (err) {
+		dev_err(kbdev->dev,
+			"GPU hwcnt virtualizer initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_hwcnt_gpu_virt;
+
+	err = kbase_vinstr_init(kbdev->hwcnt_gpu_virt, &kbdev->vinstr_ctx);
+	if (err) {
+		dev_err(kbdev->dev,
+			"Virtual instrumentation initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return -EINVAL;
+	}
+	kbdev->inited_subsys |= inited_vinstr;
+
+	/* Devfreq initialization is now embedded inside
+	 * kbase_backend_late_init(), which calls kbase_backend_devfreq_init()
+	 * before the first trigger of pm_context_idle(). */
+	err = kbase_backend_late_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Late backend initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_backend_late;
+
+#ifdef MALI_KBASE_BUILD
+	err = kbase_debug_job_fault_dev_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Job fault debug initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_job_fault;
+
+	err = kbase_device_debugfs_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "DebugFS initialization failed");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_debugfs;
+
+	kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
+	kbdev->mdev.name = kbdev->devname;
+	kbdev->mdev.fops = &kbase_fops;
+	kbdev->mdev.parent = get_device(kbdev->dev);
+	kbdev->mdev.mode = 0666;
+	kbdev->inited_subsys |= inited_get_device;
+
+	/* This needs to happen before registering the device with misc_register(),
+	 * otherwise it causes a race condition between registering the device and a
+	 * uevent event being generated for userspace, causing udev rules to run
+	 * which might expect certain sysfs attributes present. As a result of the
+	 * race condition we avoid, some Mali sysfs entries may have appeared to
+	 * udev to not exist.
+	 *
+	 * For more information, see
+	 * https://www.kernel.org/doc/Documentation/driver-model/device.txt, the
+	 * paragraph that starts with "Word of warning", currently the second-last
+	 * paragraph.
+	 */
+	err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
+	if (err) {
+		dev_err(&pdev->dev, "SysFS group creation failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_sysfs_group;
+
+	err = misc_register(&kbdev->mdev);
+	if (err) {
+		dev_err(kbdev->dev, "Misc device registration failed for %s\n",
+			kbdev->devname);
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_misc_register;
+
+#ifdef CONFIG_MALI_BUSLOG
+	err = bl_core_client_register(kbdev->devname,
+						kbase_logging_started_cb,
+						kbdev, &kbdev->buslogger,
+						THIS_MODULE, NULL);
+	if (err == 0) {
+		kbdev->inited_subsys |= inited_buslogger;
+		bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
+	} else {
+		dev_warn(kbdev->dev, "Bus log client registration failed\n");
+		err = 0;
+	}
+#endif
+
+	err = kbase_gpuprops_populate_user_buffer(kbdev);
+	if (err) {
+		dev_err(&pdev->dev, "GPU property population failed");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+
+	dev_info(kbdev->dev,
+			"Probed as %s\n", dev_name(kbdev->mdev.this_device));
+
+	kbase_dev_nr++;
+#endif /* MALI_KBASE_BUILD */
+
+	return err;
+}
+
+#undef KBASEP_DEFAULT_REGISTER_HISTORY_SIZE
+
+/**
+ * kbase_device_suspend - Suspend callback from the OS.
+ *
+ * This is called by Linux when the device should suspend.
+ *
+ * @dev:  The device to suspend
+ *
+ * Return: A standard Linux error code
+ */
+static int kbase_device_suspend(struct device *dev)
+{
+	struct kbase_device *kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	kbase_pm_suspend(kbdev);
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+		(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	dev_dbg(dev, "Callback %s\n", __func__);
+	if (kbdev->inited_subsys & inited_devfreq) {
+		kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
+		flush_workqueue(kbdev->devfreq_queue.workq);
+	}
+#endif
+	return 0;
+}
+
+/**
+ * kbase_device_resume - Resume callback from the OS.
+ *
+ * This is called by Linux when the device should resume from suspension.
+ *
+ * @dev:  The device to resume
+ *
+ * Return: A standard Linux error code
+ */
+static int kbase_device_resume(struct device *dev)
+{
+	struct kbase_device *kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	kbase_pm_resume(kbdev);
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+		(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	dev_dbg(dev, "Callback %s\n", __func__);
+	if (kbdev->inited_subsys & inited_devfreq) {
+		mutex_lock(&kbdev->pm.lock);
+		if (kbdev->pm.active_count > 0)
+			kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
+		mutex_unlock(&kbdev->pm.lock);
+		flush_workqueue(kbdev->devfreq_queue.workq);
+	}
+#endif
+	return 0;
+}
+
+/**
+ * kbase_device_runtime_suspend - Runtime suspend callback from the OS.
+ *
+ * This is called by Linux when the device should prepare for a condition in
+ * which it will not be able to communicate with the CPU(s) and RAM due to
+ * power management.
+ *
+ * @dev:  The device to suspend
+ *
+ * Return: A standard Linux error code
+ */
+#ifdef KBASE_PM_RUNTIME
+static int kbase_device_runtime_suspend(struct device *dev)
+{
+	struct kbase_device *kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	dev_dbg(dev, "Callback %s\n", __func__);
+#if defined(CONFIG_MALI_DEVFREQ) && \
+		(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	if (kbdev->inited_subsys & inited_devfreq)
+		kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
+#endif
+
+	if (kbdev->pm.backend.callback_power_runtime_off) {
+		kbdev->pm.backend.callback_power_runtime_off(kbdev);
+		dev_dbg(dev, "runtime suspend\n");
+	}
+	return 0;
+}
+#endif /* KBASE_PM_RUNTIME */
+
+/**
+ * kbase_device_runtime_resume - Runtime resume callback from the OS.
+ *
+ * This is called by Linux when the device should go into a fully active state.
+ *
+ * @dev:  The device to resume
+ *
+ * Return: A standard Linux error code
+ */
+#ifdef KBASE_PM_RUNTIME
+static int kbase_device_runtime_resume(struct device *dev)
+{
+	int ret = 0;
+	struct kbase_device *kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	dev_dbg(dev, "Callback %s\n", __func__);
+	if (kbdev->pm.backend.callback_power_runtime_on) {
+		ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
+		dev_dbg(dev, "runtime resume\n");
+	}
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+		(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	if (kbdev->inited_subsys & inited_devfreq)
+		kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
+#endif
+
+	return ret;
+}
+#endif /* KBASE_PM_RUNTIME */
+
+#ifdef KBASE_PM_RUNTIME
+/**
+ * kbase_device_runtime_idle - Runtime idle callback from the OS.
+ * @dev: The device to suspend
+ *
+ * This is called by Linux when the device appears to be inactive and it might
+ * be placed into a low power state.
+ *
+ * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
+ * otherwise a standard Linux error code
+ */
+static int kbase_device_runtime_idle(struct device *dev)
+{
+	struct kbase_device *kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	dev_dbg(dev, "Callback %s\n", __func__);
+	/* Use platform specific implementation if it exists. */
+	if (kbdev->pm.backend.callback_power_runtime_idle)
+		return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
+
+	/* Just need to update the device's last busy mark. Kernel will respect
+	 * the autosuspend delay and so won't suspend the device immediately.
+	 */
+	pm_runtime_mark_last_busy(kbdev->dev);
+	return 0;
+}
+#endif /* KBASE_PM_RUNTIME */
+
+/* The power management operations for the platform driver.
+ */
+static const struct dev_pm_ops kbase_pm_ops = {
+	.suspend = kbase_device_suspend,
+	.resume = kbase_device_resume,
+#ifdef KBASE_PM_RUNTIME
+	.runtime_suspend = kbase_device_runtime_suspend,
+	.runtime_resume = kbase_device_runtime_resume,
+	.runtime_idle = kbase_device_runtime_idle,
+#endif /* KBASE_PM_RUNTIME */
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id kbase_dt_ids[] = {
+	{ .compatible = "arm,malit6xx" },
+	{ .compatible = "arm,mali-midgard" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, kbase_dt_ids);
+#endif
+
+static struct platform_driver kbase_platform_driver = {
+	.probe = kbase_platform_device_probe,
+	.remove = kbase_platform_device_remove,
+	.driver = {
+		   .name = kbase_drv_name,
+		   .owner = THIS_MODULE,
+		   .pm = &kbase_pm_ops,
+		   .of_match_table = of_match_ptr(kbase_dt_ids),
+	},
+};
+
+/*
+ * The driver will not provide a shortcut to create the Mali platform device
+ * anymore when using Device Tree.
+ */
+#ifdef CONFIG_OF
+module_platform_driver(kbase_platform_driver);
+#else
+
+static int __init kbase_driver_init(void)
+{
+	int ret;
+
+	ret = kbase_platform_register();
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&kbase_platform_driver);
+
+	if (ret)
+		kbase_platform_unregister();
+
+	return ret;
+}
+
+static void __exit kbase_driver_exit(void)
+{
+	platform_driver_unregister(&kbase_platform_driver);
+	kbase_platform_unregister();
+}
+
+module_init(kbase_driver_init);
+module_exit(kbase_driver_exit);
+
+#endif /* CONFIG_OF */
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
+		__stringify(BASE_UK_VERSION_MAJOR) "." \
+		__stringify(BASE_UK_VERSION_MINOR) ")");
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
+#define CREATE_TRACE_POINTS
+#endif
+
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+/* Create the trace points (otherwise we just get code to call a tracepoint) */
+#include "mali_linux_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
+
+void kbase_trace_mali_pm_status(u32 dev_id, u32 event, u64 value)
+{
+	trace_mali_pm_status(dev_id, event, value);
+}
+
+void kbase_trace_mali_job_slots_event(u32 dev_id, u32 event, const struct kbase_context *kctx, u8 atom_id)
+{
+	trace_mali_job_slots_event(dev_id, event,
+		(kctx != NULL ? kctx->tgid : 0),
+		(kctx != NULL ? kctx->pid : 0),
+		atom_id);
+}
+
+void kbase_trace_mali_page_fault_insert_pages(u32 dev_id, int event, u32 value)
+{
+	trace_mali_page_fault_insert_pages(dev_id, event, value);
+}
+
+void kbase_trace_mali_total_alloc_pages_change(u32 dev_id, long long int event)
+{
+	trace_mali_total_alloc_pages_change(dev_id, event);
+}
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+#ifdef CONFIG_MALI_SYSTEM_TRACE
+#include "mali_linux_kbase_trace.h"
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c
new file mode 100644
index 0000000..bda0560
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c
@@ -0,0 +1,210 @@
+/*
+ *
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+
+#include "mali_kbase_ctx_sched.h"
+
+int kbase_ctx_sched_init(struct kbase_device *kbdev)
+{
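+	/* Bitmask with one bit set for each hardware address space. */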
+	int as_present = (1U << kbdev->nr_hw_address_spaces) - 1;
+
+	/* These two must be recalculated if nr_hw_address_spaces changes
+	 * (e.g. for HW workarounds) */
+	kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces;
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
+		bool use_workaround;
+
+		use_workaround = DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE;
+		if (use_workaround) {
+			dev_dbg(kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
+			kbdev->nr_user_address_spaces = 1;
+		}
+	}
+
+	kbdev->as_free = as_present; /* All ASs initially free */
+
+	memset(kbdev->as_to_kctx, 0, sizeof(kbdev->as_to_kctx));
+
+	return 0;
+}
+
+void kbase_ctx_sched_term(struct kbase_device *kbdev)
+{
+	s8 i;
+
+	/* Sanity checks */
+	for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) {
+		WARN_ON(kbdev->as_to_kctx[i] != NULL);
+		WARN_ON(!(kbdev->as_free & (1u << i)));
+	}
+}
+
+/**
+ * kbasep_ctx_sched_find_as_for_ctx - Find a free address space
+ *
+ * @kctx: The context for which to find a free address space
+ *
+ * Return: A valid AS if successful, otherwise KBASEP_AS_NR_INVALID
+ *
+ * This function returns an address space available for use. It would prefer
+ * returning an AS that has been previously assigned to the context to
+ * avoid having to reprogram the MMU.
+ */
+static int kbasep_ctx_sched_find_as_for_ctx(struct kbase_context *kctx)
+{
+	struct kbase_device *const kbdev = kctx->kbdev;
+	int free_as;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* First check if the previously assigned AS is available */
+	if ((kctx->as_nr != KBASEP_AS_NR_INVALID) &&
+			(kbdev->as_free & (1u << kctx->as_nr)))
+		return kctx->as_nr;
+
+	/* The previously assigned AS was taken, so fall back to any free AS
+	 * at this point.
+	 */
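+	/* ffs() returns the 1-based index of the least significant set bit,
+	 * or 0 when no bits are set, hence the -1 adjustment. */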
+	free_as = ffs(kbdev->as_free) - 1;
+	if (free_as >= 0 && free_as < kbdev->nr_hw_address_spaces)
+		return free_as;
+
+	return KBASEP_AS_NR_INVALID;
+}
+
+int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx)
+{
+	struct kbase_device *const kbdev = kctx->kbdev;
+
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ON(!kbdev->pm.backend.gpu_powered);
+
+	if (atomic_inc_return(&kctx->refcount) == 1) {
+		int const free_as = kbasep_ctx_sched_find_as_for_ctx(kctx);
+
+		if (free_as != KBASEP_AS_NR_INVALID) {
+			kbdev->as_free &= ~(1u << free_as);
+			/* Only program the MMU if the context has not been
+			 * assigned the same address space before.
+			 */
+			if (free_as != kctx->as_nr) {
+				struct kbase_context *const prev_kctx =
+					kbdev->as_to_kctx[free_as];
+
+				if (prev_kctx) {
+					WARN_ON(atomic_read(&prev_kctx->refcount) != 0);
+					kbase_mmu_disable(prev_kctx);
+					prev_kctx->as_nr = KBASEP_AS_NR_INVALID;
+				}
+
+				kctx->as_nr = free_as;
+				kbdev->as_to_kctx[free_as] = kctx;
+				kbase_mmu_update(kbdev, &kctx->mmu,
+					kctx->as_nr);
+			}
+		} else {
+			atomic_dec(&kctx->refcount);
+
+			/* Failed to find an available address space, so
+			 * KBASEP_AS_NR_INVALID will be returned at this point.
+			 */
+			WARN_ON(kctx->as_nr != KBASEP_AS_NR_INVALID);
+		}
+	}
+
+	return kctx->as_nr;
+}
+
+void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx)
+{
+	struct kbase_device *const kbdev = kctx->kbdev;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	WARN_ON(atomic_read(&kctx->refcount) == 0);
+	WARN_ON(kctx->as_nr == KBASEP_AS_NR_INVALID);
+	WARN_ON(kbdev->as_to_kctx[kctx->as_nr] != kctx);
+
+	atomic_inc(&kctx->refcount);
+}
+
+void kbase_ctx_sched_release_ctx(struct kbase_context *kctx)
+{
+	struct kbase_device *const kbdev = kctx->kbdev;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
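+	/* When the last reference is dropped the AS becomes free for reuse,
+	 * but the MMU programming is left in place so that this context can
+	 * be re-assigned the same AS without reprogramming. */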
+	if (atomic_dec_return(&kctx->refcount) == 0)
+		kbdev->as_free |= (1u << kctx->as_nr);
+}
+
+void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx)
+{
+	struct kbase_device *const kbdev = kctx->kbdev;
+
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ON(atomic_read(&kctx->refcount) != 0);
+
+	if (kctx->as_nr != KBASEP_AS_NR_INVALID) {
+		if (kbdev->pm.backend.gpu_powered)
+			kbase_mmu_disable(kctx);
+
+		kbdev->as_to_kctx[kctx->as_nr] = NULL;
+		kctx->as_nr = KBASEP_AS_NR_INVALID;
+	}
+}
+
+void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev)
+{
+	s8 i;
+
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ON(!kbdev->pm.backend.gpu_powered);
+
+	for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) {
+		struct kbase_context *kctx;
+
+		kctx = kbdev->as_to_kctx[i];
+		if (kctx) {
+			if (atomic_read(&kctx->refcount)) {
+				WARN_ON(kctx->as_nr != i);
+
+				kbase_mmu_update(kbdev, &kctx->mmu,
+					kctx->as_nr);
+			} else {
+				/* This context might have been assigned an
+				 * AS before, clear it.
+				 */
+				kbdev->as_to_kctx[kctx->as_nr] = NULL;
+				kctx->as_nr = KBASEP_AS_NR_INVALID;
+			}
+		} else {
+			kbase_mmu_disable_as(kbdev, i);
+		}
+	}
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h
new file mode 100644
index 0000000..ab57a0d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h
@@ -0,0 +1,135 @@
+/*
+ *
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_CTX_SCHED_H_
+#define _KBASE_CTX_SCHED_H_
+
+#include <mali_kbase.h>
+
+/**
+ * The Context Scheduler manages address space assignment and reference
+ * counting to kbase_context. The interface has been designed to minimise
+ * interactions between the Job Scheduler and Power Management/MMU to support
+ * the existing Job Scheduler interface.
+ *
+ * The initial implementation of the Context Scheduler does not schedule
+ * contexts. Instead it relies on the Job Scheduler to make decisions of
+ * when to schedule/evict contexts if address spaces are starved. In the
+ * future, once an interface between the CS and JS has been devised to
+ * provide enough information about how each context is consuming GPU resources,
+ * those decisions can be made in the CS itself, thereby reducing duplicated
+ * code.
+ */
+
+/**
+ * kbase_ctx_sched_init - Initialise the context scheduler
+ * @kbdev: The device for which the context scheduler needs to be initialised
+ *
+ * This must be called during device initialisation. The number of hardware
+ * address spaces must already be established before calling this function.
+ *
+ * Return: 0 for success, otherwise failure
+ */
+int kbase_ctx_sched_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_ctx_sched_term - Terminate the context scheduler
+ * @kbdev: The device for which the context scheduler needs to be terminated
+ *
+ * This must be called during device termination after all contexts have been
+ * destroyed.
+ */
+void kbase_ctx_sched_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_ctx_sched_retain_ctx - Retain a reference to the @ref kbase_context
+ * @kctx: The context to which to retain a reference
+ *
+ * This function should be called whenever an address space should be assigned
+ * to a context and programmed onto the MMU. It should typically be called
+ * when jobs are ready to be submitted to the GPU.
+ *
+ * It can be called as many times as necessary. The address space will be
+ * assigned to the context for as long as there is a reference to said context.
+ *
+ * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
+ * held whilst calling this function.
+ *
+ * Return: The address space that the context has been assigned to or
+ *         KBASEP_AS_NR_INVALID if no address space was available.
+ */
+int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx);
+
+/**
+ * kbase_ctx_sched_retain_ctx_refcount - Retain an extra reference to a context
+ * @kctx: The context to which to retain a reference
+ *
+ * This function only retains a reference to the context. It must be called
+ * only when the context already has a reference.
+ *
+ * This is typically called inside an atomic session where we know the context
+ * is already scheduled in but want to take an extra reference to ensure that
+ * it doesn't get descheduled.
+ *
+ * The kbase_device::hwaccess_lock must be held whilst calling this function
+ */
+void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx);
+
+/**
+ * kbase_ctx_sched_release_ctx - Release a reference to the @ref kbase_context
+ * @kctx: The context from which to release a reference
+ *
+ * This function should be called whenever an address space could be unassigned
+ * from a context. When there are no more references to said context, the
+ * address space previously assigned to this context shall be reassigned to
+ * other contexts as needed.
+ *
+ * The kbase_device::hwaccess_lock must be held whilst calling this function
+ */
+void kbase_ctx_sched_release_ctx(struct kbase_context *kctx);
+
+/**
+ * kbase_ctx_sched_remove_ctx - Unassign previously assigned address space
+ * @kctx: The context to be removed
+ *
+ * This function should be called when a context is being destroyed. The
+ * context must no longer have any reference. If it has been assigned an
+ * address space before then the AS will be unprogrammed.
+ *
+ * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
+ * held whilst calling this function.
+ */
+void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx);
+
+/**
+ * kbase_ctx_sched_restore_all_as - Reprogram all address spaces
+ * @kbdev: The device for which address spaces are to be reprogrammed
+ *
+ * This function shall reprogram all address spaces previously assigned to
+ * contexts. It can be used after the GPU is reset.
+ *
+ * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
+ * held whilst calling this function.
+ */
+void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev);
+
+#endif /* _KBASE_CTX_SCHED_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug.c b/drivers/gpu/arm/midgard/mali_kbase_debug.c
new file mode 100644
index 0000000..118f787
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug.c
@@ -0,0 +1,44 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+
+static struct kbasep_debug_assert_cb kbasep_debug_assert_registered_cb = {
+	NULL,
+	NULL
+};
+
+void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param)
+{
+	kbasep_debug_assert_registered_cb.func = func;
+	kbasep_debug_assert_registered_cb.param = param;
+}
+
+void kbasep_debug_assert_call_hook(void)
+{
+	if (kbasep_debug_assert_registered_cb.func != NULL)
+		kbasep_debug_assert_registered_cb.func(kbasep_debug_assert_registered_cb.param);
+}
+KBASE_EXPORT_SYMBOL(kbasep_debug_assert_call_hook);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug.h b/drivers/gpu/arm/midgard/mali_kbase_debug.h
new file mode 100644
index 0000000..2fdb72d9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug.h
@@ -0,0 +1,169 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_DEBUG_H
+#define _KBASE_DEBUG_H
+
+#include <linux/bug.h>
+
+/** @brief If set to 0, a trace containing the file, line, and function will be displayed before each message. */
+#define KBASE_DEBUG_SKIP_TRACE 0
+
+/** @brief If set to a non-zero value, the trace will only contain the file and line. */
+#define KBASE_DEBUG_SKIP_FUNCTION_NAME 0
+
+/** @brief Disable the asserts tests if set to 1. Default is to disable the asserts in release. */
+#ifndef KBASE_DEBUG_DISABLE_ASSERTS
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_DEBUG_DISABLE_ASSERTS 0
+#else
+#define KBASE_DEBUG_DISABLE_ASSERTS 1
+#endif
+#endif				/* KBASE_DEBUG_DISABLE_ASSERTS */
+
+/** Function type that is called on an KBASE_DEBUG_ASSERT() or KBASE_DEBUG_ASSERT_MSG() */
+typedef void (kbase_debug_assert_hook) (void *);
+
+struct kbasep_debug_assert_cb {
+	kbase_debug_assert_hook *func;
+	void *param;
+};
+
+/**
+ * @def KBASEP_DEBUG_PRINT_TRACE
+ * @brief Private macro containing the format of the trace to display before every message
+ * @sa KBASE_DEBUG_SKIP_TRACE, KBASE_DEBUG_SKIP_FUNCTION_NAME
+ */
+#if !KBASE_DEBUG_SKIP_TRACE
+#define KBASEP_DEBUG_PRINT_TRACE \
+		"In file: " __FILE__ " line: " CSTD_STR2(__LINE__)
+#if !KBASE_DEBUG_SKIP_FUNCTION_NAME
+#define KBASEP_DEBUG_PRINT_FUNCTION __func__
+#else
+#define KBASEP_DEBUG_PRINT_FUNCTION ""
+#endif
+#else
+#define KBASEP_DEBUG_PRINT_TRACE ""
+#endif
+
+/**
+ * @def KBASEP_DEBUG_ASSERT_OUT(trace, function, ...)
+ * @brief (Private) system printing function associated to the @ref KBASE_DEBUG_ASSERT_MSG event.
+ * @param trace location in the code from where the message is printed
+ * @param function function from where the message is printed
+ * @param ... Format string followed by format arguments.
+ * @note function parameter cannot be concatenated with other strings
+ */
+/* Select the correct system output function*/
+#ifdef CONFIG_MALI_DEBUG
+#define KBASEP_DEBUG_ASSERT_OUT(trace, function, ...)\
+		do { \
+			pr_err("Mali<ASSERT>: %s function:%s ", trace, function);\
+			pr_err(__VA_ARGS__);\
+			pr_err("\n");\
+		} while (false)
+#else
+#define KBASEP_DEBUG_ASSERT_OUT(trace, function, ...) CSTD_NOP()
+#endif
+
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_CALL_ASSERT_HOOK() kbasep_debug_assert_call_hook()
+#else
+#define KBASE_CALL_ASSERT_HOOK() CSTD_NOP()
+#endif
+
+/**
+ * @def KBASE_DEBUG_ASSERT(expr)
+ * @brief Calls @ref KBASE_DEBUG_ASSERT_MSG and prints the expression @a expr if @a expr is false
+ *
+ * @note This macro does nothing if the flag @ref KBASE_DEBUG_DISABLE_ASSERTS is set to 1
+ *
+ * @param expr Boolean expression
+ */
+#define KBASE_DEBUG_ASSERT(expr) \
+	KBASE_DEBUG_ASSERT_MSG(expr, #expr)
+
+#if KBASE_DEBUG_DISABLE_ASSERTS
+#define KBASE_DEBUG_ASSERT_MSG(expr, ...) CSTD_NOP()
+#else
+	/**
+	 * @def KBASE_DEBUG_ASSERT_MSG(expr, ...)
+	 * @brief Calls @ref KBASEP_DEBUG_ASSERT_OUT and prints the given message if @a expr is false
+	 *
+	 * @note This macro does nothing if the flag @ref KBASE_DEBUG_DISABLE_ASSERTS is set to 1
+	 *
+	 * @param expr Boolean expression
+	 * @param ...  Message to display when @a expr is false, as a format string followed by format arguments.
+	 */
+#define KBASE_DEBUG_ASSERT_MSG(expr, ...) \
+		do { \
+			if (!(expr)) { \
+				KBASEP_DEBUG_ASSERT_OUT(KBASEP_DEBUG_PRINT_TRACE, KBASEP_DEBUG_PRINT_FUNCTION, __VA_ARGS__);\
+				KBASE_CALL_ASSERT_HOOK();\
+				BUG();\
+			} \
+		} while (false)
+#endif				/* KBASE_DEBUG_DISABLE_ASSERTS */
+
+/**
+ * @def KBASE_DEBUG_CODE( X )
+ * @brief Executes the code inside the macro only in debug mode
+ *
+ * @param X Code to compile only in debug mode.
+ */
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_DEBUG_CODE(X) X
+#else
+#define KBASE_DEBUG_CODE(X) CSTD_NOP()
+#endif				/* CONFIG_MALI_DEBUG */
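+
+/* Illustrative sketch of the macros above (hypothetical call sites, for
+ * illustration only):
+ *
+ *	KBASE_DEBUG_ASSERT(kctx != NULL);
+ *	KBASE_DEBUG_ASSERT_MSG(count < max, "count %d exceeds max %d",
+ *			count, max);
+ *	KBASE_DEBUG_CODE(atomic_inc(&debug_only_counter));
+ */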
+
+/** @} */
+
+/**
+ * @brief Register a function to call on ASSERT
+ *
+ * Such functions will \b only be called during Debug mode, and for debugging
+ * features \b only. Do not rely on them to be called in general use.
+ *
+ * To disable the hook, supply NULL to \a func.
+ *
+ * @note This function is not thread-safe, and should only be used to
+ * register/deregister once in the module's lifetime.
+ *
+ * @param[in] func the function to call when an assert is triggered.
+ * @param[in] param the parameter to pass to \a func when calling it
+ */
+void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param);
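+
+/* Illustrative usage sketch (the hook, its parameter, and the state it logs
+ * are hypothetical, for illustration only):
+ *
+ *	static void my_assert_hook(void *param)
+ *	{
+ *		struct my_debug_state *state = param;
+ *
+ *		pr_err("state at assert: %d\n", state->value);
+ *	}
+ *
+ *	kbase_debug_assert_register_hook(my_assert_hook, &my_state);
+ *	...
+ *	kbase_debug_assert_register_hook(NULL, NULL);
+ */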
+
+/**
+ * @brief Call a debug assert hook previously registered with kbase_debug_assert_register_hook()
+ *
+ * @note This function is not thread-safe with respect to multiple threads
+ * registering functions and parameters with
+ * kbase_debug_assert_register_hook(). Otherwise, thread safety is the
+ * responsibility of the registered hook.
+ */
+void kbasep_debug_assert_call_hook(void);
+
+#endif				/* _KBASE_DEBUG_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.c b/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.c
new file mode 100644
index 0000000..dbc774d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.c
@@ -0,0 +1,566 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016, 2018-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <linux/spinlock.h>
+#include <mali_kbase_hwaccess_jm.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+static bool kbase_is_job_fault_event_pending(struct kbase_device *kbdev)
+{
+	struct list_head *event_list = &kbdev->job_fault_event_list;
+	unsigned long    flags;
+	bool             ret;
+
+	spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	ret = !list_empty(event_list);
+	spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+	return ret;
+}
+
+static void kbase_ctx_remove_pending_event(struct kbase_context *kctx)
+{
+	struct list_head *event_list = &kctx->kbdev->job_fault_event_list;
+	struct base_job_fault_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kctx->kbdev->job_fault_event_lock, flags);
+	list_for_each_entry(event, event_list, head) {
+		if (event->katom->kctx == kctx) {
+			list_del(&event->head);
+			spin_unlock_irqrestore(&kctx->kbdev->job_fault_event_lock, flags);
+
+			wake_up(&kctx->kbdev->job_fault_resume_wq);
+			flush_work(&event->job_fault_work);
+
+			/* job_fault_event_list can only have a single atom for
+			 * each context.
+			 */
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&kctx->kbdev->job_fault_event_lock, flags);
+}
+
+static bool kbase_ctx_has_no_event_pending(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	struct list_head *event_list = &kctx->kbdev->job_fault_event_list;
+	struct base_job_fault_event *event;
+	unsigned long               flags;
+
+	spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	if (list_empty(event_list)) {
+		spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+		return true;
+	}
+	list_for_each_entry(event, event_list, head) {
+		if (event->katom->kctx == kctx) {
+			spin_unlock_irqrestore(&kbdev->job_fault_event_lock,
+					flags);
+			return false;
+		}
+	}
+	spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+	return true;
+}
+
+static int wait_for_job_fault(struct kbase_device *kbdev)
+{
+#if KERNEL_VERSION(4, 7, 0) <= LINUX_VERSION_CODE && \
+	KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
+	int ret = wait_event_interruptible_timeout(kbdev->job_fault_wq,
+			kbase_is_job_fault_event_pending(kbdev),
+			msecs_to_jiffies(2000));
+	if (ret == 0)
+		return -EAGAIN;
+	else if (ret > 0)
+		return 0;
+	else
+		return ret;
+#else
+	return wait_event_interruptible(kbdev->job_fault_wq,
+			kbase_is_job_fault_event_pending(kbdev));
+#endif
+}
+
+/* Wait until a fault happens and copy the event */
+static int kbase_job_fault_event_wait(struct kbase_device *kbdev,
+		struct base_job_fault_event *event)
+{
+	struct list_head            *event_list = &kbdev->job_fault_event_list;
+	struct base_job_fault_event *event_in;
+	unsigned long               flags;
+
+	spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	while (list_empty(event_list)) {
+		int err;
+
+		spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+		err = wait_for_job_fault(kbdev);
+		if (err)
+			return err;
+
+		spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	}
+
+	event_in = list_entry(event_list->next,
+			struct base_job_fault_event, head);
+	event->event_code = event_in->event_code;
+	event->katom = event_in->katom;
+
+	spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+	return 0;
+}
+
+/* remove the event from the queue */
+static struct base_job_fault_event *kbase_job_fault_event_dequeue(
+		struct kbase_device *kbdev, struct list_head *event_list)
+{
+	struct base_job_fault_event *event;
+
+	event = list_entry(event_list->next,
+			struct base_job_fault_event, head);
+	list_del(event_list->next);
+
+	return event;
+}
+
+/* Remove all atoms that follow the failed atom in the same context and
+ * call the postponed bottom half of job done for each of them.
+ * The context can then be rescheduled.
+ */
+static void kbase_job_fault_resume_event_cleanup(struct kbase_context *kctx)
+{
+	struct list_head *event_list = &kctx->job_fault_resume_event_list;
+
+	while (!list_empty(event_list)) {
+		struct base_job_fault_event *event;
+
+		event = kbase_job_fault_event_dequeue(kctx->kbdev,
+				&kctx->job_fault_resume_event_list);
+		kbase_jd_done_worker(&event->katom->work);
+	}
+}
+
+static void kbase_job_fault_resume_worker(struct work_struct *data)
+{
+	struct base_job_fault_event *event = container_of(data,
+			struct base_job_fault_event, job_fault_work);
+	struct kbase_context *kctx;
+	struct kbase_jd_atom *katom;
+
+	katom = event->katom;
+	kctx = katom->kctx;
+
+	dev_info(kctx->kbdev->dev, "Job dumping wait\n");
+
+	/* When woken up, check whether the queue is empty or the failed
+	 * atom belongs to a different context; either case means the
+	 * failed job has been dumped. Note that the job_fault_event_list
+	 * should never contain two atoms belonging to the same context.
+	 */
+	wait_event(kctx->kbdev->job_fault_resume_wq,
+			 kbase_ctx_has_no_event_pending(kctx));
+
+	atomic_set(&kctx->job_fault_count, 0);
+	kbase_jd_done_worker(&katom->work);
+
+	/* Atoms scheduled while the failed job was being dumped had their
+	 * job_done_worker held back; rerun it now that the dump has
+	 * finished.
+	 */
+	kbase_job_fault_resume_event_cleanup(kctx);
+
+	dev_info(kctx->kbdev->dev, "Job dumping finish, resume scheduler\n");
+}
+
+static struct base_job_fault_event *kbase_job_fault_event_queue(
+		struct list_head *event_list,
+		struct kbase_jd_atom *atom,
+		u32 completion_code)
+{
+	struct base_job_fault_event *event;
+
+	event = &atom->fault_event;
+
+	event->katom = atom;
+	event->event_code = completion_code;
+
+	list_add_tail(&event->head, event_list);
+
+	return event;
+}
+
+static void kbase_job_fault_event_post(struct kbase_device *kbdev,
+		struct kbase_jd_atom *katom, u32 completion_code)
+{
+	struct base_job_fault_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	event = kbase_job_fault_event_queue(&kbdev->job_fault_event_list,
+				katom, completion_code);
+	spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+	wake_up_interruptible(&kbdev->job_fault_wq);
+
+	INIT_WORK(&event->job_fault_work, kbase_job_fault_resume_worker);
+	queue_work(kbdev->job_fault_resume_workq, &event->job_fault_work);
+
+	dev_info(katom->kctx->kbdev->dev, "Job fault happened, start dump: %d_%d",
+			katom->kctx->tgid, katom->kctx->id);
+}
+
+/*
+ * Process the job fault:
+ * - get the register copy,
+ * - send the failed job dump event,
+ * - queue work that waits until the job dump finishes.
+ */
+
+bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom,
+		u32 completion_code)
+{
+	struct kbase_context *kctx = katom->kctx;
+
+	/* Check if dumping is already in progress: only one atom per
+	 * context can be dumped at a time. An atom that belongs to a
+	 * different context can still be dumped.
+	 */
+	if (atomic_read(&kctx->job_fault_count) > 0) {
+		kbase_job_fault_event_queue(
+				&kctx->job_fault_resume_event_list,
+				katom, completion_code);
+		dev_info(kctx->kbdev->dev, "queue:%d\n",
+				kbase_jd_atom_id(kctx, katom));
+		return true;
+	}
+
+	if (kbase_ctx_flag(kctx, KCTX_DYING))
+		return false;
+
+	if (atomic_read(&kctx->kbdev->job_fault_debug) > 0) {
+
+		if (completion_code != BASE_JD_EVENT_DONE) {
+
+			if (kbase_job_fault_get_reg_snapshot(kctx) == false) {
+				dev_warn(kctx->kbdev->dev, "get reg dump failed\n");
+				return false;
+			}
+
+			kbase_job_fault_event_post(kctx->kbdev, katom,
+					completion_code);
+			atomic_inc(&kctx->job_fault_count);
+			dev_info(kctx->kbdev->dev, "post:%d\n",
+					kbase_jd_atom_id(kctx, katom));
+			return true;
+
+		}
+	}
+	return false;
+}
+
+static int debug_job_fault_show(struct seq_file *m, void *v)
+{
+	struct kbase_device *kbdev = m->private;
+	struct base_job_fault_event *event = (struct base_job_fault_event *)v;
+	struct kbase_context *kctx = event->katom->kctx;
+	int i;
+
+	dev_info(kbdev->dev, "debug job fault seq show:%d_%d, %d",
+			kctx->tgid, kctx->id, event->reg_offset);
+
+	if (kctx->reg_dump == NULL) {
+		dev_warn(kbdev->dev, "reg dump is NULL");
+		return -1;
+	}
+
+	if (kctx->reg_dump[event->reg_offset] ==
+			REGISTER_DUMP_TERMINATION_FLAG) {
+		/* Return the error here to stop the read. And the
+		 * following next() will not be called. The stop can
+		 * get the real event resource and release it
+		 */
+		return -1;
+	}
+
+	if (event->reg_offset == 0)
+		seq_printf(m, "%d_%d\n", kctx->tgid, kctx->id);
+
+	for (i = 0; i < 50; i++) {
+		if (kctx->reg_dump[event->reg_offset] ==
+				REGISTER_DUMP_TERMINATION_FLAG) {
+			break;
+		}
+		seq_printf(m, "%08x: %08x\n",
+				kctx->reg_dump[event->reg_offset],
+				kctx->reg_dump[1+event->reg_offset]);
+		event->reg_offset += 2;
+	}
+
+	return 0;
+}
+
+static void *debug_job_fault_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct kbase_device *kbdev = m->private;
+	struct base_job_fault_event *event = (struct base_job_fault_event *)v;
+
+	dev_info(kbdev->dev, "debug job fault seq next:%d, %d",
+			event->reg_offset, (int)*pos);
+
+	return event;
+}
+
+static void *debug_job_fault_start(struct seq_file *m, loff_t *pos)
+{
+	struct kbase_device *kbdev = m->private;
+	struct base_job_fault_event *event;
+
+	dev_info(kbdev->dev, "fault job seq start:%d", (int)*pos);
+
+	/* The condition is tricky here. It must ensure that either the
+	 * fault hasn't happened and dumping hasn't started, or that
+	 * dumping has finished.
+	 */
+	if (*pos == 0) {
+		event = kmalloc(sizeof(*event), GFP_KERNEL);
+		if (!event)
+			return NULL;
+		event->reg_offset = 0;
+		if (kbase_job_fault_event_wait(kbdev, event)) {
+			kfree(event);
+			return NULL;
+		}
+
+		/* The cache flush workaround is called in bottom half of
+		 * job done but we delayed it. Now we should clean cache
+		 * earlier. Then the GPU memory dump should be correct.
+		 */
+		kbase_backend_cache_clean(kbdev, event->katom);
+	} else
+		return NULL;
+
+	return event;
+}
+
+static void debug_job_fault_stop(struct seq_file *m, void *v)
+{
+	struct kbase_device *kbdev = m->private;
+
+	/* Wake up the kbase_jd_done_worker after stop. The debug daemon
+	 * needs to take the memory dump before the register dump;
+	 * otherwise, the memory dump may be incorrect.
+	 */
+
+	if (v != NULL) {
+		kfree(v);
+		dev_info(kbdev->dev, "debug job fault seq stop stage 1");
+
+	} else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+		if (!list_empty(&kbdev->job_fault_event_list)) {
+			kbase_job_fault_event_dequeue(kbdev,
+				&kbdev->job_fault_event_list);
+			wake_up(&kbdev->job_fault_resume_wq);
+		}
+		spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+		dev_info(kbdev->dev, "debug job fault seq stop stage 2");
+	}
+}
+
+static const struct seq_operations ops = {
+	.start = debug_job_fault_start,
+	.next = debug_job_fault_next,
+	.stop = debug_job_fault_stop,
+	.show = debug_job_fault_show,
+};
+
+static int debug_job_fault_open(struct inode *in, struct file *file)
+{
+	struct kbase_device *kbdev = in->i_private;
+
+	if (atomic_cmpxchg(&kbdev->job_fault_debug, 0, 1) == 1) {
+		dev_warn(kbdev->dev, "debug job fault is busy, only a single client is allowed");
+		return -EBUSY;
+	}
+
+	seq_open(file, &ops);
+
+	((struct seq_file *)file->private_data)->private = kbdev;
+	dev_info(kbdev->dev, "debug job fault seq open");
+
+	return 0;
+}
+
+static int debug_job_fault_release(struct inode *in, struct file *file)
+{
+	struct kbase_device *kbdev = in->i_private;
+	struct list_head *event_list = &kbdev->job_fault_event_list;
+	unsigned long    flags;
+
+	seq_release(in, file);
+
+	spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+
+	/* Disable job fault dumping. This will let kbase run jobs as normal,
+	 * without blocking waiting for a job_fault client to read failed jobs.
+	 *
+	 * After this a new client may open the file, and may re-enable job
+	 * fault dumping, but the job_fault_event_lock we hold here will block
+	 * that from interfering until after we've completed the cleanup.
+	 */
+	atomic_dec(&kbdev->job_fault_debug);
+
+	/* Clean up the unprocessed job faults so that all the suspended
+	 * contexts can be rescheduled: remove all the failed atoms, which
+	 * belong to different contexts, and resume every context that was
+	 * suspended due to a failed job.
+	 */
+	while (!list_empty(event_list)) {
+		kbase_job_fault_event_dequeue(kbdev, event_list);
+		spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+		wake_up(&kbdev->job_fault_resume_wq);
+		spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	}
+
+	spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+	dev_info(kbdev->dev, "debug job fault seq close");
+
+	return 0;
+}
+
+static const struct file_operations kbasep_debug_job_fault_fops = {
+	.owner = THIS_MODULE,
+	.open = debug_job_fault_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = debug_job_fault_release,
+};
+
+/*
+ *  Initialize debugfs entry for job fault dump
+ */
+void kbase_debug_job_fault_debugfs_init(struct kbase_device *kbdev)
+{
+	debugfs_create_file("job_fault", 0400,
+			kbdev->mali_debugfs_directory, kbdev,
+			&kbasep_debug_job_fault_fops);
+}
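+
+/* Illustrative use from user space, assuming debugfs is mounted at the usual
+ * location and the typical "mali0" device directory (a sketch, not output
+ * captured from a real device):
+ *
+ *	# cat /sys/kernel/debug/mali0/job_fault
+ *	<tgid>_<ctx id>
+ *	<reg address>: <reg value>
+ *	...
+ *
+ * The read blocks until a job fault occurs, and the per-fault register dump
+ * ends when REGISTER_DUMP_TERMINATION_FLAG is reached.
+ */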
+
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev)
+{
+	INIT_LIST_HEAD(&kbdev->job_fault_event_list);
+
+	init_waitqueue_head(&(kbdev->job_fault_wq));
+	init_waitqueue_head(&(kbdev->job_fault_resume_wq));
+	spin_lock_init(&kbdev->job_fault_event_lock);
+
+	kbdev->job_fault_resume_workq = alloc_workqueue(
+			"kbase_job_fault_resume_work_queue", WQ_MEM_RECLAIM, 1);
+	if (!kbdev->job_fault_resume_workq)
+		return -ENOMEM;
+
+	atomic_set(&kbdev->job_fault_debug, 0);
+
+	return 0;
+}
+
+/*
+ * Release the relevant resource per device
+ */
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev)
+{
+	destroy_workqueue(kbdev->job_fault_resume_workq);
+}
+
+/*
+ *  Initialize the relevant data structure per context
+ */
+void kbase_debug_job_fault_context_init(struct kbase_context *kctx)
+{
+	/* Allocate twice the register range, because this memory keeps
+	 * both the register address and its value.
+	 */
+	kctx->reg_dump = vmalloc(0x4000 * 2);
+	if (kctx->reg_dump == NULL)
+		return;
+
+	if (kbase_debug_job_fault_reg_snapshot_init(kctx, 0x4000) == false) {
+		vfree(kctx->reg_dump);
+		kctx->reg_dump = NULL;
+	}
+	INIT_LIST_HEAD(&kctx->job_fault_resume_event_list);
+	atomic_set(&kctx->job_fault_count, 0);
+}
+
+/*
+ *  Release the relevant resources per context
+ */
+void kbase_debug_job_fault_context_term(struct kbase_context *kctx)
+{
+	vfree(kctx->reg_dump);
+}
+
+void kbase_debug_job_fault_kctx_unblock(struct kbase_context *kctx)
+{
+	WARN_ON(!kbase_ctx_flag(kctx, KCTX_DYING));
+
+	kbase_ctx_remove_pending_event(kctx);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev)
+{
+	return 0;
+}
+
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.h b/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.h
new file mode 100644
index 0000000..ef69627
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.h
@@ -0,0 +1,116 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_DEBUG_JOB_FAULT_H
+#define _KBASE_DEBUG_JOB_FAULT_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define REGISTER_DUMP_TERMINATION_FLAG 0xFFFFFFFF
+
+/**
+ * kbase_debug_job_fault_dev_init - Create the fault event wait queue
+ *		per device and initialize the required lists.
+ * @kbdev:	Device pointer
+ *
+ * Return: Zero on success or a negative error code.
+ */
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_debugfs_init - Initialize the job fault debugfs file
+ * @kbdev:	Device pointer
+ */
+void kbase_debug_job_fault_debugfs_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_dev_term - Clean up resources created in
+ *		kbase_debug_job_fault_dev_init.
+ * @kbdev:	Device pointer
+ */
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_context_init - Initialize the relevant
+ *		data structure per context
+ * @kctx: KBase context pointer
+ */
+void kbase_debug_job_fault_context_init(struct kbase_context *kctx);
+
+/**
+ * kbase_debug_job_fault_context_term - Release the relevant
+ *		resource per context
+ * @kctx: KBase context pointer
+ */
+void kbase_debug_job_fault_context_term(struct kbase_context *kctx);
+
+/**
+ * kbase_debug_job_fault_kctx_unblock - Unblock the atoms blocked on job fault
+ *					dumping on context termination.
+ *
+ * This function is called during context termination to unblock the atom for
+ * which the job fault occurred and also the atoms following it. Without it,
+ * the wait for zero jobs could time out (leading to an assertion failure, and
+ * a kernel panic in debug builds) in the pathological case where the
+ * thread/daemon capturing the job fault events is running but has, for some
+ * reason, stopped consuming them.
+ *
+ * @kctx: KBase context pointer
+ */
+void kbase_debug_job_fault_kctx_unblock(struct kbase_context *kctx);
+
+/**
+ * kbase_debug_job_fault_process - Process the failed job.
+ *      It sends an event and wakes up the job fault wait queue, then
+ *      queues work that waits for the job dump to finish.
+ *      This function should be called in the interrupt handler and before
+ *      jd_done, to make sure the jd_done_worker is delayed until the job
+ *      dump has finished.
+ * @katom: The failed atom pointer
+ * @completion_code: the job status
+ * @return true if a dump is in progress
+ */
+bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom,
+		u32 completion_code);
+
+/**
+ * kbase_debug_job_fault_reg_snapshot_init - Set the addresses of the
+ *      registers of interest for the job fault process; the relevant
+ *      registers will be saved when a job fault happens
+ * @kctx: KBase context pointer
+ * @reg_range: Maximum register address space
+ * @return true if initialization succeeded
+ */
+bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx,
+		int reg_range);
+
+/**
+ * kbase_job_fault_get_reg_snapshot - Read the registers of interest for
+ *      the failed job dump
+ * @kctx: KBase context pointer
+ * @return true if the registers were read successfully
+ */
+bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx);
+
+#endif  /* _KBASE_DEBUG_JOB_FAULT_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c
new file mode 100644
index 0000000..c091f16
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c
@@ -0,0 +1,307 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Debugfs interface to dump the memory visible to the GPU
+ */
+
+#include "mali_kbase_debug_mem_view.h"
+#include "mali_kbase.h"
+
+#include <linux/list.h>
+#include <linux/file.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+#if (KERNEL_VERSION(4, 1, 0) > LINUX_VERSION_CODE)
+#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
+#endif
+
+struct debug_mem_mapping {
+	struct list_head node;
+
+	struct kbase_mem_phy_alloc *alloc;
+	unsigned long flags;
+
+	u64 start_pfn;
+	size_t nr_pages;
+};
+
+struct debug_mem_data {
+	struct list_head mapping_list;
+	struct kbase_context *kctx;
+};
+
+struct debug_mem_seq_off {
+	struct list_head *lh;
+	size_t offset;
+};
+
+static void *debug_mem_start(struct seq_file *m, loff_t *_pos)
+{
+	struct debug_mem_data *mem_data = m->private;
+	struct debug_mem_seq_off *data;
+	struct debug_mem_mapping *map;
+	loff_t pos = *_pos;
+
+	list_for_each_entry(map, &mem_data->mapping_list, node) {
+		if (pos >= map->nr_pages) {
+			pos -= map->nr_pages;
+		} else {
+			data = kmalloc(sizeof(*data), GFP_KERNEL);
+			if (!data)
+				return NULL;
+			data->lh = &map->node;
+			data->offset = pos;
+			return data;
+		}
+	}
+
+	/* Beyond the end */
+	return NULL;
+}
+
+static void debug_mem_stop(struct seq_file *m, void *v)
+{
+	kfree(v);
+}
+
+static void *debug_mem_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct debug_mem_data *mem_data = m->private;
+	struct debug_mem_seq_off *data = v;
+	struct debug_mem_mapping *map;
+
+	map = list_entry(data->lh, struct debug_mem_mapping, node);
+
+	if (data->offset < map->nr_pages - 1) {
+		data->offset++;
+		++*pos;
+		return data;
+	}
+
+	if (list_is_last(data->lh, &mem_data->mapping_list)) {
+		kfree(data);
+		return NULL;
+	}
+
+	data->lh = data->lh->next;
+	data->offset = 0;
+	++*pos;
+
+	return data;
+}
+
+static int debug_mem_show(struct seq_file *m, void *v)
+{
+	struct debug_mem_data *mem_data = m->private;
+	struct debug_mem_seq_off *data = v;
+	struct debug_mem_mapping *map;
+	int i, j;
+	struct page *page;
+	uint32_t *mapping;
+	pgprot_t prot = PAGE_KERNEL;
+
+	map = list_entry(data->lh, struct debug_mem_mapping, node);
+
+	kbase_gpu_vm_lock(mem_data->kctx);
+
+	if (data->offset >= map->alloc->nents) {
+		seq_printf(m, "%016llx: Unbacked page\n\n", (map->start_pfn +
+				data->offset) << PAGE_SHIFT);
+		goto out;
+	}
+
+	if (!(map->flags & KBASE_REG_CPU_CACHED))
+		prot = pgprot_writecombine(prot);
+
+	page = as_page(map->alloc->pages[data->offset]);
+	mapping = vmap(&page, 1, VM_MAP, prot);
+	if (!mapping)
+		goto out;
+
+	for (i = 0; i < PAGE_SIZE; i += 4*sizeof(*mapping)) {
+		seq_printf(m, "%016llx:", i + ((map->start_pfn +
+				data->offset) << PAGE_SHIFT));
+
+		for (j = 0; j < 4*sizeof(*mapping); j += sizeof(*mapping))
+			seq_printf(m, " %08x", mapping[(i+j)/sizeof(*mapping)]);
+		seq_putc(m, '\n');
+	}
+
+	vunmap(mapping);
+
+	seq_putc(m, '\n');
+
+out:
+	kbase_gpu_vm_unlock(mem_data->kctx);
+	return 0;
+}
+
+static const struct seq_operations ops = {
+	.start = debug_mem_start,
+	.next = debug_mem_next,
+	.stop = debug_mem_stop,
+	.show = debug_mem_show,
+};
+
+static int debug_mem_zone_open(struct rb_root *rbtree,
+						struct debug_mem_data *mem_data)
+{
+	int ret = 0;
+	struct rb_node *p;
+	struct kbase_va_region *reg;
+	struct debug_mem_mapping *mapping;
+
+	for (p = rb_first(rbtree); p; p = rb_next(p)) {
+		reg = rb_entry(p, struct kbase_va_region, rblink);
+
+		if (reg->gpu_alloc == NULL)
+			/* Empty region - ignore */
+			continue;
+
+		mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+		if (!mapping) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		mapping->alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+		mapping->start_pfn = reg->start_pfn;
+		mapping->nr_pages = reg->nr_pages;
+		mapping->flags = reg->flags;
+		list_add_tail(&mapping->node, &mem_data->mapping_list);
+	}
+
+out:
+	return ret;
+}
+
+static int debug_mem_open(struct inode *i, struct file *file)
+{
+	struct kbase_context *const kctx = i->i_private;
+	struct debug_mem_data *mem_data;
+	int ret;
+
+	if (get_file_rcu(kctx->filp) == 0)
+		return -ENOENT;
+
+	ret = seq_open(file, &ops);
+	if (ret)
+		goto open_fail;
+
+	mem_data = kmalloc(sizeof(*mem_data), GFP_KERNEL);
+	if (!mem_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mem_data->kctx = kctx;
+
+	INIT_LIST_HEAD(&mem_data->mapping_list);
+
+	kbase_gpu_vm_lock(kctx);
+
+	ret = debug_mem_zone_open(&kctx->reg_rbtree_same, mem_data);
+	if (ret) {
+		kbase_gpu_vm_unlock(kctx);
+		goto out;
+	}
+
+	ret = debug_mem_zone_open(&kctx->reg_rbtree_custom, mem_data);
+	if (ret) {
+		kbase_gpu_vm_unlock(kctx);
+		goto out;
+	}
+
+	kbase_gpu_vm_unlock(kctx);
+
+	((struct seq_file *)file->private_data)->private = mem_data;
+
+	return 0;
+
+out:
+	if (mem_data) {
+		while (!list_empty(&mem_data->mapping_list)) {
+			struct debug_mem_mapping *mapping;
+
+			mapping = list_first_entry(&mem_data->mapping_list,
+					struct debug_mem_mapping, node);
+			kbase_mem_phy_alloc_put(mapping->alloc);
+			list_del(&mapping->node);
+			kfree(mapping);
+		}
+		kfree(mem_data);
+	}
+	seq_release(i, file);
+open_fail:
+	fput(kctx->filp);
+
+	return ret;
+}
+
+static int debug_mem_release(struct inode *inode, struct file *file)
+{
+	struct kbase_context *const kctx = inode->i_private;
+	struct seq_file *sfile = file->private_data;
+	struct debug_mem_data *mem_data = sfile->private;
+	struct debug_mem_mapping *mapping;
+
+	seq_release(inode, file);
+
+	while (!list_empty(&mem_data->mapping_list)) {
+		mapping = list_first_entry(&mem_data->mapping_list,
+				struct debug_mem_mapping, node);
+		kbase_mem_phy_alloc_put(mapping->alloc);
+		list_del(&mapping->node);
+		kfree(mapping);
+	}
+
+	kfree(mem_data);
+
+	fput(kctx->filp);
+
+	return 0;
+}
+
+static const struct file_operations kbase_debug_mem_view_fops = {
+	.owner = THIS_MODULE,
+	.open = debug_mem_open,
+	.release = debug_mem_release,
+	.read = seq_read,
+	.llseek = seq_lseek
+};
+
+void kbase_debug_mem_view_init(struct kbase_context *const kctx)
+{
+	/* Caller already ensures this, but we keep the pattern for
+	 * maintenance safety.
+	 */
+	if (WARN_ON(!kctx) ||
+		WARN_ON(IS_ERR_OR_NULL(kctx->kctx_dentry)))
+		return;
+
+	debugfs_create_file("mem_view", 0400, kctx->kctx_dentry, kctx,
+			&kbase_debug_mem_view_fops);
+}
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.h b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.h
new file mode 100644
index 0000000..b948b7c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2015, 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_DEBUG_MEM_VIEW_H
+#define _KBASE_DEBUG_MEM_VIEW_H
+
+#include <mali_kbase.h>
+
+/**
+ * kbase_debug_mem_view_init - Initialize the mem_view debugfs file
+ * @kctx: Pointer to kernel base context
+ *
+ * This function creates a "mem_view" file which can be used to get a view of
+ * the context's memory as the GPU sees it (i.e. using the GPU's page tables).
+ *
+ * The file is cleaned up by a call to debugfs_remove_recursive() deleting the
+ * parent directory.
+ */
+void kbase_debug_mem_view_init(struct kbase_context *kctx);
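+
+/* Illustrative read from user space, assuming the usual debugfs mount point
+ * and per-context directory layout (a sketch, not captured output):
+ *
+ *	# cat /sys/kernel/debug/mali0/ctx/<tgid>_<id>/mem_view
+ *	0000000000400000: 00000000 00000000 00000000 00000000
+ *	...
+ */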
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debugfs_helper.c b/drivers/gpu/arm/midgard/mali_kbase_debugfs_helper.c
new file mode 100644
index 0000000..37e507b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debugfs_helper.c
@@ -0,0 +1,183 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "mali_kbase_debugfs_helper.h"
+
+/* Arbitrary maximum size to prevent user space from allocating too much
+ * kernel memory
+ */
+#define DEBUGFS_MEM_POOLS_MAX_WRITE_SIZE (256u)
+
+/**
+ * set_attr_from_string - Parse a string to set elements of an array
+ *
+ * This is the core of the implementation of
+ * kbase_debugfs_helper_set_attr_from_string. The only difference between the
+ * two functions is that this one requires the input string to be writable.
+ *
+ * @buf:         Input string to parse. Must be nul-terminated!
+ * @array:       Address of an object that can be accessed like an array.
+ * @nelems:      Number of elements in the array.
+ * @set_attr_fn: Function to be called back for each array element.
+ *
+ * Return: 0 if success, negative error code otherwise.
+ */
+static int set_attr_from_string(
+	char *const buf,
+	void *const array, size_t const nelems,
+	kbase_debugfs_helper_set_attr_fn const set_attr_fn)
+{
+	size_t index, err = 0;
+	char *ptr = buf;
+
+	for (index = 0; index < nelems && *ptr; ++index) {
+		unsigned long new_size;
+		size_t len;
+		char sep;
+
+		/* Drop leading spaces */
+		while (*ptr == ' ')
+			ptr++;
+
+		len = strcspn(ptr, "\n ");
+		if (len == 0) {
+			/* No more values (allow this) */
+			break;
+		}
+
+		/* Substitute a nul terminator for a space character
+		 * to make the substring valid for kstrtoul.
+		 */
+		sep = ptr[len];
+		if (sep == ' ')
+			ptr[len++] = '\0';
+
+		err = kstrtoul(ptr, 0, &new_size);
+		if (err)
+			break;
+
+		/* Skip the substring (including any premature nul terminator)
+		 */
+		ptr += len;
+
+		set_attr_fn(array, index, new_size);
+	}
+
+	return err;
+}
+
+int kbase_debugfs_helper_set_attr_from_string(
+	const char *const buf, void *const array, size_t const nelems,
+	kbase_debugfs_helper_set_attr_fn const set_attr_fn)
+{
+	char *const wbuf = kstrdup(buf, GFP_KERNEL);
+	int err = 0;
+
+	if (!wbuf)
+		return -ENOMEM;
+
+	err = set_attr_from_string(wbuf, array, nelems,
+		set_attr_fn);
+
+	kfree(wbuf);
+	return err;
+}
+
+ssize_t kbase_debugfs_helper_get_attr_to_string(
+	char *const buf, size_t const size,
+	void *const array, size_t const nelems,
+	kbase_debugfs_helper_get_attr_fn const get_attr_fn)
+{
+	ssize_t total = 0;
+	size_t index;
+
+	for (index = 0; index < nelems; ++index) {
+		const char *postfix = " ";
+
+		if (index == (nelems-1))
+			postfix = "\n";
+
+		total += scnprintf(buf + total, size - total, "%zu%s",
+				get_attr_fn(array, index), postfix);
+	}
+
+	return total;
+}
+
+int kbase_debugfs_helper_seq_write(struct file *const file,
+	const char __user *const ubuf, size_t const count,
+	size_t const nelems,
+	kbase_debugfs_helper_set_attr_fn const set_attr_fn)
+{
+	const struct seq_file *const sfile = file->private_data;
+	void *const array = sfile->private;
+	int err = 0;
+	char *buf;
+
+	if (WARN_ON(!array))
+		return -EINVAL;
+
+	if (WARN_ON(count > DEBUGFS_MEM_POOLS_MAX_WRITE_SIZE))
+		return -EINVAL;
+
+	buf = kmalloc(count + 1, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, ubuf, count)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	buf[count] = '\0';
+	err = set_attr_from_string(buf,
+		array, nelems, set_attr_fn);
+	kfree(buf);
+
+	return err;
+}
+
+int kbase_debugfs_helper_seq_read(struct seq_file *const sfile,
+	size_t const nelems,
+	kbase_debugfs_helper_get_attr_fn const get_attr_fn)
+{
+	void *const array = sfile->private;
+	size_t index;
+
+	if (WARN_ON(!array))
+		return -EINVAL;
+
+	for (index = 0; index < nelems; ++index) {
+		const char *postfix = " ";
+
+		if (index == (nelems-1))
+			postfix = "\n";
+
+		seq_printf(sfile, "%zu%s", get_attr_fn(array, index), postfix);
+	}
+	return 0;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debugfs_helper.h b/drivers/gpu/arm/midgard/mali_kbase_debugfs_helper.h
new file mode 100644
index 0000000..c3c9efa
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debugfs_helper.h
@@ -0,0 +1,141 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_DEBUGFS_HELPER_H_
+#define _KBASE_DEBUGFS_HELPER_H_
+
+/**
+ * typedef kbase_debugfs_helper_set_attr_fn - Type of function to set an
+ *                                            attribute value from an array
+ *
+ * @array: Address of an object that can be accessed like an array.
+ * @index: An element index. The valid range depends on the use-case.
+ * @value: Attribute value to be set.
+ */
+typedef void (*kbase_debugfs_helper_set_attr_fn)(
+	void *array, size_t index, size_t value);
+
+/**
+ * kbase_debugfs_helper_set_attr_from_string - Parse a string to reconfigure an
+ *                                             array
+ *
+ * The given function is called once for each attribute value found in the
+ * input string. It is not an error if the string specifies fewer attribute
+ * values than the specified number of array elements.
+ *
+ * The number base of each attribute value is detected automatically
+ * according to the standard rules (e.g. prefix "0x" for hexadecimal).
+ * Attribute values are separated by one or more space characters.
+ * Additional leading and trailing spaces are ignored.
+ *
+ * @buf:         Input string to parse. Must be nul-terminated!
+ * @array:       Address of an object that can be accessed like an array.
+ * @nelems:      Number of elements in the array.
+ * @set_attr_fn: Function to be called back for each array element.
+ *
+ * Return: 0 if success, negative error code otherwise.
+ */
+int kbase_debugfs_helper_set_attr_from_string(
+	const char *buf, void *array, size_t nelems,
+	kbase_debugfs_helper_set_attr_fn set_attr_fn);
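+
+/* Illustrative sketch (the callback and array names are hypothetical):
+ * parsing "256 0x200 1024" into an array of pool sizes.
+ *
+ *	static void set_pool_size(void *array, size_t index, size_t value)
+ *	{
+ *		((size_t *)array)[index] = value;
+ *	}
+ *
+ *	size_t pool_sizes[3];
+ *	int err = kbase_debugfs_helper_set_attr_from_string("256 0x200 1024",
+ *			pool_sizes, 3, set_pool_size);
+ */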
+
+/**
+ * typedef kbase_debugfs_helper_get_attr_fn - Type of function to get an
+ *                                            attribute value from an array
+ *
+ * @array: Address of an object that can be accessed like an array.
+ * @index: An element index. The valid range depends on the use-case.
+ *
+ * Return: Value of attribute.
+ */
+typedef size_t (*kbase_debugfs_helper_get_attr_fn)(
+	void *array, size_t index);
+
+/**
+ * kbase_debugfs_helper_get_attr_to_string - Construct a formatted string
+ *                                           from elements in an array
+ *
+ * The given function is called once for each array element to get the
+ * value of the attribute to be inspected. The attribute values are
+ * written to the buffer as a formatted string of decimal numbers
+ * separated by spaces and terminated by a linefeed.
+ *
+ * @buf:         Buffer in which to store the formatted output string.
+ * @size:        The size of the buffer, in bytes.
+ * @array:       Address of an object that can be accessed like an array.
+ * @nelems:      Number of elements in the array.
+ * @get_attr_fn: Function to be called back for each array element.
+ *
+ * Return: Number of characters written excluding the nul terminator.
+ */
+ssize_t kbase_debugfs_helper_get_attr_to_string(
+	char *buf, size_t size, void *array, size_t nelems,
+	kbase_debugfs_helper_get_attr_fn get_attr_fn);
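+
+/* Illustrative sketch (hypothetical callback), complementing the setter
+ * example above: formatting the same pool-size array back into a string.
+ *
+ *	static size_t get_pool_size(void *array, size_t index)
+ *	{
+ *		return ((size_t *)array)[index];
+ *	}
+ *
+ *	char buf[64];
+ *	ssize_t len = kbase_debugfs_helper_get_attr_to_string(buf,
+ *			sizeof(buf), pool_sizes, 3, get_pool_size);
+ */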
+
+/**
+ * kbase_debugfs_helper_seq_read - Implements reads from a virtual file for an
+ *                                 array
+ *
+ * The virtual file must have been opened by calling single_open and passing
+ * the address of an object that can be accessed like an array.
+ *
+ * The given function is called once for each array element to get the
+ * value of the attribute to be inspected. The attribute values are
+ * written to the buffer as a formatted string of decimal numbers
+ * separated by spaces and terminated by a linefeed.
+ *
+ * @sfile:       A virtual file previously opened by calling single_open.
+ * @nelems:      Number of elements in the array.
+ * @get_attr_fn: Function to be called back for each array element.
+ *
+ * Return: 0 if success, negative error code otherwise.
+ */
+int kbase_debugfs_helper_seq_read(
+	struct seq_file *const sfile, size_t const nelems,
+	kbase_debugfs_helper_get_attr_fn const get_attr_fn);
+
+/**
+ * kbase_debugfs_helper_seq_write - Implements writes to a virtual file for an
+ *                                  array
+ *
+ * The virtual file must have been opened by calling single_open and passing
+ * the address of an object that can be accessed like an array.
+ *
+ * The given function is called once for each attribute value found in the
+ * data written to the virtual file. For further details, refer to the
+ * description of set_attr_from_string.
+ *
+ * @file:        A virtual file previously opened by calling single_open.
+ * @ubuf:        Source address in user space.
+ * @count:       Number of bytes written to the virtual file.
+ * @nelems:      Number of elements in the array.
+ * @set_attr_fn: Function to be called back for each array element.
+ *
+ * Return: 0 if success, negative error code otherwise.
+ */
+int kbase_debugfs_helper_seq_write(struct file *const file,
+	const char __user *const ubuf, size_t const count,
+	size_t const nelems,
+	kbase_debugfs_helper_set_attr_fn const set_attr_fn);
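+
+/* Illustrative wiring sketch (all "my_" names are hypothetical): exposing an
+ * array through a debugfs file opened with single_open.
+ *
+ *	static int my_array_show(struct seq_file *sfile, void *data)
+ *	{
+ *		return kbase_debugfs_helper_seq_read(sfile, MY_NELEMS,
+ *				my_get_attr);
+ *	}
+ *
+ *	static int my_array_open(struct inode *in, struct file *file)
+ *	{
+ *		return single_open(file, my_array_show, in->i_private);
+ *	}
+ *
+ *	static ssize_t my_array_write(struct file *file,
+ *			const char __user *ubuf, size_t count, loff_t *ppos)
+ *	{
+ *		int err = kbase_debugfs_helper_seq_write(file, ubuf, count,
+ *				MY_NELEMS, my_set_attr);
+ *
+ *		return err ? err : count;
+ *	}
+ */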
+
+#endif  /*_KBASE_DEBUGFS_HELPER_H_ */
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_defs.h b/drivers/gpu/arm/midgard/mali_kbase_defs.h
new file mode 100644
index 0000000..4f1c070
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_defs.h
@@ -0,0 +1,2381 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_defs.h
+ *
+ * Definitions (types, defines, etc.) common to Kbase. They are placed here to
+ * allow the hierarchy of header files to work.
+ */
+
+#ifndef _KBASE_DEFS_H_
+#define _KBASE_DEFS_H_
+
+#include <mali_kbase_config.h>
+#include <mali_base_hwconfig_features.h>
+#include <mali_base_hwconfig_issues.h>
+#include <mali_kbase_mem_lowlevel.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_instr_defs.h>
+#include <mali_kbase_pm.h>
+#include <mali_kbase_gpuprops_types.h>
+#include <mali_kbase_hwcnt_backend_gpu.h>
+#include <protected_mode_switcher.h>
+
+#include <linux/atomic.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/sizes.h>
+
+#ifdef CONFIG_MALI_BUSLOG
+#include <linux/bus_logger.h>
+#endif
+
+#if defined(CONFIG_SYNC)
+#include <sync.h>
+#else
+#include "mali_kbase_fence_defs.h"
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif				/* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_MALI_DEVFREQ
+#include <linux/devfreq.h>
+#endif /* CONFIG_MALI_DEVFREQ */
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/memory_group_manager.h>
+
+#if defined(CONFIG_PM_RUNTIME) || \
+	(defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+#define KBASE_PM_RUNTIME 1
+#endif
+
+/** Enable SW tracing when set */
+#ifdef CONFIG_MALI_MIDGARD_ENABLE_TRACE
+#define KBASE_TRACE_ENABLE 1
+#endif
+
+#ifndef KBASE_TRACE_ENABLE
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_TRACE_ENABLE 1
+#else
+#define KBASE_TRACE_ENABLE 0
+#endif				/* CONFIG_MALI_DEBUG */
+#endif				/* KBASE_TRACE_ENABLE */
+
+/** Dump Job slot trace on error (only active if KBASE_TRACE_ENABLE != 0) */
+#define KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR 1
+
+/**
+ * Number of milliseconds before resetting the GPU when a job cannot be
+ * "zapped" from the hardware. Note that the time between the context zap
+ * starting and the GPU actually being reset is
+ * ZAP_TIMEOUT+SOFT_STOP_RESET_TIMEOUT, to give other contexts time for their
+ * jobs to be soft-stopped and removed from the hardware before resetting.
+ */
+#define ZAP_TIMEOUT             1000
+
+/** Number of milliseconds before we time out on a GPU soft/hard reset */
+#define RESET_TIMEOUT           500
+
+/**
+ * Prevent soft-stops from occurring in scheduling situations
+ *
+ * This is not due to HW issues; it is for when scheduling needs to be more
+ * predictable.
+ *
+ * Independently of this flag, soft stop may still be disabled due to HW
+ * issues.
+ *
+ * @note Soft stop will still be used for non-scheduling purposes e.g. when terminating a context.
+ *
+ * @note If not in use, define this value to 0 instead of \#undef'ing it
+ */
+#define KBASE_DISABLE_SCHEDULING_SOFT_STOPS 0
+
+/**
+ * Prevent hard-stops from occurring in scheduling situations
+ *
+ * This is not due to HW issues; it is for when scheduling needs to be more
+ * predictable.
+ *
+ * @note Hard stop will still be used for non-scheduling purposes e.g. when terminating a context.
+ *
+ * @note If not in use, define this value to 0 instead of \#undef'ing it
+ */
+#define KBASE_DISABLE_SCHEDULING_HARD_STOPS 0
+
+/**
+ * The maximum number of Job Slots to support in the Hardware.
+ *
+ * You can optimize this down if your target devices will only ever support a
+ * small number of job slots.
+ */
+#define BASE_JM_MAX_NR_SLOTS        3
+
+/**
+ * The maximum number of Address Spaces to support in the Hardware.
+ *
+ * You can optimize this down if your target devices will only ever support a
+ * small number of Address Spaces
+ */
+#define BASE_MAX_NR_AS              16
+
+/* mmu */
+#define MIDGARD_MMU_LEVEL(x) (x)
+
+#define MIDGARD_MMU_TOPLEVEL    MIDGARD_MMU_LEVEL(0)
+
+#define MIDGARD_MMU_BOTTOMLEVEL MIDGARD_MMU_LEVEL(3)
+
+#define GROWABLE_FLAGS_REQUIRED (KBASE_REG_PF_GROW | KBASE_REG_GPU_WR)
+
+/** Setting in kbase_context::as_nr that indicates it is invalid */
+#define KBASEP_AS_NR_INVALID     (-1)
+
+#define KBASE_LOCK_REGION_MAX_SIZE (63)
+#define KBASE_LOCK_REGION_MIN_SIZE (11)
+
+#define KBASE_TRACE_SIZE_LOG2 8	/* 256 entries */
+#define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2)
+#define KBASE_TRACE_MASK ((1 << KBASE_TRACE_SIZE_LOG2)-1)
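+
+/* Illustrative: with the power-of-two size above, a trace ring-buffer index
+ * can wrap without a modulo operation, e.g.
+ *
+ *	next_idx = (idx + 1) & KBASE_TRACE_MASK;
+ */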
+
+#include "mali_kbase_js_defs.h"
+#include "mali_kbase_hwaccess_defs.h"
+
+/* Maximum number of pages of memory that require a permanent mapping, per
+ * kbase_context
+ */
+#define KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES ((1024ul * 1024ul) >> \
+								PAGE_SHIFT)
+
+/** Atom has been previously soft-stopped */
+#define KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED (1<<1)
+/** Atom has been previously retried to execute */
+#define KBASE_KATOM_FLAGS_RERUN (1<<2)
+/* Atom submitted with the JOB_CHAIN_FLAG bit set in the JS_CONFIG_NEXT
+ * register; this helps to disambiguate short-running job chains during
+ * soft/hard stopping of jobs
+ */
+#define KBASE_KATOM_FLAGS_JOBCHAIN (1<<3)
+/** Atom has been previously hard-stopped. */
+#define KBASE_KATOM_FLAG_BEEN_HARD_STOPPED (1<<4)
+/** Atom has caused us to enter disjoint state */
+#define KBASE_KATOM_FLAG_IN_DISJOINT (1<<5)
+/* Atom blocked on cross-slot dependency */
+#define KBASE_KATOM_FLAG_X_DEP_BLOCKED (1<<7)
+/* Atom has fail dependency on cross-slot dependency */
+#define KBASE_KATOM_FLAG_FAIL_BLOCKER (1<<8)
+/* Atom is currently in the list of atoms blocked on cross-slot dependencies */
+#define KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST (1<<9)
+/* Atom is currently holding a context reference */
+#define KBASE_KATOM_FLAG_HOLDING_CTX_REF (1<<10)
+/* Atom requires GPU to be in protected mode */
+#define KBASE_KATOM_FLAG_PROTECTED (1<<11)
+/* Atom has been stored in runnable_tree */
+#define KBASE_KATOM_FLAG_JSCTX_IN_TREE (1<<12)
+/* Atom is waiting for L2 caches to power up in order to enter protected mode */
+#define KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT (1<<13)
+
+/* SW related flags about types of JS_COMMAND action
+ * NOTE: These must be masked off by JS_COMMAND_MASK */
+
+/** This command causes a disjoint event */
+#define JS_COMMAND_SW_CAUSES_DISJOINT 0x100
+
+/** Bitmask of all SW related flags */
+#define JS_COMMAND_SW_BITS  (JS_COMMAND_SW_CAUSES_DISJOINT)
+
+#if (JS_COMMAND_SW_BITS & JS_COMMAND_MASK)
+#error JS_COMMAND_SW_BITS not masked off by JS_COMMAND_MASK. Must update JS_COMMAND_SW_<..> bitmasks
+#endif
+
+/** Soft-stop command that causes a Disjoint event. By design, this isn't
+ *  entirely masked off by JS_COMMAND_MASK */
+#define JS_COMMAND_SOFT_STOP_WITH_SW_DISJOINT \
+		(JS_COMMAND_SW_CAUSES_DISJOINT | JS_COMMAND_SOFT_STOP)
+
+#define KBASEP_ATOM_ID_INVALID BASE_JD_ATOM_COUNT
+
+/* Serialize atoms within a slot (ie only one atom per job slot) */
+#define KBASE_SERIALIZE_INTRA_SLOT (1 << 0)
+/* Serialize atoms between slots (ie only one job slot running at any time) */
+#define KBASE_SERIALIZE_INTER_SLOT (1 << 1)
+/* Reset the GPU after each atom completion */
+#define KBASE_SERIALIZE_RESET (1 << 2)
+
+/* Minimum threshold period for hwcnt dumps between different hwcnt virtualizer
+ * clients, to reduce undesired system load.
+ * If a virtualizer client requests a dump within this threshold period after
+ * some other client has performed a dump, a new dump won't be performed and
+ * the accumulated counter values for that client will be returned instead.
+ */
+#define KBASE_HWCNT_GPU_VIRTUALIZER_DUMP_THRESHOLD_NS (200 * NSEC_PER_USEC)
+
+/* Maximum number of clock/regulator pairs that may be referenced by
+ * the device node.
+ * This is dependent on support for of_property_read_u64_array() in the
+ * kernel.
+ */
+#if (KERNEL_VERSION(4, 0, 0) <= LINUX_VERSION_CODE) || \
+			defined(LSK_OPPV2_BACKPORT)
+#define BASE_MAX_NR_CLOCKS_REGULATORS (2)
+#else
+#define BASE_MAX_NR_CLOCKS_REGULATORS (1)
+#endif
+
+/* Forward declarations */
+struct kbase_context;
+struct kbase_device;
+struct kbase_as;
+struct kbase_mmu_setup;
+struct kbase_ipa_model_vinstr_data;
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * struct base_job_fault_event - keeps track of the atom which faulted or which
+ *                               completed after the faulty atom but before the
+ *                               debug data for faulty atom was dumped.
+ *
+ * @event_code:     event code for the atom; it should not be BASE_JD_EVENT_DONE
+ *                  for the atom which faulted.
+ * @katom:          pointer to the atom for which job fault occurred or which completed
+ *                  after the faulty atom.
+ * @job_fault_work: work item, queued only for the faulty atom, which waits for
+ *                  the dumping to get completed and then does the bottom half
+ *                  of job done for the atoms which followed the faulty atom.
+ * @head:           List head used to store the atom in the global list of faulty
+ *                  atoms or context specific list of atoms which got completed
+ *                  during the dump.
+ * @reg_offset:     offset of the register to be dumped next, only applicable for
+ *                  the faulty atom.
+ */
+struct base_job_fault_event {
+	u32 event_code;
+	struct kbase_jd_atom *katom;
+	struct work_struct job_fault_work;
+	struct list_head head;
+	int reg_offset;
+};
+
+#endif
+
+/**
+ * struct kbase_jd_atom_dependency - Contains the dependency info for an atom.
+ * @atom:          pointer to the dependee atom.
+ * @dep_type:      type of dependency on the dependee @atom, i.e. order or data
+ *                 dependency. BASE_JD_DEP_TYPE_INVALID indicates no dependency.
+ */
+struct kbase_jd_atom_dependency {
+	struct kbase_jd_atom *atom;
+	u8 dep_type;
+};
+
+/**
+ * struct kbase_io_access - holds information about 1 register access
+ *
+ * @addr: first bit indicates r/w (r=0, w=1)
+ * @value: value written or read
+ */
+struct kbase_io_access {
+	uintptr_t addr;
+	u32 value;
+};
+
+/**
+ * struct kbase_io_history - keeps track of all recent register accesses
+ *
+ * @enabled: true if register accesses are recorded, false otherwise
+ * @lock: spinlock protecting kbase_io_access array
+ * @count: number of registers read/written
+ * @size: number of elements in kbase_io_access array
+ * @buf: array of kbase_io_access
+ */
+struct kbase_io_history {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	bool enabled;
+#else
+	u32 enabled;
+#endif
+
+	spinlock_t lock;
+	size_t count;
+	u16 size;
+	struct kbase_io_access *buf;
+};
+
+/**
+ * kbase_jd_katom_dep_atom - Retrieves a read-only reference to the
+ *                           dependee atom.
+ * @dep:   pointer to the dependency info structure.
+ *
+ * Return: readonly reference to dependee atom.
+ */
+static inline const struct kbase_jd_atom *
+kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency *dep)
+{
+	LOCAL_ASSERT(dep != NULL);
+
+	return (const struct kbase_jd_atom *)(dep->atom);
+}
+
+/**
+ * kbase_jd_katom_dep_type -  Retrieves the dependency type info
+ *
+ * @dep:   pointer to the dependency info structure.
+ *
+ * Return: the type of dependency there is on the dependee atom.
+ */
+static inline u8 kbase_jd_katom_dep_type(const struct kbase_jd_atom_dependency *dep)
+{
+	LOCAL_ASSERT(dep != NULL);
+
+	return dep->dep_type;
+}
+
+/**
+ * kbase_jd_katom_dep_set - sets up the dependency info structure
+ *                          as per the values passed.
+ * @const_dep:    pointer to the dependency info structure to be setup.
+ * @a:            pointer to the dependee atom.
+ * @type:         type of dependency there is on the dependee atom.
+ */
+static inline void kbase_jd_katom_dep_set(const struct kbase_jd_atom_dependency *const_dep,
+		struct kbase_jd_atom *a, u8 type)
+{
+	struct kbase_jd_atom_dependency *dep;
+
+	LOCAL_ASSERT(const_dep != NULL);
+
+	dep = (struct kbase_jd_atom_dependency *)const_dep;
+
+	dep->atom = a;
+	dep->dep_type = type;
+}
+
+/**
+ * kbase_jd_katom_dep_clear - resets the dependency info structure
+ *
+ * @const_dep:    pointer to the dependency info structure to be setup.
+ */
+static inline void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependency *const_dep)
+{
+	struct kbase_jd_atom_dependency *dep;
+
+	LOCAL_ASSERT(const_dep != NULL);
+
+	dep = (struct kbase_jd_atom_dependency *)const_dep;
+
+	dep->atom = NULL;
+	dep->dep_type = BASE_JD_DEP_TYPE_INVALID;
+}
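+
+/* Illustrative sketch (hypothetical atom pointers): setting up and later
+ * clearing a data dependency on another atom via the accessors above.
+ *
+ *	kbase_jd_katom_dep_set(&katom->dep[0], dep_atom,
+ *			BASE_JD_DEP_TYPE_DATA);
+ *	...
+ *	kbase_jd_katom_dep_clear(&katom->dep[0]);
+ */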
+
+/**
+ * enum kbase_atom_gpu_rb_state - The state of an atom, pertinent after it becomes
+ *                                runnable, with respect to job slot ringbuffer/fifo.
+ * @KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB: Atom not currently present in slot fifo,
+ *                                which implies that the atom either has not
+ *                                become runnable due to a dependency or has
+ *                                completed its execution on the GPU.
+ * @KBASE_ATOM_GPU_RB_WAITING_BLOCKED: Atom has been added to slot fifo but is blocked
+ *                                due to cross slot dependency, can't be submitted to GPU.
+ * @KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV: Atom has been added to slot fifo but
+ *                                is waiting for the completion of previously added atoms
+ *                                in current & other slots, as their protected mode
+ *                                requirements do not match with the current atom.
+ * @KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION: Atom is in slot fifo and is
+ *                                waiting for completion of protected mode transition,
+ *                                needed before the atom is submitted to GPU.
+ * @KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE: Atom is in slot fifo but is waiting
+ *                                for the cores, which are needed to execute the job
+ *                                chain represented by the atom, to become available
+ * @KBASE_ATOM_GPU_RB_WAITING_AFFINITY: Atom is in slot fifo but is blocked on
+ *                                affinity due to the RMU workaround for HW issue 8987.
+ * @KBASE_ATOM_GPU_RB_READY:      Atom is in slot fifo and can be submitted to GPU.
+ * @KBASE_ATOM_GPU_RB_SUBMITTED:  Atom is in slot fifo and has been submitted to GPU.
+ * @KBASE_ATOM_GPU_RB_RETURN_TO_JS: Atom must be returned to JS due to some failure,
+ *                                but only after the previously added atoms in fifo
+ *                                have completed or have also been returned to JS.
+ */
+enum kbase_atom_gpu_rb_state {
+	KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB,
+	KBASE_ATOM_GPU_RB_WAITING_BLOCKED,
+	KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV,
+	KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION,
+	KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE,
+	KBASE_ATOM_GPU_RB_WAITING_AFFINITY,
+	KBASE_ATOM_GPU_RB_READY,
+	KBASE_ATOM_GPU_RB_SUBMITTED,
+	KBASE_ATOM_GPU_RB_RETURN_TO_JS = -1
+};
+
+/**
+ * enum kbase_atom_enter_protected_state - The state of an atom with respect to the
+ *                      preparation for GPU's entry into protected mode, becomes
+ *                      pertinent only after atom's state with respect to slot
+ *                      ringbuffer is KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @KBASE_ATOM_ENTER_PROTECTED_CHECK:  Starting state. Check if there are any atoms
+ *                      currently submitted to GPU and protected mode transition is
+ *                      not already in progress.
+ * @KBASE_ATOM_ENTER_PROTECTED_HWCNT: Wait for hardware counter context to
+ *                      become disabled before entry into protected mode.
+ * @KBASE_ATOM_ENTER_PROTECTED_IDLE_L2: Wait for the L2 to become idle in preparation
+ *                      for the coherency change. L2 shall be powered down and GPU shall
+ *                      come out of fully coherent mode before entering protected mode.
+ * @KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY: Prepare coherency change;
+ *                      for BASE_HW_ISSUE_TGOX_R1_1234 also request L2 power on so that
+ *                      coherency register contains correct value when GPU enters
+ *                      protected mode.
+ * @KBASE_ATOM_ENTER_PROTECTED_FINISHED: End state; for BASE_HW_ISSUE_TGOX_R1_1234 check
+ *                      that L2 is powered up and switch GPU to protected mode.
+ */
+enum kbase_atom_enter_protected_state {
+	/**
+	 * NOTE: The integer value of this must match KBASE_ATOM_EXIT_PROTECTED_CHECK.
+	 */
+	KBASE_ATOM_ENTER_PROTECTED_CHECK = 0,
+	KBASE_ATOM_ENTER_PROTECTED_HWCNT,
+	KBASE_ATOM_ENTER_PROTECTED_IDLE_L2,
+	KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY,
+	KBASE_ATOM_ENTER_PROTECTED_FINISHED,
+};
+
+/**
+ * enum kbase_atom_exit_protected_state - The state of an atom with respect to the
+ *                      preparation for GPU's exit from protected mode, becomes
+ *                      pertinent only after atom's state with respect to slot
+ *                      ringbuffer is KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @KBASE_ATOM_EXIT_PROTECTED_CHECK: Starting state. Check if there are any atoms
+ *                      currently submitted to GPU and protected mode transition is
+ *                      not already in progress.
+ * @KBASE_ATOM_EXIT_PROTECTED_IDLE_L2: Wait for the L2 to become idle in preparation
+ *                      for the reset, as exiting protected mode requires a reset.
+ * @KBASE_ATOM_EXIT_PROTECTED_RESET: Issue the reset to trigger exit from protected mode
+ * @KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT: End state; wait for the reset to complete.
+ */
+enum kbase_atom_exit_protected_state {
+	/**
+	 * NOTE: The integer value of this must match KBASE_ATOM_ENTER_PROTECTED_CHECK.
+	 */
+	KBASE_ATOM_EXIT_PROTECTED_CHECK = 0,
+	KBASE_ATOM_EXIT_PROTECTED_IDLE_L2,
+	KBASE_ATOM_EXIT_PROTECTED_RESET,
+	KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT,
+};
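+
+/*
+ * The enter/exit states above are overlaid in a single union in
+ * struct kbase_jd_atom (see protected_state further below), which is why
+ * the two *_CHECK values must both be 0. A hedged compile-time sketch of
+ * that invariant (helper name hypothetical):
+ */
+static inline void kbase_protected_state_invariant_sketch(void)
+{
+	/* Resetting the union is direction-agnostic only if both starting
+	 * states share the same integer value. */
+	BUILD_BUG_ON((int)KBASE_ATOM_ENTER_PROTECTED_CHECK !=
+		     (int)KBASE_ATOM_EXIT_PROTECTED_CHECK);
+}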
+
+/**
+ * struct kbase_ext_res - Contains the info for external resources referred
+ *                        by an atom, which have been mapped on GPU side.
+ * @gpu_address:          Start address of the memory region allocated for
+ *                        the resource from GPU virtual address space.
+ * @alloc:                pointer to physical pages tracking object, set on
+ *                        mapping the external resource on GPU side.
+ */
+struct kbase_ext_res {
+	u64 gpu_address;
+	struct kbase_mem_phy_alloc *alloc;
+};
+
+/**
+ * struct kbase_jd_atom  - object representing the atom, containing the complete
+ *                         state and attributes of an atom.
+ * @work:                  work item for the bottom half processing of the atom,
+ *                         by JD or JS, after it got executed on GPU or the input
+ *                         fence got signaled
+ * @start_timestamp:       time at which the atom was submitted to the GPU, by
+ *                         updating the JS_HEAD_NEXTn register.
+ * @udata:                 copy of the user data sent for the atom in base_jd_submit.
+ * @kctx:                  Pointer to the base context with which the atom is associated.
+ * @dep_head:              Array of 2 list heads, pointing to the two lists of atoms
+ *                         which are blocked due to dependency on this atom.
+ * @dep_item:              Array of 2 list heads, used to store the atom in the list of
+ *                         other atoms depending on the same dependee atom.
+ * @dep:                   Array containing the dependency info for the 2 atoms on
+ *                         which this atom depends.
+ * @jd_item:               List head used during job dispatch job_done processing - as
+ *                         dependencies may not be entirely resolved at this point,
+ *                         we need to use a separate list head.
+ * @in_jd_list:            flag set to true if atom's @jd_item is currently on a list,
+ *                         prevents atom being processed twice.
+ * @nr_extres:             number of external resources referenced by the atom.
+ * @extres:                pointer to the location containing info about @nr_extres
+ *                         external resources referenced by the atom.
+ * @device_nr:             indicates the coregroup with which the atom is associated,
+ *                         when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified.
+ * @jc:                    GPU address of the job-chain.
+ * @softjob_data:          Copy of data read from the user space buffer that @jc
+ *                         points to.
+ * @fence:                 Stores either an input or output sync fence, depending
+ *                         on soft-job type
+ * @sync_waiter:           Pointer to the sync fence waiter structure passed to the
+ *                         callback function on signaling of the input fence.
+ * @dma_fence:             object containing pointers to both input & output fences
+ *                         and other related members used for explicit sync through
+ *                         soft jobs and for the implicit synchronization required
+ *                         on access to external resources.
+ * @event_code:            Event code for the job chain represented by the atom, both
+ *                         HW and low-level SW events are represented by event codes.
+ * @core_req:              bitmask of BASE_JD_REQ_* flags specifying either Hw or Sw
+ *                         requirements for the job chain represented by the atom.
+ * @ticks:                 Number of scheduling ticks for which atom has been running
+ *                         on the GPU.
+ * @sched_priority:        Priority of the atom for Job scheduling, as per the
+ *                         KBASE_JS_ATOM_SCHED_PRIO_*.
+ * @poking:                Indicates whether poking of MMU is ongoing for the atom,
+ *                         as a WA for the issue HW_ISSUE_8316.
+ * @completed:             Wait queue to wait upon for the completion of atom.
+ * @status:                Indicates at a high level what stage the atom is in,
+ *                         as per KBASE_JD_ATOM_STATE_*: whether it is not in
+ *                         use, queued in JD, given to JS, submitted to Hw,
+ *                         or has completed its execution on Hw.
+ * @work_id:               used for GPU tracepoints; it is a snapshot of the
+ *                         'work_id' counter in kbase_jd_context, which is
+ *                         incremented on every call to base_jd_submit.
+ * @slot_nr:               Job slot chosen for the atom.
+ * @atom_flags:            bitmask of KBASE_KATOM_FLAG* flags capturing the exact
+ *                         low level state of the atom.
+ * @gpu_rb_state:          bitmask of KBASE_ATOM_GPU_RB_* flags, precisely tracking
+ *                         atom's state after it has entered Job scheduler on becoming
+ *                         runnable. Atom could be blocked due to cross slot dependency
+ *                         or waiting for the shader cores to become available or
+ *                         waiting for protected mode transitions to complete.
+ * @need_cache_flush_cores_retained: flag indicating that manual flush of GPU
+ *                         cache is needed for the atom and the shader cores used
+ *                         for atom have been kept on.
+ * @blocked:               flag indicating that atom's resubmission to GPU is
+ *                         blocked till the work item is scheduled to return the
+ *                         atom to JS.
+ * @pre_dep:               Pointer to atom that this atom has same-slot dependency on
+ * @post_dep:              Pointer to atom that has same-slot dependency on this atom
+ * @x_pre_dep:             Pointer to atom that this atom has cross-slot dependency on
+ * @x_post_dep:            Pointer to atom that has cross-slot dependency on this atom
+ * @flush_id:              The GPU's flush count recorded at the time of submission,
+ *                         used for the cache flush optimisation
+ * @fault_event:           Info for dumping the debug data on Job fault.
+ * @queue:                 List head used for 4 different purposes:
+ *                         Adds atom to the list of dma-buf fence waiting atoms.
+ *                         Adds atom to the list of atoms blocked due to cross
+ *                         slot dependency.
+ *                         Adds atom to the list of softjob atoms for which JIT
+ *                         allocation has been deferred
+ *                         Adds atom to the list of softjob atoms waiting for the
+ *                         signaling of fence.
+ * @jit_node:              Used to keep track of all JIT free/alloc jobs in submission order
+ * @jit_blocked:           Flag indicating that JIT allocation requested through
+ *                         softjob atom will be reattempted after the impending
+ *                         free of other active JIT allocations.
+ * @will_fail_event_code:  If non-zero, this indicates that the atom will fail
+ *                         with the set event_code when the atom is processed.
+ *                         Used for special handling of atoms, which have a data
+ *                         dependency on the failed atoms.
+ * @protected_state:       State of the atom, as per KBASE_ATOM_(ENTER|EXIT)_PROTECTED_*,
+ *                         when transitioning into or out of protected mode. Atom will
+ *                         be either entering or exiting the protected mode.
+ * @runnable_tree_node:    The node added to context's job slot specific rb tree
+ *                         when the atom becomes runnable.
+ * @age:                   Age of atom relative to other atoms in the context, is
+ *                         snapshot of the age_count counter in kbase context.
+ */
+struct kbase_jd_atom {
+	struct work_struct work;
+	ktime_t start_timestamp;
+
+	struct base_jd_udata udata;
+	struct kbase_context *kctx;
+
+	struct list_head dep_head[2];
+	struct list_head dep_item[2];
+	const struct kbase_jd_atom_dependency dep[2];
+	struct list_head jd_item;
+	bool in_jd_list;
+
+	u16 nr_extres;
+	struct kbase_ext_res *extres;
+
+	u32 device_nr;
+	u64 jc;
+	void *softjob_data;
+#if defined(CONFIG_SYNC)
+	struct sync_fence *fence;
+	struct sync_fence_waiter sync_waiter;
+#endif				/* CONFIG_SYNC */
+#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+	struct {
+		/* Use the functions/API defined in mali_kbase_fence.h
+		 * when working with this sub-struct. */
+#if defined(CONFIG_SYNC_FILE)
+		/* Input fence */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+		struct fence *fence_in;
+#else
+		struct dma_fence *fence_in;
+#endif
+#endif
+		/* This points to the dma-buf output fence for this atom. If
+		 * this is NULL then there is no fence for this atom and the
+		 * following fields related to dma_fence may have invalid data.
+		 *
+		 * The context and seqno fields contain the details for this
+		 * fence.
+		 *
+		 * This fence is signaled when the katom is completed,
+		 * regardless of the event_code of the katom (signal also on
+		 * failure).
+		 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+		struct fence *fence;
+#else
+		struct dma_fence *fence;
+#endif
+		/* The dma-buf fence context number for this atom. A unique
+		 * context number is allocated to each katom in the context on
+		 * context creation.
+		 */
+		unsigned int context;
+		/* The dma-buf fence sequence number for this atom. This is
+		 * increased every time this katom uses dma-buf fence.
+		 */
+		atomic_t seqno;
+		/* This contains a list of all callbacks set up to wait on
+		 * other fences.  This atom must be held back from JS until all
+		 * these callbacks have been called and dep_count has reached
+		 * 0. The initial value of dep_count must be equal to the
+		 * number of callbacks on this list.
+		 *
+		 * This list is protected by jctx.lock. Callbacks are added to
+		 * this list when the atom is built and the waits are set up.
+		 * All the callbacks then stay on the list until all callbacks
+		 * have been called and the atom is queued, or cancelled, and
+		 * then all callbacks are taken off the list and freed.
+		 */
+		struct list_head callbacks;
+		/* Atomic counter of the number of outstanding dma-buf fence
+		 * dependencies for this atom. When dep_count reaches 0 the
+		 * atom may be queued.
+		 *
+		 * The special value "-1" may only be set after the count
+		 * reaches 0, while holding jctx.lock. This indicates that the
+		 * atom has been handled, either queued in JS or cancelled.
+		 *
+		 * If anyone but the dma-fence worker sets this to -1 they must
+		 * ensure that any potentially queued worker must have
+		 * completed before allowing the atom to be marked as unused.
+		 * This can be done by flushing the fence work queue:
+		 * kctx->dma_fence.wq.
+		 */
+		atomic_t dep_count;
+	} dma_fence;
+#endif /* CONFIG_MALI_DMA_FENCE || CONFIG_SYNC_FILE */
+
+	/* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */
+	enum base_jd_event_code event_code;
+	base_jd_core_req core_req;
+	u8 jobslot;
+
+	u32 ticks;
+	int sched_priority;
+
+	int poking;
+
+	wait_queue_head_t completed;
+	enum kbase_jd_atom_state status;
+#ifdef CONFIG_GPU_TRACEPOINTS
+	int work_id;
+#endif
+	int slot_nr;
+
+	u32 atom_flags;
+
+	int retry_count;
+
+	enum kbase_atom_gpu_rb_state gpu_rb_state;
+
+	bool need_cache_flush_cores_retained;
+
+	atomic_t blocked;
+
+	struct kbase_jd_atom *pre_dep;
+	struct kbase_jd_atom *post_dep;
+
+	struct kbase_jd_atom *x_pre_dep;
+	struct kbase_jd_atom *x_post_dep;
+
+	u32 flush_id;
+
+#ifdef CONFIG_DEBUG_FS
+	struct base_job_fault_event fault_event;
+#endif
+
+	struct list_head queue;
+
+	struct list_head jit_node;
+	bool jit_blocked;
+
+	enum base_jd_event_code will_fail_event_code;
+
+	union {
+		enum kbase_atom_enter_protected_state enter;
+		enum kbase_atom_exit_protected_state exit;
+	} protected_state;
+
+	struct rb_node runnable_tree_node;
+
+	u32 age;
+};
+
+/**
+ * struct kbase_debug_copy_buffer - information about the buffer to be copied.
+ *
+ * @size:	size of the buffer in bytes
+ * @pages:	pointer to an array of pointers to the pages which contain
+ *		the buffer
+ * @is_vmalloc: true if @pages was allocated with vzalloc. false if @pages was
+ *              allocated with kcalloc
+ * @nr_pages:	number of pages
+ * @offset:	offset into the pages
+ * @gpu_alloc:	pointer to physical memory allocated by the GPU
+ * @extres_pages: array of pointers to the pages containing external resources
+ *		for this buffer
+ * @nr_extres_pages: number of pages in @extres_pages
+ */
+struct kbase_debug_copy_buffer {
+	size_t size;
+	struct page **pages;
+	bool is_vmalloc;
+	int nr_pages;
+	size_t offset;
+	struct kbase_mem_phy_alloc *gpu_alloc;
+
+	struct page **extres_pages;
+	int nr_extres_pages;
+};
+
+static inline bool kbase_jd_katom_is_protected(const struct kbase_jd_atom *katom)
+{
+	return (bool)(katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED);
+}
+
+/*
+ * Theory of operations:
+ *
+ * Atom objects are statically allocated within the context structure.
+ *
+ * Each atom is the head of two lists, one for the "left" set of dependencies, one for the "right" set.
+ */
+
+#define KBASE_JD_DEP_QUEUE_SIZE 256
+
+/**
+ * struct kbase_jd_context  - per context object encapsulating all the Job dispatcher
+ *                            related state.
+ * @lock:                     lock to serialize the updates made to the Job dispatcher
+ *                            state and kbase_jd_atom objects.
+ * @sched_info:               Structure encapsulating all the Job scheduling info.
+ * @atoms:                    Array of the objects representing atoms, containing
+ *                            the complete state and attributes of an atom.
+ * @job_nr:                   Tracks the number of atoms being processed by the
+ *                            kbase. This includes atoms that are not tracked by
+ *                            scheduler: 'not ready to run' & 'dependency-only' jobs.
+ * @zero_jobs_wait:           Waitq that reflects whether there are no jobs
+ *                            (including SW-only dependency jobs). This is set
+ *                            when no jobs are present on the ctx, and clear when
+ *                            there are jobs.
+ *                            This must be updated atomically with @job_nr.
+ *                            note: the Job Dispatcher knows about more jobs than
+ *                            the Job Scheduler, as the latter is unaware of jobs
+ *                            that are blocked on dependencies and of SW-only
+ *                            dependency jobs.
+ *                            This waitq can be waited upon to find out when the
+ *                            context jobs are all done/cancelled (including those
+ *                            that might've been blocked on dependencies) - and so,
+ *                            whether it can be terminated. However, it should only
+ *                            be terminated once it is not present in the run-pool.
+ *                            Since the waitq is only set under @lock, the waiter
+ *                            should also briefly obtain and drop @lock to guarantee
+ *                            that the setter has completed its work on the kbase_context
+ * @job_done_wq:              Workqueue to which the per atom work item is queued
+ *                            for bottom half processing when the atom completes
+ *                            execution on GPU or the input fence gets signaled.
+ * @tb_lock:                  Lock to serialize the write access made to @tb to
+ *                            store the register access trace messages.
+ * @tb:                       Pointer to the Userspace accessible buffer storing
+ *                            the trace messages for register read/write accesses
+ *                            made by the Kbase. The buffer is filled in circular
+ *                            fashion.
+ * @tb_wrap_offset:           Offset to the end location in the trace buffer, the
+ *                            write pointer is moved to the beginning on reaching
+ *                            this offset.
+ * @work_id:                  atomic variable used for GPU tracepoints, incremented
+ *                            on every call to base_jd_submit.
+ */
+struct kbase_jd_context {
+	struct mutex lock;
+	struct kbasep_js_kctx_info sched_info;
+	struct kbase_jd_atom atoms[BASE_JD_ATOM_COUNT];
+
+	u32 job_nr;
+
+	wait_queue_head_t zero_jobs_wait;
+
+	struct workqueue_struct *job_done_wq;
+
+	spinlock_t tb_lock;
+	u32 *tb;
+	size_t tb_wrap_offset;
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+	atomic_t work_id;
+#endif
+};
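+
+/*
+ * A hedged sketch (not driver code) of the waiter protocol described for
+ * @zero_jobs_wait above: wait until the job count drops to zero, then
+ * briefly take and drop @lock so that the thread which set the waitq is
+ * guaranteed to have finished its work on the context.
+ */
+static inline void kbase_jd_wait_for_no_jobs_sketch(struct kbase_jd_context *jctx)
+{
+	wait_event(jctx->zero_jobs_wait, jctx->job_nr == 0);
+
+	/* Synchronize with the setter before tearing the context down */
+	mutex_lock(&jctx->lock);
+	mutex_unlock(&jctx->lock);
+}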
+
+struct kbase_device_info {
+	u32 features;
+};
+
+/** Poking state for BASE_HW_ISSUE_8316  */
+enum {
+	KBASE_AS_POKE_STATE_IN_FLIGHT     = 1<<0,
+	KBASE_AS_POKE_STATE_KILLING_POKE  = 1<<1
+};
+
+/** Poking state for BASE_HW_ISSUE_8316  */
+typedef u32 kbase_as_poke_state;
+
+struct kbase_mmu_setup {
+	u64	transtab;
+	u64	memattr;
+	u64	transcfg;
+};
+
+/**
+ * struct kbase_fault - object containing data relating to a page or bus fault.
+ * @addr:           Records the faulting address.
+ * @extra_addr:     Records the secondary fault address.
+ * @status:         Records the fault status as reported by Hw.
+ * @protected_mode: Flag indicating whether the fault occurred in protected mode
+ *                  or not.
+ */
+struct kbase_fault {
+	u64 addr;
+	u64 extra_addr;
+	u32 status;
+	bool protected_mode;
+};
+
+/**
+ * struct kbase_as   - object representing an address space of GPU.
+ * @number:            Index at which this address space structure is present
+ *                     in an array of address space structures embedded inside the
+ *                     struct kbase_device.
+ * @pf_wq:             Workqueue for processing work items related to Bus fault
+ *                     and Page fault handling.
+ * @work_pagefault:    Work item for the Page fault handling.
+ * @work_busfault:     Work item for the Bus fault handling.
+ * @pf_data:           Data relating to page fault.
+ * @bf_data:           Data relating to bus fault.
+ * @current_setup:     Stores the MMU configuration for this address space.
+ * @poke_wq:           Workqueue to process the work items queue for poking the
+ *                     MMU as a WA for BASE_HW_ISSUE_8316.
+ * @poke_work:         Work item to do the poking of MMU for this address space.
+ * @poke_refcount:     Refcount for the need of poking MMU. While the refcount is
+ *                     non zero the poking of MMU will continue.
+ *                     Protected by hwaccess_lock.
+ * @poke_state:        State indicating whether poking is in progress or it has
+ *                     been stopped. Protected by hwaccess_lock.
+ * @poke_timer:        Timer used to schedule the poking at regular intervals.
+ */
+struct kbase_as {
+	int number;
+	struct workqueue_struct *pf_wq;
+	struct work_struct work_pagefault;
+	struct work_struct work_busfault;
+	struct kbase_fault pf_data;
+	struct kbase_fault bf_data;
+	struct kbase_mmu_setup current_setup;
+	struct workqueue_struct *poke_wq;
+	struct work_struct poke_work;
+	int poke_refcount;
+	kbase_as_poke_state poke_state;
+	struct hrtimer poke_timer;
+};
+
+/**
+ * struct kbase_mmu_table  - object representing a set of GPU page tables
+ * @mmu_teardown_pages:   Buffer of 4 Pages in size, used to cache the entries
+ *                        of top & intermediate level page tables to avoid
+ *                        repeated calls to kmap_atomic during the MMU teardown.
+ * @mmu_lock:             Lock to serialize the accesses made to multi level GPU
+ *                        page tables
+ * @pgd:                  Physical address of the page allocated for the top
+ *                        level page table of the context, this is used for
+ *                        MMU HW programming as the address translation will
+ *                        start from the top level page table.
+ * @group_id:             A memory group ID to be passed to a platform-specific
+ *                        memory group manager.
+ *                        Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ * @kctx:                 If this set of MMU tables belongs to a context then
+ *                        this is a back-reference to the context, otherwise
+ *                        it is NULL
+ */
+struct kbase_mmu_table {
+	u64 *mmu_teardown_pages;
+	struct mutex mmu_lock;
+	phys_addr_t pgd;
+	u8 group_id;
+	struct kbase_context *kctx;
+};
+
+static inline int kbase_as_has_bus_fault(struct kbase_as *as,
+	struct kbase_fault *fault)
+{
+	return (fault == &as->bf_data);
+}
+
+static inline int kbase_as_has_page_fault(struct kbase_as *as,
+	struct kbase_fault *fault)
+{
+	return (fault == &as->pf_data);
+}
+
+struct kbasep_mem_device {
+	atomic_t used_pages;   /* Tracks usage of OS shared memory. Updated
+				   when OS memory is allocated/freed. */
+
+};
+
+#define KBASE_TRACE_CODE(X) KBASE_TRACE_CODE_ ## X
+
+enum kbase_trace_code {
+	/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
+	 * THIS MUST BE USED AT THE START OF THE ENUM */
+#define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X)
+#include "mali_kbase_trace_defs.h"
+#undef  KBASE_TRACE_CODE_MAKE_CODE
+	/* Comma on its own, to extend the list */
+	,
+	/* Must be the last in the enum */
+	KBASE_TRACE_CODE_COUNT
+};
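+
+/*
+ * The enum above is generated with an X-macro: mali_kbase_trace_defs.h is
+ * expected to contain only KBASE_TRACE_CODE_MAKE_CODE() invocations, so it
+ * can be re-included with a different macro definition to generate matching
+ * artefacts. A hedged sketch (guarded by a hypothetical symbol) of reusing
+ * the same list to build a table of human-readable names:
+ */
+#ifdef KBASE_TRACE_CODE_NAMES_SKETCH
+static const char *const kbase_trace_code_names[] = {
+#define KBASE_TRACE_CODE_MAKE_CODE(X) [KBASE_TRACE_CODE(X)] = #X,
+#include "mali_kbase_trace_defs.h"
+#undef  KBASE_TRACE_CODE_MAKE_CODE
+};
+#endif /* KBASE_TRACE_CODE_NAMES_SKETCH */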
+
+#define KBASE_TRACE_FLAG_REFCOUNT (((u8)1) << 0)
+#define KBASE_TRACE_FLAG_JOBSLOT  (((u8)1) << 1)
+
+/**
+ * struct kbase_trace - object representing a trace message added to trace buffer
+ *                      kbase_device::trace_rbuf
+ * @timestamp:          CPU timestamp at which the trace message was added.
+ * @thread_id:          id of the thread in the context of which trace message
+ *                      was added.
+ * @cpu:                indicates which CPU the @thread_id was scheduled on when
+ *                      the trace message was added.
+ * @ctx:                Pointer to the kbase context for which the trace message
+ *                      was added. Will be NULL for certain trace messages like
+ *                      for traces added corresponding to power management events.
+ *                      Will point to the appropriate context corresponding to
+ *                      job-slot & context's reference count related events.
+ * @katom:              indicates if the trace message has atom related info.
+ * @atom_number:        id of the atom for which trace message was added.
+ *                      Only valid if @katom is true.
+ * @atom_udata:         Copy of the user data sent for the atom in base_jd_submit.
+ *                      Only valid if @katom is true.
+ * @gpu_addr:           GPU address of the job-chain represented by atom. Could
+ *                      be valid even if @katom is false.
+ * @info_val:           value specific to the type of event being traced. For the
+ *                      case where @katom is true, will be set to atom's affinity,
+ *                      i.e. bitmask of shader cores chosen for atom's execution.
+ * @code:               Identifies the event, refer enum kbase_trace_code.
+ * @jobslot:            job-slot for which trace message was added, valid only for
+ *                      job-slot management events.
+ * @refcount:           reference count for the context, valid for certain events
+ *                      related to scheduler core and policy.
+ * @flags:              indicates if info related to @jobslot & @refcount is present
+ *                      in the trace message, used during dumping of the message.
+ */
+struct kbase_trace {
+	struct timespec timestamp;
+	u32 thread_id;
+	u32 cpu;
+	void *ctx;
+	bool katom;
+	int atom_number;
+	u64 atom_udata[2];
+	u64 gpu_addr;
+	unsigned long info_val;
+	u8 code;
+	u8 jobslot;
+	u8 refcount;
+	u8 flags;
+};
+
+/**
+ * Data stored per device for power management.
+ *
+ * This structure contains data for the power management framework. There is one
+ * instance of this structure per device in the system.
+ */
+struct kbase_pm_device_data {
+	/**
+	 * The lock protecting Power Management structures accessed outside of
+	 * IRQ.
+	 *
+	 * This lock must also be held whenever the GPU is being powered on or
+	 * off.
+	 */
+	struct mutex lock;
+
+	/**
+	 * The reference count of active contexts on this device. Note that
+	 * some code paths keep shaders/the tiler powered whilst this is 0. Use
+	 * kbase_pm_is_active() instead to check for such cases.
+	 */
+	int active_count;
+	/** Flag indicating suspending/suspended */
+	bool suspending;
+	/* Wait queue set when active_count == 0 */
+	wait_queue_head_t zero_active_count_wait;
+
+	/**
+	 * Bit masks identifying the available shader cores that are specified
+	 * via sysfs. One mask per job slot.
+	 */
+	u64 debug_core_mask[BASE_JM_MAX_NR_SLOTS];
+	u64 debug_core_mask_all;
+
+	/**
+	 * Callback for initializing the runtime power management.
+	 *
+	 * @param kbdev The kbase device
+	 *
+	 * @return 0 on success, else error code
+	 */
+	int (*callback_power_runtime_init)(struct kbase_device *kbdev);
+
+	/**
+	 * Callback for terminating the runtime power management.
+	 *
+	 * @param kbdev The kbase device
+	 */
+	void (*callback_power_runtime_term)(struct kbase_device *kbdev);
+
+	/* Time in milliseconds between each dvfs sample */
+	u32 dvfs_period;
+
+	struct kbase_pm_backend_data backend;
+};
+
+/**
+ * struct kbase_mem_pool - Page based memory pool for kctx/kbdev
+ * @kbdev:        Kbase device where memory is used
+ * @cur_size:     Number of free pages currently in the pool (may exceed
+ *                @max_size in some corner cases)
+ * @max_size:     Maximum number of free pages in the pool
+ * @order:        order = 0 refers to a pool of 4 KB pages
+ *                order = 9 refers to a pool of 2 MB pages (2^9 * 4KB = 2 MB)
+ * @group_id:     A memory group ID to be passed to a platform-specific
+ *                memory group manager, if present. Immutable.
+ *                Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ * @pool_lock:    Lock protecting the pool - must be held when modifying
+ *                @cur_size and @page_list
+ * @page_list:    List of free pages in the pool
+ * @reclaim:      Shrinker for kernel reclaim of free pages
+ * @next_pool:    Pointer to next pool where pages can be allocated when this
+ *                pool is empty. Pages will spill over to the next pool when
+ *                this pool is full. Can be NULL if there is no next pool.
+ * @dying:        true if the pool is being terminated, and any ongoing
+ *                operations should be abandoned
+ * @dont_reclaim: true if the shrinker is forbidden from reclaiming memory from
+ *                this pool, eg during a grow operation
+ */
+struct kbase_mem_pool {
+	struct kbase_device *kbdev;
+	size_t              cur_size;
+	size_t              max_size;
+	u8                  order;
+	u8                  group_id;
+	spinlock_t          pool_lock;
+	struct list_head    page_list;
+	struct shrinker     reclaim;
+
+	struct kbase_mem_pool *next_pool;
+
+	bool dying;
+	bool dont_reclaim;
+};
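+
+/*
+ * A minimal sketch (not the driver's allocator) of the @next_pool chaining
+ * documented above: take a page from this pool's free list if one is
+ * available, otherwise fall through to the next pool in the chain. The
+ * helper name is hypothetical and error handling is elided.
+ */
+static inline struct page *kbase_mem_pool_try_alloc_sketch(
+		struct kbase_mem_pool *pool)
+{
+	while (pool) {
+		struct page *p = NULL;
+
+		spin_lock(&pool->pool_lock);
+		if (pool->cur_size > 0) {
+			p = list_first_entry(&pool->page_list,
+					struct page, lru);
+			list_del(&p->lru);
+			pool->cur_size--;
+		}
+		spin_unlock(&pool->pool_lock);
+
+		if (p)
+			return p;
+		pool = pool->next_pool; /* spill over to the next pool */
+	}
+	return NULL; /* all pools empty; caller falls back to the OS */
+}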
+
+/**
+ * struct kbase_mem_pool_group - a complete set of physical memory pools.
+ *
+ * Memory pools are used to allow efficient reallocation of previously-freed
+ * physical pages. A pair of memory pools is initialized for each physical
+ * memory group: one for 4 KiB pages and one for 2 MiB pages. These arrays
+ * should be indexed by physical memory group ID, the meaning of which is
+ * defined by the systems integrator.
+ *
+ * @small: Array of objects containing the state for pools of 4 KiB size
+ *         physical pages.
+ * @large: Array of objects containing the state for pools of 2 MiB size
+ *         physical pages.
+ */
+struct kbase_mem_pool_group {
+	struct kbase_mem_pool small[MEMORY_GROUP_MANAGER_NR_GROUPS];
+	struct kbase_mem_pool large[MEMORY_GROUP_MANAGER_NR_GROUPS];
+};
+
+/**
+ * struct kbase_mem_pool_config - Initial configuration for a physical memory
+ *                                pool
+ *
+ * @max_size: Maximum number of free pages that the pool can hold.
+ */
+struct kbase_mem_pool_config {
+	size_t max_size;
+};
+
+/**
+ * struct kbase_mem_pool_group_config - Initial configuration for a complete
+ *                                      set of physical memory pools
+ *
+ * This array should be indexed by physical memory group ID, the meaning
+ * of which is defined by the systems integrator.
+ *
+ * @small: Array of initial configuration for pools of 4 KiB pages.
+ * @large: Array of initial configuration for pools of 2 MiB pages.
+ */
+struct kbase_mem_pool_group_config {
+	struct kbase_mem_pool_config small[MEMORY_GROUP_MANAGER_NR_GROUPS];
+	struct kbase_mem_pool_config large[MEMORY_GROUP_MANAGER_NR_GROUPS];
+};
+
+/**
+ * struct kbase_devfreq_opp - Lookup table for converting between nominal OPP
+ *                            frequency, real frequencies and core mask
+ * @real_freqs: Real GPU frequencies.
+ * @opp_volts: OPP voltages.
+ * @opp_freq:  Nominal OPP frequency
+ * @core_mask: Shader core mask
+ */
+struct kbase_devfreq_opp {
+	u64 opp_freq;
+	u64 core_mask;
+	u64 real_freqs[BASE_MAX_NR_CLOCKS_REGULATORS];
+	u32 opp_volts[BASE_MAX_NR_CLOCKS_REGULATORS];
+};
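+
+/*
+ * A hedged sketch of how the lookup table above might be searched: given a
+ * nominal OPP frequency, return the matching entry so the caller can read
+ * the per-clock real frequencies and the shader core mask. @table and
+ * @num_opps mirror kbase_device::devfreq_table and ::num_opps; the helper
+ * itself is illustrative only.
+ */
+static inline const struct kbase_devfreq_opp *kbase_devfreq_find_opp_sketch(
+		const struct kbase_devfreq_opp *table, int num_opps,
+		u64 nominal_freq)
+{
+	int i;
+
+	for (i = 0; i < num_opps; i++)
+		if (table[i].opp_freq == nominal_freq)
+			return &table[i];
+
+	return NULL; /* nominal frequency not present in the table */
+}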
+
+/* MMU mode flags */
+#define KBASE_MMU_MODE_HAS_NON_CACHEABLE (1ul << 0) /* Has NON_CACHEABLE MEMATTR */
+
+/**
+ * struct kbase_mmu_mode - object containing pointer to methods invoked for
+ *                         programming the MMU, as per the MMU mode supported
+ *                         by Hw.
+ * @update:           enable & setup/configure one of the GPU address spaces.
+ * @get_as_setup:     retrieve the configuration of one of the GPU address spaces.
+ * @disable_as:       disable one of the GPU address spaces.
+ * @pte_to_phy_addr:  retrieve the physical address encoded in the page table entry.
+ * @ate_is_valid:     check if the pte is a valid address translation entry
+ *                    encoding the physical address of the actual mapped page.
+ * @pte_is_valid:     check if the pte is a valid entry encoding the physical
+ *                    address of the next lower level page table.
+ * @entry_set_ate:    program the pte to be a valid address translation entry to
+ *                    encode the physical address of the actual page being mapped.
+ * @entry_set_pte:    program the pte to be a valid entry to encode the physical
+ *                    address of the next lower level page table.
+ * @entry_invalidate: clear out or invalidate the pte.
+ * @flags:            bitmask of MMU mode flags. Refer to KBASE_MMU_MODE_ constants.
+ */
+struct kbase_mmu_mode {
+	void (*update)(struct kbase_device *kbdev,
+			struct kbase_mmu_table *mmut,
+			int as_nr);
+	void (*get_as_setup)(struct kbase_mmu_table *mmut,
+			struct kbase_mmu_setup * const setup);
+	void (*disable_as)(struct kbase_device *kbdev, int as_nr);
+	phys_addr_t (*pte_to_phy_addr)(u64 entry);
+	int (*ate_is_valid)(u64 ate, int level);
+	int (*pte_is_valid)(u64 pte, int level);
+	void (*entry_set_ate)(u64 *entry, struct tagged_addr phy,
+			unsigned long flags, int level);
+	void (*entry_set_pte)(u64 *entry, phys_addr_t phy);
+	void (*entry_invalidate)(u64 *entry);
+	unsigned long flags;
+};
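+
+/*
+ * A minimal sketch of driving the mode-specific hooks above: encode one
+ * address translation entry through the active kbase_mmu_mode. How the mode
+ * and the PTE storage are obtained is assumed; the helper is illustrative.
+ */
+static inline void kbase_mmu_mode_usage_sketch(
+		const struct kbase_mmu_mode *mode, u64 *entry,
+		struct tagged_addr phy, unsigned long flags, int level)
+{
+	mode->entry_set_ate(entry, phy, flags, level);
+
+	/* A freshly written ATE is expected to validate; invalidate it
+	 * defensively if it does not. */
+	if (!mode->ate_is_valid(*entry, level))
+		mode->entry_invalidate(entry);
+}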
+
+struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void);
+struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void);
+
+#define DEVNAME_SIZE	16
+
+/**
+ * enum kbase_devfreq_work_type - The type of work to perform in the devfreq
+ *                                suspend/resume worker.
+ * @DEVFREQ_WORK_NONE:    Initialisation state.
+ * @DEVFREQ_WORK_SUSPEND: Call devfreq_suspend_device().
+ * @DEVFREQ_WORK_RESUME:  Call devfreq_resume_device().
+ */
+enum kbase_devfreq_work_type {
+	DEVFREQ_WORK_NONE,
+	DEVFREQ_WORK_SUSPEND,
+	DEVFREQ_WORK_RESUME
+};
+
+/**
+ * struct kbase_devfreq_queue_info - Object representing an instance for managing
+ *                                   the queued devfreq suspend/resume works.
+ * @workq:                 Workqueue for devfreq suspend/resume requests
+ * @work:                  Work item for devfreq suspend & resume
+ * @req_type:              Requested work type to be performed by the devfreq
+ *                         suspend/resume worker
+ * @acted_type:            Work type most recently acted on by the worker, i.e.
+ *                         the internally recorded state of the suspend/resume
+ */
+struct kbase_devfreq_queue_info {
+	struct workqueue_struct *workq;
+	struct work_struct work;
+	enum kbase_devfreq_work_type req_type;
+	enum kbase_devfreq_work_type acted_type;
+};
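+
+/*
+ * A hedged sketch (helper name hypothetical) of queueing a request through
+ * the structure above: record the requested work type and kick the work
+ * item; the worker can then compare @req_type against @acted_type to decide
+ * whether a suspend/resume action is still outstanding.
+ */
+static inline void kbase_devfreq_enqueue_sketch(
+		struct kbase_devfreq_queue_info *qi,
+		enum kbase_devfreq_work_type type)
+{
+	qi->req_type = type;
+	queue_work(qi->workq, &qi->work);
+}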
+
+/**
+ * struct kbase_device   - Object representing an instance of GPU platform device,
+ *                         allocated from the probe method of mali driver.
+ * @hw_quirks_sc:          Configuration to be used for the shader cores as per
+ *                         the HW issues present in the GPU.
+ * @hw_quirks_tiler:       Configuration to be used for the Tiler as per the HW
+ *                         issues present in the GPU.
+ * @hw_quirks_mmu:         Configuration to be used for the MMU as per the HW
+ *                         issues present in the GPU.
+ * @hw_quirks_jm:          Configuration to be used for the Job Manager as per
+ *                         the HW issues present in the GPU.
+ * @entry:                 Links the device instance to the global list of GPU
+ *                         devices. The list would have as many entries as there
+ *                         are GPU device instances.
+ * @dev:                   Pointer to the kernel's generic/base representation
+ *                         of the GPU platform device.
+ * @mdev:                  Pointer to the miscellaneous device registered to
+ *                         provide Userspace access to kernel driver through the
+ *                         device file /dev/malixx.
+ * @reg_start:             Base address of the region in physical address space
+ *                         where GPU registers have been mapped.
+ * @reg_size:              Size of the region containing GPU registers
+ * @reg:                   Kernel virtual address of the region containing GPU
+ *                         registers, using which Driver will access the registers.
+ * @irqs:                  Array containing IRQ resource info for 3 types of
+ *                         interrupts : Job scheduling, MMU & GPU events (like
+ *                         power management, cache etc.)
+ * @clocks:                Pointer to the input clock resources referenced by
+ *                         the GPU device node.
+ * @nr_clocks:             Number of clocks set in the clocks array.
+ * @regulators:            Pointer to the structs corresponding to the
+ *                         regulators referenced by the GPU device node.
+ * @nr_regulators:         Number of regulators set in the regulators array.
+ * @opp_table:             Pointer to the device OPP structure maintaining the
+ *                         link to OPPs attached to a device. This is obtained
+ *                         after setting regulator names for the device.
+ * @devname:               string containing the name used for GPU device instance,
+ *                         miscellaneous device is registered using the same name.
+ * @id:                    Unique identifier for the device, indicates the number of
+ *                         devices which have been created so far.
+ * @model:                 Pointer to the dummy model, valid only when the Driver
+ *                         is compiled to not access the real GPU Hw. The model
+ *                         tries to mimic, to some extent, the state & behavior
+ *                         of the GPU Hw in response to the register accesses
+ *                         made by the Driver.
+ * @irq_slab:              slab cache for allocating the work items queued when
+ *                         model mimics raising of IRQ to cause an interrupt on CPU.
+ * @irq_workq:             workqueue for processing the irq work items.
+ * @serving_job_irq:       function to execute work items queued when model mimics
+ *                         the raising of JS irq, mimics the interrupt handler
+ *                         processing JS interrupts.
+ * @serving_gpu_irq:       function to execute work items queued when model mimics
+ *                         the raising of GPU irq, mimics the interrupt handler
+ *                         processing GPU interrupts.
+ * @serving_mmu_irq:       function to execute work items queued when model mimics
+ *                         the raising of MMU irq, mimics the interrupt handler
+ *                         processing MMU interrupts.
+ * @reg_op_lock:           lock used by model to serialize the handling of register
+ *                         accesses made by the driver.
+ * @pm:                    Per device object for storing data for power management
+ *                         framework.
+ * @js_data:               Per device object encapsulating the current context of
+ *                         Job Scheduler, which is global to the device and is not
+ *                         tied to any particular struct kbase_context running on
+ *                         the device
+ * @mem_pools:             Global pools of free physical memory pages which can
+ *                         be used by all the contexts.
+ * @memdev:                keeps track of the in use physical pages allocated by
+ *                         the Driver.
+ * @mmu_mode:              Pointer to the object containing methods for programming
+ *                         the MMU, depending on the type of MMU supported by Hw.
+ * @mgm_dev:               Pointer to the memory group manager device attached
+ *                         to the GPU device. This points to an internal memory
+ *                         group manager if no platform-specific memory group
+ *                         manager was retrieved through device tree.
+ * @as:                    Array of objects representing address spaces of GPU.
+ * @as_free:               Bitpattern of free/available GPU address spaces.
+ * @as_to_kctx:            Array of pointers to struct kbase_context, having
+ *                         GPU address spaces assigned to them.
+ * @mmu_mask_change:       Lock to serialize the access to MMU interrupt mask
+ *                         register used in the handling of Bus & Page faults.
+ * @gpu_props:             Object containing complete information about the
+ *                         configuration/properties of GPU HW device in use.
+ * @hw_issues_mask:        List of SW workarounds for HW issues
+ * @hw_features_mask:      List of available HW features.
+ * @disjoint_event:        struct for keeping track of the disjoint information,
+ *                         that whether the GPU is in a disjoint state and the
+ *                         number of disjoint events that have occurred on GPU.
+ * @nr_hw_address_spaces:  Number of address spaces actually available in the
+ *                         GPU, remains constant after driver initialisation.
+ * @nr_user_address_spaces: Number of address spaces available to user contexts
+ * @hwcnt:                  Structure used for instrumentation and HW counters
+ *                         dumping
+ * @hwcnt_gpu_iface:       Backend interface for GPU hardware counter access.
+ * @hwcnt_gpu_ctx:         Context for GPU hardware counter access.
+ *                         @hwaccess_lock must be held when calling
+ *                         kbase_hwcnt_context_enable() with @hwcnt_gpu_ctx.
+ * @hwcnt_gpu_virt:        Virtualizer for GPU hardware counters.
+ * @vinstr_ctx:            vinstr context created per device.
+ * @timeline_is_enabled:   Non-zero if there is at least one timeline client,
+ *                         zero otherwise.
+ * @timeline:              Timeline context created per device.
+ * @trace_lock:            Lock to serialize the access to trace buffer.
+ * @trace_first_out:       Index/offset in the trace buffer at which the first
+ *                         unread message is present.
+ * @trace_next_in:         Index/offset in the trace buffer at which the new
+ *                         message will be written.
+ * @trace_rbuf:            Pointer to the buffer storing debug messages/prints
+ *                         tracing the various events in Driver.
+ *                         The buffer is filled in circular fashion.
+ * @reset_timeout_ms:      Number of milliseconds to wait for the soft stop to
+ *                         complete for the GPU jobs before proceeding with the
+ *                         GPU reset.
+ * @cache_clean_in_progress: Set when a cache clean has been started, and
+ *                         cleared when it has finished. This prevents multiple
+ *                         cache cleans being done simultaneously.
+ * @cache_clean_queued:    Set if a cache clean is invoked while another is in
+ *                         progress. If this happens, another cache clean needs
+ *                         to be triggered immediately after completion of the
+ *                         current one.
+ * @cache_clean_wait:      Signalled when a cache clean has finished.
+ * @platform_context:      Platform specific private data to be accessed by
+ *                         platform specific config files only.
+ * @kctx_list:             List of kbase_contexts created for the device,
+ *                         including any contexts that might be created for
+ *                         hardware counters.
+ * @kctx_list_lock:        Lock protecting concurrent accesses to @kctx_list.
+ * @devfreq_profile:       Describes devfreq profile for the Mali GPU device, passed
+ *                         to devfreq_add_device() to add devfreq feature to Mali
+ *                         GPU device.
+ * @devfreq:               Pointer to devfreq structure for Mali GPU device,
+ *                         returned on the call to devfreq_add_device().
+ * @current_freqs:         The real frequencies, corresponding to
+ *                         @current_nominal_freq, at which the Mali GPU device
+ *                         is currently operating, as retrieved from
+ *                         @devfreq_table in the target callback of
+ *                         @devfreq_profile.
+ * @current_nominal_freq:  The nominal frequency currently used for the Mali GPU
+ *                         device as retrieved through devfreq_recommended_opp()
+ *                         using the freq value passed as an argument to target
+ *                         callback of @devfreq_profile
+ * @current_voltages:      The voltages corresponding to @current_nominal_freq,
+ *                         as retrieved from @devfreq_table in the target
+ *                         callback of @devfreq_profile.
+ * @current_core_mask:     bitmask of shader cores that are currently desired &
+ *                         enabled, corresponding to @current_nominal_freq as
+ *                         retrieved from @devfreq_table in the target callback
+ *                         of @devfreq_profile.
+ * @devfreq_table:         Pointer to the lookup table for converting between
+ *                         nominal OPP (operating performance point) frequency,
+ *                         and real frequency and core mask. This table is
+ *                         constructed according to operating-points-v2-mali
+ *                         table in devicetree.
+ * @num_opps:              Number of operating performance points available for the Mali
+ *                         GPU device.
+ * @devfreq_queue:         Per device object for storing data that manages devfreq
+ *                         suspend & resume request queue and the related items.
+ * @devfreq_cooling:       Pointer returned on registering devfreq cooling device
+ *                         corresponding to @devfreq.
+ * @ipa_protection_mode_switched: is set to TRUE when GPU is put into protected
+ *                         mode. It is a sticky flag which is cleared by IPA
+ *                         once it has made use of information that GPU had
+ *                         previously entered protected mode.
+ * @ipa:                   Top level structure for IPA, containing pointers to both
+ *                         configured & fallback models.
+ * @previous_frequency:    Previous frequency of GPU clock used for
+ *                         BASE_HW_ISSUE_GPU2017_1336 workaround, This clock is
+ *                         restored when L2 is powered on.
+ * @job_fault_debug:       Flag to control the dumping of debug data for job faults,
+ *                         set when the 'job_fault' debugfs file is opened.
+ * @mali_debugfs_directory: Root directory for the debugfs files created by the driver
+ * @debugfs_ctx_directory: Directory inside the @mali_debugfs_directory containing
+ *                         a sub-directory for every context.
+ * @debugfs_as_read_bitmap: bitmap of address spaces for which the bus or page fault
+ *                         has occurred.
+ * @job_fault_wq:          Waitqueue to block the job fault dumping daemon till the
+ *                         occurrence of a job fault.
+ * @job_fault_resume_wq:   Waitqueue on which every context with a faulty job wait
+ *                         for the job fault dumping to complete before they can
+ *                         do bottom half of job done for the atoms which followed
+ *                         the faulty atom.
+ * @job_fault_resume_workq: workqueue to process the work items queued for the faulty
+ *                         atoms, whereby the work item function waits for the dumping
+ *                         to get completed.
+ * @job_fault_event_list:  List of atoms, each belonging to a different context, which
+ *                         generated a job fault.
+ * @job_fault_event_lock:  Lock to protect concurrent accesses to @job_fault_event_list
+ * @regs_dump_debugfs_data: Contains the offset of register to be read through debugfs
+ *                         file "read_register".
+ * @ctx_num:               Total number of contexts created for the device.
+ * @io_history:            Pointer to an object keeping a track of all recent
+ *                         register accesses. The history of register accesses
+ *                         can be read through "regs_history" debugfs file.
+ * @hwaccess:              Contains a pointer to active kbase context and GPU
+ *                         backend specific data for HW access layer.
+ * @faults_pending:        Count of page/bus faults waiting for bottom half processing
+ *                         via workqueues.
+ * @poweroff_pending:      Set when power off operation for GPU is started, reset when
+ *                         power on for GPU is started.
+ * @infinite_cache_active_default: Set to enable using infinite cache for all the
+ *                         allocations of a new context.
+ * @mem_pool_defaults:     Default configuration for the group of memory pools
+ *                         created for a new context.
+ * @current_gpu_coherency_mode: coherency mode in use, which can be different
+ *                         from @system_coherency, when using protected mode.
+ * @system_coherency:      coherency mode as retrieved from the device tree.
+ * @cci_snoop_enabled:     Flag to track when CCI snoops have been enabled.
+ * @snoop_enable_smc:      SMC function ID to call into Trusted firmware to
+ *                         enable cache snooping. Value of 0 indicates that it
+ *                         is not used.
+ * @snoop_disable_smc:     SMC function ID to call disable cache snooping.
+ * @protected_ops:         Pointer to the methods for switching in or out of the
+ *                         protected mode, as per the @protected_dev being used.
+ * @protected_dev:         Pointer to the protected mode switcher device attached
+ *                         to the GPU device retrieved through device tree if the
+ *                         GPU does not support protected mode switching natively.
+ * @protected_mode:        set to TRUE when GPU is put into protected mode
+ * @protected_mode_transition: set to TRUE when GPU is transitioning into or
+ *                         out of protected mode.
+ * @protected_mode_hwcnt_desired: True if we want GPU hardware counters to be
+ *                         enabled. Counters must be disabled before transition
+ *                         into protected mode.
+ * @protected_mode_hwcnt_disabled: True if GPU hardware counters are not
+ *                         enabled.
+ * @protected_mode_hwcnt_disable_work: Work item to disable GPU hardware
+ *                         counters, used if atomic disable is not possible.
+ * @protected_mode_support: set to true if protected mode is supported.
+ * @buslogger:              Pointer to the structure required for interfacing
+ *                          with the bus logger module to set the size of buffer
+ *                          used by the module for capturing bus logs.
+ * @irq_reset_flush:        Flag to indicate that GPU reset is in-flight and flush of
+ *                          IRQ + bottom half is being done, to prevent the writes
+ *                          to MMU_IRQ_CLEAR & MMU_IRQ_MASK registers.
+ * @inited_subsys:          Bitmap of inited sub systems at the time of device probe.
+ *                          Used during device remove or for handling error in probe.
+ * @hwaccess_lock:          Lock, which can be taken from IRQ context, to serialize
+ *                          the updates made to Job dispatcher + scheduler states.
+ * @mmu_hw_mutex:           Protects access to MMU operations and address space
+ *                          related state.
+ * @serialize_jobs:         Currently used mode for serialization of jobs, both
+ *                          intra & inter slots serialization is supported.
+ * @backup_serialize_jobs:  Copy of the original value of @serialize_jobs taken
+ *                          when GWT is enabled. Used to restore the original value
+ *                          on disabling of GWT.
+ * @js_ctx_scheduling_mode: Context scheduling mode currently being used by
+ *                          Job Scheduler
+ * @l2_size_override:       Used to set L2 cache size via device tree blob
+ * @l2_hash_override:       Used to set L2 cache hash via device tree blob
+ * @policy_list:            A filtered list of policies available in the system.
+ * @policy_count:           Number of policies in the @policy_list.
+ */
+struct kbase_device {
+	u32 hw_quirks_sc;
+	u32 hw_quirks_tiler;
+	u32 hw_quirks_mmu;
+	u32 hw_quirks_jm;
+
+	struct list_head entry;
+	struct device *dev;
+	struct miscdevice mdev;
+	u64 reg_start;
+	size_t reg_size;
+	void __iomem *reg;
+
+	struct {
+		int irq;
+		int flags;
+	} irqs[3];
+
+	struct clk *clocks[BASE_MAX_NR_CLOCKS_REGULATORS];
+	unsigned int nr_clocks;
+#ifdef CONFIG_REGULATOR
+	struct regulator *regulators[BASE_MAX_NR_CLOCKS_REGULATORS];
+	unsigned int nr_regulators;
+#if (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE)
+	struct opp_table *opp_table;
+#endif /* (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE) */
+#endif /* CONFIG_REGULATOR */
+	char devname[DEVNAME_SIZE];
+	u32  id;
+
+#ifdef CONFIG_MALI_NO_MALI
+	void *model;
+	struct kmem_cache *irq_slab;
+	struct workqueue_struct *irq_workq;
+	atomic_t serving_job_irq;
+	atomic_t serving_gpu_irq;
+	atomic_t serving_mmu_irq;
+	spinlock_t reg_op_lock;
+#endif	/* CONFIG_MALI_NO_MALI */
+
+	struct kbase_pm_device_data pm;
+	struct kbasep_js_device_data js_data;
+	struct kbase_mem_pool_group mem_pools;
+	struct kbasep_mem_device memdev;
+	struct kbase_mmu_mode const *mmu_mode;
+
+	struct memory_group_manager_device *mgm_dev;
+
+	struct kbase_as as[BASE_MAX_NR_AS];
+	u16 as_free; /* Bitpattern of free Address Spaces */
+	struct kbase_context *as_to_kctx[BASE_MAX_NR_AS];
+
+	spinlock_t mmu_mask_change;
+
+	struct kbase_gpu_props gpu_props;
+
+	unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
+	unsigned long hw_features_mask[(BASE_HW_FEATURE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
+
+	struct {
+		atomic_t count;
+		atomic_t state;
+	} disjoint_event;
+
+	s8 nr_hw_address_spaces;
+	s8 nr_user_address_spaces;
+
+	struct kbase_hwcnt {
+		/* The lock should be used when accessing any of the following members */
+		spinlock_t lock;
+
+		struct kbase_context *kctx;
+		u64 addr;
+		u64 addr_bytes;
+
+		struct kbase_instr_backend backend;
+	} hwcnt;
+
+	struct kbase_hwcnt_backend_interface hwcnt_gpu_iface;
+	struct kbase_hwcnt_context *hwcnt_gpu_ctx;
+	struct kbase_hwcnt_virtualizer *hwcnt_gpu_virt;
+	struct kbase_vinstr_context *vinstr_ctx;
+
+	atomic_t               timeline_is_enabled;
+	struct kbase_timeline *timeline;
+
+#if KBASE_TRACE_ENABLE
+	spinlock_t              trace_lock;
+	u16                     trace_first_out;
+	u16                     trace_next_in;
+	struct kbase_trace            *trace_rbuf;
+#endif
+
+	u32 reset_timeout_ms;
+
+	bool cache_clean_in_progress;
+	bool cache_clean_queued;
+	wait_queue_head_t cache_clean_wait;
+
+	void *platform_context;
+
+	struct list_head        kctx_list;
+	struct mutex            kctx_list_lock;
+
+#ifdef CONFIG_MALI_DEVFREQ
+	struct devfreq_dev_profile devfreq_profile;
+	struct devfreq *devfreq;
+	unsigned long current_freqs[BASE_MAX_NR_CLOCKS_REGULATORS];
+	unsigned long current_nominal_freq;
+	unsigned long current_voltages[BASE_MAX_NR_CLOCKS_REGULATORS];
+	u64 current_core_mask;
+	struct kbase_devfreq_opp *devfreq_table;
+	int num_opps;
+	struct kbasep_pm_metrics last_devfreq_metrics;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+	struct kbase_devfreq_queue_info devfreq_queue;
+#endif
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+	struct devfreq_cooling_device *devfreq_cooling;
+#else
+	struct thermal_cooling_device *devfreq_cooling;
+#endif
+	bool ipa_protection_mode_switched;
+	struct {
+		/* Access to this struct must be with ipa.lock held */
+		struct mutex lock;
+		struct kbase_ipa_model *configured_model;
+		struct kbase_ipa_model *fallback_model;
+
+		/* Values of the PM utilization metrics from last time the
+		 * power model was invoked. The utilization is calculated as
+		 * the difference between last_metrics and the current values.
+		 */
+		struct kbasep_pm_metrics last_metrics;
+		/* Model data to pass to ipa_gpu_active/idle() */
+		struct kbase_ipa_model_vinstr_data *model_data;
+
+		/* true if use of fallback model has been forced by the User */
+		bool force_fallback_model;
+	} ipa;
+#endif /* CONFIG_DEVFREQ_THERMAL */
+#endif /* CONFIG_MALI_DEVFREQ */
+	unsigned long previous_frequency;
+
+	atomic_t job_fault_debug;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *mali_debugfs_directory;
+	struct dentry *debugfs_ctx_directory;
+
+#ifdef CONFIG_MALI_DEBUG
+	u64 debugfs_as_read_bitmap;
+#endif /* CONFIG_MALI_DEBUG */
+
+	wait_queue_head_t job_fault_wq;
+	wait_queue_head_t job_fault_resume_wq;
+	struct workqueue_struct *job_fault_resume_workq;
+	struct list_head job_fault_event_list;
+	spinlock_t job_fault_event_lock;
+
+#if !MALI_CUSTOMER_RELEASE
+	struct {
+		u16 reg_offset;
+	} regs_dump_debugfs_data;
+#endif /* !MALI_CUSTOMER_RELEASE */
+#endif /* CONFIG_DEBUG_FS */
+
+	atomic_t ctx_num;
+
+#ifdef CONFIG_DEBUG_FS
+	struct kbase_io_history io_history;
+#endif /* CONFIG_DEBUG_FS */
+
+	struct kbase_hwaccess_data hwaccess;
+
+	atomic_t faults_pending;
+
+	bool poweroff_pending;
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	bool infinite_cache_active_default;
+#else
+	u32 infinite_cache_active_default;
+#endif
+	struct kbase_mem_pool_group_config mem_pool_defaults;
+
+	u32 current_gpu_coherency_mode;
+	u32 system_coherency;
+
+	bool cci_snoop_enabled;
+
+	u32 snoop_enable_smc;
+	u32 snoop_disable_smc;
+
+	struct protected_mode_ops *protected_ops;
+
+	struct protected_mode_device *protected_dev;
+
+	bool protected_mode;
+
+	bool protected_mode_transition;
+
+	bool protected_mode_hwcnt_desired;
+
+	bool protected_mode_hwcnt_disabled;
+
+	struct work_struct protected_mode_hwcnt_disable_work;
+
+	bool protected_mode_support;
+
+#ifdef CONFIG_MALI_BUSLOG
+	struct bus_logger_client *buslogger;
+#endif
+
+	bool irq_reset_flush;
+
+	u32 inited_subsys;
+
+	spinlock_t hwaccess_lock;
+
+	struct mutex mmu_hw_mutex;
+
+	/* See KBASE_SERIALIZE_* for details */
+	u8 serialize_jobs;
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+	u8 backup_serialize_jobs;
+#endif
+
+	u8 l2_size_override;
+	u8 l2_hash_override;
+
+	/* See KBASE_JS_*_PRIORITY_MODE for details. */
+	u32 js_ctx_scheduling_mode;
+
+
+	const struct kbase_pm_policy *policy_list[KBASE_PM_MAX_NUM_POLICIES];
+	int policy_count;
+};
+
+/**
+ * struct jsctx_queue - JS context atom queue
+ * @runnable_tree: Root of RB-tree containing currently runnable atoms on this
+ *                 job slot.
+ * @x_dep_head:    Head item of the linked list of atoms blocked on cross-slot
+ *                 dependencies. Atoms on this list will be moved to the
+ *                 runnable_tree when the blocking atom completes.
+ *
+ * hwaccess_lock must be held when accessing this structure.
+ */
+struct jsctx_queue {
+	struct rb_root runnable_tree;
+	struct list_head x_dep_head;
+};
+
+
+#define KBASE_API_VERSION(major, minor) ((((major) & 0xFFF) << 20)  | \
+					 (((minor) & 0xFFF) << 8) | \
+					 ((0 & 0xFF) << 0))
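+
+/* Illustrative decoding helpers (not part of the original source): major and
+ * minor each occupy 12 bits of the packed value, and the low 8 bits (a patch
+ * field) are always encoded as zero by KBASE_API_VERSION above.
+ */
+#define KBASE_API_VERSION_MAJOR(version) (((version) >> 20) & 0xFFF)
+#define KBASE_API_VERSION_MINOR(version) (((version) >> 8) & 0xFFF)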
+
+/**
+ * enum kbase_file_state - Initialization state of a file opened by @kbase_open
+ *
+ * @KBASE_FILE_NEED_VSN:        Initial state, awaiting API version.
+ * @KBASE_FILE_VSN_IN_PROGRESS: Indicates if setting an API version is in
+ *                              progress and other setup calls shall be
+ *                              rejected.
+ * @KBASE_FILE_NEED_CTX:        Indicates if the API version handshake has
+ *                              completed, awaiting context creation flags.
+ * @KBASE_FILE_CTX_IN_PROGRESS: Indicates if the context's setup is in progress
+ *                              and other setup calls shall be rejected.
+ * @KBASE_FILE_COMPLETE:        Indicates if the setup for context has
+ *                              completed, i.e. flags have been set for the
+ *                              context.
+ *
+ * The driver allows only limited interaction with user-space until setup
+ * is complete.
+ */
+enum kbase_file_state {
+	KBASE_FILE_NEED_VSN,
+	KBASE_FILE_VSN_IN_PROGRESS,
+	KBASE_FILE_NEED_CTX,
+	KBASE_FILE_CTX_IN_PROGRESS,
+	KBASE_FILE_COMPLETE
+};
+
+/**
+ * struct kbase_file - Object representing a file opened by @kbase_open
+ *
+ * @kbdev:               Object representing an instance of GPU platform device,
+ *                       allocated from the probe method of the Mali driver.
+ * @filp:                Pointer to the struct file corresponding to device file
+ *                       /dev/malixx instance, passed to the file's open method.
+ * @kctx:                Object representing an entity among which the GPU is
+ *                       scheduled and which gets its own GPU address space.
+ *                       Invalid until @setup_state is KBASE_FILE_COMPLETE.
+ * @api_version:         Contains the version number for User/kernel interface,
+ *                       used for compatibility check. Invalid until
+ *                       @setup_state is KBASE_FILE_NEED_CTX.
+ * @setup_state:         Initialization state of the file. Values come from
+ *                       the kbase_file_state enumeration.
+ */
+struct kbase_file {
+	struct kbase_device  *kbdev;
+	struct file          *filp;
+	struct kbase_context *kctx;
+	unsigned long         api_version;
+	atomic_t              setup_state;
+};
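+
+/* Illustrative sketch (hypothetical helper, not from the original source):
+ * @setup_state is an atomic_t so the state machine above can be advanced
+ * without a lock; a compare-and-swap rejects racing or out-of-order setup
+ * calls.
+ */
+static inline bool kbase_file_try_advance(struct kbase_file *kfile,
+					  enum kbase_file_state expected,
+					  enum kbase_file_state next)
+{
+	/* Returns false if another setup call raced us or arrived out of order */
+	return atomic_cmpxchg(&kfile->setup_state, expected, next) == expected;
+}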
+
+/**
+ * enum kbase_context_flags - Flags for kbase contexts
+ *
+ * @KCTX_COMPAT: Set when the context process is a compat process, 32-bit
+ * process on a 64-bit kernel.
+ *
+ * @KCTX_RUNNABLE_REF: Set when context is counted in
+ * kbdev->js_data.nr_contexts_runnable. Must hold queue_mutex when accessing.
+ *
+ * @KCTX_ACTIVE: Set when the context is active.
+ *
+ * @KCTX_PULLED: Set when last kick() caused atoms to be pulled from this
+ * context.
+ *
+ * @KCTX_MEM_PROFILE_INITIALIZED: Set when the context's memory profile has been
+ * initialized.
+ *
+ * @KCTX_INFINITE_CACHE: Set when infinite cache is to be enabled for new
+ * allocations. Existing allocations will not change.
+ *
+ * @KCTX_SUBMIT_DISABLED: Set to prevent context from submitting any jobs.
+ *
+ * @KCTX_PRIVILEGED: Set if the context uses an address space and should be
+ * kept scheduled in.
+ *
+ * @KCTX_SCHEDULED: Set when the context is scheduled on the Run Pool.
+ * This is only ever updated whilst the jsctx_mutex is held.
+ *
+ * @KCTX_DYING: Set when the context process is in the process of being evicted.
+ *
+ * @KCTX_NO_IMPLICIT_SYNC: Set when explicit Android fences are in use on this
+ * context, to disable use of implicit dma-buf fences. This is used to avoid
+ * potential synchronization deadlocks.
+ *
+ * @KCTX_FORCE_SAME_VA: Set when BASE_MEM_SAME_VA should be forced on memory
+ * allocations. For 64-bit clients it is enabled by default, and disabled by
+ * default on 32-bit clients. Being able to clear this flag is only used for
+ * testing purposes of the custom zone allocation on 64-bit user-space builds,
+ * where we also require more control than is available through e.g. the JIT
+ * allocation mechanism. However, the 64-bit user-space client must still
+ * reserve a JIT region using KBASE_IOCTL_MEM_JIT_INIT
+ *
+ * @KCTX_PULLED_SINCE_ACTIVE_JS0: Set when the context has had an atom pulled
+ * from it for job slot 0. This is reset when the context first goes active or
+ * is re-activated on that slot.
+ *
+ * @KCTX_PULLED_SINCE_ACTIVE_JS1: Set when the context has had an atom pulled
+ * from it for job slot 1. This is reset when the context first goes active or
+ * is re-activated on that slot.
+ *
+ * @KCTX_PULLED_SINCE_ACTIVE_JS2: Set when the context has had an atom pulled
+ * from it for job slot 2. This is reset when the context first goes active or
+ * is re-activated on that slot.
+ *
+ * All members need to be separate bits. This enum is intended for use in a
+ * bitmask where multiple values get OR-ed together.
+ */
+enum kbase_context_flags {
+	KCTX_COMPAT = 1U << 0,
+	KCTX_RUNNABLE_REF = 1U << 1,
+	KCTX_ACTIVE = 1U << 2,
+	KCTX_PULLED = 1U << 3,
+	KCTX_MEM_PROFILE_INITIALIZED = 1U << 4,
+	KCTX_INFINITE_CACHE = 1U << 5,
+	KCTX_SUBMIT_DISABLED = 1U << 6,
+	KCTX_PRIVILEGED = 1U << 7,
+	KCTX_SCHEDULED = 1U << 8,
+	KCTX_DYING = 1U << 9,
+	KCTX_NO_IMPLICIT_SYNC = 1U << 10,
+	KCTX_FORCE_SAME_VA = 1U << 11,
+	KCTX_PULLED_SINCE_ACTIVE_JS0 = 1U << 12,
+	KCTX_PULLED_SINCE_ACTIVE_JS1 = 1U << 13,
+	KCTX_PULLED_SINCE_ACTIVE_JS2 = 1U << 14,
+};
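+
+/* Illustrative usage (not from the original source): kbase_context.flags is
+ * an atomic_t holding this bitmask, so flags are typically manipulated with
+ * atomic bit operations, e.g.:
+ *
+ *	atomic_or(KCTX_SUBMIT_DISABLED, &kctx->flags);
+ *	if (atomic_read(&kctx->flags) & KCTX_DYING)
+ *		...
+ */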
+
+struct kbase_sub_alloc {
+	struct list_head link;
+	struct page *page;
+	DECLARE_BITMAP(sub_pages, SZ_2M / SZ_4K);
+};
+
+/**
+ * struct kbase_context - Kernel base context
+ *
+ * @filp:                 Pointer to the struct file corresponding to device file
+ *                        /dev/malixx instance, passed to the file's open method.
+ * @kbdev:                Pointer to the Kbase device for which the context is created.
+ * @kctx_list_link:       Node into Kbase device list of contexts.
+ * @mmu:                  Structure holding details of the MMU tables for this
+ *                        context
+ * @id:                   Unique identifier for the context, indicates the number of
+ *                        contexts which have been created for the device so far.
+ * @api_version:          Contains the version number for the User/kernel
+ *                        interface, used for compatibility checks.
+ * @event_list:           List of posted events about completed atoms, to be sent
+ *                        to the event handling thread of Userspace.
+ * @event_coalesce_list:  list containing events corresponding to successive atoms
+ *                        which have requested deferred delivery of the completion
+ *                        events to Userspace.
+ * @event_mutex:          Lock to protect the concurrent access to @event_list &
+ *                        @event_coalesce_list.
+ * @event_closed:         Flag set through POST_TERM ioctl, indicates that Driver
+ *                        should stop posting events and also inform event handling
+ *                        thread that context termination is in progress.
+ * @event_workq:          Workqueue for processing work items corresponding to atoms
+ *                        that do not return an event to userspace.
+ * @event_count:          Count of the posted events to be consumed by Userspace.
+ * @event_coalesce_count: Count of the events present in @event_coalesce_list.
+ * @flags:                bitmap of enums from kbase_context_flags, indicating the
+ *                        state & attributes for the context.
+ * @aliasing_sink_page:   Special page used for KBASE_MEM_TYPE_ALIAS allocations,
+ *                        which can alias a number of memory regions. The page
+ *                        represents a region where it is mapped with a
+ *                        write-alloc cache setup, typically used when the write
+ *                        result of the GPU isn't needed, but the GPU must write
+ *                        anyway.
+ * @mem_partials_lock:    Lock for protecting the operations done on the elements
+ *                        added to @mem_partials list.
+ * @mem_partials:         List head for the list of large pages, 2MB in size,
+ *                        which have been split into 4 KB pages and are used
+ *                        partially for allocations >= 2 MB in size.
+ * @reg_lock:             Lock used for GPU virtual address space management
+ *                        operations, like adding/freeing a memory region in the
+ *                        address space. It could potentially be converted to a
+ *                        rwlock.
+ * @reg_rbtree_same:      RB tree of the memory regions allocated from the SAME_VA
+ *                        zone of the GPU virtual address space. Used for allocations
+ *                        having the same value for GPU & CPU virtual address.
+ * @reg_rbtree_custom:    RB tree of the memory regions allocated from the CUSTOM_VA
+ *                        zone of the GPU virtual address space.
+ * @reg_rbtree_exec:      RB tree of the memory regions allocated from the EXEC_VA
+ *                        zone of the GPU virtual address space. Used for GPU-executable
+ *                        allocations which don't need the SAME_VA property.
+ * @cookies:              Bitmask of BITS_PER_LONG bits, used mainly for SAME_VA
+ *                        allocations to defer the reservation of a memory region
+ *                        (from the GPU virtual address space) from the
+ *                        base_mem_alloc ioctl to the mmap system call. This
+ *                        allows returning unique handles, disguised as GPU VAs,
+ *                        to Userspace from base_mem_alloc and later retrieving
+ *                        the pointer to the memory region structure in the mmap
+ *                        handler.
+ * @pending_regions:      Array containing pointers to memory region structures,
+ *                        used in conjunction with the @cookies bitmask mainly
+ *                        for providing a mechanism to have the same value for
+ *                        the CPU & GPU virtual address.
+ * @event_queue:          Wait queue used for blocking the thread, which consumes
+ *                        the base_jd_event corresponding to an atom, when there
+ *                        are no more posted events.
+ * @tgid:                 Thread group ID of the process whose thread created
+ *                        the context (by calling KBASE_IOCTL_VERSION_CHECK or
+ *                        KBASE_IOCTL_SET_FLAGS, depending on the @api_version).
+ *                        This is usually, but not necessarily, the same as the
+ *                        process whose thread opened the device file
+ *                        /dev/malixx instance.
+ * @pid:                  ID of the thread, corresponding to process @tgid,
+ *                        which actually created the context. This is usually,
+ *                        but not necessarily, the same as the thread which
+ *                        opened the device file /dev/malixx instance.
+ * @jctx:                 object encapsulating all the Job dispatcher related state,
+ *                        including the array of atoms.
+ * @used_pages:           Keeps track of the number of 4KB physical pages in use
+ *                        for the context.
+ * @nonmapped_pages:      Updated in the same way as @used_pages, except for the
+ *                        case when the special tracking page is freed by
+ *                        userspace, where it is reset to 0.
+ * @permanent_mapped_pages: Usage count of permanently mapped memory
+ * @mem_pools:            Context-specific pools of free physical memory pages.
+ * @reclaim:              Shrinker object registered with the kernel, containing
+ *                        the pointer to the callback function which is invoked
+ *                        under low memory conditions. In the callback function
+ *                        the Driver frees up the memory for allocations marked
+ *                        as evictable/reclaimable.
+ * @evict_list:           List head for the list containing the allocations which
+ *                        can be evicted or freed up in the shrinker callback.
+ * @waiting_soft_jobs:    List head for the list containing soft-job atoms, which
+ *                        are either waiting for the event set operation, waiting
+ *                        for the signaling of an input fence, or waiting for the
+ *                        GPU device to be powered on so as to dump the CPU/GPU
+ *                        timestamps.
+ * @waiting_soft_jobs_lock: Lock to protect @waiting_soft_jobs list from concurrent
+ *                        accesses.
+ * @dma_fence:            Object containing the list head for the list of dma-buf
+ *                        fence waiting atoms and the workqueue to process the
+ *                        work items queued for the atoms blocked on the
+ *                        signaling of dma-buf fences.
+ * @as_nr:                ID of the address space being used by the scheduled-in
+ *                        context. This is effectively part of the Run Pool, because
+ *                        it only has a valid setting (!=KBASEP_AS_NR_INVALID) whilst
+ *                        the context is scheduled in. The hwaccess_lock must be held
+ *                        whilst accessing this.
+ *                        If the context relating to this value of as_nr is required,
+ *                        then the context must be retained to ensure that it doesn't
+ *                        disappear whilst it is being used. Alternatively, hwaccess_lock
+ *                        can be held to ensure the context doesn't disappear (but this
+ *                        has restrictions on what other locks can be taken
+ *                        simultaneously).
+ * @refcount:             Keeps track of the number of users of this context. A user
+ *                        can be a job that is available for execution, instrumentation
+ *                        needing to 'pin' a context for counter collection, etc.
+ *                        If the refcount reaches 0 then this context is considered
+ *                        inactive and the previously programmed AS might be cleared
+ *                        at any point.
+ *                        Generally the reference count is incremented when the context
+ *                        is scheduled in and an atom is pulled from the context's per
+ *                        slot runnable tree.
+ * @mm_update_lock:       Lock used for handling of the special tracking page.
+ * @process_mm:           Pointer to the memory descriptor of the process which
+ *                        created the context. Used for accounting the physical
+ *                        pages used for GPU allocations, done for the context,
+ *                        to the memory consumed by the process.
+ * @same_va_end:          End address of the SAME_VA zone (in 4KB page units)
+ * @exec_va_start:        Start address of the EXEC_VA zone (in 4KB page units)
+ *                        or U64_MAX if the EXEC_VA zone is uninitialized.
+ * @gpu_va_end:           End address of the GPU va space (in 4KB page units)
+ * @jit_va:               Indicates if a JIT_VA zone has been created.
+ * @mem_profile_data:     Buffer containing the profiling information provided by
+ *                        Userspace, which can be read through the mem_profile
+ *                        debugfs file.
+ * @mem_profile_size:     Size of the @mem_profile_data.
+ * @mem_profile_lock:     Lock to serialize the operations related to mem_profile
+ *                        debugfs file.
+ * @kctx_dentry:          Pointer to the debugfs directory created for every context,
+ *                        inside kbase_device::debugfs_ctx_directory, containing
+ *                        context specific files.
+ * @reg_dump:             Buffer containing a register offset & value pair, used
+ *                        for dumping job fault debug info.
+ * @job_fault_count:      Indicates that a job fault occurred for the context and
+ *                        dumping of its debug info is in progress.
+ * @job_fault_resume_event_list: List containing atoms completed after the faulty
+ *                        atom but before the debug data for the faulty atom was
+ *                        dumped.
+ * @jsctx_queue:          Per slot & priority arrays of objects containing the root
+ *                        of RB-tree holding currently runnable atoms on the job slot
+ *                        and the head item of the linked list of atoms blocked on
+ *                        cross-slot dependencies.
+ * @atoms_pulled:         Total number of atoms currently pulled from the context.
+ * @atoms_pulled_slot:    Per slot count of the number of atoms currently pulled
+ *                        from the context.
+ * @atoms_pulled_slot_pri: Per slot & priority count of the number of atoms currently
+ *                        pulled from the context. hwaccess_lock shall be held when
+ *                        accessing it.
+ * @blocked_js:           Indicates if the context is blocked from submitting atoms
+ *                        on a slot at a given priority. This is set to true when
+ *                        an atom from the context is soft/hard stopped or
+ *                        removed from the HEAD_NEXT register in response to a
+ *                        soft/hard stop.
+ * @slots_pullable:       Bitmask of slots, indicating the slots for which the
+ *                        context has pullable atoms in the runnable tree.
+ * @work:                 Work structure used for deferred ASID assignment.
+ * @legacy_hwcnt_cli:     Pointer to the legacy userspace hardware counters
+ *                        client; there can be only one such client per kbase
+ *                        context.
+ * @legacy_hwcnt_lock:    Lock used to prevent concurrent access to
+ *                        @legacy_hwcnt_cli.
+ * @completed_jobs:       List containing completed atoms for which base_jd_event is
+ *                        to be posted.
+ * @work_count:           Number of work items, corresponding to atoms, currently
+ *                        pending on job_done workqueue of @jctx.
+ * @soft_job_timeout:     Timer object used for failing/cancelling the waiting
+ *                        soft-jobs which have been blocked for more than the
+ *                        timeout value used for the soft-jobs.
+ * @jit_alloc:            Array of 256 pointers to GPU memory regions, used
+ *                        for JIT allocations.
+ * @jit_max_allocations:  Maximum number of JIT allocations allowed at once.
+ * @jit_current_allocations: Current number of in-flight JIT allocations.
+ * @jit_current_allocations_per_bin: Current number of in-flight JIT allocations per bin
+ * @jit_version:          Version number indicating whether userspace is using
+ *                        the old or new version of the interface for JIT
+ *                        allocations:
+ *                        1 -> client used KBASE_IOCTL_MEM_JIT_INIT_OLD
+ *                        2 -> client used KBASE_IOCTL_MEM_JIT_INIT
+ * @jit_group_id:         A memory group ID to be passed to a platform-specific
+ *                        memory group manager.
+ *                        Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ * @jit_active_head:      List containing the JIT allocations which are in use.
+ * @jit_pool_head:        List containing the JIT allocations which have been
+ *                        freed up by userspace and so are not being used by it.
+ *                        The Driver caches them to quickly fulfill requests for
+ *                        new JIT allocations. They are released in case of
+ *                        memory pressure as they are put on the @evict_list
+ *                        when they are freed up by userspace.
+ * @jit_destroy_head:     List containing the JIT allocations which were moved to it
+ *                        from @jit_pool_head, in the shrinker callback, after freeing
+ *                        their backing physical pages.
+ * @jit_evict_lock:       Lock used for operations done on JIT allocations and also
+ *                        for accessing @evict_list.
+ * @jit_work:             Work item queued to defer the freeing of memory region when
+ *                        JIT allocation is moved to @jit_destroy_head.
+ * @jit_atoms_head:       A list of the JIT soft-jobs, both alloc & free, in submission
+ *                        order, protected by kbase_jd_context.lock.
+ * @jit_pending_alloc:    A list of JIT alloc soft-jobs for which allocation will be
+ *                        reattempted after the impending free of other active JIT
+ *                        allocations.
+ * @ext_res_meta_head:    A list of sticky external resources which were requested
+ *                        to be mapped on the GPU side, through a soft-job atom of
+ *                        type EXT_RES_MAP or the STICKY_RESOURCE_MAP ioctl.
+ * @drain_pending:        Used to record that a flush/invalidate of the GPU caches was
+ *                        requested from atomic context, so that the next flush request
+ *                        can wait for the flush of GPU writes.
+ * @age_count:            Counter incremented on every call to jd_submit_atom;
+ *                        the atom is assigned the snapshot of this counter,
+ *                        which is used to determine the atom's age when it is
+ *                        added to the runnable RB-tree.
+ * @trim_level:           Level of JIT allocation trimming to perform on free (0-100%)
+ * @gwt_enabled:          Indicates if tracking of GPU writes is enabled, protected by
+ *                        kbase_context.reg_lock.
+ * @gwt_was_enabled:      Simple sticky bit flag to know if GWT was ever enabled.
+ * @gwt_current_list:     A list of addresses for which GPU has generated write faults,
+ *                        after the last snapshot of it was sent to userspace.
+ * @gwt_snapshot_list:    Snapshot of the @gwt_current_list for sending to user space.
+ * @priority:             Indicates the context priority. Used along with @atoms_count
+ *                        for context scheduling, protected by hwaccess_lock.
+ * @atoms_count:          Number of GPU atoms currently in use, per priority.
+ *
+ * A kernel base context is an entity among which the GPU is scheduled.
+ * Each context has its own GPU address space.
+ * Up to one context can be created for each client that opens the device file
+ * /dev/malixx. Context creation is deferred until a special ioctl() system call
+ * is made on the device file.
+ */
+struct kbase_context {
+	struct file *filp;
+	struct kbase_device *kbdev;
+	struct list_head kctx_list_link;
+	struct kbase_mmu_table mmu;
+
+	u32 id;
+	unsigned long api_version;
+	struct list_head event_list;
+	struct list_head event_coalesce_list;
+	struct mutex event_mutex;
+	atomic_t event_closed;
+	struct workqueue_struct *event_workq;
+	atomic_t event_count;
+	int event_coalesce_count;
+
+	atomic_t flags;
+
+	struct tagged_addr aliasing_sink_page;
+
+	spinlock_t              mem_partials_lock;
+	struct list_head        mem_partials;
+
+	struct mutex            reg_lock;
+	struct rb_root reg_rbtree_same;
+	struct rb_root reg_rbtree_custom;
+	struct rb_root reg_rbtree_exec;
+
+
+	unsigned long    cookies;
+	struct kbase_va_region *pending_regions[BITS_PER_LONG];
+
+	wait_queue_head_t event_queue;
+	pid_t tgid;
+	pid_t pid;
+
+	struct kbase_jd_context jctx;
+	atomic_t used_pages;
+	atomic_t         nonmapped_pages;
+	atomic_t permanent_mapped_pages;
+
+	struct kbase_mem_pool_group mem_pools;
+
+	struct shrinker         reclaim;
+	struct list_head        evict_list;
+
+	struct list_head waiting_soft_jobs;
+	spinlock_t waiting_soft_jobs_lock;
+#ifdef CONFIG_MALI_DMA_FENCE
+	struct {
+		struct list_head waiting_resource;
+		struct workqueue_struct *wq;
+	} dma_fence;
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+	int as_nr;
+
+	atomic_t refcount;
+
+	spinlock_t         mm_update_lock;
+	struct mm_struct __rcu *process_mm;
+	u64 same_va_end;
+	u64 exec_va_start;
+	u64 gpu_va_end;
+	bool jit_va;
+
+#ifdef CONFIG_DEBUG_FS
+	char *mem_profile_data;
+	size_t mem_profile_size;
+	struct mutex mem_profile_lock;
+	struct dentry *kctx_dentry;
+
+	unsigned int *reg_dump;
+	atomic_t job_fault_count;
+	struct list_head job_fault_resume_event_list;
+
+#endif /* CONFIG_DEBUG_FS */
+
+	struct jsctx_queue jsctx_queue
+		[KBASE_JS_ATOM_SCHED_PRIO_COUNT][BASE_JM_MAX_NR_SLOTS];
+
+	atomic_t atoms_pulled;
+	atomic_t atoms_pulled_slot[BASE_JM_MAX_NR_SLOTS];
+	int atoms_pulled_slot_pri[BASE_JM_MAX_NR_SLOTS][
+			KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+
+	bool blocked_js[BASE_JM_MAX_NR_SLOTS][KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+
+	u32 slots_pullable;
+
+	struct work_struct work;
+
+	struct kbase_hwcnt_legacy_client *legacy_hwcnt_cli;
+	struct mutex legacy_hwcnt_lock;
+
+	struct list_head completed_jobs;
+	atomic_t work_count;
+
+	struct timer_list soft_job_timeout;
+
+	struct kbase_va_region *jit_alloc[256];
+	u8 jit_max_allocations;
+	u8 jit_current_allocations;
+	u8 jit_current_allocations_per_bin[256];
+	u8 jit_version;
+	u8 jit_group_id;
+	struct list_head jit_active_head;
+	struct list_head jit_pool_head;
+	struct list_head jit_destroy_head;
+	struct mutex jit_evict_lock;
+	struct work_struct jit_work;
+
+	struct list_head jit_atoms_head;
+	struct list_head jit_pending_alloc;
+
+	struct list_head ext_res_meta_head;
+
+	atomic_t drain_pending;
+
+	u32 age_count;
+
+	u8 trim_level;
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+	bool gwt_enabled;
+
+	bool gwt_was_enabled;
+
+	struct list_head gwt_current_list;
+
+	struct list_head gwt_snapshot_list;
+#endif
+
+	int priority;
+	s16 atoms_count[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+};
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+/**
+ * struct kbasep_gwt_list_element - Structure used to collect GPU
+ *                                  write faults.
+ * @link:                           List head for adding write faults.
+ * @region:                         Details of the region where we have the
+ *                                  faulting page address.
+ * @page_addr:                      Page address where GPU write fault occurred.
+ * @num_pages:                      The number of pages modified.
+ *
+ * Using this structure all GPU write faults are stored in a list.
+ */
+struct kbasep_gwt_list_element {
+	struct list_head link;
+	struct kbase_va_region *region;
+	u64 page_addr;
+	u64 num_pages;
+};
+
+#endif
+
+/**
+ * struct kbase_ctx_ext_res_meta - Structure which binds an external resource
+ *                                 to a @kbase_context.
+ * @ext_res_node:                  List head for adding the metadata to a
+ *                                 @kbase_context.
+ * @alloc:                         The physical memory allocation structure
+ *                                 which is mapped.
+ * @gpu_addr:                      The GPU virtual address the resource is
+ *                                 mapped to.
+ *
+ * External resources can be mapped into multiple contexts as well as the same
+ * context multiple times.
+ * As kbase_va_region itself isn't refcounted we can't attach our extra
+ * information to it as it could be removed under our feet leaving external
+ * resources pinned.
+ * This metadata structure binds a single external resource to a single
+ * context, ensuring that per-context mappings are tracked separately so they can
+ * be overridden when needed and abuses by the application (freeing the resource
+ * multiple times) don't affect the refcount of the physical allocation.
+ */
+struct kbase_ctx_ext_res_meta {
+	struct list_head ext_res_node;
+	struct kbase_mem_phy_alloc *alloc;
+	u64 gpu_addr;
+};
+
+enum kbase_reg_access_type {
+	REG_READ,
+	REG_WRITE
+};
+
+enum kbase_share_attr_bits {
+	/* (1ULL << 8) bit is reserved */
+	SHARE_BOTH_BITS = (2ULL << 8),	/* inner and outer shareable coherency */
+	SHARE_INNER_BITS = (3ULL << 8)	/* inner shareable coherency */
+};
+
+/**
+ * kbase_device_is_cpu_coherent - Returns if the device is CPU coherent.
+ * @kbdev: kbase device
+ *
+ * Return: true if the device's accesses are coherent, false if not.
+ */
+static inline bool kbase_device_is_cpu_coherent(struct kbase_device *kbdev)
+{
+	if ((kbdev->system_coherency == COHERENCY_ACE_LITE) ||
+			(kbdev->system_coherency == COHERENCY_ACE))
+		return true;
+
+	return false;
+}
+
+/* Conversion helpers for setting up high resolution timers */
+#define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime(((u64)(x))*1000000U))
+#define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x))
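+
+/* Illustrative usage (not from the original source): arming an hrtimer with
+ * the helpers above, e.g. a hypothetical 100 ms timeout:
+ *
+ *	hrtimer_start(&timer, HR_TIMER_DELAY_MSEC(100), HRTIMER_MODE_REL);
+ */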
+
+/* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */
+#define KBASE_CLEAN_CACHE_MAX_LOOPS     100000
+/* Maximum number of loops polling the GPU for an AS command to complete before we assume the GPU has hung */
+#define KBASE_AS_INACTIVE_MAX_LOOPS     100000000
+
+/* JobDescriptorHeader - taken from the architecture specifications, the layout
+ * is currently identical for all GPU archs. */
+struct job_descriptor_header {
+	u32 exception_status;
+	u32 first_incomplete_task;
+	u64 fault_pointer;
+	u8 job_descriptor_size : 1;
+	u8 job_type : 7;
+	u8 job_barrier : 1;
+	u8 _reserved_01 : 1;
+	u8 _reserved_1 : 1;
+	u8 _reserved_02 : 1;
+	u8 _reserved_03 : 1;
+	u8 _reserved_2 : 1;
+	u8 _reserved_04 : 1;
+	u8 _reserved_05 : 1;
+	u16 job_index;
+	u16 job_dependency_index_1;
+	u16 job_dependency_index_2;
+	union {
+		u64 _64;
+		u32 _32;
+	} next_job;
+};
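+
+/* Illustrative check (not from the original source): with the bit-fields
+ * packed as intended, the header occupies 32 bytes (8 bytes of u32s, the
+ * 8-byte fault pointer, 2 bytes of bit-fields, three u16 indices, then the
+ * 8-byte next_job union). A build-time assertion placed inside a function,
+ * such as
+ *
+ *	BUILD_BUG_ON(sizeof(struct job_descriptor_header) != 32);
+ *
+ * would catch a compiler that lays the bit-fields out differently.
+ */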
+
+#endif				/* _KBASE_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_device.c b/drivers/gpu/arm/midgard/mali_kbase_device.c
new file mode 100644
index 0000000..a265082
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_device.c
@@ -0,0 +1,537 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Base kernel device APIs
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/seq_file.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_hwaccess_instr.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_config_defaults.h>
+
+/* NOTE: Magic - 0x45435254 (TRCE in ASCII).
+ * Supports tracing feature provided in the base module.
+ * Please keep it in sync with the value of base module.
+ */
+#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254
+
+#if KBASE_TRACE_ENABLE
+static const char *kbasep_trace_code_string[] = {
+	/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
+	 * THIS MUST BE USED AT THE START OF THE ARRAY */
+#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
+#include "mali_kbase_trace_defs.h"
+#undef  KBASE_TRACE_CODE_MAKE_CODE
+};
+#endif
+
+#define DEBUG_MESSAGE_SIZE 256
+
+static int kbasep_trace_init(struct kbase_device *kbdev);
+static void kbasep_trace_term(struct kbase_device *kbdev);
+static void kbasep_trace_hook_wrapper(void *param);
+
+struct kbase_device *kbase_device_alloc(void)
+{
+	return kzalloc(sizeof(struct kbase_device), GFP_KERNEL);
+}
+
+static int kbase_device_as_init(struct kbase_device *kbdev, int i)
+{
+	kbdev->as[i].number = i;
+	kbdev->as[i].bf_data.addr = 0ULL;
+	kbdev->as[i].pf_data.addr = 0ULL;
+
+	kbdev->as[i].pf_wq = alloc_workqueue("mali_mmu%d", 0, 1, i);
+	if (!kbdev->as[i].pf_wq)
+		return -EINVAL;
+
+	INIT_WORK(&kbdev->as[i].work_pagefault, page_fault_worker);
+	INIT_WORK(&kbdev->as[i].work_busfault, bus_fault_worker);
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
+		struct hrtimer *poke_timer = &kbdev->as[i].poke_timer;
+		struct work_struct *poke_work = &kbdev->as[i].poke_work;
+
+		kbdev->as[i].poke_wq =
+			alloc_workqueue("mali_mmu%d_poker", 0, 1, i);
+		if (!kbdev->as[i].poke_wq) {
+			destroy_workqueue(kbdev->as[i].pf_wq);
+			return -EINVAL;
+		}
+		INIT_WORK(poke_work, kbasep_as_do_poke);
+
+		hrtimer_init(poke_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+		poke_timer->function = kbasep_as_poke_timer_callback;
+
+		kbdev->as[i].poke_refcount = 0;
+		kbdev->as[i].poke_state = 0u;
+	}
+
+	return 0;
+}
+
+static void kbase_device_as_term(struct kbase_device *kbdev, int i)
+{
+	destroy_workqueue(kbdev->as[i].pf_wq);
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+		destroy_workqueue(kbdev->as[i].poke_wq);
+}
+
+static int kbase_device_all_as_init(struct kbase_device *kbdev)
+{
+	int i, err;
+
+	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+		err = kbase_device_as_init(kbdev, i);
+		if (err)
+			goto free_workqs;
+	}
+
+	return 0;
+
+free_workqs:
+	/* Tear down only the address spaces that were fully initialized */
+	while (i-- > 0)
+		kbase_device_as_term(kbdev, i);
+
+	return err;
+}
+
+static void kbase_device_all_as_term(struct kbase_device *kbdev)
+{
+	int i;
+
+	for (i = 0; i < kbdev->nr_hw_address_spaces; i++)
+		kbase_device_as_term(kbdev, i);
+}
+
+int kbase_device_init(struct kbase_device * const kbdev)
+{
+	int err;
+#ifdef CONFIG_ARM64
+	struct device_node *np = NULL;
+#endif /* CONFIG_ARM64 */
+
+	spin_lock_init(&kbdev->mmu_mask_change);
+	mutex_init(&kbdev->mmu_hw_mutex);
+#ifdef CONFIG_ARM64
+	kbdev->cci_snoop_enabled = false;
+	np = kbdev->dev->of_node;
+	if (np != NULL) {
+		if (of_property_read_u32(np, "snoop_enable_smc",
+					&kbdev->snoop_enable_smc))
+			kbdev->snoop_enable_smc = 0;
+		if (of_property_read_u32(np, "snoop_disable_smc",
+					&kbdev->snoop_disable_smc))
+			kbdev->snoop_disable_smc = 0;
+		/* Either both or none of the calls should be provided. */
+		if (!((kbdev->snoop_disable_smc == 0
+			&& kbdev->snoop_enable_smc == 0)
+			|| (kbdev->snoop_disable_smc != 0
+			&& kbdev->snoop_enable_smc != 0))) {
+			WARN_ON(1);
+			err = -EINVAL;
+			goto fail;
+		}
+	}
+#endif /* CONFIG_ARM64 */
+	/* Get the list of workarounds for issues on the current HW
+	 * (identified by the GPU_ID register)
+	 */
+	err = kbase_hw_set_issues_mask(kbdev);
+	if (err)
+		goto fail;
+
+	/* Set the list of features available on the current HW
+	 * (identified by the GPU_ID register)
+	 */
+	kbase_hw_set_features_mask(kbdev);
+
+	kbase_gpuprops_set_features(kbdev);
+
+	/* On Linux 4.0+, dma coherency is determined from device tree */
+#if defined(CONFIG_ARM64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
+	set_dma_ops(kbdev->dev, &noncoherent_swiotlb_dma_ops);
+#endif
+
+	/* Workaround a pre-3.13 Linux issue, where dma_mask is NULL when our
+	 * device structure was created by device-tree
+	 */
+	if (!kbdev->dev->dma_mask)
+		kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask;
+
+	err = dma_set_mask(kbdev->dev,
+			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
+	if (err)
+		goto dma_set_mask_failed;
+
+	err = dma_set_coherent_mask(kbdev->dev,
+			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
+	if (err)
+		goto dma_set_mask_failed;
+
+	kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;
+
+	err = kbase_device_all_as_init(kbdev);
+	if (err)
+		goto as_init_failed;
+
+	spin_lock_init(&kbdev->hwcnt.lock);
+
+	err = kbasep_trace_init(kbdev);
+	if (err)
+		goto term_as;
+
+	init_waitqueue_head(&kbdev->cache_clean_wait);
+
+	kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev);
+
+	atomic_set(&kbdev->ctx_num, 0);
+
+	err = kbase_instr_backend_init(kbdev);
+	if (err)
+		goto term_trace;
+
+	kbdev->pm.dvfs_period = DEFAULT_PM_DVFS_PERIOD;
+
+	kbdev->reset_timeout_ms = DEFAULT_RESET_TIMEOUT_MS;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+		kbdev->mmu_mode = kbase_mmu_mode_get_aarch64();
+	else
+		kbdev->mmu_mode = kbase_mmu_mode_get_lpae();
+
+	mutex_init(&kbdev->kctx_list_lock);
+	INIT_LIST_HEAD(&kbdev->kctx_list);
+
+	return 0;
+term_trace:
+	kbasep_trace_term(kbdev);
+term_as:
+	kbase_device_all_as_term(kbdev);
+as_init_failed:
+dma_set_mask_failed:
+fail:
+	return err;
+}
+
+void kbase_device_term(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	WARN_ON(!list_empty(&kbdev->kctx_list));
+
+#if KBASE_TRACE_ENABLE
+	kbase_debug_assert_register_hook(NULL, NULL);
+#endif
+
+	kbase_instr_backend_term(kbdev);
+
+	kbasep_trace_term(kbdev);
+
+	kbase_device_all_as_term(kbdev);
+}
+
+void kbase_device_free(struct kbase_device *kbdev)
+{
+	kfree(kbdev);
+}
+
+/*
+ * Device trace functions
+ */
+#if KBASE_TRACE_ENABLE
+
+static int kbasep_trace_init(struct kbase_device *kbdev)
+{
+	struct kbase_trace *rbuf;
+
+	rbuf = kmalloc_array(KBASE_TRACE_SIZE, sizeof(*rbuf), GFP_KERNEL);
+
+	if (!rbuf)
+		return -EINVAL;
+
+	kbdev->trace_rbuf = rbuf;
+	spin_lock_init(&kbdev->trace_lock);
+	return 0;
+}
+
+static void kbasep_trace_term(struct kbase_device *kbdev)
+{
+	kfree(kbdev->trace_rbuf);
+}
+
+static void kbasep_trace_format_msg(struct kbase_trace *trace_msg, char *buffer, int len)
+{
+	s32 written = 0;
+
+	/* Initial part of message */
+	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0);
+
+	if (trace_msg->katom)
+		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0);
+
+	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0);
+
+	/* NOTE: Could add function callbacks to handle different message types */
+	/* Jobslot present */
+	if (trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT)
+		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0);
+
+	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);
+
+	/* Refcount present */
+	if (trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT)
+		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0);
+
+	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);
+
+	/* Rest of message */
+	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8lx", trace_msg->info_val), 0);
+}
+
+static void kbasep_trace_dump_msg(struct kbase_device *kbdev, struct kbase_trace *trace_msg)
+{
+	char buffer[DEBUG_MESSAGE_SIZE];
+
+	kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
+	dev_dbg(kbdev->dev, "%s", buffer);
+}
+
+void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
+{
+	unsigned long irqflags;
+	struct kbase_trace *trace_msg;
+
+	spin_lock_irqsave(&kbdev->trace_lock, irqflags);
+
+	trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in];
+
+	/* Fill the message */
+	trace_msg->thread_id = task_pid_nr(current);
+	trace_msg->cpu = task_cpu(current);
+
+	getnstimeofday(&trace_msg->timestamp);
+
+	trace_msg->code = code;
+	trace_msg->ctx = ctx;
+
+	if (!katom) {
+		trace_msg->katom = false;
+	} else {
+		trace_msg->katom = true;
+		trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom);
+		trace_msg->atom_udata[0] = katom->udata.blob[0];
+		trace_msg->atom_udata[1] = katom->udata.blob[1];
+	}
+
+	trace_msg->gpu_addr = gpu_addr;
+	trace_msg->jobslot = jobslot;
+	trace_msg->refcount = MIN((unsigned int)refcount, 0xFF);
+	trace_msg->info_val = info_val;
+	trace_msg->flags = flags;
+
+	/* Update the ringbuffer indices */
+	kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK;
+	if (kbdev->trace_next_in == kbdev->trace_first_out)
+		kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK;
+
+	/* Done */
+
+	spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
+}
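+
+/* Illustrative note (not from the original source): the index arithmetic above
+ * relies on KBASE_TRACE_SIZE being a power of two, with KBASE_TRACE_MASK equal
+ * to KBASE_TRACE_SIZE - 1. A compile-time guard could be:
+ *
+ *	BUILD_BUG_ON((KBASE_TRACE_SIZE & (KBASE_TRACE_SIZE - 1)) != 0);
+ */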
+
+void kbasep_trace_clear(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->trace_lock, flags);
+	kbdev->trace_first_out = kbdev->trace_next_in;
+	spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+}
+
+void kbasep_trace_dump(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	u32 start;
+	u32 end;
+
+	dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
+	spin_lock_irqsave(&kbdev->trace_lock, flags);
+	start = kbdev->trace_first_out;
+	end = kbdev->trace_next_in;
+
+	while (start != end) {
+		struct kbase_trace *trace_msg = &kbdev->trace_rbuf[start];
+
+		kbasep_trace_dump_msg(kbdev, trace_msg);
+
+		start = (start + 1) & KBASE_TRACE_MASK;
+	}
+	dev_dbg(kbdev->dev, "TRACE_END");
+
+	spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+
+	KBASE_TRACE_CLEAR(kbdev);
+}
+
+static void kbasep_trace_hook_wrapper(void *param)
+{
+	struct kbase_device *kbdev = (struct kbase_device *)param;
+
+	kbasep_trace_dump(kbdev);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct trace_seq_state {
+	struct kbase_trace trace_buf[KBASE_TRACE_SIZE];
+	u32 start;
+	u32 end;
+};
+
+static void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct trace_seq_state *state = s->private;
+	int i;
+
+	if (*pos > KBASE_TRACE_SIZE)
+		return NULL;
+	i = state->start + *pos;
+	if ((state->end >= state->start && i >= state->end) ||
+			i >= state->end + KBASE_TRACE_SIZE)
+		return NULL;
+
+	i &= KBASE_TRACE_MASK;
+
+	return &state->trace_buf[i];
+}
+
+static void kbasep_trace_seq_stop(struct seq_file *s, void *data)
+{
+}
+
+static void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
+{
+	struct trace_seq_state *state = s->private;
+	int i;
+
+	(*pos)++;
+
+	i = (state->start + *pos) & KBASE_TRACE_MASK;
+	if (i == state->end)
+		return NULL;
+
+	return &state->trace_buf[i];
+}
+
+static int kbasep_trace_seq_show(struct seq_file *s, void *data)
+{
+	struct kbase_trace *trace_msg = data;
+	char buffer[DEBUG_MESSAGE_SIZE];
+
+	kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
+	seq_printf(s, "%s\n", buffer);
+	return 0;
+}
+
+static const struct seq_operations kbasep_trace_seq_ops = {
+	.start = kbasep_trace_seq_start,
+	.next = kbasep_trace_seq_next,
+	.stop = kbasep_trace_seq_stop,
+	.show = kbasep_trace_seq_show,
+};
+
+static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct kbase_device *kbdev = inode->i_private;
+	unsigned long flags;
+
+	struct trace_seq_state *state;
+
+	state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state));
+	if (!state)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&kbdev->trace_lock, flags);
+	state->start = kbdev->trace_first_out;
+	state->end = kbdev->trace_next_in;
+	memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf));
+	spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+
+	return 0;
+}
+
+static const struct file_operations kbasep_trace_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = kbasep_trace_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release_private,
+};
+
+void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
+{
+	debugfs_create_file("mali_trace", S_IRUGO,
+			kbdev->mali_debugfs_directory, kbdev,
+			&kbasep_trace_debugfs_fops);
+}
+
+#else
+void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
+{
+}
+#endif				/* CONFIG_DEBUG_FS */
+
+#else				/* KBASE_TRACE_ENABLE  */
+static int kbasep_trace_init(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+	return 0;
+}
+
+static void kbasep_trace_term(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+static void kbasep_trace_hook_wrapper(void *param)
+{
+	CSTD_UNUSED(param);
+}
+
+void kbasep_trace_dump(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+#endif				/* KBASE_TRACE_ENABLE  */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c b/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c
new file mode 100644
index 0000000..68eb4ed
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c
@@ -0,0 +1,81 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Base kernel disjoint events helper functions
+ */
+
+#include <mali_kbase.h>
+
+void kbase_disjoint_init(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	atomic_set(&kbdev->disjoint_event.count, 0);
+	atomic_set(&kbdev->disjoint_event.state, 0);
+}
+
+/* increment the disjoint event count */
+void kbase_disjoint_event(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	atomic_inc(&kbdev->disjoint_event.count);
+}
+
+/* increment the state and the event counter */
+void kbase_disjoint_state_up(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	atomic_inc(&kbdev->disjoint_event.state);
+
+	kbase_disjoint_event(kbdev);
+}
+
+/* decrement the state */
+void kbase_disjoint_state_down(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(atomic_read(&kbdev->disjoint_event.state) > 0);
+
+	kbase_disjoint_event(kbdev);
+
+	atomic_dec(&kbdev->disjoint_event.state);
+}
+
+/* increments the count only if the state is > 0 */
+void kbase_disjoint_event_potential(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	if (atomic_read(&kbdev->disjoint_event.state))
+		kbase_disjoint_event(kbdev);
+}
+
+u32 kbase_disjoint_event_get(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	return atomic_read(&kbdev->disjoint_event.count);
+}
+KBASE_EXPORT_TEST_API(kbase_disjoint_event_get);
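+
+/* Illustrative usage (not from the original source): an operation that makes
+ * GPU timing discontinuous, such as a reset, brackets itself with the state
+ * counter so instrumentation can discard the affected samples:
+ *
+ *	kbase_disjoint_state_up(kbdev);
+ *	... perform the disruptive operation ...
+ *	kbase_disjoint_state_down(kbdev);
+ *
+ * Code that merely might overlap such an operation calls
+ * kbase_disjoint_event_potential(kbdev) instead.
+ */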
diff --git a/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c
new file mode 100644
index 0000000..6a95900
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c
@@ -0,0 +1,454 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/* Include mali_kbase_dma_fence.h before checking for CONFIG_MALI_DMA_FENCE as
+ * it will be set there.
+ */
+#include "mali_kbase_dma_fence.h"
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/reservation.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/ww_mutex.h>
+
+#include <mali_kbase.h>
+
+static void
+kbase_dma_fence_work(struct work_struct *pwork);
+
+static void
+kbase_dma_fence_waiters_add(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+
+	list_add_tail(&katom->queue, &kctx->dma_fence.waiting_resource);
+}
+
+static void
+kbase_dma_fence_waiters_remove(struct kbase_jd_atom *katom)
+{
+	list_del(&katom->queue);
+}
+
+static int
+kbase_dma_fence_lock_reservations(struct kbase_dma_fence_resv_info *info,
+				  struct ww_acquire_ctx *ctx)
+{
+	struct reservation_object *content_res = NULL;
+	unsigned int content_res_idx = 0;
+	unsigned int r;
+	int err = 0;
+
+	ww_acquire_init(ctx, &reservation_ww_class);
+
+retry:
+	for (r = 0; r < info->dma_fence_resv_count; r++) {
+		if (info->resv_objs[r] == content_res) {
+			content_res = NULL;
+			continue;
+		}
+
+		err = ww_mutex_lock(&info->resv_objs[r]->lock, ctx);
+		if (err)
+			goto error;
+	}
+
+	ww_acquire_done(ctx);
+	return err;
+
+error:
+	content_res_idx = r;
+
+	/* Unlock the ones we have already locked */
+	while (r--)
+		ww_mutex_unlock(&info->resv_objs[r]->lock);
+
+	if (content_res)
+		ww_mutex_unlock(&content_res->lock);
+
+	/* If we deadlocked, acquire the contended lock with lock_slow and retry */
+	if (err == -EDEADLK) {
+		content_res = info->resv_objs[content_res_idx];
+		ww_mutex_lock_slow(&content_res->lock, ctx);
+		goto retry;
+	}
+
+	/* If we are here the function failed */
+	ww_acquire_fini(ctx);
+	return err;
+}
+
+static void
+kbase_dma_fence_unlock_reservations(struct kbase_dma_fence_resv_info *info,
+				    struct ww_acquire_ctx *ctx)
+{
+	unsigned int r;
+
+	for (r = 0; r < info->dma_fence_resv_count; r++)
+		ww_mutex_unlock(&info->resv_objs[r]->lock);
+	ww_acquire_fini(ctx);
+}
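+
+/* Illustrative pairing (not from the original source): callers are expected
+ * to bracket reservation access with the two helpers above, keeping the
+ * ww_acquire_ctx alive for the whole critical section:
+ *
+ *	struct ww_acquire_ctx ctx;
+ *
+ *	if (kbase_dma_fence_lock_reservations(info, &ctx))
+ *		return err;	(a locking error other than -EDEADLK)
+ *	... add fences / callbacks on info->resv_objs ...
+ *	kbase_dma_fence_unlock_reservations(info, &ctx);
+ */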
+
+/**
+ * kbase_dma_fence_queue_work() - Queue work to handle @katom
+ * @katom: Pointer to atom for which to queue work
+ *
+ * Queue kbase_dma_fence_work() for @katom to clean up the fence callbacks and
+ * submit the atom.
+ */
+static void
+kbase_dma_fence_queue_work(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	bool ret;
+
+	INIT_WORK(&katom->work, kbase_dma_fence_work);
+	ret = queue_work(kctx->dma_fence.wq, &katom->work);
+	/* Warn if the work was already queued; that should not happen. */
+	WARN_ON(!ret);
+}
+
+/**
+ * kbase_dma_fence_cancel_atom() - Cancels waiting on an atom
+ * @katom:	Katom to cancel
+ *
+ * Locking: katom->dma_fence.callbacks list assumes jctx.lock is held.
+ */
+static void
+kbase_dma_fence_cancel_atom(struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&katom->kctx->jctx.lock);
+
+	/* Cancel callbacks and clean up. */
+	kbase_fence_free_callbacks(katom);
+
+	/* Mark the atom as handled in case all fences signaled just before
+	 * canceling the callbacks and the worker was queued.
+	 */
+	kbase_fence_dep_count_set(katom, -1);
+
+	/* Prevent job_done_nolock from being called twice on an atom when
+	 * there is a race between job completion and cancellation.
+	 */
+
+	if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
+		/* Wait was cancelled - zap the atom */
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		if (jd_done_nolock(katom, NULL))
+			kbase_js_sched_all(katom->kctx->kbdev);
+	}
+}
+
+/**
+ * kbase_dma_fence_work() - Worker thread called when a fence is signaled
+ * @pwork:	work_struct containing a pointer to a katom
+ *
+ * This function will clean and mark all dependencies as satisfied
+ */
+static void
+kbase_dma_fence_work(struct work_struct *pwork)
+{
+	struct kbase_jd_atom *katom;
+	struct kbase_jd_context *ctx;
+
+	katom = container_of(pwork, struct kbase_jd_atom, work);
+	ctx = &katom->kctx->jctx;
+
+	mutex_lock(&ctx->lock);
+	if (kbase_fence_dep_count_read(katom) != 0)
+		goto out;
+
+	kbase_fence_dep_count_set(katom, -1);
+
+	/* Remove atom from list of dma-fence waiting atoms. */
+	kbase_dma_fence_waiters_remove(katom);
+	/* Cleanup callbacks. */
+	kbase_fence_free_callbacks(katom);
+	/*
+	 * Queue atom on GPU, unless it has already completed due to a failing
+	 * dependency. Run jd_done_nolock() on the katom if it is completed.
+	 */
+	if (unlikely(katom->status == KBASE_JD_ATOM_STATE_COMPLETED))
+		jd_done_nolock(katom, NULL);
+	else
+		kbase_jd_dep_clear_locked(katom);
+
+out:
+	mutex_unlock(&ctx->lock);
+}
+
+static void
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_dma_fence_cb(struct fence *fence, struct fence_cb *cb)
+#else
+kbase_dma_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+#endif
+{
+	struct kbase_fence_cb *kcb = container_of(cb,
+				struct kbase_fence_cb,
+				fence_cb);
+	struct kbase_jd_atom *katom = kcb->katom;
+
+	/* If the atom is zapped dep_count will be forced to a negative number
+	 * preventing this callback from ever scheduling work, which would in
+	 * turn reschedule the atom.
+	 */
+
+	if (kbase_fence_dep_count_dec_and_test(katom))
+		kbase_dma_fence_queue_work(katom);
+}
+
+static int
+kbase_dma_fence_add_reservation_callback(struct kbase_jd_atom *katom,
+					 struct reservation_object *resv,
+					 bool exclusive)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *excl_fence = NULL;
+	struct fence **shared_fences = NULL;
+#else
+	struct dma_fence *excl_fence = NULL;
+	struct dma_fence **shared_fences = NULL;
+#endif
+	unsigned int shared_count = 0;
+	int err, i;
+
+	err = reservation_object_get_fences_rcu(resv,
+						&excl_fence,
+						&shared_count,
+						&shared_fences);
+	if (err)
+		return err;
+
+	if (excl_fence) {
+		err = kbase_fence_add_callback(katom,
+						excl_fence,
+						kbase_dma_fence_cb);
+
+		/* Release our reference to the fence, taken by
+		 * reservation_object_get_fences_rcu(). We have set up our
+		 * callback (if that was possible), and it is the fence
+		 * owner's responsibility to signal the fence before allowing
+		 * it to disappear.
+		 */
+		dma_fence_put(excl_fence);
+
+		if (err)
+			goto out;
+	}
+
+	if (exclusive) {
+		for (i = 0; i < shared_count; i++) {
+			err = kbase_fence_add_callback(katom,
+							shared_fences[i],
+							kbase_dma_fence_cb);
+			if (err)
+				goto out;
+		}
+	}
+
+	/* Release all our references to the shared fences, taken by
+	 * reservation_object_get_fences_rcu(). We have set up our callbacks
+	 * (where that was possible), and it is the fence owner's
+	 * responsibility to signal each fence before allowing it to
+	 * disappear.
+	 */
+out:
+	for (i = 0; i < shared_count; i++)
+		dma_fence_put(shared_fences[i]);
+	kfree(shared_fences);
+
+	if (err) {
+		/*
+		 * On error, cancel and clean up all callbacks that were set
+		 * up before the error.
+		 */
+		kbase_fence_free_callbacks(katom);
+	}
+
+	return err;
+}
+
+void kbase_dma_fence_add_reservation(struct reservation_object *resv,
+				     struct kbase_dma_fence_resv_info *info,
+				     bool exclusive)
+{
+	unsigned int i;
+
+	for (i = 0; i < info->dma_fence_resv_count; i++) {
+		/* Duplicate resource, ignore */
+		if (info->resv_objs[i] == resv)
+			return;
+	}
+
+	info->resv_objs[info->dma_fence_resv_count] = resv;
+	if (exclusive)
+		set_bit(info->dma_fence_resv_count,
+			info->dma_fence_excl_bitmap);
+	(info->dma_fence_resv_count)++;
+}
+
+int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
+			 struct kbase_dma_fence_resv_info *info)
+{
+	int err, i;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence;
+#else
+	struct dma_fence *fence;
+#endif
+	struct ww_acquire_ctx ww_ctx;
+
+	lockdep_assert_held(&katom->kctx->jctx.lock);
+
+	fence = kbase_fence_out_new(katom);
+	if (!fence) {
+		err = -ENOMEM;
+		dev_err(katom->kctx->kbdev->dev,
+			"Error %d creating fence.\n", err);
+		return err;
+	}
+
+	kbase_fence_dep_count_set(katom, 1);
+
+	err = kbase_dma_fence_lock_reservations(info, &ww_ctx);
+	if (err) {
+		dev_err(katom->kctx->kbdev->dev,
+			"Error %d locking reservations.\n", err);
+		kbase_fence_dep_count_set(katom, -1);
+		kbase_fence_out_remove(katom);
+		return err;
+	}
+
+	for (i = 0; i < info->dma_fence_resv_count; i++) {
+		struct reservation_object *obj = info->resv_objs[i];
+
+		if (!test_bit(i, info->dma_fence_excl_bitmap)) {
+			err = reservation_object_reserve_shared(obj);
+			if (err) {
+				dev_err(katom->kctx->kbdev->dev,
+					"Error %d reserving space for shared fence.\n", err);
+				goto end;
+			}
+
+			err = kbase_dma_fence_add_reservation_callback(katom, obj, false);
+			if (err) {
+				dev_err(katom->kctx->kbdev->dev,
+					"Error %d adding reservation to callback.\n", err);
+				goto end;
+			}
+
+			reservation_object_add_shared_fence(obj, fence);
+		} else {
+			err = kbase_dma_fence_add_reservation_callback(katom, obj, true);
+			if (err) {
+				dev_err(katom->kctx->kbdev->dev,
+					"Error %d adding reservation to callback.\n", err);
+				goto end;
+			}
+
+			reservation_object_add_excl_fence(obj, fence);
+		}
+	}
+
+end:
+	kbase_dma_fence_unlock_reservations(info, &ww_ctx);
+
+	if (likely(!err)) {
+		/* Test if the callbacks are already triggered */
+		if (kbase_fence_dep_count_dec_and_test(katom)) {
+			kbase_fence_dep_count_set(katom, -1);
+			kbase_fence_free_callbacks(katom);
+		} else {
+			/* Add katom to the list of dma-buf fence waiting atoms
+			 * only if it is still waiting.
+			 */
+			kbase_dma_fence_waiters_add(katom);
+		}
+	} else {
+		/* There was an error, cancel callbacks, set dep_count to -1 to
+		 * indicate that the atom has been handled (the caller will
+		 * kill it for us), signal the fence, free callbacks and the
+		 * fence.
+		 */
+		kbase_fence_free_callbacks(katom);
+		kbase_fence_dep_count_set(katom, -1);
+		kbase_dma_fence_signal(katom);
+	}
+
+	return err;
+}
+
+void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx)
+{
+	struct list_head *list = &kctx->dma_fence.waiting_resource;
+
+	while (!list_empty(list)) {
+		struct kbase_jd_atom *katom;
+
+		katom = list_first_entry(list, struct kbase_jd_atom, queue);
+		kbase_dma_fence_waiters_remove(katom);
+		kbase_dma_fence_cancel_atom(katom);
+	}
+}
+
+void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom)
+{
+	/* Cancel callbacks and clean up. */
+	if (kbase_fence_free_callbacks(katom))
+		kbase_dma_fence_queue_work(katom);
+}
+
+void kbase_dma_fence_signal(struct kbase_jd_atom *katom)
+{
+	if (!katom->dma_fence.fence)
+		return;
+
+	/* Signal the atom's fence. */
+	dma_fence_signal(katom->dma_fence.fence);
+
+	kbase_fence_out_remove(katom);
+
+	kbase_fence_free_callbacks(katom);
+}
+
+void kbase_dma_fence_term(struct kbase_context *kctx)
+{
+	destroy_workqueue(kctx->dma_fence.wq);
+	kctx->dma_fence.wq = NULL;
+}
+
+int kbase_dma_fence_init(struct kbase_context *kctx)
+{
+	INIT_LIST_HEAD(&kctx->dma_fence.waiting_resource);
+
+	kctx->dma_fence.wq = alloc_workqueue("mali-fence-%d",
+					     WQ_UNBOUND, 1, kctx->pid);
+	if (!kctx->dma_fence.wq)
+		return -ENOMEM;
+
+	return 0;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h
new file mode 100644
index 0000000..2a4d6fc
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h
@@ -0,0 +1,136 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_DMA_FENCE_H_
+#define _KBASE_DMA_FENCE_H_
+
+#ifdef CONFIG_MALI_DMA_FENCE
+
+#include <linux/list.h>
+#include <linux/reservation.h>
+#include <mali_kbase_fence.h>
+
+
+/* Forward declaration from mali_kbase_defs.h */
+struct kbase_jd_atom;
+struct kbase_context;
+
+/**
+ * struct kbase_dma_fence_resv_info - Structure with list of reservation objects
+ * @resv_objs:             Array of reservation objects to attach the
+ *                         new fence to.
+ * @dma_fence_resv_count:  Number of reservation objects in the array.
+ * @dma_fence_excl_bitmap: Specifies which resv_obj are exclusive.
+ *
+ * This is used by some functions to pass around a collection of data about
+ * reservation objects.
+ */
+struct kbase_dma_fence_resv_info {
+	struct reservation_object **resv_objs;
+	unsigned int dma_fence_resv_count;
+	unsigned long *dma_fence_excl_bitmap;
+};
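+
+/*
+ * Illustrative sketch (not part of the driver API): one way a caller could
+ * size and populate this structure for a hypothetical count `n` of
+ * reservation objects:
+ *
+ *	struct kbase_dma_fence_resv_info info = {
+ *		.resv_objs = kmalloc_array(n,
+ *				sizeof(struct reservation_object *),
+ *				GFP_KERNEL),
+ *		.dma_fence_resv_count = 0,
+ *		.dma_fence_excl_bitmap = kcalloc(BITS_TO_LONGS(n),
+ *				sizeof(unsigned long), GFP_KERNEL),
+ *	};
+ *
+ * The bitmap needs one bit per reservation object, hence the
+ * BITS_TO_LONGS(n) sizing; set_bit()/test_bit() index it by the object's
+ * position in @resv_objs.
+ */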
+
+/**
+ * kbase_dma_fence_add_reservation() - Adds a resv to the array of resv_objs
+ * @resv:      Reservation object to add to the array.
+ * @info:      Pointer to struct with current reservation info
+ * @exclusive: Boolean indicating if exclusive access is needed
+ *
+ * The function adds a new reservation_object to an existing array of
+ * reservation_objects. At the same time keeps track of which objects require
+ * exclusive access in dma_fence_excl_bitmap.
+ */
+void kbase_dma_fence_add_reservation(struct reservation_object *resv,
+				     struct kbase_dma_fence_resv_info *info,
+				     bool exclusive);
+
+/**
+ * kbase_dma_fence_wait() - Creates a new fence and attaches it to the resv_objs
+ * @katom: Katom with the external dependency.
+ * @info:  Pointer to struct with current reservation info
+ *
+ * Return: 0 on success, otherwise a negative error code
+ */
+int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
+			 struct kbase_dma_fence_resv_info *info);
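+
+/*
+ * Minimal usage sketch, assuming a populated @info as described above and
+ * with error handling abbreviated:
+ *
+ *	kbase_dma_fence_add_reservation(resv, &info, exclusive);
+ *	...					(one call per external resource)
+ *	err = kbase_dma_fence_wait(katom, &info);
+ *
+ * On success the atom is either immediately runnable (all fences had already
+ * signaled) or sits on the context's waiter list until its dep_count drops
+ * to zero; on failure the caller is expected to kill the atom.
+ */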
+
+/**
+ * kbase_dma_fence_cancel_all_atoms() - Cancel all dma-fence-blocked atoms on kctx
+ * @kctx: Pointer to kbase context
+ *
+ * This function will cancel and clean up all katoms on @kctx that are waiting
+ * on dma-buf fences.
+ *
+ * Locking: jctx.lock needs to be held when calling this function.
+ */
+void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx);
+
+/**
+ * kbase_dma_fence_cancel_callbacks() - Cancel only callbacks on katom
+ * @katom: Pointer to katom whose callbacks are to be canceled
+ *
+ * This function cancels all dma-buf fence callbacks on @katom, but does not
+ * cancel the katom itself.
+ *
+ * The caller is responsible for ensuring that jd_done_nolock is called on
+ * @katom.
+ *
+ * Locking: jctx.lock must be held when calling this function.
+ */
+void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_dma_fence_signal() - Signal katom's fence and clean up after wait
+ * @katom: Pointer to katom to signal and clean up
+ *
+ * This function will signal the @katom's fence, if it has one, and clean up
+ * the callback data from the katom's wait on earlier fences.
+ *
+ * Locking: jctx.lock must be held while calling this function.
+ */
+void kbase_dma_fence_signal(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_dma_fence_term() - Terminate Mali dma-fence context
+ * @kctx: kbase context to terminate
+ */
+void kbase_dma_fence_term(struct kbase_context *kctx);
+
+/**
+ * kbase_dma_fence_init() - Initialize Mali dma-fence context
+ * @kctx: kbase context to initialize
+ */
+int kbase_dma_fence_init(struct kbase_context *kctx);
+
+
+#else /* CONFIG_MALI_DMA_FENCE */
+/* Dummy functions for when dma-buf fence isn't enabled. */
+
+static inline int kbase_dma_fence_init(struct kbase_context *kctx)
+{
+	return 0;
+}
+
+static inline void kbase_dma_fence_term(struct kbase_context *kctx) {}
+#endif /* CONFIG_MALI_DMA_FENCE */
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_event.c b/drivers/gpu/arm/midgard/mali_kbase_event.c
new file mode 100644
index 0000000..721af69
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_event.c
@@ -0,0 +1,266 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016,2018-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_debug.h>
+#include <mali_kbase_tracepoints.h>
+
+static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	struct base_jd_udata data;
+	struct kbase_device *kbdev;
+
+	lockdep_assert_held(&kctx->jctx.lock);
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(katom != NULL);
+	KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
+
+	kbdev = kctx->kbdev;
+	data = katom->udata;
+
+	KBASE_TLSTREAM_TL_NRET_ATOM_CTX(kbdev, katom, kctx);
+	KBASE_TLSTREAM_TL_DEL_ATOM(kbdev, katom);
+
+	katom->status = KBASE_JD_ATOM_STATE_UNUSED;
+
+	wake_up(&katom->completed);
+
+	return data;
+}
+
+int kbase_event_pending(struct kbase_context *ctx)
+{
+	KBASE_DEBUG_ASSERT(ctx);
+
+	return (atomic_read(&ctx->event_count) != 0) ||
+			(atomic_read(&ctx->event_closed) != 0);
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_pending);
+
+int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent)
+{
+	struct kbase_jd_atom *atom;
+
+	KBASE_DEBUG_ASSERT(ctx);
+
+	mutex_lock(&ctx->event_mutex);
+
+	if (list_empty(&ctx->event_list)) {
+		if (!atomic_read(&ctx->event_closed)) {
+			mutex_unlock(&ctx->event_mutex);
+			return -1;
+		}
+
+		/* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */
+		mutex_unlock(&ctx->event_mutex);
+		uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
+		memset(&uevent->udata, 0, sizeof(uevent->udata));
+		dev_dbg(ctx->kbdev->dev,
+				"event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n",
+				BASE_JD_EVENT_DRV_TERMINATED);
+		return 0;
+	}
+
+	/* normal event processing */
+	atomic_dec(&ctx->event_count);
+	atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]);
+	list_del(ctx->event_list.next);
+
+	mutex_unlock(&ctx->event_mutex);
+
+	dev_dbg(ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom);
+	uevent->event_code = atom->event_code;
+	uevent->atom_number = (atom - ctx->jctx.atoms);
+
+	if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+		kbase_jd_free_external_resources(atom);
+
+	mutex_lock(&ctx->jctx.lock);
+	uevent->udata = kbase_event_process(ctx, atom);
+	mutex_unlock(&ctx->jctx.lock);
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_dequeue);
+
+/**
+ * kbase_event_process_noreport_worker - Worker for processing atoms that do not
+ *                                       return an event but do have external
+ *                                       resources
+ * @data:  Work structure
+ */
+static void kbase_event_process_noreport_worker(struct work_struct *data)
+{
+	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
+			work);
+	struct kbase_context *kctx = katom->kctx;
+
+	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+		kbase_jd_free_external_resources(katom);
+
+	mutex_lock(&kctx->jctx.lock);
+	kbase_event_process(kctx, katom);
+	mutex_unlock(&kctx->jctx.lock);
+}
+
+/**
+ * kbase_event_process_noreport - Process atoms that do not return an event
+ * @kctx:  Context pointer
+ * @katom: Atom to be processed
+ *
+ * Atoms that do not have external resources will be processed immediately.
+ * Atoms that do have external resources will be processed on a workqueue, in
+ * order to avoid locking issues.
+ */
+static void kbase_event_process_noreport(struct kbase_context *kctx,
+		struct kbase_jd_atom *katom)
+{
+	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+		INIT_WORK(&katom->work, kbase_event_process_noreport_worker);
+		queue_work(kctx->event_workq, &katom->work);
+	} else {
+		kbase_event_process(kctx, katom);
+	}
+}
+
+/**
+ * kbase_event_coalesce - Move pending events to the main event list
+ * @kctx:  Context pointer
+ *
+ * kctx->event_list and kctx->event_coalesce_count must be protected
+ * by a lock unless this is the last thread using them
+ * (and we're about to tear down the lock).
+ *
+ * Return: The number of pending events moved to the main event list
+ */
+static int kbase_event_coalesce(struct kbase_context *kctx)
+{
+	const int event_count = kctx->event_coalesce_count;
+
+	/* Join the list of pending events onto the tail of the main list
+	 * and reset it
+	 */
+	list_splice_tail_init(&kctx->event_coalesce_list, &kctx->event_list);
+	kctx->event_coalesce_count = 0;
+
+	/* Return the number of events moved */
+	return event_count;
+}
+
+void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom)
+{
+	struct kbase_device *kbdev = ctx->kbdev;
+
+	if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) {
+		if (atom->event_code == BASE_JD_EVENT_DONE) {
+			/* Don't report the event */
+			kbase_event_process_noreport(ctx, atom);
+			return;
+		}
+	}
+
+	if (atom->core_req & BASEP_JD_REQ_EVENT_NEVER) {
+		/* Don't report the event */
+		kbase_event_process_noreport(ctx, atom);
+		return;
+	}
+	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(kbdev, atom, TL_ATOM_STATE_POSTED);
+	if (atom->core_req & BASE_JD_REQ_EVENT_COALESCE) {
+		/* Don't report the event until other event(s) have completed */
+		mutex_lock(&ctx->event_mutex);
+		list_add_tail(&atom->dep_item[0], &ctx->event_coalesce_list);
+		++ctx->event_coalesce_count;
+		mutex_unlock(&ctx->event_mutex);
+	} else {
+		/* Report the event and any pending events now */
+		int event_count = 1;
+
+		mutex_lock(&ctx->event_mutex);
+		event_count += kbase_event_coalesce(ctx);
+		list_add_tail(&atom->dep_item[0], &ctx->event_list);
+		atomic_add(event_count, &ctx->event_count);
+		mutex_unlock(&ctx->event_mutex);
+
+		kbase_event_wakeup(ctx);
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_event_post);
+
+void kbase_event_close(struct kbase_context *kctx)
+{
+	mutex_lock(&kctx->event_mutex);
+	atomic_set(&kctx->event_closed, true);
+	mutex_unlock(&kctx->event_mutex);
+	kbase_event_wakeup(kctx);
+}
+
+int kbase_event_init(struct kbase_context *kctx)
+{
+	KBASE_DEBUG_ASSERT(kctx);
+
+	INIT_LIST_HEAD(&kctx->event_list);
+	INIT_LIST_HEAD(&kctx->event_coalesce_list);
+	mutex_init(&kctx->event_mutex);
+	atomic_set(&kctx->event_count, 0);
+	kctx->event_coalesce_count = 0;
+	atomic_set(&kctx->event_closed, false);
+	kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1);
+
+	if (!kctx->event_workq)
+		return -EINVAL;
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_init);
+
+void kbase_event_cleanup(struct kbase_context *kctx)
+{
+	int event_count;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(kctx->event_workq);
+
+	flush_workqueue(kctx->event_workq);
+	destroy_workqueue(kctx->event_workq);
+
+	/* We use kbase_event_dequeue to remove the remaining events as that
+	 * deals with all the cleanup needed for the atoms.
+	 *
+	 * Note: use of kctx->event_list without a lock is safe because this must be the last
+	 * thread using it (because we're about to terminate the lock)
+	 */
+	event_count = kbase_event_coalesce(kctx);
+	atomic_add(event_count, &kctx->event_count);
+
+	while (!list_empty(&kctx->event_list)) {
+		struct base_jd_event_v2 event;
+
+		kbase_event_dequeue(kctx, &event);
+	}
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_cleanup);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_fence.c b/drivers/gpu/arm/midgard/mali_kbase_fence.c
new file mode 100644
index 0000000..96a6ab9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_fence.c
@@ -0,0 +1,212 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <mali_kbase_fence_defs.h>
+#include <mali_kbase_fence.h>
+#include <mali_kbase.h>
+
+/* Spin lock protecting all Mali fences as fence->lock. */
+static DEFINE_SPINLOCK(kbase_fence_lock);
+
+static const char *
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_fence_get_driver_name(struct fence *fence)
+#else
+kbase_fence_get_driver_name(struct dma_fence *fence)
+#endif
+{
+	return kbase_drv_name;
+}
+
+static const char *
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_fence_get_timeline_name(struct fence *fence)
+#else
+kbase_fence_get_timeline_name(struct dma_fence *fence)
+#endif
+{
+	return kbase_timeline_name;
+}
+
+static bool
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_fence_enable_signaling(struct fence *fence)
+#else
+kbase_fence_enable_signaling(struct dma_fence *fence)
+#endif
+{
+	return true;
+}
+
+static void
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_fence_fence_value_str(struct fence *fence, char *str, int size)
+#else
+kbase_fence_fence_value_str(struct dma_fence *fence, char *str, int size)
+#endif
+{
+#if (KERNEL_VERSION(5, 1, 0) > LINUX_VERSION_CODE)
+	snprintf(str, size, "%u", fence->seqno);
+#else
+	snprintf(str, size, "%llu", fence->seqno);
+#endif
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+const struct fence_ops kbase_fence_ops = {
+	.wait = fence_default_wait,
+#else
+const struct dma_fence_ops kbase_fence_ops = {
+	.wait = dma_fence_default_wait,
+#endif
+	.get_driver_name = kbase_fence_get_driver_name,
+	.get_timeline_name = kbase_fence_get_timeline_name,
+	.enable_signaling = kbase_fence_enable_signaling,
+	.fence_value_str = kbase_fence_fence_value_str
+};
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+struct fence *
+kbase_fence_out_new(struct kbase_jd_atom *katom)
+#else
+struct dma_fence *
+kbase_fence_out_new(struct kbase_jd_atom *katom)
+#endif
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence;
+#else
+	struct dma_fence *fence;
+#endif
+
+	WARN_ON(katom->dma_fence.fence);
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return NULL;
+
+	dma_fence_init(fence,
+		       &kbase_fence_ops,
+		       &kbase_fence_lock,
+		       katom->dma_fence.context,
+		       atomic_inc_return(&katom->dma_fence.seqno));
+
+	katom->dma_fence.fence = fence;
+
+	return fence;
+}
+
+bool
+kbase_fence_free_callbacks(struct kbase_jd_atom *katom)
+{
+	struct kbase_fence_cb *cb, *tmp;
+	bool res = false;
+
+	lockdep_assert_held(&katom->kctx->jctx.lock);
+
+	/* Clean up and free callbacks. */
+	list_for_each_entry_safe(cb, tmp, &katom->dma_fence.callbacks, node) {
+		bool ret;
+
+		/* Cancel callbacks that haven't been called yet. */
+		ret = dma_fence_remove_callback(cb->fence, &cb->fence_cb);
+		if (ret) {
+			int dep_count;
+
+			/* Fence had not signaled, clean up after
+			 * canceling.
+			 */
+			dep_count = atomic_dec_return(&katom->dma_fence.dep_count);
+
+			if (unlikely(dep_count == 0))
+				res = true;
+		}
+
+		/*
+		 * Release the reference taken in
+		 * kbase_fence_add_callback().
+		 */
+		dma_fence_put(cb->fence);
+		list_del(&cb->node);
+		kfree(cb);
+	}
+
+	return res;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+int
+kbase_fence_add_callback(struct kbase_jd_atom *katom,
+			 struct fence *fence,
+			 fence_func_t callback)
+#else
+int
+kbase_fence_add_callback(struct kbase_jd_atom *katom,
+			 struct dma_fence *fence,
+			 dma_fence_func_t callback)
+#endif
+{
+	int err = 0;
+	struct kbase_fence_cb *kbase_fence_cb;
+
+	if (!fence)
+		return -EINVAL;
+
+	kbase_fence_cb = kmalloc(sizeof(*kbase_fence_cb), GFP_KERNEL);
+	if (!kbase_fence_cb)
+		return -ENOMEM;
+
+	kbase_fence_cb->fence = fence;
+	kbase_fence_cb->katom = katom;
+	INIT_LIST_HEAD(&kbase_fence_cb->node);
+	atomic_inc(&katom->dma_fence.dep_count);
+
+	err = dma_fence_add_callback(fence, &kbase_fence_cb->fence_cb,
+				     callback);
+	if (err == -ENOENT) {
+		/* Fence signaled, get the completion result */
+		err = dma_fence_get_status(fence);
+
+		/* remap success completion to err code */
+		if (err == 1)
+			err = 0;
+
+		kfree(kbase_fence_cb);
+		atomic_dec(&katom->dma_fence.dep_count);
+	} else if (err) {
+		kfree(kbase_fence_cb);
+		atomic_dec(&katom->dma_fence.dep_count);
+	} else {
+		/*
+		 * Get reference to fence that will be kept until callback gets
+		 * cleaned up in kbase_fence_free_callbacks().
+		 */
+		dma_fence_get(fence);
+		/* Add callback to katom's list of callbacks */
+		list_add(&kbase_fence_cb->node, &katom->dma_fence.callbacks);
+	}
+
+	return err;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_fence.h b/drivers/gpu/arm/midgard/mali_kbase_fence.h
new file mode 100644
index 0000000..d7a65e0
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_fence.h
@@ -0,0 +1,280 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_FENCE_H_
+#define _KBASE_FENCE_H_
+
+/*
+ * mali_kbase_fence.[hc] has common fence code used by both
+ * - CONFIG_MALI_DMA_FENCE - implicit DMA fences
+ * - CONFIG_SYNC_FILE      - explicit fences beginning with 4.9 kernel
+ */
+
+#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+
+#include <linux/list.h>
+#include "mali_kbase_fence_defs.h"
+#include "mali_kbase.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+extern const struct fence_ops kbase_fence_ops;
+#else
+extern const struct dma_fence_ops kbase_fence_ops;
+#endif
+
+/**
+ * struct kbase_fence_cb - Mali dma-fence callback data struct
+ * @fence_cb: Callback function
+ * @katom:    Pointer to katom that is waiting on this callback
+ * @fence:    Pointer to the fence object on which this callback is waiting
+ * @node:     List head for linking this callback to the katom
+ */
+struct kbase_fence_cb {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence_cb fence_cb;
+	struct fence *fence;
+#else
+	struct dma_fence_cb fence_cb;
+	struct dma_fence *fence;
+#endif
+	struct kbase_jd_atom *katom;
+	struct list_head node;
+};
+
+/**
+ * kbase_fence_out_new() - Creates a new output fence and puts it on the atom
+ * @katom: Atom to create an output fence for
+ *
+ * Return: A new fence object on success, NULL on failure.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+struct fence *kbase_fence_out_new(struct kbase_jd_atom *katom);
+#else
+struct dma_fence *kbase_fence_out_new(struct kbase_jd_atom *katom);
+#endif
+
+#if defined(CONFIG_SYNC_FILE)
+/**
+ * kbase_fence_fence_in_set() - Assign input fence to atom
+ * @katom: Atom to assign input fence to
+ * @fence: Input fence to assign to atom
+ *
+ * This function will take ownership of one fence reference!
+ */
+#define kbase_fence_fence_in_set(katom, fence) \
+	do { \
+		WARN_ON((katom)->dma_fence.fence_in); \
+		(katom)->dma_fence.fence_in = fence; \
+	} while (0)
+#endif
+
+/**
+ * kbase_fence_out_remove() - Removes the output fence from atom
+ * @katom: Atom to remove output fence for
+ *
+ * This will also release the reference to this fence which the atom keeps
+ */
+static inline void kbase_fence_out_remove(struct kbase_jd_atom *katom)
+{
+	if (katom->dma_fence.fence) {
+		dma_fence_put(katom->dma_fence.fence);
+		katom->dma_fence.fence = NULL;
+	}
+}
+
+#if defined(CONFIG_SYNC_FILE)
+/**
+ * kbase_fence_in_remove() - Removes the input fence from atom
+ * @katom: Atom to remove input fence for
+ *
+ * This will also release the reference to this fence which the atom keeps
+ */
+static inline void kbase_fence_in_remove(struct kbase_jd_atom *katom)
+{
+	if (katom->dma_fence.fence_in) {
+		dma_fence_put(katom->dma_fence.fence_in);
+		katom->dma_fence.fence_in = NULL;
+	}
+}
+#endif
+
+/**
+ * kbase_fence_out_is_ours() - Check if atom has a valid fence created by us
+ * @katom: Atom to check output fence for
+ *
+ * Return: true if fence exists and is valid, otherwise false
+ */
+static inline bool kbase_fence_out_is_ours(struct kbase_jd_atom *katom)
+{
+	return katom->dma_fence.fence &&
+				katom->dma_fence.fence->ops == &kbase_fence_ops;
+}
+
+/**
+ * kbase_fence_out_signal() - Signal output fence of atom
+ * @katom: Atom to signal output fence for
+ * @status: Status to signal with (0 for success, < 0 for error)
+ *
+ * Return: 0 on success, < 0 on error
+ */
+static inline int kbase_fence_out_signal(struct kbase_jd_atom *katom,
+					 int status)
+{
+	if (status) {
+#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE && \
+	  KERNEL_VERSION(4, 9, 68) <= LINUX_VERSION_CODE)
+		fence_set_error(katom->dma_fence.fence, status);
+#elif (KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE)
+		dma_fence_set_error(katom->dma_fence.fence, status);
+#else
+		katom->dma_fence.fence->status = status;
+#endif
+	}
+	return dma_fence_signal(katom->dma_fence.fence);
+}
+
+/**
+ * kbase_fence_add_callback() - Add callback on @fence to block @katom
+ * @katom: Pointer to katom that will be blocked by @fence
+ * @fence: Pointer to fence on which to set up the callback
+ * @callback: Pointer to function to be called when fence is signaled
+ *
+ * Caller needs to hold a reference to @fence when calling this function, and
+ * the caller is responsible for releasing that reference. An additional
+ * reference to @fence is taken if the callback is successfully set up, and
+ * @fence then needs to be kept valid until the callback has been called and
+ * cleanup has been done.
+ *
+ * Return: 0 on success: fence was either already signaled, or callback was
+ * set up. Negative error code is returned on error.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+int kbase_fence_add_callback(struct kbase_jd_atom *katom,
+			     struct fence *fence,
+			     fence_func_t callback);
+#else
+int kbase_fence_add_callback(struct kbase_jd_atom *katom,
+			     struct dma_fence *fence,
+			     dma_fence_func_t callback);
+#endif
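+
+/*
+ * Reference-ownership sketch (illustrative; `fence` is a fence the caller
+ * already holds a reference to, and `my_cb` is a hypothetical callback with
+ * the dma_fence_func_t signature):
+ *
+ *	err = kbase_fence_add_callback(katom, fence, my_cb);
+ *	dma_fence_put(fence);	(drop the caller's reference; on success the
+ *				 callback machinery holds its own reference
+ *				 until kbase_fence_free_callbacks())
+ */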
+
+/**
+ * kbase_fence_dep_count_set() - Set dep_count value on atom to specified value
+ * @katom: Atom to set dep_count for
+ * @val: value to set dep_count to
+ *
+ * The dep_count is available to the users of this module so that they can
+ * synchronize completion of the wait with cancellation and adding of more
+ * callbacks. For instance, a user could do the following:
+ *
+ * dep_count set to 1
+ * callback #1 added, dep_count is increased to 2
+ *                             callback #1 happens, dep_count decremented to 1
+ *                             since dep_count > 0, no completion is done
+ * callback #2 is added, dep_count is increased to 2
+ * dep_count decremented to 1
+ *                             callback #2 happens, dep_count decremented to 0
+ *                             since dep_count now is zero, completion executes
+ *
+ * The dep_count can also be used to make sure that the completion only
+ * executes once. This is typically done by setting dep_count to -1 for the
+ * thread that takes on this responsibility.
+ */
+static inline void
+kbase_fence_dep_count_set(struct kbase_jd_atom *katom, int val)
+{
+	atomic_set(&katom->dma_fence.dep_count, val);
+}
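+
+/*
+ * Worked sketch of the dep_count protocol described above (illustrative;
+ * assumes both callbacks are successfully installed):
+ *
+ *	kbase_fence_dep_count_set(katom, 1);
+ *	kbase_fence_add_callback(katom, f1, cb);	dep_count: 1 -> 2
+ *	kbase_fence_add_callback(katom, f2, cb);	dep_count: 2 -> 3
+ *	if (kbase_fence_dep_count_dec_and_test(katom))
+ *		(all fences already signaled: this thread completes the atom)
+ *
+ * Otherwise the callback that takes dep_count to zero completes the atom
+ * instead, matching the timeline in the description above.
+ */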
+
+/**
+ * kbase_fence_dep_count_dec_and_test() - Decrements dep_count
+ * @katom: Atom to decrement dep_count for
+ *
+ * See @kbase_fence_dep_count_set for general description about dep_count
+ *
+ * Return: true if value was decremented to zero, otherwise false
+ */
+static inline bool
+kbase_fence_dep_count_dec_and_test(struct kbase_jd_atom *katom)
+{
+	return atomic_dec_and_test(&katom->dma_fence.dep_count);
+}
+
+/**
+ * kbase_fence_dep_count_read() - Returns the current dep_count value
+ * @katom: Pointer to katom
+ *
+ * See @kbase_fence_dep_count_set for general description about dep_count
+ *
+ * Return: The current dep_count value
+ */
+static inline int kbase_fence_dep_count_read(struct kbase_jd_atom *katom)
+{
+	return atomic_read(&katom->dma_fence.dep_count);
+}
+
+/**
+ * kbase_fence_free_callbacks() - Free dma-fence callbacks on a katom
+ * @katom: Pointer to katom
+ *
+ * This function will free all fence callbacks on the katom's list of
+ * callbacks. Callbacks that have not yet been called, because their fence
+ * hasn't yet signaled, will first be removed from the fence.
+ *
+ * Locking: katom->dma_fence.callbacks list assumes jctx.lock is held.
+ *
+ * Return: true if dep_count reached 0, otherwise false.
+ */
+bool kbase_fence_free_callbacks(struct kbase_jd_atom *katom);
+
+#if defined(CONFIG_SYNC_FILE)
+/**
+ * kbase_fence_in_get() - Retrieve input fence for atom.
+ * @katom: Atom to get input fence from
+ *
+ * A ref will be taken for the fence, so use @kbase_fence_put() to release it
+ *
+ * Return: The fence, or NULL if there is no input fence for atom
+ */
+#define kbase_fence_in_get(katom) dma_fence_get((katom)->dma_fence.fence_in)
+#endif
+
+/**
+ * kbase_fence_out_get() - Retrieve output fence for atom.
+ * @katom: Atom to get output fence from
+ *
+ * A ref will be taken for the fence, so use @kbase_fence_put() to release it
+ *
+ * Return: The fence, or NULL if there is no output fence for atom
+ */
+#define kbase_fence_out_get(katom) dma_fence_get((katom)->dma_fence.fence)
+
+/**
+ * kbase_fence_put() - Releases a reference to a fence
+ * @fence: Fence to release reference for.
+ */
+#define kbase_fence_put(fence) dma_fence_put(fence)
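+
+/*
+ * Example (sketch) of taking and releasing an output fence reference:
+ *
+ *	struct dma_fence *f = kbase_fence_out_get(katom);
+ *	if (f) {
+ *		(use f, e.g. export it to user space)
+ *		kbase_fence_put(f);
+ *	}
+ */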
+
+
+#endif /* CONFIG_MALI_DMA_FENCE || CONFIG_SYNC_FILE */
+
+#endif /* _KBASE_FENCE_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h b/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h
new file mode 100644
index 0000000..607a95c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h
@@ -0,0 +1,68 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_FENCE_DEFS_H_
+#define _KBASE_FENCE_DEFS_H_
+
+/*
+ * There was a big rename in the 4.10 kernel (fence* -> dma_fence*).
+ * This file hides these compatibility issues from the rest of the driver.
+ */
+
+#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+
+#include <linux/fence.h>
+
+#define dma_fence_context_alloc(a) fence_context_alloc(a)
+#define dma_fence_init(a, b, c, d, e) fence_init(a, b, c, d, e)
+#define dma_fence_get(a) fence_get(a)
+#define dma_fence_put(a) fence_put(a)
+#define dma_fence_signal(a) fence_signal(a)
+#define dma_fence_is_signaled(a) fence_is_signaled(a)
+#define dma_fence_add_callback(a, b, c) fence_add_callback(a, b, c)
+#define dma_fence_remove_callback(a, b) fence_remove_callback(a, b)
+
+#if (KERNEL_VERSION(4, 9, 68) <= LINUX_VERSION_CODE)
+#define dma_fence_get_status(a) (fence_is_signaled(a) ? (a)->error ?: 1 : 0)
+#else
+#define dma_fence_get_status(a) (fence_is_signaled(a) ? (a)->status ?: 1 : 0)
+#endif
+
+#else
+
+#include <linux/dma-fence.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+#define dma_fence_get_status(a) (dma_fence_is_signaled(a) ? \
+	(a)->status ?: 1 \
+	: 0)
+#endif
+
+#endif /* < 4.10.0 */
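+
+/*
+ * Whichever variant is in use above, dma_fence_get_status() follows the same
+ * convention: 0 means the fence has not signaled yet, 1 means it signaled
+ * without error, and a negative errno means it signaled with an error.
+ */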
+
+#endif /* CONFIG_MALI_DMA_FENCE || CONFIG_SYNC_FILE */
+
+#endif /* _KBASE_FENCE_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator.h b/drivers/gpu/arm/midgard/mali_kbase_gator.h
new file mode 100644
index 0000000..6428f08
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* NB taken from gator  */
+/*
+ * List of possible actions to be controlled by DS-5 Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping
+ * and s/w counter reporting. We cannot use the enums in mali_uk_types.h because
+ * they are unknown inside gator.
+ */
+
+#ifndef _KBASE_GATOR_H_
+#define _KBASE_GATOR_H_
+
+#include <linux/types.h>
+
+#define GATOR_JOB_SLOT_START 1
+#define GATOR_JOB_SLOT_STOP  2
+#define GATOR_JOB_SLOT_SOFT_STOPPED  3
+
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+
+#define GATOR_MAKE_EVENT(type, number) (((type) << 24) | ((number) << 16))
+
+struct kbase_context;
+
+void kbase_trace_mali_job_slots_event(u32 dev_id, u32 event, const struct kbase_context *kctx, u8 atom_id);
+void kbase_trace_mali_pm_status(u32 dev_id, u32 event, u64 value);
+void kbase_trace_mali_page_fault_insert_pages(u32 dev_id, int event, u32 value);
+void kbase_trace_mali_total_alloc_pages_change(u32 dev_id, long long int event);
+
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+
+#endif  /* _KBASE_GATOR_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_api.c b/drivers/gpu/arm/midgard/mali_kbase_gator_api.c
new file mode 100644
index 0000000..7077c3a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_api.c
@@ -0,0 +1,359 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase.h"
+#include "mali_kbase_hw.h"
+#include "mali_kbase_mem_linux.h"
+#include "mali_kbase_gator_api.h"
+#include "mali_kbase_gator_hwcnt_names.h"
+
+#define MALI_MAX_CORES_PER_GROUP		4
+#define MALI_MAX_NUM_BLOCKS_PER_GROUP	8
+#define MALI_COUNTERS_PER_BLOCK			64
+#define MALI_BYTES_PER_COUNTER			4
+
+struct kbase_gator_hwcnt_handles {
+	struct kbase_device *kbdev;
+	struct kbase_vinstr_client *vinstr_cli;
+	void *vinstr_buffer;
+	struct work_struct dump_work;
+	int dump_complete;
+	spinlock_t dump_lock;
+};
+
+static void dump_worker(struct work_struct *work);
+
+const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters)
+{
+	const char * const *hardware_counters;
+	struct kbase_device *kbdev;
+	uint32_t product_id;
+	uint32_t count;
+
+	if (!total_counters)
+		return NULL;
+
+	/* Get the first device - it doesn't matter in this case */
+	kbdev = kbase_find_device(-1);
+	if (!kbdev)
+		return NULL;
+
+	product_id = kbdev->gpu_props.props.core_props.product_id;
+
+	if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+		switch (GPU_ID2_MODEL_MATCH_VALUE(product_id)) {
+		case GPU_ID2_PRODUCT_TMIX:
+			hardware_counters = hardware_counters_mali_tMIx;
+			count = ARRAY_SIZE(hardware_counters_mali_tMIx);
+			break;
+		case GPU_ID2_PRODUCT_THEX:
+			hardware_counters = hardware_counters_mali_tHEx;
+			count = ARRAY_SIZE(hardware_counters_mali_tHEx);
+			break;
+		case GPU_ID2_PRODUCT_TSIX:
+			hardware_counters = hardware_counters_mali_tSIx;
+			count = ARRAY_SIZE(hardware_counters_mali_tSIx);
+			break;
+		case GPU_ID2_PRODUCT_TDVX:
+			hardware_counters = hardware_counters_mali_tSIx;
+			count = ARRAY_SIZE(hardware_counters_mali_tSIx);
+			break;
+		case GPU_ID2_PRODUCT_TNOX:
+			hardware_counters = hardware_counters_mali_tNOx;
+			count = ARRAY_SIZE(hardware_counters_mali_tNOx);
+			break;
+		case GPU_ID2_PRODUCT_TGOX:
+			hardware_counters = hardware_counters_mali_tGOx;
+			count = ARRAY_SIZE(hardware_counters_mali_tGOx);
+			break;
+		case GPU_ID2_PRODUCT_TKAX:
+			hardware_counters = hardware_counters_mali_tKAx;
+			count = ARRAY_SIZE(hardware_counters_mali_tKAx);
+			break;
+		case GPU_ID2_PRODUCT_TTRX:
+			hardware_counters = hardware_counters_mali_tTRx;
+			count = ARRAY_SIZE(hardware_counters_mali_tTRx);
+			break;
+		default:
+			hardware_counters = NULL;
+			count = 0;
+			dev_err(kbdev->dev, "Unrecognized product ID: %u\n",
+				product_id);
+			break;
+		}
+	} else {
+		switch (product_id) {
+			/* If we are using a Mali-T60x device */
+		case GPU_ID_PI_T60X:
+			hardware_counters = hardware_counters_mali_t60x;
+			count = ARRAY_SIZE(hardware_counters_mali_t60x);
+			break;
+			/* If we are using a Mali-T62x device */
+		case GPU_ID_PI_T62X:
+			hardware_counters = hardware_counters_mali_t62x;
+			count = ARRAY_SIZE(hardware_counters_mali_t62x);
+			break;
+			/* If we are using a Mali-T72x device */
+		case GPU_ID_PI_T72X:
+			hardware_counters = hardware_counters_mali_t72x;
+			count = ARRAY_SIZE(hardware_counters_mali_t72x);
+			break;
+			/* If we are using a Mali-T76x device */
+		case GPU_ID_PI_T76X:
+			hardware_counters = hardware_counters_mali_t76x;
+			count = ARRAY_SIZE(hardware_counters_mali_t76x);
+			break;
+			/* If we are using a Mali-T82x device */
+		case GPU_ID_PI_T82X:
+			hardware_counters = hardware_counters_mali_t82x;
+			count = ARRAY_SIZE(hardware_counters_mali_t82x);
+			break;
+			/* If we are using a Mali-T83x device */
+		case GPU_ID_PI_T83X:
+			hardware_counters = hardware_counters_mali_t83x;
+			count = ARRAY_SIZE(hardware_counters_mali_t83x);
+			break;
+			/* If we are using a Mali-T86x device */
+		case GPU_ID_PI_T86X:
+			hardware_counters = hardware_counters_mali_t86x;
+			count = ARRAY_SIZE(hardware_counters_mali_t86x);
+			break;
+			/* If we are using a Mali-T88x device */
+		case GPU_ID_PI_TFRX:
+			hardware_counters = hardware_counters_mali_t88x;
+			count = ARRAY_SIZE(hardware_counters_mali_t88x);
+			break;
+		default:
+			hardware_counters = NULL;
+			count = 0;
+			dev_err(kbdev->dev, "Unrecognized product ID: %u\n",
+				product_id);
+			break;
+		}
+	}
+
+	/* Release the kbdev reference. */
+	kbase_release_device(kbdev);
+
+	*total_counters = count;
+
+	/* If we return a string array take a reference on the module (or fail). */
+	if (hardware_counters && !try_module_get(THIS_MODULE))
+		return NULL;
+
+	return hardware_counters;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init_names);
+
+void kbase_gator_hwcnt_term_names(void)
+{
+	/* Release the module reference. */
+	module_put(THIS_MODULE);
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term_names);
+
+struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info)
+{
+	struct kbase_gator_hwcnt_handles *hand;
+	struct kbase_ioctl_hwcnt_reader_setup setup;
+	uint32_t dump_size = 0, i = 0;
+
+	if (!in_out_info)
+		return NULL;
+
+	hand = kzalloc(sizeof(*hand), GFP_KERNEL);
+	if (!hand)
+		return NULL;
+
+	INIT_WORK(&hand->dump_work, dump_worker);
+	spin_lock_init(&hand->dump_lock);
+
+	/* Get the first device */
+	hand->kbdev = kbase_find_device(-1);
+	if (!hand->kbdev)
+		goto free_hand;
+
+	dump_size = kbase_vinstr_dump_size(hand->kbdev);
+	hand->vinstr_buffer = kzalloc(dump_size, GFP_KERNEL);
+	if (!hand->vinstr_buffer)
+		goto release_device;
+	in_out_info->kernel_dump_buffer = hand->vinstr_buffer;
+
+	in_out_info->nr_cores = hand->kbdev->gpu_props.num_cores;
+	in_out_info->nr_core_groups = hand->kbdev->gpu_props.num_core_groups;
+	in_out_info->gpu_id = hand->kbdev->gpu_props.props.core_props.product_id;
+
+	/* If we are using a v4 device (Mali-T6xx or Mali-T72x) */
+	if (kbase_hw_has_feature(hand->kbdev, BASE_HW_FEATURE_V4)) {
+		uint32_t cg, j;
+		uint64_t core_mask;
+
+		/* There are 8 hardware counters blocks per core group */
+		in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) *
+			MALI_MAX_NUM_BLOCKS_PER_GROUP *
+			in_out_info->nr_core_groups, GFP_KERNEL);
+
+		if (!in_out_info->hwc_layout)
+			goto free_vinstr_buffer;
+
+		dump_size = in_out_info->nr_core_groups *
+			MALI_MAX_NUM_BLOCKS_PER_GROUP *
+			MALI_COUNTERS_PER_BLOCK *
+			MALI_BYTES_PER_COUNTER;
+
+		for (cg = 0; cg < in_out_info->nr_core_groups; cg++) {
+			core_mask = hand->kbdev->gpu_props.props.coherency_info.group[cg].core_mask;
+
+			for (j = 0; j < MALI_MAX_CORES_PER_GROUP; j++) {
+				if (core_mask & (1u << j))
+					in_out_info->hwc_layout[i++] = SHADER_BLOCK;
+				else
+					in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+			}
+
+			in_out_info->hwc_layout[i++] = TILER_BLOCK;
+			in_out_info->hwc_layout[i++] = MMU_L2_BLOCK;
+
+			in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+
+			if (cg == 0)
+				in_out_info->hwc_layout[i++] = JM_BLOCK;
+			else
+				in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+		}
+	/* If we are using any other device */
+	} else {
+		uint32_t nr_l2, nr_sc_bits, j;
+		uint64_t core_mask;
+
+		nr_l2 = hand->kbdev->gpu_props.props.l2_props.num_l2_slices;
+
+		core_mask = hand->kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+
+		nr_sc_bits = fls64(core_mask);
+
+		/* The job manager and tiler sets of counters
+		 * are always present */
+		in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) * (2 + nr_sc_bits + nr_l2), GFP_KERNEL);
+
+		if (!in_out_info->hwc_layout)
+			goto free_vinstr_buffer;
+
+		dump_size = (2 + nr_sc_bits + nr_l2) * MALI_COUNTERS_PER_BLOCK * MALI_BYTES_PER_COUNTER;
+
+		in_out_info->hwc_layout[i++] = JM_BLOCK;
+		in_out_info->hwc_layout[i++] = TILER_BLOCK;
+
+		for (j = 0; j < nr_l2; j++)
+			in_out_info->hwc_layout[i++] = MMU_L2_BLOCK;
+
+		while (core_mask != 0ull) {
+			if ((core_mask & 1ull) != 0ull)
+				in_out_info->hwc_layout[i++] = SHADER_BLOCK;
+			else
+				in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+			core_mask >>= 1;
+		}
+	}
+
+	in_out_info->nr_hwc_blocks = i;
+	in_out_info->size = dump_size;
+
+	setup.jm_bm = in_out_info->bitmask[0];
+	setup.tiler_bm = in_out_info->bitmask[1];
+	setup.shader_bm = in_out_info->bitmask[2];
+	setup.mmu_l2_bm = in_out_info->bitmask[3];
+	hand->vinstr_cli = kbase_vinstr_hwcnt_kernel_setup(hand->kbdev->vinstr_ctx,
+			&setup, hand->vinstr_buffer);
+	if (!hand->vinstr_cli) {
+		dev_err(hand->kbdev->dev, "Failed to register gator with vinstr core");
+		goto free_layout;
+	}
+
+	return hand;
+
+free_layout:
+	kfree(in_out_info->hwc_layout);
+
+free_vinstr_buffer:
+	kfree(hand->vinstr_buffer);
+
+release_device:
+	kbase_release_device(hand->kbdev);
+
+free_hand:
+	kfree(hand);
+	return NULL;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init);
+
+void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles)
+{
+	if (in_out_info)
+		kfree(in_out_info->hwc_layout);
+
+	if (opaque_handles) {
+		cancel_work_sync(&opaque_handles->dump_work);
+		kbase_vinstr_detach_client(opaque_handles->vinstr_cli);
+		kfree(opaque_handles->vinstr_buffer);
+		kbase_release_device(opaque_handles->kbdev);
+		kfree(opaque_handles);
+	}
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term);
+
+static void dump_worker(struct work_struct *work)
+{
+	struct kbase_gator_hwcnt_handles *hand;
+
+	hand = container_of(work, struct kbase_gator_hwcnt_handles, dump_work);
+	if (!kbase_vinstr_hwc_dump(hand->vinstr_cli,
+			BASE_HWCNT_READER_EVENT_MANUAL)) {
+		spin_lock_bh(&hand->dump_lock);
+		hand->dump_complete = 1;
+		spin_unlock_bh(&hand->dump_lock);
+	} else {
+		schedule_work(&hand->dump_work);
+	}
+}
+
+uint32_t kbase_gator_instr_hwcnt_dump_complete(
+		struct kbase_gator_hwcnt_handles *opaque_handles,
+		uint32_t * const success)
+{
+	if (opaque_handles && success) {
+		*success = opaque_handles->dump_complete;
+		opaque_handles->dump_complete = 0;
+		return *success;
+	}
+	return 0;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_complete);
+
+uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles)
+{
+	if (opaque_handles)
+		schedule_work(&opaque_handles->dump_work);
+	return 0;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_irq);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_api.h b/drivers/gpu/arm/midgard/mali_kbase_gator_api.h
new file mode 100644
index 0000000..bd0589e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_api.h
@@ -0,0 +1,224 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_GATOR_API_H_
+#define _KBASE_GATOR_API_H_
+
+/**
+ * @brief This file describes the API used by Gator to fetch hardware counters.
+ */
+
+/* This define is used by the gator kernel module compile to select which DDK
+ * API calling convention to use. If not defined (legacy DDK) gator assumes
+ * version 1. The version to DDK release mapping is:
+ *     Version 1 API: DDK versions r1px, r2px
+ *     Version 2 API: DDK versions r3px, r4px
+ *     Version 3 API: DDK version r5p0 and newer
+ *
+ * API Usage
+ * =========
+ *
+ * 1] Call kbase_gator_hwcnt_init_names() to return the list of short counter
+ * names for the GPU present in this device.
+ *
+ * 2] Create a kbase_gator_hwcnt_info structure and set the counter enables for
+ * the counters you want enabled. The enables can all be set for simplicity in
+ * most use cases, but disabling some will let you minimize bandwidth impact.
+ *
+ * 3] Call kbase_gator_hwcnt_init() using the above structure, to create a
+ * counter context. On successful return the DDK will have populated the
+ * structure with a variety of useful information.
+ *
+ * 4] Call kbase_gator_instr_hwcnt_dump_irq() to queue a non-blocking request
+ * for a counter dump. If this returns a non-zero value the request has been
+ * queued, otherwise the driver has been unable to do so (typically because
+ * another user of the instrumentation exists concurrently).
+ *
+ * 5] Call kbase_gator_instr_hwcnt_dump_complete() to test whether the
+ * previously requested dump has been successful. If this returns non-zero the
+ * counter dump has resolved, but the value of *success must also be tested as
+ * the dump may not have been successful. If it returns zero the counter dump
+ * was abandoned due to the device being busy (typically because another
+ * user of the instrumentation exists concurrently).
+ *
+ * 6] Process the counters stored in the buffer pointed to by ...
+ *
+ *        kbase_gator_hwcnt_info->kernel_dump_buffer
+ *
+ *    In pseudo code you can find all of the counters via this approach:
+ *
+ *
+ *        hwcnt_info # pointer to kbase_gator_hwcnt_info structure
+ *        hwcnt_name # pointer to name list
+ *
+ *        u32 *hwcnt_data = (u32 *)hwcnt_info->kernel_dump_buffer;
+ *
+ *        # Iterate over each 64-counter block in this GPU configuration
+ *        for (i = 0; i < hwcnt_info->nr_hwc_blocks; i++) {
+ *            enum hwc_type type = hwcnt_info->hwc_layout[i];
+ *
+ *            # Skip reserved type blocks - they contain no counters at all
+ *            if (type == RESERVED_BLOCK)
+ *                continue;
+ *
+ *            size_t name_offset = type * 64;
+ *            size_t data_offset = i * 64;
+ *
+ *            # Iterate over the names of the counters in this block type
+ *            for (j = 0; j < 64; j++) {
+ *                const char *name = hwcnt_name[name_offset + j];
+ *
+ *                # Skip empty name strings - there is no counter here
+ *                if (name[0] == '\0')
+ *                    continue;
+ *
+ *                u32 data = hwcnt_data[data_offset + j];
+ *
+ *                printk("COUNTER: %s DATA: %u\n", name, data);
+ *            }
+ *        }
+ *
+ *
+ *     Note that in most implementations you typically want to either SUM or
+ *     AVERAGE multiple instances of the same counter if, for example, you have
+ *     multiple shader cores or multiple L2 caches. The most sensible view for
+ *     analysis is to AVERAGE shader core counters, but SUM L2 cache and MMU
+ *     counters.
+ *
+ * 7] Goto 4, repeating until you want to stop collecting counters.
+ *
+ * 8] Release the dump resources by calling kbase_gator_hwcnt_term().
+ *
+ * 9] Release the name table resources by calling
+ *    kbase_gator_hwcnt_term_names(). This function must only be called if
+ *    init_names() returned a non-NULL value.
+ **/
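+
+/*
+ * Condensed sketch of the polling loop from the steps above (illustrative;
+ * `profiling` and `process()` are hypothetical stand-ins for the client's
+ * own control flow, and error handling is omitted):
+ *
+ *	uint32_t total, ok;
+ *	const char * const *names = kbase_gator_hwcnt_init_names(&total);
+ *	struct kbase_gator_hwcnt_info info = {
+ *		.bitmask = { 0xffff, 0xffff, 0xffff, 0xffff },
+ *	};
+ *	struct kbase_gator_hwcnt_handles *h = kbase_gator_hwcnt_init(&info);
+ *
+ *	while (profiling) {
+ *		kbase_gator_instr_hwcnt_dump_irq(h);
+ *		while (!kbase_gator_instr_hwcnt_dump_complete(h, &ok))
+ *			;	(a real client would sleep or back off here)
+ *		if (ok)
+ *			process(info.kernel_dump_buffer, &info, names);
+ *	}
+ *
+ *	kbase_gator_hwcnt_term(&info, h);
+ *	kbase_gator_hwcnt_term_names();
+ */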
+
+#define MALI_DDK_GATOR_API_VERSION 3
+
+enum hwc_type {
+	JM_BLOCK = 0,
+	TILER_BLOCK,
+	SHADER_BLOCK,
+	MMU_L2_BLOCK,
+	RESERVED_BLOCK
+};
+
+struct kbase_gator_hwcnt_info {
+	/* Passed from Gator to kbase */
+
+	/* the bitmask of enabled hardware counters for each counter block */
+	uint16_t bitmask[4];
+
+	/* Passed from kbase to Gator */
+
+	/* ptr to counter dump memory */
+	void *kernel_dump_buffer;
+
+	/* size of counter dump memory */
+	uint32_t size;
+
+	/* the ID of the Mali device */
+	uint32_t gpu_id;
+
+	/* the number of shader cores in the GPU */
+	uint32_t nr_cores;
+
+	/* the number of core groups */
+	uint32_t nr_core_groups;
+
+	/* the memory layout of the performance counters */
+	enum hwc_type *hwc_layout;
+
+	/* the total number of hardware counter blocks */
+	uint32_t nr_hwc_blocks;
+};
+
+/**
+ * @brief Opaque block of Mali data which Gator needs to return to the API later.
+ */
+struct kbase_gator_hwcnt_handles;
+
+/**
+ * @brief Initialize the resources Gator needs for performance profiling.
+ *
+ * @param in_out_info   A pointer to a structure containing the enabled counters passed from Gator and all the Mali
+ *                      specific information that will be returned to Gator. On entry Gator must have populated the
+ *                      'bitmask' field with the counters it wishes to enable for each class of counter block.
+ *                      Each entry in the array corresponds to a single counter class based on the "hwc_type"
+ *                      enumeration, and each bit corresponds to an enable for 4 sequential counters (LSB enables
+ *                      the first 4 counters in the block, and so on). See the GPU counter array as returned by
 *                      kbase_gator_hwcnt_init_names() for the index values of each counter for the current GPU.
+ *
+ * @return              Pointer to an opaque handle block on success, NULL on error.
+ */
+extern struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info);
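+
+/*
+ * For example (illustrative): setting in_out_info->bitmask[SHADER_BLOCK] to
+ * 0x0003 enables counters 0-7 of each shader core block, since the LSB
+ * covers counters 0-3 and the next bit counters 4-7.
+ */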
+
+/**
+ * @brief Free all resources once Gator has finished using performance counters.
+ *
+ * @param in_out_info       A pointer to a structure containing the enabled counters passed from Gator and all the
+ *                          Mali specific information that will be returned to Gator.
+ * @param opaque_handles    A wrapper structure for kbase structures.
+ */
+extern void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief Poll whether a previously requested counter dump has completed.
+ *
+ * @param opaque_handles    A wrapper structure for kbase structures.
+ * @param[out] success      Non-zero on success, zero on failure.
+ *
+ * @return                  Zero if the dump is still pending, non-zero if the dump has completed. Note that a
 *                          completed dump may not have dumped successfully, so the caller must test for both
+ *                          a completed and successful dump before processing counters.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_complete(struct kbase_gator_hwcnt_handles *opaque_handles, uint32_t * const success);
+
+/**
+ * @brief Request the generation of a new counter dump.
+ *
+ * @param opaque_handles    A wrapper structure for kbase structures.
+ *
+ * @return                  Zero if the hardware device is busy and cannot handle the request, non-zero otherwise.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief Fetch the names table for the Mali device in use.
+ *
+ * @param[out] total_counters The total number of counter short names in the Mali device's list.
+ *
+ * @return                    Pointer to an array of strings of length *total_counters.
+ */
+extern const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters);
+
+/**
+ * @brief Terminate the use of the names table.
+ *
+ * This function must only be called if the initial call to kbase_gator_hwcnt_init_names() returned a non-NULL value.
+ */
+extern void kbase_gator_hwcnt_term_names(void);
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h
new file mode 100644
index 0000000..5d38c7b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h
@@ -0,0 +1,2178 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_H_
+#define _KBASE_GATOR_HWCNT_NAMES_H_
+
+/*
+ * "Short names" for hardware counters used by Streamline. Counters names are
+ * stored in accordance with their memory layout in the binary counter block
+ * emitted by the Mali GPU. Each "master" in the GPU emits a fixed-size block
+ * of 64 counters, and each GPU implements the same set of "masters" although
+ * the counters each master exposes within its block of 64 may vary.
+ *
+ * Counters which are an empty string are simply "holes" in the counter memory
+ * where no counter exists.
+ */
+
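+/*
+ * Illustrative index math under the layout above: the short name of counter
+ * c in the block of type t is entry (t * 64) + c of the relevant table, so
+ * "T60x_GPU_ACTIVE" is entry (JM_BLOCK * 64) + 6 of the T60x table below.
+ */
+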
+static const char * const hardware_counters_mali_t60x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T60x_MESSAGES_SENT",
+	"T60x_MESSAGES_RECEIVED",
+	"T60x_GPU_ACTIVE",
+	"T60x_IRQ_ACTIVE",
+	"T60x_JS0_JOBS",
+	"T60x_JS0_TASKS",
+	"T60x_JS0_ACTIVE",
+	"",
+	"T60x_JS0_WAIT_READ",
+	"T60x_JS0_WAIT_ISSUE",
+	"T60x_JS0_WAIT_DEPEND",
+	"T60x_JS0_WAIT_FINISH",
+	"T60x_JS1_JOBS",
+	"T60x_JS1_TASKS",
+	"T60x_JS1_ACTIVE",
+	"",
+	"T60x_JS1_WAIT_READ",
+	"T60x_JS1_WAIT_ISSUE",
+	"T60x_JS1_WAIT_DEPEND",
+	"T60x_JS1_WAIT_FINISH",
+	"T60x_JS2_JOBS",
+	"T60x_JS2_TASKS",
+	"T60x_JS2_ACTIVE",
+	"",
+	"T60x_JS2_WAIT_READ",
+	"T60x_JS2_WAIT_ISSUE",
+	"T60x_JS2_WAIT_DEPEND",
+	"T60x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Tiler */
+	"",
+	"",
+	"",
+	"T60x_TI_JOBS_PROCESSED",
+	"T60x_TI_TRIANGLES",
+	"T60x_TI_QUADS",
+	"T60x_TI_POLYGONS",
+	"T60x_TI_POINTS",
+	"T60x_TI_LINES",
+	"T60x_TI_VCACHE_HIT",
+	"T60x_TI_VCACHE_MISS",
+	"T60x_TI_FRONT_FACING",
+	"T60x_TI_BACK_FACING",
+	"T60x_TI_PRIM_VISIBLE",
+	"T60x_TI_PRIM_CULLED",
+	"T60x_TI_PRIM_CLIPPED",
+	"T60x_TI_LEVEL0",
+	"T60x_TI_LEVEL1",
+	"T60x_TI_LEVEL2",
+	"T60x_TI_LEVEL3",
+	"T60x_TI_LEVEL4",
+	"T60x_TI_LEVEL5",
+	"T60x_TI_LEVEL6",
+	"T60x_TI_LEVEL7",
+	"T60x_TI_COMMAND_1",
+	"T60x_TI_COMMAND_2",
+	"T60x_TI_COMMAND_3",
+	"T60x_TI_COMMAND_4",
+	"T60x_TI_COMMAND_4_7",
+	"T60x_TI_COMMAND_8_15",
+	"T60x_TI_COMMAND_16_63",
+	"T60x_TI_COMMAND_64",
+	"T60x_TI_COMPRESS_IN",
+	"T60x_TI_COMPRESS_OUT",
+	"T60x_TI_COMPRESS_FLUSH",
+	"T60x_TI_TIMESTAMPS",
+	"T60x_TI_PCACHE_HIT",
+	"T60x_TI_PCACHE_MISS",
+	"T60x_TI_PCACHE_LINE",
+	"T60x_TI_PCACHE_STALL",
+	"T60x_TI_WRBUF_HIT",
+	"T60x_TI_WRBUF_MISS",
+	"T60x_TI_WRBUF_LINE",
+	"T60x_TI_WRBUF_PARTIAL",
+	"T60x_TI_WRBUF_STALL",
+	"T60x_TI_ACTIVE",
+	"T60x_TI_LOADING_DESC",
+	"T60x_TI_INDEX_WAIT",
+	"T60x_TI_INDEX_RANGE_WAIT",
+	"T60x_TI_VERTEX_WAIT",
+	"T60x_TI_PCACHE_WAIT",
+	"T60x_TI_WRBUF_WAIT",
+	"T60x_TI_BUS_READ",
+	"T60x_TI_BUS_WRITE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T60x_TI_UTLB_STALL",
+	"T60x_TI_UTLB_REPLAY_MISS",
+	"T60x_TI_UTLB_REPLAY_FULL",
+	"T60x_TI_UTLB_NEW_MISS",
+	"T60x_TI_UTLB_HIT",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T60x_FRAG_ACTIVE",
+	"T60x_FRAG_PRIMITIVES",
+	"T60x_FRAG_PRIMITIVES_DROPPED",
+	"T60x_FRAG_CYCLES_DESC",
+	"T60x_FRAG_CYCLES_PLR",
+	"T60x_FRAG_CYCLES_VERT",
+	"T60x_FRAG_CYCLES_TRISETUP",
+	"T60x_FRAG_CYCLES_RAST",
+	"T60x_FRAG_THREADS",
+	"T60x_FRAG_DUMMY_THREADS",
+	"T60x_FRAG_QUADS_RAST",
+	"T60x_FRAG_QUADS_EZS_TEST",
+	"T60x_FRAG_QUADS_EZS_KILLED",
+	"T60x_FRAG_THREADS_LZS_TEST",
+	"T60x_FRAG_THREADS_LZS_KILLED",
+	"T60x_FRAG_CYCLES_NO_TILE",
+	"T60x_FRAG_NUM_TILES",
+	"T60x_FRAG_TRANS_ELIM",
+	"T60x_COMPUTE_ACTIVE",
+	"T60x_COMPUTE_TASKS",
+	"T60x_COMPUTE_THREADS",
+	"T60x_COMPUTE_CYCLES_DESC",
+	"T60x_TRIPIPE_ACTIVE",
+	"T60x_ARITH_WORDS",
+	"T60x_ARITH_CYCLES_REG",
+	"T60x_ARITH_CYCLES_L0",
+	"T60x_ARITH_FRAG_DEPEND",
+	"T60x_LS_WORDS",
+	"T60x_LS_ISSUES",
+	"T60x_LS_RESTARTS",
+	"T60x_LS_REISSUES_MISS",
+	"T60x_LS_REISSUES_VD",
+	"T60x_LS_REISSUE_ATTRIB_MISS",
+	"T60x_LS_NO_WB",
+	"T60x_TEX_WORDS",
+	"T60x_TEX_BUBBLES",
+	"T60x_TEX_WORDS_L0",
+	"T60x_TEX_WORDS_DESC",
+	"T60x_TEX_ISSUES",
+	"T60x_TEX_RECIRC_FMISS",
+	"T60x_TEX_RECIRC_DESC",
+	"T60x_TEX_RECIRC_MULTI",
+	"T60x_TEX_RECIRC_PMISS",
+	"T60x_TEX_RECIRC_CONF",
+	"T60x_LSC_READ_HITS",
+	"T60x_LSC_READ_MISSES",
+	"T60x_LSC_WRITE_HITS",
+	"T60x_LSC_WRITE_MISSES",
+	"T60x_LSC_ATOMIC_HITS",
+	"T60x_LSC_ATOMIC_MISSES",
+	"T60x_LSC_LINE_FETCHES",
+	"T60x_LSC_DIRTY_LINE",
+	"T60x_LSC_SNOOPS",
+	"T60x_AXI_TLB_STALL",
+	"T60x_AXI_TLB_MISS",
+	"T60x_AXI_TLB_TRANSACTION",
+	"T60x_LS_TLB_MISS",
+	"T60x_LS_TLB_HIT",
+	"T60x_AXI_BEATS_READ",
+	"T60x_AXI_BEATS_WRITTEN",
+
+	/* L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T60x_MMU_HIT",
+	"T60x_MMU_NEW_MISS",
+	"T60x_MMU_REPLAY_FULL",
+	"T60x_MMU_REPLAY_MISS",
+	"T60x_MMU_TABLE_WALK",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T60x_UTLB_HIT",
+	"T60x_UTLB_NEW_MISS",
+	"T60x_UTLB_REPLAY_FULL",
+	"T60x_UTLB_REPLAY_MISS",
+	"T60x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T60x_L2_EXT_WRITE_BEATS",
+	"T60x_L2_EXT_READ_BEATS",
+	"T60x_L2_ANY_LOOKUP",
+	"T60x_L2_READ_LOOKUP",
+	"T60x_L2_SREAD_LOOKUP",
+	"T60x_L2_READ_REPLAY",
+	"T60x_L2_READ_SNOOP",
+	"T60x_L2_READ_HIT",
+	"T60x_L2_CLEAN_MISS",
+	"T60x_L2_WRITE_LOOKUP",
+	"T60x_L2_SWRITE_LOOKUP",
+	"T60x_L2_WRITE_REPLAY",
+	"T60x_L2_WRITE_SNOOP",
+	"T60x_L2_WRITE_HIT",
+	"T60x_L2_EXT_READ_FULL",
+	"T60x_L2_EXT_READ_HALF",
+	"T60x_L2_EXT_WRITE_FULL",
+	"T60x_L2_EXT_WRITE_HALF",
+	"T60x_L2_EXT_READ",
+	"T60x_L2_EXT_READ_LINE",
+	"T60x_L2_EXT_WRITE",
+	"T60x_L2_EXT_WRITE_LINE",
+	"T60x_L2_EXT_WRITE_SMALL",
+	"T60x_L2_EXT_BARRIER",
+	"T60x_L2_EXT_AR_STALL",
+	"T60x_L2_EXT_R_BUF_FULL",
+	"T60x_L2_EXT_RD_BUF_FULL",
+	"T60x_L2_EXT_R_RAW",
+	"T60x_L2_EXT_W_STALL",
+	"T60x_L2_EXT_W_BUF_FULL",
+	"T60x_L2_EXT_R_W_HAZARD",
+	"T60x_L2_TAG_HAZARD",
+	"T60x_L2_SNOOP_FULL",
+	"T60x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t62x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T62x_MESSAGES_SENT",
+	"T62x_MESSAGES_RECEIVED",
+	"T62x_GPU_ACTIVE",
+	"T62x_IRQ_ACTIVE",
+	"T62x_JS0_JOBS",
+	"T62x_JS0_TASKS",
+	"T62x_JS0_ACTIVE",
+	"",
+	"T62x_JS0_WAIT_READ",
+	"T62x_JS0_WAIT_ISSUE",
+	"T62x_JS0_WAIT_DEPEND",
+	"T62x_JS0_WAIT_FINISH",
+	"T62x_JS1_JOBS",
+	"T62x_JS1_TASKS",
+	"T62x_JS1_ACTIVE",
+	"",
+	"T62x_JS1_WAIT_READ",
+	"T62x_JS1_WAIT_ISSUE",
+	"T62x_JS1_WAIT_DEPEND",
+	"T62x_JS1_WAIT_FINISH",
+	"T62x_JS2_JOBS",
+	"T62x_JS2_TASKS",
+	"T62x_JS2_ACTIVE",
+	"",
+	"T62x_JS2_WAIT_READ",
+	"T62x_JS2_WAIT_ISSUE",
+	"T62x_JS2_WAIT_DEPEND",
+	"T62x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Tiler */
+	"",
+	"",
+	"",
+	"T62x_TI_JOBS_PROCESSED",
+	"T62x_TI_TRIANGLES",
+	"T62x_TI_QUADS",
+	"T62x_TI_POLYGONS",
+	"T62x_TI_POINTS",
+	"T62x_TI_LINES",
+	"T62x_TI_VCACHE_HIT",
+	"T62x_TI_VCACHE_MISS",
+	"T62x_TI_FRONT_FACING",
+	"T62x_TI_BACK_FACING",
+	"T62x_TI_PRIM_VISIBLE",
+	"T62x_TI_PRIM_CULLED",
+	"T62x_TI_PRIM_CLIPPED",
+	"T62x_TI_LEVEL0",
+	"T62x_TI_LEVEL1",
+	"T62x_TI_LEVEL2",
+	"T62x_TI_LEVEL3",
+	"T62x_TI_LEVEL4",
+	"T62x_TI_LEVEL5",
+	"T62x_TI_LEVEL6",
+	"T62x_TI_LEVEL7",
+	"T62x_TI_COMMAND_1",
+	"T62x_TI_COMMAND_2",
+	"T62x_TI_COMMAND_3",
+	"T62x_TI_COMMAND_4",
+	"T62x_TI_COMMAND_5_7",
+	"T62x_TI_COMMAND_8_15",
+	"T62x_TI_COMMAND_16_63",
+	"T62x_TI_COMMAND_64",
+	"T62x_TI_COMPRESS_IN",
+	"T62x_TI_COMPRESS_OUT",
+	"T62x_TI_COMPRESS_FLUSH",
+	"T62x_TI_TIMESTAMPS",
+	"T62x_TI_PCACHE_HIT",
+	"T62x_TI_PCACHE_MISS",
+	"T62x_TI_PCACHE_LINE",
+	"T62x_TI_PCACHE_STALL",
+	"T62x_TI_WRBUF_HIT",
+	"T62x_TI_WRBUF_MISS",
+	"T62x_TI_WRBUF_LINE",
+	"T62x_TI_WRBUF_PARTIAL",
+	"T62x_TI_WRBUF_STALL",
+	"T62x_TI_ACTIVE",
+	"T62x_TI_LOADING_DESC",
+	"T62x_TI_INDEX_WAIT",
+	"T62x_TI_INDEX_RANGE_WAIT",
+	"T62x_TI_VERTEX_WAIT",
+	"T62x_TI_PCACHE_WAIT",
+	"T62x_TI_WRBUF_WAIT",
+	"T62x_TI_BUS_READ",
+	"T62x_TI_BUS_WRITE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T62x_TI_UTLB_STALL",
+	"T62x_TI_UTLB_REPLAY_MISS",
+	"T62x_TI_UTLB_REPLAY_FULL",
+	"T62x_TI_UTLB_NEW_MISS",
+	"T62x_TI_UTLB_HIT",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"T62x_SHADER_CORE_ACTIVE",
+	"T62x_FRAG_ACTIVE",
+	"T62x_FRAG_PRIMITIVES",
+	"T62x_FRAG_PRIMITIVES_DROPPED",
+	"T62x_FRAG_CYCLES_DESC",
+	"T62x_FRAG_CYCLES_FPKQ_ACTIVE",
+	"T62x_FRAG_CYCLES_VERT",
+	"T62x_FRAG_CYCLES_TRISETUP",
+	"T62x_FRAG_CYCLES_EZS_ACTIVE",
+	"T62x_FRAG_THREADS",
+	"T62x_FRAG_DUMMY_THREADS",
+	"T62x_FRAG_QUADS_RAST",
+	"T62x_FRAG_QUADS_EZS_TEST",
+	"T62x_FRAG_QUADS_EZS_KILLED",
+	"T62x_FRAG_THREADS_LZS_TEST",
+	"T62x_FRAG_THREADS_LZS_KILLED",
+	"T62x_FRAG_CYCLES_NO_TILE",
+	"T62x_FRAG_NUM_TILES",
+	"T62x_FRAG_TRANS_ELIM",
+	"T62x_COMPUTE_ACTIVE",
+	"T62x_COMPUTE_TASKS",
+	"T62x_COMPUTE_THREADS",
+	"T62x_COMPUTE_CYCLES_DESC",
+	"T62x_TRIPIPE_ACTIVE",
+	"T62x_ARITH_WORDS",
+	"T62x_ARITH_CYCLES_REG",
+	"T62x_ARITH_CYCLES_L0",
+	"T62x_ARITH_FRAG_DEPEND",
+	"T62x_LS_WORDS",
+	"T62x_LS_ISSUES",
+	"T62x_LS_RESTARTS",
+	"T62x_LS_REISSUES_MISS",
+	"T62x_LS_REISSUES_VD",
+	"T62x_LS_REISSUE_ATTRIB_MISS",
+	"T62x_LS_NO_WB",
+	"T62x_TEX_WORDS",
+	"T62x_TEX_BUBBLES",
+	"T62x_TEX_WORDS_L0",
+	"T62x_TEX_WORDS_DESC",
+	"T62x_TEX_ISSUES",
+	"T62x_TEX_RECIRC_FMISS",
+	"T62x_TEX_RECIRC_DESC",
+	"T62x_TEX_RECIRC_MULTI",
+	"T62x_TEX_RECIRC_PMISS",
+	"T62x_TEX_RECIRC_CONF",
+	"T62x_LSC_READ_HITS",
+	"T62x_LSC_READ_MISSES",
+	"T62x_LSC_WRITE_HITS",
+	"T62x_LSC_WRITE_MISSES",
+	"T62x_LSC_ATOMIC_HITS",
+	"T62x_LSC_ATOMIC_MISSES",
+	"T62x_LSC_LINE_FETCHES",
+	"T62x_LSC_DIRTY_LINE",
+	"T62x_LSC_SNOOPS",
+	"T62x_AXI_TLB_STALL",
+	"T62x_AXI_TLB_MISS",
+	"T62x_AXI_TLB_TRANSACTION",
+	"T62x_LS_TLB_MISS",
+	"T62x_LS_TLB_HIT",
+	"T62x_AXI_BEATS_READ",
+	"T62x_AXI_BEATS_WRITTEN",
+
+	/* L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T62x_MMU_HIT",
+	"T62x_MMU_NEW_MISS",
+	"T62x_MMU_REPLAY_FULL",
+	"T62x_MMU_REPLAY_MISS",
+	"T62x_MMU_TABLE_WALK",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T62x_UTLB_HIT",
+	"T62x_UTLB_NEW_MISS",
+	"T62x_UTLB_REPLAY_FULL",
+	"T62x_UTLB_REPLAY_MISS",
+	"T62x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T62x_L2_EXT_WRITE_BEATS",
+	"T62x_L2_EXT_READ_BEATS",
+	"T62x_L2_ANY_LOOKUP",
+	"T62x_L2_READ_LOOKUP",
+	"T62x_L2_SREAD_LOOKUP",
+	"T62x_L2_READ_REPLAY",
+	"T62x_L2_READ_SNOOP",
+	"T62x_L2_READ_HIT",
+	"T62x_L2_CLEAN_MISS",
+	"T62x_L2_WRITE_LOOKUP",
+	"T62x_L2_SWRITE_LOOKUP",
+	"T62x_L2_WRITE_REPLAY",
+	"T62x_L2_WRITE_SNOOP",
+	"T62x_L2_WRITE_HIT",
+	"T62x_L2_EXT_READ_FULL",
+	"T62x_L2_EXT_READ_HALF",
+	"T62x_L2_EXT_WRITE_FULL",
+	"T62x_L2_EXT_WRITE_HALF",
+	"T62x_L2_EXT_READ",
+	"T62x_L2_EXT_READ_LINE",
+	"T62x_L2_EXT_WRITE",
+	"T62x_L2_EXT_WRITE_LINE",
+	"T62x_L2_EXT_WRITE_SMALL",
+	"T62x_L2_EXT_BARRIER",
+	"T62x_L2_EXT_AR_STALL",
+	"T62x_L2_EXT_R_BUF_FULL",
+	"T62x_L2_EXT_RD_BUF_FULL",
+	"T62x_L2_EXT_R_RAW",
+	"T62x_L2_EXT_W_STALL",
+	"T62x_L2_EXT_W_BUF_FULL",
+	"T62x_L2_EXT_R_W_HAZARD",
+	"T62x_L2_TAG_HAZARD",
+	"T62x_L2_SNOOP_FULL",
+	"T62x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t72x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T72x_GPU_ACTIVE",
+	"T72x_IRQ_ACTIVE",
+	"T72x_JS0_JOBS",
+	"T72x_JS0_TASKS",
+	"T72x_JS0_ACTIVE",
+	"T72x_JS1_JOBS",
+	"T72x_JS1_TASKS",
+	"T72x_JS1_ACTIVE",
+	"T72x_JS2_JOBS",
+	"T72x_JS2_TASKS",
+	"T72x_JS2_ACTIVE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Tiler */
+	"",
+	"",
+	"",
+	"T72x_TI_JOBS_PROCESSED",
+	"T72x_TI_TRIANGLES",
+	"T72x_TI_QUADS",
+	"T72x_TI_POLYGONS",
+	"T72x_TI_POINTS",
+	"T72x_TI_LINES",
+	"T72x_TI_FRONT_FACING",
+	"T72x_TI_BACK_FACING",
+	"T72x_TI_PRIM_VISIBLE",
+	"T72x_TI_PRIM_CULLED",
+	"T72x_TI_PRIM_CLIPPED",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T72x_TI_ACTIVE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T72x_FRAG_ACTIVE",
+	"T72x_FRAG_PRIMITIVES",
+	"T72x_FRAG_PRIMITIVES_DROPPED",
+	"T72x_FRAG_THREADS",
+	"T72x_FRAG_DUMMY_THREADS",
+	"T72x_FRAG_QUADS_RAST",
+	"T72x_FRAG_QUADS_EZS_TEST",
+	"T72x_FRAG_QUADS_EZS_KILLED",
+	"T72x_FRAG_THREADS_LZS_TEST",
+	"T72x_FRAG_THREADS_LZS_KILLED",
+	"T72x_FRAG_CYCLES_NO_TILE",
+	"T72x_FRAG_NUM_TILES",
+	"T72x_FRAG_TRANS_ELIM",
+	"T72x_COMPUTE_ACTIVE",
+	"T72x_COMPUTE_TASKS",
+	"T72x_COMPUTE_THREADS",
+	"T72x_TRIPIPE_ACTIVE",
+	"T72x_ARITH_WORDS",
+	"T72x_ARITH_CYCLES_REG",
+	"T72x_LS_WORDS",
+	"T72x_LS_ISSUES",
+	"T72x_LS_RESTARTS",
+	"T72x_LS_REISSUES_MISS",
+	"T72x_TEX_WORDS",
+	"T72x_TEX_BUBBLES",
+	"T72x_TEX_ISSUES",
+	"T72x_LSC_READ_HITS",
+	"T72x_LSC_READ_MISSES",
+	"T72x_LSC_WRITE_HITS",
+	"T72x_LSC_WRITE_MISSES",
+	"T72x_LSC_ATOMIC_HITS",
+	"T72x_LSC_ATOMIC_MISSES",
+	"T72x_LSC_LINE_FETCHES",
+	"T72x_LSC_DIRTY_LINE",
+	"T72x_LSC_SNOOPS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T72x_L2_EXT_WRITE_BEAT",
+	"T72x_L2_EXT_READ_BEAT",
+	"T72x_L2_READ_SNOOP",
+	"T72x_L2_READ_HIT",
+	"T72x_L2_WRITE_SNOOP",
+	"T72x_L2_WRITE_HIT",
+	"T72x_L2_EXT_WRITE_SMALL",
+	"T72x_L2_EXT_BARRIER",
+	"T72x_L2_EXT_AR_STALL",
+	"T72x_L2_EXT_W_STALL",
+	"T72x_L2_SNOOP_FULL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	""
+};
+
+static const char * const hardware_counters_mali_t76x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T76x_MESSAGES_SENT",
+	"T76x_MESSAGES_RECEIVED",
+	"T76x_GPU_ACTIVE",
+	"T76x_IRQ_ACTIVE",
+	"T76x_JS0_JOBS",
+	"T76x_JS0_TASKS",
+	"T76x_JS0_ACTIVE",
+	"",
+	"T76x_JS0_WAIT_READ",
+	"T76x_JS0_WAIT_ISSUE",
+	"T76x_JS0_WAIT_DEPEND",
+	"T76x_JS0_WAIT_FINISH",
+	"T76x_JS1_JOBS",
+	"T76x_JS1_TASKS",
+	"T76x_JS1_ACTIVE",
+	"",
+	"T76x_JS1_WAIT_READ",
+	"T76x_JS1_WAIT_ISSUE",
+	"T76x_JS1_WAIT_DEPEND",
+	"T76x_JS1_WAIT_FINISH",
+	"T76x_JS2_JOBS",
+	"T76x_JS2_TASKS",
+	"T76x_JS2_ACTIVE",
+	"",
+	"T76x_JS2_WAIT_READ",
+	"T76x_JS2_WAIT_ISSUE",
+	"T76x_JS2_WAIT_DEPEND",
+	"T76x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Tiler */
+	"",
+	"",
+	"",
+	"T76x_TI_JOBS_PROCESSED",
+	"T76x_TI_TRIANGLES",
+	"T76x_TI_QUADS",
+	"T76x_TI_POLYGONS",
+	"T76x_TI_POINTS",
+	"T76x_TI_LINES",
+	"T76x_TI_VCACHE_HIT",
+	"T76x_TI_VCACHE_MISS",
+	"T76x_TI_FRONT_FACING",
+	"T76x_TI_BACK_FACING",
+	"T76x_TI_PRIM_VISIBLE",
+	"T76x_TI_PRIM_CULLED",
+	"T76x_TI_PRIM_CLIPPED",
+	"T76x_TI_LEVEL0",
+	"T76x_TI_LEVEL1",
+	"T76x_TI_LEVEL2",
+	"T76x_TI_LEVEL3",
+	"T76x_TI_LEVEL4",
+	"T76x_TI_LEVEL5",
+	"T76x_TI_LEVEL6",
+	"T76x_TI_LEVEL7",
+	"T76x_TI_COMMAND_1",
+	"T76x_TI_COMMAND_2",
+	"T76x_TI_COMMAND_3",
+	"T76x_TI_COMMAND_4",
+	"T76x_TI_COMMAND_5_7",
+	"T76x_TI_COMMAND_8_15",
+	"T76x_TI_COMMAND_16_63",
+	"T76x_TI_COMMAND_64",
+	"T76x_TI_COMPRESS_IN",
+	"T76x_TI_COMPRESS_OUT",
+	"T76x_TI_COMPRESS_FLUSH",
+	"T76x_TI_TIMESTAMPS",
+	"T76x_TI_PCACHE_HIT",
+	"T76x_TI_PCACHE_MISS",
+	"T76x_TI_PCACHE_LINE",
+	"T76x_TI_PCACHE_STALL",
+	"T76x_TI_WRBUF_HIT",
+	"T76x_TI_WRBUF_MISS",
+	"T76x_TI_WRBUF_LINE",
+	"T76x_TI_WRBUF_PARTIAL",
+	"T76x_TI_WRBUF_STALL",
+	"T76x_TI_ACTIVE",
+	"T76x_TI_LOADING_DESC",
+	"T76x_TI_INDEX_WAIT",
+	"T76x_TI_INDEX_RANGE_WAIT",
+	"T76x_TI_VERTEX_WAIT",
+	"T76x_TI_PCACHE_WAIT",
+	"T76x_TI_WRBUF_WAIT",
+	"T76x_TI_BUS_READ",
+	"T76x_TI_BUS_WRITE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T76x_TI_UTLB_HIT",
+	"T76x_TI_UTLB_NEW_MISS",
+	"T76x_TI_UTLB_REPLAY_FULL",
+	"T76x_TI_UTLB_REPLAY_MISS",
+	"T76x_TI_UTLB_STALL",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T76x_FRAG_ACTIVE",
+	"T76x_FRAG_PRIMITIVES",
+	"T76x_FRAG_PRIMITIVES_DROPPED",
+	"T76x_FRAG_CYCLES_DESC",
+	"T76x_FRAG_CYCLES_FPKQ_ACTIVE",
+	"T76x_FRAG_CYCLES_VERT",
+	"T76x_FRAG_CYCLES_TRISETUP",
+	"T76x_FRAG_CYCLES_EZS_ACTIVE",
+	"T76x_FRAG_THREADS",
+	"T76x_FRAG_DUMMY_THREADS",
+	"T76x_FRAG_QUADS_RAST",
+	"T76x_FRAG_QUADS_EZS_TEST",
+	"T76x_FRAG_QUADS_EZS_KILLED",
+	"T76x_FRAG_THREADS_LZS_TEST",
+	"T76x_FRAG_THREADS_LZS_KILLED",
+	"T76x_FRAG_CYCLES_NO_TILE",
+	"T76x_FRAG_NUM_TILES",
+	"T76x_FRAG_TRANS_ELIM",
+	"T76x_COMPUTE_ACTIVE",
+	"T76x_COMPUTE_TASKS",
+	"T76x_COMPUTE_THREADS",
+	"T76x_COMPUTE_CYCLES_DESC",
+	"T76x_TRIPIPE_ACTIVE",
+	"T76x_ARITH_WORDS",
+	"T76x_ARITH_CYCLES_REG",
+	"T76x_ARITH_CYCLES_L0",
+	"T76x_ARITH_FRAG_DEPEND",
+	"T76x_LS_WORDS",
+	"T76x_LS_ISSUES",
+	"T76x_LS_REISSUE_ATTR",
+	"T76x_LS_REISSUES_VARY",
+	"T76x_LS_VARY_RV_MISS",
+	"T76x_LS_VARY_RV_HIT",
+	"T76x_LS_NO_UNPARK",
+	"T76x_TEX_WORDS",
+	"T76x_TEX_BUBBLES",
+	"T76x_TEX_WORDS_L0",
+	"T76x_TEX_WORDS_DESC",
+	"T76x_TEX_ISSUES",
+	"T76x_TEX_RECIRC_FMISS",
+	"T76x_TEX_RECIRC_DESC",
+	"T76x_TEX_RECIRC_MULTI",
+	"T76x_TEX_RECIRC_PMISS",
+	"T76x_TEX_RECIRC_CONF",
+	"T76x_LSC_READ_HITS",
+	"T76x_LSC_READ_OP",
+	"T76x_LSC_WRITE_HITS",
+	"T76x_LSC_WRITE_OP",
+	"T76x_LSC_ATOMIC_HITS",
+	"T76x_LSC_ATOMIC_OP",
+	"T76x_LSC_LINE_FETCHES",
+	"T76x_LSC_DIRTY_LINE",
+	"T76x_LSC_SNOOPS",
+	"T76x_AXI_TLB_STALL",
+	"T76x_AXI_TLB_MISS",
+	"T76x_AXI_TLB_TRANSACTION",
+	"T76x_LS_TLB_MISS",
+	"T76x_LS_TLB_HIT",
+	"T76x_AXI_BEATS_READ",
+	"T76x_AXI_BEATS_WRITTEN",
+
+	/* L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T76x_MMU_HIT",
+	"T76x_MMU_NEW_MISS",
+	"T76x_MMU_REPLAY_FULL",
+	"T76x_MMU_REPLAY_MISS",
+	"T76x_MMU_TABLE_WALK",
+	"T76x_MMU_REQUESTS",
+	"",
+	"",
+	"T76x_UTLB_HIT",
+	"T76x_UTLB_NEW_MISS",
+	"T76x_UTLB_REPLAY_FULL",
+	"T76x_UTLB_REPLAY_MISS",
+	"T76x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T76x_L2_EXT_WRITE_BEATS",
+	"T76x_L2_EXT_READ_BEATS",
+	"T76x_L2_ANY_LOOKUP",
+	"T76x_L2_READ_LOOKUP",
+	"T76x_L2_SREAD_LOOKUP",
+	"T76x_L2_READ_REPLAY",
+	"T76x_L2_READ_SNOOP",
+	"T76x_L2_READ_HIT",
+	"T76x_L2_CLEAN_MISS",
+	"T76x_L2_WRITE_LOOKUP",
+	"T76x_L2_SWRITE_LOOKUP",
+	"T76x_L2_WRITE_REPLAY",
+	"T76x_L2_WRITE_SNOOP",
+	"T76x_L2_WRITE_HIT",
+	"T76x_L2_EXT_READ_FULL",
+	"",
+	"T76x_L2_EXT_WRITE_FULL",
+	"T76x_L2_EXT_R_W_HAZARD",
+	"T76x_L2_EXT_READ",
+	"T76x_L2_EXT_READ_LINE",
+	"T76x_L2_EXT_WRITE",
+	"T76x_L2_EXT_WRITE_LINE",
+	"T76x_L2_EXT_WRITE_SMALL",
+	"T76x_L2_EXT_BARRIER",
+	"T76x_L2_EXT_AR_STALL",
+	"T76x_L2_EXT_R_BUF_FULL",
+	"T76x_L2_EXT_RD_BUF_FULL",
+	"T76x_L2_EXT_R_RAW",
+	"T76x_L2_EXT_W_STALL",
+	"T76x_L2_EXT_W_BUF_FULL",
+	"T76x_L2_EXT_R_BUF_FULL",
+	"T76x_L2_TAG_HAZARD",
+	"T76x_L2_SNOOP_FULL",
+	"T76x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t82x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T82x_MESSAGES_SENT",
+	"T82x_MESSAGES_RECEIVED",
+	"T82x_GPU_ACTIVE",
+	"T82x_IRQ_ACTIVE",
+	"T82x_JS0_JOBS",
+	"T82x_JS0_TASKS",
+	"T82x_JS0_ACTIVE",
+	"",
+	"T82x_JS0_WAIT_READ",
+	"T82x_JS0_WAIT_ISSUE",
+	"T82x_JS0_WAIT_DEPEND",
+	"T82x_JS0_WAIT_FINISH",
+	"T82x_JS1_JOBS",
+	"T82x_JS1_TASKS",
+	"T82x_JS1_ACTIVE",
+	"",
+	"T82x_JS1_WAIT_READ",
+	"T82x_JS1_WAIT_ISSUE",
+	"T82x_JS1_WAIT_DEPEND",
+	"T82x_JS1_WAIT_FINISH",
+	"T82x_JS2_JOBS",
+	"T82x_JS2_TASKS",
+	"T82x_JS2_ACTIVE",
+	"",
+	"T82x_JS2_WAIT_READ",
+	"T82x_JS2_WAIT_ISSUE",
+	"T82x_JS2_WAIT_DEPEND",
+	"T82x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Tiler */
+	"",
+	"",
+	"",
+	"T82x_TI_JOBS_PROCESSED",
+	"T82x_TI_TRIANGLES",
+	"T82x_TI_QUADS",
+	"T82x_TI_POLYGONS",
+	"T82x_TI_POINTS",
+	"T82x_TI_LINES",
+	"T82x_TI_FRONT_FACING",
+	"T82x_TI_BACK_FACING",
+	"T82x_TI_PRIM_VISIBLE",
+	"T82x_TI_PRIM_CULLED",
+	"T82x_TI_PRIM_CLIPPED",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T82x_TI_ACTIVE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T82x_FRAG_ACTIVE",
+	"T82x_FRAG_PRIMITIVES",
+	"T82x_FRAG_PRIMITIVES_DROPPED",
+	"T82x_FRAG_CYCLES_DESC",
+	"T82x_FRAG_CYCLES_FPKQ_ACTIVE",
+	"T82x_FRAG_CYCLES_VERT",
+	"T82x_FRAG_CYCLES_TRISETUP",
+	"T82x_FRAG_CYCLES_EZS_ACTIVE",
+	"T82x_FRAG_THREADS",
+	"T82x_FRAG_DUMMY_THREADS",
+	"T82x_FRAG_QUADS_RAST",
+	"T82x_FRAG_QUADS_EZS_TEST",
+	"T82x_FRAG_QUADS_EZS_KILLED",
+	"T82x_FRAG_THREADS_LZS_TEST",
+	"T82x_FRAG_THREADS_LZS_KILLED",
+	"T82x_FRAG_CYCLES_NO_TILE",
+	"T82x_FRAG_NUM_TILES",
+	"T82x_FRAG_TRANS_ELIM",
+	"T82x_COMPUTE_ACTIVE",
+	"T82x_COMPUTE_TASKS",
+	"T82x_COMPUTE_THREADS",
+	"T82x_COMPUTE_CYCLES_DESC",
+	"T82x_TRIPIPE_ACTIVE",
+	"T82x_ARITH_WORDS",
+	"T82x_ARITH_CYCLES_REG",
+	"T82x_ARITH_CYCLES_L0",
+	"T82x_ARITH_FRAG_DEPEND",
+	"T82x_LS_WORDS",
+	"T82x_LS_ISSUES",
+	"T82x_LS_REISSUE_ATTR",
+	"T82x_LS_REISSUES_VARY",
+	"T82x_LS_VARY_RV_MISS",
+	"T82x_LS_VARY_RV_HIT",
+	"T82x_LS_NO_UNPARK",
+	"T82x_TEX_WORDS",
+	"T82x_TEX_BUBBLES",
+	"T82x_TEX_WORDS_L0",
+	"T82x_TEX_WORDS_DESC",
+	"T82x_TEX_ISSUES",
+	"T82x_TEX_RECIRC_FMISS",
+	"T82x_TEX_RECIRC_DESC",
+	"T82x_TEX_RECIRC_MULTI",
+	"T82x_TEX_RECIRC_PMISS",
+	"T82x_TEX_RECIRC_CONF",
+	"T82x_LSC_READ_HITS",
+	"T82x_LSC_READ_OP",
+	"T82x_LSC_WRITE_HITS",
+	"T82x_LSC_WRITE_OP",
+	"T82x_LSC_ATOMIC_HITS",
+	"T82x_LSC_ATOMIC_OP",
+	"T82x_LSC_LINE_FETCHES",
+	"T82x_LSC_DIRTY_LINE",
+	"T82x_LSC_SNOOPS",
+	"T82x_AXI_TLB_STALL",
+	"T82x_AXI_TLB_MISS",
+	"T82x_AXI_TLB_TRANSACTION",
+	"T82x_LS_TLB_MISS",
+	"T82x_LS_TLB_HIT",
+	"T82x_AXI_BEATS_READ",
+	"T82x_AXI_BEATS_WRITTEN",
+
+	/* L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T82x_MMU_HIT",
+	"T82x_MMU_NEW_MISS",
+	"T82x_MMU_REPLAY_FULL",
+	"T82x_MMU_REPLAY_MISS",
+	"T82x_MMU_TABLE_WALK",
+	"T82x_MMU_REQUESTS",
+	"",
+	"",
+	"T82x_UTLB_HIT",
+	"T82x_UTLB_NEW_MISS",
+	"T82x_UTLB_REPLAY_FULL",
+	"T82x_UTLB_REPLAY_MISS",
+	"T82x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T82x_L2_EXT_WRITE_BEATS",
+	"T82x_L2_EXT_READ_BEATS",
+	"T82x_L2_ANY_LOOKUP",
+	"T82x_L2_READ_LOOKUP",
+	"T82x_L2_SREAD_LOOKUP",
+	"T82x_L2_READ_REPLAY",
+	"T82x_L2_READ_SNOOP",
+	"T82x_L2_READ_HIT",
+	"T82x_L2_CLEAN_MISS",
+	"T82x_L2_WRITE_LOOKUP",
+	"T82x_L2_SWRITE_LOOKUP",
+	"T82x_L2_WRITE_REPLAY",
+	"T82x_L2_WRITE_SNOOP",
+	"T82x_L2_WRITE_HIT",
+	"T82x_L2_EXT_READ_FULL",
+	"",
+	"T82x_L2_EXT_WRITE_FULL",
+	"T82x_L2_EXT_R_W_HAZARD",
+	"T82x_L2_EXT_READ",
+	"T82x_L2_EXT_READ_LINE",
+	"T82x_L2_EXT_WRITE",
+	"T82x_L2_EXT_WRITE_LINE",
+	"T82x_L2_EXT_WRITE_SMALL",
+	"T82x_L2_EXT_BARRIER",
+	"T82x_L2_EXT_AR_STALL",
+	"T82x_L2_EXT_R_BUF_FULL",
+	"T82x_L2_EXT_RD_BUF_FULL",
+	"T82x_L2_EXT_R_RAW",
+	"T82x_L2_EXT_W_STALL",
+	"T82x_L2_EXT_W_BUF_FULL",
+	"T82x_L2_EXT_R_BUF_FULL",
+	"T82x_L2_TAG_HAZARD",
+	"T82x_L2_SNOOP_FULL",
+	"T82x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t83x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T83x_MESSAGES_SENT",
+	"T83x_MESSAGES_RECEIVED",
+	"T83x_GPU_ACTIVE",
+	"T83x_IRQ_ACTIVE",
+	"T83x_JS0_JOBS",
+	"T83x_JS0_TASKS",
+	"T83x_JS0_ACTIVE",
+	"",
+	"T83x_JS0_WAIT_READ",
+	"T83x_JS0_WAIT_ISSUE",
+	"T83x_JS0_WAIT_DEPEND",
+	"T83x_JS0_WAIT_FINISH",
+	"T83x_JS1_JOBS",
+	"T83x_JS1_TASKS",
+	"T83x_JS1_ACTIVE",
+	"",
+	"T83x_JS1_WAIT_READ",
+	"T83x_JS1_WAIT_ISSUE",
+	"T83x_JS1_WAIT_DEPEND",
+	"T83x_JS1_WAIT_FINISH",
+	"T83x_JS2_JOBS",
+	"T83x_JS2_TASKS",
+	"T83x_JS2_ACTIVE",
+	"",
+	"T83x_JS2_WAIT_READ",
+	"T83x_JS2_WAIT_ISSUE",
+	"T83x_JS2_WAIT_DEPEND",
+	"T83x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Tiler */
+	"",
+	"",
+	"",
+	"T83x_TI_JOBS_PROCESSED",
+	"T83x_TI_TRIANGLES",
+	"T83x_TI_QUADS",
+	"T83x_TI_POLYGONS",
+	"T83x_TI_POINTS",
+	"T83x_TI_LINES",
+	"T83x_TI_FRONT_FACING",
+	"T83x_TI_BACK_FACING",
+	"T83x_TI_PRIM_VISIBLE",
+	"T83x_TI_PRIM_CULLED",
+	"T83x_TI_PRIM_CLIPPED",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T83x_TI_ACTIVE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T83x_FRAG_ACTIVE",
+	"T83x_FRAG_PRIMITIVES",
+	"T83x_FRAG_PRIMITIVES_DROPPED",
+	"T83x_FRAG_CYCLES_DESC",
+	"T83x_FRAG_CYCLES_FPKQ_ACTIVE",
+	"T83x_FRAG_CYCLES_VERT",
+	"T83x_FRAG_CYCLES_TRISETUP",
+	"T83x_FRAG_CYCLES_EZS_ACTIVE",
+	"T83x_FRAG_THREADS",
+	"T83x_FRAG_DUMMY_THREADS",
+	"T83x_FRAG_QUADS_RAST",
+	"T83x_FRAG_QUADS_EZS_TEST",
+	"T83x_FRAG_QUADS_EZS_KILLED",
+	"T83x_FRAG_THREADS_LZS_TEST",
+	"T83x_FRAG_THREADS_LZS_KILLED",
+	"T83x_FRAG_CYCLES_NO_TILE",
+	"T83x_FRAG_NUM_TILES",
+	"T83x_FRAG_TRANS_ELIM",
+	"T83x_COMPUTE_ACTIVE",
+	"T83x_COMPUTE_TASKS",
+	"T83x_COMPUTE_THREADS",
+	"T83x_COMPUTE_CYCLES_DESC",
+	"T83x_TRIPIPE_ACTIVE",
+	"T83x_ARITH_WORDS",
+	"T83x_ARITH_CYCLES_REG",
+	"T83x_ARITH_CYCLES_L0",
+	"T83x_ARITH_FRAG_DEPEND",
+	"T83x_LS_WORDS",
+	"T83x_LS_ISSUES",
+	"T83x_LS_REISSUE_ATTR",
+	"T83x_LS_REISSUES_VARY",
+	"T83x_LS_VARY_RV_MISS",
+	"T83x_LS_VARY_RV_HIT",
+	"T83x_LS_NO_UNPARK",
+	"T83x_TEX_WORDS",
+	"T83x_TEX_BUBBLES",
+	"T83x_TEX_WORDS_L0",
+	"T83x_TEX_WORDS_DESC",
+	"T83x_TEX_ISSUES",
+	"T83x_TEX_RECIRC_FMISS",
+	"T83x_TEX_RECIRC_DESC",
+	"T83x_TEX_RECIRC_MULTI",
+	"T83x_TEX_RECIRC_PMISS",
+	"T83x_TEX_RECIRC_CONF",
+	"T83x_LSC_READ_HITS",
+	"T83x_LSC_READ_OP",
+	"T83x_LSC_WRITE_HITS",
+	"T83x_LSC_WRITE_OP",
+	"T83x_LSC_ATOMIC_HITS",
+	"T83x_LSC_ATOMIC_OP",
+	"T83x_LSC_LINE_FETCHES",
+	"T83x_LSC_DIRTY_LINE",
+	"T83x_LSC_SNOOPS",
+	"T83x_AXI_TLB_STALL",
+	"T83x_AXI_TLB_MISS",
+	"T83x_AXI_TLB_TRANSACTION",
+	"T83x_LS_TLB_MISS",
+	"T83x_LS_TLB_HIT",
+	"T83x_AXI_BEATS_READ",
+	"T83x_AXI_BEATS_WRITTEN",
+
+	/* L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T83x_MMU_HIT",
+	"T83x_MMU_NEW_MISS",
+	"T83x_MMU_REPLAY_FULL",
+	"T83x_MMU_REPLAY_MISS",
+	"T83x_MMU_TABLE_WALK",
+	"T83x_MMU_REQUESTS",
+	"",
+	"",
+	"T83x_UTLB_HIT",
+	"T83x_UTLB_NEW_MISS",
+	"T83x_UTLB_REPLAY_FULL",
+	"T83x_UTLB_REPLAY_MISS",
+	"T83x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T83x_L2_EXT_WRITE_BEATS",
+	"T83x_L2_EXT_READ_BEATS",
+	"T83x_L2_ANY_LOOKUP",
+	"T83x_L2_READ_LOOKUP",
+	"T83x_L2_SREAD_LOOKUP",
+	"T83x_L2_READ_REPLAY",
+	"T83x_L2_READ_SNOOP",
+	"T83x_L2_READ_HIT",
+	"T83x_L2_CLEAN_MISS",
+	"T83x_L2_WRITE_LOOKUP",
+	"T83x_L2_SWRITE_LOOKUP",
+	"T83x_L2_WRITE_REPLAY",
+	"T83x_L2_WRITE_SNOOP",
+	"T83x_L2_WRITE_HIT",
+	"T83x_L2_EXT_READ_FULL",
+	"",
+	"T83x_L2_EXT_WRITE_FULL",
+	"T83x_L2_EXT_R_W_HAZARD",
+	"T83x_L2_EXT_READ",
+	"T83x_L2_EXT_READ_LINE",
+	"T83x_L2_EXT_WRITE",
+	"T83x_L2_EXT_WRITE_LINE",
+	"T83x_L2_EXT_WRITE_SMALL",
+	"T83x_L2_EXT_BARRIER",
+	"T83x_L2_EXT_AR_STALL",
+	"T83x_L2_EXT_R_BUF_FULL",
+	"T83x_L2_EXT_RD_BUF_FULL",
+	"T83x_L2_EXT_R_RAW",
+	"T83x_L2_EXT_W_STALL",
+	"T83x_L2_EXT_W_BUF_FULL",
+	"T83x_L2_EXT_R_BUF_FULL",
+	"T83x_L2_TAG_HAZARD",
+	"T83x_L2_SNOOP_FULL",
+	"T83x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t86x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T86x_MESSAGES_SENT",
+	"T86x_MESSAGES_RECEIVED",
+	"T86x_GPU_ACTIVE",
+	"T86x_IRQ_ACTIVE",
+	"T86x_JS0_JOBS",
+	"T86x_JS0_TASKS",
+	"T86x_JS0_ACTIVE",
+	"",
+	"T86x_JS0_WAIT_READ",
+	"T86x_JS0_WAIT_ISSUE",
+	"T86x_JS0_WAIT_DEPEND",
+	"T86x_JS0_WAIT_FINISH",
+	"T86x_JS1_JOBS",
+	"T86x_JS1_TASKS",
+	"T86x_JS1_ACTIVE",
+	"",
+	"T86x_JS1_WAIT_READ",
+	"T86x_JS1_WAIT_ISSUE",
+	"T86x_JS1_WAIT_DEPEND",
+	"T86x_JS1_WAIT_FINISH",
+	"T86x_JS2_JOBS",
+	"T86x_JS2_TASKS",
+	"T86x_JS2_ACTIVE",
+	"",
+	"T86x_JS2_WAIT_READ",
+	"T86x_JS2_WAIT_ISSUE",
+	"T86x_JS2_WAIT_DEPEND",
+	"T86x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Tiler */
+	"",
+	"",
+	"",
+	"T86x_TI_JOBS_PROCESSED",
+	"T86x_TI_TRIANGLES",
+	"T86x_TI_QUADS",
+	"T86x_TI_POLYGONS",
+	"T86x_TI_POINTS",
+	"T86x_TI_LINES",
+	"T86x_TI_VCACHE_HIT",
+	"T86x_TI_VCACHE_MISS",
+	"T86x_TI_FRONT_FACING",
+	"T86x_TI_BACK_FACING",
+	"T86x_TI_PRIM_VISIBLE",
+	"T86x_TI_PRIM_CULLED",
+	"T86x_TI_PRIM_CLIPPED",
+	"T86x_TI_LEVEL0",
+	"T86x_TI_LEVEL1",
+	"T86x_TI_LEVEL2",
+	"T86x_TI_LEVEL3",
+	"T86x_TI_LEVEL4",
+	"T86x_TI_LEVEL5",
+	"T86x_TI_LEVEL6",
+	"T86x_TI_LEVEL7",
+	"T86x_TI_COMMAND_1",
+	"T86x_TI_COMMAND_2",
+	"T86x_TI_COMMAND_3",
+	"T86x_TI_COMMAND_4",
+	"T86x_TI_COMMAND_5_7",
+	"T86x_TI_COMMAND_8_15",
+	"T86x_TI_COMMAND_16_63",
+	"T86x_TI_COMMAND_64",
+	"T86x_TI_COMPRESS_IN",
+	"T86x_TI_COMPRESS_OUT",
+	"T86x_TI_COMPRESS_FLUSH",
+	"T86x_TI_TIMESTAMPS",
+	"T86x_TI_PCACHE_HIT",
+	"T86x_TI_PCACHE_MISS",
+	"T86x_TI_PCACHE_LINE",
+	"T86x_TI_PCACHE_STALL",
+	"T86x_TI_WRBUF_HIT",
+	"T86x_TI_WRBUF_MISS",
+	"T86x_TI_WRBUF_LINE",
+	"T86x_TI_WRBUF_PARTIAL",
+	"T86x_TI_WRBUF_STALL",
+	"T86x_TI_ACTIVE",
+	"T86x_TI_LOADING_DESC",
+	"T86x_TI_INDEX_WAIT",
+	"T86x_TI_INDEX_RANGE_WAIT",
+	"T86x_TI_VERTEX_WAIT",
+	"T86x_TI_PCACHE_WAIT",
+	"T86x_TI_WRBUF_WAIT",
+	"T86x_TI_BUS_READ",
+	"T86x_TI_BUS_WRITE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T86x_TI_UTLB_HIT",
+	"T86x_TI_UTLB_NEW_MISS",
+	"T86x_TI_UTLB_REPLAY_FULL",
+	"T86x_TI_UTLB_REPLAY_MISS",
+	"T86x_TI_UTLB_STALL",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T86x_FRAG_ACTIVE",
+	"T86x_FRAG_PRIMITIVES",
+	"T86x_FRAG_PRIMITIVES_DROPPED",
+	"T86x_FRAG_CYCLES_DESC",
+	"T86x_FRAG_CYCLES_FPKQ_ACTIVE",
+	"T86x_FRAG_CYCLES_VERT",
+	"T86x_FRAG_CYCLES_TRISETUP",
+	"T86x_FRAG_CYCLES_EZS_ACTIVE",
+	"T86x_FRAG_THREADS",
+	"T86x_FRAG_DUMMY_THREADS",
+	"T86x_FRAG_QUADS_RAST",
+	"T86x_FRAG_QUADS_EZS_TEST",
+	"T86x_FRAG_QUADS_EZS_KILLED",
+	"T86x_FRAG_THREADS_LZS_TEST",
+	"T86x_FRAG_THREADS_LZS_KILLED",
+	"T86x_FRAG_CYCLES_NO_TILE",
+	"T86x_FRAG_NUM_TILES",
+	"T86x_FRAG_TRANS_ELIM",
+	"T86x_COMPUTE_ACTIVE",
+	"T86x_COMPUTE_TASKS",
+	"T86x_COMPUTE_THREADS",
+	"T86x_COMPUTE_CYCLES_DESC",
+	"T86x_TRIPIPE_ACTIVE",
+	"T86x_ARITH_WORDS",
+	"T86x_ARITH_CYCLES_REG",
+	"T86x_ARITH_CYCLES_L0",
+	"T86x_ARITH_FRAG_DEPEND",
+	"T86x_LS_WORDS",
+	"T86x_LS_ISSUES",
+	"T86x_LS_REISSUE_ATTR",
+	"T86x_LS_REISSUES_VARY",
+	"T86x_LS_VARY_RV_MISS",
+	"T86x_LS_VARY_RV_HIT",
+	"T86x_LS_NO_UNPARK",
+	"T86x_TEX_WORDS",
+	"T86x_TEX_BUBBLES",
+	"T86x_TEX_WORDS_L0",
+	"T86x_TEX_WORDS_DESC",
+	"T86x_TEX_ISSUES",
+	"T86x_TEX_RECIRC_FMISS",
+	"T86x_TEX_RECIRC_DESC",
+	"T86x_TEX_RECIRC_MULTI",
+	"T86x_TEX_RECIRC_PMISS",
+	"T86x_TEX_RECIRC_CONF",
+	"T86x_LSC_READ_HITS",
+	"T86x_LSC_READ_OP",
+	"T86x_LSC_WRITE_HITS",
+	"T86x_LSC_WRITE_OP",
+	"T86x_LSC_ATOMIC_HITS",
+	"T86x_LSC_ATOMIC_OP",
+	"T86x_LSC_LINE_FETCHES",
+	"T86x_LSC_DIRTY_LINE",
+	"T86x_LSC_SNOOPS",
+	"T86x_AXI_TLB_STALL",
+	"T86x_AXI_TLB_MISS",
+	"T86x_AXI_TLB_TRANSACTION",
+	"T86x_LS_TLB_MISS",
+	"T86x_LS_TLB_HIT",
+	"T86x_AXI_BEATS_READ",
+	"T86x_AXI_BEATS_WRITTEN",
+
+	/* L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T86x_MMU_HIT",
+	"T86x_MMU_NEW_MISS",
+	"T86x_MMU_REPLAY_FULL",
+	"T86x_MMU_REPLAY_MISS",
+	"T86x_MMU_TABLE_WALK",
+	"T86x_MMU_REQUESTS",
+	"",
+	"",
+	"T86x_UTLB_HIT",
+	"T86x_UTLB_NEW_MISS",
+	"T86x_UTLB_REPLAY_FULL",
+	"T86x_UTLB_REPLAY_MISS",
+	"T86x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T86x_L2_EXT_WRITE_BEATS",
+	"T86x_L2_EXT_READ_BEATS",
+	"T86x_L2_ANY_LOOKUP",
+	"T86x_L2_READ_LOOKUP",
+	"T86x_L2_SREAD_LOOKUP",
+	"T86x_L2_READ_REPLAY",
+	"T86x_L2_READ_SNOOP",
+	"T86x_L2_READ_HIT",
+	"T86x_L2_CLEAN_MISS",
+	"T86x_L2_WRITE_LOOKUP",
+	"T86x_L2_SWRITE_LOOKUP",
+	"T86x_L2_WRITE_REPLAY",
+	"T86x_L2_WRITE_SNOOP",
+	"T86x_L2_WRITE_HIT",
+	"T86x_L2_EXT_READ_FULL",
+	"",
+	"T86x_L2_EXT_WRITE_FULL",
+	"T86x_L2_EXT_R_W_HAZARD",
+	"T86x_L2_EXT_READ",
+	"T86x_L2_EXT_READ_LINE",
+	"T86x_L2_EXT_WRITE",
+	"T86x_L2_EXT_WRITE_LINE",
+	"T86x_L2_EXT_WRITE_SMALL",
+	"T86x_L2_EXT_BARRIER",
+	"T86x_L2_EXT_AR_STALL",
+	"T86x_L2_EXT_R_BUF_FULL",
+	"T86x_L2_EXT_RD_BUF_FULL",
+	"T86x_L2_EXT_R_RAW",
+	"T86x_L2_EXT_W_STALL",
+	"T86x_L2_EXT_W_BUF_FULL",
+	"T86x_L2_EXT_R_BUF_FULL",
+	"T86x_L2_TAG_HAZARD",
+	"T86x_L2_SNOOP_FULL",
+	"T86x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t88x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T88x_MESSAGES_SENT",
+	"T88x_MESSAGES_RECEIVED",
+	"T88x_GPU_ACTIVE",
+	"T88x_IRQ_ACTIVE",
+	"T88x_JS0_JOBS",
+	"T88x_JS0_TASKS",
+	"T88x_JS0_ACTIVE",
+	"",
+	"T88x_JS0_WAIT_READ",
+	"T88x_JS0_WAIT_ISSUE",
+	"T88x_JS0_WAIT_DEPEND",
+	"T88x_JS0_WAIT_FINISH",
+	"T88x_JS1_JOBS",
+	"T88x_JS1_TASKS",
+	"T88x_JS1_ACTIVE",
+	"",
+	"T88x_JS1_WAIT_READ",
+	"T88x_JS1_WAIT_ISSUE",
+	"T88x_JS1_WAIT_DEPEND",
+	"T88x_JS1_WAIT_FINISH",
+	"T88x_JS2_JOBS",
+	"T88x_JS2_TASKS",
+	"T88x_JS2_ACTIVE",
+	"",
+	"T88x_JS2_WAIT_READ",
+	"T88x_JS2_WAIT_ISSUE",
+	"T88x_JS2_WAIT_DEPEND",
+	"T88x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Tiler */
+	"",
+	"",
+	"",
+	"T88x_TI_JOBS_PROCESSED",
+	"T88x_TI_TRIANGLES",
+	"T88x_TI_QUADS",
+	"T88x_TI_POLYGONS",
+	"T88x_TI_POINTS",
+	"T88x_TI_LINES",
+	"T88x_TI_VCACHE_HIT",
+	"T88x_TI_VCACHE_MISS",
+	"T88x_TI_FRONT_FACING",
+	"T88x_TI_BACK_FACING",
+	"T88x_TI_PRIM_VISIBLE",
+	"T88x_TI_PRIM_CULLED",
+	"T88x_TI_PRIM_CLIPPED",
+	"T88x_TI_LEVEL0",
+	"T88x_TI_LEVEL1",
+	"T88x_TI_LEVEL2",
+	"T88x_TI_LEVEL3",
+	"T88x_TI_LEVEL4",
+	"T88x_TI_LEVEL5",
+	"T88x_TI_LEVEL6",
+	"T88x_TI_LEVEL7",
+	"T88x_TI_COMMAND_1",
+	"T88x_TI_COMMAND_2",
+	"T88x_TI_COMMAND_3",
+	"T88x_TI_COMMAND_4",
+	"T88x_TI_COMMAND_5_7",
+	"T88x_TI_COMMAND_8_15",
+	"T88x_TI_COMMAND_16_63",
+	"T88x_TI_COMMAND_64",
+	"T88x_TI_COMPRESS_IN",
+	"T88x_TI_COMPRESS_OUT",
+	"T88x_TI_COMPRESS_FLUSH",
+	"T88x_TI_TIMESTAMPS",
+	"T88x_TI_PCACHE_HIT",
+	"T88x_TI_PCACHE_MISS",
+	"T88x_TI_PCACHE_LINE",
+	"T88x_TI_PCACHE_STALL",
+	"T88x_TI_WRBUF_HIT",
+	"T88x_TI_WRBUF_MISS",
+	"T88x_TI_WRBUF_LINE",
+	"T88x_TI_WRBUF_PARTIAL",
+	"T88x_TI_WRBUF_STALL",
+	"T88x_TI_ACTIVE",
+	"T88x_TI_LOADING_DESC",
+	"T88x_TI_INDEX_WAIT",
+	"T88x_TI_INDEX_RANGE_WAIT",
+	"T88x_TI_VERTEX_WAIT",
+	"T88x_TI_PCACHE_WAIT",
+	"T88x_TI_WRBUF_WAIT",
+	"T88x_TI_BUS_READ",
+	"T88x_TI_BUS_WRITE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T88x_TI_UTLB_HIT",
+	"T88x_TI_UTLB_NEW_MISS",
+	"T88x_TI_UTLB_REPLAY_FULL",
+	"T88x_TI_UTLB_REPLAY_MISS",
+	"T88x_TI_UTLB_STALL",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T88x_FRAG_ACTIVE",
+	"T88x_FRAG_PRIMITIVES",
+	"T88x_FRAG_PRIMITIVES_DROPPED",
+	"T88x_FRAG_CYCLES_DESC",
+	"T88x_FRAG_CYCLES_FPKQ_ACTIVE",
+	"T88x_FRAG_CYCLES_VERT",
+	"T88x_FRAG_CYCLES_TRISETUP",
+	"T88x_FRAG_CYCLES_EZS_ACTIVE",
+	"T88x_FRAG_THREADS",
+	"T88x_FRAG_DUMMY_THREADS",
+	"T88x_FRAG_QUADS_RAST",
+	"T88x_FRAG_QUADS_EZS_TEST",
+	"T88x_FRAG_QUADS_EZS_KILLED",
+	"T88x_FRAG_THREADS_LZS_TEST",
+	"T88x_FRAG_THREADS_LZS_KILLED",
+	"T88x_FRAG_CYCLES_NO_TILE",
+	"T88x_FRAG_NUM_TILES",
+	"T88x_FRAG_TRANS_ELIM",
+	"T88x_COMPUTE_ACTIVE",
+	"T88x_COMPUTE_TASKS",
+	"T88x_COMPUTE_THREADS",
+	"T88x_COMPUTE_CYCLES_DESC",
+	"T88x_TRIPIPE_ACTIVE",
+	"T88x_ARITH_WORDS",
+	"T88x_ARITH_CYCLES_REG",
+	"T88x_ARITH_CYCLES_L0",
+	"T88x_ARITH_FRAG_DEPEND",
+	"T88x_LS_WORDS",
+	"T88x_LS_ISSUES",
+	"T88x_LS_REISSUE_ATTR",
+	"T88x_LS_REISSUES_VARY",
+	"T88x_LS_VARY_RV_MISS",
+	"T88x_LS_VARY_RV_HIT",
+	"T88x_LS_NO_UNPARK",
+	"T88x_TEX_WORDS",
+	"T88x_TEX_BUBBLES",
+	"T88x_TEX_WORDS_L0",
+	"T88x_TEX_WORDS_DESC",
+	"T88x_TEX_ISSUES",
+	"T88x_TEX_RECIRC_FMISS",
+	"T88x_TEX_RECIRC_DESC",
+	"T88x_TEX_RECIRC_MULTI",
+	"T88x_TEX_RECIRC_PMISS",
+	"T88x_TEX_RECIRC_CONF",
+	"T88x_LSC_READ_HITS",
+	"T88x_LSC_READ_OP",
+	"T88x_LSC_WRITE_HITS",
+	"T88x_LSC_WRITE_OP",
+	"T88x_LSC_ATOMIC_HITS",
+	"T88x_LSC_ATOMIC_OP",
+	"T88x_LSC_LINE_FETCHES",
+	"T88x_LSC_DIRTY_LINE",
+	"T88x_LSC_SNOOPS",
+	"T88x_AXI_TLB_STALL",
+	"T88x_AXI_TLB_MISS",
+	"T88x_AXI_TLB_TRANSACTION",
+	"T88x_LS_TLB_MISS",
+	"T88x_LS_TLB_HIT",
+	"T88x_AXI_BEATS_READ",
+	"T88x_AXI_BEATS_WRITTEN",
+
+	/* L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T88x_MMU_HIT",
+	"T88x_MMU_NEW_MISS",
+	"T88x_MMU_REPLAY_FULL",
+	"T88x_MMU_REPLAY_MISS",
+	"T88x_MMU_TABLE_WALK",
+	"T88x_MMU_REQUESTS",
+	"",
+	"",
+	"T88x_UTLB_HIT",
+	"T88x_UTLB_NEW_MISS",
+	"T88x_UTLB_REPLAY_FULL",
+	"T88x_UTLB_REPLAY_MISS",
+	"T88x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T88x_L2_EXT_WRITE_BEATS",
+	"T88x_L2_EXT_READ_BEATS",
+	"T88x_L2_ANY_LOOKUP",
+	"T88x_L2_READ_LOOKUP",
+	"T88x_L2_SREAD_LOOKUP",
+	"T88x_L2_READ_REPLAY",
+	"T88x_L2_READ_SNOOP",
+	"T88x_L2_READ_HIT",
+	"T88x_L2_CLEAN_MISS",
+	"T88x_L2_WRITE_LOOKUP",
+	"T88x_L2_SWRITE_LOOKUP",
+	"T88x_L2_WRITE_REPLAY",
+	"T88x_L2_WRITE_SNOOP",
+	"T88x_L2_WRITE_HIT",
+	"T88x_L2_EXT_READ_FULL",
+	"",
+	"T88x_L2_EXT_WRITE_FULL",
+	"T88x_L2_EXT_R_W_HAZARD",
+	"T88x_L2_EXT_READ",
+	"T88x_L2_EXT_READ_LINE",
+	"T88x_L2_EXT_WRITE",
+	"T88x_L2_EXT_WRITE_LINE",
+	"T88x_L2_EXT_WRITE_SMALL",
+	"T88x_L2_EXT_BARRIER",
+	"T88x_L2_EXT_AR_STALL",
+	"T88x_L2_EXT_R_BUF_FULL",
+	"T88x_L2_EXT_RD_BUF_FULL",
+	"T88x_L2_EXT_R_RAW",
+	"T88x_L2_EXT_W_STALL",
+	"T88x_L2_EXT_W_BUF_FULL",
+	"T88x_L2_EXT_R_BUF_FULL",
+	"T88x_L2_TAG_HAZARD",
+	"T88x_L2_SNOOP_FULL",
+	"T88x_L2_REPLAY_FULL"
+};
+
+#include "mali_kbase_gator_hwcnt_names_tmix.h"
+
+#include "mali_kbase_gator_hwcnt_names_thex.h"
+
+#include "mali_kbase_gator_hwcnt_names_tsix.h"
+
+#include "mali_kbase_gator_hwcnt_names_tnox.h"
+
+#include "mali_kbase_gator_hwcnt_names_tgox.h"
+
+#include "mali_kbase_gator_hwcnt_names_tkax.h"
+
+#include "mali_kbase_gator_hwcnt_names_ttrx.h"
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tgox.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tgox.h
new file mode 100644
index 0000000..72b5266
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tgox.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TGOX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TGOX_H_
+
+static const char * const hardware_counters_mali_tGOx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"TGOx_MESSAGES_SENT",
+	"TGOx_MESSAGES_RECEIVED",
+	"TGOx_GPU_ACTIVE",
+	"TGOx_IRQ_ACTIVE",
+	"TGOx_JS0_JOBS",
+	"TGOx_JS0_TASKS",
+	"TGOx_JS0_ACTIVE",
+	"",
+	"TGOx_JS0_WAIT_READ",
+	"TGOx_JS0_WAIT_ISSUE",
+	"TGOx_JS0_WAIT_DEPEND",
+	"TGOx_JS0_WAIT_FINISH",
+	"TGOx_JS1_JOBS",
+	"TGOx_JS1_TASKS",
+	"TGOx_JS1_ACTIVE",
+	"",
+	"TGOx_JS1_WAIT_READ",
+	"TGOx_JS1_WAIT_ISSUE",
+	"TGOx_JS1_WAIT_DEPEND",
+	"TGOx_JS1_WAIT_FINISH",
+	"TGOx_JS2_JOBS",
+	"TGOx_JS2_TASKS",
+	"TGOx_JS2_ACTIVE",
+	"",
+	"TGOx_JS2_WAIT_READ",
+	"TGOx_JS2_WAIT_ISSUE",
+	"TGOx_JS2_WAIT_DEPEND",
+	"TGOx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"TGOx_TILER_ACTIVE",
+	"TGOx_JOBS_PROCESSED",
+	"TGOx_TRIANGLES",
+	"TGOx_LINES",
+	"TGOx_POINTS",
+	"TGOx_FRONT_FACING",
+	"TGOx_BACK_FACING",
+	"TGOx_PRIM_VISIBLE",
+	"TGOx_PRIM_CULLED",
+	"TGOx_PRIM_CLIPPED",
+	"TGOx_PRIM_SAT_CULLED",
+	"TGOx_BIN_ALLOC_INIT",
+	"TGOx_BIN_ALLOC_OVERFLOW",
+	"TGOx_BUS_READ",
+	"",
+	"TGOx_BUS_WRITE",
+	"TGOx_LOADING_DESC",
+	"TGOx_IDVS_POS_SHAD_REQ",
+	"TGOx_IDVS_POS_SHAD_WAIT",
+	"TGOx_IDVS_POS_SHAD_STALL",
+	"TGOx_IDVS_POS_FIFO_FULL",
+	"TGOx_PREFETCH_STALL",
+	"TGOx_VCACHE_HIT",
+	"TGOx_VCACHE_MISS",
+	"TGOx_VCACHE_LINE_WAIT",
+	"TGOx_VFETCH_POS_READ_WAIT",
+	"TGOx_VFETCH_VERTEX_WAIT",
+	"TGOx_VFETCH_STALL",
+	"TGOx_PRIMASSY_STALL",
+	"TGOx_BBOX_GEN_STALL",
+	"TGOx_IDVS_VBU_HIT",
+	"TGOx_IDVS_VBU_MISS",
+	"TGOx_IDVS_VBU_LINE_DEALLOCATE",
+	"TGOx_IDVS_VAR_SHAD_REQ",
+	"TGOx_IDVS_VAR_SHAD_STALL",
+	"TGOx_BINNER_STALL",
+	"TGOx_ITER_STALL",
+	"TGOx_COMPRESS_MISS",
+	"TGOx_COMPRESS_STALL",
+	"TGOx_PCACHE_HIT",
+	"TGOx_PCACHE_MISS",
+	"TGOx_PCACHE_MISS_STALL",
+	"TGOx_PCACHE_EVICT_STALL",
+	"TGOx_PMGR_PTR_WR_STALL",
+	"TGOx_PMGR_PTR_RD_STALL",
+	"TGOx_PMGR_CMD_WR_STALL",
+	"TGOx_WRBUF_ACTIVE",
+	"TGOx_WRBUF_HIT",
+	"TGOx_WRBUF_MISS",
+	"TGOx_WRBUF_NO_FREE_LINE_STALL",
+	"TGOx_WRBUF_NO_AXI_ID_STALL",
+	"TGOx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"TGOx_UTLB_TRANS",
+	"TGOx_UTLB_TRANS_HIT",
+	"TGOx_UTLB_TRANS_STALL",
+	"TGOx_UTLB_TRANS_MISS_DELAY",
+	"TGOx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"TGOx_FRAG_ACTIVE",
+	"TGOx_FRAG_PRIMITIVES",
+	"TGOx_FRAG_PRIM_RAST",
+	"TGOx_FRAG_FPK_ACTIVE",
+	"TGOx_FRAG_STARVING",
+	"TGOx_FRAG_WARPS",
+	"TGOx_FRAG_PARTIAL_WARPS",
+	"TGOx_FRAG_QUADS_RAST",
+	"TGOx_FRAG_QUADS_EZS_TEST",
+	"TGOx_FRAG_QUADS_EZS_UPDATE",
+	"TGOx_FRAG_QUADS_EZS_KILL",
+	"TGOx_FRAG_LZS_TEST",
+	"TGOx_FRAG_LZS_KILL",
+	"TGOx_WARP_REG_SIZE_64",
+	"TGOx_FRAG_PTILES",
+	"TGOx_FRAG_TRANS_ELIM",
+	"TGOx_QUAD_FPK_KILLER",
+	"TGOx_FULL_QUAD_WARPS",
+	"TGOx_COMPUTE_ACTIVE",
+	"TGOx_COMPUTE_TASKS",
+	"TGOx_COMPUTE_WARPS",
+	"TGOx_COMPUTE_STARVING",
+	"TGOx_EXEC_CORE_ACTIVE",
+	"TGOx_EXEC_ACTIVE",
+	"TGOx_EXEC_INSTR_COUNT",
+	"TGOx_EXEC_INSTR_DIVERGED",
+	"TGOx_EXEC_INSTR_STARVING",
+	"TGOx_ARITH_INSTR_SINGLE_FMA",
+	"TGOx_ARITH_INSTR_DOUBLE",
+	"TGOx_ARITH_INSTR_MSG",
+	"TGOx_ARITH_INSTR_MSG_ONLY",
+	"TGOx_TEX_MSGI_NUM_QUADS",
+	"TGOx_TEX_DFCH_NUM_PASSES",
+	"TGOx_TEX_DFCH_NUM_PASSES_MISS",
+	"TGOx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+	"TGOx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+	"TGOx_TEX_TFCH_NUM_LINES_FETCHED",
+	"TGOx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+	"TGOx_TEX_TFCH_NUM_OPERATIONS",
+	"TGOx_TEX_FILT_NUM_OPERATIONS",
+	"TGOx_LS_MEM_READ_FULL",
+	"TGOx_LS_MEM_READ_SHORT",
+	"TGOx_LS_MEM_WRITE_FULL",
+	"TGOx_LS_MEM_WRITE_SHORT",
+	"TGOx_LS_MEM_ATOMIC",
+	"TGOx_VARY_INSTR",
+	"TGOx_VARY_SLOT_32",
+	"TGOx_VARY_SLOT_16",
+	"TGOx_ATTR_INSTR",
+	"TGOx_ARITH_INSTR_FP_MUL",
+	"TGOx_BEATS_RD_FTC",
+	"TGOx_BEATS_RD_FTC_EXT",
+	"TGOx_BEATS_RD_LSC",
+	"TGOx_BEATS_RD_LSC_EXT",
+	"TGOx_BEATS_RD_TEX",
+	"TGOx_BEATS_RD_TEX_EXT",
+	"TGOx_BEATS_RD_OTHER",
+	"TGOx_BEATS_WR_LSC_WB",
+	"TGOx_BEATS_WR_TIB",
+	"TGOx_BEATS_WR_LSC_OTHER",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"TGOx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TGOx_L2_RD_MSG_IN",
+	"TGOx_L2_RD_MSG_IN_STALL",
+	"TGOx_L2_WR_MSG_IN",
+	"TGOx_L2_WR_MSG_IN_STALL",
+	"TGOx_L2_SNP_MSG_IN",
+	"TGOx_L2_SNP_MSG_IN_STALL",
+	"TGOx_L2_RD_MSG_OUT",
+	"TGOx_L2_RD_MSG_OUT_STALL",
+	"TGOx_L2_WR_MSG_OUT",
+	"TGOx_L2_ANY_LOOKUP",
+	"TGOx_L2_READ_LOOKUP",
+	"TGOx_L2_WRITE_LOOKUP",
+	"TGOx_L2_EXT_SNOOP_LOOKUP",
+	"TGOx_L2_EXT_READ",
+	"TGOx_L2_EXT_READ_NOSNP",
+	"TGOx_L2_EXT_READ_UNIQUE",
+	"TGOx_L2_EXT_READ_BEATS",
+	"TGOx_L2_EXT_AR_STALL",
+	"TGOx_L2_EXT_AR_CNT_Q1",
+	"TGOx_L2_EXT_AR_CNT_Q2",
+	"TGOx_L2_EXT_AR_CNT_Q3",
+	"TGOx_L2_EXT_RRESP_0_127",
+	"TGOx_L2_EXT_RRESP_128_191",
+	"TGOx_L2_EXT_RRESP_192_255",
+	"TGOx_L2_EXT_RRESP_256_319",
+	"TGOx_L2_EXT_RRESP_320_383",
+	"TGOx_L2_EXT_WRITE",
+	"TGOx_L2_EXT_WRITE_NOSNP_FULL",
+	"TGOx_L2_EXT_WRITE_NOSNP_PTL",
+	"TGOx_L2_EXT_WRITE_SNP_FULL",
+	"TGOx_L2_EXT_WRITE_SNP_PTL",
+	"TGOx_L2_EXT_WRITE_BEATS",
+	"TGOx_L2_EXT_W_STALL",
+	"TGOx_L2_EXT_AW_CNT_Q1",
+	"TGOx_L2_EXT_AW_CNT_Q2",
+	"TGOx_L2_EXT_AW_CNT_Q3",
+	"TGOx_L2_EXT_SNOOP",
+	"TGOx_L2_EXT_SNOOP_STALL",
+	"TGOx_L2_EXT_SNOOP_RESP_CLEAN",
+	"TGOx_L2_EXT_SNOOP_RESP_DATA",
+	"TGOx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TGOX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h
new file mode 100644
index 0000000..e24e91a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_THEX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_THEX_H_
+
+static const char * const hardware_counters_mali_tHEx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"THEx_MESSAGES_SENT",
+	"THEx_MESSAGES_RECEIVED",
+	"THEx_GPU_ACTIVE",
+	"THEx_IRQ_ACTIVE",
+	"THEx_JS0_JOBS",
+	"THEx_JS0_TASKS",
+	"THEx_JS0_ACTIVE",
+	"",
+	"THEx_JS0_WAIT_READ",
+	"THEx_JS0_WAIT_ISSUE",
+	"THEx_JS0_WAIT_DEPEND",
+	"THEx_JS0_WAIT_FINISH",
+	"THEx_JS1_JOBS",
+	"THEx_JS1_TASKS",
+	"THEx_JS1_ACTIVE",
+	"",
+	"THEx_JS1_WAIT_READ",
+	"THEx_JS1_WAIT_ISSUE",
+	"THEx_JS1_WAIT_DEPEND",
+	"THEx_JS1_WAIT_FINISH",
+	"THEx_JS2_JOBS",
+	"THEx_JS2_TASKS",
+	"THEx_JS2_ACTIVE",
+	"",
+	"THEx_JS2_WAIT_READ",
+	"THEx_JS2_WAIT_ISSUE",
+	"THEx_JS2_WAIT_DEPEND",
+	"THEx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"THEx_TILER_ACTIVE",
+	"THEx_JOBS_PROCESSED",
+	"THEx_TRIANGLES",
+	"THEx_LINES",
+	"THEx_POINTS",
+	"THEx_FRONT_FACING",
+	"THEx_BACK_FACING",
+	"THEx_PRIM_VISIBLE",
+	"THEx_PRIM_CULLED",
+	"THEx_PRIM_CLIPPED",
+	"THEx_PRIM_SAT_CULLED",
+	"THEx_BIN_ALLOC_INIT",
+	"THEx_BIN_ALLOC_OVERFLOW",
+	"THEx_BUS_READ",
+	"",
+	"THEx_BUS_WRITE",
+	"THEx_LOADING_DESC",
+	"THEx_IDVS_POS_SHAD_REQ",
+	"THEx_IDVS_POS_SHAD_WAIT",
+	"THEx_IDVS_POS_SHAD_STALL",
+	"THEx_IDVS_POS_FIFO_FULL",
+	"THEx_PREFETCH_STALL",
+	"THEx_VCACHE_HIT",
+	"THEx_VCACHE_MISS",
+	"THEx_VCACHE_LINE_WAIT",
+	"THEx_VFETCH_POS_READ_WAIT",
+	"THEx_VFETCH_VERTEX_WAIT",
+	"THEx_VFETCH_STALL",
+	"THEx_PRIMASSY_STALL",
+	"THEx_BBOX_GEN_STALL",
+	"THEx_IDVS_VBU_HIT",
+	"THEx_IDVS_VBU_MISS",
+	"THEx_IDVS_VBU_LINE_DEALLOCATE",
+	"THEx_IDVS_VAR_SHAD_REQ",
+	"THEx_IDVS_VAR_SHAD_STALL",
+	"THEx_BINNER_STALL",
+	"THEx_ITER_STALL",
+	"THEx_COMPRESS_MISS",
+	"THEx_COMPRESS_STALL",
+	"THEx_PCACHE_HIT",
+	"THEx_PCACHE_MISS",
+	"THEx_PCACHE_MISS_STALL",
+	"THEx_PCACHE_EVICT_STALL",
+	"THEx_PMGR_PTR_WR_STALL",
+	"THEx_PMGR_PTR_RD_STALL",
+	"THEx_PMGR_CMD_WR_STALL",
+	"THEx_WRBUF_ACTIVE",
+	"THEx_WRBUF_HIT",
+	"THEx_WRBUF_MISS",
+	"THEx_WRBUF_NO_FREE_LINE_STALL",
+	"THEx_WRBUF_NO_AXI_ID_STALL",
+	"THEx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"THEx_UTLB_TRANS",
+	"THEx_UTLB_TRANS_HIT",
+	"THEx_UTLB_TRANS_STALL",
+	"THEx_UTLB_TRANS_MISS_DELAY",
+	"THEx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"THEx_FRAG_ACTIVE",
+	"THEx_FRAG_PRIMITIVES",
+	"THEx_FRAG_PRIM_RAST",
+	"THEx_FRAG_FPK_ACTIVE",
+	"THEx_FRAG_STARVING",
+	"THEx_FRAG_WARPS",
+	"THEx_FRAG_PARTIAL_WARPS",
+	"THEx_FRAG_QUADS_RAST",
+	"THEx_FRAG_QUADS_EZS_TEST",
+	"THEx_FRAG_QUADS_EZS_UPDATE",
+	"THEx_FRAG_QUADS_EZS_KILL",
+	"THEx_FRAG_LZS_TEST",
+	"THEx_FRAG_LZS_KILL",
+	"",
+	"THEx_FRAG_PTILES",
+	"THEx_FRAG_TRANS_ELIM",
+	"THEx_QUAD_FPK_KILLER",
+	"",
+	"THEx_COMPUTE_ACTIVE",
+	"THEx_COMPUTE_TASKS",
+	"THEx_COMPUTE_WARPS",
+	"THEx_COMPUTE_STARVING",
+	"THEx_EXEC_CORE_ACTIVE",
+	"THEx_EXEC_ACTIVE",
+	"THEx_EXEC_INSTR_COUNT",
+	"THEx_EXEC_INSTR_DIVERGED",
+	"THEx_EXEC_INSTR_STARVING",
+	"THEx_ARITH_INSTR_SINGLE_FMA",
+	"THEx_ARITH_INSTR_DOUBLE",
+	"THEx_ARITH_INSTR_MSG",
+	"THEx_ARITH_INSTR_MSG_ONLY",
+	"THEx_TEX_INSTR",
+	"THEx_TEX_INSTR_MIPMAP",
+	"THEx_TEX_INSTR_COMPRESSED",
+	"THEx_TEX_INSTR_3D",
+	"THEx_TEX_INSTR_TRILINEAR",
+	"THEx_TEX_COORD_ISSUE",
+	"THEx_TEX_COORD_STALL",
+	"THEx_TEX_STARVE_CACHE",
+	"THEx_TEX_STARVE_FILTER",
+	"THEx_LS_MEM_READ_FULL",
+	"THEx_LS_MEM_READ_SHORT",
+	"THEx_LS_MEM_WRITE_FULL",
+	"THEx_LS_MEM_WRITE_SHORT",
+	"THEx_LS_MEM_ATOMIC",
+	"THEx_VARY_INSTR",
+	"THEx_VARY_SLOT_32",
+	"THEx_VARY_SLOT_16",
+	"THEx_ATTR_INSTR",
+	"THEx_ARITH_INSTR_FP_MUL",
+	"THEx_BEATS_RD_FTC",
+	"THEx_BEATS_RD_FTC_EXT",
+	"THEx_BEATS_RD_LSC",
+	"THEx_BEATS_RD_LSC_EXT",
+	"THEx_BEATS_RD_TEX",
+	"THEx_BEATS_RD_TEX_EXT",
+	"THEx_BEATS_RD_OTHER",
+	"THEx_BEATS_WR_LSC",
+	"THEx_BEATS_WR_TIB",
+	"",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"THEx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"THEx_L2_RD_MSG_IN",
+	"THEx_L2_RD_MSG_IN_STALL",
+	"THEx_L2_WR_MSG_IN",
+	"THEx_L2_WR_MSG_IN_STALL",
+	"THEx_L2_SNP_MSG_IN",
+	"THEx_L2_SNP_MSG_IN_STALL",
+	"THEx_L2_RD_MSG_OUT",
+	"THEx_L2_RD_MSG_OUT_STALL",
+	"THEx_L2_WR_MSG_OUT",
+	"THEx_L2_ANY_LOOKUP",
+	"THEx_L2_READ_LOOKUP",
+	"THEx_L2_WRITE_LOOKUP",
+	"THEx_L2_EXT_SNOOP_LOOKUP",
+	"THEx_L2_EXT_READ",
+	"THEx_L2_EXT_READ_NOSNP",
+	"THEx_L2_EXT_READ_UNIQUE",
+	"THEx_L2_EXT_READ_BEATS",
+	"THEx_L2_EXT_AR_STALL",
+	"THEx_L2_EXT_AR_CNT_Q1",
+	"THEx_L2_EXT_AR_CNT_Q2",
+	"THEx_L2_EXT_AR_CNT_Q3",
+	"THEx_L2_EXT_RRESP_0_127",
+	"THEx_L2_EXT_RRESP_128_191",
+	"THEx_L2_EXT_RRESP_192_255",
+	"THEx_L2_EXT_RRESP_256_319",
+	"THEx_L2_EXT_RRESP_320_383",
+	"THEx_L2_EXT_WRITE",
+	"THEx_L2_EXT_WRITE_NOSNP_FULL",
+	"THEx_L2_EXT_WRITE_NOSNP_PTL",
+	"THEx_L2_EXT_WRITE_SNP_FULL",
+	"THEx_L2_EXT_WRITE_SNP_PTL",
+	"THEx_L2_EXT_WRITE_BEATS",
+	"THEx_L2_EXT_W_STALL",
+	"THEx_L2_EXT_AW_CNT_Q1",
+	"THEx_L2_EXT_AW_CNT_Q2",
+	"THEx_L2_EXT_AW_CNT_Q3",
+	"THEx_L2_EXT_SNOOP",
+	"THEx_L2_EXT_SNOOP_STALL",
+	"THEx_L2_EXT_SNOOP_RESP_CLEAN",
+	"THEx_L2_EXT_SNOOP_RESP_DATA",
+	"THEx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_THEX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h
new file mode 100644
index 0000000..73db45c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated; it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TKAX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TKAX_H_
+
+static const char * const hardware_counters_mali_tKAx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"TKAx_MESSAGES_SENT",
+	"TKAx_MESSAGES_RECEIVED",
+	"TKAx_GPU_ACTIVE",
+	"TKAx_IRQ_ACTIVE",
+	"TKAx_JS0_JOBS",
+	"TKAx_JS0_TASKS",
+	"TKAx_JS0_ACTIVE",
+	"",
+	"TKAx_JS0_WAIT_READ",
+	"TKAx_JS0_WAIT_ISSUE",
+	"TKAx_JS0_WAIT_DEPEND",
+	"TKAx_JS0_WAIT_FINISH",
+	"TKAx_JS1_JOBS",
+	"TKAx_JS1_TASKS",
+	"TKAx_JS1_ACTIVE",
+	"",
+	"TKAx_JS1_WAIT_READ",
+	"TKAx_JS1_WAIT_ISSUE",
+	"TKAx_JS1_WAIT_DEPEND",
+	"TKAx_JS1_WAIT_FINISH",
+	"TKAx_JS2_JOBS",
+	"TKAx_JS2_TASKS",
+	"TKAx_JS2_ACTIVE",
+	"",
+	"TKAx_JS2_WAIT_READ",
+	"TKAx_JS2_WAIT_ISSUE",
+	"TKAx_JS2_WAIT_DEPEND",
+	"TKAx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"TKAx_TILER_ACTIVE",
+	"TKAx_JOBS_PROCESSED",
+	"TKAx_TRIANGLES",
+	"TKAx_LINES",
+	"TKAx_POINTS",
+	"TKAx_FRONT_FACING",
+	"TKAx_BACK_FACING",
+	"TKAx_PRIM_VISIBLE",
+	"TKAx_PRIM_CULLED",
+	"TKAx_PRIM_CLIPPED",
+	"TKAx_PRIM_SAT_CULLED",
+	"TKAx_BIN_ALLOC_INIT",
+	"TKAx_BIN_ALLOC_OVERFLOW",
+	"TKAx_BUS_READ",
+	"",
+	"TKAx_BUS_WRITE",
+	"TKAx_LOADING_DESC",
+	"TKAx_IDVS_POS_SHAD_REQ",
+	"TKAx_IDVS_POS_SHAD_WAIT",
+	"TKAx_IDVS_POS_SHAD_STALL",
+	"TKAx_IDVS_POS_FIFO_FULL",
+	"TKAx_PREFETCH_STALL",
+	"TKAx_VCACHE_HIT",
+	"TKAx_VCACHE_MISS",
+	"TKAx_VCACHE_LINE_WAIT",
+	"TKAx_VFETCH_POS_READ_WAIT",
+	"TKAx_VFETCH_VERTEX_WAIT",
+	"TKAx_VFETCH_STALL",
+	"TKAx_PRIMASSY_STALL",
+	"TKAx_BBOX_GEN_STALL",
+	"TKAx_IDVS_VBU_HIT",
+	"TKAx_IDVS_VBU_MISS",
+	"TKAx_IDVS_VBU_LINE_DEALLOCATE",
+	"TKAx_IDVS_VAR_SHAD_REQ",
+	"TKAx_IDVS_VAR_SHAD_STALL",
+	"TKAx_BINNER_STALL",
+	"TKAx_ITER_STALL",
+	"TKAx_COMPRESS_MISS",
+	"TKAx_COMPRESS_STALL",
+	"TKAx_PCACHE_HIT",
+	"TKAx_PCACHE_MISS",
+	"TKAx_PCACHE_MISS_STALL",
+	"TKAx_PCACHE_EVICT_STALL",
+	"TKAx_PMGR_PTR_WR_STALL",
+	"TKAx_PMGR_PTR_RD_STALL",
+	"TKAx_PMGR_CMD_WR_STALL",
+	"TKAx_WRBUF_ACTIVE",
+	"TKAx_WRBUF_HIT",
+	"TKAx_WRBUF_MISS",
+	"TKAx_WRBUF_NO_FREE_LINE_STALL",
+	"TKAx_WRBUF_NO_AXI_ID_STALL",
+	"TKAx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"TKAx_UTLB_TRANS",
+	"TKAx_UTLB_TRANS_HIT",
+	"TKAx_UTLB_TRANS_STALL",
+	"TKAx_UTLB_TRANS_MISS_DELAY",
+	"TKAx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"TKAx_FRAG_ACTIVE",
+	"TKAx_FRAG_PRIMITIVES",
+	"TKAx_FRAG_PRIM_RAST",
+	"TKAx_FRAG_FPK_ACTIVE",
+	"TKAx_FRAG_STARVING",
+	"TKAx_FRAG_WARPS",
+	"TKAx_FRAG_PARTIAL_WARPS",
+	"TKAx_FRAG_QUADS_RAST",
+	"TKAx_FRAG_QUADS_EZS_TEST",
+	"TKAx_FRAG_QUADS_EZS_UPDATE",
+	"TKAx_FRAG_QUADS_EZS_KILL",
+	"TKAx_FRAG_LZS_TEST",
+	"TKAx_FRAG_LZS_KILL",
+	"TKAx_WARP_REG_SIZE_64",
+	"TKAx_FRAG_PTILES",
+	"TKAx_FRAG_TRANS_ELIM",
+	"TKAx_QUAD_FPK_KILLER",
+	"TKAx_FULL_QUAD_WARPS",
+	"TKAx_COMPUTE_ACTIVE",
+	"TKAx_COMPUTE_TASKS",
+	"TKAx_COMPUTE_WARPS",
+	"TKAx_COMPUTE_STARVING",
+	"TKAx_EXEC_CORE_ACTIVE",
+	"TKAx_EXEC_ACTIVE",
+	"TKAx_EXEC_INSTR_COUNT",
+	"TKAx_EXEC_INSTR_DIVERGED",
+	"TKAx_EXEC_INSTR_STARVING",
+	"TKAx_ARITH_INSTR_SINGLE_FMA",
+	"TKAx_ARITH_INSTR_DOUBLE",
+	"TKAx_ARITH_INSTR_MSG",
+	"TKAx_ARITH_INSTR_MSG_ONLY",
+	"TKAx_TEX_MSGI_NUM_QUADS",
+	"TKAx_TEX_DFCH_NUM_PASSES",
+	"TKAx_TEX_DFCH_NUM_PASSES_MISS",
+	"TKAx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+	"TKAx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+	"TKAx_TEX_TFCH_NUM_LINES_FETCHED",
+	"TKAx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+	"TKAx_TEX_TFCH_NUM_OPERATIONS",
+	"TKAx_TEX_FILT_NUM_OPERATIONS",
+	"TKAx_LS_MEM_READ_FULL",
+	"TKAx_LS_MEM_READ_SHORT",
+	"TKAx_LS_MEM_WRITE_FULL",
+	"TKAx_LS_MEM_WRITE_SHORT",
+	"TKAx_LS_MEM_ATOMIC",
+	"TKAx_VARY_INSTR",
+	"TKAx_VARY_SLOT_32",
+	"TKAx_VARY_SLOT_16",
+	"TKAx_ATTR_INSTR",
+	"TKAx_ARITH_INSTR_FP_MUL",
+	"TKAx_BEATS_RD_FTC",
+	"TKAx_BEATS_RD_FTC_EXT",
+	"TKAx_BEATS_RD_LSC",
+	"TKAx_BEATS_RD_LSC_EXT",
+	"TKAx_BEATS_RD_TEX",
+	"TKAx_BEATS_RD_TEX_EXT",
+	"TKAx_BEATS_RD_OTHER",
+	"TKAx_BEATS_WR_LSC_OTHER",
+	"TKAx_BEATS_WR_TIB",
+	"TKAx_BEATS_WR_LSC_WB",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"TKAx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TKAx_L2_RD_MSG_IN",
+	"TKAx_L2_RD_MSG_IN_STALL",
+	"TKAx_L2_WR_MSG_IN",
+	"TKAx_L2_WR_MSG_IN_STALL",
+	"TKAx_L2_SNP_MSG_IN",
+	"TKAx_L2_SNP_MSG_IN_STALL",
+	"TKAx_L2_RD_MSG_OUT",
+	"TKAx_L2_RD_MSG_OUT_STALL",
+	"TKAx_L2_WR_MSG_OUT",
+	"TKAx_L2_ANY_LOOKUP",
+	"TKAx_L2_READ_LOOKUP",
+	"TKAx_L2_WRITE_LOOKUP",
+	"TKAx_L2_EXT_SNOOP_LOOKUP",
+	"TKAx_L2_EXT_READ",
+	"TKAx_L2_EXT_READ_NOSNP",
+	"TKAx_L2_EXT_READ_UNIQUE",
+	"TKAx_L2_EXT_READ_BEATS",
+	"TKAx_L2_EXT_AR_STALL",
+	"TKAx_L2_EXT_AR_CNT_Q1",
+	"TKAx_L2_EXT_AR_CNT_Q2",
+	"TKAx_L2_EXT_AR_CNT_Q3",
+	"TKAx_L2_EXT_RRESP_0_127",
+	"TKAx_L2_EXT_RRESP_128_191",
+	"TKAx_L2_EXT_RRESP_192_255",
+	"TKAx_L2_EXT_RRESP_256_319",
+	"TKAx_L2_EXT_RRESP_320_383",
+	"TKAx_L2_EXT_WRITE",
+	"TKAx_L2_EXT_WRITE_NOSNP_FULL",
+	"TKAx_L2_EXT_WRITE_NOSNP_PTL",
+	"TKAx_L2_EXT_WRITE_SNP_FULL",
+	"TKAx_L2_EXT_WRITE_SNP_PTL",
+	"TKAx_L2_EXT_WRITE_BEATS",
+	"TKAx_L2_EXT_W_STALL",
+	"TKAx_L2_EXT_AW_CNT_Q1",
+	"TKAx_L2_EXT_AW_CNT_Q2",
+	"TKAx_L2_EXT_AW_CNT_Q3",
+	"TKAx_L2_EXT_SNOOP",
+	"TKAx_L2_EXT_SNOOP_STALL",
+	"TKAx_L2_EXT_SNOOP_RESP_CLEAN",
+	"TKAx_L2_EXT_SNOOP_RESP_DATA",
+	"TKAx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TKAX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h
new file mode 100644
index 0000000..63eac50
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated; it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TMIX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TMIX_H_
+
+static const char * const hardware_counters_mali_tMIx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"TMIx_MESSAGES_SENT",
+	"TMIx_MESSAGES_RECEIVED",
+	"TMIx_GPU_ACTIVE",
+	"TMIx_IRQ_ACTIVE",
+	"TMIx_JS0_JOBS",
+	"TMIx_JS0_TASKS",
+	"TMIx_JS0_ACTIVE",
+	"",
+	"TMIx_JS0_WAIT_READ",
+	"TMIx_JS0_WAIT_ISSUE",
+	"TMIx_JS0_WAIT_DEPEND",
+	"TMIx_JS0_WAIT_FINISH",
+	"TMIx_JS1_JOBS",
+	"TMIx_JS1_TASKS",
+	"TMIx_JS1_ACTIVE",
+	"",
+	"TMIx_JS1_WAIT_READ",
+	"TMIx_JS1_WAIT_ISSUE",
+	"TMIx_JS1_WAIT_DEPEND",
+	"TMIx_JS1_WAIT_FINISH",
+	"TMIx_JS2_JOBS",
+	"TMIx_JS2_TASKS",
+	"TMIx_JS2_ACTIVE",
+	"",
+	"TMIx_JS2_WAIT_READ",
+	"TMIx_JS2_WAIT_ISSUE",
+	"TMIx_JS2_WAIT_DEPEND",
+	"TMIx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"TMIx_TILER_ACTIVE",
+	"TMIx_JOBS_PROCESSED",
+	"TMIx_TRIANGLES",
+	"TMIx_LINES",
+	"TMIx_POINTS",
+	"TMIx_FRONT_FACING",
+	"TMIx_BACK_FACING",
+	"TMIx_PRIM_VISIBLE",
+	"TMIx_PRIM_CULLED",
+	"TMIx_PRIM_CLIPPED",
+	"TMIx_PRIM_SAT_CULLED",
+	"TMIx_BIN_ALLOC_INIT",
+	"TMIx_BIN_ALLOC_OVERFLOW",
+	"TMIx_BUS_READ",
+	"",
+	"TMIx_BUS_WRITE",
+	"TMIx_LOADING_DESC",
+	"TMIx_IDVS_POS_SHAD_REQ",
+	"TMIx_IDVS_POS_SHAD_WAIT",
+	"TMIx_IDVS_POS_SHAD_STALL",
+	"TMIx_IDVS_POS_FIFO_FULL",
+	"TMIx_PREFETCH_STALL",
+	"TMIx_VCACHE_HIT",
+	"TMIx_VCACHE_MISS",
+	"TMIx_VCACHE_LINE_WAIT",
+	"TMIx_VFETCH_POS_READ_WAIT",
+	"TMIx_VFETCH_VERTEX_WAIT",
+	"TMIx_VFETCH_STALL",
+	"TMIx_PRIMASSY_STALL",
+	"TMIx_BBOX_GEN_STALL",
+	"TMIx_IDVS_VBU_HIT",
+	"TMIx_IDVS_VBU_MISS",
+	"TMIx_IDVS_VBU_LINE_DEALLOCATE",
+	"TMIx_IDVS_VAR_SHAD_REQ",
+	"TMIx_IDVS_VAR_SHAD_STALL",
+	"TMIx_BINNER_STALL",
+	"TMIx_ITER_STALL",
+	"TMIx_COMPRESS_MISS",
+	"TMIx_COMPRESS_STALL",
+	"TMIx_PCACHE_HIT",
+	"TMIx_PCACHE_MISS",
+	"TMIx_PCACHE_MISS_STALL",
+	"TMIx_PCACHE_EVICT_STALL",
+	"TMIx_PMGR_PTR_WR_STALL",
+	"TMIx_PMGR_PTR_RD_STALL",
+	"TMIx_PMGR_CMD_WR_STALL",
+	"TMIx_WRBUF_ACTIVE",
+	"TMIx_WRBUF_HIT",
+	"TMIx_WRBUF_MISS",
+	"TMIx_WRBUF_NO_FREE_LINE_STALL",
+	"TMIx_WRBUF_NO_AXI_ID_STALL",
+	"TMIx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"TMIx_UTLB_TRANS",
+	"TMIx_UTLB_TRANS_HIT",
+	"TMIx_UTLB_TRANS_STALL",
+	"TMIx_UTLB_TRANS_MISS_DELAY",
+	"TMIx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"TMIx_FRAG_ACTIVE",
+	"TMIx_FRAG_PRIMITIVES",
+	"TMIx_FRAG_PRIM_RAST",
+	"TMIx_FRAG_FPK_ACTIVE",
+	"TMIx_FRAG_STARVING",
+	"TMIx_FRAG_WARPS",
+	"TMIx_FRAG_PARTIAL_WARPS",
+	"TMIx_FRAG_QUADS_RAST",
+	"TMIx_FRAG_QUADS_EZS_TEST",
+	"TMIx_FRAG_QUADS_EZS_UPDATE",
+	"TMIx_FRAG_QUADS_EZS_KILL",
+	"TMIx_FRAG_LZS_TEST",
+	"TMIx_FRAG_LZS_KILL",
+	"",
+	"TMIx_FRAG_PTILES",
+	"TMIx_FRAG_TRANS_ELIM",
+	"TMIx_QUAD_FPK_KILLER",
+	"",
+	"TMIx_COMPUTE_ACTIVE",
+	"TMIx_COMPUTE_TASKS",
+	"TMIx_COMPUTE_WARPS",
+	"TMIx_COMPUTE_STARVING",
+	"TMIx_EXEC_CORE_ACTIVE",
+	"TMIx_EXEC_ACTIVE",
+	"TMIx_EXEC_INSTR_COUNT",
+	"TMIx_EXEC_INSTR_DIVERGED",
+	"TMIx_EXEC_INSTR_STARVING",
+	"TMIx_ARITH_INSTR_SINGLE_FMA",
+	"TMIx_ARITH_INSTR_DOUBLE",
+	"TMIx_ARITH_INSTR_MSG",
+	"TMIx_ARITH_INSTR_MSG_ONLY",
+	"TMIx_TEX_INSTR",
+	"TMIx_TEX_INSTR_MIPMAP",
+	"TMIx_TEX_INSTR_COMPRESSED",
+	"TMIx_TEX_INSTR_3D",
+	"TMIx_TEX_INSTR_TRILINEAR",
+	"TMIx_TEX_COORD_ISSUE",
+	"TMIx_TEX_COORD_STALL",
+	"TMIx_TEX_STARVE_CACHE",
+	"TMIx_TEX_STARVE_FILTER",
+	"TMIx_LS_MEM_READ_FULL",
+	"TMIx_LS_MEM_READ_SHORT",
+	"TMIx_LS_MEM_WRITE_FULL",
+	"TMIx_LS_MEM_WRITE_SHORT",
+	"TMIx_LS_MEM_ATOMIC",
+	"TMIx_VARY_INSTR",
+	"TMIx_VARY_SLOT_32",
+	"TMIx_VARY_SLOT_16",
+	"TMIx_ATTR_INSTR",
+	"TMIx_ARITH_INSTR_FP_MUL",
+	"TMIx_BEATS_RD_FTC",
+	"TMIx_BEATS_RD_FTC_EXT",
+	"TMIx_BEATS_RD_LSC",
+	"TMIx_BEATS_RD_LSC_EXT",
+	"TMIx_BEATS_RD_TEX",
+	"TMIx_BEATS_RD_TEX_EXT",
+	"TMIx_BEATS_RD_OTHER",
+	"TMIx_BEATS_WR_LSC",
+	"TMIx_BEATS_WR_TIB",
+	"",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"TMIx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TMIx_L2_RD_MSG_IN",
+	"TMIx_L2_RD_MSG_IN_STALL",
+	"TMIx_L2_WR_MSG_IN",
+	"TMIx_L2_WR_MSG_IN_STALL",
+	"TMIx_L2_SNP_MSG_IN",
+	"TMIx_L2_SNP_MSG_IN_STALL",
+	"TMIx_L2_RD_MSG_OUT",
+	"TMIx_L2_RD_MSG_OUT_STALL",
+	"TMIx_L2_WR_MSG_OUT",
+	"TMIx_L2_ANY_LOOKUP",
+	"TMIx_L2_READ_LOOKUP",
+	"TMIx_L2_WRITE_LOOKUP",
+	"TMIx_L2_EXT_SNOOP_LOOKUP",
+	"TMIx_L2_EXT_READ",
+	"TMIx_L2_EXT_READ_NOSNP",
+	"TMIx_L2_EXT_READ_UNIQUE",
+	"TMIx_L2_EXT_READ_BEATS",
+	"TMIx_L2_EXT_AR_STALL",
+	"TMIx_L2_EXT_AR_CNT_Q1",
+	"TMIx_L2_EXT_AR_CNT_Q2",
+	"TMIx_L2_EXT_AR_CNT_Q3",
+	"TMIx_L2_EXT_RRESP_0_127",
+	"TMIx_L2_EXT_RRESP_128_191",
+	"TMIx_L2_EXT_RRESP_192_255",
+	"TMIx_L2_EXT_RRESP_256_319",
+	"TMIx_L2_EXT_RRESP_320_383",
+	"TMIx_L2_EXT_WRITE",
+	"TMIx_L2_EXT_WRITE_NOSNP_FULL",
+	"TMIx_L2_EXT_WRITE_NOSNP_PTL",
+	"TMIx_L2_EXT_WRITE_SNP_FULL",
+	"TMIx_L2_EXT_WRITE_SNP_PTL",
+	"TMIx_L2_EXT_WRITE_BEATS",
+	"TMIx_L2_EXT_W_STALL",
+	"TMIx_L2_EXT_AW_CNT_Q1",
+	"TMIx_L2_EXT_AW_CNT_Q2",
+	"TMIx_L2_EXT_AW_CNT_Q3",
+	"TMIx_L2_EXT_SNOOP",
+	"TMIx_L2_EXT_SNOOP_STALL",
+	"TMIx_L2_EXT_SNOOP_RESP_CLEAN",
+	"TMIx_L2_EXT_SNOOP_RESP_DATA",
+	"TMIx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TMIX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h
new file mode 100644
index 0000000..932663c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated; it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TNOX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TNOX_H_
+
+static const char * const hardware_counters_mali_tNOx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"TNOx_MESSAGES_SENT",
+	"TNOx_MESSAGES_RECEIVED",
+	"TNOx_GPU_ACTIVE",
+	"TNOx_IRQ_ACTIVE",
+	"TNOx_JS0_JOBS",
+	"TNOx_JS0_TASKS",
+	"TNOx_JS0_ACTIVE",
+	"",
+	"TNOx_JS0_WAIT_READ",
+	"TNOx_JS0_WAIT_ISSUE",
+	"TNOx_JS0_WAIT_DEPEND",
+	"TNOx_JS0_WAIT_FINISH",
+	"TNOx_JS1_JOBS",
+	"TNOx_JS1_TASKS",
+	"TNOx_JS1_ACTIVE",
+	"",
+	"TNOx_JS1_WAIT_READ",
+	"TNOx_JS1_WAIT_ISSUE",
+	"TNOx_JS1_WAIT_DEPEND",
+	"TNOx_JS1_WAIT_FINISH",
+	"TNOx_JS2_JOBS",
+	"TNOx_JS2_TASKS",
+	"TNOx_JS2_ACTIVE",
+	"",
+	"TNOx_JS2_WAIT_READ",
+	"TNOx_JS2_WAIT_ISSUE",
+	"TNOx_JS2_WAIT_DEPEND",
+	"TNOx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"TNOx_TILER_ACTIVE",
+	"TNOx_JOBS_PROCESSED",
+	"TNOx_TRIANGLES",
+	"TNOx_LINES",
+	"TNOx_POINTS",
+	"TNOx_FRONT_FACING",
+	"TNOx_BACK_FACING",
+	"TNOx_PRIM_VISIBLE",
+	"TNOx_PRIM_CULLED",
+	"TNOx_PRIM_CLIPPED",
+	"TNOx_PRIM_SAT_CULLED",
+	"TNOx_BIN_ALLOC_INIT",
+	"TNOx_BIN_ALLOC_OVERFLOW",
+	"TNOx_BUS_READ",
+	"",
+	"TNOx_BUS_WRITE",
+	"TNOx_LOADING_DESC",
+	"TNOx_IDVS_POS_SHAD_REQ",
+	"TNOx_IDVS_POS_SHAD_WAIT",
+	"TNOx_IDVS_POS_SHAD_STALL",
+	"TNOx_IDVS_POS_FIFO_FULL",
+	"TNOx_PREFETCH_STALL",
+	"TNOx_VCACHE_HIT",
+	"TNOx_VCACHE_MISS",
+	"TNOx_VCACHE_LINE_WAIT",
+	"TNOx_VFETCH_POS_READ_WAIT",
+	"TNOx_VFETCH_VERTEX_WAIT",
+	"TNOx_VFETCH_STALL",
+	"TNOx_PRIMASSY_STALL",
+	"TNOx_BBOX_GEN_STALL",
+	"TNOx_IDVS_VBU_HIT",
+	"TNOx_IDVS_VBU_MISS",
+	"TNOx_IDVS_VBU_LINE_DEALLOCATE",
+	"TNOx_IDVS_VAR_SHAD_REQ",
+	"TNOx_IDVS_VAR_SHAD_STALL",
+	"TNOx_BINNER_STALL",
+	"TNOx_ITER_STALL",
+	"TNOx_COMPRESS_MISS",
+	"TNOx_COMPRESS_STALL",
+	"TNOx_PCACHE_HIT",
+	"TNOx_PCACHE_MISS",
+	"TNOx_PCACHE_MISS_STALL",
+	"TNOx_PCACHE_EVICT_STALL",
+	"TNOx_PMGR_PTR_WR_STALL",
+	"TNOx_PMGR_PTR_RD_STALL",
+	"TNOx_PMGR_CMD_WR_STALL",
+	"TNOx_WRBUF_ACTIVE",
+	"TNOx_WRBUF_HIT",
+	"TNOx_WRBUF_MISS",
+	"TNOx_WRBUF_NO_FREE_LINE_STALL",
+	"TNOx_WRBUF_NO_AXI_ID_STALL",
+	"TNOx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"TNOx_UTLB_TRANS",
+	"TNOx_UTLB_TRANS_HIT",
+	"TNOx_UTLB_TRANS_STALL",
+	"TNOx_UTLB_TRANS_MISS_DELAY",
+	"TNOx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"TNOx_FRAG_ACTIVE",
+	"TNOx_FRAG_PRIMITIVES",
+	"TNOx_FRAG_PRIM_RAST",
+	"TNOx_FRAG_FPK_ACTIVE",
+	"TNOx_FRAG_STARVING",
+	"TNOx_FRAG_WARPS",
+	"TNOx_FRAG_PARTIAL_WARPS",
+	"TNOx_FRAG_QUADS_RAST",
+	"TNOx_FRAG_QUADS_EZS_TEST",
+	"TNOx_FRAG_QUADS_EZS_UPDATE",
+	"TNOx_FRAG_QUADS_EZS_KILL",
+	"TNOx_FRAG_LZS_TEST",
+	"TNOx_FRAG_LZS_KILL",
+	"TNOx_WARP_REG_SIZE_64",
+	"TNOx_FRAG_PTILES",
+	"TNOx_FRAG_TRANS_ELIM",
+	"TNOx_QUAD_FPK_KILLER",
+	"TNOx_FULL_QUAD_WARPS",
+	"TNOx_COMPUTE_ACTIVE",
+	"TNOx_COMPUTE_TASKS",
+	"TNOx_COMPUTE_WARPS",
+	"TNOx_COMPUTE_STARVING",
+	"TNOx_EXEC_CORE_ACTIVE",
+	"TNOx_EXEC_ACTIVE",
+	"TNOx_EXEC_INSTR_COUNT",
+	"TNOx_EXEC_INSTR_DIVERGED",
+	"TNOx_EXEC_INSTR_STARVING",
+	"TNOx_ARITH_INSTR_SINGLE_FMA",
+	"TNOx_ARITH_INSTR_DOUBLE",
+	"TNOx_ARITH_INSTR_MSG",
+	"TNOx_ARITH_INSTR_MSG_ONLY",
+	"TNOx_TEX_MSGI_NUM_QUADS",
+	"TNOx_TEX_DFCH_NUM_PASSES",
+	"TNOx_TEX_DFCH_NUM_PASSES_MISS",
+	"TNOx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+	"TNOx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+	"TNOx_TEX_TFCH_NUM_LINES_FETCHED",
+	"TNOx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+	"TNOx_TEX_TFCH_NUM_OPERATIONS",
+	"TNOx_TEX_FILT_NUM_OPERATIONS",
+	"TNOx_LS_MEM_READ_FULL",
+	"TNOx_LS_MEM_READ_SHORT",
+	"TNOx_LS_MEM_WRITE_FULL",
+	"TNOx_LS_MEM_WRITE_SHORT",
+	"TNOx_LS_MEM_ATOMIC",
+	"TNOx_VARY_INSTR",
+	"TNOx_VARY_SLOT_32",
+	"TNOx_VARY_SLOT_16",
+	"TNOx_ATTR_INSTR",
+	"TNOx_ARITH_INSTR_FP_MUL",
+	"TNOx_BEATS_RD_FTC",
+	"TNOx_BEATS_RD_FTC_EXT",
+	"TNOx_BEATS_RD_LSC",
+	"TNOx_BEATS_RD_LSC_EXT",
+	"TNOx_BEATS_RD_TEX",
+	"TNOx_BEATS_RD_TEX_EXT",
+	"TNOx_BEATS_RD_OTHER",
+	"TNOx_BEATS_WR_LSC_OTHER",
+	"TNOx_BEATS_WR_TIB",
+	"TNOx_BEATS_WR_LSC_WB",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"TNOx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TNOx_L2_RD_MSG_IN",
+	"TNOx_L2_RD_MSG_IN_STALL",
+	"TNOx_L2_WR_MSG_IN",
+	"TNOx_L2_WR_MSG_IN_STALL",
+	"TNOx_L2_SNP_MSG_IN",
+	"TNOx_L2_SNP_MSG_IN_STALL",
+	"TNOx_L2_RD_MSG_OUT",
+	"TNOx_L2_RD_MSG_OUT_STALL",
+	"TNOx_L2_WR_MSG_OUT",
+	"TNOx_L2_ANY_LOOKUP",
+	"TNOx_L2_READ_LOOKUP",
+	"TNOx_L2_WRITE_LOOKUP",
+	"TNOx_L2_EXT_SNOOP_LOOKUP",
+	"TNOx_L2_EXT_READ",
+	"TNOx_L2_EXT_READ_NOSNP",
+	"TNOx_L2_EXT_READ_UNIQUE",
+	"TNOx_L2_EXT_READ_BEATS",
+	"TNOx_L2_EXT_AR_STALL",
+	"TNOx_L2_EXT_AR_CNT_Q1",
+	"TNOx_L2_EXT_AR_CNT_Q2",
+	"TNOx_L2_EXT_AR_CNT_Q3",
+	"TNOx_L2_EXT_RRESP_0_127",
+	"TNOx_L2_EXT_RRESP_128_191",
+	"TNOx_L2_EXT_RRESP_192_255",
+	"TNOx_L2_EXT_RRESP_256_319",
+	"TNOx_L2_EXT_RRESP_320_383",
+	"TNOx_L2_EXT_WRITE",
+	"TNOx_L2_EXT_WRITE_NOSNP_FULL",
+	"TNOx_L2_EXT_WRITE_NOSNP_PTL",
+	"TNOx_L2_EXT_WRITE_SNP_FULL",
+	"TNOx_L2_EXT_WRITE_SNP_PTL",
+	"TNOx_L2_EXT_WRITE_BEATS",
+	"TNOx_L2_EXT_W_STALL",
+	"TNOx_L2_EXT_AW_CNT_Q1",
+	"TNOx_L2_EXT_AW_CNT_Q2",
+	"TNOx_L2_EXT_AW_CNT_Q3",
+	"TNOx_L2_EXT_SNOOP",
+	"TNOx_L2_EXT_SNOOP_STALL",
+	"TNOx_L2_EXT_SNOOP_RESP_CLEAN",
+	"TNOx_L2_EXT_SNOOP_RESP_DATA",
+	"TNOx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TNOX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h
new file mode 100644
index 0000000..b8dde32
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated; it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TSIX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TSIX_H_
+
+static const char * const hardware_counters_mali_tSIx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"TSIx_MESSAGES_SENT",
+	"TSIx_MESSAGES_RECEIVED",
+	"TSIx_GPU_ACTIVE",
+	"TSIx_IRQ_ACTIVE",
+	"TSIx_JS0_JOBS",
+	"TSIx_JS0_TASKS",
+	"TSIx_JS0_ACTIVE",
+	"",
+	"TSIx_JS0_WAIT_READ",
+	"TSIx_JS0_WAIT_ISSUE",
+	"TSIx_JS0_WAIT_DEPEND",
+	"TSIx_JS0_WAIT_FINISH",
+	"TSIx_JS1_JOBS",
+	"TSIx_JS1_TASKS",
+	"TSIx_JS1_ACTIVE",
+	"",
+	"TSIx_JS1_WAIT_READ",
+	"TSIx_JS1_WAIT_ISSUE",
+	"TSIx_JS1_WAIT_DEPEND",
+	"TSIx_JS1_WAIT_FINISH",
+	"TSIx_JS2_JOBS",
+	"TSIx_JS2_TASKS",
+	"TSIx_JS2_ACTIVE",
+	"",
+	"TSIx_JS2_WAIT_READ",
+	"TSIx_JS2_WAIT_ISSUE",
+	"TSIx_JS2_WAIT_DEPEND",
+	"TSIx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"TSIx_TILER_ACTIVE",
+	"TSIx_JOBS_PROCESSED",
+	"TSIx_TRIANGLES",
+	"TSIx_LINES",
+	"TSIx_POINTS",
+	"TSIx_FRONT_FACING",
+	"TSIx_BACK_FACING",
+	"TSIx_PRIM_VISIBLE",
+	"TSIx_PRIM_CULLED",
+	"TSIx_PRIM_CLIPPED",
+	"TSIx_PRIM_SAT_CULLED",
+	"TSIx_BIN_ALLOC_INIT",
+	"TSIx_BIN_ALLOC_OVERFLOW",
+	"TSIx_BUS_READ",
+	"",
+	"TSIx_BUS_WRITE",
+	"TSIx_LOADING_DESC",
+	"TSIx_IDVS_POS_SHAD_REQ",
+	"TSIx_IDVS_POS_SHAD_WAIT",
+	"TSIx_IDVS_POS_SHAD_STALL",
+	"TSIx_IDVS_POS_FIFO_FULL",
+	"TSIx_PREFETCH_STALL",
+	"TSIx_VCACHE_HIT",
+	"TSIx_VCACHE_MISS",
+	"TSIx_VCACHE_LINE_WAIT",
+	"TSIx_VFETCH_POS_READ_WAIT",
+	"TSIx_VFETCH_VERTEX_WAIT",
+	"TSIx_VFETCH_STALL",
+	"TSIx_PRIMASSY_STALL",
+	"TSIx_BBOX_GEN_STALL",
+	"TSIx_IDVS_VBU_HIT",
+	"TSIx_IDVS_VBU_MISS",
+	"TSIx_IDVS_VBU_LINE_DEALLOCATE",
+	"TSIx_IDVS_VAR_SHAD_REQ",
+	"TSIx_IDVS_VAR_SHAD_STALL",
+	"TSIx_BINNER_STALL",
+	"TSIx_ITER_STALL",
+	"TSIx_COMPRESS_MISS",
+	"TSIx_COMPRESS_STALL",
+	"TSIx_PCACHE_HIT",
+	"TSIx_PCACHE_MISS",
+	"TSIx_PCACHE_MISS_STALL",
+	"TSIx_PCACHE_EVICT_STALL",
+	"TSIx_PMGR_PTR_WR_STALL",
+	"TSIx_PMGR_PTR_RD_STALL",
+	"TSIx_PMGR_CMD_WR_STALL",
+	"TSIx_WRBUF_ACTIVE",
+	"TSIx_WRBUF_HIT",
+	"TSIx_WRBUF_MISS",
+	"TSIx_WRBUF_NO_FREE_LINE_STALL",
+	"TSIx_WRBUF_NO_AXI_ID_STALL",
+	"TSIx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"TSIx_UTLB_TRANS",
+	"TSIx_UTLB_TRANS_HIT",
+	"TSIx_UTLB_TRANS_STALL",
+	"TSIx_UTLB_TRANS_MISS_DELAY",
+	"TSIx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"TSIx_FRAG_ACTIVE",
+	"TSIx_FRAG_PRIMITIVES",
+	"TSIx_FRAG_PRIM_RAST",
+	"TSIx_FRAG_FPK_ACTIVE",
+	"TSIx_FRAG_STARVING",
+	"TSIx_FRAG_WARPS",
+	"TSIx_FRAG_PARTIAL_WARPS",
+	"TSIx_FRAG_QUADS_RAST",
+	"TSIx_FRAG_QUADS_EZS_TEST",
+	"TSIx_FRAG_QUADS_EZS_UPDATE",
+	"TSIx_FRAG_QUADS_EZS_KILL",
+	"TSIx_FRAG_LZS_TEST",
+	"TSIx_FRAG_LZS_KILL",
+	"",
+	"TSIx_FRAG_PTILES",
+	"TSIx_FRAG_TRANS_ELIM",
+	"TSIx_QUAD_FPK_KILLER",
+	"",
+	"TSIx_COMPUTE_ACTIVE",
+	"TSIx_COMPUTE_TASKS",
+	"TSIx_COMPUTE_WARPS",
+	"TSIx_COMPUTE_STARVING",
+	"TSIx_EXEC_CORE_ACTIVE",
+	"TSIx_EXEC_ACTIVE",
+	"TSIx_EXEC_INSTR_COUNT",
+	"TSIx_EXEC_INSTR_DIVERGED",
+	"TSIx_EXEC_INSTR_STARVING",
+	"TSIx_ARITH_INSTR_SINGLE_FMA",
+	"TSIx_ARITH_INSTR_DOUBLE",
+	"TSIx_ARITH_INSTR_MSG",
+	"TSIx_ARITH_INSTR_MSG_ONLY",
+	"TSIx_TEX_MSGI_NUM_QUADS",
+	"TSIx_TEX_DFCH_NUM_PASSES",
+	"TSIx_TEX_DFCH_NUM_PASSES_MISS",
+	"TSIx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+	"TSIx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+	"TSIx_TEX_TFCH_NUM_LINES_FETCHED",
+	"TSIx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+	"TSIx_TEX_TFCH_NUM_OPERATIONS",
+	"TSIx_TEX_FILT_NUM_OPERATIONS",
+	"TSIx_LS_MEM_READ_FULL",
+	"TSIx_LS_MEM_READ_SHORT",
+	"TSIx_LS_MEM_WRITE_FULL",
+	"TSIx_LS_MEM_WRITE_SHORT",
+	"TSIx_LS_MEM_ATOMIC",
+	"TSIx_VARY_INSTR",
+	"TSIx_VARY_SLOT_32",
+	"TSIx_VARY_SLOT_16",
+	"TSIx_ATTR_INSTR",
+	"TSIx_ARITH_INSTR_FP_MUL",
+	"TSIx_BEATS_RD_FTC",
+	"TSIx_BEATS_RD_FTC_EXT",
+	"TSIx_BEATS_RD_LSC",
+	"TSIx_BEATS_RD_LSC_EXT",
+	"TSIx_BEATS_RD_TEX",
+	"TSIx_BEATS_RD_TEX_EXT",
+	"TSIx_BEATS_RD_OTHER",
+	"TSIx_BEATS_WR_LSC_OTHER",
+	"TSIx_BEATS_WR_TIB",
+	"TSIx_BEATS_WR_LSC_WB",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"TSIx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TSIx_L2_RD_MSG_IN",
+	"TSIx_L2_RD_MSG_IN_STALL",
+	"TSIx_L2_WR_MSG_IN",
+	"TSIx_L2_WR_MSG_IN_STALL",
+	"TSIx_L2_SNP_MSG_IN",
+	"TSIx_L2_SNP_MSG_IN_STALL",
+	"TSIx_L2_RD_MSG_OUT",
+	"TSIx_L2_RD_MSG_OUT_STALL",
+	"TSIx_L2_WR_MSG_OUT",
+	"TSIx_L2_ANY_LOOKUP",
+	"TSIx_L2_READ_LOOKUP",
+	"TSIx_L2_WRITE_LOOKUP",
+	"TSIx_L2_EXT_SNOOP_LOOKUP",
+	"TSIx_L2_EXT_READ",
+	"TSIx_L2_EXT_READ_NOSNP",
+	"TSIx_L2_EXT_READ_UNIQUE",
+	"TSIx_L2_EXT_READ_BEATS",
+	"TSIx_L2_EXT_AR_STALL",
+	"TSIx_L2_EXT_AR_CNT_Q1",
+	"TSIx_L2_EXT_AR_CNT_Q2",
+	"TSIx_L2_EXT_AR_CNT_Q3",
+	"TSIx_L2_EXT_RRESP_0_127",
+	"TSIx_L2_EXT_RRESP_128_191",
+	"TSIx_L2_EXT_RRESP_192_255",
+	"TSIx_L2_EXT_RRESP_256_319",
+	"TSIx_L2_EXT_RRESP_320_383",
+	"TSIx_L2_EXT_WRITE",
+	"TSIx_L2_EXT_WRITE_NOSNP_FULL",
+	"TSIx_L2_EXT_WRITE_NOSNP_PTL",
+	"TSIx_L2_EXT_WRITE_SNP_FULL",
+	"TSIx_L2_EXT_WRITE_SNP_PTL",
+	"TSIx_L2_EXT_WRITE_BEATS",
+	"TSIx_L2_EXT_W_STALL",
+	"TSIx_L2_EXT_AW_CNT_Q1",
+	"TSIx_L2_EXT_AW_CNT_Q2",
+	"TSIx_L2_EXT_AW_CNT_Q3",
+	"TSIx_L2_EXT_SNOOP",
+	"TSIx_L2_EXT_SNOOP_STALL",
+	"TSIx_L2_EXT_SNOOP_RESP_CLEAN",
+	"TSIx_L2_EXT_SNOOP_RESP_DATA",
+	"TSIx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TSIX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_ttrx.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_ttrx.h
new file mode 100644
index 0000000..c1e315b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_ttrx.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated; it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TTRX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TTRX_H_
+
+static const char * const hardware_counters_mali_tTRx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"TTRx_MESSAGES_SENT",
+	"TTRx_MESSAGES_RECEIVED",
+	"TTRx_GPU_ACTIVE",
+	"TTRx_IRQ_ACTIVE",
+	"TTRx_JS0_JOBS",
+	"TTRx_JS0_TASKS",
+	"TTRx_JS0_ACTIVE",
+	"",
+	"TTRx_JS0_WAIT_READ",
+	"TTRx_JS0_WAIT_ISSUE",
+	"TTRx_JS0_WAIT_DEPEND",
+	"TTRx_JS0_WAIT_FINISH",
+	"TTRx_JS1_JOBS",
+	"TTRx_JS1_TASKS",
+	"TTRx_JS1_ACTIVE",
+	"",
+	"TTRx_JS1_WAIT_READ",
+	"TTRx_JS1_WAIT_ISSUE",
+	"TTRx_JS1_WAIT_DEPEND",
+	"TTRx_JS1_WAIT_FINISH",
+	"TTRx_JS2_JOBS",
+	"TTRx_JS2_TASKS",
+	"TTRx_JS2_ACTIVE",
+	"",
+	"TTRx_JS2_WAIT_READ",
+	"TTRx_JS2_WAIT_ISSUE",
+	"TTRx_JS2_WAIT_DEPEND",
+	"TTRx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"TTRx_TILER_ACTIVE",
+	"TTRx_JOBS_PROCESSED",
+	"TTRx_TRIANGLES",
+	"TTRx_LINES",
+	"TTRx_POINTS",
+	"TTRx_FRONT_FACING",
+	"TTRx_BACK_FACING",
+	"TTRx_PRIM_VISIBLE",
+	"TTRx_PRIM_CULLED",
+	"TTRx_PRIM_CLIPPED",
+	"TTRx_PRIM_SAT_CULLED",
+	"TTRx_BIN_ALLOC_INIT",
+	"TTRx_BIN_ALLOC_OVERFLOW",
+	"TTRx_BUS_READ",
+	"",
+	"TTRx_BUS_WRITE",
+	"TTRx_LOADING_DESC",
+	"TTRx_IDVS_POS_SHAD_REQ",
+	"TTRx_IDVS_POS_SHAD_WAIT",
+	"TTRx_IDVS_POS_SHAD_STALL",
+	"TTRx_IDVS_POS_FIFO_FULL",
+	"TTRx_PREFETCH_STALL",
+	"TTRx_VCACHE_HIT",
+	"TTRx_VCACHE_MISS",
+	"TTRx_VCACHE_LINE_WAIT",
+	"TTRx_VFETCH_POS_READ_WAIT",
+	"TTRx_VFETCH_VERTEX_WAIT",
+	"TTRx_VFETCH_STALL",
+	"TTRx_PRIMASSY_STALL",
+	"TTRx_BBOX_GEN_STALL",
+	"TTRx_IDVS_VBU_HIT",
+	"TTRx_IDVS_VBU_MISS",
+	"TTRx_IDVS_VBU_LINE_DEALLOCATE",
+	"TTRx_IDVS_VAR_SHAD_REQ",
+	"TTRx_IDVS_VAR_SHAD_STALL",
+	"TTRx_BINNER_STALL",
+	"TTRx_ITER_STALL",
+	"TTRx_COMPRESS_MISS",
+	"TTRx_COMPRESS_STALL",
+	"TTRx_PCACHE_HIT",
+	"TTRx_PCACHE_MISS",
+	"TTRx_PCACHE_MISS_STALL",
+	"TTRx_PCACHE_EVICT_STALL",
+	"TTRx_PMGR_PTR_WR_STALL",
+	"TTRx_PMGR_PTR_RD_STALL",
+	"TTRx_PMGR_CMD_WR_STALL",
+	"TTRx_WRBUF_ACTIVE",
+	"TTRx_WRBUF_HIT",
+	"TTRx_WRBUF_MISS",
+	"TTRx_WRBUF_NO_FREE_LINE_STALL",
+	"TTRx_WRBUF_NO_AXI_ID_STALL",
+	"TTRx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"TTRx_UTLB_TRANS",
+	"TTRx_UTLB_TRANS_HIT",
+	"TTRx_UTLB_TRANS_STALL",
+	"TTRx_UTLB_TRANS_MISS_DELAY",
+	"TTRx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"TTRx_FRAG_ACTIVE",
+	"TTRx_FRAG_PRIMITIVES",
+	"TTRx_FRAG_PRIM_RAST",
+	"TTRx_FRAG_FPK_ACTIVE",
+	"TTRx_FRAG_STARVING",
+	"TTRx_FRAG_WARPS",
+	"TTRx_FRAG_PARTIAL_WARPS",
+	"TTRx_FRAG_QUADS_RAST",
+	"TTRx_FRAG_QUADS_EZS_TEST",
+	"TTRx_FRAG_QUADS_EZS_UPDATE",
+	"TTRx_FRAG_QUADS_EZS_KILL",
+	"TTRx_FRAG_LZS_TEST",
+	"TTRx_FRAG_LZS_KILL",
+	"TTRx_WARP_REG_SIZE_64",
+	"TTRx_FRAG_PTILES",
+	"TTRx_FRAG_TRANS_ELIM",
+	"TTRx_QUAD_FPK_KILLER",
+	"TTRx_FULL_QUAD_WARPS",
+	"TTRx_COMPUTE_ACTIVE",
+	"TTRx_COMPUTE_TASKS",
+	"TTRx_COMPUTE_WARPS",
+	"TTRx_COMPUTE_STARVING",
+	"TTRx_EXEC_CORE_ACTIVE",
+	"TTRx_EXEC_INSTR_FMA",
+	"TTRx_EXEC_INSTR_CVT",
+	"TTRx_EXEC_INSTR_SFU",
+	"TTRx_EXEC_INSTR_MSG",
+	"TTRx_EXEC_INSTR_DIVERGED",
+	"TTRx_EXEC_ICACHE_MISS",
+	"TTRx_EXEC_STARVE_ARITH",
+	"TTRx_CALL_BLEND_SHADER",
+	"TTRx_TEX_MSGI_NUM_QUADS",
+	"TTRx_TEX_DFCH_NUM_PASSES",
+	"TTRx_TEX_DFCH_NUM_PASSES_MISS",
+	"TTRx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+	"TTRx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+	"TTRx_TEX_TFCH_NUM_LINES_FETCHED",
+	"TTRx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+	"TTRx_TEX_TFCH_NUM_OPERATIONS",
+	"TTRx_TEX_FILT_NUM_OPERATIONS",
+	"TTRx_LS_MEM_READ_FULL",
+	"TTRx_LS_MEM_READ_SHORT",
+	"TTRx_LS_MEM_WRITE_FULL",
+	"TTRx_LS_MEM_WRITE_SHORT",
+	"TTRx_LS_MEM_ATOMIC",
+	"TTRx_VARY_INSTR",
+	"TTRx_VARY_SLOT_32",
+	"TTRx_VARY_SLOT_16",
+	"TTRx_ATTR_INSTR",
+	"TTRx_ARITH_INSTR_FP_MUL",
+	"TTRx_BEATS_RD_FTC",
+	"TTRx_BEATS_RD_FTC_EXT",
+	"TTRx_BEATS_RD_LSC",
+	"TTRx_BEATS_RD_LSC_EXT",
+	"TTRx_BEATS_RD_TEX",
+	"TTRx_BEATS_RD_TEX_EXT",
+	"TTRx_BEATS_RD_OTHER",
+	"TTRx_BEATS_WR_LSC_OTHER",
+	"TTRx_BEATS_WR_TIB",
+	"TTRx_BEATS_WR_LSC_WB",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"TTRx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TTRx_L2_RD_MSG_IN",
+	"TTRx_L2_RD_MSG_IN_STALL",
+	"TTRx_L2_WR_MSG_IN",
+	"TTRx_L2_WR_MSG_IN_STALL",
+	"TTRx_L2_SNP_MSG_IN",
+	"TTRx_L2_SNP_MSG_IN_STALL",
+	"TTRx_L2_RD_MSG_OUT",
+	"TTRx_L2_RD_MSG_OUT_STALL",
+	"TTRx_L2_WR_MSG_OUT",
+	"TTRx_L2_ANY_LOOKUP",
+	"TTRx_L2_READ_LOOKUP",
+	"TTRx_L2_WRITE_LOOKUP",
+	"TTRx_L2_EXT_SNOOP_LOOKUP",
+	"TTRx_L2_EXT_READ",
+	"TTRx_L2_EXT_READ_NOSNP",
+	"TTRx_L2_EXT_READ_UNIQUE",
+	"TTRx_L2_EXT_READ_BEATS",
+	"TTRx_L2_EXT_AR_STALL",
+	"TTRx_L2_EXT_AR_CNT_Q1",
+	"TTRx_L2_EXT_AR_CNT_Q2",
+	"TTRx_L2_EXT_AR_CNT_Q3",
+	"TTRx_L2_EXT_RRESP_0_127",
+	"TTRx_L2_EXT_RRESP_128_191",
+	"TTRx_L2_EXT_RRESP_192_255",
+	"TTRx_L2_EXT_RRESP_256_319",
+	"TTRx_L2_EXT_RRESP_320_383",
+	"TTRx_L2_EXT_WRITE",
+	"TTRx_L2_EXT_WRITE_NOSNP_FULL",
+	"TTRx_L2_EXT_WRITE_NOSNP_PTL",
+	"TTRx_L2_EXT_WRITE_SNP_FULL",
+	"TTRx_L2_EXT_WRITE_SNP_PTL",
+	"TTRx_L2_EXT_WRITE_BEATS",
+	"TTRx_L2_EXT_W_STALL",
+	"TTRx_L2_EXT_AW_CNT_Q1",
+	"TTRx_L2_EXT_AW_CNT_Q2",
+	"TTRx_L2_EXT_AW_CNT_Q3",
+	"TTRx_L2_EXT_SNOOP",
+	"TTRx_L2_EXT_SNOOP_STALL",
+	"TTRx_L2_EXT_SNOOP_RESP_CLEAN",
+	"TTRx_L2_EXT_SNOOP_RESP_DATA",
+	"TTRx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TTRX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h b/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h
new file mode 100644
index 0000000..a38e886
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h
@@ -0,0 +1,115 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+#ifndef _KBASE_GPU_ID_H_
+#define _KBASE_GPU_ID_H_
+
+/* GPU_ID register */
+#define GPU_ID_VERSION_STATUS_SHIFT       0
+#define GPU_ID_VERSION_MINOR_SHIFT        4
+#define GPU_ID_VERSION_MAJOR_SHIFT        12
+#define GPU_ID_VERSION_PRODUCT_ID_SHIFT   16
+#define GPU_ID_VERSION_STATUS             (0xFu  << GPU_ID_VERSION_STATUS_SHIFT)
+#define GPU_ID_VERSION_MINOR              (0xFFu << GPU_ID_VERSION_MINOR_SHIFT)
+#define GPU_ID_VERSION_MAJOR              (0xFu  << GPU_ID_VERSION_MAJOR_SHIFT)
+#define GPU_ID_VERSION_PRODUCT_ID  (0xFFFFu << GPU_ID_VERSION_PRODUCT_ID_SHIFT)
+
+#define GPU_ID2_VERSION_STATUS_SHIFT      0
+#define GPU_ID2_VERSION_MINOR_SHIFT       4
+#define GPU_ID2_VERSION_MAJOR_SHIFT       12
+#define GPU_ID2_PRODUCT_MAJOR_SHIFT       16
+#define GPU_ID2_ARCH_REV_SHIFT            20
+#define GPU_ID2_ARCH_MINOR_SHIFT          24
+#define GPU_ID2_ARCH_MAJOR_SHIFT          28
+#define GPU_ID2_VERSION_STATUS            (0xFu << GPU_ID2_VERSION_STATUS_SHIFT)
+#define GPU_ID2_VERSION_MINOR             (0xFFu << GPU_ID2_VERSION_MINOR_SHIFT)
+#define GPU_ID2_VERSION_MAJOR             (0xFu << GPU_ID2_VERSION_MAJOR_SHIFT)
+#define GPU_ID2_PRODUCT_MAJOR             (0xFu << GPU_ID2_PRODUCT_MAJOR_SHIFT)
+#define GPU_ID2_ARCH_REV                  (0xFu << GPU_ID2_ARCH_REV_SHIFT)
+#define GPU_ID2_ARCH_MINOR                (0xFu << GPU_ID2_ARCH_MINOR_SHIFT)
+#define GPU_ID2_ARCH_MAJOR                (0xFu << GPU_ID2_ARCH_MAJOR_SHIFT)
+#define GPU_ID2_PRODUCT_MODEL  (GPU_ID2_ARCH_MAJOR | GPU_ID2_PRODUCT_MAJOR)
+#define GPU_ID2_VERSION        (GPU_ID2_VERSION_MAJOR | \
+								GPU_ID2_VERSION_MINOR | \
+								GPU_ID2_VERSION_STATUS)
+
+/* Helper macro to create a partial GPU_ID (new format) that defines
+   a product ignoring its version. */
+#define GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, product_major) \
+		((((u32)arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT)  | \
+		 (((u32)arch_minor) << GPU_ID2_ARCH_MINOR_SHIFT)  | \
+		 (((u32)arch_rev) << GPU_ID2_ARCH_REV_SHIFT)      | \
+		 (((u32)product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
+
+/* Helper macro to create a partial GPU_ID (new format) that specifies the
+   revision (major, minor, status) of a product */
+#define GPU_ID2_VERSION_MAKE(version_major, version_minor, version_status) \
+		((((u32)version_major) << GPU_ID2_VERSION_MAJOR_SHIFT)  | \
+		 (((u32)version_minor) << GPU_ID2_VERSION_MINOR_SHIFT)  | \
+		 (((u32)version_status) << GPU_ID2_VERSION_STATUS_SHIFT))
+
+/* Helper macro to create a complete GPU_ID (new format) */
+#define GPU_ID2_MAKE(arch_major, arch_minor, arch_rev, product_major, \
+	version_major, version_minor, version_status) \
+		(GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, \
+			product_major) | \
+		 GPU_ID2_VERSION_MAKE(version_major, version_minor,     \
+			version_status))
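+
+/* For instance, GPU_ID2_MAKE(6, 0, 0, 0, 0, 1, 0) encodes an arch 6.0,
+   product-major 0 device (tMIx, per the model list below) at version r0p1
+   with status 0; the values here are purely illustrative. */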
+
+/* Helper macro to create a partial GPU_ID (new format) that identifies
+   a particular GPU model by its arch_major and product_major. */
+#define GPU_ID2_MODEL_MAKE(arch_major, product_major) \
+		((((u32)arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT)  | \
+		(((u32)product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
+
+/* Strip off the non-relevant bits from a product_id value and make it suitable
+   for comparison against the GPU_ID2_PRODUCT_xxx values which identify a GPU
+   model. */
+#define GPU_ID2_MODEL_MATCH_VALUE(product_id) \
+		((((u32)product_id) << GPU_ID2_PRODUCT_MAJOR_SHIFT) & \
+		    GPU_ID2_PRODUCT_MODEL)
+
+#define GPU_ID2_PRODUCT_TMIX              GPU_ID2_MODEL_MAKE(6, 0)
+#define GPU_ID2_PRODUCT_THEX              GPU_ID2_MODEL_MAKE(6, 1)
+#define GPU_ID2_PRODUCT_TSIX              GPU_ID2_MODEL_MAKE(7, 0)
+#define GPU_ID2_PRODUCT_TDVX              GPU_ID2_MODEL_MAKE(7, 3)
+#define GPU_ID2_PRODUCT_TNOX              GPU_ID2_MODEL_MAKE(7, 1)
+#define GPU_ID2_PRODUCT_TGOX              GPU_ID2_MODEL_MAKE(7, 2)
+#define GPU_ID2_PRODUCT_TTRX              GPU_ID2_MODEL_MAKE(9, 0)
+#define GPU_ID2_PRODUCT_TNAX              GPU_ID2_MODEL_MAKE(9, 1)
+#define GPU_ID2_PRODUCT_TBEX              GPU_ID2_MODEL_MAKE(9, 2)
+#define GPU_ID2_PRODUCT_LBEX              GPU_ID2_MODEL_MAKE(9, 4)
+#define GPU_ID2_PRODUCT_TULX              GPU_ID2_MODEL_MAKE(10, 0)
+#define GPU_ID2_PRODUCT_TDUX              GPU_ID2_MODEL_MAKE(10, 1)
+#define GPU_ID2_PRODUCT_TODX              GPU_ID2_MODEL_MAKE(10, 2)
+#define GPU_ID2_PRODUCT_TIDX              GPU_ID2_MODEL_MAKE(10, 3)
+#define GPU_ID2_PRODUCT_TVAX              GPU_ID2_MODEL_MAKE(10, 4)
+#define GPU_ID2_PRODUCT_LODX              GPU_ID2_MODEL_MAKE(10, 5)
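+
+/* Illustrative use: a raw GPU_ID2 register value is matched against one of
+   the models above as done in mali_kbase_gpuprops.c, e.g.
+   (gpu_id & GPU_ID2_PRODUCT_MODEL) == GPU_ID2_PRODUCT_TDUX. */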
+
+/* Helper macro to create a GPU_ID assuming valid values for id, major,
+   minor, status */
+#define GPU_ID_MAKE(id, major, minor, status) \
+		((((u32)id) << GPU_ID_VERSION_PRODUCT_ID_SHIFT) | \
+		(((u32)major) << GPU_ID_VERSION_MAJOR_SHIFT) |   \
+		(((u32)minor) << GPU_ID_VERSION_MINOR_SHIFT) |   \
+		(((u32)status) << GPU_ID_VERSION_STATUS_SHIFT))
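+
+/* For example, GPU_ID_MAKE(0x0750, 0, 1, 0) packs product ID 0x0750 at
+   version r0p1 with status 0; the values are illustrative only. */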
+
+#endif /* _KBASE_GPU_ID_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c
new file mode 100644
index 0000000..2c42f5c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c
@@ -0,0 +1,103 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017, 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+
+#ifdef CONFIG_DEBUG_FS
+/** Show callback for the @c gpu_memory debugfs file.
+ *
+ * This function is called to get the contents of the @c gpu_memory debugfs
+ * file. This is a report of current gpu memory usage.
+ *
+ * @param sfile The debugfs entry
+ * @param data Data associated with the entry
+ *
+ * @return 0 if the data was successfully printed to the debugfs entry,
+ *         -1 if an error was encountered
+ */
+
+static int kbasep_gpu_memory_seq_show(struct seq_file *sfile, void *data)
+{
+	struct list_head *entry;
+	const struct list_head *kbdev_list;
+
+	kbdev_list = kbase_dev_list_get();
+	list_for_each(entry, kbdev_list) {
+		struct kbase_device *kbdev = NULL;
+		struct kbase_context *kctx;
+
+		kbdev = list_entry(entry, struct kbase_device, entry);
+		/* output the total memory usage and cap for this device */
+		seq_printf(sfile, "%-16s  %10u\n",
+				kbdev->devname,
+				atomic_read(&(kbdev->memdev.used_pages)));
+		mutex_lock(&kbdev->kctx_list_lock);
+		list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link) {
+			/* output the memory usage and cap for each kctx
+			 * opened on this device */
+			seq_printf(sfile, "  %s-0x%p %10u\n",
+				"kctx",
+				kctx,
+				atomic_read(&(kctx->used_pages)));
+		}
+		mutex_unlock(&kbdev->kctx_list_lock);
+	}
+	kbase_dev_list_put(kbdev_list);
+	return 0;
+}
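+
+/*
+ * Illustrative output of the file, following the seq_printf() formats above
+ * (device name, hashed pointer and page counts are examples only):
+ *
+ *   mali0                  4096
+ *     kctx-0x0000000012345678       1024
+ */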
+
+/*
+ *  File operations related to debugfs entry for gpu_memory
+ */
+static int kbasep_gpu_memory_debugfs_open(struct inode *in, struct file *file)
+{
+	return single_open(file, kbasep_gpu_memory_seq_show, NULL);
+}
+
+static const struct file_operations kbasep_gpu_memory_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = kbasep_gpu_memory_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ *  Initialize debugfs entry for gpu_memory
+ */
+void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev)
+{
+	debugfs_create_file("gpu_memory", S_IRUGO,
+			kbdev->mali_debugfs_directory, NULL,
+			&kbasep_gpu_memory_debugfs_fops);
+	return;
+}
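+
+/*
+ * The resulting file is read-only from user space; with the default debugfs
+ * mount it typically appears as /sys/kernel/debug/mali0/gpu_memory (the path
+ * is an example and depends on the device name).
+ */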
+
+#else
+/*
+ * Stub functions for when debugfs is disabled
+ */
+void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev)
+{
+	return;
+}
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h
new file mode 100644
index 0000000..28a871a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_gpu_memory_debugfs.h
+ * Header file for gpu_memory entry in debugfs
+ *
+ */
+
+#ifndef _KBASE_GPU_MEMORY_DEBUGFS_H
+#define _KBASE_GPU_MEMORY_DEBUGFS_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+/**
+ * @brief Initialize gpu_memory debugfs entry
+ */
+void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev);
+
+#endif  /*_KBASE_GPU_MEMORY_DEBUGFS_H*/
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c
new file mode 100644
index 0000000..f6b70bd
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c
@@ -0,0 +1,602 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Base kernel property query APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_gpuprops.h>
+#include <mali_kbase_hwaccess_gpuprops.h>
+#include "mali_kbase_ioctl.h"
+#include <linux/clk.h>
+#include <mali_kbase_pm_internal.h>
+#include <linux/of_platform.h>
+#include <linux/moduleparam.h>
+
+/**
+ * KBASE_UBFX32 - Extracts bits from a 32-bit bitfield.
+ * @value:  The value from which to extract bits.
+ * @offset: The first bit to extract (0 being the LSB).
+ * @size:   The number of bits to extract.
+ *
+ * Context: @offset + @size <= 32.
+ *
+ * Return: Bits [@offset, @offset + @size) from @value.
+ */
+/* from mali_cdsb.h */
+#define KBASE_UBFX32(value, offset, size) \
+	(((u32)(value) >> (u32)(offset)) & (u32)((1ULL << (u32)(size)) - 1))
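+
+/*
+ * Illustrative use, not part of this patch: KBASE_UBFX32(0xABCD1234, 16, 16)
+ * evaluates to 0xABCD, i.e. bits [16, 32) of the input value.
+ */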
+
+static void kbase_gpuprops_construct_coherent_groups(base_gpu_props * const props)
+{
+	struct mali_base_gpu_coherent_group *current_group;
+	u64 group_present;
+	u64 group_mask;
+	u64 first_set, first_set_prev;
+	u32 num_groups = 0;
+
+	KBASE_DEBUG_ASSERT(NULL != props);
+
+	props->coherency_info.coherency = props->raw_props.mem_features;
+	props->coherency_info.num_core_groups = hweight64(props->raw_props.l2_present);
+
+	if (props->coherency_info.coherency & GROUPS_L2_COHERENT) {
+		/* Group is l2 coherent */
+		group_present = props->raw_props.l2_present;
+	} else {
+		/* Group is l1 coherent */
+		group_present = props->raw_props.shader_present;
+	}
+
+	/*
+	 * The coherent group mask can be computed from the l2 present
+	 * register.
+	 *
+	 * For the coherent group n:
+	 * group_mask[n] = (first_set[n] - 1) & ~(first_set[n-1] - 1)
+	 * where first_set is group_present with only its nth set-bit kept
+	 * (i.e. the position from where a new group starts).
+	 *
+	 * For instance if the groups are l2 coherent and l2_present=0x0..01111:
+	 * The first mask is:
+	 * group_mask[1] = (first_set[1] - 1) & ~(first_set[0] - 1)
+	 *               = (0x0..010     - 1) & ~(0x0..01      - 1)
+	 *               =  0x0..00f
+	 * The second mask is:
+	 * group_mask[2] = (first_set[2] - 1) & ~(first_set[1] - 1)
+	 *               = (0x0..100     - 1) & ~(0x0..010     - 1)
+	 *               =  0x0..0f0
+	 * And so on until all the bits from group_present have been cleared
+	 * (i.e. there is no group left).
+	 */
+
+	current_group = props->coherency_info.group;
+	first_set = group_present & ~(group_present - 1);
+
+	while (group_present != 0 && num_groups < BASE_MAX_COHERENT_GROUPS) {
+		group_present -= first_set;	/* Clear the current group bit */
+		first_set_prev = first_set;
+
+		first_set = group_present & ~(group_present - 1);
+		group_mask = (first_set - 1) & ~(first_set_prev - 1);
+
+		/* Populate the coherent_group structure for each group */
+		current_group->core_mask = group_mask & props->raw_props.shader_present;
+		current_group->num_cores = hweight64(current_group->core_mask);
+
+		num_groups++;
+		current_group++;
+	}
+
+	if (group_present != 0)
+		pr_warn("Too many coherent groups (keeping only %d groups).\n", BASE_MAX_COHERENT_GROUPS);
+
+	props->coherency_info.num_groups = num_groups;
+}
+
+/**
+ * kbase_gpuprops_get_props - Get the GPU configuration
+ * @gpu_props: The &base_gpu_props structure
+ * @kbdev: The &struct kbase_device structure for the device
+ *
+ * Fill the &base_gpu_props structure with values from the GPU configuration
+ * registers. Only the raw properties are filled in this function
+ */
+static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev)
+{
+	struct kbase_gpuprops_regdump regdump;
+	int i;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	KBASE_DEBUG_ASSERT(NULL != gpu_props);
+
+	/* Dump relevant registers */
+	kbase_backend_gpuprops_get(kbdev, &regdump);
+
+	gpu_props->raw_props.gpu_id = regdump.gpu_id;
+	gpu_props->raw_props.tiler_features = regdump.tiler_features;
+	gpu_props->raw_props.mem_features = regdump.mem_features;
+	gpu_props->raw_props.mmu_features = regdump.mmu_features;
+	gpu_props->raw_props.l2_features = regdump.l2_features;
+	gpu_props->raw_props.core_features = regdump.core_features;
+
+	gpu_props->raw_props.as_present = regdump.as_present;
+	gpu_props->raw_props.js_present = regdump.js_present;
+	gpu_props->raw_props.shader_present =
+		((u64) regdump.shader_present_hi << 32) +
+		regdump.shader_present_lo;
+	gpu_props->raw_props.tiler_present =
+		((u64) regdump.tiler_present_hi << 32) +
+		regdump.tiler_present_lo;
+	gpu_props->raw_props.l2_present =
+		((u64) regdump.l2_present_hi << 32) +
+		regdump.l2_present_lo;
+	gpu_props->raw_props.stack_present =
+		((u64) regdump.stack_present_hi << 32) +
+		regdump.stack_present_lo;
+
+	for (i = 0; i < GPU_MAX_JOB_SLOTS; i++)
+		gpu_props->raw_props.js_features[i] = regdump.js_features[i];
+
+	for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+		gpu_props->raw_props.texture_features[i] = regdump.texture_features[i];
+
+	gpu_props->raw_props.thread_max_barrier_size = regdump.thread_max_barrier_size;
+	gpu_props->raw_props.thread_max_threads = regdump.thread_max_threads;
+	gpu_props->raw_props.thread_max_workgroup_size = regdump.thread_max_workgroup_size;
+	gpu_props->raw_props.thread_features = regdump.thread_features;
+	gpu_props->raw_props.thread_tls_alloc = regdump.thread_tls_alloc;
+}
+
+void kbase_gpuprops_update_core_props_gpu_id(base_gpu_props * const gpu_props)
+{
+	gpu_props->core_props.version_status =
+		KBASE_UBFX32(gpu_props->raw_props.gpu_id, 0U, 4);
+	gpu_props->core_props.minor_revision =
+		KBASE_UBFX32(gpu_props->raw_props.gpu_id, 4U, 8);
+	gpu_props->core_props.major_revision =
+		KBASE_UBFX32(gpu_props->raw_props.gpu_id, 12U, 4);
+	gpu_props->core_props.product_id =
+		KBASE_UBFX32(gpu_props->raw_props.gpu_id, 16U, 16);
+}
+
+/**
+ * kbase_gpuprops_calculate_props - Calculate the derived properties
+ * @gpu_props: The &base_gpu_props structure
+ * @kbdev:     The &struct kbase_device structure for the device
+ *
+ * Fill the &base_gpu_props structure with values derived from the GPU
+ * configuration registers
+ */
+static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev)
+{
+	int i;
+	u32 gpu_id;
+	u32 product_id;
+
+	/* Populate the base_gpu_props structure */
+	kbase_gpuprops_update_core_props_gpu_id(gpu_props);
+	gpu_props->core_props.log2_program_counter_size = KBASE_GPU_PC_SIZE_LOG2;
+#if KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE
+	gpu_props->core_props.gpu_available_memory_size = totalram_pages << PAGE_SHIFT;
+#else
+	gpu_props->core_props.gpu_available_memory_size =
+		totalram_pages() << PAGE_SHIFT;
+#endif
+
+	gpu_props->core_props.num_exec_engines =
+		KBASE_UBFX32(gpu_props->raw_props.core_features, 0, 4);
+
+	for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+		gpu_props->core_props.texture_features[i] = gpu_props->raw_props.texture_features[i];
+
+	gpu_props->l2_props.log2_line_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 0U, 8);
+	gpu_props->l2_props.log2_cache_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 16U, 8);
+
+	/* A field holding the number of L2 slices was added to the
+	 * MEM_FEATURES register in t76x. The code below assumes that on older
+	 * GPUs the reserved bits read as zero. */
+	gpu_props->l2_props.num_l2_slices =
+		KBASE_UBFX32(gpu_props->raw_props.mem_features, 8U, 4) + 1;
+
+	gpu_props->tiler_props.bin_size_bytes = 1 << KBASE_UBFX32(gpu_props->raw_props.tiler_features, 0U, 6);
+	gpu_props->tiler_props.max_active_levels = KBASE_UBFX32(gpu_props->raw_props.tiler_features, 8U, 4);
+
+	if (gpu_props->raw_props.thread_max_threads == 0)
+		gpu_props->thread_props.max_threads = THREAD_MT_DEFAULT;
+	else
+		gpu_props->thread_props.max_threads = gpu_props->raw_props.thread_max_threads;
+
+	if (gpu_props->raw_props.thread_max_workgroup_size == 0)
+		gpu_props->thread_props.max_workgroup_size = THREAD_MWS_DEFAULT;
+	else
+		gpu_props->thread_props.max_workgroup_size = gpu_props->raw_props.thread_max_workgroup_size;
+
+	if (gpu_props->raw_props.thread_max_barrier_size == 0)
+		gpu_props->thread_props.max_barrier_size = THREAD_MBS_DEFAULT;
+	else
+		gpu_props->thread_props.max_barrier_size = gpu_props->raw_props.thread_max_barrier_size;
+
+	if (gpu_props->raw_props.thread_tls_alloc == 0)
+		gpu_props->thread_props.tls_alloc =
+				gpu_props->thread_props.max_threads;
+	else
+		gpu_props->thread_props.tls_alloc =
+				gpu_props->raw_props.thread_tls_alloc;
+
+	/* Workaround for GPU2019HW-509. MIDHARC-2364 was wrongly applied
+	 * to tDUx GPUs.
+	 */
+	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID;
+	product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+	if ((gpu_id & GPU_ID2_PRODUCT_MODEL) == GPU_ID2_PRODUCT_TDUX) {
+		gpu_props->thread_props.max_registers =
+			KBASE_UBFX32(gpu_props->raw_props.thread_features,
+				     0U, 22);
+		gpu_props->thread_props.impl_tech =
+			KBASE_UBFX32(gpu_props->raw_props.thread_features,
+				     22U, 2);
+		gpu_props->thread_props.max_task_queue =
+			KBASE_UBFX32(gpu_props->raw_props.thread_features,
+				     24U, 8);
+		gpu_props->thread_props.max_thread_group_split = 0;
+	} else {
+		gpu_props->thread_props.max_registers =
+			KBASE_UBFX32(gpu_props->raw_props.thread_features,
+				     0U, 16);
+		gpu_props->thread_props.max_task_queue =
+			KBASE_UBFX32(gpu_props->raw_props.thread_features,
+				     16U, 8);
+		gpu_props->thread_props.max_thread_group_split =
+			KBASE_UBFX32(gpu_props->raw_props.thread_features,
+				     24U, 6);
+		gpu_props->thread_props.impl_tech =
+			KBASE_UBFX32(gpu_props->raw_props.thread_features,
+				     30U, 2);
+	}
+
+	/* If values are not specified, then use defaults */
+	if (gpu_props->thread_props.max_registers == 0) {
+		gpu_props->thread_props.max_registers = THREAD_MR_DEFAULT;
+		gpu_props->thread_props.max_task_queue = THREAD_MTQ_DEFAULT;
+		gpu_props->thread_props.max_thread_group_split = THREAD_MTGS_DEFAULT;
+	}
+	/* Initialize the coherent_group structure for each group */
+	kbase_gpuprops_construct_coherent_groups(gpu_props);
+}
+
+void kbase_gpuprops_set(struct kbase_device *kbdev)
+{
+	struct kbase_gpu_props *gpu_props;
+	struct gpu_raw_gpu_props *raw;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	gpu_props = &kbdev->gpu_props;
+	raw = &gpu_props->props.raw_props;
+
+	/* Initialize the base_gpu_props structure from the hardware */
+	kbase_gpuprops_get_props(&gpu_props->props, kbdev);
+
+	/* Populate the derived properties */
+	kbase_gpuprops_calculate_props(&gpu_props->props, kbdev);
+
+	/* Populate kbase-only fields */
+	gpu_props->l2_props.associativity = KBASE_UBFX32(raw->l2_features, 8U, 8);
+	gpu_props->l2_props.external_bus_width = KBASE_UBFX32(raw->l2_features, 24U, 8);
+
+	gpu_props->mem.core_group = KBASE_UBFX32(raw->mem_features, 0U, 1);
+
+	gpu_props->mmu.va_bits = KBASE_UBFX32(raw->mmu_features, 0U, 8);
+	gpu_props->mmu.pa_bits = KBASE_UBFX32(raw->mmu_features, 8U, 8);
+
+	gpu_props->num_cores = hweight64(raw->shader_present);
+	gpu_props->num_core_groups = hweight64(raw->l2_present);
+	gpu_props->num_address_spaces = hweight32(raw->as_present);
+	gpu_props->num_job_slots = hweight32(raw->js_present);
+}
+
+void kbase_gpuprops_set_features(struct kbase_device *kbdev)
+{
+	base_gpu_props *gpu_props;
+	struct kbase_gpuprops_regdump regdump;
+
+	gpu_props = &kbdev->gpu_props.props;
+
+	/* Dump relevant registers */
+	kbase_backend_gpuprops_get_features(kbdev, &regdump);
+
+	/*
+	 * Copy the raw value from the register, later this will get turned
+	 * into the selected coherency mode.
+	 * Additionally, add non-coherent mode, as this is always supported.
+	 */
+	gpu_props->raw_props.coherency_mode = regdump.coherency_features |
+		COHERENCY_FEATURE_BIT(COHERENCY_NONE);
+
+	if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_THREAD_GROUP_SPLIT))
+		gpu_props->thread_props.max_thread_group_split = 0;
+}
+
+/*
+ * Module parameters to allow the L2 size and hash configuration to be
+ * overridden.
+ *
+ * These parameters must be set on insmod to take effect, and are not visible
+ * in sysfs.
+ */
+static u8 override_l2_size;
+module_param(override_l2_size, byte, 0);
+MODULE_PARM_DESC(override_l2_size, "Override L2 size config for testing");
+
+static u8 override_l2_hash;
+module_param(override_l2_hash, byte, 0);
+MODULE_PARM_DESC(override_l2_hash, "Override L2 hash config for testing");
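+
+/*
+ * Illustrative use only (the parameter values are hypothetical and the
+ * module file name depends on how the driver is built):
+ *
+ *	insmod mali_kbase.ko override_l2_size=16 override_l2_hash=4
+ */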
+
+/**
+ * kbase_read_l2_config_from_dt - Read L2 configuration
+ * @kbdev: The kbase device for which to get the L2 configuration.
+ *
+ * Check for L2 configuration overrides in module parameters and device tree.
+ * Override values in module parameters take priority over override values in
+ * device tree.
+ *
+ * Return: true if either size or hash was overridden, false if no overrides
+ * were found.
+ */
+static bool kbase_read_l2_config_from_dt(struct kbase_device * const kbdev)
+{
+	struct device_node *np = kbdev->dev->of_node;
+
+	if (!np)
+		return false;
+
+	if (override_l2_size)
+		kbdev->l2_size_override = override_l2_size;
+	else if (of_property_read_u8(np, "l2-size", &kbdev->l2_size_override))
+		kbdev->l2_size_override = 0;
+
+	if (override_l2_hash)
+		kbdev->l2_hash_override = override_l2_hash;
+	else if (of_property_read_u8(np, "l2-hash", &kbdev->l2_hash_override))
+		kbdev->l2_hash_override = 0;
+
+	if (kbdev->l2_size_override || kbdev->l2_hash_override)
+		return true;
+
+	return false;
+}
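+
+/*
+ * A minimal device-tree sketch of the same overrides (node name and
+ * values are hypothetical); both properties are read as single bytes by
+ * of_property_read_u8():
+ *
+ *	gpu@13040000 {
+ *		l2-size = /bits/ 8 <16>;
+ *		l2-hash = /bits/ 8 <4>;
+ *	};
+ */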
+
+void kbase_gpuprops_update_l2_features(struct kbase_device *kbdev)
+{
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_L2_CONFIG)) {
+		struct kbase_gpuprops_regdump regdump;
+		base_gpu_props *gpu_props = &kbdev->gpu_props.props;
+
+		/* Check for L2 cache size & hash overrides */
+		if (!kbase_read_l2_config_from_dt(kbdev))
+			return;
+
+		/* Need L2 to get powered to reflect to L2_FEATURES */
+		kbase_pm_context_active(kbdev);
+
+		/* Wait for the completion of L2 power transition */
+		kbase_pm_wait_for_l2_powered(kbdev);
+
+		/* Dump L2_FEATURES register */
+		kbase_backend_gpuprops_get_l2_features(kbdev, &regdump);
+
+		dev_info(kbdev->dev, "Reflected L2_FEATURES is 0x%x\n",
+				regdump.l2_features);
+
+		/* Update gpuprops with reflected L2_FEATURES */
+		gpu_props->raw_props.l2_features = regdump.l2_features;
+		gpu_props->l2_props.log2_cache_size =
+			KBASE_UBFX32(gpu_props->raw_props.l2_features, 16U, 8);
+
+		/* Let GPU idle */
+		kbase_pm_context_idle(kbdev);
+	}
+}
+
+static struct {
+	u32 type;
+	size_t offset;
+	int size;
+} gpu_property_mapping[] = {
+#define PROP(name, member) \
+	{KBASE_GPUPROP_ ## name, offsetof(struct base_gpu_props, member), \
+		sizeof(((struct base_gpu_props *)0)->member)}
+	PROP(PRODUCT_ID,                  core_props.product_id),
+	PROP(VERSION_STATUS,              core_props.version_status),
+	PROP(MINOR_REVISION,              core_props.minor_revision),
+	PROP(MAJOR_REVISION,              core_props.major_revision),
+	PROP(GPU_FREQ_KHZ_MAX,            core_props.gpu_freq_khz_max),
+	PROP(LOG2_PROGRAM_COUNTER_SIZE,   core_props.log2_program_counter_size),
+	PROP(TEXTURE_FEATURES_0,          core_props.texture_features[0]),
+	PROP(TEXTURE_FEATURES_1,          core_props.texture_features[1]),
+	PROP(TEXTURE_FEATURES_2,          core_props.texture_features[2]),
+	PROP(TEXTURE_FEATURES_3,          core_props.texture_features[3]),
+	PROP(GPU_AVAILABLE_MEMORY_SIZE,   core_props.gpu_available_memory_size),
+	PROP(NUM_EXEC_ENGINES,            core_props.num_exec_engines),
+
+	PROP(L2_LOG2_LINE_SIZE,           l2_props.log2_line_size),
+	PROP(L2_LOG2_CACHE_SIZE,          l2_props.log2_cache_size),
+	PROP(L2_NUM_L2_SLICES,            l2_props.num_l2_slices),
+
+	PROP(TILER_BIN_SIZE_BYTES,        tiler_props.bin_size_bytes),
+	PROP(TILER_MAX_ACTIVE_LEVELS,     tiler_props.max_active_levels),
+
+	PROP(MAX_THREADS,                 thread_props.max_threads),
+	PROP(MAX_WORKGROUP_SIZE,          thread_props.max_workgroup_size),
+	PROP(MAX_BARRIER_SIZE,            thread_props.max_barrier_size),
+	PROP(MAX_REGISTERS,               thread_props.max_registers),
+	PROP(MAX_TASK_QUEUE,              thread_props.max_task_queue),
+	PROP(MAX_THREAD_GROUP_SPLIT,      thread_props.max_thread_group_split),
+	PROP(IMPL_TECH,                   thread_props.impl_tech),
+	PROP(TLS_ALLOC,                   thread_props.tls_alloc),
+
+	PROP(RAW_SHADER_PRESENT,          raw_props.shader_present),
+	PROP(RAW_TILER_PRESENT,           raw_props.tiler_present),
+	PROP(RAW_L2_PRESENT,              raw_props.l2_present),
+	PROP(RAW_STACK_PRESENT,           raw_props.stack_present),
+	PROP(RAW_L2_FEATURES,             raw_props.l2_features),
+	PROP(RAW_CORE_FEATURES,           raw_props.core_features),
+	PROP(RAW_MEM_FEATURES,            raw_props.mem_features),
+	PROP(RAW_MMU_FEATURES,            raw_props.mmu_features),
+	PROP(RAW_AS_PRESENT,              raw_props.as_present),
+	PROP(RAW_JS_PRESENT,              raw_props.js_present),
+	PROP(RAW_JS_FEATURES_0,           raw_props.js_features[0]),
+	PROP(RAW_JS_FEATURES_1,           raw_props.js_features[1]),
+	PROP(RAW_JS_FEATURES_2,           raw_props.js_features[2]),
+	PROP(RAW_JS_FEATURES_3,           raw_props.js_features[3]),
+	PROP(RAW_JS_FEATURES_4,           raw_props.js_features[4]),
+	PROP(RAW_JS_FEATURES_5,           raw_props.js_features[5]),
+	PROP(RAW_JS_FEATURES_6,           raw_props.js_features[6]),
+	PROP(RAW_JS_FEATURES_7,           raw_props.js_features[7]),
+	PROP(RAW_JS_FEATURES_8,           raw_props.js_features[8]),
+	PROP(RAW_JS_FEATURES_9,           raw_props.js_features[9]),
+	PROP(RAW_JS_FEATURES_10,          raw_props.js_features[10]),
+	PROP(RAW_JS_FEATURES_11,          raw_props.js_features[11]),
+	PROP(RAW_JS_FEATURES_12,          raw_props.js_features[12]),
+	PROP(RAW_JS_FEATURES_13,          raw_props.js_features[13]),
+	PROP(RAW_JS_FEATURES_14,          raw_props.js_features[14]),
+	PROP(RAW_JS_FEATURES_15,          raw_props.js_features[15]),
+	PROP(RAW_TILER_FEATURES,          raw_props.tiler_features),
+	PROP(RAW_TEXTURE_FEATURES_0,      raw_props.texture_features[0]),
+	PROP(RAW_TEXTURE_FEATURES_1,      raw_props.texture_features[1]),
+	PROP(RAW_TEXTURE_FEATURES_2,      raw_props.texture_features[2]),
+	PROP(RAW_TEXTURE_FEATURES_3,      raw_props.texture_features[3]),
+	PROP(RAW_GPU_ID,                  raw_props.gpu_id),
+	PROP(RAW_THREAD_MAX_THREADS,      raw_props.thread_max_threads),
+	PROP(RAW_THREAD_MAX_WORKGROUP_SIZE,
+			raw_props.thread_max_workgroup_size),
+	PROP(RAW_THREAD_MAX_BARRIER_SIZE, raw_props.thread_max_barrier_size),
+	PROP(RAW_THREAD_FEATURES,         raw_props.thread_features),
+	PROP(RAW_THREAD_TLS_ALLOC,        raw_props.thread_tls_alloc),
+	PROP(RAW_COHERENCY_MODE,          raw_props.coherency_mode),
+
+	PROP(COHERENCY_NUM_GROUPS,        coherency_info.num_groups),
+	PROP(COHERENCY_NUM_CORE_GROUPS,   coherency_info.num_core_groups),
+	PROP(COHERENCY_COHERENCY,         coherency_info.coherency),
+	PROP(COHERENCY_GROUP_0,           coherency_info.group[0].core_mask),
+	PROP(COHERENCY_GROUP_1,           coherency_info.group[1].core_mask),
+	PROP(COHERENCY_GROUP_2,           coherency_info.group[2].core_mask),
+	PROP(COHERENCY_GROUP_3,           coherency_info.group[3].core_mask),
+	PROP(COHERENCY_GROUP_4,           coherency_info.group[4].core_mask),
+	PROP(COHERENCY_GROUP_5,           coherency_info.group[5].core_mask),
+	PROP(COHERENCY_GROUP_6,           coherency_info.group[6].core_mask),
+	PROP(COHERENCY_GROUP_7,           coherency_info.group[7].core_mask),
+	PROP(COHERENCY_GROUP_8,           coherency_info.group[8].core_mask),
+	PROP(COHERENCY_GROUP_9,           coherency_info.group[9].core_mask),
+	PROP(COHERENCY_GROUP_10,          coherency_info.group[10].core_mask),
+	PROP(COHERENCY_GROUP_11,          coherency_info.group[11].core_mask),
+	PROP(COHERENCY_GROUP_12,          coherency_info.group[12].core_mask),
+	PROP(COHERENCY_GROUP_13,          coherency_info.group[13].core_mask),
+	PROP(COHERENCY_GROUP_14,          coherency_info.group[14].core_mask),
+	PROP(COHERENCY_GROUP_15,          coherency_info.group[15].core_mask),
+
+#undef PROP
+};
+
+int kbase_gpuprops_populate_user_buffer(struct kbase_device *kbdev)
+{
+	struct kbase_gpu_props *kprops = &kbdev->gpu_props;
+	struct base_gpu_props *props = &kprops->props;
+	u32 count = ARRAY_SIZE(gpu_property_mapping);
+	u32 i;
+	u32 size = 0;
+	u8 *p;
+
+	for (i = 0; i < count; i++) {
+		/* 4 bytes for the ID, and the size of the property */
+		size += 4 + gpu_property_mapping[i].size;
+	}
+
+	kprops->prop_buffer_size = size;
+	kprops->prop_buffer = kmalloc(size, GFP_KERNEL);
+
+	if (!kprops->prop_buffer) {
+		kprops->prop_buffer_size = 0;
+		return -ENOMEM;
+	}
+
+	p = kprops->prop_buffer;
+
+#define WRITE_U8(v) (*p++ = (v) & 0xFF)
+#define WRITE_U16(v) do { WRITE_U8(v); WRITE_U8((v) >> 8); } while (0)
+#define WRITE_U32(v) do { WRITE_U16(v); WRITE_U16((v) >> 16); } while (0)
+#define WRITE_U64(v) do { WRITE_U32(v); WRITE_U32((v) >> 32); } while (0)
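+
+	/* Each property is thus serialized little-endian as a 4-byte
+	 * header, (type << 2) | value_size_code, immediately followed by
+	 * the raw value (1, 2, 4 or 8 bytes, matching the size of the
+	 * corresponding struct base_gpu_props member).
+	 */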
+
+	for (i = 0; i < count; i++) {
+		u32 type = gpu_property_mapping[i].type;
+		u8 type_size;
+		void *field = ((u8 *)props) + gpu_property_mapping[i].offset;
+
+		switch (gpu_property_mapping[i].size) {
+		case 1:
+			type_size = KBASE_GPUPROP_VALUE_SIZE_U8;
+			break;
+		case 2:
+			type_size = KBASE_GPUPROP_VALUE_SIZE_U16;
+			break;
+		case 4:
+			type_size = KBASE_GPUPROP_VALUE_SIZE_U32;
+			break;
+		case 8:
+			type_size = KBASE_GPUPROP_VALUE_SIZE_U64;
+			break;
+		default:
+			dev_err(kbdev->dev,
+				"Invalid gpu_property_mapping type=%d size=%d",
+				type, gpu_property_mapping[i].size);
+			return -EINVAL;
+		}
+
+		WRITE_U32((type << 2) | type_size);
+
+		switch (type_size) {
+		case KBASE_GPUPROP_VALUE_SIZE_U8:
+			WRITE_U8(*((u8 *)field));
+			break;
+		case KBASE_GPUPROP_VALUE_SIZE_U16:
+			WRITE_U16(*((u16 *)field));
+			break;
+		case KBASE_GPUPROP_VALUE_SIZE_U32:
+			WRITE_U32(*((u32 *)field));
+			break;
+		case KBASE_GPUPROP_VALUE_SIZE_U64:
+			WRITE_U64(*((u64 *)field));
+			break;
+		default: /* Cannot be reached */
+			WARN_ON(1);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpuprops.h b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.h
new file mode 100644
index 0000000..8edba48
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.h
@@ -0,0 +1,85 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015,2017,2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_gpuprops.h
+ * Base kernel property query APIs
+ */
+
+#ifndef _KBASE_GPUPROPS_H_
+#define _KBASE_GPUPROPS_H_
+
+#include "mali_kbase_gpuprops_types.h"
+
+/* Forward definition - see mali_kbase.h */
+struct kbase_device;
+
+/**
+ * @brief Set up Kbase GPU properties.
+ *
+ * Set up Kbase GPU properties with information from the GPU registers
+ *
+ * @param kbdev		The struct kbase_device structure for the device
+ */
+void kbase_gpuprops_set(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpuprops_set_features - Set up Kbase GPU properties
+ * @kbdev:   Device pointer
+ *
+ * This function sets up GPU properties that are dependent on the hardware
+ * features bitmask. This function must be preceded by a call to
+ * kbase_hw_set_features_mask().
+ */
+void kbase_gpuprops_set_features(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpuprops_update_l2_features - Update GPU property of L2_FEATURES
+ * @kbdev:   Device pointer
+ *
+ * This function updates l2_features and the log2 cache size.
+ */
+void kbase_gpuprops_update_l2_features(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpuprops_populate_user_buffer - Populate the GPU properties buffer
+ * @kbdev: The kbase device
+ *
+ * Fills kbdev->gpu_props->prop_buffer with the GPU properties for user
+ * space to read.
+ */
+int kbase_gpuprops_populate_user_buffer(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpuprops_update_core_props_gpu_id - break down gpu id value
+ * @gpu_props: the &base_gpu_props structure
+ *
+ * Break down gpu_id value stored in base_gpu_props::raw_props.gpu_id into
+ * separate fields (version_status, minor_revision, major_revision, product_id)
+ * stored in base_gpu_props::core_props.
+ */
+void kbase_gpuprops_update_core_props_gpu_id(base_gpu_props * const gpu_props);
+
+
+#endif				/* _KBASE_GPUPROPS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h b/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h
new file mode 100644
index 0000000..d7877d1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h
@@ -0,0 +1,98 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_gpuprops_types.h
+ * Base kernel property query APIs
+ */
+
+#ifndef _KBASE_GPUPROPS_TYPES_H_
+#define _KBASE_GPUPROPS_TYPES_H_
+
+#include "mali_base_kernel.h"
+
+#define KBASE_GPU_SPEED_MHZ    123
+#define KBASE_GPU_PC_SIZE_LOG2 24U
+
+struct kbase_gpuprops_regdump {
+	u32 gpu_id;
+	u32 l2_features;
+	u32 core_features;
+	u32 tiler_features;
+	u32 mem_features;
+	u32 mmu_features;
+	u32 as_present;
+	u32 js_present;
+	u32 thread_max_threads;
+	u32 thread_max_workgroup_size;
+	u32 thread_max_barrier_size;
+	u32 thread_features;
+	u32 thread_tls_alloc;
+	u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+	u32 js_features[GPU_MAX_JOB_SLOTS];
+	u32 shader_present_lo;
+	u32 shader_present_hi;
+	u32 tiler_present_lo;
+	u32 tiler_present_hi;
+	u32 l2_present_lo;
+	u32 l2_present_hi;
+	u32 stack_present_lo;
+	u32 stack_present_hi;
+	u32 coherency_features;
+};
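+
+/*
+ * Note: the *_present_lo/_hi pairs above are the 32-bit halves of the
+ * 64-bit presence bitmasks; core and group counts are later derived from
+ * the combined masks with hweight64() in kbase_gpuprops_set().
+ */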
+
+struct kbase_gpu_cache_props {
+	u8 associativity;
+	u8 external_bus_width;
+};
+
+struct kbase_gpu_mem_props {
+	u8 core_group;
+};
+
+struct kbase_gpu_mmu_props {
+	u8 va_bits;
+	u8 pa_bits;
+};
+
+struct kbase_gpu_props {
+	/* kernel-only properties */
+	u8 num_cores;
+	u8 num_core_groups;
+	u8 num_address_spaces;
+	u8 num_job_slots;
+
+	struct kbase_gpu_cache_props l2_props;
+
+	struct kbase_gpu_mem_props mem;
+	struct kbase_gpu_mmu_props mmu;
+
+	/* Properties shared with userspace */
+	base_gpu_props props;
+
+	u32 prop_buffer_size;
+	void *prop_buffer;
+};
+
+#endif				/* _KBASE_GPUPROPS_TYPES_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gwt.c b/drivers/gpu/arm/midgard/mali_kbase_gwt.c
new file mode 100644
index 0000000..75a0820
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gwt.c
@@ -0,0 +1,269 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_gwt.h"
+#include <linux/list_sort.h>
+
+static inline void kbase_gpu_gwt_setup_page_permission(
+				struct kbase_context *kctx,
+				unsigned long flag,
+				struct rb_node *node)
+{
+	struct rb_node *rbnode = node;
+
+	while (rbnode) {
+		struct kbase_va_region *reg;
+		int err = 0;
+
+		reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+		if (reg->nr_pages && !kbase_is_region_invalid_or_free(reg) &&
+					(reg->flags & KBASE_REG_GPU_WR)) {
+			err = kbase_mmu_update_pages(kctx, reg->start_pfn,
+					kbase_get_gpu_phy_pages(reg),
+					reg->gpu_alloc->nents,
+					reg->flags & flag,
+					reg->gpu_alloc->group_id);
+			if (err)
+				dev_warn(kctx->kbdev->dev, "kbase_mmu_update_pages failure\n");
+		}
+
+		rbnode = rb_next(rbnode);
+	}
+}
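+
+/*
+ * Note: @flag is applied as a mask on each region's flags; the callers
+ * pass ~KBASE_REG_GPU_WR to make writable regions read-only on the GPU
+ * (so that writes fault and can be tracked), and ~0UL to restore the
+ * regions' original permissions.
+ */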
+
+static void kbase_gpu_gwt_setup_pages(struct kbase_context *kctx,
+					unsigned long flag)
+{
+	kbase_gpu_gwt_setup_page_permission(kctx, flag,
+				rb_first(&(kctx->reg_rbtree_same)));
+	kbase_gpu_gwt_setup_page_permission(kctx, flag,
+				rb_first(&(kctx->reg_rbtree_custom)));
+}
+
+
+int kbase_gpu_gwt_start(struct kbase_context *kctx)
+{
+	kbase_gpu_vm_lock(kctx);
+	if (kctx->gwt_enabled) {
+		kbase_gpu_vm_unlock(kctx);
+		return -EBUSY;
+	}
+
+	INIT_LIST_HEAD(&kctx->gwt_current_list);
+	INIT_LIST_HEAD(&kctx->gwt_snapshot_list);
+
+	/* If GWT is enabled using the new vector dumping format from user
+	 * space, back up the status of the job serialization flag and use
+	 * full serialization of jobs for dumping.
+	 * The status will be restored at the end of dumping in gwt_stop.
+	 */
+	kctx->kbdev->backup_serialize_jobs = kctx->kbdev->serialize_jobs;
+	kctx->kbdev->serialize_jobs = KBASE_SERIALIZE_INTRA_SLOT |
+						KBASE_SERIALIZE_INTER_SLOT;
+
+	/* Mark GWT as enabled before making pages read-only, in case a
+	 * write page fault is triggered while we are still in this loop
+	 * (kbase_gpu_vm_lock() does not prevent this!).
+	 */
+	kctx->gwt_enabled = true;
+	kctx->gwt_was_enabled = true;
+
+	kbase_gpu_gwt_setup_pages(kctx, ~KBASE_REG_GPU_WR);
+
+	kbase_gpu_vm_unlock(kctx);
+	return 0;
+}
+
+int kbase_gpu_gwt_stop(struct kbase_context *kctx)
+{
+	struct kbasep_gwt_list_element *pos, *n;
+
+	kbase_gpu_vm_lock(kctx);
+	if (!kctx->gwt_enabled) {
+		kbase_gpu_vm_unlock(kctx);
+		return -EINVAL;
+	}
+
+	list_for_each_entry_safe(pos, n, &kctx->gwt_current_list, link) {
+		list_del(&pos->link);
+		kfree(pos);
+	}
+
+	list_for_each_entry_safe(pos, n, &kctx->gwt_snapshot_list, link) {
+		list_del(&pos->link);
+		kfree(pos);
+	}
+
+	kctx->kbdev->serialize_jobs = kctx->kbdev->backup_serialize_jobs;
+
+	kbase_gpu_gwt_setup_pages(kctx, ~0UL);
+
+	kctx->gwt_enabled = false;
+	kbase_gpu_vm_unlock(kctx);
+	return 0;
+}
+
+
+static int list_cmp_function(void *priv, struct list_head *a,
+				struct list_head *b)
+{
+	struct kbasep_gwt_list_element *elementA = container_of(a,
+				struct kbasep_gwt_list_element, link);
+	struct kbasep_gwt_list_element *elementB = container_of(b,
+				struct kbasep_gwt_list_element, link);
+
+	CSTD_UNUSED(priv);
+
+	if (elementA->page_addr > elementB->page_addr)
+		return 1;
+	return -1;
+}
+
+static void kbase_gpu_gwt_collate(struct kbase_context *kctx,
+		struct list_head *snapshot_list)
+{
+	struct kbasep_gwt_list_element *pos, *n;
+	struct kbasep_gwt_list_element *collated = NULL;
+
+	/* Sort the list */
+	list_sort(NULL, snapshot_list, list_cmp_function);
+
+	/* Combine contiguous areas. */
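+	/* Illustration (hypothetical addresses, PAGE_SIZE of 4 KiB):
+	 * after sorting, entries {0x1000, 1 page} and {0x2000, 2 pages} in
+	 * the same region merge into {0x1000, 3 pages}, while an entry at
+	 * 0x6000 starts a new collated run.
+	 */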
+	list_for_each_entry_safe(pos, n, snapshot_list, link) {
+		if (collated == NULL || collated->region != pos->region ||
+				(collated->page_addr +
+				(collated->num_pages * PAGE_SIZE)) !=
+				pos->page_addr) {
+			/* First time through, a new region, or not
+			 * contiguous with the previous entry - start
+			 * collating at this element.
+			 */
+			collated = pos;
+		} else {
+			/* contiguous so merge */
+			collated->num_pages += pos->num_pages;
+			/* remove element from list */
+			list_del(&pos->link);
+			kfree(pos);
+		}
+	}
+}
+
+int kbase_gpu_gwt_dump(struct kbase_context *kctx,
+			union kbase_ioctl_cinstr_gwt_dump *gwt_dump)
+{
+	const u32 ubuf_size = gwt_dump->in.len;
+	u32 ubuf_count = 0;
+	__user void *user_addr = (__user void *)
+			(uintptr_t)gwt_dump->in.addr_buffer;
+	__user void *user_sizes = (__user void *)
+			(uintptr_t)gwt_dump->in.size_buffer;
+
+	kbase_gpu_vm_lock(kctx);
+
+	if (!kctx->gwt_enabled) {
+		kbase_gpu_vm_unlock(kctx);
+		/* gwt_dump shouldn't be called when gwt is disabled */
+		return -EPERM;
+	}
+
+	if (!gwt_dump->in.len || !gwt_dump->in.addr_buffer
+			|| !gwt_dump->in.size_buffer) {
+		kbase_gpu_vm_unlock(kctx);
+		/* We do not have a valid user space buffer to copy the
+		 * write-modified addresses into.
+		 */
+		return -EINVAL;
+	}
+
+	if (list_empty(&kctx->gwt_snapshot_list) &&
+			!list_empty(&kctx->gwt_current_list)) {
+
+		list_replace_init(&kctx->gwt_current_list,
+					&kctx->gwt_snapshot_list);
+
+		/* We have collected all write faults so far
+		 * and they will be passed on to user space.
+		 * Reset the page flags state to allow collection of
+		 * further write faults.
+		 */
+		kbase_gpu_gwt_setup_pages(kctx, ~KBASE_REG_GPU_WR);
+
+		/* Sort and combine consecutive pages in the dump list */
+		kbase_gpu_gwt_collate(kctx, &kctx->gwt_snapshot_list);
+	}
+
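+	/* Drain the snapshot list in chunks of up to 32 entries per
+	 * iteration, bounded by the space remaining in the user buffers.
+	 */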
+	while (!list_empty(&kctx->gwt_snapshot_list)) {
+		u64 addr_buffer[32];
+		u64 num_page_buffer[32];
+		u32 count = 0;
+		int err;
+		struct kbasep_gwt_list_element *dump_info, *n;
+
+		list_for_each_entry_safe(dump_info, n,
+				&kctx->gwt_snapshot_list, link) {
+			addr_buffer[count] = dump_info->page_addr;
+			num_page_buffer[count] = dump_info->num_pages;
+			count++;
+			list_del(&dump_info->link);
+			kfree(dump_info);
+			if (ARRAY_SIZE(addr_buffer) == count ||
+					ubuf_size == (ubuf_count + count))
+				break;
+		}
+
+		if (count) {
+			err = copy_to_user((user_addr +
+					(ubuf_count * sizeof(u64))),
+					(void *)addr_buffer,
+					count * sizeof(u64));
+			if (err) {
+				dev_err(kctx->kbdev->dev, "Copy to user failure\n");
+				kbase_gpu_vm_unlock(kctx);
+				return -EFAULT;
+			}
+			err = copy_to_user((user_sizes +
+					(ubuf_count * sizeof(u64))),
+					(void *)num_page_buffer,
+					count * sizeof(u64));
+			if (err) {
+				dev_err(kctx->kbdev->dev, "Copy to user failure\n");
+				kbase_gpu_vm_unlock(kctx);
+				return -EFAULT;
+			}
+
+			ubuf_count += count;
+		}
+
+		if (ubuf_count == ubuf_size)
+			break;
+	}
+
+	if (!list_empty(&kctx->gwt_snapshot_list))
+		gwt_dump->out.more_data_available = 1;
+	else
+		gwt_dump->out.more_data_available = 0;
+
+	gwt_dump->out.no_of_addr_collected = ubuf_count;
+	kbase_gpu_vm_unlock(kctx);
+	return 0;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gwt.h b/drivers/gpu/arm/midgard/mali_kbase_gwt.h
new file mode 100644
index 0000000..7e7746e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gwt.h
@@ -0,0 +1,55 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_GWT_H)
+#define _KBASE_GWT_H
+
+#include <mali_kbase.h>
+#include <mali_kbase_ioctl.h>
+
+/**
+ * kbase_gpu_gwt_start - Start the GPU write tracking
+ * @kctx: Pointer to kernel context
+ *
+ * @return 0 on success, error on failure.
+ */
+int kbase_gpu_gwt_start(struct kbase_context *kctx);
+
+/**
+ * kbase_gpu_gwt_stop - Stop the GPU write tracking
+ * @kctx: Pointer to kernel context
+ *
+ * @return 0 on success, error on failure.
+ */
+int kbase_gpu_gwt_stop(struct kbase_context *kctx);
+
+/**
+ * kbase_gpu_gwt_dump - Pass addresses of write-faulted pages to user space.
+ * @kctx:	Pointer to kernel context
+ * @gwt_dump:	User space data to be passed.
+ *
+ * @return 0 on success, error on failure.
+ */
+int kbase_gpu_gwt_dump(struct kbase_context *kctx,
+			union kbase_ioctl_cinstr_gwt_dump *gwt_dump);
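+
+/*
+ * Typical lifecycle, as driven from user space through the cinstr GWT
+ * ioctls: kbase_gpu_gwt_start() arms write tracking by making GPU-writable
+ * regions read-only, kbase_gpu_gwt_dump() is called repeatedly to drain
+ * the collected fault addresses, and kbase_gpu_gwt_stop() restores page
+ * permissions and frees the tracking lists.
+ */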
+
+#endif /* _KBASE_GWT_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hw.c b/drivers/gpu/arm/midgard/mali_kbase_hw.c
new file mode 100644
index 0000000..c277c0c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hw.c
@@ -0,0 +1,405 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Run-time work-around helpers
+ */
+
+#include <mali_base_hwconfig_features.h>
+#include <mali_base_hwconfig_issues.h>
+#include <mali_midg_regmap.h>
+#include "mali_kbase.h"
+#include "mali_kbase_hw.h"
+
+void kbase_hw_set_features_mask(struct kbase_device *kbdev)
+{
+	const enum base_hw_feature *features;
+	u32 gpu_id;
+
+	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+
+	switch (gpu_id & GPU_ID2_PRODUCT_MODEL) {
+	case GPU_ID2_PRODUCT_TMIX:
+		features = base_hw_features_tMIx;
+		break;
+	case GPU_ID2_PRODUCT_THEX:
+		features = base_hw_features_tHEx;
+		break;
+	case GPU_ID2_PRODUCT_TSIX:
+		features = base_hw_features_tSIx;
+		break;
+	case GPU_ID2_PRODUCT_TDVX:
+		features = base_hw_features_tDVx;
+		break;
+	case GPU_ID2_PRODUCT_TNOX:
+		features = base_hw_features_tNOx;
+		break;
+	case GPU_ID2_PRODUCT_TGOX:
+		features = base_hw_features_tGOx;
+		break;
+	case GPU_ID2_PRODUCT_TTRX:
+		features = base_hw_features_tTRx;
+		break;
+	case GPU_ID2_PRODUCT_TNAX:
+		features = base_hw_features_tNAx;
+		break;
+	case GPU_ID2_PRODUCT_LBEX:
+	case GPU_ID2_PRODUCT_TBEX:
+		features = base_hw_features_tBEx;
+		break;
+	case GPU_ID2_PRODUCT_TULX:
+		features = base_hw_features_tULx;
+		break;
+	case GPU_ID2_PRODUCT_TDUX:
+		features = base_hw_features_tDUx;
+		break;
+	case GPU_ID2_PRODUCT_TODX:
+	case GPU_ID2_PRODUCT_LODX:
+		features = base_hw_features_tODx;
+		break;
+	case GPU_ID2_PRODUCT_TIDX:
+		features = base_hw_features_tIDx;
+		break;
+	case GPU_ID2_PRODUCT_TVAX:
+		features = base_hw_features_tVAx;
+		break;
+	default:
+		features = base_hw_features_generic;
+		break;
+	}
+
+	for (; *features != BASE_HW_FEATURE_END; features++)
+		set_bit(*features, &kbdev->hw_features_mask[0]);
+
+#if defined(CONFIG_MALI_JOB_DUMP) || defined(CONFIG_MALI_VECTOR_DUMP)
+	/* When dumping is enabled, the flush reduction optimization must be
+	 * disabled on GPUs where it is otherwise safe to issue only a cache
+	 * clean operation at the end of a job chain.
+	 * This is required to make job dumping work: there is some
+	 * discrepancy in the implementation of the flush reduction
+	 * optimization due to an unclear or ambiguous ARCH spec.
+	 */
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_CLEAN_ONLY_SAFE))
+		clear_bit(BASE_HW_FEATURE_FLUSH_REDUCTION,
+			&kbdev->hw_features_mask[0]);
+#endif
+}
+
+/**
+ * kbase_hw_get_issues_for_new_id - Get the hardware issues for a new GPU ID
+ * @kbdev: Device pointer
+ *
+ * Return: pointer to an array of hardware issues, terminated by
+ * BASE_HW_ISSUE_END.
+ *
+ * In debugging versions of the driver, unknown versions of a known GPU will
+ * be treated as the most recent known version not later than the actual
+ * version. In such circumstances, the GPU ID in @kbdev will also be replaced
+ * with the most recent known version.
+ *
+ * Note: The GPU configuration must have been read by kbase_gpuprops_get_props()
+ * before calling this function.
+ */
+static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(
+					struct kbase_device *kbdev)
+{
+	const enum base_hw_issue *issues = NULL;
+
+	struct base_hw_product {
+		u32 product_model;
+		struct {
+			u32 version;
+			const enum base_hw_issue *issues;
+		} map[7];
+	};
+
+	static const struct base_hw_product base_hw_products[] = {
+		{GPU_ID2_PRODUCT_TMIX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 1),
+		   base_hw_issues_tMIx_r0p0_05dev0},
+		  {GPU_ID2_VERSION_MAKE(0, 0, 2), base_hw_issues_tMIx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tMIx_r0p1},
+		  {U32_MAX /* sentinel value */, NULL} } },
+
+		{GPU_ID2_PRODUCT_THEX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tHEx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 0, 1), base_hw_issues_tHEx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tHEx_r0p1},
+		  {GPU_ID2_VERSION_MAKE(0, 1, 1), base_hw_issues_tHEx_r0p1},
+		  {GPU_ID2_VERSION_MAKE(0, 2, 0), base_hw_issues_tHEx_r0p2},
+		  {GPU_ID2_VERSION_MAKE(0, 3, 0), base_hw_issues_tHEx_r0p3},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TSIX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tSIx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 0, 1), base_hw_issues_tSIx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tSIx_r0p1},
+		  {GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tSIx_r1p0},
+		  {GPU_ID2_VERSION_MAKE(1, 1, 0), base_hw_issues_tSIx_r1p1},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TDVX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tDVx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TNOX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tNOx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TGOX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tGOx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tGOx_r1p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TTRX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tTRx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 0, 3), base_hw_issues_tTRx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tTRx_r0p1},
+		  {GPU_ID2_VERSION_MAKE(0, 1, 1), base_hw_issues_tTRx_r0p1},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TNAX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tNAx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 0, 3), base_hw_issues_tNAx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 0, 4), base_hw_issues_tNAx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 0, 5), base_hw_issues_tNAx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tNAx_r0p1},
+		  {GPU_ID2_VERSION_MAKE(0, 1, 1), base_hw_issues_tNAx_r0p1},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_LBEX,
+		 {{GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tBEx_r1p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TBEX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tBEx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 0, 3), base_hw_issues_tBEx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tBEx_r1p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TULX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tULx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TDUX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tDUx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TODX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tODx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_LODX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tODx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TIDX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tIDx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TVAX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tVAx_r0p0},
+		  {U32_MAX, NULL} } },
+	};
+
+	u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	const u32 product_model = gpu_id & GPU_ID2_PRODUCT_MODEL;
+	const struct base_hw_product *product = NULL;
+	size_t p;
+
+	/* Stop when we reach the end of the products array. */
+	for (p = 0; p < ARRAY_SIZE(base_hw_products); ++p) {
+		if (product_model == base_hw_products[p].product_model) {
+			product = &base_hw_products[p];
+			break;
+		}
+	}
+
+	if (product != NULL) {
+		/* Found a matching product. */
+		const u32 version = gpu_id & GPU_ID2_VERSION;
+		u32 fallback_version = 0;
+		const enum base_hw_issue *fallback_issues = NULL;
+		size_t v;
+
+		/* Stop when we reach the end of the map. */
+		for (v = 0; product->map[v].version != U32_MAX; ++v) {
+
+			if (version == product->map[v].version) {
+				/* Exact match so stop. */
+				issues = product->map[v].issues;
+				break;
+			}
+
+			/* Check whether this is a candidate for the most
+			 * recent known version not later than the actual
+			 * version, e.g. an unknown r0p2 would fall back to
+			 * a known r0p1 in preference to r0p0.
+			 */
+			if ((version > product->map[v].version) &&
+				(product->map[v].version >= fallback_version)) {
+#if MALI_CUSTOMER_RELEASE
+				/* Match on version's major and minor fields */
+				if (((version ^ product->map[v].version) >>
+					GPU_ID2_VERSION_MINOR_SHIFT) == 0)
+#endif
+				{
+					fallback_version = product->map[v].version;
+					fallback_issues = product->map[v].issues;
+				}
+			}
+		}
+
+		if ((issues == NULL) && (fallback_issues != NULL)) {
+			/* Fall back to the issue set of the most recent
+			 * known version not later than the actual version.
+			 */
+			issues = fallback_issues;
+
+#if MALI_CUSTOMER_RELEASE
+			dev_warn(kbdev->dev,
+				"GPU hardware issue table may need updating:\n"
+#else
+			dev_info(kbdev->dev,
+#endif
+				"r%dp%d status %d is unknown; treating as r%dp%d status %d",
+				(gpu_id & GPU_ID2_VERSION_MAJOR) >>
+					GPU_ID2_VERSION_MAJOR_SHIFT,
+				(gpu_id & GPU_ID2_VERSION_MINOR) >>
+					GPU_ID2_VERSION_MINOR_SHIFT,
+				(gpu_id & GPU_ID2_VERSION_STATUS) >>
+					GPU_ID2_VERSION_STATUS_SHIFT,
+				(fallback_version & GPU_ID2_VERSION_MAJOR) >>
+					GPU_ID2_VERSION_MAJOR_SHIFT,
+				(fallback_version & GPU_ID2_VERSION_MINOR) >>
+					GPU_ID2_VERSION_MINOR_SHIFT,
+				(fallback_version & GPU_ID2_VERSION_STATUS) >>
+					GPU_ID2_VERSION_STATUS_SHIFT);
+
+			gpu_id &= ~GPU_ID2_VERSION;
+			gpu_id |= fallback_version;
+			kbdev->gpu_props.props.raw_props.gpu_id = gpu_id;
+
+			kbase_gpuprops_update_core_props_gpu_id(
+				&kbdev->gpu_props.props);
+		}
+	}
+	return issues;
+}
+
+int kbase_hw_set_issues_mask(struct kbase_device *kbdev)
+{
+	const enum base_hw_issue *issues;
+	u32 gpu_id;
+	u32 impl_tech;
+
+	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	impl_tech = kbdev->gpu_props.props.thread_props.impl_tech;
+
+	if (impl_tech != IMPLEMENTATION_MODEL) {
+		issues = kbase_hw_get_issues_for_new_id(kbdev);
+		if (issues == NULL) {
+			dev_err(kbdev->dev,
+				"Unknown GPU ID %x", gpu_id);
+			return -EINVAL;
+		}
+
+#if !MALI_CUSTOMER_RELEASE
+		/* The GPU ID might have been replaced with the last
+		 * known version of the same GPU.
+		 */
+		gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+#endif
+	} else {
+		/* Software model */
+		switch (gpu_id & GPU_ID2_PRODUCT_MODEL) {
+		case GPU_ID2_PRODUCT_TMIX:
+			issues = base_hw_issues_model_tMIx;
+			break;
+		case GPU_ID2_PRODUCT_THEX:
+			issues = base_hw_issues_model_tHEx;
+			break;
+		case GPU_ID2_PRODUCT_TSIX:
+			issues = base_hw_issues_model_tSIx;
+			break;
+		case GPU_ID2_PRODUCT_TDVX:
+			issues = base_hw_issues_model_tDVx;
+			break;
+		case GPU_ID2_PRODUCT_TNOX:
+			issues = base_hw_issues_model_tNOx;
+			break;
+		case GPU_ID2_PRODUCT_TGOX:
+			issues = base_hw_issues_model_tGOx;
+			break;
+		case GPU_ID2_PRODUCT_TTRX:
+			issues = base_hw_issues_model_tTRx;
+			break;
+		case GPU_ID2_PRODUCT_TNAX:
+			issues = base_hw_issues_model_tNAx;
+			break;
+		case GPU_ID2_PRODUCT_LBEX:
+		case GPU_ID2_PRODUCT_TBEX:
+			issues = base_hw_issues_model_tBEx;
+			break;
+		case GPU_ID2_PRODUCT_TULX:
+			issues = base_hw_issues_model_tULx;
+			break;
+		case GPU_ID2_PRODUCT_TDUX:
+			issues = base_hw_issues_model_tDUx;
+			break;
+		case GPU_ID2_PRODUCT_TODX:
+		case GPU_ID2_PRODUCT_LODX:
+			issues = base_hw_issues_model_tODx;
+			break;
+		case GPU_ID2_PRODUCT_TIDX:
+			issues = base_hw_issues_model_tIDx;
+			break;
+		case GPU_ID2_PRODUCT_TVAX:
+			issues = base_hw_issues_model_tVAx;
+			break;
+		default:
+			dev_err(kbdev->dev,
+				"Unknown GPU ID %x", gpu_id);
+			return -EINVAL;
+		}
+	}
+
+	dev_info(kbdev->dev,
+		"GPU identified as 0x%x arch %d.%d.%d r%dp%d status %d",
+		(gpu_id & GPU_ID2_PRODUCT_MAJOR) >>
+			GPU_ID2_PRODUCT_MAJOR_SHIFT,
+		(gpu_id & GPU_ID2_ARCH_MAJOR) >>
+			GPU_ID2_ARCH_MAJOR_SHIFT,
+		(gpu_id & GPU_ID2_ARCH_MINOR) >>
+			GPU_ID2_ARCH_MINOR_SHIFT,
+		(gpu_id & GPU_ID2_ARCH_REV) >>
+			GPU_ID2_ARCH_REV_SHIFT,
+		(gpu_id & GPU_ID2_VERSION_MAJOR) >>
+			GPU_ID2_VERSION_MAJOR_SHIFT,
+		(gpu_id & GPU_ID2_VERSION_MINOR) >>
+			GPU_ID2_VERSION_MINOR_SHIFT,
+		(gpu_id & GPU_ID2_VERSION_STATUS) >>
+			GPU_ID2_VERSION_STATUS_SHIFT);
+
+	for (; *issues != BASE_HW_ISSUE_END; issues++)
+		set_bit(*issues, &kbdev->hw_issues_mask[0]);
+
+	return 0;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hw.h b/drivers/gpu/arm/midgard/mali_kbase_hw.h
new file mode 100644
index 0000000..f386b16
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hw.h
@@ -0,0 +1,70 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file
+ * Run-time work-around helpers
+ */
+
+#ifndef _KBASE_HW_H_
+#define _KBASE_HW_H_
+
+#include "mali_kbase_defs.h"
+
+/**
+ * @brief Tell whether a work-around should be enabled
+ */
+#define kbase_hw_has_issue(kbdev, issue)\
+	test_bit(issue, &(kbdev)->hw_issues_mask[0])
+
+/**
+ * @brief Tell whether a feature is supported
+ */
+#define kbase_hw_has_feature(kbdev, feature)\
+	test_bit(feature, &(kbdev)->hw_features_mask[0])
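+
+/*
+ * Illustrative use (the feature and issue bits are populated by
+ * kbase_hw_set_features_mask() and kbase_hw_set_issues_mask(); the body
+ * of the if is left open):
+ *
+ *	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
+ *		...;
+ */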
+
+/**
+ * kbase_hw_set_issues_mask - Set the hardware issues mask based on the GPU ID
+ * @kbdev: Device pointer
+ *
+ * Return: 0 if the GPU ID was recognized, otherwise -EINVAL.
+ *
+ * The GPU ID is read from the @kbdev.
+ *
+ * In debugging versions of the driver, unknown versions of a known GPU with a
+ * new-format ID will be treated as the most recent known version not later
+ * than the actual version. In such circumstances, the GPU ID in @kbdev will
+ * also be replaced with the most recent known version.
+ *
+ * Note: The GPU configuration must have been read by
+ * kbase_gpuprops_get_props() before calling this function.
+ */
+int kbase_hw_set_issues_mask(struct kbase_device *kbdev);
+
+/**
+ * @brief Set the features mask depending on the GPU ID
+ */
+void kbase_hw_set_features_mask(struct kbase_device *kbdev);
+
+#endif				/* _KBASE_HW_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_backend.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_backend.h
new file mode 100644
index 0000000..d5e3d3a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_backend.h
@@ -0,0 +1,73 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * HW access backend common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_BACKEND_H_
+#define _KBASE_HWACCESS_BACKEND_H_
+
+/**
+ * kbase_backend_early_init - Perform any backend-specific initialization.
+ * @kbdev:	Device pointer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int kbase_backend_early_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_late_init - Perform any backend-specific initialization.
+ * @kbdev:	Device pointer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int kbase_backend_late_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_early_term - Perform any backend-specific termination.
+ * @kbdev:	Device pointer
+ */
+void kbase_backend_early_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_late_term - Perform any backend-specific termination.
+ * @kbdev:	Device pointer
+ */
+void kbase_backend_late_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_devfreq_init - Perform backend devfreq related initialization.
+ * @kbdev:      Device pointer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int kbase_backend_devfreq_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_devfreq_term - Perform backend-devfreq termination.
+ * @kbdev:	Device pointer
+ */
+void kbase_backend_devfreq_term(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HWACCESS_BACKEND_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h
new file mode 100644
index 0000000..124a2d9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h
@@ -0,0 +1,51 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2016, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/**
+ * @file mali_kbase_hwaccess_defs.h
+ * HW access common definitions
+ */
+
+#ifndef _KBASE_HWACCESS_DEFS_H_
+#define _KBASE_HWACCESS_DEFS_H_
+
+#include <mali_kbase_jm_defs.h>
+
+/**
+ * struct kbase_hwaccess_data - object encapsulating the GPU backend specific
+ *                              data for the HW access layer.
+ *                              hwaccess_lock (a spinlock) must be held when
+ *                              accessing this structure.
+ * @active_kctx:     pointer, per job slot, to the active kbase context that
+ *                   last submitted an atom to the GPU; while a context is
+ *                   active it can also submit new atoms from irq context,
+ *                   without going through the bottom half of the job
+ *                   completion path.
+ * @backend:         GPU backend specific data for HW access layer
+ */
+struct kbase_hwaccess_data {
+	struct kbase_context *active_kctx[BASE_JM_MAX_NR_SLOTS];
+
+	struct kbase_backend_data backend;
+};
+
+#endif /* _KBASE_HWACCESS_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h
new file mode 100644
index 0000000..62628b61
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h
@@ -0,0 +1,67 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2018, 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/**
+ * Base kernel property query backend APIs
+ */
+
+#ifndef _KBASE_HWACCESS_GPUPROPS_H_
+#define _KBASE_HWACCESS_GPUPROPS_H_
+
+/**
+ * kbase_backend_gpuprops_get() - Fill @regdump with GPU properties read from
+ *				  GPU
+ * @kbdev:	Device pointer
+ * @regdump:	Pointer to struct kbase_gpuprops_regdump structure
+ *
+ * The caller should ensure that the GPU remains powered on during this
+ * function.
+ */
+void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
+					struct kbase_gpuprops_regdump *regdump);
+
+/**
+ * kbase_backend_gpuprops_get_features - Fill @regdump with GPU properties read
+ *                                       from GPU
+ * @kbdev:   Device pointer
+ * @regdump: Pointer to struct kbase_gpuprops_regdump structure
+ *
+ * This function reads GPU properties that are dependent on the hardware
+ * features bitmask. It will power-on the GPU if required.
+ */
+void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
+					struct kbase_gpuprops_regdump *regdump);
+
+/**
+ * kbase_backend_gpuprops_get_l2_features - Fill @regdump with L2_FEATURES read
+ *                                          from GPU
+ * @kbdev:   Device pointer
+ * @regdump: Pointer to struct kbase_gpuprops_regdump structure
+ *
+ * This function reads L2_FEATURES register that is dependent on the hardware
+ * features bitmask. It will power-on the GPU if required.
+ */
+void kbase_backend_gpuprops_get_l2_features(struct kbase_device *kbdev,
+					struct kbase_gpuprops_regdump *regdump);
+
+
+#endif /* _KBASE_HWACCESS_GPUPROPS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h
new file mode 100644
index 0000000..d5b9099
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h
@@ -0,0 +1,142 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * HW Access instrumentation common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_INSTR_H_
+#define _KBASE_HWACCESS_INSTR_H_
+
+#include <mali_kbase_instr_defs.h>
+
+/**
+ * struct kbase_instr_hwcnt_enable - Enable hardware counter collection.
+ * @dump_buffer:       GPU address to write counters to.
+ * @dump_buffer_bytes: Size in bytes of the buffer pointed to by dump_buffer.
+ * @jm_bm:             counters selection bitmask (JM).
+ * @shader_bm:         counters selection bitmask (Shader).
+ * @tiler_bm:          counters selection bitmask (Tiler).
+ * @mmu_l2_bm:         counters selection bitmask (MMU_L2).
+ * @use_secondary:     use secondary performance counters set for applicable
+ *                     counter blocks.
+ */
+struct kbase_instr_hwcnt_enable {
+	u64 dump_buffer;
+	u64 dump_buffer_bytes;
+	u32 jm_bm;
+	u32 shader_bm;
+	u32 tiler_bm;
+	u32 mmu_l2_bm;
+	bool use_secondary;
+};
+
+/**
+ * kbase_instr_hwcnt_enable_internal() - Enable HW counters collection
+ * @kbdev:	Kbase device
+ * @kctx:	Kbase context
+ * @enable:	HW counter setup parameters
+ *
+ * Context: might sleep, waiting for reset to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
+				struct kbase_context *kctx,
+				struct kbase_instr_hwcnt_enable *enable);
+
+/**
+ * kbase_instr_hwcnt_disable_internal() - Disable HW counters collection
+ * @kctx: Kbase context
+ *
+ * Context: might sleep, waiting for an ongoing dump to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_request_dump() - Request HW counter dump from GPU
+ * @kctx:	Kbase context
+ *
+ * Caller must either wait for kbase_instr_hwcnt_dump_complete() to return
+ * true, or call kbase_instr_hwcnt_wait_for_dump().
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_wait_for_dump() - Wait until pending HW counter dump has
+ *				       completed.
+ * @kctx:	Kbase context
+ *
+ * Context: will sleep, waiting for dump to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_dump_complete - Tell whether the HW counters dump has
+ *				     completed
+ * @kctx:	Kbase context
+ * @success:	Set to true if successful
+ *
+ * Context: does not sleep.
+ *
+ * Return: true if the dump is complete
+ */
+bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx,
+						bool * const success);
+
+/**
+ * kbase_instr_hwcnt_clear() - Clear HW counters
+ * @kctx:	Kbase context
+ *
+ * Context: might sleep, waiting for reset to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_clear(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_backend_init() - Initialise the instrumentation backend
+ * @kbdev:	Kbase device
+ *
+ * This function should be called during driver initialization.
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_backend_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_instr_backend_term() - Terminate the instrumentation backend
+ * @kbdev:	Kbase device
+ *
+ * This function should be called during driver termination.
+ */
+void kbase_instr_backend_term(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HWACCESS_INSTR_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h
new file mode 100644
index 0000000..c3b60e6
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h
@@ -0,0 +1,298 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * HW access job manager common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_JM_H_
+#define _KBASE_HWACCESS_JM_H_
+
+/**
+ * kbase_backend_run_atom() - Run an atom on the GPU
+ * @kbdev:	Device pointer
+ * @katom:	Atom to run
+ *
+ * Caller must hold the HW access lock
+ */
+void kbase_backend_run_atom(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom);
+
+/**
+ * kbase_backend_slot_update - Update state based on slot ringbuffers
+ *
+ * @kbdev:  Device pointer
+ *
+ * Inspect the jobs in the slot ringbuffers and update state.
+ *
+ * This will cause jobs to be submitted to hardware if they are unblocked
+ */
+void kbase_backend_slot_update(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_find_and_release_free_address_space() - Release a free AS
+ * @kbdev:	Device pointer
+ * @kctx:	Context pointer
+ *
+ * This function can evict an idle context from the runpool, freeing up the
+ * address space it was using.
+ *
+ * The address space is marked as in use. The caller must either assign a
+ * context using kbase_gpu_use_ctx(), or release it using
+ * kbase_ctx_sched_release()
+ *
+ * Return: Number of the free address space, or KBASEP_AS_NR_INVALID if none
+ *	   is available
+ */
+int kbase_backend_find_and_release_free_address_space(
+		struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * kbase_backend_use_ctx() - Activate a currently unscheduled context, using the
+ *			     provided address space.
+ * @kbdev:	Device pointer
+ * @kctx:	Context pointer. May be NULL
+ * @as_nr:	Free address space to use
+ *
+ * kbase_gpu_next_job() will pull atoms from the active context.
+ *
+ * Return: true if successful, false if ASID not assigned.
+ */
+bool kbase_backend_use_ctx(struct kbase_device *kbdev,
+				struct kbase_context *kctx,
+				int as_nr);
+
+/**
+ * kbase_backend_use_ctx_sched() - Activate a context.
+ * @kbdev:	Device pointer
+ * @kctx:	Context pointer
+ * @js:         Job slot to activate context on
+ *
+ * kbase_gpu_next_job() will pull atoms from the active context.
+ *
+ * The context must already be scheduled and assigned to an address space. If
+ * the context is not scheduled, then kbase_gpu_use_ctx() should be used
+ * instead.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if the context is now active, false otherwise (i.e. if the
+ *	   context does not have an address space assigned)
+ */
+bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
+					struct kbase_context *kctx, int js);
+
+/**
+ * kbase_backend_release_ctx_irq - Release a context from the GPU. This will
+ *                                 de-assign the assigned address space.
+ * @kbdev: Device pointer
+ * @kctx:  Context pointer
+ *
+ * Caller must hold kbase_device->mmu_hw_mutex and hwaccess_lock
+ */
+void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
+				struct kbase_context *kctx);
+
+/**
+ * kbase_backend_release_ctx_noirq - Release a context from the GPU. This will
+ *                                   de-assign the assigned address space.
+ * @kbdev: Device pointer
+ * @kctx:  Context pointer
+ *
+ * Caller must hold kbase_device->mmu_hw_mutex
+ *
+ * This function must perform any operations that could not be performed in IRQ
+ * context by kbase_backend_release_ctx_irq().
+ */
+void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
+						struct kbase_context *kctx);
+
+/**
+ * kbase_backend_cache_clean - Perform a cache clean if the given atom requires
+ *                            one
+ * @kbdev:	Device pointer
+ * @katom:	Pointer to the failed atom
+ *
+ * On some GPUs, the GPU cache must be cleaned following a failed atom. This
+ * function performs a clean if it is required by @katom.
+ */
+void kbase_backend_cache_clean(struct kbase_device *kbdev,
+		struct kbase_jd_atom *katom);
+
+
+/**
+ * kbase_backend_complete_wq() - Perform backend-specific actions required on
+ *				 completing an atom.
+ * @kbdev:	Device pointer
+ * @katom:	Pointer to the atom to complete
+ *
+ * This function should only be called from kbase_jd_done_worker() or
+ * js_return_worker().
+ */
+void kbase_backend_complete_wq(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom);
+
+/**
+ * kbase_backend_complete_wq_post_sched - Perform backend-specific actions
+ *                                        required on completing an atom, after
+ *                                        any scheduling has taken place.
+ * @kbdev:         Device pointer
+ * @core_req:      Core requirements of atom
+ *
+ * This function should only be called from kbase_jd_done_worker() or
+ * js_return_worker().
+ */
+void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
+		base_jd_core_req core_req);
+
+/**
+ * kbase_backend_reset() - The GPU is being reset. Cancel all jobs on the GPU
+ *			   and remove any others from the ringbuffers.
+ * @kbdev:		Device pointer
+ * @end_timestamp:	Timestamp of reset
+ */
+void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp);
+
+/**
+ * kbase_backend_inspect_tail - Return the atom currently at the tail of slot
+ *                              @js
+ * @kbdev: Device pointer
+ * @js:    Job slot to inspect
+ *
+ * Return: Atom currently at the tail of slot @js, or NULL
+ */
+struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
+					int js);
+
+/**
+ * kbase_backend_nr_atoms_on_slot() - Return the number of atoms currently on a
+ *				      slot.
+ * @kbdev:	Device pointer
+ * @js:		Job slot to inspect
+ *
+ * Return: Number of atoms currently on slot @js
+ */
+int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_backend_nr_atoms_submitted() - Return the number of atoms on a slot
+ *					that are currently on the GPU.
+ * @kbdev:	Device pointer
+ * @js:		Job slot to inspect
+ *
+ * Return: Number of atoms on slot @js that are currently on the GPU.
+ */
+int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_backend_ctx_count_changed() - Number of contexts ready to submit jobs
+ *				       has changed.
+ * @kbdev:	Device pointer
+ *
+ * Perform any required backend-specific actions (e.g. starting/stopping
+ * scheduling timers).
+ */
+void kbase_backend_ctx_count_changed(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timeouts_changed() - Job Scheduler timeouts have changed.
+ * @kbdev:	Device pointer
+ *
+ * Perform any required backend-specific actions (e.g. updating timeouts of
+ * currently running atoms).
+ */
+void kbase_backend_timeouts_changed(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_slot_free() - Return the number of jobs that can currently be
+ *			       submitted to slot @js.
+ * @kbdev:	Device pointer
+ * @js:		Job slot to inspect
+ *
+ * Return: Number of jobs that can be submitted to slot @js.
+ */
+int kbase_backend_slot_free(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_job_check_leave_disjoint - Potentially leave disjoint state
+ * @kbdev: kbase device
+ * @target_katom: atom which is finishing
+ *
+ * Work out whether to leave disjoint state when finishing an atom that was
+ * originated by kbase_job_check_enter_disjoint().
+ */
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+		struct kbase_jd_atom *target_katom);
+
+/**
+ * kbase_backend_jm_kill_running_jobs_from_kctx - Kill all jobs that are
+ *                               currently running on the GPU from a context
+ * @kctx: Context pointer
+ *
+ * This is used in response to a page fault to remove all jobs from the faulting
+ * context from the hardware.
+ *
+ * Caller must hold hwaccess_lock.
+ */
+void kbase_backend_jm_kill_running_jobs_from_kctx(struct kbase_context *kctx);
+
+/**
+ * kbase_jm_wait_for_zero_jobs - Wait for context to have zero jobs running, and
+ *                               to be descheduled.
+ * @kctx: Context pointer
+ *
+ * This should be called following kbase_js_zap_context(), to ensure the context
+ * can be safely destroyed.
+ */
+void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx);
+
+/**
+ * kbase_backend_get_current_flush_id - Return the current flush ID
+ *
+ * @kbdev: Device pointer
+ *
+ * Return: the current flush ID to be recorded for each job chain
+ */
+u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev);
+
+/**
+ * kbase_job_slot_hardstop - Hard-stop the specified job slot
+ * @kctx:         The kbase context that contains the job(s) that should
+ *                be hard-stopped
+ * @js:           The job slot to hard-stop
+ * @target_katom: The job that should be hard-stopped (or NULL for all
+ *                jobs from the context)
+ * Context:
+ *   The job slot lock must be held when calling this function.
+ */
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+				struct kbase_jd_atom *target_katom);
+
+/* Object containing callbacks for enabling/disabling protected mode, used
+ * on GPUs which support protected mode switching natively.
+ */
+extern struct protected_mode_ops kbase_native_protected_ops;
+
+#endif /* _KBASE_HWACCESS_JM_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_pm.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_pm.h
new file mode 100644
index 0000000..96c473a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_pm.h
@@ -0,0 +1,211 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2018-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/**
+ * @file mali_kbase_hwaccess_pm.h
+ * HW access power manager common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_PM_H_
+#define _KBASE_HWACCESS_PM_H_
+
+#include <mali_midg_regmap.h>
+#include <linux/atomic.h>
+
+#include <mali_kbase_pm_defs.h>
+
+/* Forward declaration - see mali_kbase.h */
+struct kbase_device;
+
+/* Functions common to all HW access backends */
+
+/**
+ * Initialize the power management framework.
+ *
+ * Must be called before any other power management function
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 if the power management framework was successfully initialized.
+ */
+int kbase_hwaccess_pm_init(struct kbase_device *kbdev);
+
+/**
+ * Terminate the power management framework.
+ *
+ * No power management functions may be called after this
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_hwaccess_pm_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_hwaccess_pm_powerup - Power up the GPU.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @flags: Flags to pass on to kbase_pm_init_hw
+ *
+ * Power up GPU after all modules have been initialized and interrupt handlers
+ * installed.
+ *
+ * Return: 0 if powerup was successful.
+ */
+int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
+		unsigned int flags);
+
+/**
+ * Halt the power management framework.
+ *
+ * Should ensure that no new interrupts are generated, but allow any currently
+ * running interrupt handlers to complete successfully. The GPU is forced off by
+ * the time this function returns, regardless of whether or not the active power
+ * policy asks for the GPU to be powered off.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ */
+void kbase_hwaccess_pm_halt(struct kbase_device *kbdev);
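+
+/*
+ * Illustrative sketch (not part of this patch): the bring-up and tear-down
+ * ordering implied by the comments above. The flags value of 0 and the
+ * example_pm_bringup name are hypothetical; error handling is minimal.
+ *
+ *	static int example_pm_bringup(struct kbase_device *kbdev)
+ *	{
+ *		int err = kbase_hwaccess_pm_init(kbdev);
+ *
+ *		if (err)
+ *			return err;
+ *
+ *		err = kbase_hwaccess_pm_powerup(kbdev, 0);
+ *		if (err)
+ *			kbase_hwaccess_pm_term(kbdev);
+ *		return err;
+ *	}
+ *
+ * On shutdown, kbase_hwaccess_pm_halt() must run before
+ * kbase_hwaccess_pm_term().
+ */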
+
+/**
+ * Perform any backend-specific actions to suspend the GPU
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ */
+void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev);
+
+/**
+ * Perform any backend-specific actions to resume the GPU from a suspend
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ */
+void kbase_hwaccess_pm_resume(struct kbase_device *kbdev);
+
+/**
+ * Perform any required actions for activating the GPU. Called when the first
+ * context goes active.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ */
+void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev);
+
+/**
+ * Perform any required actions for idling the GPU. Called when the last
+ * context goes idle.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ */
+void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev);
+
+
+/**
+ * Set the debug core mask.
+ *
+ * This determines which cores the power manager is allowed to use.
+ *
+ * @param kbdev         The kbase device structure for the device (must be a
+ *                      valid pointer)
+ * @param new_core_mask_js0 The core mask to use for job slot 0
+ * @param new_core_mask_js1 The core mask to use for job slot 1
+ * @param new_core_mask_js2 The core mask to use for job slot 2
+ */
+void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
+		u64 new_core_mask_js0, u64 new_core_mask_js1,
+		u64 new_core_mask_js2);
+
+
+/**
+ * Get the current policy.
+ *
+ * Returns the policy that is currently active.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ *
+ * @return The current policy
+ */
+const struct kbase_pm_ca_policy
+*kbase_pm_ca_get_policy(struct kbase_device *kbdev);
+
+/**
+ * Change the policy to the one specified.
+ *
+ * @param kbdev  The kbase device structure for the device (must be a valid
+ *               pointer)
+ * @param policy The policy to change to (valid pointer returned from
+ *               @ref kbase_pm_ca_list_policies)
+ */
+void kbase_pm_ca_set_policy(struct kbase_device *kbdev,
+				const struct kbase_pm_ca_policy *policy);
+
+/**
+ * Retrieve a static list of the available policies.
+ *
+ * @param[out] policies An array pointer to take the list of policies. This may
+ *                      be NULL. The contents of this array must not be
+ *                      modified.
+ *
+ * @return The number of policies
+ */
+int
+kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **policies);
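+
+/*
+ * Illustrative sketch (not part of this patch): enumerating the available
+ * core availability policies. The pr_info() format is an example only.
+ *
+ *	const struct kbase_pm_ca_policy * const *policies;
+ *	int i, count = kbase_pm_ca_list_policies(&policies);
+ *
+ *	for (i = 0; i < count; i++)
+ *		pr_info("CA policy %d at %p\n", i, policies[i]);
+ */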
+
+
+/**
+ * Get the current policy.
+ *
+ * Returns the policy that is currently active.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ *
+ * @return The current policy
+ */
+const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev);
+
+/**
+ * Change the policy to the one specified.
+ *
+ * @param kbdev  The kbase device structure for the device (must be a valid
+ *               pointer)
+ * @param policy The policy to change to (valid pointer returned from
+ *               @ref kbase_pm_list_policies)
+ */
+void kbase_pm_set_policy(struct kbase_device *kbdev,
+					const struct kbase_pm_policy *policy);
+
+/**
+ * kbase_pm_list_policies - Retrieve a static list of the available policies.
+ *
+ * @kbdev:   The kbase device structure for the device.
+ * @list:    An array pointer to take the list of policies. This may be NULL.
+ *           The contents of this array must not be modified.
+ *
+ * Return: The number of policies
+ */
+int kbase_pm_list_policies(struct kbase_device *kbdev,
+	const struct kbase_pm_policy * const **list);
+
+#endif /* _KBASE_HWACCESS_PM_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h
new file mode 100644
index 0000000..f7539f5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h
@@ -0,0 +1,62 @@
+/*
+ *
+ * (C) COPYRIGHT 2014,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/**
+ * @file mali_kbase_hwaccess_time.h
+ * HW access time common APIs
+ */
+
+#ifndef _KBASE_BACKEND_TIME_H_
+#define _KBASE_BACKEND_TIME_H_
+
+/**
+ * kbase_backend_get_gpu_time() - Get current GPU time
+ * @kbdev:		Device pointer
+ * @cycle_counter:	Pointer to u64 to store cycle counter in
+ * @system_time:	Pointer to u64 to store system time in
+ * @ts:			Pointer to struct timespec to store current monotonic
+ *			time in
+ */
+void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
+				u64 *system_time, struct timespec *ts);
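+
+/*
+ * Illustrative sketch (not part of this patch): taking two correlated
+ * samples and deriving the GPU cycles elapsed between them.
+ *
+ *	u64 cycles0, cycles1, sys0, sys1, elapsed_cycles;
+ *	struct timespec ts0, ts1;
+ *
+ *	kbase_backend_get_gpu_time(kbdev, &cycles0, &sys0, &ts0);
+ *	... workload runs here ...
+ *	kbase_backend_get_gpu_time(kbdev, &cycles1, &sys1, &ts1);
+ *	elapsed_cycles = cycles1 - cycles0;
+ */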
+
+/**
+ * kbase_wait_write_flush() - Wait for GPU write flush
+ * @kbdev:	Kbase device
+ *
+ * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
+ * its write buffer.
+ *
+ * If a GPU reset occurs, the counters are reset to zero and the delay may not
+ * be as expected.
+ *
+ * This function is only in use for BASE_HW_ISSUE_6367
+ */
+#ifdef CONFIG_MALI_NO_MALI
+static inline void kbase_wait_write_flush(struct kbase_device *kbdev)
+{
+}
+#else
+void kbase_wait_write_flush(struct kbase_device *kbdev);
+#endif
+
+#endif /* _KBASE_BACKEND_TIME_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt.c b/drivers/gpu/arm/midgard/mali_kbase_hwcnt.c
new file mode 100644
index 0000000..265fc21
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt.c
@@ -0,0 +1,807 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Implementation of hardware counter context and accumulator APIs.
+ */
+
+#include "mali_kbase_hwcnt_context.h"
+#include "mali_kbase_hwcnt_accumulator.h"
+#include "mali_kbase_hwcnt_backend.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_malisw.h"
+#include "mali_kbase_debug.h"
+#include "mali_kbase_linux.h"
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+/**
+ * enum kbase_hwcnt_accum_state - Hardware counter accumulator states.
+ * @ACCUM_STATE_ERROR:    Error state, where all accumulator operations fail.
+ * @ACCUM_STATE_DISABLED: Disabled state, where dumping is always disabled.
+ * @ACCUM_STATE_ENABLED:  Enabled state, where dumping is enabled if there are
+ *                        any enabled counters.
+ */
+enum kbase_hwcnt_accum_state {
+	ACCUM_STATE_ERROR,
+	ACCUM_STATE_DISABLED,
+	ACCUM_STATE_ENABLED
+};
+
+/**
+ * struct kbase_hwcnt_accumulator - Hardware counter accumulator structure.
+ * @backend:                Pointer to created counter backend.
+ * @state:                  The current state of the accumulator.
+ *                           - State transition from disabled->enabled or
+ *                             disabled->error requires state_lock.
+ *                           - State transition from enabled->disabled or
+ *                             enabled->error requires both accum_lock and
+ *                             state_lock.
+ *                           - Error state persists until next disable.
+ * @enable_map:             The current set of enabled counters.
+ *                           - Must only be modified while holding both
+ *                             accum_lock and state_lock.
+ *                           - Can be read while holding either lock.
+ *                           - Must stay in sync with enable_map_any_enabled.
+ * @enable_map_any_enabled: True if any counters in the map are enabled, else
+ *                          false. If true, and state is ACCUM_STATE_ENABLED,
+ *                          then the counter backend will be enabled.
+ *                           - Must only be modified while holding both
+ *                             accum_lock and state_lock.
+ *                           - Can be read while holding either lock.
+ *                           - Must stay in sync with enable_map.
+ * @scratch_map:            Scratch enable map, used as temporary enable map
+ *                          storage during dumps.
+ *                           - Must only be read or modified while holding
+ *                             accum_lock.
+ * @accum_buf:              Accumulation buffer, where dumps will be accumulated
+ *                          into on transition to a disabled state.
+ *                           - Must only be read or modified while holding
+ *                             accum_lock.
+ * @accumulated:            True if the accumulation buffer has been accumulated
+ *                          into and not subsequently read from yet, else false.
+ *                           - Must only be read or modified while holding
+ *                             accum_lock.
+ * @ts_last_dump_ns:        Timestamp (ns) of the end time of the most recent
+ *                          dump that was requested by the user.
+ *                           - Must only be read or modified while holding
+ *                             accum_lock.
+ */
+struct kbase_hwcnt_accumulator {
+	struct kbase_hwcnt_backend *backend;
+	enum kbase_hwcnt_accum_state state;
+	struct kbase_hwcnt_enable_map enable_map;
+	bool enable_map_any_enabled;
+	struct kbase_hwcnt_enable_map scratch_map;
+	struct kbase_hwcnt_dump_buffer accum_buf;
+	bool accumulated;
+	u64 ts_last_dump_ns;
+};
+
+/**
+ * struct kbase_hwcnt_context - Hardware counter context structure.
+ * @iface:         Pointer to hardware counter backend interface.
+ * @state_lock:    Spinlock protecting state.
+ * @disable_count: Disable count of the context. Initialised to 1.
+ *                 Decremented when the accumulator is acquired, and incremented
+ *                 on release. Incremented on calls to
+ *                 kbase_hwcnt_context_disable[_atomic], and decremented on
+ *                 calls to kbase_hwcnt_context_enable.
+ *                  - Must only be read or modified while holding state_lock.
+ * @accum_lock:    Mutex protecting accumulator.
+ * @accum_inited:  Flag to prevent concurrent accumulator initialisation and/or
+ *                 termination. Set to true before accumulator initialisation,
+ *                 and false after accumulator termination.
+ *                  - Must only be modified while holding both accum_lock and
+ *                    state_lock.
+ *                  - Can be read while holding either lock.
+ * @accum:         Hardware counter accumulator structure.
+ */
+struct kbase_hwcnt_context {
+	const struct kbase_hwcnt_backend_interface *iface;
+	spinlock_t state_lock;
+	size_t disable_count;
+	struct mutex accum_lock;
+	bool accum_inited;
+	struct kbase_hwcnt_accumulator accum;
+};
+
+int kbase_hwcnt_context_init(
+	const struct kbase_hwcnt_backend_interface *iface,
+	struct kbase_hwcnt_context **out_hctx)
+{
+	struct kbase_hwcnt_context *hctx = NULL;
+
+	if (!iface || !out_hctx)
+		return -EINVAL;
+
+	hctx = kzalloc(sizeof(*hctx), GFP_KERNEL);
+	if (!hctx)
+		return -ENOMEM;
+
+	hctx->iface = iface;
+	spin_lock_init(&hctx->state_lock);
+	hctx->disable_count = 1;
+	mutex_init(&hctx->accum_lock);
+	hctx->accum_inited = false;
+
+	*out_hctx = hctx;
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_context_init);
+
+void kbase_hwcnt_context_term(struct kbase_hwcnt_context *hctx)
+{
+	if (!hctx)
+		return;
+
+	/* Make sure we didn't leak the accumulator */
+	WARN_ON(hctx->accum_inited);
+	kfree(hctx);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_context_term);
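+
+/*
+ * Illustrative sketch (not part of this patch): pairing context creation
+ * with termination. example_iface is a hypothetical backend interface.
+ *
+ *	struct kbase_hwcnt_context *hctx;
+ *
+ *	if (!kbase_hwcnt_context_init(&example_iface, &hctx)) {
+ *		... use hctx ...
+ *		kbase_hwcnt_context_term(hctx);
+ *	}
+ */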
+
+/**
+ * kbasep_hwcnt_accumulator_term() - Terminate the accumulator for the context.
+ * @hctx: Non-NULL pointer to hardware counter context.
+ */
+static void kbasep_hwcnt_accumulator_term(struct kbase_hwcnt_context *hctx)
+{
+	WARN_ON(!hctx);
+	WARN_ON(!hctx->accum_inited);
+
+	kbase_hwcnt_enable_map_free(&hctx->accum.scratch_map);
+	kbase_hwcnt_dump_buffer_free(&hctx->accum.accum_buf);
+	kbase_hwcnt_enable_map_free(&hctx->accum.enable_map);
+	hctx->iface->term(hctx->accum.backend);
+	memset(&hctx->accum, 0, sizeof(hctx->accum));
+}
+
+/**
+ * kbasep_hwcnt_accumulator_init() - Initialise the accumulator for the context.
+ * @hctx: Non-NULL pointer to hardware counter context.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_accumulator_init(struct kbase_hwcnt_context *hctx)
+{
+	int errcode;
+
+	WARN_ON(!hctx);
+	WARN_ON(!hctx->accum_inited);
+
+	errcode = hctx->iface->init(
+		hctx->iface->info, &hctx->accum.backend);
+	if (errcode)
+		goto error;
+
+	hctx->accum.state = ACCUM_STATE_ERROR;
+
+	errcode = kbase_hwcnt_enable_map_alloc(
+		hctx->iface->metadata, &hctx->accum.enable_map);
+	if (errcode)
+		goto error;
+
+	hctx->accum.enable_map_any_enabled = false;
+
+	errcode = kbase_hwcnt_dump_buffer_alloc(
+		hctx->iface->metadata, &hctx->accum.accum_buf);
+	if (errcode)
+		goto error;
+
+	errcode = kbase_hwcnt_enable_map_alloc(
+		hctx->iface->metadata, &hctx->accum.scratch_map);
+	if (errcode)
+		goto error;
+
+	hctx->accum.accumulated = false;
+
+	hctx->accum.ts_last_dump_ns =
+		hctx->iface->timestamp_ns(hctx->accum.backend);
+
+	return 0;
+
+error:
+	kbasep_hwcnt_accumulator_term(hctx);
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_accumulator_disable() - Transition the accumulator into the
+ *                                      disabled state, from the enabled or
+ *                                      error states.
+ * @hctx:       Non-NULL pointer to hardware counter context.
+ * @accumulate: True if we should accumulate before disabling, else false.
+ */
+static void kbasep_hwcnt_accumulator_disable(
+	struct kbase_hwcnt_context *hctx, bool accumulate)
+{
+	int errcode = 0;
+	bool backend_enabled = false;
+	struct kbase_hwcnt_accumulator *accum;
+	unsigned long flags;
+
+	WARN_ON(!hctx);
+	lockdep_assert_held(&hctx->accum_lock);
+	WARN_ON(!hctx->accum_inited);
+
+	accum = &hctx->accum;
+
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	WARN_ON(hctx->disable_count != 0);
+	WARN_ON(hctx->accum.state == ACCUM_STATE_DISABLED);
+
+	if ((hctx->accum.state == ACCUM_STATE_ENABLED) &&
+	    (accum->enable_map_any_enabled))
+		backend_enabled = true;
+
+	if (!backend_enabled)
+		hctx->accum.state = ACCUM_STATE_DISABLED;
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+
+	/* Early out if the backend is not already enabled */
+	if (!backend_enabled)
+		return;
+
+	if (!accumulate)
+		goto disable;
+
+	/* Try and accumulate before disabling */
+	errcode = hctx->iface->dump_request(accum->backend);
+	if (errcode)
+		goto disable;
+
+	errcode = hctx->iface->dump_wait(accum->backend);
+	if (errcode)
+		goto disable;
+
+	errcode = hctx->iface->dump_get(accum->backend,
+		&accum->accum_buf, &accum->enable_map, accum->accumulated);
+	if (errcode)
+		goto disable;
+
+	accum->accumulated = true;
+
+disable:
+	hctx->iface->dump_disable(accum->backend);
+
+	/* Regardless of any errors during the accumulate, put the accumulator
+	 * in the disabled state.
+	 */
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	hctx->accum.state = ACCUM_STATE_DISABLED;
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+}
+
+/**
+ * kbasep_hwcnt_accumulator_enable() - Transition the accumulator into the
+ *                                     enabled state, from the disabled state.
+ * @hctx: Non-NULL pointer to hardware counter context.
+ */
+static void kbasep_hwcnt_accumulator_enable(struct kbase_hwcnt_context *hctx)
+{
+	int errcode = 0;
+	struct kbase_hwcnt_accumulator *accum;
+
+	WARN_ON(!hctx);
+	lockdep_assert_held(&hctx->state_lock);
+	WARN_ON(!hctx->accum_inited);
+	WARN_ON(hctx->accum.state != ACCUM_STATE_DISABLED);
+
+	accum = &hctx->accum;
+
+	/* The backend only needs enabling if any counters are enabled */
+	if (accum->enable_map_any_enabled)
+		errcode = hctx->iface->dump_enable_nolock(
+			accum->backend, &accum->enable_map);
+
+	if (!errcode)
+		accum->state = ACCUM_STATE_ENABLED;
+	else
+		accum->state = ACCUM_STATE_ERROR;
+}
+
+/**
+ * kbasep_hwcnt_accumulator_dump() - Perform a dump with the most up-to-date
+ *                                   values of enabled counters possible, and
+ *                                   optionally update the set of enabled
+ *                                   counters.
+ * @hctx:        Non-NULL pointer to the hardware counter context
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ * @new_map:     Pointer to the new counter enable map. If non-NULL, must have
+ *               the same metadata as the accumulator. If NULL, the set of
+ *               enabled counters will be unchanged.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_accumulator_dump(
+	struct kbase_hwcnt_context *hctx,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf,
+	const struct kbase_hwcnt_enable_map *new_map)
+{
+	int errcode = 0;
+	unsigned long flags;
+	enum kbase_hwcnt_accum_state state;
+	bool dump_requested = false;
+	bool dump_written = false;
+	bool cur_map_any_enabled;
+	struct kbase_hwcnt_enable_map *cur_map;
+	bool new_map_any_enabled = false;
+	u64 dump_time_ns;
+	struct kbase_hwcnt_accumulator *accum;
+
+	WARN_ON(!hctx);
+	WARN_ON(!ts_start_ns);
+	WARN_ON(!ts_end_ns);
+	WARN_ON(dump_buf && (dump_buf->metadata != hctx->iface->metadata));
+	WARN_ON(new_map && (new_map->metadata != hctx->iface->metadata));
+	WARN_ON(!hctx->accum_inited);
+	lockdep_assert_held(&hctx->accum_lock);
+
+	accum = &hctx->accum;
+	cur_map = &accum->scratch_map;
+
+	/* Save out info about the current enable map */
+	cur_map_any_enabled = accum->enable_map_any_enabled;
+	kbase_hwcnt_enable_map_copy(cur_map, &accum->enable_map);
+
+	if (new_map)
+		new_map_any_enabled =
+			kbase_hwcnt_enable_map_any_enabled(new_map);
+
+	/*
+	 * We're holding accum_lock, so the accumulator state might transition
+	 * from disabled to enabled during this function (as enabling is lock
+	 * free), but it will never disable (as disabling needs to hold the
+	 * accum_lock), nor will it ever transition from enabled to error (as
+	 * an enable while we're already enabled is impossible).
+	 *
+	 * If we're already disabled, we'll only look at the accumulation buffer
+	 * rather than do a real dump, so a concurrent enable does not affect
+	 * us.
+	 *
+	 * If a concurrent enable fails, we might transition to the error
+	 * state, but again, as we're only looking at the accumulation buffer,
+	 * it's not an issue.
+	 */
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	state = accum->state;
+
+	/*
+	 * Update the new map now, such that if an enable occurs during this
+	 * dump then that enable will set the new map. If we're already enabled,
+	 * then we'll do it ourselves after the dump.
+	 */
+	if (new_map) {
+		kbase_hwcnt_enable_map_copy(
+			&accum->enable_map, new_map);
+		accum->enable_map_any_enabled = new_map_any_enabled;
+	}
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+
+	/* Error state, so early out. No need to roll back any map updates */
+	if (state == ACCUM_STATE_ERROR)
+		return -EIO;
+
+	/* Initiate the dump if the backend is enabled. */
+	if ((state == ACCUM_STATE_ENABLED) && cur_map_any_enabled) {
+		/* Disable pre-emption, to make the timestamp as accurate as
+		 * possible.
+		 */
+		preempt_disable();
+		{
+			dump_time_ns = hctx->iface->timestamp_ns(
+				accum->backend);
+			if (dump_buf) {
+				errcode = hctx->iface->dump_request(
+					accum->backend);
+				dump_requested = true;
+			} else {
+				errcode = hctx->iface->dump_clear(
+					accum->backend);
+			}
+		}
+		preempt_enable();
+		if (errcode)
+			goto error;
+	} else {
+		dump_time_ns = hctx->iface->timestamp_ns(accum->backend);
+	}
+
+	/* Copy any accumulation into the dest buffer */
+	if (accum->accumulated && dump_buf) {
+		kbase_hwcnt_dump_buffer_copy(
+			dump_buf, &accum->accum_buf, cur_map);
+		dump_written = true;
+	}
+
+	/* Wait for any requested dumps to complete */
+	if (dump_requested) {
+		WARN_ON(state != ACCUM_STATE_ENABLED);
+		errcode = hctx->iface->dump_wait(accum->backend);
+		if (errcode)
+			goto error;
+	}
+
+	/* If we're enabled and there's a new enable map, change the enabled set
+	 * as soon after the dump has completed as possible.
+	 */
+	if ((state == ACCUM_STATE_ENABLED) && new_map) {
+		/* Backend is only enabled if there were any enabled counters */
+		if (cur_map_any_enabled)
+			hctx->iface->dump_disable(accum->backend);
+
+		/* (Re-)enable the backend if the new map has enabled counters.
+		 * No need to acquire the spinlock, as concurrent enable while
+		 * we're already enabled and holding accum_lock is impossible.
+		 */
+		if (new_map_any_enabled) {
+			errcode = hctx->iface->dump_enable(
+				accum->backend, new_map);
+			if (errcode)
+				goto error;
+		}
+	}
+
+	/* Copy, accumulate, or zero into the dest buffer to finish */
+	if (dump_buf) {
+		/* If we dumped, copy or accumulate it into the destination */
+		if (dump_requested) {
+			WARN_ON(state != ACCUM_STATE_ENABLED);
+			errcode = hctx->iface->dump_get(
+				accum->backend,
+				dump_buf,
+				cur_map,
+				dump_written);
+			if (errcode)
+				goto error;
+			dump_written = true;
+		}
+
+		/* If we've not written anything into the dump buffer so far, it
+		 * means there was nothing to write. Zero any enabled counters.
+		 */
+		if (!dump_written)
+			kbase_hwcnt_dump_buffer_zero(dump_buf, cur_map);
+	}
+
+	/* Write out timestamps */
+	*ts_start_ns = accum->ts_last_dump_ns;
+	*ts_end_ns = dump_time_ns;
+
+	accum->accumulated = false;
+	accum->ts_last_dump_ns = dump_time_ns;
+
+	return 0;
+error:
+	/* An error was only physically possible if the backend was enabled */
+	WARN_ON(state != ACCUM_STATE_ENABLED);
+
+	/* Disable the backend, and transition to the error state */
+	hctx->iface->dump_disable(accum->backend);
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	accum->state = ACCUM_STATE_ERROR;
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_context_disable() - Increment the disable count of the context.
+ * @hctx:       Non-NULL pointer to hardware counter context.
+ * @accumulate: True if we should accumulate before disabling, else false.
+ */
+static void kbasep_hwcnt_context_disable(
+	struct kbase_hwcnt_context *hctx, bool accumulate)
+{
+	unsigned long flags;
+
+	WARN_ON(!hctx);
+	lockdep_assert_held(&hctx->accum_lock);
+
+	if (!kbase_hwcnt_context_disable_atomic(hctx)) {
+		kbasep_hwcnt_accumulator_disable(hctx, accumulate);
+
+		spin_lock_irqsave(&hctx->state_lock, flags);
+
+		/* Atomic disable failed and we're holding the mutex, so current
+		 * disable count must be 0.
+		 */
+		WARN_ON(hctx->disable_count != 0);
+		hctx->disable_count++;
+
+		spin_unlock_irqrestore(&hctx->state_lock, flags);
+	}
+}
+
+int kbase_hwcnt_accumulator_acquire(
+	struct kbase_hwcnt_context *hctx,
+	struct kbase_hwcnt_accumulator **accum)
+{
+	int errcode = 0;
+	unsigned long flags;
+
+	if (!hctx || !accum)
+		return -EINVAL;
+
+	mutex_lock(&hctx->accum_lock);
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	if (!hctx->accum_inited)
+		/* Mark the accumulator as initialising to prevent concurrent init */
+		hctx->accum_inited = true;
+	else
+		/* Already have an accum, or already being inited */
+		errcode = -EBUSY;
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+	mutex_unlock(&hctx->accum_lock);
+
+	if (errcode)
+		return errcode;
+
+	errcode = kbasep_hwcnt_accumulator_init(hctx);
+
+	if (errcode) {
+		mutex_lock(&hctx->accum_lock);
+		spin_lock_irqsave(&hctx->state_lock, flags);
+
+		hctx->accum_inited = false;
+
+		spin_unlock_irqrestore(&hctx->state_lock, flags);
+		mutex_unlock(&hctx->accum_lock);
+
+		return errcode;
+	}
+
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	WARN_ON(hctx->disable_count == 0);
+	WARN_ON(hctx->accum.enable_map_any_enabled);
+
+	/* Decrement the disable count to allow the accumulator to be accessible
+	 * now that it's fully constructed.
+	 */
+	hctx->disable_count--;
+
+	/*
+	 * Make sure the accumulator is initialised to the correct state.
+	 * Regardless of initial state, counters don't need to be enabled via
+	 * the backend, as the initial enable map has no enabled counters.
+	 */
+	hctx->accum.state = (hctx->disable_count == 0) ?
+		ACCUM_STATE_ENABLED :
+		ACCUM_STATE_DISABLED;
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+
+	*accum = &hctx->accum;
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_accumulator_acquire);
+
+void kbase_hwcnt_accumulator_release(struct kbase_hwcnt_accumulator *accum)
+{
+	unsigned long flags;
+	struct kbase_hwcnt_context *hctx;
+
+	if (!accum)
+		return;
+
+	hctx = container_of(accum, struct kbase_hwcnt_context, accum);
+
+	mutex_lock(&hctx->accum_lock);
+
+	/* Double release is a programming error */
+	WARN_ON(!hctx->accum_inited);
+
+	/* Disable the context to ensure the accumulator is inaccessible while
+	 * we're destroying it. This performs the corresponding disable count
+	 * increment to the decrement done during acquisition.
+	 */
+	kbasep_hwcnt_context_disable(hctx, false);
+
+	mutex_unlock(&hctx->accum_lock);
+
+	kbasep_hwcnt_accumulator_term(hctx);
+
+	mutex_lock(&hctx->accum_lock);
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	hctx->accum_inited = false;
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+	mutex_unlock(&hctx->accum_lock);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_accumulator_release);
+
+void kbase_hwcnt_context_disable(struct kbase_hwcnt_context *hctx)
+{
+	if (WARN_ON(!hctx))
+		return;
+
+	/* Try and atomically disable first, so we can avoid locking the mutex
+	 * if we don't need to.
+	 */
+	if (kbase_hwcnt_context_disable_atomic(hctx))
+		return;
+
+	mutex_lock(&hctx->accum_lock);
+
+	kbasep_hwcnt_context_disable(hctx, true);
+
+	mutex_unlock(&hctx->accum_lock);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_context_disable);
+
+bool kbase_hwcnt_context_disable_atomic(struct kbase_hwcnt_context *hctx)
+{
+	unsigned long flags;
+	bool atomic_disabled = false;
+
+	if (WARN_ON(!hctx))
+		return false;
+
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	if (!WARN_ON(hctx->disable_count == SIZE_MAX)) {
+		/*
+		 * If disable count is non-zero or no counters are enabled, we
+		 * can just bump the disable count.
+		 *
+		 * Otherwise, we can't disable in an atomic context.
+		 */
+		if (hctx->disable_count != 0) {
+			hctx->disable_count++;
+			atomic_disabled = true;
+		} else {
+			WARN_ON(!hctx->accum_inited);
+			if (!hctx->accum.enable_map_any_enabled) {
+				hctx->disable_count++;
+				hctx->accum.state = ACCUM_STATE_DISABLED;
+				atomic_disabled = true;
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+
+	return atomic_disabled;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_context_disable_atomic);
+
+void kbase_hwcnt_context_enable(struct kbase_hwcnt_context *hctx)
+{
+	unsigned long flags;
+
+	if (WARN_ON(!hctx))
+		return;
+
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	if (!WARN_ON(hctx->disable_count == 0)) {
+		if (hctx->disable_count == 1)
+			kbasep_hwcnt_accumulator_enable(hctx);
+
+		hctx->disable_count--;
+	}
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_context_enable);
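+
+/*
+ * Illustrative sketch (not part of this patch): disables nest via the
+ * disable count, so each successful disable (atomic or not) must be
+ * balanced by exactly one enable.
+ *
+ *	kbase_hwcnt_context_disable(hctx);
+ *	... counters are guaranteed disabled here ...
+ *	kbase_hwcnt_context_enable(hctx);
+ */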
+
+const struct kbase_hwcnt_metadata *kbase_hwcnt_context_metadata(
+	struct kbase_hwcnt_context *hctx)
+{
+	if (!hctx)
+		return NULL;
+
+	return hctx->iface->metadata;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_context_metadata);
+
+int kbase_hwcnt_accumulator_set_counters(
+	struct kbase_hwcnt_accumulator *accum,
+	const struct kbase_hwcnt_enable_map *new_map,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	int errcode;
+	struct kbase_hwcnt_context *hctx;
+
+	if (!accum || !new_map || !ts_start_ns || !ts_end_ns)
+		return -EINVAL;
+
+	hctx = container_of(accum, struct kbase_hwcnt_context, accum);
+
+	if ((new_map->metadata != hctx->iface->metadata) ||
+	    (dump_buf && (dump_buf->metadata != hctx->iface->metadata)))
+		return -EINVAL;
+
+	mutex_lock(&hctx->accum_lock);
+
+	errcode = kbasep_hwcnt_accumulator_dump(
+		hctx, ts_start_ns, ts_end_ns, dump_buf, new_map);
+
+	mutex_unlock(&hctx->accum_lock);
+
+	return errcode;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_accumulator_set_counters);
+
+int kbase_hwcnt_accumulator_dump(
+	struct kbase_hwcnt_accumulator *accum,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	int errcode;
+	struct kbase_hwcnt_context *hctx;
+
+	if (!accum || !ts_start_ns || !ts_end_ns)
+		return -EINVAL;
+
+	hctx = container_of(accum, struct kbase_hwcnt_context, accum);
+
+	if (dump_buf && (dump_buf->metadata != hctx->iface->metadata))
+		return -EINVAL;
+
+	mutex_lock(&hctx->accum_lock);
+
+	errcode = kbasep_hwcnt_accumulator_dump(
+		hctx, ts_start_ns, ts_end_ns, dump_buf, NULL);
+
+	mutex_unlock(&hctx->accum_lock);
+
+	return errcode;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_accumulator_dump);
+
+u64 kbase_hwcnt_accumulator_timestamp_ns(struct kbase_hwcnt_accumulator *accum)
+{
+	struct kbase_hwcnt_context *hctx;
+
+	if (WARN_ON(!accum))
+		return 0;
+
+	hctx = container_of(accum, struct kbase_hwcnt_context, accum);
+	return hctx->iface->timestamp_ns(accum->backend);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_accumulator.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_accumulator.h
new file mode 100644
index 0000000..eb82ea4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_accumulator.h
@@ -0,0 +1,146 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Hardware counter accumulator API.
+ */
+
+#ifndef _KBASE_HWCNT_ACCUMULATOR_H_
+#define _KBASE_HWCNT_ACCUMULATOR_H_
+
+#include <linux/types.h>
+
+struct kbase_hwcnt_context;
+struct kbase_hwcnt_accumulator;
+struct kbase_hwcnt_enable_map;
+struct kbase_hwcnt_dump_buffer;
+
+/**
+ * kbase_hwcnt_accumulator_acquire() - Acquire the hardware counter accumulator
+ *                                     for a hardware counter context.
+ * @hctx:  Non-NULL pointer to a hardware counter context.
+ * @accum: Non-NULL pointer to where the pointer to the created accumulator
+ *         will be stored on success.
+ *
+ * There can exist at most one instance of the hardware counter accumulator per
+ * context at a time.
+ *
+ * If multiple clients need access to the hardware counters at the same time,
+ * then an abstraction built on top of the single instance to the hardware
+ * counter accumulator is required.
+ *
+ * No counters will be enabled with the returned accumulator. A subsequent call
+ * to kbase_hwcnt_accumulator_set_counters must be used to turn them on.
+ *
+ * There are four components to a hardware counter dump:
+ *  - A set of enabled counters
+ *  - A start time
+ *  - An end time
+ *  - A dump buffer containing the accumulated counter values for all enabled
+ *    counters between the start and end times.
+ *
+ * For each dump, it is guaranteed that all enabled counters were active for the
+ * entirety of the period between the start and end times.
+ *
+ * It is also guaranteed that the start time of dump "n" is always equal to the
+ * end time of dump "n - 1".
+ *
+ * For all dumps, the values of any counters that were not enabled are undefined.
+ *
+ * Return: 0 on success or error code.
+ */
+int kbase_hwcnt_accumulator_acquire(
+	struct kbase_hwcnt_context *hctx,
+	struct kbase_hwcnt_accumulator **accum);
+
+/**
+ * kbase_hwcnt_accumulator_release() - Release a hardware counter accumulator.
+ * @accum: Non-NULL pointer to the hardware counter accumulator.
+ *
+ * The accumulator must be released before the context the accumulator was
+ * created from is terminated.
+ */
+void kbase_hwcnt_accumulator_release(struct kbase_hwcnt_accumulator *accum);
+
+/**
+ * kbase_hwcnt_accumulator_set_counters() - Perform a dump of the currently
+ *                                          enabled counters, and enable a new
+ *                                          set of counters that will be used
+ *                                          for subsequent dumps.
+ * @accum:       Non-NULL pointer to the hardware counter accumulator.
+ * @new_map:     Non-NULL pointer to the new counter enable map. Must have the
+ *               same metadata as the accumulator.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * If this function fails for some unexpected reason (i.e. anything other than
+ * invalid args), then the accumulator will be put into the error state until
+ * the parent context is next disabled.
+ *
+ * Return: 0 on success or error code.
+ */
+int kbase_hwcnt_accumulator_set_counters(
+	struct kbase_hwcnt_accumulator *accum,
+	const struct kbase_hwcnt_enable_map *new_map,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf);
+
+/**
+ * kbase_hwcnt_accumulator_dump() - Perform a dump of the currently enabled
+ *                                  counters.
+ * @accum:       Non-NULL pointer to the hardware counter accumulator.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * If this function fails for some unexpected reason (i.e. anything other than
+ * invalid args), then the accumulator will be put into the error state until
+ * the parent context is next disabled.
+ *
+ * Return: 0 on success or error code.
+ */
+int kbase_hwcnt_accumulator_dump(
+	struct kbase_hwcnt_accumulator *accum,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf);
+
+/**
+ * kbase_hwcnt_accumulator_timestamp_ns() - Get the current accumulator backend
+ *                                          timestamp.
+ * @accum: Non-NULL pointer to the hardware counter accumulator.
+ *
+ * Return: Accumulator backend timestamp in nanoseconds.
+ */
+u64 kbase_hwcnt_accumulator_timestamp_ns(struct kbase_hwcnt_accumulator *accum);
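+
+/*
+ * Illustrative sketch (not part of this patch): a typical accumulator
+ * session. example_map and example_buf are hypothetical objects created
+ * from the context's metadata; error handling is elided.
+ *
+ *	struct kbase_hwcnt_accumulator *accum;
+ *	u64 ts_start, ts_end;
+ *
+ *	kbase_hwcnt_accumulator_acquire(hctx, &accum);
+ *	kbase_hwcnt_accumulator_set_counters(accum, &example_map,
+ *					     &ts_start, &ts_end, NULL);
+ *	... later, collect the counters accumulated since the last dump ...
+ *	kbase_hwcnt_accumulator_dump(accum, &ts_start, &ts_end, &example_buf);
+ *	kbase_hwcnt_accumulator_release(accum);
+ */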
+
+#endif /* _KBASE_HWCNT_ACCUMULATOR_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend.h
new file mode 100644
index 0000000..b7aa0e1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend.h
@@ -0,0 +1,217 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Virtual interface for hardware counter backends.
+ */
+
+#ifndef _KBASE_HWCNT_BACKEND_H_
+#define _KBASE_HWCNT_BACKEND_H_
+
+#include <linux/types.h>
+
+struct kbase_hwcnt_metadata;
+struct kbase_hwcnt_enable_map;
+struct kbase_hwcnt_dump_buffer;
+
+/*
+ * struct kbase_hwcnt_backend_info - Opaque pointer to information used to
+ *                                   create an instance of a hardware counter
+ *                                   backend.
+ */
+struct kbase_hwcnt_backend_info;
+
+/*
+ * struct kbase_hwcnt_backend - Opaque pointer to a hardware counter
+ *                                   backend, used to perform dumps.
+ */
+struct kbase_hwcnt_backend;
+
+/**
+ * typedef kbase_hwcnt_backend_init_fn - Initialise a counter backend.
+ * @info:        Non-NULL pointer to backend info.
+ * @out_backend: Non-NULL pointer to where backend is stored on success.
+ *
+ * All uses of the created hardware counter backend must be externally
+ * synchronised.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_init_fn)(
+	const struct kbase_hwcnt_backend_info *info,
+	struct kbase_hwcnt_backend **out_backend);
+
+/**
+ * typedef kbase_hwcnt_backend_term_fn - Terminate a counter backend.
+ * @backend: Pointer to backend to be terminated.
+ */
+typedef void (*kbase_hwcnt_backend_term_fn)(
+	struct kbase_hwcnt_backend *backend);
+
+/**
+ * typedef kbase_hwcnt_backend_timestamp_ns_fn - Get the current backend
+ *                                               timestamp.
+ * @backend: Non-NULL pointer to backend.
+ *
+ * Return: Backend timestamp in nanoseconds.
+ */
+typedef u64 (*kbase_hwcnt_backend_timestamp_ns_fn)(
+	struct kbase_hwcnt_backend *backend);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_enable_fn - Start counter dumping with the
+ *                                              backend.
+ * @backend:    Non-NULL pointer to backend.
+ * @enable_map: Non-NULL pointer to enable map specifying enabled counters.
+ *
+ * The enable_map must have been created using the interface's metadata.
+ * If the backend has already been enabled, an error is returned.
+ *
+ * May be called in an atomic context.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_dump_enable_fn)(
+	struct kbase_hwcnt_backend *backend,
+	const struct kbase_hwcnt_enable_map *enable_map);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_enable_nolock_fn - Start counter dumping
+ *                                                     with the backend.
+ * @backend:    Non-NULL pointer to backend.
+ * @enable_map: Non-NULL pointer to enable map specifying enabled counters.
+ *
+ * Exactly the same as kbase_hwcnt_backend_dump_enable_fn(), except must be
+ * called in an atomic context with the spinlock documented by the specific
+ * backend interface held.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_dump_enable_nolock_fn)(
+	struct kbase_hwcnt_backend *backend,
+	const struct kbase_hwcnt_enable_map *enable_map);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_disable_fn - Disable counter dumping with
+ *                                               the backend.
+ * @backend: Non-NULL pointer to backend.
+ *
+ * If the backend is already disabled, does nothing.
+ * Any undumped counter values since the last dump_get will be lost.
+ */
+typedef void (*kbase_hwcnt_backend_dump_disable_fn)(
+	struct kbase_hwcnt_backend *backend);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_clear_fn - Reset all the current undumped
+ *                                             counters.
+ * @backend: Non-NULL pointer to backend.
+ *
+ * If the backend is not enabled, returns an error.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_dump_clear_fn)(
+	struct kbase_hwcnt_backend *backend);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_request_fn - Request an asynchronous counter
+ *                                               dump.
+ * @backend: Non-NULL pointer to backend.
+ *
+ * If the backend is not enabled or another dump is already in progress,
+ * returns an error.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_dump_request_fn)(
+	struct kbase_hwcnt_backend *backend);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_wait_fn - Wait until the last requested
+ *                                            counter dump has completed.
+ * @backend: Non-NULL pointer to backend.
+ *
+ * If the backend is not enabled, returns an error.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_dump_wait_fn)(
+	struct kbase_hwcnt_backend *backend);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_get_fn - Copy or accumulate the enabled
+ *                                           counters dumped after the last dump
+ *                                           request into the dump buffer.
+ * @backend:     Non-NULL pointer to backend.
+ * @dump_buffer: Non-NULL pointer to destination dump buffer.
+ * @enable_map:  Non-NULL pointer to enable map specifying enabled values.
+ * @accumulate:  True if counters should be accumulated into dump_buffer, rather
+ *               than copied.
+ *
+ * If the backend is not enabled, returns an error.
+ * If a dump is in progress (i.e. dump_wait has not yet returned successfully)
+ * then the resultant contents of the dump buffer will be undefined.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_dump_get_fn)(
+	struct kbase_hwcnt_backend *backend,
+	struct kbase_hwcnt_dump_buffer *dump_buffer,
+	const struct kbase_hwcnt_enable_map *enable_map,
+	bool accumulate);
+
+/**
+ * struct kbase_hwcnt_backend_interface - Hardware counter backend virtual
+ *                                        interface.
+ * @metadata:           Immutable hardware counter metadata.
+ * @info:               Immutable info used to initialise an instance of the
+ *                      backend.
+ * @init:               Function ptr to initialise an instance of the backend.
+ * @term:               Function ptr to terminate an instance of the backend.
+ * @timestamp_ns:       Function ptr to get the current backend timestamp.
+ * @dump_enable:        Function ptr to enable dumping.
+ * @dump_enable_nolock: Function ptr to enable dumping while the
+ *                      backend-specific spinlock is already held.
+ * @dump_disable:       Function ptr to disable dumping.
+ * @dump_clear:         Function ptr to clear counters.
+ * @dump_request:       Function ptr to request a dump.
+ * @dump_wait:          Function ptr to wait until a dump has completed.
+ * @dump_get:           Function ptr to copy or accumulate dump into a dump
+ *                      buffer.
+ */
+struct kbase_hwcnt_backend_interface {
+	const struct kbase_hwcnt_metadata *metadata;
+	const struct kbase_hwcnt_backend_info *info;
+	kbase_hwcnt_backend_init_fn init;
+	kbase_hwcnt_backend_term_fn term;
+	kbase_hwcnt_backend_timestamp_ns_fn timestamp_ns;
+	kbase_hwcnt_backend_dump_enable_fn dump_enable;
+	kbase_hwcnt_backend_dump_enable_nolock_fn dump_enable_nolock;
+	kbase_hwcnt_backend_dump_disable_fn dump_disable;
+	kbase_hwcnt_backend_dump_clear_fn dump_clear;
+	kbase_hwcnt_backend_dump_request_fn dump_request;
+	kbase_hwcnt_backend_dump_wait_fn dump_wait;
+	kbase_hwcnt_backend_dump_get_fn dump_get;
+};
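+
+/*
+ * Illustrative sketch (not part of this patch): wiring stub callbacks into
+ * the virtual interface. All example_* symbols are hypothetical; a real
+ * backend supplies its own metadata, info, and implementations.
+ *
+ *	static const struct kbase_hwcnt_backend_interface example_iface = {
+ *		.metadata = example_metadata,
+ *		.info = example_info,
+ *		.init = example_init,
+ *		.term = example_term,
+ *		.timestamp_ns = example_timestamp_ns,
+ *		.dump_enable = example_dump_enable,
+ *		.dump_enable_nolock = example_dump_enable_nolock,
+ *		.dump_disable = example_dump_disable,
+ *		.dump_clear = example_dump_clear,
+ *		.dump_request = example_dump_request,
+ *		.dump_wait = example_dump_wait,
+ *		.dump_get = example_dump_get,
+ *	};
+ */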
+
+#endif /* _KBASE_HWCNT_BACKEND_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.c b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.c
new file mode 100644
index 0000000..1e9c25a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.c
@@ -0,0 +1,511 @@
+/*
+ *
+ * (C) COPYRIGHT 2018-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_hwcnt_backend_gpu.h"
+#include "mali_kbase_hwcnt_gpu.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_kbase.h"
+#include "mali_kbase_pm_ca.h"
+#include "mali_kbase_hwaccess_instr.h"
+#ifdef CONFIG_MALI_NO_MALI
+#include "backend/gpu/mali_kbase_model_dummy.h"
+#endif
+
+/**
+ * struct kbase_hwcnt_backend_gpu_info - Information used to create an instance
+ *                                       of a GPU hardware counter backend.
+ * @kbdev:         KBase device.
+ * @use_secondary: True if secondary performance counters should be used,
+ *                 else false. Ignored if secondary counters are not supported.
+ * @metadata:      Hardware counter metadata.
+ * @dump_bytes:    Bytes of GPU memory required to perform a
+ *                 hardware counter dump.
+ */
+struct kbase_hwcnt_backend_gpu_info {
+	struct kbase_device *kbdev;
+	bool use_secondary;
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t dump_bytes;
+};
+
+/**
+ * struct kbase_hwcnt_backend_gpu - Instance of a GPU hardware counter backend.
+ * @info:         Info used to create the backend.
+ * @kctx:         KBase context used for GPU memory allocation and
+ *                counter dumping.
+ * @gpu_dump_va:  GPU hardware counter dump buffer virtual address.
+ * @cpu_dump_va:  CPU mapping of gpu_dump_va.
+ * @vmap:         Dump buffer vmap.
+ * @enabled:      True if dumping has been enabled, else false.
+ * @pm_core_mask: PM state synchronized shader core mask for the enabled dump.
+ */
+struct kbase_hwcnt_backend_gpu {
+	const struct kbase_hwcnt_backend_gpu_info *info;
+	struct kbase_context *kctx;
+	u64 gpu_dump_va;
+	void *cpu_dump_va;
+	struct kbase_vmap_struct *vmap;
+	bool enabled;
+	u64 pm_core_mask;
+};
+
+/* GPU backend implementation of kbase_hwcnt_backend_timestamp_ns_fn */
+static u64 kbasep_hwcnt_backend_gpu_timestamp_ns(
+	struct kbase_hwcnt_backend *backend)
+{
+	struct timespec ts;
+
+	(void)backend;
+	getrawmonotonic(&ts);
+	return (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_enable_nolock_fn */
+static int kbasep_hwcnt_backend_gpu_dump_enable_nolock(
+	struct kbase_hwcnt_backend *backend,
+	const struct kbase_hwcnt_enable_map *enable_map)
+{
+	int errcode;
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+	struct kbase_context *kctx;
+	struct kbase_device *kbdev;
+	struct kbase_hwcnt_physical_enable_map phys;
+	struct kbase_instr_hwcnt_enable enable;
+
+	if (!backend_gpu || !enable_map || backend_gpu->enabled ||
+	    (enable_map->metadata != backend_gpu->info->metadata))
+		return -EINVAL;
+
+	kctx = backend_gpu->kctx;
+	kbdev = backend_gpu->kctx->kbdev;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbase_hwcnt_gpu_enable_map_to_physical(&phys, enable_map);
+
+	enable.jm_bm = phys.jm_bm;
+	enable.shader_bm = phys.shader_bm;
+	enable.tiler_bm = phys.tiler_bm;
+	enable.mmu_l2_bm = phys.mmu_l2_bm;
+	enable.use_secondary = backend_gpu->info->use_secondary;
+	enable.dump_buffer = backend_gpu->gpu_dump_va;
+	enable.dump_buffer_bytes = backend_gpu->info->dump_bytes;
+
+	errcode = kbase_instr_hwcnt_enable_internal(kbdev, kctx, &enable);
+	if (errcode)
+		goto error;
+
+	backend_gpu->pm_core_mask = kbase_pm_ca_get_instr_core_mask(kbdev);
+	backend_gpu->enabled = true;
+
+	return 0;
+error:
+	return errcode;
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_enable_fn */
+static int kbasep_hwcnt_backend_gpu_dump_enable(
+	struct kbase_hwcnt_backend *backend,
+	const struct kbase_hwcnt_enable_map *enable_map)
+{
+	unsigned long flags;
+	int errcode;
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+	struct kbase_device *kbdev;
+
+	if (!backend_gpu)
+		return -EINVAL;
+
+	kbdev = backend_gpu->kctx->kbdev;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	errcode = kbasep_hwcnt_backend_gpu_dump_enable_nolock(
+		backend, enable_map);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return errcode;
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_disable_fn */
+static void kbasep_hwcnt_backend_gpu_dump_disable(
+	struct kbase_hwcnt_backend *backend)
+{
+	int errcode;
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+
+	if (WARN_ON(!backend_gpu) || !backend_gpu->enabled)
+		return;
+
+	errcode = kbase_instr_hwcnt_disable_internal(backend_gpu->kctx);
+	WARN_ON(errcode);
+
+	backend_gpu->enabled = false;
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_clear_fn */
+static int kbasep_hwcnt_backend_gpu_dump_clear(
+	struct kbase_hwcnt_backend *backend)
+{
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+
+	if (!backend_gpu || !backend_gpu->enabled)
+		return -EINVAL;
+
+	return kbase_instr_hwcnt_clear(backend_gpu->kctx);
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_request_fn */
+static int kbasep_hwcnt_backend_gpu_dump_request(
+	struct kbase_hwcnt_backend *backend)
+{
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+
+	if (!backend_gpu || !backend_gpu->enabled)
+		return -EINVAL;
+
+	return kbase_instr_hwcnt_request_dump(backend_gpu->kctx);
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_wait_fn */
+static int kbasep_hwcnt_backend_gpu_dump_wait(
+	struct kbase_hwcnt_backend *backend)
+{
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+
+	if (!backend_gpu || !backend_gpu->enabled)
+		return -EINVAL;
+
+	return kbase_instr_hwcnt_wait_for_dump(backend_gpu->kctx);
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_get_fn */
+static int kbasep_hwcnt_backend_gpu_dump_get(
+	struct kbase_hwcnt_backend *backend,
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_enable_map *dst_enable_map,
+	bool accumulate)
+{
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+
+	if (!backend_gpu || !dst || !dst_enable_map ||
+	    (backend_gpu->info->metadata != dst->metadata) ||
+	    (dst_enable_map->metadata != dst->metadata))
+		return -EINVAL;
+
+	/* Invalidate the kernel buffer before reading from it. */
+	kbase_sync_mem_regions(
+		backend_gpu->kctx, backend_gpu->vmap, KBASE_SYNC_TO_CPU);
+
+	return kbase_hwcnt_gpu_dump_get(
+		dst, backend_gpu->cpu_dump_va, dst_enable_map,
+		backend_gpu->pm_core_mask, accumulate);
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_dump_alloc() - Allocate a GPU dump buffer.
+ * @info:        Non-NULL pointer to GPU backend info.
+ * @kctx:        Non-NULL pointer to kbase context.
+ * @gpu_dump_va: Non-NULL pointer to where GPU dump buffer virtual address
+ *               is stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_backend_gpu_dump_alloc(
+	const struct kbase_hwcnt_backend_gpu_info *info,
+	struct kbase_context *kctx,
+	u64 *gpu_dump_va)
+{
+	struct kbase_va_region *reg;
+	u64 flags;
+	u64 nr_pages;
+
+	WARN_ON(!info);
+	WARN_ON(!kctx);
+	WARN_ON(!gpu_dump_va);
+
+	flags = BASE_MEM_PROT_CPU_RD |
+		BASE_MEM_PROT_GPU_WR |
+		BASEP_MEM_PERMANENT_KERNEL_MAPPING |
+		BASE_MEM_CACHED_CPU;
+
+	if (kctx->kbdev->mmu_mode->flags & KBASE_MMU_MODE_HAS_NON_CACHEABLE)
+		flags |= BASE_MEM_UNCACHED_GPU;
+
+	nr_pages = PFN_UP(info->dump_bytes);
+
+	reg = kbase_mem_alloc(kctx, nr_pages, nr_pages, 0, &flags, gpu_dump_va);
+
+	if (!reg)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_dump_free() - Free an allocated GPU dump buffer.
+ * @kctx:        Non-NULL pointer to kbase context.
+ * @gpu_dump_va: GPU dump buffer virtual address.
+ */
+static void kbasep_hwcnt_backend_gpu_dump_free(
+	struct kbase_context *kctx,
+	u64 gpu_dump_va)
+{
+	WARN_ON(!kctx);
+	if (gpu_dump_va)
+		kbase_mem_free(kctx, gpu_dump_va);
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_destroy() - Destroy a GPU backend.
+ * @backend: Pointer to GPU backend to destroy.
+ *
+ * Can be safely called on a backend in any state of partial construction.
+ */
+static void kbasep_hwcnt_backend_gpu_destroy(
+	struct kbase_hwcnt_backend_gpu *backend)
+{
+	if (!backend)
+		return;
+
+	if (backend->kctx) {
+		struct kbase_context *kctx = backend->kctx;
+		struct kbase_device *kbdev = kctx->kbdev;
+
+		if (backend->cpu_dump_va)
+			kbase_phy_alloc_mapping_put(kctx, backend->vmap);
+
+		if (backend->gpu_dump_va)
+			kbasep_hwcnt_backend_gpu_dump_free(
+				kctx, backend->gpu_dump_va);
+
+		kbasep_js_release_privileged_ctx(kbdev, kctx);
+		kbase_destroy_context(kctx);
+	}
+
+	kfree(backend);
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_create() - Create a GPU backend.
+ * @info:        Non-NULL pointer to backend info.
+ * @out_backend: Non-NULL pointer to where backend is stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_backend_gpu_create(
+	const struct kbase_hwcnt_backend_gpu_info *info,
+	struct kbase_hwcnt_backend_gpu **out_backend)
+{
+	int errcode;
+	struct kbase_device *kbdev;
+	struct kbase_hwcnt_backend_gpu *backend = NULL;
+
+	WARN_ON(!info);
+	WARN_ON(!out_backend);
+
+	kbdev = info->kbdev;
+
+	backend = kzalloc(sizeof(*backend), GFP_KERNEL);
+	if (!backend)
+		goto alloc_error;
+
+	backend->info = info;
+
+	backend->kctx = kbase_create_context(kbdev, true,
+		BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED, 0, NULL);
+	if (!backend->kctx)
+		goto alloc_error;
+
+	kbasep_js_schedule_privileged_ctx(kbdev, backend->kctx);
+
+	errcode = kbasep_hwcnt_backend_gpu_dump_alloc(
+		info, backend->kctx, &backend->gpu_dump_va);
+	if (errcode)
+		goto error;
+
+	backend->cpu_dump_va = kbase_phy_alloc_mapping_get(backend->kctx,
+		backend->gpu_dump_va, &backend->vmap);
+	if (!backend->cpu_dump_va)
+		goto alloc_error;
+
+#ifdef CONFIG_MALI_NO_MALI
+	/* The dummy model needs the CPU mapping. */
+	gpu_model_set_dummy_prfcnt_base_cpu(backend->cpu_dump_va);
+#endif
+
+	*out_backend = backend;
+	return 0;
+
+alloc_error:
+	errcode = -ENOMEM;
+error:
+	kbasep_hwcnt_backend_gpu_destroy(backend);
+	return errcode;
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_init_fn */
+static int kbasep_hwcnt_backend_gpu_init(
+	const struct kbase_hwcnt_backend_info *info,
+	struct kbase_hwcnt_backend **out_backend)
+{
+	int errcode;
+	struct kbase_hwcnt_backend_gpu *backend = NULL;
+
+	if (!info || !out_backend)
+		return -EINVAL;
+
+	errcode = kbasep_hwcnt_backend_gpu_create(
+		(const struct kbase_hwcnt_backend_gpu_info *) info, &backend);
+	if (errcode)
+		return errcode;
+
+	*out_backend = (struct kbase_hwcnt_backend *)backend;
+
+	return 0;
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_term_fn */
+static void kbasep_hwcnt_backend_gpu_term(struct kbase_hwcnt_backend *backend)
+{
+	if (!backend)
+		return;
+
+	kbasep_hwcnt_backend_gpu_dump_disable(backend);
+	kbasep_hwcnt_backend_gpu_destroy(
+		(struct kbase_hwcnt_backend_gpu *)backend);
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_info_destroy() - Destroy a GPU backend info.
+ * @info: Pointer to info to destroy.
+ *
+ * Can be safely called on a backend info in any state of partial construction.
+ */
+static void kbasep_hwcnt_backend_gpu_info_destroy(
+	const struct kbase_hwcnt_backend_gpu_info *info)
+{
+	if (!info)
+		return;
+
+	kbase_hwcnt_gpu_metadata_destroy(info->metadata);
+	kfree(info);
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_info_create() - Create a GPU backend info.
+ * @kbdev: Non-NULL pointer to kbase device.
+ * @out_info: Non-NULL pointer to where info is stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_backend_gpu_info_create(
+	struct kbase_device *kbdev,
+	const struct kbase_hwcnt_backend_gpu_info **out_info)
+{
+	int errcode = -ENOMEM;
+	struct kbase_hwcnt_gpu_info hwcnt_gpu_info;
+	struct kbase_hwcnt_backend_gpu_info *info = NULL;
+
+	WARN_ON(!kbdev);
+	WARN_ON(!out_info);
+
+	errcode = kbase_hwcnt_gpu_info_init(kbdev, &hwcnt_gpu_info);
+	if (errcode)
+		return errcode;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		goto error;
+
+	info->kbdev = kbdev;
+
+#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
+	info->use_secondary = true;
+#else
+	info->use_secondary = false;
+#endif
+
+	errcode = kbase_hwcnt_gpu_metadata_create(
+		&hwcnt_gpu_info, info->use_secondary,
+		&info->metadata,
+		&info->dump_bytes);
+	if (errcode)
+		goto error;
+
+	*out_info = info;
+
+	return 0;
+error:
+	kbasep_hwcnt_backend_gpu_info_destroy(info);
+	return errcode;
+}
+
+int kbase_hwcnt_backend_gpu_create(
+	struct kbase_device *kbdev,
+	struct kbase_hwcnt_backend_interface *iface)
+{
+	int errcode;
+	const struct kbase_hwcnt_backend_gpu_info *info = NULL;
+
+	if (!kbdev || !iface)
+		return -EINVAL;
+
+	errcode = kbasep_hwcnt_backend_gpu_info_create(kbdev, &info);
+
+	if (errcode)
+		return errcode;
+
+	iface->metadata = info->metadata;
+	iface->info = (struct kbase_hwcnt_backend_info *)info;
+	iface->init = kbasep_hwcnt_backend_gpu_init;
+	iface->term = kbasep_hwcnt_backend_gpu_term;
+	iface->timestamp_ns = kbasep_hwcnt_backend_gpu_timestamp_ns;
+	iface->dump_enable = kbasep_hwcnt_backend_gpu_dump_enable;
+	iface->dump_enable_nolock = kbasep_hwcnt_backend_gpu_dump_enable_nolock;
+	iface->dump_disable = kbasep_hwcnt_backend_gpu_dump_disable;
+	iface->dump_clear = kbasep_hwcnt_backend_gpu_dump_clear;
+	iface->dump_request = kbasep_hwcnt_backend_gpu_dump_request;
+	iface->dump_wait = kbasep_hwcnt_backend_gpu_dump_wait;
+	iface->dump_get = kbasep_hwcnt_backend_gpu_dump_get;
+
+	return 0;
+}
+
+void kbase_hwcnt_backend_gpu_destroy(
+	struct kbase_hwcnt_backend_interface *iface)
+{
+	if (!iface)
+		return;
+
+	kbasep_hwcnt_backend_gpu_info_destroy(
+		(const struct kbase_hwcnt_backend_gpu_info *)iface->info);
+	memset(iface, 0, sizeof(*iface));
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.h
new file mode 100644
index 0000000..7712f14
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.h
@@ -0,0 +1,61 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Concrete implementation of mali_kbase_hwcnt_backend interface for GPU
+ * backend.
+ */
+
+#ifndef _KBASE_HWCNT_BACKEND_GPU_H_
+#define _KBASE_HWCNT_BACKEND_GPU_H_
+
+#include "mali_kbase_hwcnt_backend.h"
+
+struct kbase_device;
+
+/**
+ * kbase_hwcnt_backend_gpu_create() - Create a GPU hardware counter backend
+ *                                    interface.
+ * @kbdev: Non-NULL pointer to kbase device.
+ * @iface: Non-NULL pointer to backend interface structure that is filled in
+ *         on creation success.
+ *
+ * Calls to iface->dump_enable_nolock() require kbdev->hwaccess_lock to be
+ * held.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_backend_gpu_create(
+	struct kbase_device *kbdev,
+	struct kbase_hwcnt_backend_interface *iface);
+
+/**
+ * kbase_hwcnt_backend_gpu_destroy() - Destroy a GPU hardware counter backend
+ *                                     interface.
+ * @iface: Pointer to interface to destroy.
+ *
+ * Can be safely called on an all-zeroed interface, or on an already destroyed
+ * interface.
+ */
+void kbase_hwcnt_backend_gpu_destroy(
+	struct kbase_hwcnt_backend_interface *iface);
+
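+/*
+ * Illustrative lifecycle sketch (error handling elided; kbdev is assumed to
+ * come from the caller's device probe path):
+ *
+ *	struct kbase_hwcnt_backend_interface iface;
+ *	struct kbase_hwcnt_backend *backend;
+ *
+ *	if (!kbase_hwcnt_backend_gpu_create(kbdev, &iface)) {
+ *		if (!iface.init(iface.info, &backend)) {
+ *			u64 ts = iface.timestamp_ns(backend);
+ *
+ *			iface.term(backend);
+ *		}
+ *		kbase_hwcnt_backend_gpu_destroy(&iface);
+ *	}
+ */
+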
+#endif /* _KBASE_HWCNT_BACKEND_GPU_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_context.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_context.h
new file mode 100644
index 0000000..bc50ad1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_context.h
@@ -0,0 +1,119 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Hardware counter context API.
+ */
+
+#ifndef _KBASE_HWCNT_CONTEXT_H_
+#define _KBASE_HWCNT_CONTEXT_H_
+
+#include <linux/types.h>
+
+struct kbase_hwcnt_backend_interface;
+struct kbase_hwcnt_context;
+
+/**
+ * kbase_hwcnt_context_init() - Initialise a hardware counter context.
+ * @iface:    Non-NULL pointer to a hardware counter backend interface.
+ * @out_hctx: Non-NULL pointer to where the pointer to the created context will
+ *            be stored on success.
+ *
+ * On creation, the disable count of the context will be 0.
+ * A hardware counter accumulator can be acquired using a created context.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_context_init(
+	const struct kbase_hwcnt_backend_interface *iface,
+	struct kbase_hwcnt_context **out_hctx);
+
+/**
+ * kbase_hwcnt_context_term() - Terminate a hardware counter context.
+ * @hctx: Pointer to context to be terminated.
+ */
+void kbase_hwcnt_context_term(struct kbase_hwcnt_context *hctx);
+
+/**
+ * kbase_hwcnt_context_metadata() - Get the hardware counter metadata used by
+ *                                  the context, so related counter data
+ *                                  structures can be created.
+ * @hctx: Non-NULL pointer to the hardware counter context.
+ *
+ * Return: Non-NULL pointer to metadata, or NULL on error.
+ */
+const struct kbase_hwcnt_metadata *kbase_hwcnt_context_metadata(
+	struct kbase_hwcnt_context *hctx);
+
+/**
+ * kbase_hwcnt_context_disable() - Increment the disable count of the context.
+ * @hctx: Pointer to the hardware counter context.
+ *
+ * If a call to this function increments the disable count from 0 to 1, and
+ * an accumulator has been acquired, then a counter dump will be performed
+ * before counters are disabled via the backend interface.
+ *
+ * Subsequent dumps via the accumulator while counters are disabled will first
+ * return the accumulated dump, then will return dumps with zeroed counters.
+ *
+ * After this function call returns, it is guaranteed that counters will not be
+ * enabled via the backend interface.
+ */
+void kbase_hwcnt_context_disable(struct kbase_hwcnt_context *hctx);
+
+/**
+ * kbase_hwcnt_context_disable_atomic() - Increment the disable count of the
+ *                                        context if possible in an atomic
+ *                                        context.
+ * @hctx: Pointer to the hardware counter context.
+ *
+ * This function will only succeed if hardware counters are effectively already
+ * disabled, i.e. there is no accumulator, the disable count is already
+ * non-zero, or the accumulator has no counters set.
+ *
+ * After this function call returns true, it is guaranteed that counters will
+ * not be enabled via the backend interface.
+ *
+ * Return: true if the disable count was incremented, else false.
+ */
+bool kbase_hwcnt_context_disable_atomic(struct kbase_hwcnt_context *hctx);
+
+/**
+ * kbase_hwcnt_context_enable() - Decrement the disable count of the context.
+ * @hctx: Pointer to the hardware counter context.
+ *
+ * If a call to this function decrements the disable count from 1 to 0, and
+ * an accumulator has been acquired, then counters will be re-enabled via the
+ * backend interface.
+ *
+ * If an accumulator has been acquired and enabling counters fails for some
+ * reason, the accumulator will be placed into an error state.
+ *
+ * It is only valid to call this function one time for each prior returned call
+ * to kbase_hwcnt_context_disable.
+ *
+ * The spinlock documented in the backend interface that was passed in to
+ * kbase_hwcnt_context_init() must be held before calling this function.
+ */
+void kbase_hwcnt_context_enable(struct kbase_hwcnt_context *hctx);
+
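+/*
+ * Typical disable/enable pairing (illustrative sketch; hctx is assumed to
+ * have been created with kbase_hwcnt_context_init() on the GPU backend,
+ * whose documented spinlock is kbdev->hwaccess_lock):
+ *
+ *	kbase_hwcnt_context_disable(hctx);
+ *	... reconfigure hardware while counters are guaranteed disabled ...
+ *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *	kbase_hwcnt_context_enable(hctx);
+ *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ */
+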
+#endif /* _KBASE_HWCNT_CONTEXT_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_gpu.c b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_gpu.c
new file mode 100644
index 0000000..8581fe9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_gpu.c
@@ -0,0 +1,777 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_hwcnt_gpu.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_kbase.h"
+#ifdef CONFIG_MALI_NO_MALI
+#include "backend/gpu/mali_kbase_model_dummy.h"
+#endif
+
+#define KBASE_HWCNT_V4_BLOCKS_PER_GROUP 8
+#define KBASE_HWCNT_V4_SC_BLOCKS_PER_GROUP 4
+#define KBASE_HWCNT_V4_MAX_GROUPS \
+	(KBASE_HWCNT_AVAIL_MASK_BITS / KBASE_HWCNT_V4_BLOCKS_PER_GROUP)
+#define KBASE_HWCNT_V4_HEADERS_PER_BLOCK 4
+#define KBASE_HWCNT_V4_COUNTERS_PER_BLOCK 60
+#define KBASE_HWCNT_V4_VALUES_PER_BLOCK \
+	(KBASE_HWCNT_V4_HEADERS_PER_BLOCK + KBASE_HWCNT_V4_COUNTERS_PER_BLOCK)
+/* Index of the PRFCNT_EN header into a V4 counter block */
+#define KBASE_HWCNT_V4_PRFCNT_EN_HEADER 2
+
+#define KBASE_HWCNT_V5_BLOCK_TYPE_COUNT 4
+#define KBASE_HWCNT_V5_HEADERS_PER_BLOCK 4
+#define KBASE_HWCNT_V5_COUNTERS_PER_BLOCK 60
+#define KBASE_HWCNT_V5_VALUES_PER_BLOCK \
+	(KBASE_HWCNT_V5_HEADERS_PER_BLOCK + KBASE_HWCNT_V5_COUNTERS_PER_BLOCK)
+/* Index of the PRFCNT_EN header into a V5 counter block */
+#define KBASE_HWCNT_V5_PRFCNT_EN_HEADER 2
+
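+/*
+ * Worked example (illustrative): a V5 GPU with two L2 slices and core mask
+ * 0b1111 has 2 + 2 + fls64(0b1111) = 8 block instances, so a raw dump takes
+ * 8 * KBASE_HWCNT_V5_VALUES_PER_BLOCK * KBASE_HWCNT_VALUE_BYTES bytes,
+ * i.e. 8 * 64 * 4 = 2048 bytes, assuming 4-byte counter values.
+ */
+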
+/**
+ * kbasep_hwcnt_backend_gpu_metadata_v4_create() - Create hardware counter
+ *                                                 metadata for a v4 GPU.
+ * @v4_info:  Non-NULL pointer to hwcnt info for a v4 GPU.
+ * @metadata: Non-NULL pointer to where created metadata is stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_backend_gpu_metadata_v4_create(
+	const struct kbase_hwcnt_gpu_v4_info *v4_info,
+	const struct kbase_hwcnt_metadata **metadata)
+{
+	size_t grp;
+	int errcode = -ENOMEM;
+	struct kbase_hwcnt_description desc;
+	struct kbase_hwcnt_group_description *grps;
+	size_t avail_mask_bit;
+
+	WARN_ON(!v4_info);
+	WARN_ON(!metadata);
+
+	/* Check if there are enough bits in the availability mask to represent
+	 * all the hardware counter blocks in the system.
+	 */
+	if (v4_info->cg_count > KBASE_HWCNT_V4_MAX_GROUPS)
+		return -EINVAL;
+
+	grps = kcalloc(v4_info->cg_count, sizeof(*grps), GFP_KERNEL);
+	if (!grps)
+		goto clean_up;
+
+	desc.grp_cnt = v4_info->cg_count;
+	desc.grps = grps;
+
+	for (grp = 0; grp < v4_info->cg_count; grp++) {
+		size_t blk;
+		size_t sc;
+		const u64 core_mask = v4_info->cgs[grp].core_mask;
+		struct kbase_hwcnt_block_description *blks = kcalloc(
+			KBASE_HWCNT_V4_BLOCKS_PER_GROUP,
+			sizeof(*blks),
+			GFP_KERNEL);
+
+		if (!blks)
+			goto clean_up;
+
+		grps[grp].type = KBASE_HWCNT_GPU_GROUP_TYPE_V4;
+		grps[grp].blk_cnt = KBASE_HWCNT_V4_BLOCKS_PER_GROUP;
+		grps[grp].blks = blks;
+
+		for (blk = 0; blk < KBASE_HWCNT_V4_BLOCKS_PER_GROUP; blk++) {
+			blks[blk].inst_cnt = 1;
+			blks[blk].hdr_cnt =
+				KBASE_HWCNT_V4_HEADERS_PER_BLOCK;
+			blks[blk].ctr_cnt =
+				KBASE_HWCNT_V4_COUNTERS_PER_BLOCK;
+		}
+
+		for (sc = 0; sc < KBASE_HWCNT_V4_SC_BLOCKS_PER_GROUP; sc++) {
+			blks[sc].type = core_mask & (1ull << sc) ?
+				KBASE_HWCNT_GPU_V4_BLOCK_TYPE_SHADER :
+				KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED;
+		}
+
+		blks[4].type = KBASE_HWCNT_GPU_V4_BLOCK_TYPE_TILER;
+		blks[5].type = KBASE_HWCNT_GPU_V4_BLOCK_TYPE_MMU_L2;
+		blks[6].type = KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED;
+		blks[7].type = (grp == 0) ?
+			KBASE_HWCNT_GPU_V4_BLOCK_TYPE_JM :
+			KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED;
+
+		WARN_ON(KBASE_HWCNT_V4_BLOCKS_PER_GROUP != 8);
+	}
+
+	/* Initialise the availability mask */
+	desc.avail_mask = 0;
+	avail_mask_bit = 0;
+
+	for (grp = 0; grp < desc.grp_cnt; grp++) {
+		size_t blk;
+		const struct kbase_hwcnt_block_description *blks =
+			desc.grps[grp].blks;
+		for (blk = 0; blk < desc.grps[grp].blk_cnt; blk++) {
+			WARN_ON(blks[blk].inst_cnt != 1);
+			if (blks[blk].type !=
+			    KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED)
+				desc.avail_mask |= (1ull << avail_mask_bit);
+
+			avail_mask_bit++;
+		}
+	}
+
+	errcode = kbase_hwcnt_metadata_create(&desc, metadata);
+
+	/* Always clean up, as metadata will make a copy of the input args */
+clean_up:
+	if (grps) {
+		for (grp = 0; grp < v4_info->cg_count; grp++)
+			kfree(grps[grp].blks);
+		kfree(grps);
+	}
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_v4_dump_bytes() - Get the raw dump buffer size for a
+ *                                            V4 GPU.
+ * @v4_info: Non-NULL pointer to hwcnt info for a v4 GPU.
+ *
+ * Return: Size of buffer the V4 GPU needs to perform a counter dump.
+ */
+static size_t kbasep_hwcnt_backend_gpu_v4_dump_bytes(
+	const struct kbase_hwcnt_gpu_v4_info *v4_info)
+{
+	return v4_info->cg_count *
+		KBASE_HWCNT_V4_BLOCKS_PER_GROUP *
+		KBASE_HWCNT_V4_VALUES_PER_BLOCK *
+		KBASE_HWCNT_VALUE_BYTES;
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_metadata_v5_create() - Create hardware counter
+ *                                                 metadata for a v5 GPU.
+ * @v5_info:       Non-NULL pointer to hwcnt info for a v5 GPU.
+ * @use_secondary: True if secondary performance counters should be used, else
+ *                 false. Ignored if secondary counters are not supported.
+ * @metadata:      Non-NULL pointer to where created metadata is stored
+ *                 on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_backend_gpu_metadata_v5_create(
+	const struct kbase_hwcnt_gpu_v5_info *v5_info,
+	bool use_secondary,
+	const struct kbase_hwcnt_metadata **metadata)
+{
+	struct kbase_hwcnt_description desc;
+	struct kbase_hwcnt_group_description group;
+	struct kbase_hwcnt_block_description
+		blks[KBASE_HWCNT_V5_BLOCK_TYPE_COUNT];
+	size_t non_sc_block_count;
+	size_t sc_block_count;
+
+	WARN_ON(!v5_info);
+	WARN_ON(!metadata);
+
+	/* Calculate number of block instances that aren't shader cores */
+	non_sc_block_count = 2 + v5_info->l2_count;
+	/* Calculate number of block instances that are shader cores */
+	sc_block_count = fls64(v5_info->core_mask);
+
+	/*
+	 * A system can have up to 64 shader cores, but the 64-bit
+	 * availability mask can't physically represent that many cores as well
+	 * as the other hardware blocks.
+	 * Error out if there are more blocks than our implementation can
+	 * support.
+	 */
+	if ((sc_block_count + non_sc_block_count) > KBASE_HWCNT_AVAIL_MASK_BITS)
+		return -EINVAL;
+
+	/* One Job Manager block */
+	blks[0].type = KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_JM;
+	blks[0].inst_cnt = 1;
+	blks[0].hdr_cnt = KBASE_HWCNT_V5_HEADERS_PER_BLOCK;
+	blks[0].ctr_cnt = KBASE_HWCNT_V5_COUNTERS_PER_BLOCK;
+
+	/* One Tiler block */
+	blks[1].type = KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_TILER;
+	blks[1].inst_cnt = 1;
+	blks[1].hdr_cnt = KBASE_HWCNT_V5_HEADERS_PER_BLOCK;
+	blks[1].ctr_cnt = KBASE_HWCNT_V5_COUNTERS_PER_BLOCK;
+
+	/* l2_count memsys blks */
+	blks[2].type = use_secondary ?
+		KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS2 :
+		KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS;
+	blks[2].inst_cnt = v5_info->l2_count;
+	blks[2].hdr_cnt = KBASE_HWCNT_V5_HEADERS_PER_BLOCK;
+	blks[2].ctr_cnt = KBASE_HWCNT_V5_COUNTERS_PER_BLOCK;
+
+	/*
+	 * There are as many shader cores in the system as there are bits set in
+	 * the core mask. However, the dump buffer memory requirements need to
+	 * take into account the fact that the core mask may be non-contiguous.
+	 *
+	 * For example, a system with a core mask of 0b1011 has the same dump
+	 * buffer memory requirements as a system with 0b1111, but requires more
+	 * memory than a system with 0b0111. However, core 2 of the system with
+	 * 0b1011 doesn't physically exist, and the dump buffer memory that
+	 * accounts for that core will never be written to when we do a counter
+	 * dump.
+	 *
+	 * We find the core mask's last set bit to determine the memory
+	 * requirements, and embed the core mask into the availability mask so
+	 * we can determine later which shader cores physically exist.
+	 */
+	blks[3].type = use_secondary ?
+		KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC2 :
+		KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC;
+	blks[3].inst_cnt = sc_block_count;
+	blks[3].hdr_cnt = KBASE_HWCNT_V5_HEADERS_PER_BLOCK;
+	blks[3].ctr_cnt = KBASE_HWCNT_V5_COUNTERS_PER_BLOCK;
+
+	WARN_ON(KBASE_HWCNT_V5_BLOCK_TYPE_COUNT != 4);
+
+	group.type = KBASE_HWCNT_GPU_GROUP_TYPE_V5;
+	group.blk_cnt = KBASE_HWCNT_V5_BLOCK_TYPE_COUNT;
+	group.blks = blks;
+
+	desc.grp_cnt = 1;
+	desc.grps = &group;
+
+	/* The JM, Tiler, and L2s are always available, and are before cores */
+	desc.avail_mask = (1ull << non_sc_block_count) - 1;
+	/* Embed the core mask directly in the availability mask */
+	desc.avail_mask |= (v5_info->core_mask << non_sc_block_count);
+
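+	/*
+	 * Worked example (illustrative): with l2_count = 2 there are four
+	 * non-SC blocks, so a core mask of 0b1011 yields an availability
+	 * mask of (0b1011 << 4) | 0b1111 = 0b10111111.
+	 */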
+	return kbase_hwcnt_metadata_create(&desc, metadata);
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_v5_dump_bytes() - Get the raw dump buffer size for a
+ *                                            V5 GPU.
+ * @v5_info: Non-NULL pointer to hwcnt info for a v5 GPU.
+ *
+ * Return: Size of buffer the V5 GPU needs to perform a counter dump.
+ */
+static size_t kbasep_hwcnt_backend_gpu_v5_dump_bytes(
+	const struct kbase_hwcnt_gpu_v5_info *v5_info)
+{
+	WARN_ON(!v5_info);
+	return (2 + v5_info->l2_count + fls64(v5_info->core_mask)) *
+		KBASE_HWCNT_V5_VALUES_PER_BLOCK *
+		KBASE_HWCNT_VALUE_BYTES;
+}
+
+int kbase_hwcnt_gpu_info_init(
+	struct kbase_device *kbdev,
+	struct kbase_hwcnt_gpu_info *info)
+{
+	if (!kbdev || !info)
+		return -EINVAL;
+
+#ifdef CONFIG_MALI_NO_MALI
+	/* NO_MALI uses V5 layout, regardless of the underlying platform. */
+	info->type = KBASE_HWCNT_GPU_GROUP_TYPE_V5;
+	info->v5.l2_count = KBASE_DUMMY_MODEL_MAX_MEMSYS_BLOCKS;
+	info->v5.core_mask = (1ull << KBASE_DUMMY_MODEL_MAX_SHADER_CORES) - 1;
+#else
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_V4)) {
+		info->type = KBASE_HWCNT_GPU_GROUP_TYPE_V4;
+		info->v4.cg_count = kbdev->gpu_props.num_core_groups;
+		info->v4.cgs = kbdev->gpu_props.props.coherency_info.group;
+	} else {
+		const struct base_gpu_props *props = &kbdev->gpu_props.props;
+		const size_t l2_count = props->l2_props.num_l2_slices;
+		const size_t core_mask =
+			props->coherency_info.group[0].core_mask;
+
+		info->type = KBASE_HWCNT_GPU_GROUP_TYPE_V5;
+		info->v5.l2_count = l2_count;
+		info->v5.core_mask = core_mask;
+	}
+#endif
+	return 0;
+}
+
+int kbase_hwcnt_gpu_metadata_create(
+	const struct kbase_hwcnt_gpu_info *info,
+	bool use_secondary,
+	const struct kbase_hwcnt_metadata **out_metadata,
+	size_t *out_dump_bytes)
+{
+	int errcode;
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t dump_bytes;
+
+	if (!info || !out_metadata || !out_dump_bytes)
+		return -EINVAL;
+
+	switch (info->type) {
+	case KBASE_HWCNT_GPU_GROUP_TYPE_V4:
+		dump_bytes = kbasep_hwcnt_backend_gpu_v4_dump_bytes(&info->v4);
+		errcode = kbasep_hwcnt_backend_gpu_metadata_v4_create(
+			&info->v4, &metadata);
+		break;
+	case KBASE_HWCNT_GPU_GROUP_TYPE_V5:
+		dump_bytes = kbasep_hwcnt_backend_gpu_v5_dump_bytes(&info->v5);
+		errcode = kbasep_hwcnt_backend_gpu_metadata_v5_create(
+			&info->v5, use_secondary, &metadata);
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (errcode)
+		return errcode;
+
+	/*
+	 * The dump buffer abstraction must have exactly the same size and
+	 * layout as the physical dump buffer, for backwards compatibility.
+	 */
+	WARN_ON(dump_bytes != metadata->dump_buf_bytes);
+
+	*out_metadata = metadata;
+	*out_dump_bytes = dump_bytes;
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_gpu_metadata_create);
+
+void kbase_hwcnt_gpu_metadata_destroy(
+	const struct kbase_hwcnt_metadata *metadata)
+{
+	if (!metadata)
+		return;
+
+	kbase_hwcnt_metadata_destroy(metadata);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_gpu_metadata_destroy);
+
+static bool is_block_type_shader(
+	const u64 grp_type,
+	const u64 blk_type,
+	const size_t blk)
+{
+	bool is_shader = false;
+
+	switch (grp_type) {
+	case KBASE_HWCNT_GPU_GROUP_TYPE_V4:
+		/* A blk value in [0, KBASE_HWCNT_V4_SC_BLOCKS_PER_GROUP-1]
+		 * corresponds either to a shader core block or to an
+		 * implementation-reserved block, so the blk index is used
+		 * to distinguish the reserved case.
+		 */
+		if (blk_type == KBASE_HWCNT_GPU_V4_BLOCK_TYPE_SHADER ||
+		    (blk < KBASE_HWCNT_V4_SC_BLOCKS_PER_GROUP &&
+		     blk_type == KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED))
+			is_shader = true;
+		break;
+	case KBASE_HWCNT_GPU_GROUP_TYPE_V5:
+		if (blk_type == KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC ||
+		    blk_type == KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC2)
+			is_shader = true;
+		break;
+	default:
+		/* Warn on unknown group type */
+		WARN_ON(true);
+	}
+
+	return is_shader;
+}
+
+int kbase_hwcnt_gpu_dump_get(
+	struct kbase_hwcnt_dump_buffer *dst,
+	void *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map,
+	u64 pm_core_mask,
+	bool accumulate)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	const u32 *dump_src;
+	size_t src_offset, grp, blk, blk_inst;
+	size_t grp_prev = 0;
+	u64 core_mask = pm_core_mask;
+
+	if (!dst || !src || !dst_enable_map ||
+	    (dst_enable_map->metadata != dst->metadata))
+		return -EINVAL;
+
+	metadata = dst->metadata;
+	dump_src = (const u32 *)src;
+	src_offset = 0;
+
+	kbase_hwcnt_metadata_for_each_block(
+		metadata, grp, blk, blk_inst) {
+		const size_t hdr_cnt =
+			kbase_hwcnt_metadata_block_headers_count(
+				metadata, grp, blk);
+		const size_t ctr_cnt =
+			kbase_hwcnt_metadata_block_counters_count(
+				metadata, grp, blk);
+		const u64 blk_type = kbase_hwcnt_metadata_block_type(
+			metadata, grp, blk);
+		const bool is_shader_core = is_block_type_shader(
+			kbase_hwcnt_metadata_group_type(metadata, grp),
+			blk_type, blk);
+
+		if (grp != grp_prev) {
+			/* A group change can only happen with V4; V5 and
+			 * later are expected to be single-group only. Drop
+			 * the lower group's core mask by shifting right by
+			 * KBASE_HWCNT_V4_SC_BLOCKS_PER_GROUP.
+			 */
+			core_mask = pm_core_mask >>
+				KBASE_HWCNT_V4_SC_BLOCKS_PER_GROUP;
+			grp_prev = grp;
+		}
+
+		/* Early out if no values in the dest block are enabled */
+		if (kbase_hwcnt_enable_map_block_enabled(
+			dst_enable_map, grp, blk, blk_inst)) {
+			u32 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+				dst, grp, blk, blk_inst);
+			const u32 *src_blk = dump_src + src_offset;
+
+			if (!is_shader_core || (core_mask & 1)) {
+				if (accumulate) {
+					kbase_hwcnt_dump_buffer_block_accumulate(
+						dst_blk, src_blk, hdr_cnt,
+						ctr_cnt);
+				} else {
+					kbase_hwcnt_dump_buffer_block_copy(
+						dst_blk, src_blk,
+						(hdr_cnt + ctr_cnt));
+				}
+			} else if (!accumulate) {
+				kbase_hwcnt_dump_buffer_block_zero(
+					dst_blk, (hdr_cnt + ctr_cnt));
+			}
+		}
+
+		src_offset += (hdr_cnt + ctr_cnt);
+		if (is_shader_core)
+			core_mask = core_mask >> 1;
+	}
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_gpu_dump_get);
+
+/**
+ * kbasep_hwcnt_backend_gpu_block_map_to_physical() - Convert from a block
+ *                                                    enable map abstraction to
+ *                                                    a physical block enable
+ *                                                    map.
+ * @lo: Low 64 bits of block enable map abstraction.
+ * @hi: High 64 bits of block enable map abstraction.
+ *
+ * The abstraction uses 128 bits to enable 128 block values, whereas the
+ * physical map uses just 32 bits, as bit n enables values [n*4, n*4+3].
+ * Therefore, this conversion is lossy.
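+ *
+ * For example (illustrative values): if only abstraction bit 5 of @lo is
+ * set, the result has physical bit 1 set, since bit 1 covers values [4, 7].
+ * Converting that physical map back re-enables all of values 4..7.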
+ *
+ * Return: 32-bit physical block enable map.
+ */
+static inline u32 kbasep_hwcnt_backend_gpu_block_map_to_physical(
+	u64 lo,
+	u64 hi)
+{
+	u32 phys = 0;
+	u64 dwords[2] = {lo, hi};
+	size_t dword_idx;
+
+	for (dword_idx = 0; dword_idx < 2; dword_idx++) {
+		const u64 dword = dwords[dword_idx];
+		u16 packed = 0;
+
+		size_t hword_bit;
+
+		for (hword_bit = 0; hword_bit < 16; hword_bit++) {
+			const size_t dword_bit = hword_bit * 4;
+			const u16 mask =
+				((dword >> (dword_bit + 0)) & 0x1) |
+				((dword >> (dword_bit + 1)) & 0x1) |
+				((dword >> (dword_bit + 2)) & 0x1) |
+				((dword >> (dword_bit + 3)) & 0x1);
+			packed |= (mask << hword_bit);
+		}
+		phys |= ((u32)packed) << (16 * dword_idx);
+	}
+	return phys;
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_block_map_from_physical() - Convert from a physical
+ *                                                      block enable map to a
+ *                                                      block enable map
+ *                                                      abstraction.
+ * @phys: Physical 32-bit block enable map
+ * @lo:   Non-NULL pointer to where low 64 bits of block enable map abstraction
+ *        will be stored.
+ * @hi:   Non-NULL pointer to where high 64 bits of block enable map abstraction
+ *        will be stored.
+ */
+static inline void kbasep_hwcnt_backend_gpu_block_map_from_physical(
+	u32 phys,
+	u64 *lo,
+	u64 *hi)
+{
+	u64 dwords[2] = {0, 0};
+
+	size_t dword_idx;
+
+	for (dword_idx = 0; dword_idx < 2; dword_idx++) {
+		const u16 packed = phys >> (16 * dword_idx);
+		u64 dword = 0;
+
+		size_t hword_bit;
+
+		for (hword_bit = 0; hword_bit < 16; hword_bit++) {
+			const size_t dword_bit = hword_bit * 4;
+			const u64 mask = (packed >> (hword_bit)) & 0x1;
+
+			dword |= mask << (dword_bit + 0);
+			dword |= mask << (dword_bit + 1);
+			dword |= mask << (dword_bit + 2);
+			dword |= mask << (dword_bit + 3);
+		}
+		dwords[dword_idx] = dword;
+	}
+	*lo = dwords[0];
+	*hi = dwords[1];
+}
+
+void kbase_hwcnt_gpu_enable_map_to_physical(
+	struct kbase_hwcnt_physical_enable_map *dst,
+	const struct kbase_hwcnt_enable_map *src)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+
+	u64 jm_bm = 0;
+	u64 shader_bm = 0;
+	u64 tiler_bm = 0;
+	u64 mmu_l2_bm = 0;
+
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!src) || WARN_ON(!dst))
+		return;
+
+	metadata = src->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(
+		metadata, grp, blk, blk_inst) {
+		const u64 grp_type = kbase_hwcnt_metadata_group_type(
+			metadata, grp);
+		const u64 blk_type = kbase_hwcnt_metadata_block_type(
+			metadata, grp, blk);
+		const size_t blk_val_cnt =
+			kbase_hwcnt_metadata_block_values_count(
+				metadata, grp, blk);
+		const u64 *blk_map = kbase_hwcnt_enable_map_block_instance(
+			src, grp, blk, blk_inst);
+
+		switch ((enum kbase_hwcnt_gpu_group_type)grp_type) {
+		case KBASE_HWCNT_GPU_GROUP_TYPE_V4:
+			WARN_ON(blk_val_cnt != KBASE_HWCNT_V4_VALUES_PER_BLOCK);
+			switch ((enum kbase_hwcnt_gpu_v4_block_type)blk_type) {
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_SHADER:
+				shader_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_TILER:
+				tiler_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_MMU_L2:
+				mmu_l2_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_JM:
+				jm_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED:
+				break;
+			default:
+				WARN_ON(true);
+			}
+			break;
+		case KBASE_HWCNT_GPU_GROUP_TYPE_V5:
+			WARN_ON(blk_val_cnt != KBASE_HWCNT_V5_VALUES_PER_BLOCK);
+			switch ((enum kbase_hwcnt_gpu_v5_block_type)blk_type) {
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_JM:
+				jm_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_TILER:
+				tiler_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC:
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC2:
+				shader_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS:
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS2:
+				mmu_l2_bm |= *blk_map;
+				break;
+			default:
+				WARN_ON(true);
+			}
+			break;
+		default:
+			WARN_ON(true);
+		}
+	}
+
+	dst->jm_bm =
+		kbasep_hwcnt_backend_gpu_block_map_to_physical(jm_bm, 0);
+	dst->shader_bm =
+		kbasep_hwcnt_backend_gpu_block_map_to_physical(shader_bm, 0);
+	dst->tiler_bm =
+		kbasep_hwcnt_backend_gpu_block_map_to_physical(tiler_bm, 0);
+	dst->mmu_l2_bm =
+		kbasep_hwcnt_backend_gpu_block_map_to_physical(mmu_l2_bm, 0);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_gpu_enable_map_to_physical);
+
+void kbase_hwcnt_gpu_enable_map_from_physical(
+	struct kbase_hwcnt_enable_map *dst,
+	const struct kbase_hwcnt_physical_enable_map *src)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+
+	u64 ignored_hi;
+	u64 jm_bm;
+	u64 shader_bm;
+	u64 tiler_bm;
+	u64 mmu_l2_bm;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!src) || WARN_ON(!dst))
+		return;
+
+	metadata = dst->metadata;
+
+	kbasep_hwcnt_backend_gpu_block_map_from_physical(
+		src->jm_bm, &jm_bm, &ignored_hi);
+	kbasep_hwcnt_backend_gpu_block_map_from_physical(
+		src->shader_bm, &shader_bm, &ignored_hi);
+	kbasep_hwcnt_backend_gpu_block_map_from_physical(
+		src->tiler_bm, &tiler_bm, &ignored_hi);
+	kbasep_hwcnt_backend_gpu_block_map_from_physical(
+		src->mmu_l2_bm, &mmu_l2_bm, &ignored_hi);
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		const u64 grp_type = kbase_hwcnt_metadata_group_type(
+			metadata, grp);
+		const u64 blk_type = kbase_hwcnt_metadata_block_type(
+			metadata, grp, blk);
+		const size_t blk_val_cnt =
+			kbase_hwcnt_metadata_block_values_count(
+				metadata, grp, blk);
+		u64 *blk_map = kbase_hwcnt_enable_map_block_instance(
+			dst, grp, blk, blk_inst);
+
+		switch ((enum kbase_hwcnt_gpu_group_type)grp_type) {
+		case KBASE_HWCNT_GPU_GROUP_TYPE_V4:
+			WARN_ON(blk_val_cnt != KBASE_HWCNT_V4_VALUES_PER_BLOCK);
+			switch ((enum kbase_hwcnt_gpu_v4_block_type)blk_type) {
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_SHADER:
+				*blk_map = shader_bm;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_TILER:
+				*blk_map = tiler_bm;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_MMU_L2:
+				*blk_map = mmu_l2_bm;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_JM:
+				*blk_map = jm_bm;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED:
+				break;
+			default:
+				WARN_ON(true);
+			}
+			break;
+		case KBASE_HWCNT_GPU_GROUP_TYPE_V5:
+			WARN_ON(blk_val_cnt != KBASE_HWCNT_V5_VALUES_PER_BLOCK);
+			switch ((enum kbase_hwcnt_gpu_v5_block_type)blk_type) {
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_JM:
+				*blk_map = jm_bm;
+				break;
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_TILER:
+				*blk_map = tiler_bm;
+				break;
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC:
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC2:
+				*blk_map = shader_bm;
+				break;
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS:
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS2:
+				*blk_map = mmu_l2_bm;
+				break;
+			default:
+				WARN_ON(true);
+			}
+			break;
+		default:
+			WARN_ON(true);
+		}
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_gpu_enable_map_from_physical);
+
+void kbase_hwcnt_gpu_patch_dump_headers(
+	struct kbase_hwcnt_dump_buffer *buf,
+	const struct kbase_hwcnt_enable_map *enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!buf) || WARN_ON(!enable_map) ||
+	    WARN_ON(buf->metadata != enable_map->metadata))
+		return;
+
+	metadata = buf->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		const u64 grp_type =
+			kbase_hwcnt_metadata_group_type(metadata, grp);
+		u32 *buf_blk = kbase_hwcnt_dump_buffer_block_instance(
+			buf, grp, blk, blk_inst);
+		const u64 *blk_map = kbase_hwcnt_enable_map_block_instance(
+			enable_map, grp, blk, blk_inst);
+		const u32 prfcnt_en =
+			kbasep_hwcnt_backend_gpu_block_map_to_physical(
+				blk_map[0], 0);
+
+		switch ((enum kbase_hwcnt_gpu_group_type)grp_type) {
+		case KBASE_HWCNT_GPU_GROUP_TYPE_V4:
+			buf_blk[KBASE_HWCNT_V4_PRFCNT_EN_HEADER] = prfcnt_en;
+			break;
+		case KBASE_HWCNT_GPU_GROUP_TYPE_V5:
+			buf_blk[KBASE_HWCNT_V5_PRFCNT_EN_HEADER] = prfcnt_en;
+			break;
+		default:
+			WARN_ON(true);
+		}
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_gpu_patch_dump_headers);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_gpu.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_gpu.h
new file mode 100644
index 0000000..12891e0
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_gpu.h
@@ -0,0 +1,251 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_HWCNT_GPU_H_
+#define _KBASE_HWCNT_GPU_H_
+
+#include <linux/types.h>
+
+struct kbase_device;
+struct kbase_hwcnt_metadata;
+struct kbase_hwcnt_enable_map;
+struct kbase_hwcnt_dump_buffer;
+
+/**
+ * enum kbase_hwcnt_gpu_group_type - GPU hardware counter group types, used to
+ *                                   identify metadata groups.
+ * @KBASE_HWCNT_GPU_GROUP_TYPE_V4: GPU V4 group type.
+ * @KBASE_HWCNT_GPU_GROUP_TYPE_V5: GPU V5 group type.
+ */
+enum kbase_hwcnt_gpu_group_type {
+	KBASE_HWCNT_GPU_GROUP_TYPE_V4 = 0x10,
+	KBASE_HWCNT_GPU_GROUP_TYPE_V5,
+};
+
+/**
+ * enum kbase_hwcnt_gpu_v4_block_type - GPU V4 hardware counter block types,
+ *                                      used to identify metadata blocks.
+ * @KBASE_HWCNT_GPU_V4_BLOCK_TYPE_SHADER:   Shader block.
+ * @KBASE_HWCNT_GPU_V4_BLOCK_TYPE_TILER:    Tiler block.
+ * @KBASE_HWCNT_GPU_V4_BLOCK_TYPE_MMU_L2:   MMU/L2 block.
+ * @KBASE_HWCNT_GPU_V4_BLOCK_TYPE_JM:       Job Manager block.
+ * @KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED: Reserved block.
+ */
+enum kbase_hwcnt_gpu_v4_block_type {
+	KBASE_HWCNT_GPU_V4_BLOCK_TYPE_SHADER = 0x20,
+	KBASE_HWCNT_GPU_V4_BLOCK_TYPE_TILER,
+	KBASE_HWCNT_GPU_V4_BLOCK_TYPE_MMU_L2,
+	KBASE_HWCNT_GPU_V4_BLOCK_TYPE_JM,
+	KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED,
+};
+
+/**
+ * enum kbase_hwcnt_gpu_v5_block_type - GPU V5 hardware counter block types,
+ *                                      used to identify metadata blocks.
+ * @KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_JM:      Job Manager block.
+ * @KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_TILER:   Tiler block.
+ * @KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC:      Shader Core block.
+ * @KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC2:     Secondary Shader Core block.
+ * @KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS:  Memsys block.
+ * @KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS2: Secondary Memsys block.
+ */
+enum kbase_hwcnt_gpu_v5_block_type {
+	KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_JM = 0x40,
+	KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_TILER,
+	KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC,
+	KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC2,
+	KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS,
+	KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS2,
+};
+
+/**
+ * struct kbase_hwcnt_physical_enable_map - Representation of enable map
+ *                                          directly used by GPU.
+ * @jm_bm:     Job Manager counters selection bitmask.
+ * @shader_bm: Shader counters selection bitmask.
+ * @tiler_bm:  Tiler counters selection bitmask.
+ * @mmu_l2_bm: MMU_L2 counters selection bitmask.
+ */
+struct kbase_hwcnt_physical_enable_map {
+	u32 jm_bm;
+	u32 shader_bm;
+	u32 tiler_bm;
+	u32 mmu_l2_bm;
+};
+
+/**
+ * struct kbase_hwcnt_gpu_v4_info - Information about hwcnt blocks on v4 GPUs.
+ * @cg_count: Core group count.
+ * @cgs:      Non-NULL pointer to array of cg_count coherent group structures.
+ *
+ * V4 devices are Mali-T6xx or Mali-T72x, and have one or more core groups,
+ * where each core group may have a physically different layout.
+ */
+struct kbase_hwcnt_gpu_v4_info {
+	size_t cg_count;
+	const struct mali_base_gpu_coherent_group *cgs;
+};
+
+/**
+ * struct kbase_hwcnt_gpu_v5_info - Information about hwcnt blocks on v5 GPUs.
+ * @l2_count:   L2 cache count.
+ * @core_mask:  Shader core mask. May be sparse.
+ */
+struct kbase_hwcnt_gpu_v5_info {
+	size_t l2_count;
+	u64 core_mask;
+};
+
+/**
+ * struct kbase_hwcnt_gpu_info - Tagged union with information about the current
+ *                               GPU's hwcnt blocks.
+ * @type: GPU type.
+ * @v4:   Info filled in if a v4 GPU.
+ * @v5:   Info filled in if a v5 GPU.
+ */
+struct kbase_hwcnt_gpu_info {
+	enum kbase_hwcnt_gpu_group_type type;
+	union {
+		struct kbase_hwcnt_gpu_v4_info v4;
+		struct kbase_hwcnt_gpu_v5_info v5;
+	};
+};
+
+/**
+ * kbase_hwcnt_gpu_info_init() - Initialise an info structure used to create the
+ *                               hwcnt metadata.
+ * @kbdev: Non-NULL pointer to kbase device.
+ * @info:  Non-NULL pointer to data structure to be filled in.
+ *
+ * The initialised info struct will only be valid for use while kbdev is valid.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_gpu_info_init(
+	struct kbase_device *kbdev,
+	struct kbase_hwcnt_gpu_info *info);
+
+/**
+ * kbase_hwcnt_gpu_metadata_create() - Create hardware counter metadata for the
+ *                                     current GPU.
+ * @info:           Non-NULL pointer to info struct initialised by
+ *                  kbase_hwcnt_gpu_info_init.
+ * @use_secondary:  True if secondary performance counters should be used, else
+ *                  false. Ignored if secondary counters are not supported.
+ * @out_metadata:   Non-NULL pointer to where created metadata is stored on
+ *                  success.
+ * @out_dump_bytes: Non-NULL pointer to where the size of the GPU counter dump
+ *                  buffer is stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_gpu_metadata_create(
+	const struct kbase_hwcnt_gpu_info *info,
+	bool use_secondary,
+	const struct kbase_hwcnt_metadata **out_metadata,
+	size_t *out_dump_bytes);
+
+/**
+ * kbase_hwcnt_gpu_metadata_destroy() - Destroy GPU hardware counter metadata.
+ * @metadata: Pointer to metadata to destroy.
+ */
+void kbase_hwcnt_gpu_metadata_destroy(
+	const struct kbase_hwcnt_metadata *metadata);
+
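+/*
+ * Illustrative usage (sketch; error handling partly elided, kbdev assumed
+ * valid):
+ *
+ *	struct kbase_hwcnt_gpu_info info;
+ *	const struct kbase_hwcnt_metadata *md;
+ *	size_t dump_bytes;
+ *
+ *	if (!kbase_hwcnt_gpu_info_init(kbdev, &info) &&
+ *	    !kbase_hwcnt_gpu_metadata_create(&info, false, &md, &dump_bytes))
+ *		kbase_hwcnt_gpu_metadata_destroy(md);
+ */
+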
+/**
+ * kbase_hwcnt_gpu_dump_get() - Copy or accumulate enabled counters from the raw
+ *                              dump buffer in src into the dump buffer
+ *                              abstraction in dst.
+ * @dst:            Non-NULL pointer to dst dump buffer.
+ * @src:            Non-NULL pointer to src raw dump buffer, of the same
+ *                  length as returned in the out_dump_bytes parameter of
+ *                  kbase_hwcnt_gpu_metadata_create.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ * @pm_core_mask:   PM state synchronized shader core mask for the dump.
+ * @accumulate:     True if counters in src should be accumulated into dst,
+ *                  rather than copied.
+ *
+ * The dst and dst_enable_map MUST have been created from the same metadata as
+ * returned from the call to kbase_hwcnt_gpu_metadata_create as was used to get
+ * the length of src.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_gpu_dump_get(
+	struct kbase_hwcnt_dump_buffer *dst,
+	void *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map,
+	u64 pm_core_mask,
+	bool accumulate);
+
+/**
+ * kbase_hwcnt_gpu_enable_map_to_physical() - Convert an enable map abstraction
+ *                                            into a physical enable map.
+ * @dst: Non-NULL pointer to dst physical enable map.
+ * @src: Non-NULL pointer to src enable map abstraction.
+ *
+ * The src must have been created from a metadata returned from a call to
+ * kbase_hwcnt_gpu_metadata_create.
+ *
+ * This is a lossy conversion, as the enable map abstraction has one bit per
+ * individual counter block value, but the physical enable map uses 1 bit for
+ * every 4 counters, shared over all instances of a block.
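+ *
+ * For example (illustrative): enabling only counter 7 of one shader core
+ * instance sets physical shader_bm bit 1, which enables counters 4..7 on
+ * every shader core instance.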
+ */
+void kbase_hwcnt_gpu_enable_map_to_physical(
+	struct kbase_hwcnt_physical_enable_map *dst,
+	const struct kbase_hwcnt_enable_map *src);
+
+/**
+ * kbase_hwcnt_gpu_enable_map_from_physical() - Convert a physical enable map to
+ *                                              an enable map abstraction.
+ * @dst: Non-NULL pointer to dst enable map abstraction.
+ * @src: Non-NULL pointer to src physical enable map.
+ *
+ * The dst must have been created from a metadata returned from a call to
+ * kbase_hwcnt_gpu_metadata_create.
+ *
+ * This is a lossy conversion, as the physical enable map can technically
+ * support counter blocks with 128 counters each, but no hardware actually uses
+ * more than 64, so the enable map abstraction has nowhere to store the enable
+ * information for the 64 non-existent counters.
+ */
+void kbase_hwcnt_gpu_enable_map_from_physical(
+	struct kbase_hwcnt_enable_map *dst,
+	const struct kbase_hwcnt_physical_enable_map *src);
+
+/**
+ * kbase_hwcnt_gpu_patch_dump_headers() - Patch all the performance counter
+ *                                        enable headers in a dump buffer to
+ *                                        reflect the specified enable map.
+ * @buf:        Non-NULL pointer to dump buffer to patch.
+ * @enable_map: Non-NULL pointer to enable map.
+ *
+ * The buf and enable_map must have been created from a metadata returned from
+ * a call to kbase_hwcnt_gpu_metadata_create.
+ *
+ * This function should be used before handing off a dump buffer over the
+ * kernel-user boundary, to ensure the header is accurate for the enable map
+ * used by the user.
+ */
+void kbase_hwcnt_gpu_patch_dump_headers(
+	struct kbase_hwcnt_dump_buffer *buf,
+	const struct kbase_hwcnt_enable_map *enable_map);
+
+#endif /* _KBASE_HWCNT_GPU_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_legacy.c b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_legacy.c
new file mode 100644
index 0000000..b0e6aee
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_legacy.c
@@ -0,0 +1,152 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_hwcnt_legacy.h"
+#include "mali_kbase_hwcnt_virtualizer.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_kbase_hwcnt_gpu.h"
+#include "mali_kbase_ioctl.h"
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+/**
+ * struct kbase_hwcnt_legacy_client - Legacy hardware counter client.
+ * @user_dump_buf: Non-NULL pointer to a user buffer where dumps are returned.
+ * @enable_map:    Counter enable map.
+ * @dump_buf:      Dump buffer used to manipulate dumps before copying to user.
+ * @hvcli:         Hardware counter virtualizer client.
+ */
+struct kbase_hwcnt_legacy_client {
+	void __user *user_dump_buf;
+	struct kbase_hwcnt_enable_map enable_map;
+	struct kbase_hwcnt_dump_buffer dump_buf;
+	struct kbase_hwcnt_virtualizer_client *hvcli;
+};
+
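+/*
+ * Lifecycle sketch (illustrative; the enable struct is assumed to have been
+ * copied in from userspace by the hwcnt enable ioctl handler):
+ *
+ *	struct kbase_hwcnt_legacy_client *hlcli;
+ *
+ *	if (!kbase_hwcnt_legacy_client_create(hvirt, &enable, &hlcli)) {
+ *		kbase_hwcnt_legacy_client_dump(hlcli);
+ *		kbase_hwcnt_legacy_client_destroy(hlcli);
+ *	}
+ */
+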
+int kbase_hwcnt_legacy_client_create(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_ioctl_hwcnt_enable *enable,
+	struct kbase_hwcnt_legacy_client **out_hlcli)
+{
+	int errcode;
+	struct kbase_hwcnt_legacy_client *hlcli;
+	const struct kbase_hwcnt_metadata *metadata;
+	struct kbase_hwcnt_physical_enable_map phys_em;
+
+	if (!hvirt || !enable || !enable->dump_buffer || !out_hlcli)
+		return -EINVAL;
+
+	metadata = kbase_hwcnt_virtualizer_metadata(hvirt);
+
+	hlcli = kzalloc(sizeof(*hlcli), GFP_KERNEL);
+	if (!hlcli)
+		return -ENOMEM;
+
+	hlcli->user_dump_buf = (void __user *)(uintptr_t)enable->dump_buffer;
+
+	errcode = kbase_hwcnt_enable_map_alloc(metadata, &hlcli->enable_map);
+	if (errcode)
+		goto error;
+
+	/* Translate from the ioctl enable map to the internal one */
+	phys_em.jm_bm = enable->jm_bm;
+	phys_em.shader_bm = enable->shader_bm;
+	phys_em.tiler_bm = enable->tiler_bm;
+	phys_em.mmu_l2_bm = enable->mmu_l2_bm;
+	kbase_hwcnt_gpu_enable_map_from_physical(&hlcli->enable_map, &phys_em);
+
+	errcode = kbase_hwcnt_dump_buffer_alloc(metadata, &hlcli->dump_buf);
+	if (errcode)
+		goto error;
+
+	errcode = kbase_hwcnt_virtualizer_client_create(
+		hvirt, &hlcli->enable_map, &hlcli->hvcli);
+	if (errcode)
+		goto error;
+
+	*out_hlcli = hlcli;
+	return 0;
+
+error:
+	kbase_hwcnt_legacy_client_destroy(hlcli);
+	return errcode;
+}
+
+void kbase_hwcnt_legacy_client_destroy(struct kbase_hwcnt_legacy_client *hlcli)
+{
+	if (!hlcli)
+		return;
+
+	kbase_hwcnt_virtualizer_client_destroy(hlcli->hvcli);
+	kbase_hwcnt_dump_buffer_free(&hlcli->dump_buf);
+	kbase_hwcnt_enable_map_free(&hlcli->enable_map);
+	kfree(hlcli);
+}
+
+int kbase_hwcnt_legacy_client_dump(struct kbase_hwcnt_legacy_client *hlcli)
+{
+	int errcode;
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+
+	if (!hlcli)
+		return -EINVAL;
+
+	/* Dump into the kernel buffer */
+	errcode = kbase_hwcnt_virtualizer_client_dump(hlcli->hvcli,
+		&ts_start_ns, &ts_end_ns, &hlcli->dump_buf);
+	if (errcode)
+		return errcode;
+
+	/* Patch the dump buf headers, to hide the counters that other hwcnt
+	 * clients are using.
+	 */
+	kbase_hwcnt_gpu_patch_dump_headers(
+		&hlcli->dump_buf, &hlcli->enable_map);
+
+	/* Zero all non-enabled counters (current values are undefined) */
+	kbase_hwcnt_dump_buffer_zero_non_enabled(
+		&hlcli->dump_buf, &hlcli->enable_map);
+
+	/* Copy into the user's buffer */
+	errcode = copy_to_user(hlcli->user_dump_buf, hlcli->dump_buf.dump_buf,
+		hlcli->dump_buf.metadata->dump_buf_bytes);
+	/* Non-zero errcode implies user buf was invalid or too small */
+	if (errcode)
+		return -EFAULT;
+
+	return 0;
+}
+
+int kbase_hwcnt_legacy_client_clear(struct kbase_hwcnt_legacy_client *hlcli)
+{
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+
+	if (!hlcli)
+		return -EINVAL;
+
+	/* Dump with a NULL buffer to clear this client's counters */
+	return kbase_hwcnt_virtualizer_client_dump(hlcli->hvcli,
+		&ts_start_ns, &ts_end_ns, NULL);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_legacy.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_legacy.h
new file mode 100644
index 0000000..7a610ae
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_legacy.h
@@ -0,0 +1,94 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Legacy hardware counter interface, giving userspace clients simple,
+ * synchronous access to hardware counters.
+ *
+ * Any functions operating on a single legacy hardware counter client instance
+ * must be externally synchronised.
+ * Different clients may safely be used concurrently.
+ */
+
+#ifndef _KBASE_HWCNT_LEGACY_H_
+#define _KBASE_HWCNT_LEGACY_H_
+
+struct kbase_hwcnt_legacy_client;
+struct kbase_ioctl_hwcnt_enable;
+struct kbase_hwcnt_virtualizer;
+
+/**
+ * kbase_hwcnt_legacy_client_create() - Create a legacy hardware counter client.
+ * @hvirt:     Non-NULL pointer to hardware counter virtualizer the client
+ *             should be attached to.
+ * @enable:    Non-NULL pointer to hwcnt_enable structure, containing a valid
+ *             pointer to a user dump buffer large enough to hold a dump, and
+ *             the counters that should be enabled.
+ * @out_hlcli: Non-NULL pointer to where the pointer to the created client will
+ *             be stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_legacy_client_create(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_ioctl_hwcnt_enable *enable,
+	struct kbase_hwcnt_legacy_client **out_hlcli);
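+
+/*
+ * Example (illustrative sketch): the full lifecycle of a legacy client,
+ * assuming a valid virtualizer (hvirt) and a populated enable structure
+ * (enable) whose dump_buffer points at a large enough user buffer:
+ *
+ *	struct kbase_hwcnt_legacy_client *hlcli;
+ *	int err;
+ *
+ *	err = kbase_hwcnt_legacy_client_create(hvirt, &enable, &hlcli);
+ *	if (err)
+ *		return err;
+ *
+ *	err = kbase_hwcnt_legacy_client_dump(hlcli);
+ *	kbase_hwcnt_legacy_client_destroy(hlcli);
+ *	return err;
+ */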
+
+/**
+ * kbase_hwcnt_legacy_client_destroy() - Destroy a legacy hardware counter
+ *                                       client.
+ * @hlcli: Pointer to the legacy hardware counter client.
+ *
+ * Will safely destroy a client in any partial state of construction.
+ */
+void kbase_hwcnt_legacy_client_destroy(struct kbase_hwcnt_legacy_client *hlcli);
+
+/**
+ * kbase_hwcnt_legacy_client_dump() - Perform a hardware counter dump into the
+ *                                    client's user buffer.
+ * @hlcli: Non-NULL pointer to the legacy hardware counter client.
+ *
+ * This function will synchronously dump hardware counters into the user buffer
+ * specified on client creation, with the counters specified on client creation.
+ *
+ * The counters are automatically cleared after each dump, such that the next
+ * dump performed will return the counter values accumulated between the time of
+ * this function call and the next dump.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_legacy_client_dump(struct kbase_hwcnt_legacy_client *hlcli);
+
+/**
+ * kbase_hwcnt_legacy_client_clear() - Perform and discard a hardware counter
+ *                                     dump.
+ * @hlcli: Non-NULL pointer to the legacy hardware counter client.
+ *
+ * This function will synchronously clear the hardware counters, such that the
+ * next dump performed will return the counter values accumulated between the
+ * time of this function call and the next dump.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_legacy_client_clear(struct kbase_hwcnt_legacy_client *hlcli);
+
+#endif /* _KBASE_HWCNT_LEGACY_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_reader.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_reader.h
new file mode 100644
index 0000000..10706b8
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_reader.h
@@ -0,0 +1,71 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_HWCNT_READER_H_
+#define _KBASE_HWCNT_READER_H_
+
+/* The ids of ioctl commands. */
+#define KBASE_HWCNT_READER 0xBE
+#define KBASE_HWCNT_READER_GET_HWVER       _IOR(KBASE_HWCNT_READER, 0x00, u32)
+#define KBASE_HWCNT_READER_GET_BUFFER_SIZE _IOR(KBASE_HWCNT_READER, 0x01, u32)
+#define KBASE_HWCNT_READER_DUMP            _IOW(KBASE_HWCNT_READER, 0x10, u32)
+#define KBASE_HWCNT_READER_CLEAR           _IOW(KBASE_HWCNT_READER, 0x11, u32)
+#define KBASE_HWCNT_READER_GET_BUFFER      _IOR(KBASE_HWCNT_READER, 0x20,\
+		struct kbase_hwcnt_reader_metadata)
+#define KBASE_HWCNT_READER_PUT_BUFFER      _IOW(KBASE_HWCNT_READER, 0x21,\
+		struct kbase_hwcnt_reader_metadata)
+#define KBASE_HWCNT_READER_SET_INTERVAL    _IOW(KBASE_HWCNT_READER, 0x30, u32)
+#define KBASE_HWCNT_READER_ENABLE_EVENT    _IOW(KBASE_HWCNT_READER, 0x40, u32)
+#define KBASE_HWCNT_READER_DISABLE_EVENT   _IOW(KBASE_HWCNT_READER, 0x41, u32)
+#define KBASE_HWCNT_READER_GET_API_VERSION _IOW(KBASE_HWCNT_READER, 0xFF, u32)
+
+/**
+ * struct kbase_hwcnt_reader_metadata - hwcnt reader sample buffer metadata
+ * @timestamp:  time when sample was collected
+ * @event_id:   id of an event that triggered sample collection
+ * @buffer_idx: position in sampling area where sample buffer was stored
+ */
+struct kbase_hwcnt_reader_metadata {
+	u64 timestamp;
+	u32 event_id;
+	u32 buffer_idx;
+};
+
+/**
+ * enum base_hwcnt_reader_event - hwcnt dumping events
+ * @BASE_HWCNT_READER_EVENT_MANUAL:   manual request for dump
+ * @BASE_HWCNT_READER_EVENT_PERIODIC: periodic dump
+ * @BASE_HWCNT_READER_EVENT_PREJOB:   prejob dump request
+ * @BASE_HWCNT_READER_EVENT_POSTJOB:  postjob dump request
+ * @BASE_HWCNT_READER_EVENT_COUNT:    number of supported events
+ */
+enum base_hwcnt_reader_event {
+	BASE_HWCNT_READER_EVENT_MANUAL,
+	BASE_HWCNT_READER_EVENT_PERIODIC,
+	BASE_HWCNT_READER_EVENT_PREJOB,
+	BASE_HWCNT_READER_EVENT_POSTJOB,
+
+	BASE_HWCNT_READER_EVENT_COUNT
+};
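+
+/*
+ * Example (illustrative, userspace pseudo-code): driving a reader file
+ * descriptor with the ioctls above. Obtaining the fd is outside the scope of
+ * this header and is assumed to have happened already.
+ *
+ *	u32 api_version;
+ *	struct kbase_hwcnt_reader_metadata meta;
+ *
+ *	ioctl(fd, KBASE_HWCNT_READER_GET_API_VERSION, &api_version);
+ *	ioctl(fd, KBASE_HWCNT_READER_DUMP, 0);
+ *	ioctl(fd, KBASE_HWCNT_READER_GET_BUFFER, &meta);
+ *	... process the sample at meta.buffer_idx ...
+ *	ioctl(fd, KBASE_HWCNT_READER_PUT_BUFFER, &meta);
+ */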
+
+#endif /* _KBASE_HWCNT_READER_H_ */
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_types.c b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_types.c
new file mode 100644
index 0000000..1e9efde
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_types.c
@@ -0,0 +1,538 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_kbase.h"
+
+/* Minimum alignment of each block of hardware counters */
+#define KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT \
+	(KBASE_HWCNT_BITFIELD_BITS * KBASE_HWCNT_VALUE_BYTES)
+
+/**
+ * KBASE_HWCNT_ALIGN_UPWARDS() - Align a value to an alignment.
+ * @value:     The value to align upwards.
+ * @alignment: The alignment.
+ *
+ * Return: A number greater than or equal to value that is aligned to alignment.
+ */
+#define KBASE_HWCNT_ALIGN_UPWARDS(value, alignment) \
+	(value + ((alignment - (value % alignment)) % alignment))
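+/*
+ * For example, KBASE_HWCNT_ALIGN_UPWARDS(6, 4) evaluates to 8, and
+ * KBASE_HWCNT_ALIGN_UPWARDS(8, 4) evaluates to 8. Note that the arguments
+ * are substituted textually without additional parentheses, so compound
+ * argument expressions interact with the operator precedence inside the
+ * expansion.
+ */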
+
+int kbase_hwcnt_metadata_create(
+	const struct kbase_hwcnt_description *desc,
+	const struct kbase_hwcnt_metadata **out_metadata)
+{
+	char *buf;
+	struct kbase_hwcnt_metadata *metadata;
+	struct kbase_hwcnt_group_metadata *grp_mds;
+	size_t grp;
+	size_t enable_map_count; /* Number of u64 bitfields (inc padding) */
+	size_t dump_buf_count; /* Number of u32 values (inc padding) */
+	size_t avail_mask_bits; /* Number of availability mask bits */
+
+	size_t size;
+	size_t offset;
+
+	if (!desc || !out_metadata)
+		return -EINVAL;
+
+	/* Calculate the bytes needed to tightly pack the metadata */
+
+	/* Top level metadata */
+	size = 0;
+	size += sizeof(struct kbase_hwcnt_metadata);
+
+	/* Group metadata */
+	size += sizeof(struct kbase_hwcnt_group_metadata) * desc->grp_cnt;
+
+	/* Block metadata */
+	for (grp = 0; grp < desc->grp_cnt; grp++) {
+		size += sizeof(struct kbase_hwcnt_block_metadata) *
+			desc->grps[grp].blk_cnt;
+	}
+
+	/* Single allocation for the entire metadata */
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Use the allocated memory for the metadata and its members */
+
+	/* Bump allocate the top level metadata */
+	offset = 0;
+	metadata = (struct kbase_hwcnt_metadata *)(buf + offset);
+	offset += sizeof(struct kbase_hwcnt_metadata);
+
+	/* Bump allocate the group metadata */
+	grp_mds = (struct kbase_hwcnt_group_metadata *)(buf + offset);
+	offset += sizeof(struct kbase_hwcnt_group_metadata) * desc->grp_cnt;
+
+	enable_map_count = 0;
+	dump_buf_count = 0;
+	avail_mask_bits = 0;
+
+	for (grp = 0; grp < desc->grp_cnt; grp++) {
+		size_t blk;
+
+		const struct kbase_hwcnt_group_description *grp_desc =
+			desc->grps + grp;
+		struct kbase_hwcnt_group_metadata *grp_md = grp_mds + grp;
+
+		size_t group_enable_map_count = 0;
+		size_t group_dump_buffer_count = 0;
+		size_t group_avail_mask_bits = 0;
+
+		/* Bump allocate this group's block metadata */
+		struct kbase_hwcnt_block_metadata *blk_mds =
+			(struct kbase_hwcnt_block_metadata *)(buf + offset);
+		offset += sizeof(struct kbase_hwcnt_block_metadata) *
+			grp_desc->blk_cnt;
+
+		/* Fill in each block in the group's information */
+		for (blk = 0; blk < grp_desc->blk_cnt; blk++) {
+			const struct kbase_hwcnt_block_description *blk_desc =
+				grp_desc->blks + blk;
+			struct kbase_hwcnt_block_metadata *blk_md =
+				blk_mds + blk;
+			const size_t n_values =
+				blk_desc->hdr_cnt + blk_desc->ctr_cnt;
+
+			blk_md->type = blk_desc->type;
+			blk_md->inst_cnt = blk_desc->inst_cnt;
+			blk_md->hdr_cnt = blk_desc->hdr_cnt;
+			blk_md->ctr_cnt = blk_desc->ctr_cnt;
+			blk_md->enable_map_index = group_enable_map_count;
+			blk_md->enable_map_stride =
+				kbase_hwcnt_bitfield_count(n_values);
+			blk_md->dump_buf_index = group_dump_buffer_count;
+			blk_md->dump_buf_stride =
+				KBASE_HWCNT_ALIGN_UPWARDS(
+					n_values,
+					(KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT /
+					 KBASE_HWCNT_VALUE_BYTES));
+			blk_md->avail_mask_index = group_avail_mask_bits;
+
+			group_enable_map_count +=
+				blk_md->enable_map_stride * blk_md->inst_cnt;
+			group_dump_buffer_count +=
+				blk_md->dump_buf_stride * blk_md->inst_cnt;
+			group_avail_mask_bits += blk_md->inst_cnt;
+		}
+
+		/* Fill in the group's information */
+		grp_md->type = grp_desc->type;
+		grp_md->blk_cnt = grp_desc->blk_cnt;
+		grp_md->blk_metadata = blk_mds;
+		grp_md->enable_map_index = enable_map_count;
+		grp_md->dump_buf_index = dump_buf_count;
+		grp_md->avail_mask_index = avail_mask_bits;
+
+		enable_map_count += group_enable_map_count;
+		dump_buf_count += group_dump_buffer_count;
+		avail_mask_bits += group_avail_mask_bits;
+	}
+
+	/* Fill in the top level metadata's information */
+	metadata->grp_cnt = desc->grp_cnt;
+	metadata->grp_metadata = grp_mds;
+	metadata->enable_map_bytes =
+		enable_map_count * KBASE_HWCNT_BITFIELD_BYTES;
+	metadata->dump_buf_bytes = dump_buf_count * KBASE_HWCNT_VALUE_BYTES;
+	metadata->avail_mask = desc->avail_mask;
+
+	WARN_ON(size != offset);
+	/* Due to the block alignment, there should be exactly one enable map
+	 * bit per 4 bytes in the dump buffer.
+	 */
+	WARN_ON(metadata->dump_buf_bytes !=
+		(metadata->enable_map_bytes *
+		 BITS_PER_BYTE * KBASE_HWCNT_VALUE_BYTES));
+
+	*out_metadata = metadata;
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_metadata_create);
+
+void kbase_hwcnt_metadata_destroy(const struct kbase_hwcnt_metadata *metadata)
+{
+	kfree(metadata);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_metadata_destroy);
+
+int kbase_hwcnt_enable_map_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	struct kbase_hwcnt_enable_map *enable_map)
+{
+	u64 *enable_map_buf;
+
+	if (!metadata || !enable_map)
+		return -EINVAL;
+
+	enable_map_buf = kzalloc(metadata->enable_map_bytes, GFP_KERNEL);
+	if (!enable_map_buf)
+		return -ENOMEM;
+
+	enable_map->metadata = metadata;
+	enable_map->enable_map = enable_map_buf;
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_enable_map_alloc);
+
+void kbase_hwcnt_enable_map_free(struct kbase_hwcnt_enable_map *enable_map)
+{
+	if (!enable_map)
+		return;
+
+	kfree(enable_map->enable_map);
+	enable_map->enable_map = NULL;
+	enable_map->metadata = NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_enable_map_free);
+
+int kbase_hwcnt_dump_buffer_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	u32 *buf;
+
+	if (!metadata || !dump_buf)
+		return -EINVAL;
+
+	buf = kmalloc(metadata->dump_buf_bytes, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	dump_buf->metadata = metadata;
+	dump_buf->dump_buf = buf;
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_alloc);
+
+void kbase_hwcnt_dump_buffer_free(struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	if (!dump_buf)
+		return;
+
+	kfree(dump_buf->dump_buf);
+	memset(dump_buf, 0, sizeof(*dump_buf));
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_free);
+
+int kbase_hwcnt_dump_buffer_array_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	size_t n,
+	struct kbase_hwcnt_dump_buffer_array *dump_bufs)
+{
+	struct kbase_hwcnt_dump_buffer *buffers;
+	size_t buf_idx;
+	unsigned int order;
+	unsigned long addr;
+
+	if (!metadata || !dump_bufs)
+		return -EINVAL;
+
+	/* Allocate memory for the dump buffer struct array */
+	buffers = kmalloc_array(n, sizeof(*buffers), GFP_KERNEL);
+	if (!buffers)
+		return -ENOMEM;
+
+	/* Allocate pages for the actual dump buffers, as they tend to be fairly
+	 * large.
+	 */
+	order = get_order(metadata->dump_buf_bytes * n);
+	addr = __get_free_pages(GFP_KERNEL, order);
+
+	if (!addr) {
+		kfree(buffers);
+		return -ENOMEM;
+	}
+
+	dump_bufs->page_addr = addr;
+	dump_bufs->page_order = order;
+	dump_bufs->buf_cnt = n;
+	dump_bufs->bufs = buffers;
+
+	/* Set the buffer of each dump buf */
+	for (buf_idx = 0; buf_idx < n; buf_idx++) {
+		const size_t offset = metadata->dump_buf_bytes * buf_idx;
+
+		buffers[buf_idx].metadata = metadata;
+		buffers[buf_idx].dump_buf = (u32 *)(addr + offset);
+	}
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_array_alloc);
+
+void kbase_hwcnt_dump_buffer_array_free(
+	struct kbase_hwcnt_dump_buffer_array *dump_bufs)
+{
+	if (!dump_bufs)
+		return;
+
+	kfree(dump_bufs->bufs);
+	free_pages(dump_bufs->page_addr, dump_bufs->page_order);
+	memset(dump_bufs, 0, sizeof(*dump_bufs));
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_array_free);
+
+void kbase_hwcnt_dump_buffer_zero(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_enable_map *dst_enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!dst) ||
+	    WARN_ON(!dst_enable_map) ||
+	    WARN_ON(dst->metadata != dst_enable_map->metadata))
+		return;
+
+	metadata = dst->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		u32 *dst_blk;
+		size_t val_cnt;
+
+		if (!kbase_hwcnt_enable_map_block_enabled(
+			dst_enable_map, grp, blk, blk_inst))
+			continue;
+
+		dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+			dst, grp, blk, blk_inst);
+		val_cnt = kbase_hwcnt_metadata_block_values_count(
+			metadata, grp, blk);
+
+		kbase_hwcnt_dump_buffer_block_zero(dst_blk, val_cnt);
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_zero);
+
+void kbase_hwcnt_dump_buffer_zero_strict(
+	struct kbase_hwcnt_dump_buffer *dst)
+{
+	if (WARN_ON(!dst))
+		return;
+
+	memset(dst->dump_buf, 0, dst->metadata->dump_buf_bytes);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_zero_strict);
+
+void kbase_hwcnt_dump_buffer_zero_non_enabled(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_enable_map *dst_enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!dst) ||
+	    WARN_ON(!dst_enable_map) ||
+	    WARN_ON(dst->metadata != dst_enable_map->metadata))
+		return;
+
+	metadata = dst->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		u32 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+			dst, grp, blk, blk_inst);
+		const u64 *blk_em = kbase_hwcnt_enable_map_block_instance(
+			dst_enable_map, grp, blk, blk_inst);
+		size_t val_cnt = kbase_hwcnt_metadata_block_values_count(
+			metadata, grp, blk);
+
+		/* Align upwards to include padding bytes */
+		val_cnt = KBASE_HWCNT_ALIGN_UPWARDS(val_cnt,
+			(KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT /
+			 KBASE_HWCNT_VALUE_BYTES));
+
+		if (kbase_hwcnt_metadata_block_instance_avail(
+			metadata, grp, blk, blk_inst)) {
+			/* Block available, so only zero non-enabled values */
+			kbase_hwcnt_dump_buffer_block_zero_non_enabled(
+				dst_blk, blk_em, val_cnt);
+		} else {
+			/* Block not available, so zero the entire thing */
+			kbase_hwcnt_dump_buffer_block_zero(dst_blk, val_cnt);
+		}
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_zero_non_enabled);
+
+void kbase_hwcnt_dump_buffer_copy(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!dst) ||
+	    WARN_ON(!src) ||
+	    WARN_ON(!dst_enable_map) ||
+	    WARN_ON(dst == src) ||
+	    WARN_ON(dst->metadata != src->metadata) ||
+	    WARN_ON(dst->metadata != dst_enable_map->metadata))
+		return;
+
+	metadata = dst->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		u32 *dst_blk;
+		const u32 *src_blk;
+		size_t val_cnt;
+
+		if (!kbase_hwcnt_enable_map_block_enabled(
+			dst_enable_map, grp, blk, blk_inst))
+			continue;
+
+		dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+			dst, grp, blk, blk_inst);
+		src_blk = kbase_hwcnt_dump_buffer_block_instance(
+			src, grp, blk, blk_inst);
+		val_cnt = kbase_hwcnt_metadata_block_values_count(
+			metadata, grp, blk);
+
+		kbase_hwcnt_dump_buffer_block_copy(dst_blk, src_blk, val_cnt);
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_copy);
+
+void kbase_hwcnt_dump_buffer_copy_strict(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!dst) ||
+	    WARN_ON(!src) ||
+	    WARN_ON(!dst_enable_map) ||
+	    WARN_ON(dst == src) ||
+	    WARN_ON(dst->metadata != src->metadata) ||
+	    WARN_ON(dst->metadata != dst_enable_map->metadata))
+		return;
+
+	metadata = dst->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		u32 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+			dst, grp, blk, blk_inst);
+		const u32 *src_blk = kbase_hwcnt_dump_buffer_block_instance(
+			src, grp, blk, blk_inst);
+		const u64 *blk_em = kbase_hwcnt_enable_map_block_instance(
+			dst_enable_map, grp, blk, blk_inst);
+		size_t val_cnt = kbase_hwcnt_metadata_block_values_count(
+			metadata, grp, blk);
+		/* Align upwards to include padding bytes */
+		val_cnt = KBASE_HWCNT_ALIGN_UPWARDS(val_cnt,
+			(KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT /
+			 KBASE_HWCNT_VALUE_BYTES));
+
+		kbase_hwcnt_dump_buffer_block_copy_strict(
+			dst_blk, src_blk, blk_em, val_cnt);
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_copy_strict);
+
+void kbase_hwcnt_dump_buffer_accumulate(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!dst) ||
+	    WARN_ON(!src) ||
+	    WARN_ON(!dst_enable_map) ||
+	    WARN_ON(dst == src) ||
+	    WARN_ON(dst->metadata != src->metadata) ||
+	    WARN_ON(dst->metadata != dst_enable_map->metadata))
+		return;
+
+	metadata = dst->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		u32 *dst_blk;
+		const u32 *src_blk;
+		size_t hdr_cnt;
+		size_t ctr_cnt;
+
+		if (!kbase_hwcnt_enable_map_block_enabled(
+			dst_enable_map, grp, blk, blk_inst))
+			continue;
+
+		dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+			dst, grp, blk, blk_inst);
+		src_blk = kbase_hwcnt_dump_buffer_block_instance(
+			src, grp, blk, blk_inst);
+		hdr_cnt = kbase_hwcnt_metadata_block_headers_count(
+			metadata, grp, blk);
+		ctr_cnt = kbase_hwcnt_metadata_block_counters_count(
+			metadata, grp, blk);
+
+		kbase_hwcnt_dump_buffer_block_accumulate(
+			dst_blk, src_blk, hdr_cnt, ctr_cnt);
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_accumulate);
+
+void kbase_hwcnt_dump_buffer_accumulate_strict(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!dst) ||
+	    WARN_ON(!src) ||
+	    WARN_ON(!dst_enable_map) ||
+	    WARN_ON(dst == src) ||
+	    WARN_ON(dst->metadata != src->metadata) ||
+	    WARN_ON(dst->metadata != dst_enable_map->metadata))
+		return;
+
+	metadata = dst->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		u32 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+			dst, grp, blk, blk_inst);
+		const u32 *src_blk = kbase_hwcnt_dump_buffer_block_instance(
+			src, grp, blk, blk_inst);
+		const u64 *blk_em = kbase_hwcnt_enable_map_block_instance(
+			dst_enable_map, grp, blk, blk_inst);
+		size_t hdr_cnt = kbase_hwcnt_metadata_block_headers_count(
+			metadata, grp, blk);
+		size_t ctr_cnt = kbase_hwcnt_metadata_block_counters_count(
+			metadata, grp, blk);
+		/* Align upwards to include padding bytes */
+		ctr_cnt = KBASE_HWCNT_ALIGN_UPWARDS(hdr_cnt + ctr_cnt,
+			(KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT /
+			 KBASE_HWCNT_VALUE_BYTES) - hdr_cnt);
+
+		kbase_hwcnt_dump_buffer_block_accumulate_strict(
+			dst_blk, src_blk, blk_em, hdr_cnt, ctr_cnt);
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_accumulate_strict);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_types.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_types.h
new file mode 100644
index 0000000..4d78c84
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_types.h
@@ -0,0 +1,1087 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Hardware counter types.
+ * Contains structures for describing the physical layout of hardware counter
+ * dump buffers and enable maps within a system.
+ *
+ * Also contains helper functions for manipulation of these dump buffers and
+ * enable maps.
+ *
+ * Through use of these structures and functions, hardware counters can be
+ * enabled, copied, accumulated, and generally manipulated in a generic way,
+ * regardless of the physical counter dump layout.
+ *
+ * Terminology:
+ *
+ * Hardware Counter System:
+ *   A collection of hardware counter groups, making a full hardware counter
+ *   system.
+ * Hardware Counter Group:
+ *   A group of Hardware Counter Blocks (e.g. a t62x might have more than one
+ *   core group, so it has one counter group per core group, where each group
+ *   may have a different number and layout of counter blocks).
+ * Hardware Counter Block:
+ *   A block of hardware counters (e.g. shader block, tiler block).
+ * Hardware Counter Block Instance:
+ *   An instance of a Hardware Counter Block (e.g. an MP4 GPU might have
+ *   4 shader block instances).
+ *
+ * Block Header:
+ *   A header value inside a counter block. Headers don't count anything,
+ *   so it is only valid to copy or zero them. Headers are always the first
+ *   values in the block.
+ * Block Counter:
+ *   A counter value inside a counter block. Counters can be zeroed, copied,
+ *   or accumulated. Counters are always immediately after the headers in the
+ *   block.
+ * Block Value:
+ *   A catch-all term for block headers and block counters.
+ *
+ * Enable Map:
+ *   An array of u64 bitfields, where each bit either enables exactly one
+ *   block value, or is unused (padding).
+ * Dump Buffer:
+ *   An array of u32 values, where each u32 corresponds either to one block
+ *   value, or is unused (padding).
+ * Availability Mask:
+ *   A bitfield, where each bit corresponds to whether a block instance is
+ *   physically available (e.g. an MP3 GPU may have a sparse core mask of
+ *   0b1011, meaning it only has 3 cores but for hardware counter dumps has the
+ *   same dump buffer layout as an MP4 GPU with a core mask of 0b1111. In this
+ *   case, the availability mask might be 0b1011111 (the exact layout will
+ *   depend on the specific hardware architecture), with the 3 extra early bits
+ *   corresponding to other block instances in the hardware counter system).
+ * Metadata:
+ *   Structure describing the physical layout of the enable map and dump buffers
+ *   for a specific hardware counter system.
+ *
+ */
+
+#ifndef _KBASE_HWCNT_TYPES_H_
+#define _KBASE_HWCNT_TYPES_H_
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include "mali_malisw.h"
+
+/* Number of bytes in each bitfield */
+#define KBASE_HWCNT_BITFIELD_BYTES (sizeof(u64))
+
+/* Number of bits in each bitfield */
+#define KBASE_HWCNT_BITFIELD_BITS (KBASE_HWCNT_BITFIELD_BYTES * BITS_PER_BYTE)
+
+/* Number of bytes for each counter value */
+#define KBASE_HWCNT_VALUE_BYTES (sizeof(u32))
+
+/* Number of bits in an availability mask (i.e. max total number of block
+ * instances supported in a Hardware Counter System)
+ */
+#define KBASE_HWCNT_AVAIL_MASK_BITS (sizeof(u64) * BITS_PER_BYTE)
+
+/**
+ * struct kbase_hwcnt_block_description - Description of one or more identical,
+ *                                        contiguous, Hardware Counter Blocks.
+ * @type:     The arbitrary identifier used to identify the type of the block.
+ * @inst_cnt: The number of Instances of the block.
+ * @hdr_cnt:  The number of 32-bit Block Headers in the block.
+ * @ctr_cnt:  The number of 32-bit Block Counters in the block.
+ */
+struct kbase_hwcnt_block_description {
+	u64 type;
+	size_t inst_cnt;
+	size_t hdr_cnt;
+	size_t ctr_cnt;
+};
+
+/**
+ * struct kbase_hwcnt_group_description - Description of one or more identical,
+ *                                        contiguous Hardware Counter Groups.
+ * @type:    The arbitrary identifier used to identify the type of the group.
+ * @blk_cnt: The number of types of Hardware Counter Block in the group.
+ * @blks:    Non-NULL pointer to an array of blk_cnt block descriptions,
+ *           describing each type of Hardware Counter Block in the group.
+ */
+struct kbase_hwcnt_group_description {
+	u64 type;
+	size_t blk_cnt;
+	const struct kbase_hwcnt_block_description *blks;
+};
+
+/**
+ * struct kbase_hwcnt_description - Description of a Hardware Counter System.
+ * @grp_cnt:    The number of Hardware Counter Groups.
+ * @grps:       Non-NULL pointer to an array of grp_cnt group descriptions,
+ *              describing each Hardware Counter Group in the system.
+ * @avail_mask: Flat Availability Mask for all block instances in the system.
+ */
+struct kbase_hwcnt_description {
+	size_t grp_cnt;
+	const struct kbase_hwcnt_group_description *grps;
+	u64 avail_mask;
+};
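+
+/*
+ * Example (hypothetical layout, for illustration only): describing a system
+ * with a single group holding one tiler-type block instance and four
+ * shader-type block instances, each with 4 headers and 60 counters, all
+ * available:
+ *
+ *	static const struct kbase_hwcnt_block_description blks[] = {
+ *		{ .type = 0, .inst_cnt = 1, .hdr_cnt = 4, .ctr_cnt = 60 },
+ *		{ .type = 1, .inst_cnt = 4, .hdr_cnt = 4, .ctr_cnt = 60 },
+ *	};
+ *	static const struct kbase_hwcnt_group_description grp = {
+ *		.type = 0, .blk_cnt = 2, .blks = blks,
+ *	};
+ *	static const struct kbase_hwcnt_description desc = {
+ *		.grp_cnt = 1, .grps = &grp, .avail_mask = 0x1F,
+ *	};
+ */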
+
+/**
+ * struct kbase_hwcnt_block_metadata - Metadata describing the physical layout
+ *                                     of a block in a Hardware Counter System's
+ *                                     Dump Buffers and Enable Maps.
+ * @type:              The arbitrary identifier used to identify the type of the
+ *                     block.
+ * @inst_cnt:          The number of Instances of the block.
+ * @hdr_cnt:           The number of 32-bit Block Headers in the block.
+ * @ctr_cnt:           The number of 32-bit Block Counters in the block.
+ * @enable_map_index:  Index in u64s into the parent's Enable Map where the
+ *                     Enable Map bitfields of the Block Instances described by
+ *                     this metadata start.
+ * @enable_map_stride: Stride in u64s between the Enable Maps of each of the
+ *                     Block Instances described by this metadata.
+ * @dump_buf_index:    Index in u32s into the parent's Dump Buffer where the
+ *                     Dump Buffers of the Block Instances described by this
+ *                     metadata start.
+ * @dump_buf_stride:   Stride in u32s between the Dump Buffers of each of the
+ *                     Block Instances described by this metadata.
+ * @avail_mask_index:  Index in bits into the parent's Availability Mask where
+ *                     the Availability Masks of the Block Instances described
+ *                     by this metadata start.
+ */
+struct kbase_hwcnt_block_metadata {
+	u64 type;
+	size_t inst_cnt;
+	size_t hdr_cnt;
+	size_t ctr_cnt;
+	size_t enable_map_index;
+	size_t enable_map_stride;
+	size_t dump_buf_index;
+	size_t dump_buf_stride;
+	size_t avail_mask_index;
+};
+
+/**
+ * struct kbase_hwcnt_group_metadata - Metadata describing the physical layout
+ *                                     of a group of blocks in a Hardware
+ *                                     Counter System's Dump Buffers and Enable
+ *                                     Maps.
+ * @type:             The arbitrary identifier used to identify the type of the
+ *                    group.
+ * @blk_cnt:          The number of types of Hardware Counter Block in the
+ *                    group.
+ * @blk_metadata:     Non-NULL pointer to an array of blk_cnt block metadata,
+ *                    describing the physical layout of each type of Hardware
+ *                    Counter Block in the group.
+ * @enable_map_index: Index in u64s into the parent's Enable Map where the
+ *                    Enable Maps of the blocks within the group described by
+ *                    this metadata start.
+ * @dump_buf_index:   Index in u32s into the parent's Dump Buffer where the
+ *                    Dump Buffers of the blocks within the group described by
+ *                    this metadata start.
+ * @avail_mask_index: Index in bits into the parent's Availability Mask where
+ *                    the Availability Masks of the blocks within the group
+ *                    described by this metadata start.
+ */
+struct kbase_hwcnt_group_metadata {
+	u64 type;
+	size_t blk_cnt;
+	const struct kbase_hwcnt_block_metadata *blk_metadata;
+	size_t enable_map_index;
+	size_t dump_buf_index;
+	size_t avail_mask_index;
+};
+
+/**
+ * struct kbase_hwcnt_metadata - Metadata describing the physical layout
+ *                               of Dump Buffers and Enable Maps within a
+ *                               Hardware Counter System.
+ * @grp_cnt:          The number of Hardware Counter Groups.
+ * @grp_metadata:     Non-NULL pointer to an array of grp_cnt group metadata,
+ *                    describing the physical layout of each Hardware Counter
+ *                    Group in the system.
+ * @enable_map_bytes: The size in bytes of an Enable Map needed for the system.
+ * @dump_buf_bytes:   The size in bytes of a Dump Buffer needed for the system.
+ * @avail_mask:       The Availability Mask for the system.
+ */
+struct kbase_hwcnt_metadata {
+	size_t grp_cnt;
+	const struct kbase_hwcnt_group_metadata *grp_metadata;
+	size_t enable_map_bytes;
+	size_t dump_buf_bytes;
+	u64 avail_mask;
+};
+
+/**
+ * struct kbase_hwcnt_enable_map - Hardware Counter Enable Map. Array of u64
+ *                                 bitfields.
+ * @metadata:   Non-NULL pointer to metadata used to identify, and to describe
+ *              the layout of the enable map.
+ * @enable_map: Non-NULL pointer of size metadata->enable_map_bytes to an array
+ *              of u64 bitfields, each bit of which enables one hardware
+ *              counter.
+ */
+struct kbase_hwcnt_enable_map {
+	const struct kbase_hwcnt_metadata *metadata;
+	u64 *enable_map;
+};
+
+/**
+ * struct kbase_hwcnt_dump_buffer - Hardware Counter Dump Buffer. Array of u32
+ *                                  values.
+ * @metadata: Non-NULL pointer to metadata used to identify, and to describe
+ *            the layout of the Dump Buffer.
+ * @dump_buf: Non-NULL pointer of size metadata->dump_buf_bytes to an array
+ *            of u32 values.
+ */
+struct kbase_hwcnt_dump_buffer {
+	const struct kbase_hwcnt_metadata *metadata;
+	u32 *dump_buf;
+};
+
+/**
+ * struct kbase_hwcnt_dump_buffer_array - Hardware Counter Dump Buffer array.
+ * @page_addr:  Address of allocated pages. A single allocation is used for all
+ *              Dump Buffers in the array.
+ * @page_order: The allocation order of the pages.
+ * @buf_cnt:    The number of allocated Dump Buffers.
+ * @bufs:       Non-NULL pointer to the array of Dump Buffers.
+ */
+struct kbase_hwcnt_dump_buffer_array {
+	unsigned long page_addr;
+	unsigned int page_order;
+	size_t buf_cnt;
+	struct kbase_hwcnt_dump_buffer *bufs;
+};
+
+/**
+ * kbase_hwcnt_metadata_create() - Create a hardware counter metadata object
+ *                                 from a description.
+ * @desc:     Non-NULL pointer to a hardware counter description.
+ * @metadata: Non-NULL pointer to where created metadata will be stored on
+ *            success.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_metadata_create(
+	const struct kbase_hwcnt_description *desc,
+	const struct kbase_hwcnt_metadata **metadata);
+
+/**
+ * kbase_hwcnt_metadata_destroy() - Destroy a hardware counter metadata object.
+ * @metadata: Pointer to hardware counter metadata
+ */
+void kbase_hwcnt_metadata_destroy(const struct kbase_hwcnt_metadata *metadata);
+
+/**
+ * kbase_hwcnt_metadata_group_count() - Get the number of groups.
+ * @metadata: Non-NULL pointer to metadata.
+ *
+ * Return: Number of hardware counter groups described by metadata.
+ */
+#define kbase_hwcnt_metadata_group_count(metadata) \
+	((metadata)->grp_cnt)
+
+/**
+ * kbase_hwcnt_metadata_group_type() - Get the arbitrary type of a group.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ *
+ * Return: Type of the group grp.
+ */
+#define kbase_hwcnt_metadata_group_type(metadata, grp) \
+	((metadata)->grp_metadata[(grp)].type)
+
+/**
+ * kbase_hwcnt_metadata_block_count() - Get the number of blocks in a group.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ *
+ * Return: Number of blocks in group grp.
+ */
+#define kbase_hwcnt_metadata_block_count(metadata, grp) \
+	((metadata)->grp_metadata[(grp)].blk_cnt)
+
+/**
+ * kbase_hwcnt_metadata_block_type() - Get the arbitrary type of a block.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ *
+ * Return: Type of the block blk in group grp.
+ */
+#define kbase_hwcnt_metadata_block_type(metadata, grp, blk) \
+	((metadata)->grp_metadata[(grp)].blk_metadata[(blk)].type)
+
+/**
+ * kbase_hwcnt_metadata_block_instance_count() - Get the number of instances of
+ *                                               a block.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ *
+ * Return: Number of instances of block blk in group grp.
+ */
+#define kbase_hwcnt_metadata_block_instance_count(metadata, grp, blk) \
+	((metadata)->grp_metadata[(grp)].blk_metadata[(blk)].inst_cnt)
+
+/**
+ * kbase_hwcnt_metadata_block_headers_count() - Get the number of counter
+ *                                              headers.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ *
+ * Return: Number of u32 counter headers in each instance of block blk in
+ *         group grp.
+ */
+#define kbase_hwcnt_metadata_block_headers_count(metadata, grp, blk) \
+	((metadata)->grp_metadata[(grp)].blk_metadata[(blk)].hdr_cnt)
+
+/**
+ * kbase_hwcnt_metadata_block_counters_count() - Get the number of counters.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ *
+ * Return: Number of u32 counters in each instance of block blk in group
+ *         grp.
+ */
+#define kbase_hwcnt_metadata_block_counters_count(metadata, grp, blk) \
+	((metadata)->grp_metadata[(grp)].blk_metadata[(blk)].ctr_cnt)
+
+/**
+ * kbase_hwcnt_metadata_block_values_count() - Get the number of values.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ *
+ * Return: Number of u32 headers plus counters in each instance of block blk
+ *         in group grp.
+ */
+#define kbase_hwcnt_metadata_block_values_count(metadata, grp, blk) \
+	(kbase_hwcnt_metadata_block_counters_count((metadata), (grp), (blk)) \
+	+ kbase_hwcnt_metadata_block_headers_count((metadata), (grp), (blk)))
+
+/**
+ * kbase_hwcnt_metadata_for_each_block() - Iterate over each block instance in
+ *                                         the metadata.
+ * @md:       Non-NULL pointer to metadata.
+ * @grp:      size_t variable used as group iterator.
+ * @blk:      size_t variable used as block iterator.
+ * @blk_inst: size_t variable used as block instance iterator.
+ *
+ * Iteration order is group, then block, then block instance (i.e. linearly
+ * through memory).
+ */
+#define kbase_hwcnt_metadata_for_each_block(md, grp, blk, blk_inst) \
+	for ((grp) = 0; (grp) < kbase_hwcnt_metadata_group_count((md)); (grp)++) \
+		for ((blk) = 0; (blk) < kbase_hwcnt_metadata_block_count((md), (grp)); (blk)++) \
+			for ((blk_inst) = 0; (blk_inst) < kbase_hwcnt_metadata_block_instance_count((md), (grp), (blk)); (blk_inst)++)
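+
+/*
+ * Example (illustrative sketch): counting the enabled block instances in an
+ * enable map with the iterator above:
+ *
+ *	size_t grp, blk, blk_inst;
+ *	size_t enabled = 0;
+ *
+ *	kbase_hwcnt_metadata_for_each_block(
+ *		map->metadata, grp, blk, blk_inst) {
+ *		if (kbase_hwcnt_enable_map_block_enabled(
+ *			map, grp, blk, blk_inst))
+ *			enabled++;
+ *	}
+ */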
+
+/**
+ * kbase_hwcnt_metadata_block_avail_bit() - Get the bit index into the avail
+ *                                          mask corresponding to the block.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ *
+ * Return: The bit index into the avail mask for the block.
+ */
+static inline size_t kbase_hwcnt_metadata_block_avail_bit(
+	const struct kbase_hwcnt_metadata *metadata,
+	size_t grp,
+	size_t blk)
+{
+	const size_t bit =
+		metadata->grp_metadata[grp].avail_mask_index +
+		metadata->grp_metadata[grp].blk_metadata[blk].avail_mask_index;
+
+	return bit;
+}
+
+/**
+ * kbase_hwcnt_metadata_block_instance_avail() - Check if a block instance is
+ *                                               available.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ * @blk_inst: Index of the block instance in the block.
+ *
+ * Return: true if the block instance is available, else false.
+ */
+static inline bool kbase_hwcnt_metadata_block_instance_avail(
+	const struct kbase_hwcnt_metadata *metadata,
+	size_t grp,
+	size_t blk,
+	size_t blk_inst)
+{
+	const size_t bit = kbase_hwcnt_metadata_block_avail_bit(
+		metadata, grp, blk) + blk_inst;
+	const u64 mask = 1ull << bit;
+
+	return (metadata->avail_mask & mask) != 0;
+}
+
+/**
+ * kbase_hwcnt_enable_map_alloc() - Allocate an enable map.
+ * @metadata:   Non-NULL pointer to metadata describing the system.
+ * @enable_map: Non-NULL pointer to enable map to be initialised. Will be
+ *              initialised to all zeroes (i.e. all counters disabled).
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_enable_map_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	struct kbase_hwcnt_enable_map *enable_map);
+
+/**
+ * kbase_hwcnt_enable_map_free() - Free an enable map.
+ * @enable_map: Enable map to be freed.
+ *
+ * Can be safely called on an all-zeroed enable map structure, or on an already
+ * freed enable map.
+ */
+void kbase_hwcnt_enable_map_free(struct kbase_hwcnt_enable_map *enable_map);
+
+/**
+ * kbase_hwcnt_enable_map_block_instance() - Get the pointer to a block
+ *                                           instance's enable map.
+ * @map:      Non-NULL pointer to (const) enable map.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ * @blk_inst: Index of the block instance in the block.
+ *
+ * Return: (const) u64* to the bitfield(s) used as the enable map for the
+ *         block instance.
+ */
+#define kbase_hwcnt_enable_map_block_instance(map, grp, blk, blk_inst) \
+	((map)->enable_map + \
+	 (map)->metadata->grp_metadata[(grp)].enable_map_index + \
+	 (map)->metadata->grp_metadata[(grp)].blk_metadata[(blk)].enable_map_index + \
+	 (map)->metadata->grp_metadata[(grp)].blk_metadata[(blk)].enable_map_stride * (blk_inst))
+
+/**
+ * kbase_hwcnt_bitfield_count() - Calculate the number of u64 bitfields required
+ *                                to hold at least one bit per value.
+ * @val_cnt: Number of values.
+ *
+ * Return: Number of required bitfields.
+ */
+static inline size_t kbase_hwcnt_bitfield_count(size_t val_cnt)
+{
+	return (val_cnt + KBASE_HWCNT_BITFIELD_BITS - 1) /
+		KBASE_HWCNT_BITFIELD_BITS;
+}
+
+/**
+ * kbase_hwcnt_enable_map_block_disable_all() - Disable all values in a block.
+ * @dst:      Non-NULL pointer to enable map.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ * @blk_inst: Index of the block instance in the block.
+ */
+static inline void kbase_hwcnt_enable_map_block_disable_all(
+	struct kbase_hwcnt_enable_map *dst,
+	size_t grp,
+	size_t blk,
+	size_t blk_inst)
+{
+	const size_t val_cnt = kbase_hwcnt_metadata_block_values_count(
+		dst->metadata, grp, blk);
+	const size_t bitfld_cnt = kbase_hwcnt_bitfield_count(val_cnt);
+	u64 *block_enable_map = kbase_hwcnt_enable_map_block_instance(
+		dst, grp, blk, blk_inst);
+
+	memset(block_enable_map, 0, bitfld_cnt * KBASE_HWCNT_BITFIELD_BYTES);
+}
+
+/**
+ * kbase_hwcnt_enable_map_disable_all() - Disable all values in the enable map.
+ * @dst: Non-NULL pointer to enable map to zero.
+ */
+static inline void kbase_hwcnt_enable_map_disable_all(
+	struct kbase_hwcnt_enable_map *dst)
+{
+	memset(dst->enable_map, 0, dst->metadata->enable_map_bytes);
+}
+
+/**
+ * kbase_hwcnt_enable_map_block_enable_all() - Enable all values in a block.
+ * @dst:      Non-NULL pointer to enable map.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ * @blk_inst: Index of the block instance in the block.
+ */
+static inline void kbase_hwcnt_enable_map_block_enable_all(
+	struct kbase_hwcnt_enable_map *dst,
+	size_t grp,
+	size_t blk,
+	size_t blk_inst)
+{
+	const size_t val_cnt = kbase_hwcnt_metadata_block_values_count(
+		dst->metadata, grp, blk);
+	const size_t bitfld_cnt = kbase_hwcnt_bitfield_count(val_cnt);
+	u64 *block_enable_map = kbase_hwcnt_enable_map_block_instance(
+		dst, grp, blk, blk_inst);
+
+	size_t bitfld_idx;
+
+	for (bitfld_idx = 0; bitfld_idx < bitfld_cnt; bitfld_idx++) {
+		const u64 remaining_values = val_cnt -
+			(bitfld_idx * KBASE_HWCNT_BITFIELD_BITS);
+		u64 block_enable_map_mask = U64_MAX;
+
+		if (remaining_values < KBASE_HWCNT_BITFIELD_BITS)
+			block_enable_map_mask = (1ull << remaining_values) - 1;
+
+		block_enable_map[bitfld_idx] = block_enable_map_mask;
+	}
+}
+
+/**
+ * kbase_hwcnt_enable_map_enable_all() - Enable all values in an enable map.
+ * @dst: Non-NULL pointer to enable map.
+ */
+static inline void kbase_hwcnt_enable_map_enable_all(
+	struct kbase_hwcnt_enable_map *dst)
+{
+	size_t grp, blk, blk_inst;
+
+	kbase_hwcnt_metadata_for_each_block(dst->metadata, grp, blk, blk_inst)
+		kbase_hwcnt_enable_map_block_enable_all(
+			dst, grp, blk, blk_inst);
+}
+
+/**
+ * kbase_hwcnt_enable_map_copy() - Copy an enable map to another.
+ * @dst: Non-NULL pointer to destination enable map.
+ * @src: Non-NULL pointer to source enable map.
+ *
+ * The dst and src MUST have been created from the same metadata.
+ */
+static inline void kbase_hwcnt_enable_map_copy(
+	struct kbase_hwcnt_enable_map *dst,
+	const struct kbase_hwcnt_enable_map *src)
+{
+	memcpy(dst->enable_map,
+	       src->enable_map,
+	       dst->metadata->enable_map_bytes);
+}
+
+/**
+ * kbase_hwcnt_enable_map_union() - Union dst and src enable maps into dst.
+ * @dst: Non-NULL pointer to destination enable map.
+ * @src: Non-NULL pointer to source enable map.
+ *
+ * The dst and src MUST have been created from the same metadata.
+ */
+static inline void kbase_hwcnt_enable_map_union(
+	struct kbase_hwcnt_enable_map *dst,
+	const struct kbase_hwcnt_enable_map *src)
+{
+	const size_t bitfld_count =
+		dst->metadata->enable_map_bytes / KBASE_HWCNT_BITFIELD_BYTES;
+	size_t i;
+
+	for (i = 0; i < bitfld_count; i++)
+		dst->enable_map[i] |= src->enable_map[i];
+}
+
+/**
+ * kbase_hwcnt_enable_map_block_enabled() - Check if any values in a block
+ *                                          instance are enabled.
+ * @enable_map: Non-NULL pointer to enable map.
+ * @grp:        Index of the group in the metadata.
+ * @blk:        Index of the block in the group.
+ * @blk_inst:   Index of the block instance in the block.
+ *
+ * Return: true if any values in the block are enabled, else false.
+ */
+static inline bool kbase_hwcnt_enable_map_block_enabled(
+	const struct kbase_hwcnt_enable_map *enable_map,
+	size_t grp,
+	size_t blk,
+	size_t blk_inst)
+{
+	bool any_enabled = false;
+	const size_t val_cnt = kbase_hwcnt_metadata_block_values_count(
+		enable_map->metadata, grp, blk);
+	const size_t bitfld_cnt = kbase_hwcnt_bitfield_count(val_cnt);
+	const u64 *block_enable_map = kbase_hwcnt_enable_map_block_instance(
+		enable_map, grp, blk, blk_inst);
+
+	size_t bitfld_idx;
+
+	for (bitfld_idx = 0; bitfld_idx < bitfld_cnt; bitfld_idx++) {
+		const u64 remaining_values = val_cnt -
+			(bitfld_idx * KBASE_HWCNT_BITFIELD_BITS);
+		u64 block_enable_map_mask = U64_MAX;
+
+		if (remaining_values < KBASE_HWCNT_BITFIELD_BITS)
+			block_enable_map_mask = (1ull << remaining_values) - 1;
+
+		any_enabled = any_enabled ||
+			(block_enable_map[bitfld_idx] & block_enable_map_mask);
+	}
+
+	return any_enabled;
+}
+
+/**
+ * kbase_hwcnt_enable_map_any_enabled() - Check if any values are enabled.
+ * @enable_map: Non-NULL pointer to enable map.
+ *
+ * Return: true if any values are enabled, else false.
+ */
+static inline bool kbase_hwcnt_enable_map_any_enabled(
+	const struct kbase_hwcnt_enable_map *enable_map)
+{
+	size_t grp, blk, blk_inst;
+
+	kbase_hwcnt_metadata_for_each_block(
+		enable_map->metadata, grp, blk, blk_inst) {
+		if (kbase_hwcnt_enable_map_block_enabled(
+			enable_map, grp, blk, blk_inst))
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * kbase_hwcnt_enable_map_block_value_enabled() - Check if a value in a block
+ *                                                instance is enabled.
+ * @bitfld:  Non-NULL pointer to the block bitfield(s) obtained from a call to
+ *           kbase_hwcnt_enable_map_block_instance.
+ * @val_idx: Index of the value to check in the block instance.
+ *
+ * Return: true if the value was enabled, else false.
+ */
+static inline bool kbase_hwcnt_enable_map_block_value_enabled(
+	const u64 *bitfld,
+	size_t val_idx)
+{
+	const size_t idx = val_idx / KBASE_HWCNT_BITFIELD_BITS;
+	const size_t bit = val_idx % KBASE_HWCNT_BITFIELD_BITS;
+	const u64 mask = 1ull << bit;
+
+	return (bitfld[idx] & mask) != 0;
+}
+
+/**
+ * kbase_hwcnt_enable_map_block_enable_value() - Enable a value in a block
+ *                                               instance.
+ * @bitfld:  Non-NULL pointer to the block bitfield(s) obtained from a call to
+ *           kbase_hwcnt_enable_map_block_instance.
+ * @val_idx: Index of the value to enable in the block instance.
+ */
+static inline void kbase_hwcnt_enable_map_block_enable_value(
+	u64 *bitfld,
+	size_t val_idx)
+{
+	const size_t idx = val_idx / KBASE_HWCNT_BITFIELD_BITS;
+	const size_t bit = val_idx % KBASE_HWCNT_BITFIELD_BITS;
+	const u64 mask = 1ull << bit;
+
+	bitfld[idx] |= mask;
+}
+
+/**
+ * kbase_hwcnt_enable_map_block_disable_value() - Disable a value in a block
+ *                                                instance.
+ * @bitfld:  Non-NULL pointer to the block bitfield(s) obtained from a call to
+ *           kbase_hwcnt_enable_map_block_instance.
+ * @val_idx: Index of the value to disable in the block instance.
+ */
+static inline void kbase_hwcnt_enable_map_block_disable_value(
+	u64 *bitfld,
+	size_t val_idx)
+{
+	const size_t idx = val_idx / KBASE_HWCNT_BITFIELD_BITS;
+	const size_t bit = val_idx % KBASE_HWCNT_BITFIELD_BITS;
+	const u64 mask = 1ull << bit;
+
+	bitfld[idx] &= ~mask;
+}
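+
+/*
+ * Example (illustrative sketch): enabling value index 5 of one block instance
+ * and then checking it:
+ *
+ *	u64 *bitfld = kbase_hwcnt_enable_map_block_instance(
+ *		map, grp, blk, blk_inst);
+ *
+ *	kbase_hwcnt_enable_map_block_enable_value(bitfld, 5);
+ *	WARN_ON(!kbase_hwcnt_enable_map_block_value_enabled(bitfld, 5));
+ */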
+
+/**
+ * kbase_hwcnt_dump_buffer_alloc() - Allocate a dump buffer.
+ * @metadata: Non-NULL pointer to metadata describing the system.
+ * @dump_buf: Non-NULL pointer to dump buffer to be initialised. Will be
+ *            left with undefined values, so must be used as a copy
+ *            destination, or cleared before use.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_dump_buffer_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	struct kbase_hwcnt_dump_buffer *dump_buf);
+
+/**
+ * kbase_hwcnt_dump_buffer_free() - Free a dump buffer.
+ * @dump_buf: Dump buffer to be freed.
+ *
+ * Can be safely called on an all-zeroed dump buffer structure, or on an already
+ * freed dump buffer.
+ */
+void kbase_hwcnt_dump_buffer_free(struct kbase_hwcnt_dump_buffer *dump_buf);
+
+/**
+ * kbase_hwcnt_dump_buffer_array_alloc() - Allocate an array of dump buffers.
+ * @metadata:  Non-NULL pointer to metadata describing the system.
+ * @n:         Number of dump buffers to allocate
+ * @dump_bufs: Non-NULL pointer to dump buffer array to be initialised. Each
+ *             dump buffer in the array will hold undefined values, so must be
+ *             used as a copy destination, or cleared before use.
+ *
+ * A single contiguous page allocation will be used for all of the buffers
+ * inside the array, where:
+ * dump_bufs->bufs[i].dump_buf == page_addr + i * metadata->dump_buf_bytes
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_dump_buffer_array_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	size_t n,
+	struct kbase_hwcnt_dump_buffer_array *dump_bufs);
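+
+/*
+ * Example (illustrative sketch): allocating 8 dump buffers and zeroing the
+ * third; per the address relation above, arr.bufs[2].dump_buf points
+ * 2 * metadata->dump_buf_bytes bytes past arr.page_addr:
+ *
+ *	struct kbase_hwcnt_dump_buffer_array arr;
+ *	int err = kbase_hwcnt_dump_buffer_array_alloc(metadata, 8, &arr);
+ *
+ *	if (!err) {
+ *		kbase_hwcnt_dump_buffer_zero_strict(&arr.bufs[2]);
+ *		kbase_hwcnt_dump_buffer_array_free(&arr);
+ *	}
+ */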
+
+/**
+ * kbase_hwcnt_dump_buffer_array_free() - Free a dump buffer array.
+ * @dump_bufs: Dump buffer array to be freed.
+ *
+ * Can be safely called on an all-zeroed dump buffer array structure, or on an
+ * already freed dump buffer array.
+ */
+void kbase_hwcnt_dump_buffer_array_free(
+	struct kbase_hwcnt_dump_buffer_array *dump_bufs);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_instance() - Get the pointer to a block
+ *                                            instance's dump buffer.
+ * @buf:      Non-NULL pointer to (const) dump buffer.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ * @blk_inst: Index of the block instance in the block.
+ *
+ * Return: (const) u32* to the dump buffer for the block instance.
+ */
+#define kbase_hwcnt_dump_buffer_block_instance(buf, grp, blk, blk_inst) \
+	((buf)->dump_buf + \
+	 (buf)->metadata->grp_metadata[(grp)].dump_buf_index + \
+	 (buf)->metadata->grp_metadata[(grp)].blk_metadata[(blk)].dump_buf_index + \
+	 (buf)->metadata->grp_metadata[(grp)].blk_metadata[(blk)].dump_buf_stride * (blk_inst))
+
+/**
+ * kbase_hwcnt_dump_buffer_zero() - Zero all enabled values in dst.
+ *                                  After the operation, all non-enabled values
+ *                                  will be undefined.
+ * @dst:            Non-NULL pointer to dump buffer.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ *
+ * The dst and dst_enable_map MUST have been created from the same metadata.
+ */
+void kbase_hwcnt_dump_buffer_zero(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_enable_map *dst_enable_map);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_zero() - Zero all values in a block.
+ * @dst_blk: Non-NULL pointer to dst block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @val_cnt: Number of values in the block.
+ */
+static inline void kbase_hwcnt_dump_buffer_block_zero(
+	u32 *dst_blk,
+	size_t val_cnt)
+{
+	memset(dst_blk, 0, (val_cnt * KBASE_HWCNT_VALUE_BYTES));
+}
+
+/**
+ * kbase_hwcnt_dump_buffer_zero_strict() - Zero all values in dst.
+ *                                         After the operation, all values
+ *                                         (including padding bytes) will be
+ *                                         zero.
+ *                                         Slower than the non-strict variant.
+ * @dst: Non-NULL pointer to dump buffer.
+ */
+void kbase_hwcnt_dump_buffer_zero_strict(
+	struct kbase_hwcnt_dump_buffer *dst);
+
+/**
+ * kbase_hwcnt_dump_buffer_zero_non_enabled() - Zero all non-enabled values in
+ *                                              dst (including padding bytes and
+ *                                              unavailable blocks).
+ *                                              After the operation, all enabled
+ *                                              values will be unchanged.
+ * @dst:            Non-NULL pointer to dump buffer.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ *
+ * The dst and dst_enable_map MUST have been created from the same metadata.
+ */
+void kbase_hwcnt_dump_buffer_zero_non_enabled(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_enable_map *dst_enable_map);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_zero_non_enabled() - Zero all non-enabled
+ *                                                    values in a block.
+ *                                                    After the operation, all
+ *                                                    enabled values will be
+ *                                                    unchanged.
+ * @dst_blk: Non-NULL pointer to dst block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @blk_em:  Non-NULL pointer to the block bitfield(s) obtained from a call to
+ *           kbase_hwcnt_enable_map_block_instance.
+ * @val_cnt: Number of values in the block.
+ */
+static inline void kbase_hwcnt_dump_buffer_block_zero_non_enabled(
+	u32 *dst_blk,
+	const u64 *blk_em,
+	size_t val_cnt)
+{
+	size_t val;
+
+	for (val = 0; val < val_cnt; val++) {
+		if (!kbase_hwcnt_enable_map_block_value_enabled(blk_em, val))
+			dst_blk[val] = 0;
+	}
+}
+
+/**
+ * kbase_hwcnt_dump_buffer_copy() - Copy all enabled values from src to dst.
+ *                                  After the operation, all non-enabled values
+ *                                  will be undefined.
+ * @dst:            Non-NULL pointer to dst dump buffer.
+ * @src:            Non-NULL pointer to src dump buffer.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ *
+ * The dst, src, and dst_enable_map MUST have been created from the same
+ * metadata.
+ */
+void kbase_hwcnt_dump_buffer_copy(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_copy() - Copy all block values from src to dst.
+ * @dst_blk: Non-NULL pointer to dst block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @src_blk: Non-NULL pointer to src block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @val_cnt: Number of values in the block.
+ */
+static inline void kbase_hwcnt_dump_buffer_block_copy(
+	u32 *dst_blk,
+	const u32 *src_blk,
+	size_t val_cnt)
+{
+	/* Copy all the counters in the block instance.
+	 * Values of non-enabled counters are undefined.
+	 */
+	memcpy(dst_blk, src_blk, (val_cnt * KBASE_HWCNT_VALUE_BYTES));
+}
+
+/**
+ * kbase_hwcnt_dump_buffer_copy_strict() - Copy all enabled values from src to
+ *                                         dst.
+ *                                         After the operation, all non-enabled
+ *                                         values (including padding bytes) will
+ *                                         be zero.
+ *                                         Slower than the non-strict variant.
+ * @dst:            Non-NULL pointer to dst dump buffer.
+ * @src:            Non-NULL pointer to src dump buffer.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ *
+ * The dst, src, and dst_enable_map MUST have been created from the same
+ * metadata.
+ */
+void kbase_hwcnt_dump_buffer_copy_strict(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_copy_strict() - Copy all enabled block values
+ *                                               from src to dst.
+ *                                               After the operation, all
+ *                                               non-enabled values will be
+ *                                               zero.
+ * @dst_blk: Non-NULL pointer to dst block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @src_blk: Non-NULL pointer to src block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @blk_em:  Non-NULL pointer to the block bitfield(s) obtained from a call to
+ *           kbase_hwcnt_enable_map_block_instance.
+ * @val_cnt: Number of values in the block.
+ */
+static inline void kbase_hwcnt_dump_buffer_block_copy_strict(
+	u32 *dst_blk,
+	const u32 *src_blk,
+	const u64 *blk_em,
+	size_t val_cnt)
+{
+	size_t val;
+
+	for (val = 0; val < val_cnt; val++) {
+		bool val_enabled = kbase_hwcnt_enable_map_block_value_enabled(
+			blk_em, val);
+
+		dst_blk[val] = val_enabled ? src_blk[val] : 0;
+	}
+}
+
+/**
+ * kbase_hwcnt_dump_buffer_accumulate() - Copy all enabled headers and
+ *                                        accumulate all enabled counters from
+ *                                        src to dst.
+ *                                        After the operation, all non-enabled
+ *                                        values will be undefined.
+ * @dst:            Non-NULL pointer to dst dump buffer.
+ * @src:            Non-NULL pointer to src dump buffer.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ *
+ * The dst, src, and dst_enable_map MUST have been created from the same
+ * metadata.
+ */
+void kbase_hwcnt_dump_buffer_accumulate(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_accumulate() - Copy all block headers and
+ *                                              accumulate all block counters
+ *                                              from src to dst.
+ * @dst_blk: Non-NULL pointer to dst block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @src_blk: Non-NULL pointer to src block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @hdr_cnt: Number of headers in the block.
+ * @ctr_cnt: Number of counters in the block.
+ */
+static inline void kbase_hwcnt_dump_buffer_block_accumulate(
+	u32 *dst_blk,
+	const u32 *src_blk,
+	size_t hdr_cnt,
+	size_t ctr_cnt)
+{
+	size_t ctr;
+	/* Copy all the headers in the block instance.
+	 * Values of non-enabled headers are undefined.
+	 */
+	memcpy(dst_blk, src_blk, hdr_cnt * KBASE_HWCNT_VALUE_BYTES);
+
+	/* Accumulate all the counters in the block instance.
+	 * Values of non-enabled counters are undefined.
+	 */
+	for (ctr = hdr_cnt; ctr < ctr_cnt + hdr_cnt; ctr++) {
+		u32 *dst_ctr = dst_blk + ctr;
+		const u32 *src_ctr = src_blk + ctr;
+
+		const u32 src_counter = *src_ctr;
+		const u32 dst_counter = *dst_ctr;
+
+		/* Saturating add */
+		u32 accumulated = src_counter + dst_counter;
+
+		if (accumulated < src_counter)
+			accumulated = U32_MAX;
+
+		*dst_ctr = accumulated;
+	}
+}
+
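+/*
+ * Worked example of the saturating add above: with dst = 0xfffffff0 and
+ * src = 0x20, the plain sum wraps to 0x10, which is smaller than src, so the
+ * result is clamped to U32_MAX instead of wrapping.
+ */
+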
+/**
+ * kbase_hwcnt_dump_buffer_accumulate_strict() - Copy all enabled headers and
+ *                                               accumulate all enabled counters
+ *                                               from src to dst.
+ *                                               After the operation, all
+ *                                               non-enabled values (including
+ *                                               padding bytes) will be zero.
+ *                                               Slower than the non-strict
+ *                                               variant.
+ * @dst:            Non-NULL pointer to dst dump buffer.
+ * @src:            Non-NULL pointer to src dump buffer.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ *
+ * The dst, src, and dst_enable_map MUST have been created from the same
+ * metadata.
+ */
+void kbase_hwcnt_dump_buffer_accumulate_strict(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_accumulate_strict() - Copy all enabled block
+ *                                                     headers and accumulate
+ *                                                     all block counters from
+ *                                                     src to dst.
+ *                                                     After the operation, all
+ *                                                     non-enabled values will
+ *                                                     be zero.
+ * @dst_blk: Non-NULL pointer to dst block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @src_blk: Non-NULL pointer to src block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @blk_em:  Non-NULL pointer to the block bitfield(s) obtained from a call to
+ *           kbase_hwcnt_enable_map_block_instance.
+ * @hdr_cnt: Number of headers in the block.
+ * @ctr_cnt: Number of counters in the block.
+ */
+static inline void kbase_hwcnt_dump_buffer_block_accumulate_strict(
+	u32 *dst_blk,
+	const u32 *src_blk,
+	const u64 *blk_em,
+	size_t hdr_cnt,
+	size_t ctr_cnt)
+{
+	size_t ctr;
+
+	kbase_hwcnt_dump_buffer_block_copy_strict(
+		dst_blk, src_blk, blk_em, hdr_cnt);
+
+	for (ctr = hdr_cnt; ctr < ctr_cnt + hdr_cnt; ctr++) {
+		bool ctr_enabled = kbase_hwcnt_enable_map_block_value_enabled(
+			blk_em, ctr);
+
+		u32 *dst_ctr = dst_blk + ctr;
+		const u32 *src_ctr = src_blk + ctr;
+
+		const u32 src_counter = *src_ctr;
+		const u32 dst_counter = *dst_ctr;
+
+		/* Saturating add */
+		u32 accumulated = src_counter + dst_counter;
+
+		if (accumulated < src_counter)
+			accumulated = U32_MAX;
+
+		*dst_ctr = ctr_enabled ? accumulated : 0;
+	}
+}
+
+#endif /* _KBASE_HWCNT_TYPES_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_virtualizer.c b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_virtualizer.c
new file mode 100644
index 0000000..917e47c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_virtualizer.c
@@ -0,0 +1,790 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_hwcnt_virtualizer.h"
+#include "mali_kbase_hwcnt_accumulator.h"
+#include "mali_kbase_hwcnt_context.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_malisw.h"
+#include "mali_kbase_debug.h"
+#include "mali_kbase_linux.h"
+
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+/**
+ * struct kbase_hwcnt_virtualizer - Hardware counter virtualizer structure.
+ * @hctx:              Hardware counter context being virtualized.
+ * @dump_threshold_ns: Minimum period between dumps requested by different
+ *                     clients. If a client requests a dump within this period
+ *                     of the previous dump, no new accumulator dump is
+ *                     performed and its accumulated values are used instead.
+ *                     If 0, rate limiting is disabled.
+ * @metadata:          Hardware counter metadata.
+ * @lock:              Lock acquired at all entrypoints, to protect mutable
+ *                     state.
+ * @client_count:      Current number of virtualizer clients.
+ * @clients:           List of virtualizer clients.
+ * @accum:             Hardware counter accumulator. NULL if no clients.
+ * @scratch_map:       Enable map used as scratch space during counter changes.
+ * @scratch_buf:       Dump buffer used as scratch space during dumps.
+ * @ts_last_dump_ns:   End time of most recent dump across all clients.
+ */
+struct kbase_hwcnt_virtualizer {
+	struct kbase_hwcnt_context *hctx;
+	u64 dump_threshold_ns;
+	const struct kbase_hwcnt_metadata *metadata;
+	struct mutex lock;
+	size_t client_count;
+	struct list_head clients;
+	struct kbase_hwcnt_accumulator *accum;
+	struct kbase_hwcnt_enable_map scratch_map;
+	struct kbase_hwcnt_dump_buffer scratch_buf;
+	u64 ts_last_dump_ns;
+};
+
+/**
+ * struct kbase_hwcnt_virtualizer_client - Virtualizer client structure.
+ * @node:        List node used for virtualizer client list.
+ * @hvirt:       Hardware counter virtualizer.
+ * @enable_map:  Enable map with client's current enabled counters.
+ * @accum_buf:   Dump buffer with client's current accumulated counters.
+ * @has_accum:   True if accum_buf contains any accumulated counters.
+ * @ts_start_ns: Counter collection start time of current dump.
+ */
+struct kbase_hwcnt_virtualizer_client {
+	struct list_head node;
+	struct kbase_hwcnt_virtualizer *hvirt;
+	struct kbase_hwcnt_enable_map enable_map;
+	struct kbase_hwcnt_dump_buffer accum_buf;
+	bool has_accum;
+	u64 ts_start_ns;
+};
+
+const struct kbase_hwcnt_metadata *kbase_hwcnt_virtualizer_metadata(
+	struct kbase_hwcnt_virtualizer *hvirt)
+{
+	if (!hvirt)
+		return NULL;
+
+	return hvirt->metadata;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_metadata);
+
+/**
+ * kbasep_hwcnt_virtualizer_client_free - Free a virtualizer client's memory.
+ * @hvcli: Pointer to virtualizer client.
+ *
+ * Will safely free a client in any partial state of construction.
+ */
+static void kbasep_hwcnt_virtualizer_client_free(
+	struct kbase_hwcnt_virtualizer_client *hvcli)
+{
+	if (!hvcli)
+		return;
+
+	kbase_hwcnt_dump_buffer_free(&hvcli->accum_buf);
+	kbase_hwcnt_enable_map_free(&hvcli->enable_map);
+	kfree(hvcli);
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_client_alloc - Allocate memory for a virtualizer
+ *                                         client.
+ * @metadata:  Non-NULL pointer to counter metadata.
+ * @out_hvcli: Non-NULL pointer to where created client will be stored on
+ *             success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_virtualizer_client_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	struct kbase_hwcnt_virtualizer_client **out_hvcli)
+{
+	int errcode;
+	struct kbase_hwcnt_virtualizer_client *hvcli = NULL;
+
+	WARN_ON(!metadata);
+	WARN_ON(!out_hvcli);
+
+	hvcli = kzalloc(sizeof(*hvcli), GFP_KERNEL);
+	if (!hvcli)
+		return -ENOMEM;
+
+	errcode = kbase_hwcnt_enable_map_alloc(metadata, &hvcli->enable_map);
+	if (errcode)
+		goto error;
+
+	errcode = kbase_hwcnt_dump_buffer_alloc(metadata, &hvcli->accum_buf);
+	if (errcode)
+		goto error;
+
+	*out_hvcli = hvcli;
+	return 0;
+error:
+	kbasep_hwcnt_virtualizer_client_free(hvcli);
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_client_accumulate - Accumulate a dump buffer into a
+ *                                              client's accumulation buffer.
+ * @hvcli:    Non-NULL pointer to virtualizer client.
+ * @dump_buf: Non-NULL pointer to dump buffer to accumulate from.
+ */
+static void kbasep_hwcnt_virtualizer_client_accumulate(
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	const struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	WARN_ON(!hvcli);
+	WARN_ON(!dump_buf);
+	lockdep_assert_held(&hvcli->hvirt->lock);
+
+	if (hvcli->has_accum) {
+		/* If already some accumulation, accumulate */
+		kbase_hwcnt_dump_buffer_accumulate(
+			&hvcli->accum_buf, dump_buf, &hvcli->enable_map);
+	} else {
+		/* If no accumulation, copy */
+		kbase_hwcnt_dump_buffer_copy(
+			&hvcli->accum_buf, dump_buf, &hvcli->enable_map);
+	}
+	hvcli->has_accum = true;
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_accumulator_term - Terminate the hardware counter
+ *                                             accumulator after final client
+ *                                             removal.
+ * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
+ *
+ * Will safely terminate the accumulator in any partial state of initialisation.
+ */
+static void kbasep_hwcnt_virtualizer_accumulator_term(
+	struct kbase_hwcnt_virtualizer *hvirt)
+{
+	WARN_ON(!hvirt);
+	lockdep_assert_held(&hvirt->lock);
+	WARN_ON(hvirt->client_count);
+
+	kbase_hwcnt_dump_buffer_free(&hvirt->scratch_buf);
+	kbase_hwcnt_enable_map_free(&hvirt->scratch_map);
+	kbase_hwcnt_accumulator_release(hvirt->accum);
+	hvirt->accum = NULL;
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_accumulator_init - Initialise the hardware counter
+ *                                             accumulator before first client
+ *                                             addition.
+ * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_virtualizer_accumulator_init(
+	struct kbase_hwcnt_virtualizer *hvirt)
+{
+	int errcode;
+
+	WARN_ON(!hvirt);
+	lockdep_assert_held(&hvirt->lock);
+	WARN_ON(hvirt->client_count);
+	WARN_ON(hvirt->accum);
+
+	errcode = kbase_hwcnt_accumulator_acquire(
+		hvirt->hctx, &hvirt->accum);
+	if (errcode)
+		goto error;
+
+	errcode = kbase_hwcnt_enable_map_alloc(
+		hvirt->metadata, &hvirt->scratch_map);
+	if (errcode)
+		goto error;
+
+	errcode = kbase_hwcnt_dump_buffer_alloc(
+		hvirt->metadata, &hvirt->scratch_buf);
+	if (errcode)
+		goto error;
+
+	return 0;
+error:
+	kbasep_hwcnt_virtualizer_accumulator_term(hvirt);
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_client_add - Add a newly allocated client to the
+ *                                       virtualizer.
+ * @hvirt:      Non-NULL pointer to the hardware counter virtualizer.
+ * @hvcli:      Non-NULL pointer to the virtualizer client to add.
+ * @enable_map: Non-NULL pointer to client's initial enable map.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_virtualizer_client_add(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	const struct kbase_hwcnt_enable_map *enable_map)
+{
+	int errcode = 0;
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+
+	WARN_ON(!hvirt);
+	WARN_ON(!hvcli);
+	WARN_ON(!enable_map);
+	lockdep_assert_held(&hvirt->lock);
+
+	if (hvirt->client_count == 0)
+		/* First client added, so initialise the accumulator */
+		errcode = kbasep_hwcnt_virtualizer_accumulator_init(hvirt);
+	if (errcode)
+		return errcode;
+
+	hvirt->client_count += 1;
+
+	if (hvirt->client_count == 1) {
+		/* First client, so just pass the enable map onwards as is */
+		errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum,
+			enable_map, &ts_start_ns, &ts_end_ns, NULL);
+	} else {
+		struct kbase_hwcnt_virtualizer_client *pos;
+
+		/* Make the scratch enable map the union of all enable maps */
+		kbase_hwcnt_enable_map_copy(
+			&hvirt->scratch_map, enable_map);
+		list_for_each_entry(pos, &hvirt->clients, node)
+			kbase_hwcnt_enable_map_union(
+				&hvirt->scratch_map, &pos->enable_map);
+
+		/* Set the counters with the new union enable map */
+		errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum,
+			&hvirt->scratch_map,
+			&ts_start_ns, &ts_end_ns,
+			&hvirt->scratch_buf);
+		/* Accumulate into only existing clients' accumulation bufs */
+		if (!errcode)
+			list_for_each_entry(pos, &hvirt->clients, node)
+				kbasep_hwcnt_virtualizer_client_accumulate(
+					pos, &hvirt->scratch_buf);
+	}
+	if (errcode)
+		goto error;
+
+	list_add(&hvcli->node, &hvirt->clients);
+	hvcli->hvirt = hvirt;
+	kbase_hwcnt_enable_map_copy(&hvcli->enable_map, enable_map);
+	hvcli->has_accum = false;
+	hvcli->ts_start_ns = ts_end_ns;
+
+	/* Store the most recent dump time for rate limiting */
+	hvirt->ts_last_dump_ns = ts_end_ns;
+
+	return 0;
+error:
+	hvirt->client_count -= 1;
+	if (hvirt->client_count == 0)
+		kbasep_hwcnt_virtualizer_accumulator_term(hvirt);
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_client_remove - Remove a client from the
+ *                                          virtualizer.
+ * @hvirt:      Non-NULL pointer to the hardware counter virtualizer.
+ * @hvcli:      Non-NULL pointer to the virtualizer client to remove.
+ */
+static void kbasep_hwcnt_virtualizer_client_remove(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_hwcnt_virtualizer_client *hvcli)
+{
+	int errcode = 0;
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+
+	WARN_ON(!hvirt);
+	WARN_ON(!hvcli);
+	lockdep_assert_held(&hvirt->lock);
+
+	list_del(&hvcli->node);
+	hvirt->client_count -= 1;
+
+	if (hvirt->client_count == 0) {
+		/* Last client removed, so terminate the accumulator */
+		kbasep_hwcnt_virtualizer_accumulator_term(hvirt);
+	} else {
+		struct kbase_hwcnt_virtualizer_client *pos;
+		/* Make the scratch enable map the union of all enable maps */
+		kbase_hwcnt_enable_map_disable_all(&hvirt->scratch_map);
+		list_for_each_entry(pos, &hvirt->clients, node)
+			kbase_hwcnt_enable_map_union(
+				&hvirt->scratch_map, &pos->enable_map);
+		/* Set the counters with the new union enable map */
+		errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum,
+			&hvirt->scratch_map,
+			&ts_start_ns, &ts_end_ns,
+			&hvirt->scratch_buf);
+		/* Accumulate into remaining clients' accumulation bufs */
+		if (!errcode)
+			list_for_each_entry(pos, &hvirt->clients, node)
+				kbasep_hwcnt_virtualizer_client_accumulate(
+					pos, &hvirt->scratch_buf);
+
+		/* Store the most recent dump time for rate limiting */
+		hvirt->ts_last_dump_ns = ts_end_ns;
+	}
+	WARN_ON(errcode);
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_client_set_counters - Perform a dump of the client's
+ *                                                currently enabled counters,
+ *                                                and enable a new set of
+ *                                                counters that will be used for
+ *                                                subsequent dumps.
+ * @hvirt:       Non-NULL pointer to the hardware counter virtualizer.
+ * @hvcli:       Non-NULL pointer to the virtualizer client.
+ * @enable_map:  Non-NULL pointer to the new counter enable map for the client.
+ *               Must have the same metadata as the virtualizer.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * Return: 0 on success or error code.
+ */
+static int kbasep_hwcnt_virtualizer_client_set_counters(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	const struct kbase_hwcnt_enable_map *enable_map,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	int errcode;
+	struct kbase_hwcnt_virtualizer_client *pos;
+
+	WARN_ON(!hvirt);
+	WARN_ON(!hvcli);
+	WARN_ON(!enable_map);
+	WARN_ON(!ts_start_ns);
+	WARN_ON(!ts_end_ns);
+	WARN_ON(enable_map->metadata != hvirt->metadata);
+	WARN_ON(dump_buf && (dump_buf->metadata != hvirt->metadata));
+	lockdep_assert_held(&hvirt->lock);
+
+	/* Make the scratch enable map the union of all enable maps */
+	kbase_hwcnt_enable_map_copy(&hvirt->scratch_map, enable_map);
+	list_for_each_entry(pos, &hvirt->clients, node)
+		/* Ignore the enable map of the selected client */
+		if (pos != hvcli)
+			kbase_hwcnt_enable_map_union(
+				&hvirt->scratch_map, &pos->enable_map);
+
+	/* Set the counters with the new union enable map */
+	errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum,
+		&hvirt->scratch_map, ts_start_ns, ts_end_ns,
+		&hvirt->scratch_buf);
+	if (errcode)
+		return errcode;
+
+	/* Accumulate into all accumulation bufs except the selected client's */
+	list_for_each_entry(pos, &hvirt->clients, node)
+		if (pos != hvcli)
+			kbasep_hwcnt_virtualizer_client_accumulate(
+				pos, &hvirt->scratch_buf);
+
+	/* Finally, write into the dump buf */
+	if (dump_buf) {
+		const struct kbase_hwcnt_dump_buffer *src = &hvirt->scratch_buf;
+
+		if (hvcli->has_accum) {
+			kbase_hwcnt_dump_buffer_accumulate(
+				&hvcli->accum_buf, src, &hvcli->enable_map);
+			src = &hvcli->accum_buf;
+		}
+		kbase_hwcnt_dump_buffer_copy(dump_buf, src, &hvcli->enable_map);
+	}
+	hvcli->has_accum = false;
+
+	/* Update the selected client's enable map */
+	kbase_hwcnt_enable_map_copy(&hvcli->enable_map, enable_map);
+
+	/* Fix up the timestamps */
+	*ts_start_ns = hvcli->ts_start_ns;
+	hvcli->ts_start_ns = *ts_end_ns;
+
+	/* Store the most recent dump time for rate limiting */
+	hvirt->ts_last_dump_ns = *ts_end_ns;
+
+	return errcode;
+}
+
+int kbase_hwcnt_virtualizer_client_set_counters(
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	const struct kbase_hwcnt_enable_map *enable_map,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	int errcode;
+	struct kbase_hwcnt_virtualizer *hvirt;
+
+	if (!hvcli || !enable_map || !ts_start_ns || !ts_end_ns)
+		return -EINVAL;
+
+	hvirt = hvcli->hvirt;
+
+	if ((enable_map->metadata != hvirt->metadata) ||
+	    (dump_buf && (dump_buf->metadata != hvirt->metadata)))
+		return -EINVAL;
+
+	mutex_lock(&hvirt->lock);
+
+	if ((hvirt->client_count == 1) && (!hvcli->has_accum)) {
+		/*
+		 * If there's only one client with no prior accumulation, we can
+		 * completely skip the virtualize and just pass through the call
+		 * to the accumulator, saving a fair few copies and
+		 * accumulations.
+		 */
+		errcode = kbase_hwcnt_accumulator_set_counters(
+			hvirt->accum, enable_map,
+			ts_start_ns, ts_end_ns, dump_buf);
+
+		if (!errcode) {
+			/* Update the selected client's enable map */
+			kbase_hwcnt_enable_map_copy(
+				&hvcli->enable_map, enable_map);
+
+			/* Fix up the timestamps */
+			*ts_start_ns = hvcli->ts_start_ns;
+			hvcli->ts_start_ns = *ts_end_ns;
+
+			/* Store the most recent dump time for rate limiting */
+			hvirt->ts_last_dump_ns = *ts_end_ns;
+		}
+	} else {
+		/* Otherwise, do the full virtualize */
+		errcode = kbasep_hwcnt_virtualizer_client_set_counters(
+			hvirt, hvcli, enable_map,
+			ts_start_ns, ts_end_ns, dump_buf);
+	}
+
+	mutex_unlock(&hvirt->lock);
+
+	return errcode;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_client_set_counters);
+
+/**
+ * kbasep_hwcnt_virtualizer_client_dump - Perform a dump of the client's
+ *                                        currently enabled counters.
+ * @hvirt:       Non-NULL pointer to the hardware counter virtualizer.
+ * @hvcli:       Non-NULL pointer to the virtualizer client.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * Return: 0 on success or error code.
+ */
+static int kbasep_hwcnt_virtualizer_client_dump(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	int errcode;
+	struct kbase_hwcnt_virtualizer_client *pos;
+
+	WARN_ON(!hvirt);
+	WARN_ON(!hvcli);
+	WARN_ON(!ts_start_ns);
+	WARN_ON(!ts_end_ns);
+	WARN_ON(dump_buf && (dump_buf->metadata != hvirt->metadata));
+	lockdep_assert_held(&hvirt->lock);
+
+	/* Perform the dump */
+	errcode = kbase_hwcnt_accumulator_dump(hvirt->accum,
+		ts_start_ns, ts_end_ns, &hvirt->scratch_buf);
+	if (errcode)
+		return errcode;
+
+	/* Accumulate into all accumulation bufs except the selected client's */
+	list_for_each_entry(pos, &hvirt->clients, node)
+		if (pos != hvcli)
+			kbasep_hwcnt_virtualizer_client_accumulate(
+				pos, &hvirt->scratch_buf);
+
+	/* Finally, write into the dump buf */
+	if (dump_buf) {
+		const struct kbase_hwcnt_dump_buffer *src = &hvirt->scratch_buf;
+
+		if (hvcli->has_accum) {
+			kbase_hwcnt_dump_buffer_accumulate(
+				&hvcli->accum_buf, src, &hvcli->enable_map);
+			src = &hvcli->accum_buf;
+		}
+		kbase_hwcnt_dump_buffer_copy(dump_buf, src, &hvcli->enable_map);
+	}
+	hvcli->has_accum = false;
+
+	/* Fix up the timestamps */
+	*ts_start_ns = hvcli->ts_start_ns;
+	hvcli->ts_start_ns = *ts_end_ns;
+
+	/* Store the most recent dump time for rate limiting */
+	hvirt->ts_last_dump_ns = *ts_end_ns;
+
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_client_dump_rate_limited - Perform a dump of the
+ *                                           client's currently enabled counters
+ *                                           if it hasn't been rate limited,
+ *                                           otherwise return the client's most
+ *                                           recent accumulation.
+ * @hvirt:       Non-NULL pointer to the hardware counter virtualizer.
+ * @hvcli:       Non-NULL pointer to the virtualizer client.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * Return: 0 on success or error code.
+ */
+static int kbasep_hwcnt_virtualizer_client_dump_rate_limited(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	bool rate_limited = true;
+
+	WARN_ON(!hvirt);
+	WARN_ON(!hvcli);
+	WARN_ON(!ts_start_ns);
+	WARN_ON(!ts_end_ns);
+	WARN_ON(dump_buf && (dump_buf->metadata != hvirt->metadata));
+	lockdep_assert_held(&hvirt->lock);
+
+	if (hvirt->dump_threshold_ns == 0) {
+		/* Threshold == 0, so rate limiting disabled */
+		rate_limited = false;
+	} else if (hvirt->ts_last_dump_ns == hvcli->ts_start_ns) {
+		/* Last dump was performed by this client, and dumps from an
+		 * individual client are never rate limited
+		 */
+		rate_limited = false;
+	} else {
+		const u64 ts_ns =
+			kbase_hwcnt_accumulator_timestamp_ns(hvirt->accum);
+		const u64 time_since_last_dump_ns =
+			ts_ns - hvirt->ts_last_dump_ns;
+
+		/* Dump period equals or exceeds the threshold */
+		if (time_since_last_dump_ns >= hvirt->dump_threshold_ns)
+			rate_limited = false;
+	}
+
+	if (!rate_limited)
+		return kbasep_hwcnt_virtualizer_client_dump(
+			hvirt, hvcli, ts_start_ns, ts_end_ns, dump_buf);
+
+	/* If we've gotten this far, the client must have something accumulated,
+	 * otherwise it is a logic error.
+	 */
+	WARN_ON(!hvcli->has_accum);
+
+	if (dump_buf)
+		kbase_hwcnt_dump_buffer_copy(
+			dump_buf, &hvcli->accum_buf, &hvcli->enable_map);
+	hvcli->has_accum = false;
+
+	*ts_start_ns = hvcli->ts_start_ns;
+	*ts_end_ns = hvirt->ts_last_dump_ns;
+	hvcli->ts_start_ns = hvirt->ts_last_dump_ns;
+
+	return 0;
+}
+
+int kbase_hwcnt_virtualizer_client_dump(
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	int errcode;
+	struct kbase_hwcnt_virtualizer *hvirt;
+
+	if (!hvcli || !ts_start_ns || !ts_end_ns)
+		return -EINVAL;
+
+	hvirt = hvcli->hvirt;
+
+	if (dump_buf && (dump_buf->metadata != hvirt->metadata))
+		return -EINVAL;
+
+	mutex_lock(&hvirt->lock);
+
+	if ((hvirt->client_count == 1) && (!hvcli->has_accum)) {
+		/*
+		 * If there's only one client with no prior accumulation, we can
+		 * completely skip the virtualize and just pass through the call
+		 * to the accumulator, saving a fair few copies and
+		 * accumulations.
+		 */
+		errcode = kbase_hwcnt_accumulator_dump(
+			hvirt->accum, ts_start_ns, ts_end_ns, dump_buf);
+
+		if (!errcode) {
+			/* Fix up the timestamps */
+			*ts_start_ns = hvcli->ts_start_ns;
+			hvcli->ts_start_ns = *ts_end_ns;
+
+			/* Store the most recent dump time for rate limiting */
+			hvirt->ts_last_dump_ns = *ts_end_ns;
+		}
+	} else {
+		/* Otherwise, do the full virtualize */
+		errcode = kbasep_hwcnt_virtualizer_client_dump_rate_limited(
+			hvirt, hvcli, ts_start_ns, ts_end_ns, dump_buf);
+	}
+
+	mutex_unlock(&hvirt->lock);
+
+	return errcode;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_client_dump);
+
+int kbase_hwcnt_virtualizer_client_create(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	const struct kbase_hwcnt_enable_map *enable_map,
+	struct kbase_hwcnt_virtualizer_client **out_hvcli)
+{
+	int errcode;
+	struct kbase_hwcnt_virtualizer_client *hvcli;
+
+	if (!hvirt || !enable_map || !out_hvcli ||
+	    (enable_map->metadata != hvirt->metadata))
+		return -EINVAL;
+
+	errcode = kbasep_hwcnt_virtualizer_client_alloc(
+		hvirt->metadata, &hvcli);
+	if (errcode)
+		return errcode;
+
+	mutex_lock(&hvirt->lock);
+
+	errcode = kbasep_hwcnt_virtualizer_client_add(hvirt, hvcli, enable_map);
+
+	mutex_unlock(&hvirt->lock);
+
+	if (errcode) {
+		kbasep_hwcnt_virtualizer_client_free(hvcli);
+		return errcode;
+	}
+
+	*out_hvcli = hvcli;
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_client_create);
+
+void kbase_hwcnt_virtualizer_client_destroy(
+	struct kbase_hwcnt_virtualizer_client *hvcli)
+{
+	if (!hvcli)
+		return;
+
+	mutex_lock(&hvcli->hvirt->lock);
+
+	kbasep_hwcnt_virtualizer_client_remove(hvcli->hvirt, hvcli);
+
+	mutex_unlock(&hvcli->hvirt->lock);
+
+	kbasep_hwcnt_virtualizer_client_free(hvcli);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_client_destroy);
+
+int kbase_hwcnt_virtualizer_init(
+	struct kbase_hwcnt_context *hctx,
+	u64 dump_threshold_ns,
+	struct kbase_hwcnt_virtualizer **out_hvirt)
+{
+	struct kbase_hwcnt_virtualizer *virt;
+	const struct kbase_hwcnt_metadata *metadata;
+
+	if (!hctx || !out_hvirt)
+		return -EINVAL;
+
+	metadata = kbase_hwcnt_context_metadata(hctx);
+	if (!metadata)
+		return -EINVAL;
+
+	virt = kzalloc(sizeof(*virt), GFP_KERNEL);
+	if (!virt)
+		return -ENOMEM;
+
+	virt->hctx = hctx;
+	virt->dump_threshold_ns = dump_threshold_ns;
+	virt->metadata = metadata;
+
+	mutex_init(&virt->lock);
+	INIT_LIST_HEAD(&virt->clients);
+
+	*out_hvirt = virt;
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_init);
+
+void kbase_hwcnt_virtualizer_term(
+	struct kbase_hwcnt_virtualizer *hvirt)
+{
+	if (!hvirt)
+		return;
+
+	/* Non-zero client count implies client leak */
+	if (WARN_ON(hvirt->client_count != 0)) {
+		struct kbase_hwcnt_virtualizer_client *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &hvirt->clients, node)
+			kbase_hwcnt_virtualizer_client_destroy(pos);
+	}
+
+	WARN_ON(hvirt->client_count != 0);
+	WARN_ON(hvirt->accum);
+
+	kfree(hvirt);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_term);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_virtualizer.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_virtualizer.h
new file mode 100644
index 0000000..8f628c3
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_virtualizer.h
@@ -0,0 +1,145 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Hardware counter virtualizer API.
+ *
+ * Virtualizes a hardware counter context, so multiple clients can access
+ * a single hardware counter resource as though each was the exclusive user.
+ */
+
+#ifndef _KBASE_HWCNT_VIRTUALIZER_H_
+#define _KBASE_HWCNT_VIRTUALIZER_H_
+
+#include <linux/types.h>
+
+struct kbase_hwcnt_context;
+struct kbase_hwcnt_virtualizer;
+struct kbase_hwcnt_virtualizer_client;
+struct kbase_hwcnt_enable_map;
+struct kbase_hwcnt_dump_buffer;
+
+/**
+ * kbase_hwcnt_virtualizer_init - Initialise a hardware counter virtualizer.
+ * @hctx:              Non-NULL pointer to the hardware counter context to
+ *                     virtualize.
+ * @dump_threshold_ns: Minimum period between dumps requested by different
+ *                     clients. If a client requests a dump within this period
+ *                     of the previous dump, no new accumulator dump will be
+ *                     performed and its accumulated values will be used
+ *                     instead. If 0, rate limiting will be disabled.
+ * @out_hvirt:         Non-NULL pointer to where the pointer to the created
+ *                     virtualizer will be stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_virtualizer_init(
+	struct kbase_hwcnt_context *hctx,
+	u64 dump_threshold_ns,
+	struct kbase_hwcnt_virtualizer **out_hvirt);
+
+/**
+ * kbase_hwcnt_virtualizer_term - Terminate a hardware counter virtualizer.
+ * @hvirt: Pointer to virtualizer to be terminated.
+ */
+void kbase_hwcnt_virtualizer_term(
+	struct kbase_hwcnt_virtualizer *hvirt);
+
+/**
+ * kbase_hwcnt_virtualizer_metadata - Get the hardware counter metadata used by
+ *                                    the virtualizer, so related counter data
+ *                                    structures can be created.
+ * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
+ *
+ * Return: Non-NULL pointer to metadata, or NULL on error.
+ */
+const struct kbase_hwcnt_metadata *kbase_hwcnt_virtualizer_metadata(
+	struct kbase_hwcnt_virtualizer *hvirt);
+
+/**
+ * kbase_hwcnt_virtualizer_client_create - Create a new virtualizer client.
+ * @hvirt:      Non-NULL pointer to the hardware counter virtualizer.
+ * @enable_map: Non-NULL pointer to the enable map for the client. Must have the
+ *              same metadata as the virtualizer.
+ * @out_hvcli:  Non-NULL pointer to where the pointer to the created client will
+ *              be stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_virtualizer_client_create(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	const struct kbase_hwcnt_enable_map *enable_map,
+	struct kbase_hwcnt_virtualizer_client **out_hvcli);
+
+/**
+ * kbase_hwcnt_virtualizer_client_destroy() - Destroy a virtualizer client.
+ * @hvcli: Pointer to the hardware counter client.
+ */
+void kbase_hwcnt_virtualizer_client_destroy(
+	struct kbase_hwcnt_virtualizer_client *hvcli);
+
+/**
+ * kbase_hwcnt_virtualizer_client_set_counters - Perform a dump of the client's
+ *                                               currently enabled counters, and
+ *                                               enable a new set of counters
+ *                                               that will be used for
+ *                                               subsequent dumps.
+ * @hvcli:       Non-NULL pointer to the virtualizer client.
+ * @enable_map:  Non-NULL pointer to the new counter enable map for the client.
+ *               Must have the same metadata as the virtualizer.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * Return: 0 on success or error code.
+ */
+int kbase_hwcnt_virtualizer_client_set_counters(
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	const struct kbase_hwcnt_enable_map *enable_map,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf);
+
+/**
+ * kbase_hwcnt_virtualizer_client_dump - Perform a dump of the client's
+ *                                       currently enabled counters.
+ * @hvcli:       Non-NULL pointer to the virtualizer client.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * Return: 0 on success or error code.
+ */
+int kbase_hwcnt_virtualizer_client_dump(
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf);
+
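+/*
+ * Example: typical client lifetime against a virtualizer. Illustrative sketch
+ * only; "hctx" is assumed to be an initialised hardware counter context, and
+ * "map" and "dump_buf" an enable map and dump buffer created from the
+ * virtualizer's metadata. Error handling is omitted.
+ *
+ *	struct kbase_hwcnt_virtualizer *hvirt;
+ *	struct kbase_hwcnt_virtualizer_client *hvcli;
+ *	u64 ts_start_ns, ts_end_ns;
+ *
+ *	kbase_hwcnt_virtualizer_init(hctx, 0, &hvirt);
+ *	kbase_hwcnt_virtualizer_client_create(hvirt, &map, &hvcli);
+ *	kbase_hwcnt_virtualizer_client_dump(hvcli, &ts_start_ns, &ts_end_ns,
+ *					    &dump_buf);
+ *	kbase_hwcnt_virtualizer_client_destroy(hvcli);
+ *	kbase_hwcnt_virtualizer_term(hvirt);
+ */
+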
+#endif /* _KBASE_HWCNT_VIRTUALIZER_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_ioctl.h b/drivers/gpu/arm/midgard/mali_kbase_ioctl.h
new file mode 100644
index 0000000..9b138e5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_ioctl.h
@@ -0,0 +1,938 @@
+/*
+ *
+ * (C) COPYRIGHT 2017-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_IOCTL_H_
+#define _KBASE_IOCTL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <asm-generic/ioctl.h>
+#include <linux/types.h>
+
+#define KBASE_IOCTL_TYPE 0x80
+
+/*
+ * 11.1:
+ * - Add BASE_MEM_TILER_ALIGN_TOP under base_mem_alloc_flags
+ * 11.2:
+ * - KBASE_MEM_QUERY_FLAGS can return KBASE_REG_PF_GROW and KBASE_REG_PROTECTED;
+ *   some user-side clients prior to 11.2 might fault if they receive these
+ *   flags
+ * 11.3:
+ * - New ioctls KBASE_IOCTL_STICKY_RESOURCE_MAP and
+ *   KBASE_IOCTL_STICKY_RESOURCE_UNMAP
+ * 11.4:
+ * - New ioctl KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET
+ * 11.5:
+ * - New ioctl: KBASE_IOCTL_MEM_JIT_INIT (old ioctl renamed to _OLD)
+ * 11.6:
+ * - Added flags field to base_jit_alloc_info structure, which can be used to
+ *   specify pseudo chunked tiler alignment for JIT allocations.
+ * 11.7:
+ * - Removed UMP support
+ * 11.8:
+ * - Added BASE_MEM_UNCACHED_GPU under base_mem_alloc_flags
+ * 11.9:
+ * - Added BASE_MEM_PERMANENT_KERNEL_MAPPING and BASE_MEM_FLAGS_KERNEL_ONLY
+ *   under base_mem_alloc_flags
+ * 11.10:
+ * - Enabled the use of nr_extres field of base_jd_atom_v2 structure for
+ *   JIT_ALLOC and JIT_FREE type softjobs to enable multiple JIT allocations
+ *   with one softjob.
+ * 11.11:
+ * - Added BASE_MEM_GPU_VA_SAME_4GB_PAGE under base_mem_alloc_flags
+ * 11.12:
+ * - Removed ioctl: KBASE_IOCTL_GET_PROFILING_CONTROLS
+ * 11.13:
+ * - New ioctl: KBASE_IOCTL_MEM_EXEC_INIT
+ * 11.14:
+ * - Add BASE_MEM_GROUP_ID_MASK, base_mem_group_id_get, base_mem_group_id_set
+ *   under base_mem_alloc_flags
+ * 11.15:
+ * - Added BASEP_CONTEXT_MMU_GROUP_ID_MASK under base_context_create_flags.
+ * - Require KBASE_IOCTL_SET_FLAGS before BASE_MEM_MAP_TRACKING_HANDLE can be
+ *   passed to mmap().
+ * 11.16:
+ * - Extended ioctl KBASE_IOCTL_MEM_SYNC to accept imported dma-buf.
+ * - Modified (backwards compatible) ioctl KBASE_IOCTL_MEM_IMPORT behavior for
+ *   dma-buf. Now, buffers are mapped on GPU when first imported, no longer
+ *   requiring external resource or sticky resource tracking, UNLESS
+ *   CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND is enabled.
+ * 11.17:
+ * - Added BASE_JD_REQ_JOB_SLOT.
+ * - Reused padding field in base_jd_atom_v2 to pass job slot number.
+ * 11.18:
+ * - New ioctl: KBASE_IOCTL_GET_CPU_GPU_TIMEINFO
+ */
+#define BASE_UK_VERSION_MAJOR 11
+#define BASE_UK_VERSION_MINOR 17
+
+/**
+ * struct kbase_ioctl_version_check - Check version compatibility with kernel
+ *
+ * @major: Major version number
+ * @minor: Minor version number
+ */
+struct kbase_ioctl_version_check {
+	__u16 major;
+	__u16 minor;
+};
+
+#define KBASE_IOCTL_VERSION_CHECK \
+	_IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check)
+
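+/*
+ * Example: issuing the version check from user space. Illustrative only;
+ * "fd" is assumed to be an open kbase device file descriptor.
+ *
+ *	struct kbase_ioctl_version_check vc = {
+ *		.major = BASE_UK_VERSION_MAJOR,
+ *		.minor = BASE_UK_VERSION_MINOR,
+ *	};
+ *
+ *	if (ioctl(fd, KBASE_IOCTL_VERSION_CHECK, &vc) == 0)
+ *		printf("agreed on version %u.%u\n", vc.major, vc.minor);
+ */
+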
+/**
+ * struct kbase_ioctl_set_flags - Set kernel context creation flags
+ *
+ * @create_flags: Flags - see base_context_create_flags
+ */
+struct kbase_ioctl_set_flags {
+	__u32 create_flags;
+};
+
+#define KBASE_IOCTL_SET_FLAGS \
+	_IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags)
+
+/**
+ * struct kbase_ioctl_job_submit - Submit jobs/atoms to the kernel
+ *
+ * @addr: Memory address of an array of struct base_jd_atom_v2
+ * @nr_atoms: Number of entries in the array
+ * @stride: sizeof(struct base_jd_atom_v2)
+ */
+struct kbase_ioctl_job_submit {
+	__u64 addr;
+	__u32 nr_atoms;
+	__u32 stride;
+};
+
+#define KBASE_IOCTL_JOB_SUBMIT \
+	_IOW(KBASE_IOCTL_TYPE, 2, struct kbase_ioctl_job_submit)
+
+/**
+ * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel
+ *
+ * @buffer: Pointer to the buffer to store properties into
+ * @size: Size of the buffer
+ * @flags: Flags - must be zero for now
+ *
+ * The ioctl will return the number of bytes stored into @buffer or an error
+ * on failure (e.g. @size is too small). If @size is specified as 0 then no
+ * data will be written but the return value will be the number of bytes needed
+ * for all the properties.
+ *
+ * @flags may be used in the future to request a different format for the
+ * buffer. With @flags == 0 the following format is used.
+ *
+ * The buffer will be filled with pairs of values, a u32 key identifying the
+ * property followed by the value. The size of the value is identified using
+ * the bottom bits of the key. The value then immediately followed the key and
+ * is tightly packed (there is no padding). All keys and values are
+ * little-endian.
+ *
+ * 00 = u8
+ * 01 = u16
+ * 10 = u32
+ * 11 = u64
+ */
+struct kbase_ioctl_get_gpuprops {
+	__u64 buffer;
+	__u32 size;
+	__u32 flags;
+};
+
+#define KBASE_IOCTL_GET_GPUPROPS \
+	_IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops)
+
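+/*
+ * Example: decoding the @flags == 0 property buffer in user space. This is an
+ * illustrative sketch; "buf" and "len" are assumed to hold the bytes returned
+ * by the ioctl, handle_property() is a hypothetical consumer, memcpy is used
+ * because the packing gives no alignment guarantees, and a little-endian host
+ * is assumed.
+ *
+ *	size_t i = 0;
+ *
+ *	while (i + sizeof(uint32_t) <= len) {
+ *		uint32_t key;
+ *		uint64_t value = 0;
+ *		size_t value_size;
+ *
+ *		memcpy(&key, buf + i, sizeof(key));
+ *		i += sizeof(key);
+ *		value_size = (size_t)1 << (key & 3);
+ *		memcpy(&value, buf + i, value_size);
+ *		i += value_size;
+ *		handle_property(key >> 2, value);
+ *	}
+ */
+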
+#define KBASE_IOCTL_POST_TERM \
+	_IO(KBASE_IOCTL_TYPE, 4)
+
+/**
+ * union kbase_ioctl_mem_alloc - Allocate memory on the GPU
+ *
+ * @va_pages: The number of pages of virtual address space to reserve
+ * @commit_pages: The number of physical pages to allocate
+ * @extent: The number of extra pages to allocate on each GPU fault which grows
+ *          the region
+ * @flags: Flags
+ * @gpu_va: The GPU virtual address which is allocated
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_alloc {
+	struct {
+		__u64 va_pages;
+		__u64 commit_pages;
+		__u64 extent;
+		__u64 flags;
+	} in;
+	struct {
+		__u64 flags;
+		__u64 gpu_va;
+	} out;
+};
+
+#define KBASE_IOCTL_MEM_ALLOC \
+	_IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc)
+
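+/*
+ * Example: a minimal allocation request from user space. Illustrative only;
+ * "fd" is an open kbase device file descriptor and "alloc_flags" stands in
+ * for a valid combination of BASE_MEM_xxx flags from the base headers.
+ *
+ *	union kbase_ioctl_mem_alloc alloc;
+ *
+ *	memset(&alloc, 0, sizeof(alloc));
+ *	alloc.in.va_pages = 16;
+ *	alloc.in.commit_pages = 16;
+ *	alloc.in.flags = alloc_flags;
+ *	if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, &alloc) == 0)
+ *		... alloc.out.gpu_va now holds the region's GPU VA ...
+ */
+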
+/**
+ * union kbase_ioctl_mem_query - Query properties of a GPU memory region
+ * @gpu_addr: A GPU address contained within the region
+ * @query: The type of query
+ * @value: The result of the query
+ *
+ * Use a %KBASE_MEM_QUERY_xxx flag as input for @query.
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_query {
+	struct {
+		__u64 gpu_addr;
+		__u64 query;
+	} in;
+	struct {
+		__u64 value;
+	} out;
+};
+
+#define KBASE_IOCTL_MEM_QUERY \
+	_IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query)
+
+#define KBASE_MEM_QUERY_COMMIT_SIZE	((u64)1)
+#define KBASE_MEM_QUERY_VA_SIZE		((u64)2)
+#define KBASE_MEM_QUERY_FLAGS		((u64)3)
+
+/**
+ * struct kbase_ioctl_mem_free - Free a memory region
+ * @gpu_addr: Handle to the region to free
+ */
+struct kbase_ioctl_mem_free {
+	__u64 gpu_addr;
+};
+
+#define KBASE_IOCTL_MEM_FREE \
+	_IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free)
+
+/**
+ * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader
+ * @buffer_count: requested number of dumping buffers
+ * @jm_bm:        counters selection bitmask (JM)
+ * @shader_bm:    counters selection bitmask (Shader)
+ * @tiler_bm:     counters selection bitmask (Tiler)
+ * @mmu_l2_bm:    counters selection bitmask (MMU_L2)
+ *
+ * A file descriptor is returned by the ioctl on success, or a negative value
+ * on error.
+ */
+struct kbase_ioctl_hwcnt_reader_setup {
+	__u32 buffer_count;
+	__u32 jm_bm;
+	__u32 shader_bm;
+	__u32 tiler_bm;
+	__u32 mmu_l2_bm;
+};
+
+#define KBASE_IOCTL_HWCNT_READER_SETUP \
+	_IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup)
+
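+/*
+ * Example: requesting a reader with every counter selected in each block
+ * type. Illustrative only; an all-ones bitmask simply sets every selection
+ * bit, and "fd" is an open kbase device file descriptor.
+ *
+ *	struct kbase_ioctl_hwcnt_reader_setup setup = {
+ *		.buffer_count = 8,
+ *		.jm_bm = ~0u,
+ *		.shader_bm = ~0u,
+ *		.tiler_bm = ~0u,
+ *		.mmu_l2_bm = ~0u,
+ *	};
+ *	int hwcnt_fd = ioctl(fd, KBASE_IOCTL_HWCNT_READER_SETUP, &setup);
+ */
+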
+/**
+ * struct kbase_ioctl_hwcnt_enable - Enable hardware counter collection
+ * @dump_buffer:  GPU address to write counters to
+ * @jm_bm:        counters selection bitmask (JM)
+ * @shader_bm:    counters selection bitmask (Shader)
+ * @tiler_bm:     counters selection bitmask (Tiler)
+ * @mmu_l2_bm:    counters selection bitmask (MMU_L2)
+ */
+struct kbase_ioctl_hwcnt_enable {
+	__u64 dump_buffer;
+	__u32 jm_bm;
+	__u32 shader_bm;
+	__u32 tiler_bm;
+	__u32 mmu_l2_bm;
+};
+
+#define KBASE_IOCTL_HWCNT_ENABLE \
+	_IOW(KBASE_IOCTL_TYPE, 9, struct kbase_ioctl_hwcnt_enable)
+
+#define KBASE_IOCTL_HWCNT_DUMP \
+	_IO(KBASE_IOCTL_TYPE, 10)
+
+#define KBASE_IOCTL_HWCNT_CLEAR \
+	_IO(KBASE_IOCTL_TYPE, 11)
+
+/**
+ * struct kbase_ioctl_hwcnt_values - Values to set the dummy counters to.
+ * @data:    Counter samples for the dummy model.
+ * @size:    Size of the counter sample data.
+ * @padding: Padding.
+ */
+struct kbase_ioctl_hwcnt_values {
+	__u64 data;
+	__u32 size;
+	__u32 padding;
+};
+
+#define KBASE_IOCTL_HWCNT_SET \
+	_IOW(KBASE_IOCTL_TYPE, 32, struct kbase_ioctl_hwcnt_values)
+
+/**
+ * struct kbase_ioctl_disjoint_query - Query the disjoint counter
+ * @counter:   A counter of disjoint events in the kernel
+ */
+struct kbase_ioctl_disjoint_query {
+	__u32 counter;
+};
+
+#define KBASE_IOCTL_DISJOINT_QUERY \
+	_IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query)
+
+/**
+ * struct kbase_ioctl_get_ddk_version - Query the kernel version
+ * @version_buffer: Buffer to receive the kernel version string
+ * @size: Size of the buffer
+ * @padding: Padding
+ *
+ * The ioctl will return the number of bytes written into version_buffer
+ * (which includes a NULL byte) or a negative error code
+ *
+ * The ioctl request code has to be _IOW because the data in the ioctl struct
+ * is copied to the kernel, even though the kernel then writes out the
+ * version info to the buffer specified in the ioctl.
+ */
+struct kbase_ioctl_get_ddk_version {
+	__u64 version_buffer;
+	__u32 size;
+	__u32 padding;
+};
+
+#define KBASE_IOCTL_GET_DDK_VERSION \
+	_IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version)
+
+/**
+ * struct kbase_ioctl_mem_jit_init_old - Initialise the JIT memory allocator
+ *
+ * @va_pages: Number of VA pages to reserve for JIT
+ *
+ * Note that depending on the VA size of the application and GPU, the value
+ * specified in @va_pages may be ignored.
+ *
+ * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for
+ * backwards compatibility.
+ */
+struct kbase_ioctl_mem_jit_init_old {
+	__u64 va_pages;
+};
+
+#define KBASE_IOCTL_MEM_JIT_INIT_OLD \
+	_IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_old)
+
+/**
+ * struct kbase_ioctl_mem_jit_init - Initialise the JIT memory allocator
+ *
+ * @va_pages: Number of VA pages to reserve for JIT
+ * @max_allocations: Maximum number of concurrent allocations
+ * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%)
+ * @group_id: Group ID to be used for physical allocations
+ * @padding: Currently unused, must be zero
+ *
+ * Note that depending on the VA size of the application and GPU, the value
+ * specified in @va_pages may be ignored.
+ */
+struct kbase_ioctl_mem_jit_init {
+	__u64 va_pages;
+	__u8 max_allocations;
+	__u8 trim_level;
+	__u8 group_id;
+	__u8 padding[5];
+};
+
+#define KBASE_IOCTL_MEM_JIT_INIT \
+	_IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init)
+
+/**
+ * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory
+ *
+ * @handle: GPU memory handle (GPU VA)
+ * @user_addr: The address where it is mapped in user space
+ * @size: The number of bytes to synchronise
+ * @type: The direction to synchronise: 0 is sync to memory (clean),
+ * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants.
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero
+ */
+struct kbase_ioctl_mem_sync {
+	__u64 handle;
+	__u64 user_addr;
+	__u64 size;
+	__u8 type;
+	__u8 padding[7];
+};
+
+#define KBASE_IOCTL_MEM_SYNC \
+	_IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync)
+
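+/*
+ * Example: cleaning CPU-written data so the GPU sees it. Illustrative only;
+ * "gpu_va", "cpu_ptr" and "nbytes" are assumed to describe the same mapped
+ * range, and type 0 (sync to memory) follows the description above.
+ *
+ *	struct kbase_ioctl_mem_sync sync = { 0 };
+ *
+ *	sync.handle = gpu_va;
+ *	sync.user_addr = (__u64)(uintptr_t)cpu_ptr;
+ *	sync.size = nbytes;
+ *	sync.type = 0;
+ *	ioctl(fd, KBASE_IOCTL_MEM_SYNC, &sync);
+ */
+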
+/**
+ * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer
+ *
+ * @gpu_addr: The GPU address of the memory region
+ * @cpu_addr: The CPU address to locate
+ * @size: A size in bytes to validate is contained within the region
+ * @offset: The offset from the start of the memory region to @cpu_addr
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_find_cpu_offset {
+	struct {
+		__u64 gpu_addr;
+		__u64 cpu_addr;
+		__u64 size;
+	} in;
+	struct {
+		__u64 offset;
+	} out;
+};
+
+#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \
+	_IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset)
+
+/**
+ * struct kbase_ioctl_get_context_id - Get the kernel context ID
+ *
+ * @id: The kernel context ID
+ */
+struct kbase_ioctl_get_context_id {
+	__u32 id;
+};
+
+#define KBASE_IOCTL_GET_CONTEXT_ID \
+	_IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id)
+
+/**
+ * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd
+ *
+ * @flags: Flags
+ *
+ * The ioctl returns a file descriptor when successful
+ */
+struct kbase_ioctl_tlstream_acquire {
+	__u32 flags;
+};
+
+#define KBASE_IOCTL_TLSTREAM_ACQUIRE \
+	_IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire)
+
+#define KBASE_IOCTL_TLSTREAM_FLUSH \
+	_IO(KBASE_IOCTL_TYPE, 19)
+
+/**
+ * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region
+ *
+ * @gpu_addr: The memory region to modify
+ * @pages:    The number of physical pages that should be present
+ *
+ * The ioctl may return one of the following error codes, or 0 for success:
+ *   -ENOMEM: Out of memory
+ *   -EINVAL: Invalid arguments
+ */
+struct kbase_ioctl_mem_commit {
+	__u64 gpu_addr;
+	__u64 pages;
+};
+
+#define KBASE_IOCTL_MEM_COMMIT \
+	_IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit)
+
+/**
+ * union kbase_ioctl_mem_alias - Create an alias of memory regions
+ * @flags: Flags, see BASE_MEM_xxx
+ * @stride: Bytes between start of each memory region
+ * @nents: The number of regions to pack together into the alias
+ * @aliasing_info: Pointer to an array of struct base_mem_aliasing_info
+ * @gpu_va: Address of the new alias
+ * @va_pages: Size of the new alias
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_alias {
+	struct {
+		__u64 flags;
+		__u64 stride;
+		__u64 nents;
+		__u64 aliasing_info;
+	} in;
+	struct {
+		__u64 flags;
+		__u64 gpu_va;
+		__u64 va_pages;
+	} out;
+};
+
+#define KBASE_IOCTL_MEM_ALIAS \
+	_IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias)
+
+/**
+ * union kbase_ioctl_mem_import - Import memory for use by the GPU
+ * @flags: Flags, see BASE_MEM_xxx
+ * @phandle: Handle to the external memory
+ * @type: Type of external memory, see base_mem_import_type
+ * @padding: Amount of extra VA pages to append to the imported buffer
+ * @gpu_va: Address of the new alias
+ * @va_pages: Size of the new alias
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_import {
+	struct {
+		__u64 flags;
+		__u64 phandle;
+		__u32 type;
+		__u32 padding;
+	} in;
+	struct {
+		__u64 flags;
+		__u64 gpu_va;
+		__u64 va_pages;
+	} out;
+};
+
+#define KBASE_IOCTL_MEM_IMPORT \
+	_IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import)
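+
+/*
+ * Sketch of an import call (illustrative only: `fd`, `import_flags`,
+ * `import_type`, `handle`, `new_region`, and the convention that @phandle
+ * carries a user pointer cast to __u64 are all assumptions made for the
+ * purpose of this example):
+ *
+ *	union kbase_ioctl_mem_import import = { 0 };
+ *
+ *	import.in.flags = import_flags;
+ *	import.in.phandle = (__u64)(uintptr_t)&handle;
+ *	import.in.type = import_type;
+ *
+ *	if (ioctl(fd, KBASE_IOCTL_MEM_IMPORT, &import) == 0)
+ *		new_region = import.out.gpu_va;
+ */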
+
+/**
+ * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region
+ * @gpu_va: The GPU region to modify
+ * @flags: The new flags to set
+ * @mask: Mask of the flags to modify
+ */
+struct kbase_ioctl_mem_flags_change {
+	__u64 gpu_va;
+	__u64 flags;
+	__u64 mask;
+};
+
+#define KBASE_IOCTL_MEM_FLAGS_CHANGE \
+	_IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change)
+
+/**
+ * struct kbase_ioctl_stream_create - Create a synchronisation stream
+ * @name: A name to identify this stream. Must be NULL-terminated.
+ *
+ * Note that this is also called a "timeline", but is named stream to avoid
+ * confusion with other uses of the word.
+ *
+ * Unused bytes in @name (after the first NULL byte) must also be NULL bytes.
+ *
+ * The ioctl returns a file descriptor.
+ */
+struct kbase_ioctl_stream_create {
+	char name[32];
+};
+
+#define KBASE_IOCTL_STREAM_CREATE \
+	_IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create)
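+
+/*
+ * Example (illustrative only; `fd` is assumed): the designated initializer
+ * zero-fills the bytes of @name after the terminating NULL byte, as
+ * required above.
+ *
+ *	struct kbase_ioctl_stream_create stream = { .name = "my-timeline" };
+ *	int stream_fd = ioctl(fd, KBASE_IOCTL_STREAM_CREATE, &stream);
+ */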
+
+/**
+ * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence
+ * @fd: The file descriptor to validate
+ */
+struct kbase_ioctl_fence_validate {
+	int fd;
+};
+
+#define KBASE_IOCTL_FENCE_VALIDATE \
+	_IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate)
+
+/**
+ * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel
+ * @buffer: Pointer to the information
+ * @len: Length of the information, in bytes
+ * @padding: Padding
+ *
+ * The data provided is accessible through a debugfs file
+ */
+struct kbase_ioctl_mem_profile_add {
+	__u64 buffer;
+	__u32 len;
+	__u32 padding;
+};
+
+#define KBASE_IOCTL_MEM_PROFILE_ADD \
+	_IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add)
+
+/**
+ * struct kbase_ioctl_soft_event_update - Update the status of a soft-event
+ * @event: GPU address of the event which has been updated
+ * @new_status: The new status to set
+ * @flags: Flags for future expansion
+ */
+struct kbase_ioctl_soft_event_update {
+	__u64 event;
+	__u32 new_status;
+	__u32 flags;
+};
+
+#define KBASE_IOCTL_SOFT_EVENT_UPDATE \
+	_IOW(KBASE_IOCTL_TYPE, 28, struct kbase_ioctl_soft_event_update)
+
+/**
+ * struct kbase_ioctl_sticky_resource_map - Permanently map an external resource
+ * @count: Number of resources
+ * @address: Array of u64 GPU addresses of the external resources to map
+ */
+struct kbase_ioctl_sticky_resource_map {
+	__u64 count;
+	__u64 address;
+};
+
+#define KBASE_IOCTL_STICKY_RESOURCE_MAP \
+	_IOW(KBASE_IOCTL_TYPE, 29, struct kbase_ioctl_sticky_resource_map)
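+
+/*
+ * Example (illustrative only; `fd` and the GPU addresses are assumed):
+ * @address carries a user pointer, cast to __u64, to an array of @count
+ * GPU addresses.
+ *
+ *	__u64 resources[2] = { res0_gpu_va, res1_gpu_va };
+ *	struct kbase_ioctl_sticky_resource_map map = {
+ *		.count = 2,
+ *		.address = (__u64)(uintptr_t)resources,
+ *	};
+ *
+ *	ioctl(fd, KBASE_IOCTL_STICKY_RESOURCE_MAP, &map);
+ */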
+
+/**
+ * struct kbase_ioctl_sticky_resource_unmap - Unmap a resource which was
+ *                                            previously permanently mapped
+ * @count: Number of resources
+ * @address: Array of u64 GPU addresses of the external resources to unmap
+ */
+struct kbase_ioctl_sticky_resource_unmap {
+	__u64 count;
+	__u64 address;
+};
+
+#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \
+	_IOW(KBASE_IOCTL_TYPE, 30, struct kbase_ioctl_sticky_resource_unmap)
+
+/**
+ * union kbase_ioctl_mem_find_gpu_start_and_offset - Find the start address of
+ *                                                   the GPU memory region for
+ *                                                   the given gpu address and
+ *                                                   the offset of that address
+ *                                                   into the region
+ *
+ * @gpu_addr: GPU virtual address
+ * @size: Size in bytes within the region
+ * @start: Address of the beginning of the memory region enclosing @gpu_addr
+ *         for the length of @offset bytes
+ * @offset: The offset from the start of the memory region to @gpu_addr
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_find_gpu_start_and_offset {
+	struct {
+		__u64 gpu_addr;
+		__u64 size;
+	} in;
+	struct {
+		__u64 start;
+		__u64 offset;
+	} out;
+};
+
+#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \
+	_IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset)
+
+#define KBASE_IOCTL_CINSTR_GWT_START \
+	_IO(KBASE_IOCTL_TYPE, 33)
+
+#define KBASE_IOCTL_CINSTR_GWT_STOP \
+	_IO(KBASE_IOCTL_TYPE, 34)
+
+/**
+ * union kbase_ioctl_cinstr_gwt_dump - Used to collect all GPU write fault
+ *                                     addresses.
+ * @addr_buffer: Address of buffer to hold addresses of GPU-modified areas.
+ * @size_buffer: Address of buffer to hold sizes of modified areas (in pages).
+ * @len: Number of addresses the buffers can hold.
+ * @padding: Currently unused, must be zero.
+ * @more_data_available: Status indicating if more addresses are available.
+ * @no_of_addr_collected: Number of addresses collected into addr_buffer.
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ *
+ * This structure is used when performing a call to dump GPU write fault
+ * addresses.
+ */
+union kbase_ioctl_cinstr_gwt_dump {
+	struct {
+		__u64 addr_buffer;
+		__u64 size_buffer;
+		__u32 len;
+		__u32 padding;
+	} in;
+	struct {
+		__u32 no_of_addr_collected;
+		__u8 more_data_available;
+		__u8 padding[27];
+	} out;
+};
+
+#define KBASE_IOCTL_CINSTR_GWT_DUMP \
+	_IOWR(KBASE_IOCTL_TYPE, 35, union kbase_ioctl_cinstr_gwt_dump)
+
+/**
+ * struct kbase_ioctl_mem_exec_init - Initialise the EXEC_VA memory zone
+ *
+ * @va_pages: Number of VA pages to reserve for EXEC_VA
+ */
+struct kbase_ioctl_mem_exec_init {
+	__u64 va_pages;
+};
+
+#define KBASE_IOCTL_MEM_EXEC_INIT \
+	_IOW(KBASE_IOCTL_TYPE, 38, struct kbase_ioctl_mem_exec_init)
+
+/**
+ * union kbase_ioctl_get_cpu_gpu_timeinfo - Request zero or more types of
+ *                                          cpu/gpu time (counter values)
+ *
+ * @request_flags: Bit-flags indicating the requested types.
+ * @paddings:      Unused; pads the input to the size of the output structure.
+ * @sec:           Integer part of the monotonic time, in seconds.
+ * @nsec:          Fractional part of the monotonic time, in nanoseconds.
+ * @padding:       Unused, for u64 alignment.
+ * @timestamp:     System wide timestamp (counter) value.
+ * @cycle_counter: GPU cycle counter value.
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ *
+ */
+union kbase_ioctl_get_cpu_gpu_timeinfo {
+	struct {
+		__u32 request_flags;
+		__u32 paddings[7];
+	} in;
+	struct {
+		__u64 sec;
+		__u32 nsec;
+		__u32 padding;
+		__u64 timestamp;
+		__u64 cycle_counter;
+	} out;
+};
+
+#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \
+	_IOWR(KBASE_IOCTL_TYPE, 50, union kbase_ioctl_get_cpu_gpu_timeinfo)
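+
+/*
+ * Example (illustrative only; `fd` and `requested_types`, a bit-mask built
+ * from flag values defined elsewhere, are assumed):
+ *
+ *	union kbase_ioctl_get_cpu_gpu_timeinfo info = { 0 };
+ *
+ *	info.in.request_flags = requested_types;
+ *	if (ioctl(fd, KBASE_IOCTL_GET_CPU_GPU_TIMEINFO, &info) == 0)
+ *		printf("%llu.%09u\n",
+ *		       (unsigned long long)info.out.sec, info.out.nsec);
+ */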
+
+/***************
+ * test ioctls *
+ ***************/
+#if MALI_UNIT_TEST
+/* These ioctls are purely for test purposes and are not used in the production
+ * driver, they therefore may change without notice
+ */
+
+#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1)
+
+/**
+ * struct kbase_ioctl_tlstream_test - Start a timeline stream test
+ *
+ * @tpw_count: number of trace point writers in each context
+ * @msg_delay: time delay between tracepoints from one writer in milliseconds
+ * @msg_count: number of trace points written by one writer
+ * @aux_msg:   if non-zero aux messages will be included
+ */
+struct kbase_ioctl_tlstream_test {
+	__u32 tpw_count;
+	__u32 msg_delay;
+	__u32 msg_count;
+	__u32 aux_msg;
+};
+
+#define KBASE_IOCTL_TLSTREAM_TEST \
+	_IOW(KBASE_IOCTL_TEST_TYPE, 1, struct kbase_ioctl_tlstream_test)
+
+/**
+ * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes
+ * @bytes_collected: number of bytes read by user
+ * @bytes_generated: number of bytes generated by tracepoints
+ */
+struct kbase_ioctl_tlstream_stats {
+	__u32 bytes_collected;
+	__u32 bytes_generated;
+};
+
+#define KBASE_IOCTL_TLSTREAM_STATS \
+	_IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats)
+
+/**
+ * struct kbase_ioctl_cs_event_memory_write - Write an event memory address
+ * @cpu_addr: Memory address to write
+ * @value: Value to write
+ * @padding: Currently unused, must be zero
+ */
+struct kbase_ioctl_cs_event_memory_write {
+	__u64 cpu_addr;
+	__u8 value;
+	__u8 padding[7];
+};
+
+/**
+ * union kbase_ioctl_cs_event_memory_read - Read an event memory address
+ * @cpu_addr: Memory address to read
+ * @value: Value read
+ * @padding: Currently unused, must be zero
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_cs_event_memory_read {
+	struct {
+		__u64 cpu_addr;
+	} in;
+	struct {
+		__u8 value;
+		__u8 padding[7];
+	} out;
+};
+
+#endif
+
+/* Customer extension range */
+#define KBASE_IOCTL_EXTRA_TYPE (KBASE_IOCTL_TYPE + 2)
+
+/* If the integration needs extra ioctl add them there
+ * like this:
+ *
+ * struct my_ioctl_args {
+ *  ....
+ * }
+ *
+ * #define KBASE_IOCTL_MY_IOCTL \
+ *         _IOWR(KBASE_IOCTL_EXTRA_TYPE, 0, struct my_ioctl_args)
+ */
+
+/**********************************
+ * Definitions for GPU properties *
+ **********************************/
+#define KBASE_GPUPROP_VALUE_SIZE_U8	(0x0)
+#define KBASE_GPUPROP_VALUE_SIZE_U16	(0x1)
+#define KBASE_GPUPROP_VALUE_SIZE_U32	(0x2)
+#define KBASE_GPUPROP_VALUE_SIZE_U64	(0x3)
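+
+/*
+ * The size codes above describe how large each property value is when it is
+ * serialised into the buffer returned by the GPU-properties query. A decoder
+ * sketch (illustrative only; the (id << 2) | size packing and the
+ * read_le32() little-endian load helper are assumptions):
+ *
+ *	__u32 tag = read_le32(buf + pos);
+ *	__u32 prop_id = tag >> 2;
+ *	__u32 size_code = tag & 0x3;
+ */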
+
+#define KBASE_GPUPROP_PRODUCT_ID			1
+#define KBASE_GPUPROP_VERSION_STATUS			2
+#define KBASE_GPUPROP_MINOR_REVISION			3
+#define KBASE_GPUPROP_MAJOR_REVISION			4
+/* 5 previously used for GPU speed */
+#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX			6
+/* 7 previously used for minimum GPU speed */
+#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE		8
+#define KBASE_GPUPROP_TEXTURE_FEATURES_0		9
+#define KBASE_GPUPROP_TEXTURE_FEATURES_1		10
+#define KBASE_GPUPROP_TEXTURE_FEATURES_2		11
+#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE		12
+
+#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE			13
+#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE		14
+#define KBASE_GPUPROP_L2_NUM_L2_SLICES			15
+
+#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES		16
+#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS		17
+
+#define KBASE_GPUPROP_MAX_THREADS			18
+#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE		19
+#define KBASE_GPUPROP_MAX_BARRIER_SIZE			20
+#define KBASE_GPUPROP_MAX_REGISTERS			21
+#define KBASE_GPUPROP_MAX_TASK_QUEUE			22
+#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT		23
+#define KBASE_GPUPROP_IMPL_TECH				24
+
+#define KBASE_GPUPROP_RAW_SHADER_PRESENT		25
+#define KBASE_GPUPROP_RAW_TILER_PRESENT			26
+#define KBASE_GPUPROP_RAW_L2_PRESENT			27
+#define KBASE_GPUPROP_RAW_STACK_PRESENT			28
+#define KBASE_GPUPROP_RAW_L2_FEATURES			29
+#define KBASE_GPUPROP_RAW_CORE_FEATURES			30
+#define KBASE_GPUPROP_RAW_MEM_FEATURES			31
+#define KBASE_GPUPROP_RAW_MMU_FEATURES			32
+#define KBASE_GPUPROP_RAW_AS_PRESENT			33
+#define KBASE_GPUPROP_RAW_JS_PRESENT			34
+#define KBASE_GPUPROP_RAW_JS_FEATURES_0			35
+#define KBASE_GPUPROP_RAW_JS_FEATURES_1			36
+#define KBASE_GPUPROP_RAW_JS_FEATURES_2			37
+#define KBASE_GPUPROP_RAW_JS_FEATURES_3			38
+#define KBASE_GPUPROP_RAW_JS_FEATURES_4			39
+#define KBASE_GPUPROP_RAW_JS_FEATURES_5			40
+#define KBASE_GPUPROP_RAW_JS_FEATURES_6			41
+#define KBASE_GPUPROP_RAW_JS_FEATURES_7			42
+#define KBASE_GPUPROP_RAW_JS_FEATURES_8			43
+#define KBASE_GPUPROP_RAW_JS_FEATURES_9			44
+#define KBASE_GPUPROP_RAW_JS_FEATURES_10		45
+#define KBASE_GPUPROP_RAW_JS_FEATURES_11		46
+#define KBASE_GPUPROP_RAW_JS_FEATURES_12		47
+#define KBASE_GPUPROP_RAW_JS_FEATURES_13		48
+#define KBASE_GPUPROP_RAW_JS_FEATURES_14		49
+#define KBASE_GPUPROP_RAW_JS_FEATURES_15		50
+#define KBASE_GPUPROP_RAW_TILER_FEATURES		51
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0		52
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1		53
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2		54
+#define KBASE_GPUPROP_RAW_GPU_ID			55
+#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS		56
+#define KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE	57
+#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE	58
+#define KBASE_GPUPROP_RAW_THREAD_FEATURES		59
+#define KBASE_GPUPROP_RAW_COHERENCY_MODE		60
+
+#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS		61
+#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS		62
+#define KBASE_GPUPROP_COHERENCY_COHERENCY		63
+#define KBASE_GPUPROP_COHERENCY_GROUP_0			64
+#define KBASE_GPUPROP_COHERENCY_GROUP_1			65
+#define KBASE_GPUPROP_COHERENCY_GROUP_2			66
+#define KBASE_GPUPROP_COHERENCY_GROUP_3			67
+#define KBASE_GPUPROP_COHERENCY_GROUP_4			68
+#define KBASE_GPUPROP_COHERENCY_GROUP_5			69
+#define KBASE_GPUPROP_COHERENCY_GROUP_6			70
+#define KBASE_GPUPROP_COHERENCY_GROUP_7			71
+#define KBASE_GPUPROP_COHERENCY_GROUP_8			72
+#define KBASE_GPUPROP_COHERENCY_GROUP_9			73
+#define KBASE_GPUPROP_COHERENCY_GROUP_10		74
+#define KBASE_GPUPROP_COHERENCY_GROUP_11		75
+#define KBASE_GPUPROP_COHERENCY_GROUP_12		76
+#define KBASE_GPUPROP_COHERENCY_GROUP_13		77
+#define KBASE_GPUPROP_COHERENCY_GROUP_14		78
+#define KBASE_GPUPROP_COHERENCY_GROUP_15		79
+
+#define KBASE_GPUPROP_TEXTURE_FEATURES_3		80
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3		81
+
+#define KBASE_GPUPROP_NUM_EXEC_ENGINES                  82
+
+#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC		83
+#define KBASE_GPUPROP_TLS_ALLOC				84
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd.c b/drivers/gpu/arm/midgard/mali_kbase_jd.c
new file mode 100644
index 0000000..02d5976
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_jd.c
@@ -0,0 +1,1543 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/dma-buf.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+#include <mali_kbase.h>
+#include <linux/random.h>
+#include <linux/version.h>
+#include <linux/ratelimit.h>
+
+#include <mali_kbase_jm.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_tracepoints.h>
+
+#include "mali_kbase_dma_fence.h"
+
+#define beenthere(kctx, f, a...)  dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
+/* random32 was renamed to prandom_u32 in 3.8 */
+#define prandom_u32 random32
+#endif
+
+/* Return whether katom will run on the GPU or not. Currently only soft jobs and
+ * dependency-only atoms do not run on the GPU */
+#define IS_GPU_ATOM(katom) (!((katom->core_req & BASE_JD_REQ_SOFT_JOB) ||  \
+			((katom->core_req & BASE_JD_REQ_ATOM_TYPE) ==    \
+							BASE_JD_REQ_DEP)))
+/*
+ * This is the kernel side of the API. Only entry points are:
+ * - kbase_jd_submit(): Called from userspace to submit a single bag
+ * - kbase_jd_done(): Called from interrupt context to track the
+ *   completion of a job.
+ * Callouts:
+ * - to the job manager (enqueue a job)
+ * - to the event subsystem (signals the completion/failure of bag/job-chains).
+ */
+
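+/**
+ * get_compat_pointer - Decode a user pointer passed from userspace as a u64
+ * @kctx: Context the pointer was supplied by
+ * @p: Pointer value, zero-extended to 64 bits
+ *
+ * For 32-bit (compat) clients the value is decoded with compat_ptr();
+ * otherwise it is converted with u64_to_user_ptr().
+ */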
+static void __user *
+get_compat_pointer(struct kbase_context *kctx, const u64 p)
+{
+#ifdef CONFIG_COMPAT
+	if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+		return compat_ptr(p);
+#endif
+	return u64_to_user_ptr(p);
+}
+
+/* Runs an atom, either by handing it to the JS or by immediately running it in the case of soft-jobs
+ *
+ * Returns whether the JS needs a reschedule.
+ *
+ * Note that the caller must also check the atom status and
+ * if it is KBASE_JD_ATOM_STATE_COMPLETED must call jd_done_nolock
+ */
+static int jd_run_atom(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+
+	KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+	if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) {
+		/* Dependency only atom */
+		katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+		return 0;
+	} else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+		/* Soft-job */
+		if (katom->will_fail_event_code) {
+			kbase_finish_soft_job(katom);
+			katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+			return 0;
+		}
+		if (kbase_process_soft_job(katom) == 0) {
+			kbase_finish_soft_job(katom);
+			katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+		}
+		return 0;
+	}
+
+	katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+	/* Queue an action about whether we should try scheduling a context */
+	return kbasep_js_add_job(kctx, katom);
+}
+
+#if defined(CONFIG_MALI_DMA_FENCE)
+void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom)
+{
+	struct kbase_device *kbdev;
+
+	KBASE_DEBUG_ASSERT(katom);
+	kbdev = katom->kctx->kbdev;
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	/* Check whether the atom's other dependencies were already met. If
+	 * katom is a GPU atom then the job scheduler may be able to represent
+	 * the dependencies, hence we may attempt to submit it before they are
+	 * met. Other atoms must have had both dependencies resolved.
+	 */
+	if (IS_GPU_ATOM(katom) ||
+			(!kbase_jd_katom_dep_atom(&katom->dep[0]) &&
+			!kbase_jd_katom_dep_atom(&katom->dep[1]))) {
+		/* katom dep complete, attempt to run it */
+		bool resched = false;
+
+		resched = jd_run_atom(katom);
+
+		if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+			/* The atom has already finished */
+			resched |= jd_done_nolock(katom, NULL);
+		}
+
+		if (resched)
+			kbase_js_sched_all(kbdev);
+	}
+}
+#endif
+
+void kbase_jd_free_external_resources(struct kbase_jd_atom *katom)
+{
+#ifdef CONFIG_MALI_DMA_FENCE
+	/* Flush dma-fence workqueue to ensure that any callbacks that may have
+	 * been queued are done before continuing.
+	 * Any successfully completed atom would have had all its callbacks
+	 * completed before the atom was run, so only flush for failed atoms.
+	 */
+	if (katom->event_code != BASE_JD_EVENT_DONE)
+		flush_workqueue(katom->kctx->dma_fence.wq);
+#endif /* CONFIG_MALI_DMA_FENCE */
+}
+
+static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
+{
+	KBASE_DEBUG_ASSERT(katom);
+	KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	kbase_dma_fence_signal(katom);
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+	kbase_gpu_vm_lock(katom->kctx);
+	/* only roll back if extres is non-NULL */
+	if (katom->extres) {
+		u32 res_no;
+
+		res_no = katom->nr_extres;
+		while (res_no-- > 0) {
+			struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
+			struct kbase_va_region *reg;
+
+			reg = kbase_region_tracker_find_region_base_address(
+					katom->kctx,
+					katom->extres[res_no].gpu_address);
+			kbase_unmap_external_resource(katom->kctx, reg, alloc);
+		}
+		kfree(katom->extres);
+		katom->extres = NULL;
+	}
+	kbase_gpu_vm_unlock(katom->kctx);
+}
+
+/*
+ * Set up external resources needed by this job.
+ *
+ * jctx.lock must be held when this is called.
+ */
+
+static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const struct base_jd_atom_v2 *user_atom)
+{
+	int err_ret_val = -EINVAL;
+	u32 res_no;
+#ifdef CONFIG_MALI_DMA_FENCE
+	struct kbase_dma_fence_resv_info info = {
+		.resv_objs = NULL,
+		.dma_fence_resv_count = 0,
+		.dma_fence_excl_bitmap = NULL
+	};
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	/*
+	 * When both dma-buf fence and Android native sync is enabled, we
+	 * disable dma-buf fence for contexts that are using Android native
+	 * fences.
+	 */
+	const bool implicit_sync = !kbase_ctx_flag(katom->kctx,
+						   KCTX_NO_IMPLICIT_SYNC);
+#else /* CONFIG_SYNC || CONFIG_SYNC_FILE*/
+	const bool implicit_sync = true;
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+#endif /* CONFIG_MALI_DMA_FENCE */
+	struct base_external_resource *input_extres;
+
+	KBASE_DEBUG_ASSERT(katom);
+	KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
+
+	/* no resources encoded, early out */
+	if (!katom->nr_extres)
+		return -EINVAL;
+
+	katom->extres = kmalloc_array(katom->nr_extres, sizeof(*katom->extres), GFP_KERNEL);
+	if (!katom->extres)
+		return -ENOMEM;
+
+	/* copy user buffer to the end of our real buffer.
+	 * Make sure the struct sizes haven't changed in a way
+	 * we don't support */
+	BUILD_BUG_ON(sizeof(*input_extres) > sizeof(*katom->extres));
+	input_extres = (struct base_external_resource *)
+			(((unsigned char *)katom->extres) +
+			(sizeof(*katom->extres) - sizeof(*input_extres)) *
+			katom->nr_extres);
+
+	if (copy_from_user(input_extres,
+			get_compat_pointer(katom->kctx, user_atom->extres_list),
+			sizeof(*input_extres) * katom->nr_extres) != 0) {
+		err_ret_val = -EINVAL;
+		goto early_err_out;
+	}
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	if (implicit_sync) {
+		info.resv_objs = kmalloc_array(katom->nr_extres,
+					sizeof(struct reservation_object *),
+					GFP_KERNEL);
+		if (!info.resv_objs) {
+			err_ret_val = -ENOMEM;
+			goto early_err_out;
+		}
+
+		info.dma_fence_excl_bitmap =
+				kcalloc(BITS_TO_LONGS(katom->nr_extres),
+					sizeof(unsigned long), GFP_KERNEL);
+		if (!info.dma_fence_excl_bitmap) {
+			err_ret_val = -ENOMEM;
+			goto early_err_out;
+		}
+	}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+	/* Take the process's mmap lock */
+	down_read(&current->mm->mmap_sem);
+
+	/* need to keep the GPU VM locked while we set up UMM buffers */
+	kbase_gpu_vm_lock(katom->kctx);
+	for (res_no = 0; res_no < katom->nr_extres; res_no++) {
+		struct base_external_resource *res = &input_extres[res_no];
+		struct kbase_va_region *reg;
+		struct kbase_mem_phy_alloc *alloc;
+#ifdef CONFIG_MALI_DMA_FENCE
+		bool exclusive = !!(res->ext_resource &
+				    BASE_EXT_RES_ACCESS_EXCLUSIVE);
+#endif
+		reg = kbase_region_tracker_find_region_enclosing_address(
+				katom->kctx,
+				res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+		/* did we find a matching region object? */
+		if (kbase_is_region_invalid_or_free(reg)) {
+			/* roll back */
+			goto failed_loop;
+		}
+
+		if (!(katom->core_req & BASE_JD_REQ_SOFT_JOB) &&
+				(reg->flags & KBASE_REG_PROTECTED)) {
+			katom->atom_flags |= KBASE_KATOM_FLAG_PROTECTED;
+		}
+
+		alloc = kbase_map_external_resource(katom->kctx, reg,
+				current->mm);
+		if (!alloc) {
+			err_ret_val = -EINVAL;
+			goto failed_loop;
+		}
+
+#ifdef CONFIG_MALI_DMA_FENCE
+		if (implicit_sync &&
+		    reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+			struct reservation_object *resv;
+
+			resv = reg->gpu_alloc->imported.umm.dma_buf->resv;
+			if (resv)
+				kbase_dma_fence_add_reservation(resv, &info,
+								exclusive);
+		}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+		/* Finish by updating the out array with the data we found.
+		 * NOTE: It is important that this is the last thing we do (or
+		 * at least not before the first write) as we overwrite
+		 * elements as we loop and could be overwriting ourselves, so
+		 * no writes until the last read for an element.
+		 */
+		/* Save the start_pfn (as an address, not a pfn) so we can use
+		 * a fast lookup later */
+		katom->extres[res_no].gpu_address = reg->start_pfn << PAGE_SHIFT;
+		katom->extres[res_no].alloc = alloc;
+	}
+	/* successfully parsed the extres array */
+	/* drop the vm lock now */
+	kbase_gpu_vm_unlock(katom->kctx);
+
+	/* Release the process's mmap lock */
+	up_read(&current->mm->mmap_sem);
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	if (implicit_sync) {
+		if (info.dma_fence_resv_count) {
+			int ret;
+
+			ret = kbase_dma_fence_wait(katom, &info);
+			if (ret < 0)
+				goto failed_dma_fence_setup;
+		}
+
+		kfree(info.resv_objs);
+		kfree(info.dma_fence_excl_bitmap);
+	}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+	/* all done OK */
+	return 0;
+
+/* error handling section */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+failed_dma_fence_setup:
+	/* Lock the process's mmap lock */
+	down_read(&current->mm->mmap_sem);
+
+	/* lock before we unmap */
+	kbase_gpu_vm_lock(katom->kctx);
+#endif
+
+ failed_loop:
+	/* undo the loop work */
+	while (res_no-- > 0) {
+		struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
+
+		kbase_unmap_external_resource(katom->kctx, NULL, alloc);
+	}
+	kbase_gpu_vm_unlock(katom->kctx);
+
+	/* Release the process's mmap lock */
+	up_read(&current->mm->mmap_sem);
+
+ early_err_out:
+	kfree(katom->extres);
+	katom->extres = NULL;
+#ifdef CONFIG_MALI_DMA_FENCE
+	if (implicit_sync) {
+		kfree(info.resv_objs);
+		kfree(info.dma_fence_excl_bitmap);
+	}
+#endif
+	return err_ret_val;
+}
+
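+/**
+ * jd_resolve_dep - Propagate the completion of @katom to its dependents
+ * @out_list: List to which atoms that become runnable are appended
+ * @katom: Atom that has just completed
+ * @d: Dependency slot (0 or 1) of @katom to resolve
+ * @ctx_is_dying: True if the context is being zapped
+ *
+ * Walks the list of atoms that depend on @katom through slot @d, clears the
+ * dependency, propagates any failure code, and queues atoms whose remaining
+ * dependencies are now satisfied onto @out_list.
+ */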
+static inline void jd_resolve_dep(struct list_head *out_list,
+					struct kbase_jd_atom *katom,
+					u8 d, bool ctx_is_dying)
+{
+	u8 other_d = !d;
+
+	while (!list_empty(&katom->dep_head[d])) {
+		struct kbase_jd_atom *dep_atom;
+		struct kbase_jd_atom *other_dep_atom;
+		u8 dep_type;
+
+		dep_atom = list_entry(katom->dep_head[d].next,
+				struct kbase_jd_atom, dep_item[d]);
+		list_del(katom->dep_head[d].next);
+
+		dep_type = kbase_jd_katom_dep_type(&dep_atom->dep[d]);
+		kbase_jd_katom_dep_clear(&dep_atom->dep[d]);
+
+		if (katom->event_code != BASE_JD_EVENT_DONE &&
+			(dep_type != BASE_JD_DEP_TYPE_ORDER)) {
+#ifdef CONFIG_MALI_DMA_FENCE
+			kbase_dma_fence_cancel_callbacks(dep_atom);
+#endif
+
+			dep_atom->event_code = katom->event_code;
+			KBASE_DEBUG_ASSERT(dep_atom->status !=
+						KBASE_JD_ATOM_STATE_UNUSED);
+
+			dep_atom->will_fail_event_code = dep_atom->event_code;
+		}
+		other_dep_atom = (struct kbase_jd_atom *)
+			kbase_jd_katom_dep_atom(&dep_atom->dep[other_d]);
+
+		if (!dep_atom->in_jd_list && (!other_dep_atom ||
+				(IS_GPU_ATOM(dep_atom) && !ctx_is_dying &&
+				!dep_atom->will_fail_event_code &&
+				!other_dep_atom->will_fail_event_code))) {
+			bool dep_satisfied = true;
+#ifdef CONFIG_MALI_DMA_FENCE
+			int dep_count;
+
+			dep_count = kbase_fence_dep_count_read(dep_atom);
+			if (likely(dep_count == -1)) {
+				dep_satisfied = true;
+			} else {
+				/*
+				 * There are either still active callbacks, or
+				 * all fences for this @dep_atom have signaled,
+				 * but the worker that will queue the atom has
+				 * not yet run.
+				 *
+				 * Wait for the fences to signal and the fence
+				 * worker to run and handle @dep_atom. If
+				 * @dep_atom was completed due to error on
+				 * @katom, then the fence worker will pick up
+				 * the complete status and error code set on
+				 * @dep_atom above.
+				 */
+				dep_satisfied = false;
+			}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+			if (dep_satisfied) {
+				dep_atom->in_jd_list = true;
+				list_add_tail(&dep_atom->jd_item, out_list);
+			}
+		}
+	}
+}
+
+KBASE_EXPORT_TEST_API(jd_resolve_dep);
+
+/**
+ * is_dep_valid - Validate that a dependency is valid for early dependency
+ *                submission
+ * @katom: Dependency atom to validate
+ *
+ * A dependency is valid if any of the following are true :
+ * - It does not exist (a non-existent dependency does not block submission)
+ * - It is in the job scheduler
+ * - It has completed, does not have a failure event code, and has not been
+ *   marked to fail in the future
+ *
+ * Return: true if valid, false otherwise
+ */
+static bool is_dep_valid(struct kbase_jd_atom *katom)
+{
+	/* If there's no dependency then this is 'valid' from the perspective of
+	 * early dependency submission */
+	if (!katom)
+		return true;
+
+	/* Dependency must have reached the job scheduler */
+	if (katom->status < KBASE_JD_ATOM_STATE_IN_JS)
+		return false;
+
+	/* If dependency has completed and has failed or will fail then it is
+	 * not valid */
+	if (katom->status >= KBASE_JD_ATOM_STATE_HW_COMPLETED &&
+			(katom->event_code != BASE_JD_EVENT_DONE ||
+			katom->will_fail_event_code))
+		return false;
+
+	return true;
+}
+
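+/**
+ * jd_try_submitting_deps - Consider the dependents of @node for early
+ *                          submission
+ * @out_list: List to which submittable dependent atoms are appended
+ * @node: Atom that has just been submitted to the job scheduler
+ *
+ * GPU atoms that depend on @node may already be representable in the job
+ * scheduler; queue those whose dependencies all pass is_dep_valid() onto
+ * @out_list.
+ */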
+static void jd_try_submitting_deps(struct list_head *out_list,
+		struct kbase_jd_atom *node)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		struct list_head *pos;
+
+		list_for_each(pos, &node->dep_head[i]) {
+			struct kbase_jd_atom *dep_atom = list_entry(pos,
+					struct kbase_jd_atom, dep_item[i]);
+
+			if (IS_GPU_ATOM(dep_atom) && !dep_atom->in_jd_list) {
+				/* Check if atom deps look sane */
+				bool dep0_valid = is_dep_valid(
+						dep_atom->dep[0].atom);
+				bool dep1_valid = is_dep_valid(
+						dep_atom->dep[1].atom);
+				bool dep_satisfied = true;
+#ifdef CONFIG_MALI_DMA_FENCE
+				int dep_count;
+
+				dep_count = kbase_fence_dep_count_read(
+								dep_atom);
+				if (likely(dep_count == -1)) {
+					dep_satisfied = true;
+				} else {
+					/*
+					 * There are either still active
+					 * callbacks, or all fences for this
+					 * @dep_atom have signaled, but the
+					 * worker that will queue the atom
+					 * has not yet run.
+					 *
+					 * Wait for the fences to signal and
+					 * the fence worker to run and handle
+					 * @dep_atom. If @dep_atom was
+					 * completed due to error on @katom,
+					 * then the fence worker will pick up
+					 * the complete status and error code
+					 * set on @dep_atom above.
+					 */
+					dep_satisfied = false;
+				}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+				if (dep0_valid && dep1_valid && dep_satisfied) {
+					dep_atom->in_jd_list = true;
+					list_add(&dep_atom->jd_item, out_list);
+				}
+			}
+		}
+	}
+}
+
+/*
+ * Perform the necessary handling of an atom that has finished running
+ * on the GPU.
+ *
+ * Note that if this is a soft-job that has had kbase_prepare_soft_job called on it then the caller
+ * is responsible for calling kbase_finish_soft_job *before* calling this function.
+ *
+ * The caller must hold the kbase_jd_context.lock.
+ */
+bool jd_done_nolock(struct kbase_jd_atom *katom,
+		struct list_head *completed_jobs_ctx)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+	struct list_head completed_jobs;
+	struct list_head runnable_jobs;
+	bool need_to_try_schedule_context = false;
+	int i;
+
+	INIT_LIST_HEAD(&completed_jobs);
+	INIT_LIST_HEAD(&runnable_jobs);
+
+	KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+	/* This is needed in case an atom is failed due to being invalid; this
+	 * can happen *before* the jobs that the atom depends on have
+	 * completed */
+	for (i = 0; i < 2; i++) {
+		if (kbase_jd_katom_dep_atom(&katom->dep[i])) {
+			list_del(&katom->dep_item[i]);
+			kbase_jd_katom_dep_clear(&katom->dep[i]);
+		}
+	}
+
+	/* With PRLAM-10817 or PRLAM-10959 the last tile of a fragment job
+	 * being soft-stopped can fail with BASE_JD_EVENT_TILE_RANGE_FAULT.
+	 *
+	 * So here if the fragment job failed with TILE_RANGE_FAULT and it has
+	 * been soft-stopped, then we promote the error code to
+	 * BASE_JD_EVENT_DONE
+	 */
+
+	if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10817) ||
+	     kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10959)) &&
+	    katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT) {
+		if ((katom->core_req & BASE_JD_REQ_FS) &&
+		    (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED)) {
+			/* Promote the failure to job done */
+			katom->event_code = BASE_JD_EVENT_DONE;
+			katom->atom_flags &=
+				~KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED;
+		}
+	}
+
+	katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+	list_add_tail(&katom->jd_item, &completed_jobs);
+
+	while (!list_empty(&completed_jobs)) {
+		katom = list_entry(completed_jobs.prev, struct kbase_jd_atom, jd_item);
+		list_del(completed_jobs.prev);
+		KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
+
+		for (i = 0; i < 2; i++)
+			jd_resolve_dep(&runnable_jobs, katom, i,
+					kbase_ctx_flag(kctx, KCTX_DYING));
+
+		if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+			kbase_jd_post_external_resources(katom);
+
+		while (!list_empty(&runnable_jobs)) {
+			struct kbase_jd_atom *node;
+
+			node = list_entry(runnable_jobs.next,
+					struct kbase_jd_atom, jd_item);
+			list_del(runnable_jobs.next);
+			node->in_jd_list = false;
+
+			KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+			if (node->status != KBASE_JD_ATOM_STATE_COMPLETED &&
+					!kbase_ctx_flag(kctx, KCTX_DYING)) {
+				need_to_try_schedule_context |= jd_run_atom(node);
+			} else {
+				node->event_code = katom->event_code;
+
+				if (node->core_req &
+							BASE_JD_REQ_SOFT_JOB) {
+					WARN_ON(!list_empty(&node->queue));
+					kbase_finish_soft_job(node);
+				}
+				node->status = KBASE_JD_ATOM_STATE_COMPLETED;
+			}
+
+			if (node->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+				list_add_tail(&node->jd_item, &completed_jobs);
+			} else if (node->status == KBASE_JD_ATOM_STATE_IN_JS &&
+					!node->will_fail_event_code) {
+				/* Node successfully submitted, try submitting
+				 * dependencies as they may now be representable
+				 * in JS */
+				jd_try_submitting_deps(&runnable_jobs, node);
+			}
+		}
+
+		/* Register a completed job as a disjoint event when the GPU
+		 * is in a disjoint state (ie. being reset).
+		 */
+		kbase_disjoint_event_potential(kctx->kbdev);
+		if (completed_jobs_ctx)
+			list_add_tail(&katom->jd_item, completed_jobs_ctx);
+		else
+			kbase_event_post(kctx, katom);
+
+		/* Decrement and check the TOTAL number of jobs. This includes
+		 * those not tracked by the scheduler: 'not ready to run' and
+		 * 'dependency-only' jobs. */
+		/* All events are safely queued now, and we can signal any
+		 * waiter that we've got no more jobs (so we can be safely
+		 * terminated) */
+		if (--kctx->jctx.job_nr == 0)
+			wake_up(&kctx->jctx.zero_jobs_wait);
+	}
+
+	return need_to_try_schedule_context;
+}
+
+KBASE_EXPORT_TEST_API(jd_done_nolock);
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+enum {
+	CORE_REQ_DEP_ONLY,
+	CORE_REQ_SOFT,
+	CORE_REQ_COMPUTE,
+	CORE_REQ_FRAGMENT,
+	CORE_REQ_VERTEX,
+	CORE_REQ_TILER,
+	CORE_REQ_FRAGMENT_VERTEX,
+	CORE_REQ_FRAGMENT_VERTEX_TILER,
+	CORE_REQ_FRAGMENT_TILER,
+	CORE_REQ_VERTEX_TILER,
+	CORE_REQ_UNKNOWN
+};
+static const char * const core_req_strings[] = {
+	"Dependency Only Job",
+	"Soft Job",
+	"Compute Shader Job",
+	"Fragment Shader Job",
+	"Vertex/Geometry Shader Job",
+	"Tiler Job",
+	"Fragment Shader + Vertex/Geometry Shader Job",
+	"Fragment Shader + Vertex/Geometry Shader Job + Tiler Job",
+	"Fragment Shader + Tiler Job",
+	"Vertex/Geometry Shader Job + Tiler Job",
+	"Unknown Job"
+};
+static const char *kbasep_map_core_reqs_to_string(base_jd_core_req core_req)
+{
+	if (core_req & BASE_JD_REQ_SOFT_JOB)
+		return core_req_strings[CORE_REQ_SOFT];
+	if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+		return core_req_strings[CORE_REQ_COMPUTE];
+	switch (core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) {
+	case BASE_JD_REQ_DEP:
+		return core_req_strings[CORE_REQ_DEP_ONLY];
+	case BASE_JD_REQ_FS:
+		return core_req_strings[CORE_REQ_FRAGMENT];
+	case BASE_JD_REQ_CS:
+		return core_req_strings[CORE_REQ_VERTEX];
+	case BASE_JD_REQ_T:
+		return core_req_strings[CORE_REQ_TILER];
+	case (BASE_JD_REQ_FS | BASE_JD_REQ_CS):
+		return core_req_strings[CORE_REQ_FRAGMENT_VERTEX];
+	case (BASE_JD_REQ_FS | BASE_JD_REQ_T):
+		return core_req_strings[CORE_REQ_FRAGMENT_TILER];
+	case (BASE_JD_REQ_CS | BASE_JD_REQ_T):
+		return core_req_strings[CORE_REQ_VERTEX_TILER];
+	case (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T):
+		return core_req_strings[CORE_REQ_FRAGMENT_VERTEX_TILER];
+	}
+	return core_req_strings[CORE_REQ_UNKNOWN];
+}
+#endif
+
+bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *user_atom, struct kbase_jd_atom *katom)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	struct kbase_jd_context *jctx = &kctx->jctx;
+	int queued = 0;
+	int i;
+	int sched_prio;
+	bool ret;
+	bool will_fail = false;
+
+	/* Update the TOTAL number of jobs. This includes those not tracked by
+	 * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
+	jctx->job_nr++;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+	katom->start_timestamp.tv64 = 0;
+#else
+	katom->start_timestamp = 0;
+#endif
+	katom->udata = user_atom->udata;
+	katom->kctx = kctx;
+	katom->nr_extres = user_atom->nr_extres;
+	katom->extres = NULL;
+	katom->device_nr = user_atom->device_nr;
+	katom->jc = user_atom->jc;
+	katom->core_req = user_atom->core_req;
+	katom->jobslot = user_atom->jobslot;
+	katom->atom_flags = 0;
+	katom->retry_count = 0;
+	katom->need_cache_flush_cores_retained = 0;
+	katom->pre_dep = NULL;
+	katom->post_dep = NULL;
+	katom->x_pre_dep = NULL;
+	katom->x_post_dep = NULL;
+	katom->will_fail_event_code = BASE_JD_EVENT_NOT_STARTED;
+	katom->softjob_data = NULL;
+
+	/* Implicitly sets katom->protected_state.enter as well. */
+	katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+
+	katom->age = kctx->age_count++;
+
+	INIT_LIST_HEAD(&katom->queue);
+	INIT_LIST_HEAD(&katom->jd_item);
+#ifdef CONFIG_MALI_DMA_FENCE
+	kbase_fence_dep_count_set(katom, -1);
+#endif
+
+	/* Don't do anything if there is a mess-up with the dependencies.
+	 * This is done in a separate cycle to check both dependencies at once;
+	 * otherwise it would add extra complexity to deal with the 1st
+	 * dependency (just added to the list) if only the 2nd one has an
+	 * invalid config.
+	 */
+	for (i = 0; i < 2; i++) {
+		int dep_atom_number = user_atom->pre_dep[i].atom_id;
+		base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type;
+
+		if (dep_atom_number) {
+			if (dep_atom_type != BASE_JD_DEP_TYPE_ORDER &&
+					dep_atom_type != BASE_JD_DEP_TYPE_DATA) {
+				katom->event_code = BASE_JD_EVENT_JOB_CONFIG_FAULT;
+				katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+
+				/* Wrong dependency setup. Atom will be sent
+				 * back to user space. Do not record any
+				 * dependencies. */
+				KBASE_TLSTREAM_TL_NEW_ATOM(
+						kbdev,
+						katom,
+						kbase_jd_atom_id(kctx, katom));
+				KBASE_TLSTREAM_TL_RET_ATOM_CTX(
+						kbdev,
+						katom, kctx);
+				KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(
+						kbdev,
+						katom,
+						TL_ATOM_STATE_IDLE);
+
+				ret = jd_done_nolock(katom, NULL);
+				goto out;
+			}
+		}
+	}
+
+	/* Add dependencies */
+	for (i = 0; i < 2; i++) {
+		int dep_atom_number = user_atom->pre_dep[i].atom_id;
+		base_jd_dep_type dep_atom_type;
+		struct kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number];
+
+		dep_atom_type = user_atom->pre_dep[i].dependency_type;
+		kbase_jd_katom_dep_clear(&katom->dep[i]);
+
+		if (!dep_atom_number)
+			continue;
+
+		if (dep_atom->status == KBASE_JD_ATOM_STATE_UNUSED ||
+				dep_atom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+
+			if (dep_atom->event_code == BASE_JD_EVENT_DONE)
+				continue;
+			/* don't stop this atom if it has an order dependency
+			 * only to the failed one, try to submit it through
+			 * the normal path
+			 */
+			if (dep_atom_type == BASE_JD_DEP_TYPE_ORDER &&
+					dep_atom->event_code > BASE_JD_EVENT_ACTIVE) {
+				continue;
+			}
+
+			/* Atom has completed, propagate the error code if any */
+			katom->event_code = dep_atom->event_code;
+			katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+
+			/* This atom will be sent back to user space.
+			 * Do not record any dependencies.
+			 */
+			KBASE_TLSTREAM_TL_NEW_ATOM(
+					kbdev,
+					katom,
+					kbase_jd_atom_id(kctx, katom));
+			KBASE_TLSTREAM_TL_RET_ATOM_CTX(kbdev, katom, kctx);
+			KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(kbdev, katom,
+					TL_ATOM_STATE_IDLE);
+
+			will_fail = true;
+
+		} else {
+			/* Atom is in progress, add this atom to the list */
+			list_add_tail(&katom->dep_item[i], &dep_atom->dep_head[i]);
+			kbase_jd_katom_dep_set(&katom->dep[i], dep_atom, dep_atom_type);
+			queued = 1;
+		}
+	}
+
+	if (will_fail) {
+		if (!queued) {
+			if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+				/* This softjob has failed due to a previous
+				 * dependency, however we should still run the
+				 * prepare & finish functions
+				 */
+				int err = kbase_prepare_soft_job(katom);
+
+				if (err >= 0)
+					kbase_finish_soft_job(katom);
+			}
+
+			ret = jd_done_nolock(katom, NULL);
+
+			goto out;
+		} else {
+
+			if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+				/* This softjob has failed due to a previous
+				 * dependency, however we should still run the
+				 * prepare & finish functions
+				 */
+				if (kbase_prepare_soft_job(katom) != 0) {
+					katom->event_code =
+						BASE_JD_EVENT_JOB_INVALID;
+					ret = jd_done_nolock(katom, NULL);
+					goto out;
+				}
+			}
+
+			katom->will_fail_event_code = katom->event_code;
+			ret = false;
+
+			goto out;
+		}
+	} else {
+		/* These must occur after the above loop to ensure that an atom
+		 * that depends on a previous atom with the same number behaves
+		 * as expected */
+		katom->event_code = BASE_JD_EVENT_DONE;
+		katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+	}
+
+	/* For invalid priority, be most lenient and choose the default */
+	sched_prio = kbasep_js_atom_prio_to_sched_prio(user_atom->prio);
+	if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID)
+		sched_prio = KBASE_JS_ATOM_SCHED_PRIO_DEFAULT;
+	katom->sched_priority = sched_prio;
+
+	/* Create a new atom. */
+	KBASE_TLSTREAM_TL_NEW_ATOM(
+			kbdev,
+			katom,
+			kbase_jd_atom_id(kctx, katom));
+	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(kbdev, katom, TL_ATOM_STATE_IDLE);
+	KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY(kbdev, katom, katom->sched_priority);
+	KBASE_TLSTREAM_TL_RET_ATOM_CTX(kbdev, katom, kctx);
+
+	/* Reject atoms with job chain = NULL, as these cause issues with soft-stop */
+	if (!katom->jc && (katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
+		dev_warn(kctx->kbdev->dev, "Rejecting atom with jc = NULL");
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		ret = jd_done_nolock(katom, NULL);
+		goto out;
+	}
+
+	/* Reject atoms with an invalid device_nr */
+	if ((katom->core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) &&
+	    (katom->device_nr >= kctx->kbdev->gpu_props.num_core_groups)) {
+		dev_warn(kctx->kbdev->dev,
+				"Rejecting atom with invalid device_nr %d",
+				katom->device_nr);
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		ret = jd_done_nolock(katom, NULL);
+		goto out;
+	}
+
+	/* Reject atoms with invalid core requirements */
+	if ((katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) &&
+			(katom->core_req & BASE_JD_REQ_EVENT_COALESCE)) {
+		dev_warn(kctx->kbdev->dev,
+				"Rejecting atom with invalid core requirements");
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		katom->core_req &= ~BASE_JD_REQ_EVENT_COALESCE;
+		ret = jd_done_nolock(katom, NULL);
+		goto out;
+	}
+
+	/* Reject soft-job atom of certain types from accessing external resources */
+	if ((katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) &&
+			(((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) == BASE_JD_REQ_SOFT_FENCE_WAIT) ||
+			 ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) == BASE_JD_REQ_SOFT_JIT_ALLOC) ||
+			 ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) == BASE_JD_REQ_SOFT_JIT_FREE))) {
+		dev_warn(kctx->kbdev->dev,
+				"Rejecting soft-job atom accessing external resources");
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		ret = jd_done_nolock(katom, NULL);
+		goto out;
+	}
+
+	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+		/* handle what we need to do to access the external resources */
+		if (kbase_jd_pre_external_resources(katom, user_atom) != 0) {
+			/* setup failed (no access, bad resource, unknown resource types, etc.) */
+			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+			ret = jd_done_nolock(katom, NULL);
+			goto out;
+		}
+	}
+
+	/* Validate the atom. Function will return error if the atom is
+	 * malformed.
+	 *
+	 * Soft-jobs never enter the job scheduler but have their own initialize method.
+	 *
+	 * If either fail then we immediately complete the atom with an error.
+	 */
+	if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0) {
+		if (!kbase_js_is_atom_valid(kctx->kbdev, katom)) {
+			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+			ret = jd_done_nolock(katom, NULL);
+			goto out;
+		}
+	} else {
+		/* Soft-job */
+		if (kbase_prepare_soft_job(katom) != 0) {
+			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+			ret = jd_done_nolock(katom, NULL);
+			goto out;
+		}
+	}
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+	katom->work_id = atomic_inc_return(&jctx->work_id);
+	trace_gpu_job_enqueue(kctx->id, katom->work_id,
+			kbasep_map_core_reqs_to_string(katom->core_req));
+#endif
+
+	if (queued && !IS_GPU_ATOM(katom)) {
+		ret = false;
+		goto out;
+	}
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	if (kbase_fence_dep_count_read(katom) != -1) {
+		ret = false;
+		goto out;
+	}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+	if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+		if (kbase_process_soft_job(katom) == 0) {
+			kbase_finish_soft_job(katom);
+			ret = jd_done_nolock(katom, NULL);
+			goto out;
+		}
+
+		ret = false;
+	} else if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
+		katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+		ret = kbasep_js_add_job(kctx, katom);
+		/* If job was cancelled then resolve immediately */
+		if (katom->event_code == BASE_JD_EVENT_JOB_CANCELLED)
+			ret = jd_done_nolock(katom, NULL);
+	} else {
+		/* This is a pure dependency. Resolve it immediately */
+		ret = jd_done_nolock(katom, NULL);
+	}
+
+ out:
+	return ret;
+}
+
+int kbase_jd_submit(struct kbase_context *kctx,
+		void __user *user_addr, u32 nr_atoms, u32 stride,
+		bool uk6_atom)
+{
+	struct kbase_jd_context *jctx = &kctx->jctx;
+	int err = 0;
+	int i;
+	bool need_to_try_schedule_context = false;
+	struct kbase_device *kbdev;
+	u32 latest_flush;
+
+	/*
+	 * kbase_jd_submit isn't expected to fail and so all errors with the
+	 * jobs are reported by immediately failing them (through event system)
+	 */
+	kbdev = kctx->kbdev;
+
+	beenthere(kctx, "%s", "Enter");
+
+	if (kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+		dev_err(kbdev->dev, "Attempt to submit to a context that has SUBMIT_DISABLED set on it");
+		return -EINVAL;
+	}
+
+	if (stride != sizeof(base_jd_atom_v2)) {
+		dev_err(kbdev->dev, "Stride passed to job_submit doesn't match kernel");
+		return -EINVAL;
+	}
+
+	/* All atoms submitted in this call have the same flush ID */
+	latest_flush = kbase_backend_get_current_flush_id(kbdev);
+
+	for (i = 0; i < nr_atoms; i++) {
+		struct base_jd_atom_v2 user_atom;
+		struct kbase_jd_atom *katom;
+
+		if (copy_from_user(&user_atom, user_addr,
+					sizeof(user_atom)) != 0) {
+			err = -EINVAL;
+			break;
+		}
+
+		user_addr = (void __user *)((uintptr_t) user_addr + stride);
+
+		mutex_lock(&jctx->lock);
+#ifndef compiletime_assert
+#define compiletime_assert_defined
+#define compiletime_assert(x, msg) do { switch (0) { case 0: case (x):; } } \
+while (false)
+#endif
+		compiletime_assert((1 << (8*sizeof(user_atom.atom_number))) ==
+					BASE_JD_ATOM_COUNT,
+			"BASE_JD_ATOM_COUNT and base_atom_id type out of sync");
+		compiletime_assert(sizeof(user_atom.pre_dep[0].atom_id) ==
+					sizeof(user_atom.atom_number),
+			"BASE_JD_ATOM_COUNT and base_atom_id type out of sync");
+#ifdef compiletime_assert_defined
+#undef compiletime_assert
+#undef compiletime_assert_defined
+#endif
+		katom = &jctx->atoms[user_atom.atom_number];
+
+		/* Record the flush ID for the cache flush optimisation */
+		katom->flush_id = latest_flush;
+
+		while (katom->status != KBASE_JD_ATOM_STATE_UNUSED) {
+			/* Atom number is already in use, wait for the atom to
+			 * complete
+			 */
+			mutex_unlock(&jctx->lock);
+
+			/* This thread will wait for the atom to complete. Due
+			 * to thread scheduling we are not sure that the other
+			 * thread that owns the atom will also schedule the
+			 * context, so we force the scheduler to be active and
+			 * hence eventually schedule this context at some point
+			 * later.
+			 */
+			kbase_js_sched_all(kbdev);
+
+			if (wait_event_killable(katom->completed,
+					katom->status ==
+					KBASE_JD_ATOM_STATE_UNUSED) != 0) {
+				/* We're being killed so the result code
+				 * doesn't really matter
+				 */
+				return 0;
+			}
+			mutex_lock(&jctx->lock);
+		}
+
+		need_to_try_schedule_context |=
+				       jd_submit_atom(kctx, &user_atom, katom);
+
+		/* Register a completed job as a disjoint event when the GPU is in a disjoint state
+		 * (ie. being reset).
+		 */
+		kbase_disjoint_event_potential(kbdev);
+
+		mutex_unlock(&jctx->lock);
+	}
+
+	if (need_to_try_schedule_context)
+		kbase_js_sched_all(kbdev);
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_submit);
+
+void kbase_jd_done_worker(struct work_struct *data)
+{
+	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
+	struct kbase_jd_context *jctx;
+	struct kbase_context *kctx;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	struct kbase_device *kbdev;
+	struct kbasep_js_device_data *js_devdata;
+	u64 cache_jc = katom->jc;
+	struct kbasep_js_atom_retained_state katom_retained_state;
+	bool context_idle;
+	base_jd_core_req core_req = katom->core_req;
+
+	/* Soft jobs should never reach this function */
+	KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
+
+	kctx = katom->kctx;
+	jctx = &kctx->jctx;
+	kbdev = kctx->kbdev;
+	js_kctx_info = &kctx->jctx.sched_info;
+	js_devdata = &kbdev->js_data;
+
+	KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER, kctx, katom, katom->jc, 0);
+
+	kbase_backend_complete_wq(kbdev, katom);
+
+	/*
+	 * Begin transaction on JD context and JS context
+	 */
+	mutex_lock(&jctx->lock);
+	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(kbdev, katom, TL_ATOM_STATE_DONE);
+	mutex_lock(&js_devdata->queue_mutex);
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+	/* This worker only gets called on contexts that are scheduled *in*. This is
+	 * because it only happens in response to an IRQ from a job that was
+	 * running.
+	 */
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	if (katom->event_code == BASE_JD_EVENT_STOPPED) {
+		/* Atom has been promoted to stopped */
+		unsigned long flags;
+
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		mutex_unlock(&js_devdata->queue_mutex);
+
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+		katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+		kbase_js_unpull(kctx, katom);
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&jctx->lock);
+
+		return;
+	}
+
+	if ((katom->event_code != BASE_JD_EVENT_DONE) &&
+			(!kbase_ctx_flag(katom->kctx, KCTX_DYING)))
+		dev_err(kbdev->dev,
+			"t6xx: GPU fault 0x%02lx from job slot %d\n",
+					(unsigned long)katom->event_code,
+								katom->slot_nr);
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+		kbase_as_poking_timer_release_atom(kbdev, kctx, katom);
+
+	/* Retain state before the katom disappears */
+	kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
+
+	context_idle = kbase_js_complete_atom_wq(kctx, katom);
+
+	KBASE_DEBUG_ASSERT(kbasep_js_has_atom_finished(&katom_retained_state));
+
+	kbasep_js_remove_job(kbdev, kctx, katom);
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_unlock(&js_devdata->queue_mutex);
+	katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+	/* jd_done_nolock() requires the jsctx_mutex lock to be dropped */
+	jd_done_nolock(katom, &kctx->completed_jobs);
+
+	/* katom may have been freed now, do not use! */
+
+	if (context_idle) {
+		unsigned long flags;
+
+		context_idle = false;
+		mutex_lock(&js_devdata->queue_mutex);
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+		/* If kbase_sched() has scheduled this context back in then
+		 * KCTX_ACTIVE will have been set after we marked it as
+		 * inactive, and another pm reference will have been taken, so
+		 * drop our reference. But do not call kbase_jm_idle_ctx(), as
+		 * the context is active and fast-starting is allowed.
+		 *
+		 * If an atom has been fast-started then kctx->atoms_pulled will
+		 * be non-zero but KCTX_ACTIVE will still be false (as the
+		 * previous pm reference has been inherited). Do NOT drop our
+		 * reference, as it has been re-used, and leave the context as
+		 * active.
+		 *
+		 * If no new atoms have been started then KCTX_ACTIVE will still
+		 * be false and atoms_pulled will be zero, so drop the reference
+		 * and call kbase_jm_idle_ctx().
+		 *
+		 * As the checks are done under both the queue_mutex and
+		 * hwaccess_lock it should be impossible for this to race
+		 * with the scheduler code.
+		 */
+		if (kbase_ctx_flag(kctx, KCTX_ACTIVE) ||
+		    !atomic_read(&kctx->atoms_pulled)) {
+			/* Calling kbase_jm_idle_ctx() here will ensure that
+			 * atoms are not fast-started when we drop the
+			 * hwaccess_lock. This is not performed if
+			 * KCTX_ACTIVE is set as in that case another pm
+			 * reference has been taken and a fast-start would be
+			 * valid.
+			 */
+			if (!kbase_ctx_flag(kctx, KCTX_ACTIVE))
+				kbase_jm_idle_ctx(kbdev, kctx);
+			context_idle = true;
+		} else {
+			kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
+		}
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&js_devdata->queue_mutex);
+	}
+
+	/*
+	 * Transaction complete
+	 */
+	mutex_unlock(&jctx->lock);
+
+	/* Job is now no longer running, so can now safely release the context
+	 * reference, and handle any actions that were logged against the atom's retained state */
+
+	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &katom_retained_state);
+
+	kbase_js_sched_all(kbdev);
+
+	if (!atomic_dec_return(&kctx->work_count)) {
+		/* If worker now idle then post all events that jd_done_nolock()
+		 * has queued */
+		mutex_lock(&jctx->lock);
+		while (!list_empty(&kctx->completed_jobs)) {
+			struct kbase_jd_atom *atom = list_entry(
+					kctx->completed_jobs.next,
+					struct kbase_jd_atom, jd_item);
+			list_del(kctx->completed_jobs.next);
+
+			kbase_event_post(kctx, atom);
+		}
+		mutex_unlock(&jctx->lock);
+	}
+
+	kbase_backend_complete_wq_post_sched(kbdev, core_req);
+
+	if (context_idle)
+		kbase_pm_context_idle(kbdev);
+
+	KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER_END, kctx, NULL, cache_jc, 0);
+}
+
+/**
+ * jd_cancel_worker - Work queue job cancel function.
+ * @data: a &struct work_struct
+ *
+ * Only called as part of 'Zapping' a context (which occurs on termination).
+ * Operates serially with the kbase_jd_done_worker() on the work queue.
+ *
+ * This can only be called on contexts that aren't scheduled.
+ *
+ * We don't need to release most of the resources that would occur on
+ * kbase_jd_done() or kbase_jd_done_worker(), because the atoms here must not be
+ * running (by virtue of only being called on contexts that aren't
+ * scheduled).
+ */
+static void jd_cancel_worker(struct work_struct *data)
+{
+	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
+	struct kbase_jd_context *jctx;
+	struct kbase_context *kctx;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	bool need_to_try_schedule_context;
+	bool attr_state_changed;
+	struct kbase_device *kbdev;
+
+	/* Soft jobs should never reach this function */
+	KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
+
+	kctx = katom->kctx;
+	kbdev = kctx->kbdev;
+	jctx = &kctx->jctx;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	KBASE_TRACE_ADD(kbdev, JD_CANCEL_WORKER, kctx, katom, katom->jc, 0);
+
+	/* This only gets called on contexts that are scheduled out. Hence, we must
+	 * make sure we don't de-ref the number of running jobs (there aren't
+	 * any), nor must we try to schedule out the context (it's already
+	 * scheduled out).
+	 */
+	KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	/* Scheduler: Remove the job from the system */
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	attr_state_changed = kbasep_js_remove_cancelled_job(kbdev, kctx, katom);
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+	mutex_lock(&jctx->lock);
+
+	need_to_try_schedule_context = jd_done_nolock(katom, NULL);
+	/* Because we're zapping, we're not adding any more jobs to this ctx,
+	 * so there is no need to schedule the context. There's also no need
+	 * for the jsctx_mutex to have been taken around this. */
+	KBASE_DEBUG_ASSERT(!need_to_try_schedule_context);
+
+	/* katom may have been freed now, do not use! */
+	mutex_unlock(&jctx->lock);
+
+	if (attr_state_changed)
+		kbase_js_sched_all(kbdev);
+}
+
+/**
+ * kbase_jd_done - Complete a job that has been removed from the Hardware
+ * @katom: atom which has been completed
+ * @slot_nr: slot the atom was on
+ * @end_timestamp: completion time
+ * @done_code: completion code
+ *
+ * This must be used whenever a job has been removed from the Hardware, e.g.:
+ * An IRQ indicates that the job finished (for both error and 'done' codes), or
+ * the job was evicted from the JS_HEAD_NEXT registers during a Soft/Hard stop.
+ *
+ * Some work is carried out immediately, and the rest is deferred onto a
+ * workqueue
+ *
+ * Context:
+ *   This can be called safely from atomic context.
+ *   The caller must hold kbdev->hwaccess_lock
+ */
+void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr,
+		ktime_t *end_timestamp, kbasep_js_atom_done_code done_code)
+{
+	struct kbase_context *kctx;
+	struct kbase_device *kbdev;
+
+	KBASE_DEBUG_ASSERT(katom);
+	kctx = katom->kctx;
+	KBASE_DEBUG_ASSERT(kctx);
+	kbdev = kctx->kbdev;
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT)
+		katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
+
+	KBASE_TRACE_ADD(kbdev, JD_DONE, kctx, katom, katom->jc, 0);
+
+	kbase_job_check_leave_disjoint(kbdev, katom);
+
+	katom->slot_nr = slot_nr;
+
+	atomic_inc(&kctx->work_count);
+
+#ifdef CONFIG_DEBUG_FS
+	/* A failed job happened and is waiting to be dumped */
+	if (!katom->will_fail_event_code &&
+			kbase_debug_job_fault_process(katom, katom->event_code))
+		return;
+#endif
+
+	WARN_ON(work_pending(&katom->work));
+	INIT_WORK(&katom->work, kbase_jd_done_worker);
+	queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_done);
+
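+/**
+ * kbase_jd_cancel - Cancel an atom on a context that is not scheduled
+ * @kbdev: Device pointer
+ * @katom: Atom to cancel
+ *
+ * Marks the atom as cancelled and queues jd_cancel_worker() on the context's
+ * job_done workqueue to perform the remaining clean-up. Must only be called
+ * for contexts that are not scheduled, as asserted below.
+ */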
+void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	KBASE_DEBUG_ASSERT(NULL != katom);
+	kctx = katom->kctx;
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+
+	KBASE_TRACE_ADD(kbdev, JD_CANCEL, kctx, katom, katom->jc, 0);
+
+	/* This should only be done from a context that is not scheduled */
+	KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	WARN_ON(work_pending(&katom->work));
+
+	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+	INIT_WORK(&katom->work, jd_cancel_worker);
+	queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
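+/**
+ * kbase_jd_zap_context - Cancel all outstanding jobs on a context
+ * @kctx: Context being zapped
+ *
+ * Zaps the job scheduler, then cleans up the jobs that are known to kbase but
+ * are queued outside it (waiting soft jobs and, where enabled, dma-fence
+ * waiters), and finally waits for the context's job count to reach zero.
+ */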
+void kbase_jd_zap_context(struct kbase_context *kctx)
+{
+	struct kbase_jd_atom *katom;
+	struct list_head *entry, *tmp;
+	struct kbase_device *kbdev;
+
+	KBASE_DEBUG_ASSERT(kctx);
+
+	kbdev = kctx->kbdev;
+
+	KBASE_TRACE_ADD(kbdev, JD_ZAP_CONTEXT, kctx, NULL, 0u, 0u);
+
+	kbase_js_zap_context(kctx);
+
+	mutex_lock(&kctx->jctx.lock);
+
+	/*
+	 * While holding the struct kbase_jd_context lock clean up jobs which are known to kbase but are
+	 * queued outside the job scheduler.
+	 */
+
+	del_timer_sync(&kctx->soft_job_timeout);
+	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+		katom = list_entry(entry, struct kbase_jd_atom, queue);
+		kbase_cancel_soft_job(katom);
+	}
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	kbase_dma_fence_cancel_all_atoms(kctx);
+#endif
+
+	mutex_unlock(&kctx->jctx.lock);
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	/* Flush dma-fence workqueue to ensure that any callbacks that may have
+	 * been queued are done before continuing.
+	 */
+	flush_workqueue(kctx->dma_fence.wq);
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+	kbase_debug_job_fault_kctx_unblock(kctx);
+#endif
+
+	kbase_jm_wait_for_zero_jobs(kctx);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_zap_context);
+
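+/**
+ * kbase_jd_init - Initialise the job dispatcher state of a context
+ * @kctx: Context to initialise
+ *
+ * Allocates the per-context job_done workqueue and initialises every atom
+ * slot, the job dispatcher lock and the completed-jobs list.
+ *
+ * Return: 0 on success, or -ENOMEM if the workqueue could not be allocated.
+ */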
+int kbase_jd_init(struct kbase_context *kctx)
+{
+	int i;
+	int mali_err = 0;
+
+	KBASE_DEBUG_ASSERT(kctx);
+
+	kctx->jctx.job_done_wq = alloc_workqueue("mali_jd",
+			WQ_HIGHPRI | WQ_UNBOUND, 1);
+	if (NULL == kctx->jctx.job_done_wq) {
+		mali_err = -ENOMEM;
+		goto out1;
+	}
+
+	for (i = 0; i < BASE_JD_ATOM_COUNT; i++) {
+		init_waitqueue_head(&kctx->jctx.atoms[i].completed);
+
+		INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[0]);
+		INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[1]);
+
+		/* Catch userspace attempting to use an atom which doesn't exist as a pre-dependency */
+		kctx->jctx.atoms[i].event_code = BASE_JD_EVENT_JOB_INVALID;
+		kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED;
+
+#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+		kctx->jctx.atoms[i].dma_fence.context =
+						dma_fence_context_alloc(1);
+		atomic_set(&kctx->jctx.atoms[i].dma_fence.seqno, 0);
+		INIT_LIST_HEAD(&kctx->jctx.atoms[i].dma_fence.callbacks);
+#endif
+	}
+
+	mutex_init(&kctx->jctx.lock);
+
+	init_waitqueue_head(&kctx->jctx.zero_jobs_wait);
+
+	spin_lock_init(&kctx->jctx.tb_lock);
+
+	kctx->jctx.job_nr = 0;
+	INIT_LIST_HEAD(&kctx->completed_jobs);
+	atomic_set(&kctx->work_count, 0);
+
+	return 0;
+
+ out1:
+	return mali_err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_init);
+
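+/**
+ * kbase_jd_exit - Tear down the job dispatcher state of a context
+ * @kctx: Context to tear down
+ *
+ * Counterpart of kbase_jd_init(); destroys the job_done workqueue.
+ */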
+void kbase_jd_exit(struct kbase_context *kctx)
+{
+	KBASE_DEBUG_ASSERT(kctx);
+
+	/* Work queue is emptied by this */
+	destroy_workqueue(kctx->jctx.job_done_wq);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_exit);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c
new file mode 100644
index 0000000..2fc21fd
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c
@@ -0,0 +1,246 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+#include <mali_kbase.h>
+#include <mali_kbase_jd_debugfs.h>
+#include <mali_kbase_dma_fence.h>
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#include <mali_kbase_sync.h>
+#endif
+#include <mali_kbase_ioctl.h>
+
+struct kbase_jd_debugfs_depinfo {
+	u8 id;
+	char type;
+};
+
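+/**
+ * kbase_jd_debugfs_fence_info - Print the fence state of an atom
+ * @atom:  Atom to print the fence state of
+ * @sfile: The debugfs entry
+ *
+ * Prints the status of any sync fence the atom triggers ("Sa") or waits on
+ * ("Wa"), and of any dma-fence it signals ("Sd") or waits on ("Wd").
+ */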
+static void kbase_jd_debugfs_fence_info(struct kbase_jd_atom *atom,
+					struct seq_file *sfile)
+{
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	struct kbase_sync_fence_info info;
+	int res;
+
+	switch (atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+		res = kbase_sync_fence_out_info_get(atom, &info);
+		if (0 == res) {
+			seq_printf(sfile, "Sa([%p]%d) ",
+				   info.fence, info.status);
+			break;
+		}
+	case BASE_JD_REQ_SOFT_FENCE_WAIT:
+		res = kbase_sync_fence_in_info_get(atom, &info);
+		if (0 == res) {
+			seq_printf(sfile, "Wa([%p]%d) ",
+				   info.fence, info.status);
+			break;
+		}
+	default:
+		break;
+	}
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+		struct kbase_fence_cb *cb;
+
+		if (atom->dma_fence.fence) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+			struct fence *fence = atom->dma_fence.fence;
+#else
+			struct dma_fence *fence = atom->dma_fence.fence;
+#endif
+
+			seq_printf(sfile,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+					"Sd(%u#%u: %s) ",
+#else
+					"Sd(%llu#%u: %s) ",
+#endif
+					fence->context,
+					fence->seqno,
+					dma_fence_is_signaled(fence) ?
+						"signaled" : "active");
+		}
+
+		list_for_each_entry(cb, &atom->dma_fence.callbacks,
+				    node) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+			struct fence *fence = cb->fence;
+#else
+			struct dma_fence *fence = cb->fence;
+#endif
+
+			seq_printf(sfile,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+					"Wd(%u#%u: %s) ",
+#else
+					"Wd(%llu#%u: %s) ",
+#endif
+					fence->context,
+					fence->seqno,
+					dma_fence_is_signaled(fence) ?
+						"signaled" : "active");
+		}
+	}
+#endif /* CONFIG_MALI_DMA_FENCE */
+}
+
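+/**
+ * kbasep_jd_debugfs_atom_deps - Collect the dependency info of an atom
+ * @deps: Array of two entries to fill in with the id and type of each
+ *        dependency
+ * @atom: Atom to collect the dependency info of
+ *
+ * The type is encoded as a single character: 'D' for a data dependency, '>'
+ * for an ordering dependency, ' ' for no dependency and '?' for an unknown
+ * dependency type.
+ */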
+static void kbasep_jd_debugfs_atom_deps(
+		struct kbase_jd_debugfs_depinfo *deps,
+		struct kbase_jd_atom *atom)
+{
+	struct kbase_context *kctx = atom->kctx;
+	int i;
+
+	for (i = 0; i < 2; i++)	{
+		deps[i].id = (unsigned)(atom->dep[i].atom ?
+				kbase_jd_atom_id(kctx, atom->dep[i].atom) : 0);
+
+		switch (atom->dep[i].dep_type) {
+		case BASE_JD_DEP_TYPE_INVALID:
+			deps[i].type = ' ';
+			break;
+		case BASE_JD_DEP_TYPE_DATA:
+			deps[i].type = 'D';
+			break;
+		case BASE_JD_DEP_TYPE_ORDER:
+			deps[i].type = '>';
+			break;
+		default:
+			deps[i].type = '?';
+			break;
+		}
+	}
+}
+
+/**
+ * kbasep_jd_debugfs_atoms_show - Show callback for the JD atoms debugfs file.
+ * @sfile: The debugfs entry
+ * @data:  Data associated with the entry
+ *
+ * This function is called to get the contents of the JD atoms debugfs file.
+ * This is a report of all atoms managed by kbase_jd_context.atoms
+ *
+ * Return: 0 if the data was successfully printed to the debugfs entry file,
+ * an error code otherwise
+ */
+static int kbasep_jd_debugfs_atoms_show(struct seq_file *sfile, void *data)
+{
+	struct kbase_context *kctx = sfile->private;
+	struct kbase_jd_atom *atoms;
+	unsigned long irq_flags;
+	int i;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	/* Print version */
+	seq_printf(sfile, "v%u\n", MALI_JD_DEBUGFS_VERSION);
+
+	/* Print U/K API version */
+	seq_printf(sfile, "ukv%u.%u\n", BASE_UK_VERSION_MAJOR,
+			BASE_UK_VERSION_MINOR);
+
+	/* Print table heading */
+	seq_puts(sfile, " ID, Core req, St, CR,   Predeps,           Start time, Additional info...\n");
+
+	atoms = kctx->jctx.atoms;
+	/* General atom states */
+	mutex_lock(&kctx->jctx.lock);
+	/* JS-related states */
+	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+	for (i = 0; i != BASE_JD_ATOM_COUNT; ++i) {
+		struct kbase_jd_atom *atom = &atoms[i];
+		s64 start_timestamp = 0;
+		struct kbase_jd_debugfs_depinfo deps[2];
+
+		if (atom->status == KBASE_JD_ATOM_STATE_UNUSED)
+			continue;
+
+		/* start_timestamp is cleared as soon as the atom leaves the
+		 * UNUSED state and set before a job is submitted to the h/w;
+		 * a non-zero value means it is valid */
+		if (ktime_to_ns(atom->start_timestamp))
+			start_timestamp = ktime_to_ns(
+					ktime_sub(ktime_get(), atom->start_timestamp));
+
+		kbasep_jd_debugfs_atom_deps(deps, atom);
+
+		seq_printf(sfile,
+				"%3u, %8x, %2u, %c%3u %c%3u, %20lld, ",
+				i, atom->core_req, atom->status,
+				deps[0].type, deps[0].id,
+				deps[1].type, deps[1].id,
+				start_timestamp);
+
+		kbase_jd_debugfs_fence_info(atom, sfile);
+
+		seq_puts(sfile, "\n");
+	}
+	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+	mutex_unlock(&kctx->jctx.lock);
+
+	return 0;
+}
+
+/**
+ * kbasep_jd_debugfs_atoms_open - open operation for atom debugfs file
+ * @in: &struct inode pointer
+ * @file: &struct file pointer
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int kbasep_jd_debugfs_atoms_open(struct inode *in, struct file *file)
+{
+	return single_open(file, kbasep_jd_debugfs_atoms_show, in->i_private);
+}
+
+static const struct file_operations kbasep_jd_debugfs_atoms_fops = {
+	.owner = THIS_MODULE,
+	.open = kbasep_jd_debugfs_atoms_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void kbasep_jd_debugfs_ctx_init(struct kbase_context *kctx)
+{
+	/* Caller already ensures this, but we keep the pattern for
+	 * maintenance safety.
+	 */
+	if (WARN_ON(!kctx) ||
+		WARN_ON(IS_ERR_OR_NULL(kctx->kctx_dentry)))
+		return;
+
+	/* Expose all atoms */
+	debugfs_create_file("atoms", S_IRUGO, kctx->kctx_dentry, kctx,
+			&kbasep_jd_debugfs_atoms_fops);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h
new file mode 100644
index 0000000..697bdef
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file mali_kbase_jd_debugfs.h
+ * Header file for job dispatcher-related entries in debugfs
+ */
+
+#ifndef _KBASE_JD_DEBUGFS_H
+#define _KBASE_JD_DEBUGFS_H
+
+#include <linux/debugfs.h>
+
+#define MALI_JD_DEBUGFS_VERSION 3
+
+/* Forward declarations */
+struct kbase_context;
+
+/**
+ * kbasep_jd_debugfs_ctx_init() - Add debugfs entries for JD system
+ *
+ * @kctx: Pointer to kbase_context
+ */
+void kbasep_jd_debugfs_ctx_init(struct kbase_context *kctx);
+
+#endif /* _KBASE_JD_DEBUGFS_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jm.c b/drivers/gpu/arm/midgard/mali_kbase_jm.c
new file mode 100644
index 0000000..da78a16
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_jm.c
@@ -0,0 +1,140 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * HW access job manager common APIs
+ */
+
+#include <mali_kbase.h>
+#include "mali_kbase_hwaccess_jm.h"
+#include "mali_kbase_jm.h"
+
+/**
+ * kbase_jm_next_job() - Attempt to run the next @nr_jobs_to_submit jobs on slot
+ *			 @js on the active context.
+ * @kbdev:		Device pointer
+ * @js:			Job slot to run on
+ * @nr_jobs_to_submit:	Number of jobs to attempt to submit
+ *
+ * Return: true if slot can still be submitted on, false if slot is now full.
+ */
+static bool kbase_jm_next_job(struct kbase_device *kbdev, int js,
+				int nr_jobs_to_submit)
+{
+	struct kbase_context *kctx;
+	int i;
+
+	kctx = kbdev->hwaccess.active_kctx[js];
+
+	if (!kctx)
+		return true;
+
+	for (i = 0; i < nr_jobs_to_submit; i++) {
+		struct kbase_jd_atom *katom = kbase_js_pull(kctx, js);
+
+		if (!katom)
+			return true; /* Context has no jobs on this slot */
+
+		kbase_backend_run_atom(kbdev, katom);
+	}
+
+	return false; /* Slot ringbuffer should now be full */
+}
+
+u32 kbase_jm_kick(struct kbase_device *kbdev, u32 js_mask)
+{
+	u32 ret_mask = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	while (js_mask) {
+		int js = ffs(js_mask) - 1;
+		int nr_jobs_to_submit = kbase_backend_slot_free(kbdev, js);
+
+		if (kbase_jm_next_job(kbdev, js, nr_jobs_to_submit))
+			ret_mask |= (1 << js);
+
+		js_mask &= ~(1 << js);
+	}
+
+	return ret_mask;
+}
+
+void kbase_jm_try_kick(struct kbase_device *kbdev, u32 js_mask)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!down_trylock(&js_devdata->schedule_sem)) {
+		kbase_jm_kick(kbdev, js_mask);
+		up(&js_devdata->schedule_sem);
+	}
+}
+
+void kbase_jm_try_kick_all(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!down_trylock(&js_devdata->schedule_sem)) {
+		kbase_jm_kick_all(kbdev);
+		up(&js_devdata->schedule_sem);
+	}
+}
+
+void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+	int js;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+		if (kbdev->hwaccess.active_kctx[js] == kctx)
+			kbdev->hwaccess.active_kctx[js] = NULL;
+	}
+}
+
+struct kbase_jd_atom *kbase_jm_return_atom_to_js(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (katom->event_code != BASE_JD_EVENT_STOPPED &&
+			katom->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT) {
+		return kbase_js_complete_atom(katom, NULL);
+	} else {
+		kbase_js_unpull(katom->kctx, katom);
+		return NULL;
+	}
+}
+
+struct kbase_jd_atom *kbase_jm_complete(struct kbase_device *kbdev,
+		struct kbase_jd_atom *katom, ktime_t *end_timestamp)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	return kbase_js_complete_atom(katom, end_timestamp);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jm.h b/drivers/gpu/arm/midgard/mali_kbase_jm.h
new file mode 100644
index 0000000..c468ea4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_jm.h
@@ -0,0 +1,115 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Job manager common APIs
+ */
+
+#ifndef _KBASE_JM_H_
+#define _KBASE_JM_H_
+
+/**
+ * kbase_jm_kick() - Indicate that there are jobs ready to run.
+ * @kbdev:	Device pointer
+ * @js_mask:	Mask of the job slots that can be pulled from.
+ *
+ * Caller must hold the hwaccess_lock and schedule_sem semaphore
+ *
+ * Return: Mask of the job slots that can still be submitted to.
+ */
+u32 kbase_jm_kick(struct kbase_device *kbdev, u32 js_mask);
+
+/**
+ * kbase_jm_kick_all() - Indicate that there are jobs ready to run on all job
+ *			 slots.
+ * @kbdev:	Device pointer
+ *
+ * Caller must hold the hwaccess_lock and schedule_sem semaphore
+ *
+ * Return: Mask of the job slots that can still be submitted to.
+ */
+static inline u32 kbase_jm_kick_all(struct kbase_device *kbdev)
+{
+	return kbase_jm_kick(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1);
+}
+
+/**
+ * kbase_jm_try_kick - Attempt to call kbase_jm_kick
+ * @kbdev:   Device pointer
+ * @js_mask: Mask of the job slots that can be pulled from
+ * Context: Caller must hold hwaccess_lock
+ *
+ * If schedule_sem can be immediately obtained then this function will call
+ * kbase_jm_kick() otherwise it will do nothing.
+ */
+void kbase_jm_try_kick(struct kbase_device *kbdev, u32 js_mask);
+
+/**
+ * kbase_jm_try_kick_all() - Attempt to call kbase_jm_kick_all
+ * @kbdev:  Device pointer
+ * Context: Caller must hold hwaccess_lock
+ *
+ * If schedule_sem can be immediately obtained then this function will call
+ * kbase_jm_kick_all() otherwise it will do nothing.
+ */
+void kbase_jm_try_kick_all(struct kbase_device *kbdev);
+
+/**
+ * kbase_jm_idle_ctx() - Mark a context as idle.
+ * @kbdev:	Device pointer
+ * @kctx:	Context to mark as idle
+ *
+ * No more atoms will be pulled from this context until it is marked as active
+ * by kbase_js_use_ctx().
+ *
+ * The context should have no atoms currently pulled from it
+ * (kctx->atoms_pulled == 0).
+ *
+ * Caller must hold the hwaccess_lock
+ */
+void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * kbase_jm_return_atom_to_js() - Return an atom to the job scheduler that has
+ *				  been soft-stopped or will fail due to a
+ *				  dependency
+ * @kbdev:	Device pointer
+ * @katom:	Atom that has been stopped or will be failed
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
+ */
+struct kbase_jd_atom *kbase_jm_return_atom_to_js(struct kbase_device *kbdev,
+			struct kbase_jd_atom *katom);
+
+/**
+ * kbase_jm_complete() - Complete an atom
+ * @kbdev:		Device pointer
+ * @katom:		Atom that has completed
+ * @end_timestamp:	Timestamp of atom completion
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
+ */
+struct kbase_jd_atom *kbase_jm_complete(struct kbase_device *kbdev,
+		struct kbase_jd_atom *katom, ktime_t *end_timestamp);
+
+#endif /* _KBASE_JM_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js.c b/drivers/gpu/arm/midgard/mali_kbase_js.c
new file mode 100644
index 0000000..77d9716
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_js.c
@@ -0,0 +1,2891 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Job Scheduler Implementation
+ */
+#include <mali_kbase.h>
+#include <mali_kbase_js.h>
+#include <mali_kbase_tracepoints.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_ctx_sched.h>
+
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config_defaults.h>
+
+#include "mali_kbase_jm.h"
+#include "mali_kbase_hwaccess_jm.h"
+
+/*
+ * Private types
+ */
+
+/* Bitpattern indicating the result of releasing a context */
+enum {
+	/* The context was descheduled - caller should try scheduling in a new
+	 * one to keep the runpool full */
+	KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED = (1u << 0),
+	/* Ctx attributes were changed - caller should try scheduling all
+	 * contexts */
+	KBASEP_JS_RELEASE_RESULT_SCHED_ALL = (1u << 1)
+};
+
+typedef u32 kbasep_js_release_result;
+
+const int kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS] = {
+	KBASE_JS_ATOM_SCHED_PRIO_MED, /* BASE_JD_PRIO_MEDIUM */
+	KBASE_JS_ATOM_SCHED_PRIO_HIGH, /* BASE_JD_PRIO_HIGH */
+	KBASE_JS_ATOM_SCHED_PRIO_LOW  /* BASE_JD_PRIO_LOW */
+};
+
+const base_jd_prio
+kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT] = {
+	BASE_JD_PRIO_HIGH,   /* KBASE_JS_ATOM_SCHED_PRIO_HIGH */
+	BASE_JD_PRIO_MEDIUM, /* KBASE_JS_ATOM_SCHED_PRIO_MED */
+	BASE_JD_PRIO_LOW     /* KBASE_JS_ATOM_SCHED_PRIO_LOW */
+};
+
+
+/*
+ * Private function prototypes
+ */
+static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
+		struct kbase_device *kbdev, struct kbase_context *kctx,
+		struct kbasep_js_atom_retained_state *katom_retained_state);
+
+static int kbase_js_get_slot(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom);
+
+static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
+		kbasep_js_ctx_job_cb callback);
+
+/* Helper for trace subcodes */
+#if KBASE_TRACE_ENABLE
+static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	return atomic_read(&kctx->refcount);
+}
+#else				/* KBASE_TRACE_ENABLE  */
+static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	CSTD_UNUSED(kbdev);
+	CSTD_UNUSED(kctx);
+	return 0;
+}
+#endif				/* KBASE_TRACE_ENABLE  */
+
+/*
+ * Private functions
+ */
+
+/**
+ * core_reqs_from_jsn_features - Convert JSn_FEATURES to core requirements
+ * @features: JSn_FEATURE register value
+ *
+ * Given a JSn_FEATURE register value returns the core requirements that match
+ *
+ * Return: Core requirement bit mask
+ */
+static base_jd_core_req core_reqs_from_jsn_features(u16 features)
+{
+	base_jd_core_req core_req = 0u;
+
+	if ((features & JS_FEATURE_SET_VALUE_JOB) != 0)
+		core_req |= BASE_JD_REQ_V;
+
+	if ((features & JS_FEATURE_CACHE_FLUSH_JOB) != 0)
+		core_req |= BASE_JD_REQ_CF;
+
+	if ((features & JS_FEATURE_COMPUTE_JOB) != 0)
+		core_req |= BASE_JD_REQ_CS;
+
+	if ((features & JS_FEATURE_TILER_JOB) != 0)
+		core_req |= BASE_JD_REQ_T;
+
+	if ((features & JS_FEATURE_FRAGMENT_JOB) != 0)
+		core_req |= BASE_JD_REQ_FS;
+
+	return core_req;
+}
+
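+/* Take runpool_mutex and notify the backend that the number of running
+ * contexts may have changed, so that it can update its scheduling timers. */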
+static void kbase_js_sync_timers(struct kbase_device *kbdev)
+{
+	mutex_lock(&kbdev->js_data.runpool_mutex);
+	kbase_backend_ctx_count_changed(kbdev);
+	mutex_unlock(&kbdev->js_data.runpool_mutex);
+}
+
+/* Hold the mmu_hw_mutex and hwaccess_lock for this */
+bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	bool result = false;
+	int as_nr;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	as_nr = kctx->as_nr;
+	if (atomic_read(&kctx->refcount) > 0) {
+		KBASE_DEBUG_ASSERT(as_nr >= 0);
+
+		kbase_ctx_sched_retain_ctx_refcount(kctx);
+		KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RETAIN_CTX_NOLOCK, kctx,
+				NULL, 0u, atomic_read(&kctx->refcount));
+		result = true;
+	}
+
+	return result;
+}
+
+/**
+ * jsctx_rb_none_to_pull_prio(): - Check if there are no pullable atoms
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js:   Job slot id to check.
+ * @prio: Priority to check.
+ *
+ * There may be running atoms in the ring buffer even if there are no atoms to
+ * pull. It is also possible for the ring buffer to be full (with running
+ * atoms) when this function returns true.
+ *
+ * Return: true if there are no atoms to pull, false otherwise.
+ */
+static inline bool
+jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, int js, int prio)
+{
+	struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	return RB_EMPTY_ROOT(&rb->runnable_tree);
+}
+
+/**
+ * jsctx_rb_none_to_pull(): - Check if all priority ring buffers have no
+ * pullable atoms
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js:   Job slot id to check.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if the ring buffers for all priorities have no pullable atoms,
+ *	   false otherwise.
+ */
+static inline bool
+jsctx_rb_none_to_pull(struct kbase_context *kctx, int js)
+{
+	int prio;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+		if (!jsctx_rb_none_to_pull_prio(kctx, js, prio))
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * jsctx_queue_foreach_prio(): - Execute callback for each entry in the queue.
+ * @kctx:     Pointer to kbase context with the queue.
+ * @js:       Job slot id to iterate.
+ * @prio:     Priority id to iterate.
+ * @callback: Function pointer to callback.
+ *
+ * Iterate over a queue and invoke @callback for each entry in the queue, and
+ * remove the entry from the queue.
+ *
+ * If entries are added to the queue while this is running those entries may or
+ * may not be covered. To ensure that all entries in the buffer have been
+ * enumerated when this function returns, jsctx->lock must be held when calling
+ * this function.
+ *
+ * The HW access lock must always be held when calling this function.
+ */
+static void
+jsctx_queue_foreach_prio(struct kbase_context *kctx, int js, int prio,
+		kbasep_js_ctx_job_cb callback)
+{
+	struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	while (!RB_EMPTY_ROOT(&queue->runnable_tree)) {
+		struct rb_node *node = rb_first(&queue->runnable_tree);
+		struct kbase_jd_atom *entry = rb_entry(node,
+				struct kbase_jd_atom, runnable_tree_node);
+
+		rb_erase(node, &queue->runnable_tree);
+		callback(kctx->kbdev, entry);
+	}
+
+	while (!list_empty(&queue->x_dep_head)) {
+		struct kbase_jd_atom *entry = list_entry(queue->x_dep_head.next,
+				struct kbase_jd_atom, queue);
+
+		list_del(queue->x_dep_head.next);
+
+		callback(kctx->kbdev, entry);
+	}
+}
+
+/**
+ * jsctx_queue_foreach(): - Execute callback for each entry in every queue
+ * @kctx:     Pointer to kbase context with queue.
+ * @js:       Job slot id to iterate.
+ * @callback: Function pointer to callback.
+ *
+ * Iterate over all the different priorities, and for each call
+ * jsctx_queue_foreach_prio() to iterate over the queue and invoke @callback
+ * for each entry, and remove the entry from the queue.
+ */
+static inline void
+jsctx_queue_foreach(struct kbase_context *kctx, int js,
+		kbasep_js_ctx_job_cb callback)
+{
+	int prio;
+
+	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++)
+		jsctx_queue_foreach_prio(kctx, js, prio, callback);
+}
+
+/**
+ * jsctx_rb_peek_prio(): - Check buffer and get next atom
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js:   Job slot id to check.
+ * @prio: Priority id to check.
+ *
+ * Check the ring buffer for the specified @js and @prio and return a pointer to
+ * the next atom, unless the ring buffer is empty.
+ *
+ * Return: Pointer to next atom in buffer, or NULL if there is no atom.
+ */
+static inline struct kbase_jd_atom *
+jsctx_rb_peek_prio(struct kbase_context *kctx, int js, int prio)
+{
+	struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
+	struct rb_node *node;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	node = rb_first(&rb->runnable_tree);
+	if (!node)
+		return NULL;
+
+	return rb_entry(node, struct kbase_jd_atom, runnable_tree_node);
+}
+
+/**
+ * jsctx_rb_peek(): - Check all priority buffers and get next atom
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js:   Job slot id to check.
+ *
+ * Check the ring buffers for all priorities, starting from
+ * KBASE_JS_ATOM_SCHED_PRIO_HIGH, for the specified @js and return a pointer
+ * to the next atom, unless the ring buffers for all priorities are empty.
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * Return: Pointer to next atom in buffer, or NULL if there is no atom.
+ */
+static inline struct kbase_jd_atom *
+jsctx_rb_peek(struct kbase_context *kctx, int js)
+{
+	int prio;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+		struct kbase_jd_atom *katom;
+
+		katom = jsctx_rb_peek_prio(kctx, js, prio);
+		if (katom)
+			return katom;
+	}
+
+	return NULL;
+}
+
+/**
+ * jsctx_rb_pull(): - Mark atom in list as running
+ * @kctx:  Pointer to kbase context with ring buffer.
+ * @katom: Pointer to katom to pull.
+ *
+ * Mark an atom previously obtained from jsctx_rb_peek() as running.
+ *
+ * @katom must currently be at the head of the ring buffer.
+ */
+static inline void
+jsctx_rb_pull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	int prio = katom->sched_priority;
+	int js = katom->slot_nr;
+	struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	/* Atoms must be pulled in the correct order. */
+	WARN_ON(katom != jsctx_rb_peek_prio(kctx, js, prio));
+
+	rb_erase(&katom->runnable_tree_node, &rb->runnable_tree);
+}
+
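+/* Wrap-safe comparison of two u32 sequence values: true if @a precedes @b
+ * even when the 32-bit counter has wrapped in between. */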
+#define LESS_THAN_WRAP(a, b) ((s32)(a - b) < 0)
+
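+/**
+ * jsctx_tree_add(): - Add an atom to a slot's runnable tree
+ * @kctx:  Pointer to kbase context with ring buffer.
+ * @katom: Pointer to katom to add.
+ *
+ * Insert @katom into the runnable tree of its priority and slot, ordered by
+ * atom age (with wrap-around handled by LESS_THAN_WRAP()) so that the oldest
+ * atom is pulled first.
+ */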
+static void
+jsctx_tree_add(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	int prio = katom->sched_priority;
+	int js = katom->slot_nr;
+	struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+	struct rb_node **new = &(queue->runnable_tree.rb_node), *parent = NULL;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	while (*new) {
+		struct kbase_jd_atom *entry = container_of(*new,
+				struct kbase_jd_atom, runnable_tree_node);
+
+		parent = *new;
+		if (LESS_THAN_WRAP(katom->age, entry->age))
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&katom->runnable_tree_node, parent, new);
+	rb_insert_color(&katom->runnable_tree_node, &queue->runnable_tree);
+
+	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(kbdev, katom, TL_ATOM_STATE_READY);
+}
+
+/**
+ * jsctx_rb_unpull(): - Undo marking of atom in list as running
+ * @kctx:  Pointer to kbase context with ring buffer.
+ * @katom: Pointer to katom to unpull.
+ *
+ * Undo jsctx_rb_pull() and put @katom back in the queue.
+ *
+ * jsctx_rb_unpull() must be called on atoms in the same order the atoms were
+ * pulled.
+ */
+static inline void
+jsctx_rb_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	jsctx_tree_add(kctx, katom);
+}
+
+static bool kbase_js_ctx_pullable(struct kbase_context *kctx,
+					int js,
+					bool is_scheduled);
+static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						int js);
+static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						int js);
+
+/*
+ * Functions private to KBase ('Protected' functions)
+ */
+int kbasep_js_devdata_init(struct kbase_device * const kbdev)
+{
+	struct kbasep_js_device_data *jsdd;
+	int i, j;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	jsdd = &kbdev->js_data;
+
+#ifdef CONFIG_MALI_DEBUG
+	/* Soft-stop will be disabled on a single context by default unless
+	 * softstop_always is set */
+	jsdd->softstop_always = false;
+#endif				/* CONFIG_MALI_DEBUG */
+	jsdd->nr_all_contexts_running = 0;
+	jsdd->nr_user_contexts_running = 0;
+	jsdd->nr_contexts_pullable = 0;
+	atomic_set(&jsdd->nr_contexts_runnable, 0);
+	/* No ctx allowed to submit */
+	jsdd->runpool_irq.submit_allowed = 0u;
+	memset(jsdd->runpool_irq.ctx_attr_ref_count, 0,
+			sizeof(jsdd->runpool_irq.ctx_attr_ref_count));
+	memset(jsdd->runpool_irq.slot_affinities, 0,
+			sizeof(jsdd->runpool_irq.slot_affinities));
+	memset(jsdd->runpool_irq.slot_affinity_refcount, 0,
+			sizeof(jsdd->runpool_irq.slot_affinity_refcount));
+	INIT_LIST_HEAD(&jsdd->suspended_soft_jobs_list);
+
+	/* Config attributes */
+	jsdd->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS;
+	jsdd->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS;
+	jsdd->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL;
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
+		jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS_8408;
+	else
+		jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS;
+	jsdd->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL;
+	jsdd->hard_stop_ticks_dumping = DEFAULT_JS_HARD_STOP_TICKS_DUMPING;
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
+		jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS_8408;
+	else
+		jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS;
+	jsdd->gpu_reset_ticks_cl = DEFAULT_JS_RESET_TICKS_CL;
+	jsdd->gpu_reset_ticks_dumping = DEFAULT_JS_RESET_TICKS_DUMPING;
+	jsdd->ctx_timeslice_ns = DEFAULT_JS_CTX_TIMESLICE_NS;
+	atomic_set(&jsdd->soft_job_timeout_ms, DEFAULT_JS_SOFT_JOB_TIMEOUT);
+
+	dev_dbg(kbdev->dev, "JS Config Attribs: ");
+	dev_dbg(kbdev->dev, "\tscheduling_period_ns:%u",
+			jsdd->scheduling_period_ns);
+	dev_dbg(kbdev->dev, "\tsoft_stop_ticks:%u",
+			jsdd->soft_stop_ticks);
+	dev_dbg(kbdev->dev, "\tsoft_stop_ticks_cl:%u",
+			jsdd->soft_stop_ticks_cl);
+	dev_dbg(kbdev->dev, "\thard_stop_ticks_ss:%u",
+			jsdd->hard_stop_ticks_ss);
+	dev_dbg(kbdev->dev, "\thard_stop_ticks_cl:%u",
+			jsdd->hard_stop_ticks_cl);
+	dev_dbg(kbdev->dev, "\thard_stop_ticks_dumping:%u",
+			jsdd->hard_stop_ticks_dumping);
+	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_ss:%u",
+			jsdd->gpu_reset_ticks_ss);
+	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_cl:%u",
+			jsdd->gpu_reset_ticks_cl);
+	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_dumping:%u",
+			jsdd->gpu_reset_ticks_dumping);
+	dev_dbg(kbdev->dev, "\tctx_timeslice_ns:%u",
+			jsdd->ctx_timeslice_ns);
+	dev_dbg(kbdev->dev, "\tsoft_job_timeout:%i",
+		atomic_read(&jsdd->soft_job_timeout_ms));
+
+	if (!(jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_ss &&
+			jsdd->hard_stop_ticks_ss < jsdd->gpu_reset_ticks_ss &&
+			jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_dumping &&
+			jsdd->hard_stop_ticks_dumping <
+			jsdd->gpu_reset_ticks_dumping)) {
+		dev_err(kbdev->dev, "Job scheduler timeouts invalid; soft/hard/reset tick counts should be in increasing order\n");
+		return -EINVAL;
+	}
+
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS
+	dev_dbg(kbdev->dev, "Job Scheduling Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.",
+			jsdd->soft_stop_ticks,
+			jsdd->scheduling_period_ns);
+#endif
+#if KBASE_DISABLE_SCHEDULING_HARD_STOPS
+	dev_dbg(kbdev->dev, "Job Scheduling Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%u and hard_stop_ticks_dumping==%u at %uns per tick. Other hard-stops may still occur.",
+			jsdd->hard_stop_ticks_ss,
+			jsdd->hard_stop_ticks_dumping,
+			jsdd->scheduling_period_ns);
+#endif
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS && KBASE_DISABLE_SCHEDULING_HARD_STOPS
+	dev_dbg(kbdev->dev, "Note: The JS tick timer (if coded) will still be run, but do nothing.");
+#endif
+
+	for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i)
+		jsdd->js_reqs[i] = core_reqs_from_jsn_features(
+			kbdev->gpu_props.props.raw_props.js_features[i]);
+
+	/* On error, we could continue on, provided none of the resources below
+	 * rely on the ones above */
+
+	mutex_init(&jsdd->runpool_mutex);
+	mutex_init(&jsdd->queue_mutex);
+	spin_lock_init(&kbdev->hwaccess_lock);
+	sema_init(&jsdd->schedule_sem, 1);
+
+	for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) {
+		for (j = 0; j < KBASE_JS_ATOM_SCHED_PRIO_COUNT; ++j) {
+			INIT_LIST_HEAD(&jsdd->ctx_list_pullable[i][j]);
+			INIT_LIST_HEAD(&jsdd->ctx_list_unpullable[i][j]);
+		}
+	}
+
+	return 0;
+}
+
+void kbasep_js_devdata_halt(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+void kbasep_js_devdata_term(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata;
+	s8 zero_ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT] = { 0, };
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	js_devdata = &kbdev->js_data;
+
+	/* The caller must de-register all contexts before calling this */
+	KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running == 0);
+	KBASE_DEBUG_ASSERT(memcmp(
+				  js_devdata->runpool_irq.ctx_attr_ref_count,
+				  zero_ctx_attr_ref_count,
+				  sizeof(zero_ctx_attr_ref_count)) == 0);
+	CSTD_UNUSED(zero_ctx_attr_ref_count);
+}
+
+int kbasep_js_kctx_init(struct kbase_context *const kctx)
+{
+	struct kbase_device *kbdev;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	int i, j;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	kbdev = kctx->kbdev;
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
+		INIT_LIST_HEAD(&kctx->jctx.sched_info.ctx.ctx_list_entry[i]);
+
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	js_kctx_info->ctx.nr_jobs = 0;
+	kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
+	kbase_ctx_flag_clear(kctx, KCTX_DYING);
+	memset(js_kctx_info->ctx.ctx_attr_ref_count, 0,
+			sizeof(js_kctx_info->ctx.ctx_attr_ref_count));
+
+	/* Initially, the context is disabled from submission until the create
+	 * flags are set */
+	kbase_ctx_flag_set(kctx, KCTX_SUBMIT_DISABLED);
+
+	/* On error, we could continue on, provided none of the resources below
+	 * rely on the ones above */
+	mutex_init(&js_kctx_info->ctx.jsctx_mutex);
+
+	init_waitqueue_head(&js_kctx_info->ctx.is_scheduled_wait);
+
+	for (i = 0; i < KBASE_JS_ATOM_SCHED_PRIO_COUNT; i++) {
+		for (j = 0; j < BASE_JM_MAX_NR_SLOTS; j++) {
+			INIT_LIST_HEAD(&kctx->jsctx_queue[i][j].x_dep_head);
+			kctx->jsctx_queue[i][j].runnable_tree = RB_ROOT;
+		}
+	}
+
+	return 0;
+}
+
+void kbasep_js_kctx_term(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	int js;
+	bool update_ctx_count = false;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	kbdev = kctx->kbdev;
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	/* The caller must de-register all jobs before calling this */
+	KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs == 0);
+
+	mutex_lock(&kbdev->js_data.queue_mutex);
+	mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
+		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)) {
+		WARN_ON(atomic_read(&kbdev->js_data.nr_contexts_runnable) <= 0);
+		atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+		update_ctx_count = true;
+		kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+	}
+
+	mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	mutex_unlock(&kbdev->js_data.queue_mutex);
+
+	if (update_ctx_count) {
+		mutex_lock(&kbdev->js_data.runpool_mutex);
+		kbase_backend_ctx_count_changed(kbdev);
+		mutex_unlock(&kbdev->js_data.runpool_mutex);
+	}
+}
+
+/**
+ * kbase_js_ctx_list_add_pullable_nolock - Variant of
+ *                                         kbase_js_ctx_list_add_pullable()
+ *                                         where the caller must hold
+ *                                         hwaccess_lock
+ * @kbdev:  Device pointer
+ * @kctx:   Context to add to queue
+ * @js:     Job slot to use
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						int js)
+{
+	bool ret = false;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
+		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+	list_add_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+			&kbdev->js_data.ctx_list_pullable[js][kctx->priority]);
+
+	if (!kctx->slots_pullable) {
+		kbdev->js_data.nr_contexts_pullable++;
+		ret = true;
+		if (!atomic_read(&kctx->atoms_pulled)) {
+			WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+			kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
+			atomic_inc(&kbdev->js_data.nr_contexts_runnable);
+		}
+	}
+	kctx->slots_pullable |= (1 << js);
+
+	return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_pullable_head_nolock - Variant of
+ *                                              kbase_js_ctx_list_add_pullable_head()
+ *                                              where the caller must hold
+ *                                              hwaccess_lock
+ * @kbdev:  Device pointer
+ * @kctx:   Context to add to queue
+ * @js:     Job slot to use
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return:  true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_head_nolock(
+		struct kbase_device *kbdev, struct kbase_context *kctx, int js)
+{
+	bool ret = false;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
+		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+	list_add(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+			&kbdev->js_data.ctx_list_pullable[js][kctx->priority]);
+
+	if (!kctx->slots_pullable) {
+		kbdev->js_data.nr_contexts_pullable++;
+		ret = true;
+		if (!atomic_read(&kctx->atoms_pulled)) {
+			WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+			kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
+			atomic_inc(&kbdev->js_data.nr_contexts_runnable);
+		}
+	}
+	kctx->slots_pullable |= (1 << js);
+
+	return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_pullable_head - Add context to the head of the
+ *                                       per-slot pullable context queue
+ * @kbdev:  Device pointer
+ * @kctx:   Context to add to queue
+ * @js:     Job slot to use
+ *
+ * If the context is on either the pullable or unpullable queues, then it is
+ * removed before being added to the head.
+ *
+ * This function should be used when a context has been scheduled, but no jobs
+ * can currently be pulled from it.
+ *
+ * Return:  true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_head(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						int js)
+{
+	bool ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	ret = kbase_js_ctx_list_add_pullable_head_nolock(kbdev, kctx, js);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_unpullable_nolock - Add context to the tail of the
+ *                                           per-slot unpullable context queue
+ * @kbdev:  Device pointer
+ * @kctx:   Context to add to queue
+ * @js:     Job slot to use
+ *
+ * The context must already be on the per-slot pullable queue. It will be
+ * removed from the pullable queue before being added to the unpullable queue.
+ *
+ * This function should be used when a context has been pulled from, and there
+ * are no jobs remaining on the specified slot.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return:  true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						int js)
+{
+	bool ret = false;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+		&kbdev->js_data.ctx_list_unpullable[js][kctx->priority]);
+
+	if (kctx->slots_pullable == (1 << js)) {
+		kbdev->js_data.nr_contexts_pullable--;
+		ret = true;
+		if (!atomic_read(&kctx->atoms_pulled)) {
+			WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+			kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+		}
+	}
+	kctx->slots_pullable &= ~(1 << js);
+
+	return ret;
+}
+
+/**
+ * kbase_js_ctx_list_remove_nolock - Remove context from the per-slot pullable
+ *                                   or unpullable context queues
+ * @kbdev:  Device pointer
+ * @kctx:   Context to remove from queue
+ * @js:     Job slot to use
+ *
+ * The context must already be on one of the queues.
+ *
+ * This function should be used when a context has no jobs on the GPU, and no
+ * jobs remaining for the specified slot.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return:  true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_remove_nolock(struct kbase_device *kbdev,
+					struct kbase_context *kctx,
+					int js)
+{
+	bool ret = false;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ON(list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]));
+
+	list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+	if (kctx->slots_pullable == (1 << js)) {
+		kbdev->js_data.nr_contexts_pullable--;
+		ret = true;
+		if (!atomic_read(&kctx->atoms_pulled)) {
+			WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+			kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+		}
+	}
+	kctx->slots_pullable &= ~(1 << js);
+
+	return ret;
+}
+
+/**
+ * kbase_js_ctx_list_pop_head_nolock - Variant of kbase_js_ctx_list_pop_head()
+ *                                     where the caller must hold
+ *                                     hwaccess_lock
+ * @kbdev:  Device pointer
+ * @js:     Job slot to use
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return:  Context to use for specified slot.
+ *          NULL if no contexts present for specified slot
+ */
+static struct kbase_context *kbase_js_ctx_list_pop_head_nolock(
+						struct kbase_device *kbdev,
+						int js)
+{
+	struct kbase_context *kctx;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (i = 0; i < KBASE_JS_ATOM_SCHED_PRIO_COUNT; i++) {
+		if (list_empty(&kbdev->js_data.ctx_list_pullable[js][i]))
+			continue;
+
+		kctx = list_entry(kbdev->js_data.ctx_list_pullable[js][i].next,
+				struct kbase_context,
+				jctx.sched_info.ctx.ctx_list_entry[js]);
+
+		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+		return kctx;
+	}
+	return NULL;
+}
+
+/**
+ * kbase_js_ctx_list_pop_head - Pop the head context off the per-slot pullable
+ *                              queue.
+ * @kbdev:  Device pointer
+ * @js:     Job slot to use
+ *
+ * Return:  Context to use for specified slot.
+ *          NULL if no contexts present for specified slot
+ */
+static struct kbase_context *kbase_js_ctx_list_pop_head(
+		struct kbase_device *kbdev, int js)
+{
+	struct kbase_context *kctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kctx = kbase_js_ctx_list_pop_head_nolock(kbdev, js);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return kctx;
+}
+
+/**
+ * kbase_js_ctx_pullable - Return whether a context can be pulled from on the
+ *                         specified slot
+ * @kctx:          Context pointer
+ * @js:            Job slot to use
+ * @is_scheduled:  true if the context is currently scheduled
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return:         true if context can be pulled from on specified slot
+ *                 false otherwise
+ */
+static bool kbase_js_ctx_pullable(struct kbase_context *kctx, int js,
+					bool is_scheduled)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbase_jd_atom *katom;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	js_devdata = &kctx->kbdev->js_data;
+
+	if (is_scheduled) {
+		if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+			return false;
+	}
+	katom = jsctx_rb_peek(kctx, js);
+	if (!katom)
+		return false; /* No pullable atoms */
+	if (kctx->blocked_js[js][katom->sched_priority])
+		return false;
+	if (atomic_read(&katom->blocked))
+		return false; /* next atom blocked */
+	if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) {
+		if (katom->x_pre_dep->gpu_rb_state ==
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB ||
+					katom->x_pre_dep->will_fail_event_code)
+			return false;
+		if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
+				kbase_backend_nr_atoms_on_slot(kctx->kbdev, js))
+			return false;
+	}
+
+	return true;
+}
+
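+/**
+ * kbase_js_dep_validate - Check if an atom's dependencies can be represented
+ *                         in the ringbuffer
+ * @kctx:  Context pointer
+ * @katom: Atom to check the dependencies of
+ *
+ * An atom may have at most one same-slot and one cross-slot dependency, both
+ * at the same priority, subject to the further restrictions checked below.
+ * If every dependency can be represented, the dependencies are cleared from
+ * the atom and recorded in its pre_dep/x_pre_dep fields instead.
+ *
+ * Return: true if the dependencies could be represented, false otherwise.
+ */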
+static bool kbase_js_dep_validate(struct kbase_context *kctx,
+				struct kbase_jd_atom *katom)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	bool ret = true;
+	bool has_dep = false, has_x_dep = false;
+	int js = kbase_js_get_slot(kbdev, katom);
+	int prio = katom->sched_priority;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		struct kbase_jd_atom *dep_atom = katom->dep[i].atom;
+
+		if (dep_atom) {
+			int dep_js = kbase_js_get_slot(kbdev, dep_atom);
+			int dep_prio = dep_atom->sched_priority;
+
+			/* Dependent atom must already have been submitted */
+			if (!(dep_atom->atom_flags &
+					KBASE_KATOM_FLAG_JSCTX_IN_TREE)) {
+				ret = false;
+				break;
+			}
+
+			/* Dependencies with different priorities can't
+			 * be represented in the ringbuffer */
+			if (prio != dep_prio) {
+				ret = false;
+				break;
+			}
+
+			if (js == dep_js) {
+				/* Only one same-slot dependency can be
+				 * represented in the ringbuffer */
+				if (has_dep) {
+					ret = false;
+					break;
+				}
+				/* Each dependee atom can only have one
+				 * same-slot dependency */
+				if (dep_atom->post_dep) {
+					ret = false;
+					break;
+				}
+				has_dep = true;
+			} else {
+				/* Only one cross-slot dependency can be
+				 * represented in the ringbuffer */
+				if (has_x_dep) {
+					ret = false;
+					break;
+				}
+				/* Each dependee atom can only have one
+				 * cross-slot dependency */
+				if (dep_atom->x_post_dep) {
+					ret = false;
+					break;
+				}
+				/* The dependee atom can not already be in the
+				 * HW access ringbuffer */
+				if (dep_atom->gpu_rb_state !=
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+					ret = false;
+					break;
+				}
+				/* The dependee atom can not already have
+				 * completed */
+				if (dep_atom->status !=
+						KBASE_JD_ATOM_STATE_IN_JS) {
+					ret = false;
+					break;
+				}
+				/* Cross-slot dependencies must not violate
+				 * PRLAM-8987 affinity restrictions */
+				if (kbase_hw_has_issue(kbdev,
+							BASE_HW_ISSUE_8987) &&
+						(js == 2 || dep_js == 2)) {
+					ret = false;
+					break;
+				}
+				has_x_dep = true;
+			}
+
+			/* Dependency can be represented in ringbuffers */
+		}
+	}
+
+	/* If dependencies can be represented by ringbuffer then clear them from
+	 * atom structure */
+	if (ret) {
+		for (i = 0; i < 2; i++) {
+			struct kbase_jd_atom *dep_atom = katom->dep[i].atom;
+
+			if (dep_atom) {
+				int dep_js = kbase_js_get_slot(kbdev, dep_atom);
+
+				if ((js != dep_js) &&
+					(dep_atom->status !=
+						KBASE_JD_ATOM_STATE_COMPLETED)
+					&& (dep_atom->status !=
+					KBASE_JD_ATOM_STATE_HW_COMPLETED)
+					&& (dep_atom->status !=
+						KBASE_JD_ATOM_STATE_UNUSED)) {
+
+					katom->atom_flags |=
+						KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+					katom->x_pre_dep = dep_atom;
+					dep_atom->x_post_dep = katom;
+					if (kbase_jd_katom_dep_type(
+							&katom->dep[i]) ==
+							BASE_JD_DEP_TYPE_DATA)
+						katom->atom_flags |=
+						KBASE_KATOM_FLAG_FAIL_BLOCKER;
+				}
+				if ((kbase_jd_katom_dep_type(&katom->dep[i])
+						== BASE_JD_DEP_TYPE_DATA) &&
+						(js == dep_js)) {
+					katom->pre_dep = dep_atom;
+					dep_atom->post_dep = katom;
+				}
+
+				list_del(&katom->dep_item[i]);
+				kbase_jd_katom_dep_clear(&katom->dep[i]);
+			}
+		}
+	}
+
+	return ret;
+}
+
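+/**
+ * kbase_js_set_ctx_priority - Set the priority of a context
+ * @kctx:         Context pointer
+ * @new_priority: New priority to set the context to
+ *
+ * Moves the context to the pullable/unpullable list of each job slot that
+ * matches @new_priority. Caller must hold hwaccess_lock.
+ */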
+void kbase_js_set_ctx_priority(struct kbase_context *kctx, int new_priority)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	int js;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* Move kctx to the pullable/unpullable list as per the new priority */
+	if (new_priority != kctx->priority) {
+		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+			if (kctx->slots_pullable & (1 << js))
+				list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+					&kbdev->js_data.ctx_list_pullable[js][new_priority]);
+			else
+				list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+					&kbdev->js_data.ctx_list_unpullable[js][new_priority]);
+		}
+
+		kctx->priority = new_priority;
+	}
+}
+
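+/**
+ * kbase_js_update_ctx_priority - Recompute the priority of a context
+ * @kctx: Context pointer
+ *
+ * In KBASE_JS_SYSTEM_PRIORITY_MODE the context priority follows the highest
+ * priority among its in-use atoms; otherwise the default low priority is
+ * used. Caller must hold hwaccess_lock.
+ */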
+void kbase_js_update_ctx_priority(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	int new_priority = KBASE_JS_ATOM_SCHED_PRIO_LOW;
+	int prio;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (kbdev->js_ctx_scheduling_mode == KBASE_JS_SYSTEM_PRIORITY_MODE) {
+		/* Determine the new priority for context, as per the priority
+		 * of currently in-use atoms.
+		 */
+		for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+			if (kctx->atoms_count[prio]) {
+				new_priority = prio;
+				break;
+			}
+		}
+	}
+
+	kbase_js_set_ctx_priority(kctx, new_priority);
+}
+
+bool kbasep_js_add_job(struct kbase_context *kctx,
+		struct kbase_jd_atom *atom)
+{
+	unsigned long flags;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	struct kbase_device *kbdev;
+	struct kbasep_js_device_data *js_devdata;
+
+	bool enqueue_required = false;
+	bool timer_sync = false;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(atom != NULL);
+	lockdep_assert_held(&kctx->jctx.lock);
+
+	kbdev = kctx->kbdev;
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	mutex_lock(&js_devdata->queue_mutex);
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+	/*
+	 * Begin Runpool transaction
+	 */
+	mutex_lock(&js_devdata->runpool_mutex);
+
+	/* Refcount ctx.nr_jobs */
+	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs < U32_MAX);
+	++(js_kctx_info->ctx.nr_jobs);
+
+	/* Lock for state available during IRQ */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (++kctx->atoms_count[atom->sched_priority] == 1)
+		kbase_js_update_ctx_priority(kctx);
+
+	if (!kbase_js_dep_validate(kctx, atom)) {
+		/* Dependencies could not be represented */
+		--(js_kctx_info->ctx.nr_jobs);
+
+		/* Setting atom status back to queued as it still has unresolved
+		 * dependencies */
+		atom->status = KBASE_JD_ATOM_STATE_QUEUED;
+
+		/* Undo the count, as the atom will get added again later, but
+		 * leave the context priority adjusted or boosted, in case
+		 * this was the first higher priority atom received for this
+		 * context.
+		 * This prevents a priority inversion scenario, where another
+		 * context holding only medium priority atoms keeps getting
+		 * scheduled over this context, which holds both lower and
+		 * higher priority atoms whose higher priority atoms are
+		 * blocked by dependencies on its lower priority atoms. With
+		 * the priority boost, the high priority atom will get to run
+		 * at the earliest opportunity.
+		 */
+		kctx->atoms_count[atom->sched_priority]--;
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&js_devdata->runpool_mutex);
+
+		goto out_unlock;
+	}
+
+	enqueue_required = kbase_js_dep_resolved_submit(kctx, atom);
+
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_ADD_JOB, kctx, atom, atom->jc,
+				kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+	/* Context Attribute Refcounting */
+	kbasep_js_ctx_attr_ctx_retain_atom(kbdev, kctx, atom);
+
+	if (enqueue_required) {
+		if (kbase_js_ctx_pullable(kctx, atom->slot_nr, false))
+			timer_sync = kbase_js_ctx_list_add_pullable_nolock(
+					kbdev, kctx, atom->slot_nr);
+		else
+			timer_sync = kbase_js_ctx_list_add_unpullable_nolock(
+					kbdev, kctx, atom->slot_nr);
+	}
+	/* If this context is active and the atom is the first on its slot,
+	 * kick the job manager to attempt to fast-start the atom */
+	if (enqueue_required && kctx ==
+			kbdev->hwaccess.active_kctx[atom->slot_nr])
+		kbase_jm_try_kick(kbdev, 1 << atom->slot_nr);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	if (timer_sync)
+		kbase_backend_ctx_count_changed(kbdev);
+	mutex_unlock(&js_devdata->runpool_mutex);
+	/* End runpool transaction */
+
+	if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
+		if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+			/* A job got added while/after kbase_job_zap_context()
+			 * was called on a non-scheduled context. Kill that job
+			 * by killing the context. */
+			kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx,
+					false);
+		} else if (js_kctx_info->ctx.nr_jobs == 1) {
+			/* Handle Refcount going from 0 to 1: schedule the
+			 * context on the Queue */
+			KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+			dev_dbg(kbdev->dev, "JS: Enqueue Context %p", kctx);
+
+			/* Queue was updated - caller must try to
+			 * schedule the head context */
+			WARN_ON(!enqueue_required);
+		}
+	}
+out_unlock:
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+	mutex_unlock(&js_devdata->queue_mutex);
+
+	return enqueue_required;
+}
+
+void kbasep_js_remove_job(struct kbase_device *kbdev,
+		struct kbase_context *kctx, struct kbase_jd_atom *atom)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(atom != NULL);
+
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_REMOVE_JOB, kctx, atom, atom->jc,
+			kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+	/* De-refcount ctx.nr_jobs */
+	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs > 0);
+	--(js_kctx_info->ctx.nr_jobs);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	if (--kctx->atoms_count[atom->sched_priority] == 0)
+		kbase_js_update_ctx_priority(kctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
+		struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	unsigned long flags;
+	struct kbasep_js_atom_retained_state katom_retained_state;
+	bool attr_state_changed;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(katom != NULL);
+
+	kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
+	kbasep_js_remove_job(kbdev, kctx, katom);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	/* The atom has 'finished' (will not be re-run), so no need to call
+	 * kbasep_js_has_atom_finished().
+	 *
+	 * This is because it returns false for soft-stopped atoms, but we
+	 * want to override that, because we're cancelling an atom regardless of
+	 * whether it was soft-stopped or not */
+	attr_state_changed = kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx,
+			&katom_retained_state);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return attr_state_changed;
+}
+
+bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	unsigned long flags;
+	bool result;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	result = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	return result;
+}
+
+struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev,
+		int as_nr)
+{
+	unsigned long flags;
+	struct kbase_context *found_kctx = NULL;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	found_kctx = kbdev->as_to_kctx[as_nr];
+
+	if (found_kctx != NULL)
+		kbase_ctx_sched_retain_ctx_refcount(found_kctx);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return found_kctx;
+}
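+
+/*
+ * Illustrative usage (editorial sketch, not part of the driver): a successful
+ * lookup returns the context with an extra refcount taken, so a caller is
+ * expected to balance it with a release once finished, e.g.:
+ *
+ *	kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_nr);
+ *	if (kctx) {
+ *		// ... operate on the context bound to this address space ...
+ *		kbasep_js_runpool_release_ctx(kbdev, kctx);
+ *	}
+ */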
+
+/**
+ * kbasep_js_run_jobs_after_ctx_and_atom_release - Try running more jobs after
+ *                           releasing a context and/or atom
+ * @kbdev:                   The kbase_device to operate on
+ * @kctx:                    The kbase_context to operate on
+ * @katom_retained_state:    Retained state from the atom
+ * @runpool_ctx_attr_change: True if the runpool context attributes have changed
+ *
+ * This collates a set of actions that must happen whilst hwaccess_lock is held.
+ *
+ * This includes running more jobs when:
+ * - The previously released kctx caused a ctx attribute change,
+ * - The released atom caused a ctx attribute change,
+ * - Slots were previously blocked due to affinity restrictions,
+ * - Submission during IRQ handling failed.
+ *
+ * Return: %KBASEP_JS_RELEASE_RESULT_SCHED_ALL if context attributes were
+ *         changed. The caller should try scheduling all contexts
+ */
+static kbasep_js_release_result kbasep_js_run_jobs_after_ctx_and_atom_release(
+		struct kbase_device *kbdev,
+		struct kbase_context *kctx,
+		struct kbasep_js_atom_retained_state *katom_retained_state,
+		bool runpool_ctx_attr_change)
+{
+	struct kbasep_js_device_data *js_devdata;
+	kbasep_js_release_result result = 0;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(katom_retained_state != NULL);
+	js_devdata = &kbdev->js_data;
+
+	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	lockdep_assert_held(&js_devdata->runpool_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (js_devdata->nr_user_contexts_running != 0 && runpool_ctx_attr_change) {
+		/* A change in runpool ctx attributes might mean we can
+		 * run more jobs than before  */
+		result = KBASEP_JS_RELEASE_RESULT_SCHED_ALL;
+
+		KBASE_TRACE_ADD_SLOT(kbdev, JD_DONE_TRY_RUN_NEXT_JOB,
+					kctx, NULL, 0u, 0);
+	}
+	return result;
+}
+
+/**
+ * kbasep_js_runpool_release_ctx_internal - Internal function to release the reference
+ *                                          on a ctx and an atom's "retained state", only
+ *                                          taking the runpool and AS transaction mutexes
+ * @kbdev:                   The kbase_device to operate on
+ * @kctx:                    The kbase_context to operate on
+ * @katom_retained_state:    Retained state from the atom
+ *
+ * This also starts more jobs running in the case of a ctx-attribute state change.
+ *
+ * This does none of the followup actions for scheduling:
+ * - It does not schedule in a new context
+ * - It does not requeue or handle dying contexts
+ *
+ * For those tasks, just call kbasep_js_runpool_release_ctx() instead
+ *
+ * Has the following requirements:
+ * - Context is scheduled in, and kctx->as_nr matches kctx_as_nr
+ * - Context has a non-zero refcount
+ * - Caller holds js_kctx_info->ctx.jsctx_mutex
+ * - Caller holds js_devdata->runpool_mutex
+ *
+ * Return: A bitpattern, containing KBASEP_JS_RELEASE_RESULT_* flags, indicating
+ *         the result of releasing the context, i.e. whether the caller should
+ *         try scheduling a new context or should try scheduling all contexts.
+ */
+static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
+		struct kbase_device *kbdev,
+		struct kbase_context *kctx,
+		struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+	unsigned long flags;
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+
+	kbasep_js_release_result release_result = 0u;
+	bool runpool_ctx_attr_change = false;
+	int kctx_as_nr;
+	int new_ref_count;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	js_kctx_info = &kctx->jctx.sched_info;
+	js_devdata = &kbdev->js_data;
+
+	/* Ensure context really is scheduled in */
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	kctx_as_nr = kctx->as_nr;
+	KBASE_DEBUG_ASSERT(kctx_as_nr != KBASEP_AS_NR_INVALID);
+	KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0);
+
+	/*
+	 * Transaction begins on AS and runpool_irq
+	 *
+	 * Assert about our calling contract
+	 */
+	mutex_lock(&kbdev->pm.lock);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr);
+	KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0);
+
+	/* Update refcount */
+	kbase_ctx_sched_release_ctx(kctx);
+	new_ref_count = atomic_read(&kctx->refcount);
+
+	/* Release the atom if it finished (i.e. wasn't soft-stopped) */
+	if (kbasep_js_has_atom_finished(katom_retained_state))
+		runpool_ctx_attr_change |= kbasep_js_ctx_attr_ctx_release_atom(
+				kbdev, kctx, katom_retained_state);
+
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RELEASE_CTX, kctx, NULL, 0u,
+			new_ref_count);
+
+	if (new_ref_count == 2 && kbase_ctx_flag(kctx, KCTX_PRIVILEGED) &&
+			!kbase_pm_is_suspending(kbdev)) {
+		/* Context is kept scheduled into an address space even when
+		 * there are no jobs, in this case we have to handle the
+		 * situation where all jobs have been evicted from the GPU and
+		 * submission is disabled.
+		 *
+		 * At this point we re-enable submission to allow further jobs
+		 * to be executed
+		 */
+		kbasep_js_set_submit_allowed(js_devdata, kctx);
+	}
+
+	/* Make a set of checks to see if the context should be scheduled out.
+	 * Note that there'll always be at least 1 reference to the context
+	 * which was previously acquired by kbasep_js_schedule_ctx(). */
+	if (new_ref_count == 1 &&
+		(!kbasep_js_is_submit_allowed(js_devdata, kctx) ||
+							kbdev->pm.suspending)) {
+		int num_slots = kbdev->gpu_props.num_job_slots;
+		int slot;
+
+		/* Last reference, and we've been told to remove this context
+		 * from the Run Pool */
+		dev_dbg(kbdev->dev, "JS: RunPool Remove Context %p because refcount=%d, jobs=%d, allowed=%d",
+				kctx, new_ref_count, js_kctx_info->ctx.nr_jobs,
+				kbasep_js_is_submit_allowed(js_devdata, kctx));
+
+		KBASE_TLSTREAM_TL_NRET_AS_CTX(kbdev, &kbdev->as[kctx->as_nr], kctx);
+
+		kbase_backend_release_ctx_irq(kbdev, kctx);
+
+		for (slot = 0; slot < num_slots; slot++) {
+			if (kbdev->hwaccess.active_kctx[slot] == kctx)
+				kbdev->hwaccess.active_kctx[slot] = NULL;
+		}
+
+		/* Ctx Attribute handling
+		 *
+		 * Releasing the atom's attributes must either happen before
+		 * this, or after the KCTX_SCHEDULED flag is changed, otherwise
+		 * we would decrement the attribute counts twice
+		 */
+		runpool_ctx_attr_change |=
+			kbasep_js_ctx_attr_runpool_release_ctx(kbdev, kctx);
+
+		/* Releasing the context and katom retained state can allow
+		 * more jobs to run */
+		release_result |=
+			kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev,
+						kctx, katom_retained_state,
+						runpool_ctx_attr_change);
+
+		/*
+		 * Transaction ends on AS and runpool_irq:
+		 *
+		 * By this point, the AS-related data is now clear and ready
+		 * for re-use.
+		 *
+		 * Since releases only occur once for each previous successful
+		 * retain, and no more retains are allowed on this context, no
+		 * other thread will be operating in this code whilst we are
+		 */
+
+		/* Recalculate pullable status for all slots */
+		for (slot = 0; slot < num_slots; slot++) {
+			if (kbase_js_ctx_pullable(kctx, slot, false))
+				kbase_js_ctx_list_add_pullable_nolock(kbdev,
+						kctx, slot);
+		}
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+		kbase_backend_release_ctx_noirq(kbdev, kctx);
+
+		mutex_unlock(&kbdev->pm.lock);
+
+		/* Note: Don't reuse kctx_as_nr now */
+
+		/* Synchronize with any timers */
+		kbase_backend_ctx_count_changed(kbdev);
+
+		/* update book-keeping info */
+		kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
+		/* Signal any waiter that the context is not scheduled, so is
+		 * safe for termination - once the jsctx_mutex is also dropped,
+		 * and jobs have finished. */
+		wake_up(&js_kctx_info->ctx.is_scheduled_wait);
+
+		/* Queue an action to occur after we've dropped the lock */
+		release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED |
+			KBASEP_JS_RELEASE_RESULT_SCHED_ALL;
+	} else {
+		kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx,
+				katom_retained_state, runpool_ctx_attr_change);
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&kbdev->pm.lock);
+	}
+
+	return release_result;
+}
+
+void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
+						struct kbase_context *kctx)
+{
+	struct kbasep_js_atom_retained_state katom_retained_state;
+
+	/* Setup a dummy katom_retained_state */
+	kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
+
+	kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
+							&katom_retained_state);
+}
+
+void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev,
+		struct kbase_context *kctx, bool has_pm_ref)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	/* This is called if and only if you've detached the context from
+	 * the Runpool Queue, and not added it back to the Runpool
+	 */
+	KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+		/* Dying: don't requeue, but kill all jobs on the context. This
+		 * happens asynchronously */
+		dev_dbg(kbdev->dev,
+			"JS: ** Killing Context %p on RunPool Remove **", kctx);
+		kbase_js_foreach_ctx_job(kctx, &kbase_jd_cancel);
+	}
+}
+
+void kbasep_js_runpool_release_ctx_and_katom_retained_state(
+		struct kbase_device *kbdev, struct kbase_context *kctx,
+		struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	kbasep_js_release_result release_result;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	js_kctx_info = &kctx->jctx.sched_info;
+	js_devdata = &kbdev->js_data;
+
+	mutex_lock(&js_devdata->queue_mutex);
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_lock(&js_devdata->runpool_mutex);
+
+	release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
+			katom_retained_state);
+
+	/* Drop the runpool mutex to allow requeueing kctx */
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
+		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);
+
+	/* Drop the jsctx_mutex to allow scheduling in a new context */
+
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_unlock(&js_devdata->queue_mutex);
+
+	if (release_result & KBASEP_JS_RELEASE_RESULT_SCHED_ALL)
+		kbase_js_sched_all(kbdev);
+}
+
+void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	struct kbasep_js_atom_retained_state katom_retained_state;
+
+	kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
+
+	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+			&katom_retained_state);
+}
+
+/* Variant of kbasep_js_runpool_release_ctx() that doesn't call into
+ * kbase_js_sched_all() */
+static void kbasep_js_runpool_release_ctx_no_schedule(
+		struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	kbasep_js_release_result release_result;
+	struct kbasep_js_atom_retained_state katom_retained_state_struct;
+	struct kbasep_js_atom_retained_state *katom_retained_state =
+		&katom_retained_state_struct;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	js_kctx_info = &kctx->jctx.sched_info;
+	js_devdata = &kbdev->js_data;
+	kbasep_js_atom_retained_state_init_invalid(katom_retained_state);
+
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_lock(&js_devdata->runpool_mutex);
+
+	release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
+			katom_retained_state);
+
+	/* Drop the runpool mutex to allow requeueing kctx */
+	mutex_unlock(&js_devdata->runpool_mutex);
+	if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
+		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);
+
+	/* Drop the jsctx_mutex to allow scheduling in a new context */
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+	/* NOTE: could return release_result if the caller would like to know
+	 * whether it should schedule a new context, but currently no callers do
+	 */
+}
+
+void kbase_js_set_timeouts(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbase_backend_timeouts_changed(kbdev);
+}
+
+static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
+					struct kbase_context *kctx,
+					int js)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	unsigned long flags;
+	bool kctx_suspended = false;
+	int as_nr;
+
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	/* Pick available address space for this context */
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	as_nr = kbase_ctx_sched_retain_ctx(kctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+	if (as_nr == KBASEP_AS_NR_INVALID) {
+		as_nr = kbase_backend_find_and_release_free_address_space(
+				kbdev, kctx);
+		if (as_nr != KBASEP_AS_NR_INVALID) {
+			/* Attempt to retain the context again, this should
+			 * succeed */
+			mutex_lock(&kbdev->mmu_hw_mutex);
+			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+			as_nr = kbase_ctx_sched_retain_ctx(kctx);
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+			mutex_unlock(&kbdev->mmu_hw_mutex);
+
+			WARN_ON(as_nr == KBASEP_AS_NR_INVALID);
+		}
+	}
+	if (as_nr == KBASEP_AS_NR_INVALID)
+		return false; /* No address spaces currently available */
+
+	/*
+	 * Atomic transaction on the Context and Run Pool begins
+	 */
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	/* Check to see if context is dying due to kbase_job_zap_context() */
+	if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+		/* Roll back the transaction so far and return */
+		kbase_ctx_sched_release_ctx(kctx);
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&kbdev->mmu_hw_mutex);
+		mutex_unlock(&js_devdata->runpool_mutex);
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+		return false;
+	}
+
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_TRY_SCHEDULE_HEAD_CTX, kctx, NULL,
+				0u,
+				kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+	kbase_ctx_flag_set(kctx, KCTX_SCHEDULED);
+
+	/* Assign context to previously chosen address space */
+	if (!kbase_backend_use_ctx(kbdev, kctx, as_nr)) {
+		/* Roll back the transaction so far and return */
+		kbase_ctx_sched_release_ctx(kctx);
+		kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&kbdev->mmu_hw_mutex);
+		mutex_unlock(&js_devdata->runpool_mutex);
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+		return false;
+	}
+
+	kbdev->hwaccess.active_kctx[js] = kctx;
+
+	KBASE_TLSTREAM_TL_RET_AS_CTX(kbdev, &kbdev->as[kctx->as_nr], kctx);
+
+	/* Cause any future waiter-on-termination to wait until the context is
+	 * descheduled */
+	wake_up(&js_kctx_info->ctx.is_scheduled_wait);
+
+	/* Re-check for suspending: a suspend could've occurred, and all the
+	 * contexts could've been removed from the runpool before we took this
+	 * lock. In this case, we don't want to allow this context to run jobs,
+	 * we just want it out immediately.
+	 *
+	 * The DMB required to read the suspend flag was issued recently as part
+	 * of the hwaccess_lock locking. If a suspend occurs *after* that lock
+	 * was taken (i.e. this condition doesn't execute), then the
+	 * kbasep_js_suspend() code will cleanup this context instead (by virtue
+	 * of it being called strictly after the suspend flag is set, and will
+	 * wait for this lock to drop) */
+	if (kbase_pm_is_suspending(kbdev)) {
+		/* Cause it to leave at some later point */
+		bool retained;
+
+		retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+		KBASE_DEBUG_ASSERT(retained);
+
+		kbasep_js_clear_submit_allowed(js_devdata, kctx);
+		kctx_suspended = true;
+	}
+
+	kbase_ctx_flag_clear(kctx, KCTX_PULLED_SINCE_ACTIVE_JS0 << js);
+
+	/* Transaction complete */
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	/* Synchronize with any timers */
+	kbase_backend_ctx_count_changed(kbdev);
+
+	mutex_unlock(&js_devdata->runpool_mutex);
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	/* Note: after this point, the context could potentially get scheduled
+	 * out immediately */
+
+	if (kctx_suspended) {
+		/* Finishing forcing out the context due to a suspend. Use a
+		 * variant of kbasep_js_runpool_release_ctx() that doesn't
+		 * schedule a new context, to prevent a risk of recursion back
+		 * into this function */
+		kbasep_js_runpool_release_ctx_no_schedule(kbdev, kctx);
+		return false;
+	}
+	return true;
+}
+
+static bool kbase_js_use_ctx(struct kbase_device *kbdev,
+				struct kbase_context *kctx,
+				int js)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
+			kbase_backend_use_ctx_sched(kbdev, kctx, js)) {
+		/* Context already has ASID - mark as active */
+		if (kbdev->hwaccess.active_kctx[js] != kctx) {
+			kbdev->hwaccess.active_kctx[js] = kctx;
+			kbase_ctx_flag_clear(kctx,
+					KCTX_PULLED_SINCE_ACTIVE_JS0 << js);
+		}
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return true; /* Context already scheduled */
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	return kbasep_js_schedule_ctx(kbdev, kctx, js);
+}
+
+void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+	struct kbasep_js_device_data *js_devdata;
+	bool is_scheduled;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	/* This must never be attempted whilst suspending - i.e. it should only
+	 * happen in response to a syscall from a user-space thread */
+	BUG_ON(kbase_pm_is_suspending(kbdev));
+
+	mutex_lock(&js_devdata->queue_mutex);
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+	/* Mark the context as privileged */
+	kbase_ctx_flag_set(kctx, KCTX_PRIVILEGED);
+
+	is_scheduled = kbase_ctx_flag(kctx, KCTX_SCHEDULED);
+	if (!is_scheduled) {
+		/* Add the context to the pullable list */
+		if (kbase_js_ctx_list_add_pullable_head(kbdev, kctx, 0))
+			kbase_js_sync_timers(kbdev);
+
+		/* Fast-starting requires the jsctx_mutex to be dropped,
+		 * because it works on multiple ctxs */
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		mutex_unlock(&js_devdata->queue_mutex);
+
+		/* Try to schedule the context in */
+		kbase_js_sched_all(kbdev);
+
+		/* Wait for the context to be scheduled in */
+		wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait,
+			   kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+	} else {
+		/* Already scheduled in - We need to retain it to keep the
+		 * corresponding address space */
+		WARN_ON(!kbasep_js_runpool_retain_ctx(kbdev, kctx));
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		mutex_unlock(&js_devdata->queue_mutex);
+	}
+}
+KBASE_EXPORT_TEST_API(kbasep_js_schedule_privileged_ctx);
+
+void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	/* We don't need to use the address space anymore */
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	kbase_ctx_flag_clear(kctx, KCTX_PRIVILEGED);
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+	/* Release the context - it will be scheduled out */
+	kbasep_js_runpool_release_ctx(kbdev, kctx);
+
+	kbase_js_sched_all(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbasep_js_release_privileged_ctx);
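+
+/*
+ * Illustrative pairing (editorial sketch, not part of the driver): privileged
+ * scheduling is expected to be bracketed so that the address space stays held
+ * for the duration of the privileged work, e.g.:
+ *
+ *	kbasep_js_schedule_privileged_ctx(kbdev, kctx);
+ *	// ... work that requires the context to remain scheduled in ...
+ *	kbasep_js_release_privileged_ctx(kbdev, kctx);
+ */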
+
+void kbasep_js_suspend(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	struct kbasep_js_device_data *js_devdata;
+	int i;
+	u16 retained = 0u;
+	int nr_privileged_ctx = 0;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+	KBASE_DEBUG_ASSERT(kbase_pm_is_suspending(kbdev));
+	js_devdata = &kbdev->js_data;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	/* Prevent all contexts from submitting */
+	js_devdata->runpool_irq.submit_allowed = 0;
+
+	/* Retain each of the contexts, so we can cause each to leave even if
+	 * it had no refcount to begin with */
+	for (i = BASE_MAX_NR_AS - 1; i >= 0; --i) {
+		struct kbase_context *kctx = kbdev->as_to_kctx[i];
+
+		retained = retained << 1;
+
+		if (kctx && !(kbdev->as_free & (1u << i))) {
+			kbase_ctx_sched_retain_ctx_refcount(kctx);
+			retained |= 1u;
+			/* We can only cope with up to 1 privileged context -
+			 * the instrumented context. It'll be suspended by
+			 * disabling instrumentation */
+			if (kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) {
+				++nr_privileged_ctx;
+				WARN_ON(nr_privileged_ctx != 1);
+			}
+		}
+	}
+	CSTD_UNUSED(nr_privileged_ctx);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* De-ref the previous retain to ensure each context gets pulled out
+	 * sometime later. */
+	for (i = 0;
+		 i < BASE_MAX_NR_AS;
+		 ++i, retained = retained >> 1) {
+		struct kbase_context *kctx = kbdev->as_to_kctx[i];
+
+		if (retained & 1u)
+			kbasep_js_runpool_release_ctx(kbdev, kctx);
+	}
+
+	/* Caller must wait for all Power Manager active references to be
+	 * dropped */
+}
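+
+/*
+ * Illustrative power-management flow (editorial sketch, not part of the
+ * driver): the suspending flag is assumed to be set before this call and
+ * cleared again before resuming, matching the asserts in kbasep_js_suspend()
+ * and kbasep_js_resume():
+ *
+ *	// PM core sets the suspending flag first
+ *	kbasep_js_suspend(kbdev);
+ *	// ... wait for PM active references to drop, then power down ...
+ *	// PM core clears the suspending flag on the way back up
+ *	kbasep_js_resume(kbdev);
+ */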
+
+void kbasep_js_resume(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata;
+	int js, prio;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+	js_devdata = &kbdev->js_data;
+	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+
+	mutex_lock(&js_devdata->queue_mutex);
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+		for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+			struct kbase_context *kctx, *n;
+			unsigned long flags;
+
+			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+			list_for_each_entry_safe(kctx, n,
+				 &kbdev->js_data.ctx_list_unpullable[js][prio],
+				 jctx.sched_info.ctx.ctx_list_entry[js]) {
+				struct kbasep_js_kctx_info *js_kctx_info;
+				bool timer_sync = false;
+
+				/* Drop lock so we can take kctx mutexes */
+				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+						flags);
+
+				js_kctx_info = &kctx->jctx.sched_info;
+
+				mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+				mutex_lock(&js_devdata->runpool_mutex);
+				spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+				if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
+					kbase_js_ctx_pullable(kctx, js, false))
+					timer_sync =
+						kbase_js_ctx_list_add_pullable_nolock(
+								kbdev, kctx, js);
+				spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+				if (timer_sync)
+					kbase_backend_ctx_count_changed(kbdev);
+				mutex_unlock(&js_devdata->runpool_mutex);
+				mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+				/* Take lock before accessing list again */
+				spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+			}
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		}
+	}
+	mutex_unlock(&js_devdata->queue_mutex);
+
+	/* Restart atom processing */
+	kbase_js_sched_all(kbdev);
+
+	/* JS Resume complete */
+}
+
+bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom)
+{
+	if ((katom->core_req & BASE_JD_REQ_FS) &&
+	    (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE |
+								BASE_JD_REQ_T)))
+		return false;
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987) &&
+	    (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) &&
+	    (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_T)))
+		return false;
+
+	if ((katom->core_req & BASE_JD_REQ_JOB_SLOT) &&
+			(katom->jobslot >= BASE_JM_MAX_NR_SLOTS))
+		return false;
+
+	return true;
+}
+
+static int kbase_js_get_slot(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom)
+{
+	if (katom->core_req & BASE_JD_REQ_JOB_SLOT)
+		return katom->jobslot;
+
+	if (katom->core_req & BASE_JD_REQ_FS)
+		return 0;
+
+	if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+		if (katom->device_nr == 1 &&
+				kbdev->gpu_props.num_core_groups == 2)
+			return 2;
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+			return 2;
+	}
+
+	return 1;
+}
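+
+/*
+ * Worked examples for the mapping above (editorial sketch): a fragment atom
+ * (BASE_JD_REQ_FS) runs on slot 0; a compute-only atom with device_nr == 1 on
+ * a GPU with two core groups, or any compute-only atom on hardware affected
+ * by issue 8987, runs on slot 2; everything else defaults to slot 1. An
+ * explicit BASE_JD_REQ_JOB_SLOT request overrides all of these choices.
+ */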
+
+bool kbase_js_dep_resolved_submit(struct kbase_context *kctx,
+					struct kbase_jd_atom *katom)
+{
+	bool enqueue_required;
+
+	katom->slot_nr = kbase_js_get_slot(kctx->kbdev, katom);
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+	lockdep_assert_held(&kctx->jctx.lock);
+
+	/* If slot will transition from unpullable to pullable then add to
+	 * pullable list */
+	enqueue_required = jsctx_rb_none_to_pull(kctx, katom->slot_nr);
+	if ((katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) ||
+			(katom->pre_dep && (katom->pre_dep->atom_flags &
+			KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
+		int prio = katom->sched_priority;
+		int js = katom->slot_nr;
+		struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+
+		list_add_tail(&katom->queue, &queue->x_dep_head);
+		katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
+		enqueue_required = false;
+	} else {
+		/* Check if there are lower priority jobs to soft stop */
+		kbase_job_slot_ctx_priority_check_locked(kctx, katom);
+
+		/* Add atom to ring buffer. */
+		jsctx_tree_add(kctx, katom);
+		katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
+	}
+
+	return enqueue_required;
+}
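+
+/*
+ * Illustrative outcome (editorial sketch): an atom still blocked on a
+ * cross-slot dependency (KBASE_KATOM_FLAG_X_DEP_BLOCKED) is parked on the
+ * per-slot x_dep list and does not make the slot pullable; once the
+ * dependency resolves, kbase_js_move_to_tree() below migrates it (and any
+ * same-slot dependents) into the runnable tree.
+ */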
+
+/**
+ * kbase_js_move_to_tree - Move atom (and any dependent atoms) to the
+ *                         runnable_tree, ready for execution
+ * @katom: Atom to submit
+ *
+ * It is assumed that @katom does not have KBASE_KATOM_FLAG_X_DEP_BLOCKED set,
+ * but is still present in the x_dep list. If @katom has a same-slot dependent
+ * atom then that atom (and any dependents) will also be moved.
+ */
+static void kbase_js_move_to_tree(struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&katom->kctx->kbdev->hwaccess_lock);
+
+	while (katom) {
+		WARN_ON(!(katom->atom_flags &
+				KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST));
+
+		if (!(katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)) {
+			list_del(&katom->queue);
+			katom->atom_flags &=
+					~KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
+			jsctx_tree_add(katom->kctx, katom);
+			katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
+		} else {
+			break;
+		}
+
+		katom = katom->post_dep;
+	}
+}
+
+/**
+ * kbase_js_evict_deps - Evict dependencies of a failed atom.
+ * @kctx:       Context pointer
+ * @katom:      Pointer to the atom that has failed.
+ * @js:         The job slot the katom was run on.
+ * @prio:       Priority of the katom.
+ *
+ * Remove all post dependencies of an atom from the context ringbuffers.
+ *
+ * The original atom's event_code will be propagated to all dependent atoms.
+ *
+ * Context: Caller must hold the HW access lock
+ */
+static void kbase_js_evict_deps(struct kbase_context *kctx,
+				struct kbase_jd_atom *katom, int js, int prio)
+{
+	struct kbase_jd_atom *x_dep = katom->x_post_dep;
+	struct kbase_jd_atom *next_katom = katom->post_dep;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	if (next_katom) {
+		KBASE_DEBUG_ASSERT(next_katom->status !=
+				KBASE_JD_ATOM_STATE_HW_COMPLETED);
+		next_katom->will_fail_event_code = katom->event_code;
+	}
+
+	/* Has a cross-slot dependency. */
+	if (x_dep && (x_dep->atom_flags & (KBASE_KATOM_FLAG_JSCTX_IN_TREE |
+				KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
+		/* Remove dependency. */
+		x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+
+		/* Fail if it had a data dependency. */
+		if (x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) {
+			x_dep->will_fail_event_code = katom->event_code;
+		}
+		if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST)
+			kbase_js_move_to_tree(x_dep);
+	}
+}
+
+struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js)
+{
+	struct kbase_jd_atom *katom;
+	struct kbasep_js_device_data *js_devdata;
+	struct kbase_device *kbdev;
+	int pulled;
+
+	KBASE_DEBUG_ASSERT(kctx);
+
+	kbdev = kctx->kbdev;
+
+	js_devdata = &kbdev->js_data;
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+		return NULL;
+	if (kbase_pm_is_suspending(kbdev))
+		return NULL;
+
+	katom = jsctx_rb_peek(kctx, js);
+	if (!katom)
+		return NULL;
+	if (kctx->blocked_js[js][katom->sched_priority])
+		return NULL;
+	if (atomic_read(&katom->blocked))
+		return NULL;
+
+	/* Due to ordering restrictions when unpulling atoms on failure, we do
+	 * not allow multiple runs of fail-dep atoms from the same context to be
+	 * present on the same slot */
+	if (katom->pre_dep && atomic_read(&kctx->atoms_pulled_slot[js])) {
+		struct kbase_jd_atom *prev_atom =
+				kbase_backend_inspect_tail(kbdev, js);
+
+		if (prev_atom && prev_atom->kctx != kctx)
+			return NULL;
+	}
+
+	if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) {
+		if (katom->x_pre_dep->gpu_rb_state ==
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB ||
+					katom->x_pre_dep->will_fail_event_code)
+			return NULL;
+		if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
+				kbase_backend_nr_atoms_on_slot(kbdev, js))
+			return NULL;
+	}
+
+	kbase_ctx_flag_set(kctx, KCTX_PULLED);
+	kbase_ctx_flag_set(kctx, (KCTX_PULLED_SINCE_ACTIVE_JS0 << js));
+
+	pulled = atomic_inc_return(&kctx->atoms_pulled);
+	if (pulled == 1 && !kctx->slots_pullable) {
+		WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+		kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
+		atomic_inc(&kbdev->js_data.nr_contexts_runnable);
+	}
+	atomic_inc(&kctx->atoms_pulled_slot[katom->slot_nr]);
+	kctx->atoms_pulled_slot_pri[katom->slot_nr][katom->sched_priority]++;
+	jsctx_rb_pull(kctx, katom);
+
+	kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+
+	katom->atom_flags |= KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+
+	katom->ticks = 0;
+
+	return katom;
+}
+
+static void js_return_worker(struct work_struct *data)
+{
+	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
+									work);
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
+	struct kbasep_js_atom_retained_state retained_state;
+	int js = katom->slot_nr;
+	int prio = katom->sched_priority;
+	bool timer_sync = false;
+	bool context_idle = false;
+	unsigned long flags;
+	base_jd_core_req core_req = katom->core_req;
+
+	KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX(kbdev, katom);
+
+	kbase_backend_complete_wq(kbdev, katom);
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+		kbase_as_poking_timer_release_atom(kbdev, kctx, katom);
+
+	kbasep_js_atom_retained_state_copy(&retained_state, katom);
+
+	mutex_lock(&js_devdata->queue_mutex);
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+	atomic_dec(&kctx->atoms_pulled);
+	atomic_dec(&kctx->atoms_pulled_slot[js]);
+
+	atomic_dec(&katom->blocked);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	kctx->atoms_pulled_slot_pri[js][katom->sched_priority]--;
+
+	if (!atomic_read(&kctx->atoms_pulled_slot[js]) &&
+			jsctx_rb_none_to_pull(kctx, js))
+		timer_sync |= kbase_js_ctx_list_remove_nolock(kbdev, kctx, js);
+
+	/* If this slot has been blocked due to soft-stopped atoms, and all
+	 * atoms have now been processed, then unblock the slot */
+	if (!kctx->atoms_pulled_slot_pri[js][prio] &&
+			kctx->blocked_js[js][prio]) {
+		kctx->blocked_js[js][prio] = false;
+
+		/* Only mark the slot as pullable if the context is not idle -
+		 * that case is handled below */
+		if (atomic_read(&kctx->atoms_pulled) &&
+				kbase_js_ctx_pullable(kctx, js, true))
+			timer_sync |= kbase_js_ctx_list_add_pullable_nolock(
+					kbdev, kctx, js);
+	}
+
+	if (!atomic_read(&kctx->atoms_pulled)) {
+		if (!kctx->slots_pullable) {
+			WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+			kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+			timer_sync = true;
+		}
+
+		if (kctx->as_nr != KBASEP_AS_NR_INVALID &&
+				!kbase_ctx_flag(kctx, KCTX_DYING)) {
+			int num_slots = kbdev->gpu_props.num_job_slots;
+			int slot;
+
+			if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+				kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+			for (slot = 0; slot < num_slots; slot++) {
+				if (kbase_js_ctx_pullable(kctx, slot, true))
+					timer_sync |=
+					kbase_js_ctx_list_add_pullable_nolock(
+							kbdev, kctx, slot);
+			}
+		}
+
+		kbase_jm_idle_ctx(kbdev, kctx);
+
+		context_idle = true;
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (context_idle) {
+		WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
+		kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+		kbase_pm_context_idle(kbdev);
+	}
+
+	if (timer_sync)
+		kbase_js_sync_timers(kbdev);
+
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_unlock(&js_devdata->queue_mutex);
+
+	katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+	WARN_ON(kbasep_js_has_atom_finished(&retained_state));
+
+	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+							&retained_state);
+
+	kbase_js_sched_all(kbdev);
+
+	kbase_backend_complete_wq_post_sched(kbdev, core_req);
+}
+
+void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	jsctx_rb_unpull(kctx, katom);
+
+	WARN_ON(work_pending(&katom->work));
+
+	/* Block re-submission until workqueue has run */
+	atomic_inc(&katom->blocked);
+
+	kbase_job_check_leave_disjoint(kctx->kbdev, katom);
+
+	INIT_WORK(&katom->work, js_return_worker);
+	queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
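+
+/*
+ * Illustrative backend flow (editorial sketch, not part of the driver): an
+ * atom obtained via kbase_js_pull() that turns out not to be submittable is
+ * handed back, e.g.:
+ *
+ *	katom = kbase_js_pull(kctx, js);
+ *	if (katom && backend_rejects(katom))	// backend_rejects is hypothetical
+ *		kbase_js_unpull(kctx, katom);
+ *
+ * The deferred js_return_worker() then unwinds the bookkeeping performed at
+ * pull time and releases the context reference.
+ */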
+
+bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
+						struct kbase_jd_atom *katom)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+	struct kbasep_js_device_data *js_devdata;
+	struct kbase_device *kbdev;
+	unsigned long flags;
+	bool timer_sync = false;
+	int atom_slot;
+	bool context_idle = false;
+	int prio = katom->sched_priority;
+
+	kbdev = kctx->kbdev;
+	atom_slot = katom->slot_nr;
+
+	js_kctx_info = &kctx->jctx.sched_info;
+	js_devdata = &kbdev->js_data;
+
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE) {
+		context_idle = !atomic_dec_return(&kctx->atoms_pulled);
+		atomic_dec(&kctx->atoms_pulled_slot[atom_slot]);
+		kctx->atoms_pulled_slot_pri[atom_slot][prio]--;
+
+		if (!atomic_read(&kctx->atoms_pulled) &&
+				!kctx->slots_pullable) {
+			WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+			kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+			timer_sync = true;
+		}
+
+		/* If this slot has been blocked due to soft-stopped atoms, and
+		 * all atoms have now been processed, then unblock the slot */
+		if (!kctx->atoms_pulled_slot_pri[atom_slot][prio]
+				&& kctx->blocked_js[atom_slot][prio]) {
+			kctx->blocked_js[atom_slot][prio] = false;
+			if (kbase_js_ctx_pullable(kctx, atom_slot, true))
+				timer_sync |=
+					kbase_js_ctx_list_add_pullable_nolock(
+						kbdev, kctx, atom_slot);
+		}
+	}
+	WARN_ON(!(katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE));
+
+	if (!atomic_read(&kctx->atoms_pulled_slot[atom_slot]) &&
+			jsctx_rb_none_to_pull(kctx, atom_slot)) {
+		if (!list_empty(
+			&kctx->jctx.sched_info.ctx.ctx_list_entry[atom_slot]))
+			timer_sync |= kbase_js_ctx_list_remove_nolock(
+					kctx->kbdev, kctx, atom_slot);
+	}
+
+	/*
+	 * If submission is disabled on this context (most likely due to an
+	 * atom failure) and there are now no atoms left in the system then
+	 * re-enable submission so that context can be scheduled again.
+	 */
+	if (!kbasep_js_is_submit_allowed(js_devdata, kctx) &&
+					!atomic_read(&kctx->atoms_pulled) &&
+					!kbase_ctx_flag(kctx, KCTX_DYING)) {
+		int js;
+
+		kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+			if (kbase_js_ctx_pullable(kctx, js, true))
+				timer_sync |=
+					kbase_js_ctx_list_add_pullable_nolock(
+							kbdev, kctx, js);
+		}
+	} else if (katom->x_post_dep &&
+			kbasep_js_is_submit_allowed(js_devdata, kctx)) {
+		int js;
+
+		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+			if (kbase_js_ctx_pullable(kctx, js, true))
+				timer_sync |=
+					kbase_js_ctx_list_add_pullable_nolock(
+							kbdev, kctx, js);
+		}
+	}
+
+	/* Mark context as inactive. The pm reference will be dropped later in
+	 * jd_done_worker().
+	 */
+	if (context_idle)
+		kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	if (timer_sync)
+		kbase_backend_ctx_count_changed(kbdev);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	return context_idle;
+}
+
+struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
+		ktime_t *end_timestamp)
+{
+	struct kbase_device *kbdev;
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_jd_atom *x_dep = katom->x_post_dep;
+
+	kbdev = kctx->kbdev;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	if (katom->will_fail_event_code)
+		katom->event_code = katom->will_fail_event_code;
+
+	katom->status = KBASE_JD_ATOM_STATE_HW_COMPLETED;
+
+	if (katom->event_code != BASE_JD_EVENT_DONE) {
+		kbase_js_evict_deps(kctx, katom, katom->slot_nr,
+				katom->sched_priority);
+	}
+
+	KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, NULL,
+		katom->slot_nr, 0, TL_JS_EVENT_STOP);
+
+	kbase_jd_done(katom, katom->slot_nr, end_timestamp, 0);
+
+	/* Unblock cross dependency if present */
+	if (x_dep && (katom->event_code == BASE_JD_EVENT_DONE ||
+			!(x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER)) &&
+			(x_dep->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)) {
+		bool was_pullable = kbase_js_ctx_pullable(kctx, x_dep->slot_nr,
+				false);
+		x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+		kbase_js_move_to_tree(x_dep);
+		if (!was_pullable && kbase_js_ctx_pullable(kctx, x_dep->slot_nr,
+				false))
+			kbase_js_ctx_list_add_pullable_nolock(kbdev, kctx,
+					x_dep->slot_nr);
+
+		if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE)
+			return x_dep;
+	}
+
+	return NULL;
+}
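+
+/*
+ * Illustrative caller behaviour (editorial sketch): a non-NULL return value
+ * is the now-unblocked cross-slot dependent that has entered the runnable
+ * tree, which the caller may attempt to submit next, e.g.:
+ *
+ *	x_dep = kbase_js_complete_atom(katom, end_timestamp);
+ *	if (x_dep)
+ *		// ... try to pull/submit on x_dep->slot_nr ...
+ */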
+
+void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbase_context *last_active[BASE_JM_MAX_NR_SLOTS];
+	bool timer_sync = false;
+	bool ctx_waiting[BASE_JM_MAX_NR_SLOTS];
+	int js;
+
+	js_devdata = &kbdev->js_data;
+
+	down(&js_devdata->schedule_sem);
+	mutex_lock(&js_devdata->queue_mutex);
+
+	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+		last_active[js] = kbdev->hwaccess.active_kctx[js];
+		ctx_waiting[js] = false;
+	}
+
+	while (js_mask) {
+		js = ffs(js_mask) - 1;
+
+		while (1) {
+			struct kbase_context *kctx;
+			unsigned long flags;
+			bool context_idle = false;
+
+			kctx = kbase_js_ctx_list_pop_head(kbdev, js);
+
+			if (!kctx) {
+				js_mask &= ~(1 << js);
+				break; /* No contexts on pullable list */
+			}
+
+			if (!kbase_ctx_flag(kctx, KCTX_ACTIVE)) {
+				context_idle = true;
+
+				if (kbase_pm_context_active_handle_suspend(
+									kbdev,
+				      KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
+					/* Suspend pending - return context to
+					 * queue and stop scheduling */
+					mutex_lock(
+					&kctx->jctx.sched_info.ctx.jsctx_mutex);
+					if (kbase_js_ctx_list_add_pullable_head(
+						kctx->kbdev, kctx, js))
+						kbase_js_sync_timers(kbdev);
+					mutex_unlock(
+					&kctx->jctx.sched_info.ctx.jsctx_mutex);
+					mutex_unlock(&js_devdata->queue_mutex);
+					up(&js_devdata->schedule_sem);
+					return;
+				}
+				kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
+			}
+
+			if (!kbase_js_use_ctx(kbdev, kctx, js)) {
+				mutex_lock(
+					&kctx->jctx.sched_info.ctx.jsctx_mutex);
+				/* Context cannot be used at this time */
+				spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+				if (kbase_js_ctx_pullable(kctx, js, false)
+				    || kbase_ctx_flag(kctx, KCTX_PRIVILEGED))
+					timer_sync |=
+					kbase_js_ctx_list_add_pullable_head_nolock(
+							kctx->kbdev, kctx, js);
+				else
+					timer_sync |=
+					kbase_js_ctx_list_add_unpullable_nolock(
+							kctx->kbdev, kctx, js);
+				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+						flags);
+				mutex_unlock(
+					&kctx->jctx.sched_info.ctx.jsctx_mutex);
+				if (context_idle) {
+					WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
+					kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+					kbase_pm_context_idle(kbdev);
+				}
+
+				/* No more jobs can be submitted on this slot */
+				js_mask &= ~(1 << js);
+				break;
+			}
+			mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+			kbase_ctx_flag_clear(kctx, KCTX_PULLED);
+
+			if (!kbase_jm_kick(kbdev, 1 << js))
+				/* No more jobs can be submitted on this slot */
+				js_mask &= ~(1 << js);
+
+			if (!kbase_ctx_flag(kctx, KCTX_PULLED)) {
+				bool pullable = kbase_js_ctx_pullable(kctx, js,
+						true);
+
+				/* Failed to pull jobs - push to head of list.
+				 * Unless this context is already 'active', in
+				 * which case it's effectively already scheduled
+				 * so push it to the back of the list. */
+				if (pullable && kctx == last_active[js] &&
+						kbase_ctx_flag(kctx,
+						(KCTX_PULLED_SINCE_ACTIVE_JS0 <<
+						js)))
+					timer_sync |=
+					kbase_js_ctx_list_add_pullable_nolock(
+							kctx->kbdev,
+							kctx, js);
+				else if (pullable)
+					timer_sync |=
+					kbase_js_ctx_list_add_pullable_head_nolock(
+							kctx->kbdev,
+							kctx, js);
+				else
+					timer_sync |=
+					kbase_js_ctx_list_add_unpullable_nolock(
+								kctx->kbdev,
+								kctx, js);
+
+				/* If this context is not the active context,
+				 * but the active context is pullable on this
+				 * slot, then we need to remove the active
+				 * marker to prevent it from submitting atoms in
+				 * the IRQ handler, which would prevent this
+				 * context from making progress. */
+				if (last_active[js] && kctx != last_active[js]
+						&& kbase_js_ctx_pullable(
+						last_active[js], js, true))
+					ctx_waiting[js] = true;
+
+				if (context_idle) {
+					kbase_jm_idle_ctx(kbdev, kctx);
+					spin_unlock_irqrestore(
+							&kbdev->hwaccess_lock,
+							flags);
+					WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
+					kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+					kbase_pm_context_idle(kbdev);
+				} else {
+					spin_unlock_irqrestore(
+							&kbdev->hwaccess_lock,
+							flags);
+				}
+				mutex_unlock(
+					&kctx->jctx.sched_info.ctx.jsctx_mutex);
+
+				js_mask &= ~(1 << js);
+				break; /* Could not run atoms on this slot */
+			}
+
+			/* Push to back of list */
+			if (kbase_js_ctx_pullable(kctx, js, true))
+				timer_sync |=
+					kbase_js_ctx_list_add_pullable_nolock(
+							kctx->kbdev, kctx, js);
+			else
+				timer_sync |=
+					kbase_js_ctx_list_add_unpullable_nolock(
+							kctx->kbdev, kctx, js);
+
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+			mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+		}
+	}
+
+	if (timer_sync)
+		kbase_js_sync_timers(kbdev);
+
+	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+		if (kbdev->hwaccess.active_kctx[js] == last_active[js] &&
+				ctx_waiting[js])
+			kbdev->hwaccess.active_kctx[js] = NULL;
+	}
+
+	mutex_unlock(&js_devdata->queue_mutex);
+	up(&js_devdata->schedule_sem);
+}
+
+void kbase_js_zap_context(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
+
+	/*
+	 * Critical assumption: No more submission is possible outside of the
+	 * workqueue. This is because the OS *must* prevent U/K calls (IOCTLs)
+	 * whilst the struct kbase_context is terminating.
+	 */
+
+	/* First, atomically do the following:
+	 * - mark the context as dying
+	 * - try to evict it from the queue */
+	mutex_lock(&kctx->jctx.lock);
+	mutex_lock(&js_devdata->queue_mutex);
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	kbase_ctx_flag_set(kctx, KCTX_DYING);
+
+	dev_dbg(kbdev->dev, "Zap: Try Evict Ctx %p", kctx);
+
+	/*
+	 * At this point we know:
+	 * - If eviction succeeded, it was in the queue, but now no
+	 *   longer is
+	 *  - We must cancel the jobs here. No Power Manager active reference to
+	 *    release.
+	 *  - This happens asynchronously - kbase_jd_zap_context() will wait for
+	 *    those jobs to be killed.
+	 * - If eviction failed, then it wasn't in the queue. It is one
+	 *   of the following:
+	 *  - a. it didn't have any jobs, and so is not in the Queue or
+	 *       the Run Pool (not scheduled)
+	 *   - Hence, no more work required to cancel jobs. No Power Manager
+	 *     active reference to release.
+	 *  - b. it was in the middle of a scheduling transaction (and thus must
+	 *       have at least 1 job). This can happen from a syscall or a
+	 *       kernel thread. We still hold the jsctx_mutex, and so the thread
+	 *       must be waiting inside kbasep_js_try_schedule_head_ctx(),
+	 *       before checking whether the runpool is full. That thread will
+	 *       continue after we drop the mutex, and will notice the context
+	 *       is dying. It will rollback the transaction, killing all jobs at
+	 *       the same time. kbase_jd_zap_context() will wait for those jobs
+	 *       to be killed.
+	 *   - Hence, no more work required to cancel jobs, or to release the
+	 *     Power Manager active reference.
+	 *  - c. it is scheduled, and may or may not be running jobs
+	 * - We must cause it to leave the runpool by stopping it from
+	 * submitting any more jobs. When it finally does leave,
+	 * kbasep_js_runpool_requeue_or_kill_ctx() will kill all remaining jobs
+	 * (because it is dying), release the Power Manager active reference,
+	 * and will not requeue the context in the queue.
+	 * kbase_jd_zap_context() will wait for those jobs to be killed.
+	 *  - Hence, work required just to make it leave the runpool. Cancelling
+	 *    jobs and releasing the Power manager active reference will be
+	 *    handled when it leaves the runpool.
+	 */
+	if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
+		unsigned long flags;
+		int js;
+
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+			if (!list_empty(
+				&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
+				list_del_init(
+				&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+		}
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+		/* The following events require us to kill off remaining jobs
+		 * and update PM book-keeping:
+		 * - we evicted it correctly (it must have jobs to be in the
+		 *   Queue)
+		 *
+		 * These events need no action, but take this path anyway:
+		 * - Case a: it didn't have any jobs, and was never in the Queue
+		 * - Case b: scheduling transaction will be partially rolled-
+		 *           back (this already cancels the jobs)
+		 */
+
+		KBASE_TRACE_ADD(kbdev, JM_ZAP_NON_SCHEDULED, kctx, NULL, 0u,
+						kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+		dev_dbg(kbdev->dev, "Zap: Ctx %p scheduled=0", kctx);
+
+		/* Only cancel jobs when we evicted from the
+		 * queue. No Power Manager active reference was held.
+		 *
+		 * Having is_dying set ensures that this kills, and
+		 * doesn't requeue */
+		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, false);
+
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		mutex_unlock(&js_devdata->queue_mutex);
+		mutex_unlock(&kctx->jctx.lock);
+	} else {
+		unsigned long flags;
+		bool was_retained;
+
+		/* Case c: didn't evict, but it is scheduled - it's in the Run
+		 * Pool */
+		KBASE_TRACE_ADD(kbdev, JM_ZAP_SCHEDULED, kctx, NULL, 0u,
+						kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+		dev_dbg(kbdev->dev, "Zap: Ctx %p is in RunPool", kctx);
+
+		/* Disable the ctx from submitting any more jobs */
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+		kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+		/* Retain and (later) release the context whilst it is now
+		 * disallowed from submitting jobs - this ensures that someone
+		 * somewhere will be removing the context later on */
+		was_retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+
+		/* Since it's scheduled and we have the jsctx_mutex, it must be
+		 * retained successfully */
+		KBASE_DEBUG_ASSERT(was_retained);
+
+		dev_dbg(kbdev->dev, "Zap: Ctx %p Kill Any Running jobs", kctx);
+
+		/* Cancel any remaining running jobs for this kctx - if any.
+		 * Submit is disallowed which takes effect immediately, so no
+		 * more new jobs will appear after we do this. */
+		kbase_backend_jm_kill_running_jobs_from_kctx(kctx);
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		mutex_unlock(&js_devdata->queue_mutex);
+		mutex_unlock(&kctx->jctx.lock);
+
+		dev_dbg(kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)",
+									kctx);
+
+		kbasep_js_runpool_release_ctx(kbdev, kctx);
+	}
+
+	KBASE_TRACE_ADD(kbdev, JM_ZAP_DONE, kctx, NULL, 0u, 0u);
+
+	/* After this, you must wait on both the
+	 * kbase_jd_context::zero_jobs_wait and the
+	 * kbasep_js_kctx_info::ctx::is_scheduled_waitq - to wait for the jobs
+	 * to be destroyed, and the context to be de-scheduled (if it was on the
+	 * runpool).
+	 *
+	 * kbase_jd_zap_context() will do this. */
+}
+
+static inline int trace_get_refcnt(struct kbase_device *kbdev,
+					struct kbase_context *kctx)
+{
+	return atomic_read(&kctx->refcount);
+}
+
+/**
+ * kbase_js_foreach_ctx_job - Call a function on all jobs in a context
+ * @kctx:     Pointer to context.
+ * @callback: Pointer to function to call for each job.
+ *
+ * Call a function on all jobs belonging to a non-queued, non-running
+ * context, and detach the jobs from the context as it goes.
+ *
+ * Due to the locks that might be held at the time of the call, the callback
+ * may need to defer work on a workqueue to complete its actions (e.g. when
+ * cancelling jobs)
+ *
+ * Atoms will be removed from the queue, so this must only be called when
+ * cancelling jobs (which occurs as part of context destruction).
+ *
+ * The locking conditions on the caller are as follows:
+ * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ */
+static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
+		kbasep_js_ctx_job_cb callback)
+{
+	struct kbase_device *kbdev;
+	unsigned long flags;
+	u32 js;
+
+	kbdev = kctx->kbdev;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_FOREACH_CTX_JOBS, kctx, NULL,
+					0u, trace_get_refcnt(kbdev, kctx));
+
+	/* Invoke callback on jobs on each slot in turn */
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
+		jsctx_queue_foreach(kctx, js, callback);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js.h b/drivers/gpu/arm/midgard/mali_kbase_js.h
new file mode 100644
index 0000000..355da27
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_js.h
@@ -0,0 +1,912 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js.h
+ * Job Scheduler APIs.
+ */
+
+#ifndef _KBASE_JS_H_
+#define _KBASE_JS_H_
+
+#include "mali_kbase_js_defs.h"
+#include "mali_kbase_context.h"
+#include "mali_kbase_defs.h"
+#include "mali_kbase_debug.h"
+
+#include "mali_kbase_js_ctx_attr.h"
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js Job Scheduler Internal APIs
+ * @{
+ *
+ * These APIs are Internal to KBase.
+ */
+
+/**
+ * @brief Initialize the Job Scheduler
+ *
+ * The struct kbasep_js_device_data sub-structure of \a kbdev must be zero
+ * initialized before passing to the kbasep_js_devdata_init() function. This is
+ * to give efficient error path code.
+ */
+int kbasep_js_devdata_init(struct kbase_device * const kbdev);
+
+/**
+ * @brief Halt the Job Scheduler.
+ *
+ * It is safe to call this on \a kbdev even if the kbasep_js_device_data
+ * sub-structure was never initialized or failed initialization, to give
+ * efficient error-path code.
+ *
+ * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
+ * be zero initialized before passing to the kbasep_js_devdata_init()
+ * function. This is to give efficient error path code.
+ *
+ * It is a Programming Error to call this whilst there are still kbase_context
+ * structures registered with this scheduler.
+ *
+ */
+void kbasep_js_devdata_halt(struct kbase_device *kbdev);
+
+/**
+ * @brief Terminate the Job Scheduler
+ *
+ * It is safe to call this on \a kbdev even if the kbasep_js_device_data
+ * sub-structure was never initialized or failed initialization, to give
+ * efficient error-path code.
+ *
+ * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
+ * be zero initialized before passing to the kbasep_js_devdata_init()
+ * function. This is to give efficient error path code.
+ *
+ * It is a Programming Error to call this whilst there are still kbase_context
+ * structures registered with this scheduler.
+ */
+void kbasep_js_devdata_term(struct kbase_device *kbdev);
+
+/**
+ * @brief Initialize the Scheduling Component of a struct kbase_context on the Job Scheduler.
+ *
+ * This effectively registers a struct kbase_context with a Job Scheduler.
+ *
+ * It does not register any jobs owned by the struct kbase_context with the scheduler.
+ * Those must be separately registered by kbasep_js_add_job().
+ *
+ * The struct kbase_context must be zero initialized before passing to the
+ * kbasep_js_kctx_init() function. This is to give efficient error path code.
+ */
+int kbasep_js_kctx_init(struct kbase_context * const kctx);
+
+/**
+ * @brief Terminate the Scheduling Component of a struct kbase_context on the Job Scheduler
+ *
+ * This effectively de-registers a struct kbase_context from its Job Scheduler
+ *
+ * It is safe to call this on a struct kbase_context that has never had or failed
+ * initialization of its jctx.sched_info member, to give efficient error-path
+ * code.
+ *
+ * For this to work, the struct kbase_context must be zero initialized before
+ * passing to the kbasep_js_kctx_init() function.
+ *
+ * It is a Programming Error to call this whilst there are still jobs
+ * registered with this context.
+ */
+void kbasep_js_kctx_term(struct kbase_context *kctx);
+
+/**
+ * @brief Add a job chain to the Job Scheduler, and take necessary actions to
+ * schedule the context/run the job.
+ *
+ * This atomically does the following:
+ * - Update the numbers of jobs information
+ * - Add the job to the run pool if necessary (part of init_job)
+ *
+ * Once this is done, then an appropriate action is taken:
+ * - If the ctx is scheduled, it attempts to start the next job (which might be
+ * this added job)
+ * - Otherwise, and if this is the first job on the context, it enqueues it on
+ * the Policy Queue
+ *
+ * The Policy's Queue can be updated by this in the following ways:
+ * - In the above case that this is the first job on the context
+ * - If the context is high priority and the context is not scheduled, then it
+ * could cause the Policy to schedule out a low-priority context, allowing
+ * this context to be scheduled in.
+ *
+ * If the context is already scheduled on the RunPool, then adding a job to it
+ * is guaranteed not to update the Policy Queue. And so, the caller is
+ * guaranteed not to need to try scheduling a context from the Run Pool - it
+ * can safely assert that the result is false.
+ *
+ * It is a programming error to have more than U32_MAX jobs in flight at a time.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold hwaccess_lock (as this will be obtained internally)
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
+ *
+ * @return true indicates that the Policy Queue was updated, and so the
+ * caller will need to try scheduling a context onto the Run Pool.
+ * @return false indicates that no updates were made to the Policy Queue,
+ * so no further action is required from the caller. This is \b always returned
+ * when the context is currently scheduled.
+ */
+bool kbasep_js_add_job(struct kbase_context *kctx, struct kbase_jd_atom *atom);
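+
+/* An illustrative caller sketch of the contract above (assumed call site;
+ * the real callers are not part of this hunk): the scheduler only needs
+ * kicking when the Policy Queue was updated.
+ *
+ *	if (kbasep_js_add_job(kctx, katom))
+ *		kbase_js_sched_all(kbdev);
+ */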
+
+/**
+ * @brief Remove a job chain from the Job Scheduler, except for its 'retained state'.
+ *
+ * Completely removing a job requires several calls:
+ * - kbasep_js_atom_retained_state_copy(), to capture the 'retained state' of
+ *   the atom
+ * - kbasep_js_remove_job(), to partially remove the atom from the Job Scheduler
+ * - kbasep_js_runpool_release_ctx_and_katom_retained_state(), to release the
+ *   remaining state held as part of the job having been run.
+ *
+ * In the common case of atoms completing normally, this set of actions is
+ * more optimal for spinlock purposes than having kbasep_js_remove_job()
+ * handle all of the actions.
+ *
+ * In the case of cancelling atoms, it is easier to call
+ * kbasep_js_remove_cancelled_job(), which handles all the necessary actions.
+ *
+ * It is a programming error to call this when:
+ * - \a atom is not a job belonging to kctx.
+ * - \a atom has already been removed from the Job Scheduler.
+ * - \a atom is still in the runpool
+ *
+ * Do not use this for removing jobs being killed by kbase_jd_cancel() - use
+ * kbasep_js_remove_cancelled_job() instead.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ *
+ */
+void kbasep_js_remove_job(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *atom);
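+
+/* An illustrative sketch of the completion sequence documented above
+ * (assumed usage; the actual call sites live in the job dispatch code):
+ *
+ *	struct kbasep_js_atom_retained_state retained;
+ *
+ *	kbasep_js_atom_retained_state_copy(&retained, katom);
+ *	kbasep_js_remove_job(kbdev, kctx, katom);
+ *	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+ *			&retained);
+ */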
+
+/**
+ * @brief Completely remove a job chain from the Job Scheduler, in the case
+ * where the job chain was cancelled.
+ *
+ * This is a variant of kbasep_js_remove_job() that takes care of removing all
+ * of the retained state too. This is generally useful for cancelled atoms,
+ * which need not be handled in an optimal way.
+ *
+ * It is a programming error to call this when:
+ * - \a atom is not a job belonging to kctx.
+ * - \a atom has already been removed from the Job Scheduler.
+ * - \a atom is still in the runpool:
+ *  - it is not being killed with kbasep_jd_cancel()
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold the hwaccess_lock, (as this will be obtained
+ *   internally)
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this could be
+ * obtained internally)
+ *
+ * @return true indicates that ctx attributes have changed and the caller
+ * should call kbase_js_sched_all() to try to run more jobs
+ * @return false otherwise
+ */
+bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						struct kbase_jd_atom *katom);
+
+/**
+ * @brief Refcount a context as being busy, preventing it from being scheduled
+ * out.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold mmu_hw_mutex and hwaccess_lock, because they will be
+ *   used internally.
+ *
+ * @return value != false if the retain succeeded, and the context will not be scheduled out.
+ * @return false if the retain failed (because the context is being/has been scheduled out).
+ */
+bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Refcount a context as being busy, preventing it from being scheduled
+ * out.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locks must be held by the caller:
+ * - mmu_hw_mutex, hwaccess_lock
+ *
+ * @return value != false if the retain succeeded, and the context will not be scheduled out.
+ * @return false if the retain failed (because the context is being/has been scheduled out).
+ */
+bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Lookup a context in the Run Pool based upon its current address space
+ * and ensure that it stays scheduled in.
+ *
+ * The context is refcounted as being busy to prevent it from scheduling
+ * out. It must be released with kbasep_js_runpool_release_ctx() when it is no
+ * longer required to stay scheduled in.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ *   If the hwaccess_lock is already held, then the caller should use
+ *   kbasep_js_runpool_lookup_ctx_nolock() instead.
+ *
+ * @return a valid struct kbase_context on success, which has been refcounted as being busy.
+ * @return NULL on failure, indicating that no context was found in \a as_nr
+ */
+struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, int as_nr);
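+
+/* An illustrative usage sketch (assumed pattern, e.g. from a handler that
+ * only knows the address space number): a successful lookup retains the
+ * context, so it must be paired with a release.
+ *
+ *	struct kbase_context *kctx;
+ *
+ *	kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_nr);
+ *	if (kctx) {
+ *		// ... use kctx; it cannot be scheduled out here ...
+ *		kbasep_js_runpool_release_ctx(kbdev, kctx);
+ *	}
+ */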
+
+/**
+ * @brief Handling the requeuing/killing of a context that was evicted from the
+ * policy queue or runpool.
+ *
+ * This should be used whenever handing off a context that has been evicted
+ * from the policy queue or the runpool:
+ * - If the context is not dying and has jobs, it gets re-added to the policy
+ * queue
+ * - Otherwise, it is not added
+ *
+ * In addition, if the context is dying the jobs are killed asynchronously.
+ *
+ * In all cases, the Power Manager active reference is released
+ * (kbase_pm_context_idle()) whenever the has_pm_ref parameter is true.  \a
+ * has_pm_ref must be set to false whenever the context was not previously in
+ * the runpool and does not hold a Power Manager active refcount. Note that
+ * contexts in a rollback of kbasep_js_try_schedule_head_ctx() might have an
+ * active refcount even though they weren't in the runpool.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
+ * obtained internally)
+ */
+void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, struct kbase_context *kctx, bool has_pm_ref);
+
+/**
+ * @brief Release a refcount of a context being busy, allowing it to be
+ * scheduled out.
+ *
+ * When the refcount reaches zero, the context \em might be scheduled out
+ * (depending on whether the Scheduling Policy has deemed it so, or whether it
+ * has run out of jobs).
+ *
+ * If the context does get scheduled out, then the following actions will be
+ * taken as part of descheduling the context:
+ * - For the context being descheduled:
+ *  - If the context is in the processing of dying (all the jobs are being
+ * removed from it), then descheduling also kills off any jobs remaining in the
+ * context.
+ *  - If the context is not dying, and any jobs remain after descheduling the
+ * context then it is re-enqueued to the Policy's Queue.
+ *  - Otherwise, the context is still known to the scheduler, but remains absent
+ * from the Policy Queue until a job is next added to it.
+ *  - In all descheduling cases, the Power Manager active reference (obtained
+ * during kbasep_js_try_schedule_head_ctx()) is released (kbase_pm_context_idle()).
+ *
+ * Whilst the context is being descheduled, this also handles actions that
+ * cause more atoms to be run:
+ * - Attempt submitting atoms when the Context Attributes on the Runpool have
+ * changed. This is because the context being scheduled out could mean that
+ * there are more opportunities to run atoms.
+ * - Attempt submitting to a slot that was previously blocked due to affinity
+ * restrictions. This is usually only necessary when releasing a context
+ * happens as part of completing a previous job, but is harmless nonetheless.
+ * - Attempt scheduling in a new context (if one is available), and if necessary,
+ * running a job from that new context.
+ *
+ * Unlike retaining a context in the runpool, this function \b cannot be called
+ * from IRQ context.
+ *
+ * It is a programming error to call this on a \a kctx that is not currently
+ * scheduled, or that already has a zero refcount.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
+ * obtained internally)
+ *
+ */
+void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Variant of kbasep_js_runpool_release_ctx() that handles additional
+ * actions from completing an atom.
+ *
+ * This is usually called as part of completing an atom and releasing the
+ * refcount on the context held by the atom.
+ *
+ * Therefore, the extra actions carried out are part of handling actions queued
+ * on a completed atom, namely:
+ * - Releasing the atom's context attributes
+ * - Retrying the submission on a particular slot, because we couldn't submit
+ * on that slot from an IRQ handler.
+ *
+ * The locking conditions of this function are the same as those for
+ * kbasep_js_runpool_release_ctx()
+ */
+void kbasep_js_runpool_release_ctx_and_katom_retained_state(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);
+
+/**
+ * @brief Variant of kbase_js_runpool_release_ctx() that assumes that
+ * kbasep_js_device_data::runpool_mutex and
+ * kbasep_js_kctx_info::ctx::jsctx_mutex are held by the caller, and does not
+ * attempt to schedule new contexts.
+ */
+void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
+						struct kbase_context *kctx);
+
+/**
+ * @brief Schedule in a privileged context
+ *
+ * This schedules a context in regardless of the context priority.
+ * If the runpool is full, a context will be forced out of the runpool and
+ * the function will wait for the new context to be scheduled in.
+ * The context will be kept scheduled in (and the corresponding address space
+ * reserved) until kbasep_js_release_privileged_ctx() is called.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex, because it will
+ * be used internally.
+ *
+ */
+void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Release a privileged context, allowing it to be scheduled out.
+ *
+ * See kbasep_js_runpool_release_ctx for potential side effects.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
+ *
+ */
+void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
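+
+/* An illustrative pairing sketch (assumed usage around work that needs the
+ * context resident, such as instrumentation setup):
+ *
+ *	kbasep_js_schedule_privileged_ctx(kbdev, kctx);
+ *	// ... the context is guaranteed to stay scheduled in here ...
+ *	kbasep_js_release_privileged_ctx(kbdev, kctx);
+ */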
+
+/**
+ * @brief Try to submit the next job on each slot
+ *
+ * The following locks may be used:
+ * - kbasep_js_device_data::runpool_mutex
+ * - hwaccess_lock
+ */
+void kbase_js_try_run_jobs(struct kbase_device *kbdev);
+
+/**
+ * @brief Suspend the job scheduler during a Power Management Suspend event.
+ *
+ * Causes all contexts to be removed from the runpool, and prevents any
+ * contexts from (re)entering the runpool.
+ *
+ * This does not handle suspending the one privileged context: the caller must
+ * instead do this by suspending the GPU HW Counter Instrumentation.
+ *
+ * This will eventually cause all Power Management active references held by
+ * contexts on the runpool to be released, without running any more atoms.
+ *
+ * The caller must then wait for all Power Management active refcounts to
+ * become zero before completing the suspend.
+ *
+ * The emptying mechanism may take some time to complete, since it can wait for
+ * jobs to complete naturally instead of forcing them to end quickly. However,
+ * this is bounded by the Job Scheduler's Job Timeouts. Hence, this
+ * function is guaranteed to complete in a finite time.
+ */
+void kbasep_js_suspend(struct kbase_device *kbdev);
+
+/**
+ * @brief Resume the Job Scheduler after a Power Management Resume event.
+ *
+ * This restores the actions from kbasep_js_suspend():
+ * - Schedules contexts back into the runpool
+ * - Resumes running atoms on the GPU
+ */
+void kbasep_js_resume(struct kbase_device *kbdev);
+
+/**
+ * @brief Submit an atom to the job scheduler.
+ *
+ * The atom is enqueued on the context's ringbuffer. The caller must have
+ * ensured that all dependencies can be represented in the ringbuffer.
+ *
+ * Caller must hold jctx->lock
+ *
+ * @param[in] kctx  Context pointer
+ * @param[in] atom  Pointer to the atom to submit
+ *
+ * @return Whether the context needs to be enqueued. */
+bool kbase_js_dep_resolved_submit(struct kbase_context *kctx,
+					struct kbase_jd_atom *katom);
+
+/**
+ * jsctx_ll_flush_to_rb() - Pushes atoms from the linked list to ringbuffer.
+ * @kctx:  Context Pointer
+ * @prio:  Priority (specifies the queue together with js).
+ * @js:    Job slot (specifies the queue together with prio).
+ *
+ * Pushes all possible atoms from the linked list to the ringbuffer. The
+ * number of atoms is limited by the free space in the ringbuffer and the
+ * number of available atoms in the linked list.
+ */
+void jsctx_ll_flush_to_rb(struct kbase_context *kctx, int prio, int js);
+
+/**
+ * @brief Pull an atom from a context in the job scheduler for execution.
+ *
+ * The atom will not be removed from the ringbuffer at this stage.
+ *
+ * The HW access lock must be held when calling this function.
+ *
+ * @param[in] kctx  Context to pull from
+ * @param[in] js    Job slot to pull from
+ * @return          Pointer to an atom, or NULL if there are no atoms for this
+ *                  slot that can be currently run.
+ */
+struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js);
+
+/**
+ * @brief Return an atom to the job scheduler ringbuffer.
+ *
+ * An atom is 'unpulled' if execution is stopped but intended to be returned to
+ * later. The most common reason for this is that the atom has been
+ * soft-stopped.
+ *
+ * Note that if multiple atoms are to be 'unpulled', they must be returned in
+ * the reverse order to which they were originally pulled. It is a programming
+ * error to return atoms in any other order.
+ *
+ * The HW access lock must be held when calling this function.
+ *
+ * @param[in] kctx  Context pointer
+ * @param[in] atom  Pointer to the atom to unpull
+ */
+void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom);
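+
+/* An illustrative sketch of the pull/unpull ordering contract (assumed
+ * caller; hwaccess_lock held throughout). Atoms must be unpulled in the
+ * reverse order to which they were pulled:
+ *
+ *	katom_a = kbase_js_pull(kctx, js);
+ *	katom_b = kbase_js_pull(kctx, js);
+ *	// ... both stopped, e.g. soft-stopped ...
+ *	kbase_js_unpull(kctx, katom_b);	// most recently pulled first
+ *	kbase_js_unpull(kctx, katom_a);
+ */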
+
+/**
+ * @brief Complete an atom from jd_done_worker(), removing it from the job
+ * scheduler ringbuffer.
+ *
+ * If the atom failed then all dependee atoms marked for failure propagation
+ * will also fail.
+ *
+ * @param[in] kctx  Context pointer
+ * @param[in] katom Pointer to the atom to complete
+ * @return true if the context is now idle (no jobs pulled)
+ *         false otherwise
+ */
+bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
+				struct kbase_jd_atom *katom);
+
+/**
+ * @brief Complete an atom.
+ *
+ * Most of the work required to complete an atom will be performed by
+ * jd_done_worker().
+ *
+ * The HW access lock must be held when calling this function.
+ *
+ * @param[in] katom         Pointer to the atom to complete
+ * @param[in] end_timestamp The time that the atom completed (may be NULL)
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
+ */
+struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
+		ktime_t *end_timestamp);
+
+/**
+ * @brief Submit atoms from all available contexts.
+ *
+ * This will attempt to submit as many jobs as possible to the provided job
+ * slots. It will exit when either all job slots are full, or all contexts have
+ * been used.
+ *
+ * @param[in] kbdev    Device pointer
+ * @param[in] js_mask  Mask of job slots to submit to
+ */
+void kbase_js_sched(struct kbase_device *kbdev, int js_mask);
+
+/**
+ * kbase_jd_zap_context - Attempt to deschedule a context that is being
+ *                        destroyed
+ * @kctx: Context pointer
+ *
+ * This will attempt to remove a context from any internal job scheduler queues
+ * and perform any other actions to ensure a context will not be submitted
+ * from.
+ *
+ * If the context is currently scheduled, then the caller must wait for all
+ * pending jobs to complete before taking any further action.
+ */
+void kbase_js_zap_context(struct kbase_context *kctx);
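+
+/* An illustrative teardown sketch (assumed waiters, matching the zapping
+ * comments in the scheduler implementation): after zapping, the caller must
+ * wait for both the jobs to be destroyed and the context to be descheduled.
+ *
+ *	kbase_js_zap_context(kctx);
+ *	// then wait on kbase_jd_context::zero_jobs_wait and
+ *	// kbasep_js_kctx_info::ctx::is_scheduled_waitq
+ */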
+
+/**
+ * @brief Validate an atom
+ *
+ * This will determine whether the atom can be scheduled onto the GPU. Atoms
+ * with invalid combinations of core requirements will be rejected.
+ *
+ * @param[in] kbdev  Device pointer
+ * @param[in] katom  Atom to validate
+ * @return           true if atom is valid
+ *                   false otherwise
+ */
+bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom);
+
+/**
+ * kbase_js_set_timeouts - update all JS timeouts with user specified data
+ * @kbdev: Device pointer
+ *
+ * Timeouts are specified through the 'js_timeouts' sysfs file. If a timeout
+ * is set to a positive number then that becomes the new value used; if a
+ * timeout is negative then the default value is used.
+ */
+void kbase_js_set_timeouts(struct kbase_device *kbdev);
+
+/**
+ * kbase_js_set_ctx_priority - set the context priority
+ * @kctx: Context pointer
+ * @new_priority: New priority value for the Context
+ *
+ * The context priority is set to a new value and it is moved to the
+ * pullable/unpullable list as per the new priority.
+ */
+void kbase_js_set_ctx_priority(struct kbase_context *kctx, int new_priority);
+
+
+/**
+ * kbase_js_update_ctx_priority - update the context priority
+ * @kctx: Context pointer
+ *
+ * The context priority gets updated as per the priority of atoms currently in
+ * use for that context, but only if system priority mode for context scheduling
+ * is being used.
+ */
+void kbase_js_update_ctx_priority(struct kbase_context *kctx);
+
+/*
+ * Helpers follow
+ */
+
+/**
+ * @brief Check that a context is allowed to submit jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size, and to
+ * wrap up an otherwise long, repeated line of code.
+ *
+ * As with any bool, never test the return value by comparing it against true.
+ *
+ * The caller must hold hwaccess_lock.
+ */
+static inline bool kbasep_js_is_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
+{
+	u16 test_bit;
+
+	/* Ensure context really is scheduled in */
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	test_bit = (u16) (1u << kctx->as_nr);
+
+	return (bool) (js_devdata->runpool_irq.submit_allowed & test_bit);
+}
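+
+/* Example: with kctx->as_nr == 3, test_bit is 1u << 3 == 0x0008, so this
+ * returns true only while bit 3 of runpool_irq.submit_allowed is set. */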
+
+/**
+ * @brief Allow a context to submit jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size, and to
+ * wrap up an otherwise long, repeated line of code.
+ *
+ * The caller must hold hwaccess_lock.
+ */
+static inline void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
+{
+	u16 set_bit;
+
+	/* Ensure context really is scheduled in */
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	set_bit = (u16) (1u << kctx->as_nr);
+
+	dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)",
+			kctx, kctx->as_nr);
+
+	js_devdata->runpool_irq.submit_allowed |= set_bit;
+}
+
+/**
+ * @brief Prevent a context from submitting more jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size, and to
+ * wrap up an otherwise long, repeated line of code.
+ *
+ * The caller must hold hwaccess_lock.
+ */
+static inline void kbasep_js_clear_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
+{
+	u16 clear_bit;
+	u16 clear_mask;
+
+	/* Ensure context really is scheduled in */
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	clear_bit = (u16) (1u << kctx->as_nr);
+	clear_mask = ~clear_bit;
+
+	dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)",
+			kctx, kctx->as_nr);
+
+	js_devdata->runpool_irq.submit_allowed &= clear_mask;
+}
+
+/**
+ * Create an initial 'invalid' atom retained state, that requires no
+ * atom-related work to be done on releasing with
+ * kbasep_js_runpool_release_ctx_and_katom_retained_state()
+ */
+static inline void kbasep_js_atom_retained_state_init_invalid(struct kbasep_js_atom_retained_state *retained_state)
+{
+	retained_state->event_code = BASE_JD_EVENT_NOT_STARTED;
+	retained_state->core_req = KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID;
+}
+
+/**
+ * Copy atom state that can be made available after jd_done_nolock() is called
+ * on that atom.
+ */
+static inline void kbasep_js_atom_retained_state_copy(struct kbasep_js_atom_retained_state *retained_state, const struct kbase_jd_atom *katom)
+{
+	retained_state->event_code = katom->event_code;
+	retained_state->core_req = katom->core_req;
+	retained_state->sched_priority = katom->sched_priority;
+	retained_state->device_nr = katom->device_nr;
+}
+
+/**
+ * @brief Determine whether an atom has finished (given its retained state),
+ * and so should be given back to userspace/removed from the system.
+ *
+ * Reasons for an atom not finishing include:
+ * - Being soft-stopped (and so, the atom should be resubmitted sometime later)
+ *
+ * @param[in] katom_retained_state the retained state of the atom to check
+ * @return    false if the atom has not finished
+ * @return    !=false if the atom has finished
+ */
+static inline bool kbasep_js_has_atom_finished(const struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+	return (bool) (katom_retained_state->event_code != BASE_JD_EVENT_STOPPED && katom_retained_state->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT);
+}
+
+/**
+ * @brief Determine whether a struct kbasep_js_atom_retained_state is valid
+ *
+ * An invalid struct kbasep_js_atom_retained_state is allowed, and indicates that the
+ * code should just ignore it.
+ *
+ * @param[in] katom_retained_state the atom's retained state to check
+ * @return    false if the retained state is invalid, and can be ignored
+ * @return    !=false if the retained state is valid
+ */
+static inline bool kbasep_js_atom_retained_state_is_valid(const struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+	return (bool) (katom_retained_state->core_req != KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID);
+}
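+
+/* An illustrative sketch (assumed caller): a path that may have no atom
+ * state to release can initialise an invalid retained state up front, so
+ * the combined release below degrades to a plain context release.
+ *
+ *	struct kbasep_js_atom_retained_state retained;
+ *
+ *	kbasep_js_atom_retained_state_init_invalid(&retained);
+ *	// ... optionally kbasep_js_atom_retained_state_copy(&retained, katom);
+ *	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+ *			&retained);
+ */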
+
+/**
+ * @brief Variant of kbasep_js_runpool_lookup_ctx() that can be used when the
+ * context is guaranteed to be already previously retained.
+ *
+ * It is a programming error to supply the \a as_nr of a context that has not
+ * been previously retained/has a busy refcount of zero. The only exception is
+ * when there is no ctx in \a as_nr (NULL returned).
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ *
+ * @return a valid struct kbase_context on success, with a refcount that is guaranteed
+ * to be non-zero and unmodified by this function.
+ * @return NULL on failure, indicating that no context was found in \a as_nr
+ */
+static inline struct kbase_context *kbasep_js_runpool_lookup_ctx_noretain(struct kbase_device *kbdev, int as_nr)
+{
+	struct kbase_context *found_kctx;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
+
+	found_kctx = kbdev->as_to_kctx[as_nr];
+	KBASE_DEBUG_ASSERT(found_kctx == NULL ||
+			atomic_read(&found_kctx->refcount) > 0);
+
+	return found_kctx;
+}
+
+/*
+ * The following locking conditions are made on the caller:
+ * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - The caller must hold the kbasep_js_device_data::runpool_mutex
+ */
+static inline void kbase_js_runpool_inc_context_count(
+						struct kbase_device *kbdev,
+						struct kbase_context *kctx)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+	lockdep_assert_held(&js_devdata->runpool_mutex);
+
+	/* Track total contexts */
+	KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running < S8_MAX);
+	++(js_devdata->nr_all_contexts_running);
+
+	if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+		/* Track contexts that can submit jobs */
+		KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running <
+									S8_MAX);
+		++(js_devdata->nr_user_contexts_running);
+	}
+}
+
+/*
+ * The following locking conditions are made on the caller:
+ * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - The caller must hold the kbasep_js_device_data::runpool_mutex
+ */
+static inline void kbase_js_runpool_dec_context_count(
+						struct kbase_device *kbdev,
+						struct kbase_context *kctx)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+	lockdep_assert_held(&js_devdata->runpool_mutex);
+
+	/* Track total contexts */
+	--(js_devdata->nr_all_contexts_running);
+	KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running >= 0);
+
+	if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+		/* Track contexts that can submit jobs */
+		--(js_devdata->nr_user_contexts_running);
+		KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running >= 0);
+	}
+}
+
+
+/**
+ * @brief Submit atoms from all available contexts to all job slots.
+ *
+ * This will attempt to submit as many jobs as possible. It will exit when
+ * either all job slots are full, or all contexts have been used.
+ *
+ * @param[in] kbdev    Device pointer
+ */
+static inline void kbase_js_sched_all(struct kbase_device *kbdev)
+{
+	kbase_js_sched(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1);
+}
+
+extern const int
+kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS];
+
+extern const base_jd_prio
+kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+
+/**
+ * kbasep_js_atom_prio_to_sched_prio(): - Convert atom priority (base_jd_prio)
+ *                                        to relative ordering
+ * @atom_prio: Priority ID to translate.
+ *
+ * Atom priority values for @ref base_jd_prio cannot be compared directly to
+ * find out which are higher or lower.
+ *
+ * This function will convert base_jd_prio values for successively lower
+ * priorities into a monotonically increasing sequence. That is, the lower the
+ * base_jd_prio priority, the higher the value produced by this function. This
+ * is in accordance with how the rest of the kernel treats priority.
+ *
+ * The mapping is 1:1 and the size of the valid input range is the same as the
+ * size of the valid output range, i.e.
+ * KBASE_JS_ATOM_SCHED_PRIO_COUNT == BASE_JD_NR_PRIO_LEVELS
+ *
+ * Note: This must be kept in sync with the BASE_JD_PRIO_<...> definitions.
+ *
+ * Return: On success: a value in the inclusive range
+ *         0..KBASE_JS_ATOM_SCHED_PRIO_COUNT-1. On failure:
+ *         KBASE_JS_ATOM_SCHED_PRIO_INVALID
+ */
+static inline int kbasep_js_atom_prio_to_sched_prio(base_jd_prio atom_prio)
+{
+	if (atom_prio >= BASE_JD_NR_PRIO_LEVELS)
+		return KBASE_JS_ATOM_SCHED_PRIO_INVALID;
+
+	return kbasep_js_atom_priority_to_relative[atom_prio];
+}
+
+static inline base_jd_prio kbasep_js_sched_prio_to_atom_prio(int sched_prio)
+{
+	unsigned int prio_idx;
+
+	KBASE_DEBUG_ASSERT(0 <= sched_prio
+			&& sched_prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT);
+
+	prio_idx = (unsigned int)sched_prio;
+
+	return kbasep_js_relative_priority_to_atom[prio_idx];
+}
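+
+/* Example: because the two lookup tables form a 1:1 mapping over ranges of
+ * equal size (see kbasep_js_atom_prio_to_sched_prio() above, and assuming
+ * the tables are kept in sync as the note there requires), the round trip
+ *
+ *	kbasep_js_sched_prio_to_atom_prio(
+ *		kbasep_js_atom_prio_to_sched_prio(prio)) == prio
+ *
+ * holds for every valid base_jd_prio value of prio. */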
+
+	  /** @} *//* end group kbase_js */
+	  /** @} *//* end group base_kbase_api */
+	  /** @} *//* end group base_api */
+
+#endif				/* _KBASE_JS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c b/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c
new file mode 100644
index 0000000..1ff230c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c
@@ -0,0 +1,283 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_config.h>
+
+/*
+ * Private functions follow
+ */
+
+/**
+ * @brief Check whether a ctx has a certain attribute, and if so, retain that
+ * attribute on the runpool.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx is scheduled on the runpool
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_runpool_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	bool runpool_state_changed = false;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) {
+		KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] < S8_MAX);
+		++(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]);
+
+		if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 1) {
+			/* First refcount indicates a state change */
+			runpool_state_changed = true;
+			KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_RUNPOOL, kctx, NULL, 0u, attribute);
+		}
+	}
+
+	return runpool_state_changed;
+}
+
+/**
+ * @brief Check whether a ctx has a certain attribute, and if so, release that
+ * attribute on the runpool.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx is scheduled on the runpool
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_runpool_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	bool runpool_state_changed = false;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) {
+		KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] > 0);
+		--(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]);
+
+		if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 0) {
+			/* Last de-refcount indicates a state change */
+			runpool_state_changed = true;
+			KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_RUNPOOL, kctx, NULL, 0u, attribute);
+		}
+	}
+
+	return runpool_state_changed;
+}
+
+/**
+ * @brief Retain a certain attribute on a ctx, also retaining it on the runpool
+ * if the context is scheduled.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * This may allow the scheduler to submit more jobs than previously.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_ctx_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+	bool runpool_state_changed = false;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] < U32_MAX);
+
+	++(js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+
+	if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) {
+		/* Only refcount the attribute on the runpool the first
+		 * time this context sees this attribute */
+		KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_CTX, kctx, NULL, 0u, attribute);
+		runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, attribute);
+	}
+
+	return runpool_state_changed;
+}
+
+/*
+ * @brief Release a certain attribute on a ctx, also releasing it from the runpool
+ * if the context is scheduled.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * This may allow the scheduler to submit more jobs than previously.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_ctx_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+	bool runpool_state_changed = false;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] > 0);
+
+	if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) {
+		lockdep_assert_held(&kbdev->hwaccess_lock);
+		/* Only de-refcount the attribute on the runpool when this
+		 * is the last ctx-reference to it */
+		runpool_state_changed = kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, attribute);
+		KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_CTX, kctx, NULL, 0u, attribute);
+	}
+
+	/* De-ref must happen afterwards, because
+	 * kbasep_js_ctx_attr_runpool_release_attr() needs to check it too */
+	--(js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+
+	return runpool_state_changed;
+}
+
+/*
+ * More commonly used public functions
+ */
+
+void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+	bool runpool_state_changed;
+	int i;
+
+	/* Retain any existing attributes */
+	for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
+		if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) {
+			/* The context is being scheduled in, so update the
+			 * runpool with the new attributes */
+			runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);
+
+			/* We don't need to know about state changed, because retaining a
+			 * context occurs on scheduling it, and that itself will also try
+			 * to run new atoms */
+			CSTD_UNUSED(runpool_state_changed);
+		}
+	}
+}
+
+bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+	bool runpool_state_changed = false;
+	int i;
+
+	/* Release any existing attributes */
+	for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
+		if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) {
+			/* The context is being scheduled out, so update the
+			 * runpool on the removed attributes */
+			runpool_state_changed |= kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);
+		}
+	}
+
+	return runpool_state_changed;
+}
+
+void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	bool runpool_state_changed = false;
+	base_jd_core_req core_req;
+
+	KBASE_DEBUG_ASSERT(katom);
+	core_req = katom->core_req;
+
+	if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
+	else
+		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);
+
+	if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
+		/* Atom that can run on slot1 or slot2, and can use all cores */
+		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
+	}
+
+	/* We don't need to know about state changed, because retaining an
+	 * atom occurs on adding it, and that itself will also try to run
+	 * new atoms */
+	CSTD_UNUSED(runpool_state_changed);
+}
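+
+/* Example: an atom with core_req == BASE_JD_REQ_ONLY_COMPUTE (and neither
+ * coherent-group flag set) retains both KBASEP_JS_CTX_ATTR_COMPUTE and
+ * KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES on its context. */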
+
+bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+	bool runpool_state_changed = false;
+	base_jd_core_req core_req;
+
+	KBASE_DEBUG_ASSERT(katom_retained_state);
+	core_req = katom_retained_state->core_req;
+
+	/* No-op for invalid atoms */
+	if (kbasep_js_atom_retained_state_is_valid(katom_retained_state) == false)
+		return false;
+
+	if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+		runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
+	else
+		runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);
+
+	if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
+		/* Atom that can run on slot1 or slot2, and can use all cores */
+		runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
+	}
+
+	return runpool_state_changed;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h b/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h
new file mode 100644
index 0000000..25fd397
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h
@@ -0,0 +1,155 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js_ctx_attr.h
+ * Job Scheduler Context Attribute APIs
+ */
+
+#ifndef _KBASE_JS_CTX_ATTR_H_
+#define _KBASE_JS_CTX_ATTR_H_
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js
+ * @{
+ */
+
+/**
+ * Retain all attributes of a context
+ *
+ * This occurs on scheduling in the context on the runpool (but after
+ * is_scheduled is set)
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx->is_scheduled is true
+ */
+void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * Release all attributes of a context
+ *
+ * This occurs on scheduling out the context from the runpool (but before
+ * is_scheduled is cleared)
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx->is_scheduled is true
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * Retain all attributes of an atom
+ *
+ * This occurs on adding an atom to a context
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ */
+void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+
+/**
+ * Release all attributes of an atom, given its retained state.
+ *
+ * This occurs after (permanently) removing an atom from a context
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * This is a no-op when \a katom_retained_state is invalid.
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);
+
+/**
+ * Requires:
+ * - runpool_irq spinlock
+ */
+static inline s8 kbasep_js_ctx_attr_count_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_device_data *js_devdata;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_devdata = &kbdev->js_data;
+
+	return js_devdata->runpool_irq.ctx_attr_ref_count[attribute];
+}
+
+/**
+ * Requires:
+ * - runpool_irq spinlock
+ */
+static inline bool kbasep_js_ctx_attr_is_attr_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute)
+{
+	/* In general, attributes are 'on' when they have a non-zero refcount (note: the refcount will never be < 0) */
+	return (bool) kbasep_js_ctx_attr_count_on_runpool(kbdev, attribute);
+}
+
+/**
+ * Requires:
+ * - jsctx mutex
+ */
+static inline bool kbasep_js_ctx_attr_is_attr_on_ctx(struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	/* In general, attributes are 'on' when they have a refcount (which should never be < 0) */
+	return (bool) (js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+}
+
+	  /** @} *//* end group kbase_js */
+	  /** @} *//* end group base_kbase_api */
+	  /** @} *//* end group base_api */
+
+#endif				/* _KBASE_JS_CTX_ATTR_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js_defs.h b/drivers/gpu/arm/midgard/mali_kbase_js_defs.h
new file mode 100644
index 0000000..052a0b3
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_js_defs.h
@@ -0,0 +1,416 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js_defs.h
+ * Job Scheduler Type Definitions
+ */
+
+#ifndef _KBASE_JS_DEFS_H_
+#define _KBASE_JS_DEFS_H_
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js
+ * @{
+ */
+/* Forward decls */
+struct kbase_device;
+struct kbase_jd_atom;
+
+
+typedef u32 kbase_context_flags;
+
+struct kbasep_atom_req {
+	base_jd_core_req core_req;
+	kbase_context_flags ctx_req;
+	u32 device_nr;
+};
+
+/** Callback function run on all of a context's jobs registered with the Job
+ * Scheduler */
+typedef void (*kbasep_js_ctx_job_cb)(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
+
+/**
+ * @brief Maximum number of jobs that can be submitted to a job slot whilst
+ * inside the IRQ handler.
+ *
+ * This is important because GPU NULL jobs can complete whilst the IRQ handler
+ * is running. Without a limit, an unbounded number of GPU NULL jobs could be
+ * submitted inside the IRQ handler, which increases IRQ latency.
+ */
+#define KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ 2
+
+/**
+ * @brief Context attributes
+ *
+ * Each context attribute can be thought of as a boolean value that caches some
+ * state information about either the runpool, or the context:
+ * - In the case of the runpool, it is a cache of "Do any contexts owned by
+ * the runpool have attribute X?"
+ * - In the case of a context, it is a cache of "Do any atoms owned by the
+ * context have attribute X?"
+ *
+ * The boolean value of the context attributes often affect scheduling
+ * decisions, such as affinities to use and job slots to use.
+ *
+ * To accommodate changes of state in the context, each attribute is refcounted
+ * in the context, and in the runpool for all running contexts. Specifically:
+ * - The runpool holds a refcount of how many contexts in the runpool have this
+ * attribute.
+ * - The context holds a refcount of how many atoms have this attribute.
+ */
+enum kbasep_js_ctx_attr {
+	/** Attribute indicating a context that contains Compute jobs. That is,
+	 * the context has jobs of type @ref BASE_JD_REQ_ONLY_COMPUTE
+	 *
+	 * @note A context can be both 'Compute' and 'Non Compute' if it contains
+	 * both types of jobs.
+	 */
+	KBASEP_JS_CTX_ATTR_COMPUTE,
+
+	/** Attribute indicating a context that contains Non-Compute jobs. That is,
+	 * the context has some jobs that are \b not of type @ref
+	 * BASE_JD_REQ_ONLY_COMPUTE.
+	 *
+	 * @note A context can be both 'Compute' and 'Non Compute' if it contains
+	 * both types of jobs.
+	 */
+	KBASEP_JS_CTX_ATTR_NON_COMPUTE,
+
+	/** Attribute indicating that a context contains compute-job atoms that
+	 * aren't restricted to a coherent group, and can run on all cores.
+	 *
+	 * Specifically, this is when the atom's \a core_req satisfies:
+	 * - (\a core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) // uses slot 1 or slot 2
+	 * - && !(\a core_req & BASE_JD_REQ_COHERENT_GROUP) // not restricted to coherent groups
+	 *
+	 * Such atoms could be blocked from running if one of the coherent groups
+	 * is being used by another job slot, so tracking this context attribute
+	 * allows us to prevent such situations.
+	 *
+	 * @note This doesn't take into account the 1-coregroup case, where all
+	 * compute atoms would effectively be able to run on 'all cores', but
+	 * contexts will still not always get marked with this attribute. Instead,
+	 * it is the caller's responsibility to take into account the number of
+	 * coregroups when interpreting this attribute.
+	 *
+	 * @note Whilst Tiler atoms are normally combined with
+	 * BASE_JD_REQ_COHERENT_GROUP, it is possible to send such atoms without
+	 * BASE_JD_REQ_COHERENT_GROUP set. This is an unlikely case, but it's easy
+	 * enough to handle anyway.
+	 */
+	KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES,
+
+	/** Must be the last in the enum */
+	KBASEP_JS_CTX_ATTR_COUNT
+};
+
+enum {
+	/** Bit indicating that a new atom should be started because this atom completed */
+	KBASE_JS_ATOM_DONE_START_NEW_ATOMS = (1u << 0),
+	/** Bit indicating that the atom was evicted from the JS_NEXT registers */
+	KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT = (1u << 1)
+};
+
+/** Combination of KBASE_JS_ATOM_DONE_<...> bits */
+typedef u32 kbasep_js_atom_done_code;
+
+/*
+ * Context scheduling mode defines for kbase_device::js_ctx_scheduling_mode
+ */
+enum {
+	/*
+	 * In this mode, higher priority atoms will be scheduled first,
+	 * regardless of the context they belong to. Newly-runnable higher
+	 * priority atoms can preempt lower priority atoms currently running on
+	 * the GPU, even if they belong to a different context.
+	 */
+	KBASE_JS_SYSTEM_PRIORITY_MODE = 0,
+
+	/*
+	 * In this mode, the highest-priority atom will be chosen from each
+	 * context in turn using a round-robin algorithm, so priority only has
+	 * an effect within the context an atom belongs to. Newly-runnable
+	 * higher priority atoms can preempt the lower priority atoms currently
+	 * running on the GPU, but only if they belong to the same context.
+	 */
+	KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE,
+
+	/* Must be the last in the enum */
+	KBASE_JS_PRIORITY_MODE_COUNT,
+};
+
+/*
+ * Internal atom priority defines for kbase_jd_atom::sched_prio
+ */
+enum {
+	KBASE_JS_ATOM_SCHED_PRIO_HIGH = 0,
+	KBASE_JS_ATOM_SCHED_PRIO_MED,
+	KBASE_JS_ATOM_SCHED_PRIO_LOW,
+	KBASE_JS_ATOM_SCHED_PRIO_COUNT,
+};
+
+/* Invalid priority for kbase_jd_atom::sched_prio */
+#define KBASE_JS_ATOM_SCHED_PRIO_INVALID -1
+
+/* Default priority in the case of contexts with no atoms, or being lenient
+ * about invalid priorities from userspace.
+ */
+#define KBASE_JS_ATOM_SCHED_PRIO_DEFAULT KBASE_JS_ATOM_SCHED_PRIO_MED
+
+/**
+ * @brief KBase Device Data Job Scheduler sub-structure
+ *
+ * This encapsulates the current context of the Job Scheduler on a particular
+ * device. This context is global to the device, and is not tied to any
+ * particular struct kbase_context running on the device.
+ *
+ * nr_contexts_running and as_free are optimized for packing together (by making
+ * them smaller types than u32). The operations on them should rarely involve
+ * masking. The use of signed types for arithmetic indicates to the compiler
+ * that the value will not roll over (signed overflow is undefined behavior),
+ * and so the compiler is free to make optimizations based on that assumption
+ * (i.e. to remove masking).
+ */
+struct kbasep_js_device_data {
+	/* Sub-structure to collect together Job Scheduling data used in IRQ
+	 * context. The hwaccess_lock must be held when accessing. */
+	struct runpool_irq {
+		/** Bitvector indicating whether a currently scheduled context is
+		 * allowed to submit jobs. When bit 'N' is set, the context bound to
+		 * address space 'N' may submit jobs.
+		 */
+		u16 submit_allowed;
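+		/* Illustrative test (a sketch of how this bitvector is used
+		 * elsewhere in the scheduler):
+		 *   ok = submit_allowed & (1u << kctx->as_nr);
+		 */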
+
+		/** Context Attributes:
+		 * Each is large enough to hold a refcount of the number of contexts
+		 * that can fit into the runpool. This is currently BASE_MAX_NR_AS
+		 *
+		 * Note that when BASE_MAX_NR_AS==16 we need 5 bits (not 4) to store
+		 * the refcount. Hence, it's not worthwhile reducing this to
+		 * bit-manipulation on u32s to save space (where in contrast, 4 bit
+		 * sub-fields would be easy to do and would save space).
+		 *
+		 * Whilst this must not become negative, the sign bit is used for:
+		 * - error detection in debug builds
+		 * - Optimization: it is undefined for a signed int to overflow, and so
+		 * the compiler can optimize for that never happening (thus, no masking
+		 * is required on updating the variable) */
+		s8 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
+
+		/*
+		 * Affinity management and tracking
+		 */
+		/** Bitvector to aid affinity checking. Element 'n' bit 'i' indicates
+		 * that slot 'n' is using core i (i.e. slot_affinity_refcount[n][i] > 0) */
+		u64 slot_affinities[BASE_JM_MAX_NR_SLOTS];
+		/** Refcount for each core owned by each slot. Used to generate the
+		 * slot_affinities array of bitvectors
+		 *
+		 * The value of the refcount will not exceed BASE_JM_SUBMIT_SLOTS,
+		 * because it is refcounted only when a job is definitely about to be
+		 * submitted to a slot, and is de-refcounted immediately after a job
+		 * finishes */
+		s8 slot_affinity_refcount[BASE_JM_MAX_NR_SLOTS][64];
+	} runpool_irq;
+
+	/**
+	 * Run Pool mutex, for managing contexts within the runpool.
+	 * Unless otherwise specified, you must hold this lock whilst accessing any
+	 * members that follow
+	 *
+	 * In addition, this is used to access:
+	 * - the kbasep_js_kctx_info::runpool substructure
+	 */
+	struct mutex runpool_mutex;
+
+	/**
+	 * Queue Lock, used to access the Policy's queue of contexts independently
+	 * of the Run Pool.
+	 *
+	 * Of course, you don't need the Run Pool lock to access this.
+	 */
+	struct mutex queue_mutex;
+
+	/**
+	 * Scheduling semaphore. This must be held when calling
+	 * kbase_jm_kick()
+	 */
+	struct semaphore schedule_sem;
+
+	/**
+	 * List of contexts that can currently be pulled from
+	 */
+	struct list_head ctx_list_pullable[BASE_JM_MAX_NR_SLOTS][KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+	/**
+	 * List of contexts that can not currently be pulled from, but have
+	 * jobs currently running.
+	 */
+	struct list_head ctx_list_unpullable[BASE_JM_MAX_NR_SLOTS][KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+
+	/** Number of currently scheduled user contexts (excluding ones that are not submitting jobs) */
+	s8 nr_user_contexts_running;
+	/** Number of currently scheduled contexts (including ones that are not submitting jobs) */
+	s8 nr_all_contexts_running;
+
+	/** Core Requirements to match up with base_js_atom's core_req member
+	 * @note This is a write-once member, and so no locking is required to read */
+	base_jd_core_req js_reqs[BASE_JM_MAX_NR_SLOTS];
+
+	u32 scheduling_period_ns;    /**< Value for JS_SCHEDULING_PERIOD_NS */
+	u32 soft_stop_ticks;	     /**< Value for JS_SOFT_STOP_TICKS */
+	u32 soft_stop_ticks_cl;	     /**< Value for JS_SOFT_STOP_TICKS_CL */
+	u32 hard_stop_ticks_ss;	     /**< Value for JS_HARD_STOP_TICKS_SS */
+	u32 hard_stop_ticks_cl;	     /**< Value for JS_HARD_STOP_TICKS_CL */
+	u32 hard_stop_ticks_dumping; /**< Value for JS_HARD_STOP_TICKS_DUMPING */
+	u32 gpu_reset_ticks_ss;	     /**< Value for JS_RESET_TICKS_SS */
+	u32 gpu_reset_ticks_cl;	     /**< Value for JS_RESET_TICKS_CL */
+	u32 gpu_reset_ticks_dumping; /**< Value for JS_RESET_TICKS_DUMPING */
+	u32 ctx_timeslice_ns;		 /**< Value for JS_CTX_TIMESLICE_NS */
+
+	/** Value for JS_SOFT_JOB_TIMEOUT */
+	atomic_t soft_job_timeout_ms;
+
+	/** List of suspended soft jobs */
+	struct list_head suspended_soft_jobs_list;
+
+#ifdef CONFIG_MALI_DEBUG
+	/* Support soft-stop on a single context */
+	bool softstop_always;
+#endif				/* CONFIG_MALI_DEBUG */
+
+	/** The initialized-flag is placed at the end, to avoid cache pollution (we
+	 * should only be using this during init/term paths).
+	 * @note This is a write-once member, and so no locking is required to read */
+	int init_status;
+
+	/* Number of contexts that can currently be pulled from */
+	u32 nr_contexts_pullable;
+
+	/* Number of contexts that can either be pulled from or are currently
+	 * running */
+	atomic_t nr_contexts_runnable;
+};
+
+/**
+ * @brief KBase Context Job Scheduling information structure
+ *
+ * This is a substructure in the struct kbase_context that encapsulates all the
+ * scheduling information.
+ */
+struct kbasep_js_kctx_info {
+
+	/**
+	 * Job Scheduler Context information sub-structure. These members are
+	 * accessed regardless of whether the context is:
+	 * - In the Policy's Run Pool
+	 * - In the Policy's Queue
+	 * - Not queued nor in the Run Pool.
+	 *
+	 * You must obtain the jsctx_mutex before accessing any other members of
+	 * this substructure.
+	 *
+	 * You may not access any of these members from IRQ context.
+	 */
+	struct kbase_jsctx {
+		struct mutex jsctx_mutex;		    /**< Job Scheduler Context lock */
+
+		/** Number of jobs <b>ready to run</b> - does \em not include the jobs
+		 * waiting in the dispatcher, nor dependency-only jobs. See
+		 * kbase_jd_context::job_nr for such jobs. */
+		u32 nr_jobs;
+
+		/** Context Attributes:
+		 * Each is large enough to hold a refcount of the number of atoms on
+		 * the context. */
+		u32 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
+
+		/**
+		 * Wait queue to wait for KCTX_SCHEDULED flag state changes.
+		 */
+		wait_queue_head_t is_scheduled_wait;
+
+		/** Link implementing JS queues. Context can be present on one
+		 * list per job slot
+		 */
+		struct list_head ctx_list_entry[BASE_JM_MAX_NR_SLOTS];
+	} ctx;
+
+	/* The initialized-flag is placed at the end, to avoid cache pollution (we
+	 * should only be using this during init/term paths) */
+	int init_status;
+};
+
+/** Subset of atom state that can be available after jd_done_nolock() is called
+ * on that atom. A copy must be taken via kbasep_js_atom_retained_state_copy(),
+ * because the original atom could disappear. */
+struct kbasep_js_atom_retained_state {
+	/** Event code - to determine whether the atom has finished */
+	enum base_jd_event_code event_code;
+	/** core requirements */
+	base_jd_core_req core_req;
+	/* priority */
+	int sched_priority;
+	/* Core group atom was executed on */
+	u32 device_nr;
+
+};
+
+/**
+ * Value signifying 'no retry on a slot required' for:
+ * - kbase_js_atom_retained_state::retry_submit_on_slot
+ * - kbase_jd_atom::retry_submit_on_slot
+ */
+#define KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID (-1)
+
+/**
+ * base_jd_core_req value signifying 'invalid' for a kbase_jd_atom_retained_state.
+ *
+ * @see kbase_atom_retained_state_is_valid()
+ */
+#define KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID BASE_JD_REQ_DEP
+
+/**
+ * @brief The JS timer resolution, in microseconds
+ *
+ * Any non-zero difference in time will be at least this size.
+ */
+#define KBASEP_JS_TICK_RESOLUTION_US 1
+
+
+	  /** @} *//* end group kbase_js */
+	  /** @} *//* end group base_kbase_api */
+	  /** @} *//* end group base_api */
+
+#endif				/* _KBASE_JS_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_linux.h b/drivers/gpu/arm/midgard/mali_kbase_linux.h
new file mode 100644
index 0000000..003ac9e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_linux.h
@@ -0,0 +1,48 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_linux.h
+ * Base kernel APIs, Linux implementation.
+ */
+
+#ifndef _KBASE_LINUX_H_
+#define _KBASE_LINUX_H_
+
+/* All things that are needed for the Linux port. */
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/atomic.h>
+
+#if (defined(MALI_KERNEL_TEST_API) && (1 == MALI_KERNEL_TEST_API))
+	#define KBASE_EXPORT_TEST_API(func) EXPORT_SYMBOL(func)
+#else
+	#define KBASE_EXPORT_TEST_API(func)
+#endif
+
+#define KBASE_EXPORT_SYMBOL(func) EXPORT_SYMBOL(func)
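+
+/* Usage sketch: placed after a function definition to export the symbol only
+ * when the kernel test API is enabled, e.g.
+ *
+ *   KBASE_EXPORT_TEST_API(kbase_remove_va_region);
+ */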
+
+#endif /* _KBASE_LINUX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem.c b/drivers/gpu/arm/midgard/mali_kbase_mem.c
new file mode 100644
index 0000000..fa05f34
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem.c
@@ -0,0 +1,3906 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem.c
+ * Base kernel memory APIs
+ */
+#include <linux/dma-buf.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/version.h>
+#include <linux/log2.h>
+#ifdef CONFIG_OF
+#include <linux/of_platform.h>
+#endif
+
+#include <mali_kbase_config.h>
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_cache_policy.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_tracepoints.h>
+#include <mali_kbase_native_mgm.h>
+#include <mali_kbase_mem_pool_group.h>
+
+
+/* Forward declarations */
+static void free_partial_locked(struct kbase_context *kctx,
+		struct kbase_mem_pool *pool, struct tagged_addr tp);
+
+static size_t kbase_get_num_cpu_va_bits(struct kbase_context *kctx)
+{
+#if defined(CONFIG_ARM64)
+	/* VA_BITS can be as high as 48 bits, but all bits are available for
+	 * both user and kernel.
+	 */
+	size_t cpu_va_bits = VA_BITS;
+#elif defined(CONFIG_X86_64)
+	/* x86_64 can access 48 bits of VA, but the 48th is used to denote
+	 * kernel (1) vs userspace (0), so the max here is 47.
+	 */
+	size_t cpu_va_bits = 47;
+#elif defined(CONFIG_ARM) || defined(CONFIG_X86_32)
+	size_t cpu_va_bits = sizeof(void *) * BITS_PER_BYTE;
+#else
+#error "Unknown CPU VA width for this architecture"
+#endif
+
+#ifdef CONFIG_64BIT
+	if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+		cpu_va_bits = 32;
+#endif
+
+	return cpu_va_bits;
+}
+
+/* This function finds out which RB tree the given pfn from the GPU VA belongs
+ * to, based on the memory zone the pfn refers to. */
+static struct rb_root *kbase_gpu_va_to_rbtree(struct kbase_context *kctx,
+								    u64 gpu_pfn)
+{
+	struct rb_root *rbtree = NULL;
+
+	/* The gpu_pfn can only be greater than the starting pfn of the EXEC_VA
+	 * zone if this has been initialized.
+	 */
+	if (gpu_pfn >= kctx->exec_va_start)
+		rbtree = &kctx->reg_rbtree_exec;
+	else {
+		u64 same_va_end;
+
+#ifdef CONFIG_64BIT
+		if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+#endif /* CONFIG_64BIT */
+			same_va_end = KBASE_REG_ZONE_CUSTOM_VA_BASE;
+#ifdef CONFIG_64BIT
+		else
+			same_va_end = kctx->same_va_end;
+#endif /* CONFIG_64BIT */
+
+		if (gpu_pfn >= same_va_end)
+			rbtree = &kctx->reg_rbtree_custom;
+		else
+			rbtree = &kctx->reg_rbtree_same;
+	}
+
+	return rbtree;
+}
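+
+/* GPU VA zone layout assumed by the lookup above (a sketch; the exact
+ * boundaries are established in kbase_region_tracker_init() and
+ * kbase_region_tracker_init_exec()):
+ *
+ *   [0, same_va_end)             -> reg_rbtree_same   (SAME_VA)
+ *   [same_va_end, exec_va_start) -> reg_rbtree_custom (CUSTOM_VA)
+ *   [exec_va_start, top)         -> reg_rbtree_exec   (EXEC_VA; exec_va_start
+ *                                   remains U64_MAX until the zone is created)
+ */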
+
+/* This function inserts a region into the tree. */
+static void kbase_region_tracker_insert(struct kbase_va_region *new_reg)
+{
+	u64 start_pfn = new_reg->start_pfn;
+	struct rb_node **link = NULL;
+	struct rb_node *parent = NULL;
+	struct rb_root *rbtree = NULL;
+
+	rbtree = new_reg->rbtree;
+
+	link = &(rbtree->rb_node);
+	/* Find the right place in the tree using tree search */
+	while (*link) {
+		struct kbase_va_region *old_reg;
+
+		parent = *link;
+		old_reg = rb_entry(parent, struct kbase_va_region, rblink);
+
+		/* RBTree requires no duplicate entries. */
+		KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn);
+
+		if (old_reg->start_pfn > start_pfn)
+			link = &(*link)->rb_left;
+		else
+			link = &(*link)->rb_right;
+	}
+
+	/* Put the new node there, and rebalance tree */
+	rb_link_node(&(new_reg->rblink), parent, link);
+
+	rb_insert_color(&(new_reg->rblink), rbtree);
+}
+
+static struct kbase_va_region *find_region_enclosing_range_rbtree(
+		struct rb_root *rbtree, u64 start_pfn, size_t nr_pages)
+{
+	struct rb_node *rbnode;
+	struct kbase_va_region *reg;
+	u64 end_pfn = start_pfn + nr_pages;
+
+	rbnode = rbtree->rb_node;
+
+	while (rbnode) {
+		u64 tmp_start_pfn, tmp_end_pfn;
+
+		reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+		tmp_start_pfn = reg->start_pfn;
+		tmp_end_pfn = reg->start_pfn + reg->nr_pages;
+
+		/* If start is lower than this, go left. */
+		if (start_pfn < tmp_start_pfn)
+			rbnode = rbnode->rb_left;
+		/* If end is higher than this, then go right. */
+		else if (end_pfn > tmp_end_pfn)
+			rbnode = rbnode->rb_right;
+		else	/* Enclosing */
+			return reg;
+	}
+
+	return NULL;
+}
+
+struct kbase_va_region *kbase_find_region_enclosing_address(
+		struct rb_root *rbtree, u64 gpu_addr)
+{
+	u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+	struct rb_node *rbnode;
+	struct kbase_va_region *reg;
+
+	rbnode = rbtree->rb_node;
+
+	while (rbnode) {
+		u64 tmp_start_pfn, tmp_end_pfn;
+
+		reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+		tmp_start_pfn = reg->start_pfn;
+		tmp_end_pfn = reg->start_pfn + reg->nr_pages;
+
+		/* If start is lower than this, go left. */
+		if (gpu_pfn < tmp_start_pfn)
+			rbnode = rbnode->rb_left;
+		/* If end is higher than this, then go right. */
+		else if (gpu_pfn >= tmp_end_pfn)
+			rbnode = rbnode->rb_right;
+		else	/* Enclosing */
+			return reg;
+	}
+
+	return NULL;
+}
+
+/* Find region enclosing given address. */
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(
+		struct kbase_context *kctx, u64 gpu_addr)
+{
+	u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+	struct rb_root *rbtree = NULL;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	rbtree = kbase_gpu_va_to_rbtree(kctx, gpu_pfn);
+
+	return kbase_find_region_enclosing_address(rbtree, gpu_addr);
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_enclosing_address);
+
+struct kbase_va_region *kbase_find_region_base_address(
+		struct rb_root *rbtree, u64 gpu_addr)
+{
+	u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+	struct rb_node *rbnode = NULL;
+	struct kbase_va_region *reg = NULL;
+
+	rbnode = rbtree->rb_node;
+
+	while (rbnode) {
+		reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+		if (reg->start_pfn > gpu_pfn)
+			rbnode = rbnode->rb_left;
+		else if (reg->start_pfn < gpu_pfn)
+			rbnode = rbnode->rb_right;
+		else
+			return reg;
+	}
+
+	return NULL;
+}
+
+/* Find region with given base address */
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(
+		struct kbase_context *kctx, u64 gpu_addr)
+{
+	u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+	struct rb_root *rbtree = NULL;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	rbtree = kbase_gpu_va_to_rbtree(kctx, gpu_pfn);
+
+	return kbase_find_region_base_address(rbtree, gpu_addr);
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_base_address);
+
+/* Find region meeting given requirements */
+static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(
+		struct kbase_va_region *reg_reqs,
+		size_t nr_pages, size_t align_offset, size_t align_mask,
+		u64 *out_start_pfn)
+{
+	struct rb_node *rbnode = NULL;
+	struct kbase_va_region *reg = NULL;
+	struct rb_root *rbtree = NULL;
+
+	/* Note that this search is a linear search, as we do not have a target
+	 * address in mind, so it does not benefit from the rbtree structure */
+	rbtree = reg_reqs->rbtree;
+
+	for (rbnode = rb_first(rbtree); rbnode; rbnode = rb_next(rbnode)) {
+		reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+		if ((reg->nr_pages >= nr_pages) &&
+				(reg->flags & KBASE_REG_FREE)) {
+			/* Check alignment */
+			u64 start_pfn = reg->start_pfn;
+
+			/* When align_offset == align, this sequence is
+			 * equivalent to:
+			 *   (start_pfn + align_mask) & ~(align_mask)
+			 *
+			 * Otherwise, it aligns to n*align + offset, for the
+			 * lowest value n that makes this still >start_pfn */
+			start_pfn += align_mask;
+			start_pfn -= (start_pfn - align_offset) & (align_mask);
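+			/* Worked example (illustrative): with align_offset ==
+			 * align == 16 (align_mask == 15) and start_pfn == 35,
+			 * 35 + 15 = 50 and 50 - ((50 - 16) & 15) = 48, the
+			 * same as (35 + 15) & ~15. With align_offset == 4,
+			 * 50 - ((50 - 4) & 15) = 36 = 2*16 + 4, the lowest
+			 * n*align + offset that is still > 35.
+			 */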
+
+			if (!(reg_reqs->flags & KBASE_REG_GPU_NX)) {
+				/* Can't end at 4GB boundary */
+				if (0 == ((start_pfn + nr_pages) & BASE_MEM_PFN_MASK_4GB))
+					start_pfn += align_offset;
+
+				/* Can't start at 4GB boundary */
+				if (0 == (start_pfn & BASE_MEM_PFN_MASK_4GB))
+					start_pfn += align_offset;
+
+				if (!((start_pfn + nr_pages) & BASE_MEM_PFN_MASK_4GB) ||
+				    !(start_pfn & BASE_MEM_PFN_MASK_4GB))
+					continue;
+			} else if (reg_reqs->flags &
+					KBASE_REG_GPU_VA_SAME_4GB_PAGE) {
+				u64 end_pfn = start_pfn + nr_pages - 1;
+
+				if ((start_pfn & ~BASE_MEM_PFN_MASK_4GB) !=
+				    (end_pfn & ~BASE_MEM_PFN_MASK_4GB))
+					start_pfn = end_pfn & ~BASE_MEM_PFN_MASK_4GB;
+			}
+
+			if ((start_pfn >= reg->start_pfn) &&
+					(start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) &&
+					((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1))) {
+				*out_start_pfn = start_pfn;
+				return reg;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * @brief Remove a region object from the global list.
+ *
+ * The region reg is removed, possibly by merging with other free and
+ * compatible adjacent regions.  It must be called with the context
+ * region lock held. The associated memory is not released (see
+ * kbase_free_alloced_region). Internal use only.
+ */
+int kbase_remove_va_region(struct kbase_va_region *reg)
+{
+	struct rb_node *rbprev;
+	struct kbase_va_region *prev = NULL;
+	struct rb_node *rbnext;
+	struct kbase_va_region *next = NULL;
+	struct rb_root *reg_rbtree = NULL;
+
+	int merged_front = 0;
+	int merged_back = 0;
+	int err = 0;
+
+	reg_rbtree = reg->rbtree;
+
+	/* Try to merge with the previous block first */
+	rbprev = rb_prev(&(reg->rblink));
+	if (rbprev) {
+		prev = rb_entry(rbprev, struct kbase_va_region, rblink);
+		if (prev->flags & KBASE_REG_FREE) {
+			/* We're compatible with the previous VMA,
+			 * merge with it */
+			WARN_ON((prev->flags & KBASE_REG_ZONE_MASK) !=
+					    (reg->flags & KBASE_REG_ZONE_MASK));
+			prev->nr_pages += reg->nr_pages;
+			rb_erase(&(reg->rblink), reg_rbtree);
+			reg = prev;
+			merged_front = 1;
+		}
+	}
+
+	/* Try to merge with the next block second */
+	/* Note we do the lookup here as the tree may have been rebalanced. */
+	rbnext = rb_next(&(reg->rblink));
+	if (rbnext) {
+		/* We're compatible with the next VMA, merge with it */
+		next = rb_entry(rbnext, struct kbase_va_region, rblink);
+		if (next->flags & KBASE_REG_FREE) {
+			WARN_ON((next->flags & KBASE_REG_ZONE_MASK) !=
+					    (reg->flags & KBASE_REG_ZONE_MASK));
+			next->start_pfn = reg->start_pfn;
+			next->nr_pages += reg->nr_pages;
+			rb_erase(&(reg->rblink), reg_rbtree);
+			merged_back = 1;
+			if (merged_front) {
+				/* We already merged with prev, free it */
+				kfree(reg);
+			}
+		}
+	}
+
+	/* If we failed to merge then we need to add a new block */
+	if (!(merged_front || merged_back)) {
+		/*
+		 * We didn't merge anything. Add a new free
+		 * placeholder and remove the original one.
+		 */
+		struct kbase_va_region *free_reg;
+
+		free_reg = kbase_alloc_free_region(reg_rbtree,
+				reg->start_pfn, reg->nr_pages,
+				reg->flags & KBASE_REG_ZONE_MASK);
+		if (!free_reg) {
+			err = -ENOMEM;
+			goto out;
+		}
+		rb_replace_node(&(reg->rblink), &(free_reg->rblink), reg_rbtree);
+	}
+
+ out:
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_remove_va_region);
+
+/**
+ * kbase_insert_va_region_nolock - Insert a VA region into the tracker,
+ * carving it out of an existing free region.
+ *
+ * @new_reg: The new region to insert
+ * @at_reg: The free region to carve the new region from
+ * @start_pfn: The Page Frame Number to insert at
+ * @nr_pages: The number of pages of the region
+ */
+static int kbase_insert_va_region_nolock(struct kbase_va_region *new_reg,
+		struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)
+{
+	struct rb_root *reg_rbtree = NULL;
+	int err = 0;
+
+	reg_rbtree = at_reg->rbtree;
+
+	/* Must be a free region */
+	KBASE_DEBUG_ASSERT((at_reg->flags & KBASE_REG_FREE) != 0);
+	/* start_pfn should be contained within at_reg */
+	KBASE_DEBUG_ASSERT((start_pfn >= at_reg->start_pfn) && (start_pfn < at_reg->start_pfn + at_reg->nr_pages));
+	/* at least nr_pages from start_pfn should be contained within at_reg */
+	KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= at_reg->start_pfn + at_reg->nr_pages);
+
+	new_reg->start_pfn = start_pfn;
+	new_reg->nr_pages = nr_pages;
+
+	/* Region occupies the whole free block: swap in the new one and
+	 * delete the old. */
+	if (at_reg->start_pfn == start_pfn && at_reg->nr_pages == nr_pages) {
+		rb_replace_node(&(at_reg->rblink), &(new_reg->rblink),
+								reg_rbtree);
+		kfree(at_reg);
+	}
+	/* New region replaces the start of the old one, so insert before. */
+	else if (at_reg->start_pfn == start_pfn) {
+		at_reg->start_pfn += nr_pages;
+		KBASE_DEBUG_ASSERT(at_reg->nr_pages >= nr_pages);
+		at_reg->nr_pages -= nr_pages;
+
+		kbase_region_tracker_insert(new_reg);
+	}
+	/* New region replaces the end of the old one, so insert after. */
+	else if ((at_reg->start_pfn + at_reg->nr_pages) == (start_pfn + nr_pages)) {
+		at_reg->nr_pages -= nr_pages;
+
+		kbase_region_tracker_insert(new_reg);
+	}
+	/* New region splits the old one, so insert and create new */
+	else {
+		struct kbase_va_region *new_front_reg;
+
+		new_front_reg = kbase_alloc_free_region(reg_rbtree,
+				at_reg->start_pfn,
+				start_pfn - at_reg->start_pfn,
+				at_reg->flags & KBASE_REG_ZONE_MASK);
+
+		if (new_front_reg) {
+			at_reg->nr_pages -= nr_pages + new_front_reg->nr_pages;
+			at_reg->start_pfn = start_pfn + nr_pages;
+
+			kbase_region_tracker_insert(new_front_reg);
+			kbase_region_tracker_insert(new_reg);
+		} else {
+			err = -ENOMEM;
+		}
+	}
+
+	return err;
+}
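+
+/* The four cases above, sketched for a free region 'at_reg' spanning pfns
+ * [A, B) and a new region occupying [S, S+N):
+ *
+ *   A == S && S+N == B : exact fit -> new_reg replaces at_reg, which is freed
+ *   A == S && S+N <  B : head      -> at_reg shrinks from the front
+ *   A <  S && S+N == B : tail      -> at_reg shrinks from the back
+ *   A <  S && S+N <  B : middle    -> a new free region is allocated for
+ *                                     [A, S) and at_reg keeps [S+N, B)
+ */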
+
+/**
+ * kbase_add_va_region - Add a VA region to the region list for a context.
+ *
+ * @kctx: kbase context containing the region
+ * @reg: the region to add
+ * @addr: the address to insert the region at
+ * @nr_pages: the number of pages in the region
+ * @align: the minimum alignment in pages
+ */
+int kbase_add_va_region(struct kbase_context *kctx,
+		struct kbase_va_region *reg, u64 addr,
+		size_t nr_pages, size_t align)
+{
+	int err = 0;
+	struct kbase_device *kbdev = kctx->kbdev;
+	int cpu_va_bits = kbase_get_num_cpu_va_bits(kctx);
+	int gpu_pc_bits =
+		kbdev->gpu_props.props.core_props.log2_program_counter_size;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+	KBASE_DEBUG_ASSERT(NULL != reg);
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/* The executable allocation from the SAME_VA zone would already have an
+	 * appropriately aligned GPU VA chosen for it.
+	 * Also the executable allocation from EXEC_VA zone doesn't need the
+	 * special alignment.
+	 */
+	if (!(reg->flags & KBASE_REG_GPU_NX) && !addr &&
+	    ((reg->flags & KBASE_REG_ZONE_MASK) != KBASE_REG_ZONE_EXEC_VA)) {
+		if (cpu_va_bits > gpu_pc_bits) {
+			align = max(align, (size_t)((1ULL << gpu_pc_bits)
+						>> PAGE_SHIFT));
+		}
+	}
+
+	do {
+		err = kbase_add_va_region_rbtree(kbdev, reg, addr, nr_pages,
+				align);
+		if (err != -ENOMEM)
+			break;
+
+		/*
+		 * If the allocation is not from the same zone as JIT
+		 * then don't retry; we're out of VA and there is
+		 * nothing that can be done about it.
+		 */
+		if ((reg->flags & KBASE_REG_ZONE_MASK) !=
+				KBASE_REG_ZONE_CUSTOM_VA)
+			break;
+	} while (kbase_jit_evict(kctx));
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_add_va_region);
+
+/**
+ * kbase_add_va_region_rbtree - Insert a region into its corresponding rbtree
+ *
+ * Insert a region into the rbtree that was specified when the region was
+ * created. If addr is 0 a free area in the rbtree is used, otherwise the
+ * specified address is used.
+ *
+ * @kbdev: The kbase device
+ * @reg: The region to add
+ * @addr: The address to add the region at, or 0 to map at any available address
+ * @nr_pages: The size of the region in pages
+ * @align: The minimum alignment in pages
+ */
+int kbase_add_va_region_rbtree(struct kbase_device *kbdev,
+		struct kbase_va_region *reg,
+		u64 addr, size_t nr_pages, size_t align)
+{
+	struct device *const dev = kbdev->dev;
+	struct rb_root *rbtree = NULL;
+	struct kbase_va_region *tmp;
+	u64 gpu_pfn = addr >> PAGE_SHIFT;
+	int err = 0;
+
+	rbtree = reg->rbtree;
+
+	if (!align)
+		align = 1;
+
+	/* must be a power of 2 */
+	KBASE_DEBUG_ASSERT(is_power_of_2(align));
+	KBASE_DEBUG_ASSERT(nr_pages > 0);
+
+	/* Path 1: Map a specific address. Find the enclosing region,
+	 * which *must* be free.
+	 */
+	if (gpu_pfn) {
+		KBASE_DEBUG_ASSERT(!(gpu_pfn & (align - 1)));
+
+		tmp = find_region_enclosing_range_rbtree(rbtree, gpu_pfn,
+				nr_pages);
+		if (kbase_is_region_invalid(tmp)) {
+			dev_warn(dev, "Enclosing region not found or invalid: 0x%08llx gpu_pfn, %zu nr_pages", gpu_pfn, nr_pages);
+			err = -ENOMEM;
+			goto exit;
+		} else if (!kbase_is_region_free(tmp)) {
+			dev_warn(dev, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%zx gpu_pfn=0x%llx nr_pages=0x%zx\n",
+					tmp->start_pfn, tmp->flags,
+					tmp->nr_pages, gpu_pfn, nr_pages);
+			err = -ENOMEM;
+			goto exit;
+		}
+
+		err = kbase_insert_va_region_nolock(reg, tmp, gpu_pfn,
+				nr_pages);
+		if (err) {
+			dev_warn(dev, "Failed to insert va region");
+			err = -ENOMEM;
+		}
+	} else {
+		/* Path 2: Map any free address which meets the requirements. */
+		u64 start_pfn;
+		size_t align_offset = align;
+		size_t align_mask = align - 1;
+
+		if ((reg->flags & KBASE_REG_TILER_ALIGN_TOP)) {
+			WARN(align > 1, "%s with align %lx might not be honored for KBASE_REG_TILER_ALIGN_TOP memory",
+					__func__,
+					(unsigned long)align);
+			align_mask  = reg->extent - 1;
+			align_offset = reg->extent - reg->initial_commit;
+		}
+
+		tmp = kbase_region_tracker_find_region_meeting_reqs(reg,
+				nr_pages, align_offset, align_mask,
+				&start_pfn);
+		if (tmp) {
+			err = kbase_insert_va_region_nolock(reg, tmp,
+							start_pfn, nr_pages);
+			if (unlikely(err)) {
+				dev_warn(dev, "Failed to insert region: 0x%08llx start_pfn, %zu nr_pages",
+					start_pfn, nr_pages);
+			}
+		} else {
+			dev_dbg(dev, "Failed to find a suitable region: %zu nr_pages, %zu align_offset, %zu align_mask\n",
+				nr_pages, align_offset, align_mask);
+			err = -ENOMEM;
+		}
+	}
+
+exit:
+	return err;
+}
+
+/**
+ * @brief Initialize the internal region tracker data structure.
+ */
+static void kbase_region_tracker_ds_init(struct kbase_context *kctx,
+		struct kbase_va_region *same_va_reg,
+		struct kbase_va_region *custom_va_reg)
+{
+	kctx->reg_rbtree_same = RB_ROOT;
+	kbase_region_tracker_insert(same_va_reg);
+
+	/* Although custom_va_reg and exec_va_reg don't always exist,
+	 * initialize unconditionally because of the mem_view debugfs
+	 * implementation which relies on them being empty.
+	 *
+	 * The difference between the two is that the EXEC_VA region
+	 * is never initialized at this stage.
+	 */
+	kctx->reg_rbtree_custom = RB_ROOT;
+	kctx->reg_rbtree_exec = RB_ROOT;
+
+	if (custom_va_reg)
+		kbase_region_tracker_insert(custom_va_reg);
+}
+
+static void kbase_region_tracker_erase_rbtree(struct rb_root *rbtree)
+{
+	struct rb_node *rbnode;
+	struct kbase_va_region *reg;
+
+	do {
+		rbnode = rb_first(rbtree);
+		if (rbnode) {
+			rb_erase(rbnode, rbtree);
+			reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+			WARN_ON(reg->va_refcnt != 1);
+			/* Reset the start_pfn - as the rbtree is being
+			 * destroyed and we've already erased this region,
+			 * there is no further need to attempt to remove it.
+			 * This won't affect the cleanup if the region was
+			 * being used as a sticky resource, as the cleanup
+			 * related to sticky resources anyway needs to be
+			 * performed before the region tracker is terminated.
+			 */
+			reg->start_pfn = 0;
+			kbase_free_alloced_region(reg);
+		}
+	} while (rbnode);
+}
+
+void kbase_region_tracker_term(struct kbase_context *kctx)
+{
+	kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_same);
+	kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_custom);
+	kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_exec);
+}
+
+void kbase_region_tracker_term_rbtree(struct rb_root *rbtree)
+{
+	kbase_region_tracker_erase_rbtree(rbtree);
+}
+
+static size_t kbase_get_same_va_bits(struct kbase_context *kctx)
+{
+	return min(kbase_get_num_cpu_va_bits(kctx),
+			(size_t) kctx->kbdev->gpu_props.mmu.va_bits);
+}
+
+int kbase_region_tracker_init(struct kbase_context *kctx)
+{
+	struct kbase_va_region *same_va_reg;
+	struct kbase_va_region *custom_va_reg = NULL;
+	size_t same_va_bits = kbase_get_same_va_bits(kctx);
+	u64 custom_va_size = KBASE_REG_ZONE_CUSTOM_VA_SIZE;
+	u64 gpu_va_limit = (1ULL << kctx->kbdev->gpu_props.mmu.va_bits) >> PAGE_SHIFT;
+	u64 same_va_pages;
+	int err;
+
+	/* Take the lock as kbase_free_alloced_region requires it */
+	kbase_gpu_vm_lock(kctx);
+
+	same_va_pages = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
+	/* All contexts have a SAME_VA zone */
+	same_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_same, 1,
+			same_va_pages,
+			KBASE_REG_ZONE_SAME_VA);
+
+	if (!same_va_reg) {
+		err = -ENOMEM;
+		goto fail_unlock;
+	}
+
+#ifdef CONFIG_64BIT
+	/* 32-bit clients have custom VA zones */
+	if (kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+#endif
+		if (gpu_va_limit <= KBASE_REG_ZONE_CUSTOM_VA_BASE) {
+			err = -EINVAL;
+			goto fail_free_same_va;
+		}
+		/* If the current size of TMEM is out of range of the
+		 * virtual address space addressable by the MMU then
+		 * we should shrink it to fit
+		 */
+		if ((KBASE_REG_ZONE_CUSTOM_VA_BASE + KBASE_REG_ZONE_CUSTOM_VA_SIZE) >= gpu_va_limit)
+			custom_va_size = gpu_va_limit - KBASE_REG_ZONE_CUSTOM_VA_BASE;
+
+		custom_va_reg = kbase_alloc_free_region(
+				&kctx->reg_rbtree_custom,
+				KBASE_REG_ZONE_CUSTOM_VA_BASE,
+				custom_va_size, KBASE_REG_ZONE_CUSTOM_VA);
+
+		if (!custom_va_reg) {
+			err = -ENOMEM;
+			goto fail_free_same_va;
+		}
+#ifdef CONFIG_64BIT
+	} else {
+		custom_va_size = 0;
+	}
+#endif
+
+	kbase_region_tracker_ds_init(kctx, same_va_reg, custom_va_reg);
+
+	kctx->same_va_end = same_va_pages + 1;
+	kctx->gpu_va_end = kctx->same_va_end + custom_va_size;
+	kctx->exec_va_start = U64_MAX;
+	kctx->jit_va = false;
+
+	kbase_gpu_vm_unlock(kctx);
+	return 0;
+
+fail_free_same_va:
+	kbase_free_alloced_region(same_va_reg);
+fail_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return err;
+}
+
+#ifdef CONFIG_64BIT
+static int kbase_region_tracker_init_jit_64(struct kbase_context *kctx,
+		u64 jit_va_pages)
+{
+	struct kbase_va_region *same_va;
+	struct kbase_va_region *custom_va_reg;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/* First verify that a JIT_VA zone has not been created already. */
+	if (kctx->jit_va)
+		return -EINVAL;
+
+	/*
+	 * Modify the same VA free region after creation. Be careful to ensure
+	 * that allocations haven't been made as they could cause an overlap
+	 * to happen with existing same VA allocations and the custom VA zone.
+	 */
+	same_va = kbase_region_tracker_find_region_base_address(kctx,
+			PAGE_SIZE);
+	if (!same_va)
+		return -ENOMEM;
+
+	if (same_va->nr_pages < jit_va_pages || kctx->same_va_end < jit_va_pages)
+		return -ENOMEM;
+
+	/* It's safe to adjust the same VA zone now */
+	same_va->nr_pages -= jit_va_pages;
+	kctx->same_va_end -= jit_va_pages;
+
+	/*
+	 * Create a custom VA zone at the end of the VA for allocations which
+	 * JIT can use so it doesn't have to allocate VA from the kernel.
+	 */
+	custom_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_custom,
+				kctx->same_va_end,
+				jit_va_pages,
+				KBASE_REG_ZONE_CUSTOM_VA);
+
+	/*
+	 * The context will be destroyed if we fail here so no point
+	 * reverting the change we made to same_va.
+	 */
+	if (!custom_va_reg)
+		return -ENOMEM;
+
+	kbase_region_tracker_insert(custom_va_reg);
+	return 0;
+}
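+
+/* Effect on the 64-bit layout (a sketch): the JIT custom zone is carved from
+ * the top of SAME_VA, so
+ *
+ *   before: |<------------- SAME_VA ------------->|
+ *   after:  |<------- SAME_VA ------->|<- JIT VA ->|
+ *
+ * with kctx->same_va_end moved down by jit_va_pages.
+ */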
+#endif
+
+int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
+		u8 max_allocations, u8 trim_level, int group_id)
+{
+	int err = 0;
+
+	if (trim_level > 100)
+		return -EINVAL;
+
+	if (WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS) ||
+		WARN_ON(group_id < 0))
+		return -EINVAL;
+
+	kbase_gpu_vm_lock(kctx);
+
+#ifdef CONFIG_64BIT
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT))
+		err = kbase_region_tracker_init_jit_64(kctx, jit_va_pages);
+#endif
+	/*
+	 * Nothing to do for 32-bit clients, JIT uses the existing
+	 * custom VA zone.
+	 */
+
+	if (!err) {
+		kctx->jit_max_allocations = max_allocations;
+		kctx->trim_level = trim_level;
+		kctx->jit_va = true;
+		kctx->jit_group_id = group_id;
+	}
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return err;
+}
+
+int kbase_region_tracker_init_exec(struct kbase_context *kctx, u64 exec_va_pages)
+{
+	struct kbase_va_region *shrinking_va_reg;
+	struct kbase_va_region *exec_va_reg;
+	u64 exec_va_start, exec_va_base_addr;
+	int err;
+
+	/* The EXEC_VA zone shall be created by making space at the end of the
+	 * address space. Firstly, verify that the number of EXEC_VA pages
+	 * requested by the client is reasonable and then make sure that it is
+	 * not greater than the address space itself before calculating the base
+	 * address of the new zone.
+	 */
+	if (exec_va_pages == 0 || exec_va_pages > KBASE_REG_ZONE_EXEC_VA_MAX_PAGES)
+		return -EINVAL;
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* First verify that a JIT_VA zone has not been created already. */
+	if (kctx->jit_va) {
+		err = -EPERM;
+		goto exit_unlock;
+	}
+
+	if (exec_va_pages > kctx->gpu_va_end) {
+		err = -ENOMEM;
+		goto exit_unlock;
+	}
+
+	exec_va_start = kctx->gpu_va_end - exec_va_pages;
+	exec_va_base_addr = exec_va_start << PAGE_SHIFT;
+
+	shrinking_va_reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+			exec_va_base_addr);
+	if (!shrinking_va_reg) {
+		err = -ENOMEM;
+		goto exit_unlock;
+	}
+
+	/* Make sure that the EXEC_VA region is still uninitialized */
+	if ((shrinking_va_reg->flags & KBASE_REG_ZONE_MASK) ==
+			KBASE_REG_ZONE_EXEC_VA) {
+		err = -EPERM;
+		goto exit_unlock;
+	}
+
+	if (shrinking_va_reg->nr_pages <= exec_va_pages) {
+		err = -ENOMEM;
+		goto exit_unlock;
+	}
+
+	exec_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_exec,
+			exec_va_start,
+			exec_va_pages,
+			KBASE_REG_ZONE_EXEC_VA);
+	if (!exec_va_reg) {
+		err = -ENOMEM;
+		goto exit_unlock;
+	}
+
+	shrinking_va_reg->nr_pages -= exec_va_pages;
+#ifdef CONFIG_64BIT
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT))
+		kctx->same_va_end -= exec_va_pages;
+#endif
+	kctx->exec_va_start = exec_va_start;
+
+	kbase_region_tracker_insert(exec_va_reg);
+	err = 0;
+
+exit_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return err;
+}
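+
+/* Resulting layout (a sketch): EXEC_VA is carved from the pages immediately
+ * below gpu_va_end:
+ *
+ *   |<------ shrinking_va_reg ------>|<---- EXEC_VA ---->|
+ *                                    ^exec_va_start      ^gpu_va_end
+ */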
+
+int kbase_mem_init(struct kbase_device *kbdev)
+{
+	int err = 0;
+	struct kbasep_mem_device *memdev;
+#ifdef CONFIG_OF
+	struct device_node *mgm_node = NULL;
+#endif
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	memdev = &kbdev->memdev;
+
+	kbase_mem_pool_group_config_set_max_size(&kbdev->mem_pool_defaults,
+		KBASE_MEM_POOL_MAX_SIZE_KCTX);
+
+	/* Initialize memory usage */
+	atomic_set(&memdev->used_pages, 0);
+
+	kbdev->mgm_dev = &kbase_native_mgm_dev;
+
+#ifdef CONFIG_OF
+	/* Check to see whether or not a platform-specific memory group manager
+	 * is configured and available.
+	 */
+	mgm_node = of_parse_phandle(kbdev->dev->of_node,
+		"physical-memory-group-manager", 0);
+	if (!mgm_node) {
+		dev_info(kbdev->dev,
+			"No memory group manager is configured\n");
+	} else {
+		struct platform_device *const pdev =
+			of_find_device_by_node(mgm_node);
+
+		if (!pdev) {
+			dev_err(kbdev->dev,
+				"The configured memory group manager was not found\n");
+		} else {
+			kbdev->mgm_dev = platform_get_drvdata(pdev);
+			if (!kbdev->mgm_dev) {
+				dev_info(kbdev->dev,
+					"Memory group manager is not ready\n");
+				err = -EPROBE_DEFER;
+			} else if (!try_module_get(kbdev->mgm_dev->owner)) {
+				dev_err(kbdev->dev,
+					"Failed to get memory group manger module\n");
+				err = -ENODEV;
+				kbdev->mgm_dev = NULL;
+			}
+		}
+		of_node_put(mgm_node);
+	}
+#endif
+
+	if (likely(!err)) {
+		struct kbase_mem_pool_group_config mem_pool_defaults;
+
+		kbase_mem_pool_group_config_set_max_size(&mem_pool_defaults,
+			KBASE_MEM_POOL_MAX_SIZE_KBDEV);
+
+		err = kbase_mem_pool_group_init(&kbdev->mem_pools, kbdev,
+			&mem_pool_defaults, NULL);
+	}
+
+	return err;
+}
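+
+/* Illustrative device-tree wiring for the optional memory group manager
+ * probed above (node names, unit address and the provider's compatible
+ * string are hypothetical):
+ *
+ *   mgm: physical-memory-group-manager {
+ *           compatible = "...";
+ *   };
+ *
+ *   gpu@13040000 {
+ *           ...
+ *           physical-memory-group-manager = <&mgm>;
+ *   };
+ */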
+
+void kbase_mem_halt(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+void kbase_mem_term(struct kbase_device *kbdev)
+{
+	struct kbasep_mem_device *memdev;
+	int pages;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	memdev = &kbdev->memdev;
+
+	pages = atomic_read(&memdev->used_pages);
+	if (pages != 0)
+		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
+
+	kbase_mem_pool_group_term(&kbdev->mem_pools);
+
+	if (kbdev->mgm_dev)
+		module_put(kbdev->mgm_dev->owner);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_term);
+
+/**
+ * @brief Allocate a free region object.
+ *
+ * The allocated object is not part of any list yet, and is flagged as
+ * KBASE_REG_FREE. No mapping is allocated yet.
+ *
+ * zone is one of KBASE_REG_ZONE_SAME_VA, KBASE_REG_ZONE_CUSTOM_VA or
+ * KBASE_REG_ZONE_EXEC_VA.
+ *
+ */
+struct kbase_va_region *kbase_alloc_free_region(struct rb_root *rbtree,
+		u64 start_pfn, size_t nr_pages, int zone)
+{
+	struct kbase_va_region *new_reg;
+
+	KBASE_DEBUG_ASSERT(rbtree != NULL);
+
+	/* zone argument should only contain zone related region flags */
+	KBASE_DEBUG_ASSERT((zone & ~KBASE_REG_ZONE_MASK) == 0);
+	KBASE_DEBUG_ASSERT(nr_pages > 0);
+	/* 64-bit address range is the max */
+	KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= (U64_MAX / PAGE_SIZE));
+
+	new_reg = kzalloc(sizeof(*new_reg), GFP_KERNEL);
+
+	if (!new_reg)
+		return NULL;
+
+	new_reg->va_refcnt = 1;
+	new_reg->cpu_alloc = NULL; /* no alloc bound yet */
+	new_reg->gpu_alloc = NULL; /* no alloc bound yet */
+	new_reg->rbtree = rbtree;
+	new_reg->flags = zone | KBASE_REG_FREE;
+
+	new_reg->flags |= KBASE_REG_GROWABLE;
+
+	new_reg->start_pfn = start_pfn;
+	new_reg->nr_pages = nr_pages;
+
+	INIT_LIST_HEAD(&new_reg->jit_node);
+
+	return new_reg;
+}
+
+KBASE_EXPORT_TEST_API(kbase_alloc_free_region);
+
+static struct kbase_context *kbase_reg_flags_to_kctx(
+		struct kbase_va_region *reg)
+{
+	struct kbase_context *kctx = NULL;
+	struct rb_root *rbtree = reg->rbtree;
+
+	switch (reg->flags & KBASE_REG_ZONE_MASK) {
+	case KBASE_REG_ZONE_CUSTOM_VA:
+		kctx = container_of(rbtree, struct kbase_context,
+				reg_rbtree_custom);
+		break;
+	case KBASE_REG_ZONE_SAME_VA:
+		kctx = container_of(rbtree, struct kbase_context,
+				reg_rbtree_same);
+		break;
+	case KBASE_REG_ZONE_EXEC_VA:
+		kctx = container_of(rbtree, struct kbase_context,
+				reg_rbtree_exec);
+		break;
+	default:
+		WARN(1, "Unknown zone in region: flags=0x%lx\n", reg->flags);
+		break;
+	}
+
+	return kctx;
+}
+
+/**
+ * @brief Free a region object.
+ *
+ * The described region must be freed of any mapping.
+ *
+ * If the region is not flagged as KBASE_REG_FREE, the region's
+ * alloc object will be released.
+ * It is a bug if no alloc object exists for non-free regions.
+ *
+ */
+void kbase_free_alloced_region(struct kbase_va_region *reg)
+{
+	if (!(reg->flags & KBASE_REG_FREE)) {
+		struct kbase_context *kctx = kbase_reg_flags_to_kctx(reg);
+
+		if (WARN_ON(!kctx))
+			return;
+
+		if (WARN_ON(kbase_is_region_invalid(reg)))
+			return;
+
+		mutex_lock(&kctx->jit_evict_lock);
+
+		/*
+		 * The physical allocation should have been removed from the
+		 * eviction list before this function is called. However, in the
+		 * case of abnormal process termination or the app leaking the
+		 * memory, kbase_mem_free_region is not called, so it can still
+		 * be on the list when the region tracker is terminated.
+		 */
+		if (!list_empty(&reg->gpu_alloc->evict_node)) {
+			mutex_unlock(&kctx->jit_evict_lock);
+
+			/*
+			 * Unlink the physical allocation before unmaking it
+			 * evictable so that the allocation isn't grown back to
+			 * its last backed size as we're going to unmap it
+			 * anyway.
+			 */
+			reg->cpu_alloc->reg = NULL;
+			if (reg->cpu_alloc != reg->gpu_alloc)
+				reg->gpu_alloc->reg = NULL;
+
+			/*
+			 * If a region has been made evictable then we must
+			 * unmake it before trying to free it.
+			 * If the memory hasn't been reclaimed it will be
+			 * unmapped and freed below, if it has been reclaimed
+			 * then the operations below are no-ops.
+			 */
+			if (reg->flags & KBASE_REG_DONT_NEED) {
+				KBASE_DEBUG_ASSERT(reg->cpu_alloc->type ==
+						   KBASE_MEM_TYPE_NATIVE);
+				kbase_mem_evictable_unmake(reg->gpu_alloc);
+			}
+		} else {
+			mutex_unlock(&kctx->jit_evict_lock);
+		}
+
+		/*
+		 * Remove the region from the sticky resource metadata
+		 * list should it be there.
+		 */
+		kbase_sticky_resource_release(kctx, NULL,
+				reg->start_pfn << PAGE_SHIFT);
+
+		kbase_mem_phy_alloc_put(reg->cpu_alloc);
+		kbase_mem_phy_alloc_put(reg->gpu_alloc);
+
+		reg->flags |= KBASE_REG_VA_FREED;
+		kbase_va_region_alloc_put(kctx, reg);
+	} else {
+		kfree(reg);
+	}
+}
+
+KBASE_EXPORT_TEST_API(kbase_free_alloced_region);
+
+int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)
+{
+	int err;
+	size_t i = 0;
+	unsigned long attr;
+	unsigned long mask = ~KBASE_REG_MEMATTR_MASK;
+	unsigned long gwt_mask = ~0;
+	int group_id;
+	struct kbase_mem_phy_alloc *alloc;
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+	if (kctx->gwt_enabled)
+		gwt_mask = ~KBASE_REG_GPU_WR;
+#endif
+
+	if ((kctx->kbdev->system_coherency == COHERENCY_ACE) &&
+		(reg->flags & KBASE_REG_SHARE_BOTH))
+		attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_OUTER_WA);
+	else
+		attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_WRITE_ALLOC);
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+	KBASE_DEBUG_ASSERT(NULL != reg);
+
+	err = kbase_add_va_region(kctx, reg, addr, nr_pages, align);
+	if (err)
+		return err;
+
+	alloc = reg->gpu_alloc;
+	group_id = alloc->group_id;
+
+	if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
+		u64 const stride = alloc->imported.alias.stride;
+
+		KBASE_DEBUG_ASSERT(alloc->imported.alias.aliased);
+		for (i = 0; i < alloc->imported.alias.nents; i++) {
+			if (alloc->imported.alias.aliased[i].alloc) {
+				err = kbase_mmu_insert_pages(kctx->kbdev,
+						&kctx->mmu,
+						reg->start_pfn + (i * stride),
+						alloc->imported.alias.aliased[i].alloc->pages + alloc->imported.alias.aliased[i].offset,
+						alloc->imported.alias.aliased[i].length,
+						reg->flags & gwt_mask,
+						kctx->as_nr,
+						group_id);
+				if (err)
+					goto bad_insert;
+
+				kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc);
+			} else {
+				err = kbase_mmu_insert_single_page(kctx,
+					reg->start_pfn + i * stride,
+					kctx->aliasing_sink_page,
+					alloc->imported.alias.aliased[i].length,
+					(reg->flags & mask & gwt_mask) | attr,
+					group_id);
+
+				if (err)
+					goto bad_insert;
+			}
+		}
+	} else {
+		err = kbase_mmu_insert_pages(kctx->kbdev,
+				&kctx->mmu,
+				reg->start_pfn,
+				kbase_get_gpu_phy_pages(reg),
+				kbase_reg_current_backed_size(reg),
+				reg->flags & gwt_mask,
+				kctx->as_nr,
+				group_id);
+		if (err)
+			goto bad_insert;
+		kbase_mem_phy_alloc_gpu_mapped(alloc);
+	}
+
+	if (reg->flags & KBASE_REG_IMPORT_PAD &&
+	    !WARN_ON(reg->nr_pages < reg->gpu_alloc->nents) &&
+	    reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM &&
+	    reg->gpu_alloc->imported.umm.current_mapping_usage_count) {
+		/* For padded imported dma-buf memory, map the dummy aliasing
+		 * page from the end of the dma-buf pages, to the end of the
+		 * region using a read only mapping.
+		 *
+		 * Only map when it's imported dma-buf memory that is currently
+		 * mapped.
+		 *
+		 * Assume reg->gpu_alloc->nents is the number of actual pages
+		 * in the dma-buf memory.
+		 */
+		err = kbase_mmu_insert_single_page(kctx,
+				reg->start_pfn + reg->gpu_alloc->nents,
+				kctx->aliasing_sink_page,
+				reg->nr_pages - reg->gpu_alloc->nents,
+				(reg->flags | KBASE_REG_GPU_RD) &
+				~KBASE_REG_GPU_WR,
+				KBASE_MEM_GROUP_SINK);
+		if (err)
+			goto bad_insert;
+	}
+
+	return err;
+
+bad_insert:
+	kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
+				 reg->start_pfn, reg->nr_pages,
+				 kctx->as_nr);
+
+	if (alloc->type == KBASE_MEM_TYPE_ALIAS) {
+		KBASE_DEBUG_ASSERT(alloc->imported.alias.aliased);
+		while (i--)
+			if (alloc->imported.alias.aliased[i].alloc)
+				kbase_mem_phy_alloc_gpu_unmapped(alloc->imported.alias.aliased[i].alloc);
+	}
+
+	kbase_remove_va_region(reg);
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_mmap);
+
+static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
+		struct kbase_mem_phy_alloc *alloc, bool writeable);
+
+int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+	int err = 0;
+	size_t i;
+
+	if (reg->start_pfn == 0)
+		return 0;
+
+	if (!reg->gpu_alloc)
+		return -EINVAL;
+
+	/* Tear down GPU page tables, depending on memory type. */
+	switch (reg->gpu_alloc->type) {
+	case KBASE_MEM_TYPE_ALIAS: /* Fall-through */
+	case KBASE_MEM_TYPE_IMPORTED_UMM:
+		err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
+				reg->start_pfn, reg->nr_pages, kctx->as_nr);
+		break;
+	default:
+		err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
+			reg->start_pfn, kbase_reg_current_backed_size(reg),
+			kctx->as_nr);
+		break;
+	}
+
+	/* Update tracking, and other cleanup, depending on memory type. */
+	switch (reg->gpu_alloc->type) {
+	case KBASE_MEM_TYPE_ALIAS:
+		KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
+		for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
+			if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
+				kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
+		break;
+	case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
+			struct kbase_alloc_import_user_buf *user_buf =
+				&reg->gpu_alloc->imported.user_buf;
+
+			if (user_buf->current_mapping_usage_count & PINNED_ON_IMPORT) {
+				user_buf->current_mapping_usage_count &=
+					~PINNED_ON_IMPORT;
+
+				/* The allocation could still have active mappings. */
+				if (user_buf->current_mapping_usage_count == 0) {
+					kbase_jd_user_buf_unmap(kctx, reg->gpu_alloc,
+						(reg->flags & KBASE_REG_GPU_WR));
+				}
+			}
+		}
+		/* Fall-through */
+	default:
+		kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc);
+		break;
+	}
+
+	return err;
+}
+
+static struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping(
+		struct kbase_context *kctx,
+		unsigned long uaddr, size_t size, u64 *offset)
+{
+	struct vm_area_struct *vma;
+	struct kbase_cpu_mapping *map;
+	unsigned long vm_pgoff_in_region;
+	unsigned long vm_off_in_region;
+	unsigned long map_start;
+	size_t map_size;
+
+	lockdep_assert_held(&current->mm->mmap_sem);
+
+	if ((uintptr_t) uaddr + size < (uintptr_t) uaddr) /* overflow check */
+		return NULL;
+
+	vma = find_vma_intersection(current->mm, uaddr, uaddr+size);
+
+	if (!vma || vma->vm_start > uaddr)
+		return NULL;
+	if (vma->vm_ops != &kbase_vm_ops)
+		/* Not ours! */
+		return NULL;
+
+	map = vma->vm_private_data;
+
+	if (map->kctx != kctx)
+		/* Not from this context! */
+		return NULL;
+
+	vm_pgoff_in_region = vma->vm_pgoff - map->region->start_pfn;
+	vm_off_in_region = vm_pgoff_in_region << PAGE_SHIFT;
+	map_start = vma->vm_start - vm_off_in_region;
+	map_size = map->region->nr_pages << PAGE_SHIFT;
+
+	if ((uaddr + size) > (map_start + map_size))
+		/* Not within the CPU mapping */
+		return NULL;
+
+	*offset = (uaddr - vma->vm_start) + vm_off_in_region;
+
+	return map;
+}
+
+int kbasep_find_enclosing_cpu_mapping_offset(
+		struct kbase_context *kctx,
+		unsigned long uaddr, size_t size, u64 *offset)
+{
+	struct kbase_cpu_mapping *map;
+
+	kbase_os_mem_map_lock(kctx);
+
+	map = kbasep_find_enclosing_cpu_mapping(kctx, uaddr, size, offset);
+
+	kbase_os_mem_map_unlock(kctx);
+
+	if (!map)
+		return -EINVAL;
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping_offset);
+
+int kbasep_find_enclosing_gpu_mapping_start_and_offset(struct kbase_context *kctx,
+		u64 gpu_addr, size_t size, u64 *start, u64 *offset)
+{
+	struct kbase_va_region *region;
+
+	kbase_gpu_vm_lock(kctx);
+
+	region = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
+
+	if (!region) {
+		kbase_gpu_vm_unlock(kctx);
+		return -EINVAL;
+	}
+
+	*start = region->start_pfn << PAGE_SHIFT;
+
+	*offset = gpu_addr - *start;
+
+	if (((region->start_pfn + region->nr_pages) << PAGE_SHIFT) < (gpu_addr + size)) {
+		kbase_gpu_vm_unlock(kctx);
+		return -EINVAL;
+	}
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_find_enclosing_gpu_mapping_start_and_offset);
+
+void kbase_sync_single(struct kbase_context *kctx,
+		struct tagged_addr t_cpu_pa, struct tagged_addr t_gpu_pa,
+		off_t offset, size_t size, enum kbase_sync_type sync_fn)
+{
+	struct page *cpu_page;
+	phys_addr_t cpu_pa = as_phys_addr_t(t_cpu_pa);
+	phys_addr_t gpu_pa = as_phys_addr_t(t_gpu_pa);
+
+	cpu_page = pfn_to_page(PFN_DOWN(cpu_pa));
+
+	if (likely(cpu_pa == gpu_pa)) {
+		dma_addr_t dma_addr;
+
+		BUG_ON(!cpu_page);
+		BUG_ON(offset + size > PAGE_SIZE);
+
+		dma_addr = kbase_dma_addr(cpu_page) + offset;
+		if (sync_fn == KBASE_SYNC_TO_CPU)
+			dma_sync_single_for_cpu(kctx->kbdev->dev, dma_addr,
+					size, DMA_BIDIRECTIONAL);
+		else if (sync_fn == KBASE_SYNC_TO_DEVICE)
+			dma_sync_single_for_device(kctx->kbdev->dev, dma_addr,
+					size, DMA_BIDIRECTIONAL);
+	} else {
+		void *src = NULL;
+		void *dst = NULL;
+		struct page *gpu_page;
+
+		if (WARN(!gpu_pa, "No GPU PA found for infinite cache op"))
+			return;
+
+		gpu_page = pfn_to_page(PFN_DOWN(gpu_pa));
+
+		if (sync_fn == KBASE_SYNC_TO_DEVICE) {
+			src = ((unsigned char *)kmap(cpu_page)) + offset;
+			dst = ((unsigned char *)kmap(gpu_page)) + offset;
+		} else if (sync_fn == KBASE_SYNC_TO_CPU) {
+			dma_sync_single_for_cpu(kctx->kbdev->dev,
+					kbase_dma_addr(gpu_page) + offset,
+					size, DMA_BIDIRECTIONAL);
+			src = ((unsigned char *)kmap(gpu_page)) + offset;
+			dst = ((unsigned char *)kmap(cpu_page)) + offset;
+		}
+		memcpy(dst, src, size);
+		kunmap(gpu_page);
+		kunmap(cpu_page);
+		if (sync_fn == KBASE_SYNC_TO_DEVICE)
+			dma_sync_single_for_device(kctx->kbdev->dev,
+					kbase_dma_addr(gpu_page) + offset,
+					size, DMA_BIDIRECTIONAL);
+	}
+}
+
+static int kbase_do_syncset(struct kbase_context *kctx,
+		struct basep_syncset *sset, enum kbase_sync_type sync_fn)
+{
+	int err = 0;
+	struct kbase_va_region *reg;
+	struct kbase_cpu_mapping *map;
+	unsigned long start;
+	size_t size;
+	struct tagged_addr *cpu_pa;
+	struct tagged_addr *gpu_pa;
+	u64 page_off, page_count;
+	u64 i;
+	u64 offset;
+
+	kbase_os_mem_map_lock(kctx);
+	kbase_gpu_vm_lock(kctx);
+
+	/* find the region where the virtual address is contained */
+	reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+			sset->mem_handle.basep.handle);
+	if (kbase_is_region_invalid_or_free(reg)) {
+		dev_warn(kctx->kbdev->dev, "Can't find a valid region at VA 0x%016llX",
+				sset->mem_handle.basep.handle);
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	/*
+	 * Handle imported memory before checking for KBASE_REG_CPU_CACHED. The
+	 * CPU mapping cacheability is defined by the owner of the imported
+	 * memory, and not by kbase, therefore we must assume that any imported
+	 * memory may be cached.
+	 */
+	if (kbase_mem_is_imported(reg->gpu_alloc->type)) {
+		err = kbase_mem_do_sync_imported(kctx, reg, sync_fn);
+		goto out_unlock;
+	}
+
+	if (!(reg->flags & KBASE_REG_CPU_CACHED))
+		goto out_unlock;
+
+	start = (uintptr_t)sset->user_addr;
+	size = (size_t)sset->size;
+
+	map = kbasep_find_enclosing_cpu_mapping(kctx, start, size, &offset);
+	if (!map) {
+		dev_warn(kctx->kbdev->dev, "Can't find CPU mapping 0x%016lX for VA 0x%016llX",
+				start, sset->mem_handle.basep.handle);
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	page_off = offset >> PAGE_SHIFT;
+	offset &= ~PAGE_MASK;
+	page_count = (size + offset + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	cpu_pa = kbase_get_cpu_phy_pages(reg);
+	gpu_pa = kbase_get_gpu_phy_pages(reg);
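+	/* Worked example (illustrative, 4K pages): offset == 0x1804 and
+	 * size == 0x3000 give page_off == 1, offset == 0x804 and
+	 * page_count == (0x3000 + 0x804 + 0xfff) >> 12 == 4: a partial first
+	 * page, two whole middle pages and a partial last page.
+	 */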
+
+	if (page_off > reg->nr_pages ||
+			page_off + page_count > reg->nr_pages) {
+		/* Sync overflows the region */
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	/* Sync first page */
+	if (as_phys_addr_t(cpu_pa[page_off])) {
+		size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
+
+		kbase_sync_single(kctx, cpu_pa[page_off], gpu_pa[page_off],
+				offset, sz, sync_fn);
+	}
+
+	/* Sync middle pages (if any) */
+	for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+		/* we grow upwards, so bail on first non-present page */
+		if (!as_phys_addr_t(cpu_pa[page_off + i]))
+			break;
+
+		kbase_sync_single(kctx, cpu_pa[page_off + i],
+				gpu_pa[page_off + i], 0, PAGE_SIZE, sync_fn);
+	}
+
+	/* Sync last page (if any) */
+	if (page_count > 1 &&
+	    as_phys_addr_t(cpu_pa[page_off + page_count - 1])) {
+		size_t sz = ((start + size - 1) & ~PAGE_MASK) + 1;
+
+		kbase_sync_single(kctx, cpu_pa[page_off + page_count - 1],
+				gpu_pa[page_off + page_count - 1], 0, sz,
+				sync_fn);
+	}
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	kbase_os_mem_map_unlock(kctx);
+	return err;
+}
+
+int kbase_sync_now(struct kbase_context *kctx, struct basep_syncset *sset)
+{
+	int err = -EINVAL;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(sset != NULL);
+
+	if (sset->mem_handle.basep.handle & ~PAGE_MASK) {
+		dev_warn(kctx->kbdev->dev,
+				"mem_handle: passed parameter is invalid");
+		return -EINVAL;
+	}
+
+	switch (sset->type) {
+	case BASE_SYNCSET_OP_MSYNC:
+		err = kbase_do_syncset(kctx, sset, KBASE_SYNC_TO_DEVICE);
+		break;
+
+	case BASE_SYNCSET_OP_CSYNC:
+		err = kbase_do_syncset(kctx, sset, KBASE_SYNC_TO_CPU);
+		break;
+
+	default:
+		dev_warn(kctx->kbdev->dev, "Unknown msync op %d\n", sset->type);
+		break;
+	}
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_sync_now);
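+
+/*
+ * Minimal usage sketch for kbase_sync_now(). Illustrative only: gpu_va,
+ * cpu_va and nbytes are hypothetical caller-side values for a region that
+ * was allocated and CPU-mapped earlier.
+ *
+ *	struct basep_syncset sset = {
+ *		.mem_handle.basep.handle = gpu_va,	// must be page-aligned
+ *		.user_addr = cpu_va,
+ *		.size = nbytes,
+ *		.type = BASE_SYNCSET_OP_MSYNC,		// sync CPU -> device
+ *	};
+ *	int err = kbase_sync_now(kctx, &sset);
+ */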
+
+/* vm lock must be held */
+int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+	int err;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+	KBASE_DEBUG_ASSERT(NULL != reg);
+	lockdep_assert_held(&kctx->reg_lock);
+
+	if (reg->flags & KBASE_REG_NO_USER_FREE) {
+		dev_warn(kctx->kbdev->dev, "Attempt to free GPU memory whose freeing by user space is forbidden!\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Unlink the physical allocation before unmaking it evictable so
+	 * that the allocation isn't grown back to its last backed size
+	 * as we're going to unmap it anyway.
+	 */
+	reg->cpu_alloc->reg = NULL;
+	if (reg->cpu_alloc != reg->gpu_alloc)
+		reg->gpu_alloc->reg = NULL;
+
+	/*
+	 * If a region has been made evictable then we must unmake it
+	 * before trying to free it.
+	 * If the memory hasn't been reclaimed it will be unmapped and freed
+	 * below, if it has been reclaimed then the operations below are no-ops.
+	 */
+	if (reg->flags & KBASE_REG_DONT_NEED) {
+		KBASE_DEBUG_ASSERT(reg->cpu_alloc->type ==
+				   KBASE_MEM_TYPE_NATIVE);
+		kbase_mem_evictable_unmake(reg->gpu_alloc);
+	}
+
+	err = kbase_gpu_munmap(kctx, reg);
+	if (err) {
+		dev_warn(kctx->kbdev->dev, "Could not unmap from the GPU...\n");
+		goto out;
+	}
+
+	/* This will also free the physical pages */
+	kbase_free_alloced_region(reg);
+
+ out:
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_free_region);
+
+/**
+ * @brief Free the region from the GPU and unregister it.
+ *
+ * This function implements the free operation on a memory segment.
+ * It will loudly fail if called with outstanding mappings.
+ */
+int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr)
+{
+	int err = 0;
+	struct kbase_va_region *reg;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	if ((gpu_addr & ~PAGE_MASK) && (gpu_addr >= PAGE_SIZE)) {
+		dev_warn(kctx->kbdev->dev, "kbase_mem_free: gpu_addr parameter is invalid");
+		return -EINVAL;
+	}
+
+	if (0 == gpu_addr) {
+		dev_warn(kctx->kbdev->dev, "gpu_addr 0 is reserved for the ringbuffer and it's an error to try to free it using kbase_mem_free\n");
+		return -EINVAL;
+	}
+	kbase_gpu_vm_lock(kctx);
+
+	if (gpu_addr >= BASE_MEM_COOKIE_BASE &&
+	    gpu_addr < BASE_MEM_FIRST_FREE_ADDRESS) {
+		int cookie = PFN_DOWN(gpu_addr - BASE_MEM_COOKIE_BASE);
+
+		reg = kctx->pending_regions[cookie];
+		if (!reg) {
+			err = -EINVAL;
+			goto out_unlock;
+		}
+
+		/* unlink the cookie and return it to the free pool, as
+		 * we're about to free the region it referred to
+		 */
+		kctx->pending_regions[cookie] = NULL;
+		kctx->cookies |= (1UL << cookie);
+
+		kbase_free_alloced_region(reg);
+	} else {
+		/* A real GPU va */
+		/* Validate the region */
+		reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+		if (kbase_is_region_invalid_or_free(reg)) {
+			dev_warn(kctx->kbdev->dev, "kbase_mem_free called with nonexistent gpu_addr 0x%llX",
+					gpu_addr);
+			err = -EINVAL;
+			goto out_unlock;
+		}
+
+		if ((reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_SAME_VA) {
+			/* SAME_VA must be freed through munmap */
+			dev_warn(kctx->kbdev->dev, "%s called on SAME_VA memory 0x%llX", __func__,
+					gpu_addr);
+			err = -EINVAL;
+			goto out_unlock;
+		}
+		err = kbase_mem_free_region(kctx, reg);
+	}
+
+ out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_free);
+
+int kbase_update_region_flags(struct kbase_context *kctx,
+		struct kbase_va_region *reg, unsigned long flags)
+{
+	KBASE_DEBUG_ASSERT(NULL != reg);
+	KBASE_DEBUG_ASSERT((flags & ~((1ul << BASE_MEM_FLAGS_NR_BITS) - 1)) == 0);
+
+	reg->flags |= kbase_cache_enabled(flags, reg->nr_pages);
+	/* all memory is now growable */
+	reg->flags |= KBASE_REG_GROWABLE;
+
+	if (flags & BASE_MEM_GROW_ON_GPF)
+		reg->flags |= KBASE_REG_PF_GROW;
+
+	if (flags & BASE_MEM_PROT_CPU_WR)
+		reg->flags |= KBASE_REG_CPU_WR;
+
+	if (flags & BASE_MEM_PROT_CPU_RD)
+		reg->flags |= KBASE_REG_CPU_RD;
+
+	if (flags & BASE_MEM_PROT_GPU_WR)
+		reg->flags |= KBASE_REG_GPU_WR;
+
+	if (flags & BASE_MEM_PROT_GPU_RD)
+		reg->flags |= KBASE_REG_GPU_RD;
+
+	if (0 == (flags & BASE_MEM_PROT_GPU_EX))
+		reg->flags |= KBASE_REG_GPU_NX;
+
+	if (!kbase_device_is_cpu_coherent(kctx->kbdev)) {
+		if (flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED &&
+				!(flags & BASE_MEM_UNCACHED_GPU))
+			return -EINVAL;
+	} else if (flags & (BASE_MEM_COHERENT_SYSTEM |
+			BASE_MEM_COHERENT_SYSTEM_REQUIRED)) {
+		reg->flags |= KBASE_REG_SHARE_BOTH;
+	}
+
+	if (!(reg->flags & KBASE_REG_SHARE_BOTH) &&
+			flags & BASE_MEM_COHERENT_LOCAL) {
+		reg->flags |= KBASE_REG_SHARE_IN;
+	}
+
+	if (flags & BASE_MEM_TILER_ALIGN_TOP)
+		reg->flags |= KBASE_REG_TILER_ALIGN_TOP;
+
+
+	/* Set up default MEMATTR usage */
+	if (!(reg->flags & KBASE_REG_GPU_CACHED)) {
+		if (kctx->kbdev->mmu_mode->flags &
+				KBASE_MMU_MODE_HAS_NON_CACHEABLE) {
+			/* Override shareability, and MEMATTR for uncached */
+			reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
+			reg->flags |= KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_NON_CACHEABLE);
+		} else {
+			dev_warn(kctx->kbdev->dev,
+				"Can't allocate GPU uncached memory due to MMU in Legacy Mode\n");
+			return -EINVAL;
+		}
+	} else if (kctx->kbdev->system_coherency == COHERENCY_ACE &&
+		(reg->flags & KBASE_REG_SHARE_BOTH)) {
+		reg->flags |=
+			KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT_ACE);
+	} else {
+		reg->flags |=
+			KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT);
+	}
+
+	if (flags & BASEP_MEM_PERMANENT_KERNEL_MAPPING)
+		reg->flags |= KBASE_REG_PERMANENT_KERNEL_MAPPING;
+
+	if (flags & BASEP_MEM_NO_USER_FREE)
+		reg->flags |= KBASE_REG_NO_USER_FREE;
+
+	if (flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE)
+		reg->flags |= KBASE_REG_GPU_VA_SAME_4GB_PAGE;
+
+	return 0;
+}
+
+int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
+		size_t nr_pages_requested)
+{
+	int new_page_count __maybe_unused;
+	size_t nr_left = nr_pages_requested;
+	int res;
+	struct kbase_context *kctx;
+	struct kbase_device *kbdev;
+	struct tagged_addr *tp;
+
+	if (WARN_ON(alloc->type != KBASE_MEM_TYPE_NATIVE) ||
+	    WARN_ON(alloc->imported.native.kctx == NULL) ||
+	    WARN_ON(alloc->group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)) {
+		return -EINVAL;
+	}
+
+	if (alloc->reg) {
+		if (nr_pages_requested > alloc->reg->nr_pages - alloc->nents)
+			goto invalid_request;
+	}
+
+	kctx = alloc->imported.native.kctx;
+	kbdev = kctx->kbdev;
+
+	if (nr_pages_requested == 0)
+		goto done; /*nothing to do*/
+
+	new_page_count = atomic_add_return(
+		nr_pages_requested, &kctx->used_pages);
+	atomic_add(nr_pages_requested,
+		&kctx->kbdev->memdev.used_pages);
+
+	/* Increase mm counters before we allocate pages so that this
+	 * allocation is visible to the OOM killer
+	 */
+	kbase_process_page_usage_inc(kctx, nr_pages_requested);
+
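+	/* tp points at the first unused slot in the physical page array;
+	 * newly allocated pages are appended starting at alloc->nents.
+	 */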
+	tp = alloc->pages + alloc->nents;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+	/* Check if enough pages were requested that we can allocate a large
+	 * page (512 * 4KB = 2MB)
+	 */
+	if (nr_left >= (SZ_2M / SZ_4K)) {
+		int nr_lp = nr_left / (SZ_2M / SZ_4K);
+
+		res = kbase_mem_pool_alloc_pages(
+			&kctx->mem_pools.large[alloc->group_id],
+			 nr_lp * (SZ_2M / SZ_4K),
+			 tp,
+			 true);
+
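+		/* partial_allowed is true, so the pool may return fewer 4K
+		 * pages than requested; any remainder is satisfied from the
+		 * partial 2MB chunks below.
+		 */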
+		if (res > 0) {
+			nr_left -= res;
+			tp += res;
+		}
+
+		if (nr_left) {
+			struct kbase_sub_alloc *sa, *temp_sa;
+
+			spin_lock(&kctx->mem_partials_lock);
+
+			list_for_each_entry_safe(sa, temp_sa,
+						 &kctx->mem_partials, link) {
+				int pidx = 0;
+
+				while (nr_left) {
+					pidx = find_next_zero_bit(sa->sub_pages,
+								  SZ_2M / SZ_4K,
+								  pidx);
+					bitmap_set(sa->sub_pages, pidx, 1);
+					*tp++ = as_tagged_tag(page_to_phys(sa->page +
+									   pidx),
+							      FROM_PARTIAL);
+					nr_left--;
+
+					if (bitmap_full(sa->sub_pages, SZ_2M / SZ_4K)) {
+						/* unlink from partial list when full */
+						list_del_init(&sa->link);
+						break;
+					}
+				}
+			}
+			spin_unlock(&kctx->mem_partials_lock);
+		}
+
+		/* Only retry if the remainder is a partial chunk (<512 pages).
+		 * If more is left, the 2MB allocation above failed, so there
+		 * is no point retrying with large pages here.
+		 */
+		if (nr_left > 0 && nr_left < (SZ_2M / SZ_4K)) {
+			/* create a new partial and suballocate the rest from it */
+			struct page *np = NULL;
+
+			do {
+				int err;
+
+				np = kbase_mem_pool_alloc(
+					&kctx->mem_pools.large[
+						alloc->group_id]);
+				if (np)
+					break;
+
+				err = kbase_mem_pool_grow(
+					&kctx->mem_pools.large[alloc->group_id],
+					1);
+				if (err)
+					break;
+			} while (1);
+
+			if (np) {
+				int i;
+				struct kbase_sub_alloc *sa;
+				struct page *p;
+
+				sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+				if (!sa) {
+					kbase_mem_pool_free(
+						&kctx->mem_pools.large[
+							alloc->group_id],
+						np,
+						false);
+					goto no_new_partial;
+				}
+
+				/* Store pointers back to the control struct
+				 * in the unused lru fields: head's lru.next
+				 * holds the sub-alloc, each page's lru.prev
+				 * the head page, so free_partial() can
+				 * recover both.
+				 */
+				np->lru.next = (void *)sa;
+				for (p = np; p < np + SZ_2M / SZ_4K; p++)
+					p->lru.prev = (void *)np;
+				INIT_LIST_HEAD(&sa->link);
+				bitmap_zero(sa->sub_pages, SZ_2M / SZ_4K);
+				sa->page = np;
+
+				for (i = 0; i < nr_left; i++)
+					*tp++ = as_tagged_tag(page_to_phys(np + i), FROM_PARTIAL);
+
+				bitmap_set(sa->sub_pages, 0, nr_left);
+				nr_left = 0;
+
+				/* expose for later use */
+				spin_lock(&kctx->mem_partials_lock);
+				list_add(&sa->link, &kctx->mem_partials);
+				spin_unlock(&kctx->mem_partials_lock);
+			}
+		}
+	}
+no_new_partial:
+#endif
+
+	if (nr_left) {
+		res = kbase_mem_pool_alloc_pages(
+			&kctx->mem_pools.small[alloc->group_id],
+			nr_left, tp, false);
+		if (res <= 0)
+			goto alloc_failed;
+	}
+
+	KBASE_TLSTREAM_AUX_PAGESALLOC(
+			kbdev,
+			kctx->id,
+			(u64)new_page_count);
+
+	alloc->nents += nr_pages_requested;
+done:
+	return 0;
+
+alloc_failed:
+	/* Rollback needed if we got one or more 2MB chunks but failed later:
+	 * re-account the pages we did get so the free helper, which
+	 * decrements the counters itself, can release them cleanly.
+	 */
+	if (nr_left != nr_pages_requested) {
+		size_t nr_pages_to_free = nr_pages_requested - nr_left;
+
+		alloc->nents += nr_pages_to_free;
+
+		kbase_process_page_usage_inc(kctx, nr_pages_to_free);
+		atomic_add(nr_pages_to_free, &kctx->used_pages);
+		atomic_add(nr_pages_to_free,
+			&kctx->kbdev->memdev.used_pages);
+
+		kbase_free_phy_pages_helper(alloc, nr_pages_to_free);
+	}
+
+	kbase_process_page_usage_dec(kctx, nr_pages_requested);
+	atomic_sub(nr_pages_requested, &kctx->used_pages);
+	atomic_sub(nr_pages_requested,
+		&kctx->kbdev->memdev.used_pages);
+
+invalid_request:
+	return -ENOMEM;
+}
+
+struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
+		struct kbase_mem_phy_alloc *alloc, struct kbase_mem_pool *pool,
+		size_t nr_pages_requested,
+		struct kbase_sub_alloc **prealloc_sa)
+{
+	int new_page_count __maybe_unused;
+	size_t nr_left = nr_pages_requested;
+	int res;
+	struct kbase_context *kctx;
+	struct kbase_device *kbdev;
+	struct tagged_addr *tp;
+	struct tagged_addr *new_pages = NULL;
+
+	KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
+	KBASE_DEBUG_ASSERT(alloc->imported.native.kctx);
+
+	lockdep_assert_held(&pool->pool_lock);
+
+#if !defined(CONFIG_MALI_2MB_ALLOC)
+	WARN_ON(pool->order);
+#endif
+
+	if (alloc->reg) {
+		if (nr_pages_requested > alloc->reg->nr_pages - alloc->nents)
+			goto invalid_request;
+	}
+
+	kctx = alloc->imported.native.kctx;
+	kbdev = kctx->kbdev;
+
+	lockdep_assert_held(&kctx->mem_partials_lock);
+
+	if (nr_pages_requested == 0)
+		goto done; /*nothing to do*/
+
+	new_page_count = atomic_add_return(
+		nr_pages_requested, &kctx->used_pages);
+	atomic_add(nr_pages_requested,
+		&kctx->kbdev->memdev.used_pages);
+
+	/* Increase mm counters before we allocate pages so that this
+	 * allocation is visible to the OOM killer
+	 */
+	kbase_process_page_usage_inc(kctx, nr_pages_requested);
+
+	tp = alloc->pages + alloc->nents;
+	new_pages = tp;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+	if (pool->order) {
+		int nr_lp = nr_left / (SZ_2M / SZ_4K);
+
+		res = kbase_mem_pool_alloc_pages_locked(pool,
+						 nr_lp * (SZ_2M / SZ_4K),
+						 tp);
+
+		if (res > 0) {
+			nr_left -= res;
+			tp += res;
+		}
+
+		if (nr_left) {
+			struct kbase_sub_alloc *sa, *temp_sa;
+
+			list_for_each_entry_safe(sa, temp_sa,
+						 &kctx->mem_partials, link) {
+				int pidx = 0;
+
+				while (nr_left) {
+					pidx = find_next_zero_bit(sa->sub_pages,
+								  SZ_2M / SZ_4K,
+								  pidx);
+					bitmap_set(sa->sub_pages, pidx, 1);
+					*tp++ = as_tagged_tag(page_to_phys(
+							sa->page + pidx),
+							FROM_PARTIAL);
+					nr_left--;
+
+					if (bitmap_full(sa->sub_pages,
+							SZ_2M / SZ_4K)) {
+						/* unlink from partial list when
+						 * full
+						 */
+						list_del_init(&sa->link);
+						break;
+					}
+				}
+			}
+		}
+
+		/* Only retry if the remainder is a partial chunk (<512
+		 * pages). If more is left, the 2MB allocation above failed,
+		 * so there is no point retrying with large pages here.
+		 */
+		if (nr_left > 0 && nr_left < (SZ_2M / SZ_4K)) {
+			/* create a new partial and suballocate the rest from it
+			 */
+			struct page *np = NULL;
+
+			np = kbase_mem_pool_alloc_locked(pool);
+
+			if (np) {
+				int i;
+				struct kbase_sub_alloc *const sa = *prealloc_sa;
+				struct page *p;
+
+				/* store pointers back to the control struct */
+				np->lru.next = (void *)sa;
+				for (p = np; p < np + SZ_2M / SZ_4K; p++)
+					p->lru.prev = (void *)np;
+				INIT_LIST_HEAD(&sa->link);
+				bitmap_zero(sa->sub_pages, SZ_2M / SZ_4K);
+				sa->page = np;
+
+				for (i = 0; i < nr_left; i++)
+					*tp++ = as_tagged_tag(
+							page_to_phys(np + i),
+							FROM_PARTIAL);
+
+				bitmap_set(sa->sub_pages, 0, nr_left);
+				nr_left = 0;
+				/* Indicate to user that we'll free this memory
+				 * later.
+				 */
+				*prealloc_sa = NULL;
+
+				/* expose for later use */
+				list_add(&sa->link, &kctx->mem_partials);
+			}
+		}
+		if (nr_left)
+			goto alloc_failed;
+	} else {
+#endif
+		res = kbase_mem_pool_alloc_pages_locked(pool,
+						 nr_left,
+						 tp);
+		if (res <= 0)
+			goto alloc_failed;
+#ifdef CONFIG_MALI_2MB_ALLOC
+	}
+#endif
+
+	KBASE_TLSTREAM_AUX_PAGESALLOC(
+			kbdev,
+			kctx->id,
+			(u64)new_page_count);
+
+	alloc->nents += nr_pages_requested;
+done:
+	return new_pages;
+
+alloc_failed:
+	/* Rollback needed if we got one or more 2MB chunks but failed later */
+	if (nr_left != nr_pages_requested) {
+		size_t nr_pages_to_free = nr_pages_requested - nr_left;
+
+		struct tagged_addr *start_free = alloc->pages + alloc->nents;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+		if (pool->order) {
+			while (nr_pages_to_free) {
+				if (is_huge_head(*start_free)) {
+					kbase_mem_pool_free_pages_locked(
+						pool, 512,
+						start_free,
+						false, /* not dirty */
+						true); /* return to pool */
+					nr_pages_to_free -= 512;
+					start_free += 512;
+				} else if (is_partial(*start_free)) {
+					free_partial_locked(kctx, pool,
+							*start_free);
+					nr_pages_to_free--;
+					start_free++;
+				}
+			}
+		} else {
+#endif
+			kbase_mem_pool_free_pages_locked(pool,
+					nr_pages_to_free,
+					start_free,
+					false, /* not dirty */
+					true); /* return to pool */
+#ifdef CONFIG_MALI_2MB_ALLOC
+		}
+#endif
+	}
+
+	kbase_process_page_usage_dec(kctx, nr_pages_requested);
+	atomic_sub(nr_pages_requested, &kctx->used_pages);
+	atomic_sub(nr_pages_requested, &kctx->kbdev->memdev.used_pages);
+
+invalid_request:
+	return NULL;
+}
+
+static void free_partial(struct kbase_context *kctx, int group_id, struct
+		tagged_addr tp)
+{
+	struct page *p, *head_page;
+	struct kbase_sub_alloc *sa;
+
+	p = as_page(tp);
+	head_page = (struct page *)p->lru.prev;
+	sa = (struct kbase_sub_alloc *)head_page->lru.next;
+	spin_lock(&kctx->mem_partials_lock);
+	clear_bit(p - head_page, sa->sub_pages);
+	if (bitmap_empty(sa->sub_pages, SZ_2M / SZ_4K)) {
+		list_del(&sa->link);
+		kbase_mem_pool_free(
+			&kctx->mem_pools.large[group_id],
+			head_page,
+			true);
+		kfree(sa);
+	} else if (bitmap_weight(sa->sub_pages, SZ_2M / SZ_4K) ==
+		   SZ_2M / SZ_4K - 1) {
+		/* expose the partial again */
+		list_add(&sa->link, &kctx->mem_partials);
+	}
+	spin_unlock(&kctx->mem_partials_lock);
+}
+
+int kbase_free_phy_pages_helper(
+	struct kbase_mem_phy_alloc *alloc,
+	size_t nr_pages_to_free)
+{
+	struct kbase_context *kctx = alloc->imported.native.kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+	bool syncback;
+	bool reclaimed = (alloc->evicted != 0);
+	struct tagged_addr *start_free;
+	int new_page_count __maybe_unused;
+	size_t freed = 0;
+
+	if (WARN_ON(alloc->type != KBASE_MEM_TYPE_NATIVE) ||
+	    WARN_ON(alloc->imported.native.kctx == NULL) ||
+	    WARN_ON(alloc->nents < nr_pages_to_free) ||
+	    WARN_ON(alloc->group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)) {
+		return -EINVAL;
+	}
+
+	/* early out if nothing to do */
+	if (0 == nr_pages_to_free)
+		return 0;
+
+	start_free = alloc->pages + alloc->nents - nr_pages_to_free;
+
+	syncback = alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
+
+	/* Pad start_free to a valid start location: it may point into the
+	 * middle of a huge (2MB) allocation, so skip past tail sub-pages
+	 * until a huge-page head or a normal page is reached.
+	 */
+	while (nr_pages_to_free && is_huge(*start_free) &&
+	       !is_huge_head(*start_free)) {
+		nr_pages_to_free--;
+		start_free++;
+	}
+
+	while (nr_pages_to_free) {
+		if (is_huge_head(*start_free)) {
+			/* This is a 2MB entry, so free all the 512 pages that
+			 * it points to
+			 */
+			kbase_mem_pool_free_pages(
+				&kctx->mem_pools.large[alloc->group_id],
+				512,
+				start_free,
+				syncback,
+				reclaimed);
+			nr_pages_to_free -= 512;
+			start_free += 512;
+			freed += 512;
+		} else if (is_partial(*start_free)) {
+			free_partial(kctx, alloc->group_id, *start_free);
+			nr_pages_to_free--;
+			start_free++;
+			freed++;
+		} else {
+			struct tagged_addr *local_end_free;
+
+			local_end_free = start_free;
+			while (nr_pages_to_free &&
+				!is_huge(*local_end_free) &&
+				!is_partial(*local_end_free)) {
+				local_end_free++;
+				nr_pages_to_free--;
+			}
+			kbase_mem_pool_free_pages(
+				&kctx->mem_pools.small[alloc->group_id],
+				local_end_free - start_free,
+				start_free,
+				syncback,
+				reclaimed);
+			freed += local_end_free - start_free;
+			start_free += local_end_free - start_free;
+		}
+	}
+
+	alloc->nents -= freed;
+
+	/*
+	 * If the allocation was not evicted (i.e. evicted == 0) then
+	 * the page accounting needs to be done.
+	 */
+	if (!reclaimed) {
+		kbase_process_page_usage_dec(kctx, freed);
+		new_page_count = atomic_sub_return(freed,
+			&kctx->used_pages);
+		atomic_sub(freed,
+			&kctx->kbdev->memdev.used_pages);
+
+		KBASE_TLSTREAM_AUX_PAGESALLOC(
+			kbdev,
+			kctx->id,
+			(u64)new_page_count);
+	}
+
+	return 0;
+}
+
+static void free_partial_locked(struct kbase_context *kctx,
+		struct kbase_mem_pool *pool, struct tagged_addr tp)
+{
+	struct page *p, *head_page;
+	struct kbase_sub_alloc *sa;
+
+	lockdep_assert_held(&pool->pool_lock);
+	lockdep_assert_held(&kctx->mem_partials_lock);
+
+	p = as_page(tp);
+	head_page = (struct page *)p->lru.prev;
+	sa = (struct kbase_sub_alloc *)head_page->lru.next;
+	clear_bit(p - head_page, sa->sub_pages);
+	if (bitmap_empty(sa->sub_pages, SZ_2M / SZ_4K)) {
+		list_del(&sa->link);
+		kbase_mem_pool_free_locked(pool, head_page, true);
+		kfree(sa);
+	} else if (bitmap_weight(sa->sub_pages, SZ_2M / SZ_4K) ==
+		   SZ_2M / SZ_4K - 1) {
+		/* expose the partial again */
+		list_add(&sa->link, &kctx->mem_partials);
+	}
+}
+
+void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
+		struct kbase_mem_pool *pool, struct tagged_addr *pages,
+		size_t nr_pages_to_free)
+{
+	struct kbase_context *kctx = alloc->imported.native.kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+	bool syncback;
+	bool reclaimed = (alloc->evicted != 0);
+	struct tagged_addr *start_free;
+	size_t freed = 0;
+
+	KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
+	KBASE_DEBUG_ASSERT(alloc->imported.native.kctx);
+	KBASE_DEBUG_ASSERT(alloc->nents >= nr_pages_to_free);
+
+	lockdep_assert_held(&pool->pool_lock);
+	lockdep_assert_held(&kctx->mem_partials_lock);
+
+	/* early out if nothing to do */
+	if (!nr_pages_to_free)
+		return;
+
+	start_free = pages;
+
+	syncback = alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
+
+	/* Pad start_free to a valid start location: it may point into the
+	 * middle of a huge (2MB) allocation, so skip past tail sub-pages
+	 * until a huge-page head or a normal page is reached.
+	 */
+	while (nr_pages_to_free && is_huge(*start_free) &&
+	       !is_huge_head(*start_free)) {
+		nr_pages_to_free--;
+		start_free++;
+	}
+
+	while (nr_pages_to_free) {
+		if (is_huge_head(*start_free)) {
+			/* This is a 2MB entry, so free all the 512 pages that
+			 * it points to
+			 */
+			WARN_ON(!pool->order);
+			kbase_mem_pool_free_pages_locked(pool,
+					512,
+					start_free,
+					syncback,
+					reclaimed);
+			nr_pages_to_free -= 512;
+			start_free += 512;
+			freed += 512;
+		} else if (is_partial(*start_free)) {
+			WARN_ON(!pool->order);
+			free_partial_locked(kctx, pool, *start_free);
+			nr_pages_to_free--;
+			start_free++;
+			freed++;
+		} else {
+			struct tagged_addr *local_end_free;
+
+			WARN_ON(pool->order);
+			local_end_free = start_free;
+			while (nr_pages_to_free &&
+			       !is_huge(*local_end_free) &&
+			       !is_partial(*local_end_free)) {
+				local_end_free++;
+				nr_pages_to_free--;
+			}
+			kbase_mem_pool_free_pages_locked(pool,
+					local_end_free - start_free,
+					start_free,
+					syncback,
+					reclaimed);
+			freed += local_end_free - start_free;
+			start_free += local_end_free - start_free;
+		}
+	}
+
+	alloc->nents -= freed;
+
+	/*
+	 * If the allocation was not evicted (i.e. evicted == 0) then
+	 * the page accounting needs to be done.
+	 */
+	if (!reclaimed) {
+		int new_page_count;
+
+		kbase_process_page_usage_dec(kctx, freed);
+		new_page_count = atomic_sub_return(freed,
+			&kctx->used_pages);
+		atomic_sub(freed,
+			&kctx->kbdev->memdev.used_pages);
+
+		KBASE_TLSTREAM_AUX_PAGESALLOC(
+				kbdev,
+				kctx->id,
+				(u64)new_page_count);
+	}
+}
+
+
+void kbase_mem_kref_free(struct kref *kref)
+{
+	struct kbase_mem_phy_alloc *alloc;
+
+	alloc = container_of(kref, struct kbase_mem_phy_alloc, kref);
+
+	switch (alloc->type) {
+	case KBASE_MEM_TYPE_NATIVE: {
+
+		if (!WARN_ON(!alloc->imported.native.kctx)) {
+			if (alloc->permanent_map)
+				kbase_phy_alloc_mapping_term(
+						alloc->imported.native.kctx,
+						alloc);
+
+			/*
+			 * The physical allocation must have been removed from
+			 * the eviction list before trying to free it.
+			 */
+			mutex_lock(
+				&alloc->imported.native.kctx->jit_evict_lock);
+			WARN_ON(!list_empty(&alloc->evict_node));
+			mutex_unlock(
+				&alloc->imported.native.kctx->jit_evict_lock);
+
+			kbase_process_page_usage_dec(
+					alloc->imported.native.kctx,
+					alloc->imported.native.nr_struct_pages);
+		}
+		kbase_free_phy_pages_helper(alloc, alloc->nents);
+		break;
+	}
+	case KBASE_MEM_TYPE_ALIAS: {
+		/* just call put on the underlying phy allocs */
+		size_t i;
+		struct kbase_aliased *aliased;
+
+		aliased = alloc->imported.alias.aliased;
+		if (aliased) {
+			for (i = 0; i < alloc->imported.alias.nents; i++)
+				if (aliased[i].alloc)
+					kbase_mem_phy_alloc_put(aliased[i].alloc);
+			vfree(aliased);
+		}
+		break;
+	}
+	case KBASE_MEM_TYPE_RAW:
+		/* raw pages, external cleanup */
+		break;
+	case KBASE_MEM_TYPE_IMPORTED_UMM:
+		if (!IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND)) {
+			WARN_ONCE(alloc->imported.umm.current_mapping_usage_count != 1,
+					"WARNING: expected excatly 1 mapping, got %d",
+					alloc->imported.umm.current_mapping_usage_count);
+			dma_buf_unmap_attachment(
+					alloc->imported.umm.dma_attachment,
+					alloc->imported.umm.sgt,
+					DMA_BIDIRECTIONAL);
+		}
+		dma_buf_detach(alloc->imported.umm.dma_buf,
+			       alloc->imported.umm.dma_attachment);
+		dma_buf_put(alloc->imported.umm.dma_buf);
+		break;
+	case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+		if (alloc->imported.user_buf.mm)
+			mmdrop(alloc->imported.user_buf.mm);
+		if (alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
+			vfree(alloc->imported.user_buf.pages);
+		else
+			kfree(alloc->imported.user_buf.pages);
+		break;
+	default:
+		WARN(1, "Unexecpted free of type %d\n", alloc->type);
+		break;
+	}
+
+	/* Free based on allocation type */
+	if (alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
+		vfree(alloc);
+	else
+		kfree(alloc);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_kref_free);
+
+int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size)
+{
+	KBASE_DEBUG_ASSERT(NULL != reg);
+	KBASE_DEBUG_ASSERT(vsize > 0);
+
+	/* validate user provided arguments */
+	if (size > vsize || vsize > reg->nr_pages)
+		goto out_term;
+
+	/* Prevent vsize*sizeof from wrapping around.
+	 * For instance, if vsize is 2**29+1, the multiplication wraps to a
+	 * tiny value and the undersized allocation won't fail.
+	 */
+	if ((size_t) vsize > ((size_t) -1 / sizeof(*reg->cpu_alloc->pages)))
+		goto out_term;
+
+	KBASE_DEBUG_ASSERT(0 != vsize);
+
+	if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, size) != 0)
+		goto out_term;
+
+	reg->cpu_alloc->reg = reg;
+	if (reg->cpu_alloc != reg->gpu_alloc) {
+		if (kbase_alloc_phy_pages_helper(reg->gpu_alloc, size) != 0)
+			goto out_rollback;
+		reg->gpu_alloc->reg = reg;
+	}
+
+	return 0;
+
+out_rollback:
+	kbase_free_phy_pages_helper(reg->cpu_alloc, size);
+out_term:
+	return -1;
+}
+
+KBASE_EXPORT_TEST_API(kbase_alloc_phy_pages);
+
+bool kbase_check_alloc_flags(unsigned long flags)
+{
+	/* Only known input flags should be set. */
+	if (flags & ~BASE_MEM_FLAGS_INPUT_MASK)
+		return false;
+
+	/* At least one flag should be set */
+	if (flags == 0)
+		return false;
+
+	/* Either the GPU or CPU must be reading from the allocated memory */
+	if ((flags & (BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD)) == 0)
+		return false;
+
+	/* Either the GPU or CPU must be writing to the allocated memory */
+	if ((flags & (BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR)) == 0)
+		return false;
+
+	/* GPU executable memory cannot:
+	 * - Be written by the GPU
+	 * - Be grown on GPU page fault
+	 * - Have the top of its initial commit aligned to 'extent' */
+	if ((flags & BASE_MEM_PROT_GPU_EX) && (flags &
+			(BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF |
+			BASE_MEM_TILER_ALIGN_TOP)))
+		return false;
+
+	/* Requiring an allocation to lie within a single 4GB chunk applies
+	 * only to TLS memory and the Tiler heap, neither of which will ever
+	 * be used to contain executable code.
+	 */
+	if ((flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE) && (flags &
+			(BASE_MEM_PROT_GPU_EX | BASE_MEM_TILER_ALIGN_TOP)))
+		return false;
+
+	/* GPU should have at least read or write access otherwise there is no
+	   reason for allocating. */
+	if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
+		return false;
+
+	/* BASE_MEM_IMPORT_SHARED is only valid for imported memory */
+	if ((flags & BASE_MEM_IMPORT_SHARED) == BASE_MEM_IMPORT_SHARED)
+		return false;
+
+	/* Should not combine BASE_MEM_COHERENT_LOCAL with
+	 * BASE_MEM_COHERENT_SYSTEM */
+	if ((flags & (BASE_MEM_COHERENT_LOCAL | BASE_MEM_COHERENT_SYSTEM)) ==
+			(BASE_MEM_COHERENT_LOCAL | BASE_MEM_COHERENT_SYSTEM))
+		return false;
+
+	return true;
+}
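+
+/*
+ * Illustrative example: flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
+ * BASE_MEM_PROT_GPU_WR passes the checks above (memory is readable, writable
+ * and GPU-accessible), whereas adding BASE_MEM_PROT_GPU_EX to a
+ * BASE_MEM_PROT_GPU_WR allocation fails the executable-memory restrictions.
+ */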
+
+bool kbase_check_import_flags(unsigned long flags)
+{
+	/* Only known input flags should be set. */
+	if (flags & ~BASE_MEM_FLAGS_INPUT_MASK)
+		return false;
+
+	/* At least one flag should be set */
+	if (flags == 0)
+		return false;
+
+	/* Imported memory cannot be GPU executable */
+	if (flags & BASE_MEM_PROT_GPU_EX)
+		return false;
+
+	/* Imported memory cannot grow on page fault */
+	if (flags & BASE_MEM_GROW_ON_GPF)
+		return false;
+
+	/* Imported memory cannot be aligned to the end of its initial commit */
+	if (flags & BASE_MEM_TILER_ALIGN_TOP)
+		return false;
+
+	/* GPU should have at least read or write access otherwise there is no
+	   reason for importing. */
+	if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
+		return false;
+
+	/* Protected memory cannot be read by the CPU */
+	if ((flags & BASE_MEM_PROTECTED) && (flags & BASE_MEM_PROT_CPU_RD))
+		return false;
+
+	return true;
+}
+
+int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
+		u64 va_pages, u64 commit_pages, u64 large_extent)
+{
+	struct device *dev = kctx->kbdev->dev;
+	int gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
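+	/* The GPU program counter can address (1 << gpu_pc_bits) bytes;
+	 * convert that to a page count (the shifts are left-associative).
+	 */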
+	u64 gpu_pc_pages_max = 1ULL << gpu_pc_bits >> PAGE_SHIFT;
+	struct kbase_va_region test_reg;
+
+	/* kbase_va_region's extent member can be of variable size, so check against that type */
+	test_reg.extent = large_extent;
+
+#define KBASE_MSG_PRE "GPU allocation attempted with "
+
+	if (0 == va_pages) {
+		dev_warn(dev, KBASE_MSG_PRE "0 va_pages!");
+		return -EINVAL;
+	}
+
+	if (va_pages > KBASE_MEM_ALLOC_MAX_SIZE) {
+		dev_warn(dev, KBASE_MSG_PRE "va_pages==%lld larger than KBASE_MEM_ALLOC_MAX_SIZE!",
+				(unsigned long long)va_pages);
+		return -ENOMEM;
+	}
+
+	/* Note: commit_pages is checked against va_pages during
+	 * kbase_alloc_phy_pages() */
+
+	/* Limit GPU executable allocs to GPU PC size */
+	if ((flags & BASE_MEM_PROT_GPU_EX) && (va_pages > gpu_pc_pages_max)) {
+		dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_PROT_GPU_EX and va_pages==%lld larger than GPU PC range %lld",
+				(unsigned long long)va_pages,
+				(unsigned long long)gpu_pc_pages_max);
+
+		return -EINVAL;
+	}
+
+	if ((flags & (BASE_MEM_GROW_ON_GPF | BASE_MEM_TILER_ALIGN_TOP)) &&
+			test_reg.extent == 0) {
+		dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_GROW_ON_GPF or BASE_MEM_TILER_ALIGN_TOP but extent == 0\n");
+		return -EINVAL;
+	}
+
+	if (!(flags & (BASE_MEM_GROW_ON_GPF | BASE_MEM_TILER_ALIGN_TOP)) &&
+			test_reg.extent != 0) {
+		dev_warn(dev, KBASE_MSG_PRE "neither BASE_MEM_GROW_ON_GPF nor BASE_MEM_TILER_ALIGN_TOP set but extent != 0\n");
+		return -EINVAL;
+	}
+
+	/* BASE_MEM_TILER_ALIGN_TOP memory has a number of restrictions */
+	if (flags & BASE_MEM_TILER_ALIGN_TOP) {
+#define KBASE_MSG_PRE_FLAG KBASE_MSG_PRE "BASE_MEM_TILER_ALIGN_TOP and "
+		unsigned long small_extent;
+
+		if (large_extent > BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES) {
+			dev_warn(dev, KBASE_MSG_PRE_FLAG "extent==%lld pages exceeds limit %lld",
+					(unsigned long long)large_extent,
+					BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES);
+			return -EINVAL;
+		}
+		/* For use with is_power_of_2, which takes unsigned long, so
+		 * must ensure e.g. on 32-bit kernel it'll fit in that type */
+		small_extent = (unsigned long)large_extent;
+
+		if (!is_power_of_2(small_extent)) {
+			dev_warn(dev, KBASE_MSG_PRE_FLAG "extent==%ld not a non-zero power of 2",
+					small_extent);
+			return -EINVAL;
+		}
+
+		if (commit_pages > large_extent) {
+			dev_warn(dev, KBASE_MSG_PRE_FLAG "commit_pages==%ld exceeds extent==%ld",
+					(unsigned long)commit_pages,
+					(unsigned long)large_extent);
+			return -EINVAL;
+		}
+#undef KBASE_MSG_PRE_FLAG
+	}
+
+	if ((flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE) &&
+	    (va_pages > (BASE_MEM_PFN_MASK_4GB + 1))) {
+		dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_GPU_VA_SAME_4GB_PAGE and va_pages==%lld greater than that needed for 4GB space",
+				(unsigned long long)va_pages);
+		return -EINVAL;
+	}
+
+	return 0;
+#undef KBASE_MSG_PRE
+}
+
+/**
+ * @brief Acquire the per-context region list lock
+ */
+void kbase_gpu_vm_lock(struct kbase_context *kctx)
+{
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	mutex_lock(&kctx->reg_lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_vm_lock);
+
+/**
+ * @brief Release the per-context region list lock
+ */
+void kbase_gpu_vm_unlock(struct kbase_context *kctx)
+{
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	mutex_unlock(&kctx->reg_lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_vm_unlock);
+
+#ifdef CONFIG_DEBUG_FS
+struct kbase_jit_debugfs_data {
+	int (*func)(struct kbase_jit_debugfs_data *);
+	struct mutex lock;
+	struct kbase_context *kctx;
+	u64 active_value;
+	u64 pool_value;
+	u64 destroy_value;
+	char buffer[50];
+};
+
+static int kbase_jit_debugfs_common_open(struct inode *inode,
+		struct file *file, int (*func)(struct kbase_jit_debugfs_data *))
+{
+	struct kbase_jit_debugfs_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->func = func;
+	mutex_init(&data->lock);
+	data->kctx = (struct kbase_context *) inode->i_private;
+
+	file->private_data = data;
+
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t kbase_jit_debugfs_common_read(struct file *file,
+		char __user *buf, size_t len, loff_t *ppos)
+{
+	struct kbase_jit_debugfs_data *data;
+	size_t size;
+	int ret;
+
+	data = (struct kbase_jit_debugfs_data *) file->private_data;
+	mutex_lock(&data->lock);
+
+	if (*ppos) {
+		size = strnlen(data->buffer, sizeof(data->buffer));
+	} else {
+		if (!data->func) {
+			ret = -EACCES;
+			goto out_unlock;
+		}
+
+		if (data->func(data)) {
+			ret = -EACCES;
+			goto out_unlock;
+		}
+
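+		/* Output format: "active,pool,destroy" as a CSV triple for
+		 * this debugfs counter.
+		 */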
+		size = scnprintf(data->buffer, sizeof(data->buffer),
+				"%llu,%llu,%llu", data->active_value,
+				data->pool_value, data->destroy_value);
+	}
+
+	ret = simple_read_from_buffer(buf, len, ppos, data->buffer, size);
+
+out_unlock:
+	mutex_unlock(&data->lock);
+	return ret;
+}
+
+static int kbase_jit_debugfs_common_release(struct inode *inode,
+		struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+#define KBASE_JIT_DEBUGFS_DECLARE(__fops, __func) \
+static int __fops ## _open(struct inode *inode, struct file *file) \
+{ \
+	return kbase_jit_debugfs_common_open(inode, file, __func); \
+} \
+static const struct file_operations __fops = { \
+	.owner = THIS_MODULE, \
+	.open = __fops ## _open, \
+	.release = kbase_jit_debugfs_common_release, \
+	.read = kbase_jit_debugfs_common_read, \
+	.write = NULL, \
+	.llseek = generic_file_llseek, \
+}
+
+static int kbase_jit_debugfs_count_get(struct kbase_jit_debugfs_data *data)
+{
+	struct kbase_context *kctx = data->kctx;
+	struct list_head *tmp;
+
+	mutex_lock(&kctx->jit_evict_lock);
+	list_for_each(tmp, &kctx->jit_active_head) {
+		data->active_value++;
+	}
+
+	list_for_each(tmp, &kctx->jit_pool_head) {
+		data->pool_value++;
+	}
+
+	list_for_each(tmp, &kctx->jit_destroy_head) {
+		data->destroy_value++;
+	}
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	return 0;
+}
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_count_fops,
+		kbase_jit_debugfs_count_get);
+
+static int kbase_jit_debugfs_vm_get(struct kbase_jit_debugfs_data *data)
+{
+	struct kbase_context *kctx = data->kctx;
+	struct kbase_va_region *reg;
+
+	mutex_lock(&kctx->jit_evict_lock);
+	list_for_each_entry(reg, &kctx->jit_active_head, jit_node) {
+		data->active_value += reg->nr_pages;
+	}
+
+	list_for_each_entry(reg, &kctx->jit_pool_head, jit_node) {
+		data->pool_value += reg->nr_pages;
+	}
+
+	list_for_each_entry(reg, &kctx->jit_destroy_head, jit_node) {
+		data->destroy_value += reg->nr_pages;
+	}
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	return 0;
+}
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_vm_fops,
+		kbase_jit_debugfs_vm_get);
+
+static int kbase_jit_debugfs_phys_get(struct kbase_jit_debugfs_data *data)
+{
+	struct kbase_context *kctx = data->kctx;
+	struct kbase_va_region *reg;
+
+	mutex_lock(&kctx->jit_evict_lock);
+	list_for_each_entry(reg, &kctx->jit_active_head, jit_node) {
+		data->active_value += reg->gpu_alloc->nents;
+	}
+
+	list_for_each_entry(reg, &kctx->jit_pool_head, jit_node) {
+		data->pool_value += reg->gpu_alloc->nents;
+	}
+
+	list_for_each_entry(reg, &kctx->jit_destroy_head, jit_node) {
+		data->destroy_value += reg->gpu_alloc->nents;
+	}
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	return 0;
+}
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_phys_fops,
+		kbase_jit_debugfs_phys_get);
+
+void kbase_jit_debugfs_init(struct kbase_context *kctx)
+{
+	/* Caller already ensures this, but we keep the pattern for
+	 * maintenance safety.
+	 */
+	if (WARN_ON(!kctx) ||
+		WARN_ON(IS_ERR_OR_NULL(kctx->kctx_dentry)))
+		return;
+
+	/* Debugfs entry for getting the number of JIT allocations. */
+	debugfs_create_file("mem_jit_count", S_IRUGO, kctx->kctx_dentry,
+			kctx, &kbase_jit_debugfs_count_fops);
+
+	/*
+	 * Debugfs entry for getting the total number of virtual pages
+	 * used by JIT allocations.
+	 */
+	debugfs_create_file("mem_jit_vm", S_IRUGO, kctx->kctx_dentry,
+			kctx, &kbase_jit_debugfs_vm_fops);
+
+	/*
+	 * Debugfs entry for getting the number of physical pages used
+	 * by JIT allocations.
+	 */
+	debugfs_create_file("mem_jit_phys", S_IRUGO, kctx->kctx_dentry,
+			kctx, &kbase_jit_debugfs_phys_fops);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * kbase_jit_destroy_worker - Deferred worker which frees JIT allocations
+ * @work: Work item
+ *
+ * This function does the work of freeing JIT allocations whose physical
+ * backing has been released.
+ */
+static void kbase_jit_destroy_worker(struct work_struct *work)
+{
+	struct kbase_context *kctx;
+	struct kbase_va_region *reg;
+
+	kctx = container_of(work, struct kbase_context, jit_work);
+	do {
+		mutex_lock(&kctx->jit_evict_lock);
+		if (list_empty(&kctx->jit_destroy_head)) {
+			mutex_unlock(&kctx->jit_evict_lock);
+			break;
+		}
+
+		reg = list_first_entry(&kctx->jit_destroy_head,
+				struct kbase_va_region, jit_node);
+
+		list_del(&reg->jit_node);
+		mutex_unlock(&kctx->jit_evict_lock);
+
+		kbase_gpu_vm_lock(kctx);
+		reg->flags &= ~KBASE_REG_NO_USER_FREE;
+		kbase_mem_free_region(kctx, reg);
+		kbase_gpu_vm_unlock(kctx);
+	} while (1);
+}
+
+int kbase_jit_init(struct kbase_context *kctx)
+{
+	mutex_lock(&kctx->jit_evict_lock);
+	INIT_LIST_HEAD(&kctx->jit_active_head);
+	INIT_LIST_HEAD(&kctx->jit_pool_head);
+	INIT_LIST_HEAD(&kctx->jit_destroy_head);
+	INIT_WORK(&kctx->jit_work, kbase_jit_destroy_worker);
+
+	INIT_LIST_HEAD(&kctx->jit_pending_alloc);
+	INIT_LIST_HEAD(&kctx->jit_atoms_head);
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	kctx->jit_max_allocations = 0;
+	kctx->jit_current_allocations = 0;
+	kctx->trim_level = 0;
+
+	return 0;
+}
+
+/* Check whether an allocation from the JIT pool has the same VA size as the
+ * new JIT allocation and, if BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP is set,
+ * whether it also meets the alignment requirements.
+ */
+static bool meet_size_and_tiler_align_top_requirements(struct kbase_context *kctx,
+	struct kbase_va_region *walker, struct base_jit_alloc_info *info)
+{
+	bool meet_reqs = true;
+
+	if (walker->nr_pages != info->va_pages)
+		meet_reqs = false;
+	else if (info->flags & BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP) {
+		size_t align = info->extent;
+		size_t align_mask = align - 1;
+
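+		/* The top of the initial commit (start_pfn + commit_pages)
+		 * must be extent-aligned; extent was validated as a power
+		 * of 2 in kbase_check_alloc_sizes(), so the mask tests the
+		 * low bits.
+		 */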
+		if ((walker->start_pfn + info->commit_pages) & align_mask)
+			meet_reqs = false;
+	}
+
+	return meet_reqs;
+}
+
+static int kbase_jit_grow(struct kbase_context *kctx,
+		struct base_jit_alloc_info *info, struct kbase_va_region *reg)
+{
+	size_t delta;
+	size_t pages_required;
+	size_t old_size;
+	struct kbase_mem_pool *pool;
+	int ret = -ENOMEM;
+	struct tagged_addr *gpu_pages;
+	struct kbase_sub_alloc *prealloc_sas[2] = { NULL, NULL };
+	int i;
+
+	if (info->commit_pages > reg->nr_pages) {
+		/* Attempted to grow larger than maximum size */
+		return -EINVAL;
+	}
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* Make the physical backing no longer reclaimable */
+	if (!kbase_mem_evictable_unmake(reg->gpu_alloc))
+		goto update_failed;
+
+	if (reg->gpu_alloc->nents >= info->commit_pages)
+		goto done;
+
+	/* Grow the backing */
+	old_size = reg->gpu_alloc->nents;
+
+	/* Allocate some more pages */
+	delta = info->commit_pages - reg->gpu_alloc->nents;
+	pages_required = delta;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+	/* Preallocate memory for the sub-allocation structs */
+	for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i) {
+		prealloc_sas[i] = kmalloc(sizeof(*prealloc_sas[i]),
+				GFP_KERNEL);
+		if (!prealloc_sas[i])
+			goto update_failed;
+	}
+
+	if (pages_required >= (SZ_2M / SZ_4K)) {
+		pool = &kctx->mem_pools.large[kctx->jit_group_id];
+		/* Round up to number of 2 MB pages required */
+		pages_required += ((SZ_2M / SZ_4K) - 1);
+		pages_required /= (SZ_2M / SZ_4K);
+	} else {
+#endif
+		pool = &kctx->mem_pools.small[kctx->jit_group_id];
+#ifdef CONFIG_MALI_2MB_ALLOC
+	}
+#endif
+
+	if (reg->cpu_alloc != reg->gpu_alloc)
+		pages_required *= 2;
+
+	spin_lock(&kctx->mem_partials_lock);
+	kbase_mem_pool_lock(pool);
+
+	/* As we can not allocate memory from the kernel with the vm_lock held,
+	 * grow the pool to the required size with the lock dropped. We hold the
+	 * pool lock to prevent another thread from allocating from the pool
+	 * between the grow and allocation.
+	 */
+	while (kbase_mem_pool_size(pool) < pages_required) {
+		int pool_delta = pages_required - kbase_mem_pool_size(pool);
+
+		kbase_mem_pool_unlock(pool);
+		spin_unlock(&kctx->mem_partials_lock);
+		kbase_gpu_vm_unlock(kctx);
+
+		if (kbase_mem_pool_grow(pool, pool_delta))
+			goto update_failed_unlocked;
+
+		kbase_gpu_vm_lock(kctx);
+		spin_lock(&kctx->mem_partials_lock);
+		kbase_mem_pool_lock(pool);
+	}
+
+	gpu_pages = kbase_alloc_phy_pages_helper_locked(reg->gpu_alloc, pool,
+			delta, &prealloc_sas[0]);
+	if (!gpu_pages) {
+		kbase_mem_pool_unlock(pool);
+		spin_unlock(&kctx->mem_partials_lock);
+		goto update_failed;
+	}
+
+	if (reg->cpu_alloc != reg->gpu_alloc) {
+		struct tagged_addr *cpu_pages;
+
+		cpu_pages = kbase_alloc_phy_pages_helper_locked(reg->cpu_alloc,
+				pool, delta, &prealloc_sas[1]);
+		if (!cpu_pages) {
+			kbase_free_phy_pages_helper_locked(reg->gpu_alloc,
+					pool, gpu_pages, delta);
+			kbase_mem_pool_unlock(pool);
+			spin_unlock(&kctx->mem_partials_lock);
+			goto update_failed;
+		}
+	}
+	kbase_mem_pool_unlock(pool);
+	spin_unlock(&kctx->mem_partials_lock);
+
+	ret = kbase_mem_grow_gpu_mapping(kctx, reg, info->commit_pages,
+			old_size);
+	/*
+	 * If the grow failed, put the allocation back in the
+	 * pool and return failure.
+	 */
+	if (ret)
+		goto update_failed;
+
+done:
+	ret = 0;
+
+	/* Update attributes of JIT allocation taken from the pool */
+	reg->initial_commit = info->commit_pages;
+	reg->extent = info->extent;
+
+update_failed:
+	kbase_gpu_vm_unlock(kctx);
+update_failed_unlocked:
+	for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i)
+		kfree(prealloc_sas[i]);
+
+	return ret;
+}
+
+static void trace_jit_stats(struct kbase_context *kctx,
+		u32 bin_id, u32 max_allocations)
+{
+	const u32 alloc_count =
+		kctx->jit_current_allocations_per_bin[bin_id];
+	struct kbase_device *kbdev = kctx->kbdev;
+
+	struct kbase_va_region *walker;
+	u32 va_pages = 0;
+	u32 ph_pages = 0;
+
+	mutex_lock(&kctx->jit_evict_lock);
+	list_for_each_entry(walker, &kctx->jit_active_head, jit_node) {
+		if (walker->jit_bin_id != bin_id)
+			continue;
+
+		va_pages += walker->nr_pages;
+		ph_pages += walker->gpu_alloc->nents;
+	}
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	KBASE_TLSTREAM_AUX_JIT_STATS(kbdev, kctx->id, bin_id,
+		max_allocations, alloc_count, va_pages, ph_pages);
+}
+
+struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
+		struct base_jit_alloc_info *info)
+{
+	struct kbase_va_region *reg = NULL;
+
+	if (kctx->jit_current_allocations >= kctx->jit_max_allocations) {
+		/* Too many current allocations */
+		dev_dbg(kctx->kbdev->dev,
+			"Max JIT allocations limit reached: active allocations %d, max allocations %d\n",
+			kctx->jit_current_allocations,
+			kctx->jit_max_allocations);
+		return NULL;
+	}
+	if (info->max_allocations > 0 &&
+			kctx->jit_current_allocations_per_bin[info->bin_id] >=
+			info->max_allocations) {
+		/* Too many current allocations in this bin */
+		dev_dbg(kctx->kbdev->dev,
+			"Per bin limit of max JIT allocations reached: bin_id %d, active allocations %d, max allocations %d\n",
+			info->bin_id,
+			kctx->jit_current_allocations_per_bin[info->bin_id],
+			info->max_allocations);
+		return NULL;
+	}
+
+	mutex_lock(&kctx->jit_evict_lock);
+
+	/*
+	 * Scan the pool for an existing allocation which meets our
+	 * requirements and remove it.
+	 */
+	if (info->usage_id != 0) {
+		/* First scan for an allocation with the same usage ID */
+		struct kbase_va_region *walker;
+		size_t current_diff = SIZE_MAX;
+
+		list_for_each_entry(walker, &kctx->jit_pool_head, jit_node) {
+
+			if (walker->jit_usage_id == info->usage_id &&
+					walker->jit_bin_id == info->bin_id &&
+					meet_size_and_tiler_align_top_requirements(
+							kctx, walker, info)) {
+				size_t min_size, max_size, diff;
+
+				/*
+				 * This allocation's VA requirements have been
+				 * met; it's suitable, but another allocation
+				 * might be a better fit.
+				 */
+				min_size = min_t(size_t,
+						walker->gpu_alloc->nents,
+						info->commit_pages);
+				max_size = max_t(size_t,
+						walker->gpu_alloc->nents,
+						info->commit_pages);
+				diff = max_size - min_size;
+
+				if (current_diff > diff) {
+					current_diff = diff;
+					reg = walker;
+				}
+
+				/* The allocation is an exact match */
+				if (current_diff == 0)
+					break;
+			}
+		}
+	}
+
+	if (!reg) {
+		/* No allocation with the same usage ID, or usage IDs not in
+		 * use. Search for an allocation we can reuse.
+		 */
+		struct kbase_va_region *walker;
+		size_t current_diff = SIZE_MAX;
+
+		list_for_each_entry(walker, &kctx->jit_pool_head, jit_node) {
+
+			if (walker->jit_bin_id == info->bin_id &&
+					meet_size_and_tiler_align_top_requirements(
+							kctx, walker, info)) {
+				size_t min_size, max_size, diff;
+
+				/*
+				 * This allocation's VA requirements have been
+				 * met; it's suitable, but another allocation
+				 * might be a better fit.
+				 */
+				min_size = min_t(size_t,
+						walker->gpu_alloc->nents,
+						info->commit_pages);
+				max_size = max_t(size_t,
+						walker->gpu_alloc->nents,
+						info->commit_pages);
+				diff = max_size - min_size;
+
+				if (current_diff > diff) {
+					current_diff = diff;
+					reg = walker;
+				}
+
+				/* The allocation is an exact match, so stop
+				 * looking.
+				 */
+				if (current_diff == 0)
+					break;
+			}
+		}
+	}
+
+	if (reg) {
+		/*
+		 * Remove the found region from the pool and add it to the
+		 * active list.
+		 */
+		list_move(&reg->jit_node, &kctx->jit_active_head);
+
+		/*
+		 * Remove the allocation from the eviction list as it's no
+		 * longer eligible for eviction. This must be done before
+		 * dropping the jit_evict_lock
+		 */
+		list_del_init(&reg->gpu_alloc->evict_node);
+		mutex_unlock(&kctx->jit_evict_lock);
+
+		if (kbase_jit_grow(kctx, info, reg) < 0) {
+			/*
+			 * An update to an allocation from the pool failed;
+			 * chances are slim that a new allocation would fare
+			 * any better, so return the allocation to the pool
+			 * and fail.
+			 */
+			dev_dbg(kctx->kbdev->dev,
+				"JIT allocation resize failed: va_pages 0x%llx, commit_pages 0x%llx\n",
+				info->va_pages, info->commit_pages);
+			goto update_failed_unlocked;
+		}
+	} else {
+		/* No suitable JIT allocation was found so create a new one */
+		u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
+				BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF |
+				BASE_MEM_COHERENT_LOCAL |
+				BASEP_MEM_NO_USER_FREE;
+		u64 gpu_addr;
+
+		mutex_unlock(&kctx->jit_evict_lock);
+
+		if (info->flags & BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP)
+			flags |= BASE_MEM_TILER_ALIGN_TOP;
+
+		flags |= base_mem_group_id_set(kctx->jit_group_id);
+
+		reg = kbase_mem_alloc(kctx, info->va_pages, info->commit_pages,
+				info->extent, &flags, &gpu_addr);
+		if (!reg) {
+			/* Most likely not enough GPU virtual space left for
+			 * the new JIT allocation.
+			 */
+			dev_dbg(kctx->kbdev->dev,
+				"Failed to allocate JIT memory: va_pages 0x%llx, commit_pages 0x%llx\n",
+				info->va_pages, info->commit_pages);
+			goto out_unlocked;
+		}
+
+		mutex_lock(&kctx->jit_evict_lock);
+		list_add(&reg->jit_node, &kctx->jit_active_head);
+		mutex_unlock(&kctx->jit_evict_lock);
+	}
+
+	kctx->jit_current_allocations++;
+	kctx->jit_current_allocations_per_bin[info->bin_id]++;
+
+	trace_jit_stats(kctx, info->bin_id, info->max_allocations);
+
+	reg->jit_usage_id = info->usage_id;
+	reg->jit_bin_id = info->bin_id;
+
+	return reg;
+
+update_failed_unlocked:
+	mutex_lock(&kctx->jit_evict_lock);
+	list_move(&reg->jit_node, &kctx->jit_pool_head);
+	mutex_unlock(&kctx->jit_evict_lock);
+out_unlocked:
+	return NULL;
+}
+
+void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+	u64 old_pages;
+
+	/* Get current size of JIT region */
+	old_pages = kbase_reg_current_backed_size(reg);
+	if (reg->initial_commit < old_pages) {
+		/* Free trim_level % of the region (e.g. trim_level == 20
+		 * frees 20% of the current backing), but don't shrink
+		 * below the initial commit size.
+		 */
+		u64 new_size = MAX(reg->initial_commit,
+			div_u64(old_pages * (100 - kctx->trim_level), 100));
+		u64 delta = old_pages - new_size;
+
+		if (delta) {
+			kbase_mem_shrink_cpu_mapping(kctx, reg, old_pages-delta,
+					old_pages);
+			kbase_mem_shrink_gpu_mapping(kctx, reg, old_pages-delta,
+					old_pages);
+
+			kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+			if (reg->cpu_alloc != reg->gpu_alloc)
+				kbase_free_phy_pages_helper(reg->gpu_alloc,
+						delta);
+		}
+	}
+
+	kctx->jit_current_allocations--;
+	kctx->jit_current_allocations_per_bin[reg->jit_bin_id]--;
+
+	trace_jit_stats(kctx, reg->jit_bin_id, UINT_MAX);
+
+	kbase_mem_evictable_mark_reclaim(reg->gpu_alloc);
+
+	kbase_gpu_vm_lock(kctx);
+	reg->flags |= KBASE_REG_DONT_NEED;
+	kbase_mem_shrink_cpu_mapping(kctx, reg, 0, reg->gpu_alloc->nents);
+	kbase_gpu_vm_unlock(kctx);
+
+	/*
+	 * Add the allocation to the eviction list and the jit pool; after
+	 * this point the shrinker can reclaim it, or it may be reused.
+	 */
+	mutex_lock(&kctx->jit_evict_lock);
+
+	/* This allocation can't already be on a list. */
+	WARN_ON(!list_empty(&reg->gpu_alloc->evict_node));
+	list_add(&reg->gpu_alloc->evict_node, &kctx->evict_list);
+
+	list_move(&reg->jit_node, &kctx->jit_pool_head);
+
+	mutex_unlock(&kctx->jit_evict_lock);
+}
+
+void kbase_jit_backing_lost(struct kbase_va_region *reg)
+{
+	struct kbase_context *kctx = kbase_reg_flags_to_kctx(reg);
+
+	if (WARN_ON(!kctx))
+		return;
+
+	lockdep_assert_held(&kctx->jit_evict_lock);
+
+	/*
+	 * JIT allocations will always be on a list, if the region
+	 * is not on a list then it's not a JIT allocation.
+	 */
+	if (list_empty(&reg->jit_node))
+		return;
+
+	/*
+	 * Freeing the allocation requires locks we might not be able
+	 * to take now, so move the allocation to the free list and kick
+	 * the worker which will do the freeing.
+	 */
+	list_move(&reg->jit_node, &kctx->jit_destroy_head);
+
+	schedule_work(&kctx->jit_work);
+}
+
+bool kbase_jit_evict(struct kbase_context *kctx)
+{
+	struct kbase_va_region *reg = NULL;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/* Free the oldest allocation from the pool */
+	mutex_lock(&kctx->jit_evict_lock);
+	if (!list_empty(&kctx->jit_pool_head)) {
+		reg = list_entry(kctx->jit_pool_head.prev,
+				struct kbase_va_region, jit_node);
+		list_del(&reg->jit_node);
+		list_del_init(&reg->gpu_alloc->evict_node);
+	}
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	if (reg) {
+		reg->flags &= ~KBASE_REG_NO_USER_FREE;
+		kbase_mem_free_region(kctx, reg);
+	}
+
+	return (reg != NULL);
+}
+
+void kbase_jit_term(struct kbase_context *kctx)
+{
+	struct kbase_va_region *walker;
+
+	/* Free all allocations for this context */
+
+	kbase_gpu_vm_lock(kctx);
+	mutex_lock(&kctx->jit_evict_lock);
+	/* Free all allocations from the pool */
+	while (!list_empty(&kctx->jit_pool_head)) {
+		walker = list_first_entry(&kctx->jit_pool_head,
+				struct kbase_va_region, jit_node);
+		list_del(&walker->jit_node);
+		list_del_init(&walker->gpu_alloc->evict_node);
+		mutex_unlock(&kctx->jit_evict_lock);
+		walker->flags &= ~KBASE_REG_NO_USER_FREE;
+		kbase_mem_free_region(kctx, walker);
+		mutex_lock(&kctx->jit_evict_lock);
+	}
+
+	/* Free all allocations from active list */
+	while (!list_empty(&kctx->jit_active_head)) {
+		walker = list_first_entry(&kctx->jit_active_head,
+				struct kbase_va_region, jit_node);
+		list_del(&walker->jit_node);
+		list_del_init(&walker->gpu_alloc->evict_node);
+		mutex_unlock(&kctx->jit_evict_lock);
+		walker->flags &= ~KBASE_REG_NO_USER_FREE;
+		kbase_mem_free_region(kctx, walker);
+		mutex_lock(&kctx->jit_evict_lock);
+	}
+	mutex_unlock(&kctx->jit_evict_lock);
+	kbase_gpu_vm_unlock(kctx);
+
+	/*
+	 * Flush the freeing of allocations whose backing has been freed
+	 * (i.e. everything in jit_destroy_head).
+	 */
+	cancel_work_sync(&kctx->jit_work);
+}
+
+bool kbase_has_exec_va_zone(struct kbase_context *kctx)
+{
+	bool has_exec_va_zone;
+
+	kbase_gpu_vm_lock(kctx);
+	has_exec_va_zone = (kctx->exec_va_start != U64_MAX);
+	kbase_gpu_vm_unlock(kctx);
+
+	return has_exec_va_zone;
+}
+
+
+int kbase_jd_user_buf_pin_pages(struct kbase_context *kctx,
+		struct kbase_va_region *reg)
+{
+	struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
+	struct page **pages = alloc->imported.user_buf.pages;
+	unsigned long address = alloc->imported.user_buf.address;
+	struct mm_struct *mm = alloc->imported.user_buf.mm;
+	long pinned_pages;
+	long i;
+
+	if (WARN_ON(alloc->type != KBASE_MEM_TYPE_IMPORTED_USER_BUF))
+		return -EINVAL;
+
+	if (alloc->nents) {
+		if (WARN_ON(alloc->nents != alloc->imported.user_buf.nr_pages))
+			return -EINVAL;
+		else
+			return 0;
+	}
+
+	if (WARN_ON(reg->gpu_alloc->imported.user_buf.mm != current->mm))
+		return -EINVAL;
+
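+	/* The get_user_pages*() API changed several times between v4.4 and
+	 * v4.10; the #if ladder below selects the matching signature.
+	 */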
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+	pinned_pages = get_user_pages(NULL, mm,
+			address,
+			alloc->imported.user_buf.nr_pages,
+#if KERNEL_VERSION(4, 4, 168) <= LINUX_VERSION_CODE && \
+KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE
+			reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+			pages, NULL);
+#else
+			reg->flags & KBASE_REG_GPU_WR,
+			0, pages, NULL);
+#endif
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+	pinned_pages = get_user_pages_remote(NULL, mm,
+			address,
+			alloc->imported.user_buf.nr_pages,
+			reg->flags & KBASE_REG_GPU_WR,
+			0, pages, NULL);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+	pinned_pages = get_user_pages_remote(NULL, mm,
+			address,
+			alloc->imported.user_buf.nr_pages,
+			reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+			pages, NULL);
+#else
+	pinned_pages = get_user_pages_remote(NULL, mm,
+			address,
+			alloc->imported.user_buf.nr_pages,
+			reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+			pages, NULL, NULL);
+#endif
+
+	if (pinned_pages <= 0)
+		return pinned_pages;
+
+	if (pinned_pages != alloc->imported.user_buf.nr_pages) {
+		for (i = 0; i < pinned_pages; i++)
+			put_page(pages[i]);
+		return -ENOMEM;
+	}
+
+	alloc->nents = pinned_pages;
+
+	return 0;
+}
+
+static int kbase_jd_user_buf_map(struct kbase_context *kctx,
+		struct kbase_va_region *reg)
+{
+	long pinned_pages;
+	struct kbase_mem_phy_alloc *alloc;
+	struct page **pages;
+	struct tagged_addr *pa;
+	long i;
+	unsigned long address;
+	struct device *dev;
+	unsigned long offset;
+	unsigned long local_size;
+	unsigned long gwt_mask = ~0;
+	int err = kbase_jd_user_buf_pin_pages(kctx, reg);
+
+	if (err)
+		return err;
+
+	alloc = reg->gpu_alloc;
+	pa = kbase_get_gpu_phy_pages(reg);
+	address = alloc->imported.user_buf.address;
+	pinned_pages = alloc->nents;
+	pages = alloc->imported.user_buf.pages;
+	dev = kctx->kbdev->dev;
+	offset = address & ~PAGE_MASK;
+	local_size = alloc->imported.user_buf.size;
+
+	for (i = 0; i < pinned_pages; i++) {
+		dma_addr_t dma_addr;
+		unsigned long min;
+
+		min = MIN(PAGE_SIZE - offset, local_size);
+		dma_addr = dma_map_page(dev, pages[i],
+				offset, min,
+				DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dev, dma_addr))
+			goto unwind;
+
+		alloc->imported.user_buf.dma_addrs[i] = dma_addr;
+		pa[i] = as_tagged(page_to_phys(pages[i]));
+
+		local_size -= min;
+		offset = 0;
+	}
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+	if (kctx->gwt_enabled)
+		gwt_mask = ~KBASE_REG_GPU_WR;
+#endif
+
+	err = kbase_mmu_insert_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
+			pa, kbase_reg_current_backed_size(reg),
+			reg->flags & gwt_mask, kctx->as_nr,
+			alloc->group_id);
+	if (err == 0)
+		return 0;
+
+	/* fall through to the unwind path on error */
+unwind:
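+	/* Two-phase unwind: first undo the DMA mappings created so far
+	 * (i counts down to -1), then the ++i loop restarts at index 0 and
+	 * drops the pin on every page get_user_pages acquired.
+	 */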
+	alloc->nents = 0;
+	while (i--) {
+		dma_unmap_page(kctx->kbdev->dev,
+				alloc->imported.user_buf.dma_addrs[i],
+				PAGE_SIZE, DMA_BIDIRECTIONAL);
+	}
+
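+	/* i is -1 here, so this drops the pin reference on every pinned page */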
+	while (++i < pinned_pages) {
+		put_page(pages[i]);
+		pages[i] = NULL;
+	}
+
+	return err;
+}
+
+/* On Job Manager GPUs this function also performs the work of unpinning the
+ * pages, which implies that a call to kbase_jd_user_buf_pin_pages() will NOT
+ * have a corresponding call to kbase_jd_user_buf_unpin_pages().
+ */
+static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
+		struct kbase_mem_phy_alloc *alloc, bool writeable)
+{
+	long i;
+	struct page **pages;
+	unsigned long size = alloc->imported.user_buf.size;
+
+	KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+	pages = alloc->imported.user_buf.pages;
+	for (i = 0; i < alloc->imported.user_buf.nr_pages; i++) {
+		unsigned long local_size;
+		dma_addr_t dma_addr = alloc->imported.user_buf.dma_addrs[i];
+
+		local_size = MIN(size, PAGE_SIZE - (dma_addr & ~PAGE_MASK));
+		dma_unmap_page(kctx->kbdev->dev, dma_addr, local_size,
+				DMA_BIDIRECTIONAL);
+		if (writeable)
+			set_page_dirty_lock(pages[i]);
+		put_page(pages[i]);
+		pages[i] = NULL;
+
+		size -= local_size;
+	}
+	alloc->nents = 0;
+}
+
+struct kbase_mem_phy_alloc *kbase_map_external_resource(
+		struct kbase_context *kctx, struct kbase_va_region *reg,
+		struct mm_struct *locked_mm)
+{
+	int err;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/* decide what needs to happen for this resource */
+	switch (reg->gpu_alloc->type) {
+	case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
+		if ((reg->gpu_alloc->imported.user_buf.mm != locked_mm) &&
+		    (!reg->gpu_alloc->nents))
+			goto exit;
+
+		reg->gpu_alloc->imported.user_buf.current_mapping_usage_count++;
+		if (1 == reg->gpu_alloc->imported.user_buf.current_mapping_usage_count) {
+			err = kbase_jd_user_buf_map(kctx, reg);
+			if (err) {
+				reg->gpu_alloc->imported.user_buf.current_mapping_usage_count--;
+				goto exit;
+			}
+		}
+	}
+	break;
+	case KBASE_MEM_TYPE_IMPORTED_UMM: {
+		err = kbase_mem_umm_map(kctx, reg);
+		if (err)
+			goto exit;
+		break;
+	}
+	default:
+		goto exit;
+	}
+
+	return kbase_mem_phy_alloc_get(reg->gpu_alloc);
+exit:
+	return NULL;
+}
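+
+/* The allocation returned above carries an extra reference taken with
+ * kbase_mem_phy_alloc_get(); callers are expected to hand it back via
+ * kbase_unmap_external_resource(), which drops it with
+ * kbase_mem_phy_alloc_put().
+ */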
+
+void kbase_unmap_external_resource(struct kbase_context *kctx,
+		struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc)
+{
+	switch (alloc->type) {
+	case KBASE_MEM_TYPE_IMPORTED_UMM: {
+		kbase_mem_umm_unmap(kctx, reg, alloc);
+	}
+	break;
+	case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
+		alloc->imported.user_buf.current_mapping_usage_count--;
+
+		if (0 == alloc->imported.user_buf.current_mapping_usage_count) {
+			bool writeable = true;
+
+			if (!kbase_is_region_invalid_or_free(reg) &&
+					reg->gpu_alloc == alloc)
+				kbase_mmu_teardown_pages(
+						kctx->kbdev,
+						&kctx->mmu,
+						reg->start_pfn,
+						kbase_reg_current_backed_size(reg),
+						kctx->as_nr);
+
+			if (reg && ((reg->flags & KBASE_REG_GPU_WR) == 0))
+				writeable = false;
+
+			kbase_jd_user_buf_unmap(kctx, alloc, writeable);
+		}
+	}
+	break;
+	default:
+	break;
+	}
+	kbase_mem_phy_alloc_put(alloc);
+}
+
+struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
+		struct kbase_context *kctx, u64 gpu_addr)
+{
+	struct kbase_ctx_ext_res_meta *meta = NULL;
+	struct kbase_ctx_ext_res_meta *walker;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/*
+	 * Walk the per context external resource metadata list for the
+	 * metadata which matches the region which is being acquired.
+	 */
+	list_for_each_entry(walker, &kctx->ext_res_meta_head, ext_res_node) {
+		if (walker->gpu_addr == gpu_addr) {
+			meta = walker;
+			break;
+		}
+	}
+
+	/* No metadata exists so create one. */
+	if (!meta) {
+		struct kbase_va_region *reg;
+
+		/* Find the region */
+		reg = kbase_region_tracker_find_region_enclosing_address(
+				kctx, gpu_addr);
+		if (kbase_is_region_invalid_or_free(reg))
+			goto failed;
+
+		/* Allocate the metadata object */
+		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+		if (!meta)
+			goto failed;
+
+		/*
+		 * Fill in the metadata object and acquire a reference
+		 * for the physical resource.
+		 */
+		meta->alloc = kbase_map_external_resource(kctx, reg, NULL);
+
+		if (!meta->alloc)
+			goto fail_map;
+
+		meta->gpu_addr = reg->start_pfn << PAGE_SHIFT;
+
+		list_add(&meta->ext_res_node, &kctx->ext_res_meta_head);
+	}
+
+	return meta;
+
+fail_map:
+	kfree(meta);
+failed:
+	return NULL;
+}
+
+bool kbase_sticky_resource_release(struct kbase_context *kctx,
+		struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr)
+{
+	struct kbase_ctx_ext_res_meta *walker;
+	struct kbase_va_region *reg;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/* Search for the metadata if one isn't provided. */
+	if (!meta) {
+		/*
+		 * Walk the per context external resource metadata list for the
+		 * metadata which matches the region which is being released.
+		 */
+		list_for_each_entry(walker, &kctx->ext_res_meta_head,
+				ext_res_node) {
+			if (walker->gpu_addr == gpu_addr) {
+				meta = walker;
+				break;
+			}
+		}
+	}
+
+	/* No metadata so just return. */
+	if (!meta)
+		return false;
+
+	/* Drop the physical memory reference and free the metadata. */
+	reg = kbase_region_tracker_find_region_enclosing_address(
+			kctx,
+			meta->gpu_addr);
+
+	kbase_unmap_external_resource(kctx, reg, meta->alloc);
+	list_del(&meta->ext_res_node);
+	kfree(meta);
+
+	return true;
+}
+
+int kbase_sticky_resource_init(struct kbase_context *kctx)
+{
+	INIT_LIST_HEAD(&kctx->ext_res_meta_head);
+
+	return 0;
+}
+
+void kbase_sticky_resource_term(struct kbase_context *kctx)
+{
+	struct kbase_ctx_ext_res_meta *walker;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/*
+	 * Free any sticky resources which haven't been unmapped.
+	 *
+	 * Note:
+	 * We don't care about refcounts at this point as no future
+	 * references to the metadata will be made.
+	 * Region termination would find these if we didn't free them
+	 * here, but it's more efficient if we do the clean up here.
+	 */
+	while (!list_empty(&kctx->ext_res_meta_head)) {
+		walker = list_first_entry(&kctx->ext_res_meta_head,
+				struct kbase_ctx_ext_res_meta, ext_res_node);
+
+		kbase_sticky_resource_release(kctx, walker, 0);
+	}
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem.h b/drivers/gpu/arm/midgard/mali_kbase_mem.h
new file mode 100644
index 0000000..bebf55f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem.h
@@ -0,0 +1,1675 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem.h
+ * Base kernel memory APIs
+ */
+
+#ifndef _KBASE_MEM_H_
+#define _KBASE_MEM_H_
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
+#include <linux/kref.h>
+#include "mali_base_kernel.h"
+#include <mali_kbase_hw.h>
+#include "mali_kbase_pm.h"
+#include "mali_kbase_defs.h"
+/* Required for kbase_mem_evictable_unmake */
+#include "mali_kbase_mem_linux.h"
+
+static inline void kbase_process_page_usage_inc(struct kbase_context *kctx,
+		int pages);
+
+/* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink tmem by 4 pages at a time */
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2)	/* round to 4 pages */
+
+/* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by
+ * 8 pages. The MMU reads in 8 page table entries from memory at a time; if we
+ * have more than one page fault within the same 8 pages and the page tables
+ * are updated accordingly, the MMU does not re-read the page table entries
+ * from memory for the subsequent page table updates and generates duplicate
+ * page faults, as the page table information used by the MMU is not valid.
+ */
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3)	/* round to 8 pages */
+
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2 (0)	/* round to 1 page */
+
+/* This must always be a power of 2 */
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2)
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316)
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630)
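+
+/* Illustrative sketch only: assuming requests are rounded up to the block
+ * size (always a power of 2, see above), the usual round-up idiom applies,
+ * e.g. with the PRLAM-9630 workaround a request for 5 pages becomes 8:
+ *
+ *   nr_pages = (nr_pages + blocksize - 1) & ~(blocksize - 1);
+ */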
+/**
+ * A CPU mapping
+ */
+struct kbase_cpu_mapping {
+	struct   list_head mappings_list;
+	struct   kbase_mem_phy_alloc *alloc;
+	struct   kbase_context *kctx;
+	struct   kbase_va_region *region;
+	int      count;
+	int      free_on_close;
+};
+
+enum kbase_memory_type {
+	KBASE_MEM_TYPE_NATIVE,
+	KBASE_MEM_TYPE_IMPORTED_UMM,
+	KBASE_MEM_TYPE_IMPORTED_USER_BUF,
+	KBASE_MEM_TYPE_ALIAS,
+	KBASE_MEM_TYPE_RAW
+};
+
+/* internal structure, mirroring base_mem_aliasing_info,
+ * but with alloc instead of a gpu va (handle) */
+struct kbase_aliased {
+	struct kbase_mem_phy_alloc *alloc; /* NULL for special, non-NULL for native */
+	u64 offset; /* in pages */
+	u64 length; /* in pages */
+};
+
+/**
+ * @brief Physical pages tracking object properties
+ */
+#define KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED  (1u << 0)
+#define KBASE_MEM_PHY_ALLOC_LARGE            (1u << 1)
+
+/* struct kbase_mem_phy_alloc - Physical pages tracking object.
+ *
+ * Set up to track N pages.
+ * N not stored here, the creator holds that info.
+ * This object only tracks how many elements are actually valid (present).
+ * Changing of nents or *pages should only happen if the kbase_mem_phy_alloc
+ * is not shared with another region or client. CPU mappings are OK to
+ * exist when changing, as long as the tracked mappings objects are
+ * updated as part of the change.
+ *
+ * @kref: number of users of this alloc
+ * @gpu_mappings: count number of times mapped on the GPU
+ * @nents: 0..N
+ * @pages: N elements, only 0..nents are valid
+ * @mappings: List of CPU mappings of this physical memory allocation.
+ * @evict_node: Node used to store this allocation on the eviction list
+ * @evicted: Physical backing size when the pages were evicted
+ * @reg: Back reference to the region structure which created this
+ *       allocation, or NULL if it has been freed.
+ * @type: type of buffer
+ * @permanent_map: Kernel side mapping of the alloc, shall never be
+ *                 referred directly. kbase_phy_alloc_mapping_get() &
+ *                 kbase_phy_alloc_mapping_put() pair should be used
+ *                 around access to the kernel-side CPU mapping so that
+ *                 mapping doesn't disappear whilst it is being accessed.
+ * @properties: Bitmask of properties, e.g. KBASE_MEM_PHY_ALLOC_LARGE.
+ * @group_id: A memory group ID to be passed to a platform-specific
+ *            memory group manager, if present.
+ *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ * @imported: member in union valid based on @type
+ */
+struct kbase_mem_phy_alloc {
+	struct kref           kref;
+	atomic_t              gpu_mappings;
+	size_t                nents;
+	struct tagged_addr    *pages;
+	struct list_head      mappings;
+	struct list_head      evict_node;
+	size_t                evicted;
+	struct kbase_va_region *reg;
+	enum kbase_memory_type type;
+	struct kbase_vmap_struct *permanent_map;
+	u8 properties;
+	u8 group_id;
+
+	union {
+		struct {
+			struct dma_buf *dma_buf;
+			struct dma_buf_attachment *dma_attachment;
+			unsigned int current_mapping_usage_count;
+			struct sg_table *sgt;
+		} umm;
+		struct {
+			u64 stride;
+			size_t nents;
+			struct kbase_aliased *aliased;
+		} alias;
+		struct {
+			struct kbase_context *kctx;
+			/* Number of pages in this structure, including *pages.
+			 * Used for kernel memory tracking.
+			 */
+			size_t nr_struct_pages;
+		} native;
+		struct kbase_alloc_import_user_buf {
+			unsigned long address;
+			unsigned long size;
+			unsigned long nr_pages;
+			struct page **pages;
+			/* top bit (1<<31) of current_mapping_usage_count
+			 * specifies that this import was pinned on import
+			 * See PINNED_ON_IMPORT
+			 */
+			u32 current_mapping_usage_count;
+			struct mm_struct *mm;
+			dma_addr_t *dma_addrs;
+		} user_buf;
+	} imported;
+};
+
+/* The top bit of kbase_alloc_import_user_buf::current_mapping_usage_count is
+ * used to signify that a buffer was pinned when it was imported. Since the
+ * reference count is limited by the number of atoms that can be submitted at
+ * once, there should be no danger of overflowing into this bit.
+ * Stealing the top bit also has the benefit that
+ * current_mapping_usage_count != 0 if and only if the buffer is mapped.
+ */
+#define PINNED_ON_IMPORT	(1<<31)
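+
+/* Illustrative sketch (not part of the driver API): with the top bit stolen,
+ * the pin state and the mapping count can be recovered from the same field:
+ *
+ *   u32 count = alloc->imported.user_buf.current_mapping_usage_count;
+ *   bool pinned_on_import = count & PINNED_ON_IMPORT;
+ *   u32 nr_mappings = count & ~PINNED_ON_IMPORT;
+ */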
+
+static inline void kbase_mem_phy_alloc_gpu_mapped(struct kbase_mem_phy_alloc *alloc)
+{
+	KBASE_DEBUG_ASSERT(alloc);
+	/* we only track mappings of NATIVE buffers */
+	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
+		atomic_inc(&alloc->gpu_mappings);
+}
+
+static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *alloc)
+{
+	KBASE_DEBUG_ASSERT(alloc);
+	/* we only track mappings of NATIVE buffers */
+	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
+		if (0 > atomic_dec_return(&alloc->gpu_mappings)) {
+			pr_err("Mismatched %s:\n", __func__);
+			dump_stack();
+		}
+}
+
+/**
+ * kbase_mem_is_imported - Indicate whether a memory type is imported
+ *
+ * @type: the memory type
+ *
+ * Return: true if the memory type is imported, false otherwise
+ */
+static inline bool kbase_mem_is_imported(enum kbase_memory_type type)
+{
+	return (type == KBASE_MEM_TYPE_IMPORTED_UMM) ||
+		(type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+}
+
+void kbase_mem_kref_free(struct kref *kref);
+
+int kbase_mem_init(struct kbase_device *kbdev);
+void kbase_mem_halt(struct kbase_device *kbdev);
+void kbase_mem_term(struct kbase_device *kbdev);
+
+static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_get(struct kbase_mem_phy_alloc *alloc)
+{
+	kref_get(&alloc->kref);
+	return alloc;
+}
+
+static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_put(struct kbase_mem_phy_alloc *alloc)
+{
+	kref_put(&alloc->kref, kbase_mem_kref_free);
+	return NULL;
+}
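+
+/* kbase_mem_phy_alloc_put() deliberately returns NULL so callers can drop
+ * their reference and clear their pointer in one statement (hypothetical
+ * usage):
+ *
+ *   alloc = kbase_mem_phy_alloc_put(alloc);
+ */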
+
+/**
+ * A GPU memory region, and attributes for CPU mappings.
+ */
+struct kbase_va_region {
+	struct rb_node rblink;
+	struct list_head link;
+
+	struct rb_root *rbtree;	/* Backlink to rb tree */
+
+	u64 start_pfn;		/* The PFN in GPU space */
+	size_t nr_pages;
+	/* Initial commit, for aligning the start address and correctly growing
+	 * KBASE_REG_TILER_ALIGN_TOP regions */
+	size_t initial_commit;
+
+/* Free region */
+#define KBASE_REG_FREE              (1ul << 0)
+/* CPU write access */
+#define KBASE_REG_CPU_WR            (1ul << 1)
+/* GPU write access */
+#define KBASE_REG_GPU_WR            (1ul << 2)
+/* No eXecute flag */
+#define KBASE_REG_GPU_NX            (1ul << 3)
+/* Is CPU cached? */
+#define KBASE_REG_CPU_CACHED        (1ul << 4)
+/* Is GPU cached?
+ * Some components within the GPU might only be able to access memory that is
+ * GPU cacheable. Refer to the specific GPU implementation for more details.
+ */
+#define KBASE_REG_GPU_CACHED        (1ul << 5)
+
+#define KBASE_REG_GROWABLE          (1ul << 6)
+/* Can grow on pf? */
+#define KBASE_REG_PF_GROW           (1ul << 7)
+
+/* Allocation doesn't straddle the 4GB boundary in GPU virtual space */
+#define KBASE_REG_GPU_VA_SAME_4GB_PAGE (1ul << 8)
+
+/* inner shareable coherency */
+#define KBASE_REG_SHARE_IN          (1ul << 9)
+/* inner & outer shareable coherency */
+#define KBASE_REG_SHARE_BOTH        (1ul << 10)
+
+/* Space for 4 different zones */
+#define KBASE_REG_ZONE_MASK         (3ul << 11)
+#define KBASE_REG_ZONE(x)           (((x) & 3) << 11)
+
+/* GPU read access */
+#define KBASE_REG_GPU_RD            (1ul<<13)
+/* CPU read access */
+#define KBASE_REG_CPU_RD            (1ul<<14)
+
+/* Index of chosen MEMATTR for this region (0..7) */
+#define KBASE_REG_MEMATTR_MASK      (7ul << 16)
+#define KBASE_REG_MEMATTR_INDEX(x)  (((x) & 7) << 16)
+#define KBASE_REG_MEMATTR_VALUE(x)  (((x) & KBASE_REG_MEMATTR_MASK) >> 16)
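+
+/* Illustrative round trip (not part of the driver): selecting MEMATTR index
+ * 2 for a region and reading it back:
+ *
+ *   reg->flags |= KBASE_REG_MEMATTR_INDEX(2);
+ *   idx = KBASE_REG_MEMATTR_VALUE(reg->flags);   (idx is now 2)
+ */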
+
+#define KBASE_REG_PROTECTED         (1ul << 19)
+
+#define KBASE_REG_DONT_NEED         (1ul << 20)
+
+/* Imported buffer is padded? */
+#define KBASE_REG_IMPORT_PAD        (1ul << 21)
+
+/* Bit 22 is reserved.
+ *
+ * Do not remove, use the next unreserved bit for new flags */
+#define KBASE_REG_RESERVED_BIT_22   (1ul << 22)
+
+/* The top of the initial commit is aligned to extent pages.
+ * Extent must be a power of 2 */
+#define KBASE_REG_TILER_ALIGN_TOP   (1ul << 23)
+
+/* Whilst this flag is set the GPU allocation is not supposed to be freed by
+ * user space. The flag will remain set for the lifetime of JIT allocations.
+ */
+#define KBASE_REG_NO_USER_FREE      (1ul << 24)
+
+/* Memory has permanent kernel side mapping */
+#define KBASE_REG_PERMANENT_KERNEL_MAPPING (1ul << 25)
+
+/* GPU VA region has been freed by the userspace, but still remains allocated
+ * due to the reference held by CPU mappings created on the GPU VA region.
+ *
+ * A region with this flag set has had kbase_gpu_munmap() called on it, but can
+ * still be looked-up in the region tracker as a non-free region. Hence must
+ * not create or update any more GPU mappings on such regions because they will
+ * not be unmapped when the region is finally destroyed.
+ *
+ * Since such regions are still present in the region tracker, new allocations
+ * attempted with BASE_MEM_SAME_VA might fail if their address intersects with
+ * a region with this flag set.
+ *
+ * In addition, this flag indicates the gpu_alloc member might no longer be
+ * valid, e.g. in infinite cache simulation.
+ */
+#define KBASE_REG_VA_FREED (1ul << 26)
+
+#define KBASE_REG_ZONE_SAME_VA      KBASE_REG_ZONE(0)
+
+/* only used with 32-bit clients */
+/*
+ * On a 32bit platform, custom VA should be wired from 4GB
+ * to the VA limit of the GPU. Unfortunately, the Linux mmap() interface
+ * limits us to 2^32 pages (2^44 bytes, see mmap64 man page for reference).
+ * So we put the default limit to the maximum possible on Linux and shrink
+ * it down, if required by the GPU, during initialization.
+ */
+
+#define KBASE_REG_ZONE_CUSTOM_VA         KBASE_REG_ZONE(1)
+#define KBASE_REG_ZONE_CUSTOM_VA_BASE    (0x100000000ULL >> PAGE_SHIFT)
+#define KBASE_REG_ZONE_CUSTOM_VA_SIZE    (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)
+/* end 32-bit clients only */
+
+/* The starting address and size of the GPU-executable zone are dynamic
+ * and depend on the platform and the number of pages requested by the
+ * user process, with an upper limit of 4 GB.
+ */
+#define KBASE_REG_ZONE_EXEC_VA           KBASE_REG_ZONE(2)
+#define KBASE_REG_ZONE_EXEC_VA_MAX_PAGES ((1ULL << 32) >> PAGE_SHIFT) /* 4 GB */
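+
+/* Illustrative sketch (not part of the driver API): the zone of a region is
+ * recovered by masking its flags, e.g.
+ *
+ *   if ((reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_CUSTOM_VA)
+ *           ...handle a custom-VA-zone region...
+ */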
+
+
+	unsigned long flags;
+
+	size_t extent; /* nr of pages alloc'd on PF */
+
+	struct kbase_mem_phy_alloc *cpu_alloc; /* the one alloc object we mmap to the CPU when mapping this region */
+	struct kbase_mem_phy_alloc *gpu_alloc; /* the one alloc object we mmap to the GPU when mapping this region */
+
+	/* List head used to store the region in the JIT allocation pool */
+	struct list_head jit_node;
+	/* The last JIT usage ID for this region */
+	u16 jit_usage_id;
+	/* The JIT bin this allocation came from */
+	u8 jit_bin_id;
+
+	int    va_refcnt; /* number of users of this va */
+};
+
+static inline bool kbase_is_region_free(struct kbase_va_region *reg)
+{
+	return (!reg || reg->flags & KBASE_REG_FREE);
+}
+
+static inline bool kbase_is_region_invalid(struct kbase_va_region *reg)
+{
+	return (!reg || reg->flags & KBASE_REG_VA_FREED);
+}
+
+static inline bool kbase_is_region_invalid_or_free(struct kbase_va_region *reg)
+{
+	/* Possibly not all functions that find regions would be using this
+	 * helper, so they need to be checked when maintaining this function.
+	 */
+	return (kbase_is_region_invalid(reg) ||	kbase_is_region_free(reg));
+}
+
+int kbase_remove_va_region(struct kbase_va_region *reg);
+static inline void kbase_region_refcnt_free(struct kbase_va_region *reg)
+{
+	/* If region was mapped then remove va region */
+	if (reg->start_pfn)
+		kbase_remove_va_region(reg);
+
+	/* To detect use-after-free in debug builds */
+	KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE);
+	kfree(reg);
+}
+
+static inline struct kbase_va_region *kbase_va_region_alloc_get(
+		struct kbase_context *kctx, struct kbase_va_region *region)
+{
+	lockdep_assert_held(&kctx->reg_lock);
+
+	WARN_ON(!region->va_refcnt);
+
+	/* non-atomic as kctx->reg_lock is held */
+	region->va_refcnt++;
+
+	return region;
+}
+
+static inline struct kbase_va_region *kbase_va_region_alloc_put(
+		struct kbase_context *kctx, struct kbase_va_region *region)
+{
+	lockdep_assert_held(&kctx->reg_lock);
+
+	WARN_ON(region->va_refcnt <= 0);
+	WARN_ON(region->flags & KBASE_REG_FREE);
+
+	/* non-atomic as kctx->reg_lock is held */
+	region->va_refcnt--;
+	if (!region->va_refcnt)
+		kbase_region_refcnt_free(region);
+
+	return NULL;
+}
+
+/* Common functions */
+static inline struct tagged_addr *kbase_get_cpu_phy_pages(
+		struct kbase_va_region *reg)
+{
+	KBASE_DEBUG_ASSERT(reg);
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
+
+	return reg->cpu_alloc->pages;
+}
+
+static inline struct tagged_addr *kbase_get_gpu_phy_pages(
+		struct kbase_va_region *reg)
+{
+	KBASE_DEBUG_ASSERT(reg);
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
+
+	return reg->gpu_alloc->pages;
+}
+
+static inline size_t kbase_reg_current_backed_size(struct kbase_va_region *reg)
+{
+	KBASE_DEBUG_ASSERT(reg);
+	/* if no alloc object the backed size naturally is 0 */
+	if (!reg->cpu_alloc)
+		return 0;
+
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
+
+	return reg->cpu_alloc->nents;
+}
+
+#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD ((size_t)(4*1024)) /* size above which vmalloc is used over kmalloc */
+
+static inline struct kbase_mem_phy_alloc *kbase_alloc_create(
+		struct kbase_context *kctx, size_t nr_pages,
+		enum kbase_memory_type type, int group_id)
+{
+	struct kbase_mem_phy_alloc *alloc;
+	size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
+	size_t per_page_size = sizeof(*alloc->pages);
+
+	/* Imported pages may have page private data already in use */
+	if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
+		alloc_size += nr_pages *
+				sizeof(*alloc->imported.user_buf.dma_addrs);
+		per_page_size += sizeof(*alloc->imported.user_buf.dma_addrs);
+	}
+
+	/*
+	 * Prevent nr_pages*per_page_size + sizeof(*alloc) from
+	 * wrapping around.
+	 */
+	if (nr_pages > ((((size_t) -1) - sizeof(*alloc))
+			/ per_page_size))
+		return ERR_PTR(-ENOMEM);
+
+	/* Allocate based on the size to reduce internal fragmentation of vmem */
+	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
+		alloc = vzalloc(alloc_size);
+	else
+		alloc = kzalloc(alloc_size, GFP_KERNEL);
+
+	if (!alloc)
+		return ERR_PTR(-ENOMEM);
+
+	if (type == KBASE_MEM_TYPE_NATIVE) {
+		alloc->imported.native.nr_struct_pages =
+				(alloc_size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+		kbase_process_page_usage_inc(kctx,
+				alloc->imported.native.nr_struct_pages);
+	}
+
+	/* Store allocation method */
+	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
+		alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE;
+
+	kref_init(&alloc->kref);
+	atomic_set(&alloc->gpu_mappings, 0);
+	alloc->nents = 0;
+	alloc->pages = (void *)(alloc + 1);
+	INIT_LIST_HEAD(&alloc->mappings);
+	alloc->type = type;
+	alloc->group_id = group_id;
+
+	if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF)
+		alloc->imported.user_buf.dma_addrs =
+				(void *) (alloc->pages + nr_pages);
+
+	return alloc;
+}
+
+static inline int kbase_reg_prepare_native(struct kbase_va_region *reg,
+		struct kbase_context *kctx, int group_id)
+{
+	KBASE_DEBUG_ASSERT(reg);
+	KBASE_DEBUG_ASSERT(!reg->cpu_alloc);
+	KBASE_DEBUG_ASSERT(!reg->gpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->flags & KBASE_REG_FREE);
+
+	reg->cpu_alloc = kbase_alloc_create(kctx, reg->nr_pages,
+			KBASE_MEM_TYPE_NATIVE, group_id);
+	if (IS_ERR(reg->cpu_alloc))
+		return PTR_ERR(reg->cpu_alloc);
+	else if (!reg->cpu_alloc)
+		return -ENOMEM;
+
+	reg->cpu_alloc->imported.native.kctx = kctx;
+	if (kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE)
+	    && (reg->flags & KBASE_REG_CPU_CACHED)) {
+		reg->gpu_alloc = kbase_alloc_create(kctx, reg->nr_pages,
+				KBASE_MEM_TYPE_NATIVE, group_id);
+		if (IS_ERR_OR_NULL(reg->gpu_alloc)) {
+			kbase_mem_phy_alloc_put(reg->cpu_alloc);
+			return -ENOMEM;
+		}
+		reg->gpu_alloc->imported.native.kctx = kctx;
+	} else {
+		reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+	}
+
+	mutex_lock(&kctx->jit_evict_lock);
+	INIT_LIST_HEAD(&reg->cpu_alloc->evict_node);
+	INIT_LIST_HEAD(&reg->gpu_alloc->evict_node);
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	reg->flags &= ~KBASE_REG_FREE;
+
+	return 0;
+}
+
+/*
+ * Max size for kbdev memory pool (in pages)
+ */
+#define KBASE_MEM_POOL_MAX_SIZE_KBDEV (SZ_64M >> PAGE_SHIFT)
+
+/*
+ * Max size for kctx memory pool (in pages)
+ */
+#define KBASE_MEM_POOL_MAX_SIZE_KCTX  (SZ_64M >> PAGE_SHIFT)
+
+/*
+ * The order required for a 2MB page allocation (2^order * 4KB = 2MB)
+ */
+#define KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER	9
+
+/*
+ * The order required for a 4KB page allocation
+ */
+#define KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER	0
+
+/**
+ * kbase_mem_pool_config_set_max_size - Set maximum number of free pages in
+ *                                      initial configuration of a memory pool
+ *
+ * @config:   Initial configuration for a physical memory pool
+ * @max_size: Maximum number of free pages that a pool created from
+ *            @config can hold
+ */
+static inline void kbase_mem_pool_config_set_max_size(
+	struct kbase_mem_pool_config *const config, size_t const max_size)
+{
+	WRITE_ONCE(config->max_size, max_size);
+}
+
+/**
+ * kbase_mem_pool_config_get_max_size - Get maximum number of free pages from
+ *                                      initial configuration of a memory pool
+ *
+ * @config: Initial configuration for a physical memory pool
+ *
+ * Return: Maximum number of free pages that a pool created from @config
+ *         can hold
+ */
+static inline size_t kbase_mem_pool_config_get_max_size(
+	const struct kbase_mem_pool_config *const config)
+{
+	return READ_ONCE(config->max_size);
+}
+
+/**
+ * kbase_mem_pool_init - Create a memory pool for a kbase device
+ * @pool:      Memory pool to initialize
+ * @config:    Initial configuration for the memory pool
+ * @order:     Page order for physical page size (order=0=>4kB, order=9=>2MB)
+ * @group_id:  A memory group ID to be passed to a platform-specific
+ *             memory group manager, if present.
+ *             Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ * @kbdev:     Kbase device where memory is used
+ * @next_pool: Pointer to the next pool or NULL.
+ *
+ * Allocations from @pool are in whole pages. Each @pool has a free list where
+ * pages can be quickly allocated from. The free list is initially empty and
+ * filled whenever pages are freed back to the pool. The number of free pages
+ * in the pool will in general not exceed @max_size, but the pool may in
+ * certain corner cases grow above @max_size.
+ *
+ * If @next_pool is not NULL, we will allocate from @next_pool before going to
+ * the memory group manager. Similarly pages can spill over to @next_pool when
+ * @pool is full. Pages are zeroed before they spill over to another pool, to
+ * prevent leaking information between applications.
+ *
+ * A shrinker is registered so that Linux mm can reclaim pages from the pool as
+ * needed.
+ *
+ * Return: 0 on success, negative -errno on error
+ */
+int kbase_mem_pool_init(struct kbase_mem_pool *pool,
+		const struct kbase_mem_pool_config *config,
+		unsigned int order,
+		int group_id,
+		struct kbase_device *kbdev,
+		struct kbase_mem_pool *next_pool);
+
+/**
+ * kbase_mem_pool_term - Destroy a memory pool
+ * @pool:  Memory pool to destroy
+ *
+ * Pages in the pool will spill over to @next_pool (if available) or freed to
+ * the kernel.
+ */
+void kbase_mem_pool_term(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_pool_alloc - Allocate a page from memory pool
+ * @pool:  Memory pool to allocate from
+ *
+ * Allocations from the pool are made as follows:
+ * 1. If there are free pages in the pool, allocate a page from @pool.
+ * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
+ *    from @next_pool.
+ * 3. Return NULL if no memory is available in either pool.
+ *
+ * Return: Pointer to allocated page, or NULL if allocation failed.
+ *
+ * Note: This function should not be used if the pool lock is held. Use
+ * kbase_mem_pool_alloc_locked() instead.
+ */
+struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_pool_alloc_locked - Allocate a page from memory pool
+ * @pool:  Memory pool to allocate from
+ *
+ * If there are free pages in the pool, this function allocates a page from
+ * @pool. This function does not use @next_pool.
+ *
+ * Return: Pointer to allocated page, or NULL if allocation failed.
+ *
+ * Note: Caller must hold the pool lock.
+ */
+struct page *kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_pool_free - Free a page to memory pool
+ * @pool:  Memory pool where page should be freed
+ * @page:  Page to free to the pool
+ * @dirty: Whether some of the page may be dirty in the cache.
+ *
+ * Pages are freed to the pool as follows:
+ * 1. If @pool is not full, add @page to @pool.
+ * 2. Otherwise, if @next_pool is not NULL and not full, add @page to
+ *    @next_pool.
+ * 3. Finally, free @page to the kernel.
+ *
+ * Note: This function should not be used if the pool lock is held. Use
+ * kbase_mem_pool_free_locked() instead.
+ */
+void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page,
+		bool dirty);
+
+/**
+ * kbase_mem_pool_free_locked - Free a page to memory pool
+ * @pool:  Memory pool where page should be freed
+ * @p:     Page to free to the pool
+ * @dirty: Whether some of the page may be dirty in the cache.
+ *
+ * If @pool is not full, this function adds @page to @pool. Otherwise, @page is
+ * freed to the kernel. This function does not use @next_pool.
+ *
+ * Note: Caller must hold the pool lock.
+ */
+void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
+		bool dirty);
+
+/**
+ * kbase_mem_pool_alloc_pages - Allocate pages from memory pool
+ * @pool:     Memory pool to allocate from
+ * @nr_4k_pages: Number of pages to allocate
+ * @pages:    Pointer to array where the physical address of the allocated
+ *            pages will be stored.
+ * @partial_allowed: Whether allocating fewer pages than requested is allowed
+ *
+ * Like kbase_mem_pool_alloc() but optimized for allocating many pages.
+ *
+ * Return:
+ * On success, the number of pages allocated (may be less than @nr_4k_pages
+ * if @partial_allowed).
+ * On error, a negative error code.
+ *
+ * Note: This function should not be used if the pool lock is held. Use
+ * kbase_mem_pool_alloc_pages_locked() instead.
+ *
+ * The caller must not hold vm_lock, as this could cause a deadlock if
+ * the kernel OoM killer runs. If the caller must allocate pages while holding
+ * this lock, it should use kbase_mem_pool_alloc_pages_locked() instead.
+ */
+int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
+		struct tagged_addr *pages, bool partial_allowed);
+
+/**
+ * kbase_mem_pool_alloc_pages_locked - Allocate pages from memory pool
+ * @pool:        Memory pool to allocate from
+ * @nr_4k_pages: Number of pages to allocate
+ * @pages:       Pointer to array where the physical address of the allocated
+ *               pages will be stored.
+ *
+ * Like kbase_mem_pool_alloc() but optimized for allocating many pages. This
+ * version does not allocate new pages from the kernel, and therefore will never
+ * trigger the OoM killer. Therefore, it can be run while the vm_lock is held.
+ *
+ * As new pages can not be allocated, the caller must ensure there are
+ * sufficient pages in the pool. Usage of this function should look like:
+ *
+ *   kbase_gpu_vm_lock(kctx);
+ *   kbase_mem_pool_lock(pool)
+ *   while (kbase_mem_pool_size(pool) < pages_required) {
+ *     kbase_mem_pool_unlock(pool)
+ *     kbase_gpu_vm_unlock(kctx);
+ *     kbase_mem_pool_grow(pool)
+ *     kbase_gpu_vm_lock(kctx);
+ *     kbase_mem_pool_lock(pool)
+ *   }
+ *   kbase_mem_pool_alloc_pages_locked(pool)
+ *   kbase_mem_pool_unlock(pool)
+ *   Perform other processing that requires vm_lock...
+ *   kbase_gpu_vm_unlock(kctx);
+ *
+ * This ensures that the pool can be grown to the required size and that the
+ * allocation can complete without another thread using the newly grown pages.
+ *
+ * Return:
+ * On success, the number of pages allocated.
+ * On error, a negative error code.
+ *
+ * Note: Caller must hold the pool lock.
+ */
+int kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool,
+		size_t nr_4k_pages, struct tagged_addr *pages);
+
+/**
+ * kbase_mem_pool_free_pages - Free pages to memory pool
+ * @pool:     Memory pool where pages should be freed
+ * @nr_pages: Number of pages to free
+ * @pages:    Pointer to array holding the physical addresses of the pages to
+ *            free.
+ * @dirty:    Whether any pages may be dirty in the cache.
+ * @reclaimed: Whether the pages were reclaimable and thus should bypass
+ *             the pool and go straight to the kernel.
+ *
+ * Like kbase_mem_pool_free() but optimized for freeing many pages.
+ */
+void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+		struct tagged_addr *pages, bool dirty, bool reclaimed);
+
+/**
+ * kbase_mem_pool_free_pages_locked - Free pages to memory pool
+ * @pool:     Memory pool where pages should be freed
+ * @nr_pages: Number of pages to free
+ * @pages:    Pointer to array holding the physical addresses of the pages to
+ *            free.
+ * @dirty:    Whether any pages may be dirty in the cache.
+ * @reclaimed: Whether the pages were reclaimable and thus should bypass
+ *             the pool and go straight to the kernel.
+ *
+ * Like kbase_mem_pool_free() but optimized for freeing many pages.
+ */
+void kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool,
+		size_t nr_pages, struct tagged_addr *pages, bool dirty,
+		bool reclaimed);
+
+/**
+ * kbase_mem_pool_size - Get number of free pages in memory pool
+ * @pool:  Memory pool to inspect
+ *
+ * Note: the size of the pool may in certain corner cases exceed @max_size!
+ *
+ * Return: Number of free pages in the pool
+ */
+static inline size_t kbase_mem_pool_size(struct kbase_mem_pool *pool)
+{
+	return READ_ONCE(pool->cur_size);
+}
+
+/**
+ * kbase_mem_pool_max_size - Get maximum number of free pages in memory pool
+ * @pool:  Memory pool to inspect
+ *
+ * Return: Maximum number of free pages in the pool
+ */
+static inline size_t kbase_mem_pool_max_size(struct kbase_mem_pool *pool)
+{
+	return pool->max_size;
+}
+
+/**
+ * kbase_mem_pool_set_max_size - Set maximum number of free pages in memory pool
+ * @pool:     Memory pool to inspect
+ * @max_size: Maximum number of free pages the pool can hold
+ *
+ * If @max_size is reduced, the pool will be shrunk to adhere to the new limit.
+ * For details see kbase_mem_pool_shrink().
+ */
+void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);
+
+/**
+ * kbase_mem_pool_grow - Grow the pool
+ * @pool:       Memory pool to grow
+ * @nr_to_grow: Number of pages to add to the pool
+ *
+ * Adds @nr_to_grow pages to the pool. Note that this may cause the pool to
+ * become larger than the maximum size specified.
+ *
+ * Return: 0 on success, -ENOMEM if unable to allocate sufficient pages
+ */
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow);
+
+/**
+ * kbase_mem_pool_trim - Grow or shrink the pool to a new size
+ * @pool:     Memory pool to trim
+ * @new_size: New number of pages in the pool
+ *
+ * If @new_size > @cur_size, fill the pool with new pages from the kernel, but
+ * not above the max_size for the pool.
+ * If @new_size < @cur_size, shrink the pool by freeing pages to the kernel.
+ */
+void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size);
+
+/**
+ * kbase_mem_pool_mark_dying - Mark that this pool is dying
+ * @pool:     Memory pool
+ *
+ * This will cause any ongoing allocation operations (eg growing on page fault)
+ * to be terminated.
+ */
+void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_alloc_page - Allocate a new page for a device
+ * @pool:  Memory pool to allocate a page from
+ *
+ * Most uses should use kbase_mem_pool_alloc to allocate a page. However, that
+ * function can fail in the event the pool is empty.
+ *
+ * Return: A new page or NULL if no memory
+ */
+struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_region_tracker_init - Initialize the region tracker data structure
+ * @kctx: kbase context
+ *
+ * Return: 0 if success, negative error code otherwise.
+ */
+int kbase_region_tracker_init(struct kbase_context *kctx);
+
+/**
+ * kbase_region_tracker_init_jit - Initialize the JIT region
+ * @kctx: kbase context
+ * @jit_va_pages: Size of the JIT region in pages
+ * @max_allocations: Maximum number of allocations allowed for the JIT region
+ * @trim_level: Trim level for the JIT region
+ * @group_id: The physical group ID from which to allocate JIT memory.
+ *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ *
+ * Return: 0 if success, negative error code otherwise.
+ */
+int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
+		u8 max_allocations, u8 trim_level, int group_id);
+
+/**
+ * kbase_region_tracker_init_exec - Initialize the EXEC_VA region
+ * @kctx: kbase context
+ * @exec_va_pages: Size of the EXEC_VA region in pages.
+ *                 It must not be greater than 4 GB.
+ *
+ * Return: 0 if success, negative error code otherwise.
+ */
+int kbase_region_tracker_init_exec(struct kbase_context *kctx, u64 exec_va_pages);
+
+/**
+ * kbase_region_tracker_term - Terminate the region tracker for the context
+ * @kctx: kbase context
+ */
+void kbase_region_tracker_term(struct kbase_context *kctx);
+
+/**
+ * kbase_region_tracker_term_rbtree - Free memory for a region tracker
+ *
+ * This will free all the regions within the region tracker
+ *
+ * @rbtree: Region tracker tree root
+ */
+void kbase_region_tracker_term_rbtree(struct rb_root *rbtree);
+
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(
+		struct kbase_context *kctx, u64 gpu_addr);
+struct kbase_va_region *kbase_find_region_enclosing_address(
+		struct rb_root *rbtree, u64 gpu_addr);
+
+/**
+ * @brief Check that a pointer is actually a valid region.
+ *
+ * Must be called with context lock held.
+ */
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(
+		struct kbase_context *kctx, u64 gpu_addr);
+struct kbase_va_region *kbase_find_region_base_address(struct rb_root *rbtree,
+		u64 gpu_addr);
+
+struct kbase_va_region *kbase_alloc_free_region(struct rb_root *rbtree,
+		u64 start_pfn, size_t nr_pages, int zone);
+void kbase_free_alloced_region(struct kbase_va_region *reg);
+int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg,
+		u64 addr, size_t nr_pages, size_t align);
+int kbase_add_va_region_rbtree(struct kbase_device *kbdev,
+		struct kbase_va_region *reg, u64 addr, size_t nr_pages,
+		size_t align);
+
+bool kbase_check_alloc_flags(unsigned long flags);
+bool kbase_check_import_flags(unsigned long flags);
+
+/**
+ * kbase_check_alloc_sizes - check user space sizes parameters for an
+ *                           allocation
+ *
+ * @kctx:         kbase context
+ * @flags:        The flags passed from user space
+ * @va_pages:     The size of the requested region, in pages.
+ * @commit_pages: Number of pages to commit initially.
+ * @extent:       Number of pages to grow by on GPU page fault and/or alignment
+ *                (depending on flags)
+ *
+ * Makes checks on the size parameters passed in from user space for a memory
+ * allocation call, with respect to the flags requested.
+ *
+ * Return: 0 if sizes are valid for these flags, negative error code otherwise
+ */
+int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
+		u64 va_pages, u64 commit_pages, u64 extent);
+
+/**
+ * kbase_update_region_flags - Convert user space flags to kernel region flags
+ *
+ * @kctx:  kbase context
+ * @reg:   The region to update the flags on
+ * @flags: The flags passed from user space
+ *
+ * The user space flag BASE_MEM_COHERENT_SYSTEM_REQUIRED will be rejected and
+ * this function will fail if the system does not support system coherency.
+ *
+ * Return: 0 if successful, -EINVAL if the flags are not supported
+ */
+int kbase_update_region_flags(struct kbase_context *kctx,
+		struct kbase_va_region *reg, unsigned long flags);
+
+void kbase_gpu_vm_lock(struct kbase_context *kctx);
+void kbase_gpu_vm_unlock(struct kbase_context *kctx);
+
+int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size);
+
+/**
+ * kbase_mmu_init - Initialise an object representing GPU page tables
+ *
+ * The structure should be terminated using kbase_mmu_term()
+ *
+ * @kbdev:    Instance of GPU platform device, allocated from the probe method.
+ * @mmut:     GPU page tables to be initialized.
+ * @kctx:     Optional kbase context, may be NULL if this set of MMU tables
+ *            is not associated with a context.
+ * @group_id: The physical group ID from which to allocate GPU page tables.
+ *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ *
+ * Return:    0 if successful, otherwise a negative error code.
+ */
+int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+		struct kbase_context *kctx, int group_id);
+/**
+ * kbase_mmu_term - Terminate an object representing GPU page tables
+ *
+ * This will free any page tables that have been allocated
+ *
+ * @kbdev: Instance of GPU platform device, allocated from the probe method.
+ * @mmut:  GPU page tables to be destroyed.
+ */
+void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut);
+
+/**
+ * kbase_mmu_create_ate - Create an address translation entry
+ *
+ * @kbdev:    Instance of GPU platform device, allocated from the probe method.
+ * @phy:      Physical address of the page to be mapped for GPU access.
+ * @flags:    Bitmask of attributes of the GPU memory region being mapped.
+ * @level:    Page table level for which to build an address translation entry.
+ * @group_id: The physical memory group in which the page was allocated.
+ *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ *
+ * This function creates an address translation entry to encode the physical
+ * address of a page to be mapped for access by the GPU, along with any extra
+ * attributes required for the GPU memory region.
+ *
+ * Return: An address translation entry, either in LPAE or AArch64 format
+ *         (depending on the driver's configuration).
+ */
+u64 kbase_mmu_create_ate(struct kbase_device *kbdev,
+	struct tagged_addr phy, unsigned long flags, int level, int group_id);
+
+int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
+				    struct kbase_mmu_table *mmut,
+				    const u64 start_vpfn,
+				    struct tagged_addr *phys, size_t nr,
+				    unsigned long flags, int group_id);
+int kbase_mmu_insert_pages(struct kbase_device *kbdev,
+			   struct kbase_mmu_table *mmut, u64 vpfn,
+			   struct tagged_addr *phys, size_t nr,
+			   unsigned long flags, int as_nr, int group_id);
+int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
+					struct tagged_addr phys, size_t nr,
+					unsigned long flags, int group_id);
+
+int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
+			     struct kbase_mmu_table *mmut, u64 vpfn,
+			     size_t nr, int as_nr);
+int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
+			   struct tagged_addr *phys, size_t nr,
+			   unsigned long flags, int const group_id);
+
+/**
+ * @brief Register region and map it on the GPU.
+ *
+ * Call kbase_add_va_region() and map the region on the GPU.
+ */
+int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);
+
+/**
+ * @brief Remove the region from the GPU and unregister it.
+ *
+ * Must be called with context lock held.
+ */
+int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg);
+
+/**
+ * kbase_mmu_update - Configure an address space on the GPU to the specified
+ *                    MMU tables
+ *
+ * The caller has the following locking conditions:
+ * - It must hold kbase_device->mmu_hw_mutex
+ * - It must hold the hwaccess_lock
+ *
+ * @kbdev: Kbase device structure
+ * @mmut:  The set of MMU tables to be configured on the address space
+ * @as_nr: The address space to be configured
+ */
+void kbase_mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+		int as_nr);
+
+/**
+ * kbase_mmu_disable() - Disable the MMU for a previously active kbase context.
+ * @kctx:	Kbase context
+ *
+ * Disable and perform the required cache maintenance to remove all the
+ * data from the provided kbase context from the GPU caches.
+ *
+ * The caller has the following locking conditions:
+ * - It must hold kbase_device->mmu_hw_mutex
+ * - It must hold the hwaccess_lock
+ */
+void kbase_mmu_disable(struct kbase_context *kctx);
+
+/**
+ * kbase_mmu_disable_as() - Set the MMU to unmapped mode for the specified
+ * address space.
+ * @kbdev:	Kbase device
+ * @as_nr:	The address space number to set to unmapped.
+ *
+ * This function must only be called during reset/power-up and it is used to
+ * ensure the registers are in a known state.
+ *
+ * The caller must hold kbdev->mmu_hw_mutex.
+ */
+void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr);
+
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
+
+/** Dump the MMU tables to a buffer
+ *
+ * This function allocates a buffer (of @c nr_pages pages) to hold a dump of the MMU tables and fills it. If the
+ * buffer is too small then the return value will be NULL.
+ *
+ * The GPU vm lock must be held when calling this function.
+ *
+ * The buffer returned should be freed with @ref vfree when it is no longer required.
+ *
+ * @param[in]   kctx        The kbase context to dump
+ * @param[in]   nr_pages    The number of pages to allocate for the buffer.
+ *
+ * @return The address of the buffer containing the MMU dump or NULL on error
+ * (including if @c nr_pages is too small)
+ */
+void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages);
+
+/**
+ * kbase_sync_now - Perform cache maintenance on a memory region
+ *
+ * @kctx: The kbase context of the region
+ * @sset: A syncset structure describing the region and direction of the
+ *        synchronisation required
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_sync_now(struct kbase_context *kctx, struct basep_syncset *sset);
+void kbase_sync_single(struct kbase_context *kctx, struct tagged_addr cpu_pa,
+		struct tagged_addr gpu_pa, off_t offset, size_t size,
+		enum kbase_sync_type sync_fn);
+void kbase_pre_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
+void kbase_post_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
+
+/* OS specific functions */
+int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr);
+int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg);
+void kbase_os_mem_map_lock(struct kbase_context *kctx);
+void kbase_os_mem_map_unlock(struct kbase_context *kctx);
+
+/**
+ * @brief Update the memory allocation counters for the current process
+ *
+ * OS specific call to updates the current memory allocation counters for the current process with
+ * the supplied delta.
+ *
+ * @param[in] kctx  The kbase context
+ * @param[in] pages The desired delta to apply to the memory usage counters.
+ */
+
+void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages);
+
+/**
+ * @brief Add to the memory allocation counters for the current process
+ *
+ * OS specific call to add to the current memory allocation counters for the current process by
+ * the supplied amount.
+ *
+ * @param[in] kctx  The kernel base context used for the allocation.
+ * @param[in] pages The desired delta to apply to the memory usage counters.
+ */
+
+static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages)
+{
+	kbasep_os_process_page_usage_update(kctx, pages);
+}
+
+/**
+ * @brief Subtract from the memory allocation counters for the current process
+ *
+ * OS specific call to subtract from the current memory allocation counters for the current process by
+ * the supplied amount.
+ *
+ * @param[in] kctx  The kernel base context used for the allocation.
+ * @param[in] pages The desired delta to apply to the memory usage counters.
+ */
+
+static inline void kbase_process_page_usage_dec(struct kbase_context *kctx, int pages)
+{
+	kbasep_os_process_page_usage_update(kctx, 0 - pages);
+}
+
+/**
+ * kbasep_find_enclosing_cpu_mapping_offset() - Find the offset of the CPU
+ * mapping of a memory allocation containing a given address range
+ *
+ * Searches for a CPU mapping of any part of any region that fully encloses the
+ * CPU virtual address range specified by @uaddr and @size. Returns a failure
+ * indication if only part of the address range lies within a CPU mapping.
+ *
+ * @kctx:      The kernel base context used for the allocation.
+ * @uaddr:     Start of the CPU virtual address range.
+ * @size:      Size of the CPU virtual address range (in bytes).
+ * @offset:    The offset from the start of the allocation to the specified CPU
+ *             virtual address.
+ *
+ * Return: 0 if offset was obtained successfully. Error code otherwise.
+ */
+int kbasep_find_enclosing_cpu_mapping_offset(
+		struct kbase_context *kctx,
+		unsigned long uaddr, size_t size, u64 *offset);
+
+/**
+ * kbasep_find_enclosing_gpu_mapping_start_and_offset() - Find the address of
+ * the start of GPU virtual memory region which encloses @gpu_addr for the
+ * @size length in bytes
+ *
+ * Searches for the memory region in GPU virtual memory space which contains
+ * the region defined by the @gpu_addr and @size, where @gpu_addr is the
+ * beginning and @size the length in bytes of the provided region. If found,
+ * the location of the start address of the GPU virtual memory region is
+ * passed in @start pointer and the location of the offset of the region into
+ * the GPU virtual memory region is passed in @offset pointer.
+ *
+ * @kctx:	The kernel base context within which the memory is searched.
+ * @gpu_addr:	GPU virtual address for which the region is sought; defines
+ *              the beginning of the provided region.
+ * @size:       The length (in bytes) of the provided region for which the
+ *              GPU virtual memory region is sought.
+ * @start:      Pointer to the location where the address of the start of
+ *              the found GPU virtual memory region is.
+ * @offset:     Pointer to the location where the offset of @gpu_addr into
+ *              the found GPU virtual memory region is.
+ */
+int kbasep_find_enclosing_gpu_mapping_start_and_offset(
+		struct kbase_context *kctx,
+		u64 gpu_addr, size_t size, u64 *start, u64 *offset);
+
+enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer);
+void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+
+/**
+ * kbase_alloc_phy_pages_helper - Allocates physical pages.
+ * @alloc:              allocation object to add pages to
+ * @nr_pages_requested: number of physical pages to allocate
+ *
+ * Allocates @nr_pages_requested and updates the alloc object.
+ *
+ * Return: 0 if all pages have been successfully allocated. Error code otherwise
+ *
+ * Note: The caller must not hold vm_lock, as this could cause a deadlock if
+ * the kernel OoM killer runs. If the caller must allocate pages while holding
+ * this lock, it should use kbase_mem_pool_alloc_pages_locked() instead.
+ *
+ * This function cannot be used from interrupt context
+ */
+int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
+		size_t nr_pages_requested);
+
+/**
+ * kbase_alloc_phy_pages_helper_locked - Allocates physical pages.
+ * @alloc:              allocation object to add pages to
+ * @pool:               Memory pool to allocate from
+ * @nr_pages_requested: number of physical pages to allocate
+ * @prealloc_sa:        Information about the partial allocation if the amount
+ *                      of memory requested is not a multiple of 2MB. One
+ *                      instance of struct kbase_sub_alloc must be allocated by
+ *                      the caller iff CONFIG_MALI_2MB_ALLOC is enabled.
+ *
+ * Allocates @nr_pages_requested and updates the alloc object. This function
+ * does not allocate new pages from the kernel, and therefore will never trigger
+ * the OoM killer. Therefore, it can be run while the vm_lock is held.
+ *
+ * As new pages can not be allocated, the caller must ensure there are
+ * sufficient pages in the pool. Usage of this function should look like:
+ *
+ *   kbase_gpu_vm_lock(kctx);
+ *   kbase_mem_pool_lock(pool)
+ *   while (kbase_mem_pool_size(pool) < pages_required) {
+ *     kbase_mem_pool_unlock(pool)
+ *     kbase_gpu_vm_unlock(kctx);
+ *     kbase_mem_pool_grow(pool)
+ *     kbase_gpu_vm_lock(kctx);
+ *     kbase_mem_pool_lock(pool)
+ *   }
+ *   kbase_alloc_phy_pages_helper_locked(pool)
+ *   kbase_mem_pool_unlock(pool)
+ *   Perform other processing that requires vm_lock...
+ *   kbase_gpu_vm_unlock(kctx);
+ *
+ * This ensures that the pool can be grown to the required size and that the
+ * allocation can complete without another thread using the newly grown pages.
+ *
+ * If CONFIG_MALI_2MB_ALLOC is defined and the allocation is >= 2MB, then
+ * @pool must be alloc->imported.native.kctx->lp_mem_pool. Otherwise it must be
+ * alloc->imported.native.kctx->mem_pool.
+ * @prealloc_sa is used to manage the non-2MB sub-allocation. It has to be
+ * pre-allocated because we must not sleep (due to the usage of kmalloc())
+ * whilst holding pool->pool_lock.
+ * @prealloc_sa shall be set to NULL if it has been consumed by this function
+ * to indicate that the caller must not free it.
+ *
+ * Return: Pointer to array of allocated pages. NULL on failure.
+ *
+ * Note: Caller must hold pool->pool_lock
+ */
+struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
+		struct kbase_mem_phy_alloc *alloc, struct kbase_mem_pool *pool,
+		size_t nr_pages_requested,
+		struct kbase_sub_alloc **prealloc_sa);
+
+/**
+ * @brief Free physical pages.
+ *
+ * Frees \a nr_pages_to_free and updates the alloc object.
+ *
+ * @param[in] alloc allocation object to free pages from
+ * @param[in] nr_pages_to_free number of physical pages to free
+ *
+ * Return: 0 on success, otherwise a negative error code
+ */
+int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free);
+
+/**
+ * kbase_free_phy_pages_helper_locked - Free pages allocated with
+ *                                      kbase_alloc_phy_pages_helper_locked()
+ * @alloc:            Allocation object to free pages from
+ * @pool:             Memory pool to return freed pages to
+ * @pages:            Pages allocated by kbase_alloc_phy_pages_helper_locked()
+ * @nr_pages_to_free: Number of physical pages to free
+ *
+ * This function atomically frees pages allocated with
+ * kbase_alloc_phy_pages_helper_locked(). @pages is the pointer to the page
+ * array that is returned by that function. @pool must be the pool that the
+ * pages were originally allocated from.
+ *
+ * If the mem_pool has been unlocked since the allocation then
+ * kbase_free_phy_pages_helper() should be used instead.
+ */
+void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
+		struct kbase_mem_pool *pool, struct tagged_addr *pages,
+		size_t nr_pages_to_free);
+
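+/**
+ * kbase_set_dma_addr - Stash a page's DMA address in its struct page
+ * @p:        Page to tag
+ * @dma_addr: DMA address of @p, expected to be page aligned
+ *
+ * The address is kept in page->private so that it can be retrieved later
+ * with kbase_dma_addr().
+ */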
+static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr)
+{
+	SetPagePrivate(p);
+	if (sizeof(dma_addr_t) > sizeof(p->private)) {
+		/* on 32-bit ARM with LPAE dma_addr_t becomes larger, but the
+		 * private field stays the same. So we have to be clever and
+		 * use the fact that we only store DMA addresses of whole pages,
+		 * so the low bits should be zero */
+		KBASE_DEBUG_ASSERT(!(dma_addr & (PAGE_SIZE - 1)));
+		set_page_private(p, dma_addr >> PAGE_SHIFT);
+	} else {
+		set_page_private(p, dma_addr);
+	}
+}
+
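+/**
+ * kbase_dma_addr - Retrieve the DMA address stored by kbase_set_dma_addr()
+ * @p: Page to query
+ *
+ * Return: DMA address of @p.
+ */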
+static inline dma_addr_t kbase_dma_addr(struct page *p)
+{
+	if (sizeof(dma_addr_t) > sizeof(p->private))
+		return ((dma_addr_t)page_private(p)) << PAGE_SHIFT;
+
+	return (dma_addr_t)page_private(p);
+}
+
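+/**
+ * kbase_clear_dma_addr - Clear the private (DMA address) state of a page
+ * @p: Page to clear
+ */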
+static inline void kbase_clear_dma_addr(struct page *p)
+{
+	ClearPagePrivate(p);
+}
+
+/**
+ * kbase_mmu_interrupt_process - Process a bus or page fault.
+ * @kbdev:  The kbase_device the fault happened on
+ * @kctx:   The kbase_context for the faulting address space, if one was found
+ * @as:     The address space that has the fault
+ * @fault:  Data relating to the fault
+ *
+ * This function will process a fault on a specific address space
+ */
+void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
+		struct kbase_context *kctx, struct kbase_as *as,
+		struct kbase_fault *fault);
+
+/**
+ * page_fault_worker - Process a page fault.
+ * @data: work_struct passed by queue_work()
+ */
+void page_fault_worker(struct work_struct *data);
+
+/**
+ * bus_fault_worker - Process a bus fault.
+ * @data: work_struct passed by queue_work()
+ */
+void bus_fault_worker(struct work_struct *data);
+
+/**
+ * kbase_flush_mmu_wqs - Flush MMU workqueues.
+ * @kbdev: Device pointer
+ *
+ * This function will cause any outstanding page or bus faults to be processed.
+ * It should be called prior to powering off the GPU.
+ */
+void kbase_flush_mmu_wqs(struct kbase_device *kbdev);
+
+/**
+ * kbase_sync_single_for_device - update physical memory and give GPU ownership
+ * @kbdev: Device pointer
+ * @handle: DMA address of region
+ * @size: Size of region to sync
+ * @dir:  DMA data direction
+ */
+void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir);
+
+/**
+ * kbase_sync_single_for_cpu - update physical memory and give CPU ownership
+ * @kbdev: Device pointer
+ * @handle: DMA address of region
+ * @size: Size of region to sync
+ * @dir:  DMA data direction
+ */
+void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir);
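+
+/*
+ * Illustrative pairing of the two sync helpers (a sketch only; p and size
+ * are assumed to describe a page whose DMA address was stored with
+ * kbase_set_dma_addr()):
+ *
+ *   kbase_sync_single_for_device(kbdev, kbase_dma_addr(p), size,
+ *     DMA_TO_DEVICE);
+ *   ... GPU reads the memory ...
+ *   kbase_sync_single_for_cpu(kbdev, kbase_dma_addr(p), size,
+ *     DMA_FROM_DEVICE);
+ *   ... CPU reads the results ...
+ */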
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * kbase_jit_debugfs_init - Add per context debugfs entry for JIT.
+ * @kctx: kbase context
+ */
+void kbase_jit_debugfs_init(struct kbase_context *kctx);
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * kbase_jit_init - Initialize the JIT memory pool management
+ * @kctx: kbase context
+ *
+ * Return: Zero on success or negative error number on failure.
+ */
+int kbase_jit_init(struct kbase_context *kctx);
+
+/**
+ * kbase_jit_allocate - Allocate JIT memory
+ * @kctx: kbase context
+ * @info: JIT allocation information
+ *
+ * Return: JIT allocation on success or NULL on failure.
+ */
+struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
+		struct base_jit_alloc_info *info);
+
+/**
+ * kbase_jit_free - Free a JIT allocation
+ * @kctx: kbase context
+ * @reg: JIT allocation
+ *
+ * Frees a JIT allocation and places it into the free pool for later reuse.
+ */
+void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg);
+
+/**
+ * kbase_jit_backing_lost - Inform JIT that an allocation has lost backing
+ * @reg: JIT allocation
+ */
+void kbase_jit_backing_lost(struct kbase_va_region *reg);
+
+/**
+ * kbase_jit_evict - Evict a JIT allocation from the pool
+ * @kctx: kbase context
+ *
+ * Evict the least recently used JIT allocation from the pool. This can be
+ * required if normal VA allocations are failing due to VA exhaustion.
+ *
+ * Return: True if a JIT allocation was freed, false otherwise.
+ */
+bool kbase_jit_evict(struct kbase_context *kctx);
+
+/**
+ * kbase_jit_term - Terminate the JIT memory pool management
+ * @kctx: kbase context
+ */
+void kbase_jit_term(struct kbase_context *kctx);
+
+/**
+ * kbase_has_exec_va_zone - EXEC_VA zone predicate
+ * @kctx: kbase context
+ *
+ * Determine whether an EXEC_VA zone has been created for the GPU address space
+ * of the given kbase context.
+ *
+ * Return: True if the kbase context has an EXEC_VA zone.
+ */
+bool kbase_has_exec_va_zone(struct kbase_context *kctx);
+
+/**
+ * kbase_map_external_resource - Map an external resource to the GPU.
+ * @kctx:              kbase context.
+ * @reg:               The region to map.
+ * @locked_mm:         The mm_struct which has been locked for this operation.
+ *
+ * Return: The physical allocation which backs the region on success or NULL
+ * on failure.
+ */
+struct kbase_mem_phy_alloc *kbase_map_external_resource(
+		struct kbase_context *kctx, struct kbase_va_region *reg,
+		struct mm_struct *locked_mm);
+
+/**
+ * kbase_unmap_external_resource - Unmap an external resource from the GPU.
+ * @kctx:  kbase context.
+ * @reg:   The region to unmap or NULL if it has already been released.
+ * @alloc: The physical allocation being unmapped.
+ */
+void kbase_unmap_external_resource(struct kbase_context *kctx,
+		struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc);
+
+/**
+ * kbase_jd_user_buf_pin_pages - Pin the pages of a user buffer.
+ * @kctx: kbase context.
+ * @reg:  The region associated with the imported user buffer.
+ *
+ * To successfully pin the pages for a user buffer the current mm_struct must
+ * be the same as the mm_struct of the user buffer. After the pages have been
+ * successfully pinned, further calls to this function succeed without doing
+ * any work.
+ *
+ * Return: zero on success or negative number on failure.
+ */
+int kbase_jd_user_buf_pin_pages(struct kbase_context *kctx,
+		struct kbase_va_region *reg);
+
+/**
+ * kbase_sticky_resource_init - Initialize sticky resource management.
+ * @kctx: kbase context
+ *
+ * Return: Zero on success or negative error number on failure.
+ */
+int kbase_sticky_resource_init(struct kbase_context *kctx);
+
+/**
+ * kbase_sticky_resource_acquire - Acquire a reference on a sticky resource.
+ * @kctx:     kbase context.
+ * @gpu_addr: The GPU address of the external resource.
+ *
+ * Return: The metadata object which represents the binding between the
+ * external resource and the kbase context on success or NULL on failure.
+ */
+struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
+		struct kbase_context *kctx, u64 gpu_addr);
+
+/**
+ * kbase_sticky_resource_release - Release a reference on a sticky resource.
+ * @kctx:     kbase context.
+ * @meta:     Binding metadata.
+ * @gpu_addr: GPU address of the external resource.
+ *
+ * If meta is NULL then gpu_addr will be used to scan the metadata list and
+ * find the matching metadata (if any), otherwise the provided meta will be
+ * used and gpu_addr will be ignored.
+ *
+ * Return: True if the release found the metadata and the reference was dropped.
+ */
+bool kbase_sticky_resource_release(struct kbase_context *kctx,
+		struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr);
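+
+/*
+ * Illustrative use of the sticky resource API (a sketch only; gpu_addr is
+ * assumed to refer to a previously imported external resource):
+ *
+ *   struct kbase_ctx_ext_res_meta *meta;
+ *
+ *   meta = kbase_sticky_resource_acquire(kctx, gpu_addr);
+ *   if (meta) {
+ *     ... use the external resource ...
+ *     kbase_sticky_resource_release(kctx, meta, 0);
+ *   }
+ */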
+
+/**
+ * kbase_sticky_resource_term - Terminate sticky resource management.
+ * @kctx: kbase context
+ */
+void kbase_sticky_resource_term(struct kbase_context *kctx);
+
+/**
+ * kbase_mem_pool_lock - Lock a memory pool
+ * @pool: Memory pool to lock
+ */
+static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
+{
+	spin_lock(&pool->pool_lock);
+}
+
+/**
+ * kbase_mem_pool_unlock - Release a memory pool
+ * @pool: Memory pool to release
+ */
+static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
+{
+	spin_unlock(&pool->pool_lock);
+}
+
+/**
+ * kbase_mem_evictable_mark_reclaim - Mark the pages as reclaimable.
+ * @alloc: The physical allocation
+ */
+void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc);
+
+/**
+ * kbase_mem_umm_map - Map dma-buf
+ * @kctx: Pointer to the kbase context
+ * @reg: Pointer to the region of the imported dma-buf to map
+ *
+ * Map a dma-buf on the GPU. The mappings are reference counted.
+ *
+ * Return: 0 on success, or a negative error code.
+ */
+int kbase_mem_umm_map(struct kbase_context *kctx,
+		struct kbase_va_region *reg);
+
+/**
+ * kbase_mem_umm_unmap - Unmap dma-buf
+ * @kctx: Pointer to the kbase context
+ * @reg: Pointer to the region of the imported dma-buf to unmap
+ * @alloc: Pointer to the alloc to release
+ *
+ * Unmap a dma-buf from the GPU. The mappings are reference counted.
+ *
+ * @reg must be the original region with GPU mapping of @alloc; or NULL. If
+ * @reg is NULL, or doesn't match @alloc, the GPU page table entries matching
+ * @reg will not be updated.
+ *
+ * @alloc must be a valid physical allocation of type
+ * KBASE_MEM_TYPE_IMPORTED_UMM that was previously mapped by
+ * kbase_mem_umm_map(). The dma-buf attachment referenced by @alloc will
+ * release its mapping reference, and if the refcount reaches 0, it will also
+ * be unmapped, regardless of the value of @reg.
+ */
+void kbase_mem_umm_unmap(struct kbase_context *kctx,
+		struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc);
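+
+/*
+ * Illustrative pairing (a sketch only): map an imported dma-buf region
+ * before GPU use and unmap it afterwards. Both calls expect the region
+ * lock to be held:
+ *
+ *   kbase_gpu_vm_lock(kctx);
+ *   if (!kbase_mem_umm_map(kctx, reg)) {
+ *     ... submit GPU work using the imported memory ...
+ *     kbase_mem_umm_unmap(kctx, reg, reg->gpu_alloc);
+ *   }
+ *   kbase_gpu_vm_unlock(kctx);
+ */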
+
+/**
+ * kbase_mem_do_sync_imported - Sync caches for imported memory
+ * @kctx: Pointer to the kbase context
+ * @reg: Pointer to the region with imported memory to sync
+ * @sync_fn: The type of sync operation to perform
+ *
+ * Sync CPU caches for supported (currently only dma-buf (UMM)) memory.
+ * Attempting to sync unsupported imported memory types will result in an error
+ * code, -EINVAL.
+ *
+ * Return: 0 on success, or a negative error code.
+ */
+int kbase_mem_do_sync_imported(struct kbase_context *kctx,
+		struct kbase_va_region *reg, enum kbase_sync_type sync_fn);
+
+#endif				/* _KBASE_MEM_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
new file mode 100644
index 0000000..50a74ad
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
@@ -0,0 +1,2948 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem_linux.c
+ * Base kernel memory APIs, Linux implementation.
+ */
+
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+#include <linux/dma-attrs.h>
+#endif /* LINUX_VERSION_CODE >= 3.5.0 && < 4.8.0 */
+#include <linux/dma-buf.h>
+#include <linux/shrinker.h>
+#include <linux/cache.h>
+#include <linux/memory_group_manager.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_tracepoints.h>
+#include <mali_kbase_ioctl.h>
+
+#if ((KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE) || \
+	(KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE))
+/* Enable workaround for ion for kernels prior to v5.0.0 and from v5.3.0
+ * onwards.
+ *
+ * For kernels prior to v4.12, workaround is needed as ion lacks the cache
+ * maintenance in begin_cpu_access and end_cpu_access methods.
+ *
+ * For kernels prior to v4.17.2, workaround is needed to avoid the potentially
+ * disruptive warnings which can come if begin_cpu_access and end_cpu_access
+ * methods are not called in pairs.
+ * Note that some long term maintenance kernel versions (e.g. 4.9.x, 4.14.x)
+ * only require this workaround on their earlier releases. However it is still
+ * safe to use it on such releases, and it simplifies the version check.
+ *
+ * For kernels later than v4.17.2, workaround is needed as ion can potentially
+ * end up calling dma_sync_sg_for_* for a dma-buf importer that hasn't mapped
+ * the attachment. This would result in a kernel panic as ion populates the
+ * dma_address when the attachment is mapped and kernel derives the physical
+ * address for cache maintenance from the dma_address.
+ * With some multi-threaded tests it has been seen that the same dma-buf memory
+ * gets imported twice on Mali DDK side and so the problem of sync happening
+ * with an importer having an unmapped attachment comes at the time of 2nd
+ * import. The same problem can arise if there is another importer of the
+ * dma-buf memory.
+ *
+ * The workaround can be safely disabled for kernels between v5.0.0 and
+ * v5.2.2, as none of the issues stated above are present there.
+ *
+ * dma_sync_sg_for_* calls will be made directly as a workaround using the
+ * Kbase's attachment to dma-buf that was previously mapped.
+ */
+#define KBASE_MEM_ION_SYNC_WORKAROUND
+#endif
+
+static int kbase_vmap_phy_pages(struct kbase_context *kctx,
+		struct kbase_va_region *reg, u64 offset_bytes, size_t size,
+		struct kbase_vmap_struct *map);
+static void kbase_vunmap_phy_pages(struct kbase_context *kctx,
+		struct kbase_vmap_struct *map);
+
+static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma);
+
+/* Retrieve the associated region pointer if the GPU address corresponds to
+ * one of the event memory pages. The enclosing region, if found, shouldn't
+ * have been marked as free.
+ */
+static struct kbase_va_region *kbase_find_event_mem_region(
+			struct kbase_context *kctx, u64 gpu_addr)
+{
+	return NULL;
+}
+
+/**
+ * kbase_phy_alloc_mapping_init - Initialize the kernel side permanent mapping
+ *                                of the physical allocation belonging to a
+ *                                region
+ * @kctx:  The kernel base context @reg belongs to.
+ * @reg:   The region whose physical allocation is to be mapped
+ * @vsize: The size of the requested region, in pages
+ * @size:  The size in pages initially committed to the region
+ *
+ * Return: 0 on success, otherwise an error code indicating failure
+ *
+ * Maps the physical allocation backing a non-free @reg, so it may be
+ * accessed directly from the kernel. This is only supported for physical
+ * allocations of type KBASE_MEM_TYPE_NATIVE, and will fail for other types of
+ * physical allocation.
+ *
+ * The mapping is stored directly in the allocation that backs @reg. The
+ * refcount is not incremented at this point. Instead, use of the mapping should
+ * be surrounded by kbase_phy_alloc_mapping_get() and
+ * kbase_phy_alloc_mapping_put() to ensure it does not disappear whilst the
+ * client is accessing it.
+ *
+ * Both cached and uncached regions are allowed, but any sync operations are the
+ * responsibility of the client using the permanent mapping.
+ *
+ * A number of checks are made to ensure that a region that needs a permanent
+ * mapping can actually be supported:
+ * - The region must be created as fully backed
+ * - The region must not be growable
+ *
+ * This function will fail if those checks are not satisfied.
+ *
+ * On success, the region will also be forced into a certain kind:
+ * - It will no longer be growable
+ */
+static int kbase_phy_alloc_mapping_init(struct kbase_context *kctx,
+		struct kbase_va_region *reg, size_t vsize, size_t size)
+{
+	size_t size_bytes = (size << PAGE_SHIFT);
+	struct kbase_vmap_struct *kern_mapping;
+	int err = 0;
+
+	/* Can only map in regions that are always fully committed
+	 * Don't setup the mapping twice
+	 * Only support KBASE_MEM_TYPE_NATIVE allocations
+	 */
+	if (vsize != size || reg->cpu_alloc->permanent_map != NULL ||
+			reg->cpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
+		return -EINVAL;
+
+	if (size > (KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES -
+			atomic_read(&kctx->permanent_mapped_pages))) {
+		dev_warn(kctx->kbdev->dev, "Request for %llu more pages of memory needing a permanent mapping would breach limit %lu, currently at %d pages",
+				(u64)size,
+				KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES,
+				atomic_read(&kctx->permanent_mapped_pages));
+		return -ENOMEM;
+	}
+
+	kern_mapping = kzalloc(sizeof(*kern_mapping), GFP_KERNEL);
+	if (!kern_mapping)
+		return -ENOMEM;
+
+	err = kbase_vmap_phy_pages(kctx, reg, 0u, size_bytes, kern_mapping);
+	if (err < 0)
+		goto vmap_fail;
+
+	/* No support for growing or shrinking mapped regions */
+	reg->flags &= ~KBASE_REG_GROWABLE;
+
+	reg->cpu_alloc->permanent_map = kern_mapping;
+	atomic_add(size, &kctx->permanent_mapped_pages);
+
+	return 0;
+vmap_fail:
+	kfree(kern_mapping);
+	return err;
+}
+
+void kbase_phy_alloc_mapping_term(struct kbase_context *kctx,
+		struct kbase_mem_phy_alloc *alloc)
+{
+	WARN_ON(!alloc->permanent_map);
+	kbase_vunmap_phy_pages(kctx, alloc->permanent_map);
+	kfree(alloc->permanent_map);
+
+	alloc->permanent_map = NULL;
+
+	/* Mappings are only done on cpu_alloc, so don't need to worry about
+	 * this being reduced a second time if a separate gpu_alloc is
+	 * freed
+	 */
+	WARN_ON(alloc->nents > atomic_read(&kctx->permanent_mapped_pages));
+	atomic_sub(alloc->nents, &kctx->permanent_mapped_pages);
+}
+
+void *kbase_phy_alloc_mapping_get(struct kbase_context *kctx,
+		u64 gpu_addr,
+		struct kbase_vmap_struct **out_kern_mapping)
+{
+	struct kbase_va_region *reg;
+	void *kern_mem_ptr = NULL;
+	struct kbase_vmap_struct *kern_mapping;
+	u64 mapping_offset;
+
+	WARN_ON(!kctx);
+	WARN_ON(!out_kern_mapping);
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* First do a quick lookup in the list of event memory regions */
+	reg = kbase_find_event_mem_region(kctx, gpu_addr);
+
+	if (!reg) {
+		reg = kbase_region_tracker_find_region_enclosing_address(
+			kctx, gpu_addr);
+	}
+
+	if (kbase_is_region_invalid_or_free(reg))
+		goto out_unlock;
+
+	kern_mapping = reg->cpu_alloc->permanent_map;
+	if (kern_mapping == NULL)
+		goto out_unlock;
+
+	mapping_offset = gpu_addr - (reg->start_pfn << PAGE_SHIFT);
+
+	/* Refcount the allocations to prevent them disappearing */
+	WARN_ON(reg->cpu_alloc != kern_mapping->cpu_alloc);
+	WARN_ON(reg->gpu_alloc != kern_mapping->gpu_alloc);
+	(void)kbase_mem_phy_alloc_get(kern_mapping->cpu_alloc);
+	(void)kbase_mem_phy_alloc_get(kern_mapping->gpu_alloc);
+
+	kern_mem_ptr = (void *)((uintptr_t)kern_mapping->addr + mapping_offset);
+	*out_kern_mapping = kern_mapping;
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return kern_mem_ptr;
+}
+
+void kbase_phy_alloc_mapping_put(struct kbase_context *kctx,
+		struct kbase_vmap_struct *kern_mapping)
+{
+	WARN_ON(!kctx);
+	WARN_ON(!kern_mapping);
+
+	WARN_ON(kctx != kern_mapping->cpu_alloc->imported.native.kctx);
+	WARN_ON(kern_mapping != kern_mapping->cpu_alloc->permanent_map);
+
+	kbase_mem_phy_alloc_put(kern_mapping->cpu_alloc);
+	kbase_mem_phy_alloc_put(kern_mapping->gpu_alloc);
+
+	/* kern_mapping and the gpu/cpu phy allocs backing it must not be used
+	 * from now on
+	 */
+}
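+
+/*
+ * Illustrative use of the permanent mapping accessors (a sketch only;
+ * gpu_addr is assumed to lie in a region whose allocation carries a
+ * permanent kernel mapping, see KBASE_REG_PERMANENT_KERNEL_MAPPING):
+ *
+ *   struct kbase_vmap_struct *mapping;
+ *   void *ptr;
+ *
+ *   ptr = kbase_phy_alloc_mapping_get(kctx, gpu_addr, &mapping);
+ *   if (ptr) {
+ *     ... read or write through ptr ...
+ *     kbase_phy_alloc_mapping_put(kctx, mapping);
+ *   }
+ */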
+
+struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
+		u64 va_pages, u64 commit_pages, u64 extent, u64 *flags,
+		u64 *gpu_va)
+{
+	int zone;
+	struct kbase_va_region *reg;
+	struct rb_root *rbtree;
+	struct device *dev;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(flags);
+	KBASE_DEBUG_ASSERT(gpu_va);
+
+	dev = kctx->kbdev->dev;
+	dev_dbg(dev, "Allocating %lld va_pages, %lld commit_pages, %lld extent, 0x%llX flags\n",
+		va_pages, commit_pages, extent, *flags);
+
+	*gpu_va = 0; /* return 0 on failure */
+
+	if (!kbase_check_alloc_flags(*flags)) {
+		dev_warn(dev,
+				"kbase_mem_alloc called with bad flags (%llx)",
+				(unsigned long long)*flags);
+		goto bad_flags;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	if (unlikely(kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE))) {
+		/* Mask coherency flags if infinite cache is enabled to prevent
+		 * the skipping of syncs from BASE side.
+		 */
+		*flags &= ~(BASE_MEM_COHERENT_SYSTEM_REQUIRED |
+			    BASE_MEM_COHERENT_SYSTEM);
+	}
+#endif
+
+	if ((*flags & BASE_MEM_UNCACHED_GPU) != 0 &&
+			(*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0) {
+		/* Remove COHERENT_SYSTEM_REQUIRED flag if uncached GPU mapping is requested */
+		*flags &= ~BASE_MEM_COHERENT_SYSTEM_REQUIRED;
+	}
+	if ((*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0 &&
+			!kbase_device_is_cpu_coherent(kctx->kbdev)) {
+		dev_warn(dev, "kbase_mem_alloc call required coherent mem when unavailable");
+		goto bad_flags;
+	}
+	if ((*flags & BASE_MEM_COHERENT_SYSTEM) != 0 &&
+			!kbase_device_is_cpu_coherent(kctx->kbdev)) {
+		/* Remove COHERENT_SYSTEM flag if coherent mem is unavailable */
+		*flags &= ~BASE_MEM_COHERENT_SYSTEM;
+	}
+
+	if (kbase_check_alloc_sizes(kctx, *flags, va_pages, commit_pages, extent))
+		goto bad_sizes;
+
+#ifdef CONFIG_MALI_MEMORY_FULLY_BACKED
+	/* Ensure that memory is fully physically-backed. */
+	if (*flags & BASE_MEM_GROW_ON_GPF)
+		commit_pages = va_pages;
+#endif
+
+	/* find out which VA zone to use */
+	if (*flags & BASE_MEM_SAME_VA) {
+		rbtree = &kctx->reg_rbtree_same;
+		zone = KBASE_REG_ZONE_SAME_VA;
+	} else if ((*flags & BASE_MEM_PROT_GPU_EX) && kbase_has_exec_va_zone(kctx)) {
+		rbtree = &kctx->reg_rbtree_exec;
+		zone = KBASE_REG_ZONE_EXEC_VA;
+	} else {
+		rbtree = &kctx->reg_rbtree_custom;
+		zone = KBASE_REG_ZONE_CUSTOM_VA;
+	}
+
+	reg = kbase_alloc_free_region(rbtree, 0, va_pages, zone);
+	if (!reg) {
+		dev_err(dev, "Failed to allocate free region");
+		goto no_region;
+	}
+
+	if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+		goto invalid_flags;
+
+	if (kbase_reg_prepare_native(reg, kctx,
+				base_mem_group_id_get(*flags)) != 0) {
+		dev_err(dev, "Failed to prepare region");
+		goto prepare_failed;
+	}
+
+	if (*flags & (BASE_MEM_GROW_ON_GPF|BASE_MEM_TILER_ALIGN_TOP)) {
+		/* kbase_check_alloc_sizes() already checks extent is valid for
+		 * assigning to reg->extent */
+		reg->extent = extent;
+	} else {
+		reg->extent = 0;
+	}
+
+	if (kbase_alloc_phy_pages(reg, va_pages, commit_pages) != 0) {
+		dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)",
+				(unsigned long long)commit_pages,
+				(unsigned long long)va_pages);
+		goto no_mem;
+	}
+	reg->initial_commit = commit_pages;
+
+	kbase_gpu_vm_lock(kctx);
+
+	if (reg->flags & KBASE_REG_PERMANENT_KERNEL_MAPPING) {
+		/* Permanent kernel mappings must happen as soon as
+		 * reg->cpu_alloc->pages is ready. Currently this happens after
+		 * kbase_alloc_phy_pages(). If we move that to set up pages
+		 * earlier, this call must move as well.
+		 */
+		int err = kbase_phy_alloc_mapping_init(kctx, reg, va_pages,
+				commit_pages);
+		if (err < 0) {
+			kbase_gpu_vm_unlock(kctx);
+			goto no_kern_mapping;
+		}
+	}
+
+	/* mmap needed to setup VA? */
+	if (*flags & BASE_MEM_SAME_VA) {
+		unsigned long cookie, cookie_nr;
+
+		/* Bind to a cookie */
+		if (!kctx->cookies) {
+			dev_err(dev, "No cookies available for allocation!");
+			kbase_gpu_vm_unlock(kctx);
+			goto no_cookie;
+		}
+		/* return a cookie */
+		cookie_nr = __ffs(kctx->cookies);
+		kctx->cookies &= ~(1UL << cookie_nr);
+		BUG_ON(kctx->pending_regions[cookie_nr]);
+		kctx->pending_regions[cookie_nr] = reg;
+
+		/* relocate to correct base */
+		cookie = cookie_nr + PFN_DOWN(BASE_MEM_COOKIE_BASE);
+		cookie <<= PAGE_SHIFT;
+
+		*gpu_va = (u64) cookie;
+	} else /* we control the VA */ {
+		if (kbase_gpu_mmap(kctx, reg, 0, va_pages, 1) != 0) {
+			dev_warn(dev, "Failed to map memory on GPU");
+			kbase_gpu_vm_unlock(kctx);
+			goto no_mmap;
+		}
+		/* return real GPU VA */
+		*gpu_va = reg->start_pfn << PAGE_SHIFT;
+	}
+
+	kbase_gpu_vm_unlock(kctx);
+	return reg;
+
+no_mmap:
+no_cookie:
+no_kern_mapping:
+no_mem:
+	kbase_mem_phy_alloc_put(reg->cpu_alloc);
+	kbase_mem_phy_alloc_put(reg->gpu_alloc);
+invalid_flags:
+prepare_failed:
+	kfree(reg);
+no_region:
+bad_sizes:
+bad_flags:
+	return NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_mem_alloc);
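+
+/*
+ * Illustrative call of kbase_mem_alloc() (a sketch only; kctx is assumed to
+ * be a valid context): allocate 16 pages, fully committed, readable and
+ * writable from both CPU and GPU:
+ *
+ *   u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR |
+ *     BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR;
+ *   u64 gpu_va;
+ *   struct kbase_va_region *reg =
+ *     kbase_mem_alloc(kctx, 16, 16, 0, &flags, &gpu_va);
+ */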
+
+int kbase_mem_query(struct kbase_context *kctx,
+		u64 gpu_addr, u64 query, u64 * const out)
+{
+	struct kbase_va_region *reg;
+	int ret = -EINVAL;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(out);
+
+	if (gpu_addr & ~PAGE_MASK) {
+		dev_warn(kctx->kbdev->dev, "mem_query: gpu_addr: passed parameter is invalid");
+		return -EINVAL;
+	}
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* Validate the region */
+	reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+	if (kbase_is_region_invalid_or_free(reg))
+		goto out_unlock;
+
+	switch (query) {
+	case KBASE_MEM_QUERY_COMMIT_SIZE:
+		if (reg->cpu_alloc->type != KBASE_MEM_TYPE_ALIAS) {
+			*out = kbase_reg_current_backed_size(reg);
+		} else {
+			size_t i;
+			struct kbase_aliased *aliased;
+			*out = 0;
+			aliased = reg->cpu_alloc->imported.alias.aliased;
+			for (i = 0; i < reg->cpu_alloc->imported.alias.nents; i++)
+				*out += aliased[i].length;
+		}
+		break;
+	case KBASE_MEM_QUERY_VA_SIZE:
+		*out = reg->nr_pages;
+		break;
+	case KBASE_MEM_QUERY_FLAGS:
+	{
+		*out = 0;
+		if (KBASE_REG_CPU_WR & reg->flags)
+			*out |= BASE_MEM_PROT_CPU_WR;
+		if (KBASE_REG_CPU_RD & reg->flags)
+			*out |= BASE_MEM_PROT_CPU_RD;
+		if (KBASE_REG_CPU_CACHED & reg->flags)
+			*out |= BASE_MEM_CACHED_CPU;
+		if (KBASE_REG_GPU_WR & reg->flags)
+			*out |= BASE_MEM_PROT_GPU_WR;
+		if (KBASE_REG_GPU_RD & reg->flags)
+			*out |= BASE_MEM_PROT_GPU_RD;
+		if (!(KBASE_REG_GPU_NX & reg->flags))
+			*out |= BASE_MEM_PROT_GPU_EX;
+		if (KBASE_REG_SHARE_BOTH & reg->flags)
+			*out |= BASE_MEM_COHERENT_SYSTEM;
+		if (KBASE_REG_SHARE_IN & reg->flags)
+			*out |= BASE_MEM_COHERENT_LOCAL;
+		if (kctx->api_version >= KBASE_API_VERSION(11, 2)) {
+			/* Prior to 11.2, these were known about by user-side
+			 * but we did not return them. Returning some of these
+			 * caused certain clients that were not expecting them
+			 * to fail, so we omit all of them as a special-case
+			 * for compatibility reasons */
+			if (KBASE_REG_PF_GROW & reg->flags)
+				*out |= BASE_MEM_GROW_ON_GPF;
+			if (KBASE_REG_PROTECTED & reg->flags)
+				*out |= BASE_MEM_PROTECTED;
+		}
+		if (KBASE_REG_TILER_ALIGN_TOP & reg->flags)
+			*out |= BASE_MEM_TILER_ALIGN_TOP;
+		if (!(KBASE_REG_GPU_CACHED & reg->flags))
+			*out |= BASE_MEM_UNCACHED_GPU;
+		if (KBASE_REG_GPU_VA_SAME_4GB_PAGE & reg->flags)
+			*out |= BASE_MEM_GPU_VA_SAME_4GB_PAGE;
+
+		*out |= base_mem_group_id_set(reg->cpu_alloc->group_id);
+
+		WARN(*out & ~BASE_MEM_FLAGS_QUERYABLE,
+				"BASE_MEM_FLAGS_QUERYABLE needs updating\n");
+		*out &= BASE_MEM_FLAGS_QUERYABLE;
+		break;
+	}
+	default:
+		*out = 0;
+		goto out_unlock;
+	}
+
+	ret = 0;
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return ret;
+}
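+
+/*
+ * Example use of kbase_mem_query() (illustrative only; gpu_addr is assumed
+ * to be the base address of a valid region):
+ *
+ *   u64 committed = 0;
+ *
+ *   if (!kbase_mem_query(kctx, gpu_addr, KBASE_MEM_QUERY_COMMIT_SIZE,
+ *     &committed))
+ *     ... committed now holds the number of backed pages ...
+ */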
+
+/**
+ * kbase_mem_evictable_reclaim_count_objects - Count number of pages in the
+ * ephemeral memory eviction list.
+ * @s:        Shrinker
+ * @sc:       Shrinker control
+ *
+ * Return: Number of pages which can be freed.
+ */
+static
+unsigned long kbase_mem_evictable_reclaim_count_objects(struct shrinker *s,
+		struct shrink_control *sc)
+{
+	struct kbase_context *kctx;
+	struct kbase_mem_phy_alloc *alloc;
+	unsigned long pages = 0;
+
+	kctx = container_of(s, struct kbase_context, reclaim);
+
+	mutex_lock(&kctx->jit_evict_lock);
+
+	list_for_each_entry(alloc, &kctx->evict_list, evict_node)
+		pages += alloc->nents;
+
+	mutex_unlock(&kctx->jit_evict_lock);
+	return pages;
+}
+
+/**
+ * kbase_mem_evictable_reclaim_scan_objects - Scan the ephemeral memory
+ * eviction list for pages and try to reclaim them.
+ * @s:        Shrinker
+ * @sc:       Shrinker control
+ *
+ * Return: Number of pages freed (can be less than requested) or -1 if the
+ * shrinker failed to free pages in its pool.
+ *
+ * Note:
+ * This function accesses region structures without taking the region lock.
+ * This is required as the OOM killer can call the shrinker after the region
+ * lock has already been held.
+ * This is safe as we can guarantee that a region on the eviction list will
+ * not be freed (kbase_mem_free_region removes the allocation from the list
+ * before destroying it), or modified by other parts of the driver.
+ * The eviction list itself is guarded by the eviction lock and the MMU updates
+ * are protected by their own lock.
+ */
+static
+unsigned long kbase_mem_evictable_reclaim_scan_objects(struct shrinker *s,
+		struct shrink_control *sc)
+{
+	struct kbase_context *kctx;
+	struct kbase_mem_phy_alloc *alloc;
+	struct kbase_mem_phy_alloc *tmp;
+	unsigned long freed = 0;
+
+	kctx = container_of(s, struct kbase_context, reclaim);
+	mutex_lock(&kctx->jit_evict_lock);
+
+	list_for_each_entry_safe(alloc, tmp, &kctx->evict_list, evict_node) {
+		int err;
+
+		err = kbase_mem_shrink_gpu_mapping(kctx, alloc->reg,
+				0, alloc->nents);
+		if (err != 0) {
+			/*
+			 * Failed to remove GPU mapping, tell the shrinker
+			 * to stop trying to shrink our slab even though we
+			 * have pages in it.
+			 */
+			freed = -1;
+			goto out_unlock;
+		}
+
+		/*
+		 * Update alloc->evicted before freeing the backing so the
+		 * helper can determine that it needs to bypass the accounting
+		 * and memory pool.
+		 */
+		alloc->evicted = alloc->nents;
+
+		kbase_free_phy_pages_helper(alloc, alloc->evicted);
+		freed += alloc->evicted;
+		list_del_init(&alloc->evict_node);
+
+		/*
+		 * Inform the JIT allocator this region has lost backing
+		 * as it might need to free the allocation.
+		 */
+		kbase_jit_backing_lost(alloc->reg);
+
+		/* Enough pages have been freed so stop now */
+		if (freed > sc->nr_to_scan)
+			break;
+	}
+out_unlock:
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	return freed;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int kbase_mem_evictable_reclaim_shrink(struct shrinker *s,
+		struct shrink_control *sc)
+{
+	if (sc->nr_to_scan == 0)
+		return kbase_mem_evictable_reclaim_count_objects(s, sc);
+
+	return kbase_mem_evictable_reclaim_scan_objects(s, sc);
+}
+#endif
+
+int kbase_mem_evictable_init(struct kbase_context *kctx)
+{
+	INIT_LIST_HEAD(&kctx->evict_list);
+	mutex_init(&kctx->jit_evict_lock);
+
+	/* Register shrinker */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+	kctx->reclaim.shrink = kbase_mem_evictable_reclaim_shrink;
+#else
+	kctx->reclaim.count_objects = kbase_mem_evictable_reclaim_count_objects;
+	kctx->reclaim.scan_objects = kbase_mem_evictable_reclaim_scan_objects;
+#endif
+	kctx->reclaim.seeks = DEFAULT_SEEKS;
+	/* Kernel versions prior to 3.1:
+	 * struct shrinker does not define batch */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+	kctx->reclaim.batch = 0;
+#endif
+	register_shrinker(&kctx->reclaim);
+	return 0;
+}
+
+void kbase_mem_evictable_deinit(struct kbase_context *kctx)
+{
+	unregister_shrinker(&kctx->reclaim);
+}
+
+/**
+ * kbase_mem_evictable_mark_reclaim - Mark the pages as reclaimable.
+ * @alloc: The physical allocation
+ */
+void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc)
+{
+	struct kbase_context *kctx = alloc->imported.native.kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+	int __maybe_unused new_page_count;
+
+	kbase_process_page_usage_dec(kctx, alloc->nents);
+	new_page_count = atomic_sub_return(alloc->nents,
+		&kctx->used_pages);
+	atomic_sub(alloc->nents, &kctx->kbdev->memdev.used_pages);
+
+	KBASE_TLSTREAM_AUX_PAGESALLOC(
+			kbdev,
+			kctx->id,
+			(u64)new_page_count);
+}
+
+/**
+ * kbase_mem_evictable_unmark_reclaim - Mark the pages as no longer reclaimable.
+ * @alloc: The physical allocation
+ */
+static
+void kbase_mem_evictable_unmark_reclaim(struct kbase_mem_phy_alloc *alloc)
+{
+	struct kbase_context *kctx = alloc->imported.native.kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+	int __maybe_unused new_page_count;
+
+	new_page_count = atomic_add_return(alloc->nents,
+		&kctx->used_pages);
+	atomic_add(alloc->nents, &kctx->kbdev->memdev.used_pages);
+
+	/* Increase mm counters so that the allocation is accounted for
+	 * against the process and thus is visible to the OOM killer.
+	 */
+	kbase_process_page_usage_inc(kctx, alloc->nents);
+
+	KBASE_TLSTREAM_AUX_PAGESALLOC(
+			kbdev,
+			kctx->id,
+			(u64)new_page_count);
+}
+
+int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc)
+{
+	struct kbase_context *kctx = gpu_alloc->imported.native.kctx;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	kbase_mem_shrink_cpu_mapping(kctx, gpu_alloc->reg,
+			0, gpu_alloc->nents);
+
+	mutex_lock(&kctx->jit_evict_lock);
+	/* This allocation can't already be on a list. */
+	WARN_ON(!list_empty(&gpu_alloc->evict_node));
+
+	/*
+	 * Add the allocation to the eviction list, after this point the shrink
+	 * can reclaim it.
+	 */
+	list_add(&gpu_alloc->evict_node, &kctx->evict_list);
+	mutex_unlock(&kctx->jit_evict_lock);
+	kbase_mem_evictable_mark_reclaim(gpu_alloc);
+
+	gpu_alloc->reg->flags |= KBASE_REG_DONT_NEED;
+	return 0;
+}
+
+bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *gpu_alloc)
+{
+	struct kbase_context *kctx = gpu_alloc->imported.native.kctx;
+	int err = 0;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	mutex_lock(&kctx->jit_evict_lock);
+	/*
+	 * First remove the allocation from the eviction list as it's no
+	 * longer eligible for eviction.
+	 */
+	list_del_init(&gpu_alloc->evict_node);
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	if (gpu_alloc->evicted == 0) {
+		/*
+		 * The backing is still present, update the VM stats as it's
+		 * in use again.
+		 */
+		kbase_mem_evictable_unmark_reclaim(gpu_alloc);
+	} else {
+		/* If the region is still alive ... */
+		if (gpu_alloc->reg) {
+			/* ... allocate replacement backing ... */
+			err = kbase_alloc_phy_pages_helper(gpu_alloc,
+					gpu_alloc->evicted);
+
+			/*
+			 * ... and grow the mapping back to its
+			 * pre-eviction size.
+			 */
+			if (!err)
+				err = kbase_mem_grow_gpu_mapping(kctx,
+						gpu_alloc->reg,
+						gpu_alloc->evicted, 0);
+
+			gpu_alloc->evicted = 0;
+		}
+	}
+
+	/* If the region is still alive remove the DONT_NEED attribute. */
+	if (gpu_alloc->reg)
+		gpu_alloc->reg->flags &= ~KBASE_REG_DONT_NEED;
+
+	return (err == 0);
+}
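+
+/*
+ * Illustrative pairing of make/unmake (a sketch only): in this driver the
+ * transition is driven from kbase_mem_flags_change() below, when user space
+ * sets or clears BASE_MEM_DONT_NEED on a native allocation:
+ *
+ *   kbase_gpu_vm_lock(kctx);
+ *   kbase_mem_evictable_make(reg->gpu_alloc);
+ *   ... the shrinker may now reclaim the backing pages ...
+ *   kbase_mem_evictable_unmake(reg->gpu_alloc);
+ *   kbase_gpu_vm_unlock(kctx);
+ */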
+
+int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask)
+{
+	struct kbase_va_region *reg;
+	int ret = -EINVAL;
+	unsigned int real_flags = 0;
+	unsigned int new_flags = 0;
+	bool prev_needed, new_needed;
+
+	KBASE_DEBUG_ASSERT(kctx);
+
+	if (!gpu_addr)
+		return -EINVAL;
+
+	if ((gpu_addr & ~PAGE_MASK) && (gpu_addr >= PAGE_SIZE))
+		return -EINVAL;
+
+	/* nuke other bits */
+	flags &= mask;
+
+	/* check for only supported flags */
+	if (flags & ~(BASE_MEM_FLAGS_MODIFIABLE))
+		goto out;
+
+	/* mask covers bits we don't support? */
+	if (mask & ~(BASE_MEM_FLAGS_MODIFIABLE))
+		goto out;
+
+	/* convert flags */
+	if (BASE_MEM_COHERENT_SYSTEM & flags)
+		real_flags |= KBASE_REG_SHARE_BOTH;
+	else if (BASE_MEM_COHERENT_LOCAL & flags)
+		real_flags |= KBASE_REG_SHARE_IN;
+
+	/* now we can lock down the context, and find the region */
+	down_write(&current->mm->mmap_sem);
+	kbase_gpu_vm_lock(kctx);
+
+	/* Validate the region */
+	reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+	if (kbase_is_region_invalid_or_free(reg))
+		goto out_unlock;
+
+	/* Is the region transitioning between not needed and needed? */
+	prev_needed = (KBASE_REG_DONT_NEED & reg->flags) == KBASE_REG_DONT_NEED;
+	new_needed = (BASE_MEM_DONT_NEED & flags) == BASE_MEM_DONT_NEED;
+	if (prev_needed != new_needed) {
+		/* Aliased allocations can't be made ephemeral */
+		if (atomic_read(&reg->cpu_alloc->gpu_mappings) > 1)
+			goto out_unlock;
+
+		if (new_needed) {
+			/* Only native allocations can be marked not needed */
+			if (reg->cpu_alloc->type != KBASE_MEM_TYPE_NATIVE) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+			ret = kbase_mem_evictable_make(reg->gpu_alloc);
+			if (ret)
+				goto out_unlock;
+		} else {
+			kbase_mem_evictable_unmake(reg->gpu_alloc);
+		}
+	}
+
+	/* limit to imported memory */
+	if (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM)
+		goto out_unlock;
+
+	/* shareability flags are ignored for GPU uncached memory */
+	if (!(reg->flags & KBASE_REG_GPU_CACHED)) {
+		ret = 0;
+		goto out_unlock;
+	}
+
+	/* no change? */
+	if (real_flags == (reg->flags & (KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH))) {
+		ret = 0;
+		goto out_unlock;
+	}
+
+	new_flags = reg->flags & ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
+	new_flags |= real_flags;
+
+	/* Currently supporting only imported memory */
+	if (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND)) {
+		/* Future use will use the new flags, existing mapping
+		 * will NOT be updated as memory should not be in use
+		 * by the GPU when updating the flags.
+		 */
+		WARN_ON(reg->gpu_alloc->imported.umm.current_mapping_usage_count);
+		ret = 0;
+	} else if (reg->gpu_alloc->imported.umm.current_mapping_usage_count) {
+		/*
+		 * When CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND is not enabled the
+		 * dma-buf GPU mapping should always be present, check that
+		 * this is the case and warn and skip the page table update if
+		 * not.
+		 *
+		 * Then update dma-buf GPU mapping with the new flags.
+		 *
+		 * Note: The buffer must not be in use on the GPU when
+		 * changing flags. If the buffer is in active use on
+		 * the GPU, there is a risk that the GPU may trigger a
+		 * shareability fault, as it will see the same
+		 * addresses from buffer with different shareability
+		 * properties.
+		 */
+		dev_dbg(kctx->kbdev->dev,
+			"Updating page tables on mem flag change\n");
+		ret = kbase_mmu_update_pages(kctx, reg->start_pfn,
+				kbase_get_gpu_phy_pages(reg),
+				kbase_reg_current_backed_size(reg),
+				new_flags,
+				reg->gpu_alloc->group_id);
+		if (ret)
+			dev_warn(kctx->kbdev->dev,
+				 "Failed to update GPU page tables on flag change: %d\n",
+				 ret);
+	} else
+		WARN_ON(!reg->gpu_alloc->imported.umm.current_mapping_usage_count);
+
+	/* If everything is good, then set the new flags on the region. */
+	if (!ret)
+		reg->flags = new_flags;
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	up_write(&current->mm->mmap_sem);
+out:
+	return ret;
+}
+
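+/* Internal flag, placed one bit above the last user-visible base memory
+ * flag; set on *flags during import to record that the allocation already
+ * has physical pages at import time.
+ */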
+#define KBASE_MEM_IMPORT_HAVE_PAGES (1UL << BASE_MEM_FLAGS_NR_BITS)
+
+int kbase_mem_do_sync_imported(struct kbase_context *kctx,
+		struct kbase_va_region *reg, enum kbase_sync_type sync_fn)
+{
+	int ret = -EINVAL;
+	struct dma_buf *dma_buf;
+	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/* We assume that the same physical allocation object is used for both
+	 * GPU and CPU for imported buffers.
+	 */
+	WARN_ON(reg->cpu_alloc != reg->gpu_alloc);
+
+	/* Currently only handle dma-bufs */
+	if (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM)
+		return ret;
+	/*
+	 * Attempting to sync with CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND
+	 * enabled can expose us to a Linux Kernel issue between v4.6 and
+	 * v4.19. We will not attempt to support cache syncs on dma-bufs that
+	 * are mapped on demand (i.e. not on import), whether on pre-4.6 or on
+	 * 4.20 and newer kernels, because this makes it difficult for
+	 * userspace to know when it can rely on the cache sync.
+	 * Instead, only support syncing when we always map dma-bufs on import,
+	 * or if the particular buffer is mapped right now.
+	 */
+	if (IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND) &&
+	    !reg->gpu_alloc->imported.umm.current_mapping_usage_count)
+		return ret;
+
+	dma_buf = reg->gpu_alloc->imported.umm.dma_buf;
+
+	switch (sync_fn) {
+	case KBASE_SYNC_TO_DEVICE:
+		dev_dbg(kctx->kbdev->dev,
+			"Syncing imported buffer at GPU VA %llx to GPU\n",
+			reg->start_pfn);
+#ifdef KBASE_MEM_ION_SYNC_WORKAROUND
+		if (!WARN_ON(!reg->gpu_alloc->imported.umm.dma_attachment)) {
+			struct dma_buf_attachment *attachment = reg->gpu_alloc->imported.umm.dma_attachment;
+			struct sg_table *sgt = reg->gpu_alloc->imported.umm.sgt;
+
+			dma_sync_sg_for_device(attachment->dev, sgt->sgl,
+					sgt->nents, dir);
+			ret = 0;
+		}
+#else
+	/* The version check below may be superfluous, depending on the version
+	 * condition used to enable KBASE_MEM_ION_SYNC_WORKAROUND. It is kept
+	 * here to ease modification for non-ION systems or systems where ION
+	 * has been patched.
+	 */
+#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE && !defined(CONFIG_CHROMEOS)
+		dma_buf_end_cpu_access(dma_buf,
+				0, dma_buf->size,
+				dir);
+		ret = 0;
+#else
+		ret = dma_buf_end_cpu_access(dma_buf,
+				dir);
+#endif
+#endif /* KBASE_MEM_ION_SYNC_WORKAROUND */
+		break;
+	case KBASE_SYNC_TO_CPU:
+		dev_dbg(kctx->kbdev->dev,
+			"Syncing imported buffer at GPU VA %llx to CPU\n",
+			reg->start_pfn);
+#ifdef KBASE_MEM_ION_SYNC_WORKAROUND
+		if (!WARN_ON(!reg->gpu_alloc->imported.umm.dma_attachment)) {
+			struct dma_buf_attachment *attachment = reg->gpu_alloc->imported.umm.dma_attachment;
+			struct sg_table *sgt = reg->gpu_alloc->imported.umm.sgt;
+
+			dma_sync_sg_for_cpu(attachment->dev, sgt->sgl,
+					sgt->nents, dir);
+			ret = 0;
+		}
+#else
+		ret = dma_buf_begin_cpu_access(dma_buf,
+#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE && !defined(CONFIG_CHROMEOS)
+				0, dma_buf->size,
+#endif
+				dir);
+#endif /* KBASE_MEM_ION_SYNC_WORKAROUND */
+		break;
+	}
+
+	if (unlikely(ret))
+		dev_warn(kctx->kbdev->dev,
+			 "Failed to sync mem region %pK at GPU VA %llx: %d\n",
+			 reg, reg->start_pfn, ret);
+
+	return ret;
+}
+
+/**
+ * kbase_mem_umm_unmap_attachment - Unmap dma-buf attachment
+ * @kctx: Pointer to kbase context
+ * @alloc: Pointer to allocation with imported dma-buf memory to unmap
+ *
+ * This will unmap a dma-buf. Must be called after the GPU page tables for the
+ * region have been torn down.
+ */
+static void kbase_mem_umm_unmap_attachment(struct kbase_context *kctx,
+					   struct kbase_mem_phy_alloc *alloc)
+{
+	struct tagged_addr *pa = alloc->pages;
+
+	dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
+				 alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
+	alloc->imported.umm.sgt = NULL;
+
+	memset(pa, 0xff, sizeof(*pa) * alloc->nents);
+	alloc->nents = 0;
+}
+
+/**
+ * kbase_mem_umm_map_attachment - Prepare attached dma-buf for GPU mapping
+ * @kctx: Pointer to kbase context
+ * @reg: Pointer to region with imported dma-buf memory to map
+ *
+ * Map the dma-buf and prepare the page array with the tagged Mali physical
+ * addresses for GPU mapping.
+ *
+ * Return: 0 on success, or negative error code
+ */
+static int kbase_mem_umm_map_attachment(struct kbase_context *kctx,
+		struct kbase_va_region *reg)
+{
+	struct sg_table *sgt;
+	struct scatterlist *s;
+	int i;
+	struct tagged_addr *pa;
+	int err;
+	size_t count = 0;
+	struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
+
+	WARN_ON_ONCE(alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM);
+	WARN_ON_ONCE(alloc->imported.umm.sgt);
+
+	sgt = dma_buf_map_attachment(alloc->imported.umm.dma_attachment,
+			DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(sgt))
+		return -EINVAL;
+
+	/* save for later */
+	alloc->imported.umm.sgt = sgt;
+
+	pa = kbase_get_gpu_phy_pages(reg);
+
+	for_each_sg(sgt->sgl, s, sgt->nents, i) {
+		size_t j, pages = PFN_UP(sg_dma_len(s));
+
+		WARN_ONCE(sg_dma_len(s) & (PAGE_SIZE-1),
+		"sg_dma_len(s)=%u is not a multiple of PAGE_SIZE\n",
+		sg_dma_len(s));
+
+		WARN_ONCE(sg_dma_address(s) & (PAGE_SIZE-1),
+		"sg_dma_address(s)=%llx is not aligned to PAGE_SIZE\n",
+		(unsigned long long) sg_dma_address(s));
+
+		for (j = 0; (j < pages) && (count < reg->nr_pages); j++, count++)
+			*pa++ = as_tagged(sg_dma_address(s) +
+				(j << PAGE_SHIFT));
+		WARN_ONCE(j < pages,
+		"sg list from dma_buf_map_attachment > dma_buf->size=%zu\n",
+		alloc->imported.umm.dma_buf->size);
+	}
+
+	if (!(reg->flags & KBASE_REG_IMPORT_PAD) &&
+			WARN_ONCE(count < reg->nr_pages,
+			"sg list from dma_buf_map_attachment < dma_buf->size=%zu\n",
+			alloc->imported.umm.dma_buf->size)) {
+		err = -EINVAL;
+		goto err_unmap_attachment;
+	}
+
+	/* Update nents as we now have pages to map */
+	alloc->nents = count;
+
+	return 0;
+
+err_unmap_attachment:
+	kbase_mem_umm_unmap_attachment(kctx, alloc);
+
+	return err;
+}
+
+int kbase_mem_umm_map(struct kbase_context *kctx,
+		struct kbase_va_region *reg)
+{
+	int err;
+	struct kbase_mem_phy_alloc *alloc;
+	unsigned long gwt_mask = ~0;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	alloc = reg->gpu_alloc;
+
+	alloc->imported.umm.current_mapping_usage_count++;
+	if (alloc->imported.umm.current_mapping_usage_count != 1) {
+		if (IS_ENABLED(CONFIG_MALI_DMA_BUF_LEGACY_COMPAT)) {
+			if (!kbase_is_region_invalid_or_free(reg)) {
+				err = kbase_mem_do_sync_imported(kctx, reg,
+						KBASE_SYNC_TO_DEVICE);
+				WARN_ON_ONCE(err);
+			}
+		}
+		return 0;
+	}
+
+	err = kbase_mem_umm_map_attachment(kctx, reg);
+	if (err)
+		goto bad_map_attachment;
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+	if (kctx->gwt_enabled)
+		gwt_mask = ~KBASE_REG_GPU_WR;
+#endif
+
+	err = kbase_mmu_insert_pages(kctx->kbdev,
+				     &kctx->mmu,
+				     reg->start_pfn,
+				     kbase_get_gpu_phy_pages(reg),
+				     kbase_reg_current_backed_size(reg),
+				     reg->flags & gwt_mask,
+				     kctx->as_nr,
+				     alloc->group_id);
+	if (err)
+		goto bad_insert;
+
+	if (reg->flags & KBASE_REG_IMPORT_PAD &&
+			!WARN_ON(reg->nr_pages < alloc->nents)) {
+		/* For padded imported dma-buf memory, map the dummy aliasing
+		 * page from the end of the dma-buf pages, to the end of the
+		 * region using a read only mapping.
+		 *
+		 * Assume alloc->nents is the number of actual pages in the
+		 * dma-buf memory.
+		 */
+		err = kbase_mmu_insert_single_page(kctx,
+				reg->start_pfn + alloc->nents,
+				kctx->aliasing_sink_page,
+				reg->nr_pages - alloc->nents,
+				(reg->flags | KBASE_REG_GPU_RD) &
+				~KBASE_REG_GPU_WR,
+				KBASE_MEM_GROUP_SINK);
+		if (err)
+			goto bad_pad_insert;
+	}
+
+	return 0;
+
+bad_pad_insert:
+	kbase_mmu_teardown_pages(kctx->kbdev,
+				 &kctx->mmu,
+				 reg->start_pfn,
+				 alloc->nents,
+				 kctx->as_nr);
+bad_insert:
+	kbase_mem_umm_unmap_attachment(kctx, alloc);
+bad_map_attachment:
+	alloc->imported.umm.current_mapping_usage_count--;
+
+	return err;
+}
+
+void kbase_mem_umm_unmap(struct kbase_context *kctx,
+		struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc)
+{
+	alloc->imported.umm.current_mapping_usage_count--;
+	if (alloc->imported.umm.current_mapping_usage_count) {
+		if (IS_ENABLED(CONFIG_MALI_DMA_BUF_LEGACY_COMPAT)) {
+			if (!kbase_is_region_invalid_or_free(reg)) {
+				int err = kbase_mem_do_sync_imported(kctx, reg,
+						KBASE_SYNC_TO_CPU);
+				WARN_ON_ONCE(err);
+			}
+		}
+		return;
+	}
+
+	if (!kbase_is_region_invalid_or_free(reg) && reg->gpu_alloc == alloc) {
+		int err;
+
+		err = kbase_mmu_teardown_pages(kctx->kbdev,
+					       &kctx->mmu,
+					       reg->start_pfn,
+					       reg->nr_pages,
+					       kctx->as_nr);
+		WARN_ON(err);
+	}
+
+	kbase_mem_umm_unmap_attachment(kctx, alloc);
+}
+
+static int get_umm_memory_group_id(struct kbase_context *kctx,
+		struct dma_buf *dma_buf)
+{
+	int group_id = BASE_MEM_GROUP_DEFAULT;
+
+	if (kctx->kbdev->mgm_dev->ops.mgm_get_import_memory_id) {
+		struct memory_group_manager_import_data mgm_import_data;
+
+		mgm_import_data.type =
+			MEMORY_GROUP_MANAGER_IMPORT_TYPE_DMA_BUF;
+		mgm_import_data.u.dma_buf = dma_buf;
+
+		group_id = kctx->kbdev->mgm_dev->ops.mgm_get_import_memory_id(
+			kctx->kbdev->mgm_dev, &mgm_import_data);
+	}
+
+	return group_id;
+}
+
+/**
+ * kbase_mem_from_umm - Import dma-buf memory into kctx
+ * @kctx: Pointer to kbase context to import memory into
+ * @fd: File descriptor of dma-buf to import
+ * @va_pages: Pointer where virtual size of the region will be output
+ * @flags: Pointer to memory flags
+ * @padding: Number of read only padding pages to be inserted at the end of the
+ * GPU mapping of the dma-buf
+ *
+ * This function imports a dma-buf into kctx, and creates a kbase_va_region
+ * object that wraps the dma-buf.
+ *
+ * Return: Pointer to new kbase_va_region object of the imported dma-buf, or
+ * NULL on error.
+ */
+static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
+		int fd, u64 *va_pages, u64 *flags, u32 padding)
+{
+	struct kbase_va_region *reg;
+	struct dma_buf *dma_buf;
+	struct dma_buf_attachment *dma_attachment;
+	bool shared_zone = false;
+	int group_id;
+
+	/* 64-bit address range is the max */
+	if (*va_pages > (U64_MAX / PAGE_SIZE))
+		return NULL;
+
+	dma_buf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(dma_buf))
+		return NULL;
+
+	dma_attachment = dma_buf_attach(dma_buf, kctx->kbdev->dev);
+	if (IS_ERR_OR_NULL(dma_attachment)) {
+		dma_buf_put(dma_buf);
+		return NULL;
+	}
+
+	*va_pages = (PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT) + padding;
+	if (!*va_pages) {
+		dma_buf_detach(dma_buf, dma_attachment);
+		dma_buf_put(dma_buf);
+		return NULL;
+	}
+
+	/* ignore SAME_VA */
+	*flags &= ~BASE_MEM_SAME_VA;
+
+	/*
+	 * Force CPU cached flag.
+	 *
+	 * We can't query the dma-buf exporter to get details about the CPU
+	 * cache attributes of CPU mappings, so we have to assume that the
+	 * buffer may be cached, and call into the exporter for cache
+	 * maintenance, and rely on the exporter to do the right thing when
+	 * handling our calls.
+	 */
+	*flags |= BASE_MEM_CACHED_CPU;
+
+	if (*flags & BASE_MEM_IMPORT_SHARED)
+		shared_zone = true;
+
+#ifdef CONFIG_64BIT
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+		/*
+		 * 64-bit tasks require us to reserve VA on the CPU that we use
+		 * on the GPU.
+		 */
+		shared_zone = true;
+	}
+#endif
+
+	if (shared_zone) {
+		*flags |= BASE_MEM_NEED_MMAP;
+		reg = kbase_alloc_free_region(&kctx->reg_rbtree_same,
+				0, *va_pages, KBASE_REG_ZONE_SAME_VA);
+	} else {
+		reg = kbase_alloc_free_region(&kctx->reg_rbtree_custom,
+				0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
+	}
+
+	if (!reg) {
+		dma_buf_detach(dma_buf, dma_attachment);
+		dma_buf_put(dma_buf);
+		return NULL;
+	}
+
+	group_id = get_umm_memory_group_id(kctx, dma_buf);
+
+	reg->gpu_alloc = kbase_alloc_create(kctx, *va_pages,
+			KBASE_MEM_TYPE_IMPORTED_UMM, group_id);
+	if (IS_ERR_OR_NULL(reg->gpu_alloc))
+		goto no_alloc;
+
+	reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+	if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+		goto error_out;
+
+	/* No pages to map yet */
+	reg->gpu_alloc->nents = 0;
+
+	reg->flags &= ~KBASE_REG_FREE;
+	reg->flags |= KBASE_REG_GPU_NX;	/* UMM is always No eXecute */
+	reg->flags &= ~KBASE_REG_GROWABLE;	/* UMM cannot be grown */
+
+	if (*flags & BASE_MEM_PROTECTED)
+		reg->flags |= KBASE_REG_PROTECTED;
+
+	if (padding)
+		reg->flags |= KBASE_REG_IMPORT_PAD;
+
+	reg->gpu_alloc->type = KBASE_MEM_TYPE_IMPORTED_UMM;
+	reg->gpu_alloc->imported.umm.sgt = NULL;
+	reg->gpu_alloc->imported.umm.dma_buf = dma_buf;
+	reg->gpu_alloc->imported.umm.dma_attachment = dma_attachment;
+	reg->gpu_alloc->imported.umm.current_mapping_usage_count = 0;
+	reg->extent = 0;
+
+	if (!IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND)) {
+		int err;
+
+		reg->gpu_alloc->imported.umm.current_mapping_usage_count = 1;
+
+		err = kbase_mem_umm_map_attachment(kctx, reg);
+		if (err) {
+			dev_warn(kctx->kbdev->dev,
+				 "Failed to map dma-buf %pK on GPU: %d\n",
+				 dma_buf, err);
+			goto error_out;
+		}
+
+		*flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
+	}
+
+	return reg;
+
+error_out:
+	kbase_mem_phy_alloc_put(reg->gpu_alloc);
+	kbase_mem_phy_alloc_put(reg->cpu_alloc);
+no_alloc:
+	kfree(reg);
+
+	return NULL;
+}
+
+u32 kbase_get_cache_line_alignment(struct kbase_device *kbdev)
+{
+	u32 cpu_cache_line_size = cache_line_size();
+	u32 gpu_cache_line_size =
+		(1UL << kbdev->gpu_props.props.l2_props.log2_line_size);
+
+	return ((cpu_cache_line_size > gpu_cache_line_size) ?
+				cpu_cache_line_size :
+				gpu_cache_line_size);
+}
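+
+/*
+ * For example (illustrative values): with a 64 byte CPU cache line and a
+ * GPU log2_line_size of 7 (i.e. a 128 byte GPU cache line), the returned
+ * alignment is 128 bytes.
+ */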
+
+static struct kbase_va_region *kbase_mem_from_user_buffer(
+		struct kbase_context *kctx, unsigned long address,
+		unsigned long size, u64 *va_pages, u64 *flags)
+{
+	long i;
+	struct kbase_va_region *reg;
+	struct rb_root *rbtree;
+	long faulted_pages;
+	int zone = KBASE_REG_ZONE_CUSTOM_VA;
+	bool shared_zone = false;
+	u32 cache_line_alignment = kbase_get_cache_line_alignment(kctx->kbdev);
+	struct kbase_alloc_import_user_buf *user_buf;
+	struct page **pages = NULL;
+
+	if ((address & (cache_line_alignment - 1)) != 0 ||
+			(size & (cache_line_alignment - 1)) != 0) {
+		if (*flags & BASE_MEM_UNCACHED_GPU) {
+			dev_warn(kctx->kbdev->dev,
+					"User buffer is not cache line aligned and marked as GPU uncached\n");
+			goto bad_size;
+		}
+
+		/* Coherency must be enabled to handle partial cache lines */
+		if (*flags & (BASE_MEM_COHERENT_SYSTEM |
+			BASE_MEM_COHERENT_SYSTEM_REQUIRED)) {
+			/* Force coherent system required flag, import will
+			 * then fail if coherency isn't available
+			 */
+			*flags |= BASE_MEM_COHERENT_SYSTEM_REQUIRED;
+		} else {
+			dev_warn(kctx->kbdev->dev,
+					"User buffer is not cache line aligned and no coherency enabled\n");
+			goto bad_size;
+		}
+	}
+
+	*va_pages = (PAGE_ALIGN(address + size) >> PAGE_SHIFT) -
+		PFN_DOWN(address);
+	if (!*va_pages)
+		goto bad_size;
+
+	if (*va_pages > (U64_MAX / PAGE_SIZE))
+		/* 64-bit address range is the max */
+		goto bad_size;
+
+	/* SAME_VA generally not supported with imported memory (no known use cases) */
+	*flags &= ~BASE_MEM_SAME_VA;
+
+	if (*flags & BASE_MEM_IMPORT_SHARED)
+		shared_zone = true;
+
+#ifdef CONFIG_64BIT
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+		/*
+		 * 64-bit tasks require us to reserve VA on the CPU that we use
+		 * on the GPU.
+		 */
+		shared_zone = true;
+	}
+#endif
+
+	if (shared_zone) {
+		*flags |= BASE_MEM_NEED_MMAP;
+		zone = KBASE_REG_ZONE_SAME_VA;
+		rbtree = &kctx->reg_rbtree_same;
+	} else
+		rbtree = &kctx->reg_rbtree_custom;
+
+	reg = kbase_alloc_free_region(rbtree, 0, *va_pages, zone);
+
+	if (!reg)
+		goto no_region;
+
+	reg->gpu_alloc = kbase_alloc_create(
+		kctx, *va_pages, KBASE_MEM_TYPE_IMPORTED_USER_BUF,
+		BASE_MEM_GROUP_DEFAULT);
+	if (IS_ERR_OR_NULL(reg->gpu_alloc))
+		goto no_alloc_obj;
+
+	reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+	if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+		goto invalid_flags;
+
+	reg->flags &= ~KBASE_REG_FREE;
+	reg->flags |= KBASE_REG_GPU_NX; /* User-buffers are always No eXecute */
+	reg->flags &= ~KBASE_REG_GROWABLE; /* Cannot be grown */
+
+	user_buf = &reg->gpu_alloc->imported.user_buf;
+
+	user_buf->size = size;
+	user_buf->address = address;
+	user_buf->nr_pages = *va_pages;
+	user_buf->mm = current->mm;
+#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+	atomic_inc(&current->mm->mm_count);
+#else
+	mmgrab(current->mm);
+#endif
+	if (reg->gpu_alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
+		user_buf->pages = vmalloc(*va_pages * sizeof(struct page *));
+	else
+		user_buf->pages = kmalloc_array(*va_pages,
+				sizeof(struct page *), GFP_KERNEL);
+
+	if (!user_buf->pages)
+		goto no_page_array;
+
+	/* If the region is coherent with the CPU then the memory is imported
+	 * and mapped onto the GPU immediately.
+	 * Otherwise get_user_pages is called as a sanity check, but with
+	 * NULL as the pages argument, which will fault in the pages but not
+	 * pin them. The memory will then be pinned only around the jobs that
+	 * specify the region as an external resource.
+	 */
+	if (reg->flags & KBASE_REG_SHARE_BOTH) {
+		pages = user_buf->pages;
+		*flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
+	}
+
+	down_read(&current->mm->mmap_sem);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+	faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
+#if KERNEL_VERSION(4, 4, 168) <= LINUX_VERSION_CODE && \
+KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE
+			reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+			pages, NULL);
+#else
+			reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
+#endif
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+	faulted_pages = get_user_pages(address, *va_pages,
+			reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
+#else
+	faulted_pages = get_user_pages(address, *va_pages,
+			reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+			pages, NULL);
+#endif
+
+	up_read(&current->mm->mmap_sem);
+
+	if (faulted_pages != *va_pages)
+		goto fault_mismatch;
+
+	reg->gpu_alloc->nents = 0;
+	reg->extent = 0;
+
+	if (pages) {
+		struct device *dev = kctx->kbdev->dev;
+		unsigned long local_size = user_buf->size;
+		unsigned long offset = user_buf->address & ~PAGE_MASK;
+		struct tagged_addr *pa = kbase_get_gpu_phy_pages(reg);
+
+		/* Top bit signifies that this was pinned on import */
+		user_buf->current_mapping_usage_count |= PINNED_ON_IMPORT;
+
+		for (i = 0; i < faulted_pages; i++) {
+			dma_addr_t dma_addr;
+			unsigned long min;
+
+			min = MIN(PAGE_SIZE - offset, local_size);
+			dma_addr = dma_map_page(dev, pages[i],
+					offset, min,
+					DMA_BIDIRECTIONAL);
+			if (dma_mapping_error(dev, dma_addr))
+				goto unwind_dma_map;
+
+			user_buf->dma_addrs[i] = dma_addr;
+			pa[i] = as_tagged(page_to_phys(pages[i]));
+
+			local_size -= min;
+			offset = 0;
+		}
+
+		reg->gpu_alloc->nents = faulted_pages;
+	}
+
+	return reg;
+
+unwind_dma_map:
+	while (i--) {
+		dma_unmap_page(kctx->kbdev->dev,
+				user_buf->dma_addrs[i],
+				PAGE_SIZE, DMA_BIDIRECTIONAL);
+	}
+fault_mismatch:
+	if (pages) {
+		for (i = 0; i < faulted_pages; i++)
+			put_page(pages[i]);
+	}
+no_page_array:
+invalid_flags:
+	kbase_mem_phy_alloc_put(reg->cpu_alloc);
+	kbase_mem_phy_alloc_put(reg->gpu_alloc);
+no_alloc_obj:
+	kfree(reg);
+no_region:
+bad_size:
+	return NULL;
+}
+
+u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
+		    u64 nents, struct base_mem_aliasing_info *ai,
+		    u64 *num_pages)
+{
+	struct kbase_va_region *reg;
+	u64 gpu_va;
+	size_t i;
+	bool coherent;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(flags);
+	KBASE_DEBUG_ASSERT(ai);
+	KBASE_DEBUG_ASSERT(num_pages);
+
+	/* mask to only allowed flags */
+	*flags &= (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR |
+		   BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL |
+		   BASE_MEM_PROT_CPU_RD | BASE_MEM_COHERENT_SYSTEM_REQUIRED);
+
+	if (!(*flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR))) {
+		dev_warn(kctx->kbdev->dev,
+				"kbase_mem_alias called with bad flags (%llx)",
+				(unsigned long long)*flags);
+		goto bad_flags;
+	}
+	coherent = (*flags & BASE_MEM_COHERENT_SYSTEM) != 0 ||
+			(*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0;
+
+	if (!stride)
+		goto bad_stride;
+
+	if (!nents)
+		goto bad_nents;
+
+	if ((nents * stride) > (U64_MAX / PAGE_SIZE))
+		/* 64-bit address range is the max */
+		goto bad_size;
+
+	/* calculate the number of pages this alias will cover */
+	*num_pages = nents * stride;
+
+#ifdef CONFIG_64BIT
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+		/* 64-bit tasks must mmap anyway, but don't expose this
+		 * address to clients
+		 */
+		*flags |= BASE_MEM_NEED_MMAP;
+		reg = kbase_alloc_free_region(&kctx->reg_rbtree_same, 0,
+				*num_pages,
+				KBASE_REG_ZONE_SAME_VA);
+	} else {
+#else
+	if (1) {
+#endif
+		reg = kbase_alloc_free_region(&kctx->reg_rbtree_custom,
+				0, *num_pages,
+				KBASE_REG_ZONE_CUSTOM_VA);
+	}
+
+	if (!reg)
+		goto no_reg;
+
+	/* zero-sized page array, as we don't need one/can support one */
+	reg->gpu_alloc = kbase_alloc_create(kctx, 0, KBASE_MEM_TYPE_ALIAS,
+		BASE_MEM_GROUP_DEFAULT);
+	if (IS_ERR_OR_NULL(reg->gpu_alloc))
+		goto no_alloc_obj;
+
+	reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+	if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+		goto invalid_flags;
+
+	reg->gpu_alloc->imported.alias.nents = nents;
+	reg->gpu_alloc->imported.alias.stride = stride;
+	reg->gpu_alloc->imported.alias.aliased = vzalloc(sizeof(*reg->gpu_alloc->imported.alias.aliased) * nents);
+	if (!reg->gpu_alloc->imported.alias.aliased)
+		goto no_aliased_array;
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* validate and add src handles */
+	for (i = 0; i < nents; i++) {
+		if (ai[i].handle.basep.handle < BASE_MEM_FIRST_FREE_ADDRESS) {
+			if (ai[i].handle.basep.handle !=
+			    BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE)
+				goto bad_handle; /* unsupported magic handle */
+			if (!ai[i].length)
+				goto bad_handle; /* must be > 0 */
+			if (ai[i].length > stride)
+				goto bad_handle; /* can't be larger than the
+						    stride */
+			reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
+		} else {
+			struct kbase_va_region *aliasing_reg;
+			struct kbase_mem_phy_alloc *alloc;
+
+			aliasing_reg = kbase_region_tracker_find_region_base_address(
+				kctx,
+				(ai[i].handle.basep.handle >> PAGE_SHIFT) << PAGE_SHIFT);
+
+			/* validate found region */
+			if (kbase_is_region_invalid_or_free(aliasing_reg))
+				goto bad_handle; /* Not found/already free */
+			if (aliasing_reg->flags & KBASE_REG_DONT_NEED)
+				goto bad_handle; /* Ephemeral region */
+			if (!(aliasing_reg->flags & KBASE_REG_GPU_CACHED))
+				goto bad_handle; /* GPU uncached memory */
+			if (!aliasing_reg->gpu_alloc)
+				goto bad_handle; /* No alloc */
+			if (aliasing_reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
+				goto bad_handle; /* Not a native alloc */
+			/* Non-coherent memory cannot alias coherent
+			 * memory, and vice versa.
+			 */
+			if (coherent != ((aliasing_reg->flags & KBASE_REG_SHARE_BOTH) != 0))
+				goto bad_handle;
+
+			/* check size against stride */
+			if (!ai[i].length)
+				goto bad_handle; /* must be > 0 */
+			if (ai[i].length > stride)
+				goto bad_handle; /* can't be larger than the
+						    stride */
+
+			alloc = aliasing_reg->gpu_alloc;
+
+			/* check against the alloc's size */
+			if (ai[i].offset > alloc->nents)
+				goto bad_handle; /* beyond end */
+			if (ai[i].offset + ai[i].length > alloc->nents)
+				goto bad_handle; /* beyond end */
+
+			reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc);
+			reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
+			reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset;
+		}
+	}
+
+#ifdef CONFIG_64BIT
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+		/* Bind to a cookie */
+		if (!kctx->cookies) {
+			dev_err(kctx->kbdev->dev, "No cookies available for allocation!");
+			goto no_cookie;
+		}
+		/* return a cookie */
+		gpu_va = __ffs(kctx->cookies);
+		kctx->cookies &= ~(1UL << gpu_va);
+		BUG_ON(kctx->pending_regions[gpu_va]);
+		kctx->pending_regions[gpu_va] = reg;
+
+		/* relocate to correct base */
+		gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
+		gpu_va <<= PAGE_SHIFT;
+	} else /* we control the VA */ {
+#else
+	if (1) {
+#endif
+		if (kbase_gpu_mmap(kctx, reg, 0, *num_pages, 1) != 0) {
+			dev_warn(kctx->kbdev->dev, "Failed to map memory on GPU");
+			goto no_mmap;
+		}
+		/* return real GPU VA */
+		gpu_va = reg->start_pfn << PAGE_SHIFT;
+	}
+
+	reg->flags &= ~KBASE_REG_FREE;
+	reg->flags &= ~KBASE_REG_GROWABLE;
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return gpu_va;
+
+#ifdef CONFIG_64BIT
+no_cookie:
+#endif
+no_mmap:
+bad_handle:
+	kbase_gpu_vm_unlock(kctx);
+no_aliased_array:
+invalid_flags:
+	kbase_mem_phy_alloc_put(reg->cpu_alloc);
+	kbase_mem_phy_alloc_put(reg->gpu_alloc);
+no_alloc_obj:
+	kfree(reg);
+no_reg:
+bad_size:
+bad_nents:
+bad_stride:
+bad_flags:
+	return 0;
+}
+
+int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
+		void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages,
+		u64 *flags)
+{
+	struct kbase_va_region *reg;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(gpu_va);
+	KBASE_DEBUG_ASSERT(va_pages);
+	KBASE_DEBUG_ASSERT(flags);
+
+	if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) &&
+			kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA))
+		*flags |= BASE_MEM_SAME_VA;
+
+	if (!kbase_check_import_flags(*flags)) {
+		dev_warn(kctx->kbdev->dev,
+				"kbase_mem_import called with bad flags (%llx)",
+				(unsigned long long)*flags);
+		goto bad_flags;
+	}
+
+	if ((*flags & BASE_MEM_UNCACHED_GPU) != 0 &&
+			(*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0) {
+		/* Remove COHERENT_SYSTEM_REQUIRED flag if uncached GPU mapping is requested */
+		*flags &= ~BASE_MEM_COHERENT_SYSTEM_REQUIRED;
+	}
+	if ((*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0 &&
+			!kbase_device_is_cpu_coherent(kctx->kbdev)) {
+		dev_warn(kctx->kbdev->dev,
+				"kbase_mem_import call required coherent mem when unavailable");
+		goto bad_flags;
+	}
+	if ((*flags & BASE_MEM_COHERENT_SYSTEM) != 0 &&
+			!kbase_device_is_cpu_coherent(kctx->kbdev)) {
+		/* Remove COHERENT_SYSTEM flag if coherent mem is unavailable */
+		*flags &= ~BASE_MEM_COHERENT_SYSTEM;
+	}
+
+	if ((padding != 0) && (type != BASE_MEM_IMPORT_TYPE_UMM)) {
+		dev_warn(kctx->kbdev->dev,
+				"padding is only supported for UMM");
+		goto bad_flags;
+	}
+
+	switch (type) {
+	case BASE_MEM_IMPORT_TYPE_UMM: {
+		int fd;
+
+		if (get_user(fd, (int __user *)phandle))
+			reg = NULL;
+		else
+			reg = kbase_mem_from_umm(kctx, fd, va_pages, flags,
+					padding);
+	}
+	break;
+	case BASE_MEM_IMPORT_TYPE_USER_BUFFER: {
+		struct base_mem_import_user_buffer user_buffer;
+		void __user *uptr;
+
+		if (copy_from_user(&user_buffer, phandle,
+				sizeof(user_buffer))) {
+			reg = NULL;
+		} else {
+#ifdef CONFIG_COMPAT
+			if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+				uptr = compat_ptr(user_buffer.ptr);
+			else
+#endif
+				uptr = u64_to_user_ptr(user_buffer.ptr);
+
+			reg = kbase_mem_from_user_buffer(kctx,
+					(unsigned long)uptr, user_buffer.length,
+					va_pages, flags);
+		}
+		break;
+	}
+	default: {
+		reg = NULL;
+		break;
+	}
+	}
+
+	if (!reg)
+		goto no_reg;
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* mmap needed to setup VA? */
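+	/* A "cookie" is a free index into kctx->pending_regions: the region
+	 * is parked there and a fake mmap offset in the cookie range is
+	 * returned, so that kbasep_reg_mmap() can complete the mapping when
+	 * userspace later mmap()s that offset.
+	 */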
+	if (*flags & (BASE_MEM_SAME_VA | BASE_MEM_NEED_MMAP)) {
+		/* Bind to a cookie */
+		if (!kctx->cookies)
+			goto no_cookie;
+		/* return a cookie */
+		*gpu_va = __ffs(kctx->cookies);
+		kctx->cookies &= ~(1UL << *gpu_va);
+		BUG_ON(kctx->pending_regions[*gpu_va]);
+		kctx->pending_regions[*gpu_va] = reg;
+
+		/* relocate to correct base */
+		*gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
+		*gpu_va <<= PAGE_SHIFT;
+
+	} else if (*flags & KBASE_MEM_IMPORT_HAVE_PAGES)  {
+		/* we control the VA, mmap now to the GPU */
+		if (kbase_gpu_mmap(kctx, reg, 0, *va_pages, 1) != 0)
+			goto no_gpu_va;
+		/* return real GPU VA */
+		*gpu_va = reg->start_pfn << PAGE_SHIFT;
+	} else {
+		/* we control the VA, but nothing to mmap yet */
+		if (kbase_add_va_region(kctx, reg, 0, *va_pages, 1) != 0)
+			goto no_gpu_va;
+		/* return real GPU VA */
+		*gpu_va = reg->start_pfn << PAGE_SHIFT;
+	}
+
+	/* clear out private flags */
+	*flags &= ((1UL << BASE_MEM_FLAGS_NR_BITS) - 1);
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return 0;
+
+no_gpu_va:
+no_cookie:
+	kbase_gpu_vm_unlock(kctx);
+	kbase_mem_phy_alloc_put(reg->cpu_alloc);
+	kbase_mem_phy_alloc_put(reg->gpu_alloc);
+	kfree(reg);
+no_reg:
+bad_flags:
+	*gpu_va = 0;
+	*va_pages = 0;
+	*flags = 0;
+	return -ENOMEM;
+}
+
+int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages)
+{
+	struct tagged_addr *phy_pages;
+	u64 delta = new_pages - old_pages;
+	int ret = 0;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/* Map the new pages into the GPU */
+	phy_pages = kbase_get_gpu_phy_pages(reg);
+	ret = kbase_mmu_insert_pages(kctx->kbdev, &kctx->mmu,
+		reg->start_pfn + old_pages, phy_pages + old_pages, delta,
+		reg->flags, kctx->as_nr, reg->gpu_alloc->group_id);
+
+	return ret;
+}
+
+void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages)
+{
+	u64 gpu_va_start = reg->start_pfn;
+
+	if (new_pages == old_pages)
+		/* Nothing to do */
+		return;
+
+	unmap_mapping_range(kctx->filp->f_inode->i_mapping,
+			(gpu_va_start + new_pages) << PAGE_SHIFT,
+			(old_pages - new_pages) << PAGE_SHIFT, 1);
+}
+
+int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages)
+{
+	u64 delta = old_pages - new_pages;
+	int ret = 0;
+
+	ret = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
+			reg->start_pfn + new_pages, delta, kctx->as_nr);
+
+	return ret;
+}
+
+int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)
+{
+	u64 old_pages;
+	u64 delta;
+	int res = -EINVAL;
+	struct kbase_va_region *reg;
+	bool read_locked = false;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(gpu_addr != 0);
+
+	if (gpu_addr & ~PAGE_MASK) {
+		dev_warn(kctx->kbdev->dev, "kbase:mem_commit: gpu_addr: passed parameter is invalid");
+		return -EINVAL;
+	}
+
+	down_write(&current->mm->mmap_sem);
+	kbase_gpu_vm_lock(kctx);
+
+	/* Validate the region */
+	reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+	if (kbase_is_region_invalid_or_free(reg))
+		goto out_unlock;
+
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+
+	if (reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
+		goto out_unlock;
+
+	if (0 == (reg->flags & KBASE_REG_GROWABLE))
+		goto out_unlock;
+
+	/* Would overflow the VA region */
+	if (new_pages > reg->nr_pages)
+		goto out_unlock;
+
+	/* can't be mapped more than once on the GPU */
+	if (atomic_read(&reg->gpu_alloc->gpu_mappings) > 1)
+		goto out_unlock;
+	/* can't grow regions which are ephemeral */
+	if (reg->flags & KBASE_REG_DONT_NEED)
+		goto out_unlock;
+
+#ifdef CONFIG_MALI_MEMORY_FULLY_BACKED
+	/* Reject resizing commit size */
+	if (reg->flags & KBASE_REG_PF_GROW)
+		new_pages = reg->nr_pages;
+#endif
+
+	if (new_pages == reg->gpu_alloc->nents) {
+		/* no change */
+		res = 0;
+		goto out_unlock;
+	}
+
+	old_pages = kbase_reg_current_backed_size(reg);
+	if (new_pages > old_pages) {
+		delta = new_pages - old_pages;
+
+		/*
+		 * No update to the mm so downgrade the writer lock to a read
+		 * lock so other readers aren't blocked after this point.
+		 */
+		downgrade_write(&current->mm->mmap_sem);
+		read_locked = true;
+
+		/* Allocate some more pages */
+		if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, delta) != 0) {
+			res = -ENOMEM;
+			goto out_unlock;
+		}
+		if (reg->cpu_alloc != reg->gpu_alloc) {
+			if (kbase_alloc_phy_pages_helper(
+					reg->gpu_alloc, delta) != 0) {
+				res = -ENOMEM;
+				kbase_free_phy_pages_helper(reg->cpu_alloc,
+						delta);
+				goto out_unlock;
+			}
+		}
+
+		/* No update required for CPU mappings, that's done on fault. */
+
+		/* Update GPU mapping. */
+		res = kbase_mem_grow_gpu_mapping(kctx, reg,
+				new_pages, old_pages);
+
+		/* On error free the new pages */
+		if (res) {
+			kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+			if (reg->cpu_alloc != reg->gpu_alloc)
+				kbase_free_phy_pages_helper(reg->gpu_alloc,
+						delta);
+			res = -ENOMEM;
+			goto out_unlock;
+		}
+	} else {
+		delta = old_pages - new_pages;
+
+		/* Update all CPU mapping(s) */
+		kbase_mem_shrink_cpu_mapping(kctx, reg,
+				new_pages, old_pages);
+
+		/* Update the GPU mapping */
+		res = kbase_mem_shrink_gpu_mapping(kctx, reg,
+				new_pages, old_pages);
+		if (res) {
+			res = -ENOMEM;
+			goto out_unlock;
+		}
+
+		kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+		if (reg->cpu_alloc != reg->gpu_alloc)
+			kbase_free_phy_pages_helper(reg->gpu_alloc, delta);
+	}
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	if (read_locked)
+		up_read(&current->mm->mmap_sem);
+	else
+		up_write(&current->mm->mmap_sem);
+
+	return res;
+}
+
+static void kbase_cpu_vm_open(struct vm_area_struct *vma)
+{
+	struct kbase_cpu_mapping *map = vma->vm_private_data;
+
+	KBASE_DEBUG_ASSERT(map);
+	KBASE_DEBUG_ASSERT(map->count > 0);
+	/* non-atomic as we're under Linux' mm lock */
+	map->count++;
+}
+
+static void kbase_cpu_vm_close(struct vm_area_struct *vma)
+{
+	struct kbase_cpu_mapping *map = vma->vm_private_data;
+
+	KBASE_DEBUG_ASSERT(map);
+	KBASE_DEBUG_ASSERT(map->count > 0);
+
+	/* non-atomic as we're under Linux' mm lock */
+	if (--map->count)
+		return;
+
+	KBASE_DEBUG_ASSERT(map->kctx);
+	KBASE_DEBUG_ASSERT(map->alloc);
+
+	kbase_gpu_vm_lock(map->kctx);
+
+	if (map->free_on_close) {
+		KBASE_DEBUG_ASSERT((map->region->flags & KBASE_REG_ZONE_MASK) ==
+				KBASE_REG_ZONE_SAME_VA);
+		/* Avoid freeing memory on the process death which results in
+		 * GPU Page Fault. Memory will be freed in kbase_destroy_context
+		 */
+		if (!(current->flags & PF_EXITING))
+			kbase_mem_free_region(map->kctx, map->region);
+	}
+
+	list_del(&map->mappings_list);
+
+	kbase_va_region_alloc_put(map->kctx, map->region);
+	kbase_gpu_vm_unlock(map->kctx);
+
+	kbase_mem_phy_alloc_put(map->alloc);
+	kfree(map);
+}
+
+KBASE_EXPORT_TEST_API(kbase_cpu_vm_close);
+
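+/* An alias region is backed by nents runs of stride pages (see
+ * kbase_mem_alias()); walk *start_off down in whole strides to find the
+ * aliased entry, and the offset within it, that backs the requested range.
+ */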
+static struct kbase_aliased *get_aliased_alloc(struct vm_area_struct *vma,
+					struct kbase_va_region *reg,
+					pgoff_t *start_off,
+					size_t nr_pages)
+{
+	struct kbase_aliased *aliased =
+		reg->cpu_alloc->imported.alias.aliased;
+
+	if (!reg->cpu_alloc->imported.alias.stride ||
+			reg->nr_pages < (*start_off + nr_pages)) {
+		return NULL;
+	}
+
+	while (*start_off >= reg->cpu_alloc->imported.alias.stride) {
+		aliased++;
+		*start_off -= reg->cpu_alloc->imported.alias.stride;
+	}
+
+	if (!aliased->alloc) {
+		/* sink page not available for dumping map */
+		return NULL;
+	}
+
+	if ((*start_off + nr_pages) > aliased->length) {
+		/* not fully backed by physical pages */
+		return NULL;
+	}
+
+	return aliased;
+}
+
+#if (KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE)
+static vm_fault_t kbase_cpu_vm_fault(struct vm_area_struct *vma,
+			struct vm_fault *vmf)
+{
+#else
+static vm_fault_t kbase_cpu_vm_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+#endif
+	struct kbase_cpu_mapping *map = vma->vm_private_data;
+	pgoff_t map_start_pgoff;
+	pgoff_t fault_pgoff;
+	size_t i;
+	pgoff_t addr;
+	size_t nents;
+	struct tagged_addr *pages;
+	vm_fault_t ret = VM_FAULT_SIGBUS;
+	struct memory_group_manager_device *mgm_dev;
+
+	KBASE_DEBUG_ASSERT(map);
+	KBASE_DEBUG_ASSERT(map->count > 0);
+	KBASE_DEBUG_ASSERT(map->kctx);
+	KBASE_DEBUG_ASSERT(map->alloc);
+
+	map_start_pgoff = vma->vm_pgoff - map->region->start_pfn;
+
+	kbase_gpu_vm_lock(map->kctx);
+	if (unlikely(map->region->cpu_alloc->type == KBASE_MEM_TYPE_ALIAS)) {
+		struct kbase_aliased *aliased =
+		      get_aliased_alloc(vma, map->region, &map_start_pgoff, 1);
+
+		if (!aliased)
+			goto exit;
+
+		nents = aliased->length;
+		pages = aliased->alloc->pages + aliased->offset;
+	} else  {
+		nents = map->alloc->nents;
+		pages = map->alloc->pages;
+	}
+
+	fault_pgoff = map_start_pgoff + (vmf->pgoff - vma->vm_pgoff);
+
+	if (fault_pgoff >= nents)
+		goto exit;
+
+	/* Fault on access to DONT_NEED regions */
+	if (map->alloc->reg && (map->alloc->reg->flags & KBASE_REG_DONT_NEED))
+		goto exit;
+
+	/* We are inserting all valid pages from the start of CPU mapping and
+	 * not from the fault location (the mmap handler was previously doing
+	 * the same).
+	 */
+	i = map_start_pgoff;
+	addr = (pgoff_t)(vma->vm_start >> PAGE_SHIFT);
+	mgm_dev = map->kctx->kbdev->mgm_dev;
+	while (i < nents && (addr < vma->vm_end >> PAGE_SHIFT)) {
+
+		ret = mgm_dev->ops.mgm_vmf_insert_pfn_prot(mgm_dev,
+			map->alloc->group_id, vma, addr << PAGE_SHIFT,
+			PFN_DOWN(as_phys_addr_t(pages[i])), vma->vm_page_prot);
+
+		if (ret != VM_FAULT_NOPAGE)
+			goto exit;
+
+		i++; addr++;
+	}
+
+exit:
+	kbase_gpu_vm_unlock(map->kctx);
+	return ret;
+}
+
+const struct vm_operations_struct kbase_vm_ops = {
+	.open  = kbase_cpu_vm_open,
+	.close = kbase_cpu_vm_close,
+	.fault = kbase_cpu_vm_fault
+};
+
+static int kbase_cpu_mmap(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		struct vm_area_struct *vma,
+		void *kaddr,
+		size_t nr_pages,
+		unsigned long aligned_offset,
+		int free_on_close)
+{
+	struct kbase_cpu_mapping *map;
+	int err = 0;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+
+	if (!map) {
+		WARN_ON(1);
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * VM_DONTCOPY - don't make this mapping available in fork'ed processes
+	 * VM_DONTEXPAND - disable mremap on this region
+	 * VM_IO - disables paging
+	 * VM_DONTDUMP - Don't include in core dumps (3.7+ only)
+	 * VM_MIXEDMAP - Support mixing struct page*s and raw pfns.
+	 *               This is needed to support using the dedicated and
+	 *               the OS based memory backends together.
+	 */
+	/*
+	 * This will need updating to propagate coherency flags
+	 * See MIDBASE-1057
+	 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO;
+#else
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
+#endif
+	vma->vm_ops = &kbase_vm_ops;
+	vma->vm_private_data = map;
+
+	if (reg->cpu_alloc->type == KBASE_MEM_TYPE_ALIAS && nr_pages) {
+		pgoff_t rel_pgoff = vma->vm_pgoff - reg->start_pfn +
+					(aligned_offset >> PAGE_SHIFT);
+		struct kbase_aliased *aliased =
+			get_aliased_alloc(vma, reg, &rel_pgoff, nr_pages);
+
+		if (!aliased) {
+			err = -EINVAL;
+			kfree(map);
+			goto out;
+		}
+	}
+
+	if (!(reg->flags & KBASE_REG_CPU_CACHED) &&
+	    (reg->flags & (KBASE_REG_CPU_WR|KBASE_REG_CPU_RD))) {
+		/* We can't map vmalloc'd memory uncached.
+		 * Other memory will have been returned from
+		 * kbase_mem_pool which would be
+		 * suitable for mapping uncached.
+		 */
+		BUG_ON(kaddr);
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	}
+
+	if (!kaddr) {
+		vma->vm_flags |= VM_PFNMAP;
+	} else {
+		WARN_ON(aligned_offset);
+		/* MIXEDMAP so we can vfree the kaddr early and not track it after map time */
+		vma->vm_flags |= VM_MIXEDMAP;
+		/* vmalloc remapping is easy... */
+		err = remap_vmalloc_range(vma, kaddr, 0);
+		WARN_ON(err);
+	}
+
+	if (err) {
+		kfree(map);
+		goto out;
+	}
+
+	map->region = kbase_va_region_alloc_get(kctx, reg);
+	map->free_on_close = free_on_close;
+	map->kctx = kctx;
+	map->alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+	map->count = 1; /* start with one ref */
+
+	if (reg->flags & KBASE_REG_CPU_CACHED)
+		map->alloc->properties |= KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
+
+	list_add(&map->mappings_list, &map->alloc->mappings);
+
+ out:
+	return err;
+}
+
+#ifdef CONFIG_MALI_VECTOR_DUMP
+static void kbase_free_unused_jit_allocations(struct kbase_context *kctx)
+{
+	/* Free all cached/unused JIT allocations as their contents are not
+	 * really needed for the replay. The GPU writes to them would already
+	 * have been captured through the GWT mechanism.
+	 * This considerably reduces the size of mmu-snapshot-file and it also
+	 * helps avoid segmentation fault issue during vector dumping of
+	 * complex contents when the unused JIT allocations are accessed to
+	 * dump their contents (as they appear in the page tables snapshot)
+	 * but they got freed by the shrinker under low memory scenarios
+	 * (which do occur with complex contents).
+	 */
+	while (kbase_jit_evict(kctx))
+		;
+}
+#endif
+
+static int kbase_mmu_dump_mmap(struct kbase_context *kctx,
+			struct vm_area_struct *vma,
+			struct kbase_va_region **const reg,
+			void **const kmap_addr)
+{
+	struct kbase_va_region *new_reg;
+	void *kaddr;
+	u32 nr_pages;
+	size_t size;
+	int err = 0;
+
+	dev_dbg(kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
+	size = (vma->vm_end - vma->vm_start);
+	nr_pages = size >> PAGE_SHIFT;
+
+#ifdef CONFIG_MALI_VECTOR_DUMP
+	kbase_free_unused_jit_allocations(kctx);
+#endif
+
+	kaddr = kbase_mmu_dump(kctx, nr_pages);
+
+	if (!kaddr) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	new_reg = kbase_alloc_free_region(&kctx->reg_rbtree_same, 0, nr_pages,
+			KBASE_REG_ZONE_SAME_VA);
+	if (!new_reg) {
+		err = -ENOMEM;
+		WARN_ON(1);
+		goto out;
+	}
+
+	new_reg->cpu_alloc = kbase_alloc_create(kctx, 0, KBASE_MEM_TYPE_RAW,
+		BASE_MEM_GROUP_DEFAULT);
+	if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) {
+		err = -ENOMEM;
+		new_reg->cpu_alloc = NULL;
+		WARN_ON(1);
+		goto out_no_alloc;
+	}
+
+	new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc);
+
+	new_reg->flags &= ~KBASE_REG_FREE;
+	new_reg->flags |= KBASE_REG_CPU_CACHED;
+	if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) {
+		err = -ENOMEM;
+		WARN_ON(1);
+		goto out_va_region;
+	}
+
+	*kmap_addr = kaddr;
+	*reg = new_reg;
+
+	dev_dbg(kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
+	return 0;
+
+out_no_alloc:
+out_va_region:
+	kbase_free_alloced_region(new_reg);
+out:
+	return err;
+}
+
+void kbase_os_mem_map_lock(struct kbase_context *kctx)
+{
+	struct mm_struct *mm = current->mm;
+	(void)kctx;
+	down_read(&mm->mmap_sem);
+}
+
+void kbase_os_mem_map_unlock(struct kbase_context *kctx)
+{
+	struct mm_struct *mm = current->mm;
+	(void)kctx;
+	up_read(&mm->mmap_sem);
+}
+
+static int kbasep_reg_mmap(struct kbase_context *kctx,
+			   struct vm_area_struct *vma,
+			   struct kbase_va_region **regm,
+			   size_t *nr_pages, size_t *aligned_offset)
+{
+	int cookie = vma->vm_pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
+	struct kbase_va_region *reg;
+	int err = 0;
+
+	*aligned_offset = 0;
+
+	dev_dbg(kctx->kbdev->dev, "in kbasep_reg_mmap\n");
+
+	/* SAME_VA stuff, fetch the right region */
+	reg = kctx->pending_regions[cookie];
+	if (!reg) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if ((reg->flags & KBASE_REG_GPU_NX) && (reg->nr_pages != *nr_pages)) {
+		/* incorrect mmap size */
+		/* leave the cookie for a potential later
+		 * mapping, or to be reclaimed later when the
+		 * context is freed */
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if ((vma->vm_flags & VM_READ && !(reg->flags & KBASE_REG_CPU_RD)) ||
+	    (vma->vm_flags & VM_WRITE && !(reg->flags & KBASE_REG_CPU_WR))) {
+		/* VM flags inconsistent with region flags */
+		err = -EPERM;
+		dev_err(kctx->kbdev->dev, "%s:%d inconsistent VM flags\n",
+							__FILE__, __LINE__);
+		goto out;
+	}
+
+	/* adjust down nr_pages to what we have physically */
+	*nr_pages = kbase_reg_current_backed_size(reg);
+
+	if (kbase_gpu_mmap(kctx, reg, vma->vm_start + *aligned_offset,
+						reg->nr_pages, 1) != 0) {
+		dev_err(kctx->kbdev->dev, "%s:%d\n", __FILE__, __LINE__);
+		/* Unable to map in GPU space. */
+		WARN_ON(1);
+		err = -ENOMEM;
+		goto out;
+	}
+	/* no need for the cookie anymore */
+	kctx->pending_regions[cookie] = NULL;
+	kctx->cookies |= (1UL << cookie);
+
+	/*
+	 * Overwrite the offset with the region start_pfn, so we effectively
+	 * map from offset 0 in the region. However subtract the aligned
+	 * offset so that when user space trims the mapping the beginning of
+	 * the trimmed VMA has the correct vm_pgoff.
+	 */
+	vma->vm_pgoff = reg->start_pfn - ((*aligned_offset)>>PAGE_SHIFT);
+out:
+	*regm = reg;
+	dev_dbg(kctx->kbdev->dev, "kbasep_reg_mmap done\n");
+
+	return err;
+}
+
+int kbase_context_mmap(struct kbase_context *const kctx,
+	struct vm_area_struct *const vma)
+{
+	struct kbase_va_region *reg = NULL;
+	void *kaddr = NULL;
+	size_t nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	int err = 0;
+	int free_on_close = 0;
+	struct device *dev = kctx->kbdev->dev;
+	size_t aligned_offset = 0;
+
+	dev_dbg(dev, "kbase_mmap\n");
+
+	if (!(vma->vm_flags & VM_READ))
+		vma->vm_flags &= ~VM_MAYREAD;
+	if (!(vma->vm_flags & VM_WRITE))
+		vma->vm_flags &= ~VM_MAYWRITE;
+
+	if (0 == nr_pages) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (!(vma->vm_flags & VM_SHARED)) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	kbase_gpu_vm_lock(kctx);
+
+	if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MAP_TRACKING_HANDLE)) {
+		/* The non-mapped tracking helper page */
+		err = kbase_tracking_page_setup(kctx, vma);
+		goto out_unlock;
+	}
+
+	/* if not the MTP, verify that the MTP has been mapped */
+	rcu_read_lock();
+	/* catches both the case where the special page isn't present
+	 * and the case where we've forked */
+	if (rcu_dereference(kctx->process_mm) != current->mm) {
+		err = -EINVAL;
+		rcu_read_unlock();
+		goto out_unlock;
+	}
+	rcu_read_unlock();
+
+	switch (vma->vm_pgoff) {
+	case PFN_DOWN(BASEP_MEM_INVALID_HANDLE):
+	case PFN_DOWN(BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE):
+		/* Illegal handle for direct map */
+		err = -EINVAL;
+		goto out_unlock;
+	case PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE):
+		/* MMU dump */
+		err = kbase_mmu_dump_mmap(kctx, vma, &reg, &kaddr);
+		if (0 != err)
+			goto out_unlock;
+		/* free the region on munmap */
+		free_on_close = 1;
+		break;
+	case PFN_DOWN(BASE_MEM_COOKIE_BASE) ...
+	     PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) - 1: {
+		err = kbasep_reg_mmap(kctx, vma, &reg, &nr_pages,
+							&aligned_offset);
+		if (0 != err)
+			goto out_unlock;
+		/* free the region on munmap */
+		free_on_close = 1;
+		break;
+	}
+	default: {
+		reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+					(u64)vma->vm_pgoff << PAGE_SHIFT);
+
+		if (!kbase_is_region_invalid_or_free(reg)) {
+			/* will this mapping overflow the size of the region? */
+			if (nr_pages > (reg->nr_pages -
+					(vma->vm_pgoff - reg->start_pfn))) {
+				err = -ENOMEM;
+				goto out_unlock;
+			}
+
+			if ((vma->vm_flags & VM_READ &&
+					!(reg->flags & KBASE_REG_CPU_RD)) ||
+					(vma->vm_flags & VM_WRITE &&
+					!(reg->flags & KBASE_REG_CPU_WR))) {
+				/* VM flags inconsistent with region flags */
+				err = -EPERM;
+				dev_err(dev, "%s:%d inconsistent VM flags\n",
+					__FILE__, __LINE__);
+				goto out_unlock;
+			}
+
+			if (KBASE_MEM_TYPE_IMPORTED_UMM ==
+							reg->cpu_alloc->type) {
+				if (0 != (vma->vm_pgoff - reg->start_pfn)) {
+					err = -EINVAL;
+					dev_warn(dev, "%s:%d attempt to do a partial map in a dma_buf: non-zero offset to dma_buf mapping!\n",
+						__FILE__, __LINE__);
+					goto out_unlock;
+				}
+				err = dma_buf_mmap(
+					reg->cpu_alloc->imported.umm.dma_buf,
+					vma, vma->vm_pgoff - reg->start_pfn);
+				goto out_unlock;
+			}
+
+			if (reg->cpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
+				/* initial params check for aliased dumping map */
+				if (nr_pages > reg->gpu_alloc->imported.alias.stride ||
+					!reg->gpu_alloc->imported.alias.stride ||
+					!nr_pages) {
+					err = -EINVAL;
+					dev_warn(dev, "mmap aliased: invalid params!\n");
+					goto out_unlock;
+				}
+			} else if (reg->cpu_alloc->nents <
+					(vma->vm_pgoff - reg->start_pfn + nr_pages)) {
+				/* limit what we map to the amount currently backed */
+				if ((vma->vm_pgoff - reg->start_pfn) >= reg->cpu_alloc->nents)
+					nr_pages = 0;
+				else
+					nr_pages = reg->cpu_alloc->nents - (vma->vm_pgoff - reg->start_pfn);
+			}
+		} else {
+			err = -ENOMEM;
+			goto out_unlock;
+		}
+	} /* default */
+	} /* switch */
+
+	err = kbase_cpu_mmap(kctx, reg, vma, kaddr, nr_pages, aligned_offset,
+			free_on_close);
+
+	if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) {
+		/* MMU dump - userspace should now have a reference on
+		 * the pages, so we can now free the kernel mapping */
+		vfree(kaddr);
+	}
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+out:
+	if (err)
+		dev_err(dev, "mmap failed %d\n", err);
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_context_mmap);
+
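+/*
+ * Worked example (assuming PAGE_SIZE == 4096): a mapping with
+ * offset_in_page == 100 and size == 6000 gives page_count == 2; the first
+ * page syncs 3996 bytes starting at offset 100 and the last page syncs
+ * the remaining 2004 bytes from offset 0.
+ */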
+void kbase_sync_mem_regions(struct kbase_context *kctx,
+		struct kbase_vmap_struct *map, enum kbase_sync_type dest)
+{
+	size_t i;
+	off_t const offset = map->offset_in_page;
+	size_t const page_count = PFN_UP(offset + map->size);
+
+	/* Sync first page */
+	size_t sz = MIN(((size_t) PAGE_SIZE - offset), map->size);
+	struct tagged_addr cpu_pa = map->cpu_pages[0];
+	struct tagged_addr gpu_pa = map->gpu_pages[0];
+
+	kbase_sync_single(kctx, cpu_pa, gpu_pa, offset, sz, dest);
+
+	/* Sync middle pages (if any) */
+	for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+		cpu_pa = map->cpu_pages[i];
+		gpu_pa = map->gpu_pages[i];
+		kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, PAGE_SIZE, dest);
+	}
+
+	/* Sync last page (if any) */
+	if (page_count > 1) {
+		cpu_pa = map->cpu_pages[page_count - 1];
+		gpu_pa = map->gpu_pages[page_count - 1];
+		sz = ((offset + map->size - 1) & ~PAGE_MASK) + 1;
+		kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, sz, dest);
+	}
+}
+
+static int kbase_vmap_phy_pages(struct kbase_context *kctx,
+		struct kbase_va_region *reg, u64 offset_bytes, size_t size,
+		struct kbase_vmap_struct *map)
+{
+	unsigned long page_index;
+	unsigned int offset_in_page = offset_bytes & ~PAGE_MASK;
+	size_t page_count = PFN_UP(offset_in_page + size);
+	struct tagged_addr *page_array;
+	struct page **pages;
+	void *cpu_addr = NULL;
+	pgprot_t prot;
+	size_t i;
+
+	if (!size || !map || !reg->cpu_alloc || !reg->gpu_alloc)
+		return -EINVAL;
+
+	/* check if page_count calculation will wrap */
+	if (size > ((size_t)-1 / PAGE_SIZE))
+		return -EINVAL;
+
+	page_index = offset_bytes >> PAGE_SHIFT;
+
+	/* check if page_index + page_count will wrap */
+	if (-1UL - page_count < page_index)
+		return -EINVAL;
+
+	if (page_index + page_count > kbase_reg_current_backed_size(reg))
+		return -ENOMEM;
+
+	if (reg->flags & KBASE_REG_DONT_NEED)
+		return -EINVAL;
+
+	prot = PAGE_KERNEL;
+	if (!(reg->flags & KBASE_REG_CPU_CACHED)) {
+		/* Map uncached */
+		prot = pgprot_writecombine(prot);
+	}
+
+	page_array = kbase_get_cpu_phy_pages(reg);
+	if (!page_array)
+		return -ENOMEM;
+
+	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	for (i = 0; i < page_count; i++)
+		pages[i] = as_page(page_array[page_index + i]);
+
+	/* Note: enforcing a RO prot_request onto prot is not done, since:
+	 * - CPU-arch-specific integration required
+	 * - kbase_vmap() requires no access checks to be made/enforced */
+
+	cpu_addr = vmap(pages, page_count, VM_MAP, prot);
+
+	kfree(pages);
+
+	if (!cpu_addr)
+		return -ENOMEM;
+
+	map->offset_in_page = offset_in_page;
+	map->cpu_alloc = reg->cpu_alloc;
+	map->cpu_pages = &kbase_get_cpu_phy_pages(reg)[page_index];
+	map->gpu_alloc = reg->gpu_alloc;
+	map->gpu_pages = &kbase_get_gpu_phy_pages(reg)[page_index];
+	map->addr = (void *)((uintptr_t)cpu_addr + offset_in_page);
+	map->size = size;
+	map->sync_needed = ((reg->flags & KBASE_REG_CPU_CACHED) != 0) &&
+		!kbase_mem_is_imported(map->gpu_alloc->type);
+
+	if (map->sync_needed)
+		kbase_sync_mem_regions(kctx, map, KBASE_SYNC_TO_CPU);
+
+	return 0;
+}
+
+void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+		      unsigned long prot_request, struct kbase_vmap_struct *map)
+{
+	struct kbase_va_region *reg;
+	void *addr = NULL;
+	u64 offset_bytes;
+	struct kbase_mem_phy_alloc *cpu_alloc;
+	struct kbase_mem_phy_alloc *gpu_alloc;
+	int err;
+
+	kbase_gpu_vm_lock(kctx);
+
+	reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+			gpu_addr);
+	if (kbase_is_region_invalid_or_free(reg))
+		goto out_unlock;
+
+	/* check access permissions can be satisfied
+	 * Intended only for checking KBASE_REG_{CPU,GPU}_{RD,WR}
+	 */
+	if ((reg->flags & prot_request) != prot_request)
+		goto out_unlock;
+
+	offset_bytes = gpu_addr - (reg->start_pfn << PAGE_SHIFT);
+	cpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+	gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+	err = kbase_vmap_phy_pages(kctx, reg, offset_bytes, size, map);
+	if (err < 0)
+		goto fail_vmap_phy_pages;
+
+	addr = map->addr;
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return addr;
+
+fail_vmap_phy_pages:
+	kbase_gpu_vm_unlock(kctx);
+	kbase_mem_phy_alloc_put(cpu_alloc);
+	kbase_mem_phy_alloc_put(gpu_alloc);
+
+	return NULL;
+}
+
+void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+		struct kbase_vmap_struct *map)
+{
+	/* 0 is specified for prot_request to indicate no access checks should
+	 * be made.
+	 *
+	 * As mentioned in kbase_vmap_prot() this means that a kernel-side
+	 * CPU-RO mapping is not enforced to allow this to work */
+	return kbase_vmap_prot(kctx, gpu_addr, size, 0u, map);
+}
+KBASE_EXPORT_TEST_API(kbase_vmap);
+
+static void kbase_vunmap_phy_pages(struct kbase_context *kctx,
+		struct kbase_vmap_struct *map)
+{
+	void *addr = (void *)((uintptr_t)map->addr & PAGE_MASK);
+	vunmap(addr);
+
+	if (map->sync_needed)
+		kbase_sync_mem_regions(kctx, map, KBASE_SYNC_TO_DEVICE);
+
+	map->offset_in_page = 0;
+	map->cpu_pages = NULL;
+	map->gpu_pages = NULL;
+	map->addr = NULL;
+	map->size = 0;
+	map->sync_needed = false;
+}
+
+void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map)
+{
+	kbase_vunmap_phy_pages(kctx, map);
+	map->cpu_alloc = kbase_mem_phy_alloc_put(map->cpu_alloc);
+	map->gpu_alloc = kbase_mem_phy_alloc_put(map->gpu_alloc);
+}
+KBASE_EXPORT_TEST_API(kbase_vunmap);
+
+void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages)
+{
+	struct mm_struct *mm;
+
+	rcu_read_lock();
+	mm = rcu_dereference(kctx->process_mm);
+	if (mm) {
+		atomic_add(pages, &kctx->nonmapped_pages);
+#ifdef SPLIT_RSS_COUNTING
+		add_mm_counter(mm, MM_FILEPAGES, pages);
+#else
+		spin_lock(&mm->page_table_lock);
+		add_mm_counter(mm, MM_FILEPAGES, pages);
+		spin_unlock(&mm->page_table_lock);
+#endif
+	}
+	rcu_read_unlock();
+}
+
+static void kbasep_os_process_page_usage_drain(struct kbase_context *kctx)
+{
+	int pages;
+	struct mm_struct *mm;
+
+	spin_lock(&kctx->mm_update_lock);
+	mm = rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock));
+	if (!mm) {
+		spin_unlock(&kctx->mm_update_lock);
+		return;
+	}
+
+	rcu_assign_pointer(kctx->process_mm, NULL);
+	spin_unlock(&kctx->mm_update_lock);
+	synchronize_rcu();
+
+	pages = atomic_xchg(&kctx->nonmapped_pages, 0);
+#ifdef SPLIT_RSS_COUNTING
+	add_mm_counter(mm, MM_FILEPAGES, -pages);
+#else
+	spin_lock(&mm->page_table_lock);
+	add_mm_counter(mm, MM_FILEPAGES, -pages);
+	spin_unlock(&mm->page_table_lock);
+#endif
+}
+
+static void kbase_special_vm_close(struct vm_area_struct *vma)
+{
+	struct kbase_context *kctx;
+
+	kctx = vma->vm_private_data;
+	kbasep_os_process_page_usage_drain(kctx);
+}
+
+static const struct vm_operations_struct kbase_vm_special_ops = {
+	.close = kbase_special_vm_close,
+};
+
+static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma)
+{
+	/* check that this is the only tracking page */
+	spin_lock(&kctx->mm_update_lock);
+	if (rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock))) {
+		spin_unlock(&kctx->mm_update_lock);
+		return -EFAULT;
+	}
+
+	rcu_assign_pointer(kctx->process_mm, current->mm);
+
+	spin_unlock(&kctx->mm_update_lock);
+
+	/* no real access */
+	vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+#else
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
+#endif
+	vma->vm_ops = &kbase_vm_special_ops;
+	vma->vm_private_data = kctx;
+
+	return 0;
+}
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h
new file mode 100644
index 0000000..02f1c3b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h
@@ -0,0 +1,469 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem_linux.h
+ * Base kernel memory APIs, Linux implementation.
+ */
+
+#ifndef _KBASE_MEM_LINUX_H_
+#define _KBASE_MEM_LINUX_H_
+
+/** A HWC dump mapping */
+struct kbase_hwc_dma_mapping {
+	void       *cpu_va;
+	dma_addr_t  dma_pa;
+	size_t      size;
+};
+
+/**
+ * kbase_mem_alloc - Create a new allocation for GPU
+ *
+ * @kctx:         The kernel context
+ * @va_pages:     The number of pages of virtual address space to reserve
+ * @commit_pages: The number of physical pages to allocate upfront
+ * @extent:       The number of extra pages to allocate on each GPU fault which
+ *                grows the region.
+ * @flags:        bitmask of BASE_MEM_* flags to convey special requirements &
+ *                properties for the new allocation.
+ * @gpu_va:       Start address of the memory region which was allocated from GPU
+ *                virtual address space.
+ *
+ * Return: Pointer to the new region on success, NULL on failure
+ */
+struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
+		u64 va_pages, u64 commit_pages, u64 extent, u64 *flags,
+		u64 *gpu_va);
+
+/**
+ * kbase_mem_query - Query properties of a GPU memory region
+ *
+ * @kctx:     The kernel context
+ * @gpu_addr: A GPU address contained within the memory region
+ * @query:    The type of query, from KBASE_MEM_QUERY_* flags, which could be
+ *            regarding the amount of backing physical memory allocated so far
+ *            for the region or the size of the region or the flags associated
+ *            with the region.
+ * @out:      Pointer to the location to store the result of query.
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, u64 query,
+		u64 *const out);
+
+/**
+ * kbase_mem_import - Import the external memory for use by the GPU
+ *
+ * @kctx:     The kernel context
+ * @type:     Type of external memory
+ * @phandle:  Handle to the external memory interpreted as per the type.
+ * @padding:  Amount of extra VA pages to append to the imported buffer
+ * @gpu_va:   GPU address assigned to the imported external memory
+ * @va_pages: Size of the memory region reserved from the GPU address space
+ * @flags:    bitmask of BASE_MEM_* flags to convey special requirements &
+ *            properties for the new allocation representing the external
+ *            memory.
+ * Return: 0 on success or error code
+ */
+int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
+		void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages,
+		u64 *flags);
+
+/**
+ * kbase_mem_alias - Create a new allocation for GPU, aliasing one or more
+ *                   memory regions
+ *
+ * @kctx:      The kernel context
+ * @flags:     bitmask of BASE_MEM_* flags.
+ * @stride:    Distance in pages between the start of each aliased region
+ * @nents:     The number of regions to pack together into the alias
+ * @ai:        Pointer to the struct containing the memory aliasing info
+ * @num_pages: Number of pages the alias will cover
+ *
+ * Return: 0 on failure, otherwise the GPU virtual address of the alias
+ */
+u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
+		u64 nents, struct base_mem_aliasing_info *ai, u64 *num_pages);
+
+/**
+ * kbase_mem_flags_change - Change the flags for a memory region
+ *
+ * @kctx:     The kernel context
+ * @gpu_addr: A GPU address contained within the memory region to modify.
+ * @flags:    The new flags to set
+ * @mask:     Mask of the flags, from BASE_MEM_*, to modify.
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr,
+		unsigned int flags, unsigned int mask);
+
+/**
+ * kbase_mem_commit - Change the physical backing size of a region
+ *
+ * @kctx: The kernel context
+ * @gpu_addr: Handle to the memory region
+ * @new_pages: Number of physical pages to back the region with
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
+
+/**
+ * kbase_context_mmap - Memory map method, gets invoked when mmap system call is
+ *                      issued on device file /dev/malixx.
+ * @kctx: The kernel context
+ * @vma:  Pointer to the struct containing the info where the GPU allocation
+ *        will be mapped in virtual address space of CPU.
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_context_mmap(struct kbase_context *kctx, struct vm_area_struct *vma);
+
+/**
+ * kbase_mem_evictable_init - Initialize the Ephemeral memory eviction
+ * mechanism.
+ * @kctx: The kbase context to initialize.
+ *
+ * Return: Zero on success or -errno on failure.
+ */
+int kbase_mem_evictable_init(struct kbase_context *kctx);
+
+/**
+ * kbase_mem_evictable_deinit - De-initialize the Ephemeral memory eviction
+ * mechanism.
+ * @kctx: The kbase context to de-initialize.
+ */
+void kbase_mem_evictable_deinit(struct kbase_context *kctx);
+
+/**
+ * kbase_mem_grow_gpu_mapping - Grow the GPU mapping of an allocation
+ * @kctx:      Context the region belongs to
+ * @reg:       The GPU region
+ * @new_pages: The number of pages after the grow
+ * @old_pages: The number of pages before the grow
+ *
+ * Return: 0 on success, -errno on error.
+ *
+ * Expand the GPU mapping to encompass the new physical pages which have
+ * been added to the allocation.
+ *
+ * Note: Caller must be holding the region lock.
+ */
+int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_mem_evictable_make - Make a physical allocation eligible for eviction
+ * @gpu_alloc: The physical allocation to make evictable
+ *
+ * Return: 0 on success, -errno on error.
+ *
+ * Take the provided region and make all the physical pages within it
+ * reclaimable by the kernel, updating the per-process VM stats as well.
+ * Remove any CPU mappings (as these can't be removed in the shrinker callback
+ * as mmap_sem might already be taken) but leave the GPU mapping intact as
+ * and until the shrinker reclaims the allocation.
+ *
+ * Note: Must be called with the region lock of the containing context.
+ */
+int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc);
+
+/**
+ * kbase_mem_evictable_unmake - Remove a physical allocation's eligibility for
+ * eviction.
+ * @alloc: The physical allocation to remove eviction eligibility from.
+ *
+ * Return: True if the allocation had its backing restored and false if
+ * it hasn't.
+ *
+ * Make the physical pages in the region no longer reclaimable and update the
+ * per-process stats, if the shrinker has already evicted the memory then
+ * re-allocate it if the region is still alive.
+ *
+ * Note: Must be called with the region lock of the containing context.
+ */
+bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *alloc);
+
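+/**
+ * struct kbase_vmap_struct - Kernel-side CPU mapping of a GPU VA range,
+ *                            as set up by kbase_vmap()/kbase_vmap_prot()
+ * @offset_in_page: Offset of the mapped range within its first page
+ * @cpu_alloc:      Reference held on the backing CPU physical allocation
+ * @gpu_alloc:      Reference held on the backing GPU physical allocation
+ * @cpu_pages:      CPU physical pages backing the mapped range
+ * @gpu_pages:      GPU physical pages backing the mapped range
+ * @addr:           Kernel virtual address of the start of the range
+ * @size:           Size of the mapped range, in bytes
+ * @sync_needed:    True if cache maintenance is needed around accesses
+ */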
+struct kbase_vmap_struct {
+	off_t offset_in_page;
+	struct kbase_mem_phy_alloc *cpu_alloc;
+	struct kbase_mem_phy_alloc *gpu_alloc;
+	struct tagged_addr *cpu_pages;
+	struct tagged_addr *gpu_pages;
+	void *addr;
+	size_t size;
+	bool sync_needed;
+};
+
+/**
+ * kbase_vmap_prot - Map a GPU VA range into the kernel safely, only if the
+ * requested access permissions are supported
+ * @kctx:         Context the VA range belongs to
+ * @gpu_addr:     Start address of VA range
+ * @size:         Size of VA range
+ * @prot_request: Flags indicating how the caller will then access the memory
+ * @map:          Structure to be given to kbase_vunmap() on freeing
+ *
+ * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
+ *
+ * Map a GPU VA Range into the kernel. The VA range must be contained within a
+ * GPU memory region. Appropriate CPU cache-flushing operations are made as
+ * required, dependent on the CPU mapping for the memory region.
+ *
+ * This is safer than using kmap() on the pages directly,
+ * because the pages here are refcounted to prevent freeing (and hence reuse
+ * elsewhere in the system) until an kbase_vunmap()
+ *
+ * The flags in @prot_request should use KBASE_REG_{CPU,GPU}_{RD,WR}, to check
+ * whether the region should allow the intended access, and return an error if
+ * disallowed. This is essential for security of imported memory, particularly
+ * a user buf from SHM mapped into the process as RO. In that case, write
+ * access must be checked if the intention is for kernel to write to the
+ * memory.
+ *
+ * The checks are also there to help catch access errors on memory where
+ * security is not a concern: imported memory that is always RW, and memory
+ * that was allocated and owned by the process attached to @kctx. In this case,
+ * it helps to identify memory that was mapped with the wrong access type.
+ *
+ * Note: KBASE_REG_GPU_{RD,WR} flags are currently supported for legacy cases
+ * where either the security of memory is solely dependent on those flags, or
+ * when userspace code was expecting only the GPU to access the memory (e.g. HW
+ * workarounds).
+ *
+ * All cache maintenance operations shall be ignored if the
+ * memory region has been imported.
+ *
+ */
+void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+		      unsigned long prot_request, struct kbase_vmap_struct *map);
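+
+/*
+ * A minimal usage sketch (illustrative only), mapping a buffer the kernel
+ * intends to read and requiring the region to be CPU-readable:
+ *
+ *	struct kbase_vmap_struct map;
+ *	u32 *ptr = kbase_vmap_prot(kctx, gpu_addr, size,
+ *				   KBASE_REG_CPU_RD, &map);
+ *
+ *	if (ptr) {
+ *		...read through ptr...
+ *		kbase_vunmap(kctx, &map);
+ *	}
+ */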
+
+/**
+ * kbase_vmap - Map a GPU VA range into the kernel safely
+ * @kctx:     Context the VA range belongs to
+ * @gpu_addr: Start address of VA range
+ * @size:     Size of VA range
+ * @map:      Structure to be given to kbase_vunmap() on freeing
+ *
+ * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
+ *
+ * Map a GPU VA Range into the kernel. The VA range must be contained within a
+ * GPU memory region. Appropriate CPU cache-flushing operations are made as
+ * required, dependent on the CPU mapping for the memory region.
+ *
+ * This is safer than using kmap() on the pages directly,
+ * because the pages here are refcounted to prevent freeing (and hence reuse
+ * elsewhere in the system) until a kbase_vunmap() call.
+ *
+ * kbase_vmap_prot() should be used in preference, since kbase_vmap() makes no
+ * checks to ensure the security of e.g. imported user bufs from RO SHM.
+ *
+ * Note: All cache maintenance operations shall be ignored if the memory region
+ * has been imported.
+ */
+void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+		struct kbase_vmap_struct *map);
+
+/**
+ * kbase_vunmap - Unmap a GPU VA range from the kernel
+ * @kctx: Context the VA range belongs to
+ * @map:  Structure describing the mapping from the corresponding kbase_vmap()
+ *        call
+ *
+ * Unmaps a GPU VA range from the kernel, given its @map structure obtained
+ * from kbase_vmap(). Appropriate CPU cache-flushing operations are made as
+ * required, dependent on the CPU mapping for the memory region.
+ *
+ * The reference taken on pages during kbase_vmap() is released.
+ *
+ * Note: All cache maintenance operations shall be ignored if the memory region
+ * has been imported.
+ */
+void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map);
+
+extern const struct vm_operations_struct kbase_vm_ops;
+
+/**
+ * kbase_sync_mem_regions - Perform the cache maintenance for the kernel mode
+ *                          CPU mapping.
+ * @kctx: Context the CPU mapping belongs to.
+ * @map:  Structure describing the CPU mapping, setup previously by the
+ *        kbase_vmap() call.
+ * @dest: Indicates the type of maintenance required (i.e. flush or invalidate)
+ *
+ * Note: The caller shall ensure that CPU mapping is not revoked & remains
+ * active whilst the maintenance is in progress.
+ */
+void kbase_sync_mem_regions(struct kbase_context *kctx,
+		struct kbase_vmap_struct *map, enum kbase_sync_type dest);
+
+/**
+ * kbase_mem_shrink_cpu_mapping - Shrink the CPU mapping(s) of an allocation
+ * @kctx:      Context the region belongs to
+ * @reg:       The GPU region
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Shrink (or completely remove) all CPU mappings which reference the shrunk
+ * part of the allocation.
+ */
+void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_mem_shrink_gpu_mapping - Shrink the GPU mapping of an allocation
+ * @kctx:      Context the region belongs to
+ * @reg:       The GPU region or NULL if there isn't one
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Return: 0 on success, negative -errno on error
+ *
+ * Unmap the shrunk pages from the GPU mapping. Note that the size of the region
+ * itself is unmodified as we still need to reserve the VA, only the page tables
+ * will be modified by this function.
+ */
+int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_phy_alloc_mapping_term - Terminate the kernel side mapping of a
+ *                                physical allocation
+ * @kctx:  The kernel base context associated with the mapping
+ * @alloc: Pointer to the allocation to terminate
+ *
+ * This function will unmap the kernel mapping, and free any structures used to
+ * track it.
+ */
+void kbase_phy_alloc_mapping_term(struct kbase_context *kctx,
+		struct kbase_mem_phy_alloc *alloc);
+
+/**
+ * kbase_phy_alloc_mapping_get - Get a kernel-side CPU pointer to the permanent
+ *                               mapping of a physical allocation
+ * @kctx:             The kernel base context @gpu_addr will be looked up in
+ * @gpu_addr:         The gpu address to lookup for the kernel-side CPU mapping
+ * @out_kern_mapping: Pointer to storage for a struct kbase_vmap_struct pointer
+ *                    which will be used for a call to
+ *                    kbase_phy_alloc_mapping_put()
+ *
+ * Return: Pointer to a kernel-side accessible location that directly
+ *         corresponds to @gpu_addr, or NULL on failure
+ *
+ * Looks up @gpu_addr to retrieve the CPU pointer that can be used to access
+ * that location kernel-side. Only certain kinds of memory have a permanent
+ * kernel mapping, refer to the internal functions
+ * kbase_reg_needs_kernel_mapping() and kbase_phy_alloc_mapping_init() for more
+ * information.
+ *
+ * If this function succeeds, a CPU access to the returned pointer will access
+ * the actual location represented by @gpu_addr. That is, the return value does
+ * not require any offset added to it to access the location specified in
+ * @gpu_addr
+ *
+ * The client must take care to either apply any necessary sync operations when
+ * accessing the data, or ensure that the enclosing region was coherent with
+ * the GPU, or uncached in the CPU.
+ *
+ * The refcount on the physical allocations backing the region are taken, so
+ * that they do not disappear whilst the client is accessing it. Once the
+ * client has finished accessing the memory, it must be released with a call to
+ * kbase_phy_alloc_mapping_put()
+ *
+ * Whilst this is expected to execute quickly (the mapping was already setup
+ * when the physical allocation was created), the call is not IRQ-safe due to
+ * the region lookup involved.
+ *
+ * A NULL return may indicate that:
+ * - a userside process has freed the allocation, and so @gpu_addr is no longer
+ *   valid
+ * - the region containing @gpu_addr does not support a permanent kernel mapping
+ */
+void *kbase_phy_alloc_mapping_get(struct kbase_context *kctx, u64 gpu_addr,
+		struct kbase_vmap_struct **out_kern_mapping);
+
+/**
+ * kbase_phy_alloc_mapping_put - Put a reference to the kernel-side mapping of a
+ *                               physical allocation
+ * @kctx:         The kernel base context associated with the mapping
+ * @kern_mapping: Pointer to the struct kbase_vmap_struct obtained from a
+ *                call to kbase_phy_alloc_mapping_get()
+ *
+ * Releases the reference to the allocations backing @kern_mapping that was
+ * obtained through a call to kbase_phy_alloc_mapping_get(). This must be used
+ * when the client no longer needs to access the kernel-side CPU pointer.
+ *
+ * If this was the last reference on the underlying physical allocations, they
+ * will go through the normal allocation free steps, which also includes an
+ * unmap of the permanent kernel mapping for those allocations.
+ *
+ * Due to these operations, the function is not IRQ-safe. However it is
+ * expected to execute quickly in the normal case, i.e. when the region holding
+ * the physical allocation is still present.
+ */
+void kbase_phy_alloc_mapping_put(struct kbase_context *kctx,
+		struct kbase_vmap_struct *kern_mapping);
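+
+/*
+ * Illustrative get/put pairing for the two functions above:
+ *
+ *	struct kbase_vmap_struct *mapping;
+ *	void *ptr = kbase_phy_alloc_mapping_get(kctx, gpu_addr, &mapping);
+ *
+ *	if (ptr) {
+ *		...access the permanent mapping through ptr...
+ *		kbase_phy_alloc_mapping_put(kctx, mapping);
+ *	}
+ */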
+
+/**
+ * kbase_get_cache_line_alignment - Return cache line alignment
+ * @kbdev: Device pointer.
+ *
+ * Helper function to return the maximum cache line alignment considering
+ * both CPU and GPU cache sizes.
+ *
+ * Return: CPU and GPU cache line alignment, in bytes.
+ */
+u32 kbase_get_cache_line_alignment(struct kbase_device *kbdev);
+
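+/*
+ * Compatibility fallback: vmf_insert_pfn_prot() only exists from 4.20
+ * onwards, so an equivalent is provided here for older kernels by mapping
+ * the integer error codes of the vm_insert_pfn*() family onto VM_FAULT_*
+ * codes. Kernels that also lack vm_insert_pfn_prot() fall back to
+ * vm_insert_pfn(), which can only honour the VMA's existing page
+ * protection.
+ */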
+#if (KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE)
+static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
+			unsigned long addr, unsigned long pfn, pgprot_t pgprot)
+{
+	int err;
+
+#if ((KERNEL_VERSION(4, 4, 147) >= LINUX_VERSION_CODE) || \
+		((KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE) && \
+		 (KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE)))
+	if (pgprot_val(pgprot) != pgprot_val(vma->vm_page_prot))
+		return VM_FAULT_SIGBUS;
+
+	err = vm_insert_pfn(vma, addr, pfn);
+#else
+	err = vm_insert_pfn_prot(vma, addr, pfn, pgprot);
+#endif
+
+	if (unlikely(err == -ENOMEM))
+		return VM_FAULT_OOM;
+	if (unlikely(err < 0 && err != -EBUSY))
+		return VM_FAULT_SIGBUS;
+
+	return VM_FAULT_NOPAGE;
+}
+#endif
+
+#endif				/* _KBASE_MEM_LINUX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h b/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h
new file mode 100644
index 0000000..7011603
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h
@@ -0,0 +1,166 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2014,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#ifndef _KBASE_MEM_LOWLEVEL_H
+#define _KBASE_MEM_LOWLEVEL_H
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
+#include <linux/dma-mapping.h>
+
+/**
+ * @brief Flags for kbase_phy_allocator_pages_alloc
+ */
+#define KBASE_PHY_PAGES_FLAG_DEFAULT (0)	/**< Default allocation flag */
+#define KBASE_PHY_PAGES_FLAG_CLEAR   (1 << 0)	/**< Clear the pages after allocation */
+#define KBASE_PHY_PAGES_FLAG_POISON  (1 << 1)	/**< Fill the memory with a poison value */
+
+#define KBASE_PHY_PAGES_SUPPORTED_FLAGS (KBASE_PHY_PAGES_FLAG_DEFAULT|KBASE_PHY_PAGES_FLAG_CLEAR|KBASE_PHY_PAGES_FLAG_POISON)
+
+#define KBASE_PHY_PAGES_POISON_VALUE  0xFD /**< Value to fill the memory with when KBASE_PHY_PAGES_FLAG_POISON is set */
+
+enum kbase_sync_type {
+	KBASE_SYNC_TO_CPU,
+	KBASE_SYNC_TO_DEVICE
+};
+
+struct tagged_addr { phys_addr_t tagged_addr; };
+
+#define HUGE_PAGE    (1u << 0)
+#define HUGE_HEAD    (1u << 1)
+#define FROM_PARTIAL (1u << 2)
+
+/*
+ * Note: if the macro for converting a physical address to a page is not
+ * defined in the kernel itself, it is defined here. This avoids build
+ * errors reported for some architectures.
+ */
+#ifndef phys_to_page
+#define phys_to_page(phys)	(pfn_to_page((phys) >> PAGE_SHIFT))
+#endif
+
+/**
+ * as_phys_addr_t - Retrieve the physical address from tagged address by
+ *                  masking the lower order 12 bits.
+ * @t: tagged address to be translated.
+ *
+ * Return: physical address corresponding to tagged address.
+ */
+static inline phys_addr_t as_phys_addr_t(struct tagged_addr t)
+{
+	return t.tagged_addr & PAGE_MASK;
+}
+
+/**
+ * as_page - Retrieve the struct page from a tagged address
+ * @t: tagged address to be translated.
+ *
+ * Return: pointer to struct page corresponding to tagged address.
+ */
+static inline struct page *as_page(struct tagged_addr t)
+{
+	return phys_to_page(as_phys_addr_t(t));
+}
+
+/**
+ * as_tagged - Convert a physical address to tagged address type; no tag
+ *             info is present, so the lower order 12 bits will be 0
+ * @phys: physical address to be converted to tagged type
+ *
+ * This is used for 4KB physical pages allocated by the driver, or for
+ * imported pages, and is needed because the physical pages tracking object
+ * stores references to physical pages using the tagged address type in lieu
+ * of the type generally used for physical addresses.
+ *
+ * Return: address of tagged address type.
+ */
+static inline struct tagged_addr as_tagged(phys_addr_t phys)
+{
+	struct tagged_addr t;
+
+	t.tagged_addr = phys & PAGE_MASK;
+	return t;
+}
+
+/**
+ * as_tagged_tag - Form the tagged address by storing the tag or metadata in
+ *                 the lower order 12 bits of the physical address
+ * @phys: physical address to be converted to tagged address
+ * @tag:  tag to be stored along with the physical address.
+ *
+ * The tag info is used while freeing up the pages
+ *
+ * Return: tagged address storing physical address & tag.
+ */
+static inline struct tagged_addr as_tagged_tag(phys_addr_t phys, int tag)
+{
+	struct tagged_addr t;
+
+	t.tagged_addr = (phys & PAGE_MASK) | (tag & ~PAGE_MASK);
+	return t;
+}
+
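+/*
+ * Example (mirrors kbase_mem_pool_alloc_pages()): tagging the 4KB sub-pages
+ * of a 2MB large page, where only the first entry is the head:
+ *
+ *	pages[0] = as_tagged_tag(page_to_phys(p), HUGE_HEAD | HUGE_PAGE);
+ *	for (j = 1; j < 512; j++)
+ *		pages[j] = as_tagged_tag(page_to_phys(p) + PAGE_SIZE * j,
+ *					 HUGE_PAGE);
+ */
+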
+/**
+ * is_huge - Check if the physical page is one of the 512 4KB sub-pages of a
+ *           large page which was not split for partial use
+ * @t: tagged address storing the tag in the lower order bits.
+ *
+ * Return: true if the page belongs to a large page, false otherwise
+ */
+static inline bool is_huge(struct tagged_addr t)
+{
+	return t.tagged_addr & HUGE_PAGE;
+}
+
+/**
+ * is_huge_head - Check if the physical page is the first 4KB page of the
+ *                512 4KB pages within a large page which was not split
+ *                to be used partially
+ * @t: tagged address storing the tag in the lower order bits.
+ *
+ * Return: true if the page is the first page of a large page, false otherwise
+ */
+static inline bool is_huge_head(struct tagged_addr t)
+{
+	int mask = HUGE_HEAD | HUGE_PAGE;
+
+	return mask == (t.tagged_addr & mask);
+}
+
+/**
+ * is_partial - Check if the physical page is one of the 512 pages of a
+ *              large page which was split into 4KB pages to be used
+ *              partially for allocations >= 2 MB in size.
+ * @t: tagged address storing the tag in the lower order bits.
+ *
+ * Return: true if the page was taken from a partially used large page,
+ *         false otherwise
+ */
+static inline bool is_partial(struct tagged_addr t)
+{
+	return t.tagged_addr & FROM_PARTIAL;
+}
+
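+/*
+ * Example predicate use when freeing, as in kbase_mem_pool_free_pages():
+ * only the head of a large page is handed back to the allocator, so tail
+ * entries are skipped:
+ *
+ *	if (is_huge(pages[i]) && !is_huge_head(pages[i]))
+ *		continue;
+ */
+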
+#endif /* _KBASE_MEM_LOWLEVEL_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c b/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
new file mode 100644
index 0000000..0723e32
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
@@ -0,0 +1,856 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+#include <linux/shrinker.h>
+#include <linux/atomic.h>
+#include <linux/version.h>
+
+#define pool_dbg(pool, format, ...) \
+	dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format,	\
+		(pool->next_pool) ? "kctx" : "kbdev",	\
+		kbase_mem_pool_size(pool),	\
+		kbase_mem_pool_max_size(pool),	\
+		##__VA_ARGS__)
+
+#define NOT_DIRTY false
+#define NOT_RECLAIMED false
+
+static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
+{
+	ssize_t max_size = kbase_mem_pool_max_size(pool);
+	ssize_t cur_size = kbase_mem_pool_size(pool);
+
+	return max(max_size - cur_size, (ssize_t)0);
+}
+
+static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool)
+{
+	return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool);
+}
+
+static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
+{
+	return kbase_mem_pool_size(pool) == 0;
+}
+
+static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
+		struct page *p)
+{
+	lockdep_assert_held(&pool->pool_lock);
+
+	list_add(&p->lru, &pool->page_list);
+	pool->cur_size++;
+
+	pool_dbg(pool, "added page\n");
+}
+
+static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
+{
+	kbase_mem_pool_lock(pool);
+	kbase_mem_pool_add_locked(pool, p);
+	kbase_mem_pool_unlock(pool);
+}
+
+static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
+		struct list_head *page_list, size_t nr_pages)
+{
+	lockdep_assert_held(&pool->pool_lock);
+
+	list_splice(page_list, &pool->page_list);
+	pool->cur_size += nr_pages;
+
+	pool_dbg(pool, "added %zu pages\n", nr_pages);
+}
+
+static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
+		struct list_head *page_list, size_t nr_pages)
+{
+	kbase_mem_pool_lock(pool);
+	kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
+	kbase_mem_pool_unlock(pool);
+}
+
+static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
+{
+	struct page *p;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	if (kbase_mem_pool_is_empty(pool))
+		return NULL;
+
+	p = list_first_entry(&pool->page_list, struct page, lru);
+	list_del_init(&p->lru);
+	pool->cur_size--;
+
+	pool_dbg(pool, "removed page\n");
+
+	return p;
+}
+
+static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
+{
+	struct page *p;
+
+	kbase_mem_pool_lock(pool);
+	p = kbase_mem_pool_remove_locked(pool);
+	kbase_mem_pool_unlock(pool);
+
+	return p;
+}
+
+static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
+		struct page *p)
+{
+	struct device *dev = pool->kbdev->dev;
+
+	dma_sync_single_for_device(dev, kbase_dma_addr(p),
+			(PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
+}
+
+static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
+		struct page *p)
+{
+	int i;
+
+	for (i = 0; i < (1U << pool->order); i++)
+		clear_highpage(p+i);
+
+	kbase_mem_pool_sync_page(pool, p);
+}
+
+static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
+		struct page *p)
+{
+	/* Zero page before spilling */
+	kbase_mem_pool_zero_page(next_pool, p);
+
+	kbase_mem_pool_add(next_pool, p);
+}
+
+struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
+{
+	struct page *p;
+	gfp_t gfp;
+	struct kbase_device *const kbdev = pool->kbdev;
+	struct device *const dev = kbdev->dev;
+	dma_addr_t dma_addr;
+	int i;
+
+#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \
+	LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+	/* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
+	gfp = GFP_USER | __GFP_ZERO;
+#else
+	gfp = GFP_HIGHUSER | __GFP_ZERO;
+#endif
+
+	/* don't warn on higher order failures */
+	if (pool->order)
+		gfp |= __GFP_NOWARN;
+
+	p = kbdev->mgm_dev->ops.mgm_alloc_page(kbdev->mgm_dev,
+		pool->group_id, gfp, pool->order);
+	if (!p)
+		return NULL;
+
+	dma_addr = dma_map_page(dev, p, 0, (PAGE_SIZE << pool->order),
+				DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(dev, dma_addr)) {
+		kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev,
+			pool->group_id, p, pool->order);
+		return NULL;
+	}
+
+	/* The driver relies on a 1:1 DMA mapping, i.e. the DMA address being
+	 * the same as the physical address.
+	 */
+	WARN_ON(dma_addr != page_to_phys(p));
+	for (i = 0; i < (1u << pool->order); i++)
+		kbase_set_dma_addr(p+i, dma_addr + PAGE_SIZE * i);
+
+	return p;
+}
+
+static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
+		struct page *p)
+{
+	struct kbase_device *const kbdev = pool->kbdev;
+	struct device *const dev = kbdev->dev;
+	dma_addr_t dma_addr = kbase_dma_addr(p);
+	int i;
+
+	dma_unmap_page(dev, dma_addr, (PAGE_SIZE << pool->order),
+		       DMA_BIDIRECTIONAL);
+	for (i = 0; i < (1u << pool->order); i++)
+		kbase_clear_dma_addr(p+i);
+
+	kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev,
+		pool->group_id, p, pool->order);
+
+	pool_dbg(pool, "freed page to kernel\n");
+}
+
+static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
+		size_t nr_to_shrink)
+{
+	struct page *p;
+	size_t i;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
+		p = kbase_mem_pool_remove_locked(pool);
+		kbase_mem_pool_free_page(pool, p);
+	}
+
+	return i;
+}
+
+static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
+		size_t nr_to_shrink)
+{
+	size_t nr_freed;
+
+	kbase_mem_pool_lock(pool);
+	nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
+	kbase_mem_pool_unlock(pool);
+
+	return nr_freed;
+}
+
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
+		size_t nr_to_grow)
+{
+	struct page *p;
+	size_t i;
+
+	kbase_mem_pool_lock(pool);
+
+	/* Keep the shrinker away while growing; the pool lock is dropped
+	 * around each allocation, so the reclaim callbacks must see this flag.
+	 */
+	pool->dont_reclaim = true;
+	for (i = 0; i < nr_to_grow; i++) {
+		if (pool->dying) {
+			pool->dont_reclaim = false;
+			kbase_mem_pool_shrink_locked(pool, nr_to_grow);
+			kbase_mem_pool_unlock(pool);
+
+			return -ENOMEM;
+		}
+		kbase_mem_pool_unlock(pool);
+
+		p = kbase_mem_alloc_page(pool);
+		if (!p) {
+			kbase_mem_pool_lock(pool);
+			pool->dont_reclaim = false;
+			kbase_mem_pool_unlock(pool);
+
+			return -ENOMEM;
+		}
+
+		kbase_mem_pool_lock(pool);
+		kbase_mem_pool_add_locked(pool, p);
+	}
+	pool->dont_reclaim = false;
+	kbase_mem_pool_unlock(pool);
+
+	return 0;
+}
+
+void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
+{
+	size_t cur_size;
+	int err = 0;
+
+	cur_size = kbase_mem_pool_size(pool);
+
+	if (new_size > pool->max_size)
+		new_size = pool->max_size;
+
+	if (new_size < cur_size)
+		kbase_mem_pool_shrink(pool, cur_size - new_size);
+	else if (new_size > cur_size)
+		err = kbase_mem_pool_grow(pool, new_size - cur_size);
+
+	if (err) {
+		size_t grown_size = kbase_mem_pool_size(pool);
+
+		dev_warn(pool->kbdev->dev,
+			 "Mem pool not grown to the required size of %zu bytes, grown for additional %zu bytes instead!\n",
+			 (new_size - cur_size), (grown_size - cur_size));
+	}
+}
+
+void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
+{
+	size_t cur_size;
+	size_t nr_to_shrink;
+
+	kbase_mem_pool_lock(pool);
+
+	pool->max_size = max_size;
+
+	cur_size = kbase_mem_pool_size(pool);
+	if (max_size < cur_size) {
+		nr_to_shrink = cur_size - max_size;
+		kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
+	}
+
+	kbase_mem_pool_unlock(pool);
+}
+
+
+static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
+		struct shrink_control *sc)
+{
+	struct kbase_mem_pool *pool;
+	size_t pool_size;
+
+	pool = container_of(s, struct kbase_mem_pool, reclaim);
+
+	kbase_mem_pool_lock(pool);
+	if (pool->dont_reclaim && !pool->dying) {
+		kbase_mem_pool_unlock(pool);
+		return 0;
+	}
+	pool_size = kbase_mem_pool_size(pool);
+	kbase_mem_pool_unlock(pool);
+
+	return pool_size;
+}
+
+static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
+		struct shrink_control *sc)
+{
+	struct kbase_mem_pool *pool;
+	unsigned long freed;
+
+	pool = container_of(s, struct kbase_mem_pool, reclaim);
+
+	kbase_mem_pool_lock(pool);
+	if (pool->dont_reclaim && !pool->dying) {
+		kbase_mem_pool_unlock(pool);
+		return 0;
+	}
+
+	pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);
+
+	freed = kbase_mem_pool_shrink_locked(pool, sc->nr_to_scan);
+
+	kbase_mem_pool_unlock(pool);
+
+	pool_dbg(pool, "reclaim freed %ld pages\n", freed);
+
+	return freed;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int kbase_mem_pool_reclaim_shrink(struct shrinker *s,
+		struct shrink_control *sc)
+{
+	if (sc->nr_to_scan == 0)
+		return kbase_mem_pool_reclaim_count_objects(s, sc);
+
+	return kbase_mem_pool_reclaim_scan_objects(s, sc);
+}
+#endif
+
+int kbase_mem_pool_init(struct kbase_mem_pool *pool,
+		const struct kbase_mem_pool_config *config,
+		unsigned int order,
+		int group_id,
+		struct kbase_device *kbdev,
+		struct kbase_mem_pool *next_pool)
+{
+	if (WARN_ON(group_id < 0) ||
+		WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)) {
+		return -EINVAL;
+	}
+
+	pool->cur_size = 0;
+	pool->max_size = kbase_mem_pool_config_get_max_size(config);
+	pool->order = order;
+	pool->group_id = group_id;
+	pool->kbdev = kbdev;
+	pool->next_pool = next_pool;
+	pool->dying = false;
+
+	spin_lock_init(&pool->pool_lock);
+	INIT_LIST_HEAD(&pool->page_list);
+
+	/* Register shrinker */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+	pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink;
+#else
+	pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects;
+	pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects;
+#endif
+	pool->reclaim.seeks = DEFAULT_SEEKS;
+	/* Kernel versions prior to 3.1:
+	 * struct shrinker does not define batch
+	 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+	pool->reclaim.batch = 0;
+#endif
+	register_shrinker(&pool->reclaim);
+
+	pool_dbg(pool, "initialized\n");
+
+	return 0;
+}
+
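+/*
+ * Lifecycle sketch for a single pool (illustrative; the config and device
+ * variables are assumed to exist in the caller):
+ *
+ *	err = kbase_mem_pool_init(&pool, &config, 0, 0, kbdev, NULL);
+ *	if (!err) {
+ *		struct page *p = kbase_mem_pool_alloc(&pool);
+ *
+ *		if (p)
+ *			kbase_mem_pool_free(&pool, p, false);
+ *		kbase_mem_pool_term(&pool);
+ *	}
+ */
+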
+void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool)
+{
+	kbase_mem_pool_lock(pool);
+	pool->dying = true;
+	kbase_mem_pool_unlock(pool);
+}
+
+void kbase_mem_pool_term(struct kbase_mem_pool *pool)
+{
+	struct kbase_mem_pool *next_pool = pool->next_pool;
+	struct page *p, *tmp;
+	size_t nr_to_spill = 0;
+	LIST_HEAD(spill_list);
+	LIST_HEAD(free_list);
+	size_t i;
+
+	pool_dbg(pool, "terminate()\n");
+
+	unregister_shrinker(&pool->reclaim);
+
+	kbase_mem_pool_lock(pool);
+	pool->max_size = 0;
+
+	if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
+		/* Spill to next pool (may overspill) */
+		nr_to_spill = kbase_mem_pool_capacity(next_pool);
+		nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill);
+
+		/* Zero pages first without holding the next_pool lock */
+		for (i = 0; i < nr_to_spill; i++) {
+			p = kbase_mem_pool_remove_locked(pool);
+			list_add(&p->lru, &spill_list);
+		}
+	}
+
+	while (!kbase_mem_pool_is_empty(pool)) {
+		/* Free remaining pages to kernel */
+		p = kbase_mem_pool_remove_locked(pool);
+		list_add(&p->lru, &free_list);
+	}
+
+	kbase_mem_pool_unlock(pool);
+
+	if (next_pool && nr_to_spill) {
+		list_for_each_entry(p, &spill_list, lru)
+			kbase_mem_pool_zero_page(pool, p);
+
+		/* Add new page list to next_pool */
+		kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);
+
+		pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
+	}
+
+	list_for_each_entry_safe(p, tmp, &free_list, lru) {
+		list_del_init(&p->lru);
+		kbase_mem_pool_free_page(pool, p);
+	}
+
+	pool_dbg(pool, "terminated\n");
+}
+
+struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
+{
+	struct page *p;
+
+	do {
+		pool_dbg(pool, "alloc()\n");
+		p = kbase_mem_pool_remove(pool);
+
+		if (p)
+			return p;
+
+		pool = pool->next_pool;
+	} while (pool);
+
+	return NULL;
+}
+
+struct page *kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool)
+{
+	struct page *p;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	pool_dbg(pool, "alloc_locked()\n");
+	p = kbase_mem_pool_remove_locked(pool);
+
+	if (p)
+		return p;
+
+	return NULL;
+}
+
+void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
+		bool dirty)
+{
+	struct kbase_mem_pool *next_pool = pool->next_pool;
+
+	pool_dbg(pool, "free()\n");
+
+	if (!kbase_mem_pool_is_full(pool)) {
+		/* Add to our own pool */
+		if (dirty)
+			kbase_mem_pool_sync_page(pool, p);
+
+		kbase_mem_pool_add(pool, p);
+	} else if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
+		/* Spill to next pool */
+		kbase_mem_pool_spill(next_pool, p);
+	} else {
+		/* Free page */
+		kbase_mem_pool_free_page(pool, p);
+	}
+}
+
+void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
+		bool dirty)
+{
+	pool_dbg(pool, "free_locked()\n");
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	if (!kbase_mem_pool_is_full(pool)) {
+		/* Add to our own pool */
+		if (dirty)
+			kbase_mem_pool_sync_page(pool, p);
+
+		kbase_mem_pool_add_locked(pool, p);
+	} else {
+		/* Free page */
+		kbase_mem_pool_free_page(pool, p);
+	}
+}
+
+int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
+		struct tagged_addr *pages, bool partial_allowed)
+{
+	struct page *p;
+	size_t nr_from_pool;
+	size_t i = 0;
+	int err = -ENOMEM;
+	size_t nr_pages_internal;
+
+	nr_pages_internal = nr_4k_pages / (1u << (pool->order));
+
+	if (nr_pages_internal * (1u << pool->order) != nr_4k_pages)
+		return -EINVAL;
+
+	pool_dbg(pool, "alloc_pages(4k=%zu):\n", nr_4k_pages);
+	pool_dbg(pool, "alloc_pages(internal=%zu):\n", nr_pages_internal);
+
+	/* Get pages from this pool */
+	kbase_mem_pool_lock(pool);
+	nr_from_pool = min(nr_pages_internal, kbase_mem_pool_size(pool));
+	while (nr_from_pool--) {
+		int j;
+
+		p = kbase_mem_pool_remove_locked(pool);
+		if (pool->order) {
+			pages[i++] = as_tagged_tag(page_to_phys(p),
+						   HUGE_HEAD | HUGE_PAGE);
+			for (j = 1; j < (1u << pool->order); j++)
+				pages[i++] = as_tagged_tag(page_to_phys(p) +
+							   PAGE_SIZE * j,
+							   HUGE_PAGE);
+		} else {
+			pages[i++] = as_tagged(page_to_phys(p));
+		}
+	}
+	kbase_mem_pool_unlock(pool);
+
+	if (i != nr_4k_pages && pool->next_pool) {
+		/* Allocate via next pool */
+		err = kbase_mem_pool_alloc_pages(pool->next_pool,
+				nr_4k_pages - i, pages + i, partial_allowed);
+
+		if (err < 0)
+			goto err_rollback;
+
+		i += err;
+	} else {
+		/* Get any remaining pages from kernel */
+		while (i != nr_4k_pages) {
+			p = kbase_mem_alloc_page(pool);
+			if (!p) {
+				if (partial_allowed)
+					goto done;
+				else
+					goto err_rollback;
+			}
+
+			if (pool->order) {
+				int j;
+
+				pages[i++] = as_tagged_tag(page_to_phys(p),
+							   HUGE_PAGE |
+							   HUGE_HEAD);
+				for (j = 1; j < (1u << pool->order); j++) {
+					phys_addr_t phys;
+
+					phys = page_to_phys(p) + PAGE_SIZE * j;
+					pages[i++] = as_tagged_tag(phys,
+								   HUGE_PAGE);
+				}
+			} else {
+				pages[i++] = as_tagged(page_to_phys(p));
+			}
+		}
+	}
+
+done:
+	pool_dbg(pool, "alloc_pages(%zu) done\n", i);
+	return i;
+
+err_rollback:
+	kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
+	return err;
+}
+
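+/*
+ * Example (illustrative): requesting a 2MB region as 512 tagged 4KB entries.
+ * On error nothing remains allocated, because the function rolls back:
+ *
+ *	struct tagged_addr pages[512];
+ *	int got = kbase_mem_pool_alloc_pages(pool, 512, pages, false);
+ *
+ *	if (got < 0)
+ *		return got;
+ */
+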
+int kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool,
+		size_t nr_4k_pages, struct tagged_addr *pages)
+{
+	struct page *p;
+	size_t i;
+	size_t nr_pages_internal;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	nr_pages_internal = nr_4k_pages / (1u << (pool->order));
+
+	if (nr_pages_internal * (1u << pool->order) != nr_4k_pages)
+		return -EINVAL;
+
+	pool_dbg(pool, "alloc_pages_locked(4k=%zu):\n", nr_4k_pages);
+	pool_dbg(pool, "alloc_pages_locked(internal=%zu):\n",
+			nr_pages_internal);
+
+	if (kbase_mem_pool_size(pool) < nr_pages_internal) {
+		pool_dbg(pool, "Failed alloc\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < nr_pages_internal; i++) {
+		int j;
+
+		p = kbase_mem_pool_remove_locked(pool);
+		if (pool->order) {
+			*pages++ = as_tagged_tag(page_to_phys(p),
+						   HUGE_HEAD | HUGE_PAGE);
+			for (j = 1; j < (1u << pool->order); j++) {
+				*pages++ = as_tagged_tag(page_to_phys(p) +
+							   PAGE_SIZE * j,
+							   HUGE_PAGE);
+			}
+		} else {
+			*pages++ = as_tagged(page_to_phys(p));
+		}
+	}
+
+	return nr_4k_pages;
+}
+
+static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
+				     size_t nr_pages, struct tagged_addr *pages,
+				     bool zero, bool sync)
+{
+	struct page *p;
+	size_t nr_to_pool = 0;
+	LIST_HEAD(new_page_list);
+	size_t i;
+
+	if (!nr_pages)
+		return;
+
+	pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n",
+			nr_pages, zero, sync);
+
+	/* Zero/sync pages first without holding the pool lock */
+	for (i = 0; i < nr_pages; i++) {
+		if (unlikely(!as_phys_addr_t(pages[i])))
+			continue;
+
+		if (is_huge_head(pages[i]) || !is_huge(pages[i])) {
+			p = as_page(pages[i]);
+			if (zero)
+				kbase_mem_pool_zero_page(pool, p);
+			else if (sync)
+				kbase_mem_pool_sync_page(pool, p);
+
+			list_add(&p->lru, &new_page_list);
+			nr_to_pool++;
+		}
+		pages[i] = as_tagged(0);
+	}
+
+	/* Add new page list to pool */
+	kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool);
+
+	pool_dbg(pool, "add_array(%zu) added %zu pages\n",
+			nr_pages, nr_to_pool);
+}
+
+static void kbase_mem_pool_add_array_locked(struct kbase_mem_pool *pool,
+		size_t nr_pages, struct tagged_addr *pages,
+		bool zero, bool sync)
+{
+	struct page *p;
+	size_t nr_to_pool = 0;
+	LIST_HEAD(new_page_list);
+	size_t i;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	if (!nr_pages)
+		return;
+
+	pool_dbg(pool, "add_array_locked(%zu, zero=%d, sync=%d):\n",
+			nr_pages, zero, sync);
+
+	/* Zero/sync pages first */
+	for (i = 0; i < nr_pages; i++) {
+		if (unlikely(!as_phys_addr_t(pages[i])))
+			continue;
+
+		if (is_huge_head(pages[i]) || !is_huge(pages[i])) {
+			p = as_page(pages[i]);
+			if (zero)
+				kbase_mem_pool_zero_page(pool, p);
+			else if (sync)
+				kbase_mem_pool_sync_page(pool, p);
+
+			list_add(&p->lru, &new_page_list);
+			nr_to_pool++;
+		}
+		pages[i] = as_tagged(0);
+	}
+
+	/* Add new page list to pool */
+	kbase_mem_pool_add_list_locked(pool, &new_page_list, nr_to_pool);
+
+	pool_dbg(pool, "add_array_locked(%zu) added %zu pages\n",
+			nr_pages, nr_to_pool);
+}
+
+void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+		struct tagged_addr *pages, bool dirty, bool reclaimed)
+{
+	struct kbase_mem_pool *next_pool = pool->next_pool;
+	struct page *p;
+	size_t nr_to_pool;
+	LIST_HEAD(to_pool_list);
+	size_t i = 0;
+
+	pool_dbg(pool, "free_pages(%zu):\n", nr_pages);
+
+	if (!reclaimed) {
+		/* Add to this pool */
+		nr_to_pool = kbase_mem_pool_capacity(pool);
+		nr_to_pool = min(nr_pages, nr_to_pool);
+
+		kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);
+
+		i += nr_to_pool;
+
+		if (i != nr_pages && next_pool) {
+			/* Spill to next pool (may overspill) */
+			nr_to_pool = kbase_mem_pool_capacity(next_pool);
+			nr_to_pool = min(nr_pages - i, nr_to_pool);
+
+			kbase_mem_pool_add_array(next_pool, nr_to_pool,
+					pages + i, true, dirty);
+			i += nr_to_pool;
+		}
+	}
+
+	/* Free any remaining pages to kernel */
+	for (; i < nr_pages; i++) {
+		if (unlikely(!as_phys_addr_t(pages[i])))
+			continue;
+
+		if (is_huge(pages[i]) && !is_huge_head(pages[i])) {
+			pages[i] = as_tagged(0);
+			continue;
+		}
+
+		p = as_page(pages[i]);
+
+		kbase_mem_pool_free_page(pool, p);
+		pages[i] = as_tagged(0);
+	}
+
+	pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
+}
+
+
+void kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool,
+		size_t nr_pages, struct tagged_addr *pages, bool dirty,
+		bool reclaimed)
+{
+	struct page *p;
+	size_t nr_to_pool;
+	LIST_HEAD(to_pool_list);
+	size_t i = 0;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	pool_dbg(pool, "free_pages_locked(%zu):\n", nr_pages);
+
+	if (!reclaimed) {
+		/* Add to this pool */
+		nr_to_pool = kbase_mem_pool_capacity(pool);
+		nr_to_pool = min(nr_pages, nr_to_pool);
+
+		kbase_mem_pool_add_array_locked(pool, nr_to_pool, pages, false,
+				dirty);
+
+		i += nr_to_pool;
+	}
+
+	/* Free any remaining pages to kernel */
+	for (; i < nr_pages; i++) {
+		if (unlikely(!as_phys_addr_t(pages[i])))
+			continue;
+
+		if (is_huge(pages[i]) && !is_huge_head(pages[i])) {
+			pages[i] = as_tagged(0);
+			continue;
+		}
+
+		p = as_page(pages[i]);
+
+		kbase_mem_pool_free_page(pool, p);
+		pages[i] = as_tagged(0);
+	}
+
+	pool_dbg(pool, "free_pages_locked(%zu) done\n", nr_pages);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.c
new file mode 100644
index 0000000..edb9cd4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.c
@@ -0,0 +1,183 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "mali_kbase_mem_pool_debugfs.h"
+#include "mali_kbase_debugfs_helper.h"
+
+void kbase_mem_pool_debugfs_trim(void *const array, size_t const index,
+	size_t const value)
+{
+	struct kbase_mem_pool *const mem_pools = array;
+
+	if (WARN_ON(!mem_pools) ||
+		WARN_ON(index >= MEMORY_GROUP_MANAGER_NR_GROUPS))
+		return;
+
+	kbase_mem_pool_trim(&mem_pools[index], value);
+}
+
+void kbase_mem_pool_debugfs_set_max_size(void *const array,
+	size_t const index, size_t const value)
+{
+	struct kbase_mem_pool *const mem_pools = array;
+
+	if (WARN_ON(!mem_pools) ||
+		WARN_ON(index >= MEMORY_GROUP_MANAGER_NR_GROUPS))
+		return;
+
+	kbase_mem_pool_set_max_size(&mem_pools[index], value);
+}
+
+size_t kbase_mem_pool_debugfs_size(void *const array, size_t const index)
+{
+	struct kbase_mem_pool *const mem_pools = array;
+
+	if (WARN_ON(!mem_pools) ||
+		WARN_ON(index >= MEMORY_GROUP_MANAGER_NR_GROUPS))
+		return 0;
+
+	return kbase_mem_pool_size(&mem_pools[index]);
+}
+
+size_t kbase_mem_pool_debugfs_max_size(void *const array, size_t const index)
+{
+	struct kbase_mem_pool *const mem_pools = array;
+
+	if (WARN_ON(!mem_pools) ||
+		WARN_ON(index >= MEMORY_GROUP_MANAGER_NR_GROUPS))
+		return 0;
+
+	return kbase_mem_pool_max_size(&mem_pools[index]);
+}
+
+void kbase_mem_pool_config_debugfs_set_max_size(void *const array,
+	size_t const index, size_t const value)
+{
+	struct kbase_mem_pool_config *const configs = array;
+
+	if (WARN_ON(!configs) ||
+		WARN_ON(index >= MEMORY_GROUP_MANAGER_NR_GROUPS))
+		return;
+
+	kbase_mem_pool_config_set_max_size(&configs[index], value);
+}
+
+size_t kbase_mem_pool_config_debugfs_max_size(void *const array,
+	size_t const index)
+{
+	struct kbase_mem_pool_config *const configs = array;
+
+	if (WARN_ON(!configs) ||
+		WARN_ON(index >= MEMORY_GROUP_MANAGER_NR_GROUPS))
+		return 0;
+
+	return kbase_mem_pool_config_get_max_size(&configs[index]);
+}
+
+static int kbase_mem_pool_debugfs_size_show(struct seq_file *sfile, void *data)
+{
+	CSTD_UNUSED(data);
+	return kbase_debugfs_helper_seq_read(sfile,
+		MEMORY_GROUP_MANAGER_NR_GROUPS, kbase_mem_pool_debugfs_size);
+}
+
+static ssize_t kbase_mem_pool_debugfs_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int err;
+
+	CSTD_UNUSED(ppos);
+	err = kbase_debugfs_helper_seq_write(file, ubuf, count,
+		MEMORY_GROUP_MANAGER_NR_GROUPS, kbase_mem_pool_debugfs_trim);
+	return err ? err : count;
+}
+
+static int kbase_mem_pool_debugfs_open(struct inode *in, struct file *file)
+{
+	return single_open(file, kbase_mem_pool_debugfs_size_show,
+		in->i_private);
+}
+
+static const struct file_operations kbase_mem_pool_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = kbase_mem_pool_debugfs_open,
+	.read = seq_read,
+	.write = kbase_mem_pool_debugfs_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int kbase_mem_pool_debugfs_max_size_show(struct seq_file *sfile,
+	void *data)
+{
+	CSTD_UNUSED(data);
+	return kbase_debugfs_helper_seq_read(sfile,
+		MEMORY_GROUP_MANAGER_NR_GROUPS,
+		kbase_mem_pool_debugfs_max_size);
+}
+
+static ssize_t kbase_mem_pool_debugfs_max_size_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int err;
+
+	CSTD_UNUSED(ppos);
+	err = kbase_debugfs_helper_seq_write(file, ubuf, count,
+		MEMORY_GROUP_MANAGER_NR_GROUPS,
+		kbase_mem_pool_debugfs_set_max_size);
+	return err ? err : count;
+}
+
+static int kbase_mem_pool_debugfs_max_size_open(struct inode *in,
+	struct file *file)
+{
+	return single_open(file, kbase_mem_pool_debugfs_max_size_show,
+		in->i_private);
+}
+
+static const struct file_operations kbase_mem_pool_debugfs_max_size_fops = {
+	.owner = THIS_MODULE,
+	.open = kbase_mem_pool_debugfs_max_size_open,
+	.read = seq_read,
+	.write = kbase_mem_pool_debugfs_max_size_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void kbase_mem_pool_debugfs_init(struct dentry *parent,
+		struct kbase_context *kctx)
+{
+	debugfs_create_file("mem_pool_size", S_IRUGO | S_IWUSR, parent,
+		&kctx->mem_pools.small, &kbase_mem_pool_debugfs_fops);
+
+	debugfs_create_file("mem_pool_max_size", S_IRUGO | S_IWUSR, parent,
+		&kctx->mem_pools.small, &kbase_mem_pool_debugfs_max_size_fops);
+
+	debugfs_create_file("lp_mem_pool_size", S_IRUGO | S_IWUSR, parent,
+		&kctx->mem_pools.large, &kbase_mem_pool_debugfs_fops);
+
+	debugfs_create_file("lp_mem_pool_max_size", S_IRUGO | S_IWUSR, parent,
+		&kctx->mem_pools.large, &kbase_mem_pool_debugfs_max_size_fops);
+}
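+
+/*
+ * Example interaction from user space (the exact debugfs path is an
+ * assumption and depends on where the context directory is rooted):
+ *
+ *	cat /sys/kernel/debug/mali0/ctx/<tgid>_<id>/mem_pool_size
+ *	echo 4 > /sys/kernel/debug/mali0/ctx/<tgid>_<id>/mem_pool_max_size
+ */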
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.h
new file mode 100644
index 0000000..2932945
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.h
@@ -0,0 +1,123 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_MEM_POOL_DEBUGFS_H_
+#define _KBASE_MEM_POOL_DEBUGFS_H_
+
+#include <mali_kbase.h>
+
+/**
+ * kbase_mem_pool_debugfs_init - add debugfs knobs for @kctx
+ * @parent:  Parent debugfs dentry
+ * @kctx:    The kbase context
+ *
+ * Adds four debugfs files under @parent:
+ * - mem_pool_size: get/set the current sizes of the @kctx mem_pools
+ * - mem_pool_max_size: get/set the max sizes of the @kctx mem_pools
+ * - lp_mem_pool_size: get/set the current sizes of the @kctx large mem_pools
+ * - lp_mem_pool_max_size: get/set the max sizes of the @kctx large mem_pools
+ */
+void kbase_mem_pool_debugfs_init(struct dentry *parent,
+		struct kbase_context *kctx);
+
+/**
+ * kbase_mem_pool_debugfs_trim - Grow or shrink a memory pool to a new size
+ *
+ * @array: Address of the first in an array of physical memory pools.
+ * @index: A memory group ID to be used as an index into the array of memory
+ *         pools. Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ * @value: New number of pages in the pool.
+ *
+ * If @value > current size, fill the pool with new pages from the kernel, but
+ * not above the max_size for the pool.
+ * If @value < current size, shrink the pool by freeing pages to the kernel.
+ */
+void kbase_mem_pool_debugfs_trim(void *array, size_t index, size_t value);
+
+/**
+ * kbase_mem_pool_debugfs_set_max_size - Set maximum number of free pages in
+ *                                       memory pool
+ *
+ * @array: Address of the first in an array of physical memory pools.
+ * @index: A memory group ID to be used as an index into the array of memory
+ *         pools. Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ * @value: Maximum number of free pages the pool can hold.
+ *
+ * If the maximum size is reduced, the pool will be shrunk to adhere to the
+ * new limit. For details see kbase_mem_pool_shrink().
+ */
+void kbase_mem_pool_debugfs_set_max_size(void *array, size_t index,
+	size_t value);
+
+/**
+ * kbase_mem_pool_debugfs_size - Get number of free pages in a memory pool
+ *
+ * @array: Address of the first in an array of physical memory pools.
+ * @index: A memory group ID to be used as an index into the array of memory
+ *         pools. Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ *
+ * Note: the size of the pool may in certain corner cases exceed @max_size!
+ *
+ * Return: Number of free pages in the pool
+ */
+size_t kbase_mem_pool_debugfs_size(void *array, size_t index);
+
+/**
+ * kbase_mem_pool_debugfs_max_size - Get maximum number of free pages in a
+ *                                   memory pool
+ *
+ * @array: Address of the first in an array of physical memory pools.
+ * @index: A memory group ID to be used as an index into the array of memory
+ *         pools. Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ *
+ * Return: Maximum number of free pages in the pool
+ */
+size_t kbase_mem_pool_debugfs_max_size(void *array, size_t index);
+
+/**
+ * kbase_mem_pool_config_debugfs_set_max_size - Set maximum number of free pages
+ *                                              in initial configuration of pool
+ *
+ * @array:  Array of initial configurations for a set of physical memory pools.
+ * @index:  A memory group ID to be used as an index into the array.
+ *          Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ * @value: Maximum number of free pages that a memory pool created from the
+ *          selected configuration can hold.
+ */
+void kbase_mem_pool_config_debugfs_set_max_size(void *array, size_t index,
+	size_t value);
+
+/**
+ * kbase_mem_pool_config_debugfs_max_size - Get maximum number of free pages
+ *                                          from initial configuration of pool
+ *
+ * @array:  Array of initial configurations for a set of physical memory pools.
+ * @index:  A memory group ID to be used as an index into the array.
+ *          Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ *
+ * Return: Maximum number of free pages that a memory pool created from the
+ *         selected configuration can hold.
+ */
+size_t kbase_mem_pool_config_debugfs_max_size(void *array, size_t index);
+
+#endif  /*_KBASE_MEM_POOL_DEBUGFS_H_ */
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool_group.c b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_group.c
new file mode 100644
index 0000000..aa25548
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_group.c
@@ -0,0 +1,115 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_mem.h>
+#include <mali_kbase_mem_pool_group.h>
+
+#include <linux/memory_group_manager.h>
+
+void kbase_mem_pool_group_config_set_max_size(
+	struct kbase_mem_pool_group_config *const configs,
+	size_t const max_size)
+{
+	size_t const large_max_size = max_size >>
+		(KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER -
+		KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER);
+	int gid;
+
+	for (gid = 0; gid < MEMORY_GROUP_MANAGER_NR_GROUPS; ++gid) {
+		kbase_mem_pool_config_set_max_size(&configs->small[gid],
+			max_size);
+
+		kbase_mem_pool_config_set_max_size(&configs->large[gid],
+			large_max_size);
+	}
+}
+
+int kbase_mem_pool_group_init(
+	struct kbase_mem_pool_group *const mem_pools,
+	struct kbase_device *const kbdev,
+	const struct kbase_mem_pool_group_config *const configs,
+	struct kbase_mem_pool_group *next_pools)
+{
+	int gid, err = 0;
+
+	for (gid = 0; gid < MEMORY_GROUP_MANAGER_NR_GROUPS; ++gid) {
+		err = kbase_mem_pool_init(&mem_pools->small[gid],
+			&configs->small[gid],
+			KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER,
+			gid,
+			kbdev,
+			next_pools ? &next_pools->small[gid] : NULL);
+
+		if (!err) {
+			err = kbase_mem_pool_init(&mem_pools->large[gid],
+				&configs->large[gid],
+				KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER,
+				gid,
+				kbdev,
+				next_pools ? &next_pools->large[gid] : NULL);
+			if (err)
+				kbase_mem_pool_term(&mem_pools->small[gid]);
+		}
+
+		/* Break out of the loop early to avoid incrementing the count
+		 * of memory pool pairs successfully initialized.
+		 */
+		if (err)
+			break;
+	}
+
+	if (err) {
+		/* gid gives the number of memory pool pairs successfully
+		 * initialized, which is one greater than the array index of the
+		 * last group.
+		 */
+		while (gid-- > 0) {
+			kbase_mem_pool_term(&mem_pools->small[gid]);
+			kbase_mem_pool_term(&mem_pools->large[gid]);
+		}
+	}
+
+	return err;
+}
+
+void kbase_mem_pool_group_mark_dying(
+	struct kbase_mem_pool_group *const mem_pools)
+{
+	int gid;
+
+	for (gid = 0; gid < MEMORY_GROUP_MANAGER_NR_GROUPS; ++gid) {
+		kbase_mem_pool_mark_dying(&mem_pools->small[gid]);
+		kbase_mem_pool_mark_dying(&mem_pools->large[gid]);
+	}
+}
+
+void kbase_mem_pool_group_term(
+	struct kbase_mem_pool_group *const mem_pools)
+{
+	int gid;
+
+	for (gid = 0; gid < MEMORY_GROUP_MANAGER_NR_GROUPS; ++gid) {
+		kbase_mem_pool_term(&mem_pools->small[gid]);
+		kbase_mem_pool_term(&mem_pools->large[gid]);
+	}
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool_group.h b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_group.h
new file mode 100644
index 0000000..0484f59
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_group.h
@@ -0,0 +1,92 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_MEM_POOL_GROUP_H_
+#define _KBASE_MEM_POOL_GROUP_H_
+
+#include <mali_kbase_defs.h>
+
+/**
+ * kbase_mem_pool_group_config_init - Set the initial configuration for a
+ *                                    set of memory pools
+ *
+ * This function sets the initial configuration for every memory pool so that
+ * the maximum amount of free memory that each pool can hold is identical.
+ * The equivalent number of 2 MiB pages is calculated automatically for the
+ * purpose of configuring the large page pools.
+ *
+ * @configs:  Initial configuration for the set of memory pools
+ * @max_size: Maximum number of free 4 KiB pages each pool can hold
+ */
+void kbase_mem_pool_group_config_set_max_size(
+	struct kbase_mem_pool_group_config *configs, size_t max_size);
+
+/**
+ * kbase_mem_pool_group_init - Initialize a set of memory pools
+ *
+ * Initializes a complete set of physical memory pools. Memory pools are used to
+ * allow efficient reallocation of previously-freed physical pages. A pair of
+ * memory pools is initialized for each physical memory group: one for 4 KiB
+ * pages and one for 2 MiB pages.
+ *
+ * If @next_pools is not NULL then a request to allocate memory from an
+ * empty pool in @mem_pools will attempt to allocate from the equivalent pool
+ * in @next_pools before going to the memory group manager. Similarly
+ * pages can spill over to the equivalent pool in @next_pools when a pool
+ * is full in @mem_pools. Pages are zeroed before they spill over to another
+ * pool, to prevent leaking information between applications.
+ *
+ * @mem_pools:  Set of memory pools to initialize
+ * @kbdev:      Kbase device where memory is used
+ * @configs:    Initial configuration for the set of memory pools
+ * @next_pools: Set of memory pools from which to allocate memory if there
+ *              is no free memory in one of the @mem_pools
+ *
+ * Return: 0 on success, otherwise a negative error code
+ */
+int kbase_mem_pool_group_init(struct kbase_mem_pool_group *mem_pools,
+	struct kbase_device *kbdev,
+	const struct kbase_mem_pool_group_config *configs,
+	struct kbase_mem_pool_group *next_pools);
+
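+/*
+ * Sketch of the intended call sequence (illustrative; caller-side error
+ * handling is an assumption):
+ *
+ *	struct kbase_mem_pool_group_config configs;
+ *	struct kbase_mem_pool_group pools;
+ *
+ *	kbase_mem_pool_group_config_set_max_size(&configs, 4096);
+ *	err = kbase_mem_pool_group_init(&pools, kbdev, &configs, NULL);
+ *	if (!err)
+ *		kbase_mem_pool_group_term(&pools);
+ */
+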
+/**
+ * kbase_mem_pool_group_mark_dying - Mark a set of memory pools as dying
+ *
+ * Marks a complete set of physical memory pools previously initialized by
+ * @kbase_mem_pool_group_init as dying. This will cause any ongoing allocation
+ * operations (e.g. growing on a page fault) to be terminated.
+ *
+ * @mem_pools: Set of memory pools to mark
+ */
+void kbase_mem_pool_group_mark_dying(struct kbase_mem_pool_group *mem_pools);
+
+/**
+ * kbase_mem_pool_group_term - Terminate a set of memory pools
+ *
+ * Terminates a complete set of physical memory pools previously initialized by
+ * @kbase_mem_pool_group_init.
+ *
+ * @mem_pools: Set of memory pools to terminate
+ */
+void kbase_mem_pool_group_term(struct kbase_mem_pool_group *mem_pools);
+
+#endif /* _KBASE_MEM_POOL_GROUP_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c
new file mode 100644
index 0000000..5d38ed2
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c
@@ -0,0 +1,129 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017, 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+/** Show callback for the @c mem_profile debugfs file.
+ *
+ * This function is called to get the contents of the @c mem_profile debugfs
+ * file. This is a report of current memory usage and distribution in userspace.
+ *
+ * @param sfile The debugfs entry
+ * @param data Data associated with the entry
+ *
+ * @return 0 if it successfully prints data in debugfs entry file, non-zero otherwise
+ */
+static int kbasep_mem_profile_seq_show(struct seq_file *sfile, void *data)
+{
+	struct kbase_context *kctx = sfile->private;
+
+	mutex_lock(&kctx->mem_profile_lock);
+
+	seq_write(sfile, kctx->mem_profile_data, kctx->mem_profile_size);
+
+	seq_putc(sfile, '\n');
+
+	mutex_unlock(&kctx->mem_profile_lock);
+
+	return 0;
+}
+
+/*
+ *  File operations related to debugfs entry for mem_profile
+ */
+static int kbasep_mem_profile_debugfs_open(struct inode *in, struct file *file)
+{
+	return single_open(file, kbasep_mem_profile_seq_show, in->i_private);
+}
+
+static const struct file_operations kbasep_mem_profile_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = kbasep_mem_profile_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+					size_t size)
+{
+	int err = 0;
+
+	mutex_lock(&kctx->mem_profile_lock);
+
+	dev_dbg(kctx->kbdev->dev, "initialised: %d",
+		kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
+
+	if (!kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) {
+		if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
+			err = -ENOMEM;
+		} else if (!debugfs_create_file("mem_profile", 0444,
+					kctx->kctx_dentry, kctx,
+					&kbasep_mem_profile_debugfs_fops)) {
+			err = -EAGAIN;
+		} else {
+			kbase_ctx_flag_set(kctx,
+					   KCTX_MEM_PROFILE_INITIALIZED);
+		}
+	}
+
+	if (kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) {
+		kfree(kctx->mem_profile_data);
+		kctx->mem_profile_data = data;
+		kctx->mem_profile_size = size;
+	} else {
+		kfree(data);
+	}
+
+	dev_dbg(kctx->kbdev->dev, "returning: %d, initialised: %d",
+		err, kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
+
+	mutex_unlock(&kctx->mem_profile_lock);
+
+	return err;
+}
+
+void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx)
+{
+	mutex_lock(&kctx->mem_profile_lock);
+
+	dev_dbg(kctx->kbdev->dev, "initialised: %d",
+				kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
+
+	kfree(kctx->mem_profile_data);
+	kctx->mem_profile_data = NULL;
+	kctx->mem_profile_size = 0;
+
+	mutex_unlock(&kctx->mem_profile_lock);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+					size_t size)
+{
+	kfree(data);
+	return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h
new file mode 100644
index 0000000..1462247
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem_profile_debugfs.h
+ * Header file for mem profiles entries in debugfs
+ *
+ */
+
+#ifndef _KBASE_MEM_PROFILE_DEBUGFS_H
+#define _KBASE_MEM_PROFILE_DEBUGFS_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+/**
+ * @brief Remove entry from Mali memory profile debugfs
+ */
+void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx);
+
+/**
+ * @brief Insert @p data to the debugfs file so it can be read by userspace
+ *
+ * The function takes ownership of @p data and frees it later when new data
+ * is inserted.
+ *
+ * If the debugfs entry corresponding to the @p kctx doesn't exist,
+ * an attempt will be made to create it.
+ *
+ * @param kctx The context whose debugfs file @p data should be inserted to
+ * @param data A NUL-terminated string to be inserted into the debugfs file,
+ *             without the trailing new line character
+ * @param size The length of the @p data string
+ * @return 0 if @p data was inserted correctly,
+ *         a negative error code (-ENOMEM or -EAGAIN) in case of error
+ * @post @ref mem_profile_initialized will be set to @c true
+ *       the first time this function succeeds.
+ */
+int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+					size_t size);
+
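+/*
+ * Usage sketch (how the buffer is filled is an assumption; the callee takes
+ * ownership of the buffer whether or not it succeeds):
+ *
+ *	char *buf = kmalloc(KBASE_MEM_PROFILE_MAX_BUF_SIZE, GFP_KERNEL);
+ *
+ *	if (buf) {
+ *		size_t len = scnprintf(buf,
+ *				KBASE_MEM_PROFILE_MAX_BUF_SIZE, "...");
+ *		err = kbasep_mem_profile_debugfs_insert(kctx, buf, len);
+ *	}
+ */
+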
+#endif  /*_KBASE_MEM_PROFILE_DEBUGFS_H*/
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h
new file mode 100644
index 0000000..81e2886
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2018-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file mali_kbase_mem_profile_debugfs_buf_size.h
+ * Header file for the size of the buffer to accumulate the histogram report text in
+ */
+
+#ifndef _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_
+#define _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_
+
+/**
+ * The size of the buffer to accumulate the histogram report text in
+ * @see @ref CCTXP_HIST_BUF_SIZE_MAX_LENGTH_REPORT
+ */
+#define KBASE_MEM_PROFILE_MAX_BUF_SIZE \
+	((size_t) (64 + ((80 + (56 * 64)) * 50) + 56))
+
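+/*
+ * Worked out: 56 * 64 = 3584, (80 + 3584) * 50 = 183200, and
+ * 64 + 183200 + 56 = 183320 bytes in total.
+ */
+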
+#endif  /*_KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_*/
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mipe_gen_header.h b/drivers/gpu/arm/midgard/mali_kbase_mipe_gen_header.h
new file mode 100644
index 0000000..99475b67
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mipe_gen_header.h
@@ -0,0 +1,120 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_mipe_proto.h"
+
+/**
+ * This header generates a MIPE tracepoint declaration BLOB at
+ * compile time.
+ *
+ * Before including this header, the following parameters
+ * must be defined:
+ *
+ * MIPE_HEADER_BLOB_VAR_NAME: the name of the variable
+ * where the resulting BLOB will be stored.
+ *
+ * MIPE_HEADER_TP_LIST: the list of tracepoints to process.
+ * It should be defined as follows:
+ * #define MIPE_HEADER_TP_LIST \
+ *     TP_DESC(FIRST_TRACEPOINT, "Some description", "@II", "first_arg,second_arg") \
+ *     TP_DESC(SECOND_TRACEPOINT, "Some description", "@II", "first_arg,second_arg") \
+ *     etc.
+ * Where the first argument is the tracepoint name, the second
+ * argument is a short tracepoint description, the third argument is
+ * the argument types (see the MIPE documentation), and the fourth
+ * argument is the comma-separated argument names.
+ *
+ * MIPE_HEADER_TP_LIST_COUNT: number of entries in MIPE_HEADER_TP_LIST.
+ *
+ * MIPE_HEADER_PKT_CLASS: MIPE packet class.
+ */
+
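+/*
+ * Usage sketch (the tracepoint below is hypothetical; EXAMPLE_TP is assumed
+ * to be an integer tracepoint ID):
+ *
+ *	#define MIPE_HEADER_BLOB_VAR_NAME __example_header
+ *	#define MIPE_HEADER_TP_LIST \
+ *		TP_DESC(EXAMPLE_TP, "An example tracepoint", "@II", "arg_a,arg_b")
+ *	#define MIPE_HEADER_TP_LIST_COUNT 1
+ *	#define MIPE_HEADER_PKT_CLASS TL_PACKET_CLASS_OBJ
+ *	#include "mali_kbase_mipe_gen_header.h"
+ */
+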
+#if !defined(MIPE_HEADER_BLOB_VAR_NAME)
+#error "MIPE_HEADER_BLOB_VAR_NAME must be defined!"
+#endif
+
+#if !defined(MIPE_HEADER_TP_LIST)
+#error "MIPE_HEADER_TP_LIST must be defined!"
+#endif
+
+#if !defined(MIPE_HEADER_TP_LIST_COUNT)
+#error "MIPE_HEADER_TP_LIST_COUNT must be defined!"
+#endif
+
+#if !defined(MIPE_HEADER_PKT_CLASS)
+#error "MIPE_HEADER_PKT_CLASS must be defined!"
+#endif
+
+static const struct {
+	u32 _mipe_w0;
+	u32 _mipe_w1;
+	u8  _protocol_version;
+	u8  _pointer_size;
+	u32 _tp_count;
+#define TP_DESC(name, desc, arg_types, arg_names)       \
+	struct {                                        \
+		u32  _name;                             \
+		u32  _size_string_name;                 \
+		char _string_name[sizeof(#name)];       \
+		u32  _size_desc;                        \
+		char _desc[sizeof(desc)];               \
+		u32  _size_arg_types;                   \
+		char _arg_types[sizeof(arg_types)];     \
+		u32  _size_arg_names;                   \
+		char _arg_names[sizeof(arg_names)];     \
+	} __attribute__ ((__packed__)) __ ## name;
+
+	MIPE_HEADER_TP_LIST
+#undef TP_DESC
+
+} __attribute__ ((__packed__)) MIPE_HEADER_BLOB_VAR_NAME = {
+	._mipe_w0 = MIPE_PACKET_HEADER_W0(
+		TL_PACKET_FAMILY_TL,
+		MIPE_HEADER_PKT_CLASS,
+		TL_PACKET_TYPE_HEADER,
+		1),
+	._mipe_w1 = MIPE_PACKET_HEADER_W1(
+		sizeof(MIPE_HEADER_BLOB_VAR_NAME) - PACKET_HEADER_SIZE,
+		0),
+	._protocol_version = SWTRACE_VERSION,
+	._pointer_size = sizeof(void *),
+	._tp_count = MIPE_HEADER_TP_LIST_COUNT,
+#define TP_DESC(name, desc, arg_types, arg_names)       \
+	.__ ## name = {                                 \
+		._name = name,                          \
+		._size_string_name = sizeof(#name),     \
+		._string_name = #name,                  \
+		._size_desc = sizeof(desc),             \
+		._desc = desc,                          \
+		._size_arg_types = sizeof(arg_types),   \
+		._arg_types = arg_types,                \
+		._size_arg_names = sizeof(arg_names),   \
+		._arg_names = arg_names                 \
+	},
+	MIPE_HEADER_TP_LIST
+#undef TP_DESC
+};
+
+#undef MIPE_HEADER_BLOB_VAR_NAME
+#undef MIPE_HEADER_TP_LIST
+#undef MIPE_HEADER_TP_LIST_COUNT
+#undef MIPE_HEADER_PKT_CLASS
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mipe_proto.h b/drivers/gpu/arm/midgard/mali_kbase_mipe_proto.h
new file mode 100644
index 0000000..1a0b8b4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mipe_proto.h
@@ -0,0 +1,113 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_MIPE_PROTO_H)
+#define _KBASE_MIPE_PROTO_H
+
+#define _BITFIELD_MASK_FIELD(pos, len) \
+	(((1u << len) - 1) << pos)
+
+#define _BITFIELD_SET_FIELD(pos, len, value) \
+	(_BITFIELD_MASK_FIELD(pos, len) & (((u32) value) << pos))
+
+#define BITFIELD_SET(field_name, value) \
+	_BITFIELD_SET_FIELD(field_name ## _POS, field_name ## _LEN, value)
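+
+/* For example, _BITFIELD_SET_FIELD(16, 3, 5) masks (5u << 16) with
+ * ((1u << 3) - 1) << 16 = 0x70000, yielding 0x50000.
+ */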
+
+/* The version of swtrace protocol used in timeline stream. */
+#define SWTRACE_VERSION    3
+
+/* Packet header - first word.
+ * These values must be defined according to MIPE documentation.
+ */
+#define PACKET_STREAMID_POS  0
+#define PACKET_STREAMID_LEN  8
+#define PACKET_RSVD1_POS     (PACKET_STREAMID_POS + PACKET_STREAMID_LEN)
+#define PACKET_RSVD1_LEN     8
+#define PACKET_TYPE_POS      (PACKET_RSVD1_POS + PACKET_RSVD1_LEN)
+#define PACKET_TYPE_LEN      3
+#define PACKET_CLASS_POS     (PACKET_TYPE_POS + PACKET_TYPE_LEN)
+#define PACKET_CLASS_LEN     7
+#define PACKET_FAMILY_POS    (PACKET_CLASS_POS + PACKET_CLASS_LEN)
+#define PACKET_FAMILY_LEN    6
+
+/* Packet header - second word
+ * These values must be defined according to MIPE documentation.
+ */
+#define PACKET_LENGTH_POS    0
+#define PACKET_LENGTH_LEN    24
+#define PACKET_SEQBIT_POS    (PACKET_LENGTH_POS + PACKET_LENGTH_LEN)
+#define PACKET_SEQBIT_LEN    1
+#define PACKET_RSVD2_POS     (PACKET_SEQBIT_POS + PACKET_SEQBIT_LEN)
+#define PACKET_RSVD2_LEN     7
+
+/* First word of a MIPE packet */
+#define MIPE_PACKET_HEADER_W0(pkt_family, pkt_class, pkt_type, stream_id) \
+	(0                                          \
+	| BITFIELD_SET(PACKET_FAMILY,   pkt_family) \
+	| BITFIELD_SET(PACKET_CLASS,    pkt_class)  \
+	| BITFIELD_SET(PACKET_TYPE,     pkt_type)   \
+	| BITFIELD_SET(PACKET_STREAMID, stream_id))
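+
+/* For example (values taken from the enums below), a header packet for the
+ * timeline family, OBJ class, stream id 1:
+ * MIPE_PACKET_HEADER_W0(TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ,
+ *                       TL_PACKET_TYPE_HEADER, 1)
+ * = (1 << 26) | (0 << 19) | (0 << 16) | (1 << 0) = 0x04000001
+ */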
+
+/* Second word of a MIPE packet */
+#define MIPE_PACKET_HEADER_W1(packet_length, seqbit) \
+	(0                                           \
+	| BITFIELD_SET(PACKET_LENGTH, packet_length) \
+	| BITFIELD_SET(PACKET_SEQBIT, seqbit))
+
+/* The number of bytes reserved for packet header.
+ * This value must be defined according to MIPE documentation.
+ */
+#define PACKET_HEADER_SIZE 8 /* bytes */
+
+/* The number of bytes reserved for packet sequence number.
+ * This value must be defined according to MIPE documentation.
+ */
+#define PACKET_NUMBER_SIZE 4 /* bytes */
+
+/* Timeline packet family ids.
+ * Values are significant! Check MIPE documentation.
+ */
+enum tl_packet_family {
+	TL_PACKET_FAMILY_CTRL = 0, /* control packets */
+	TL_PACKET_FAMILY_TL = 1,   /* timeline packets */
+	TL_PACKET_FAMILY_COUNT
+};
+
+/* Packet classes used in timeline streams.
+ * Values are significant! Check MIPE documentation.
+ */
+enum tl_packet_class {
+	TL_PACKET_CLASS_OBJ = 0, /* timeline objects packet */
+	TL_PACKET_CLASS_AUX = 1, /* auxiliary events packet */
+};
+
+/* Packet types used in timeline streams.
+ * Values are significant! Check MIPE documentation.
+ */
+enum tl_packet_type {
+	TL_PACKET_TYPE_HEADER = 0,  /* stream's header/directory */
+	TL_PACKET_TYPE_BODY = 1,    /* stream's body */
+	TL_PACKET_TYPE_SUMMARY = 2, /* stream's summary */
+};
+
+#endif /* _KBASE_MIPE_PROTO_H */
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu.c b/drivers/gpu/arm/midgard/mali_kbase_mmu.c
new file mode 100644
index 0000000..ccb63d0
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu.c
@@ -0,0 +1,2714 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mmu.c
+ * Base kernel MMU management.
+ */
+
+/* #define DEBUG    1 */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_tracepoints.h>
+#include <mali_kbase_instr_defs.h>
+#include <mali_kbase_debug.h>
+
+#define beenthere(kctx, f, a...)  dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
+
+#include <mali_kbase_defs.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <mali_kbase_mem.h>
+#include <mali_kbase_reset_gpu.h>
+
+#define KBASE_MMU_PAGE_ENTRIES 512
+
+/**
+ * kbase_mmu_flush_invalidate() - Flush and invalidate the GPU caches.
+ * @kctx: The KBase context.
+ * @vpfn: The virtual page frame number to start the flush on.
+ * @nr: The number of pages to flush.
+ * @sync: Set if the operation should be synchronous or not.
+ *
+ * Issue a cache flush + invalidate to the GPU caches and invalidate the TLBs.
+ *
+ * If sync is not set then transactions still in flight when the flush is
+ * issued may use the old page tables and the data they write will not be
+ * written out to memory. This function returns after the flush has been
+ * issued but before all accesses which might affect the flushed region have
+ * completed.
+ *
+ * If sync is set then accesses in the flushed region will be drained
+ * before data is flushed and invalidated through L1, L2 and into memory,
+ * after which point this function will return.
+ */
+static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
+		u64 vpfn, size_t nr, bool sync);
+
+/**
+ * kbase_mmu_flush_invalidate_no_ctx() - Flush and invalidate the GPU caches.
+ * @kbdev: Device pointer.
+ * @vpfn: The virtual page frame number to start the flush on.
+ * @nr: The number of pages to flush.
+ * @sync: Set if the operation should be synchronous or not.
+ * @as_nr: GPU address space number for which flush + invalidate is required.
+ *
+ * This is used for MMU tables which do not belong to a user space context.
+ */
+static void kbase_mmu_flush_invalidate_no_ctx(struct kbase_device *kbdev,
+		u64 vpfn, size_t nr, bool sync, int as_nr);
+
+/**
+ * kbase_mmu_sync_pgd - sync page directory to memory
+ * @kbdev:	Device pointer.
+ * @handle:	Address of DMA region.
+ * @size:       Size of the region to sync.
+ *
+ * This should be called after each page directory update.
+ */
+static void kbase_mmu_sync_pgd(struct kbase_device *kbdev,
+		dma_addr_t handle, size_t size)
+{
+	/* If the page table is not coherent then ensure the GPU can read
+	 * the pages from memory
+	 */
+	if (kbdev->system_coherency != COHERENCY_ACE)
+		dma_sync_single_for_device(kbdev->dev, handle, size,
+				DMA_TO_DEVICE);
+}
+
+/*
+ * Definitions:
+ * - PGD: Page Directory.
+ * - PTE: Page Table Entry. A 64-bit value pointing to the next
+ *        level of translation.
+ * - ATE: Address Translation Entry. A 64-bit value pointing to
+ *        a 4kB physical page.
+ */
+
+static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
+		struct kbase_as *as, const char *reason_str,
+		struct kbase_fault *fault);
+
+
+static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
+					struct tagged_addr *phys, size_t nr,
+					unsigned long flags, int group_id);
+
+/**
+ * reg_grow_calc_extra_pages() - Calculate the number of backed pages to add to
+ *                               a region on a GPU page fault
+ *
+ * @kbdev:         Device pointer
+ * @reg:           The region that will be backed with more pages
+ * @fault_rel_pfn: PFN of the fault relative to the start of the region
+ *
+ * This calculates how much to increase the backing of a region by, based on
+ * where a GPU page fault occurred and the flags in the region.
+ *
+ * This can be more than the minimum number of pages that would reach
+ * @fault_rel_pfn, for example to reduce the overall rate of page fault
+ * interrupts on a region, or to ensure that the end address is aligned.
+ *
+ * Return: the number of backed pages to increase by
+ */
+static size_t reg_grow_calc_extra_pages(struct kbase_device *kbdev,
+		struct kbase_va_region *reg, size_t fault_rel_pfn)
+{
+	size_t multiple = reg->extent;
+	size_t reg_current_size = kbase_reg_current_backed_size(reg);
+	size_t minimum_extra = fault_rel_pfn - reg_current_size + 1;
+	size_t remainder;
+
+	if (!multiple) {
+		dev_warn(kbdev->dev,
+				"VA Region 0x%llx extent was 0, allocator needs to set this properly for KBASE_REG_PF_GROW\n",
+				((unsigned long long)reg->start_pfn) << PAGE_SHIFT);
+		return minimum_extra;
+	}
+
+	/* Calculate the remainder to subtract from minimum_extra to make it
+	 * the desired (rounded down) multiple of the extent.
+	 * Depending on reg's flags, the base used for calculating multiples is
+	 * different */
+	if (reg->flags & KBASE_REG_TILER_ALIGN_TOP) {
+		/* multiple is based from the top of the initial commit, which
+		 * has been allocated in such a way that (start_pfn +
+		 * initial_commit) is already aligned to multiple. Hence the
+		 * pfn for the end of committed memory will also be aligned to
+		 * multiple */
+		size_t initial_commit = reg->initial_commit;
+
+		if (fault_rel_pfn < initial_commit) {
+			/* this case is just to catch in case it's been
+			 * recommitted by userspace to be smaller than the
+			 * initial commit */
+			minimum_extra = initial_commit - reg_current_size;
+			remainder = 0;
+		} else {
+			/* same as calculating (fault_rel_pfn - initial_commit + 1) */
+			size_t pages_after_initial = minimum_extra + reg_current_size - initial_commit;
+
+			remainder = pages_after_initial % multiple;
+		}
+	} else {
+		/* multiple is based from the current backed size, even if the
+		 * current backed size/pfn for end of committed memory are not
+		 * themselves aligned to multiple */
+		remainder = minimum_extra % multiple;
+	}
+
+	if (remainder == 0)
+		return minimum_extra;
+
+	return minimum_extra + multiple - remainder;
+}
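+
+/* Worked example with illustrative values: for a region without
+ * KBASE_REG_TILER_ALIGN_TOP, extent (multiple) = 64, current backed
+ * size = 100 pages and fault_rel_pfn = 119, minimum_extra is
+ * 119 - 100 + 1 = 20 and remainder is 20 % 64 = 20, so the region grows by
+ * 20 + 64 - 20 = 64 pages, i.e. the growth is rounded up to a whole
+ * multiple of the extent.
+ */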
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+static void kbase_gpu_mmu_handle_write_faulting_as(
+				struct kbase_device *kbdev,
+				struct kbase_as *faulting_as,
+				u64 start_pfn, size_t nr, u32 op)
+{
+	mutex_lock(&kbdev->mmu_hw_mutex);
+
+	kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+			KBASE_MMU_FAULT_TYPE_PAGE);
+	kbase_mmu_hw_do_operation(kbdev, faulting_as, start_pfn,
+			nr, op, 1);
+
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	kbase_mmu_hw_enable_fault(kbdev, faulting_as,
+			KBASE_MMU_FAULT_TYPE_PAGE);
+}
+
+static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
+			struct kbase_as *faulting_as)
+{
+	struct kbasep_gwt_list_element *pos;
+	struct kbase_va_region *region;
+	struct kbase_device *kbdev;
+	struct kbase_fault *fault;
+	u64 fault_pfn, pfn_offset;
+	u32 op;
+	int ret;
+	int as_no;
+
+	as_no = faulting_as->number;
+	kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
+	fault = &faulting_as->pf_data;
+	fault_pfn = fault->addr >> PAGE_SHIFT;
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* Find region and check if it should be writable. */
+	region = kbase_region_tracker_find_region_enclosing_address(kctx,
+			fault->addr);
+	if (kbase_is_region_invalid_or_free(region)) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Memory is not mapped on the GPU",
+				&faulting_as->pf_data);
+		return;
+	}
+
+	if (!(region->flags & KBASE_REG_GPU_WR)) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Region does not have write permissions",
+				&faulting_as->pf_data);
+		return;
+	}
+
+	/* Capture addresses of faulting write location
+	 * for job dumping if write tracking is enabled.
+	 */
+	if (kctx->gwt_enabled) {
+		u64 page_addr = fault->addr & PAGE_MASK;
+		bool found = false;
+		/* Check if this write was already handled. */
+		list_for_each_entry(pos, &kctx->gwt_current_list, link) {
+			if (page_addr == pos->page_addr) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found) {
+			pos = kmalloc(sizeof(*pos), GFP_KERNEL);
+			if (pos) {
+				pos->region = region;
+				pos->page_addr = page_addr;
+				pos->num_pages = 1;
+				list_add(&pos->link, &kctx->gwt_current_list);
+			} else {
+				dev_warn(kbdev->dev, "kmalloc failure");
+			}
+		}
+	}
+
+	pfn_offset = fault_pfn - region->start_pfn;
+	/* Now make this faulting page writable to GPU. */
+	ret = kbase_mmu_update_pages_no_flush(kctx, fault_pfn,
+				&kbase_get_gpu_phy_pages(region)[pfn_offset],
+				1, region->flags, region->gpu_alloc->group_id);
+
+	/* flush L2 and unlock the VA (resumes the MMU) */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
+		op = AS_COMMAND_FLUSH;
+	else
+		op = AS_COMMAND_FLUSH_PT;
+
+	kbase_gpu_mmu_handle_write_faulting_as(kbdev, faulting_as,
+			fault_pfn, 1, op);
+
+	kbase_gpu_vm_unlock(kctx);
+}
+
+static void kbase_gpu_mmu_handle_permission_fault(struct kbase_context *kctx,
+			struct kbase_as	*faulting_as)
+{
+	struct kbase_fault *fault = &faulting_as->pf_data;
+
+	switch (fault->status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
+	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
+	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
+		kbase_gpu_mmu_handle_write_fault(kctx, faulting_as);
+		break;
+	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Execute Permission fault", fault);
+		break;
+	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Read Permission fault", fault);
+		break;
+	default:
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Unknown Permission fault", fault);
+		break;
+	}
+}
+#endif
+
+#define MAX_POOL_LEVEL 2
+
+/**
+ * page_fault_try_alloc - Try to allocate memory from a context pool
+ * @kctx:          Context pointer
+ * @region:        Region to grow
+ * @new_pages:     Number of 4 kB pages to allocate
+ * @pages_to_grow: Pointer to variable to store number of outstanding pages on
+ *                 failure. This can be either 4 kB or 2 MB pages, depending on
+ *                 the number of pages requested.
+ * @grow_2mb_pool: Pointer to variable to store which pool needs to grow - true
+ *                 for 2 MB, false for 4 kB.
+ * @prealloc_sas:  Pointer to kbase_sub_alloc structures
+ *
+ * This function will try to allocate as many pages as possible from the context
+ * pool, then if required will try to allocate the remaining pages from the
+ * device pool.
+ *
+ * This function will not allocate any new memory beyond what is already
+ * present in the context or device pools. This is because it is intended to be
+ * called with the vm_lock held, which could cause recursive locking if the
+ * allocation caused the out-of-memory killer to run.
+ *
+ * If 2 MB pages are enabled and new_pages is >= 2 MB then pages_to_grow will be
+ * a count of 2 MB pages, otherwise it will be a count of 4 kB pages.
+ *
+ * Return: true if successful, false on failure
+ */
+static bool page_fault_try_alloc(struct kbase_context *kctx,
+		struct kbase_va_region *region, size_t new_pages,
+		int *pages_to_grow, bool *grow_2mb_pool,
+		struct kbase_sub_alloc **prealloc_sas)
+{
+	struct tagged_addr *gpu_pages[MAX_POOL_LEVEL] = {NULL};
+	struct tagged_addr *cpu_pages[MAX_POOL_LEVEL] = {NULL};
+	size_t pages_alloced[MAX_POOL_LEVEL] = {0};
+	struct kbase_mem_pool *pool, *root_pool;
+	int pool_level = 0;
+	bool alloc_failed = false;
+	size_t pages_still_required;
+
+	if (WARN_ON(region->gpu_alloc->group_id >=
+		MEMORY_GROUP_MANAGER_NR_GROUPS)) {
+		/* Do not try to grow the memory pool */
+		*pages_to_grow = 0;
+		return false;
+	}
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+	if (new_pages >= (SZ_2M / SZ_4K)) {
+		root_pool = &kctx->mem_pools.large[region->gpu_alloc->group_id];
+		*grow_2mb_pool = true;
+	} else {
+#endif
+		root_pool = &kctx->mem_pools.small[region->gpu_alloc->group_id];
+		*grow_2mb_pool = false;
+#ifdef CONFIG_MALI_2MB_ALLOC
+	}
+#endif
+
+	if (region->gpu_alloc != region->cpu_alloc)
+		new_pages *= 2;
+
+	pages_still_required = new_pages;
+
+	/* Determine how many pages are in the pools before trying to allocate.
+	 * Don't attempt to allocate & free if the allocation can't succeed.
+	 */
+	for (pool = root_pool; pool != NULL; pool = pool->next_pool) {
+		size_t pool_size_4k;
+
+		kbase_mem_pool_lock(pool);
+
+		pool_size_4k = kbase_mem_pool_size(pool) << pool->order;
+		if (pool_size_4k >= pages_still_required)
+			pages_still_required = 0;
+		else
+			pages_still_required -= pool_size_4k;
+
+		kbase_mem_pool_unlock(pool);
+
+		if (!pages_still_required)
+			break;
+	}
+
+	if (pages_still_required) {
+		/* Insufficient pages in pools. Don't try to allocate - just
+		 * request a grow.
+		 */
+		*pages_to_grow = pages_still_required;
+
+		return false;
+	}
+
+	/* Since we've dropped the pool locks, the amount of memory in the pools
+	 * may change between the above check and the actual allocation.
+	 */
+	pool = root_pool;
+	for (pool_level = 0; pool_level < MAX_POOL_LEVEL; pool_level++) {
+		size_t pool_size_4k;
+		size_t pages_to_alloc_4k;
+		size_t pages_to_alloc_4k_per_alloc;
+
+		kbase_mem_pool_lock(pool);
+
+		/* Allocate as much as possible from this pool */
+		pool_size_4k = kbase_mem_pool_size(pool) << pool->order;
+		pages_to_alloc_4k = MIN(new_pages, pool_size_4k);
+		if (region->gpu_alloc == region->cpu_alloc)
+			pages_to_alloc_4k_per_alloc = pages_to_alloc_4k;
+		else
+			pages_to_alloc_4k_per_alloc = pages_to_alloc_4k >> 1;
+
+		pages_alloced[pool_level] = pages_to_alloc_4k;
+		if (pages_to_alloc_4k) {
+			gpu_pages[pool_level] =
+					kbase_alloc_phy_pages_helper_locked(
+						region->gpu_alloc, pool,
+						pages_to_alloc_4k_per_alloc,
+						&prealloc_sas[0]);
+
+			if (!gpu_pages[pool_level]) {
+				alloc_failed = true;
+			} else if (region->gpu_alloc != region->cpu_alloc) {
+				cpu_pages[pool_level] =
+					kbase_alloc_phy_pages_helper_locked(
+						region->cpu_alloc, pool,
+						pages_to_alloc_4k_per_alloc,
+						&prealloc_sas[1]);
+
+				if (!cpu_pages[pool_level])
+					alloc_failed = true;
+			}
+		}
+
+		kbase_mem_pool_unlock(pool);
+
+		if (alloc_failed) {
+			WARN_ON(!new_pages);
+			WARN_ON(pages_to_alloc_4k >= new_pages);
+			WARN_ON(pages_to_alloc_4k_per_alloc >= new_pages);
+			break;
+		}
+
+		new_pages -= pages_to_alloc_4k;
+
+		if (!new_pages)
+			break;
+
+		pool = pool->next_pool;
+		if (!pool)
+			break;
+	}
+
+	if (new_pages) {
+		/* Allocation was unsuccessful */
+		int max_pool_level = pool_level;
+
+		pool = root_pool;
+
+		/* Free memory allocated so far */
+		for (pool_level = 0; pool_level <= max_pool_level;
+				pool_level++) {
+			kbase_mem_pool_lock(pool);
+
+			if (region->gpu_alloc != region->cpu_alloc) {
+				if (pages_alloced[pool_level] &&
+						cpu_pages[pool_level])
+					kbase_free_phy_pages_helper_locked(
+						region->cpu_alloc,
+						pool, cpu_pages[pool_level],
+						pages_alloced[pool_level]);
+			}
+
+			if (pages_alloced[pool_level] && gpu_pages[pool_level])
+				kbase_free_phy_pages_helper_locked(
+						region->gpu_alloc,
+						pool, gpu_pages[pool_level],
+						pages_alloced[pool_level]);
+
+			kbase_mem_pool_unlock(pool);
+
+			pool = pool->next_pool;
+		}
+
+		/*
+		 * If the allocation failed despite there being enough memory in
+		 * the pool, then just fail. Otherwise, try to grow the memory
+		 * pool.
+		 */
+		if (alloc_failed)
+			*pages_to_grow = 0;
+		else
+			*pages_to_grow = new_pages;
+
+		return false;
+	}
+
+	/* Allocation was successful. No pages to grow, return success. */
+	*pages_to_grow = 0;
+
+	return true;
+}
+
+void page_fault_worker(struct work_struct *data)
+{
+	u64 fault_pfn;
+	u32 fault_status;
+	size_t new_pages;
+	size_t fault_rel_pfn;
+	struct kbase_as *faulting_as;
+	int as_no;
+	struct kbase_context *kctx;
+	struct kbase_device *kbdev;
+	struct kbase_va_region *region;
+	struct kbase_fault *fault;
+	int err;
+	bool grown = false;
+	int pages_to_grow;
+	bool grow_2mb_pool;
+	struct kbase_sub_alloc *prealloc_sas[2] = { NULL, NULL };
+	int i;
+
+	faulting_as = container_of(data, struct kbase_as, work_pagefault);
+	fault = &faulting_as->pf_data;
+	fault_pfn = fault->addr >> PAGE_SHIFT;
+	as_no = faulting_as->number;
+
+	kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
+
+	/* Grab the context that was already refcounted in kbase_mmu_interrupt().
+	 * Therefore, it cannot be scheduled out of this AS until we explicitly release it
+	 */
+	kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
+	if (WARN_ON(!kctx)) {
+		atomic_dec(&kbdev->faults_pending);
+		return;
+	}
+
+	KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev);
+
+	if (unlikely(fault->protected_mode)) {
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Protected mode fault", fault);
+		kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+				KBASE_MMU_FAULT_TYPE_PAGE);
+
+		goto fault_done;
+	}
+
+	fault_status = fault->status;
+	switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) {
+
+	case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT:
+		/* need to check against the region to handle this one */
+		break;
+
+	case AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT:
+#ifdef CONFIG_MALI_CINSTR_GWT
+		/* If GWT was ever enabled then we need to handle
+		 * write fault pages even if the feature was disabled later.
+		 */
+		if (kctx->gwt_was_enabled) {
+			kbase_gpu_mmu_handle_permission_fault(kctx,
+							faulting_as);
+			goto fault_done;
+		}
+#endif
+
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Permission failure", fault);
+		goto fault_done;
+
+	case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT:
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Translation table bus fault", fault);
+		goto fault_done;
+
+	case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG:
+		/* nothing to do, but we don't expect this fault currently */
+		dev_warn(kbdev->dev, "Access flag unexpectedly set");
+		goto fault_done;
+
+	case AS_FAULTSTATUS_EXCEPTION_CODE_ADDRESS_SIZE_FAULT:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Address size fault", fault);
+		else
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Unknown fault code", fault);
+		goto fault_done;
+
+	case AS_FAULTSTATUS_EXCEPTION_CODE_MEMORY_ATTRIBUTES_FAULT:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Memory attributes fault", fault);
+		else
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Unknown fault code", fault);
+		goto fault_done;
+
+	default:
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Unknown fault code", fault);
+		goto fault_done;
+	}
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+	/* Preallocate memory for the sub-allocation structs if necessary */
+	for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i) {
+		prealloc_sas[i] = kmalloc(sizeof(*prealloc_sas[i]), GFP_KERNEL);
+		if (!prealloc_sas[i]) {
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Failed pre-allocating memory for sub-allocations' metadata",
+					fault);
+			goto fault_done;
+		}
+	}
+#endif /* CONFIG_MALI_2MB_ALLOC */
+
+page_fault_retry:
+	/* so we have a translation fault, let's see if it is for growable
+	 * memory */
+	kbase_gpu_vm_lock(kctx);
+
+	region = kbase_region_tracker_find_region_enclosing_address(kctx,
+			fault->addr);
+	if (kbase_is_region_invalid_or_free(region)) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Memory is not mapped on the GPU", fault);
+		goto fault_done;
+	}
+
+	if (region->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"DMA-BUF is not mapped on the GPU", fault);
+		goto fault_done;
+	}
+
+	if (region->gpu_alloc->group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Bad physical memory group ID", fault);
+		goto fault_done;
+	}
+
+	if ((region->flags & GROWABLE_FLAGS_REQUIRED)
+			!= GROWABLE_FLAGS_REQUIRED) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Memory is not growable", fault);
+		goto fault_done;
+	}
+
+	if ((region->flags & KBASE_REG_DONT_NEED)) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Don't need memory can't be grown", fault);
+		goto fault_done;
+	}
+
+	/* find the size we need to grow it by */
+	/* we know the result fits in a size_t because
+	 * kbase_region_tracker_find_region_enclosing_address validates that
+	 * the fault_address is within a size_t of the start_pfn */
+	fault_rel_pfn = fault_pfn - region->start_pfn;
+
+	if (fault_rel_pfn < kbase_reg_current_backed_size(region)) {
+		dev_dbg(kbdev->dev, "Page fault @ 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring",
+				fault->addr, region->start_pfn,
+				region->start_pfn +
+				kbase_reg_current_backed_size(region));
+
+		mutex_lock(&kbdev->mmu_hw_mutex);
+
+		kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+				KBASE_MMU_FAULT_TYPE_PAGE);
+		/* [1] in case another page fault occurred while we were
+		 * handling the (duplicate) page fault we need to ensure we
+		 * don't lose the other page fault as a result of us clearing
+		 * the MMU IRQ. Therefore, after we clear the MMU IRQ we send
+		 * an UNLOCK command that will retry any stalled memory
+		 * transaction (which should cause the other page fault to be
+		 * raised again).
+		 */
+		kbase_mmu_hw_do_operation(kbdev, faulting_as, 0, 0,
+				AS_COMMAND_UNLOCK, 1);
+
+		mutex_unlock(&kbdev->mmu_hw_mutex);
+
+		kbase_mmu_hw_enable_fault(kbdev, faulting_as,
+				KBASE_MMU_FAULT_TYPE_PAGE);
+		kbase_gpu_vm_unlock(kctx);
+
+		goto fault_done;
+	}
+
+	new_pages = reg_grow_calc_extra_pages(kbdev, region, fault_rel_pfn);
+
+	/* cap to max vsize */
+	new_pages = min(new_pages, region->nr_pages - kbase_reg_current_backed_size(region));
+
+	if (0 == new_pages) {
+		mutex_lock(&kbdev->mmu_hw_mutex);
+
+		/* Duplicate of a fault we've already handled, nothing to do */
+		kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+				KBASE_MMU_FAULT_TYPE_PAGE);
+		/* See comment [1] about UNLOCK usage */
+		kbase_mmu_hw_do_operation(kbdev, faulting_as, 0, 0,
+				AS_COMMAND_UNLOCK, 1);
+
+		mutex_unlock(&kbdev->mmu_hw_mutex);
+
+		kbase_mmu_hw_enable_fault(kbdev, faulting_as,
+				KBASE_MMU_FAULT_TYPE_PAGE);
+		kbase_gpu_vm_unlock(kctx);
+		goto fault_done;
+	}
+
+	pages_to_grow = 0;
+
+	spin_lock(&kctx->mem_partials_lock);
+	grown = page_fault_try_alloc(kctx, region, new_pages, &pages_to_grow,
+			&grow_2mb_pool, prealloc_sas);
+	spin_unlock(&kctx->mem_partials_lock);
+
+	if (grown) {
+		u64 pfn_offset;
+		u32 op;
+
+		/* alloc success */
+		KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region) <= region->nr_pages);
+
+		/* set up the new pages */
+		pfn_offset = kbase_reg_current_backed_size(region) - new_pages;
+		/*
+		 * Note:
+		 * Issuing an MMU operation will unlock the MMU and cause the
+		 * translation to be replayed. If the page insertion fails then
+		 * rather than trying to continue, the context should be killed
+		 * so the no_flush version of insert_pages is used which allows
+		 * us to unlock the MMU as we see fit.
+		 */
+		err = kbase_mmu_insert_pages_no_flush(kbdev, &kctx->mmu,
+			region->start_pfn + pfn_offset,
+			&kbase_get_gpu_phy_pages(region)[pfn_offset],
+			new_pages, region->flags, region->gpu_alloc->group_id);
+		if (err) {
+			kbase_free_phy_pages_helper(region->gpu_alloc, new_pages);
+			if (region->gpu_alloc != region->cpu_alloc)
+				kbase_free_phy_pages_helper(region->cpu_alloc,
+						new_pages);
+			kbase_gpu_vm_unlock(kctx);
+			/* The locked VA region will be unlocked and the cache invalidated in here */
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Page table update failure", fault);
+			goto fault_done;
+		}
+		KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, kctx->id, as_no, (u64)new_pages);
+
+		/* AS transaction begin */
+		mutex_lock(&kbdev->mmu_hw_mutex);
+
+		/* flush L2 and unlock the VA (resumes the MMU) */
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
+			op = AS_COMMAND_FLUSH;
+		else
+			op = AS_COMMAND_FLUSH_PT;
+
+		/* clear MMU interrupt - this needs to be done after updating
+		 * the page tables but before issuing a FLUSH command. The
+		 * FLUSH cmd has a side effect that it restarts stalled memory
+		 * transactions in other address spaces which may cause
+		 * another fault to occur. If we didn't clear the interrupt at
+		 * this stage a new IRQ might not be raised when the GPU finds
+		 * a MMU IRQ is already pending.
+		 */
+		kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+					 KBASE_MMU_FAULT_TYPE_PAGE);
+
+		kbase_mmu_hw_do_operation(kbdev, faulting_as,
+				fault->addr >> PAGE_SHIFT,
+				new_pages, op, 1);
+
+		mutex_unlock(&kbdev->mmu_hw_mutex);
+		/* AS transaction end */
+
+		/* reenable this in the mask */
+		kbase_mmu_hw_enable_fault(kbdev, faulting_as,
+					 KBASE_MMU_FAULT_TYPE_PAGE);
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+		if (kctx->gwt_enabled) {
+			/* GWT also tracks growable regions. */
+			struct kbasep_gwt_list_element *pos;
+
+			pos = kmalloc(sizeof(*pos), GFP_KERNEL);
+			if (pos) {
+				pos->region = region;
+				pos->page_addr = (region->start_pfn +
+							pfn_offset) <<
+							 PAGE_SHIFT;
+				pos->num_pages = new_pages;
+				list_add(&pos->link,
+					&kctx->gwt_current_list);
+			} else {
+				dev_warn(kbdev->dev, "kmalloc failure");
+			}
+		}
+#endif
+		kbase_gpu_vm_unlock(kctx);
+	} else {
+		int ret = -ENOMEM;
+
+		kbase_gpu_vm_unlock(kctx);
+
+		/* If the memory pool was insufficient then grow it and retry.
+		 * Otherwise fail the allocation.
+		 */
+		if (pages_to_grow > 0) {
+#ifdef CONFIG_MALI_2MB_ALLOC
+			if (grow_2mb_pool) {
+				/* Round page requirement up to nearest 2 MB */
+				struct kbase_mem_pool *const lp_mem_pool =
+					&kctx->mem_pools.large[
+					region->gpu_alloc->group_id];
+
+				pages_to_grow = (pages_to_grow +
+					((1 << lp_mem_pool->order) - 1))
+						>> lp_mem_pool->order;
+
+				ret = kbase_mem_pool_grow(lp_mem_pool,
+					pages_to_grow);
+			} else {
+#endif
+				struct kbase_mem_pool *const mem_pool =
+					&kctx->mem_pools.small[
+					region->gpu_alloc->group_id];
+
+				ret = kbase_mem_pool_grow(mem_pool,
+					pages_to_grow);
+#ifdef CONFIG_MALI_2MB_ALLOC
+			}
+#endif
+		}
+		if (ret < 0) {
+			/* failed to extend, handle as a normal PF */
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Page allocation failure", fault);
+		} else {
+			goto page_fault_retry;
+		}
+	}
+
+fault_done:
+	for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i)
+		kfree(prealloc_sas[i]);
+
+	/*
+	 * By this point, the fault was handled in some way,
+	 * so release the ctx refcount
+	 */
+	kbasep_js_runpool_release_ctx(kbdev, kctx);
+
+	atomic_dec(&kbdev->faults_pending);
+}
+
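+/* Allocate and initialize a new page directory page: every entry is set to
+ * invalid, the page is synced for the GPU, and (for context-owned tables)
+ * the memory usage is accounted to the context. Returns the physical address
+ * of the new PGD, or 0 on failure.
+ */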
+static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut)
+{
+	u64 *page;
+	int i;
+	struct page *p;
+
+	p = kbase_mem_pool_alloc(&kbdev->mem_pools.small[mmut->group_id]);
+	if (!p)
+		return 0;
+
+	page = kmap(p);
+	if (NULL == page)
+		goto alloc_free;
+
+	/* If the MMU tables belong to a context then account the memory usage
+	 * to that context, otherwise the MMU tables are device wide and are
+	 * only accounted to the device.
+	 */
+	if (mmut->kctx) {
+		int new_page_count;
+
+		new_page_count = atomic_add_return(1,
+			&mmut->kctx->used_pages);
+		KBASE_TLSTREAM_AUX_PAGESALLOC(
+			kbdev,
+			mmut->kctx->id,
+			(u64)new_page_count);
+		kbase_process_page_usage_inc(mmut->kctx, 1);
+	}
+
+	atomic_add(1, &kbdev->memdev.used_pages);
+
+	for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++)
+		kbdev->mmu_mode->entry_invalidate(&page[i]);
+
+	kbase_mmu_sync_pgd(kbdev, kbase_dma_addr(p), PAGE_SIZE);
+
+	kunmap(p);
+	return page_to_phys(p);
+
+alloc_free:
+	kbase_mem_pool_free(&kbdev->mem_pools.small[mmut->group_id], p,
+		false);
+
+	return 0;
+}
+
+/* Given PGD PFN for level N, return PGD PFN for level N+1, allocating the
+ * new table from the pool if needed and possible
+ */
+static int mmu_get_next_pgd(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut,
+		phys_addr_t *pgd, u64 vpfn, int level)
+{
+	u64 *page;
+	phys_addr_t target_pgd;
+	struct page *p;
+
+	KBASE_DEBUG_ASSERT(*pgd);
+
+	lockdep_assert_held(&mmut->mmu_lock);
+
+	/*
+	 * Architecture spec defines level-0 as being the top-most.
+	 * This is a bit unfortunate here, but we keep the same convention.
+	 */
+	vpfn >>= (3 - level) * 9;
+	vpfn &= 0x1FF;
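+	/* Each level consumes 9 bits of the VPFN: the bottom level (3)
+	 * indexes bits [8:0], level 2 bits [17:9], level 1 bits [26:18]
+	 * and level 0 bits [35:27]; 0x1FF keeps the 9-bit index.
+	 */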
+
+	p = pfn_to_page(PFN_DOWN(*pgd));
+	page = kmap(p);
+	if (NULL == page) {
+		dev_warn(kbdev->dev, "%s: kmap failure\n", __func__);
+		return -EINVAL;
+	}
+
+	target_pgd = kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);
+
+	if (!target_pgd) {
+		target_pgd = kbase_mmu_alloc_pgd(kbdev, mmut);
+		if (!target_pgd) {
+			dev_dbg(kbdev->dev, "%s: kbase_mmu_alloc_pgd failure\n",
+					__func__);
+			kunmap(p);
+			return -ENOMEM;
+		}
+
+		kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd);
+
+		kbase_mmu_sync_pgd(kbdev, kbase_dma_addr(p), PAGE_SIZE);
+		/* Rely on the caller to update the address space flags. */
+	}
+
+	kunmap(p);
+	*pgd = target_pgd;
+
+	return 0;
+}
+
+/*
+ * Returns the PGD for the specified level of translation
+ */
+static int mmu_get_pgd_at_level(struct kbase_device *kbdev,
+					struct kbase_mmu_table *mmut,
+					u64 vpfn,
+					int level,
+					phys_addr_t *out_pgd)
+{
+	phys_addr_t pgd;
+	int l;
+
+	lockdep_assert_held(&mmut->mmu_lock);
+	pgd = mmut->pgd;
+
+	for (l = MIDGARD_MMU_TOPLEVEL; l < level; l++) {
+		int err = mmu_get_next_pgd(kbdev, mmut, &pgd, vpfn, l);
+		/* Handle failure condition */
+		if (err) {
+			dev_dbg(kbdev->dev,
+				 "%s: mmu_get_next_pgd failure at level %d\n",
+				 __func__, l);
+			return err;
+		}
+	}
+
+	*out_pgd = pgd;
+
+	return 0;
+}
+
+static int mmu_get_bottom_pgd(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut,
+		u64 vpfn,
+		phys_addr_t *out_pgd)
+{
+	return mmu_get_pgd_at_level(kbdev, mmut, vpfn, MIDGARD_MMU_BOTTOMLEVEL,
+			out_pgd);
+}
+
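+/* Roll back a partially completed insertion: invalidate the entries in the
+ * range [from_vpfn, to_vpfn) that were written before a failure occurred.
+ */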
+static void mmu_insert_pages_failure_recovery(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut,
+		u64 from_vpfn, u64 to_vpfn)
+{
+	phys_addr_t pgd;
+	u64 vpfn = from_vpfn;
+	struct kbase_mmu_mode const *mmu_mode;
+
+	/* 64-bit address range is the max */
+	KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+	KBASE_DEBUG_ASSERT(from_vpfn <= to_vpfn);
+
+	lockdep_assert_held(&mmut->mmu_lock);
+
+	mmu_mode = kbdev->mmu_mode;
+
+	while (vpfn < to_vpfn) {
+		unsigned int i;
+		unsigned int idx = vpfn & 0x1FF;
+		unsigned int count = KBASE_MMU_PAGE_ENTRIES - idx;
+		unsigned int pcount = 0;
+		unsigned int left = to_vpfn - vpfn;
+		int level;
+		u64 *page;
+
+		if (count > left)
+			count = left;
+
+		/* need to check if this is a 2MB page or a 4kB */
+		pgd = mmut->pgd;
+
+		for (level = MIDGARD_MMU_TOPLEVEL;
+				level <= MIDGARD_MMU_BOTTOMLEVEL; level++) {
+			idx = (vpfn >> ((3 - level) * 9)) & 0x1FF;
+			page = kmap(phys_to_page(pgd));
+			if (mmu_mode->ate_is_valid(page[idx], level))
+				break; /* keep the mapping */
+			kunmap(phys_to_page(pgd));
+			pgd = mmu_mode->pte_to_phy_addr(page[idx]);
+		}
+
+		switch (level) {
+		case MIDGARD_MMU_LEVEL(2):
+			/* remap to single entry to update */
+			pcount = 1;
+			break;
+		case MIDGARD_MMU_BOTTOMLEVEL:
+			/* page count is the same as the logical count */
+			pcount = count;
+			break;
+		default:
+			dev_warn(kbdev->dev, "%sNo support for ATEs at level %d\n",
+			       __func__, level);
+			goto next;
+		}
+
+		/* Invalidate the entries we added */
+		for (i = 0; i < pcount; i++)
+			mmu_mode->entry_invalidate(&page[idx + i]);
+
+		kbase_mmu_sync_pgd(kbdev,
+				   kbase_dma_addr(phys_to_page(pgd)) + 8 * idx,
+				   8 * pcount);
+		kunmap(phys_to_page(pgd));
+
+next:
+		vpfn += count;
+	}
+}
+
+/*
+ * Map the single page 'phys' 'nr' of times, starting at GPU PFN 'vpfn'
+ */
+int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
+					struct tagged_addr phys, size_t nr,
+					unsigned long flags, int const group_id)
+{
+	phys_addr_t pgd;
+	u64 *pgd_page;
+	/* In case insert_single_page only partially completes, we need to be
+	 * able to recover */
+	bool recover_required = false;
+	u64 recover_vpfn = vpfn;
+	size_t recover_count = 0;
+	size_t remain = nr;
+	int err;
+	struct kbase_device *kbdev;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+	/* 64-bit address range is the max */
+	KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+
+	kbdev = kctx->kbdev;
+
+	/* Early out if there is nothing to do */
+	if (nr == 0)
+		return 0;
+
+	mutex_lock(&kctx->mmu.mmu_lock);
+
+	while (remain) {
+		unsigned int i;
+		unsigned int index = vpfn & 0x1FF;
+		unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+		struct page *p;
+
+		if (count > remain)
+			count = remain;
+
+		/*
+		 * Repeatedly calling mmu_get_bottom_pgd() is clearly
+		 * suboptimal. We don't have to re-parse the whole tree
+		 * each time (just cache the l0-l2 sequence).
+		 * On the other hand, it's only a gain when we map more than
+		 * 256 pages at once (on average). Do we really care?
+		 */
+		do {
+			err = mmu_get_bottom_pgd(kbdev, &kctx->mmu,
+					vpfn, &pgd);
+			if (err != -ENOMEM)
+				break;
+			/* Fill the memory pool with enough pages for
+			 * the page walk to succeed
+			 */
+			mutex_unlock(&kctx->mmu.mmu_lock);
+			err = kbase_mem_pool_grow(
+				&kbdev->mem_pools.small[
+					kctx->mmu.group_id],
+				MIDGARD_MMU_BOTTOMLEVEL);
+			mutex_lock(&kctx->mmu.mmu_lock);
+		} while (!err);
+		if (err) {
+			dev_warn(kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n");
+			if (recover_required) {
+				/* Invalidate the pages we have partially
+				 * completed */
+				mmu_insert_pages_failure_recovery(kbdev,
+						&kctx->mmu,
+						recover_vpfn,
+						recover_vpfn + recover_count);
+			}
+			goto fail_unlock;
+		}
+
+		p = pfn_to_page(PFN_DOWN(pgd));
+		pgd_page = kmap(p);
+		if (!pgd_page) {
+			dev_warn(kbdev->dev, "kbase_mmu_insert_pages: kmap failure\n");
+			if (recover_required) {
+				/* Invalidate the pages we have partially
+				 * completed */
+				mmu_insert_pages_failure_recovery(kbdev,
+						&kctx->mmu,
+						recover_vpfn,
+						recover_vpfn + recover_count);
+			}
+			err = -ENOMEM;
+			goto fail_unlock;
+		}
+
+		for (i = 0; i < count; i++) {
+			unsigned int ofs = index + i;
+
+			/* Fail if the current page is a valid ATE entry */
+			KBASE_DEBUG_ASSERT(0 == (pgd_page[ofs] & 1UL));
+
+			pgd_page[ofs] = kbase_mmu_create_ate(kbdev,
+				phys, flags, MIDGARD_MMU_BOTTOMLEVEL, group_id);
+		}
+
+		vpfn += count;
+		remain -= count;
+
+		kbase_mmu_sync_pgd(kbdev,
+				kbase_dma_addr(p) + (index * sizeof(u64)),
+				count * sizeof(u64));
+
+		kunmap(p);
+		/* We have started modifying the page table.
+		 * If inserting further pages fails, we need to undo what has
+		 * already taken place */
+		recover_required = true;
+		recover_count += count;
+	}
+	mutex_unlock(&kctx->mmu.mmu_lock);
+	kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+	return 0;
+
+fail_unlock:
+	mutex_unlock(&kctx->mmu.mmu_lock);
+	kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+	return err;
+}
+
+static inline void cleanup_empty_pte(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut, u64 *pte)
+{
+	phys_addr_t tmp_pgd;
+	struct page *tmp_p;
+
+	tmp_pgd = kbdev->mmu_mode->pte_to_phy_addr(*pte);
+	tmp_p = phys_to_page(tmp_pgd);
+	kbase_mem_pool_free(&kbdev->mem_pools.small[mmut->group_id],
+		tmp_p, false);
+
+	/* If the MMU tables belong to a context then we accounted the memory
+	 * usage to that context, so decrement here.
+	 */
+	if (mmut->kctx) {
+		kbase_process_page_usage_dec(mmut->kctx, 1);
+		atomic_sub(1, &mmut->kctx->used_pages);
+	}
+	atomic_sub(1, &kbdev->memdev.used_pages);
+}
+
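+/* Compose an Address Translation Entry for the given physical page, flags
+ * and MMU level, then let the memory group manager adjust the resulting
+ * entry for the physical memory group it belongs to.
+ */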
+u64 kbase_mmu_create_ate(struct kbase_device *const kbdev,
+	struct tagged_addr const phy, unsigned long const flags,
+	int const level, int const group_id)
+{
+	u64 entry;
+
+	kbdev->mmu_mode->entry_set_ate(&entry, phy, flags, level);
+	return kbdev->mgm_dev->ops.mgm_update_gpu_pte(kbdev->mgm_dev,
+		group_id, level, entry);
+}
+
+int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
+				    struct kbase_mmu_table *mmut,
+				    const u64 start_vpfn,
+				    struct tagged_addr *phys, size_t nr,
+				    unsigned long flags,
+				    int const group_id)
+{
+	phys_addr_t pgd;
+	u64 *pgd_page;
+	u64 insert_vpfn = start_vpfn;
+	size_t remain = nr;
+	int err;
+	struct kbase_mmu_mode const *mmu_mode;
+
+	/* Note that 0 is a valid start_vpfn */
+	/* 64-bit address range is the max */
+	KBASE_DEBUG_ASSERT(start_vpfn <= (U64_MAX / PAGE_SIZE));
+
+	mmu_mode = kbdev->mmu_mode;
+
+	/* Early out if there is nothing to do */
+	if (nr == 0)
+		return 0;
+
+	mutex_lock(&mmut->mmu_lock);
+
+	while (remain) {
+		unsigned int i;
+		unsigned int vindex = insert_vpfn & 0x1FF;
+		unsigned int count = KBASE_MMU_PAGE_ENTRIES - vindex;
+		struct page *p;
+		int cur_level;
+
+		if (count > remain)
+			count = remain;
+
+		if (!vindex && is_huge_head(*phys))
+			cur_level = MIDGARD_MMU_LEVEL(2);
+		else
+			cur_level = MIDGARD_MMU_BOTTOMLEVEL;
+
+		/*
+		 * Repeatedly calling mmu_get_pgd_at_level() is clearly
+		 * suboptimal. We don't have to re-parse the whole tree
+		 * each time (just cache the l0-l2 sequence).
+		 * On the other hand, it's only a gain when we map more than
+		 * 256 pages at once (on average). Do we really care?
+		 */
+		do {
+			err = mmu_get_pgd_at_level(kbdev, mmut, insert_vpfn,
+						   cur_level, &pgd);
+			if (err != -ENOMEM)
+				break;
+			/* Fill the memory pool with enough pages for
+			 * the page walk to succeed
+			 */
+			mutex_unlock(&mmut->mmu_lock);
+			err = kbase_mem_pool_grow(
+				&kbdev->mem_pools.small[mmut->group_id],
+				cur_level);
+			mutex_lock(&mmut->mmu_lock);
+		} while (!err);
+
+		if (err) {
+			dev_warn(kbdev->dev,
+				 "%s: mmu_get_bottom_pgd failure\n", __func__);
+			if (insert_vpfn != start_vpfn) {
+				/* Invalidate the pages we have partially
+				 * completed */
+				mmu_insert_pages_failure_recovery(kbdev,
+						mmut, start_vpfn, insert_vpfn);
+			}
+			goto fail_unlock;
+		}
+
+		p = pfn_to_page(PFN_DOWN(pgd));
+		pgd_page = kmap(p);
+		if (!pgd_page) {
+			dev_warn(kbdev->dev, "%s: kmap failure\n",
+				 __func__);
+			if (insert_vpfn != start_vpfn) {
+				/* Invalidate the pages we have partially
+				 * completed */
+				mmu_insert_pages_failure_recovery(kbdev,
+						mmut, start_vpfn, insert_vpfn);
+			}
+			err = -ENOMEM;
+			goto fail_unlock;
+		}
+
+		if (cur_level == MIDGARD_MMU_LEVEL(2)) {
+			int level_index = (insert_vpfn >> 9) & 0x1FF;
+			u64 *target = &pgd_page[level_index];
+
+			if (mmu_mode->pte_is_valid(*target, cur_level))
+				cleanup_empty_pte(kbdev, mmut, target);
+			*target = kbase_mmu_create_ate(kbdev, *phys, flags,
+				cur_level, group_id);
+		} else {
+			for (i = 0; i < count; i++) {
+				unsigned int ofs = vindex + i;
+				u64 *target = &pgd_page[ofs];
+
+				/* Warn if the current page is a valid ATE
+				 * entry. The page table shouldn't have anything
+				 * in the place where we are trying to put a
+				 * new entry. Modification to page table entries
+				 * should be performed with
+				 * kbase_mmu_update_pages()
+				 */
+				WARN_ON((*target & 1UL) != 0);
+
+				*target = kbase_mmu_create_ate(kbdev,
+					phys[i], flags, cur_level, group_id);
+			}
+		}
+
+		phys += count;
+		insert_vpfn += count;
+		remain -= count;
+
+		kbase_mmu_sync_pgd(kbdev,
+				kbase_dma_addr(p) + (vindex * sizeof(u64)),
+				count * sizeof(u64));
+
+		kunmap(p);
+	}
+
+	err = 0;
+
+fail_unlock:
+	mutex_unlock(&mmut->mmu_lock);
+	return err;
+}
+
+/*
+ * Map 'nr' pages pointed to by 'phys' at GPU PFN 'vpfn' for GPU address space
+ * number 'as_nr'.
+ */
+int kbase_mmu_insert_pages(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut, u64 vpfn,
+		struct tagged_addr *phys, size_t nr,
+		unsigned long flags, int as_nr, int const group_id)
+{
+	int err;
+
+	err = kbase_mmu_insert_pages_no_flush(kbdev, mmut, vpfn,
+			phys, nr, flags, group_id);
+
+	if (mmut->kctx)
+		kbase_mmu_flush_invalidate(mmut->kctx, vpfn, nr, false);
+	else
+		kbase_mmu_flush_invalidate_no_ctx(kbdev, vpfn, nr, false, as_nr);
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_insert_pages);
+
+/**
+ * kbase_mmu_flush_invalidate_noretain() - Flush and invalidate the GPU caches
+ * without retaining the kbase context.
+ * @kctx: The KBase context.
+ * @vpfn: The virtual page frame number to start the flush on.
+ * @nr: The number of pages to flush.
+ * @sync: Set if the operation should be synchronous or not.
+ *
+ * As per kbase_mmu_flush_invalidate but doesn't retain the kctx or do any
+ * other locking.
+ */
+static void kbase_mmu_flush_invalidate_noretain(struct kbase_context *kctx,
+		u64 vpfn, size_t nr, bool sync)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	int err;
+	u32 op;
+
+	/* Early out if there is nothing to do */
+	if (nr == 0)
+		return;
+
+	if (sync)
+		op = AS_COMMAND_FLUSH_MEM;
+	else
+		op = AS_COMMAND_FLUSH_PT;
+
+	err = kbase_mmu_hw_do_operation(kbdev,
+				&kbdev->as[kctx->as_nr],
+				vpfn, nr, op, 0);
+	if (err) {
+		/* Flush failed to complete, assume the
+		 * GPU has hung and perform a reset to
+		 * recover */
+		dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issuing GPU soft-reset to recover\n");
+
+		if (kbase_prepare_to_reset_gpu_locked(kbdev))
+			kbase_reset_gpu_locked(kbdev);
+	}
+
+#ifndef CONFIG_MALI_NO_MALI
+	/*
+	 * As this function could be called in interrupt context the sync
+	 * request can't block. Instead log the request and the next flush
+	 * request will pick it up.
+	 */
+	if ((!err) && sync &&
+			kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
+		atomic_set(&kctx->drain_pending, 1);
+#endif /* !CONFIG_MALI_NO_MALI */
+}
+
+/* Perform a flush/invalidate on a particular address space
+ */
+static void kbase_mmu_flush_invalidate_as(struct kbase_device *kbdev,
+		struct kbase_as *as,
+		u64 vpfn, size_t nr, bool sync, bool drain_pending)
+{
+	int err;
+	u32 op;
+
+	if (kbase_pm_context_active_handle_suspend(kbdev,
+				KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+		/* GPU is off so there's no need to perform flush/invalidate */
+		return;
+	}
+
+	/* AS transaction begin */
+	mutex_lock(&kbdev->mmu_hw_mutex);
+
+	if (sync)
+		op = AS_COMMAND_FLUSH_MEM;
+	else
+		op = AS_COMMAND_FLUSH_PT;
+
+	err = kbase_mmu_hw_do_operation(kbdev,
+			as, vpfn, nr, op, 0);
+
+	if (err) {
+		/* Flush failed to complete, assume the GPU has hung and
+		 * perform a reset to recover
+		 */
+		dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issueing GPU soft-reset to recover\n");
+
+		if (kbase_prepare_to_reset_gpu(kbdev))
+			kbase_reset_gpu(kbdev);
+	}
+
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+	/* AS transaction end */
+
+#ifndef CONFIG_MALI_NO_MALI
+	/*
+	 * The transaction lock must be dropped before here
+	 * as kbase_wait_write_flush could take it if
+	 * the GPU was powered down (static analysis doesn't
+	 * know this can't happen).
+	 */
+	drain_pending |= (!err) && sync &&
+		kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367);
+	if (drain_pending) {
+		/* Wait for GPU to flush write buffer */
+		kbase_wait_write_flush(kbdev);
+	}
+#endif /* !CONFIG_MALI_NO_MALI */
+
+	kbase_pm_context_idle(kbdev);
+}
+
+static void kbase_mmu_flush_invalidate_no_ctx(struct kbase_device *kbdev,
+		u64 vpfn, size_t nr, bool sync, int as_nr)
+{
+	/* Skip if there is nothing to do */
+	if (nr) {
+		kbase_mmu_flush_invalidate_as(kbdev, &kbdev->as[as_nr], vpfn,
+					nr, sync, false);
+	}
+}
+
+static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
+		u64 vpfn, size_t nr, bool sync)
+{
+	struct kbase_device *kbdev;
+	bool ctx_is_in_runpool;
+	bool drain_pending = false;
+
+#ifndef CONFIG_MALI_NO_MALI
+	if (atomic_xchg(&kctx->drain_pending, 0))
+		drain_pending = true;
+#endif /* !CONFIG_MALI_NO_MALI */
+
+	/* Early out if there is nothing to do */
+	if (nr == 0)
+		return;
+
+	kbdev = kctx->kbdev;
+	mutex_lock(&kbdev->js_data.queue_mutex);
+	ctx_is_in_runpool = kbasep_js_runpool_retain_ctx(kbdev, kctx);
+	mutex_unlock(&kbdev->js_data.queue_mutex);
+
+	if (ctx_is_in_runpool) {
+		KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+		kbase_mmu_flush_invalidate_as(kbdev, &kbdev->as[kctx->as_nr],
+				vpfn, nr, sync, drain_pending);
+
+		kbasep_js_runpool_release_ctx(kbdev, kctx);
+	}
+}
+
+void kbase_mmu_update(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut,
+		int as_nr)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+	KBASE_DEBUG_ASSERT(as_nr != KBASEP_AS_NR_INVALID);
+
+	kbdev->mmu_mode->update(kbdev, mmut, as_nr);
+}
+KBASE_EXPORT_TEST_API(kbase_mmu_update);
+
+void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+
+	kbdev->mmu_mode->disable_as(kbdev, as_nr);
+}
+
+void kbase_mmu_disable(struct kbase_context *kctx)
+{
+	/* ASSERT that the context has a valid as_nr, which is only the case
+	 * when it's scheduled in.
+	 *
+	 * as_nr won't change because the caller has the hwaccess_lock */
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	/*
+	 * The address space is being disabled, drain all knowledge of it out
+	 * from the caches as pages and page tables might be freed after this.
+	 *
+	 * The job scheduler code will already be holding the locks and context
+	 * so just do the flush.
+	 */
+	kbase_mmu_flush_invalidate_noretain(kctx, 0, ~0, true);
+
+	kctx->kbdev->mmu_mode->disable_as(kctx->kbdev, kctx->as_nr);
+}
+KBASE_EXPORT_TEST_API(kbase_mmu_disable);
+
+/*
+ * We actually only discard the ATE, and not the page table
+ * pages. There is a potential DoS here, as we'll leak memory by
+ * having PTEs that are potentially unused. This will require physical
+ * page accounting, so that MMU pages are part of the process allocation.
+ *
+ * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
+ * currently scheduled into the runpool, and so potentially uses a lot of locks.
+ * These locks must be taken in the correct order with respect to others
+ * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
+ * information.
+ */
+int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
+	struct kbase_mmu_table *mmut, u64 vpfn, size_t nr, int as_nr)
+{
+	phys_addr_t pgd;
+	size_t requested_nr = nr;
+	struct kbase_mmu_mode const *mmu_mode;
+	int err = -EFAULT;
+
+	if (0 == nr) {
+		/* early out if nothing to do */
+		return 0;
+	}
+
+	mutex_lock(&mmut->mmu_lock);
+
+	mmu_mode = kbdev->mmu_mode;
+
+	while (nr) {
+		unsigned int i;
+		unsigned int index = vpfn & 0x1FF;
+		unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+		unsigned int pcount;
+		int level;
+		u64 *page;
+
+		if (count > nr)
+			count = nr;
+
+		/* need to check if this is a 2MB or a 4kB page */
+		pgd = mmut->pgd;
+
+		for (level = MIDGARD_MMU_TOPLEVEL;
+				level <= MIDGARD_MMU_BOTTOMLEVEL; level++) {
+			phys_addr_t next_pgd;
+
+			index = (vpfn >> ((3 - level) * 9)) & 0x1FF;
+			page = kmap(phys_to_page(pgd));
+			if (mmu_mode->ate_is_valid(page[index], level))
+				break; /* keep the mapping */
+			else if (!mmu_mode->pte_is_valid(page[index], level)) {
+				/* nothing here, advance by the number of 4kB
+				 * entries covered by one entry at this level:
+				 * 512^(3 - level)
+				 */
+				switch (level) {
+				case MIDGARD_MMU_LEVEL(0):
+					count = 134217728; /* 512^3 */
+					break;
+				case MIDGARD_MMU_LEVEL(1):
+					count = 262144; /* 512^2 */
+					break;
+				case MIDGARD_MMU_LEVEL(2):
+					count = 512; /* 512^1 */
+					break;
+				case MIDGARD_MMU_LEVEL(3):
+					count = 1;
+					break;
+				}
+				if (count > nr)
+					count = nr;
+				goto next;
+			}
+			next_pgd = mmu_mode->pte_to_phy_addr(page[index]);
+			kunmap(phys_to_page(pgd));
+			pgd = next_pgd;
+		}
+
+		switch (level) {
+		case MIDGARD_MMU_LEVEL(0):
+		case MIDGARD_MMU_LEVEL(1):
+			dev_warn(kbdev->dev,
+				 "%s: No support for ATEs at level %d\n",
+				 __func__, level);
+			kunmap(phys_to_page(pgd));
+			goto out;
+		case MIDGARD_MMU_LEVEL(2):
+			/* can only teardown if count >= 512 */
+			if (count >= 512) {
+				pcount = 1;
+			} else {
+				dev_warn(kbdev->dev,
+					 "%s: limiting teardown as it tries to do a partial 2MB teardown, need 512, but have %d to tear down\n",
+					 __func__, count);
+				pcount = 0;
+			}
+			break;
+		case MIDGARD_MMU_BOTTOMLEVEL:
+			/* page count is the same as the logical count */
+			pcount = count;
+			break;
+		default:
+			dev_err(kbdev->dev,
+				"%s: found non-mapped memory, early out\n",
+				__func__);
+			vpfn += count;
+			nr -= count;
+			continue;
+		}
+
+		/* Invalidate the entries we added */
+		for (i = 0; i < pcount; i++)
+			mmu_mode->entry_invalidate(&page[index + i]);
+
+		kbase_mmu_sync_pgd(kbdev,
+				   kbase_dma_addr(phys_to_page(pgd)) +
+				   8 * index, 8 * pcount);
+
+next:
+		kunmap(phys_to_page(pgd));
+		vpfn += count;
+		nr -= count;
+	}
+	err = 0;
+out:
+	mutex_unlock(&mmut->mmu_lock);
+
+	if (mmut->kctx)
+		kbase_mmu_flush_invalidate(mmut->kctx, vpfn, requested_nr, true);
+	else
+		kbase_mmu_flush_invalidate_no_ctx(kbdev, vpfn, requested_nr, true, as_nr);
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_teardown_pages);
+
+/**
+ * kbase_mmu_update_pages_no_flush() - Update page table entries on the GPU
+ * @kctx:  Kbase context
+ * @vpfn:  Virtual PFN (Page Frame Number) of the first page to update
+ * @phys:  Tagged physical addresses of the physical pages to replace the
+ *         current mappings
+ * @nr:    Number of pages to update
+ * @flags: Flags
+ * @group_id: The physical memory group in which the page was allocated.
+ *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
+ *
+ * This will update page table entries that already exist on the GPU based on
+ * the new flags that are passed. It is used in response to changes in the
+ * memory attributes.
+ *
+ * The caller is responsible for validating the memory attributes.
+ */
+static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
+					struct tagged_addr *phys, size_t nr,
+					unsigned long flags, int const group_id)
+{
+	phys_addr_t pgd;
+	u64 *pgd_page;
+	int err;
+	struct kbase_device *kbdev;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+	KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+
+	/* Early out if there is nothing to do */
+	if (nr == 0)
+		return 0;
+
+	mutex_lock(&kctx->mmu.mmu_lock);
+
+	kbdev = kctx->kbdev;
+
+	while (nr) {
+		unsigned int i;
+		unsigned int index = vpfn & 0x1FF;
+		size_t count = KBASE_MMU_PAGE_ENTRIES - index;
+		struct page *p;
+
+		if (count > nr)
+			count = nr;
+
+		do {
+			err = mmu_get_bottom_pgd(kbdev, &kctx->mmu,
+					vpfn, &pgd);
+			if (err != -ENOMEM)
+				break;
+			/* Fill the memory pool with enough pages for
+			 * the page walk to succeed
+			 */
+			mutex_unlock(&kctx->mmu.mmu_lock);
+			err = kbase_mem_pool_grow(
+				&kbdev->mem_pools.small[
+					kctx->mmu.group_id],
+				MIDGARD_MMU_BOTTOMLEVEL);
+			mutex_lock(&kctx->mmu.mmu_lock);
+		} while (!err);
+		if (err) {
+			dev_warn(kbdev->dev,
+				 "mmu_get_bottom_pgd failure\n");
+			goto fail_unlock;
+		}
+
+		p = pfn_to_page(PFN_DOWN(pgd));
+		pgd_page = kmap(p);
+		if (!pgd_page) {
+			dev_warn(kbdev->dev, "kmap failure\n");
+			err = -ENOMEM;
+			goto fail_unlock;
+		}
+
+		for (i = 0; i < count; i++)
+			pgd_page[index + i] = kbase_mmu_create_ate(kbdev,
+				phys[i], flags, MIDGARD_MMU_BOTTOMLEVEL,
+				group_id);
+
+		phys += count;
+		vpfn += count;
+		nr -= count;
+
+		kbase_mmu_sync_pgd(kbdev,
+				kbase_dma_addr(p) + (index * sizeof(u64)),
+				count * sizeof(u64));
+
+		kunmap(pfn_to_page(PFN_DOWN(pgd)));
+	}
+
+	mutex_unlock(&kctx->mmu.mmu_lock);
+	return 0;
+
+fail_unlock:
+	mutex_unlock(&kctx->mmu.mmu_lock);
+	return err;
+}
+
+int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
+			   struct tagged_addr *phys, size_t nr,
+			   unsigned long flags, int const group_id)
+{
+	int err;
+
+	err = kbase_mmu_update_pages_no_flush(kctx, vpfn, phys, nr, flags,
+		group_id);
+	kbase_mmu_flush_invalidate(kctx, vpfn, nr, true);
+	return err;
+}
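+
+/*
+ * Illustrative call of the above (a sketch; the flag value is hypothetical
+ * for this example): downgrade nr pages starting at vpfn to read-only by
+ * re-writing their ATEs and flushing the GPU MMU:
+ *
+ *	err = kbase_mmu_update_pages(kctx, vpfn, phys, nr,
+ *				     KBASE_REG_GPU_RD, group_id);
+ */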
+
+static void mmu_teardown_level(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut, phys_addr_t pgd,
+		int level, u64 *pgd_page_buffer)
+{
+	phys_addr_t target_pgd;
+	struct page *p;
+	u64 *pgd_page;
+	int i;
+	struct kbase_mmu_mode const *mmu_mode;
+
+	lockdep_assert_held(&mmut->mmu_lock);
+
+	pgd_page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
+	/* kmap_atomic should NEVER fail. */
+	KBASE_DEBUG_ASSERT(NULL != pgd_page);
+	/* Copy the page to our preallocated buffer so that we can minimize
+	 * kmap_atomic usage */
+	memcpy(pgd_page_buffer, pgd_page, PAGE_SIZE);
+	kunmap_atomic(pgd_page);
+	pgd_page = pgd_page_buffer;
+
+	mmu_mode = kbdev->mmu_mode;
+
+	for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
+		target_pgd = mmu_mode->pte_to_phy_addr(pgd_page[i]);
+
+		if (target_pgd) {
+			if (mmu_mode->pte_is_valid(pgd_page[i], level)) {
+				mmu_teardown_level(kbdev, mmut,
+						   target_pgd,
+						   level + 1,
+						   pgd_page_buffer +
+						   (PAGE_SIZE / sizeof(u64)));
+			}
+		}
+	}
+
+	p = pfn_to_page(PFN_DOWN(pgd));
+
+	kbase_mem_pool_free(&kbdev->mem_pools.small[mmut->group_id],
+		p, true);
+
+	atomic_sub(1, &kbdev->memdev.used_pages);
+
+	/* If MMU tables belong to a context then pages will have been accounted
+	 * against it, so we must decrement the usage counts here.
+	 */
+	if (mmut->kctx) {
+		kbase_process_page_usage_dec(mmut->kctx, 1);
+		atomic_sub(1, &mmut->kctx->used_pages);
+	}
+}
+
+int kbase_mmu_init(struct kbase_device *const kbdev,
+	struct kbase_mmu_table *const mmut, struct kbase_context *const kctx,
+	int const group_id)
+{
+	if (WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS) ||
+	    WARN_ON(group_id < 0))
+		return -EINVAL;
+
+	mmut->group_id = group_id;
+	mutex_init(&mmut->mmu_lock);
+	mmut->kctx = kctx;
+
+	/* Preallocate scratch space: one page per MMU level for
+	 * mmu_teardown_level to use
+	 */
+	mmut->mmu_teardown_pages = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
+
+	if (mmut->mmu_teardown_pages == NULL)
+		return -ENOMEM;
+
+	mmut->pgd = 0;
+	/* We allocate pages into the kbdev memory pool, then
+	 * kbase_mmu_alloc_pgd will allocate out of that pool. This is done to
+	 * avoid allocations from the kernel happening with the lock held.
+	 */
+	while (!mmut->pgd) {
+		int err;
+
+		err = kbase_mem_pool_grow(
+			&kbdev->mem_pools.small[mmut->group_id],
+			MIDGARD_MMU_BOTTOMLEVEL);
+		if (err) {
+			kbase_mmu_term(kbdev, mmut);
+			return -ENOMEM;
+		}
+
+		mutex_lock(&mmut->mmu_lock);
+		mmut->pgd = kbase_mmu_alloc_pgd(kbdev, mmut);
+		mutex_unlock(&mmut->mmu_lock);
+	}
+
+	return 0;
+}
+
+void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut)
+{
+	if (mmut->pgd) {
+		mutex_lock(&mmut->mmu_lock);
+		mmu_teardown_level(kbdev, mmut, mmut->pgd, MIDGARD_MMU_TOPLEVEL,
+				mmut->mmu_teardown_pages);
+		mutex_unlock(&mmut->mmu_lock);
+
+		if (mmut->kctx)
+			KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, mmut->kctx->id, 0);
+	}
+
+	kfree(mmut->mmu_teardown_pages);
+	mutex_destroy(&mmut->mmu_lock);
+}
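+
+/*
+ * Lifecycle sketch for the two functions above (illustrative only; error
+ * handling elided, and memory group 0 is an arbitrary choice here):
+ *
+ *	if (!kbase_mmu_init(kbdev, &kctx->mmu, kctx, 0)) {
+ *		... map, update and tear down pages ...
+ *		kbase_mmu_term(kbdev, &kctx->mmu);
+ *	}
+ */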
+
+static size_t kbasep_mmu_dump_level(struct kbase_context *kctx,
+		phys_addr_t pgd, int level, char ** const buffer,
+		size_t *size_left)
+{
+	phys_addr_t target_pgd;
+	u64 *pgd_page;
+	int i;
+	size_t size = KBASE_MMU_PAGE_ENTRIES * sizeof(u64) + sizeof(u64);
+	size_t dump_size;
+	struct kbase_device *kbdev;
+	struct kbase_mmu_mode const *mmu_mode;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+	lockdep_assert_held(&kctx->mmu.mmu_lock);
+
+	kbdev = kctx->kbdev;
+	mmu_mode = kbdev->mmu_mode;
+
+	pgd_page = kmap(pfn_to_page(PFN_DOWN(pgd)));
+	if (!pgd_page) {
+		dev_warn(kbdev->dev, "%s: kmap failure\n", __func__);
+		return 0;
+	}
+
+	if (*size_left >= size) {
+		/* A modified physical address that contains the page table level */
+		u64 m_pgd = pgd | level;
+
+		/* Put the modified physical address in the output buffer */
+		memcpy(*buffer, &m_pgd, sizeof(m_pgd));
+		*buffer += sizeof(m_pgd);
+
+		/* Followed by the page table itself */
+		memcpy(*buffer, pgd_page, sizeof(u64) * KBASE_MMU_PAGE_ENTRIES);
+		*buffer += sizeof(u64) * KBASE_MMU_PAGE_ENTRIES;
+
+		*size_left -= size;
+	}
+
+	if (level < MIDGARD_MMU_BOTTOMLEVEL) {
+		for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
+			if (mmu_mode->pte_is_valid(pgd_page[i], level)) {
+				target_pgd = mmu_mode->pte_to_phy_addr(
+						pgd_page[i]);
+
+				dump_size = kbasep_mmu_dump_level(kctx,
+						target_pgd, level + 1,
+						buffer, size_left);
+				if (!dump_size) {
+					kunmap(pfn_to_page(PFN_DOWN(pgd)));
+					return 0;
+				}
+				size += dump_size;
+			}
+		}
+	}
+
+	kunmap(pfn_to_page(PFN_DOWN(pgd)));
+
+	return size;
+}
+
+void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
+{
+	void *kaddr;
+	size_t size_left;
+
+	KBASE_DEBUG_ASSERT(kctx);
+
+	if (0 == nr_pages) {
+		/* can't dump into a zero-sized buffer, early out */
+		return NULL;
+	}
+
+	size_left = nr_pages * PAGE_SIZE;
+
+	KBASE_DEBUG_ASSERT(0 != size_left);
+	kaddr = vmalloc_user(size_left);
+
+	mutex_lock(&kctx->mmu.mmu_lock);
+
+	if (kaddr) {
+		u64 end_marker = 0xFFULL;
+		char *buffer;
+		char *mmu_dump_buffer;
+		u64 config[3];
+		size_t dump_size, size = 0;
+		struct kbase_mmu_setup as_setup;
+
+		buffer = (char *)kaddr;
+		mmu_dump_buffer = buffer;
+
+		kctx->kbdev->mmu_mode->get_as_setup(&kctx->mmu,
+				&as_setup);
+		config[0] = as_setup.transtab;
+		config[1] = as_setup.memattr;
+		config[2] = as_setup.transcfg;
+		memcpy(buffer, &config, sizeof(config));
+		mmu_dump_buffer += sizeof(config);
+		size_left -= sizeof(config);
+		size += sizeof(config);
+
+		dump_size = kbasep_mmu_dump_level(kctx,
+				kctx->mmu.pgd,
+				MIDGARD_MMU_TOPLEVEL,
+				&mmu_dump_buffer,
+				&size_left);
+
+		if (!dump_size)
+			goto fail_free;
+
+		size += dump_size;
+
+		/* Add on the size for the end marker */
+		size += sizeof(u64);
+
+		if (size > (nr_pages * PAGE_SIZE)) {
+			/* The buffer isn't big enough - free the memory and return failure */
+			goto fail_free;
+		}
+
+		/* Add the end marker */
+		memcpy(mmu_dump_buffer, &end_marker, sizeof(u64));
+	}
+
+	mutex_unlock(&kctx->mmu.mmu_lock);
+	return kaddr;
+
+fail_free:
+	vfree(kaddr);
+	mutex_unlock(&kctx->mmu.mmu_lock);
+	return NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_mmu_dump);
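+
+/*
+ * For reference, the dump buffer produced above is laid out as follows:
+ *
+ *	u64 config[3];		transtab, memattr, transcfg
+ *	for each table page visited:
+ *		u64 m_pgd;	pgd physical address OR'd with its level
+ *		u64 entries[KBASE_MMU_PAGE_ENTRIES];
+ *	u64 end_marker;		0xFF
+ */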
+
+void bus_fault_worker(struct work_struct *data)
+{
+	struct kbase_as *faulting_as;
+	int as_no;
+	struct kbase_context *kctx;
+	struct kbase_device *kbdev;
+	struct kbase_fault *fault;
+	bool reset_status = false;
+
+	faulting_as = container_of(data, struct kbase_as, work_busfault);
+	fault = &faulting_as->bf_data;
+
+	/* Ensure that any pending page fault worker has completed */
+	flush_work(&faulting_as->work_pagefault);
+
+	as_no = faulting_as->number;
+
+	kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
+
+	/* Grab the context, already refcounted in kbase_mmu_interrupt() on
+	 * flagging of the bus-fault. Therefore, it cannot be scheduled out of
+	 * this AS until we explicitly release it
+	 */
+	kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
+	if (WARN_ON(!kctx)) {
+		atomic_dec(&kbdev->faults_pending);
+		return;
+	}
+
+	if (unlikely(fault->protected_mode)) {
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Permission failure", fault);
+		kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+				KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+		kbasep_js_runpool_release_ctx(kbdev, kctx);
+		atomic_dec(&kbdev->faults_pending);
+		return;
+
+	}
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+		/* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
+		 * We start the reset before switching to UNMAPPED to ensure that unrelated jobs
+		 * are evicted from the GPU before the switch.
+		 */
+		dev_err(kbdev->dev, "GPU bus error occurred. For this GPU version we now soft-reset as part of bus error recovery\n");
+		reset_status = kbase_prepare_to_reset_gpu(kbdev);
+	}
+	/* NOTE: If GPU already powered off for suspend, we don't need to switch to unmapped */
+	if (!kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+		unsigned long flags;
+
+		/* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
+		/* AS transaction begin */
+		mutex_lock(&kbdev->mmu_hw_mutex);
+
+		/* Set the MMU into unmapped mode */
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		kbase_mmu_disable(kctx);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+		mutex_unlock(&kbdev->mmu_hw_mutex);
+		/* AS transaction end */
+
+		kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+					 KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+		kbase_mmu_hw_enable_fault(kbdev, faulting_as,
+					 KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+
+		kbase_pm_context_idle(kbdev);
+	}
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
+		kbase_reset_gpu(kbdev);
+
+	kbasep_js_runpool_release_ctx(kbdev, kctx);
+
+	atomic_dec(&kbdev->faults_pending);
+}
+
+const char *kbase_exception_name(struct kbase_device *kbdev, u32 exception_code)
+{
+	const char *e;
+
+	switch (exception_code) {
+		/* Non-Fault Status code */
+	case 0x00:
+		e = "NOT_STARTED/IDLE/OK";
+		break;
+	case 0x01:
+		e = "DONE";
+		break;
+	case 0x02:
+		e = "INTERRUPTED";
+		break;
+	case 0x03:
+		e = "STOPPED";
+		break;
+	case 0x04:
+		e = "TERMINATED";
+		break;
+	case 0x08:
+		e = "ACTIVE";
+		break;
+		/* Job exceptions */
+	case 0x40:
+		e = "JOB_CONFIG_FAULT";
+		break;
+	case 0x41:
+		e = "JOB_POWER_FAULT";
+		break;
+	case 0x42:
+		e = "JOB_READ_FAULT";
+		break;
+	case 0x43:
+		e = "JOB_WRITE_FAULT";
+		break;
+	case 0x44:
+		e = "JOB_AFFINITY_FAULT";
+		break;
+	case 0x48:
+		e = "JOB_BUS_FAULT";
+		break;
+	case 0x50:
+		e = "INSTR_INVALID_PC";
+		break;
+	case 0x51:
+		e = "INSTR_INVALID_ENC";
+		break;
+	case 0x52:
+		e = "INSTR_TYPE_MISMATCH";
+		break;
+	case 0x53:
+		e = "INSTR_OPERAND_FAULT";
+		break;
+	case 0x54:
+		e = "INSTR_TLS_FAULT";
+		break;
+	case 0x55:
+		e = "INSTR_BARRIER_FAULT";
+		break;
+	case 0x56:
+		e = "INSTR_ALIGN_FAULT";
+		break;
+	case 0x58:
+		e = "DATA_INVALID_FAULT";
+		break;
+	case 0x59:
+		e = "TILE_RANGE_FAULT";
+		break;
+	case 0x5A:
+		e = "ADDR_RANGE_FAULT";
+		break;
+	case 0x60:
+		e = "OUT_OF_MEMORY";
+		break;
+		/* GPU exceptions */
+	case 0x80:
+		e = "DELAYED_BUS_FAULT";
+		break;
+	case 0x88:
+		e = "SHAREABILITY_FAULT";
+		break;
+		/* MMU exceptions */
+	case 0xC0:
+	case 0xC1:
+	case 0xC2:
+	case 0xC3:
+	case 0xC4:
+	case 0xC5:
+	case 0xC6:
+	case 0xC7:
+		e = "TRANSLATION_FAULT";
+		break;
+	case 0xC8:
+		e = "PERMISSION_FAULT";
+		break;
+	case 0xC9:
+	case 0xCA:
+	case 0xCB:
+	case 0xCC:
+	case 0xCD:
+	case 0xCE:
+	case 0xCF:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			e = "PERMISSION_FAULT";
+		else
+			e = "UNKNOWN";
+		break;
+	case 0xD0:
+	case 0xD1:
+	case 0xD2:
+	case 0xD3:
+	case 0xD4:
+	case 0xD5:
+	case 0xD6:
+	case 0xD7:
+		e = "TRANSTAB_BUS_FAULT";
+		break;
+	case 0xD8:
+		e = "ACCESS_FLAG";
+		break;
+	case 0xD9:
+	case 0xDA:
+	case 0xDB:
+	case 0xDC:
+	case 0xDD:
+	case 0xDE:
+	case 0xDF:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			e = "ACCESS_FLAG";
+		else
+			e = "UNKNOWN";
+		break;
+	case 0xE0:
+	case 0xE1:
+	case 0xE2:
+	case 0xE3:
+	case 0xE4:
+	case 0xE5:
+	case 0xE6:
+	case 0xE7:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			e = "ADDRESS_SIZE_FAULT";
+		else
+			e = "UNKNOWN";
+		break;
+	case 0xE8:
+	case 0xE9:
+	case 0xEA:
+	case 0xEB:
+	case 0xEC:
+	case 0xED:
+	case 0xEE:
+	case 0xEF:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			e = "MEMORY_ATTRIBUTES_FAULT";
+		else
+			e = "UNKNOWN";
+		break;
+	default:
+		e = "UNKNOWN";
+		break;
+	}
+
+	return e;
+}
+
+static const char *access_type_name(struct kbase_device *kbdev,
+		u32 fault_status)
+{
+	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
+	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			return "ATOMIC";
+		else
+			return "UNKNOWN";
+	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
+		return "READ";
+	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
+		return "WRITE";
+	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
+		return "EXECUTE";
+	default:
+		WARN_ON(1);
+		return NULL;
+	}
+}
+
+
+/**
+ * The caller must ensure it has retained the context to prevent it from
+ * being scheduled out whilst it's being worked on.
+ */
+static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
+		struct kbase_as *as, const char *reason_str,
+		struct kbase_fault *fault)
+{
+	unsigned long flags;
+	int exception_type;
+	int access_type;
+	int source_id;
+	int as_no;
+	struct kbase_device *kbdev;
+	struct kbasep_js_device_data *js_devdata;
+
+	bool reset_status = false;
+
+	as_no = as->number;
+	kbdev = kctx->kbdev;
+	js_devdata = &kbdev->js_data;
+
+	/* ASSERT that the context won't leave the runpool */
+	KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0);
+
+	/* decode the fault status */
+	exception_type = fault->status & 0xFF;
+	access_type = (fault->status >> 8) & 0x3;
+	source_id = (fault->status >> 16);
+
+	/* terminal fault, print info about the fault */
+	dev_err(kbdev->dev,
+		"Unhandled Page fault in AS%d at VA 0x%016llX\n"
+		"Reason: %s\n"
+		"raw fault status: 0x%X\n"
+		"decoded fault status: %s\n"
+		"exception type 0x%X: %s\n"
+		"access type 0x%X: %s\n"
+		"source id 0x%X\n"
+		"pid: %d\n",
+		as_no, fault->addr,
+		reason_str,
+		fault->status,
+		(fault->status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+		exception_type, kbase_exception_name(kbdev, exception_type),
+		access_type, access_type_name(kbdev, fault->status),
+		source_id,
+		kctx->pid);
+
+	/* hardware counters dump fault handling */
+	if ((kbdev->hwcnt.kctx) && (kbdev->hwcnt.kctx->as_nr == as_no) &&
+			(kbdev->hwcnt.backend.state ==
+						KBASE_INSTR_STATE_DUMPING)) {
+		if ((fault->addr >= kbdev->hwcnt.addr) &&
+				(fault->addr < (kbdev->hwcnt.addr +
+					kbdev->hwcnt.addr_bytes)))
+			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
+	}
+
+	/* Stop the kctx from submitting more jobs and cause it to be scheduled
+	 * out/rescheduled - this will occur on releasing the context's refcount */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+	/* Kill any running jobs from the context. Submit is disallowed, so no more jobs from this
+	 * context can appear in the job slots from this point on */
+	kbase_backend_jm_kill_running_jobs_from_kctx(kctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* AS transaction begin */
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+		/* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
+		 * We start the reset before switching to UNMAPPED to ensure that unrelated jobs
+		 * are evicted from the GPU before the switch.
+		 */
+		dev_err(kbdev->dev, "Unhandled page fault. For this GPU version we now soft-reset the GPU as part of page fault recovery.");
+		reset_status = kbase_prepare_to_reset_gpu(kbdev);
+	}
+	/* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_mmu_disable(kctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	/* AS transaction end */
+	/* Clear down the fault */
+	kbase_mmu_hw_clear_fault(kbdev, as,
+			KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+	kbase_mmu_hw_enable_fault(kbdev, as,
+			KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
+		kbase_reset_gpu(kbdev);
+}
+
+void kbasep_as_do_poke(struct work_struct *work)
+{
+	struct kbase_as *as;
+	struct kbase_device *kbdev;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(work);
+	as = container_of(work, struct kbase_as, poke_work);
+	kbdev = container_of(as, struct kbase_device, as[as->number]);
+	KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
+
+	/* GPU power will already be active by virtue of the caller holding a JS
+	 * reference on the address space, and will not release it until this worker
+	 * has finished */
+
+	/* Further to the comment above, we know that while this function is running
+	 * the AS will not be released as before the atom is released this workqueue
+	 * is flushed (in kbase_as_poking_timer_release_atom)
+	 */
+
+	/* AS transaction begin */
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	/* Force a uTLB invalidate */
+	kbase_mmu_hw_do_operation(kbdev, as, 0, 0,
+				  AS_COMMAND_UNLOCK, 0);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+	/* AS transaction end */
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	if (as->poke_refcount &&
+		!(as->poke_state & KBASE_AS_POKE_STATE_KILLING_POKE)) {
+		/* Only queue up the timer if we need it, and we're not trying to kill it */
+		hrtimer_start(&as->poke_timer, HR_TIMER_DELAY_MSEC(5), HRTIMER_MODE_REL);
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer)
+{
+	struct kbase_as *as;
+	int queue_work_ret;
+
+	KBASE_DEBUG_ASSERT(NULL != timer);
+	as = container_of(timer, struct kbase_as, poke_timer);
+	KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
+
+	queue_work_ret = queue_work(as->poke_wq, &as->poke_work);
+	KBASE_DEBUG_ASSERT(queue_work_ret);
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * Retain the poking timer on an atom's context (if the atom hasn't already
+ * done so), and start the timer (if it's not already started).
+ *
+ * This must only be called on a context that's scheduled in, and an atom
+ * that's running on the GPU.
+ *
+ * The caller must hold hwaccess_lock
+ *
+ * This can be called safely from atomic context
+ */
+void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev,
+		struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	struct kbase_as *as;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(katom);
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (katom->poking)
+		return;
+
+	katom->poking = 1;
+
+	/* It's safe to work on the as/as_nr without an explicit reference,
+	 * because the caller holds the hwaccess_lock, and the atom itself
+	 * was also running and had already taken a reference  */
+	as = &kbdev->as[kctx->as_nr];
+
+	if (++(as->poke_refcount) == 1) {
+		/* First refcount for poke needed: check if not already in flight */
+		if (!as->poke_state) {
+			/* need to start poking */
+			as->poke_state |= KBASE_AS_POKE_STATE_IN_FLIGHT;
+			queue_work(as->poke_wq, &as->poke_work);
+		}
+	}
+}
+
+/**
+ * If an atom holds a poking timer, release it and wait for it to finish
+ *
+ * This must only be called on a context that's scheduled in, and an atom
+ * that still has a JS reference on the context
+ *
+ * This must \b not be called from atomic context, since it can sleep.
+ */
+void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev,
+		struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	struct kbase_as *as;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(katom);
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+	if (!katom->poking)
+		return;
+
+	as = &kbdev->as[kctx->as_nr];
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	KBASE_DEBUG_ASSERT(as->poke_refcount > 0);
+	KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
+
+	if (--(as->poke_refcount) == 0) {
+		as->poke_state |= KBASE_AS_POKE_STATE_KILLING_POKE;
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+		hrtimer_cancel(&as->poke_timer);
+		flush_workqueue(as->poke_wq);
+
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+		/* Re-check whether it's still needed */
+		if (as->poke_refcount) {
+			int queue_work_ret;
+			/* Poking still needed:
+			 * - Another retain will not be starting the timer or queueing work,
+			 * because it's still marked as in-flight
+			 * - The hrtimer has finished, and has not started a new timer or
+			 * queued work because it's been marked as killing
+			 *
+			 * So whatever happens now, just queue the work again */
+			as->poke_state &= ~((kbase_as_poke_state)KBASE_AS_POKE_STATE_KILLING_POKE);
+			queue_work_ret = queue_work(as->poke_wq, &as->poke_work);
+			KBASE_DEBUG_ASSERT(queue_work_ret);
+		} else {
+			/* It isn't - so mark it as not in flight, and not killing */
+			as->poke_state = 0u;
+
+			/* The poke associated with the atom has now finished. If this is
+			 * also the last atom on the context, then we can guarantee no more
+			 * pokes (and thus no more poking register accesses) will occur on
+			 * the context until new atoms are run */
+		}
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	katom->poking = 0;
+}
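+
+/*
+ * Sketch of the expected pairing of the two functions above (hypothetical
+ * call site): retain while the atom is running on the GPU, release once it
+ * has completed:
+ *
+ *	kbase_as_poking_timer_retain_atom(kbdev, kctx, katom);
+ *	... atom runs on the GPU ...
+ *	kbase_as_poking_timer_release_atom(kbdev, kctx, katom);
+ */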
+
+void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
+		struct kbase_context *kctx, struct kbase_as *as,
+		struct kbase_fault *fault)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!kctx) {
+		dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
+				kbase_as_has_bus_fault(as, fault) ?
+						"Bus error" : "Page fault",
+				as->number, fault->addr);
+
+		/* Since no ctx was found, the MMU must be disabled. */
+		WARN_ON(as->current_setup.transtab);
+
+		if (kbase_as_has_bus_fault(as, fault)) {
+			kbase_mmu_hw_clear_fault(kbdev, as,
+					KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+			kbase_mmu_hw_enable_fault(kbdev, as,
+					KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+		} else if (kbase_as_has_page_fault(as, fault)) {
+			kbase_mmu_hw_clear_fault(kbdev, as,
+					KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+			kbase_mmu_hw_enable_fault(kbdev, as,
+					KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+		}
+
+		if (kbase_as_has_bus_fault(as, fault) &&
+			    kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+			bool reset_status;
+			/*
+			 * Reset the GPU, like in bus_fault_worker, in case an
+			 * earlier error hasn't been properly cleared by this
+			 * point.
+			 */
+			dev_err(kbdev->dev, "GPU bus error occurred. For this GPU version we now soft-reset as part of bus error recovery\n");
+			reset_status = kbase_prepare_to_reset_gpu_locked(kbdev);
+			if (reset_status)
+				kbase_reset_gpu_locked(kbdev);
+		}
+
+		return;
+	}
+
+	if (kbase_as_has_bus_fault(as, fault)) {
+		struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+		/*
+		 * hw counters dumping in progress, signal the
+		 * other thread that it failed
+		 */
+		if ((kbdev->hwcnt.kctx == kctx) &&
+		    (kbdev->hwcnt.backend.state ==
+					KBASE_INSTR_STATE_DUMPING))
+			kbdev->hwcnt.backend.state =
+						KBASE_INSTR_STATE_FAULT;
+
+		/*
+		 * Stop the kctx from submitting more jobs and cause it
+		 * to be scheduled out/rescheduled when all references
+		 * to it are released
+		 */
+		kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			dev_warn(kbdev->dev,
+					"Bus error in AS%d at VA=0x%016llx, IPA=0x%016llx\n",
+					as->number, fault->addr,
+					fault->extra_addr);
+		else
+			dev_warn(kbdev->dev, "Bus error in AS%d at 0x%016llx\n",
+					as->number, fault->addr);
+
+		/*
+		 * We need to switch to UNMAPPED mode - but we do this in a
+		 * worker so that we can sleep
+		 */
+		WARN_ON(!queue_work(as->pf_wq, &as->work_busfault));
+		atomic_inc(&kbdev->faults_pending);
+	} else {
+		WARN_ON(!queue_work(as->pf_wq, &as->work_pagefault));
+		atomic_inc(&kbdev->faults_pending);
+	}
+}
+
+void kbase_flush_mmu_wqs(struct kbase_device *kbdev)
+{
+	int i;
+
+	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+		struct kbase_as *as = &kbdev->as[i];
+
+		flush_workqueue(as->pf_wq);
+	}
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h b/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h
new file mode 100644
index 0000000..f49a1d4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h
@@ -0,0 +1,124 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2018-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file mali_kbase_mmu_hw.h
+ * Interface file for accessing MMU hardware functionality
+ */
+
+/**
+ * @page mali_kbase_mmu_hw_page MMU hardware interface
+ *
+ * @section mali_kbase_mmu_hw_intro_sec Introduction
+ * This module provides an abstraction for accessing the functionality provided
+ * by the midgard MMU and thus allows all MMU HW access to be contained within
+ * one common place and allows for different backends (implementations) to
+ * be provided.
+ */
+
+#ifndef _KBASE_MMU_HW_H_
+#define _KBASE_MMU_HW_H_
+
+/* Forward declarations */
+struct kbase_device;
+struct kbase_as;
+struct kbase_context;
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup mali_kbase_mmu_hw  MMU access APIs
+ * @{
+ */
+
+/** @brief MMU fault type descriptor.
+ */
+enum kbase_mmu_fault_type {
+	KBASE_MMU_FAULT_TYPE_UNKNOWN = 0,
+	KBASE_MMU_FAULT_TYPE_PAGE,
+	KBASE_MMU_FAULT_TYPE_BUS,
+	KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED,
+	KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED
+};
+
+/** @brief Configure an address space for use.
+ *
+ * Configure the MMU using the address space details setup in the
+ * @ref kbase_context structure.
+ *
+ * @param[in]  kbdev          kbase device to configure.
+ * @param[in]  as             address space to configure.
+ */
+void kbase_mmu_hw_configure(struct kbase_device *kbdev,
+		struct kbase_as *as);
+
+/** @brief Issue an operation to the MMU.
+ *
+ * Issue an operation (MMU invalidate, MMU flush, etc) on the address space that
+ * is associated with the provided @ref kbase_context over the specified range
+ *
+ * @param[in]  kbdev         kbase device to issue the MMU operation on.
+ * @param[in]  as            address space to issue the MMU operation on.
+ * @param[in]  vpfn          MMU Virtual Page Frame Number to start the
+ *                           operation on.
+ * @param[in]  nr            Number of pages to work on.
+ * @param[in]  type          Operation type (written to ASn_COMMAND).
+ * @param[in]  handling_irq  Is this operation being called during the handling
+ *                           of an interrupt?
+ *
+ * @return Zero if the operation was successful, non-zero otherwise.
+ */
+int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
+		u64 vpfn, u32 nr, u32 type,
+		unsigned int handling_irq);
+
+/** @brief Clear a fault that has been previously reported by the MMU.
+ *
+ * Clear a bus error or page fault that has been reported by the MMU.
+ *
+ * @param[in]  kbdev         kbase device to clear the fault from.
+ * @param[in]  as            address space to clear the fault from.
+ * @param[in]  type          The type of fault that needs to be cleared.
+ */
+void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
+		enum kbase_mmu_fault_type type);
+
+/** @brief Enable fault that has been previously reported by the MMU.
+ *
+ * After a page fault or bus error has been reported by the MMU these
+ * will be disabled. After these are handled this function needs to be
+ * called to enable the page fault or bus error fault again.
+ *
+ * @param[in]  kbdev         kbase device to again enable the fault from.
+ * @param[in]  as            address space to again enable the fault from.
+ * @param[in]  type          The type of fault that needs to be enabled again.
+ */
+void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
+		enum kbase_mmu_fault_type type);
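+
+/*
+ * Illustrative recovery sequence (a sketch mirroring the driver's fault
+ * workers): once a reported fault has been handled, it is first cleared and
+ * then re-enabled so that further faults can be raised:
+ *
+ *	kbase_mmu_hw_clear_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE);
+ *	kbase_mmu_hw_enable_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE);
+ */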
+
+/** @} *//* end group mali_kbase_mmu_hw */
+/** @} *//* end group base_kbase_api */
+
+#endif	/* _KBASE_MMU_HW_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c
new file mode 100644
index 0000000..7b9cc0c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c
@@ -0,0 +1,223 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014, 2016-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#include "mali_kbase.h"
+#include "mali_midg_regmap.h"
+#include "mali_kbase_defs.h"
+
+#define ENTRY_TYPE_MASK     3ULL
+/* For valid ATEs bit 1 = ((level == 3) ? 1 : 0).
+ * Valid ATE entries at level 3 are flagged with the value 3.
+ * Valid ATE entries at level 0-2 are flagged with the value 1.
+ */
+#define ENTRY_IS_ATE_L3		3ULL
+#define ENTRY_IS_ATE_L02	1ULL
+#define ENTRY_IS_INVAL		2ULL
+#define ENTRY_IS_PTE		3ULL
+
+#define ENTRY_ATTR_BITS (7ULL << 2)	/* bits 4:2 */
+#define ENTRY_ACCESS_RW (1ULL << 6)     /* bits 7:6 */
+#define ENTRY_ACCESS_RO (3ULL << 6)
+#define ENTRY_SHARE_BITS (3ULL << 8)	/* bits 9:8 */
+#define ENTRY_ACCESS_BIT (1ULL << 10)
+#define ENTRY_NX_BIT (1ULL << 54)
+
+/* Helper Function to perform assignment of page table entries, to
+ * ensure the use of strd, which is required on LPAE systems.
+ */
+static inline void page_table_entry_set(u64 *pte, u64 phy)
+{
+#if KERNEL_VERSION(3, 18, 13) <= LINUX_VERSION_CODE
+	WRITE_ONCE(*pte, phy);
+#else
+#ifdef CONFIG_64BIT
+	barrier();
+	*pte = phy;
+	barrier();
+#elif defined(CONFIG_ARM)
+	barrier();
+	asm volatile("ldrd r0, [%1]\n\t"
+		     "strd r0, %0\n\t"
+		     : "=m" (*pte)
+		     : "r" (&phy)
+		     : "r0", "r1");
+	barrier();
+#else
+#error "64-bit atomic write must be implemented for your architecture"
+#endif
+#endif
+}
+
+static void mmu_get_as_setup(struct kbase_mmu_table *mmut,
+		struct kbase_mmu_setup * const setup)
+{
+	/* Set up the required caching policies at the correct indices
+	 * in the memattr register.
+	 */
+	setup->memattr =
+		(AS_MEMATTR_IMPL_DEF_CACHE_POLICY <<
+			(AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+		(AS_MEMATTR_FORCE_TO_CACHE_ALL    <<
+			(AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
+		(AS_MEMATTR_WRITE_ALLOC           <<
+			(AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
+		(AS_MEMATTR_AARCH64_OUTER_IMPL_DEF   <<
+			(AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
+		(AS_MEMATTR_AARCH64_OUTER_WA         <<
+			(AS_MEMATTR_INDEX_OUTER_WA * 8)) |
+		(AS_MEMATTR_AARCH64_NON_CACHEABLE    <<
+			(AS_MEMATTR_INDEX_NON_CACHEABLE * 8));
+
+	setup->transtab = (u64)mmut->pgd & AS_TRANSTAB_BASE_MASK;
+	setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K;
+}
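+
+/*
+ * Note on the packing above: the 64-bit memattr register holds one byte per
+ * attribute index, so index i occupies bits (8 * i + 7):(8 * i), which is
+ * why every attribute value is shifted left by (index * 8).
+ */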
+
+static void mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+		int as_nr)
+{
+	struct kbase_as *as;
+	struct kbase_mmu_setup *current_setup;
+
+	if (WARN_ON(as_nr == KBASEP_AS_NR_INVALID))
+		return;
+
+	as = &kbdev->as[as_nr];
+	current_setup = &as->current_setup;
+
+	mmu_get_as_setup(mmut, current_setup);
+
+	/* Apply the address space setting */
+	kbase_mmu_hw_configure(kbdev, as);
+}
+
+static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
+{
+	struct kbase_as * const as = &kbdev->as[as_nr];
+	struct kbase_mmu_setup * const current_setup = &as->current_setup;
+
+	current_setup->transtab = 0ULL;
+	current_setup->transcfg = AS_TRANSCFG_ADRMODE_UNMAPPED;
+
+	/* Apply the address space setting */
+	kbase_mmu_hw_configure(kbdev, as);
+}
+
+static phys_addr_t pte_to_phy_addr(u64 entry)
+{
+	if (!(entry & 1))
+		return 0;
+
+	return entry & ~0xFFF;
+}
+
+static int ate_is_valid(u64 ate, int const level)
+{
+	if (level == MIDGARD_MMU_BOTTOMLEVEL)
+		return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE_L3);
+	else
+		return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE_L02);
+}
+
+static int pte_is_valid(u64 pte, int const level)
+{
+	/* PTEs cannot exist at the bottom level */
+	if (level == MIDGARD_MMU_BOTTOMLEVEL)
+		return false;
+	return ((pte & ENTRY_TYPE_MASK) == ENTRY_IS_PTE);
+}
+
+/*
+ * Map KBASE_REG flags to MMU flags
+ */
+static u64 get_mmu_flags(unsigned long flags)
+{
+	u64 mmu_flags;
+
+	/* store mem_attr index in bits 4:2 (the macro called above already
+	 * ensures the value fits in 3 bits)
+	 */
+	mmu_flags = KBASE_REG_MEMATTR_VALUE(flags) << 2;
+
+	/* Set access flags - note that AArch64 stage 1 does not support
+	 * write-only access, so we use read/write instead
+	 */
+	if (flags & KBASE_REG_GPU_WR)
+		mmu_flags |= ENTRY_ACCESS_RW;
+	else if (flags & KBASE_REG_GPU_RD)
+		mmu_flags |= ENTRY_ACCESS_RO;
+
+	/* nx if requested */
+	mmu_flags |= (flags & KBASE_REG_GPU_NX) ? ENTRY_NX_BIT : 0;
+
+	if (flags & KBASE_REG_SHARE_BOTH) {
+		/* inner and outer shareable */
+		mmu_flags |= SHARE_BOTH_BITS;
+	} else if (flags & KBASE_REG_SHARE_IN) {
+		/* inner shareable coherency */
+		mmu_flags |= SHARE_INNER_BITS;
+	}
+
+	return mmu_flags;
+}
+
+static void entry_set_ate(u64 *entry,
+		struct tagged_addr phy,
+		unsigned long flags,
+		int const level)
+{
+	if (level == MIDGARD_MMU_BOTTOMLEVEL)
+		page_table_entry_set(entry, as_phys_addr_t(phy) |
+				get_mmu_flags(flags) |
+				ENTRY_ACCESS_BIT | ENTRY_IS_ATE_L3);
+	else
+		page_table_entry_set(entry, as_phys_addr_t(phy) |
+				get_mmu_flags(flags) |
+				ENTRY_ACCESS_BIT | ENTRY_IS_ATE_L02);
+}
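+
+/*
+ * Worked example of the encoding above (illustrative values): a level-3 ATE
+ * for physical address 0x80000000 with write access requested, memattr
+ * index 0 and no shareability or NX bits set is
+ *
+ *	  0x80000000			physical address
+ *	| ENTRY_ACCESS_RW  = 0x040	(1ULL << 6)
+ *	| ENTRY_ACCESS_BIT = 0x400	(1ULL << 10)
+ *	| ENTRY_IS_ATE_L3  = 0x003
+ *	= 0x80000443
+ */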
+
+static void entry_set_pte(u64 *entry, phys_addr_t phy)
+{
+	page_table_entry_set(entry, (phy & PAGE_MASK) |
+			ENTRY_ACCESS_BIT | ENTRY_IS_PTE);
+}
+
+static void entry_invalidate(u64 *entry)
+{
+	page_table_entry_set(entry, ENTRY_IS_INVAL);
+}
+
+static struct kbase_mmu_mode const aarch64_mode = {
+	.update = mmu_update,
+	.get_as_setup = mmu_get_as_setup,
+	.disable_as = mmu_disable_as,
+	.pte_to_phy_addr = pte_to_phy_addr,
+	.ate_is_valid = ate_is_valid,
+	.pte_is_valid = pte_is_valid,
+	.entry_set_ate = entry_set_ate,
+	.entry_set_pte = entry_set_pte,
+	.entry_invalidate = entry_invalidate,
+	.flags = KBASE_MMU_MODE_HAS_NON_CACHEABLE
+};
+
+struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void)
+{
+	return &aarch64_mode;
+}
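+
+/*
+ * A device selects this mode by pointing its mmu_mode at the table above,
+ * e.g. (sketch of a hypothetical call site):
+ *
+ *	kbdev->mmu_mode = kbase_mmu_mode_get_aarch64();
+ */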
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c
new file mode 100644
index 0000000..7ec90cf
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c
@@ -0,0 +1,214 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#include "mali_kbase.h"
+#include "mali_midg_regmap.h"
+#include "mali_kbase_defs.h"
+
+#define ENTRY_TYPE_MASK     3ULL
+#define ENTRY_IS_ATE        1ULL
+#define ENTRY_IS_INVAL      2ULL
+#define ENTRY_IS_PTE        3ULL
+
+#define ENTRY_ATTR_BITS (7ULL << 2)	/* bits 4:2 */
+#define ENTRY_RD_BIT (1ULL << 6)
+#define ENTRY_WR_BIT (1ULL << 7)
+#define ENTRY_SHARE_BITS (3ULL << 8)	/* bits 9:8 */
+#define ENTRY_ACCESS_BIT (1ULL << 10)
+#define ENTRY_NX_BIT (1ULL << 54)
+
+#define ENTRY_FLAGS_MASK (ENTRY_ATTR_BITS | ENTRY_RD_BIT | ENTRY_WR_BIT | \
+		ENTRY_SHARE_BITS | ENTRY_ACCESS_BIT | ENTRY_NX_BIT)
+
+/* Helper Function to perform assignment of page table entries, to
+ * ensure the use of strd, which is required on LPAE systems.
+ */
+static inline void page_table_entry_set(u64 *pte, u64 phy)
+{
+#if KERNEL_VERSION(3, 18, 13) <= LINUX_VERSION_CODE
+	WRITE_ONCE(*pte, phy);
+#else
+#ifdef CONFIG_64BIT
+	barrier();
+	*pte = phy;
+	barrier();
+#elif defined(CONFIG_ARM)
+	barrier();
+	asm volatile("ldrd r0, [%1]\n\t"
+		     "strd r0, %0\n\t"
+		     : "=m" (*pte)
+		     : "r" (&phy)
+		     : "r0", "r1");
+	barrier();
+#else
+#error "64-bit atomic write must be implemented for your architecture"
+#endif
+#endif
+}
+
+static void mmu_get_as_setup(struct kbase_mmu_table *mmut,
+		struct kbase_mmu_setup * const setup)
+{
+	/* Set up the required caching policies at the correct indices
+	 * in the memattr register. */
+	setup->memattr =
+		(AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY <<
+		(AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+		(AS_MEMATTR_LPAE_FORCE_TO_CACHE_ALL    <<
+		(AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8))    |
+		(AS_MEMATTR_LPAE_WRITE_ALLOC           <<
+		(AS_MEMATTR_INDEX_WRITE_ALLOC * 8))           |
+		(AS_MEMATTR_LPAE_OUTER_IMPL_DEF        <<
+		(AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8))        |
+		(AS_MEMATTR_LPAE_OUTER_WA              <<
+		(AS_MEMATTR_INDEX_OUTER_WA * 8))              |
+		0; /* The other indices are unused for now */
+
+	setup->transtab = ((u64)mmut->pgd &
+		((0xFFFFFFFFULL << 32) | AS_TRANSTAB_LPAE_ADDR_SPACE_MASK)) |
+		AS_TRANSTAB_LPAE_ADRMODE_TABLE |
+		AS_TRANSTAB_LPAE_READ_INNER;
+
+	setup->transcfg = 0;
+}
+
+static void mmu_update(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut,
+		int as_nr)
+{
+	struct kbase_as *as;
+	struct kbase_mmu_setup *current_setup;
+
+	if (WARN_ON(as_nr == KBASEP_AS_NR_INVALID))
+		return;
+
+	as = &kbdev->as[as_nr];
+	current_setup = &as->current_setup;
+
+	mmu_get_as_setup(mmut, current_setup);
+
+	/* Apply the address space setting */
+	kbase_mmu_hw_configure(kbdev, as);
+}
+
+static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
+{
+	struct kbase_as * const as = &kbdev->as[as_nr];
+	struct kbase_mmu_setup * const current_setup = &as->current_setup;
+
+	current_setup->transtab = AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED;
+
+	/* Apply the address space setting */
+	kbase_mmu_hw_configure(kbdev, as);
+}
+
+static phys_addr_t pte_to_phy_addr(u64 entry)
+{
+	if (!(entry & 1))
+		return 0;
+
+	return entry & ~0xFFF;
+}
+
+static int ate_is_valid(u64 ate, int const level)
+{
+	return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE);
+}
+
+static int pte_is_valid(u64 pte, int const level)
+{
+	return ((pte & ENTRY_TYPE_MASK) == ENTRY_IS_PTE);
+}
+
+/*
+ * Map KBASE_REG flags to MMU flags
+ */
+static u64 get_mmu_flags(unsigned long flags)
+{
+	u64 mmu_flags;
+	unsigned long memattr_idx;
+
+	memattr_idx = KBASE_REG_MEMATTR_VALUE(flags);
+	if (WARN(memattr_idx == AS_MEMATTR_INDEX_NON_CACHEABLE,
+			"Legacy Mode MMU cannot honor GPU non-cacheable memory, will use default instead\n"))
+		memattr_idx = AS_MEMATTR_INDEX_DEFAULT;
+	/* store mem_attr index as 4:2, noting that:
+	 * - macro called above ensures 3 bits already
+	 * - all AS_MEMATTR_INDEX_<...> macros only use 3 bits
+	 */
+	mmu_flags = memattr_idx << 2;
+
+	/* write perm if requested */
+	mmu_flags |= (flags & KBASE_REG_GPU_WR) ? ENTRY_WR_BIT : 0;
+	/* read perm if requested */
+	mmu_flags |= (flags & KBASE_REG_GPU_RD) ? ENTRY_RD_BIT : 0;
+	/* nx if requested */
+	mmu_flags |= (flags & KBASE_REG_GPU_NX) ? ENTRY_NX_BIT : 0;
+
+	if (flags & KBASE_REG_SHARE_BOTH) {
+		/* inner and outer shareable */
+		mmu_flags |= SHARE_BOTH_BITS;
+	} else if (flags & KBASE_REG_SHARE_IN) {
+		/* inner shareable coherency */
+		mmu_flags |= SHARE_INNER_BITS;
+	}
+
+	return mmu_flags;
+}
+
+static void entry_set_ate(u64 *entry,
+		struct tagged_addr phy,
+		unsigned long flags,
+		int const level)
+{
+	page_table_entry_set(entry, as_phys_addr_t(phy) | get_mmu_flags(flags) |
+			     ENTRY_IS_ATE);
+}
+
+static void entry_set_pte(u64 *entry, phys_addr_t phy)
+{
+	page_table_entry_set(entry, (phy & ~0xFFF) | ENTRY_IS_PTE);
+}
+
+static void entry_invalidate(u64 *entry)
+{
+	page_table_entry_set(entry, ENTRY_IS_INVAL);
+}
+
+static struct kbase_mmu_mode const lpae_mode = {
+	.update = mmu_update,
+	.get_as_setup = mmu_get_as_setup,
+	.disable_as = mmu_disable_as,
+	.pte_to_phy_addr = pte_to_phy_addr,
+	.ate_is_valid = ate_is_valid,
+	.pte_is_valid = pte_is_valid,
+	.entry_set_ate = entry_set_ate,
+	.entry_set_pte = entry_set_pte,
+	.entry_invalidate = entry_invalidate,
+	.flags = 0
+};
+
+struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void)
+{
+	return &lpae_mode;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_native_mgm.c b/drivers/gpu/arm/midgard/mali_kbase_native_mgm.c
new file mode 100644
index 0000000..38ae46e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_native_mgm.c
@@ -0,0 +1,153 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/memory_group_manager.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_native_mgm.h>
+
+/**
+ * kbase_native_mgm_alloc - Native physical memory allocation method
+ *
+ * @mgm_dev:  The memory group manager the request is being made through.
+ * @group_id: A physical memory group ID, which must be valid but is not used.
+ *            Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
+ * @gfp_mask: Bitmask of Get Free Page flags affecting allocator behavior.
+ * @order:    Page order for physical page size (order=0 means 4 KiB,
+ *            order=9 means 2 MiB).
+ *
+ * Delegates all memory allocation requests to the kernel's alloc_pages
+ * function.
+ *
+ * Return: Pointer to allocated page, or NULL if allocation failed.
+ */
+static struct page *kbase_native_mgm_alloc(
+	struct memory_group_manager_device *mgm_dev, int group_id,
+	gfp_t gfp_mask, unsigned int order)
+{
+	/*
+	 * Check that the base and the mgm defines, from separate header files,
+	 * for the max number of memory groups are compatible.
+	 */
+	BUILD_BUG_ON(BASE_MEM_GROUP_COUNT != MEMORY_GROUP_MANAGER_NR_GROUPS);
+	/*
+	 * Check that the mask used for storing the memory group ID is big
+	 * enough for the largest possible memory group ID.
+	 */
+	BUILD_BUG_ON((BASEP_CONTEXT_MMU_GROUP_ID_MASK
+				>> BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
+			< (BASE_MEM_GROUP_COUNT - 1));
+
+	CSTD_UNUSED(mgm_dev);
+	CSTD_UNUSED(group_id);
+
+	return alloc_pages(gfp_mask, order);
+}
+
+/**
+ * kbase_native_mgm_free - Native physical memory freeing method
+ *
+ * @mgm_dev:  The memory group manager the request is being made through.
+ * @group_id: A physical memory group ID, which must be valid but is not used.
+ *            Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
+ * @page:     Address of the struct associated with a page of physical
+ *            memory that was allocated by calling kbase_native_mgm_alloc
+ *            with the same argument values.
+ * @order:    Page order for physical page size (order=0 means 4 KiB,
+ *            order=9 means 2 MiB).
+ *
+ * Delegates all memory freeing requests to the kernel's __free_pages function.
+ */
+static void kbase_native_mgm_free(struct memory_group_manager_device *mgm_dev,
+	int group_id, struct page *page, unsigned int order)
+{
+	CSTD_UNUSED(mgm_dev);
+	CSTD_UNUSED(group_id);
+
+	__free_pages(page, order);
+}
+
+/**
+ * kbase_native_mgm_vmf_insert_pfn_prot - Native method to map a page on the CPU
+ *
+ * @mgm_dev:  The memory group manager the request is being made through.
+ * @group_id: A physical memory group ID, which must be valid but is not used.
+ *            Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
+ * @vma:      The virtual memory area to insert the page into.
+ * @addr:     An address contained in @vma to assign to the inserted page.
+ * @pfn:      The kernel Page Frame Number to insert at @addr in @vma.
+ * @pgprot:   Protection flags for the inserted page.
+ *
+ * Called from a CPU virtual memory page fault handler. Delegates all memory
+ * mapping requests to the kernel's vmf_insert_pfn_prot function.
+ *
+ * Return: Type of fault that occurred or VM_FAULT_NOPAGE if the page table
+ *         entry was successfully installed.
+ */
+static vm_fault_t kbase_native_mgm_vmf_insert_pfn_prot(
+		struct memory_group_manager_device *mgm_dev, int group_id,
+		struct vm_area_struct *vma, unsigned long addr,
+		unsigned long pfn, pgprot_t pgprot)
+{
+	CSTD_UNUSED(mgm_dev);
+	CSTD_UNUSED(group_id);
+
+	return vmf_insert_pfn_prot(vma, addr, pfn, pgprot);
+}
+
+/**
+ * kbase_native_mgm_update_gpu_pte - Native method to modify a GPU page table
+ *                                   entry
+ *
+ * @mgm_dev:   The memory group manager the request is being made through.
+ * @group_id:  A physical memory group ID, which must be valid but is not used.
+ *             Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
+ * @mmu_level: The level of the MMU page table where the page is getting mapped.
+ * @pte:       The prepared page table entry.
+ *
+ * This function simply returns the @pte without modification.
+ *
+ * Return: A GPU page table entry to be stored in a page table.
+ */
+static u64
+kbase_native_mgm_update_gpu_pte(struct memory_group_manager_device *mgm_dev,
+			      int group_id, int mmu_level, u64 pte)
+{
+	CSTD_UNUSED(mgm_dev);
+	CSTD_UNUSED(group_id);
+	CSTD_UNUSED(mmu_level);
+
+	return pte;
+}
+
+struct memory_group_manager_device kbase_native_mgm_dev = {
+	.ops = {
+		.mgm_alloc_page = kbase_native_mgm_alloc,
+		.mgm_free_page = kbase_native_mgm_free,
+		.mgm_get_import_memory_id = NULL,
+		.mgm_vmf_insert_pfn_prot = kbase_native_mgm_vmf_insert_pfn_prot,
+		.mgm_update_gpu_pte = kbase_native_mgm_update_gpu_pte,
+	},
+	.data = NULL
+};
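+
+/*
+ * Illustrative use of the ops table above (a sketch; real call sites live
+ * elsewhere in the driver): allocate and free one 4 KiB page through the
+ * native manager, using group 0 as an arbitrary group ID:
+ *
+ *	struct memory_group_manager_device *mgm = &kbase_native_mgm_dev;
+ *	struct page *p = mgm->ops.mgm_alloc_page(mgm, 0, GFP_KERNEL, 0);
+ *
+ *	if (p)
+ *		mgm->ops.mgm_free_page(mgm, 0, p, 0);
+ */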
diff --git a/drivers/gpu/arm/midgard/mali_kbase_native_mgm.h b/drivers/gpu/arm/midgard/mali_kbase_native_mgm.h
new file mode 100644
index 0000000..431b1f4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_native_mgm.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_NATIVE_MGM_H_
+#define _KBASE_NATIVE_MGM_H_
+
+#include <linux/memory_group_manager.h>
+
+/**
+ * kbase_native_mgm_dev - Native memory group manager device
+ *
+ * An implementation of the memory group manager interface that is intended for
+ * internal use when no platform-specific memory group manager is available.
+ *
+ * It ignores the specified group ID and delegates to the kernel's physical
+ * memory allocation and freeing functions.
+ */
+extern struct memory_group_manager_device kbase_native_mgm_dev;
+
+#endif /* _KBASE_NATIVE_MGM_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_platform_fake.c b/drivers/gpu/arm/midgard/mali_kbase_platform_fake.c
new file mode 100644
index 0000000..fbb090e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_platform_fake.c
@@ -0,0 +1,124 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+
+/*
+ * This file is included only for type definitions and functions belonging to
+ * specific platform folders. Do not add dependencies with symbols that are
+ * defined somewhere else.
+ */
+#include <mali_kbase_config.h>
+
+#define PLATFORM_CONFIG_RESOURCE_COUNT 4
+#define PLATFORM_CONFIG_IRQ_RES_COUNT  3
+
+static struct platform_device *mali_device;
+
+#ifndef CONFIG_OF
+/**
+ * @brief Convert data in a struct kbase_io_resources to Linux-specific resources
+ *
+ * Converts the data in a struct kbase_io_resources into an array of Linux
+ * resource structures. Note that the function assumes the size of the
+ * linux_resources array is at least PLATFORM_CONFIG_RESOURCE_COUNT.
+ * Resources are put in fixed order: I/O memory region, job IRQ, MMU IRQ, GPU IRQ.
+ *
+ * @param[in]  io_resources     Input IO resource data
+ * @param[out] linux_resources  Pointer to output array of Linux resource structures
+ */
+static void kbasep_config_parse_io_resources(const struct kbase_io_resources *io_resources, struct resource *const linux_resources)
+{
+	if (!io_resources || !linux_resources) {
+		pr_err("%s: couldn't find proper resources\n", __func__);
+		return;
+	}
+
+	memset(linux_resources, 0, PLATFORM_CONFIG_RESOURCE_COUNT * sizeof(struct resource));
+
+	linux_resources[0].start = io_resources->io_memory_region.start;
+	linux_resources[0].end   = io_resources->io_memory_region.end;
+	linux_resources[0].flags = IORESOURCE_MEM;
+
+	linux_resources[1].start = io_resources->job_irq_number;
+	linux_resources[1].end   = io_resources->job_irq_number;
+	linux_resources[1].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
+
+	linux_resources[2].start = io_resources->mmu_irq_number;
+	linux_resources[2].end   = io_resources->mmu_irq_number;
+	linux_resources[2].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
+
+	linux_resources[3].start = io_resources->gpu_irq_number;
+	linux_resources[3].end   = io_resources->gpu_irq_number;
+	linux_resources[3].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
+}
+#endif /* CONFIG_OF */
+
+int kbase_platform_register(void)
+{
+	struct kbase_platform_config *config;
+#ifndef CONFIG_OF
+	struct resource resources[PLATFORM_CONFIG_RESOURCE_COUNT];
+#endif
+	int err;
+
+	config = kbase_get_platform_config(); /* declared in midgard/mali_kbase_config.h but defined in platform folder */
+	if (config == NULL) {
+		pr_err("%s: couldn't get platform config\n", __func__);
+		return -ENODEV;
+	}
+
+	mali_device = platform_device_alloc("mali", 0);
+	if (mali_device == NULL)
+		return -ENOMEM;
+
+#ifndef CONFIG_OF
+	kbasep_config_parse_io_resources(config->io_resources, resources);
+	err = platform_device_add_resources(mali_device, resources, PLATFORM_CONFIG_RESOURCE_COUNT);
+	if (err) {
+		platform_device_put(mali_device);
+		mali_device = NULL;
+		return err;
+	}
+#endif /* CONFIG_OF */
+
+	err = platform_device_add(mali_device);
+	if (err) {
+		platform_device_unregister(mali_device);
+		mali_device = NULL;
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(kbase_platform_register);
+
+void kbase_platform_unregister(void)
+{
+	if (mali_device)
+		platform_device_unregister(mali_device);
+}
+EXPORT_SYMBOL(kbase_platform_unregister);
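+
+/*
+ * Illustrative pairing (hypothetical caller): register the fake "mali"
+ * platform device at load time and remove it again on unload:
+ *
+ *	err = kbase_platform_register();
+ *	if (err)
+ *		return err;
+ *	...
+ *	kbase_platform_unregister();
+ */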
diff --git a/drivers/gpu/arm/midgard/mali_kbase_pm.c b/drivers/gpu/arm/midgard/mali_kbase_pm.c
new file mode 100644
index 0000000..5699eb8
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_pm.c
@@ -0,0 +1,196 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_pm.c
+ * Base kernel power management APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_vinstr.h>
+#include <mali_kbase_hwcnt_context.h>
+
+#include <mali_kbase_pm.h>
+
+int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags)
+{
+	return kbase_hwaccess_pm_powerup(kbdev, flags);
+}
+
+void kbase_pm_halt(struct kbase_device *kbdev)
+{
+	kbase_hwaccess_pm_halt(kbdev);
+}
+
+void kbase_pm_context_active(struct kbase_device *kbdev)
+{
+	(void)kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
+}
+
+int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	int c;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+	if (kbase_pm_is_suspending(kbdev)) {
+		switch (suspend_handler) {
+		case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
+			if (kbdev->pm.active_count != 0)
+				break;
+			/* FALLTHROUGH */
+		case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
+			mutex_unlock(&kbdev->pm.lock);
+			mutex_unlock(&js_devdata->runpool_mutex);
+			return 1;
+
+		case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
+			/* FALLTHROUGH */
+		default:
+			KBASE_DEBUG_ASSERT_MSG(false, "unreachable");
+			break;
+		}
+	}
+	c = ++kbdev->pm.active_count;
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);
+
+	if (c == 1) {
+		/* First context active: Power on the GPU and any cores requested by
+		 * the policy */
+		kbase_hwaccess_pm_gpu_active(kbdev);
+	}
+
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_context_active);
+
+void kbase_pm_context_idle(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	int c;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+
+	c = --kbdev->pm.active_count;
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);
+
+	KBASE_DEBUG_ASSERT(c >= 0);
+
+	if (c == 0) {
+		/* Last context has gone idle */
+		kbase_hwaccess_pm_gpu_idle(kbdev);
+
+		/* Wake up anyone waiting for this to become 0 (e.g. suspend). The
+		 * waiters must synchronize with us by locking the pm.lock after
+		 * waiting.
+		 */
+		wake_up(&kbdev->pm.zero_active_count_wait);
+	}
+
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_context_idle);
+
+void kbase_pm_suspend(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	/* Suspend vinstr. This blocks until the vinstr worker and timer are
+	 * no longer running.
+	 */
+	kbase_vinstr_suspend(kbdev->vinstr_ctx);
+
+	/* Disable GPU hardware counters.
+	 * This call will block until counters are disabled.
+	 */
+	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+	mutex_lock(&kbdev->pm.lock);
+	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+	kbdev->pm.suspending = true;
+	mutex_unlock(&kbdev->pm.lock);
+
+	/* From now on, the active count will drop towards zero. Sometimes, it'll
+	 * go up briefly before going down again. However, once it reaches zero it
+	 * will stay there - guaranteeing that we've idled all pm references */
+
+	/* Suspend job scheduler and associated components, so that it releases all
+	 * the PM active count references */
+	kbasep_js_suspend(kbdev);
+
+	/* Wait for the active count to reach zero. This is not the same as
+	 * waiting for a power down, since not all policies power down when this
+	 * reaches zero. */
+	wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);
+
+	/* NOTE: We synchronize with anything that was just finishing a
+	 * kbase_pm_context_idle() call by locking the pm.lock below */
+
+	kbase_hwaccess_pm_suspend(kbdev);
+}
+
+void kbase_pm_resume(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	/* MUST happen before any pm_context_active calls occur */
+	kbase_hwaccess_pm_resume(kbdev);
+
+	/* Initial active call, to power on the GPU/cores if needed */
+	kbase_pm_context_active(kbdev);
+
+	/* Resume any blocked atoms (which may cause contexts to be scheduled in
+	 * and dependent atoms to run) */
+	kbase_resume_suspended_soft_jobs(kbdev);
+
+	/* Resume the Job Scheduler and associated components, and start running
+	 * atoms */
+	kbasep_js_resume(kbdev);
+
+	/* Matching idle call, to power off the GPU/cores if we didn't actually
+	 * need it and the policy doesn't want it on */
+	kbase_pm_context_idle(kbdev);
+
+	/* Re-enable GPU hardware counters */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* Resume vinstr */
+	kbase_vinstr_resume(kbdev->vinstr_ctx);
+}
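+
+/*
+ * Illustrative sketch (not part of the driver): kbase_pm_suspend() and
+ * kbase_pm_resume() are intended to be driven from the OS suspend/resume
+ * path. The wrappers below are hypothetical and assume the kbase_device
+ * was stored as driver data.
+ */
+#if 0
+static int example_mali_os_suspend(struct device *dev)
+{
+	struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+	kbase_pm_suspend(kbdev);
+	return 0;
+}
+
+static int example_mali_os_resume(struct device *dev)
+{
+	struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+	kbase_pm_resume(kbdev);
+	return 0;
+}
+#endif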
diff --git a/drivers/gpu/arm/midgard/mali_kbase_pm.h b/drivers/gpu/arm/midgard/mali_kbase_pm.h
new file mode 100644
index 0000000..59a0314
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_pm.h
@@ -0,0 +1,180 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_pm.h
+ * Power management API definitions
+ */
+
+#ifndef _KBASE_PM_H_
+#define _KBASE_PM_H_
+
+#include "mali_kbase_hwaccess_pm.h"
+
+#define PM_ENABLE_IRQS       0x01
+#define PM_HW_ISSUES_DETECT  0x02
+
+
+/** Initialize the power management framework.
+ *
+ * Must be called before any other power management function
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ *
+ * @return 0 if the power management framework was successfully initialized.
+ */
+int kbase_pm_init(struct kbase_device *kbdev);
+
+/** Power up GPU after all modules have been initialized and interrupt handlers installed.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ *
+ * @param flags     Flags to pass on to kbase_pm_init_hw
+ *
+ * @return 0 if powerup was successful.
+ */
+int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags);
+
+/**
+ * Halt the power management framework.
+ * Should ensure that no new interrupts are generated,
+ * but allow any currently running interrupt handlers to complete successfully.
+ * The GPU is forced off by the time this function returns, regardless of
+ * whether or not the active power policy asks for the GPU to be powered off.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_halt(struct kbase_device *kbdev);
+
+/** Terminate the power management framework.
+ *
+ * No power management functions may be called after this
+ * (except @ref kbase_pm_init)
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_term(struct kbase_device *kbdev);
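+
+/*
+ * Illustrative ordering sketch (hypothetical caller, for reference only):
+ *
+ *	err = kbase_pm_init(kbdev);
+ *	... install interrupt handlers ...
+ *	err = kbase_pm_powerup(kbdev, PM_ENABLE_IRQS | PM_HW_ISSUES_DETECT);
+ *	... normal operation ...
+ *	kbase_pm_halt(kbdev);
+ *	kbase_pm_term(kbdev);
+ */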
+
+/** Increment the count of active contexts.
+ *
+ * This function should be called when a context is about to submit a job. It informs the active power policy that the
+ * GPU is going to be in use shortly and the policy is expected to start turning on the GPU.
+ *
+ * This function will block until the GPU is available.
+ *
+ * This function ASSERTS if a suspend is occurring/has occurred whilst this is
+ * in use. Use kbase_pm_context_active_handle_suspend() instead.
+ *
+ * @note a Suspend is only visible to Kernel threads; user-space threads in a
+ * syscall cannot witness a suspend, because they are frozen before the suspend
+ * begins.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_context_active(struct kbase_device *kbdev);
+
+
+/** Handler codes for doing kbase_pm_context_active_handle_suspend() */
+enum kbase_pm_suspend_handler {
+	/** A suspend is not expected/not possible - this is the same as
+	 * kbase_pm_context_active() */
+	KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE,
+	/** If we're suspending, fail and don't increase the active count */
+	KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE,
+	/** If we're suspending, succeed and allow the active count to increase iff
+	 * it didn't go from 0->1 (i.e., we didn't re-activate the GPU).
+	 *
+	 * This should only be used when there is a bounded time on the activation
+	 * (e.g. guarantee it's going to be idled very soon after) */
+	KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE
+};
+
+/** Suspend 'safe' variant of kbase_pm_context_active()
+ *
+ * If a suspend is in progress, this allows for various different ways of
+ * handling the suspend. Refer to @ref enum kbase_pm_suspend_handler for details.
+ *
+ * This returns a status code indicating whether we're allowed to keep the GPU
+ * active during the suspend, depending on the handler code. If the status code
+ * indicates a failure, the caller must abort whatever operation it was
+ * attempting, and potentially queue it up for after the OS has resumed.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ * @param suspend_handler The handler code for how to handle a suspend that might occur
+ * @return zero     Indicates success
+ * @return non-zero Indicates failure due to the system being suspending/suspended.
+ */
+int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler);
+
+/** Decrement the reference count of active contexts.
+ *
+ * This function should be called when a context becomes idle. After this call the GPU may be turned off by the power
+ * policy so the calling code should ensure that it does not access the GPU's registers.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_context_idle(struct kbase_device *kbdev);
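+
+/*
+ * Illustrative bracketing sketch (hypothetical caller; the error value is
+ * chosen for illustration): every successful activation must be balanced
+ * by a call to kbase_pm_context_idle() once the GPU work has finished.
+ *
+ *	if (kbase_pm_context_active_handle_suspend(kbdev,
+ *			KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE))
+ *		return -EAGAIN;
+ *	... access GPU registers or submit work ...
+ *	kbase_pm_context_idle(kbdev);
+ */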
+
+/* NOTE: kbase_pm_is_active() is in mali_kbase.h, because it is an inline
+ * function
+ */
+
+/**
+ * Suspend the GPU and prevent any further register accesses to it from Kernel
+ * threads.
+ *
+ * This is called in response to an OS suspend event, and calls into the various
+ * kbase components to complete the suspend.
+ *
+ * @note the mechanisms used here rely on all user-space threads being frozen
+ * by the OS before we suspend. Otherwise, an IOCTL could occur that powers up
+ * the GPU e.g. via atom submission.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_suspend(struct kbase_device *kbdev);
+
+/**
+ * Resume the GPU, allow register accesses to it, and resume running atoms on
+ * the GPU.
+ *
+ * This is called in response to an OS resume event, and calls into the various
+ * kbase components to complete the resume.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_resume(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_vsync_callback - vsync callback
+ *
+ * @buffer_updated: 1 if a new frame was displayed, 0 otherwise
+ * @data: Pointer to the kbase device as returned by kbase_find_device()
+ *
+ * Callback function used to notify the power management code that a vsync has
+ * occurred on the display.
+ */
+void kbase_pm_vsync_callback(int buffer_updated, void *data);
+
+#endif				/* _KBASE_PM_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_profiling_gator_api.h b/drivers/gpu/arm/midgard/mali_kbase_profiling_gator_api.h
new file mode 100644
index 0000000..15bca79
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_profiling_gator_api.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file mali_kbase_profiling_gator_api.h
+ * Profiling interface used by gator/Streamline
+ */
+
+#ifndef _KBASE_PROFILING_GATOR_API_H_
+#define _KBASE_PROFILING_GATOR_API_H_
+
+/*
+ * List of possible actions to be controlled by Streamline.
+ * The following numbers are used by gator to control
+ * the frame buffer dumping and s/w counter reporting.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+#define FBDUMP_CONTROL_MAX (5)
+#define FBDUMP_CONTROL_MIN FBDUMP_CONTROL_ENABLE
+
+void _mali_profiling_control(u32 action, u32 value);
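+
+/*
+ * Illustrative sketch (values are hypothetical): gator/Streamline would
+ * drive the controls above with calls such as
+ *
+ *	_mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1);
+ *	_mali_profiling_control(FBDUMP_CONTROL_RATE, 30);
+ *	_mali_profiling_control(SW_COUNTER_ENABLE, 1);
+ */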
+
+#endif				/* _KBASE_PROFILING_GATOR_API_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.c
new file mode 100644
index 0000000..53d9427
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.c
@@ -0,0 +1,136 @@
+/*
+ *
+ * (C) COPYRIGHT 2016, 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase.h"
+
+#include "mali_kbase_regs_history_debugfs.h"
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
+
+#include <linux/debugfs.h>
+
+
+static int regs_history_size_get(void *data, u64 *val)
+{
+	struct kbase_io_history *const h = data;
+
+	*val = h->size;
+
+	return 0;
+}
+
+static int regs_history_size_set(void *data, u64 val)
+{
+	struct kbase_io_history *const h = data;
+
+	return kbase_io_history_resize(h, (u16)val);
+}
+
+
+DEFINE_SIMPLE_ATTRIBUTE(regs_history_size_fops,
+		regs_history_size_get,
+		regs_history_size_set,
+		"%llu\n");
+
+
+/**
+ * regs_history_show - show callback for the register access history file.
+ *
+ * @sfile: The debugfs entry
+ * @data: Data associated with the entry
+ *
+ * This function is called to dump all recent accesses to the GPU registers.
+ *
+ * @return 0 if the data was successfully printed to the debugfs entry file,
+ * an error code otherwise
+ */
+static int regs_history_show(struct seq_file *sfile, void *data)
+{
+	struct kbase_io_history *const h = sfile->private;
+	u16 i;
+	size_t iters;
+	unsigned long flags;
+
+	if (!h->enabled) {
+		seq_puts(sfile, "The register access history is disabled\n");
+		goto out;
+	}
+
+	spin_lock_irqsave(&h->lock, flags);
+
+	iters = (h->size > h->count) ? h->count : h->size;
+	seq_printf(sfile, "Last %zu register accesses of %zu total:\n", iters,
+			h->count);
+	for (i = 0; i < iters; ++i) {
+		struct kbase_io_access *io =
+			&h->buf[(h->count - iters + i) % h->size];
+		char const access = (io->addr & 1) ? 'w' : 'r';
+
+		seq_printf(sfile, "%6i: %c: reg 0x%p val %08x\n", i, access,
+				(void *)(io->addr & ~0x1), io->value);
+	}
+
+	spin_unlock_irqrestore(&h->lock, flags);
+
+out:
+	return 0;
+}
+
+
+/**
+ * regs_history_open - open operation for regs_history debugfs file
+ *
+ * @in: &struct inode pointer
+ * @file: &struct file pointer
+ *
+ * @return 0 on success, or a negative error code on failure
+ */
+static int regs_history_open(struct inode *in, struct file *file)
+{
+	return single_open(file, &regs_history_show, in->i_private);
+}
+
+
+static const struct file_operations regs_history_fops = {
+	.owner = THIS_MODULE,
+	.open = &regs_history_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+
+void kbasep_regs_history_debugfs_init(struct kbase_device *kbdev)
+{
+	debugfs_create_bool("regs_history_enabled", S_IRUGO | S_IWUSR,
+			kbdev->mali_debugfs_directory,
+			&kbdev->io_history.enabled);
+	debugfs_create_file("regs_history_size", S_IRUGO | S_IWUSR,
+			kbdev->mali_debugfs_directory,
+			&kbdev->io_history, &regs_history_size_fops);
+	debugfs_create_file("regs_history", S_IRUGO,
+			kbdev->mali_debugfs_directory, &kbdev->io_history,
+			&regs_history_fops);
+}
+
+
+#endif /* defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI) */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.h
new file mode 100644
index 0000000..a0078cb
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.h
@@ -0,0 +1,55 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Header file for register access history support via debugfs
+ *
+ * This interface is made available via /sys/kernel/debug/mali#/regs_history*.
+ *
+ * Usage:
+ * - regs_history_enabled: whether recording of register accesses is enabled.
+ *   Write 'y' to enable, 'n' to disable.
+ * - regs_history_size: size of the register history buffer, must be > 0
+ * - regs_history: returns information about the most recent register accesses.
+ */
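+
+/*
+ * Illustrative shell session (device number 0 assumed):
+ *
+ *	echo y > /sys/kernel/debug/mali0/regs_history_enabled
+ *	echo 256 > /sys/kernel/debug/mali0/regs_history_size
+ *	cat /sys/kernel/debug/mali0/regs_history
+ */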
+
+#ifndef _KBASE_REGS_HISTORY_DEBUGFS_H
+#define _KBASE_REGS_HISTORY_DEBUGFS_H
+
+struct kbase_device;
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
+
+/**
+ * kbasep_regs_history_debugfs_init - add debugfs entries for register history
+ *
+ * @kbdev: Pointer to kbase_device containing the register history
+ */
+void kbasep_regs_history_debugfs_init(struct kbase_device *kbdev);
+
+#else /* defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI) */
+
+#define kbasep_regs_history_debugfs_init CSTD_NOP
+
+#endif /* defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI) */
+
+#endif  /* _KBASE_REGS_HISTORY_DEBUGFS_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_replay.c b/drivers/gpu/arm/midgard/mali_kbase_replay.c
new file mode 100644
index 0000000..92101fe
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_replay.c
@@ -0,0 +1,1156 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file mali_kbase_replay.c
+ * Replay soft job handlers
+ */
+
+#include <linux/dma-mapping.h>
+#include <mali_kbase_config.h>
+#include <mali_kbase.h>
+#include <mali_kbase_mem.h>
+#include <mali_kbase_mem_linux.h>
+
+#define JOB_NOT_STARTED 0
+#define JOB_TYPE_NULL      (1)
+#define JOB_TYPE_VERTEX    (5)
+#define JOB_TYPE_TILER     (7)
+#define JOB_TYPE_FUSED     (8)
+#define JOB_TYPE_FRAGMENT  (9)
+
+#define JOB_HEADER_32_FBD_OFFSET (31*4)
+#define JOB_HEADER_64_FBD_OFFSET (44*4)
+
+#define FBD_POINTER_MASK (~0x3f)
+
+#define SFBD_TILER_OFFSET (48*4)
+
+#define MFBD_TILER_OFFSET       (14*4)
+
+#define FBD_HIERARCHY_WEIGHTS 8
+#define FBD_HIERARCHY_MASK_MASK 0x1fff
+
+#define FBD_TYPE 1
+
+#define HIERARCHY_WEIGHTS 13
+
+#define JOB_HEADER_ID_MAX                 0xffff
+
+#define JOB_SOURCE_ID(status)		(((status) >> 16) & 0xFFFF)
+#define JOB_POLYGON_LIST		(0x03)
+
+struct fragment_job {
+	struct job_descriptor_header header;
+
+	u32 x[2];
+	union {
+		u64 _64;
+		u32 _32;
+	} fragment_fbd;
+};
+
+static void dump_job_head(struct kbase_context *kctx, char *head_str,
+		struct job_descriptor_header *job)
+{
+#ifdef CONFIG_MALI_DEBUG
+	dev_dbg(kctx->kbdev->dev, "%s\n", head_str);
+	dev_dbg(kctx->kbdev->dev,
+			"addr                  = %p\n"
+			"exception_status      = %x (Source ID: 0x%x Access: 0x%x Exception: 0x%x)\n"
+			"first_incomplete_task = %x\n"
+			"fault_pointer         = %llx\n"
+			"job_descriptor_size   = %x\n"
+			"job_type              = %x\n"
+			"job_barrier           = %x\n"
+			"_reserved_01          = %x\n"
+			"_reserved_02          = %x\n"
+			"_reserved_03          = %x\n"
+			"_reserved_04/05       = %x,%x\n"
+			"job_index             = %x\n"
+			"dependencies          = %x,%x\n",
+			job, job->exception_status,
+			JOB_SOURCE_ID(job->exception_status),
+			(job->exception_status >> 8) & 0x3,
+			job->exception_status  & 0xFF,
+			job->first_incomplete_task,
+			job->fault_pointer, job->job_descriptor_size,
+			job->job_type, job->job_barrier, job->_reserved_01,
+			job->_reserved_02, job->_reserved_03,
+			job->_reserved_04, job->_reserved_05,
+			job->job_index,
+			job->job_dependency_index_1,
+			job->job_dependency_index_2);
+
+	if (job->job_descriptor_size)
+		dev_dbg(kctx->kbdev->dev, "next               = %llx\n",
+				job->next_job._64);
+	else
+		dev_dbg(kctx->kbdev->dev, "next               = %x\n",
+				job->next_job._32);
+#endif
+}
+
+static int kbasep_replay_reset_sfbd(struct kbase_context *kctx,
+		u64 fbd_address, u64 tiler_heap_free,
+		u16 hierarchy_mask, u32 default_weight)
+{
+	struct {
+		u32 padding_1[1];
+		u32 flags;
+		u64 padding_2[2];
+		u64 heap_free_address;
+		u32 padding[8];
+		u32 weights[FBD_HIERARCHY_WEIGHTS];
+	} *fbd_tiler;
+	struct kbase_vmap_struct map;
+
+	dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);
+
+	fbd_tiler = kbase_vmap(kctx, fbd_address + SFBD_TILER_OFFSET,
+			sizeof(*fbd_tiler), &map);
+	if (!fbd_tiler) {
+		dev_err(kctx->kbdev->dev, "kbasep_replay_reset_fbd: failed to map fbd\n");
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_MALI_DEBUG
+	dev_dbg(kctx->kbdev->dev,
+		"FBD tiler:\n"
+		"flags = %x\n"
+		"heap_free_address = %llx\n",
+		fbd_tiler->flags, fbd_tiler->heap_free_address);
+#endif
+	if (hierarchy_mask) {
+		u32 weights[HIERARCHY_WEIGHTS];
+		u16 old_hierarchy_mask = fbd_tiler->flags &
+						       FBD_HIERARCHY_MASK_MASK;
+		int i, j = 0;
+
+		for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+			if (old_hierarchy_mask & (1 << i)) {
+				KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+				weights[i] = fbd_tiler->weights[j++];
+			} else {
+				weights[i] = default_weight;
+			}
+		}
+
+
+		dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x  New hierarchy mask=%x\n",
+				old_hierarchy_mask, hierarchy_mask);
+
+		for (i = 0; i < HIERARCHY_WEIGHTS; i++)
+			dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
+					i, weights[i]);
+
+		j = 0;
+
+		for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+			if (hierarchy_mask & (1 << i)) {
+				KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+
+				dev_dbg(kctx->kbdev->dev, " Writing hierarchy level %02d (%08x) to %d\n",
+						i, weights[i], j);
+
+				fbd_tiler->weights[j++] = weights[i];
+			}
+		}
+
+		for (; j < FBD_HIERARCHY_WEIGHTS; j++)
+			fbd_tiler->weights[j] = 0;
+
+		fbd_tiler->flags = hierarchy_mask | (1 << 16);
+	}
+
+	fbd_tiler->heap_free_address = tiler_heap_free;
+
+	dev_dbg(kctx->kbdev->dev, "heap_free_address=%llx flags=%x\n",
+			fbd_tiler->heap_free_address, fbd_tiler->flags);
+
+	kbase_vunmap(kctx, &map);
+
+	return 0;
+}
+
+static int kbasep_replay_reset_mfbd(struct kbase_context *kctx,
+		u64 fbd_address, u64 tiler_heap_free,
+		u16 hierarchy_mask, u32 default_weight)
+{
+	struct kbase_vmap_struct map;
+	struct {
+		u32 padding_0;
+		u32 flags;
+		u64 padding_1[2];
+		u64 heap_free_address;
+		u64 padding_2;
+		u32 weights[FBD_HIERARCHY_WEIGHTS];
+	} *fbd_tiler;
+
+	dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);
+
+	fbd_tiler = kbase_vmap(kctx, fbd_address + MFBD_TILER_OFFSET,
+			sizeof(*fbd_tiler), &map);
+	if (!fbd_tiler) {
+		dev_err(kctx->kbdev->dev,
+			       "kbasep_replay_reset_fbd: failed to map fbd\n");
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_MALI_DEBUG
+	dev_dbg(kctx->kbdev->dev, "FBD tiler:\n"
+			"flags = %x\n"
+			"heap_free_address = %llx\n",
+			fbd_tiler->flags,
+			fbd_tiler->heap_free_address);
+#endif
+	if (hierarchy_mask) {
+		u32 weights[HIERARCHY_WEIGHTS];
+		u16 old_hierarchy_mask = (fbd_tiler->flags) &
+						       FBD_HIERARCHY_MASK_MASK;
+		int i, j = 0;
+
+		for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+			if (old_hierarchy_mask & (1 << i)) {
+				KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+				weights[i] = fbd_tiler->weights[j++];
+			} else {
+				weights[i] = default_weight;
+			}
+		}
+
+
+		dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x  New hierarchy mask=%x\n",
+				old_hierarchy_mask, hierarchy_mask);
+
+		for (i = 0; i < HIERARCHY_WEIGHTS; i++)
+			dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
+					i, weights[i]);
+
+		j = 0;
+
+		for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+			if (hierarchy_mask & (1 << i)) {
+				KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+
+				dev_dbg(kctx->kbdev->dev,
+				" Writing hierarchy level %02d (%08x) to %d\n",
+							     i, weights[i], j);
+
+				fbd_tiler->weights[j++] = weights[i];
+			}
+		}
+
+		for (; j < FBD_HIERARCHY_WEIGHTS; j++)
+			fbd_tiler->weights[j] = 0;
+
+		fbd_tiler->flags = hierarchy_mask | (1 << 16);
+	}
+
+	fbd_tiler->heap_free_address = tiler_heap_free;
+
+	kbase_vunmap(kctx, &map);
+
+	return 0;
+}
+
+/**
+ * @brief Reset the status of an FBD pointed to by a tiler job
+ *
+ * This performs two functions:
+ * - Set the hierarchy mask
+ * - Reset the tiler free heap address
+ *
+ * @param[in] kctx              Context pointer
+ * @param[in] job_header        Address of job header to reset.
+ * @param[in] tiler_heap_free   The value to reset Tiler Heap Free to
+ * @param[in] hierarchy_mask    The hierarchy mask to use
+ * @param[in] default_weight    Default hierarchy weight to write when no other
+ *                              weight is given in the FBD
+ * @param[in] job_64            true if this job is using 64-bit
+ *                              descriptors
+ *
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_reset_tiler_job(struct kbase_context *kctx,
+		u64 job_header,	u64 tiler_heap_free,
+		u16 hierarchy_mask, u32 default_weight,	bool job_64)
+{
+	struct kbase_vmap_struct map;
+	u64 fbd_address;
+
+	if (job_64) {
+		u64 *job_ext;
+
+		job_ext = kbase_vmap(kctx,
+				job_header + JOB_HEADER_64_FBD_OFFSET,
+				sizeof(*job_ext), &map);
+
+		if (!job_ext) {
+			dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
+			return -EINVAL;
+		}
+
+		fbd_address = *job_ext;
+
+		kbase_vunmap(kctx, &map);
+	} else {
+		u32 *job_ext;
+
+		job_ext = kbase_vmap(kctx,
+				job_header + JOB_HEADER_32_FBD_OFFSET,
+				sizeof(*job_ext), &map);
+
+		if (!job_ext) {
+			dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
+			return -EINVAL;
+		}
+
+		fbd_address = *job_ext;
+
+		kbase_vunmap(kctx, &map);
+	}
+
+	if (fbd_address & FBD_TYPE) {
+		return kbasep_replay_reset_mfbd(kctx,
+						fbd_address & FBD_POINTER_MASK,
+						tiler_heap_free,
+						hierarchy_mask,
+						default_weight);
+	} else {
+		return kbasep_replay_reset_sfbd(kctx,
+						fbd_address & FBD_POINTER_MASK,
+						tiler_heap_free,
+						hierarchy_mask,
+						default_weight);
+	}
+}
+
+/**
+ * @brief Reset the status of a job
+ *
+ * This performs the following functions:
+ *
+ * - Reset the Job Status field of each job to NOT_STARTED.
+ * - Set the Job Type field of any Vertex Jobs to Null Job.
+ * - For any jobs using an FBD, set the Tiler Heap Free field to the value of
+ *   the tiler_heap_free parameter, and set the hierarchy level mask to the
+ *   hier_mask parameter.
+ * - Offset HW dependencies by the hw_job_id_offset parameter
+ * - Set the Perform Job Barrier flag if this job is the first in the chain
+ * - Read the address of the next job header
+ *
+ * @param[in] kctx              Context pointer
+ * @param[in,out] job_header    Address of job header to reset. Set to address
+ *                              of next job header on exit.
+ * @param[in] prev_jc           Previous job chain to link to, if this job is
+ *                              the last in the chain.
+ * @param[in] hw_job_id_offset  Offset for HW job IDs
+ * @param[in] tiler_heap_free   The value to reset Tiler Heap Free to
+ * @param[in] hierarchy_mask    The hierarchy mask to use
+ * @param[in] default_weight    Default hierarchy weight to write when no other
+ *                              weight is given in the FBD
+ * @param[in] first_in_chain    true if this job is the first in the chain
+ * @param[in] fragment_chain    true if this job is in the fragment chain
+ *
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_reset_job(struct kbase_context *kctx,
+		u64 *job_header, u64 prev_jc,
+		u64 tiler_heap_free, u16 hierarchy_mask,
+		u32 default_weight, u16 hw_job_id_offset,
+		bool first_in_chain, bool fragment_chain)
+{
+	struct fragment_job *frag_job;
+	struct job_descriptor_header *job;
+	u64 new_job_header;
+	struct kbase_vmap_struct map;
+
+	frag_job = kbase_vmap(kctx, *job_header, sizeof(*frag_job), &map);
+	if (!frag_job) {
+		dev_err(kctx->kbdev->dev,
+				 "kbasep_replay_parse_jc: failed to map jc\n");
+		return -EINVAL;
+	}
+	job = &frag_job->header;
+
+	dump_job_head(kctx, "Job header:", job);
+
+	if (job->exception_status == JOB_NOT_STARTED && !fragment_chain) {
+		dev_err(kctx->kbdev->dev, "Job already not started\n");
+		goto out_unmap;
+	}
+	job->exception_status = JOB_NOT_STARTED;
+
+	if (job->job_type == JOB_TYPE_VERTEX)
+		job->job_type = JOB_TYPE_NULL;
+
+	if (job->job_type == JOB_TYPE_FUSED) {
+		dev_err(kctx->kbdev->dev, "Fused jobs can not be replayed\n");
+		goto out_unmap;
+	}
+
+	if (first_in_chain)
+		job->job_barrier = 1;
+
+	if ((job->job_dependency_index_1 + hw_job_id_offset) >
+			JOB_HEADER_ID_MAX ||
+	    (job->job_dependency_index_2 + hw_job_id_offset) >
+			JOB_HEADER_ID_MAX ||
+	    (job->job_index + hw_job_id_offset) > JOB_HEADER_ID_MAX) {
+		dev_err(kctx->kbdev->dev,
+			     "Job indicies/dependencies out of valid range\n");
+		goto out_unmap;
+	}
+
+	if (job->job_dependency_index_1)
+		job->job_dependency_index_1 += hw_job_id_offset;
+	if (job->job_dependency_index_2)
+		job->job_dependency_index_2 += hw_job_id_offset;
+
+	job->job_index += hw_job_id_offset;
+
+	if (job->job_descriptor_size) {
+		new_job_header = job->next_job._64;
+		if (!job->next_job._64)
+			job->next_job._64 = prev_jc;
+	} else {
+		new_job_header = job->next_job._32;
+		if (!job->next_job._32)
+			job->next_job._32 = prev_jc;
+	}
+	dump_job_head(kctx, "Updated to:", job);
+
+	if (job->job_type == JOB_TYPE_TILER) {
+		bool job_64 = job->job_descriptor_size != 0;
+
+		if (kbasep_replay_reset_tiler_job(kctx, *job_header,
+				tiler_heap_free, hierarchy_mask,
+				default_weight, job_64) != 0)
+			goto out_unmap;
+
+	} else if (job->job_type == JOB_TYPE_FRAGMENT) {
+		u64 fbd_address;
+
+		if (job->job_descriptor_size)
+			fbd_address = frag_job->fragment_fbd._64;
+		else
+			fbd_address = (u64)frag_job->fragment_fbd._32;
+
+		if (fbd_address & FBD_TYPE) {
+			if (kbasep_replay_reset_mfbd(kctx,
+					fbd_address & FBD_POINTER_MASK,
+					tiler_heap_free,
+					hierarchy_mask,
+					default_weight) != 0)
+				goto out_unmap;
+		} else {
+			if (kbasep_replay_reset_sfbd(kctx,
+					fbd_address & FBD_POINTER_MASK,
+					tiler_heap_free,
+					hierarchy_mask,
+					default_weight) != 0)
+				goto out_unmap;
+		}
+	}
+
+	kbase_vunmap(kctx, &map);
+
+	*job_header = new_job_header;
+
+	return 0;
+
+out_unmap:
+	kbase_vunmap(kctx, &map);
+	return -EINVAL;
+}
+
+/**
+ * @brief Find the highest job ID in a job chain
+ *
+ * @param[in] kctx        Context pointer
+ * @param[in] jc          Job chain start address
+ * @param[out] hw_job_id  Highest job ID in chain
+ *
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_find_hw_job_id(struct kbase_context *kctx,
+		u64 jc,	u16 *hw_job_id)
+{
+	while (jc) {
+		struct job_descriptor_header *job;
+		struct kbase_vmap_struct map;
+
+		dev_dbg(kctx->kbdev->dev,
+			"kbasep_replay_find_hw_job_id: parsing jc=%llx\n", jc);
+
+		job = kbase_vmap(kctx, jc, sizeof(*job), &map);
+		if (!job) {
+			dev_err(kctx->kbdev->dev, "failed to map jc\n");
+
+			return -EINVAL;
+		}
+
+		if (job->job_index > *hw_job_id)
+			*hw_job_id = job->job_index;
+
+		if (job->job_descriptor_size)
+			jc = job->next_job._64;
+		else
+			jc = job->next_job._32;
+
+		kbase_vunmap(kctx, &map);
+	}
+
+	return 0;
+}
+
+/**
+ * @brief Reset the status of a number of jobs
+ *
+ * This function walks the provided job chain, and calls
+ * kbasep_replay_reset_job for each job. It also links the job chain to the
+ * provided previous job chain.
+ *
+ * The function will fail if any of the jobs passed already have status of
+ * NOT_STARTED.
+ *
+ * @param[in] kctx              Context pointer
+ * @param[in] jc                Job chain to be processed
+ * @param[in] prev_jc           Job chain to be added to. May be NULL
+ * @param[in] tiler_heap_free   The value to reset Tiler Heap Free to
+ * @param[in] hierarchy_mask    The hierarchy mask to use
+ * @param[in] default_weight    Default hierarchy weight to write when no other
+ *                              weight is given in the FBD
+ * @param[in] hw_job_id_offset  Offset for HW job IDs
+ * @param[in] fragment_chain    true if this chain is the fragment chain
+ *
+ * @return 0 on success, error code otherwise
+ */
+static int kbasep_replay_parse_jc(struct kbase_context *kctx,
+		u64 jc,	u64 prev_jc,
+		u64 tiler_heap_free, u16 hierarchy_mask,
+		u32 default_weight, u16 hw_job_id_offset,
+		bool fragment_chain)
+{
+	bool first_in_chain = true;
+	int nr_jobs = 0;
+
+	dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n",
+			jc, hw_job_id_offset);
+
+	while (jc) {
+		dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: parsing jc=%llx\n", jc);
+
+		if (kbasep_replay_reset_job(kctx, &jc, prev_jc,
+				tiler_heap_free, hierarchy_mask,
+				default_weight, hw_job_id_offset,
+				first_in_chain, fragment_chain) != 0)
+			return -EINVAL;
+
+		first_in_chain = false;
+
+		nr_jobs++;
+		if (fragment_chain &&
+		    nr_jobs >= BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT) {
+			dev_err(kctx->kbdev->dev,
+				"Exceeded maximum number of jobs in fragment chain\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * @brief Reset the status of a replay job, and set up dependencies
+ *
+ * This performs the actions to allow the replay job to be re-run following
+ * completion of the passed dependency.
+ *
+ * @param[in] katom     The atom to be reset
+ * @param[in] dep_atom  The dependency to be attached to the atom
+ */
+static void kbasep_replay_reset_softjob(struct kbase_jd_atom *katom,
+		struct kbase_jd_atom *dep_atom)
+{
+	katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+	kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA);
+	list_add_tail(&katom->dep_item[0], &dep_atom->dep_head[0]);
+}
+
+/**
+ * @brief Allocate an unused katom
+ *
+ * This will search the provided context for an unused katom, and will mark it
+ * as KBASE_JD_ATOM_STATE_QUEUED.
+ *
+ * If no atoms are available then the function will fail.
+ *
+ * @param[in] kctx      Context pointer
+ * @return An atom ID, or -1 on failure
+ */
+static int kbasep_allocate_katom(struct kbase_context *kctx)
+{
+	struct kbase_jd_context *jctx = &kctx->jctx;
+	int i;
+
+	for (i = BASE_JD_ATOM_COUNT-1; i > 0; i--) {
+		if (jctx->atoms[i].status == KBASE_JD_ATOM_STATE_UNUSED) {
+			jctx->atoms[i].status = KBASE_JD_ATOM_STATE_QUEUED;
+			dev_dbg(kctx->kbdev->dev,
+				  "kbasep_allocate_katom: Allocated atom %d\n",
+									    i);
+			return i;
+		}
+	}
+
+	return -1;
+}
+
+/**
+ * @brief Release a katom
+ *
+ * This will mark the provided atom as available, and remove any dependencies.
+ *
+ * For use on the error path.
+ *
+ * @param[in] kctx      Context pointer
+ * @param[in] atom_id   ID of atom to release
+ */
+static void kbasep_release_katom(struct kbase_context *kctx, int atom_id)
+{
+	struct kbase_jd_context *jctx = &kctx->jctx;
+
+	dev_dbg(kctx->kbdev->dev, "kbasep_release_katom: Released atom %d\n",
+			atom_id);
+
+	while (!list_empty(&jctx->atoms[atom_id].dep_head[0]))
+		list_del(jctx->atoms[atom_id].dep_head[0].next);
+
+	while (!list_empty(&jctx->atoms[atom_id].dep_head[1]))
+		list_del(jctx->atoms[atom_id].dep_head[1].next);
+
+	jctx->atoms[atom_id].status = KBASE_JD_ATOM_STATE_UNUSED;
+}
+
+static void kbasep_replay_create_atom(struct kbase_context *kctx,
+				      struct base_jd_atom_v2 *atom,
+				      int atom_nr,
+				      base_jd_prio prio)
+{
+	atom->nr_extres = 0;
+	atom->extres_list = 0;
+	atom->device_nr = 0;
+	atom->prio = prio;
+	atom->atom_number = atom_nr;
+
+	base_jd_atom_dep_set(&atom->pre_dep[0], 0, BASE_JD_DEP_TYPE_INVALID);
+	base_jd_atom_dep_set(&atom->pre_dep[1], 0, BASE_JD_DEP_TYPE_INVALID);
+
+	atom->udata.blob[0] = 0;
+	atom->udata.blob[1] = 0;
+}
+
+/**
+ * @brief Create two atoms for the purpose of replaying jobs
+ *
+ * Two atoms are allocated and created. The jc pointer is not set at this
+ * stage. The second atom has a dependency on the first. The remaining fields
+ * are set up as follows:
+ *
+ * - No external resources. Any required external resources will be held by the
+ *   replay atom.
+ * - device_nr is set to 0. This is not relevant as
+ *   BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set.
+ * - Priority is inherited from the replay job.
+ *
+ * @param[out] t_atom      Atom to use for tiler jobs
+ * @param[out] f_atom      Atom to use for fragment jobs
+ * @param[in]  prio        Priority of new atom (inherited from replay soft
+ *                         job)
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_create_atoms(struct kbase_context *kctx,
+		struct base_jd_atom_v2 *t_atom,
+		struct base_jd_atom_v2 *f_atom,
+		base_jd_prio prio)
+{
+	int t_atom_nr, f_atom_nr;
+
+	t_atom_nr = kbasep_allocate_katom(kctx);
+	if (t_atom_nr < 0) {
+		dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
+		return -EINVAL;
+	}
+
+	f_atom_nr = kbasep_allocate_katom(kctx);
+	if (f_atom_nr < 0) {
+		dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
+		kbasep_release_katom(kctx, t_atom_nr);
+		return -EINVAL;
+	}
+
+	kbasep_replay_create_atom(kctx, t_atom, t_atom_nr, prio);
+	kbasep_replay_create_atom(kctx, f_atom, f_atom_nr, prio);
+
+	base_jd_atom_dep_set(&f_atom->pre_dep[0], t_atom_nr,
+			     BASE_JD_DEP_TYPE_DATA);
+
+	return 0;
+}
+
+#ifdef CONFIG_MALI_DEBUG
+static void payload_dump(struct kbase_context *kctx, base_jd_replay_payload *payload)
+{
+	u64 next;
+
+	dev_dbg(kctx->kbdev->dev, "Tiler jc list :\n");
+	next = payload->tiler_jc_list;
+
+	while (next) {
+		struct kbase_vmap_struct map;
+		base_jd_replay_jc *jc_struct;
+
+		jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &map);
+
+		if (!jc_struct)
+			return;
+
+		dev_dbg(kctx->kbdev->dev, "* jc_struct=%p jc=%llx next=%llx\n",
+				jc_struct, jc_struct->jc, jc_struct->next);
+
+		next = jc_struct->next;
+
+		kbase_vunmap(kctx, &map);
+	}
+}
+#endif
+
+/**
+ * @brief Parse a base_jd_replay_payload provided by userspace
+ *
+ * This will read the payload from userspace, and parse the job chains.
+ *
+ * @param[in] kctx         Context pointer
+ * @param[in] replay_atom  Replay soft job atom
+ * @param[in] t_atom       Atom to use for tiler jobs
+ * @param[in] f_atom       Atom to use for fragment jobs
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_parse_payload(struct kbase_context *kctx,
+					      struct kbase_jd_atom *replay_atom,
+					      struct base_jd_atom_v2 *t_atom,
+					      struct base_jd_atom_v2 *f_atom)
+{
+	base_jd_replay_payload *payload = NULL;
+	u64 next;
+	u64 prev_jc = 0;
+	u16 hw_job_id_offset = 0;
+	int ret = -EINVAL;
+	struct kbase_vmap_struct map;
+
+	dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: replay_atom->jc = %llx sizeof(payload) = %zu\n",
+			replay_atom->jc, sizeof(payload));
+
+	payload = kbase_vmap(kctx, replay_atom->jc, sizeof(*payload), &map);
+	if (!payload) {
+		dev_err(kctx->kbdev->dev, "kbasep_replay_parse_payload: failed to map payload into kernel space\n");
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_MALI_DEBUG
+	dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
+	dev_dbg(kctx->kbdev->dev, "Payload structure:\n"
+				  "tiler_jc_list            = %llx\n"
+				  "fragment_jc              = %llx\n"
+				  "tiler_heap_free          = %llx\n"
+				  "fragment_hierarchy_mask  = %x\n"
+				  "tiler_hierarchy_mask     = %x\n"
+				  "hierarchy_default_weight = %x\n"
+				  "tiler_core_req           = %x\n"
+				  "fragment_core_req        = %x\n",
+							payload->tiler_jc_list,
+							  payload->fragment_jc,
+						      payload->tiler_heap_free,
+					      payload->fragment_hierarchy_mask,
+						 payload->tiler_hierarchy_mask,
+					     payload->hierarchy_default_weight,
+						       payload->tiler_core_req,
+						   payload->fragment_core_req);
+	payload_dump(kctx, payload);
+#endif
+	t_atom->core_req = payload->tiler_core_req | BASEP_JD_REQ_EVENT_NEVER;
+	f_atom->core_req = payload->fragment_core_req | BASEP_JD_REQ_EVENT_NEVER;
+
+	/* Sanity check core requirements */
+	if ((t_atom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_T ||
+	    (f_atom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_FS ||
+	     t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES ||
+	     f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+
+		int t_atom_type = t_atom->core_req & BASE_JD_REQ_ATOM_TYPE & ~BASE_JD_REQ_COHERENT_GROUP;
+		int f_atom_type = f_atom->core_req & BASE_JD_REQ_ATOM_TYPE & ~BASE_JD_REQ_COHERENT_GROUP & ~BASE_JD_REQ_FS_AFBC;
+		int t_has_ex_res = t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES;
+		int f_has_ex_res = f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES;
+
+		if (t_atom_type != BASE_JD_REQ_T) {
+			dev_err(kctx->kbdev->dev, "Invalid core requirement: Tiler atom not a tiler job. Was: 0x%x\n Expected: 0x%x",
+			    t_atom_type, BASE_JD_REQ_T);
+		}
+		if (f_atom_type != BASE_JD_REQ_FS) {
+			dev_err(kctx->kbdev->dev, "Invalid core requirement: Fragment shader atom not a fragment shader. Was 0x%x Expected: 0x%x\n",
+			    f_atom_type, BASE_JD_REQ_FS);
+		}
+		if (t_has_ex_res) {
+			dev_err(kctx->kbdev->dev, "Invalid core requirement: Tiler atom has external resources.\n");
+		}
+		if (f_has_ex_res) {
+			dev_err(kctx->kbdev->dev, "Invalid core requirement: Fragment shader atom has external resources.\n");
+		}
+
+		goto out;
+	}
+
+	/* Process tiler job chains */
+	next = payload->tiler_jc_list;
+	if (!next) {
+		dev_err(kctx->kbdev->dev, "Invalid tiler JC list\n");
+		goto out;
+	}
+
+	while (next) {
+		base_jd_replay_jc *jc_struct;
+		struct kbase_vmap_struct jc_map;
+		u64 jc;
+
+		jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &jc_map);
+
+		if (!jc_struct) {
+			dev_err(kctx->kbdev->dev, "Failed to map jc struct\n");
+			goto out;
+		}
+
+		jc = jc_struct->jc;
+		next = jc_struct->next;
+		if (next)
+			jc_struct->jc = 0;
+
+		kbase_vunmap(kctx, &jc_map);
+
+		if (jc) {
+			u16 max_hw_job_id = 0;
+
+			if (kbasep_replay_find_hw_job_id(kctx, jc,
+					&max_hw_job_id) != 0)
+				goto out;
+
+			if (kbasep_replay_parse_jc(kctx, jc, prev_jc,
+					payload->tiler_heap_free,
+					payload->tiler_hierarchy_mask,
+					payload->hierarchy_default_weight,
+					hw_job_id_offset, false) != 0) {
+				goto out;
+			}
+
+			hw_job_id_offset += max_hw_job_id;
+
+			prev_jc = jc;
+		}
+	}
+	t_atom->jc = prev_jc;
+
+	/* Process fragment job chain */
+	f_atom->jc = payload->fragment_jc;
+	if (kbasep_replay_parse_jc(kctx, payload->fragment_jc, 0,
+			payload->tiler_heap_free,
+			payload->fragment_hierarchy_mask,
+			payload->hierarchy_default_weight, 0,
+			true) != 0) {
+		goto out;
+	}
+
+	if (!t_atom->jc || !f_atom->jc) {
+		dev_err(kctx->kbdev->dev, "Invalid payload\n");
+		goto out;
+	}
+
+	dev_dbg(kctx->kbdev->dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
+			t_atom->jc, f_atom->jc);
+	ret = 0;
+
+out:
+	kbase_vunmap(kctx, &map);
+
+	return ret;
+}
+
+static void kbase_replay_process_worker(struct work_struct *data)
+{
+	struct kbase_jd_atom *katom;
+	struct kbase_context *kctx;
+	struct kbase_jd_context *jctx;
+	bool need_to_try_schedule_context = false;
+
+	struct base_jd_atom_v2 t_atom, f_atom;
+	struct kbase_jd_atom *t_katom, *f_katom;
+	base_jd_prio atom_prio;
+
+	katom = container_of(data, struct kbase_jd_atom, work);
+	kctx = katom->kctx;
+	jctx = &kctx->jctx;
+
+	mutex_lock(&jctx->lock);
+
+	atom_prio = kbasep_js_sched_prio_to_atom_prio(katom->sched_priority);
+
+	if (kbasep_replay_create_atoms(
+			kctx, &t_atom, &f_atom, atom_prio) != 0) {
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		goto out;
+	}
+
+	t_katom = &jctx->atoms[t_atom.atom_number];
+	f_katom = &jctx->atoms[f_atom.atom_number];
+
+	if (kbasep_replay_parse_payload(kctx, katom, &t_atom, &f_atom) != 0) {
+		kbasep_release_katom(kctx, t_atom.atom_number);
+		kbasep_release_katom(kctx, f_atom.atom_number);
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		goto out;
+	}
+
+	kbasep_replay_reset_softjob(katom, f_katom);
+
+	need_to_try_schedule_context |= jd_submit_atom(kctx, &t_atom, t_katom);
+	if (t_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
+		dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
+		kbasep_release_katom(kctx, f_atom.atom_number);
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		goto out;
+	}
+	need_to_try_schedule_context |= jd_submit_atom(kctx, &f_atom, f_katom);
+	if (f_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
+		dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		goto out;
+	}
+
+	katom->event_code = BASE_JD_EVENT_DONE;
+
+out:
+	if (katom->event_code != BASE_JD_EVENT_DONE) {
+		kbase_disjoint_state_down(kctx->kbdev);
+
+		need_to_try_schedule_context |= jd_done_nolock(katom, NULL);
+	}
+
+	if (need_to_try_schedule_context)
+		kbase_js_sched_all(kctx->kbdev);
+
+	mutex_unlock(&jctx->lock);
+}
+
+/**
+ * @brief Check job replay fault
+ *
+ * This reads the job payload, checks the fault type and source, then decides
+ * whether a replay is required.
+ *
+ * @param[in] katom       The atom to be processed
+ * @return  true if a replay is required, false otherwise.
+ */
+static bool kbase_replay_fault_check(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct device *dev = kctx->kbdev->dev;
+	base_jd_replay_payload *payload;
+	u64 job_header;
+	u64 job_loop_detect;
+	struct job_descriptor_header *job;
+	struct kbase_vmap_struct job_map;
+	struct kbase_vmap_struct map;
+	bool err = false;
+
+	/* Replay job if fault is of type BASE_JD_EVENT_JOB_WRITE_FAULT or
+	 * if force_replay is enabled.
+	 */
+	if (BASE_JD_EVENT_TERMINATED == katom->event_code) {
+		return false;
+	} else if (BASE_JD_EVENT_JOB_WRITE_FAULT == katom->event_code) {
+		return true;
+	} else if (BASE_JD_EVENT_FORCE_REPLAY == katom->event_code) {
+		katom->event_code = BASE_JD_EVENT_DATA_INVALID_FAULT;
+		return true;
+	} else if (BASE_JD_EVENT_DATA_INVALID_FAULT != katom->event_code) {
+		/* No replay for faults of type other than
+		 * BASE_JD_EVENT_DATA_INVALID_FAULT.
+		 */
+		return false;
+	}
+
+	/* Job fault is BASE_JD_EVENT_DATA_INVALID_FAULT, now scan fragment jc
+	 * to find out whether the source of exception is POLYGON_LIST. Replay
+	 * is required if the source of fault is POLYGON_LIST.
+	 */
+	payload = kbase_vmap(kctx, katom->jc, sizeof(*payload), &map);
+	if (!payload) {
+		dev_err(dev, "kbase_replay_fault_check: failed to map payload.\n");
+		return false;
+	}
+
+#ifdef CONFIG_MALI_DEBUG
+	dev_dbg(dev, "kbase_replay_fault_check: payload=%p\n", payload);
+	dev_dbg(dev, "\nPayload structure:\n"
+		     "fragment_jc              = 0x%llx\n"
+		     "fragment_hierarchy_mask  = 0x%x\n"
+		     "fragment_core_req        = 0x%x\n",
+		     payload->fragment_jc,
+		     payload->fragment_hierarchy_mask,
+		     payload->fragment_core_req);
+#endif
+	/* Process fragment job chain */
+	job_header      = (u64) payload->fragment_jc;
+	job_loop_detect = job_header;
+	while (job_header) {
+		job = kbase_vmap(kctx, job_header, sizeof(*job), &job_map);
+		if (!job) {
+			dev_err(dev, "failed to map jc\n");
+			/* unmap payload */
+			kbase_vunmap(kctx, &map);
+			return false;
+		}
+
+
+		dump_job_head(kctx, "\njob_head structure:\n", job);
+
+		/* Replay only when the polygon list reader caused the
+		 * DATA_INVALID_FAULT */
+		if ((BASE_JD_EVENT_DATA_INVALID_FAULT == katom->event_code) &&
+		   (JOB_POLYGON_LIST == JOB_SOURCE_ID(job->exception_status))) {
+			err = true;
+			kbase_vunmap(kctx, &job_map);
+			break;
+		}
+
+		/* Move on to next fragment job in the list */
+		if (job->job_descriptor_size)
+			job_header = job->next_job._64;
+		else
+			job_header = job->next_job._32;
+
+		kbase_vunmap(kctx, &job_map);
+
+		/* Job chain loop detected */
+		if (job_header == job_loop_detect)
+			break;
+	}
+
+	/* unmap payload */
+	kbase_vunmap(kctx, &map);
+
+	return err;
+}
+
+
+/**
+ * @brief Process a replay job
+ *
+ * Called from kbase_process_soft_job.
+ *
+ * On exit, if the job has completed, katom->event_code will have been updated.
+ * If the job has not completed, and is replaying jobs, then the atom status
+ * will have been reset to KBASE_JD_ATOM_STATE_QUEUED.
+ *
+ * @param[in] katom  The atom to be processed
+ * @return           false if the atom has completed
+ *                   true if the atom is replaying jobs
+ */
+bool kbase_replay_process(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+
+	/* Don't replay this atom if these issues are not present in the
+	 * hardware */
+	if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11020) &&
+			!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11024)) {
+		dev_dbg(kbdev->dev, "Hardware does not need replay workaround");
+
+		/* Signal failure to userspace */
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+
+		return false;
+	}
+
+	if (katom->event_code == BASE_JD_EVENT_DONE) {
+		dev_dbg(kbdev->dev, "Previous job succeeded - not replaying\n");
+
+		if (katom->retry_count)
+			kbase_disjoint_state_down(kbdev);
+
+		return false;
+	}
+
+	if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+		dev_dbg(kbdev->dev, "Not replaying; context is dying\n");
+
+		if (katom->retry_count)
+			kbase_disjoint_state_down(kbdev);
+
+		return false;
+	}
+
+	/* Check job exception type and source before replaying. */
+	if (!kbase_replay_fault_check(katom)) {
+		dev_dbg(kbdev->dev,
+			"Replay cancelled on event %x\n", katom->event_code);
+		/* katom->event_code is already set to the failure code of the
+		 * previous job.
+		 */
+		return false;
+	}
+
+	dev_warn(kbdev->dev, "Replaying jobs retry=%d\n",
+			katom->retry_count);
+
+	katom->retry_count++;
+
+	if (katom->retry_count > BASEP_JD_REPLAY_LIMIT) {
+		dev_err(kbdev->dev, "Replay exceeded limit - failing jobs\n");
+
+		kbase_disjoint_state_down(kbdev);
+
+		/* katom->event_code is already set to the failure code of the
+		 * previous job */
+		return false;
+	}
+
+	/* Enter the disjoint state only once for the entire duration of the replay */
+	if (katom->retry_count == 1)
+		kbase_disjoint_state_up(kbdev);
+
+	INIT_WORK(&katom->work, kbase_replay_process_worker);
+	queue_work(kctx->event_workq, &katom->work);
+
+	return true;
+}
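+
+/*
+ * Illustrative sketch (hypothetical call site): a soft-job handler would
+ * typically use the return value to decide whether the atom has completed:
+ *
+ *	if (kbase_replay_process(katom))
+ *		return;
+ *
+ * A true return means the atom stays queued while the replay jobs run;
+ * otherwise katom->event_code holds the final status.
+ */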
diff --git a/drivers/gpu/arm/midgard/mali_kbase_reset_gpu.h b/drivers/gpu/arm/midgard/mali_kbase_reset_gpu.h
new file mode 100644
index 0000000..df72eec
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_reset_gpu.h
@@ -0,0 +1,139 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_RESET_GPU_H_
+#define _KBASE_RESET_GPU_H_
+
+/**
+ * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU.
+ * @kbdev: Device pointer
+ *
+ * Caller is expected to hold the kbdev->hwaccess_lock.
+ *
+ * Return: a boolean which should be interpreted as follows:
+ * - true  - Prepared for reset, kbase_reset_gpu should be called.
+ * - false - Another thread is performing a reset, kbase_reset_gpu should
+ *           not be called.
+ */
+bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev);
+
+/**
+ * kbase_prepare_to_reset_gpu - Prepare for resetting the GPU.
+ * @kbdev: Device pointer
+ *
+ * Return: a boolean which should be interpreted as follows:
+ * - true  - Prepared for reset, kbase_reset_gpu should be called.
+ * - false - Another thread is performing a reset, kbase_reset_gpu should
+ *           not be called.
+ */
+bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu - Reset the GPU
+ * @kbdev: Device pointer
+ *
+ * This function should be called after kbase_prepare_to_reset_gpu if it returns
+ * true. It should never be called without a corresponding call to
+ * kbase_prepare_to_reset_gpu (only on Job Manager GPUs).
+ *
+ * After this function is called the caller should call kbase_reset_gpu_wait()
+ * to know when the reset has completed.
+ */
+void kbase_reset_gpu(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_locked - Reset the GPU
+ * @kbdev: Device pointer
+ *
+ * This function should be called after kbase_prepare_to_reset_gpu_locked if it
+ * returns true. It should never be called without a corresponding call to
+ * kbase_prepare_to_reset_gpu (only on Job Manager GPUs).
+ * Caller is expected to hold the kbdev->hwaccess_lock.
+ *
+ * After this function is called, the caller should call kbase_reset_gpu_wait()
+ * to know when the reset has completed.
+ */
+void kbase_reset_gpu_locked(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_silent - Reset the GPU silently
+ * @kbdev: Device pointer
+ *
+ * Reset the GPU without trying to cancel jobs (applicable to Job Manager GPUs)
+ * and don't emit messages into the kernel log while doing the reset.
+ *
+ * This function should be used in cases where we are doing a controlled reset
+ * of the GPU as part of normal processing (e.g. exiting protected mode) where
+ * the driver will have ensured the scheduler has been idled and all other
+ * users of the GPU (e.g. instrumentation) have been suspended.
+ *
+ * Return: 0 if the reset was started successfully
+ *         -EAGAIN if another reset is currently in progress
+ */
+int kbase_reset_gpu_silent(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_is_active - Reports if the GPU is being reset
+ * @kbdev: Device pointer
+ *
+ * Return: True if the GPU is in the process of being reset, or if the reset
+ * of the GPU failed (the failure case is not applicable to Job Manager GPUs).
+ */
+bool kbase_reset_gpu_is_active(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_wait - Wait for a GPU reset to complete
+ * @kbdev: Device pointer
+ *
+ * This function may wait indefinitely.
+ *
+ * Return: 0 if successful or a negative error code on failure.
+ */
+int kbase_reset_gpu_wait(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_init - Initialize the GPU reset handling mechanism.
+ * @kbdev: Device pointer
+ *
+ * Return: 0 if successful or a negative error code on failure.
+ */
+int kbase_reset_gpu_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_term - Terminate the GPU reset handling mechanism.
+ * @kbdev: Device pointer
+ */
+void kbase_reset_gpu_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_register_complete_cb - Register the callback function to be
+ *                                        invoked on completion of GPU reset.
+ * @kbdev: Device pointer
+ * @complete_callback: Pointer to the callback function
+ */
+void kbase_reset_gpu_register_complete_cb(struct kbase_device *kbdev,
+			int (*complete_callback)(struct kbase_device *kbdev));
+
+#endif /* _KBASE_RESET_GPU_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_smc.c b/drivers/gpu/arm/midgard/mali_kbase_smc.c
new file mode 100644
index 0000000..3470f58
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_smc.c
@@ -0,0 +1,91 @@
+/*
+ *
+ * (C) COPYRIGHT 2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifdef CONFIG_ARM64
+
+#include <mali_kbase.h>
+#include <mali_kbase_smc.h>
+
+#include <linux/compiler.h>
+
+/* __asmeq is not available in kernel versions >= 4.20 */
+#ifndef __asmeq
+/*
+ * This is used to ensure the compiler did actually allocate the register we
+ * asked it for some inline assembly sequences.  Apparently we can't trust the
+ * compiler from one version to another so a bit of paranoia won't hurt.  This
+ * string is meant to be concatenated with the inline asm string and will
+ * cause compilation to stop on mismatch.  (for details, see gcc PR 15089)
+ */
+#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
+#endif
+
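+/*
+ * Per the SMC Calling Convention, registers x0-x3 carry the function ID and
+ * arguments into the secure monitor; x0 is constrained as "+r" below because
+ * the monitor also returns its result in x0.
+ */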
+static noinline u64 invoke_smc_fid(u64 function_id,
+		u64 arg0, u64 arg1, u64 arg2)
+{
+	register u64 x0 asm("x0") = function_id;
+	register u64 x1 asm("x1") = arg0;
+	register u64 x2 asm("x2") = arg1;
+	register u64 x3 asm("x3") = arg2;
+
+	asm volatile(
+			__asmeq("%0", "x0")
+			__asmeq("%1", "x1")
+			__asmeq("%2", "x2")
+			__asmeq("%3", "x3")
+			"smc    #0\n"
+			: "+r" (x0)
+			: "r" (x1), "r" (x2), "r" (x3));
+
+	return x0;
+}
+
+u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2)
+{
+	/* Is fast call (bit 31 set) */
+	KBASE_DEBUG_ASSERT(fid & SMC_FAST_CALL);
+	/* bits 16-23 must be zero for fast calls */
+	KBASE_DEBUG_ASSERT((fid & (0xFF << 16)) == 0);
+
+	return invoke_smc_fid(fid, arg0, arg1, arg2);
+}
+
+u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64,
+		u64 arg0, u64 arg1, u64 arg2)
+{
+	u32 fid = 0;
+
+	/* Only the six bits allowed should be used. */
+	KBASE_DEBUG_ASSERT((oen & ~SMC_OEN_MASK) == 0);
+
+	fid |= SMC_FAST_CALL; /* Bit 31: Fast call */
+	if (smc64)
+		fid |= SMC_64; /* Bit 30: 1=SMC64, 0=SMC32 */
+	fid |= oen; /* Bit 29:24: OEN */
+	/* Bit 23:16: Must be zero for fast calls */
+	fid |= (function_number); /* Bit 15:0: function number */
+
+	return kbase_invoke_smc_fid(fid, arg0, arg1, arg2);
+}
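+
+/*
+ * Illustrative example (not a call made by this driver): invoking
+ * kbase_invoke_smc(SMC_OEN_SIP, 0x100, true, 0, 0, 0) assembles
+ * fid = 0xC2000100 (fast call | SMC64 | SiP OEN | function 0x100)
+ * and traps to the secure monitor via "smc #0".
+ */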
+
+#endif /* CONFIG_ARM64 */
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_smc.h b/drivers/gpu/arm/midgard/mali_kbase_smc.h
new file mode 100644
index 0000000..221eb21
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_smc.h
@@ -0,0 +1,72 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#ifndef _KBASE_SMC_H_
+#define _KBASE_SMC_H_
+
+#ifdef CONFIG_ARM64
+
+#include <mali_kbase.h>
+
+#define SMC_FAST_CALL (1U << 31)
+#define SMC_64 (1U << 30)
+
+#define SMC_OEN_OFFSET 24
+#define SMC_OEN_MASK (0x3F << SMC_OEN_OFFSET) /* 6 bits */
+#define SMC_OEN_SIP (2 << SMC_OEN_OFFSET)
+#define SMC_OEN_STD (4 << SMC_OEN_OFFSET)
+
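+/*
+ * SMC function identifier layout assembled by kbase_invoke_smc()
+ * (see the SMC Calling Convention):
+ *   bit  31     - fast call
+ *   bit  30     - SMC64 (1) or SMC32 (0)
+ *   bits 29:24  - Owning Entity Number (OEN)
+ *   bits 23:16  - must be zero for fast calls
+ *   bits 15:0   - function number within the OEN
+ */
+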
+
+/**
+ * kbase_invoke_smc_fid - Perform a secure monitor call
+ * @fid: The SMC function to call, see SMC Calling convention.
+ * @arg0: First argument to the SMC.
+ * @arg1: Second argument to the SMC.
+ * @arg2: Third argument to the SMC.
+ *
+ * See SMC Calling Convention for details.
+ *
+ * Return: the return value from the SMC.
+ */
+u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2);
+
+/**
+ * kbase_invoke_smc - Perform a secure monitor call
+ * @oen: Owning Entity number (SIP, STD etc).
+ * @function_number: The function number within the OEN.
+ * @smc64: use SMC64 calling convention instead of SMC32.
+ * @arg0: First argument to the SMC.
+ * @arg1: Second argument to the SMC.
+ * @arg2: Third argument to the SMC.
+ *
+ * See SMC Calling Convention for details.
+ *
+ * Return: the return value from the SMC call.
+ */
+u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64,
+		u64 arg0, u64 arg1, u64 arg2);
+
+#endif /* CONFIG_ARM64 */
+
+#endif /* _KBASE_SMC_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_softjobs.c b/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
new file mode 100644
index 0000000..88773cc
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
@@ -0,0 +1,1712 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+
+#include <linux/dma-buf.h>
+#include <asm/cacheflush.h>
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#include <mali_kbase_sync.h>
+#endif
+#include <linux/dma-mapping.h>
+#include <mali_base_kernel.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_tracepoints.h>
+#include <linux/version.h>
+#include <linux/ktime.h>
+#include <linux/pfn.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/cache.h>
+
+/**
+ * @file mali_kbase_softjobs.c
+ *
+ * This file implements the logic behind software-only jobs that are
+ * executed within the driver rather than being handed over to the GPU.
+ */
+
+static void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	unsigned long lflags;
+
+	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+	list_add_tail(&katom->queue, &kctx->waiting_soft_jobs);
+	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	unsigned long lflags;
+
+	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+	list_del(&katom->queue);
+	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+static void kbasep_add_waiting_with_timeout(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+
+	/* Record the start time of this atom so we can cancel it at
+	 * the right time.
+	 */
+	katom->start_timestamp = ktime_get();
+
+	/* Add the atom to the waiting list before the timer is
+	 * (re)started to make sure that it gets processed.
+	 */
+	kbasep_add_waiting_soft_job(katom);
+
+	/* Arm the soft-job timeout timer if it is not already pending */
+	if (!timer_pending(&kctx->soft_job_timeout)) {
+		int timeout_ms = atomic_read(
+				&kctx->kbdev->js_data.soft_job_timeout_ms);
+		mod_timer(&kctx->soft_job_timeout,
+			  jiffies + msecs_to_jiffies(timeout_ms));
+	}
+}
+
+static int kbasep_read_soft_event_status(
+		struct kbase_context *kctx, u64 evt, unsigned char *status)
+{
+	unsigned char *mapped_evt;
+	struct kbase_vmap_struct map;
+
+	mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
+	if (!mapped_evt)
+		return -EFAULT;
+
+	*status = *mapped_evt;
+
+	kbase_vunmap(kctx, &map);
+
+	return 0;
+}
+
+static int kbasep_write_soft_event_status(
+		struct kbase_context *kctx, u64 evt, unsigned char new_status)
+{
+	unsigned char *mapped_evt;
+	struct kbase_vmap_struct map;
+
+	if ((new_status != BASE_JD_SOFT_EVENT_SET) &&
+	    (new_status != BASE_JD_SOFT_EVENT_RESET))
+		return -EINVAL;
+
+	mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
+	if (!mapped_evt)
+		return -EFAULT;
+
+	*mapped_evt = new_status;
+
+	kbase_vunmap(kctx, &map);
+
+	return 0;
+}
+
+static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
+{
+	struct kbase_vmap_struct map;
+	void *user_result;
+	struct timespec ts;
+	struct base_dump_cpu_gpu_counters data;
+	u64 system_time;
+	u64 cycle_counter;
+	u64 jc = katom->jc;
+	struct kbase_context *kctx = katom->kctx;
+	int pm_active_err;
+
+	memset(&data, 0, sizeof(data));
+
+	/* Take the PM active reference as late as possible - otherwise, it
+	 * could delay suspend until we process the atom (which may be at the
+	 * end of a long chain of dependencies).
+	 */
+	pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev,
+			KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
+	if (pm_active_err) {
+		struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;
+
+		/* We're suspended - queue this on the list of suspended jobs
+		 * Use dep_item[1], because dep_item[0] was previously in use
+		 * for 'waiting_soft_jobs'.
+		 */
+		mutex_lock(&js_devdata->runpool_mutex);
+		list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
+		mutex_unlock(&js_devdata->runpool_mutex);
+
+		/* Also add this to the list of waiting soft jobs */
+		kbasep_add_waiting_soft_job(katom);
+
+		return pm_active_err;
+	}
+
+	kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time,
+									&ts);
+
+	kbase_pm_context_idle(kctx->kbdev);
+
+	data.sec = ts.tv_sec;
+	data.usec = ts.tv_nsec / 1000;
+	data.system_time = system_time;
+	data.cycle_counter = cycle_counter;
+
+	/* Assume this atom will be cancelled until we know otherwise */
+	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+	/* GPU_WR access is checked on the range for returning the result to
+	 * userspace for the following reasons:
+	 * - security: this is currently how imported user buffers are checked.
+	 * - the userspace DDK is guaranteed to assume the region was mapped as
+	 *   GPU_WR.
+	 */
+	user_result = kbase_vmap_prot(kctx, jc, sizeof(data), KBASE_REG_GPU_WR,
+			&map);
+	if (!user_result)
+		return 0;
+
+	memcpy(user_result, &data, sizeof(data));
+
+	kbase_vunmap(kctx, &map);
+
+	/* Atom was fine - mark it as done */
+	katom->event_code = BASE_JD_EVENT_DONE;
+
+	return 0;
+}
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+/* Called by the explicit fence mechanism when a fence wait has completed */
+void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+
+	mutex_lock(&kctx->jctx.lock);
+	kbasep_remove_waiting_soft_job(katom);
+	kbase_finish_soft_job(katom);
+	if (jd_done_nolock(katom, NULL))
+		kbase_js_sched_all(kctx->kbdev);
+	mutex_unlock(&kctx->jctx.lock);
+}
+#endif
+
+static void kbasep_soft_event_complete_job(struct work_struct *work)
+{
+	struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
+			work);
+	struct kbase_context *kctx = katom->kctx;
+	int resched;
+
+	mutex_lock(&kctx->jctx.lock);
+	resched = jd_done_nolock(katom, NULL);
+	mutex_unlock(&kctx->jctx.lock);
+
+	if (resched)
+		kbase_js_sched_all(kctx->kbdev);
+}
+
+void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
+{
+	int cancel_timer = 1;
+	struct list_head *entry, *tmp;
+	unsigned long lflags;
+
+	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+		struct kbase_jd_atom *katom = list_entry(
+				entry, struct kbase_jd_atom, queue);
+
+		switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+		case BASE_JD_REQ_SOFT_EVENT_WAIT:
+			if (katom->jc == evt) {
+				list_del(&katom->queue);
+
+				katom->event_code = BASE_JD_EVENT_DONE;
+				INIT_WORK(&katom->work,
+					  kbasep_soft_event_complete_job);
+				queue_work(kctx->jctx.job_done_wq,
+					   &katom->work);
+			} else {
+				/* There are still other waiting jobs, we cannot
+				 * cancel the timer yet.
+				 */
+				cancel_timer = 0;
+			}
+			break;
+#ifdef CONFIG_MALI_FENCE_DEBUG
+		case BASE_JD_REQ_SOFT_FENCE_WAIT:
+			/* Keep the timer running if fence debug is enabled and
+			 * there are waiting fence jobs.
+			 */
+			cancel_timer = 0;
+			break;
+#endif
+		}
+	}
+
+	if (cancel_timer)
+		del_timer(&kctx->soft_job_timeout);
+	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+static void kbase_fence_debug_check_atom(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct device *dev = kctx->kbdev->dev;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		struct kbase_jd_atom *dep;
+
+		list_for_each_entry(dep, &katom->dep_head[i], dep_item[i]) {
+			if (dep->status == KBASE_JD_ATOM_STATE_UNUSED ||
+			    dep->status == KBASE_JD_ATOM_STATE_COMPLETED)
+				continue;
+
+			if ((dep->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+					== BASE_JD_REQ_SOFT_FENCE_TRIGGER) {
+				/* Found blocked trigger fence. */
+				struct kbase_sync_fence_info info;
+
+				if (!kbase_sync_fence_in_info_get(dep, &info)) {
+					dev_warn(dev,
+						 "\tVictim trigger atom %d fence [%p] %s: %s\n",
+						 kbase_jd_atom_id(kctx, dep),
+						 info.fence,
+						 info.name,
+						 kbase_sync_status_string(info.status));
+				}
+			}
+
+			kbase_fence_debug_check_atom(dep);
+		}
+	}
+}
+
+static void kbase_fence_debug_wait_timeout(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct device *dev = katom->kctx->kbdev->dev;
+	int timeout_ms = atomic_read(&kctx->kbdev->js_data.soft_job_timeout_ms);
+	unsigned long lflags;
+	struct kbase_sync_fence_info info;
+
+	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+
+	if (kbase_sync_fence_in_info_get(katom, &info)) {
+		/* Fence must have signaled just after timeout. */
+		spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+		return;
+	}
+
+	dev_warn(dev, "ctx %d_%d: Atom %d still waiting for fence [%p] after %dms\n",
+		 kctx->tgid, kctx->id,
+		 kbase_jd_atom_id(kctx, katom),
+		 info.fence, timeout_ms);
+	dev_warn(dev, "\tGuilty fence [%p] %s: %s\n",
+		 info.fence, info.name,
+		 kbase_sync_status_string(info.status));
+
+	/* Search for blocked trigger atoms */
+	kbase_fence_debug_check_atom(katom);
+
+	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+
+	kbase_sync_fence_in_dump(katom);
+}
+
+struct kbase_fence_debug_work {
+	struct kbase_jd_atom *katom;
+	struct work_struct work;
+};
+
+static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)
+{
+	struct kbase_fence_debug_work *w = container_of(work,
+			struct kbase_fence_debug_work, work);
+	struct kbase_jd_atom *katom = w->katom;
+	struct kbase_context *kctx = katom->kctx;
+
+	mutex_lock(&kctx->jctx.lock);
+	kbase_fence_debug_wait_timeout(katom);
+	mutex_unlock(&kctx->jctx.lock);
+
+	kfree(w);
+}
+
+static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
+{
+	struct kbase_fence_debug_work *work;
+	struct kbase_context *kctx = katom->kctx;
+
+	/* Enqueue fence debug worker. Use job_done_wq to get
+	 * debug print ordered with job completion.
+	 */
+	work = kzalloc(sizeof(struct kbase_fence_debug_work), GFP_ATOMIC);
+	/* Ignore allocation failure. */
+	if (work) {
+		work->katom = katom;
+		INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker);
+		queue_work(kctx->jctx.job_done_wq, &work->work);
+	}
+}
+#endif /* CONFIG_MALI_FENCE_DEBUG */
+
+void kbasep_soft_job_timeout_worker(struct timer_list *timer)
+{
+	struct kbase_context *kctx = container_of(timer, struct kbase_context,
+			soft_job_timeout);
+	u32 timeout_ms = (u32)atomic_read(
+			&kctx->kbdev->js_data.soft_job_timeout_ms);
+	ktime_t cur_time = ktime_get();
+	bool restarting = false;
+	unsigned long lflags;
+	struct list_head *entry, *tmp;
+
+	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+		struct kbase_jd_atom *katom = list_entry(entry,
+				struct kbase_jd_atom, queue);
+		s64 elapsed_time = ktime_to_ms(ktime_sub(cur_time,
+					katom->start_timestamp));
+
+		if (elapsed_time < (s64)timeout_ms) {
+			restarting = true;
+			continue;
+		}
+
+		switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+		case BASE_JD_REQ_SOFT_EVENT_WAIT:
+			/* Take it out of the list to ensure that it
+			 * will be cancelled in all cases
+			 */
+			list_del(&katom->queue);
+
+			katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+			INIT_WORK(&katom->work, kbasep_soft_event_complete_job);
+			queue_work(kctx->jctx.job_done_wq, &katom->work);
+			break;
+#ifdef CONFIG_MALI_FENCE_DEBUG
+		case BASE_JD_REQ_SOFT_FENCE_WAIT:
+			kbase_fence_debug_timeout(katom);
+			break;
+#endif
+		}
+	}
+
+	if (restarting)
+		mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms));
+	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+static int kbasep_soft_event_wait(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	unsigned char status;
+
+	/* The status of this soft-job is stored in jc */
+	if (kbasep_read_soft_event_status(kctx, katom->jc, &status)) {
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		return 0;
+	}
+
+	if (status == BASE_JD_SOFT_EVENT_SET)
+		return 0; /* Event already set, nothing to do */
+
+	kbasep_add_waiting_with_timeout(katom);
+
+	return 1;
+}
+
+static void kbasep_soft_event_update_locked(struct kbase_jd_atom *katom,
+				     unsigned char new_status)
+{
+	/* Complete jobs waiting on the same event */
+	struct kbase_context *kctx = katom->kctx;
+
+	if (kbasep_write_soft_event_status(kctx, katom->jc, new_status) != 0) {
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		return;
+	}
+
+	if (new_status == BASE_JD_SOFT_EVENT_SET)
+		kbasep_complete_triggered_soft_events(kctx, katom->jc);
+}
+
+/**
+ * kbase_soft_event_update() - Update soft event state
+ * @kctx: Pointer to context
+ * @event: Event to update
+ * @new_status: New status value of event
+ *
+ * Update the event, and wake up any atoms waiting for the event.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int kbase_soft_event_update(struct kbase_context *kctx,
+			     u64 event,
+			     unsigned char new_status)
+{
+	int err = 0;
+
+	mutex_lock(&kctx->jctx.lock);
+
+	if (kbasep_write_soft_event_status(kctx, event, new_status)) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	if (new_status == BASE_JD_SOFT_EVENT_SET)
+		kbasep_complete_triggered_soft_events(kctx, event);
+
+out:
+	mutex_unlock(&kctx->jctx.lock);
+
+	return err;
+}
+
+static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
+{
+	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+	if (jd_done_nolock(katom, NULL))
+		kbase_js_sched_all(katom->kctx->kbdev);
+}
+
+static void kbase_debug_copy_finish(struct kbase_jd_atom *katom)
+{
+	struct kbase_debug_copy_buffer *buffers = katom->softjob_data;
+	unsigned int i;
+	unsigned int nr = katom->nr_extres;
+
+	if (!buffers)
+		return;
+
+	kbase_gpu_vm_lock(katom->kctx);
+	for (i = 0; i < nr; i++) {
+		int p;
+		struct kbase_mem_phy_alloc *gpu_alloc = buffers[i].gpu_alloc;
+
+		if (!buffers[i].pages)
+			break;
+		for (p = 0; p < buffers[i].nr_pages; p++) {
+			struct page *pg = buffers[i].pages[p];
+
+			if (pg)
+				put_page(pg);
+		}
+		if (buffers[i].is_vmalloc)
+			vfree(buffers[i].pages);
+		else
+			kfree(buffers[i].pages);
+		if (gpu_alloc) {
+			switch (gpu_alloc->type) {
+			case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+			{
+				kbase_free_user_buffer(&buffers[i]);
+				break;
+			}
+			default:
+				/* Nothing to be done. */
+				break;
+			}
+			kbase_mem_phy_alloc_put(gpu_alloc);
+		}
+	}
+	kbase_gpu_vm_unlock(katom->kctx);
+	kfree(buffers);
+
+	katom->softjob_data = NULL;
+}
+
+static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
+{
+	struct kbase_debug_copy_buffer *buffers;
+	struct base_jd_debug_copy_buffer *user_buffers = NULL;
+	unsigned int i;
+	unsigned int nr = katom->nr_extres;
+	int ret = 0;
+	void __user *user_structs = (void __user *)(uintptr_t)katom->jc;
+
+	if (!user_structs)
+		return -EINVAL;
+
+	buffers = kcalloc(nr, sizeof(*buffers), GFP_KERNEL);
+	if (!buffers) {
+		ret = -ENOMEM;
+		goto out_cleanup;
+	}
+	katom->softjob_data = buffers;
+
+	user_buffers = kmalloc_array(nr, sizeof(*user_buffers), GFP_KERNEL);
+
+	if (!user_buffers) {
+		ret = -ENOMEM;
+		goto out_cleanup;
+	}
+
+	ret = copy_from_user(user_buffers, user_structs,
+			sizeof(*user_buffers)*nr);
+	if (ret) {
+		ret = -EFAULT;
+		goto out_cleanup;
+	}
+
+	for (i = 0; i < nr; i++) {
+		u64 addr = user_buffers[i].address;
+		u64 page_addr = addr & PAGE_MASK;
+		u64 end_page_addr = addr + user_buffers[i].size - 1;
+		u64 last_page_addr = end_page_addr & PAGE_MASK;
+		int nr_pages = (last_page_addr - page_addr) / PAGE_SIZE + 1;
+		int pinned_pages;
+		struct kbase_va_region *reg;
+		struct base_external_resource user_extres;
+
+		if (!addr)
+			continue;
+
+		if (last_page_addr < page_addr) {
+			ret = -EINVAL;
+			goto out_cleanup;
+		}
+
+		buffers[i].nr_pages = nr_pages;
+		buffers[i].offset = addr & ~PAGE_MASK;
+		if (buffers[i].offset >= PAGE_SIZE) {
+			ret = -EINVAL;
+			goto out_cleanup;
+		}
+		buffers[i].size = user_buffers[i].size;
+
+		if (nr_pages > (KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD /
+				sizeof(struct page *))) {
+			buffers[i].is_vmalloc = true;
+			buffers[i].pages = vzalloc(nr_pages *
+					sizeof(struct page *));
+		} else {
+			buffers[i].is_vmalloc = false;
+			buffers[i].pages = kcalloc(nr_pages,
+					sizeof(struct page *), GFP_KERNEL);
+		}
+
+		if (!buffers[i].pages) {
+			ret = -ENOMEM;
+			goto out_cleanup;
+		}
+
+		pinned_pages = get_user_pages_fast(page_addr,
+					nr_pages,
+					1, /* Write */
+					buffers[i].pages);
+		if (pinned_pages < 0) {
+			/* get_user_pages_fast has failed - page array is not
+			 * valid. Don't try to release any pages.
+			 */
+			buffers[i].nr_pages = 0;
+
+			ret = pinned_pages;
+			goto out_cleanup;
+		}
+		if (pinned_pages != nr_pages) {
+			/* Adjust number of pages, so that we only attempt to
+			 * release pages in the array that we know are valid.
+			 */
+			buffers[i].nr_pages = pinned_pages;
+
+			ret = -EINVAL;
+			goto out_cleanup;
+		}
+
+		user_extres = user_buffers[i].extres;
+		if (user_extres.ext_resource == 0ULL) {
+			ret = -EINVAL;
+			goto out_cleanup;
+		}
+
+		kbase_gpu_vm_lock(katom->kctx);
+		reg = kbase_region_tracker_find_region_enclosing_address(
+				katom->kctx, user_extres.ext_resource &
+				~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+
+		if (kbase_is_region_invalid_or_free(reg) ||
+		    reg->gpu_alloc == NULL) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		buffers[i].gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+		buffers[i].nr_extres_pages = reg->nr_pages;
+
+		if (reg->nr_pages*PAGE_SIZE != buffers[i].size)
+			dev_warn(katom->kctx->kbdev->dev, "Copy buffer is not of same size as the external resource to copy.\n");
+
+		switch (reg->gpu_alloc->type) {
+		case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+		{
+			struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
+			unsigned long nr_pages =
+				alloc->imported.user_buf.nr_pages;
+
+			if (alloc->imported.user_buf.mm != current->mm) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+			buffers[i].extres_pages = kcalloc(nr_pages,
+					sizeof(struct page *), GFP_KERNEL);
+			if (!buffers[i].extres_pages) {
+				ret = -ENOMEM;
+				goto out_unlock;
+			}
+
+			ret = get_user_pages_fast(
+					alloc->imported.user_buf.address,
+					nr_pages, 0,
+					buffers[i].extres_pages);
+			if (ret != nr_pages) {
+				/* Adjust number of pages, so that we only
+				 * attempt to release pages in the array that we
+				 * know are valid.
+				 */
+				if (ret < 0)
+					buffers[i].nr_extres_pages = 0;
+				else
+					buffers[i].nr_extres_pages = ret;
+
+				goto out_unlock;
+			}
+			ret = 0;
+			break;
+		}
+		default:
+			/* Nothing to be done. */
+			break;
+		}
+		kbase_gpu_vm_unlock(katom->kctx);
+	}
+	kfree(user_buffers);
+
+	return ret;
+
+out_unlock:
+	kbase_gpu_vm_unlock(katom->kctx);
+
+out_cleanup:
+	/* Frees allocated memory for kbase_debug_copy_job struct, including
+	 * members, and sets katom->softjob_data to NULL.
+	 */
+	kbase_debug_copy_finish(katom);
+	kfree(user_buffers);
+
+	return ret;
+}
+
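+/*
+ * Copy one external-resource page into the destination page array, handling
+ * a destination window that starts part-way through a page: the first
+ * (PAGE_SIZE - offset) bytes land at @offset within the current target page
+ * and the remaining bytes spill into the start of the next target page.
+ */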
+void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
+		void *extres_page, struct page **pages, unsigned int nr_pages,
+		unsigned int *target_page_nr, size_t offset, size_t *to_copy)
+{
+	void *target_page = kmap(pages[*target_page_nr]);
+	size_t chunk = PAGE_SIZE-offset;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	if (!target_page) {
+		*target_page_nr += 1;
+		dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
+		return;
+	}
+
+	chunk = min(chunk, *to_copy);
+
+	memcpy(target_page + offset, extres_page, chunk);
+	*to_copy -= chunk;
+
+	kunmap(pages[*target_page_nr]);
+
+	*target_page_nr += 1;
+	if (*target_page_nr >= nr_pages)
+		return;
+
+	target_page = kmap(pages[*target_page_nr]);
+	if (!target_page) {
+		*target_page_nr += 1;
+		dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
+		return;
+	}
+
+	chunk = min(offset, *to_copy);
+	memcpy(target_page, extres_page + PAGE_SIZE-offset, chunk);
+	*to_copy -= chunk;
+
+	kunmap(pages[*target_page_nr]);
+}
+
+int kbase_mem_copy_from_extres(struct kbase_context *kctx,
+		struct kbase_debug_copy_buffer *buf_data)
+{
+	unsigned int i;
+	unsigned int target_page_nr = 0;
+	struct page **pages = buf_data->pages;
+	u64 offset = buf_data->offset;
+	size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE;
+	size_t to_copy = min(extres_size, buf_data->size);
+	struct kbase_mem_phy_alloc *gpu_alloc = buf_data->gpu_alloc;
+	int ret = 0;
+	size_t dma_to_copy;
+
+	KBASE_DEBUG_ASSERT(pages != NULL);
+
+	kbase_gpu_vm_lock(kctx);
+	if (!gpu_alloc) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	switch (gpu_alloc->type) {
+	case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+	{
+		for (i = 0; i < buf_data->nr_extres_pages; i++) {
+			struct page *pg = buf_data->extres_pages[i];
+			void *extres_page = kmap(pg);
+
+			if (extres_page)
+				kbase_mem_copy_from_extres_page(kctx,
+						extres_page, pages,
+						buf_data->nr_pages,
+						&target_page_nr,
+						offset, &to_copy);
+
+			kunmap(pg);
+			if (target_page_nr >= buf_data->nr_pages)
+				break;
+		}
+		break;
+	}
+	case KBASE_MEM_TYPE_IMPORTED_UMM: {
+		struct dma_buf *dma_buf = gpu_alloc->imported.umm.dma_buf;
+
+		KBASE_DEBUG_ASSERT(dma_buf != NULL);
+		if (dma_buf->size > buf_data->nr_extres_pages * PAGE_SIZE)
+			dev_warn(kctx->kbdev->dev, "External resources buffer size mismatch");
+
+		dma_to_copy = min(dma_buf->size,
+			(size_t)(buf_data->nr_extres_pages * PAGE_SIZE));
+		ret = dma_buf_begin_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
+				0, dma_to_copy,
+#endif
+				DMA_FROM_DEVICE);
+		if (ret)
+			goto out_unlock;
+
+		for (i = 0; i < dma_to_copy / PAGE_SIZE; i++) {
+			void *extres_page = dma_buf_kmap(dma_buf, i);
+
+			if (extres_page)
+				kbase_mem_copy_from_extres_page(kctx,
+						extres_page, pages,
+						buf_data->nr_pages,
+						&target_page_nr,
+						offset, &to_copy);
+
+			dma_buf_kunmap(dma_buf, i, extres_page);
+			if (target_page_nr >= buf_data->nr_pages)
+				break;
+		}
+		dma_buf_end_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
+				0, dma_to_copy,
+#endif
+				DMA_FROM_DEVICE);
+		break;
+	}
+	default:
+		ret = -EINVAL;
+	}
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return ret;
+}
+
+static int kbase_debug_copy(struct kbase_jd_atom *katom)
+{
+	struct kbase_debug_copy_buffer *buffers = katom->softjob_data;
+	unsigned int i;
+
+	if (WARN_ON(!buffers))
+		return -EINVAL;
+
+	for (i = 0; i < katom->nr_extres; i++) {
+		int res = kbase_mem_copy_from_extres(katom->kctx, &buffers[i]);
+
+		if (res)
+			return res;
+	}
+
+	return 0;
+}
+
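+/* Mask of the low address bits that must be clear: JIT allocations require
+ * info->gpu_alloc_addr to be 8-byte aligned.
+ */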
+#define KBASEP_JIT_ALLOC_GPU_ADDR_ALIGNMENT ((u32)0x7)
+
+int kbasep_jit_alloc_validate(struct kbase_context *kctx,
+					struct base_jit_alloc_info *info)
+{
+	/* If the ID is zero, then fail the job */
+	if (info->id == 0)
+		return -EINVAL;
+
+	/* Sanity check that the PA fits within the VA */
+	if (info->va_pages < info->commit_pages)
+		return -EINVAL;
+
+	/* Ensure the GPU address is correctly aligned */
+	if ((info->gpu_alloc_addr & KBASEP_JIT_ALLOC_GPU_ADDR_ALIGNMENT) != 0)
+		return -EINVAL;
+
+	if (kctx->jit_version == 1) {
+		/* Old JIT didn't have usage_id, max_allocations, bin_id
+		 * or padding, so force them to zero
+		 */
+		info->usage_id = 0;
+		info->max_allocations = 0;
+		info->bin_id = 0;
+		info->flags = 0;
+		memset(info->padding, 0, sizeof(info->padding));
+	} else {
+		int j;
+
+		/* Check padding is all zeroed */
+		for (j = 0; j < sizeof(info->padding); j++) {
+			if (info->padding[j] != 0)
+				return -EINVAL;
+		}
+
+		/* No bit other than TILER_ALIGN_TOP shall be set */
+		if (info->flags & ~BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
+{
+	__user void *data = (__user void *)(uintptr_t) katom->jc;
+	struct base_jit_alloc_info *info;
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+	u32 count;
+	int ret;
+	u32 i;
+
+	/* For backwards compatibility */
+	if (katom->nr_extres == 0)
+		katom->nr_extres = 1;
+	count = katom->nr_extres;
+
+	/* Sanity checks */
+	if (!data || count > kctx->jit_max_allocations ||
+			count > ARRAY_SIZE(kctx->jit_alloc)) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Copy the information for safe access and future storage */
+	info = kmalloc_array(count, sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	if (copy_from_user(info, data, sizeof(*info)*count) != 0) {
+		ret = -EINVAL;
+		goto free_info;
+	}
+	katom->softjob_data = info;
+
+	for (i = 0; i < count; i++, info++) {
+		ret = kbasep_jit_alloc_validate(kctx, info);
+		if (ret)
+			goto free_info;
+		KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO(kbdev, katom,
+			info->va_pages, info->commit_pages, info->extent,
+			info->id, info->bin_id, info->max_allocations,
+			info->flags, info->usage_id);
+	}
+
+	katom->jit_blocked = false;
+
+	lockdep_assert_held(&kctx->jctx.lock);
+	list_add_tail(&katom->jit_node, &kctx->jit_atoms_head);
+
+	/*
+	 * Note:
+	 * The provided info->gpu_alloc_addr isn't validated here as
+	 * userland can cache allocations which means that even
+	 * though the region is valid it doesn't represent the
+	 * same thing it used to.
+	 *
+	 * Complete validation of va_pages, commit_pages and extent
+	 * isn't done here as it will be done during the call to
+	 * kbase_mem_alloc.
+	 */
+	return 0;
+
+free_info:
+	kfree(katom->softjob_data);
+	katom->softjob_data = NULL;
+fail:
+	return ret;
+}
+
+static u8 *kbase_jit_free_get_ids(struct kbase_jd_atom *katom)
+{
+	if (WARN_ON((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) !=
+				BASE_JD_REQ_SOFT_JIT_FREE))
+		return NULL;
+
+	return (u8 *) katom->softjob_data;
+}
+
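+/* Insert the blocked atom into kctx->jit_pending_alloc, keeping the list
+ * ordered by katom->age (ascending), so that atoms submitted earliest are
+ * retried first once a JIT free releases memory.
+ */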
+static void kbase_jit_add_to_pending_alloc_list(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct list_head *target_list_head = NULL;
+	struct kbase_jd_atom *entry;
+
+	list_for_each_entry(entry, &kctx->jit_pending_alloc, queue) {
+		if (katom->age < entry->age) {
+			target_list_head = &entry->queue;
+			break;
+		}
+	}
+
+	if (target_list_head == NULL)
+		target_list_head = &kctx->jit_pending_alloc;
+
+	list_add_tail(&katom->queue, target_list_head);
+}
+
+static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+	struct base_jit_alloc_info *info;
+	struct kbase_va_region *reg;
+	struct kbase_vmap_struct mapping;
+	u64 *ptr, new_addr;
+	u32 count = katom->nr_extres;
+	u32 i;
+
+	if (katom->jit_blocked) {
+		list_del(&katom->queue);
+		katom->jit_blocked = false;
+	}
+
+	info = katom->softjob_data;
+	if (WARN_ON(!info)) {
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		return 0;
+	}
+
+	for (i = 0; i < count; i++, info++) {
+		/* The JIT ID is still in use so fail the allocation */
+		if (kctx->jit_alloc[info->id]) {
+			katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+			return 0;
+		}
+	}
+
+	for (i = 0, info = katom->softjob_data; i < count; i++, info++) {
+		if (kctx->jit_alloc[info->id]) {
+			/* The JIT ID is duplicated in this atom. Roll back
+			 * previous allocations and fail.
+			 */
+			u32 j;
+
+			info = katom->softjob_data;
+			for (j = 0; j < i; j++, info++) {
+				kbase_jit_free(kctx, kctx->jit_alloc[info->id]);
+				kctx->jit_alloc[info->id] =
+						(struct kbase_va_region *) -1;
+			}
+
+			katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+			return 0;
+		}
+
+		/* Create a JIT allocation */
+		reg = kbase_jit_allocate(kctx, info);
+		if (!reg) {
+			struct kbase_jd_atom *jit_atom;
+			bool can_block = false;
+
+			lockdep_assert_held(&kctx->jctx.lock);
+
+			list_for_each_entry(jit_atom, &kctx->jit_atoms_head, jit_node) {
+				if (jit_atom == katom)
+					break;
+
+				if ((jit_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
+						BASE_JD_REQ_SOFT_JIT_FREE) {
+					u8 *free_ids = kbase_jit_free_get_ids(jit_atom);
+
+					if (free_ids && *free_ids &&
+						kctx->jit_alloc[*free_ids]) {
+						/* A JIT free which is active and
+						 * submitted before this atom
+						 */
+						can_block = true;
+						break;
+					}
+				}
+			}
+
+			if (!can_block) {
+				/* Mark the failed allocation as well as the
+				 * other un-attempted allocations in the set,
+				 * so we know they are in use even if the
+				 * allocation itself failed.
+				 */
+				for (; i < count; i++, info++) {
+					kctx->jit_alloc[info->id] =
+						(struct kbase_va_region *) -1;
+				}
+
+				katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+				dev_warn_ratelimited(kbdev->dev, "JIT alloc softjob failed: atom id %d\n",
+						     kbase_jd_atom_id(kctx, katom));
+				return 0;
+			}
+
+			/* There are pending frees for an active allocation
+			 * so we should wait to see whether they free the
+			 * memory. Add to the list of atoms for which JIT
+			 * allocation is pending.
+			 */
+			kbase_jit_add_to_pending_alloc_list(katom);
+			katom->jit_blocked = true;
+
+			/* Rollback, the whole set will be re-attempted */
+			while (i-- > 0) {
+				info--;
+				kbase_jit_free(kctx, kctx->jit_alloc[info->id]);
+				kctx->jit_alloc[info->id] = NULL;
+			}
+
+			return 1;
+		}
+
+		/* Bind it to the user provided ID. */
+		kctx->jit_alloc[info->id] = reg;
+	}
+
+	for (i = 0, info = katom->softjob_data; i < count; i++, info++) {
+		u64 entry_mmu_flags = 0;
+		/*
+		 * Write the address of the JIT allocation to the user provided
+		 * GPU allocation.
+		 */
+		ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
+				&mapping);
+		if (!ptr) {
+			/*
+			 * Leave the allocations "live" as the JIT free atom
+			 * will be submitted anyway.
+			 */
+			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+			return 0;
+		}
+
+		reg = kctx->jit_alloc[info->id];
+		new_addr = reg->start_pfn << PAGE_SHIFT;
+		*ptr = new_addr;
+
+#if defined(CONFIG_MALI_VECTOR_DUMP)
+		/*
+		 * Retrieve the mmu flags for JIT allocation
+		 * only if dumping is enabled
+		 */
+		entry_mmu_flags = kbase_mmu_create_ate(kbdev,
+			(struct tagged_addr){ 0 }, reg->flags,
+			 MIDGARD_MMU_BOTTOMLEVEL, kctx->jit_group_id);
+#endif
+
+		KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(kbdev, katom,
+			info->gpu_alloc_addr, new_addr, info->flags,
+			entry_mmu_flags, info->id, info->commit_pages,
+			info->extent, info->va_pages);
+		kbase_vunmap(kctx, &mapping);
+	}
+
+	katom->event_code = BASE_JD_EVENT_DONE;
+
+	return 0;
+}
+
+static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
+{
+	struct base_jit_alloc_info *info;
+
+	lockdep_assert_held(&katom->kctx->jctx.lock);
+
+	if (WARN_ON(!katom->softjob_data))
+		return;
+
+	/* Remove atom from jit_atoms_head list */
+	list_del(&katom->jit_node);
+
+	if (katom->jit_blocked) {
+		list_del(&katom->queue);
+		katom->jit_blocked = false;
+	}
+
+	info = katom->softjob_data;
+	/* Free the info structure */
+	kfree(info);
+}
+
+static int kbase_jit_free_prepare(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+	__user void *data = (__user void *)(uintptr_t) katom->jc;
+	u8 *ids;
+	u32 count = MAX(katom->nr_extres, 1);
+	u32 i;
+	int ret;
+
+	/* Sanity checks */
+	if (count > ARRAY_SIZE(kctx->jit_alloc)) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Copy the information for safe access and future storage */
+	ids = kmalloc_array(count, sizeof(*ids), GFP_KERNEL);
+	if (!ids) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	lockdep_assert_held(&kctx->jctx.lock);
+	katom->softjob_data = ids;
+
+	/* For backwards compatibility */
+	if (katom->nr_extres) {
+		/* Fail the job if there is no list of ids */
+		if (!data) {
+			ret = -EINVAL;
+			goto free_info;
+		}
+
+		if (copy_from_user(ids, data, sizeof(*ids)*count) != 0) {
+			ret = -EINVAL;
+			goto free_info;
+		}
+	} else {
+		katom->nr_extres = 1;
+		*ids = (u8)katom->jc;
+	}
+	for (i = 0; i < count; i++)
+		KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO(kbdev, katom, ids[i]);
+
+	list_add_tail(&katom->jit_node, &kctx->jit_atoms_head);
+
+	return 0;
+
+free_info:
+	kfree(katom->softjob_data);
+	katom->softjob_data = NULL;
+fail:
+	return ret;
+}
+
+static void kbase_jit_free_process(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	u8 *ids = kbase_jit_free_get_ids(katom);
+	u32 count = katom->nr_extres;
+	u32 i;
+
+	if (ids == NULL) {
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		return;
+	}
+
+	for (i = 0; i < count; i++, ids++) {
+		/*
+		 * If the ID is zero or it is not in use yet then fail the job.
+		 */
+		if ((*ids == 0) || (kctx->jit_alloc[*ids] == NULL)) {
+			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+			return;
+		}
+	}
+}
+
+static void kbasep_jit_free_finish_worker(struct work_struct *work)
+{
+	struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
+			work);
+	struct kbase_context *kctx = katom->kctx;
+	int resched;
+
+	mutex_lock(&kctx->jctx.lock);
+	kbase_finish_soft_job(katom);
+	resched = jd_done_nolock(katom, NULL);
+	mutex_unlock(&kctx->jctx.lock);
+
+	if (resched)
+		kbase_js_sched_all(kctx->kbdev);
+}
+
+static void kbase_jit_free_finish(struct kbase_jd_atom *katom)
+{
+	struct list_head *i, *tmp;
+	struct kbase_context *kctx = katom->kctx;
+	LIST_HEAD(jit_pending_alloc_list);
+	u8 *ids;
+	size_t j;
+
+	lockdep_assert_held(&kctx->jctx.lock);
+
+	ids = kbase_jit_free_get_ids(katom);
+	if (WARN_ON(ids == NULL))
+		return;
+
+	/* Remove this atom from the kctx->jit_atoms_head list */
+	list_del(&katom->jit_node);
+
+	for (j = 0; j != katom->nr_extres; ++j) {
+		if ((ids[j] != 0) && (kctx->jit_alloc[ids[j]] != NULL)) {
+			/*
+			 * If the ID is valid but the allocation request
+			 * failed, still succeed this soft job but don't try
+			 * to free the allocation.
+			 */
+			if (kctx->jit_alloc[ids[j]] != (struct kbase_va_region *) -1) {
+				KBASE_TLSTREAM_TL_JIT_USEDPAGES(kctx->kbdev,
+					kctx->jit_alloc[ids[j]]->
+					gpu_alloc->nents, ids[j]);
+				kbase_jit_free(kctx, kctx->jit_alloc[ids[j]]);
+			}
+			kctx->jit_alloc[ids[j]] = NULL;
+		}
+	}
+	/* Free the list of ids */
+	kfree(ids);
+
+	list_splice_tail_init(&kctx->jit_pending_alloc, &jit_pending_alloc_list);
+
+	list_for_each_safe(i, tmp, &jit_pending_alloc_list) {
+		struct kbase_jd_atom *pending_atom = list_entry(i,
+				struct kbase_jd_atom, queue);
+		if (kbase_jit_allocate_process(pending_atom) == 0) {
+			/* Atom has completed */
+			INIT_WORK(&pending_atom->work,
+					kbasep_jit_free_finish_worker);
+			queue_work(kctx->jctx.job_done_wq, &pending_atom->work);
+		}
+	}
+}
+
+static int kbase_ext_res_prepare(struct kbase_jd_atom *katom)
+{
+	__user struct base_external_resource_list *user_ext_res;
+	struct base_external_resource_list *ext_res;
+	u64 count = 0;
+	size_t copy_size;
+	int ret;
+
+	user_ext_res = (__user struct base_external_resource_list *)
+			(uintptr_t) katom->jc;
+
+	/* Fail the job if there is no info structure */
+	if (!user_ext_res) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (copy_from_user(&count, &user_ext_res->count, sizeof(u64)) != 0) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Is the number of external resources in range? */
+	if (!count || count > BASE_EXT_RES_COUNT_MAX) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Copy the information for safe access and future storage */
+	copy_size = sizeof(*ext_res);
+	copy_size += sizeof(struct base_external_resource) * (count - 1);
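+	/* (count - 1) entries because the base_external_resource_list
+	 * declaration already carries storage for one ext_res[] element.
+	 */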
+	ext_res = kzalloc(copy_size, GFP_KERNEL);
+	if (!ext_res) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (copy_from_user(ext_res, user_ext_res, copy_size) != 0) {
+		ret = -EINVAL;
+		goto free_info;
+	}
+
+	/*
+	 * Overwrite the count with the value read first, in case it was
+	 * changed after the fact.
+	 */
+	ext_res->count = count;
+
+	katom->softjob_data = ext_res;
+
+	return 0;
+
+free_info:
+	kfree(ext_res);
+fail:
+	return ret;
+}
+
+static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map)
+{
+	struct base_external_resource_list *ext_res;
+	int i;
+	bool failed = false;
+
+	ext_res = katom->softjob_data;
+	if (!ext_res)
+		goto failed_jc;
+
+	kbase_gpu_vm_lock(katom->kctx);
+
+	for (i = 0; i < ext_res->count; i++) {
+		u64 gpu_addr;
+
+		gpu_addr = ext_res->ext_res[i].ext_resource &
+				~BASE_EXT_RES_ACCESS_EXCLUSIVE;
+		if (map) {
+			if (!kbase_sticky_resource_acquire(katom->kctx,
+					gpu_addr))
+				goto failed_loop;
+		} else {
+			if (!kbase_sticky_resource_release(katom->kctx, NULL,
+					gpu_addr))
+				failed = true;
+		}
+	}
+
+	/*
+	 * For unmap requests we carry on unmapping the remaining resources
+	 * even if one fails, but we always report failure if _any_ unmap
+	 * request failed.
+	 */
+	if (failed)
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+	else
+		katom->event_code = BASE_JD_EVENT_DONE;
+
+	kbase_gpu_vm_unlock(katom->kctx);
+
+	return;
+
+failed_loop:
+	while (i > 0) {
+		u64 const gpu_addr = ext_res->ext_res[i - 1].ext_resource &
+				~BASE_EXT_RES_ACCESS_EXCLUSIVE;
+
+		kbase_sticky_resource_release(katom->kctx, NULL, gpu_addr);
+
+		--i;
+	}
+
+	katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+	kbase_gpu_vm_unlock(katom->kctx);
+
+failed_jc:
+	return;
+}
+
+static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
+{
+	struct base_external_resource_list *ext_res;
+
+	ext_res = katom->softjob_data;
+	/* Free the info structure */
+	kfree(ext_res);
+}
+
+int kbase_process_soft_job(struct kbase_jd_atom *katom)
+{
+	int ret = 0;
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+
+	KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START(kbdev, katom);
+
+	switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+	case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
+		ret = kbase_dump_cpu_gpu_time(katom);
+		break;
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+		katom->event_code = kbase_sync_fence_out_trigger(katom,
+				katom->event_code == BASE_JD_EVENT_DONE ?
+								0 : -EFAULT);
+		break;
+	case BASE_JD_REQ_SOFT_FENCE_WAIT:
+	{
+		ret = kbase_sync_fence_in_wait(katom);
+
+		if (ret == 1) {
+#ifdef CONFIG_MALI_FENCE_DEBUG
+			kbasep_add_waiting_with_timeout(katom);
+#else
+			kbasep_add_waiting_soft_job(katom);
+#endif
+		}
+		break;
+	}
+#endif
+	case BASE_JD_REQ_SOFT_EVENT_WAIT:
+		ret = kbasep_soft_event_wait(katom);
+		break;
+	case BASE_JD_REQ_SOFT_EVENT_SET:
+		kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET);
+		break;
+	case BASE_JD_REQ_SOFT_EVENT_RESET:
+		kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_RESET);
+		break;
+	case BASE_JD_REQ_SOFT_DEBUG_COPY:
+	{
+		int res = kbase_debug_copy(katom);
+
+		if (res)
+			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		break;
+	}
+	case BASE_JD_REQ_SOFT_JIT_ALLOC:
+		ret = kbase_jit_allocate_process(katom);
+		break;
+	case BASE_JD_REQ_SOFT_JIT_FREE:
+		kbase_jit_free_process(katom);
+		break;
+	case BASE_JD_REQ_SOFT_EXT_RES_MAP:
+		kbase_ext_res_process(katom, true);
+		break;
+	case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
+		kbase_ext_res_process(katom, false);
+		break;
+	}
+
+	/* Atom is complete */
+	KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END(kbdev, katom);
+	return ret;
+}
+
+void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
+{
+	switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	case BASE_JD_REQ_SOFT_FENCE_WAIT:
+		kbase_sync_fence_in_cancel_wait(katom);
+		break;
+#endif
+	case BASE_JD_REQ_SOFT_EVENT_WAIT:
+		kbasep_soft_event_cancel_job(katom);
+		break;
+	default:
+		/* This soft-job doesn't support cancellation! */
+		KBASE_DEBUG_ASSERT(0);
+	}
+}
+
+int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
+{
+	switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+	case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
+		{
+			if (!IS_ALIGNED(katom->jc, cache_line_size()))
+				return -EINVAL;
+		}
+		break;
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+		{
+			struct base_fence fence;
+			int fd;
+
+			if (copy_from_user(&fence,
+					(__user void *)(uintptr_t) katom->jc,
+					sizeof(fence)) != 0)
+				return -EINVAL;
+
+			fd = kbase_sync_fence_out_create(katom,
+							 fence.basep.stream_fd);
+			if (fd < 0)
+				return -EINVAL;
+
+			fence.basep.fd = fd;
+			if (copy_to_user((__user void *)(uintptr_t) katom->jc,
+					&fence, sizeof(fence)) != 0) {
+				kbase_sync_fence_out_remove(katom);
+				kbase_sync_fence_close_fd(fd);
+				fence.basep.fd = -EINVAL;
+				return -EINVAL;
+			}
+		}
+		break;
+	case BASE_JD_REQ_SOFT_FENCE_WAIT:
+		{
+			struct base_fence fence;
+			int ret;
+
+			if (copy_from_user(&fence,
+					(__user void *)(uintptr_t) katom->jc,
+					sizeof(fence)) != 0)
+				return -EINVAL;
+
+			/* Get a reference to the fence object */
+			ret = kbase_sync_fence_in_from_fd(katom,
+							  fence.basep.fd);
+			if (ret < 0)
+				return ret;
+
+#ifdef CONFIG_MALI_DMA_FENCE
+			/*
+			 * Set KCTX_NO_IMPLICIT_SYNC in the context the first
+			 * time a soft fence wait job is observed. This
+			 * prevents the implicit dma-buf fence from conflicting
+			 * with Android native sync fences.
+			 */
+			if (!kbase_ctx_flag(katom->kctx, KCTX_NO_IMPLICIT_SYNC))
+				kbase_ctx_flag_set(katom->kctx, KCTX_NO_IMPLICIT_SYNC);
+#endif /* CONFIG_MALI_DMA_FENCE */
+		}
+		break;
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+	case BASE_JD_REQ_SOFT_JIT_ALLOC:
+		return kbase_jit_allocate_prepare(katom);
+	case BASE_JD_REQ_SOFT_JIT_FREE:
+		return kbase_jit_free_prepare(katom);
+	case BASE_JD_REQ_SOFT_EVENT_WAIT:
+	case BASE_JD_REQ_SOFT_EVENT_SET:
+	case BASE_JD_REQ_SOFT_EVENT_RESET:
+		if (katom->jc == 0)
+			return -EINVAL;
+		break;
+	case BASE_JD_REQ_SOFT_DEBUG_COPY:
+		return kbase_debug_copy_prepare(katom);
+	case BASE_JD_REQ_SOFT_EXT_RES_MAP:
+		return kbase_ext_res_prepare(katom);
+	case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
+		return kbase_ext_res_prepare(katom);
+	default:
+		/* Unsupported soft-job */
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void kbase_finish_soft_job(struct kbase_jd_atom *katom)
+{
+	switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+	case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
+		/* Nothing to do */
+		break;
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+		/* If fence has not yet been signaled, do it now */
+		kbase_sync_fence_out_trigger(katom, katom->event_code ==
+				BASE_JD_EVENT_DONE ? 0 : -EFAULT);
+		break;
+	case BASE_JD_REQ_SOFT_FENCE_WAIT:
+		/* Release katom's reference to fence object */
+		kbase_sync_fence_in_remove(katom);
+		break;
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+	case BASE_JD_REQ_SOFT_DEBUG_COPY:
+		kbase_debug_copy_finish(katom);
+		break;
+	case BASE_JD_REQ_SOFT_JIT_ALLOC:
+		kbase_jit_allocate_finish(katom);
+		break;
+	case BASE_JD_REQ_SOFT_EXT_RES_MAP:
+		kbase_ext_res_finish(katom);
+		break;
+	case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
+		kbase_ext_res_finish(katom);
+		break;
+	case BASE_JD_REQ_SOFT_JIT_FREE:
+		kbase_jit_free_finish(katom);
+		break;
+	}
+}
+
+void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
+{
+	LIST_HEAD(local_suspended_soft_jobs);
+	struct kbase_jd_atom *tmp_iter;
+	struct kbase_jd_atom *katom_iter;
+	struct kbasep_js_device_data *js_devdata;
+	bool resched = false;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	js_devdata = &kbdev->js_data;
+
+	/* Move out the entire list */
+	mutex_lock(&js_devdata->runpool_mutex);
+	list_splice_init(&js_devdata->suspended_soft_jobs_list,
+			&local_suspended_soft_jobs);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	/*
+	 * Each atom must be detached from the list and run separately -
+	 * it could be re-added to the old list, but this is unlikely
+	 */
+	list_for_each_entry_safe(katom_iter, tmp_iter,
+			&local_suspended_soft_jobs, dep_item[1]) {
+		struct kbase_context *kctx = katom_iter->kctx;
+
+		mutex_lock(&kctx->jctx.lock);
+
+		/* Remove from the global list */
+		list_del(&katom_iter->dep_item[1]);
+		/* Remove from the context's list of waiting soft jobs */
+		kbasep_remove_waiting_soft_job(katom_iter);
+
+		if (kbase_process_soft_job(katom_iter) == 0) {
+			kbase_finish_soft_job(katom_iter);
+			resched |= jd_done_nolock(katom_iter, NULL);
+		}
+		mutex_unlock(&kctx->jctx.lock);
+	}
+
+	if (resched)
+		kbase_js_sched_all(kbdev);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_strings.c b/drivers/gpu/arm/midgard/mali_kbase_strings.c
new file mode 100644
index 0000000..22caa4a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_strings.c
@@ -0,0 +1,28 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+#include "mali_kbase_strings.h"
+
+#define KBASE_DRV_NAME "mali"
+#define KBASE_TIMELINE_NAME KBASE_DRV_NAME ".timeline"
+
+const char kbase_drv_name[] = KBASE_DRV_NAME;
+const char kbase_timeline_name[] = KBASE_TIMELINE_NAME;
diff --git a/drivers/gpu/arm/midgard/mali_kbase_strings.h b/drivers/gpu/arm/midgard/mali_kbase_strings.h
new file mode 100644
index 0000000..d2f1825
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_strings.h
@@ -0,0 +1,24 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+extern const char kbase_drv_name[];
+extern const char kbase_timeline_name[];
diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync.h b/drivers/gpu/arm/midgard/mali_kbase_sync.h
new file mode 100644
index 0000000..785b9ff
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_sync.h
@@ -0,0 +1,222 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file mali_kbase_sync.h
+ *
+ * This file contains our internal "API" for explicit fences.
+ * It hides the implementation details of the actual explicit fence mechanism
+ * used (Android fences or sync file with DMA fences).
+ */
+
+#ifndef MALI_KBASE_SYNC_H
+#define MALI_KBASE_SYNC_H
+
+#include <linux/syscalls.h>
+#ifdef CONFIG_SYNC
+#include <sync.h>
+#endif
+#ifdef CONFIG_SYNC_FILE
+#include "mali_kbase_fence_defs.h"
+#include <linux/sync_file.h>
+#endif
+
+#include "mali_kbase.h"
+
+/**
+ * struct kbase_sync_fence_info - Information about a fence
+ * @fence: Pointer to fence (type is void*, as the underlying struct can differ)
+ * @name: The name given to this fence when it was created
+ * @status: < 0 means error, 0 means active, 1 means signaled
+ *
+ * Use kbase_sync_fence_in_info_get() or kbase_sync_fence_out_info_get()
+ * to get the information.
+ */
+struct kbase_sync_fence_info {
+	void *fence;
+	char name[32];
+	int status;
+};
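+
+/*
+ * Typical usage (see kbase_fence_debug_wait_timeout() in
+ * mali_kbase_softjobs.c): fill this struct via
+ * kbase_sync_fence_in_info_get(katom, &info) and, on success, log info.name
+ * together with kbase_sync_status_string(info.status).
+ */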
+
+/**
+ * kbase_sync_fence_stream_create() - Create a stream object
+ * @name: Name of stream (only used to ease debugging/visualization)
+ * @out_fd: A file descriptor representing the created stream object
+ *
+ * Can map down to a timeline in some implementations.
+ * Exposed as a file descriptor.
+ * Life-time controlled via the file descriptor:
+ * - dup to add a ref
+ * - close to remove a ref
+ *
+ * Return: 0 on success, < 0 on error
+ */
+int kbase_sync_fence_stream_create(const char *name, int *const out_fd);
+
+/**
+ * kbase_sync_fence_out_create() - Create an explicit output fence for the specified atom
+ * @katom: Atom to assign the new explicit fence to
+ * @stream_fd: File descriptor for stream object to create fence on
+ *
+ * return: Valid file descriptor to fence or < 0 on error
+ */
+int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int stream_fd);
+
+/**
+ * kbase_sync_fence_in_from_fd() - Assign an existing fence to the specified atom
+ * @katom: Atom to assign the existing explicit fence to
+ * @fd: File descriptor to an existing fence
+ *
+ * Assigns an explicit input fence to the atom.
+ * The fence can later be waited on by calling kbase_sync_fence_in_wait().
+ *
+ * return: 0 on success, < 0 on error
+ */
+int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd);
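+
+/*
+ * Usage sketch for the three functions above (illustrative only; error
+ * handling is simplified and "katom"/"other_katom" are hypothetical atoms):
+ *
+ *	int stream_fd, fence_fd;
+ *
+ *	if (kbase_sync_fence_stream_create("mali_stream", &stream_fd))
+ *		return -EINVAL;
+ *	fence_fd = kbase_sync_fence_out_create(katom, stream_fd);
+ *	if (fence_fd < 0)
+ *		return fence_fd;
+ *	...
+ *	if (kbase_sync_fence_in_from_fd(other_katom, fence_fd))
+ *		return -ENOENT;
+ */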
+
+/**
+ * kbase_sync_fence_validate() - Validate a fd to be a valid fence
+ * @fd: File descriptor to check
+ *
+ * This function is only usable to catch unintentional user errors early;
+ * it does not stop malicious code from changing the fd after this function
+ * returns.
+ *
+ * return: 0 if fd is for a valid fence, < 0 if invalid
+ */
+int kbase_sync_fence_validate(int fd);
+
+/**
+ * kbase_sync_fence_out_trigger() - Signal explicit output fence attached on katom
+ * @katom: Atom with an explicit fence to signal
+ * @result: < 0 means signal with error, >= 0 indicates success
+ *
+ * Signal output fence attached on katom and remove the fence from the atom.
+ *
+ * return: The "next" event code for atom, typically JOB_CANCELLED or EVENT_DONE
+ */
+enum base_jd_event_code
+kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result);
+
+/**
+ * kbase_sync_fence_in_wait() - Wait for explicit input fence to be signaled
+ * @katom: Atom with explicit fence to wait for
+ *
+ * If the fence is already signaled, then 0 is returned, and the caller must
+ * continue processing of the katom.
+ *
+ * If the fence isn't already signaled, then this kbase_sync framework will
+ * take responsibility to continue the processing once the fence is signaled.
+ *
+ * return: 0 if already signaled, otherwise 1
+ */
+int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom);
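+
+/*
+ * Typical caller pattern (a sketch only; the surrounding soft-job handling
+ * is assumed):
+ *
+ *	if (!kbase_sync_fence_in_wait(katom)) {
+ *		... fence already signaled, continue processing here ...
+ *	}
+ *	... otherwise the kbase_sync framework resumes processing via its
+ *	worker once the fence signals ...
+ */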
+
+/**
+ * kbase_sync_fence_in_cancel_wait() - Cancel explicit input fence waits
+ * @katom: Atom to cancel wait for
+ *
+ * This function is fully responsible for continuing processing of this atom
+ * (remove_waiting_soft_job + finish_soft_job + jd_done + js_sched_all)
+ */
+void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_sync_fence_in_remove() - Remove the input fence from the katom
+ * @katom: Atom to remove explicit input fence for
+ *
+ * This will also release the corresponding reference.
+ */
+void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_sync_fence_out_remove() - Remove the output fence from the katom
+ * @katom: Atom to remove explicit output fence for
+ *
+ * This will also release the corresponding reference.
+ */
+void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_sync_fence_close_fd() - Close a file descriptor representing a fence
+ * @fd: File descriptor to close
+ */
+static inline void kbase_sync_fence_close_fd(int fd)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
+	ksys_close(fd);
+#else
+	sys_close(fd);
+#endif
+}
+
+/**
+ * kbase_sync_fence_in_info_get() - Retrieves information about input fence
+ * @katom: Atom to get fence information from
+ * @info: Struct to be filled with fence information
+ *
+ * return: 0 on success, < 0 on error
+ */
+int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom,
+				 struct kbase_sync_fence_info *info);
+
+/**
+ * kbase_sync_fence_out_info_get() - Retrieves information about output fence
+ * @katom: Atom to get fence information from
+ * @info: Struct to be filled with fence information
+ *
+ * return: 0 on success, < 0 on error
+ */
+int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom,
+				  struct kbase_sync_fence_info *info);
+
+#if defined(CONFIG_SYNC_FILE)
+#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
+void kbase_sync_fence_info_get(struct fence *fence,
+			       struct kbase_sync_fence_info *info);
+#else
+void kbase_sync_fence_info_get(struct dma_fence *fence,
+			       struct kbase_sync_fence_info *info);
+#endif
+#endif
+
+/**
+ * kbase_sync_status_string() - Get string matching @status
+ * @status: Value of fence status.
+ *
+ * return: Pointer to string describing @status.
+ */
+const char *kbase_sync_status_string(int status);
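+
+/*
+ * Example combining the info and status helpers above (illustrative only;
+ * assumes @katom carries a valid input fence):
+ *
+ *	struct kbase_sync_fence_info info;
+ *
+ *	if (!kbase_sync_fence_in_info_get(katom, &info))
+ *		pr_info("fence %s is %s\n", info.name,
+ *			kbase_sync_status_string(info.status));
+ */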
+
+/*
+ * Internal worker used to continue processing of atom.
+ */
+void kbase_sync_fence_wait_worker(struct work_struct *data);
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+/**
+ * kbase_sync_fence_in_dump() - Trigger a debug dump of the atom's input fence state
+ * @katom: Atom to trigger fence debug dump for
+ */
+void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom);
+#endif
+
+#endif /* MALI_KBASE_SYNC_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync_android.c b/drivers/gpu/arm/midgard/mali_kbase_sync_android.c
new file mode 100644
index 0000000..75940fb
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_sync_android.c
@@ -0,0 +1,542 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Code for supporting explicit Android fences (CONFIG_SYNC)
+ * Known to be good for kernels 4.5 and earlier.
+ * Replaced with CONFIG_SYNC_FILE for 4.9 and later kernels
+ * (see mali_kbase_sync_file.c)
+ */
+
+#include <linux/sched.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/anon_inodes.h>
+#include <linux/version.h>
+#include "sync.h"
+#include <mali_kbase.h>
+#include <mali_kbase_sync.h>
+
+struct mali_sync_timeline {
+	struct sync_timeline timeline;
+	atomic_t counter;
+	atomic_t signaled;
+};
+
+struct mali_sync_pt {
+	struct sync_pt pt;
+	int order;
+	int result;
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+/* For backwards compatibility with kernels before 3.17. After 3.17
+ * sync_pt_parent is included in the kernel. */
+static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+{
+	return pt->parent;
+}
+#endif
+
+static struct mali_sync_timeline *to_mali_sync_timeline(
+						struct sync_timeline *timeline)
+{
+	return container_of(timeline, struct mali_sync_timeline, timeline);
+}
+
+static struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
+{
+	return container_of(pt, struct mali_sync_pt, pt);
+}
+
+static struct sync_pt *timeline_dup(struct sync_pt *pt)
+{
+	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+	struct mali_sync_pt *new_mpt;
+	struct sync_pt *new_pt = sync_pt_create(sync_pt_parent(pt),
+						sizeof(struct mali_sync_pt));
+
+	if (!new_pt)
+		return NULL;
+
+	new_mpt = to_mali_sync_pt(new_pt);
+	new_mpt->order = mpt->order;
+	new_mpt->result = mpt->result;
+
+	return new_pt;
+}
+
+static int timeline_has_signaled(struct sync_pt *pt)
+{
+	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+	struct mali_sync_timeline *mtl = to_mali_sync_timeline(
+							sync_pt_parent(pt));
+	int result = mpt->result;
+
+	int diff = atomic_read(&mtl->signaled) - mpt->order;
+
+	if (diff >= 0)
+		return (result < 0) ? result : 1;
+
+	return 0;
+}
+
+static int timeline_compare(struct sync_pt *a, struct sync_pt *b)
+{
+	struct mali_sync_pt *ma = container_of(a, struct mali_sync_pt, pt);
+	struct mali_sync_pt *mb = container_of(b, struct mali_sync_pt, pt);
+
+	int diff = ma->order - mb->order;
+
+	if (diff == 0)
+		return 0;
+
+	return (diff < 0) ? -1 : 1;
+}
+
+static void timeline_value_str(struct sync_timeline *timeline, char *str,
+			       int size)
+{
+	struct mali_sync_timeline *mtl = to_mali_sync_timeline(timeline);
+
+	snprintf(str, size, "%d", atomic_read(&mtl->signaled));
+}
+
+static void pt_value_str(struct sync_pt *pt, char *str, int size)
+{
+	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+
+	snprintf(str, size, "%d(%d)", mpt->order, mpt->result);
+}
+
+static struct sync_timeline_ops mali_timeline_ops = {
+	.driver_name = "Mali",
+	.dup = timeline_dup,
+	.has_signaled = timeline_has_signaled,
+	.compare = timeline_compare,
+	.timeline_value_str = timeline_value_str,
+	.pt_value_str       = pt_value_str,
+};
+
+/* Allocates a timeline for Mali
+ *
+ * One timeline should be allocated per API context.
+ */
+static struct sync_timeline *mali_sync_timeline_alloc(const char *name)
+{
+	struct sync_timeline *tl;
+	struct mali_sync_timeline *mtl;
+
+	tl = sync_timeline_create(&mali_timeline_ops,
+				  sizeof(struct mali_sync_timeline), name);
+	if (!tl)
+		return NULL;
+
+	/* Set the counter in our private struct */
+	mtl = to_mali_sync_timeline(tl);
+	atomic_set(&mtl->counter, 0);
+	atomic_set(&mtl->signaled, 0);
+
+	return tl;
+}
+
+static int kbase_stream_close(struct inode *inode, struct file *file)
+{
+	struct sync_timeline *tl;
+
+	tl = (struct sync_timeline *)file->private_data;
+	sync_timeline_destroy(tl);
+	return 0;
+}
+
+static const struct file_operations stream_fops = {
+	.owner = THIS_MODULE,
+	.release = kbase_stream_close,
+};
+
+int kbase_sync_fence_stream_create(const char *name, int *const out_fd)
+{
+	struct sync_timeline *tl;
+
+	if (!out_fd)
+		return -EINVAL;
+
+	tl = mali_sync_timeline_alloc(name);
+	if (!tl)
+		return -EINVAL;
+
+	*out_fd = anon_inode_getfd(name, &stream_fops, tl, O_RDONLY|O_CLOEXEC);
+
+	if (*out_fd < 0) {
+		sync_timeline_destroy(tl);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Allocates a sync point within the timeline.
+ *
+ * The timeline must be the one allocated by kbase_sync_timeline_alloc
+ *
+ * Sync points must be triggered in *exactly* the same order as they are
+ * allocated.
+ */
+static struct sync_pt *kbase_sync_pt_alloc(struct sync_timeline *parent)
+{
+	struct sync_pt *pt = sync_pt_create(parent,
+					    sizeof(struct mali_sync_pt));
+	struct mali_sync_timeline *mtl = to_mali_sync_timeline(parent);
+	struct mali_sync_pt *mpt;
+
+	if (!pt)
+		return NULL;
+
+	mpt = to_mali_sync_pt(pt);
+	mpt->order = atomic_inc_return(&mtl->counter);
+	mpt->result = 0;
+
+	return pt;
+}
+
+int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int tl_fd)
+{
+	struct sync_timeline *tl;
+	struct sync_pt *pt;
+	struct sync_fence *fence;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
+	struct files_struct *files;
+	struct fdtable *fdt;
+#endif
+	int fd;
+	struct file *tl_file;
+
+	tl_file = fget(tl_fd);
+	if (tl_file == NULL)
+		return -EBADF;
+
+	if (tl_file->f_op != &stream_fops) {
+		fd = -EBADF;
+		goto out;
+	}
+
+	tl = tl_file->private_data;
+
+	pt = kbase_sync_pt_alloc(tl);
+	if (!pt) {
+		fd = -EFAULT;
+		goto out;
+	}
+
+	fence = sync_fence_create("mali_fence", pt);
+	if (!fence) {
+		sync_pt_free(pt);
+		fd = -EFAULT;
+		goto out;
+	}
+
+	/* from here the fence owns the sync_pt */
+
+	/* create a fd representing the fence */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
+	if (fd < 0) {
+		sync_fence_put(fence);
+		goto out;
+	}
+#else
+	fd = get_unused_fd();
+	if (fd < 0) {
+		sync_fence_put(fence);
+		goto out;
+	}
+
+	files = current->files;
+	spin_lock(&files->file_lock);
+	fdt = files_fdtable(files);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+	__set_close_on_exec(fd, fdt);
+#else
+	FD_SET(fd, fdt->close_on_exec);
+#endif
+	spin_unlock(&files->file_lock);
+#endif  /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) */
+
+	/* bind fence to the new fd */
+	sync_fence_install(fence, fd);
+
+	katom->fence = sync_fence_fdget(fd);
+	if (katom->fence == NULL) {
+		/* The only way the fence can be NULL is if userspace closed it
+		 * for us, so we don't need to clean it up */
+		fd = -EINVAL;
+		goto out;
+	}
+
+out:
+	fput(tl_file);
+
+	return fd;
+}
+
+int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd)
+{
+	katom->fence = sync_fence_fdget(fd);
+	return katom->fence ? 0 : -ENOENT;
+}
+
+int kbase_sync_fence_validate(int fd)
+{
+	struct sync_fence *fence;
+
+	fence = sync_fence_fdget(fd);
+	if (!fence)
+		return -EINVAL;
+
+	sync_fence_put(fence);
+	return 0;
+}
+
+/* Returns true if the specified timeline is allocated by Mali */
+static int kbase_sync_timeline_is_ours(struct sync_timeline *timeline)
+{
+	return timeline->ops == &mali_timeline_ops;
+}
+
+/* Signals a particular sync point
+ *
+ * Sync points must be triggered in *exactly* the same order as they are
+ * allocated.
+ *
+ * If they are signaled in the wrong order then a message will be printed in
+ * debug builds; attempts to signal out-of-order sync_pts will otherwise be
+ * ignored.
+ *
+ * result can be negative to indicate error, any other value is interpreted as
+ * success.
+ */
+static void kbase_sync_signal_pt(struct sync_pt *pt, int result)
+{
+	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+	struct mali_sync_timeline *mtl = to_mali_sync_timeline(
+							sync_pt_parent(pt));
+	int signaled;
+	int diff;
+
+	mpt->result = result;
+
+	do {
+		signaled = atomic_read(&mtl->signaled);
+
+		diff = signaled - mpt->order;
+
+		if (diff > 0) {
+			/* The timeline is already at or ahead of this point.
+			 * This should not happen unless userspace has been
+			 * signaling fences out of order, so warn but don't
+			 * violate the sync_pt API.
+			 * The warning is only in debug builds to prevent
+			 * a malicious user being able to spam dmesg.
+			 */
+#ifdef CONFIG_MALI_DEBUG
+			pr_err("Fences were triggered in a different order to allocation!");
+#endif /* CONFIG_MALI_DEBUG */
+			return;
+		}
+	} while (atomic_cmpxchg(&mtl->signaled,
+				signaled, mpt->order) != signaled);
+}
+
+enum base_jd_event_code
+kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result)
+{
+	struct sync_pt *pt;
+	struct sync_timeline *timeline;
+
+	if (!katom->fence)
+		return BASE_JD_EVENT_JOB_CANCELLED;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+	if (!list_is_singular(&katom->fence->pt_list_head)) {
+#else
+	if (katom->fence->num_fences != 1) {
+#endif
+		/* Not exactly one item in the list - so it didn't (directly)
+		 * come from us */
+		return BASE_JD_EVENT_JOB_CANCELLED;
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+	pt = list_first_entry(&katom->fence->pt_list_head,
+			      struct sync_pt, pt_list);
+#else
+	pt = container_of(katom->fence->cbs[0].sync_pt, struct sync_pt, base);
+#endif
+	timeline = sync_pt_parent(pt);
+
+	if (!kbase_sync_timeline_is_ours(timeline)) {
+		/* Fence has a sync_pt which isn't ours! */
+		return BASE_JD_EVENT_JOB_CANCELLED;
+	}
+
+	kbase_sync_signal_pt(pt, result);
+
+	sync_timeline_signal(timeline);
+
+	kbase_sync_fence_out_remove(katom);
+
+	return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
+}
+
+static inline int kbase_fence_get_status(struct sync_fence *fence)
+{
+	if (!fence)
+		return -ENOENT;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+	return fence->status;
+#else
+	return atomic_read(&fence->status);
+#endif
+}
+
+static void kbase_fence_wait_callback(struct sync_fence *fence,
+				      struct sync_fence_waiter *waiter)
+{
+	struct kbase_jd_atom *katom = container_of(waiter,
+					struct kbase_jd_atom, sync_waiter);
+	struct kbase_context *kctx = katom->kctx;
+
+	/* Propagate the fence status to the atom.
+	 * If negative then cancel this atom and its dependencies.
+	 */
+	if (kbase_fence_get_status(fence) < 0)
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+	/* To prevent a potential deadlock we schedule the work onto the
+	 * job_done_wq workqueue
+	 *
+	 * The issue is that we may signal the timeline while holding
+	 * kctx->jctx.lock and the callbacks are run synchronously from
+	 * sync_timeline_signal. So we simply defer the work.
+	 */
+
+	INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
+	queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom)
+{
+	int ret;
+
+	sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);
+
+	ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);
+
+	if (ret == 1) {
+		/* Already signaled */
+		return 0;
+	}
+
+	if (ret < 0) {
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		/* We should cause the dependent jobs in the bag to be failed;
+		 * to do this we schedule the work queue to complete this job */
+		INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
+		queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
+	}
+
+	return 1;
+}
+
+void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom)
+{
+	if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
+		/* The wait wasn't cancelled - leave the cleanup for
+		 * kbase_fence_wait_callback */
+		return;
+	}
+
+	/* Wait was cancelled - zap the atoms */
+	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+	kbasep_remove_waiting_soft_job(katom);
+	kbase_finish_soft_job(katom);
+
+	if (jd_done_nolock(katom, NULL))
+		kbase_js_sched_all(katom->kctx->kbdev);
+}
+
+void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom)
+{
+	if (katom->fence) {
+		sync_fence_put(katom->fence);
+		katom->fence = NULL;
+	}
+}
+
+void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom)
+{
+	if (katom->fence) {
+		sync_fence_put(katom->fence);
+		katom->fence = NULL;
+	}
+}
+
+int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom,
+				 struct kbase_sync_fence_info *info)
+{
+	if (!katom->fence)
+		return -ENOENT;
+
+	info->fence = katom->fence;
+	info->status = kbase_fence_get_status(katom->fence);
+	strlcpy(info->name, katom->fence->name, sizeof(info->name));
+
+	return 0;
+}
+
+int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom,
+				 struct kbase_sync_fence_info *info)
+{
+	if (!katom->fence)
+		return -ENOENT;
+
+	info->fence = katom->fence;
+	info->status = kbase_fence_get_status(katom->fence);
+	strlcpy(info->name, katom->fence->name, sizeof(info->name));
+
+	return 0;
+}
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom)
+{
+	/* Dump out the full state of all the Android sync fences.
+	 * The function sync_dump() isn't exported to modules, so force
+	 * sync_fence_wait() to time out to trigger sync_dump().
+	 */
+	if (katom->fence)
+		sync_fence_wait(katom->fence, 1);
+}
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync_common.c b/drivers/gpu/arm/midgard/mali_kbase_sync_common.c
new file mode 100644
index 0000000..03c0df5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_sync_common.c
@@ -0,0 +1,49 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * @file mali_kbase_sync_common.c
+ *
+ * Common code for our explicit fence functionality
+ */
+
+#include <linux/workqueue.h>
+#include "mali_kbase.h"
+#include "mali_kbase_sync.h"
+
+void kbase_sync_fence_wait_worker(struct work_struct *data)
+{
+	struct kbase_jd_atom *katom;
+
+	katom = container_of(data, struct kbase_jd_atom, work);
+	kbase_soft_event_wait_callback(katom);
+}
+
+const char *kbase_sync_status_string(int status)
+{
+	if (status == 0)
+		return "active";
+	else if (status > 0)
+		return "signaled";
+	else
+		return "error";
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync_file.c b/drivers/gpu/arm/midgard/mali_kbase_sync_file.c
new file mode 100644
index 0000000..0679c48
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_sync_file.c
@@ -0,0 +1,366 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Code for supporting explicit Linux fences (CONFIG_SYNC_FILE)
+ * Introduced in kernel 4.9.
+ * Android explicit fences (CONFIG_SYNC) can be used for older kernels
+ * (see mali_kbase_sync_android.c)
+ */
+
+#include <linux/sched.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/anon_inodes.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <linux/sync_file.h>
+#include <linux/slab.h>
+#include "mali_kbase_fence_defs.h"
+#include "mali_kbase_sync.h"
+#include "mali_kbase_fence.h"
+#include "mali_kbase.h"
+
+static const struct file_operations stream_fops = {
+	.owner = THIS_MODULE
+};
+
+int kbase_sync_fence_stream_create(const char *name, int *const out_fd)
+{
+	if (!out_fd)
+		return -EINVAL;
+
+	*out_fd = anon_inode_getfd(name, &stream_fops, NULL,
+				   O_RDONLY | O_CLOEXEC);
+	if (*out_fd < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int stream_fd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence;
+#else
+	struct dma_fence *fence;
+#endif
+	struct sync_file *sync_file;
+	int fd;
+
+	fence = kbase_fence_out_new(katom);
+	if (!fence)
+		return -ENOMEM;
+
+#if (KERNEL_VERSION(4, 9, 67) >= LINUX_VERSION_CODE)
+	/* Take an extra reference to the fence on behalf of the sync_file.
+	 * This is only needed on kernels up to v4.9.67, where
+	 * sync_file_create() does not take its own reference; from v4.9.68
+	 * onwards it takes its own reference.
+	 */
+	dma_fence_get(fence);
+#endif
+
+	/* create a sync_file fd representing the fence */
+	sync_file = sync_file_create(fence);
+	if (!sync_file) {
+#if (KERNEL_VERSION(4, 9, 67) >= LINUX_VERSION_CODE)
+		dma_fence_put(fence);
+#endif
+		kbase_fence_out_remove(katom);
+		return -ENOMEM;
+	}
+
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0) {
+		fput(sync_file->file);
+		kbase_fence_out_remove(katom);
+		return fd;
+	}
+
+	fd_install(fd, sync_file->file);
+
+	return fd;
+}
+
+int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence = sync_file_get_fence(fd);
+#else
+	struct dma_fence *fence = sync_file_get_fence(fd);
+#endif
+
+	if (!fence)
+		return -ENOENT;
+
+	kbase_fence_fence_in_set(katom, fence);
+
+	return 0;
+}
+
+int kbase_sync_fence_validate(int fd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence = sync_file_get_fence(fd);
+#else
+	struct dma_fence *fence = sync_file_get_fence(fd);
+#endif
+
+	if (!fence)
+		return -EINVAL;
+
+	dma_fence_put(fence);
+
+	return 0; /* valid */
+}
+
+enum base_jd_event_code
+kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result)
+{
+	int res;
+
+	if (!kbase_fence_out_is_ours(katom)) {
+		/* Not our fence */
+		return BASE_JD_EVENT_JOB_CANCELLED;
+	}
+
+	res = kbase_fence_out_signal(katom, result);
+	if (unlikely(res < 0)) {
+		dev_warn(katom->kctx->kbdev->dev,
+				"fence_signal() failed with %d\n", res);
+	}
+
+	kbase_sync_fence_out_remove(katom);
+
+	return (result != 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+static void kbase_fence_wait_callback(struct fence *fence,
+				      struct fence_cb *cb)
+#else
+static void kbase_fence_wait_callback(struct dma_fence *fence,
+				      struct dma_fence_cb *cb)
+#endif
+{
+	struct kbase_fence_cb *kcb = container_of(cb,
+				struct kbase_fence_cb,
+				fence_cb);
+	struct kbase_jd_atom *katom = kcb->katom;
+	struct kbase_context *kctx = katom->kctx;
+
+	/* Cancel atom if fence is erroneous */
+#if (KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE || \
+	 (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE && \
+	  KERNEL_VERSION(4, 9, 68) <= LINUX_VERSION_CODE))
+	if (dma_fence_is_signaled(kcb->fence) && kcb->fence->error)
+#else
+	if (dma_fence_is_signaled(kcb->fence) && kcb->fence->status < 0)
+#endif
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+	if (kbase_fence_dep_count_dec_and_test(katom)) {
+		/* We take responsibility of handling this */
+		kbase_fence_dep_count_set(katom, -1);
+
+		/* To prevent a potential deadlock we schedule the work onto the
+		 * job_done_wq workqueue
+		 *
+		 * The issue is that we may signal the timeline while holding
+		 * kctx->jctx.lock and the callbacks are run synchronously from
+		 * sync_timeline_signal. So we simply defer the work.
+		 */
+		INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
+		queue_work(kctx->jctx.job_done_wq, &katom->work);
+	}
+}
+
+int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom)
+{
+	int err;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence;
+#else
+	struct dma_fence *fence;
+#endif
+
+	fence = kbase_fence_in_get(katom);
+	if (!fence)
+		return 0; /* no input fence to wait for, good to go! */
+
+	kbase_fence_dep_count_set(katom, 1);
+
+	err = kbase_fence_add_callback(katom, fence, kbase_fence_wait_callback);
+
+	kbase_fence_put(fence);
+
+	if (likely(!err)) {
+		/* Test if the callbacks are already triggered */
+		if (kbase_fence_dep_count_dec_and_test(katom)) {
+			kbase_fence_free_callbacks(katom);
+			kbase_fence_dep_count_set(katom, -1);
+			return 0; /* Already signaled, good to go right now */
+		}
+
+		/* Callback installed, so we just need to wait for it... */
+	} else {
+		/* Failure */
+		kbase_fence_free_callbacks(katom);
+		kbase_fence_dep_count_set(katom, -1);
+
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+		/* We should cause the dependent jobs in the bag to be failed;
+		 * to do this we schedule the work queue to complete this job */
+
+		INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
+		queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
+	}
+
+	return 1; /* completion to be done later by callback/worker */
+}
+
+void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom)
+{
+	if (!kbase_fence_free_callbacks(katom)) {
+		/* The wait wasn't cancelled -
+		 * leave the cleanup for kbase_fence_wait_callback */
+		return;
+	}
+
+	/* Take responsibility of completion */
+	kbase_fence_dep_count_set(katom, -1);
+
+	/* Wait was cancelled - zap the atoms */
+	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+	kbasep_remove_waiting_soft_job(katom);
+	kbase_finish_soft_job(katom);
+
+	if (jd_done_nolock(katom, NULL))
+		kbase_js_sched_all(katom->kctx->kbdev);
+}
+
+void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom)
+{
+	kbase_fence_out_remove(katom);
+}
+
+void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom)
+{
+	kbase_fence_free_callbacks(katom);
+	kbase_fence_in_remove(katom);
+}
+
+#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
+void kbase_sync_fence_info_get(struct fence *fence,
+			       struct kbase_sync_fence_info *info)
+#else
+void kbase_sync_fence_info_get(struct dma_fence *fence,
+			       struct kbase_sync_fence_info *info)
+#endif
+{
+	info->fence = fence;
+
+	/* translate into CONFIG_SYNC status:
+	 * < 0 : error
+	 * 0 : active
+	 * 1 : signaled
+	 */
+	if (dma_fence_is_signaled(fence)) {
+#if (KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE || \
+	 (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE && \
+	  KERNEL_VERSION(4, 9, 68) <= LINUX_VERSION_CODE))
+		int status = fence->error;
+#else
+		int status = fence->status;
+#endif
+		if (status < 0)
+			info->status = status; /* signaled with error */
+		else
+			info->status = 1; /* signaled with success */
+	} else  {
+		info->status = 0; /* still active (unsignaled) */
+	}
+
+#if (KERNEL_VERSION(4, 8, 0) > LINUX_VERSION_CODE)
+	scnprintf(info->name, sizeof(info->name), "%u#%u",
+		  fence->context, fence->seqno);
+#elif (KERNEL_VERSION(5, 1, 0) > LINUX_VERSION_CODE)
+	scnprintf(info->name, sizeof(info->name), "%llu#%u",
+		  fence->context, fence->seqno);
+#else
+	scnprintf(info->name, sizeof(info->name), "%llu#%llu",
+		  fence->context, fence->seqno);
+#endif
+}
+
+int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom,
+				 struct kbase_sync_fence_info *info)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence;
+#else
+	struct dma_fence *fence;
+#endif
+
+	fence = kbase_fence_in_get(katom);
+	if (!fence)
+		return -ENOENT;
+
+	kbase_sync_fence_info_get(fence, info);
+
+	kbase_fence_put(fence);
+
+	return 0;
+}
+
+int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom,
+				  struct kbase_sync_fence_info *info)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence;
+#else
+	struct dma_fence *fence;
+#endif
+
+	fence = kbase_fence_out_get(katom);
+	if (!fence)
+		return -ENOENT;
+
+	kbase_sync_fence_info_get(fence, info);
+
+	kbase_fence_put(fence);
+
+	return 0;
+}
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom)
+{
+	/* Not implemented */
+}
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_timeline.c b/drivers/gpu/arm/midgard/mali_kbase_timeline.c
new file mode 100644
index 0000000..17470fc
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_timeline.c
@@ -0,0 +1,342 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_timeline.h"
+#include "mali_kbase_timeline_priv.h"
+#include "mali_kbase_tracepoints.h"
+
+#include <mali_kbase.h>
+#include <mali_kbase_jm.h>
+
+#include <linux/anon_inodes.h>
+#include <linux/atomic.h>
+#include <linux/file.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringify.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+
+
+/* The period of autoflush checker execution in milliseconds. */
+#define AUTOFLUSH_INTERVAL 1000 /* ms */
+
+/*****************************************************************************/
+
+/* These values are used in mali_kbase_tracepoints.h
+ * to retrieve the streams from a kbase_timeline instance.
+ */
+const size_t __obj_stream_offset =
+	offsetof(struct kbase_timeline, streams)
+	+ sizeof(struct kbase_tlstream) * TL_STREAM_TYPE_OBJ;
+
+const size_t __aux_stream_offset =
+	offsetof(struct kbase_timeline, streams)
+	+ sizeof(struct kbase_tlstream) * TL_STREAM_TYPE_AUX;
+
+/**
+ * kbasep_timeline_autoflush_timer_callback - autoflush timer callback
+ * @timer:  Timer list
+ *
+ * The timer is executed periodically to check if any of the streams contains
+ * a buffer ready to be submitted to user space.
+ */
+static void kbasep_timeline_autoflush_timer_callback(struct timer_list *timer)
+{
+	enum tl_stream_type stype;
+	int                 rcode;
+	struct kbase_timeline *timeline =
+		container_of(timer, struct kbase_timeline, autoflush_timer);
+
+	CSTD_UNUSED(timer);
+
+	for (stype = (enum tl_stream_type)0; stype < TL_STREAM_TYPE_COUNT;
+			stype++) {
+		struct kbase_tlstream *stream = &timeline->streams[stype];
+
+		int af_cnt = atomic_read(&stream->autoflush_counter);
+
+		/* Check if the stream contains unflushed data. */
+		if (af_cnt < 0)
+			continue;
+
+		/* Check if stream should be flushed now. */
+		if (af_cnt != atomic_cmpxchg(
+					&stream->autoflush_counter,
+					af_cnt,
+					af_cnt + 1))
+			continue;
+		if (!af_cnt)
+			continue;
+
+		/* Autoflush this stream. */
+		kbase_tlstream_flush_stream(stream);
+	}
+
+	if (atomic_read(&timeline->autoflush_timer_active))
+		rcode = mod_timer(
+				&timeline->autoflush_timer,
+				jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+	CSTD_UNUSED(rcode);
+}
+
+/*****************************************************************************/
+
+int kbase_timeline_init(struct kbase_timeline **timeline,
+		atomic_t *timeline_is_enabled)
+{
+	enum tl_stream_type i;
+	struct kbase_timeline *result;
+
+	if (!timeline || !timeline_is_enabled)
+		return -EINVAL;
+
+	result = kzalloc(sizeof(*result), GFP_KERNEL);
+	if (!result)
+		return -ENOMEM;
+
+	mutex_init(&result->reader_lock);
+	init_waitqueue_head(&result->event_queue);
+
+	/* Prepare stream structures. */
+	for (i = 0; i < TL_STREAM_TYPE_COUNT; i++)
+		kbase_tlstream_init(&result->streams[i], i,
+			&result->event_queue);
+
+	/* Initialize autoflush timer. */
+	atomic_set(&result->autoflush_timer_active, 0);
+	kbase_timer_setup(&result->autoflush_timer,
+			  kbasep_timeline_autoflush_timer_callback);
+	result->is_enabled = timeline_is_enabled;
+
+	*timeline = result;
+	return 0;
+}
+
+void kbase_timeline_term(struct kbase_timeline *timeline)
+{
+	enum tl_stream_type i;
+
+	if (!timeline)
+		return;
+
+	for (i = (enum tl_stream_type)0; i < TL_STREAM_TYPE_COUNT; i++)
+		kbase_tlstream_term(&timeline->streams[i]);
+
+	kfree(timeline);
+}
+
+static void kbase_create_timeline_objects(struct kbase_device *kbdev)
+{
+	unsigned int lpu_id;
+	unsigned int as_nr;
+	struct kbase_context *kctx;
+	struct kbase_timeline *timeline = kbdev->timeline;
+	struct kbase_tlstream *summary =
+		&timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY];
+
+	/* Summarize the LPU objects. */
+	for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+		u32 *lpu =
+			&kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+		__kbase_tlstream_tl_new_lpu(summary, lpu, lpu_id, *lpu);
+	}
+
+	/* Summarize the Address Space objects. */
+	for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+		__kbase_tlstream_tl_new_as(summary, &kbdev->as[as_nr], as_nr);
+
+	/* Create GPU object and make it retain all LPUs and address spaces. */
+	__kbase_tlstream_tl_new_gpu(summary,
+			kbdev,
+			kbdev->gpu_props.props.raw_props.gpu_id,
+			kbdev->gpu_props.num_cores);
+
+	for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+		void *lpu =
+			&kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+		__kbase_tlstream_tl_lifelink_lpu_gpu(summary, lpu, kbdev);
+	}
+
+	for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+		__kbase_tlstream_tl_lifelink_as_gpu(summary,
+				&kbdev->as[as_nr],
+				kbdev);
+
+	/* Lock the context list, to ensure no changes to the list are made
+	 * while we're summarizing the contexts and their contents.
+	 */
+	mutex_lock(&kbdev->kctx_list_lock);
+
+	/* For each context in the device... */
+	list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link) {
+		/* Summarize the context itself */
+		__kbase_tlstream_tl_new_ctx(summary,
+				kctx,
+				kctx->id,
+				(u32)(kctx->tgid));
+	}
+
+	/* Reset body stream buffers while holding the kctx lock.
+	 * This ensures we can't fire both summary and normal tracepoints for
+	 * the same objects.
+	 * If we weren't holding the lock, it's possible that the summarized
+	 * objects could have been created, destroyed, or used after we
+	 * constructed the summary stream tracepoints, but before we reset
+	 * the body stream, resulting in losing those object event tracepoints.
+	 */
+	kbase_timeline_streams_body_reset(timeline);
+
+	mutex_unlock(&kbdev->kctx_list_lock);
+
+	/* Static objects are placed into the summary packet, which needs to
+	 * be transmitted first. Flush all streams to make it available to
+	 * user space.
+	 */
+	kbase_timeline_streams_flush(timeline);
+}
+
+#ifdef CONFIG_MALI_DEVFREQ
+static void kbase_tlstream_current_devfreq_target(struct kbase_device *kbdev)
+{
+	struct devfreq *devfreq = kbdev->devfreq;
+
+	/* Devfreq initialization failure isn't a fatal error, so devfreq might
+	 * be null.
+	 */
+	if (devfreq) {
+		unsigned long cur_freq = 0;
+
+		mutex_lock(&devfreq->lock);
+#if KERNEL_VERSION(4, 3, 0) > LINUX_VERSION_CODE
+		cur_freq = kbdev->current_nominal_freq;
+#else
+		cur_freq = devfreq->last_status.current_frequency;
+#endif
+		KBASE_TLSTREAM_AUX_DEVFREQ_TARGET(kbdev, (u64)cur_freq);
+		mutex_unlock(&devfreq->lock);
+	}
+}
+#endif /* CONFIG_MALI_DEVFREQ */
+
+int kbase_timeline_io_acquire(struct kbase_device *kbdev, u32 flags)
+{
+	int ret;
+	u32 tlstream_enabled = TLSTREAM_ENABLED | flags;
+	struct kbase_timeline *timeline = kbdev->timeline;
+
+	if (!atomic_cmpxchg(timeline->is_enabled, 0, tlstream_enabled)) {
+		int rcode;
+
+		ret = anon_inode_getfd(
+				"[mali_tlstream]",
+				&kbasep_tlstream_fops,
+				timeline,
+				O_RDONLY | O_CLOEXEC);
+		if (ret < 0) {
+			atomic_set(timeline->is_enabled, 0);
+			return ret;
+		}
+
+		/* Reset and initialize header streams. */
+		kbase_tlstream_reset(
+			&timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY]);
+
+		timeline->obj_header_btc = obj_desc_header_size;
+		timeline->aux_header_btc = aux_desc_header_size;
+
+		/* Start autoflush timer. */
+		atomic_set(&timeline->autoflush_timer_active, 1);
+		rcode = mod_timer(
+				&timeline->autoflush_timer,
+				jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+		CSTD_UNUSED(rcode);
+
+		/* If job dumping is enabled, readjust the software event's
+		 * timeout as the default value of 3 seconds is often
+		 * insufficient.
+		 */
+		if (flags & BASE_TLSTREAM_JOB_DUMPING_ENABLED) {
+			dev_info(kbdev->dev,
+					"Job dumping is enabled, readjusting the software event's timeout\n");
+			atomic_set(&kbdev->js_data.soft_job_timeout_ms,
+					1800000);
+		}
+
+		/* Summary stream was cleared during acquire.
+		 * Create static timeline objects that will be
+		 * read by client.
+		 */
+		kbase_create_timeline_objects(kbdev);
+
+#ifdef CONFIG_MALI_DEVFREQ
+		/* Devfreq target tracepoints are only fired when the target
+		 * changes, so we won't know the current target unless we
+		 * send it now.
+		 */
+		kbase_tlstream_current_devfreq_target(kbdev);
+#endif /* CONFIG_MALI_DEVFREQ */
+
+	} else {
+		ret = -EBUSY;
+	}
+
+	return ret;
+}
+
+void kbase_timeline_streams_flush(struct kbase_timeline *timeline)
+{
+	enum tl_stream_type stype;
+
+	for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++)
+		kbase_tlstream_flush_stream(&timeline->streams[stype]);
+}
+
+void kbase_timeline_streams_body_reset(struct kbase_timeline *timeline)
+{
+	kbase_tlstream_reset(
+			&timeline->streams[TL_STREAM_TYPE_OBJ]);
+	kbase_tlstream_reset(
+			&timeline->streams[TL_STREAM_TYPE_AUX]);
+}
+
+#if MALI_UNIT_TEST
+void kbase_timeline_stats(struct kbase_timeline *timeline,
+		u32 *bytes_collected, u32 *bytes_generated)
+{
+	enum tl_stream_type stype;
+
+	KBASE_DEBUG_ASSERT(bytes_collected);
+
+	/* Accumulate bytes generated per stream  */
+	*bytes_generated = 0;
+	for (stype = (enum tl_stream_type)0; stype < TL_STREAM_TYPE_COUNT;
+			stype++)
+		*bytes_generated += atomic_read(
+			&timeline->streams[stype].bytes_generated);
+
+	*bytes_collected = atomic_read(&timeline->bytes_collected);
+}
+#endif /* MALI_UNIT_TEST */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_timeline.h b/drivers/gpu/arm/midgard/mali_kbase_timeline.h
new file mode 100644
index 0000000..d800288
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_timeline.h
@@ -0,0 +1,121 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_TIMELINE_H)
+#define _KBASE_TIMELINE_H
+
+#include <mali_kbase.h>
+
+/*****************************************************************************/
+
+struct kbase_timeline;
+
+/**
+ * kbase_timeline_init - initialize timeline infrastructure in kernel
+ * @timeline:            Newly created instance of kbase_timeline will
+ *                       be stored in this pointer.
+ * @timeline_is_enabled: Timeline status will be written to this variable
+ *                       when a client is attached/detached. The variable
+ *                       must remain valid while the timeline instance is valid.
+ * Return: zero on success, negative number on error
+ */
+int kbase_timeline_init(struct kbase_timeline **timeline,
+	atomic_t *timeline_is_enabled);
+
+/**
+ * kbase_timeline_term - terminate timeline infrastructure in kernel
+ *
+ * @timeline:     Timeline instance to be terminated. It must have been
+ *                previously created with kbase_timeline_init().
+ */
+void kbase_timeline_term(struct kbase_timeline *timeline);
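+
+/*
+ * Init/term pairing sketch (illustrative only; "enabled" is an atomic
+ * owned by the caller and must stay valid for the timeline's lifetime):
+ *
+ *	struct kbase_timeline *tl;
+ *	atomic_t enabled = ATOMIC_INIT(0);
+ *	int err;
+ *
+ *	err = kbase_timeline_init(&tl, &enabled);
+ *	if (err)
+ *		return err;
+ *	...
+ *	kbase_timeline_term(tl);
+ */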
+
+/**
+ * kbase_timeline_io_acquire - acquire timeline stream file descriptor
+ * @kbdev:     Kbase device
+ * @flags:     Timeline stream flags
+ *
+ * This descriptor is meant to be used by a userspace timeline client to gain
+ * access to the kernel timeline stream. The stream is later broadcast by user
+ * space to the timeline client.
+ * Only one entity can own the descriptor at any given time. The descriptor
+ * shall be closed if unused. If the descriptor cannot be obtained (i.e. it is
+ * already in use) a negative value is returned.
+ *
+ * Return: file descriptor on success, negative number on error
+ */
+int kbase_timeline_io_acquire(struct kbase_device *kbdev, u32 flags);
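+
+/*
+ * The descriptor returned by this call is consumed from user space, roughly
+ * as sketched below (illustrative only; the read buffer must hold at least
+ * one packet, and PACKET_SIZE here refers to the tlstream packet size):
+ *
+ *	char buf[PACKET_SIZE];
+ *	ssize_t n;
+ *
+ *	while ((n = read(fd, buf, sizeof(buf))) > 0)
+ *		... decode n bytes of timeline packets from buf ...
+ *	close(fd);
+ */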
+
+/**
+ * kbase_timeline_streams_flush - flush timeline streams.
+ * @timeline:     Timeline instance
+ *
+ * Function will flush pending data in all timeline streams.
+ */
+void kbase_timeline_streams_flush(struct kbase_timeline *timeline);
+
+/**
+ * kbase_timeline_streams_body_reset - reset timeline body streams.
+ * @timeline:     Timeline instance
+ *
+ * Function will discard pending data in all timeline body streams.
+ */
+void kbase_timeline_streams_body_reset(struct kbase_timeline *timeline);
+
+#if MALI_UNIT_TEST
+/**
+ * kbase_timeline_test - start timeline stream data generator
+ * @kbdev:     Kernel common context
+ * @tpw_count: Number of trace point writers in each context
+ * @msg_delay: Time delay in milliseconds between trace points written by one
+ *             writer
+ * @msg_count: Number of trace points written by one writer
+ * @aux_msg:   If non-zero aux messages will be included
+ *
+ * This test starts a requested number of asynchronous writers in both IRQ and
+ * thread context. Each writer generates the required number of test
+ * tracepoints (tracepoints with embedded information about the writer that
+ * should be verified by the user space reader). Tracepoints are emitted in
+ * all timeline body streams. If aux_msg is non-zero each writer also
+ * generates non-testable tracepoints (tracepoints without information about
+ * the writer). These tracepoints are used to check the correctness of the
+ * remaining timeline message generating functions. Each writer waits the
+ * requested time between generating consecutive sets of messages. This call
+ * blocks until all writers finish.
+ */
+void kbase_timeline_test(
+	struct kbase_device *kbdev,
+	unsigned int tpw_count,
+	unsigned int msg_delay,
+	unsigned int msg_count,
+	int          aux_msg);
+
+/**
+ * kbase_timeline_stats - read timeline stream statistics
+ * @timeline:        Timeline instance
+ * @bytes_collected: Will hold number of bytes read by the user
+ * @bytes_generated: Will hold number of bytes generated by trace points
+ */
+void kbase_timeline_stats(struct kbase_timeline *timeline,
+		u32 *bytes_collected, u32 *bytes_generated);
+#endif /* MALI_UNIT_TEST */
+
+#endif /* _KBASE_TIMELINE_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_timeline_io.c b/drivers/gpu/arm/midgard/mali_kbase_timeline_io.c
new file mode 100644
index 0000000..ffcf84a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_timeline_io.c
@@ -0,0 +1,314 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase_timeline_priv.h>
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_tracepoints.h>
+
+#include <linux/poll.h>
+
+/* The timeline stream file operations functions. */
+static ssize_t kbasep_timeline_io_read(
+		struct file *filp,
+		char __user *buffer,
+		size_t      size,
+		loff_t      *f_pos);
+static unsigned int kbasep_timeline_io_poll(struct file *filp, poll_table *wait);
+static int kbasep_timeline_io_release(struct inode *inode, struct file *filp);
+
+/* The timeline stream file operations structure. */
+const struct file_operations kbasep_tlstream_fops = {
+	.owner = THIS_MODULE,
+	.release = kbasep_timeline_io_release,
+	.read    = kbasep_timeline_io_read,
+	.poll    = kbasep_timeline_io_poll,
+};
+
+/**
+ * kbasep_timeline_io_packet_pending - check timeline streams for pending packets
+ * @timeline:      Timeline instance
+ * @ready_stream:  Pointer to variable where stream will be placed
+ * @rb_idx_raw:    Pointer to variable where read buffer index will be placed
+ *
+ * The function checks all streams for pending packets. It stops as soon as a
+ * packet ready to be submitted to user space is detected. The variables
+ * passed by pointer as parameters to this function are updated with values
+ * pointing to the right stream and buffer.
+ *
+ * Return: non-zero if any of the timeline streams has at least one packet
+ * ready
+ */
+static int kbasep_timeline_io_packet_pending(
+		struct kbase_timeline  *timeline,
+		struct kbase_tlstream **ready_stream,
+		unsigned int           *rb_idx_raw)
+{
+	enum tl_stream_type i;
+
+	KBASE_DEBUG_ASSERT(ready_stream);
+	KBASE_DEBUG_ASSERT(rb_idx_raw);
+
+	for (i = (enum tl_stream_type)0; i < TL_STREAM_TYPE_COUNT; ++i) {
+		struct kbase_tlstream *stream = &timeline->streams[i];
+		*rb_idx_raw = atomic_read(&stream->rbi);
+		/* Read buffer index may be updated by the writer in case of
+		 * overflow. Read and write buffer indexes must be
+		 * loaded in the correct order.
+		 */
+		smp_rmb();
+		if (atomic_read(&stream->wbi) != *rb_idx_raw) {
+			*ready_stream = stream;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * kbasep_timeline_copy_header - copy timeline headers to the user
+ * @timeline:    Timeline instance
+ * @buffer:      Pointer to the buffer provided by user
+ * @size:        Maximum amount of data that can be stored in the buffer
+ * @copy_len:    Pointer to amount of bytes that has been copied already
+ *               within the read system call.
+ *
+ * This helper function checks whether the timeline headers have already been
+ * sent to the user, and if not, sends them. @copy_len is updated
+ * accordingly.
+ *
+ * Return: 0 on success, -1 if copy_to_user() has failed.
+ */
+static inline int kbasep_timeline_copy_header(
+	struct kbase_timeline *timeline,
+	char __user *buffer,
+	size_t size,
+	ssize_t *copy_len)
+{
+	if (timeline->obj_header_btc) {
+		size_t offset = obj_desc_header_size -
+			timeline->obj_header_btc;
+
+		size_t header_cp_size = MIN(
+			size - *copy_len,
+			timeline->obj_header_btc);
+
+		if (copy_to_user(
+			    &buffer[*copy_len],
+			    &obj_desc_header[offset],
+			    header_cp_size))
+			return -1;
+
+		timeline->obj_header_btc -= header_cp_size;
+		*copy_len += header_cp_size;
+	}
+
+	if (timeline->aux_header_btc) {
+		size_t offset = aux_desc_header_size -
+			timeline->aux_header_btc;
+		size_t header_cp_size = MIN(
+			size - *copy_len,
+			timeline->aux_header_btc);
+
+		if (copy_to_user(
+			    &buffer[*copy_len],
+			    &aux_desc_header[offset],
+			    header_cp_size))
+			return -1;
+
+		timeline->aux_header_btc -= header_cp_size;
+		*copy_len += header_cp_size;
+	}
+	return 0;
+}
+
+
+/**
+ * kbasep_timeline_io_read - copy data from streams to buffer provided by user
+ * @filp:   Pointer to file structure
+ * @buffer: Pointer to the buffer provided by user
+ * @size:   Maximum amount of data that can be stored in the buffer
+ * @f_pos:  Pointer to file offset (validated but not advanced)
+ *
+ * Return: number of bytes stored in the buffer
+ */
+static ssize_t kbasep_timeline_io_read(
+		struct file *filp,
+		char __user *buffer,
+		size_t      size,
+		loff_t      *f_pos)
+{
+	ssize_t copy_len = 0;
+	struct kbase_timeline *timeline;
+
+	KBASE_DEBUG_ASSERT(filp);
+	KBASE_DEBUG_ASSERT(f_pos);
+
+	if (WARN_ON(!filp->private_data))
+		return -EFAULT;
+
+	timeline = (struct kbase_timeline *) filp->private_data;
+
+	if (!buffer)
+		return -EINVAL;
+
+	if ((*f_pos < 0) || (size < PACKET_SIZE))
+		return -EINVAL;
+
+	mutex_lock(&timeline->reader_lock);
+
+	while (copy_len < size) {
+		struct kbase_tlstream *stream = NULL;
+		unsigned int        rb_idx_raw = 0;
+		unsigned int        wb_idx_raw;
+		unsigned int        rb_idx;
+		size_t              rb_size;
+
+		if (kbasep_timeline_copy_header(
+			    timeline, buffer, size, &copy_len)) {
+			copy_len = -EFAULT;
+			break;
+		}
+
+		/* If we already read some packets and there is no
+		 * packet pending then return to the user.
+		 * If we don't have any data yet, wait for a packet to be
+		 * submitted.
+		 */
+		if (copy_len > 0) {
+			if (!kbasep_timeline_io_packet_pending(
+						timeline,
+						&stream,
+						&rb_idx_raw))
+				break;
+		} else {
+			if (wait_event_interruptible(
+						timeline->event_queue,
+						kbasep_timeline_io_packet_pending(
+							timeline,
+							&stream,
+							&rb_idx_raw))) {
+				copy_len = -ERESTARTSYS;
+				break;
+			}
+		}
+
+		if (WARN_ON(!stream)) {
+			copy_len = -EFAULT;
+			break;
+		}
+
+		/* Check if this packet fits into the user buffer.
+		 * If so copy its content.
+		 */
+		rb_idx = rb_idx_raw % PACKET_COUNT;
+		rb_size = atomic_read(&stream->buffer[rb_idx].size);
+		if (rb_size > size - copy_len)
+			break;
+		if (copy_to_user(
+					&buffer[copy_len],
+					stream->buffer[rb_idx].data,
+					rb_size)) {
+			copy_len = -EFAULT;
+			break;
+		}
+
+		/* If the distance between read buffer index and write
+		 * buffer index became more than PACKET_COUNT, then an overflow
+		 * happened and we need to ignore the last portion of bytes
+		 * that we have just sent to the user.
+		 */
+		smp_rmb();
+		wb_idx_raw = atomic_read(&stream->wbi);
+
+		if (wb_idx_raw - rb_idx_raw < PACKET_COUNT) {
+			copy_len += rb_size;
+			atomic_inc(&stream->rbi);
+#if MALI_UNIT_TEST
+			atomic_add(rb_size, &timeline->bytes_collected);
+#endif /* MALI_UNIT_TEST */
+
+		} else {
+			const unsigned int new_rb_idx_raw =
+				wb_idx_raw - PACKET_COUNT + 1;
+			/* Adjust read buffer index to the next valid buffer */
+			atomic_set(&stream->rbi, new_rb_idx_raw);
+		}
+	}
+
+	mutex_unlock(&timeline->reader_lock);
+
+	return copy_len;
+}
+
+/**
+ * kbasep_timeline_io_poll - poll timeline stream for packets
+ * @filp: Pointer to file structure
+ * @wait: Pointer to poll table
+ * Return: POLLIN if data can be read without blocking, otherwise zero
+ */
+static unsigned int kbasep_timeline_io_poll(struct file *filp, poll_table *wait)
+{
+	struct kbase_tlstream *stream;
+	unsigned int        rb_idx;
+	struct kbase_timeline *timeline;
+
+	KBASE_DEBUG_ASSERT(filp);
+	KBASE_DEBUG_ASSERT(wait);
+
+	if (WARN_ON(!filp->private_data))
+		return -EFAULT;
+
+	timeline = (struct kbase_timeline *) filp->private_data;
+
+	poll_wait(filp, &timeline->event_queue, wait);
+	if (kbasep_timeline_io_packet_pending(timeline, &stream, &rb_idx))
+		return POLLIN;
+	return 0;
+}
+
+/**
+ * kbasep_timeline_io_release - release timeline stream descriptor
+ * @inode: Pointer to inode structure
+ * @filp:  Pointer to file structure
+ *
+ * Return: always returns zero
+ */
+static int kbasep_timeline_io_release(struct inode *inode, struct file *filp)
+{
+	struct kbase_timeline *timeline;
+
+	KBASE_DEBUG_ASSERT(inode);
+	KBASE_DEBUG_ASSERT(filp);
+	KBASE_DEBUG_ASSERT(filp->private_data);
+
+	CSTD_UNUSED(inode);
+
+	timeline = (struct kbase_timeline *) filp->private_data;
+
+	/* Stop autoflush timer before releasing access to streams. */
+	atomic_set(&timeline->autoflush_timer_active, 0);
+	del_timer_sync(&timeline->autoflush_timer);
+
+	atomic_set(timeline->is_enabled, 0);
+	return 0;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_timeline_priv.h b/drivers/gpu/arm/midgard/mali_kbase_timeline_priv.h
new file mode 100644
index 0000000..e4a4a20
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_timeline_priv.h
@@ -0,0 +1,63 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_TIMELINE_PRIV_H)
+#define _KBASE_TIMELINE_PRIV_H
+
+#include <mali_kbase.h>
+#include <mali_kbase_tlstream.h>
+
+#include <linux/timer.h>
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+
+/**
+ * struct kbase_timeline - timeline state structure
+ * @streams:                The timeline streams generated by kernel
+ * @autoflush_timer:        Autoflush timer
+ * @autoflush_timer_active: If non-zero autoflush timer is active
+ * @reader_lock:            Reader lock. Only one reader is allowed to
+ *                          have access to the timeline streams at any given time.
+ * @event_queue:            Timeline stream event queue
+ * @bytes_collected:        Number of bytes read by user
+ * @is_enabled:             Zero if the timeline is disabled; the timeline
+ *                          stream flags otherwise. See kbase_timeline_io_acquire().
+ * @obj_header_btc:         Remaining bytes to copy for the object stream header
+ * @aux_header_btc:         Remaining bytes to copy for the aux stream header
+ */
+struct kbase_timeline {
+	struct kbase_tlstream streams[TL_STREAM_TYPE_COUNT];
+	struct timer_list autoflush_timer;
+	atomic_t          autoflush_timer_active;
+	struct mutex      reader_lock;
+	wait_queue_head_t event_queue;
+#if MALI_UNIT_TEST
+	atomic_t          bytes_collected;
+#endif /* MALI_UNIT_TEST */
+	atomic_t         *is_enabled;
+	size_t            obj_header_btc;
+	size_t            aux_header_btc;
+};
+
+extern const struct file_operations kbasep_tlstream_fops;
+
+#endif /* _KBASE_TIMELINE_PRIV_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_tl_serialize.h b/drivers/gpu/arm/midgard/mali_kbase_tl_serialize.h
new file mode 100644
index 0000000..90808ce
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_tl_serialize.h
@@ -0,0 +1,127 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_TL_SERIALIZE_H)
+#define _KBASE_TL_SERIALIZE_H
+
+#include <mali_kbase.h>
+
+#include <linux/timer.h>
+
+/* The number of nanoseconds in a second. */
+#define NSECS_IN_SEC       1000000000ull /* ns */
+
+/**
+ * kbasep_serialize_bytes - serialize bytes to the message buffer
+ *
+ * Serialize bytes as is using memcpy()
+ *
+ * @buffer:    Message buffer
+ * @pos:       Message buffer offset
+ * @bytes:     Bytes to serialize
+ * @len:       Length of bytes array
+ *
+ * Return: updated position in the buffer
+ */
+static inline size_t kbasep_serialize_bytes(
+		char       *buffer,
+		size_t     pos,
+		const void *bytes,
+		size_t     len)
+{
+	KBASE_DEBUG_ASSERT(buffer);
+	KBASE_DEBUG_ASSERT(bytes);
+
+	memcpy(&buffer[pos], bytes, len);
+
+	return pos + len;
+}
+
+/**
+ * kbasep_serialize_string - serialize string to the message buffer
+ *
+ * The string is serialized as 4 bytes holding the string size (content
+ * plus NUL terminator), followed by the string content including its
+ * NUL terminator.
+ *
+ * @buffer:         Message buffer
+ * @pos:            Message buffer offset
+ * @string:         String to serialize
+ * @max_write_size: Number of bytes that can be stored in buffer
+ *
+ * Return: updated position in the buffer
+ */
+static inline size_t kbasep_serialize_string(
+		char       *buffer,
+		size_t     pos,
+		const char *string,
+		size_t     max_write_size)
+{
+	u32 string_len;
+
+	KBASE_DEBUG_ASSERT(buffer);
+	KBASE_DEBUG_ASSERT(string);
+	/* Timeline string consists of at least string length and nul
+	 * terminator.
+	 */
+	KBASE_DEBUG_ASSERT(max_write_size >= sizeof(string_len) + sizeof(char));
+	max_write_size -= sizeof(string_len);
+
+	string_len = strlcpy(
+			&buffer[pos + sizeof(string_len)],
+			string,
+			max_write_size);
+	string_len += sizeof(char);
+
+	/* Make sure that the source string fits into the buffer. */
+	KBASE_DEBUG_ASSERT(string_len <= max_write_size);
+
+	/* Update string length. */
+	memcpy(&buffer[pos], &string_len, sizeof(string_len));
+
+	return pos + sizeof(string_len) + string_len;
+}
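+
+/* Worked example: kbasep_serialize_string(buffer, 0, "GPU", 64) writes the
+ * u32 length 4 (three characters plus the NUL terminator) at offsets 0..3,
+ * the bytes 'G', 'P', 'U', '\0' at offsets 4..7, and returns 8.
+ */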
+
+/**
+ * kbasep_serialize_timestamp - serialize timestamp to the message buffer
+ *
+ * Get the current time of the raw monotonic clock with getrawmonotonic()
+ * and serialize it as a 64 bit unsigned integer.
+ *
+ * @buffer: Message buffer
+ * @pos:    Message buffer offset
+ *
+ * Return: updated position in the buffer
+ */
+static inline size_t kbasep_serialize_timestamp(void *buffer, size_t pos)
+{
+	struct timespec ts;
+	u64             timestamp;
+
+	getrawmonotonic(&ts);
+	timestamp = (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
+
+	return kbasep_serialize_bytes(
+			buffer, pos,
+			&timestamp, sizeof(timestamp));
+}
+#endif /* _KBASE_TL_SERIALIZE_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_tlstream.c b/drivers/gpu/arm/midgard/mali_kbase_tlstream.c
new file mode 100644
index 0000000..2a76bc0
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_tlstream.c
@@ -0,0 +1,287 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_tlstream.h"
+#include "mali_kbase_tl_serialize.h"
+#include "mali_kbase_mipe_proto.h"
+
+/**
+ * kbasep_packet_header_setup - setup the packet header
+ * @buffer:     pointer to the buffer
+ * @pkt_family: packet's family
+ * @pkt_type:   packet's type
+ * @pkt_class:  packet's class
+ * @stream_id:  stream id
+ * @numbered:   non-zero if this stream is numbered
+ *
+ * Function sets up immutable part of packet header in the given buffer.
+ */
+static void kbasep_packet_header_setup(
+	char                  *buffer,
+	enum tl_packet_family pkt_family,
+	enum tl_packet_class  pkt_class,
+	enum tl_packet_type   pkt_type,
+	unsigned int          stream_id,
+	int                   numbered)
+{
+	u32 words[2] = {
+		MIPE_PACKET_HEADER_W0(pkt_family, pkt_class, pkt_type, stream_id),
+		MIPE_PACKET_HEADER_W1(0, !!numbered),
+	};
+	memcpy(buffer, words, sizeof(words));
+}
+
+/**
+ * kbasep_packet_header_update - update the packet header
+ * @buffer:    pointer to the buffer
+ * @data_size: amount of data carried in this packet
+ * @numbered:  non-zero if the stream is numbered
+ *
+ * Function updates mutable part of packet header in the given buffer.
+ * Note that the value of data_size must not include the size of the header.
+ */
+static void kbasep_packet_header_update(
+		char  *buffer,
+		size_t data_size,
+		int    numbered)
+{
+	u32 word0;
+	u32 word1 = MIPE_PACKET_HEADER_W1((u32)data_size, !!numbered);
+
+	KBASE_DEBUG_ASSERT(buffer);
+	CSTD_UNUSED(word0);
+
+	memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1));
+}
+
+/**
+ * kbasep_packet_number_update - update the packet number
+ * @buffer:  pointer to the buffer
+ * @counter: value of packet counter for this packet's stream
+ *
+ * Function updates packet number embedded within the packet placed in the
+ * given buffer.
+ */
+static void kbasep_packet_number_update(char *buffer, u32 counter)
+{
+	KBASE_DEBUG_ASSERT(buffer);
+
+	memcpy(&buffer[PACKET_HEADER_SIZE], &counter, sizeof(counter));
+}
+
+void kbase_tlstream_reset(struct kbase_tlstream *stream)
+{
+	unsigned int i;
+
+	for (i = 0; i < PACKET_COUNT; i++) {
+		if (stream->numbered)
+			atomic_set(
+				&stream->buffer[i].size,
+				PACKET_HEADER_SIZE +
+				PACKET_NUMBER_SIZE);
+		else
+			atomic_set(&stream->buffer[i].size, PACKET_HEADER_SIZE);
+	}
+
+	atomic_set(&stream->wbi, 0);
+	atomic_set(&stream->rbi, 0);
+}
+
+/* Configuration of timeline streams generated by the kernel.
+ * The kernel emits only streams containing either timeline object events or
+ * auxiliary events. All streams have a stream id value of 1 (as opposed to
+ * user space streams, which have a value of 0).
+ */
+static const struct {
+	enum tl_packet_family pkt_family;
+	enum tl_packet_class  pkt_class;
+	enum tl_packet_type   pkt_type;
+	unsigned int          stream_id;
+} tl_stream_cfg[TL_STREAM_TYPE_COUNT] = {
+	{TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_SUMMARY, 1},
+	{TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_BODY,    1},
+	{TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_BODY,    1}
+};
+
+void kbase_tlstream_init(
+	struct kbase_tlstream *stream,
+	enum tl_stream_type    stream_type,
+	wait_queue_head_t     *ready_read)
+{
+	unsigned int i;
+
+	KBASE_DEBUG_ASSERT(stream);
+	KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+
+	spin_lock_init(&stream->lock);
+
+	/* All packets carrying tracepoints shall be numbered. */
+	if (TL_PACKET_TYPE_BODY == tl_stream_cfg[stream_type].pkt_type)
+		stream->numbered = 1;
+	else
+		stream->numbered = 0;
+
+	for (i = 0; i < PACKET_COUNT; i++)
+		kbasep_packet_header_setup(
+			stream->buffer[i].data,
+			tl_stream_cfg[stream_type].pkt_family,
+			tl_stream_cfg[stream_type].pkt_class,
+			tl_stream_cfg[stream_type].pkt_type,
+			tl_stream_cfg[stream_type].stream_id,
+			stream->numbered);
+
+#if MALI_UNIT_TEST
+	atomic_set(&stream->bytes_generated, 0);
+#endif
+	stream->ready_read = ready_read;
+
+	kbase_tlstream_reset(stream);
+}
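+
+/* A timeline instance would typically initialize one stream of each type
+ * against a shared event queue (sketch; the actual loop lives in the
+ * timeline code, not in this file):
+ *
+ *   enum tl_stream_type i;
+ *
+ *   for (i = 0; i < TL_STREAM_TYPE_COUNT; i++)
+ *       kbase_tlstream_init(&timeline->streams[i], i,
+ *                           &timeline->event_queue);
+ */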
+
+void kbase_tlstream_term(struct kbase_tlstream *stream)
+{
+	KBASE_DEBUG_ASSERT(stream);
+}
+
+/**
+ * kbasep_tlstream_msgbuf_submit - submit packet to user space
+ * @stream:     Pointer to the stream structure
+ * @wb_idx_raw: Write buffer index
+ * @wb_size:    Length of data stored in the current buffer
+ *
+ * Updates the currently written buffer with the packet header.
+ * Then the write index is incremented and the buffer is handed to user space.
+ *
+ * Return: length of data in the new buffer
+ *
+ * Warning: the caller must update the stream structure with the returned value.
+ */
+static size_t kbasep_tlstream_msgbuf_submit(
+		struct kbase_tlstream *stream,
+		unsigned int      wb_idx_raw,
+		unsigned int      wb_size)
+{
+	unsigned int wb_idx = wb_idx_raw % PACKET_COUNT;
+
+	/* Set stream as flushed. */
+	atomic_set(&stream->autoflush_counter, -1);
+
+	kbasep_packet_header_update(
+		stream->buffer[wb_idx].data,
+		wb_size - PACKET_HEADER_SIZE,
+		stream->numbered);
+
+	if (stream->numbered)
+		kbasep_packet_number_update(
+			stream->buffer[wb_idx].data,
+			wb_idx_raw);
+
+	/* Increasing the write buffer index will expose this packet to the
+	 * reader. As stream->lock is not taken on the reader side we must
+	 * make sure memory is updated correctly before this happens.
+	 */
+	smp_wmb();
+	atomic_inc(&stream->wbi);
+
+	/* Inform user that packets are ready for reading. */
+	wake_up_interruptible(stream->ready_read);
+
+	wb_size = PACKET_HEADER_SIZE;
+	if (stream->numbered)
+		wb_size += PACKET_NUMBER_SIZE;
+
+	return wb_size;
+}
+
+char *kbase_tlstream_msgbuf_acquire(
+	struct kbase_tlstream *stream,
+	size_t              msg_size,
+	unsigned long       *flags) __acquires(&stream->lock)
+{
+	unsigned int     wb_idx_raw;
+	unsigned int     wb_idx;
+	size_t           wb_size;
+
+	KBASE_DEBUG_ASSERT(
+		PACKET_SIZE - PACKET_HEADER_SIZE - PACKET_NUMBER_SIZE >=
+		msg_size);
+
+	spin_lock_irqsave(&stream->lock, *flags);
+
+	wb_idx_raw = atomic_read(&stream->wbi);
+	wb_idx     = wb_idx_raw % PACKET_COUNT;
+	wb_size    = atomic_read(&stream->buffer[wb_idx].size);
+
+	/* Select next buffer if data will not fit into current one. */
+	if (PACKET_SIZE < wb_size + msg_size) {
+		wb_size = kbasep_tlstream_msgbuf_submit(
+				stream, wb_idx_raw, wb_size);
+		wb_idx  = (wb_idx_raw + 1) % PACKET_COUNT;
+	}
+
+	/* Reserve space in selected buffer. */
+	atomic_set(&stream->buffer[wb_idx].size, wb_size + msg_size);
+
+#if MALI_UNIT_TEST
+	atomic_add(msg_size, &stream->bytes_generated);
+#endif /* MALI_UNIT_TEST */
+
+	return &stream->buffer[wb_idx].data[wb_size];
+}
+
+void kbase_tlstream_msgbuf_release(
+	struct kbase_tlstream *stream,
+	unsigned long       flags) __releases(&stream->lock)
+{
+	/* Mark stream as containing unflushed data. */
+	atomic_set(&stream->autoflush_counter, 0);
+
+	spin_unlock_irqrestore(&stream->lock, flags);
+}
+
+void kbase_tlstream_flush_stream(
+	struct kbase_tlstream *stream)
+{
+	unsigned long    flags;
+	unsigned int     wb_idx_raw;
+	unsigned int     wb_idx;
+	size_t           wb_size;
+	size_t           min_size = PACKET_HEADER_SIZE;
+
+	if (stream->numbered)
+		min_size += PACKET_NUMBER_SIZE;
+
+	spin_lock_irqsave(&stream->lock, flags);
+
+	wb_idx_raw = atomic_read(&stream->wbi);
+	wb_idx     = wb_idx_raw % PACKET_COUNT;
+	wb_size    = atomic_read(&stream->buffer[wb_idx].size);
+
+	if (wb_size > min_size) {
+		wb_size = kbasep_tlstream_msgbuf_submit(
+				stream, wb_idx_raw, wb_size);
+		wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
+		atomic_set(&stream->buffer[wb_idx].size, wb_size);
+	}
+	spin_unlock_irqrestore(&stream->lock, flags);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_tlstream.h b/drivers/gpu/arm/midgard/mali_kbase_tlstream.h
new file mode 100644
index 0000000..5797738
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_tlstream.h
@@ -0,0 +1,167 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_TLSTREAM_H)
+#define _KBASE_TLSTREAM_H
+
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+
+/* The maximum size of a single packet used by timeline. */
+#define PACKET_SIZE        4096 /* bytes */
+
+/* The number of packets used by one timeline stream. */
+#if defined(CONFIG_MALI_JOB_DUMP) || defined(CONFIG_MALI_VECTOR_DUMP)
+	#define PACKET_COUNT       64
+#else
+	#define PACKET_COUNT       32
+#endif
+
+/* The maximum expected length of string in tracepoint descriptor. */
+#define STRLEN_MAX         64 /* bytes */
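+
+/* For sizing purposes: each stream therefore reserves
+ * PACKET_COUNT * PACKET_SIZE bytes of packet storage -- 32 * 4096 = 128 KiB
+ * by default, or 64 * 4096 = 256 KiB with job/vector dumping enabled -- and
+ * a timeline holds TL_STREAM_TYPE_COUNT (three) such streams.
+ */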
+
+/**
+ * struct kbase_tlstream - timeline stream structure
+ * @lock:              Message order lock
+ * @buffer:            Array of buffers
+ * @wbi:               Write buffer index
+ * @rbi:               Read buffer index
+ * @numbered:          If non-zero stream's packets are sequentially numbered
+ * @autoflush_counter: Counter tracking stream's autoflush state
+ * @ready_read:        Pointer to a wait queue, which is signaled when
+ *                     timeline messages are ready for collection.
+ * @bytes_generated:   Number of bytes generated by tracepoint messages
+ *
+ * This structure holds information needed to construct proper packets in the
+ * timeline stream.
+ *
+ * Each message in the sequence must bear a timestamp that is
+ * greater than that of the previous message in the same stream. For this
+ * reason a lock is held throughout the process of message creation.
+ *
+ * Each stream contains a set of buffers. Each buffer will hold one MIPE
+ * packet. If there is not enough free space to store an incoming
+ * message, the oldest buffer is discarded. Each packet in a timeline body
+ * stream has a sequence number embedded; this value must increment
+ * monotonically and is used by the packet receiver to discover such
+ * buffer overflows.
+ *
+ * The autoflush counter is set to a negative number when there is no data
+ * pending for flush and it is set to zero on every update of the buffer. The
+ * autoflush timer will increment the counter by one on every expiry. If there
+ * is no activity on the buffer for two consecutive timer expiries, the stream
+ * buffer will be flushed.
+ */
+struct kbase_tlstream {
+	spinlock_t lock;
+
+	struct {
+		atomic_t size;              /* number of bytes in buffer */
+		char     data[PACKET_SIZE]; /* buffer's data */
+	} buffer[PACKET_COUNT];
+
+	atomic_t wbi;
+	atomic_t rbi;
+
+	int      numbered;
+	atomic_t autoflush_counter;
+	wait_queue_head_t *ready_read;
+#if MALI_UNIT_TEST
+	atomic_t bytes_generated;
+#endif
+};
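+
+/* Illustrative autoflush timer expiry logic implementing the scheme
+ * described above (a sketch only; the driver's actual callback lives in
+ * the timeline code):
+ *
+ *   int cnt = atomic_read(&stream->autoflush_counter);
+ *
+ *   if (cnt < 0)
+ *       return;                  (nothing pending, already flushed)
+ *   if (atomic_cmpxchg(&stream->autoflush_counter, cnt, cnt + 1) != cnt)
+ *       return;                  (a writer touched the buffer, start over)
+ *   if (cnt > 0)                 (second consecutive idle expiry)
+ *       kbase_tlstream_flush_stream(stream);
+ */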
+
+/* Types of streams generated by timeline. */
+enum tl_stream_type {
+	TL_STREAM_TYPE_FIRST,
+	TL_STREAM_TYPE_OBJ_SUMMARY = TL_STREAM_TYPE_FIRST,
+	TL_STREAM_TYPE_OBJ,
+	TL_STREAM_TYPE_AUX,
+
+	TL_STREAM_TYPE_COUNT
+};
+
+/**
+ * kbase_tlstream_init - initialize timeline stream
+ * @stream:      Pointer to the stream structure
+ * @stream_type: Stream type
+ * @ready_read:  Pointer to a wait queue to signal when
+ *               timeline messages are ready for collection.
+ */
+void kbase_tlstream_init(struct kbase_tlstream *stream,
+	enum tl_stream_type stream_type,
+	wait_queue_head_t  *ready_read);
+
+/**
+ * kbase_tlstream_term - terminate timeline stream
+ * @stream: Pointer to the stream structure
+ */
+void kbase_tlstream_term(struct kbase_tlstream *stream);
+
+/**
+ * kbase_tlstream_reset - reset stream
+ * @stream:    Pointer to the stream structure
+ *
+ * Function discards all pending messages and resets packet counters.
+ */
+void kbase_tlstream_reset(struct kbase_tlstream *stream);
+
+/**
+ * kbase_tlstream_msgbuf_acquire - lock selected stream and reserve a buffer
+ * @stream:      Pointer to the stream structure
+ * @msg_size:    Message size
+ * @flags:       Pointer to store flags passed back on stream release
+ *
+ * Lock the stream and reserve the number of bytes requested
+ * in msg_size for the user.
+ *
+ * Return: pointer to the buffer where a message can be stored
+ *
+ * Warning: The stream must be released with kbase_tlstream_msgbuf_release().
+ *          Only atomic operations are allowed while the stream is locked
+ *          (i.e. do not use any operation that may sleep).
+ */
+char *kbase_tlstream_msgbuf_acquire(struct kbase_tlstream *stream,
+	size_t msg_size, unsigned long *flags) __acquires(&stream->lock);
+
+/**
+ * kbase_tlstream_msgbuf_release - unlock selected stream
+ * @stream:    Pointer to the stream structure
+ * @flags:     Value obtained during stream acquire
+ *
+ * Release the stream that has been previously
+ * locked with a call to kbase_tlstream_msgbuf_acquire().
+ */
+void kbase_tlstream_msgbuf_release(struct kbase_tlstream *stream,
+	unsigned long flags) __releases(&stream->lock);
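+
+/* Typical write sequence (sketch, mirroring the generated tracepoint
+ * writers in mali_kbase_tracepoints.c):
+ *
+ *   unsigned long flags;
+ *   size_t pos = 0;
+ *   char *buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &flags);
+ *
+ *   pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ *   pos = kbasep_serialize_timestamp(buffer, pos);
+ *   (serialize the remaining message fields the same way)
+ *   kbase_tlstream_msgbuf_release(stream, flags);
+ */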
+
+/**
+ * kbase_tlstream_flush_stream - flush stream
+ * @stream:     Pointer to the stream structure
+ *
+ * Flush pending data in the timeline stream.
+ */
+void kbase_tlstream_flush_stream(struct kbase_tlstream *stream);
+
+#endif /* _KBASE_TLSTREAM_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h b/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h
new file mode 100644
index 0000000..77fb818
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h
@@ -0,0 +1,261 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/* ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE         *****
+ * *****            DO NOT INCLUDE DIRECTLY                  *****
+ * *****            THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
+
+/*
+ * The purpose of this header file is just to contain a list of trace code identifiers
+ *
+ * Each identifier is wrapped in a macro, so that its string form and enum form can be created
+ *
+ * Each macro is separated with a comma, to allow insertion into an array initializer or enum definition block.
+ *
+ * This allows automatic creation of an enum and a corresponding array of strings
+ *
+ * Before #including, the includer MUST #define KBASE_TRACE_CODE_MAKE_CODE.
+ * After #including, the includer MUST #undef KBASE_TRACE_CODE_MAKE_CODE.
+ *
+ * e.g.:
+ * #define KBASE_TRACE_CODE( X ) KBASE_TRACE_CODE_ ## X
+ * typedef enum
+ * {
+ * #define KBASE_TRACE_CODE_MAKE_CODE( X ) KBASE_TRACE_CODE( X )
+ * #include "mali_kbase_trace_defs.h"
+ * #undef  KBASE_TRACE_CODE_MAKE_CODE
+ * } kbase_trace_code;
+ *
+ * IMPORTANT: THIS FILE MUST NOT BE USED FOR ANY PURPOSE OTHER THAN THE ABOVE
+ *
+ *
+ * The use of the macro here is:
+ * - KBASE_TRACE_CODE_MAKE_CODE( X )
+ *
+ * Which produces:
+ * - For an enum, KBASE_TRACE_CODE_X
+ * - For a string, "X"
+ *
+ *
+ * For example:
+ * - KBASE_TRACE_CODE_MAKE_CODE( JM_JOB_COMPLETE ) expands to:
+ *  - KBASE_TRACE_CODE_JM_JOB_COMPLETE for the enum
+ *  - "JM_JOB_COMPLETE" for the string
+ * - To use it to trace an event, do:
+ *  - KBASE_TRACE_ADD( kbdev, JM_JOB_COMPLETE, subcode, kctx, uatom, val );
+ */
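+
+/* The corresponding array of strings is generated the same way (sketch;
+ * the array name here is illustrative):
+ *
+ * #define KBASE_TRACE_CODE_MAKE_CODE( X ) #X
+ * static const char * const kbasep_trace_code_strings[] = {
+ * #include "mali_kbase_trace_defs.h"
+ * };
+ * #undef KBASE_TRACE_CODE_MAKE_CODE
+ */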
+
+#if 0 /* Dummy section to avoid breaking formatting */
+int dummy_array[] = {
+#endif
+
+/*
+ * Core events
+ */
+	/* no info_val, no gpu_addr, no atom */
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_DESTROY),
+	/* no info_val, no gpu_addr, no atom */
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_HWINSTR_TERM),
+	/* info_val == GPU_IRQ_STATUS register */
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ),
+	/* info_val == bits cleared */
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_CLEAR),
+	/* info_val == GPU_IRQ_STATUS register */
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_DONE),
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_SOFT_RESET),
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_HARD_RESET),
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_CLEAR),
+	/* GPU addr==dump address */
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_SAMPLE),
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_CLEAN_INV_CACHES),
+/*
+ * Job Slot management events
+ */
+	/* info_val==irq rawstat at start */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ),
+	/* info_val==jobs processed */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ_END),
+/* In the following:
+ *
+ * - ctx is set if a corresponding job is found (NULL otherwise, e.g. some soft-stop cases)
+ * - uatom==kernel-side mapped uatom address (for correlation with user-side)
+ */
+	/* info_val==exit code; gpu_addr==chain gpuaddr */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_JOB_DONE),
+	/* gpu_addr==JS_HEAD_NEXT written, info_val==lower 32 bits of affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT),
+	/* gpu_addr is as follows:
+	 * - If JS_STATUS active after soft-stop, val==gpu addr written to
+	 *   JS_HEAD on submit
+	 * - otherwise gpu_addr==0 */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_0),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_1),
+	/* gpu_addr==JS_HEAD read */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP),
+	/* gpu_addr==JS_HEAD read */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_0),
+	/* gpu_addr==JS_HEAD read */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_1),
+	/* gpu_addr==JS_TAIL read */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_UPDATE_HEAD),
+/* gpu_addr is as follows:
+ * - If JS_STATUS active before soft-stop, val==JS_HEAD
+ * - otherwise gpu_addr==0
+ */
+	/* gpu_addr==JS_HEAD read */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_CHECK_HEAD),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS_DONE),
+	/* info_val == is_scheduled */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_NON_SCHEDULED),
+	/* info_val == is_scheduled */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_SCHEDULED),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_DONE),
+	/* info_val == nr jobs submitted */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_SOFT_OR_HARD_STOP),
+	/* gpu_addr==JS_HEAD_NEXT last written */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_EVICT),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT_AFTER_RESET),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_BEGIN_RESET_WORKER),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_END_RESET_WORKER),
+/*
+ * Job dispatch events
+ */
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_DONE),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER_END),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_TRY_RUN_NEXT_JOB),
+	/* gpu_addr==0, info_val==0, uatom==0 */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_ZAP_CONTEXT),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL_WORKER),
+/*
+ * Scheduler Core events
+ */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX_NOLOCK),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_ADD_JOB),
+	/* gpu_addr==last value written/would be written to JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_REMOVE_JOB),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_RELEASE_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_TRY_SCHEDULE_HEAD_CTX),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_TRY_RUN_NEXT_JOB),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_RETRY_NEEDED),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_SUBMIT_TO_BLOCKED),
+	/* info_val == lower 32 bits of affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_CURRENT),
+	/* info_val == lower 32 bits of affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_CORES_FAILED),
+	/* info_val == lower 32 bits of affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_INUSE_FAILED),
+	/* info_val == lower 32 bits of rechecked affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED),
+	/* info_val == lower 32 bits of rechecked affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED),
+	/* info_val == lower 32 bits of affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_AFFINITY_WOULD_VIOLATE),
+	/* info_val == the ctx attribute now on ctx */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_CTX),
+	/* info_val == the ctx attribute now on runpool */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_RUNPOOL),
+	/* info_val == the ctx attribute now off ctx */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_CTX),
+	/* info_val == the ctx attribute now off runpool */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_RUNPOOL),
+/*
+ * Scheduler Policy events
+ */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_INIT_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TERM_CTX),
+	/* info_val == whether it was evicted */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TRY_EVICT_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_FOREACH_CTX_JOBS),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_HEAD_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_ADD_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_REMOVE_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB_IRQ),
+	/* gpu_addr==JS_HEAD to write if the job were run */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_JOB),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_START),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_END),
+/*
+ * Power Management Events
+ */
+	KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERING_UP),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERED_UP),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_TILER),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_L2),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_TILER),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_L2),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_TILER),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_L2),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED_TILER),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE_TILER),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE_TILER),
+	/* PM_DESIRED_REACHED: gpu_addr == pm.gpu_in_desired_state */
+	KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED_TILER),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_SHADER_NEEDED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_TILER_NEEDED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_SHADER_NEEDED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_TILER_NEEDED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_WAKE_WAITERS),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_ACTIVE),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_IDLE),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_ON),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_OFF),
+	/* info_val == policy number, or -1 for "Already changing" */
+	KBASE_TRACE_CODE_MAKE_CODE(PM_SET_POLICY),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CA_SET_POLICY),
+	/* info_val == policy number */
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_INIT),
+	/* info_val == policy number */
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_TERM),
+/* Unused code just to make it easier to not have a comma at the end.
+ * All other codes MUST come before this */
+	KBASE_TRACE_CODE_MAKE_CODE(DUMMY)
+
+#if 0 /* Dummy section to avoid breaking formatting */
+};
+#endif
+
+/* ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_tracepoints.c b/drivers/gpu/arm/midgard/mali_kbase_tracepoints.c
new file mode 100644
index 0000000..2c55127
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_tracepoints.c
@@ -0,0 +1,2836 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * THIS FILE IS AUTOGENERATED BY mali_trace_generator.py.
+ * DO NOT EDIT.
+ */
+
+#include "mali_kbase_tracepoints.h"
+#include "mali_kbase_tlstream.h"
+#include "mali_kbase_tl_serialize.h"
+
+/* clang-format off */
+
+/* Message ids of trace events that are recorded in the timeline stream. */
+enum tl_msg_id_obj {
+	KBASE_TL_NEW_CTX,
+	KBASE_TL_NEW_GPU,
+	KBASE_TL_NEW_LPU,
+	KBASE_TL_NEW_ATOM,
+	KBASE_TL_NEW_AS,
+	KBASE_TL_DEL_CTX,
+	KBASE_TL_DEL_ATOM,
+	KBASE_TL_LIFELINK_LPU_GPU,
+	KBASE_TL_LIFELINK_AS_GPU,
+	KBASE_TL_RET_CTX_LPU,
+	KBASE_TL_RET_ATOM_CTX,
+	KBASE_TL_RET_ATOM_LPU,
+	KBASE_TL_NRET_CTX_LPU,
+	KBASE_TL_NRET_ATOM_CTX,
+	KBASE_TL_NRET_ATOM_LPU,
+	KBASE_TL_RET_AS_CTX,
+	KBASE_TL_NRET_AS_CTX,
+	KBASE_TL_RET_ATOM_AS,
+	KBASE_TL_NRET_ATOM_AS,
+	KBASE_TL_ATTRIB_ATOM_CONFIG,
+	KBASE_TL_ATTRIB_ATOM_PRIORITY,
+	KBASE_TL_ATTRIB_ATOM_STATE,
+	KBASE_TL_ATTRIB_ATOM_PRIORITIZED,
+	KBASE_TL_ATTRIB_ATOM_JIT,
+	KBASE_TL_JIT_USEDPAGES,
+	KBASE_TL_ATTRIB_ATOM_JITALLOCINFO,
+	KBASE_TL_ATTRIB_ATOM_JITFREEINFO,
+	KBASE_TL_ATTRIB_AS_CONFIG,
+	KBASE_TL_EVENT_LPU_SOFTSTOP,
+	KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
+	KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
+	KBASE_TL_EVENT_ATOM_SOFTJOB_START,
+	KBASE_TL_EVENT_ATOM_SOFTJOB_END,
+	KBASE_JD_GPU_SOFT_RESET,
+	KBASE_TL_NEW_KCPUQUEUE,
+	KBASE_TL_RET_KCPUQUEUE_CTX,
+	KBASE_TL_DEL_KCPUQUEUE,
+	KBASE_TL_NRET_KCPUQUEUE_CTX,
+	KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL,
+	KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT,
+	KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT,
+	KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT,
+	KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT,
+	KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET,
+	KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET,
+	KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET,
+	KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
+	KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
+	KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
+	KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT,
+	KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT,
+	KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
+	KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
+	KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
+	KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE,
+	KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE,
+	KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START,
+	KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
+	KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
+	KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START,
+	KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END,
+	KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END,
+	KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END,
+	KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER,
+	KBASE_OBJ_MSG_COUNT,
+};
+
+/* Message ids of trace events that are recorded in the auxiliary stream. */
+enum tl_msg_id_aux {
+	KBASE_AUX_PM_STATE,
+	KBASE_AUX_PAGEFAULT,
+	KBASE_AUX_PAGESALLOC,
+	KBASE_AUX_DEVFREQ_TARGET,
+	KBASE_AUX_PROTECTED_ENTER_START,
+	KBASE_AUX_PROTECTED_ENTER_END,
+	KBASE_AUX_PROTECTED_LEAVE_START,
+	KBASE_AUX_PROTECTED_LEAVE_END,
+	KBASE_AUX_JIT_STATS,
+	KBASE_AUX_EVENT_JOB_SLOT,
+	KBASE_AUX_MSG_COUNT,
+};
+
+#define OBJ_TL_LIST \
+	TP_DESC(KBASE_TL_NEW_CTX, \
+		"object ctx is created", \
+		"@pII", \
+		"ctx,ctx_nr,tgid") \
+	TP_DESC(KBASE_TL_NEW_GPU, \
+		"object gpu is created", \
+		"@pII", \
+		"gpu,gpu_id,core_count") \
+	TP_DESC(KBASE_TL_NEW_LPU, \
+		"object lpu is created", \
+		"@pII", \
+		"lpu,lpu_nr,lpu_fn") \
+	TP_DESC(KBASE_TL_NEW_ATOM, \
+		"object atom is created", \
+		"@pI", \
+		"atom,atom_nr") \
+	TP_DESC(KBASE_TL_NEW_AS, \
+		"address space object is created", \
+		"@pI", \
+		"address_space,as_nr") \
+	TP_DESC(KBASE_TL_DEL_CTX, \
+		"context is destroyed", \
+		"@p", \
+		"ctx") \
+	TP_DESC(KBASE_TL_DEL_ATOM, \
+		"atom is destroyed", \
+		"@p", \
+		"atom") \
+	TP_DESC(KBASE_TL_LIFELINK_LPU_GPU, \
+		"lpu is deleted with gpu", \
+		"@pp", \
+		"lpu,gpu") \
+	TP_DESC(KBASE_TL_LIFELINK_AS_GPU, \
+		"address space is deleted with gpu", \
+		"@pp", \
+		"address_space,gpu") \
+	TP_DESC(KBASE_TL_RET_CTX_LPU, \
+		"context is retained by lpu", \
+		"@pp", \
+		"ctx,lpu") \
+	TP_DESC(KBASE_TL_RET_ATOM_CTX, \
+		"atom is retained by context", \
+		"@pp", \
+		"atom,ctx") \
+	TP_DESC(KBASE_TL_RET_ATOM_LPU, \
+		"atom is retained by lpu", \
+		"@pps", \
+		"atom,lpu,attrib_match_list") \
+	TP_DESC(KBASE_TL_NRET_CTX_LPU, \
+		"context is released by lpu", \
+		"@pp", \
+		"ctx,lpu") \
+	TP_DESC(KBASE_TL_NRET_ATOM_CTX, \
+		"atom is released by context", \
+		"@pp", \
+		"atom,ctx") \
+	TP_DESC(KBASE_TL_NRET_ATOM_LPU, \
+		"atom is released by lpu", \
+		"@pp", \
+		"atom,lpu") \
+	TP_DESC(KBASE_TL_RET_AS_CTX, \
+		"address space is retained by context", \
+		"@pp", \
+		"address_space,ctx") \
+	TP_DESC(KBASE_TL_NRET_AS_CTX, \
+		"address space is released by context", \
+		"@pp", \
+		"address_space,ctx") \
+	TP_DESC(KBASE_TL_RET_ATOM_AS, \
+		"atom is retained by address space", \
+		"@pp", \
+		"atom,address_space") \
+	TP_DESC(KBASE_TL_NRET_ATOM_AS, \
+		"atom is released by address space", \
+		"@pp", \
+		"atom,address_space") \
+	TP_DESC(KBASE_TL_ATTRIB_ATOM_CONFIG, \
+		"atom job slot attributes", \
+		"@pLLI", \
+		"atom,descriptor,affinity,config") \
+	TP_DESC(KBASE_TL_ATTRIB_ATOM_PRIORITY, \
+		"atom priority", \
+		"@pI", \
+		"atom,prio") \
+	TP_DESC(KBASE_TL_ATTRIB_ATOM_STATE, \
+		"atom state", \
+		"@pI", \
+		"atom,state") \
+	TP_DESC(KBASE_TL_ATTRIB_ATOM_PRIORITIZED, \
+		"atom caused priority change", \
+		"@p", \
+		"atom") \
+	TP_DESC(KBASE_TL_ATTRIB_ATOM_JIT, \
+		"jit done for atom", \
+		"@pLLILILLL", \
+		"atom,edit_addr,new_addr,jit_flags,mem_flags,j_id,com_pgs,extent,va_pgs") \
+	TP_DESC(KBASE_TL_JIT_USEDPAGES, \
+		"used pages for jit", \
+		"@LI", \
+		"used_pages,j_id") \
+	TP_DESC(KBASE_TL_ATTRIB_ATOM_JITALLOCINFO, \
+		"Information about JIT allocations", \
+		"@pLLLIIIII", \
+		"atom,va_pgs,com_pgs,extent,j_id,bin_id,max_allocs,jit_flags,usg_id") \
+	TP_DESC(KBASE_TL_ATTRIB_ATOM_JITFREEINFO, \
+		"Information about JIT frees", \
+		"@pI", \
+		"atom,j_id") \
+	TP_DESC(KBASE_TL_ATTRIB_AS_CONFIG, \
+		"address space attributes", \
+		"@pLLL", \
+		"address_space,transtab,memattr,transcfg") \
+	TP_DESC(KBASE_TL_EVENT_LPU_SOFTSTOP, \
+		"softstop event on given lpu", \
+		"@p", \
+		"lpu") \
+	TP_DESC(KBASE_TL_EVENT_ATOM_SOFTSTOP_EX, \
+		"atom softstopped", \
+		"@p", \
+		"atom") \
+	TP_DESC(KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE, \
+		"atom softstop issued", \
+		"@p", \
+		"atom") \
+	TP_DESC(KBASE_TL_EVENT_ATOM_SOFTJOB_START, \
+		"atom soft job has started", \
+		"@p", \
+		"atom") \
+	TP_DESC(KBASE_TL_EVENT_ATOM_SOFTJOB_END, \
+		"atom soft job has completed", \
+		"@p", \
+		"atom") \
+	TP_DESC(KBASE_JD_GPU_SOFT_RESET, \
+		"gpu soft reset", \
+		"@p", \
+		"gpu") \
+	TP_DESC(KBASE_TL_NEW_KCPUQUEUE, \
+		"New KCPU Queue", \
+		"@ppI", \
+		"kcpu_queue,ctx,kcpuq_num_pending_cmds") \
+	TP_DESC(KBASE_TL_RET_KCPUQUEUE_CTX, \
+		"Context retains KCPU Queue", \
+		"@pp", \
+		"kcpu_queue,ctx") \
+	TP_DESC(KBASE_TL_DEL_KCPUQUEUE, \
+		"Delete KCPU Queue", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_NRET_KCPUQUEUE_CTX, \
+		"Context releases KCPU Queue", \
+		"@pp", \
+		"kcpu_queue,ctx") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL, \
+		"KCPU Queue enqueues Signal on Fence", \
+		"@pL", \
+		"kcpu_queue,fence") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT, \
+		"KCPU Queue enqueues Wait on Fence", \
+		"@pL", \
+		"kcpu_queue,fence") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
+		"Begin array of KCPU Queue enqueues Wait on Cross Queue Sync Object", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
+		"Array item of KCPU Queue enqueues Wait on Cross Queue Sync Object", \
+		"@pLI", \
+		"kcpu_queue,cqs_obj_gpu_addr,cqs_obj_compare_value") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
+		"End array of KCPU Queue enqueues Wait on Cross Queue Sync Object", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET, \
+		"Begin array of KCPU Queue enqueues Set on Cross Queue Sync Object", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET, \
+		"Array item of KCPU Queue enqueues Set on Cross Queue Sync Object", \
+		"@pL", \
+		"kcpu_queue,cqs_obj_gpu_addr") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET, \
+		"End array of KCPU Queue enqueues Set on Cross Queue Sync Object", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
+		"Begin array of KCPU Queue enqueues Debug Copy", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
+		"Array item of KCPU Queue enqueues Debug Copy", \
+		"@pL", \
+		"kcpu_queue,debugcopy_dst_size") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
+		"End array of KCPU Queue enqueues Debug Copy", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT, \
+		"KCPU Queue enqueues Map Import", \
+		"@pL", \
+		"kcpu_queue,map_import_buf_gpu_addr") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT, \
+		"KCPU Queue enqueues Unmap Import", \
+		"@pL", \
+		"kcpu_queue,map_import_buf_gpu_addr") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
+		"Begin array of KCPU Queue enqueues JIT Alloc", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
+		"Array item of KCPU Queue enqueues JIT Alloc", \
+		"@pLLLLIIIII", \
+		"kcpu_queue,jit_alloc_gpu_alloc_addr_dest,jit_alloc_va_pages,jit_alloc_commit_pages,jit_alloc_extent,jit_alloc_jit_id,jit_alloc_bin_id,jit_alloc_max_allocations,jit_alloc_flags,jit_alloc_usage_id") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
+		"End array of KCPU Queue enqueues JIT Alloc", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE, \
+		"Begin array of KCPU Queue enqueues JIT Free", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE, \
+		"Array item of KCPU Queue enqueues JIT Free", \
+		"@pI", \
+		"kcpu_queue,jit_alloc_jit_id") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE, \
+		"End array of KCPU Queue enqueues JIT Free", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START, \
+		"KCPU Queue starts a Signal on Fence", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END, \
+		"KCPU Queue ends a Signal on Fence", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START, \
+		"KCPU Queue starts a Wait on Fence", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END, \
+		"KCPU Queue ends a Wait on Fence", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START, \
+		"KCPU Queue starts a Wait on an array of Cross Queue Sync Objects", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END, \
+		"KCPU Queue ends a Wait on an array of Cross Queue Sync Objects", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START, \
+		"KCPU Queue starts a Set on an array of Cross Queue Sync Objects", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END, \
+		"KCPU Queue ends a Set on an array of Cross Queue Sync Objects", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START, \
+		"KCPU Queue starts an array of Debug Copys", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END, \
+		"KCPU Queue ends an array of Debug Copys", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START, \
+		"KCPU Queue starts a Map Import", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END, \
+		"KCPU Queue ends a Map Import", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START, \
+		"KCPU Queue starts an Unmap Import", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END, \
+		"KCPU Queue ends an Unmap Import", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START, \
+		"KCPU Queue starts an array of JIT Allocs", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
+		"Begin array of KCPU Queue ends an array of JIT Allocs", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
+		"Array item of KCPU Queue ends an array of JIT Allocs", \
+		"@pLL", \
+		"kcpu_queue,jit_alloc_gpu_alloc_addr,jit_alloc_mmu_flags") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
+		"End array of KCPU Queue ends an array of JIT Allocs", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START, \
+		"KCPU Queue starts an array of JIT Frees", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
+		"Begin array of KCPU Queue ends an array of JIT Frees", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
+		"Array item of KCPU Queue ends an array of JIT Frees", \
+		"@pL", \
+		"kcpu_queue,jit_free_pages_used") \
+	TP_DESC(KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
+		"End array of KCPU Queue ends an array of JIT Frees", \
+		"@p", \
+		"kcpu_queue") \
+	TP_DESC(KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER, \
+		"KCPU Queue executes an Error Barrier", \
+		"@p", \
+		"kcpu_queue") \
+
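+/* In the argument-type strings above, each character after the leading '@'
+ * describes one tracepoint argument: 'p' is a pointer, 'I' a 32-bit
+ * unsigned integer, 'L' a 64-bit unsigned integer and 's' a string (as
+ * inferred from the writer functions below; mali_kbase_mipe_gen_header.h
+ * is the authoritative definition).
+ */
+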
+#define MIPE_HEADER_BLOB_VAR_NAME    __obj_desc_header
+#define MIPE_HEADER_TP_LIST          OBJ_TL_LIST
+#define MIPE_HEADER_TP_LIST_COUNT    KBASE_OBJ_MSG_COUNT
+#define MIPE_HEADER_PKT_CLASS        TL_PACKET_CLASS_OBJ
+
+#include "mali_kbase_mipe_gen_header.h"
+
+const char   *obj_desc_header = (const char *) &__obj_desc_header;
+const size_t  obj_desc_header_size = sizeof(__obj_desc_header);
+
+#define AUX_TL_LIST \
+	TP_DESC(KBASE_AUX_PM_STATE, \
+		"PM state", \
+		"@IL", \
+		"core_type,core_state_bitset") \
+	TP_DESC(KBASE_AUX_PAGEFAULT, \
+		"Page fault", \
+		"@IIL", \
+		"ctx_nr,as_nr,page_cnt_change") \
+	TP_DESC(KBASE_AUX_PAGESALLOC, \
+		"Total alloc pages change", \
+		"@IL", \
+		"ctx_nr,page_cnt") \
+	TP_DESC(KBASE_AUX_DEVFREQ_TARGET, \
+		"New device frequency target", \
+		"@L", \
+		"target_freq") \
+	TP_DESC(KBASE_AUX_PROTECTED_ENTER_START, \
+		"enter protected mode start", \
+		"@p", \
+		"gpu") \
+	TP_DESC(KBASE_AUX_PROTECTED_ENTER_END, \
+		"enter protected mode end", \
+		"@p", \
+		"gpu") \
+	TP_DESC(KBASE_AUX_PROTECTED_LEAVE_START, \
+		"leave protected mode start", \
+		"@p", \
+		"gpu") \
+	TP_DESC(KBASE_AUX_PROTECTED_LEAVE_END, \
+		"leave protected mode end", \
+		"@p", \
+		"gpu") \
+	TP_DESC(KBASE_AUX_JIT_STATS, \
+		"per-bin JIT statistics", \
+		"@IIIIII", \
+		"ctx_nr,bid,max_allocs,allocs,va_pages,ph_pages") \
+	TP_DESC(KBASE_AUX_EVENT_JOB_SLOT, \
+		"event on a given job slot", \
+		"@pIII", \
+		"ctx,slot_nr,atom_nr,event") \
+
+#define MIPE_HEADER_BLOB_VAR_NAME    __aux_desc_header
+#define MIPE_HEADER_TP_LIST          AUX_TL_LIST
+#define MIPE_HEADER_TP_LIST_COUNT    KBASE_AUX_MSG_COUNT
+#define MIPE_HEADER_PKT_CLASS        TL_PACKET_CLASS_AUX
+
+#include "mali_kbase_mipe_gen_header.h"
+
+const char   *aux_desc_header = (const char *) &__aux_desc_header;
+const size_t  aux_desc_header_size = sizeof(__aux_desc_header);
+
+void __kbase_tlstream_tl_new_ctx(
+	struct kbase_tlstream *stream,
+	const void *ctx,
+	u32 ctx_nr,
+	u32 tgid)
+{
+	const u32 msg_id = KBASE_TL_NEW_CTX;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx)
+		+ sizeof(ctx_nr)
+		+ sizeof(tgid)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx_nr, sizeof(ctx_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &tgid, sizeof(tgid));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
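+
+/* For reference, on a 64-bit kernel the message serialized above occupies
+ * 4 (msg_id) + 8 (timestamp) + 8 (ctx pointer) + 4 (ctx_nr) + 4 (tgid)
+ * = 28 bytes, matching msg_size.
+ */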
+
+void __kbase_tlstream_tl_new_gpu(
+	struct kbase_tlstream *stream,
+	const void *gpu,
+	u32 gpu_id,
+	u32 core_count)
+{
+	const u32 msg_id = KBASE_TL_NEW_GPU;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(gpu)
+		+ sizeof(gpu_id)
+		+ sizeof(core_count)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu, sizeof(gpu));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu_id, sizeof(gpu_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &core_count, sizeof(core_count));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_new_lpu(
+	struct kbase_tlstream *stream,
+	const void *lpu,
+	u32 lpu_nr,
+	u32 lpu_fn)
+{
+	const u32 msg_id = KBASE_TL_NEW_LPU;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(lpu)
+		+ sizeof(lpu_nr)
+		+ sizeof(lpu_fn)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &lpu, sizeof(lpu));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &lpu_nr, sizeof(lpu_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &lpu_fn, sizeof(lpu_fn));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_new_atom(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u32 atom_nr)
+{
+	const u32 msg_id = KBASE_TL_NEW_ATOM;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(atom_nr)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom_nr, sizeof(atom_nr));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_new_as(
+	struct kbase_tlstream *stream,
+	const void *address_space,
+	u32 as_nr)
+{
+	const u32 msg_id = KBASE_TL_NEW_AS;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(address_space)
+		+ sizeof(as_nr)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &address_space, sizeof(address_space));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &as_nr, sizeof(as_nr));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_del_ctx(
+	struct kbase_tlstream *stream,
+	const void *ctx)
+{
+	const u32 msg_id = KBASE_TL_DEL_CTX;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_del_atom(
+	struct kbase_tlstream *stream,
+	const void *atom)
+{
+	const u32 msg_id = KBASE_TL_DEL_ATOM;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_lifelink_lpu_gpu(
+	struct kbase_tlstream *stream,
+	const void *lpu,
+	const void *gpu)
+{
+	const u32 msg_id = KBASE_TL_LIFELINK_LPU_GPU;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(lpu)
+		+ sizeof(gpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &lpu, sizeof(lpu));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu, sizeof(gpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_lifelink_as_gpu(
+	struct kbase_tlstream *stream,
+	const void *address_space,
+	const void *gpu)
+{
+	const u32 msg_id = KBASE_TL_LIFELINK_AS_GPU;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(address_space)
+		+ sizeof(gpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &address_space, sizeof(address_space));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu, sizeof(gpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_ctx_lpu(
+	struct kbase_tlstream *stream,
+	const void *ctx,
+	const void *lpu)
+{
+	const u32 msg_id = KBASE_TL_RET_CTX_LPU;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx)
+		+ sizeof(lpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &lpu, sizeof(lpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_ctx(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	const void *ctx)
+{
+	const u32 msg_id = KBASE_TL_RET_ATOM_CTX;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(ctx)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_lpu(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	const void *lpu,
+	const char *attrib_match_list)
+{
+	const u32 msg_id = KBASE_TL_RET_ATOM_LPU;
+	const size_t s0 = sizeof(u32) + sizeof(char)
+		+ strnlen(attrib_match_list, STRLEN_MAX);
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(lpu)
+		+ s0
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &lpu, sizeof(lpu));
+	pos = kbasep_serialize_string(buffer,
+		pos, attrib_match_list, s0);
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_ctx_lpu(
+	struct kbase_tlstream *stream,
+	const void *ctx,
+	const void *lpu)
+{
+	const u32 msg_id = KBASE_TL_NRET_CTX_LPU;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx)
+		+ sizeof(lpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &lpu, sizeof(lpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_ctx(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	const void *ctx)
+{
+	const u32 msg_id = KBASE_TL_NRET_ATOM_CTX;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(ctx)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_lpu(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	const void *lpu)
+{
+	const u32 msg_id = KBASE_TL_NRET_ATOM_LPU;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(lpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &lpu, sizeof(lpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_as_ctx(
+	struct kbase_tlstream *stream,
+	const void *address_space,
+	const void *ctx)
+{
+	const u32 msg_id = KBASE_TL_RET_AS_CTX;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(address_space)
+		+ sizeof(ctx)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &address_space, sizeof(address_space));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_as_ctx(
+	struct kbase_tlstream *stream,
+	const void *address_space,
+	const void *ctx)
+{
+	const u32 msg_id = KBASE_TL_NRET_AS_CTX;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(address_space)
+		+ sizeof(ctx)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &address_space, sizeof(address_space));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_as(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	const void *address_space)
+{
+	const u32 msg_id = KBASE_TL_RET_ATOM_AS;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(address_space)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &address_space, sizeof(address_space));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_as(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	const void *address_space)
+{
+	const u32 msg_id = KBASE_TL_NRET_ATOM_AS;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(address_space)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &address_space, sizeof(address_space));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_config(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u64 descriptor,
+	u64 affinity,
+	u32 config)
+{
+	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_CONFIG;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(descriptor)
+		+ sizeof(affinity)
+		+ sizeof(config)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &descriptor, sizeof(descriptor));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &affinity, sizeof(affinity));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &config, sizeof(config));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_priority(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u32 prio)
+{
+	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(prio)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &prio, sizeof(prio));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_state(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u32 state)
+{
+	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_STATE;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(state)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &state, sizeof(state));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_prioritized(
+	struct kbase_tlstream *stream,
+	const void *atom)
+{
+	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITIZED;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jit(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u64 edit_addr,
+	u64 new_addr,
+	u32 jit_flags,
+	u64 mem_flags,
+	u32 j_id,
+	u64 com_pgs,
+	u64 extent,
+	u64 va_pgs)
+{
+	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JIT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(edit_addr)
+		+ sizeof(new_addr)
+		+ sizeof(jit_flags)
+		+ sizeof(mem_flags)
+		+ sizeof(j_id)
+		+ sizeof(com_pgs)
+		+ sizeof(extent)
+		+ sizeof(va_pgs)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &edit_addr, sizeof(edit_addr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &new_addr, sizeof(new_addr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_flags, sizeof(jit_flags));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &mem_flags, sizeof(mem_flags));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &j_id, sizeof(j_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &com_pgs, sizeof(com_pgs));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &extent, sizeof(extent));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &va_pgs, sizeof(va_pgs));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_jit_usedpages(
+	struct kbase_tlstream *stream,
+	u64 used_pages,
+	u32 j_id)
+{
+	const u32 msg_id = KBASE_TL_JIT_USEDPAGES;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(used_pages)
+		+ sizeof(j_id)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &used_pages, sizeof(used_pages));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &j_id, sizeof(j_id));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jitallocinfo(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u64 va_pgs,
+	u64 com_pgs,
+	u64 extent,
+	u32 j_id,
+	u32 bin_id,
+	u32 max_allocs,
+	u32 jit_flags,
+	u32 usg_id)
+{
+	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JITALLOCINFO;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(va_pgs)
+		+ sizeof(com_pgs)
+		+ sizeof(extent)
+		+ sizeof(j_id)
+		+ sizeof(bin_id)
+		+ sizeof(max_allocs)
+		+ sizeof(jit_flags)
+		+ sizeof(usg_id)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &va_pgs, sizeof(va_pgs));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &com_pgs, sizeof(com_pgs));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &extent, sizeof(extent));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &j_id, sizeof(j_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &bin_id, sizeof(bin_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &max_allocs, sizeof(max_allocs));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_flags, sizeof(jit_flags));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &usg_id, sizeof(usg_id));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u32 j_id)
+{
+	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JITFREEINFO;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(j_id)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &j_id, sizeof(j_id));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_as_config(
+	struct kbase_tlstream *stream,
+	const void *address_space,
+	u64 transtab,
+	u64 memattr,
+	u64 transcfg)
+{
+	const u32 msg_id = KBASE_TL_ATTRIB_AS_CONFIG;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(address_space)
+		+ sizeof(transtab)
+		+ sizeof(memattr)
+		+ sizeof(transcfg)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &address_space, sizeof(address_space));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &transtab, sizeof(transtab));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &memattr, sizeof(memattr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &transcfg, sizeof(transcfg));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_lpu_softstop(
+	struct kbase_tlstream *stream,
+	const void *lpu)
+{
+	const u32 msg_id = KBASE_TL_EVENT_LPU_SOFTSTOP;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(lpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &lpu, sizeof(lpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softstop_ex(
+	struct kbase_tlstream *stream,
+	const void *atom)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_EX;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softstop_issue(
+	struct kbase_tlstream *stream,
+	const void *atom)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softjob_start(
+	struct kbase_tlstream *stream,
+	const void *atom)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softjob_end(
+	struct kbase_tlstream *stream,
+	const void *atom)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_jd_gpu_soft_reset(
+	struct kbase_tlstream *stream,
+	const void *gpu)
+{
+	const u32 msg_id = KBASE_JD_GPU_SOFT_RESET;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(gpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu, sizeof(gpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_pm_state(
+	struct kbase_tlstream *stream,
+	u32 core_type,
+	u64 core_state_bitset)
+{
+	const u32 msg_id = KBASE_AUX_PM_STATE;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(core_type)
+		+ sizeof(core_state_bitset)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &core_type, sizeof(core_type));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &core_state_bitset, sizeof(core_state_bitset));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_pagefault(
+	struct kbase_tlstream *stream,
+	u32 ctx_nr,
+	u32 as_nr,
+	u64 page_cnt_change)
+{
+	const u32 msg_id = KBASE_AUX_PAGEFAULT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx_nr)
+		+ sizeof(as_nr)
+		+ sizeof(page_cnt_change)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx_nr, sizeof(ctx_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &as_nr, sizeof(as_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &page_cnt_change, sizeof(page_cnt_change));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_pagesalloc(
+	struct kbase_tlstream *stream,
+	u32 ctx_nr,
+	u64 page_cnt)
+{
+	const u32 msg_id = KBASE_AUX_PAGESALLOC;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx_nr)
+		+ sizeof(page_cnt)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx_nr, sizeof(ctx_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &page_cnt, sizeof(page_cnt));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_devfreq_target(
+	struct kbase_tlstream *stream,
+	u64 target_freq)
+{
+	const u32 msg_id = KBASE_AUX_DEVFREQ_TARGET;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(target_freq)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &target_freq, sizeof(target_freq));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_enter_start(
+	struct kbase_tlstream *stream,
+	const void *gpu)
+{
+	const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(gpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu, sizeof(gpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_enter_end(
+	struct kbase_tlstream *stream,
+	const void *gpu)
+{
+	const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(gpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu, sizeof(gpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_leave_start(
+	struct kbase_tlstream *stream,
+	const void *gpu)
+{
+	const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(gpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu, sizeof(gpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_leave_end(
+	struct kbase_tlstream *stream,
+	const void *gpu)
+{
+	const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(gpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu, sizeof(gpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_jit_stats(
+	struct kbase_tlstream *stream,
+	u32 ctx_nr,
+	u32 bid,
+	u32 max_allocs,
+	u32 allocs,
+	u32 va_pages,
+	u32 ph_pages)
+{
+	const u32 msg_id = KBASE_AUX_JIT_STATS;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx_nr)
+		+ sizeof(bid)
+		+ sizeof(max_allocs)
+		+ sizeof(allocs)
+		+ sizeof(va_pages)
+		+ sizeof(ph_pages)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx_nr, sizeof(ctx_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &bid, sizeof(bid));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &max_allocs, sizeof(max_allocs));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &allocs, sizeof(allocs));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &va_pages, sizeof(va_pages));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ph_pages, sizeof(ph_pages));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_event_job_slot(
+	struct kbase_tlstream *stream,
+	const void *ctx,
+	u32 slot_nr,
+	u32 atom_nr,
+	u32 event)
+{
+	const u32 msg_id = KBASE_AUX_EVENT_JOB_SLOT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx)
+		+ sizeof(slot_nr)
+		+ sizeof(atom_nr)
+		+ sizeof(event)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &slot_nr, sizeof(slot_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom_nr, sizeof(atom_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &event, sizeof(event));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_new_kcpuqueue(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	const void *ctx,
+	u32 kcpuq_num_pending_cmds)
+{
+	const u32 msg_id = KBASE_TL_NEW_KCPUQUEUE;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(ctx)
+		+ sizeof(kcpuq_num_pending_cmds)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpuq_num_pending_cmds, sizeof(kcpuq_num_pending_cmds));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_kcpuqueue_ctx(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	const void *ctx)
+{
+	const u32 msg_id = KBASE_TL_RET_KCPUQUEUE_CTX;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(ctx)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_del_kcpuqueue(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_DEL_KCPUQUEUE;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_kcpuqueue_ctx(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	const void *ctx)
+{
+	const u32 msg_id = KBASE_TL_NRET_KCPUQUEUE_CTX;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(ctx)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_signal(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 fence)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(fence)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &fence, sizeof(fence));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_wait(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 fence)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(fence)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &fence, sizeof(fence));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_wait(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_wait(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 cqs_obj_gpu_addr,
+	u32 cqs_obj_compare_value)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(cqs_obj_gpu_addr)
+		+ sizeof(cqs_obj_compare_value)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &cqs_obj_compare_value, sizeof(cqs_obj_compare_value));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_wait(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_set(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_set(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 cqs_obj_gpu_addr)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(cqs_obj_gpu_addr)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_set(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_debugcopy(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_debugcopy(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 debugcopy_dst_size)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(debugcopy_dst_size)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &debugcopy_dst_size, sizeof(debugcopy_dst_size));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_debugcopy(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_map_import(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 map_import_buf_gpu_addr)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(map_import_buf_gpu_addr)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &map_import_buf_gpu_addr, sizeof(map_import_buf_gpu_addr));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 map_import_buf_gpu_addr)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(map_import_buf_gpu_addr)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &map_import_buf_gpu_addr, sizeof(map_import_buf_gpu_addr));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_alloc(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 jit_alloc_gpu_alloc_addr_dest,
+	u64 jit_alloc_va_pages,
+	u64 jit_alloc_commit_pages,
+	u64 jit_alloc_extent,
+	u32 jit_alloc_jit_id,
+	u32 jit_alloc_bin_id,
+	u32 jit_alloc_max_allocations,
+	u32 jit_alloc_flags,
+	u32 jit_alloc_usage_id)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(jit_alloc_gpu_alloc_addr_dest)
+		+ sizeof(jit_alloc_va_pages)
+		+ sizeof(jit_alloc_commit_pages)
+		+ sizeof(jit_alloc_extent)
+		+ sizeof(jit_alloc_jit_id)
+		+ sizeof(jit_alloc_bin_id)
+		+ sizeof(jit_alloc_max_allocations)
+		+ sizeof(jit_alloc_flags)
+		+ sizeof(jit_alloc_usage_id)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_alloc_gpu_alloc_addr_dest, sizeof(jit_alloc_gpu_alloc_addr_dest));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_alloc_va_pages, sizeof(jit_alloc_va_pages));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_alloc_commit_pages, sizeof(jit_alloc_commit_pages));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_alloc_extent, sizeof(jit_alloc_extent));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_alloc_jit_id, sizeof(jit_alloc_jit_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_alloc_bin_id, sizeof(jit_alloc_bin_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_alloc_max_allocations, sizeof(jit_alloc_max_allocations));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_alloc_flags, sizeof(jit_alloc_flags));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_alloc_usage_id, sizeof(jit_alloc_usage_id));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_alloc(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_free(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_free(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u32 jit_alloc_jit_id)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(jit_alloc_jit_id)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_alloc_jit_id, sizeof(jit_alloc_jit_id));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_free(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_alloc_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_alloc_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_alloc_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 jit_alloc_gpu_alloc_addr,
+	u64 jit_alloc_mmu_flags)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(jit_alloc_gpu_alloc_addr)
+		+ sizeof(jit_alloc_mmu_flags)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_alloc_gpu_alloc_addr, sizeof(jit_alloc_gpu_alloc_addr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_alloc_mmu_flags, sizeof(jit_alloc_mmu_flags));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_alloc_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_free_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_free_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 jit_free_pages_used)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(jit_free_pages_used)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_free_pages_used, sizeof(jit_free_pages_used));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_free_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_kcpuqueue_execute_errorbarrier(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue)
+{
+	const u32 msg_id = KBASE_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+/* clang-format on */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_tracepoints.h b/drivers/gpu/arm/midgard/mali_kbase_tracepoints.h
new file mode 100644
index 0000000..7346493
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_tracepoints.h
@@ -0,0 +1,2417 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * THIS FILE IS AUTOGENERATED BY mali_trace_generator.py.
+ * DO NOT EDIT.
+ */
+
+#if !defined(_KBASE_TRACEPOINTS_H)
+#define _KBASE_TRACEPOINTS_H
+
+/* Tracepoints are abstract callbacks notifying that some important
+ * software or hardware event has happened.
+ *
+ * In this particular implementation, it results in a MIPE
+ * timeline event and, in some cases, also fires an ftrace event
+ * (a.k.a. Gator events; see details below).
+ */
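+
+/* Illustrative call site (assumed names, not part of the generated
+ * header): a driver would emit a timeline event through one of the
+ * macros declared below, e.g. on context creation:
+ *
+ *   KBASE_TLSTREAM_TL_NEW_CTX(kbdev, kctx, kctx_id, tgid);
+ *
+ * where kctx, kctx_id and tgid are call-site variables. The macro checks
+ * kbdev->timeline_is_enabled and, when tracing is on, calls the matching
+ * __kbase_tlstream_* serializer on the object stream.
+ */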
+
+#include "mali_kbase.h"
+#include "mali_kbase_gator.h"
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+
+/* clang-format off */
+
+struct kbase_tlstream;
+
+extern const size_t __obj_stream_offset;
+extern const size_t __aux_stream_offset;
+
+/* This macro dispatches a kbase_tlstream from
+ * a kbase_device instance. Only AUX or OBJ
+ * streams can be dispatched. It is aware of
+ * kbase_timeline binary representation and
+ * relies on offset variables:
+ * __obj_stream_offset and __aux_stream_offset.
+ */
+#define __TL_DISPATCH_STREAM(kbdev, stype) \
+	((struct kbase_tlstream *) \
+	 ((u8 *)kbdev->timeline + __ ## stype ## _stream_offset))
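+
+/* For example (explanatory only), __TL_DISPATCH_STREAM(kbdev, aux)
+ * expands to a kbase_tlstream pointer computed as the timeline base
+ * address plus __aux_stream_offset, i.e. the auxiliary events stream
+ * embedded in kbdev->timeline.
+ */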
+
+struct tp_desc;
+
+/* Descriptors of timeline messages transmitted in object events stream. */
+extern const char   *obj_desc_header;
+extern const size_t  obj_desc_header_size;
+/* Descriptors of timeline messages transmitted in auxiliary events stream. */
+extern const char   *aux_desc_header;
+extern const size_t  aux_desc_header_size;
+
+#define TL_ATOM_STATE_IDLE 0
+#define TL_ATOM_STATE_READY 1
+#define TL_ATOM_STATE_DONE 2
+#define TL_ATOM_STATE_POSTED 3
+
+#define TL_JS_EVENT_START     GATOR_JOB_SLOT_START
+#define TL_JS_EVENT_STOP      GATOR_JOB_SLOT_STOP
+#define TL_JS_EVENT_SOFT_STOP GATOR_JOB_SLOT_SOFT_STOPPED
+
+#define TLSTREAM_ENABLED (1u << 31)
+
+void __kbase_tlstream_tl_new_ctx(
+	struct kbase_tlstream *stream,
+	const void *ctx,
+	u32 ctx_nr,
+	u32 tgid);
+void __kbase_tlstream_tl_new_gpu(
+	struct kbase_tlstream *stream,
+	const void *gpu,
+	u32 gpu_id,
+	u32 core_count);
+void __kbase_tlstream_tl_new_lpu(
+	struct kbase_tlstream *stream,
+	const void *lpu,
+	u32 lpu_nr,
+	u32 lpu_fn);
+void __kbase_tlstream_tl_new_atom(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u32 atom_nr);
+void __kbase_tlstream_tl_new_as(
+	struct kbase_tlstream *stream,
+	const void *address_space,
+	u32 as_nr);
+void __kbase_tlstream_tl_del_ctx(
+	struct kbase_tlstream *stream,
+	const void *ctx);
+void __kbase_tlstream_tl_del_atom(
+	struct kbase_tlstream *stream,
+	const void *atom);
+void __kbase_tlstream_tl_lifelink_lpu_gpu(
+	struct kbase_tlstream *stream,
+	const void *lpu,
+	const void *gpu);
+void __kbase_tlstream_tl_lifelink_as_gpu(
+	struct kbase_tlstream *stream,
+	const void *address_space,
+	const void *gpu);
+void __kbase_tlstream_tl_ret_ctx_lpu(
+	struct kbase_tlstream *stream,
+	const void *ctx,
+	const void *lpu);
+void __kbase_tlstream_tl_ret_atom_ctx(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	const void *ctx);
+void __kbase_tlstream_tl_ret_atom_lpu(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	const void *lpu,
+	const char *attrib_match_list);
+void __kbase_tlstream_tl_nret_ctx_lpu(
+	struct kbase_tlstream *stream,
+	const void *ctx,
+	const void *lpu);
+void __kbase_tlstream_tl_nret_atom_ctx(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	const void *ctx);
+void __kbase_tlstream_tl_nret_atom_lpu(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	const void *lpu);
+void __kbase_tlstream_tl_ret_as_ctx(
+	struct kbase_tlstream *stream,
+	const void *address_space,
+	const void *ctx);
+void __kbase_tlstream_tl_nret_as_ctx(
+	struct kbase_tlstream *stream,
+	const void *address_space,
+	const void *ctx);
+void __kbase_tlstream_tl_ret_atom_as(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	const void *address_space);
+void __kbase_tlstream_tl_nret_atom_as(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	const void *address_space);
+void __kbase_tlstream_tl_attrib_atom_config(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u64 descriptor,
+	u64 affinity,
+	u32 config);
+void __kbase_tlstream_tl_attrib_atom_priority(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u32 prio);
+void __kbase_tlstream_tl_attrib_atom_state(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u32 state);
+void __kbase_tlstream_tl_attrib_atom_prioritized(
+	struct kbase_tlstream *stream,
+	const void *atom);
+void __kbase_tlstream_tl_attrib_atom_jit(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u64 edit_addr,
+	u64 new_addr,
+	u32 jit_flags,
+	u64 mem_flags,
+	u32 j_id,
+	u64 com_pgs,
+	u64 extent,
+	u64 va_pgs);
+void __kbase_tlstream_tl_jit_usedpages(
+	struct kbase_tlstream *stream,
+	u64 used_pages,
+	u32 j_id);
+void __kbase_tlstream_tl_attrib_atom_jitallocinfo(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u64 va_pgs,
+	u64 com_pgs,
+	u64 extent,
+	u32 j_id,
+	u32 bin_id,
+	u32 max_allocs,
+	u32 jit_flags,
+	u32 usg_id);
+void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u32 j_id);
+void __kbase_tlstream_tl_attrib_as_config(
+	struct kbase_tlstream *stream,
+	const void *address_space,
+	u64 transtab,
+	u64 memattr,
+	u64 transcfg);
+void __kbase_tlstream_tl_event_lpu_softstop(
+	struct kbase_tlstream *stream,
+	const void *lpu);
+void __kbase_tlstream_tl_event_atom_softstop_ex(
+	struct kbase_tlstream *stream,
+	const void *atom);
+void __kbase_tlstream_tl_event_atom_softstop_issue(
+	struct kbase_tlstream *stream,
+	const void *atom);
+void __kbase_tlstream_tl_event_atom_softjob_start(
+	struct kbase_tlstream *stream,
+	const void *atom);
+void __kbase_tlstream_tl_event_atom_softjob_end(
+	struct kbase_tlstream *stream,
+	const void *atom);
+void __kbase_tlstream_jd_gpu_soft_reset(
+	struct kbase_tlstream *stream,
+	const void *gpu);
+void __kbase_tlstream_aux_pm_state(
+	struct kbase_tlstream *stream,
+	u32 core_type,
+	u64 core_state_bitset);
+void __kbase_tlstream_aux_pagefault(
+	struct kbase_tlstream *stream,
+	u32 ctx_nr,
+	u32 as_nr,
+	u64 page_cnt_change);
+void __kbase_tlstream_aux_pagesalloc(
+	struct kbase_tlstream *stream,
+	u32 ctx_nr,
+	u64 page_cnt);
+void __kbase_tlstream_aux_devfreq_target(
+	struct kbase_tlstream *stream,
+	u64 target_freq);
+void __kbase_tlstream_aux_protected_enter_start(
+	struct kbase_tlstream *stream,
+	const void *gpu);
+void __kbase_tlstream_aux_protected_enter_end(
+	struct kbase_tlstream *stream,
+	const void *gpu);
+void __kbase_tlstream_aux_protected_leave_start(
+	struct kbase_tlstream *stream,
+	const void *gpu);
+void __kbase_tlstream_aux_protected_leave_end(
+	struct kbase_tlstream *stream,
+	const void *gpu);
+void __kbase_tlstream_aux_jit_stats(
+	struct kbase_tlstream *stream,
+	u32 ctx_nr,
+	u32 bid,
+	u32 max_allocs,
+	u32 allocs,
+	u32 va_pages,
+	u32 ph_pages);
+void __kbase_tlstream_aux_event_job_slot(
+	struct kbase_tlstream *stream,
+	const void *ctx,
+	u32 slot_nr,
+	u32 atom_nr,
+	u32 event);
+void __kbase_tlstream_tl_new_kcpuqueue(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	const void *ctx,
+	u32 kcpuq_num_pending_cmds);
+void __kbase_tlstream_tl_ret_kcpuqueue_ctx(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	const void *ctx);
+void __kbase_tlstream_tl_del_kcpuqueue(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_nret_kcpuqueue_ctx(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	const void *ctx);
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_signal(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 fence);
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_wait(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 fence);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_wait(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_wait(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 cqs_obj_gpu_addr,
+	u32 cqs_obj_compare_value);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_wait(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_set(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_set(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 cqs_obj_gpu_addr);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_set(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_debugcopy(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_debugcopy(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 debugcopy_dst_size);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_debugcopy(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_map_import(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 map_import_buf_gpu_addr);
+void __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 map_import_buf_gpu_addr);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_alloc(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 jit_alloc_gpu_alloc_addr_dest,
+	u64 jit_alloc_va_pages,
+	u64 jit_alloc_commit_pages,
+	u64 jit_alloc_extent,
+	u32 jit_alloc_jit_id,
+	u32 jit_alloc_bin_id,
+	u32 jit_alloc_max_allocations,
+	u32 jit_alloc_flags,
+	u32 jit_alloc_usage_id);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_alloc(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_free(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_free(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u32 jit_alloc_jit_id);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_free(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_alloc_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_alloc_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_alloc_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 jit_alloc_gpu_alloc_addr,
+	u64 jit_alloc_mmu_flags);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_alloc_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_free_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_free_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 jit_free_pages_used);
+void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_free_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+void __kbase_tlstream_tl_event_kcpuqueue_execute_errorbarrier(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue);
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_CTX -
+ *   object ctx is created
+ *
+ * @kbdev:	Kbase device
+ * @ctx:	Name of the context object
+ * @ctx_nr:	Kernel context number
+ * @tgid:	Thread Group Id
+ */
+#define KBASE_TLSTREAM_TL_NEW_CTX(	\
+	kbdev,	\
+	ctx,	\
+	ctx_nr,	\
+	tgid	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_new_ctx(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				ctx, ctx_nr, tgid);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_GPU -
+ *   object gpu is created
+ *
+ * @kbdev:	Kbase device
+ * @gpu:	Name of the GPU object
+ * @gpu_id:	GPU identifier
+ * @core_count:	Number of cores this GPU hosts
+ */
+#define KBASE_TLSTREAM_TL_NEW_GPU(	\
+	kbdev,	\
+	gpu,	\
+	gpu_id,	\
+	core_count	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_new_gpu(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				gpu, gpu_id, core_count);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_LPU -
+ *   object lpu is created
+ *
+ * @kbdev:	Kbase device
+ * @lpu:	Name of the Logical Processing Unit object
+ * @lpu_nr:	Sequential number assigned to the newly created LPU
+ * @lpu_fn:	Property describing functional abilities of this LPU
+ */
+#define KBASE_TLSTREAM_TL_NEW_LPU(	\
+	kbdev,	\
+	lpu,	\
+	lpu_nr,	\
+	lpu_fn	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_new_lpu(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				lpu, lpu_nr, lpu_fn);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_ATOM -
+ *   object atom is created
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @atom_nr:	Sequential number of an atom
+ */
+#define KBASE_TLSTREAM_TL_NEW_ATOM(	\
+	kbdev,	\
+	atom,	\
+	atom_nr	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_new_atom(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom, atom_nr);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_AS -
+ *   address space object is created
+ *
+ * @kbdev:	Kbase device
+ * @address_space:	Name of the address space object
+ * @as_nr:	Address space number
+ */
+#define KBASE_TLSTREAM_TL_NEW_AS(	\
+	kbdev,	\
+	address_space,	\
+	as_nr	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_new_as(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				address_space, as_nr);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_DEL_CTX -
+ *   context is destroyed
+ *
+ * @kbdev:	Kbase device
+ * @ctx:	Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_DEL_CTX(	\
+	kbdev,	\
+	ctx	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_del_ctx(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				ctx);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_DEL_ATOM -
+ *   atom is destroyed
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_DEL_ATOM(	\
+	kbdev,	\
+	atom	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_del_atom(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU -
+ *   lpu is deleted with gpu
+ *
+ * @kbdev:	Kbase device
+ * @lpu:	Name of the Logical Processing Unit object
+ * @gpu:	Name of the GPU object
+ */
+#define KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU(	\
+	kbdev,	\
+	lpu,	\
+	gpu	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_lifelink_lpu_gpu(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				lpu, gpu);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_LIFELINK_AS_GPU -
+ *   address space is deleted with gpu
+ *
+ * @kbdev:	Kbase device
+ * @address_space:	Name of the address space object
+ * @gpu:	Name of the GPU object
+ */
+#define KBASE_TLSTREAM_TL_LIFELINK_AS_GPU(	\
+	kbdev,	\
+	address_space,	\
+	gpu	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_lifelink_as_gpu(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				address_space, gpu);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_CTX_LPU -
+ *   context is retained by lpu
+ *
+ * @kbdev:	Kbase device
+ * @ctx:	Name of the context object
+ * @lpu:	Name of the Logical Processing Unit object
+ */
+#define KBASE_TLSTREAM_TL_RET_CTX_LPU(	\
+	kbdev,	\
+	ctx,	\
+	lpu	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_ret_ctx_lpu(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				ctx, lpu);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_CTX -
+ *   atom is retained by context
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @ctx:	Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_CTX(	\
+	kbdev,	\
+	atom,	\
+	ctx	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_ret_atom_ctx(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom, ctx);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_LPU -
+ *   atom is retained by lpu
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @lpu:	Name of the Logical Processing Unit object
+ * @attrib_match_list:	List containing match operator attributes
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_LPU(	\
+	kbdev,	\
+	atom,	\
+	lpu,	\
+	attrib_match_list	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_ret_atom_lpu(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom, lpu, attrib_match_list);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_CTX_LPU -
+ *   context is released by lpu
+ *
+ * @kbdev:	Kbase device
+ * @ctx:	Name of the context object
+ * @lpu:	Name of the Logical Processing Unit object
+ */
+#define KBASE_TLSTREAM_TL_NRET_CTX_LPU(	\
+	kbdev,	\
+	ctx,	\
+	lpu	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_nret_ctx_lpu(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				ctx, lpu);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_CTX -
+ *   atom is released by context
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @ctx:	Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_CTX(	\
+	kbdev,	\
+	atom,	\
+	ctx	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_nret_atom_ctx(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom, ctx);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_LPU -
+ *   atom is released by lpu
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @lpu:	Name of the Logical Processing Unit object
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_LPU(	\
+	kbdev,	\
+	atom,	\
+	lpu	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_nret_atom_lpu(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom, lpu);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_AS_CTX -
+ *   address space is retained by context
+ *
+ * @kbdev:	Kbase device
+ * @address_space:	Name of the address space object
+ * @ctx:	Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_RET_AS_CTX(	\
+	kbdev,	\
+	address_space,	\
+	ctx	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_ret_as_ctx(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				address_space, ctx);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_AS_CTX -
+ *   address space is released by context
+ *
+ * @kbdev:	Kbase device
+ * @address_space:	Name of the address space object
+ * @ctx:	Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_NRET_AS_CTX(	\
+	kbdev,	\
+	address_space,	\
+	ctx	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_nret_as_ctx(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				address_space, ctx);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_AS -
+ *   atom is retained by address space
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @address_space:	Name of the address space object
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_AS(	\
+	kbdev,	\
+	atom,	\
+	address_space	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_ret_atom_as(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom, address_space);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_AS -
+ *   atom is released by address space
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @address_space:	Name of the address space object
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_AS(	\
+	kbdev,	\
+	atom,	\
+	address_space	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_nret_atom_as(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom, address_space);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG -
+ *   atom job slot attributes
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @descriptor:	Job descriptor address
+ * @affinity:	Job affinity
+ * @config:	Job config
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG(	\
+	kbdev,	\
+	atom,	\
+	descriptor,	\
+	affinity,	\
+	config	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_attrib_atom_config(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom, descriptor, affinity, config);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY -
+ *   atom priority
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @prio:	Atom priority
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY(	\
+	kbdev,	\
+	atom,	\
+	prio	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled);	\
+		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS)	\
+			__kbase_tlstream_tl_attrib_atom_priority(	\
+				__TL_DISPATCH_STREAM(kbdev, obj),	\
+				atom, prio);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE -
+ *   atom state
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @state:	Atom state
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(	\
+	kbdev,	\
+	atom,	\
+	state	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled);	\
+		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS)	\
+			__kbase_tlstream_tl_attrib_atom_state(	\
+				__TL_DISPATCH_STREAM(kbdev, obj),	\
+				atom, state);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED -
+ *   atom caused priority change
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED(	\
+	kbdev,	\
+	atom	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled);	\
+		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS)	\
+			__kbase_tlstream_tl_attrib_atom_prioritized(	\
+				__TL_DISPATCH_STREAM(kbdev, obj),	\
+				atom);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT -
+ *   jit done for atom
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @edit_addr:	Address edited by jit
+ * @new_addr:	Address placed into the edited location
+ * @jit_flags:	Flags specifying the special requirements for
+ * the JIT allocation.
+ * @mem_flags:	Flags defining the properties of a memory region
+ * @j_id:	Unique ID provided by the caller; it is used
+ * to pair allocation and free requests.
+ * @com_pgs:	The minimum number of physical pages which
+ * should back the allocation.
+ * @extent:	Granularity of physical pages to grow the
+ * allocation by during a fault.
+ * @va_pgs:	The minimum number of virtual pages required
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(	\
+	kbdev,	\
+	atom,	\
+	edit_addr,	\
+	new_addr,	\
+	jit_flags,	\
+	mem_flags,	\
+	j_id,	\
+	com_pgs,	\
+	extent,	\
+	va_pgs	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled);	\
+		if (enabled & BASE_TLSTREAM_JOB_DUMPING_ENABLED)	\
+			__kbase_tlstream_tl_attrib_atom_jit(	\
+				__TL_DISPATCH_STREAM(kbdev, obj),	\
+				atom, edit_addr, new_addr, jit_flags, mem_flags, j_id, com_pgs, extent, va_pgs);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_JIT_USEDPAGES -
+ *   used pages for jit
+ *
+ * @kbdev:	Kbase device
+ * @used_pages:	Number of pages used for jit
+ * @j_id:	Unique ID provided by the caller; it is used
+ * to pair allocation and free requests.
+ */
+#define KBASE_TLSTREAM_TL_JIT_USEDPAGES(	\
+	kbdev,	\
+	used_pages,	\
+	j_id	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_jit_usedpages(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				used_pages, j_id);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO -
+ *   Information about JIT allocations
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @va_pgs:	The minimum number of virtual pages required
+ * @com_pgs:	The minimum number of physical pages which
+ * should back the allocation.
+ * @extent:	Granularity of physical pages to grow the
+ * allocation by during a fault.
+ * @j_id:	Unique ID provided by the caller; it is used
+ * to pair allocation and free requests.
+ * @bin_id:	The JIT allocation bin, used in conjunction with
+ * max_allocations to limit the number of each
+ * type of JIT allocation.
+ * @max_allocs:	Maximum allocations allowed in this bin.
+ * @jit_flags:	Flags specifying the special requirements for
+ * the JIT allocation.
+ * @usg_id:	A hint about which allocation should be reused.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO(	\
+	kbdev,	\
+	atom,	\
+	va_pgs,	\
+	com_pgs,	\
+	extent,	\
+	j_id,	\
+	bin_id,	\
+	max_allocs,	\
+	jit_flags,	\
+	usg_id	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_attrib_atom_jitallocinfo(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom, va_pgs, com_pgs, extent, j_id, bin_id, max_allocs, jit_flags, usg_id);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO -
+ *   Information about JIT frees
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ * @j_id:	Unique ID provided by the caller; it is used
+ * to pair allocation and free requests.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO(	\
+	kbdev,	\
+	atom,	\
+	j_id	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_attrib_atom_jitfreeinfo(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom, j_id);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG -
+ *   address space attributes
+ *
+ * @kbdev:	Kbase device
+ * @address_space:	Name of the address space object
+ * @transtab:	Configuration of the TRANSTAB register
+ * @memattr:	Configuration of the MEMATTR register
+ * @transcfg:	Configuration of the TRANSCFG register (or zero if not present)
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(	\
+	kbdev,	\
+	address_space,	\
+	transtab,	\
+	memattr,	\
+	transcfg	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_attrib_as_config(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				address_space, transtab, memattr, transcfg);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP -
+ *   softstop event on given lpu
+ *
+ * @kbdev:	Kbase device
+ * @lpu:	Name of the Logical Processing Unit object
+ */
+#define KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP(	\
+	kbdev,	\
+	lpu	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_lpu_softstop(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				lpu);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX -
+ *   atom softstopped
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX(	\
+	kbdev,	\
+	atom	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_atom_softstop_ex(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE -
+ *   atom softstop issued
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE(	\
+	kbdev,	\
+	atom	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_atom_softstop_issue(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START -
+ *   atom soft job has started
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START(	\
+	kbdev,	\
+	atom	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_atom_softjob_start(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END -
+ *   atom soft job has completed
+ *
+ * @kbdev:	Kbase device
+ * @atom:	Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END(	\
+	kbdev,	\
+	atom	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_atom_softjob_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				atom);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_JD_GPU_SOFT_RESET -
+ *   gpu soft reset
+ *
+ * @kbdev:	Kbase device
+ * @gpu:	Name of the GPU object
+ */
+#define KBASE_TLSTREAM_JD_GPU_SOFT_RESET(	\
+	kbdev,	\
+	gpu	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_jd_gpu_soft_reset(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				gpu);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PM_STATE -
+ *   PM state
+ *
+ * @kbdev:	Kbase device
+ * @core_type:	Core type (shader, tiler, l2 cache, l3 cache)
+ * @core_state_bitset:	64-bit bitmask reporting the power state of the cores
+ * (1-ON, 0-OFF)
+ */
+#define KBASE_TLSTREAM_AUX_PM_STATE(	\
+	kbdev,	\
+	core_type,	\
+	core_state_bitset	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_aux_pm_state(	\
+				__TL_DISPATCH_STREAM(kbdev, aux), \
+				core_type, core_state_bitset);	\
+	} while (0)
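+
+/* Illustrative call site (assumed names): after a core power transition
+ * the driver could report shader-core readiness with
+ *
+ *   KBASE_TLSTREAM_AUX_PM_STATE(kbdev, KBASE_PM_CORE_SHADER, ready_bitmask);
+ *
+ * where KBASE_PM_CORE_SHADER and ready_bitmask are call-site values, the
+ * core-type enum being defined by the power-management code.
+ */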
+
+/**
+ * KBASE_TLSTREAM_AUX_PAGEFAULT -
+ *   Page fault
+ *
+ * @kbdev:	Kbase device
+ * @ctx_nr:	Kernel context number
+ * @as_nr:	Address space number
+ * @page_cnt_change:	Number of pages to be added
+ */
+#define KBASE_TLSTREAM_AUX_PAGEFAULT(	\
+	kbdev,	\
+	ctx_nr,	\
+	as_nr,	\
+	page_cnt_change	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_aux_pagefault(	\
+				__TL_DISPATCH_STREAM(kbdev, aux), \
+				ctx_nr, as_nr, page_cnt_change);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PAGESALLOC -
+ *   Total allocated pages change
+ *
+ * @kbdev:	Kbase device
+ * @ctx_nr:	Kernel context number
+ * @page_cnt:	Number of pages used by the context
+ */
+#define KBASE_TLSTREAM_AUX_PAGESALLOC(	\
+	kbdev,	\
+	ctx_nr,	\
+	page_cnt	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_aux_pagesalloc(	\
+				__TL_DISPATCH_STREAM(kbdev, aux), \
+				ctx_nr, page_cnt);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_DEVFREQ_TARGET -
+ *   New device frequency target
+ *
+ * @kbdev:	Kbase device
+ * @target_freq:	New target frequency
+ */
+#define KBASE_TLSTREAM_AUX_DEVFREQ_TARGET(	\
+	kbdev,	\
+	target_freq	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_aux_devfreq_target(	\
+				__TL_DISPATCH_STREAM(kbdev, aux), \
+				target_freq);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START -
+ *   enter protected mode start
+ *
+ * @kbdev:	Kbase device
+ * @gpu:	Name of the GPU object
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START(	\
+	kbdev,	\
+	gpu	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled);	\
+		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS)	\
+			__kbase_tlstream_aux_protected_enter_start(	\
+				__TL_DISPATCH_STREAM(kbdev, aux),	\
+				gpu);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END -
+ *   enter protected mode end
+ *
+ * @kbdev:	Kbase device
+ * @gpu:	Name of the GPU object
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(	\
+	kbdev,	\
+	gpu	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled);	\
+		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS)	\
+			__kbase_tlstream_aux_protected_enter_end(	\
+				__TL_DISPATCH_STREAM(kbdev, aux),	\
+				gpu);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START -
+ *   leave protected mode start
+ *
+ * @kbdev:	Kbase device
+ * @gpu:	Name of the GPU object
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START(	\
+	kbdev,	\
+	gpu	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled);	\
+		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS)	\
+			__kbase_tlstream_aux_protected_leave_start(	\
+				__TL_DISPATCH_STREAM(kbdev, aux),	\
+				gpu);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END -
+ *   leave protected mode end
+ *
+ * @kbdev:	Kbase device
+ * @gpu:	Name of the GPU object
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(	\
+	kbdev,	\
+	gpu	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled);	\
+		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS)	\
+			__kbase_tlstream_aux_protected_leave_end(	\
+				__TL_DISPATCH_STREAM(kbdev, aux),	\
+				gpu);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_JIT_STATS -
+ *   per-bin JIT statistics
+ *
+ * @kbdev:	Kbase device
+ * @ctx_nr:	Kernel context number
+ * @bid:	JIT bin id
+ * @max_allocs:	Maximum allocations allowed in this bin.
+ * @allocs:	Number of active allocations in this bin
+ * @va_pages:	Number of virtual pages allocated in this bin
+ * @ph_pages:	Number of physical pages allocated in this bin
+ */
+#define KBASE_TLSTREAM_AUX_JIT_STATS(	\
+	kbdev,	\
+	ctx_nr,	\
+	bid,	\
+	max_allocs,	\
+	allocs,	\
+	va_pages,	\
+	ph_pages	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_aux_jit_stats(	\
+				__TL_DISPATCH_STREAM(kbdev, aux), \
+				ctx_nr, bid, max_allocs, allocs, va_pages, ph_pages);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT -
+ *   event on a given job slot
+ *
+ * @kbdev:	Kbase device
+ * @ctx:	Name of the context object
+ * @slot_nr:	Job slot number
+ * @atom_nr:	Sequential number of an atom
+ * @event:	Event type. One of TL_JS_EVENT values
+ */
+#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(	\
+	kbdev,	\
+	ctx,	\
+	slot_nr,	\
+	atom_nr,	\
+	event	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_aux_event_job_slot(	\
+				__TL_DISPATCH_STREAM(kbdev, aux), \
+				ctx, slot_nr, atom_nr, event);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_KCPUQUEUE -
+ *   New KCPU Queue
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @ctx:	Name of the context object
+ * @kcpuq_num_pending_cmds:	Number of commands already enqueued
+ * in the KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_NEW_KCPUQUEUE(	\
+	kbdev,	\
+	kcpu_queue,	\
+	ctx,	\
+	kcpuq_num_pending_cmds	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_new_kcpuqueue(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, ctx, kcpuq_num_pending_cmds);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_KCPUQUEUE_CTX -
+ *   Context retains KCPU Queue
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @ctx:	Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_RET_KCPUQUEUE_CTX(	\
+	kbdev,	\
+	kcpu_queue,	\
+	ctx	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_ret_kcpuqueue_ctx(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, ctx);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_DEL_KCPUQUEUE -
+ *   Delete KCPU Queue
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_DEL_KCPUQUEUE(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_del_kcpuqueue(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_KCPUQUEUE_CTX -
+ *   Context releases KCPU Queue
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @ctx:	Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_NRET_KCPUQUEUE_CTX(	\
+	kbdev,	\
+	kcpu_queue,	\
+	ctx	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_nret_kcpuqueue_ctx(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, ctx);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL -
+ *   KCPU Queue enqueues Signal on Fence
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @fence:	Fence object handle
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL(	\
+	kbdev,	\
+	kcpu_queue,	\
+	fence	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_signal(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, fence);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT -
+ *   KCPU Queue enqueues Wait on Fence
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @fence:	Fence object handle
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT(	\
+	kbdev,	\
+	kcpu_queue,	\
+	fence	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_wait(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, fence);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT -
+ *   Begin array of KCPU Queue enqueues Wait on Cross Queue Sync Object
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_wait(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT -
+ *   Array item of KCPU Queue enqueues Wait on Cross Queue Sync Object
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @cqs_obj_gpu_addr:	CQS Object GPU ptr
+ * @cqs_obj_compare_value:	Semaphore value that should be exceeded
+ * for the WAIT to pass
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT(	\
+	kbdev,	\
+	kcpu_queue,	\
+	cqs_obj_gpu_addr,	\
+	cqs_obj_compare_value	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_wait(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, cqs_obj_gpu_addr, cqs_obj_compare_value);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT -
+ *   End array of KCPU Queue enqueues Wait on Cross Queue Sync Object
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_wait(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET -
+ *   Begin array of KCPU Queue enqueues Set on Cross Queue Sync Object
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_set(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET -
+ *   Array item of KCPU Queue enqueues Set on Cross Queue Sync Object
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @cqs_obj_gpu_addr:	CQS Object GPU ptr
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET(	\
+	kbdev,	\
+	kcpu_queue,	\
+	cqs_obj_gpu_addr	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_set(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, cqs_obj_gpu_addr);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET -
+ *   End array of KCPU Queue enqueues Set on Cross Queue Sync Object
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_set(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
+ *   Begin array of KCPU Queue enqueues Debug Copy
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_debugcopy(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
+ *   Array item of KCPU Queue enqueues Debug Copy
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @debugcopy_dst_size:	Debug Copy destination size
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY(	\
+	kbdev,	\
+	kcpu_queue,	\
+	debugcopy_dst_size	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_debugcopy(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, debugcopy_dst_size);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
+ *   End array of KCPU Queue enqueues Debug Copy
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_debugcopy(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT -
+ *   KCPU Queue enqueues Map Import
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @map_import_buf_gpu_addr:	Map import buffer GPU ptr
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT(	\
+	kbdev,	\
+	kcpu_queue,	\
+	map_import_buf_gpu_addr	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_enqueue_map_import(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, map_import_buf_gpu_addr);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT -
+ *   KCPU Queue enqueues Unmap Import
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @map_import_buf_gpu_addr:	Map import buffer GPU ptr
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT(	\
+	kbdev,	\
+	kcpu_queue,	\
+	map_import_buf_gpu_addr	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, map_import_buf_gpu_addr);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
+ *   Begin array of KCPU Queue enqueues JIT Alloc
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_alloc(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
+ *   Array item of KCPU Queue enqueues JIT Alloc
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @jit_alloc_gpu_alloc_addr_dest:	The GPU virtual address to write
+ * the JIT allocated GPU virtual address to
+ * @jit_alloc_va_pages:	The minimum number of virtual pages required
+ * @jit_alloc_commit_pages:	The minimum number of physical pages which
+ * should back the allocation
+ * @jit_alloc_extent:	Granularity of physical pages to grow the allocation
+ * by during a fault
+ * @jit_alloc_jit_id:	Unique ID provided by the caller; it is used
+ * to pair allocation and free requests. Zero is not a valid value
+ * @jit_alloc_bin_id:	The JIT allocation bin, used in conjunction with
+ * max_allocations to limit the number of each type of JIT allocation
+ * @jit_alloc_max_allocations:	The maximum number of allocations
+ * allowed within the bin specified by bin_id. Should be the same for all
+ * JIT allocations within the same bin.
+ * @jit_alloc_flags:	Flags specifying the special requirements for the
+ * JIT allocation
+ * @jit_alloc_usage_id:	A hint about which allocation should be
+ * reused. The kernel should attempt to use a previous allocation with the same
+ * usage_id.
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC(	\
+	kbdev,	\
+	kcpu_queue,	\
+	jit_alloc_gpu_alloc_addr_dest,	\
+	jit_alloc_va_pages,	\
+	jit_alloc_commit_pages,	\
+	jit_alloc_extent,	\
+	jit_alloc_jit_id,	\
+	jit_alloc_bin_id,	\
+	jit_alloc_max_allocations,	\
+	jit_alloc_flags,	\
+	jit_alloc_usage_id	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, jit_alloc_gpu_alloc_addr_dest, jit_alloc_va_pages, jit_alloc_commit_pages, jit_alloc_extent, jit_alloc_jit_id, jit_alloc_bin_id, jit_alloc_max_allocations, jit_alloc_flags, jit_alloc_usage_id);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
+ *   End array of KCPU Queue enqueues JIT Alloc
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_alloc(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
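+
+/* Illustrative call site (assumed names): one ARRAY_ITEM event is emitted
+ * per JIT allocation request, bracketed by the BEGIN/END tracepoints:
+ *
+ *   KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC(kbdev, queue);
+ *   KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC(kbdev, queue,
+ *       info->gpu_alloc_addr, info->va_pages, info->commit_pages, info->extent,
+ *       info->id, info->bin_id, info->max_allocations, info->flags,
+ *       info->usage_id);
+ *   KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC(kbdev, queue);
+ *
+ * queue and info are assumed call-site variables, info being a
+ * base_jit_alloc_info-style request descriptor.
+ */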
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE -
+ *   Begin array of KCPU Queue enqueues JIT Free
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_free(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE -
+ *   Array item of KCPU Queue enqueues JIT Free
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @jit_alloc_jit_id:	Unique ID provided by the caller; it is used
+ * to pair allocation and free requests. Zero is not a valid value.
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE(	\
+	kbdev,	\
+	kcpu_queue,	\
+	jit_alloc_jit_id	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_free(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, jit_alloc_jit_id);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE -
+ *   End array of KCPU Queue enqueues JIT Free
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_free(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START -
+ *   KCPU Queue starts a Signal on Fence
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_start(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END -
+ *   KCPU Queue ends a Signal on Fence
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START -
+ *   KCPU Queue starts a Wait on Fence
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_start(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END -
+ *   KCPU Queue ends a Wait on Fence
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START -
+ *   KCPU Queue starts a Wait on an array of Cross Queue Sync Objects
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_start(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END -
+ *   KCPU Queue ends a Wait on an array of Cross Queue Sync Objects
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START -
+ *   KCPU Queue starts a Set on an array of Cross Queue Sync Objects
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_start(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END -
+ *   KCPU Queue ends a Set on an array of Cross Queue Sync Objects
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START -
+ *   KCPU Queue starts an array of Debug Copies
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_start(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END -
+ *   KCPU Queue ends an array of Debug Copies
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START -
+ *   KCPU Queue starts a Map Import
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_map_import_start(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END -
+ *   KCPU Queue ends a Map Import
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_map_import_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START -
+ *   KCPU Queue starts an Unmap Import
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_start(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END -
+ *   KCPU Queue ends an Unmap Import
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START -
+ *   KCPU Queue starts an array of JIT Allocs
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_jit_alloc_start(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
+ *   Begin array of KCPU Queue ends an array of JIT Allocs
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_alloc_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
+ *   Array item of KCPU Queue ends an array of JIT Allocs
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @jit_alloc_gpu_alloc_addr:	The JIT allocated GPU virtual address
+ * @jit_alloc_mmu_flags:	The MMU flags for the JIT allocation
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END(	\
+	kbdev,	\
+	kcpu_queue,	\
+	jit_alloc_gpu_alloc_addr,	\
+	jit_alloc_mmu_flags	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_alloc_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, jit_alloc_gpu_alloc_addr, jit_alloc_mmu_flags);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
+ *   End array of KCPU Queue ends an array of JIT Allocs
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_alloc_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START -
+ *   KCPU Queue starts an array of JIT Frees
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_start(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END -
+ *   Begin array of KCPU Queue ends an array of JIT Frees
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_free_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END -
+ *   Array item of KCPU Queue ends an array of JIT Frees
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ * @jit_free_pages_used:	The actual number of pages used by the JIT
+ * allocation
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END(	\
+	kbdev,	\
+	kcpu_queue,	\
+	jit_free_pages_used	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_free_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue, jit_free_pages_used);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END -
+ *   End array of KCPU Queue ends an array of JIT Frees
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_free_end(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER -
+ *   KCPU Queue executes an Error Barrier
+ *
+ * @kbdev:	Kbase device
+ * @kcpu_queue:	KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER(	\
+	kbdev,	\
+	kcpu_queue	\
+	)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_tl_event_kcpuqueue_execute_errorbarrier(	\
+				__TL_DISPATCH_STREAM(kbdev, obj), \
+				kcpu_queue);	\
+	} while (0)
+
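+/* All of the tracepoint macros above follow the same pattern: read the
+ * timeline_is_enabled flag once, and only call the underlying
+ * __kbase_tlstream_* writer when the TLSTREAM_ENABLED bit is set, so a
+ * disabled timeline costs a single atomic read per event.
+ */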
+
+/* Gator tracepoints are hooked into the TLSTREAM interface.
+ * When the following tracepoints are called, the corresponding
+ * Gator tracepoint will be called as well.
+ */
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+/* `event` is one of the TL_JS_EVENT values here.
+ * The TL_JS_EVENT values are guaranteed to match the
+ * corresponding GATOR_JOB_SLOT values.
+ */
+#undef KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT
+#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, \
+	context, slot_nr, atom_nr, event)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled);	\
+		kbase_trace_mali_job_slots_event(kbdev->id,	\
+			GATOR_MAKE_EVENT(event, slot_nr),	\
+			context, (u8) atom_nr);	\
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_aux_event_job_slot(	\
+				__TL_DISPATCH_STREAM(kbdev, aux),	\
+				context, slot_nr, atom_nr, event);	\
+	} while (0)
+
+#undef KBASE_TLSTREAM_AUX_PM_STATE
+#define KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, state)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled);	\
+		kbase_trace_mali_pm_status(kbdev->id,	\
+			core_type, state);	\
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_aux_pm_state(	\
+				__TL_DISPATCH_STREAM(kbdev, aux),	\
+				core_type, state);	\
+	} while (0)
+
+#undef KBASE_TLSTREAM_AUX_PAGEFAULT
+#define KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, \
+	ctx_nr, as_nr, page_cnt_change)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled);	\
+		kbase_trace_mali_page_fault_insert_pages(kbdev->id,	\
+			as_nr,	\
+			page_cnt_change);	\
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_aux_pagefault(	\
+				__TL_DISPATCH_STREAM(kbdev, aux),	\
+				ctx_nr, as_nr, page_cnt_change);	\
+	} while (0)
+
+/* kbase_trace_mali_total_alloc_pages_change is handled differently here.
+ * We stream the total number of pages allocated for `kbdev` rather
+ * than `page_count`, which is per-context.
+ */
+#undef KBASE_TLSTREAM_AUX_PAGESALLOC
+#define KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, ctx_nr, page_cnt)	\
+	do {	\
+		int enabled = atomic_read(&kbdev->timeline_is_enabled);	\
+		u32 global_pages_count = \
+			atomic_read(&kbdev->memdev.used_pages);	\
+			\
+		kbase_trace_mali_total_alloc_pages_change(kbdev->id,	\
+			global_pages_count);	\
+		if (enabled & TLSTREAM_ENABLED)	\
+			__kbase_tlstream_aux_pagesalloc(	\
+				__TL_DISPATCH_STREAM(kbdev, aux),	\
+				ctx_nr, page_cnt);	\
+	} while (0)
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+
+/* clang-format on */
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_utility.c b/drivers/gpu/arm/midgard/mali_kbase_utility.c
new file mode 100644
index 0000000..3ea234a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_utility.c
@@ -0,0 +1,38 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+
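+/**
+ * kbasep_list_member_of - Check whether @entry is a member of the list
+ *                         headed by @base.
+ * @base:  Head of the list to search.
+ * @entry: List entry to look for.
+ *
+ * Return: true if @entry is on the list, false otherwise.
+ */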
+bool kbasep_list_member_of(const struct list_head *base, struct list_head *entry)
+{
+	struct list_head *pos = base->next;
+
+	while (pos != base) {
+		if (pos == entry)
+			return true;
+
+		pos = pos->next;
+	}
+	return false;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_utility.h b/drivers/gpu/arm/midgard/mali_kbase_utility.h
new file mode 100644
index 0000000..8d4f0443
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_utility.h
@@ -0,0 +1,55 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#ifndef _KBASE_UTILITY_H
+#define _KBASE_UTILITY_H
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
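+/* Compatibility wrapper: kernels before 4.14 only provide setup_timer(),
+ * whose callback takes an unsigned long data argument, so the timer
+ * pointer itself is passed as data and the new-style callback prototype
+ * is reused via a cast. On 4.14+ this is just timer_setup().
+ */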
+static inline void kbase_timer_setup(struct timer_list *timer,
+				     void (*callback)(struct timer_list *timer))
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+	setup_timer(timer, (void (*)(unsigned long)) callback,
+			(unsigned long) timer);
+#else
+	timer_setup(timer, callback, 0);
+#endif
+}
+
+#ifndef WRITE_ONCE
+	#ifdef ASSIGN_ONCE
+		#define WRITE_ONCE(x, val) ASSIGN_ONCE(val, x)
+	#else
+		#define WRITE_ONCE(x, val) (ACCESS_ONCE(x) = (val))
+	#endif
+#endif
+
+#ifndef READ_ONCE
+	#define READ_ONCE(x) ACCESS_ONCE(x)
+#endif
+
+#endif				/* _KBASE_UTILITY_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_vinstr.c b/drivers/gpu/arm/midgard/mali_kbase_vinstr.c
new file mode 100644
index 0000000..377642d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_vinstr.c
@@ -0,0 +1,990 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_vinstr.h"
+#include "mali_kbase_hwcnt_virtualizer.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_kbase_hwcnt_reader.h"
+#include "mali_kbase_hwcnt_gpu.h"
+#include "mali_kbase_ioctl.h"
+#include "mali_malisw.h"
+#include "mali_kbase_debug.h"
+
+#include <linux/anon_inodes.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/hrtimer.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+/* Hwcnt reader API version */
+#define HWCNT_READER_API 1
+
+/* The minimum allowed interval between dumps (equivalent to 10 kHz) */
+#define DUMP_INTERVAL_MIN_NS (100 * NSEC_PER_USEC)
+
+/* The maximum allowed buffers per client */
+#define MAX_BUFFER_COUNT 32
+
+/**
+ * struct kbase_vinstr_context - IOCTL interface for userspace hardware
+ *                               counters.
+ * @hvirt:         Hardware counter virtualizer used by vinstr.
+ * @metadata:      Hardware counter metadata provided by virtualizer.
+ * @lock:          Lock protecting all vinstr state.
+ * @suspend_count: Suspend reference count. If non-zero, timer and worker are
+ *                 prevented from being re-scheduled.
+ * @client_count:  Number of vinstr clients.
+ * @clients:       List of vinstr clients.
+ * @dump_timer:    Timer that enqueues dump_work to a workqueue.
+ * @dump_work:     Worker for performing periodic counter dumps.
+ */
+struct kbase_vinstr_context {
+	struct kbase_hwcnt_virtualizer *hvirt;
+	const struct kbase_hwcnt_metadata *metadata;
+	struct mutex lock;
+	size_t suspend_count;
+	size_t client_count;
+	struct list_head clients;
+	struct hrtimer dump_timer;
+	struct work_struct dump_work;
+};
+
+/**
+ * struct kbase_vinstr_client - A vinstr client attached to a vinstr context.
+ * @vctx:              Vinstr context client is attached to.
+ * @hvcli:             Hardware counter virtualizer client.
+ * @node:              Node used to attach this client to list in vinstr
+ *                     context.
+ * @dump_interval_ns:  Interval between periodic dumps. If 0, not a periodic
+ *                     client.
+ * @next_dump_time_ns: Time in ns when this client's next periodic dump must
+ *                     occur. If 0, not a periodic client.
+ * @enable_map:        Counters enable map.
+ * @dump_bufs:         Array of dump buffers allocated by this client.
+ * @dump_bufs_meta:    Metadata of dump buffers.
+ * @meta_idx:          Index of metadata being accessed by userspace.
+ * @read_idx:          Index of buffer read by userspace.
+ * @write_idx:         Index of buffer being written by dump worker.
+ * @waitq:             Client's notification queue.
+ */
+struct kbase_vinstr_client {
+	struct kbase_vinstr_context *vctx;
+	struct kbase_hwcnt_virtualizer_client *hvcli;
+	struct list_head node;
+	u64 next_dump_time_ns;
+	u32 dump_interval_ns;
+	struct kbase_hwcnt_enable_map enable_map;
+	struct kbase_hwcnt_dump_buffer_array dump_bufs;
+	struct kbase_hwcnt_reader_metadata *dump_bufs_meta;
+	atomic_t meta_idx;
+	atomic_t read_idx;
+	atomic_t write_idx;
+	wait_queue_head_t waitq;
+};
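+
+/* The three buffer indices only ever increase, and satisfy
+ * read_idx <= meta_idx <= write_idx (modulo wrap-around): the dump worker
+ * advances write_idx when a buffer is filled, GET_BUFFER hands a buffer
+ * to userspace by advancing meta_idx, and PUT_BUFFER returns it by
+ * advancing read_idx.
+ */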
+
+static unsigned int kbasep_vinstr_hwcnt_reader_poll(
+	struct file *filp,
+	poll_table *wait);
+
+static long kbasep_vinstr_hwcnt_reader_ioctl(
+	struct file *filp,
+	unsigned int cmd,
+	unsigned long arg);
+
+static int kbasep_vinstr_hwcnt_reader_mmap(
+	struct file *filp,
+	struct vm_area_struct *vma);
+
+static int kbasep_vinstr_hwcnt_reader_release(
+	struct inode *inode,
+	struct file *filp);
+
+/* Vinstr client file operations */
+static const struct file_operations vinstr_client_fops = {
+	.owner = THIS_MODULE,
+	.poll           = kbasep_vinstr_hwcnt_reader_poll,
+	.unlocked_ioctl = kbasep_vinstr_hwcnt_reader_ioctl,
+	.compat_ioctl   = kbasep_vinstr_hwcnt_reader_ioctl,
+	.mmap           = kbasep_vinstr_hwcnt_reader_mmap,
+	.release        = kbasep_vinstr_hwcnt_reader_release,
+};
+
+/**
+ * kbasep_vinstr_timestamp_ns() - Get the current time in nanoseconds.
+ *
+ * Return: Current time in nanoseconds.
+ */
+static u64 kbasep_vinstr_timestamp_ns(void)
+{
+	struct timespec ts;
+
+	getrawmonotonic(&ts);
+	return (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+}
+
+/**
+ * kbasep_vinstr_next_dump_time_ns() - Calculate the next periodic dump time.
+ * @cur_ts_ns: Current time in nanoseconds.
+ * @interval:  Interval between dumps in nanoseconds.
+ *
+ * Return: 0 if interval is 0 (i.e. a non-periodic client), or the next dump
+ *         time that occurs after cur_ts_ns.
+ */
+static u64 kbasep_vinstr_next_dump_time_ns(u64 cur_ts_ns, u32 interval)
+{
+	/* Non-periodic client */
+	if (interval == 0)
+		return 0;
+
+	/*
+	 * Return the next interval after the current time relative to t=0.
+	 * This means multiple clients with the same period will synchronise,
+	 * regardless of when they were started, allowing the worker to be
+	 * scheduled less frequently.
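+	 *
+	 * For example (illustrative values): with cur_ts_ns = 1050 and
+	 * interval = 100, do_div() leaves the quotient 10 in cur_ts_ns, so
+	 * the returned time is (10 + 1) * 100 = 1100 for every client on a
+	 * 100ns period, regardless of when each client started.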
+	 */
+	do_div(cur_ts_ns, interval);
+	return (cur_ts_ns + 1) * interval;
+}
+
+/**
+ * kbasep_vinstr_client_dump() - Perform a dump for a client.
+ * @vcli:     Non-NULL pointer to a vinstr client.
+ * @event_id: Event type that triggered the dump.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_vinstr_client_dump(
+	struct kbase_vinstr_client *vcli,
+	enum base_hwcnt_reader_event event_id)
+{
+	int errcode;
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+	unsigned int write_idx;
+	unsigned int read_idx;
+	struct kbase_hwcnt_dump_buffer *dump_buf;
+	struct kbase_hwcnt_reader_metadata *meta;
+
+	WARN_ON(!vcli);
+	lockdep_assert_held(&vcli->vctx->lock);
+
+	write_idx = atomic_read(&vcli->write_idx);
+	read_idx = atomic_read(&vcli->read_idx);
+
+	/* Check if there is a place to copy HWC block into. */
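+	/* write_idx and read_idx increase monotonically and are reduced
+	 * modulo buf_cnt only when used as array indices, so the unsigned
+	 * difference below is the number of filled buffers even across
+	 * counter wrap-around.
+	 */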
+	if (write_idx - read_idx == vcli->dump_bufs.buf_cnt)
+		return -EBUSY;
+	write_idx %= vcli->dump_bufs.buf_cnt;
+
+	dump_buf = &vcli->dump_bufs.bufs[write_idx];
+	meta = &vcli->dump_bufs_meta[write_idx];
+
+	errcode = kbase_hwcnt_virtualizer_client_dump(
+		vcli->hvcli, &ts_start_ns, &ts_end_ns, dump_buf);
+	if (errcode)
+		return errcode;
+
+	/* Patch the dump buf headers, to hide the counters that other hwcnt
+	 * clients are using.
+	 */
+	kbase_hwcnt_gpu_patch_dump_headers(dump_buf, &vcli->enable_map);
+
+	/* Zero all non-enabled counters (current values are undefined) */
+	kbase_hwcnt_dump_buffer_zero_non_enabled(dump_buf, &vcli->enable_map);
+
+	meta->timestamp = ts_end_ns;
+	meta->event_id = event_id;
+	meta->buffer_idx = write_idx;
+
+	/* Notify client. Make sure all changes to memory are visible. */
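+	/* The barrier orders the buffer and metadata writes above before
+	 * the write_idx increment, so a reader that observes the new
+	 * write_idx also observes the completed dump.
+	 */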
+	wmb();
+	atomic_inc(&vcli->write_idx);
+	wake_up_interruptible(&vcli->waitq);
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_client_clear() - Reset all the client's counters to zero.
+ * @vcli: Non-NULL pointer to a vinstr client.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_vinstr_client_clear(struct kbase_vinstr_client *vcli)
+{
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+
+	WARN_ON(!vcli);
+	lockdep_assert_held(&vcli->vctx->lock);
+
+	/* A virtualizer dump with a NULL buffer will just clear the virtualizer
+	 * client's buffer.
+	 */
+	return kbase_hwcnt_virtualizer_client_dump(
+		vcli->hvcli, &ts_start_ns, &ts_end_ns, NULL);
+}
+
+/**
+ * kbasep_vinstr_reschedule_worker() - Update next dump times for all periodic
+ *                                     vinstr clients, then reschedule the dump
+ *                                     worker appropriately.
+ * @vctx: Non-NULL pointer to the vinstr context.
+ *
+ * If there are no periodic clients, then the dump worker will not be
+ * rescheduled. Else, the dump worker will be rescheduled for the next periodic
+ * client dump.
+ */
+static void kbasep_vinstr_reschedule_worker(struct kbase_vinstr_context *vctx)
+{
+	u64 cur_ts_ns;
+	u64 earliest_next_ns = U64_MAX;
+	struct kbase_vinstr_client *pos;
+
+	WARN_ON(!vctx);
+	lockdep_assert_held(&vctx->lock);
+
+	cur_ts_ns = kbasep_vinstr_timestamp_ns();
+
+	/*
+	 * Update each client's next dump time, and find the earliest next
+	 * dump time if any of the clients have a non-zero interval.
+	 */
+	list_for_each_entry(pos, &vctx->clients, node) {
+		const u64 cli_next_ns =
+			kbasep_vinstr_next_dump_time_ns(
+				cur_ts_ns, pos->dump_interval_ns);
+
+		/* Non-zero next dump time implies a periodic client */
+		if ((cli_next_ns != 0) && (cli_next_ns < earliest_next_ns))
+			earliest_next_ns = cli_next_ns;
+
+		pos->next_dump_time_ns = cli_next_ns;
+	}
+
+	/* Cancel the timer if it is already pending */
+	hrtimer_cancel(&vctx->dump_timer);
+
+	/* Start the timer if there are periodic clients and vinstr is not
+	 * suspended.
+	 */
+	if ((earliest_next_ns != U64_MAX) &&
+	    (vctx->suspend_count == 0) &&
+	    !WARN_ON(earliest_next_ns < cur_ts_ns))
+		hrtimer_start(
+			&vctx->dump_timer,
+			ns_to_ktime(earliest_next_ns - cur_ts_ns),
+			HRTIMER_MODE_REL);
+}
+
+/**
+ * kbasep_vinstr_dump_worker() - Dump worker that dumps all periodic clients
+ *                               needing a dump, then reschedules itself.
+ * @work: Work structure.
+ */
+static void kbasep_vinstr_dump_worker(struct work_struct *work)
+{
+	struct kbase_vinstr_context *vctx =
+		container_of(work, struct kbase_vinstr_context, dump_work);
+	struct kbase_vinstr_client *pos;
+	u64 cur_time_ns;
+
+	mutex_lock(&vctx->lock);
+
+	cur_time_ns = kbasep_vinstr_timestamp_ns();
+
+	/* Dump all periodic clients whose next dump time is before the current
+	 * time.
+	 */
+	list_for_each_entry(pos, &vctx->clients, node) {
+		if ((pos->next_dump_time_ns != 0) &&
+			(pos->next_dump_time_ns < cur_time_ns))
+			kbasep_vinstr_client_dump(
+				pos, BASE_HWCNT_READER_EVENT_PERIODIC);
+	}
+
+	/* Update the next dump times of all periodic clients, then reschedule
+	 * this worker at the earliest next dump time.
+	 */
+	kbasep_vinstr_reschedule_worker(vctx);
+
+	mutex_unlock(&vctx->lock);
+}
+
+/**
+ * kbasep_vinstr_dump_timer() - Dump timer that schedules the dump worker for
+ *                              execution as soon as possible.
+ * @timer: Timer structure.
+ */
+static enum hrtimer_restart kbasep_vinstr_dump_timer(struct hrtimer *timer)
+{
+	struct kbase_vinstr_context *vctx =
+		container_of(timer, struct kbase_vinstr_context, dump_timer);
+
+	/* We don't need to check vctx->suspend_count here, as the suspend
+	 * function will ensure that any worker enqueued here is immediately
+	 * cancelled, and the worker itself won't reschedule this timer if
+	 * suspend_count != 0.
+	 */
+#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+	queue_work(system_wq, &vctx->dump_work);
+#else
+	queue_work(system_highpri_wq, &vctx->dump_work);
+#endif
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * kbasep_vinstr_client_destroy() - Destroy a vinstr client.
+ * @vcli: vinstr client. Must not be attached to a vinstr context.
+ */
+static void kbasep_vinstr_client_destroy(struct kbase_vinstr_client *vcli)
+{
+	if (!vcli)
+		return;
+
+	kbase_hwcnt_virtualizer_client_destroy(vcli->hvcli);
+	kfree(vcli->dump_bufs_meta);
+	kbase_hwcnt_dump_buffer_array_free(&vcli->dump_bufs);
+	kbase_hwcnt_enable_map_free(&vcli->enable_map);
+	kfree(vcli);
+}
+
+/**
+ * kbasep_vinstr_client_create() - Create a vinstr client. Does not attach to
+ *                                 the vinstr context.
+ * @vctx:     Non-NULL pointer to vinstr context.
+ * @setup:    Non-NULL pointer to hardware counter ioctl setup structure.
+ *            setup->buffer_count must not be 0.
+ * @out_vcli: Non-NULL pointer to where created client will be stored on
+ *            success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_vinstr_client_create(
+	struct kbase_vinstr_context *vctx,
+	struct kbase_ioctl_hwcnt_reader_setup *setup,
+	struct kbase_vinstr_client **out_vcli)
+{
+	int errcode;
+	struct kbase_vinstr_client *vcli;
+	struct kbase_hwcnt_physical_enable_map phys_em;
+
+	WARN_ON(!vctx);
+	WARN_ON(!setup);
+	WARN_ON(setup->buffer_count == 0);
+
+	vcli = kzalloc(sizeof(*vcli), GFP_KERNEL);
+	if (!vcli)
+		return -ENOMEM;
+
+	vcli->vctx = vctx;
+
+	errcode = kbase_hwcnt_enable_map_alloc(
+		vctx->metadata, &vcli->enable_map);
+	if (errcode)
+		goto error;
+
+	phys_em.jm_bm = setup->jm_bm;
+	phys_em.shader_bm = setup->shader_bm;
+	phys_em.tiler_bm = setup->tiler_bm;
+	phys_em.mmu_l2_bm = setup->mmu_l2_bm;
+	kbase_hwcnt_gpu_enable_map_from_physical(&vcli->enable_map, &phys_em);
+
+	errcode = kbase_hwcnt_dump_buffer_array_alloc(
+		vctx->metadata, setup->buffer_count, &vcli->dump_bufs);
+	if (errcode)
+		goto error;
+
+	errcode = -ENOMEM;
+	vcli->dump_bufs_meta = kmalloc_array(
+		setup->buffer_count, sizeof(*vcli->dump_bufs_meta), GFP_KERNEL);
+	if (!vcli->dump_bufs_meta)
+		goto error;
+
+	errcode = kbase_hwcnt_virtualizer_client_create(
+		vctx->hvirt, &vcli->enable_map, &vcli->hvcli);
+	if (errcode)
+		goto error;
+
+	init_waitqueue_head(&vcli->waitq);
+
+	*out_vcli = vcli;
+	return 0;
+error:
+	kbasep_vinstr_client_destroy(vcli);
+	return errcode;
+}
+
+int kbase_vinstr_init(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_vinstr_context **out_vctx)
+{
+	struct kbase_vinstr_context *vctx;
+	const struct kbase_hwcnt_metadata *metadata;
+
+	if (!hvirt || !out_vctx)
+		return -EINVAL;
+
+	metadata = kbase_hwcnt_virtualizer_metadata(hvirt);
+	if (!metadata)
+		return -EINVAL;
+
+	vctx = kzalloc(sizeof(*vctx), GFP_KERNEL);
+	if (!vctx)
+		return -ENOMEM;
+
+	vctx->hvirt = hvirt;
+	vctx->metadata = metadata;
+
+	mutex_init(&vctx->lock);
+	INIT_LIST_HEAD(&vctx->clients);
+	hrtimer_init(&vctx->dump_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	vctx->dump_timer.function = kbasep_vinstr_dump_timer;
+	INIT_WORK(&vctx->dump_work, kbasep_vinstr_dump_worker);
+
+	*out_vctx = vctx;
+	return 0;
+}
+
+void kbase_vinstr_term(struct kbase_vinstr_context *vctx)
+{
+	if (!vctx)
+		return;
+
+	cancel_work_sync(&vctx->dump_work);
+
+	/* Non-zero client count implies client leak */
+	if (WARN_ON(vctx->client_count != 0)) {
+		struct kbase_vinstr_client *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &vctx->clients, node) {
+			list_del(&pos->node);
+			vctx->client_count--;
+			kbasep_vinstr_client_destroy(pos);
+		}
+	}
+
+	WARN_ON(vctx->client_count != 0);
+	kfree(vctx);
+}
+
+void kbase_vinstr_suspend(struct kbase_vinstr_context *vctx)
+{
+	if (WARN_ON(!vctx))
+		return;
+
+	mutex_lock(&vctx->lock);
+
+	if (!WARN_ON(vctx->suspend_count == SIZE_MAX))
+		vctx->suspend_count++;
+
+	mutex_unlock(&vctx->lock);
+
+	/* Always sync cancel the timer and then the worker, regardless of the
+	 * new suspend count.
+	 *
+	 * This ensures concurrent calls to kbase_vinstr_suspend() always block
+	 * until vinstr is fully suspended.
+	 *
+	 * The timer is cancelled before the worker, as the timer
+	 * unconditionally re-enqueues the worker, but the worker checks the
+	 * suspend_count that we just incremented before rescheduling the timer.
+	 *
+	 * Therefore if we cancel the worker first, the timer might re-enqueue
+	 * the worker before we cancel the timer, but the opposite is not
+	 * possible.
+	 */
+	hrtimer_cancel(&vctx->dump_timer);
+	cancel_work_sync(&vctx->dump_work);
+}
+
+void kbase_vinstr_resume(struct kbase_vinstr_context *vctx)
+{
+	if (WARN_ON(!vctx))
+		return;
+
+	mutex_lock(&vctx->lock);
+
+	if (!WARN_ON(vctx->suspend_count == 0)) {
+		vctx->suspend_count--;
+
+		/* Last resume, so re-enqueue the worker if we have any periodic
+		 * clients.
+		 */
+		if (vctx->suspend_count == 0) {
+			struct kbase_vinstr_client *pos;
+			bool has_periodic_clients = false;
+
+			list_for_each_entry(pos, &vctx->clients, node) {
+				if (pos->dump_interval_ns != 0) {
+					has_periodic_clients = true;
+					break;
+				}
+			}
+
+			if (has_periodic_clients)
+#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+				queue_work(system_wq, &vctx->dump_work);
+#else
+				queue_work(system_highpri_wq, &vctx->dump_work);
+#endif
+		}
+	}
+
+	mutex_unlock(&vctx->lock);
+}
+
+int kbase_vinstr_hwcnt_reader_setup(
+	struct kbase_vinstr_context *vctx,
+	struct kbase_ioctl_hwcnt_reader_setup *setup)
+{
+	int errcode;
+	int fd;
+	struct kbase_vinstr_client *vcli = NULL;
+
+	if (!vctx || !setup ||
+	    (setup->buffer_count == 0) ||
+	    (setup->buffer_count > MAX_BUFFER_COUNT))
+		return -EINVAL;
+
+	errcode = kbasep_vinstr_client_create(vctx, setup, &vcli);
+	if (errcode)
+		goto error;
+
+	errcode = anon_inode_getfd(
+		"[mali_vinstr_desc]",
+		&vinstr_client_fops,
+		vcli,
+		O_RDONLY | O_CLOEXEC);
+	if (errcode < 0)
+		goto error;
+
+	fd = errcode;
+
+	/* Add the new client. No need to reschedule worker, as not periodic */
+	mutex_lock(&vctx->lock);
+
+	vctx->client_count++;
+	list_add(&vcli->node, &vctx->clients);
+
+	mutex_unlock(&vctx->lock);
+
+	return fd;
+error:
+	kbasep_vinstr_client_destroy(vcli);
+	return errcode;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_buffer_ready() - Check if client has ready
+ *                                             buffers.
+ * @cli: Non-NULL pointer to vinstr client.
+ *
+ * Return: Non-zero if the client has at least one filled dump buffer that
+ *         has not yet been handed to userspace.
+ */
+static int kbasep_vinstr_hwcnt_reader_buffer_ready(
+	struct kbase_vinstr_client *cli)
+{
+	WARN_ON(!cli);
+	return atomic_read(&cli->write_idx) != atomic_read(&cli->meta_idx);
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_dump() - Dump ioctl command.
+ * @cli: Non-NULL pointer to vinstr client.
+ *
+ * Return: 0 on success, else error code.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_dump(
+	struct kbase_vinstr_client *cli)
+{
+	int errcode;
+
+	mutex_lock(&cli->vctx->lock);
+
+	errcode = kbasep_vinstr_client_dump(
+		cli, BASE_HWCNT_READER_EVENT_MANUAL);
+
+	mutex_unlock(&cli->vctx->lock);
+	return errcode;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_clear() - Clear ioctl command.
+ * @cli: Non-NULL pointer to vinstr client.
+ *
+ * Return: 0 on success, else error code.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_clear(
+	struct kbase_vinstr_client *cli)
+{
+	int errcode;
+
+	mutex_lock(&cli->vctx->lock);
+
+	errcode = kbasep_vinstr_client_clear(cli);
+
+	mutex_unlock(&cli->vctx->lock);
+	return errcode;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_get_buffer() - Get buffer ioctl command.
+ * @cli:    Non-NULL pointer to vinstr client.
+ * @buffer: Non-NULL pointer to userspace buffer.
+ * @size:   Size of buffer.
+ *
+ * Return: 0 on success, else error code.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_get_buffer(
+	struct kbase_vinstr_client *cli,
+	void __user *buffer,
+	size_t size)
+{
+	unsigned int meta_idx = atomic_read(&cli->meta_idx);
+	unsigned int idx = meta_idx % cli->dump_bufs.buf_cnt;
+
+	struct kbase_hwcnt_reader_metadata *meta = &cli->dump_bufs_meta[idx];
+
+	/* Metadata sanity check. */
+	WARN_ON(idx != meta->buffer_idx);
+
+	if (sizeof(struct kbase_hwcnt_reader_metadata) != size)
+		return -EINVAL;
+
+	/* Check if there is any buffer available. */
+	if (atomic_read(&cli->write_idx) == meta_idx)
+		return -EAGAIN;
+
+	/* Check if previously taken buffer was put back. */
+	if (atomic_read(&cli->read_idx) != meta_idx)
+		return -EBUSY;
+
+	/* Copy next available buffer's metadata to user. */
+	if (copy_to_user(buffer, meta, size))
+		return -EFAULT;
+
+	atomic_inc(&cli->meta_idx);
+
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_put_buffer() - Put buffer ioctl command.
+ * @cli:    Non-NULL pointer to vinstr client.
+ * @buffer: Non-NULL pointer to userspace buffer.
+ * @size:   Size of buffer.
+ *
+ * Return: 0 on success, else error code.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_put_buffer(
+	struct kbase_vinstr_client *cli,
+	void __user *buffer,
+	size_t size)
+{
+	unsigned int read_idx = atomic_read(&cli->read_idx);
+	unsigned int idx = read_idx % cli->dump_bufs.buf_cnt;
+
+	struct kbase_hwcnt_reader_metadata meta;
+
+	if (sizeof(struct kbase_hwcnt_reader_metadata) != size)
+		return -EINVAL;
+
+	/* Check if any buffer was taken. */
+	if (atomic_read(&cli->meta_idx) == read_idx)
+		return -EPERM;
+
+	/* Check if correct buffer is put back. */
+	if (copy_from_user(&meta, buffer, size))
+		return -EFAULT;
+	if (idx != meta.buffer_idx)
+		return -EINVAL;
+
+	atomic_inc(&cli->read_idx);
+
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_set_interval() - Set interval ioctl command.
+ * @cli:      Non-NULL pointer to vinstr client.
+ * @interval: Periodic dumping interval (disable periodic dumping if 0).
+ *
+ * Return: 0 always.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
+	struct kbase_vinstr_client *cli,
+	u32 interval)
+{
+	mutex_lock(&cli->vctx->lock);
+
+	if ((interval != 0) && (interval < DUMP_INTERVAL_MIN_NS))
+		interval = DUMP_INTERVAL_MIN_NS;
+	/* Update the interval, and put in a dummy next dump time */
+	cli->dump_interval_ns = interval;
+	cli->next_dump_time_ns = 0;
+
+	/*
+	 * If it's a periodic client, kick off the worker early to do a proper
+	 * timer reschedule. Return value is ignored, as we don't care if the
+	 * worker is already queued.
+	 */
+	if ((interval != 0) && (cli->vctx->suspend_count == 0))
+#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+		queue_work(system_wq, &cli->vctx->dump_work);
+#else
+		queue_work(system_highpri_wq, &cli->vctx->dump_work);
+#endif
+
+	mutex_unlock(&cli->vctx->lock);
+
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_enable_event() - Enable event ioctl command.
+ * @cli:      Non-NULL pointer to vinstr client.
+ * @event_id: ID of event to enable.
+ *
+ * Return: 0 always.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_enable_event(
+		struct kbase_vinstr_client *cli,
+		enum base_hwcnt_reader_event event_id)
+{
+	/* No-op, as events aren't supported */
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_disable_event() - Disable event ioctl
+ *                                                    command.
+ * @cli:      Non-NULL pointer to vinstr client.
+ * @event_id: ID of event to disable.
+ *
+ * Return: 0 always.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_disable_event(
+	struct kbase_vinstr_client *cli,
+	enum base_hwcnt_reader_event event_id)
+{
+	/* No-op, as events aren't supported */
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_get_hwver() - Get HW version ioctl command.
+ * @cli:   Non-NULL pointer to vinstr client.
+ * @hwver: Non-NULL pointer to user buffer where HW version will be stored.
+ *
+ * Return: 0 on success, else error code.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_get_hwver(
+	struct kbase_vinstr_client *cli,
+	u32 __user *hwver)
+{
+	u32 ver = 0;
+	const enum kbase_hwcnt_gpu_group_type type =
+		kbase_hwcnt_metadata_group_type(cli->vctx->metadata, 0);
+
+	switch (type) {
+	case KBASE_HWCNT_GPU_GROUP_TYPE_V4:
+		ver = 4;
+		break;
+	case KBASE_HWCNT_GPU_GROUP_TYPE_V5:
+		ver = 5;
+		break;
+	default:
+		WARN_ON(true);
+	}
+
+	if (ver == 0)
+		return -EINVAL;
+
+	return put_user(ver, hwver);
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl() - hwcnt reader's ioctl.
+ * @filp:   Non-NULL pointer to file structure.
+ * @cmd:    User command.
+ * @arg:    Command's argument.
+ *
+ * Return: 0 on success, else error code.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl(
+	struct file *filp,
+	unsigned int cmd,
+	unsigned long arg)
+{
+	long rcode;
+	struct kbase_vinstr_client *cli;
+
+	if (!filp || (_IOC_TYPE(cmd) != KBASE_HWCNT_READER))
+		return -EINVAL;
+
+	cli = filp->private_data;
+	if (!cli)
+		return -EINVAL;
+
+	switch (cmd) {
+	case KBASE_HWCNT_READER_GET_API_VERSION:
+		rcode = put_user(HWCNT_READER_API, (u32 __user *)arg);
+		break;
+	case KBASE_HWCNT_READER_GET_HWVER:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_hwver(
+			cli, (u32 __user *)arg);
+		break;
+	case KBASE_HWCNT_READER_GET_BUFFER_SIZE:
+		rcode = put_user(
+			(u32)cli->vctx->metadata->dump_buf_bytes,
+			(u32 __user *)arg);
+		break;
+	case KBASE_HWCNT_READER_DUMP:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_dump(cli);
+		break;
+	case KBASE_HWCNT_READER_CLEAR:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_clear(cli);
+		break;
+	case KBASE_HWCNT_READER_GET_BUFFER:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_buffer(
+			cli, (void __user *)arg, _IOC_SIZE(cmd));
+		break;
+	case KBASE_HWCNT_READER_PUT_BUFFER:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_put_buffer(
+			cli, (void __user *)arg, _IOC_SIZE(cmd));
+		break;
+	case KBASE_HWCNT_READER_SET_INTERVAL:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
+			cli, (u32)arg);
+		break;
+	case KBASE_HWCNT_READER_ENABLE_EVENT:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_enable_event(
+			cli, (enum base_hwcnt_reader_event)arg);
+		break;
+	case KBASE_HWCNT_READER_DISABLE_EVENT:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_disable_event(
+			cli, (enum base_hwcnt_reader_event)arg);
+		break;
+	default:
+		WARN_ON(true);
+		rcode = -EINVAL;
+		break;
+	}
+
+	return rcode;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_poll() - hwcnt reader's poll.
+ * @filp: Non-NULL pointer to file structure.
+ * @wait: Non-NULL pointer to poll table.
+ *
+ * Return: POLLIN if data can be read without blocking, 0 if data cannot be
+ *         read without blocking, else error code.
+ */
+static unsigned int kbasep_vinstr_hwcnt_reader_poll(
+	struct file *filp,
+	poll_table *wait)
+{
+	struct kbase_vinstr_client *cli;
+
+	if (!filp || !wait)
+		return -EINVAL;
+
+	cli = filp->private_data;
+	if (!cli)
+		return -EINVAL;
+
+	poll_wait(filp, &cli->waitq, wait);
+	if (kbasep_vinstr_hwcnt_reader_buffer_ready(cli))
+		return POLLIN;
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_mmap() - hwcnt reader's mmap.
+ * @filp: Non-NULL pointer to file structure.
+ * @vma:  Non-NULL pointer to vma structure.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_vinstr_hwcnt_reader_mmap(
+	struct file *filp,
+	struct vm_area_struct *vma)
+{
+	struct kbase_vinstr_client *cli;
+	unsigned long vm_size, size, addr, pfn, offset;
+
+	if (!filp || !vma)
+		return -EINVAL;
+
+	cli = filp->private_data;
+	if (!cli)
+		return -EINVAL;
+
+	vm_size = vma->vm_end - vma->vm_start;
+	size = cli->dump_bufs.buf_cnt * cli->vctx->metadata->dump_buf_bytes;
+
+	if (vma->vm_pgoff > (size >> PAGE_SHIFT))
+		return -EINVAL;
+
+	offset = vma->vm_pgoff << PAGE_SHIFT;
+	if (vm_size > size - offset)
+		return -EINVAL;
+
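+	/* The dump buffers are allocated as a single physically contiguous
+	 * region starting at page_addr, so the requested window can be
+	 * mapped with one remap_pfn_range() call.
+	 */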
+	addr = __pa(cli->dump_bufs.page_addr + offset);
+	pfn = addr >> PAGE_SHIFT;
+
+	return remap_pfn_range(
+		vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot);
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_release() - hwcnt reader's release.
+ * @inode: Non-NULL pointer to inode structure.
+ * @filp:  Non-NULL pointer to file structure.
+ *
+ * Return: 0 always.
+ */
+static int kbasep_vinstr_hwcnt_reader_release(struct inode *inode,
+	struct file *filp)
+{
+	struct kbase_vinstr_client *vcli = filp->private_data;
+
+	mutex_lock(&vcli->vctx->lock);
+
+	vcli->vctx->client_count--;
+	list_del(&vcli->node);
+
+	mutex_unlock(&vcli->vctx->lock);
+
+	kbasep_vinstr_client_destroy(vcli);
+
+	return 0;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_vinstr.h b/drivers/gpu/arm/midgard/mali_kbase_vinstr.h
new file mode 100644
index 0000000..81d315f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_vinstr.h
@@ -0,0 +1,91 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Vinstr provides the ioctl interface for userspace access to periodic
+ * hardware counter dumps.
+ */
+
+#ifndef _KBASE_VINSTR_H_
+#define _KBASE_VINSTR_H_
+
+struct kbase_vinstr_context;
+struct kbase_hwcnt_virtualizer;
+struct kbase_ioctl_hwcnt_reader_setup;
+
+/**
+ * kbase_vinstr_init() - Initialise a vinstr context.
+ * @hvirt:    Non-NULL pointer to the hardware counter virtualizer.
+ * @out_vctx: Non-NULL pointer to where the pointer to the created vinstr
+ *            context will be stored on success.
+ *
+ * On creation, the suspend count of the context will be 0.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_vinstr_init(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_vinstr_context **out_vctx);
+
+/**
+ * kbase_vinstr_term() - Terminate a vinstr context.
+ * @vctx: Pointer to the vinstr context to be terminated.
+ */
+void kbase_vinstr_term(struct kbase_vinstr_context *vctx);
+
+/**
+ * kbase_vinstr_suspend() - Increment the suspend count of the context.
+ * @vctx: Non-NULL pointer to the vinstr context to be suspended.
+ *
+ * After this function call returns, it is guaranteed that all timers and
+ * workers in vinstr will be cancelled, and will not be re-triggered until
+ * after the context has been resumed. In effect, this means no new counter
+ * dumps will occur for any existing or subsequently added periodic clients.
+ */
+void kbase_vinstr_suspend(struct kbase_vinstr_context *vctx);
+
+/**
+ * kbase_vinstr_resume() - Decrement the suspend count of the context.
+ * @vctx: Non-NULL pointer to the vinstr context to be resumed.
+ *
+ * If a call to this function decrements the suspend count from 1 to 0, then
+ * normal operation of vinstr will be resumed (i.e. counter dumps will once
+ * again be automatically triggered for all periodic clients).
+ *
+ * It is only valid to call this function one time for each prior returned call
+ * to kbase_vinstr_suspend.
+ */
+void kbase_vinstr_resume(struct kbase_vinstr_context *vctx);
+
+/**
+ * kbase_vinstr_hwcnt_reader_setup() - Set up a new hardware counter reader
+ *                                     client.
+ * @vinstr_ctx: Non-NULL pointer to the vinstr context.
+ * @setup:      Non-NULL pointer to the hwcnt reader configuration.
+ *
+ * Return: file descriptor on success, else a (negative) error code.
+ */
+int kbase_vinstr_hwcnt_reader_setup(
+	struct kbase_vinstr_context *vinstr_ctx,
+	struct kbase_ioctl_hwcnt_reader_setup *setup);
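+
+/* Typical (illustrative) userspace sequence against the returned fd:
+ *   1. mmap() the fd to gain access to the dump buffers.
+ *   2. KBASE_HWCNT_READER_SET_INTERVAL to start periodic dumps, or
+ *      KBASE_HWCNT_READER_DUMP for a manual dump.
+ *   3. poll() for POLLIN, then KBASE_HWCNT_READER_GET_BUFFER to fetch
+ *      the metadata of the next ready buffer.
+ *   4. KBASE_HWCNT_READER_PUT_BUFFER to return the buffer once it has
+ *      been read.
+ */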
+
+#endif /* _KBASE_VINSTR_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h b/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h
new file mode 100644
index 0000000..6c6a8c6
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h
@@ -0,0 +1,204 @@
+/*
+ *
+ * (C) COPYRIGHT 2014,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#if !defined(_TRACE_MALI_KBASE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MALI_KBASE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(mali_slot_template,
+	TP_PROTO(int jobslot, unsigned int info_val),
+	TP_ARGS(jobslot, info_val),
+	TP_STRUCT__entry(
+		__field(unsigned int, jobslot)
+		__field(unsigned int, info_val)
+	),
+	TP_fast_assign(
+		__entry->jobslot = jobslot;
+		__entry->info_val = info_val;
+	),
+	TP_printk("jobslot=%u info=%u", __entry->jobslot, __entry->info_val)
+);
+
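+/* Each DEFINE_MALI_SLOT_EVENT(name) below instantiates the event class
+ * above as a tracepoint named mali_<name>, emitted via trace_mali_<name>().
+ */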
+#define DEFINE_MALI_SLOT_EVENT(name) \
+DEFINE_EVENT(mali_slot_template, mali_##name, \
+	TP_PROTO(int jobslot, unsigned int info_val), \
+	TP_ARGS(jobslot, info_val))
+DEFINE_MALI_SLOT_EVENT(JM_SUBMIT);
+DEFINE_MALI_SLOT_EVENT(JM_JOB_DONE);
+DEFINE_MALI_SLOT_EVENT(JM_UPDATE_HEAD);
+DEFINE_MALI_SLOT_EVENT(JM_CHECK_HEAD);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_0);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_1);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_0);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_1);
+DEFINE_MALI_SLOT_EVENT(JM_SLOT_SOFT_OR_HARD_STOP);
+DEFINE_MALI_SLOT_EVENT(JM_SLOT_EVICT);
+DEFINE_MALI_SLOT_EVENT(JM_BEGIN_RESET_WORKER);
+DEFINE_MALI_SLOT_EVENT(JM_END_RESET_WORKER);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_SUBMIT_TO_BLOCKED);
+DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_CURRENT);
+DEFINE_MALI_SLOT_EVENT(JD_DONE_TRY_RUN_NEXT_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_CORES_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_INUSE_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_AFFINITY_WOULD_VIOLATE);
+DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_TRY_RUN_NEXT_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_RETRY_NEEDED);
+DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB_IRQ);
+#undef DEFINE_MALI_SLOT_EVENT
+
+DECLARE_EVENT_CLASS(mali_refcount_template,
+	TP_PROTO(int refcount, unsigned int info_val),
+	TP_ARGS(refcount, info_val),
+	TP_STRUCT__entry(
+		__field(unsigned int, refcount)
+		__field(unsigned int, info_val)
+	),
+	TP_fast_assign(
+		__entry->refcount = refcount;
+		__entry->info_val = info_val;
+	),
+	TP_printk("refcount=%u info=%u", __entry->refcount, __entry->info_val)
+);
+
+#define DEFINE_MALI_REFCOUNT_EVENT(name) \
+DEFINE_EVENT(mali_refcount_template, mali_##name, \
+	TP_PROTO(int refcount, unsigned int info_val), \
+	TP_ARGS(refcount, info_val))
+DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX_NOLOCK);
+DEFINE_MALI_REFCOUNT_EVENT(JS_ADD_JOB);
+DEFINE_MALI_REFCOUNT_EVENT(JS_REMOVE_JOB);
+DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_RELEASE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_TRY_SCHEDULE_HEAD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_INIT_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TERM_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_ENQUEUE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_DEQUEUE_HEAD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TRY_EVICT_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_ADD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_REMOVE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_FOREACH_CTX_JOBS);
+DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_ACTIVE);
+DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_IDLE);
+#undef DEFINE_MALI_REFCOUNT_EVENT
+
+DECLARE_EVENT_CLASS(mali_add_template,
+	TP_PROTO(int gpu_addr, unsigned int info_val),
+	TP_ARGS(gpu_addr, info_val),
+	TP_STRUCT__entry(
+		__field(unsigned int, gpu_addr)
+		__field(unsigned int, info_val)
+	),
+	TP_fast_assign(
+		__entry->gpu_addr = gpu_addr;
+		__entry->info_val = info_val;
+	),
+	TP_printk("gpu_addr=%u info=%u", __entry->gpu_addr, __entry->info_val)
+);
+
+#define DEFINE_MALI_ADD_EVENT(name) \
+DEFINE_EVENT(mali_add_template, mali_##name, \
+	TP_PROTO(int gpu_addr, unsigned int info_val), \
+	TP_ARGS(gpu_addr, info_val))
+DEFINE_MALI_ADD_EVENT(CORE_CTX_DESTROY);
+DEFINE_MALI_ADD_EVENT(CORE_CTX_HWINSTR_TERM);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_CLEAR);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_DONE);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_SOFT_RESET);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_HARD_RESET);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_SAMPLE);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_CLEAR);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_CLEAN_INV_CACHES);
+DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER);
+DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER_END);
+DEFINE_MALI_ADD_EVENT(JD_CANCEL_WORKER);
+DEFINE_MALI_ADD_EVENT(JD_DONE);
+DEFINE_MALI_ADD_EVENT(JD_CANCEL);
+DEFINE_MALI_ADD_EVENT(JD_ZAP_CONTEXT);
+DEFINE_MALI_ADD_EVENT(JM_IRQ);
+DEFINE_MALI_ADD_EVENT(JM_IRQ_END);
+DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS);
+DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS_DONE);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_NON_SCHEDULED);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_SCHEDULED);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_DONE);
+DEFINE_MALI_ADD_EVENT(JM_SUBMIT_AFTER_RESET);
+DEFINE_MALI_ADD_EVENT(JM_JOB_COMPLETE);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_RUNPOOL);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_RUNPOOL);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_CTX);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_CTX);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_END);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_START);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_ENQUEUE_JOB);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_DESIRED);
+DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERING_UP);
+DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERED_UP);
+DEFINE_MALI_ADD_EVENT(PM_PWRON);
+DEFINE_MALI_ADD_EVENT(PM_PWRON_TILER);
+DEFINE_MALI_ADD_EVENT(PM_PWRON_L2);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF_TILER);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF_L2);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_TILER);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_L2);
+DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED);
+DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED_TILER);
+DEFINE_MALI_ADD_EVENT(PM_REQUEST_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_REQUEST_CHANGE_TILER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_RELEASE_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_RELEASE_CHANGE_TILER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE_TILER);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE_TILER);
+DEFINE_MALI_ADD_EVENT(PM_GPU_ON);
+DEFINE_MALI_ADD_EVENT(PM_GPU_OFF);
+DEFINE_MALI_ADD_EVENT(PM_SET_POLICY);
+DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_INIT);
+DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_TERM);
+DEFINE_MALI_ADD_EVENT(PM_CA_SET_POLICY);
+DEFINE_MALI_ADD_EVENT(PM_WAKE_WAITERS);
+#undef DEFINE_MALI_ADD_EVENT
+
+#endif /* _TRACE_MALI_KBASE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef linux
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mali_linux_kbase_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
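Each DEFINE_MALI_ADD_EVENT(name) above expands, via DEFINE_EVENT, into a
tracepoint with an emit helper named trace_mali_<name>(). A minimal call-site
sketch follows; the function is hypothetical, and it assumes exactly one .c
file in the driver defines CREATE_TRACE_POINTS before including this header:

#define CREATE_TRACE_POINTS
#include "mali_linux_kbase_trace.h"

/* Hedged sketch: emitting one of the "add" events defined above. */
static void example_emit_gpu_irq(unsigned int irq_status)
{
	/* Generated by DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ); this is a
	 * no-op unless the event is enabled through tracefs.
	 */
	trace_mali_CORE_GPU_IRQ(0 /* gpu_addr */, irq_status);
}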
diff --git a/drivers/gpu/arm/midgard/mali_linux_trace.h b/drivers/gpu/arm/midgard/mali_linux_trace.h
new file mode 100644
index 0000000..96296ac
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_linux_trace.h
@@ -0,0 +1,138 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_TRACE_MALI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MALI_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+#include <linux/tracepoint.h>
+
+#define MALI_JOB_SLOTS_EVENT_CHANGED
+
+/**
+ * mali_job_slots_event - Reports change of job slot status.
+ * @gpu_id:   Kbase device id
+ * @event_id: ORed together bitfields representing a type of event,
+ *            made with the GATOR_MAKE_EVENT() macro.
+ */
+TRACE_EVENT(mali_job_slots_event,
+	TP_PROTO(u32 gpu_id, u32 event_id, u32 tgid, u32 pid,
+		u8 job_id),
+	TP_ARGS(gpu_id, event_id, tgid, pid, job_id),
+	TP_STRUCT__entry(
+		__field(u32, gpu_id)
+		__field(u32, event_id)
+		__field(u32, tgid)
+		__field(u32, pid)
+		__field(u8,  job_id)
+	),
+	TP_fast_assign(
+		__entry->gpu_id   = gpu_id;
+		__entry->event_id = event_id;
+		__entry->tgid     = tgid;
+		__entry->pid      = pid;
+		__entry->job_id   = job_id;
+	),
+	TP_printk("gpu=%u event=%u tgid=%u pid=%u job_id=%u",
+		__entry->gpu_id, __entry->event_id,
+		__entry->tgid, __entry->pid, __entry->job_id)
+);
+
+/**
+ * mali_pm_status - Reports change of power management status.
+ * @gpu_id:   Kbase device id
+ * @event_id: Core type (shader, tiler, L2 cache)
+ * @value:    64-bit bitmask reporting the power status of
+ *            the cores (1=ON, 0=OFF)
+ */
+TRACE_EVENT(mali_pm_status,
+	TP_PROTO(u32 gpu_id, u32 event_id, u64 value),
+	TP_ARGS(gpu_id, event_id, value),
+	TP_STRUCT__entry(
+		__field(u32, gpu_id)
+		__field(u32, event_id)
+		__field(u64, value)
+	),
+	TP_fast_assign(
+		__entry->gpu_id   = gpu_id;
+		__entry->event_id = event_id;
+		__entry->value    = value;
+	),
+	TP_printk("gpu=%u event %u = %llu",
+		__entry->gpu_id, __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_page_fault_insert_pages - Reports an MMU page fault
+ * resulting in new pages being mapped.
+ * @gpu_id:   Kbase device id
+ * @event_id: MMU address space number
+ * @value:    Number of newly allocated pages
+ */
+TRACE_EVENT(mali_page_fault_insert_pages,
+	TP_PROTO(u32 gpu_id, s32 event_id, u64 value),
+	TP_ARGS(gpu_id, event_id, value),
+	TP_STRUCT__entry(
+		__field(u32, gpu_id)
+		__field(s32, event_id)
+		__field(u64, value)
+	),
+	TP_fast_assign(
+		__entry->gpu_id   = gpu_id;
+		__entry->event_id = event_id;
+		__entry->value    = value;
+	),
+	TP_printk("gpu=%u event %d = %llu",
+		__entry->gpu_id, __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_total_alloc_pages_change - Reports that the total number of
+ * allocated pages has changed.
+ * @gpu_id:   Kbase device id
+ * @event_id: Total number of pages allocated
+ */
+TRACE_EVENT(mali_total_alloc_pages_change,
+	TP_PROTO(u32 gpu_id, s64 event_id),
+	TP_ARGS(gpu_id, event_id),
+	TP_STRUCT__entry(
+		__field(u32, gpu_id)
+		__field(s64, event_id)
+	),
+	TP_fast_assign(
+		__entry->gpu_id   = gpu_id;
+		__entry->event_id = event_id;
+	),
+	TP_printk("gpu=%u event=%lld", __entry->gpu_id, __entry->event_id)
+);
+
+#endif /* _TRACE_MALI_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef linux
+#define TRACE_INCLUDE_PATH .
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
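These tracepoints feed the gator profiling daemon (see the GATOR_MAKE_EVENT()
reference above). A hedged usage sketch for mali_pm_status follows; the
function is hypothetical and the event_id encoding for "shader" is an
assumption, not taken from this file:

#include "mali_linux_trace.h"

/* Sketch: report a shader-core power bitmap via mali_pm_status. */
static void example_report_pm_status(u32 gpu_id, u64 shader_ready_bitmap)
{
	trace_mali_pm_status(gpu_id, 0 /* core type: shader (assumed) */,
			     shader_ready_bitmap);
}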
diff --git a/drivers/gpu/arm/midgard/mali_malisw.h b/drivers/gpu/arm/midgard/mali_malisw.h
new file mode 100644
index 0000000..3a4db10
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_malisw.h
@@ -0,0 +1,109 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Kernel-wide include for common macros and types.
+ */
+
+#ifndef _MALISW_H_
+#define _MALISW_H_
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+#define U8_MAX          ((u8)~0U)
+#define S8_MAX          ((s8)(U8_MAX>>1))
+#define S8_MIN          ((s8)(-S8_MAX - 1))
+#define U16_MAX         ((u16)~0U)
+#define S16_MAX         ((s16)(U16_MAX>>1))
+#define S16_MIN         ((s16)(-S16_MAX - 1))
+#define U32_MAX         ((u32)~0U)
+#define S32_MAX         ((s32)(U32_MAX>>1))
+#define S32_MIN         ((s32)(-S32_MAX - 1))
+#define U64_MAX         ((u64)~0ULL)
+#define S64_MAX         ((s64)(U64_MAX>>1))
+#define S64_MIN         ((s64)(-S64_MAX - 1))
+#endif /* LINUX_VERSION_CODE */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+#define SIZE_MAX        (~(size_t)0)
+#endif /* LINUX_VERSION_CODE */
+
+/**
+ * MIN - Return the lesser of two values.
+ *
+ * As a macro it may evaluate its arguments more than once.
+ * Refer to MAX macro for more details
+ */
+#define MIN(x, y)	((x) < (y) ? (x) : (y))
+
+/**
+ * MAX -  Return the greater of two values.
+ *
+ * As a macro it may evaluate its arguments more than once.
+ * If called on the same two arguments as MIN it is guaranteed to return
+ * the one that MIN didn't return. This is significant for types where not
+ * all values are comparable, e.g. NaNs in floating-point types. If you want
+ * to retrieve the min and max of two values, consider using a conditional swap
+ * instead.
+ */
+#define MAX(x, y)	((x) < (y) ? (y) : (x))
+
+/**
+ * @hideinitializer
+ * Function-like macro for suppressing unused variable warnings. Where possible
+ * such variables should be removed; this macro is present for cases where we
+ * must support API backwards compatibility.
+ */
+#define CSTD_UNUSED(x)	((void)(x))
+
+/**
+ * @hideinitializer
+ * Function-like macro for use where "no behavior" is desired. This is useful
+ * when compile time macros turn a function-like macro into a no-op, but
+ * where having no statement is otherwise invalid.
+ */
+#define CSTD_NOP(...)	((void)#__VA_ARGS__)
+
+/**
+ * @hideinitializer
+ * Function-like macro for stringizing a single level macro.
+ * @code
+ * #define MY_MACRO 32
+ * CSTD_STR1( MY_MACRO )
+ * > "MY_MACRO"
+ * @endcode
+ */
+#define CSTD_STR1(x)	#x
+
+/**
+ * @hideinitializer
+ * Function-like macro for stringizing a macro's value. This should not be used
+ * if the macro is defined in a way which may have no value; the
+ * alternative @c CSTD_STR2N macro should be used instead.
+ * @code
+ * #define MY_MACRO 32
+ * CSTD_STR2( MY_MACRO )
+ * > "32"
+ * @endcode
+ */
+#define CSTD_STR2(x)	CSTD_STR1(x)
+
+#endif /* _MALISW_H_ */
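The double-evaluation caveat in the MIN/MAX comments is easy to trip over. A
minimal illustration (hypothetical code, not part of the driver), which also
shows the two stringizing macros side by side:

#include "mali_malisw.h"

static void example_macro_pitfalls(void)
{
	int a = 1, b = 5;
	int m = MIN(a++, b);	/* expands to ((a++) < (b) ? (a++) : (b)):
				 * a++ runs twice, so m == 2 and a == 3 */

	CSTD_UNUSED(m);
}

#define MY_MACRO 32
static const char *example_name  = CSTD_STR1(MY_MACRO);	/* "MY_MACRO" */
static const char *example_value = CSTD_STR2(MY_MACRO);	/* "32" */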
diff --git a/drivers/gpu/arm/midgard/mali_midg_coherency.h b/drivers/gpu/arm/midgard/mali_midg_coherency.h
new file mode 100644
index 0000000..29d5df3
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_midg_coherency.h
@@ -0,0 +1,31 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _MIDG_COHERENCY_H_
+#define _MIDG_COHERENCY_H_
+
+#define COHERENCY_ACE_LITE 0
+#define COHERENCY_ACE      1
+#define COHERENCY_NONE     31
+#define COHERENCY_FEATURE_BIT(x) (1 << (x))
+
+#endif /* _MIDG_COHERENCY_H_ */
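COHERENCY_FEATURE_BIT() turns a mode identifier into a mask bit, which is how
the read-only COHERENCY_FEATURES register (defined in the register map that
follows) advertises supported modes. A small hedged sketch:

#include <linux/types.h>
#include "mali_midg_coherency.h"

/* Sketch: test an already-read COHERENCY_FEATURES register value. */
static bool supports_ace_lite(u32 coherency_features)
{
	return coherency_features &
	       COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
}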
diff --git a/drivers/gpu/arm/midgard/mali_midg_regmap.h b/drivers/gpu/arm/midgard/mali_midg_regmap.h
new file mode 100644
index 0000000..f0ec391
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_midg_regmap.h
@@ -0,0 +1,449 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _MIDG_REGMAP_H_
+#define _MIDG_REGMAP_H_
+
+#include "mali_midg_coherency.h"
+#include "mali_kbase_gpu_id.h"
+#include "mali_midg_regmap_jm.h"
+
+/* Begin Register Offsets */
+/* GPU control registers */
+
+#define GPU_CONTROL_BASE        0x0000
+#define GPU_CONTROL_REG(r)      (GPU_CONTROL_BASE + (r))
+#define GPU_ID                  0x000   /* (RO) GPU and revision identifier */
+#define L2_FEATURES             0x004   /* (RO) Level 2 cache features */
+#define TILER_FEATURES          0x00C   /* (RO) Tiler Features */
+#define MEM_FEATURES            0x010   /* (RO) Memory system features */
+#define MMU_FEATURES            0x014   /* (RO) MMU features */
+#define AS_PRESENT              0x018   /* (RO) Address space slots present */
+#define GPU_IRQ_RAWSTAT         0x020   /* (RW) */
+#define GPU_IRQ_CLEAR           0x024   /* (WO) */
+#define GPU_IRQ_MASK            0x028   /* (RW) */
+#define GPU_IRQ_STATUS          0x02C   /* (RO) */
+
+#define GPU_COMMAND             0x030   /* (WO) */
+#define GPU_STATUS              0x034   /* (RO) */
+
+#define GPU_DBGEN               (1 << 8)    /* DBGEN wire status */
+
+#define GPU_FAULTSTATUS         0x03C   /* (RO) GPU exception type and fault status */
+#define GPU_FAULTADDRESS_LO     0x040   /* (RO) GPU exception fault address, low word */
+#define GPU_FAULTADDRESS_HI     0x044   /* (RO) GPU exception fault address, high word */
+
+#define L2_CONFIG               0x048   /* (RW) Level 2 cache configuration */
+
+#define PWR_KEY                 0x050   /* (WO) Power manager key register */
+#define PWR_OVERRIDE0           0x054   /* (RW) Power manager override settings */
+#define PWR_OVERRIDE1           0x058   /* (RW) Power manager override settings */
+
+#define PRFCNT_BASE_LO          0x060   /* (RW) Performance counter memory region base address, low word */
+#define PRFCNT_BASE_HI          0x064   /* (RW) Performance counter memory region base address, high word */
+#define PRFCNT_CONFIG           0x068   /* (RW) Performance counter configuration */
+#define PRFCNT_JM_EN            0x06C   /* (RW) Performance counter enable flags for Job Manager */
+#define PRFCNT_SHADER_EN        0x070   /* (RW) Performance counter enable flags for shader cores */
+#define PRFCNT_TILER_EN         0x074   /* (RW) Performance counter enable flags for tiler */
+#define PRFCNT_MMU_L2_EN        0x07C   /* (RW) Performance counter enable flags for MMU/L2 cache */
+
+#define CYCLE_COUNT_LO          0x090   /* (RO) Cycle counter, low word */
+#define CYCLE_COUNT_HI          0x094   /* (RO) Cycle counter, high word */
+#define TIMESTAMP_LO            0x098   /* (RO) Global time stamp counter, low word */
+#define TIMESTAMP_HI            0x09C   /* (RO) Global time stamp counter, high word */
+
+#define THREAD_MAX_THREADS      0x0A0   /* (RO) Maximum number of threads per core */
+#define THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */
+#define THREAD_MAX_BARRIER_SIZE 0x0A8   /* (RO) Maximum threads waiting at a barrier */
+#define THREAD_FEATURES         0x0AC   /* (RO) Thread features */
+#define THREAD_TLS_ALLOC        0x310   /* (RO) Number of threads per core that TLS must be allocated for */
+
+#define TEXTURE_FEATURES_0      0x0B0   /* (RO) Support flags for indexed texture formats 0..31 */
+#define TEXTURE_FEATURES_1      0x0B4   /* (RO) Support flags for indexed texture formats 32..63 */
+#define TEXTURE_FEATURES_2      0x0B8   /* (RO) Support flags for indexed texture formats 64..95 */
+#define TEXTURE_FEATURES_3      0x0BC   /* (RO) Support flags for texture order */
+
+#define TEXTURE_FEATURES_REG(n) GPU_CONTROL_REG(TEXTURE_FEATURES_0 + ((n) << 2))
+
+#define SHADER_PRESENT_LO       0x100   /* (RO) Shader core present bitmap, low word */
+#define SHADER_PRESENT_HI       0x104   /* (RO) Shader core present bitmap, high word */
+
+#define TILER_PRESENT_LO        0x110   /* (RO) Tiler core present bitmap, low word */
+#define TILER_PRESENT_HI        0x114   /* (RO) Tiler core present bitmap, high word */
+
+#define L2_PRESENT_LO           0x120   /* (RO) Level 2 cache present bitmap, low word */
+#define L2_PRESENT_HI           0x124   /* (RO) Level 2 cache present bitmap, high word */
+
+#define STACK_PRESENT_LO        0xE00   /* (RO) Core stack present bitmap, low word */
+#define STACK_PRESENT_HI        0xE04   /* (RO) Core stack present bitmap, high word */
+
+#define SHADER_READY_LO         0x140   /* (RO) Shader core ready bitmap, low word */
+#define SHADER_READY_HI         0x144   /* (RO) Shader core ready bitmap, high word */
+
+#define TILER_READY_LO          0x150   /* (RO) Tiler core ready bitmap, low word */
+#define TILER_READY_HI          0x154   /* (RO) Tiler core ready bitmap, high word */
+
+#define L2_READY_LO             0x160   /* (RO) Level 2 cache ready bitmap, low word */
+#define L2_READY_HI             0x164   /* (RO) Level 2 cache ready bitmap, high word */
+
+#define STACK_READY_LO          0xE10   /* (RO) Core stack ready bitmap, low word */
+#define STACK_READY_HI          0xE14   /* (RO) Core stack ready bitmap, high word */
+
+#define SHADER_PWRON_LO         0x180   /* (WO) Shader core power on bitmap, low word */
+#define SHADER_PWRON_HI         0x184   /* (WO) Shader core power on bitmap, high word */
+
+#define TILER_PWRON_LO          0x190   /* (WO) Tiler core power on bitmap, low word */
+#define TILER_PWRON_HI          0x194   /* (WO) Tiler core power on bitmap, high word */
+
+#define L2_PWRON_LO             0x1A0   /* (WO) Level 2 cache power on bitmap, low word */
+#define L2_PWRON_HI             0x1A4   /* (WO) Level 2 cache power on bitmap, high word */
+
+#define STACK_PWRON_LO          0xE20   /* (RO) Core stack power on bitmap, low word */
+#define STACK_PWRON_HI          0xE24   /* (RO) Core stack power on bitmap, high word */
+
+#define SHADER_PWROFF_LO        0x1C0   /* (WO) Shader core power off bitmap, low word */
+#define SHADER_PWROFF_HI        0x1C4   /* (WO) Shader core power off bitmap, high word */
+
+#define TILER_PWROFF_LO         0x1D0   /* (WO) Tiler core power off bitmap, low word */
+#define TILER_PWROFF_HI         0x1D4   /* (WO) Tiler core power off bitmap, high word */
+
+#define L2_PWROFF_LO            0x1E0   /* (WO) Level 2 cache power off bitmap, low word */
+#define L2_PWROFF_HI            0x1E4   /* (WO) Level 2 cache power off bitmap, high word */
+
+#define STACK_PWROFF_LO         0xE30   /* (RO) Core stack power off bitmap, low word */
+#define STACK_PWROFF_HI         0xE34   /* (RO) Core stack power off bitmap, high word */
+
+#define SHADER_PWRTRANS_LO      0x200   /* (RO) Shader core power transition bitmap, low word */
+#define SHADER_PWRTRANS_HI      0x204   /* (RO) Shader core power transition bitmap, high word */
+
+#define TILER_PWRTRANS_LO       0x210   /* (RO) Tiler core power transition bitmap, low word */
+#define TILER_PWRTRANS_HI       0x214   /* (RO) Tiler core power transition bitmap, high word */
+
+#define L2_PWRTRANS_LO          0x220   /* (RO) Level 2 cache power transition bitmap, low word */
+#define L2_PWRTRANS_HI          0x224   /* (RO) Level 2 cache power transition bitmap, high word */
+
+#define STACK_PWRTRANS_LO       0xE40   /* (RO) Core stack power transition bitmap, low word */
+#define STACK_PWRTRANS_HI       0xE44   /* (RO) Core stack power transition bitmap, high word */
+
+#define SHADER_PWRACTIVE_LO     0x240   /* (RO) Shader core active bitmap, low word */
+#define SHADER_PWRACTIVE_HI     0x244   /* (RO) Shader core active bitmap, high word */
+
+#define TILER_PWRACTIVE_LO      0x250   /* (RO) Tiler core active bitmap, low word */
+#define TILER_PWRACTIVE_HI      0x254   /* (RO) Tiler core active bitmap, high word */
+
+#define L2_PWRACTIVE_LO         0x260   /* (RO) Level 2 cache active bitmap, low word */
+#define L2_PWRACTIVE_HI         0x264   /* (RO) Level 2 cache active bitmap, high word */
+
+#define COHERENCY_FEATURES      0x300   /* (RO) Coherency features present */
+#define COHERENCY_ENABLE        0x304   /* (RW) Coherency enable */
+
+#define SHADER_CONFIG           0xF04   /* (RW) Shader core configuration (implementation-specific) */
+#define TILER_CONFIG            0xF08   /* (RW) Tiler core configuration (implementation-specific) */
+#define L2_MMU_CONFIG           0xF0C   /* (RW) L2 cache and MMU configuration (implementation-specific) */
+
+/* Job control registers */
+
+#define JOB_CONTROL_BASE        0x1000
+
+#define JOB_CONTROL_REG(r)      (JOB_CONTROL_BASE + (r))
+
+#define JOB_IRQ_RAWSTAT         0x000   /* Raw interrupt status register */
+#define JOB_IRQ_CLEAR           0x004   /* Interrupt clear register */
+#define JOB_IRQ_MASK            0x008   /* Interrupt mask register */
+#define JOB_IRQ_STATUS          0x00C   /* Interrupt status register */
+
+/* MMU control registers */
+
+#define MEMORY_MANAGEMENT_BASE  0x2000
+#define MMU_REG(r)              (MEMORY_MANAGEMENT_BASE + (r))
+
+#define MMU_IRQ_RAWSTAT         0x000   /* (RW) Raw interrupt status register */
+#define MMU_IRQ_CLEAR           0x004   /* (WO) Interrupt clear register */
+#define MMU_IRQ_MASK            0x008   /* (RW) Interrupt mask register */
+#define MMU_IRQ_STATUS          0x00C   /* (RO) Interrupt status register */
+
+#define MMU_AS0                 0x400   /* Configuration registers for address space 0 */
+#define MMU_AS1                 0x440   /* Configuration registers for address space 1 */
+#define MMU_AS2                 0x480   /* Configuration registers for address space 2 */
+#define MMU_AS3                 0x4C0   /* Configuration registers for address space 3 */
+#define MMU_AS4                 0x500   /* Configuration registers for address space 4 */
+#define MMU_AS5                 0x540   /* Configuration registers for address space 5 */
+#define MMU_AS6                 0x580   /* Configuration registers for address space 6 */
+#define MMU_AS7                 0x5C0   /* Configuration registers for address space 7 */
+#define MMU_AS8                 0x600   /* Configuration registers for address space 8 */
+#define MMU_AS9                 0x640   /* Configuration registers for address space 9 */
+#define MMU_AS10                0x680   /* Configuration registers for address space 10 */
+#define MMU_AS11                0x6C0   /* Configuration registers for address space 11 */
+#define MMU_AS12                0x700   /* Configuration registers for address space 12 */
+#define MMU_AS13                0x740   /* Configuration registers for address space 13 */
+#define MMU_AS14                0x780   /* Configuration registers for address space 14 */
+#define MMU_AS15                0x7C0   /* Configuration registers for address space 15 */
+
+/* MMU address space control registers */
+
+#define MMU_AS_REG(n, r)        (MMU_REG(MMU_AS0 + ((n) << 6)) + (r))
+
+#define AS_TRANSTAB_LO         0x00	/* (RW) Translation Table Base Address for address space n, low word */
+#define AS_TRANSTAB_HI         0x04	/* (RW) Translation Table Base Address for address space n, high word */
+#define AS_MEMATTR_LO          0x08	/* (RW) Memory attributes for address space n, low word. */
+#define AS_MEMATTR_HI          0x0C	/* (RW) Memory attributes for address space n, high word. */
+#define AS_LOCKADDR_LO         0x10	/* (RW) Lock region address for address space n, low word */
+#define AS_LOCKADDR_HI         0x14	/* (RW) Lock region address for address space n, high word */
+#define AS_COMMAND             0x18	/* (WO) MMU command register for address space n */
+#define AS_FAULTSTATUS         0x1C	/* (RO) MMU fault status register for address space n */
+#define AS_FAULTADDRESS_LO     0x20	/* (RO) Fault Address for address space n, low word */
+#define AS_FAULTADDRESS_HI     0x24	/* (RO) Fault Address for address space n, high word */
+#define AS_STATUS              0x28	/* (RO) Status flags for address space n */
+
+/* (RW) Translation table configuration for address space n, low word */
+#define AS_TRANSCFG_LO         0x30
+/* (RW) Translation table configuration for address space n, high word */
+#define AS_TRANSCFG_HI         0x34
+/* (RO) Secondary fault address for address space n, low word */
+#define AS_FAULTEXTRA_LO       0x38
+/* (RO) Secondary fault address for address space n, high word */
+#define AS_FAULTEXTRA_HI       0x3C
+
+/* End Register Offsets */
+
+/* IRQ flags */
+#define GPU_FAULT               (1 << 0)    /* A GPU Fault has occurred */
+#define MULTIPLE_GPU_FAULTS     (1 << 7)    /* More than one GPU Fault occurred. */
+#define RESET_COMPLETED         (1 << 8)    /* Set when a reset has completed. */
+#define POWER_CHANGED_SINGLE    (1 << 9)    /* Set when a single core has finished powering up or down. */
+#define POWER_CHANGED_ALL       (1 << 10)   /* Set when all cores have finished powering up or down. */
+
+#define PRFCNT_SAMPLE_COMPLETED (1 << 16)   /* Set when a performance count sample has completed. */
+#define CLEAN_CACHES_COMPLETED  (1 << 17)   /* Set when a cache clean operation has completed. */
+
+#define GPU_IRQ_REG_ALL (GPU_FAULT | MULTIPLE_GPU_FAULTS | RESET_COMPLETED \
+		| POWER_CHANGED_ALL | PRFCNT_SAMPLE_COMPLETED)
+
+/*
+ * MMU_IRQ_RAWSTAT register values. Values are valid also for
+ * MMU_IRQ_CLEAR, MMU_IRQ_MASK, MMU_IRQ_STATUS registers.
+ */
+
+#define MMU_PAGE_FAULT_FLAGS    16
+
+/* Macros returning a bitmask to retrieve page fault or bus error flags from
+ * MMU registers */
+#define MMU_PAGE_FAULT(n)       (1UL << (n))
+#define MMU_BUS_ERROR(n)        (1UL << ((n) + MMU_PAGE_FAULT_FLAGS))
+
+/*
+ * Begin LPAE MMU TRANSTAB register values
+ */
+#define AS_TRANSTAB_LPAE_ADDR_SPACE_MASK   0xfffff000
+#define AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED  (0u << 0)
+#define AS_TRANSTAB_LPAE_ADRMODE_IDENTITY  (1u << 1)
+#define AS_TRANSTAB_LPAE_ADRMODE_TABLE     (3u << 0)
+#define AS_TRANSTAB_LPAE_READ_INNER        (1u << 2)
+#define AS_TRANSTAB_LPAE_SHARE_OUTER       (1u << 4)
+
+#define AS_TRANSTAB_LPAE_ADRMODE_MASK      0x00000003
+
+/*
+ * Begin AARCH64 MMU TRANSTAB register values
+ */
+#define MMU_HW_OUTA_BITS 40
+#define AS_TRANSTAB_BASE_MASK ((1ULL << MMU_HW_OUTA_BITS) - (1ULL << 4))
+
+/*
+ * Begin MMU STATUS register values
+ */
+#define AS_STATUS_AS_ACTIVE 0x01
+
+#define AS_FAULTSTATUS_EXCEPTION_CODE_MASK                      (0x7<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT         (0x0<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT          (0x1<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT        (0x2<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG               (0x3<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_ADDRESS_SIZE_FAULT        (0x4<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_MEMORY_ATTRIBUTES_FAULT   (0x5<<3)
+
+#define AS_FAULTSTATUS_ACCESS_TYPE_MASK         (0x3<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC       (0x0<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_EX           (0x1<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_READ         (0x2<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE        (0x3<<8)
+
+/*
+ * Begin MMU TRANSCFG register values
+ */
+#define AS_TRANSCFG_ADRMODE_LEGACY      0
+#define AS_TRANSCFG_ADRMODE_UNMAPPED    1
+#define AS_TRANSCFG_ADRMODE_IDENTITY    2
+#define AS_TRANSCFG_ADRMODE_AARCH64_4K  6
+#define AS_TRANSCFG_ADRMODE_AARCH64_64K 8
+
+#define AS_TRANSCFG_ADRMODE_MASK        0xF
+
+/*
+ * Begin TRANSCFG register values
+ */
+#define AS_TRANSCFG_PTW_MEMATTR_MASK (3ull << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_NON_CACHEABLE (1ull << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK (2ull << 24)
+
+#define AS_TRANSCFG_PTW_SH_MASK ((3ull << 28))
+#define AS_TRANSCFG_PTW_SH_OS (2ull << 28)
+#define AS_TRANSCFG_PTW_SH_IS (3ull << 28)
+#define AS_TRANSCFG_R_ALLOCATE (1ull << 30)
+
+/*
+ * Begin Command Values
+ */
+
+/* AS_COMMAND register commands */
+#define AS_COMMAND_NOP         0x00	/* NOP Operation */
+#define AS_COMMAND_UPDATE      0x01	/* Broadcasts the values in AS_TRANSTAB and ASn_MEMATTR to all MMUs */
+#define AS_COMMAND_LOCK        0x02	/* Issue a lock region command to all MMUs */
+#define AS_COMMAND_UNLOCK      0x03	/* Issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH       0x04	/* Flush all L2 caches then issue a flush region command to all MMUs
+					   (deprecated - only for use with T60x) */
+#define AS_COMMAND_FLUSH_PT    0x04	/* Flush all L2 caches then issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH_MEM   0x05	/* Wait for memory accesses to complete, flush all the L1s cache then
+					   flush all L2 caches then issue a flush region command to all MMUs */
+
+/* GPU_STATUS values */
+#define GPU_STATUS_PRFCNT_ACTIVE            (1 << 2)    /* Set if the performance counters are active. */
+#define GPU_STATUS_PROTECTED_MODE_ACTIVE    (1 << 7)    /* Set if protected mode is active */
+
+/* PRFCNT_CONFIG register values */
+#define PRFCNT_CONFIG_MODE_SHIFT        0 /* Counter mode position. */
+#define PRFCNT_CONFIG_AS_SHIFT          4 /* Address space bitmap position. */
+#define PRFCNT_CONFIG_SETSELECT_SHIFT   8 /* Set select position. */
+
+/* The performance counters are disabled. */
+#define PRFCNT_CONFIG_MODE_OFF          0
+/* The performance counters are enabled, but are only written out when a
+ * PRFCNT_SAMPLE command is issued using the GPU_COMMAND register.
+ */
+#define PRFCNT_CONFIG_MODE_MANUAL       1
+/* The performance counters are enabled, and are written out each time a tile
+ * finishes rendering.
+ */
+#define PRFCNT_CONFIG_MODE_TILE         2
+
+/* AS<n>_MEMATTR values from MMU_MEMATTR_STAGE1: */
+/* Use GPU implementation-defined caching policy. */
+#define AS_MEMATTR_IMPL_DEF_CACHE_POLICY 0x88ull
+/* The attribute set to force all resources to be cached. */
+#define AS_MEMATTR_FORCE_TO_CACHE_ALL    0x8Full
+/* Inner write-alloc cache setup, no outer caching */
+#define AS_MEMATTR_WRITE_ALLOC           0x8Dull
+
+/* Set to implementation defined, outer caching */
+#define AS_MEMATTR_AARCH64_OUTER_IMPL_DEF 0x88ull
+/* Set to write back memory, outer caching */
+#define AS_MEMATTR_AARCH64_OUTER_WA       0x8Dull
+/* Set to inner non-cacheable, outer non-cacheable.
+ * The setting defined by the alloc bits is ignored, but set to a valid
+ * encoding:
+ * - no-alloc on read
+ * - no-alloc on write
+ */
+#define AS_MEMATTR_AARCH64_NON_CACHEABLE  0x4Cull
+
+/* Use GPU implementation-defined caching policy. */
+#define AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY 0x48ull
+/* The attribute set to force all resources to be cached. */
+#define AS_MEMATTR_LPAE_FORCE_TO_CACHE_ALL    0x4Full
+/* Inner write-alloc cache setup, no outer caching */
+#define AS_MEMATTR_LPAE_WRITE_ALLOC           0x4Dull
+/* Set to implementation defined, outer caching */
+#define AS_MEMATTR_LPAE_OUTER_IMPL_DEF        0x88ull
+/* Set to write back memory, outer caching */
+#define AS_MEMATTR_LPAE_OUTER_WA              0x8Dull
+/* There is no LPAE support for non-cacheable, since the memory type is always
+ * write-back.
+ * Marking this setting as reserved for LPAE
+ */
+#define AS_MEMATTR_LPAE_NON_CACHEABLE_RESERVED
+
+/* Symbols for default MEMATTR to use
+ * Default is HW implementation-defined caching */
+#define AS_MEMATTR_INDEX_DEFAULT               0
+#define AS_MEMATTR_INDEX_DEFAULT_ACE           3
+
+/* HW implementation defined caching */
+#define AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY 0
+/* Force cache on */
+#define AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL    1
+/* Write-alloc */
+#define AS_MEMATTR_INDEX_WRITE_ALLOC           2
+/* Outer coherent, inner implementation defined policy */
+#define AS_MEMATTR_INDEX_OUTER_IMPL_DEF        3
+/* Outer coherent, write alloc inner */
+#define AS_MEMATTR_INDEX_OUTER_WA              4
+/* Normal memory, inner non-cacheable, outer non-cacheable (ARMv8 mode only) */
+#define AS_MEMATTR_INDEX_NON_CACHEABLE         5
+
+/* L2_MMU_CONFIG register */
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT       (23)
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY             (0x1 << L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT)
+
+/* End L2_MMU_CONFIG register */
+
+/* THREAD_* registers */
+
+/* THREAD_FEATURES IMPLEMENTATION_TECHNOLOGY values */
+#define IMPLEMENTATION_UNSPECIFIED  0
+#define IMPLEMENTATION_SILICON      1
+#define IMPLEMENTATION_FPGA         2
+#define IMPLEMENTATION_MODEL        3
+
+/* Default values when registers are not supported by the implemented hardware */
+#define THREAD_MT_DEFAULT     256
+#define THREAD_MWS_DEFAULT    256
+#define THREAD_MBS_DEFAULT    256
+#define THREAD_MR_DEFAULT     1024
+#define THREAD_MTQ_DEFAULT    4
+#define THREAD_MTGS_DEFAULT   10
+
+/* End THREAD_* registers */
+
+/* SHADER_CONFIG register */
+#define SC_ALT_COUNTERS             (1ul << 3)
+#define SC_OVERRIDE_FWD_PIXEL_KILL  (1ul << 4)
+#define SC_SDC_DISABLE_OQ_DISCARD   (1ul << 6)
+#define SC_LS_ALLOW_ATTR_TYPES      (1ul << 16)
+#define SC_LS_PAUSEBUFFER_DISABLE   (1ul << 16)
+#define SC_TLS_HASH_ENABLE          (1ul << 17)
+#define SC_LS_ATTR_CHECK_DISABLE    (1ul << 18)
+#define SC_ENABLE_TEXGRD_FLAGS      (1ul << 25)
+#define SC_VAR_ALGORITHM            (1ul << 29)
+/* End SHADER_CONFIG register */
+
+/* TILER_CONFIG register */
+#define TC_CLOCK_GATE_OVERRIDE      (1ul << 0)
+/* End TILER_CONFIG register */
+
+/* L2_CONFIG register */
+#define L2_CONFIG_SIZE_SHIFT        16
+#define L2_CONFIG_SIZE_MASK         (0xFFul << L2_CONFIG_SIZE_SHIFT)
+#define L2_CONFIG_HASH_SHIFT        24
+#define L2_CONFIG_HASH_MASK         (0xFFul << L2_CONFIG_HASH_SHIFT)
+/* End L2_CONFIG register */
+
+
+#endif /* _MIDG_REGMAP_H_ */
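The macros in this map only do address arithmetic; actual accesses go through
the driver's register accessors. A hedged sketch of how absolute offsets
compose (the values in the comments are derived from the definitions above):

#include <linux/types.h>

static const u32 off_gpu_id     = GPU_CONTROL_REG(GPU_ID);
				/* 0x0000 + 0x000 = 0x0000 */
static const u32 off_as3_status = MMU_AS_REG(3, AS_STATUS);
				/* 0x2000 + (0x400 + (3 << 6)) + 0x28 = 0x24E8 */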
diff --git a/drivers/gpu/arm/midgard/mali_midg_regmap_jm.h b/drivers/gpu/arm/midgard/mali_midg_regmap_jm.h
new file mode 100644
index 0000000..58e4d08
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_midg_regmap_jm.h
@@ -0,0 +1,210 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _MIDG_REGMAP_JM_H_
+#define _MIDG_REGMAP_JM_H_
+
+/* GPU control registers */
+
+#define CORE_FEATURES           0x008   /* (RO) Shader Core Features */
+#define JS_PRESENT              0x01C   /* (RO) Job slots present */
+#define LATEST_FLUSH            0x038   /* (RO) Flush ID of latest clean-and-invalidate operation */
+#define GROUPS_L2_COHERENT      (1 << 0)    /* Cores groups are l2 coherent */
+
+#define JS0_FEATURES            0x0C0   /* (RO) Features of job slot 0 */
+#define JS1_FEATURES            0x0C4   /* (RO) Features of job slot 1 */
+#define JS2_FEATURES            0x0C8   /* (RO) Features of job slot 2 */
+#define JS3_FEATURES            0x0CC   /* (RO) Features of job slot 3 */
+#define JS4_FEATURES            0x0D0   /* (RO) Features of job slot 4 */
+#define JS5_FEATURES            0x0D4   /* (RO) Features of job slot 5 */
+#define JS6_FEATURES            0x0D8   /* (RO) Features of job slot 6 */
+#define JS7_FEATURES            0x0DC   /* (RO) Features of job slot 7 */
+#define JS8_FEATURES            0x0E0   /* (RO) Features of job slot 8 */
+#define JS9_FEATURES            0x0E4   /* (RO) Features of job slot 9 */
+#define JS10_FEATURES           0x0E8   /* (RO) Features of job slot 10 */
+#define JS11_FEATURES           0x0EC   /* (RO) Features of job slot 11 */
+#define JS12_FEATURES           0x0F0   /* (RO) Features of job slot 12 */
+#define JS13_FEATURES           0x0F4   /* (RO) Features of job slot 13 */
+#define JS14_FEATURES           0x0F8   /* (RO) Features of job slot 14 */
+#define JS15_FEATURES           0x0FC   /* (RO) Features of job slot 15 */
+
+#define JS_FEATURES_REG(n)      GPU_CONTROL_REG(JS0_FEATURES + ((n) << 2))
+
+#define JM_CONFIG               0xF00   /* (RW) Job manager configuration (implementation-specific) */
+
+/* Job control registers */
+
+#define JOB_IRQ_JS_STATE        0x010   /* Snapshot of (status == active && _next == busy) taken at the last JOB_IRQ_CLEAR */
+#define JOB_IRQ_THROTTLE        0x014   /* cycles to delay delivering an interrupt externally. The JOB_IRQ_STATUS is NOT affected by this, just the delivery of the interrupt.  */
+
+#define JOB_SLOT0               0x800   /* Configuration registers for job slot 0 */
+#define JOB_SLOT1               0x880   /* Configuration registers for job slot 1 */
+#define JOB_SLOT2               0x900   /* Configuration registers for job slot 2 */
+#define JOB_SLOT3               0x980   /* Configuration registers for job slot 3 */
+#define JOB_SLOT4               0xA00   /* Configuration registers for job slot 4 */
+#define JOB_SLOT5               0xA80   /* Configuration registers for job slot 5 */
+#define JOB_SLOT6               0xB00   /* Configuration registers for job slot 6 */
+#define JOB_SLOT7               0xB80   /* Configuration registers for job slot 7 */
+#define JOB_SLOT8               0xC00   /* Configuration registers for job slot 8 */
+#define JOB_SLOT9               0xC80   /* Configuration registers for job slot 9 */
+#define JOB_SLOT10              0xD00   /* Configuration registers for job slot 10 */
+#define JOB_SLOT11              0xD80   /* Configuration registers for job slot 11 */
+#define JOB_SLOT12              0xE00   /* Configuration registers for job slot 12 */
+#define JOB_SLOT13              0xE80   /* Configuration registers for job slot 13 */
+#define JOB_SLOT14              0xF00   /* Configuration registers for job slot 14 */
+#define JOB_SLOT15              0xF80   /* Configuration registers for job slot 15 */
+
+#define JOB_SLOT_REG(n, r)      (JOB_CONTROL_REG(JOB_SLOT0 + ((n) << 7)) + (r))
+
+#define JS_HEAD_LO             0x00	/* (RO) Job queue head pointer for job slot n, low word */
+#define JS_HEAD_HI             0x04	/* (RO) Job queue head pointer for job slot n, high word */
+#define JS_TAIL_LO             0x08	/* (RO) Job queue tail pointer for job slot n, low word */
+#define JS_TAIL_HI             0x0C	/* (RO) Job queue tail pointer for job slot n, high word */
+#define JS_AFFINITY_LO         0x10	/* (RO) Core affinity mask for job slot n, low word */
+#define JS_AFFINITY_HI         0x14	/* (RO) Core affinity mask for job slot n, high word */
+#define JS_CONFIG              0x18	/* (RO) Configuration settings for job slot n */
+#define JS_XAFFINITY           0x1C	/* (RO) Extended affinity mask for job
+					   slot n */
+
+#define JS_COMMAND             0x20	/* (WO) Command register for job slot n */
+#define JS_STATUS              0x24	/* (RO) Status register for job slot n */
+
+#define JS_HEAD_NEXT_LO        0x40	/* (RW) Next job queue head pointer for job slot n, low word */
+#define JS_HEAD_NEXT_HI        0x44	/* (RW) Next job queue head pointer for job slot n, high word */
+
+#define JS_AFFINITY_NEXT_LO    0x50	/* (RW) Next core affinity mask for job slot n, low word */
+#define JS_AFFINITY_NEXT_HI    0x54	/* (RW) Next core affinity mask for job slot n, high word */
+#define JS_CONFIG_NEXT         0x58	/* (RW) Next configuration settings for job slot n */
+#define JS_XAFFINITY_NEXT      0x5C	/* (RW) Next extended affinity mask for
+					   job slot n */
+
+#define JS_COMMAND_NEXT        0x60	/* (RW) Next command register for job slot n */
+
+#define JS_FLUSH_ID_NEXT       0x70	/* (RW) Next job slot n cache flush ID */
+
+/* No JM-specific MMU control registers */
+/* No JM-specific MMU address space control registers */
+
+/* JS_COMMAND register commands */
+#define JS_COMMAND_NOP         0x00	/* NOP Operation. Writing this value is ignored */
+#define JS_COMMAND_START       0x01	/* Start processing a job chain. Writing this value is ignored */
+#define JS_COMMAND_SOFT_STOP   0x02	/* Gently stop processing a job chain */
+#define JS_COMMAND_HARD_STOP   0x03	/* Rudely stop processing a job chain */
+#define JS_COMMAND_SOFT_STOP_0 0x04	/* Execute SOFT_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_HARD_STOP_0 0x05	/* Execute HARD_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_SOFT_STOP_1 0x06	/* Execute SOFT_STOP if JOB_CHAIN_FLAG is 1 */
+#define JS_COMMAND_HARD_STOP_1 0x07	/* Execute HARD_STOP if JOB_CHAIN_FLAG is 1 */
+
+#define JS_COMMAND_MASK        0x07    /* Mask of bits currently in use by the HW */
+
+/* Possible values of JS_CONFIG and JS_CONFIG_NEXT registers */
+#define JS_CONFIG_START_FLUSH_NO_ACTION        (0u << 0)
+#define JS_CONFIG_START_FLUSH_CLEAN            (1u << 8)
+#define JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE (3u << 8)
+#define JS_CONFIG_START_MMU                    (1u << 10)
+#define JS_CONFIG_JOB_CHAIN_FLAG               (1u << 11)
+#define JS_CONFIG_END_FLUSH_NO_ACTION          JS_CONFIG_START_FLUSH_NO_ACTION
+#define JS_CONFIG_END_FLUSH_CLEAN              (1u << 12)
+#define JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE   (3u << 12)
+#define JS_CONFIG_ENABLE_FLUSH_REDUCTION       (1u << 14)
+#define JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK     (1u << 15)
+#define JS_CONFIG_THREAD_PRI(n)                ((n) << 16)
+
+/* JS_XAFFINITY register values */
+#define JS_XAFFINITY_XAFFINITY_ENABLE (1u << 0)
+#define JS_XAFFINITY_TILER_ENABLE     (1u << 8)
+#define JS_XAFFINITY_CACHE_ENABLE     (1u << 16)
+
+/* JS_STATUS register values */
+
+/* NOTE: Please keep these values in sync with enum base_jd_event_code in mali_base_kernel.h.
+ * The values are separated to avoid dependency of userspace and kernel code.
+ */
+
+/* Group of values representing the job status instead of a particular fault */
+#define JS_STATUS_NO_EXCEPTION_BASE   0x00
+#define JS_STATUS_INTERRUPTED         (JS_STATUS_NO_EXCEPTION_BASE + 0x02)	/* 0x02 means INTERRUPTED */
+#define JS_STATUS_STOPPED             (JS_STATUS_NO_EXCEPTION_BASE + 0x03)	/* 0x03 means STOPPED */
+#define JS_STATUS_TERMINATED          (JS_STATUS_NO_EXCEPTION_BASE + 0x04)	/* 0x04 means TERMINATED */
+
+/* General fault values */
+#define JS_STATUS_FAULT_BASE          0x40
+#define JS_STATUS_CONFIG_FAULT        (JS_STATUS_FAULT_BASE)	/* 0x40 means CONFIG FAULT */
+#define JS_STATUS_POWER_FAULT         (JS_STATUS_FAULT_BASE + 0x01)	/* 0x41 means POWER FAULT */
+#define JS_STATUS_READ_FAULT          (JS_STATUS_FAULT_BASE + 0x02)	/* 0x42 means READ FAULT */
+#define JS_STATUS_WRITE_FAULT         (JS_STATUS_FAULT_BASE + 0x03)	/* 0x43 means WRITE FAULT */
+#define JS_STATUS_AFFINITY_FAULT      (JS_STATUS_FAULT_BASE + 0x04)	/* 0x44 means AFFINITY FAULT */
+#define JS_STATUS_BUS_FAULT           (JS_STATUS_FAULT_BASE + 0x08)	/* 0x48 means BUS FAULT */
+
+/* Instruction or data faults */
+#define JS_STATUS_INSTRUCTION_FAULT_BASE  0x50
+#define JS_STATUS_INSTR_INVALID_PC        (JS_STATUS_INSTRUCTION_FAULT_BASE)	/* 0x50 means INSTR INVALID PC */
+#define JS_STATUS_INSTR_INVALID_ENC       (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x01)	/* 0x51 means INSTR INVALID ENC */
+#define JS_STATUS_INSTR_TYPE_MISMATCH     (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x02)	/* 0x52 means INSTR TYPE MISMATCH */
+#define JS_STATUS_INSTR_OPERAND_FAULT     (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x03)	/* 0x53 means INSTR OPERAND FAULT */
+#define JS_STATUS_INSTR_TLS_FAULT         (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x04)	/* 0x54 means INSTR TLS FAULT */
+#define JS_STATUS_INSTR_BARRIER_FAULT     (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x05)	/* 0x55 means INSTR BARRIER FAULT */
+#define JS_STATUS_INSTR_ALIGN_FAULT       (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x06)	/* 0x56 means INSTR ALIGN FAULT */
+/* NOTE: No fault with 0x57 code defined in spec. */
+#define JS_STATUS_DATA_INVALID_FAULT      (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x08)	/* 0x58 means DATA INVALID FAULT */
+#define JS_STATUS_TILE_RANGE_FAULT        (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x09)	/* 0x59 means TILE RANGE FAULT */
+#define JS_STATUS_ADDRESS_RANGE_FAULT     (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x0A)	/* 0x5A means ADDRESS RANGE FAULT */
+
+/* Other faults */
+#define JS_STATUS_MEMORY_FAULT_BASE   0x60
+#define JS_STATUS_OUT_OF_MEMORY       (JS_STATUS_MEMORY_FAULT_BASE)	/* 0x60 means OUT OF MEMORY */
+#define JS_STATUS_UNKNOWN             0x7F	/* 0x7F means UNKNOWN */
+
+/* JS<n>_FEATURES register */
+#define JS_FEATURE_NULL_JOB              (1u << 1)
+#define JS_FEATURE_SET_VALUE_JOB         (1u << 2)
+#define JS_FEATURE_CACHE_FLUSH_JOB       (1u << 3)
+#define JS_FEATURE_COMPUTE_JOB           (1u << 4)
+#define JS_FEATURE_VERTEX_JOB            (1u << 5)
+#define JS_FEATURE_GEOMETRY_JOB          (1u << 6)
+#define JS_FEATURE_TILER_JOB             (1u << 7)
+#define JS_FEATURE_FUSED_JOB             (1u << 8)
+#define JS_FEATURE_FRAGMENT_JOB          (1u << 9)
+
+/* JM_CONFIG register */
+#define JM_TIMESTAMP_OVERRIDE  (1ul << 0)
+#define JM_CLOCK_GATE_OVERRIDE (1ul << 1)
+#define JM_JOB_THROTTLE_ENABLE (1ul << 2)
+#define JM_JOB_THROTTLE_LIMIT_SHIFT (3)
+#define JM_MAX_JOB_THROTTLE_LIMIT (0x3F)
+#define JM_FORCE_COHERENCY_FEATURES_SHIFT (2)
+#define JM_IDVS_GROUP_SIZE_SHIFT (16)
+#define JM_MAX_IDVS_GROUP_SIZE (0x3F)
+
+/* GPU_COMMAND values */
+#define GPU_COMMAND_NOP                0x00 /* No operation, nothing happens */
+#define GPU_COMMAND_SOFT_RESET         0x01 /* Stop all external bus interfaces, and then reset the entire GPU. */
+#define GPU_COMMAND_HARD_RESET         0x02 /* Immediately reset the entire GPU. */
+#define GPU_COMMAND_PRFCNT_CLEAR       0x03 /* Clear all performance counters, setting them all to zero. */
+#define GPU_COMMAND_PRFCNT_SAMPLE      0x04 /* Sample all performance counters, writing them out to memory */
+#define GPU_COMMAND_CYCLE_COUNT_START  0x05 /* Starts the cycle counter, and system timestamp propagation */
+#define GPU_COMMAND_CYCLE_COUNT_STOP   0x06 /* Stops the cycle counter, and system timestamp propagation */
+#define GPU_COMMAND_CLEAN_CACHES       0x07 /* Clean all caches */
+#define GPU_COMMAND_CLEAN_INV_CACHES   0x08 /* Clean and invalidate all caches */
+#define GPU_COMMAND_SET_PROTECTED_MODE 0x09 /* Places the GPU in protected mode */
+
+#endif /* _MIDG_REGMAP_JM_H_ */
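A hedged sketch tying the job-slot macros together: soft-stopping slot js by
writing JS_COMMAND_SOFT_STOP to its command register. The accessor
kbase_reg_write(kbdev, offset, value) is an assumed signature, and the
function below is illustrative only:

/* Sketch: request a gentle stop of the chain running on job slot js. */
static void example_soft_stop(struct kbase_device *kbdev, int js)
{
	/* JOB_CONTROL_BASE + JOB_SLOT0 + (js << 7) + JS_COMMAND */
	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND),
			JS_COMMAND_SOFT_STOP);
}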
diff --git a/drivers/gpu/arm/midgard/mali_uk.h b/drivers/gpu/arm/midgard/mali_uk.h
new file mode 100644
index 0000000..701f390
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_uk.h
@@ -0,0 +1,84 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_uk.h
+ * Types and definitions that are common across OSs for both the user
+ * and kernel side of the User-Kernel interface.
+ */
+
+#ifndef _UK_H_
+#define _UK_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif				/* __cplusplus */
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @defgroup uk_api User-Kernel Interface API
+ *
+ * The User-Kernel Interface abstracts the communication mechanism between the user and kernel-side code of device
+ * drivers developed as part of the Midgard DDK. Currently that includes the Base driver.
+ *
+ * It exposes an OS-independent API to user-side code (UKU) which routes function calls to an OS-independent
+ * kernel-side API (UKK) via an OS-specific communication mechanism.
+ *
+ * This API is internal to the Midgard DDK and is not exposed to any applications.
+ *
+ * @{
+ */
+
+/**
+ * These are identifiers for kernel-side drivers implementing a UK interface, aka UKK clients. The
+ * UK module maps this to an OS specific device name, e.g. "gpu_base" -> "GPU0:". Pass this
+ * identifier to the uku_open() function to select a UKK client.
+ *
+ * When a new UKK client driver is created, a new identifier needs to be added to the uk_client_id
+ * enumeration, and the uku_open() implementation for the various OS ports needs to be updated to
+ * provide a mapping of the identifier to the OS specific device name.
+ *
+ */
+enum uk_client_id {
+	/**
+	 * Value used to identify the Base driver UK client.
+	 */
+	UK_CLIENT_MALI_T600_BASE,
+
+	/** The number of uk clients supported. This must be the last member of the enum */
+	UK_CLIENT_COUNT
+};
+
+/** @} end group uk_api */
+
+/** @} *//* end group base_api */
+
+#ifdef __cplusplus
+}
+#endif				/* __cplusplus */
+#endif				/* _UK_H_ */
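The identifier-to-device-name mapping described above lives in each OS port's
uku_open(); the table below is purely illustrative (both the array and the
"mali0" name are assumptions, not the DDK's actual code):

/* Hypothetical sketch of the per-OS mapping uku_open() would consult. */
static const char *const uk_client_devname[UK_CLIENT_COUNT] = {
	[UK_CLIENT_MALI_T600_BASE] = "mali0",	/* assumed device name */
};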
diff --git a/drivers/gpu/arm/midgard/platform/Kconfig b/drivers/gpu/arm/midgard/platform/Kconfig
new file mode 100644
index 0000000..ef9fb96
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/Kconfig
@@ -0,0 +1,30 @@
+#
+# (C) COPYRIGHT 2012-2013, 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+
+# Add your platform specific Kconfig file here
+#
+# "drivers/gpu/arm/midgard/platform/xxx/Kconfig"
+#
+# Where xxx is the platform name set in MALI_PLATFORM_NAME
+#
+
diff --git a/drivers/gpu/arm/midgard/platform/devicetree/Kbuild b/drivers/gpu/arm/midgard/platform/devicetree/Kbuild
new file mode 100644
index 0000000..ce637fb
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/devicetree/Kbuild
@@ -0,0 +1,24 @@
+#
+# (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+mali_kbase-y += \
+	$(MALI_PLATFORM_DIR)/mali_kbase_config_devicetree.o \
+	$(MALI_PLATFORM_DIR)/mali_kbase_runtime_pm.o
diff --git a/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c
new file mode 100644
index 0000000..ccefddf
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c
@@ -0,0 +1,41 @@
+/*
+ *
+ * (C) COPYRIGHT 2015, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase_config.h>
+
+static struct kbase_platform_config dummy_platform_config;
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+	return &dummy_platform_config;
+}
+
+#ifndef CONFIG_OF
+int kbase_platform_register(void)
+{
+	return 0;
+}
+
+void kbase_platform_unregister(void)
+{
+}
+#endif
diff --git a/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h
new file mode 100644
index 0000000..5990313
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h
@@ -0,0 +1,46 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
+
+/**
+ * Autosuspend delay
+ *
+ * The delay time (in milliseconds) to be used for autosuspend
+ */
+#define AUTO_SUSPEND_DELAY (100)
diff --git a/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c
new file mode 100644
index 0000000..8772edb
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c
@@ -0,0 +1,185 @@
+/*
+ *
+ * (C) COPYRIGHT 2015, 2017-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regulator/consumer.h>
+#include "mali_kbase_config_platform.h"
+
+static void enable_gpu_power_control(struct kbase_device *kbdev)
+{
+	unsigned int i;
+
+#if defined(CONFIG_REGULATOR)
+	for (i = 0; i < kbdev->nr_regulators; i++) {
+		if (WARN_ON(kbdev->regulators[i] == NULL))
+			;
+		else if (!regulator_is_enabled(kbdev->regulators[i]))
+			WARN_ON(regulator_enable(kbdev->regulators[i]));
+	}
+#endif
+
+	for (i = 0; i < kbdev->nr_clocks; i++) {
+		if (WARN_ON(kbdev->clocks[i] == NULL))
+			;
+		else if (!__clk_is_enabled(kbdev->clocks[i]))
+			WARN_ON(clk_prepare_enable(kbdev->clocks[i]));
+	}
+}
+
+static void disable_gpu_power_control(struct kbase_device *kbdev)
+{
+	unsigned int i;
+
+	for (i = 0; i < kbdev->nr_clocks; i++) {
+		if (WARN_ON(kbdev->clocks[i] == NULL))
+			;
+		else if (__clk_is_enabled(kbdev->clocks[i])) {
+			clk_disable_unprepare(kbdev->clocks[i]);
+			WARN_ON(__clk_is_enabled(kbdev->clocks[i]));
+		}
+
+	}
+
+#if defined(CONFIG_REGULATOR)
+	for (i = 0; i < kbdev->nr_regulators; i++) {
+		if (WARN_ON(kbdev->regulators[i] == NULL))
+			;
+		else if (regulator_is_enabled(kbdev->regulators[i]))
+			WARN_ON(regulator_disable(kbdev->regulators[i]));
+	}
+#endif
+}
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+	int ret = 1; /* Assume GPU has been powered off */
+	int error;
+
+	dev_dbg(kbdev->dev, "pm_callback_power_on %p\n",
+			(void *)kbdev->dev->pm_domain);
+
+	enable_gpu_power_control(kbdev);
+
+	error = pm_runtime_get_sync(kbdev->dev);
+	if (error == 1) {
+		/*
+		 * Let core know that the chip has not been
+		 * powered off, so we can save on re-initialization.
+		 */
+		ret = 0;
+	}
+
+	dev_dbg(kbdev->dev, "pm_runtime_get_sync returned %d\n", error);
+
+	return ret;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "pm_callback_power_off\n");
+
+	pm_runtime_mark_last_busy(kbdev->dev);
+	pm_runtime_put_autosuspend(kbdev->dev);
+
+#ifndef KBASE_PM_RUNTIME
+	disable_gpu_power_control(kbdev);
+#endif
+}
+
+#ifdef KBASE_PM_RUNTIME
+static int kbase_device_runtime_init(struct kbase_device *kbdev)
+{
+	int ret = 0;
+
+	dev_dbg(kbdev->dev, "kbase_device_runtime_init\n");
+
+	pm_runtime_set_autosuspend_delay(kbdev->dev, AUTO_SUSPEND_DELAY);
+	pm_runtime_use_autosuspend(kbdev->dev);
+
+	pm_runtime_set_active(kbdev->dev);
+	pm_runtime_enable(kbdev->dev);
+
+	if (!pm_runtime_enabled(kbdev->dev)) {
+		dev_warn(kbdev->dev, "pm_runtime not enabled");
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static void kbase_device_runtime_disable(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "kbase_device_runtime_disable\n");
+	pm_runtime_disable(kbdev->dev);
+}
+#endif
+
+static int pm_callback_runtime_on(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "pm_callback_runtime_on\n");
+
+	enable_gpu_power_control(kbdev);
+	return 0;
+}
+
+static void pm_callback_runtime_off(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "pm_callback_runtime_off\n");
+
+	disable_gpu_power_control(kbdev);
+}
+
+static void pm_callback_resume(struct kbase_device *kbdev)
+{
+	int ret = pm_callback_runtime_on(kbdev);
+
+	WARN_ON(ret);
+}
+
+static void pm_callback_suspend(struct kbase_device *kbdev)
+{
+	pm_callback_runtime_off(kbdev);
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+	.power_on_callback = pm_callback_power_on,
+	.power_off_callback = pm_callback_power_off,
+	.power_suspend_callback = pm_callback_suspend,
+	.power_resume_callback = pm_callback_resume,
+#ifdef KBASE_PM_RUNTIME
+	.power_runtime_init_callback = kbase_device_runtime_init,
+	.power_runtime_term_callback = kbase_device_runtime_disable,
+	.power_runtime_on_callback = pm_callback_runtime_on,
+	.power_runtime_off_callback = pm_callback_runtime_off,
+#else				/* KBASE_PM_RUNTIME */
+	.power_runtime_init_callback = NULL,
+	.power_runtime_term_callback = NULL,
+	.power_runtime_on_callback = NULL,
+	.power_runtime_off_callback = NULL,
+#endif				/* KBASE_PM_RUNTIME */
+};
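The return-value convention in pm_callback_power_on() above is worth spelling out: pm_runtime_get_sync() returns 1 when the device was already active, which is the only case where GPU state survived, so the callback maps that to 0 ("no re-initialisation needed") and everything else to 1. A hedged sketch of a caller honouring that contract (illustrative only; example_reinit_gpu_state() is a hypothetical helper, not a kbase function):

void example_resume_gpu(struct kbase_device *kbdev)
{
	/* A non-zero return means the GPU really lost power, so register
	 * and MMU state must be rebuilt before jobs are submitted again. */
	if (pm_callbacks.power_on_callback(kbdev))
		example_reinit_gpu_state(kbdev); /* hypothetical helper */
}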
diff --git a/drivers/gpu/arm/midgard/platform/mediatek/Kbuild b/drivers/gpu/arm/midgard/platform/mediatek/Kbuild
new file mode 100644
index 0000000..c1161b0
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/mediatek/Kbuild
@@ -0,0 +1,14 @@
+# Copyright (C) 2018 MediaTek Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+
+mali_kbase-y += \
+	$(MALI_PLATFORM_DIR)/mali_kbase_config_mediatek.o \
+	$(MALI_PLATFORM_DIR)/mali_kbase_runtime_pm.o
diff --git a/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_config_mediatek.c b/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_config_mediatek.c
new file mode 100644
index 0000000..a273717
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_config_mediatek.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#include <mali_kbase_config.h>
+
+int kbase_platform_early_init(void)
+{
+	/* Nothing needed at this stage */
+	return 0;
+}
+
+static struct kbase_platform_config dummy_platform_config;
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+	return &dummy_platform_config;
+}
+
+int kbase_platform_register(void)
+{
+	return 0;
+}
+
+void kbase_platform_unregister(void)
+{
+}
diff --git a/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_config_platform.h
new file mode 100644
index 0000000..848b533
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_config_platform.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+struct mfg_base {
+	struct clk *clk_mux;
+	struct clk *clk_main_parent;
+	struct clk *clk_sub_parent;
+	struct clk *subsys_mfg_cg;
+	struct platform_device *gpu_core1_dev;
+	struct platform_device *gpu_core2_dev;
+	bool is_powered;
+};
+
+/* Definition for PMIC regulators */
+#define VSRAM_GPU_MAX_VOLT (925000) /* uV */
+#define VSRAM_GPU_MIN_VOLT (850000) /* uV */
+#define VGPU_MAX_VOLT (825000) /* uV */
+#define VGPU_MIN_VOLT (625000) /* uV */
+
+#define MIN_VOLT_BIAS (100000) /* uV */
+#define MAX_VOLT_BIAS (250000) /* uV */
+#define VOLT_TOL (125) /* uV */
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX (800000)
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN (300000)
+
+/**
+ * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock
+ *
+ * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_cpu_clk_speed_func.
+ * Default Value:  NA
+ */
+#define CPU_SPEED_FUNC (NULL)
+
+/**
+ * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock
+ *
+ * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_gpu_clk_speed_func.
+ * Default Value:  NA
+ */
+#define GPU_SPEED_FUNC (NULL)
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (&platform_funcs)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
+extern struct kbase_platform_funcs_conf platform_funcs;
+
+/**
+ * Autosuspend delay
+ *
+ * The delay time (in milliseconds) to be used for autosuspend
+ */
+#define AUTO_SUSPEND_DELAY (100)
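MIN_VOLT_BIAS and MAX_VOLT_BIAS are not used by the files shown here; on MT8183-class parts they encode the two-rail constraint that VSRAM must sit between 100 mV and 250 mV above VGPU. A sketch of how a voltage-scaling path might apply them, under that assumption:

/* Sketch: pick a VSRAM target that keeps the required headroom above
 * VGPU while staying inside the VSRAM rail's own limits. */
static unsigned long example_vsram_target(unsigned long vgpu_uv)
{
	unsigned long vsram_uv = vgpu_uv + MIN_VOLT_BIAS;

	if (vsram_uv < VSRAM_GPU_MIN_VOLT)
		vsram_uv = VSRAM_GPU_MIN_VOLT;
	if (vsram_uv > vgpu_uv + MAX_VOLT_BIAS)
		vsram_uv = vgpu_uv + MAX_VOLT_BIAS;
	if (vsram_uv > VSRAM_GPU_MAX_VOLT)
		vsram_uv = VSRAM_GPU_MAX_VOLT;

	return vsram_uv;
}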
diff --git a/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_runtime_pm.c b/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_runtime_pm.c
new file mode 100644
index 0000000..b40e407
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_runtime_pm.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <mali_kbase.h>
+#include "mali_kbase_config_platform.h"
+#include <mali_kbase_defs.h>
+
+static struct platform_device *probe_gpu_core1_dev;
+static struct platform_device *probe_gpu_core2_dev;
+
+static const struct of_device_id mtk_gpu_corex_of_ids[] = {
+	{ .compatible = "mediatek,gpu_core1", .data = "1" },
+	{ .compatible = "mediatek,gpu_core2", .data = "2" },
+	{}
+};
+
+static int mtk_gpu_corex_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *match;
+	const char *tmp;
+
+	match = of_match_device(mtk_gpu_corex_of_ids, dev);
+	if (!match)
+		return -ENODEV;
+	tmp = match->data;
+	if (*tmp == '1')
+		probe_gpu_core1_dev = pdev;
+	else
+		probe_gpu_core2_dev = pdev;
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	return 0;
+}
+
+static int mtk_gpu_corex_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver mtk_gpu_corex_driver = {
+	.probe  = mtk_gpu_corex_probe,
+	.remove = mtk_gpu_corex_remove,
+	.driver = {
+		.name = "gpu_corex",
+		.of_match_table = mtk_gpu_corex_of_ids,
+	}
+};
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+	int error;
+	struct mfg_base *mfg = kbdev->platform_context;
+
+	if (mfg->is_powered) {
+		dev_dbg(kbdev->dev, "mali_device is already powered\n");
+		return 0;
+	}
+
+	error = pm_runtime_get_sync(kbdev->dev);
+	if (error < 0) {
+		dev_err(kbdev->dev,
+			"Power on core 0 failed (err: %d)\n", error);
+		return error;
+	}
+
+	error = pm_runtime_get_sync(&mfg->gpu_core1_dev->dev);
+	if (error < 0) {
+		dev_err(kbdev->dev,
+			"Power on core 1 failed (err: %d)\n", error);
+		return error;
+	}
+
+	error = pm_runtime_get_sync(&mfg->gpu_core2_dev->dev);
+	if (error < 0) {
+		dev_err(kbdev->dev,
+			"Power on core 2 failed (err: %d)\n", error);
+		return error;
+	}
+
+	error = clk_enable(mfg->clk_main_parent);
+	if (error < 0) {
+		dev_err(kbdev->dev,
+			"clk_main_parent clock enable failed (err: %d)\n",
+			error);
+		return error;
+	}
+
+	error = clk_enable(mfg->clk_mux);
+	if (error < 0) {
+		dev_err(kbdev->dev,
+			"clk_mux clock enable failed (err: %d)\n", error);
+		return error;
+	}
+
+	error = clk_enable(mfg->subsys_mfg_cg);
+	if (error < 0) {
+		dev_err(kbdev->dev,
+			"subsys_mfg_cg clock enable failed (err: %d)\n", error);
+		return error;
+	}
+
+	mfg->is_powered = true;
+
+	return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+	struct mfg_base *mfg = kbdev->platform_context;
+	int error;
+
+	if (!mfg->is_powered) {
+		dev_dbg(kbdev->dev, "mali_device is already powered off\n");
+		return;
+	}
+
+	mfg->is_powered = false;
+
+	clk_disable(mfg->subsys_mfg_cg);
+
+	clk_disable(mfg->clk_mux);
+
+	clk_disable(mfg->clk_main_parent);
+
+	pm_runtime_mark_last_busy(&mfg->gpu_core2_dev->dev);
+	error = pm_runtime_put_autosuspend(&mfg->gpu_core2_dev->dev);
+	if (error < 0)
+		dev_err(kbdev->dev,
+			"Power off core 2 failed (err: %d)\n", error);
+
+	pm_runtime_mark_last_busy(&mfg->gpu_core1_dev->dev);
+	error = pm_runtime_put_autosuspend(&mfg->gpu_core1_dev->dev);
+	if (error < 0)
+		dev_err(kbdev->dev,
+			"Power off core 1 failed (err: %d)\n", error);
+
+	pm_runtime_mark_last_busy(kbdev->dev);
+	error = pm_runtime_put_autosuspend(kbdev->dev);
+	if (error < 0)
+		dev_err(kbdev->dev,
+			"Power off core 0 failed (err: %d)\n", error);
+}
+
+static int kbase_device_runtime_init(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "%s\n", __func__);
+
+	return 0;
+}
+
+static void kbase_device_runtime_disable(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "%s\n", __func__);
+}
+
+static int pm_callback_runtime_on(struct kbase_device *kbdev)
+{
+	struct mfg_base *mfg = kbdev->platform_context;
+	int error, i;
+
+	for (i = 0; i < kbdev->nr_regulators; i++) {
+		error = regulator_enable(kbdev->regulators[i]);
+		if (error < 0) {
+			dev_err(kbdev->dev,
+				"Power on reg %d failed error = %d\n",
+				i, error);
+			return error;
+		}
+	}
+
+	error = clk_prepare(mfg->clk_main_parent);
+	if (error < 0) {
+		dev_err(kbdev->dev,
+			"clk_main_parent clock prepare failed (err: %d)\n",
+			error);
+		return error;
+	}
+
+	error = clk_prepare(mfg->clk_mux);
+	if (error < 0) {
+		dev_err(kbdev->dev,
+			"clk_mux clock prepare failed (err: %d)\n", error);
+		return error;
+	}
+
+	error = clk_prepare(mfg->subsys_mfg_cg);
+	if (error < 0) {
+		dev_err(kbdev->dev,
+			"subsys_mfg_cg clock prepare failed (err: %d)\n",
+			error);
+		return error;
+	}
+
+	return 0;
+}
+
+static void pm_callback_runtime_off(struct kbase_device *kbdev)
+{
+	struct mfg_base *mfg = kbdev->platform_context;
+	int error, i;
+
+	clk_unprepare(mfg->subsys_mfg_cg);
+
+	clk_unprepare(mfg->clk_mux);
+
+	clk_unprepare(mfg->clk_main_parent);
+
+	for (i = 0; i < kbdev->nr_regulators; i++) {
+		error = regulator_disable(kbdev->regulators[i]);
+		if (error < 0) {
+			dev_err(kbdev->dev,
+				"Power off reg %d failed error = %d\n",
+				i, error);
+		}
+	}
+}
+
+static void pm_callback_resume(struct kbase_device *kbdev)
+{
+	pm_callback_power_on(kbdev);
+}
+
+static void pm_callback_suspend(struct kbase_device *kbdev)
+{
+	pm_callback_power_off(kbdev);
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+	.power_on_callback = pm_callback_power_on,
+	.power_off_callback = pm_callback_power_off,
+	.power_suspend_callback = pm_callback_suspend,
+	.power_resume_callback = pm_callback_resume,
+#ifdef KBASE_PM_RUNTIME
+	.power_runtime_init_callback = kbase_device_runtime_init,
+	.power_runtime_term_callback = kbase_device_runtime_disable,
+	.power_runtime_on_callback = pm_callback_runtime_on,
+	.power_runtime_off_callback = pm_callback_runtime_off,
+#else				/* KBASE_PM_RUNTIME */
+	.power_runtime_init_callback = NULL,
+	.power_runtime_term_callback = NULL,
+	.power_runtime_on_callback = NULL,
+	.power_runtime_off_callback = NULL,
+#endif				/* KBASE_PM_RUNTIME */
+};
+
+static int mali_mfgsys_init(struct kbase_device *kbdev, struct mfg_base *mfg)
+{
+	int err = 0, i;
+	unsigned long volt;
+
+	if (!probe_gpu_core1_dev || !probe_gpu_core2_dev)
+		return -EPROBE_DEFER;
+
+	for (i = 0; i < kbdev->nr_regulators; i++) {
+		if (kbdev->regulators[i] == NULL)
+			return -EINVAL;
+	}
+
+	mfg->gpu_core1_dev = probe_gpu_core1_dev;
+	mfg->gpu_core2_dev = probe_gpu_core2_dev;
+
+	mfg->clk_main_parent = devm_clk_get(kbdev->dev, "clk_main_parent");
+	if (IS_ERR(mfg->clk_main_parent)) {
+		err = PTR_ERR(mfg->clk_main_parent);
+		dev_err(kbdev->dev, "devm_clk_get clk_main_parent failed\n");
+		return err;
+	}
+
+	mfg->clk_sub_parent = devm_clk_get(kbdev->dev, "clk_sub_parent");
+	if (IS_ERR(mfg->clk_sub_parent)) {
+		err = PTR_ERR(mfg->clk_sub_parent);
+		dev_err(kbdev->dev, "devm_clk_get clk_sub_parent failed\n");
+		return err;
+	}
+
+	mfg->clk_mux = devm_clk_get(kbdev->dev, "clk_mux");
+	if (IS_ERR(mfg->clk_mux)) {
+		err = PTR_ERR(mfg->clk_mux);
+		dev_err(kbdev->dev, "devm_clk_get clk_mux failed\n");
+		return err;
+	}
+
+	mfg->subsys_mfg_cg = devm_clk_get(kbdev->dev, "subsys_mfg_cg");
+	if (IS_ERR(mfg->subsys_mfg_cg)) {
+		err = PTR_ERR(mfg->subsys_mfg_cg);
+		dev_err(kbdev->dev, "devm_clk_get subsys_mfg_cg failed\n");
+		return err;
+	}
+
+	for (i = 0; i < kbdev->nr_regulators; i++) {
+		volt = (i == 0) ? VGPU_MAX_VOLT : VSRAM_GPU_MAX_VOLT;
+		err = regulator_set_voltage(kbdev->regulators[i],
+			volt, volt + VOLT_TOL);
+		if (err < 0) {
+			dev_err(kbdev->dev,
+				"Regulator %d set voltage failed: %d\n",
+				i, err);
+			return err;
+		}
+		kbdev->current_voltages[i] = volt;
+	}
+
+	mfg->is_powered = false;
+
+	return 0;
+}
+
+static int platform_init(struct kbase_device *kbdev)
+{
+	int err;
+	struct mfg_base *mfg;
+
+	mfg = kzalloc(sizeof(*mfg), GFP_KERNEL);
+	if (!mfg)
+		return -ENOMEM;
+
+	err = mali_mfgsys_init(kbdev, mfg);
+	if (err)
+		goto platform_init_err;
+
+	kbdev->platform_context = mfg;
+	pm_runtime_set_autosuspend_delay(kbdev->dev, 50);
+	pm_runtime_use_autosuspend(kbdev->dev);
+	pm_runtime_enable(kbdev->dev);
+
+	err = clk_set_parent(mfg->clk_mux, mfg->clk_sub_parent);
+	if (err) {
+		dev_err(kbdev->dev, "Failed to select sub clock src\n");
+		goto platform_init_err;
+	}
+
+	err = clk_set_rate(mfg->clk_main_parent, GPU_FREQ_KHZ_MAX * 1000);
+	if (err) {
+		dev_err(kbdev->dev, "Failed to set clock to %d kHz\n",
+				GPU_FREQ_KHZ_MAX);
+		goto platform_init_err;
+	}
+
+	err = clk_set_parent(mfg->clk_mux, mfg->clk_main_parent);
+	if (err) {
+		dev_err(kbdev->dev, "Failed to select main clock src\n");
+		goto platform_init_err;
+	}
+
+	return 0;
+
+platform_init_err:
+	kfree(mfg);
+	return err;
+}
+
+static void platform_term(struct kbase_device *kbdev)
+{
+	struct mfg_base *mfg = kbdev->platform_context;
+
+	kfree(mfg);
+	kbdev->platform_context = NULL;
+	pm_runtime_disable(kbdev->dev);
+}
+
+struct kbase_platform_funcs_conf platform_funcs = {
+	.platform_init_func = platform_init,
+	.platform_term_func = platform_term
+};
+
+static int __init mtk_mfg_corex(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&mtk_gpu_corex_driver);
+	if (ret != 0)
+		pr_debug("%s: Failed to register GPU core driver\n", __func__);
+
+	return ret;
+}
+
+subsys_initcall(mtk_mfg_corex);
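Two details of the file above are easy to miss. First, mali_mfgsys_init() returns -EPROBE_DEFER until both corex sub-devices (registered at subsys_initcall time) have probed, which is what orders the three probes. Second, pm_callback_power_on() returns early when a pm_runtime_get_sync() fails, without dropping the references it already took. A balanced variant of the core power-up sequence would look like the sketch below (an illustration of the runtime-PM contract, not the author's code):

/* Sketch: power the cores up in ascending order and unwind in descending
 * order on failure, so no runtime-PM reference is leaked. Note that
 * pm_runtime_get_sync() bumps the usage count even when it fails, hence
 * pm_runtime_put_noidle() on the core whose resume failed. */
static int example_power_cores(struct device *core0, struct device *core1,
			       struct device *core2)
{
	int err;

	err = pm_runtime_get_sync(core0);
	if (err < 0)
		goto put_core0;
	err = pm_runtime_get_sync(core1);
	if (err < 0)
		goto put_core1;
	err = pm_runtime_get_sync(core2);
	if (err < 0)
		goto put_core2;
	return 0;

put_core2:
	pm_runtime_put_noidle(core2);
	pm_runtime_put(core1);
	pm_runtime_put(core0);
	return err;
put_core1:
	pm_runtime_put_noidle(core1);
	pm_runtime_put(core0);
	return err;
put_core0:
	pm_runtime_put_noidle(core0);
	return err;
}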
diff --git a/drivers/gpu/arm/midgard/platform/rk/Kbuild b/drivers/gpu/arm/midgard/platform/rk/Kbuild
new file mode 100644
index 0000000..56b6c01
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/rk/Kbuild
@@ -0,0 +1,14 @@
+#
+# (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+
+ifeq ($(CONFIG_MALI_MIDGARD),y)
+	obj-y += platform/rk/mali_kbase_config_rk.o
+else ifeq ($(CONFIG_MALI_MIDGARD),m)
+	SRC += platform/rk/mali_kbase_config_rk.c
+endif
diff --git a/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_platform.h
new file mode 100644
index 0000000..551b598
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_platform.h
@@ -0,0 +1,93 @@
+/*
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+/**
+ * @file mali_kbase_config_platform.h
+ * Defines the RK platform configuration.
+ */
+
+/**
+ * Maximum frequency GPU will be clocked at.
+ * Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX (500000)
+
+/**
+ * Minimum frequency GPU will be clocked at.
+ * Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN (500000)
+
+/**
+ * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock
+ *
+ * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_cpu_clk_speed_func.
+ * Default Value:  NA
+ */
+#define CPU_SPEED_FUNC (NULL)
+
+/**
+ * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock
+ *
+ * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func
+ * for the function prototype.
+ *
+ * Attached value: A kbase_gpu_clk_speed_func.
+ * Default Value:  NA
+ */
+#define GPU_SPEED_FUNC (NULL)
+
+/**
+ * Power management configuration
+ *
+ * Attached value:
+ *	pointer to @ref kbase_pm_callback_conf
+ * Default value:
+ *	See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+extern struct kbase_pm_callback_conf pm_callbacks;
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value:
+ *	pointer to @ref kbase_platform_funcs_conf
+ * Default value:
+ *	See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (&platform_funcs)
+extern struct kbase_platform_funcs_conf platform_funcs;
+
+/**
+ * Secure mode switch
+ *
+ * Attached value: pointer to @ref kbase_secure_ops
+ */
+#define SECURE_CALLBACKS (NULL)
+
+/*
+ * Both rk3288 and rk3399 get really unhappy unless you do this.
+ * See https://crrev.com/c/1325749 for some context.
+ */
+#define PLATFORM_POWER_DOWN_ONLY (1)
diff --git a/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c
new file mode 100644
index 0000000..bb46018
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c
@@ -0,0 +1,272 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+
+#include "mali_kbase_rk.h"
+
+static int kbase_pm_notifier(struct notifier_block *nb, unsigned long action,
+		void *data)
+{
+	struct rk_context *platform = container_of(nb, struct rk_context, pm_nb);
+	struct device *dev = platform->kbdev->dev;
+
+	switch (action) {
+	case PM_SUSPEND_PREPARE:
+		return pm_runtime_get_sync(dev);
+	case PM_POST_SUSPEND:
+		return pm_runtime_put(dev);
+	}
+
+	return 0;
+}
+
+static int kbase_platform_rk_init(struct kbase_device *kbdev)
+{
+	struct rk_context *platform;
+
+	platform = kzalloc(sizeof(*platform), GFP_KERNEL);
+	if (!platform)
+		return -ENOMEM;
+
+	platform->is_powered = false;
+
+	kbdev->platform_context = platform;
+	platform->kbdev = kbdev;
+
+	return 0;
+}
+
+static void kbase_platform_rk_term(struct kbase_device *kbdev)
+{
+	struct rk_context *platform = kbdev->platform_context;
+
+	kfree(platform);
+}
+
+struct kbase_platform_funcs_conf platform_funcs = {
+	.platform_init_func = &kbase_platform_rk_init,
+	.platform_term_func = &kbase_platform_rk_term,
+};
+
+/* TODO: the runtime PM on/off hooks are currently no-ops on RK platforms. */
+static int rk_pm_callback_runtime_on(struct kbase_device *kbdev)
+{
+	return 0;
+}
+
+static void rk_pm_callback_runtime_off(struct kbase_device *kbdev)
+{
+}
+
+static int rk_pm_enable_regulator(struct kbase_device *kbdev)
+{
+	int error;
+	int i;
+
+	dev_dbg(kbdev->dev, "Enabling regulators\n");
+
+	for (i = 0; i < kbdev->regulator_num; i++) {
+		error = regulator_enable(kbdev->regulator[i]);
+		if (error < 0) {
+			dev_err(kbdev->dev,
+				"Power on reg %d failed error = %d\n",
+				i, error);
+			return error;
+		}
+	}
+
+	return 0;
+}
+
+static void rk_pm_disable_regulator(struct kbase_device *kbdev)
+{
+	int error;
+	int i;
+
+	for (i = 0; i < kbdev->regulator_num; i++) {
+		error = regulator_disable(kbdev->regulator[i]);
+		if (error < 0) {
+			dev_err(kbdev->dev,
+				"Power off reg %d failed error = %d\n",
+				i, error);
+		}
+	}
+}
+
+static int rk_pm_enable_clk(struct kbase_device *kbdev)
+{
+	int ret = 0;
+
+	if (!(kbdev->clock)) {
+		dev_dbg(kbdev->dev, "Continuing without Mali clock control\n");
+		/* Allow probe to continue without clock. */
+	} else {
+		ret = clk_prepare_enable(kbdev->clock);
+		if (ret)
+			dev_err(kbdev->dev, "failed to enable clk: %d\n", ret);
+	}
+
+	return ret;
+}
+
+static void rk_pm_disable_clk(struct kbase_device *kbdev)
+{
+	if (!kbdev->clock) {
+		/* Allow probe to continue without clock. */
+		dev_dbg(kbdev->dev, "Continuing without Mali clock control\n");
+	} else {
+		clk_disable_unprepare(kbdev->clock);
+	}
+}
+
+static int rk_pm_callback_power_on(struct kbase_device *kbdev)
+{
+	int ret = 1; /* Assume GPU has been powered off */
+	int err = 0;
+	struct rk_context *platform;
+
+	platform = kbdev->platform_context;
+	if (platform->is_powered) {
+		dev_dbg(kbdev->dev, "mali_device is already powered\n");
+		return 0;
+	}
+
+	dev_dbg(kbdev->dev, "powering on\n");
+
+	/* we must enable vdd_gpu before pd_gpu_in_chip. */
+	err = rk_pm_enable_regulator(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "failed to enable regulator, err=%d\n", err);
+		return err;
+	}
+
+	err = pm_runtime_get_sync(kbdev->dev);
+	if (err < 0 && err != -EACCES) {
+		dev_err(kbdev->dev,
+			"failed to runtime resume device: %d\n", err);
+		pm_runtime_put_sync(kbdev->dev);
+		rk_pm_disable_regulator(kbdev);
+		return err;
+	} else if (err == 1) {
+		/*
+		 * Let core know that the chip has not been
+		 * powered off, so we can save on re-initialization.
+		 */
+		ret = 0;
+	}
+
+	err = rk_pm_enable_clk(kbdev); /* the clock is independent of the pd */
+	if (err) {
+		dev_err(kbdev->dev, "failed to enable clk: %d\n", err);
+		pm_runtime_put_sync(kbdev->dev);
+		rk_pm_disable_regulator(kbdev);
+		return err;
+	}
+
+	platform->is_powered = true;
+
+	return ret;
+}
+
+static void rk_pm_callback_power_off(struct kbase_device *kbdev)
+{
+	struct rk_context *platform = kbdev->platform_context;
+
+	if (!platform->is_powered) {
+		dev_dbg(kbdev->dev, "mali_dev is already powered off\n");
+		return;
+	}
+
+	dev_dbg(kbdev->dev, "powering off\n");
+
+	platform->is_powered = false;
+
+	rk_pm_disable_clk(kbdev);
+
+	pm_runtime_mark_last_busy(kbdev->dev);
+	pm_runtime_put_autosuspend(kbdev->dev);
+
+	rk_pm_disable_regulator(kbdev);
+}
+
+static int rk_kbase_device_runtime_init(struct kbase_device *kbdev)
+{
+	struct rk_context *platform = kbdev->platform_context;
+	int err;
+
+	pm_runtime_set_autosuspend_delay(kbdev->dev, 200);
+	pm_runtime_use_autosuspend(kbdev->dev);
+
+	platform->pm_nb.notifier_call = kbase_pm_notifier;
+	platform->pm_nb.priority = 0;
+	err = register_pm_notifier(&platform->pm_nb);
+	if (err) {
+		dev_err(kbdev->dev, "Couldn't register pm notifier\n");
+		return -ENODEV;
+	}
+
+	/* no need to call pm_runtime_set_active here. */
+	pm_runtime_enable(kbdev->dev);
+
+	return 0;
+}
+
+static void rk_kbase_device_runtime_disable(struct kbase_device *kbdev)
+{
+	struct rk_context *platform = kbdev->platform_context;
+
+	pm_runtime_disable(kbdev->dev);
+	unregister_pm_notifier(&platform->pm_nb);
+}
+
+static void rk_pm_suspend_callback(struct kbase_device *kbdev)
+{
+	/*
+	 * Depending on power policy, the GPU might not be powered off at this
+	 * point. We have to call rk_pm_callback_power_off() here.
+	 */
+	rk_pm_callback_power_off(kbdev);
+}
+
+static void rk_pm_resume_callback(struct kbase_device *kbdev)
+{
+	/*
+	 * Core will call rk_pm_enable_regulator() itself before attempting
+	 * to access the GPU, so no need to do it here.
+	 */
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+	.power_on_callback = rk_pm_callback_power_on,
+	.power_off_callback = rk_pm_callback_power_off,
+	.power_suspend_callback = rk_pm_suspend_callback,
+	.power_resume_callback = rk_pm_resume_callback,
+#ifdef CONFIG_PM
+	.power_runtime_init_callback = rk_kbase_device_runtime_init,
+	.power_runtime_term_callback = rk_kbase_device_runtime_disable,
+	.power_runtime_on_callback = rk_pm_callback_runtime_on,
+	.power_runtime_off_callback = rk_pm_callback_runtime_off,
+#else				/* CONFIG_PM */
+	.power_runtime_init_callback = NULL,
+	.power_runtime_term_callback = NULL,
+	.power_runtime_on_callback = NULL,
+	.power_runtime_off_callback = NULL,
+#endif				/* CONFIG_PM */
+};
+
+int kbase_platform_early_init(void)
+{
+	/* Nothing needed at this stage */
+	return 0;
+}
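One subtlety in kbase_pm_notifier() above: PM notifier callbacks are expected to return NOTIFY_* codes, but the function returns the raw result of pm_runtime_get_sync()/pm_runtime_put(), which is 0, 1, or a negative errno. A more defensive variant would translate with notifier_from_errno(); a sketch under that assumption (not the author's code):

static int example_pm_notifier(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct rk_context *platform =
		container_of(nb, struct rk_context, pm_nb);
	struct device *dev = platform->kbdev->dev;
	int err = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
		err = pm_runtime_get_sync(dev);
		if (err > 0)	/* "was already active" is not an error */
			err = 0;
		break;
	case PM_POST_SUSPEND:
		err = pm_runtime_put(dev);
		break;
	}

	/* notifier_from_errno() maps 0 to NOTIFY_OK and a negative errno
	 * to a NOTIFY_STOP result with the errno encoded. */
	return notifier_from_errno(err);
}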
diff --git a/drivers/gpu/arm/midgard/platform/rk/mali_kbase_rk.h b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_rk.h
new file mode 100644
index 0000000..1e59118
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_rk.h
@@ -0,0 +1,37 @@
+/* drivers/gpu/arm/midgard/platform/rk/mali_kbase_rk.h
+ * Rockchip SoC Mali-Midgard platform-dependent codes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file mali_kbase_rk.h
+ *
+ * Defines the work context type of the RK platform-dependent part.
+ */
+
+#ifndef _MALI_KBASE_RK_H_
+#define _MALI_KBASE_RK_H_
+
+#include <linux/notifier.h>
+
+#include <mali_kbase.h>
+
+/*---------------------------------------------------------------------------*/
+
+/**
+ * struct rk_context - work context of the RK platform-dependent part.
+ * @kbdev:      the kbase_device this context belongs to.
+ * @pm_nb:      notifier block used to runtime-resume the kbase_device around
+ *              system suspend.
+ * @is_powered: records whether the common part currently holds the GPU
+ *              powered on, i.e. 'power_on_callback' has been called without
+ *              a matching 'power_off_callback'.
+ */
+struct rk_context {
+	struct kbase_device *kbdev;
+	struct notifier_block pm_nb;
+	bool is_powered;
+};
+
+#endif				/* _MALI_KBASE_RK_H_ */
diff --git a/drivers/gpu/arm/midgard/platform/vexpress/Kbuild b/drivers/gpu/arm/midgard/platform/vexpress/Kbuild
new file mode 100644
index 0000000..6780e4c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress/Kbuild
@@ -0,0 +1,24 @@
+#
+# (C) COPYRIGHT 2012-2013, 2016-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+mali_kbase-y += \
+	$(MALI_PLATFORM_DIR)/mali_kbase_config_vexpress.o \
+	mali_kbase_platform_fake.o
diff --git a/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h
new file mode 100644
index 0000000..fac3cd5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
diff --git a/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c
new file mode 100644
index 0000000..d165ce2
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c
@@ -0,0 +1,69 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+#include "mali_kbase_config_platform.h"
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+	.job_irq_number = 68,
+	.mmu_irq_number = 69,
+	.gpu_irq_number = 70,
+	.io_memory_region = {
+			     .start = 0xFC010000,
+			     .end = 0xFC010000 + (4096 * 4) - 1
+	}
+};
+#endif /* CONFIG_OF */
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+	/*
+	 * Nothing is needed on VExpress, but report that the GPU may have
+	 * lost state so the core re-initialises it.
+	 */
+	return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+	.power_on_callback = pm_callback_power_on,
+	.power_off_callback = pm_callback_power_off,
+	.power_suspend_callback  = NULL,
+	.power_resume_callback = NULL
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+	.io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+	return &versatile_platform_config;
+}
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild
new file mode 100644
index 0000000..51b408e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild
@@ -0,0 +1,24 @@
+#
+# (C) COPYRIGHT 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+mali_kbase-y += \
+	$(MALI_PLATFORM_DIR)/mali_kbase_config_vexpress.o \
+	mali_kbase_platform_fake.o
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h
new file mode 100644
index 0000000..fac3cd5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
new file mode 100644
index 0000000..efca0a5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
@@ -0,0 +1,65 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+	.job_irq_number = 68,
+	.mmu_irq_number = 69,
+	.gpu_irq_number = 70,
+	.io_memory_region = {
+			     .start = 0x2f010000,
+			     .end = 0x2f010000 + (4096 * 4) - 1}
+};
+#endif
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+	/*
+	 * Nothing is needed on VExpress, but report that the GPU may have
+	 * lost state so the core re-initialises it.
+	 */
+	return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+	.power_on_callback = pm_callback_power_on,
+	.power_off_callback = pm_callback_power_off,
+	.power_suspend_callback  = NULL,
+	.power_resume_callback = NULL
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+	.io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+	return &versatile_platform_config;
+}
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/Kbuild b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/Kbuild
new file mode 100644
index 0000000..e07709c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/Kbuild
@@ -0,0 +1,25 @@
+#
+# (C) COPYRIGHT 2012-2013, 2016-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+mali_kbase-y += \
+	$(MALI_PLATFORM_DIR)/mali_kbase_config_vexpress.o \
+	$(MALI_PLATFORM_DIR)/mali_kbase_cpu_vexpress.o \
+	mali_kbase_platform_fake.o
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h
new file mode 100644
index 0000000..fac3cd5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c
new file mode 100644
index 0000000..b6714b9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c
@@ -0,0 +1,67 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+	.job_irq_number = 75,
+	.mmu_irq_number = 76,
+	.gpu_irq_number = 77,
+	.io_memory_region = {
+			     .start = 0x2F000000,
+			     .end = 0x2F000000 + (4096 * 4) - 1}
+};
+#endif
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+	/*
+	 * Nothing is needed on VExpress, but report that the GPU may have
+	 * lost state so the core re-initialises it.
+	 */
+	return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+	.power_on_callback = pm_callback_power_on,
+	.power_off_callback = pm_callback_power_off,
+	.power_suspend_callback  = NULL,
+	.power_resume_callback = NULL
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+	.io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+	return &versatile_platform_config;
+}
diff --git a/drivers/gpu/arm/midgard/platform_dummy/mali_ukk_os.h b/drivers/gpu/arm/midgard/platform_dummy/mali_ukk_os.h
new file mode 100644
index 0000000..ef1ec70
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform_dummy/mali_ukk_os.h
@@ -0,0 +1,58 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/**
+ * @file mali_ukk_os.h
+ * Types and definitions, common across Linux builds, for the kernel side of
+ * the user-kernel interface.
+ */
+
+#ifndef _UKK_OS_H_ /* Linux version */
+#define _UKK_OS_H_
+
+#include <linux/fs.h>
+
+/**
+ * @addtogroup uk_api User-Kernel Interface API
+ * @{
+ */
+
+/**
+ * @addtogroup uk_api_kernel UKK (Kernel side)
+ * @{
+ */
+
+/**
+ * Internal OS specific data structure associated with each UKK session. Part
+ * of a ukk_session object.
+ */
+typedef struct ukkp_session {
+	int dummy;     /**< No internal OS specific data at this time */
+} ukkp_session;
+
+/** @} end group uk_api_kernel */
+
+/** @} end group uk_api */
+
+#endif /* _UKK_OS_H_ */
diff --git a/drivers/gpu/arm/midgard/protected_mode_switcher.h b/drivers/gpu/arm/midgard/protected_mode_switcher.h
new file mode 100644
index 0000000..8778d81
--- /dev/null
+++ b/drivers/gpu/arm/midgard/protected_mode_switcher.h
@@ -0,0 +1,69 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _PROTECTED_MODE_SWITCH_H_
+#define _PROTECTED_MODE_SWITCH_H_
+
+struct protected_mode_device;
+
+/**
+ * struct protected_mode_ops - Callbacks for protected mode switch operations
+ *
+ * @protected_mode_enable:  Callback to enable protected mode for device
+ * @protected_mode_disable: Callback to disable protected mode for device
+ */
+struct protected_mode_ops {
+	/**
+	 * protected_mode_enable() - Enable protected mode on device
+	 * @protected_dev:	The protected mode device
+	 *
+	 * Return: 0 on success, non-zero on error
+	 */
+	int (*protected_mode_enable)(
+			struct protected_mode_device *protected_dev);
+
+	/**
+	 * protected_mode_disable() - Disable protected mode on device, and
+	 *                            reset device
+	 * @protected_dev:	The protected mode device
+	 *
+	 * Return: 0 on success, non-zero on error
+	 */
+	int (*protected_mode_disable)(
+			struct protected_mode_device *protected_dev);
+};
+
+/**
+ * struct protected_mode_device - Device structure for protected mode devices
+ * @ops:  Callbacks associated with this device
+ * @data: Pointer to device private data
+ *
+ * This structure should be registered with the platform device using
+ * platform_set_drvdata().
+ */
+struct protected_mode_device {
+	struct protected_mode_ops ops;
+	void *data;
+};
+
+#endif /* _PROTECTED_MODE_SWITCH_H_ */
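For reference, a minimal sketch of what a switcher implementing this interface might look like, registered the way the struct comment describes (the SMC details are elided and all names here are illustrative, not an actual switcher driver):

static int example_protected_enable(struct protected_mode_device *pdev)
{
	/* A real switcher would issue an SMC call or write the SoC's
	 * security registers here to enter protected mode. */
	return 0;
}

static int example_protected_disable(struct protected_mode_device *pdev)
{
	/* ... and here it would leave protected mode and reset the GPU. */
	return 0;
}

static struct protected_mode_device example_protected_dev = {
	.ops = {
		.protected_mode_enable = example_protected_enable,
		.protected_mode_disable = example_protected_disable,
	},
};

static int example_switcher_probe(struct platform_device *pdev)
{
	example_protected_dev.data = pdev;
	platform_set_drvdata(pdev, &example_protected_dev);
	return 0;
}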
diff --git a/drivers/gpu/arm/midgard/sconscript b/drivers/gpu/arm/midgard/sconscript
new file mode 100644
index 0000000..01c7589
--- /dev/null
+++ b/drivers/gpu/arm/midgard/sconscript
@@ -0,0 +1,67 @@
+#
+# (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+import sys
+Import('env')
+
+SConscript( 'tests/sconscript' )
+
+mock_test = 0
+
+# Source files required for kbase.
+kbase_src = [
+	Glob('*.c'),
+	Glob('backend/*/*.c'),
+	Glob('internal/*/*.c'),
+	Glob('ipa/*.c'),
+	Glob('platform/%s/*.c' % env['platform_config']),
+	Glob('thirdparty/*.c'),
+]
+
+# The platform_config sources are already globbed into kbase_src above, so
+# only juno_soc needs the extra devicetree sources.
+if env['platform_config']=='juno_soc':
+	kbase_src += [Glob('platform/devicetree/*.c')]
+
+if Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock') and env['unit'] == '1':
+	kbase_src += [Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock/*.c')]
+	mock_test = 1
+
+make_args = env.kernel_get_config_defines(ret_list = True) + [
+	'PLATFORM=%s' % env['platform'],
+	'MALI_KERNEL_TEST_API=%s' % env['debug'],
+	'MALI_UNIT_TEST=%s' % env['unit'],
+	'MALI_RELEASE_NAME=%s' % env['mali_release_name'],
+	'MALI_MOCK_TEST=%s' % mock_test,
+	'MALI_CUSTOMER_RELEASE=%s' % env['release'],
+	'MALI_USE_CSF=%s' % env['csf'],
+	'MALI_COVERAGE=%s' % env['coverage'],
+]
+
+kbase = env.BuildKernelModule('$STATIC_LIB_PATH/mali_kbase.ko', kbase_src,
+                              make_args = make_args)
+
+if 'smc_protected_mode_switcher' in env:
+	env.Depends('$STATIC_LIB_PATH/mali_kbase.ko', '$STATIC_LIB_PATH/smc_protected_mode_switcher.ko')
+
+env.KernelObjTarget('kbase', kbase)
+
+env.AppendUnique(BASE=['cutils_linked_list'])
diff --git a/drivers/gpu/arm/midgard/tests/Kbuild b/drivers/gpu/arm/midgard/tests/Kbuild
new file mode 100644
index 0000000..df16a77
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/Kbuild
@@ -0,0 +1,23 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+obj-$(CONFIG_MALI_KUTF) += kutf/
+obj-$(CONFIG_MALI_IRQ_LATENCY) += mali_kutf_irq_test/
diff --git a/drivers/gpu/arm/midgard/tests/Kconfig b/drivers/gpu/arm/midgard/tests/Kconfig
new file mode 100644
index 0000000..fa91aea
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/Kconfig
@@ -0,0 +1,23 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+source "drivers/gpu/arm/midgard/tests/kutf/Kconfig"
+source "drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig"
diff --git a/drivers/gpu/arm/midgard/tests/Mconfig b/drivers/gpu/arm/midgard/tests/Mconfig
new file mode 100644
index 0000000..af4e383
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/Mconfig
@@ -0,0 +1,32 @@
+#
+# (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA  02110-1301, USA.
+#
+
+config UNIT_TEST_KERNEL_MODULES
+	bool
+	default y if UNIT_TEST_CODE && BUILD_KERNEL_MODULES
+	default n
+
+config BUILD_IPA_TESTS
+	bool
+	default y if UNIT_TEST_KERNEL_MODULES && MALI_DEVFREQ
+	default n
+
+config BUILD_IPA_UNIT_TESTS
+	bool
+	default y if NO_MALI && BUILD_IPA_TESTS
+	default n
+
+config BUILD_CSF_TESTS
+	bool
+	default y if UNIT_TEST_KERNEL_MODULES && GPU_HAS_CSF
+	default n
diff --git a/drivers/gpu/arm/midgard/tests/build.bp b/drivers/gpu/arm/midgard/tests/build.bp
new file mode 100644
index 0000000..3107062
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/build.bp
@@ -0,0 +1,36 @@
+/*
+ * Copyright:
+ * ----------------------------------------------------------------------------
+ * This confidential and proprietary software may be used only as authorized
+ * by a licensing agreement from ARM Limited.
+ *      (C) COPYRIGHT 2018 ARM Limited, ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorized copies and
+ * copies may only be made to the extent permitted by a licensing agreement
+ * from ARM Limited.
+ * ----------------------------------------------------------------------------
+ */
+
+bob_defaults {
+    name: "kernel_test_module_defaults",
+    defaults: ["mali_kbase_shared_config_defaults"],
+    include_dirs: [
+        "kernel/drivers/gpu/arm",
+        "kernel/drivers/gpu/arm/midgard",
+        "kernel/drivers/gpu/arm/midgard/backend/gpu",
+        "kernel/drivers/gpu/arm/midgard/tests/include",
+    ],
+}
+
+subdirs = [
+    "kutf",
+    "mali_kutf_irq_test",
+]
+
+optional_subdirs = [
+    "kutf_test",
+    "kutf_test_runner",
+    "mali_kutf_ipa_test",
+    "mali_kutf_ipa_unit_test",
+    "mali_kutf_vinstr_test",
+    "mali_kutf_fw_test",
+]
diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers.h
new file mode 100644
index 0000000..15e168c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers.h
@@ -0,0 +1,77 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KERNEL_UTF_HELPERS_H_
+#define _KERNEL_UTF_HELPERS_H_
+
+/* kutf_helpers.h
+ * Test helper functions for the kernel UTF test infrastructure.
+ *
+ * These functions provide methods for enqueuing/dequeuing lines of text sent
+ * by user space. They are used to implement the transfer of "userdata" from
+ * user space to kernel.
+ */
+
+#include <kutf/kutf_suite.h>
+
+/**
+ * kutf_helper_input_dequeue() - Dequeue a line sent by user space
+ * @context:    KUTF context
+ * @str_size:   Pointer to an integer to receive the size of the string
+ *
+ * If no line is available then this function will wait (interruptibly) until
+ * a line is available.
+ *
+ * Return: The line dequeued, ERR_PTR(-EINTR) if interrupted or NULL on end
+ * of data.
+ */
+char *kutf_helper_input_dequeue(struct kutf_context *context, size_t *str_size);
+
+/**
+ * kutf_helper_input_enqueue() - Enqueue a line sent by user space
+ * @context:   KUTF context
+ * @str:       The user space address of the line
+ * @size:      The length in bytes of the string
+ *
+ * This function will use copy_from_user to copy the string out of user space.
+ * The string need not be NULL-terminated (@size should not include the NULL
+ * termination).
+ *
+ * As a special case @str==NULL and @size==0 is valid to mark the end of input,
+ * but callers should use kutf_helper_input_enqueue_end_of_data() instead.
+ *
+ * Return: 0 on success, -EFAULT if the line cannot be copied from user space,
+ * -ENOMEM if out of memory.
+ */
+int kutf_helper_input_enqueue(struct kutf_context *context,
+		const char __user *str, size_t size);
+
+/**
+ * kutf_helper_input_enqueue_end_of_data() - Signal no more data is to be sent
+ * @context:    KUTF context
+ *
+ * After this function has been called, kutf_helper_input_dequeue() will always
+ * return NULL.
+ */
+void kutf_helper_input_enqueue_end_of_data(struct kutf_context *context);
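+
+/*
+ * Illustrative sketch (not part of the API contract): a kernel-side test
+ * body can drain queued user data as below, where context is the running
+ * fixture's context and parse_line() is a hypothetical consumer; NULL marks
+ * end of data and ERR_PTR(-EINTR) indicates the wait was interrupted.
+ *
+ *	size_t sz;
+ *	char *line;
+ *
+ *	while ((line = kutf_helper_input_dequeue(context, &sz)) != NULL) {
+ *		if (IS_ERR(line))
+ *			break;
+ *		parse_line(line, sz);
+ *	}
+ */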
+
+#endif	/* _KERNEL_UTF_HELPERS_H_ */
diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers_user.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers_user.h
new file mode 100644
index 0000000..3b1300e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers_user.h
@@ -0,0 +1,179 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KERNEL_UTF_HELPERS_USER_H_
+#define _KERNEL_UTF_HELPERS_USER_H_
+
+/* kutf_helpers_user.h
+ * Test helper functions for the kernel UTF test infrastructure, whose
+ * implementations mirror the equivalent user-side kutf functions.
+ */
+
+#include <kutf/kutf_suite.h>
+#include <kutf/kutf_helpers.h>
+
+
+#define KUTF_HELPER_MAX_VAL_NAME_LEN 255
+
+enum kutf_helper_valtype {
+	KUTF_HELPER_VALTYPE_INVALID,
+	KUTF_HELPER_VALTYPE_U64,
+	KUTF_HELPER_VALTYPE_STR,
+
+	KUTF_HELPER_VALTYPE_COUNT /* Must be last */
+};
+
+struct kutf_helper_named_val {
+	enum kutf_helper_valtype type;
+	char *val_name;
+	union {
+		u64 val_u64;
+		char *val_str;
+	} u;
+};
+
+/* Extra error values for certain helpers, used when we need to distinguish
+ * them from Linux's own error values.
+ *
+ * These can only be used on certain functions returning an int type that are
+ * documented as returning one of these potential values; they cannot be used
+ * from functions returning a ptr type, since we can't decode them with
+ * PTR_ERR().
+ *
+ * No negative values are used - Linux error codes should be used instead, and
+ * indicate a problem accessing the data file itself (such problems are
+ * generally unrecoverable)
+ *
+ * Positive values indicate correct access but invalid parsing (which can be
+ * recovered from, assuming data sent in the future is correct) */
+enum kutf_helper_err {
+	/* No error - must be zero */
+	KUTF_HELPER_ERR_NONE = 0,
+	/* Named value parsing encountered an invalid name */
+	KUTF_HELPER_ERR_INVALID_NAME,
+	/* Named value parsing of string or u64 type encountered extra
+	 * characters after the value (after the last digit for a u64 type or
+	 * after the string end delimiter for string type) */
+	KUTF_HELPER_ERR_CHARS_AFTER_VAL,
+	/* Named value parsing of string type couldn't find the string end
+	 * delimiter.
+	 *
+	 * This cannot be encountered when the NAME="value" message exceeds the
+	 * textbuf's maximum line length, because such messages are not checked
+	 * for an end string delimiter */
+	KUTF_HELPER_ERR_NO_END_DELIMITER,
+	/* Named value didn't parse as any of the known types */
+	KUTF_HELPER_ERR_INVALID_VALUE,
+};
+
+
+/* Send named NAME=value pair, u64 value
+ *
+ * NAME must match [A-Z0-9_]\+ and can be up to KUTF_HELPER_MAX_VAL_NAME_LEN
+ * characters long
+ *
+ * Any failure will be logged on the suite's current test fixture
+ *
+ * Returns 0 on success, non-zero on failure
+ */
+int kutf_helper_send_named_u64(struct kutf_context *context,
+		const char *val_name, u64 val);
+
+/* Get the maximum length of a string that can be represented as a particular
+ * NAME="value" pair without string-value truncation in the kernel's buffer
+ *
+ * Given val_name and the kernel buffer's size, this can be used to determine
+ * the maximum length of a string that can be sent as val_name="value" pair
+ * without having the string value truncated. Any string longer than this will
+ * be truncated at some point during communication to this size.
+ *
+ * It is assumed that val_name is a valid name for
+ * kutf_helper_send_named_str(), and no checking will be made to
+ * ensure this.
+ *
+ * Returns the maximum string length that can be represented, or a negative
+ * value if the NAME="value" encoding itself wouldn't fit in kern_buf_sz
+ */
+int kutf_helper_max_str_len_for_kern(const char *val_name, int kern_buf_sz);
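+
+/* For example (illustrative): with val_name "RESULT" and a kernel buffer of
+ * KUTF_MAX_LINE_LENGTH (1024) bytes, the longest string value that can be
+ * sent without truncation is 1024 - 6 (name) - 2 ("=\"") - 1 ("\"") = 1015
+ * characters. */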
+
+/* Send named NAME="str" pair
+ *
+ * No escaping is allowed in str; any of the following characters will
+ * terminate the string: '"' '\\' '\n'
+ *
+ * NAME must match [A-Z0-9_]\+ and can be up to KUTF_HELPER_MAX_VAL_NAME_LEN
+ * characters long
+ *
+ * Any failure will be logged on the suite's current test fixture
+ *
+ * Returns 0 on success, non-zero on failure */
+int kutf_helper_send_named_str(struct kutf_context *context,
+		const char *val_name, const char *val_str);
+
+/* Receive named NAME=value pair
+ *
+ * This can receive u64 and string values - check named_val->type
+ *
+ * If you are not planning on dynamic handling of the named value's name and
+ * type, then kutf_helper_receive_check_val() is more useful as a
+ * convenience function.
+ *
+ * String members of named_val will come from memory allocated on the fixture's mempool
+ *
+ * Returns 0 on success. Negative value on failure to receive from the 'run'
+ * file, positive value indicates an enum kutf_helper_err value for correct
+ * reception of data but invalid parsing */
+int kutf_helper_receive_named_val(
+		struct kutf_context *context,
+		struct kutf_helper_named_val *named_val);
+
+/* Receive and validate NAME=value pair
+ *
+ * As with kutf_helper_receive_named_val, but validate that the
+ * name and type are as expected, as a convenience for a common pattern found
+ * in tests.
+ *
+ * NOTE: this only returns an error value if there was actually a problem
+ * receiving data.
+ *
+ * NOTE: If the underlying data was received correctly, but:
+ * - isn't of the expected name
+ * - isn't the expected type
+ * - isn't correctly parsed for the type
+ * then the following happens:
+ * - failure result is recorded
+ * - named_val->type will be KUTF_HELPER_VALTYPE_INVALID
+ * - named_val->u will contain some default value that should be relatively
+ *   harmless for the test, including being writable in the case of string
+ *   values
+ * - return value will be 0 to indicate success
+ *
+ * The rationale behind this is that we'd prefer to continue the rest of the
+ * test with failures propagated, rather than hitting a timeout */
+int kutf_helper_receive_check_val(
+		struct kutf_helper_named_val *named_val,
+		struct kutf_context *context,
+		const char *expect_val_name,
+		enum kutf_helper_valtype expect_val_type);
+
+/* Output a named value to kmsg */
+void kutf_helper_output_named_val(struct kutf_helper_named_val *named_val);
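+
+/*
+ * Illustrative sketch of a typical exchange with the user-side test runner;
+ * the value names "READY" and "GPU_VA" and the consumer use_gpu_va() are
+ * hypothetical.
+ *
+ *	struct kutf_helper_named_val val;
+ *
+ *	kutf_helper_send_named_u64(context, "READY", 1);
+ *	if (kutf_helper_receive_check_val(&val, context, "GPU_VA",
+ *			KUTF_HELPER_VALTYPE_U64) == 0 &&
+ *			val.type == KUTF_HELPER_VALTYPE_U64)
+ *		use_gpu_va(val.u.val_u64);
+ */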
+
+
+#endif	/* _KERNEL_UTF_HELPERS_USER_H_ */
diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_mem.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_mem.h
new file mode 100644
index 0000000..988559d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_mem.h
@@ -0,0 +1,73 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KERNEL_UTF_MEM_H_
+#define _KERNEL_UTF_MEM_H_
+
+/* kutf_mem.h
+ * Functions for management of memory pools in the kernel.
+ *
+ * This module implements a memory pool allocator, allowing a test
+ * implementation to make linked allocations which can then be freed by a
+ * single call that releases all of the resources held by the entire pool.
+ *
+ * Note that it is not possible to free single resources within the pool once
+ * allocated.
+ */
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+/**
+ * struct kutf_mempool - the memory pool context management structure
+ * @head:	list head to which the allocations in this context are added
+ * @lock:	mutex for concurrent allocation from multiple threads
+ *
+ */
+struct kutf_mempool {
+	struct list_head head;
+	struct mutex lock;
+};
+
+/**
+ * kutf_mempool_init() - Initialize a memory pool.
+ * @pool:	Memory pool structure to initialize, provided by the user
+ *
+ * Return:	zero on success
+ */
+int kutf_mempool_init(struct kutf_mempool *pool);
+
+/**
+ * kutf_mempool_alloc() - Allocate memory from a pool
+ * @pool:	Memory pool to allocate from
+ * @size:	Size of memory wanted in number of bytes
+ *
+ * Return:	Pointer to memory on success, NULL on failure.
+ */
+void *kutf_mempool_alloc(struct kutf_mempool *pool, size_t size);
+
+/**
+ * kutf_mempool_destroy() - Destroy a memory pool, freeing all memory within it.
+ * @pool:	The memory pool to free
+ */
+void kutf_mempool_destroy(struct kutf_mempool *pool);
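+
+/*
+ * Illustrative sketch: every allocation made from the pool is released by
+ * the single kutf_mempool_destroy() call at the end.
+ *
+ *	struct kutf_mempool pool;
+ *
+ *	if (!kutf_mempool_init(&pool)) {
+ *		char *buf = kutf_mempool_alloc(&pool, 128);
+ *
+ *		if (buf)
+ *			snprintf(buf, 128, "scratch");
+ *		kutf_mempool_destroy(&pool);
+ *	}
+ */
+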
+#endif	/* _KERNEL_UTF_MEM_H_ */
diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_resultset.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_resultset.h
new file mode 100644
index 0000000..49ebeb4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_resultset.h
@@ -0,0 +1,181 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KERNEL_UTF_RESULTSET_H_
+#define _KERNEL_UTF_RESULTSET_H_
+
+/* kutf_resultset.h
+ * Functions and structures for handling test results and result sets.
+ *
+ * This section of the kernel UTF contains structures and functions used for the
+ * management of Results and Result Sets.
+ */
+
+/**
+ * enum kutf_result_status - Status values for a single test result.
+ * @KUTF_RESULT_BENCHMARK:	Result is a meta-result containing benchmark
+ *                              results.
+ * @KUTF_RESULT_SKIP:		The test was skipped.
+ * @KUTF_RESULT_UNKNOWN:	The test has an unknown result.
+ * @KUTF_RESULT_PASS:		The test result passed.
+ * @KUTF_RESULT_DEBUG:		The test result passed, but raised a debug
+ *                              message.
+ * @KUTF_RESULT_INFO:		The test result passed, but raised
+ *                              an informative message.
+ * @KUTF_RESULT_WARN:		The test result passed, but raised a warning
+ *                              message.
+ * @KUTF_RESULT_FAIL:		The test result failed with a non-fatal error.
+ * @KUTF_RESULT_FATAL:		The test result failed with a fatal error.
+ * @KUTF_RESULT_ABORT:		The test result failed due to a non-UTF
+ *                              assertion failure.
+ * @KUTF_RESULT_USERDATA:	User data is ready to be read,
+ *                              this is not seen outside the kernel
+ * @KUTF_RESULT_USERDATA_WAIT:	Waiting for user data to be sent,
+ *                              this is not seen outside the kernel
+ * @KUTF_RESULT_TEST_FINISHED:	The test has finished, no more results will
+ *                              be produced. This is not seen outside kutf
+ */
+enum kutf_result_status {
+	KUTF_RESULT_BENCHMARK = -3,
+	KUTF_RESULT_SKIP    = -2,
+	KUTF_RESULT_UNKNOWN = -1,
+
+	KUTF_RESULT_PASS    = 0,
+	KUTF_RESULT_DEBUG   = 1,
+	KUTF_RESULT_INFO    = 2,
+	KUTF_RESULT_WARN    = 3,
+	KUTF_RESULT_FAIL    = 4,
+	KUTF_RESULT_FATAL   = 5,
+	KUTF_RESULT_ABORT   = 6,
+
+	KUTF_RESULT_USERDATA      = 7,
+	KUTF_RESULT_USERDATA_WAIT = 8,
+	KUTF_RESULT_TEST_FINISHED = 9
+};
+
+/* The maximum size of a kutf_result_status result when
+ * converted to a string
+ */
+#define KUTF_ERROR_MAX_NAME_SIZE 21
+
+#ifdef __KERNEL__
+
+#include <kutf/kutf_mem.h>
+#include <linux/wait.h>
+
+struct kutf_context;
+
+/**
+ * struct kutf_result - Represents a single test result.
+ * @node:	Next result in the list of results.
+ * @status:	The status summary (pass / warn / fail / etc).
+ * @message:	A more verbose status message.
+ */
+struct kutf_result {
+	struct list_head            node;
+	enum kutf_result_status     status;
+	const char                  *message;
+};
+
+/**
+ * KUTF_RESULT_SET_WAITING_FOR_INPUT - Test is waiting for user data
+ *
+ * This flag is set within a struct kutf_result_set whenever the test is blocked
+ * waiting for user data. Attempts to dequeue results when this flag is set
+ * will cause a dummy %KUTF_RESULT_USERDATA_WAIT result to be produced. This
+ * is used to output a warning message and end of file.
+ */
+#define KUTF_RESULT_SET_WAITING_FOR_INPUT 1
+
+/**
+ * struct kutf_result_set - Represents a set of results.
+ * @results:	List head of a struct kutf_result list for storing the results
+ * @waitq:	Wait queue signalled whenever new results are added.
+ * @flags:	Flags see %KUTF_RESULT_SET_WAITING_FOR_INPUT
+ */
+struct kutf_result_set {
+	struct list_head          results;
+	wait_queue_head_t         waitq;
+	int                       flags;
+};
+
+/**
+ * kutf_create_result_set() - Create a new result set
+ *                            to which results can be added.
+ *
+ * Return: The created result set.
+ */
+struct kutf_result_set *kutf_create_result_set(void);
+
+/**
+ * kutf_add_result() - Add a result to the end of an existing result set.
+ *
+ * @context:	The kutf context
+ * @status:	The result status to add.
+ * @message:	The result message to add.
+ *
+ * Return: 0 if the result is successfully added. -ENOMEM if allocation fails.
+ */
+int kutf_add_result(struct kutf_context *context,
+		enum kutf_result_status status, const char *message);
+
+/**
+ * kutf_remove_result() - Remove a result from the head of a result set.
+ * @set:	The result set.
+ *
+ * This function will block until there is a result to read. The wait is
+ * interruptible, so this function will return with an ERR_PTR if interrupted.
+ *
+ * Return: result or ERR_PTR if interrupted
+ */
+struct kutf_result *kutf_remove_result(
+		struct kutf_result_set *set);
+
+/**
+ * kutf_destroy_result_set() - Free a previously created result set.
+ *
+ * @results:	The result set whose resources to free.
+ */
+void kutf_destroy_result_set(struct kutf_result_set *results);
+
+/**
+ * kutf_set_waiting_for_input() - The test is waiting for userdata
+ *
+ * @set: The result set to update
+ *
+ * Causes the result set to always have results and return a fake
+ * %KUTF_RESULT_USERDATA_WAIT result.
+ */
+void kutf_set_waiting_for_input(struct kutf_result_set *set);
+
+/**
+ * kutf_clear_waiting_for_input() - The test is no longer waiting for userdata
+ *
+ * @set: The result set to update
+ *
+ * Cancels the effect of kutf_set_waiting_for_input()
+ */
+void kutf_clear_waiting_for_input(struct kutf_result_set *set);
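+
+/*
+ * Illustrative sketch of the framework-side consumer pattern (tests normally
+ * add results indirectly through the kutf_test_*() functions); the handler
+ * warn_waiting_for_userdata() is hypothetical.
+ *
+ *	struct kutf_result *res = kutf_remove_result(set);
+ *
+ *	if (!IS_ERR(res) && res->status == KUTF_RESULT_USERDATA_WAIT)
+ *		warn_waiting_for_userdata();
+ */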
+
+#endif	/* __KERNEL__ */
+
+#endif	/* _KERNEL_UTF_RESULTSET_H_ */
diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_suite.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_suite.h
new file mode 100644
index 0000000..8d75f50
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_suite.h
@@ -0,0 +1,569 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KERNEL_UTF_SUITE_H_
+#define _KERNEL_UTF_SUITE_H_
+
+/* kutf_suite.h
+ * Functions for management of test suites.
+ *
+ * This collection of data structures, macros, and functions is used to
+ * create Test Suites, Tests within those Test Suites, and Fixture variants
+ * of each test.
+ */
+
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+
+#include <kutf/kutf_mem.h>
+#include <kutf/kutf_resultset.h>
+
+/* Arbitrary maximum size to prevent user space allocating too much kernel
+ * memory
+ */
+#define KUTF_MAX_LINE_LENGTH (1024u)
+
+/**
+ * Pseudo-flag indicating an absence of any specified test class. Note that
+ * tests should not be annotated with this constant as it is simply a zero
+ * value; tests without a more specific class must be marked with the flag
+ * KUTF_F_TEST_GENERIC.
+ */
+#define KUTF_F_TEST_NONE                ((unsigned int)(0))
+
+/**
+ * Class indicating this test is a smoke test.
+ * A given set of smoke tests should be quick to run, enabling rapid turn-around
+ * of "regress-on-commit" test runs.
+ */
+#define KUTF_F_TEST_SMOKETEST           ((unsigned int)(1 << 1))
+
+/**
+ * Class indicating this test is a performance test.
+ * These tests typically produce a performance metric, such as "time to run" or
+ * "frames per second",
+ */
+#define KUTF_F_TEST_PERFORMANCE         ((unsigned int)(1 << 2))
+
+/**
+ * Class indicating that this test is a deprecated test.
+ * These tests have typically been replaced by an alternative test which is
+ * more efficient, or has better coverage.
+ */
+#define KUTF_F_TEST_DEPRECATED          ((unsigned int)(1 << 3))
+
+/**
+ * Class indicating that this test is a known failure.
+ * These tests have typically been run and failed, but marking them as a known
+ * failure means it is easier to triage results.
+ *
+ * It is typically more convenient to triage known failures using the
+ * results database and web UI, as this means there is no need to modify the
+ * test code.
+ */
+#define KUTF_F_TEST_EXPECTED_FAILURE    ((unsigned int)(1 << 4))
+
+/**
+ * Class indicating that this test is a generic test, which is not a member of
+ * a more specific test class. Tests which are not created with a specific set
+ * of filter flags by the user are assigned this test class by default.
+ */
+#define KUTF_F_TEST_GENERIC             ((unsigned int)(1 << 5))
+
+/**
+ * Class indicating this test is a resource allocation failure test.
+ * A resource allocation failure test will test that an error code is
+ * correctly propagated when an allocation fails.
+ */
+#define KUTF_F_TEST_RESFAIL             ((unsigned int)(1 << 6))
+
+/**
+ * Additional flag indicating that this test is an expected failure when
+ * run in resource failure mode. These tests are never run when running
+ * the low resource mode.
+ */
+#define KUTF_F_TEST_EXPECTED_FAILURE_RF ((unsigned int)(1 << 7))
+
+/**
+ * Flag reserved for user-defined filter zero.
+ */
+#define KUTF_F_TEST_USER_0 ((unsigned int)(1 << 24))
+
+/**
+ * Flag reserved for user-defined filter one.
+ */
+#define KUTF_F_TEST_USER_1 ((unsigned int)(1 << 25))
+
+/**
+ * Flag reserved for user-defined filter two.
+ */
+#define KUTF_F_TEST_USER_2 ((unsigned int)(1 << 26))
+
+/**
+ * Flag reserved for user-defined filter three.
+ */
+#define KUTF_F_TEST_USER_3 ((unsigned int)(1 << 27))
+
+/**
+ * Flag reserved for user-defined filter four.
+ */
+#define KUTF_F_TEST_USER_4 ((unsigned int)(1 << 28))
+
+/**
+ * Flag reserved for user-defined filter five.
+ */
+#define KUTF_F_TEST_USER_5 ((unsigned int)(1 << 29))
+
+/**
+ * Flag reserved for user-defined filter six.
+ */
+#define KUTF_F_TEST_USER_6 ((unsigned int)(1 << 30))
+
+/**
+ * Flag reserved for user-defined filter seven.
+ */
+#define KUTF_F_TEST_USER_7 ((unsigned int)(1U << 31))
+
+/**
+ * Pseudo-flag indicating that all test classes should be executed.
+ */
+#define KUTF_F_TEST_ALL                 ((unsigned int)(0xFFFFFFFFU))
+
+/**
+ * union kutf_callback_data - Union used to store test callback data
+ * @ptr_value:		pointer to the location where test callback data
+ *                      are stored
+ * @u32_value:		a number which represents test callback data
+ */
+union kutf_callback_data {
+	void *ptr_value;
+	u32  u32_value;
+};
+
+/**
+ * struct kutf_userdata_line - A line of user data to be returned to the user
+ * @node:   struct list_head to link this into a list
+ * @str:    The line of user data to return to user space
+ * @size:   The number of bytes within @str
+ */
+struct kutf_userdata_line {
+	struct list_head node;
+	char *str;
+	size_t size;
+};
+
+/**
+ * KUTF_USERDATA_WARNING_OUTPUT - Flag specifying that a warning has been output
+ *
+ * If user space reads the "run" file while the test is waiting for user data,
+ * then the framework will output a warning message and set this flag within
+ * struct kutf_userdata. A subsequent read will then simply return an end of
+ * file condition rather than outputting the warning again. The upshot of this
+ * is that simply running 'cat' on a test which requires user data will produce
+ * the warning followed by 'cat' exiting due to EOF - which is much more user
+ * friendly than blocking indefinitely waiting for user data.
+ */
+#define KUTF_USERDATA_WARNING_OUTPUT  1
+
+/**
+ * struct kutf_userdata - Structure holding user data
+ * @flags:       See %KUTF_USERDATA_WARNING_OUTPUT
+ * @input_head:  List of struct kutf_userdata_line containing user data
+ *               to be read by the kernel space test.
+ * @input_waitq: Wait queue signalled when there is new user data to be
+ *               read by the kernel space test.
+ */
+struct kutf_userdata {
+	unsigned long flags;
+	struct list_head input_head;
+	wait_queue_head_t input_waitq;
+};
+
+/**
+ * struct kutf_context - Structure representing a kernel test context
+ * @kref:		Refcount for number of users of this context
+ * @suite:		Convenience pointer to the suite this context
+ *                      is running
+ * @test_fix:		The fixture that is being run in this context
+ * @fixture_pool:	The memory pool used for the duration of
+ *                      the fixture/test context.
+ * @fixture:		The user provided fixture structure.
+ * @fixture_index:	The index (id) of the current fixture.
+ * @fixture_name:	The name of the current fixture (or NULL if unnamed).
+ * @test_data:		Any user private data associated with this test
+ * @result_set:		All the results logged by this test context
+ * @status:		The status of the currently running fixture.
+ * @expected_status:	The expected status on exit of the currently
+ *                      running fixture.
+ * @work:		Work item to enqueue onto the work queue to run the test
+ * @userdata:		Structure containing the user data for the test to read
+ */
+struct kutf_context {
+	struct kref                     kref;
+	struct kutf_suite               *suite;
+	struct kutf_test_fixture        *test_fix;
+	struct kutf_mempool             fixture_pool;
+	void                            *fixture;
+	unsigned int                    fixture_index;
+	const char                      *fixture_name;
+	union kutf_callback_data        test_data;
+	struct kutf_result_set          *result_set;
+	enum kutf_result_status         status;
+	enum kutf_result_status         expected_status;
+
+	struct work_struct              work;
+	struct kutf_userdata            userdata;
+};
+
+/**
+ * struct kutf_suite - Structure representing a kernel test suite
+ * @app:			The application this suite belongs to.
+ * @name:			The name of this suite.
+ * @suite_data:			Any user private data associated with this
+ *                              suite.
+ * @create_fixture:		Function used to create a new fixture instance
+ * @remove_fixture:		Function used to destroy a fixture instance
+ * @fixture_variants:		The number of variants (must be at least 1).
+ * @suite_default_flags:	Suite global filter flags which are set on
+ *                              all tests.
+ * @node:			List node for suite_list
+ * @dir:			The debugfs directory for this suite
+ * @test_list:			List head to store all the tests which are
+ *                              part of this suite
+ */
+struct kutf_suite {
+	struct kutf_application        *app;
+	const char                     *name;
+	union kutf_callback_data       suite_data;
+	void *(*create_fixture)(struct kutf_context *context);
+	void  (*remove_fixture)(struct kutf_context *context);
+	unsigned int                   fixture_variants;
+	unsigned int                   suite_default_flags;
+	struct list_head               node;
+	struct dentry                  *dir;
+	struct list_head               test_list;
+};
+
+/* ============================================================================
+	Application functions
+============================================================================ */
+
+/**
+ * kutf_create_application() - Create an in kernel test application.
+ * @name:	The name of the test application.
+ *
+ * Return: pointer to the kutf_application on success or NULL
+ * on failure
+ */
+struct kutf_application *kutf_create_application(const char *name);
+
+/**
+ * kutf_destroy_application() - Destroy an in kernel test application.
+ *
+ * @app:	The test application to destroy.
+ */
+void kutf_destroy_application(struct kutf_application *app);
+
+/* ============================================================================
+	Suite functions
+============================================================================ */
+
+/**
+ * kutf_create_suite() - Create a kernel test suite.
+ * @app:		The test application to create the suite in.
+ * @name:		The name of the suite.
+ * @fixture_count:	The number of fixtures to run over the test
+ *                      functions in this suite
+ * @create_fixture:	Callback used to create a fixture. The returned value
+ *                      is stored in the fixture pointer in the context for
+ *                      use in the test functions.
+ * @remove_fixture:	Callback used to remove a previously created fixture.
+ *
+ * Suite names must be unique. Should two suites with the same name be
+ * registered with the same application then this function will fail; if they
+ * are registered with different applications then the function will not detect
+ * this and the call will succeed.
+ *
+ * Return: pointer to the created kutf_suite on success or NULL
+ * on failure
+ */
+struct kutf_suite *kutf_create_suite(
+		struct kutf_application *app,
+		const char *name,
+		unsigned int fixture_count,
+		void *(*create_fixture)(struct kutf_context *context),
+		void (*remove_fixture)(struct kutf_context *context));
+
+/**
+ * kutf_create_suite_with_filters() - Create a kernel test suite with user
+ *                                    defined default filters.
+ * @app:		The test application to create the suite in.
+ * @name:		The name of the suite.
+ * @fixture_count:	The number of fixtures to run over the test
+ *                      functions in this suite
+ * @create_fixture:	Callback used to create a fixture. The returned value
+ *			is stored in the fixture pointer in the context for
+ *			use in the test functions.
+ * @remove_fixture:	Callback used to remove a previously created fixture.
+ * @filters:		Filters to apply to a test if it doesn't provide its own
+ *
+ * Suite names must be unique. Should two suites with the same name be
+ * registered with the same application then this function will fail; if they
+ * are registered with different applications then the function will not detect
+ * this and the call will succeed.
+ *
+ * Return: pointer to the created kutf_suite on success or NULL on failure
+ */
+struct kutf_suite *kutf_create_suite_with_filters(
+		struct kutf_application *app,
+		const char *name,
+		unsigned int fixture_count,
+		void *(*create_fixture)(struct kutf_context *context),
+		void (*remove_fixture)(struct kutf_context *context),
+		unsigned int filters);
+
+/**
+ * kutf_create_suite_with_filters_and_data() - Create a kernel test suite with
+ *                                             user defined default filters.
+ * @app:		The test application to create the suite in.
+ * @name:		The name of the suite.
+ * @fixture_count:	The number of fixtures to run over the test
+ *			functions in this suite
+ * @create_fixture:	Callback used to create a fixture. The returned value
+ *			is stored in the fixture pointer in the context for
+ *			use in the test functions.
+ * @remove_fixture:	Callback used to remove a previously created fixture.
+ * @filters:		Filters to apply to a test if it doesn't provide its own
+ * @suite_data:		Suite specific callback data, provided during the
+ *			running of the test in the kutf_context
+ *
+ * Return: pointer to the created kutf_suite on success or NULL
+ * on failure
+ */
+struct kutf_suite *kutf_create_suite_with_filters_and_data(
+		struct kutf_application *app,
+		const char *name,
+		unsigned int fixture_count,
+		void *(*create_fixture)(struct kutf_context *context),
+		void (*remove_fixture)(struct kutf_context *context),
+		unsigned int filters,
+		union kutf_callback_data suite_data);
+
+/**
+ * kutf_add_test() - Add a test to a kernel test suite.
+ * @suite:	The suite to add the test to.
+ * @id:		The ID of the test.
+ * @name:	The name of the test.
+ * @execute:	Callback to the test function to run.
+ *
+ * Note: As no filters are provided the test will use the suite filters instead
+ */
+void kutf_add_test(struct kutf_suite *suite,
+		unsigned int id,
+		const char *name,
+		void (*execute)(struct kutf_context *context));
+
+/**
+ * kutf_add_test_with_filters() - Add a test to a kernel test suite with filters
+ * @suite:	The suite to add the test to.
+ * @id:		The ID of the test.
+ * @name:	The name of the test.
+ * @execute:	Callback to the test function to run.
+ * @filters:	A set of filtering flags, assigning test categories.
+ */
+void kutf_add_test_with_filters(struct kutf_suite *suite,
+		unsigned int id,
+		const char *name,
+		void (*execute)(struct kutf_context *context),
+		unsigned int filters);
+
+/**
+ * kutf_add_test_with_filters_and_data() - Add a test to a kernel test suite
+ *					   with filters.
+ * @suite:	The suite to add the test to.
+ * @id:		The ID of the test.
+ * @name:	The name of the test.
+ * @execute:	Callback to the test function to run.
+ * @filters:	A set of filtering flags, assigning test categories.
+ * @test_data:	Test specific callback data, provided during the
+ *		running of the test in the kutf_context
+ */
+void kutf_add_test_with_filters_and_data(
+		struct kutf_suite *suite,
+		unsigned int id,
+		const char *name,
+		void (*execute)(struct kutf_context *context),
+		unsigned int filters,
+		union kutf_callback_data test_data);
+
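+/*
+ * Illustrative sketch of registering a minimal application, suite and test;
+ * all of the names and callbacks shown are hypothetical.
+ *
+ *	static void *create_fix(struct kutf_context *context)
+ *	{
+ *		return kutf_mempool_alloc(&context->fixture_pool, 16);
+ *	}
+ *
+ *	static void remove_fix(struct kutf_context *context) { }
+ *
+ *	static void my_test(struct kutf_context *context)
+ *	{
+ *		kutf_test_pass(context, "ok");
+ *	}
+ *
+ *	struct kutf_application *app;
+ *	struct kutf_suite *suite;
+ *
+ *	app = kutf_create_application("example_app");
+ *	suite = kutf_create_suite(app, "example_suite", 1,
+ *			create_fix, remove_fix);
+ *	kutf_add_test(suite, 0x0, "example_test", my_test);
+ */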
+
+/* ============================================================================
+	Test functions
+============================================================================ */
+/**
+ * kutf_test_log_result_external() - Log a result which has been created
+ *                                   externally, in a standard form
+ *                                   recognized by the log parser.
+ * @context:	The test context the test is running in
+ * @message:	The message for this result
+ * @new_status:	The result status of this log message
+ */
+void kutf_test_log_result_external(
+	struct kutf_context *context,
+	const char *message,
+	enum kutf_result_status new_status);
+
+/**
+ * kutf_test_expect_abort() - Tell the kernel that you expect the current
+ *                            fixture to produce an abort.
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_expect_abort(struct kutf_context *context);
+
+/**
+ * kutf_test_expect_fatal() - Tell the kernel that you expect the current
+ *                            fixture to produce a fatal error.
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_expect_fatal(struct kutf_context *context);
+
+/**
+ * kutf_test_expect_fail() - Tell the kernel that you expect the current
+ *                           fixture to fail.
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_expect_fail(struct kutf_context *context);
+
+/**
+ * kutf_test_expect_warn() - Tell the kernel that you expect the current
+ *                           fixture to produce a warning.
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_expect_warn(struct kutf_context *context);
+
+/**
+ * kutf_test_expect_pass() - Tell the kernel that you expect the current
+ *                           fixture to pass.
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_expect_pass(struct kutf_context *context);
+
+/**
+ * kutf_test_skip() - Tell the kernel that the test should be skipped.
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_skip(struct kutf_context *context);
+
+/**
+ * kutf_test_skip_msg() - Tell the kernel that this test has been skipped,
+ *                        supplying a reason string.
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the reason for the skip.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_skip_msg(struct kutf_context *context, const char *message);
+
+/**
+ * kutf_test_pass() - Tell the kernel that this test has passed.
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the reason for the pass.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_pass(struct kutf_context *context, char const *message);
+
+/**
+ * kutf_test_debug() - Send a debug message
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the debug information.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_debug(struct kutf_context *context, char const *message);
+
+/**
+ * kutf_test_info() - Send an information message
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the information message.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_info(struct kutf_context *context, char const *message);
+
+/**
+ * kutf_test_warn() - Send a warning message
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the warning message.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_warn(struct kutf_context *context, char const *message);
+
+/**
+ * kutf_test_fail() - Tell the kernel that a test has failed
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the failure message.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_fail(struct kutf_context *context, char const *message);
+
+/**
+ * kutf_test_fatal() - Tell the kernel that a test has triggered a fatal error
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the fatal error message.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_fatal(struct kutf_context *context, char const *message);
+
+/**
+ * kutf_test_abort() - Tell the kernel that a test triggered an abort in the test
+ *
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_abort(struct kutf_context *context);
+
+#endif	/* _KERNEL_UTF_SUITE_H_ */
diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_utils.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_utils.h
new file mode 100644
index 0000000..25b8285
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_utils.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KERNEL_UTF_UTILS_H_
+#define _KERNEL_UTF_UTILS_H_
+
+/* kutf_utils.h
+ * Utilities for the kernel UTF test infrastructure.
+ *
+ * This collection of library functions is provided for use by kernel UTF
+ * and users of kernel UTF which don't directly fit within the other
+ * code modules.
+ */
+
+#include <kutf/kutf_mem.h>
+
+/**
+ * Maximum size of the message strings within kernel UTF, messages longer then
+ * this will be truncated.
+ */
+#define KUTF_MAX_DSPRINTF_LEN	1024
+
+/**
+ * kutf_dsprintf() - dynamic sprintf
+ * @pool:	memory pool to allocate from
+ * @fmt:	The format string describing the string to document.
+ * @...:	The parameters to feed into the format string.
+ *
+ * This function implements sprintf which dynamically allocates memory to store
+ * the string. The library will free the memory containing the string when the
+ * result set is cleared or destroyed.
+ *
+ * Note: The returned string may be truncated to fit an internal temporary
+ * buffer, which is KUTF_MAX_DSPRINTF_LEN bytes in length.
+ *
+ * Return: Returns pointer to allocated string, or NULL on error.
+ */
+const char *kutf_dsprintf(struct kutf_mempool *pool,
+		const char *fmt, ...);
+
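+/*
+ * Illustrative use: building a dynamic failure message whose lifetime
+ * matches the test run.
+ *
+ *	kutf_test_fail(context, kutf_dsprintf(&context->fixture_pool,
+ *			"unexpected value: %d", ret));
+ */
+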
+#endif	/* _KERNEL_UTF_UTILS_H_ */
diff --git a/drivers/gpu/arm/midgard/tests/kutf/Kbuild b/drivers/gpu/arm/midgard/tests/kutf/Kbuild
new file mode 100644
index 0000000..2531d41
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/Kbuild
@@ -0,0 +1,26 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+ccflags-y += -I$(src)/../include
+
+obj-$(CONFIG_MALI_KUTF) += kutf.o
+
+kutf-y := kutf_mem.o kutf_resultset.o kutf_suite.o kutf_utils.o kutf_helpers.o kutf_helpers_user.o
diff --git a/drivers/gpu/arm/midgard/tests/kutf/Kconfig b/drivers/gpu/arm/midgard/tests/kutf/Kconfig
new file mode 100644
index 0000000..0cdb474
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/Kconfig
@@ -0,0 +1,28 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+config MALI_KUTF
+ tristate "Mali Kernel Unit Test Framework"
+ default m
+ help
+   Enables the MALI testing framework. To compile it as a module,
+   choose M here - this will generate a single module called kutf.
diff --git a/drivers/gpu/arm/midgard/tests/kutf/Makefile b/drivers/gpu/arm/midgard/tests/kutf/Makefile
new file mode 100644
index 0000000..d848e87
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/Makefile
@@ -0,0 +1,35 @@
+#
+# (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+# linux build system bootstrap for out-of-tree module
+
+# default to building for the host
+ARCH ?= $(shell uname -m)
+
+ifeq ($(KDIR),)
+$(error Must specify KDIR to point to the kernel to target)
+endif
+
+all:
+	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) $(SCONS_CONFIGS) EXTRA_CFLAGS=-I$(CURDIR)/../include modules
+
+clean:
+	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
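+
+# Example invocation (illustrative), building against the running kernel's
+# build tree:
+#   make KDIR=/lib/modules/$(uname -r)/build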
diff --git a/drivers/gpu/arm/midgard/tests/kutf/build.bp b/drivers/gpu/arm/midgard/tests/kutf/build.bp
new file mode 100644
index 0000000..f0c7a0c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/build.bp
@@ -0,0 +1,33 @@
+/*
+ * Copyright:
+ * ----------------------------------------------------------------------------
+ * This confidential and proprietary software may be used only as authorized
+ * by a licensing agreement from ARM Limited.
+ *      (C) COPYRIGHT 2018-2019 ARM Limited, ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorized copies and
+ * copies may only be made to the extent permitted by a licensing agreement
+ * from ARM Limited.
+ * ----------------------------------------------------------------------------
+ */
+
+bob_kernel_module {
+    name: "kutf",
+    defaults: [
+        "kernel_defaults",
+        "kutf_includes",
+    ],
+    srcs: [
+        "Kbuild",
+        "kutf_helpers.c",
+        "kutf_helpers_user.c",
+        "kutf_mem.c",
+        "kutf_resultset.c",
+        "kutf_suite.c",
+        "kutf_utils.c",
+    ],
+    kbuild_options: ["CONFIG_MALI_KUTF=m"],
+    enabled: false,
+    base_build_kutf: {
+        enabled: true,
+    },
+}
diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers.c
new file mode 100644
index 0000000..cab5add6
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers.c
@@ -0,0 +1,129 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* Kernel UTF test helpers */
+#include <kutf/kutf_helpers.h>
+
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/preempt.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+
+static DEFINE_SPINLOCK(kutf_input_lock);
+
+static bool pending_input(struct kutf_context *context)
+{
+	bool input_pending;
+
+	spin_lock(&kutf_input_lock);
+
+	input_pending = !list_empty(&context->userdata.input_head);
+
+	spin_unlock(&kutf_input_lock);
+
+	return input_pending;
+}
+
+char *kutf_helper_input_dequeue(struct kutf_context *context, size_t *str_size)
+{
+	struct kutf_userdata_line *line;
+
+	spin_lock(&kutf_input_lock);
+
+	while (list_empty(&context->userdata.input_head)) {
+		int err;
+
+		kutf_set_waiting_for_input(context->result_set);
+
+		spin_unlock(&kutf_input_lock);
+
+		err = wait_event_interruptible(context->userdata.input_waitq,
+				pending_input(context));
+
+		if (err)
+			return ERR_PTR(-EINTR);
+
+		spin_lock(&kutf_input_lock);
+	}
+
+	line = list_first_entry(&context->userdata.input_head,
+			struct kutf_userdata_line, node);
+	if (line->str) {
+		/*
+		 * Unless it is the end-of-input marker,
+		 * remove it from the list
+		 */
+		list_del(&line->node);
+	}
+
+	spin_unlock(&kutf_input_lock);
+
+	if (str_size)
+		*str_size = line->size;
+	return line->str;
+}
+
+int kutf_helper_input_enqueue(struct kutf_context *context,
+		const char __user *str, size_t size)
+{
+	struct kutf_userdata_line *line;
+
+	line = kutf_mempool_alloc(&context->fixture_pool,
+			sizeof(*line) + size + 1);
+	if (!line)
+		return -ENOMEM;
+	if (str) {
+		unsigned long bytes_not_copied;
+
+		line->size = size;
+		line->str = (void *)(line + 1);
+		bytes_not_copied = copy_from_user(line->str, str, size);
+		if (bytes_not_copied != 0)
+			return -EFAULT;
+		/* Zero terminate the string */
+		line->str[size] = '\0';
+	} else {
+		/* This is used to mark the end of input */
+		WARN_ON(size);
+		line->size = 0;
+		line->str = NULL;
+	}
+
+	spin_lock(&kutf_input_lock);
+
+	list_add_tail(&line->node, &context->userdata.input_head);
+
+	kutf_clear_waiting_for_input(context->result_set);
+
+	spin_unlock(&kutf_input_lock);
+
+	wake_up(&context->userdata.input_waitq);
+
+	return 0;
+}
+
+void kutf_helper_input_enqueue_end_of_data(struct kutf_context *context)
+{
+	kutf_helper_input_enqueue(context, NULL, 0);
+}
diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers_user.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers_user.c
new file mode 100644
index 0000000..108fa82
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers_user.c
@@ -0,0 +1,468 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* Kernel UTF test helpers that mirror those for kutf-userside */
+#include <kutf/kutf_helpers_user.h>
+#include <kutf/kutf_helpers.h>
+#include <kutf/kutf_utils.h>
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+static const char *valtype_names[] = {
+	"INVALID",
+	"U64",
+	"STR",
+};
+
+static const char *get_val_type_name(enum kutf_helper_valtype valtype)
+{
+	/* enums can be signed or unsigned (implementation dependent), so
+	 * cast to unsigned to prevent:
+	 * a) "<0 comparison on unsigned type" warning - if we did both upper
+	 *    and lower bound check
+	 * b) incorrect range checking if it was a signed type - if we did
+	 *    upper bound check only */
+	unsigned int type_idx = (unsigned int)valtype;
+
+	if (type_idx >= (unsigned int)KUTF_HELPER_VALTYPE_COUNT)
+		type_idx = (unsigned int)KUTF_HELPER_VALTYPE_INVALID;
+
+	return valtype_names[type_idx];
+}
+
+/* Check up to str_len chars of val_str to see if it's a valid value name:
+ *
+ * - Has between 1 and KUTF_HELPER_MAX_VAL_NAME_LEN characters before the
+ *   \0 terminator
+ * - Each char is in the character set [A-Z0-9_] */
+static int validate_val_name(const char *val_str, int str_len)
+{
+	int i = 0;
+
+	for (i = 0; str_len && i <= KUTF_HELPER_MAX_VAL_NAME_LEN && val_str[i] != '\0'; ++i, --str_len) {
+		char val_chr = val_str[i];
+
+		if (val_chr >= 'A' && val_chr <= 'Z')
+			continue;
+		if (val_chr >= '0' && val_chr <= '9')
+			continue;
+		if (val_chr == '_')
+			continue;
+
+		/* Character not in the set [A-Z0-9_] - report error */
+		return 1;
+	}
+
+	/* Names of 0 length are not valid */
+	if (i == 0)
+		return 1;
+	/* Length greater than KUTF_HELPER_MAX_VAL_NAME_LEN not allowed */
+	if (i > KUTF_HELPER_MAX_VAL_NAME_LEN || (i == KUTF_HELPER_MAX_VAL_NAME_LEN && val_str[i] != '\0'))
+		return 1;
+
+	return 0;
+}
+
+/* Find the length of the valid part of the string when it will be in quotes
+ * e.g. "str"
+ *
+ * That is, before any '\\', '\n' or '"' characters. This is so we don't have
+ * to escape the string */
+static int find_quoted_string_valid_len(const char *str)
+{
+	char *ptr;
+	const char *check_chars = "\\\n\"";
+
+	ptr = strpbrk(str, check_chars);
+	if (ptr)
+		return (int)(ptr-str);
+
+	return (int)strlen(str);
+}
+
+static int kutf_helper_userdata_enqueue(struct kutf_context *context,
+		const char *str)
+{
+	char *str_copy;
+	size_t len;
+	int err;
+
+	len = strlen(str)+1;
+
+	str_copy = kutf_mempool_alloc(&context->fixture_pool, len);
+	if (!str_copy)
+		return -ENOMEM;
+
+	strcpy(str_copy, str);
+
+	err = kutf_add_result(context, KUTF_RESULT_USERDATA, str_copy);
+
+	return err;
+}
+
+#define MAX_U64_HEX_LEN 16
+/* (Name size) + ("=0x" size) + (64-bit hex value size) + (terminator) */
+#define NAMED_U64_VAL_BUF_SZ (KUTF_HELPER_MAX_VAL_NAME_LEN + 3 + MAX_U64_HEX_LEN + 1)
+
+int kutf_helper_send_named_u64(struct kutf_context *context,
+		const char *val_name, u64 val)
+{
+	int ret = 1;
+	char msgbuf[NAMED_U64_VAL_BUF_SZ];
+	const char *errmsg = NULL;
+
+	if (validate_val_name(val_name, KUTF_HELPER_MAX_VAL_NAME_LEN + 1)) {
+		errmsg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to send u64 value named '%s': Invalid value name", val_name);
+		goto out_err;
+	}
+
+	ret = snprintf(msgbuf, NAMED_U64_VAL_BUF_SZ, "%s=0x%llx", val_name, val);
+	if (ret >= NAMED_U64_VAL_BUF_SZ || ret < 0) {
+		errmsg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to send u64 value named '%s': snprintf() problem buffer size==%d ret=%d",
+				val_name, NAMED_U64_VAL_BUF_SZ, ret);
+		goto out_err;
+	}
+
+	ret = kutf_helper_userdata_enqueue(context, msgbuf);
+	if (ret) {
+		errmsg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to send u64 value named '%s': send returned %d",
+				val_name, ret);
+		goto out_err;
+	}
+
+	return ret;
+out_err:
+	kutf_test_fail(context, errmsg);
+	return ret;
+}
+EXPORT_SYMBOL(kutf_helper_send_named_u64);
+
+#define NAMED_VALUE_SEP "="
+#define NAMED_STR_START_DELIM NAMED_VALUE_SEP "\""
+#define NAMED_STR_END_DELIM "\""
+
+int kutf_helper_max_str_len_for_kern(const char *val_name,
+		int kern_buf_sz)
+{
+	const int val_name_len = strlen(val_name);
+	const int start_delim_len = strlen(NAMED_STR_START_DELIM);
+	const int end_delim_len = strlen(NAMED_STR_END_DELIM);
+	int max_msg_len = kern_buf_sz;
+	int max_str_len;
+
+	max_str_len = max_msg_len - val_name_len - start_delim_len -
+		end_delim_len;
+
+	return max_str_len;
+}
+EXPORT_SYMBOL(kutf_helper_max_str_len_for_kern);
+
+int kutf_helper_send_named_str(struct kutf_context *context,
+		const char *val_name,
+		const char *val_str)
+{
+	int val_str_len;
+	int str_buf_sz;
+	char *str_buf = NULL;
+	int ret = 1;
+	char *copy_ptr;
+	int val_name_len;
+	int start_delim_len = strlen(NAMED_STR_START_DELIM);
+	int end_delim_len = strlen(NAMED_STR_END_DELIM);
+	const char *errmsg = NULL;
+
+	if (validate_val_name(val_name, KUTF_HELPER_MAX_VAL_NAME_LEN + 1)) {
+		errmsg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to send u64 value named '%s': Invalid value name", val_name);
+		goto out_err;
+	}
+	val_name_len = strlen(val_name);
+
+	val_str_len = find_quoted_string_valid_len(val_str);
+
+	/* (name length) + ("=\"" length) + (val_str len) + ("\"" length) + terminator */
+	str_buf_sz = val_name_len + start_delim_len + val_str_len + end_delim_len + 1;
+
+	/* Using kmalloc() here instead of mempool since we know we need to free
+	 * before we return */
+	str_buf = kmalloc(str_buf_sz, GFP_KERNEL);
+	if (!str_buf) {
+		errmsg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to send str value named '%s': kmalloc failed, str_buf_sz=%d",
+				val_name, str_buf_sz);
+		goto out_err;
+	}
+	copy_ptr = str_buf;
+
+	/* Manually copy each string component instead of using snprintf()
+	 * because val_str may need to be cut short, and this needs less
+	 * error path handling */
+
+	/* name */
+	memcpy(copy_ptr, val_name, val_name_len);
+	copy_ptr += val_name_len;
+
+	/* str start delimiter */
+	memcpy(copy_ptr, NAMED_STR_START_DELIM, start_delim_len);
+	copy_ptr += start_delim_len;
+
+	/* str value */
+	memcpy(copy_ptr, val_str, val_str_len);
+	copy_ptr += val_str_len;
+
+	/* str end delimiter */
+	memcpy(copy_ptr, NAMED_STR_END_DELIM, end_delim_len);
+	copy_ptr += end_delim_len;
+
+	/* Terminator */
+	*copy_ptr = '\0';
+
+	ret = kutf_helper_userdata_enqueue(context, str_buf);
+
+	if (ret) {
+		errmsg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to send str value named '%s': send returned %d",
+				val_name, ret);
+		goto out_err;
+	}
+
+	kfree(str_buf);
+	return ret;
+
+out_err:
+	kutf_test_fail(context, errmsg);
+	kfree(str_buf);
+	return ret;
+}
+EXPORT_SYMBOL(kutf_helper_send_named_str);
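+
+/*
+ * Illustrative usage of kutf_helper_send_named_str(), not part of this
+ * patch; 'context' and 'msg' are assumed to come from the surrounding test,
+ * and the value name is only an example. Note that the value sent is cut
+ * short at the first '\', '\n' or '"' character, as computed by
+ * find_quoted_string_valid_len():
+ *
+ *	if (kutf_helper_send_named_str(context, "STATUS_MSG", msg))
+ *		return;
+ */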
+
+int kutf_helper_receive_named_val(
+		struct kutf_context *context,
+		struct kutf_helper_named_val *named_val)
+{
+	size_t recv_sz;
+	char *recv_str;
+	char *search_ptr;
+	char *name_str = NULL;
+	int name_len;
+	int strval_len;
+	enum kutf_helper_valtype type = KUTF_HELPER_VALTYPE_INVALID;
+	char *strval = NULL;
+	u64 u64val = 0;
+	int err = KUTF_HELPER_ERR_INVALID_VALUE;
+
+	recv_str = kutf_helper_input_dequeue(context, &recv_sz);
+	if (!recv_str)
+		return -EBUSY;
+	else if (IS_ERR(recv_str))
+		return PTR_ERR(recv_str);
+
+	/* Find the '=', grab the name and validate it */
+	search_ptr = strnchr(recv_str, recv_sz, NAMED_VALUE_SEP[0]);
+	if (search_ptr) {
+		name_len = search_ptr - recv_str;
+		if (!validate_val_name(recv_str, name_len)) {
+			/* no need to reallocate - just modify string in place */
+			name_str = recv_str;
+			name_str[name_len] = '\0';
+
+			/* Move until after the '=' */
+			recv_str += (name_len + 1);
+			recv_sz -= (name_len + 1);
+		}
+	}
+	if (!name_str) {
+		pr_err("Invalid name part for received string '%s'\n",
+				recv_str);
+		return KUTF_HELPER_ERR_INVALID_NAME;
+	}
+
+	/* detect value type */
+	if (*recv_str == NAMED_STR_START_DELIM[1]) {
+		/* string start delimiter */
+		++recv_str;
+		--recv_sz;
+
+		/* Find end of string */
+		search_ptr = strnchr(recv_str, recv_sz, NAMED_STR_END_DELIM[0]);
+		if (search_ptr) {
+			strval_len = search_ptr - recv_str;
+			/* Validate the string to ensure it contains no quotes */
+			if (strval_len == find_quoted_string_valid_len(recv_str)) {
+				/* no need to reallocate - just modify string in place */
+				strval = recv_str;
+				strval[strval_len] = '\0';
+
+				/* Move until after the end delimiter */
+				recv_str += (strval_len + 1);
+				recv_sz -= (strval_len + 1);
+				type = KUTF_HELPER_VALTYPE_STR;
+			} else {
+				pr_err("String value contains invalid characters in rest of received string '%s'\n", recv_str);
+				err = KUTF_HELPER_ERR_CHARS_AFTER_VAL;
+			}
+		} else {
+			pr_err("End of string delimiter not found in rest of received string '%s'\n", recv_str);
+			err = KUTF_HELPER_ERR_NO_END_DELIMITER;
+		}
+	} else {
+		/* possibly a number value - strtoull will parse it */
+		err = kstrtoull(recv_str, 0, &u64val);
+		/* Unlike in userspace we can't get an end pointer, but if
+		 * kstrtoull() reads characters after the number it'll
+		 * report -EINVAL */
+		if (!err) {
+			int len_remain = strnlen(recv_str, recv_sz);
+
+			type = KUTF_HELPER_VALTYPE_U64;
+			recv_str += len_remain;
+			recv_sz -= len_remain;
+		} else {
+			/* special case: not a number, report as such */
+			pr_err("Rest of received string was not a numeric value or quoted string value: '%s'\n", recv_str);
+		}
+	}
+
+	if (type == KUTF_HELPER_VALTYPE_INVALID)
+		return err;
+
+	/* Any remaining characters - error */
+	if (strnlen(recv_str, recv_sz) != 0) {
+		pr_err("Characters remain after value of type %s: '%s'\n",
+				get_val_type_name(type), recv_str);
+		return KUTF_HELPER_ERR_CHARS_AFTER_VAL;
+	}
+
+	/* Success - write into the output structure */
+	switch (type) {
+	case KUTF_HELPER_VALTYPE_U64:
+		named_val->u.val_u64 = u64val;
+		break;
+	case KUTF_HELPER_VALTYPE_STR:
+		named_val->u.val_str = strval;
+		break;
+	default:
+		pr_err("Unreachable, fix kutf_helper_receive_named_val\n");
+		/* Coding error, report as though 'run' file failed */
+		return -EINVAL;
+	}
+
+	named_val->val_name = name_str;
+	named_val->type = type;
+
+	return KUTF_HELPER_ERR_NONE;
+}
+EXPORT_SYMBOL(kutf_helper_receive_named_val);
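+
+/*
+ * Illustrative usage, not part of this patch; 'context' is assumed to be a
+ * running test's context. KUTF_HELPER_ERR_NONE indicates a value was parsed:
+ *
+ *	struct kutf_helper_named_val val;
+ *
+ *	if (kutf_helper_receive_named_val(context, &val) != KUTF_HELPER_ERR_NONE)
+ *		return;
+ *	if (val.type == KUTF_HELPER_VALTYPE_U64)
+ *		pr_info("%s=0x%llx\n", val.val_name, val.u.val_u64);
+ */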
+
+#define DUMMY_MSG "<placeholder due to test fail>"
+int kutf_helper_receive_check_val(
+		struct kutf_helper_named_val *named_val,
+		struct kutf_context *context,
+		const char *expect_val_name,
+		enum kutf_helper_valtype expect_val_type)
+{
+	int err;
+
+	err = kutf_helper_receive_named_val(context, named_val);
+	if (err < 0) {
+		const char *msg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to receive value named '%s'",
+				expect_val_name);
+		kutf_test_fail(context, msg);
+		return err;
+	} else if (err > 0) {
+		const char *msg = kutf_dsprintf(&context->fixture_pool,
+				"Named-value parse error when expecting value named '%s'",
+				expect_val_name);
+		kutf_test_fail(context, msg);
+		goto out_fail_and_fixup;
+	}
+
+	if (strcmp(named_val->val_name, expect_val_name) != 0) {
+		const char *msg = kutf_dsprintf(&context->fixture_pool,
+				"Expecting to receive value named '%s' but got '%s'",
+				expect_val_name, named_val->val_name);
+		kutf_test_fail(context, msg);
+		goto out_fail_and_fixup;
+	}
+
+	if (named_val->type != expect_val_type) {
+		const char *msg = kutf_dsprintf(&context->fixture_pool,
+				"Expecting value named '%s' to be of type %s but got %s",
+				expect_val_name, get_val_type_name(expect_val_type),
+				get_val_type_name(named_val->type));
+		kutf_test_fail(context, msg);
+		goto out_fail_and_fixup;
+	}
+
+	return err;
+
+out_fail_and_fixup:
+	/* Produce a valid but incorrect value */
+	switch (expect_val_type) {
+	case KUTF_HELPER_VALTYPE_U64:
+		named_val->u.val_u64 = 0ull;
+		break;
+	case KUTF_HELPER_VALTYPE_STR:
+		{
+			char *str = kutf_mempool_alloc(&context->fixture_pool, sizeof(DUMMY_MSG));
+
+			if (!str)
+				return -1;
+
+			strcpy(str, DUMMY_MSG);
+			named_val->u.val_str = str;
+			break;
+		}
+	default:
+		break;
+	}
+
+	/* Indicate that this is invalid */
+	named_val->type = KUTF_HELPER_VALTYPE_INVALID;
+
+	/* But at least allow the caller to continue in the test with failures */
+	return 0;
+}
+EXPORT_SYMBOL(kutf_helper_receive_check_val);
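+
+/*
+ * Illustrative usage, not part of this patch; the value name is an example.
+ * A negative return means the test cannot continue; a zero return with type
+ * KUTF_HELPER_VALTYPE_INVALID means a failure was logged but a placeholder
+ * value lets the test carry on:
+ *
+ *	struct kutf_helper_named_val val;
+ *
+ *	if (kutf_helper_receive_check_val(&val, context, "GPU_ADDR",
+ *			KUTF_HELPER_VALTYPE_U64) < 0)
+ *		return;
+ */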
+
+void kutf_helper_output_named_val(struct kutf_helper_named_val *named_val)
+{
+	switch (named_val->type) {
+	case KUTF_HELPER_VALTYPE_U64:
+		pr_warn("%s=0x%llx\n", named_val->val_name, named_val->u.val_u64);
+		break;
+	case KUTF_HELPER_VALTYPE_STR:
+		pr_warn("%s=\"%s\"\n", named_val->val_name, named_val->u.val_str);
+		break;
+	case KUTF_HELPER_VALTYPE_INVALID:
+		pr_warn("%s is invalid\n", named_val->val_name);
+		break;
+	default:
+		pr_warn("%s has unknown type %d\n", named_val->val_name, named_val->type);
+		break;
+	}
+}
+EXPORT_SYMBOL(kutf_helper_output_named_val);
diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_mem.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_mem.c
new file mode 100644
index 0000000..fd98bea
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_mem.c
@@ -0,0 +1,108 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* Kernel UTF memory management functions */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <kutf/kutf_mem.h>
+
+
+/**
+ * struct kutf_alloc_entry - Structure representing an allocation.
+ * @node:	List node for use with kutf_mempool.
+ * @data:	Data area of the allocation
+ */
+struct kutf_alloc_entry {
+	struct list_head node;
+	u8 data[0];
+};
+
+int kutf_mempool_init(struct kutf_mempool *pool)
+{
+	if (!pool) {
+		pr_err("NULL pointer passed to %s\n", __func__);
+		return -1;
+	}
+
+	INIT_LIST_HEAD(&pool->head);
+	mutex_init(&pool->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(kutf_mempool_init);
+
+void kutf_mempool_destroy(struct kutf_mempool *pool)
+{
+	struct list_head *remove;
+	struct list_head *tmp;
+
+	if (!pool) {
+		pr_err("NULL pointer passed to %s\n", __func__);
+		return;
+	}
+
+	mutex_lock(&pool->lock);
+	list_for_each_safe(remove, tmp, &pool->head) {
+		struct kutf_alloc_entry *remove_alloc;
+
+		remove_alloc = list_entry(remove, struct kutf_alloc_entry, node);
+		list_del(&remove_alloc->node);
+		kfree(remove_alloc);
+	}
+	mutex_unlock(&pool->lock);
+}
+EXPORT_SYMBOL(kutf_mempool_destroy);
+
+void *kutf_mempool_alloc(struct kutf_mempool *pool, size_t size)
+{
+	struct kutf_alloc_entry *ret;
+
+	if (!pool) {
+		pr_err("NULL pointer passed to %s\n", __func__);
+		goto fail_pool;
+	}
+
+	mutex_lock(&pool->lock);
+
+	ret = kmalloc(sizeof(*ret) + size, GFP_KERNEL);
+	if (!ret) {
+		pr_err("Failed to allocate memory\n");
+		goto fail_alloc;
+	}
+
+	INIT_LIST_HEAD(&ret->node);
+	list_add(&ret->node, &pool->head);
+
+	mutex_unlock(&pool->lock);
+
+	return &ret->data[0];
+
+fail_alloc:
+	mutex_unlock(&pool->lock);
+fail_pool:
+	return NULL;
+}
+EXPORT_SYMBOL(kutf_mempool_alloc);
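+
+/*
+ * Illustrative pool lifecycle, not part of this patch. Every allocation is
+ * tracked on the pool's list and freed in one go by kutf_mempool_destroy():
+ *
+ *	struct kutf_mempool pool;
+ *	char *buf;
+ *
+ *	if (kutf_mempool_init(&pool))
+ *		return -ENOMEM;
+ *	buf = kutf_mempool_alloc(&pool, 64);
+ *	...
+ *	kutf_mempool_destroy(&pool);
+ */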
diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_resultset.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_resultset.c
new file mode 100644
index 0000000..94ecfa4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_resultset.c
@@ -0,0 +1,164 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* Kernel UTF result management functions */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+
+#include <kutf/kutf_suite.h>
+#include <kutf/kutf_resultset.h>
+
+/* Lock to protect all result structures */
+static DEFINE_SPINLOCK(kutf_result_lock);
+
+struct kutf_result_set *kutf_create_result_set(void)
+{
+	struct kutf_result_set *set;
+
+	set = kmalloc(sizeof(*set), GFP_KERNEL);
+	if (!set) {
+		pr_err("Failed to allocate resultset");
+		goto fail_alloc;
+	}
+
+	INIT_LIST_HEAD(&set->results);
+	init_waitqueue_head(&set->waitq);
+	set->flags = 0;
+
+	return set;
+
+fail_alloc:
+	return NULL;
+}
+
+int kutf_add_result(struct kutf_context *context,
+		enum kutf_result_status status,
+		const char *message)
+{
+	struct kutf_mempool *mempool = &context->fixture_pool;
+	struct kutf_result_set *set = context->result_set;
+	/* Create the new result */
+	struct kutf_result *new_result;
+
+	BUG_ON(set == NULL);
+
+	new_result = kutf_mempool_alloc(mempool, sizeof(*new_result));
+	if (!new_result) {
+		pr_err("Result allocation failed\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&new_result->node);
+	new_result->status = status;
+	new_result->message = message;
+
+	spin_lock(&kutf_result_lock);
+
+	list_add_tail(&new_result->node, &set->results);
+
+	spin_unlock(&kutf_result_lock);
+
+	wake_up(&set->waitq);
+
+	return 0;
+}
+
+void kutf_destroy_result_set(struct kutf_result_set *set)
+{
+	if (!list_empty(&set->results))
+		pr_err("kutf_destroy_result_set: Unread results from test\n");
+
+	kfree(set);
+}
+
+static bool kutf_has_result(struct kutf_result_set *set)
+{
+	bool has_result;
+
+	spin_lock(&kutf_result_lock);
+	if (set->flags & KUTF_RESULT_SET_WAITING_FOR_INPUT)
+		/* Pretend there are results if waiting for input */
+		has_result = true;
+	else
+		has_result = !list_empty(&set->results);
+	spin_unlock(&kutf_result_lock);
+
+	return has_result;
+}
+
+struct kutf_result *kutf_remove_result(struct kutf_result_set *set)
+{
+	struct kutf_result *result = NULL;
+	int ret;
+
+	do {
+		ret = wait_event_interruptible(set->waitq,
+				kutf_has_result(set));
+
+		if (ret)
+			return ERR_PTR(ret);
+
+		spin_lock(&kutf_result_lock);
+
+		if (!list_empty(&set->results)) {
+			result = list_first_entry(&set->results,
+					struct kutf_result,
+					node);
+			list_del(&result->node);
+		} else if (set->flags & KUTF_RESULT_SET_WAITING_FOR_INPUT) {
+			/* Return a fake result */
+			static struct kutf_result waiting = {
+				.status = KUTF_RESULT_USERDATA_WAIT
+			};
+			result = &waiting;
+		}
+		/* If result == NULL then there was a race with the event
+		 * being removed between the check in kutf_has_result and
+		 * the lock being obtained. In this case we retry
+		 */
+
+		spin_unlock(&kutf_result_lock);
+	} while (result == NULL);
+
+	return result;
+}
+
+void kutf_set_waiting_for_input(struct kutf_result_set *set)
+{
+	spin_lock(&kutf_result_lock);
+	set->flags |= KUTF_RESULT_SET_WAITING_FOR_INPUT;
+	spin_unlock(&kutf_result_lock);
+
+	wake_up(&set->waitq);
+}
+
+void kutf_clear_waiting_for_input(struct kutf_result_set *set)
+{
+	spin_lock(&kutf_result_lock);
+	set->flags &= ~KUTF_RESULT_SET_WAITING_FOR_INPUT;
+	spin_unlock(&kutf_result_lock);
+}
diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_suite.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_suite.c
new file mode 100644
index 0000000..3307c0e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_suite.c
@@ -0,0 +1,1203 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* Kernel UTF suite, test and fixture management including user to kernel
+ * interaction */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/atomic.h>
+#include <linux/sched.h>
+
+#include <generated/autoconf.h>
+
+#include <kutf/kutf_suite.h>
+#include <kutf/kutf_resultset.h>
+#include <kutf/kutf_utils.h>
+#include <kutf/kutf_helpers.h>
+
+/**
+ * struct kutf_application - Structure which represents kutf application
+ * @name:	The name of this test application.
+ * @dir:	The debugfs directory for this test
+ * @suite_list:	List head to store all the suites which are part of this
+ *              application
+ */
+struct kutf_application {
+	const char         *name;
+	struct dentry      *dir;
+	struct list_head   suite_list;
+};
+
+/**
+ * struct kutf_test_function - Structure which represents kutf test function
+ * @suite:		Back reference to the suite this test function
+ *                      belongs to
+ * @filters:		Filters that apply to this test function
+ * @test_id:		Test ID
+ * @execute:		Function to run for this test
+ * @test_data:		Static data for this test
+ * @node:		List node for test_list
+ * @variant_list:	List head to store all the variants which can run on
+ *                      this function
+ * @dir:		debugfs directory for this test function
+ */
+struct kutf_test_function {
+	struct kutf_suite  *suite;
+	unsigned int       filters;
+	unsigned int       test_id;
+	void (*execute)(struct kutf_context *context);
+	union kutf_callback_data test_data;
+	struct list_head   node;
+	struct list_head   variant_list;
+	struct dentry      *dir;
+};
+
+/**
+ * struct kutf_test_fixture - Structure which holds information on the kutf
+ *                            test fixture
+ * @test_func:		Test function this fixture belongs to
+ * @fixture_index:	Index of this fixture
+ * @node:		List node for variant_list
+ * @dir:		debugfs directory for this test fixture
+ */
+struct kutf_test_fixture {
+	struct kutf_test_function *test_func;
+	unsigned int              fixture_index;
+	struct list_head          node;
+	struct dentry             *dir;
+};
+
+static struct dentry *base_dir;
+static struct workqueue_struct *kutf_workq;
+
+/**
+ * struct kutf_convert_table - Structure which keeps test results
+ * @result_name:	Status of the test result
+ * @result:		Status value for a single test
+ */
+struct kutf_convert_table {
+	char                    result_name[50];
+	enum kutf_result_status result;
+};
+
+static struct kutf_convert_table kutf_convert[] = {
+#define ADD_UTF_RESULT(_name) \
+{ \
+	#_name, \
+	_name, \
+},
+ADD_UTF_RESULT(KUTF_RESULT_BENCHMARK)
+ADD_UTF_RESULT(KUTF_RESULT_SKIP)
+ADD_UTF_RESULT(KUTF_RESULT_UNKNOWN)
+ADD_UTF_RESULT(KUTF_RESULT_PASS)
+ADD_UTF_RESULT(KUTF_RESULT_DEBUG)
+ADD_UTF_RESULT(KUTF_RESULT_INFO)
+ADD_UTF_RESULT(KUTF_RESULT_WARN)
+ADD_UTF_RESULT(KUTF_RESULT_FAIL)
+ADD_UTF_RESULT(KUTF_RESULT_FATAL)
+ADD_UTF_RESULT(KUTF_RESULT_ABORT)
+};
+
+#define UTF_CONVERT_SIZE (ARRAY_SIZE(kutf_convert))
+
+/**
+ * kutf_create_context() - Create a test context in which a specific fixture
+ *                         of an application will be run and its results
+ *                         reported back to the user
+ * @test_fix:	Test fixture to be run.
+ *
+ * The context's refcount will be initialized to 1.
+ *
+ * Return: Returns the created test context on success or NULL on failure
+ */
+static struct kutf_context *kutf_create_context(
+		struct kutf_test_fixture *test_fix);
+
+/**
+ * kutf_destroy_context() - Destroy a previously created test context, only
+ *                          once its refcount has become zero
+ * @kref:	pointer to kref member within the context
+ *
+ * This should only be used via a kref_put() call on the context's kref member
+ */
+static void kutf_destroy_context(struct kref *kref);
+
+/**
+ * kutf_context_get() - increment refcount on a context
+ * @context:	the kutf context
+ *
+ * This must be used when the lifetime of the context might exceed that of the
+ * thread creating @context
+ */
+static void kutf_context_get(struct kutf_context *context);
+
+/**
+ * kutf_context_put() - decrement refcount on a context, destroying it when it
+ *                      reached zero
+ * @context:	the kutf context
+ *
+ * This must be used only after a corresponding kutf_context_get() call on
+ * @context, and the caller no longer needs access to @context.
+ */
+static void kutf_context_put(struct kutf_context *context);
+
+/**
+ * kutf_set_result() - Set the test result against the specified test context
+ * @context:	Test context
+ * @status:	Result status
+ */
+static void kutf_set_result(struct kutf_context *context,
+		enum kutf_result_status status);
+
+/**
+ * kutf_set_expected_result() - Set the expected test result for the specified
+ *                              test context
+ * @context:		Test context
+ * @expected_status:	Expected result status
+ */
+static void kutf_set_expected_result(struct kutf_context *context,
+		enum kutf_result_status expected_status);
+
+/**
+ * kutf_result_to_string() - Converts a KUTF result into a string
+ * @result_str:      Output result string
+ * @result:          Result status to convert
+ *
+ * Return: 1 if test result was successfully converted to string, 0 otherwise
+ */
+static int kutf_result_to_string(char **result_str,
+		enum kutf_result_status result)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < UTF_CONVERT_SIZE; i++) {
+		if (result == kutf_convert[i].result) {
+			*result_str = kutf_convert[i].result_name;
+			ret = 1;
+		}
+	}
+	return ret;
+}
+
+/**
+ * kutf_debugfs_const_string_read() - Simple debugfs read callback which
+ *                                    returns a constant string
+ * @file:	Opened file to read from
+ * @buf:	User buffer to write the data into
+ * @len:	Amount of data to read
+ * @ppos:	Offset into file to read from
+ *
+ * Return: On success, the number of bytes read and offset @ppos advanced by
+ *         this number; on error, negative value
+ */
+static ssize_t kutf_debugfs_const_string_read(struct file *file,
+		char __user *buf, size_t len, loff_t *ppos)
+{
+	char *str = file->private_data;
+
+	return simple_read_from_buffer(buf, len, ppos, str, strlen(str));
+}
+
+static const struct file_operations kutf_debugfs_const_string_ops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = kutf_debugfs_const_string_read,
+	.llseek  = default_llseek,
+};
+
+/**
+ * kutf_add_explicit_result() - Check if an explicit result needs to be added
+ * @context:	KUTF test context
+ */
+static void kutf_add_explicit_result(struct kutf_context *context)
+{
+	switch (context->expected_status) {
+	case KUTF_RESULT_UNKNOWN:
+		break;
+
+	case KUTF_RESULT_WARN:
+		if (context->status == KUTF_RESULT_WARN)
+			kutf_test_pass(context,
+					"Pass (expected warn occurred)");
+		else if (context->status != KUTF_RESULT_SKIP)
+			kutf_test_fail(context,
+					"Fail (expected warn missing)");
+		break;
+
+	case KUTF_RESULT_FAIL:
+		if (context->status == KUTF_RESULT_FAIL)
+			kutf_test_pass(context,
+					"Pass (expected fail occurred)");
+		else if (context->status != KUTF_RESULT_SKIP) {
+			/* Force the expected status so the fail gets logged */
+			context->expected_status = KUTF_RESULT_PASS;
+			kutf_test_fail(context,
+					"Fail (expected fail missing)");
+		}
+		break;
+
+	case KUTF_RESULT_FATAL:
+		if (context->status == KUTF_RESULT_FATAL)
+			kutf_test_pass(context,
+					"Pass (expected fatal occurred)");
+		else if (context->status != KUTF_RESULT_SKIP)
+			kutf_test_fail(context,
+					"Fail (expected fatal missing)");
+		break;
+
+	case KUTF_RESULT_ABORT:
+		if (context->status == KUTF_RESULT_ABORT)
+			kutf_test_pass(context,
+					"Pass (expected abort occurred)");
+		else if (context->status != KUTF_RESULT_SKIP)
+			kutf_test_fail(context,
+					"Fail (expected abort missing)");
+		break;
+	default:
+		break;
+	}
+}
+
+static void kutf_run_test(struct work_struct *data)
+{
+	struct kutf_context *test_context = container_of(data,
+			struct kutf_context, work);
+	struct kutf_suite *suite = test_context->suite;
+	struct kutf_test_function *test_func;
+
+	test_func = test_context->test_fix->test_func;
+
+	/*
+	 * Call the create fixture function if required before the
+	 * fixture is run
+	 */
+	if (suite->create_fixture)
+		test_context->fixture = suite->create_fixture(test_context);
+
+	/* Only run the test if the fixture was created (if required) */
+	if ((suite->create_fixture && test_context->fixture) ||
+			(!suite->create_fixture)) {
+		/* Run this fixture */
+		test_func->execute(test_context);
+
+		if (suite->remove_fixture)
+			suite->remove_fixture(test_context);
+
+		kutf_add_explicit_result(test_context);
+	}
+
+	kutf_add_result(test_context, KUTF_RESULT_TEST_FINISHED, NULL);
+
+	kutf_context_put(test_context);
+}
+
+/**
+ * kutf_debugfs_run_open() - Debugfs open callback for the "run" entry.
+ * @inode:	inode of the opened file
+ * @file:	Opened file to read from
+ *
+ * This function creates a KUTF context and queues it onto a workqueue to be
+ * run asynchronously. The resulting file descriptor can be used to communicate
+ * userdata to the test and to read back the results of the test execution.
+ *
+ * Return: 0 on success
+ */
+static int kutf_debugfs_run_open(struct inode *inode, struct file *file)
+{
+	struct kutf_test_fixture *test_fix = inode->i_private;
+	struct kutf_context *test_context;
+	int err = 0;
+
+	test_context = kutf_create_context(test_fix);
+	if (!test_context) {
+		err = -ENOMEM;
+		goto finish;
+	}
+
+	file->private_data = test_context;
+
+	/* This reference is released by kutf_run_test() */
+	kutf_context_get(test_context);
+
+	queue_work(kutf_workq, &test_context->work);
+
+finish:
+	return err;
+}
+
+#define USERDATA_WARNING_MESSAGE "WARNING: This test requires userdata\n"
+
+/**
+ * kutf_debugfs_run_read() - Debugfs read callback for the "run" entry.
+ * @file:	Opened file to read from
+ * @buf:	User buffer to write the data into
+ * @len:	Amount of data to read
+ * @ppos:	Offset into file to read from
+ *
+ * This function emits the results of the test, blocking until they are
+ * available.
+ *
+ * If the test involves user data then this will also return user data records
+ * to user space. If the test is waiting for user data then this function will
+ * output a message (to make the likes of 'cat' display it), followed by
+ * returning 0 to mark the end of file.
+ *
+ * Results will be emitted one at a time, once all the results have been read
+ * 0 will be returned to indicate there is no more data.
+ *
+ * Return: Number of bytes read.
+ */
+static ssize_t kutf_debugfs_run_read(struct file *file, char __user *buf,
+		size_t len, loff_t *ppos)
+{
+	struct kutf_context *test_context = file->private_data;
+	struct kutf_result *res;
+	unsigned long bytes_not_copied;
+	ssize_t bytes_copied = 0;
+	char *kutf_str_ptr = NULL;
+	size_t kutf_str_len = 0;
+	size_t message_len = 0;
+	char separator = ':';
+	char terminator = '\n';
+
+	res = kutf_remove_result(test_context->result_set);
+
+	if (IS_ERR(res))
+		return PTR_ERR(res);
+
+	/*
+	 * Handle 'fake' results - these results are converted to another
+	 * form before being returned from the kernel
+	 */
+	switch (res->status) {
+	case KUTF_RESULT_TEST_FINISHED:
+		return 0;
+	case KUTF_RESULT_USERDATA_WAIT:
+		if (test_context->userdata.flags &
+				KUTF_USERDATA_WARNING_OUTPUT) {
+			/*
+			 * Warning message already output,
+			 * signal end-of-file
+			 */
+			return 0;
+		}
+
+		message_len = sizeof(USERDATA_WARNING_MESSAGE)-1;
+		if (message_len > len)
+			message_len = len;
+
+		bytes_not_copied = copy_to_user(buf,
+				USERDATA_WARNING_MESSAGE,
+				message_len);
+		if (bytes_not_copied != 0)
+			return -EFAULT;
+		test_context->userdata.flags |= KUTF_USERDATA_WARNING_OUTPUT;
+		return message_len;
+	case KUTF_RESULT_USERDATA:
+		message_len = strlen(res->message);
+		if (message_len > len-1) {
+			message_len = len-1;
+			pr_warn("User data truncated, read not long enough\n");
+		}
+		bytes_not_copied = copy_to_user(buf, res->message,
+				message_len);
+		if (bytes_not_copied != 0) {
+			pr_warn("Failed to copy data to user space buffer\n");
+			return -EFAULT;
+		}
+		/* Finally the terminator */
+		bytes_not_copied = copy_to_user(&buf[message_len],
+				&terminator, 1);
+		if (bytes_not_copied != 0) {
+			pr_warn("Failed to copy data to user space buffer\n");
+			return -EFAULT;
+		}
+		return message_len+1;
+	default:
+		/* Fall through - this is a test result */
+		break;
+	}
+
+	/* Note: This code assumes a result is read completely */
+	kutf_result_to_string(&kutf_str_ptr, res->status);
+	if (kutf_str_ptr)
+		kutf_str_len = strlen(kutf_str_ptr);
+
+	if (res->message)
+		message_len = strlen(res->message);
+
+	if ((kutf_str_len + 1 + message_len + 1) > len) {
+		pr_err("Not enough space in user buffer for a single result");
+		return 0;
+	}
+
+	/* First copy the result string */
+	if (kutf_str_ptr) {
+		bytes_not_copied = copy_to_user(&buf[0], kutf_str_ptr,
+						kutf_str_len);
+		bytes_copied += kutf_str_len - bytes_not_copied;
+		if (bytes_not_copied)
+			goto exit;
+	}
+
+	/* Then the separator */
+	bytes_not_copied = copy_to_user(&buf[bytes_copied],
+					&separator, 1);
+	bytes_copied += 1 - bytes_not_copied;
+	if (bytes_not_copied)
+		goto exit;
+
+	/* Next copy the message string */
+	if (res->message) {
+		bytes_not_copied = copy_to_user(&buf[bytes_copied],
+						res->message, message_len);
+		bytes_copied += message_len - bytes_not_copied;
+		if (bytes_not_copied)
+			goto exit;
+	}
+
+	/* Finally the terminator */
+	bytes_not_copied = copy_to_user(&buf[bytes_copied],
+					&terminator, 1);
+	bytes_copied += 1 - bytes_not_copied;
+
+exit:
+	return bytes_copied;
+}
+
+/**
+ * kutf_debugfs_run_write() - Debugfs write callback for the "run" entry.
+ * @file:	Opened file to write to
+ * @buf:	User buffer to read the data from
+ * @len:	Amount of data to write
+ * @ppos:	Offset into file to write to
+ *
+ * This function allows user and kernel to exchange extra data necessary for
+ * the test fixture.
+ *
+ * The data is added to the first struct kutf_context running the fixture
+ *
+ * Return: Number of bytes written
+ */
+static ssize_t kutf_debugfs_run_write(struct file *file,
+		const char __user *buf, size_t len, loff_t *ppos)
+{
+	int ret = 0;
+	struct kutf_context *test_context = file->private_data;
+
+	if (len > KUTF_MAX_LINE_LENGTH)
+		return -EINVAL;
+
+	ret = kutf_helper_input_enqueue(test_context, buf, len);
+	if (ret < 0)
+		return ret;
+
+	return len;
+}
+
+/**
+ * kutf_debugfs_run_release() - Debugfs release callback for the "run" entry.
+ * @inode:	File entry representation
+ * @file:	A specific opening of the file
+ *
+ * Release any resources that were created during the opening of the file
+ *
+ * Note that resources may not be released immediately; that might only happen
+ * later when other users of the kutf_context release their refcount.
+ *
+ * Return: 0 on success
+ */
+static int kutf_debugfs_run_release(struct inode *inode, struct file *file)
+{
+	struct kutf_context *test_context = file->private_data;
+
+	kutf_helper_input_enqueue_end_of_data(test_context);
+
+	kutf_context_put(test_context);
+	return 0;
+}
+
+static const struct file_operations kutf_debugfs_run_ops = {
+	.owner = THIS_MODULE,
+	.open = kutf_debugfs_run_open,
+	.read = kutf_debugfs_run_read,
+	.write = kutf_debugfs_run_write,
+	.release = kutf_debugfs_run_release,
+	.llseek  = default_llseek,
+};
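+
+/*
+ * Illustrative interaction with a "run" entry from userspace, not part of
+ * this patch. The exact path depends on the debugfs mount point and the
+ * registered application/suite/test names, which are hypothetical here:
+ *
+ *	cat /sys/kernel/debug/kutf_tests/<app>/<suite>/<test>/<fixture>/run
+ */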
+
+/**
+ * create_fixture_variant() - Creates a fixture variant for the specified
+ *                            test function and index and the debugfs entries
+ *                            that represent it.
+ * @test_func:		Test function
+ * @fixture_index:	Fixture index
+ *
+ * Return: 0 on success, negative value corresponding to error code in failure
+ */
+static int create_fixture_variant(struct kutf_test_function *test_func,
+		unsigned int fixture_index)
+{
+	struct kutf_test_fixture *test_fix;
+	char name[11];	/* Enough to print MAX_UINT32 plus the null terminator */
+	struct dentry *tmp;
+	int err;
+
+	test_fix = kmalloc(sizeof(*test_fix), GFP_KERNEL);
+	if (!test_fix) {
+		pr_err("Failed to create debugfs directory when adding fixture\n");
+		err = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	test_fix->test_func = test_func;
+	test_fix->fixture_index = fixture_index;
+
+	snprintf(name, sizeof(name), "%u", fixture_index);
+	test_fix->dir = debugfs_create_dir(name, test_func->dir);
+	if (!test_fix->dir) {
+		pr_err("Failed to create debugfs directory when adding fixture\n");
+		/* Might not be the right error, we don't get it passed back to us */
+		err = -EEXIST;
+		goto fail_dir;
+	}
+
+	tmp = debugfs_create_file("type", S_IROTH, test_fix->dir, "fixture\n",
+				  &kutf_debugfs_const_string_ops);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"type\" when adding fixture\n");
+		/* Might not be the right error, we don't get it passed back to us */
+		err = -EEXIST;
+		goto fail_file;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+	tmp = debugfs_create_file_unsafe(
+#else
+	tmp = debugfs_create_file(
+#endif
+			"run", 0600, test_fix->dir,
+			test_fix,
+			&kutf_debugfs_run_ops);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"run\" when adding fixture\n");
+		/* Might not be the right error, we don't get it passed back to us */
+		err = -EEXIST;
+		goto fail_file;
+	}
+
+	list_add(&test_fix->node, &test_func->variant_list);
+	return 0;
+
+fail_file:
+	debugfs_remove_recursive(test_fix->dir);
+fail_dir:
+	kfree(test_fix);
+fail_alloc:
+	return err;
+}
+
+/**
+ * kutf_remove_test_variant() - Destroy a previously created fixture variant.
+ * @test_fix:	Test fixture
+ */
+static void kutf_remove_test_variant(struct kutf_test_fixture *test_fix)
+{
+	debugfs_remove_recursive(test_fix->dir);
+	kfree(test_fix);
+}
+
+void kutf_add_test_with_filters_and_data(
+		struct kutf_suite *suite,
+		unsigned int id,
+		const char *name,
+		void (*execute)(struct kutf_context *context),
+		unsigned int filters,
+		union kutf_callback_data test_data)
+{
+	struct kutf_test_function *test_func;
+	struct dentry *tmp;
+	unsigned int i;
+
+	test_func = kmalloc(sizeof(*test_func), GFP_KERNEL);
+	if (!test_func) {
+		pr_err("Failed to allocate memory when adding test %s\n", name);
+		goto fail_alloc;
+	}
+
+	INIT_LIST_HEAD(&test_func->variant_list);
+
+	test_func->dir = debugfs_create_dir(name, suite->dir);
+	if (!test_func->dir) {
+		pr_err("Failed to create debugfs directory when adding test %s\n", name);
+		goto fail_dir;
+	}
+
+	tmp = debugfs_create_file("type", S_IROTH, test_func->dir, "test\n",
+				  &kutf_debugfs_const_string_ops);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"type\" when adding test %s\n", name);
+		goto fail_file;
+	}
+
+	test_func->filters = filters;
+	tmp = debugfs_create_x32("filters", S_IROTH, test_func->dir,
+				 &test_func->filters);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"filters\" when adding test %s\n", name);
+		goto fail_file;
+	}
+
+	test_func->test_id = id;
+	tmp = debugfs_create_u32("test_id", S_IROTH, test_func->dir,
+				 &test_func->test_id);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"test_id\" when adding test %s\n", name);
+		goto fail_file;
+	}
+
+	for (i = 0; i < suite->fixture_variants; i++) {
+		if (create_fixture_variant(test_func, i)) {
+			pr_err("Failed to create fixture %d when adding test %s\n", i, name);
+			goto fail_file;
+		}
+	}
+
+	test_func->suite = suite;
+	test_func->execute = execute;
+	test_func->test_data = test_data;
+
+	list_add(&test_func->node, &suite->test_list);
+	return;
+
+fail_file:
+	debugfs_remove_recursive(test_func->dir);
+fail_dir:
+	kfree(test_func);
+fail_alloc:
+	return;
+}
+EXPORT_SYMBOL(kutf_add_test_with_filters_and_data);
+
+void kutf_add_test_with_filters(
+		struct kutf_suite *suite,
+		unsigned int id,
+		const char *name,
+		void (*execute)(struct kutf_context *context),
+		unsigned int filters)
+{
+	union kutf_callback_data data;
+
+	data.ptr_value = NULL;
+
+	kutf_add_test_with_filters_and_data(suite,
+					    id,
+					    name,
+					    execute,
+					    filters,
+					    data);
+}
+EXPORT_SYMBOL(kutf_add_test_with_filters);
+
+void kutf_add_test(struct kutf_suite *suite,
+		unsigned int id,
+		const char *name,
+		void (*execute)(struct kutf_context *context))
+{
+	union kutf_callback_data data;
+
+	data.ptr_value = NULL;
+
+	kutf_add_test_with_filters_and_data(suite,
+					    id,
+					    name,
+					    execute,
+					    suite->suite_default_flags,
+					    data);
+}
+EXPORT_SYMBOL(kutf_add_test);
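+
+/*
+ * Illustrative registration, not part of this patch. 'my_suite' is assumed
+ * to come from kutf_create_suite() below and 'my_test_func' to have the
+ * void (*)(struct kutf_context *) signature used above:
+ *
+ *	kutf_add_test(my_suite, 0x0, "simple_test", my_test_func);
+ */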
+
+/**
+ * kutf_remove_test() - Remove a previously added test function.
+ * @test_func: Test function
+ */
+static void kutf_remove_test(struct kutf_test_function *test_func)
+{
+	struct list_head *pos;
+	struct list_head *tmp;
+
+	list_for_each_safe(pos, tmp, &test_func->variant_list) {
+		struct kutf_test_fixture *test_fix;
+
+		test_fix = list_entry(pos, struct kutf_test_fixture, node);
+		kutf_remove_test_variant(test_fix);
+	}
+
+	list_del(&test_func->node);
+	debugfs_remove_recursive(test_func->dir);
+	kfree(test_func);
+}
+
+struct kutf_suite *kutf_create_suite_with_filters_and_data(
+		struct kutf_application *app,
+		const char *name,
+		unsigned int fixture_count,
+		void *(*create_fixture)(struct kutf_context *context),
+		void (*remove_fixture)(struct kutf_context *context),
+		unsigned int filters,
+		union kutf_callback_data suite_data)
+{
+	struct kutf_suite *suite;
+	struct dentry *tmp;
+
+	suite = kmalloc(sizeof(*suite), GFP_KERNEL);
+	if (!suite) {
+		pr_err("Failed to allocate memory when creating suite %s\n", name);
+		goto fail_kmalloc;
+	}
+
+	suite->dir = debugfs_create_dir(name, app->dir);
+	if (!suite->dir) {
+		pr_err("Failed to create debugfs directory when adding test %s\n", name);
+		goto fail_debugfs;
+	}
+
+	tmp = debugfs_create_file("type", S_IROTH, suite->dir, "suite\n",
+				  &kutf_debugfs_const_string_ops);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"type\" when adding test %s\n", name);
+		goto fail_file;
+	}
+
+	INIT_LIST_HEAD(&suite->test_list);
+	suite->app = app;
+	suite->name = name;
+	suite->fixture_variants = fixture_count;
+	suite->create_fixture = create_fixture;
+	suite->remove_fixture = remove_fixture;
+	suite->suite_default_flags = filters;
+	suite->suite_data = suite_data;
+
+	list_add(&suite->node, &app->suite_list);
+
+	return suite;
+
+fail_file:
+	debugfs_remove_recursive(suite->dir);
+fail_debugfs:
+	kfree(suite);
+fail_kmalloc:
+	return NULL;
+}
+EXPORT_SYMBOL(kutf_create_suite_with_filters_and_data);
+
+struct kutf_suite *kutf_create_suite_with_filters(
+		struct kutf_application *app,
+		const char *name,
+		unsigned int fixture_count,
+		void *(*create_fixture)(struct kutf_context *context),
+		void (*remove_fixture)(struct kutf_context *context),
+		unsigned int filters)
+{
+	union kutf_callback_data data;
+
+	data.ptr_value = NULL;
+	return kutf_create_suite_with_filters_and_data(app,
+						       name,
+						       fixture_count,
+						       create_fixture,
+						       remove_fixture,
+						       filters,
+						       data);
+}
+EXPORT_SYMBOL(kutf_create_suite_with_filters);
+
+struct kutf_suite *kutf_create_suite(
+		struct kutf_application *app,
+		const char *name,
+		unsigned int fixture_count,
+		void *(*create_fixture)(struct kutf_context *context),
+		void (*remove_fixture)(struct kutf_context *context))
+{
+	union kutf_callback_data data;
+
+	data.ptr_value = NULL;
+	return kutf_create_suite_with_filters_and_data(app,
+						       name,
+						       fixture_count,
+						       create_fixture,
+						       remove_fixture,
+						       KUTF_F_TEST_GENERIC,
+						       data);
+}
+EXPORT_SYMBOL(kutf_create_suite);
+
+/**
+ * kutf_destroy_suite() - Destroy a previously added test suite.
+ * @suite:	Test suite
+ */
+static void kutf_destroy_suite(struct kutf_suite *suite)
+{
+	struct list_head *pos;
+	struct list_head *tmp;
+
+	list_for_each_safe(pos, tmp, &suite->test_list) {
+		struct kutf_test_function *test_func;
+
+		test_func = list_entry(pos, struct kutf_test_function, node);
+		kutf_remove_test(test_func);
+	}
+
+	list_del(&suite->node);
+	debugfs_remove_recursive(suite->dir);
+	kfree(suite);
+}
+
+struct kutf_application *kutf_create_application(const char *name)
+{
+	struct kutf_application *app;
+	struct dentry *tmp;
+
+	app = kmalloc(sizeof(*app), GFP_KERNEL);
+	if (!app) {
+		pr_err("Failed to create allocate memory when creating application %s\n", name);
+		goto fail_kmalloc;
+	}
+
+	app->dir = debugfs_create_dir(name, base_dir);
+	if (!app->dir) {
+		pr_err("Failed to create debugfs direcotry when creating application %s\n", name);
+		goto fail_debugfs;
+	}
+
+	tmp = debugfs_create_file("type", S_IROTH, app->dir, "application\n",
+				  &kutf_debugfs_const_string_ops);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"type\" when creating application %s\n", name);
+		goto fail_file;
+	}
+
+	INIT_LIST_HEAD(&app->suite_list);
+	app->name = name;
+
+	return app;
+
+fail_file:
+	debugfs_remove_recursive(app->dir);
+fail_debugfs:
+	kfree(app);
+fail_kmalloc:
+	return NULL;
+}
+EXPORT_SYMBOL(kutf_create_application);
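+
+/*
+ * Illustrative wiring of the application/suite/test hierarchy above, not
+ * part of this patch; all names are hypothetical:
+ *
+ *	static struct kutf_application *my_app;
+ *
+ *	static int __init my_tests_init(void)
+ *	{
+ *		struct kutf_suite *suite;
+ *
+ *		my_app = kutf_create_application("my_tests");
+ *		if (!my_app)
+ *			return -ENOMEM;
+ *		suite = kutf_create_suite(my_app, "suite", 1,
+ *				my_create_fixture, my_remove_fixture);
+ *		if (suite)
+ *			kutf_add_test(suite, 0x0, "test", my_test_func);
+ *		return 0;
+ *	}
+ */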
+
+void kutf_destroy_application(struct kutf_application *app)
+{
+	struct list_head *pos;
+	struct list_head *tmp;
+
+	list_for_each_safe(pos, tmp, &app->suite_list) {
+		struct kutf_suite *suite;
+
+		suite = list_entry(pos, struct kutf_suite, node);
+		kutf_destroy_suite(suite);
+	}
+
+	debugfs_remove_recursive(app->dir);
+	kfree(app);
+}
+EXPORT_SYMBOL(kutf_destroy_application);
+
+static struct kutf_context *kutf_create_context(
+		struct kutf_test_fixture *test_fix)
+{
+	struct kutf_context *new_context;
+
+	new_context = kmalloc(sizeof(*new_context), GFP_KERNEL);
+	if (!new_context) {
+		pr_err("Failed to allocate test context");
+		goto fail_alloc;
+	}
+
+	new_context->result_set = kutf_create_result_set();
+	if (!new_context->result_set) {
+		pr_err("Failed to create result set");
+		goto fail_result_set;
+	}
+
+	new_context->test_fix = test_fix;
+	/* Save the pointer to the suite as the callbacks will require it */
+	new_context->suite = test_fix->test_func->suite;
+	new_context->status = KUTF_RESULT_UNKNOWN;
+	new_context->expected_status = KUTF_RESULT_UNKNOWN;
+
+	kutf_mempool_init(&new_context->fixture_pool);
+	new_context->fixture = NULL;
+	new_context->fixture_index = test_fix->fixture_index;
+	new_context->fixture_name = NULL;
+	new_context->test_data = test_fix->test_func->test_data;
+
+	new_context->userdata.flags = 0;
+	INIT_LIST_HEAD(&new_context->userdata.input_head);
+	init_waitqueue_head(&new_context->userdata.input_waitq);
+
+	INIT_WORK(&new_context->work, kutf_run_test);
+
+	kref_init(&new_context->kref);
+
+	return new_context;
+
+fail_result_set:
+	kfree(new_context);
+fail_alloc:
+	return NULL;
+}
+
+static void kutf_destroy_context(struct kref *kref)
+{
+	struct kutf_context *context;
+
+	context = container_of(kref, struct kutf_context, kref);
+	kutf_destroy_result_set(context->result_set);
+	kutf_mempool_destroy(&context->fixture_pool);
+	kfree(context);
+}
+
+static void kutf_context_get(struct kutf_context *context)
+{
+	kref_get(&context->kref);
+}
+
+static void kutf_context_put(struct kutf_context *context)
+{
+	kref_put(&context->kref, kutf_destroy_context);
+}
+
+static void kutf_set_result(struct kutf_context *context,
+		enum kutf_result_status status)
+{
+	context->status = status;
+}
+
+static void kutf_set_expected_result(struct kutf_context *context,
+		enum kutf_result_status expected_status)
+{
+	context->expected_status = expected_status;
+}
+
+/**
+ * kutf_test_log_result() - Log a result for the specified test context
+ * @context:	Test context
+ * @message:	Result string
+ * @new_status:	Result status
+ */
+static void kutf_test_log_result(
+	struct kutf_context *context,
+	const char *message,
+	enum kutf_result_status new_status)
+{
+	if (context->status < new_status)
+		context->status = new_status;
+
+	if (context->expected_status != new_status)
+		kutf_add_result(context, new_status, message);
+}
+
+void kutf_test_log_result_external(
+	struct kutf_context *context,
+	const char *message,
+	enum kutf_result_status new_status)
+{
+	kutf_test_log_result(context, message, new_status);
+}
+EXPORT_SYMBOL(kutf_test_log_result_external);
+
+void kutf_test_expect_abort(struct kutf_context *context)
+{
+	kutf_set_expected_result(context, KUTF_RESULT_ABORT);
+}
+EXPORT_SYMBOL(kutf_test_expect_abort);
+
+void kutf_test_expect_fatal(struct kutf_context *context)
+{
+	kutf_set_expected_result(context, KUTF_RESULT_FATAL);
+}
+EXPORT_SYMBOL(kutf_test_expect_fatal);
+
+void kutf_test_expect_fail(struct kutf_context *context)
+{
+	kutf_set_expected_result(context, KUTF_RESULT_FAIL);
+}
+EXPORT_SYMBOL(kutf_test_expect_fail);
+
+void kutf_test_expect_warn(struct kutf_context *context)
+{
+	kutf_set_expected_result(context, KUTF_RESULT_WARN);
+}
+EXPORT_SYMBOL(kutf_test_expect_warn);
+
+void kutf_test_expect_pass(struct kutf_context *context)
+{
+	kutf_set_expected_result(context, KUTF_RESULT_PASS);
+}
+EXPORT_SYMBOL(kutf_test_expect_pass);
+
+void kutf_test_skip(struct kutf_context *context)
+{
+	kutf_set_result(context, KUTF_RESULT_SKIP);
+	kutf_set_expected_result(context, KUTF_RESULT_UNKNOWN);
+
+	kutf_test_log_result(context, "Test skipped", KUTF_RESULT_SKIP);
+}
+EXPORT_SYMBOL(kutf_test_skip);
+
+void kutf_test_skip_msg(struct kutf_context *context, const char *message)
+{
+	kutf_set_result(context, KUTF_RESULT_SKIP);
+	kutf_set_expected_result(context, KUTF_RESULT_UNKNOWN);
+
+	kutf_test_log_result(context, kutf_dsprintf(&context->fixture_pool,
+			     "Test skipped: %s", message), KUTF_RESULT_SKIP);
+	kutf_test_log_result(context, "!!!Test skipped!!!", KUTF_RESULT_SKIP);
+}
+EXPORT_SYMBOL(kutf_test_skip_msg);
+
+void kutf_test_debug(struct kutf_context *context, char const *message)
+{
+	kutf_test_log_result(context, message, KUTF_RESULT_DEBUG);
+}
+EXPORT_SYMBOL(kutf_test_debug);
+
+void kutf_test_pass(struct kutf_context *context, char const *message)
+{
+	static const char explicit_message[] = "(explicit pass)";
+
+	if (!message)
+		message = explicit_message;
+
+	kutf_test_log_result(context, message, KUTF_RESULT_PASS);
+}
+EXPORT_SYMBOL(kutf_test_pass);
+
+void kutf_test_info(struct kutf_context *context, char const *message)
+{
+	kutf_test_log_result(context, message, KUTF_RESULT_INFO);
+}
+EXPORT_SYMBOL(kutf_test_info);
+
+void kutf_test_warn(struct kutf_context *context, char const *message)
+{
+	kutf_test_log_result(context, message, KUTF_RESULT_WARN);
+}
+EXPORT_SYMBOL(kutf_test_warn);
+
+void kutf_test_fail(struct kutf_context *context, char const *message)
+{
+	kutf_test_log_result(context, message, KUTF_RESULT_FAIL);
+}
+EXPORT_SYMBOL(kutf_test_fail);
+
+void kutf_test_fatal(struct kutf_context *context, char const *message)
+{
+	kutf_test_log_result(context, message, KUTF_RESULT_FATAL);
+}
+EXPORT_SYMBOL(kutf_test_fatal);
+
+void kutf_test_abort(struct kutf_context *context)
+{
+	kutf_test_log_result(context, "", KUTF_RESULT_ABORT);
+}
+EXPORT_SYMBOL(kutf_test_abort);
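+
+/*
+ * Illustrative use of the expect helpers above, not part of this patch.
+ * Declaring an expected failure before provoking it makes a matching
+ * failure count as a pass when kutf_add_explicit_result() runs; the
+ * provoking helper is hypothetical:
+ *
+ *	kutf_test_expect_fail(context);
+ *	provoke_known_failure(context);
+ */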
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * init_kutf_core() - Module entry point.
+ *
+ * Create the base entry point in debugfs.
+ */
+static int __init init_kutf_core(void)
+{
+	kutf_workq = alloc_workqueue("kutf workq", WQ_UNBOUND, 1);
+	if (!kutf_workq)
+		return -ENOMEM;
+
+	base_dir = debugfs_create_dir("kutf_tests", NULL);
+	if (!base_dir) {
+		destroy_workqueue(kutf_workq);
+		kutf_workq = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * exit_kutf_core() - Module exit point.
+ *
+ * Remove the base entry point in debugfs.
+ */
+static void __exit exit_kutf_core(void)
+{
+	debugfs_remove_recursive(base_dir);
+
+	if (kutf_workq)
+		destroy_workqueue(kutf_workq);
+}
+
+#else	/* CONFIG_DEBUG_FS */
+
+/**
+ * init_kutf_core() - Module entry point.
+ *
+ * Stub for when built against a kernel without debugfs support
+ */
+static int __init init_kutf_core(void)
+{
+	pr_debug("KUTF requires a kernel with debug fs support");
+
+	return -ENODEV;
+}
+
+/**
+ * exit_kutf_core() - Module exit point.
+ *
+ * Stub for when built against a kernel without debugfs support
+ */
+static void __exit exit_kutf_core(void)
+{
+}
+#endif	/* CONFIG_DEBUG_FS */
+
+MODULE_LICENSE("GPL");
+
+module_init(init_kutf_core);
+module_exit(exit_kutf_core);
diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_utils.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_utils.c
new file mode 100644
index 0000000..7f5ac51
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_utils.c
@@ -0,0 +1,76 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* Kernel UTF utility functions */
+
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+#include <kutf/kutf_utils.h>
+#include <kutf/kutf_mem.h>
+
+static char tmp_buffer[KUTF_MAX_DSPRINTF_LEN];
+
+DEFINE_MUTEX(buffer_lock);
+
+const char *kutf_dsprintf(struct kutf_mempool *pool,
+		const char *fmt, ...)
+{
+	va_list args;
+	int len;
+	int size;
+	void *buffer;
+
+	mutex_lock(&buffer_lock);
+	va_start(args, fmt);
+	len = vsnprintf(tmp_buffer, sizeof(tmp_buffer), fmt, args);
+	va_end(args);
+
+	if (len < 0) {
+		pr_err("kutf_dsprintf: Bad format dsprintf format %s\n", fmt);
+		goto fail_format;
+	}
+
+	if (len >= sizeof(tmp_buffer)) {
+		pr_warn("kutf_dsprintf: Truncated dsprintf message %s\n", fmt);
+		size = sizeof(tmp_buffer);
+	} else {
+		size = len + 1;
+	}
+
+	buffer = kutf_mempool_alloc(pool, size);
+	if (!buffer)
+		goto fail_alloc;
+
+	memcpy(buffer, tmp_buffer, size);
+	mutex_unlock(&buffer_lock);
+
+	return buffer;
+
+fail_alloc:
+fail_format:
+	mutex_unlock(&buffer_lock);
+	return NULL;
+}
+EXPORT_SYMBOL(kutf_dsprintf);
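+
+/*
+ * Illustrative usage, not part of this patch. The returned string lives in
+ * the given pool and is freed with it; NULL is returned on format or
+ * allocation failure:
+ *
+ *	const char *msg = kutf_dsprintf(&context->fixture_pool,
+ *			"iteration %d failed", i);
+ *	if (msg)
+ *		kutf_test_fail(context, msg);
+ */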
diff --git a/drivers/gpu/arm/midgard/tests/kutf/sconscript b/drivers/gpu/arm/midgard/tests/kutf/sconscript
new file mode 100644
index 0000000..98f6446
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/sconscript
@@ -0,0 +1,27 @@
+#
+# (C) COPYRIGHT 2014-2016, 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+Import('kutf_env')
+
+make_args = kutf_env.kernel_get_config_defines(ret_list = True)
+
+mod = kutf_env.BuildKernelModule('$STATIC_LIB_PATH/kutf.ko', Glob('*.c'), make_args = make_args)
+kutf_env.KernelObjTarget('kutf', mod)
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kbuild b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kbuild
new file mode 100644
index 0000000..ca8c512
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kbuild
@@ -0,0 +1,26 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+ccflags-y += -I$(src)/../include -I$(src)/../../../ -I$(src)/../../ -I$(src)/../../backend/gpu -I$(srctree)/drivers/staging/android
+
+obj-$(CONFIG_MALI_IRQ_LATENCY) += mali_kutf_irq_test.o
+
+mali_kutf_irq_test-y := mali_kutf_irq_test_main.o
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig
new file mode 100644
index 0000000..4a3863a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig
@@ -0,0 +1,29 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+config MALI_IRQ_LATENCY
+ tristate "Mali GPU IRQ latency measurement"
+ depends on MALI_MIDGARD && MALI_DEBUG && MALI_KUTF
+ default m
+ help
+   This option will build a test module mali_kutf_irq_test that
+   can determine the latency of the Mali GPU IRQ on your system.
+   Choosing M here will generate a single module called mali_kutf_irq_test.
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile
new file mode 100644
index 0000000..9218a40
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile
@@ -0,0 +1,49 @@
+#
+# (C) COPYRIGHT 2015, 2017-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+# linux build system bootstrap for out-of-tree module
+
+# default to building for the host
+ARCH ?= $(shell uname -m)
+
+ifeq ($(KDIR),)
+$(error Must specify KDIR to point to the kernel to target)
+endif
+
+TEST_CCFLAGS := \
+	-DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
+	-DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \
+	-DMALI_USE_CSF=$(MALI_USE_CSF) \
+	$(SCONS_CFLAGS) \
+	-I$(CURDIR)/../include \
+	-I$(CURDIR)/../../../../../../include \
+	-I$(CURDIR)/../../../ \
+	-I$(CURDIR)/../../ \
+	-I$(CURDIR)/../../backend/gpu \
+	-I$(CURDIR)/ \
+	-I$(srctree)/drivers/staging/android \
+	-I$(srctree)/include/linux
+
+all:
+	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) $(SCONS_CONFIGS) EXTRA_CFLAGS="$(TEST_CCFLAGS)" KBUILD_EXTRA_SYMBOLS="$(CURDIR)/../kutf/Module.symvers $(CURDIR)/../../Module.symvers" modules
+
+clean:
+	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
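+
+# Example invocation (illustrative; KDIR has no default and must be given,
+# as checked above):
+#   make KDIR=/lib/modules/$(uname -r)/build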
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/build.bp b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/build.bp
new file mode 100644
index 0000000..971f092
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/build.bp
@@ -0,0 +1,32 @@
+/*
+ * Copyright:
+ * ----------------------------------------------------------------------------
+ * This confidential and proprietary software may be used only as authorized
+ * by a licensing agreement from ARM Limited.
+ *      (C) COPYRIGHT 2018-2019 ARM Limited, ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorized copies and
+ * copies may only be made to the extent permitted by a licensing agreement
+ * from ARM Limited.
+ * ----------------------------------------------------------------------------
+ */
+
+bob_kernel_module {
+    name: "mali_kutf_irq_test",
+    defaults: [
+        "mali_kbase_shared_config_defaults",
+        "kernel_test_includes",
+    ],
+    srcs: [
+        "Kbuild",
+        "mali_kutf_irq_test_main.c",
+    ],
+    extra_symbols: [
+        "mali_kbase",
+        "kutf",
+    ],
+    enabled: false,
+    base_build_kutf: {
+        enabled: true,
+        kbuild_options: ["CONFIG_MALI_IRQ_LATENCY=m"],
+    },
+}
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c
new file mode 100644
index 0000000..4181b7f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c
@@ -0,0 +1,273 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include "mali_kbase.h"
+#include <midgard/backend/gpu/mali_kbase_device_internal.h>
+
+#include <kutf/kutf_suite.h>
+#include <kutf/kutf_utils.h>
+
+/*
+ * This file contains the code used for measuring the interrupt latency of
+ * the Mali GPU IRQ. In particular, the function mali_kutf_irq_latency() is
+ * used for this purpose and is called within the KUTF (kernel unit test)
+ * framework. The latency measured by this test should be representative
+ * of the latency of the Mali JOB/MMU IRQs as well.
+ */
+
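+/* In outline: each test iteration records a start timestamp, raises
+ * TEST_IRQ by writing GPU_IRQ_RAWSTAT, and the custom handler installed
+ * for the test timestamps its entry, so a single latency sample is simply
+ * irq_time - start_time, in nanoseconds.
+ */
+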
+/* KUTF test application pointer for this test */
+struct kutf_application *irq_app;
+
+/**
+ * struct kutf_irq_fixture_data - test fixture used by the test functions.
+ * @kbdev:	kbase device for the GPU.
+ *
+ */
+struct kutf_irq_fixture_data {
+	struct kbase_device *kbdev;
+};
+
+#define SEC_TO_NANO(s)	      ((s)*1000000000LL)
+
+/* ID for the GPU IRQ */
+#define GPU_IRQ_HANDLER 2
+
+#define NR_TEST_IRQS 1000000
+
+/* IRQ for the test to trigger. Currently MULTIPLE_GPU_FAULTS as we would not
+ * expect to see this in normal use (e.g., when Android is running).
+ */
+#define TEST_IRQ MULTIPLE_GPU_FAULTS
+
+#define IRQ_TIMEOUT HZ
+
+/* Kernel API for setting the IRQ throttle hook callback and IRQ time in us */
+extern int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
+		irq_handler_t custom_handler,
+		int irq_type);
+extern irqreturn_t kbase_gpu_irq_handler(int irq, void *data);
+
+static DECLARE_WAIT_QUEUE_HEAD(wait);
+static bool triggered;
+static u64 irq_time;
+
+static void *kbase_untag(void *ptr)
+{
+	return (void *)(((uintptr_t) ptr) & ~3);
+}
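+
+/* The pointer passed to the kbase IRQ handlers carries a tag in its two
+ * lowest bits (presumably letting kbase tell the IRQ sources apart);
+ * masking them off recovers the struct kbase_device pointer.
+ */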
+
+/**
+ * kbase_gpu_irq_custom_handler - Custom IRQ throttle handler
+ * @irq:  IRQ number
+ * @data: Data associated with this IRQ
+ *
+ * Return: state of the IRQ
+ */
+static irqreturn_t kbase_gpu_irq_custom_handler(int irq, void *data)
+{
+	struct kbase_device *kbdev = kbase_untag(data);
+	u32 val;
+
+	val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS));
+	if (val & TEST_IRQ) {
+		struct timespec tval;
+
+		getnstimeofday(&tval);
+		irq_time = SEC_TO_NANO(tval.tv_sec) + (tval.tv_nsec);
+
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val);
+
+		triggered = true;
+		wake_up(&wait);
+
+		return IRQ_HANDLED;
+	}
+
+	/* Trigger main irq handler */
+	return kbase_gpu_irq_handler(irq, data);
+}
+
+/**
+ * mali_kutf_irq_default_create_fixture() - Creates the fixture data required
+ *                                          for all the tests in the irq suite.
+ * @context:             KUTF context.
+ *
+ * Return: Fixture data created on success or NULL on failure
+ */
+static void *mali_kutf_irq_default_create_fixture(
+		struct kutf_context *context)
+{
+	struct kutf_irq_fixture_data *data;
+
+	data = kutf_mempool_alloc(&context->fixture_pool,
+			sizeof(struct kutf_irq_fixture_data));
+
+	if (!data)
+		goto fail;
+
+	/* Acquire the kbase device */
+	data->kbdev = kbase_find_device(-1);
+	if (data->kbdev == NULL) {
+		kutf_test_fail(context, "Failed to find kbase device");
+		goto fail;
+	}
+
+	return data;
+
+fail:
+	return NULL;
+}
+
+/**
+ * mali_kutf_irq_default_remove_fixture() - Destroy fixture data previously
+ *                          created by mali_kutf_irq_default_create_fixture.
+ *
+ * @context:             KUTF context.
+ */
+static void mali_kutf_irq_default_remove_fixture(
+		struct kutf_context *context)
+{
+	struct kutf_irq_fixture_data *data = context->fixture;
+	struct kbase_device *kbdev = data->kbdev;
+
+	kbase_release_device(kbdev);
+}
+
+/**
+ * mali_kutf_irq_latency() - measure GPU IRQ latency
+ * @context:		kutf context within which to perform the test
+ *
+ * The test triggers IRQs manually, and measures the
+ * time between triggering the IRQ and the IRQ handler being executed.
+ *
+ * This is not a traditional test, in that the pass/fail status has little
+ * meaning (other than indicating that the IRQ handler executed at all).
+ * Instead, the latencies reported with the test result are what matter;
+ * they are provided for manual analysis only.
+ */
+static void mali_kutf_irq_latency(struct kutf_context *context)
+{
+	struct kutf_irq_fixture_data *data = context->fixture;
+	struct kbase_device *kbdev = data->kbdev;
+	u64 min_time = U64_MAX, max_time = 0, average_time = 0;
+	int i;
+	bool test_failed = false;
+
+	/* Force GPU to be powered */
+	kbase_pm_context_active(kbdev);
+
+	kbase_set_custom_irq_handler(kbdev, kbase_gpu_irq_custom_handler,
+			GPU_IRQ_HANDLER);
+
+	for (i = 0; i < NR_TEST_IRQS; i++) {
+		struct timespec tval;
+		u64 start_time;
+		int ret;
+
+		triggered = false;
+		getnstimeofday(&tval);
+		start_time = SEC_TO_NANO(tval.tv_sec) + (tval.tv_nsec);
+
+		/* Trigger fake IRQ */
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
+				TEST_IRQ);
+
+		ret = wait_event_timeout(wait, triggered != false, IRQ_TIMEOUT);
+
+		if (ret == 0) {
+			kutf_test_fail(context, "Timed out waiting for IRQ\n");
+			test_failed = true;
+			break;
+		}
+
+		if ((irq_time - start_time) < min_time)
+			min_time = irq_time - start_time;
+		if ((irq_time - start_time) > max_time)
+			max_time = irq_time - start_time;
+		average_time += irq_time - start_time;
+
+		udelay(10);
+	}
+
+	/* Go back to default handler */
+	kbase_set_custom_irq_handler(kbdev, NULL, GPU_IRQ_HANDLER);
+
+	kbase_pm_context_idle(kbdev);
+
+	if (!test_failed) {
+		const char *results;
+
+		do_div(average_time, NR_TEST_IRQS);
+		results = kutf_dsprintf(&context->fixture_pool,
+				"Min latency = %lldns, Max latency = %lldns, Average latency = %lldns\n",
+				min_time, max_time, average_time);
+		kutf_test_pass(context, results);
+	}
+}
+
+/**
+ * Module entry point for this test.
+ */
+int mali_kutf_irq_test_main_init(void)
+{
+	struct kutf_suite *suite;
+
+	irq_app = kutf_create_application("irq");
+
+	if (!irq_app) {
+		pr_warn("Creation of test application failed!\n");
+		return -ENOMEM;
+	}
+
+	suite = kutf_create_suite(irq_app, "irq_default",
+			1, mali_kutf_irq_default_create_fixture,
+			mali_kutf_irq_default_remove_fixture);
+
+	if (!suite) {
+		pr_warn("Creation of test suite failed!\n");
+		kutf_destroy_application(irq_app);
+		return -ENOMEM;
+	}
+
+	kutf_add_test(suite, 0x0, "irq_latency",
+			mali_kutf_irq_latency);
+	return 0;
+}
+
+/**
+ * Module exit point for this test.
+ */
+void mali_kutf_irq_test_main_exit(void)
+{
+	kutf_destroy_application(irq_app);
+}
+
+module_init(mali_kutf_irq_test_main_init);
+module_exit(mali_kutf_irq_test_main_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION("1.0");
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/sconscript b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/sconscript
new file mode 100644
index 0000000..76e3730
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/sconscript
@@ -0,0 +1,36 @@
+#
+# (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+import os
+Import('env')
+
+src = [Glob('#kernel/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/*.c'), Glob('#kernel/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile')]
+
+if env.GetOption('clean') :
+	env.Execute(Action("make clean", '[CLEAN] mali_kutf_irq_test'))
+	cmd = env.Command('$STATIC_LIB_PATH/mali_kutf_irq_test.ko', src, [])
+	env.KernelObjTarget('mali_kutf_irq_test', cmd)
+else:
+	makeAction=Action("cd ${SOURCE.dir} && make MALI_UNIT_TEST=${unit} MALI_CUSTOMER_RELEASE=${release} MALI_USE_CSF=${csf} %s && ( ( [ -f mali_kutf_irq_test.ko ] && cp mali_kutf_irq_test.ko $STATIC_LIB_PATH/ ) || touch $STATIC_LIB_PATH/mali_kutf_irq_test.ko)" % env.kernel_get_config_defines(), '$MAKECOMSTR')
+	cmd = env.Command('$STATIC_LIB_PATH/mali_kutf_irq_test.ko', src, [makeAction])
+	env.Depends('$STATIC_LIB_PATH/mali_kutf_irq_test.ko', '$STATIC_LIB_PATH/kutf.ko')
+	env.Depends('$STATIC_LIB_PATH/mali_kutf_irq_test.ko', '$STATIC_LIB_PATH/mali_kbase.ko')
+	env.KernelObjTarget('mali_kutf_irq_test', cmd)
diff --git a/drivers/gpu/arm/midgard/tests/sconscript b/drivers/gpu/arm/midgard/tests/sconscript
new file mode 100644
index 0000000..0bd24a5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/sconscript
@@ -0,0 +1,44 @@
+#
+# (C) COPYRIGHT 2010-2011, 2013, 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+Import ('env')
+
+kutf_env = env.Clone()
+kutf_env.Append(CPPPATH = '#kernel/drivers/gpu/arm/midgard/tests/include')
+Export('kutf_env')
+
+if Glob('internal/sconscript'):
+	SConscript('internal/sconscript')
+
+if kutf_env['debug'] == '1':
+	SConscript('kutf/sconscript')
+	SConscript('mali_kutf_irq_test/sconscript')
+
+	if Glob('kutf_test/sconscript'):
+		SConscript('kutf_test/sconscript')
+
+	if Glob('kutf_test_runner/sconscript'):
+		SConscript('kutf_test_runner/sconscript')
+
+if env['unit'] == '1':
+	SConscript('mali_kutf_ipa_test/sconscript')
+	SConscript('mali_kutf_ipa_unit_test/sconscript')
+	SConscript('mali_kutf_vinstr_test/sconscript')
diff --git a/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c b/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c
new file mode 100644
index 0000000..f266d8e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c
@@ -0,0 +1,366 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/mman.h>
+#include "../mali_kbase.h"
+
+/* mali_kbase_mmap.c
+ *
+ * This file contains the Linux-specific implementation of the
+ * kbase_context_get_unmapped_area() interface.
+ */
+
+
+/**
+ * align_and_check() - Align the specified pointer to the provided alignment and
+ *                     check that it is still in range.
+ * @gap_end:        Highest possible start address for allocation (end of gap in
+ *                  address space)
+ * @gap_start:      Start address of current memory area / gap in address space
+ * @info:           vm_unmapped_area_info structure passed to caller, containing
+ *                  alignment, length and limits for the allocation
+ * @is_shader_code: True if the allocation is for shader code (which has
+ *                  additional alignment requirements)
+ * @is_same_4gb_page: True if the allocation needs to reside completely within
+ *                    a 4GB chunk
+ *
+ * Return: true if gap_end is now aligned correctly and is still in range,
+ *         false otherwise
+ */
+static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
+		struct vm_unmapped_area_info *info, bool is_shader_code,
+		bool is_same_4gb_page)
+{
+	/* Compute highest gap address at the desired alignment */
+	(*gap_end) -= info->length;
+	(*gap_end) -= (*gap_end - info->align_offset) & info->align_mask;
+
+	if (is_shader_code) {
+		/* Check for 4GB boundary */
+		if (0 == (*gap_end & BASE_MEM_MASK_4GB))
+			(*gap_end) -= (info->align_offset ? info->align_offset :
+					info->length);
+		if (0 == ((*gap_end + info->length) & BASE_MEM_MASK_4GB))
+			(*gap_end) -= (info->align_offset ? info->align_offset :
+					info->length);
+
+		if (!(*gap_end & BASE_MEM_MASK_4GB) || !((*gap_end +
+				info->length) & BASE_MEM_MASK_4GB))
+			return false;
+	} else if (is_same_4gb_page) {
+		unsigned long start = *gap_end;
+		unsigned long end = *gap_end + info->length;
+		unsigned long mask = ~((unsigned long)U32_MAX);
+
+		/* Check if 4GB boundary is straddled */
+		if ((start & mask) != ((end - 1) & mask)) {
+			unsigned long offset = end - (end & mask);
+			/* This is to ensure that alignment doesn't get
+			 * disturbed in an attempt to prevent straddling at
+			 * 4GB boundary. The GPU VA is aligned to 2MB when the
+			 * allocation size is > 2MB and there is enough CPU &
+			 * GPU virtual space.
+			 */
+			unsigned long rounded_offset =
+					ALIGN(offset, info->align_mask + 1);
+
+			start -= rounded_offset;
+			end -= rounded_offset;
+
+			*gap_end = start;
+
+			/* The preceding 4GB boundary shall not get straddled,
+			 * even after accounting for the alignment, as the
+			 * size of allocation is limited to 4GB and the initial
+			 * start location was already aligned.
+			 */
+			WARN_ON((start & mask) != ((end - 1) & mask));
+		}
+	}
+
+
+	if ((*gap_end < info->low_limit) || (*gap_end < gap_start))
+		return false;
+
+
+	return true;
+}
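+
+/* Worked example for the alignment step above (illustrative values only):
+ * with *gap_end = 0x40000000, info->length = 0x200000, align_offset = 0
+ * and align_mask = 0x1FFFFF (2MB alignment), reserving the length gives
+ * 0x3FE00000, which already has its low 21 bits clear, so the second
+ * subtraction removes nothing and the allocation lands, 2MB-aligned, at
+ * 0x3FE00000.
+ */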
+
+/**
+ * kbase_unmapped_area_topdown() - allocates new areas top-down from
+ *                                 below the stack limit.
+ * @info:              Information about the memory area to allocate.
+ * @is_shader_code:    Boolean which denotes whether the allocated area is
+ *                     intended for use by the shader cores, in which case
+ *                     special alignment requirements apply.
+ * @is_same_4gb_page: Boolean which indicates whether the allocated area needs
+ *                    to reside completely within a 4GB chunk.
+ *
+ * The unmapped_area_topdown() function in the Linux kernel is not exported
+ * using the EXPORT_SYMBOL_GPL macro. To allow us to call this function from a
+ * module and also make use of the fact that some of the requirements for
+ * the unmapped area are known in advance, we implemented an extended version
+ * of this function and prefixed it with 'kbase_'.
+ *
+ * The difference in the call parameter list comes from the fact that
+ * kbase_unmapped_area_topdown() is called with additional parameters which
+ * are provided to indicate whether the allocation is for shader core memory,
+ * which has additional alignment requirements, and whether the allocation can
+ * straddle a 4GB boundary.
+ *
+ * The modification of the original Linux function lies in how the computation
+ * of the highest gap address at the desired alignment is performed once a
+ * gap with the desirable properties is found. For this purpose a helper,
+ * align_and_check(), is introduced which, besides computing the gap end at
+ * the desired alignment, performs additional checks: for executable shader
+ * core memory it ensures that the gap does not end on a 4GB boundary, and
+ * for memory that must be confined within a 4GB chunk it ensures that the
+ * chunk boundary is not straddled.
+ *
+ * Return: address of the found gap end (high limit) if area is found;
+ *         -ENOMEM if search is unsuccessful
+ */
+static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
+		*info, bool is_shader_code, bool is_same_4gb_page)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long length, low_limit, high_limit, gap_start, gap_end;
+
+	/* Adjust search length to account for worst case alignment overhead */
+	length = info->length + info->align_mask;
+	if (length < info->length)
+		return -ENOMEM;
+
+	/*
+	 * Adjust search limits by the desired length.
+	 * See implementation comment at top of unmapped_area().
+	 */
+	gap_end = info->high_limit;
+	if (gap_end < length)
+		return -ENOMEM;
+	high_limit = gap_end - length;
+
+	if (info->low_limit > high_limit)
+		return -ENOMEM;
+	low_limit = info->low_limit + length;
+
+	/* Check highest gap, which does not precede any rbtree node */
+	gap_start = mm->highest_vm_end;
+	if (gap_start <= high_limit) {
+		if (align_and_check(&gap_end, gap_start, info,
+				is_shader_code, is_same_4gb_page))
+			return gap_end;
+	}
+
+	/* Check if rbtree root looks promising */
+	if (RB_EMPTY_ROOT(&mm->mm_rb))
+		return -ENOMEM;
+	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
+	if (vma->rb_subtree_gap < length)
+		return -ENOMEM;
+
+	while (true) {
+		/* Visit right subtree if it looks promising */
+		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
+			struct vm_area_struct *right =
+				rb_entry(vma->vm_rb.rb_right,
+					 struct vm_area_struct, vm_rb);
+			if (right->rb_subtree_gap >= length) {
+				vma = right;
+				continue;
+			}
+		}
+
+check_current:
+		/* Check if current node has a suitable gap */
+		gap_end = vma->vm_start;
+		if (gap_end < low_limit)
+			return -ENOMEM;
+		if (gap_start <= high_limit && gap_end - gap_start >= length) {
+			/* We found a suitable gap. Clip it with the original
+			 * high_limit. */
+			if (gap_end > info->high_limit)
+				gap_end = info->high_limit;
+
+			if (align_and_check(&gap_end, gap_start, info,
+					is_shader_code, is_same_4gb_page))
+				return gap_end;
+		}
+
+		/* Visit left subtree if it looks promising */
+		if (vma->vm_rb.rb_left) {
+			struct vm_area_struct *left =
+				rb_entry(vma->vm_rb.rb_left,
+					 struct vm_area_struct, vm_rb);
+			if (left->rb_subtree_gap >= length) {
+				vma = left;
+				continue;
+			}
+		}
+
+		/* Go back up the rbtree to find next candidate node */
+		while (true) {
+			struct rb_node *prev = &vma->vm_rb;
+
+			if (!rb_parent(prev))
+				return -ENOMEM;
+			vma = rb_entry(rb_parent(prev),
+				       struct vm_area_struct, vm_rb);
+			if (prev == vma->vm_rb.rb_right) {
+				gap_start = vma->vm_prev ?
+					vma->vm_prev->vm_end : 0;
+				goto check_current;
+			}
+		}
+	}
+
+	return -ENOMEM;
+}
+
+
+/* This function is based on Linux kernel's arch_get_unmapped_area, but
+ * simplified slightly. Modifications come from the fact that some values
+ * about the memory area are known in advance.
+ */
+unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
+		const unsigned long addr, const unsigned long len,
+		const unsigned long pgoff, const unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_unmapped_area_info info;
+	unsigned long align_offset = 0;
+	unsigned long align_mask = 0;
+	unsigned long high_limit = mm->mmap_base;
+	unsigned long low_limit = PAGE_SIZE;
+	int cpu_va_bits = BITS_PER_LONG;
+	int gpu_pc_bits =
+	      kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
+	bool is_shader_code = false;
+	bool is_same_4gb_page = false;
+	unsigned long ret;
+
+	/* err on fixed address */
+	if ((flags & MAP_FIXED) || addr)
+		return -EINVAL;
+
+#ifdef CONFIG_64BIT
+	/* too big? */
+	if (len > TASK_SIZE - SZ_2M)
+		return -ENOMEM;
+
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+
+		high_limit = min_t(unsigned long, mm->mmap_base,
+				(kctx->same_va_end << PAGE_SHIFT));
+
+		/* If there's enough (> 33 bits) of GPU VA space, align
+		 * to 2MB boundaries.
+		 */
+		if (kctx->kbdev->gpu_props.mmu.va_bits > 33) {
+			if (len >= SZ_2M) {
+				align_offset = SZ_2M;
+				align_mask = SZ_2M - 1;
+			}
+		}
+
+		low_limit = SZ_2M;
+	} else {
+		cpu_va_bits = 32;
+	}
+#endif /* CONFIG_64BIT */
+	if ((PFN_DOWN(BASE_MEM_COOKIE_BASE) <= pgoff) &&
+		(PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) > pgoff)) {
+			int cookie = pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
+			struct kbase_va_region *reg;
+
+			/* Need to hold gpu vm lock when using reg */
+			kbase_gpu_vm_lock(kctx);
+			reg = kctx->pending_regions[cookie];
+			if (!reg) {
+				kbase_gpu_vm_unlock(kctx);
+				return -EINVAL;
+			}
+			if (!(reg->flags & KBASE_REG_GPU_NX)) {
+				if (cpu_va_bits > gpu_pc_bits) {
+					align_offset = 1ULL << gpu_pc_bits;
+					align_mask = align_offset - 1;
+					is_shader_code = true;
+				}
+			} else if (reg->flags & KBASE_REG_TILER_ALIGN_TOP) {
+				unsigned long extent_bytes =
+				     (unsigned long)(reg->extent << PAGE_SHIFT);
+				/* kbase_check_alloc_sizes() already satisfies
+				 * these checks, but they're here to avoid
+				 * maintenance hazards due to the assumptions
+				 * involved */
+				WARN_ON(reg->extent > (ULONG_MAX >> PAGE_SHIFT));
+				WARN_ON(reg->initial_commit > (ULONG_MAX >> PAGE_SHIFT));
+				WARN_ON(!is_power_of_2(extent_bytes));
+				align_mask = extent_bytes - 1;
+				align_offset =
+				      extent_bytes - (reg->initial_commit << PAGE_SHIFT);
+			} else if (reg->flags & KBASE_REG_GPU_VA_SAME_4GB_PAGE) {
+				is_same_4gb_page = true;
+			}
+			kbase_gpu_vm_unlock(kctx);
+#ifndef CONFIG_64BIT
+	} else {
+		return current->mm->get_unmapped_area(
+			kctx->filp, addr, len, pgoff, flags);
+#endif
+	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = low_limit;
+	info.high_limit = high_limit;
+	info.align_offset = align_offset;
+	info.align_mask = align_mask;
+
+	ret = kbase_unmapped_area_topdown(&info, is_shader_code,
+			is_same_4gb_page);
+
+	if (IS_ERR_VALUE(ret) && high_limit == mm->mmap_base &&
+			high_limit < (kctx->same_va_end << PAGE_SHIFT)) {
+		/* Retry above mmap_base */
+		info.low_limit = mm->mmap_base;
+		info.high_limit = min_t(u64, TASK_SIZE,
+					(kctx->same_va_end << PAGE_SHIFT));
+
+		ret = kbase_unmapped_area_topdown(&info, is_shader_code,
+				is_same_4gb_page);
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/arm/sconscript b/drivers/gpu/arm/sconscript
new file mode 100644
index 0000000..dd02acd
--- /dev/null
+++ b/drivers/gpu/arm/sconscript
@@ -0,0 +1,26 @@
+#
+# (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+import glob
+
+
+SConscript('midgard/sconscript')
+
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 7a3e5a8..c6e46b1 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -43,6 +43,14 @@
 	  Support for non-programmable RGB to VGA DAC bridges, such as ADI
 	  ADV7123, TI THS8134 and THS8135 or passive resistor ladder DACs.
 
+config DRM_ITE_IT66121
+	tristate "ITE IT66121 HDMI bridge"
+	depends on OF
+	select DRM_KMS_HELPER
+	select REGMAP_I2C
+	help
+	  Support for ITE IT66121 HDMI bridge.
+
 config DRM_LVDS_ENCODER
 	tristate "Transparent parallel to LVDS encoder support"
 	depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 35f88d4..b7c1fe1 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -2,6 +2,7 @@
 obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
 obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
 obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
+obj-$(CONFIG_DRM_ITE_IT66121) += ite-it66121.o
 obj-$(CONFIG_DRM_LVDS_ENCODER) += lvds-encoder.o
 obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
 obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
new file mode 100644
index 0000000..2f26ec7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -0,0 +1,989 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BayLibre, SAS
+ * Author: Phong LE <ple@baylibre.com>
+ * Copyright (C) 2018-2019, Artem Mygaiev
+ * Copyright (C) 2017, Fresco Logic, Incorporated.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modes.h>
+
+#define IT66121_MASTER_SEL_REG			0x10
+#define IT66121_MASTER_SEL_HOST			BIT(0)
+
+#define IT66121_AFE_DRV_REG			0x61
+#define IT66121_AFE_DRV_RST			BIT(4)
+#define IT66121_AFE_DRV_PWD			BIT(5)
+
+#define IT66121_INPUT_MODE_REG			0x70
+#define IT66121_INPUT_MODE_RGB			(0 << 6)
+#define IT66121_INPUT_MODE_YUV422		BIT(6)
+#define IT66121_INPUT_MODE_YUV444		(2 << 6)
+#define IT66121_INPUT_MODE_CCIR656		BIT(4)
+#define IT66121_INPUT_MODE_SYNCEMB		BIT(3)
+#define IT66121_INPUT_MODE_DDR			BIT(2)
+
+#define IT66121_INPUT_CSC_REG			0x72
+#define IT66121_INPUT_CSC_ENDITHER		BIT(7)
+#define IT66121_INPUT_CSC_ENUDFILTER		BIT(6)
+#define IT66121_INPUT_CSC_DNFREE_GO		BIT(5)
+#define IT66121_INPUT_CSC_RGB_TO_YUV		0x02
+#define IT66121_INPUT_CSC_YUV_TO_RGB		0x03
+#define IT66121_INPUT_CSC_NO_CONV		0x00
+
+#define IT66121_AFE_XP_REG			0x62
+#define IT66121_AFE_XP_GAINBIT			BIT(7)
+#define IT66121_AFE_XP_PWDPLL			BIT(6)
+#define IT66121_AFE_XP_ENI			BIT(5)
+#define IT66121_AFE_XP_ENO			BIT(4)
+#define IT66121_AFE_XP_RESETB			BIT(3)
+#define IT66121_AFE_XP_PWDI			BIT(2)
+
+#define IT66121_AFE_IP_REG			0x64
+#define IT66121_AFE_IP_GAINBIT			BIT(7)
+#define IT66121_AFE_IP_PWDPLL			BIT(6)
+#define IT66121_AFE_IP_CKSEL_05			(0 << 4)
+#define IT66121_AFE_IP_CKSEL_1			BIT(4)
+#define IT66121_AFE_IP_CKSEL_2			(2 << 4)
+#define IT66121_AFE_IP_CKSEL_2OR4		(3 << 4)
+#define IT66121_AFE_IP_ER0			BIT(3)
+#define IT66121_AFE_IP_RESETB			BIT(2)
+#define IT66121_AFE_IP_ENC			BIT(1)
+#define IT66121_AFE_IP_EC1			BIT(0)
+
+#define IT66121_AFE_XP_EC1_REG			0x68
+#define IT66121_AFE_XP_EC1_LOWCLK		BIT(4)
+
+#define IT66121_SW_RST_REG			0x04
+#define IT66121_SW_RST_REF			BIT(5)
+#define IT66121_SW_RST_AREF			BIT(4)
+#define IT66121_SW_RST_VID			BIT(3)
+#define IT66121_SW_RST_AUD			BIT(2)
+#define IT66121_SW_RST_HDCP			BIT(0)
+
+#define IT66121_DDC_COMMAND_REG			0x15
+#define IT66121_DDC_COMMAND_BURST_READ		0x0
+#define IT66121_DDC_COMMAND_EDID_READ		0x3
+#define IT66121_DDC_COMMAND_FIFO_CLR		0x9
+#define IT66121_DDC_COMMAND_SCL_PULSE		0xA
+#define IT66121_DDC_COMMAND_ABORT		0xF
+
+#define IT66121_HDCP_REG			0x20
+#define IT66121_HDCP_CPDESIRED			BIT(0)
+#define IT66121_HDCP_EN1P1FEAT			BIT(1)
+
+#define IT66121_INT_STATUS1_REG			0x06
+#define IT66121_INT_STATUS1_AUD_OVF		BIT(7)
+#define IT66121_INT_STATUS1_DDC_NOACK		BIT(5)
+#define IT66121_INT_STATUS1_DDC_FIFOERR		BIT(4)
+#define IT66121_INT_STATUS1_DDC_BUSHANG		BIT(2)
+#define IT66121_INT_STATUS1_RX_SENS_STATUS	BIT(1)
+#define IT66121_INT_STATUS1_HPD_STATUS		BIT(0)
+
+#define IT66121_DDC_HEADER_REG			0x11
+#define IT66121_DDC_HEADER_HDCP			0x74
+#define IT66121_DDC_HEADER_EDID			0xA0
+
+#define IT66121_DDC_OFFSET_REG			0x12
+#define IT66121_DDC_BYTE_REG			0x13
+#define IT66121_DDC_SEGMENT_REG			0x14
+#define IT66121_DDC_RD_FIFO_REG			0x17
+
+#define IT66121_CLK_BANK_REG			0x0F
+#define IT66121_CLK_BANK_PWROFF_RCLK		BIT(6)
+#define IT66121_CLK_BANK_PWROFF_ACLK		BIT(5)
+#define IT66121_CLK_BANK_PWROFF_TXCLK		BIT(4)
+#define IT66121_CLK_BANK_PWROFF_CRCLK		BIT(3)
+#define IT66121_CLK_BANK_0			0
+#define IT66121_CLK_BANK_1			1
+
+#define IT66121_INT_REG				0x05
+#define IT66121_INT_ACTIVE_HIGH			BIT(7)
+#define IT66121_INT_OPEN_DRAIN			BIT(6)
+#define IT66121_INT_TX_CLK_OFF			BIT(0)
+
+#define IT66121_INT_MASK1_REG			0x09
+#define IT66121_INT_MASK1_AUD_OVF		BIT(7)
+#define IT66121_INT_MASK1_DDC_NOACK		BIT(5)
+#define IT66121_INT_MASK1_DDC_FIFOERR		BIT(4)
+#define IT66121_INT_MASK1_DDC_BUSHANG		BIT(2)
+#define IT66121_INT_MASK1_RX_SENS		BIT(1)
+#define IT66121_INT_MASK1_HPD			BIT(0)
+
+#define IT66121_INT_CLR1_REG			0x0C
+#define IT66121_INT_CLR1_PKTACP			BIT(7)
+#define IT66121_INT_CLR1_PKTNULL		BIT(6)
+#define IT66121_INT_CLR1_PKTGEN			BIT(5)
+#define IT66121_INT_CLR1_KSVLISTCHK		BIT(4)
+#define IT66121_INT_CLR1_AUTHDONE		BIT(3)
+#define IT66121_INT_CLR1_AUTHFAIL		BIT(2)
+#define IT66121_INT_CLR1_RX_SENS		BIT(1)
+#define IT66121_INT_CLR1_HPD			BIT(0)
+
+#define IT66121_AV_MUTE_REG			0xC1
+#define IT66121_AV_MUTE_ON			BIT(0)
+#define IT66121_AV_MUTE_BLUESCR			BIT(1)
+
+#define IT66121_PKT_GEN_CTRL_REG		0xC6
+#define IT66121_PKT_GEN_CTRL_ON			BIT(0)
+#define IT66121_PKT_GEN_CTRL_RPT		BIT(1)
+
+#define IT66121_AVIINFO_DB1_REG			0x158
+#define IT66121_AVIINFO_DB2_REG			0x159
+#define IT66121_AVIINFO_DB3_REG			0x15A
+#define IT66121_AVIINFO_DB4_REG			0x15B
+#define IT66121_AVIINFO_DB5_REG			0x15C
+#define IT66121_AVIINFO_CSUM_REG		0x15D
+#define IT66121_AVIINFO_DB6_REG			0x15E
+#define IT66121_AVIINFO_DB7_REG			0x15F
+#define IT66121_AVIINFO_DB8_REG			0x160
+#define IT66121_AVIINFO_DB9_REG			0x161
+#define IT66121_AVIINFO_DB10_REG		0x162
+#define IT66121_AVIINFO_DB11_REG		0x163
+#define IT66121_AVIINFO_DB12_REG		0x164
+#define IT66121_AVIINFO_DB13_REG		0x165
+
+#define IT66121_AVI_INFO_PKT_REG		0xCD
+#define IT66121_AVI_INFO_PKT_ON			BIT(0)
+#define IT66121_AVI_INFO_PKT_RPT		BIT(1)
+
+#define IT66121_HDMI_MODE_REG			0xC0
+#define IT66121_HDMI_MODE_HDMI			BIT(0)
+
+#define IT66121_SYS_STATUS_REG			0x0E
+#define IT66121_SYS_STATUS_ACTIVE_IRQ		BIT(7)
+#define IT66121_SYS_STATUS_HPDETECT		BIT(6)
+#define IT66121_SYS_STATUS_SENDECTECT		BIT(5)
+#define IT66121_SYS_STATUS_VID_STABLE		BIT(4)
+#define IT66121_SYS_STATUS_AUD_CTS_CLR		BIT(1)
+#define IT66121_SYS_STATUS_CLEAR_IRQ		BIT(0)
+
+#define IT66121_DDC_STATUS_REG			0x16
+#define IT66121_DDC_STATUS_TX_DONE		BIT(7)
+#define IT66121_DDC_STATUS_ACTIVE		BIT(6)
+#define IT66121_DDC_STATUS_NOACK		BIT(5)
+#define IT66121_DDC_STATUS_WAIT_BUS		BIT(4)
+#define IT66121_DDC_STATUS_ARBI_LOSE		BIT(3)
+#define IT66121_DDC_STATUS_FIFO_FULL		BIT(2)
+#define IT66121_DDC_STATUS_FIFO_EMPTY		BIT(1)
+#define IT66121_DDC_STATUS_FIFO_VALID		BIT(0)
+
+#define IT66121_VENDOR_ID0			0x54
+#define IT66121_VENDOR_ID1			0x49
+#define IT66121_DEVICE_ID0			0x12
+#define IT66121_DEVICE_ID1			0x06
+#define IT66121_DEVICE_MASK			0x0F
+#define IT66121_EDID_SLEEP			20000
+#define IT66121_EDID_TIMEOUT			200000
+#define IT66121_EDID_FIFO_SIZE			32
+#define IT66121_AFE_CLK_HIGH			80000
+
+struct it66121_conf {
+	unsigned int input_mode_reg;
+	unsigned int input_conversion_reg;
+};
+
+struct it66121_ctx {
+	struct regmap *regmap;
+	struct drm_bridge bridge;
+	struct drm_connector connector;
+	struct device *dev;
+	struct gpio_desc *gpio_reset;
+	struct i2c_client *client;
+	struct regulator_bulk_data supplies[3];
+	bool dual_edge;
+	const struct it66121_conf *conf;
+	struct mutex lock; /* Protects fields below and device registers */
+	struct edid *edid;
+	struct hdmi_avi_infoframe hdmi_avi_infoframe;
+};
+
+static const struct regmap_range_cfg it66121_regmap_banks[] = {
+	{
+		.name = "it66121",
+		.range_min = 0x00,
+		.range_max = 0x1FF,
+		.selector_reg = IT66121_CLK_BANK_REG,
+		.selector_mask = 0x1,
+		.selector_shift = 0,
+		.window_start = 0x00,
+		.window_len = 0x130,
+	},
+};
+
+static const struct regmap_config it66121_regmap_config = {
+	.val_bits = 8,
+	.reg_bits = 8,
+	.max_register = 0x1FF,
+	.ranges = it66121_regmap_banks,
+	.num_ranges = ARRAY_SIZE(it66121_regmap_banks),
+};
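+
+/* Informal note: the IT66121 has more registers than fit in one 8-bit I2C
+ * window, so the upper registers live in a second bank selected by bit 0
+ * of IT66121_CLK_BANK_REG. The regmap range above performs the bank
+ * switching, letting the rest of the driver use flat 0x000-0x1FF register
+ * addresses (e.g. the AVI infoframe block at 0x158).
+ */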
+
+static const struct it66121_conf it66121_conf_simple = {
+	.input_mode_reg = IT66121_INPUT_MODE_RGB | IT66121_INPUT_MODE_DDR,
+	.input_conversion_reg = IT66121_INPUT_CSC_NO_CONV,
+};
+
+static void it66121_hw_reset(struct it66121_ctx *ctx)
+{
+	gpiod_set_value(ctx->gpio_reset, 1);
+	msleep(20);
+	gpiod_set_value(ctx->gpio_reset, 0);
+}
+
+static int ite66121_power_on(struct it66121_ctx *ctx)
+{
+	return regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int ite66121_power_off(struct it66121_ctx *ctx)
+{
+	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int it66121_preamble_ddc(struct it66121_ctx *ctx)
+{
+	return regmap_write(ctx->regmap, IT66121_MASTER_SEL_REG,
+				IT66121_MASTER_SEL_HOST);
+}
+
+static int it66121_fire_afe(struct it66121_ctx *ctx)
+{
+	return regmap_write(ctx->regmap, IT66121_AFE_DRV_REG, 0);
+}
+
+static int it66121_configure_input(struct it66121_ctx *ctx)
+{
+	int ret;
+
+	ret = regmap_write(ctx->regmap, IT66121_INPUT_MODE_REG,
+			   ctx->conf->input_mode_reg);
+	if (ret)
+		return ret;
+
+	return regmap_write(ctx->regmap, IT66121_INPUT_CSC_REG,
+			    ctx->conf->input_conversion_reg);
+}
+
+/**
+ * it66121_configure_afe() - Configure the analog front end
+ * @ctx: it66121_ctx object
+ * @mode: display mode, used to select the low or high pixel clock setup
+ *
+ * Return: zero on success, a negative error code otherwise.
+ */
+static int it66121_configure_afe(struct it66121_ctx *ctx,
+				 const struct drm_display_mode *mode)
+{
+	int ret;
+
+	ret = regmap_write(ctx->regmap, IT66121_AFE_DRV_REG,
+			   IT66121_AFE_DRV_RST);
+	if (ret)
+		return ret;
+
+	if (mode->clock > IT66121_AFE_CLK_HIGH) {
+		ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
+					IT66121_AFE_XP_GAINBIT |
+					IT66121_AFE_XP_ENO,
+					IT66121_AFE_XP_GAINBIT);
+		if (ret)
+			return ret;
+
+		ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+					IT66121_AFE_IP_GAINBIT |
+					IT66121_AFE_IP_ER0 |
+					IT66121_AFE_IP_EC1,
+					IT66121_AFE_IP_GAINBIT);
+		if (ret)
+			return ret;
+
+		ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
+					IT66121_AFE_XP_EC1_LOWCLK, 0x80);
+		if (ret)
+			return ret;
+	} else {
+		ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
+					IT66121_AFE_XP_GAINBIT |
+					IT66121_AFE_XP_ENO,
+					IT66121_AFE_XP_ENO);
+		if (ret)
+			return ret;
+
+		ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+					IT66121_AFE_IP_GAINBIT |
+					IT66121_AFE_IP_ER0 |
+					IT66121_AFE_IP_EC1, IT66121_AFE_IP_ER0 |
+					IT66121_AFE_IP_EC1);
+		if (ret)
+			return ret;
+
+		ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
+					IT66121_AFE_XP_EC1_LOWCLK,
+					IT66121_AFE_XP_EC1_LOWCLK);
+		if (ret)
+			return ret;
+	}
+
+	/* Clear reset flags */
+	ret = regmap_write_bits(ctx->regmap, IT66121_SW_RST_REG,
+				IT66121_SW_RST_REF | IT66121_SW_RST_VID,
+				~(IT66121_SW_RST_REF | IT66121_SW_RST_VID) &
+				0xFF);
+	if (ret)
+		return ret;
+
+	return it66121_fire_afe(ctx);
+}
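+
+/* Note: the AFE has two operating points. Modes with a pixel clock above
+ * IT66121_AFE_CLK_HIGH (80 MHz) get the high-gain XP/IP settings;
+ * everything below gets the low-clock settings, including
+ * IT66121_AFE_XP_EC1_LOWCLK.
+ */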
+
+static inline int it66121_wait_ddc_ready(struct it66121_ctx *ctx)
+{
+	int ret;
+	unsigned int val;
+
+	ret = regmap_read_poll_timeout(ctx->regmap, IT66121_DDC_STATUS_REG,
+				       val, true,
+				       IT66121_EDID_SLEEP,
+				       IT66121_EDID_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (val & (IT66121_DDC_STATUS_NOACK | IT66121_DDC_STATUS_WAIT_BUS |
+	    IT66121_DDC_STATUS_ARBI_LOSE))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int it66121_clear_ddc_fifo(struct it66121_ctx *ctx)
+{
+	int ret;
+
+	ret = it66121_preamble_ddc(ctx);
+	if (ret)
+		return ret;
+
+	return regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
+			    IT66121_DDC_COMMAND_FIFO_CLR);
+}
+
+static int it66121_abort_ddc_ops(struct it66121_ctx *ctx)
+{
+	int ret;
+	unsigned int swreset, cpdesire;
+
+	ret = regmap_read(ctx->regmap, IT66121_SW_RST_REG, &swreset);
+	if (ret)
+		return ret;
+
+	ret = regmap_read(ctx->regmap, IT66121_HDCP_REG, &cpdesire);
+	if (ret)
+		return ret;
+
+	ret = regmap_write(ctx->regmap, IT66121_HDCP_REG,
+			   cpdesire & (~IT66121_HDCP_CPDESIRED & 0xFF));
+	if (ret)
+		return ret;
+
+	ret = regmap_write(ctx->regmap, IT66121_SW_RST_REG,
+			   swreset | IT66121_SW_RST_HDCP);
+	if (ret)
+		return ret;
+
+	ret = it66121_preamble_ddc(ctx);
+	if (ret)
+		return ret;
+
+	ret = regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
+			   IT66121_DDC_COMMAND_ABORT);
+	if (ret)
+		return ret;
+
+	return it66121_wait_ddc_ready(ctx);
+}
+
+static int it66121_get_edid_block(void *context, u8 *buf,
+				  unsigned int block, size_t len)
+{
+	struct it66121_ctx *ctx = context;
+	unsigned int val;
+	int remain = len;
+	int offset = 0;
+	int ret, cnt;
+
+	offset = (block % 2) * len;
+	block = block / 2;
+
+	ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
+	if (ret)
+		return ret;
+
+	if (val & IT66121_INT_STATUS1_DDC_BUSHANG) {
+		ret = it66121_abort_ddc_ops(ctx);
+		if (ret)
+			return ret;
+	}
+
+	ret = it66121_clear_ddc_fifo(ctx);
+	if (ret)
+		return ret;
+
+	while (remain > 0) {
+		cnt = (remain > IT66121_EDID_FIFO_SIZE) ?
+				IT66121_EDID_FIFO_SIZE : remain;
+		ret = it66121_preamble_ddc(ctx);
+		if (ret)
+			return ret;
+
+		ret = regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
+				   IT66121_DDC_COMMAND_FIFO_CLR);
+		if (ret)
+			return ret;
+
+		ret = it66121_wait_ddc_ready(ctx);
+		if (ret)
+			return ret;
+
+		ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
+		if (ret)
+			return ret;
+
+		if (val & IT66121_INT_STATUS1_DDC_BUSHANG) {
+			ret = it66121_abort_ddc_ops(ctx);
+			if (ret)
+				return ret;
+		}
+
+		ret = it66121_preamble_ddc(ctx);
+		if (ret)
+			return ret;
+
+		ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
+				   IT66121_DDC_HEADER_EDID);
+		if (ret)
+			return ret;
+
+		ret = regmap_write(ctx->regmap, IT66121_DDC_OFFSET_REG, offset);
+		if (ret)
+			return ret;
+
+		ret = regmap_write(ctx->regmap, IT66121_DDC_BYTE_REG, cnt);
+		if (ret)
+			return ret;
+
+		ret = regmap_write(ctx->regmap, IT66121_DDC_SEGMENT_REG, block);
+		if (ret)
+			return ret;
+
+		ret = regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
+				   IT66121_DDC_COMMAND_EDID_READ);
+		if (ret)
+			return ret;
+
+		offset += cnt;
+		remain -= cnt;
+		msleep(20);
+
+		ret = it66121_wait_ddc_ready(ctx);
+		if (ret)
+			return ret;
+
+		do {
+			ret = regmap_read(ctx->regmap,
+					  IT66121_DDC_RD_FIFO_REG, &val);
+			if (ret)
+				return ret;
+			*(buf++) = val;
+			cnt--;
+		} while (cnt > 0);
+	}
+
+	return 0;
+}
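+
+/* Note: EDID data is pulled through the chip's 32-byte DDC FIFO
+ * (IT66121_EDID_FIFO_SIZE) a chunk at a time; since one DDC segment spans
+ * two 128-byte EDID blocks, the helper above derives the segment as
+ * block / 2 and the starting offset as (block % 2) * len.
+ */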
+
+static int it66121_connector_get_modes(struct drm_connector *connector)
+{
+	int ret, num_modes = 0;
+	struct it66121_ctx *ctx = container_of(connector, struct it66121_ctx,
+			connector);
+
+	if (ctx->edid)
+		return drm_add_edid_modes(connector, ctx->edid);
+
+	mutex_lock(&ctx->lock);
+
+	ctx->edid = drm_do_get_edid(connector, it66121_get_edid_block, ctx);
+	if (!ctx->edid) {
+		DRM_ERROR("Failed to read EDID\n");
+		goto unlock;
+	}
+
+	ret = drm_connector_update_edid_property(connector,
+						 ctx->edid);
+	if (ret) {
+		DRM_ERROR("Failed to update EDID property: %d\n", ret);
+		goto unlock;
+	}
+
+	num_modes = drm_add_edid_modes(connector, ctx->edid);
+
+unlock:
+	mutex_unlock(&ctx->lock);
+
+	return num_modes;
+}
+
+static bool it66121_is_hpd_detect(struct it66121_ctx *ctx)
+{
+	unsigned int val;
+
+	if (regmap_read(ctx->regmap, IT66121_SYS_STATUS_REG, &val))
+		return false;
+
+	return (val & IT66121_SYS_STATUS_HPDETECT);
+}
+
+static int it66121_connector_detect_ctx(struct drm_connector *connector,
+					struct drm_modeset_acquire_ctx *c,
+					bool force)
+{
+	struct it66121_ctx *ctx = container_of(connector, struct it66121_ctx,
+			connector);
+
+	return (it66121_is_hpd_detect(ctx)) ?
+		connector_status_connected : connector_status_disconnected;
+}
+
+static enum drm_mode_status
+it66121_connector_mode_valid(struct drm_connector *connector,
+			     struct drm_display_mode *mode)
+{
+	unsigned long max_clock;
+	struct it66121_ctx *ctx = container_of(connector, struct it66121_ctx,
+			connector);
+
+	max_clock = ctx->dual_edge ? 74250 : 148500;
+
+	if (mode->clock > max_clock)
+		return MODE_CLOCK_HIGH;
+
+	if (mode->clock < 25000)
+		return MODE_CLOCK_LOW;
+
+	return MODE_OK;
+}
+
+static struct drm_connector_helper_funcs it66121_connector_helper_funcs = {
+	.get_modes = it66121_connector_get_modes,
+	.detect_ctx = it66121_connector_detect_ctx,
+	.mode_valid = it66121_connector_mode_valid,
+};
+
+static const struct drm_connector_funcs it66121_connector_funcs = {
+	.reset = drm_atomic_helper_connector_reset,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = drm_connector_cleanup,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int it66121_bridge_attach(struct drm_bridge *bridge)
+{
+	int ret;
+	struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx,
+			bridge);
+
+	if (!bridge->encoder) {
+		DRM_ERROR("Parent encoder object not found");
+		return -ENODEV;
+	}
+
+	ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+				IT66121_CLK_BANK_PWROFF_RCLK, 0);
+	if (ret)
+		return ret;
+
+	ret = regmap_write_bits(ctx->regmap, IT66121_INT_REG,
+				IT66121_INT_TX_CLK_OFF, 0);
+	if (ret)
+		return ret;
+
+	ret = regmap_write_bits(ctx->regmap, IT66121_AFE_DRV_REG,
+				IT66121_AFE_DRV_PWD, 0);
+	if (ret)
+		return ret;
+
+	ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
+				IT66121_AFE_XP_PWDI | IT66121_AFE_XP_PWDPLL, 0);
+	if (ret)
+		return ret;
+
+	ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+				IT66121_AFE_IP_PWDPLL, 0);
+	if (ret)
+		return ret;
+
+	ret = regmap_write_bits(ctx->regmap, IT66121_AFE_DRV_REG,
+				IT66121_AFE_DRV_RST, 0);
+	if (ret)
+		return ret;
+
+	ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
+				IT66121_AFE_XP_RESETB, IT66121_AFE_XP_RESETB);
+	if (ret)
+		return ret;
+
+	ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+				IT66121_AFE_IP_RESETB, IT66121_AFE_IP_RESETB);
+	if (ret)
+		return ret;
+
+	ret = regmap_write_bits(ctx->regmap, IT66121_SW_RST_REG,
+				IT66121_SW_RST_REF,
+				IT66121_SW_RST_REF);
+	if (ret)
+		return ret;
+
+	msleep(50);
+
+	ret = drm_connector_init(bridge->dev, &ctx->connector,
+				 &it66121_connector_funcs,
+				 DRM_MODE_CONNECTOR_HDMIA);
+	if (ret)
+		return ret;
+
+	ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
+	drm_connector_helper_add(&ctx->connector,
+				 &it66121_connector_helper_funcs);
+
+	ret = drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
+	if (ret)
+		return ret;
+
+	ret = drm_connector_register(&ctx->connector);
+	if (ret)
+		return ret;
+
+	/* Start interrupts */
+	return regmap_write_bits(ctx->regmap, IT66121_INT_MASK1_REG,
+				 IT66121_INT_MASK1_DDC_NOACK |
+				 IT66121_INT_MASK1_HPD |
+				 IT66121_INT_MASK1_DDC_FIFOERR |
+				 IT66121_INT_MASK1_DDC_BUSHANG,
+				 ~(IT66121_INT_MASK1_DDC_NOACK |
+				 IT66121_INT_MASK1_HPD |
+				 IT66121_INT_MASK1_DDC_FIFOERR |
+				 IT66121_INT_MASK1_DDC_BUSHANG) & 0xFF);
+}
+
+static int it66121_set_mute(struct it66121_ctx *ctx, bool mute)
+{
+	int ret;
+	unsigned int val;
+
+	val = mute ? IT66121_AV_MUTE_ON : (~IT66121_AV_MUTE_ON & 0xFF);
+	ret = regmap_write_bits(ctx->regmap, IT66121_AV_MUTE_REG,
+				IT66121_AV_MUTE_ON, val);
+	if (ret)
+		return ret;
+
+	return regmap_write(ctx->regmap, IT66121_PKT_GEN_CTRL_REG,
+			    IT66121_PKT_GEN_CTRL_ON |
+			    IT66121_PKT_GEN_CTRL_RPT);
+}
+
+static void it66121_bridge_enable(struct drm_bridge *bridge)
+{
+	struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx,
+			bridge);
+
+	it66121_set_mute(ctx, false);
+}
+
+static void it66121_bridge_disable(struct drm_bridge *bridge)
+{
+	struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx,
+			bridge);
+
+	it66121_set_mute(ctx, true);
+}
+
+static
+void it66121_bridge_mode_set(struct drm_bridge *bridge,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode)
+{
+	int ret, i;
+	u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
+	struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx,
+			bridge);
+	const u16 aviinfo_reg[HDMI_AVI_INFOFRAME_SIZE] = {
+		IT66121_AVIINFO_DB1_REG,
+		IT66121_AVIINFO_DB2_REG,
+		IT66121_AVIINFO_DB3_REG,
+		IT66121_AVIINFO_DB4_REG,
+		IT66121_AVIINFO_DB5_REG,
+		IT66121_AVIINFO_DB6_REG,
+		IT66121_AVIINFO_DB7_REG,
+		IT66121_AVIINFO_DB8_REG,
+		IT66121_AVIINFO_DB9_REG,
+		IT66121_AVIINFO_DB10_REG,
+		IT66121_AVIINFO_DB11_REG,
+		IT66121_AVIINFO_DB12_REG,
+		IT66121_AVIINFO_DB13_REG
+	};
+
+	mutex_lock(&ctx->lock);
+
+	hdmi_avi_infoframe_init(&ctx->hdmi_avi_infoframe);
+
+	ret = drm_hdmi_avi_infoframe_from_display_mode(&ctx->hdmi_avi_infoframe,
+						       adjusted_mode, false);
+	if (ret) {
+		DRM_ERROR("Failed to setup AVI infoframe: %d\n", ret);
+		goto unlock;
+	}
+
+	ret = hdmi_avi_infoframe_pack(&ctx->hdmi_avi_infoframe, buf,
+				      sizeof(buf));
+	if (ret < 0) {
+		DRM_ERROR("Failed to pack infoframe: %d\n", ret);
+		goto unlock;
+	}
+
+	/* Write new AVI infoframe packet */
+	for (i = 0; i < HDMI_AVI_INFOFRAME_SIZE; i++) {
+		if (regmap_write(ctx->regmap, aviinfo_reg[i],
+				 buf[i + HDMI_INFOFRAME_HEADER_SIZE]))
+			goto unlock;
+	}
+	if (regmap_write(ctx->regmap, IT66121_AVIINFO_CSUM_REG, buf[3]))
+		goto unlock;
+
+	/* Enable AVI infoframe */
+	if (regmap_write(ctx->regmap, IT66121_AVI_INFO_PKT_REG,
+			 IT66121_AVI_INFO_PKT_ON |
+			 IT66121_AVI_INFO_PKT_RPT))
+		goto unlock;
+
+	/* Set TX mode to HDMI */
+	if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG,
+			 IT66121_HDMI_MODE_HDMI))
+		goto unlock;
+
+	if (regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+			      IT66121_CLK_BANK_PWROFF_TXCLK,
+			      IT66121_CLK_BANK_PWROFF_TXCLK))
+		goto unlock;
+
+	if (it66121_configure_input(ctx))
+		goto unlock;
+
+	if (it66121_configure_afe(ctx, adjusted_mode))
+		goto unlock;
+
+	regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+			  IT66121_CLK_BANK_PWROFF_TXCLK,
+			  ~IT66121_CLK_BANK_PWROFF_TXCLK & 0xFF);
+
+unlock:
+	mutex_unlock(&ctx->lock);
+}
+
+static const struct drm_bridge_funcs it66121_bridge_funcs = {
+	.attach = it66121_bridge_attach,
+	.enable = it66121_bridge_enable,
+	.disable = it66121_bridge_disable,
+	.mode_set = it66121_bridge_mode_set,
+};
+
+static irqreturn_t it66121_irq_threaded_handler(int irq, void *dev_id)
+{
+	int ret;
+	unsigned int val;
+	struct it66121_ctx *ctx = dev_id;
+	struct device *dev = ctx->dev;
+	bool event = false;
+
+	mutex_lock(&ctx->lock);
+
+	ret = regmap_read(ctx->regmap, IT66121_SYS_STATUS_REG, &val);
+	if (ret)
+		goto unlock;
+
+	if (val & IT66121_SYS_STATUS_ACTIVE_IRQ) {
+		ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
+		if (ret) {
+			dev_err(dev, "Cannot read STATUS1_REG %d\n", ret);
+		} else {
+			if (val & IT66121_INT_STATUS1_DDC_FIFOERR)
+				it66121_clear_ddc_fifo(ctx);
+			if (val & (IT66121_INT_STATUS1_DDC_BUSHANG |
+					IT66121_INT_STATUS1_DDC_NOACK))
+				it66121_abort_ddc_ops(ctx);
+			if (val & IT66121_INT_STATUS1_HPD_STATUS) {
+				regmap_write_bits(ctx->regmap,
+						  IT66121_INT_CLR1_REG,
+						  IT66121_INT_CLR1_HPD,
+						  IT66121_INT_CLR1_HPD);
+
+				if (!it66121_is_hpd_detect(ctx)) {
+					kfree(ctx->edid);
+					ctx->edid = NULL;
+				}
+				event = true;
+			}
+		}
+
+		regmap_write_bits(ctx->regmap, IT66121_SYS_STATUS_REG,
+				  IT66121_SYS_STATUS_CLEAR_IRQ,
+				  IT66121_SYS_STATUS_CLEAR_IRQ);
+	}
+
+unlock:
+	mutex_unlock(&ctx->lock);
+
+	if (event)
+		drm_helper_hpd_irq_event(ctx->bridge.dev);
+
+	return IRQ_HANDLED;
+}
+
+static int it66121_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	u8 ids[4];
+	int i, ret;
+	struct it66121_ctx *ctx;
+	struct device *dev = &client->dev;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(dev, "I2C check functionality failed.\n");
+		return -ENXIO;
+	}
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->dev = dev;
+	ctx->client = client;
+	i2c_set_clientdata(client, ctx);
+	mutex_init(&ctx->lock);
+	ctx->conf = (struct it66121_conf *)of_device_get_match_data(dev);
+	if (!ctx->conf)
+		return -ENODEV;
+
+	ctx->supplies[0].supply = "vcn33";
+	ctx->supplies[1].supply = "vcn18";
+	ctx->supplies[2].supply = "vrf12";
+	ret = devm_regulator_bulk_get(ctx->dev, ARRAY_SIZE(ctx->supplies),
+				      ctx->supplies);
+	if (ret) {
+		dev_err(ctx->dev, "regulator_bulk failed\n");
+		return ret;
+	}
+
+	ctx->dual_edge = of_property_read_bool(dev->of_node, "pclk-dual-edge");
+
+	ret = ite66121_power_on(ctx);
+	if (ret)
+		return ret;
+
+	it66121_hw_reset(ctx);
+
+	ctx->regmap = devm_regmap_init_i2c(client, &it66121_regmap_config);
+	if (IS_ERR(ctx->regmap)) {
+		ite66121_power_off(ctx);
+		return PTR_ERR(ctx->regmap);
+	}
+
+	for (i = 0; i < 4; i++) {
+		unsigned int val = 0;
+
+		regmap_read(ctx->regmap, i, &val);
+		ids[i] = val;
+	}
+
+	if (ids[0] != IT66121_VENDOR_ID0 ||
+	    ids[1] != IT66121_VENDOR_ID1 ||
+	    ids[2] != IT66121_DEVICE_ID0 ||
+	    ((ids[3] & IT66121_DEVICE_MASK) != IT66121_DEVICE_ID1)) {
+		ite66121_power_off(ctx);
+		return -ENODEV;
+	}
+
+	ctx->bridge.funcs = &it66121_bridge_funcs;
+	ctx->bridge.of_node = dev->of_node;
+
+	ret = devm_request_threaded_irq(dev, client->irq, NULL,
+					it66121_irq_threaded_handler,
+					IRQF_SHARED | IRQF_TRIGGER_LOW |
+					IRQF_ONESHOT,
+					dev_name(dev),
+					ctx);
+	if (ret < 0) {
+		dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret);
+		ite66121_power_off(ctx);
+		return ret;
+	}
+
+	drm_bridge_add(&ctx->bridge);
+
+	return 0;
+}
+
+static int it66121_remove(struct i2c_client *client)
+{
+	struct it66121_ctx *ctx = i2c_get_clientdata(client);
+
+	ite66121_power_off(ctx);
+	drm_bridge_remove(&ctx->bridge);
+	kfree(ctx->edid);
+	mutex_destroy(&ctx->lock);
+
+	return 0;
+}
+
+static const struct of_device_id it66121_dt_match[] = {
+	{ .compatible = "ite,it66121",
+	  .data = &it66121_conf_simple,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, it66121_dt_match);
+
+static const struct i2c_device_id it66121_id[] = {
+	{ "it66121", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, it66121_id);
+
+static struct i2c_driver it66121_driver = {
+	.driver = {
+		.name	= "it66121",
+		.of_match_table = it66121_dt_match,
+	},
+	.probe = it66121_probe,
+	.remove = it66121_remove,
+	.id_table = it66121_id,
+};
+
+module_i2c_driver(it66121_driver);
+
+MODULE_AUTHOR("Phong LE");
+MODULE_DESCRIPTION("IT66121 HDMI transmitter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index ce83c39..6f4a082 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+
 mediatek-drm-y := mtk_disp_color.o \
 		  mtk_disp_ovl.o \
 		  mtk_disp_rdma.o \
@@ -11,6 +12,8 @@
 		  mtk_drm_plane.o \
 		  mtk_dsi.o \
 		  mtk_mipi_tx.o \
+		  mtk_mt8173_mipi_tx.o \
+		  mtk_mt8183_mipi_tx.o \
 		  mtk_dpi.o
 
 obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
@@ -18,6 +21,9 @@
 mediatek-drm-hdmi-objs := mtk_cec.o \
 			  mtk_hdmi.o \
 			  mtk_hdmi_ddc.o \
-			  mtk_mt8173_hdmi_phy.o
+			  mtk_mt2701_hdmi_phy.o \
+			  mtk_mt8167_hdmi_phy.o \
+			  mtk_mt8173_hdmi_phy.o \
+			  mtk_hdmi_phy.o
 
 obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index f609b62..b5f68e8 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -23,6 +23,7 @@
 
 #define DISP_COLOR_CFG_MAIN			0x0400
 #define DISP_COLOR_START_MT2701			0x0f00
+#define DISP_COLOR_START_MT8167			0x0400
 #define DISP_COLOR_START_MT8173			0x0c00
 #define DISP_COLOR_START(comp)			((comp)->data->color_offset)
 #define DISP_COLOR_WIDTH(comp)			(DISP_COLOR_START(comp) + 0x50)
@@ -152,6 +153,10 @@
 	.color_offset = DISP_COLOR_START_MT2701,
 };
 
+static const struct mtk_disp_color_data mt8167_color_driver_data = {
+	.color_offset = DISP_COLOR_START_MT8167,
+};
+
 static const struct mtk_disp_color_data mt8173_color_driver_data = {
 	.color_offset = DISP_COLOR_START_MT8173,
 };
@@ -159,6 +164,8 @@
 static const struct of_device_id mtk_disp_color_driver_dt_match[] = {
 	{ .compatible = "mediatek,mt2701-disp-color",
 	  .data = &mt2701_color_driver_data},
+	{ .compatible = "mediatek,mt8167-disp-color",
+	  .data = &mt8167_color_driver_data},
 	{ .compatible = "mediatek,mt8173-disp-color",
 	  .data = &mt8173_color_driver_data},
 	{},
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 28d1911..6cf019c 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -27,6 +27,8 @@
 #define DISP_REG_OVL_EN				0x000c
 #define DISP_REG_OVL_RST			0x0014
 #define DISP_REG_OVL_ROI_SIZE			0x0020
+#define DISP_REG_OVL_DATAPATH_CON		0x0024
+#define OVL_BGCLR_SEL_IN			BIT(2)
 #define DISP_REG_OVL_ROI_BGCLR			0x0028
 #define DISP_REG_OVL_SRC_CON			0x002c
 #define DISP_REG_OVL_CON(n)			(0x0030 + 0x20 * (n))
@@ -36,10 +38,13 @@
 #define DISP_REG_OVL_RDMA_CTRL(n)		(0x00c0 + 0x20 * (n))
 #define DISP_REG_OVL_RDMA_GMC(n)		(0x00c8 + 0x20 * (n))
 #define DISP_REG_OVL_ADDR_MT2701		0x0040
+#define DISP_REG_OVL_ADDR_MT8167		0x0f40
 #define DISP_REG_OVL_ADDR_MT8173		0x0f40
 #define DISP_REG_OVL_ADDR(ovl, n)		((ovl)->data->addr + 0x20 * (n))
 
-#define	OVL_RDMA_MEM_GMC	0x40402020
+#define GMC_THRESHOLD_BITS	16
+#define GMC_THRESHOLD_HIGH	((1 << GMC_THRESHOLD_BITS) / 4)
+#define GMC_THRESHOLD_LOW	((1 << GMC_THRESHOLD_BITS) / 8)
 
 #define OVL_CON_BYTE_SWAP	BIT(24)
 #define OVL_CON_MTX_YUV_TO_RGB	(6 << 16)
@@ -57,6 +62,8 @@
 
 struct mtk_disp_ovl_data {
 	unsigned int addr;
+	unsigned int gmc_bits;
+	unsigned int layer_nr;
 	bool fmt_rgb565_is_0;
 };
 
@@ -134,15 +141,31 @@
 
 static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
 {
-	return 4;
+	struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+
+	return ovl->data->layer_nr;
 }
 
 static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
 {
 	unsigned int reg;
+	unsigned int gmc_thrshd_l;
+	unsigned int gmc_thrshd_h;
+	unsigned int gmc_value;
+	struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
 
 	writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
-	writel(OVL_RDMA_MEM_GMC, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
+
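+	/*
+	 * Build the RDMA GMC setting from the per-chip threshold width:
+	 * 10-bit chips pack two 16-bit high thresholds, 8-bit chips pack
+	 * four 8-bit low/high thresholds.
+	 */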
+	gmc_thrshd_l = GMC_THRESHOLD_LOW >>
+		      (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
+	gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
+		      (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
+	if (ovl->data->gmc_bits == 10)
+		gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16;
+	else
+		gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
+			    gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
+	writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
 
 	reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
 	reg = reg | BIT(idx);
@@ -225,6 +248,24 @@
 		mtk_ovl_layer_on(comp, idx);
 }
 
+static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
+{
+	unsigned int reg;
+
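+	/*
+	 * Take the background colour input from the upstream OVL so two
+	 * cascaded OVL engines blend as one layer stack.
+	 */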
+	reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+	reg = reg | OVL_BGCLR_SEL_IN;
+	writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+}
+
+static void mtk_ovl_bgclr_in_off(struct mtk_ddp_comp *comp)
+{
+	unsigned int reg;
+
+	reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+	reg = reg & ~OVL_BGCLR_SEL_IN;
+	writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+}
+
 static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
 	.config = mtk_ovl_config,
 	.start = mtk_ovl_start,
@@ -235,6 +276,8 @@
 	.layer_on = mtk_ovl_layer_on,
 	.layer_off = mtk_ovl_layer_off,
 	.layer_config = mtk_ovl_layer_config,
+	.bgclr_in_on = mtk_ovl_bgclr_in_on,
+	.bgclr_in_off = mtk_ovl_bgclr_in_off,
 };
 
 static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
@@ -284,7 +327,12 @@
 	if (irq < 0)
 		return irq;
 
-	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
+	priv->data = of_device_get_match_data(dev);
+
+	comp_id = mtk_ddp_comp_get_id(dev->of_node,
+				      priv->data->layer_nr == 4 ?
+				      MTK_DISP_OVL :
+				      MTK_DISP_OVL_2L);
 	if (comp_id < 0) {
 		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
 		return comp_id;
@@ -297,8 +345,6 @@
 		return ret;
 	}
 
-	priv->data = of_device_get_match_data(dev);
-
 	platform_set_drvdata(pdev, priv);
 
 	ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
@@ -324,19 +370,50 @@
 
 static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
 	.addr = DISP_REG_OVL_ADDR_MT2701,
+	.gmc_bits = 8,
+	.layer_nr = 4,
 	.fmt_rgb565_is_0 = false,
 };
 
+static const struct mtk_disp_ovl_data mt8167_ovl_driver_data = {
+	.addr = DISP_REG_OVL_ADDR_MT8167,
+	.gmc_bits = 8,
+	.layer_nr = 4,
+	.fmt_rgb565_is_0 = true,
+};
+
 static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
 	.addr = DISP_REG_OVL_ADDR_MT8173,
+	.gmc_bits = 8,
+	.layer_nr = 4,
+	.fmt_rgb565_is_0 = true,
+};
+
+static const struct mtk_disp_ovl_data mt8183_ovl_driver_data = {
+	.addr = DISP_REG_OVL_ADDR_MT8173,
+	.gmc_bits = 10,
+	.layer_nr = 4,
+	.fmt_rgb565_is_0 = true,
+};
+
+static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = {
+	.addr = DISP_REG_OVL_ADDR_MT8173,
+	.gmc_bits = 10,
+	.layer_nr = 2,
 	.fmt_rgb565_is_0 = true,
 };
 
 static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
 	{ .compatible = "mediatek,mt2701-disp-ovl",
 	  .data = &mt2701_ovl_driver_data},
+	{ .compatible = "mediatek,mt8167-disp-ovl",
+	  .data = &mt8167_ovl_driver_data},
 	{ .compatible = "mediatek,mt8173-disp-ovl",
 	  .data = &mt8173_ovl_driver_data},
+	{ .compatible = "mediatek,mt8183-disp-ovl",
+	  .data = &mt8183_ovl_driver_data},
+	{ .compatible = "mediatek,mt8183-disp-ovl-2l",
+	  .data = &mt8183_ovl_2l_driver_data},
 	{},
 };
 MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index b0a5cff..5d62588 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -336,11 +336,23 @@
 	.fifo_size = SZ_8K,
 };
 
+static const struct mtk_disp_rdma_data mt8183_rdma_driver_data = {
+	.fifo_size = 5 * SZ_1K,
+};
+
+static const struct mtk_disp_rdma_data mt8183_rdma1_driver_data = {
+	.fifo_size = SZ_2K,
+};
+
 static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
 	{ .compatible = "mediatek,mt2701-disp-rdma",
 	  .data = &mt2701_rdma_driver_data},
 	{ .compatible = "mediatek,mt8173-disp-rdma",
 	  .data = &mt8173_rdma_driver_data},
+	{ .compatible = "mediatek,mt8183-disp-rdma",
+	  .data = &mt8183_rdma_driver_data},
+	{ .compatible = "mediatek,mt8183-disp-rdma1",
+	  .data = &mt8183_rdma1_driver_data},
 	{},
 };
 MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 6c0ea39..6fa280a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -14,11 +14,15 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
 #include <linux/kernel.h>
 #include <linux/component.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
 #include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/clk.h>
@@ -70,14 +74,24 @@
 	struct clk *engine_clk;
 	struct clk *pixel_clk;
 	struct clk *tvd_clk;
+	struct clk *dpi_sel;
+	struct clk *tvd_d2;
+	struct clk *tvd_d4;
+	struct clk *tvd_d8;
+	struct clk *tvd_d16;
 	int irq;
 	struct drm_display_mode mode;
+	const struct mtk_dpi_conf *conf;
 	enum mtk_dpi_out_color_format color_format;
 	enum mtk_dpi_out_yc_map yc_map;
 	enum mtk_dpi_out_bit_num bit_num;
 	enum mtk_dpi_out_channel_swap channel_swap;
-	bool power_sta;
-	u8 power_ctl;
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *pins_gpio;
+	struct pinctrl_state *pins_dpi;
+	int refcount;
+	bool dual_edge;
+	bool dpi_pin_ctrl;
 };
 
 static inline struct mtk_dpi *mtk_dpi_from_encoder(struct drm_encoder *e)
@@ -90,11 +104,6 @@
 	MTK_DPI_POLARITY_FALLING,
 };
 
-enum mtk_dpi_power_ctl {
-	DPI_POWER_START = BIT(0),
-	DPI_POWER_ENABLE = BIT(1),
-};
-
 struct mtk_dpi_polarities {
 	enum mtk_dpi_polarity de_pol;
 	enum mtk_dpi_polarity ck_pol;
@@ -116,6 +125,19 @@
 	u16 c_bottom;
 };
 
+enum mtk_dpi_chip {
+	MTK_DPI_MT2701,
+	MTK_DPI_MT8167,
+	MTK_DPI_MT8173,
+};
+
+struct mtk_dpi_conf {
+	unsigned int (*cal_factor)(int clock);
+	u32 reg_h_fre_con;
+	bool edge_sel_en;
+	enum mtk_dpi_chip chip;
+};
+
 static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask)
 {
 	u32 tmp = readl(dpi->regs + offset) & ~mask;
@@ -341,7 +363,20 @@
 
 static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi)
 {
-	mtk_dpi_mask(dpi, DPI_H_FRE_CON, H_FRE_2N, H_FRE_2N);
+	mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
+}
+
+static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
+{
+	if (dpi->conf->edge_sel_en)
+		mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN);
+}
+
+static void mtk_dpi_enable_dual_edge(struct mtk_dpi *dpi)
+{
+	mtk_dpi_mask(dpi, DPI_DDR_SETTING, DDR_EN | DDR_4PHASE,
+		     DDR_EN | DDR_4PHASE);
+	mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, EDGE_SEL, EDGE_SEL);
 }
 
 static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
@@ -367,40 +402,33 @@
 	}
 }
 
-static void mtk_dpi_power_off(struct mtk_dpi *dpi, enum mtk_dpi_power_ctl pctl)
+static void mtk_dpi_power_off(struct mtk_dpi *dpi)
 {
-	dpi->power_ctl &= ~pctl;
-
-	if ((dpi->power_ctl & DPI_POWER_START) ||
-	    (dpi->power_ctl & DPI_POWER_ENABLE))
+	if (WARN_ON(dpi->refcount == 0))
 		return;
 
-	if (!dpi->power_sta)
+	if (--dpi->refcount != 0)
 		return;
 
+	if (dpi->dpi_pin_ctrl)
+		pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+
 	mtk_dpi_disable(dpi);
 	clk_disable_unprepare(dpi->pixel_clk);
 	clk_disable_unprepare(dpi->engine_clk);
-	dpi->power_sta = false;
 }
 
-static int mtk_dpi_power_on(struct mtk_dpi *dpi, enum mtk_dpi_power_ctl pctl)
+static int mtk_dpi_power_on(struct mtk_dpi *dpi)
 {
 	int ret;
 
-	dpi->power_ctl |= pctl;
-
-	if (!(dpi->power_ctl & DPI_POWER_START) &&
-	    !(dpi->power_ctl & DPI_POWER_ENABLE))
-		return 0;
-
-	if (dpi->power_sta)
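+	/* Power up only for the first user; later callers share the state. */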
+	if (++dpi->refcount != 1)
 		return 0;
 
 	ret = clk_prepare_enable(dpi->engine_clk);
 	if (ret) {
 		dev_err(dpi->dev, "Failed to enable engine clock: %d\n", ret);
-		goto err_eng;
+		goto err_refcount;
 	}
 
 	ret = clk_prepare_enable(dpi->pixel_clk);
@@ -409,14 +437,16 @@
 		goto err_pixel;
 	}
 
+	if (dpi->dpi_pin_ctrl)
+		pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
+
 	mtk_dpi_enable(dpi);
-	dpi->power_sta = true;
 	return 0;
 
 err_pixel:
 	clk_disable_unprepare(dpi->engine_clk);
-err_eng:
-	dpi->power_ctl &= ~pctl;
+err_refcount:
+	dpi->refcount--;
 	return ret;
 }
 
@@ -433,17 +463,10 @@
 	struct videomode vm = { 0 };
 	unsigned long pll_rate;
 	unsigned int factor;
+	int ret;
 
 	/* let pll_rate can fix the valid range of tvdpll (1G~2GHz) */
-
-	if (mode->clock <= 27000)
-		factor = 3 << 4;
-	else if (mode->clock <= 84000)
-		factor = 3 << 3;
-	else if (mode->clock <= 167000)
-		factor = 3 << 2;
-	else
-		factor = 3 << 1;
+	factor = dpi->conf->cal_factor(mode->clock);
 	drm_display_mode_to_videomode(mode, &vm);
 	pll_rate = vm.pixelclock * factor;
 
@@ -454,9 +477,37 @@
 	pll_rate = clk_get_rate(dpi->tvd_clk);
 
 	vm.pixelclock = pll_rate / factor;
-	clk_set_rate(dpi->pixel_clk, vm.pixelclock);
+	if (dpi->conf->chip != MTK_DPI_MT8167)
+		clk_set_rate(dpi->pixel_clk,
+			     vm.pixelclock * (dpi->dual_edge ? 2 : 1));
 	vm.pixelclock = clk_get_rate(dpi->pixel_clk);
 
+	if (dpi->conf->chip == MTK_DPI_MT8167) {
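+		/* Route dpi_sel to the TVDPLL divider that matches the factor. */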
+		switch (factor) {
+		case 16:
+			ret = clk_set_parent(dpi->dpi_sel, dpi->tvd_d16);
+			break;
+		case 8:
+			ret = clk_set_parent(dpi->dpi_sel, dpi->tvd_d8);
+			break;
+		case 4:
+			ret = clk_set_parent(dpi->dpi_sel, dpi->tvd_d4);
+			break;
+		case 2:
+		default:
+			ret = clk_set_parent(dpi->dpi_sel, dpi->tvd_d2);
+			break;
+		}
+
+		if (ret < 0) {
+			dev_err(dpi->dev, "Failed to set dpi_sel parent: %d\n", ret);
+			return ret;
+		}
+	}
+
 	dev_dbg(dpi->dev, "Got  PLL %lu Hz, pixel clock %lu Hz\n",
 		pll_rate, vm.pixelclock);
 
@@ -518,6 +569,9 @@
 	mtk_dpi_config_yc_map(dpi, dpi->yc_map);
 	mtk_dpi_config_color_format(dpi, dpi->color_format);
 	mtk_dpi_config_2n_h_fre(dpi);
+	mtk_dpi_config_disable_edge(dpi);
+	if (dpi->dual_edge)
+		mtk_dpi_enable_dual_edge(dpi);
 	mtk_dpi_sw_reset(dpi, false);
 
 	return 0;
@@ -552,14 +606,14 @@
 {
 	struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
 
-	mtk_dpi_power_off(dpi, DPI_POWER_ENABLE);
+	mtk_dpi_power_off(dpi);
 }
 
 static void mtk_dpi_encoder_enable(struct drm_encoder *encoder)
 {
 	struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
 
-	mtk_dpi_power_on(dpi, DPI_POWER_ENABLE);
+	mtk_dpi_power_on(dpi);
 	mtk_dpi_set_display_mode(dpi, &dpi->mode);
 }
 
@@ -582,14 +636,14 @@
 {
 	struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
 
-	mtk_dpi_power_on(dpi, DPI_POWER_START);
+	mtk_dpi_power_on(dpi);
 }
 
 static void mtk_dpi_stop(struct mtk_ddp_comp *comp)
 {
 	struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
 
-	mtk_dpi_power_off(dpi, DPI_POWER_START);
+	mtk_dpi_power_off(dpi);
 }
 
 static const struct mtk_ddp_comp_funcs mtk_dpi_funcs = {
@@ -620,6 +674,8 @@
 
 	/* Currently DPI0 is fixed to be driven by OVL1 */
 	dpi->encoder.possible_crtcs = BIT(1);
+	if (dpi->conf->chip == MTK_DPI_MT8167)
+		dpi->encoder.possible_crtcs |= BIT(0);
 
 	ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL);
 	if (ret) {
@@ -656,12 +712,78 @@
 	.unbind = mtk_dpi_unbind,
 };
 
+static unsigned int mt8167_calculate_factor(int clock)
+{
+	if (clock <= 64000)
+		return 16;
+	else if (clock <= 74250)
+		return 8;
+	else if (clock <= 160000)
+		return 4;
+	else
+		return 2;
+}
+
+static unsigned int mt8173_calculate_factor(int clock)
+{
+	if (clock <= 27000)
+		return 3 << 4;
+	else if (clock <= 84000)
+		return 3 << 3;
+	else if (clock <= 167000)
+		return 3 << 2;
+	else
+		return 3 << 1;
+}
+
+static unsigned int mt2701_calculate_factor(int clock)
+{
+	if (clock <= 64000)
+		return 4;
+	else if (clock <= 128000)
+		return 2;
+	else
+		return 1;
+}
+
+static unsigned int mt8183_calculate_factor(int clock)
+{
+	if (clock <= 27000)
+		return 8;
+	else if (clock <= 167000)
+		return 4;
+	else
+		return 2;
+}
+
+static const struct mtk_dpi_conf mt8167_conf = {
+	.cal_factor = mt8167_calculate_factor,
+	.reg_h_fre_con = 0xe0,
+	.chip = MTK_DPI_MT8167,
+};
+
+static const struct mtk_dpi_conf mt8173_conf = {
+	.cal_factor = mt8173_calculate_factor,
+	.reg_h_fre_con = 0xe0,
+	.chip = MTK_DPI_MT8173,
+};
+
+static const struct mtk_dpi_conf mt2701_conf = {
+	.cal_factor = mt2701_calculate_factor,
+	.reg_h_fre_con = 0xb0,
+	.edge_sel_en = true,
+};
+
+static const struct mtk_dpi_conf mt8183_conf = {
+	.cal_factor = mt8183_calculate_factor,
+	.reg_h_fre_con = 0xe0,
+};
+
 static int mtk_dpi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct mtk_dpi *dpi;
 	struct resource *mem;
-	struct device_node *bridge_node;
 	int comp_id;
 	int ret;
 
@@ -670,6 +792,33 @@
 		return -ENOMEM;
 
 	dpi->dev = dev;
+	dpi->conf = of_device_get_match_data(dev);
+	dpi->dual_edge = of_property_read_bool(dev->of_node, "dpi_dual_edge");
+	dpi->dpi_pin_ctrl = of_property_read_bool(dev->of_node,
+						  "dpi_pin_mode_swap");
+
+	if (dpi->dpi_pin_ctrl) {
+		dpi->pinctrl = devm_pinctrl_get(&pdev->dev);
+		if (IS_ERR(dpi->pinctrl)) {
+			dev_err(&pdev->dev, "Cannot find pinctrl!\n");
+			return PTR_ERR(dpi->pinctrl);
+		}
+
+		dpi->pins_gpio = pinctrl_lookup_state(dpi->pinctrl,
+						      "gpiomode");
+		if (IS_ERR(dpi->pins_gpio)) {
+			dev_err(&pdev->dev, "Cannot find pinctrl gpiomode!\n");
+			return PTR_ERR(dpi->pins_gpio);
+		}
+
+		pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+
+		dpi->pins_dpi = pinctrl_lookup_state(dpi->pinctrl, "dpimode");
+		if (IS_ERR(dpi->pins_dpi)) {
+			dev_err(&pdev->dev, "Cannot find pinctrl dpimode!\n");
+			return PTR_ERR(dpi->pins_dpi);
+		}
+	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	dpi->regs = devm_ioremap_resource(dev, mem);
@@ -700,22 +849,55 @@
 		return ret;
 	}
 
+	if (dpi->conf->chip == MTK_DPI_MT8167) {
+		dpi->dpi_sel = devm_clk_get(dev, "dpi_sel");
+		if (IS_ERR(dpi->dpi_sel)) {
+			ret = PTR_ERR(dpi->dpi_sel);
+			dev_err(dev, "Failed to get dpi_sel clock: %d\n", ret);
+			return ret;
+		}
+
+		dpi->tvd_d2 = devm_clk_get(dev, "tvd_d2");
+		if (IS_ERR(dpi->tvd_d2)) {
+			ret = PTR_ERR(dpi->tvd_d2);
+			dev_err(dev, "Failed to get tvd_d2 clock: %d\n", ret);
+			return ret;
+		}
+
+		dpi->tvd_d4 = devm_clk_get(dev, "tvd_d4");
+		if (IS_ERR(dpi->tvd_d4)) {
+			ret = PTR_ERR(dpi->tvd_d4);
+			dev_err(dev, "Failed to get tvd_d4 clock: %d\n", ret);
+			return ret;
+		}
+
+		dpi->tvd_d8 = devm_clk_get(dev, "tvd_d8");
+		if (IS_ERR(dpi->tvd_d8)) {
+			ret = PTR_ERR(dpi->tvd_d8);
+			dev_err(dev, "Failed to get tvd_d8 clock: %d\n", ret);
+			return ret;
+		}
+
+		dpi->tvd_d16 = devm_clk_get(dev, "tvd_d16");
+		if (IS_ERR(dpi->tvd_d16)) {
+			ret = PTR_ERR(dpi->tvd_d16);
+			dev_err(dev, "Failed to get tvd_d16 clock: %d\n", ret);
+			return ret;
+		}
+	}
+
 	dpi->irq = platform_get_irq(pdev, 0);
 	if (dpi->irq <= 0) {
 		dev_err(dev, "Failed to get irq: %d\n", dpi->irq);
 		return -EINVAL;
 	}
 
-	bridge_node = of_graph_get_remote_node(dev->of_node, 0, 0);
-	if (!bridge_node)
-		return -ENODEV;
+	ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
+					  NULL, &dpi->bridge);
+	if (ret)
+		return ret;
 
-	dev_info(dev, "Found bridge node: %pOF\n", bridge_node);
-
-	dpi->bridge = of_drm_find_bridge(bridge_node);
-	of_node_put(bridge_node);
-	if (!dpi->bridge)
-		return -EPROBE_DEFER;
+	dev_info(dev, "Found bridge node: %pOF\n", dpi->bridge->of_node);
 
 	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI);
 	if (comp_id < 0) {
@@ -749,8 +931,19 @@
 }
 
 static const struct of_device_id mtk_dpi_of_ids[] = {
-	{ .compatible = "mediatek,mt8173-dpi", },
-	{}
+	{ .compatible = "mediatek,mt2701-dpi",
+	  .data = &mt2701_conf,
+	},
+	{ .compatible = "mediatek,mt8167-dpi",
+	  .data = &mt8167_conf,
+	},
+	{ .compatible = "mediatek,mt8173-dpi",
+	  .data = &mt8173_conf,
+	},
+	{ .compatible = "mediatek,mt8183-dpi",
+	  .data = &mt8183_conf,
+	},
+	{ },
 };
 
 struct platform_driver mtk_dpi_driver = {
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
index 4b6ad47..d9db8c4 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
+++ b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
@@ -223,6 +223,6 @@
 #define ESAV_CODE2			(0xFFF << 0)
 #define ESAV_CODE3_MSB			BIT(16)
 
-#define DPI_H_FRE_CON		0xE0
+#define EDGE_SEL_EN			BIT(5)
 #define H_FRE_2N			BIT(25)
 #endif /* __MTK_DPI_REGS_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index eac9caf..98bc695 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -50,6 +50,7 @@
 	bool				pending_planes;
 
 	void __iomem			*config_regs;
+	const struct mtk_mmsys_reg_data *mmsys_reg_data;
 	struct mtk_disp_mutex		*mutex;
 	unsigned int			ddp_comp_nr;
 	struct mtk_ddp_comp		**ddp_comp;
@@ -271,6 +272,7 @@
 	DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
 	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
 		mtk_ddp_add_comp_to_path(mtk_crtc->config_regs,
+					 mtk_crtc->mmsys_reg_data,
 					 mtk_crtc->ddp_comp[i]->id,
 					 mtk_crtc->ddp_comp[i + 1]->id);
 		mtk_disp_mutex_add_comp(mtk_crtc->mutex,
@@ -281,6 +283,15 @@
 
 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
 		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
+		enum mtk_ddp_comp_id prev;
+
+		if (i > 0)
+			prev = mtk_crtc->ddp_comp[i - 1]->id;
+		else
+			prev = DDP_COMPONENT_ID_MAX;
+
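+		/* Cascade: the component fed by OVL0 blends on top of OVL0's output. */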
+		if (prev == DDP_COMPONENT_OVL0)
+			mtk_ddp_comp_bgclr_in_on(comp);
 
 		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
 		mtk_ddp_comp_start(comp);
@@ -290,9 +301,18 @@
 	for (i = 0; i < mtk_crtc->layer_nr; i++) {
 		struct drm_plane *plane = &mtk_crtc->planes[i];
 		struct mtk_plane_state *plane_state;
+		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
+		unsigned int comp_layer_nr = mtk_ddp_comp_layer_nr(comp);
+		unsigned int local_layer;
 
 		plane_state = to_mtk_plane_state(plane->state);
-		mtk_ddp_comp_layer_config(mtk_crtc->ddp_comp[0], i,
+
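+		/*
+		 * Planes beyond the first component's layer count spill over
+		 * to the second component in the path (e.g. OVL0 -> OVL0_2L).
+		 */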
+		if (i >= comp_layer_nr) {
+			comp = mtk_crtc->ddp_comp[1];
+			local_layer = i - comp_layer_nr;
+		} else {
+			local_layer = i;
+		}
+		mtk_ddp_comp_layer_config(comp, local_layer, plane_state);
 	}
 
@@ -319,7 +339,9 @@
 					   mtk_crtc->ddp_comp[i]->id);
 	mtk_disp_mutex_disable(mtk_crtc->mutex);
 	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
+		mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
 		mtk_ddp_remove_comp_from_path(mtk_crtc->config_regs,
+					      mtk_crtc->mmsys_reg_data,
 					      mtk_crtc->ddp_comp[i]->id,
 					      mtk_crtc->ddp_comp[i + 1]->id);
 		mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
@@ -345,6 +367,8 @@
 	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
 	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
 	unsigned int i;
+	unsigned int comp_layer_nr = mtk_ddp_comp_layer_nr(comp);
+	unsigned int local_layer;
 
 	/*
 	 * TODO: instead of updating the registers here, we should prepare
@@ -367,7 +391,14 @@
 			plane_state = to_mtk_plane_state(plane->state);
 
 			if (plane_state->pending.config) {
-				mtk_ddp_comp_layer_config(comp, i, plane_state);
+				if (i >= comp_layer_nr) {
+					comp = mtk_crtc->ddp_comp[1];
+					local_layer = i - comp_layer_nr;
+				} else {
+					local_layer = i;
+				}
+
+				mtk_ddp_comp_layer_config(comp, local_layer,
+							  plane_state);
 				plane_state->pending.config = false;
 			}
 		}
@@ -577,6 +608,7 @@
 		return -ENOMEM;
 
 	mtk_crtc->config_regs = priv->config_regs;
+	mtk_crtc->mmsys_reg_data = priv->data->reg_data;
 	mtk_crtc->ddp_comp_nr = path_len;
 	mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
 						sizeof(*mtk_crtc->ddp_comp),
@@ -616,6 +648,12 @@
 	}
 
 	mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
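+	/*
+	 * If the second component in the path can cascade (it implements
+	 * bgclr_in_on), expose its layers as extra planes on this CRTC.
+	 */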
+	if (mtk_crtc->ddp_comp_nr > 1) {
+		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[1];
+
+		if (comp->funcs->bgclr_in_on)
+			mtk_crtc->layer_nr += mtk_ddp_comp_layer_nr(comp);
+	}
 	mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
 					sizeof(struct drm_plane),
 					GFP_KERNEL);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 546b3e3..c625d60 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -21,11 +21,13 @@
 #include "mtk_drm_ddp.h"
 #include "mtk_drm_ddp_comp.h"
 
+#define DISP_REG_CONFIG_DISP_DITHER_MOUT_EN	0x038
 #define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN	0x040
 #define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN	0x044
 #define DISP_REG_CONFIG_DISP_OD_MOUT_EN		0x048
 #define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN	0x04c
 #define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN	0x050
+#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN	0x06c
 #define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN	0x084
 #define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN	0x088
 #define DISP_REG_CONFIG_DSIE_SEL_IN		0x0a4
@@ -39,16 +41,68 @@
 #define DISP_REG_CONFIG_DISP_OVL_MOUT_EN	0x030
 #define DISP_REG_CONFIG_OUT_SEL			0x04c
 #define DISP_REG_CONFIG_DSI_SEL			0x050
+#define DISP_REG_CONFIG_DPI_SEL			0x064
 
-#define DISP_REG_MUTEX_EN(n)	(0x20 + 0x20 * (n))
-#define DISP_REG_MUTEX(n)	(0x24 + 0x20 * (n))
-#define DISP_REG_MUTEX_RST(n)	(0x28 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD(n)	(0x2c + 0x20 * (n))
-#define DISP_REG_MUTEX_SOF(n)	(0x30 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD2(n)	(0x34 + 0x20 * (n))
+#define MT8183_DISP_OVL0_MOUT_EN		0xf00
+#define MT8183_DISP_OVL0_2L_MOUT_EN		0xf04
+#define MT8183_DISP_OVL1_2L_MOUT_EN		0xf08
+#define MT8183_DISP_DITHER0_MOUT_EN		0xf0c
+#define MT8183_DISP_PATH0_SEL_IN		0xf24
+#define MT8183_DISP_DSI0_SEL_IN			0xf2c
+#define MT8183_DISP_DPI0_SEL_IN			0xf30
+#define MT8183_DISP_RDMA0_SOUT_SEL_IN		0xf50
+#define MT8183_DISP_RDMA1_SOUT_SEL_IN		0xf54
+
+#define OVL0_2L_MOUT_EN_DISP_PATH0			BIT(0)
+#define OVL1_2L_MOUT_EN_RDMA1				BIT(4)
+#define DITHER0_MOUT_IN_DSI0				BIT(0)
+#define DISP_PATH0_SEL_IN_OVL0_2L			0x1
+#define DSI0_SEL_IN_RDMA0				0x1
+#define MT8183_DSI0_SEL_IN_RDMA1			0x3
+#define MT8183_DPI0_SEL_IN_RDMA0			0x1
+#define MT8183_DPI0_SEL_IN_RDMA1			0x2
+#define MT8183_RDMA0_SOUT_COLOR0			0x1
+#define MT8183_RDMA1_SOUT_DSI0				0x1
+
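+/* MT8183 swaps the mutex MOD/SOF register offsets used by older SoCs. */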
+#define MT2701_DISP_MUTEX0_MOD0			0x2c
+#define MT2701_DISP_MUTEX0_SOF0			0x30
+#define MT8183_DISP_MUTEX0_MOD0			0x30
+#define MT8183_DISP_MUTEX0_SOF0			0x2c
+
+#define DISP_REG_MUTEX_EN(n)			(0x20 + 0x20 * (n))
+#define DISP_REG_MUTEX(n)			(0x24 + 0x20 * (n))
+#define DISP_REG_MUTEX_RST(n)			(0x28 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD(mutex_mod_reg, n)	(mutex_mod_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_SOF(mutex_sof_reg, n)	(mutex_sof_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD2(n)			(0x34 + 0x20 * (n))
 
 #define INT_MUTEX				BIT(1)
 
+#define MT8183_MUTEX_MOD_DISP_RDMA0		0
+#define MT8183_MUTEX_MOD_DISP_RDMA1		1
+#define MT8183_MUTEX_MOD_DISP_OVL0		9
+#define MT8183_MUTEX_MOD_DISP_OVL0_2L		10
+#define MT8183_MUTEX_MOD_DISP_OVL1_2L		11
+#define MT8183_MUTEX_MOD_DISP_WDMA0		12
+#define MT8183_MUTEX_MOD_DISP_COLOR0		13
+#define MT8183_MUTEX_MOD_DISP_CCORR0		14
+#define MT8183_MUTEX_MOD_DISP_AAL0		15
+#define MT8183_MUTEX_MOD_DISP_GAMMA0		16
+#define MT8183_MUTEX_MOD_DISP_DITHER0		17
+
+#define MT8167_MUTEX_MOD_DISP_OVL0		6
+#define MT8167_MUTEX_MOD_DISP_OVL1		7
+#define MT8167_MUTEX_MOD_DISP_RDMA0		8
+#define MT8167_MUTEX_MOD_DISP_RDMA1		9
+#define MT8167_MUTEX_MOD_DISP_WDMA0		10
+#define MT8167_MUTEX_MOD_DISP_CCORR		11
+#define MT8167_MUTEX_MOD_DISP_COLOR		12
+#define MT8167_MUTEX_MOD_DISP_AAL		13
+#define MT8167_MUTEX_MOD_DISP_GAMMA		14
+#define MT8167_MUTEX_MOD_DISP_DITHER		15
+#define MT8167_MUTEX_MOD_DISP_UFOE		16
+#define MT8167_MUTEX_MOD_DISP_PWM		1
+
 #define MT8173_MUTEX_MOD_DISP_OVL0		11
 #define MT8173_MUTEX_MOD_DISP_OVL1		12
 #define MT8173_MUTEX_MOD_DISP_RDMA0		13
@@ -97,6 +151,12 @@
 #define MUTEX_SOF_DPI1			4
 #define MUTEX_SOF_DSI2			5
 #define MUTEX_SOF_DSI3			6
+#define MT8167_MUTEX_SOF_DPI0		2
+#define MT8167_MUTEX_SOF_DPI1		3
+
+#define MT8183_MUTEX_SOF_DPI0			2
+#define MT8183_MUTEX_EOF_DSI0			(MUTEX_SOF_DSI0 << 6)
+#define MT8183_MUTEX_EOF_DPI0			(MT8183_MUTEX_SOF_DPI0 << 6)
 
 #define OVL0_MOUT_EN_COLOR0		0x1
 #define OD_MOUT_EN_RDMA0		0x1
@@ -136,19 +196,61 @@
 
 #define OVL_MOUT_EN_RDMA		0x1
 #define BLS_TO_DSI_RDMA1_TO_DPI1	0x8
+#define BLS_TO_DPI_RDMA1_TO_DSI		0x2
 #define DSI_SEL_IN_BLS			0x0
+#define DPI_SEL_IN_BLS			0x0
+#define DSI_SEL_IN_RDMA			0x1
+
+#define OVL0_MOUT_EN_OVL0_2L		BIT(4)
+
+#define DITHER_MOUT_EN_RDMA		0x1
+#define RDMA0_SOUT_SEL_IN_DSI0		0x2
 
 struct mtk_disp_mutex {
 	int id;
 	bool claimed;
 };
 
+enum mtk_ddp_mutex_sof_id {
+	DDP_MUTEX_SOF_SINGLE_MODE,
+	DDP_MUTEX_SOF_DSI0,
+	DDP_MUTEX_SOF_DSI1,
+	DDP_MUTEX_SOF_DPI0,
+	DDP_MUTEX_SOF_DPI1,
+	DDP_MUTEX_SOF_DSI2,
+	DDP_MUTEX_SOF_DSI3,
+};
+
+struct mtk_ddp_data {
+	const unsigned int *mutex_mod;
+	const unsigned int *mutex_sof;
+	const unsigned int mutex_mod_reg;
+	const unsigned int mutex_sof_reg;
+};
+
 struct mtk_ddp {
 	struct device			*dev;
 	struct clk			*clk;
 	void __iomem			*regs;
 	struct mtk_disp_mutex		mutex[10];
-	const unsigned int		*mutex_mod;
+	const struct mtk_ddp_data	*data;
+};
+
+struct mtk_mmsys_reg_data {
+	u32 ovl0_mout_en;
+	u32 rdma0_sout_sel_in;
+	u32 rdma0_sout_color0;
+	u32 rdma1_sout_sel_in;
+	u32 rdma1_sout_dpi0;
+	u32 rdma1_sout_dpi1;
+	u32 rdma1_sout_dsi0;
+	u32 dpi0_sel_in;
+	u32 dpi0_sel_in_rdma1;
+	u32 dpi1_sel_in;
+	u32 dpi1_sel_in_rdma1;
+	u32 dsi0_sel_in;
+	u32 dsi0_sel_in_rdma1;
+	u32 color0_sel_in;
 };
 
 static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
@@ -180,6 +282,21 @@
 	[DDP_COMPONENT_WDMA1] = MT2712_MUTEX_MOD_DISP_WDMA1,
 };
 
+static const unsigned int mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+	[DDP_COMPONENT_AAL0] = MT8167_MUTEX_MOD_DISP_AAL,
+	[DDP_COMPONENT_CCORR] = MT8167_MUTEX_MOD_DISP_CCORR,
+	[DDP_COMPONENT_COLOR0] = MT8167_MUTEX_MOD_DISP_COLOR,
+	[DDP_COMPONENT_DITHER] = MT8167_MUTEX_MOD_DISP_DITHER,
+	[DDP_COMPONENT_GAMMA] = MT8167_MUTEX_MOD_DISP_GAMMA,
+	[DDP_COMPONENT_OVL0] = MT8167_MUTEX_MOD_DISP_OVL0,
+	[DDP_COMPONENT_OVL1] = MT8167_MUTEX_MOD_DISP_OVL1,
+	[DDP_COMPONENT_PWM0] = MT8167_MUTEX_MOD_DISP_PWM,
+	[DDP_COMPONENT_RDMA0] = MT8167_MUTEX_MOD_DISP_RDMA0,
+	[DDP_COMPONENT_RDMA1] = MT8167_MUTEX_MOD_DISP_RDMA1,
+	[DDP_COMPONENT_UFOE] = MT8167_MUTEX_MOD_DISP_UFOE,
+	[DDP_COMPONENT_WDMA0] = MT8167_MUTEX_MOD_DISP_WDMA0,
+};
+
 static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
 	[DDP_COMPONENT_AAL0] = MT8173_MUTEX_MOD_DISP_AAL,
 	[DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0,
@@ -198,18 +315,146 @@
 	[DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
 };
 
-static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
+static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+	[DDP_COMPONENT_AAL0] = MT8183_MUTEX_MOD_DISP_AAL0,
+	[DDP_COMPONENT_CCORR] = MT8183_MUTEX_MOD_DISP_CCORR0,
+	[DDP_COMPONENT_COLOR0] = MT8183_MUTEX_MOD_DISP_COLOR0,
+	[DDP_COMPONENT_DITHER] = MT8183_MUTEX_MOD_DISP_DITHER0,
+	[DDP_COMPONENT_GAMMA] = MT8183_MUTEX_MOD_DISP_GAMMA0,
+	[DDP_COMPONENT_OVL0] = MT8183_MUTEX_MOD_DISP_OVL0,
+	[DDP_COMPONENT_OVL_2L0] = MT8183_MUTEX_MOD_DISP_OVL0_2L,
+	[DDP_COMPONENT_OVL_2L1] = MT8183_MUTEX_MOD_DISP_OVL1_2L,
+	[DDP_COMPONENT_RDMA0] = MT8183_MUTEX_MOD_DISP_RDMA0,
+	[DDP_COMPONENT_RDMA1] = MT8183_MUTEX_MOD_DISP_RDMA1,
+	[DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0,
+};
+
+static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = {
+	[DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+	[DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+	[DDP_MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+	[DDP_MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
+	[DDP_MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1,
+	[DDP_MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2,
+	[DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
+};
+
+static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = {
+	[DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+	[DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+	[DDP_MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+	[DDP_MUTEX_SOF_DPI0] = MT8167_MUTEX_SOF_DPI0,
+	[DDP_MUTEX_SOF_DPI1] = MT8167_MUTEX_SOF_DPI1,
+	[DDP_MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2,
+	[DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
+};
+
+static const unsigned int mt8183_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = {
+	[DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+	[DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0 | MT8183_MUTEX_EOF_DSI0,
+	[DDP_MUTEX_SOF_DPI0] = MT8183_MUTEX_SOF_DPI0 | MT8183_MUTEX_EOF_DPI0,
+};
+
+static const struct mtk_ddp_data mt2701_ddp_driver_data = {
+	.mutex_mod = mt2701_mutex_mod,
+	.mutex_sof = mt2712_mutex_sof,
+	.mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+	.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt2712_ddp_driver_data = {
+	.mutex_mod = mt2712_mutex_mod,
+	.mutex_sof = mt2712_mutex_sof,
+	.mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+	.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt8167_ddp_driver_data = {
+	.mutex_mod = mt8167_mutex_mod,
+	.mutex_sof = mt8167_mutex_sof,
+	.mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+	.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt8173_ddp_driver_data = {
+	.mutex_mod = mt8173_mutex_mod,
+	.mutex_sof = mt2712_mutex_sof,
+	.mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+	.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt8183_ddp_driver_data = {
+	.mutex_mod = mt8183_mutex_mod,
+	.mutex_sof = mt8183_mutex_sof,
+	.mutex_mod_reg = MT8183_DISP_MUTEX0_MOD0,
+	.mutex_sof_reg = MT8183_DISP_MUTEX0_SOF0,
+};
+
+const struct mtk_mmsys_reg_data mt2701_mmsys_reg_data = {
+	.ovl0_mout_en = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
+	.dsi0_sel_in = DISP_REG_CONFIG_DSI_SEL,
+	.dsi0_sel_in_rdma1 = DSI_SEL_IN_RDMA,
+	.rdma1_sout_dpi1 = RDMA1_SOUT_DPI1,
+	.dpi1_sel_in = DISP_REG_CONFIG_DPI_SEL_IN,
+	.color0_sel_in = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
+};
+
+const struct mtk_mmsys_reg_data mt8167_mmsys_reg_data = {
+	.rdma1_sout_sel_in = 0x70,
+	.rdma1_sout_dpi0 = 0x2,
+	.rdma1_sout_dpi1 = 0x2,
+	.dpi1_sel_in = 0x74,
+	.dpi1_sel_in_rdma1 = 0x2,
+
+	.dsi0_sel_in = 0x64,
+	.ovl0_mout_en = 0x30,
+	.color0_sel_in = 0x58,
+};
+
+const struct mtk_mmsys_reg_data mt8173_mmsys_reg_data = {
+	.ovl0_mout_en = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
+	.rdma1_sout_sel_in = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN,
+	.rdma1_sout_dpi0 = RDMA1_SOUT_DPI0,
+	.dpi0_sel_in = DISP_REG_CONFIG_DPI_SEL_IN,
+	.dpi0_sel_in_rdma1 = DPI0_SEL_IN_RDMA1,
+	.dsi0_sel_in = DISP_REG_CONFIG_DSIE_SEL_IN,
+	.dsi0_sel_in_rdma1 = DSI0_SEL_IN_RDMA1,
+	.rdma1_sout_dpi1 = RDMA1_SOUT_DPI1,
+	.dpi1_sel_in = DISP_REG_CONFIG_DPI_SEL_IN,
+	.color0_sel_in = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
+};
+
+const struct mtk_mmsys_reg_data mt8183_mmsys_reg_data = {
+	.ovl0_mout_en = MT8183_DISP_OVL0_MOUT_EN,
+	.rdma0_sout_sel_in = MT8183_DISP_RDMA0_SOUT_SEL_IN,
+	.rdma0_sout_color0 = MT8183_RDMA0_SOUT_COLOR0,
+	.rdma1_sout_sel_in = MT8183_DISP_RDMA1_SOUT_SEL_IN,
+	.rdma1_sout_dsi0 = MT8183_RDMA1_SOUT_DSI0,
+	.dpi0_sel_in = MT8183_DISP_DPI0_SEL_IN,
+	.dpi0_sel_in_rdma1 = MT8183_DPI0_SEL_IN_RDMA1,
+	.dsi0_sel_in = MT8183_DISP_DSI0_SEL_IN,
+	.dsi0_sel_in_rdma1 = MT8183_DSI0_SEL_IN_RDMA1,
+	.rdma1_sout_dpi1 = RDMA1_SOUT_DPI1,
+	.dpi1_sel_in = DISP_REG_CONFIG_DPI_SEL_IN,
+	.color0_sel_in = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
+};
+
+static unsigned int mtk_ddp_mout_en(const struct mtk_mmsys_reg_data *data,
+				    enum mtk_ddp_comp_id cur,
 				    enum mtk_ddp_comp_id next,
 				    unsigned int *addr)
 {
 	unsigned int value;
 
 	if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
-		*addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
+		*addr = data->ovl0_mout_en;
 		value = OVL0_MOUT_EN_COLOR0;
 	} else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
-		*addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
+		*addr = data->ovl0_mout_en;
 		value = OVL_MOUT_EN_RDMA;
+	} else if (cur == DDP_COMPONENT_DITHER && next == DDP_COMPONENT_RDMA0) {
+		*addr = DISP_REG_CONFIG_DISP_DITHER_MOUT_EN;
+		value = DITHER_MOUT_EN_RDMA;
 	} else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
 		*addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
 		value = OD_MOUT_EN_RDMA0;
@@ -225,51 +470,20 @@
 	} else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
 		*addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
 		value = OD1_MOUT_EN_RDMA1;
-	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
-		value = RDMA0_SOUT_DPI0;
-	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
-		value = RDMA0_SOUT_DPI1;
-	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
-		value = RDMA0_SOUT_DSI1;
-	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
-		value = RDMA0_SOUT_DSI2;
-	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
-		value = RDMA0_SOUT_DSI3;
-	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
-		value = RDMA1_SOUT_DSI1;
-	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
-		value = RDMA1_SOUT_DSI2;
-	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
-		value = RDMA1_SOUT_DSI3;
-	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
-		value = RDMA1_SOUT_DPI0;
-	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
-		value = RDMA1_SOUT_DPI1;
-	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
-		value = RDMA2_SOUT_DPI0;
-	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
-		value = RDMA2_SOUT_DPI1;
-	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
-		value = RDMA2_SOUT_DSI1;
-	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
-		value = RDMA2_SOUT_DSI2;
-	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
-		*addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
-		value = RDMA2_SOUT_DSI3;
+	} else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_OVL_2L0) {
+		*addr = data->ovl0_mout_en;
+		value = OVL0_MOUT_EN_OVL0_2L;
+	} else if (cur == DDP_COMPONENT_OVL_2L0 &&
+		   next == DDP_COMPONENT_RDMA0) {
+		*addr = MT8183_DISP_OVL0_2L_MOUT_EN;
+		value = OVL0_2L_MOUT_EN_DISP_PATH0;
+	} else if (cur == DDP_COMPONENT_OVL_2L1 &&
+		   next == DDP_COMPONENT_RDMA1) {
+		*addr = MT8183_DISP_OVL1_2L_MOUT_EN;
+		value = OVL1_2L_MOUT_EN_RDMA1;
+	} else if (cur == DDP_COMPONENT_DITHER && next == DDP_COMPONENT_DSI0) {
+		*addr = MT8183_DISP_DITHER0_MOUT_EN;
+		value = DITHER0_MOUT_IN_DSI0;
 	} else {
 		value = 0;
 	}
@@ -277,24 +491,25 @@
 	return value;
 }
 
-static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
+static unsigned int mtk_ddp_sel_in(const struct mtk_mmsys_reg_data *data,
+				   enum mtk_ddp_comp_id cur,
 				   enum mtk_ddp_comp_id next,
 				   unsigned int *addr)
 {
 	unsigned int value;
 
 	if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
-		*addr = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN;
+		*addr = data->color0_sel_in;
 		value = COLOR0_SEL_IN_OVL0;
 	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
-		*addr = DISP_REG_CONFIG_DPI_SEL_IN;
-		value = DPI0_SEL_IN_RDMA1;
+		*addr = data->dpi0_sel_in;
+		value = data->dpi0_sel_in_rdma1;
 	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
-		*addr = DISP_REG_CONFIG_DPI_SEL_IN;
-		value = DPI1_SEL_IN_RDMA1;
+		*addr = data->dpi1_sel_in;
+		value = data->dpi1_sel_in_rdma1;
 	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
-		*addr = DISP_REG_CONFIG_DSIE_SEL_IN;
-		value = DSI0_SEL_IN_RDMA1;
+		*addr = data->dsi0_sel_in;
+		value = data->dsi0_sel_in_rdma1;
 	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
 		*addr = DISP_REG_CONFIG_DSIO_SEL_IN;
 		value = DSI1_SEL_IN_RDMA1;
@@ -328,6 +543,13 @@
 	} else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
 		*addr = DISP_REG_CONFIG_DSI_SEL;
 		value = DSI_SEL_IN_BLS;
+	} else if (cur == DDP_COMPONENT_OVL_2L0 &&
+		   next == DDP_COMPONENT_RDMA0) {
+		*addr = MT8183_DISP_PATH0_SEL_IN;
+		value = DISP_PATH0_SEL_IN_OVL0_2L;
+	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI0) {
+		*addr = data->dsi0_sel_in;
+		value = DSI0_SEL_IN_RDMA0;
 	} else {
 		value = 0;
 	}
@@ -335,30 +557,98 @@
 	return value;
 }
 
-static void mtk_ddp_sout_sel(void __iomem *config_regs,
-			     enum mtk_ddp_comp_id cur,
-			     enum mtk_ddp_comp_id next)
+static unsigned int mtk_ddp_sout_sel(const struct mtk_mmsys_reg_data *data,
+				     enum mtk_ddp_comp_id cur,
+				     enum mtk_ddp_comp_id next,
+				     unsigned int *addr)
 {
-	if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0)
-		writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
-			       config_regs + DISP_REG_CONFIG_OUT_SEL);
+	unsigned int value;
+
+	if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
+		*addr = DISP_REG_CONFIG_OUT_SEL;
+		value = BLS_TO_DSI_RDMA1_TO_DPI1;
+	} else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DPI0) {
+		*addr = DISP_REG_CONFIG_OUT_SEL;
+		value = BLS_TO_DPI_RDMA1_TO_DSI;
+	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI0) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN;
+		value = RDMA0_SOUT_SEL_IN_DSI0;
+	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+		value = RDMA0_SOUT_DPI0;
+	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+		value = RDMA0_SOUT_DPI1;
+	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+		value = RDMA0_SOUT_DSI1;
+	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+		value = RDMA0_SOUT_DSI2;
+	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+		value = RDMA0_SOUT_DSI3;
+	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+		value = RDMA1_SOUT_DSI1;
+	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+		value = RDMA1_SOUT_DSI2;
+	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+		value = RDMA1_SOUT_DSI3;
+	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
+		*addr = data->rdma1_sout_sel_in;
+		value = data->rdma1_sout_dpi0;
+	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+		*addr = data->rdma1_sout_sel_in;
+		value = data->rdma1_sout_dpi1;
+	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+		value = RDMA2_SOUT_DPI0;
+	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+		value = RDMA2_SOUT_DPI1;
+	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+		value = RDMA2_SOUT_DSI1;
+	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+		value = RDMA2_SOUT_DSI2;
+	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+		value = RDMA2_SOUT_DSI3;
+	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_COLOR0) {
+		*addr = data->rdma0_sout_sel_in;
+		value = data->rdma0_sout_color0;
+	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
+		*addr = data->rdma1_sout_sel_in;
+		value = data->rdma1_sout_dsi0;
+	} else {
+		value = 0;
+	}
+
+	return value;
 }
 
 void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
+			      const struct mtk_mmsys_reg_data *reg_data,
 			      enum mtk_ddp_comp_id cur,
 			      enum mtk_ddp_comp_id next)
 {
 	unsigned int addr, value, reg;
 
-	value = mtk_ddp_mout_en(cur, next, &addr);
+	value = mtk_ddp_mout_en(reg_data, cur, next, &addr);
 	if (value) {
 		reg = readl_relaxed(config_regs + addr) | value;
 		writel_relaxed(reg, config_regs + addr);
 	}
 
-	mtk_ddp_sout_sel(config_regs, cur, next);
+	value = mtk_ddp_sout_sel(reg_data, cur, next, &addr);
+	if (value)
+		writel_relaxed(value, config_regs + addr);
 
-	value = mtk_ddp_sel_in(cur, next, &addr);
+	value = mtk_ddp_sel_in(reg_data, cur, next, &addr);
 	if (value) {
 		reg = readl_relaxed(config_regs + addr) | value;
 		writel_relaxed(reg, config_regs + addr);
@@ -366,18 +656,19 @@
 }
 
 void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
+				   const struct mtk_mmsys_reg_data *reg_data,
 				   enum mtk_ddp_comp_id cur,
 				   enum mtk_ddp_comp_id next)
 {
 	unsigned int addr, value, reg;
 
-	value = mtk_ddp_mout_en(cur, next, &addr);
+	value = mtk_ddp_mout_en(reg_data, cur, next, &addr);
 	if (value) {
 		reg = readl_relaxed(config_regs + addr) & ~value;
 		writel_relaxed(reg, config_regs + addr);
 	}
 
-	value = mtk_ddp_sel_in(cur, next, &addr);
+	value = mtk_ddp_sel_in(reg_data, cur, next, &addr);
 	if (value) {
 		reg = readl_relaxed(config_regs + addr) & ~value;
 		writel_relaxed(reg, config_regs + addr);
@@ -428,45 +719,49 @@
 	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
 					   mutex[mutex->id]);
 	unsigned int reg;
+	unsigned int sof_id;
 	unsigned int offset;
 
 	WARN_ON(&ddp->mutex[mutex->id] != mutex);
 
 	switch (id) {
 	case DDP_COMPONENT_DSI0:
-		reg = MUTEX_SOF_DSI0;
+		sof_id = DDP_MUTEX_SOF_DSI0;
 		break;
 	case DDP_COMPONENT_DSI1:
-		reg = MUTEX_SOF_DSI0;
+		sof_id = DDP_MUTEX_SOF_DSI0;
 		break;
 	case DDP_COMPONENT_DSI2:
-		reg = MUTEX_SOF_DSI2;
+		sof_id = DDP_MUTEX_SOF_DSI2;
 		break;
 	case DDP_COMPONENT_DSI3:
-		reg = MUTEX_SOF_DSI3;
+		sof_id = DDP_MUTEX_SOF_DSI3;
 		break;
 	case DDP_COMPONENT_DPI0:
-		reg = MUTEX_SOF_DPI0;
+		sof_id = DDP_MUTEX_SOF_DPI0;
 		break;
 	case DDP_COMPONENT_DPI1:
-		reg = MUTEX_SOF_DPI1;
+		sof_id = DDP_MUTEX_SOF_DPI1;
 		break;
 	default:
-		if (ddp->mutex_mod[id] < 32) {
-			offset = DISP_REG_MUTEX_MOD(mutex->id);
+		if (ddp->data->mutex_mod[id] < 32) {
+			offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+						    mutex->id);
 			reg = readl_relaxed(ddp->regs + offset);
-			reg |= 1 << ddp->mutex_mod[id];
+			reg |= 1 << ddp->data->mutex_mod[id];
 			writel_relaxed(reg, ddp->regs + offset);
 		} else {
 			offset = DISP_REG_MUTEX_MOD2(mutex->id);
 			reg = readl_relaxed(ddp->regs + offset);
-			reg |= 1 << (ddp->mutex_mod[id] - 32);
+			reg |= 1 << (ddp->data->mutex_mod[id] - 32);
 			writel_relaxed(reg, ddp->regs + offset);
 		}
 		return;
 	}
 
-	writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+	writel_relaxed(ddp->data->mutex_sof[sof_id],
+		       ddp->regs +
+		       DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, mutex->id));
 }
 
 void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
@@ -487,18 +782,21 @@
 	case DDP_COMPONENT_DPI0:
 	case DDP_COMPONENT_DPI1:
 		writel_relaxed(MUTEX_SOF_SINGLE_MODE,
-			       ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+			       ddp->regs +
+			       DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg,
+						  mutex->id));
 		break;
 	default:
-		if (ddp->mutex_mod[id] < 32) {
-			offset = DISP_REG_MUTEX_MOD(mutex->id);
+		if (ddp->data->mutex_mod[id] < 32) {
+			offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+						    mutex->id);
 			reg = readl_relaxed(ddp->regs + offset);
-			reg &= ~(1 << ddp->mutex_mod[id]);
+			reg &= ~(1 << ddp->data->mutex_mod[id]);
 			writel_relaxed(reg, ddp->regs + offset);
 		} else {
 			offset = DISP_REG_MUTEX_MOD2(mutex->id);
 			reg = readl_relaxed(ddp->regs + offset);
-			reg &= ~(1 << (ddp->mutex_mod[id] - 32));
+			reg &= ~(1 << (ddp->data->mutex_mod[id] - 32));
 			writel_relaxed(reg, ddp->regs + offset);
 		}
 		break;
@@ -560,10 +858,12 @@
 	for (i = 0; i < 10; i++)
 		ddp->mutex[i].id = i;
 
-	ddp->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(ddp->clk)) {
-		dev_err(dev, "Failed to get clock\n");
-		return PTR_ERR(ddp->clk);
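+	/* The mutex clock is optional: not every SoC wires one up. */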
+	if (of_find_property(dev->of_node, "clocks", &i)) {
+		ddp->clk = devm_clk_get(dev, NULL);
+		if (IS_ERR(ddp->clk)) {
+			dev_err(dev, "Failed to get clock\n");
+			return PTR_ERR(ddp->clk);
+		}
 	}
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -573,7 +873,7 @@
 		return PTR_ERR(ddp->regs);
 	}
 
-	ddp->mutex_mod = of_device_get_match_data(dev);
+	ddp->data = of_device_get_match_data(dev);
 
 	platform_set_drvdata(pdev, ddp);
 
@@ -586,9 +886,16 @@
 }
 
 static const struct of_device_id ddp_driver_dt_match[] = {
-	{ .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
-	{ .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod},
-	{ .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
+	{ .compatible = "mediatek,mt2701-disp-mutex",
+	  .data = &mt2701_ddp_driver_data},
+	{ .compatible = "mediatek,mt2712-disp-mutex",
+	  .data = &mt2712_ddp_driver_data},
+	{ .compatible = "mediatek,mt8167-disp-mutex",
+	  .data = &mt8167_ddp_driver_data},
+	{ .compatible = "mediatek,mt8173-disp-mutex",
+	  .data = &mt8173_ddp_driver_data},
+	{ .compatible = "mediatek,mt8183-disp-mutex",
+	  .data = &mt8183_ddp_driver_data},
 	{},
 };
 MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
index f9a7991..0688f09 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
@@ -19,11 +19,18 @@
 struct regmap;
 struct device;
 struct mtk_disp_mutex;
+struct mtk_mmsys_reg_data;
 
+extern const struct mtk_mmsys_reg_data mt2701_mmsys_reg_data;
+extern const struct mtk_mmsys_reg_data mt8167_mmsys_reg_data;
+extern const struct mtk_mmsys_reg_data mt8173_mmsys_reg_data;
+extern const struct mtk_mmsys_reg_data mt8183_mmsys_reg_data;
 void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
+			      const struct mtk_mmsys_reg_data *reg_data,
 			      enum mtk_ddp_comp_id cur,
 			      enum mtk_ddp_comp_id next);
 void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
+				   const struct mtk_mmsys_reg_data *reg_data,
 				   enum mtk_ddp_comp_id cur,
 				   enum mtk_ddp_comp_id next);
 
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index ff974d8..028d9ab 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -41,6 +41,18 @@
 #define DISP_AAL_EN				0x0000
 #define DISP_AAL_SIZE				0x0030
 
+#define DISP_CCORR_EN				0x0000
+#define CCORR_EN				BIT(0)
+#define DISP_CCORR_CFG				0x0020
+#define CCORR_RELAY_MODE			BIT(0)
+#define DISP_CCORR_SIZE				0x0030
+
+#define DISP_DITHER_EN				0x0000
+#define DITHER_EN				BIT(0)
+#define DISP_DITHER_CFG				0x0020
+#define DITHER_RELAY_MODE			BIT(0)
+#define DISP_DITHER_SIZE			0x0030
+
 #define DISP_GAMMA_EN				0x0000
 #define DISP_GAMMA_CFG				0x0020
 #define DISP_GAMMA_SIZE				0x0030
@@ -131,6 +143,42 @@
 	writel_relaxed(0x0, comp->regs + DISP_AAL_EN);
 }
 
+static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w,
+			     unsigned int h, unsigned int vrefresh,
+			     unsigned int bpc)
+{
+	writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE);
+	writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG);
+}
+
+static void mtk_ccorr_start(struct mtk_ddp_comp *comp)
+{
+	writel(CCORR_EN, comp->regs + DISP_CCORR_EN);
+}
+
+static void mtk_ccorr_stop(struct mtk_ddp_comp *comp)
+{
+	writel_relaxed(0x0, comp->regs + DISP_CCORR_EN);
+}
+
+static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w,
+			      unsigned int h, unsigned int vrefresh,
+			      unsigned int bpc)
+{
+	writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE);
+	writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG);
+}
+
+static void mtk_dither_start(struct mtk_ddp_comp *comp)
+{
+	writel(DITHER_EN, comp->regs + DISP_DITHER_EN);
+}
+
+static void mtk_dither_stop(struct mtk_ddp_comp *comp)
+{
+	writel_relaxed(0x0, comp->regs + DISP_DITHER_EN);
+}
+
 static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
 			     unsigned int h, unsigned int vrefresh,
 			     unsigned int bpc)
@@ -179,6 +227,18 @@
 	.stop = mtk_aal_stop,
 };
 
+static const struct mtk_ddp_comp_funcs ddp_ccorr = {
+	.config = mtk_ccorr_config,
+	.start = mtk_ccorr_start,
+	.stop = mtk_ccorr_stop,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_dither = {
+	.config = mtk_dither_config,
+	.start = mtk_dither_start,
+	.stop = mtk_dither_stop,
+};
+
 static const struct mtk_ddp_comp_funcs ddp_gamma = {
 	.gamma_set = mtk_gamma_set,
 	.config = mtk_gamma_config,
@@ -197,11 +257,14 @@
 
 static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
 	[MTK_DISP_OVL] = "ovl",
+	[MTK_DISP_OVL_2L] = "ovl_2l",
 	[MTK_DISP_RDMA] = "rdma",
 	[MTK_DISP_WDMA] = "wdma",
 	[MTK_DISP_COLOR] = "color",
+	[MTK_DISP_CCORR] = "ccorr",
 	[MTK_DISP_AAL] = "aal",
 	[MTK_DISP_GAMMA] = "gamma",
+	[MTK_DISP_DITHER] = "dither",
 	[MTK_DISP_UFOE] = "ufoe",
 	[MTK_DSI] = "dsi",
 	[MTK_DPI] = "dpi",
@@ -221,8 +284,10 @@
 	[DDP_COMPONENT_AAL0]	= { MTK_DISP_AAL,	0, &ddp_aal },
 	[DDP_COMPONENT_AAL1]	= { MTK_DISP_AAL,	1, &ddp_aal },
 	[DDP_COMPONENT_BLS]	= { MTK_DISP_BLS,	0, NULL },
+	[DDP_COMPONENT_CCORR]	= { MTK_DISP_CCORR,	0, &ddp_ccorr },
 	[DDP_COMPONENT_COLOR0]	= { MTK_DISP_COLOR,	0, NULL },
 	[DDP_COMPONENT_COLOR1]	= { MTK_DISP_COLOR,	1, NULL },
+	[DDP_COMPONENT_DITHER]	= { MTK_DISP_DITHER,	0, &ddp_dither },
 	[DDP_COMPONENT_DPI0]	= { MTK_DPI,		0, NULL },
 	[DDP_COMPONENT_DPI1]	= { MTK_DPI,		1, NULL },
 	[DDP_COMPONENT_DSI0]	= { MTK_DSI,		0, NULL },
@@ -234,6 +299,8 @@
 	[DDP_COMPONENT_OD1]	= { MTK_DISP_OD,	1, &ddp_od },
 	[DDP_COMPONENT_OVL0]	= { MTK_DISP_OVL,	0, NULL },
 	[DDP_COMPONENT_OVL1]	= { MTK_DISP_OVL,	1, NULL },
+	[DDP_COMPONENT_OVL_2L0]	= { MTK_DISP_OVL_2L,	0, NULL },
+	[DDP_COMPONENT_OVL_2L1]	= { MTK_DISP_OVL_2L,	1, NULL },
 	[DDP_COMPONENT_PWM0]	= { MTK_DISP_PWM,	0, NULL },
 	[DDP_COMPONENT_PWM1]	= { MTK_DISP_PWM,	1, NULL },
 	[DDP_COMPONENT_PWM2]	= { MTK_DISP_PWM,	2, NULL },
@@ -299,6 +366,7 @@
 	/* Only DMA capable components need the LARB property */
 	comp->larb_dev = NULL;
 	if (type != MTK_DISP_OVL &&
+	    type != MTK_DISP_OVL_2L &&
 	    type != MTK_DISP_RDMA &&
 	    type != MTK_DISP_WDMA)
 		return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 8399229..d05e277 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -25,9 +25,12 @@
 
 enum mtk_ddp_comp_type {
 	MTK_DISP_OVL,
+	MTK_DISP_OVL_2L,
 	MTK_DISP_RDMA,
 	MTK_DISP_WDMA,
 	MTK_DISP_COLOR,
+	MTK_DISP_CCORR,
+	MTK_DISP_DITHER,
 	MTK_DISP_AAL,
 	MTK_DISP_GAMMA,
 	MTK_DISP_UFOE,
@@ -44,8 +47,10 @@
 	DDP_COMPONENT_AAL0,
 	DDP_COMPONENT_AAL1,
 	DDP_COMPONENT_BLS,
+	DDP_COMPONENT_CCORR,
 	DDP_COMPONENT_COLOR0,
 	DDP_COMPONENT_COLOR1,
+	DDP_COMPONENT_DITHER,
 	DDP_COMPONENT_DPI0,
 	DDP_COMPONENT_DPI1,
 	DDP_COMPONENT_DSI0,
@@ -56,6 +61,8 @@
 	DDP_COMPONENT_OD0,
 	DDP_COMPONENT_OD1,
 	DDP_COMPONENT_OVL0,
+	DDP_COMPONENT_OVL_2L0,
+	DDP_COMPONENT_OVL_2L1,
 	DDP_COMPONENT_OVL1,
 	DDP_COMPONENT_PWM0,
 	DDP_COMPONENT_PWM1,
@@ -85,6 +92,8 @@
 			     struct mtk_plane_state *state);
 	void (*gamma_set)(struct mtk_ddp_comp *comp,
 			  struct drm_crtc_state *state);
+	void (*bgclr_in_on)(struct mtk_ddp_comp *comp);
+	void (*bgclr_in_off)(struct mtk_ddp_comp *comp);
 };
 
 struct mtk_ddp_comp {
@@ -166,6 +175,18 @@
 		comp->funcs->gamma_set(comp, state);
 }
 
+static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp)
+{
+	if (comp->funcs && comp->funcs->bgclr_in_on)
+		comp->funcs->bgclr_in_on(comp);
+}
+
+static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp)
+{
+	if (comp->funcs && comp->funcs->bgclr_in_off)
+		comp->funcs->bgclr_in_off(comp);
+}
+
 int mtk_ddp_comp_get_id(struct device_node *node,
 			enum mtk_ddp_comp_type comp_type);
 int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 947bc6d..2c258d1 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -16,6 +16,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
@@ -191,11 +192,46 @@
 	DDP_COMPONENT_DPI0,
 };
 
+static const enum mtk_ddp_comp_id mt8183_mtk_ddp_main[] = {
+	DDP_COMPONENT_OVL0,
+	DDP_COMPONENT_OVL_2L0,
+	DDP_COMPONENT_RDMA0,
+	DDP_COMPONENT_COLOR0,
+	DDP_COMPONENT_CCORR,
+	DDP_COMPONENT_AAL0,
+	DDP_COMPONENT_GAMMA,
+	DDP_COMPONENT_DITHER,
+	DDP_COMPONENT_DSI0,
+};
+
+static const enum mtk_ddp_comp_id mt8183_mtk_ddp_ext[] = {
+	DDP_COMPONENT_OVL_2L1,
+	DDP_COMPONENT_RDMA1,
+	DDP_COMPONENT_DPI0,
+};
+
+static const enum mtk_ddp_comp_id mt8167_mtk_ddp_main[] = {
+	DDP_COMPONENT_OVL0,
+	DDP_COMPONENT_COLOR0,
+	DDP_COMPONENT_CCORR,
+	DDP_COMPONENT_AAL0,
+	DDP_COMPONENT_GAMMA,
+	DDP_COMPONENT_DITHER,
+	DDP_COMPONENT_RDMA0,
+	DDP_COMPONENT_DSI0,
+};
+
+static const enum mtk_ddp_comp_id mt8167_mtk_ddp_ext[] = {
+	DDP_COMPONENT_RDMA1,
+	DDP_COMPONENT_DPI1,
+};
+
 static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
 	.main_path = mt2701_mtk_ddp_main,
 	.main_len = ARRAY_SIZE(mt2701_mtk_ddp_main),
 	.ext_path = mt2701_mtk_ddp_ext,
 	.ext_len = ARRAY_SIZE(mt2701_mtk_ddp_ext),
+	.reg_data = &mt2701_mmsys_reg_data,
 	.shadow_register = true,
 };
 
@@ -206,6 +242,7 @@
 	.ext_len = ARRAY_SIZE(mt2712_mtk_ddp_ext),
 	.third_path = mt2712_mtk_ddp_third,
 	.third_len = ARRAY_SIZE(mt2712_mtk_ddp_third),
+	.reg_data = &mt8173_mmsys_reg_data,
 };
 
 static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
@@ -213,6 +250,23 @@
 	.main_len = ARRAY_SIZE(mt8173_mtk_ddp_main),
 	.ext_path = mt8173_mtk_ddp_ext,
 	.ext_len = ARRAY_SIZE(mt8173_mtk_ddp_ext),
+	.reg_data = &mt8173_mmsys_reg_data,
+};
+
+static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
+	.main_path = mt8183_mtk_ddp_main,
+	.main_len = ARRAY_SIZE(mt8183_mtk_ddp_main),
+	.ext_path = mt8183_mtk_ddp_ext,
+	.ext_len = ARRAY_SIZE(mt8183_mtk_ddp_ext),
+	.reg_data = &mt8183_mmsys_reg_data,
+};
+
+static const struct mtk_mmsys_driver_data mt8167_mmsys_driver_data = {
+	.main_path = mt8167_mtk_ddp_main,
+	.main_len = ARRAY_SIZE(mt8167_mtk_ddp_main),
+	.ext_path = mt8167_mtk_ddp_ext,
+	.ext_len = ARRAY_SIZE(mt8167_mtk_ddp_ext),
+	.reg_data = &mt8167_mmsys_reg_data,
 };
 
 static int mtk_drm_kms_init(struct drm_device *drm)
@@ -385,6 +439,8 @@
 	.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
 	.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
 	.gem_prime_mmap = mtk_drm_gem_mmap_buf,
+	.gem_prime_vmap = mtk_drm_gem_prime_vmap,
+	.gem_prime_vunmap = mtk_drm_gem_prime_vunmap,
 	.fops = &mtk_drm_fops,
 
 	.name = DRIVER_NAME,
@@ -420,6 +476,10 @@
 	if (ret < 0)
 		goto err_deinit;
 
+	ret = drm_fbdev_generic_setup(drm, 32);
+	if (ret)
+		DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+
 	return 0;
 
 err_deinit:
@@ -448,38 +508,78 @@
 static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
 	{ .compatible = "mediatek,mt2701-disp-ovl",
 	  .data = (void *)MTK_DISP_OVL },
+	{ .compatible = "mediatek,mt8167-disp-ovl",
+	  .data = (void *)MTK_DISP_OVL },
 	{ .compatible = "mediatek,mt8173-disp-ovl",
 	  .data = (void *)MTK_DISP_OVL },
+	{ .compatible = "mediatek,mt8183-disp-ovl",
+	  .data = (void *)MTK_DISP_OVL },
+	{ .compatible = "mediatek,mt8183-disp-ovl-2l",
+	  .data = (void *)MTK_DISP_OVL_2L },
 	{ .compatible = "mediatek,mt2701-disp-rdma",
 	  .data = (void *)MTK_DISP_RDMA },
+	{ .compatible = "mediatek,mt8167-disp-rdma",
+	  .data = (void *)MTK_DISP_RDMA },
 	{ .compatible = "mediatek,mt8173-disp-rdma",
 	  .data = (void *)MTK_DISP_RDMA },
+	{ .compatible = "mediatek,mt8183-disp-rdma",
+	  .data = (void *)MTK_DISP_RDMA },
+	{ .compatible = "mediatek,mt8183-disp-rdma1",
+	  .data = (void *)MTK_DISP_RDMA },
 	{ .compatible = "mediatek,mt8173-disp-wdma",
 	  .data = (void *)MTK_DISP_WDMA },
+	{ .compatible = "mediatek,mt8167-disp-ccorr",
+	  .data = (void *)MTK_DISP_CCORR },
+	{ .compatible = "mediatek,mt8183-disp-ccorr",
+	  .data = (void *)MTK_DISP_CCORR },
 	{ .compatible = "mediatek,mt2701-disp-color",
 	  .data = (void *)MTK_DISP_COLOR },
+	{ .compatible = "mediatek,mt8167-disp-color",
+	  .data = (void *)MTK_DISP_COLOR },
 	{ .compatible = "mediatek,mt8173-disp-color",
 	  .data = (void *)MTK_DISP_COLOR },
+	{ .compatible = "mediatek,mt8167-disp-aal",
+	  .data = (void *)MTK_DISP_AAL},
 	{ .compatible = "mediatek,mt8173-disp-aal",
 	  .data = (void *)MTK_DISP_AAL},
+	{ .compatible = "mediatek,mt8167-disp-gamma",
+	  .data = (void *)MTK_DISP_GAMMA, },
 	{ .compatible = "mediatek,mt8173-disp-gamma",
 	  .data = (void *)MTK_DISP_GAMMA, },
+	{ .compatible = "mediatek,mt8167-disp-dither",
+	  .data = (void *)MTK_DISP_DITHER },
+	{ .compatible = "mediatek,mt8183-disp-dither",
+	  .data = (void *)MTK_DISP_DITHER },
 	{ .compatible = "mediatek,mt8173-disp-ufoe",
 	  .data = (void *)MTK_DISP_UFOE },
 	{ .compatible = "mediatek,mt2701-dsi",
 	  .data = (void *)MTK_DSI },
+	{ .compatible = "mediatek,mt8167-dsi",
+	  .data = (void *)MTK_DSI },
 	{ .compatible = "mediatek,mt8173-dsi",
 	  .data = (void *)MTK_DSI },
+	{ .compatible = "mediatek,mt8183-dsi",
+	  .data = (void *)MTK_DSI },
 	{ .compatible = "mediatek,mt8173-dpi",
 	  .data = (void *)MTK_DPI },
+	{ .compatible = "mediatek,mt8183-dpi",
+	  .data = (void *)MTK_DPI },
+	{ .compatible = "mediatek,mt8167-dpi",
+	  .data = (void *)MTK_DPI },
 	{ .compatible = "mediatek,mt2701-disp-mutex",
 	  .data = (void *)MTK_DISP_MUTEX },
 	{ .compatible = "mediatek,mt2712-disp-mutex",
 	  .data = (void *)MTK_DISP_MUTEX },
+	{ .compatible = "mediatek,mt8167-disp-mutex",
+	  .data = (void *)MTK_DISP_MUTEX },
 	{ .compatible = "mediatek,mt8173-disp-mutex",
 	  .data = (void *)MTK_DISP_MUTEX },
+	{ .compatible = "mediatek,mt8183-disp-mutex",
+	  .data = (void *)MTK_DISP_MUTEX },
 	{ .compatible = "mediatek,mt2701-disp-pwm",
 	  .data = (void *)MTK_DISP_BLS },
+	{ .compatible = "mediatek,mt8167-disp-pwm",
+	  .data = (void *)MTK_DISP_PWM },
 	{ .compatible = "mediatek,mt8173-disp-pwm",
 	  .data = (void *)MTK_DISP_PWM },
 	{ .compatible = "mediatek,mt8173-disp-od",
@@ -553,6 +653,7 @@
 		 */
 		if (comp_type == MTK_DISP_COLOR ||
 		    comp_type == MTK_DISP_OVL ||
+		    comp_type == MTK_DISP_OVL_2L ||
 		    comp_type == MTK_DISP_RDMA ||
 		    comp_type == MTK_DSI ||
 		    comp_type == MTK_DPI) {
@@ -655,6 +756,10 @@
 	  .data = &mt2712_mmsys_driver_data},
 	{ .compatible = "mediatek,mt8173-mmsys",
 	  .data = &mt8173_mmsys_driver_data},
+	{ .compatible = "mediatek,mt8183-display",
+	  .data = &mt8183_mmsys_driver_data},
+	{ .compatible = "mediatek,mt8167-mmsys2",
+	  .data = &mt8167_mmsys_driver_data},
 	{ }
 };
 
@@ -675,8 +780,8 @@
 	&mtk_disp_rdma_driver,
 	&mtk_dpi_driver,
 	&mtk_drm_platform_driver,
-	&mtk_dsi_driver,
 	&mtk_mipi_tx_driver,
+	&mtk_dsi_driver,
 };
 
 static int __init mtk_drm_init(void)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 8fa60d4..36b466b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -15,6 +15,7 @@
 #define MTK_DRM_DRV_H
 
 #include <linux/io.h>
+#include "mtk_drm_ddp.h"
 #include "mtk_drm_ddp_comp.h"
 
 #define MAX_CRTC	3
@@ -36,6 +37,8 @@
 	const enum mtk_ddp_comp_id *third_path;
 	unsigned int third_len;
 
+	const struct mtk_mmsys_reg_data *reg_data;
+
 	bool shadow_register;
 };
 
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 259b7b0..38483e9 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -241,3 +241,49 @@
 	kfree(mtk_gem);
 	return ERR_PTR(ret);
 }
+
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+	struct sg_table *sgt;
+	struct sg_page_iter iter;
+	unsigned int npages;
+	unsigned int i = 0;
+
+	if (mtk_gem->kvaddr)
+		return mtk_gem->kvaddr;
+
+	sgt = mtk_gem_prime_get_sg_table(obj);
+	if (IS_ERR(sgt))
+		return NULL;
+
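+	/*
+	 * Collect the backing pages from the sg table so the (possibly
+	 * non-contiguous) buffer can be vmap()ed with a write-combined
+	 * kernel mapping.
+	 */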
+	npages = obj->size >> PAGE_SHIFT;
+	mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
+	if (!mtk_gem->pages)
+		goto out;
+
+	for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+		mtk_gem->pages[i++] = sg_page_iter_page(&iter);
+		if (i >= npages)
+			break;
+	}
+	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+			       pgprot_writecombine(PAGE_KERNEL));
+
+out:
+	sg_free_table(sgt);
+	kfree(sgt);
+
+	return mtk_gem->kvaddr;
+}
+
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+
+	if (!mtk_gem->pages)
+		return;
+
+	vunmap(vaddr);
+	mtk_gem->kvaddr = NULL;
+	kfree(mtk_gem->pages);
+	mtk_gem->pages = NULL;
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
index 534639b..c047a7e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
@@ -37,6 +37,7 @@
 	dma_addr_t		dma_addr;
 	unsigned long		dma_attrs;
 	struct sg_table		*sg;
+	struct page		**pages;
 };
 
 #define to_mtk_gem_obj(x)	container_of(x, struct mtk_drm_gem_obj, base)
@@ -52,5 +53,7 @@
 struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
 			struct dma_buf_attachment *attach, struct sg_table *sg);
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 
 #endif
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index f7e6aa1..c996e47 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -30,6 +30,7 @@
 	DRM_FORMAT_RGB565,
 	DRM_FORMAT_UYVY,
 	DRM_FORMAT_YUYV,
+	DRM_FORMAT_XBGR8888,
 };
 
 static void mtk_plane_reset(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 0dd317a..aa32554 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -21,10 +21,12 @@
 #include <linux/component.h>
 #include <linux/iopoll.h>
 #include <linux/irq.h>
+#include <linux/mfd/syscon.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <video/mipi_display.h>
 #include <video/videomode.h>
 
@@ -45,6 +47,7 @@
 #define DSI_CON_CTRL		0x10
 #define DSI_RESET			BIT(0)
 #define DSI_EN				BIT(1)
+#define DPHY_RESET			BIT(2)
 
 #define DSI_MODE_CTRL		0x14
 #define MODE				(3)
@@ -78,6 +81,7 @@
 #define DSI_VBP_NL		0x24
 #define DSI_VFP_NL		0x28
 #define DSI_VACT_NL		0x2C
+#define DSI_SIZE_CON		0x38
 #define DSI_HSA_WC		0x50
 #define DSI_HBP_WC		0x54
 #define DSI_HFP_WC		0x58
@@ -131,7 +135,10 @@
 #define VM_CMD_EN			BIT(0)
 #define TS_VFP_EN			BIT(5)
 
-#define DSI_CMDQ0		0x180
+#define DSI_SHADOW_DEBUG	0x190U
+#define FORCE_COMMIT			BIT(0)
+#define BYPASS_SHADOW			BIT(1)
+
 #define CONFIG				(0xff << 0)
 #define SHORT_PACKET			0
 #define LONG_PACKET			2
@@ -140,11 +147,7 @@
 #define DATA_0				(0xff << 16)
 #define DATA_1				(0xff << 24)
 
-#define T_LPX		5
-#define T_HS_PREP	6
-#define T_HS_TRAIL	8
-#define T_HS_EXIT	7
-#define T_HS_ZERO	10
+#define MMSYS_SW_RST_DSI_B		BIT(25)
 
 #define NS_TO_CYCLE(n, c)    ((n) / (c) + (((n) % (c)) ? 1 : 0))
 
@@ -154,8 +157,33 @@
 	(type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
 	(type == MIPI_DSI_DCS_READ))
 
+struct mtk_phy_timing {
+	u32 lpx;
+	u32 da_hs_prepare;
+	u32 da_hs_zero;
+	u32 da_hs_trail;
+
+	u32 ta_go;
+	u32 ta_sure;
+	u32 ta_get;
+	u32 da_hs_exit;
+
+	u32 clk_hs_zero;
+	u32 clk_hs_trail;
+
+	u32 clk_hs_prepare;
+	u32 clk_hs_post;
+	u32 clk_hs_exit;
+};
+
 struct phy;
 
+struct mtk_dsi_driver_data {
+	const u32 reg_cmdq_off;
+	bool has_shadow_ctl;
+	bool has_size_ctl;
+};
+
 struct mtk_dsi {
 	struct mtk_ddp_comp ddp_comp;
 	struct device *dev;
@@ -165,11 +193,14 @@
 	struct drm_panel *panel;
 	struct drm_bridge *bridge;
 	struct phy *phy;
+	struct regmap *mmsys_sw_rst_b;
+	u32 sw_rst_b;
 
 	void __iomem *regs;
 
 	struct clk *engine_clk;
 	struct clk *digital_clk;
+	struct clk *mipi26mdbg;
 	struct clk *hs_clk;
 
 	u32 data_rate;
@@ -178,10 +209,12 @@
 	enum mipi_dsi_pixel_format format;
 	unsigned int lanes;
 	struct videomode vm;
+	struct mtk_phy_timing phy_timing;
 	int refcount;
 	bool enabled;
 	u32 irq_data;
 	wait_queue_head_t irq_wait_queue;
+	const struct mtk_dsi_driver_data *driver_data;
 };
 
 static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
@@ -209,18 +242,34 @@
 static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
 {
 	u32 timcon0, timcon1, timcon2, timcon3;
-	u32 ui, cycle_time;
+	u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
+	struct mtk_phy_timing *timing = &dsi->phy_timing;
 
-	ui = 1000 / dsi->data_rate + 0x01;
-	cycle_time = 8000 / dsi->data_rate + 0x01;
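+	/*
+	 * The HS timing values below are counted in lane byte clocks
+	 * (data_rate / 8).  The constants (60 ns LPX, 80 ns + 4 UI
+	 * HS-PREPARE, ...) appear to track the D-PHY spec minimums; the
+	 * exact margins are vendor tuning rather than spec-mandated.
+	 */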
+	timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
+	timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
+	timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
+			     timing->da_hs_prepare;
+	timing->da_hs_trail = timing->da_hs_prepare + 1;
 
-	timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
-	timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
-		  T_HS_EXIT << 24;
-	timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
-		  (NS_TO_CYCLE(0x150, cycle_time) << 16);
-	timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
-		  NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;
+	timing->ta_go = 4 * timing->lpx - 2;
+	timing->ta_sure = timing->lpx + 2;
+	timing->ta_get = 4 * timing->lpx;
+	timing->da_hs_exit = 2 * timing->lpx + 1;
+
+	timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
+	timing->clk_hs_post = timing->clk_hs_prepare + 8;
+	timing->clk_hs_trail = timing->clk_hs_prepare;
+	timing->clk_hs_zero = timing->clk_hs_trail * 4;
+	timing->clk_hs_exit = 2 * timing->clk_hs_trail;
+
+	timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
+		  timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
+	timcon1 = timing->ta_go | timing->ta_sure << 8 |
+		  timing->ta_get << 16 | timing->da_hs_exit << 24;
+	timcon2 = 1 << 8 | timing->clk_hs_zero << 16 |
+		  timing->clk_hs_trail << 24;
+	timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 |
+		  timing->clk_hs_exit << 16;
 
 	writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
 	writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
@@ -238,12 +287,31 @@
 	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
 }
 
+static void mtk_dsi_reset_all(struct mtk_dsi *dsi)
+{
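+	/* Pulse the active-low DSI software reset bit in MMSYS. */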
+	if (!dsi->mmsys_sw_rst_b)
+		return;
+
+	regmap_update_bits(dsi->mmsys_sw_rst_b, dsi->sw_rst_b,
+			   MMSYS_SW_RST_DSI_B, ~MMSYS_SW_RST_DSI_B);
+	usleep_range(1000, 1100);
+
+	regmap_update_bits(dsi->mmsys_sw_rst_b, dsi->sw_rst_b,
+			   MMSYS_SW_RST_DSI_B, MMSYS_SW_RST_DSI_B);
+}
+
 static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
 {
 	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
 	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
 }
 
+static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi)
+{
+	mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET);
+	mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0);
+}
+
 static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
 {
 	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
@@ -407,7 +475,8 @@
 	u32 horizontal_sync_active_byte;
 	u32 horizontal_backporch_byte;
 	u32 horizontal_frontporch_byte;
-	u32 dsi_tmp_buf_bpp;
+	u32 dsi_tmp_buf_bpp, data_phy_cycles;
+	struct mtk_phy_timing *timing = &dsi->phy_timing;
 
 	struct videomode *vm = &dsi->vm;
 
@@ -421,6 +490,10 @@
 	writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
 	writel(vm->vactive, dsi->regs + DSI_VACT_NL);
 
+	if (dsi->driver_data->has_size_ctl)
+		writel(vm->vactive << 16 | vm->hactive,
+		       dsi->regs + DSI_SIZE_CON);
+
 	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
 
 	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
@@ -430,7 +503,46 @@
 		horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
 			dsi_tmp_buf_bpp - 10);
 
-	horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12);
+	data_phy_cycles = timing->lpx + timing->da_hs_prepare +
+			  timing->da_hs_zero + timing->da_hs_exit + 3;
+
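+	/*
+	 * data_phy_cycles is the per-line LP<->HS transition overhead in
+	 * byte clocks; below it is stolen from HFP/HBP, split roughly in
+	 * proportion to their sizes, so the line period still matches the
+	 * requested video timing.
+	 */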
+	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
+		    data_phy_cycles * dsi->lanes + 18) {
+			horizontal_frontporch_byte =
+				vm->hfront_porch * dsi_tmp_buf_bpp -
+				(data_phy_cycles * dsi->lanes + 18) *
+				vm->hfront_porch /
+				(vm->hfront_porch + vm->hback_porch);
+
+			horizontal_backporch_byte =
+				horizontal_backporch_byte -
+				(data_phy_cycles * dsi->lanes + 18) *
+				vm->hback_porch /
+				(vm->hfront_porch + vm->hback_porch);
+		} else {
+			DRM_WARN("HFP less than d-phy overhead, FPS will drop below 60Hz\n");
+			horizontal_frontporch_byte = vm->hfront_porch *
+						     dsi_tmp_buf_bpp;
+		}
+	} else {
+		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
+		    data_phy_cycles * dsi->lanes + 12) {
+			horizontal_frontporch_byte =
+				vm->hfront_porch * dsi_tmp_buf_bpp -
+				(data_phy_cycles * dsi->lanes + 12) *
+				vm->hfront_porch /
+				(vm->hfront_porch + vm->hback_porch);
+			horizontal_backporch_byte = horizontal_backporch_byte -
+				(data_phy_cycles * dsi->lanes + 12) *
+				vm->hback_porch /
+				(vm->hfront_porch + vm->hback_porch);
+		} else {
+			DRM_WARN("HFP less than d-phy overhead, FPS will drop below 60Hz\n");
+			horizontal_frontporch_byte = vm->hfront_porch *
+						     dsi_tmp_buf_bpp;
+		}
+	}
 
 	writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
 	writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
@@ -528,10 +640,9 @@
 
 static int mtk_dsi_poweron(struct mtk_dsi *dsi)
 {
-	struct device *dev = dsi->dev;
+	struct device *dev = dsi->host.dev;
 	int ret;
-	u64 pixel_clock, total_bits;
-	u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
+	u32 bit_per_pixel;
 
 	if (++dsi->refcount != 1)
 		return 0;
@@ -550,24 +661,8 @@
 		break;
 	}
 
-	/**
-	 * htotal_time = htotal * byte_per_pixel / num_lanes
-	 * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
-	 * mipi_ratio = (htotal_time + overhead_time) / htotal_time
-	 * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
-	 */
-	pixel_clock = dsi->vm.pixelclock;
-	htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
-			dsi->vm.hsync_len;
-	htotal_bits = htotal * bit_per_pixel;
-
-	overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
-			T_HS_EXIT;
-	overhead_bits = overhead_cycles * dsi->lanes * 8;
-	total_bits = htotal_bits + overhead_bits;
-
-	dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
-					  htotal * dsi->lanes);
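+	/*
+	 * The HS bit rate is now simply pixel_clock * bpp / lanes; the
+	 * LP-transition overhead that the old mipi_ratio estimate covered
+	 * is instead compensated by the porch trimming done in
+	 * mtk_dsi_config_vdo_timing().
+	 */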
+	dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel,
+					  dsi->lanes);
 
 	ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
 	if (ret < 0) {
@@ -577,6 +672,12 @@
 
 	phy_power_on(dsi->phy);
 
+	ret = clk_prepare_enable(dsi->mipi26mdbg);
+	if (ret < 0) {
+		dev_err(dev, "Failed to enable mipi26mdbg clock: %d\n", ret);
+		goto err_phy_power_off;
+	}
+
 	ret = clk_prepare_enable(dsi->engine_clk);
 	if (ret < 0) {
 		dev_err(dev, "Failed to enable engine clock: %d\n", ret);
@@ -590,10 +691,17 @@
 	}
 
 	mtk_dsi_enable(dsi);
+
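+	/*
+	 * DSI blocks with a shadow register bank (mt8183) would otherwise
+	 * latch writes until the next commit; force direct commit and
+	 * bypass the shadow so the configuration below applies at once.
+	 */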
+	if (dsi->driver_data->has_shadow_ctl)
+		writel(FORCE_COMMIT | BYPASS_SHADOW,
+		       dsi->regs + DSI_SHADOW_DEBUG);
+
 	mtk_dsi_reset_engine(dsi);
 	mtk_dsi_phy_timconfig(dsi);
 
 	mtk_dsi_rxtx_control(dsi);
+	usleep_range(30, 100);
+	mtk_dsi_reset_dphy(dsi);
 	mtk_dsi_ps_control_vact(dsi);
 	mtk_dsi_set_vm_cmd(dsi);
 	mtk_dsi_config_vdo_timing(dsi);
@@ -601,7 +709,7 @@
 
 	mtk_dsi_clk_ulp_mode_leave(dsi);
 	mtk_dsi_lane0_ulp_mode_leave(dsi);
-	mtk_dsi_clk_hs_mode(dsi, 0);
+	mtk_dsi_clk_hs_mode(dsi, 1);
 
 	if (dsi->panel) {
 		if (drm_panel_prepare(dsi->panel)) {
@@ -656,6 +764,7 @@
 
 	clk_disable_unprepare(dsi->engine_clk);
 	clk_disable_unprepare(dsi->digital_clk);
+	clk_disable_unprepare(dsi->mipi26mdbg);
 
 	phy_power_off(dsi->phy);
 }
@@ -836,6 +945,8 @@
 			goto err_encoder_cleanup;
 	}
 
+	mtk_dsi_reset_all(dsi);
+
 	return 0;
 
 err_encoder_cleanup:
@@ -941,6 +1052,7 @@
 	const char *tx_buf = msg->tx_buf;
 	u8 config, cmdq_size, cmdq_off, type = msg->type;
 	u32 reg_val, cmdq_mask, i;
+	u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off;
 
 	if (MTK_DSI_HOST_IS_READ(type))
 		config = BTA;
@@ -960,9 +1072,11 @@
 	}
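+
+	/*
+	 * The CMDQ registers are 32 bits wide and hold four payload bytes
+	 * each, so every byte is placed with a read-modify-write at the
+	 * aligned register offset, shifted by ((i + cmdq_off) & 3) * 8.
+	 */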
 
 	for (i = 0; i < msg->tx_len; i++)
-		writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);
+		mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U),
+			     (0xffUL << (((i + cmdq_off) & 3U) * 8U)),
+			     tx_buf[i] << (((i + cmdq_off) & 3U) * 8U));
 
-	mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
+	mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val);
 	mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
 }
 
@@ -1052,12 +1166,6 @@
 		return ret;
 	}
 
-	ret = mipi_dsi_host_register(&dsi->host);
-	if (ret < 0) {
-		dev_err(dev, "failed to register DSI host: %d\n", ret);
-		goto err_ddp_comp_unregister;
-	}
-
 	ret = mtk_dsi_create_conn_enc(drm, dsi);
 	if (ret) {
 		DRM_ERROR("Encoder create failed with %d\n", ret);
@@ -1067,8 +1175,6 @@
 	return 0;
 
 err_unregister:
-	mipi_dsi_host_unregister(&dsi->host);
-err_ddp_comp_unregister:
 	mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
 	return ret;
 }
@@ -1080,7 +1186,6 @@
 	struct mtk_dsi *dsi = dev_get_drvdata(dev);
 
 	mtk_dsi_destroy_conn_enc(dsi);
-	mipi_dsi_host_unregister(&dsi->host);
 	mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
 }
 
@@ -1094,6 +1199,7 @@
 	struct mtk_dsi *dsi;
 	struct device *dev = &pdev->dev;
 	struct resource *regs;
+	struct regmap *regmap;
 	int irq_num;
 	int comp_id;
 	int ret;
@@ -1104,31 +1210,40 @@
 
 	dsi->host.ops = &mtk_dsi_ops;
 	dsi->host.dev = dev;
+	ret = mipi_dsi_host_register(&dsi->host);
+	if (ret < 0) {
+		dev_err(dev, "failed to register DSI host: %d\n", ret);
+		return ret;
+	}
 
 	ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
 					  &dsi->panel, &dsi->bridge);
 	if (ret)
-		return ret;
+		goto err_unregister_host;
+
+	dsi->driver_data = of_device_get_match_data(dev);
 
 	dsi->engine_clk = devm_clk_get(dev, "engine");
 	if (IS_ERR(dsi->engine_clk)) {
 		ret = PTR_ERR(dsi->engine_clk);
 		dev_err(dev, "Failed to get engine clock: %d\n", ret);
-		return ret;
+		goto err_unregister_host;
 	}
 
 	dsi->digital_clk = devm_clk_get(dev, "digital");
 	if (IS_ERR(dsi->digital_clk)) {
 		ret = PTR_ERR(dsi->digital_clk);
 		dev_err(dev, "Failed to get digital clock: %d\n", ret);
-		return ret;
+		goto err_unregister_host;
 	}
 
+	dsi->mipi26mdbg = devm_clk_get_optional(dev, "mipi26mdbg");
+	if (IS_ERR(dsi->mipi26mdbg)) {
+		ret = PTR_ERR(dsi->mipi26mdbg);
+		dev_err(dev, "Failed to get mipi26mdbg clock: %d\n", ret);
+		goto err_unregister_host;
+	}
+
 	dsi->hs_clk = devm_clk_get(dev, "hs");
 	if (IS_ERR(dsi->hs_clk)) {
 		ret = PTR_ERR(dsi->hs_clk);
 		dev_err(dev, "Failed to get hs clock: %d\n", ret);
-		return ret;
+		goto err_unregister_host;
 	}
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1136,33 +1251,50 @@
 	if (IS_ERR(dsi->regs)) {
 		ret = PTR_ERR(dsi->regs);
 		dev_err(dev, "Failed to ioremap memory: %d\n", ret);
-		return ret;
+		goto err_unregister_host;
 	}
 
 	dsi->phy = devm_phy_get(dev, "dphy");
 	if (IS_ERR(dsi->phy)) {
 		ret = PTR_ERR(dsi->phy);
 		dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
-		return ret;
+		goto err_unregister_host;
+	}
+
+	regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+						 "mediatek,syscon-dsi");
+	ret = of_property_read_u32_index(dev->of_node, "mediatek,syscon-dsi", 1,
+					 &dsi->sw_rst_b);
+
+	if (IS_ERR(regmap))
+		ret = PTR_ERR(regmap);
+
+	if (ret) {
+		dev_err(dev, "Failed to get mmsys registers: %d\n", ret);
+	} else {
+		dsi->mmsys_sw_rst_b = regmap;
 	}
 
 	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
 	if (comp_id < 0) {
 		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
-		return comp_id;
+		ret = comp_id;
+		goto err_unregister_host;
 	}
 
 	ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
 				&mtk_dsi_funcs);
 	if (ret) {
 		dev_err(dev, "Failed to initialize component: %d\n", ret);
-		return ret;
+		goto err_unregister_host;
 	}
 
 	irq_num = platform_get_irq(pdev, 0);
 	if (irq_num < 0) {
-		dev_err(&pdev->dev, "failed to request dsi irq resource\n");
-		return -EPROBE_DEFER;
+		dev_err(&pdev->dev, "failed to get dsi irq_num: %d\n", irq_num);
+		ret = irq_num;
+		goto err_unregister_host;
 	}
 
 	irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
@@ -1170,14 +1302,24 @@
 			       IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
-		return -EPROBE_DEFER;
+		goto err_unregister_host;
 	}
 
 	init_waitqueue_head(&dsi->irq_wait_queue);
 
 	platform_set_drvdata(pdev, dsi);
 
-	return component_add(&pdev->dev, &mtk_dsi_component_ops);
+	ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add component: %d\n", ret);
+		goto err_unregister_host;
+	}
+
+	return 0;
+
+err_unregister_host:
+	mipi_dsi_host_unregister(&dsi->host);
+	return ret;
 }
 
 static int mtk_dsi_remove(struct platform_device *pdev)
@@ -1186,13 +1328,38 @@
 
 	mtk_output_dsi_disable(dsi);
 	component_del(&pdev->dev, &mtk_dsi_component_ops);
+	mipi_dsi_host_unregister(&dsi->host);
 
 	return 0;
 }
 
+static const struct mtk_dsi_driver_data mt8167_dsi_driver_data = {
+	.reg_cmdq_off = 0x180,
+};
+
+static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
+	.reg_cmdq_off = 0x200,
+};
+
+static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
+	.reg_cmdq_off = 0x180,
+};
+
+static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
+	.reg_cmdq_off = 0x200,
+	.has_shadow_ctl = true,
+	.has_size_ctl = true,
+};
+
 static const struct of_device_id mtk_dsi_of_match[] = {
-	{ .compatible = "mediatek,mt2701-dsi" },
-	{ .compatible = "mediatek,mt8173-dsi" },
+	{ .compatible = "mediatek,mt2701-dsi",
+	  .data = &mt2701_dsi_driver_data },
+	{ .compatible = "mediatek,mt8167-dsi",
+	  .data = &mt8167_dsi_driver_data },
+	{ .compatible = "mediatek,mt8173-dsi",
+	  .data = &mt8173_dsi_driver_data },
+	{ .compatible = "mediatek,mt8183-dsi",
+	  .data = &mt8183_dsi_driver_data },
 	{ },
 };
 
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 62444a3..5aa150a 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -38,6 +38,13 @@
 
 #define NCTS_BYTES	7
 
+struct mtk_hdmi_conf {
+	u32 sys_cfg1c;
+	u32 sys_cfg20;
+	u32 hdisplay_max;
+	u32 vdisplay_max;
+};
+
 enum mtk_hdmi_clk_id {
 	MTK_HDMI_CLK_HDMI_PIXEL,
 	MTK_HDMI_CLK_HDMI_PLL,
@@ -148,6 +155,7 @@
 };
 
 struct mtk_hdmi {
+	struct mtk_hdmi_conf *conf;
 	struct drm_bridge bridge;
 	struct drm_bridge *next_bridge;
 	struct drm_connector conn;
@@ -233,6 +241,7 @@
 static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
 {
 	struct arm_smccc_res res;
+	struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(hdmi->phy);
 
 	/*
 	 * MT8173 HDMI hardware has an output control bit to enable/disable HDMI
@@ -240,18 +249,23 @@
 	 * The ARM trusted firmware provides an API for the HDMI driver to set
 	 * this control bit to enable HDMI output in supervisor mode.
 	 */
-	arm_smccc_smc(MTK_SIP_SET_AUTHORIZED_SECURE_REG, 0x14000904, 0x80000000,
-		      0, 0, 0, 0, 0, &res);
+	if (hdmi_phy->conf && hdmi_phy->conf->tz_disabled)
+		regmap_update_bits(hdmi->sys_regmap,
+				   hdmi->sys_offset + hdmi->conf->sys_cfg20,
+				   0x80008005, enable ? 0x80000005 : 0x8000);
+	else
+		arm_smccc_smc(MTK_SIP_SET_AUTHORIZED_SECURE_REG, 0x14000904,
+			      0x80000000, 0, 0, 0, 0, 0, &res);
 
-	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + hdmi->conf->sys_cfg20,
 			   HDMI_PCLK_FREE_RUN, enable ? HDMI_PCLK_FREE_RUN : 0);
-	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + hdmi->conf->sys_cfg1c,
 			   HDMI_ON | ANLG_ON, enable ? (HDMI_ON | ANLG_ON) : 0);
 }
 
 static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable)
 {
-	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + hdmi->conf->sys_cfg20,
 			   HDMI2P0_EN, enable ? 0 : HDMI2P0_EN);
 }
 
@@ -267,12 +281,12 @@
 
 static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
 {
-	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + hdmi->conf->sys_cfg1c,
 			   HDMI_RST, HDMI_RST);
-	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + hdmi->conf->sys_cfg1c,
 			   HDMI_RST, 0);
 	mtk_hdmi_clear_bits(hdmi, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
-	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + hdmi->conf->sys_cfg1c,
 			   ANLG_ON, ANLG_ON);
 }
 
@@ -356,16 +370,16 @@
 
 static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
 {
-	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + hdmi->conf->sys_cfg20,
 			   HDMI_OUT_FIFO_EN | MHL_MODE_ON, 0);
 	usleep_range(2000, 4000);
-	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + hdmi->conf->sys_cfg20,
 			   HDMI_OUT_FIFO_EN | MHL_MODE_ON, HDMI_OUT_FIFO_EN);
 }
 
 static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi)
 {
-	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+	regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + hdmi->conf->sys_cfg20,
 			   DEEP_COLOR_MODE_MASK | DEEP_COLOR_EN,
 			   COLOR_8BIT_MODE);
 }
@@ -1245,6 +1259,14 @@
 			return MODE_BAD;
 	}
 
+	if (hdmi->conf->hdisplay_max &&
+	    mode->hdisplay > hdmi->conf->hdisplay_max)
+		return MODE_BAD;
+
+	if (hdmi->conf->vdisplay_max &&
+	    mode->vdisplay > hdmi->conf->vdisplay_max)
+		return MODE_BAD;
+
 	if (mode->clock < 27000)
 		return MODE_CLOCK_LOW;
 	if (mode->clock > 297000)
@@ -1673,6 +1695,7 @@
 		return -ENOMEM;
 
 	hdmi->dev = dev;
+	hdmi->conf = (struct mtk_hdmi_conf *)of_device_get_match_data(dev);
 
 	ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev);
 	if (ret)
@@ -1750,8 +1773,22 @@
 static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops,
 			 mtk_hdmi_suspend, mtk_hdmi_resume);
 
+static struct mtk_hdmi_conf mt8173_conf = {
+	.sys_cfg1c = HDMI_SYS_CFG1C,
+	.sys_cfg20 = HDMI_SYS_CFG20,
+};
+
+static struct mtk_hdmi_conf mt8167_conf = {
+	.sys_cfg1c = MT8167_HDMI_SYS_CFG1C,
+	.sys_cfg20 = MT8167_HDMI_SYS_CFG20,
+	.hdisplay_max = 1920,
+	.vdisplay_max = 1080,
+};
+
 static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
-	{ .compatible = "mediatek,mt8173-hdmi", },
+	{ .compatible = "mediatek,mt8173-hdmi", .data = &mt8173_conf },
+	{ .compatible = "mediatek,mt8167-hdmi", .data = &mt8167_conf },
 	{}
 };
 
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h
index 6371b3d..3e9fb8d 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.h
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.h
@@ -13,11 +13,11 @@
  */
 #ifndef _MTK_HDMI_CTRL_H
 #define _MTK_HDMI_CTRL_H
+#include "mtk_hdmi_phy.h"
 
 struct platform_driver;
 
 extern struct platform_driver mtk_cec_driver;
 extern struct platform_driver mtk_hdmi_ddc_driver;
-extern struct platform_driver mtk_hdmi_phy_driver;
 
 #endif /* _MTK_HDMI_CTRL_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
new file mode 100644
index 0000000..d7023fb
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ */
+
+#include "mtk_hdmi_phy.h"
+
+static int mtk_hdmi_phy_power_on(struct phy *phy);
+static int mtk_hdmi_phy_power_off(struct phy *phy);
+
+static const struct phy_ops mtk_hdmi_phy_dev_ops = {
+	.power_on = mtk_hdmi_phy_power_on,
+	.power_off = mtk_hdmi_phy_power_off,
+	.owner = THIS_MODULE,
+};
+
+void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+			     u32 bits)
+{
+	void __iomem *reg = hdmi_phy->regs + offset;
+	u32 tmp;
+
+	tmp = readl(reg);
+	tmp &= ~bits;
+	writel(tmp, reg);
+}
+
+void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+			   u32 bits)
+{
+	void __iomem *reg = hdmi_phy->regs + offset;
+	u32 tmp;
+
+	tmp = readl(reg);
+	tmp |= bits;
+	writel(tmp, reg);
+}
+
+void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+		       u32 val, u32 mask)
+{
+	void __iomem *reg = hdmi_phy->regs + offset;
+	u32 tmp;
+
+	tmp = readl(reg);
+	tmp = (tmp & ~mask) | (val & mask);
+	writel(tmp, reg);
+}
+
+inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
+{
+	return container_of(hw, struct mtk_hdmi_phy, pll_hw);
+}
+
+static int mtk_hdmi_phy_power_on(struct phy *phy)
+{
+	struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
+	int ret;
+
+	ret = clk_prepare_enable(hdmi_phy->pll);
+	if (ret < 0)
+		return ret;
+
+	hdmi_phy->conf->hdmi_phy_enable_tmds(hdmi_phy);
+	return 0;
+}
+
+static int mtk_hdmi_phy_power_off(struct phy *phy)
+{
+	struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
+
+	hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
+	clk_disable_unprepare(hdmi_phy->pll);
+
+	return 0;
+}
+
+static const struct phy_ops *
+mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
+{
+	if (hdmi_phy && hdmi_phy->conf &&
+	    hdmi_phy->conf->hdmi_phy_enable_tmds &&
+	    hdmi_phy->conf->hdmi_phy_disable_tmds)
+		return &mtk_hdmi_phy_dev_ops;
+
+	dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n");
+	return NULL;
+}
+
+static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
+				      struct clk_init_data *clk_init)
+{
+	clk_init->flags = hdmi_phy->conf->flags;
+	clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
+}
+
+static int mtk_hdmi_phy_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_hdmi_phy *hdmi_phy;
+	struct resource *mem;
+	struct clk *ref_clk;
+	const char *ref_clk_name;
+	struct clk_init_data clk_init = {
+		.num_parents = 1,
+		.parent_names = (const char * const *)&ref_clk_name,
+	};
+
+	struct phy *phy;
+	struct phy_provider *phy_provider;
+	int ret;
+
+	hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
+	if (!hdmi_phy)
+		return -ENOMEM;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hdmi_phy->regs = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(hdmi_phy->regs)) {
+		ret = PTR_ERR(hdmi_phy->regs);
+		dev_err(dev, "Failed to get memory resource: %d\n", ret);
+		return ret;
+	}
+
+	ref_clk = devm_clk_get(dev, "pll_ref");
+	if (IS_ERR(ref_clk)) {
+		ret = PTR_ERR(ref_clk);
+		dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
+			ret);
+		return ret;
+	}
+	ref_clk_name = __clk_get_name(ref_clk);
+
+	ret = of_property_read_string(dev->of_node, "clock-output-names",
+				      &clk_init.name);
+	if (ret < 0) {
+		dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
+		return ret;
+	}
+
+	hdmi_phy->dev = dev;
+	hdmi_phy->conf =
+		(struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev);
+	mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
+	hdmi_phy->pll_hw.init = &clk_init;
+	hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
+	if (IS_ERR(hdmi_phy->pll)) {
+		ret = PTR_ERR(hdmi_phy->pll);
+		dev_err(dev, "Failed to register PLL: %d\n", ret);
+		return ret;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
+				   &hdmi_phy->ibias);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
+		return ret;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
+				   &hdmi_phy->ibias_up);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
+		return ret;
+	}
+
+	dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
+	hdmi_phy->drv_imp_clk = 0x30;
+	hdmi_phy->drv_imp_d2 = 0x30;
+	hdmi_phy->drv_imp_d1 = 0x30;
+	hdmi_phy->drv_imp_d0 = 0x30;
+
+	phy = devm_phy_create(dev, NULL, mtk_hdmi_phy_dev_get_ops(hdmi_phy));
+	if (IS_ERR(phy)) {
+		dev_err(dev, "Failed to create HDMI PHY\n");
+		return PTR_ERR(phy);
+	}
+	phy_set_drvdata(phy, hdmi_phy);
+
+	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+	if (IS_ERR(phy_provider)) {
+		dev_err(dev, "Failed to register HDMI PHY\n");
+		return PTR_ERR(phy_provider);
+	}
+
+	return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
+				   hdmi_phy->pll);
+}
+
+static const struct of_device_id mtk_hdmi_phy_match[] = {
+	{ .compatible = "mediatek,mt2701-hdmi-phy",
+	  .data = &mtk_hdmi_phy_2701_conf,
+	},
+	{ .compatible = "mediatek,mt8167-hdmi-phy",
+	  .data = &mtk_hdmi_phy_8167_conf,
+	},
+	{ .compatible = "mediatek,mt8173-hdmi-phy",
+	  .data = &mtk_hdmi_phy_8173_conf,
+	},
+	{},
+};
+
+struct platform_driver mtk_hdmi_phy_driver = {
+	.probe = mtk_hdmi_phy_probe,
+	.driver = {
+		.name = "mediatek-hdmi-phy",
+		.of_match_table = mtk_hdmi_phy_match,
+	},
+};
+
+MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
new file mode 100644
index 0000000..a32964c
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Chunhui Dai <chunhui.dai@mediatek.com>
+ */
+
+#ifndef _MTK_HDMI_PHY_H
+#define _MTK_HDMI_PHY_H
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+struct mtk_hdmi_phy;
+
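+/*
+ * Per-SoC PHY hooks.  tz_disabled marks SoCs whose HDMI output-control
+ * bit is written directly through the mmsys regmap instead of via the
+ * TrustZone SMC call (see mtk_hdmi_hw_make_reg_writable()).
+ */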
+struct mtk_hdmi_phy_conf {
+	bool tz_disabled;
+	unsigned long flags;
+	const struct clk_ops *hdmi_phy_clk_ops;
+	void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
+	void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
+};
+
+struct mtk_hdmi_phy {
+	void __iomem *regs;
+	struct device *dev;
+	struct mtk_hdmi_phy_conf *conf;
+	struct clk *pll;
+	struct clk_hw pll_hw;
+	unsigned long pll_rate;
+	unsigned char drv_imp_clk;
+	unsigned char drv_imp_d2;
+	unsigned char drv_imp_d1;
+	unsigned char drv_imp_d0;
+	unsigned int ibias;
+	unsigned int ibias_up;
+};
+
+void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+			     u32 bits);
+void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+			   u32 bits);
+void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+		       u32 val, u32 mask);
+struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
+
+extern struct platform_driver mtk_hdmi_phy_driver;
+extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
+extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8167_conf;
+extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf;
+
+#endif /* _MTK_HDMI_PHY_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h
index a5cb07d..bab4af8 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h
@@ -203,6 +203,7 @@
 #define GEN_RGB				(0 << 7)
 
 #define HDMI_SYS_CFG1C		0x000
+#define MT8167_HDMI_SYS_CFG1C		0x800
 #define HDMI_ON				BIT(0)
 #define HDMI_RST			BIT(1)
 #define ANLG_ON				BIT(2)
@@ -219,6 +220,7 @@
 #define HTPLG_PIN_SEL_OFF		BIT(30)
 #define AES_EFUSE_ENABLE		BIT(31)
 #define HDMI_SYS_CFG20		0x004
+#define MT8167_HDMI_SYS_CFG20		0x804
 #define DEEP_COLOR_MODE_MASK		(3 << 1)
 #define COLOR_8BIT_MODE			(0 << 1)
 #define COLOR_10BIT_MODE		(1 << 1)
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index 90e9131..0f860d6 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -11,292 +11,39 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/phy/phy.h>
+#include "mtk_mipi_tx.h"
 
-#define MIPITX_DSI_CON		0x00
-#define RG_DSI_LDOCORE_EN		BIT(0)
-#define RG_DSI_CKG_LDOOUT_EN		BIT(1)
-#define RG_DSI_BCLK_SEL			(3 << 2)
-#define RG_DSI_LD_IDX_SEL		(7 << 4)
-#define RG_DSI_PHYCLK_SEL		(2 << 8)
-#define RG_DSI_DSICLK_FREQ_SEL		BIT(10)
-#define RG_DSI_LPTX_CLMP_EN		BIT(11)
-
-#define MIPITX_DSI_CLOCK_LANE	0x04
-#define MIPITX_DSI_DATA_LANE0	0x08
-#define MIPITX_DSI_DATA_LANE1	0x0c
-#define MIPITX_DSI_DATA_LANE2	0x10
-#define MIPITX_DSI_DATA_LANE3	0x14
-#define RG_DSI_LNTx_LDOOUT_EN		BIT(0)
-#define RG_DSI_LNTx_CKLANE_EN		BIT(1)
-#define RG_DSI_LNTx_LPTX_IPLUS1		BIT(2)
-#define RG_DSI_LNTx_LPTX_IPLUS2		BIT(3)
-#define RG_DSI_LNTx_LPTX_IMINUS		BIT(4)
-#define RG_DSI_LNTx_LPCD_IPLUS		BIT(5)
-#define RG_DSI_LNTx_LPCD_IMINUS		BIT(6)
-#define RG_DSI_LNTx_RT_CODE		(0xf << 8)
-
-#define MIPITX_DSI_TOP_CON	0x40
-#define RG_DSI_LNT_INTR_EN		BIT(0)
-#define RG_DSI_LNT_HS_BIAS_EN		BIT(1)
-#define RG_DSI_LNT_IMP_CAL_EN		BIT(2)
-#define RG_DSI_LNT_TESTMODE_EN		BIT(3)
-#define RG_DSI_LNT_IMP_CAL_CODE		(0xf << 4)
-#define RG_DSI_LNT_AIO_SEL		(7 << 8)
-#define RG_DSI_PAD_TIE_LOW_EN		BIT(11)
-#define RG_DSI_DEBUG_INPUT_EN		BIT(12)
-#define RG_DSI_PRESERVE			(7 << 13)
-
-#define MIPITX_DSI_BG_CON	0x44
-#define RG_DSI_BG_CORE_EN		BIT(0)
-#define RG_DSI_BG_CKEN			BIT(1)
-#define RG_DSI_BG_DIV			(0x3 << 2)
-#define RG_DSI_BG_FAST_CHARGE		BIT(4)
-#define RG_DSI_VOUT_MSK			(0x3ffff << 5)
-#define RG_DSI_V12_SEL			(7 << 5)
-#define RG_DSI_V10_SEL			(7 << 8)
-#define RG_DSI_V072_SEL			(7 << 11)
-#define RG_DSI_V04_SEL			(7 << 14)
-#define RG_DSI_V032_SEL			(7 << 17)
-#define RG_DSI_V02_SEL			(7 << 20)
-#define RG_DSI_BG_R1_TRIM		(0xf << 24)
-#define RG_DSI_BG_R2_TRIM		(0xf << 28)
-
-#define MIPITX_DSI_PLL_CON0	0x50
-#define RG_DSI_MPPLL_PLL_EN		BIT(0)
-#define RG_DSI_MPPLL_DIV_MSK		(0x1ff << 1)
-#define RG_DSI_MPPLL_PREDIV		(3 << 1)
-#define RG_DSI_MPPLL_TXDIV0		(3 << 3)
-#define RG_DSI_MPPLL_TXDIV1		(3 << 5)
-#define RG_DSI_MPPLL_POSDIV		(7 << 7)
-#define RG_DSI_MPPLL_MONVC_EN		BIT(10)
-#define RG_DSI_MPPLL_MONREF_EN		BIT(11)
-#define RG_DSI_MPPLL_VOD_EN		BIT(12)
-
-#define MIPITX_DSI_PLL_CON1	0x54
-#define RG_DSI_MPPLL_SDM_FRA_EN		BIT(0)
-#define RG_DSI_MPPLL_SDM_SSC_PH_INIT	BIT(1)
-#define RG_DSI_MPPLL_SDM_SSC_EN		BIT(2)
-#define RG_DSI_MPPLL_SDM_SSC_PRD	(0xffff << 16)
-
-#define MIPITX_DSI_PLL_CON2	0x58
-
-#define MIPITX_DSI_PLL_TOP	0x64
-#define RG_DSI_MPPLL_PRESERVE		(0xff << 8)
-
-#define MIPITX_DSI_PLL_PWR	0x68
-#define RG_DSI_MPPLL_SDM_PWR_ON		BIT(0)
-#define RG_DSI_MPPLL_SDM_ISO_EN		BIT(1)
-#define RG_DSI_MPPLL_SDM_PWR_ACK	BIT(8)
-
-#define MIPITX_DSI_SW_CTRL	0x80
-#define SW_CTRL_EN			BIT(0)
-
-#define MIPITX_DSI_SW_CTRL_CON0	0x84
-#define SW_LNTC_LPTX_PRE_OE		BIT(0)
-#define SW_LNTC_LPTX_OE			BIT(1)
-#define SW_LNTC_LPTX_P			BIT(2)
-#define SW_LNTC_LPTX_N			BIT(3)
-#define SW_LNTC_HSTX_PRE_OE		BIT(4)
-#define SW_LNTC_HSTX_OE			BIT(5)
-#define SW_LNTC_HSTX_ZEROCLK		BIT(6)
-#define SW_LNT0_LPTX_PRE_OE		BIT(7)
-#define SW_LNT0_LPTX_OE			BIT(8)
-#define SW_LNT0_LPTX_P			BIT(9)
-#define SW_LNT0_LPTX_N			BIT(10)
-#define SW_LNT0_HSTX_PRE_OE		BIT(11)
-#define SW_LNT0_HSTX_OE			BIT(12)
-#define SW_LNT0_LPRX_EN			BIT(13)
-#define SW_LNT1_LPTX_PRE_OE		BIT(14)
-#define SW_LNT1_LPTX_OE			BIT(15)
-#define SW_LNT1_LPTX_P			BIT(16)
-#define SW_LNT1_LPTX_N			BIT(17)
-#define SW_LNT1_HSTX_PRE_OE		BIT(18)
-#define SW_LNT1_HSTX_OE			BIT(19)
-#define SW_LNT2_LPTX_PRE_OE		BIT(20)
-#define SW_LNT2_LPTX_OE			BIT(21)
-#define SW_LNT2_LPTX_P			BIT(22)
-#define SW_LNT2_LPTX_N			BIT(23)
-#define SW_LNT2_HSTX_PRE_OE		BIT(24)
-#define SW_LNT2_HSTX_OE			BIT(25)
-
-struct mtk_mipitx_data {
-	const u32 mppll_preserve;
-};
-
-struct mtk_mipi_tx {
-	struct device *dev;
-	void __iomem *regs;
-	u32 data_rate;
-	const struct mtk_mipitx_data *driver_data;
-	struct clk_hw pll_hw;
-	struct clk *pll;
-};
-
-static inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
+inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
 {
 	return container_of(hw, struct mtk_mipi_tx, pll_hw);
 }
 
-static void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
-				   u32 bits)
+void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+			    u32 bits)
 {
 	u32 temp = readl(mipi_tx->regs + offset);
 
 	writel(temp & ~bits, mipi_tx->regs + offset);
 }
 
-static void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
-				 u32 bits)
+void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+			  u32 bits)
 {
 	u32 temp = readl(mipi_tx->regs + offset);
 
 	writel(temp | bits, mipi_tx->regs + offset);
 }
 
-static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
-				    u32 mask, u32 data)
+void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+			     u32 mask, u32 data)
 {
 	u32 temp = readl(mipi_tx->regs + offset);
 
 	writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset);
 }
 
-static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
-{
-	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
-	u8 txdiv, txdiv0, txdiv1;
-	u64 pcw;
-
-	dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
-
-	if (mipi_tx->data_rate >= 500000000) {
-		txdiv = 1;
-		txdiv0 = 0;
-		txdiv1 = 0;
-	} else if (mipi_tx->data_rate >= 250000000) {
-		txdiv = 2;
-		txdiv0 = 1;
-		txdiv1 = 0;
-	} else if (mipi_tx->data_rate >= 125000000) {
-		txdiv = 4;
-		txdiv0 = 2;
-		txdiv1 = 0;
-	} else if (mipi_tx->data_rate > 62000000) {
-		txdiv = 8;
-		txdiv0 = 2;
-		txdiv1 = 1;
-	} else if (mipi_tx->data_rate >= 50000000) {
-		txdiv = 16;
-		txdiv0 = 2;
-		txdiv1 = 2;
-	} else {
-		return -EINVAL;
-	}
-
-	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
-				RG_DSI_VOUT_MSK |
-				RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
-				(4 << 20) | (4 << 17) | (4 << 14) |
-				(4 << 11) | (4 << 8) | (4 << 5) |
-				RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
-
-	usleep_range(30, 100);
-
-	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
-				RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
-				(8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
-
-	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
-			     RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
-
-	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
-				RG_DSI_MPPLL_SDM_PWR_ON |
-				RG_DSI_MPPLL_SDM_ISO_EN,
-				RG_DSI_MPPLL_SDM_PWR_ON);
-
-	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
-			       RG_DSI_MPPLL_PLL_EN);
-
-	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
-				RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
-				RG_DSI_MPPLL_PREDIV,
-				(txdiv0 << 3) | (txdiv1 << 5));
-
-	/*
-	 * PLL PCW config
-	 * PCW bit 24~30 = integer part of pcw
-	 * PCW bit 0~23 = fractional part of pcw
-	 * pcw = data_Rate*4*txdiv/(Ref_clk*2);
-	 * Post DIV =4, so need data_Rate*4
-	 * Ref_clk is 26MHz
-	 */
-	pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
-		      26000000);
-	writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
-
-	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
-			     RG_DSI_MPPLL_SDM_FRA_EN);
-
-	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
-
-	usleep_range(20, 100);
-
-	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
-			       RG_DSI_MPPLL_SDM_SSC_EN);
-
-	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
-				RG_DSI_MPPLL_PRESERVE,
-				mipi_tx->driver_data->mppll_preserve);
-
-	return 0;
-}
-
-static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
-{
-	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
-
-	dev_dbg(mipi_tx->dev, "unprepare\n");
-
-	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
-			       RG_DSI_MPPLL_PLL_EN);
-
-	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
-				RG_DSI_MPPLL_PRESERVE, 0);
-
-	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
-				RG_DSI_MPPLL_SDM_ISO_EN |
-				RG_DSI_MPPLL_SDM_PWR_ON,
-				RG_DSI_MPPLL_SDM_ISO_EN);
-
-	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
-			       RG_DSI_LNT_HS_BIAS_EN);
-
-	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
-			       RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
-
-	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
-			       RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
-
-	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
-			       RG_DSI_MPPLL_DIV_MSK);
-}
-
-static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-				       unsigned long *prate)
-{
-	return clamp_val(rate, 50000000, 1250000000);
-}
-
-static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
-				    unsigned long parent_rate)
+int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+			     unsigned long parent_rate)
 {
 	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
 
@@ -307,37 +54,14 @@
 	return 0;
 }
 
-static unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
-						 unsigned long parent_rate)
+unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+					  unsigned long parent_rate)
 {
 	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
 
 	return mipi_tx->data_rate;
 }
 
-static const struct clk_ops mtk_mipi_tx_pll_ops = {
-	.prepare = mtk_mipi_tx_pll_prepare,
-	.unprepare = mtk_mipi_tx_pll_unprepare,
-	.round_rate = mtk_mipi_tx_pll_round_rate,
-	.set_rate = mtk_mipi_tx_pll_set_rate,
-	.recalc_rate = mtk_mipi_tx_pll_recalc_rate,
-};
-
-static int mtk_mipi_tx_power_on_signal(struct phy *phy)
-{
-	struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
-	u32 reg;
-
-	for (reg = MIPITX_DSI_CLOCK_LANE;
-	     reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
-		mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
-
-	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
-			       RG_DSI_PAD_TIE_LOW_EN);
-
-	return 0;
-}
-
 static int mtk_mipi_tx_power_on(struct phy *phy)
 {
 	struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
@@ -349,30 +73,16 @@
 		return ret;
 
 	/* Enable DSI Lane LDO outputs, disable pad tie low */
-	mtk_mipi_tx_power_on_signal(phy);
-
+	mipi_tx->driver_data->mipi_tx_enable_signal(phy);
 	return 0;
 }
 
-static void mtk_mipi_tx_power_off_signal(struct phy *phy)
-{
-	struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
-	u32 reg;
-
-	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
-			     RG_DSI_PAD_TIE_LOW_EN);
-
-	for (reg = MIPITX_DSI_CLOCK_LANE;
-	     reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
-		mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
-}
-
 static int mtk_mipi_tx_power_off(struct phy *phy)
 {
 	struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
 
 	/* Enable pad tie low, disable DSI Lane LDO outputs */
-	mtk_mipi_tx_power_off_signal(phy);
+	mipi_tx->driver_data->mipi_tx_disable_signal(phy);
 
 	/* Disable PLL and power down core */
 	clk_disable_unprepare(mipi_tx->pll);
@@ -391,10 +101,8 @@
 	struct device *dev = &pdev->dev;
 	struct mtk_mipi_tx *mipi_tx;
 	struct resource *mem;
-	struct clk *ref_clk;
 	const char *ref_clk_name;
 	struct clk_init_data clk_init = {
-		.ops = &mtk_mipi_tx_pll_ops,
 		.num_parents = 1,
 		.parent_names = (const char * const *)&ref_clk_name,
 		.flags = CLK_SET_RATE_GATE,
@@ -408,6 +116,7 @@
 		return -ENOMEM;
 
 	mipi_tx->driver_data = of_device_get_match_data(dev);
+
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mipi_tx->regs = devm_ioremap_resource(dev, mem);
 	if (IS_ERR(mipi_tx->regs)) {
@@ -416,13 +125,14 @@
 		return ret;
 	}
 
-	ref_clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(ref_clk)) {
-		ret = PTR_ERR(ref_clk);
+	mipi_tx->ref_clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(mipi_tx->ref_clk)) {
+		ret = PTR_ERR(mipi_tx->ref_clk);
 		dev_err(dev, "Failed to get reference clock: %d\n", ret);
 		return ret;
 	}
-	ref_clk_name = __clk_get_name(ref_clk);
+
+	ref_clk_name = __clk_get_name(mipi_tx->ref_clk);
 
 	ret = of_property_read_string(dev->of_node, "clock-output-names",
 				      &clk_init.name);
@@ -431,6 +141,8 @@
 		return ret;
 	}
 
+	clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops;
+
 	mipi_tx->pll_hw.init = &clk_init;
 	mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
 	if (IS_ERR(mipi_tx->pll)) {
@@ -465,20 +177,16 @@
 	return 0;
 }
 
-static const struct mtk_mipitx_data mt2701_mipitx_data = {
-	.mppll_preserve = (3 << 8)
-};
-
-static const struct mtk_mipitx_data mt8173_mipitx_data = {
-	.mppll_preserve = (0 << 8)
-};
-
 static const struct of_device_id mtk_mipi_tx_match[] = {
 	{ .compatible = "mediatek,mt2701-mipi-tx",
 	  .data = &mt2701_mipitx_data },
+	{ .compatible = "mediatek,mt8167-mipi-tx",
+	  .data = &mt8167_mipitx_data },
 	{ .compatible = "mediatek,mt8173-mipi-tx",
 	  .data = &mt8173_mipitx_data },
-	{},
+	{ .compatible = "mediatek,mt8183-mipi-tx",
+	  .data = &mt8183_mipitx_data },
+	{ },
 };
 
 struct platform_driver mtk_mipi_tx_driver = {
@@ -489,3 +197,4 @@
 		.of_match_table = mtk_mipi_tx_match,
 	},
 };
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
new file mode 100644
index 0000000..f4f4e34
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Jitao Shi <jitao.shi@mediatek.com>
+ */
+
+#ifndef _MTK_MIPI_TX_H
+#define _MTK_MIPI_TX_H
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+
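+/*
+ * Per-SoC hooks: the PLL clk_ops and the lane-signal on/off handlers
+ * live in the match data so the shared probe and phy_ops code can drive
+ * both the original (mt2701/mt8173) and the newer (mt8167/mt8183)
+ * MIPI TX generations.
+ */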
+struct mtk_mipitx_data {
+	const u32 mppll_preserve;
+	const struct clk_ops *mipi_tx_clk_ops;
+	void (*mipi_tx_enable_signal)(struct phy *phy);
+	void (*mipi_tx_disable_signal)(struct phy *phy);
+};
+
+struct mtk_mipi_tx {
+	struct device *dev;
+	void __iomem *regs;
+	u32 data_rate;
+	struct clk *ref_clk;
+	const struct mtk_mipitx_data *driver_data;
+	struct clk_hw pll_hw;
+	struct clk *pll;
+};
+
+struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw);
+void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
+void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
+void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 mask,
+			     u32 data);
+int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+			     unsigned long parent_rate);
+unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+					  unsigned long parent_rate);
+
+extern const struct mtk_mipitx_data mt2701_mipitx_data;
+extern const struct mtk_mipitx_data mt8167_mipitx_data;
+extern const struct mtk_mipitx_data mt8173_mipitx_data;
+extern const struct mtk_mipitx_data mt8183_mipitx_data;
+
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
new file mode 100644
index 0000000..d3cc402
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Chunhui Dai <chunhui.dai@mediatek.com>
+ */
+
+#include "mtk_hdmi_phy.h"
+
+#define HDMI_CON0	0x00
+#define RG_HDMITX_DRV_IBIAS		0
+#define RG_HDMITX_DRV_IBIAS_MASK	(0x3f << 0)
+#define RG_HDMITX_EN_SER		12
+#define RG_HDMITX_EN_SER_MASK		(0x0f << 12)
+#define RG_HDMITX_EN_SLDO		16
+#define RG_HDMITX_EN_SLDO_MASK		(0x0f << 16)
+#define RG_HDMITX_EN_PRED		20
+#define RG_HDMITX_EN_PRED_MASK		(0x0f << 20)
+#define RG_HDMITX_EN_IMP		24
+#define RG_HDMITX_EN_IMP_MASK		(0x0f << 24)
+#define RG_HDMITX_EN_DRV		28
+#define RG_HDMITX_EN_DRV_MASK		(0x0f << 28)
+
+#define HDMI_CON1	0x04
+#define RG_HDMITX_PRED_IBIAS		18
+#define RG_HDMITX_PRED_IBIAS_MASK	(0x0f << 18)
+#define RG_HDMITX_PRED_IMP		(0x01 << 22)
+#define RG_HDMITX_DRV_IMP		26
+#define RG_HDMITX_DRV_IMP_MASK		(0x3f << 26)
+
+#define HDMI_CON2	0x08
+#define RG_HDMITX_EN_TX_CKLDO		(0x01 << 0)
+#define RG_HDMITX_EN_TX_POSDIV		(0x01 << 1)
+#define RG_HDMITX_TX_POSDIV		3
+#define RG_HDMITX_TX_POSDIV_MASK	(0x03 << 3)
+#define RG_HDMITX_EN_MBIAS		(0x01 << 6)
+#define RG_HDMITX_MBIAS_LPF_EN		(0x01 << 7)
+
+#define HDMI_CON4	0x10
+#define RG_HDMITX_RESERVE_MASK		(0xffffffff << 0)
+
+#define HDMI_CON6	0x18
+#define RG_HTPLL_BR			0
+#define RG_HTPLL_BR_MASK		(0x03 << 0)
+#define RG_HTPLL_BC			2
+#define RG_HTPLL_BC_MASK		(0x03 << 2)
+#define RG_HTPLL_BP			4
+#define RG_HTPLL_BP_MASK		(0x0f << 4)
+#define RG_HTPLL_IR			8
+#define RG_HTPLL_IR_MASK		(0x0f << 8)
+#define RG_HTPLL_IC			12
+#define RG_HTPLL_IC_MASK		(0x0f << 12)
+#define RG_HTPLL_POSDIV			16
+#define RG_HTPLL_POSDIV_MASK		(0x03 << 16)
+#define RG_HTPLL_PREDIV			18
+#define RG_HTPLL_PREDIV_MASK		(0x03 << 18)
+#define RG_HTPLL_FBKSEL			20
+#define RG_HTPLL_FBKSEL_MASK		(0x03 << 20)
+#define RG_HTPLL_RLH_EN			(0x01 << 22)
+#define RG_HTPLL_FBKDIV			24
+#define RG_HTPLL_FBKDIV_MASK		(0x7f << 24)
+#define RG_HTPLL_EN			(0x01 << 31)
+
+#define HDMI_CON7	0x1c
+#define RG_HTPLL_AUTOK_EN		(0x01 << 23)
+#define RG_HTPLL_DIVEN			28
+#define RG_HTPLL_DIVEN_MASK		(0x07 << 28)
+
+static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
+{
+	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+	usleep_range(80, 100);
+	return 0;
+}
+
+static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
+{
+	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+	usleep_range(80, 100);
+}
+
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long *parent_rate)
+{
+	return rate;
+}
+
+static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+				 unsigned long parent_rate)
+{
+	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+	u32 pos_div;
+
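+	/* Lower rates need a larger post divider (output divided by 2^pos_div). */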
+	if (rate <= 64000000)
+		pos_div = 3;
+	else if (rate <= 128000000)
+		pos_div = 2;
+	else
+		pos_div = 1;
+
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
+			  RG_HTPLL_IC_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
+			  RG_HTPLL_IR_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON2, (pos_div << RG_HDMITX_TX_POSDIV),
+			  RG_HDMITX_TX_POSDIV_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (1 << RG_HTPLL_FBKSEL),
+			  RG_HTPLL_FBKSEL_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (19 << RG_HTPLL_FBKDIV),
+			  RG_HTPLL_FBKDIV_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON7, (0x2 << RG_HTPLL_DIVEN),
+			  RG_HTPLL_DIVEN_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0xc << RG_HTPLL_BP),
+			  RG_HTPLL_BP_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x2 << RG_HTPLL_BC),
+			  RG_HTPLL_BC_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_BR),
+			  RG_HTPLL_BR_MASK);
+
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PRED_IMP);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x3 << RG_HDMITX_PRED_IBIAS),
+			  RG_HDMITX_PRED_IBIAS_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_IMP_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x28 << RG_HDMITX_DRV_IMP),
+			  RG_HDMITX_DRV_IMP_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, 0x28, RG_HDMITX_RESERVE_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, (0xa << RG_HDMITX_DRV_IBIAS),
+			  RG_HDMITX_DRV_IBIAS_MASK);
+	return 0;
+}
+
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+					      unsigned long parent_rate)
+{
+	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+	unsigned long out_rate, val;
+
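+	/*
+	 * out_rate = parent / prediv * (fbkdiv + 1) * 2 >> pos_div,
+	 * with a further /5 while the TX post divider is enabled.
+	 */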
+	val = (readl(hdmi_phy->regs + HDMI_CON6)
+	       & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
+	switch (val) {
+	case 0x00:
+		out_rate = parent_rate;
+		break;
+	case 0x01:
+		out_rate = parent_rate / 2;
+		break;
+	default:
+		out_rate = parent_rate / 4;
+		break;
+	}
+
+	val = (readl(hdmi_phy->regs + HDMI_CON6)
+	       & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
+	out_rate *= (val + 1) * 2;
+	val = (readl(hdmi_phy->regs + HDMI_CON2)
+	       & RG_HDMITX_TX_POSDIV_MASK);
+	out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
+
+	if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
+		out_rate /= 5;
+
+	return out_rate;
+}
+
+static const struct clk_ops mtk_hdmi_phy_pll_ops = {
+	.prepare = mtk_hdmi_pll_prepare,
+	.unprepare = mtk_hdmi_pll_unprepare,
+	.set_rate = mtk_hdmi_pll_set_rate,
+	.round_rate = mtk_hdmi_pll_round_rate,
+	.recalc_rate = mtk_hdmi_pll_recalc_rate,
+};
+
+static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+	usleep_range(80, 100);
+}
+
+static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+	usleep_range(80, 100);
+}
+
+struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
+	.tz_disabled = true,
+	.flags = CLK_SET_RATE_GATE,
+	.hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
+	.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
+	.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
+};
+
+MODULE_AUTHOR("Chunhui Dai <chunhui.dai@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8167_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8167_hdmi_phy.c
new file mode 100644
index 0000000..92e13c9
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8167_hdmi_phy.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Chunhui Dai <chunhui.dai@mediatek.com>
+ */
+
+#include "mtk_hdmi_phy.h"
+
+#define HDMI_CON0	0x00
+#define RG_HDMITX_DRV_IBIAS		0
+#define RG_HDMITX_DRV_IBIAS_MASK	(0x3f << 0)
+#define RG_HDMITX_EN_SER		12
+#define RG_HDMITX_EN_SER_MASK		(0x0f << 12)
+#define RG_HDMITX_EN_SLDO		16
+#define RG_HDMITX_EN_SLDO_MASK		(0x0f << 16)
+#define RG_HDMITX_EN_PRED		20
+#define RG_HDMITX_EN_PRED_MASK		(0x0f << 20)
+#define RG_HDMITX_EN_IMP		24
+#define RG_HDMITX_EN_IMP_MASK		(0x0f << 24)
+#define RG_HDMITX_EN_DRV		28
+#define RG_HDMITX_EN_DRV_MASK		(0x0f << 28)
+
+#define HDMI_CON1	0x04
+#define RG_HDMITX_PRED_IBIAS		18
+#define RG_HDMITX_PRED_IBIAS_MASK	(0x0f << 18)
+#define RG_HDMITX_PRED_IMP		(0x01 << 22)
+#define RG_HDMITX_DRV_IMP		26
+#define RG_HDMITX_DRV_IMP_MASK		(0x3f << 26)
+
+#define HDMI_CON2	0x08
+#define RG_HDMITX_EN_TX_CKLDO		(0x01 << 0)
+#define RG_HDMITX_EN_TX_POSDIV		(0x01 << 1)
+#define RG_HDMITX_TX_POSDIV		3
+#define RG_HDMITX_TX_POSDIV_MASK	(0x03 << 3)
+#define RG_HDMITX_EN_MBIAS		(0x01 << 6)
+#define RG_HDMITX_MBIAS_LPF_EN		(0x01 << 7)
+
+#define HDMI_CON4	0x10
+#define RG_HDMITX_RESERVE_MASK		(0xffffffff << 0)
+
+#define HDMI_CON6	0x18
+#define RG_HTPLL_BR			0
+#define RG_HTPLL_BR_MASK		(0x03 << 0)
+#define RG_HTPLL_BC			2
+#define RG_HTPLL_BC_MASK		(0x03 << 2)
+#define RG_HTPLL_BP			4
+#define RG_HTPLL_BP_MASK		(0x0f << 4)
+#define RG_HTPLL_IR			8
+#define RG_HTPLL_IR_MASK		(0x0f << 8)
+#define RG_HTPLL_IC			12
+#define RG_HTPLL_IC_MASK		(0x0f << 12)
+#define RG_HTPLL_POSDIV			16
+#define RG_HTPLL_POSDIV_MASK		(0x03 << 16)
+#define RG_HTPLL_PREDIV			18
+#define RG_HTPLL_PREDIV_MASK		(0x03 << 18)
+#define RG_HTPLL_FBKSEL			20
+#define RG_HTPLL_FBKSEL_MASK		(0x03 << 20)
+#define RG_HTPLL_RLH_EN			(0x01 << 22)
+#define RG_HTPLL_FBKDIV			24
+#define RG_HTPLL_FBKDIV_MASK		(0x7f << 24)
+#define RG_HTPLL_EN			(0x01 << 31)
+
+#define HDMI_CON7	0x1c
+#define RG_HTPLL_AUTOK_EN		(0x01 << 23)
+#define RG_HTPLL_DIVEN			28
+#define RG_HTPLL_DIVEN_MASK		(0x07 << 28)
+
+static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
+{
+	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+	usleep_range(80, 100);
+	return 0;
+}
+
+static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
+{
+	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+	usleep_range(80, 100);
+}
+
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long *parent_rate)
+{
+	return rate;
+}
+
+static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+				 unsigned long parent_rate)
+{
+	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+	u32 pos_div;
+
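+	/* As in the MT2701 PHY, but with lower rate thresholds for this SoC. */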
+	if (rate <= 28000000)
+		pos_div = 3;
+	else if (rate <= 74250000)
+		pos_div = 2;
+	else
+		pos_div = 1;
+
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
+			  RG_HTPLL_IC_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
+			  RG_HTPLL_IR_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON2, (pos_div << RG_HDMITX_TX_POSDIV),
+			  RG_HDMITX_TX_POSDIV_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (1 << RG_HTPLL_FBKSEL),
+			  RG_HTPLL_FBKSEL_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (19 << RG_HTPLL_FBKDIV),
+			  RG_HTPLL_FBKDIV_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON7, (0x2 << RG_HTPLL_DIVEN),
+			  RG_HTPLL_DIVEN_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0xc << RG_HTPLL_BP),
+			  RG_HTPLL_BP_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x2 << RG_HTPLL_BC),
+			  RG_HTPLL_BC_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_BR),
+			  RG_HTPLL_BR_MASK);
+
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PRED_IMP);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x3 << RG_HDMITX_PRED_IBIAS),
+			  RG_HDMITX_PRED_IBIAS_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_IMP_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x28 << RG_HDMITX_DRV_IMP),
+			  RG_HDMITX_DRV_IMP_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, 0x28, RG_HDMITX_RESERVE_MASK);
+	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, (0xa << RG_HDMITX_DRV_IBIAS),
+			  RG_HDMITX_DRV_IBIAS_MASK);
+	return 0;
+}
+
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+					      unsigned long parent_rate)
+{
+	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+	unsigned long out_rate, val;
+
+	val = (readl(hdmi_phy->regs + HDMI_CON6)
+	       & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
+	switch (val) {
+	case 0x00:
+		out_rate = parent_rate;
+		break;
+	case 0x01:
+		out_rate = parent_rate / 2;
+		break;
+	default:
+		out_rate = parent_rate / 4;
+		break;
+	}
+
+	val = (readl(hdmi_phy->regs + HDMI_CON6)
+	       & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
+	out_rate *= (val + 1) * 2;
+	val = (readl(hdmi_phy->regs + HDMI_CON2)
+	       & RG_HDMITX_TX_POSDIV_MASK);
+	out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
+
+	if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
+		out_rate /= 5;
+
+	return out_rate;
+}
+
+static const struct clk_ops mtk_hdmi_phy_pll_ops = {
+	.prepare = mtk_hdmi_pll_prepare,
+	.unprepare = mtk_hdmi_pll_unprepare,
+	.set_rate = mtk_hdmi_pll_set_rate,
+	.round_rate = mtk_hdmi_pll_round_rate,
+	.recalc_rate = mtk_hdmi_pll_recalc_rate,
+};
+
+static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+	mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+	usleep_range(80, 100);
+}
+
+static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+	usleep_range(80, 100);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+	usleep_range(80, 100);
+}
+
+struct mtk_hdmi_phy_conf mtk_hdmi_phy_8167_conf = {
+	.tz_disabled = true,
+	.flags = CLK_SET_RATE_GATE,
+	.hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
+	.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
+	.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
+};
+
+MODULE_AUTHOR("Chunhui Dai <chunhui.dai@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
index 51cb9cf..47f8a29 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -12,15 +12,7 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
+#include "mtk_hdmi_phy.h"
 
 #define HDMI_CON0		0x00
 #define RG_HDMITX_PLL_EN		BIT(31)
@@ -123,20 +115,6 @@
 #define RGS_HDMITX_5T1_EDG		(0xf << 4)
 #define RGS_HDMITX_PLUG_TST		BIT(0)
 
-struct mtk_hdmi_phy {
-	void __iomem *regs;
-	struct device *dev;
-	struct clk *pll;
-	struct clk_hw pll_hw;
-	unsigned long pll_rate;
-	u8 drv_imp_clk;
-	u8 drv_imp_d2;
-	u8 drv_imp_d1;
-	u8 drv_imp_d0;
-	u32 ibias;
-	u32 ibias_up;
-};
-
 static const u8 PREDIV[3][4] = {
 	{0x0, 0x0, 0x0, 0x0},	/* 27Mhz */
 	{0x1, 0x1, 0x1, 0x1},	/* 74Mhz */
@@ -185,44 +163,6 @@
 	{0x1, 0x2, 0x2, 0x1}	/* 148Mhz */
 };
 
-static void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
-				    u32 bits)
-{
-	void __iomem *reg = hdmi_phy->regs + offset;
-	u32 tmp;
-
-	tmp = readl(reg);
-	tmp &= ~bits;
-	writel(tmp, reg);
-}
-
-static void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
-				  u32 bits)
-{
-	void __iomem *reg = hdmi_phy->regs + offset;
-	u32 tmp;
-
-	tmp = readl(reg);
-	tmp |= bits;
-	writel(tmp, reg);
-}
-
-static void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
-			      u32 val, u32 mask)
-{
-	void __iomem *reg = hdmi_phy->regs + offset;
-	u32 tmp;
-
-	tmp = readl(reg);
-	tmp = (tmp & ~mask) | (val & mask);
-	writel(tmp, reg);
-}
-
-static inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
-{
-	return container_of(hw, struct mtk_hdmi_phy, pll_hw);
-}
-
 static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
 {
 	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
@@ -259,6 +199,20 @@
 	usleep_range(100, 150);
 }
 
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long *parent_rate)
+{
+	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+	hdmi_phy->pll_rate = rate;
+	if (rate <= 74250000)
+		*parent_rate = rate;
+	else
+		*parent_rate = rate / 2;
+
+	return rate;
+}
+
 static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 				 unsigned long parent_rate)
 {
@@ -345,20 +299,6 @@
 	return 0;
 }
 
-static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-				    unsigned long *parent_rate)
-{
-	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-	hdmi_phy->pll_rate = rate;
-	if (rate <= 74250000)
-		*parent_rate = rate;
-	else
-		*parent_rate = rate / 2;
-
-	return rate;
-}
-
 static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
 					      unsigned long parent_rate)
 {
@@ -367,7 +307,7 @@
 	return hdmi_phy->pll_rate;
 }
 
-static const struct clk_ops mtk_hdmi_pll_ops = {
+static const struct clk_ops mtk_hdmi_phy_pll_ops = {
 	.prepare = mtk_hdmi_pll_prepare,
 	.unprepare = mtk_hdmi_pll_unprepare,
 	.set_rate = mtk_hdmi_pll_set_rate,
@@ -390,142 +330,11 @@
 				RG_HDMITX_SER_EN);
 }
 
-static int mtk_hdmi_phy_power_on(struct phy *phy)
-{
-	struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
-	int ret;
-
-	ret = clk_prepare_enable(hdmi_phy->pll);
-	if (ret < 0)
-		return ret;
-
-	mtk_hdmi_phy_enable_tmds(hdmi_phy);
-
-	return 0;
-}
-
-static int mtk_hdmi_phy_power_off(struct phy *phy)
-{
-	struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
-
-	mtk_hdmi_phy_disable_tmds(hdmi_phy);
-	clk_disable_unprepare(hdmi_phy->pll);
-
-	return 0;
-}
-
-static const struct phy_ops mtk_hdmi_phy_ops = {
-	.power_on = mtk_hdmi_phy_power_on,
-	.power_off = mtk_hdmi_phy_power_off,
-	.owner = THIS_MODULE,
-};
-
-static int mtk_hdmi_phy_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct mtk_hdmi_phy *hdmi_phy;
-	struct resource *mem;
-	struct clk *ref_clk;
-	const char *ref_clk_name;
-	struct clk_init_data clk_init = {
-		.ops = &mtk_hdmi_pll_ops,
-		.num_parents = 1,
-		.parent_names = (const char * const *)&ref_clk_name,
-		.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
-	};
-	struct phy *phy;
-	struct phy_provider *phy_provider;
-	int ret;
-
-	hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
-	if (!hdmi_phy)
-		return -ENOMEM;
-
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	hdmi_phy->regs = devm_ioremap_resource(dev, mem);
-	if (IS_ERR(hdmi_phy->regs)) {
-		ret = PTR_ERR(hdmi_phy->regs);
-		dev_err(dev, "Failed to get memory resource: %d\n", ret);
-		return ret;
-	}
-
-	ref_clk = devm_clk_get(dev, "pll_ref");
-	if (IS_ERR(ref_clk)) {
-		ret = PTR_ERR(ref_clk);
-		dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
-			ret);
-		return ret;
-	}
-	ref_clk_name = __clk_get_name(ref_clk);
-
-	ret = of_property_read_string(dev->of_node, "clock-output-names",
-				      &clk_init.name);
-	if (ret < 0) {
-		dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
-		return ret;
-	}
-
-	hdmi_phy->pll_hw.init = &clk_init;
-	hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
-	if (IS_ERR(hdmi_phy->pll)) {
-		ret = PTR_ERR(hdmi_phy->pll);
-		dev_err(dev, "Failed to register PLL: %d\n", ret);
-		return ret;
-	}
-
-	ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
-				   &hdmi_phy->ibias);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
-		return ret;
-	}
-
-	ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
-				   &hdmi_phy->ibias_up);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
-		return ret;
-	}
-
-	dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
-	hdmi_phy->drv_imp_clk = 0x30;
-	hdmi_phy->drv_imp_d2 = 0x30;
-	hdmi_phy->drv_imp_d1 = 0x30;
-	hdmi_phy->drv_imp_d0 = 0x30;
-
-	phy = devm_phy_create(dev, NULL, &mtk_hdmi_phy_ops);
-	if (IS_ERR(phy)) {
-		dev_err(dev, "Failed to create HDMI PHY\n");
-		return PTR_ERR(phy);
-	}
-	phy_set_drvdata(phy, hdmi_phy);
-
-	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-	if (IS_ERR(phy_provider))
-		return PTR_ERR(phy_provider);
-
-	hdmi_phy->dev = dev;
-	return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
-				   hdmi_phy->pll);
-}
-
-static int mtk_hdmi_phy_remove(struct platform_device *pdev)
-{
-	return 0;
-}
-
-static const struct of_device_id mtk_hdmi_phy_match[] = {
-	{ .compatible = "mediatek,mt8173-hdmi-phy", },
-	{},
-};
-
-struct platform_driver mtk_hdmi_phy_driver = {
-	.probe = mtk_hdmi_phy_probe,
-	.remove = mtk_hdmi_phy_remove,
-	.driver = {
-		.name = "mediatek-hdmi-phy",
-		.of_match_table = mtk_hdmi_phy_match,
-	},
+struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
+	.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
+	.hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
+	.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
+	.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
 };
 
 MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c
new file mode 100644
index 0000000..17d8a29
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: jitao.shi <jitao.shi@mediatek.com>
+ */
+
+#include "mtk_mipi_tx.h"
+
+#define MIPITX_DSI_CON		0x00
+#define RG_DSI_LDOCORE_EN		BIT(0)
+#define RG_DSI_CKG_LDOOUT_EN		BIT(1)
+#define RG_DSI_BCLK_SEL			(3 << 2)
+#define RG_DSI_LD_IDX_SEL		(7 << 4)
+#define RG_DSI_PHYCLK_SEL		(2 << 8)
+#define RG_DSI_DSICLK_FREQ_SEL		BIT(10)
+#define RG_DSI_LPTX_CLMP_EN		BIT(11)
+
+#define MIPITX_DSI_CLOCK_LANE	0x04
+#define MIPITX_DSI_DATA_LANE0	0x08
+#define MIPITX_DSI_DATA_LANE1	0x0c
+#define MIPITX_DSI_DATA_LANE2	0x10
+#define MIPITX_DSI_DATA_LANE3	0x14
+#define RG_DSI_LNTx_LDOOUT_EN		BIT(0)
+#define RG_DSI_LNTx_CKLANE_EN		BIT(1)
+#define RG_DSI_LNTx_LPTX_IPLUS1		BIT(2)
+#define RG_DSI_LNTx_LPTX_IPLUS2		BIT(3)
+#define RG_DSI_LNTx_LPTX_IMINUS		BIT(4)
+#define RG_DSI_LNTx_LPCD_IPLUS		BIT(5)
+#define RG_DSI_LNTx_LPCD_IMINUS		BIT(6)
+#define RG_DSI_LNTx_RT_CODE		(0xf << 8)
+
+#define MIPITX_DSI_TOP_CON	0x40
+#define RG_DSI_LNT_INTR_EN		BIT(0)
+#define RG_DSI_LNT_HS_BIAS_EN		BIT(1)
+#define RG_DSI_LNT_IMP_CAL_EN		BIT(2)
+#define RG_DSI_LNT_TESTMODE_EN		BIT(3)
+#define RG_DSI_LNT_IMP_CAL_CODE		(0xf << 4)
+#define RG_DSI_LNT_AIO_SEL		(7 << 8)
+#define RG_DSI_PAD_TIE_LOW_EN		BIT(11)
+#define RG_DSI_DEBUG_INPUT_EN		BIT(12)
+#define RG_DSI_PRESERVE			(7 << 13)
+
+#define MIPITX_DSI_BG_CON	0x44
+#define RG_DSI_BG_CORE_EN		BIT(0)
+#define RG_DSI_BG_CKEN			BIT(1)
+#define RG_DSI_BG_DIV			(0x3 << 2)
+#define RG_DSI_BG_FAST_CHARGE		BIT(4)
+#define RG_DSI_VOUT_MSK			(0x3ffff << 5)
+#define RG_DSI_V12_SEL			(7 << 5)
+#define RG_DSI_V10_SEL			(7 << 8)
+#define RG_DSI_V072_SEL			(7 << 11)
+#define RG_DSI_V04_SEL			(7 << 14)
+#define RG_DSI_V032_SEL			(7 << 17)
+#define RG_DSI_V02_SEL			(7 << 20)
+#define RG_DSI_BG_R1_TRIM		(0xf << 24)
+#define RG_DSI_BG_R2_TRIM		(0xf << 28)
+
+#define MIPITX_DSI_PLL_CON0	0x50
+#define RG_DSI_MPPLL_PLL_EN		BIT(0)
+#define RG_DSI_MPPLL_DIV_MSK		(0x1ff << 1)
+#define RG_DSI_MPPLL_PREDIV		(3 << 1)
+#define RG_DSI_MPPLL_TXDIV0		(3 << 3)
+#define RG_DSI_MPPLL_TXDIV1		(3 << 5)
+#define RG_DSI_MPPLL_POSDIV		(7 << 7)
+#define RG_DSI_MPPLL_MONVC_EN		BIT(10)
+#define RG_DSI_MPPLL_MONREF_EN		BIT(11)
+#define RG_DSI_MPPLL_VOD_EN		BIT(12)
+
+#define MIPITX_DSI_PLL_CON1	0x54
+#define RG_DSI_MPPLL_SDM_FRA_EN		BIT(0)
+#define RG_DSI_MPPLL_SDM_SSC_PH_INIT	BIT(1)
+#define RG_DSI_MPPLL_SDM_SSC_EN		BIT(2)
+#define RG_DSI_MPPLL_SDM_SSC_PRD	(0xffff << 16)
+
+#define MIPITX_DSI_PLL_CON2	0x58
+
+#define MIPITX_DSI_PLL_TOP	0x64
+#define RG_DSI_MPPLL_PRESERVE		(0xff << 8)
+
+#define MIPITX_DSI_PLL_PWR	0x68
+#define RG_DSI_MPPLL_SDM_PWR_ON		BIT(0)
+#define RG_DSI_MPPLL_SDM_ISO_EN		BIT(1)
+#define RG_DSI_MPPLL_SDM_PWR_ACK	BIT(8)
+
+#define MIPITX_DSI_SW_CTRL	0x80
+#define SW_CTRL_EN			BIT(0)
+
+#define MIPITX_DSI_SW_CTRL_CON0	0x84
+#define SW_LNTC_LPTX_PRE_OE		BIT(0)
+#define SW_LNTC_LPTX_OE			BIT(1)
+#define SW_LNTC_LPTX_P			BIT(2)
+#define SW_LNTC_LPTX_N			BIT(3)
+#define SW_LNTC_HSTX_PRE_OE		BIT(4)
+#define SW_LNTC_HSTX_OE			BIT(5)
+#define SW_LNTC_HSTX_ZEROCLK		BIT(6)
+#define SW_LNT0_LPTX_PRE_OE		BIT(7)
+#define SW_LNT0_LPTX_OE			BIT(8)
+#define SW_LNT0_LPTX_P			BIT(9)
+#define SW_LNT0_LPTX_N			BIT(10)
+#define SW_LNT0_HSTX_PRE_OE		BIT(11)
+#define SW_LNT0_HSTX_OE			BIT(12)
+#define SW_LNT0_LPRX_EN			BIT(13)
+#define SW_LNT1_LPTX_PRE_OE		BIT(14)
+#define SW_LNT1_LPTX_OE			BIT(15)
+#define SW_LNT1_LPTX_P			BIT(16)
+#define SW_LNT1_LPTX_N			BIT(17)
+#define SW_LNT1_HSTX_PRE_OE		BIT(18)
+#define SW_LNT1_HSTX_OE			BIT(19)
+#define SW_LNT2_LPTX_PRE_OE		BIT(20)
+#define SW_LNT2_LPTX_OE			BIT(21)
+#define SW_LNT2_LPTX_P			BIT(22)
+#define SW_LNT2_LPTX_N			BIT(23)
+#define SW_LNT2_HSTX_PRE_OE		BIT(24)
+#define SW_LNT2_HSTX_OE			BIT(25)
+
+static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
+{
+	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+	u8 txdiv, txdiv0, txdiv1;
+	u64 pcw;
+
+	dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
+
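+	/* Pick txdiv so that data_rate * txdiv lands in roughly 500-1000 MHz. */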
+	if (mipi_tx->data_rate >= 500000000) {
+		txdiv = 1;
+		txdiv0 = 0;
+		txdiv1 = 0;
+	} else if (mipi_tx->data_rate >= 250000000) {
+		txdiv = 2;
+		txdiv0 = 1;
+		txdiv1 = 0;
+	} else if (mipi_tx->data_rate >= 125000000) {
+		txdiv = 4;
+		txdiv0 = 2;
+		txdiv1 = 0;
+	} else if (mipi_tx->data_rate > 62000000) {
+		txdiv = 8;
+		txdiv0 = 2;
+		txdiv1 = 1;
+	} else if (mipi_tx->data_rate >= 50000000) {
+		txdiv = 16;
+		txdiv0 = 2;
+		txdiv1 = 2;
+	} else {
+		return -EINVAL;
+	}
+
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
+				RG_DSI_VOUT_MSK |
+				RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
+				(4 << 20) | (4 << 17) | (4 << 14) |
+				(4 << 11) | (4 << 8) | (4 << 5) |
+				RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+	usleep_range(30, 100);
+
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+				RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
+				(8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
+
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
+			     RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+				RG_DSI_MPPLL_SDM_PWR_ON |
+				RG_DSI_MPPLL_SDM_ISO_EN,
+				RG_DSI_MPPLL_SDM_PWR_ON);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+			       RG_DSI_MPPLL_PLL_EN);
+
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+				RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
+				RG_DSI_MPPLL_PREDIV,
+				(txdiv0 << 3) | (txdiv1 << 5));
+
+	/*
+	 * PLL PCW configuration:
+	 * PCW bits 24~30 hold the integer part,
+	 * PCW bits 0~23 hold the fractional part.
+	 * pcw = data_rate * 4 * txdiv / (ref_clk * 2)
+	 * The post divider is fixed at 4, hence the data_rate * 4 factor.
+	 * ref_clk is 26 MHz.
+	 */
+	pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
+		      26000000);
+	writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
+
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+			     RG_DSI_MPPLL_SDM_FRA_EN);
+
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
+
+	usleep_range(20, 100);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+			       RG_DSI_MPPLL_SDM_SSC_EN);
+
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+				RG_DSI_MPPLL_PRESERVE,
+				mipi_tx->driver_data->mppll_preserve);
+
+	return 0;
+}
+
+static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
+{
+	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+	dev_dbg(mipi_tx->dev, "unprepare\n");
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+			       RG_DSI_MPPLL_PLL_EN);
+
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+				RG_DSI_MPPLL_PRESERVE, 0);
+
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+				RG_DSI_MPPLL_SDM_ISO_EN |
+				RG_DSI_MPPLL_SDM_PWR_ON,
+				RG_DSI_MPPLL_SDM_ISO_EN);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+			       RG_DSI_LNT_HS_BIAS_EN);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
+			       RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
+			       RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+			       RG_DSI_MPPLL_DIV_MSK);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+				       unsigned long *prate)
+{
+	return clamp_val(rate, 50000000, 1250000000);
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+	.prepare = mtk_mipi_tx_pll_prepare,
+	.unprepare = mtk_mipi_tx_pll_unprepare,
+	.round_rate = mtk_mipi_tx_pll_round_rate,
+	.set_rate = mtk_mipi_tx_pll_set_rate,
+	.recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static void mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+	struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+	u32 reg;
+
+	for (reg = MIPITX_DSI_CLOCK_LANE;
+	     reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+		mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+			       RG_DSI_PAD_TIE_LOW_EN);
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+	struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+	u32 reg;
+
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+			     RG_DSI_PAD_TIE_LOW_EN);
+
+	for (reg = MIPITX_DSI_CLOCK_LANE;
+	     reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+		mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+}
+
+const struct mtk_mipitx_data mt8167_mipitx_data = {
+	.mppll_preserve = (3 << 8),
+	.mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+	.mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+	.mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
+
+const struct mtk_mipitx_data mt2701_mipitx_data = {
+	.mppll_preserve = (3 << 8),
+	.mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+	.mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+	.mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
+
+const struct mtk_mipitx_data mt8173_mipitx_data = {
+	.mppll_preserve = (0 << 8),
+	.mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+	.mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+	.mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
new file mode 100644
index 0000000..7758bc9
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: jitao.shi <jitao.shi@mediatek.com>
+ */
+
+#include "mtk_mipi_tx.h"
+
+#define MIPITX_LANE_CON		0x000c
+#define RG_DSI_CPHY_T1DRV_EN		BIT(0)
+#define RG_DSI_ANA_CK_SEL		BIT(1)
+#define RG_DSI_PHY_CK_SEL		BIT(2)
+#define RG_DSI_CPHY_EN			BIT(3)
+#define RG_DSI_PHYCK_INV_EN		BIT(4)
+#define RG_DSI_PWR04_EN			BIT(5)
+#define RG_DSI_BG_LPF_EN		BIT(6)
+#define RG_DSI_BG_CORE_EN		BIT(7)
+#define RG_DSI_PAD_TIEL_SEL		BIT(8)
+
+#define MIPITX_PLL_PWR	0x0028
+#define MIPITX_PLL_CON0	0x002c
+#define MIPITX_PLL_CON1	0x0030
+#define MIPITX_PLL_CON2	0x0034
+#define MIPITX_PLL_CON3	0x0038
+#define MIPITX_PLL_CON4	0x003c
+#define RG_DSI_PLL_IBIAS		(3 << 10)
+
+#define MIPITX_D2_SW_CTL_EN	0x0144
+#define MIPITX_D0_SW_CTL_EN	0x0244
+#define MIPITX_CK_CKMODE_EN	0x0328
+#define DSI_CK_CKMODE_EN		BIT(0)
+#define MIPITX_CK_SW_CTL_EN	0x0344
+#define MIPITX_D1_SW_CTL_EN	0x0444
+#define MIPITX_D3_SW_CTL_EN	0x0544
+#define DSI_SW_CTL_EN			BIT(0)
+#define AD_DSI_PLL_SDM_PWR_ON		BIT(0)
+#define AD_DSI_PLL_SDM_ISO_EN		BIT(1)
+
+#define RG_DSI_PLL_EN			BIT(4)
+#define RG_DSI_PLL_POSDIV		(0x7 << 8)
+
+static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
+{
+	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+	int ret = 0;
+
+	ret = clk_prepare(mipi_tx->ref_clk);
+	if (ret < 0)
+		dev_err(mipi_tx->dev,
+			"can't prepare mipi_tx ref_clk %d\n", ret);
+
+	return ret;
+}
+
+static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
+{
+	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+	unsigned int txdiv, txdiv0;
+	u64 pcw;
+	int ret;
+
+	dev_dbg(mipi_tx->dev, "enable: %u bps\n", mipi_tx->data_rate);
+
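+	/* Pick txdiv so that data_rate * txdiv lands in roughly 2-4 GHz. */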
+	if (mipi_tx->data_rate >= 2000000000) {
+		txdiv = 1;
+		txdiv0 = 0;
+	} else if (mipi_tx->data_rate >= 1000000000) {
+		txdiv = 2;
+		txdiv0 = 1;
+	} else if (mipi_tx->data_rate >= 500000000) {
+		txdiv = 4;
+		txdiv0 = 2;
+	} else if (mipi_tx->data_rate > 250000000) {
+		txdiv = 8;
+		txdiv0 = 3;
+	} else if (mipi_tx->data_rate >= 125000000) {
+		txdiv = 16;
+		txdiv0 = 4;
+	} else {
+		return -EINVAL;
+	}
+
+	ret = clk_enable(mipi_tx->ref_clk);
+	if (ret < 0) {
+		dev_err(mipi_tx->dev,
+			"can't enable mipi_tx ref_clk %d\n", ret);
+		return ret;
+	}
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS);
+
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+	udelay(1);
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
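+	/* As on MT8173: fractional PCW in bits 0-23, integer part above. */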
+	pcw = div_u64(((u64)mipi_tx->data_rate * txdiv) << 24, 26000000);
+	writel(pcw, mipi_tx->regs + MIPITX_PLL_CON0);
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV,
+				txdiv0 << 8);
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+
+	return 0;
+}
+
+static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
+{
+	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+	clk_unprepare(mipi_tx->ref_clk);
+}
+
+static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
+{
+	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+	clk_disable(mipi_tx->ref_clk);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+				       unsigned long *prate)
+{
+	return clamp_val(rate, 50000000, 1600000000);
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+	.prepare = mtk_mipi_tx_pll_prepare,
+	.enable = mtk_mipi_tx_pll_enable,
+	.unprepare = mtk_mipi_tx_pll_unprepare,
+	.disable = mtk_mipi_tx_pll_disable,
+	.round_rate = mtk_mipi_tx_pll_round_rate,
+	.set_rate = mtk_mipi_tx_pll_set_rate,
+	.recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static void mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+	struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+	/* BG_LPF_EN / BG_CORE_EN */
+	writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
+	       mipi_tx->regs + MIPITX_LANE_CON);
+	usleep_range(30, 100);
+	writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN,
+	       mipi_tx->regs + MIPITX_LANE_CON);
+
+	/* Switch OFF each Lane */
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+	struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+	/* Switch ON each Lane */
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+	writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
+	       mipi_tx->regs + MIPITX_LANE_CON);
+	writel(RG_DSI_PAD_TIEL_SEL, mipi_tx->regs + MIPITX_LANE_CON);
+}
+
+const struct mtk_mipitx_data mt8183_mipitx_data = {
+	.mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+	.mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+	.mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 6020c30..429d697 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -117,6 +117,14 @@
 	  Pi 7" Touchscreen.  To compile this driver as a module,
 	  choose M here.
 
+config DRM_PANEL_RPI_PUMPKIN_TOUCHSCREEN
+	tristate "RPi 7-inch touchscreen panel for Pumpkin boards"
+	depends on DRM_MIPI_DSI
+	help
+	  Say Y here if you want to enable support for the Raspberry
+	  Pi 7" Touchscreen modified for use with the Pumpkin boards.
+	  To compile this driver as a module, choose M here.
+
 config DRM_PANEL_RAYDIUM_RM68200
 	tristate "Raydium RM68200 720x1280 DSI video mode panel"
 	depends on OF
@@ -178,6 +186,15 @@
 	  Say Y here if you want to enable support for Sharp LS043T1LE01 qHD
 	  (540x960) DSI panel as found on the Qualcomm APQ8074 Dragonboard
 
+config DRM_PANEL_SHARP_NT35532
+	tristate "Sharp NT35532 panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for Sharp NT35532 DSI panel
+	  as found on the MT8183 EVB (aiv8183m1)
+
 config DRM_PANEL_SITRONIX_ST7789V
 	tristate "Sitronix ST7789V panel"
 	depends on OF && SPI
@@ -186,4 +203,22 @@
 	  Say Y here if you want to enable support for the Sitronix
 	  ST7789V controller for 240x320 LCD panels
 
+config DRM_PANEL_TPV_OTM1901A
+	tristate "TPV OTM1901A panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for TPV OTM1901A DSI panel
+	  as found on the MT8183 EVB (aiv8183m1)
+
+config DRM_PANEL_TRULY_R63350A
+	tristate "TRULY R63350A panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for Truly R63350A DSI panel
+	  as found on the MT8183 EVB (aiv8183m1)
+
 endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 5ccaaa9..e4cd72b6 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -10,6 +10,7 @@
 obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
 obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
 obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
+obj-$(CONFIG_DRM_PANEL_RPI_PUMPKIN_TOUCHSCREEN) += panel-rpi-pumpkin-touchscreen.o
 obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
@@ -18,4 +19,7 @@
 obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
+obj-$(CONFIG_DRM_PANEL_SHARP_NT35532) += panel-sharp-nt35532.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_TPV_OTM1901A) += panel-tpv-otm1901a.o
+obj-$(CONFIG_DRM_PANEL_TRULY_R63350A) += panel-truly-r63350a.o
diff --git a/drivers/gpu/drm/panel/panel-rpi-pumpkin-touchscreen.c b/drivers/gpu/drm/panel/panel-rpi-pumpkin-touchscreen.c
new file mode 100644
index 0000000..5e238e5
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-rpi-pumpkin-touchscreen.c
@@ -0,0 +1,546 @@
+/*
+ * Copyright © 2016-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Portions of this file (derived from panel-simple.c) are:
+ *
+ * Copyright (C) 2013, NVIDIA Corporation.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * Raspberry Pi 7" touchscreen panel driver.
+ *
+ * The 7" touchscreen consists of a DPI LCD panel, a Toshiba
+ * TC358762XBG DSI-DPI bridge, and an I2C-connected Atmel ATTINY88-MUR
+ * controlling power management, the LCD PWM, and initial register
+ * setup of the Toshiba.
+ *
+ * This driver controls the TC358762 and ATTINY88, presenting a DSI
+ * device with a drm_panel.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_panel.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#define RPI_DSI_DRIVER_NAME "rpi-ts-dsi"
+
+/* I2C registers of the Atmel microcontroller. */
+enum REG_ADDR {
+	REG_ID = 0x80,
+	REG_PORTA, /* BIT(2) for horizontal flip, BIT(3) for vertical flip */
+	REG_PORTB,
+	REG_PORTC,
+	REG_PORTD,
+	REG_POWERON,
+	REG_PWM,
+	REG_DDRA,
+	REG_DDRB,
+	REG_DDRC,
+	REG_DDRD,
+	REG_TEST,
+	REG_WR_ADDRL,
+	REG_WR_ADDRH,
+	REG_READH,
+	REG_READL,
+	REG_WRITEH,
+	REG_WRITEL,
+	REG_ID2,
+};
+
+/* DSI D-PHY Layer Registers */
+#define D0W_DPHYCONTTX		0x0004
+#define CLW_DPHYCONTRX		0x0020
+#define D0W_DPHYCONTRX		0x0024
+#define D1W_DPHYCONTRX		0x0028
+#define COM_DPHYCONTRX		0x0038
+#define CLW_CNTRL		0x0040
+#define D0W_CNTRL		0x0044
+#define D1W_CNTRL		0x0048
+#define DFTMODE_CNTRL		0x0054
+
+/* DSI PPI Layer Registers */
+#define PPI_STARTPPI		0x0104
+#define PPI_BUSYPPI		0x0108
+#define PPI_LINEINITCNT		0x0110
+#define PPI_LPTXTIMECNT		0x0114
+#define PPI_CLS_ATMR		0x0140
+#define PPI_D0S_ATMR		0x0144
+#define PPI_D1S_ATMR		0x0148
+#define PPI_D0S_CLRSIPOCOUNT	0x0164
+#define PPI_D1S_CLRSIPOCOUNT	0x0168
+#define CLS_PRE			0x0180
+#define D0S_PRE			0x0184
+#define D1S_PRE			0x0188
+#define CLS_PREP		0x01A0
+#define D0S_PREP		0x01A4
+#define D1S_PREP		0x01A8
+#define CLS_ZERO		0x01C0
+#define D0S_ZERO		0x01C4
+#define D1S_ZERO		0x01C8
+#define PPI_CLRFLG		0x01E0
+#define PPI_CLRSIPO		0x01E4
+#define HSTIMEOUT		0x01F0
+#define HSTIMEOUTENABLE		0x01F4
+
+/* DSI Protocol Layer Registers */
+#define DSI_STARTDSI		0x0204
+#define DSI_BUSYDSI		0x0208
+#define DSI_LANEENABLE		0x0210
+# define DSI_LANEENABLE_CLOCK		BIT(0)
+# define DSI_LANEENABLE_D0		BIT(1)
+# define DSI_LANEENABLE_D1		BIT(2)
+
+#define DSI_LANESTATUS0		0x0214
+#define DSI_LANESTATUS1		0x0218
+#define DSI_INTSTATUS		0x0220
+#define DSI_INTMASK		0x0224
+#define DSI_INTCLR		0x0228
+#define DSI_LPTXTO		0x0230
+#define DSI_MODE		0x0260
+#define DSI_PAYLOAD0		0x0268
+#define DSI_PAYLOAD1		0x026C
+#define DSI_SHORTPKTDAT		0x0270
+#define DSI_SHORTPKTREQ		0x0274
+#define DSI_BTASTA		0x0278
+#define DSI_BTACLR		0x027C
+
+/* DSI General Registers */
+#define DSIERRCNT		0x0300
+#define DSISIGMOD		0x0304
+
+/* DSI Application Layer Registers */
+#define APLCTRL			0x0400
+#define APLSTAT			0x0404
+#define APLERR			0x0408
+#define PWRMOD			0x040C
+#define RDPKTLN			0x0410
+#define PXLFMT			0x0414
+#define MEMWRCMD		0x0418
+
+/* LCDC/DPI Host Registers */
+#define LCDCTRL			0x0420
+#define HSR			0x0424
+#define HDISPR			0x0428
+#define VSR			0x042C
+#define VDISPR			0x0430
+#define VFUEN			0x0434
+
+/* DBI-B Host Registers */
+#define DBIBCTRL		0x0440
+
+/* SPI Master Registers */
+#define SPICMR			0x0450
+#define SPITCR			0x0454
+
+/* System Controller Registers */
+#define SYSSTAT			0x0460
+#define SYSCTRL			0x0464
+#define SYSPLL1			0x0468
+#define SYSPLL2			0x046C
+#define SYSPLL3			0x0470
+#define SYSPMCTRL		0x047C
+
+/* GPIO Registers */
+#define GPIOC			0x0480
+#define GPIOO			0x0484
+#define GPIOI			0x0488
+
+/* I2C Registers */
+#define I2CCLKCTRL		0x0490
+
+/* Chip/Rev Registers */
+#define IDREG			0x04A0
+
+/* Debug Registers */
+#define WCMDQUEUE		0x0500
+#define RCMDQUEUE		0x0504
+
+static const struct i2c_device_id rpi_touchscreen_id[] = {
+	{ "7inch-ts-panel", 0, },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, rpi_touchscreen_id);
+
+struct rpi_touchscreen {
+	struct drm_panel base;
+	struct mipi_dsi_device *dsi;
+	struct i2c_client *i2c;
+	struct regulator *supply;
+};
+
+static const struct drm_display_mode rpi_touchscreen_modes[] = {
+	{
+#define PIXEL_CLOCK   27112000
+		/* Round up the pixel clock a bit (10 kHz), so that the
+		 * "don't run things faster than the requested clock
+		 * rate" rule of the clk driver doesn't reject the
+		 * divide-by-3 mode due to rounding error.
+		 */
+		.clock = PIXEL_CLOCK / 1000,
+		.hdisplay = 800,
+		.hsync_start = 800 + 20,
+		.hsync_end = 800 + 20 + 32,
+		.htotal = 800 + 20 + 32 + 16,
+		.vdisplay = 498,
+		.vsync_start = 498 + 2,
+		.vsync_end = 498 + 2 + 3,
+		.vtotal = 498 + 2 + 3 + 20,
+		.vrefresh = 60,
+	},
+};
+
+static struct rpi_touchscreen *panel_to_ts(struct drm_panel *panel)
+{
+	return container_of(panel, struct rpi_touchscreen, base);
+}
+
+static int rpi_touchscreen_i2c_read(struct rpi_touchscreen *ts, u8 reg)
+{
+	return i2c_smbus_read_byte_data(ts->i2c, reg);
+}
+
+static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
+				      u8 reg, u8 val)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
+	if (ret)
+		dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
+}
+
+static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
+{
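+	/* 16-bit register then 32-bit value, both little-endian. */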
+	u8 msg[] = {
+		reg,
+		reg >> 8,
+		val,
+		val >> 8,
+		val >> 16,
+		val >> 24,
+	};
+
+	mipi_dsi_generic_write(ts->dsi, msg, sizeof(msg));
+
+	return 0;
+}
+
+static int rpi_touchscreen_disable(struct drm_panel *panel)
+{
+	struct rpi_touchscreen *ts = panel_to_ts(panel);
+
+	rpi_touchscreen_i2c_write(ts, REG_PWM, 0);
+
+	rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
+	udelay(1);
+
+	return 0;
+}
+
+static int rpi_touchscreen_noop(struct drm_panel *panel)
+{
+	return 0;
+}
+
+static int rpi_touchscreen_enable(struct drm_panel *panel)
+{
+	struct rpi_touchscreen *ts = panel_to_ts(panel);
+	int i;
+
+	rpi_touchscreen_i2c_write(ts, REG_POWERON, 1);
+	/* Wait for nPWRDWN to go low to indicate poweron is done. */
+	for (i = 0; i < 100; i++) {
+		if (rpi_touchscreen_i2c_read(ts, REG_PORTB) & 1)
+			break;
+	}
+
+	rpi_touchscreen_write(ts, DSI_LANEENABLE,
+			      DSI_LANEENABLE_CLOCK |
+			      DSI_LANEENABLE_D0);
+	rpi_touchscreen_write(ts, PPI_D0S_CLRSIPOCOUNT, 0x05);
+	rpi_touchscreen_write(ts, PPI_D1S_CLRSIPOCOUNT, 0x05);
+	rpi_touchscreen_write(ts, PPI_D0S_ATMR, 0x00);
+	rpi_touchscreen_write(ts, PPI_D1S_ATMR, 0x00);
+	rpi_touchscreen_write(ts, PPI_LPTXTIMECNT, 0x03);
+
+	rpi_touchscreen_write(ts, SPICMR, 0x00);
+	rpi_touchscreen_write(ts, HSR, 0x002c0002);    /* back porch 44, sync 2 */
+	rpi_touchscreen_write(ts, VSR, 0x00150002);    /* back porch 21, sync 2 */
+	rpi_touchscreen_write(ts, HDISPR, 0x003d0320); /* front porch 61, 800 px */
+	rpi_touchscreen_write(ts, VDISPR, 0x000701e0); /* front porch 7, 480 px */
+	rpi_touchscreen_write(ts, LCDCTRL, 0x00100152);
+	rpi_touchscreen_write(ts, SYSCTRL, 0x040f);
+	msleep(100);
+
+	rpi_touchscreen_write(ts, PPI_STARTPPI, 0x01);
+	rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
+	msleep(100);
+
+	/* Turn on the backlight. */
+	rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
+
+	/* Default to the same orientation as the closed source
+	 * firmware used for the panel.  Runtime rotation
+	 * configuration will be supported using VC4's plane
+	 * orientation bits.
+	 */
+	rpi_touchscreen_i2c_write(ts, REG_PORTA, BIT(2));
+	return 0;
+}
+
+static int rpi_touchscreen_get_modes(struct drm_panel *panel)
+{
+	struct drm_connector *connector = panel->connector;
+	struct drm_device *drm = panel->drm;
+	unsigned int i, num = 0;
+	static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+	for (i = 0; i < ARRAY_SIZE(rpi_touchscreen_modes); i++) {
+		const struct drm_display_mode *m = &rpi_touchscreen_modes[i];
+		struct drm_display_mode *mode;
+
+		mode = drm_mode_duplicate(drm, m);
+		if (!mode) {
+			dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+				m->hdisplay, m->vdisplay, m->vrefresh);
+			continue;
+		}
+
+		mode->type |= DRM_MODE_TYPE_DRIVER;
+
+		if (i == 0)
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+		drm_mode_set_name(mode);
+
+		drm_mode_probed_add(connector, mode);
+		num++;
+	}
+
+	connector->display_info.bpc = 8;
+	connector->display_info.width_mm = 154;
+	connector->display_info.height_mm = 86;
+	drm_display_info_set_bus_formats(&connector->display_info,
+					 &bus_format, 1);
+
+	return num;
+}
+
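+/* Power-up runs at prepare and power-down at disable; the rest are no-ops. */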
+static const struct drm_panel_funcs rpi_touchscreen_funcs = {
+	.disable = rpi_touchscreen_disable,
+	.unprepare = rpi_touchscreen_noop,
+	.prepare = rpi_touchscreen_enable,
+	.enable = rpi_touchscreen_noop,
+	.get_modes = rpi_touchscreen_get_modes,
+};
+
+static int rpi_touchscreen_probe(struct i2c_client *i2c,
+				 const struct i2c_device_id *id)
+{
+	struct device *dev = &i2c->dev;
+	struct rpi_touchscreen *ts;
+	int ret, ver;
+
+	ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+	if (!ts)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, ts);
+
+	ts->i2c = i2c;
+
+	ts->supply = devm_regulator_get(dev, "power");
+	ret = PTR_ERR_OR_ZERO(ts->supply);
+	if (ret) {
+		dev_err(dev, "failed to get supply 'power'\n");
+		return ret;
+	}
+
+	ret = regulator_set_voltage(ts->supply, 1800000, 1800000);
+	if (ret) {
+		dev_err(dev, "failed set voltage to regulator 'power'\n");
+		dev_err(dev, "failed to set voltage on regulator 'power'\n");
+	}
+
+	ret = regulator_enable(ts->supply);
+	if (ret) {
+		dev_err(dev, "failed to enable supply 'power'\n");
+		return ret;
+	}
+
+	usleep_range(1000, 2000);
+
+	ver = rpi_touchscreen_i2c_read(ts, REG_ID);
+
+	if (ver < 0) {
+		dev_err(dev, "Atmel I2C read failed: %d\n", ver);
+		return -ENODEV;
+	}
+
+	switch (ver) {
+	case 0xde: /* ver 1 */
+	case 0xc3: /* ver 2 */
+		break;
+	default:
+		dev_err(dev, "Unknown Atmel firmware revision: 0x%02x\n", ver);
+		return -ENODEV;
+	}
+
+	/* Turn off at boot, so we can cleanly sequence powering on. */
+	rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
+
+	return 0;
+}
+
+static int rpi_touchscreen_dsi_probe(struct mipi_dsi_device *dsi)
+{
+	struct device *dev = &dsi->dev;
+	struct rpi_touchscreen *ts;
+	int ret;
+	struct device_node *ts_node;
+	struct i2c_client *i2c;
+
+	ts_node = of_parse_phandle(dev->of_node, "pumpkin,touchscreen", 0);
+	if (!ts_node) {
+		dev_err(dev, "Property 'pumpkin,touchscreen' missing or invalid\n");
+		return -EINVAL;
+	}
+
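+	/* The I2C side of the panel may not have probed yet; defer if so. */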
+	i2c = of_find_i2c_device_by_node(ts_node);
+	of_node_put(ts_node);
+	if (!i2c)
+		return -EPROBE_DEFER;
+
+	ts = i2c_get_clientdata(i2c);
+	if (!ts)
+		return -EINVAL;
+
+	dsi->mode_flags = (MIPI_DSI_MODE_VIDEO |
+			   MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+			   MIPI_DSI_MODE_LPM);
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->lanes = 1;
+
+	mipi_dsi_set_drvdata(dsi, ts);
+
+	ts->dsi = dsi;
+
+	drm_panel_init(&ts->base);
+	ts->base.dev = dev;
+	ts->base.funcs = &rpi_touchscreen_funcs;
+
+	ret = drm_panel_add(&ts->base);
+	if (ret) {
+		dev_err(&dsi->dev, "failed to add panel: %d\n", ret);
+		return ret;
+	}
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret) {
+		dev_err(&dsi->dev, "failed to attach dsi to host: %d\n", ret);
+		drm_panel_remove(&ts->base);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int rpi_touchscreen_dsi_remove(struct mipi_dsi_device *dsi)
+{
+	struct rpi_touchscreen *ts = mipi_dsi_get_drvdata(dsi);
+
+	mipi_dsi_detach(dsi);
+
+	drm_panel_remove(&ts->base);
+	put_device(&ts->i2c->dev);
+
+	return 0;
+}
+
+static const struct of_device_id rpi_touchscreen_dsi_of_ids[] = {
+	{ .compatible = "pumpkin,7inch-touchscreen-panel-dsi", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, rpi_touchscreen_dsi_of_ids);
+
+static struct mipi_dsi_driver rpi_touchscreen_dsi_driver = {
+	.driver.name = RPI_DSI_DRIVER_NAME,
+	.probe = rpi_touchscreen_dsi_probe,
+	.remove = rpi_touchscreen_dsi_remove,
+	.driver.owner = THIS_MODULE,
+	.driver.of_match_table = rpi_touchscreen_dsi_of_ids,
+};
+
+static const struct of_device_id rpi_touchscreen_of_ids[] = {
+	{ .compatible = "pumpkin,7inch-touchscreen-panel" },
+	{ } /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, rpi_touchscreen_of_ids);
+
+static struct i2c_driver rpi_touchscreen_driver = {
+	.driver = {
+		.name = "pumpkin_touchscreen",
+		.of_match_table = rpi_touchscreen_of_ids,
+	},
+	.probe = rpi_touchscreen_probe,
+	.id_table	= rpi_touchscreen_id,
+};
+
+static int __init rpi_touchscreen_init(void)
+{
+	mipi_dsi_driver_register(&rpi_touchscreen_dsi_driver);
+	return i2c_add_driver(&rpi_touchscreen_driver);
+}
+module_init(rpi_touchscreen_init);
+
+static void __exit rpi_touchscreen_exit(void)
+{
+	i2c_del_driver(&rpi_touchscreen_driver);
+	mipi_dsi_driver_unregister(&rpi_touchscreen_dsi_driver);
+}
+module_exit(rpi_touchscreen_exit);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Raspberry Pi 7-inch touchscreen driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-sharp-nt35532.c b/drivers/gpu/drm/panel/panel-sharp-nt35532.c
new file mode 100644
index 0000000..868cc80
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-nt35532.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (C) 2019 BayLibre
+ * Author: Alexandre Bailon <abailon@baylibre.com>
+ *
+ * Based on panel-sharp-ls043t1le01 driver.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct sharp_nt_panel {
+	struct drm_panel base;
+	struct mipi_dsi_device *dsi;
+
+	struct gpio_desc *reset_gpio;
+	struct gpio_desc *pwr_gpio;
+	struct gpio_desc *pwr2_gpio;
+
+	struct backlight_device *backlight;
+
+	bool prepared;
+	bool enabled;
+
+	const struct drm_display_mode *mode;
+};
+
+static inline struct sharp_nt_panel *to_sharp_nt_panel(struct drm_panel *panel)
+{
+	return container_of(panel, struct sharp_nt_panel, base);
+}
+
+static int sharp_nt_panel_on(struct sharp_nt_panel *sharp_nt)
+{
+	struct mipi_dsi_device *dsi = sharp_nt->dsi;
+	int ret;
+
+	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_dcs_set_display_on(dsi);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int sharp_nt_panel_off(struct sharp_nt_panel *sharp_nt)
+{
+	struct mipi_dsi_device *dsi = sharp_nt->dsi;
+	int ret;
+
+	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int sharp_nt_panel_disable(struct drm_panel *panel)
+{
+	struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
+
+	if (!sharp_nt->enabled)
+		return 0;
+
+	backlight_disable(sharp_nt->backlight);
+
+	sharp_nt->enabled = false;
+
+	return 0;
+}
+
+static int sharp_nt_panel_unprepare(struct drm_panel *panel)
+{
+	struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
+	int ret;
+
+	if (!sharp_nt->prepared)
+		return 0;
+
+	ret = sharp_nt_panel_off(sharp_nt);
+	if (ret < 0) {
+		dev_err(panel->dev, "failed to set panel off: %d\n", ret);
+		return ret;
+	}
+
+	gpiod_set_value(sharp_nt->pwr2_gpio, 0);
+	gpiod_set_value(sharp_nt->pwr_gpio, 0);
+	gpiod_set_value(sharp_nt->reset_gpio, 0);
+
+	sharp_nt->prepared = false;
+
+	return 0;
+}
+
+static int sharp_nt_panel_prepare(struct drm_panel *panel)
+{
+	struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
+	int ret;
+
+	if (sharp_nt->prepared)
+		return 0;
+
+	gpiod_set_value(sharp_nt->pwr_gpio, 1);
+	gpiod_set_value(sharp_nt->pwr2_gpio, 1);
+
+	msleep(20);
+
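+	/* Pulse the reset line (1-0-1) before sending the init sequence. */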
+	gpiod_set_value(sharp_nt->reset_gpio, 1);
+	msleep(1);
+	gpiod_set_value(sharp_nt->reset_gpio, 0);
+	msleep(1);
+	gpiod_set_value(sharp_nt->reset_gpio, 1);
+	msleep(10);
+
+	ret = sharp_nt_panel_on(sharp_nt);
+	if (ret < 0) {
+		dev_err(panel->dev, "failed to set panel on: %d\n", ret);
+		goto poweroff;
+	}
+
+	sharp_nt->prepared = true;
+
+	return 0;
+
+poweroff:
+	gpiod_set_value(sharp_nt->pwr2_gpio, 0);
+	gpiod_set_value(sharp_nt->pwr_gpio, 0);
+	gpiod_set_value(sharp_nt->reset_gpio, 0);
+
+	return ret;
+}
+
+static int sharp_nt_panel_enable(struct drm_panel *panel)
+{
+	struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
+
+	if (sharp_nt->enabled)
+		return 0;
+
+	backlight_enable(sharp_nt->backlight);
+
+	sharp_nt->enabled = true;
+
+	return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+	.clock = 137380,
+	.hdisplay = 1080,
+	.hsync_start = 1080 + 72,
+	.hsync_end = 1080 + 72 + 8,
+	.htotal = 1080 + 72 + 8 + 16,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 14,
+	.vsync_end = 1920 + 14 + 2,
+	.vtotal = 1920 + 14 + 2 + 6,
+	.vrefresh = 60,
+};
+
+static int sharp_nt_panel_get_modes(struct drm_panel *panel)
+{
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	if (!mode) {
+		dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+				default_mode.hdisplay, default_mode.vdisplay,
+				default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	drm_mode_probed_add(panel->connector, mode);
+
+	panel->connector->display_info.width_mm = 68;
+	panel->connector->display_info.height_mm = 120;
+
+	return 1;
+}
+
+static const struct drm_panel_funcs sharp_nt_panel_funcs = {
+	.disable = sharp_nt_panel_disable,
+	.unprepare = sharp_nt_panel_unprepare,
+	.prepare = sharp_nt_panel_prepare,
+	.enable = sharp_nt_panel_enable,
+	.get_modes = sharp_nt_panel_get_modes,
+};
+
+static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
+{
+	struct device *dev = &sharp_nt->dsi->dev;
+
+	sharp_nt->mode = &default_mode;
+	sharp_nt->reset_gpio = devm_gpiod_get(dev, "reset",
+					      GPIOD_OUT_LOW);
+	if (IS_ERR(sharp_nt->reset_gpio)) {
+		dev_err(dev, "cannot get reset-gpios %ld\n",
+			PTR_ERR(sharp_nt->reset_gpio));
+		return PTR_ERR(sharp_nt->reset_gpio);
+	}
+
+	sharp_nt->pwr_gpio = devm_gpiod_get(dev, "pwr",
+					    GPIOD_OUT_LOW);
+	if (IS_ERR(sharp_nt->pwr_gpio)) {
+		dev_err(dev, "cannot get pwr-gpios %ld\n",
+			PTR_ERR(sharp_nt->pwr_gpio));
+		return PTR_ERR(sharp_nt->pwr_gpio);
+	}
+
+	sharp_nt->pwr2_gpio = devm_gpiod_get(dev, "pwr2",
+					     GPIOD_OUT_LOW);
+	if (IS_ERR(sharp_nt->pwr2_gpio)) {
+		dev_err(dev, "cannot get pwr2-gpios %ld\n",
+			PTR_ERR(sharp_nt->pwr2_gpio));
+		return PTR_ERR(sharp_nt->pwr2_gpio);
+	}
+
+	sharp_nt->backlight = devm_of_find_backlight(dev);
+	if (IS_ERR(sharp_nt->backlight)) {
+		dev_err(dev, "failed to get backlight\n");
+		return PTR_ERR(sharp_nt->backlight);
+	}
+
+	drm_panel_init(&sharp_nt->base);
+	sharp_nt->base.funcs = &sharp_nt_panel_funcs;
+	sharp_nt->base.dev = &sharp_nt->dsi->dev;
+
+	return drm_panel_add(&sharp_nt->base);
+}
+
+static void sharp_nt_panel_del(struct sharp_nt_panel *sharp_nt)
+{
+	if (sharp_nt->base.dev)
+		drm_panel_remove(&sharp_nt->base);
+}
+
+static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi)
+{
+	struct sharp_nt_panel *sharp_nt;
+	int ret;
+
+	dsi->lanes = 4;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+			MIPI_DSI_MODE_VIDEO_HSE |
+			MIPI_DSI_CLOCK_NON_CONTINUOUS |
+			MIPI_DSI_MODE_EOT_PACKET;
+
+	sharp_nt = devm_kzalloc(&dsi->dev, sizeof(*sharp_nt), GFP_KERNEL);
+	if (!sharp_nt)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, sharp_nt);
+	sharp_nt->dsi = dsi;
+
+	ret = sharp_nt_panel_add(sharp_nt);
+	if (ret < 0)
+		return ret;
+
+	return mipi_dsi_attach(dsi);
+}
+
+static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
+{
+	struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	ret = sharp_nt_panel_disable(&sharp_nt->base);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
+
+	ret = mipi_dsi_detach(dsi);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+	sharp_nt_panel_del(sharp_nt);
+
+	return 0;
+}
+
+static void sharp_nt_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
+
+	sharp_nt_panel_disable(&sharp_nt->base);
+}
+
+static const struct of_device_id sharp_nt_of_match[] = {
+	{ .compatible = "sharp,nt35532", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sharp_nt_of_match);
+
+static struct mipi_dsi_driver sharp_nt_panel_driver = {
+	.driver = {
+		.name = "panel-sharp-nt35532",
+		.of_match_table = sharp_nt_of_match,
+	},
+	.probe = sharp_nt_panel_probe,
+	.remove = sharp_nt_panel_remove,
+	.shutdown = sharp_nt_panel_shutdown,
+};
+module_mipi_dsi_driver(sharp_nt_panel_driver);
+
+MODULE_AUTHOR("Alexandre Bailon <abailon@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-tpv-otm1901a.c b/drivers/gpu/drm/panel/panel-tpv-otm1901a.c
new file mode 100644
index 0000000..bc4406f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-tpv-otm1901a.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ * Author: Pedro Tsai <pedro.tsai@mediatek.com>
+ *
+ * Based on panel-sharp-nt35532 driver.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct lcm_init_struct {
+	u8 cmd;
+	u8 count;
+	u8 params[64];
+};
+
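+/*
+ * OTM1901A init sequence as {command, count, params} entries, kept as in
+ * the vendor LCM code. The 0x00 writes appear to set the register page
+ * offset before each parameter write, and the 0xFF commands to open and
+ * close extended register access.
+ */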
+static const struct lcm_init_struct init_setting[] = {
+	{0x00, 1, {0x00} },
+	{0xFF, 4, {0x19, 0x01, 0x01, 0x00} },
+	{0x00, 1, {0x80} },
+	{0xFF, 2, {0x19, 0x01} },
+	{0x00, 1, {0x00} },
+	{0x1C, 1, {0x33} },
+	{0x00, 1, {0xA0} },
+	{0xC1, 1, {0xE8} },
+	{0x00, 1, {0xA7} },
+	{0xC1, 1, {0x00} },
+	{0x00, 1, {0x90} },
+	{0xC0, 6, {0x00, 0x2F, 0x00, 0x00, 0x00, 0x01} },
+	{0x00, 1, {0xC0} },
+	{0xC0, 6, {0x00, 0x2F, 0x00, 0x00, 0x00, 0x01} },
+	{0x00, 1, {0x9A} },
+	{0xC0, 1, {0x1E} },
+	{0x00, 1, {0xAC} },
+	{0xC0, 1, {0x06} },
+	{0x00, 1, {0xDC} },
+	{0xC0, 1, {0x06} },
+	{0x00, 1, {0x81} },
+	{0xA5, 1, {0x06} },
+	{0x00, 1, {0x82} },
+	{0xC4, 1, {0xF0} },
+	{0x00, 1, {0x92} },
+	{0xE9, 1, {0x00} },
+	{0x00, 1, {0x90} },
+	{0xF3, 1, {0x01} },
+	{0x00, 1, {0x82} },
+	{0xA5, 1, {0x1F} },
+	{0x00, 1, {0x93} },
+	{0xC5, 1, {0x19} },
+	{0x00, 1, {0x95} },
+	{0xC5, 1, {0x28} },
+	{0x00, 1, {0x97} },
+	{0xC5, 1, {0x18} },
+	{0x00, 1, {0x99} },
+	{0xC5, 1, {0x23} },
+	{0x00, 1, {0x9B} },
+	{0xC5, 2, {0x44, 0x40} },
+	{0x00, 1, {0x00} },
+	{0xD9, 2, {0x00, 0xBA} },
+	{0x00, 1, {0x00} },
+	{0xD8, 2, {0x1B, 0x1B} },
+	{0x00, 1, {0xB3} },
+	{0xC0, 1, {0xCC} },
+	{0x00, 1, {0xBC} },
+	{0xC0, 1, {0x00} },
+	{0x00, 1, {0x84} },
+	{0xC4, 1, {0x22} },
+	{0x00, 1, {0x94} },
+	{0xC1, 1, {0x84} },
+	{0x00, 1, {0x98} },
+	{0xC1, 1, {0x74} },
+	{0x00, 1, {0x80} },
+	{0xC4, 1, {0x38} },
+	{0x00, 1, {0xCD} },
+	{0xF5, 1, {0x19} },
+	{0x00, 1, {0xDB} },
+	{0xF5, 1, {0x19} },
+	{0x00, 1, {0xF5} },
+	{0xC1, 1, {0x40} },
+	{0x00, 1, {0xB9} },
+	{0xC0, 1, {0x11} },
+	{0x00, 1, {0x8D} },
+	{0xF5, 1, {0x20} },
+	{0x00, 1, {0x80} },
+	{0xC0, 14, {0x00, 0x86, 0x00, 0x0A, 0x0A, 0x00, 0x86, 0x0A, 0x0A, 0x00,
+		    0x86, 0x00, 0x0A, 0x0A} },
+	{0x00, 1, {0xF0} },
+	{0xC3, 6, {0x00, 0x00, 0x00, 0x00, 0x00, 0x80} },
+	{0x00, 1, {0xA0} },
+	{0xC0, 7, {0x00, 0x00, 0x03, 0x00, 0x00, 0x1E, 0x06} },
+	{0x00, 1, {0xD0} },
+	{0xC0, 7, {0x00, 0x00, 0x03, 0x00, 0x00, 0x1E, 0x06} },
+	{0x00, 1, {0x90} },
+	{0xC2, 4, {0x84, 0x01, 0x3B, 0x40} },
+	{0x00, 1, {0xB0} },
+	{0xC2, 8, {0x02, 0x01, 0x45, 0x43, 0x02, 0x01, 0x45, 0x43} },
+	{0x00, 1, {0x80} },
+	{0xC3, 12, {0x84, 0x08, 0x03, 0x00, 0x02, 0x89, 0x82, 0x08, 0x03, 0x00,
+		    0x02, 0x89} },
+	{0x00, 1, {0x90} },
+	{0xC3, 12, {0x83, 0x08, 0x03, 0x00, 0x02, 0x89, 0x81, 0x08, 0x03, 0x00,
+		    0x02, 0x89} },
+	{0x00, 1, {0x80} },
+	{0xCC, 15, {0x09, 0x0D, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+		    0x0E, 0x28, 0x28, 0x28, 0x28} },
+	{0x00, 1, {0x90} },
+	{0xCC, 15, {0x0D, 0x09, 0x14, 0x13, 0x12, 0x11, 0x15, 0x16, 0x17, 0x18,
+		    0x0E, 0x28, 0x28, 0x28, 0x28} },
+	{0x00, 1, {0xA0} },
+	{0xCC, 15, {0x1D, 0x1E, 0x1F, 0x19, 0x1A, 0x1B, 0x1C, 0x20, 0x21, 0x22,
+		    0x23, 0x24, 0x25, 0x26, 0x27} },
+	{0x00, 1, {0xB0} },
+	{0xCC, 8, {0x01, 0x02, 0x03, 0x05, 0x06, 0x07, 0x04, 0x08} },
+	{0x00, 1, {0xC0} },
+	{0xCC, 12, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		    0x00, 0x77} },
+	{0x00, 1, {0xD0} },
+	{0xCC, 12, {0xFF, 0x0F, 0x30, 0xC0, 0x0F, 0x30, 0x00, 0x00, 0x33, 0x03,
+		    0x00, 0x77} },
+	{0x00, 1, {0xDE} },
+	{0xCC, 1, {0x00} },
+	{0x00, 1, {0x80} },
+	{0xCB, 15, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		    0x30, 0x00, 0x00, 0x00, 0x00} },
+	{0x00, 1, {0x90} },
+	{0xCB, 15, {0x30, 0x00, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		    0x00, 0x00, 0x00, 0x00, 0x00} },
+	{0x00, 1, {0xA0} },
+	{0xCB, 15, {0x15, 0x15, 0x05, 0xF5, 0x05, 0xF5, 0x00, 0x00, 0x00, 0x00,
+		    0x15, 0x00, 0x00, 0x00, 0x00} },
+	{0x00, 1, {0xB0} },
+	{0xCB, 15, {0x00, 0x01, 0xFD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		    0x00, 0x00, 0x00, 0x00, 0x00} },
+	{0x00, 1, {0xC0} },
+	{0xCB, 8, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x77} },
+	{0x00, 1, {0xD0} },
+	{0xCB, 8, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x77} },
+	{0x00, 1, {0xE0} },
+	{0xCB, 8, {0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x77, 0x77} },
+	{0x00, 1, {0xF0} },
+	{0xCB, 8, {0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x77, 0x77} },
+	{0x00, 1, {0x80} },
+	{0xCD, 15, {0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x01, 0x12,
+		    0x11, 0x03, 0x04, 0x0B, 0x17} },
+	{0x00, 1, {0x90} },
+	{0xCD, 11, {0x3D, 0x02, 0x3D, 0x25, 0x25, 0x25, 0x1F, 0x20, 0x21, 0x25,
+		    0x25} },
+	{0x00, 1, {0xA0} },
+	{0xCD, 15, {0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x01, 0x12,
+		    0x11, 0x05, 0x06, 0x0B, 0x17} },
+	{0x00, 1, {0xB0} },
+	{0xCD, 11, {0x17, 0x02, 0x3D, 0x25, 0x25, 0x25, 0x1F, 0x20, 0x21, 0x25,
+		    0x25} },
+	{0x00, 1, {0x00} },
+	{0xE1, 24, {0x56, 0x56, 0x59, 0x60, 0x64, 0x67, 0x6d, 0x79, 0x7e, 0x8c,
+		    0x93, 0x99, 0x62, 0x5e, 0x5e, 0x4f, 0x3e, 0x2f, 0x24, 0x1d,
+		    0x16, 0x0c, 0x08, 0x04} },
+	{0x00, 1, {0x00} },
+	{0xE2, 24, {0x56, 0x56, 0x59, 0x60, 0x64, 0x67, 0x6d, 0x79, 0x7e, 0x8c,
+		    0x93, 0x99, 0x62, 0x5e, 0x5a, 0x4b, 0x3e, 0x2f, 0x24, 0x1d,
+		    0x16, 0x0c, 0x08, 0x04} },
+	{0x00, 1, {0x00} },
+	{0xE3, 24, {0x53, 0x56, 0x58, 0x5c, 0x61, 0x65, 0x6c, 0x77, 0x7c, 0x8b,
+		    0x93, 0x99, 0x62, 0x5e, 0x5d, 0x4f, 0x3e, 0x2e, 0x24, 0x1d,
+		    0x16, 0x0c, 0x07, 0x04} },
+	{0x00, 1, {0x00} },
+	{0xE4, 24, {0x53, 0x56, 0x58, 0x5c, 0x61, 0x65, 0x6c, 0x77, 0x7c, 0x8b,
+		    0x93, 0x99, 0x62, 0x5e, 0x59, 0x4b, 0x3e, 0x2e, 0x24, 0x1d,
+		    0x16, 0x0c, 0x07, 0x04} },
+	{0x00, 1, {0x00} },
+	{0xE5, 24, {0x20, 0x22, 0x29, 0x35, 0x3f, 0x45, 0x51, 0x63, 0x6e, 0x81,
+		    0x8c, 0x95, 0x64, 0x5f, 0x5e, 0x4e, 0x3e, 0x2e, 0x24, 0x1d,
+		    0x16, 0x0c, 0x07, 0x04} },
+	{0x00, 1, {0x00} },
+	{0xE6, 24, {0x20, 0x22, 0x29, 0x35, 0x3f, 0x45, 0x51, 0x63, 0x6e, 0x81,
+		    0x8c, 0x95, 0x64, 0x5f, 0x5a, 0x4a, 0x3e, 0x2e, 0x24, 0x1d,
+		    0x16, 0x0c, 0x07, 0x04} },
+	{0x00, 1, {0xD4} },
+	{0xC3, 4, {0x01, 0x01, 0x01, 0x01} },
+	{0x00, 1, {0xF7} },
+	{0xC3, 4, {0x03, 0x1B, 0x00, 0x00} },
+	{0x00, 1, {0xF2} },
+	{0xC1, 3, {0x80, 0x0F, 0x0F} },
+	{0x00, 1, {0xC2} },
+	{0xC5, 1, {0x12} },
+	{0x00, 1, {0xA8} },
+	{0xC4, 1, {0x11} },
+	{0x00, 1, {0x00} },
+	{0xFF, 3, {0xFF, 0xFF, 0xFF} },
+};
+
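+/*
+ * Send one init command, prefixing the payload with the command byte.
+ * DCS commands (below 0xB0) are sent as DCS writes; manufacturer
+ * registers at 0xB0 and above go out as generic long writes.
+ */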
+static ssize_t tpv_r_write_buffer(struct mipi_dsi_device *dsi, u8 cmd,
+			       const void *data, size_t len)
+{
+	ssize_t err;
+	size_t size;
+	u8 *tx;
+
+	if (len > 0) {
+		size = len + 1;
+
+		tx = kmalloc(size, GFP_KERNEL);
+		if (!tx)
+			return -ENOMEM;
+
+		tx[0] = cmd;
+		memcpy(&tx[1], data, len);
+	} else {
+		tx = &cmd;
+		size = 1;
+	}
+
+	if (cmd < 0xB0)
+		err = mipi_dsi_dcs_write_buffer(dsi, tx, size);
+	else
+		err = mipi_dsi_generic_write(dsi, tx, size);
+
+	if (len > 0)
+		kfree(tx);
+
+	return err;
+}
+
+static ssize_t tpv_r_push_table(struct mipi_dsi_device *dsi,
+				const struct lcm_init_struct *table,
+				size_t len)
+{
+	ssize_t err = 0;
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		err = tpv_r_write_buffer(dsi, table[i].cmd, table[i].params,
+					 table[i].count);
+		if (err < 0)
+			return err;
+	}
+
+	return err;
+}
+
+struct tpv_otm_panel {
+	struct drm_panel base;
+	struct mipi_dsi_device *dsi;
+
+	struct gpio_desc *reset_gpio;
+	struct gpio_desc *pwr_gpio;
+	struct gpio_desc *pwr2_gpio;
+
+	struct backlight_device *backlight;
+
+	bool prepared;
+	bool enabled;
+
+	const struct drm_display_mode *mode;
+};
+
+static inline struct tpv_otm_panel *to_tpv_otm_panel(struct drm_panel *panel)
+{
+	return container_of(panel, struct tpv_otm_panel, base);
+}
+
+static int tpv_otm_panel_on(struct tpv_otm_panel *tpv_otm)
+{
+	struct mipi_dsi_device *dsi = tpv_otm->dsi;
+	int ret;
+
+	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	ret = tpv_r_push_table(dsi, init_setting, ARRAY_SIZE(init_setting));
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_dcs_set_display_on(dsi);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int tpv_otm_panel_off(struct tpv_otm_panel *tpv_otm)
+{
+	struct mipi_dsi_device *dsi = tpv_otm->dsi;
+	int ret;
+
+	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int tpv_otm_panel_disable(struct drm_panel *panel)
+{
+	struct tpv_otm_panel *tpv_otm = to_tpv_otm_panel(panel);
+
+	if (!tpv_otm->enabled)
+		return 0;
+
+	backlight_disable(tpv_otm->backlight);
+
+	tpv_otm->enabled = false;
+
+	return 0;
+}
+
+static int tpv_otm_panel_unprepare(struct drm_panel *panel)
+{
+	struct tpv_otm_panel *tpv_otm = to_tpv_otm_panel(panel);
+	int ret;
+
+	if (!tpv_otm->prepared)
+		return 0;
+
+	ret = tpv_otm_panel_off(tpv_otm);
+	if (ret < 0) {
+		dev_err(panel->dev, "failed to set panel off: %d\n", ret);
+		return ret;
+	}
+
+	gpiod_set_value(tpv_otm->pwr2_gpio, 0);
+	gpiod_set_value(tpv_otm->pwr_gpio, 0);
+	gpiod_set_value(tpv_otm->reset_gpio, 0);
+
+	tpv_otm->prepared = false;
+
+	return 0;
+}
+
+static int tpv_otm_panel_prepare(struct drm_panel *panel)
+{
+	struct tpv_otm_panel *tpv_otm = to_tpv_otm_panel(panel);
+	int ret;
+
+	if (tpv_otm->prepared)
+		return 0;
+
+	gpiod_set_value(tpv_otm->pwr_gpio, 1);
+	msleep(20);
+	gpiod_set_value(tpv_otm->pwr2_gpio, 1);
+	msleep(20);
+
+	gpiod_set_value(tpv_otm->reset_gpio, 0);
+	msleep(5);
+	gpiod_set_value(tpv_otm->reset_gpio, 1);
+	msleep(20);
+
+	ret = tpv_otm_panel_on(tpv_otm);
+	if (ret < 0) {
+		dev_err(panel->dev, "failed to set panel on: %d\n", ret);
+		goto poweroff;
+	}
+	tpv_otm->prepared = true;
+
+	return 0;
+
+poweroff:
+	gpiod_set_value(tpv_otm->pwr2_gpio, 0);
+	gpiod_set_value(tpv_otm->pwr_gpio, 0);
+	gpiod_set_value(tpv_otm->reset_gpio, 0);
+
+	return ret;
+}
+
+static int tpv_otm_panel_enable(struct drm_panel *panel)
+{
+	struct tpv_otm_panel *tpv_otm = to_tpv_otm_panel(panel);
+
+	if (tpv_otm->enabled)
+		return 0;
+
+	backlight_enable(tpv_otm->backlight);
+
+	tpv_otm->enabled = true;
+
+	return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+	.clock = 134500, /* clock: (htotal * vtotal * 60) / 1000 */
+	.hdisplay = 1080,
+	.hsync_start = 1080 + 40,
+	.hsync_end = 1080 + 40 + 10,
+	.htotal = 1080 + 40 + 10 + 20,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 20,
+	.vsync_end = 1920 + 20 + 2,
+	.vtotal = 1920 + 20 + 2 + 8,
+	.vrefresh = 60,
+};
+
+static int tpv_otm_panel_get_modes(struct drm_panel *panel)
+{
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	if (!mode) {
+		dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+				default_mode.hdisplay, default_mode.vdisplay,
+				default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	drm_mode_probed_add(panel->connector, mode);
+
+	panel->connector->display_info.width_mm = 80;
+	panel->connector->display_info.height_mm = 136;
+
+	return 1;
+}
+
+static const struct drm_panel_funcs tpv_otm_panel_funcs = {
+	.disable = tpv_otm_panel_disable,
+	.unprepare = tpv_otm_panel_unprepare,
+	.prepare = tpv_otm_panel_prepare,
+	.enable = tpv_otm_panel_enable,
+	.get_modes = tpv_otm_panel_get_modes,
+};
+
+static int tpv_otm_panel_add(struct tpv_otm_panel *tpv_otm)
+{
+	struct device *dev = &tpv_otm->dsi->dev;
+
+	tpv_otm->mode = &default_mode;
+	tpv_otm->reset_gpio = devm_gpiod_get(dev, "reset",
+					      GPIOD_OUT_LOW);
+	if (IS_ERR(tpv_otm->reset_gpio)) {
+		dev_err(dev, "cannot get reset-gpios %ld\n",
+			PTR_ERR(tpv_otm->reset_gpio));
+		return PTR_ERR(tpv_otm->reset_gpio);
+	}
+
+	tpv_otm->pwr_gpio = devm_gpiod_get(dev, "pwr",
+					    GPIOD_OUT_LOW);
+	if (IS_ERR(tpv_otm->pwr_gpio)) {
+		dev_err(dev, "cannot get pwr-gpios %ld\n",
+			PTR_ERR(tpv_otm->pwr_gpio));
+		return PTR_ERR(tpv_otm->pwr_gpio);
+	}
+
+	tpv_otm->pwr2_gpio = devm_gpiod_get(dev, "pwr2",
+					     GPIOD_OUT_LOW);
+	if (IS_ERR(tpv_otm->pwr2_gpio)) {
+		dev_err(dev, "cannot get pwr2-gpios %ld\n",
+			PTR_ERR(tpv_otm->pwr2_gpio));
+		return PTR_ERR(tpv_otm->pwr2_gpio);
+	}
+
+	tpv_otm->backlight = devm_of_find_backlight(dev);
+	if (IS_ERR(tpv_otm->backlight)) {
+		dev_err(dev, "failed to get backlight\n");
+		return PTR_ERR(tpv_otm->backlight);
+	}
+
+	drm_panel_init(&tpv_otm->base);
+	tpv_otm->base.funcs = &tpv_otm_panel_funcs;
+	tpv_otm->base.dev = &tpv_otm->dsi->dev;
+
+	return drm_panel_add(&tpv_otm->base);
+}
+
+static void tpv_otm_panel_del(struct tpv_otm_panel *tpv_otm)
+{
+	if (tpv_otm->base.dev)
+		drm_panel_remove(&tpv_otm->base);
+}
+
+static int tpv_otm_panel_probe(struct mipi_dsi_device *dsi)
+{
+	struct tpv_otm_panel *tpv_otm;
+	int ret;
+
+	dsi->lanes = 4;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+			MIPI_DSI_MODE_VIDEO_HSE |
+			MIPI_DSI_CLOCK_NON_CONTINUOUS |
+			MIPI_DSI_MODE_EOT_PACKET;
+
+	tpv_otm = devm_kzalloc(&dsi->dev, sizeof(*tpv_otm), GFP_KERNEL);
+	if (!tpv_otm)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, tpv_otm);
+	tpv_otm->dsi = dsi;
+
+	ret = tpv_otm_panel_add(tpv_otm);
+	if (ret < 0)
+		return ret;
+
+	return mipi_dsi_attach(dsi);
+}
+
+static int tpv_otm_panel_remove(struct mipi_dsi_device *dsi)
+{
+	struct tpv_otm_panel *tpv_otm = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	ret = tpv_otm_panel_disable(&tpv_otm->base);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
+
+	ret = mipi_dsi_detach(dsi);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+	tpv_otm_panel_del(tpv_otm);
+
+	return 0;
+}
+
+static void tpv_otm_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct tpv_otm_panel *tpv_otm = mipi_dsi_get_drvdata(dsi);
+
+	tpv_otm_panel_disable(&tpv_otm->base);
+}
+
+static const struct of_device_id tpv_otm_of_match[] = {
+	{ .compatible = "tpv,otm1901a", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, tpv_otm_of_match);
+
+static struct mipi_dsi_driver tpv_otm_panel_driver = {
+	.driver = {
+		.name = "panel-tpv-otm1901a",
+		.of_match_table = tpv_otm_of_match,
+	},
+	.probe = tpv_otm_panel_probe,
+	.remove = tpv_otm_panel_remove,
+	.shutdown = tpv_otm_panel_shutdown,
+};
+module_mipi_dsi_driver(tpv_otm_panel_driver);
+
+MODULE_AUTHOR("Pedro Tsai <pedro.tsai@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-truly-r63350a.c b/drivers/gpu/drm/panel/panel-truly-r63350a.c
new file mode 100644
index 0000000..ad55d70
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-truly-r63350a.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Mediatek
+ * Author: Andrew Perepech <andrew.perepech@mediatek.com>
+ *
+ * Based on panel-sharp-nt35532 driver.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct lcm_init_struct {
+	u8 cmd;
+	u8 count;
+	u8 params[64];
+};
+
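+/* R63350A init sequence, {command, count, params}, from vendor LCM code. */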
+static const struct lcm_init_struct lcm_init[] = {
+	{0xB0, 1, {0x00} },
+	{0xD6, 1, {0x01} },
+	{0xB3, 6, {0x14, 0x00, 0x00, 0x00, 0x00, 0x00} },
+	{0xB4, 2, {0x0C, 0x00} },
+	{0xB6, 3, {0x4B, 0xDB, 0x16} },/* 0xCB,0x16 */
+	{0xBE, 2, {0x00, 0x04} },
+	{0xC0, 1, {0x00} },
+	{0xC1, 34, {0x04, 0x60, 0x00, 0x20, 0xA9, 0x30, 0x20, 0x63,
+				0xF0, 0xFF, 0xFF, 0x9B, 0x7B, 0xCF, 0xB5, 0xFF,
+				0xFF, 0x87, 0x8C, 0x41, 0x22, 0x54, 0x02, 0x00,
+				0x00, 0x00, 0x00, 0x00, 0x22, 0x33, 0x03, 0x22,
+				0x00, 0xFF} },
+	{0xC2, 8, {0x31, 0xf7, 0x80, 0x06, 0x04, 0x00, 0x00, 0x08} },
+	{0xC3, 3, {0x00, 0x00, 0x00} },
+	{0xC4, 11, {0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				0x00, 0x00, 0x02} },
+	/* reduce noise*/
+	{0xC5, 1, {0x00} },
+	{0xC6, 21, {0xC8, 0x3C, 0x3C, 0x07, 0x01, 0x07, 0x01, 0x00,
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				0x00, 0x0E, 0x1A, 0x07, 0xC8} },
+	{0xC7, 30, {0x03, 0x15, 0x1F, 0x2A, 0x39, 0x46, 0x4E, 0x5B,
+				0x3D, 0x45, 0x52, 0x5F, 0x68, 0x6D, 0x72, 0x01,
+				0x15, 0x1F, 0x2A, 0x39, 0x46, 0x4E, 0x5B, 0x3D,
+				0x45, 0x52, 0x5F, 0x68, 0x6D, 0x78} },
+	{0xCB, 15, {0xFF, 0xE1, 0x87, 0xFF, 0x00, 0x00, 0x00, 0x00,
+				0xFF, 0xE1, 0x87, 0xFF, 0xE8, 0x00, 0x00} },
+	{0xCC, 1, {0x34} },
+	{0xD0, 10, {0x11, 0x00, 0x00, 0x56, 0xD5, 0x40, 0x19, 0x19,
+				0x09, 0x00} },
+	{0xD1, 4, {0x00, 0x48, 0x16, 0x0F} },
+	{0xD2, 3, {0x5C, 0x00, 0x00} },
+	{0xD3, 26, {0x1B, 0x33, 0xBB, 0xBB, 0xB3, 0x33, 0x33, 0x33,
+				0x33, 0x00, 0x01, 0x00, 0x00, 0xD8, 0xA0, 0x0C,
+				0x4D, 0x4D, 0x33, 0x33, 0x72, 0x12, 0x8A, 0x57,
+				0x3D, 0xBC} },
+	{0xD5, 7, {0x06, 0x00, 0x00, 0x01, 0x39, 0x01, 0x39} },
+	{0xD8, 3, {0x00, 0x00, 0x00} },
+	{0xD9, 3, {0x00, 0x00, 0x00} },
+	{0xFD, 4, {0x00, 0x00, 0x00, 0x30} },
+	{0x35, 1, {0x00} },
+	/* Test revert */
+	/* {0x36, 1, {0xC0} }, */
+	/* Write display brightness */
+	{0x51, 1, {0xff} },
+	/* Write CTRL display */
+	{0x53, 1, {0x24} },
+	/* Write CABC: off */
+	{0x55, 1, {0x00} },
+};
+
+struct truly_r_panel {
+	struct drm_panel base;
+	struct mipi_dsi_device *dsi;
+
+	struct gpio_desc *reset_gpio;
+	struct gpio_desc *pwr_gpio;
+	struct gpio_desc *pwr2_gpio;
+
+	struct backlight_device *backlight;
+
+	bool prepared;
+	bool enabled;
+
+	const struct drm_display_mode *mode;
+};
+
+static inline struct truly_r_panel *to_truly_r_panel(struct drm_panel *panel)
+{
+	return container_of(panel, struct truly_r_panel, base);
+}
+
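+/*
+ * As in the TPV OTM1901A driver: DCS commands below 0xB0 are sent as DCS
+ * writes, manufacturer registers at 0xB0 and above as generic long writes.
+ */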
+static ssize_t truly_r_write_buffer(struct mipi_dsi_device *dsi, u8 cmd,
+			       const void *data, size_t len)
+{
+	ssize_t err;
+	size_t size;
+	u8 *tx;
+
+	if (len > 0) {
+		size = len + 1;
+
+		tx = kmalloc(size, GFP_KERNEL);
+		if (!tx)
+			return -ENOMEM;
+
+		tx[0] = cmd;
+		memcpy(&tx[1], data, len);
+	} else {
+		tx = &cmd;
+		size = 1;
+	}
+
+	if (cmd < 0xB0)
+		err = mipi_dsi_dcs_write_buffer(dsi, tx, size);
+	else
+		err = mipi_dsi_generic_write(dsi, tx, size);
+
+	if (len > 0)
+		kfree(tx);
+
+	return err;
+}
+
+static ssize_t truly_r_push_table(struct mipi_dsi_device *dsi,
+				  const struct lcm_init_struct *table,
+				  size_t len)
+{
+	ssize_t err = 0;
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		err = truly_r_write_buffer(dsi, table[i].cmd,
+					   table[i].params, table[i].count);
+		if (err < 0)
+			return err;
+	}
+
+	return err;
+}
+
+static int truly_r_panel_on(struct truly_r_panel *truly_r)
+{
+	struct mipi_dsi_device *dsi = truly_r->dsi;
+	int ret;
+
+	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	ret = truly_r_push_table(dsi, lcm_init, ARRAY_SIZE(lcm_init));
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_dcs_set_display_on(dsi);
+	if (ret < 0)
+		return ret;
+
+	msleep(50);
+
+	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+	if (ret < 0)
+		return ret;
+
+	msleep(120);
+
+	return 0;
+}
+
+static int truly_r_panel_off(struct truly_r_panel *truly_r)
+{
+	struct mipi_dsi_device *dsi = truly_r->dsi;
+	int ret;
+
+	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int truly_r_panel_disable(struct drm_panel *panel)
+{
+	struct truly_r_panel *truly_r = to_truly_r_panel(panel);
+
+	if (!truly_r->enabled)
+		return 0;
+
+	backlight_disable(truly_r->backlight);
+
+	truly_r->enabled = false;
+
+	return 0;
+}
+
+static int truly_r_panel_unprepare(struct drm_panel *panel)
+{
+	struct truly_r_panel *truly_r = to_truly_r_panel(panel);
+	int ret;
+
+	if (!truly_r->prepared)
+		return 0;
+
+	ret = truly_r_panel_off(truly_r);
+	if (ret < 0) {
+		dev_err(panel->dev, "failed to set panel off: %d\n", ret);
+		return ret;
+	}
+
+	gpiod_set_value(truly_r->pwr2_gpio, 0);
+	gpiod_set_value(truly_r->pwr_gpio, 0);
+	gpiod_set_value(truly_r->reset_gpio, 0);
+
+	truly_r->prepared = false;
+
+	return 0;
+}
+
+static int truly_r_panel_prepare(struct drm_panel *panel)
+{
+	struct truly_r_panel *truly_r = to_truly_r_panel(panel);
+	int ret;
+
+	if (truly_r->prepared)
+		return 0;
+
+	gpiod_set_value(truly_r->pwr_gpio, 1);
+	gpiod_set_value(truly_r->pwr2_gpio, 1);
+
+	msleep(20);
+
+	gpiod_set_value(truly_r->reset_gpio, 1);
+	msleep(1);
+	gpiod_set_value(truly_r->reset_gpio, 0);
+	msleep(1);
+	gpiod_set_value(truly_r->reset_gpio, 1);
+	msleep(10);
+
+	ret = truly_r_panel_on(truly_r);
+	if (ret < 0) {
+		dev_err(panel->dev, "failed to set panel on: %d\n", ret);
+		goto poweroff;
+	}
+
+	truly_r->prepared = true;
+
+	return 0;
+
+poweroff:
+	gpiod_set_value(truly_r->pwr2_gpio, 0);
+	gpiod_set_value(truly_r->pwr_gpio, 0);
+	gpiod_set_value(truly_r->reset_gpio, 0);
+
+	return ret;
+}
+
+static int truly_r_panel_enable(struct drm_panel *panel)
+{
+	struct truly_r_panel *truly_r = to_truly_r_panel(panel);
+
+	if (truly_r->enabled)
+		return 0;
+
+	backlight_enable(truly_r->backlight);
+
+	truly_r->enabled = true;
+
+	return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+	.clock = 145200, /* 1250 * 1936 * 60 / 1000 */
+	.hdisplay = 1080,
+	.hsync_start = 1080 + 90,
+	.hsync_end = 1080 + 90 + 20,
+	.htotal = 1080 + 90 + 20 + 60,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 10,
+	.vsync_end = 1920 + 10 + 2,
+	.vtotal = 1920 + 10 + 2 + 4,
+	.vrefresh = 60,
+};
+
+static int truly_r_panel_get_modes(struct drm_panel *panel)
+{
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	if (!mode) {
+		dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+				default_mode.hdisplay, default_mode.vdisplay,
+				default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	drm_mode_probed_add(panel->connector, mode);
+
+	panel->connector->display_info.width_mm = 68;
+	panel->connector->display_info.height_mm = 120;
+
+	return 1;
+}
+
+static const struct drm_panel_funcs truly_r_panel_funcs = {
+	.disable = truly_r_panel_disable,
+	.unprepare = truly_r_panel_unprepare,
+	.prepare = truly_r_panel_prepare,
+	.enable = truly_r_panel_enable,
+	.get_modes = truly_r_panel_get_modes,
+};
+
+static int truly_r_panel_add(struct truly_r_panel *truly_r)
+{
+	struct device *dev = &truly_r->dsi->dev;
+
+	truly_r->mode = &default_mode;
+	truly_r->reset_gpio = devm_gpiod_get(dev, "reset",
+					      GPIOD_OUT_LOW);
+	if (IS_ERR(truly_r->reset_gpio)) {
+		dev_err(dev, "cannot get reset-gpios %ld\n",
+			PTR_ERR(truly_r->reset_gpio));
+		return PTR_ERR(truly_r->reset_gpio);
+	}
+
+	truly_r->pwr_gpio = devm_gpiod_get(dev, "pwr",
+					    GPIOD_OUT_LOW);
+	if (IS_ERR(truly_r->pwr_gpio)) {
+		dev_err(dev, "cannot get pwr-gpios %ld\n",
+			PTR_ERR(truly_r->pwr_gpio));
+		return PTR_ERR(truly_r->pwr_gpio);
+	}
+
+	truly_r->pwr2_gpio = devm_gpiod_get(dev, "pwr2",
+					     GPIOD_OUT_LOW);
+	if (IS_ERR(truly_r->pwr2_gpio)) {
+		dev_err(dev, "cannot get pwr2-gpios %ld\n",
+			PTR_ERR(truly_r->pwr2_gpio));
+		return PTR_ERR(truly_r->pwr2_gpio);
+	}
+
+	truly_r->backlight = devm_of_find_backlight(dev);
+	if (IS_ERR(truly_r->backlight)) {
+		dev_err(dev, "failed to get backlight\n");
+		return PTR_ERR(truly_r->backlight);
+	}
+
+	drm_panel_init(&truly_r->base);
+	truly_r->base.funcs = &truly_r_panel_funcs;
+	truly_r->base.dev = &truly_r->dsi->dev;
+
+	return drm_panel_add(&truly_r->base);
+}
+
+static void truly_r_panel_del(struct truly_r_panel *truly_r)
+{
+	if (truly_r->base.dev)
+		drm_panel_remove(&truly_r->base);
+}
+
+static int truly_r_panel_probe(struct mipi_dsi_device *dsi)
+{
+	struct truly_r_panel *truly_r;
+	int ret;
+
+	dsi->lanes = 4;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+			MIPI_DSI_MODE_VIDEO_HSE |
+			MIPI_DSI_CLOCK_NON_CONTINUOUS |
+			MIPI_DSI_MODE_EOT_PACKET;
+
+	truly_r = devm_kzalloc(&dsi->dev, sizeof(*truly_r), GFP_KERNEL);
+	if (!truly_r)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, truly_r);
+	truly_r->dsi = dsi;
+
+	ret = truly_r_panel_add(truly_r);
+	if (ret < 0)
+		return ret;
+
+	return mipi_dsi_attach(dsi);
+}
+
+static int truly_r_panel_remove(struct mipi_dsi_device *dsi)
+{
+	struct truly_r_panel *truly_r = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	ret = truly_r_panel_disable(&truly_r->base);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
+
+	ret = mipi_dsi_detach(dsi);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+	truly_r_panel_del(truly_r);
+
+	return 0;
+}
+
+static void truly_r_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct truly_r_panel *truly_r = mipi_dsi_get_drvdata(dsi);
+
+	truly_r_panel_disable(&truly_r->base);
+}
+
+static const struct of_device_id truly_r_of_match[] = {
+	{ .compatible = "truly,r63350a", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, truly_r_of_match);
+
+static struct mipi_dsi_driver truly_r_panel_driver = {
+	.driver = {
+		.name = "panel-truly-r63350a",
+		.of_match_table = truly_r_of_match,
+	},
+	.probe = truly_r_panel_probe,
+	.remove = truly_r_panel_remove,
+	.shutdown = truly_r_panel_shutdown,
+};
+module_mipi_dsi_driver(truly_r_panel_driver);
+
+MODULE_AUTHOR("Andrew Perepech <andrew.perepech@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 2bb4d20..745b0d0 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 
 #define I2C_RS_TRANSFER			(1 << 4)
+#define I2C_ARB_LOST			(1 << 3)
 #define I2C_HS_NACKERR			(1 << 2)
 #define I2C_ACKERR			(1 << 1)
 #define I2C_TRANSAC_COMP		(1 << 0)
@@ -76,6 +77,8 @@
 #define I2C_CONTROL_DIR_CHANGE          (0x1 << 4)
 #define I2C_CONTROL_ACKERR_DET_EN       (0x1 << 5)
 #define I2C_CONTROL_TRANSFER_LEN_CHANGE (0x1 << 6)
+#define I2C_CONTROL_DMAACK_EN           (0x1 << 8)
+#define I2C_CONTROL_ASYNC_MODE          (0x1 << 9)
 #define I2C_CONTROL_WRAPPER             (0x1 << 0)
 
 #define I2C_DRV_NAME		"i2c-mt65xx"
@@ -106,40 +109,97 @@
 };
 
 enum I2C_REGS_OFFSET {
-	OFFSET_DATA_PORT = 0x0,
-	OFFSET_SLAVE_ADDR = 0x04,
-	OFFSET_INTR_MASK = 0x08,
-	OFFSET_INTR_STAT = 0x0c,
-	OFFSET_CONTROL = 0x10,
-	OFFSET_TRANSFER_LEN = 0x14,
-	OFFSET_TRANSAC_LEN = 0x18,
-	OFFSET_DELAY_LEN = 0x1c,
-	OFFSET_TIMING = 0x20,
-	OFFSET_START = 0x24,
-	OFFSET_EXT_CONF = 0x28,
-	OFFSET_FIFO_STAT = 0x30,
-	OFFSET_FIFO_THRESH = 0x34,
-	OFFSET_FIFO_ADDR_CLR = 0x38,
-	OFFSET_IO_CONFIG = 0x40,
-	OFFSET_RSV_DEBUG = 0x44,
-	OFFSET_HS = 0x48,
-	OFFSET_SOFTRESET = 0x50,
-	OFFSET_DCM_EN = 0x54,
-	OFFSET_PATH_DIR = 0x60,
-	OFFSET_DEBUGSTAT = 0x64,
-	OFFSET_DEBUGCTRL = 0x68,
-	OFFSET_TRANSFER_LEN_AUX = 0x6c,
-	OFFSET_CLOCK_DIV = 0x70,
+	OFFSET_DATA_PORT,
+	OFFSET_SLAVE_ADDR,
+	OFFSET_INTR_MASK,
+	OFFSET_INTR_STAT,
+	OFFSET_CONTROL,
+	OFFSET_TRANSFER_LEN,
+	OFFSET_TRANSAC_LEN,
+	OFFSET_DELAY_LEN,
+	OFFSET_TIMING,
+	OFFSET_START,
+	OFFSET_EXT_CONF,
+	OFFSET_FIFO_STAT,
+	OFFSET_FIFO_THRESH,
+	OFFSET_FIFO_ADDR_CLR,
+	OFFSET_IO_CONFIG,
+	OFFSET_RSV_DEBUG,
+	OFFSET_HS,
+	OFFSET_SOFTRESET,
+	OFFSET_DCM_EN,
+	OFFSET_PATH_DIR,
+	OFFSET_DEBUGSTAT,
+	OFFSET_DEBUGCTRL,
+	OFFSET_TRANSFER_LEN_AUX,
+	OFFSET_CLOCK_DIV,
+	OFFSET_LTIMING,
+};
+
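+/*
+ * Per-generation register maps: the enum above is a logical index that
+ * mtk_i2c_readw()/mtk_i2c_writew() translate through the table selected
+ * by dev_comp->regs, so one driver handles both the legacy (v1) and the
+ * MT8183-style (v2) register layouts.
+ */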
+static const u16 mt_i2c_regs_v1[] = {
+	[OFFSET_DATA_PORT] = 0x0,
+	[OFFSET_SLAVE_ADDR] = 0x4,
+	[OFFSET_INTR_MASK] = 0x8,
+	[OFFSET_INTR_STAT] = 0xc,
+	[OFFSET_CONTROL] = 0x10,
+	[OFFSET_TRANSFER_LEN] = 0x14,
+	[OFFSET_TRANSAC_LEN] = 0x18,
+	[OFFSET_DELAY_LEN] = 0x1c,
+	[OFFSET_TIMING] = 0x20,
+	[OFFSET_START] = 0x24,
+	[OFFSET_EXT_CONF] = 0x28,
+	[OFFSET_FIFO_STAT] = 0x30,
+	[OFFSET_FIFO_THRESH] = 0x34,
+	[OFFSET_FIFO_ADDR_CLR] = 0x38,
+	[OFFSET_IO_CONFIG] = 0x40,
+	[OFFSET_RSV_DEBUG] = 0x44,
+	[OFFSET_HS] = 0x48,
+	[OFFSET_SOFTRESET] = 0x50,
+	[OFFSET_DCM_EN] = 0x54,
+	[OFFSET_PATH_DIR] = 0x60,
+	[OFFSET_DEBUGSTAT] = 0x64,
+	[OFFSET_DEBUGCTRL] = 0x68,
+	[OFFSET_TRANSFER_LEN_AUX] = 0x6c,
+	[OFFSET_CLOCK_DIV] = 0x70,
+};
+
+static const u16 mt_i2c_regs_v2[] = {
+	[OFFSET_DATA_PORT] = 0x0,
+	[OFFSET_SLAVE_ADDR] = 0x4,
+	[OFFSET_INTR_MASK] = 0x8,
+	[OFFSET_INTR_STAT] = 0xc,
+	[OFFSET_CONTROL] = 0x10,
+	[OFFSET_TRANSFER_LEN] = 0x14,
+	[OFFSET_TRANSAC_LEN] = 0x18,
+	[OFFSET_DELAY_LEN] = 0x1c,
+	[OFFSET_TIMING] = 0x20,
+	[OFFSET_START] = 0x24,
+	[OFFSET_EXT_CONF] = 0x28,
+	[OFFSET_LTIMING] = 0x2c,
+	[OFFSET_HS] = 0x30,
+	[OFFSET_IO_CONFIG] = 0x34,
+	[OFFSET_FIFO_ADDR_CLR] = 0x38,
+	[OFFSET_TRANSFER_LEN_AUX] = 0x44,
+	[OFFSET_CLOCK_DIV] = 0x48,
+	[OFFSET_SOFTRESET] = 0x50,
+	[OFFSET_DEBUGSTAT] = 0xe0,
+	[OFFSET_DEBUGCTRL] = 0xe8,
+	[OFFSET_FIFO_STAT] = 0xf4,
+	[OFFSET_FIFO_THRESH] = 0xf8,
+	[OFFSET_DCM_EN] = 0xf88,
 };
 
 struct mtk_i2c_compatible {
 	const struct i2c_adapter_quirks *quirks;
+	const u16 *regs;
 	unsigned char pmic_i2c: 1;
 	unsigned char dcm: 1;
 	unsigned char auto_restart: 1;
 	unsigned char aux_len_reg: 1;
 	unsigned char support_33bits: 1;
 	unsigned char timing_adjust: 1;
+	unsigned char dma_sync: 1;
+	unsigned char ltiming_adjust: 1;
 };
 
 struct mtk_i2c {
@@ -153,6 +213,7 @@
 	struct clk *clk_main;		/* main clock for i2c bus */
 	struct clk *clk_dma;		/* DMA clock for i2c via DMA */
 	struct clk *clk_pmic;		/* PMIC clock for i2c from PMIC */
+	struct clk *clk_arb;		/* Arbitrator clock for i2c */
 	bool have_pmic;			/* can use i2c pins from PMIC */
 	bool use_push_pull;		/* IO config push-pull mode */
 
@@ -162,6 +223,7 @@
 	enum mtk_trans_op op;
 	u16 timing_reg;
 	u16 high_speed_reg;
+	u16 ltiming_reg;
 	unsigned char auto_restart;
 	bool ignore_restart_irq;
 	const struct mtk_i2c_compatible *dev_comp;
@@ -181,51 +243,78 @@
 };
 
 static const struct mtk_i2c_compatible mt2712_compat = {
+	.regs = mt_i2c_regs_v1,
 	.pmic_i2c = 0,
 	.dcm = 1,
 	.auto_restart = 1,
 	.aux_len_reg = 1,
 	.support_33bits = 1,
 	.timing_adjust = 1,
+	.dma_sync = 0,
+	.ltiming_adjust = 0,
 };
 
 static const struct mtk_i2c_compatible mt6577_compat = {
 	.quirks = &mt6577_i2c_quirks,
+	.regs = mt_i2c_regs_v1,
 	.pmic_i2c = 0,
 	.dcm = 1,
 	.auto_restart = 0,
 	.aux_len_reg = 0,
 	.support_33bits = 0,
 	.timing_adjust = 0,
+	.dma_sync = 0,
+	.ltiming_adjust = 0,
 };
 
 static const struct mtk_i2c_compatible mt6589_compat = {
 	.quirks = &mt6577_i2c_quirks,
+	.regs = mt_i2c_regs_v1,
 	.pmic_i2c = 1,
 	.dcm = 0,
 	.auto_restart = 0,
 	.aux_len_reg = 0,
 	.support_33bits = 0,
 	.timing_adjust = 0,
+	.dma_sync = 0,
+	.ltiming_adjust = 0,
 };
 
 static const struct mtk_i2c_compatible mt7622_compat = {
 	.quirks = &mt7622_i2c_quirks,
+	.regs = mt_i2c_regs_v1,
 	.pmic_i2c = 0,
 	.dcm = 1,
 	.auto_restart = 1,
 	.aux_len_reg = 1,
 	.support_33bits = 0,
 	.timing_adjust = 0,
+	.dma_sync = 0,
+	.ltiming_adjust = 0,
 };
 
 static const struct mtk_i2c_compatible mt8173_compat = {
+	.regs = mt_i2c_regs_v1,
 	.pmic_i2c = 0,
 	.dcm = 1,
 	.auto_restart = 1,
 	.aux_len_reg = 1,
 	.support_33bits = 1,
 	.timing_adjust = 0,
+	.dma_sync = 0,
+	.ltiming_adjust = 0,
+};
+
+static const struct mtk_i2c_compatible mt8183_compat = {
+	.regs = mt_i2c_regs_v2,
+	.pmic_i2c = 0,
+	.dcm = 0,
+	.auto_restart = 1,
+	.aux_len_reg = 1,
+	.support_33bits = 1,
+	.timing_adjust = 1,
+	.dma_sync = 1,
+	.ltiming_adjust = 1,
 };
 
 static const struct of_device_id mtk_i2c_of_match[] = {
@@ -234,10 +323,22 @@
 	{ .compatible = "mediatek,mt6589-i2c", .data = &mt6589_compat },
 	{ .compatible = "mediatek,mt7622-i2c", .data = &mt7622_compat },
 	{ .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat },
+	{ .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat },
 	{}
 };
 MODULE_DEVICE_TABLE(of, mtk_i2c_of_match);
 
+static u16 mtk_i2c_readw(struct mtk_i2c *i2c, enum I2C_REGS_OFFSET reg)
+{
+	return readw(i2c->base + i2c->dev_comp->regs[reg]);
+}
+
+static void mtk_i2c_writew(struct mtk_i2c *i2c, u16 val,
+			   enum I2C_REGS_OFFSET reg)
+{
+	writew(val, i2c->base + i2c->dev_comp->regs[reg]);
+}
+
 static int mtk_i2c_clock_enable(struct mtk_i2c *i2c)
 {
 	int ret;
@@ -255,8 +356,18 @@
 		if (ret)
 			goto err_pmic;
 	}
+
+	if (i2c->clk_arb) {
+		ret = clk_prepare_enable(i2c->clk_arb);
+		if (ret)
+			goto err_arb;
+	}
+
 	return 0;
 
+err_arb:
+	if (i2c->have_pmic)
+		clk_disable_unprepare(i2c->clk_pmic);
 err_pmic:
 	clk_disable_unprepare(i2c->clk_main);
 err_main:
@@ -267,6 +378,9 @@
 
 static void mtk_i2c_clock_disable(struct mtk_i2c *i2c)
 {
+	if (i2c->clk_arb)
+		clk_disable_unprepare(i2c->clk_arb);
+
 	if (i2c->have_pmic)
 		clk_disable_unprepare(i2c->clk_pmic);
 
@@ -278,31 +392,36 @@
 {
 	u16 control_reg;
 
-	writew(I2C_SOFT_RST, i2c->base + OFFSET_SOFTRESET);
+	mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
 
 	/* Set ioconfig */
 	if (i2c->use_push_pull)
-		writew(I2C_IO_CONFIG_PUSH_PULL, i2c->base + OFFSET_IO_CONFIG);
+		mtk_i2c_writew(i2c, I2C_IO_CONFIG_PUSH_PULL, OFFSET_IO_CONFIG);
 	else
-		writew(I2C_IO_CONFIG_OPEN_DRAIN, i2c->base + OFFSET_IO_CONFIG);
+		mtk_i2c_writew(i2c, I2C_IO_CONFIG_OPEN_DRAIN, OFFSET_IO_CONFIG);
 
 	if (i2c->dev_comp->dcm)
-		writew(I2C_DCM_DISABLE, i2c->base + OFFSET_DCM_EN);
+		mtk_i2c_writew(i2c, I2C_DCM_DISABLE, OFFSET_DCM_EN);
 
 	if (i2c->dev_comp->timing_adjust)
-		writew(I2C_DEFAULT_CLK_DIV - 1, i2c->base + OFFSET_CLOCK_DIV);
+		mtk_i2c_writew(i2c, I2C_DEFAULT_CLK_DIV - 1, OFFSET_CLOCK_DIV);
 
-	writew(i2c->timing_reg, i2c->base + OFFSET_TIMING);
-	writew(i2c->high_speed_reg, i2c->base + OFFSET_HS);
+	mtk_i2c_writew(i2c, i2c->timing_reg, OFFSET_TIMING);
+	mtk_i2c_writew(i2c, i2c->high_speed_reg, OFFSET_HS);
+	if (i2c->dev_comp->ltiming_adjust)
+		mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING);
 
 	/* If use i2c pin from PMIC mt6397 side, need set PATH_DIR first */
 	if (i2c->have_pmic)
-		writew(I2C_CONTROL_WRAPPER, i2c->base + OFFSET_PATH_DIR);
+		mtk_i2c_writew(i2c, I2C_CONTROL_WRAPPER, OFFSET_PATH_DIR);
 
 	control_reg = I2C_CONTROL_ACKERR_DET_EN |
 		      I2C_CONTROL_CLK_EXT_EN | I2C_CONTROL_DMA_EN;
-	writew(control_reg, i2c->base + OFFSET_CONTROL);
-	writew(I2C_DELAY_LEN, i2c->base + OFFSET_DELAY_LEN);
+	if (i2c->dev_comp->dma_sync)
+		control_reg |= I2C_CONTROL_DMAACK_EN | I2C_CONTROL_ASYNC_MODE;
+
+	mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
+	mtk_i2c_writew(i2c, I2C_DELAY_LEN, OFFSET_DELAY_LEN);
 
 	writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
 	udelay(50);
@@ -390,6 +509,8 @@
 	unsigned int clk_src;
 	unsigned int step_cnt;
 	unsigned int sample_cnt;
+	unsigned int l_step_cnt;
+	unsigned int l_sample_cnt;
 	unsigned int target_speed;
 	int ret;
 
@@ -399,11 +520,11 @@
 	if (target_speed > MAX_FS_MODE_SPEED) {
 		/* Set master code speed register */
 		ret = mtk_i2c_calculate_speed(i2c, clk_src, MAX_FS_MODE_SPEED,
-					      &step_cnt, &sample_cnt);
+					      &l_step_cnt, &l_sample_cnt);
 		if (ret < 0)
 			return ret;
 
-		i2c->timing_reg = (sample_cnt << 8) | step_cnt;
+		i2c->timing_reg = (l_sample_cnt << 8) | l_step_cnt;
 
 		/* Set the high speed mode register */
 		ret = mtk_i2c_calculate_speed(i2c, clk_src, target_speed,
@@ -413,6 +534,10 @@
 
 		i2c->high_speed_reg = I2C_TIME_DEFAULT_VALUE |
 			(sample_cnt << 12) | (step_cnt << 8);
+
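+		/*
+		 * With ltiming_adjust, LTIMING carries both timing pairs:
+		 * the FS-mode sample/step counts in the low bits and the
+		 * high-speed counts shifted up, mirroring TIMING and HS.
+		 */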
+		if (i2c->dev_comp->ltiming_adjust)
+			i2c->ltiming_reg = (l_sample_cnt << 6) | l_step_cnt |
+					   (sample_cnt << 12) | (step_cnt << 9);
 	} else {
 		ret = mtk_i2c_calculate_speed(i2c, clk_src, target_speed,
 					      &step_cnt, &sample_cnt);
@@ -423,6 +548,9 @@
 
 		/* Disable the high speed transaction */
 		i2c->high_speed_reg = I2C_TIME_CLR_VALUE;
+
+		if (i2c->dev_comp->ltiming_adjust)
+			i2c->ltiming_reg = (sample_cnt << 6) | step_cnt;
 	}
 
 	return 0;
@@ -454,48 +582,49 @@
 
 	reinit_completion(&i2c->msg_complete);
 
-	control_reg = readw(i2c->base + OFFSET_CONTROL) &
+	control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
 			~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
-	if ((i2c->speed_hz > 400000) || (left_num >= 1))
+	if ((i2c->speed_hz > MAX_FS_MODE_SPEED) || (left_num >= 1))
 		control_reg |= I2C_CONTROL_RS;
 
 	if (i2c->op == I2C_MASTER_WRRD)
 		control_reg |= I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS;
 
-	writew(control_reg, i2c->base + OFFSET_CONTROL);
+	mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
 
 	/* set start condition */
-	if (i2c->speed_hz <= 100000)
-		writew(I2C_ST_START_CON, i2c->base + OFFSET_EXT_CONF);
+	if (i2c->speed_hz <= I2C_DEFAULT_SPEED)
+		mtk_i2c_writew(i2c, I2C_ST_START_CON, OFFSET_EXT_CONF);
 	else
-		writew(I2C_FS_START_CON, i2c->base + OFFSET_EXT_CONF);
+		mtk_i2c_writew(i2c, I2C_FS_START_CON, OFFSET_EXT_CONF);
 
 	addr_reg = i2c_8bit_addr_from_msg(msgs);
-	writew(addr_reg, i2c->base + OFFSET_SLAVE_ADDR);
+	mtk_i2c_writew(i2c, addr_reg, OFFSET_SLAVE_ADDR);
 
 	/* Clear interrupt status */
-	writew(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
-	       I2C_TRANSAC_COMP, i2c->base + OFFSET_INTR_STAT);
-	writew(I2C_FIFO_ADDR_CLR, i2c->base + OFFSET_FIFO_ADDR_CLR);
+	mtk_i2c_writew(i2c, restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
+			    I2C_ARB_LOST | I2C_TRANSAC_COMP, OFFSET_INTR_STAT);
+
+	mtk_i2c_writew(i2c, I2C_FIFO_ADDR_CLR, OFFSET_FIFO_ADDR_CLR);
 
 	/* Enable interrupt */
-	writew(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
-	       I2C_TRANSAC_COMP, i2c->base + OFFSET_INTR_MASK);
+	mtk_i2c_writew(i2c, restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
+			    I2C_ARB_LOST | I2C_TRANSAC_COMP, OFFSET_INTR_MASK);
 
 	/* Set transfer and transaction len */
 	if (i2c->op == I2C_MASTER_WRRD) {
 		if (i2c->dev_comp->aux_len_reg) {
-			writew(msgs->len, i2c->base + OFFSET_TRANSFER_LEN);
-			writew((msgs + 1)->len, i2c->base +
-			       OFFSET_TRANSFER_LEN_AUX);
+			mtk_i2c_writew(i2c, msgs->len, OFFSET_TRANSFER_LEN);
+			mtk_i2c_writew(i2c, (msgs + 1)->len,
+					    OFFSET_TRANSFER_LEN_AUX);
 		} else {
-			writew(msgs->len | ((msgs + 1)->len) << 8,
-			       i2c->base + OFFSET_TRANSFER_LEN);
+			mtk_i2c_writew(i2c, msgs->len | ((msgs + 1)->len) << 8,
+					    OFFSET_TRANSFER_LEN);
 		}
-		writew(I2C_WRRD_TRANAC_VALUE, i2c->base + OFFSET_TRANSAC_LEN);
+		mtk_i2c_writew(i2c, I2C_WRRD_TRANAC_VALUE, OFFSET_TRANSAC_LEN);
 	} else {
-		writew(msgs->len, i2c->base + OFFSET_TRANSFER_LEN);
-		writew(num, i2c->base + OFFSET_TRANSAC_LEN);
+		mtk_i2c_writew(i2c, msgs->len, OFFSET_TRANSFER_LEN);
+		mtk_i2c_writew(i2c, num, OFFSET_TRANSAC_LEN);
 	}
 
 	/* Prepare buffer data to start transfer */
@@ -607,14 +736,14 @@
 		if (left_num >= 1)
 			start_reg |= I2C_RS_MUL_CNFG;
 	}
-	writew(start_reg, i2c->base + OFFSET_START);
+	mtk_i2c_writew(i2c, start_reg, OFFSET_START);
 
 	ret = wait_for_completion_timeout(&i2c->msg_complete,
 					  i2c->adap.timeout);
 
 	/* Clear interrupt mask */
-	writew(~(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
-	       I2C_TRANSAC_COMP), i2c->base + OFFSET_INTR_MASK);
+	mtk_i2c_writew(i2c, ~(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
+			    I2C_ARB_LOST | I2C_TRANSAC_COMP), OFFSET_INTR_MASK);
 
 	if (i2c->op == I2C_MASTER_WR) {
 		dma_unmap_single(i2c->dev, wpaddr,
@@ -642,8 +771,6 @@
 		return -ETIMEDOUT;
 	}
 
-	completion_done(&i2c->msg_complete);
-
 	if (i2c->irq_stat & (I2C_HS_NACKERR | I2C_ACKERR)) {
 		dev_dbg(i2c->dev, "addr: %x, transfer ACK error\n", msgs->addr);
 		mtk_i2c_init_hw(i2c);
@@ -726,8 +853,8 @@
 	if (i2c->auto_restart)
 		restart_flag = I2C_RS_TRANSFER;
 
-	intr_stat = readw(i2c->base + OFFSET_INTR_STAT);
-	writew(intr_stat, i2c->base + OFFSET_INTR_STAT);
+	intr_stat = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
+	mtk_i2c_writew(i2c, intr_stat, OFFSET_INTR_STAT);
 
 	/*
 	 * when occurs ack error, i2c controller generate two interrupts
@@ -739,8 +866,8 @@
 	if (i2c->ignore_restart_irq && (i2c->irq_stat & restart_flag)) {
 		i2c->ignore_restart_irq = false;
 		i2c->irq_stat = 0;
-		writew(I2C_RS_MUL_CNFG | I2C_RS_MUL_TRIG | I2C_TRANSAC_START,
-		       i2c->base + OFFSET_START);
+		mtk_i2c_writew(i2c, I2C_RS_MUL_CNFG | I2C_RS_MUL_TRIG |
+				    I2C_TRANSAC_START, OFFSET_START);
 	} else {
 		if (i2c->irq_stat & (I2C_TRANSAC_COMP | restart_flag))
 			complete(&i2c->msg_complete);
@@ -841,6 +968,10 @@
 		return PTR_ERR(i2c->clk_dma);
 	}
 
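+	/* The "arb" bus arbitrator clock is optional. */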
+	i2c->clk_arb = devm_clk_get(&pdev->dev, "arb");
+	if (IS_ERR(i2c->clk_arb))
+		i2c->clk_arb = NULL;
+
 	clk = i2c->clk_main;
 	if (i2c->have_pmic) {
 		i2c->clk_pmic = devm_clk_get(&pdev->dev, "pmic");
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index 95d76ab..0e8050c 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -42,17 +42,22 @@
 #define MT6577_AUXADC_POWER_READY_MS          1
 #define MT6577_AUXADC_SAMPLE_READY_US         25
 
+#define MT6577_VOLTAGE_FULL_RANGE             1500
+#define MT6577_AUXADC_PRECISION               4096
+
 struct mt6577_auxadc_device {
 	void __iomem *reg_base;
 	struct clk *adc_clk;
 	struct mutex lock;
 };
 
-#define MT6577_AUXADC_CHANNEL(idx) {				    \
-		.type = IIO_VOLTAGE,				    \
-		.indexed = 1,					    \
-		.channel = (idx),				    \
-		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+#define MT6577_AUXADC_CHANNEL(idx) {				      \
+		.type = IIO_VOLTAGE,				      \
+		.indexed = 1,					      \
+		.channel = (idx),				      \
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |  \
+				      BIT(IIO_CHAN_INFO_RAW),	      \
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
 }
 
 static const struct iio_chan_spec mt6577_auxadc_iio_channels[] = {
@@ -164,6 +169,7 @@
 				  long info)
 {
 	switch (info) {
+	case IIO_CHAN_INFO_RAW:
 	case IIO_CHAN_INFO_PROCESSED:
 		*val = mt6577_auxadc_read(indio_dev, chan);
 		if (*val < 0) {
@@ -174,6 +180,11 @@
 		}
 		return IIO_VAL_INT;
 
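+	/*
+	 * Scale raw counts to millivolts: the input range is 1.5 V across
+	 * a 12-bit (4096-step) conversion, i.e. val * 1500 / 4096.
+	 */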
+	case IIO_CHAN_INFO_SCALE:
+		*val = MT6577_VOLTAGE_FULL_RANGE;
+		*val2 = MT6577_AUXADC_PRECISION;
+		return IIO_VAL_FRACTIONAL;
+
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 02c67a1..26883d7 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -24,6 +24,7 @@
 #include <linux/of_device.h>
 #include <linux/regmap.h>
 #include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6392/registers.h>
 #include <linux/mfd/mt6397/registers.h>
 #include <linux/mfd/mt6397/core.h>
 
@@ -84,6 +85,16 @@
 	.pmic_rst_reg = MT6323_TOP_RST_MISC,
 };
 
+static const struct mtk_pmic_regs mt6392_regs = {
+	.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
+		MTK_PMIC_KEYS_REGS(MT6392_CHRSTATUS,
+		0x2, MT6392_INT_MISC_CON, 0x10),
+	.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
+		MTK_PMIC_KEYS_REGS(MT6392_CHRSTATUS,
+		0x4, MT6392_INT_MISC_CON, 0x8),
+	.pmic_rst_reg = MT6392_TOP_RST_MISC,
+};
+
 struct mtk_pmic_keys_info {
 	struct mtk_pmic_keys *keys;
 	const struct mtk_pmic_keys_regs *regs;
@@ -240,6 +251,9 @@
 		.compatible = "mediatek,mt6323-keys",
 		.data = &mt6323_regs,
 	}, {
+		.compatible = "mediatek,mt6392-keys",
+		.data = &mt6392_regs,
+	}, {
 		/* sentinel */
 	}
 };
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 3fdaa64..c2a189c 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -39,6 +39,7 @@
 #include <linux/input/mt.h>
 #include <linux/input/touchscreen.h>
 #include <linux/of_device.h>
+#include <linux/firmware.h>
 
 #define WORK_REGISTER_THRESHOLD		0x00
 #define WORK_REGISTER_REPORT_RATE	0x08
@@ -69,6 +70,75 @@
 #define EDT_RAW_DATA_RETRIES		100
 #define EDT_RAW_DATA_DELAY		1000 /* usec */
 
+#define EDT_MAX_FW_SIZE			(60 * 1024)
+#define EDT_MAX_NB_TRIES		30
+#define EDT_CMD_RESET			0x07
+#define EDT_CMD_READ_ID			0x90
+#define EDT_CMD_ERASE_APP		0x61
+#define EDT_CMD_WRITE			0xBF
+#define EDT_CMD_FLASH_STATUS		0x6A
+#define EDT_CMD_CHIP_ID			0xA3
+#define EDT_CMD_CHIP_ID2		0x9F
+#define EDT_CMD_START1			0x55
+#define EDT_CMD_START2			0xAA
+#define EDT_CMD_START_DELAY		12
+
+#define EDT_UPGRADE_AA			0xAA
+#define EDT_UPGRADE_55			0x55
+#define EDT_ID				0xA6
+
+#define EDT_RETRIES_WRITE		100
+#define EDT_RETRIES_DELAY_WRITE		1
+#define EDT_UPGRADE_LOOP		30
+
+#define EDT_DELAY_UPGRADE_RESET		80
+#define EDT_DELAY_UPGRADE_AA		10
+#define EDT_DELAY_UPGRADE_55		80
+#define EDT_DELAY_READ_ID		20
+#define EDT_DELAY_ERASE			500
+#define EDT_INTERVAL_READ_REG		200
+#define EDT_TIMEOUT_READ_REG		1000
+
+#define EDT_FLASH_PACKET_LENGTH		32
+#define EDT_CMD_WRITE_LEN		6
+
+#define BYTE_OFF_0(x)			((u8)((x) & 0xFF))
+#define BYTE_OFF_8(x)			((u8)(((x) >> 8) & 0xFF))
+#define BYTE_OFF_16(x)			((u8)(((x) >> 16) & 0xFF))
+
+enum edt_fw_status {
+	EDT_RUN_IN_ERROR,
+	EDT_RUN_IN_APP,
+	EDT_RUN_IN_ROM,
+	EDT_RUN_IN_PRAM,
+	EDT_RUN_IN_BOOTLOADER,
+};
+
+struct edt_fw_param {
+	u8 model;
+	u16 bootloader_id;
+	u16 chip_id;
+	u16 flash_status_ok;
+	u16 fw_len_offset;
+	u8 cmd_upgrade;
+	u16 start_addr;
+};
+
+#define EDT_FW_PARAMS(_model, _bid, _cid, _fs, _fo, _cu, _sa) \
+	{ \
+		.model = _model, \
+		.bootloader_id = _bid, \
+		.chip_id = _cid, \
+		.flash_status_ok = _fs, \
+		.fw_len_offset = _fo,  \
+		.cmd_upgrade = _cu, \
+		.start_addr = _sa, \
+	}
+
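+/*
+ * Per-model flash programming parameters: expected bootloader and chip
+ * IDs, the flash status value that signals success, the offset of the
+ * firmware length field in the image, the upgrade command and the flash
+ * start address. Only the 0x5F model is known to this table so far.
+ */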
+static struct edt_fw_param edt_fw_params[] = {
+	EDT_FW_PARAMS(0x5F, 0x7918, 0x3600, 0xB002, 0x100, 0xBC, 0),
+};
+
 enum edt_ver {
 	EDT_M06,
 	EDT_M09,
@@ -113,6 +183,7 @@
 
 	struct edt_reg_addr reg_addr;
 	enum edt_ver version;
+	struct edt_fw_param *fw_param;
 };
 
 struct edt_i2c_chip_data {
@@ -478,6 +549,481 @@
 	return error ?: count;
 }
 
+static inline int edt_ft5x06_write_delay(struct edt_ft5x06_ts_data *tsdata,
+				   u16 wr_len, u8 *wr_buf,
+				   u16 delay)
+{
+	struct i2c_client *client = tsdata->client;
+	int ret;
+
+	ret = edt_ft5x06_ts_readwrite(client, wr_len, wr_buf, 0, NULL);
+	if (ret < 0) {
+		dev_err(&client->dev, "write cmd failed\n");
+		return ret;
+	}
+
+	if (delay > 0)
+		msleep(delay);
+
+	return 0;
+}
+
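+/*
+ * Poll the chip ID registers until the controller reports the expected
+ * ID or EDT_TIMEOUT_READ_REG elapses; this tells us whether application
+ * firmware is running before an upgrade is attempted.
+ */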
+static inline bool edt_ft5x06_wait_tp_to_valid(
+			struct edt_ft5x06_ts_data *tsdata)
+{
+	struct i2c_client *client = tsdata->client;
+	int ret;
+	int cnt = 0;
+	u8 idh;
+	u8 idl;
+	u8 cmd;
+	u16 chip_id;
+
+	do {
+		cmd = EDT_CMD_CHIP_ID;
+		ret = edt_ft5x06_ts_readwrite(client, 1, &cmd, 1, &idh);
+		if (ret < 0) {
+			dev_err(&client->dev, "read chip failed\n");
+			return false;
+		}
+		cmd = EDT_CMD_CHIP_ID2;
+		ret = edt_ft5x06_ts_readwrite(client, 1, &cmd, 1, &idl);
+
+		if (ret < 0) {
+			dev_err(&client->dev, "read chip2 failed\n");
+			return false;
+		}
+
+		chip_id = (((u16)idh) << 8) + idl;
+
+		if (tsdata->fw_param->chip_id == chip_id)
+			return true;
+
+		cnt++;
+		msleep(EDT_INTERVAL_READ_REG);
+	} while ((cnt * EDT_INTERVAL_READ_REG) < EDT_TIMEOUT_READ_REG);
+
+	return false;
+}
+
+static int edt_ft5x06_fwupg_get_boot_state(struct edt_ft5x06_ts_data *tsdata,
+				    enum edt_fw_status *fw_sts)
+{
+	struct i2c_client *client = tsdata->client;
+	int ret;
+	u8 cmd[4];
+	u8 val[2];
+	u16 bootloader_id;
+
+	*fw_sts = EDT_RUN_IN_ERROR;
+	cmd[0] = EDT_CMD_START1;
+	cmd[1] = EDT_CMD_START2;
+	ret = edt_ft5x06_write_delay(tsdata, 2,
+					cmd, EDT_CMD_START_DELAY);
+	if (ret < 0) {
+		dev_err(&client->dev, "write 55 aa failed\n");
+		return ret;
+	}
+
+	cmd[0] = EDT_CMD_READ_ID;
+	cmd[1] = cmd[2] = cmd[3] = 0x00;
+	ret = edt_ft5x06_ts_readwrite(client, 4, cmd, 2, val);
+	if (ret < 0) {
+		dev_err(&client->dev, "write 90 failed\n");
+		return ret;
+	}
+	bootloader_id = (((u16)val[0]) << 8) + val[1];
+
+	if (tsdata->fw_param->bootloader_id == bootloader_id)
+		*fw_sts = EDT_RUN_IN_BOOTLOADER;
+
+	return 0;
+}
+
+static int edt_ft5x06_reset(struct edt_ft5x06_ts_data *tsdata)
+{
+	u8 cmd = EDT_CMD_RESET;
+
+	return edt_ft5x06_write_delay(tsdata, sizeof(cmd),
+					&cmd, EDT_DELAY_UPGRADE_RESET);
+}
+
+static int edt_ft5x06_fwupg_reset_to_boot(struct edt_ft5x06_ts_data *tsdata)
+{
+	struct i2c_client *client = tsdata->client;
+	int ret;
+	u8 buf[2];
+
+	buf[0] = tsdata->fw_param->cmd_upgrade;
+	buf[1] = EDT_UPGRADE_AA;
+	ret = edt_ft5x06_write_delay(tsdata, sizeof(buf),
+					buf, EDT_DELAY_UPGRADE_AA);
+
+	if (ret < 0) {
+		dev_err(&client->dev, "write AA failed\n");
+		return ret;
+	}
+
+	buf[1] = EDT_UPGRADE_55;
+	ret = edt_ft5x06_write_delay(tsdata, sizeof(buf),
+					buf, EDT_DELAY_UPGRADE_55);
+
+	if (ret < 0) {
+		dev_err(&client->dev, "write 55 failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int edt_ft5x06_fwupg_reset_in_boot(struct edt_ft5x06_ts_data *tsdata)
+{
+	struct i2c_client *client = tsdata->client;
+	int ret;
+	u8 cmd = EDT_CMD_RESET;
+
+	ret = edt_ft5x06_write_delay(tsdata, sizeof(cmd),
+					&cmd, EDT_DELAY_UPGRADE_RESET);
+
+	if (ret < 0) {
+		dev_err(&client->dev, "write cmd reset failed\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static bool edt_ft5x06_fwupg_check_state(struct edt_ft5x06_ts_data *tsdata,
+					enum edt_fw_status rstate)
+{
+	int ret;
+	int i;
+	enum edt_fw_status cstate;
+
+	for (i = 0; i < EDT_UPGRADE_LOOP; i++) {
+		ret = edt_ft5x06_fwupg_get_boot_state(tsdata, &cstate);
+		if (!ret && cstate == rstate)
+			return true;
+		msleep(EDT_DELAY_READ_ID);
+	}
+
+	return false;
+}
+
+static bool edt_ft5x06_check_flash_status(
+	struct edt_ft5x06_ts_data *tsdata,
+	u16 flash_status,
+	int retries,
+	int retries_delay)
+{
+	struct i2c_client *client = tsdata->client;
+	int ret;
+	int i;
+	u8 cmd[4] = { 0 };
+	u8 val[2];
+	u16 read_status;
+
+	cmd[0] = EDT_CMD_FLASH_STATUS;
+	for (i = 0; i < retries; i++) {
+		ret = edt_ft5x06_ts_readwrite(client, 4, cmd, 2, val);
+		if (ret < 0) {
+			dev_err(&client->dev, "cmd flash status failed\n");
+			return false;
+		}
+		read_status = (((u16)val[0]) << 8) + val[1];
+		if (flash_status == read_status)
+			return true;
+
+		msleep(retries_delay);
+	}
+
+	return false;
+}
+
+static int edt_ft5x06_enter_bl(struct edt_ft5x06_ts_data *tsdata)
+{
+	struct i2c_client *client = tsdata->client;
+	int ret;
+	bool fwvalid;
+	bool state;
+
+	fwvalid = edt_ft5x06_wait_tp_to_valid(tsdata);
+	if (fwvalid) {
+		ret = edt_ft5x06_fwupg_reset_to_boot(tsdata);
+		if (ret < 0) {
+			dev_err(&client->dev, "enter into bootloader fail\n");
+			return ret;
+		}
+	} else {
+		ret = edt_ft5x06_fwupg_reset_in_boot(tsdata);
+		if (ret < 0) {
+			dev_err(&client->dev, "boot id when fw invalid fail\n");
+			return ret;
+		}
+	}
+
+	state = edt_ft5x06_fwupg_check_state(tsdata, EDT_RUN_IN_BOOTLOADER);
+	if (!state) {
+		dev_err(&client->dev, "fw not in bootloader, fail\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int edt_ft5x06_erase(struct edt_ft5x06_ts_data *tsdata)
+{
+	struct i2c_client *client = tsdata->client;
+	int ret;
+	u8 cmd;
+	bool flag;
+
+	cmd = EDT_CMD_ERASE_APP;
+	ret = edt_ft5x06_write_delay(tsdata, sizeof(cmd),
+					&cmd, EDT_DELAY_ERASE);
+	if (ret < 0) {
+		dev_err(&client->dev, "erase failed\n");
+		return ret;
+	}
+	flag = edt_ft5x06_check_flash_status(tsdata,
+			tsdata->fw_param->flash_status_ok, 50, 100);
+	if (!flag) {
+		dev_err(&client->dev, "ecc flash status check fail\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int edt_ft5x06_write_fw(struct edt_ft5x06_ts_data *tsdata,
+				u32 start_addr, const u8 *buf, u32 len)
+{
+	struct i2c_client *client = tsdata->client;
+	int ret;
+	u32 i;
+	u32 j;
+	u32 packet_number;
+	u32 packet_len;
+	u32 addr;
+	u32 offset;
+	u32 remainder;
+	u8 packet_buf[EDT_FLASH_PACKET_LENGTH + EDT_CMD_WRITE_LEN];
+	u8 ecc_in_host = 0;
+	u8 cmd[4];
+	u8 val[2];
+	u16 read_status;
+	u16 wr_ok;
+
+	packet_number = len / EDT_FLASH_PACKET_LENGTH;
+	remainder = len % EDT_FLASH_PACKET_LENGTH;
+	if (remainder > 0)
+		packet_number++;
+	packet_len = EDT_FLASH_PACKET_LENGTH;
+
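+	/*
+	 * Each flash packet is laid out as: 1 command byte, a 24-bit
+	 * destination address (big endian) and a 16-bit payload length,
+	 * followed by up to EDT_FLASH_PACKET_LENGTH payload bytes.  The XOR
+	 * over all payload bytes is accumulated as a simple ECC and later
+	 * compared with the value computed by the controller.
+	 */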
+	packet_buf[0] = EDT_CMD_WRITE;
+	for (i = 0; i < packet_number; i++) {
+		offset = i * EDT_FLASH_PACKET_LENGTH;
+		addr = start_addr + offset;
+		packet_buf[1] = BYTE_OFF_16(addr);
+		packet_buf[2] = BYTE_OFF_8(addr);
+		packet_buf[3] = BYTE_OFF_0(addr);
+
+		/* last packet */
+		if ((i == (packet_number - 1)) && remainder)
+			packet_len = remainder;
+
+		packet_buf[4] = BYTE_OFF_8(packet_len);
+		packet_buf[5] = BYTE_OFF_0(packet_len);
+
+		for (j = 0; j < packet_len; j++) {
+			packet_buf[EDT_CMD_WRITE_LEN + j] = buf[offset + j];
+			ecc_in_host ^= packet_buf[EDT_CMD_WRITE_LEN + j];
+		}
+
+		ret = edt_ft5x06_write_delay(tsdata,
+						packet_len + EDT_CMD_WRITE_LEN,
+						packet_buf, 0);
+		if (ret < 0) {
+			dev_err(&client->dev, "write packet failed\n");
+			return ret;
+		}
+		mdelay(1);
+
+		/* read status */
+		cmd[0] = EDT_CMD_FLASH_STATUS;
+		cmd[1] = 0x00;
+		cmd[2] = 0x00;
+		cmd[3] = 0x00;
+		wr_ok = tsdata->fw_param->flash_status_ok + i + 1;
+		for (j = 0; j < EDT_RETRIES_WRITE; j++) {
+			ret = edt_ft5x06_ts_readwrite(client, 4, cmd, 2, val);
+			if (ret < 0) {
+				dev_err(&client->dev,
+					"read flash status failed\n");
+				return ret;
+			}
+			read_status = (((u16)val[0]) << 8) + val[1];
+
+			if (wr_ok == read_status)
+				break;
+
+			mdelay(EDT_RETRIES_DELAY_WRITE);
+		}
+
+	}
+
+	return (int)ecc_in_host;
+}
+
+static int edt_ft5x06_ecc_cal(struct edt_ft5x06_ts_data *tsdata)
+{
+	struct i2c_client *client = tsdata->client;
+	int ret;
+	u8 reg_val = 0;
+	u8 cmd = 0xCC;
+
+	ret = edt_ft5x06_ts_readwrite(client, 1, &cmd, 1, &reg_val);
+	if (ret < 0) {
+		dev_err(&client->dev, "write cmd ecc failed\n");
+		return ret;
+	}
+
+	return reg_val;
+}
+
+static int edt_ft5x06_i2c_do_update_firmware(struct edt_ft5x06_ts_data *ts,
+					u32 start_addr,
+					const struct firmware *fw)
+{
+	struct i2c_client *client = ts->client;
+	int ecc_in_host;
+	int ecc_in_tp;
+	u32 fw_length;
+	int ret;
+
+	if (fw->size == 0 || fw->size > EDT_MAX_FW_SIZE ||
+	    fw->size < ts->fw_param->fw_len_offset + 2) {
+		dev_err(&client->dev, "Invalid firmware length\n");
+		return -EINVAL;
+	}
+
+	/* The expected image length is stored big-endian inside the image */
+	fw_length = ((u32)fw->data[ts->fw_param->fw_len_offset] << 8) +
+			fw->data[ts->fw_param->fw_len_offset + 1];
+
+	ret = edt_ft5x06_enter_bl(ts);
+	if (ret) {
+		dev_err(&client->dev, "Unable to enter bl %d\n", ret);
+		return ret;
+	}
+
+	ret = edt_ft5x06_erase(ts);
+	if (ret < 0) {
+		dev_err(&client->dev, "Error erase %d\n", ret);
+		goto fw_reset;
+	}
+
+	ecc_in_host = edt_ft5x06_write_fw(ts, start_addr, fw->data, fw_length);
+	if (ecc_in_host < 0) {
+		dev_err(&client->dev, "Error write fw %d\n", ecc_in_host);
+		goto fw_reset;
+	}
+
+	ecc_in_tp = edt_ft5x06_ecc_cal(ts);
+	if (ecc_in_tp < 0) {
+		dev_err(&client->dev, "Error read ecc %d\n", ecc_in_tp);
+		goto fw_reset;
+	}
+
+	if (ecc_in_tp != ecc_in_host) {
+		dev_err(&client->dev, "Error check ecc\n");
+		goto fw_reset;
+	}
+
+	ret = edt_ft5x06_reset(ts);
+	if (ret < 0) {
+		dev_err(&client->dev, "Error reset normal\n");
+		return -EIO;
+	}
+
+	msleep(200);
+
+	return 0;
+
+fw_reset:
+	ret = edt_ft5x06_reset(ts);
+	if (ret < 0)
+		dev_err(&client->dev, "Error reset normal\n");
+
+	return -EIO;
+}
+
+static int edt_ft5x06_i2c_fw_update(struct edt_ft5x06_ts_data *ts)
+{
+	struct i2c_client *client = ts->client;
+	const struct firmware *fw = NULL;
+	char *fw_file;
+	int error;
+
+	if (!ts->fw_param) {
+		dev_dbg(&client->dev, "firmware update not supported\n");
+		return -EINVAL;
+	}
+
+	fw_file = kasprintf(GFP_KERNEL, "ft5x06_%02X.bin", ts->fw_param->model);
+	if (!fw_file)
+		return -ENOMEM;
+
+	dev_dbg(&client->dev, "firmware name: %s\n", fw_file);
+
+	error = request_firmware(&fw, fw_file, &client->dev);
+	if (error) {
+		dev_err(&client->dev, "Unable to open firmware %s\n", fw_file);
+		goto out_free_fw_file;
+	}
+
+	disable_irq(client->irq);
+
+	error = edt_ft5x06_i2c_do_update_firmware(ts,
+						ts->fw_param->start_addr, fw);
+	if (error)
+		dev_err(&client->dev, "firmware update failed: %d\n", error);
+
+	enable_irq(client->irq);
+	msleep(100);
+
+	release_firmware(fw);
+
+out_free_fw_file:
+	kfree(fw_file);
+
+	return error;
+}
+
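+/*
+ * Writing anything to the "update_fw" sysfs attribute triggers a firmware
+ * update from /lib/firmware/ft5x06_<model>.bin (model id in hex, see
+ * edt_ft5x06_i2c_fw_update() above).
+ */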
+static ssize_t edt_ft5x06_update_fw_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+	int ret;
+
+	mutex_lock(&tsdata->mutex);
+
+	ret = edt_ft5x06_i2c_fw_update(tsdata);
+
+	mutex_unlock(&tsdata->mutex);
+	return ret ?: count;
+}
+
+static ssize_t fw_version_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+	int ret;
+
+	ret = edt_ft5x06_register_read(tsdata, EDT_ID);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "0x%02x\n", ret);
+}
+
 /* m06, m09: range 0-31, m12: range 0-5 */
 static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN,
 		M09_REGISTER_GAIN, 0, 31);
@@ -491,11 +1037,16 @@
 static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
 		NO_REGISTER, 0, 255);
 
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, edt_ft5x06_update_fw_store);
+static DEVICE_ATTR_RO(fw_version);
+
 static struct attribute *edt_ft5x06_attrs[] = {
 	&edt_ft5x06_attr_gain.dattr.attr,
 	&edt_ft5x06_attr_offset.dattr.attr,
 	&edt_ft5x06_attr_threshold.dattr.attr,
 	&edt_ft5x06_attr_report_rate.dattr.attr,
+	&dev_attr_update_fw.attr,
+	&dev_attr_fw_version.attr,
 	NULL
 };
 
@@ -778,6 +1329,7 @@
 	char *p;
 	int error;
 	char *model_name = tsdata->name;
+	int i;
 
 	/* see what we find if we assume it is a M06 *
 	 * if we get less than EDT_NAME_LEN, we don't want
@@ -871,6 +1423,13 @@
 			snprintf(model_name, EDT_NAME_LEN,
 				 "generic ft5x06 (%02x)",
 				 rdbuf[0]);
+
+			for (i = 0; i < ARRAY_SIZE(edt_fw_params); i++) {
+				if (edt_fw_params[i].model == rdbuf[0]) {
+					tsdata->fw_param = &edt_fw_params[i];
+					break;
+				}
+			}
 			break;
 		}
 	}
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b20ba65..8172a08 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -27,6 +27,7 @@
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
 #include <linux/of.h>
@@ -47,6 +48,8 @@
 	struct touchscreen_properties prop;
 	unsigned int max_touch_num;
 	unsigned int int_trigger_type;
+	struct regulator *avdd28;
+	struct regulator *vddio;
 	struct gpio_desc *gpiod_int;
 	struct gpio_desc *gpiod_rst;
 	u16 id;
@@ -225,6 +228,7 @@
 {
 	switch (id) {
 	case 1151:
+	case 5688:
 		return &gt1x_chip_data;
 
 	case 911:
@@ -540,6 +544,24 @@
 		return -EINVAL;
 	dev = &ts->client->dev;
 
+	ts->avdd28 = devm_regulator_get(dev, "AVDD28");
+	if (IS_ERR(ts->avdd28)) {
+		error = PTR_ERR(ts->avdd28);
+		if (error != -EPROBE_DEFER)
+			dev_err(dev,
+				"Failed to get AVDD28 regulator: %d\n", error);
+		return error;
+	}
+
+	ts->vddio = devm_regulator_get(dev, "VDDIO");
+	if (IS_ERR(ts->vddio)) {
+		error = PTR_ERR(ts->vddio);
+		if (error != -EPROBE_DEFER)
+			dev_err(dev,
+				"Failed to get VDDIO regulator: %d\n", error);
+		return error;
+	}
+
 	/* Get the interrupt GPIO pin number */
 	gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_INT_NAME, GPIOD_IN);
 	if (IS_ERR(gpiod)) {
@@ -770,6 +792,14 @@
 	complete_all(&ts->firmware_loading_complete);
 }
 
+static void goodix_disable_regulators(void *arg)
+{
+	struct goodix_ts_data *ts = arg;
+
+	regulator_disable(ts->vddio);
+	regulator_disable(ts->avdd28);
+}
+
 static int goodix_ts_probe(struct i2c_client *client,
 			   const struct i2c_device_id *id)
 {
@@ -795,6 +825,29 @@
 	if (error)
 		return error;
 
+	/* power up the controller */
+	error = regulator_enable(ts->avdd28);
+	if (error) {
+		dev_err(&client->dev,
+			"Failed to enable AVDD28 regulator: %d\n",
+			error);
+		return error;
+	}
+
+	error = regulator_enable(ts->vddio);
+	if (error) {
+		dev_err(&client->dev,
+			"Failed to enable VDDIO regulator: %d\n",
+			error);
+		regulator_disable(ts->avdd28);
+		return error;
+	}
+
+	error = devm_add_action_or_reset(&client->dev,
+					 goodix_disable_regulators, ts);
+	if (error)
+		return error;
+
 	if (ts->gpiod_int && ts->gpiod_rst) {
 		/* reset the controller */
 		error = goodix_reset(ts);
@@ -951,6 +1004,7 @@
 #ifdef CONFIG_OF
 static const struct of_device_id goodix_of_match[] = {
 	{ .compatible = "goodix,gt1151" },
+	{ .compatible = "goodix,gt5688" },
 	{ .compatible = "goodix,gt911" },
 	{ .compatible = "goodix,gt9110" },
 	{ .compatible = "goodix,gt912" },
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 48d4709..4bdc4d8 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -124,7 +124,9 @@
 #define ARM_V7S_TEX_MASK		0x7
 #define ARM_V7S_ATTR_TEX(val)		(((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
 
-#define ARM_V7S_ATTR_MTK_4GB		BIT(9) /* MTK extend it for 4GB mode */
+/* MediaTek extend the two bits below for over 4GB mode */
+#define ARM_V7S_ATTR_MTK_PA_BIT32	BIT(9)
+#define ARM_V7S_ATTR_MTK_PA_BIT33	BIT(4)
 
 /* *well, except for TEX on level 2 large pages, of course :( */
 #define ARM_V7S_CONT_PAGE_TEX_SHIFT	6
@@ -181,18 +183,54 @@
 	spinlock_t		split_lock;
 };
 
+static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl);
+
 static dma_addr_t __arm_v7s_dma_addr(void *pages)
 {
 	return (dma_addr_t)virt_to_phys(pages);
 }
 
-static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl)
+static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
+				    struct io_pgtable_cfg *cfg)
 {
+	arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
+
+	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB) {
+		if (paddr & BIT_ULL(32))
+			pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
+		if (paddr & BIT_ULL(33))
+			pte |= ARM_V7S_ATTR_MTK_PA_BIT33;
+	}
+	return pte;
+}
+
+static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
+				  struct io_pgtable_cfg *cfg)
+{
+	arm_v7s_iopte mask;
+	phys_addr_t paddr;
+
 	if (ARM_V7S_PTE_IS_TABLE(pte, lvl))
-		pte &= ARM_V7S_TABLE_MASK;
+		mask = ARM_V7S_TABLE_MASK;
+	else if (arm_v7s_pte_is_cont(pte, lvl))
+		mask = ARM_V7S_LVL_MASK(lvl) * ARM_V7S_CONT_PAGES;
 	else
-		pte &= ARM_V7S_LVL_MASK(lvl);
-	return phys_to_virt(pte);
+		mask = ARM_V7S_LVL_MASK(lvl);
+
+	paddr = pte & mask;
+	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB) {
+		if (pte & ARM_V7S_ATTR_MTK_PA_BIT32)
+			paddr |= BIT_ULL(32);
+		if (pte & ARM_V7S_ATTR_MTK_PA_BIT33)
+			paddr |= BIT_ULL(33);
+	}
+	return paddr;
+}
+
+static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl,
+				  struct arm_v7s_io_pgtable *data)
+{
+	return phys_to_virt(iopte_to_paddr(pte, lvl, &data->iop.cfg));
 }
 
 static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
@@ -307,9 +345,6 @@
 	if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
 		pte |= ARM_V7S_ATTR_NS_SECTION;
 
-	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB)
-		pte |= ARM_V7S_ATTR_MTK_4GB;
-
 	return pte;
 }
 
@@ -408,7 +443,7 @@
 	if (num_entries > 1)
 		pte = arm_v7s_pte_to_cont(pte, lvl);
 
-	pte |= paddr & ARM_V7S_LVL_MASK(lvl);
+	pte |= paddr_to_iopte(paddr, lvl, cfg);
 
 	__arm_v7s_set_pte(ptep, pte, num_entries, cfg);
 	return 0;
@@ -474,7 +509,7 @@
 	}
 
 	if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
-		cptep = iopte_deref(pte, lvl);
+		cptep = iopte_deref(pte, lvl, data);
 	} else if (pte) {
 		/* We require an unmap first */
 		WARN_ON(!selftest_running);
@@ -496,7 +531,9 @@
 	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
 		return 0;
 
-	if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr)))
+	if (WARN_ON(upper_32_bits(iova)) ||
+	    WARN_ON(upper_32_bits(paddr) &&
+		    !(iop->cfg.quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB)))
 		return -ERANGE;
 
 	ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
@@ -524,7 +561,8 @@
 		arm_v7s_iopte pte = data->pgd[i];
 
 		if (ARM_V7S_PTE_IS_TABLE(pte, 1))
-			__arm_v7s_free_table(iopte_deref(pte, 1), 2, data);
+			__arm_v7s_free_table(iopte_deref(pte, 1, data),
+					     2, data);
 	}
 	__arm_v7s_free_table(data->pgd, 1, data);
 	kmem_cache_destroy(data->l2_tables);
@@ -594,7 +632,7 @@
 		if (!ARM_V7S_PTE_IS_TABLE(pte, 1))
 			return 0;
 
-		tablep = iopte_deref(pte, 1);
+		tablep = iopte_deref(pte, 1, data);
 		return __arm_v7s_unmap(data, iova, size, 2, tablep);
 	}
 
@@ -652,7 +690,7 @@
 				io_pgtable_tlb_add_flush(iop, iova, blk_size,
 					ARM_V7S_BLOCK_SIZE(lvl + 1), false);
 				io_pgtable_tlb_sync(iop);
-				ptep = iopte_deref(pte[i], lvl);
+				ptep = iopte_deref(pte[i], lvl, data);
 				__arm_v7s_free_table(ptep, lvl + 1, data);
 			} else {
 				io_pgtable_tlb_add_flush(iop, iova, blk_size,
@@ -670,7 +708,7 @@
 	}
 
 	/* Keep on walkin' */
-	ptep = iopte_deref(pte[0], lvl);
+	ptep = iopte_deref(pte[0], lvl, data);
 	return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
 }
 
@@ -696,7 +734,7 @@
 	do {
 		ptep += ARM_V7S_LVL_IDX(iova, ++lvl);
 		pte = READ_ONCE(*ptep);
-		ptep = iopte_deref(pte, lvl);
+		ptep = iopte_deref(pte, lvl, data);
 	} while (ARM_V7S_PTE_IS_TABLE(pte, lvl));
 
 	if (!ARM_V7S_PTE_IS_VALID(pte))
@@ -705,7 +743,7 @@
 	mask = ARM_V7S_LVL_MASK(lvl);
 	if (arm_v7s_pte_is_cont(pte, lvl))
 		mask *= ARM_V7S_CONT_PAGES;
-	return (pte & mask) | (iova & ~mask);
+	return iopte_to_paddr(pte, lvl, &data->iop.cfg) | (iova & ~mask);
 }
 
 static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 00e1c90..3e14f6a 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1963,7 +1963,7 @@
 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
 		      const struct iommu_ops *ops)
 {
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 
 	if (fwspec)
 		return ops == fwspec->ops ? 0 : -EINVAL;
@@ -1975,26 +1975,26 @@
 	of_node_get(to_of_node(iommu_fwnode));
 	fwspec->iommu_fwnode = iommu_fwnode;
 	fwspec->ops = ops;
-	dev->iommu_fwspec = fwspec;
+	dev_iommu_fwspec_set(dev, fwspec);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_fwspec_init);
 
 void iommu_fwspec_free(struct device *dev)
 {
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 
 	if (fwspec) {
 		fwnode_handle_put(fwspec->iommu_fwnode);
 		kfree(fwspec);
-		dev->iommu_fwspec = NULL;
+		dev_iommu_fwspec_set(dev, NULL);
 	}
 }
 EXPORT_SYMBOL_GPL(iommu_fwspec_free);
 
 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
 {
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	size_t size;
 	int i;
 
@@ -2003,11 +2003,11 @@
 
 	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
 	if (size > sizeof(*fwspec)) {
-		fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
+		fwspec = krealloc(fwspec, size, GFP_KERNEL);
 		if (!fwspec)
 			return -ENOMEM;
 
-		dev->iommu_fwspec = fwspec;
+		dev_iommu_fwspec_set(dev, fwspec);
 	}
 
 	for (i = 0; i < num_ids; i++)
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 8e75f34a..d8a926e 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -36,6 +36,7 @@
 #include "mtk_iommu.h"
 
 #define REG_MMU_PT_BASE_ADDR			0x000
+#define MMU_PT_ADDR_MASK			GENMASK(31, 7)
 
 #define REG_MMU_INVALIDATE			0x020
 #define F_ALL_INVLD				0x2
@@ -52,12 +53,9 @@
 #define REG_MMU_DCM_DIS				0x050
 
 #define REG_MMU_CTRL_REG			0x110
+#define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
 #define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
-#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \
-	((data)->m4u_plat == M4U_MT2712 ? 4 : 5)
-/* It's named by F_MMU_TF_PROT_SEL in mt2712. */
-#define F_MMU_TF_PROTECT_SEL(prot, data) \
-	(((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))
+#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)
 
 #define REG_MMU_IVRP_PADDR			0x114
 
@@ -74,26 +72,32 @@
 #define F_INT_CLR_BIT				BIT(12)
 
 #define REG_MMU_INT_MAIN_CONTROL		0x124
-#define F_INT_TRANSLATION_FAULT			BIT(0)
-#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
-#define F_INT_INVALID_PA_FAULT			BIT(2)
-#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
-#define F_INT_TLB_MISS_FAULT			BIT(4)
-#define F_INT_MISS_TRANSACTION_FIFO_FAULT	BIT(5)
-#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	BIT(6)
+						/* mmu0 | mmu1 */
+#define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
+#define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
+#define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
+#define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
+#define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
+#define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
+#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	(BIT(6) | BIT(13))
 
 #define REG_MMU_CPE_DONE			0x12C
 
 #define REG_MMU_FAULT_ST1			0x134
+#define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
+#define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)
 
-#define REG_MMU_FAULT_VA			0x13c
+#define REG_MMU0_FAULT_VA			0x13c
 #define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
 #define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)
 
-#define REG_MMU_INVLD_PA			0x140
-#define REG_MMU_INT_ID				0x150
-#define F_MMU0_INT_ID_LARB_ID(a)		(((a) >> 7) & 0x7)
-#define F_MMU0_INT_ID_PORT_ID(a)		(((a) >> 2) & 0x1f)
+#define REG_MMU0_INVLD_PA			0x140
+#define REG_MMU1_FAULT_VA			0x144
+#define REG_MMU1_INVLD_PA			0x148
+#define REG_MMU0_INT_ID				0x150
+#define REG_MMU1_INT_ID				0x154
+#define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
+#define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)
 
 #define MTK_PROTECT_PA_ALIGN			128
 
@@ -113,7 +117,21 @@
 	struct iommu_domain		domain;
 };
 
-static struct iommu_ops mtk_iommu_ops;
+static const struct iommu_ops mtk_iommu_ops;
+
+#define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL
 
 /*
  * In M4U 4GB mode, the physical address is remapped as below:
@@ -236,13 +254,21 @@
 
 	/* Read error info from registers */
 	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
-	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
+	if (int_state & F_REG_MMU0_FAULT_MASK) {
+		regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
+		fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
+		fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
+	} else {
+		regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
+		fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
+		fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
+	}
 	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
 	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
-	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
-	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
-	fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
-	fault_port = F_MMU0_INT_ID_PORT_ID(regval);
+	fault_larb = F_MMU_INT_ID_LARB_ID(regval);
+	fault_port = F_MMU_INT_ID_PORT_ID(regval);
+
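+	/* Map the raw larb index through the per-SoC larb id remap table. */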
+	fault_larb = data->plat_data->larbid_remap[fault_larb];
 
 	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
 			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
@@ -268,13 +294,13 @@
 {
 	struct mtk_smi_larb_iommu    *larb_mmu;
 	unsigned int                 larbid, portid;
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	int i;
 
 	for (i = 0; i < fwspec->num_ids; ++i) {
 		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
 		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
-		larb_mmu = &data->smi_imu.larb_imu[larbid];
+		larb_mmu = &data->larb_imu[larbid];
 
 		dev_dbg(dev, "%s iommu port: %d\n",
 			enable ? "enable" : "disable", portid);
@@ -295,7 +321,8 @@
 	dom->cfg = (struct io_pgtable_cfg) {
 		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
 			IO_PGTABLE_QUIRK_NO_PERMS |
-			IO_PGTABLE_QUIRK_TLBI_ON_MAP,
+			IO_PGTABLE_QUIRK_TLBI_ON_MAP |
+			IO_PGTABLE_QUIRK_ARM_MTK_4GB,
 		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
 		.ias = 32,
 		.oas = 32,
@@ -303,9 +330,6 @@
 		.iommu_dev = data->dev,
 	};
 
-	if (data->enable_4GB)
-		dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;
-
 	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
 	if (!dom->iop) {
 		dev_err(data->dev, "Failed to alloc io pgtable\n");
@@ -360,7 +384,7 @@
 				   struct device *dev)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
+	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
 
 	if (!data)
 		return -ENODEV;
@@ -368,7 +392,7 @@
 	/* Update the pgtable base address register of the M4U HW */
 	if (!data->m4u_dom) {
 		data->m4u_dom = dom;
-		writel(dom->cfg.arm_v7s_cfg.ttbr[0],
+		writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
 		       data->base + REG_MMU_PT_BASE_ADDR);
 	}
 
@@ -379,7 +403,7 @@
 static void mtk_iommu_detach_device(struct iommu_domain *domain,
 				    struct device *dev)
 {
-	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
+	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
 
 	if (!data)
 		return;
@@ -391,12 +415,16 @@
 			 phys_addr_t paddr, size_t size, int prot)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
 	unsigned long flags;
 	int ret;
 
+	/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
+	if (data->enable_4GB)
+		paddr |= BIT_ULL(32);
+
 	spin_lock_irqsave(&dom->pgtlock, flags);
-	ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
-			    size, prot);
+	ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
 	spin_unlock_irqrestore(&dom->pgtlock, flags);
 
 	return ret;
@@ -441,13 +469,14 @@
 
 static int mtk_iommu_add_device(struct device *dev)
 {
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct mtk_iommu_data *data;
 	struct iommu_group *group;
 
-	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
 		return -ENODEV; /* Not a iommu client device */
 
-	data = dev->iommu_fwspec->iommu_priv;
+	data = fwspec->iommu_priv;
 	iommu_device_link(&data->iommu, dev);
 
 	group = iommu_group_get_for_dev(dev);
@@ -460,12 +489,13 @@
 
 static void mtk_iommu_remove_device(struct device *dev)
 {
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct mtk_iommu_data *data;
 
-	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
 		return;
 
-	data = dev->iommu_fwspec->iommu_priv;
+	data = fwspec->iommu_priv;
 	iommu_device_unlink(&data->iommu, dev);
 
 	iommu_group_remove_device(dev);
@@ -492,6 +522,7 @@
 
 static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 {
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct platform_device *m4updev;
 
 	if (args->args_count != 1) {
@@ -500,19 +531,19 @@
 		return -EINVAL;
 	}
 
-	if (!dev->iommu_fwspec->iommu_priv) {
+	if (!fwspec->iommu_priv) {
 		/* Get the m4u device */
 		m4updev = of_find_device_by_node(args->np);
 		if (WARN_ON(!m4updev))
 			return -EINVAL;
 
-		dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
+		fwspec->iommu_priv = platform_get_drvdata(m4updev);
 	}
 
 	return iommu_fwspec_add_ids(dev, args->args, 1);
 }
 
-static struct iommu_ops mtk_iommu_ops = {
+static const struct iommu_ops mtk_iommu_ops = {
 	.domain_alloc	= mtk_iommu_domain_alloc,
 	.domain_free	= mtk_iommu_domain_free,
 	.attach_dev	= mtk_iommu_attach_device,
@@ -540,9 +571,11 @@
 		return ret;
 	}
 
-	regval = F_MMU_TF_PROTECT_SEL(2, data);
-	if (data->m4u_plat == M4U_MT8173)
-		regval |= F_MMU_PREFETCH_RT_REPLACE_MOD;
+	if (data->plat_data->m4u_plat == M4U_MT8173)
+		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
+			 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
+	else
+		regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
 	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
 
 	regval = F_L2_MULIT_HIT_EN |
@@ -562,14 +595,15 @@
 		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
 	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
 
-	if (data->m4u_plat == M4U_MT8173)
+	if (data->plat_data->m4u_plat == M4U_MT8173 ||
+	    data->plat_data->m4u_plat == M4U_MT8167)
 		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
 	else
 		regval = lower_32_bits(data->protect_base) |
 			 upper_32_bits(data->protect_base);
 	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
 
-	if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
+	if (data->enable_4GB && data->plat_data->has_vld_pa_rng) {
 		/*
 		 * If 4GB mode is enabled, the validate PA range is from
 		 * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30].
@@ -579,8 +613,7 @@
 	}
 	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
 
-	/* It's MISC control register whose default value is ok except mt8173.*/
-	if (data->m4u_plat == M4U_MT8173)
+	if (data->plat_data->reset_axi)
 		writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);
 
 	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
@@ -613,7 +646,7 @@
 	if (!data)
 		return -ENOMEM;
 	data->dev = dev;
-	data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev);
+	data->plat_data = of_device_get_match_data(dev);
 
 	/* Protect memory. HW will access here while translation fault.*/
 	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
@@ -623,6 +656,8 @@
 
 	/* Whether the current dram is over 4GB */
 	data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
+	if (!data->plat_data->has_4gb_mode)
+		data->enable_4GB = false;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	data->base = devm_ioremap_resource(dev, res);
@@ -634,15 +669,16 @@
 	if (data->irq < 0)
 		return data->irq;
 
-	data->bclk = devm_clk_get(dev, "bclk");
-	if (IS_ERR(data->bclk))
-		return PTR_ERR(data->bclk);
+	if (data->plat_data->has_bclk) {
+		data->bclk = devm_clk_get(dev, "bclk");
+		if (IS_ERR(data->bclk))
+			return PTR_ERR(data->bclk);
+	}
 
 	larb_nr = of_count_phandle_with_args(dev->of_node,
 					     "mediatek,larbs", NULL);
 	if (larb_nr < 0)
 		return larb_nr;
-	data->smi_imu.larb_nr = larb_nr;
 
 	for (i = 0; i < larb_nr; i++) {
 		struct device_node *larbnode;
@@ -653,17 +689,21 @@
 		if (!larbnode)
 			return -EINVAL;
 
-		if (!of_device_is_available(larbnode))
+		if (!of_device_is_available(larbnode)) {
+			of_node_put(larbnode);
 			continue;
+		}
 
 		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
 		if (ret)/* The id is consecutive if there is no this property */
 			id = i;
 
 		plarbdev = of_find_device_by_node(larbnode);
-		if (!plarbdev)
+		if (!plarbdev) {
+			of_node_put(larbnode);
 			return -EPROBE_DEFER;
-		data->smi_imu.larb_imu[id].dev = &plarbdev->dev;
+		}
+		data->larb_imu[id].dev = &plarbdev->dev;
 
 		component_match_add_release(dev, &match, release_of,
 					    compare_of, larbnode);
@@ -724,6 +764,7 @@
 	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
 	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
 	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
+	reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
 	clk_disable_unprepare(data->bclk);
 	return 0;
 }
@@ -732,6 +773,7 @@
 {
 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
 	struct mtk_iommu_suspend_reg *reg = &data->reg;
+	struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
 	void __iomem *base = data->base;
 	int ret;
 
@@ -747,8 +789,9 @@
 	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
 	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
 	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
-	if (data->m4u_dom)
-		writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
+	writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
+	if (m4u_dom)
+		writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
 		       base + REG_MMU_PT_BASE_ADDR);
 	return 0;
 }
@@ -757,9 +800,40 @@
 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
 };
 
+static const struct mtk_iommu_plat_data mt2712_data = {
+	.m4u_plat     = M4U_MT2712,
+	.has_4gb_mode = true,
+	.has_bclk     = true,
+	.has_vld_pa_rng   = true,
+	.larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+};
+
+static const struct mtk_iommu_plat_data mt8167_data = {
+	.m4u_plat     = M4U_MT8167,
+	.has_4gb_mode = true,
+	.reset_axi    = true,
+	.larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
+};
+
+static const struct mtk_iommu_plat_data mt8173_data = {
+	.m4u_plat     = M4U_MT8173,
+	.has_4gb_mode = true,
+	.has_bclk     = true,
+	.reset_axi    = true,
+	.larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
+};
+
+static const struct mtk_iommu_plat_data mt8183_data = {
+	.m4u_plat     = M4U_MT8183,
+	.reset_axi    = true,
+	.larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1},
+};
+
 static const struct of_device_id mtk_iommu_of_ids[] = {
-	{ .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712},
-	{ .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173},
+	{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
+	{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
+	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
+	{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
 	{}
 };
 
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 778498b..0638505 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -33,12 +33,26 @@
 	u32				int_control0;
 	u32				int_main_control;
 	u32				ivrp_paddr;
+	u32				vld_pa_rng;
 };
 
 enum mtk_iommu_plat {
 	M4U_MT2701,
 	M4U_MT2712,
+	M4U_MT8167,
 	M4U_MT8173,
+	M4U_MT8183,
+};
+
+struct mtk_iommu_plat_data {
+	enum mtk_iommu_plat m4u_plat;
+	bool                has_4gb_mode;
+
+	/* HW will use the EMI clock if there isn't the "bclk". */
+	bool                has_bclk;
+	bool                has_vld_pa_rng;
+	bool                reset_axi;
+	unsigned char       larbid_remap[MTK_LARB_NR_MAX];
 };
 
 struct mtk_iommu_domain;
@@ -52,14 +66,14 @@
 	struct mtk_iommu_suspend_reg	reg;
 	struct mtk_iommu_domain		*m4u_dom;
 	struct iommu_group		*m4u_group;
-	struct mtk_smi_iommu		smi_imu;      /* SMI larb iommu info */
 	bool                            enable_4GB;
 	bool				tlb_flush_active;
 
 	struct iommu_device		iommu;
-	enum mtk_iommu_plat		m4u_plat;
+	const struct mtk_iommu_plat_data *plat_data;
 
 	struct list_head		list;
+	struct mtk_smi_larb_iommu	larb_imu[MTK_LARB_NR_MAX];
 };
 
 static inline int compare_of(struct device *dev, void *data)
@@ -76,14 +90,14 @@
 {
 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
 
-	return component_bind_all(dev, &data->smi_imu);
+	return component_bind_all(dev, &data->larb_imu);
 }
 
 static inline void mtk_iommu_unbind(struct device *dev)
 {
 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
 
-	component_unbind_all(dev, &data->smi_imu);
+	component_unbind_all(dev, &data->larb_imu);
 }
 
 #endif
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 676c029..eb0affe 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -206,13 +206,13 @@
 {
 	struct mtk_smi_larb_iommu    *larb_mmu;
 	unsigned int                 larbid, portid;
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	int i;
 
 	for (i = 0; i < fwspec->num_ids; ++i) {
 		larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
 		portid = mt2701_m4u_to_port(fwspec->ids[i]);
-		larb_mmu = &data->smi_imu.larb_imu[larbid];
+		larb_mmu = &data->larb_imu[larbid];
 
 		dev_dbg(dev, "%s iommu port: %d\n",
 			enable ? "enable" : "disable", portid);
@@ -271,7 +271,7 @@
 				   struct device *dev)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
+	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
 	int ret;
 
 	if (!data)
@@ -293,7 +293,7 @@
 static void mtk_iommu_detach_device(struct iommu_domain *domain,
 				    struct device *dev)
 {
-	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
+	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
 
 	if (!data)
 		return;
@@ -362,7 +362,7 @@
 	return pa;
 }
 
-static struct iommu_ops mtk_iommu_ops;
+static const struct iommu_ops mtk_iommu_ops;
 
 /*
  * MTK generation one iommu HW only support one iommu domain, and all the client
@@ -371,6 +371,7 @@
 static int mtk_iommu_create_mapping(struct device *dev,
 				    struct of_phandle_args *args)
 {
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct mtk_iommu_data *data;
 	struct platform_device *m4updev;
 	struct dma_iommu_mapping *mtk_mapping;
@@ -383,28 +384,29 @@
 		return -EINVAL;
 	}
 
-	if (!dev->iommu_fwspec) {
+	if (!fwspec) {
 		ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
 		if (ret)
 			return ret;
-	} else if (dev->iommu_fwspec->ops != &mtk_iommu_ops) {
+		fwspec = dev_iommu_fwspec_get(dev);
+	} else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_ops) {
 		return -EINVAL;
 	}
 
-	if (!dev->iommu_fwspec->iommu_priv) {
+	if (!fwspec->iommu_priv) {
 		/* Get the m4u device */
 		m4updev = of_find_device_by_node(args->np);
 		if (WARN_ON(!m4updev))
 			return -EINVAL;
 
-		dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
+		fwspec->iommu_priv = platform_get_drvdata(m4updev);
 	}
 
 	ret = iommu_fwspec_add_ids(dev, args->args, 1);
 	if (ret)
 		return ret;
 
-	data = dev->iommu_fwspec->iommu_priv;
+	data = fwspec->iommu_priv;
 	m4udev = data->dev;
 	mtk_mapping = m4udev->archdata.iommu;
 	if (!mtk_mapping) {
@@ -422,6 +424,7 @@
 
 static int mtk_iommu_add_device(struct device *dev)
 {
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct dma_iommu_mapping *mtk_mapping;
 	struct of_phandle_args iommu_spec;
 	struct of_phandle_iterator it;
@@ -440,7 +443,7 @@
 		of_node_put(iommu_spec.np);
 	}
 
-	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
 		return -ENODEV; /* Not a iommu client device */
 
 	/*
@@ -458,7 +461,7 @@
 	if (err)
 		return err;
 
-	data = dev->iommu_fwspec->iommu_priv;
+	data = fwspec->iommu_priv;
 	mtk_mapping = data->dev->archdata.iommu;
 	err = arm_iommu_attach_device(dev, mtk_mapping);
 	if (err) {
@@ -471,12 +474,13 @@
 
 static void mtk_iommu_remove_device(struct device *dev)
 {
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct mtk_iommu_data *data;
 
-	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
 		return;
 
-	data = dev->iommu_fwspec->iommu_priv;
+	data = fwspec->iommu_priv;
 	iommu_device_unlink(&data->iommu, dev);
 
 	iommu_group_remove_device(dev);
@@ -524,7 +528,7 @@
 	return 0;
 }
 
-static struct iommu_ops mtk_iommu_ops = {
+static const struct iommu_ops mtk_iommu_ops = {
 	.domain_alloc	= mtk_iommu_domain_alloc,
 	.domain_free	= mtk_iommu_domain_free,
 	.attach_dev	= mtk_iommu_attach_device,
@@ -609,14 +613,12 @@
 			}
 		}
 
-		data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
+		data->larb_imu[larb_nr].dev = &plarbdev->dev;
 		component_match_add_release(dev, &match, release_of,
 					    compare_of, larb_spec.np);
 		larb_nr++;
 	}
 
-	data->smi_imu.larb_nr = larb_nr;
-
 	platform_set_drvdata(pdev, data);
 
 	ret = mtk_iommu_hw_init(data);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 44097a3..b2d1c95 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -40,6 +40,13 @@
 
 comment "LED drivers"
 
+config LEDS_APA102
+	tristate "LED Support for Shiji APA102"
+	depends on LEDS_CLASS
+	depends on SPI
+	help
+	  This option enables support for strings of Shiji APA102 RGB LEDs
+	  connected over SPI.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called leds-apa102.
+
 config LEDS_88PM860X
 	tristate "LED Support for Marvell 88PM860x PMIC"
 	depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 420b5d2..da52050 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -9,6 +9,7 @@
 # LED Platform Drivers
 obj-$(CONFIG_LEDS_88PM860X)		+= leds-88pm860x.o
 obj-$(CONFIG_LEDS_AAT1290)		+= leds-aat1290.o
+obj-$(CONFIG_LEDS_APA102)		+= leds-apa102.o
 obj-$(CONFIG_LEDS_APU)			+= leds-apu.o
 obj-$(CONFIG_LEDS_AS3645A)		+= leds-as3645a.o
 obj-$(CONFIG_LEDS_BCM6328)		+= leds-bcm6328.o
diff --git a/drivers/leds/leds-apa102.c b/drivers/leds/leds-apa102.c
new file mode 100644
index 0000000..bd36c45
--- /dev/null
+++ b/drivers/leds/leds-apa102.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <uapi/linux/uleds.h>
+
+/*
+ *  APA102 SPI protocol description:
+ *  +------+----------------------------------------+------+
+ *  |START |               DATA FIELD:              | END  |
+ *  |FRAME |               N LED FRAMES             |FRAME |
+ *  +------+------+------+------+------+-----+------+------+
+ *  | 0*32 | LED1 | LED2 | LED3 | LED4 | --- | LEDN | 1*32 |
+ *  +------+------+------+------+------+-----+------+------+
+ *
+ *  +-----------------------------------+
+ *  |START FRAME 32bits                 |
+ *  +--------+--------+--------+--------+
+ *  |00000000|00000000|00000000|00000000|
+ *  +--------+--------+--------+--------+
+ *
+ *  +------------------------------------+
+ *  |LED  FRAME 32bits                   |
+ *  +---+-----+--------+--------+--------+
+ *  |111|LUMA |  BLUE  | GREEN  |  RED   |
+ *  +---+-----+--------+--------+--------+
+ *  |3b |5bits| 8bits  | 8bits  | 8bits  |
+ *  +---+-----+--------+--------+--------+
+ *  |MSB   LSB|MSB  LSB|MSB  LSB|MSB  LSB|
+ *  +---+-----+--------+--------+--------+
+ *
+ *  +-----------------------------------+
+ *  |END FRAME 32bits                   |
+ *  +--------+--------+--------+--------+
+ *  |11111111|11111111|11111111|11111111|
+ *  +--------+--------+--------+--------+
+ */
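+
+/*
+ * Worked example (values follow from the frame layout above): a single
+ * LED driven full red at maximum luma is sent as the 12-byte sequence
+ *
+ *   00 00 00 00  ff 00 00 ff  ff ff ff ff
+ *
+ * i.e. the 32-bit start frame, one LED frame (header 0b111 | luma 31 =
+ * 0xff, blue 0x00, green 0x00, red 0xff) and the 32-bit end frame.
+ */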
+
+/* apa102 default settings */
+#define CR_MAX_BRIGHTNESS	GENMASK(7, 0)
+#define LM_MAX_BRIGHTNESS	GENMASK(4, 0)
+#define CH_NUM			4
+#define START_BYTE		0
+#define END_BYTE		GENMASK(7, 0)
+#define LED_FRAME_HEADER	GENMASK(7, 5)
+
+enum led_channels {
+	RED,
+	GREEN,
+	BLUE,
+	LUMA,
+};
+
+struct apa102_led {
+	char			name[LED_MAX_NAME_SIZE];
+	struct apa102		*priv;
+	struct led_classdev	ldev;
+	u8			brightness;
+};
+
+struct apa102 {
+	size_t			led_count;
+	struct device		*dev;
+	struct mutex		lock;
+	struct spi_device	*spi;
+	u8			*buf;
+	struct apa102_led	leds[];
+};
+
+static int apa102_sync(struct apa102 *priv)
+{
+	size_t	i;
+	size_t	bytes = 0;
+
+	for (i = 0; i < 4; i++)
+		priv->buf[bytes++] = START_BYTE;
+
+	for (i = 0; i < priv->led_count; i++) {
+		priv->buf[bytes++] = LED_FRAME_HEADER |
+				     priv->leds[i * CH_NUM + LUMA].brightness;
+		priv->buf[bytes++] = priv->leds[i * CH_NUM + BLUE].brightness;
+		priv->buf[bytes++] = priv->leds[i * CH_NUM + GREEN].brightness;
+		priv->buf[bytes++] = priv->leds[i * CH_NUM + RED].brightness;
+	}
+
+	for (i = 0; i < 4; i++)
+		priv->buf[bytes++] = END_BYTE;
+
+	return spi_write(priv->spi, priv->buf, bytes);
+}
+
+static int apa102_set_sync(struct led_classdev *ldev,
+			   enum led_brightness brightness)
+{
+	int			ret;
+	struct apa102_led	*led = container_of(ldev,
+						    struct apa102_led,
+						    ldev);
+
+	dev_dbg(led->priv->dev, "Set brightness of %s to %d\n",
+		led->name, brightness);
+
+	mutex_lock(&led->priv->lock);
+	led->brightness = (u8)brightness;
+	ret = apa102_sync(led->priv);
+	mutex_unlock(&led->priv->lock);
+
+	return ret;
+}
+
+static int apa102_probe_dt(struct apa102 *priv)
+{
+	u32			i = 0;
+	int			j = 0;
+	struct apa102_led	*led;
+	struct fwnode_handle	*child;
+	struct device_node	*np;
+	int			ret;
+	const char		*str;
+	static const char	* const rgb_name[] = {"red",
+						      "green",
+						      "blue",
+						      "luma"};
+
+	device_for_each_child_node(priv->dev, child) {
+		np = to_of_node(child);
+		ret = fwnode_property_read_u32(child, "reg", &i);
+		if (ret) {
+			fwnode_handle_put(child);
+			return ret;
+		}
+
+		if (i >= priv->led_count) {
+			fwnode_handle_put(child);
+			return -EINVAL;
+		}
+
+		/*
+		 * For each physical LED, 4 LEDs are created representing
+		 * the 4 components: red, green, blue and global luma.
+		 */
+		for (j = 0; j < CH_NUM; j++) {
+
+			led = &priv->leds[i * CH_NUM + j];
+			ret = fwnode_property_read_string(child, "label", &str);
+			if (ret)
+				snprintf(led->name, sizeof(led->name),
+					 "apa102:%s:%d", rgb_name[j], i);
+			else
+				snprintf(led->name, sizeof(led->name),
+					 "apa102:%s:%s", rgb_name[j], str);
+
+			fwnode_property_read_string(child,
+						    "linux,default-trigger",
+						    &led->ldev.default_trigger);
+
+			led->priv			  = priv;
+			led->ldev.name			  = led->name;
+			if (j == LUMA) {
+				led->ldev.brightness	 = led->brightness
+							 = LM_MAX_BRIGHTNESS;
+				led->ldev.max_brightness = LM_MAX_BRIGHTNESS;
+			} else {
+				led->ldev.brightness	 = led->brightness
+							 = 0;
+				led->ldev.max_brightness = CR_MAX_BRIGHTNESS;
+			}
+
+			led->ldev.brightness_set_blocking = apa102_set_sync;
+
+			ret = devm_of_led_classdev_register(priv->dev, np,
+							    &led->ldev);
+			if (ret) {
+				dev_err(priv->dev,
+					"failed to register LED %s, err %d",
+					led->name, ret);
+				fwnode_handle_put(child);
+				return ret;
+			}
+
+			led->ldev.dev->of_node = np;
+		}
+	}
+
+	return 0;
+}
+
+static int apa102_probe(struct spi_device *spi)
+{
+	struct apa102	*priv;
+	size_t		led_count;
+	int		ret;
+
+	led_count = device_get_child_node_count(&spi->dev);
+	if (!led_count) {
+		dev_err(&spi->dev, "No LEDs defined in device tree!");
+		return -ENODEV;
+	}
+
+	priv = devm_kzalloc(&spi->dev,
+			    struct_size(priv, leds, led_count * CH_NUM),
+			    GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
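+	/* 32-bit start frame + one 32-bit frame per LED + 32-bit end frame */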
+	priv->buf = devm_kzalloc(&spi->dev, led_count * CH_NUM + 8, GFP_KERNEL);
+	if (!priv->buf)
+		return -ENOMEM;
+
+	mutex_init(&priv->lock);
+	priv->led_count	= led_count;
+	priv->dev	= &spi->dev;
+	priv->spi	= spi;
+
+	ret = apa102_probe_dt(priv);
+	if (ret)
+		return ret;
+
+	/* Set the LEDs with default values at start */
+	ret = apa102_sync(priv);
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, priv);
+
+	return 0;
+}
+
+static int apa102_remove(struct spi_device *spi)
+{
+	struct apa102 *priv = spi_get_drvdata(spi);
+
+	mutex_destroy(&priv->lock);
+
+	return 0;
+}
+
+static const struct of_device_id apa102_dt_ids[] = {
+	{ .compatible = "shiji,apa102", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, apa102_dt_ids);
+
+static struct spi_driver apa102_driver = {
+	.probe		= apa102_probe,
+	.remove		= apa102_remove,
+	.driver = {
+		.name		= KBUILD_MODNAME,
+		.of_match_table	= apa102_dt_ids,
+	},
+};
+
+module_spi_driver(apa102_driver);
+
+MODULE_AUTHOR("Nicolas Belin <nbelin@baylibre.com>");
+MODULE_DESCRIPTION("apa102 LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:apa102");
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 055c90b..8ed17b9 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -517,3 +517,73 @@
 	mutex_unlock(&con_mutex);
 }
 EXPORT_SYMBOL_GPL(mbox_controller_unregister);
+
+static void __devm_mbox_controller_unregister(struct device *dev, void *res)
+{
+	struct mbox_controller **mbox = res;
+
+	mbox_controller_unregister(*mbox);
+}
+
+static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
+{
+	struct mbox_controller **mbox = res;
+
+	if (WARN_ON(!mbox || !*mbox))
+		return 0;
+
+	return *mbox == data;
+}
+
+/**
+ * devm_mbox_controller_register() - managed mbox_controller_register()
+ * @dev: device owning the mailbox controller being registered
+ * @mbox: mailbox controller being registered
+ *
+ * This function adds a device-managed resource that will make sure that the
+ * mailbox controller, which is registered using mbox_controller_register()
+ * as part of this function, will be unregistered along with the rest of
+ * device-managed resources upon driver probe failure or driver removal.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int devm_mbox_controller_register(struct device *dev,
+				  struct mbox_controller *mbox)
+{
+	struct mbox_controller **ptr;
+	int err;
+
+	ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
+			   GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	err = mbox_controller_register(mbox);
+	if (err < 0) {
+		devres_free(ptr);
+		return err;
+	}
+
+	devres_add(dev, ptr);
+	*ptr = mbox;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
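+
+/*
+ * Minimal usage sketch (driver-side, names hypothetical): with the devm_
+ * variant a controller driver needs no matching unregister in its
+ * .remove() path:
+ *
+ *	err = devm_mbox_controller_register(&pdev->dev, &foo->mbox);
+ *	if (err)
+ *		return err;
+ *
+ * The mtk-cmdq-mailbox.c conversion below uses exactly this pattern.
+ */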
+
+/**
+ * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
+ * @dev: device owning the mailbox controller being unregistered
+ * @mbox: mailbox controller being unregistered
+ *
+ * This function unregisters the mailbox controller and removes the device-
+ * managed resource that was set up to automatically unregister the mailbox
+ * controller on driver probe failure or driver removal. It's typically not
+ * necessary to call this function.
+ */
+void devm_mbox_controller_unregister(struct device *dev,
+				     struct mbox_controller *mbox)
+{
+	WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
+			       devm_mbox_controller_match, mbox));
+}
+EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index f7cc29c..2c1b80d 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -17,10 +17,10 @@
 #include <linux/of_device.h>
 
 #define CMDQ_OP_CODE_MASK		(0xff << CMDQ_OP_CODE_SHIFT)
-#define CMDQ_IRQ_MASK			0xffff
 #define CMDQ_NUM_CMD(t)			(t->cmd_buf_size / CMDQ_INST_SIZE)
 
 #define CMDQ_CURR_IRQ_STATUS		0x10
+#define CMDQ_SYNC_TOKEN_UPDATE		0x68
 #define CMDQ_THR_SLOT_CYCLES		0x30
 #define CMDQ_THR_BASE			0x100
 #define CMDQ_THR_SIZE			0x80
@@ -71,6 +71,7 @@
 	void __iomem		*base;
 	u32			irq;
 	u32			thread_nr;
+	u32			irq_mask;
 	struct cmdq_thread	*thread;
 	struct clk		*clock;
 	bool			suspended;
@@ -103,8 +104,12 @@
 
 static void cmdq_init(struct cmdq *cmdq)
 {
+	int i;
+
 	WARN_ON(clk_enable(cmdq->clock) < 0);
 	writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
+	for (i = 0; i <= CMDQ_MAX_EVENT; i++)
+		writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
 	clk_disable(cmdq->clock);
 }
 
@@ -284,11 +289,11 @@
 	unsigned long irq_status, flags = 0L;
 	int bit;
 
-	irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & CMDQ_IRQ_MASK;
-	if (!(irq_status ^ CMDQ_IRQ_MASK))
+	irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
+	if (!(irq_status ^ cmdq->irq_mask))
 		return IRQ_NONE;
 
-	for_each_clear_bit(bit, &irq_status, fls(CMDQ_IRQ_MASK)) {
+	for_each_clear_bit(bit, &irq_status, cmdq->thread_nr) {
 		struct cmdq_thread *thread = &cmdq->thread[bit];
 
 		spin_lock_irqsave(&thread->chan->lock, flags);
@@ -337,17 +342,8 @@
 {
 	struct cmdq *cmdq = platform_get_drvdata(pdev);
 
-	mbox_controller_unregister(&cmdq->mbox);
 	clk_unprepare(cmdq->clock);
 
-	if (cmdq->mbox.chans)
-		devm_kfree(&pdev->dev, cmdq->mbox.chans);
-
-	if (cmdq->thread)
-		devm_kfree(&pdev->dev, cmdq->thread);
-
-	devm_kfree(&pdev->dev, cmdq);
-
 	return 0;
 }
 
@@ -481,6 +477,9 @@
 		dev_err(dev, "failed to get irq\n");
 		return -EINVAL;
 	}
+
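+	/*
+	 * The IRQ status register carries one bit per GCE thread, so the
+	 * mask is derived from the per-SoC thread count: 16 threads
+	 * (mt8173) gives the previously hard-coded 0xffff, 24 threads
+	 * (mt8183) gives 0xffffff.
+	 */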
+	cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev);
+	cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
 	err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
 			       "mtk_cmdq", cmdq);
 	if (err < 0) {
@@ -497,7 +496,6 @@
 		return PTR_ERR(cmdq->clock);
 	}
 
-	cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev);
 	cmdq->mbox.dev = dev;
 	cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
 					sizeof(*cmdq->mbox.chans), GFP_KERNEL);
@@ -524,7 +522,7 @@
 		cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
 	}
 
-	err = mbox_controller_register(&cmdq->mbox);
+	err = devm_mbox_controller_register(dev, &cmdq->mbox);
 	if (err < 0) {
 		dev_err(dev, "failed to register mailbox: %d\n", err);
 		return err;
@@ -545,6 +543,7 @@
 
 static const struct of_device_id cmdq_of_ids[] = {
 	{.compatible = "mediatek,mt8173-gce", .data = (void *)16},
+	{.compatible = "mediatek,mt8183-gce", .data = (void *)24},
 	{}
 };
 
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
index 03aba03..b331cff 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
@@ -25,6 +25,7 @@
 static const char * const mtk_mdp_comp_stem[MTK_MDP_COMP_TYPE_MAX] = {
 	"mdp_rdma",
 	"mdp_rsz",
+	"mdp_tdshp",
 	"mdp_wdma",
 	"mdp_wrot",
 };
@@ -40,6 +41,7 @@
 	{ MTK_MDP_RSZ,	0 },
 	{ MTK_MDP_RSZ,	1 },
 	{ MTK_MDP_RSZ,	2 },
+	{ MTK_MDP_TDSHP, 0 },
 	{ MTK_MDP_WDMA,	0 },
 	{ MTK_MDP_WROT,	0 },
 	{ MTK_MDP_WROT,	1 },
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h
index 63b3983..caedd36 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h
@@ -19,12 +19,14 @@
  * enum mtk_mdp_comp_type - the MDP component
  * @MTK_MDP_RDMA:	Read DMA
  * @MTK_MDP_RSZ:	Resizer
+ * @MTK_MDP_TDSHP:	2D sharpness
  * @MTK_MDP_WDMA:	Write DMA
  * @MTK_MDP_WROT:	Write DMA with rotation
  */
 enum mtk_mdp_comp_type {
 	MTK_MDP_RDMA,
 	MTK_MDP_RSZ,
+	MTK_MDP_TDSHP,
 	MTK_MDP_WDMA,
 	MTK_MDP_WROT,
 	MTK_MDP_COMP_TYPE_MAX,
@@ -36,6 +38,7 @@
 	MTK_MDP_COMP_RSZ0,
 	MTK_MDP_COMP_RSZ1,
 	MTK_MDP_COMP_RSZ2,
+	MTK_MDP_COMP_TDSHP0,
 	MTK_MDP_COMP_WDMA,
 	MTK_MDP_COMP_WROT0,
 	MTK_MDP_COMP_WROT1,
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
index 3deb054..f2a0bcd 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -39,6 +39,21 @@
 
 static const struct of_device_id mtk_mdp_comp_dt_ids[] = {
 	{
+		.compatible = "mediatek,mt8167-mdp-rdma",
+		.data = (void *)MTK_MDP_RDMA
+	}, {
+		.compatible = "mediatek,mt8167-mdp-rsz",
+		.data = (void *)MTK_MDP_RSZ
+	}, {
+		.compatible = "mediatek,mt8167-mdp-wdma",
+		.data = (void *)MTK_MDP_WDMA
+	}, {
+		.compatible = "mediatek,mt8167-mdp-wrot",
+		.data = (void *)MTK_MDP_WROT
+	}, {
+		.compatible = "mediatek,mt8167-mdp-tdshp",
+		.data = (void *)MTK_MDP_TDSHP
+	}, {
 		.compatible = "mediatek,mt8173-mdp-rdma",
 		.data = (void *)MTK_MDP_RDMA
 	}, {
@@ -56,6 +71,7 @@
 
 static const struct of_device_id mtk_mdp_of_ids[] = {
 	{ .compatible = "mediatek,mt8173-mdp", },
+	{ .compatible = "mediatek,mt8167-mdp", },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, mtk_mdp_of_ids);
@@ -66,7 +82,8 @@
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
-		mtk_mdp_comp_clock_on(dev, mdp->comp[i]);
+		if (mdp->comp[i])
+			mtk_mdp_comp_clock_on(dev, mdp->comp[i]);
 }
 
 static void mtk_mdp_clock_off(struct mtk_mdp_dev *mdp)
@@ -75,7 +92,8 @@
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
-		mtk_mdp_comp_clock_off(dev, mdp->comp[i]);
+		if (mdp->comp[i])
+			mtk_mdp_comp_clock_off(dev, mdp->comp[i]);
 }
 
 static void mtk_mdp_wdt_worker(struct work_struct *work)
@@ -99,13 +117,45 @@
 	queue_work(mdp->wdt_wq, &mdp->wdt_work);
 }
 
+static const struct of_device_id mtk_mdp_comp_of_match[] = {
+	{ .compatible = "mediatek,mt8167-mdp-wdma" },
+	{ .compatible = "mediatek,mt8167-mdp-wrot" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mtk_mdp_comp_of_match);
+
+static struct platform_driver mtk_mdp_comp = {
+	.driver		= {
+		.name	= "mediatek-mdp-comp",
+		.of_match_table = mtk_mdp_comp_of_match,
+	},
+};
+
 static int mtk_mdp_probe(struct platform_device *pdev)
 {
 	struct mtk_mdp_dev *mdp;
 	struct device *dev = &pdev->dev;
 	struct device_node *node, *parent;
+	struct platform_device *cmdq_dev;
 	int i, ret = 0;
 
+	/* Check whether cmdq driver is ready */
+	node = of_parse_phandle(dev->of_node, "mediatek,gce", 0);
+	if (!node) {
+		dev_err(dev, "cannot get gce node handle\n");
+		return -EINVAL;
+	}
+
+	cmdq_dev = of_find_device_by_node(node);
+	of_node_put(node);
+	if (!cmdq_dev || !cmdq_dev->dev.driver) {
+		dev_dbg(dev, "cmdq driver not ready, deferring probe\n");
+		return -EPROBE_DEFER;
+	}
+
 	mdp = devm_kzalloc(dev, sizeof(*mdp), GFP_KERNEL);
 	if (!mdp)
 		return -ENOMEM;
@@ -164,6 +214,8 @@
 			goto err_comp;
 	}
 
+	platform_driver_register(&mtk_mdp_comp);
+
 	mdp->job_wq = create_singlethread_workqueue(MTK_MDP_MODULE_NAME);
 	if (!mdp->job_wq) {
 		dev_err(&pdev->dev, "unable to alloc job workqueue\n");
@@ -201,6 +253,9 @@
 	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 
 	pm_runtime_enable(dev);
+
+	mdp->cmdq_client = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
+
 	dev_dbg(dev, "mdp-%d registered successfully\n", mdp->id);
 
 	return 0;
@@ -240,6 +295,8 @@
 	for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
 		mtk_mdp_comp_deinit(&pdev->dev, mdp->comp[i]);
 
+	cmdq_mbox_destroy(mdp->cmdq_client);
+
 	dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
 	return 0;
 }
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
index ad1cff3..b21acc6 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
@@ -22,6 +22,7 @@
 #include <media/v4l2-mem2mem.h>
 #include <media/videobuf2-core.h>
 #include <media/videobuf2-dma-contig.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
 
 #include "mtk_mdp_vpu.h"
 #include "mtk_mdp_comp.h"
@@ -95,6 +96,8 @@
 	struct v4l2_ctrl *hflip;
 	struct v4l2_ctrl *vflip;
 	struct v4l2_ctrl *global_alpha;
+	struct v4l2_ctrl *sharpness;
+	struct v4l2_ctrl *contrast_auto;
 };
 
 /**
@@ -175,6 +178,7 @@
 	unsigned long			id_counter;
 	struct workqueue_struct		*wdt_wq;
 	struct work_struct		wdt_work;
+	struct cmdq_client		*cmdq_client;
 };
 
 /**
@@ -213,6 +217,8 @@
 	int				rotation;
 	u32				hflip:1;
 	u32				vflip:1;
+	u32				contrast_auto:1;
+	int				sharpness;
 	struct mtk_mdp_dev		*mdp_dev;
 	struct v4l2_m2m_ctx		*m2m_ctx;
 	struct v4l2_fh			fh;
@@ -227,6 +233,7 @@
 	struct mtk_mdp_vpu		vpu;
 	struct mutex			slock;
 	struct work_struct		work;
+	struct cmdq_pkt			*cmdq_handle;
 };
 
 extern int mtk_mdp_dbg_level;
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
index 78e2cc0..afc49ce 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
@@ -22,10 +22,12 @@
 	AP_MDP_INIT		= 0xd000,
 	AP_MDP_DEINIT		= 0xd001,
 	AP_MDP_PROCESS		= 0xd002,
+	AP_MDP_CMDQ_DONE	= 0xd003,
 
 	VPU_MDP_INIT_ACK	= 0xe000,
 	VPU_MDP_DEINIT_ACK	= 0xe001,
-	VPU_MDP_PROCESS_ACK	= 0xe002
+	VPU_MDP_PROCESS_ACK	= 0xe002,
+	VPU_MDP_CMDQ_DONE_ACK	= 0xe003
 };
 
 #pragma pack(push, 4)
@@ -53,7 +55,7 @@
 	uint32_t msg_id;
 	uint32_t ipi_id;
 	uint64_t ap_inst;
-	uint32_t vpu_inst_addr;
+	uint64_t vpu_inst_addr;
 };
 
 /**
@@ -68,7 +70,7 @@
 	uint32_t msg_id;
 	uint32_t ipi_id;
 	uint64_t ap_inst;
-	uint32_t vpu_inst_addr;
+	uint64_t vpu_inst_addr;
 	int32_t status;
 };
 
@@ -78,26 +80,24 @@
  * @y        : top
  * @w        : width
  * @h        : height
- * @w_stride : bytes in horizontal
- * @h_stride : bytes in vertical
  * @crop_x   : cropped left
  * @crop_y   : cropped top
  * @crop_w   : cropped width
  * @crop_h   : cropped height
  * @format   : color format
+ * @pitch    : bytes per line for each plane
  */
 struct mdp_config {
 	int32_t x;
 	int32_t y;
 	int32_t w;
 	int32_t h;
-	int32_t w_stride;
-	int32_t h_stride;
 	int32_t crop_x;
 	int32_t crop_y;
 	int32_t crop_w;
 	int32_t crop_h;
 	int32_t format;
+	uint32_t pitch[MTK_MDP_MAX_NUM_PLANE];
 };
 
 struct mdp_buffer {
@@ -113,12 +113,54 @@
 	int32_t alpha; /* global alpha */
 };
 
+/**
+ * struct mdp_cmdq_info - command queue information
+ * @engine_flag  : bit flags of the engines used.
+ * @vpu_buf_addr : address of the instruction buffer in the VPU.
+ *                 This must point to a 64-bit aligned uint32_t array.
+ * @ap_buf_addr  : kernel virtual address of the instruction buffer on the AP.
+ * @ap_buf_pa    : physical address of the instruction buffer on the AP.
+ * @buf_size     : size of the buffer, in bytes.
+ * @cmd_offset   : offset of the first real instruction relative to
+ *                 ap_buf_addr.
+ * @cmd_size     : used size, in bytes.
+ * @reserved     : padding, keeps the structure 64-bit aligned.
+ */
+struct mdp_cmdq_info {
+	uint64_t engine_flag;
+	uint64_t vpu_buf_addr;
+	uint64_t ap_buf_addr;
+	uint64_t ap_buf_pa;
+	uint32_t buf_size;
+	uint32_t cmd_offset;
+	uint32_t cmd_size;
+	uint32_t reserved;
+};
+
+/**
+ * struct mdp_pq_info - picture quality information
+ * @sharpness_enable        : sharpness enable.
+ * @sharpness_level         : sharpness level.
+ * @dynamic_contrast_enable : dynamic contrast enable.
+ * @reserved                : padding, keeps the structure 64-bit aligned.
+ */
+struct mdp_pq_info {
+	uint32_t sharpness_enable;
+	uint32_t sharpness_level;
+	uint32_t dynamic_contrast_enable;
+	uint32_t reserved;
+};
+
 struct mdp_process_vsi {
 	struct mdp_config src_config;
 	struct mdp_buffer src_buffer;
 	struct mdp_config dst_config;
 	struct mdp_buffer dst_buffer;
 	struct mdp_config_misc misc;
+	struct mdp_cmdq_info cmdq;
+	struct mdp_pq_info pq;
 };
 
 #pragma pack(pop)
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
index ceffc31..f1403d6 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -27,6 +27,7 @@
 #include "mtk_mdp_regs.h"
 #include "mtk_vpu.h"
 
+#define V4L2_CID_MTK_MDP_CONTRAST_AUTO (V4L2_CID_USER_BASE | 0x1000)
 
 /**
  *  struct mtk_mdp_pix_limit - image pixel size limits
@@ -57,7 +58,7 @@
 	{
 		.pixelformat	= V4L2_PIX_FMT_MT21C,
 		.depth		= { 8, 4 },
-		.row_depth	= { 8, 8 },
+		.row_depth	= { 8, 4 },
 		.num_planes	= 2,
 		.num_comp	= 2,
 		.align		= &mtk_mdp_size_align,
@@ -65,15 +66,47 @@
 	}, {
 		.pixelformat	= V4L2_PIX_FMT_NV12M,
 		.depth		= { 8, 4 },
-		.row_depth	= { 8, 8 },
+		.row_depth	= { 8, 4 },
 		.num_planes	= 2,
 		.num_comp	= 2,
 		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
 				  MTK_MDP_FMT_FLAG_CAPTURE,
 	}, {
+		.pixelformat	= V4L2_PIX_FMT_NV12,
+		.depth		= { 12 },
+		.row_depth	= { 8, 4 },
+		.num_planes	= 1,
+		.num_comp	= 2,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_NV21M,
+		.depth		= { 8, 4 },
+		.row_depth	= { 8, 4 },
+		.num_planes	= 2,
+		.num_comp	= 2,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_NV21,
+		.depth		= { 12 },
+		.row_depth	= { 8, 4 },
+		.num_planes	= 1,
+		.num_comp	= 2,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_YVU420M,
+		.depth		= { 8, 2, 2 },
+		.row_depth	= { 8, 2, 2 },
+		.num_planes	= 3,
+		.num_comp	= 3,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
 		.pixelformat	= V4L2_PIX_FMT_YUV420M,
 		.depth		= { 8, 2, 2 },
-		.row_depth	= { 8, 4, 4 },
+		.row_depth	= { 8, 2, 2 },
 		.num_planes	= 3,
 		.num_comp	= 3,
 		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
@@ -81,11 +114,131 @@
 	}, {
 		.pixelformat	= V4L2_PIX_FMT_YVU420,
 		.depth		= { 12 },
-		.row_depth	= { 8 },
+		.row_depth	= { 8, 2, 2 },
 		.num_planes	= 1,
 		.num_comp	= 3,
 		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
 				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_YUV420,
+		.depth		= { 12 },
+		.row_depth	= { 8, 2, 2 },
+		.num_planes	= 1,
+		.num_comp	= 3,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_YUV422P,
+		.depth		= { 16 },
+		.row_depth	= { 8, 4, 4 },
+		.num_planes	= 1,
+		.num_comp	= 3,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_NV16,
+		.depth		= { 16 },
+		.row_depth	= { 8, 8 },
+		.num_planes	= 1,
+		.num_comp	= 2,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_NV16M,
+		.depth		= { 8, 8 },
+		.row_depth	= { 8, 8 },
+		.num_planes	= 2,
+		.num_comp	= 2,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_YUYV,
+		.depth		= { 16 },
+		.row_depth	= { 16 },
+		.num_planes	= 1,
+		.num_comp	= 1,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_UYVY,
+		.depth		= { 16 },
+		.row_depth	= { 16 },
+		.num_planes	= 1,
+		.num_comp	= 1,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_YVYU,
+		.depth		= { 16 },
+		.row_depth	= { 16 },
+		.num_planes	= 1,
+		.num_comp	= 1,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_VYUY,
+		.depth		= { 16 },
+		.row_depth	= { 16 },
+		.num_planes	= 1,
+		.num_comp	= 1,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_ARGB32,
+		.depth		= { 32 },
+		.row_depth	= { 32 },
+		.num_planes	= 1,
+		.num_comp	= 1,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_ABGR32,
+		.depth		= { 32 },
+		.row_depth	= { 32 },
+		.num_planes	= 1,
+		.num_comp	= 1,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_XRGB32,
+		.depth		= { 32 },
+		.row_depth	= { 32 },
+		.num_planes	= 1,
+		.num_comp	= 1,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_XBGR32,
+		.depth		= { 32 },
+		.row_depth	= { 32 },
+		.num_planes	= 1,
+		.num_comp	= 1,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_RGB565,
+		.depth		= { 16 },
+		.row_depth	= { 16 },
+		.num_planes	= 1,
+		.num_comp	= 1,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_RGB24,
+		.depth		= { 24 },
+		.row_depth	= { 24 },
+		.num_planes	= 1,
+		.num_comp	= 1,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_BGR24,
+		.depth		= { 24 },
+		.row_depth	= { 24 },
+		.num_planes	= 1,
+		.num_comp	= 1,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
 	}
 };
 
@@ -448,26 +601,54 @@
 				 struct mtk_mdp_addr *addr)
 {
 	u32 pix_size, planes, i;
+	u32 pix_size_tmp;
 
 	pix_size = frame->width * frame->height;
 	planes = min_t(u32, frame->fmt->num_planes, ARRAY_SIZE(addr->addr));
 	for (i = 0; i < planes; i++)
 		addr->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
 
-	if (planes == 1) {
-		if (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) {
+	if (planes == 1 && frame->fmt->num_comp > 1) {
+		pix_size_tmp = pix_size;
+		if (frame->width < frame->pitch[0])
+			pix_size = frame->pitch[0] * frame->height;
+
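+		/*
+		 * The buffer is contiguous, so the chroma plane addresses are
+		 * derived from the luma base.  When the line stride exceeds
+		 * the visible width, pix_size is recomputed from the pitch so
+		 * the plane offsets account for the padding bytes.
+		 */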
+		if (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420 ||
+		    frame->fmt->pixelformat == V4L2_PIX_FMT_YUV420) {
 			addr->addr[1] = (dma_addr_t)(addr->addr[0] + pix_size);
 			addr->addr[2] = (dma_addr_t)(addr->addr[1] +
 					(pix_size >> 2));
+			frame->pitch[1] = frame->pitch[0] >> 1;
+			frame->pitch[2] = frame->pitch[0] >> 1;
+		} else if (frame->fmt->pixelformat == V4L2_PIX_FMT_YUV422P) {
+			addr->addr[1] = (dma_addr_t)(addr->addr[0] + pix_size);
+			addr->addr[2] = (dma_addr_t)(addr->addr[1] +
+					(pix_size >> 1));
+			frame->pitch[1] = frame->pitch[0] >> 1;
+			frame->pitch[2] = frame->pitch[0] >> 1;
+		} else if (frame->fmt->pixelformat == V4L2_PIX_FMT_NV12 ||
+			frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) {
+			addr->addr[1] = (dma_addr_t)(addr->addr[0] + pix_size);
+			addr->addr[2] = 0;
+			frame->pitch[1] = frame->pitch[0];
+			frame->pitch[2] = 0;
+		} else if (frame->fmt->pixelformat == V4L2_PIX_FMT_NV16) {
+			addr->addr[1] = (dma_addr_t)(addr->addr[0] + pix_size);
+			addr->addr[2] = 0;
+			frame->pitch[1] = frame->pitch[0];
+			frame->pitch[2] = 0;
 		} else {
 			dev_err(&ctx->mdp_dev->pdev->dev,
 				"Invalid pixelformat:0x%x\n",
 				frame->fmt->pixelformat);
 		}
+
+		pix_size = pix_size_tmp;
 	}
-	mtk_mdp_dbg(3, "[%d] planes:%d, size:%d, addr:%p,%p,%p",
-		    ctx->id, planes, pix_size, (void *)addr->addr[0],
-		    (void *)addr->addr[1], (void *)addr->addr[2]);
+	mtk_mdp_dbg(3, "[%d] planes:%d, size:%u, pitch:%u,%u,%u, addr:%p,%p,%p",
+		    ctx->id, planes, pix_size, frame->pitch[0], frame->pitch[1],
+		    frame->pitch[2], (void *)addr->addr[0],
+		    (void *)addr->addr[1], (void *)addr->addr[2]);
 }
 
 static void mtk_mdp_m2m_get_bufs(struct mtk_mdp_ctx *ctx)
@@ -543,6 +724,8 @@
 	mtk_mdp_hw_set_rotation(ctx);
 	mtk_mdp_hw_set_global_alpha(ctx);
 
+	mtk_mdp_hw_set_pq_info(ctx);
+
 	ret = mtk_mdp_vpu_process(&ctx->vpu);
 	if (ret) {
 		dev_err(&mdp->pdev->dev, "processing failed: %d", ret);
@@ -761,6 +944,11 @@
 			mtk_mdp_ctx_state_lock_clear(ctx, MTK_MDP_SRC_FMT);
 		else
 			mtk_mdp_ctx_state_lock_clear(ctx, MTK_MDP_DST_FMT);
+	} else {
+		if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+			mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_SRC_FMT);
+		else
+			mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_DST_FMT);
 	}
 
 	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
@@ -1034,6 +1222,12 @@
 	case V4L2_CID_ALPHA_COMPONENT:
 		ctx->d_frame.alpha = ctrl->val;
 		break;
+	case V4L2_CID_SHARPNESS:
+		ctx->sharpness = ctrl->val;
+		break;
+	case V4L2_CID_MTK_MDP_CONTRAST_AUTO:
+		ctx->contrast_auto = ctrl->val;
+		break;
 	}
 
 	return 0;
@@ -1043,6 +1237,16 @@
 	.s_ctrl = mtk_mdp_s_ctrl,
 };
 
+static const struct v4l2_ctrl_config mtk_mdp_ctrl_contrast_auto_config = {
+	.ops = &mtk_mdp_ctrl_ops,
+	.id = V4L2_CID_MTK_MDP_CONTRAST_AUTO,
+	.name = "Contrast, Automatic",
+	.type = V4L2_CTRL_TYPE_BOOLEAN,
+	.min = 0,
+	.max = 1,
+	.step = 1,
+};
+
 static int mtk_mdp_ctrls_create(struct mtk_mdp_ctx *ctx)
 {
 	v4l2_ctrl_handler_init(&ctx->ctrl_handler, MTK_MDP_MAX_CTRL_NUM);
@@ -1061,6 +1265,13 @@
 						    &mtk_mdp_ctrl_ops,
 						    V4L2_CID_ALPHA_COMPONENT,
 						    0, 255, 1, 0);
+	ctx->ctrls.sharpness = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+						 &mtk_mdp_ctrl_ops,
+						 V4L2_CID_SHARPNESS,
+						 0, 4096, 1, 0);
+	ctx->ctrls.contrast_auto = v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+					&mtk_mdp_ctrl_contrast_auto_config,
+					NULL);
 	ctx->ctrls_rdy = ctx->ctrl_handler.error == 0;
 
 	if (ctx->ctrl_handler.error) {
@@ -1156,12 +1367,20 @@
 	}
 
 	list_add(&ctx->list, &mdp->ctx_list);
+
+	ctx->cmdq_handle = cmdq_pkt_create(mdp->cmdq_client, PAGE_SIZE);
+	if (IS_ERR_OR_NULL(ctx->cmdq_handle)) {
+		ret = ctx->cmdq_handle ? PTR_ERR(ctx->cmdq_handle) : -ENOMEM;
+		dev_err(&mdp->pdev->dev, "Create cmdq pkt failed %d\n", ret);
+		goto err_create_cmdq_pkt;
+	}
+
 	mutex_unlock(&mdp->lock);
 
 	mtk_mdp_dbg(0, "%s [%d]", dev_name(&mdp->pdev->dev), ctx->id);
 
 	return 0;
 
+err_create_cmdq_pkt:
+	list_del_init(&ctx->list);
 err_load_vpu:
 	mdp->ctx_num--;
 	v4l2_m2m_ctx_release(ctx->m2m_ctx);
@@ -1191,6 +1410,7 @@
 	mtk_mdp_vpu_deinit(&ctx->vpu);
 	mdp->ctx_num--;
 	list_del_init(&ctx->list);
+	cmdq_pkt_destroy(ctx->cmdq_handle);
 
 	mtk_mdp_dbg(0, "%s [%d]", dev_name(&mdp->pdev->dev), ctx->id);
 
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c
index 86d57f3..135d3ae 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c
@@ -17,40 +17,57 @@
 
 #include "mtk_mdp_core.h"
 #include "mtk_mdp_regs.h"
-
-
-#define MDP_COLORFMT_PACK(VIDEO, PLANE, COPLANE, HF, VF, BITS, GROUP, SWAP, ID)\
-	(((VIDEO) << 27) | ((PLANE) << 24) | ((COPLANE) << 22) |\
-	((HF) << 20) | ((VF) << 18) | ((BITS) << 8) | ((GROUP) << 6) |\
-	((SWAP) << 5) | ((ID) << 0))
-
-enum MDP_COLOR_ENUM {
-	MDP_COLOR_UNKNOWN = 0,
-	MDP_COLOR_NV12 = MDP_COLORFMT_PACK(0, 2, 1, 1, 1, 8, 1, 0, 12),
-	MDP_COLOR_I420 = MDP_COLORFMT_PACK(0, 3, 0, 1, 1, 8, 1, 0, 8),
-	MDP_COLOR_YV12 = MDP_COLORFMT_PACK(0, 3, 0, 1, 1, 8, 1, 1, 8),
-	/* Mediatek proprietary format */
-	MDP_COLOR_420_MT21 = MDP_COLORFMT_PACK(5, 2, 1, 1, 1, 256, 1, 0, 12),
-};
+#include "mtk_mdp_type.h"
 
 static int32_t mtk_mdp_map_color_format(int v4l2_format)
 {
 	switch (v4l2_format) {
 	case V4L2_PIX_FMT_NV12M:
 	case V4L2_PIX_FMT_NV12:
-		return MDP_COLOR_NV12;
+		return DP_COLOR_NV12;
+	case V4L2_PIX_FMT_NV21M:
+	case V4L2_PIX_FMT_NV21:
+		return DP_COLOR_NV21;
 	case V4L2_PIX_FMT_MT21C:
-		return MDP_COLOR_420_MT21;
+		return DP_COLOR_420_BLKP_UFO;
 	case V4L2_PIX_FMT_YUV420M:
 	case V4L2_PIX_FMT_YUV420:
-		return MDP_COLOR_I420;
+		return DP_COLOR_I420;
+	case V4L2_PIX_FMT_YVU420M:
 	case V4L2_PIX_FMT_YVU420:
-		return MDP_COLOR_YV12;
+		return DP_COLOR_YV12;
+	case V4L2_PIX_FMT_YUV422P:
+		return DP_COLOR_I422;
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV16M:
+		return DP_COLOR_NV16;
+	case V4L2_PIX_FMT_YUYV:
+		return DP_COLOR_YUYV;
+	case V4L2_PIX_FMT_UYVY:
+		return DP_COLOR_UYVY;
+	case V4L2_PIX_FMT_YVYU:
+		return DP_COLOR_YVYU;
+	case V4L2_PIX_FMT_VYUY:
+		return DP_COLOR_VYUY;
+	case V4L2_PIX_FMT_ARGB32:
+		return DP_COLOR_RGBA8888;
+	case V4L2_PIX_FMT_ABGR32:
+		return DP_COLOR_BGRA8888;
+	case V4L2_PIX_FMT_XRGB32:
+		return DP_COLOR_RGBA8888;
+	case V4L2_PIX_FMT_XBGR32:
+		return DP_COLOR_BGRA8888;
+	case V4L2_PIX_FMT_RGB565:
+		return DP_COLOR_RGB565;
+	case V4L2_PIX_FMT_RGB24:
+		return DP_COLOR_RGB888;
+	case V4L2_PIX_FMT_BGR24:
+		return DP_COLOR_BGR888;
 	}
 
 	mtk_mdp_err("Unknown format 0x%x", v4l2_format);
 
-	return MDP_COLOR_UNKNOWN;
+	return DP_COLOR_UNKNOWN;
 }
 
 void mtk_mdp_hw_set_input_addr(struct mtk_mdp_ctx *ctx,
@@ -95,18 +112,36 @@
 
 void mtk_mdp_hw_set_in_image_format(struct mtk_mdp_ctx *ctx)
 {
-	unsigned int i;
+	unsigned int i, num_comp;
 	struct mtk_mdp_frame *frame = &ctx->s_frame;
 	struct mdp_config *config = &ctx->vpu.vsi->src_config;
 	struct mdp_buffer *src_buf = &ctx->vpu.vsi->src_buffer;
 
-	src_buf->plane_num = frame->fmt->num_comp;
+	num_comp = frame->fmt->num_comp;
+	src_buf->plane_num = num_comp;
 	config->format = mtk_mdp_map_color_format(frame->fmt->pixelformat);
-	config->w_stride = 0; /* MDP will calculate it by color format. */
-	config->h_stride = 0; /* MDP will calculate it by color format. */
-
-	for (i = 0; i < src_buf->plane_num; i++)
-		src_buf->plane_size[i] = frame->payload[i];
+	if (frame->fmt->num_planes == 1 && num_comp > 1) {
+		config->pitch[0] = frame->pitch[0];
+		config->pitch[1] = frame->pitch[1];
+		config->pitch[2] = frame->pitch[2];
+		src_buf->plane_size[0] =
+			(int32_t)(frame->addr.addr[1] - frame->addr.addr[0]);
+		if (num_comp > 2) {
+			src_buf->plane_size[1] =
+				(int32_t)(frame->addr.addr[2] -
+				frame->addr.addr[1]);
+			src_buf->plane_size[2] = frame->payload[0] -
+				src_buf->plane_size[0] -
+				src_buf->plane_size[1];
+		} else {
+			src_buf->plane_size[1] = frame->payload[0] -
+				src_buf->plane_size[0];
+		}
+	} else {
+		for (i = 0; i < src_buf->plane_num; i++) {
+			config->pitch[i] = frame->pitch[i];
+			src_buf->plane_size[i] = (int32_t)frame->payload[i];
+		}
+	}
 }
 
 void mtk_mdp_hw_set_out_size(struct mtk_mdp_ctx *ctx)
@@ -126,17 +161,36 @@
 
 void mtk_mdp_hw_set_out_image_format(struct mtk_mdp_ctx *ctx)
 {
-	unsigned int i;
+	unsigned int i, num_comp;
 	struct mtk_mdp_frame *frame = &ctx->d_frame;
 	struct mdp_config *config = &ctx->vpu.vsi->dst_config;
 	struct mdp_buffer *dst_buf = &ctx->vpu.vsi->dst_buffer;
 
-	dst_buf->plane_num = frame->fmt->num_comp;
+	num_comp = frame->fmt->num_comp;
+	dst_buf->plane_num = num_comp;
 	config->format = mtk_mdp_map_color_format(frame->fmt->pixelformat);
-	config->w_stride = 0; /* MDP will calculate it by color format. */
-	config->h_stride = 0; /* MDP will calculate it by color format. */
-	for (i = 0; i < dst_buf->plane_num; i++)
-		dst_buf->plane_size[i] = frame->payload[i];
+	if (frame->fmt->num_planes == 1 && num_comp > 1) {
+		config->pitch[0] = frame->pitch[0];
+		config->pitch[1] = frame->pitch[1];
+		config->pitch[2] = frame->pitch[2];
+		dst_buf->plane_size[0] =
+			(int32_t)(frame->addr.addr[1] - frame->addr.addr[0]);
+		if (num_comp > 2) {
+			dst_buf->plane_size[1] =
+				(int32_t)(frame->addr.addr[2] -
+				frame->addr.addr[1]);
+			dst_buf->plane_size[2] = frame->payload[0] -
+				dst_buf->plane_size[0] -
+				dst_buf->plane_size[1];
+		} else {
+			dst_buf->plane_size[1] = frame->payload[0] -
+				dst_buf->plane_size[0];
+		}
+	} else {
+		for (i = 0; i < dst_buf->plane_num; i++) {
+			config->pitch[i] = frame->pitch[i];
+			dst_buf->plane_size[i] = (int32_t)frame->payload[i];
+		}
+	}
 }
 
 void mtk_mdp_hw_set_rotation(struct mtk_mdp_ctx *ctx)
@@ -154,3 +208,12 @@
 
 	misc->alpha = ctx->ctrls.global_alpha->val;
 }
+
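+/*
+ * Propagate the V4L2 picture-quality controls to the VPU via the shared
+ * vsi: a non-zero sharpness level implies sharpness enable, and the
+ * auto-contrast control maps to dynamic contrast.
+ */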
+void mtk_mdp_hw_set_pq_info(struct mtk_mdp_ctx *ctx)
+{
+	struct mdp_pq_info *vsi_pq = &ctx->vpu.vsi->pq;
+
+	vsi_pq->sharpness_enable = !!ctx->sharpness;
+	vsi_pq->sharpness_level = ctx->sharpness;
+	vsi_pq->dynamic_contrast_enable = ctx->contrast_auto;
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h
index 42bd057..a256254 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h
@@ -26,6 +26,7 @@
 void mtk_mdp_hw_set_out_image_format(struct mtk_mdp_ctx *ctx);
 void mtk_mdp_hw_set_rotation(struct mtk_mdp_ctx *ctx);
 void mtk_mdp_hw_set_global_alpha(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_pq_info(struct mtk_mdp_ctx *ctx);
 
 
 #endif /* __MTK_MDP_REGS_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_type.h b/drivers/media/platform/mtk-mdp/mtk_mdp_type.h
new file mode 100644
index 0000000..44e6d35
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_type.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#ifndef MTK_MDP_TYPE_H__
+#define MTK_MDP_TYPE_H__
+
+#define DP_COLORFMT_PACK(PACKED, LOOSE, VIDEO, PLANE, HFACTOR, VFACTOR, BITS, GROUP, SWAP_ENABLE, UNIQUEID)  \
+    (((PACKED)      << 27) |                                                             \
+     ((LOOSE)       << 26) |                                                             \
+     ((VIDEO)       << 23) |                                                             \
+     ((PLANE)       << 21) |                                                             \
+     ((HFACTOR)     << 19) |                                                             \
+     ((VFACTOR)     << 18) |                                                             \
+     ((BITS)        << 8)  |                                                             \
+     ((GROUP)       << 6)  |                                                             \
+     ((SWAP_ENABLE) << 5)  |                                                             \
+     ((UNIQUEID)    << 0))
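+
+/*
+ * Example decoding: DP_COLOR_NV12 packs PLANE=2 (two hardware planes),
+ * HFACTOR=1/VFACTOR=1 (what appear to be log2 chroma subsampling factors,
+ * i.e. 4:2:0), BITS=8, GROUP=1, SWAP_ENABLE=0 (CbCr order) and UNIQUEID=12;
+ * NV21 differs from NV12 only in the swap bit.
+ */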
+
+enum DP_COLOR_ENUM {
+    DP_COLOR_UNKNOWN        = 0,
+
+    DP_COLOR_FULLG8         = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0,  8, 2,  0, 21),
+    DP_COLOR_FULLG10        = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 10, 2,  0, 21),
+    DP_COLOR_FULLG12        = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 12, 2,  0, 21),
+    DP_COLOR_FULLG14        = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 14, 2,  0, 21),
+    DP_COLOR_UFO10          = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 10, 2,  0, 24),
+
+    DP_COLOR_BAYER8         = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0,  8, 2,  0, 20),
+    DP_COLOR_BAYER10        = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 10, 2,  0, 20),
+    DP_COLOR_BAYER12        = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 12, 2,  0, 20),
+    DP_COLOR_RGB48          = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 48, 0,  0, 23),
+    //for Bayer+Mono raw-16
+    DP_COLOR_RGB565_RAW     = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 16, 2,  0, 0),
+
+    DP_COLOR_BAYER8_UNPAK   = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0,  8, 2,  0, 22), // fix 16 bits for pixel
+    DP_COLOR_BAYER10_UNPAK  = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 10, 2,  0, 22), // fix 16 bits for pixel
+    DP_COLOR_BAYER12_UNPAK  = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 12, 2,  0, 22), // fix 16 bits for pixel
+    DP_COLOR_BAYER14_UNPAK  = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 14, 2,  0, 22), // fix 16 bits for pixel
+
+    // Unified format
+    DP_COLOR_GREY           = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0,  8, 1,  0, 7),
+
+    DP_COLOR_RGB565         = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 16, 0,  0, 0),
+    DP_COLOR_BGR565         = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 16, 0,  1, 0),
+    DP_COLOR_RGB888         = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 24, 0,  1, 1),
+    DP_COLOR_BGR888         = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 24, 0,  0, 1),
+    DP_COLOR_RGBA8888       = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 32, 0,  1, 2),
+    DP_COLOR_BGRA8888       = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 32, 0,  0, 2),
+    DP_COLOR_ARGB8888       = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 32, 0,  1, 3),
+    DP_COLOR_ABGR8888       = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 32, 0,  0, 3),
+
+    DP_COLOR_UYVY           = DP_COLORFMT_PACK(0, 0, 0, 1, 1, 0, 16, 1,  0, 4),
+    DP_COLOR_VYUY           = DP_COLORFMT_PACK(0, 0, 0, 1, 1, 0, 16, 1,  1, 4),
+    DP_COLOR_YUYV           = DP_COLORFMT_PACK(0, 0, 0, 1, 1, 0, 16, 1,  0, 5),
+    DP_COLOR_YVYU           = DP_COLORFMT_PACK(0, 0, 0, 1, 1, 0, 16, 1,  1, 5),
+
+    DP_COLOR_I420           = DP_COLORFMT_PACK(0, 0, 0, 3, 1, 1,  8, 1,  0, 8),
+    DP_COLOR_YV12           = DP_COLORFMT_PACK(0, 0, 0, 3, 1, 1,  8, 1,  1, 8),
+    DP_COLOR_I422           = DP_COLORFMT_PACK(0, 0, 0, 3, 1, 0,  8, 1,  0, 9),
+    DP_COLOR_YV16           = DP_COLORFMT_PACK(0, 0, 0, 3, 1, 0,  8, 1,  1, 9),
+    DP_COLOR_I444           = DP_COLORFMT_PACK(0, 0, 0, 3, 0, 0,  8, 1,  0, 10),
+    DP_COLOR_YV24           = DP_COLORFMT_PACK(0, 0, 0, 3, 0, 0,  8, 1,  1, 10),
+
+    DP_COLOR_NV12           = DP_COLORFMT_PACK(0, 0, 0, 2, 1, 1,  8, 1,  0, 12),
+    DP_COLOR_NV21           = DP_COLORFMT_PACK(0, 0, 0, 2, 1, 1,  8, 1,  1, 12),
+    DP_COLOR_NV16           = DP_COLORFMT_PACK(0, 0, 0, 2, 1, 0,  8, 1,  0, 13),
+    DP_COLOR_NV61           = DP_COLORFMT_PACK(0, 0, 0, 2, 1, 0,  8, 1,  1, 13),
+    DP_COLOR_NV24           = DP_COLORFMT_PACK(0, 0, 0, 2, 0, 0,  8, 1,  0, 14),
+    DP_COLOR_NV42           = DP_COLORFMT_PACK(0, 0, 0, 2, 0, 0,  8, 1,  1, 14),
+
+    // Mediatek proprietary format
+    //Frame mode + Block mode
+    DP_COLOR_420_BLKP_UFO   = DP_COLORFMT_PACK(0, 0, 5, 2, 1, 1, 256, 1, 0, 12),
+    //Frame mode + Block mode
+    DP_COLOR_420_BLKP       = DP_COLORFMT_PACK(0, 0, 1, 2, 1, 1, 256, 1, 0, 12),
+    //Field mode + Block mode
+    DP_COLOR_420_BLKI       = DP_COLORFMT_PACK(0, 0, 3, 2, 1, 1, 256, 1, 0, 12),
+    //Frame mode
+    DP_COLOR_422_BLKP       = DP_COLORFMT_PACK(0, 0, 1, 1, 1, 0, 512, 1, 0, 4),
+
+    DP_COLOR_IYU2           = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 24,  1, 0, 25),
+    DP_COLOR_YUV444         = DP_COLORFMT_PACK(0, 0, 0, 1, 0, 0, 24,  1, 0, 30),
+
+    // Mediatek proprietary 10bit format
+    DP_COLOR_RGBA1010102    = DP_COLORFMT_PACK(1, 0, 0, 1, 0, 0, 32,  0, 1, 2),
+    DP_COLOR_BGRA1010102    = DP_COLORFMT_PACK(1, 0, 0, 1, 0, 0, 32,  0, 0, 2),
+    //Packed 10bit UYVY
+    DP_COLOR_UYVY_10P       = DP_COLORFMT_PACK(1, 0, 0, 1, 1, 0, 20,  1, 0, 4),
+    //Packed 10bit NV21
+    DP_COLOR_NV21_10P       = DP_COLORFMT_PACK(1, 0, 0, 2, 1, 1, 10,  1, 1, 12),
+    //Frame mode + Block mode
+    DP_COLOR_420_BLKP_10_H  = DP_COLORFMT_PACK(1, 0, 1, 2, 1, 1, 320, 1, 0, 12),
+    //Frame mode + HEVC tile mode
+    DP_COLOR_420_BLKP_10_V  = DP_COLORFMT_PACK(1, 1, 1, 2, 1, 1, 320, 1, 0, 12),
+    //Frame mode + Block mode
+    DP_COLOR_420_BLKP_UFO_10_H  = DP_COLORFMT_PACK(1, 0, 5, 2, 1, 1, 320, 1, 0, 12),
+    //Frame mode + HEVC tile mode
+    DP_COLOR_420_BLKP_UFO_10_V  = DP_COLORFMT_PACK(1, 1, 5, 2, 1, 1, 320, 1, 0, 12),
+
+    // Loose 10bit format
+    DP_COLOR_UYVY_10L       = DP_COLORFMT_PACK(0, 1, 0, 1, 1, 0, 20,  1, 0, 4),
+    DP_COLOR_VYUY_10L       = DP_COLORFMT_PACK(0, 1, 0, 1, 1, 0, 20,  1, 1, 4),
+    DP_COLOR_YUYV_10L       = DP_COLORFMT_PACK(0, 1, 0, 1, 1, 0, 20,  1, 0, 5),
+    DP_COLOR_YVYU_10L       = DP_COLORFMT_PACK(0, 1, 0, 1, 1, 0, 20,  1, 1, 5),
+    DP_COLOR_NV12_10L       = DP_COLORFMT_PACK(0, 1, 0, 2, 1, 1, 10,  1, 0, 12),
+    DP_COLOR_NV21_10L       = DP_COLORFMT_PACK(0, 1, 0, 2, 1, 1, 10,  1, 1, 12),
+    DP_COLOR_NV16_10L       = DP_COLORFMT_PACK(0, 1, 0, 2, 1, 0, 10,  1, 0, 13),
+    DP_COLOR_NV61_10L       = DP_COLORFMT_PACK(0, 1, 0, 2, 1, 0, 10,  1, 1, 13),
+    DP_COLOR_YV12_10L       = DP_COLORFMT_PACK(0, 1, 0, 3, 1, 1, 10,  1, 1, 8),
+    DP_COLOR_I420_10L       = DP_COLORFMT_PACK(0, 1, 0, 3, 1, 1, 10,  1, 0, 8),
+};
+#endif  /* MTK_MDP_TYPE_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
index 4893825..a3e7b55 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
@@ -27,11 +27,21 @@
 {
 	struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *)
 					(unsigned long)msg->ap_inst;
+	struct mdp_cmdq_info *cmdq;
 
 	/* mapping VPU address to kernel virtual address */
 	vpu->vsi = (struct mdp_process_vsi *)
 			vpu_mapping_dm_addr(vpu->pdev, msg->vpu_inst_addr);
 	vpu->inst_addr = msg->vpu_inst_addr;
+
+	/* mapping cmdq buffer address in VPU to kernel virtual address */
+	cmdq = &vpu->vsi->cmdq;
+	if (cmdq->vpu_buf_addr != 0ULL) {
+		cmdq->ap_buf_addr = (uint64_t)(unsigned long)
+			vpu_mapping_dm_addr(vpu->pdev,
+				(unsigned long)cmdq->vpu_buf_addr);
+		cmdq->ap_buf_pa = __pa(cmdq->ap_buf_addr);
+	}
 }
 
 static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len, void *priv)
@@ -50,6 +60,7 @@
 			break;
 		case VPU_MDP_DEINIT_ACK:
 		case VPU_MDP_PROCESS_ACK:
+		case VPU_MDP_CMDQ_DONE_ACK:
 			break;
 		default:
 			ctx = vpu_to_ctx(vpu);
@@ -139,7 +150,69 @@
 	return mtk_mdp_vpu_send_ap_ipi(vpu, AP_MDP_DEINIT);
 }
 
+static int mtk_mdp_cmdq_exec(struct mtk_mdp_ctx *ctx,
+	struct mdp_cmdq_info *cmdq)
+{
+	struct cmdq_pkt *handle = ctx->cmdq_handle;
+	int err, request_size;
+
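+	/*
+	 * The VPU firmware has written GCE instructions into a shared buffer;
+	 * copy them into the preallocated cmdq packet and flush the packet so
+	 * the GCE hardware executes them.
+	 */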
+	mtk_mdp_dbg(2, "eng=%llx,addr=%llx(%llx),offset=%u,size=%u",
+		cmdq->engine_flag, cmdq->ap_buf_addr, cmdq->ap_buf_pa,
+		cmdq->cmd_offset, cmdq->cmd_size);
+
+	/* copy cmd buffer */
+	handle->cmd_buf_size = 0;
+	if (cmdq->cmd_size % CMDQ_INST_SIZE)
+		return -EINVAL;
+
+	request_size = cmdq->cmd_size;
+	if (unlikely(request_size > handle->buf_size)) {
+		/* Growing the command buffer is not supported yet. */
+		return -ENOMEM;
+	}
+
+	memcpy(handle->va_base,
+		(void *)(unsigned long)cmdq->ap_buf_addr + cmdq->cmd_offset,
+		cmdq->cmd_size);
+	handle->cmd_buf_size = cmdq->cmd_size;
+
+	/* execute cmd */
+	err = cmdq_pkt_flush(handle);
+	if (unlikely(err < 0))
+		dev_err(&ctx->mdp_dev->pdev->dev, "cmdq flush failed\n");
+
+	return err;
+}
+
 int mtk_mdp_vpu_process(struct mtk_mdp_vpu *vpu)
 {
-	return mtk_mdp_vpu_send_ap_ipi(vpu, AP_MDP_PROCESS);
+	int err, use_cmdq;
+	struct mtk_mdp_ctx *ctx;
+	struct mdp_cmdq_info *cmdq;
+
+	err = mtk_mdp_vpu_send_ap_ipi(vpu, (uint32_t)AP_MDP_PROCESS);
+
+	use_cmdq = 0;
+	cmdq = &vpu->vsi->cmdq;
+
+	/* If there are commands in the cmdq buffer, use cmdq. */
+	if (err == 0 && cmdq->ap_buf_addr != 0uLL && cmdq->cmd_size != 0u)
+		use_cmdq = 1;
+
+	if (use_cmdq) {
+		ctx = container_of(vpu, struct mtk_mdp_ctx, vpu);
+
+		err = mtk_mdp_cmdq_exec(ctx, cmdq);
+		if (unlikely(err < 0))
+			dev_err(&ctx->mdp_dev->pdev->dev,
+				"cmdq execute failed!!!\n");
+
+		/* Notify the VPU that the cmdq instructions have completed. */
+		/* TODO: add a status field in vpu->vsi->cmdq. */
+		err = mtk_mdp_vpu_send_ap_ipi(vpu, (uint32_t)AP_MDP_CMDQ_DONE);
+	}
+
+	return err;
 }
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h
index df4bdda..9c02422 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h
@@ -28,7 +28,7 @@
  */
 struct mtk_mdp_vpu {
 	struct platform_device	*pdev;
-	uint32_t		inst_addr;
+	uint64_t		inst_addr;
 	int32_t			failure;
 	struct mdp_process_vsi	*vsi;
 };
diff --git a/drivers/media/platform/mtk-vpu/Makefile b/drivers/media/platform/mtk-vpu/Makefile
index 58cc1b4..493af47 100644
--- a/drivers/media/platform/mtk-vpu/Makefile
+++ b/drivers/media/platform/mtk-vpu/Makefile
@@ -1,3 +1,5 @@
 mtk-vpu-y += mtk_vpu.o
+mtk-vcu-y += mtk_vcu.o mtk_vcodec_mem.o
 
 obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu.o
+obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vcu.o
diff --git a/drivers/media/platform/mtk-vpu/mtk_vcodec_mem.c b/drivers/media/platform/mtk-vpu/mtk_vcodec_mem.c
new file mode 100644
index 0000000..0f1d5c6
--- /dev/null
+++ b/drivers/media/platform/mtk-vpu/mtk_vcodec_mem.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mtk_vcodec_mem.h"
+
+struct mtk_vcu_queue *mtk_vcu_dec_init(struct device *dev)
+{
+	struct mtk_vcu_queue *vcu_queue;
+
+	pr_debug("Allocate new vcu queue !\n");
+	vcu_queue = kzalloc(sizeof(struct mtk_vcu_queue), GFP_KERNEL);
+	if (vcu_queue == NULL) {
+		pr_err("Allocate new vcu queue fail!\n");
+		return NULL;
+	}
+
+	vcu_queue->mem_ops = &vb2_dma_contig_memops;
+	vcu_queue->dev = dev;
+	vcu_queue->num_buffers = 0;
+	vcu_queue->map_buf = 0;
+	vcu_queue->map_type = 0;
+	mutex_init(&vcu_queue->mmap_lock);
+
+	return vcu_queue;
+}
+
+void mtk_vcu_dec_release(struct mtk_vcu_queue *vcu_queue)
+{
+	struct mtk_vcu_mem *vcu_buffer;
+	unsigned int buffer;
+
+	mutex_lock(&vcu_queue->mmap_lock);
+	pr_debug("Release vcu queue !\n");
+	if (vcu_queue->num_buffers != 0) {
+		for (buffer = 0; buffer < vcu_queue->num_buffers; buffer++) {
+			vcu_buffer = &vcu_queue->bufs[buffer];
+			vcu_queue->mem_ops->put(vcu_buffer->mem_priv);
+			vcu_queue->bufs[buffer].mem_priv = NULL;
+			vcu_queue->bufs[buffer].size = 0;
+		}
+	}
+	mutex_unlock(&vcu_queue->mmap_lock);
+	kfree(vcu_queue);
+}
+
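+/*
+ * Allocate a buffer through the vb2 dma-contig memops and report both its
+ * kernel virtual address and its iova to the caller; the handle is kept in
+ * vcu_queue->bufs so it can be matched again on free.
+ */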
+void *mtk_vcu_get_buffer(struct mtk_vcu_queue *vcu_queue,
+	    struct mem_obj *mem_buff_data)
+{
+	void *cook, *dma_addr;
+	struct mtk_vcu_mem *vcu_buffer;
+	unsigned int buffers;
+
+	buffers = vcu_queue->num_buffers;
+	if (mem_buff_data->len > DEC_ALLOCATE_MAX_BUFFER_SIZE ||
+		mem_buff_data->len == 0U || buffers >= DEC_MAX_BUFFER) {
+		pr_err("Get buffer fail: buffer len = %ld num_buffers = %d !!\n",
+			mem_buff_data->len, buffers);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&vcu_queue->mmap_lock);
+	vcu_buffer = &vcu_queue->bufs[buffers];
+	vcu_buffer->mem_priv = vcu_queue->mem_ops->alloc(vcu_queue->dev, 0,
+		mem_buff_data->len, 0, 0);
+	vcu_buffer->size = mem_buff_data->len;
+	if (IS_ERR(vcu_buffer->mem_priv)) {
+		mutex_unlock(&vcu_queue->mmap_lock);
+		goto free;
+	}
+
+	cook = vcu_queue->mem_ops->vaddr(vcu_buffer->mem_priv);
+	dma_addr = vcu_queue->mem_ops->cookie(vcu_buffer->mem_priv);
+
+	mem_buff_data->iova = *(dma_addr_t *)dma_addr;
+	mem_buff_data->va = (unsigned long)cook;
+	vcu_queue->num_buffers++;
+	mutex_unlock(&vcu_queue->mmap_lock);
+
+	return vcu_buffer->mem_priv;
+
+free:
+	vcu_queue->mem_ops->put(vcu_buffer->mem_priv);
+
+	return ERR_PTR(-ENOMEM);
+}
+
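+/*
+ * Look up the buffer by matching va, iova and length, release it, then
+ * compact the array by moving the last entry into the freed slot.
+ */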
+int mtk_vcu_free_buffer(struct mtk_vcu_queue *vcu_queue,
+	    struct mem_obj *mem_buff_data)
+{
+	struct mtk_vcu_mem *vcu_buffer;
+	void *cook, *dma_addr;
+	unsigned int buffer, num_buffers, last_buffer;
+	int ret = -EINVAL;
+
+	mutex_lock(&vcu_queue->mmap_lock);
+	num_buffers = vcu_queue->num_buffers;
+	if (num_buffers != 0U) {
+		for (buffer = 0; buffer < num_buffers; buffer++) {
+			vcu_buffer = &vcu_queue->bufs[buffer];
+			cook = vcu_queue->mem_ops->vaddr(vcu_buffer->mem_priv);
+			dma_addr = vcu_queue->mem_ops->cookie(vcu_buffer->mem_priv);
+
+			if (mem_buff_data->va == (unsigned long)cook &&
+				mem_buff_data->iova == *(dma_addr_t *)dma_addr &&
+				mem_buff_data->len == vcu_buffer->size) {
+				pr_debug("Free buff = %d pa = %lx va = %llx, queue_num = %d\n",
+					buffer, mem_buff_data->iova, mem_buff_data->va,
+					num_buffers);
+				vcu_queue->mem_ops->put(vcu_buffer->mem_priv);
+				last_buffer = num_buffers - 1U;
+				if (last_buffer != buffer)
+					vcu_queue->bufs[buffer] =
+					    vcu_queue->bufs[last_buffer];
+				vcu_queue->bufs[last_buffer].mem_priv = NULL;
+				vcu_queue->bufs[last_buffer].size = 0;
+				vcu_queue->num_buffers--;
+				ret = 0;
+				break;
+			}
+		}
+	}
+	mutex_unlock(&vcu_queue->mmap_lock);
+
+	if (ret != 0)
+		pr_err("Can not free memory va %llx iova %lx len %lu!\n",
+			mem_buff_data->va, mem_buff_data->iova, mem_buff_data->len);
+
+	return ret;
+}
diff --git a/drivers/media/platform/mtk-vpu/mtk_vcodec_mem.h b/drivers/media/platform/mtk-vpu/mtk_vcodec_mem.h
new file mode 100644
index 0000000..f094312
--- /dev/null
+++ b/drivers/media/platform/mtk-vpu/mtk_vcodec_mem.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_VCODEC_MEM_H
+#define MTK_VCODEC_MEM_H
+
+#include <media/videobuf2-dma-contig.h>
+#include <uapi/linux/mtk_vcu_controls.h>
+#include <linux/slab.h>
+
+#define DEC_MAX_BUFFER 32U
+#define DEC_ALLOCATE_MAX_BUFFER_SIZE 0x1B00000UL
+
+/**
+ * struct mtk_vcu_mem - memory buffer allocated in kernel
+ *
+ * @mem_priv:	vb2_dc_buf
+ * @size:	allocated buffer size
+ */
+struct mtk_vcu_mem {
+	void *mem_priv;
+	size_t size;
+};
+
+/**
+ * struct mtk_vcu_queue - the allocated buffer queue
+ *
+ * @vcu:	struct mtk_vcu
+ * @mmap_lock:	the lock to protect allocated buffer
+ * @dev:	device
+ * @num_buffers:	allocated buffer number
+ * @mem_ops:	the file operation of memory allocated
+ * @bufs:	store the information of allocated buffers
+ */
+struct mtk_vcu_queue {
+	void *vcu;
+	struct vb2_alloc_ctx *alloc_ctx;
+	struct mutex mmap_lock;
+	struct device *dev;
+	unsigned int num_buffers;
+	const struct vb2_mem_ops *mem_ops;
+	struct mtk_vcu_mem bufs[DEC_MAX_BUFFER];
+	int map_buf;
+	int map_type;
+};
+
+/**
+ * mtk_vcu_dec_init - just init vcu_queue
+ *
+ * @dev:	vcu device.
+ *
+ * Return:	NULL on failure, otherwise the vcu queue used to store
+ *		allocated buffers
+ **/
+struct mtk_vcu_queue *mtk_vcu_dec_init(struct device *dev);
+
+/**
+ * mtk_vcu_dec_release - just release the vcu_queue
+ *
+ * @vcu_queue:	the queue to store allocated buffer.
+ *
+ * Return: void
+ **/
+void mtk_vcu_dec_release(struct mtk_vcu_queue *vcu_queue);
+
+/**
+ * mtk_vcu_get_buffer - get the allocated buffer iova/va
+ *
+ * @vcu_queue:	the queue to store allocated buffer.
+ * @mem_buff_data:	store iova/va.
+ *
+ * Return: the buffer handle on success, an ERR_PTR() value on failure
+ **/
+void *mtk_vcu_get_buffer(struct mtk_vcu_queue *vcu_queue, struct mem_obj *mem_buff_data);
+
+/**
+ * mtk_vcu_free_buffer - just free unused buffer iova/va
+ *
+ * @vcu_queue:	the queue to store allocated buffer.
+ * @mem_buff_data:	store iova/va to free.
+ *
+ * Return:	0 on success, a negative errno on failure
+ **/
+int mtk_vcu_free_buffer(struct mtk_vcu_queue *vcu_queue, struct mem_obj *mem_buff_data);
+
+#endif
diff --git a/drivers/media/platform/mtk-vpu/mtk_vcu.c b/drivers/media/platform/mtk-vpu/mtk_vcu.c
new file mode 100644
index 0000000..06d08c2
--- /dev/null
+++ b/drivers/media/platform/mtk-vpu/mtk_vcu.c
@@ -0,0 +1,1130 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/cdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/file.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#include <linux/freezer.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_MTK_IOMMU
+#include <linux/iommu.h>
+#endif
+#include "mtk_vcodec_mem.h"
+#include <uapi/linux/mtk_vcu_controls.h>
+#include "mtk_vpu.h"
+
+/*
+ * VCU (Video Communication/Controller Unit) is a user-space daemon
+ * controlling the video hardware used for video codecs, scaling and
+ * color format conversion.
+ * VCU interfaces with the other blocks via shared memory and interrupts.
+ */
+#define VCU_PATH		"/dev/vpud"
+#define MDP_PATH		"/dev/mdpd"
+#define CAM_PATH		"/dev/camd"
+#define VCU_DEVNAME		"vpu"
+
+#define IPI_TIMEOUT_MS		4000U
+#define VCU_FW_VER_LEN		16
+
+/* mtk vcu device instance id enumeration */
+enum mtk_vcu_daemon_id {
+	MTK_VCU_VPUD = 0,
+	MTK_VCU_MDPD = 1,
+	MTK_VCU_CAMD = 2,
+	MTK_VCU_NR_MAX
+};
+
+/* vcu extended mapping length */
+#define VCU_PMEM0_LEN(vcu_data)	(vcu_data->extmem.p_len)
+#define VCU_DMEM0_LEN(vcu_data)	(vcu_data->extmem.d_len)
+/* vcu extended user virtual address */
+#define VCU_PMEM0_VMA(vcu_data)	(vcu_data->extmem.p_vma)
+#define VCU_DMEM0_VMA(vcu_data)	(vcu_data->extmem.d_vma)
+/* vcu extended kernel virtual address */
+#define VCU_PMEM0_VIRT(vcu_data)	(vcu_data->extmem.p_va)
+#define VCU_DMEM0_VIRT(vcu_data)	(vcu_data->extmem.d_va)
+/* vcu extended physical address */
+#define VCU_PMEM0_PHY(vcu_data)	(vcu_data->extmem.p_pa)
+#define VCU_DMEM0_PHY(vcu_data)	(vcu_data->extmem.d_pa)
+/* vcu extended iova address */
+#define VCU_PMEM0_IOVA(vcu_data)	(vcu_data->extmem.p_iova)
+#define VCU_DMEM0_IOVA(vcu_data)	(vcu_data->extmem.d_iova)
+
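+/*
+ * The bases below are not real physical addresses: the daemon passes them
+ * as mmap() offsets, and mtk_vcu_mmap() uses the window an offset falls in
+ * to pick the mapping type (shmem allocate, shmem commit, or iova-backed
+ * memory).
+ */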
+#define MAP_SHMEM_ALLOC_BASE	0x80000000UL
+#define MAP_SHMEM_ALLOC_RANGE	0x08000000UL
+#define MAP_SHMEM_ALLOC_END	(MAP_SHMEM_ALLOC_BASE + MAP_SHMEM_ALLOC_RANGE)
+#define MAP_SHMEM_COMMIT_BASE	0x88000000UL
+#define MAP_SHMEM_COMMIT_RANGE	0x08000000UL
+#define MAP_SHMEM_COMMIT_END	(MAP_SHMEM_COMMIT_BASE + MAP_SHMEM_COMMIT_RANGE)
+
+#define MAP_SHMEM_MM_BASE	0x90000000UL
+#define MAP_SHMEM_MM_CACHEABLE_BASE	0x190000000UL
+#define MAP_SHMEM_MM_RANGE	0xFFFFFFFFUL
+#define MAP_SHMEM_MM_END	(MAP_SHMEM_MM_BASE + MAP_SHMEM_MM_RANGE)
+#define MAP_SHMEM_MM_CACHEABLE_END	(MAP_SHMEM_MM_CACHEABLE_BASE + MAP_SHMEM_MM_RANGE)
+#define MAP_VENC_CACHE_MAX_NUM 30
+#define VCU_IPIMSG_VENC_BASE 0xD000
+
+static inline int ipi_id_to_inst_id(int vcuid, int id)
+{
+	/* Assume VENC uses instance 1 and others use 0. */
+	if (vcuid == MTK_VCU_VPUD &&
+	    (id == IPI_VENC_H264 ||
+	    id == IPI_VENC_VP8))
+		return 1;
+
+	return 0;
+}
+
+enum vcu_map_hw_reg_id {
+	VDEC,
+	VENC,
+	VENC_LT,
+	VCU_MAP_HW_REG_NUM
+};
+
+static const unsigned long vcu_map_hw_type[VCU_MAP_HW_REG_NUM] = {
+	0x70000000,	/* VDEC */
+	0x71000000,	/* VENC */
+	0x72000000	/* VENC_LT */
+};
+
+/* Default vcu_mtkdev[0] handle vdec, vcu_mtkdev[1] handle mdp */
+static struct mtk_vcu *vcu_mtkdev[MTK_VCU_NR_MAX];
+
+static struct task_struct *vcud_task;
+static struct files_struct *files;
+
+/**
+ * struct vcu_mem - VCU memory information
+ *
+ * @p_vma:	the user virtual memory address of
+ *		VCU extended program memory
+ * @d_vma:	the user  virtual memory address of VCU extended data memory
+ * @p_va:	the kernel virtual memory address of
+ *		VCU extended program memory
+ * @d_va:	the kernel virtual memory address of VCU extended data memory
+ * @p_pa:	the physical memory address of VCU extended program memory
+ * @d_pa:	the physical memory address of VCU extended data memory
+ * @p_iova:	the iova memory address of VCU extended program memory
+ * @d_iova:	the iova memory address of VCU extended data memory
+ */
+struct vcu_mem {
+	unsigned long p_vma;
+	unsigned long d_vma;
+	void *p_va;
+	void *d_va;
+	dma_addr_t p_pa;
+	dma_addr_t d_pa;
+	dma_addr_t p_iova;
+	dma_addr_t d_iova;
+	unsigned long p_len;
+	unsigned long d_len;
+};
+
+/**
+ * struct vcu_run - VCU initialization status
+ *
+ * @signaled:		flag indicating VCU initialization has completed
+ * @fw_ver:		VCU firmware version
+ * @dec_capability:	decoder capability which is not used for now and
+ *			the value is reserved for future use
+ * @enc_capability:	encoder capability which is not used for now and
+ *			the value is reserved for future use
+ * @wq:			wait queue for VCU initialization status
+ */
+struct vcu_run {
+	u32 signaled;
+	char fw_ver[VCU_FW_VER_LEN];
+	unsigned int	dec_capability;
+	unsigned int	enc_capability;
+	wait_queue_head_t wq;
+};
+
+/**
+ * struct vcu_ipi_desc - VCU IPI descriptor
+ *
+ * @handler:	IPI handler
+ * @name:	the name of IPI handler
+ * @priv:	the private data of IPI handler
+ */
+struct vcu_ipi_desc {
+	ipi_handler_t handler;
+	const char *name;
+	void *priv;
+};
+
+struct map_hw_reg {
+	unsigned long base;
+	unsigned long len;
+};
+
+struct map_cache_mva {
+	unsigned long	mmap64_pa; /* pa */
+	unsigned long	length;
+};
+
+struct vcu_map_mva {
+	uint64_t	mmap64_pa; /* pa */
+	uint64_t	length;
+	uint32_t	status;
+};
+
+/**
+ * struct vcu_ipi_msg_common - VCU ack AP cmd common structure
+ * @msg_id:	message id (VCU_IPIMSG_XXX_DONE)
+ * @status:	cmd status (venc_ipi_msg_status)
+ * @inst:	AP instance (struct venc_vp8_inst/venc_h264_inst *)
+ */
+struct vcu_ipi_msg_common {
+	uint32_t msg_id;
+	uint32_t status;
+	uint64_t inst;
+};
+
+/**
+ * enum venc_ipi_msg_id - message id between AP and VCU
+ * (ipi stands for inter-processor interrupt)
+ * @AP_IPIMSG_ENC_XXX:		AP to VCU cmd message id
+ * @VCU_IPIMSG_ENC_XXX_DONE:	VCU ack AP cmd message id
+ */
+enum venc_ipi_msg_id {
+	VCU_IPIMSG_ENC_INIT_DONE = VCU_IPIMSG_VENC_BASE,
+	VCU_IPIMSG_ENC_SET_PARAM_DONE,
+	VCU_IPIMSG_ENC_ENCODE_DONE,
+	VCU_IPIMSG_ENC_DEINIT_DONE,
+};
+
+/**
+ * struct mtk_vcu - vcu driver data
+ * @extmem:		VCU extended memory information
+ * @run:		VCU initialization status
+ * @ipi_desc:		VCU IPI descriptor
+ * @dev:		VCU struct device
+ * @vcu_mutex:		protect mtk_vcu (except recv_buf) and ensure only
+ *			one client to use VCU service at a time. For example,
+ *			suppose a client is using VCU to decode VP8.
+ *			If the other client wants to encode VP8,
+ *			it has to wait until VP8 decode completes.
+ * @file:		VCU daemon file pointer
+ * @is_open:		The flag to indicate if VCUD device is open.
+ * @is_alloc:		The flag to indicate if VCU extended memory is allocated.
+ * @ack_wq:		The wait queue for each codec and mdp. When sleeping
+ *			processes wake up, they will check the condition
+ *			"ipi_id_ack" to run the corresponding action or
+ *			go back to sleep.
+ * @ipi_id_ack:		The ACKs for registered IPI function sending
+ *			interrupt to VCU
+ * @get_wq:		When a sleeping process wakes up, it will check the
+ *			condition "ipi_got" to run the corresponding action or
+ *			go back to sleep.
+ * @ipi_got:		The flags for IPI message polling from user.
+ * @ipi_done:		The flags for IPI message polling from user again, which
+ *			means the previous message has been dispatched in the
+ *			daemon.
+ * @user_obj:		Temporary share_obj used for ipi_msg_get.
+ * @vcu_devno:		The vcu_devno for vcu init vcu character device
+ * @vcu_cdev:		The point of vcu character device.
+ * @vcu_class:		The class_create for create vcu device
+ * @vcu_device:		VCU struct device
+ * @vcuname:		VCU struct device name in dtsi
+ * @path:		device node path of the daemon (vcud/mdpd/camd)
+ * @vcuid:		VCU device id
+ * @map_buffer:		records cacheable buffer addresses that need mapping
+ *
+ */
+struct mtk_vcu {
+	struct mtk_vpu_plat vpu;
+	struct vcu_mem extmem;
+	struct vcu_run run;
+	struct vcu_ipi_desc ipi_desc[IPI_MAX];
+	struct device *dev;
+	struct mutex vcu_mutex[2]; /* for protecting vcu data structure */
+	struct mutex vcu_share;
+	struct file *file;
+	struct iommu_domain *io_domain;
+	struct map_hw_reg map_base[VCU_MAP_HW_REG_NUM];
+	bool   is_open;
+	bool   is_alloc;
+	wait_queue_head_t ack_wq[2];
+	bool ipi_id_ack[IPI_MAX];
+	wait_queue_head_t get_wq[2];
+	atomic_t ipi_got[2];
+	atomic_t ipi_done[2];
+	struct share_obj user_obj[2];
+	dev_t vcu_devno;
+	struct cdev *vcu_cdev;
+	struct class *vcu_class;
+	struct device *vcu_device;
+	const char *vcuname;
+	const char *path;
+	int vcuid;
+	wait_queue_head_t vdec_log_get_wq;
+	atomic_t vdec_log_got;
+	struct map_cache_mva map_buffer[MAP_VENC_CACHE_MAX_NUM];
+};
+
+#define to_vcu(vpu) container_of(vpu, struct mtk_vcu, vpu)
+
+static inline bool vcu_running(struct mtk_vcu *vcu)
+{
+	return (bool)vcu->run.signaled;
+}
+
+int vcu_ipi_register(struct mtk_vpu_plat *vpu,
+		     enum ipi_id id, ipi_handler_t handler,
+		     const char *name, void *priv)
+{
+	struct mtk_vcu *vcu = to_vcu(vpu);
+	struct vcu_ipi_desc *ipi_desc;
+
+	if (vcu == NULL) {
+		pr_err("vcu device is not ready\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (id >= 0 && id < IPI_MAX && handler != NULL) {
+		ipi_desc = vcu->ipi_desc;
+		ipi_desc[id].name = name;
+		ipi_desc[id].handler = handler;
+		ipi_desc[id].priv = priv;
+		return 0;
+	}
+
+	dev_err(vcu->dev, "register vcu ipi id %d with invalid arguments\n",
+	       id);
+	return -EINVAL;
+}
+
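+/*
+ * Sending an IPI to the VCU means handing the message to the user-space
+ * daemon: copy it into the per-instance share buffer, wake the daemon
+ * thread blocked in vcu_ipi_get(), then wait for the matching ack.
+ */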
+int vcu_ipi_send(struct mtk_vpu_plat *vpu,
+		 enum ipi_id id, void *buf,
+		 unsigned int len)
+{
+	int i = 0;
+	struct mtk_vcu *vcu = to_vcu(vpu);
+	struct share_obj send_obj;
+	unsigned long timeout;
+	int ret;
+
+	if (id <= IPI_VPU_INIT || id >= IPI_MAX ||
+	    len > sizeof(send_obj.share_buf) || buf == NULL) {
+		dev_err(vcu->dev, "[VCU] failed to send ipi message (Invalid arg.)\n");
+		return -EINVAL;
+	}
+
+	if (vcu_running(vcu) == false) {
+		dev_err(vcu->dev, "[VCU] vcu_ipi_send: VCU is not running\n");
+		return -EPERM;
+	}
+
+	i = ipi_id_to_inst_id(vcu->vcuid, id);
+
+	mutex_lock(&vcu->vcu_mutex[i]);
+	vcu->ipi_id_ack[id] = false;
+	/* send the command to VCU */
+	memcpy((void *)vcu->user_obj[i].share_buf, buf, len);
+	vcu->user_obj[i].len = len;
+	vcu->user_obj[i].id = (int)id;
+	if (id == IPI_MDP)
+		vcu->user_obj[i].id = 13;
+	atomic_set(&vcu->ipi_got[i], 1);
+	atomic_set(&vcu->ipi_done[i], 0);
+	wake_up(&vcu->get_wq[i]);
+	/* Wait for ipi_done: success means the daemon receiver thread has
+	 * dispatched the ipi msg and returned to the kernel to get the
+	 * next one.
+	 * The dispatched ipi msg is then processed by the app service;
+	 * this usually takes dozens of microseconds on average.
+	 */
+	while (atomic_read(&vcu->ipi_done[i]) == 0)
+		cond_resched();
+
+	ret = 0;
+	mutex_unlock(&vcu->vcu_mutex[i]);
+
+	/* wait for VCU's ACK */
+	timeout = msecs_to_jiffies(IPI_TIMEOUT_MS);
+	ret = wait_event_timeout(vcu->ack_wq[i], vcu->ipi_id_ack[id], timeout);
+	vcu->ipi_id_ack[id] = false;
+	if (ret == 0) {
+		dev_err(vcu->dev, "vcu ipi %d ack time out !", id);
+		ret = -EIO;
+		goto end;
+	} else if (ret == -ERESTARTSYS) {
+		dev_err(vcu->dev, "vcu ipi %d ack wait interrupted by a signal",
+		       id);
+		ret = -ERESTARTSYS;
+		goto end;
+	} else {
+		ret = 0;
+	}
+
+end:
+	return ret;
+}
+
+static int vcu_ipi_get(struct mtk_vcu *vcu, unsigned long arg)
+{
+	int i = 0, ret;
+	unsigned char *user_data_addr = NULL;
+	struct share_obj share_buff_data;
+
+	user_data_addr = (unsigned char *)arg;
+	ret = (long)copy_from_user(&share_buff_data, user_data_addr,
+		(unsigned long)sizeof(struct share_obj));
+	if (ret != 0) {
+		pr_info("[VCU] %s(%d) Copy data from user failed!\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	i = ipi_id_to_inst_id(vcu->vcuid, share_buff_data.id);
+
+	/* Mutex protection here is unnecessary, since each app service
+	 * thread of the daemon corresponds to its own vcu_ipi_get thread
+	 * and they use different variables, e.g. ipi_done.
+	 */
+	atomic_set(&vcu->ipi_done[i], 1);
+
+	ret = wait_event_freezable(vcu->get_wq[i],
+				    atomic_read(&vcu->ipi_got[i]));
+	if (ret != 0) {
+		pr_info("[VCU][%d][%d] wait event return %d @%s\n",
+			vcu->vcuid, i, ret, __func__);
+		return ret;
+	}
+	ret = copy_to_user(user_data_addr, &vcu->user_obj[i],
+		(unsigned long)sizeof(struct share_obj));
+	if (ret != 0) {
+		pr_info("[VCU] %s(%d) Copy data to user failed!\n",
+			__func__, __LINE__);
+		ret = -EINVAL;
+	}
+	atomic_set(&vcu->ipi_got[i], 0);
+
+	return ret;
+}
+
+unsigned int vcu_get_vdec_hw_capa(struct mtk_vpu_plat *vpu)
+{
+	struct mtk_vcu *vcu = to_vcu(vpu);
+
+	return vcu->run.dec_capability;
+}
+
+unsigned int vcu_get_venc_hw_capa(struct mtk_vpu_plat *vpu)
+{
+	struct mtk_vcu *vcu = to_vcu(vpu);
+
+	return vcu->run.enc_capability;
+}
+
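+/*
+ * Translate a daemon-side address inside the extended data memory into a
+ * kernel virtual address by rebasing its offset from the user VMA onto the
+ * kernel allocation.
+ */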
+void *vcu_mapping_dm_addr(struct mtk_vpu_plat *vpu,
+			  uintptr_t dtcm_dmem_addr)
+{
+	struct mtk_vcu *vcu = to_vcu(vpu);
+	uintptr_t d_vma = (uintptr_t)(dtcm_dmem_addr);
+	uintptr_t d_va_start = (uintptr_t)VCU_DMEM0_VIRT(vcu);
+	uintptr_t d_off = d_vma - VCU_DMEM0_VMA(vcu);
+	uintptr_t d_va;
+
+	if (dtcm_dmem_addr == 0UL || d_off > VCU_DMEM0_LEN(vcu)) {
+		dev_err(vcu->dev, "[VCU] %s: Invalid vma 0x%lx len %lx\n",
+			__func__, dtcm_dmem_addr, VCU_DMEM0_LEN(vcu));
+		return NULL;
+	}
+
+	d_va = d_va_start + d_off;
+	dev_dbg(vcu->dev, "[VCU] %s: 0x%lx -> 0x%lx\n", __func__, d_vma, d_va);
+
+	return (void *)d_va;
+}
+
+int vcu_load_firmware(struct mtk_vpu_plat *vpu)
+{
+	return 0;
+}
+
+
+void vcu_get_task(struct task_struct **task, struct files_struct **f)
+{
+	pr_debug("mtk_vcu_get_task %p\n", vcud_task);
+	*task = vcud_task;
+	*f = files;
+}
+
+static int vcu_ipi_handler(struct mtk_vcu *vcu, struct share_obj *rcv_obj)
+{
+	struct vcu_ipi_desc *ipi_desc = vcu->ipi_desc;
+	int non_ack = 0;
+	int ret = -1;
+	int i = 0;
+
+	i = ipi_id_to_inst_id(vcu->vcuid, rcv_obj->id);
+
+	if (rcv_obj->id < (int)IPI_MAX &&
+		ipi_desc[rcv_obj->id].handler != NULL) {
+		ipi_desc[rcv_obj->id].handler(rcv_obj->share_buf,
+							rcv_obj->len,
+							ipi_desc[rcv_obj->id].priv);
+		if (rcv_obj->id > (int)IPI_VPU_INIT && non_ack == 0) {
+			vcu->ipi_id_ack[rcv_obj->id] = true;
+			wake_up(&vcu->ack_wq[i]);
+		}
+		ret = 0;
+	} else {
+		dev_err(vcu->dev, "[VCU] No such ipi id = %d\n", rcv_obj->id);
+	}
+
+	return ret;
+}
+
+static int vcu_ipi_init(struct mtk_vcu *vcu)
+{
+	vcu->is_open = false;
+	vcu->is_alloc = false;
+	mutex_init(&vcu->vcu_mutex[0]);
+	mutex_init(&vcu->vcu_mutex[1]);
+	mutex_init(&vcu->vcu_share);
+
+	return 0;
+}
+
+static void vcu_init_ipi_handler(void *data, unsigned int len, void *priv)
+{
+	struct mtk_vcu *vcu = (struct mtk_vcu *)priv;
+	struct vcu_run *run = (struct vcu_run *)data;
+
+	/* handle uninitialize message */
+	if (vcu->run.signaled == 1u && run->signaled == 0u) {
+		int i;
+		/* wake up the threads in daemon */
+		for (i = 0; i < 2; i++) {
+			atomic_set(&vcu->ipi_got[i], 1);
+			atomic_set(&vcu->ipi_done[i], 0);
+			wake_up(&vcu->get_wq[i]);
+		}
+
+		atomic_set(&vcu->vdec_log_got, 1);
+		wake_up(&vcu->vdec_log_get_wq);
+	}
+
+	vcu->run.signaled = run->signaled;
+	strncpy(vcu->run.fw_ver, run->fw_ver, VCU_FW_VER_LEN);
+	vcu->run.dec_capability = run->dec_capability;
+	vcu->run.enc_capability = run->enc_capability;
+
+	dev_dbg(vcu->dev, "[VCU] fw ver: %s\n", vcu->run.fw_ver);
+	dev_dbg(vcu->dev, "[VCU] dec cap: %x\n", vcu->run.dec_capability);
+	dev_dbg(vcu->dev, "[VCU] enc cap: %x\n", vcu->run.enc_capability);
+}
+
+static int mtk_vcu_open(struct inode *inode, struct file *file)
+{
+	int vcuid;
+	struct mtk_vcu_queue *vcu_queue;
+
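+	/* Identify the calling daemon by its process name; anything other
+	 * than camd/mdpd is assumed to be the vpud codec daemon.
+	 */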
+	if (strcmp(current->comm, "camd") == 0)
+		vcuid = MTK_VCU_CAMD;
+	else if (strcmp(current->comm, "mdpd") == 0)
+		vcuid = MTK_VCU_MDPD;
+	else {
+		vcud_task = current;
+		files = vcud_task->files;
+		vcuid = MTK_VCU_VPUD;
+	}
+
+	vcu_mtkdev[vcuid]->vcuid = vcuid;
+
+	vcu_queue = mtk_vcu_dec_init(vcu_mtkdev[vcuid]->dev);
+	if (vcu_queue == NULL)
+		return -ENOMEM;
+	vcu_queue->vcu = vcu_mtkdev[vcuid];
+	file->private_data = vcu_queue;
+
+	return 0;
+}
+
+static int mtk_vcu_release(struct inode *inode, struct file *file)
+{
+	mtk_vcu_dec_release((struct mtk_vcu_queue *)file->private_data);
+
+	return 0;
+}
+
+static void vcu_free_d_ext_mem(struct mtk_vcu *vcu)
+{
+	mutex_lock(&vcu->vcu_share);
+	mutex_lock(&vcu->vcu_mutex[0]);
+	mutex_lock(&vcu->vcu_mutex[1]);
+	if (vcu->is_open == true) {
+		filp_close(vcu->file, NULL);
+		vcu->is_open = false;
+	}
+	if (vcu->is_alloc == true) {
+		kfree(VCU_DMEM0_VIRT(vcu));
+		VCU_DMEM0_VIRT(vcu) = NULL;
+		vcu->is_alloc = false;
+	}
+	mutex_unlock(&vcu->vcu_mutex[1]);
+	mutex_unlock(&vcu->vcu_mutex[0]);
+	mutex_unlock(&vcu->vcu_share);
+}
+
+static int vcu_alloc_d_ext_mem(struct mtk_vcu *vcu, unsigned long len)
+{
+	mutex_lock(&vcu->vcu_share);
+	mutex_lock(&vcu->vcu_mutex[0]);
+	mutex_lock(&vcu->vcu_mutex[1]);
+	if (vcu->is_alloc == false) {
+		VCU_DMEM0_VIRT(vcu) = kmalloc(len, GFP_KERNEL);
+		VCU_DMEM0_PHY(vcu) = virt_to_phys(VCU_DMEM0_VIRT(vcu));
+		VCU_DMEM0_LEN(vcu) = len;
+		vcu->is_alloc = true;
+	}
+	mutex_unlock(&vcu->vcu_mutex[1]);
+	mutex_unlock(&vcu->vcu_mutex[0]);
+	mutex_unlock(&vcu->vcu_share);
+
+	dev_dbg(vcu->dev, "[VCU] Data extend memory (len:%lu) phy=0x%llx virt=0x%p iova=0x%llx\n",
+		VCU_DMEM0_LEN(vcu),
+		(unsigned long long)VCU_DMEM0_PHY(vcu),
+		VCU_DMEM0_VIRT(vcu),
+		(unsigned long long)VCU_DMEM0_IOVA(vcu));
+	return 0;
+}
+
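+/*
+ * The mmap offset selects the mapping: hardware register windows for the
+ * codec daemon, the shmem allocate/commit windows for the extended data
+ * memory, and offsets at or above MAP_SHMEM_MM_BASE are treated as iovas
+ * translated through the IOMMU page by page.
+ */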
+static int mtk_vcu_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long length = vma->vm_end - vma->vm_start;
+	unsigned long pa_start = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long pa_start_base = pa_start;
+	unsigned long pa_end = pa_start + length;
+	unsigned long start = vma->vm_start;
+	unsigned long pos = 0;
+	int i;
+	bool cache_mva = false;
+	struct mtk_vcu *vcu_dev;
+	struct mtk_vcu_queue *vcu_queue = (struct mtk_vcu_queue *)file->private_data;
+
+	vcu_dev = (struct mtk_vcu *)vcu_queue->vcu;
+	pr_debug("mtk_vcu_mmap start 0x%lx, end 0x%lx, length:%lx pgoff 0x%lx pa_start:0x%lx %ld map_buf:%d,%d vcu_dev->vcuid:%d\n",
+		 vma->vm_start, vma->vm_end,length,
+		 vma->vm_pgoff, pa_start, vma->vm_flags,
+		 vcu_queue->map_buf, vcu_queue->map_type,vcu_dev->vcuid);
+
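+	/* Only the vpud instance (vcuid 0) may map the hardware register windows. */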
+	if (vcu_dev->vcuid == 0) {
+		for (i = 0; i < (int)VCU_MAP_HW_REG_NUM; i++) {
+			if (pa_start == vcu_map_hw_type[i] &&
+			    length <= vcu_dev->map_base[i].len) {
+				vma->vm_pgoff =
+					vcu_dev->map_base[i].base >> PAGE_SHIFT;
+				goto reg_valid_map;
+			}
+		}
+	}
+
+	if (vcu_queue->map_buf == 0) {
+		/* only vcud needs this case */
+		if (pa_start >= MAP_SHMEM_ALLOC_BASE &&
+		    pa_end <= MAP_SHMEM_ALLOC_END) {
+			vcu_free_d_ext_mem(vcu_dev);
+			if (vcu_alloc_d_ext_mem(vcu_dev, length) != 0) {
+				dev_err(vcu_dev->dev, "[VCU] allocate DM failed\n");
+				return -ENOMEM;
+			}
+			vma->vm_pgoff =
+				(unsigned long)(VCU_DMEM0_PHY(vcu_dev) >> PAGE_SHIFT);
+			goto valid_map;
+		}
+
+		if (pa_start >= MAP_SHMEM_COMMIT_BASE &&
+		    pa_end <= MAP_SHMEM_COMMIT_END) {
+			VCU_DMEM0_VMA(vcu_dev) = vma->vm_start;
+			vma->vm_pgoff =
+				(unsigned long)(VCU_DMEM0_PHY(vcu_dev) >> PAGE_SHIFT);
+			goto valid_map;
+		}
+	}
+
+	if (pa_start_base >= MAP_SHMEM_MM_BASE || vcu_queue->map_buf == 1) {
+		if (vcu_queue->map_type == 1)
+			cache_mva = true;
+
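+		/* Map shared-memory buffers by translating each iova page to its PA. */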
+#ifdef CONFIG_MTK_IOMMU
+		while (length > 0) {
+			vma->vm_pgoff = iommu_iova_to_phys(vcu_dev->io_domain,
+						   pa_start + pos);
+			vma->vm_pgoff >>= PAGE_SHIFT;
+			if (!cache_mva)
+				vma->vm_page_prot =
+					pgprot_writecombine(vma->vm_page_prot);
+			/* remap_pfn_range() returns 0 on success, -errno on failure. */
+			if (remap_pfn_range(vma, start, vma->vm_pgoff,
+					    PAGE_SIZE, vma->vm_page_prot) != 0)
+				return -EAGAIN;
+
+			start += PAGE_SIZE;
+			pos += PAGE_SIZE;
+			if (length > PAGE_SIZE)
+				length -= PAGE_SIZE;
+			else
+				length = 0;
+		}
+		return 0;
+#endif
+	}
+
+	dev_err(vcu_dev->dev, "[VCU] Invalid argument\n");
+	return -EINVAL;
+
+reg_valid_map:
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+valid_map:
+	dev_dbg(vcu_dev->dev, "[VCU] Mapping pgoff 0x%lx\n", vma->vm_pgoff);
+
+	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+			    vma->vm_end - vma->vm_start,
+			    vma->vm_page_prot) != 0) {
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static long mtk_vcu_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long ret = -1;
+	void *mem_priv;
+	unsigned char *user_data_addr = NULL;
+	struct mtk_vcu *vcu_dev;
+	struct device *dev;
+	struct map_obj mem_map_obj;
+	struct share_obj share_buff_data;
+	struct mem_obj mem_buff_data;
+	struct mtk_vcu_queue *vcu_queue = (struct mtk_vcu_queue *)file->private_data;
+
+	vcu_dev = (struct mtk_vcu *)vcu_queue->vcu;
+	dev = vcu_dev->dev;
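+	/* vcud ioctl dispatch: IPI object exchange, mmap setup and MVA buffers. */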
+	switch (cmd) {
+	case VCUD_SET_OBJECT:
+		user_data_addr = (unsigned char *)arg;
+		ret = (long)copy_from_user(&share_buff_data, user_data_addr,
+			(unsigned long)sizeof(struct share_obj));
+		if (ret != 0L || share_buff_data.id >= (int)IPI_MAX ||
+		    share_buff_data.id < (int)IPI_VPU_INIT) {
+			pr_err("[VCU] %s(%d) Copy data from user failed!\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		ret = vcu_ipi_handler(vcu_dev, &share_buff_data);
+		ret = (long)copy_to_user(user_data_addr, &share_buff_data,
+			(unsigned long)sizeof(struct share_obj));
+		if (ret != 0L) {
+			pr_err("[VCU] %s(%d) Copy data to user failed!\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		break;
+	case VCUD_SET_MMAP_TYPE:
+		user_data_addr = (unsigned char *)arg;
+		ret = (long)copy_from_user(&mem_map_obj, user_data_addr,
+			(unsigned long)sizeof(struct map_obj));
+
+		if (ret != 0L) {
+			pr_err("[VCU] %s(%d) Copy data to user failed!\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+
+		pr_err("[VCU] VCUD_SET_MMAP_TYPE(%d) mem_map_obj:(%lu %lu)\n",
+				 __LINE__,mem_map_obj.map_buf,mem_map_obj.map_type);
+
+		vcu_queue->map_buf = mem_map_obj.map_buf;
+		vcu_queue->map_type = mem_map_obj.map_type;
+
+		break;
+	case VCUD_GET_OBJECT:
+		ret = vcu_ipi_get(vcu_dev, arg);
+		break;
+	case VCUD_MVA_ALLOCATION:
+		user_data_addr = (unsigned char *)arg;
+		ret = (long)copy_from_user(&mem_buff_data, user_data_addr,
+			(unsigned long)sizeof(struct mem_obj));
+		if (ret != 0L) {
+			pr_err("[VCU] %s(%d) Copy data from user failed!\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+
+		mem_priv = mtk_vcu_get_buffer(vcu_queue, &mem_buff_data);
+		if (IS_ERR(mem_priv)) {
+			pr_err("[VCU] Dma alloc buf failed!\n");
+			return PTR_ERR(mem_priv);
+		}
+
+		ret = (long)copy_to_user(user_data_addr, &mem_buff_data,
+			(unsigned long)sizeof(struct mem_obj));
+		if (ret != 0L) {
+			pr_err("[VCU] %s(%d) Copy data to user failed!\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		ret = 0;
+		break;
+	case VCUD_MVA_FREE:
+		user_data_addr = (unsigned char *)arg;
+		ret = (long)copy_from_user(&mem_buff_data, user_data_addr,
+			(unsigned long)sizeof(struct mem_obj));
+		if ((ret != 0L) || (mem_buff_data.iova == 0UL)) {
+			pr_err("[VCU] %s(%d) Free buf failed!\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+
+		ret = mtk_vcu_free_buffer(vcu_queue, &mem_buff_data);
+		if (ret != 0L) {
+			pr_err("[VCU] Dma free buf failed!\n");
+			return -EINVAL;
+		}
+		mem_buff_data.va = 0;
+		mem_buff_data.iova = 0;
+
+		ret = (long)copy_to_user(user_data_addr, &mem_buff_data,
+			(unsigned long)sizeof(struct mem_obj));
+		if (ret != 0L) {
+			pr_err("[VCU] %s(%d) Copy data to user failed!\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		ret = 0;
+		break;
+	default:
+		dev_err(dev, "[VCU] Unknown cmd 0x%x\n", cmd);
+		ret = -ENOTTY;
+		break;
+	}
+
+	return ret;
+}
+
+#if IS_ENABLED(CONFIG_COMPAT)
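+/*
+ * 32-bit compat layer: repack struct mem_obj between the compat and native
+ * layouts before and after forwarding to the native ioctl handler.
+ */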
+static int compat_get_vpud_allocation_data(
+				struct compat_mem_obj __user *data32,
+				struct mem_obj __user *data)
+{
+	compat_ulong_t l;
+	compat_u64 u;
+	unsigned int err = 0;
+
+	err = get_user(l, &data32->iova);
+	err |= put_user(l, &data->iova);
+	err |= get_user(l, &data32->len);
+	err |= put_user(l, &data->len);
+	err |= get_user(u, &data32->va);
+	err |= put_user(u, &data->va);
+
+	return (int)err;
+}
+
+static int compat_put_vpud_allocation_data(
+				struct compat_mem_obj __user *data32,
+				struct mem_obj __user *data)
+{
+	compat_ulong_t l;
+	compat_u64 u;
+	unsigned int err = 0;
+
+	err = get_user(l, &data->iova);
+	err |= put_user(l, &data32->iova);
+	err |= get_user(l, &data->len);
+	err |= put_user(l, &data32->len);
+	err |= get_user(u, &data->va);
+	err |= put_user(u, &data32->va);
+
+	return (int)err;
+}
+
+static long mtk_vcu_unlocked_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	long ret = -1;
+	struct share_obj __user *share_data32;
+	struct compat_mem_obj __user *data32;
+	struct mem_obj __user *data;
+
+	switch (cmd) {
+	case COMPAT_VCUD_SET_OBJECT:
+	case VCUD_GET_OBJECT:
+	case VCUD_SET_MMAP_TYPE:
+		share_data32 = compat_ptr((uint32_t)arg);
+		ret = file->f_op->unlocked_ioctl(file,
+				cmd, (unsigned long)share_data32);
+		break;
+	case COMPAT_VCUD_MVA_ALLOCATION:
+		data32 = compat_ptr((uint32_t)arg);
+		data = compat_alloc_user_space(sizeof(struct mem_obj));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_vpud_allocation_data(data32, data);
+		if (err != 0)
+			return err;
+		ret = file->f_op->unlocked_ioctl(file,
+			(uint32_t)VCUD_MVA_ALLOCATION, (unsigned long)data);
+
+		err = compat_put_vpud_allocation_data(data32, data);
+		if (err != 0)
+			return err;
+		break;
+	case COMPAT_VCUD_MVA_FREE:
+		data32 = compat_ptr((uint32_t)arg);
+		data = compat_alloc_user_space(sizeof(struct mem_obj));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_vpud_allocation_data(data32, data);
+		if (err != 0)
+			return err;
+		ret = file->f_op->unlocked_ioctl(file,
+			(uint32_t)VCUD_MVA_FREE, (unsigned long)data);
+
+		err = compat_put_vpud_allocation_data(data32, data);
+		if (err != 0)
+			return err;
+		break;
+	case COMPAT_VCUD_CACHE_FLUSH_ALL:
+		ret = file->f_op->unlocked_ioctl(file,
+			(uint32_t)VCUD_CACHE_FLUSH_ALL, 0);
+		break;
+	default:
+		pr_err("[VCU] Invalid cmd_number 0x%x.\n", cmd);
+		break;
+	}
+	return ret;
+}
+#endif
+
+static const struct file_operations vcu_fops = {
+	.owner      = THIS_MODULE,
+	.unlocked_ioctl = mtk_vcu_unlocked_ioctl,
+	.open       = mtk_vcu_open,
+	.release    = mtk_vcu_release,
+	.mmap       = mtk_vcu_mmap,
+#if IS_ENABLED(CONFIG_COMPAT)
+	.compat_ioctl = mtk_vcu_unlocked_compat_ioctl,
+#endif
+};
+
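+/* VCU-side implementation of the common mtk_vpu_plat ops from mtk_vpu.h. */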
+static struct mtk_vpu_ops mtk_vcu_ops = {
+	.ipi_register = vcu_ipi_register,
+	.ipi_send = vcu_ipi_send,
+	.get_vdec_hw_capa = vcu_get_vdec_hw_capa,
+	.get_venc_hw_capa = vcu_get_venc_hw_capa,
+	.load_firmware = vcu_load_firmware,
+	.mapping_dm_addr = vcu_mapping_dm_addr,
+};
+
+static int mtk_vcu_probe(struct platform_device *pdev)
+{
+	struct mtk_vcu *vcu;
+	struct device *dev;
+	struct resource *res;
+	int i, ret = 0;
+	u32 vcuid;
+
+	dev_dbg(&pdev->dev, "[VCU] initialization\n");
+
+	dev = &pdev->dev;
+	vcu = devm_kzalloc(dev, sizeof(*vcu), GFP_KERNEL);
+	if (vcu == NULL)
+		return -ENOMEM;
+
+	vcu->vpu.ops = &mtk_vcu_ops;
+
+	ret = of_property_read_u32(dev->of_node, "mediatek,vcuid", &vcuid);
+	if (ret != 0) {
+		dev_err(dev, "[VCU] failed to find mediatek,vcuid\n");
+		return ret;
+	}
+	vcu_mtkdev[vcuid] = vcu;
+
+#ifdef CONFIG_MTK_IOMMU
+	vcu_mtkdev[vcuid]->io_domain = iommu_get_domain_for_dev(dev);
+	if (vcu_mtkdev[vcuid]->io_domain == NULL) {
+		dev_err(dev, "[VCU] vcuid: %d get iommu domain fail !!\n", vcuid);
+		return -EPROBE_DEFER;
+	}
+	dev_dbg(dev, "vcu iommudom: %p,vcuid:%d\n", vcu_mtkdev[vcuid]->io_domain, vcuid);
+#endif
+
+	if (vcuid == 2)
+		vcu_mtkdev[vcuid]->path = CAM_PATH;
+	else if (vcuid == 1)
+		vcu_mtkdev[vcuid]->path = MDP_PATH;
+	else if (vcuid == 0)
+		vcu_mtkdev[vcuid]->path = VCU_PATH;
+	else
+		return -ENXIO;
+
+	ret = of_property_read_string(dev->of_node, "mediatek,vcuname", &vcu_mtkdev[vcuid]->vcuname);
+	if (ret != 0) {
+		dev_err(dev, "[VCU] failed to find mediatek,vcuname\n");
+		return ret;
+	}
+
+	vcu->dev = &pdev->dev;
+	platform_set_drvdata(pdev, &vcu->vpu);
+
+	if (vcuid == 0) {
+		for (i = 0; i < (int)VCU_MAP_HW_REG_NUM; i++) {
+			res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+			if (res == NULL) {
+				dev_err(dev, "Get memory resource failed.\n");
+				ret = -ENXIO;
+				goto err_ipi_init;
+			}
+			vcu->map_base[i].base = res->start;
+			vcu->map_base[i].len = resource_size(res);
+			dev_dbg(dev, "[VCU] base[%d]: 0x%lx 0x%lx", i, vcu->map_base[i].base,
+				vcu->map_base[i].len);
+		}
+	}
+	dev_dbg(dev, "[VCU] vcu ipi init\n");
+	ret = vcu_ipi_init(vcu);
+	if (ret != 0) {
+		dev_err(dev, "[VCU] Failed to init ipi\n");
+		goto err_ipi_init;
+	}
+
+	/* register vcu initialization IPI */
+	ret = vcu_ipi_register(&vcu->vpu, IPI_VPU_INIT, vcu_init_ipi_handler,
+			       "vcu_init", vcu);
+	if (ret != 0) {
+		dev_err(dev, "Failed to register IPI_VPU_INIT\n");
+		goto vcu_mutex_destroy;
+	}
+
+	init_waitqueue_head(&vcu->ack_wq[0]);
+	init_waitqueue_head(&vcu->ack_wq[1]);
+	init_waitqueue_head(&vcu->get_wq[0]);
+	init_waitqueue_head(&vcu->get_wq[1]);
+	init_waitqueue_head(&vcu->vdec_log_get_wq);
+	atomic_set(&vcu->ipi_got[0], 0);
+	atomic_set(&vcu->ipi_got[1], 0);
+	atomic_set(&vcu->ipi_done[0], 0);
+	atomic_set(&vcu->ipi_done[1], 0);
+	atomic_set(&vcu->vdec_log_got, 0);
+	/* init character device */
+
+	ret = alloc_chrdev_region(&vcu_mtkdev[vcuid]->vcu_devno, 0, 1, vcu_mtkdev[vcuid]->vcuname);
+	if (ret < 0) {
+		dev_err(dev, "[VCU]  alloc_chrdev_region failed (ret=%d)\n", ret);
+		goto err_alloc;
+	}
+
+	vcu_mtkdev[vcuid]->vcu_cdev = cdev_alloc();
+	if (vcu_mtkdev[vcuid]->vcu_cdev == NULL) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+	vcu_mtkdev[vcuid]->vcu_cdev->owner = THIS_MODULE;
+	vcu_mtkdev[vcuid]->vcu_cdev->ops = &vcu_fops;
+
+	ret = cdev_add(vcu_mtkdev[vcuid]->vcu_cdev, vcu_mtkdev[vcuid]->vcu_devno, 1);
+	if (ret < 0) {
+		dev_err(dev, "[VCU] class create fail (ret=%d)", ret);
+		goto err_add;
+	}
+
+	vcu_mtkdev[vcuid]->vcu_class = class_create(THIS_MODULE, vcu_mtkdev[vcuid]->vcuname);
+	if (IS_ERR(vcu_mtkdev[vcuid]->vcu_class)) {
+		ret = (int)PTR_ERR(vcu_mtkdev[vcuid]->vcu_class);
+		dev_err(dev, "[VCU] class create fail (ret=%d)", ret);
+		goto err_add;
+	}
+
+	vcu_mtkdev[vcuid]->vcu_device = device_create(vcu_mtkdev[vcuid]->vcu_class, NULL,
+				vcu_mtkdev[vcuid]->vcu_devno, NULL, vcu_mtkdev[vcuid]->vcuname);
+	if (IS_ERR(vcu_mtkdev[vcuid]->vcu_device)) {
+		ret = (int)PTR_ERR(vcu_mtkdev[vcuid]->vcu_device);
+		dev_err(dev, "[VCU] device_create fail (ret=%d)", ret);
+		goto err_device;
+	}
+
+	dev_dbg(dev, "[VCU] initialization completed\n");
+	return 0;
+
+err_device:
+	class_destroy(vcu_mtkdev[vcuid]->vcu_class);
+err_add:
+	cdev_del(vcu_mtkdev[vcuid]->vcu_cdev);
+err_alloc:
+	unregister_chrdev_region(vcu_mtkdev[vcuid]->vcu_devno, 1);
+vcu_mutex_destroy:
+	mutex_destroy(&vcu->vcu_mutex[0]);
+	mutex_destroy(&vcu->vcu_mutex[1]);
+	mutex_destroy(&vcu->vcu_share);
+err_ipi_init:
+	devm_kfree(dev, vcu);
+
+	return ret;
+}
+
+static const struct of_device_id mtk_vcu_match[] = {
+	{.compatible = "mediatek,mt8167-vcu",},
+	{},
+};
+MODULE_DEVICE_TABLE(of, mtk_vcu_match);
+
+static int mtk_vcu_remove(struct platform_device *pdev)
+{
+	/* probe stores &vcu->vpu as drvdata, so recover the container here. */
+	struct mtk_vpu_plat *vpu = platform_get_drvdata(pdev);
+	struct mtk_vcu *vcu = container_of(vpu, struct mtk_vcu, vpu);
+
+	if (vcu->is_open) {
+		filp_close(vcu->file, NULL);
+		vcu->is_open = false;
+	}
+
+	device_destroy(vcu->vcu_class, vcu->vcu_devno);
+	class_destroy(vcu->vcu_class);
+	cdev_del(vcu->vcu_cdev);
+	unregister_chrdev_region(vcu->vcu_devno, 1);
+	devm_kfree(&pdev->dev, vcu);
+
+	return 0;
+}
+
+static struct platform_driver mtk_vcu_driver = {
+	.probe	= mtk_vcu_probe,
+	.remove	= mtk_vcu_remove,
+	.driver	= {
+		.name	= "mtk_vcu",
+		.owner	= THIS_MODULE,
+		.of_match_table = mtk_vcu_match,
+	},
+};
+
+module_platform_driver(mtk_vcu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek Video Communication And Controller Unit driver");
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index f8d35e3..aecb52c 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -206,6 +206,7 @@
  *
  */
 struct mtk_vpu {
+	struct mtk_vpu_plat vpu;
 	struct vpu_mem extmem[2];
 	struct vpu_regs reg;
 	struct vpu_run run;
@@ -222,6 +223,7 @@
 	wait_queue_head_t ack_wq;
 	bool ipi_id_ack[IPI_MAX];
 };
+#define to_vpu(vpu_plat) container_of(vpu_plat, struct mtk_vpu, vpu)
 
 static inline void vpu_cfg_writel(struct mtk_vpu *vpu, u32 val, u32 offset)
 {
@@ -269,15 +271,15 @@
 	return ret;
 }
 
-int vpu_ipi_register(struct platform_device *pdev,
-		     enum ipi_id id, ipi_handler_t handler,
-		     const char *name, void *priv)
+static int mtk_vpu_ipi_register(struct mtk_vpu_plat *vpu_plat,
+				enum ipi_id id, ipi_handler_t handler,
+				const char *name, void *priv)
 {
-	struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+	struct mtk_vpu *vpu = to_vpu(vpu_plat);
 	struct vpu_ipi_desc *ipi_desc;
 
 	if (!vpu) {
-		dev_err(&pdev->dev, "vpu device in not ready\n");
+		dev_err(vpu->dev, "vpu device in not ready\n");
 		return -EPROBE_DEFER;
 	}
 
@@ -289,17 +291,16 @@
 		return 0;
 	}
 
-	dev_err(&pdev->dev, "register vpu ipi id %d with invalid arguments\n",
+	dev_err(vpu->dev, "register vpu ipi id %d with invalid arguments\n",
 		id);
 	return -EINVAL;
 }
-EXPORT_SYMBOL_GPL(vpu_ipi_register);
 
-int vpu_ipi_send(struct platform_device *pdev,
-		 enum ipi_id id, void *buf,
-		 unsigned int len)
+static int mtk_vpu_ipi_send(struct mtk_vpu_plat *vpu_plat,
+			    enum ipi_id id, void *buf,
+			    unsigned int len)
 {
-	struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+	struct mtk_vpu *vpu = to_vpu(vpu_plat);
 	struct share_obj *send_obj = vpu->send_buf;
 	unsigned long timeout;
 	int ret = 0;
@@ -363,7 +364,6 @@
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(vpu_ipi_send);
 
 static void vpu_wdt_reset_func(struct work_struct *ws)
 {
@@ -392,15 +392,15 @@
 	}
 }
 
-int vpu_wdt_reg_handler(struct platform_device *pdev,
-			void wdt_reset(void *),
-			void *priv, enum rst_id id)
+static int mtk_vpu_wdt_reg_handler(struct mtk_vpu_plat *vpu_plat,
+				   void wdt_reset(void *),
+				   void *priv, enum rst_id id)
 {
-	struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+	struct mtk_vpu *vpu = to_vpu(vpu_plat);
 	struct vpu_wdt_handler *handler;
 
 	if (!vpu) {
-		dev_err(&pdev->dev, "vpu device in not ready\n");
+		dev_err(vpu->dev, "vpu device in not ready\n");
 		return -EPROBE_DEFER;
 	}
 
@@ -418,28 +418,25 @@
 	dev_err(vpu->dev, "register vpu wdt handler failed\n");
 	return -EINVAL;
 }
-EXPORT_SYMBOL_GPL(vpu_wdt_reg_handler);
 
-unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev)
+static unsigned int mtk_vpu_get_vdec_hw_capa(struct mtk_vpu_plat *vpu_plat)
 {
-	struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+	struct mtk_vpu *vpu = to_vpu(vpu_plat);
 
 	return vpu->run.dec_capability;
 }
-EXPORT_SYMBOL_GPL(vpu_get_vdec_hw_capa);
 
-unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev)
+static unsigned int mtk_vpu_get_venc_hw_capa(struct mtk_vpu_plat *vpu_plat)
 {
-	struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+	struct mtk_vpu *vpu = to_vpu(vpu_plat);
 
 	return vpu->run.enc_capability;
 }
-EXPORT_SYMBOL_GPL(vpu_get_venc_hw_capa);
 
-void *vpu_mapping_dm_addr(struct platform_device *pdev,
-			  u32 dtcm_dmem_addr)
+static void *mtk_vpu_mapping_dm_addr(struct mtk_vpu_plat *vpu_plat,
+				     uintptr_t dtcm_dmem_addr)
 {
-	struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+	struct mtk_vpu *vpu = to_vpu(vpu_plat);
 
 	if (!dtcm_dmem_addr ||
 	    (dtcm_dmem_addr > (VPU_DTCM_SIZE + VPU_EXT_D_SIZE))) {
@@ -453,7 +450,6 @@
 
 	return vpu->extmem[D_FW].va + (dtcm_dmem_addr - VPU_DTCM_SIZE);
 }
-EXPORT_SYMBOL_GPL(vpu_mapping_dm_addr);
 
 struct platform_device *vpu_get_plat_device(struct platform_device *pdev)
 {
@@ -534,20 +530,19 @@
 	return 0;
 }
 
-int vpu_load_firmware(struct platform_device *pdev)
+static int mtk_vpu_load_firmware(struct mtk_vpu_plat *vpu_plat)
 {
-	struct mtk_vpu *vpu;
-	struct device *dev = &pdev->dev;
+	struct mtk_vpu *vpu = to_vpu(vpu_plat);
+	struct device *dev = vpu->dev;
 	struct vpu_run *run;
 	const struct firmware *vpu_fw = NULL;
 	int ret;
 
-	if (!pdev) {
+	if (!vpu_plat) {
 		dev_err(dev, "VPU platform device is invalid\n");
 		return -EINVAL;
 	}
 
-	vpu = platform_get_drvdata(pdev);
 	run = &vpu->run;
 
 	mutex_lock(&vpu->vpu_mutex);
@@ -607,7 +602,6 @@
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(vpu_load_firmware);
 
 static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
 {
@@ -771,6 +765,17 @@
 	return IRQ_HANDLED;
 }
 
+static struct mtk_vpu_ops mtk_vpu_ops = {
+	.ipi_register = mtk_vpu_ipi_register,
+	.ipi_send = mtk_vpu_ipi_send,
+	.wdt_reg_handler = mtk_vpu_wdt_reg_handler,
+	.get_vdec_hw_capa = mtk_vpu_get_vdec_hw_capa,
+	.get_venc_hw_capa = mtk_vpu_get_venc_hw_capa,
+	.load_firmware = mtk_vpu_load_firmware,
+	.mapping_dm_addr = mtk_vpu_mapping_dm_addr,
+};
+
 #ifdef CONFIG_DEBUG_FS
 static struct dentry *vpu_debugfs;
 #endif
@@ -789,6 +794,7 @@
 		return -ENOMEM;
 
 	vpu->dev = &pdev->dev;
+	vpu->vpu.ops = &mtk_vpu_ops;
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcm");
 	vpu->reg.tcm = devm_ioremap_resource(dev, res);
 	if (IS_ERR((__force void *)vpu->reg.tcm))
@@ -806,7 +812,7 @@
 		return PTR_ERR(vpu->clk);
 	}
 
-	platform_set_drvdata(pdev, vpu);
+	platform_set_drvdata(pdev, &vpu->vpu);
 
 	ret = clk_prepare(vpu->clk);
 	if (ret) {
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.h b/drivers/media/platform/mtk-vpu/mtk_vpu.h
index aec0268..29c60c4 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.h
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.h
@@ -84,6 +84,28 @@
 	VPU_RST_MAX,
 };
 
+struct mtk_vpu_plat;
+
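+/*
+ * Backend dispatch table: the VPU core and the VCU driver each provide an
+ * implementation, so clients can use a single API for both.
+ */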
+struct mtk_vpu_ops {
+	int (*ipi_register)(struct mtk_vpu_plat *vpu, enum ipi_id id,
+		     ipi_handler_t handler, const char *name, void *priv);
+	int (*ipi_send)(struct mtk_vpu_plat *vpu,
+		 enum ipi_id id, void *buf,
+		 unsigned int len);
+	int (*wdt_reg_handler)(struct mtk_vpu_plat *vpu,
+			void vpu_wdt_reset_func(void *),
+			void *priv, enum rst_id id);
+	unsigned int (*get_vdec_hw_capa)(struct mtk_vpu_plat *vpu);
+	unsigned int (*get_venc_hw_capa)(struct mtk_vpu_plat *vpu);
+	int (*load_firmware)(struct mtk_vpu_plat *vpu);
+	void *(*mapping_dm_addr)(struct mtk_vpu_plat *vpu,
+			  uintptr_t dtcm_dmem_addr);
+};
+
+struct mtk_vpu_plat {
+	struct mtk_vpu_ops *ops;
+};
+
 /**
  * vpu_ipi_register - register an ipi function
  *
@@ -97,8 +119,16 @@
  *
  * Return: Return 0 if ipi registers successfully, otherwise it is failed.
  */
-int vpu_ipi_register(struct platform_device *pdev, enum ipi_id id,
-		     ipi_handler_t handler, const char *name, void *priv);
+static inline int vpu_ipi_register(struct platform_device *pdev, enum ipi_id id,
+		     ipi_handler_t handler, const char *name, void *priv)
+{
+	struct mtk_vpu_plat *vpu = platform_get_drvdata(pdev);
+
+	if (vpu->ops->ipi_register)
+		return vpu->ops->ipi_register(vpu, id, handler, name, priv);
+
+	return -ENOTSUPP;
+}
 
 /**
  * vpu_ipi_send - send data from AP to vpu.
@@ -115,9 +145,17 @@
  *
  * Return: Return 0 if sending data successfully, otherwise it is failed.
  **/
-int vpu_ipi_send(struct platform_device *pdev,
+static inline int vpu_ipi_send(struct platform_device *pdev,
 		 enum ipi_id id, void *buf,
-		 unsigned int len);
+		 unsigned int len)
+{
+	struct mtk_vpu_plat *vpu = platform_get_drvdata(pdev);
+
+	if (vpu->ops->ipi_send)
+		return vpu->ops->ipi_send(vpu, id, buf, len);
+
+	return -ENOTSUPP;
+}
 
 /**
  * vpu_get_plat_device - get VPU's platform device
@@ -144,9 +182,17 @@
  * otherwise it is failed.
  *
  **/
-int vpu_wdt_reg_handler(struct platform_device *pdev,
+static inline int vpu_wdt_reg_handler(struct platform_device *pdev,
 			void vpu_wdt_reset_func(void *),
-			void *priv, enum rst_id id);
+			void *priv, enum rst_id id)
+{
+	struct mtk_vpu_plat *vpu = platform_get_drvdata(pdev);
+
+	if (vpu->ops->wdt_reg_handler)
+		return vpu->ops->wdt_reg_handler(vpu, vpu_wdt_reset_func, priv, id);
+
+	return -ENOTSUPP;
+}
 
 /**
  * vpu_get_vdec_hw_capa - get video decoder hardware capability
@@ -155,7 +201,15 @@
  *
  * Return: video decoder hardware capability
  **/
-unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev);
+static inline unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev)
+{
+	struct mtk_vpu_plat *vpu = platform_get_drvdata(pdev);
+
+	if (vpu->ops->get_vdec_hw_capa)
+		return vpu->ops->get_vdec_hw_capa(vpu);
+
+	return 0;
+}
 
 /**
  * vpu_get_venc_hw_capa - get video encoder hardware capability
@@ -164,7 +218,15 @@
  *
  * Return: video encoder hardware capability
  **/
-unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev);
+static inline unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev)
+{
+	struct mtk_vpu_plat *vpu = platform_get_drvdata(pdev);
+
+	if (vpu->ops->get_venc_hw_capa)
+		return vpu->ops->get_venc_hw_capa(vpu);
+
+	return 0;
+}
 
 /**
  * vpu_load_firmware - download VPU firmware and boot it
@@ -174,7 +236,15 @@
  * Return: Return 0 if downloading firmware successfully,
  * otherwise it is failed
  **/
-int vpu_load_firmware(struct platform_device *pdev);
+static inline int vpu_load_firmware(struct platform_device *pdev)
+{
+	struct mtk_vpu_plat *vpu = platform_get_drvdata(pdev);
+
+	if (vpu->ops->load_firmware)
+		return vpu->ops->load_firmware(vpu);
+
+	return -ENOTSUPP;
+}
 
 /**
  * vpu_mapping_dm_addr - Mapping DTCM/DMEM to kernel virtual address
@@ -189,6 +259,14 @@
  * Return: Return ERR_PTR(-EINVAL) if mapping failed,
  * otherwise the mapped kernel virtual address
  **/
-void *vpu_mapping_dm_addr(struct platform_device *pdev,
-			  u32 dtcm_dmem_addr);
+static inline void *vpu_mapping_dm_addr(struct platform_device *pdev,
+					uintptr_t dtcm_dmem_addr)
+{
+	struct mtk_vpu_plat *vpu = platform_get_drvdata(pdev);
+
+	if (vpu->ops->mapping_dm_addr)
+		return vpu->ops->mapping_dm_addr(vpu, dtcm_dmem_addr);
+
+	return ERR_PTR(-ENOTSUPP);
+}
 #endif /* _MTK_VPU_H */
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 8f2d152..b15afd8 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -26,6 +26,7 @@
 
 /* mt8173 */
 #define SMI_LARB_MMU_EN		0xf00
+#define MT8167_SMI_LARB_MMU_EN	0xfc0
 
 /* mt2701 */
 #define REG_SMI_SECUR_CON_BASE		0x5c0
@@ -49,17 +50,40 @@
 #define SMI_LARB_NONSEC_CON(id)	(0x380 + ((id) * 4))
 #define F_MMU_EN		BIT(0)
 
+/* SMI COMMON */
+#define SMI_BUS_SEL			0x220
+#define SMI_BUS_LARB_SHIFT(larbid)	((larbid) << 1)
+/* All larbs go to MMU0 by default; only the MMU1 ones are listed here. */
+#define F_MMU1_LARB(larbid)		(0x1 << SMI_BUS_LARB_SHIFT(larbid))
+
+enum mtk_smi_gen {
+	MTK_SMI_GEN1,
+	MTK_SMI_GEN2
+};
+
+struct mtk_smi_common_plat {
+	enum mtk_smi_gen gen;
+	bool             has_gals;
+	u32              bus_sel; /* Balance some larbs to enter mmu0 or mmu1 */
+};
+
 struct mtk_smi_larb_gen {
-	bool need_larbid;
 	int port_in_larb[MTK_LARB_NR_MAX + 1];
 	void (*config_port)(struct device *);
+	unsigned int larb_direct_to_common_mask;
+	bool             has_gals;
 };
 
 struct mtk_smi {
 	struct device			*dev;
 	struct clk			*clk_apb, *clk_smi;
+	struct clk			*clk_gals0, *clk_gals1;
 	struct clk			*clk_async; /*only needed by mt2701*/
-	void __iomem			*smi_ao_base;
+	union {
+		void __iomem		*smi_ao_base; /* only for gen1 */
+		void __iomem		*base;	      /* only for gen2 */
+	};
+	const struct mtk_smi_common_plat *plat;
 };
 
 struct mtk_smi_larb { /* larb: local arbiter */
@@ -71,82 +95,56 @@
 	u32				*mmu;
 };
 
-enum mtk_smi_gen {
-	MTK_SMI_GEN1,
-	MTK_SMI_GEN2
-};
-
-static int mtk_smi_enable(const struct mtk_smi *smi)
+static int mtk_smi_clk_enable(const struct mtk_smi *smi)
 {
 	int ret;
 
-	ret = pm_runtime_get_sync(smi->dev);
-	if (ret < 0)
-		return ret;
-
 	ret = clk_prepare_enable(smi->clk_apb);
 	if (ret)
-		goto err_put_pm;
+		return ret;
 
 	ret = clk_prepare_enable(smi->clk_smi);
 	if (ret)
 		goto err_disable_apb;
 
+	ret = clk_prepare_enable(smi->clk_gals0);
+	if (ret)
+		goto err_disable_smi;
+
+	ret = clk_prepare_enable(smi->clk_gals1);
+	if (ret)
+		goto err_disable_gals0;
+
 	return 0;
 
+err_disable_gals0:
+	clk_disable_unprepare(smi->clk_gals0);
+err_disable_smi:
+	clk_disable_unprepare(smi->clk_smi);
 err_disable_apb:
 	clk_disable_unprepare(smi->clk_apb);
-err_put_pm:
-	pm_runtime_put_sync(smi->dev);
 	return ret;
 }
 
-static void mtk_smi_disable(const struct mtk_smi *smi)
+static void mtk_smi_clk_disable(const struct mtk_smi *smi)
 {
+	clk_disable_unprepare(smi->clk_gals1);
+	clk_disable_unprepare(smi->clk_gals0);
 	clk_disable_unprepare(smi->clk_smi);
 	clk_disable_unprepare(smi->clk_apb);
-	pm_runtime_put_sync(smi->dev);
 }
 
 int mtk_smi_larb_get(struct device *larbdev)
 {
-	struct mtk_smi_larb *larb = dev_get_drvdata(larbdev);
-	const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
-	struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
-	int ret;
+	int ret = pm_runtime_get_sync(larbdev);
 
-	/* Enable the smi-common's power and clocks */
-	ret = mtk_smi_enable(common);
-	if (ret)
-		return ret;
-
-	/* Enable the larb's power and clocks */
-	ret = mtk_smi_enable(&larb->smi);
-	if (ret) {
-		mtk_smi_disable(common);
-		return ret;
-	}
-
-	/* Configure the iommu info for this larb */
-	larb_gen->config_port(larbdev);
-
-	return 0;
+	return (ret < 0) ? ret : 0;
 }
 EXPORT_SYMBOL_GPL(mtk_smi_larb_get);
 
 void mtk_smi_larb_put(struct device *larbdev)
 {
-	struct mtk_smi_larb *larb = dev_get_drvdata(larbdev);
-	struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
-
-	/*
-	 * Don't de-configure the iommu info for this larb since there may be
-	 * several modules in this larb.
-	 * The iommu info will be reset after power off.
-	 */
-
-	mtk_smi_disable(&larb->smi);
-	mtk_smi_disable(common);
+	pm_runtime_put_sync(larbdev);
 }
 EXPORT_SYMBOL_GPL(mtk_smi_larb_put);
 
@@ -154,39 +152,26 @@
 mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
 {
 	struct mtk_smi_larb *larb = dev_get_drvdata(dev);
-	struct mtk_smi_iommu *smi_iommu = data;
+	struct mtk_smi_larb_iommu *larb_mmu = data;
 	unsigned int         i;
 
-	if (larb->larb_gen->need_larbid) {
-		larb->mmu = &smi_iommu->larb_imu[larb->larbid].mmu;
-		return 0;
-	}
-
-	/*
-	 * If there is no larbid property, Loop to find the corresponding
-	 * iommu information.
-	 */
-	for (i = 0; i < smi_iommu->larb_nr; i++) {
-		if (dev == smi_iommu->larb_imu[i].dev) {
-			/* The 'mmu' may be updated in iommu-attach/detach. */
-			larb->mmu = &smi_iommu->larb_imu[i].mmu;
+	for (i = 0; i < MTK_LARB_NR_MAX; i++) {
+		if (dev == larb_mmu[i].dev) {
+			larb->larbid = i;
+			larb->mmu = &larb_mmu[i].mmu;
 			return 0;
 		}
 	}
 	return -ENODEV;
 }
 
-static void mtk_smi_larb_config_port_mt2712(struct device *dev)
+static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
 {
 	struct mtk_smi_larb *larb = dev_get_drvdata(dev);
 	u32 reg;
 	int i;
 
-	/*
-	 * larb 8/9 is the bdpsys larb, the iommu_en is enabled defaultly.
-	 * Don't need to set it again.
-	 */
-	if (larb->larbid == 8 || larb->larbid == 9)
+	if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
 		return;
 
 	for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
@@ -203,6 +188,13 @@
 	writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
 }
 
+static void mtk_smi_larb_config_port_mt8167(struct device *dev)
+{
+	struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+	writel(*larb->mmu, larb->base + MT8167_SMI_LARB_MMU_EN);
+}
+
 static void mtk_smi_larb_config_port_gen1(struct device *dev)
 {
 	struct mtk_smi_larb *larb = dev_get_drvdata(dev);
@@ -250,8 +242,12 @@
 	.config_port = mtk_smi_larb_config_port_mt8173,
 };
 
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8167 = {
+	/* mt8167 does not need the port in larb */
+	.config_port = mtk_smi_larb_config_port_mt8167,
+};
+
 static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
-	.need_larbid = true,
 	.port_in_larb = {
 		LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
 		LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
@@ -260,12 +256,23 @@
 };
 
 static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = {
-	.need_larbid = true,
-	.config_port = mtk_smi_larb_config_port_mt2712,
+	.config_port                = mtk_smi_larb_config_port_gen2_general,
+	.larb_direct_to_common_mask = BIT(8) | BIT(9),      /* bdpsys */
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
+	.has_gals                   = true,
+	.config_port                = mtk_smi_larb_config_port_gen2_general,
+	.larb_direct_to_common_mask = BIT(2) | BIT(3) | BIT(7),
+				      /* IPU0 | IPU1 | CCU */
 };
 
 static const struct of_device_id mtk_smi_larb_of_ids[] = {
 	{
+		.compatible = "mediatek,mt8167-smi-larb",
+		.data = &mtk_smi_larb_mt8167
+	},
+	{
 		.compatible = "mediatek,mt8173-smi-larb",
 		.data = &mtk_smi_larb_mt8173
 	},
@@ -277,6 +284,10 @@
 		.compatible = "mediatek,mt2712-smi-larb",
 		.data = &mtk_smi_larb_mt2712
 	},
+	{
+		.compatible = "mediatek,mt8183-smi-larb",
+		.data = &mtk_smi_larb_mt8183
+	},
 	{}
 };
 
@@ -287,7 +298,6 @@
 	struct device *dev = &pdev->dev;
 	struct device_node *smi_node;
 	struct platform_device *smi_pdev;
-	int err;
 
 	larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
 	if (!larb)
@@ -306,16 +316,16 @@
 	larb->smi.clk_smi = devm_clk_get(dev, "smi");
 	if (IS_ERR(larb->smi.clk_smi))
 		return PTR_ERR(larb->smi.clk_smi);
-	larb->smi.dev = dev;
 
-	if (larb->larb_gen->need_larbid) {
-		err = of_property_read_u32(dev->of_node, "mediatek,larb-id",
-					   &larb->larbid);
-		if (err) {
-			dev_err(dev, "missing larbid property\n");
-			return err;
-		}
+	if (larb->larb_gen->has_gals) {
+		/* Some larbs may not have gals even if the SoC supports it. */
+		larb->smi.clk_gals0 = devm_clk_get(dev, "gals");
+		if (PTR_ERR(larb->smi.clk_gals0) == -ENOENT)
+			larb->smi.clk_gals0 = NULL;
+		else if (IS_ERR(larb->smi.clk_gals0))
+			return PTR_ERR(larb->smi.clk_gals0);
 	}
+	larb->smi.dev = dev;
 
 	smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
 	if (!smi_node)
@@ -344,27 +354,90 @@
 	return 0;
 }
 
+static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+{
+	struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+	const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
+	int ret;
+
+	/* Power on smi-common. */
+	ret = pm_runtime_get_sync(larb->smi_common_dev);
+	if (ret < 0) {
+		dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret);
+		return ret;
+	}
+
+	ret = mtk_smi_clk_enable(&larb->smi);
+	if (ret < 0) {
+		dev_err(dev, "Failed to enable clock(%d).\n", ret);
+		pm_runtime_put_sync(larb->smi_common_dev);
+		return ret;
+	}
+
+	/* Configure the basic setting for this larb */
+	larb_gen->config_port(dev);
+
+	return 0;
+}
+
+static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
+{
+	struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+	mtk_smi_clk_disable(&larb->smi);
+	pm_runtime_put_sync(larb->smi_common_dev);
+	return 0;
+}
+
+static const struct dev_pm_ops smi_larb_pm_ops = {
+	SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL)
+};
+
 static struct platform_driver mtk_smi_larb_driver = {
 	.probe	= mtk_smi_larb_probe,
 	.remove	= mtk_smi_larb_remove,
 	.driver	= {
 		.name = "mtk-smi-larb",
 		.of_match_table = mtk_smi_larb_of_ids,
+		.pm             = &smi_larb_pm_ops,
 	}
 };
 
+static const struct mtk_smi_common_plat mtk_smi_common_gen1 = {
+	.gen = MTK_SMI_GEN1,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_gen2 = {
+	.gen = MTK_SMI_GEN2,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
+	.gen      = MTK_SMI_GEN2,
+	.has_gals = true,
+	.bus_sel  = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
+		    F_MMU1_LARB(7),
+};
+
 static const struct of_device_id mtk_smi_common_of_ids[] = {
 	{
 		.compatible = "mediatek,mt8173-smi-common",
-		.data = (void *)MTK_SMI_GEN2
+		.data = &mtk_smi_common_gen2,
+	},
+	{
+		.compatible = "mediatek,mt8167-smi-common",
+		.data = &mtk_smi_common_gen2,
 	},
 	{
 		.compatible = "mediatek,mt2701-smi-common",
-		.data = (void *)MTK_SMI_GEN1
+		.data = &mtk_smi_common_gen1,
 	},
 	{
 		.compatible = "mediatek,mt2712-smi-common",
-		.data = (void *)MTK_SMI_GEN2
+		.data = &mtk_smi_common_gen2,
+	},
+	{
+		.compatible = "mediatek,mt8183-smi-common",
+		.data = &mtk_smi_common_mt8183,
 	},
 	{}
 };
@@ -374,13 +447,13 @@
 	struct device *dev = &pdev->dev;
 	struct mtk_smi *common;
 	struct resource *res;
-	enum mtk_smi_gen smi_gen;
 	int ret;
 
 	common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
 	if (!common)
 		return -ENOMEM;
 	common->dev = dev;
+	common->plat = of_device_get_match_data(dev);
 
 	common->clk_apb = devm_clk_get(dev, "apb");
 	if (IS_ERR(common->clk_apb))
@@ -390,14 +463,23 @@
 	if (IS_ERR(common->clk_smi))
 		return PTR_ERR(common->clk_smi);
 
+	if (common->plat->has_gals) {
+		common->clk_gals0 = devm_clk_get(dev, "gals0");
+		if (IS_ERR(common->clk_gals0))
+			return PTR_ERR(common->clk_gals0);
+
+		common->clk_gals1 = devm_clk_get(dev, "gals1");
+		if (IS_ERR(common->clk_gals1))
+			return PTR_ERR(common->clk_gals1);
+	}
+
 	/*
 	 * for mtk smi gen 1, we need to get the ao(always on) base to config
 	 * m4u port, and we need to enable the aync clock for transform the smi
 	 * clock into emi clock domain, but for mtk smi gen2, there's no smi ao
 	 * base.
 	 */
-	smi_gen = (enum mtk_smi_gen)of_device_get_match_data(dev);
-	if (smi_gen == MTK_SMI_GEN1) {
+	if (common->plat->gen == MTK_SMI_GEN1) {
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 		common->smi_ao_base = devm_ioremap_resource(dev, res);
 		if (IS_ERR(common->smi_ao_base))
@@ -410,6 +492,11 @@
 		ret = clk_prepare_enable(common->clk_async);
 		if (ret)
 			return ret;
+	} else {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		common->base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(common->base))
+			return PTR_ERR(common->base);
 	}
 	pm_runtime_enable(dev);
 	platform_set_drvdata(pdev, common);
@@ -422,12 +509,42 @@
 	return 0;
 }
 
+static int __maybe_unused mtk_smi_common_resume(struct device *dev)
+{
+	struct mtk_smi *common = dev_get_drvdata(dev);
+	u32 bus_sel = common->plat->bus_sel;
+	int ret;
+
+	ret = mtk_smi_clk_enable(common);
+	if (ret) {
+		dev_err(common->dev, "Failed to enable clock(%d).\n", ret);
+		return ret;
+	}
+
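+	/* Route the larbs selected in plat->bus_sel to MMU1. */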
+	if (common->plat->gen == MTK_SMI_GEN2 && bus_sel)
+		writel(bus_sel, common->base + SMI_BUS_SEL);
+	return 0;
+}
+
+static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
+{
+	struct mtk_smi *common = dev_get_drvdata(dev);
+
+	mtk_smi_clk_disable(common);
+	return 0;
+}
+
+static const struct dev_pm_ops smi_common_pm_ops = {
+	SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL)
+};
+
 static struct platform_driver mtk_smi_common_driver = {
 	.probe	= mtk_smi_common_probe,
 	.remove = mtk_smi_common_remove,
 	.driver	= {
 		.name = "mtk-smi-common",
 		.of_match_table = mtk_smi_common_of_ids,
+		.pm             = &smi_common_pm_ops,
 	}
 };
 
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 5856a94..3bef131 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -229,7 +229,9 @@
 obj-$(CONFIG_INTEL_SOC_PMIC_BXTWC)	+= intel_soc_pmic_bxtwc.o
 obj-$(CONFIG_INTEL_SOC_PMIC_CHTWC)	+= intel_soc_pmic_chtwc.o
 obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI)	+= intel_soc_pmic_chtdc_ti.o
-obj-$(CONFIG_MFD_MT6397)	+= mt6397-core.o
+
+mt6397-objs			:= mt6397-core.o mt6397-irq.o mt6358-irq.o
+obj-$(CONFIG_MFD_MT6397)	+= mt6397.o
 
 obj-$(CONFIG_MFD_ALTERA_A10SR)	+= altera-a10sr.o
 obj-$(CONFIG_MFD_SUN4I_GPADC)	+= sun4i-gpadc.o
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
new file mode 100644
index 0000000..a6e8252
--- /dev/null
+++ b/drivers/mfd/mt6358-irq.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <linux/interrupt.h>
+#include <linux/mfd/mt6358/core.h>
+#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
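+/* MT6358 interrupt top groups; each entry describes one group's registers. */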
+static struct irq_top_t mt6358_ints[] = {
+	MT6358_TOP_GEN(BUCK),
+	MT6358_TOP_GEN(LDO),
+	MT6358_TOP_GEN(PSC),
+	MT6358_TOP_GEN(SCK),
+	MT6358_TOP_GEN(BM),
+	MT6358_TOP_GEN(HK),
+	MT6358_TOP_GEN(AUD),
+	MT6358_TOP_GEN(MISC),
+};
+
+static void pmic_irq_enable(struct irq_data *data)
+{
+	unsigned int hwirq = irqd_to_hwirq(data);
+	struct mt6397_chip *chip = irq_data_get_irq_chip_data(data);
+	struct pmic_irq_data *irqd = chip->irq_data;
+
+	irqd->enable_hwirq[hwirq] = true;
+}
+
+static void pmic_irq_disable(struct irq_data *data)
+{
+	unsigned int hwirq = irqd_to_hwirq(data);
+	struct mt6397_chip *chip = irq_data_get_irq_chip_data(data);
+	struct pmic_irq_data *irqd = chip->irq_data;
+
+	irqd->enable_hwirq[hwirq] = false;
+}
+
+static void pmic_irq_lock(struct irq_data *data)
+{
+	struct mt6397_chip *chip = irq_data_get_irq_chip_data(data);
+
+	mutex_lock(&chip->irqlock);
+}
+
+static void pmic_irq_sync_unlock(struct irq_data *data)
+{
+	unsigned int i, top_gp, en_reg, int_regs, shift;
+	struct mt6397_chip *chip = irq_data_get_irq_chip_data(data);
+	struct pmic_irq_data *irqd = chip->irq_data;
+
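+	/* Write back only the enable bits that changed since the last sync. */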
+	for (i = 0; i < irqd->num_pmic_irqs; i++) {
+		if (irqd->enable_hwirq[i] == irqd->cache_hwirq[i])
+			continue;
+
+		top_gp = 0;
+		while ((top_gp + 1) < ARRAY_SIZE(mt6358_ints) && i >=
+			mt6358_ints[top_gp + 1].hwirq_base)
+			top_gp++;
+
+		if (top_gp >= ARRAY_SIZE(mt6358_ints)) {
+			mutex_unlock(&chip->irqlock);
+			dev_err(chip->dev,
+				"Failed to get top_group: %d\n", top_gp);
+			return;
+		}
+
+		int_regs = (i - mt6358_ints[top_gp].hwirq_base) /
+			    MT6358_REG_WIDTH;
+		en_reg = mt6358_ints[top_gp].en_reg +
+			mt6358_ints[top_gp].en_reg_shift * int_regs;
+		shift = (i - mt6358_ints[top_gp].hwirq_base) % MT6358_REG_WIDTH;
+		regmap_update_bits(chip->regmap, en_reg, BIT(shift),
+				   irqd->enable_hwirq[i] << shift);
+		irqd->cache_hwirq[i] = irqd->enable_hwirq[i];
+	}
+	mutex_unlock(&chip->irqlock);
+}
+
+static struct irq_chip mt6358_irq_chip = {
+	.name = "mt6358-irq",
+	.flags = IRQCHIP_SKIP_SET_WAKE,
+	.irq_enable = pmic_irq_enable,
+	.irq_disable = pmic_irq_disable,
+	.irq_bus_lock = pmic_irq_lock,
+	.irq_bus_sync_unlock = pmic_irq_sync_unlock,
+};
+
+static void mt6358_irq_sp_handler(struct mt6397_chip *chip,
+				  unsigned int top_gp)
+{
+	unsigned int sta_reg, irq_status;
+	unsigned int hwirq, virq;
+	int ret, i, j;
+
+	for (i = 0; i < mt6358_ints[top_gp].num_int_regs; i++) {
+		sta_reg = mt6358_ints[top_gp].sta_reg +
+			mt6358_ints[top_gp].sta_reg_shift * i;
+		ret = regmap_read(chip->regmap, sta_reg, &irq_status);
+		if (ret) {
+			dev_err(chip->dev,
+				"Failed to read irq status: %d\n", ret);
+			return;
+		}
+
+		if (!irq_status)
+			continue;
+
+		for (j = 0; j < MT6358_REG_WIDTH; j++) {
+			if ((irq_status & BIT(j)) == 0)
+				continue;
+			hwirq = mt6358_ints[top_gp].hwirq_base +
+				MT6358_REG_WIDTH * i + j;
+			virq = irq_find_mapping(chip->irq_domain, hwirq);
+			if (virq)
+				handle_nested_irq(virq);
+		}
+
+		regmap_write(chip->regmap, sta_reg, irq_status);
+	}
+}
+
+static irqreturn_t mt6358_irq_handler(int irq, void *data)
+{
+	struct mt6397_chip *chip = data;
+	struct pmic_irq_data *mt6358_irq_data = chip->irq_data;
+	unsigned int top_irq_status;
+	unsigned int i;
+	int ret;
+
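+	/* Read the top-level status word, then demux each asserted group. */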
+	ret = regmap_read(chip->regmap,
+			  mt6358_irq_data->top_int_status_reg,
+			  &top_irq_status);
+	if (ret) {
+		dev_err(chip->dev, "Can't read TOP_INT_STATUS ret=%d\n", ret);
+		return IRQ_NONE;
+	}
+
+	for (i = 0; i < mt6358_irq_data->num_top; i++) {
+		if (top_irq_status & BIT(mt6358_ints[i].top_offset))
+			mt6358_irq_sp_handler(chip, i);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int pmic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+			       irq_hw_number_t hw)
+{
+	struct mt6397_chip *mt6397 = d->host_data;
+
+	irq_set_chip_data(irq, mt6397);
+	irq_set_chip_and_handler(irq, &mt6358_irq_chip, handle_level_irq);
+	irq_set_nested_thread(irq, 1);
+	irq_set_noprobe(irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops mt6358_irq_domain_ops = {
+	.map = pmic_irq_domain_map,
+	.xlate = irq_domain_xlate_twocell,
+};
+
+int mt6358_irq_init(struct mt6397_chip *chip)
+{
+	int i, j, ret;
+	struct pmic_irq_data *irqd;
+
+	irqd = devm_kzalloc(chip->dev, sizeof(*irqd),
+			    GFP_KERNEL);
+	if (!irqd)
+		return -ENOMEM;
+
+	chip->irq_data = irqd;
+
+	mutex_init(&chip->irqlock);
+	irqd->top_int_status_reg = MT6358_TOP_INT_STATUS0;
+	irqd->num_pmic_irqs = MT6358_IRQ_NR;
+	irqd->num_top = ARRAY_SIZE(mt6358_ints);
+
+	irqd->enable_hwirq = devm_kcalloc(chip->dev,
+					  irqd->num_pmic_irqs,
+					  sizeof(bool),
+					  GFP_KERNEL);
+	if (!irqd->enable_hwirq)
+		return -ENOMEM;
+
+	irqd->cache_hwirq = devm_kcalloc(chip->dev,
+					 irqd->num_pmic_irqs,
+					 sizeof(bool),
+					 GFP_KERNEL);
+	if (!irqd->cache_hwirq)
+		return -ENOMEM;
+
+	/* Disable all interrupts during initialization */
+	for (i = 0; i < irqd->num_top; i++) {
+		for (j = 0; j < mt6358_ints[i].num_int_regs; j++)
+			regmap_write(chip->regmap,
+				     mt6358_ints[i].en_reg +
+				     mt6358_ints[i].en_reg_shift * j, 0);
+	}
+
+	chip->irq_domain = irq_domain_add_linear(chip->dev->of_node,
+						 irqd->num_pmic_irqs,
+						 &mt6358_irq_domain_ops, chip);
+	if (!chip->irq_domain) {
+		dev_err(chip->dev, "could not create irq domain\n");
+		return -ENODEV;
+	}
+
+	ret = devm_request_threaded_irq(chip->dev, chip->irq, NULL,
+					mt6358_irq_handler, IRQF_ONESHOT,
+					mt6358_irq_chip.name, chip);
+	if (ret) {
+		dev_err(chip->dev, "failed to register irq=%d; err: %d\n",
+			chip->irq, ret);
+		return ret;
+	}
+
+	enable_irq_wake(chip->irq);
+	return ret;
+}
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index ab24e17..ddffa55 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -12,23 +12,51 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/regmap.h>
 #include <linux/mfd/core.h>
-#include <linux/mfd/mt6397/core.h>
 #include <linux/mfd/mt6323/core.h>
-#include <linux/mfd/mt6397/registers.h>
+#include <linux/mfd/mt6358/core.h>
+#include <linux/mfd/mt6392/core.h>
+#include <linux/mfd/mt6397/core.h>
 #include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6392/registers.h>
+#include <linux/mfd/mt6397/registers.h>
 
+#define MT6358_RTC_BASE		0x0588
+#define MT6358_RTC_SIZE		0x3c
+#define MT6358_RTC_WRTGR_OFFSET	0x3a
+#define MT6392_RTC_BASE		0x8000
+#define MT6392_RTC_SIZE		0x3e
 #define MT6397_RTC_BASE		0xe000
 #define MT6397_RTC_SIZE		0x3e
+#define MT6397_RTC_WRTGR_OFFSET	0x3c
 
-#define MT6323_CID_CODE		0x23
-#define MT6391_CID_CODE		0x91
-#define MT6397_CID_CODE		0x97
+static const struct resource mt6358_rtc_resources[] = {
+	{
+		.start = MT6358_RTC_BASE,
+		.end   = MT6358_RTC_BASE + MT6358_RTC_SIZE,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = MT6358_IRQ_RTC,
+		.end   = MT6358_IRQ_RTC,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = MT6358_RTC_WRTGR_OFFSET,
+		.end   = MT6358_RTC_WRTGR_OFFSET,
+		.flags = IORESOURCE_REG,
+	},
+};
+
+static const struct resource mt6392_rtc_resources[] = {
+	DEFINE_RES_MEM(MT6392_RTC_BASE, MT6392_RTC_SIZE),
+	DEFINE_RES_IRQ(MT6392_IRQ_RTC),
+};
 
 static const struct resource mt6397_rtc_resources[] = {
 	{
@@ -41,6 +69,11 @@
 		.end   = MT6397_IRQ_RTC,
 		.flags = IORESOURCE_IRQ,
 	},
+	{
+		.start = MT6397_RTC_WRTGR_OFFSET,
+		.end   = MT6397_RTC_WRTGR_OFFSET,
+		.flags = IORESOURCE_REG,
+	},
 };
 
 static const struct resource mt6323_keys_resources[] = {
@@ -48,11 +81,33 @@
 	DEFINE_RES_IRQ(MT6323_IRQ_STATUS_FCHRKEY),
 };
 
+static const struct resource mt6392_keys_resources[] = {
+	DEFINE_RES_IRQ(MT6392_IRQ_PWRKEY),
+	DEFINE_RES_IRQ(MT6392_IRQ_FCHRKEY),
+};
+
 static const struct resource mt6397_keys_resources[] = {
 	DEFINE_RES_IRQ(MT6397_IRQ_PWRKEY),
 	DEFINE_RES_IRQ(MT6397_IRQ_HOMEKEY),
 };
 
+static const struct mfd_cell mt6392_devs[] = {
+	{
+		.name = "mt6392-regulator",
+		.of_compatible = "mediatek,mt6392-regulator",
+	}, {
+		.name = "mt6397-rtc",
+		.num_resources = ARRAY_SIZE(mt6392_rtc_resources),
+		.resources = mt6392_rtc_resources,
+		.of_compatible = "mediatek,mt6392-rtc",
+	}, {
+		.name = "mtk-pmic-keys",
+		.num_resources = ARRAY_SIZE(mt6392_keys_resources),
+		.resources = mt6392_keys_resources,
+		.of_compatible = "mediatek,mt6392-keys"
+	},
+};
+
 static const struct mfd_cell mt6323_devs[] = {
 	{
 		.name = "mt6323-regulator",
@@ -68,6 +123,21 @@
 	},
 };
 
+static const struct mfd_cell mt6358_devs[] = {
+	{
+		.name = "mt6358-regulator",
+		.of_compatible = "mediatek,mt6358-regulator"
+	}, {
+		.name = "mt6397-rtc",
+		.num_resources = ARRAY_SIZE(mt6358_rtc_resources),
+		.resources = mt6358_rtc_resources,
+		.of_compatible = "mediatek,mt6358-rtc",
+	}, {
+		.name = "mt6358-sound",
+		.of_compatible = "mediatek,mt6358-sound"
+	},
+};
+
 static const struct mfd_cell mt6397_devs[] = {
 	{
 		.name = "mt6397-rtc",
@@ -94,182 +164,37 @@
 	}
 };
 
-static void mt6397_irq_lock(struct irq_data *data)
-{
-	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
-
-	mutex_lock(&mt6397->irqlock);
-}
-
-static void mt6397_irq_sync_unlock(struct irq_data *data)
-{
-	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
-
-	regmap_write(mt6397->regmap, mt6397->int_con[0],
-		     mt6397->irq_masks_cur[0]);
-	regmap_write(mt6397->regmap, mt6397->int_con[1],
-		     mt6397->irq_masks_cur[1]);
-
-	mutex_unlock(&mt6397->irqlock);
-}
-
-static void mt6397_irq_disable(struct irq_data *data)
-{
-	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
-	int shift = data->hwirq & 0xf;
-	int reg = data->hwirq >> 4;
-
-	mt6397->irq_masks_cur[reg] &= ~BIT(shift);
-}
-
-static void mt6397_irq_enable(struct irq_data *data)
-{
-	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
-	int shift = data->hwirq & 0xf;
-	int reg = data->hwirq >> 4;
-
-	mt6397->irq_masks_cur[reg] |= BIT(shift);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int mt6397_irq_set_wake(struct irq_data *irq_data, unsigned int on)
-{
-	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(irq_data);
-	int shift = irq_data->hwirq & 0xf;
-	int reg = irq_data->hwirq >> 4;
-
-	if (on)
-		mt6397->wake_mask[reg] |= BIT(shift);
-	else
-		mt6397->wake_mask[reg] &= ~BIT(shift);
-
-	return 0;
-}
-#else
-#define mt6397_irq_set_wake NULL
-#endif
-
-static struct irq_chip mt6397_irq_chip = {
-	.name = "mt6397-irq",
-	.irq_bus_lock = mt6397_irq_lock,
-	.irq_bus_sync_unlock = mt6397_irq_sync_unlock,
-	.irq_enable = mt6397_irq_enable,
-	.irq_disable = mt6397_irq_disable,
-	.irq_set_wake = mt6397_irq_set_wake,
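+/* Location of the chip-id register used to identify the PMIC at probe time. */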
+struct chip_data {
+	u32 cid_addr;
+	u32 cid_shift;
 };
 
-static void mt6397_irq_handle_reg(struct mt6397_chip *mt6397, int reg,
-		int irqbase)
-{
-	unsigned int status;
-	int i, irq, ret;
-
-	ret = regmap_read(mt6397->regmap, reg, &status);
-	if (ret) {
-		dev_err(mt6397->dev, "Failed to read irq status: %d\n", ret);
-		return;
-	}
-
-	for (i = 0; i < 16; i++) {
-		if (status & BIT(i)) {
-			irq = irq_find_mapping(mt6397->irq_domain, irqbase + i);
-			if (irq)
-				handle_nested_irq(irq);
-		}
-	}
-
-	regmap_write(mt6397->regmap, reg, status);
-}
-
-static irqreturn_t mt6397_irq_thread(int irq, void *data)
-{
-	struct mt6397_chip *mt6397 = data;
-
-	mt6397_irq_handle_reg(mt6397, mt6397->int_status[0], 0);
-	mt6397_irq_handle_reg(mt6397, mt6397->int_status[1], 16);
-
-	return IRQ_HANDLED;
-}
-
-static int mt6397_irq_domain_map(struct irq_domain *d, unsigned int irq,
-					irq_hw_number_t hw)
-{
-	struct mt6397_chip *mt6397 = d->host_data;
-
-	irq_set_chip_data(irq, mt6397);
-	irq_set_chip_and_handler(irq, &mt6397_irq_chip, handle_level_irq);
-	irq_set_nested_thread(irq, 1);
-	irq_set_noprobe(irq);
-
-	return 0;
-}
-
-static const struct irq_domain_ops mt6397_irq_domain_ops = {
-	.map = mt6397_irq_domain_map,
+static const struct chip_data mt6323_core = {
+	.cid_addr = MT6323_CID,
+	.cid_shift = 0,
 };
 
-static int mt6397_irq_init(struct mt6397_chip *mt6397)
-{
-	int ret;
+static const struct chip_data mt6358_core = {
+	.cid_addr = MT6358_SWCID,
+	.cid_shift = 8,
+};
 
-	mutex_init(&mt6397->irqlock);
+static const struct chip_data mt6392_core = {
+	.cid_addr = MT6392_CID,
+	.cid_shift = 0,
+};
 
-	/* Mask all interrupt sources */
-	regmap_write(mt6397->regmap, mt6397->int_con[0], 0x0);
-	regmap_write(mt6397->regmap, mt6397->int_con[1], 0x0);
-
-	mt6397->irq_domain = irq_domain_add_linear(mt6397->dev->of_node,
-		MT6397_IRQ_NR, &mt6397_irq_domain_ops, mt6397);
-	if (!mt6397->irq_domain) {
-		dev_err(mt6397->dev, "could not create irq domain\n");
-		return -ENOMEM;
-	}
-
-	ret = devm_request_threaded_irq(mt6397->dev, mt6397->irq, NULL,
-		mt6397_irq_thread, IRQF_ONESHOT, "mt6397-pmic", mt6397);
-	if (ret) {
-		dev_err(mt6397->dev, "failed to register irq=%d; err: %d\n",
-			mt6397->irq, ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int mt6397_irq_suspend(struct device *dev)
-{
-	struct mt6397_chip *chip = dev_get_drvdata(dev);
-
-	regmap_write(chip->regmap, chip->int_con[0], chip->wake_mask[0]);
-	regmap_write(chip->regmap, chip->int_con[1], chip->wake_mask[1]);
-
-	enable_irq_wake(chip->irq);
-
-	return 0;
-}
-
-static int mt6397_irq_resume(struct device *dev)
-{
-	struct mt6397_chip *chip = dev_get_drvdata(dev);
-
-	regmap_write(chip->regmap, chip->int_con[0], chip->irq_masks_cur[0]);
-	regmap_write(chip->regmap, chip->int_con[1], chip->irq_masks_cur[1]);
-
-	disable_irq_wake(chip->irq);
-
-	return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
-			mt6397_irq_resume);
+static const struct chip_data mt6397_core = {
+	.cid_addr = MT6397_CID,
+	.cid_shift = 0,
+};
 
 static int mt6397_probe(struct platform_device *pdev)
 {
 	int ret;
 	unsigned int id;
 	struct mt6397_chip *pmic;
+	const struct chip_data *pmic_core;
 
 	pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
 	if (!pmic)
@@ -285,50 +210,60 @@
 	if (!pmic->regmap)
 		return -ENODEV;
 
-	platform_set_drvdata(pdev, pmic);
+	pmic_core = of_device_get_match_data(&pdev->dev);
+	if (!pmic_core)
+		return -ENODEV;
 
-	ret = regmap_read(pmic->regmap, MT6397_CID, &id);
+	ret = regmap_read(pmic->regmap, pmic_core->cid_addr, &id);
 	if (ret) {
-		dev_err(pmic->dev, "Failed to read chip id: %d\n", ret);
+		dev_err(&pdev->dev, "Failed to read chip id: %d\n", ret);
 		return ret;
 	}
 
+	pmic->chip_id = (id >> pmic_core->cid_shift) & 0xff;
+
+	platform_set_drvdata(pdev, pmic);
+
 	pmic->irq = platform_get_irq(pdev, 0);
+	if (pmic->irq <= 0)
+		return pmic->irq ? pmic->irq : -ENODEV;
 
-	switch (id & 0xff) {
-	case MT6323_CID_CODE:
-		pmic->int_con[0] = MT6323_INT_CON0;
-		pmic->int_con[1] = MT6323_INT_CON1;
-		pmic->int_status[0] = MT6323_INT_STATUS0;
-		pmic->int_status[1] = MT6323_INT_STATUS1;
+	if (pmic->chip_id == MT6358_CHIP_ID)
+		ret = mt6358_irq_init(pmic);
+	else
 		ret = mt6397_irq_init(pmic);
-		if (ret)
-			return ret;
 
+	if (ret)
+		return ret;
+
+	switch (pmic->chip_id) {
+	case MT6323_CHIP_ID:
 		ret = devm_mfd_add_devices(&pdev->dev, -1, mt6323_devs,
 					   ARRAY_SIZE(mt6323_devs), NULL,
 					   0, pmic->irq_domain);
 		break;
 
-	case MT6397_CID_CODE:
-	case MT6391_CID_CODE:
-		pmic->int_con[0] = MT6397_INT_CON0;
-		pmic->int_con[1] = MT6397_INT_CON1;
-		pmic->int_status[0] = MT6397_INT_STATUS0;
-		pmic->int_status[1] = MT6397_INT_STATUS1;
-		ret = mt6397_irq_init(pmic);
-		if (ret)
-			return ret;
+	case MT6358_CHIP_ID:
+		ret = devm_mfd_add_devices(&pdev->dev, -1, mt6358_devs,
+					   ARRAY_SIZE(mt6358_devs), NULL,
+					   0, pmic->irq_domain);
+		break;
 
+	case MT6391_CHIP_ID:
+	case MT6397_CHIP_ID:
 		ret = devm_mfd_add_devices(&pdev->dev, -1, mt6397_devs,
 					   ARRAY_SIZE(mt6397_devs), NULL,
 					   0, pmic->irq_domain);
 		break;
 
+	case MT6392_CHIP_ID:
+		ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
+					   mt6392_devs, ARRAY_SIZE(mt6392_devs),
+					   NULL, 0, pmic->irq_domain);
+		break;
+
 	default:
-		dev_err(&pdev->dev, "unsupported chip: %d\n", id);
+		dev_err(&pdev->dev, "unsupported chip: %d\n", pmic->chip_id);
 		return -ENODEV;
 	}
 
@@ -341,9 +276,21 @@
 }
 
 static const struct of_device_id mt6397_of_match[] = {
-	{ .compatible = "mediatek,mt6397" },
-	{ .compatible = "mediatek,mt6323" },
-	{ }
+	{
+		.compatible = "mediatek,mt6323",
+		.data = &mt6323_core,
+	}, {
+		.compatible = "mediatek,mt6358",
+		.data = &mt6358_core,
+	}, {
+		.compatible = "mediatek,mt6392",
+		.data = &mt6392_core,
+	}, {
+		.compatible = "mediatek,mt6397",
+		.data = &mt6397_core,
+	}, {
+		/* sentinel */
+	}
 };
 MODULE_DEVICE_TABLE(of, mt6397_of_match);
 
@@ -358,7 +305,6 @@
 	.driver = {
 		.name = "mt6397",
 		.of_match_table = of_match_ptr(mt6397_of_match),
-		.pm = &mt6397_pm_ops,
 	},
 	.id_table = mt6397_id,
 };
diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c
new file mode 100644
index 0000000..cb5211c
--- /dev/null
+++ b/drivers/mfd/mt6397-irq.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/suspend.h>
+#include <linux/mfd/mt6323/core.h>
+#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6392/core.h>
+#include <linux/mfd/mt6392/registers.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6397/registers.h>
+
+static void mt6397_irq_lock(struct irq_data *data)
+{
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
+
+	mutex_lock(&mt6397->irqlock);
+}
+
+static void mt6397_irq_sync_unlock(struct irq_data *data)
+{
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
+
+	regmap_write(mt6397->regmap, mt6397->int_con[0],
+		     mt6397->irq_masks_cur[0]);
+	regmap_write(mt6397->regmap, mt6397->int_con[1],
+		     mt6397->irq_masks_cur[1]);
+
+	mutex_unlock(&mt6397->irqlock);
+}
+
+static void mt6397_irq_disable(struct irq_data *data)
+{
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
+	int shift = data->hwirq & 0xf;
+	int reg = data->hwirq >> 4;
+
+	mt6397->irq_masks_cur[reg] &= ~BIT(shift);
+}
+
+static void mt6397_irq_enable(struct irq_data *data)
+{
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
+	int shift = data->hwirq & 0xf;
+	int reg = data->hwirq >> 4;
+
+	mt6397->irq_masks_cur[reg] |= BIT(shift);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mt6397_irq_set_wake(struct irq_data *irq_data, unsigned int on)
+{
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(irq_data);
+	int shift = irq_data->hwirq & 0xf;
+	int reg = irq_data->hwirq >> 4;
+
+	if (on)
+		mt6397->wake_mask[reg] |= BIT(shift);
+	else
+		mt6397->wake_mask[reg] &= ~BIT(shift);
+
+	return 0;
+}
+#else
+#define mt6397_irq_set_wake NULL
+#endif
+
+static struct irq_chip mt6397_irq_chip = {
+	.name = "mt6397-irq",
+	.irq_bus_lock = mt6397_irq_lock,
+	.irq_bus_sync_unlock = mt6397_irq_sync_unlock,
+	.irq_enable = mt6397_irq_enable,
+	.irq_disable = mt6397_irq_disable,
+	.irq_set_wake = mt6397_irq_set_wake,
+};
+
+static void mt6397_irq_handle_reg(struct mt6397_chip *mt6397, int reg,
+				  int irqbase)
+{
+	unsigned int status;
+	int i, irq, ret;
+
+	ret = regmap_read(mt6397->regmap, reg, &status);
+	if (ret) {
+		dev_err(mt6397->dev, "Failed to read irq status: %d\n", ret);
+		return;
+	}
+
+	for (i = 0; i < 16; i++) {
+		if (status & BIT(i)) {
+			irq = irq_find_mapping(mt6397->irq_domain, irqbase + i);
+			if (irq)
+				handle_nested_irq(irq);
+		}
+	}
+
+	regmap_write(mt6397->regmap, reg, status);
+}
+
+static irqreturn_t mt6397_irq_thread(int irq, void *data)
+{
+	struct mt6397_chip *mt6397 = data;
+
+	mt6397_irq_handle_reg(mt6397, mt6397->int_status[0], 0);
+	mt6397_irq_handle_reg(mt6397, mt6397->int_status[1], 16);
+
+	return IRQ_HANDLED;
+}
+
+static int mt6397_irq_domain_map(struct irq_domain *d, unsigned int irq,
+				 irq_hw_number_t hw)
+{
+	struct mt6397_chip *mt6397 = d->host_data;
+
+	irq_set_chip_data(irq, mt6397);
+	irq_set_chip_and_handler(irq, &mt6397_irq_chip, handle_level_irq);
+	irq_set_nested_thread(irq, 1);
+	irq_set_noprobe(irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops mt6397_irq_domain_ops = {
+	.map = mt6397_irq_domain_map,
+};
+
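+/*
+ * Across suspend the interrupt enable registers are reprogrammed with the
+ * wake mask so that only wake-enabled sources can fire; the normal masks
+ * are restored on resume.
+ */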
+static int mt6397_irq_pm_notifier(struct notifier_block *notifier,
+				  unsigned long pm_event, void *unused)
+{
+	struct mt6397_chip *chip =
+		container_of(notifier, struct mt6397_chip, pm_nb);
+
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+		regmap_write(chip->regmap,
+			     chip->int_con[0], chip->wake_mask[0]);
+		regmap_write(chip->regmap,
+			     chip->int_con[1], chip->wake_mask[1]);
+		enable_irq_wake(chip->irq);
+		break;
+
+	case PM_POST_SUSPEND:
+		regmap_write(chip->regmap,
+			     chip->int_con[0], chip->irq_masks_cur[0]);
+		regmap_write(chip->regmap,
+			     chip->int_con[1], chip->irq_masks_cur[1]);
+		disable_irq_wake(chip->irq);
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+int mt6397_irq_init(struct mt6397_chip *chip)
+{
+	int ret;
+
+	mutex_init(&chip->irqlock);
+
+	switch (chip->chip_id) {
+	case MT6323_CHIP_ID:
+		chip->int_con[0] = MT6323_INT_CON0;
+		chip->int_con[1] = MT6323_INT_CON1;
+		chip->int_status[0] = MT6323_INT_STATUS0;
+		chip->int_status[1] = MT6323_INT_STATUS1;
+		break;
+
+	case MT6391_CHIP_ID:
+	case MT6397_CHIP_ID:
+		chip->int_con[0] = MT6397_INT_CON0;
+		chip->int_con[1] = MT6397_INT_CON1;
+		chip->int_status[0] = MT6397_INT_STATUS0;
+		chip->int_status[1] = MT6397_INT_STATUS1;
+		break;
+
+	case MT6392_CHIP_ID:
+		chip->int_con[0] = MT6392_INT_CON0;
+		chip->int_con[1] = MT6392_INT_CON1;
+		chip->int_status[0] = MT6392_INT_STATUS0;
+		chip->int_status[1] = MT6392_INT_STATUS1;
+		break;
+
+	default:
+		dev_err(chip->dev, "unsupported chip: 0x%x\n", chip->chip_id);
+		return -ENODEV;
+	}
+
+	/* Mask all interrupt sources */
+	regmap_write(chip->regmap, chip->int_con[0], 0x0);
+	regmap_write(chip->regmap, chip->int_con[1], 0x0);
+
+	chip->pm_nb.notifier_call = mt6397_irq_pm_notifier;
+	chip->irq_domain = irq_domain_add_linear(chip->dev->of_node,
+						 MT6397_IRQ_NR,
+						 &mt6397_irq_domain_ops,
+						 chip);
+	if (!chip->irq_domain) {
+		dev_err(chip->dev, "could not create irq domain\n");
+		return -ENOMEM;
+	}
+
+	ret = devm_request_threaded_irq(chip->dev, chip->irq, NULL,
+					mt6397_irq_thread, IRQF_ONESHOT,
+					"mt6397-pmic", chip);
+	if (ret) {
+		dev_err(chip->dev, "failed to register irq=%d; err: %d\n",
+			chip->irq, ret);
+		return ret;
+	}
+
+	register_pm_notifier(&chip->pm_nb);
+	return 0;
+}
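+
+/*
+ * A minimal consumer sketch (hypothetical names, not part of this file):
+ * a sub-function driver can resolve its interrupt through the chip's
+ * irq_domain and request it as a nested threaded handler:
+ *
+ *	unsigned int virq = irq_create_mapping(chip->irq_domain, hwirq);
+ *
+ *	if (!virq)
+ *		return -EINVAL;
+ *	ret = devm_request_threaded_irq(dev, virq, NULL, sub_irq_handler,
+ *					IRQF_ONESHOT, "mt6397-sub", priv);
+ */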
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 3726eac..9008a09 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -527,4 +527,5 @@
 source "drivers/misc/cxl/Kconfig"
 source "drivers/misc/ocxl/Kconfig"
 source "drivers/misc/cardreader/Kconfig"
+source "drivers/misc/mediatek/Kconfig"
 endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index af22bbc..bdc59be 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -57,4 +57,5 @@
 obj-$(CONFIG_ASPEED_LPC_SNOOP)	+= aspeed-lpc-snoop.o
 obj-$(CONFIG_PCI_ENDPOINT_TEST)	+= pci_endpoint_test.o
 obj-$(CONFIG_OCXL)		+= ocxl/
 obj-$(CONFIG_MISC_RTSX)		+= cardreader/
+obj-y				+= mediatek/
diff --git a/drivers/misc/mediatek/Kconfig b/drivers/misc/mediatek/Kconfig
new file mode 100644
index 0000000..c3544322
--- /dev/null
+++ b/drivers/misc/mediatek/Kconfig
@@ -0,0 +1,2 @@
+source "drivers/misc/mediatek/gpu/gpu_rgx/Kconfig"
+source "drivers/misc/mediatek/usb11/Kconfig"
diff --git a/drivers/misc/mediatek/Makefile b/drivers/misc/mediatek/Makefile
new file mode 100644
index 0000000..eb759c1
--- /dev/null
+++ b/drivers/misc/mediatek/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ARCH_MEDIATEK) += xo/
+obj-$(CONFIG_MTK_USBFSH) += usb11/
+obj-y += gpu/
diff --git a/drivers/misc/mediatek/base/power/mt8167/mtk_io.h b/drivers/misc/mediatek/base/power/mt8167/mtk_io.h
new file mode 100644
index 0000000..de17db5
--- /dev/null
+++ b/drivers/misc/mediatek/base/power/mt8167/mtk_io.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT_IO_H__
+#define __MT_IO_H__
+
+/* only for arm64 */
+#ifdef CONFIG_ARM64
+#define IOMEM(a)	((void __force __iomem *)((a)))
+#endif
+
+#endif  /* !__MT_IO_H__ */
+
diff --git a/drivers/misc/mediatek/base/power/mt8167/mtk_spm.h b/drivers/misc/mediatek/base/power/mt8167/mtk_spm.h
new file mode 100644
index 0000000..60bb931
--- /dev/null
+++ b/drivers/misc/mediatek/base/power/mt8167/mtk_spm.h
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT_SPM_
+#define _MT_SPM_
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqchip/mtk-gic-extend.h>
+
+extern void __iomem *spm_base;
+extern void __iomem *scp_i2c0_base;
+extern void __iomem *scp_i2c1_base;
+extern void __iomem *scp_i2c2_base;
+extern u32 spm_irq_0;
+extern u32 spm_irq_1;
+extern u32 spm_irq_2;
+extern u32 spm_irq_3;
+extern u32 spm_irq_4;
+extern u32 spm_irq_5;
+extern u32 spm_irq_6;
+extern u32 spm_irq_7;
+
+#undef SPM_BASE
+#define SPM_BASE spm_base
+
+
+/* #include <mach/mt_irq.h> */
+#include "sync_write.h"
+#include "mtk_io.h"
+
+/**************************************
+ * Config and Parameter
+ **************************************/
+
+#undef SPM_I2C0_BASE
+#undef SPM_I2C1_BASE
+#undef SPM_I2C2_BASE
+#define SPM_I2C0_BASE	scp_i2c0_base
+#define SPM_I2C1_BASE	scp_i2c1_base
+#define SPM_I2C2_BASE	scp_i2c2_base
+
+#define SPM_IRQ0_ID		spm_irq_0
+#define SPM_IRQ1_ID		spm_irq_1
+#define SPM_IRQ2_ID		spm_irq_2
+#define SPM_IRQ3_ID		spm_irq_3
+#define SPM_IRQ4_ID		spm_irq_4
+#define SPM_IRQ5_ID		spm_irq_5
+#define SPM_IRQ6_ID		spm_irq_6
+#define SPM_IRQ7_ID		spm_irq_7
+
+/**************************************
+ * Define and Declare
+ **************************************/
+#define SPM_POWERON_CONFIG_SET		(SPM_BASE + 0x000)
+#define SPM_POWER_ON_VAL0		(SPM_BASE + 0x010)
+#define SPM_POWER_ON_VAL1		(SPM_BASE + 0x014)
+#define SPM_CLK_SETTLE			(SPM_BASE + 0x100)
+#define SPM_CA7_CPU0_PWR_CON		(SPM_BASE + 0x200)
+#define SPM_CA7_DBG_PWR_CON		(SPM_BASE + 0x204)
+#define SPM_CA7_CPUTOP_PWR_CON		(SPM_BASE + 0x208)
+#define SPM_VDE_PWR_CON			(SPM_BASE + 0x210)
+#define SPM_MFG_PWR_CON			(SPM_BASE + 0x214)
+#define SPM_CA7_CPU1_PWR_CON		(SPM_BASE + 0x218)
+#define SPM_CA7_CPU2_PWR_CON		(SPM_BASE + 0x21c)
+#define SPM_CA7_CPU3_PWR_CON		(SPM_BASE + 0x220)
+#define SPM_VEN_PWR_CON			(SPM_BASE + 0x230)
+#define SPM_IFR_PWR_CON			(SPM_BASE + 0x234)
+#define SPM_ISP_PWR_CON			(SPM_BASE + 0x238)
+#define SPM_DIS_PWR_CON			(SPM_BASE + 0x23c)
+#define SPM_DPY_PWR_CON			(SPM_BASE + 0x240)
+#define SPM_CA7_CPUTOP_L2_PDN		(SPM_BASE + 0x244)
+#define SPM_CA7_CPUTOP_L2_SLEEP		(SPM_BASE + 0x248)
+#define SPM_CA7_CPU0_L1_PDN		(SPM_BASE + 0x25c)
+#define SPM_CA7_CPU1_L1_PDN		(SPM_BASE + 0x264)
+#define SPM_CA7_CPU2_L1_PDN		(SPM_BASE + 0x26c)
+#define SPM_CA7_CPU3_L1_PDN		(SPM_BASE + 0x274)
+#define SPM_GCPU_SRAM_CON		(SPM_BASE + 0x27c)
+#define SPM_CONN_PWR_CON		(SPM_BASE + 0x280)
+#define SPM_MCU_PWR_CON			(SPM_BASE + 0x290)
+#define SPM_IFR_SRAMROM_CON		(SPM_BASE + 0x294)
+#define SPM_MJC_PWR_CON			(SPM_BASE + 0x298)
+#define SPM_AUDIO_PWR_CON		(SPM_BASE + 0x29c)
+#define SPM_MFG_2D_PWR_CON		(SPM_BASE + 0x2c0)
+#define SPM_MFG_ASYNC_PWR_CON		(SPM_BASE + 0x2c4)
+#define SPM_ARMPLL_DIV_PWR_CON		(SPM_BASE + 0x2cc)
+#define SPM_INFRA_MD_PWR_CON		(SPM_BASE + 0x2d8)
+#define SPM_CPU_EXT_ISO			(SPM_BASE + 0x2dc)
+#define SPM_PCM_CON0			(SPM_BASE + 0x310)
+#define SPM_PCM_CON1			(SPM_BASE + 0x314)
+#define SPM_PCM_IM_PTR			(SPM_BASE + 0x318)
+#define SPM_PCM_IM_LEN			(SPM_BASE + 0x31c)
+#define SPM_PCM_REG_DATA_INI		(SPM_BASE + 0x320)
+#define SPM_PCM_EVENT_VECTOR0		(SPM_BASE + 0x340)
+#define SPM_PCM_EVENT_VECTOR1		(SPM_BASE + 0x344)
+#define SPM_PCM_EVENT_VECTOR2		(SPM_BASE + 0x348)
+#define SPM_PCM_EVENT_VECTOR3		(SPM_BASE + 0x34c)
+#define SPM_PCM_MAS_PAUSE_MASK		(SPM_BASE + 0x354)
+#define SPM_PCM_PWR_IO_EN		(SPM_BASE + 0x358)
+#define SPM_PCM_TIMER_VAL		(SPM_BASE + 0x35c)
+#define SPM_PCM_TIMER_OUT		(SPM_BASE + 0x360)
+#define SPM_PCM_REG0_DATA		(SPM_BASE + 0x380)
+#define SPM_PCM_REG1_DATA		(SPM_BASE + 0x384)
+#define SPM_PCM_REG2_DATA		(SPM_BASE + 0x388)
+#define SPM_PCM_REG3_DATA		(SPM_BASE + 0x38c)
+#define SPM_PCM_REG4_DATA		(SPM_BASE + 0x390)
+#define SPM_PCM_REG5_DATA		(SPM_BASE + 0x394)
+#define SPM_PCM_REG6_DATA		(SPM_BASE + 0x398)
+#define SPM_PCM_REG7_DATA		(SPM_BASE + 0x39c)
+#define SPM_PCM_REG8_DATA		(SPM_BASE + 0x3a0)
+#define SPM_PCM_REG9_DATA		(SPM_BASE + 0x3a4)
+#define SPM_PCM_REG10_DATA		(SPM_BASE + 0x3a8)
+#define SPM_PCM_REG11_DATA		(SPM_BASE + 0x3ac)
+#define SPM_PCM_REG12_DATA		(SPM_BASE + 0x3b0)
+#define SPM_PCM_REG13_DATA		(SPM_BASE + 0x3b4)
+#define SPM_PCM_REG14_DATA		(SPM_BASE + 0x3b8)
+#define SPM_PCM_REG15_DATA		(SPM_BASE + 0x3bc)
+#define SPM_PCM_EVENT_REG_STA		(SPM_BASE + 0x3c0)
+#define SPM_PCM_FSM_STA			(SPM_BASE + 0x3c4)
+#define SPM_PCM_IM_HOST_RW_PTR		(SPM_BASE + 0x3c8)
+#define SPM_PCM_IM_HOST_RW_DAT		(SPM_BASE + 0x3cc)
+#define SPM_PCM_EVENT_VECTOR4		(SPM_BASE + 0x3d0)
+#define SPM_PCM_EVENT_VECTOR5		(SPM_BASE + 0x3d4)
+#define SPM_PCM_EVENT_VECTOR6		(SPM_BASE + 0x3d8)
+#define SPM_PCM_EVENT_VECTOR7		(SPM_BASE + 0x3dc)
+#define SPM_PCM_SW_INT_SET		(SPM_BASE + 0x3e0)
+#define SPM_PCM_SW_INT_CLEAR		(SPM_BASE + 0x3e4)
+#define SPM_CLK_CON			(SPM_BASE + 0x400)
+#define SPM_SLEEP_DUAL_VCORE_PWR_CON	(SPM_BASE + 0x404)
+#define SPM_SLEEP_PTPOD2_CON		(SPM_BASE + 0x408)
+#define SPM_APMCU_PWRCTL		(SPM_BASE + 0x600)
+#define SPM_AP_DVFS_CON_SET		(SPM_BASE + 0x604)
+#define SPM_AP_STANBY_CON		(SPM_BASE + 0x608)
+#define SPM_PWR_STATUS			(SPM_BASE + 0x60c)
+#define SPM_PWR_STATUS_2ND		(SPM_BASE + 0x610)
+#define SPM_SLEEP_MDBSI_CON		(SPM_BASE + 0x614)
+#define SPM_RF_CLK_CFG			(SPM_BASE + 0x620)
+#define SPM_RF_CFG_SET			(SPM_BASE + 0x624)
+#define SPM_RF_CLK_CFG_CLR		(SPM_BASE + 0x628)
+#define SPM_BSI_DO_SR			(SPM_BASE + 0x62c)
+#define SPM_BSI_D1_SR			(SPM_BASE + 0x630)
+#define SPM_BSI_D2_SR			(SPM_BASE + 0x634)
+#define SPM_AP_SEMA			(SPM_BASE + 0x638)
+#define SPM_SPM_SEMA			(SPM_BASE + 0x63c)
+#define SPM_SLEEP_TIMER_STA		(SPM_BASE + 0x720)
+#define SPM_SLEEP_TWAM_CON		(SPM_BASE + 0x760)
+#define SPM_SLEEP_TWAM_STATUS0		(SPM_BASE + 0x764)
+#define SPM_SLEEP_TWAM_STATUS1		(SPM_BASE + 0x768)
+#define SPM_SLEEP_TWAM_STATUS2		(SPM_BASE + 0x76c)
+#define SPM_SLEEP_TWAM_STATUS3		(SPM_BASE + 0x770)
+#define SPM_SLEEP_TWAM_CURR_STATUS0	(SPM_BASE + 0x774)
+#define SPM_SLEEP_TWAM_CURR_STATUS1	(SPM_BASE + 0x778)
+#define SPM_SLEEP_TWAM_CURR_STATUS2	(SPM_BASE + 0x77C)
+#define SPM_SLEEP_TWAM_CURR_STATUS3	(SPM_BASE + 0x780)
+#define SPM_SLEEP_TWAM_TIMER_OUT	(SPM_BASE + 0x784)
+#define SPM_SLEEP_TWAM_WINDOW_LEN	(SPM_BASE + 0x788)
+#define SPM_SLEEP_IDLE_SEL		(SPM_BASE + 0x78C)
+#define SPM_SLEEP_WAKEUP_EVENT_MASK	(SPM_BASE + 0x810)
+#define SPM_SLEEP_CPU_WAKEUP_EVENT	(SPM_BASE + 0x814)
+#define SPM_PCM_WDT_TIMER_VAL		(SPM_BASE + 0x824)
+#define SPM_PCM_WDT_TIMER_OUT		(SPM_BASE + 0x828)
+#define SPM_SLEEP_ISR_MASK		(SPM_BASE + 0x900)
+#define SPM_SLEEP_ISR_STATUS		(SPM_BASE + 0x904)
+#define SPM_SLEEP_ISR_RAW_STA		(SPM_BASE + 0x910)
+#define SPM_SLEEP_WAKEUP_MISC		(SPM_BASE + 0x918)
+#define SPM_SLEEP_BUS_PROTECT_RDY	(SPM_BASE + 0x91c)
+#define SPM_SLEEP_SUBSYS_IDLE_STA	(SPM_BASE + 0x920)
+#define SPM_PCM_RESERVE			(SPM_BASE + 0xb00)
+#define SPM_PCM_RESERVE2		(SPM_BASE + 0xb04)
+#define SPM_PCM_FLAGS			(SPM_BASE + 0xb08)
+#define SPM_PCM_SRC_REQ			(SPM_BASE + 0xb0c)
+#define SPM_PCM_RESERVE3		(SPM_BASE + 0xb14)
+#define SPM_PCM_RESERVE4		(SPM_BASE + 0xb18)
+#define SPM_PCM_MMDDR_MASK		(SPM_BASE + 0xb1c)
+#define SPM_PCM_DEBUG_CON		(SPM_BASE + 0xb20)
+#define SPM_PCM_WDT_LATCH		(SPM_BASE + 0xb24)
+#define SPM_CA7_CPU0_IRQ_MASK		(SPM_BASE + 0xb30)
+#define SPM_CA7_CPU1_IRQ_MASK		(SPM_BASE + 0xb34)
+#define SPM_CA7_CPU2_IRQ_MASK		(SPM_BASE + 0xb38)
+#define SPM_CA7_CPU3_IRQ_MASK		(SPM_BASE + 0xb3c)
+#define SPM_PCM_PASR_DPD_0		(SPM_BASE + 0xb60)
+#define SPM_PCM_PASR_DPD_1		(SPM_BASE + 0xb64)
+#define SPM_PCM_PASR_DPD_2		(SPM_BASE + 0xb68)
+#define SPM_PCM_PASR_DPD_3		(SPM_BASE + 0xb6c)
+#define SPM_PCM_EVENT_EN		(SPM_BASE + 0xc00)
+#define SPM_PCM_EVENT_VECTOR8           (SPM_BASE + 0xc04)
+#define SPM_PCM_EVENT_VECTOR9           (SPM_BASE + 0xc08)
+#define SPM_PCM_EVENT_VECTORA           (SPM_BASE + 0xc0c)
+#define SPM_PCM_EVENT_VECTORB           (SPM_BASE + 0xc10)
+#define SPM_PCM_EVENT_VECTORC           (SPM_BASE + 0xc14)
+#define SPM_PCM_EVENT_VECTORD           (SPM_BASE + 0xc18)
+#define SPM_PCM_EVENT_VECTORE           (SPM_BASE + 0xc1c)
+#define SPM_PCM_EVENT_VECTORF           (SPM_BASE + 0xc20)
+#define SPM_PCM_RESERVE5                (SPM_BASE + 0xc24)
+#define SPM_PCM_RESERVE6                (SPM_BASE + 0xc28)
+#define SPM_PCM_RESERVE7                (SPM_BASE + 0xc2c)
+#define SPM_PCM_RESERVE8                (SPM_BASE + 0xc30)
+#define SPM_NFI_SRAM_CON		(SPM_BASE + 0xc38)
+#define SPM_SPMC_MP0_CPU0_PWR_CON	(SPM_BASE + 0xc40)
+#define SPM_SPMC_MP0_CPU1_PWR_CON	(SPM_BASE + 0xc44)
+#define SPM_SPMC_MP0_CPU2_PWR_CON	(SPM_BASE + 0xc48)
+#define SPM_SPMC_MP0_CPU3_PWR_CON	(SPM_BASE + 0xc4c)
+#define SPM_SPMC_MP0_CPUTOP_PWR_CON	(SPM_BASE + 0xc50)
+#define SPM_SPMC_MP0_SRAM_SLP		(SPM_BASE + 0xc54)
+#define SPM_SPMC_MP0_CPUTOP_CLK_DIS	(SPM_BASE + 0xc58)
+#define SPM_SPMC_BYPASS			(SPM_BASE + 0xc5c)
+#define SPM_DPY_MISC			(SPM_BASE + 0xc60)
+#define SPM_SLEEP_CA7_WFI0_EN		(SPM_BASE + 0xf00)
+#define SPM_SLEEP_CA7_WFI1_EN		(SPM_BASE + 0xf04)
+#define SPM_SLEEP_CA7_WFI2_EN		(SPM_BASE + 0xf08)
+#define SPM_SLEEP_CA7_WFI3_EN		(SPM_BASE + 0xf0c)
+
+#define SPM_PROJECT_CODE	0xb16
+
+#define SPM_REGWR_EN		(1U << 0)
+#define SPM_REGWR_CFG_KEY	(SPM_PROJECT_CODE << 16)
+
+/* PCM Flags store in PCM_RESERVE4(0xB18)*/
+#define SPM_CPU_PDN_DIS		(1U << 0)
+#define SPM_CPUTOP_PDN_DIS	(1U << 1)
+#define SPM_L2_DORMANT_DIS	(1U << 2)
+#define SPM_MCU_PDN_DIS		(1U << 3)
+#define SPM_BUS26M_DIS		(1U << 4)
+#define SPM_MPLLOFF_DIS		(1U << 5)
+#define SPM_DDRPHY_S1_DIS	(1U << 6)
+#define SPM_DDRPHY_S0_DIS	(1U << 7)
+#define SPM_FHC_SLEEP_DIS	(1U << 8)
+#define SPM_VPROC_LOW_DIS	(1U << 9)
+#define SPM_INFRA_PDN_DIS	(1U << 10)
+#define SPM_26M_DIS		(1U << 11)
+#define SPM_26M_OFF_DIS		(1U << 12)
+#define SPM_XO_OFF_DIS		(1U << 13)
+#define SPM_32K_LESS		(1U << 14)
+#define SPM_PLL_OFF_DIS		(1U << 15)
+#define SPM_NO_PMIC_WRAP	(1U << 16)
+
+/* Wakeup Source*/
+#define SPM_WAKE_SRC_LIST	{	\
+	SPM_WAKE_SRC(0, SPM_MERGE),	/* PCM timer, TWAM or CPU */	\
+	SPM_WAKE_SRC(1, AUDIO_REQ), /* new */	\
+	SPM_WAKE_SRC(2, KP),		\
+	SPM_WAKE_SRC(3, WDT),		\
+	SPM_WAKE_SRC(4, GPT),		\
+	SPM_WAKE_SRC(5, EINT),	\
+	SPM_WAKE_SRC(6, CONN_WDT),		\
+	SPM_WAKE_SRC(7, GCE),	\
+	SPM_WAKE_SRC(9, LOW_BAT),	\
+	SPM_WAKE_SRC(10, CONN2AP),	\
+	SPM_WAKE_SRC(11, F26M_WAKE),	\
+	SPM_WAKE_SRC(12, F26M_SLEEP),	\
+	SPM_WAKE_SRC(13, PCM_WDT),	\
+	SPM_WAKE_SRC(14, USB_CD),	\
+	SPM_WAKE_SRC(15, USB_PDN),	\
+	SPM_WAKE_SRC(16, ETH),		\
+	SPM_WAKE_SRC(18, DBGSYS), \
+	SPM_WAKE_SRC(19, UART0),	\
+	SPM_WAKE_SRC(20, AFE),		\
+	SPM_WAKE_SRC(21, THERM),	\
+	SPM_WAKE_SRC(22, CIRQ),		\
+	SPM_WAKE_SRC(23, SEJ),		\
+	SPM_WAKE_SRC(24, SYSPWREQ),	\
+	SPM_WAKE_SRC(25, IRRX),		\
+	SPM_WAKE_SRC(26, CPU0_IRQ),	 \
+	SPM_WAKE_SRC(27, CPU1_IRQ),		\
+	SPM_WAKE_SRC(28, CPU2_IRQ),	 \
+	SPM_WAKE_SRC(29, CPU3_IRQ),	 \
+	SPM_WAKE_SRC(30, APSRC_WAKE),	\
+	SPM_WAKE_SRC(31, APSRC_SLEEP)	\
+}
+
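+/*
+ * SPM_WAKE_SRC_LIST is an X-macro table: a consumer defines
+ * SPM_WAKE_SRC(id, name) before expanding it. A minimal sketch
+ * (hypothetical helper, not part of this header) building a wake-source
+ * name table:
+ *
+ *	#define SPM_WAKE_SRC(id, name)	[id] = #name
+ *	static const char * const wake_src_names[32] = SPM_WAKE_SRC_LIST;
+ *	#undef SPM_WAKE_SRC
+ */
+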
+enum SPM_WAKE_SRC {
+	WAKE_SRC_SPM_MERGE = (1U << 0),	/* WAKE_ID_SPM_MERGE */
+	WAKE_SRC_AUDIO_REQ = (1U << 1),	/* WAKE_ID_AUDIO_REQ */
+	WAKE_SRC_KP = (1U << 2),	/* WAKE_ID_KP */
+	WAKE_SRC_WDT = (1U << 3),	/* WAKE_ID_WDT */
+	WAKE_SRC_GPT = (1U << 4),	/* WAKE_ID_GPT */
+	WAKE_SRC_EINT = (1U << 5),	/* WAKE_ID_EINT */
+	WAKE_SRC_CONN_WDT = (1U << 6),	/* WAKE_ID_CONN_WDT */
+	WAKE_SRC_GCE = (1U << 7),	/* WAKE_ID_GCE */
+	WAKE_SRC_LOW_BAT = (1U << 9),	/* WAKE_ID_LOW_BAT */
+	WAKE_SRC_CONN2AP = (1U << 10),	/* WAKE_ID_CONN2AP */
+	WAKE_SRC_F26M_WAKE = (1U << 11),	/* WAKE_ID_F26M_WAKE */
+	WAKE_SRC_F26M_SLEEP = (1U << 12),	/* WAKE_ID_F26M_SLEEP */
+	WAKE_SRC_PCM_WDT = (1U << 13),	/* WAKE_ID_PCM_WDT */
+	WAKE_SRC_USB_CD = (1U << 14),	/* WAKE_ID_USB_CD */
+	WAKE_SRC_USB_PDN = (1U << 15),	/* WAKE_ID_USB_PDN */
+	WAKE_SRC_ETH = (1U << 16),	/* WAKE_ID_ETH */
+	WAKE_SRC_DBGSYS = (1U << 18),	/* WAKE_ID_DBGSYS */
+	WAKE_SRC_UART0 = (1U << 19),	/* WAKE_ID_UART0 */
+	WAKE_SRC_AFE = (1U << 20),	/* WAKE_ID_AFE */
+	WAKE_SRC_THERM = (1U << 21),	/* WAKE_ID_THERM */
+	WAKE_SRC_CIRQ = (1U << 22),	/* WAKE_ID_CIRQ */
+	WAKE_SRC_SEJ = (1U << 23),	/* WAKE_ID_SEJ */
+	WAKE_SRC_SYSPWREQ = (1U << 24),	/* WAKE_ID_SYSPWREQ */
+	WAKE_SRC_IRRX = (1U << 25),	/* WAKE_ID_IRRX */
+	WAKE_SRC_CPU0_IRQ = (1U << 26),	/* WAKE_ID_CPU0_IRQ */
+	WAKE_SRC_CPU1_IRQ = (1U << 27),	/* WAKE_ID_CPU1_IRQ */
+	WAKE_SRC_CPU2_IRQ = (1U << 28),	/* WAKE_ID_CPU2_IRQ */
+	WAKE_SRC_CPU3_IRQ = (1U << 29),	/* WAKE_ID_CPU3_IRQ */
+	WAKE_SRC_APSRC_WAKE = (1U << 30),	/* WAKE_ID_APSRC_WAKE */
+	WAKE_SRC_APSRC_SLEEP = (1U << 31),	/* WAKE_ID_APSRC_SLEEP */
+};
+
+/* enum SPM_WAKE_SRC_LIST; */
+
+typedef enum {
+	WR_NONE = 0,
+	WR_UART_BUSY = 1,
+	WR_PCM_ASSERT = 2,
+	WR_PCM_TIMER = 3,
+	WR_WAKE_SRC = 4,
+	WR_UNKNOWN = 5,
+} wake_reason_t;
+
+struct twam_sig {
+	u32 sig0;		/* signal 0: config or status */
+	u32 sig1;		/* signal 1: config or status */
+	u32 sig2;		/* signal 2: config or status */
+	u32 sig3;		/* signal 3: config or status */
+};
+
+typedef void (*twam_handler_t) (struct twam_sig *twamsig);
+
+/* for ANC in talking */
+extern void spm_mainpll_on_request(const char *drv_name);
+extern void spm_mainpll_on_unrequest(const char *drv_name);
+
+/* for TWAM in MET */
+extern void spm_twam_register_handler(twam_handler_t handler);
+extern void spm_twam_enable_monitor(const struct twam_sig *twamsig, bool speed_mode,
+				    unsigned int window_len);
+extern void spm_twam_disable_monitor(void);
+
+extern uint32_t mt_xo_get_current_capid(void);
+
+extern void unmask_irq(struct irq_desc *desc); /* to replace mt_irq_unmask_for_sleep */
+
+unsigned int spm_get_cpu_pwr_status(void);
+
+/**************************************
+ * Macro and Inline
+ **************************************/
+#define spm_read(addr)			__raw_readl(IOMEM(addr))
+#define spm_write(addr, val)		mt_reg_sync_writel(val, addr)
+
+#define is_cpu_pdn(flags)		(!((flags) & SPM_CPU_PDN_DIS))
+#define is_infra_pdn(flags)		(!((flags) & SPM_INFRA_PDN_DIS))
+#define is_ddrphy_pdn(flags)		(!((flags) & SPM_DDRPHY_PDN_DIS))
+#define is_dualvcore_pdn(flags)		(!((flags) & SPM_DUALVCORE_PDN_DIS))
+
+#define get_high_cnt(sigsta)		((sigsta) & 0x3ff)
+#define get_high_percent(sigsta)	((get_high_cnt(sigsta) * 100 + 511) / 1023)
+
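+/*
+ * A usage sketch based on the definitions above (the key-protection scheme
+ * is an assumption, not documented here): enabling SPM register writes
+ * presumably requires the project-code key in the upper half-word:
+ *
+ *	spm_write(SPM_POWERON_CONFIG_SET, SPM_REGWR_CFG_KEY | SPM_REGWR_EN);
+ */
+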
+#endif
diff --git a/drivers/misc/mediatek/base/power/mt8167/mtk_spm_sleep.h b/drivers/misc/mediatek/base/power/mt8167/mtk_spm_sleep.h
new file mode 100644
index 0000000..6280244
--- /dev/null
+++ b/drivers/misc/mediatek/base/power/mt8167/mtk_spm_sleep.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT_SPM_SLEEP_
+#define _MT_SPM_SLEEP_
+
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqchip/mtk-gic-extend.h>
+#include <linux/kernel.h>
+#include "mtk_spm.h"
+/*
+ * for suspend
+ */
+extern int spm_set_sleep_wakesrc(u32 wakesrc, bool enable, bool replace);
+extern u32 spm_get_sleep_wakesrc(void);
+extern wake_reason_t spm_go_to_sleep(u32 spm_flags, u32 spm_data);
+
+extern void unmask_irq(struct irq_desc *desc);
+
+extern bool spm_is_conn_sleep(void);
+extern void spm_set_wakeup_src_check(void);
+extern bool spm_check_wakeup_src(void);
+extern void spm_poweron_config_set(void);
+extern bool spm_set_suspend_pcm_init_flag(u32 *suspend_flags);
+
+extern void spm_output_sleep_option(void);
+
+/* record last wakesta */
+extern u32 spm_get_last_wakeup_src(void);
+extern u32 spm_get_last_wakeup_misc(void);
+extern bool mt_xo_has_ext_crystal(void);
+extern void spm_set_sleep_26m_req(bool req);
+#endif
diff --git a/drivers/misc/mediatek/base/power/mt8167/sync_write.h b/drivers/misc/mediatek/base/power/mt8167/sync_write.h
new file mode 100644
index 0000000..5d6ff6c
--- /dev/null
+++ b/drivers/misc/mediatek/base/power/mt8167/sync_write.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT_SYNC_WRITE_H
+#define _MT_SYNC_WRITE_H
+
+#if defined(__KERNEL__)
+
+#include <linux/io.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Define macros.
+ */
+#define mt_reg_sync_writel(v, a) \
+	do {    \
+		__raw_writel((v), (void __force __iomem *)((a)));   \
+		mb();  \
+	} while (0)
+
+#define mt_reg_sync_writew(v, a) \
+	do {    \
+		__raw_writew((v), (void __force __iomem *)((a)));   \
+		mb();  \
+	} while (0)
+
+#define mt_reg_sync_writeb(v, a) \
+	do {    \
+		__raw_writeb((v), (void __force __iomem *)((a)));   \
+		mb();  \
+	} while (0)
+
+#ifdef CONFIG_64BIT
+#define mt_reg_sync_writeq(v, a) \
+	do {    \
+		__raw_writeq((v), (void __force __iomem *)((a)));   \
+		mb();  \
+	} while (0)
+#endif
+
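+/*
+ * Usage sketch (hypothetical base/offset names): pair the write with the
+ * barrier so the register is observed as updated before execution
+ * continues:
+ *
+ *	mt_reg_sync_writel(0x1, ctrl_base + REG_CTRL);
+ */
+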
+#else				/* __KERNEL__ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+
+#define mt_reg_sync_writel(v, a)        mt65xx_reg_sync_writel(v, a)
+#define mt_reg_sync_writew(v, a)        mt65xx_reg_sync_writew(v, a)
+#define mt_reg_sync_writeb(v, a)        mt65xx_reg_sync_writeb(v, a)
+
+#define mb()   \
+	{    \
+		__asm__ __volatile__ ("dsb" : : : "memory"); \
+	}
+
+#define mt65xx_reg_sync_writel(v, a) \
+	do {    \
+		*(volatile unsigned int *)(a) = (v);    \
+		mb(); \
+	} while (0)
+
+#define mt65xx_reg_sync_writew(v, a) \
+	do {    \
+		*(volatile unsigned short *)(a) = (v);    \
+		mb(); \
+	} while (0)
+
+#define mt65xx_reg_sync_writeb(v, a) \
+	do {    \
+		*(volatile unsigned char *)(a) = (v);    \
+		mb(); \
+	} while (0)
+
+#endif				/* __KERNEL__ */
+
+#endif				/* !_MT_SYNC_WRITE_H */
diff --git a/drivers/misc/mediatek/gpu/Makefile b/drivers/misc/mediatek/gpu/Makefile
new file mode 100644
index 0000000..ac1a0e0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/Makefile
@@ -0,0 +1 @@
+obj-y += gpu_rgx/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/Kconfig b/drivers/misc/mediatek/gpu/gpu_rgx/Kconfig
new file mode 100644
index 0000000..d872b2c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/Kconfig
@@ -0,0 +1,5 @@
+config RGX_M1_9ED4917962
+	tristate "Imagination Rogue GX M1.9ED4917662 driver"
+
+config RGX_M1_11_5516664
+	tristate "Imagination Rogue GX M1.11@5516664 driver"
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/Makefile b/drivers/misc/mediatek/gpu/gpu_rgx/Makefile
new file mode 100644
index 0000000..631df43
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_RGX_M1_9ED4917962) += m1.9ED4917962/
+obj-$(CONFIG_RGX_M1_11_5516664) += m1.11_5516664/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/Kconfig b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/Kconfig
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/Kconfig
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/Makefile b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/Makefile
new file mode 100644
index 0000000..66860d3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/Makefile
@@ -0,0 +1,11 @@
+include drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrvkm.mk
+
+obj-y += pvrsrvkm.o
+
+ccflags-y += \
+ -include config_kernel.h \
+ -I $(srctree)/include/drm \
+ -I $(srctree)/$(src) \
+ -I $(srctree)/$(src)/km \
+ -I $(srctree)/$(src)/system \
+ -D__linux__
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/allocmem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/allocmem.c
new file mode 100644
index 0000000..3b4f2ed
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/allocmem.c
@@ -0,0 +1,455 @@
+/*************************************************************************/ /*!
+@File
+@Title          Host memory management implementation for Linux
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "img_defs.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#include "osfunc.h"
+
+#if defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#define ALLOCMEM_MEMSTATS_PADDING 0
+#else
+#define ALLOCMEM_MEMSTATS_PADDING sizeof(IMG_UINT32)
+#endif
+
+static inline void _pvr_vfree(const void *pvAddr)
+{
+#if defined(DEBUG)
+	/* The exact size is harder to come by for vmalloc, and since vmalloc
+	 * allocates a whole number of pages, poison the minimum size known to
+	 * have been allocated.
+	 */
+	OSCachedMemSet((void *)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE,
+	               PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD);
+#endif
+	vfree(pvAddr);
+}
+
+static inline void _pvr_kfree(const void *pvAddr)
+{
+#if defined(DEBUG)
+	/* Poison the whole memory block */
+	OSCachedMemSet((void *)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE,
+	               ksize(pvAddr));
+#endif
+	kfree(pvAddr);
+}
+
+#if !defined(PVRSRV_ENABLE_PROCESS_STATS)
+void *OSAllocMem(IMG_UINT32 ui32Size)
+{
+	void *pvRet = NULL;
+
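+	/*
+	 * Requests above the kmalloc threshold try vmalloc() first; smaller
+	 * requests, or a failed vmalloc(), fall back to kmalloc().
+	 */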
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vmalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		pvRet = kmalloc(ui32Size, GFP_KERNEL);
+	}
+
+	return pvRet;
+}
+
+void *OSAllocZMem(IMG_UINT32 ui32Size)
+{
+	void *pvRet = NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vzalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		pvRet = kzalloc(ui32Size, GFP_KERNEL);
+	}
+
+	return pvRet;
+}
+
+/*
+ * The parentheses around OSFreeMem prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+void (OSFreeMem)(void *pvMem)
+{
+	if (pvMem != NULL)
+	{
+		if (!is_vmalloc_addr(pvMem))
+		{
+			_pvr_kfree(pvMem);
+		}
+		else
+		{
+			_pvr_vfree(pvMem);
+		}
+	}
+}
+#else
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG) && defined(PVRSRV_ENABLE_MEMORY_STATS)
+void *_OSAllocMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine)
+{
+	void *pvRet = NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vmalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		pvRet = kmalloc(ui32Size, GFP_KERNEL);
+	}
+
+	if (pvRet != NULL)
+	{
+
+		if (!is_vmalloc_addr(pvRet))
+		{
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			_PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+										  pvRet,
+										  sCpuPAddr,
+										  ksize(pvRet),
+										  NULL,
+										  OSGetCurrentClientProcessIDKM(),
+										  pvAllocFromFile,
+										  ui32AllocFromLine);
+		}
+		else
+		{
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			_PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+										  pvRet,
+										  sCpuPAddr,
+										  ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)),
+										  NULL,
+										  OSGetCurrentClientProcessIDKM(),
+										  pvAllocFromFile,
+										  ui32AllocFromLine);
+		}
+	}
+	return pvRet;
+}
+
+void *_OSAllocZMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine)
+{
+	void *pvRet = NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vzalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		pvRet = kzalloc(ui32Size, GFP_KERNEL);
+	}
+
+	if (pvRet != NULL)
+	{
+		if (!is_vmalloc_addr(pvRet))
+		{
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			_PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+										  pvRet,
+										  sCpuPAddr,
+										  ksize(pvRet),
+										  NULL,
+										  OSGetCurrentClientProcessIDKM(),
+										  pvAllocFromFile,
+										  ui32AllocFromLine);
+		}
+		else
+		{
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			_PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+										  pvRet,
+										  sCpuPAddr,
+										  ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)),
+										  NULL,
+										  OSGetCurrentClientProcessIDKM(),
+										  pvAllocFromFile,
+										  ui32AllocFromLine);
+		}
+	}
+	return pvRet;
+}
+#else
+void *OSAllocMem(IMG_UINT32 ui32Size)
+{
+	void *pvRet = NULL;
+
+	if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vmalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		/* Allocate an additional 4 bytes to store the PID of the allocating process */
+		pvRet = kmalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL);
+	}
+
+	if (pvRet != NULL)
+	{
+
+		if (!is_vmalloc_addr(pvRet))
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			{
+				/* Store the PID in the final additional 4 bytes allocated */
+				IMG_UINT32 *puiTemp = (IMG_UINT32*) (((IMG_BYTE*)pvRet) + (ksize(pvRet) - ALLOCMEM_MEMSTATS_PADDING));
+				*puiTemp = OSGetCurrentProcessID();
+			}
+			PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvRet), OSGetCurrentClientProcessIDKM());
+#else
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+										 pvRet,
+										 sCpuPAddr,
+										 ksize(pvRet),
+										 NULL,
+										 OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+		}
+		else
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+											    ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)),
+											    (IMG_UINT64)(uintptr_t) pvRet,
+												OSGetCurrentClientProcessIDKM());
+#else
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+										 pvRet,
+										 sCpuPAddr,
+										 ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)),
+										 NULL,
+										 OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+		}
+	}
+	return pvRet;
+}
+
+void *OSAllocZMem(IMG_UINT32 ui32Size)
+{
+	void *pvRet = NULL;
+
+	if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vzalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		/* Allocate an additional 4 bytes to store the PID of the allocating process */
+		pvRet = kzalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL);
+	}
+
+	if (pvRet != NULL)
+	{
+		if (!is_vmalloc_addr(pvRet))
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			{
+				/* Store the PID in the final additional 4 bytes allocated */
+				IMG_UINT32 *puiTemp = (IMG_UINT32*) (((IMG_BYTE*)pvRet) + (ksize(pvRet) - ALLOCMEM_MEMSTATS_PADDING));
+				*puiTemp = OSGetCurrentProcessID();
+			}
+			PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvRet), OSGetCurrentClientProcessIDKM());
+#else
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+								 pvRet,
+								 sCpuPAddr,
+								 ksize(pvRet),
+								 NULL,
+								 OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+		}
+		else
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+											    ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)),
+											    (IMG_UINT64)(uintptr_t) pvRet,
+												OSGetCurrentClientProcessIDKM());
+#else
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+										 pvRet,
+										 sCpuPAddr,
+										 ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)),
+										 NULL,
+										 OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+		}
+	}
+	return pvRet;
+}
+#endif
+
+/*
+ * The parentheses around OSFreeMem prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+void (OSFreeMem)(void *pvMem)
+{
+	if (pvMem != NULL)
+	{
+		if (!is_vmalloc_addr(pvMem))
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			{
+				IMG_UINT32 *puiTemp = (IMG_UINT32*) (((IMG_BYTE*)pvMem) + (ksize(pvMem) - ALLOCMEM_MEMSTATS_PADDING));
+				PVRSRVStatsDecrMemKAllocStat(ksize(pvMem), *puiTemp);
+			}
+#else
+			PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+			                                (IMG_UINT64)(uintptr_t) pvMem,
+											OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+			_pvr_kfree(pvMem);
+		}
+		else
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+			                                      (IMG_UINT64)(uintptr_t) pvMem);
+#else
+			PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+			                                (IMG_UINT64)(uintptr_t) pvMem,
+											OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+			_pvr_vfree(pvMem);
+		}
+	}
+}
+#endif
+
+
+void *OSAllocMemNoStats(IMG_UINT32 ui32Size)
+{
+	void *pvRet = NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vmalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		pvRet = kmalloc(ui32Size, GFP_KERNEL);
+	}
+
+	return pvRet;
+}
+
+void *OSAllocZMemNoStats(IMG_UINT32 ui32Size)
+{
+	void *pvRet = NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vzalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		pvRet = kzalloc(ui32Size, GFP_KERNEL);
+	}
+
+	return pvRet;
+}
+
+/*
+ * The parentheses around OSFreeMemNoStats prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+void (OSFreeMemNoStats)(void *pvMem)
+{
+	if (pvMem != NULL)
+	{
+		if (!is_vmalloc_addr(pvMem))
+		{
+			_pvr_kfree(pvMem);
+		}
+		else
+		{
+			_pvr_vfree(pvMem);
+		}
+	}
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/allocmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/allocmem.h
new file mode 100644
index 0000000..fc4c2382
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/allocmem.h
@@ -0,0 +1,178 @@
+/*************************************************************************/ /*!
+@File           allocmem.h
+@Title          memory allocation header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Memory-Allocation API definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef ALLOCMEM_H
+#define ALLOCMEM_H
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) || !defined(DEBUG) || !defined(PVRSRV_ENABLE_PROCESS_STATS) || !defined(PVRSRV_ENABLE_MEMORY_STATS)
+/**************************************************************************/ /*!
+@Function       OSAllocMem
+@Description    Allocates CPU memory. Contents are uninitialized.
+                If passed a size of zero, function should not assert,
+                but just return a NULL pointer.
+@Input          ui32Size        Size of required allocation (in bytes)
+@Return         Pointer to allocated memory on success.
+                Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocMem(IMG_UINT32 ui32Size);
+/**************************************************************************/ /*!
+@Function       OSAllocZMem
+@Description    Allocates CPU memory and initializes the contents to zero.
+                If passed a size of zero, function should not assert,
+                but just return a NULL pointer.
+@Input          ui32Size        Size of required allocation (in bytes)
+@Return         Pointer to allocated memory on success.
+                Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocZMem(IMG_UINT32 ui32Size);
+#else
+void *_OSAllocMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine);
+void *_OSAllocZMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine);
+#define OSAllocMem(_size)	_OSAllocMem((_size), (__FILE__), (__LINE__))
+#define OSAllocZMem(_size)	_OSAllocZMem((_size), (__FILE__), (__LINE__))
+#endif
+
+/**************************************************************************/ /*!
+@Function       OSAllocMemNoStats
+@Description    Allocates CPU memory. Contents are uninitialized.
+                If passed a size of zero, function should not assert,
+                but just return a NULL pointer.
+                The allocated memory is not accounted for by process stats.
+                Process stats are an optional feature (enabled only when
+                PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+                of memory allocated to help in debugging. Where this is not
+                required, OSAllocMem() and OSAllocMemNoStats() equate to
+                the same operation.
+@Input          ui32Size        Size of required allocation (in bytes)
+@Return         Pointer to allocated memory on success.
+                Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocMemNoStats(IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+@Function       OSAllocZMemNoStats
+@Description    Allocates CPU memory and initializes the contents to zero.
+                If passed a size of zero, function should not assert,
+                but just return a NULL pointer.
+                The allocated memory is not accounted for by process stats.
+                Process stats are an optional feature (enabled only when
+                PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+                of memory allocated to help in debugging. Where this is not
+                required, OSAllocZMem() and OSAllocZMemNoStats() equate to
+                the same operation.
+@Input          ui32Size        Size of required allocation (in bytes)
+@Return         Pointer to allocated memory on success.
+                Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocZMemNoStats(IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+@Function       OSFreeMem
+@Description    Frees previously allocated CPU memory.
+@Input          pvCpuVAddr       Pointer to the memory to be freed.
+@Return         None.
+ */ /**************************************************************************/
+void OSFreeMem(void *pvCpuVAddr);
+
+/**************************************************************************/ /*!
+@Function       OSFreeMemNoStats
+@Description    Frees previously allocated CPU memory.
+                The freed memory does not update the figures in process stats.
+                Process stats are an optional feature (enabled only when
+                PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+                of memory allocated to help in debugging. Where this is not
+                required, OSFreeMem() and OSFreeMemNoStats() equate to the
+                same operation.
+@Input          pvCpuVAddr       Pointer to the memory to be freed.
+@Return         None.
+ */ /**************************************************************************/
+void OSFreeMemNoStats(void *pvCpuVAddr);
+
+/*
+ * These macros allow us to catch double-free bugs on DEBUG builds and
+ * prevent crashes on RELEASE builds.
+ */
+
+/*! @cond Doxygen_Suppress */
+#if defined(DEBUG)
+#define double_free_sentinel ((void *)&OSFreeMem)
+#define ALLOCMEM_ASSERT(exp) PVR_ASSERT(exp)
+#else
+#define double_free_sentinel NULL
+#define ALLOCMEM_ASSERT(exp) do {} while (0)
+#endif
+/*! @endcond */
+
+/*! Frees memory allocated by OSAllocMem(). */
+#define OSFreeMem(_ptr) do { \
+		ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \
+		(OSFreeMem)(_ptr); \
+		(_ptr) = double_free_sentinel; \
+		MSC_SUPPRESS_4127 \
+	} while (0)
+
+/*! Frees memory allocated by OSAllocMemNoStats(). */
+#define OSFreeMemNoStats(_ptr) do { \
+		ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \
+		(OSFreeMemNoStats)(_ptr); \
+		(_ptr) = double_free_sentinel; \
+		MSC_SUPPRESS_4127 \
+	} while (0)
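+
+/*
+ * A usage sketch (hypothetical pointer name): after the macro expands, the
+ * freed pointer is overwritten with double_free_sentinel, so a second
+ * OSFreeMem() on it trips ALLOCMEM_ASSERT() on DEBUG builds and degrades
+ * to a harmless NULL free on RELEASE builds:
+ *
+ *	psNode = OSAllocMem(sizeof(*psNode));
+ *	...
+ *	OSFreeMem(psNode);	now psNode == double_free_sentinel
+ */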
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ALLOCMEM_H */
+
+/******************************************************************************
+ End of file (allocmem.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/apollo.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/apollo.mk
new file mode 100644
index 0000000..6d34673
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/apollo.mk
@@ -0,0 +1,4 @@
+apollo-y += \
+ tc_apollo.o \
+ tc_drv.o \
+ tc_odin.o
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/apollo_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/apollo_regs.h
new file mode 100644
index 0000000..e1f97ae
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/apollo_regs.h
@@ -0,0 +1,222 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__APOLLO_REGS_H__)
+#define __APOLLO_REGS_H__
+
+/*
+ * The core clock speed is passed through a multiplier depending on the TC version.
+ *
+ * On TC_ES1: Multiplier = x3, final speed = 270MHz
+ * On TC_ES2: Multiplier = x6, final speed = 540MHz
+ * On TCF5:   Multiplier = x1, final speed = 45MHz
+ *
+ *
+ * The base (unmultiplied speed) can be adjusted using a module parameter called "sys_core_clk_speed",
+ * a number in Hz.
+ *
+ * As an example:
+ *
+ * PVR_SRVKM_PARAMS="sys_core_clk_speed=60000000" /etc/init.d/rc.pvr start
+ *
+ * would result in a core speed of 60MHz multiplied by the TC's multiplier.
+ *
+ *
+ * The memory clock is unmultiplied and can be adjusted using a module parameter called
+ * "sys_mem_clk_speed", this should be the number in Hz for the memory clock speed.
+ *
+ * As an example:
+ *
+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=100000000" /etc/init.d/rc.pvr start
+ *
+ * would attempt to start the driver with the memory clock speed set to 100MHz.
+ *
+ *
+ * Same applies to the system interface clock speed sys_sysif_clk_speed.
+ * Needed for TCF5 but not for TC_ES2/ES1.
+ * As an example:
+ *
+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=45000000" /etc/init.d/rc.pvr start
+ *
+ * would attempt to start the driver with the system clock speed set to 45MHz.
+ *
+ *
+ * All parameters can be specified at once, eg:
+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=MEMORY_SPEED sys_core_clk_speed=CORE_SPEED sys_mem_clk_speed=SYSIF_SPEED" /etc/init.d/rc.pvr start
+ */
+
+#define RGX_TC_SYS_CLOCK_SPEED		(25000000) /* currently only used for TCF5 */
+
+#if defined(TC_APOLLO_TCF5_22_46_54_330)
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED	(100000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(45000000)
+ #define RGX_TC_SYS_CLOCK_SPEED		(45000000)
+#elif defined(TC_APOLLO_TCF5_22_49_21_16) || defined(TC_APOLLO_TCF5_22_50_22_29) || \
+      defined(TC_APOLLO_TCF5_22_60_22_29) || defined(TC_APOLLO_TCF5_22_69_22_25) || \
+      defined(TC_APOLLO_TCF5_22_75_22_25)
+ #define RGX_TC_CORE_CLOCK_SPEED	(20000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(50000000)
+#elif defined(TC_APOLLO_TCF5_22_62_21_16) || defined(TC_APOLLO_TCF5_22_80_21_19) || \
+      defined(TC_APOLLO_TCF5_22_97_22_225)
+ #define RGX_TC_CORE_CLOCK_SPEED	(20000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(45000000)
+#elif defined(TC_APOLLO_TCF5_22_67_54_30)  || defined(TC_APOLLO_TCF5_22_63_54_330)
+ #define RGX_TC_CORE_CLOCK_SPEED	(100000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(45000000)
+#elif defined(TC_APOLLO_TCF5_22_70_208_316) || defined(TC_APOLLO_TCF5_22_89_204_18)
+ #define RGX_TC_CORE_CLOCK_SPEED	(50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(25000000)
+#elif defined(TC_APOLLO_TCF5_22_73_104_312) || defined(TC_APOLLO_TCF5_22_78_104_212)
+ #define RGX_TC_CORE_CLOCK_SPEED	(50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(40000000)
+#elif defined(TC_APOLLO_TCF5_22_76_104_12)
+ #define RGX_TC_CORE_CLOCK_SPEED	(50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(50000000)
+#elif defined(TC_APOLLO_TCF5_22_81_104_12)
+ #define RGX_TC_CORE_CLOCK_SPEED	(50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(45000000)
+#elif defined(TC_APOLLO_TCF5_22_86_104_218)
+ #define RGX_TC_CORE_CLOCK_SPEED	(30000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(40000000)
+#elif defined(TC_APOLLO_TCF5_22_88_104_318)
+ #define RGX_TC_CORE_CLOCK_SPEED	(28000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(40000000)
+#elif defined(TC_APOLLO_TCF5_22_96_104_618)
+ #define RGX_TC_CORE_CLOCK_SPEED	(35000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(45000000)
+#elif defined(TC_APOLLO_TCF5_22_98_54_230)
+ #define RGX_TC_CORE_CLOCK_SPEED	(100000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(40000000)
+#elif defined(TC_APOLLO_TCF5_22_102_54_38)
+ #define RGX_TC_CORE_CLOCK_SPEED	(80000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(25000000)
+#elif defined(TC_APOLLO_TCF5_24_12_104_2)
+ #define RGX_TC_CORE_CLOCK_SPEED	(100000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(50000000)
+#elif defined(TC_APOLLO_TCF5_BVNC_NOT_SUPPORTED)
+ /* TC TCF5 (22.*) fallback frequencies */
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED	(20000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(50000000)
+ #define RGX_TC_SYS_CLOCK_SPEED		(25000000)
+#elif defined(TC_APOLLO_TCF5_REFERENCE)
+ /* TC TCF5 (Reference bitfile) */
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED	(50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(50000000)
+ #define RGX_TC_SYS_CLOCK_SPEED		(45000000)
+#elif defined(TC_APOLLO_BONNIE)
+ /* TC Bonnie */
+ #define RGX_TC_CORE_CLOCK_SPEED	(18000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(65000000)
+#elif defined(TC_APOLLO_ES2)
+ /* TC ES2 */
+ #define RGX_TC_CORE_CLOCK_SPEED	(90000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(104000000)
+#else
+ /* TC ES1 */
+ #define RGX_TC_CORE_CLOCK_SPEED	(90000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(65000000)
+#endif
+
+/* TC TCF5 */
+#define TC5_SYS_APOLLO_REG_PCI_BASENUM (1)
+#define TC5_SYS_APOLLO_REG_PDP2_OFFSET (0x800000)
+#define TC5_SYS_APOLLO_REG_PDP2_SIZE   (0x7C4)
+
+#define TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET (0xA00000)
+#define TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE   (0x14)
+
+#define TC5_SYS_APOLLO_REG_HDMI_OFFSET (0xC00000)
+#define TC5_SYS_APOLLO_REG_HDMI_SIZE   (0x1C)
+
+/* TC ES2 */
+#define TCF_TEMP_SENSOR_SPI_OFFSET 	0xe
+#define TCF_TEMP_SENSOR_TO_C(raw) 	(((raw) * 248 / 4096) - 54)
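+/* e.g. a raw reading of 2048 converts to (2048 * 248 / 4096) - 54 = 70 degrees C */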
+
+/* Number of bytes that are broken */
+#define SYS_DEV_MEM_BROKEN_BYTES	(1024 * 1024)
+#define SYS_DEV_MEM_REGION_SIZE		(0x40000000 - SYS_DEV_MEM_BROKEN_BYTES)
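+/* i.e. a 1 GiB aperture less the broken 1 MiB: 0x40000000 - 0x100000 = 0x3FF00000 bytes */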
+
+/* Apollo reg on base register 0 */
+#define SYS_APOLLO_REG_PCI_BASENUM	(0)
+#define SYS_APOLLO_REG_REGION_SIZE	(0x00010000)
+
+#define SYS_APOLLO_REG_SYS_OFFSET	(0x0000)
+#define SYS_APOLLO_REG_SYS_SIZE		(0x0400)
+
+#define SYS_APOLLO_REG_PLL_OFFSET	(0x1000)
+#define SYS_APOLLO_REG_PLL_SIZE		(0x0400)
+
+#define SYS_APOLLO_REG_HOST_OFFSET	(0x4050)
+#define SYS_APOLLO_REG_HOST_SIZE	(0x0014)
+
+#define SYS_APOLLO_REG_PDP1_OFFSET	(0xC000)
+#define SYS_APOLLO_REG_PDP1_SIZE	(0x2000)
+
+/* Offsets for flashing Apollo PROMs from base 0 */
+#define APOLLO_FLASH_STAT_OFFSET	(0x4058)
+#define APOLLO_FLASH_DATA_WRITE_OFFSET	(0x4050)
+#define APOLLO_FLASH_RESET_OFFSET	(0x4060)
+
+#define APOLLO_FLASH_FIFO_STATUS_MASK		(0xF)
+#define APOLLO_FLASH_FIFO_STATUS_SHIFT		(0)
+#define APOLLO_FLASH_PROGRAM_STATUS_MASK	(0xF)
+#define APOLLO_FLASH_PROGRAM_STATUS_SHIFT	(16)
+
+#define APOLLO_FLASH_PROG_COMPLETE_BIT	(0x1)
+#define APOLLO_FLASH_PROG_PROGRESS_BIT	(0x2)
+#define APOLLO_FLASH_PROG_FAILED_BIT	(0x4)
+#define APOLLO_FLASH_INV_FILETYPE_BIT	(0x8)
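+
+/*
+ * A minimal decode sketch, assuming 'stat' holds the value read from
+ * APOLLO_FLASH_STAT_OFFSET:
+ *   fifo = (stat >> APOLLO_FLASH_FIFO_STATUS_SHIFT) &
+ *          APOLLO_FLASH_FIFO_STATUS_MASK;
+ *   prog = (stat >> APOLLO_FLASH_PROGRAM_STATUS_SHIFT) &
+ *          APOLLO_FLASH_PROGRAM_STATUS_MASK;
+ * 'prog' can then be tested against the APOLLO_FLASH_PROG_* bits above.
+ */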
+
+#define APOLLO_FLASH_FIFO_SIZE		(8)
+
+/* RGX reg on base register 1 */
+#define SYS_RGX_REG_PCI_BASENUM		(1)
+#define SYS_RGX_REG_REGION_SIZE		(0x7FFFF)
+
+/* Device memory (including HP mapping) on base register 2 */
+#define SYS_DEV_MEM_PCI_BASENUM		(2)
+
+#endif /* if !defined(__APOLLO_REGS_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/bonnie_tcf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/bonnie_tcf.h
new file mode 100644
index 0000000..fc87ec7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/bonnie_tcf.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* bonnie_tcf.h - Bonnie TCF register definitions */
+
+/* tab size 4 */
+
+#ifndef BONNIE_TCF_DEFS_H
+#define BONNIE_TCF_DEFS_H
+
+#define BONNIE_TCF_OFFSET_BONNIETC_REGBANK							0x00000000
+#define BONNIE_TCF_OFFSET_TC_IFACE_COUNTERS							0x00004000
+#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_IMGV4_RTM_TOP				0x00008000
+#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_SECN		0x0000C000
+#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_DBG		0x00010000
+#define BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN							0x00014000
+#define BONNIE_TCF_OFFSET_ALIGN_DATA_TX								0x00018000
+#define BONNIE_TCF_OFFSET_SAI_RX_1									0x0001C000
+#define BONNIE_TCF_OFFSET_SAI_RX_SDR								0x00040000
+#define BONNIE_TCF_OFFSET_SAI_TX_1									0x00044000
+#define BONNIE_TCF_OFFSET_SAI_TX_SDR								0x00068000
+
+#define BONNIE_TCF_OFFSET_SAI_RX_DELTA								0x00004000
+#define BONNIE_TCF_OFFSET_SAI_TX_DELTA								0x00004000
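+
+/*
+ * The SAI RX/TX banks appear to be laid out at a fixed stride, so instance n
+ * would presumably live at
+ *   BONNIE_TCF_OFFSET_SAI_RX_1 + (n - 1) * BONNIE_TCF_OFFSET_SAI_RX_DELTA
+ * which places SAI_RX_SDR (0x00040000) where RX instance 10 would fall.
+ */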
+
+#define BONNIE_TCF_OFFSET_SAI_CLK_TAPS								0x0000000C
+#define BONNIE_TCF_OFFSET_SAI_EYES									0x00000010
+#define BONNIE_TCF_OFFSET_SAI_TRAIN_ACK								0x00000018
+
+
+#endif /* BONNIE_TCF_DEFS_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_crtc.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_crtc.c
new file mode 100644
index 0000000..bb0824b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_crtc.c
@@ -0,0 +1,989 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_linux_fence.h"
+#include "drm_pdp_drv.h"
+
+#include <linux/reservation.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "drm_pdp_gem.h"
+
+#include "pdp_apollo.h"
+#include "pdp_odin.h"
+#include "pdp_plato.h"
+
+#include "plato_drv.h"
+
+#if defined(PDP_USE_ATOMIC)
+#include <drm/drm_atomic_helper.h>
+#endif
+
+#include "kernel_compatibility.h"
+
+enum pdp_crtc_flip_status {
+	PDP_CRTC_FLIP_STATUS_NONE = 0,
+	PDP_CRTC_FLIP_STATUS_PENDING,
+	PDP_CRTC_FLIP_STATUS_DONE,
+};
+
+struct pdp_flip_data {
+	struct dma_fence_cb base;
+	struct drm_crtc *crtc;
+	struct dma_fence *wait_fence;
+};
+
+/* returns true for ok, false for fail */
+static bool pdp_clocks_set(struct drm_crtc *crtc,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	bool res;
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN: {
+		pdp_odin_set_updates_enabled(crtc->dev->dev,
+						pdp_crtc->pdp_reg, false);
+		res = pdp_odin_clocks_set(crtc->dev->dev,
+				pdp_crtc->pdp_reg, pdp_crtc->pll_reg,
+				0,                       /* apollo only */
+				pdp_crtc->odn_core_reg,  /* odin only */
+				adjusted_mode->hdisplay,
+				adjusted_mode->vdisplay);
+		pdp_odin_set_updates_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg, true);
+
+		break;
+	}
+	case PDP_VERSION_APOLLO: {
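+		/* drm_display_mode.clock is expressed in kHz */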
+		int clock_in_mhz = adjusted_mode->clock / 1000;
+
+		pdp_apollo_set_updates_enabled(crtc->dev->dev,
+					       pdp_crtc->pdp_reg, false);
+		res = pdp_apollo_clocks_set(crtc->dev->dev,
+				pdp_crtc->pdp_reg, pdp_crtc->pll_reg,
+				clock_in_mhz,           /* apollo only */
+				NULL,                   /* odin only */
+				adjusted_mode->hdisplay,
+				adjusted_mode->vdisplay);
+		pdp_apollo_set_updates_enabled(crtc->dev->dev,
+					       pdp_crtc->pdp_reg, true);
+
+		DRM_DEBUG_DRIVER("pdp clock set to %dMhz\n", clock_in_mhz);
+
+		break;
+	}
+	case PDP_VERSION_PLATO:
+#if defined(SUPPORT_PLATO_DISPLAY)
+		plato_enable_pdp_clock(dev_priv->dev->dev->parent);
+		res = true;
+#else
+		DRM_ERROR("Trying to enable plato PDP clock on non-Plato build\n");
+		res = false;
+#endif
+		break;
+	default:
+		BUG();
+	}
+
+	return res;
+}
+
+void pdp_crtc_set_plane_enabled(struct drm_crtc *crtc, bool enable)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN:
+		pdp_odin_set_plane_enabled(crtc->dev->dev,
+					   pdp_crtc->pdp_reg,
+					   0, enable);
+		break;
+	case PDP_VERSION_APOLLO:
+		pdp_apollo_set_plane_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg,
+					     0, enable);
+		break;
+	case PDP_VERSION_PLATO:
+		pdp_plato_set_plane_enabled(crtc->dev->dev,
+					    pdp_crtc->pdp_reg,
+					    0, enable);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void pdp_crtc_set_syncgen_enabled(struct drm_crtc *crtc, bool enable)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN:
+		pdp_odin_set_syncgen_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg,
+					     enable);
+		break;
+	case PDP_VERSION_APOLLO:
+		pdp_apollo_set_syncgen_enabled(crtc->dev->dev,
+					       pdp_crtc->pdp_reg,
+					       enable);
+		break;
+	case PDP_VERSION_PLATO:
+		pdp_plato_set_syncgen_enabled(crtc->dev->dev,
+					      pdp_crtc->pdp_reg,
+					      enable);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void pdp_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+
+	if (enable) {
+		pdp_crtc_set_syncgen_enabled(crtc, enable);
+		pdp_crtc_set_plane_enabled(crtc, dev_priv->display_enabled);
+		drm_crtc_vblank_on(crtc);
+	} else {
+		drm_crtc_vblank_off(crtc);
+		pdp_crtc_set_plane_enabled(crtc, enable);
+		pdp_crtc_set_syncgen_enabled(crtc, enable);
+	}
+}
+
+static void pdp_crtc_mode_set(struct drm_crtc *crtc,
+			      struct drm_display_mode *adjusted_mode)
+{
+	/*
+	 * ht   = horizontal total
+	 * hbps = horizontal back porch start
+	 * has  = horizontal active start
+	 * hlbs = horizontal left border start
+	 * hfps = horizontal front porch start
+	 * hrbs = horizontal right border start
+	 *
+	 * vt   = vertical total
+	 * vbps = vertical back porch start
+	 * vas  = vertical active start
+	 * vtbs = vertical top border start
+	 * vfps = vertical front porch start
+	 * vbbs = vertical bottom border start
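+	 *
+	 * Worked example using the fixed 1080p timings from
+	 * pdp_crtc_helper_mode_fixup below (htotal = 2200,
+	 * hsync_start = 2008, hsync_end = 2052, hdisplay = 1920):
+	 * hbps = 2052 - 2008 = 44, has = hlbs = 2200 - 2008 = 192,
+	 * hfps = hrbs = 192 + 1920 = 2112.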
+	 */
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	uint32_t ht = adjusted_mode->htotal;
+	uint32_t hbps = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
+	uint32_t has = (adjusted_mode->htotal - adjusted_mode->hsync_start);
+	uint32_t hlbs = has;
+	uint32_t hfps = (hlbs + adjusted_mode->hdisplay);
+	uint32_t hrbs = hfps;
+	uint32_t vt = adjusted_mode->vtotal;
+	uint32_t vbps = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
+	uint32_t vas = (adjusted_mode->vtotal - adjusted_mode->vsync_start);
+	uint32_t vtbs = vas;
+	uint32_t vfps = (vtbs + adjusted_mode->vdisplay);
+	uint32_t vbbs = vfps;
+	bool ok;
+
+	ok = pdp_clocks_set(crtc, adjusted_mode);
+
+	if (!ok) {
+		dev_info(crtc->dev->dev, "%s failed\n", __func__);
+		return;
+	}
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN:
+		pdp_odin_set_updates_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg, false);
+		pdp_odin_reset_planes(crtc->dev->dev,
+				      pdp_crtc->pdp_reg);
+		pdp_odin_mode_set(crtc->dev->dev,
+			     pdp_crtc->pdp_reg,
+			     adjusted_mode->hdisplay, adjusted_mode->vdisplay,
+			     hbps, ht, has,
+			     hlbs, hfps, hrbs,
+			     vbps, vt, vas,
+			     vtbs, vfps, vbbs,
+			     adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC,
+			     adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC);
+		pdp_odin_set_powerdwn_enabled(crtc->dev->dev,
+					      pdp_crtc->pdp_reg, false);
+		pdp_odin_set_updates_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg, true);
+		break;
+	case PDP_VERSION_APOLLO:
+		pdp_apollo_set_updates_enabled(crtc->dev->dev,
+					       pdp_crtc->pdp_reg, false);
+		pdp_apollo_reset_planes(crtc->dev->dev,
+					pdp_crtc->pdp_reg);
+		pdp_apollo_mode_set(crtc->dev->dev,
+			     pdp_crtc->pdp_reg,
+			     adjusted_mode->hdisplay, adjusted_mode->vdisplay,
+			     hbps, ht, has,
+			     hlbs, hfps, hrbs,
+			     vbps, vt, vas,
+			     vtbs, vfps, vbbs,
+			     adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC,
+			     adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC);
+		pdp_apollo_set_powerdwn_enabled(crtc->dev->dev,
+						pdp_crtc->pdp_reg, false);
+		pdp_apollo_set_updates_enabled(crtc->dev->dev,
+					       pdp_crtc->pdp_reg, true);
+		break;
+	case PDP_VERSION_PLATO:
+		pdp_plato_mode_set(crtc->dev->dev,
+				   pdp_crtc->pdp_reg,
+				   adjusted_mode->hdisplay,
+				   adjusted_mode->vdisplay,
+				   hbps, ht, has,
+				   hlbs, hfps, hrbs,
+				   vbps, vt, vas,
+				   vtbs, vfps, vbbs,
+				   adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC,
+				   adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC);
+		break;
+	default:
+		BUG();
+	}
+}
+
+
+static bool pdp_crtc_helper_mode_fixup(struct drm_crtc *crtc,
+					const struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+
+	if (dev_priv->version == PDP_VERSION_ODIN
+		&& mode->hdisplay == 1920
+		&& mode->vdisplay == 1080) {
+
+		/* 1080p 60Hz */
+		const int h_total = 2200;
+		const int h_active_start = 192;
+		const int h_back_porch_start = 44;
+		const int v_total = 1125;
+		const int v_active_start = 41;
+		const int v_back_porch_start = 5;
+
+		adjusted_mode->htotal = h_total;
+		adjusted_mode->hsync_start = adjusted_mode->htotal -
+						h_active_start;
+		adjusted_mode->hsync_end = adjusted_mode->hsync_start +
+						h_back_porch_start;
+		adjusted_mode->vtotal = v_total;
+		adjusted_mode->vsync_start = adjusted_mode->vtotal -
+						v_active_start;
+		adjusted_mode->vsync_end = adjusted_mode->vsync_start +
+						v_back_porch_start;
+	}
+	return true;
+}
+
+static void pdp_crtc_flip_complete(struct drm_crtc *crtc);
+
+#if defined(PDP_USE_ATOMIC)
+static void pdp_crtc_helper_mode_set_nofb(struct drm_crtc *crtc)
+{
+	pdp_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
+}
+
+static void pdp_crtc_helper_atomic_flush(struct drm_crtc *crtc,
+					 struct drm_crtc_state *old_crtc_state)
+{
+	struct drm_crtc_state *new_crtc_state = crtc->state;
+
+	if (!new_crtc_state->active || !old_crtc_state->active)
+		return;
+
+	if (crtc->state->event) {
+		struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+		unsigned long flags;
+
+		pdp_crtc->flip_async = !!(new_crtc_state->pageflip_flags
+					  & DRM_MODE_PAGE_FLIP_ASYNC);
+
+		if (pdp_crtc->flip_async)
+			WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+		pdp_crtc->flip_event = crtc->state->event;
+		crtc->state->event = NULL;
+
+		atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE);
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+		if (pdp_crtc->flip_async)
+			pdp_crtc_flip_complete(crtc);
+	}
+}
+
+static void pdp_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+					  struct drm_crtc_state *old_crtc_state)
+{
+	pdp_crtc_set_enabled(crtc, true);
+
+	if (crtc->state->event) {
+		struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+		unsigned long flags;
+
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+		pdp_crtc->flip_event = crtc->state->event;
+		crtc->state->event = NULL;
+
+		atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE);
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	}
+}
+
+static void pdp_crtc_helper_atomic_disable(struct drm_crtc *crtc,
+					   struct drm_crtc_state *old_crtc_state)
+{
+	pdp_crtc_set_enabled(crtc, false);
+
+	if (crtc->state->event) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	}
+}
+#else
+static void pdp_crtc_helper_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void pdp_crtc_helper_prepare(struct drm_crtc *crtc)
+{
+	pdp_crtc_set_enabled(crtc, false);
+}
+
+static void pdp_crtc_helper_commit(struct drm_crtc *crtc)
+{
+	pdp_crtc_set_enabled(crtc, true);
+}
+
+static int pdp_crtc_helper_mode_set_base_atomic(struct drm_crtc *crtc,
+						struct drm_framebuffer *fb,
+						int x, int y,
+						enum mode_set_atomic atomic)
+{
+	if (x < 0 || y < 0)
+		return -EINVAL;
+
+	pdp_plane_set_surface(crtc, crtc->primary, fb,
+			      (uint32_t) x, (uint32_t) y);
+
+	return 0;
+}
+
+static int pdp_crtc_helper_mode_set_base(struct drm_crtc *crtc,
+					 int x, int y,
+					 struct drm_framebuffer *old_fb)
+{
+	if (!crtc->primary->fb) {
+		DRM_ERROR("no framebuffer\n");
+		return 0;
+	}
+
+	return pdp_crtc_helper_mode_set_base_atomic(crtc,
+						    crtc->primary->fb,
+						    x, y,
+						    0);
+}
+
+static int pdp_crtc_helper_mode_set(struct drm_crtc *crtc,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode,
+				    int x, int y,
+				    struct drm_framebuffer *old_fb)
+{
+	pdp_crtc_mode_set(crtc, adjusted_mode);
+
+	return pdp_crtc_helper_mode_set_base(crtc, x, y, old_fb);
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
+static void pdp_crtc_helper_load_lut(struct drm_crtc *crtc)
+{
+}
+#endif
+
+static void pdp_crtc_helper_disable(struct drm_crtc *crtc)
+{
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	enum pdp_crtc_flip_status status;
+
+	pdp_crtc_set_enabled(crtc, false);
+
+	status = atomic_read(&pdp_crtc->flip_status);
+	if (status != PDP_CRTC_FLIP_STATUS_NONE) {
+		long lerr;
+
+		lerr = wait_event_timeout(
+			pdp_crtc->flip_pending_wait_queue,
+			atomic_read(&pdp_crtc->flip_status)
+					!= PDP_CRTC_FLIP_STATUS_PENDING,
+			30 * HZ);
+		if (!lerr)
+			DRM_ERROR("Failed to wait for pending flip\n");
+		else if (!pdp_crtc->flip_async)
+			pdp_crtc_flip_complete(crtc);
+	}
+}
+#endif /* defined(PDP_USE_ATOMIC) */
+
+static void pdp_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+
+	DRM_DEBUG_DRIVER("[CRTC:%d]\n", crtc->base.id);
+
+	drm_crtc_cleanup(crtc);
+
+	iounmap(pdp_crtc->pll_reg);
+
+	iounmap(pdp_crtc->pdp_reg);
+	release_mem_region(pdp_crtc->pdp_reg_phys_base, pdp_crtc->pdp_reg_size);
+
+	kfree(pdp_crtc);
+	dev_priv->crtc = NULL;
+}
+
+static void pdp_crtc_flip_complete(struct drm_crtc *crtc)
+{
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+	/* The flipping process has been completed so reset the flip state */
+	atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE);
+	pdp_crtc->flip_async = false;
+
+#if !defined(PDP_USE_ATOMIC)
+	if (pdp_crtc->flip_data) {
+		dma_fence_put(pdp_crtc->flip_data->wait_fence);
+		kfree(pdp_crtc->flip_data);
+		pdp_crtc->flip_data = NULL;
+	}
+#endif
+
+	if (pdp_crtc->flip_event) {
+		drm_crtc_send_vblank_event(crtc, pdp_crtc->flip_event);
+		pdp_crtc->flip_event = NULL;
+	}
+
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
+#if !defined(PDP_USE_ATOMIC)
+static void pdp_crtc_flip(struct drm_crtc *crtc)
+{
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	struct drm_framebuffer *old_fb;
+
+	WARN_ON(atomic_read(&to_pdp_crtc(crtc)->flip_status)
+			!= PDP_CRTC_FLIP_STATUS_PENDING);
+
+	old_fb = pdp_crtc->old_fb;
+	pdp_crtc->old_fb = NULL;
+
+	/*
+	 * The graphics stream registers latch on vsync so we can go ahead and
+	 * do the flip now.
+	 */
+	(void) pdp_crtc_helper_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+
+	atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE);
+	wake_up(&pdp_crtc->flip_pending_wait_queue);
+
+	if (pdp_crtc->flip_async)
+		pdp_crtc_flip_complete(crtc);
+}
+
+static void pdp_crtc_flip_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct pdp_flip_data *flip_data =
+		container_of(cb, struct pdp_flip_data, base);
+
+	pdp_crtc_flip(flip_data->crtc);
+}
+
+static void pdp_crtc_flip_schedule_cb(struct dma_fence *fence,
+				      struct dma_fence_cb *cb)
+{
+	struct pdp_flip_data *flip_data =
+		container_of(cb, struct pdp_flip_data, base);
+	int err = 0;
+
+	if (flip_data->wait_fence)
+		err = dma_fence_add_callback(flip_data->wait_fence,
+					     &flip_data->base,
+					     pdp_crtc_flip_cb);
+
+	if (!flip_data->wait_fence || err) {
+		if (err && err != -ENOENT)
+			DRM_ERROR("flip failed to wait on old buffer\n");
+		pdp_crtc_flip_cb(flip_data->wait_fence, &flip_data->base);
+	}
+}
+
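+/*
+ * Flip ordering: a callback is first armed on the new buffer's exclusive
+ * fence (rendering finished); pdp_crtc_flip_schedule_cb then chains onto
+ * the old buffer's exclusive fence, and pdp_crtc_flip_cb finally programs
+ * the flip once both have signalled.
+ */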
+static int pdp_crtc_flip_schedule(struct drm_crtc *crtc,
+				  struct drm_gem_object *obj,
+				  struct drm_gem_object *old_obj)
+{
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	struct reservation_object *resv = pdp_gem_get_resv(obj);
+	struct reservation_object *old_resv = pdp_gem_get_resv(old_obj);
+	struct pdp_flip_data *flip_data;
+	struct dma_fence *fence;
+	int err;
+
+	flip_data = kmalloc(sizeof(*flip_data), GFP_KERNEL);
+	if (!flip_data)
+		return -ENOMEM;
+
+	flip_data->crtc = crtc;
+
+	ww_mutex_lock(&old_resv->lock, NULL);
+	flip_data->wait_fence =
+		dma_fence_get(reservation_object_get_excl(old_resv));
+
+	if (old_resv != resv) {
+		ww_mutex_unlock(&old_resv->lock);
+		ww_mutex_lock(&resv->lock, NULL);
+	}
+
+	fence = dma_fence_get(reservation_object_get_excl(resv));
+	ww_mutex_unlock(&resv->lock);
+
+	pdp_crtc->flip_data = flip_data;
+	atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_PENDING);
+
+	if (fence) {
+		err = dma_fence_add_callback(fence, &flip_data->base,
+					     pdp_crtc_flip_schedule_cb);
+		dma_fence_put(fence);
+		if (err && err != -ENOENT)
+			goto err_set_flip_status_none;
+	}
+
+	if (!fence || err == -ENOENT) {
+		pdp_crtc_flip_schedule_cb(fence, &flip_data->base);
+		err = 0;
+	}
+
+	return err;
+
+err_set_flip_status_none:
+	atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE);
+	dma_fence_put(flip_data->wait_fence);
+	kfree(flip_data);
+	return err;
+}
+
+static int pdp_crtc_page_flip(struct drm_crtc *crtc,
+			      struct drm_framebuffer *fb,
+			      struct drm_pending_vblank_event *event,
+			      uint32_t page_flip_flags
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+			      , struct drm_modeset_acquire_ctx *ctx
+#endif
+			     )
+{
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb);
+	struct pdp_framebuffer *pdp_old_fb =
+		to_pdp_framebuffer(crtc->primary->fb);
+	enum pdp_crtc_flip_status status;
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	status = atomic_read(&pdp_crtc->flip_status);
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	if (status != PDP_CRTC_FLIP_STATUS_NONE)
+		return -EBUSY;
+
+	if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) {
+		err = drm_crtc_vblank_get(crtc);
+		if (err)
+			return err;
+	}
+
+	pdp_crtc->old_fb = crtc->primary->fb;
+	pdp_crtc->flip_event = event;
+	pdp_crtc->flip_async = !!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC);
+
+	/* Set the crtc primary plane to point to the new framebuffer */
+	crtc->primary->fb = fb;
+
+	err = pdp_crtc_flip_schedule(crtc, pdp_fb->obj[0], pdp_old_fb->obj[0]);
+	if (err) {
+		crtc->primary->fb = pdp_crtc->old_fb;
+		pdp_crtc->old_fb = NULL;
+		pdp_crtc->flip_event = NULL;
+		pdp_crtc->flip_async = false;
+
+		DRM_ERROR("failed to schedule flip (err=%d)\n", err);
+		goto err_vblank_put;
+	}
+
+	return 0;
+
+err_vblank_put:
+	if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
+		drm_crtc_vblank_put(crtc);
+	return err;
+}
+#endif /* !defined(PDP_USE_ATOMIC) */
+
+static const struct drm_crtc_helper_funcs pdp_crtc_helper_funcs = {
+	.mode_fixup = pdp_crtc_helper_mode_fixup,
+#if defined(PDP_USE_ATOMIC)
+	.mode_set_nofb = pdp_crtc_helper_mode_set_nofb,
+	.atomic_flush = pdp_crtc_helper_atomic_flush,
+	.atomic_enable = pdp_crtc_helper_atomic_enable,
+	.atomic_disable = pdp_crtc_helper_atomic_disable,
+#else
+	.dpms = pdp_crtc_helper_dpms,
+	.prepare = pdp_crtc_helper_prepare,
+	.commit = pdp_crtc_helper_commit,
+	.mode_set = pdp_crtc_helper_mode_set,
+	.mode_set_base = pdp_crtc_helper_mode_set_base,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
+	.load_lut = pdp_crtc_helper_load_lut,
+#endif
+	.mode_set_base_atomic = pdp_crtc_helper_mode_set_base_atomic,
+	.disable = pdp_crtc_helper_disable,
+#endif
+};
+
+static const struct drm_crtc_funcs pdp_crtc_funcs = {
+	.destroy = pdp_crtc_destroy,
+#if defined(PDP_USE_ATOMIC)
+	.reset = drm_atomic_helper_crtc_reset,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+#else
+	.set_config = drm_crtc_helper_set_config,
+	.page_flip = pdp_crtc_page_flip,
+#endif
+};
+
+
+struct drm_crtc *pdp_crtc_create(struct drm_device *dev, uint32_t number,
+				 struct drm_plane *primary_plane)
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	struct pdp_crtc *pdp_crtc;
+	const char *crtc_name = NULL;
+	int err;
+
+	pdp_crtc = kzalloc(sizeof(*pdp_crtc), GFP_KERNEL);
+	if (!pdp_crtc) {
+		err = -ENOMEM;
+		goto err_exit;
+	}
+
+	init_waitqueue_head(&pdp_crtc->flip_pending_wait_queue);
+	atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE);
+	pdp_crtc->number = number;
+
+	switch (number) {
+	case 0:
+	{
+		struct resource *regs;
+
+		regs = platform_get_resource_byname(
+				    to_platform_device(dev->dev),
+				    IORESOURCE_MEM,
+				    "pdp-regs");
+		if (!regs) {
+			DRM_ERROR("missing pdp register info\n");
+			err = -ENXIO;
+			goto err_crtc_free;
+		}
+
+		pdp_crtc->pdp_reg_phys_base = regs->start;
+		pdp_crtc->pdp_reg_size = resource_size(regs);
+
+		if (dev_priv->version == PDP_VERSION_ODIN ||
+			dev_priv->version == PDP_VERSION_APOLLO) {
+			regs = platform_get_resource_byname(
+					    to_platform_device(dev->dev),
+					    IORESOURCE_MEM,
+					    "pll-regs");
+			if (!regs) {
+				DRM_ERROR("missing pll register info\n");
+				err = -ENXIO;
+				goto err_crtc_free;
+			}
+
+			pdp_crtc->pll_reg_phys_base = regs->start;
+			pdp_crtc->pll_reg_size = resource_size(regs);
+
+			pdp_crtc->pll_reg =
+				ioremap_nocache(pdp_crtc->pll_reg_phys_base,
+						pdp_crtc->pll_reg_size);
+			if (!pdp_crtc->pll_reg) {
+				DRM_ERROR("failed to map pll registers\n");
+				err = -ENOMEM;
+				goto err_crtc_free;
+			}
+		} else if (dev_priv->version == PDP_VERSION_PLATO) {
+			regs = platform_get_resource_byname(
+				    to_platform_device(dev->dev),
+				    IORESOURCE_MEM,
+				    PLATO_PDP_RESOURCE_BIF_REGS);
+			if (!regs) {
+				DRM_ERROR("missing pdp-bif register info\n");
+				err = -ENXIO;
+				goto err_crtc_free;
+			}
+
+			pdp_crtc->pdp_bif_reg_phys_base = regs->start;
+			pdp_crtc->pdp_bif_reg_size = resource_size(regs);
+
+			if (!request_mem_region(pdp_crtc->pdp_bif_reg_phys_base,
+					pdp_crtc->pdp_bif_reg_size,
+					crtc_name)) {
+				DRM_ERROR("failed to reserve pdp-bif registers\n");
+				err = -EBUSY;
+				goto err_crtc_free;
+			}
+
+			pdp_crtc->pdp_bif_reg =
+				ioremap_nocache(pdp_crtc->pdp_bif_reg_phys_base,
+						pdp_crtc->pdp_bif_reg_size);
+			if (!pdp_crtc->pdp_bif_reg) {
+				DRM_ERROR("failed to map pdp-bif registers\n");
+				err = -ENOMEM;
+				goto err_iounmap_regs;
+			}
+		}
+
+		if (dev_priv->version == PDP_VERSION_ODIN) {
+			regs = platform_get_resource_byname(
+					    to_platform_device(dev->dev),
+					    IORESOURCE_MEM,
+					    "odn-core");
+			if (!regs) {
+				DRM_ERROR("missing odn-core info\n");
+				err = -ENXIO;
+				goto err_crtc_free;
+			}
+
+			pdp_crtc->odn_core_phys_base = regs->start;
+			pdp_crtc->odn_core_size = resource_size(regs);
+
+			pdp_crtc->odn_core_reg
+				= ioremap_nocache(pdp_crtc->odn_core_phys_base,
+					  pdp_crtc->odn_core_size);
+			if (!pdp_crtc->odn_core_reg) {
+				DRM_ERROR("failed to map pdp reset register\n");
+				err = -ENOMEM;
+				goto err_iounmap_regs;
+			}
+		}
+
+		crtc_name = "crtc-0";
+		break;
+	}
+	default:
+		DRM_ERROR("invalid crtc number %u\n", number);
+		err = -EINVAL;
+		goto err_crtc_free;
+	}
+
+	if (!request_mem_region(pdp_crtc->pdp_reg_phys_base,
+				pdp_crtc->pdp_reg_size,
+				crtc_name)) {
+		DRM_ERROR("failed to reserve pdp registers\n");
+		err = -EBUSY;
+		goto err_crtc_free;
+	}
+
+	pdp_crtc->pdp_reg = ioremap_nocache(pdp_crtc->pdp_reg_phys_base,
+							pdp_crtc->pdp_reg_size);
+	if (!pdp_crtc->pdp_reg) {
+		DRM_ERROR("failed to map pdp registers\n");
+		err = -ENOMEM;
+		goto err_release_mem_region;
+	}
+
+	err = drm_crtc_init_with_planes(dev, &pdp_crtc->base, primary_plane,
+					NULL, &pdp_crtc_funcs, NULL);
+	if (err) {
+		DRM_ERROR("CRTC init with planes failed");
+		goto err_iounmap_regs;
+	}
+
+	drm_crtc_helper_add(&pdp_crtc->base, &pdp_crtc_helper_funcs);
+
+	DRM_DEBUG_DRIVER("[CRTC:%d]\n", pdp_crtc->base.base.id);
+
+	return &pdp_crtc->base;
+
+err_iounmap_regs:
+	iounmap(pdp_crtc->pdp_reg);
+	if (pdp_crtc->odn_core_reg)
+		iounmap(pdp_crtc->odn_core_reg);
+	if (pdp_crtc->pdp_bif_reg)
+		iounmap(pdp_crtc->pdp_bif_reg);
+err_release_mem_region:
+	release_mem_region(pdp_crtc->pdp_reg_phys_base, pdp_crtc->pdp_reg_size);
+err_crtc_free:
+	kfree(pdp_crtc);
+err_exit:
+	return ERR_PTR(err);
+}
+
+void pdp_crtc_set_vblank_enabled(struct drm_crtc *crtc, bool enable)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN:
+		pdp_odin_set_vblank_enabled(crtc->dev->dev,
+					    pdp_crtc->pdp_reg,
+					    enable);
+		break;
+	case PDP_VERSION_APOLLO:
+		pdp_apollo_set_vblank_enabled(crtc->dev->dev,
+					    pdp_crtc->pdp_reg,
+					    enable);
+		break;
+	case PDP_VERSION_PLATO:
+		pdp_plato_set_vblank_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg,
+					     enable);
+		break;
+	default:
+		BUG();
+	}
+}
+
+void pdp_crtc_irq_handler(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	bool handled;
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN:
+		handled = pdp_odin_check_and_clear_vblank(dev->dev,
+							  pdp_crtc->pdp_reg);
+		break;
+	case PDP_VERSION_APOLLO:
+		handled = pdp_apollo_check_and_clear_vblank(dev->dev,
+							    pdp_crtc->pdp_reg);
+		break;
+	case PDP_VERSION_PLATO:
+		handled = pdp_plato_check_and_clear_vblank(dev->dev,
+							   pdp_crtc->pdp_reg);
+		break;
+	default:
+		handled = false;
+		break;
+	}
+
+	if (handled) {
+		enum pdp_crtc_flip_status status;
+
+		drm_handle_vblank(dev, pdp_crtc->number);
+
+		status = atomic_read(&pdp_crtc->flip_status);
+		if (status == PDP_CRTC_FLIP_STATUS_DONE) {
+			if (!pdp_crtc->flip_async) {
+				pdp_crtc_flip_complete(crtc);
+#if !defined(PDP_USE_ATOMIC)
+				drm_crtc_vblank_put(crtc);
+#endif
+			}
+		}
+	}
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+void pdp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file)
+{
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+	if (pdp_crtc->flip_event &&
+	    pdp_crtc->flip_event->base.file_priv == file) {
+		pdp_crtc->flip_event->base.destroy(&pdp_crtc->flip_event->base);
+		pdp_crtc->flip_event = NULL;
+	}
+
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_debugfs.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_debugfs.c
new file mode 100644
index 0000000..2b40647
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_debugfs.c
@@ -0,0 +1,172 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/debugfs.h>
+
+#include "drm_pdp_drv.h"
+
+#define PDP_DEBUGFS_DISPLAY_ENABLED "display_enabled"
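+
+/*
+ * Typically exposed as <debugfs>/dri/<minor>/display_enabled. Reads return
+ * "Y\n" or "N\n"; writes accept any strtobool() value, e.g.
+ * "echo N > /sys/kernel/debug/dri/0/display_enabled" to disable the plane.
+ */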
+
+static int display_enabled_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+static ssize_t display_enabled_read(struct file *file,
+				    char __user *user_buffer,
+				    size_t count,
+				    loff_t *position_ptr)
+{
+	struct drm_device *dev = file->private_data;
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	loff_t position = *position_ptr;
+	char buffer[] = "N\n";
+	size_t buffer_size = ARRAY_SIZE(buffer);
+	int err;
+
+	if (position < 0)
+		return -EINVAL;
+	else if (position >= buffer_size || count == 0)
+		return 0;
+
+	if (dev_priv->display_enabled)
+		buffer[0] = 'Y';
+
+	if (count > buffer_size - position)
+		count = buffer_size - position;
+
+	err = copy_to_user(user_buffer, &buffer[position], count);
+	if (err)
+		return -EFAULT;
+
+	*position_ptr = position + count;
+
+	return count;
+}
+
+static ssize_t display_enabled_write(struct file *file,
+				     const char __user *user_buffer,
+				     size_t count,
+				     loff_t *position)
+{
+	struct drm_device *dev = file->private_data;
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	char buffer[3];
+	int err;
+
+	count = min(count, ARRAY_SIZE(buffer) - 1);
+
+	err = copy_from_user(buffer, user_buffer, count);
+	if (err)
+		return -EFAULT;
+	buffer[count] = '\0';
+
+	if (!strtobool(buffer, &dev_priv->display_enabled) && dev_priv->crtc)
+		pdp_crtc_set_plane_enabled(dev_priv->crtc, dev_priv->display_enabled);
+
+	return count;
+}
+
+static const struct file_operations pdp_display_enabled_fops = {
+	.owner = THIS_MODULE,
+	.open = display_enabled_open,
+	.read = display_enabled_read,
+	.write = display_enabled_write,
+	.llseek = default_llseek,
+};
+
+static int pdp_debugfs_create(struct drm_minor *minor, const char *name,
+			      umode_t mode, const struct file_operations *fops)
+{
+	struct drm_info_node *node;
+
+	/*
+	 * We can't get access to our driver private data when this function is
+	 * called so we fake up a node so that we can clean up entries later on.
+	 */
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->dent = debugfs_create_file(name, mode, minor->debugfs_root,
+					 minor->dev, fops);
+	if (!node->dent) {
+		kfree(node);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->info_ent = (void *) fops;
+
+	mutex_lock(&minor->debugfs_lock);
+	list_add(&node->list, &minor->debugfs_list);
+	mutex_unlock(&minor->debugfs_lock);
+
+	return 0;
+}
+
+int pdp_debugfs_init(struct drm_minor *minor)
+{
+	int err;
+
+	err = pdp_debugfs_create(minor, PDP_DEBUGFS_DISPLAY_ENABLED,
+				 0100644,
+				 &pdp_display_enabled_fops);
+	if (err) {
+		DRM_INFO("failed to create '%s' debugfs entry\n",
+			 PDP_DEBUGFS_DISPLAY_ENABLED);
+	}
+
+	return err;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+void pdp_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files((struct drm_info_list *) &pdp_display_enabled_fops,
+				 1, minor);
+}
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_drv.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_drv.c
new file mode 100644
index 0000000..c073a5f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_drv.c
@@ -0,0 +1,782 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+#include <linux/reservation.h>
+#include <linux/version.h>
+#include <linux/component.h>
+#include <linux/of_platform.h>
+
+#include <drm/drmP.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+#include <drm/drm_gem.h>
+#endif
+
+#include "tc_drv.h"
+#include "pvrversion.h"
+
+#include "drm_pdp_drv.h"
+#include "drm_pdp_gem.h"
+#include "pdp_drm.h"
+
+#include "odin_defs.h"
+
+#if defined(SUPPORT_PLATO_DISPLAY)
+#include "plato_drv.h"
+#include "pdp2_regs.h"
+#include "pdp2_mmu_regs.h"
+#endif
+
+#define DRIVER_NAME "pdp"
+#define DRIVER_DESC "Imagination Technologies PDP DRM Display Driver"
+#define DRIVER_DATE "20150612"
+
+#if defined(PDP_USE_ATOMIC)
+#include <drm/drm_atomic_helper.h>
+
+#define PVR_DRIVER_ATOMIC DRIVER_ATOMIC
+#else
+#define PVR_DRIVER_ATOMIC 0
+#endif
+
+/* This header must always be included last */
+#include "kernel_compatibility.h"
+
+static bool display_enable = true;
+
+module_param(display_enable, bool, 0444);
+MODULE_PARM_DESC(display_enable, "Enable all displays (default: Y)");
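+/*
+ * e.g. passing "display_enable=N" when loading the module (the module name
+ * depends on the build) starts with the plane disabled; it can be toggled
+ * again later through the display_enabled debugfs entry.
+ */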
+
+
+static void pdp_irq_handler(void *data)
+{
+	struct drm_device *dev = data;
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		pdp_crtc_irq_handler(crtc);
+}
+
+static int pdp_early_load(struct drm_device *dev)
+{
+	struct pdp_drm_private *dev_priv;
+	int err;
+
+	DRM_DEBUG("loading %s device\n", to_platform_device(dev->dev)->name);
+
+	platform_set_drvdata(to_platform_device(dev->dev), dev);
+
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (!dev_priv)
+		return -ENOMEM;
+
+	dev->dev_private = dev_priv;
+	dev_priv->dev = dev;
+	dev_priv->version = (enum pdp_version)
+		to_platform_device(dev->dev)->id_entry->driver_data;
+	dev_priv->display_enabled = display_enable;
+
+	if (dev_priv->version == PDP_VERSION_APOLLO ||
+		dev_priv->version == PDP_VERSION_ODIN) {
+#if !defined(SUPPORT_PLATO_DISPLAY)
+		err = tc_enable(dev->dev->parent);
+		if (err) {
+			DRM_ERROR("failed to enable parent device (err=%d)\n", err);
+			goto err_dev_priv_free;
+		}
+#endif
+	}
+#if defined(SUPPORT_PLATO_DISPLAY)
+	else if (dev_priv->version == PDP_VERSION_PLATO) {
+		// XXX do we need to do this? The Plato driver has already enabled the device.
+		err = plato_enable(dev->dev->parent);
+		if (err) {
+			DRM_ERROR("failed to enable parent device (err=%d)\n", err);
+			goto err_dev_priv_free;
+		}
+	}
+#endif
+
+	dev_priv->gem_priv = pdp_gem_init(dev);
+	if (!dev_priv->gem_priv) {
+		DRM_ERROR("gem initialisation failed\n");
+		err = -ENOMEM;
+		goto err_disable_parent_device;
+	}
+
+	err = pdp_modeset_early_init(dev_priv);
+	if (err) {
+		DRM_ERROR("early modeset initialisation failed (err=%d)\n",
+			  err);
+		goto err_gem_cleanup;
+	}
+
+	err = drm_vblank_init(dev_priv->dev, 1);
+	if (err) {
+		DRM_ERROR("failed to complete vblank init (err=%d)\n", err);
+		goto err_modeset_late_cleanup;
+	}
+
+	if (dev_priv->version == PDP_VERSION_APOLLO ||
+		dev_priv->version == PDP_VERSION_ODIN) {
+#if !defined(SUPPORT_PLATO_DISPLAY)
+		err = tc_set_interrupt_handler(dev->dev->parent,
+					   TC_INTERRUPT_PDP,
+					   pdp_irq_handler,
+					   dev);
+		if (err) {
+			DRM_ERROR("failed to set interrupt handler (err=%d)\n",
+				  err);
+			goto err_vblank_cleanup;
+		}
+
+		err = tc_enable_interrupt(dev->dev->parent, TC_INTERRUPT_PDP);
+		if (err) {
+			DRM_ERROR("failed to enable pdp interrupts (err=%d)\n",
+				  err);
+			goto err_uninstall_interrupt_handle;
+		}
+#endif
+	}
+#if defined(SUPPORT_PLATO_DISPLAY)
+	else if (dev_priv->version == PDP_VERSION_PLATO) {
+		err = plato_set_interrupt_handler(dev->dev->parent,
+							PLATO_INTERRUPT_PDP,
+							pdp_irq_handler,
+							dev);
+		if (err) {
+			DRM_ERROR("failed to set interrupt handler (err=%d)\n",
+				  err);
+			goto err_vblank_cleanup;
+		}
+
+		err = plato_enable_interrupt(dev->dev->parent, PLATO_INTERRUPT_PDP);
+		if (err) {
+			DRM_ERROR("failed to enable pdp interrupts (err=%d)\n",
+				  err);
+			goto err_uninstall_interrupt_handle;
+		}
+	}
+#endif
+
+	dev->irq_enabled = true;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
+	dev->vblank_disable_allowed = 1;
+#endif
+
+	return 0;
+
+err_uninstall_interrupt_handle:
+	if (dev_priv->version == PDP_VERSION_APOLLO ||
+		dev_priv->version == PDP_VERSION_ODIN) {
+#if !defined(SUPPORT_PLATO_DISPLAY)
+		tc_set_interrupt_handler(dev->dev->parent,
+					     TC_INTERRUPT_PDP,
+					     NULL,
+					     NULL);
+#endif
+	}
+#if defined(SUPPORT_PLATO_DISPLAY)
+	else if (dev_priv->version == PDP_VERSION_PLATO) {
+		plato_set_interrupt_handler(dev->dev->parent,
+				PLATO_INTERRUPT_PDP,
+				NULL,
+				NULL);
+	}
+#endif
+err_vblank_cleanup:
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
+	/* Called by drm_dev_fini in Linux 4.11.0 and later */
+	drm_vblank_cleanup(dev_priv->dev);
+#endif
+err_modeset_late_cleanup:
+	pdp_modeset_late_cleanup(dev_priv);
+err_gem_cleanup:
+	pdp_gem_cleanup(dev_priv->gem_priv);
+err_disable_parent_device:
+	if (dev_priv->version == PDP_VERSION_APOLLO ||
+		dev_priv->version == PDP_VERSION_ODIN) {
+#if !defined(SUPPORT_PLATO_DISPLAY)
+		tc_disable(dev->dev->parent);
+#endif
+	}
+#if defined(SUPPORT_PLATO_DISPLAY)
+	else if (dev_priv->version == PDP_VERSION_PLATO)
+		plato_disable(dev->dev->parent);
+#endif
+err_dev_priv_free:
+	kfree(dev_priv);
+	return err;
+}
+
+static int pdp_late_load(struct drm_device *dev)
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	int err;
+
+	err = pdp_modeset_late_init(dev_priv);
+	if (err) {
+		DRM_ERROR("late modeset initialisation failed (err=%d)\n",
+			  err);
+		return err;
+	}
+
+	return 0;
+}
+
+static void pdp_early_unload(struct drm_device *dev)
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+
+	pdp_modeset_early_cleanup(dev_priv);
+}
+
+static void pdp_late_unload(struct drm_device *dev)
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+
+	DRM_INFO("unloading %s device.\n", to_platform_device(dev->dev)->name);
+	if (dev_priv->version == PDP_VERSION_APOLLO ||
+		dev_priv->version == PDP_VERSION_ODIN) {
+#if !defined(SUPPORT_PLATO_DISPLAY)
+		tc_disable_interrupt(dev->dev->parent, TC_INTERRUPT_PDP);
+		tc_set_interrupt_handler(dev->dev->parent,
+					     TC_INTERRUPT_PDP,
+					     NULL,
+					     NULL);
+#endif
+	}
+#if defined(SUPPORT_PLATO_DISPLAY)
+	else if (dev_priv->version == PDP_VERSION_PLATO) {
+		plato_disable_interrupt(dev->dev->parent, PLATO_INTERRUPT_PDP);
+		plato_set_interrupt_handler(dev->dev->parent,
+						PLATO_INTERRUPT_PDP,
+						NULL,
+						NULL);
+	}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
+	/* Called by drm_dev_fini in Linux 4.11.0 and later */
+	drm_vblank_cleanup(dev_priv->dev);
+#endif
+	pdp_modeset_late_cleanup(dev_priv);
+	pdp_gem_cleanup(dev_priv->gem_priv);
+
+	if (dev_priv->version == PDP_VERSION_APOLLO ||
+		dev_priv->version == PDP_VERSION_ODIN) {
+#if !defined(SUPPORT_PLATO_DISPLAY)
+		tc_disable(dev->dev->parent);
+#endif
+	}
+#if defined(SUPPORT_PLATO_DISPLAY)
+	else if (dev_priv->version == PDP_VERSION_PLATO)
+		plato_disable(dev->dev->parent);
+#endif
+
+	kfree(dev_priv);
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+static int pdp_load(struct drm_device *dev, unsigned long flags)
+{
+	int err;
+
+	err = pdp_early_load(dev);
+	if (err)
+		return err;
+
+	err = pdp_late_load(dev);
+	if (err) {
+		pdp_late_unload(dev);
+		return err;
+	}
+
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+static int pdp_unload(struct drm_device *dev)
+#else
+static void pdp_unload(struct drm_device *dev)
+#endif
+{
+	pdp_early_unload(dev);
+	pdp_late_unload(dev);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+	return 0;
+#endif
+}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+static void pdp_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		pdp_crtc_flip_event_cancel(crtc, file);
+}
+#endif
+
+static void pdp_lastclose(struct drm_device *dev)
+{
+#if defined(PDP_USE_ATOMIC)
+	drm_atomic_helper_shutdown(dev);
+#else
+	struct drm_crtc *crtc;
+
+	DRM_INFO("%s: %s device\n", __func__, to_platform_device(dev->dev)->name);
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc->primary->fb) {
+			struct drm_mode_set mode_set = { .crtc = crtc };
+			int err;
+
+			err = drm_mode_set_config_internal(&mode_set);
+			if (err)
+				DRM_ERROR("failed to disable crtc %p (err=%d)\n",
+					  crtc, err);
+		}
+	}
+	drm_modeset_unlock_all(dev);
+#endif
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+static int pdp_enable_vblank(struct drm_device *dev, unsigned int crtc)
+#else
+static int pdp_enable_vblank(struct drm_device *dev, int crtc)
+#endif
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+
+	switch (crtc) {
+	case 0:
+		pdp_crtc_set_vblank_enabled(dev_priv->crtc, true);
+		break;
+	default:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+		DRM_ERROR("invalid crtc %u\n", crtc);
+#else
+		DRM_ERROR("invalid crtc %d\n", crtc);
+#endif
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("vblank interrupts enabled for crtc %d\n", crtc);
+
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+static void pdp_disable_vblank(struct drm_device *dev, unsigned int crtc)
+#else
+static void pdp_disable_vblank(struct drm_device *dev, int crtc)
+#endif
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+
+	switch (crtc) {
+	case 0:
+		pdp_crtc_set_vblank_enabled(dev_priv->crtc, false);
+		break;
+	default:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+		DRM_ERROR("invalid crtc %u\n", crtc);
+#else
+		DRM_ERROR("invalid crtc %d\n", crtc);
+#endif
+		return;
+	}
+
+	DRM_DEBUG("vblank interrupts disabled for crtc %d\n", crtc);
+}
+
+static int pdp_gem_object_create_ioctl(struct drm_device *dev,
+				       void *data,
+				       struct drm_file *file)
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+
+	return pdp_gem_object_create_ioctl_priv(dev,
+						dev_priv->gem_priv,
+						data,
+						file);
+}
+
+static int pdp_gem_dumb_create(struct drm_file *file,
+			       struct drm_device *dev,
+			       struct drm_mode_create_dumb *args)
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+
+	return pdp_gem_dumb_create_priv(file,
+					dev,
+					dev_priv->gem_priv,
+					args);
+}
+
+static void pdp_gem_object_free(struct drm_gem_object *obj)
+{
+	struct pdp_drm_private *dev_priv = obj->dev->dev_private;
+
+	pdp_gem_object_free_priv(dev_priv->gem_priv, obj);
+}
+
+static const struct vm_operations_struct pdp_gem_vm_ops = {
+	.fault	= pdp_gem_object_vm_fault,
+	.open	= drm_gem_vm_open,
+	.close	= drm_gem_vm_close,
+};
+
+static const struct drm_ioctl_desc pdp_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(PDP_GEM_CREATE, pdp_gem_object_create_ioctl,
+				DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(PDP_GEM_MMAP, pdp_gem_object_mmap_ioctl,
+				DRM_AUTH | DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(PDP_GEM_CPU_PREP, pdp_gem_object_cpu_prep_ioctl,
+				DRM_AUTH | DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(PDP_GEM_CPU_FINI, pdp_gem_object_cpu_fini_ioctl,
+				DRM_AUTH | DRM_UNLOCKED),
+};
+
+static const struct file_operations pdp_driver_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+	.mmap		= drm_gem_mmap,
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.llseek		= noop_llseek,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+};
+
+static struct drm_driver pdp_drm_driver = {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+	.load				= NULL,
+	.unload				= NULL,
+#else
+	.load				= pdp_load,
+	.unload				= pdp_unload,
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+	.preclose			= pdp_preclose,
+#endif
+	.lastclose			= pdp_lastclose,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+	.set_busid			= drm_platform_set_busid,
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+	.get_vblank_counter		= NULL,
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	.get_vblank_counter		= drm_vblank_no_hw_counter,
+#else
+	.get_vblank_counter		= drm_vblank_count,
+#endif
+	.enable_vblank			= pdp_enable_vblank,
+	.disable_vblank			= pdp_disable_vblank,
+
+	.debugfs_init			= pdp_debugfs_init,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	.debugfs_cleanup		= pdp_debugfs_cleanup,
+#endif
+
+	.gem_free_object		= pdp_gem_object_free,
+
+	.prime_handle_to_fd		= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle		= drm_gem_prime_fd_to_handle,
+	.gem_prime_export		= pdp_gem_prime_export,
+	.gem_prime_import		= pdp_gem_prime_import,
+	.gem_prime_import_sg_table	= pdp_gem_prime_import_sg_table,
+
+	/* Setting dumb_create to NULL would prevent Xorg from taking
+	 * ownership of the display (if Xorg is running).
+	 */
+	.dumb_create			= pdp_gem_dumb_create,
+	.dumb_map_offset		= pdp_gem_dumb_map_offset,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
+	.dumb_destroy			= drm_gem_dumb_destroy,
+#endif
+
+	.gem_vm_ops			= &pdp_gem_vm_ops,
+
+	.name				= DRIVER_NAME,
+	.desc				= DRIVER_DESC,
+	.date				= DRIVER_DATE,
+	.major				= PVRVERSION_MAJ,
+	.minor				= PVRVERSION_MIN,
+	.patchlevel			= PVRVERSION_BUILD,
+
+	.driver_features		= DRIVER_GEM |
+					  DRIVER_MODESET |
+					  DRIVER_PRIME |
+					  PVR_DRIVER_ATOMIC,
+	.ioctls				= pdp_ioctls,
+	.num_ioctls			= ARRAY_SIZE(pdp_ioctls),
+	.fops				= &pdp_driver_fops,
+};
+
+#if defined(SUPPORT_PLATO_DISPLAY)
+
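+/*
+ * Match any device sharing this PDP device's parent, so that sibling
+ * components (e.g. the HDMI encoder/connector bound below) attach to the
+ * same component master.
+ */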
+static int compare_parent_dev(struct device *dev, void *data)
+{
+	struct device *pdp_dev = data;
+
+	return dev->parent && dev->parent == pdp_dev->parent;
+}
+
+static int pdp_component_bind(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *ddev;
+	int ret;
+
+	dev_info(dev, "Loading platform device\n");
+	ddev = drm_dev_alloc(&pdp_drm_driver, &pdev->dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+	if (IS_ERR(ddev))
+		return PTR_ERR(ddev);
+#else
+	if (!ddev)
+		return -ENOMEM;
+#endif
+
+	// XXX no need to do this here, as it already happens in pdp_early_load
+	platform_set_drvdata(pdev, ddev);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+	/* Needed by drm_platform_set_busid */
+	ddev->platformdev = pdev;
+#endif
+	BUG_ON(pdp_drm_driver.load != NULL);
+
+	ret = pdp_early_load(ddev);
+	if (ret)
+		goto err_drm_dev_put;
+
+	DRM_DEBUG_DRIVER("Binding other components\n");
+	/* Bind other components, including HDMI encoder/connector */
+	ret = component_bind_all(dev, ddev);
+	if (ret) {
+		DRM_ERROR("Failed to bind other components (ret=%d)\n", ret);
+		goto err_drm_dev_late_unload;
+	}
+
+	ret = drm_dev_register(ddev, 0);
+	if (ret)
+		goto err_drm_dev_late_unload;
+
+	ret = pdp_late_load(ddev);
+	if (ret)
+		goto err_drm_dev_unregister;
+
+	return 0;
+
+err_drm_dev_unregister:
+	drm_dev_unregister(ddev);
+err_drm_dev_late_unload:
+	pdp_late_unload(ddev);
+err_drm_dev_put:
+	drm_dev_put(ddev);
+	return ret;
+}
+
+static void pdp_component_unbind(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+
+	dev_info(dev, "Unloading platform device\n");
+	BUG_ON(pdp_drm_driver.unload != NULL);
+	pdp_early_unload(ddev);
+	drm_dev_unregister(ddev);
+	pdp_late_unload(ddev);
+	component_unbind_all(dev, ddev);
+	drm_dev_put(ddev);
+}
+
+static const struct component_master_ops pdp_component_ops = {
+	.bind	= pdp_component_bind,
+	.unbind = pdp_component_unbind,
+};
+
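+/*
+ * With SUPPORT_PLATO_DISPLAY the display is built from several components
+ * (PDP and HDMI), so the DRM device is only registered once all of them
+ * have been bound.
+ */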
+static int pdp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct component_match *match = NULL;
+
+	component_match_add(dev, &match, compare_parent_dev, dev);
+	return component_master_add_with_match(dev, &pdp_component_ops, match);
+}
+
+static int pdp_remove(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &pdp_component_ops);
+	return 0;
+}
+
+#else  /* !SUPPORT_PLATO_DISPLAY */
+
+static int pdp_probe(struct platform_device *pdev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+	struct drm_device *ddev;
+	int ret;
+
+	ddev = drm_dev_alloc(&pdp_drm_driver, &pdev->dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+	if (IS_ERR(ddev))
+		return PTR_ERR(ddev);
+#else
+	if (!ddev)
+		return -ENOMEM;
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+	/* Needed by drm_platform_set_busid */
+	ddev->platformdev = pdev;
+#endif
+	/*
+	 * The load callback, called from drm_dev_register, is deprecated,
+	 * because of potential race conditions.
+	 */
+	BUG_ON(pdp_drm_driver.load != NULL);
+
+	ret = pdp_early_load(ddev);
+	if (ret)
+		goto err_drm_dev_put;
+
+	ret = drm_dev_register(ddev, 0);
+	if (ret)
+		goto err_drm_dev_late_unload;
+
+	ret = pdp_late_load(ddev);
+	if (ret)
+		goto err_drm_dev_unregister;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+		pdp_drm_driver.name,
+		pdp_drm_driver.major,
+		pdp_drm_driver.minor,
+		pdp_drm_driver.patchlevel,
+		pdp_drm_driver.date,
+		ddev->primary->index);
+#endif
+	return 0;
+
+err_drm_dev_unregister:
+	drm_dev_unregister(ddev);
+err_drm_dev_late_unload:
+	pdp_late_unload(ddev);
+err_drm_dev_put:
+	drm_dev_put(ddev);
+	return ret;
+#else
+	return drm_platform_init(&pdp_drm_driver, pdev);
+#endif
+}
+
+static int pdp_remove(struct platform_device *pdev)
+{
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+	/*
+	 * The unload callback, called from drm_dev_unregister, is
+	 * deprecated.
+	 */
+	BUG_ON(pdp_drm_driver.unload != NULL);
+
+	pdp_early_unload(ddev);
+
+	drm_dev_unregister(ddev);
+
+	pdp_late_unload(ddev);
+
+	drm_dev_put(ddev);
+#else
+	drm_put_dev(ddev);
+#endif
+	return 0;
+}
+
+#endif  /* SUPPORT_PLATO_DISPLAY */
+
+static void pdp_shutdown(struct platform_device *pdev)
+{
+}
+
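+/* One driver binds the PDP on Apollo, Odin and (optionally) Plato */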
+static struct platform_device_id pdp_platform_device_id_table[] = {
+	{ .name = APOLLO_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_APOLLO },
+	{ .name = ODN_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_ODIN },
+#if defined(SUPPORT_PLATO_DISPLAY)
+	{ .name = PLATO_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_PLATO },
+#endif  /* SUPPORT_PLATO_DISPLAY */
+	{ },
+};
+
+static struct platform_driver pdp_platform_driver = {
+	.probe		= pdp_probe,
+	.remove		= pdp_remove,
+	.shutdown	= pdp_shutdown,
+	.driver		= {
+		.owner  = THIS_MODULE,
+		.name	= DRIVER_NAME,
+	},
+	.id_table	= pdp_platform_device_id_table,
+};
+
+module_platform_driver(pdp_platform_driver);
+
+MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_DEVICE_TABLE(platform, pdp_platform_device_id_table);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_drv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_drv.h
new file mode 100644
index 0000000..83c9e02
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_drv.h
@@ -0,0 +1,189 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__DRM_PDP_DRV_H__)
+#define __DRM_PDP_DRV_H__
+
+#include <linux/version.h>
+#include <linux/wait.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_mm.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+#include <drm/drm_plane.h>
+#endif
+
+#include "pdp_common.h"
+
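+/* Use the atomic modesetting helpers on 4.14+ unless legacy is forced */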
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && \
+	!defined(PVR_ANDROID_USE_PDP_LEGACY)
+#define PDP_USE_ATOMIC
+#endif
+
+struct pdp_gem_context;
+enum pdp_crtc_flip_status;
+struct pdp_flip_data;
+struct pdp_gem_private;
+
+#if !defined(SUPPORT_PLATO_DISPLAY)
+struct tc_pdp_platform_data;
+#else
+struct plato_pdp_platform_data;
+#endif
+
+struct pdp_drm_private {
+	struct drm_device *dev;
+
+	enum pdp_version version;
+
+	/* created by pdp_gem_init */
+	struct pdp_gem_private	*gem_priv;
+
+	/* initialised by pdp_modeset_early_init */
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	bool display_enabled;
+};
+
+struct pdp_crtc {
+	struct drm_crtc base;
+
+	uint32_t number;
+
+	resource_size_t pdp_reg_size;
+	resource_size_t pdp_reg_phys_base;
+	void __iomem *pdp_reg;
+
+	resource_size_t pdp_bif_reg_size;
+	resource_size_t pdp_bif_reg_phys_base;
+	void __iomem *pdp_bif_reg;
+
+	resource_size_t pll_reg_size;
+	resource_size_t pll_reg_phys_base;
+	void __iomem *pll_reg;
+
+	resource_size_t odn_core_size; /* needed for odin pdp clk reset */
+	resource_size_t odn_core_phys_base;
+	void __iomem *odn_core_reg;
+
+	wait_queue_head_t flip_pending_wait_queue;
+
+	/* Reuse the drm_device event_lock to protect these */
+	atomic_t flip_status;
+	struct drm_pending_vblank_event *flip_event;
+	struct drm_framebuffer *old_fb;
+	struct pdp_flip_data *flip_data;
+	bool flip_async;
+};
+
+#define to_pdp_crtc(crtc) container_of(crtc, struct pdp_crtc, base)
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
+struct pdp_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj[1];
+};
+
+#define to_pdp_framebuffer(fb) container_of(fb, struct pdp_framebuffer, base)
+#else
+#define pdp_framebuffer drm_framebuffer
+#define to_pdp_framebuffer(fb) (fb)
+#endif
+
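+/* Helpers hiding the drm_framebuffer format layout change made in 4.11 */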
+static inline u32 pdp_drm_fb_cpp(struct drm_framebuffer *fb)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+	return fb->format->cpp[0];
+#else
+	return fb->bits_per_pixel / 8;
+#endif
+}
+
+static inline u32 pdp_drm_fb_format(struct drm_framebuffer *fb)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+	return fb->format->format;
+#else
+	return fb->pixel_format;
+#endif
+}
+
+int pdp_debugfs_init(struct drm_minor *minor);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+void pdp_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+struct drm_plane *pdp_plane_create(struct drm_device *dev,
+				   enum drm_plane_type type);
+void pdp_plane_set_surface(struct drm_crtc *crtc, struct drm_plane *plane,
+			   struct drm_framebuffer *fb,
+			   const uint32_t src_x, const uint32_t src_y);
+
+struct drm_crtc *pdp_crtc_create(struct drm_device *dev, uint32_t number,
+				 struct drm_plane *primary_plane);
+void pdp_crtc_set_plane_enabled(struct drm_crtc *crtc, bool enable);
+void pdp_crtc_set_vblank_enabled(struct drm_crtc *crtc, bool enable);
+void pdp_crtc_irq_handler(struct drm_crtc *crtc);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+void pdp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file);
+#endif
+
+struct drm_connector *pdp_dvi_connector_create(struct drm_device *dev);
+
+struct drm_encoder *pdp_tmds_encoder_create(struct drm_device *dev);
+
+int pdp_modeset_early_init(struct pdp_drm_private *dev_priv);
+int pdp_modeset_late_init(struct pdp_drm_private *dev_priv);
+void pdp_modeset_early_cleanup(struct pdp_drm_private *dev_priv);
+void pdp_modeset_late_cleanup(struct pdp_drm_private *dev_priv);
+
+#endif /* !defined(__DRM_PDP_DRV_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_dvi.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_dvi.c
new file mode 100644
index 0000000..c4fd464
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_dvi.c
@@ -0,0 +1,301 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "drm_pdp_drv.h"
+
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#if defined(PDP_USE_ATOMIC)
+#include <drm/drm_atomic_helper.h>
+#endif
+
+#include "kernel_compatibility.h"
+
+struct pdp_mode_data {
+	int hdisplay;
+	int vdisplay;
+	int vrefresh;
+	bool reduced_blanking;
+	bool interlaced;
+	bool margins;
+};
+
+static const struct pdp_mode_data pdp_extra_modes[] = {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
+	{
+		.hdisplay = 1280,
+		.vdisplay = 720,
+		.vrefresh = 60,
+		.reduced_blanking = false,
+		.interlaced = false,
+		.margins = false,
+	},
+	{
+		.hdisplay = 1920,
+		.vdisplay = 1080,
+		.vrefresh = 60,
+		.reduced_blanking = false,
+		.interlaced = false,
+		.margins = false,
+	},
+#endif
+};
+
+static char preferred_mode_name[DRM_DISPLAY_MODE_LEN] = "\0";
+
+module_param_string(dvi_preferred_mode,
+		    preferred_mode_name,
+		    DRM_DISPLAY_MODE_LEN,
+		    0444);
+
+MODULE_PARM_DESC(dvi_preferred_mode,
+		 "Specify the preferred mode (if supported), e.g. 1280x1024.");
+
+static int pdp_dvi_add_extra_modes(struct drm_connector *connector)
+{
+	struct drm_display_mode *mode;
+	int num_modes;
+	int i;
+
+	for (i = 0, num_modes = 0; i < ARRAY_SIZE(pdp_extra_modes); i++) {
+		mode = drm_cvt_mode(connector->dev,
+				    pdp_extra_modes[i].hdisplay,
+				    pdp_extra_modes[i].vdisplay,
+				    pdp_extra_modes[i].vrefresh,
+				    pdp_extra_modes[i].reduced_blanking,
+				    pdp_extra_modes[i].interlaced,
+				    pdp_extra_modes[i].margins);
+		if (mode) {
+			drm_mode_probed_add(connector, mode);
+			num_modes++;
+		}
+	}
+
+	return num_modes;
+}
+
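+/*
+ * Build the mode list without EDID data, honouring the dvi_preferred_mode
+ * module parameter if it names a probed mode and falling back to the
+ * largest supported mode otherwise.
+ */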
+static int pdp_dvi_connector_helper_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	int num_modes;
+	int len = strlen(preferred_mode_name);
+
+	if (len)
+		dev_info(dev->dev, "detected dvi_preferred_mode=%s\n",
+					preferred_mode_name);
+	else
+		dev_info(dev->dev, "no dvi_preferred_mode\n");
+
+	num_modes = drm_add_modes_noedid(connector,
+					 dev->mode_config.max_width,
+					 dev->mode_config.max_height);
+
+	num_modes += pdp_dvi_add_extra_modes(connector);
+	if (num_modes) {
+		struct drm_display_mode *pref_mode = NULL;
+
+		if (len) {
+			struct drm_display_mode *mode;
+			struct list_head *entry;
+
+			list_for_each(entry, &connector->probed_modes) {
+				mode = list_entry(entry,
+						  struct drm_display_mode,
+						  head);
+				if (!strcmp(mode->name, preferred_mode_name)) {
+					pref_mode = mode;
+					break;
+				}
+			}
+		}
+
+		if (pref_mode)
+			pref_mode->type |= DRM_MODE_TYPE_PREFERRED;
+		else
+			drm_set_preferred_mode(connector,
+					       dev->mode_config.max_width,
+					       dev->mode_config.max_height);
+	}
+
+	drm_mode_sort(&connector->probed_modes);
+
+	DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s] found %d modes\n",
+			 connector->base.id,
+			 connector->name,
+			 num_modes);
+
+	return num_modes;
+}
+
+static int pdp_dvi_connector_helper_mode_valid(struct drm_connector *connector,
+					       struct drm_display_mode *mode)
+{
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return MODE_NO_INTERLACE;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	return MODE_OK;
+}
+
+#if !defined(PDP_USE_ATOMIC)
+static struct drm_encoder *
+pdp_dvi_connector_helper_best_encoder(struct drm_connector *connector)
+{
+	/* Pick the first encoder we find */
+	if (connector->encoder_ids[0] != 0) {
+		struct drm_encoder *encoder;
+
+		encoder = drm_encoder_find(connector->dev,
+					   NULL,
+					   connector->encoder_ids[0]);
+		if (encoder) {
+			DRM_DEBUG_DRIVER("[ENCODER:%d:%s] best for "
+					 "[CONNECTOR:%d:%s]\n",
+					 encoder->base.id,
+					 encoder->name,
+					 connector->base.id,
+					 connector->name);
+			return encoder;
+		}
+	}
+
+	return NULL;
+}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+static enum drm_connector_status
+pdp_dvi_connector_detect(struct drm_connector *connector,
+			 bool force)
+{
+	/*
+	 * It appears that there is no way to determine if a monitor
+	 * is connected. This needs to be set to connected otherwise
+	 * DPMS never gets set to ON.
+	 */
+	return connector_status_connected;
+}
+#endif
+
+static void pdp_dvi_connector_destroy(struct drm_connector *connector)
+{
+	struct pdp_drm_private *dev_priv = connector->dev->dev_private;
+
+	DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n",
+			 connector->base.id,
+			 connector->name);
+
+	drm_connector_cleanup(connector);
+
+	kfree(connector);
+	dev_priv->connector = NULL;
+}
+
+static void pdp_dvi_connector_force(struct drm_connector *connector)
+{
+}
+
+static struct drm_connector_helper_funcs pdp_dvi_connector_helper_funcs = {
+	.get_modes = pdp_dvi_connector_helper_get_modes,
+	.mode_valid = pdp_dvi_connector_helper_mode_valid,
+	/*
+	 * For atomic, don't set atomic_best_encoder or best_encoder. This will
+	 * cause the DRM core to fallback to drm_atomic_helper_best_encoder().
+	 * This is fine as we only have a single connector and encoder.
+	 */
+#if !defined(PDP_USE_ATOMIC)
+	.best_encoder = pdp_dvi_connector_helper_best_encoder,
+#endif
+};
+
+static const struct drm_connector_funcs pdp_dvi_connector_funcs = {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	.detect = pdp_dvi_connector_detect,
+#endif
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = pdp_dvi_connector_destroy,
+	.force = pdp_dvi_connector_force,
+#if defined(PDP_USE_ATOMIC)
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+#else
+	.dpms = drm_helper_connector_dpms,
+#endif
+};
+
+struct drm_connector *
+pdp_dvi_connector_create(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+	if (!connector)
+		return ERR_PTR(-ENOMEM);
+
+	drm_connector_init(dev,
+			   connector,
+			   &pdp_dvi_connector_funcs,
+			   DRM_MODE_CONNECTOR_DVID);
+	drm_connector_helper_add(connector, &pdp_dvi_connector_helper_funcs);
+
+	connector->dpms = DRM_MODE_DPMS_OFF;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+
+	DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n",
+			 connector->base.id,
+			 connector->name);
+
+	return connector;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_gem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_gem.c
new file mode 100644
index 0000000..03d8457
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_gem.c
@@ -0,0 +1,712 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/capability.h>
+
+#include <drm/drm_mm.h>
+
+#if defined(SUPPORT_PLATO_DISPLAY)
+#include "plato_drv.h"
+#else
+#include "tc_drv.h"
+#endif
+
+#include "drm_pdp_drv.h"
+#include "drm_pdp_gem.h"
+#include "pdp_drm.h"
+#include "kernel_compatibility.h"
+
+struct pdp_gem_object {
+	struct drm_gem_object base;
+
+	/* Non-null if backing originated from this driver */
+	struct drm_mm_node *vram;
+
+	/* Non-null if backing was imported */
+	struct sg_table *sgt;
+
+	phys_addr_t cpu_addr;
+	dma_addr_t dev_addr;
+
+	struct reservation_object _resv;
+	struct reservation_object *resv;
+
+	bool cpu_prep;
+};
+
+#define to_pdp_obj(obj) container_of(obj, struct pdp_gem_object, base)
+
+#if defined(SUPPORT_PLATO_DISPLAY)
+typedef struct plato_pdp_platform_data pdp_gem_platform_data;
+#else
+typedef struct tc_pdp_platform_data pdp_gem_platform_data;
+#endif
+
+struct pdp_gem_private {
+	struct mutex			vram_lock;
+	struct drm_mm			vram;
+};
+
+static struct pdp_gem_object *
+pdp_gem_private_object_create(struct drm_device *dev,
+			      size_t size)
+{
+	struct pdp_gem_object *pdp_obj;
+
+	WARN_ON(PAGE_ALIGN(size) != size);
+
+	pdp_obj = kzalloc(sizeof(*pdp_obj), GFP_KERNEL);
+	if (!pdp_obj)
+		return ERR_PTR(-ENOMEM);
+
+	drm_gem_private_object_init(dev, &pdp_obj->base, size);
+	reservation_object_init(&pdp_obj->_resv);
+	/* Default to the embedded reservation object; importers override it */
+	pdp_obj->resv = &pdp_obj->_resv;
+
+	return pdp_obj;
+}
+
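+/*
+ * Allocate a GEM object backed by card memory, carved out of the PDP heap
+ * by the drm_mm range allocator.
+ */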
+static struct drm_gem_object *pdp_gem_object_create(struct drm_device *dev,
+					struct pdp_gem_private *gem_priv,
+					size_t size,
+					u32 flags)
+{
+	pdp_gem_platform_data *pdata =
+		to_platform_device(dev->dev)->dev.platform_data;
+	struct pdp_gem_object *pdp_obj;
+	struct drm_mm_node *node;
+	int err = 0;
+
+	pdp_obj = pdp_gem_private_object_create(dev, size);
+	if (IS_ERR(pdp_obj)) {
+		/* pdp_gem_private_object_create() returns an ERR_PTR */
+		err = PTR_ERR(pdp_obj);
+		goto err_exit;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node) {
+		err = -ENOMEM;
+		goto err_unref;
+	}
+
+	mutex_lock(&gem_priv->vram_lock);
+	err = drm_mm_insert_node(&gem_priv->vram, node, size);
+	mutex_unlock(&gem_priv->vram_lock);
+	if (err)
+		goto err_free_node;
+
+	pdp_obj->vram = node;
+	pdp_obj->dev_addr = pdp_obj->vram->start;
+	pdp_obj->cpu_addr = pdata->memory_base + pdp_obj->dev_addr;
+	pdp_obj->resv = &pdp_obj->_resv;
+
+	return &pdp_obj->base;
+
+err_free_node:
+	kfree(node);
+err_unref:
+	drm_gem_object_put_unlocked(&pdp_obj->base);
+err_exit:
+	return ERR_PTR(err);
+}
+
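+/*
+ * Fault handler: map the faulting page of card memory into the VMA one
+ * PFN at a time; the #ifs below track fault API changes across kernel
+ * versions.
+ */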
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+int pdp_gem_object_vm_fault(struct vm_fault *vmf)
+#else
+int pdp_gem_object_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+#endif
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+	struct vm_area_struct *vma = vmf->vma;
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	unsigned long addr = vmf->address;
+#else
+	unsigned long addr = (unsigned long)vmf->virtual_address;
+#endif
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
+	unsigned long off;
+	unsigned long pfn;
+
+	off = addr - vma->vm_start;
+	pfn = (pdp_obj->cpu_addr + off) >> PAGE_SHIFT;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
+	return vmf_insert_pfn(vma, addr, pfn);
+#else
+	{
+		int err;
+
+		err = vm_insert_pfn(vma, addr, pfn);
+		switch (err) {
+		case 0:
+		case -EBUSY:
+			return VM_FAULT_NOPAGE;
+		case -ENOMEM:
+			return VM_FAULT_OOM;
+		default:
+			return VM_FAULT_SIGBUS;
+		}
+	}
+#endif
+}
+
+void pdp_gem_object_free_priv(struct pdp_gem_private *gem_priv,
+			      struct drm_gem_object *obj)
+{
+	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
+
+	drm_gem_free_mmap_offset(obj);
+
+	if (&pdp_obj->_resv == pdp_obj->resv)
+		reservation_object_fini(pdp_obj->resv);
+
+	if (pdp_obj->vram) {
+		mutex_lock(&gem_priv->vram_lock);
+		drm_mm_remove_node(pdp_obj->vram);
+		mutex_unlock(&gem_priv->vram_lock);
+
+		kfree(pdp_obj->vram);
+	} else if (obj->import_attach) {
+		drm_prime_gem_destroy(obj, pdp_obj->sgt);
+	}
+
+	drm_gem_object_release(&pdp_obj->base);
+	kfree(pdp_obj);
+}
+
+static int pdp_gem_prime_attach(struct dma_buf *dma_buf,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+				struct device *dev,
+#endif
+				struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	/* Restrict access to Rogue */
+	if (WARN_ON(!obj->dev->dev->parent) ||
+	    obj->dev->dev->parent != attach->dev->parent)
+		return -EPERM;
+
+	return 0;
+}
+
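+/*
+ * Card memory is physically contiguous, so the exported scatter list
+ * always has a single entry.
+ */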
+static struct sg_table *
+pdp_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+			  enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
+	struct sg_table *sgt;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return NULL;
+
+	if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+		goto err_free_sgt;
+
+	sg_dma_address(sgt->sgl) = pdp_obj->dev_addr;
+	sg_dma_len(sgt->sgl) = obj->size;
+
+	return sgt;
+
+err_free_sgt:
+	kfree(sgt);
+	return NULL;
+}
+
+static void pdp_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+					struct sg_table *sgt,
+					enum dma_data_direction dir)
+{
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+static void *pdp_gem_prime_kmap_atomic(struct dma_buf *dma_buf,
+				       unsigned long page_num)
+{
+	return NULL;
+}
+#endif
+
+static void *pdp_gem_prime_kmap(struct dma_buf *dma_buf,
+				unsigned long page_num)
+{
+	return NULL;
+}
+
+static int pdp_gem_prime_mmap(struct dma_buf *dma_buf,
+			      struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	int err;
+
+	mutex_lock(&obj->dev->struct_mutex);
+	err = drm_gem_mmap_obj(obj, obj->size, vma);
+	mutex_unlock(&obj->dev->struct_mutex);
+
+	return err;
+}
+
+#if defined(CONFIG_X86)
+static void *pdp_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
+	void *vaddr;
+
+	mutex_lock(&obj->dev->struct_mutex);
+
+	/*
+	 * On x86 platforms, the pointer returned by ioremap can be dereferenced
+	 * directly. As such, explicitly cast away the __iomem qualifier.
+	 */
+	vaddr = (void __force *)ioremap(pdp_obj->cpu_addr, obj->size);
+	if (vaddr == NULL)
+		DRM_DEBUG_DRIVER("ioremap failed");
+
+	mutex_unlock(&obj->dev->struct_mutex);
+
+	return vaddr;
+}
+
+static void pdp_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	mutex_lock(&obj->dev->struct_mutex);
+	iounmap((void __iomem *)vaddr);
+	mutex_unlock(&obj->dev->struct_mutex);
+}
+#endif
+
+static const struct dma_buf_ops pdp_gem_prime_dmabuf_ops = {
+	.attach		= pdp_gem_prime_attach,
+	.map_dma_buf	= pdp_gem_prime_map_dma_buf,
+	.unmap_dma_buf	= pdp_gem_prime_unmap_dma_buf,
+	.release	= drm_gem_dmabuf_release,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+	.map_atomic	= pdp_gem_prime_kmap_atomic,
+#endif
+	.map		= pdp_gem_prime_kmap,
+#else
+	.kmap_atomic	= pdp_gem_prime_kmap_atomic,
+	.kmap		= pdp_gem_prime_kmap,
+#endif
+	.mmap		= pdp_gem_prime_mmap,
+#if defined(CONFIG_X86)
+	.vmap		= pdp_gem_prime_vmap,
+	.vunmap		= pdp_gem_prime_vunmap
+#endif
+};
+
+static int
+pdp_gem_lookup_our_object(struct drm_file *file, u32 handle,
+			  struct drm_gem_object **objp)
+{
+	struct drm_gem_object *obj;
+
+	obj = drm_gem_object_lookup(file, handle);
+	if (!obj)
+		return -ENOENT;
+
+	if (obj->import_attach) {
+		/*
+		 * The dmabuf associated with the object is not one of
+		 * ours. Our own buffers are handled differently on import.
+		 */
+		drm_gem_object_put_unlocked(obj);
+		return -EINVAL;
+	}
+
+	*objp = obj;
+	return 0;
+}
+
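+/*
+ * Export through a driver-local dma_buf so that the object's reservation
+ * object is shared with importers.
+ */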
+struct dma_buf *pdp_gem_prime_export(struct drm_device *dev,
+				     struct drm_gem_object *obj,
+				     int flags)
+{
+	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	DEFINE_DMA_BUF_EXPORT_INFO(export_info);
+
+	export_info.ops = &pdp_gem_prime_dmabuf_ops;
+	export_info.size = obj->size;
+	export_info.flags = flags;
+	export_info.resv = pdp_obj->resv;
+	export_info.priv = obj;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+	return drm_gem_dmabuf_export(dev, &export_info);
+#else
+	return dma_buf_export(&export_info);
+#endif
+#else
+	return dma_buf_export(obj, &pdp_gem_prime_dmabuf_ops, obj->size,
+			      flags, pdp_obj->resv);
+#endif
+}
+
+struct drm_gem_object *
+pdp_gem_prime_import(struct drm_device *dev,
+		     struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	if (obj->dev == dev) {
+		BUG_ON(dma_buf->ops != &pdp_gem_prime_dmabuf_ops);
+
+		/*
+		 * The dmabuf is one of ours, so return the associated
+		 * PDP GEM object, rather than create a new one.
+		 */
+		drm_gem_object_get(obj);
+
+		return obj;
+	}
+
+	return drm_gem_prime_import(dev, dma_buf);
+}
+
+struct drm_gem_object *
+pdp_gem_prime_import_sg_table(struct drm_device *dev,
+			      struct dma_buf_attachment *attach,
+			      struct sg_table *sgt)
+{
+	pdp_gem_platform_data *pdata =
+		to_platform_device(dev->dev)->dev.platform_data;
+	struct pdp_gem_object *pdp_obj;
+	int err;
+
+	pdp_obj = pdp_gem_private_object_create(dev, attach->dmabuf->size);
+	if (IS_ERR(pdp_obj)) {
+		/* pdp_gem_private_object_create() returns an ERR_PTR */
+		err = PTR_ERR(pdp_obj);
+		goto err_exit;
+	}
+
+	pdp_obj->sgt = sgt;
+
+	/* We only expect a single entry for card memory */
+	if (pdp_obj->sgt->nents != 1) {
+		err = -EINVAL;
+		goto err_obj_unref;
+	}
+
+	pdp_obj->dev_addr = sg_dma_address(pdp_obj->sgt->sgl);
+	pdp_obj->cpu_addr = pdata->memory_base + pdp_obj->dev_addr;
+	pdp_obj->resv = attach->dmabuf->resv;
+
+	return &pdp_obj->base;
+
+err_obj_unref:
+	drm_gem_object_put_unlocked(&pdp_obj->base);
+err_exit:
+	return ERR_PTR(err);
+}
+
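+/*
+ * Dumb buffer creation: the pitch is width times whole bytes per pixel
+ * and the allocation size is rounded up to a full page.
+ */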
+int pdp_gem_dumb_create_priv(struct drm_file *file,
+			     struct drm_device *dev,
+			     struct pdp_gem_private *gem_priv,
+			     struct drm_mode_create_dumb *args)
+{
+	struct drm_gem_object *obj;
+	u32 handle;
+	u32 pitch;
+	size_t size;
+	int err;
+
+	pitch = args->width * (ALIGN(args->bpp, 8) >> 3);
+	size = PAGE_ALIGN(pitch * args->height);
+
+	obj = pdp_gem_object_create(dev, gem_priv, size, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	err = drm_gem_handle_create(file, obj, &handle);
+	if (err)
+		goto exit;
+
+	args->handle = handle;
+	args->pitch = pitch;
+	args->size = size;
+
+exit:
+	drm_gem_object_put_unlocked(obj);
+	return err;
+}
+
+int pdp_gem_dumb_map_offset(struct drm_file *file,
+			    struct drm_device *dev,
+			    uint32_t handle,
+			    uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int err;
+
+	mutex_lock(&dev->struct_mutex);
+
+	err = pdp_gem_lookup_our_object(file, handle, &obj);
+	if (err)
+		goto exit_unlock;
+
+	err = drm_gem_create_mmap_offset(obj);
+	if (err)
+		goto exit_obj_unref;
+
+	*offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+exit_obj_unref:
+	drm_gem_object_put_unlocked(obj);
+exit_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return err;
+}
+
+struct pdp_gem_private *pdp_gem_init(struct drm_device *dev)
+{
+#if !defined(SUPPORT_ION) || defined(SUPPORT_GEM_ALLOC)
+	pdp_gem_platform_data *pdata =
+		to_platform_device(dev->dev)->dev.platform_data;
+#endif
+	struct pdp_gem_private *gem_priv =
+					kmalloc(sizeof(*gem_priv), GFP_KERNEL);
+
+	if (!gem_priv)
+		return NULL;
+
+	mutex_init(&gem_priv->vram_lock);
+
+	memset(&gem_priv->vram, 0, sizeof(gem_priv->vram));
+
+#if defined(SUPPORT_ION) && !defined(SUPPORT_GEM_ALLOC)
+	drm_mm_init(&gem_priv->vram, 0, 0);
+	DRM_INFO("%s has no directly allocatable memory; the memory is managed by ION\n",
+		dev->driver->name);
+#else
+	drm_mm_init(&gem_priv->vram,
+			pdata->pdp_heap_memory_base - pdata->memory_base,
+			pdata->pdp_heap_memory_size);
+
+	DRM_INFO("%s has %pa bytes of allocatable memory at 0x%llx = (0x%llx - 0x%llx)\n",
+		dev->driver->name, &pdata->pdp_heap_memory_size,
+		(u64)(pdata->pdp_heap_memory_base - pdata->memory_base),
+		(u64)pdata->pdp_heap_memory_base, (u64)pdata->memory_base);
+#endif
+	return gem_priv;
+}
+
+void pdp_gem_cleanup(struct pdp_gem_private *gem_priv)
+{
+	drm_mm_takedown(&gem_priv->vram);
+	mutex_destroy(&gem_priv->vram_lock);
+
+	kfree(gem_priv);
+}
+
+struct reservation_object *pdp_gem_get_resv(struct drm_gem_object *obj)
+{
+	return (to_pdp_obj(obj)->resv);
+}
+
+u64 pdp_gem_get_dev_addr(struct drm_gem_object *obj)
+{
+	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
+
+	return pdp_obj->dev_addr;
+}
+
+int pdp_gem_object_create_ioctl_priv(struct drm_device *dev,
+				struct pdp_gem_private *gem_priv,
+				void *data,
+				struct drm_file *file)
+{
+	struct drm_pdp_gem_create *args = data;
+	struct drm_gem_object *obj;
+	int err;
+
+	if (args->flags) {
+		DRM_ERROR("invalid flags: %#08x\n", args->flags);
+		return -EINVAL;
+	}
+
+	if (args->handle) {
+		DRM_ERROR("invalid handle (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	obj = pdp_gem_object_create(dev,
+					gem_priv,
+					PAGE_ALIGN(args->size),
+					args->flags);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	err = drm_gem_handle_create(file, obj, &args->handle);
+	drm_gem_object_put_unlocked(obj);
+
+	return err;
+}
+
+int pdp_gem_object_mmap_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file)
+{
+	struct drm_pdp_gem_mmap *args = (struct drm_pdp_gem_mmap *)data;
+
+	if (args->pad) {
+		DRM_ERROR("invalid pad (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	if (args->offset) {
+		DRM_ERROR("invalid offset (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	return pdp_gem_dumb_map_offset(file, dev, args->handle, &args->offset);
+}
+
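+/*
+ * CPU access is serialised against device access through the buffer's
+ * reservation object; blocking waits are capped at 30 seconds.
+ */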
+int pdp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file)
+{
+	struct drm_pdp_gem_cpu_prep *args = (struct drm_pdp_gem_cpu_prep *)data;
+	struct drm_gem_object *obj;
+	struct pdp_gem_object *pdp_obj;
+	bool write = !!(args->flags & PDP_GEM_CPU_PREP_WRITE);
+	bool wait = !(args->flags & PDP_GEM_CPU_PREP_NOWAIT);
+	int err = 0;
+
+	if (args->flags & ~(PDP_GEM_CPU_PREP_READ |
+			    PDP_GEM_CPU_PREP_WRITE |
+			    PDP_GEM_CPU_PREP_NOWAIT)) {
+		DRM_ERROR("invalid flags: %#08x\n", args->flags);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	err = pdp_gem_lookup_our_object(file, args->handle, &obj);
+	if (err)
+		goto exit_unlock;
+
+	pdp_obj = to_pdp_obj(obj);
+
+	if (pdp_obj->cpu_prep) {
+		err = -EBUSY;
+		goto exit_unref;
+	}
+
+	if (wait) {
+		long lerr;
+
+		lerr = reservation_object_wait_timeout_rcu(pdp_obj->resv,
+							   write,
+							   true,
+							   30 * HZ);
+		if (!lerr)
+			err = -EBUSY;
+		else if (lerr < 0)
+			err = lerr;
+	} else {
+		if (!reservation_object_test_signaled_rcu(pdp_obj->resv,
+							  write))
+			err = -EBUSY;
+	}
+
+	if (!err)
+		pdp_obj->cpu_prep = true;
+
+exit_unref:
+	drm_gem_object_put_unlocked(obj);
+exit_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return err;
+}
+
+int pdp_gem_object_cpu_fini_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file)
+{
+	struct drm_pdp_gem_cpu_fini *args = (struct drm_pdp_gem_cpu_fini *)data;
+	struct drm_gem_object *obj;
+	struct pdp_gem_object *pdp_obj;
+	int err = 0;
+
+	if (args->pad) {
+		DRM_ERROR("invalid pad (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	err = pdp_gem_lookup_our_object(file, args->handle, &obj);
+	if (err)
+		goto exit_unlock;
+
+	pdp_obj = to_pdp_obj(obj);
+
+	if (!pdp_obj->cpu_prep) {
+		err = -EINVAL;
+		goto exit_unref;
+	}
+
+	pdp_obj->cpu_prep = false;
+
+exit_unref:
+	drm_gem_object_put_unlocked(obj);
+exit_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return err;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_gem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_gem.h
new file mode 100644
index 0000000..d878c8b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_gem.h
@@ -0,0 +1,108 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__DRM_PDP_GEM_H__)
+#define __DRM_PDP_GEM_H__
+
+#include <linux/version.h>
+#include <drm/drmP.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+#include <drm/drm_gem.h>
+#endif
+
+struct pdp_gem_private;
+
+struct pdp_gem_private *pdp_gem_init(struct drm_device *dev);
+
+void pdp_gem_cleanup(struct pdp_gem_private *dev_priv);
+
+/* ioctl functions */
+int pdp_gem_object_create_ioctl_priv(struct drm_device *dev,
+				     struct pdp_gem_private *gem_priv,
+				     void *data,
+				     struct drm_file *file);
+int pdp_gem_object_mmap_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file);
+int pdp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file);
+int pdp_gem_object_cpu_fini_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file);
+
+/* drm driver functions */
+void pdp_gem_object_free_priv(struct pdp_gem_private *gem_priv,
+			      struct drm_gem_object *obj);
+
+struct dma_buf *pdp_gem_prime_export(struct drm_device *dev,
+				     struct drm_gem_object *obj,
+				     int flags);
+
+struct drm_gem_object *pdp_gem_prime_import(struct drm_device *dev,
+					    struct dma_buf *dma_buf);
+
+struct drm_gem_object *
+pdp_gem_prime_import_sg_table(struct drm_device *dev,
+			      struct dma_buf_attachment *attach,
+			      struct sg_table *sgt);
+
+int pdp_gem_dumb_create_priv(struct drm_file *file,
+			     struct drm_device *dev,
+			     struct pdp_gem_private *gem_priv,
+			     struct drm_mode_create_dumb *args);
+
+int pdp_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+			    uint32_t handle, uint64_t *offset);
+
+/* vm operation functions */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+int pdp_gem_object_vm_fault(struct vm_fault *vmf);
+#else
+int pdp_gem_object_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+#endif
+
+/* internal interfaces */
+struct reservation_object *pdp_gem_get_resv(struct drm_gem_object *obj);
+u64 pdp_gem_get_dev_addr(struct drm_gem_object *obj);
+
+#endif /* !defined(__DRM_PDP_GEM_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_modeset.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_modeset.c
new file mode 100644
index 0000000..3110dad
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_modeset.c
@@ -0,0 +1,368 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "drm_pdp_drv.h"
+
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+#include <drm/drm_gem.h>
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
+#define drm_gem_fb_create(...) pdp_framebuffer_create(__VA_ARGS__)
+#else
+#include <drm/drm_gem_framebuffer_helper.h>
+#endif
+
+#if defined(PDP_USE_ATOMIC)
+#include <drm/drm_atomic_helper.h>
+#endif
+
+#include "kernel_compatibility.h"
+
+#define PDP_WIDTH_MIN			640
+#define PDP_WIDTH_MAX			1280
+#define PDP_HEIGHT_MIN			480
+#define PDP_HEIGHT_MAX			1024
+
+#define ODIN_PDP_WIDTH_MAX		1920
+#define ODIN_PDP_HEIGHT_MAX		1080
+
+#define PLATO_PDP_WIDTH_MAX		1920
+#define PLATO_PDP_HEIGHT_MAX		1080
+
+static bool async_flip_enable = true;
+
+module_param(async_flip_enable, bool, 0444);
+
+MODULE_PARM_DESC(async_flip_enable,
+		 "Enable support for 'faked' async flipping (default: Y)");
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
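+/*
+ * Pre-4.14 kernels lack drm_gem_framebuffer_helper, so equivalent
+ * framebuffer create/destroy/create_handle callbacks are provided here.
+ */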
+static void pdp_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb);
+
+	DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id);
+
+	drm_framebuffer_cleanup(fb);
+
+	drm_gem_object_put_unlocked(pdp_fb->obj[0]);
+
+	kfree(pdp_fb);
+}
+
+static int pdp_framebuffer_create_handle(struct drm_framebuffer *fb,
+					 struct drm_file *file,
+					 unsigned int *handle)
+{
+	struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb);
+
+	DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id);
+
+	return drm_gem_handle_create(file, pdp_fb->obj[0], handle);
+}
+
+static const struct drm_framebuffer_funcs pdp_framebuffer_funcs = {
+	.destroy = pdp_framebuffer_destroy,
+	.create_handle = pdp_framebuffer_create_handle,
+	.dirty = NULL,
+};
+
+static struct drm_framebuffer *
+pdp_framebuffer_create(struct drm_device *dev,
+		       struct drm_file *file,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \
+	(defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)))
+		       const
+#endif
+		       struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct pdp_framebuffer *pdp_fb;
+	int err;
+
+	obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
+	if (!obj) {
+		DRM_ERROR("failed to find buffer with handle %u\n",
+			  mode_cmd->handles[0]);
+		err = -ENOENT;
+		goto err_out;
+	}
+
+	pdp_fb = kzalloc(sizeof(*pdp_fb), GFP_KERNEL);
+	if (!pdp_fb) {
+		err = -ENOMEM;
+		goto err_obj_put;
+	}
+
+	drm_helper_mode_fill_fb_struct(dev_priv->dev, &pdp_fb->base, mode_cmd);
+	pdp_fb->obj[0] = obj;
+
+	err = drm_framebuffer_init(dev_priv->dev, &pdp_fb->base,
+				   &pdp_framebuffer_funcs);
+	if (err) {
+		DRM_ERROR("failed to initialise framebuffer (err=%d)\n", err);
+		goto err_free_fb;
+	}
+
+	DRM_DEBUG_DRIVER("[FB:%d]\n", pdp_fb->base.base.id);
+
+	return &pdp_fb->base;
+
+err_free_fb:
+	kfree(pdp_fb);
+err_obj_put:
+	drm_gem_object_put_unlocked(obj);
+err_out:
+	return ERR_PTR(err);
+}
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */
+
+/*************************************************************************
+ * DRM mode config callbacks
+ **************************************************************************/
+
+static struct drm_framebuffer *
+pdp_fb_create(struct drm_device *dev,
+			struct drm_file *file,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \
+	(defined(CHROMIUMOS_KERNEL) && \
+	      (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)))
+			const
+#endif
+			struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_framebuffer *fb;
+
+	switch (mode_cmd->pixel_format) {
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_XRGB8888:
+		break;
+	default:
+		DRM_ERROR_RATELIMITED("pixel format not supported (format = %u)\n",
+			  mode_cmd->pixel_format);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (mode_cmd->flags & DRM_MODE_FB_INTERLACED) {
+		DRM_ERROR_RATELIMITED("interlaced framebuffers not supported\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	if (mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE) {
+		DRM_ERROR_RATELIMITED("format modifier 0x%llx is not supported\n",
+			  mode_cmd->modifier[0]);
+		return ERR_PTR(-EINVAL);
+	}
+#endif
+
+	fb = drm_gem_fb_create(dev, file, mode_cmd);
+	if (IS_ERR(fb))
+		goto out;
+
+	DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id);
+
+out:
+	return fb;
+}
+
+static const struct drm_mode_config_funcs pdp_mode_config_funcs = {
+	.fb_create = pdp_fb_create,
+	.output_poll_changed = NULL,
+#if defined(PDP_USE_ATOMIC)
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+#endif
+};
+
+int pdp_modeset_early_init(struct pdp_drm_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int err;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.funcs = &pdp_mode_config_funcs;
+	dev->mode_config.min_width = PDP_WIDTH_MIN;
+	dev->mode_config.min_height = PDP_HEIGHT_MIN;
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_APOLLO:
+		dev->mode_config.max_width = PDP_WIDTH_MAX;
+		dev->mode_config.max_height = PDP_HEIGHT_MAX;
+		break;
+	case PDP_VERSION_ODIN:
+		dev->mode_config.max_width = ODIN_PDP_WIDTH_MAX;
+		dev->mode_config.max_height = ODIN_PDP_HEIGHT_MAX;
+		break;
+	case PDP_VERSION_PLATO:
+		dev->mode_config.max_width = PLATO_PDP_WIDTH_MAX;
+		dev->mode_config.max_height = PLATO_PDP_HEIGHT_MAX;
+		break;
+	default:
+		BUG();
+	}
+
+	DRM_INFO("max_width is %d\n",
+		dev->mode_config.max_width);
+	DRM_INFO("max_height is %d\n",
+		dev->mode_config.max_height);
+
+	dev->mode_config.fb_base = 0;
+	dev->mode_config.async_page_flip = async_flip_enable;
+
+	DRM_INFO("%s async flip support is %s\n",
+		 dev->driver->name, async_flip_enable ? "enabled" : "disabled");
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	dev->mode_config.allow_fb_modifiers = true;
+#endif
+
+	dev_priv->plane = pdp_plane_create(dev, DRM_PLANE_TYPE_PRIMARY);
+	if (IS_ERR(dev_priv->plane)) {
+		DRM_ERROR("failed to create a primary plane\n");
+		err = PTR_ERR(dev_priv->plane);
+		goto err_config_cleanup;
+	}
+
+	dev_priv->crtc = pdp_crtc_create(dev, 0, dev_priv->plane);
+	if (IS_ERR(dev_priv->crtc)) {
+		DRM_ERROR("failed to create a CRTC\n");
+		err = PTR_ERR(dev_priv->crtc);
+		goto err_config_cleanup;
+	}
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_APOLLO:
+	case PDP_VERSION_ODIN:
+		dev_priv->connector = pdp_dvi_connector_create(dev);
+		if (IS_ERR(dev_priv->connector)) {
+			DRM_ERROR("failed to create a connector\n");
+			err = PTR_ERR(dev_priv->connector);
+			goto err_config_cleanup;
+		}
+
+		dev_priv->encoder = pdp_tmds_encoder_create(dev);
+		if (IS_ERR(dev_priv->encoder)) {
+			DRM_ERROR("failed to create an encoder\n");
+			err = PTR_ERR(dev_priv->encoder);
+			goto err_config_cleanup;
+		}
+
+		err = drm_connector_attach_encoder(dev_priv->connector,
+						   dev_priv->encoder);
+		if (err) {
+			DRM_ERROR("failed to attach [ENCODER:%d:%s] to [CONNECTOR:%d:%s] (err=%d)\n",
+				  dev_priv->encoder->base.id,
+				  dev_priv->encoder->name,
+				  dev_priv->connector->base.id,
+				  dev_priv->connector->name,
+				  err);
+			goto err_config_cleanup;
+		}
+		break;
+	case PDP_VERSION_PLATO:
+		/* Plato connectors are created by the HDMI component driver */
+		break;
+	default:
+		BUG();
+	}
+
+	DRM_DEBUG_DRIVER("initialised\n");
+
+	return 0;
+
+err_config_cleanup:
+	drm_mode_config_cleanup(dev);
+
+	return err;
+}
+
+int pdp_modeset_late_init(struct pdp_drm_private *dev_priv)
+{
+	struct drm_device *ddev = dev_priv->dev;
+
+	drm_mode_config_reset(ddev);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+	if (dev_priv->connector != NULL) {
+		int err;
+
+		err = drm_connector_register(dev_priv->connector);
+		if (err) {
+			DRM_ERROR("[CONNECTOR:%d:%s] failed to register (err=%d)\n",
+				  dev_priv->connector->base.id,
+				  dev_priv->connector->name,
+				  err);
+			return err;
+		}
+	}
+#endif
+	return 0;
+}
+
+void pdp_modeset_early_cleanup(struct pdp_drm_private *dev_priv)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+	if (dev_priv->connector != NULL)
+		drm_connector_unregister(dev_priv->connector);
+#endif
+}
+
+void pdp_modeset_late_cleanup(struct pdp_drm_private *dev_priv)
+{
+	drm_mode_config_cleanup(dev_priv->dev);
+
+	DRM_DEBUG_DRIVER("cleaned up\n");
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_plane.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_plane.c
new file mode 100644
index 0000000..efa5798
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_plane.c
@@ -0,0 +1,243 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "drm_pdp_drv.h"
+
+#include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
+
+#if defined(PDP_USE_ATOMIC)
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#endif
+
+#include "drm_pdp_gem.h"
+#include "pdp_apollo.h"
+#include "pdp_odin.h"
+#include "pdp_plato.h"
+
+#include "kernel_compatibility.h"
+
+
+#if defined(PDP_USE_ATOMIC)
+static int pdp_plane_helper_atomic_check(struct drm_plane *plane,
+					 struct drm_plane_state *state)
+{
+	struct drm_crtc_state *crtc_new_state;
+
+	if (!state->crtc)
+		return 0;
+
+	crtc_new_state = drm_atomic_get_new_crtc_state(state->state,
+						       state->crtc);
+
+	return drm_atomic_helper_check_plane_state(state, crtc_new_state,
+						   DRM_PLANE_HELPER_NO_SCALING,
+						   DRM_PLANE_HELPER_NO_SCALING,
+						   false, true);
+}
+
+static void pdp_plane_helper_atomic_update(struct drm_plane *plane,
+					   struct drm_plane_state *old_state)
+{
+	struct drm_plane_state *plane_state = plane->state;
+	struct drm_framebuffer *fb = plane_state->fb;
+
+	if (fb) {
+		pdp_plane_set_surface(plane_state->crtc, plane, fb,
+				      plane_state->src_x, plane_state->src_y);
+	}
+}
+
+static const struct drm_plane_helper_funcs pdp_plane_helper_funcs = {
+	.prepare_fb =  drm_gem_fb_prepare_fb,
+	.atomic_check = pdp_plane_helper_atomic_check,
+	.atomic_update = pdp_plane_helper_atomic_update,
+};
+
+static const struct drm_plane_funcs pdp_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = drm_primary_helper_destroy,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+#else
+#define pdp_plane_funcs drm_primary_helper_funcs
+#endif
+
+struct drm_plane *pdp_plane_create(struct drm_device *dev,
+				   enum drm_plane_type type)
+{
+	struct drm_plane *plane;
+	const uint32_t supported_formats[] = {
+		DRM_FORMAT_XRGB8888,
+		DRM_FORMAT_ARGB8888,
+	};
+	int err;
+
+	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+	if (!plane) {
+		err = -ENOMEM;
+		goto err_exit;
+	}
+
+	err = drm_universal_plane_init(dev, plane, 0, &pdp_plane_funcs,
+				       supported_formats,
+				       ARRAY_SIZE(supported_formats),
+				       NULL, type, NULL);
+	if (err)
+		goto err_plane_free;
+
+#if defined(PDP_USE_ATOMIC)
+	drm_plane_helper_add(plane, &pdp_plane_helper_funcs);
+#endif
+
+	DRM_DEBUG_DRIVER("[PLANE:%d]\n", plane->base.id);
+
+	return plane;
+
+err_plane_free:
+	kfree(plane);
+err_exit:
+	return ERR_PTR(err);
+}
+
+void pdp_plane_set_surface(struct drm_crtc *crtc, struct drm_plane *plane,
+			   struct drm_framebuffer *fb,
+			   const uint32_t src_x, const uint32_t src_y)
+{
+	struct pdp_drm_private *dev_priv = plane->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb);
+	unsigned int pitch = fb->pitches[0];
+	uint64_t address = pdp_gem_get_dev_addr(pdp_fb->obj[0]);
+
+	/*
+	 * User space specifies 'x' and 'y' offsets, which tell the display
+	 * to scan out from partway through the buffer.
+	 */
+	address += (src_y * pitch) + (src_x * pdp_drm_fb_cpp(fb));
+
+	/*
+	 * NOTE: If the buffer dimensions are less than the current mode then
+	 * the output will appear in the top left of the screen. This can be
+	 * centered by adjusting horizontal active start, right border start,
+	 * vertical active start and bottom border start. At this point it's
+	 * not entirely clear where this should be done. On the one hand it's
+	 * related to pdp_crtc_helper_mode_set but on the other hand there
+	 * might not always be a call to pdp_crtc_helper_mode_set. This needs
+	 * to be investigated.
+	 */
+	switch (dev_priv->version) {
+	case PDP_VERSION_APOLLO:
+		switch (pdp_drm_fb_format(fb)) {
+		case DRM_FORMAT_ARGB8888:
+		case DRM_FORMAT_XRGB8888:
+			break;
+		default:
+			DRM_ERROR("unsupported pixel format (format = %d)\n",
+				  pdp_drm_fb_format(fb));
+			return;
+		}
+
+		pdp_apollo_set_surface(plane->dev->dev,
+				       pdp_crtc->pdp_reg,
+				       0,
+				       address,
+				       0, 0,
+				       fb->width, fb->height, pitch,
+				       0xE,
+				       255,
+				       false);
+		break;
+	case PDP_VERSION_ODIN:
+		switch (pdp_drm_fb_format(fb)) {
+		case DRM_FORMAT_ARGB8888:
+		case DRM_FORMAT_XRGB8888:
+			break;
+		default:
+			DRM_ERROR("unsupported pixel format (format = %d)\n",
+				  pdp_drm_fb_format(fb));
+			return;
+		}
+
+		pdp_odin_set_surface(plane->dev->dev,
+				     pdp_crtc->pdp_reg,
+				     0,
+				     address,
+				     0, 0,
+				     fb->width, fb->height, pitch,
+				     ODN_PDP_SURF_PIXFMT_ARGB8888,
+				     255,
+				     false);
+		break;
+	case PDP_VERSION_PLATO:
+		switch (pdp_drm_fb_format(fb)) {
+		case DRM_FORMAT_ARGB8888:
+		case DRM_FORMAT_XRGB8888:
+			break;
+		default:
+			DRM_ERROR("unsupported pixel format (format = %d)\n",
+				  pdp_drm_fb_format(fb));
+			return;
+		}
+
+		pdp_plato_set_surface(crtc->dev->dev,
+				      pdp_crtc->pdp_reg,
+				      pdp_crtc->pdp_bif_reg,
+				      0,
+				      address,
+				      0, 0,
+				      fb->width, fb->height, pitch,
+				      PLATO_PDP_PIXEL_FORMAT_ARGB8,
+				      255,
+				      false);
+		break;
+	default:
+		BUG();
+	}
+}
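
The panning arithmetic in pdp_plane_set_surface() is worth a worked example: the scan-out start advances by src_y whole lines plus src_x pixels. A small sketch for the 4-byte-per-pixel XRGB8888 case (the base address and pitch values are made up for illustration):

	#include <stdint.h>

	/* Mirrors the panning math in pdp_plane_set_surface() for cpp == 4. */
	static uint64_t pan_address(uint64_t base, unsigned int pitch,
				    uint32_t src_x, uint32_t src_y)
	{
		return base + ((uint64_t)src_y * pitch) + (src_x * 4);
	}

	/*
	 * pan_address(0x80000000, 8192, 16, 32) == 0x80040040:
	 * 32 lines of 8192 bytes plus 16 pixels of 4 bytes.
	 */
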
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_tmds.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_tmds.c
new file mode 100644
index 0000000..b7a2d93
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/drm_pdp_tmds.c
@@ -0,0 +1,141 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+#include "drm_pdp_drv.h"
+
+#include "kernel_compatibility.h"
+
+static void pdp_tmds_encoder_helper_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool
+pdp_tmds_encoder_helper_mode_fixup(struct drm_encoder *encoder,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void pdp_tmds_encoder_helper_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void pdp_tmds_encoder_helper_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+pdp_tmds_encoder_helper_mode_set(struct drm_encoder *encoder,
+				 struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void pdp_tmds_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct pdp_drm_private *dev_priv = encoder->dev->dev_private;
+
+	DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n",
+			 encoder->base.id,
+			 encoder->name);
+
+	drm_encoder_cleanup(encoder);
+
+	kfree(encoder);
+	dev_priv->encoder = NULL;
+}
+
+static const struct drm_encoder_helper_funcs pdp_tmds_encoder_helper_funcs = {
+	.dpms = pdp_tmds_encoder_helper_dpms,
+	.mode_fixup = pdp_tmds_encoder_helper_mode_fixup,
+	.prepare = pdp_tmds_encoder_helper_prepare,
+	.commit = pdp_tmds_encoder_helper_commit,
+	.mode_set = pdp_tmds_encoder_helper_mode_set,
+	.get_crtc = NULL,
+	.detect = NULL,
+	.disable = NULL,
+};
+
+static const struct drm_encoder_funcs pdp_tmds_encoder_funcs = {
+	.reset = NULL,
+	.destroy = pdp_tmds_encoder_destroy,
+};
+
+struct drm_encoder *
+pdp_tmds_encoder_create(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+	int err;
+
+	encoder = kzalloc(sizeof(*encoder), GFP_KERNEL);
+	if (!encoder)
+		return ERR_PTR(-ENOMEM);
+
+	err = drm_encoder_init(dev,
+			       encoder,
+			       &pdp_tmds_encoder_funcs,
+			       DRM_MODE_ENCODER_TMDS,
+			       NULL);
+	if (err) {
+		DRM_ERROR("failed to initialise encoder (err=%d)\n", err);
+		kfree(encoder);
+		return ERR_PTR(err);
+	}
+	drm_encoder_helper_add(encoder, &pdp_tmds_encoder_helper_funcs);
+
+	/*
+	 * This is a bit field that's used to determine which
+	 * CRTCs can drive this encoder.
+	 */
+	encoder->possible_crtcs = 0x1;
+
+	DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n",
+			 encoder->base.id,
+			 encoder->name);
+
+	return encoder;
+}
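
The hard-coded possible_crtcs = 0x1 restricts the encoder to the CRTC with index 0, since bit i of the mask corresponds to the CRTC whose drm_crtc_index() is i. Had the CRTC pointer been to hand, the same value could be derived instead of hard-coded; a sketch, assuming the driver's single CRTC is reachable as dev_priv->crtc (field name assumed):

	/* Equivalent to 0x1 when dev_priv->crtc is the first CRTC registered. */
	encoder->possible_crtcs = drm_crtc_mask(dev_priv->crtc);
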
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/odin_defs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/odin_defs.h
new file mode 100644
index 0000000..98a2af1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/odin_defs.h
@@ -0,0 +1,289 @@
+/****************************************************************************
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Odin Memory Map - View from PCIe
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+****************************************************************************/
+
+#ifndef _ODIN_DEFS_H_
+#define _ODIN_DEFS_H_
+
+/* These defines have not been autogenerated */
+
+#define PCI_VENDOR_ID_ODIN                  (0x1AEE)
+#define DEVICE_ID_ODIN                      (0x1010)
+
+/* PCI BAR 0 contains the PDP regs and the Odin system regs */
+#define ODN_SYS_BAR                         0
+#define ODN_SYS_REGION_SIZE                 0x000800000 /* 8MB */
+
+#define ODN_SYS_REGS_OFFSET                 0
+#define ODN_SYS_REGS_SIZE                   0x000400000 /* 4MB */
+
+#define ODN_PDP_REGS_OFFSET                 0x000440000
+#define ODN_PDP_REGS_SIZE                   0x000040000 /* 256k */
+
+
+/* PCI BAR 2 contains the Device Under Test SOCIF 64MB region */
+#define ODN_DUT_SOCIF_BAR                   2
+#define ODN_DUT_SOCIF_OFFSET                0x000000000
+#define ODN_DUT_SOCIF_SIZE                  0x004000000 /* 64MB */
+
+/* PCI BAR 4 contains the on-board 1GB DDR memory */
+#define ODN_DDR_BAR                         4
+#define ODN_DDR_MEM_OFFSET                  0x000000000
+#define ODN_DDR_MEM_SIZE                    0x040000000 /* 1GB */
+
+/* Odin system register banks */
+#define ODN_REG_BANK_CORE                   0x00000
+#define ODN_REG_BANK_TCF_SPI_MASTER         0x02000
+#define ODN_REG_BANK_ODN_CLK_BLK            0x0A000
+#define ODN_REG_BANK_ODN_MCU_COMMUNICATOR   0x0C000
+#define ODN_REG_BANK_DB_TYPE_ID             0x0C200
+#define ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK   0x000000C0U
+#define ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT  0x6
+#define ODN_REG_BANK_ODN_I2C                0x0E000
+#define ODN_REG_BANK_MULTI_CLK_ALIGN        0x20000
+#define ODN_REG_BANK_ALIGN_DATA_TX          0x22000
+#define ODN_REG_BANK_SAI_RX_DDR_0           0x24000
+#define ODN_REG_BANK_SAI_RX_DDR(n)          (ODN_REG_BANK_SAI_RX_DDR_0 + (0x02000 * (n)))
+#define ODN_REG_BANK_SAI_TX_DDR_0           0x3A000
+#define ODN_REG_BANK_SAI_TX_DDR(n)          (ODN_REG_BANK_SAI_TX_DDR_0 + (0x02000 * (n)))
+#define ODN_REG_BANK_SAI_TX_SDR             0x4E000
+
+/* Odin SPI regs */
+#define ODN_SPI_MST_ADDR_RDNWR              0x0000
+#define ODN_SPI_MST_WDATA                   0x0004
+#define ODN_SPI_MST_RDATA                   0x0008
+#define ODN_SPI_MST_STATUS                  0x000C
+#define ODN_SPI_MST_GO                      0x0010
+
+
+/*
+   Odin CLK regs - the odn_clk_blk module defs are not auto-generated
+   because it is licensed 3rd party IP from Xilinx.
+   These defs are taken from the Odin TRM.
+ */
+#define ODN_PDP_P_CLK_OUT_DIVIDER_REG1           0x620
+#define ODN_PDP_PCLK_ODIV1_LO_TIME_MASK          0x0000003FU
+#define ODN_PDP_PCLK_ODIV1_LO_TIME_SHIFT         0
+#define ODN_PDP_PCLK_ODIV1_HI_TIME_MASK          0x00000FC0U
+#define ODN_PDP_PCLK_ODIV1_HI_TIME_SHIFT         6
+
+#define ODN_PDP_P_CLK_OUT_DIVIDER_REG2           0x624
+#define ODN_PDP_PCLK_ODIV2_NOCOUNT_MASK          0x00000040U
+#define ODN_PDP_PCLK_ODIV2_NOCOUNT_SHIFT         6
+#define ODN_PDP_PCLK_ODIV2_EDGE_MASK             0x00000080U
+#define ODN_PDP_PCLK_ODIV2_EDGE_SHIFT            7
+
+#define ODN_PDP_P_CLK_OUT_DIVIDER_REG3           0x61C
+
+#define ODN_PDP_M_CLK_OUT_DIVIDER_REG1           0x628
+#define ODN_PDP_MCLK_ODIV1_LO_TIME_MASK          0x0000003FU
+#define ODN_PDP_MCLK_ODIV1_LO_TIME_SHIFT         0
+#define ODN_PDP_MCLK_ODIV1_HI_TIME_MASK          0x00000FC0U
+#define ODN_PDP_MCLK_ODIV1_HI_TIME_SHIFT         6
+
+#define ODN_PDP_M_CLK_OUT_DIVIDER_REG2           0x62C
+#define ODN_PDP_MCLK_ODIV2_NOCOUNT_MASK          0x00000040U
+#define ODN_PDP_MCLK_ODIV2_NOCOUNT_SHIFT         6
+#define ODN_PDP_MCLK_ODIV2_EDGE_MASK             0x00000080U
+#define ODN_PDP_MCLK_ODIV2_EDGE_SHIFT            7
+
+#define ODN_PDP_P_CLK_MULTIPLIER_REG1            0x650
+#define ODN_PDP_PCLK_MUL1_LO_TIME_MASK           0x0000003FU
+#define ODN_PDP_PCLK_MUL1_LO_TIME_SHIFT          0
+#define ODN_PDP_PCLK_MUL1_HI_TIME_MASK           0x00000FC0U
+#define ODN_PDP_PCLK_MUL1_HI_TIME_SHIFT          6
+
+#define ODN_PDP_P_CLK_MULTIPLIER_REG2            0x654
+#define ODN_PDP_PCLK_MUL2_NOCOUNT_MASK           0x00000040U
+#define ODN_PDP_PCLK_MUL2_NOCOUNT_SHIFT          6
+#define ODN_PDP_PCLK_MUL2_EDGE_MASK              0x00000080U
+#define ODN_PDP_PCLK_MUL2_EDGE_SHIFT             7
+
+#define ODN_PDP_P_CLK_MULTIPLIER_REG3            0x64C
+
+#define ODN_PDP_P_CLK_IN_DIVIDER_REG             0x658
+#define ODN_PDP_PCLK_IDIV_LO_TIME_MASK           0x0000003FU
+#define ODN_PDP_PCLK_IDIV_LO_TIME_SHIFT          0
+#define ODN_PDP_PCLK_IDIV_HI_TIME_MASK           0x00000FC0U
+#define ODN_PDP_PCLK_IDIV_HI_TIME_SHIFT          6
+#define ODN_PDP_PCLK_IDIV_NOCOUNT_MASK           0x00001000U
+#define ODN_PDP_PCLK_IDIV_NOCOUNT_SHIFT          12
+#define ODN_PDP_PCLK_IDIV_EDGE_MASK              0x00002000U
+#define ODN_PDP_PCLK_IDIV_EDGE_SHIFT             13
+
+/*
+ * DUT core clock input divider, multiplier and out divider.
+ */
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1                (0x0028)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK   (0x00000FC0U)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT  (6)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK   (0x0000003FU)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT  (0)
+
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2                (0x002C)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK      (0x00000080U)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT     (7)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK   (0x00000040U)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT  (6)
+
+#define ODN_DUT_CORE_CLK_MULTIPLIER1                 (0x0050)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_MASK    (0x00000FC0U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_SHIFT   (6)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_MASK    (0x0000003FU)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_SHIFT   (0)
+
+#define ODN_DUT_CORE_CLK_MULTIPLIER2                 (0x0054)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_MASK       (0x00007000U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_SHIFT      (12)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_MASK    (0x00000800U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_SHIFT   (11)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_MASK       (0x00000080U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_SHIFT      (7)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_MASK    (0x00000040U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_SHIFT   (6)
+
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1                 (0x0058)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_MASK       (0x00002000U)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_SHIFT      (13)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_MASK    (0x00001000U)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT   (12)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_MASK    (0x00000FC0U)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_SHIFT   (6)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_MASK    (0x0000003FU)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_SHIFT   (0)
+
+/*
+ * DUT interface clock input divider, multiplier and out divider.
+ */
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1               (0x0220)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_MASK  (0x00000FC0U)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT (6)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_MASK  (0x0000003FU)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT (0)
+
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2               (0x0224)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_MASK     (0x00000080U)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_SHIFT    (7)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_MASK  (0x00000040U)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT (6)
+
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1                (0x0250)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_MASK   (0x00000FC0U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_SHIFT  (6)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_MASK   (0x0000003FU)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_SHIFT  (0)
+
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2                (0x0254)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_MASK      (0x00007000U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_SHIFT     (12)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_MASK   (0x00000800U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_SHIFT  (11)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_MASK      (0x00000080U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_SHIFT     (7)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_MASK   (0x00000040U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_SHIFT  (6)
+
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1                (0x0258)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_MASK      (0x00002000U)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_SHIFT     (13)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_MASK   (0x00001000U)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT  (12)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_MASK   (0x00000FC0U)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_SHIFT  (6)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_MASK   (0x0000003FU)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_SHIFT  (0)
+
+
+/*
+ * Min max values from Xilinx Virtex7 data sheet DS183, for speed grade 2
+ * All in Hz
+ */
+#define ODN_INPUT_CLOCK_SPEED                        (100000000U)
+#define ODN_INPUT_CLOCK_SPEED_MIN                    (10000000U)
+#define ODN_INPUT_CLOCK_SPEED_MAX                    (933000000U)
+#define ODN_OUTPUT_CLOCK_SPEED_MIN                   (4690000U)
+#define ODN_OUTPUT_CLOCK_SPEED_MAX                   (933000000U)
+#define ODN_VCO_MIN                                  (600000000U)
+#define ODN_VCO_MAX                                  (1440000000U)
+#define ODN_PFD_MIN                                  (10000000U)
+#define ODN_PFD_MAX                                  (500000000U)
+
+/*
+ * Max values that can be set in DRP registers
+ */
+#define ODN_OREG_VALUE_MAX                            (126.875f)
+#define ODN_MREG_VALUE_MAX                            (126.875f)
+#define ODN_DREG_VALUE_MAX                            (126U)
+
+
+#define ODN_MMCM_LOCK_STATUS_DUT_CORE                (0x00000001U)
+#define ODN_MMCM_LOCK_STATUS_DUT_IF                  (0x00000002U)
+#define ODN_MMCM_LOCK_STATUS_PDPP                    (0x00000008U)
+
+/*
+    Odin interrupt flags
+*/
+#define ODN_INTERRUPT_ENABLE_PDP1           (1 << ODN_INTERRUPT_ENABLE_PDP1_SHIFT)
+#define ODN_INTERRUPT_ENABLE_DUT            (1 << ODN_INTERRUPT_ENABLE_DUT_SHIFT)
+#define ODN_INTERRUPT_STATUS_PDP1           (1 << ODN_INTERRUPT_STATUS_PDP1_SHIFT)
+#define ODN_INTERRUPT_STATUS_DUT            (1 << ODN_INTERRUPT_STATUS_DUT_SHIFT)
+#define ODN_INTERRUPT_CLEAR_PDP1            (1 << ODN_INTERRUPT_CLR_PDP1_SHIFT)
+#define ODN_INTERRUPT_CLEAR_DUT             (1 << ODN_INTERRUPT_CLR_DUT_SHIFT)
+
+/*
+   Other defines
+*/
+#define ODN_STREAM_OFF                      0
+#define ODN_STREAM_ON                       1
+#define ODN_SYNC_GEN_DISABLE                0
+#define ODN_SYNC_GEN_ENABLE                 1
+#define ODN_INTERLACE_DISABLE               0
+#define ODN_INTERLACE_ENABLE                1
+#define ODN_PIXEL_CLOCK_INVERTED            1
+#define ODN_HSYNC_POLARITY_ACTIVE_HIGH      1
+
+#define ODN_PDP_INTCLR_ALL                  0x000FFFFFU
+#define ODN_PDP_INTSTAT_ALL_OURUN_MASK      0x000FFFF0U
+
+#endif /* _ODIN_DEFS_H_ */
+
+/*****************************************************************************
+ End of file (odn_defs.h)
+*****************************************************************************/
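
The clock limits above constrain any MMCM programming of the input divider (D), feedback multiplier (M) and output divider (O): the phase-frequency detector sees f_in/D, the VCO runs at f_in*M/D, and the output is the VCO frequency divided by O. A hedged sketch of a validity check built from these defines (the D/M/O semantics come from the Xilinx clocking model, not from this header):

	#include <linux/types.h>

	/* Returns true if (d, m, o) keeps the MMCM within the DS183 limits. */
	static bool odn_mmcm_params_valid(u32 fin, u32 d, u32 m, u32 o)
	{
		u64 fpfd = (u64)fin / d;
		u64 fvco = ((u64)fin * m) / d;
		u64 fout = fvco / o;

		return fpfd >= ODN_PFD_MIN && fpfd <= ODN_PFD_MAX &&
		       fvco >= ODN_VCO_MIN && fvco <= ODN_VCO_MAX &&
		       fout >= ODN_OUTPUT_CLOCK_SPEED_MIN &&
		       fout <= ODN_OUTPUT_CLOCK_SPEED_MAX;
	}

For example, with fin = ODN_INPUT_CLOCK_SPEED (100 MHz), d = 1, m = 10, o = 10, the VCO runs at 1 GHz and the output at 100 MHz, all within range.
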
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/odin_pdp_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/odin_pdp_regs.h
new file mode 100644
index 0000000..da47a25
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/odin_pdp_regs.h
@@ -0,0 +1,8540 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* tab size 4 */
+
+#ifndef ODN_PDP_REGS_H
+#define ODN_PDP_REGS_H
+
+/* Odin-PDP hardware register definitions */
+
+
+#define ODN_PDP_GRPH1SURF_OFFSET					(0x0000)
+
+/* PDP, GRPH1SURF, GRPH1PIXFMT
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK		(0x0000001F)
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT			(27)
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LENGTH		(5)
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USEGAMMA
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_MASK		(0x04000000)
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SHIFT		(26)
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LENGTH		(1)
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USECSC
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_MASK			(0x02000000)
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SHIFT			(25)
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LENGTH		(1)
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1LUTRWCHOICE
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_MASK		(0x01000000)
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SHIFT	(24)
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LENGTH	(1)
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USELUT
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_MASK			(0x00800000)
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SHIFT			(23)
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LENGTH		(1)
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SIGNED_FIELD	IMG_FALSE
+
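+/* Illustrative note (not part of the generated register map): each field is
+ * driven with the usual read-modify-write pattern, e.g. for GRPH1PIXFMT:
+ *
+ *	v &= ~ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK;
+ *	v |= (fmt << ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT) &
+ *	     ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK;
+ *
+ * LSBMASK is the same mask before shifting and LENGTH the field width in
+ * bits; the same pattern applies to every field below.
+ */
+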
+#define ODN_PDP_GRPH2SURF_OFFSET					(0x0004)
+
+/* PDP, GRPH2SURF, GRPH2PIXFMT
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LSBMASK		(0x0000001F)
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT			(27)
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LENGTH		(5)
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USEGAMMA
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_MASK		(0x04000000)
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SHIFT		(26)
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LENGTH		(1)
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USECSC
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_MASK			(0x02000000)
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SHIFT			(25)
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LENGTH		(1)
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2LUTRWCHOICE
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_MASK		(0x01000000)
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SHIFT	(24)
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LENGTH	(1)
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USELUT
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_MASK			(0x00800000)
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SHIFT			(23)
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LENGTH		(1)
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3SURF_OFFSET					(0x0008)
+
+/* PDP, GRPH3SURF, GRPH3PIXFMT
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LSBMASK		(0x0000001F)
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SHIFT			(27)
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LENGTH		(5)
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USEGAMMA
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_MASK		(0x04000000)
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SHIFT		(26)
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LENGTH		(1)
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USECSC
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_MASK			(0x02000000)
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SHIFT			(25)
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LENGTH		(1)
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3LUTRWCHOICE
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_MASK		(0x01000000)
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SHIFT	(24)
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LENGTH	(1)
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USELUT
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_MASK			(0x00800000)
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SHIFT			(23)
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LENGTH		(1)
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4SURF_OFFSET					(0x000C)
+
+/* PDP, GRPH4SURF, GRPH4PIXFMT
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LSBMASK		(0x0000001F)
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT			(27)
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LENGTH		(5)
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USEGAMMA
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_MASK		(0x04000000)
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SHIFT		(26)
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LENGTH		(1)
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USECSC
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_MASK			(0x02000000)
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SHIFT			(25)
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LENGTH		(1)
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4LUTRWCHOICE
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_MASK		(0x01000000)
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SHIFT	(24)
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LENGTH	(1)
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USELUT
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_MASK			(0x00800000)
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SHIFT			(23)
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LENGTH		(1)
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1SURF_OFFSET						(0x0010)
+
+/* PDP, VID1SURF, VID1PIXFMT
+*/
+#define ODN_PDP_VID1SURF_VID1PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_VID1SURF_VID1PIXFMT_LSBMASK			(0x0000001F)
+#define ODN_PDP_VID1SURF_VID1PIXFMT_SHIFT			(27)
+#define ODN_PDP_VID1SURF_VID1PIXFMT_LENGTH			(5)
+#define ODN_PDP_VID1SURF_VID1PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEGAMMA
+*/
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_MASK			(0x04000000)
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_SHIFT			(26)
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_LENGTH		(1)
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USECSC
+*/
+#define ODN_PDP_VID1SURF_VID1USECSC_MASK			(0x02000000)
+#define ODN_PDP_VID1SURF_VID1USECSC_LSBMASK			(0x00000001)
+#define ODN_PDP_VID1SURF_VID1USECSC_SHIFT			(25)
+#define ODN_PDP_VID1SURF_VID1USECSC_LENGTH			(1)
+#define ODN_PDP_VID1SURF_VID1USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEI2P
+*/
+#define ODN_PDP_VID1SURF_VID1USEI2P_MASK			(0x01000000)
+#define ODN_PDP_VID1SURF_VID1USEI2P_LSBMASK			(0x00000001)
+#define ODN_PDP_VID1SURF_VID1USEI2P_SHIFT			(24)
+#define ODN_PDP_VID1SURF_VID1USEI2P_LENGTH			(1)
+#define ODN_PDP_VID1SURF_VID1USEI2P_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1COSITED
+*/
+#define ODN_PDP_VID1SURF_VID1COSITED_MASK			(0x00800000)
+#define ODN_PDP_VID1SURF_VID1COSITED_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SURF_VID1COSITED_SHIFT			(23)
+#define ODN_PDP_VID1SURF_VID1COSITED_LENGTH			(1)
+#define ODN_PDP_VID1SURF_VID1COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEHQCD
+*/
+#define ODN_PDP_VID1SURF_VID1USEHQCD_MASK			(0x00400000)
+#define ODN_PDP_VID1SURF_VID1USEHQCD_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SURF_VID1USEHQCD_SHIFT			(22)
+#define ODN_PDP_VID1SURF_VID1USEHQCD_LENGTH			(1)
+#define ODN_PDP_VID1SURF_VID1USEHQCD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEINSTREAM
+*/
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_MASK		(0x00200000)
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LSBMASK	(0x00000001)
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SHIFT		(21)
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LENGTH		(1)
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2SURF_OFFSET						(0x0014)
+
+/* PDP, VID2SURF, VID2PIXFMT
+*/
+#define ODN_PDP_VID2SURF_VID2PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_VID2SURF_VID2PIXFMT_LSBMASK			(0x0000001F)
+#define ODN_PDP_VID2SURF_VID2PIXFMT_SHIFT			(27)
+#define ODN_PDP_VID2SURF_VID2PIXFMT_LENGTH			(5)
+#define ODN_PDP_VID2SURF_VID2PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SURF, VID2COSITED
+*/
+#define ODN_PDP_VID2SURF_VID2COSITED_MASK			(0x00800000)
+#define ODN_PDP_VID2SURF_VID2COSITED_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SURF_VID2COSITED_SHIFT			(23)
+#define ODN_PDP_VID2SURF_VID2COSITED_LENGTH			(1)
+#define ODN_PDP_VID2SURF_VID2COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SURF, VID2USEGAMMA
+*/
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_MASK			(0x04000000)
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_SHIFT			(26)
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_LENGTH		(1)
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SURF, VID2USECSC
+*/
+#define ODN_PDP_VID2SURF_VID2USECSC_MASK			(0x02000000)
+#define ODN_PDP_VID2SURF_VID2USECSC_LSBMASK			(0x00000001)
+#define ODN_PDP_VID2SURF_VID2USECSC_SHIFT			(25)
+#define ODN_PDP_VID2SURF_VID2USECSC_LENGTH			(1)
+#define ODN_PDP_VID2SURF_VID2USECSC_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3SURF_OFFSET						(0x0018)
+
+/* PDP, VID3SURF, VID3PIXFMT
+*/
+#define ODN_PDP_VID3SURF_VID3PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_VID3SURF_VID3PIXFMT_LSBMASK			(0x0000001F)
+#define ODN_PDP_VID3SURF_VID3PIXFMT_SHIFT			(27)
+#define ODN_PDP_VID3SURF_VID3PIXFMT_LENGTH			(5)
+#define ODN_PDP_VID3SURF_VID3PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SURF, VID3COSITED
+*/
+#define ODN_PDP_VID3SURF_VID3COSITED_MASK			(0x00800000)
+#define ODN_PDP_VID3SURF_VID3COSITED_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SURF_VID3COSITED_SHIFT			(23)
+#define ODN_PDP_VID3SURF_VID3COSITED_LENGTH			(1)
+#define ODN_PDP_VID3SURF_VID3COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SURF, VID3USEGAMMA
+*/
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_MASK			(0x04000000)
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_SHIFT			(26)
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_LENGTH		(1)
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SURF, VID3USECSC
+*/
+#define ODN_PDP_VID3SURF_VID3USECSC_MASK			(0x02000000)
+#define ODN_PDP_VID3SURF_VID3USECSC_LSBMASK			(0x00000001)
+#define ODN_PDP_VID3SURF_VID3USECSC_SHIFT			(25)
+#define ODN_PDP_VID3SURF_VID3USECSC_LENGTH			(1)
+#define ODN_PDP_VID3SURF_VID3USECSC_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4SURF_OFFSET						(0x001C)
+
+/* PDP, VID4SURF, VID4PIXFMT
+*/
+#define ODN_PDP_VID4SURF_VID4PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_VID4SURF_VID4PIXFMT_LSBMASK			(0x0000001F)
+#define ODN_PDP_VID4SURF_VID4PIXFMT_SHIFT			(27)
+#define ODN_PDP_VID4SURF_VID4PIXFMT_LENGTH			(5)
+#define ODN_PDP_VID4SURF_VID4PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SURF, VID4COSITED
+*/
+#define ODN_PDP_VID4SURF_VID4COSITED_MASK			(0x00800000)
+#define ODN_PDP_VID4SURF_VID4COSITED_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4SURF_VID4COSITED_SHIFT			(23)
+#define ODN_PDP_VID4SURF_VID4COSITED_LENGTH			(1)
+#define ODN_PDP_VID4SURF_VID4COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SURF, VID4USEGAMMA
+*/
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_MASK			(0x04000000)
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_SHIFT			(26)
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_LENGTH		(1)
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SURF, VID4USECSC
+*/
+#define ODN_PDP_VID4SURF_VID4USECSC_MASK			(0x02000000)
+#define ODN_PDP_VID4SURF_VID4USECSC_LSBMASK			(0x00000001)
+#define ODN_PDP_VID4SURF_VID4USECSC_SHIFT			(25)
+#define ODN_PDP_VID4SURF_VID4USECSC_LENGTH			(1)
+#define ODN_PDP_VID4SURF_VID4USECSC_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1CTRL_OFFSET					(0x0020)
+
+/* PDP, GRPH1CTRL, GRPH1STREN
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_MASK			(0x80000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_SHIFT			(31)
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LENGTH			(1)
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1CKEYEN
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SHIFT			(30)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LENGTH		(1)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1CKEYSRC
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SHIFT		(29)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LENGTH		(1)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1BLEND
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_MASK			(0x18000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LSBMASK		(0x00000003)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SHIFT			(27)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LENGTH			(2)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1BLENDPOS
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK		(0x07000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT		(24)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LENGTH		(3)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1DITHEREN
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_MASK		(0x00800000)
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SHIFT		(23)
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LENGTH		(1)
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2CTRL_OFFSET					(0x0024)
+
+/* PDP, GRPH2CTRL, GRPH2STREN
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_MASK			(0x80000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SHIFT			(31)
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LENGTH			(1)
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2CKEYEN
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SHIFT			(30)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LENGTH		(1)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2CKEYSRC
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SHIFT		(29)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LENGTH		(1)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2BLEND
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_MASK			(0x18000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LSBMASK		(0x00000003)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SHIFT			(27)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LENGTH			(2)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2BLENDPOS
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK		(0x07000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT		(24)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LENGTH		(3)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2DITHEREN
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_MASK		(0x00800000)
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SHIFT		(23)
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LENGTH		(1)
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3CTRL_OFFSET					(0x0028)
+
+/* PDP, GRPH3CTRL, GRPH3STREN
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_MASK			(0x80000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SHIFT			(31)
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LENGTH			(1)
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3CKEYEN
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SHIFT			(30)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LENGTH		(1)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3CKEYSRC
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SHIFT		(29)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LENGTH		(1)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3BLEND
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_MASK			(0x18000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LSBMASK		(0x00000003)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SHIFT			(27)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LENGTH			(2)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3BLENDPOS
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_MASK		(0x07000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SHIFT		(24)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LENGTH		(3)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3DITHEREN
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_MASK		(0x00800000)
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SHIFT		(23)
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LENGTH		(1)
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4CTRL_OFFSET					(0x002C)
+
+/* PDP, GRPH4CTRL, GRPH4STREN
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_MASK			(0x80000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SHIFT			(31)
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LENGTH			(1)
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4CKEYEN
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SHIFT			(30)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LENGTH		(1)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4CKEYSRC
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SHIFT		(29)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LENGTH		(1)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4BLEND
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_MASK			(0x18000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LSBMASK		(0x00000003)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SHIFT			(27)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LENGTH			(2)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4BLENDPOS
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK		(0x07000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT		(24)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LENGTH		(3)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4DITHEREN
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_MASK		(0x00800000)
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SHIFT		(23)
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LENGTH		(1)
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1CTRL_OFFSET						(0x0030)
+
+/* PDP, VID1CTRL, VID1STREN
+*/
+#define ODN_PDP_VID1CTRL_VID1STREN_MASK				(0x80000000)
+#define ODN_PDP_VID1CTRL_VID1STREN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID1CTRL_VID1STREN_SHIFT			(31)
+#define ODN_PDP_VID1CTRL_VID1STREN_LENGTH			(1)
+#define ODN_PDP_VID1CTRL_VID1STREN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID1CTRL, VID1CKEYEN
+*/
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_SHIFT			(30)
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_LENGTH			(1)
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1CKEYSRC
+*/
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SHIFT			(29)
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LENGTH			(1)
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1BLEND
+*/
+#define ODN_PDP_VID1CTRL_VID1BLEND_MASK				(0x18000000)
+#define ODN_PDP_VID1CTRL_VID1BLEND_LSBMASK			(0x00000003)
+#define ODN_PDP_VID1CTRL_VID1BLEND_SHIFT			(27)
+#define ODN_PDP_VID1CTRL_VID1BLEND_LENGTH			(2)
+#define ODN_PDP_VID1CTRL_VID1BLEND_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID1CTRL, VID1BLENDPOS
+*/
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_MASK			(0x07000000)
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SHIFT			(24)
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LENGTH		(3)
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1DITHEREN
+*/
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_MASK			(0x00800000)
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_SHIFT			(23)
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_LENGTH		(1)
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2CTRL_OFFSET						(0x0034)
+
+/* PDP, VID2CTRL, VID2STREN
+*/
+#define ODN_PDP_VID2CTRL_VID2STREN_MASK				(0x80000000)
+#define ODN_PDP_VID2CTRL_VID2STREN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID2CTRL_VID2STREN_SHIFT			(31)
+#define ODN_PDP_VID2CTRL_VID2STREN_LENGTH			(1)
+#define ODN_PDP_VID2CTRL_VID2STREN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID2CTRL, VID2CKEYEN
+*/
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_SHIFT			(30)
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_LENGTH			(1)
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2CKEYSRC
+*/
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SHIFT			(29)
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LENGTH			(1)
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2BLEND
+*/
+#define ODN_PDP_VID2CTRL_VID2BLEND_MASK				(0x18000000)
+#define ODN_PDP_VID2CTRL_VID2BLEND_LSBMASK			(0x00000003)
+#define ODN_PDP_VID2CTRL_VID2BLEND_SHIFT			(27)
+#define ODN_PDP_VID2CTRL_VID2BLEND_LENGTH			(2)
+#define ODN_PDP_VID2CTRL_VID2BLEND_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID2CTRL, VID2BLENDPOS
+*/
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_MASK			(0x07000000)
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SHIFT			(24)
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LENGTH		(3)
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2DITHEREN
+*/
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_MASK			(0x00800000)
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_SHIFT			(23)
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_LENGTH		(1)
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3CTRL_OFFSET						(0x0038)
+
+/* PDP, VID3CTRL, VID3STREN
+*/
+#define ODN_PDP_VID3CTRL_VID3STREN_MASK				(0x80000000)
+#define ODN_PDP_VID3CTRL_VID3STREN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID3CTRL_VID3STREN_SHIFT			(31)
+#define ODN_PDP_VID3CTRL_VID3STREN_LENGTH			(1)
+#define ODN_PDP_VID3CTRL_VID3STREN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID3CTRL, VID3CKEYEN
+*/
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_SHIFT			(30)
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_LENGTH			(1)
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3CKEYSRC
+*/
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SHIFT			(29)
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LENGTH			(1)
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3BLEND
+*/
+#define ODN_PDP_VID3CTRL_VID3BLEND_MASK				(0x18000000)
+#define ODN_PDP_VID3CTRL_VID3BLEND_LSBMASK			(0x00000003)
+#define ODN_PDP_VID3CTRL_VID3BLEND_SHIFT			(27)
+#define ODN_PDP_VID3CTRL_VID3BLEND_LENGTH			(2)
+#define ODN_PDP_VID3CTRL_VID3BLEND_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID3CTRL, VID3BLENDPOS
+*/
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_MASK			(0x07000000)
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SHIFT			(24)
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LENGTH		(3)
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3DITHEREN
+*/
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_MASK			(0x00800000)
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_SHIFT			(23)
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_LENGTH		(1)
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4CTRL_OFFSET						(0x003C)
+
+/* PDP, VID4CTRL, VID4STREN
+*/
+#define ODN_PDP_VID4CTRL_VID4STREN_MASK				(0x80000000)
+#define ODN_PDP_VID4CTRL_VID4STREN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID4CTRL_VID4STREN_SHIFT			(31)
+#define ODN_PDP_VID4CTRL_VID4STREN_LENGTH			(1)
+#define ODN_PDP_VID4CTRL_VID4STREN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID4CTRL, VID4CKEYEN
+*/
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_SHIFT			(30)
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_LENGTH			(1)
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4CKEYSRC
+*/
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SHIFT			(29)
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LENGTH			(1)
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4BLEND
+*/
+#define ODN_PDP_VID4CTRL_VID4BLEND_MASK				(0x18000000)
+#define ODN_PDP_VID4CTRL_VID4BLEND_LSBMASK			(0x00000003)
+#define ODN_PDP_VID4CTRL_VID4BLEND_SHIFT			(27)
+#define ODN_PDP_VID4CTRL_VID4BLEND_LENGTH			(2)
+#define ODN_PDP_VID4CTRL_VID4BLEND_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID4CTRL, VID4BLENDPOS
+*/
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_MASK			(0x07000000)
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SHIFT			(24)
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LENGTH		(3)
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4DITHEREN
+*/
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_MASK			(0x00800000)
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_SHIFT			(23)
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_LENGTH		(1)
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1UCTRL_OFFSET					(0x0050)
+
+/* PDP, VID1UCTRL, VID1UVHALFSTR
+*/
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_MASK		(0xC0000000)
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LSBMASK		(0x00000003)
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SHIFT		(30)
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LENGTH		(2)
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2UCTRL_OFFSET					(0x0054)
+
+/* PDP, VID2UCTRL, VID2UVHALFSTR
+*/
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_MASK		(0xC0000000)
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LSBMASK		(0x00000003)
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SHIFT		(30)
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LENGTH		(2)
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3UCTRL_OFFSET					(0x0058)
+
+/* PDP, VID3UCTRL, VID3UVHALFSTR
+*/
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_MASK		(0xC0000000)
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LSBMASK		(0x00000003)
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SHIFT		(30)
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LENGTH		(2)
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4UCTRL_OFFSET					(0x005C)
+
+/* PDP, VID4UCTRL, VID4UVHALFSTR
+*/
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_MASK		(0xC0000000)
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LSBMASK		(0x00000003)
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SHIFT		(30)
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LENGTH		(2)
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1STRIDE_OFFSET					(0x0060)
+
+/* PDP, GRPH1STRIDE, GRPH1STRIDE
+*/
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_MASK		(0xFFC00000)
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT		(22)
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LENGTH		(10)
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2STRIDE_OFFSET					(0x0064)
+
+/* PDP, GRPH2STRIDE, GRPH2STRIDE
+*/
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_MASK		(0xFFC00000)
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT		(22)
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LENGTH		(10)
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3STRIDE_OFFSET					(0x0068)
+
+/* PDP, GRPH3STRIDE, GRPH3STRIDE
+*/
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_MASK		(0xFFC00000)
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SHIFT		(22)
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LENGTH		(10)
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4STRIDE_OFFSET					(0x006C)
+
+/* PDP, GRPH4STRIDE, GRPH4STRIDE
+*/
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_MASK		(0xFFC00000)
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT		(22)
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LENGTH		(10)
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1STRIDE_OFFSET					(0x0070)
+
+/* PDP, VID1STRIDE, VID1STRIDE
+*/
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_MASK			(0xFFC00000)
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_SHIFT			(22)
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH		(10)
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2STRIDE_OFFSET					(0x0074)
+
+/* PDP, VID2STRIDE, VID2STRIDE
+*/
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_MASK			(0xFFC00000)
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_SHIFT			(22)
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_LENGTH		(10)
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3STRIDE_OFFSET					(0x0078)
+
+/* PDP, VID3STRIDE, VID3STRIDE
+*/
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_MASK			(0xFFC00000)
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_SHIFT			(22)
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_LENGTH		(10)
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4STRIDE_OFFSET					(0x007C)
+
+/* PDP, VID4STRIDE, VID4STRIDE
+*/
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_MASK			(0xFFC00000)
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_SHIFT			(22)
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_LENGTH		(10)
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1SIZE_OFFSET					(0x0080)
+
+/* PDP, GRPH1SIZE, GRPH1WIDTH
+*/
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT			(16)
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LENGTH			(12)
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SIZE, GRPH1HEIGHT
+*/
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT			(0)
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LENGTH		(12)
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2SIZE_OFFSET					(0x0084)
+
+/* PDP, GRPH2SIZE, GRPH2WIDTH
+*/
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT			(16)
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LENGTH			(12)
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SIZE, GRPH2HEIGHT
+*/
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT			(0)
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LENGTH		(12)
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3SIZE_OFFSET					(0x0088)
+
+/* PDP, GRPH3SIZE, GRPH3WIDTH
+*/
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SHIFT			(16)
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LENGTH			(12)
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SIZE, GRPH3HEIGHT
+*/
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SHIFT			(0)
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LENGTH		(12)
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4SIZE_OFFSET					(0x008C)
+
+/* PDP, GRPH4SIZE, GRPH4WIDTH
+*/
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT			(16)
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LENGTH			(12)
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SIZE, GRPH4HEIGHT
+*/
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT			(0)
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LENGTH		(12)
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1SIZE_OFFSET						(0x0090)
+
+/* PDP, VID1SIZE, VID1WIDTH
+*/
+#define ODN_PDP_VID1SIZE_VID1WIDTH_MASK				(0x0FFF0000)
+#define ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID1SIZE_VID1WIDTH_SHIFT			(16)
+#define ODN_PDP_VID1SIZE_VID1WIDTH_LENGTH			(12)
+#define ODN_PDP_VID1SIZE_VID1WIDTH_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID1SIZE, VID1HEIGHT
+*/
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_SHIFT			(0)
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_LENGTH			(12)
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2SIZE_OFFSET						(0x0094)
+
+/* PDP, VID2SIZE, VID2WIDTH
+*/
+#define ODN_PDP_VID2SIZE_VID2WIDTH_MASK				(0x0FFF0000)
+#define ODN_PDP_VID2SIZE_VID2WIDTH_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID2SIZE_VID2WIDTH_SHIFT			(16)
+#define ODN_PDP_VID2SIZE_VID2WIDTH_LENGTH			(12)
+#define ODN_PDP_VID2SIZE_VID2WIDTH_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID2SIZE, VID2HEIGHT
+*/
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_SHIFT			(0)
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_LENGTH			(12)
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3SIZE_OFFSET						(0x0098)
+
+/* PDP, VID3SIZE, VID3WIDTH
+*/
+#define ODN_PDP_VID3SIZE_VID3WIDTH_MASK				(0x0FFF0000)
+#define ODN_PDP_VID3SIZE_VID3WIDTH_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID3SIZE_VID3WIDTH_SHIFT			(16)
+#define ODN_PDP_VID3SIZE_VID3WIDTH_LENGTH			(12)
+#define ODN_PDP_VID3SIZE_VID3WIDTH_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID3SIZE, VID3HEIGHT
+*/
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_SHIFT			(0)
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_LENGTH			(12)
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4SIZE_OFFSET						(0x009C)
+
+/* PDP, VID4SIZE, VID4WIDTH
+*/
+#define ODN_PDP_VID4SIZE_VID4WIDTH_MASK				(0x0FFF0000)
+#define ODN_PDP_VID4SIZE_VID4WIDTH_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID4SIZE_VID4WIDTH_SHIFT			(16)
+#define ODN_PDP_VID4SIZE_VID4WIDTH_LENGTH			(12)
+#define ODN_PDP_VID4SIZE_VID4WIDTH_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID4SIZE, VID4HEIGHT
+*/
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_SHIFT			(0)
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_LENGTH			(12)
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_SIGNED_FIELD	IMG_FALSE
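+
+/*
+ * Illustrative, not part of the original register map: each *SIZE register
+ * packs two 12-bit fields, width in bits [27:16] and height in bits [11:0].
+ * A hypothetical pack helper for VID4SIZE (whether the hardware expects the
+ * raw pixel count or count-minus-one is not defined by these macros):
+ */
+static inline unsigned int odn_pdp_vid4size_pack(unsigned int width,
+						 unsigned int height)
+{
+	return ((width << ODN_PDP_VID4SIZE_VID4WIDTH_SHIFT) &
+		ODN_PDP_VID4SIZE_VID4WIDTH_MASK) |
+	       ((height << ODN_PDP_VID4SIZE_VID4HEIGHT_SHIFT) &
+		ODN_PDP_VID4SIZE_VID4HEIGHT_MASK);
+}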
+
+#define ODN_PDP_GRPH1POSN_OFFSET					(0x00A0)
+
+/* PDP, GRPH1POSN, GRPH1XSTART
+*/
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SHIFT			(16)
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LENGTH		(12)
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1POSN, GRPH1YSTART
+*/
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SHIFT			(0)
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LENGTH		(12)
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2POSN_OFFSET					(0x00A4)
+
+/* PDP, GRPH2POSN, GRPH2XSTART
+*/
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SHIFT			(16)
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LENGTH		(12)
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2POSN, GRPH2YSTART
+*/
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SHIFT			(0)
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LENGTH		(12)
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3POSN_OFFSET					(0x00A8)
+
+/* PDP, GRPH3POSN, GRPH3XSTART
+*/
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SHIFT			(16)
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LENGTH		(12)
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3POSN, GRPH3YSTART
+*/
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SHIFT			(0)
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LENGTH		(12)
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4POSN_OFFSET					(0x00AC)
+
+/* PDP, GRPH4POSN, GRPH4XSTART
+*/
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SHIFT			(16)
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LENGTH		(12)
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4POSN, GRPH4YSTART
+*/
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SHIFT			(0)
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LENGTH		(12)
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1POSN_OFFSET						(0x00B0)
+
+/* PDP, VID1POSN, VID1XSTART
+*/
+#define ODN_PDP_VID1POSN_VID1XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_VID1POSN_VID1XSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID1POSN_VID1XSTART_SHIFT			(16)
+#define ODN_PDP_VID1POSN_VID1XSTART_LENGTH			(12)
+#define ODN_PDP_VID1POSN_VID1XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1POSN, VID1YSTART
+*/
+#define ODN_PDP_VID1POSN_VID1YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_VID1POSN_VID1YSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID1POSN_VID1YSTART_SHIFT			(0)
+#define ODN_PDP_VID1POSN_VID1YSTART_LENGTH			(12)
+#define ODN_PDP_VID1POSN_VID1YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2POSN_OFFSET						(0x00B4)
+
+/* PDP, VID2POSN, VID2XSTART
+*/
+#define ODN_PDP_VID2POSN_VID2XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_VID2POSN_VID2XSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID2POSN_VID2XSTART_SHIFT			(16)
+#define ODN_PDP_VID2POSN_VID2XSTART_LENGTH			(12)
+#define ODN_PDP_VID2POSN_VID2XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2POSN, VID2YSTART
+*/
+#define ODN_PDP_VID2POSN_VID2YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_VID2POSN_VID2YSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID2POSN_VID2YSTART_SHIFT			(0)
+#define ODN_PDP_VID2POSN_VID2YSTART_LENGTH			(12)
+#define ODN_PDP_VID2POSN_VID2YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3POSN_OFFSET						(0x00B8)
+
+/* PDP, VID3POSN, VID3XSTART
+*/
+#define ODN_PDP_VID3POSN_VID3XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_VID3POSN_VID3XSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID3POSN_VID3XSTART_SHIFT			(16)
+#define ODN_PDP_VID3POSN_VID3XSTART_LENGTH			(12)
+#define ODN_PDP_VID3POSN_VID3XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3POSN, VID3YSTART
+*/
+#define ODN_PDP_VID3POSN_VID3YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_VID3POSN_VID3YSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID3POSN_VID3YSTART_SHIFT			(0)
+#define ODN_PDP_VID3POSN_VID3YSTART_LENGTH			(12)
+#define ODN_PDP_VID3POSN_VID3YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4POSN_OFFSET						(0x00BC)
+
+/* PDP, VID4POSN, VID4XSTART
+*/
+#define ODN_PDP_VID4POSN_VID4XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_VID4POSN_VID4XSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID4POSN_VID4XSTART_SHIFT			(16)
+#define ODN_PDP_VID4POSN_VID4XSTART_LENGTH			(12)
+#define ODN_PDP_VID4POSN_VID4XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4POSN, VID4YSTART
+*/
+#define ODN_PDP_VID4POSN_VID4YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_VID4POSN_VID4YSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID4POSN_VID4YSTART_SHIFT			(0)
+#define ODN_PDP_VID4POSN_VID4YSTART_LENGTH			(12)
+#define ODN_PDP_VID4POSN_VID4YSTART_SIGNED_FIELD	IMG_FALSE
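+
+/*
+ * Illustrative, not part of the original register map: the position
+ * registers follow the same two-field layout as the *SIZE registers,
+ * X start in bits [27:16] and Y start in bits [11:0].  Using the
+ * hypothetical helper sketched after the VID4CTRL fields:
+ *
+ *	pos = odn_pdp_field_set(0, ODN_PDP_VID4POSN_VID4XSTART_MASK,
+ *				ODN_PDP_VID4POSN_VID4XSTART_SHIFT, x);
+ *	pos = odn_pdp_field_set(pos, ODN_PDP_VID4POSN_VID4YSTART_MASK,
+ *				ODN_PDP_VID4POSN_VID4YSTART_SHIFT, y);
+ */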
+
+#define ODN_PDP_GRPH1GALPHA_OFFSET					(0x00C0)
+
+/* PDP, GRPH1GALPHA, GRPH1GALPHA
+*/
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_MASK		(0x000003FF)
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT		(0)
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LENGTH		(10)
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2GALPHA_OFFSET					(0x00C4)
+
+/* PDP, GRPH2GALPHA, GRPH2GALPHA
+*/
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_MASK		(0x000003FF)
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT		(0)
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LENGTH		(10)
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3GALPHA_OFFSET					(0x00C8)
+
+/* PDP, GRPH3GALPHA, GRPH3GALPHA
+*/
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_MASK		(0x000003FF)
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SHIFT		(0)
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LENGTH		(10)
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4GALPHA_OFFSET					(0x00CC)
+
+/* PDP, GRPH4GALPHA, GRPH4GALPHA
+*/
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_MASK		(0x000003FF)
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT		(0)
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LENGTH		(10)
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1GALPHA_OFFSET					(0x00D0)
+
+/* PDP, VID1GALPHA, VID1GALPHA
+*/
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_MASK			(0x000003FF)
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_SHIFT			(0)
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_LENGTH		(10)
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2GALPHA_OFFSET					(0x00D4)
+
+/* PDP, VID2GALPHA, VID2GALPHA
+*/
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_MASK			(0x000003FF)
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_SHIFT			(0)
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_LENGTH		(10)
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3GALPHA_OFFSET					(0x00D8)
+
+/* PDP, VID3GALPHA, VID3GALPHA
+*/
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_MASK			(0x000003FF)
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_SHIFT			(0)
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_LENGTH		(10)
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4GALPHA_OFFSET					(0x00DC)
+
+/* PDP, VID4GALPHA, VID4GALPHA
+*/
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_MASK			(0x000003FF)
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_SHIFT			(0)
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_LENGTH		(10)
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_SIGNED_FIELD	IMG_FALSE
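+
+/*
+ * Illustrative, not part of the original register map: the global-alpha
+ * fields are 10 bits wide (maximum 0x3FF).  If an 8-bit alpha value needs
+ * widening to 10 bits, bit replication is one common approach; the field
+ * semantics themselves are assumed, not defined by these macros:
+ *
+ *	alpha10 = (alpha8 << 2) | (alpha8 >> 6);
+ *	reg = (alpha10 << ODN_PDP_VID4GALPHA_VID4GALPHA_SHIFT) &
+ *	      ODN_PDP_VID4GALPHA_VID4GALPHA_MASK;
+ */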
+
+#define ODN_PDP_GRPH1CKEY_R_OFFSET					(0x00E0)
+
+/* PDP, GRPH1CKEY_R, GRPH1CKEY_R
+*/
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_MASK		(0x000003FF)
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SHIFT		(0)
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LENGTH		(10)
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1CKEY_GB_OFFSET					(0x00E4)
+
+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_G
+*/
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK		(0x03FF0000)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT		(16)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LENGTH		(10)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_B
+*/
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_MASK		(0x000003FF)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SHIFT		(0)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LENGTH		(10)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2CKEY_R_OFFSET					(0x00E8)
+
+/* PDP, GRPH2CKEY_R, GRPH2CKEY_R
+*/
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_MASK		(0x000003FF)
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SHIFT		(0)
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LENGTH		(10)
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2CKEY_GB_OFFSET					(0x00EC)
+
+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_G
+*/
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_MASK		(0x03FF0000)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SHIFT		(16)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LENGTH		(10)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_B
+*/
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_MASK		(0x000003FF)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SHIFT		(0)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LENGTH		(10)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3CKEY_R_OFFSET					(0x00F0)
+
+/* PDP, GRPH3CKEY_R, GRPH3CKEY_R
+*/
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_MASK		(0x000003FF)
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SHIFT		(0)
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LENGTH		(10)
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3CKEY_GB_OFFSET					(0x00F4)
+
+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_G
+*/
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_MASK		(0x03FF0000)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SHIFT		(16)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LENGTH		(10)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_B
+*/
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_MASK		(0x000003FF)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SHIFT		(0)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LENGTH		(10)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4CKEY_R_OFFSET					(0x00F8)
+
+/* PDP, GRPH4CKEY_R, GRPH4CKEY_R
+*/
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_MASK		(0x000003FF)
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SHIFT		(0)
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LENGTH		(10)
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4CKEY_GB_OFFSET					(0x00FC)
+
+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_G
+*/
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_MASK		(0x03FF0000)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SHIFT		(16)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LENGTH		(10)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_B
+*/
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_MASK		(0x000003FF)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SHIFT		(0)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LENGTH		(10)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1CKEY_R_OFFSET					(0x0100)
+
+/* PDP, VID1CKEY_R, VID1CKEY_R
+*/
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_MASK			(0x000003FF)
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SHIFT			(0)
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LENGTH		(10)
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1CKEY_GB_OFFSET					(0x0104)
+
+/* PDP, VID1CKEY_GB, VID1CKEY_G
+*/
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_MASK			(0x03FF0000)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SHIFT		(16)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LENGTH		(10)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CKEY_GB, VID1CKEY_B
+*/
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_MASK			(0x000003FF)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SHIFT		(0)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LENGTH		(10)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2CKEY_R_OFFSET					(0x0108)
+
+/* PDP, VID2CKEY_R, VID2CKEY_R
+*/
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_MASK			(0x000003FF)
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SHIFT			(0)
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LENGTH		(10)
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2CKEY_GB_OFFSET					(0x010C)
+
+/* PDP, VID2CKEY_GB, VID2CKEY_G
+*/
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_MASK			(0x03FF0000)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SHIFT		(16)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LENGTH		(10)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CKEY_GB, VID2CKEY_B
+*/
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_MASK			(0x000003FF)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SHIFT		(0)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LENGTH		(10)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3CKEY_R_OFFSET					(0x0110)
+
+/* PDP, VID3CKEY_R, VID3CKEY_R
+*/
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_MASK			(0x000003FF)
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SHIFT			(0)
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LENGTH		(10)
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3CKEY_GB_OFFSET					(0x0114)
+
+/* PDP, VID3CKEY_GB, VID3CKEY_G
+*/
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_MASK			(0x03FF0000)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SHIFT		(16)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LENGTH		(10)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CKEY_GB, VID3CKEY_B
+*/
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_MASK			(0x000003FF)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SHIFT		(0)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LENGTH		(10)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4CKEY_R_OFFSET					(0x0118)
+
+/* PDP, VID4CKEY_R, VID4CKEY_R
+*/
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_MASK			(0x000003FF)
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SHIFT			(0)
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LENGTH		(10)
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4CKEY_GB_OFFSET					(0x011C)
+
+/* PDP, VID4CKEY_GB, VID4CKEY_G
+*/
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_MASK			(0x03FF0000)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT		(16)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LENGTH		(10)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CKEY_GB, VID4CKEY_B
+*/
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_MASK			(0x000003FF)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SHIFT		(0)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LENGTH		(10)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SIGNED_FIELD	IMG_FALSE
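+
+/*
+ * Illustrative, not part of the original register map: a 10-bit-per-channel
+ * colour key spans two registers, red alone in *CKEY_R and green/blue packed
+ * into *CKEY_GB.  A hypothetical write sequence for VID4 (write_reg is an
+ * assumed register-write helper, not part of this file):
+ *
+ *	write_reg(base + ODN_PDP_VID4CKEY_R_OFFSET,
+ *		  r & ODN_PDP_VID4CKEY_R_VID4CKEY_R_MASK);
+ *	write_reg(base + ODN_PDP_VID4CKEY_GB_OFFSET,
+ *		  ((g << ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT) &
+ *		   ODN_PDP_VID4CKEY_GB_VID4CKEY_G_MASK) |
+ *		  (b & ODN_PDP_VID4CKEY_GB_VID4CKEY_B_MASK));
+ */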
+
+#define ODN_PDP_GRPH1BLND2_R_OFFSET					(0x0120)
+
+/* PDP, GRPH1BLND2_R, GRPH1PIXDBL
+*/
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_MASK		(0x80000000)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SHIFT		(31)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LENGTH		(1)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1BLND2_R, GRPH1LINDBL
+*/
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_MASK		(0x20000000)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SHIFT		(29)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LENGTH		(1)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1BLND2_R, GRPH1CKEYMASK_R
+*/
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_MASK	(0x000003FF)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1BLND2_GB_OFFSET				(0x0124)
+
+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_G
+*/
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LENGTH (10)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_B
+*/
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LENGTH (10)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2BLND2_R_OFFSET					(0x0128)
+
+/* PDP, GRPH2BLND2_R, GRPH2PIXDBL
+*/
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_MASK		(0x80000000)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SHIFT		(31)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LENGTH		(1)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2BLND2_R, GRPH2LINDBL
+*/
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_MASK		(0x20000000)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SHIFT		(29)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LENGTH		(1)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2BLND2_R, GRPH2CKEYMASK_R
+*/
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_MASK	(0x000003FF)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2BLND2_GB_OFFSET				(0x012C)
+
+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_G
+*/
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LENGTH (10)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_B
+*/
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LENGTH (10)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3BLND2_R_OFFSET					(0x0130)
+
+/* PDP, GRPH3BLND2_R, GRPH3PIXDBL
+*/
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_MASK		(0x80000000)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SHIFT		(31)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LENGTH		(1)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3BLND2_R, GRPH3LINDBL
+*/
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_MASK		(0x20000000)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SHIFT		(29)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LENGTH		(1)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3BLND2_R, GRPH3CKEYMASK_R
+*/
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_MASK	(0x000003FF)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3BLND2_GB_OFFSET				(0x0134)
+
+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_G
+*/
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LENGTH (10)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_B
+*/
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LENGTH (10)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4BLND2_R_OFFSET					(0x0138)
+
+/* PDP, GRPH4BLND2_R, GRPH4PIXDBL
+*/
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_MASK		(0x80000000)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SHIFT		(31)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LENGTH		(1)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4BLND2_R, GRPH4LINDBL
+*/
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_MASK		(0x20000000)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SHIFT		(29)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LENGTH		(1)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4BLND2_R, GRPH4CKEYMASK_R
+*/
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_MASK	(0x000003FF)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4BLND2_GB_OFFSET				(0x013C)
+
+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_G
+*/
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LENGTH (10)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_B
+*/
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LENGTH (10)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1BLND2_R_OFFSET					(0x0140)
+
+/* PDP, VID1BLND2_R, VID1CKEYMASK_R
+*/
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_MASK		(0x000003FF)
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1BLND2_GB_OFFSET					(0x0144)
+
+/* PDP, VID1BLND2_GB, VID1CKEYMASK_G
+*/
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LENGTH	(10)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID1BLND2_GB, VID1CKEYMASK_B
+*/
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LENGTH	(10)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2BLND2_R_OFFSET		            (0x0148)
+
+/* PDP, VID2BLND2_R, VID2CKEYMASK_R
+*/
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_MASK		(0x000003FF)
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2BLND2_GB_OFFSET		            (0x014C)
+
+/* PDP, VID2BLND2_GB, VID2CKEYMASK_G
+*/
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LENGTH	(10)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID2BLND2_GB, VID2CKEYMASK_B
+*/
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LENGTH	(10)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3BLND2_R_OFFSET		            (0x0150)
+
+/* PDP, VID3BLND2_R, VID3CKEYMASK_R
+*/
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_MASK		(0x000003FF)
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3BLND2_GB_OFFSET		            (0x0154)
+
+/* PDP, VID3BLND2_GB, VID3CKEYMASK_G
+*/
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LENGTH	(10)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID3BLND2_GB, VID3CKEYMASK_B
+*/
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LENGTH	(10)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4BLND2_R_OFFSET		            (0x0158)
+
+/* PDP, VID4BLND2_R, VID4CKEYMASK_R
+*/
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_MASK		(0x000003FF)
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4BLND2_GB_OFFSET		            (0x015C)
+
+/* PDP, VID4BLND2_GB, VID4CKEYMASK_G
+*/
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LENGTH	(10)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4BLND2_GB, VID4CKEYMASK_B
+*/
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LENGTH	(10)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_OFFSET		    (0x0160)
+
+/* PDP, GRPH1INTERLEAVE_CTRL, GRPH1INTFIELD
+*/
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK	(0x00000001)
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT (0)
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LENGTH (1)
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_OFFSET		    (0x0164)
+
+/* PDP, GRPH2INTERLEAVE_CTRL, GRPH2INTFIELD
+*/
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK (0x00000001)
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT (0)
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LENGTH (1)
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_OFFSET		    (0x0168)
+
+/* PDP, GRPH3INTERLEAVE_CTRL, GRPH3INTFIELD
+*/
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_MASK (0x00000001)
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SHIFT (0)
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LENGTH (1)
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_OFFSET		    (0x016C)
+
+/* PDP, GRPH4INTERLEAVE_CTRL, GRPH4INTFIELD
+*/
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK (0x00000001)
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT (0)
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LENGTH (1)
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1INTERLEAVE_CTRL_OFFSET		    (0x0170)
+
+/* PDP, VID1INTERLEAVE_CTRL, VID1INTFIELD
+*/
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK (0x00000001)
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT (0)
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LENGTH (1)
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2INTERLEAVE_CTRL_OFFSET		    (0x0174)
+
+/* PDP, VID2INTERLEAVE_CTRL, VID2INTFIELD
+*/
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_MASK (0x00000001)
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SHIFT (0)
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LENGTH (1)
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3INTERLEAVE_CTRL_OFFSET		    (0x0178)
+
+/* PDP, VID3INTERLEAVE_CTRL, VID3INTFIELD
+*/
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_MASK (0x00000001)
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SHIFT (0)
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LENGTH (1)
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4INTERLEAVE_CTRL_OFFSET		    (0x017C)
+
+/* PDP, VID4INTERLEAVE_CTRL, VID4INTFIELD
+*/
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_MASK (0x00000001)
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SHIFT (0)
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LENGTH (1)
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1BASEADDR_OFFSET		        (0x0180)
+
+/* PDP, GRPH1BASEADDR, GRPH1BASEADDR
+*/
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK	(0xFFFFFFE0)
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT	(5)
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LENGTH	(27)
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2BASEADDR_OFFSET		        (0x0184)
+
+/* PDP, GRPH2BASEADDR, GRPH2BASEADDR
+*/
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_MASK	(0xFFFFFFE0)
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SHIFT	(5)
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LENGTH	(27)
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3BASEADDR_OFFSET		        (0x0188)
+
+/* PDP, GRPH3BASEADDR, GRPH3BASEADDR
+*/
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_MASK	(0xFFFFFFE0)
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SHIFT	(5)
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LENGTH	(27)
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4BASEADDR_OFFSET		        (0x018C)
+
+/* PDP, GRPH4BASEADDR, GRPH4BASEADDR
+*/
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_MASK	(0xFFFFFFE0)
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SHIFT	(5)
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LENGTH	(27)
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1BASEADDR_OFFSET		            (0x0190)
+
+/* PDP, VID1BASEADDR, VID1BASEADDR
+*/
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SHIFT		(5)
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH	(27)
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2BASEADDR_OFFSET		            (0x0194)
+
+/* PDP, VID2BASEADDR, VID2BASEADDR
+*/
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SHIFT		(5)
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LENGTH	(27)
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3BASEADDR_OFFSET		            (0x0198)
+
+/* PDP, VID3BASEADDR, VID3BASEADDR
+*/
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SHIFT		(5)
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LENGTH	(27)
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4BASEADDR_OFFSET		            (0x019C)
+
+/* PDP, VID4BASEADDR, VID4BASEADDR
+*/
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SHIFT		(5)
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LENGTH	(27)
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SIGNED_FIELD IMG_FALSE
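+
+/*
+ * Illustrative, not part of the original register map: the base-address
+ * fields keep address bits [31:5] in register bits [31:5] (SHIFT of 5,
+ * LENGTH of 27), which implies 32-byte-aligned buffers; the low five
+ * address bits are simply masked off when the register is written:
+ *
+ *	reg = addr & ODN_PDP_VID4BASEADDR_VID4BASEADDR_MASK;
+ */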
+
+#define ODN_PDP_VID1UBASEADDR_OFFSET				(0x01B0)
+
+/* PDP, VID1UBASEADDR, VID1UBASEADDR
+*/
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_MASK	(0xFFFFFFE0)
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SHIFT	(5)
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH	(27)
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2UBASEADDR_OFFSET		        (0x01B4)
+
+/* PDP, VID2UBASEADDR, VID2UBASEADDR
+*/
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3UBASEADDR_OFFSET		        (0x01B8)
+
+/* PDP, VID3UBASEADDR, VID3UBASEADDR
+*/
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4UBASEADDR_OFFSET		        (0x01BC)
+
+/* PDP, VID4UBASEADDR, VID4UBASEADDR
+*/
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VBASEADDR_OFFSET		        (0x01D0)
+
+/* PDP, VID1VBASEADDR, VID1VBASEADDR
+*/
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VBASEADDR_OFFSET		        (0x01D4)
+
+/* PDP, VID2VBASEADDR, VID2VBASEADDR
+*/
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VBASEADDR_OFFSET		        (0x01D8)
+
+/* PDP, VID3VBASEADDR, VID3VBASEADDR
+*/
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VBASEADDR_OFFSET		(0x01DC)
+
+/* PDP, VID4VBASEADDR, VID4VBASEADDR
+*/
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1POSTSKIPCTRL_OFFSET		(0x0230)
+
+/* PDP, VID1POSTSKIPCTRL, VID1HPOSTCLIP
+*/
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK		(0x007F0000)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT		(16)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LENGTH		(7)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1POSTSKIPCTRL, VID1VPOSTCLIP
+*/
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_MASK		(0x0000003F)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LSBMASK		(0x0000003F)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SHIFT		(0)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LENGTH		(6)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2POSTSKIPCTRL_OFFSET		(0x0234)
+
+/* PDP, VID2POSTSKIPCTRL, VID2HPOSTCLIP
+*/
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_MASK		(0x007F0000)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SHIFT		(16)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LENGTH		(7)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2POSTSKIPCTRL, VID2VPOSTCLIP
+*/
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_MASK		(0x0000003F)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LSBMASK		(0x0000003F)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SHIFT		(0)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LENGTH		(6)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3POSTSKIPCTRL_OFFSET		(0x0238)
+
+/* PDP, VID3POSTSKIPCTRL, VID3HPOSTCLIP
+*/
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_MASK		(0x007F0000)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SHIFT		(16)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LENGTH		(7)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3POSTSKIPCTRL, VID3VPOSTCLIP
+*/
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_MASK		(0x0000003F)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LSBMASK		(0x0000003F)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SHIFT		(0)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LENGTH		(6)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4POSTSKIPCTRL_OFFSET		(0x023C)
+
+/* PDP, VID4POSTSKIPCTRL, VID4HPOSTCLIP
+*/
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_MASK		(0x007F0000)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SHIFT		(16)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LENGTH		(7)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4POSTSKIPCTRL, VID4VPOSTCLIP
+*/
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_MASK		(0x0000003F)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LSBMASK		(0x0000003F)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SHIFT		(0)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LENGTH		(6)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1DECIMATE_CTRL_OFFSET		(0x0240)
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_EN
+*/
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2DECIMATE_CTRL_OFFSET		(0x0244)
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_EN
+*/
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3DECIMATE_CTRL_OFFSET		(0x0248)
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_EN
+*/
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4DECIMATE_CTRL_OFFSET		(0x024C)
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_EN
+*/
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1DECIMATE_CTRL_OFFSET		(0x0250)
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_EN
+*/
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2DECIMATE_CTRL_OFFSET		(0x0254)
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_EN
+*/
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3DECIMATE_CTRL_OFFSET		(0x0258)
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_EN
+*/
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4DECIMATE_CTRL_OFFSET		(0x025C)
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_EN
+*/
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1SKIPCTRL_OFFSET		(0x0270)
+
+/* PDP, VID1SKIPCTRL, VID1HSKIP
+*/
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_MASK		(0x0FFF0000)
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT		(16)
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LENGTH		(12)
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SKIPCTRL, VID1VSKIP
+*/
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_MASK		(0x00000FFF)
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT		(0)
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LENGTH		(12)
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2SKIPCTRL_OFFSET		(0x0274)
+
+/* PDP, VID2SKIPCTRL, VID2HSKIP
+*/
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_MASK		(0x0FFF0000)
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SHIFT		(16)
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LENGTH		(12)
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SKIPCTRL, VID2VSKIP
+*/
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_MASK		(0x00000FFF)
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SHIFT		(0)
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LENGTH		(12)
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3SKIPCTRL_OFFSET		(0x0278)
+
+/* PDP, VID3SKIPCTRL, VID3HSKIP
+*/
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_MASK		(0x0FFF0000)
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SHIFT		(16)
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LENGTH		(12)
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SKIPCTRL, VID3VSKIP
+*/
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_MASK		(0x00000FFF)
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SHIFT		(0)
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LENGTH		(12)
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4SKIPCTRL_OFFSET		(0x027C)
+
+/* PDP, VID4SKIPCTRL, VID4HSKIP
+*/
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_MASK		(0x0FFF0000)
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SHIFT		(16)
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LENGTH		(12)
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SKIPCTRL, VID4VSKIP
+*/
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_MASK		(0x00000FFF)
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SHIFT		(0)
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LENGTH		(12)
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1SCALECTRL_OFFSET		(0x0460)
+
+/* PDP, VID1SCALECTRL, VID1HSCALEBP
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_MASK		(0x80000000)
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SHIFT		(31)
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LENGTH		(1)
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VSCALEBP
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_MASK		(0x40000000)
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SHIFT		(30)
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LENGTH		(1)
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1HSBEFOREVS
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_MASK		(0x20000000)
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SHIFT		(29)
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LENGTH		(1)
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VSURUNCTRL
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_MASK		(0x08000000)
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SHIFT		(27)
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LENGTH		(1)
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1PAN_EN
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_MASK		(0x00040000)
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SHIFT		(18)
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LENGTH		(1)
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VORDER
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_MASK		(0x00030000)
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LSBMASK		(0x00000003)
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SHIFT		(16)
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LENGTH		(2)
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VPITCH
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SHIFT		(0)
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LENGTH		(16)
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VSINIT_OFFSET		(0x0464)
+
+/* PDP, VID1VSINIT, VID1VINITIAL1
+*/
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_MASK		(0xFFFF0000)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SHIFT		(16)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LENGTH		(16)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1VSINIT, VID1VINITIAL0
+*/
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_MASK		(0x0000FFFF)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SHIFT		(0)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LENGTH		(16)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF0_OFFSET		(0x0468)
+
+/* PDP, VID1VCOEFF0, VID1VCOEFF0
+*/
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF1_OFFSET		(0x046C)
+
+/* PDP, VID1VCOEFF1, VID1VCOEFF1
+*/
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF2_OFFSET		(0x0470)
+
+/* PDP, VID1VCOEFF2, VID1VCOEFF2
+*/
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF3_OFFSET		(0x0474)
+
+/* PDP, VID1VCOEFF3, VID1VCOEFF3
+*/
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF4_OFFSET		(0x0478)
+
+/* PDP, VID1VCOEFF4, VID1VCOEFF4
+*/
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF5_OFFSET		(0x047C)
+
+/* PDP, VID1VCOEFF5, VID1VCOEFF5
+*/
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF6_OFFSET		(0x0480)
+
+/* PDP, VID1VCOEFF6, VID1VCOEFF6
+*/
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF7_OFFSET		(0x0484)
+
+/* PDP, VID1VCOEFF7, VID1VCOEFF7
+*/
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF8_OFFSET		(0x0488)
+
+/* PDP, VID1VCOEFF8, VID1VCOEFF8
+*/
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_MASK		(0x000000FF)
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LENGTH		(8)
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
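The nine VID1VCOEFF registers occupy consecutive word offsets (0x0468 through 0x0488), so the vertical scaler coefficient table is naturally programmed with a loop; note from the masks above that only the low byte of the ninth entry is significant. A sketch under those assumptions follows (the helper name is hypothetical and the coefficient values are filter-dependent, not defined by this header):

/* Illustrative only: load the VID1 vertical scale coefficient table.
 * Entries 0..7 are full 32-bit words; entry 8 uses only bits 7:0.
 */
static void odn_pdp_vid1_write_vcoeffs(void __iomem *base,
				       const u32 coeffs[9])
{
	unsigned int i;

	for (i = 0; i < 9; i++)
		writel(coeffs[i],
		       base + ODN_PDP_VID1VCOEFF0_OFFSET + 4 * i);
}
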
+#define ODN_PDP_VID1HSINIT_OFFSET		(0x048C)
+
+/* PDP, VID1HSINIT, VID1HINITIAL
+*/
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_MASK		(0xFFFF0000)
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SHIFT		(16)
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LENGTH		(16)
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1HSINIT, VID1HPITCH
+*/
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_SHIFT		(0)
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_LENGTH		(16)
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF0_OFFSET		(0x0490)
+
+/* PDP, VID1HCOEFF0, VID1HCOEFF0
+*/
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF1_OFFSET		(0x0494)
+
+/* PDP, VID1HCOEFF1, VID1HCOEFF1
+*/
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF2_OFFSET		(0x0498)
+
+/* PDP, VID1HCOEFF2, VID1HCOEFF2
+*/
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF3_OFFSET		(0x049C)
+
+/* PDP, VID1HCOEFF3, VID1HCOEFF3
+*/
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF4_OFFSET		(0x04A0)
+
+/* PDP, VID1HCOEFF4, VID1HCOEFF4
+*/
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF5_OFFSET		(0x04A4)
+
+/* PDP, VID1HCOEFF5, VID1HCOEFF5
+*/
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF6_OFFSET		(0x04A8)
+
+/* PDP, VID1HCOEFF6, VID1HCOEFF6
+*/
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF7_OFFSET		(0x04AC)
+
+/* PDP, VID1HCOEFF7, VID1HCOEFF7
+*/
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF8_OFFSET		(0x04B0)
+
+/* PDP, VID1HCOEFF8, VID1HCOEFF8
+*/
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF9_OFFSET		(0x04B4)
+
+/* PDP, VID1HCOEFF9, VID1HCOEFF9
+*/
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF10_OFFSET		(0x04B8)
+
+/* PDP, VID1HCOEFF10, VID1HCOEFF10
+*/
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF11_OFFSET		(0x04BC)
+
+/* PDP, VID1HCOEFF11, VID1HCOEFF11
+*/
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF12_OFFSET		(0x04C0)
+
+/* PDP, VID1HCOEFF12, VID1HCOEFF12
+*/
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF13_OFFSET		(0x04C4)
+
+/* PDP, VID1HCOEFF13, VID1HCOEFF13
+*/
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF14_OFFSET		(0x04C8)
+
+/* PDP, VID1HCOEFF14, VID1HCOEFF14
+*/
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF15_OFFSET		(0x04CC)
+
+/* PDP, VID1HCOEFF15, VID1HCOEFF15
+*/
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF16_OFFSET		(0x04D0)
+
+/* PDP, VID1HCOEFF16, VID1HCOEFF16
+*/
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_MASK		(0x000000FF)
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LENGTH		(8)
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1SCALESIZE_OFFSET		(0x04D4)
+
+/* PDP, VID1SCALESIZE, VID1SCALEWIDTH
+*/
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_MASK		(0x0FFF0000)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SHIFT		(16)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LENGTH		(12)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALESIZE, VID1SCALEHEIGHT
+*/
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_MASK		(0x00000FFF)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SHIFT		(0)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LENGTH		(12)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CORE_ID_OFFSET		(0x04E0)
+
+/* PDP, CORE_ID, GROUP_ID
+*/
+#define ODN_PDP_CORE_ID_GROUP_ID_MASK		(0xFF000000)
+#define ODN_PDP_CORE_ID_GROUP_ID_LSBMASK		(0x000000FF)
+#define ODN_PDP_CORE_ID_GROUP_ID_SHIFT		(24)
+#define ODN_PDP_CORE_ID_GROUP_ID_LENGTH		(8)
+#define ODN_PDP_CORE_ID_GROUP_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CORE_ID, CORE_ID
+*/
+#define ODN_PDP_CORE_ID_CORE_ID_MASK		(0x00FF0000)
+#define ODN_PDP_CORE_ID_CORE_ID_LSBMASK		(0x000000FF)
+#define ODN_PDP_CORE_ID_CORE_ID_SHIFT		(16)
+#define ODN_PDP_CORE_ID_CORE_ID_LENGTH		(8)
+#define ODN_PDP_CORE_ID_CORE_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CORE_ID, CONFIG_ID
+*/
+#define ODN_PDP_CORE_ID_CONFIG_ID_MASK		(0x0000FFFF)
+#define ODN_PDP_CORE_ID_CONFIG_ID_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_CORE_ID_CONFIG_ID_SHIFT		(0)
+#define ODN_PDP_CORE_ID_CONFIG_ID_LENGTH		(16)
+#define ODN_PDP_CORE_ID_CONFIG_ID_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CORE_REV_OFFSET		(0x04F0)
+
+/* PDP, CORE_REV, MAJOR_REV
+*/
+#define ODN_PDP_CORE_REV_MAJOR_REV_MASK		(0x00FF0000)
+#define ODN_PDP_CORE_REV_MAJOR_REV_LSBMASK		(0x000000FF)
+#define ODN_PDP_CORE_REV_MAJOR_REV_SHIFT		(16)
+#define ODN_PDP_CORE_REV_MAJOR_REV_LENGTH		(8)
+#define ODN_PDP_CORE_REV_MAJOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CORE_REV, MINOR_REV
+*/
+#define ODN_PDP_CORE_REV_MINOR_REV_MASK		(0x0000FF00)
+#define ODN_PDP_CORE_REV_MINOR_REV_LSBMASK		(0x000000FF)
+#define ODN_PDP_CORE_REV_MINOR_REV_SHIFT		(8)
+#define ODN_PDP_CORE_REV_MINOR_REV_LENGTH		(8)
+#define ODN_PDP_CORE_REV_MINOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CORE_REV, MAINT_REV
+*/
+#define ODN_PDP_CORE_REV_MAINT_REV_MASK		(0x000000FF)
+#define ODN_PDP_CORE_REV_MAINT_REV_LSBMASK		(0x000000FF)
+#define ODN_PDP_CORE_REV_MAINT_REV_SHIFT		(0)
+#define ODN_PDP_CORE_REV_MAINT_REV_LENGTH		(8)
+#define ODN_PDP_CORE_REV_MAINT_REV_SIGNED_FIELD	IMG_FALSE
+
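CORE_ID and CORE_REV are identification registers, so decoding them is a plain mask-and-shift. A sketch assuming the same readl() accessor (the function name is hypothetical):

/* Illustrative only: decode and log the PDP core identification. */
static void odn_pdp_report_core(void __iomem *base)
{
	u32 id = readl(base + ODN_PDP_CORE_ID_OFFSET);
	u32 rev = readl(base + ODN_PDP_CORE_REV_OFFSET);

	pr_info("PDP group 0x%02x core 0x%02x config 0x%04x, rev %u.%u.%u\n",
		(id & ODN_PDP_CORE_ID_GROUP_ID_MASK) >> ODN_PDP_CORE_ID_GROUP_ID_SHIFT,
		(id & ODN_PDP_CORE_ID_CORE_ID_MASK) >> ODN_PDP_CORE_ID_CORE_ID_SHIFT,
		id & ODN_PDP_CORE_ID_CONFIG_ID_MASK,
		(rev & ODN_PDP_CORE_REV_MAJOR_REV_MASK) >> ODN_PDP_CORE_REV_MAJOR_REV_SHIFT,
		(rev & ODN_PDP_CORE_REV_MINOR_REV_MASK) >> ODN_PDP_CORE_REV_MINOR_REV_SHIFT,
		rev & ODN_PDP_CORE_REV_MAINT_REV_MASK);
}
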
+#define ODN_PDP_VID2SCALECTRL_OFFSET		(0x0500)
+
+/* PDP, VID2SCALECTRL, VID2HSCALEBP
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_MASK		(0x80000000)
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SHIFT		(31)
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LENGTH		(1)
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VSCALEBP
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_MASK		(0x40000000)
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SHIFT		(30)
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LENGTH		(1)
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2HSBEFOREVS
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_MASK		(0x20000000)
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SHIFT		(29)
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LENGTH		(1)
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VSURUNCTRL
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_MASK		(0x08000000)
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SHIFT		(27)
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LENGTH		(1)
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2PAN_EN
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_MASK		(0x00040000)
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SHIFT		(18)
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LENGTH		(1)
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VORDER
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_MASK		(0x00030000)
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LSBMASK		(0x00000003)
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SHIFT		(16)
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LENGTH		(2)
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VPITCH
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SHIFT		(0)
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LENGTH		(16)
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VSINIT_OFFSET		(0x0504)
+
+/* PDP, VID2VSINIT, VID2VINITIAL1
+*/
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_MASK		(0xFFFF0000)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SHIFT		(16)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_LENGTH		(16)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2VSINIT, VID2VINITIAL0
+*/
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_MASK		(0x0000FFFF)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SHIFT		(0)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LENGTH		(16)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF0_OFFSET		(0x0508)
+
+/* PDP, VID2VCOEFF0, VID2VCOEFF0
+*/
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF1_OFFSET		(0x050C)
+
+/* PDP, VID2VCOEFF1, VID2VCOEFF1
+*/
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF2_OFFSET		(0x0510)
+
+/* PDP, VID2VCOEFF2, VID2VCOEFF2
+*/
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF3_OFFSET		(0x0514)
+
+/* PDP, VID2VCOEFF3, VID2VCOEFF3
+*/
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF4_OFFSET		(0x0518)
+
+/* PDP, VID2VCOEFF4, VID2VCOEFF4
+*/
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF5_OFFSET		(0x051C)
+
+/* PDP, VID2VCOEFF5, VID2VCOEFF5
+*/
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF6_OFFSET		(0x0520)
+
+/* PDP, VID2VCOEFF6, VID2VCOEFF6
+*/
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF7_OFFSET		(0x0524)
+
+/* PDP, VID2VCOEFF7, VID2VCOEFF7
+*/
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF8_OFFSET		(0x0528)
+
+/* PDP, VID2VCOEFF8, VID2VCOEFF8
+*/
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_MASK		(0x000000FF)
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LENGTH		(8)
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HSINIT_OFFSET		(0x052C)
+
+/* PDP, VID2HSINIT, VID2HINITIAL
+*/
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_MASK		(0xFFFF0000)
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SHIFT		(16)
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LENGTH		(16)
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2HSINIT, VID2HPITCH
+*/
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_SHIFT		(0)
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_LENGTH		(16)
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF0_OFFSET		(0x0530)
+
+/* PDP, VID2HCOEFF0, VID2HCOEFF0
+*/
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF1_OFFSET		(0x0534)
+
+/* PDP, VID2HCOEFF1, VID2HCOEFF1
+*/
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF2_OFFSET		(0x0538)
+
+/* PDP, VID2HCOEFF2, VID2HCOEFF2
+*/
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF3_OFFSET		(0x053C)
+
+/* PDP, VID2HCOEFF3, VID2HCOEFF3
+*/
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF4_OFFSET		(0x0540)
+
+/* PDP, VID2HCOEFF4, VID2HCOEFF4
+*/
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF5_OFFSET		(0x0544)
+
+/* PDP, VID2HCOEFF5, VID2HCOEFF5
+*/
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF6_OFFSET		(0x0548)
+
+/* PDP, VID2HCOEFF6, VID2HCOEFF6
+*/
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF7_OFFSET		(0x054C)
+
+/* PDP, VID2HCOEFF7, VID2HCOEFF7
+*/
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF8_OFFSET		(0x0550)
+
+/* PDP, VID2HCOEFF8, VID2HCOEFF8
+*/
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF9_OFFSET		(0x0554)
+
+/* PDP, VID2HCOEFF9, VID2HCOEFF9
+*/
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF10_OFFSET		(0x0558)
+
+/* PDP, VID2HCOEFF10, VID2HCOEFF10
+*/
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF11_OFFSET		(0x055C)
+
+/* PDP, VID2HCOEFF11, VID2HCOEFF11
+*/
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF12_OFFSET		(0x0560)
+
+/* PDP, VID2HCOEFF12, VID2HCOEFF12
+*/
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF13_OFFSET		(0x0564)
+
+/* PDP, VID2HCOEFF13, VID2HCOEFF13
+*/
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF14_OFFSET		(0x0568)
+
+/* PDP, VID2HCOEFF14, VID2HCOEFF14
+*/
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF15_OFFSET		(0x056C)
+
+/* PDP, VID2HCOEFF15, VID2HCOEFF15
+*/
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF16_OFFSET		(0x0570)
+
+/* PDP, VID2HCOEFF16, VID2HCOEFF16
+*/
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_MASK		(0x000000FF)
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LENGTH		(8)
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2SCALESIZE_OFFSET		(0x0574)
+
+/* PDP, VID2SCALESIZE, VID2SCALEWIDTH
+*/
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_MASK		(0x0FFF0000)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SHIFT		(16)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LENGTH		(12)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALESIZE, VID2SCALEHEIGHT
+*/
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_MASK		(0x00000FFF)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SHIFT		(0)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LENGTH		(12)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3SCALECTRL_OFFSET		(0x0578)
+
+/* PDP, VID3SCALECTRL, VID3HSCALEBP
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_MASK		(0x80000000)
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SHIFT		(31)
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LENGTH		(1)
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VSCALEBP
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_MASK		(0x40000000)
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SHIFT		(30)
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LENGTH		(1)
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3HSBEFOREVS
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_MASK		(0x20000000)
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SHIFT		(29)
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LENGTH		(1)
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VSURUNCTRL
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_MASK		(0x08000000)
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SHIFT		(27)
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LENGTH		(1)
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3PAN_EN
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_MASK		(0x00040000)
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SHIFT		(18)
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LENGTH		(1)
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VORDER
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_MASK		(0x00030000)
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_LSBMASK		(0x00000003)
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SHIFT		(16)
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_LENGTH		(2)
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VPITCH
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SHIFT		(0)
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LENGTH		(16)
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VSINIT_OFFSET		(0x057C)
+
+/* PDP, VID3VSINIT, VID3VINITIAL1
+*/
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_MASK		(0xFFFF0000)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SHIFT		(16)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LENGTH		(16)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3VSINIT, VID3VINITIAL0
+*/
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_MASK		(0x0000FFFF)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SHIFT		(0)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LENGTH		(16)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF0_OFFSET		(0x0580)
+
+/* PDP, VID3VCOEFF0, VID3VCOEFF0
+*/
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF1_OFFSET		(0x0584)
+
+/* PDP, VID3VCOEFF1, VID3VCOEFF1
+*/
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF2_OFFSET		(0x0588)
+
+/* PDP, VID3VCOEFF2, VID3VCOEFF2
+*/
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF3_OFFSET		(0x058C)
+
+/* PDP, VID3VCOEFF3, VID3VCOEFF3
+*/
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF4_OFFSET		(0x0590)
+
+/* PDP, VID3VCOEFF4, VID3VCOEFF4
+*/
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF5_OFFSET		(0x0594)
+
+/* PDP, VID3VCOEFF5, VID3VCOEFF5
+*/
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF6_OFFSET		(0x0598)
+
+/* PDP, VID3VCOEFF6, VID3VCOEFF6
+*/
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF7_OFFSET		(0x059C)
+
+/* PDP, VID3VCOEFF7, VID3VCOEFF7
+*/
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF8_OFFSET		(0x05A0)
+
+/* PDP, VID3VCOEFF8, VID3VCOEFF8
+*/
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_MASK		(0x000000FF)
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LENGTH		(8)
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HSINIT_OFFSET		(0x05A4)
+
+/* PDP, VID3HSINIT, VID3HINITIAL
+*/
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_MASK		(0xFFFF0000)
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SHIFT		(16)
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LENGTH		(16)
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3HSINIT, VID3HPITCH
+*/
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_SHIFT		(0)
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_LENGTH		(16)
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF0_OFFSET		(0x05A8)
+
+/* PDP, VID3HCOEFF0, VID3HCOEFF0
+*/
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF1_OFFSET		(0x05AC)
+
+/* PDP, VID3HCOEFF1, VID3HCOEFF1
+*/
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF2_OFFSET		(0x05B0)
+
+/* PDP, VID3HCOEFF2, VID3HCOEFF2
+*/
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF3_OFFSET		(0x05B4)
+
+/* PDP, VID3HCOEFF3, VID3HCOEFF3
+*/
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF4_OFFSET		(0x05B8)
+
+/* PDP, VID3HCOEFF4, VID3HCOEFF4
+*/
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF5_OFFSET		(0x05BC)
+
+/* PDP, VID3HCOEFF5, VID3HCOEFF5
+*/
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF6_OFFSET		(0x05C0)
+
+/* PDP, VID3HCOEFF6, VID3HCOEFF6
+*/
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF7_OFFSET		(0x05C4)
+
+/* PDP, VID3HCOEFF7, VID3HCOEFF7
+*/
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF8_OFFSET		(0x05C8)
+
+/* PDP, VID3HCOEFF8, VID3HCOEFF8
+*/
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF9_OFFSET		(0x05CC)
+
+/* PDP, VID3HCOEFF9, VID3HCOEFF9
+*/
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF10_OFFSET		(0x05D0)
+
+/* PDP, VID3HCOEFF10, VID3HCOEFF10
+*/
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF11_OFFSET		(0x05D4)
+
+/* PDP, VID3HCOEFF11, VID3HCOEFF11
+*/
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF12_OFFSET		(0x05D8)
+
+/* PDP, VID3HCOEFF12, VID3HCOEFF12
+*/
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LENGTH	(32)
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF13_OFFSET		(0x05DC)
+
+/* PDP, VID3HCOEFF13, VID3HCOEFF13
+*/
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LENGTH	(32)
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF14_OFFSET		(0x05E0)
+
+/* PDP, VID3HCOEFF14, VID3HCOEFF14
+*/
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LENGTH	(32)
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF15_OFFSET		(0x05E4)
+
+/* PDP, VID3HCOEFF15, VID3HCOEFF15
+*/
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LENGTH	(32)
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF16_OFFSET		(0x05E8)
+
+/* PDP, VID3HCOEFF16, VID3HCOEFF16
+*/
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_MASK		(0x000000FF)
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LSBMASK	(0x000000FF)
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LENGTH	(8)
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3SCALESIZE_OFFSET		(0x05EC)
+
+/* PDP, VID3SCALESIZE, VID3SCALEWIDTH
+*/
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK		(0x0FFF0000)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT		(16)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LENGTH		(12)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALESIZE, VID3SCALEHEIGHT
+*/
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_MASK		(0x00000FFF)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SHIFT		(0)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LENGTH		(12)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
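+
+/*
+ * Illustrative sketch only, not part of the generated register list:
+ * every field above is described by a _MASK (its bit positions inside
+ * the 32-bit register) and a _SHIFT (the position of its least
+ * significant bit), so single-field accesses can be written
+ * generically.  odn_pdp_field_get()/odn_pdp_field_set() are
+ * hypothetical helpers for this example, not existing driver API.
+ */
+static inline unsigned int odn_pdp_field_get(unsigned int reg,
+					     unsigned int mask,
+					     unsigned int shift)
+{
+	/* Isolate the field bits, then move them down to bit 0. */
+	return (reg & mask) >> shift;
+}
+
+static inline unsigned int odn_pdp_field_set(unsigned int reg,
+					     unsigned int mask,
+					     unsigned int shift,
+					     unsigned int val)
+{
+	/* Clear the field, then OR in the new value clipped to the mask. */
+	return (reg & ~mask) | ((val << shift) & mask);
+}
+
+/*
+ * For example, updating the VID3 scaler output width (assuming the
+ * hardware takes the width in pixels; the exact encoding is not
+ * documented in this header):
+ *
+ *   reg = odn_pdp_field_set(reg,
+ *			     ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK,
+ *			     ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT,
+ *			     width);
+ */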
+
+#define ODN_PDP_VID4SCALECTRL_OFFSET		(0x05F0)
+
+/* PDP, VID4SCALECTRL, VID4HSCALEBP
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_MASK		(0x80000000)
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LSBMASK	(0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SHIFT	(31)
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LENGTH	(1)
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VSCALEBP
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_MASK		(0x40000000)
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LSBMASK	(0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SHIFT	(30)
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LENGTH	(1)
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4HSBEFOREVS
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_MASK	(0x20000000)
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LSBMASK	(0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SHIFT	(29)
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LENGTH	(1)
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VSURUNCTRL
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_MASK	(0x08000000)
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LSBMASK	(0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SHIFT	(27)
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LENGTH	(1)
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4PAN_EN
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_MASK		(0x00040000)
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LSBMASK	(0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SHIFT		(18)
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LENGTH		(1)
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VORDER
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_MASK		(0x00030000)
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LSBMASK	(0x00000003)
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SHIFT		(16)
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LENGTH		(2)
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VPITCH
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LSBMASK	(0x0000FFFF)
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SHIFT		(0)
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LENGTH		(16)
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VSINIT_OFFSET			(0x05F4)
+
+/* PDP, VID4VSINIT, VID4VINITIAL1
+*/
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_MASK		(0xFFFF0000)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SHIFT		(16)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LENGTH		(16)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4VSINIT, VID4VINITIAL0
+*/
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_MASK		(0x0000FFFF)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SHIFT		(0)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LENGTH		(16)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF0_OFFSET			(0x05F8)
+
+/* PDP, VID4VCOEFF0, VID4VCOEFF0
+*/
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF1_OFFSET			(0x05FC)
+
+/* PDP, VID4VCOEFF1, VID4VCOEFF1
+*/
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF2_OFFSET			(0x0600)
+
+/* PDP, VID4VCOEFF2, VID4VCOEFF2
+*/
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF3_OFFSET			(0x0604)
+
+/* PDP, VID4VCOEFF3, VID4VCOEFF3
+*/
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF4_OFFSET			(0x0608)
+
+/* PDP, VID4VCOEFF4, VID4VCOEFF4
+*/
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF5_OFFSET			(0x060C)
+
+/* PDP, VID4VCOEFF5, VID4VCOEFF5
+*/
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF6_OFFSET			(0x0610)
+
+/* PDP, VID4VCOEFF6, VID4VCOEFF6
+*/
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF7_OFFSET			(0x0614)
+
+/* PDP, VID4VCOEFF7, VID4VCOEFF7
+*/
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF8_OFFSET			(0x0618)
+
+/* PDP, VID4VCOEFF8, VID4VCOEFF8
+*/
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_MASK		(0x000000FF)
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LENGTH		(8)
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HSINIT_OFFSET			(0x061C)
+
+/* PDP, VID4HSINIT, VID4HINITIAL
+*/
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_MASK		(0xFFFF0000)
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SHIFT		(16)
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LENGTH		(16)
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4HSINIT, VID4HPITCH
+*/
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_SHIFT		(0)
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_LENGTH		(16)
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF0_OFFSET			(0x0620)
+
+/* PDP, VID4HCOEFF0, VID4HCOEFF0
+*/
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF1_OFFSET			(0x0624)
+
+/* PDP, VID4HCOEFF1, VID4HCOEFF1
+*/
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF2_OFFSET			(0x0628)
+
+/* PDP, VID4HCOEFF2, VID4HCOEFF2
+*/
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF3_OFFSET			(0x062C)
+
+/* PDP, VID4HCOEFF3, VID4HCOEFF3
+*/
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF4_OFFSET			(0x0630)
+
+/* PDP, VID4HCOEFF4, VID4HCOEFF4
+*/
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF5_OFFSET			(0x0634)
+
+/* PDP, VID4HCOEFF5, VID4HCOEFF5
+*/
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF6_OFFSET			(0x0638)
+
+/* PDP, VID4HCOEFF6, VID4HCOEFF6
+*/
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF7_OFFSET			(0x063C)
+
+/* PDP, VID4HCOEFF7, VID4HCOEFF7
+*/
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF8_OFFSET			(0x0640)
+
+/* PDP, VID4HCOEFF8, VID4HCOEFF8
+*/
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF9_OFFSET			(0x0644)
+
+/* PDP, VID4HCOEFF9, VID4HCOEFF9
+*/
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF10_OFFSET			(0x0648)
+
+/* PDP, VID4HCOEFF10, VID4HCOEFF10
+*/
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LENGTH	(32)
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF11_OFFSET			(0x064C)
+
+/* PDP, VID4HCOEFF11, VID4HCOEFF11
+*/
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LENGTH	(32)
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF12_OFFSET			(0x0650)
+
+/* PDP, VID4HCOEFF12, VID4HCOEFF12
+*/
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LENGTH	(32)
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF13_OFFSET			(0x0654)
+
+/* PDP, VID4HCOEFF13, VID4HCOEFF13
+*/
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LENGTH	(32)
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF14_OFFSET			(0x0658)
+
+/* PDP, VID4HCOEFF14, VID4HCOEFF14
+*/
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LENGTH	(32)
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF15_OFFSET			(0x065C)
+
+/* PDP, VID4HCOEFF15, VID4HCOEFF15
+*/
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LENGTH	(32)
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF16_OFFSET			(0x0660)
+
+/* PDP, VID4HCOEFF16, VID4HCOEFF16
+*/
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_MASK		(0x000000FF)
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LSBMASK	(0x000000FF)
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LENGTH	(8)
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4SCALESIZE_OFFSET			(0x0664)
+
+/* PDP, VID4SCALESIZE, VID4SCALEWIDTH
+*/
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK		(0x0FFF0000)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT		(16)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LENGTH		(12)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALESIZE, VID4SCALEHEIGHT
+*/
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK		(0x00000FFF)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT		(0)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LENGTH		(12)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND0_OFFSET				(0x0668)
+
+/* PDP, PORTER_BLND0, BLND0BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND0, BLND0PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND1_OFFSET				(0x066C)
+
+/* PDP, PORTER_BLND1, BLND1BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND1, BLND1PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND2_OFFSET				(0x0670)
+
+/* PDP, PORTER_BLND2, BLND2BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND2, BLND2PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND3_OFFSET				(0x0674)
+
+/* PDP, PORTER_BLND3, BLND3BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND3, BLND3PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND4_OFFSET				(0x0678)
+
+/* PDP, PORTER_BLND4, BLND4BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND4, BLND4PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND5_OFFSET				(0x067C)
+
+/* PDP, PORTER_BLND5, BLND5BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND5, BLND5PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND6_OFFSET				(0x0680)
+
+/* PDP, PORTER_BLND6, BLND6BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND6, BLND6PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND7_OFFSET				(0x0684)
+
+/* PDP, PORTER_BLND7, BLND7BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND7, BLND7PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SIGNED_FIELD	IMG_FALSE
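+
+/*
+ * Illustrative sketch only: building a PORTER_BLNDn value from the two
+ * fields defined above.  The meaning of the 4-bit Porter-Duff mode
+ * codes and of the blend-type bit is an assumption not documented in
+ * this header; odn_pdp_porter_blnd0() is a hypothetical helper.
+ */
+static inline unsigned int odn_pdp_porter_blnd0(unsigned int blendtype,
+						unsigned int portermode)
+{
+	return ((blendtype << ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT) &
+		ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK) |
+	       ((portermode << ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT) &
+		ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_MASK);
+}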
+
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06C8)
+
+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_TRANS
+*/
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_OPAQUE
+*/
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_OFFSET		(0x06CC)
+
+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMAX
+*/
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_MASK		(0x03FF0000)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SHIFT		(16)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LENGTH		(10)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMIN
+*/
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_MASK		(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SHIFT		(0)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LENGTH		(10)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1LUMAKEY_C_RG_OFFSET		(0x06D0)
+
+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_R
+*/
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_MASK		(0x0FFF0000)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SHIFT		(16)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LENGTH		(12)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_G
+*/
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_MASK		(0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SHIFT		(0)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LENGTH		(12)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1LUMAKEY_C_B_OFFSET		(0x06D4)
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYALPHAMULT
+*/
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SHIFT		(29)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LENGTH		(1)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYEN
+*/
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK		(0x10000000)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SHIFT		(28)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LENGTH		(1)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYOUTOFF
+*/
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT		(16)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LENGTH	(10)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYC_B
+*/
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK		(0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT		(0)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LENGTH		(12)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
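+
+/*
+ * Illustrative sketch only: the VID1LUMAKEY_C_B register mixes control
+ * bits (enable, alpha-multiply) with offset and coefficient fields, so
+ * a write composes several of them at once.  odn_pdp_vid1lumakey_c_b()
+ * is a hypothetical helper; the coefficient encoding is an assumption.
+ */
+static inline unsigned int odn_pdp_vid1lumakey_c_b(unsigned int outoff,
+						   unsigned int c_b)
+{
+	/* Enable luma keying and program the output offset and C_B term. */
+	return ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK |
+	       ((outoff << ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT) &
+		ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK) |
+	       ((c_b << ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT) &
+		ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK);
+}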
+
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06D8)
+
+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_TRANS
+*/
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_OPAQUE
+*/
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_OFFSET				(0x06DC)
+
+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMAX
+*/
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_MASK		(0x03FF0000)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SHIFT		(16)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LENGTH		(10)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMIN
+*/
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_MASK		(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SHIFT		(0)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LENGTH		(10)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2LUMAKEY_C_RG_OFFSET				(0x06E0)
+
+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_R
+*/
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_MASK		(0x0FFF0000)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SHIFT		(16)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LENGTH		(12)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_G
+*/
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_MASK		(0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SHIFT		(0)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LENGTH		(12)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2LUMAKEY_C_B_OFFSET				(0x06E4)
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYALPHAMULT
+*/
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SHIFT		(29)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LENGTH		(1)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYEN
+*/
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_MASK		(0x10000000)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SHIFT		(28)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LENGTH		(1)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYOUTOFF
+*/
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SHIFT		(16)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LENGTH	(10)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYC_B
+*/
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_MASK		(0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SHIFT		(0)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LENGTH		(12)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06E8)
+
+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_TRANS
+*/
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_OPAQUE
+*/
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_OFFSET			(0x06EC)
+
+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMAX
+*/
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_MASK		(0x03FF0000)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SHIFT		(16)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LENGTH		(10)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMIN
+*/
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_MASK		(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SHIFT		(0)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LENGTH		(10)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3LUMAKEY_C_RG_OFFSET		(0x06F0)
+
+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_R
+*/
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_MASK		(0x0FFF0000)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SHIFT		(16)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LENGTH		(12)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_G
+*/
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_MASK		(0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SHIFT		(0)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LENGTH		(12)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3LUMAKEY_C_B_OFFSET		(0x06F4)
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYALPHAMULT
+*/
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SHIFT		(29)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LENGTH		(1)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYEN
+*/
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_MASK		(0x10000000)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SHIFT		(28)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LENGTH		(1)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYOUTOFF
+*/
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SHIFT		(16)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LENGTH	(10)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYC_B
+*/
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_MASK		(0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SHIFT		(0)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LENGTH		(12)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06F8)
+
+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_TRANS
+*/
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_OPAQUE
+*/
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET		(0x06FC)
+
+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMAX
+*/
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK		(0x03FF0000)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT		(16)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LENGTH		(10)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMIN
+*/
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK		(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT		(0)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LENGTH		(10)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4LUMAKEY_C_RG_OFFSET			(0x0700)
+
+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_R
+*/
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_MASK		(0x0FFF0000)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SHIFT		(16)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LENGTH		(12)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_G
+*/
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_MASK		(0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SHIFT		(0)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LENGTH		(12)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4LUMAKEY_C_B_OFFSET		(0x0704)
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYALPHAMULT
+*/
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_MASK	(0x20000000)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LSBMASK	(0x00000001)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SHIFT	(29)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LENGTH	(1)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYEN
+*/
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_MASK		(0x10000000)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SHIFT		(28)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LENGTH		(1)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYOUTOFF
+*/
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SHIFT		(16)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LENGTH	(10)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYC_B
+*/
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_MASK		(0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SHIFT		(0)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LENGTH		(12)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF0_OFFSET			(0x0708)
+
+/* PDP, CSCCOEFF0, CSCCOEFFRU
+*/
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_MASK		(0x003FF800)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT		(11)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF0, CSCCOEFFRY
+*/
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_MASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT		(0)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF1_OFFSET			(0x070C)
+
+/* PDP, CSCCOEFF1, CSCCOEFFGY
+*/
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_MASK		(0x003FF800)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SHIFT		(11)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF1, CSCCOEFFRV
+*/
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_MASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SHIFT		(0)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF2_OFFSET			(0x0710)
+
+/* PDP, CSCCOEFF2, CSCCOEFFGV
+*/
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_MASK		(0x003FF800)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SHIFT		(11)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF2, CSCCOEFFGU
+*/
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_MASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SHIFT		(0)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF3_OFFSET			(0x0714)
+
+/* PDP, CSCCOEFF3, CSCCOEFFBU
+*/
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_MASK		(0x003FF800)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SHIFT		(11)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF3, CSCCOEFFBY
+*/
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_MASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SHIFT		(0)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF4_OFFSET			(0x0718)
+
+/* PDP, CSCCOEFF4, CSCCOEFFBV
+*/
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_MASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SHIFT		(0)
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SIGNED_FIELD	IMG_FALSE
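+
+/*
+ * Illustrative sketch only: CSCCOEFF0..CSCCOEFF4 hold the nine 11-bit
+ * coefficients of a 3x3 colour-space-conversion matrix, packed two per
+ * register (CSCCOEFF4 carries the last one on its own).  Packing the
+ * RU/RY pair is shown below; odn_pdp_csccoeff0() is a hypothetical
+ * helper and the coefficient fixed-point format is an assumption.
+ */
+static inline unsigned int odn_pdp_csccoeff0(unsigned int ru,
+					     unsigned int ry)
+{
+	return ((ru << ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT) &
+		ODN_PDP_CSCCOEFF0_CSCCOEFFRU_MASK) |
+	       ((ry << ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT) &
+		ODN_PDP_CSCCOEFF0_CSCCOEFFRY_MASK);
+}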
+
+#define ODN_PDP_BGNDCOL_AR_OFFSET			(0x071C)
+
+/* PDP, BGNDCOL_AR, BGNDCOL_A
+*/
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_MASK		(0x03FF0000)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LSBMASK		(0x000003FF)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT		(16)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LENGTH		(10)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, BGNDCOL_AR, BGNDCOL_R
+*/
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_MASK		(0x000003FF)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT		(0)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LENGTH		(10)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_BGNDCOL_GB_OFFSET			(0x0720)
+
+/* PDP, BGNDCOL_GB, BGNDCOL_G
+*/
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_MASK		(0x03FF0000)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT		(16)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LENGTH		(10)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, BGNDCOL_GB, BGNDCOL_B
+*/
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_MASK		(0x000003FF)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT		(0)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LENGTH		(10)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_BORDCOL_R_OFFSET			(0x0724)
+
+/* PDP, BORDCOL_R, BORDCOL_R
+*/
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_MASK		(0x000003FF)
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_SHIFT		(0)
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_LENGTH		(10)
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_BORDCOL_GB_OFFSET			(0x0728)
+
+/* PDP, BORDCOL_GB, BORDCOL_G
+*/
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_MASK		(0x03FF0000)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SHIFT		(16)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LENGTH		(10)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, BORDCOL_GB, BORDCOL_B
+*/
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_MASK		(0x000003FF)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SHIFT		(0)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LENGTH		(10)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_LINESTAT_OFFSET				(0x0734)
+
+/* PDP, LINESTAT, LINENO
+*/
+#define ODN_PDP_LINESTAT_LINENO_MASK			(0x00001FFF)
+#define ODN_PDP_LINESTAT_LINENO_LSBMASK			(0x00001FFF)
+#define ODN_PDP_LINESTAT_LINENO_SHIFT			(0)
+#define ODN_PDP_LINESTAT_LINENO_LENGTH			(13)
+#define ODN_PDP_LINESTAT_LINENO_SIGNED_FIELD		IMG_FALSE
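+
+/*
+ * Illustrative sketch only: LINESTAT appears to be a status register,
+ * so its field is read rather than written.  odn_pdp_linestat_lineno()
+ * is a hypothetical helper returning the current scanout line.
+ */
+static inline unsigned int odn_pdp_linestat_lineno(unsigned int status)
+{
+	return (status & ODN_PDP_LINESTAT_LINENO_MASK) >>
+	       ODN_PDP_LINESTAT_LINENO_SHIFT;
+}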
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_OFFSET	(0x0738)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C12
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_MASK		(0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C11
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_MASK		(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_OFFSET		(0x073C)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C21
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_MASK		(0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C13
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_MASK		(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_OFFSET		(0x0740)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C23
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_MASK		(0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C22
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_MASK		(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_OFFSET		(0x0744)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C32
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_MASK		(0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C31
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_MASK		(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_OFFSET		(0x0748)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_C33
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_MASK		(0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LSBMASK		(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_RANGE
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_MASK		(0x00000030)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LSBMASK		(0x00000003)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SHIFT		(4)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LENGTH		(2)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_EN
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK		(0x00000001)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LENGTH		(1)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_OFFSET		(0x074C)
+
+/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_G
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_MASK		(0x0FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LSBMASK		(0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LENGTH		(12)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_B
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_MASK		(0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LSBMASK		(0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LENGTH		(12)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_OFFSET		(0x0750)
+
+/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_R, CR_PROCAMP_OUTOFF_R
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_MASK		(0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LSBMASK		(0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LENGTH		(12)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_OFFSET		(0x0754)
+
+/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_G
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_MASK		(0x03FF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LENGTH		(10)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_B
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_MASK		(0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LENGTH		(10)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_OFFSET		(0x0758)
+
+/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_R, CR_PROCAMP_INOFF_R
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_MASK		(0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LENGTH		(10)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_SIGNAT_R_OFFSET		(0x075C)
+
+/* PDP, SIGNAT_R, SIGNATURE_R
+*/
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_MASK		(0x000003FF)
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SHIFT		(0)
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LENGTH		(10)
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_SIGNAT_GB_OFFSET		(0x0760)
+
+/* PDP, SIGNAT_GB, SIGNATURE_G
+*/
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_MASK		(0x03FF0000)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SHIFT		(16)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LENGTH		(10)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SIGNAT_GB, SIGNATURE_B
+*/
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_MASK		(0x000003FF)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_SHIFT		(0)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LENGTH		(10)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET		(0x0764)
+
+/* PDP, REGISTER_UPDATE_CTRL, BYPASS_DOUBLE_BUFFERING
+*/
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_MASK		(0x00000004)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LSBMASK		(0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SHIFT		(2)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LENGTH		(1)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, REGISTER_UPDATE_CTRL, REGISTERS_VALID
+*/
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK		(0x00000002)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LSBMASK		(0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT		(1)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LENGTH		(1)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, REGISTER_UPDATE_CTRL, USE_VBLANK
+*/
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK		(0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LSBMASK		(0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT		(0)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LENGTH		(1)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SIGNED_FIELD	IMG_FALSE
+
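+/*
+ * The three REGISTER_UPDATE_CTRL bits above suggest the usual
+ * double-buffered update scheme: register writes are staged, then
+ * committed by setting REGISTERS_VALID, with USE_VBLANK deferring the
+ * latch to the next vertical blank (semantics assumed from the field
+ * names, not confirmed by this file). Sketch, assuming a hypothetical
+ * write32 accessor:
+ *
+ *	write32(ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET,
+ *		ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK |
+ *		ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK);
+ */
+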
+#define ODN_PDP_REGISTER_UPDATE_STATUS_OFFSET		(0x0768)
+
+/* PDP, REGISTER_UPDATE_STATUS, REGISTERS_UPDATED
+*/
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_MASK		(0x00000002)
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LSBMASK		(0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SHIFT		(1)
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LENGTH		(1)
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DBGCTRL_OFFSET		(0x076C)
+
+/* PDP, DBGCTRL, DBG_READ
+*/
+#define ODN_PDP_DBGCTRL_DBG_READ_MASK		(0x00000002)
+#define ODN_PDP_DBGCTRL_DBG_READ_LSBMASK		(0x00000001)
+#define ODN_PDP_DBGCTRL_DBG_READ_SHIFT		(1)
+#define ODN_PDP_DBGCTRL_DBG_READ_LENGTH		(1)
+#define ODN_PDP_DBGCTRL_DBG_READ_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DBGCTRL, DBG_ENAB
+*/
+#define ODN_PDP_DBGCTRL_DBG_ENAB_MASK		(0x00000001)
+#define ODN_PDP_DBGCTRL_DBG_ENAB_LSBMASK		(0x00000001)
+#define ODN_PDP_DBGCTRL_DBG_ENAB_SHIFT		(0)
+#define ODN_PDP_DBGCTRL_DBG_ENAB_LENGTH		(1)
+#define ODN_PDP_DBGCTRL_DBG_ENAB_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DBGDATA_R_OFFSET		(0x0770)
+
+/* PDP, DBGDATA_R, DBG_DATA_R
+*/
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_MASK		(0x000003FF)
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SHIFT		(0)
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LENGTH		(10)
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DBGDATA_GB_OFFSET		(0x0774)
+
+/* PDP, DBGDATA_GB, DBG_DATA_G
+*/
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_MASK		(0x03FF0000)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SHIFT		(16)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LENGTH		(10)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DBGDATA_GB, DBG_DATA_B
+*/
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_MASK		(0x000003FF)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_SHIFT		(0)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LENGTH		(10)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DBGSIDE_OFFSET				(0x0778)
+
+/* PDP, DBGSIDE, DBG_VAL
+*/
+#define ODN_PDP_DBGSIDE_DBG_VAL_MASK			(0x00000008)
+#define ODN_PDP_DBGSIDE_DBG_VAL_LSBMASK			(0x00000001)
+#define ODN_PDP_DBGSIDE_DBG_VAL_SHIFT			(3)
+#define ODN_PDP_DBGSIDE_DBG_VAL_LENGTH			(1)
+#define ODN_PDP_DBGSIDE_DBG_VAL_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, DBGSIDE, DBG_SIDE
+*/
+#define ODN_PDP_DBGSIDE_DBG_SIDE_MASK			(0x00000007)
+#define ODN_PDP_DBGSIDE_DBG_SIDE_LSBMASK		(0x00000007)
+#define ODN_PDP_DBGSIDE_DBG_SIDE_SHIFT			(0)
+#define ODN_PDP_DBGSIDE_DBG_SIDE_LENGTH			(3)
+#define ODN_PDP_DBGSIDE_DBG_SIDE_SIGNED_FIELD		IMG_FALSE
+
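+/*
+ * DBGCTRL/DBGDATA/DBGSIDE above look like a pixel-probe readback path:
+ * DBG_ENAB arms the probe, DBG_READ triggers a capture, the sampled
+ * 10-bit components come back in DBGDATA_R/DBGDATA_GB, and DBGSIDE's
+ * DBG_VAL flags a valid sample. This reading is inferred from the field
+ * names only and is not confirmed by this file.
+ */
+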
+#define ODN_PDP_OUTPUT_OFFSET				(0x077C)
+
+/* PDP, OUTPUT, EIGHT_BIT_OUTPUT
+*/
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_MASK		(0x00000002)
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LSBMASK		(0x00000001)
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SHIFT		(1)
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LENGTH		(1)
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OUTPUT, OUTPUT_CONFIG
+*/
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_MASK		(0x00000001)
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LSBMASK		(0x00000001)
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SHIFT		(0)
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LENGTH		(1)
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_SYNCCTRL_OFFSET				(0x0780)
+
+/* PDP, SYNCCTRL, SYNCACTIVE
+*/
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_MASK		(0x80000000)
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT		(31)
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, ODN_PDP_RST
+*/
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_MASK		(0x20000000)
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SHIFT		(29)
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, POWERDN
+*/
+#define ODN_PDP_SYNCCTRL_POWERDN_MASK			(0x10000000)
+#define ODN_PDP_SYNCCTRL_POWERDN_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_POWERDN_SHIFT			(28)
+#define ODN_PDP_SYNCCTRL_POWERDN_LENGTH			(1)
+#define ODN_PDP_SYNCCTRL_POWERDN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, SYNCCTRL, LOWPWRMODE
+*/
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_MASK		(0x08000000)
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SHIFT		(27)
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDSYNCTRL
+*/
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_MASK		(0x04000000)
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SHIFT		(26)
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDINTCTRL
+*/
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_MASK		(0x02000000)
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SHIFT		(25)
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDCTRL
+*/
+#define ODN_PDP_SYNCCTRL_UPDCTRL_MASK			(0x01000000)
+#define ODN_PDP_SYNCCTRL_UPDCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_UPDCTRL_SHIFT			(24)
+#define ODN_PDP_SYNCCTRL_UPDCTRL_LENGTH			(1)
+#define ODN_PDP_SYNCCTRL_UPDCTRL_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDWAIT
+*/
+#define ODN_PDP_SYNCCTRL_UPDWAIT_MASK			(0x000F0000)
+#define ODN_PDP_SYNCCTRL_UPDWAIT_LSBMASK		(0x0000000F)
+#define ODN_PDP_SYNCCTRL_UPDWAIT_SHIFT			(16)
+#define ODN_PDP_SYNCCTRL_UPDWAIT_LENGTH			(4)
+#define ODN_PDP_SYNCCTRL_UPDWAIT_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, SYNCCTRL, FIELD_EN
+*/
+#define ODN_PDP_SYNCCTRL_FIELD_EN_MASK			(0x00002000)
+#define ODN_PDP_SYNCCTRL_FIELD_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_FIELD_EN_SHIFT			(13)
+#define ODN_PDP_SYNCCTRL_FIELD_EN_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_FIELD_EN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, SYNCCTRL, CSYNC_EN
+*/
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_MASK			(0x00001000)
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_SHIFT			(12)
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, SYNCCTRL, CLKPOL
+*/
+#define ODN_PDP_SYNCCTRL_CLKPOL_MASK		(0x00000800)
+#define ODN_PDP_SYNCCTRL_CLKPOL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_CLKPOL_SHIFT		(11)
+#define ODN_PDP_SYNCCTRL_CLKPOL_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_CLKPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, VS_SLAVE
+*/
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_MASK		(0x00000080)
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_SHIFT		(7)
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, HS_SLAVE
+*/
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_MASK		(0x00000040)
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_SHIFT		(6)
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, BLNKPOL
+*/
+#define ODN_PDP_SYNCCTRL_BLNKPOL_MASK		(0x00000020)
+#define ODN_PDP_SYNCCTRL_BLNKPOL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_BLNKPOL_SHIFT		(5)
+#define ODN_PDP_SYNCCTRL_BLNKPOL_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_BLNKPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, BLNKDIS
+*/
+#define ODN_PDP_SYNCCTRL_BLNKDIS_MASK		(0x00000010)
+#define ODN_PDP_SYNCCTRL_BLNKDIS_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_BLNKDIS_SHIFT		(4)
+#define ODN_PDP_SYNCCTRL_BLNKDIS_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_BLNKDIS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, VSPOL
+*/
+#define ODN_PDP_SYNCCTRL_VSPOL_MASK		(0x00000008)
+#define ODN_PDP_SYNCCTRL_VSPOL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_VSPOL_SHIFT		(3)
+#define ODN_PDP_SYNCCTRL_VSPOL_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_VSPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, VSDIS
+*/
+#define ODN_PDP_SYNCCTRL_VSDIS_MASK		(0x00000004)
+#define ODN_PDP_SYNCCTRL_VSDIS_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_VSDIS_SHIFT		(2)
+#define ODN_PDP_SYNCCTRL_VSDIS_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_VSDIS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, HSPOL
+*/
+#define ODN_PDP_SYNCCTRL_HSPOL_MASK		(0x00000002)
+#define ODN_PDP_SYNCCTRL_HSPOL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_HSPOL_SHIFT		(1)
+#define ODN_PDP_SYNCCTRL_HSPOL_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_HSPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, HSDIS
+*/
+#define ODN_PDP_SYNCCTRL_HSDIS_MASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_HSDIS_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_HSDIS_SHIFT		(0)
+#define ODN_PDP_SYNCCTRL_HSDIS_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_HSDIS_SIGNED_FIELD	IMG_FALSE
+
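+/*
+ * Illustrative use of the SYNCCTRL fields above (a sketch only; the
+ * polarity semantics are assumptions from the field names, and write32
+ * is a hypothetical register accessor):
+ *
+ *	u32 v = 1 << ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT;
+ *	v |= 1 << ODN_PDP_SYNCCTRL_HSPOL_SHIFT;	 invert HSYNC if the
+ *	v |= 1 << ODN_PDP_SYNCCTRL_VSPOL_SHIFT;	 panel timing needs it
+ *	write32(ODN_PDP_SYNCCTRL_OFFSET, v);
+ */
+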
+#define ODN_PDP_HSYNC1_OFFSET			(0x0784)
+
+/* PDP, HSYNC1, HBPS
+*/
+#define ODN_PDP_HSYNC1_HBPS_MASK		(0x1FFF0000)
+#define ODN_PDP_HSYNC1_HBPS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HSYNC1_HBPS_SHIFT		(16)
+#define ODN_PDP_HSYNC1_HBPS_LENGTH		(13)
+#define ODN_PDP_HSYNC1_HBPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HSYNC1, HT
+*/
+#define ODN_PDP_HSYNC1_HT_MASK			(0x00001FFF)
+#define ODN_PDP_HSYNC1_HT_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HSYNC1_HT_SHIFT			(0)
+#define ODN_PDP_HSYNC1_HT_LENGTH		(13)
+#define ODN_PDP_HSYNC1_HT_SIGNED_FIELD		IMG_FALSE
+
+#define ODN_PDP_HSYNC2_OFFSET			(0x0788)
+
+/* PDP, HSYNC2, HAS
+*/
+#define ODN_PDP_HSYNC2_HAS_MASK			(0x1FFF0000)
+#define ODN_PDP_HSYNC2_HAS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HSYNC2_HAS_SHIFT		(16)
+#define ODN_PDP_HSYNC2_HAS_LENGTH		(13)
+#define ODN_PDP_HSYNC2_HAS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HSYNC2, HLBS
+*/
+#define ODN_PDP_HSYNC2_HLBS_MASK		(0x00001FFF)
+#define ODN_PDP_HSYNC2_HLBS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HSYNC2_HLBS_SHIFT		(0)
+#define ODN_PDP_HSYNC2_HLBS_LENGTH		(13)
+#define ODN_PDP_HSYNC2_HLBS_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_HSYNC3_OFFSET			(0x078C)
+
+/* PDP, HSYNC3, HFPS
+*/
+#define ODN_PDP_HSYNC3_HFPS_MASK		(0x1FFF0000)
+#define ODN_PDP_HSYNC3_HFPS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HSYNC3_HFPS_SHIFT		(16)
+#define ODN_PDP_HSYNC3_HFPS_LENGTH		(13)
+#define ODN_PDP_HSYNC3_HFPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HSYNC3, HRBS
+*/
+#define ODN_PDP_HSYNC3_HRBS_MASK		(0x00001FFF)
+#define ODN_PDP_HSYNC3_HRBS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HSYNC3_HRBS_SHIFT		(0)
+#define ODN_PDP_HSYNC3_HRBS_LENGTH		(13)
+#define ODN_PDP_HSYNC3_HRBS_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VSYNC1_OFFSET			(0x0790)
+
+/* PDP, VSYNC1, VBPS
+*/
+#define ODN_PDP_VSYNC1_VBPS_MASK		(0x1FFF0000)
+#define ODN_PDP_VSYNC1_VBPS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VSYNC1_VBPS_SHIFT		(16)
+#define ODN_PDP_VSYNC1_VBPS_LENGTH		(13)
+#define ODN_PDP_VSYNC1_VBPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VSYNC1, VT
+*/
+#define ODN_PDP_VSYNC1_VT_MASK			(0x00001FFF)
+#define ODN_PDP_VSYNC1_VT_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VSYNC1_VT_SHIFT			(0)
+#define ODN_PDP_VSYNC1_VT_LENGTH		(13)
+#define ODN_PDP_VSYNC1_VT_SIGNED_FIELD		IMG_FALSE
+
+#define ODN_PDP_VSYNC2_OFFSET			(0x0794)
+
+/* PDP, VSYNC2, VAS
+*/
+#define ODN_PDP_VSYNC2_VAS_MASK			(0x1FFF0000)
+#define ODN_PDP_VSYNC2_VAS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VSYNC2_VAS_SHIFT		(16)
+#define ODN_PDP_VSYNC2_VAS_LENGTH		(13)
+#define ODN_PDP_VSYNC2_VAS_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VSYNC2, VTBS
+*/
+#define ODN_PDP_VSYNC2_VTBS_MASK		(0x00001FFF)
+#define ODN_PDP_VSYNC2_VTBS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VSYNC2_VTBS_SHIFT		(0)
+#define ODN_PDP_VSYNC2_VTBS_LENGTH		(13)
+#define ODN_PDP_VSYNC2_VTBS_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VSYNC3_OFFSET			(0x0798)
+
+/* PDP, VSYNC3, VFPS
+*/
+#define ODN_PDP_VSYNC3_VFPS_MASK		(0x1FFF0000)
+#define ODN_PDP_VSYNC3_VFPS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VSYNC3_VFPS_SHIFT		(16)
+#define ODN_PDP_VSYNC3_VFPS_LENGTH		(13)
+#define ODN_PDP_VSYNC3_VFPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VSYNC3, VBBS
+*/
+#define ODN_PDP_VSYNC3_VBBS_MASK		(0x00001FFF)
+#define ODN_PDP_VSYNC3_VBBS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VSYNC3_VBBS_SHIFT		(0)
+#define ODN_PDP_VSYNC3_VBBS_LENGTH		(13)
+#define ODN_PDP_VSYNC3_VBBS_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_INTSTAT_OFFSET			(0x079C)
+
+/* PDP, INTSTAT, INTS_VID4ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_MASK		(0x00080000)
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SHIFT		(19)
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID3ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_MASK		(0x00040000)
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SHIFT		(18)
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID2ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_MASK		(0x00020000)
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SHIFT		(17)
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID1ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_MASK		(0x00010000)
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SHIFT		(16)
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH4ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_MASK		(0x00008000)
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SHIFT		(15)
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH3ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_MASK		(0x00004000)
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SHIFT		(14)
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH2ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_MASK		(0x00002000)
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SHIFT		(13)
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH1ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_MASK		(0x00001000)
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SHIFT		(12)
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID4URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_MASK		(0x00000800)
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_SHIFT		(11)
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID3URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_MASK		(0x00000400)
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_SHIFT		(10)
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID2URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_MASK		(0x00000200)
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_SHIFT		(9)
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID1URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_MASK		(0x00000100)
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_SHIFT		(8)
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH4URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_MASK		(0x00000080)
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SHIFT		(7)
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH3URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_MASK		(0x00000040)
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SHIFT		(6)
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH2URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_MASK		(0x00000020)
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SHIFT		(5)
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH1URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_MASK		(0x00000010)
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SHIFT		(4)
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VBLNK1
+*/
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_MASK		(0x00000008)
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_SHIFT		(3)
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VBLNK0
+*/
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_MASK		(0x00000004)
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_SHIFT		(2)
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_HBLNK1
+*/
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_MASK		(0x00000002)
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_SHIFT		(1)
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_HBLNK0
+*/
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_MASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_SHIFT		(0)
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_INTENAB_OFFSET				(0x07A0)
+
+/* PDP, INTENAB, INTEN_VID4ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_MASK		(0x00080000)
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SHIFT		(19)
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID3ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_MASK		(0x00040000)
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SHIFT		(18)
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID2ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_MASK		(0x00020000)
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SHIFT		(17)
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID1ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_MASK		(0x00010000)
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SHIFT		(16)
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH4ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_MASK		(0x00008000)
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SHIFT		(15)
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH3ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_MASK		(0x00004000)
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SHIFT		(14)
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH2ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_MASK		(0x00002000)
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SHIFT		(13)
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH1ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_MASK		(0x00001000)
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SHIFT		(12)
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID4URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_MASK		(0x00000800)
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_SHIFT		(11)
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID3URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_MASK		(0x00000400)
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_SHIFT		(10)
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID2URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_MASK		(0x00000200)
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_SHIFT		(9)
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID1URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_MASK		(0x00000100)
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_SHIFT		(8)
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH4URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_MASK		(0x00000080)
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SHIFT		(7)
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH3URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_MASK		(0x00000040)
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SHIFT		(6)
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH2URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_MASK		(0x00000020)
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SHIFT		(5)
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH1URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_MASK		(0x00000010)
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SHIFT		(4)
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VBLNK1
+*/
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_MASK		(0x00000008)
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_SHIFT		(3)
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VBLNK0
+*/
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_MASK		(0x00000004)
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT		(2)
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_HBLNK1
+*/
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_MASK		(0x00000002)
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_SHIFT		(1)
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_HBLNK0
+*/
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_MASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_SHIFT		(0)
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_INTCLR_OFFSET		(0x07A4)
+
+/* PDP, INTCLR, INTCLR_VID4ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_MASK		(0x00080000)
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SHIFT		(19)
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID3ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_MASK		(0x00040000)
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SHIFT		(18)
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID2ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_MASK		(0x00020000)
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SHIFT		(17)
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID1ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_MASK		(0x00010000)
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SHIFT		(16)
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH4ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_MASK		(0x00008000)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SHIFT		(15)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH3ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_MASK		(0x00004000)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SHIFT		(14)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH2ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_MASK		(0x00002000)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SHIFT		(13)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH1ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_MASK		(0x00001000)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SHIFT		(12)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID4URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_MASK		(0x00000800)
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SHIFT		(11)
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID3URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_MASK		(0x00000400)
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SHIFT		(10)
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID2URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_MASK		(0x00000200)
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SHIFT		(9)
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID1URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_MASK		(0x00000100)
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SHIFT		(8)
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH4URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_MASK		(0x00000080)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SHIFT		(7)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH3URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_MASK		(0x00000040)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SHIFT		(6)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH2URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_MASK		(0x00000020)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SHIFT		(5)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH1URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_MASK		(0x00000010)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SHIFT		(4)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VBLNK1
+*/
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_MASK		(0x00000008)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SHIFT		(3)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VBLNK0
+*/
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_MASK		(0x00000004)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SHIFT		(2)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_HBLNK1
+*/
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_MASK		(0x00000002)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SHIFT		(1)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_HBLNK0
+*/
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_MASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SHIFT		(0)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SIGNED_FIELD	IMG_FALSE
+
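+/*
+ * INTSTAT, INTENAB and INTCLR above use matching bit positions for every
+ * source (e.g. INTS_VBLNK0, INTEN_VBLNK0 and INTCLR_VBLNK0 all sit at
+ * bit 2), the usual status/enable/clear layout. Sketch of a handler,
+ * assuming hypothetical read32/write32 accessors and write-one-to-clear
+ * INTCLR semantics (suggested by the name, not confirmed here):
+ *
+ *	u32 stat = read32(ODN_PDP_INTSTAT_OFFSET) &
+ *		   read32(ODN_PDP_INTENAB_OFFSET);
+ *	if (stat & ODN_PDP_INTSTAT_INTS_VBLNK0_MASK)
+ *		;	handle the pipe-0 vblank here
+ *	write32(ODN_PDP_INTCLR_OFFSET, stat);
+ */
+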
+#define ODN_PDP_MEMCTRL_OFFSET		(0x07A8)
+
+/* PDP, MEMCTRL, MEMREFRESH
+*/
+#define ODN_PDP_MEMCTRL_MEMREFRESH_MASK		(0xC0000000)
+#define ODN_PDP_MEMCTRL_MEMREFRESH_LSBMASK		(0x00000003)
+#define ODN_PDP_MEMCTRL_MEMREFRESH_SHIFT		(30)
+#define ODN_PDP_MEMCTRL_MEMREFRESH_LENGTH		(2)
+#define ODN_PDP_MEMCTRL_MEMREFRESH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, MEMCTRL, BURSTLEN
+*/
+#define ODN_PDP_MEMCTRL_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_MEMCTRL_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_MEMCTRL_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_MEMCTRL_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_MEMCTRL_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_MEM_THRESH_OFFSET		(0x07AC)
+
+/* PDP, MEM_THRESH, UVTHRESHOLD
+*/
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, MEM_THRESH, YTHRESHOLD
+*/
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, MEM_THRESH, THRESHOLD
+*/
+#define ODN_PDP_MEM_THRESH_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_MEM_THRESH_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_MEM_THRESH_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_MEM_THRESH_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_MEM_THRESH_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_ALTERNATE_3D_CTRL_OFFSET		(0x07B0)
+
+/* PDP, ALTERNATE_3D_CTRL, ALT3D_ON
+*/
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_MASK		(0x00000010)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LSBMASK		(0x00000001)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SHIFT		(4)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LENGTH		(1)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, ALTERNATE_3D_CTRL, ALT3D_BLENDSEL
+*/
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_MASK		(0x00000007)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LSBMASK		(0x00000007)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SHIFT		(0)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LENGTH		(3)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SIGNED_FIELD	IMG_FALSE
+
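+/*
+ * The gamma ramp that follows is laid out as consecutive R/GB register
+ * pairs with an 8-byte stride (GAMMAn_R at 0x07B4 + 8*n, GAMMAn_GB at
+ * 0x07B8 + 8*n), so a LUT upload can be written as a loop instead of
+ * naming each register. Illustrative sketch only (write32, lut[] and
+ * NUM_GAMMA_ENTRIES are assumptions):
+ *
+ *	for (i = 0; i < NUM_GAMMA_ENTRIES; i++) {
+ *		write32(ODN_PDP_GAMMA0_R_OFFSET + 8 * i, lut[i].r);
+ *		write32(ODN_PDP_GAMMA0_GB_OFFSET + 8 * i,
+ *			(lut[i].g << ODN_PDP_GAMMA0_GB_GAMMA0_G_SHIFT) |
+ *			lut[i].b);
+ *	}
+ */
+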
+#define ODN_PDP_GAMMA0_R_OFFSET		(0x07B4)
+
+/* PDP, GAMMA0_R, GAMMA0_R
+*/
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_SHIFT		(0)
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_LENGTH		(10)
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA0_GB_OFFSET		(0x07B8)
+
+/* PDP, GAMMA0_GB, GAMMA0_G
+*/
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SHIFT		(16)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LENGTH		(10)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA0_GB, GAMMA0_B
+*/
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SHIFT		(0)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LENGTH		(10)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA1_R_OFFSET		(0x07BC)
+
+/* PDP, GAMMA1_R, GAMMA1_R
+*/
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_SHIFT		(0)
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_LENGTH		(10)
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA1_GB_OFFSET		(0x07C0)
+
+/* PDP, GAMMA1_GB, GAMMA1_G
+*/
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SHIFT		(16)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LENGTH		(10)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA1_GB, GAMMA1_B
+*/
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SHIFT		(0)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LENGTH		(10)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA2_R_OFFSET		(0x07C4)
+
+/* PDP, GAMMA2_R, GAMMA2_R
+*/
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_SHIFT		(0)
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_LENGTH		(10)
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA2_GB_OFFSET		(0x07C8)
+
+/* PDP, GAMMA2_GB, GAMMA2_G
+*/
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SHIFT		(16)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_LENGTH		(10)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA2_GB, GAMMA2_B
+*/
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SHIFT		(0)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LENGTH		(10)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA3_R_OFFSET		(0x07CC)
+
+/* PDP, GAMMA3_R, GAMMA3_R
+*/
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_SHIFT		(0)
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_LENGTH		(10)
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA3_GB_OFFSET		(0x07D0)
+
+/* PDP, GAMMA3_GB, GAMMA3_G
+*/
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SHIFT		(16)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LENGTH		(10)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA3_GB, GAMMA3_B
+*/
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SHIFT		(0)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LENGTH		(10)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA4_R_OFFSET		(0x07D4)
+
+/* PDP, GAMMA4_R, GAMMA4_R
+*/
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_SHIFT		(0)
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_LENGTH		(10)
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA4_GB_OFFSET		(0x07D8)
+
+/* PDP, GAMMA4_GB, GAMMA4_G
+*/
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SHIFT		(16)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LENGTH		(10)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA4_GB, GAMMA4_B
+*/
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SHIFT		(0)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LENGTH		(10)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA5_R_OFFSET		(0x07DC)
+
+/* PDP, GAMMA5_R, GAMMA5_R
+*/
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_SHIFT		(0)
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_LENGTH		(10)
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA5_GB_OFFSET		(0x07E0)
+
+/* PDP, GAMMA5_GB, GAMMA5_G
+*/
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SHIFT		(16)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LENGTH		(10)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA5_GB, GAMMA5_B
+*/
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SHIFT		(0)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LENGTH		(10)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA6_R_OFFSET		(0x07E4)
+
+/* PDP, GAMMA6_R, GAMMA6_R
+*/
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_SHIFT		(0)
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_LENGTH		(10)
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA6_GB_OFFSET		(0x07E8)
+
+/* PDP, GAMMA6_GB, GAMMA6_G
+*/
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SHIFT		(16)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LENGTH		(10)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA6_GB, GAMMA6_B
+*/
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SHIFT		(0)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LENGTH		(10)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA7_R_OFFSET		(0x07EC)
+
+/* PDP, GAMMA7_R, GAMMA7_R
+*/
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_SHIFT		(0)
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_LENGTH		(10)
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA7_GB_OFFSET		(0x07F0)
+
+/* PDP, GAMMA7_GB, GAMMA7_G
+*/
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SHIFT		(16)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LENGTH		(10)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA7_GB, GAMMA7_B
+*/
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SHIFT		(0)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LENGTH		(10)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA8_R_OFFSET		(0x07F4)
+
+/* PDP, GAMMA8_R, GAMMA8_R
+*/
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_SHIFT		(0)
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_LENGTH		(10)
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA8_GB_OFFSET		(0x07F8)
+
+/* PDP, GAMMA8_GB, GAMMA8_G
+*/
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SHIFT		(16)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LENGTH		(10)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA8_GB, GAMMA8_B
+*/
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SHIFT		(0)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LENGTH		(10)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA9_R_OFFSET		(0x07FC)
+
+/* PDP, GAMMA9_R, GAMMA9_R
+*/
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_SHIFT		(0)
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_LENGTH		(10)
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA9_GB_OFFSET		(0x0800)
+
+/* PDP, GAMMA9_GB, GAMMA9_G
+*/
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SHIFT		(16)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_LENGTH		(10)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA9_GB, GAMMA9_B
+*/
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SHIFT		(0)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LENGTH		(10)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA10_R_OFFSET		(0x0804)
+
+/* PDP, GAMMA10_R, GAMMA10_R
+*/
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_SHIFT		(0)
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_LENGTH		(10)
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA10_GB_OFFSET		(0x0808)
+
+/* PDP, GAMMA10_GB, GAMMA10_G
+*/
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SHIFT		(16)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LENGTH		(10)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA10_GB, GAMMA10_B
+*/
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SHIFT		(0)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LENGTH		(10)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA11_R_OFFSET		(0x080C)
+
+/* PDP, GAMMA11_R, GAMMA11_R
+*/
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_SHIFT		(0)
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_LENGTH		(10)
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA11_GB_OFFSET		(0x0810)
+
+/* PDP, GAMMA11_GB, GAMMA11_G
+*/
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SHIFT		(16)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LENGTH		(10)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA11_GB, GAMMA11_B
+*/
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SHIFT		(0)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LENGTH		(10)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA12_R_OFFSET		(0x0814)
+
+/* PDP, GAMMA12_R, GAMMA12_R
+*/
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_SHIFT		(0)
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_LENGTH		(10)
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA12_GB_OFFSET		(0x0818)
+
+/* PDP, GAMMA12_GB, GAMMA12_G
+*/
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SHIFT		(16)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LENGTH		(10)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA12_GB, GAMMA12_B
+*/
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SHIFT		(0)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LENGTH		(10)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA13_R_OFFSET		(0x081C)
+
+/* PDP, GAMMA13_R, GAMMA13_R
+*/
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_SHIFT		(0)
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_LENGTH		(10)
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA13_GB_OFFSET		(0x0820)
+
+/* PDP, GAMMA13_GB, GAMMA13_G
+*/
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SHIFT		(16)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LENGTH		(10)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA13_GB, GAMMA13_B
+*/
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SHIFT		(0)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LENGTH		(10)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA14_R_OFFSET		(0x0824)
+
+/* PDP, GAMMA14_R, GAMMA14_R
+*/
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_SHIFT		(0)
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_LENGTH		(10)
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA14_GB_OFFSET		(0x0828)
+
+/* PDP, GAMMA14_GB, GAMMA14_G
+*/
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SHIFT		(16)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LENGTH		(10)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA14_GB, GAMMA14_B
+*/
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SHIFT		(0)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LENGTH		(10)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA15_R_OFFSET		(0x082C)
+
+/* PDP, GAMMA15_R, GAMMA15_R
+*/
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_SHIFT		(0)
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_LENGTH		(10)
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA15_GB_OFFSET		(0x0830)
+
+/* PDP, GAMMA15_GB, GAMMA15_G
+*/
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SHIFT		(16)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LENGTH		(10)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA15_GB, GAMMA15_B
+*/
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SHIFT		(0)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LENGTH		(10)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA16_R_OFFSET		(0x0834)
+
+/* PDP, GAMMA16_R, GAMMA16_R
+*/
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_SHIFT		(0)
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_LENGTH		(10)
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA16_GB_OFFSET		(0x0838)
+
+/* PDP, GAMMA16_GB, GAMMA16_G
+*/
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SHIFT		(16)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LENGTH		(10)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA16_GB, GAMMA16_B
+*/
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SHIFT		(0)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LENGTH		(10)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA17_R_OFFSET		(0x083C)
+
+/* PDP, GAMMA17_R, GAMMA17_R
+*/
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_SHIFT		(0)
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_LENGTH		(10)
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA17_GB_OFFSET		(0x0840)
+
+/* PDP, GAMMA17_GB, GAMMA17_G
+*/
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SHIFT		(16)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LENGTH		(10)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA17_GB, GAMMA17_B
+*/
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SHIFT		(0)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LENGTH		(10)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA18_R_OFFSET		(0x0844)
+
+/* PDP, GAMMA18_R, GAMMA18_R
+*/
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_SHIFT		(0)
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_LENGTH		(10)
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA18_GB_OFFSET		(0x0848)
+
+/* PDP, GAMMA18_GB, GAMMA18_G
+*/
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SHIFT		(16)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LENGTH		(10)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA18_GB, GAMMA18_B
+*/
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SHIFT		(0)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LENGTH		(10)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA19_R_OFFSET		(0x084C)
+
+/* PDP, GAMMA19_R, GAMMA19_R
+*/
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_SHIFT		(0)
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_LENGTH		(10)
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA19_GB_OFFSET		(0x0850)
+
+/* PDP, GAMMA19_GB, GAMMA19_G
+*/
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SHIFT		(16)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LENGTH		(10)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA19_GB, GAMMA19_B
+*/
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SHIFT		(0)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LENGTH		(10)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA20_R_OFFSET		(0x0854)
+
+/* PDP, GAMMA20_R, GAMMA20_R
+*/
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_SHIFT		(0)
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_LENGTH		(10)
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA20_GB_OFFSET		(0x0858)
+
+/* PDP, GAMMA20_GB, GAMMA20_G
+*/
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SHIFT		(16)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LENGTH		(10)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA20_GB, GAMMA20_B
+*/
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SHIFT		(0)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LENGTH		(10)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA21_R_OFFSET		(0x085C)
+
+/* PDP, GAMMA21_R, GAMMA21_R
+*/
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_SHIFT		(0)
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_LENGTH		(10)
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA21_GB_OFFSET		(0x0860)
+
+/* PDP, GAMMA21_GB, GAMMA21_G
+*/
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SHIFT		(16)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LENGTH		(10)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA21_GB, GAMMA21_B
+*/
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SHIFT		(0)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LENGTH		(10)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA22_R_OFFSET		(0x0864)
+
+/* PDP, GAMMA22_R, GAMMA22_R
+*/
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_SHIFT		(0)
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_LENGTH		(10)
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA22_GB_OFFSET		(0x0868)
+
+/* PDP, GAMMA22_GB, GAMMA22_G
+*/
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SHIFT		(16)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LENGTH		(10)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA22_GB, GAMMA22_B
+*/
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SHIFT		(0)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LENGTH		(10)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA23_R_OFFSET		(0x086C)
+
+/* PDP, GAMMA23_R, GAMMA23_R
+*/
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_SHIFT		(0)
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_LENGTH		(10)
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA23_GB_OFFSET		(0x0870)
+
+/* PDP, GAMMA23_GB, GAMMA23_G
+*/
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SHIFT		(16)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LENGTH		(10)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA23_GB, GAMMA23_B
+*/
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SHIFT		(0)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LENGTH		(10)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA24_R_OFFSET		(0x0874)
+
+/* PDP, GAMMA24_R, GAMMA24_R
+*/
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_SHIFT		(0)
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_LENGTH		(10)
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA24_GB_OFFSET		(0x0878)
+
+/* PDP, GAMMA24_GB, GAMMA24_G
+*/
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SHIFT		(16)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LENGTH		(10)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA24_GB, GAMMA24_B
+*/
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SHIFT		(0)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LENGTH		(10)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA25_R_OFFSET		(0x087C)
+
+/* PDP, GAMMA25_R, GAMMA25_R
+*/
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_SHIFT		(0)
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_LENGTH		(10)
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA25_GB_OFFSET		(0x0880)
+
+/* PDP, GAMMA25_GB, GAMMA25_G
+*/
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SHIFT		(16)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LENGTH		(10)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA25_GB, GAMMA25_B
+*/
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SHIFT		(0)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LENGTH		(10)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA26_R_OFFSET		(0x0884)
+
+/* PDP, GAMMA26_R, GAMMA26_R
+*/
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_SHIFT		(0)
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_LENGTH		(10)
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA26_GB_OFFSET		(0x0888)
+
+/* PDP, GAMMA26_GB, GAMMA26_G
+*/
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_SHIFT		(16)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LENGTH		(10)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA26_GB, GAMMA26_B
+*/
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SHIFT		(0)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LENGTH		(10)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA27_R_OFFSET		(0x088C)
+
+/* PDP, GAMMA27_R, GAMMA27_R
+*/
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_SHIFT		(0)
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_LENGTH		(10)
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA27_GB_OFFSET		(0x0890)
+
+/* PDP, GAMMA27_GB, GAMMA27_G
+*/
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SHIFT		(16)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LENGTH		(10)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA27_GB, GAMMA27_B
+*/
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SHIFT		(0)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LENGTH		(10)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA28_R_OFFSET		(0x0894)
+
+/* PDP, GAMMA28_R, GAMMA28_R
+*/
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_SHIFT		(0)
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_LENGTH		(10)
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA28_GB_OFFSET		(0x0898)
+
+/* PDP, GAMMA28_GB, GAMMA28_G
+*/
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SHIFT		(16)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LENGTH		(10)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA28_GB, GAMMA28_B
+*/
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SHIFT		(0)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LENGTH		(10)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA29_R_OFFSET		(0x089C)
+
+/* PDP, GAMMA29_R, GAMMA29_R
+*/
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_SHIFT		(0)
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_LENGTH		(10)
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA29_GB_OFFSET		(0x08A0)
+
+/* PDP, GAMMA29_GB, GAMMA29_G
+*/
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SHIFT		(16)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LENGTH		(10)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA29_GB, GAMMA29_B
+*/
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SHIFT		(0)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LENGTH		(10)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA30_R_OFFSET		(0x08A4)
+
+/* PDP, GAMMA30_R, GAMMA30_R
+*/
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_SHIFT		(0)
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_LENGTH		(10)
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA30_GB_OFFSET		(0x08A8)
+
+/* PDP, GAMMA30_GB, GAMMA30_G
+*/
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SHIFT		(16)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LENGTH		(10)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA30_GB, GAMMA30_B
+*/
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SHIFT		(0)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LENGTH		(10)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA31_R_OFFSET		(0x08AC)
+
+/* PDP, GAMMA31_R, GAMMA31_R
+*/
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_SHIFT		(0)
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_LENGTH		(10)
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA31_GB_OFFSET		(0x08B0)
+
+/* PDP, GAMMA31_GB, GAMMA31_G
+*/
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SHIFT		(16)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LENGTH		(10)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA31_GB, GAMMA31_B
+*/
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SHIFT		(0)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LENGTH		(10)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA32_R_OFFSET		(0x08B4)
+
+/* PDP, GAMMA32_R, GAMMA32_R
+*/
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_SHIFT		(0)
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_LENGTH		(10)
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA32_GB_OFFSET		(0x08B8)
+
+/* PDP, GAMMA32_GB, GAMMA32_G
+*/
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SHIFT		(16)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LENGTH		(10)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA32_GB, GAMMA32_B
+*/
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SHIFT		(0)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LENGTH		(10)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VEVENT_OFFSET				(0x08BC)
+
+/* PDP, VEVENT, VEVENT
+*/
+#define ODN_PDP_VEVENT_VEVENT_MASK			(0x1FFF0000)
+#define ODN_PDP_VEVENT_VEVENT_LSBMASK			(0x00001FFF)
+#define ODN_PDP_VEVENT_VEVENT_SHIFT			(16)
+#define ODN_PDP_VEVENT_VEVENT_LENGTH			(13)
+#define ODN_PDP_VEVENT_VEVENT_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VEVENT, VFETCH
+*/
+#define ODN_PDP_VEVENT_VFETCH_MASK			(0x00001FFF)
+#define ODN_PDP_VEVENT_VFETCH_LSBMASK			(0x00001FFF)
+#define ODN_PDP_VEVENT_VFETCH_SHIFT			(0)
+#define ODN_PDP_VEVENT_VFETCH_LENGTH			(13)
+#define ODN_PDP_VEVENT_VFETCH_SIGNED_FIELD		IMG_FALSE
+
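+/* Usage note: every field above follows the same pattern -- for a field
+ * F in register R, R_F_MASK selects the field in place, R_F_LSBMASK is
+ * the mask once the value is shifted down to bit 0, and R_F_SHIFT /
+ * R_F_LENGTH give its position and width. A minimal sketch of how such
+ * defines are typically consumed (the PDP_FIELD_GET/PDP_FIELD_SET
+ * helpers and the pdp_base pointer are illustrative, not part of this
+ * header):
+ *
+ *   #define PDP_FIELD_GET(v, R, F) \
+ *       (((v) & R##_##F##_MASK) >> R##_##F##_SHIFT)
+ *   #define PDP_FIELD_SET(v, R, F, x) \
+ *       (((v) & ~R##_##F##_MASK) | \
+ *        (((u32)(x) & R##_##F##_LSBMASK) << R##_##F##_SHIFT))
+ *
+ *   u32 reg = ioread32(pdp_base + ODN_PDP_VEVENT_OFFSET);
+ *   u32 vevent = PDP_FIELD_GET(reg, ODN_PDP_VEVENT, VEVENT);
+ *   reg = PDP_FIELD_SET(reg, ODN_PDP_VEVENT, VFETCH, new_vfetch);
+ *   iowrite32(reg, pdp_base + ODN_PDP_VEVENT_OFFSET);
+ */
+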
+#define ODN_PDP_HDECTRL_OFFSET				(0x08C0)
+
+/* PDP, HDECTRL, HDES
+*/
+#define ODN_PDP_HDECTRL_HDES_MASK		(0x1FFF0000)
+#define ODN_PDP_HDECTRL_HDES_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HDECTRL_HDES_SHIFT		(16)
+#define ODN_PDP_HDECTRL_HDES_LENGTH		(13)
+#define ODN_PDP_HDECTRL_HDES_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HDECTRL, HDEF
+*/
+#define ODN_PDP_HDECTRL_HDEF_MASK		(0x00001FFF)
+#define ODN_PDP_HDECTRL_HDEF_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HDECTRL_HDEF_SHIFT		(0)
+#define ODN_PDP_HDECTRL_HDEF_LENGTH		(13)
+#define ODN_PDP_HDECTRL_HDEF_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VDECTRL_OFFSET			(0x08C4)
+
+/* PDP, VDECTRL, VDES
+*/
+#define ODN_PDP_VDECTRL_VDES_MASK		(0x1FFF0000)
+#define ODN_PDP_VDECTRL_VDES_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VDECTRL_VDES_SHIFT		(16)
+#define ODN_PDP_VDECTRL_VDES_LENGTH		(13)
+#define ODN_PDP_VDECTRL_VDES_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VDECTRL, VDEF
+*/
+#define ODN_PDP_VDECTRL_VDEF_MASK		(0x00001FFF)
+#define ODN_PDP_VDECTRL_VDEF_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VDECTRL_VDEF_SHIFT		(0)
+#define ODN_PDP_VDECTRL_VDEF_LENGTH		(13)
+#define ODN_PDP_VDECTRL_VDEF_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_OPMASK_R_OFFSET			(0x08C8)
+
+/* PDP, OPMASK_R, MASKLEVEL
+*/
+#define ODN_PDP_OPMASK_R_MASKLEVEL_MASK		(0x80000000)
+#define ODN_PDP_OPMASK_R_MASKLEVEL_LSBMASK		(0x00000001)
+#define ODN_PDP_OPMASK_R_MASKLEVEL_SHIFT		(31)
+#define ODN_PDP_OPMASK_R_MASKLEVEL_LENGTH		(1)
+#define ODN_PDP_OPMASK_R_MASKLEVEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OPMASK_R, BLANKLEVEL
+*/
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_MASK		(0x40000000)
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_LSBMASK		(0x00000001)
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_SHIFT		(30)
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_LENGTH		(1)
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OPMASK_R, MASKR
+*/
+#define ODN_PDP_OPMASK_R_MASKR_MASK		(0x000003FF)
+#define ODN_PDP_OPMASK_R_MASKR_LSBMASK		(0x000003FF)
+#define ODN_PDP_OPMASK_R_MASKR_SHIFT		(0)
+#define ODN_PDP_OPMASK_R_MASKR_LENGTH		(10)
+#define ODN_PDP_OPMASK_R_MASKR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_OPMASK_GB_OFFSET		(0x08CC)
+
+/* PDP, OPMASK_GB, MASKG
+*/
+#define ODN_PDP_OPMASK_GB_MASKG_MASK		(0x03FF0000)
+#define ODN_PDP_OPMASK_GB_MASKG_LSBMASK		(0x000003FF)
+#define ODN_PDP_OPMASK_GB_MASKG_SHIFT		(16)
+#define ODN_PDP_OPMASK_GB_MASKG_LENGTH		(10)
+#define ODN_PDP_OPMASK_GB_MASKG_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OPMASK_GB, MASKB
+*/
+#define ODN_PDP_OPMASK_GB_MASKB_MASK		(0x000003FF)
+#define ODN_PDP_OPMASK_GB_MASKB_LSBMASK		(0x000003FF)
+#define ODN_PDP_OPMASK_GB_MASKB_SHIFT		(0)
+#define ODN_PDP_OPMASK_GB_MASKB_LENGTH		(10)
+#define ODN_PDP_OPMASK_GB_MASKB_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_REGLD_ADDR_CTRL_OFFSET		(0x08D0)
+
+/* PDP, REGLD_ADDR_CTRL, REGLD_ADDRIN
+*/
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_MASK		(0xFFFFFFF0)
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LSBMASK		(0x0FFFFFFF)
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SHIFT		(4)
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LENGTH		(28)
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_REGLD_ADDR_STAT_OFFSET		(0x08D4)
+
+/* PDP, REGLD_ADDR_STAT, REGLD_ADDROUT
+*/
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_MASK		(0xFFFFFFF0)
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LSBMASK		(0x0FFFFFFF)
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SHIFT		(4)
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LENGTH		(28)
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_REGLD_STAT_OFFSET		(0x08D8)
+
+/* PDP, REGLD_STAT, REGLD_ADDREN
+*/
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_MASK		(0x00800000)
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LSBMASK		(0x00000001)
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SHIFT		(23)
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LENGTH		(1)
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_REGLD_CTRL_OFFSET		(0x08DC)
+
+/* PDP, REGLD_CTRL, REGLD_ADDRLEN
+*/
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_MASK		(0xFF000000)
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LSBMASK	(0x000000FF)
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SHIFT		(24)
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LENGTH		(8)
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, REGLD_CTRL, REGLD_VAL
+*/
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_MASK		(0x00800000)
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LSBMASK		(0x00000001)
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SHIFT		(23)
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LENGTH		(1)
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_UPDCTRL_OFFSET			(0x08E0)
+
+/* PDP, UPDCTRL, UPDFIELD
+*/
+#define ODN_PDP_UPDCTRL_UPDFIELD_MASK		(0x00000001)
+#define ODN_PDP_UPDCTRL_UPDFIELD_LSBMASK	(0x00000001)
+#define ODN_PDP_UPDCTRL_UPDFIELD_SHIFT		(0)
+#define ODN_PDP_UPDCTRL_UPDFIELD_LENGTH		(1)
+#define ODN_PDP_UPDCTRL_UPDFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_INTCTRL_OFFSET			(0x08E4)
+
+/* PDP, INTCTRL, HBLNK_LINE
+*/
+#define ODN_PDP_INTCTRL_HBLNK_LINE_MASK		(0x00010000)
+#define ODN_PDP_INTCTRL_HBLNK_LINE_LSBMASK	(0x00000001)
+#define ODN_PDP_INTCTRL_HBLNK_LINE_SHIFT	(16)
+#define ODN_PDP_INTCTRL_HBLNK_LINE_LENGTH	(1)
+#define ODN_PDP_INTCTRL_HBLNK_LINE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCTRL, HBLNK_LINENO
+*/
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_MASK	(0x00001FFF)
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_LSBMASK	(0x00001FFF)
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_SHIFT	(0)
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_LENGTH	(13)
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PDISETUP_OFFSET		(0x0900)
+
+/* PDP, PDISETUP, PDI_BLNKLVL
+*/
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_MASK		(0x00000040)
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SHIFT		(6)
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_BLNK
+*/
+#define ODN_PDP_PDISETUP_PDI_BLNK_MASK		(0x00000020)
+#define ODN_PDP_PDISETUP_PDI_BLNK_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_BLNK_SHIFT		(5)
+#define ODN_PDP_PDISETUP_PDI_BLNK_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_BLNK_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_PWR
+*/
+#define ODN_PDP_PDISETUP_PDI_PWR_MASK		(0x00000010)
+#define ODN_PDP_PDISETUP_PDI_PWR_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_PWR_SHIFT		(4)
+#define ODN_PDP_PDISETUP_PDI_PWR_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_PWR_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_EN
+*/
+#define ODN_PDP_PDISETUP_PDI_EN_MASK		(0x00000008)
+#define ODN_PDP_PDISETUP_PDI_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_EN_SHIFT		(3)
+#define ODN_PDP_PDISETUP_PDI_EN_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_GDEN
+*/
+#define ODN_PDP_PDISETUP_PDI_GDEN_MASK		(0x00000004)
+#define ODN_PDP_PDISETUP_PDI_GDEN_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_GDEN_SHIFT		(2)
+#define ODN_PDP_PDISETUP_PDI_GDEN_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_GDEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_NFEN
+*/
+#define ODN_PDP_PDISETUP_PDI_NFEN_MASK		(0x00000002)
+#define ODN_PDP_PDISETUP_PDI_NFEN_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_NFEN_SHIFT		(1)
+#define ODN_PDP_PDISETUP_PDI_NFEN_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_NFEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_CR
+*/
+#define ODN_PDP_PDISETUP_PDI_CR_MASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_CR_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_CR_SHIFT		(0)
+#define ODN_PDP_PDISETUP_PDI_CR_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_CR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PDITIMING0_OFFSET		(0x0904)
+
+/* PDP, PDITIMING0, PDI_PWRSVGD
+*/
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_MASK		(0x0F000000)
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LSBMASK		(0x0000000F)
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SHIFT		(24)
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LENGTH		(4)
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDITIMING0, PDI_LSDEL
+*/
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_MASK		(0x007F0000)
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_LSBMASK		(0x0000007F)
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_SHIFT		(16)
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_LENGTH		(7)
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDITIMING0, PDI_PWRSV2GD2
+*/
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_MASK		(0x000003FF)
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LSBMASK		(0x000003FF)
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SHIFT		(0)
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LENGTH		(10)
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PDITIMING1_OFFSET		(0x0908)
+
+/* PDP, PDITIMING1, PDI_NLDEL
+*/
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_MASK		(0x000F0000)
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_LSBMASK		(0x0000000F)
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_SHIFT		(16)
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_LENGTH		(4)
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDITIMING1, PDI_ACBDEL
+*/
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_MASK		(0x000003FF)
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LSBMASK		(0x000003FF)
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SHIFT		(0)
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LENGTH		(10)
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PDICOREID_OFFSET		(0x090C)
+
+/* PDP, PDICOREID, PDI_GROUP_ID
+*/
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_MASK		(0xFF000000)
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LSBMASK		(0x000000FF)
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SHIFT		(24)
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LENGTH		(8)
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREID, PDI_CORE_ID
+*/
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_MASK		(0x00FF0000)
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_LSBMASK		(0x000000FF)
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_SHIFT		(16)
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_LENGTH		(8)
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREID, PDI_CONFIG_ID
+*/
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_MASK		(0x0000FFFF)
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SHIFT		(0)
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LENGTH		(16)
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PDICOREREV_OFFSET		(0x0910)
+
+/* PDP, PDICOREREV, PDI_MAJOR_REV
+*/
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_MASK		(0x00FF0000)
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LSBMASK		(0x000000FF)
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SHIFT		(16)
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LENGTH		(8)
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREREV, PDI_MINOR_REV
+*/
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_MASK		(0x0000FF00)
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LSBMASK		(0x000000FF)
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SHIFT		(8)
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LENGTH		(8)
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREREV, PDI_MAINT_REV
+*/
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_MASK		(0x000000FF)
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LSBMASK		(0x000000FF)
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SHIFT		(0)
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LENGTH		(8)
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX2_OFFSET		(0x0920)
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y1
+*/
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_MASK		(0x000000C0)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LSBMASK		(0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LENGTH		(2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y1
+*/
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_MASK		(0x00000030)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LSBMASK		(0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT		(4)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LENGTH		(2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y0
+*/
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_MASK		(0x0000000C)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LSBMASK		(0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT		(2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LENGTH		(2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y0
+*/
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_MASK		(0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LSBMASK		(0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LENGTH		(2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX4_0_OFFSET		(0x0924)
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y1
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_MASK		(0xF0000000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SHIFT		(28)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y1
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_MASK		(0x0F000000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y1
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_MASK		(0x00F00000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SHIFT		(20)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y1
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_MASK		(0x000F0000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SHIFT		(16)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y0
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_MASK		(0x0000F000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y0
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_MASK		(0x00000F00)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SHIFT		(8)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y0
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_MASK		(0x000000F0)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SHIFT		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y0
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_MASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX4_1_OFFSET		(0x0928)
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y3
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_MASK		(0xF0000000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SHIFT		(28)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y3
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_MASK		(0x0F000000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y3
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_MASK		(0x00F00000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SHIFT		(20)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y3
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_MASK		(0x000F0000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SHIFT		(16)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y2
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_MASK		(0x0000F000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y2
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_MASK		(0x00000F00)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SHIFT		(8)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y2
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_MASK		(0x000000F0)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SHIFT		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y2
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_MASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_0_OFFSET		(0x092C)
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X4Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X3Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X2Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X1Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X0Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_1_OFFSET		(0x0930)
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X1Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X0Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X7Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X6Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X5Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_2_OFFSET		(0x0934)
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X6Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X5Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X4Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X3Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X2Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_3_OFFSET		(0x0938)
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X3Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X2Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X1Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X0Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X7Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_4_OFFSET		(0x093C)
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X0Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X7Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X6Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X5Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X4Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_5_OFFSET		(0x0940)
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X5Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X4Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X3Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X2Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X1Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_6_OFFSET		(0x0944)
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X2Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X1Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X0Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X7Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X6Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_7_OFFSET		(0x0948)
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X7Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X6Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X5Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X4Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X3Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_8_OFFSET		(0x094C)
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X4Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X3Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X2Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X1Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X0Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_9_OFFSET		(0x0950)
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X1Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X0Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X7Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X6Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X5Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_10_OFFSET		(0x0954)
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X6Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X5Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X4Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X3Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X2Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_11_OFFSET		(0x0958)
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X3Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X2Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X1Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X0Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X7Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_12_OFFSET		(0x095C)
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X7Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X6Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X5Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X4Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1_MEMCTRL_OFFSET		(0x0960)
+
+/* PDP, GRPH1_MEMCTRL, GRPH1_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_MEMCTRL, GRPH1_BURSTLEN
+*/
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1_MEM_THRESH_OFFSET		(0x0964)
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_UVTHRESHOLD
+*/
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_YTHRESHOLD
+*/
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_THRESHOLD
+*/
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2_MEMCTRL_OFFSET		(0x0968)
+
+/* PDP, GRPH2_MEMCTRL, GRPH2_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_MEMCTRL, GRPH2_BURSTLEN
+*/
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2_MEM_THRESH_OFFSET		(0x096C)
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_UVTHRESHOLD
+*/
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_YTHRESHOLD
+*/
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_THRESHOLD
+*/
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3_MEMCTRL_OFFSET		(0x0970)
+
+/* PDP, GRPH3_MEMCTRL, GRPH3_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_MEMCTRL, GRPH3_BURSTLEN
+*/
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3_MEM_THRESH_OFFSET		(0x0974)
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_UVTHRESHOLD
+*/
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_YTHRESHOLD
+*/
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_THRESHOLD
+*/
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4_MEMCTRL_OFFSET		(0x0978)
+
+/* PDP, GRPH4_MEMCTRL, GRPH4_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_MEMCTRL, GRPH4_BURSTLEN
+*/
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4_MEM_THRESH_OFFSET		(0x097C)
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_UVTHRESHOLD
+*/
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_YTHRESHOLD
+*/
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_THRESHOLD
+*/
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1_MEMCTRL_OFFSET		(0x0980)
+
+/* PDP, VID1_MEMCTRL, VID1_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_MEMCTRL, VID1_BURSTLEN
+*/
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1_MEM_THRESH_OFFSET		(0x0984)
+
+/* PDP, VID1_MEM_THRESH, VID1_UVTHRESHOLD
+*/
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_MEM_THRESH, VID1_YTHRESHOLD
+*/
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_MEM_THRESH, VID1_THRESHOLD
+*/
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2_MEMCTRL_OFFSET		(0x0988)
+
+/* PDP, VID2_MEMCTRL, VID2_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_MEMCTRL, VID2_BURSTLEN
+*/
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2_MEM_THRESH_OFFSET		(0x098C)
+
+/* PDP, VID2_MEM_THRESH, VID2_UVTHRESHOLD
+*/
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_MEM_THRESH, VID2_YTHRESHOLD
+*/
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_MEM_THRESH, VID2_THRESHOLD
+*/
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3_MEMCTRL_OFFSET		(0x0990)
+
+/* PDP, VID3_MEMCTRL, VID3_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_MEMCTRL, VID3_BURSTLEN
+*/
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3_MEM_THRESH_OFFSET		(0x0994)
+
+/* PDP, VID3_MEM_THRESH, VID3_UVTHRESHOLD
+*/
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_MEM_THRESH, VID3_YTHRESHOLD
+*/
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_MEM_THRESH, VID3_THRESHOLD
+*/
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4_MEMCTRL_OFFSET		(0x0998)
+
+/* PDP, VID4_MEMCTRL, VID4_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_MEMCTRL, VID4_BURSTLEN
+*/
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4_MEM_THRESH_OFFSET		(0x099C)
+
+/* PDP, VID4_MEM_THRESH, VID4_UVTHRESHOLD
+*/
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_MEM_THRESH, VID4_YTHRESHOLD
+*/
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_MEM_THRESH, VID4_THRESHOLD
+*/
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1_PANIC_THRESH_OFFSET		(0x09A0)
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2_PANIC_THRESH_OFFSET		(0x09A4)
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3_PANIC_THRESH_OFFSET		(0x09A8)
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4_PANIC_THRESH_OFFSET		(0x09AC)
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1_PANIC_THRESH_OFFSET		(0x09B0)
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2_PANIC_THRESH_OFFSET		(0x09B4)
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3_PANIC_THRESH_OFFSET		(0x09B8)
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4_PANIC_THRESH_OFFSET		(0x09BC)
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_BURST_BOUNDARY_OFFSET		(0x09C0)
+
+/* PDP, BURST_BOUNDARY, BURST_BOUNDARY
+*/
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK		(0x0000003F)
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LSBMASK		(0x0000003F)
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT		(0)
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LENGTH		(6)
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SIGNED_FIELD	IMG_FALSE
+
+
+/* ---------------------- End of register definitions ---------------------- */
+
+/* NUMREG defines the extent of the register address space
+   (the byte offset of the last register, 0x09C0, converted to a
+   32-bit word index, plus one).
+*/
+
+#define ODN_PDP_NUMREG		((0x09C0 >> 2) + 1)
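+
+/* Illustration (added for clarity; not part of the generated header):
+ * every field above follows the same _MASK/_SHIFT/_LENGTH convention, so
+ * a single generic read-modify-write helper can access any of them.  The
+ * sketch below assumes <linux/io.h> readl/writel and an ioremap'd
+ * register base; it is kept under #if 0 as an example only.
+ */
+#if 0
+static inline u32 odn_pdp_field_get(void __iomem *base, u32 reg,
+				    u32 mask, u32 shift)
+{
+	/* Read the 32-bit register and isolate the requested field. */
+	return (readl(base + reg) & mask) >> shift;
+}
+
+static inline void odn_pdp_field_set(void __iomem *base, u32 reg,
+				     u32 mask, u32 shift, u32 val)
+{
+	u32 tmp = readl(base + reg);
+
+	/* Clear the field, then insert the new value masked to its width. */
+	tmp = (tmp & ~mask) | ((val << shift) & mask);
+	writel(tmp, base + reg);
+}
+
+/* Usage, e.g. writing the 6-bit burst boundary field:
+ * odn_pdp_field_set(base, ODN_PDP_BURST_BOUNDARY_OFFSET,
+ *		     ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK,
+ *		     ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT, 0x10);
+ */
+#endif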
+
+/* Info about video plane addresses */
+#define ODN_PDP_YADDR_BITS		(ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH)
+#define ODN_PDP_YADDR_ALIGN		5
+#define ODN_PDP_UADDR_BITS		(ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH)
+#define ODN_PDP_UADDR_ALIGN		5
+#define ODN_PDP_VADDR_BITS		(ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH)
+#define ODN_PDP_VADDR_ALIGN		5
+
+#define ODN_PDP_YSTRIDE_BITS	(ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH)
+#define ODN_PDP_YSTRIDE_ALIGN	5
+
+#define ODN_PDP_MAX_INPUT_WIDTH (ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK + 1)
+#define ODN_PDP_MAX_INPUT_HEIGHT (ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK + 1)
+
+/* Maximum 6 bytes per pixel for RGB161616 */
+#define ODN_PDP_MAX_IMAGE_BYTES (ODN_PDP_MAX_INPUT_WIDTH * ODN_PDP_MAX_INPUT_HEIGHT * 6)
+
+/* Round up to a whole number of pages */
+#define ODN_PDP_MAX_IMAGE_PAGES ((ODN_PDP_MAX_IMAGE_BYTES + PAGE_SIZE - 1) / PAGE_SIZE)
+
+#define ODN_PDP_YADDR_MAX		(((1 << ODN_PDP_YADDR_BITS) - 1) << ODN_PDP_YADDR_ALIGN)
+#define ODN_PDP_UADDR_MAX		(((1 << ODN_PDP_UADDR_BITS) - 1) << ODN_PDP_UADDR_ALIGN)
+#define ODN_PDP_VADDR_MAX		(((1 << ODN_PDP_VADDR_BITS) - 1) << ODN_PDP_VADDR_ALIGN)
+#define ODN_PDP_YSTRIDE_MAX		((1 << ODN_PDP_YSTRIDE_BITS) << ODN_PDP_YSTRIDE_ALIGN)
+#define ODN_PDP_YADDR_ALIGNMASK		((1 << ODN_PDP_YADDR_ALIGN) - 1)
+#define ODN_PDP_UADDR_ALIGNMASK		((1 << ODN_PDP_UADDR_ALIGN) - 1)
+#define ODN_PDP_VADDR_ALIGNMASK		((1 << ODN_PDP_VADDR_ALIGN) - 1)
+#define ODN_PDP_YSTRIDE_ALIGNMASK	((1 << ODN_PDP_YSTRIDE_ALIGN) - 1)
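+
+/* Sketch (added for clarity; not part of the generated header): the
+ * _MAX/_ALIGNMASK pairs above bound what the truncated, 32-byte-aligned
+ * base-address fields can represent, so a plane address is usable when
+ * it fits below the maximum and has no bits set in the alignment mask.
+ * Example only, kept under #if 0.
+ */
+#if 0
+static inline int odn_pdp_yaddr_valid(u32 yaddr)
+{
+	return yaddr <= ODN_PDP_YADDR_MAX &&
+	       (yaddr & ODN_PDP_YADDR_ALIGNMASK) == 0;
+}
+#endif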
+
+/* Field Values (some are reserved for future use) */
+#define ODN_PDP_SURF_PIXFMT_RGB332					0x3
+#define ODN_PDP_SURF_PIXFMT_ARGB4444				0x4
+#define ODN_PDP_SURF_PIXFMT_ARGB1555				0x5
+#define ODN_PDP_SURF_PIXFMT_RGB888					0x6
+#define ODN_PDP_SURF_PIXFMT_RGB565					0x7
+#define ODN_PDP_SURF_PIXFMT_ARGB8888				0x8
+#define ODN_PDP_SURF_PIXFMT_420_PL8					0x9
+#define ODN_PDP_SURF_PIXFMT_420_PL8IVU				0xA
+#define ODN_PDP_SURF_PIXFMT_420_PL8IUV				0xB
+#define ODN_PDP_SURF_PIXFMT_422_UY0VY1_8888			0xC
+#define ODN_PDP_SURF_PIXFMT_422_VY0UY1_8888			0xD
+#define ODN_PDP_SURF_PIXFMT_422_Y0UY1V_8888			0xE
+#define ODN_PDP_SURF_PIXFMT_422_Y0VY1U_8888			0xF
+#define ODN_PDP_SURF_PIXFMT_AYUV8888				0x10
+#define ODN_PDP_SURF_PIXFMT_YUV101010				0x15
+#define ODN_PDP_SURF_PIXFMT_RGB101010				0x17
+#define ODN_PDP_SURF_PIXFMT_420_PL10IUV				0x18
+#define ODN_PDP_SURF_PIXFMT_420_PL10IVU				0x19
+#define ODN_PDP_SURF_PIXFMT_422_PL10IUV				0x1A
+#define ODN_PDP_SURF_PIXFMT_422_PL10IVU				0x1B
+#define ODN_PDP_SURF_PIXFMT_RGB121212				0x1E
+#define ODN_PDP_SURF_PIXFMT_RGB161616				0x1F
+
+#define ODN_PDP_CTRL_CKEYSRC_PREV					0x0
+#define ODN_PDP_CTRL_CKEYSRC_CUR					0x1
+
+#define ODN_PDP_MEMCTRL_MEMREFRESH_ALWAYS			0x0
+#define ODN_PDP_MEMCTRL_MEMREFRESH_HBLNK			0x1
+#define ODN_PDP_MEMCTRL_MEMREFRESH_VBLNK			0x2
+#define ODN_PDP_MEMCTRL_MEMREFRESH_BOTH				0x3
+
+#define ODN_PDP_3D_CTRL_BLENDSEL_BGND_WITH_POS0		0x0
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS0_WITH_POS1		0x1
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS1_WITH_POS2		0x2
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS2_WITH_POS3		0x3
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS3_WITH_POS4		0x4
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS4_WITH_POS5		0x5
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS5_WITH_POS6		0x6
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS6_WITH_POS7		0x7
+
+#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_Y_STRIDE	0x0
+#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_DOUBLE_Y_STRIDE 0x1
+#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_HALF_Y_STRIDE 0x2
+
+#define ODN_PDP_PROCAMP_OUTPUT_OFFSET_FRACTIONAL_BITS 1
+#define ODN_PDP_PROCAMP_COEFFICIENT_FRACTIONAL_BITS	10
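+
+/* Note (added for clarity; not part of the generated header): these
+ * counts describe a fixed-point encoding.  With 10 fractional bits a
+ * procamp coefficient of 1.0 is encoded as 1 << 10 = 1024 and 0.25 as
+ * 256, while the output offset, with a single fractional bit, is
+ * expressed in half-unit steps.
+ */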
+
+/*---------------------------------------------------------------------------*/
+
+#endif /* ODN_PDP_REGS_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/odin_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/odin_regs.h
new file mode 100644
index 0000000..01bd597
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/odin_regs.h
@@ -0,0 +1,924 @@
+/****************************************************************************
+@Title          Odin system control register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+@Description    Odin FPGA register defs for IMG 3rd generation TCF
+
+	Auto-generated headers, e.g. odn_core.h:
+		regconv -d . -a 8 odn_core.def
+
+	Source files :
+		odn_core.def
+		mca_debug.def
+		sai_rx_debug.def
+		sai_tx_debug.def
+		ad_tx.def
+
+	Changes:
+		Removed obsolete copyright dates
+		Changed lower case to upper case
+			(e.g. odn_core changed to ODN_CORE)
+		Changed PVR5__ to ODN_
+		Merged multiple .def files into one header
+
+****************************************************************************/
+
+/* tab size 4 */
+
+#ifndef _ODIN_REGS_H_
+#define _ODIN_REGS_H_
+
+/******************************
+  Generated from: odn_core.def
+*******************************/
+
+/*
+	Register ID
+*/
+#define ODN_CORE_ID                             0x0000
+#define ODN_ID_VARIANT_MASK                     0x0000FFFFU
+#define ODN_ID_VARIANT_SHIFT                    0
+#define ODN_ID_VARIANT_SIGNED                   0
+
+#define ODN_ID_ID_MASK                          0xFFFF0000U
+#define ODN_ID_ID_SHIFT                         16
+#define ODN_ID_ID_SIGNED                        0
+
+/*
+	Register REVISION
+*/
+#define ODN_CORE_REVISION                       0x0004
+#define ODN_REVISION_MINOR_MASK                 0x0000FFFFU
+#define ODN_REVISION_MINOR_SHIFT                0
+#define ODN_REVISION_MINOR_SIGNED               0
+
+#define ODN_REVISION_MAJOR_MASK                 0xFFFF0000U
+#define ODN_REVISION_MAJOR_SHIFT                16
+#define ODN_REVISION_MAJOR_SIGNED               0
+
+/*
+	Register CHANGE_SET
+*/
+#define ODN_CORE_CHANGE_SET                     0x0008
+#define ODN_CHANGE_SET_SET_MASK                 0xFFFFFFFFU
+#define ODN_CHANGE_SET_SET_SHIFT                0
+#define ODN_CHANGE_SET_SET_SIGNED               0
+
+/*
+	Register USER_ID
+*/
+#define ODN_CORE_USER_ID                        0x000C
+#define ODN_USER_ID_ID_MASK                     0x0000000FU
+#define ODN_USER_ID_ID_SHIFT                    0
+#define ODN_USER_ID_ID_SIGNED                   0
+
+/*
+	Register USER_BUILD
+*/
+#define ODN_CORE_USER_BUILD                     0x0010
+#define ODN_USER_BUILD_BUILD_MASK               0xFFFFFFFFU
+#define ODN_USER_BUILD_BUILD_SHIFT              0
+#define ODN_USER_BUILD_BUILD_SIGNED             0
+
+/*
+	Register SW_IF_VERSION
+*/
+#define ODN_CORE_SW_IF_VERSION                  0x0014
+#define ODN_SW_IF_VERSION_VERSION_MASK          0x0000FFFFU
+#define ODN_SW_IF_VERSION_VERSION_SHIFT         0
+#define ODN_SW_IF_VERSION_VERSION_SIGNED        0
+
+/*
+	Register INTERNAL_RESETN
+*/
+#define ODN_CORE_INTERNAL_RESETN                0x0080
+#define ODN_INTERNAL_RESETN_DDR_MASK            0x00000001U
+#define ODN_INTERNAL_RESETN_DDR_SHIFT           0
+#define ODN_INTERNAL_RESETN_DDR_SIGNED          0
+
+#define ODN_INTERNAL_RESETN_MIG0_MASK           0x00000002U
+#define ODN_INTERNAL_RESETN_MIG0_SHIFT          1
+#define ODN_INTERNAL_RESETN_MIG0_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_MIG1_MASK           0x00000004U
+#define ODN_INTERNAL_RESETN_MIG1_SHIFT          2
+#define ODN_INTERNAL_RESETN_MIG1_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_PDP1_MASK           0x00000008U
+#define ODN_INTERNAL_RESETN_PDP1_SHIFT          3
+#define ODN_INTERNAL_RESETN_PDP1_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_PDP2_MASK           0x00000010U
+#define ODN_INTERNAL_RESETN_PDP2_SHIFT          4
+#define ODN_INTERNAL_RESETN_PDP2_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_PERIP_MASK          0x00000020U
+#define ODN_INTERNAL_RESETN_PERIP_SHIFT         5
+#define ODN_INTERNAL_RESETN_PERIP_SIGNED        0
+
+#define ODN_INTERNAL_RESETN_GIST_MASK           0x00000040U
+#define ODN_INTERNAL_RESETN_GIST_SHIFT          6
+#define ODN_INTERNAL_RESETN_GIST_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_PIKE_MASK           0x00000080U
+#define ODN_INTERNAL_RESETN_PIKE_SHIFT          7
+#define ODN_INTERNAL_RESETN_PIKE_SIGNED         0
+
+/*
+	Register EXTERNAL_RESETN
+*/
+#define ODN_CORE_EXTERNAL_RESETN                0x0084
+#define ODN_EXTERNAL_RESETN_DUT_MASK            0x00000001U
+#define ODN_EXTERNAL_RESETN_DUT_SHIFT           0
+#define ODN_EXTERNAL_RESETN_DUT_SIGNED          0
+
+#define ODN_EXTERNAL_RESETN_DUT_SPI_MASK        0x00000002U
+#define ODN_EXTERNAL_RESETN_DUT_SPI_SHIFT       1
+#define ODN_EXTERNAL_RESETN_DUT_SPI_SIGNED      0
+
+/*
+	Register EXTERNAL_RESET
+*/
+#define ODN_CORE_EXTERNAL_RESET                 0x0088
+#define ODN_EXTERNAL_RESET_PVT_CAL_MASK         0x00000001U
+#define ODN_EXTERNAL_RESET_PVT_CAL_SHIFT        0
+#define ODN_EXTERNAL_RESET_PVT_CAL_SIGNED       0
+
+#define ODN_EXTERNAL_RESET_PLL_MASK             0x00000002U
+#define ODN_EXTERNAL_RESET_PLL_SHIFT            1
+#define ODN_EXTERNAL_RESET_PLL_SIGNED           0
+
+/*
+	Register INTERNAL_AUTO_RESETN
+*/
+#define ODN_CORE_INTERNAL_AUTO_RESETN           0x008C
+#define ODN_INTERNAL_AUTO_RESETN_AUX_MASK       0x00000001U
+#define ODN_INTERNAL_AUTO_RESETN_AUX_SHIFT      0
+#define ODN_INTERNAL_AUTO_RESETN_AUX_SIGNED     0
+
+/*
+	Register CLK_GEN_RESET
+*/
+#define ODN_CORE_CLK_GEN_RESET                  0x0090
+#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_MASK    0x00000001U
+#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SHIFT   0
+#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SIGNED  0
+
+#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK      0x00000002U
+#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SHIFT     1
+#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SIGNED    0
+
+#define ODN_CLK_GEN_RESET_MULTI_MMCM_MASK       0x00000004U
+#define ODN_CLK_GEN_RESET_MULTI_MMCM_SHIFT      2
+#define ODN_CLK_GEN_RESET_MULTI_MMCM_SIGNED     0
+
+#define ODN_CLK_GEN_RESET_PDP_MMCM_MASK         0x00000008U
+#define ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT        3
+#define ODN_CLK_GEN_RESET_PDP_MMCM_SIGNED       0
+
+/*
+	Register INTERRUPT_STATUS
+*/
+#define ODN_CORE_INTERRUPT_STATUS               0x0100
+#define ODN_INTERRUPT_STATUS_DUT_MASK           0x00000001U
+#define ODN_INTERRUPT_STATUS_DUT_SHIFT          0
+#define ODN_INTERRUPT_STATUS_DUT_SIGNED         0
+
+#define ODN_INTERRUPT_STATUS_PDP1_MASK          0x00000002U
+#define ODN_INTERRUPT_STATUS_PDP1_SHIFT         1
+#define ODN_INTERRUPT_STATUS_PDP1_SIGNED        0
+
+#define ODN_INTERRUPT_STATUS_PDP2_MASK          0x00000004U
+#define ODN_INTERRUPT_STATUS_PDP2_SHIFT         2
+#define ODN_INTERRUPT_STATUS_PDP2_SIGNED        0
+
+#define ODN_INTERRUPT_STATUS_PERIP_MASK         0x00000008U
+#define ODN_INTERRUPT_STATUS_PERIP_SHIFT        3
+#define ODN_INTERRUPT_STATUS_PERIP_SIGNED       0
+
+#define ODN_INTERRUPT_STATUS_UART_MASK          0x00000010U
+#define ODN_INTERRUPT_STATUS_UART_SHIFT         4
+#define ODN_INTERRUPT_STATUS_UART_SIGNED        0
+
+#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_MASK 0x00000020U
+#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SHIFT 5
+#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_MASK 0x00000040U
+#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SHIFT 6
+#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_MASK 0x00000080U
+#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SHIFT 7
+#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_MASK 0x00000100U
+#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SHIFT 8
+#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_IRQ_TEST_MASK      0x40000000U
+#define ODN_INTERRUPT_STATUS_IRQ_TEST_SHIFT     30
+#define ODN_INTERRUPT_STATUS_IRQ_TEST_SIGNED    0
+
+#define ODN_INTERRUPT_STATUS_MASTER_STATUS_MASK 0x80000000U
+#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SHIFT 31
+#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SIGNED 0
+
+/*
+	Register INTERRUPT_ENABLE
+*/
+#define ODN_CORE_INTERRUPT_ENABLE               0x0104
+#define ODN_INTERRUPT_ENABLE_DUT_MASK           0x00000001U
+#define ODN_INTERRUPT_ENABLE_DUT_SHIFT          0
+#define ODN_INTERRUPT_ENABLE_DUT_SIGNED         0
+
+#define ODN_INTERRUPT_ENABLE_PDP1_MASK          0x00000002U
+#define ODN_INTERRUPT_ENABLE_PDP1_SHIFT         1
+#define ODN_INTERRUPT_ENABLE_PDP1_SIGNED        0
+
+#define ODN_INTERRUPT_ENABLE_PDP2_MASK          0x00000004U
+#define ODN_INTERRUPT_ENABLE_PDP2_SHIFT         2
+#define ODN_INTERRUPT_ENABLE_PDP2_SIGNED        0
+
+#define ODN_INTERRUPT_ENABLE_PERIP_MASK         0x00000008U
+#define ODN_INTERRUPT_ENABLE_PERIP_SHIFT        3
+#define ODN_INTERRUPT_ENABLE_PERIP_SIGNED       0
+
+#define ODN_INTERRUPT_ENABLE_UART_MASK          0x00000010U
+#define ODN_INTERRUPT_ENABLE_UART_SHIFT         4
+#define ODN_INTERRUPT_ENABLE_UART_SIGNED        0
+
+#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_MASK 0x00000020U
+#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SHIFT 5
+#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_MASK 0x00000040U
+#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SHIFT 6
+#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_MASK 0x00000080U
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SHIFT 7
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_MASK 0x00000100U
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SHIFT 8
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_IRQ_TEST_MASK      0x40000000U
+#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SHIFT     30
+#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SIGNED    0
+
+#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_MASK 0x80000000U
+#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT 31
+#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SIGNED 0
+
+/*
+	Register INTERRUPT_CLR
+*/
+#define ODN_CORE_INTERRUPT_CLR                  0x010C
+#define ODN_INTERRUPT_CLR_DUT_MASK              0x00000001U
+#define ODN_INTERRUPT_CLR_DUT_SHIFT             0
+#define ODN_INTERRUPT_CLR_DUT_SIGNED            0
+
+#define ODN_INTERRUPT_CLR_PDP1_MASK             0x00000002U
+#define ODN_INTERRUPT_CLR_PDP1_SHIFT            1
+#define ODN_INTERRUPT_CLR_PDP1_SIGNED           0
+
+#define ODN_INTERRUPT_CLR_PDP2_MASK             0x00000004U
+#define ODN_INTERRUPT_CLR_PDP2_SHIFT            2
+#define ODN_INTERRUPT_CLR_PDP2_SIGNED           0
+
+#define ODN_INTERRUPT_CLR_PERIP_MASK            0x00000008U
+#define ODN_INTERRUPT_CLR_PERIP_SHIFT           3
+#define ODN_INTERRUPT_CLR_PERIP_SIGNED          0
+
+#define ODN_INTERRUPT_CLR_UART_MASK             0x00000010U
+#define ODN_INTERRUPT_CLR_UART_SHIFT            4
+#define ODN_INTERRUPT_CLR_UART_SIGNED           0
+
+#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_MASK  0x00000020U
+#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SHIFT 5
+#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_MASK   0x00000040U
+#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SHIFT  6
+#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_MASK 0x00000080U
+#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SHIFT 7
+#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_MASK  0x00000100U
+#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SHIFT 8
+#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_IRQ_TEST_MASK         0x40000000U
+#define ODN_INTERRUPT_CLR_IRQ_TEST_SHIFT        30
+#define ODN_INTERRUPT_CLR_IRQ_TEST_SIGNED       0
+
+#define ODN_INTERRUPT_CLR_MASTER_CLEAR_MASK     0x80000000U
+#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SHIFT    31
+#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SIGNED   0
+
+/*
+	Register INTERRUPT_TEST
+*/
+#define ODN_CORE_INTERRUPT_TEST                 0x0110
+#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_MASK  0x00000001U
+#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SHIFT 0
+#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SIGNED 0
+
+/*
+	Register SYSTEM_ID
+*/
+#define ODN_CORE_SYSTEM_ID                      0x0120
+#define ODN_SYSTEM_ID_ID_MASK                   0x0000FFFFU
+#define ODN_SYSTEM_ID_ID_SHIFT                  0
+#define ODN_SYSTEM_ID_ID_SIGNED                 0
+
+/*
+	Register NUM_GPIO
+*/
+#define ODN_CORE_NUM_GPIO                       0x0180
+#define ODN_NUM_GPIO_NUMBER_MASK                0x0000000FU
+#define ODN_NUM_GPIO_NUMBER_SHIFT               0
+#define ODN_NUM_GPIO_NUMBER_SIGNED              0
+
+/*
+	Register GPIO_EN
+*/
+#define ODN_CORE_GPIO_EN                        0x0184
+#define ODN_GPIO_EN_DIRECTION_MASK              0x000000FFU
+#define ODN_GPIO_EN_DIRECTION_SHIFT             0
+#define ODN_GPIO_EN_DIRECTION_SIGNED            0
+
+/*
+	Register GPIO
+*/
+#define ODN_CORE_GPIO                           0x0188
+#define ODN_GPIO_GPIO_MASK                      0x000000FFU
+#define ODN_GPIO_GPIO_SHIFT                     0
+#define ODN_GPIO_GPIO_SIGNED                    0
+
+/*
+	Register NUM_DUT_CTRL
+*/
+#define ODN_CORE_NUM_DUT_CTRL                   0x0190
+#define ODN_NUM_DUT_CTRL_NUM_PINS_MASK          0xFFFFFFFFU
+#define ODN_NUM_DUT_CTRL_NUM_PINS_SHIFT         0
+#define ODN_NUM_DUT_CTRL_NUM_PINS_SIGNED        0
+
+/*
+	Register DUT_CTRL1
+*/
+#define ODN_CORE_DUT_CTRL1                      0x0194
+#define ODN_DUT_CTRL1_CONTROL1_MASK             0x3FFFFFFFU
+#define ODN_DUT_CTRL1_CONTROL1_SHIFT            0
+#define ODN_DUT_CTRL1_CONTROL1_SIGNED           0
+
+#define ODN_DUT_CTRL1_FBDC_BYPASS_MASK          0x40000000U
+#define ODN_DUT_CTRL1_FBDC_BYPASS_SHIFT         30
+#define ODN_DUT_CTRL1_FBDC_BYPASS_SIGNED        0
+
+#define ODN_DUT_CTRL1_DUT_MST_OFFSET_MASK       0x80000000U
+#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SHIFT      31
+#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SIGNED     0
+
+/*
+	Register DUT_CTRL2
+*/
+#define ODN_CORE_DUT_CTRL2                      0x0198
+#define ODN_DUT_CTRL2_CONTROL2_MASK             0xFFFFFFFFU
+#define ODN_DUT_CTRL2_CONTROL2_SHIFT            0
+#define ODN_DUT_CTRL2_CONTROL2_SIGNED           0
+
+/*
+	Register NUM_DUT_STAT
+*/
+#define ODN_CORE_NUM_DUT_STAT                   0x019C
+#define ODN_NUM_DUT_STAT_NUM_PINS_MASK          0xFFFFFFFFU
+#define ODN_NUM_DUT_STAT_NUM_PINS_SHIFT         0
+#define ODN_NUM_DUT_STAT_NUM_PINS_SIGNED        0
+
+/*
+	Register DUT_STAT1
+*/
+#define ODN_CORE_DUT_STAT1                      0x01A0
+#define ODN_DUT_STAT1_STATUS1_MASK              0xFFFFFFFFU
+#define ODN_DUT_STAT1_STATUS1_SHIFT             0
+#define ODN_DUT_STAT1_STATUS1_SIGNED            0
+
+/*
+	Register DUT_STAT2
+*/
+#define ODN_CORE_DUT_STAT2                      0x01A4
+#define ODN_DUT_STAT2_STATUS2_MASK              0xFFFFFFFFU
+#define ODN_DUT_STAT2_STATUS2_SHIFT             0
+#define ODN_DUT_STAT2_STATUS2_SIGNED            0
+
+/*
+	Register DASH_LEDS
+*/
+#define ODN_CORE_DASH_LEDS                      0x01A8
+#define ODN_DASH_LEDS_REPA_MASK                 0xFFF00000U
+#define ODN_DASH_LEDS_REPA_SHIFT                20
+#define ODN_DASH_LEDS_REPA_SIGNED               0
+
+#define ODN_DASH_LEDS_PIKE_MASK                 0x00000FFFU
+#define ODN_DASH_LEDS_PIKE_SHIFT                0
+#define ODN_DASH_LEDS_PIKE_SIGNED               0
+
+/*
+	Register DUT_CLK_INFO
+*/
+#define ODN_CORE_DUT_CLK_INFO                   0x01B0
+#define ODN_DUT_CLK_INFO_CORE_MASK              0x0000FFFFU
+#define ODN_DUT_CLK_INFO_CORE_SHIFT             0
+#define ODN_DUT_CLK_INFO_CORE_SIGNED            0
+
+#define ODN_DUT_CLK_INFO_MEM_MASK               0xFFFF0000U
+#define ODN_DUT_CLK_INFO_MEM_SHIFT              16
+#define ODN_DUT_CLK_INFO_MEM_SIGNED             0
+
+/*
+	Register DUT_CLK_PHSE
+*/
+#define ODN_CORE_DUT_CLK_PHSE                   0x01B4
+#define ODN_DUT_CLK_PHSE_MEM_REQ_MASK           0x0000FFFFU
+#define ODN_DUT_CLK_PHSE_MEM_REQ_SHIFT          0
+#define ODN_DUT_CLK_PHSE_MEM_REQ_SIGNED         0
+
+#define ODN_DUT_CLK_PHSE_MEM_RD_MASK            0xFFFF0000U
+#define ODN_DUT_CLK_PHSE_MEM_RD_SHIFT           16
+#define ODN_DUT_CLK_PHSE_MEM_RD_SIGNED          0
+
+/*
+	Register CORE_STATUS
+*/
+#define ODN_CORE_CORE_STATUS                    0x0200
+#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_MASK   0x00000001U
+#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SHIFT  0
+#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SIGNED 0
+
+#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_MASK 0x00000010U
+#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SHIFT 4
+#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SIGNED 0
+
+#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_MASK 0x00000020U
+#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SHIFT 5
+#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SIGNED 0
+
+#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_MASK 0x00000040U
+#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SHIFT 6
+#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SIGNED 0
+
+#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_MASK 0x00000080U
+#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SHIFT 7
+#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SIGNED 0
+
+#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_MASK 0x00000100U
+#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SHIFT 8
+#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SIGNED 0
+
+#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_MASK 0x00000200U
+#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SHIFT 9
+#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SIGNED 0
+
+#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_MASK 0x00001000U
+#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SHIFT 12
+#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SIGNED 0
+
+#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_MASK 0x00002000U
+#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SHIFT 13
+#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SIGNED 0
+
+/*
+	Register CORE_CONTROL
+*/
+#define ODN_CORE_CORE_CONTROL                   0x0204
+#define ODN_CORE_CONTROL_BAR4_OFFSET_MASK       0x0000001FU
+#define ODN_CORE_CONTROL_BAR4_OFFSET_SHIFT      0
+#define ODN_CORE_CONTROL_BAR4_OFFSET_SIGNED     0
+
+#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_MASK 0x00000300U
+#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SHIFT 8
+#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SIGNED 0
+
+#define ODN_CORE_CONTROL_HDMI_MODULE_EN_MASK    0x00001C00U
+#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT   10
+#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SIGNED  0
+
+#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_MASK 0x00002000U
+#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT 13
+#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SIGNED 0
+
+#define ODN_CORE_CONTROL_PDP1_OFFSET_MASK       0x00070000U
+#define ODN_CORE_CONTROL_PDP1_OFFSET_SHIFT      16
+#define ODN_CORE_CONTROL_PDP1_OFFSET_SIGNED     0
+
+#define ODN_CORE_CONTROL_PDP2_OFFSET_MASK       0x00700000U
+#define ODN_CORE_CONTROL_PDP2_OFFSET_SHIFT      20
+#define ODN_CORE_CONTROL_PDP2_OFFSET_SIGNED     0
+
+#define ODN_CORE_CONTROL_DUT_OFFSET_MASK        0x07000000U
+#define ODN_CORE_CONTROL_DUT_OFFSET_SHIFT       24
+#define ODN_CORE_CONTROL_DUT_OFFSET_SIGNED      0
+
+/*
+	Register REG_BANK_STATUS
+*/
+#define ODN_CORE_REG_BANK_STATUS                0x0208
+#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_MASK 0xFFFFFFFFU
+#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SHIFT 0
+#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SIGNED 0
+
+/*
+	Register MMCM_LOCK_STATUS
+*/
+#define ODN_CORE_MMCM_LOCK_STATUS               0x020C
+#define ODN_MMCM_LOCK_STATUS_DUT_CORE_MASK      0x00000001U
+#define ODN_MMCM_LOCK_STATUS_DUT_CORE_SHIFT     0
+#define ODN_MMCM_LOCK_STATUS_DUT_CORE_SIGNED    0
+
+#define ODN_MMCM_LOCK_STATUS_DUT_IF_MASK        0x00000002U
+#define ODN_MMCM_LOCK_STATUS_DUT_IF_SHIFT       1
+#define ODN_MMCM_LOCK_STATUS_DUT_IF_SIGNED      0
+
+#define ODN_MMCM_LOCK_STATUS_MULTI_MASK         0x00000004U
+#define ODN_MMCM_LOCK_STATUS_MULTI_SHIFT        2
+#define ODN_MMCM_LOCK_STATUS_MULTI_SIGNED       0
+
+#define ODN_MMCM_LOCK_STATUS_PDPP_MASK          0x00000008U
+#define ODN_MMCM_LOCK_STATUS_PDPP_SHIFT         3
+#define ODN_MMCM_LOCK_STATUS_PDPP_SIGNED        0
+
+/*
+	Register GIST_STATUS
+*/
+#define ODN_CORE_GIST_STATUS                    0x0210
+#define ODN_GIST_STATUS_MST_MASK                0x000001FFU
+#define ODN_GIST_STATUS_MST_SHIFT               0
+#define ODN_GIST_STATUS_MST_SIGNED              0
+
+#define ODN_GIST_STATUS_SLV_MASK                0x001FF000U
+#define ODN_GIST_STATUS_SLV_SHIFT               12
+#define ODN_GIST_STATUS_SLV_SIGNED              0
+
+#define ODN_GIST_STATUS_SLV_OUT_MASK            0x03000000U
+#define ODN_GIST_STATUS_SLV_OUT_SHIFT           24
+#define ODN_GIST_STATUS_SLV_OUT_SIGNED          0
+
+#define ODN_GIST_STATUS_MST_OUT_MASK            0x70000000U
+#define ODN_GIST_STATUS_MST_OUT_SHIFT           28
+#define ODN_GIST_STATUS_MST_OUT_SIGNED          0
+
+/*
+	Register DUT_MST_ADD
+*/
+#define ODN_CORE_DUT_MST_ADD                    0x0214
+#define ODN_DUT_MST_ADD_SLV_OUT_MASK            0x0000003FU
+#define ODN_DUT_MST_ADD_SLV_OUT_SHIFT           0
+#define ODN_DUT_MST_ADD_SLV_OUT_SIGNED          0
+
+
+/****************************
+  Generated from: ad_tx.def
+*****************************/
+
+/*
+	Register ADT_CONTROL
+*/
+#define ODN_AD_TX_DEBUG_ADT_CONTROL             0x0000
+#define ODN_SET_ADTX_READY_MASK                 0x00000004U
+#define ODN_SET_ADTX_READY_SHIFT                2
+#define ODN_SET_ADTX_READY_SIGNED               0
+
+#define ODN_SEND_ALIGN_DATA_MASK                0x00000002U
+#define ODN_SEND_ALIGN_DATA_SHIFT               1
+#define ODN_SEND_ALIGN_DATA_SIGNED              0
+
+#define ODN_ENABLE_FLUSHING_MASK                0x00000001U
+#define ODN_ENABLE_FLUSHING_SHIFT               0
+#define ODN_ENABLE_FLUSHING_SIGNED              0
+
+/*
+	Register ADT_STATUS
+*/
+#define ODN_AD_TX_DEBUG_ADT_STATUS              0x0004
+#define ODN_REQUEST_COMPLETE_MASK               0x00000001U
+#define ODN_REQUEST_COMPLETE_SHIFT              0
+#define ODN_REQUEST_COMPLETE_SIGNED             0
+
+
+/******************************
+ Generated from: mca_debug.def
+*******************************/
+
+/*
+	Register MCA_CONTROL
+*/
+#define ODN_MCA_DEBUG_MCA_CONTROL               0x0000
+#define ODN_ALIGN_START_MASK                    0x00000001U
+#define ODN_ALIGN_START_SHIFT                   0
+#define ODN_ALIGN_START_SIGNED                  0
+
+/*
+	Register MCA_STATUS
+*/
+#define ODN_MCA_DEBUG_MCA_STATUS                0x0004
+#define ODN_TCHECK_SDEBUG_MASK                  0x40000000U
+#define ODN_TCHECK_SDEBUG_SHIFT                 30
+#define ODN_TCHECK_SDEBUG_SIGNED                0
+
+#define ODN_CHECK_SDEBUG_MASK                   0x20000000U
+#define ODN_CHECK_SDEBUG_SHIFT                  29
+#define ODN_CHECK_SDEBUG_SIGNED                 0
+
+#define ODN_ALIGN_SDEBUG_MASK                   0x10000000U
+#define ODN_ALIGN_SDEBUG_SHIFT                  28
+#define ODN_ALIGN_SDEBUG_SIGNED                 0
+
+#define ODN_FWAIT_SDEBUG_MASK                   0x08000000U
+#define ODN_FWAIT_SDEBUG_SHIFT                  27
+#define ODN_FWAIT_SDEBUG_SIGNED                 0
+
+#define ODN_IDLE_SDEBUG_MASK                    0x04000000U
+#define ODN_IDLE_SDEBUG_SHIFT                   26
+#define ODN_IDLE_SDEBUG_SIGNED                  0
+
+#define ODN_FIFO_FULL_MASK                      0x03FF0000U
+#define ODN_FIFO_FULL_SHIFT                     16
+#define ODN_FIFO_FULL_SIGNED                    0
+
+#define ODN_FIFO_EMPTY_MASK                     0x0000FFC0U
+#define ODN_FIFO_EMPTY_SHIFT                    6
+#define ODN_FIFO_EMPTY_SIGNED                   0
+
+#define ODN_TAG_CHECK_ERROR_MASK                0x00000020U
+#define ODN_TAG_CHECK_ERROR_SHIFT               5
+#define ODN_TAG_CHECK_ERROR_SIGNED              0
+
+#define ODN_ALIGN_CHECK_ERROR_MASK              0x00000010U
+#define ODN_ALIGN_CHECK_ERROR_SHIFT             4
+#define ODN_ALIGN_CHECK_ERROR_SIGNED            0
+
+#define ODN_ALIGN_ERROR_MASK                    0x00000008U
+#define ODN_ALIGN_ERROR_SHIFT                   3
+#define ODN_ALIGN_ERROR_SIGNED                  0
+
+#define ODN_TAG_CHECKING_OK_MASK                0x00000004U
+#define ODN_TAG_CHECKING_OK_SHIFT               2
+#define ODN_TAG_CHECKING_OK_SIGNED              0
+
+#define ODN_ALIGN_CHECK_OK_MASK                 0x00000002U
+#define ODN_ALIGN_CHECK_OK_SHIFT                1
+#define ODN_ALIGN_CHECK_OK_SIGNED               0
+
+#define ODN_ALIGNMENT_FOUND_MASK                0x00000001U
+#define ODN_ALIGNMENT_FOUND_SHIFT               0
+#define ODN_ALIGNMENT_FOUND_SIGNED              0
+
+
+/*********************************
+ Generated from: sai_rx_debug.def
+**********************************/
+
+/*
+	Register SIG_RESULT
+*/
+#define ODN_SAI_RX_DEBUG_SIG_RESULT             0x0000
+#define ODN_SIG_RESULT_VALUE_MASK               0xFFFFFFFFU
+#define ODN_SIG_RESULT_VALUE_SHIFT              0
+#define ODN_SIG_RESULT_VALUE_SIGNED             0
+
+/*
+	Register INIT_SIG
+*/
+#define ODN_SAI_RX_DEBUG_INIT_SIG               0x0004
+#define ODN_INIT_SIG_VALUE_MASK                 0x00000001U
+#define ODN_INIT_SIG_VALUE_SHIFT                0
+#define ODN_INIT_SIG_VALUE_SIGNED               0
+
+/*
+	Register SAI_BYPASS
+*/
+#define ODN_SAI_RX_DEBUG_SAI_BYPASS             0x0008
+#define ODN_BYPASS_CLK_TAPS_VALUE_MASK          0x000003FFU
+#define ODN_BYPASS_CLK_TAPS_VALUE_SHIFT         0
+#define ODN_BYPASS_CLK_TAPS_VALUE_SIGNED        0
+
+#define ODN_BYPASS_SET_MASK                     0x00010000U
+#define ODN_BYPASS_SET_SHIFT                    16
+#define ODN_BYPASS_SET_SIGNED                   0
+
+#define ODN_BYPASS_EN_MASK                      0x00100000U
+#define ODN_BYPASS_EN_SHIFT                     20
+#define ODN_BYPASS_EN_SIGNED                    0
+
+#define ODN_EN_STATUS_MASK                      0x01000000U
+#define ODN_EN_STATUS_SHIFT                     24
+#define ODN_EN_STATUS_SIGNED                    0
+
+/*
+	Register SAI_CLK_TAPS
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CLK_TAPS           0x000C
+#define ODN_CLK_TAPS_VALUE_MASK                 0x000003FFU
+#define ODN_CLK_TAPS_VALUE_SHIFT                0
+#define ODN_CLK_TAPS_VALUE_SIGNED               0
+
+#define ODN_TRAINING_COMPLETE_MASK              0x00010000U
+#define ODN_TRAINING_COMPLETE_SHIFT             16
+#define ODN_TRAINING_COMPLETE_SIGNED            0
+
+/*
+	Register SAI_EYES
+*/
+#define ODN_SAI_RX_DEBUG_SAI_EYES               0x0010
+#define ODN_MIN_EYE_END_MASK                    0x0000FFFFU
+#define ODN_MIN_EYE_END_SHIFT                   0
+#define ODN_MIN_EYE_END_SIGNED                  0
+
+#define ODN_MAX_EYE_START_MASK                  0xFFFF0000U
+#define ODN_MAX_EYE_START_SHIFT                 16
+#define ODN_MAX_EYE_START_SIGNED                0
+
+/*
+	Register SAI_DDR_INVERT
+*/
+#define ODN_SAI_RX_DEBUG_SAI_DDR_INVERT         0x0014
+#define ODN_DDR_INVERT_MASK                     0x00000001U
+#define ODN_DDR_INVERT_SHIFT                    0
+#define ODN_DDR_INVERT_SIGNED                   0
+
+#define ODN_OVERIDE_VALUE_MASK                  0x00010000U
+#define ODN_OVERIDE_VALUE_SHIFT                 16
+#define ODN_OVERIDE_VALUE_SIGNED                0
+
+#define ODN_INVERT_OVERIDE_MASK                 0x00100000U
+#define ODN_INVERT_OVERIDE_SHIFT                20
+#define ODN_INVERT_OVERIDE_SIGNED               0
+
+/*
+	Register SAI_TRAIN_ACK
+*/
+#define ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK          0x0018
+#define ODN_TRAIN_ACK_FAIL_MASK                 0x00000001U
+#define ODN_TRAIN_ACK_FAIL_SHIFT                0
+#define ODN_TRAIN_ACK_FAIL_SIGNED               0
+
+#define ODN_TRAIN_ACK_FAIL_COUNT_MASK           0x000000F0U
+#define ODN_TRAIN_ACK_FAIL_COUNT_SHIFT          4
+#define ODN_TRAIN_ACK_FAIL_COUNT_SIGNED         0
+
+#define ODN_TRAIN_ACK_COMPLETE_MASK             0x00000100U
+#define ODN_TRAIN_ACK_COMPLETE_SHIFT            8
+#define ODN_TRAIN_ACK_COMPLETE_SIGNED           0
+
+#define ODN_TRAIN_ACK_OVERIDE_MASK              0x00001000U
+#define ODN_TRAIN_ACK_OVERIDE_SHIFT             12
+#define ODN_TRAIN_ACK_OVERIDE_SIGNED            0
+
+/*
+	Register SAI_TRAIN_ACK_COUNT
+*/
+#define ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK_COUNT    0x001C
+#define ODN_TRAIN_COUNT_MASK                    0xFFFFFFFFU
+#define ODN_TRAIN_COUNT_SHIFT                   0
+#define ODN_TRAIN_COUNT_SIGNED                  0
+
+/*
+	Register SAI_CHANNEL_NUMBER
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_NUMBER     0x0020
+#define ODN_CHANNEL_NUMBER_MASK                 0x0000FFFFU
+#define ODN_CHANNEL_NUMBER_SHIFT                0
+#define ODN_CHANNEL_NUMBER_SIGNED               0
+
+/*
+	Register SAI_CHANNEL_EYE_START
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_START  0x0024
+#define ODN_CHANNEL_EYE_START_MASK              0xFFFFFFFFU
+#define ODN_CHANNEL_EYE_START_SHIFT             0
+#define ODN_CHANNEL_EYE_START_SIGNED            0
+
+/*
+	Register SAI_CHANNEL_EYE_END
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_END    0x0028
+#define ODN_CHANNEL_EYE_END_MASK                0xFFFFFFFFU
+#define ODN_CHANNEL_EYE_END_SHIFT               0
+#define ODN_CHANNEL_EYE_END_SIGNED              0
+
+/*
+	Register SAI_CHANNEL_EYE_PATTERN
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_PATTERN 0x002C
+#define ODN_CHANNEL_EYE_PATTERN_MASK            0xFFFFFFFFU
+#define ODN_CHANNEL_EYE_PATTERN_SHIFT           0
+#define ODN_CHANNEL_EYE_PATTERN_SIGNED          0
+
+/*
+	Register SAI_CHANNEL_EYE_DEBUG
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_DEBUG  0x0030
+#define ODN_CHANNEL_EYE_SENSE_MASK              0x00000001U
+#define ODN_CHANNEL_EYE_SENSE_SHIFT             0
+#define ODN_CHANNEL_EYE_SENSE_SIGNED            0
+
+#define ODN_CHANNEL_EYE_COMPLETE_MASK           0x00000002U
+#define ODN_CHANNEL_EYE_COMPLETE_SHIFT          1
+#define ODN_CHANNEL_EYE_COMPLETE_SIGNED         0
+
+
+/*********************************
+ Generated from: sai_tx_debug.def
+**********************************/
+
+/*
+	Register SIG_RESULT
+*/
+#define ODN_SAI_TX_DEBUG_SIG_RESULT             0x0000
+#define ODN_TX_SIG_RESULT_VALUE_MASK            0xFFFFFFFFU
+#define ODN_TX_SIG_RESULT_VALUE_SHIFT           0
+#define ODN_TX_SIG_RESULT_VALUE_SIGNED          0
+
+/*
+	Register INIT_SIG
+*/
+#define ODN_SAI_TX_DEBUG_INIT_SIG               0x0004
+#define ODN_TX_INIT_SIG_VALUE_MASK              0x00000001U
+#define ODN_TX_INIT_SIG_VALUE_SHIFT             0
+#define ODN_TX_INIT_SIG_VALUE_SIGNED            0
+
+/*
+	Register SAI_BYPASS
+*/
+#define ODN_SAI_TX_DEBUG_SAI_BYPASS             0x0008
+#define ODN_TX_BYPASS_EN_MASK                   0x00000001U
+#define ODN_TX_BYPASS_EN_SHIFT                  0
+#define ODN_TX_BYPASS_EN_SIGNED                 0
+
+#define ODN_TX_ACK_RESEND_MASK                  0x00000002U
+#define ODN_TX_ACK_RESEND_SHIFT                 1
+#define ODN_TX_ACK_RESEND_SIGNED                0
+
+#define ODN_TX_DISABLE_ACK_SEND_MASK            0x00000004U
+#define ODN_TX_DISABLE_ACK_SEND_SHIFT           2
+#define ODN_TX_DISABLE_ACK_SEND_SIGNED          0
+
+/*
+	Register SAI_STATUS
+*/
+#define ODN_SAI_TX_DEBUG_SAI_STATUS             0x000C
+#define ODN_TX_TRAINING_COMPLETE_MASK           0x00000001U
+#define ODN_TX_TRAINING_COMPLETE_SHIFT          0
+#define ODN_TX_TRAINING_COMPLETE_SIGNED         0
+
+#define ODN_TX_TRAINING_ACK_COMPLETE_MASK       0x00000002U
+#define ODN_TX_TRAINING_ACK_COMPLETE_SHIFT      1
+#define ODN_TX_TRAINING_ACK_COMPLETE_SIGNED     0
+
+
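+/*
+ * Usage sketch (illustrative only): each field above is described by a
+ * _MASK/_SHIFT pair, with _SIGNED indicating whether the field holds a
+ * signed value. A read-modify-write of a single field looks like:
+ *
+ *	u32 v = ioread32(base + ODN_CORE_INTERRUPT_ENABLE);
+ *
+ *	v &= ~ODN_INTERRUPT_ENABLE_UART_MASK;
+ *	v |= (1U << ODN_INTERRUPT_ENABLE_UART_SHIFT) &
+ *	     ODN_INTERRUPT_ENABLE_UART_MASK;
+ *	iowrite32(v, base + ODN_CORE_INTERRUPT_ENABLE);
+ */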
+
+#endif /* _ODIN_REGS_H_ */
+
+/*****************************************************************************
+ End of file (odin_regs.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_apollo.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_apollo.c
new file mode 100644
index 0000000..f78fa74
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_apollo.c
@@ -0,0 +1,334 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/delay.h>
+
+#include "pdp_apollo.h"
+#include "pdp_common.h"
+#include "pdp_regs.h"
+#include "tcf_rgbpdp_regs.h"
+#include "tcf_pll.h"
+
+/* Map a register to the "pll-regs" region */
+#define PLL_REG(n) ((n) - TCF_PLL_PLL_PDP_CLK0)
+
+bool pdp_apollo_clocks_set(struct device *dev,
+			   void __iomem *pdp_reg, void __iomem *pll_reg,
+			   u32 clock_in_mhz,
+			   void __iomem *odn_core_reg,
+			   u32 hdisplay, u32 vdisplay)
+{
+	/*
+	 * Set up TCF_CR_PLL_PDP_CLK1TO5 based on the main clock speed
+	 * (register value 0 or 0x3)
+	 */
+	const u32 clock = (clock_in_mhz >= 50) ? 0 : 0x3;
+
+	/* Set phase 0, ratio 50:50 and frequency in MHz */
+	pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_CLK0), clock_in_mhz);
+
+	pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_CLK1TO5), clock);
+
+	/* Now initiate reprogramming of the PLLs */
+	pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_DRP_GO), 0x1);
+
+	udelay(1000);
+
+	pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_DRP_GO), 0x0);
+
+	return true;
+}
+
+void pdp_apollo_set_updates_enabled(struct device *dev, void __iomem *pdp_reg,
+				    bool enable)
+{
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set updates: %s\n", enable ? "enable" : "disable");
+#endif
+	/* nothing to do here */
+}
+
+void pdp_apollo_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg,
+				    bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set syncgen: %s\n", enable ? "enable" : "disable");
+#endif
+
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL);
+	value = REG_VALUE_SET(value, enable ? 0x1 : 0x0,
+			      SYNCACTIVE_SHIFT, SYNCACTIVE_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, value);
+}
+
+void pdp_apollo_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg,
+				     bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set powerdwn: %s\n", enable ? "enable" : "disable");
+#endif
+
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL);
+	value = REG_VALUE_SET(value, enable ? 0x1 : 0x0,
+			      POWERDN_SHIFT, POWERDN_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, value);
+}
+
+void pdp_apollo_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg,
+				   bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set vblank: %s\n", enable ? "enable" : "disable");
+#endif
+
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB);
+	value = REG_VALUE_SET(value, enable ? 0x1 : 0x0,
+			      INTEN_VBLNK0_SHIFT, INTEN_VBLNK0_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB, value);
+}
+
+bool pdp_apollo_check_and_clear_vblank(struct device *dev,
+				       void __iomem *pdp_reg)
+{
+	u32 value;
+
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT);
+
+	if (REG_VALUE_GET(value, INTS_VBLNK0_SHIFT, INTS_VBLNK0_MASK)) {
+		value = REG_VALUE_SET(0, 0x1,
+				      INTCLR_VBLNK0_SHIFT, INTCLR_VBLNK0_MASK);
+		pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR, value);
+		return true;
+	}
+	return false;
+}
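+
+/*
+ * Illustrative use from an interrupt handler (the handler and the
+ * pdp_device type here are hypothetical, not part of this driver):
+ *
+ *	static irqreturn_t pdp_irq(int irq, void *data)
+ *	{
+ *		struct pdp_device *pdp = data;
+ *
+ *		if (!pdp_apollo_check_and_clear_vblank(pdp->dev, pdp->regs))
+ *			return IRQ_NONE;
+ *		return IRQ_HANDLED;
+ *	}
+ */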
+
+void pdp_apollo_set_plane_enabled(struct device *dev, void __iomem *pdp_reg,
+				  u32 plane, bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set plane %u: %s\n",
+		 plane, enable ? "enable" : "disable");
+#endif
+
+	if (plane > 0) {
+		dev_err(dev, "Maximum of 1 plane is supported\n");
+		return;
+	}
+
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL);
+	value = REG_VALUE_SET(value, enable ? 0x1 : 0x0,
+			      STR1STREN_SHIFT, STR1STREN_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, value);
+}
+
+void pdp_apollo_reset_planes(struct device *dev, void __iomem *pdp_reg)
+{
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Reset planes\n");
+#endif
+
+	pdp_apollo_set_plane_enabled(dev, pdp_reg, 0, false);
+}
+
+void pdp_apollo_set_surface(struct device *dev, void __iomem *pdp_reg,
+			    u32 plane, u32 address,
+			    u32 posx, u32 posy,
+			    u32 width, u32 height, u32 stride,
+			    u32 format, u32 alpha, bool blend)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev,
+		 "Set surface: size=%dx%d stride=%d format=%d address=0x%x\n",
+		 width, height, stride, format, address);
+#endif
+
+	if (plane > 0) {
+		dev_err(dev, "Maximum of 1 plane is supported\n");
+		return;
+	}
+
+	/* Size & format */
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF);
+	value = REG_VALUE_SET(value, width - 1,
+			      STR1WIDTH_SHIFT, STR1WIDTH_MASK);
+	value = REG_VALUE_SET(value, height - 1,
+			      STR1HEIGHT_SHIFT, STR1HEIGHT_MASK);
+	value = REG_VALUE_SET(value, format,
+			      STR1PIXFMT_SHIFT, STR1PIXFMT_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF, value);
+	/* Stride */
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_PDP_STR1POSN);
+	value = REG_VALUE_SET(value,
+			      (stride >> DCPDP_STR1POSN_STRIDE_SHIFT) - 1,
+			      STR1STRIDE_SHIFT, STR1STRIDE_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_PDP_STR1POSN, value);
+	/* Disable interlaced output */
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL);
+	value = REG_VALUE_SET(value, 0x0,
+			      STR1INTFIELD_SHIFT,
+			      STR1INTFIELD_MASK);
+	/* Frame buffer base address */
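+	/*
+	 * Address bits below DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT are
+	 * discarded by the shift, so the base address must be aligned
+	 * to that boundary.
+	 */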
+	value = REG_VALUE_SET(value,
+			      address >> DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT,
+			      STR1BASE_SHIFT, STR1BASE_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, value);
+}
+
+void pdp_apollo_mode_set(struct device *dev, void __iomem *pdp_reg,
+			 u32 h_display, u32 v_display,
+			 u32 hbps, u32 ht, u32 has,
+			 u32 hlbs, u32 hfps, u32 hrbs,
+			 u32 vbps, u32 vt, u32 vas,
+			 u32 vtbs, u32 vfps, u32 vbbs,
+			 bool nhsync, bool nvsync)
+{
+	u32 value;
+
+	dev_info(dev, "Set mode: %dx%d\n", h_display, v_display);
+#ifdef PDP_VERBOSE
+	dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n",
+		 ht, hbps, has, hlbs, hfps, hrbs);
+	dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n",
+		 vt, vbps, vas, vtbs, vfps, vbbs);
+#endif
+
+#if 0
+	/* This block came from the Android implementation (it is not in
+	 * the Linux one); its purpose is unclear and it does not appear
+	 * to be necessary.
+	 */
+	if (pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL)
+		!= 0x0000C010) {
+		/* Buffer request threshold */
+		pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL,
+				   0x00001C10);
+	}
+#endif
+
+	/* Border colour */
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL);
+	value = REG_VALUE_SET(value, 0x0, BORDCOL_SHIFT, BORDCOL_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL, value);
+
+	/* Update control */
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL);
+	value = REG_VALUE_SET(value, 0x0, UPDFIELD_SHIFT, UPDFIELD_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL, value);
+
+	/* Set hsync timings */
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1);
+	value = REG_VALUE_SET(value, hbps, HBPS_SHIFT, HBPS_MASK);
+	value = REG_VALUE_SET(value, ht, HT_SHIFT, HT_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1, value);
+
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2);
+	value = REG_VALUE_SET(value, has, HAS_SHIFT, HAS_MASK);
+	value = REG_VALUE_SET(value, hlbs, HLBS_SHIFT, HLBS_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2, value);
+
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3);
+	value = REG_VALUE_SET(value, hfps, HFPS_SHIFT, HFPS_MASK);
+	value = REG_VALUE_SET(value, hrbs, HRBS_SHIFT, HRBS_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3, value);
+
+	/* Set vsync timings */
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1);
+	value = REG_VALUE_SET(value, vbps, VBPS_SHIFT, VBPS_MASK);
+	value = REG_VALUE_SET(value, vt, VT_SHIFT, VT_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1, value);
+
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2);
+	value = REG_VALUE_SET(value, vas, VAS_SHIFT, VAS_MASK);
+	value = REG_VALUE_SET(value, vtbs, VTBS_SHIFT, VTBS_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2, value);
+
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3);
+	value = REG_VALUE_SET(value, vfps, VFPS_SHIFT, VFPS_MASK);
+	value = REG_VALUE_SET(value, vbbs, VBBS_SHIFT, VBBS_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3, value);
+
+	/* Horizontal data enable */
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL);
+	value = REG_VALUE_SET(value, hlbs, HDES_SHIFT, HDES_MASK);
+	value = REG_VALUE_SET(value, hfps, HDEF_SHIFT, HDEF_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL, value);
+
+	/* Vertical data enable */
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL);
+	value = REG_VALUE_SET(value, vtbs, VDES_SHIFT, VDES_MASK);
+	value = REG_VALUE_SET(value, vfps, VDEF_SHIFT, VDEF_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL, value);
+
+	/* Vertical event start and vertical fetch start */
+	value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT);
+	value = REG_VALUE_SET(value, vbps, VFETCH_SHIFT, VFETCH_MASK);
+	value = REG_VALUE_SET(value, vfps, VEVENT_SHIFT, VEVENT_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT, value);
+
+	/* Set up polarities of sync/blank */
+	value = REG_VALUE_SET(0, 0x1, BLNKPOL_SHIFT, BLNKPOL_MASK);
+
+	/*
+	 * Enable this if you want vblnk1. You also need to change to vblnk1
+	 * in the interrupt handler.
+	 */
+#if 0
+	value = REG_VALUE_SET(value, 0x1, FIELDPOL_SHIFT, FIELDPOL_MASK);
+#endif
+	if (nhsync)
+		value = REG_VALUE_SET(value, 0x1, HSPOL_SHIFT, HSPOL_MASK);
+	if (nvsync)
+		value = REG_VALUE_SET(value, 0x1, VSPOL_SHIFT, VSPOL_MASK);
+	pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, value);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_apollo.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_apollo.h
new file mode 100644
index 0000000..a773d415
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_apollo.h
@@ -0,0 +1,90 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_APOLLO_H__)
+#define __PDP_APOLLO_H__
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+bool pdp_apollo_clocks_set(struct device *dev,
+			   void __iomem *pdp_reg, void __iomem *pll_reg,
+			   u32 clock_in_mhz,
+			   void __iomem *odn_core_reg,
+			   u32 hdisplay, u32 vdisplay);
+
+void pdp_apollo_set_updates_enabled(struct device *dev, void __iomem *pdp_reg,
+				    bool enable);
+
+void pdp_apollo_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg,
+				    bool enable);
+
+void pdp_apollo_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg,
+				     bool enable);
+
+void pdp_apollo_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg,
+				   bool enable);
+
+bool pdp_apollo_check_and_clear_vblank(struct device *dev,
+				       void __iomem *pdp_reg);
+
+void pdp_apollo_set_plane_enabled(struct device *dev, void __iomem *pdp_reg,
+				  u32 plane, bool enable);
+
+void pdp_apollo_reset_planes(struct device *dev, void __iomem *pdp_reg);
+
+void pdp_apollo_set_surface(struct device *dev, void __iomem *pdp_reg,
+			    u32 plane, u32 address,
+			    u32 posx, u32 posy,
+			    u32 width, u32 height, u32 stride,
+			    u32 format, u32 alpha, bool blend);
+
+void pdp_apollo_mode_set(struct device *dev, void __iomem *pdp_reg,
+			 u32 h_display, u32 v_display,
+			 u32 hbps, u32 ht, u32 has,
+			 u32 hlbs, u32 hfps, u32 hrbs,
+			 u32 vbps, u32 vt, u32 vas,
+			 u32 vtbs, u32 vfps, u32 vbbs,
+			 bool nhsync, bool nvsync);
+
+#endif /* __PDP_APOLLO_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_common.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_common.h
new file mode 100644
index 0000000..5dbbb14
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_common.h
@@ -0,0 +1,99 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_COMMON_H__)
+#define __PDP_COMMON_H__
+
+#include <linux/io.h>
+
+/*#define PDP_VERBOSE*/
+
+#define REG_VALUE_GET(v, s, m) \
+	(u32)(((v) & (m)) >> (s))
+#define REG_VALUE_SET(v, b, s, m) \
+	(u32)(((v) & (u32)~(m)) | (u32)(((b) << (s)) & (m)))
+/* Active low */
+#define REG_VALUE_LO(v, b, s, m) \
+	(u32)((v) & ~(u32)(((b) << (s)) & (m)))
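+
+/*
+ * Example (illustrative): set the single-bit field at shift 4
+ * (mask 0x10) of a PDP register, then write it back:
+ *
+ *	u32 v = pdp_rreg32(base, reg);
+ *
+ *	v = REG_VALUE_SET(v, 0x1, 4, 0x10);
+ *	pdp_wreg32(base, reg, v);
+ *
+ * REG_VALUE_GET(v, 4, 0x10) reads the field back out, and
+ * REG_VALUE_LO clears the selected bits of an active-low field.
+ */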
+
+enum pdp_version {
+	PDP_VERSION_APOLLO,
+	PDP_VERSION_ODIN,
+	PDP_VERSION_PLATO,
+};
+
+/* Register R-W */
+static inline u32 core_rreg32(void __iomem *base, resource_size_t reg)
+{
+	return ioread32(base + reg);
+}
+
+static inline void core_wreg32(void __iomem *base, resource_size_t reg,
+			       u32 value)
+{
+	iowrite32(value, base + reg);
+}
+
+static inline u32 pdp_rreg32(void __iomem *base, resource_size_t reg)
+{
+	return ioread32(base + reg);
+}
+
+static inline void pdp_wreg32(void __iomem *base, resource_size_t reg,
+			      u32 value)
+{
+	iowrite32(value, base + reg);
+}
+
+static inline u32 pll_rreg32(void __iomem *base, resource_size_t reg)
+{
+	return ioread32(base + reg);
+}
+
+static inline void pll_wreg32(void __iomem *base, resource_size_t reg,
+			      u32 value)
+{
+	iowrite32(value, base + reg);
+}
+
+#endif /* __PDP_COMMON_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_odin.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_odin.c
new file mode 100644
index 0000000..2179b29
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_odin.c
@@ -0,0 +1,933 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/delay.h>
+
+#include "pdp_common.h"
+#include "pdp_odin.h"
+#include "odin_defs.h"
+#include "odin_regs.h"
+
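+/* Map a register to the "pll-regs" region */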
+#define ODIN_PLL_REG(n)	((n) - ODN_PDP_P_CLK_OUT_DIVIDER_REG1)
+
+struct odin_displaymode {
+	int w;		/* display width */
+	int h;		/* display height */
+	int id;		/* pixel clock input divider */
+	int m;		/* pixel clock multiplier */
+	int od1;	/* pixel clock output divider */
+	int od2;	/* mem clock output divider */
+};
+
+/*
+ * Only the display modes listed below are supported on Odin:
+ * 1080p id=5, m=37, od1=5, od2=5
+ * 720p id=5, m=37, od1=10, od2=5
+ * 1280x1024 id=1, m=14, od1=13, od2=10
+ * 1440x900 id=5, m=53, od1=10, od2=8
+ * 1280x960 id=3, m=40, od1=13, od2=9
+ * 1024x768 id=1, m=13, od1=20, od2=10
+ * 800x600 id=2, m=20, od1=25, od2=7
+ * 640x480 id=1, m=12, od1=48, od2=9
+ * ... where id is the PDP_P_CLK input divider,
+ * m is the PDP_P_CLK multiplier (regs 1 to 3),
+ * od1 is the PDP_P_CLK output divider (regs 1 to 3) and
+ * od2 is the PDP_M_CLK output divider (regs 1 to 2),
+ * matching the odin_modes table below.
+ */
+static const struct odin_displaymode odin_modes[] = {
+	{.w = 1920, .h = 1080, .id = 5, .m = 37, .od1 = 5, .od2 = 5},
+	{.w = 1280, .h = 720, .id = 5, .m = 37, .od1 = 10, .od2 = 5},
+	{.w = 1280, .h = 1024, .id = 1, .m = 14, .od1 = 13, .od2 = 10},
+	{.w = 1440, .h = 900, .id = 5, .m = 53, .od1 = 10, .od2 = 8},
+	{.w = 1280, .h = 960, .id = 3, .m = 40, .od1 = 13, .od2 = 9},
+	{.w = 1024, .h = 768, .id = 1, .m = 13, .od1 = 20, .od2 = 10},
+	{.w = 800, .h = 600, .id = 2, .m = 20, .od1 = 25, .od2 = 7},
+	{.w = 640, .h = 480, .id = 1, .m = 12, .od1 = 48, .od2 = 9},
+	{.w = 0, .h = 0, .id = 0, .m = 0, .od1 = 0, .od2 = 0}
+};
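+
+/*
+ * Illustrative sanity check: assuming the nominal 100 MHz Odin input
+ * clock (an assumption here, not defined in this file), the pixel
+ * clock is 100 / id * m / od1 MHz. For 1080p that gives
+ * 100 / 5 * 37 / 5 = 148 MHz, and for 720p 100 / 5 * 37 / 10 = 74 MHz,
+ * close to the standard 148.5 and 74.25 MHz rates.
+ */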
+
+static const u32 GRPH_SURF_OFFSET[] = {
+	ODN_PDP_GRPH1SURF_OFFSET,
+	ODN_PDP_GRPH2SURF_OFFSET,
+	ODN_PDP_VID1SURF_OFFSET,
+	ODN_PDP_GRPH4SURF_OFFSET
+};
+static const u32 GRPH_SURF_GRPH_PIXFMT_SHIFT[] = {
+	ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT,
+	ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT,
+	ODN_PDP_VID1SURF_VID1PIXFMT_SHIFT,
+	ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT
+};
+static const u32 GRPH_SURF_GRPH_PIXFMT_MASK[] = {
+	ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK,
+	ODN_PDP_GRPH2SURF_GRPH2PIXFMT_MASK,
+	ODN_PDP_VID1SURF_VID1PIXFMT_MASK,
+	ODN_PDP_GRPH4SURF_GRPH4PIXFMT_MASK
+};
+static const u32 GRPH_GALPHA_OFFSET[] = {
+	ODN_PDP_GRPH1GALPHA_OFFSET,
+	ODN_PDP_GRPH2GALPHA_OFFSET,
+	ODN_PDP_VID1GALPHA_OFFSET,
+	ODN_PDP_GRPH4GALPHA_OFFSET
+};
+static const u32 GRPH_GALPHA_GRPH_GALPHA_SHIFT[] = {
+	ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT,
+	ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT,
+	ODN_PDP_VID1GALPHA_VID1GALPHA_SHIFT,
+	ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT
+};
+static const u32 GRPH_GALPHA_GRPH_GALPHA_MASK[] = {
+	ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_MASK,
+	ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_MASK,
+	ODN_PDP_VID1GALPHA_VID1GALPHA_MASK,
+	ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_MASK
+};
+static const u32 GRPH_CTRL_OFFSET[] = {
+	ODN_PDP_GRPH1CTRL_OFFSET,
+	ODN_PDP_GRPH2CTRL_OFFSET,
+	ODN_PDP_VID1CTRL_OFFSET,
+	ODN_PDP_GRPH4CTRL_OFFSET,
+};
+static const u32 GRPH_CTRL_GRPH_BLEND_SHIFT[] = {
+	ODN_PDP_GRPH1CTRL_GRPH1BLEND_SHIFT,
+	ODN_PDP_GRPH2CTRL_GRPH2BLEND_SHIFT,
+	ODN_PDP_VID1CTRL_VID1BLEND_SHIFT,
+	ODN_PDP_GRPH4CTRL_GRPH4BLEND_SHIFT
+};
+static const u32 GRPH_CTRL_GRPH_BLEND_MASK[] = {
+	ODN_PDP_GRPH1CTRL_GRPH1BLEND_MASK,
+	ODN_PDP_GRPH2CTRL_GRPH2BLEND_MASK,
+	ODN_PDP_VID1CTRL_VID1BLEND_MASK,
+	ODN_PDP_GRPH4CTRL_GRPH4BLEND_MASK
+};
+static const u32 GRPH_CTRL_GRPH_BLENDPOS_SHIFT[] = {
+	ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT,
+	ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT,
+	ODN_PDP_VID1CTRL_VID1BLENDPOS_SHIFT,
+	ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT
+};
+static const u32 GRPH_CTRL_GRPH_BLENDPOS_MASK[] = {
+	ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK,
+	ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK,
+	ODN_PDP_VID1CTRL_VID1BLENDPOS_MASK,
+	ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK
+};
+static const u32 GRPH_CTRL_GRPH_STREN_SHIFT[] = {
+	ODN_PDP_GRPH1CTRL_GRPH1STREN_SHIFT,
+	ODN_PDP_GRPH2CTRL_GRPH2STREN_SHIFT,
+	ODN_PDP_VID1CTRL_VID1STREN_SHIFT,
+	ODN_PDP_GRPH4CTRL_GRPH4STREN_SHIFT
+};
+static const u32 GRPH_CTRL_GRPH_STREN_MASK[] = {
+	ODN_PDP_GRPH1CTRL_GRPH1STREN_MASK,
+	ODN_PDP_GRPH2CTRL_GRPH2STREN_MASK,
+	ODN_PDP_VID1CTRL_VID1STREN_MASK,
+	ODN_PDP_GRPH4CTRL_GRPH4STREN_MASK
+};
+static const u32 GRPH_POSN_OFFSET[] = {
+	ODN_PDP_GRPH1POSN_OFFSET,
+	ODN_PDP_GRPH2POSN_OFFSET,
+	ODN_PDP_VID1POSN_OFFSET,
+	ODN_PDP_GRPH4POSN_OFFSET
+};
+static const u32 GRPH_POSN_GRPH_XSTART_SHIFT[] = {
+	ODN_PDP_GRPH1POSN_GRPH1XSTART_SHIFT,
+	ODN_PDP_GRPH2POSN_GRPH2XSTART_SHIFT,
+	ODN_PDP_VID1POSN_VID1XSTART_SHIFT,
+	ODN_PDP_GRPH4POSN_GRPH4XSTART_SHIFT,
+};
+static const u32 GRPH_POSN_GRPH_XSTART_MASK[] = {
+	ODN_PDP_GRPH1POSN_GRPH1XSTART_MASK,
+	ODN_PDP_GRPH2POSN_GRPH2XSTART_MASK,
+	ODN_PDP_VID1POSN_VID1XSTART_MASK,
+	ODN_PDP_GRPH4POSN_GRPH4XSTART_MASK,
+};
+static const u32 GRPH_POSN_GRPH_YSTART_SHIFT[] = {
+	ODN_PDP_GRPH1POSN_GRPH1YSTART_SHIFT,
+	ODN_PDP_GRPH2POSN_GRPH2YSTART_SHIFT,
+	ODN_PDP_VID1POSN_VID1YSTART_SHIFT,
+	ODN_PDP_GRPH4POSN_GRPH4YSTART_SHIFT,
+};
+static const u32 GRPH_POSN_GRPH_YSTART_MASK[] = {
+	ODN_PDP_GRPH1POSN_GRPH1YSTART_MASK,
+	ODN_PDP_GRPH2POSN_GRPH2YSTART_MASK,
+	ODN_PDP_VID1POSN_VID1YSTART_MASK,
+	ODN_PDP_GRPH4POSN_GRPH4YSTART_MASK,
+};
+static const u32 GRPH_SIZE_OFFSET[] = {
+	ODN_PDP_GRPH1SIZE_OFFSET,
+	ODN_PDP_GRPH2SIZE_OFFSET,
+	ODN_PDP_VID1SIZE_OFFSET,
+	ODN_PDP_GRPH4SIZE_OFFSET,
+};
+static const u32 GRPH_SIZE_GRPH_WIDTH_SHIFT[] = {
+	ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT,
+	ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT,
+	ODN_PDP_VID1SIZE_VID1WIDTH_SHIFT,
+	ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT
+};
+static const u32 GRPH_SIZE_GRPH_WIDTH_MASK[] = {
+	ODN_PDP_GRPH1SIZE_GRPH1WIDTH_MASK,
+	ODN_PDP_GRPH2SIZE_GRPH2WIDTH_MASK,
+	ODN_PDP_VID1SIZE_VID1WIDTH_MASK,
+	ODN_PDP_GRPH4SIZE_GRPH4WIDTH_MASK
+};
+static const u32 GRPH_SIZE_GRPH_HEIGHT_SHIFT[] = {
+	ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT,
+	ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT,
+	ODN_PDP_VID1SIZE_VID1HEIGHT_SHIFT,
+	ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT
+};
+static const u32 GRPH_SIZE_GRPH_HEIGHT_MASK[] = {
+	ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_MASK,
+	ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_MASK,
+	ODN_PDP_VID1SIZE_VID1HEIGHT_MASK,
+	ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_MASK
+};
+static const u32 GRPH_STRIDE_OFFSET[] = {
+	ODN_PDP_GRPH1STRIDE_OFFSET,
+	ODN_PDP_GRPH2STRIDE_OFFSET,
+	ODN_PDP_VID1STRIDE_OFFSET,
+	ODN_PDP_GRPH4STRIDE_OFFSET
+};
+static const u32 GRPH_STRIDE_GRPH_STRIDE_SHIFT[] = {
+	ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT,
+	ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT,
+	ODN_PDP_VID1STRIDE_VID1STRIDE_SHIFT,
+	ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT
+};
+static const u32 GRPH_STRIDE_GRPH_STRIDE_MASK[] = {
+	ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_MASK,
+	ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_MASK,
+	ODN_PDP_VID1STRIDE_VID1STRIDE_MASK,
+	ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_MASK
+};
+static const u32 GRPH_INTERLEAVE_CTRL_OFFSET[] = {
+	ODN_PDP_GRPH1INTERLEAVE_CTRL_OFFSET,
+	ODN_PDP_GRPH2INTERLEAVE_CTRL_OFFSET,
+	ODN_PDP_VID1INTERLEAVE_CTRL_OFFSET,
+	ODN_PDP_GRPH4INTERLEAVE_CTRL_OFFSET
+};
+static const u32 GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_SHIFT[] = {
+	ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT,
+	ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT,
+	ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT,
+	ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT
+};
+static const u32 GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_MASK[] = {
+	ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK,
+	ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK,
+	ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK,
+	ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK
+};
+static const u32 GRPH_BASEADDR_OFFSET[] = {
+	ODN_PDP_GRPH1BASEADDR_OFFSET,
+	ODN_PDP_GRPH2BASEADDR_OFFSET,
+	ODN_PDP_VID1BASEADDR_OFFSET,
+	ODN_PDP_GRPH4BASEADDR_OFFSET
+};
+
+
+static void get_odin_clock_settings(u32 value, u32 *lo_time, u32 *hi_time,
+				u32 *no_count, u32 *edge)
+{
+	u32 lt, ht;
+
+	/* If the value is 1, High Time & Low Time are both set to 1
+	 * and the NOCOUNT bit is set to 1.
+	 */
+	if (value == 1) {
+		*lo_time = 1;
+		*hi_time = 1;
+
+		/* If od is an odd number then write 1 to NO_COUNT
+		 * otherwise write 0.
+		 */
+		*no_count = 1;
+
+		/* If m is an odd number then write 1 to EDGE bit of MR2
+		 * otherwise write 0.
+		 * If id is an odd number then write 1 to EDGE bit of ID
+		 * otherwise write 0.
+		 */
+		*edge = 0;
+		return;
+	}
+	*no_count = 0;
+
+	/* High Time & Low Time are each half the value listed
+	 * for each PDP mode.
+	 */
+	lt = value >> 1;
+	ht = lt;
+
+	/* If the value is odd, Low Time is rounded up to nearest integer
+	 * and High Time is rounded down, and Edge is set to 1.
+	 */
+	if (value & 1) {
+		lt++;
+
+		/* If m is an odd number then write 1 to EDGE bit of MR2
+		 * otherwise write 0.
+		 * If id is an odd number then write 1 to EDGE bit of ID
+		 * otherwise write 0.
+		 */
+		*edge = 1;
+
+	} else {
+		*edge = 0;
+	}
+	*hi_time = ht;
+	*lo_time = lt;
+}
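+
+/*
+ * Worked example (illustrative): for a divider value of 13 (odd) the
+ * function yields lo_time = 7, hi_time = 6, no_count = 0 and edge = 1;
+ * for a value of 1 the counter is bypassed, with
+ * lo_time = hi_time = no_count = 1 and edge = 0.
+ */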
+
+static const struct odin_displaymode *get_odin_mode(int w, int h)
+{
+	int n = 0;
+
+	do {
+		if ((odin_modes[n].w == w) && (odin_modes[n].h == h))
+			return odin_modes + n;
+
+	} while (odin_modes[n++].w);
+
+	return NULL;
+}
+
+bool pdp_odin_clocks_set(struct device *dev,
+			 void __iomem *pdp_reg, void __iomem *pll_reg,
+			 u32 clock_freq,
+			 void __iomem *odn_core_reg,
+			 u32 hdisplay, u32 vdisplay)
+{
+	u32 value;
+	const struct odin_displaymode *odispl;
+	u32 hi_time, lo_time, no_count, edge;
+	u32 core_id, core_rev;
+
+	core_id = pdp_rreg32(pdp_reg, ODN_PDP_CORE_ID_OFFSET);
+	dev_info(dev, "Odin-PDP CORE_ID  %08X\n", core_id);
+
+	core_rev = pdp_rreg32(odn_core_reg, ODN_PDP_CORE_REV_OFFSET);
+	dev_info(dev, "Odin-PDP CORE_REV %08X\n", core_rev);
+
+	odispl = get_odin_mode(hdisplay, vdisplay);
+	if (!odispl) {
+		dev_err(dev, "Display mode not supported.\n");
+		return false;
+	}
+
+	/*
+	 * The PDP uses a Xilinx clock that requires read
+	 * modify write for all registers.
+	 * It is essential that only the specified bits are changed
+	 * because other bits are in use.
+	 * To change PDP clocks reset PDP & PDP mmcm (PLL) first,
+	 * then apply changes and then un-reset mmcm & PDP.
+	 * Warm reset will keep the changes.
+	 *    wr 0x000080 0x1f7 ; # reset pdp
+	 *    wr 0x000090 8 ; # reset pdp mmcm
+	 * then apply clock changes, then
+	 *    wr 0x000090 0x0 ; # un-reset pdp mmcm
+	 *    wr 0x000080 0x1ff ; # un-reset pdp
+	 */
+
+	/*
+	 * Hold Odin PDP1 in reset while changing the clock regs.
+	 * The PDP1 bit (bit 3) of ODN_CORE_INTERNAL_RESETN is active
+	 * low, so set it to 0 to reset.
+	 */
+	value = core_rreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN);
+	value = REG_VALUE_LO(value, 1, ODN_INTERNAL_RESETN_PDP1_SHIFT,
+			     ODN_INTERNAL_RESETN_PDP1_MASK);
+	core_wreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN, value);
+
+	/*
+	 * Hold the PDP MMCM in reset while changing the clock regs.
+	 * Set the PDP1 bit of ODN_CORE_CLK_GEN_RESET high to reset.
+	 */
+	value = core_rreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET);
+	value = REG_VALUE_SET(value, 0x1,
+			      ODN_INTERNAL_RESETN_PDP1_SHIFT,
+			      ODN_INTERNAL_RESETN_PDP1_MASK);
+	core_wreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET, value);
+
+	/* Pixel clock Input divider */
+	get_odin_clock_settings(odispl->id, &lo_time, &hi_time,
+				&no_count, &edge);
+
+	value = pll_rreg32(pll_reg,
+			   ODIN_PLL_REG(ODN_PDP_P_CLK_IN_DIVIDER_REG));
+	value = REG_VALUE_SET(value, lo_time,
+			      ODN_PDP_PCLK_IDIV_LO_TIME_SHIFT,
+			      ODN_PDP_PCLK_IDIV_LO_TIME_MASK);
+	value = REG_VALUE_SET(value, hi_time,
+			      ODN_PDP_PCLK_IDIV_HI_TIME_SHIFT,
+			      ODN_PDP_PCLK_IDIV_HI_TIME_MASK);
+	value = REG_VALUE_SET(value, no_count,
+			      ODN_PDP_PCLK_IDIV_NOCOUNT_SHIFT,
+			      ODN_PDP_PCLK_IDIV_NOCOUNT_MASK);
+	value = REG_VALUE_SET(value, edge,
+			      ODN_PDP_PCLK_IDIV_EDGE_SHIFT,
+			      ODN_PDP_PCLK_IDIV_EDGE_MASK);
+	pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_IN_DIVIDER_REG),
+		   value);
+
+	/* Pixel clock Output divider */
+	get_odin_clock_settings(odispl->od1, &lo_time, &hi_time,
+				&no_count, &edge);
+
+	/* Pixel clock Output divider reg1 */
+	value = pll_rreg32(pll_reg,
+			   ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG1));
+	value = REG_VALUE_SET(value, lo_time,
+			      ODN_PDP_PCLK_ODIV1_LO_TIME_SHIFT,
+			      ODN_PDP_PCLK_ODIV1_LO_TIME_MASK);
+	value = REG_VALUE_SET(value, hi_time,
+			      ODN_PDP_PCLK_ODIV1_HI_TIME_SHIFT,
+			      ODN_PDP_PCLK_ODIV1_HI_TIME_MASK);
+	pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG1),
+		   value);
+
+	/* Pixel clock Output divider reg2 */
+	value = pll_rreg32(pll_reg,
+			   ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG2));
+	value = REG_VALUE_SET(value, no_count,
+			      ODN_PDP_PCLK_ODIV2_NOCOUNT_SHIFT,
+			      ODN_PDP_PCLK_ODIV2_NOCOUNT_MASK);
+	value = REG_VALUE_SET(value, edge,
+			      ODN_PDP_PCLK_ODIV2_EDGE_SHIFT,
+			      ODN_PDP_PCLK_ODIV2_EDGE_MASK);
+	pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG2),
+		   value);
+
+	/* Pixel clock Multiplier */
+	get_odin_clock_settings(odispl->m, &lo_time, &hi_time,
+				&no_count, &edge);
+
+	/* Pixel clock Multiplier reg1 */
+	value = pll_rreg32(pll_reg,
+			   ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG1));
+	value = REG_VALUE_SET(value, lo_time,
+			      ODN_PDP_PCLK_MUL1_LO_TIME_SHIFT,
+			      ODN_PDP_PCLK_MUL1_LO_TIME_MASK);
+	value = REG_VALUE_SET(value, hi_time,
+			      ODN_PDP_PCLK_MUL1_HI_TIME_SHIFT,
+			      ODN_PDP_PCLK_MUL1_HI_TIME_MASK);
+	pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG1),
+		   value);
+
+	/* Pixel clock Multiplier reg2 */
+	value = pll_rreg32(pll_reg,
+			   ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG2));
+	value = REG_VALUE_SET(value, no_count,
+			      ODN_PDP_PCLK_MUL2_NOCOUNT_SHIFT,
+			      ODN_PDP_PCLK_MUL2_NOCOUNT_MASK);
+	value = REG_VALUE_SET(value, edge,
+			      ODN_PDP_PCLK_MUL2_EDGE_SHIFT,
+			      ODN_PDP_PCLK_MUL2_EDGE_MASK);
+	pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG2),
+		   value);
+
+	/* Mem clock Output divider */
+	get_odin_clock_settings(odispl->od2, &lo_time, &hi_time,
+				&no_count, &edge);
+
+	/* Mem clock Output divider reg1 */
+	value = pll_rreg32(pll_reg,
+			   ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG1));
+	value = REG_VALUE_SET(value, lo_time,
+			      ODN_PDP_MCLK_ODIV1_LO_TIME_SHIFT,
+			      ODN_PDP_MCLK_ODIV1_LO_TIME_MASK);
+	value = REG_VALUE_SET(value, hi_time,
+			      ODN_PDP_MCLK_ODIV1_HI_TIME_SHIFT,
+			      ODN_PDP_MCLK_ODIV1_HI_TIME_MASK);
+	pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG1),
+		   value);
+
+	/* Mem clock Output divider reg2 */
+	value = pll_rreg32(pll_reg,
+			   ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG2));
+	value = REG_VALUE_SET(value, no_count,
+			      ODN_PDP_MCLK_ODIV2_NOCOUNT_SHIFT,
+			      ODN_PDP_MCLK_ODIV2_NOCOUNT_MASK);
+	value = REG_VALUE_SET(value, edge,
+			      ODN_PDP_MCLK_ODIV2_EDGE_SHIFT,
+			      ODN_PDP_MCLK_ODIV2_EDGE_MASK);
+	pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG2),
+		   value);
+
+	/*
+	 * Take the PDP MMCM out of reset.
+	 * Set the PDP1 bit of ODN_CORE_CLK_GEN_RESET to 0.
+	 */
+	value = core_rreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET);
+	value = REG_VALUE_LO(value, 1, ODN_INTERNAL_RESETN_PDP1_SHIFT,
+			     ODN_INTERNAL_RESETN_PDP1_MASK);
+	core_wreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET, value);
+
+	/*
+	 * Wait until MMCM_LOCK_STATUS_PDPP bit is '1' in register
+	 * MMCM_LOCK_STATUS. Issue an error if this does not
+	 * go to '1' within 500ms.
+	 */
+	{
+		int count;
+		bool locked = false;
+
+		for (count = 0; count < 10; count++) {
+			value = core_rreg32(odn_core_reg,
+					    ODN_CORE_MMCM_LOCK_STATUS);
+			if (value & ODN_MMCM_LOCK_STATUS_PDPP) {
+				locked = true;
+				break;
+			}
+			msleep(50);
+		}
+
+		if (!locked) {
+			dev_err(dev, "The MMCM pll did not lock\n");
+			return false;
+		}
+	}
+
+	/*
+	 * Take Odin-PDP1 out of reset:
+	 * Set the PDP1 bit of ODN_CORE_INTERNAL_RESETN to 1.
+	 */
+	value = core_rreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN);
+	value = REG_VALUE_SET(value, 1, ODN_INTERNAL_RESETN_PDP1_SHIFT,
+			      ODN_INTERNAL_RESETN_PDP1_MASK);
+	core_wreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN, value);
+
+	return true;
+}
+
+void pdp_odin_set_updates_enabled(struct device *dev, void __iomem *pdp_reg,
+				  bool enable)
+{
+	u32 value = enable ?
+		(1 << ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT |
+		 1 << ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT) :
+		0x0;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set updates: %s\n", enable ? "enable" : "disable");
+#endif
+
+	pdp_wreg32(pdp_reg, ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET, value);
+}
+
+void pdp_odin_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg,
+				  bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set syncgen: %s\n", enable ? "enable" : "disable");
+#endif
+
+	value = pdp_rreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET);
+
+	value = REG_VALUE_SET(value,
+			      enable ? ODN_SYNC_GEN_ENABLE : ODN_SYNC_GEN_DISABLE,
+			      ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT,
+			      ODN_PDP_SYNCCTRL_SYNCACTIVE_MASK);
+
+	/* Invert the pixel clock */
+	value = REG_VALUE_SET(value, ODN_PIXEL_CLOCK_INVERTED,
+			      ODN_PDP_SYNCCTRL_CLKPOL_SHIFT,
+			      ODN_PDP_SYNCCTRL_CLKPOL_MASK);
+
+	/* Set the Horizontal Sync Polarity to active high */
+	value = REG_VALUE_LO(value, ODN_HSYNC_POLARITY_ACTIVE_HIGH,
+			     ODN_PDP_SYNCCTRL_HSPOL_SHIFT,
+			     ODN_PDP_SYNCCTRL_HSPOL_MASK);
+
+	pdp_wreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET, value);
+
+	/* Check for underruns when the sync generator
+	 * is being turned off.
+	 */
+	if (!enable) {
+		value = pdp_rreg32(pdp_reg, ODN_PDP_INTSTAT_OFFSET);
+		value &= ODN_PDP_INTSTAT_ALL_OURUN_MASK;
+
+		if (value) {
+			dev_warn(dev, "underruns detected. status=0x%08X\n",
+				 value);
+		} else {
+			dev_info(dev, "no underruns detected\n");
+		}
+	}
+}
+
+void pdp_odin_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg,
+				   bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set powerdwn: %s\n", enable ? "enable" : "disable");
+#endif
+
+	value = pdp_rreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET);
+
+	value = REG_VALUE_SET(value, enable ? 0x1 : 0x0,
+			      ODN_PDP_SYNCCTRL_POWERDN_SHIFT,
+			      ODN_PDP_SYNCCTRL_POWERDN_MASK);
+
+	pdp_wreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET, value);
+}
+
+void pdp_odin_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg,
+				 bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set vblank: %s\n", enable ? "enable" : "disable");
+#endif
+
+	pdp_wreg32(pdp_reg, ODN_PDP_INTCLR_OFFSET, ODN_PDP_INTCLR_ALL);
+
+	value = pdp_rreg32(pdp_reg, ODN_PDP_INTENAB_OFFSET);
+	value = REG_VALUE_SET(value, enable ? 0x1 : 0x0,
+			      ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT,
+			      ODN_PDP_INTENAB_INTEN_VBLNK0_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_INTENAB_OFFSET, value);
+}
+
+bool pdp_odin_check_and_clear_vblank(struct device *dev,
+				     void __iomem *pdp_reg)
+{
+	u32 value;
+
+	value = pdp_rreg32(pdp_reg, ODN_PDP_INTSTAT_OFFSET);
+
+	if (REG_VALUE_GET(value,
+			  ODN_PDP_INTSTAT_INTS_VBLNK0_SHIFT,
+			  ODN_PDP_INTSTAT_INTS_VBLNK0_MASK)) {
+		pdp_wreg32(pdp_reg, ODN_PDP_INTCLR_OFFSET,
+			   (1 << ODN_PDP_INTCLR_INTCLR_VBLNK0_SHIFT));
+
+		return true;
+	}
+	return false;
+}
+
+void pdp_odin_set_plane_enabled(struct device *dev, void __iomem *pdp_reg,
+				u32 plane, bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set plane %u: %s\n",
+		 plane, enable ? "enable" : "disable");
+#endif
+
+	if (plane > 3) {
+		dev_err(dev, "Maximum of 4 planes are supported\n");
+		return;
+	}
+
+	value = pdp_rreg32(pdp_reg, GRPH_CTRL_OFFSET[plane]);
+	value = REG_VALUE_SET(value, enable ? 0x1 : 0x0,
+			      GRPH_CTRL_GRPH_STREN_SHIFT[plane],
+			      GRPH_CTRL_GRPH_STREN_MASK[plane]);
+	pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[plane], value);
+}
+
+void pdp_odin_reset_planes(struct device *dev, void __iomem *pdp_reg)
+{
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Reset planes\n");
+#endif
+
+	pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[0], 0x00000000);
+	pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[1], 0x01000000);
+	pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[2], 0x02000000);
+	pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[3], 0x03000000);
+}
+
+void pdp_odin_set_surface(struct device *dev, void __iomem *pdp_reg,
+			  u32 plane, u32 address,
+			  u32 posx, u32 posy,
+			  u32 width, u32 height, u32 stride,
+			  u32 format, u32 alpha, bool blend)
+{
+	/*
+	 * Use a blender based on the plane number (this defines the Z
+	 * ordering)
+	 */
+	static const int GRPH_BLEND_POS[] = { 0x0, 0x1, 0x2, 0x3 };
+	u32 blend_mode;
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev,
+		 "Set surface: plane=%d pos=%d:%d size=%dx%d stride=%d "
+		 "format=%d alpha=%d address=0x%x\n",
+		 plane, posx, posy, width, height, stride,
+		 format, alpha, address);
+#endif
+
+	if (plane > 3) {
+		dev_err(dev, "Maximum of 4 planes are supported\n");
+		return;
+	}
+
+	if (address & 0xf)
+		dev_warn(dev, "The frame buffer address is not aligned\n");
+
+	/* Frame buffer base address */
+	pdp_wreg32(pdp_reg, GRPH_BASEADDR_OFFSET[plane], address);
+
+	/* Pos */
+	value = REG_VALUE_SET(0x0, posx,
+			      GRPH_POSN_GRPH_XSTART_SHIFT[plane],
+			      GRPH_POSN_GRPH_XSTART_MASK[plane]);
+	value = REG_VALUE_SET(value, posy,
+			      GRPH_POSN_GRPH_YSTART_SHIFT[plane],
+			      GRPH_POSN_GRPH_YSTART_MASK[plane]);
+	pdp_wreg32(pdp_reg, GRPH_POSN_OFFSET[plane], value);
+
+	/* Size */
+	value = REG_VALUE_SET(0x0, width - 1,
+			      GRPH_SIZE_GRPH_WIDTH_SHIFT[plane],
+			      GRPH_SIZE_GRPH_WIDTH_MASK[plane]);
+	value = REG_VALUE_SET(value, height - 1,
+			      GRPH_SIZE_GRPH_HEIGHT_SHIFT[plane],
+			      GRPH_SIZE_GRPH_HEIGHT_MASK[plane]);
+	pdp_wreg32(pdp_reg, GRPH_SIZE_OFFSET[plane], value);
+
+	/* Stride */
+	value = REG_VALUE_SET(0x0, (stride >> 4) - 1,
+			      GRPH_STRIDE_GRPH_STRIDE_SHIFT[plane],
+			      GRPH_STRIDE_GRPH_STRIDE_MASK[plane]);
+	pdp_wreg32(pdp_reg, GRPH_STRIDE_OFFSET[plane], value);
+
+	/* Interlace mode: progressive */
+	value = REG_VALUE_SET(0x0, ODN_INTERLACE_DISABLE,
+			      GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_SHIFT[plane],
+			      GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_MASK[plane]);
+	pdp_wreg32(pdp_reg, GRPH_INTERLEAVE_CTRL_OFFSET[plane], value);
+
+	/* Format */
+	value = REG_VALUE_SET(0x0, format,
+			      GRPH_SURF_GRPH_PIXFMT_SHIFT[plane],
+			      GRPH_SURF_GRPH_PIXFMT_MASK[plane]);
+	pdp_wreg32(pdp_reg, GRPH_SURF_OFFSET[plane], value);
+
+	/* Global alpha (0...1023) */
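+	/* The expression below rescales the 8-bit alpha input to the 10-bit
+	 * register range: 0 -> 0, 128 -> 514, 255 -> 1023.
+	 */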
+	value = REG_VALUE_SET(0x0, ((1024 * 256) / 255 * alpha) / 256,
+			      GRPH_GALPHA_GRPH_GALPHA_SHIFT[plane],
+			      GRPH_GALPHA_GRPH_GALPHA_MASK[plane]);
+	pdp_wreg32(pdp_reg, GRPH_GALPHA_OFFSET[plane], value);
+	value = pdp_rreg32(pdp_reg, GRPH_CTRL_OFFSET[plane]);
+
+	/* Blend mode */
+	if (blend) {
+		if (alpha != 255)
+			blend_mode = 0x2; /* 0b10 = global alpha blending */
+		else
+			blend_mode = 0x3; /* 0b11 = pixel alpha blending */
+	} else {
+		blend_mode = 0x0; /* 0b00 = no blending */
+	}
+	value = REG_VALUE_SET(0x0, blend_mode,
+			      GRPH_CTRL_GRPH_BLEND_SHIFT[plane],
+			      GRPH_CTRL_GRPH_BLEND_MASK[plane]);
+
+	/* Blend position */
+	value = REG_VALUE_SET(value, GRPH_BLEND_POS[plane],
+			      GRPH_CTRL_GRPH_BLENDPOS_SHIFT[plane],
+			      GRPH_CTRL_GRPH_BLENDPOS_MASK[plane]);
+	pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[plane], value);
+}
+
+void pdp_odin_mode_set(struct device *dev, void __iomem *pdp_reg,
+		       u32 h_display, u32 v_display,
+		       u32 hbps, u32 ht, u32 has,
+		       u32 hlbs, u32 hfps, u32 hrbs,
+		       u32 vbps, u32 vt, u32 vas,
+		       u32 vtbs, u32 vfps, u32 vbbs,
+		       bool nhsync, bool nvsync)
+{
+	u32 value;
+
+	dev_info(dev, "Set mode: %dx%d\n", h_display, v_display);
+#ifdef PDP_VERBOSE
+	dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n",
+		 ht, hbps, has, hlbs, hfps, hrbs);
+	dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n",
+		 vt, vbps, vas, vtbs, vfps, vbbs);
+#endif
+
+	/* Border colour: 10bits per channel */
+	pdp_wreg32(pdp_reg, ODN_PDP_BORDCOL_R_OFFSET, 0x0);
+	pdp_wreg32(pdp_reg, ODN_PDP_BORDCOL_GB_OFFSET, 0x0);
+
+	/* Background: 10bits per channel */
+	value = pdp_rreg32(pdp_reg, ODN_PDP_BGNDCOL_AR_OFFSET);
+	value = REG_VALUE_SET(value, 0x3ff,
+			      ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT,
+			      ODN_PDP_BGNDCOL_AR_BGNDCOL_A_MASK);
+	value = REG_VALUE_SET(value, 0x0,
+			      ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT,
+			      ODN_PDP_BGNDCOL_AR_BGNDCOL_R_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_BGNDCOL_AR_OFFSET, value);
+
+	value = pdp_rreg32(pdp_reg, ODN_PDP_BGNDCOL_GB_OFFSET);
+	value = REG_VALUE_SET(value, 0x0,
+			      ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT,
+			      ODN_PDP_BGNDCOL_GB_BGNDCOL_G_MASK);
+	value = REG_VALUE_SET(value, 0x0,
+			      ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT,
+			      ODN_PDP_BGNDCOL_GB_BGNDCOL_B_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_BGNDCOL_GB_OFFSET, value);
+	pdp_wreg32(pdp_reg, ODN_PDP_BORDCOL_GB_OFFSET, 0x0);
+
+	/* Update control */
+	value = pdp_rreg32(pdp_reg, ODN_PDP_UPDCTRL_OFFSET);
+	value = REG_VALUE_SET(value, 0x0,
+			      ODN_PDP_UPDCTRL_UPDFIELD_SHIFT,
+			      ODN_PDP_UPDCTRL_UPDFIELD_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_UPDCTRL_OFFSET, value);
+
+	/* Horizontal timing */
+	value = pdp_rreg32(pdp_reg, ODN_PDP_HSYNC1_OFFSET);
+	value = REG_VALUE_SET(value, hbps,
+			      ODN_PDP_HSYNC1_HBPS_SHIFT,
+			      ODN_PDP_HSYNC1_HBPS_MASK);
+	value = REG_VALUE_SET(value, ht,
+			      ODN_PDP_HSYNC1_HT_SHIFT,
+			      ODN_PDP_HSYNC1_HT_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_HSYNC1_OFFSET, value);
+
+	value = pdp_rreg32(pdp_reg, ODN_PDP_HSYNC2_OFFSET);
+	value = REG_VALUE_SET(value, has,
+			      ODN_PDP_HSYNC2_HAS_SHIFT,
+			      ODN_PDP_HSYNC2_HAS_MASK);
+	value = REG_VALUE_SET(value, hlbs,
+			      ODN_PDP_HSYNC2_HLBS_SHIFT,
+			      ODN_PDP_HSYNC2_HLBS_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_HSYNC2_OFFSET, value);
+
+	value = pdp_rreg32(pdp_reg, ODN_PDP_HSYNC3_OFFSET);
+	value = REG_VALUE_SET(value, hfps,
+			      ODN_PDP_HSYNC3_HFPS_SHIFT,
+			      ODN_PDP_HSYNC3_HFPS_MASK);
+	value = REG_VALUE_SET(value, hrbs,
+			      ODN_PDP_HSYNC3_HRBS_SHIFT,
+			      ODN_PDP_HSYNC3_HRBS_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_HSYNC3_OFFSET, value);
+
+	/* Vertical timing */
+	value = pdp_rreg32(pdp_reg, ODN_PDP_VSYNC1_OFFSET);
+	value = REG_VALUE_SET(value, vbps,
+			      ODN_PDP_VSYNC1_VBPS_SHIFT,
+			      ODN_PDP_VSYNC1_VBPS_MASK);
+	value = REG_VALUE_SET(value, vt,
+			      ODN_PDP_VSYNC1_VT_SHIFT,
+			      ODN_PDP_VSYNC1_VT_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_VSYNC1_OFFSET, value);
+
+	value = pdp_rreg32(pdp_reg, ODN_PDP_VSYNC2_OFFSET);
+	value = REG_VALUE_SET(value, vas,
+			      ODN_PDP_VSYNC2_VAS_SHIFT,
+			      ODN_PDP_VSYNC2_VAS_MASK);
+	value = REG_VALUE_SET(value, vtbs,
+			      ODN_PDP_VSYNC2_VTBS_SHIFT,
+			      ODN_PDP_VSYNC2_VTBS_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_VSYNC2_OFFSET, value);
+
+	value = pdp_rreg32(pdp_reg, ODN_PDP_VSYNC3_OFFSET);
+	value = REG_VALUE_SET(value, vfps,
+			      ODN_PDP_VSYNC3_VFPS_SHIFT,
+			      ODN_PDP_VSYNC3_VFPS_MASK);
+	value = REG_VALUE_SET(value, vbbs,
+			      ODN_PDP_VSYNC3_VBBS_SHIFT,
+			      ODN_PDP_VSYNC3_VBBS_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_VSYNC3_OFFSET, value);
+
+	/* Horizontal data enable */
+	value = pdp_rreg32(pdp_reg, ODN_PDP_HDECTRL_OFFSET);
+	value = REG_VALUE_SET(value, hlbs,
+			      ODN_PDP_HDECTRL_HDES_SHIFT,
+			      ODN_PDP_HDECTRL_HDES_MASK);
+	value = REG_VALUE_SET(value, hfps,
+			      ODN_PDP_HDECTRL_HDEF_SHIFT,
+			      ODN_PDP_HDECTRL_HDEF_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_HDECTRL_OFFSET, value);
+
+	/* Vertical data enable */
+	value = pdp_rreg32(pdp_reg, ODN_PDP_VDECTRL_OFFSET);
+	value = REG_VALUE_SET(value, vtbs,
+			      ODN_PDP_VDECTRL_VDES_SHIFT,
+			      ODN_PDP_VDECTRL_VDES_MASK);
+	value = REG_VALUE_SET(value, vfps,
+			      ODN_PDP_VDECTRL_VDEF_SHIFT,
+			      ODN_PDP_VDECTRL_VDEF_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_VDECTRL_OFFSET, value);
+
+	/* Vertical event start and vertical fetch start */
+	value = pdp_rreg32(pdp_reg, ODN_PDP_VEVENT_OFFSET);
+	value = REG_VALUE_SET(value, vbps,
+			      ODN_PDP_VEVENT_VFETCH_SHIFT,
+			      ODN_PDP_VEVENT_VFETCH_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_VEVENT_OFFSET, value);
+
+	/* Set up polarities of sync/blank */
+	value = REG_VALUE_SET(0, 0x1,
+			      ODN_PDP_SYNCCTRL_BLNKPOL_SHIFT,
+			      ODN_PDP_SYNCCTRL_BLNKPOL_MASK);
+	if (nhsync)
+		value = REG_VALUE_SET(value, 0x1,
+				      ODN_PDP_SYNCCTRL_HSPOL_SHIFT,
+				      ODN_PDP_SYNCCTRL_HSPOL_MASK);
+	if (nvsync)
+		value = REG_VALUE_SET(value, 0x1,
+				      ODN_PDP_SYNCCTRL_VSPOL_SHIFT,
+				      ODN_PDP_SYNCCTRL_VSPOL_MASK);
+	pdp_wreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET, value);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_odin.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_odin.h
new file mode 100644
index 0000000..36dc89e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_odin.h
@@ -0,0 +1,93 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_ODIN_H__)
+#define __PDP_ODIN_H__
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+/* include here for ODN_PDP_SURF_PIXFMT_ARGB8888 as this is part of the API */
+#include "odin_pdp_regs.h"
+
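+/*
+ * Note: the current implementation derives its divider and multiplier
+ * settings from an internal display mode table keyed on hdisplay and
+ * vdisplay; the clock_freq argument is not referenced.
+ */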
+bool pdp_odin_clocks_set(struct device *dev,
+			 void __iomem *pdp_reg, void __iomem *pll_reg,
+			 u32 clock_freq,
+			 void __iomem *odn_core_reg,
+			 u32 hdisplay, u32 vdisplay);
+
+void pdp_odin_set_updates_enabled(struct device *dev, void __iomem *pdp_reg,
+				  bool enable);
+
+void pdp_odin_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg,
+				  bool enable);
+
+void pdp_odin_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg,
+				   bool enable);
+
+void pdp_odin_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg,
+				 bool enable);
+
+bool pdp_odin_check_and_clear_vblank(struct device *dev,
+				     void __iomem *pdp_reg);
+
+void pdp_odin_set_plane_enabled(struct device *dev, void __iomem *pdp_reg,
+				u32 plane, bool enable);
+
+void pdp_odin_reset_planes(struct device *dev, void __iomem *pdp_reg);
+
+void pdp_odin_set_surface(struct device *dev, void __iomem *pdp_reg,
+			  u32 plane, u32 address,
+			  u32 posx, u32 posy,
+			  u32 width, u32 height, u32 stride,
+			  u32 format, u32 alpha, bool blend);
+
+void pdp_odin_mode_set(struct device *dev, void __iomem *pdp_reg,
+		       u32 h_display, u32 v_display,
+		       u32 hbps, u32 ht, u32 has,
+		       u32 hlbs, u32 hfps, u32 hrbs,
+		       u32 vbps, u32 vt, u32 vas,
+		       u32 vtbs, u32 vfps, u32 vbbs,
+		       bool nhsync, bool nvsync);
+
+#endif /* __PDP_ODIN_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_plato.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_plato.c
new file mode 100644
index 0000000..9f79a12
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_plato.c
@@ -0,0 +1,341 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdp_common.h"
+#include "pdp_plato.h"
+#include "pdp2_mmu_regs.h"
+#include "pdp2_regs.h"
+
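+/* Plato stride registers hold the stride in 32-byte (1 << 5) units */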
+#define PLATO_PDP_STRIDE_SHIFT 5
+
+
+void pdp_plato_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg,
+				   bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set syncgen: %s\n", enable ? "enable" : "disable");
+#endif
+
+	value = pdp_rreg32(pdp_reg, PDP_SYNCCTRL_OFFSET);
+	/* Starts Sync Generator. */
+	value = REG_VALUE_SET(value, enable ? 0x1 : 0x0,
+			      PDP_SYNCCTRL_SYNCACTIVE_SHIFT,
+			      PDP_SYNCCTRL_SYNCACTIVE_MASK);
+	/* Controls polarity of pixel clock: Pixel clock is inverted */
+	value = REG_VALUE_SET(value, 0x01,
+			      PDP_SYNCCTRL_CLKPOL_SHIFT,
+			      PDP_SYNCCTRL_CLKPOL_MASK);
+	pdp_wreg32(pdp_reg, PDP_SYNCCTRL_OFFSET, value);
+}
+
+void pdp_plato_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg,
+				  bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set vblank: %s\n", enable ? "enable" : "disable");
+#endif
+
+	pdp_wreg32(pdp_reg, PDP_INTCLR_OFFSET, 0xFFFFFFFF);
+
+	value = pdp_rreg32(pdp_reg, PDP_INTENAB_OFFSET);
+	value = REG_VALUE_SET(value, enable ? 0x1 : 0x0,
+			      PDP_INTENAB_INTEN_VBLNK0_SHIFT,
+			      PDP_INTENAB_INTEN_VBLNK0_MASK);
+	pdp_wreg32(pdp_reg, PDP_INTENAB_OFFSET, value);
+}
+
+bool pdp_plato_check_and_clear_vblank(struct device *dev,
+				      void __iomem *pdp_reg)
+{
+	u32 value;
+
+	value = pdp_rreg32(pdp_reg, PDP_INTSTAT_OFFSET);
+
+	if (REG_VALUE_GET(value,
+			  PDP_INTSTAT_INTS_VBLNK0_SHIFT,
+			  PDP_INTSTAT_INTS_VBLNK0_MASK)) {
+		pdp_wreg32(pdp_reg, PDP_INTCLR_OFFSET,
+			   (1 << PDP_INTCLR_INTCLR_VBLNK0_SHIFT));
+		return true;
+	}
+
+	return false;
+}
+
+void pdp_plato_set_plane_enabled(struct device *dev, void __iomem *pdp_reg,
+				 u32 plane, bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set plane %u: %s\n",
+		 plane, enable ? "enable" : "disable");
+#endif
+	value = pdp_rreg32(pdp_reg, PDP_GRPH1CTRL_OFFSET);
+	value = REG_VALUE_SET(value, enable ? 0x1 : 0x0,
+			      PDP_GRPH1CTRL_GRPH1STREN_SHIFT,
+			      PDP_GRPH1CTRL_GRPH1STREN_MASK);
+	pdp_wreg32(pdp_reg, PDP_GRPH1CTRL_OFFSET, value);
+}
+
+void pdp_plato_set_surface(struct device *dev,
+			   void __iomem *pdp_reg, void __iomem *pdp_bif_reg,
+			   u32 plane, u64 address,
+			   u32 posx, u32 posy,
+			   u32 width, u32 height, u32 stride,
+			   u32 format, u32 alpha, bool blend)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev,
+		 "Set surface: size=%dx%d stride=%d format=%d address=0x%llx\n",
+		 width, height, stride, format, address);
+#endif
+
+	pdp_wreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET, 0x0);
+	/*
+	 * Set the offset position to (0,0) as we've already added any offset
+	 * to the base address.
+	 */
+	pdp_wreg32(pdp_reg, PDP_GRPH1POSN_OFFSET, 0);
+
+	/* Set the frame buffer base address */
+	if (address & 0xF)
+		dev_warn(dev, "The frame buffer address is not aligned\n");
+
+	pdp_wreg32(pdp_reg, PDP_GRPH1BASEADDR_OFFSET,
+		   (u32)address & PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK);
+
+	/*
+	 * Write 8 msb of the address to address extension bits in the PDP
+	 * MMU control register.
+	 */
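+	/* Extended addressing is left disabled and the MMU is put into
+	 * bypass, so the fixed upper address bits set below apply to every
+	 * PDP access.
+	 */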
+	value = pdp_rreg32(pdp_bif_reg, PDP_BIF_ADDRESS_CONTROL_OFFSET);
+	value = REG_VALUE_SET(value, address >> 32,
+			      PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SHIFT,
+			      PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_MASK);
+	value = REG_VALUE_SET(value, 0x00,
+			      PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SHIFT,
+			      PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_MASK);
+	value = REG_VALUE_SET(value, 0x01,
+			      PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SHIFT,
+			      PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_MASK);
+	pdp_wreg32(pdp_bif_reg, PDP_BIF_ADDRESS_CONTROL_OFFSET, value);
+
+	/* Set the framebuffer pixel format */
+	value = pdp_rreg32(pdp_reg, PDP_GRPH1SURF_OFFSET);
+	value = REG_VALUE_SET(value, format,
+			      PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT,
+			      PDP_GRPH1SURF_GRPH1PIXFMT_MASK);
+	pdp_wreg32(pdp_reg, PDP_GRPH1SURF_OFFSET, value);
+	/*
+	 * Set the framebuffer size (this might be smaller than the resolution)
+	 */
+	value = REG_VALUE_SET(0, width - 1,
+			      PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT,
+			      PDP_GRPH1SIZE_GRPH1WIDTH_MASK);
+	value = REG_VALUE_SET(value, height - 1,
+			      PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT,
+			      PDP_GRPH1SIZE_GRPH1HEIGHT_MASK);
+	pdp_wreg32(pdp_reg, PDP_GRPH1SIZE_OFFSET, value);
+
+	/* Set the framebuffer stride in 32-byte units */
+	value = REG_VALUE_SET(0, (stride >> PLATO_PDP_STRIDE_SHIFT) - 1,
+			      PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT,
+			      PDP_GRPH1STRIDE_GRPH1STRIDE_MASK);
+	pdp_wreg32(pdp_reg, PDP_GRPH1STRIDE_OFFSET, value);
+
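+	/* The 0x3 written below is taken to be the "registers valid" and
+	 * "use vblank" update bits, matching the Odin update-control
+	 * layout in pdp_odin_set_updates_enabled().
+	 */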
+	/* Enable the register writes on the next vblank */
+	pdp_wreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET, 0x3);
+
+	/*
+	 * Issues with NoC sending interleaved read responses to PDP require
+	 * burst to be 1.
+	 */
+	value = REG_VALUE_SET(0, 0x02,
+			      PDP_MEMCTRL_MEMREFRESH_SHIFT,
+			      PDP_MEMCTRL_MEMREFRESH_MASK);
+	value = REG_VALUE_SET(value, 0x01,
+			      PDP_MEMCTRL_BURSTLEN_SHIFT,
+			      PDP_MEMCTRL_BURSTLEN_MASK);
+	pdp_wreg32(pdp_reg, PDP_MEMCTRL_OFFSET, value);
+}
+
+void pdp_plato_mode_set(struct device *dev, void __iomem *pdp_reg,
+			u32 h_display, u32 v_display,
+			u32 hbps, u32 ht, u32 has,
+			u32 hlbs, u32 hfps, u32 hrbs,
+			u32 vbps, u32 vt, u32 vas,
+			u32 vtbs, u32 vfps, u32 vbbs,
+			bool nhsync, bool nvsync)
+{
+	u32 value;
+
+	dev_info(dev, "Set mode: %dx%d\n", h_display, v_display);
+#ifdef PDP_VERBOSE
+	dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n",
+		 ht, hbps, has, hlbs, hfps, hrbs);
+	dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n",
+		 vt, vbps, vas, vtbs, vfps, vbbs);
+#endif
+
+	/* Update control */
+	value = pdp_rreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET);
+	value = REG_VALUE_SET(value, 0x0,
+			      PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT,
+			      PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK);
+	pdp_wreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET, value);
+
+	/* Set hsync timings */
+	value = pdp_rreg32(pdp_reg, PDP_HSYNC1_OFFSET);
+	value = REG_VALUE_SET(value, hbps,
+			      PDP_HSYNC1_HBPS_SHIFT,
+			      PDP_HSYNC1_HBPS_MASK);
+	value = REG_VALUE_SET(value, ht,
+			      PDP_HSYNC1_HT_SHIFT,
+			      PDP_HSYNC1_HT_MASK);
+	pdp_wreg32(pdp_reg, PDP_HSYNC1_OFFSET, value);
+
+	value = pdp_rreg32(pdp_reg, PDP_HSYNC2_OFFSET);
+	value = REG_VALUE_SET(value, has,
+			      PDP_HSYNC2_HAS_SHIFT,
+			      PDP_HSYNC2_HAS_MASK);
+	value = REG_VALUE_SET(value, hlbs,
+			      PDP_HSYNC2_HLBS_SHIFT,
+			      PDP_HSYNC2_HLBS_MASK);
+	pdp_wreg32(pdp_reg, PDP_HSYNC2_OFFSET, value);
+
+	value = pdp_rreg32(pdp_reg, PDP_HSYNC3_OFFSET);
+	value = REG_VALUE_SET(value, hfps,
+			      PDP_HSYNC3_HFPS_SHIFT,
+			      PDP_HSYNC3_HFPS_MASK);
+	value = REG_VALUE_SET(value, hrbs,
+			      PDP_HSYNC3_HRBS_SHIFT,
+			      PDP_HSYNC3_HRBS_MASK);
+	pdp_wreg32(pdp_reg, PDP_HSYNC3_OFFSET, value);
+
+	/* Set vsync timings */
+	value = pdp_rreg32(pdp_reg, PDP_VSYNC1_OFFSET);
+	value = REG_VALUE_SET(value, vbps,
+			      PDP_VSYNC1_VBPS_SHIFT,
+			      PDP_VSYNC1_VBPS_MASK);
+	value = REG_VALUE_SET(value, vt,
+			      PDP_VSYNC1_VT_SHIFT,
+			      PDP_VSYNC1_VT_MASK);
+	pdp_wreg32(pdp_reg, PDP_VSYNC1_OFFSET, value);
+
+	value = pdp_rreg32(pdp_reg, PDP_VSYNC2_OFFSET);
+	value = REG_VALUE_SET(value, vas,
+			      PDP_VSYNC2_VAS_SHIFT,
+			      PDP_VSYNC2_VAS_MASK);
+	value = REG_VALUE_SET(value, vtbs,
+			      PDP_VSYNC2_VTBS_SHIFT,
+			      PDP_VSYNC2_VTBS_MASK);
+	pdp_wreg32(pdp_reg, PDP_VSYNC2_OFFSET, value);
+
+	value = pdp_rreg32(pdp_reg, PDP_VSYNC3_OFFSET);
+	value = REG_VALUE_SET(value, vfps,
+			      PDP_VSYNC3_VFPS_SHIFT,
+			      PDP_VSYNC3_VFPS_MASK);
+	value = REG_VALUE_SET(value, vbbs,
+			      PDP_VSYNC3_VBBS_SHIFT,
+			      PDP_VSYNC3_VBBS_MASK);
+	pdp_wreg32(pdp_reg, PDP_VSYNC3_OFFSET, value);
+
+	/* Horizontal data enable */
+	value = pdp_rreg32(pdp_reg, PDP_HDECTRL_OFFSET);
+	value = REG_VALUE_SET(value, has,
+			      PDP_HDECTRL_HDES_SHIFT,
+			      PDP_HDECTRL_HDES_MASK);
+	value = REG_VALUE_SET(value, hrbs,
+			      PDP_HDECTRL_HDEF_SHIFT,
+			      PDP_HDECTRL_HDEF_MASK);
+	pdp_wreg32(pdp_reg, PDP_HDECTRL_OFFSET, value);
+
+	/* Vertical data enable */
+	value = pdp_rreg32(pdp_reg, PDP_VDECTRL_OFFSET);
+	value = REG_VALUE_SET(value, vtbs, /* XXX: we're setting this to VAS */
+			      PDP_VDECTRL_VDES_SHIFT,
+			      PDP_VDECTRL_VDES_MASK);
+	value = REG_VALUE_SET(value, vfps, /* XXX: set to VBBS */
+			      PDP_VDECTRL_VDEF_SHIFT,
+			      PDP_VDECTRL_VDEF_MASK);
+	pdp_wreg32(pdp_reg, PDP_VDECTRL_OFFSET, value);
+
+	/* Vertical event start and vertical fetch start */
+	value = 0;
+	value = REG_VALUE_SET(value, vbps,
+			      PDP_VEVENT_VFETCH_SHIFT,
+			      PDP_VEVENT_VFETCH_MASK);
+	value = REG_VALUE_SET(value, vfps,
+			      PDP_VEVENT_VEVENT_SHIFT,
+			      PDP_VEVENT_VEVENT_MASK);
+	pdp_wreg32(pdp_reg, PDP_VEVENT_OFFSET, value);
+
+	/* Set up polarities of sync/blank */
+	value = REG_VALUE_SET(0, 0x1,
+			      PDP_SYNCCTRL_BLNKPOL_SHIFT,
+			      PDP_SYNCCTRL_BLNKPOL_MASK);
+
+	if (nhsync)
+		value = REG_VALUE_SET(value, 0x1,
+				      PDP_SYNCCTRL_HSPOL_SHIFT,
+				      PDP_SYNCCTRL_HSPOL_MASK);
+
+	if (nvsync)
+		value = REG_VALUE_SET(value, 0x1,
+				      PDP_SYNCCTRL_VSPOL_SHIFT,
+				      PDP_SYNCCTRL_VSPOL_MASK);
+
+	pdp_wreg32(pdp_reg,
+		   PDP_SYNCCTRL_OFFSET,
+		   value);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_plato.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_plato.h
new file mode 100644
index 0000000..c966b54
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_plato.h
@@ -0,0 +1,88 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_PLATO_H__)
+#define __PDP_PLATO_H__
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define PLATO_PDP_PIXEL_FORMAT_G        (0x00)
+#define PLATO_PDP_PIXEL_FORMAT_ARGB4    (0x04)
+#define PLATO_PDP_PIXEL_FORMAT_ARGB1555 (0x05)
+#define PLATO_PDP_PIXEL_FORMAT_RGB8     (0x06)
+#define PLATO_PDP_PIXEL_FORMAT_RGB565   (0x07)
+#define PLATO_PDP_PIXEL_FORMAT_ARGB8    (0x08)
+#define PLATO_PDP_PIXEL_FORMAT_AYUV8    (0x10)
+#define PLATO_PDP_PIXEL_FORMAT_YUV10    (0x15)
+#define PLATO_PDP_PIXEL_FORMAT_RGBA8    (0x16)
+
+
+void pdp_plato_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg,
+				   bool enable);
+
+void pdp_plato_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg,
+				  bool enable);
+
+bool pdp_plato_check_and_clear_vblank(struct device *dev,
+				      void __iomem *pdp_reg);
+
+void pdp_plato_set_plane_enabled(struct device *dev, void __iomem *pdp_reg,
+				 u32 plane, bool enable);
+
+void pdp_plato_set_surface(struct device *dev,
+			   void __iomem *pdp_reg, void __iomem *pdp_bif_reg,
+			   u32 plane, u64 address,
+			   u32 posx, u32 posy,
+			   u32 width, u32 height, u32 stride,
+			   u32 format, u32 alpha, bool blend);
+
+void pdp_plato_mode_set(struct device *dev, void __iomem *pdp_reg,
+			u32 h_display, u32 v_display,
+			u32 hbps, u32 ht, u32 has,
+			u32 hlbs, u32 hfps, u32 hrbs,
+			u32 vbps, u32 vt, u32 vas,
+			u32 vtbs, u32 vfps, u32 vbbs,
+			bool nhsync, bool nvsync);
+
+#endif /* __PDP_PLATO_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_regs.h
new file mode 100644
index 0000000..bd26b06
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/pdp_regs.h
@@ -0,0 +1,75 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_REGS_H__)
+#define __PDP_REGS_H__
+
+/*************************************************************************/ /*!
+ PCI Device Information
+*/ /**************************************************************************/
+
+#define DCPDP_VENDOR_ID_POWERVR			(0x1010)
+
+#define DCPDP_DEVICE_ID_PCI_APOLLO_FPGA		(0x1CF1)
+#define DCPDP_DEVICE_ID_PCIE_APOLLO_FPGA	(0x1CF2)
+
+/*************************************************************************/ /*!
+ PCI Device Base Address Information
+*/ /**************************************************************************/
+
+/* PLL and PDP registers on base address register 0 */
+#define DCPDP_REG_PCI_BASENUM			(0)
+
+#define DCPDP_PCI_PLL_REG_OFFSET		(0x1000)
+#define DCPDP_PCI_PLL_REG_SIZE			(0x0400)
+
+#define DCPDP_PCI_PDP_REG_OFFSET		(0xC000)
+#define DCPDP_PCI_PDP_REG_SIZE			(0x2000)
+
+/*************************************************************************/ /*!
+ Misc register information
+*/ /**************************************************************************/
+
+/* This information isn't captured in tcf_rgbpdp_regs.h so define it here */
+#define DCPDP_STR1SURF_FORMAT_ARGB8888		(0xE)
+#define DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT	(4)
+#define DCPDP_STR1POSN_STRIDE_SHIFT		(4)
+
+#endif /* !defined(__PDP_REGS_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/sysconfig.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/sysconfig.c
new file mode 100644
index 0000000..a72ae02
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/sysconfig.c
@@ -0,0 +1,818 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System Configuration functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include "sysinfo.h"
+#include "apollo_regs.h"
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+#include "syscommon.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+#if defined(SUPPORT_ION)
+#include PVR_ANDROID_ION_HEADER
+#include "ion_support.h"
+#include "ion_sys.h"
+#endif
+
+#include "tc_drv.h"
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#if !defined(LMA)
+#error TC only supports LMA at the moment
+#endif
+
+#if TC_MEMORY_CONFIG != TC_MEMORY_LOCAL
+#error TC only supports TC_MEMORY_LOCAL at the moment
+#endif
+
+/* These must be consecutive */
+#define PHYS_HEAP_IDX_GENERAL	0
+#define PHYS_HEAP_IDX_DMABUF	1
+#define PHYS_HEAP_IDX_COUNT	2
+
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10)
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+
+/* Dummy DVFS configuration used purely for testing purposes */
+
+static const IMG_OPP asOPPTable[] =
+{
+	{ 8,  25000000},
+	{ 16, 50000000},
+	{ 32, 75000000},
+	{ 64, 100000000},
+};
+
+#define LEVEL_COUNT ARRAY_SIZE(asOPPTable)
+
+static void SetFrequency(IMG_UINT32 ui32Frequency)
+{
+	PVR_DPF((PVR_DBG_ERROR, "SetFrequency %u", ui32Frequency));
+}
+
+static void SetVoltage(IMG_UINT32 ui32Voltage)
+{
+	PVR_DPF((PVR_DBG_ERROR, "SetVoltage %u", ui32Voltage));
+}
+
+#endif
+
+static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+				      IMG_UINT32 ui32NumOfAddr,
+				      IMG_DEV_PHYADDR *psDevPAddr,
+				      IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+				      IMG_UINT32 ui32NumOfAddr,
+				      IMG_CPU_PHYADDR *psCpuPAddr,
+				      IMG_DEV_PHYADDR *psDevPAddr);
+
+static IMG_UINT32 TCLocalGetRegionId(IMG_HANDLE hPrivData,
+					  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
+
+static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs =
+{
+	.pfnCpuPAddrToDevPAddr = TCLocalCpuPAddrToDevPAddr,
+	.pfnDevPAddrToCpuPAddr = TCLocalDevPAddrToCpuPAddr,
+	.pfnGetRegionId = TCLocalGetRegionId,
+};
+
+static void TCIonCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+				    IMG_UINT32 ui32NumOfAddr,
+				    IMG_DEV_PHYADDR *psDevPAddr,
+				    IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void TCIonDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+				    IMG_UINT32 ui32NumOfAddr,
+				    IMG_CPU_PHYADDR *psCpuPAddr,
+				    IMG_DEV_PHYADDR *psDevPAddr);
+
+static IMG_UINT32 TCIonGetRegionId(IMG_HANDLE hPrivData,
+					  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
+
+static PHYS_HEAP_FUNCTIONS gsIonPhysHeapFuncs =
+{
+	.pfnCpuPAddrToDevPAddr = TCIonCpuPAddrToDevPAddr,
+	.pfnDevPAddrToCpuPAddr = TCIonDevPAddrToCpuPAddr,
+	.pfnGetRegionId = TCIonGetRegionId,
+};
+
+/* BIF Tiling mode configuration */
+static RGXFWIF_BIFTILINGMODE geBIFTilingMode = RGXFWIF_BIFTILINGMODE_256x16;
+
+/* Default BIF tiling heap x-stride configurations. */
+static IMG_UINT32 gauiBIFTilingHeapXStrides[RGXFWIF_NUM_BIF_TILING_CONFIGS] =
+{
+	0, /* BIF tiling heap 1 x-stride */
+	1, /* BIF tiling heap 2 x-stride */
+	2, /* BIF tiling heap 3 x-stride */
+	3  /* BIF tiling heap 4 x-stride */
+};
+
+typedef struct _SYS_DATA_ SYS_DATA;
+
+struct _SYS_DATA_
+{
+	struct platform_device *pdev;
+
+	struct tc_rogue_platform_data *pdata;
+
+	struct resource *registers;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	struct ion_client *ion_client;
+	struct ion_handle *ion_rogue_allocation;
+#endif
+};
+
+#define SYSTEM_INFO_FORMAT_STRING "FPGA Revision: %s - TCF Core Revision: %s - TCF Core Target Build ID: %s - PCI Version: %s - Macro Version: %s"
+#define FPGA_REV_MAX_LEN      8 /* current longest format: "x.y.z" */
+#define TCF_CORE_REV_MAX_LEN  8 /* current longest format: "x.y.z" */
+#define TCF_CORE_CFG_MAX_LEN  4 /* current longest format: "x" */
+#define PCI_VERSION_MAX_LEN   4 /* current longest format: "x" */
+#define MACRO_VERSION_MAX_LEN 8 /* current longest format: "x.yz" */
+
+static IMG_CHAR *GetDeviceVersionString(SYS_DATA *psSysData)
+{
+	int err;
+	char str_fpga_rev[FPGA_REV_MAX_LEN] = {0};
+	char str_tcf_core_rev[TCF_CORE_REV_MAX_LEN] = {0};
+	char str_tcf_core_target_build_id[TCF_CORE_CFG_MAX_LEN] = {0};
+	char str_pci_ver[PCI_VERSION_MAX_LEN] = {0};
+	char str_macro_ver[MACRO_VERSION_MAX_LEN] = {0};
+
+	IMG_CHAR *pszVersion;
+	IMG_UINT32 ui32StringLength;
+
+	err = tc_sys_strings(psSysData->pdev->dev.parent,
+							 str_fpga_rev, sizeof(str_fpga_rev),
+							 str_tcf_core_rev, sizeof(str_tcf_core_rev),
+							 str_tcf_core_target_build_id, sizeof(str_tcf_core_target_build_id),
+							 str_pci_ver, sizeof(str_pci_ver),
+							 str_macro_ver, sizeof(str_macro_ver));
+	if (err)
+	{
+		return NULL;
+	}
+
+	/* Calculate how much space we need to allocate for the string */
+	ui32StringLength = OSStringLength(SYSTEM_INFO_FORMAT_STRING);
+	ui32StringLength += OSStringLength(str_fpga_rev);
+	ui32StringLength += OSStringLength(str_tcf_core_rev);
+	ui32StringLength += OSStringLength(str_tcf_core_target_build_id);
+	ui32StringLength += OSStringLength(str_pci_ver);
+	ui32StringLength += OSStringLength(str_macro_ver);
+
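+	/* The "%s" placeholders counted in the format string length leave
+	 * enough slack (two bytes each) for the terminating NUL written by
+	 * OSSNPrintf.
+	 */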
+	/* Create the version string */
+	pszVersion = OSAllocMem(ui32StringLength * sizeof(IMG_CHAR));
+	if (pszVersion)
+	{
+		OSSNPrintf(&pszVersion[0], ui32StringLength,
+				   SYSTEM_INFO_FORMAT_STRING,
+				   str_fpga_rev,
+				   str_tcf_core_rev,
+				   str_tcf_core_target_build_id,
+				   str_pci_ver,
+				   str_macro_ver);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: failed to create format string", __func__));
+	}
+
+	return pszVersion;
+}
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+static SYS_DATA *gpsIonPrivateData;
+
+PVRSRV_ERROR IonInit(void *pvPrivateData)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYS_DATA *psSysData = pvPrivateData;
+	gpsIonPrivateData = psSysData;
+
+	psSysData->ion_client = ion_client_create(psSysData->pdata->ion_device, SYS_RGX_DEV_NAME);
+	if (IS_ERR(psSysData->ion_client))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create ION client (%ld)", __func__, PTR_ERR(psSysData->ion_client)));
+		/* FIXME: Find a better matching error code */
+		eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+		goto err_out;
+	}
+	/* Allocate the whole rogue ion heap and pass that to services to manage */
+	psSysData->ion_rogue_allocation = ion_alloc(psSysData->ion_client, psSysData->pdata->rogue_heap_memory_size, 4096, (1 << psSysData->pdata->ion_heap_id), 0);
+	if (IS_ERR(psSysData->ion_rogue_allocation))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate ION rogue buffer (%ld)", __func__, PTR_ERR(psSysData->ion_rogue_allocation)));
+		/* FIXME: Find a better matching error code */
+		eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+		goto err_destroy_client;
+
+	}
+
+	return PVRSRV_OK;
+err_destroy_client:
+	ion_client_destroy(psSysData->ion_client);
+	psSysData->ion_client = NULL;
+err_out:
+	return eError;
+}
+
+void IonDeinit(void)
+{
+	SYS_DATA *psSysData = gpsIonPrivateData;
+	ion_free(psSysData->ion_client, psSysData->ion_rogue_allocation);
+	psSysData->ion_rogue_allocation = NULL;
+	ion_client_destroy(psSysData->ion_client);
+	psSysData->ion_client = NULL;
+}
+
+struct ion_device *IonDevAcquire(void)
+{
+	return gpsIonPrivateData->pdata->ion_device;
+}
+
+void IonDevRelease(struct ion_device *ion_device)
+{
+	PVR_ASSERT(ion_device == gpsIonPrivateData->pdata->ion_device);
+}
+#endif /* defined(SUPPORT_ION) */
+
+static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+				      IMG_UINT32 ui32NumOfAddr,
+				      IMG_DEV_PHYADDR *psDevPAddr,
+				      IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+
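+	/* Device physical addresses are CPU physical addresses rebased to
+	 * the start of the first local heap region.
+	 */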
+	/* Optimise common case */
+	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr - psDevConfig->pasPhysHeaps[0].pasRegions[0].sStartAddr.uiAddr;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr - psDevConfig->pasPhysHeaps[0].pasRegions[0].sStartAddr.uiAddr;
+		}
+	}
+}
+
+static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+				      IMG_UINT32 ui32NumOfAddr,
+				      IMG_CPU_PHYADDR *psCpuPAddr,
+				      IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+
+	/* Optimise common case */
+	psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr + psDevConfig->pasPhysHeaps[0].pasRegions[0].sStartAddr.uiAddr;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr + psDevConfig->pasPhysHeaps[0].pasRegions[0].sStartAddr.uiAddr;
+		}
+	}
+}
+
+static IMG_UINT32 TCLocalGetRegionId(IMG_HANDLE hPrivData,
+					  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+	/* Return first region which is always valid */
+	return 0;
+}
+
+static void TCIonCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+				    IMG_UINT32 ui32NumOfAddr,
+				    IMG_DEV_PHYADDR *psDevPAddr,
+				    IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+	SYS_DATA *psSysData = psDevConfig->hSysData;
+
+	/* Optimise common case */
+	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr - psSysData->pdata->tc_memory_base;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr - psSysData->pdata->tc_memory_base;
+		}
+	}
+}
+
+static void TCIonDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+				    IMG_UINT32 ui32NumOfAddr,
+				    IMG_CPU_PHYADDR *psCpuPAddr,
+				    IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+	SYS_DATA *psSysData = psDevConfig->hSysData;
+
+	/* Optimise common case */
+	psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr + psSysData->pdata->tc_memory_base;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr + psSysData->pdata->tc_memory_base;
+		}
+	}
+}
+
+static IMG_UINT32 TCIonGetRegionId(IMG_HANDLE hPrivData,
+					  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+	/* Return first region which is always valid */
+	return 0;
+}
+
+static PVRSRV_ERROR PhysHeapsCreate(SYS_DATA *psSysData,
+									void *pvPrivData,
+									PHYS_HEAP_CONFIG **ppasPhysHeapsOut,
+									IMG_UINT32 *puiPhysHeapCountOut)
+{
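+	/* uiHeapIDBase is static so that successive calls (one per device)
+	 * hand out non-overlapping physical heap IDs.
+	 */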
+	static IMG_UINT32 uiHeapIDBase = 0;
+	PHYS_HEAP_CONFIG *pasPhysHeaps;
+	PHYS_HEAP_REGION *psRegion;
+	PVRSRV_ERROR eError;
+
+	pasPhysHeaps = OSAllocMem(sizeof(*pasPhysHeaps) * PHYS_HEAP_IDX_COUNT);
+	if (!pasPhysHeaps)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psRegion = OSAllocMem(sizeof(*psRegion));
+	if (!psRegion)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorFreePhysHeaps;
+	}
+
+	psRegion->sStartAddr.uiAddr = psSysData->pdata->rogue_heap_memory_base;
+	psRegion->sCardBase.uiAddr = 0;
+	psRegion->uiSize = psSysData->pdata->rogue_heap_memory_size;
+
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID =
+		uiHeapIDBase + PHYS_HEAP_IDX_GENERAL;
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].eType = PHYS_HEAP_TYPE_LMA;
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].pszPDumpMemspaceName = "LMA";
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].psMemFuncs = &gsLocalPhysHeapFuncs;
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].pasRegions = psRegion;
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32NumOfRegions = 1;
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].hPrivData = pvPrivData;
+
+	psRegion = OSAllocMem(sizeof(*psRegion));
+	if (!psRegion)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorGeneralPhysHeapDestroy;
+	}
+
+	psRegion->sStartAddr.uiAddr = psSysData->pdata->pdp_heap_memory_base;
+	psRegion->sCardBase.uiAddr = 0;
+	psRegion->uiSize = psSysData->pdata->pdp_heap_memory_size;
+
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].ui32PhysHeapID =
+		uiHeapIDBase + PHYS_HEAP_IDX_DMABUF;
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].eType = PHYS_HEAP_TYPE_LMA;
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].pszPDumpMemspaceName = "LMA";
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].psMemFuncs = &gsIonPhysHeapFuncs;
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].pasRegions = psRegion;
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].ui32NumOfRegions = 1;
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].hPrivData = pvPrivData;
+
+	uiHeapIDBase += PHYS_HEAP_IDX_COUNT;
+
+	*ppasPhysHeapsOut = pasPhysHeaps;
+	*puiPhysHeapCountOut = PHYS_HEAP_IDX_COUNT;
+
+	return PVRSRV_OK;
+
+ErrorGeneralPhysHeapDestroy:
+	OSFreeMem(pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].pasRegions);
+
+ErrorFreePhysHeaps:
+	OSFreeMem(pasPhysHeaps);
+	return eError;
+}
+
+static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps,
+							 IMG_UINT32 uiPhysHeapCount)
+{
+	IMG_UINT32 i;
+
+	for (i = 0; i < uiPhysHeapCount; i++)
+	{
+		if (pasPhysHeaps[i].pasRegions)
+		{
+			OSFreeMem(pasPhysHeaps[i].pasRegions);
+		}
+	}
+
+	OSFreeMem(pasPhysHeaps);
+}
+
+static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData,
+									   PVRSRV_DEVICE_CONFIG **ppsDevConfigOut)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	RGX_DATA *psRGXData;
+	RGX_TIMING_INFORMATION *psRGXTimingInfo;
+	PHYS_HEAP_CONFIG *pasPhysHeaps;
+	IMG_UINT32 uiPhysHeapCount;
+	PVRSRV_ERROR eError;
+
+	psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
+							  sizeof(*psRGXData) +
+							  sizeof(*psRGXTimingInfo));
+	if (!psDevConfig)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
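+	/* The RGX data and timing info live in the same allocation as the
+	 * device config; carve them out of the tail of the block.
+	 */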
+	psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig));
+	psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData));
+
+	eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorFreeDevConfig;
+	}
+
+	/* Setup RGX specific timing data */
+	psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) * 6;
+	psRGXTimingInfo->bEnableActivePM = IMG_FALSE;
+	psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE;
+	psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+	/* Set up the RGX data */
+	psRGXData->psRGXTimingInfo = psRGXTimingInfo;
+
+	/* Setup the device config */
+	psDevConfig->pvOSDevice = &psSysData->pdev->dev;
+	psDevConfig->pszName = "tc";
+	psDevConfig->pszVersion = GetDeviceVersionString(psSysData);
+
+	psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start;
+	psDevConfig->ui32RegsSize = resource_size(psSysData->registers);
+
+	psDevConfig->ui32IRQ = TC_INTERRUPT_EXT;
+
+	psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
+
+	psDevConfig->pasPhysHeaps = pasPhysHeaps;
+	psDevConfig->ui32PhysHeapCount = uiPhysHeapCount;
+
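+	/* GPU-, CPU- and firmware-local allocations are all served by the
+	 * general LMA heap; only external (dma-buf) imports use the dmabuf
+	 * heap.
+	 */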
+	psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] =
+		pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID;
+	psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] =
+		pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID;
+	psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] =
+		pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID;
+	psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] =
+		pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].ui32PhysHeapID;
+
+	psDevConfig->eBIFTilingMode = geBIFTilingMode;
+	psDevConfig->pui32BIFTilingHeapConfigs = &gauiBIFTilingHeapXStrides[0];
+	psDevConfig->ui32BIFTilingHeapCount = ARRAY_SIZE(gauiBIFTilingHeapXStrides);
+
+	psDevConfig->hDevData = psRGXData;
+	psDevConfig->hSysData = psSysData;
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+	/* Dummy DVFS configuration used purely for testing purposes */
+	psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable;
+	psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT;
+	psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency;
+	psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage;
+#endif
+#if defined(PVR_DVFS)
+	psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = 1000;
+	psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE;
+	psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90;
+	psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10;
+#endif
+
+	*ppsDevConfigOut = psDevConfig;
+
+	return PVRSRV_OK;
+
+ErrorFreeDevConfig:
+	OSFreeMem(psDevConfig);
+	return eError;
+}
+
+static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	if (psDevConfig->pszVersion)
+	{
+		OSFreeMem(psDevConfig->pszVersion);
+	}
+
+	PhysHeapsDestroy(psDevConfig->pasPhysHeaps, psDevConfig->ui32PhysHeapCount);
+
+	OSFreeMem(psDevConfig);
+}
+
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	SYS_DATA *psSysData;
+	resource_size_t uiRegistersSize;
+	PVRSRV_ERROR eError;
+	int err = 0;
+
+	PVR_ASSERT(pvOSDevice);
+
+#if TC_MEMORY_CONFIG == TC_MEMORY_LOCAL
+	/*
+	 * The device cannot address system memory, so there is no DMA
+	 * limitation.
+	 */
+	dma_set_mask(pvOSDevice, DMA_BIT_MASK(64));
+#else
+	dma_set_mask(pvOSDevice, DMA_BIT_MASK(32));
+#endif
+
+	psSysData = OSAllocZMem(sizeof(*psSysData));
+	if (psSysData == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psSysData->pdev = to_platform_device((struct device *)pvOSDevice);
+	psSysData->pdata = psSysData->pdev->dev.platform_data;
+
+	err = tc_enable(psSysData->pdev->dev.parent);
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to enable PCI device (%d)", __func__, err));
+		eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+		goto ErrFreeSysData;
+	}
+
+	psSysData->registers = platform_get_resource_byname(psSysData->pdev,
+														IORESOURCE_MEM,
+														"rogue-regs");
+	if (!psSysData->registers)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to get Rogue register information",
+				 __func__));
+		eError = PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+		goto ErrorDevDisable;
+	}
+
+	/* Check the address range is large enough. */
+	uiRegistersSize = resource_size(psSysData->registers);
+	if (uiRegistersSize < SYS_RGX_REG_REGION_SIZE)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Rogue register region isn't big enough (was %pa, required 0x%08x)",
+				 __func__, &uiRegistersSize, SYS_RGX_REG_REGION_SIZE));
+
+		eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL;
+		goto ErrorDevDisable;
+	}
+
+	/* Reserve the address range */
+	if (!request_mem_region(psSysData->registers->start,
+							resource_size(psSysData->registers),
+							SYS_RGX_DEV_NAME))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Rogue register memory region not available",
+				 __func__));
+		eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+
+		goto ErrorDevDisable;
+	}
+
+	eError = DeviceConfigCreate(psSysData, &psDevConfig);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorReleaseMemRegion;
+	}
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	eError = IonInit(psSysData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise ION", __func__));
+		goto ErrorDeviceConfigDestroy;
+	}
+#endif
+
+	*ppsDevConfig = psDevConfig;
+
+	return PVRSRV_OK;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ErrorDeviceConfigDestroy:
+	DeviceConfigDestroy(psDevConfig);
+#endif
+ErrorReleaseMemRegion:
+	release_mem_region(psSysData->registers->start,
+					   resource_size(psSysData->registers));
+ErrorDevDisable:
+	tc_disable(psSysData->pdev->dev.parent);
+ErrFreeSysData:
+	OSFreeMem(psSysData);
+	return eError;
+}
+
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	SYS_DATA *psSysData = (SYS_DATA *)psDevConfig->hSysData;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	IonDeinit();
+#endif
+
+	DeviceConfigDestroy(psDevConfig);
+
+	release_mem_region(psSysData->registers->start,
+					   resource_size(psSysData->registers));
+	tc_disable(psSysData->pdev->dev.parent);
+
+	OSFreeMem(psSysData);
+}
+
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+#if defined(TC_APOLLO_TCF5)
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+	PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+	PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+	return PVRSRV_OK;
+#else
+	SYS_DATA *psSysData = psDevConfig->hSysData;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	u32 tmp = 0;
+	u32 pll;
+
+	PVR_DUMPDEBUG_LOG("------[ rgx_tc system debug ]------");
+
+	if (tc_sys_info(psSysData->pdev->dev.parent, &tmp, &pll))
+		goto err_out;
+
+	if (tmp > 0)
+		PVR_DUMPDEBUG_LOG("Chip temperature: %d degrees C", tmp);
+	PVR_DUMPDEBUG_LOG("PLL status: %x", pll);
+
+err_out:
+	return eError;
+#endif
+}
+
+typedef struct
+{
+	struct device *psDev;
+	int iInterruptID;
+	void *pvData;
+	PFN_LISR pfnLISR;
+} LISR_DATA;
+
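+/* Thunk between the TC framework's interrupt callback signature and the
+ * PFN_LISR signature expected by the services layer.
+ */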
+static void TCInterruptHandler(void* pvData)
+{
+	LISR_DATA *psLISRData = pvData;
+	psLISRData->pfnLISR(psLISRData->pvData);
+}
+
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+				  IMG_UINT32 ui32IRQ,
+				  const IMG_CHAR *pszName,
+				  PFN_LISR pfnLISR,
+				  void *pvData,
+				  IMG_HANDLE *phLISRData)
+{
+	SYS_DATA *psSysData = (SYS_DATA *)hSysData;
+	LISR_DATA *psLISRData;
+	PVRSRV_ERROR eError;
+	int err;
+
+	if (ui32IRQ != TC_INTERRUPT_EXT)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: No device matching IRQ %d", __func__, ui32IRQ));
+		return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+	}
+
+	psLISRData = OSAllocZMem(sizeof(*psLISRData));
+	if (!psLISRData)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_out;
+	}
+
+	psLISRData->pfnLISR = pfnLISR;
+	psLISRData->pvData = pvData;
+	psLISRData->iInterruptID = ui32IRQ;
+	psLISRData->psDev = psSysData->pdev->dev.parent;
+
+	err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, TCInterruptHandler, psLISRData);
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err));
+		eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+		goto err_free_data;
+	}
+
+	err = tc_enable_interrupt(psLISRData->psDev, psLISRData->iInterruptID);
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: tc_enable_interrupt() failed (%d)", __func__, err));
+		eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+		goto err_unset_interrupt_handler;
+	}
+
+	*phLISRData = psLISRData;
+	eError = PVRSRV_OK;
+
+	PVR_TRACE(("Installed device LISR %pf to irq %u", pfnLISR, ui32IRQ));
+
+err_out:
+	return eError;
+err_unset_interrupt_handler:
+	tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL);
+err_free_data:
+	OSFreeMem(psLISRData);
+	goto err_out;
+}
+
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+	LISR_DATA *psLISRData = (LISR_DATA *) hLISRData;
+	int err;
+
+	err = tc_disable_interrupt(psLISRData->psDev, psLISRData->iInterruptID);
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: tc_enable_interrupt() failed (%d)", __func__, err));
+	}
+
+	err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL);
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err));
+	}
+
+	PVR_TRACE(("Uninstalled device LISR %pf from irq %u", psLISRData->pfnLISR, psLISRData->iInterruptID));
+
+	OSFreeMem(psLISRData);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/sysinfo.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/sysinfo.h
new file mode 100644
index 0000000..7f6a5b4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/sysinfo.h
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*!< System specific poll/timeout details */
+#if defined (VIRTUAL_PLATFORM)
+#define MAX_HW_TIME_US                           (240000000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT  (120000)
+#else
+#define MAX_HW_TIME_US                           (500000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT  (1500) /* (10000) */
+#endif
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT                           (10000)
+
+#define SYS_RGX_DEV_NAME "tc_rogue"
+
+#endif /* !defined(__SYSINFO_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_apollo.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_apollo.c
new file mode 100644
index 0000000..0456b15
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_apollo.c
@@ -0,0 +1,1354 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*
+ * This is a device driver for the apollo testchip framework. It creates
+ * platform devices for the pdp and ext sub-devices, and exports functions to
+ * manage the shared interrupt handling
+ */
+
+#include <linux/version.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/thermal.h>
+
+#include "tc_drv_internal.h"
+#include "tc_apollo.h"
+#include "tc_ion.h"
+
+#include "apollo_regs.h"
+#include "tcf_clk_ctrl.h"
+#include "tcf_pll.h"
+
+#if defined(SUPPORT_APOLLO_FPGA)
+#include "tc_apollo_debugfs.h"
+#endif /* defined(SUPPORT_APOLLO_FPGA) */
+
+#define TC_INTERRUPT_FLAG_PDP      (1 << PDP1_INT_SHIFT)
+#define TC_INTERRUPT_FLAG_EXT      (1 << EXT_INT_SHIFT)
+
+#define PCI_VENDOR_ID_POWERVR      0x1010
+#define DEVICE_ID_PCI_APOLLO_FPGA  0x1CF1
+#define DEVICE_ID_PCIE_APOLLO_FPGA 0x1CF2
+
+#define APOLLO_MEM_PCI_BASENUM	   (2)
+
+static struct {
+	struct thermal_zone_device *thermal_zone;
+
+#if defined(SUPPORT_APOLLO_FPGA)
+	struct tc_io_region fpga;
+	struct apollo_debugfs_fpga_entries fpga_entries;
+#endif
+} apollo_pdata;
+
+#if defined(SUPPORT_APOLLO_FPGA)
+
+#define APOLLO_DEVICE_NAME_FPGA "apollo_fpga"
+
+struct apollo_fpga_platform_data {
+	resource_size_t tc_memory_base;
+
+	resource_size_t pdp_heap_memory_base;
+	resource_size_t pdp_heap_memory_size;
+};
+
+#endif /* defined(SUPPORT_APOLLO_FPGA) */
+
+static void spi_write(struct tc_device *tc, u32 off, u32 val)
+{
+	iowrite32(off, tc->tcf.registers
+		  + TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR);
+	iowrite32(val, tc->tcf.registers
+		  + TCF_CLK_CTRL_TCF_SPI_MST_WDATA);
+	iowrite32(TCF_SPI_MST_GO_MASK, tc->tcf.registers
+		  + TCF_CLK_CTRL_TCF_SPI_MST_GO);
+	udelay(1000);
+}
+
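+/* SPI reads go through the same indirect master interface as writes: the
+ * register offset (with what appears to be a read flag, 0x40000, OR'd in)
+ * is written to ADDR_RDNWR, GO starts the transfer, and MST_STATUS is then
+ * polled until it reports completion (0x08) before RDATA is fetched.
+ */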
+static int spi_read(struct tc_device *tc, u32 off, u32 *val)
+{
+	int cnt = 0;
+	u32 spi_mst_status;
+
+	iowrite32(0x40000 | off, tc->tcf.registers
+		  + TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR);
+	iowrite32(TCF_SPI_MST_GO_MASK, tc->tcf.registers
+		  + TCF_CLK_CTRL_TCF_SPI_MST_GO);
+
+	udelay(100);
+
+	do {
+		spi_mst_status = ioread32(tc->tcf.registers
+					  + TCF_CLK_CTRL_TCF_SPI_MST_STATUS);
+
+		if (cnt++ > 10000) {
+			dev_err(&tc->pdev->dev,
+				"%s: Time out reading SPI reg (0x%x)\n",
+				__func__, off);
+			return -1;
+		}
+
+	} while (spi_mst_status != 0x08);
+
+	*val = ioread32(tc->tcf.registers
+			+ TCF_CLK_CTRL_TCF_SPI_MST_RDATA);
+
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+static int apollo_thermal_get_temp(struct thermal_zone_device *thermal,
+				   unsigned long *t)
+#else
+static int apollo_thermal_get_temp(struct thermal_zone_device *thermal,
+				   int *t)
+#endif
+{
+	struct tc_device *tc;
+	int err = -ENODEV;
+	u32 tmp;
+
+	if (!thermal)
+		goto err_out;
+
+	tc = (struct tc_device *)thermal->devdata;
+
+	if (!tc)
+		goto err_out;
+
+	if (spi_read(tc, TCF_TEMP_SENSOR_SPI_OFFSET, &tmp)) {
+		dev_err(&tc->pdev->dev,
+				"Failed to read apollo temperature sensor\n");
+
+		goto err_out;
+	}
+
+	/* Report this in millidegrees Celsius */
+	*t = TCF_TEMP_SENSOR_TO_C(tmp) * 1000;
+
+	err = 0;
+
+err_out:
+	return err;
+}
+
+static struct thermal_zone_device_ops apollo_thermal_dev_ops = {
+	.get_temp = apollo_thermal_get_temp,
+};
+
+#if defined(SUPPORT_RGX)
+
+static void pll_write_reg(struct tc_device *tc,
+	resource_size_t reg_offset, u32 reg_value)
+{
+	BUG_ON(reg_offset < TCF_PLL_PLL_CORE_CLK0);
+	BUG_ON(reg_offset > tc->tcf_pll.region.size +
+		TCF_PLL_PLL_CORE_CLK0 - 4);
+
+	/* Tweak the offset because we haven't mapped the full pll region */
+	iowrite32(reg_value, tc->tcf_pll.registers +
+		reg_offset - TCF_PLL_PLL_CORE_CLK0);
+}
+
+static u32 sai_read_es2(struct tc_device *tc, u32 addr)
+{
+	iowrite32(0x200 | addr, tc->tcf.registers + 0x300);
+	iowrite32(0x1 | addr, tc->tcf.registers + 0x318);
+	return ioread32(tc->tcf.registers + 0x310);
+}
+
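+/* Bring up the DUT memory interface: enable the core clock PLL, switch the
+ * core clock over to it, then repeatedly reset the DUT and check the eye,
+ * clock-tap and training-ack values on both the DUT (via SPI) and the FPGA
+ * (via the SAI registers) until every bank reports alignment, giving up
+ * after 10 resets.
+ */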
+static int apollo_align_interface_es2(struct tc_device *tc)
+{
+	u32 reg = 0;
+	u32 reg_reset_n;
+	int reset_cnt = 0;
+	int err = -EFAULT;
+	bool aligned = false;
+
+	/* Try to enable the core clock PLL */
+	spi_write(tc, 0x1, 0x0);
+	reg  = ioread32(tc->tcf.registers + 0x320);
+	reg |= 0x1;
+	iowrite32(reg, tc->tcf.registers + 0x320);
+	reg &= 0xfffffffe;
+	iowrite32(reg, tc->tcf.registers + 0x320);
+	msleep(1000);
+
+	if (spi_read(tc, 0x2, &reg)) {
+		dev_err(&tc->pdev->dev,
+				"Unable to read PLL status\n");
+		goto err_out;
+	}
+
+	if (reg == 0x1) {
+		/* Select DUT PLL as core clock */
+		reg  = ioread32(tc->tcf.registers +
+			TCF_CLK_CTRL_DUT_CONTROL_1);
+		reg &= 0xfffffff7;
+		iowrite32(reg, tc->tcf.registers +
+			TCF_CLK_CTRL_DUT_CONTROL_1);
+	} else {
+		dev_err(&tc->pdev->dev,
+			"PLL has failed to lock, status = %x\n", reg);
+		goto err_out;
+	}
+
+	reg_reset_n = ioread32(tc->tcf.registers +
+		TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+
+	while (!aligned && reset_cnt < 10 &&
+			tc->version != APOLLO_VERSION_TCF_5) {
+		int bank;
+		u32 eyes;
+		u32 clk_taps;
+		u32 train_ack;
+
+		++reset_cnt;
+
+		/* Reset the DUT to allow the SAI to retrain */
+		reg_reset_n &= ~(0x1 << DUT_RESETN_SHIFT);
+		iowrite32(reg_reset_n, tc->tcf.registers +
+			  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+		udelay(100);
+		reg_reset_n |= (0x1 << DUT_RESETN_SHIFT);
+		iowrite32(reg_reset_n, tc->tcf.registers +
+			  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+		udelay(100);
+
+		/* Assume alignment passed; if any bank fails on either the DUT
+		 * or the FPGA, we set this to false and try again, up to a
+		 * maximum of 10 times.
+		 */
+		aligned = true;
+
+		/* For each of the banks */
+		for (bank = 0; bank < 10; bank++) {
+			int bank_aligned = 0;
+			/* Check alignment on the DUT */
+			u32 bank_base = 0x7000 + (0x1000 * bank);
+
+			spi_read(tc, bank_base + 0x4, &eyes);
+			spi_read(tc, bank_base + 0x3, &clk_taps);
+			spi_read(tc, bank_base + 0x6, &train_ack);
+
+			bank_aligned = tc_is_interface_aligned(
+					eyes, clk_taps, train_ack);
+			if (!bank_aligned) {
+				dev_warn(&tc->pdev->dev,
+					"Alignment check failed, retrying\n");
+				aligned = false;
+				break;
+			}
+
+			/* Check alignment on the FPGA */
+			bank_base = 0xb0 + (0x10 * bank);
+
+			eyes = sai_read_es2(tc, bank_base + 0x4);
+			clk_taps = sai_read_es2(tc, bank_base + 0x3);
+			train_ack = sai_read_es2(tc, bank_base + 0x6);
+
+			bank_aligned = tc_is_interface_aligned(
+					eyes, clk_taps, train_ack);
+
+			if (!bank_aligned) {
+				dev_warn(&tc->pdev->dev,
+					"Alignment check failed, retrying\n");
+				aligned = false;
+				break;
+			}
+		}
+	}
+
+	if (!aligned) {
+		dev_err(&tc->pdev->dev, "Unable to initialise the testchip (interface alignment failure), please restart the system.\n");
+		/* We are not returning an error here, because the VP doesn't
+		 * implement the necessary registers although it claims to be
+		 * TC compatible.
+		 */
+	}
+
+	if (reset_cnt > 1) {
+		dev_dbg(&tc->pdev->dev, "Note: The testchip required more than one reset to find a good interface alignment!\n");
+		dev_dbg(&tc->pdev->dev, "      This should be harmless, but if you do suspect foul play, please reset the machine.\n");
+		dev_dbg(&tc->pdev->dev, "      If you continue to see this message you may want to report it to PowerVR Verification Platforms.\n");
+	}
+
+	err = 0;
+err_out:
+	return err;
+}
+
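+/* Program the core/memory (and, on TCF5, system) PLLs. Target frequencies
+ * are written in MHz and latched into the hardware with a pulse on the
+ * corresponding DRP GO register.
+ */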
+static void apollo_set_clocks(struct tc_device *tc,
+			      int core_clock, int mem_clock, int sys_clock)
+{
+	u32 val;
+
+	/* This is disabled for TCF2 since the current FPGA builds do not
+	 * like their core clocks being set (it takes apollo down).
+	 */
+	if (tc->version != APOLLO_VERSION_TCF_2) {
+		val = core_clock / 1000000;
+		pll_write_reg(tc, TCF_PLL_PLL_CORE_CLK0, val);
+
+		val = 0x1 << PLL_CORE_DRP_GO_SHIFT;
+		pll_write_reg(tc, TCF_PLL_PLL_CORE_DRP_GO, val);
+	}
+
+	val = mem_clock / 1000000;
+	pll_write_reg(tc, TCF_PLL_PLL_MEMIF_CLK0, val);
+
+	val = 0x1 << PLL_MEM_DRP_GO_SHIFT;
+	pll_write_reg(tc, TCF_PLL_PLL_MEM_DRP_GO, val);
+
+	if (tc->version == APOLLO_VERSION_TCF_5) {
+		val = sys_clock / 1000000;
+		pll_write_reg(tc, TCF_PLL_PLL_SYSIF_CLK0, val);
+
+		val = 0x1 << PLL_MEM_DRP_GO_SHIFT;
+		pll_write_reg(tc, TCF_PLL_PLL_SYS_DRP_GO, val);
+	}
+
+	dev_info(&tc->pdev->dev, "Setting clocks to %uMHz/%uMHz\n",
+			 core_clock / 1000000,
+			 mem_clock / 1000000);
+	udelay(400);
+}
+
+static void apollo_set_mem_latency(struct tc_device *tc,
+				   int mem_latency, int mem_wresp_latency)
+{
+	u32 regval = 0;
+
+	if (mem_latency <= 4) {
+		/* The total memory read latency cannot be lower than the
+		 * amount of cycles consumed by the hardware to do a read.
+		 * Set the memory read latency to 0 cycles.
+		 */
+		mem_latency = 0;
+	} else {
+		mem_latency -= 4;
+
+		dev_info(&tc->pdev->dev,
+			 "Setting memory read latency to %i cycles\n",
+			 mem_latency);
+	}
+
+	if (mem_wresp_latency <= 2) {
+		/* The total memory write latency cannot be lower than the
+		 * amount of cycles consumed by the hardware to do a write.
+		 * Set the memory write latency to 0 cycles.
+		 */
+		mem_wresp_latency = 0;
+	} else {
+		mem_wresp_latency -= 2;
+
+		dev_info(&tc->pdev->dev,
+			 "Setting memory write response latency to %i cycles\n",
+			 mem_wresp_latency);
+	}
+
+	mem_latency |= mem_wresp_latency << 16;
+
+	spi_write(tc, 0x1009, mem_latency);
+
+	if (spi_read(tc, 0x1009, &regval) != 0) {
+		dev_err(&tc->pdev->dev,
+			"Failed to read back memory latency register");
+		return;
+	}
+
+	if (mem_latency != regval) {
+		dev_err(&tc->pdev->dev,
+			"Memory latency register doesn't match requested value"
+			" (actual: %#08x, expected: %#08x)\n",
+			regval, mem_latency);
+	}
+}
+
+static void apollo_fpga_update_dut_clk_freq(struct tc_device *tc,
+					    int *core_clock, int *mem_clock)
+{
+	struct device *dev = &tc->pdev->dev;
+
+#if defined(SUPPORT_FPGA_DUT_CLK_INFO)
+	u32 reg;
+
+	/* DUT_CLK_INFO available only if SW_IF_VERSION >= 1 */
+	reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_SW_IF_VERSION);
+	reg = (reg & VERSION_MASK) >> VERSION_SHIFT;
+
+	if (reg >= 1) {
+		reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_DUT_CLK_INFO);
+
+		if ((reg != 0) && (reg != 0xbaadface) && (reg != 0xffffffff)) {
+			dev_info(dev, "TCF_CLK_CTRL_DUT_CLK_INFO = %08x\n", reg);
+			dev_info(dev, "Overriding provided DUT clock values: "
+				 "core %i, mem %i\n",
+				 *core_clock, *mem_clock);
+
+			*core_clock = ((reg & CORE_MASK) >> CORE_SHIFT) * 1000000;
+			*mem_clock = ((reg & MEM_MASK) >> MEM_SHIFT) * 1000000;
+		}
+	}
+#endif
+
+	dev_info(dev, "DUT clock values: core %i, mem %i\n",
+		 *core_clock, *mem_clock);
+}
+
+#endif /* defined(SUPPORT_RGX) */
+
+static void apollo_set_mem_mode(struct tc_device *tc)
+{
+	u32 val;
+
+	val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL);
+	val &= ~(ADDRESS_FORCE_MASK | PCI_TEST_MODE_MASK | HOST_ONLY_MODE_MASK
+		| HOST_PHY_MODE_MASK);
+	val |= (0x1 << ADDRESS_FORCE_SHIFT);
+	iowrite32(val, tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL);
+}
+
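+/* Full power-on reset sequence: power down the DUT rails (TCF2 only), hold
+ * everything in reset, then release the PDP and DDR resets, program the
+ * PLLs, release clock gating and the SCB, power the DUT rails back up,
+ * release the DUT DCM and wait for DCM lock, and finally take the DUT out
+ * of reset and retrain the memory interface.
+ */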
+static int apollo_hard_reset(struct tc_device *tc,
+			     int core_clock, int mem_clock, int sys_clock)
+{
+	u32 reg;
+	u32 reg_reset_n = 0;
+
+	int err = 0;
+
+	/* This is required for SPI reset which is not yet implemented. */
+	/*u32 aux_reset_n;*/
+
+	if (tc->version == APOLLO_VERSION_TCF_2) {
+		/* Power down */
+		reg = ioread32(tc->tcf.registers +
+			TCF_CLK_CTRL_DUT_CONTROL_1);
+		reg &= ~DUT_CTRL_VCC_0V9EN;
+		reg &= ~DUT_CTRL_VCC_1V8EN;
+		reg |= DUT_CTRL_VCC_IO_INH;
+		reg |= DUT_CTRL_VCC_CORE_INH;
+		iowrite32(reg, tc->tcf.registers +
+			TCF_CLK_CTRL_DUT_CONTROL_1);
+		msleep(500);
+	}
+
+	/* Put everything into reset */
+	iowrite32(reg_reset_n, tc->tcf.registers +
+		TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+
+	/* Take PDP1 and PDP2 out of reset */
+	reg_reset_n |= (0x1 << PDP1_RESETN_SHIFT);
+	reg_reset_n |= (0x1 << PDP2_RESETN_SHIFT);
+
+	iowrite32(reg_reset_n, tc->tcf.registers +
+		TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+	msleep(100);
+
+	/* Take DDR out of reset */
+	reg_reset_n |= (0x1 << DDR_RESETN_SHIFT);
+	iowrite32(reg_reset_n, tc->tcf.registers +
+		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+
+#if defined(SUPPORT_RGX)
+	if (tc->version == APOLLO_VERSION_TCF_5)
+		apollo_fpga_update_dut_clk_freq(tc, &core_clock, &mem_clock);
+
+	/* Set clock speed here, before reset. */
+	apollo_set_clocks(tc, core_clock, mem_clock, sys_clock);
+
+	/* Take GLB_CLKG and SCB out of reset */
+	reg_reset_n |= (0x1 << GLB_CLKG_EN_SHIFT);
+	reg_reset_n |= (0x1 << SCB_RESETN_SHIFT);
+	iowrite32(reg_reset_n, tc->tcf.registers +
+		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+	msleep(100);
+
+	if (tc->version == APOLLO_VERSION_TCF_2) {
+		/* Enable the voltage control regulators on DUT */
+		reg = ioread32(tc->tcf.registers +
+			TCF_CLK_CTRL_DUT_CONTROL_1);
+		reg |= DUT_CTRL_VCC_0V9EN;
+		reg |= DUT_CTRL_VCC_1V8EN;
+		reg &= ~DUT_CTRL_VCC_IO_INH;
+		reg &= ~DUT_CTRL_VCC_CORE_INH;
+		iowrite32(reg, tc->tcf.registers +
+			TCF_CLK_CTRL_DUT_CONTROL_1);
+		msleep(300);
+	}
+
+	/* Take DUT_DCM out of reset */
+	reg_reset_n |= (0x1 << DUT_DCM_RESETN_SHIFT);
+	iowrite32(reg_reset_n, tc->tcf.registers +
+		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+	msleep(100);
+
+	err = tc_iopol32_nonzero(DCM_LOCK_STATUS_MASK,
+		tc->tcf.registers + TCF_CLK_CTRL_DCM_LOCK_STATUS);
+
+	if (err != 0)
+		goto err_out;
+
+	if (tc->version == APOLLO_VERSION_TCF_2) {
+		/* Set ODT to a specific value that seems to provide the most
+		 * stable signals.
+		 */
+		spi_write(tc, 0x11, 0x413130);
+	}
+
+	/* Take DUT out of reset */
+	reg_reset_n |= (0x1 << DUT_RESETN_SHIFT);
+	iowrite32(reg_reset_n, tc->tcf.registers +
+		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+	msleep(100);
+
+	if (tc->version != APOLLO_VERSION_TCF_5) {
+		err = apollo_align_interface_es2(tc);
+		if (err)
+			goto err_out;
+	}
+
+#endif /* defined(SUPPORT_RGX) */
+
+	if (tc->version == APOLLO_VERSION_TCF_2) {
+		/* Enable the temperature sensor */
+		spi_write(tc, 0xc, 0); /* power up */
+		spi_write(tc, 0xc, 2); /* reset */
+		spi_write(tc, 0xc, 6); /* init & run */
+
+		/* Register a new thermal zone */
+		apollo_pdata.thermal_zone =
+			thermal_zone_device_register("apollo", 0, 0, tc,
+						     &apollo_thermal_dev_ops,
+						     NULL, 0, 0);
+		if (IS_ERR(apollo_pdata.thermal_zone)) {
+			dev_warn(&tc->pdev->dev, "Couldn't register thermal zone");
+			apollo_pdata.thermal_zone = NULL;
+		}
+	}
+
+	reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_SW_IF_VERSION);
+	reg = (reg & VERSION_MASK) >> VERSION_SHIFT;
+
+	if (reg == 0) {
+		u32 build_inc;
+		u32 build_owner;
+
+		/* Check the build */
+		reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_FPGA_DES_REV_1);
+		build_inc = (reg >> 12) & 0xff;
+		build_owner = (reg >> 20) & 0xf;
+
+		if (build_inc) {
+			dev_alert(&tc->pdev->dev,
+				"BE WARNED: You are not running a tagged release of the FPGA!\n");
+
+			dev_alert(&tc->pdev->dev, "Owner: 0x%01x, Inc: 0x%02x\n",
+				  build_owner, build_inc);
+		}
+
+		dev_info(&tc->pdev->dev, "FPGA Release: %u.%02u\n",
+			 reg >> 8 & 0xf, reg & 0xff);
+	}
+
+#if defined(SUPPORT_RGX)
+err_out:
+#endif /* defined(SUPPORT_RGX) */
+	return err;
+}
+
+static int apollo_hw_init(struct tc_device *tc,
+			  int core_clock, int mem_clock, int sys_clock,
+			  int mem_latency, int mem_wresp_latency)
+{
+	int err = 0;
+
+	err = apollo_hard_reset(tc, core_clock, mem_clock, sys_clock);
+	if (err)
+		goto err_out;
+
+	apollo_set_mem_mode(tc);
+
+#if defined(SUPPORT_RGX)
+	if (tc->version == APOLLO_VERSION_TCF_BONNIE) {
+		u32 reg;
+		/* Enable ASTC via SPI */
+		if (spi_read(tc, 0xf, &reg)) {
+			dev_err(&tc->pdev->dev,
+				"Failed to read apollo ASTC register\n");
+			err = -ENODEV;
+			goto err_out;
+		}
+
+		reg |= 0x1 << 4;
+		spi_write(tc, 0xf, reg);
+	} else if (tc->version == APOLLO_VERSION_TCF_5) {
+		apollo_set_mem_latency(tc, mem_latency, mem_wresp_latency);
+	}
+#endif /* defined(SUPPORT_RGX) */
+
+err_out:
+	return err;
+}
+
+static int apollo_enable_irq(struct tc_device *tc)
+{
+	int err = 0;
+
+#if defined(TC_FAKE_INTERRUPTS)
+	setup_timer(&tc->timer, tc_irq_fake_wrapper,
+		(unsigned long)tc);
+	mod_timer(&tc->timer,
+		jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS));
+#else
+	{
+		u32 val;
+
+		iowrite32(0, tc->tcf.registers +
+			TCF_CLK_CTRL_INTERRUPT_ENABLE);
+		iowrite32(0xffffffff, tc->tcf.registers +
+			TCF_CLK_CTRL_INTERRUPT_CLEAR);
+
+		/* Set sense to active high */
+		val = ioread32(tc->tcf.registers +
+			TCF_CLK_CTRL_INTERRUPT_OP_CFG) & ~(INT_SENSE_MASK);
+		iowrite32(val, tc->tcf.registers +
+			TCF_CLK_CTRL_INTERRUPT_OP_CFG);
+
+		err = request_irq(tc->pdev->irq, apollo_irq_handler,
+			IRQF_SHARED, DRV_NAME, tc);
+	}
+#endif
+	return err;
+}
+
+static void apollo_disable_irq(struct tc_device *tc)
+{
+#if defined(TC_FAKE_INTERRUPTS)
+	del_timer_sync(&tc->timer);
+#else
+	iowrite32(0, tc->tcf.registers +
+		TCF_CLK_CTRL_INTERRUPT_ENABLE);
+	iowrite32(0xffffffff, tc->tcf.registers +
+		TCF_CLK_CTRL_INTERRUPT_CLEAR);
+
+	free_irq(tc->pdev->irq, tc);
+#endif
+}
+
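+/* Identify the testchip flavour from the TCF core target build ID register:
+ * 5 maps to TCF2 (Hood ES2), 1 to TCF5 and 6 to Bonnie; anything else is
+ * treated as TCF2.
+ */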
+static enum tc_version_t
+apollo_detect_tc_version(struct tc_device *tc)
+{
+	u32 val = ioread32(tc->tcf.registers +
+		       TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG);
+
+	switch (val) {
+	default:
+		dev_err(&tc->pdev->dev,
+			"Unknown TCF core target build ID (0x%x) - assuming Hood ES2 - PLEASE REPORT TO ANDROID TEAM\n",
+			val);
+		/* Fall-through */
+	case 5:
+		dev_err(&tc->pdev->dev, "Looks like a Hood ES2 TC\n");
+		return APOLLO_VERSION_TCF_2;
+	case 1:
+		dev_err(&tc->pdev->dev, "Looks like a TCF5\n");
+		return APOLLO_VERSION_TCF_5;
+	case 6:
+		dev_err(&tc->pdev->dev, "Looks like a Bonnie TC\n");
+		return APOLLO_VERSION_TCF_BONNIE;
+	}
+}
+
+static u32 apollo_interrupt_id_to_flag(int interrupt_id)
+{
+	switch (interrupt_id) {
+	case TC_INTERRUPT_PDP:
+		return TC_INTERRUPT_FLAG_PDP;
+	case TC_INTERRUPT_EXT:
+		return TC_INTERRUPT_FLAG_EXT;
+	default:
+		BUG();
+	}
+}
+
+static int apollo_dev_init(struct tc_device *tc, struct pci_dev *pdev,
+			   int pdp_mem_size, int secure_mem_size)
+{
+	int err;
+
+	/* Reserve and map the tcf_clk / "sys" registers */
+	err = setup_io_region(pdev, &tc->tcf,
+		SYS_APOLLO_REG_PCI_BASENUM,
+		SYS_APOLLO_REG_SYS_OFFSET, SYS_APOLLO_REG_SYS_SIZE);
+	if (err)
+		goto err_out;
+
+	/* Reserve and map the tcf_pll registers */
+	err = setup_io_region(pdev, &tc->tcf_pll,
+		SYS_APOLLO_REG_PCI_BASENUM,
+		SYS_APOLLO_REG_PLL_OFFSET + TCF_PLL_PLL_CORE_CLK0,
+		TCF_PLL_PLL_DRP_STATUS - TCF_PLL_PLL_CORE_CLK0 + 4);
+	if (err)
+		goto err_unmap_sys_registers;
+
+#if defined(SUPPORT_APOLLO_FPGA)
+#define FPGA_REGISTERS_SIZE 4
+	/* If this is a special 'fpga' build, have the apollo driver manage
+	 * the second register bar.
+	 */
+	err = setup_io_region(pdev, &apollo_pdata.fpga,
+		SYS_RGX_REG_PCI_BASENUM, 0, FPGA_REGISTERS_SIZE);
+	if (err)
+		goto err_unmap_pll_registers;
+#endif
+
+	/* Detect testchip version */
+	tc->version = apollo_detect_tc_version(tc);
+
+	/* Setup card memory */
+	tc->tc_mem.base =
+		pci_resource_start(pdev, APOLLO_MEM_PCI_BASENUM);
+	tc->tc_mem.size =
+		pci_resource_len(pdev, APOLLO_MEM_PCI_BASENUM);
+
+	if (tc->tc_mem.size < pdp_mem_size) {
+		dev_err(&pdev->dev,
+			"Apollo MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu",
+			APOLLO_MEM_PCI_BASENUM,
+			(unsigned long)tc->tc_mem.size,
+			(unsigned long)pdp_mem_size);
+		err = -EIO;
+		goto err_unmap_fpga_registers;
+	}
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	if (tc->tc_mem.size <
+	    (pdp_mem_size + secure_mem_size)) {
+		dev_err(&pdev->dev,
+			"Apollo MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu plus the requested secure heap size %lu",
+			APOLLO_MEM_PCI_BASENUM,
+			(unsigned long)tc->tc_mem.size,
+			(unsigned long)pdp_mem_size,
+			(unsigned long)secure_mem_size);
+		err = -EIO;
+		goto err_unmap_fpga_registers;
+	}
+#endif
+
+	err = tc_mtrr_setup(tc);
+	if (err)
+		goto err_unmap_fpga_registers;
+
+	/* Setup ranges for the device heaps */
+	tc->pdp_heap_mem_size = pdp_mem_size;
+
+	/* We know ext_heap_mem_size won't underflow as we've compared
+	 * tc_mem.size against the pdp_mem_size value earlier
+	 */
+	tc->ext_heap_mem_size =
+		tc->tc_mem.size - tc->pdp_heap_mem_size;
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	tc->ext_heap_mem_size -= secure_mem_size;
+#endif
+
+	if (tc->ext_heap_mem_size < TC_EXT_MINIMUM_MEM_SIZE) {
+		dev_warn(&pdev->dev,
+			"Apollo MEM region (bar %d) has size of %lu, with %lu pdp_mem_size only %lu bytes are left for ext device, which looks too small",
+			APOLLO_MEM_PCI_BASENUM,
+			(unsigned long)tc->tc_mem.size,
+			(unsigned long)pdp_mem_size,
+			(unsigned long)tc->ext_heap_mem_size);
+		/* Continue as this is only a 'helpful warning' not a hard
+		 * requirement
+		 */
+	}
+
+	tc->ext_heap_mem_base = tc->tc_mem.base;
+	tc->pdp_heap_mem_base =
+		tc->tc_mem.base + tc->ext_heap_mem_size;
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	tc->secure_heap_mem_base = tc->pdp_heap_mem_base +
+		tc->pdp_heap_mem_size;
+	tc->secure_heap_mem_size = secure_mem_size;
+#endif
+
+#if defined(SUPPORT_ION)
+	err = tc_ion_init(tc, APOLLO_MEM_PCI_BASENUM);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialise ION\n");
+		goto err_unmap_fpga_registers;
+	}
+#endif
+
+#if defined(SUPPORT_APOLLO_FPGA)
+	apollo_debugfs_add_fpga_entries(tc, &apollo_pdata.fpga,
+					&apollo_pdata.fpga_entries);
+#endif /* defined(SUPPORT_APOLLO_FPGA) */
+
+err_out:
+	return err;
+err_unmap_fpga_registers:
+#if defined(SUPPORT_APOLLO_FPGA)
+	iounmap(apollo_pdata.fpga.registers);
+	release_pci_io_addr(pdev, SYS_RGX_REG_PCI_BASENUM,
+		apollo_pdata.fpga.region.base, apollo_pdata.fpga.region.size);
+err_unmap_pll_registers:
+#endif /* defined(SUPPORT_APOLLO_FPGA) */
+	iounmap(tc->tcf_pll.registers);
+	release_pci_io_addr(pdev, SYS_APOLLO_REG_PCI_BASENUM,
+		tc->tcf_pll.region.base, tc->tcf_pll.region.size);
+err_unmap_sys_registers:
+	iounmap(tc->tcf.registers);
+	release_pci_io_addr(pdev, SYS_APOLLO_REG_PCI_BASENUM,
+		tc->tcf.region.base, tc->tcf.region.size);
+	goto err_out;
+}
+
+static void apollo_dev_cleanup(struct tc_device *tc)
+{
+#if defined(SUPPORT_APOLLO_FPGA)
+	apollo_debugfs_remove_fpga_entries(&apollo_pdata.fpga_entries);
+#endif
+
+#if defined(SUPPORT_ION)
+	tc_ion_deinit(tc, APOLLO_MEM_PCI_BASENUM);
+#endif
+
+	tc_mtrr_cleanup(tc);
+
+#if defined(SUPPORT_APOLLO_FPGA)
+	iounmap(apollo_pdata.fpga.registers);
+	release_pci_io_addr(tc->pdev, SYS_RGX_REG_PCI_BASENUM,
+		apollo_pdata.fpga.region.base, apollo_pdata.fpga.region.size);
+#endif
+
+	iounmap(tc->tcf_pll.registers);
+	release_pci_io_addr(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM,
+		tc->tcf_pll.region.base, tc->tcf_pll.region.size);
+
+	iounmap(tc->tcf.registers);
+	release_pci_io_addr(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM,
+		tc->tcf.region.base, tc->tcf.region.size);
+
+	if (apollo_pdata.thermal_zone)
+		thermal_zone_device_unregister(apollo_pdata.thermal_zone);
+}
+
+int apollo_init(struct tc_device *tc, struct pci_dev *pdev,
+		int core_clock, int mem_clock, int sys_clock,
+		int pdp_mem_size, int secure_mem_size,
+		int mem_latency, int mem_wresp_latency)
+{
+	int err = 0;
+
+	err = apollo_dev_init(tc, pdev, pdp_mem_size, secure_mem_size);
+	if (err) {
+		dev_err(&pdev->dev, "apollo_dev_init failed\n");
+		goto err_out;
+	}
+
+	err = apollo_hw_init(tc, core_clock, mem_clock, sys_clock,
+			     mem_latency, mem_wresp_latency);
+	if (err) {
+		dev_err(&pdev->dev, "apollo_hw_init failed\n");
+		goto err_dev_cleanup;
+	}
+
+	err = apollo_enable_irq(tc);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Failed to initialise IRQ\n");
+		goto err_dev_cleanup;
+	}
+
+err_out:
+	return err;
+
+err_dev_cleanup:
+	apollo_dev_cleanup(tc);
+	goto err_out;
+}
+
+int apollo_cleanup(struct tc_device *tc)
+{
+	apollo_disable_irq(tc);
+	apollo_dev_cleanup(tc);
+
+	return 0;
+}
+
+int apollo_register_pdp_device(struct tc_device *tc)
+{
+	int err = 0;
+	resource_size_t reg_start =
+		pci_resource_start(tc->pdev,
+				   SYS_APOLLO_REG_PCI_BASENUM);
+	struct resource pdp_resources_es2[] = {
+		DEFINE_RES_MEM_NAMED(reg_start + SYS_APOLLO_REG_PDP1_OFFSET,
+				SYS_APOLLO_REG_PDP1_SIZE, "pdp-regs"),
+		DEFINE_RES_MEM_NAMED(reg_start +
+				SYS_APOLLO_REG_PLL_OFFSET +
+				TCF_PLL_PLL_PDP_CLK0,
+				TCF_PLL_PLL_PDP2_DRP_GO -
+				TCF_PLL_PLL_PDP_CLK0 + 4, "pll-regs"),
+	};
+	struct resource pdp_resources_tcf5[] = {
+		DEFINE_RES_MEM_NAMED(reg_start + SYS_APOLLO_REG_PDP1_OFFSET,
+				SYS_APOLLO_REG_PDP1_SIZE, "pdp-regs"),
+		DEFINE_RES_MEM_NAMED(reg_start +
+				SYS_APOLLO_REG_PLL_OFFSET +
+				TCF_PLL_PLL_PDP_CLK0,
+				TCF_PLL_PLL_PDP2_DRP_GO -
+				TCF_PLL_PLL_PDP_CLK0 + 4, "pll-regs"),
+		DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
+				TC5_SYS_APOLLO_REG_PCI_BASENUM)
+				+ TC5_SYS_APOLLO_REG_PDP2_OFFSET,
+			TC5_SYS_APOLLO_REG_PDP2_SIZE, "tc5-pdp2-regs"),
+
+		DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
+				TC5_SYS_APOLLO_REG_PCI_BASENUM)
+				+ TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET,
+				TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE,
+				"tc5-pdp2-fbdc-regs"),
+
+		DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
+				TC5_SYS_APOLLO_REG_PCI_BASENUM)
+				+ TC5_SYS_APOLLO_REG_HDMI_OFFSET,
+				TC5_SYS_APOLLO_REG_HDMI_SIZE,
+				"tc5-adv5711-regs"),
+	};
+
+	struct tc_pdp_platform_data pdata = {
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+		.ion_device = tc->ion_device,
+		.ion_heap_id = ION_HEAP_TC_PDP,
+#endif
+		.memory_base = tc->tc_mem.base,
+		.pdp_heap_memory_base = tc->pdp_heap_mem_base,
+		.pdp_heap_memory_size = tc->pdp_heap_mem_size,
+	};
+	struct platform_device_info pdp_device_info = {
+		.parent = &tc->pdev->dev,
+		.name = APOLLO_DEVICE_NAME_PDP,
+		.id = -2,
+		.data = &pdata,
+		.size_data = sizeof(pdata),
+#if (TC_MEMORY_CONFIG == TC_MEMORY_LOCAL) || \
+	(TC_MEMORY_CONFIG == TC_MEMORY_HYBRID)
+		/*
+		 * The PDP does not access system memory, so there is no
+		 * DMA limitation.
+		 */
+		.dma_mask = DMA_BIT_MASK(64),
+#else
+		.dma_mask = DMA_BIT_MASK(32),
+#endif
+	};
+
+	if (tc->version == APOLLO_VERSION_TCF_5) {
+		pdp_device_info.res = pdp_resources_tcf5;
+		pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_tcf5);
+	} else if (tc->version == APOLLO_VERSION_TCF_2 ||
+			tc->version == APOLLO_VERSION_TCF_BONNIE) {
+		pdp_device_info.res = pdp_resources_es2;
+		pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_es2);
+	} else {
+		dev_err(&tc->pdev->dev,
+			"Unable to set PDP resource info for unknown apollo device\n");
+	}
+
+	tc->pdp_dev = platform_device_register_full(&pdp_device_info);
+	if (IS_ERR(tc->pdp_dev)) {
+		err = PTR_ERR(tc->pdp_dev);
+		dev_err(&tc->pdev->dev,
+			"Failed to register PDP device (%d)\n", err);
+		tc->pdp_dev = NULL;
+		goto err;
+	}
+err:
+	return err;
+}
+
+#if defined(SUPPORT_RGX)
+
+int apollo_register_ext_device(struct tc_device *tc)
+{
+	int err = 0;
+	struct resource rogue_resources[] = {
+		DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
+				SYS_RGX_REG_PCI_BASENUM),
+			 SYS_RGX_REG_REGION_SIZE, "rogue-regs"),
+	};
+	struct tc_rogue_platform_data pdata = {
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+		.ion_device = tc->ion_device,
+		.ion_heap_id = ION_HEAP_TC_ROGUE,
+#endif
+		.tc_memory_base = tc->tc_mem.base,
+		.pdp_heap_memory_base = tc->pdp_heap_mem_base,
+		.pdp_heap_memory_size = tc->pdp_heap_mem_size,
+		.rogue_heap_memory_base = tc->ext_heap_mem_base,
+		.rogue_heap_memory_size = tc->ext_heap_mem_size,
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+		.secure_heap_memory_base = tc->secure_heap_mem_base,
+		.secure_heap_memory_size = tc->secure_heap_mem_size,
+#endif
+	};
+	struct platform_device_info rogue_device_info = {
+		.parent = &tc->pdev->dev,
+		.name = TC_DEVICE_NAME_ROGUE,
+		.id = -2,
+		.res = rogue_resources,
+		.num_res = ARRAY_SIZE(rogue_resources),
+		.data = &pdata,
+		.size_data = sizeof(pdata),
+#if (TC_MEMORY_CONFIG == TC_MEMORY_LOCAL)
+		/*
+		 * Rogue does not access system memory, so there is no DMA
+		 * limitation.
+		 */
+		.dma_mask = DMA_BIT_MASK(64),
+#else
+		.dma_mask = DMA_BIT_MASK(32),
+#endif
+	};
+
+	tc->ext_dev
+		= platform_device_register_full(&rogue_device_info);
+
+	if (IS_ERR(tc->ext_dev)) {
+		err = PTR_ERR(tc->ext_dev);
+		dev_err(&tc->pdev->dev,
+			"Failed to register rogue device (%d)\n", err);
+		tc->ext_dev = NULL;
+	}
+	return err;
+}
+
+#elif defined(SUPPORT_APOLLO_FPGA)
+
+int apollo_register_ext_device(struct tc_device *tc)
+{
+	int err = 0;
+	struct resource fpga_resources[] = {
+		/* FIXME: Don't overload SYS_RGX_REG_xxx for FPGA */
+		DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
+				SYS_RGX_REG_PCI_BASENUM),
+			 SYS_RGX_REG_REGION_SIZE, "fpga-regs"),
+	};
+	struct apollo_fpga_platform_data pdata = {
+		.tc_memory_base = tc->tc_mem.base,
+		.pdp_heap_memory_base = tc->pdp_heap_mem_base,
+		.pdp_heap_memory_size = tc->pdp_heap_mem_size,
+	};
+	struct platform_device_info fpga_device_info = {
+		.parent = &tc->pdev->dev,
+		.name = APOLLO_DEVICE_NAME_FPGA,
+		.id = -1,
+		.res = fpga_resources,
+		.num_res = ARRAY_SIZE(fpga_resources),
+		.data = &pdata,
+		.size_data = sizeof(pdata),
+#if (TC_MEMORY_CONFIG == TC_MEMORY_LOCAL)
+		/*
+		 * The FPGA does not access system memory, so there is no DMA
+		 * limitation.
+		 */
+		.dma_mask = DMA_BIT_MASK(64),
+#else
+		.dma_mask = DMA_BIT_MASK(32),
+#endif
+	};
+
+	tc->ext_dev = platform_device_register_full(&fpga_device_info);
+	if (IS_ERR(tc->ext_dev)) {
+		err = PTR_ERR(tc->ext_dev);
+		dev_err(&tc->pdev->dev,
+			"Failed to register fpga device (%d)\n", err);
+		tc->ext_dev = NULL;
+		/* Fall through */
+	}
+
+	return err;
+}
+
+#else /* defined(SUPPORT_APOLLO_FPGA) */
+
+int apollo_register_ext_device(struct tc_device *tc)
+{
+	return 0;
+}
+
+#endif /* defined(SUPPORT_RGX) */
+
+void apollo_enable_interrupt_register(struct tc_device *tc,
+				      int interrupt_id)
+{
+	u32 val;
+
+	if (interrupt_id == TC_INTERRUPT_PDP ||
+		interrupt_id == TC_INTERRUPT_EXT) {
+		val = ioread32(
+			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
+		val |= apollo_interrupt_id_to_flag(interrupt_id);
+		iowrite32(val,
+			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
+	}
+}
+
+void apollo_disable_interrupt_register(struct tc_device *tc,
+				       int interrupt_id)
+{
+	u32 val;
+
+	if (interrupt_id == TC_INTERRUPT_PDP ||
+		interrupt_id == TC_INTERRUPT_EXT) {
+		val = ioread32(
+			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
+		val &= ~(apollo_interrupt_id_to_flag(interrupt_id));
+		iowrite32(val,
+			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
+	}
+}
+
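+/* Demultiplex the shared PCI interrupt: read the TCF interrupt status,
+ * dispatch to whichever registered sub-handlers (PDP/ext) are enabled, and
+ * clear only the flags that were actually handled. The TC5 PDP callback is
+ * invoked unconditionally, see the comment below.
+ */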
+irqreturn_t apollo_irq_handler(int irq, void *data)
+{
+	u32 interrupt_status;
+	u32 interrupt_clear = 0;
+	unsigned long flags;
+	irqreturn_t ret = IRQ_NONE;
+	struct tc_device *tc = (struct tc_device *)data;
+
+	spin_lock_irqsave(&tc->interrupt_handler_lock, flags);
+
+#if defined(TC_FAKE_INTERRUPTS)
+	/* If we're faking interrupts pretend we got both ext and PDP ints */
+	interrupt_status = TC_INTERRUPT_FLAG_EXT
+		| TC_INTERRUPT_FLAG_PDP;
+#else
+	interrupt_status = ioread32(tc->tcf.registers
+			+ TCF_CLK_CTRL_INTERRUPT_STATUS);
+#endif
+
+	if (interrupt_status & TC_INTERRUPT_FLAG_EXT) {
+		struct tc_interrupt_handler *ext_int =
+			&tc->interrupt_handlers[TC_INTERRUPT_EXT];
+
+		if (ext_int->enabled && ext_int->handler_function) {
+			ext_int->handler_function(ext_int->handler_data);
+			interrupt_clear |= TC_INTERRUPT_FLAG_EXT;
+		}
+		ret = IRQ_HANDLED;
+	}
+	if (interrupt_status & TC_INTERRUPT_FLAG_PDP) {
+		struct tc_interrupt_handler *pdp_int =
+			&tc->interrupt_handlers[TC_INTERRUPT_PDP];
+
+		if (pdp_int->enabled && pdp_int->handler_function) {
+			pdp_int->handler_function(pdp_int->handler_data);
+			interrupt_clear |= TC_INTERRUPT_FLAG_PDP;
+		}
+		ret = IRQ_HANDLED;
+	}
+
+	if (tc->version == APOLLO_VERSION_TCF_5) {
+		/* On TC5 the interrupt is not generated by the TC framework,
+		 * but by the PDP itself, so we always have to call back into
+		 * the TC5 PDP code regardless of the TCF interrupt status.
+		 */
+		struct tc_interrupt_handler *pdp_int =
+			&tc->interrupt_handlers[TC_INTERRUPT_TC5_PDP];
+
+		if (pdp_int->enabled && pdp_int->handler_function) {
+			pdp_int->handler_function(pdp_int->handler_data);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	if (interrupt_clear)
+		iowrite32(0xffffffff,
+			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_CLEAR);
+
+	spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags);
+
+	return ret;
+}
+
+int apollo_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll)
+{
+	int err = 0;
+
+	*tmp = 0;
+	*pll = 0;
+
+	if (tc->version == APOLLO_VERSION_TCF_5)
+		/* Not implemented on TCF5 */
+		goto err_out;
+	else if (tc->version == APOLLO_VERSION_TCF_2) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+		unsigned long t;
+#else
+		int t;
+#endif
+
+		err = apollo_thermal_get_temp(apollo_pdata.thermal_zone, &t);
+		if (err)
+			goto err_out;
+		*tmp = t / 1000;
+	}
+
+	if (spi_read(tc, 0x2, pll)) {
+		dev_err(&tc->pdev->dev, "Failed to read PLL status\n");
+		err = -ENODEV;
+		goto err_out;
+	}
+
+err_out:
+	return err;
+}
+
+int apollo_sys_strings(struct tc_device *tc,
+		       char *str_fpga_rev, size_t size_fpga_rev,
+		       char *str_tcf_core_rev, size_t size_tcf_core_rev,
+		       char *str_tcf_core_target_build_id,
+		       size_t size_tcf_core_target_build_id,
+		       char *str_pci_ver, size_t size_pci_ver,
+		       char *str_macro_ver, size_t size_macro_ver)
+{
+	int err = 0;
+	u32 val;
+	resource_size_t host_fpga_base;
+	void __iomem *host_fpga_registers;
+
+	/* To get some of the version information we need to read from a
+	 * register that we don't normally have mapped. Map it temporarily
+	 * (without trying to reserve it) to get the information we need.
+	 */
+	host_fpga_base =
+		pci_resource_start(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM)
+				+ 0x40F0;
+
+	host_fpga_registers = ioremap_nocache(host_fpga_base, 0x04);
+	if (!host_fpga_registers) {
+		dev_err(&tc->pdev->dev,
+			"Failed to map host fpga registers\n");
+		err = -EIO;
+		goto err_out;
+	}
+
+	/* Create the components of the PCI and macro versions */
+	val = ioread32(host_fpga_registers);
+	snprintf(str_pci_ver, size_pci_ver, "%d",
+		 HEX2DEC((val & 0x00FF0000) >> 16));
+	snprintf(str_macro_ver, size_macro_ver, "%d.%d",
+		 (val & 0x00000F00) >> 8,
+		 HEX2DEC((val & 0x000000FF) >> 0));
+
+	/* Unmap the register now that we no longer need it */
+	iounmap(host_fpga_registers);
+
+	/*
+	 * Check bits 7:0 of register 0x28 (TCF_CORE_REV_REG or SW_IF_VERSION
+	 * depending on its own value) to find out how the driver should
+	 * generate the strings for FPGA and core revision.
+	 */
+	val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_SW_IF_VERSION);
+	val = (val & VERSION_MASK) >> VERSION_SHIFT;
+
+	if (val == 0) {
+		/* Create the components of the TCF core revision number */
+		val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TCF_CORE_REV_REG);
+		snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d.%d.%d",
+			 HEX2DEC((val & TCF_CORE_REV_REG_MAJOR_MASK)
+				 >> TCF_CORE_REV_REG_MAJOR_SHIFT),
+			 HEX2DEC((val & TCF_CORE_REV_REG_MINOR_MASK)
+				 >> TCF_CORE_REV_REG_MINOR_SHIFT),
+			 HEX2DEC((val & TCF_CORE_REV_REG_MAINT_MASK)
+				 >> TCF_CORE_REV_REG_MAINT_SHIFT));
+
+		/* Create the components of the FPGA revision number */
+		val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_FPGA_REV_REG);
+		snprintf(str_fpga_rev, size_fpga_rev, "%d.%d.%d",
+			 HEX2DEC((val & FPGA_REV_REG_MAJOR_MASK)
+				 >> FPGA_REV_REG_MAJOR_SHIFT),
+			 HEX2DEC((val & FPGA_REV_REG_MINOR_MASK)
+				 >> FPGA_REV_REG_MINOR_SHIFT),
+			 HEX2DEC((val & FPGA_REV_REG_MAINT_MASK)
+				 >> FPGA_REV_REG_MAINT_SHIFT));
+	} else if (val == 1) {
+		/* Create the components of the TCF core revision number */
+		snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d", val);
+
+		/* Create the components of the FPGA revision number */
+		val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_REL);
+		snprintf(str_fpga_rev, size_fpga_rev, "%d.%d",
+			 HEX2DEC((val & MAJOR_MASK) >> MAJOR_SHIFT),
+			 HEX2DEC((val & MINOR_MASK) >> MINOR_SHIFT));
+	} else {
+		dev_warn(&tc->pdev->dev,
+			 "%s: unrecognised SW_IF_VERSION %#08x\n",
+			 __func__, val);
+
+		/* Create the components of the TCF core revision number */
+		snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d", val);
+
+		/* Create the components of the FPGA revision number */
+		snprintf(str_fpga_rev, size_fpga_rev, "N/A");
+	}
+
+	/* Create the component of the TCF core target build ID */
+	val = ioread32(tc->tcf.registers +
+		       TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG);
+	snprintf(str_tcf_core_target_build_id, size_tcf_core_target_build_id,
+		"%d",
+		(val & TCF_CORE_TARGET_BUILD_ID_MASK)
+		>> TCF_CORE_TARGET_BUILD_ID_SHIFT);
+
+err_out:
+	return err;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_apollo.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_apollo.h
new file mode 100644
index 0000000..f7100f8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_apollo.h
@@ -0,0 +1,79 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _APOLLO_DRV_H
+#define _APOLLO_DRV_H
+
+#include "tc_drv_internal.h"
+#include "apollo_regs.h"
+
+#if defined(SUPPORT_RGX) && defined(SUPPORT_APOLLO_FPGA)
+#error Define either SUPPORT_RGX or SUPPORT_APOLLO_FPGA, not both
+#endif
+
+int apollo_init(struct tc_device *tc, struct pci_dev *pdev,
+		int core_clock, int mem_clock, int sys_clock,
+		int pdp_mem_size, int secure_mem_size,
+		int mem_latency, int mem_wresp_latency);
+int apollo_cleanup(struct tc_device *tc);
+
+int apollo_register_pdp_device(struct tc_device *tc);
+int apollo_register_ext_device(struct tc_device *tc);
+
+void apollo_enable_interrupt_register(struct tc_device *tc,
+				      int interrupt_id);
+void apollo_disable_interrupt_register(struct tc_device *tc,
+				       int interrupt_id);
+
+irqreturn_t apollo_irq_handler(int irq, void *data);
+
+int apollo_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll);
+int apollo_sys_strings(struct tc_device *tc,
+		       char *str_fpga_rev, size_t size_fpga_rev,
+		       char *str_tcf_core_rev, size_t size_tcf_core_rev,
+		       char *str_tcf_core_target_build_id,
+		       size_t size_tcf_core_target_build_id,
+		       char *str_pci_ver, size_t size_pci_ver,
+		       char *str_macro_ver, size_t size_macro_ver);
+
+#endif /* _APOLLO_DRV_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_drv.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_drv.c
new file mode 100644
index 0000000..a538cc9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_drv.c
@@ -0,0 +1,784 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*
+ * This is a device driver for the testchip framework. It creates platform
+ * devices for the pdp and ext sub-devices, and exports functions to manage the
+ * shared interrupt handling.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+
+#if defined(CONFIG_MTRR)
+#include <asm/mtrr.h>
+#endif
+
+#include "pvrmodule.h"
+
+#include "tc_apollo.h"
+#include "tc_odin.h"
+
+/* How much memory to give to the PDP heap (used for pdp buffers). */
+#define TC_PDP_MEM_SIZE_BYTES           ((TC_DISPLAY_MEM_SIZE)*1024*1024)
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+/* How much memory to give to the secure heap. */
+#define TC_SECURE_MEM_SIZE_BYTES        ((TC_SECURE_MEM_SIZE)*1024*1024)
+#endif
+
+#define PCI_VENDOR_ID_POWERVR		0x1010
+#define DEVICE_ID_PCI_APOLLO_FPGA	0x1CF1
+#define DEVICE_ID_PCIE_APOLLO_FPGA	0x1CF2
+
+MODULE_DESCRIPTION("PowerVR testchip framework driver");
+
+static int tc_core_clock = RGX_TC_CORE_CLOCK_SPEED;
+module_param(tc_core_clock, int, 0444);
+MODULE_PARM_DESC(tc_core_clock, "TC core clock speed");
+
+static int tc_mem_clock = RGX_TC_MEM_CLOCK_SPEED;
+module_param(tc_mem_clock, int, 0444);
+MODULE_PARM_DESC(tc_mem_clock, "TC memory clock speed");
+
+static int tc_sys_clock = RGX_TC_SYS_CLOCK_SPEED;
+module_param(tc_sys_clock, int, 0444);
+MODULE_PARM_DESC(tc_sys_clock, "TC system clock speed (TCF5 only)");
+
+static int tc_mem_latency;
+module_param(tc_mem_latency, int, 0444);
+MODULE_PARM_DESC(tc_mem_latency, "TC memory read latency in cycles (TCF5 only)");
+
+static int tc_wresp_latency;
+module_param(tc_wresp_latency, int, 0444);
+MODULE_PARM_DESC(tc_wresp_latency, "TC memory write response latency in cycles (TCF5 only)");
+
+static unsigned long tc_pdp_mem_size = TC_PDP_MEM_SIZE_BYTES;
+module_param(tc_pdp_mem_size, ulong, 0444);
+MODULE_PARM_DESC(tc_pdp_mem_size,
+	"TC PDP reserved memory size in bytes");
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+static unsigned long tc_secure_mem_size = TC_SECURE_MEM_SIZE_BYTES;
+module_param(tc_secure_mem_size, ulong, 0444);
+MODULE_PARM_DESC(tc_secure_mem_size,
+	"TC secure reserved memory size in bytes");
+#endif
+
+static struct debugfs_blob_wrapper tc_debugfs_rogue_name_blobs[] = {
+	[APOLLO_VERSION_TCF_2] = {
+		.data = "hood", /* probably */
+		.size = sizeof("hood") - 1,
+	},
+	[APOLLO_VERSION_TCF_5] = {
+		.data = "fpga (unknown)",
+		.size = sizeof("fpga (unknown)") - 1,
+	},
+	[APOLLO_VERSION_TCF_BONNIE] = {
+		.data = "bonnie",
+		.size = sizeof("bonnie") - 1,
+	},
+	[ODIN_VERSION_TCF_BONNIE] = {
+		.data = "bonnie",
+		.size = sizeof("bonnie") - 1,
+	},
+	[ODIN_VERSION_FPGA] = {
+		.data = "fpga (unknown)",
+		.size = sizeof("fpga (unknown)") - 1,
+	},
+};
+
+#if defined(CONFIG_MTRR) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
+/*
+ * A return value of:
+ *      0 or more means success
+ *     -1 means we were unable to add an mtrr but we should continue
+ *     -2 means we were unable to add an mtrr but we shouldn't continue
+ */
+static int mtrr_setup(struct pci_dev *pdev,
+		      resource_size_t mem_start,
+		      resource_size_t mem_size)
+{
+	int err;
+	int mtrr;
+
+	/* Reset MTRR */
+	mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_UNCACHABLE, 0);
+	if (mtrr < 0) {
+		dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
+			__LINE__, __func__, mtrr);
+		mtrr = -2;
+		goto err_out;
+	}
+
+	err = mtrr_del(mtrr, mem_start, mem_size);
+	if (err < 0) {
+		dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n",
+			__LINE__, __func__, err);
+		mtrr = -2;
+		goto err_out;
+	}
+
+	mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRBACK, 0);
+	if (mtrr < 0) {
+		/* Stop, but not an error as this may already be set up */
+		dev_dbg(&pdev->dev,
+			"%d - %s: mtrr_add failed (%d) - probably means the mtrr is already setup\n",
+			__LINE__, __func__, mtrr);
+		mtrr = -1;
+		goto err_out;
+	}
+
+	err = mtrr_del(mtrr, mem_start, mem_size);
+	if (err < 0) {
+		dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n",
+			__LINE__, __func__, err);
+		mtrr = -2;
+		goto err_out;
+	}
+
+	if (mtrr == 0) {
+		/* Replace 0 with a non-overlapping WRBACK mtrr */
+		err = mtrr_add(0, mem_start, MTRR_TYPE_WRBACK, 0);
+		if (err < 0) {
+			dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
+				__LINE__, __func__, err);
+			mtrr = -2;
+			goto err_out;
+		}
+	}
+
+	mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRCOMB, 0);
+	if (mtrr < 0) {
+		dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
+			__LINE__, __func__, mtrr);
+		mtrr = -1;
+	}
+
+err_out:
+	return mtrr;
+}
+#endif /* defined(CONFIG_MTRR) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) */
+
+int tc_mtrr_setup(struct tc_device *tc)
+{
+	int err = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+	/* Register the LMA as write combined */
+	err = arch_io_reserve_memtype_wc(tc->tc_mem.base,
+					 tc->tc_mem.size);
+	if (err)
+		return -ENODEV;
+#endif
+	/* Enable write combining */
+	tc->mtrr = arch_phys_wc_add(tc->tc_mem.base,
+				    tc->tc_mem.size);
+	if (tc->mtrr < 0) {
+		err = -ENODEV;
+		goto err_out;
+	}
+
+#elif defined(CONFIG_MTRR)
+	/* Enable mtrr region caching */
+	tc->mtrr = mtrr_setup(tc->pdev,
+			      tc->tc_mem.base,
+			      tc->tc_mem.size);
+	if (tc->mtrr == -2) {
+		err = -ENODEV;
+		goto err_out;
+	}
+#endif
+	return err;
+
+err_out:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+	arch_io_free_memtype_wc(tc->tc_mem.base,
+				tc->tc_mem.size);
+#endif
+	return err;
+}
+
+void tc_mtrr_cleanup(struct tc_device *tc)
+{
+	if (tc->mtrr >= 0) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+		arch_phys_wc_del(tc->mtrr);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+		arch_io_free_memtype_wc(tc->tc_mem.base,
+					tc->tc_mem.size);
+#endif
+#elif defined(CONFIG_MTRR)
+		int err;
+
+		err = mtrr_del(tc->mtrr,
+			       tc->tc_mem.base,
+			       tc->tc_mem.size);
+		if (err < 0)
+			dev_err(&tc->pdev->dev,
+				"mtrr_del failed (%d)\n", err);
+#endif
+	}
+}
+
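+/* Returns 1 when the SAI training readings (eyes, clock taps, training acks)
+ * indicate the interface has aligned, 0 otherwise.
+ */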
+int tc_is_interface_aligned(u32 eyes, u32 clk_taps, u32 train_ack)
+{
+	u32	max_eye_start = eyes >> 16;
+	u32	min_eye_end   = eyes & 0xffff;
+
+	/* If either the training or training ack failed, we haven't aligned */
+	if (!(clk_taps & 0x10000) || !(train_ack & 0x100))
+		return 0;
+
+	/* If max_eye_start >= min_eye_end, the readings are nonsense */
+	if (max_eye_start >= min_eye_end)
+		return 0;
+
+	/* If we failed the ack pattern more than 4 times */
+	if (((train_ack & 0xf0) >> 4) > 4)
+		return 0;
+
+	/* If there are fewer than 7 taps (240ps @ 40ps/tap), we haven't
+	 * aligned. This number should be lower for the FPGA, since its taps
+	 * are bigger. We should really calculate the "7" based on the
+	 * interface clock speed.
+	 */
+	if ((min_eye_end - max_eye_start) < 7)
+		return 0;
+
+	return 1;
+}
+
+int tc_iopol32_nonzero(u32 mask, void __iomem *addr)
+{
+	int polnum;
+	u32 read_value;
+
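+	/* Poll every 20ms, up to 50 times (~1s), for any masked bit to read non-zero */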
+	for (polnum = 0; polnum < 50; polnum++) {
+		read_value = ioread32(addr) & mask;
+		if (read_value != 0)
+			break;
+		msleep(20);
+	}
+	if (polnum == 50) {
+		pr_err(DRV_NAME " iopol32_nonzero timeout\n");
+		return -ETIME;
+	}
+	return 0;
+}
+
+int request_pci_io_addr(struct pci_dev *pdev, u32 index,
+	resource_size_t offset, resource_size_t length)
+{
+	resource_size_t start, end;
+
+	start = pci_resource_start(pdev, index);
+	end = pci_resource_end(pdev, index);
+
+	if ((start + offset + length - 1) > end)
+		return -EIO;
+	if (pci_resource_flags(pdev, index) & IORESOURCE_IO) {
+		if (request_region(start + offset, length, DRV_NAME) == NULL)
+			return -EIO;
+	} else {
+		if (request_mem_region(start + offset, length, DRV_NAME)
+			== NULL)
+			return -EIO;
+	}
+	return 0;
+}
+
+void release_pci_io_addr(struct pci_dev *pdev, u32 index,
+	resource_size_t start, resource_size_t length)
+{
+	if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
+		release_region(start, length);
+	else
+		release_mem_region(start, length);
+}
+
+int setup_io_region(struct pci_dev *pdev,
+	struct tc_io_region *region, u32 index,
+	resource_size_t offset,	resource_size_t size)
+{
+	int err;
+	resource_size_t pci_phys_addr;
+
+	err = request_pci_io_addr(pdev, index, offset, size);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Failed to request tc registers (err=%d)\n", err);
+		return -EIO;
+	}
+	pci_phys_addr = pci_resource_start(pdev, index);
+	region->region.base = pci_phys_addr + offset;
+	region->region.size = size;
+
+	region->registers
+		= ioremap_nocache(region->region.base, region->region.size);
+
+	if (!region->registers) {
+		dev_err(&pdev->dev, "Failed to map tc registers\n");
+		release_pci_io_addr(pdev, index,
+			region->region.base, region->region.size);
+		return -EIO;
+	}
+	return 0;
+}
+
+#if defined(TC_FAKE_INTERRUPTS)
+void tc_irq_fake_wrapper(unsigned long data)
+{
+	struct tc_device *tc = (struct tc_device *)data;
+
+	if (tc->odin)
+		odin_irq_handler(0, tc);
+	else
+		apollo_irq_handler(0, tc);
+
+	mod_timer(&tc->timer,
+		jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS));
+}
+#endif
+
+static int tc_register_pdp_device(struct tc_device *tc)
+{
+	int err = 0;
+
+	if (tc->odin)
+		err = odin_register_pdp_device(tc);
+	else
+		err = apollo_register_pdp_device(tc);
+
+	return err;
+}
+
+static int tc_register_ext_device(struct tc_device *tc)
+{
+	int err = 0;
+
+	if (tc->odin)
+		err = odin_register_ext_device(tc);
+	else
+		err = apollo_register_ext_device(tc);
+
+	return err;
+}
+
+static void tc_devres_release(struct device *dev, void *res)
+{
+	/* No extra cleanup needed */
+}
+
+static int tc_cleanup(struct pci_dev *pdev)
+{
+	struct tc_device *tc = devres_find(&pdev->dev,
+					   tc_devres_release, NULL, NULL);
+	int i, err = 0;
+
+	if (!tc) {
+		dev_err(&pdev->dev, "No tc device resources found\n");
+		return -ENODEV;
+	}
+
+	debugfs_remove(tc->debugfs_rogue_name);
+
+	for (i = 0; i < TC_INTERRUPT_COUNT; i++)
+		if (tc->interrupt_handlers[i].enabled)
+			tc_disable_interrupt(&pdev->dev, i);
+
+	if (tc->odin)
+		err = odin_cleanup(tc);
+	else
+		err = apollo_cleanup(tc);
+
+	debugfs_remove(tc->debugfs_tc_dir);
+
+	return err;
+}
+
+static int tc_init(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct tc_device *tc;
+	int err = 0;
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	int sec_mem_size = TC_SECURE_MEM_SIZE_BYTES;
+#else /* defined(SUPPORT_FAKE_SECURE_ION_HEAP) */
+	int sec_mem_size = 0;
+#endif /* defined(SUPPORT_FAKE_SECURE_ION_HEAP) */
+
+	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
+	tc = devres_alloc(tc_devres_release,
+		sizeof(*tc), GFP_KERNEL);
+	if (!tc) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	devres_add(&pdev->dev, tc);
+
+	err = tc_enable(&pdev->dev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"tc_enable failed %d\n", err);
+		goto err_release;
+	}
+
+	tc->pdev = pdev;
+
+	spin_lock_init(&tc->interrupt_handler_lock);
+	spin_lock_init(&tc->interrupt_enable_lock);
+
+	tc->debugfs_tc_dir = debugfs_create_dir(DRV_NAME, NULL);
+
+	if (pdev->vendor == PCI_VENDOR_ID_ODIN &&
+	    pdev->device == DEVICE_ID_ODIN) {
+
+		dev_info(&pdev->dev, "Odin detected");
+		tc->odin = true;
+
+		err = odin_init(tc, pdev,
+				tc_core_clock, tc_mem_clock,
+				tc_pdp_mem_size, sec_mem_size,
+				tc_mem_latency, tc_wresp_latency);
+		if (err)
+			goto err_dev_cleanup;
+
+	} else {
+		dev_info(&pdev->dev, "Apollo detected");
+		tc->odin = false;
+
+		err = apollo_init(tc, pdev,
+				  tc_core_clock, tc_mem_clock, tc_sys_clock,
+				  tc_pdp_mem_size, sec_mem_size,
+				  tc_mem_latency, tc_wresp_latency);
+		if (err)
+			goto err_dev_cleanup;
+	}
+
+	/* Add the rogue name debugfs entry */
+	tc->debugfs_rogue_name =
+		debugfs_create_blob("rogue-name", 0444,
+			tc->debugfs_tc_dir,
+			&tc_debugfs_rogue_name_blobs[tc->version]);
+
+#if defined(TC_FAKE_INTERRUPTS)
+	dev_warn(&pdev->dev, "WARNING: Faking interrupts every %d ms",
+		FAKE_INTERRUPT_TIME_MS);
+#endif
+
+	/* Register pdp and ext platform devices */
+	err = tc_register_pdp_device(tc);
+	if (err)
+		goto err_dev_cleanup;
+
+	err = tc_register_ext_device(tc);
+	if (err)
+		goto err_dev_cleanup;
+
+	devres_remove_group(&pdev->dev, NULL);
+
+err_out:
+	if (err)
+		dev_err(&pdev->dev, "%s: failed\n", __func__);
+
+	return err;
+
+err_dev_cleanup:
+	tc_cleanup(pdev);
+	tc_disable(&pdev->dev);
+err_release:
+	devres_release_group(&pdev->dev, NULL);
+	goto err_out;
+}
+
+static void tc_exit(struct pci_dev *pdev)
+{
+	struct tc_device *tc = devres_find(&pdev->dev,
+					   tc_devres_release, NULL, NULL);
+
+	if (!tc) {
+		dev_err(&pdev->dev, "No tc device resources found\n");
+		return;
+	}
+
+	if (tc->pdp_dev)
+		platform_device_unregister(tc->pdp_dev);
+
+	if (tc->ext_dev)
+		platform_device_unregister(tc->ext_dev);
+
+	tc_cleanup(pdev);
+
+	tc_disable(&pdev->dev);
+}
+
+static struct pci_device_id tc_pci_tbl[] = {
+	{ PCI_VDEVICE(POWERVR, DEVICE_ID_PCI_APOLLO_FPGA) },
+	{ PCI_VDEVICE(POWERVR, DEVICE_ID_PCIE_APOLLO_FPGA) },
+	{ PCI_VDEVICE(ODIN, DEVICE_ID_ODIN) },
+	{ },
+};
+
+static struct pci_driver tc_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= tc_pci_tbl,
+	.probe		= tc_init,
+	.remove		= tc_exit,
+};
+
+module_pci_driver(tc_pci_driver);
+
+MODULE_DEVICE_TABLE(pci, tc_pci_tbl);
+
+int tc_enable(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return pci_enable_device(pdev);
+}
+EXPORT_SYMBOL(tc_enable);
+
+void tc_disable(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	pci_disable_device(pdev);
+}
+EXPORT_SYMBOL(tc_disable);
+
+int tc_set_interrupt_handler(struct device *dev, int interrupt_id,
+	void (*handler_function)(void *), void *data)
+{
+	struct tc_device *tc = devres_find(dev, tc_devres_release,
+		NULL, NULL);
+	int err = 0;
+	unsigned long flags;
+
+	if (!tc) {
+		dev_err(dev, "No tc device resources found\n");
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) {
+		dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id);
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	spin_lock_irqsave(&tc->interrupt_handler_lock, flags);
+
+	tc->interrupt_handlers[interrupt_id].handler_function =
+		handler_function;
+	tc->interrupt_handlers[interrupt_id].handler_data = data;
+
+	spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags);
+
+err_out:
+	return err;
+}
+EXPORT_SYMBOL(tc_set_interrupt_handler);
+
+int tc_enable_interrupt(struct device *dev, int interrupt_id)
+{
+	struct tc_device *tc = devres_find(dev, tc_devres_release,
+		NULL, NULL);
+	int err = 0;
+	unsigned long flags;
+
+	if (!tc) {
+		dev_err(dev, "No tc device resources found\n");
+		err = -ENODEV;
+		goto err_out;
+	}
+	if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) {
+		dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id);
+		err = -EINVAL;
+		goto err_out;
+	}
+	spin_lock_irqsave(&tc->interrupt_enable_lock, flags);
+
+	if (tc->interrupt_handlers[interrupt_id].enabled) {
+		dev_warn(dev, "Interrupt ID %d already enabled\n",
+			interrupt_id);
+		err = -EEXIST;
+		goto err_unlock;
+	}
+	tc->interrupt_handlers[interrupt_id].enabled = true;
+
+	if (tc->odin)
+		odin_enable_interrupt_register(tc, interrupt_id);
+	else
+		apollo_enable_interrupt_register(tc, interrupt_id);
+
+err_unlock:
+	spin_unlock_irqrestore(&tc->interrupt_enable_lock, flags);
+err_out:
+	return err;
+}
+EXPORT_SYMBOL(tc_enable_interrupt);
+
+int tc_disable_interrupt(struct device *dev, int interrupt_id)
+{
+	struct tc_device *tc = devres_find(dev, tc_devres_release,
+		NULL, NULL);
+	int err = 0;
+	unsigned long flags;
+
+	if (!tc) {
+		dev_err(dev, "No tc device resources found\n");
+		err = -ENODEV;
+		goto err_out;
+	}
+	if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) {
+		dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id);
+		err = -EINVAL;
+		goto err_out;
+	}
+	spin_lock_irqsave(&tc->interrupt_enable_lock, flags);
+
+	if (!tc->interrupt_handlers[interrupt_id].enabled) {
+		dev_warn(dev, "Interrupt ID %d already disabled\n",
+			interrupt_id);
+	}
+	tc->interrupt_handlers[interrupt_id].enabled = false;
+
+	if (tc->odin)
+		odin_disable_interrupt_register(tc, interrupt_id);
+	else
+		apollo_disable_interrupt_register(tc, interrupt_id);
+
+	spin_unlock_irqrestore(&tc->interrupt_enable_lock, flags);
+err_out:
+	return err;
+}
+EXPORT_SYMBOL(tc_disable_interrupt);
+
+int tc_sys_info(struct device *dev, u32 *tmp, u32 *pll)
+{
+	int err = -ENODEV;
+	struct tc_device *tc = devres_find(dev, tc_devres_release,
+		NULL, NULL);
+
+	if (!tc) {
+		dev_err(dev, "No tc device resources found\n");
+		goto err_out;
+	}
+
+	if (tc->odin)
+		err = odin_sys_info(tc, tmp, pll);
+	else
+		err = apollo_sys_info(tc, tmp, pll);
+
+err_out:
+	return err;
+}
+EXPORT_SYMBOL(tc_sys_info);
+
+int tc_sys_strings(struct device *dev,
+		   char *str_fpga_rev, size_t size_fpga_rev,
+		   char *str_tcf_core_rev, size_t size_tcf_core_rev,
+		   char *str_tcf_core_target_build_id,
+		   size_t size_tcf_core_target_build_id,
+		   char *str_pci_ver, size_t size_pci_ver,
+		   char *str_macro_ver, size_t size_macro_ver)
+{
+	int err = -ENODEV;
+
+	struct tc_device *tc = devres_find(dev, tc_devres_release,
+		NULL, NULL);
+
+	if (!tc) {
+		dev_err(dev, "No tc device resources found\n");
+		goto err_out;
+	}
+
+	if (!str_fpga_rev ||
+	    !size_fpga_rev ||
+	    !str_tcf_core_rev ||
+	    !size_tcf_core_rev ||
+	    !str_tcf_core_target_build_id ||
+	    !size_tcf_core_target_build_id ||
+	    !str_pci_ver ||
+	    !size_pci_ver ||
+	    !str_macro_ver ||
+	    !size_macro_ver) {
+
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	if (tc->odin) {
+		err = odin_sys_strings(tc,
+				 str_fpga_rev, size_fpga_rev,
+				 str_tcf_core_rev, size_tcf_core_rev,
+				 str_tcf_core_target_build_id,
+				 size_tcf_core_target_build_id,
+				 str_pci_ver, size_pci_ver,
+				 str_macro_ver, size_macro_ver);
+	} else {
+		err = apollo_sys_strings(tc,
+				 str_fpga_rev, size_fpga_rev,
+				 str_tcf_core_rev, size_tcf_core_rev,
+				 str_tcf_core_target_build_id,
+				 size_tcf_core_target_build_id,
+				 str_pci_ver, size_pci_ver,
+				 str_macro_ver, size_macro_ver);
+	}
+
+err_out:
+	return err;
+}
+EXPORT_SYMBOL(tc_sys_strings);
+
+int tc_core_clock_speed(struct device *dev)
+{
+	return tc_core_clock;
+}
+EXPORT_SYMBOL(tc_core_clock_speed);
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_drv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_drv.h
new file mode 100644
index 0000000..197c4ab
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_drv.h
@@ -0,0 +1,150 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _TC_DRV_H
+#define _TC_DRV_H
+
+/*
+ * This contains the hooks for the testchip driver, as used by the Rogue and
+ * PDP sub-devices, and the platform data passed to each of their drivers.
+ */
+
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+
+/* Valid values for the TC_MEMORY_CONFIG configuration option */
+#define TC_MEMORY_LOCAL		1
+#define TC_MEMORY_HOST		2
+#define TC_MEMORY_HYBRID	3
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+
+#include PVR_ANDROID_ION_HEADER
+
+/* NOTE: This should be kept in sync with the user side (in buffer_generic.c) */
+#if defined(SUPPORT_RGX)
+#define ION_HEAP_TC_ROGUE    (ION_HEAP_TYPE_CUSTOM+1)
+#endif
+#define ION_HEAP_TC_PDP      (ION_HEAP_TYPE_CUSTOM+2)
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+#define ION_HEAP_TC_SECURE   (ION_HEAP_TYPE_CUSTOM+3)
+#endif
+
+#endif /* defined(SUPPORT_ION) */
+
+#define TC_INTERRUPT_PDP     0
+#define TC_INTERRUPT_EXT     1
+#define TC_INTERRUPT_TC5_PDP 2
+#define TC_INTERRUPT_COUNT   3
+
+int tc_enable(struct device *dev);
+void tc_disable(struct device *dev);
+
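+/* Interrupt management: a sub-device first installs a handler with
+ * tc_set_interrupt_handler(), then arms it with tc_enable_interrupt().
+ */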
+int tc_enable_interrupt(struct device *dev, int interrupt_id);
+int tc_disable_interrupt(struct device *dev, int interrupt_id);
+
+int tc_set_interrupt_handler(struct device *dev, int interrupt_id,
+	void (*handler_function)(void *), void *handler_data);
+
+int tc_sys_info(struct device *dev, u32 *tmp, u32 *pll);
+int tc_sys_strings(struct device *dev,
+	char *str_fpga_rev, size_t size_fpga_rev, char *str_tcf_core_rev,
+	size_t size_tcf_core_rev, char *str_tcf_core_target_build_id,
+	size_t size_tcf_core_target_build_id, char *str_pci_ver,
+	size_t size_pci_ver, char *str_macro_ver, size_t size_macro_ver);
+int tc_core_clock_speed(struct device *dev);
+
+#define APOLLO_DEVICE_NAME_PDP   "apollo_pdp"
+#define ODN_DEVICE_NAME_PDP      "odin_pdp"
+
+/* The following structs are initialised and passed down by the parent tc
+ * driver to the respective sub-drivers
+ */
+
+struct tc_pdp_platform_data {
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	struct ion_device *ion_device;
+	int ion_heap_id;
+#endif
+	resource_size_t memory_base;
+
+	/* The following is used by the drm_pdp driver as it manages the
+	 * pdp memory
+	 */
+	resource_size_t pdp_heap_memory_base;
+	resource_size_t pdp_heap_memory_size;
+};
+
+#if defined(SUPPORT_RGX)
+
+#define TC_DEVICE_NAME_ROGUE "tc_rogue"
+
+struct tc_rogue_platform_data {
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	struct ion_device *ion_device;
+	int ion_heap_id;
+#endif
+
+	/* The base address of the testchip memory (CPU physical address) -
+	 * used to convert from CPU-Physical to device-physical addresses
+	 */
+	resource_size_t tc_memory_base;
+
+	/* The following is used to setup the services heaps that map to the
+	 * ion heaps
+	 */
+	resource_size_t pdp_heap_memory_base;
+	resource_size_t pdp_heap_memory_size;
+	resource_size_t rogue_heap_memory_base;
+	resource_size_t rogue_heap_memory_size;
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	resource_size_t secure_heap_memory_base;
+	resource_size_t secure_heap_memory_size;
+#endif
+};
+
+#endif /* defined(SUPPORT_RGX) */
+
+#endif /* _TC_DRV_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_drv_internal.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_drv_internal.h
new file mode 100644
index 0000000..fa22fb2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_drv_internal.h
@@ -0,0 +1,180 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _TC_DRV_INTERNAL_H
+#define _TC_DRV_INTERNAL_H
+
+#include "tc_drv.h"
+
+#include <linux/version.h>
+
+#if defined(TC_FAKE_INTERRUPTS)
+#define FAKE_INTERRUPT_TIME_MS 1600
+#include <linux/timer.h>
+#include <linux/time.h>
+#endif
+
+#define DRV_NAME "tc"
+
+/* This is a guess of what's a minimum sensible size for the ext heap
+ * It is only used for a warning if the ext heap is smaller, and does
+ * not affect the functional logic in any way
+ */
+#define TC_EXT_MINIMUM_MEM_SIZE (10*1024*1024)
+
+#if defined(SUPPORT_ION)
+ #if defined(SUPPORT_RGX) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+  #define TC_ION_HEAP_BASE_COUNT 3
+ #else
+  #define TC_ION_HEAP_BASE_COUNT 2
+ #endif
+
+ #if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+  #define TC_ION_HEAP_COUNT (TC_ION_HEAP_BASE_COUNT + 1)
+ #else
+  #define TC_ION_HEAP_COUNT TC_ION_HEAP_BASE_COUNT
+ #endif
+#endif /* defined(SUPPORT_ION) */
+
+/* Convert a byte offset to a 32 bit dword offset */
+#define DWORD_OFFSET(byte_offset)  ((byte_offset)>>2)
+
+#define HEX2DEC(v)                 ((((v) >> 4) * 10) + ((v) & 0x0F))
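+/* e.g. HEX2DEC(0x25) == 25: interprets the byte as two BCD digits */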
+
+enum tc_version_t {
+	TC_INVALID_VERSION,
+	APOLLO_VERSION_TCF_2,
+	APOLLO_VERSION_TCF_5,
+	APOLLO_VERSION_TCF_BONNIE,
+	ODIN_VERSION_TCF_BONNIE,
+	ODIN_VERSION_FPGA
+};
+
+struct tc_interrupt_handler {
+	bool enabled;
+	void (*handler_function)(void *);
+	void *handler_data;
+};
+
+struct tc_region {
+	resource_size_t base;
+	resource_size_t size;
+};
+
+struct tc_io_region {
+	struct tc_region region;
+	void __iomem *registers;
+};
+
+struct tc_device {
+	struct pci_dev *pdev;
+
+	enum tc_version_t version;
+	bool odin;
+
+	struct tc_io_region tcf;
+	struct tc_io_region tcf_pll;
+
+	struct tc_region tc_mem;
+
+	struct platform_device *pdp_dev;
+
+	resource_size_t pdp_heap_mem_base;
+	resource_size_t pdp_heap_mem_size;
+
+	struct platform_device *ext_dev;
+
+	resource_size_t ext_heap_mem_base;
+	resource_size_t ext_heap_mem_size;
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	resource_size_t secure_heap_mem_base;
+	resource_size_t secure_heap_mem_size;
+#endif
+
+#if defined(CONFIG_MTRR) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	int mtrr;
+#endif
+	spinlock_t interrupt_handler_lock;
+	spinlock_t interrupt_enable_lock;
+
+	struct tc_interrupt_handler
+		interrupt_handlers[TC_INTERRUPT_COUNT];
+
+#if defined(TC_FAKE_INTERRUPTS)
+	struct timer_list timer;
+#endif
+
+#if defined(SUPPORT_ION)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	struct ion_device *ion_device;
+#endif
+	struct ion_heap *ion_heaps[TC_ION_HEAP_COUNT];
+	int ion_heap_count;
+#endif
+
+	struct dentry *debugfs_tc_dir;
+	struct dentry *debugfs_rogue_name;
+};
+
+int tc_mtrr_setup(struct tc_device *tc);
+void tc_mtrr_cleanup(struct tc_device *tc);
+
+int tc_is_interface_aligned(u32 eyes, u32 clk_taps, u32 train_ack);
+
+int tc_iopol32_nonzero(u32 mask, void __iomem *addr);
+
+int request_pci_io_addr(struct pci_dev *pdev, u32 index,
+	resource_size_t offset, resource_size_t length);
+void release_pci_io_addr(struct pci_dev *pdev, u32 index,
+	resource_size_t start, resource_size_t length);
+
+int setup_io_region(struct pci_dev *pdev,
+	struct tc_io_region *region, u32 index,
+	resource_size_t offset,	resource_size_t size);
+
+#if defined(TC_FAKE_INTERRUPTS)
+void tc_irq_fake_wrapper(unsigned long data);
+#endif /* defined(TC_FAKE_INTERRUPTS) */
+
+#endif /* _TC_DRV_INTERNAL_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_ion.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_ion.h
new file mode 100644
index 0000000..d24b51a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_ion.h
@@ -0,0 +1,54 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _TC_ION_H
+#define _TC_ION_H
+
+struct ion_client;
+struct tc_device;
+
+int tc_ion_init(struct tc_device *tc, int mem_bar);
+
+void tc_ion_deinit(struct tc_device *tc, int mem_bar);
+
+#endif /* _TC_ION_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_odin.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_odin.c
new file mode 100644
index 0000000..1e35bf3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_odin.c
@@ -0,0 +1,1423 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*
+ * This is a device driver for the odin testchip framework. It creates
+ * platform devices for the pdp and ext sub-devices, and exports functions
+ * to manage the shared interrupt handling.
+ */
+
+#include <linux/version.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+
+#include "tc_drv_internal.h"
+#include "tc_odin.h"
+#include "tc_ion.h"
+
+/* Odin (3rd gen TCF FPGA) */
+#include "odin_defs.h"
+#include "odin_regs.h"
+#include "bonnie_tcf.h"
+
+/* Macros to get and set register fields */
+#define REG_FIELD_GET(v, str) \
+	(u32)(((v) & (str##_MASK)) >> (str##_SHIFT))
+#define REG_FIELD_SET(v, f, str) \
+	v = (u32)(((v) & (u32)~(str##_MASK)) | \
+		  (u32)(((f) << (str##_SHIFT)) & (str##_MASK)))
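+/* e.g. REG_FIELD_SET(v, 3, FOO) rewrites only the FOO field of v to 3,
+ * for any hypothetical field FOO with FOO_MASK/FOO_SHIFT defines.
+ */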
+
+#define SAI_STATUS_UNALIGNED 0
+#define SAI_STATUS_ALIGNED   1
+#define SAI_STATUS_ERROR     2
+
+#if defined(SUPPORT_RGX)
+
+static void spi_write(struct tc_device *tc, u32 off, u32 val)
+{
+	iowrite32(off, tc->tcf.registers
+		  + ODN_REG_BANK_TCF_SPI_MASTER
+		  + ODN_SPI_MST_ADDR_RDNWR);
+	iowrite32(val, tc->tcf.registers
+		  + ODN_REG_BANK_TCF_SPI_MASTER
+		  + ODN_SPI_MST_WDATA);
+	iowrite32(0x1, tc->tcf.registers
+		  + ODN_REG_BANK_TCF_SPI_MASTER
+		  + ODN_SPI_MST_GO);
+	udelay(1000);
+}
+
+static int spi_read(struct tc_device *tc, u32 off, u32 *val)
+{
+	int cnt = 0;
+	u32 spi_mst_status;
+
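+	/* Bit 18 (0x40000) presumably flags a read in the ADDR_RDNWR register */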
+	iowrite32(0x40000 | off, tc->tcf.registers
+		  + ODN_REG_BANK_TCF_SPI_MASTER
+		  + ODN_SPI_MST_ADDR_RDNWR);
+	iowrite32(0x1, tc->tcf.registers
+		  + ODN_REG_BANK_TCF_SPI_MASTER
+		  + ODN_SPI_MST_GO);
+	udelay(100);
+
+	do {
+		spi_mst_status = ioread32(tc->tcf.registers
+					  + ODN_REG_BANK_TCF_SPI_MASTER
+					  + ODN_SPI_MST_STATUS);
+
+		if (cnt++ > 10000) {
+			dev_err(&tc->pdev->dev,
+				"%s: Time out reading SPI reg (0x%x)\n",
+				__func__, off);
+			return -1;
+		}
+
+	} while (spi_mst_status != 0x08);
+
+	*val = ioread32(tc->tcf.registers
+			+ ODN_REG_BANK_TCF_SPI_MASTER
+			+ ODN_SPI_MST_RDATA);
+
+	return 0;
+}
+
+/* returns 1 for aligned, 0 for unaligned */
+static int get_odin_sai_status(struct tc_device *tc, int bank)
+{
+	void __iomem *bank_addr = tc->tcf.registers
+					+ ODN_REG_BANK_SAI_RX_DDR(bank);
+	void __iomem *reg_addr;
+	u32 eyes;
+	u32 clk_taps;
+	u32 train_ack;
+
+	reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_EYES;
+	eyes = ioread32(reg_addr);
+
+	reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_CLK_TAPS;
+	clk_taps = ioread32(reg_addr);
+
+	reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK;
+	train_ack = ioread32(reg_addr);
+
+#if 0 /* enable this to get debug info if the board is not aligning */
+	dev_info(&tc->pdev->dev,
+		"odin bank %d align: eyes=%08x clk_taps=%08x train_ack=%08x\n",
+		bank, eyes, clk_taps, train_ack);
+#endif
+
+	if (tc_is_interface_aligned(eyes, clk_taps, train_ack))
+		return SAI_STATUS_ALIGNED;
+
+	dev_warn(&tc->pdev->dev, "odin bank %d is unaligned\n", bank);
+	return SAI_STATUS_UNALIGNED;
+}
+
+/* Read the odin multi clocked bank align status.
+ * Returns 1 for aligned, 0 for unaligned
+ */
+static int read_odin_mca_status(struct tc_device *tc)
+{
+	void __iomem *bank_addr = tc->tcf.registers
+					+ ODN_REG_BANK_MULTI_CLK_ALIGN;
+	void __iomem *reg_addr = bank_addr + ODN_MCA_DEBUG_MCA_STATUS;
+	u32 mca_status;
+
+	mca_status = ioread32(reg_addr);
+
+#if 0 /* Enable this if there are alignment issues */
+	dev_info(&tc->pdev->dev,
+		"Odin MCA_STATUS = %08x\n", mca_status);
+#endif
+	return mca_status & ODN_ALIGNMENT_FOUND_MASK;
+}
+
+/* Read the DUT multi clocked bank align status.
+ * Returns 1 for aligned, 0 for unaligned
+ */
+static int read_dut_mca_status(struct tc_device *tc)
+{
+	u32 mca_status;
+	const int mca_status_register_offset = 1; /* not in bonnie_tcf.h */
+	int spi_address = DWORD_OFFSET(BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN)
+			+ mca_status_register_offset;
+
+	spi_read(tc, spi_address, &mca_status);
+
+#if 0 /* Enable this if there are alignment issues */
+	dev_info(&tc->pdev->dev,
+		"DUT MCA_STATUS = %08x\n", mca_status);
+#endif
+	return mca_status & 1;  /* 'alignment found' status is in bit 0 */
+}
+
+/* returns SAI_STATUS_ALIGNED, SAI_STATUS_UNALIGNED or SAI_STATUS_ERROR */
+static int get_dut_sai_status(struct tc_device *tc, int bank)
+{
+	u32 eyes;
+	u32 clk_taps;
+	u32 train_ack;
+	const u32 bank_base = DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_RX_1
+				+ (BONNIE_TCF_OFFSET_SAI_RX_DELTA * bank));
+	int spi_timeout;
+
+	spi_timeout = spi_read(tc, bank_base
+		+ DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_EYES), &eyes);
+	if (spi_timeout)
+		return SAI_STATUS_ERROR;
+
+	spi_read(tc, bank_base
+		+ DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_CLK_TAPS), &clk_taps);
+	spi_read(tc, bank_base
+		+ DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_TRAIN_ACK), &train_ack);
+
+#if 0 /* enable this to get debug info if the board is not aligning */
+	dev_info(&tc->pdev->dev,
+		"dut  bank %d align: eyes=%08x clk_taps=%08x train_ack=%08x\n",
+		bank, eyes, clk_taps, train_ack);
+#endif
+
+	if (tc_is_interface_aligned(eyes, clk_taps, train_ack))
+		return SAI_STATUS_ALIGNED;
+
+	dev_warn(&tc->pdev->dev, "dut bank %d is unaligned\n", bank);
+	return SAI_STATUS_UNALIGNED;
+}
+
+/*
+ * Returns the divider group register fields for the specified counter value.
+ * See Xilinx Application Note xapp888.
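+ * For example, value == 5 yields high = 2, low = 3, edge = 1, no_count = 0.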
+ */
+static void odin_mmcm_reg_param_calc(u32 value, u32 *low, u32 *high,
+				     u32 *edge, u32 *no_count)
+{
+	if (value == 1U) {
+		*no_count = 1U;
+		*edge = 0;
+		*high = 0;
+		*low = 0;
+	} else {
+		*no_count = 0;
+		*edge = value % 2U;
+		*high = value >> 1;
+		*low = (value + *edge) >> 1U;
+	}
+}
+
+/*
+ * Returns the MMCM Input Divider, FB Multiplier and Output Divider values for
+ * the specified input frequency and target output frequency.
+ * The function does not support fractional multiplier or output divider values.
+ * As per Xilinx 7 series FPGAs clocking resources user guide, aims for highest
+ * VCO and smallest D and M.
+ * Configured for Xilinx Virtex7 speed grade 2.
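+ * The governing relation is fout = (fin * M) / (D * O), with the
+ * VCO running at fin * M / D.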
+ */
+static int odin_mmcm_counter_calc(struct device *dev,
+				  u32 freq_input, u32 freq_output,
+				  u32 *d, u32 *m, u32 *o)
+{
+	u32 d_min, d_max;
+	u32 m_min, m_max, m_ideal;
+	u32 d_cur, m_cur, o_cur;
+	u32 best_diff, d_best, m_best, o_best;
+
+	/*
+	 * Check specified input frequency is within range
+	 */
+	if (freq_input < ODN_INPUT_CLOCK_SPEED_MIN) {
+		dev_err(dev, "Input frequency (%u hz) below minimum supported value (%u hz)\n",
+			freq_input, ODN_INPUT_CLOCK_SPEED_MIN);
+		return -EINVAL;
+	}
+	if (freq_input > ODN_INPUT_CLOCK_SPEED_MAX) {
+		dev_err(dev, "Input frequency (%u hz) above maximum supported value (%u hz)\n",
+			freq_input, ODN_INPUT_CLOCK_SPEED_MAX);
+		return -EINVAL;
+	}
+
+	/*
+	 * Check specified target frequency is within range
+	 */
+	if (freq_output < ODN_OUTPUT_CLOCK_SPEED_MIN) {
+		dev_err(dev, "Output frequency (%u hz) below minimum supported value (%u hz)\n",
+			freq_output, ODN_OUTPUT_CLOCK_SPEED_MIN);
+		return -EINVAL;
+	}
+	if (freq_output > ODN_OUTPUT_CLOCK_SPEED_MAX) {
+		dev_err(dev, "Output frequency (%u hz) above maximum supported value (%u hz)\n",
+			freq_output, ODN_OUTPUT_CLOCK_SPEED_MAX);
+		return -EINVAL;
+	}
+
+	/*
+	 * Calculate min and max for the input divider.
+	 * Refer to the Xilinx 7 series FPGAs clocking resources user guide,
+	 * equations 3-6 and 3-7.
+	 */
+	d_min = DIV_ROUND_UP(freq_input, ODN_PFD_MAX);
+	d_max = min(freq_input/ODN_PFD_MIN, (u32)ODN_DREG_VALUE_MAX);
+
+	/*
+	 * Calculate min and max for the feedback multiplier.
+	 * Refer to the Xilinx 7 series FPGAs clocking resources user guide,
+	 * equations 3-8 and 3-9.
+	 */
+	m_min = DIV_ROUND_UP((ODN_VCO_MIN * d_min), freq_input);
+	m_max = min(((ODN_VCO_MAX * d_max) / freq_input),
+		    (u32)ODN_MREG_VALUE_MAX);
+
+	for (d_cur = d_min; d_cur <= d_max; d_cur++) {
+		/*
+		 * Refer to the Xilinx 7 series FPGAs clocking resources
+		 * user guide, equation 3-10.
+		 */
+		m_ideal = min(((d_cur * ODN_VCO_MAX)/freq_input), m_max);
+
+		for (m_cur = m_ideal; m_cur >= m_min; m_cur -= 1) {
+			/**
+			 * Skip if VCO for given 'm' and 'd' value is not an
+			 * integer since fractional component is not supported
+			 */
+			if (((freq_input * m_cur) % d_cur) != 0)
+				continue;
+
+			/**
+			 * Skip if divider for given 'm' and 'd' value is not
+			 * an integer since fractional component is not
+			 * supported
+			 */
+			if ((freq_input * m_cur) % (d_cur * freq_output) != 0)
+				continue;
+
+			/**
+			 * Calculate output divider value.
+			 */
+			o_cur = (freq_input * m_cur)/(d_cur * freq_output);
+
+			*d = d_cur;
+			*m = m_cur;
+			*o = o_cur;
+			return 0;
+		}
+	}
+
+	/* Failed to find an exact solution with the highest VCO. Fall back to
+	 * a brute-force search, again prioritising a high VCO for the lowest
+	 * jitter.
+	 */
+	d_min = 1; d_max = (u32)ODN_DREG_VALUE_MAX;
+	m_min = 1; m_max = (u32)ODN_MREG_VALUE_MAX;
+	best_diff = 0xFFFFFFFF;
+
+	for (d_cur = d_min; d_cur <= d_max; d_cur++) {
+		for (m_cur = m_max; m_cur >= m_min; m_cur -= 1) {
+			u32 pfd, vco, o_avg, o_min, o_max;
+
+			pfd = freq_input / d_cur;
+			vco = pfd * m_cur;
+
+			if (pfd < ODN_PFD_MIN)
+				continue;
+
+			if (pfd > ODN_PFD_MAX)
+				continue;
+
+			if (vco < ODN_VCO_MIN)
+				continue;
+
+			if (vco > ODN_VCO_MAX)
+				continue;
+
+			/* A range of -1/+3 around o_avg gives us 100kHz
+			 * granularity. It can be extended further.
+			 */
+			o_avg = vco / freq_output;
+			o_min = (o_avg >= 2) ? (o_avg - 1) : 1;
+			o_max = o_avg + 3;
+			if (o_max > (u32)ODN_OREG_VALUE_MAX)
+				o_max = (u32)ODN_OREG_VALUE_MAX;
+
+			for (o_cur = o_min; o_cur <= o_max; o_cur++) {
+				u32 freq_cur, diff_cur;
+
+				freq_cur = vco / o_cur;
+
+				if (freq_cur > freq_output)
+					continue;
+
+				diff_cur = freq_output - freq_cur;
+
+				if (diff_cur == 0) {
+					/* Found an exact match */
+					*d = d_cur;
+					*m = m_cur;
+					*o = o_cur;
+					return 0;
+				}
+
+				if (diff_cur < best_diff) {
+					best_diff = diff_cur;
+					d_best = d_cur;
+					m_best = m_cur;
+					o_best = o_cur;
+				}
+			}
+		}
+	}
+
+	if (best_diff != 0xFFFFFFFF) {
+		dev_warn(dev, "Odin: No exact match; using closest frequency of %u Hz\n",
+			 freq_output - best_diff);
+		*d = d_best;
+		*m = m_best;
+		*o = o_best;
+		return 0;
+	}
+
+	dev_err(dev, "Odin: Unable to find integer values for d, m and o for requested frequency (%u)\n",
+		freq_output);
+
+	return -ERANGE;
+}
+
+static int odin_fpga_set_dut_core_clk(struct tc_device *tc,
+				      u32 input_clk, u32 output_clk)
+{
+	int err = 0;
+	u32 in_div, mul, out_div;
+	u32 high_time, low_time, edge, no_count;
+	u32 value;
+	void __iomem *base = tc->tcf.registers;
+	void __iomem *clk_blk_base = base + ODN_REG_BANK_ODN_CLK_BLK;
+	struct device *dev = &tc->pdev->dev;
+
+	err = odin_mmcm_counter_calc(dev, input_clk, output_clk, &in_div,
+				     &mul, &out_div);
+	if (err != 0)
+		return err;
+
+	/* Put DUT into reset */
+	iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK,
+		  base + ODN_CORE_EXTERNAL_RESETN);
+	msleep(20);
+
+	/* Put DUT Core MMCM into reset */
+	iowrite32(ODN_CLK_GEN_RESET_DUT_CORE_MMCM_MASK,
+		  base + ODN_CORE_CLK_GEN_RESET);
+	msleep(20);
+
+	/* Calculate the register fields for output divider */
+	odin_mmcm_reg_param_calc(out_div, &high_time, &low_time,
+				 &edge, &no_count);
+
+	/* Read-modify-write the required fields to output divider register 1 */
+	value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER1);
+	REG_FIELD_SET(value, high_time,
+			ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME);
+	iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER1);
+
+	/* Read-modify-write the required fields to output divider register 2 */
+	value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER2);
+	REG_FIELD_SET(value, edge,
+			ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE);
+	REG_FIELD_SET(value, no_count,
+			ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT);
+	iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER2);
+
+	/* Calculate the register fields for multiplier */
+	odin_mmcm_reg_param_calc(mul, &high_time, &low_time,
+				 &edge, &no_count);
+
+	/* Read-modify-write the required fields to multiplier register 1*/
+	value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1);
+	REG_FIELD_SET(value, high_time,
+			ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME);
+	iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1);
+
+	/* Read-modify-write the required fields to multiplier register 2 */
+	value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2);
+	REG_FIELD_SET(value, edge,
+			ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE);
+	REG_FIELD_SET(value, no_count,
+			ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT);
+	iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2);
+
+	/* Calculate the register fields for input divider */
+	odin_mmcm_reg_param_calc(in_div, &high_time, &low_time,
+				 &edge, &no_count);
+
+	/* Read-modify-write the required fields to input divider register 1 */
+	value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1);
+	REG_FIELD_SET(value, high_time,
+			 ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			 ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME);
+	REG_FIELD_SET(value, edge,
+			 ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE);
+	REG_FIELD_SET(value, no_count,
+			 ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT);
+	iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1);
+
+	/* Bring DUT clock MMCM out of reset */
+	iowrite32(0, tc->tcf.registers + ODN_CORE_CLK_GEN_RESET);
+
+	err = tc_iopol32_nonzero(ODN_MMCM_LOCK_STATUS_DUT_CORE,
+				 base + ODN_CORE_MMCM_LOCK_STATUS);
+	if (err != 0) {
+		dev_err(dev, "MMCM failed to lock for DUT core\n");
+		return err;
+	}
+
+	/* Bring DUT out of reset */
+	iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK |
+		  ODN_EXTERNAL_RESETN_DUT_MASK,
+		  tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN);
+	msleep(20);
+
+	dev_info(dev, "DUT core clock set-up successful\n");
+
+	return err;
+}
+
+static int odin_fpga_set_dut_if_clk(struct tc_device *tc,
+				    u32 input_clk, u32 output_clk)
+{
+	int err = 0;
+	u32 in_div, mul, out_div;
+	u32 high_time, low_time, edge, no_count;
+	u32 value;
+	void __iomem *base = tc->tcf.registers;
+	void __iomem *clk_blk_base = base + ODN_REG_BANK_ODN_CLK_BLK;
+	struct device *dev = &tc->pdev->dev;
+
+	err = odin_mmcm_counter_calc(dev, input_clk, output_clk,
+				     &in_div, &mul, &out_div);
+	if (err != 0)
+		return err;
+
+	/* Put DUT into reset */
+	iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK,
+		  base + ODN_CORE_EXTERNAL_RESETN);
+	msleep(20);
+
+	/* Put DUT Core MMCM into reset */
+	iowrite32(ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK,
+		  base + ODN_CORE_CLK_GEN_RESET);
+	msleep(20);
+
+	/* Calculate the register fields for output divider */
+	odin_mmcm_reg_param_calc(out_div, &high_time, &low_time,
+				 &edge, &no_count);
+
+	/* Read-modify-write the required fields to output divider register 1 */
+	value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1);
+	REG_FIELD_SET(value, high_time,
+			ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME);
+	iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1);
+
+	/* Read-modify-write the required fields to output divider register 2 */
+	value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2);
+	REG_FIELD_SET(value, edge,
+			ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE);
+	REG_FIELD_SET(value, no_count,
+			ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT);
+	iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2);
+
+	/* Calculate the register fields for multiplier */
+	odin_mmcm_reg_param_calc(mul, &high_time, &low_time, &edge, &no_count);
+
+	/* Read-modify-write the required fields to multiplier register 1*/
+	value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1);
+	REG_FIELD_SET(value, high_time,
+			ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME);
+	iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1);
+
+	/* Read-modify-write the required fields to multiplier register 2 */
+	value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2);
+	REG_FIELD_SET(value, edge,
+			ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE);
+	REG_FIELD_SET(value, no_count,
+			ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT);
+	iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2);
+
+	/* Calculate the register fields for input divider */
+	odin_mmcm_reg_param_calc(in_div, &high_time, &low_time,
+				 &edge, &no_count);
+
+	/* Read-modify-write the required fields to input divider register 1 */
+	value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1);
+	REG_FIELD_SET(value, high_time,
+			 ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			 ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME);
+	REG_FIELD_SET(value, edge,
+			 ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE);
+	REG_FIELD_SET(value, no_count,
+			 ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT);
+	iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1);
+
+	/* Bring DUT interface clock MMCM out of reset */
+	iowrite32(0, tc->tcf.registers + ODN_CORE_CLK_GEN_RESET);
+
+	err = tc_iopol32_nonzero(ODN_MMCM_LOCK_STATUS_DUT_IF,
+				 base + ODN_CORE_MMCM_LOCK_STATUS);
+	if (err != 0) {
+		dev_err(dev, "MMCM failed to lock for DUT IF\n");
+		return err;
+	}
+
+	/* Bring DUT out of reset */
+	iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK |
+		  ODN_EXTERNAL_RESETN_DUT_MASK,
+		  tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN);
+	msleep(20);
+
+	dev_info(dev, "DUT IF clock set-up successful\n");
+
+	return err;
+}
+
+static void odin_fpga_update_dut_clk_freq(struct tc_device *tc,
+					  int *core_clock, int *mem_clock)
+{
+	struct device *dev = &tc->pdev->dev;
+
+#if defined(SUPPORT_FPGA_DUT_CLK_INFO)
+	int dut_clk_info = ioread32(tc->tcf.registers + ODN_CORE_DUT_CLK_INFO);
+
+	if ((dut_clk_info != 0) && (dut_clk_info != 0xbaadface) &&
+	    (dut_clk_info != 0xffffffff)) {
+		dev_info(dev, "ODN_DUT_CLK_INFO = %08x\n", dut_clk_info);
+		dev_info(dev, "Overriding provided DUT clock values: core %i, mem %i\n",
+			 *core_clock, *mem_clock);
+
+		*core_clock = ((dut_clk_info & ODN_DUT_CLK_INFO_CORE_MASK)
+			       >> ODN_DUT_CLK_INFO_CORE_SHIFT) * 1000000;
+
+		*mem_clock = ((dut_clk_info & ODN_DUT_CLK_INFO_MEM_MASK)
+			       >> ODN_DUT_CLK_INFO_MEM_SHIFT) * 1000000;
+	}
+#endif
+
+	dev_info(dev, "DUT clock values: core %i, mem %i\n",
+		 *core_clock, *mem_clock);
+}
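+
+/*
+ * Illustrative decode, assuming ODN_DUT_CLK_INFO shares the layout of the TCF
+ * DUT_CLK_INFO register defined later in this patch (core in bits [15:0],
+ * mem in bits [31:16], both in MHz):
+ *
+ *	dut_clk_info = 0x019000c8
+ *	  core field = 0x00c8 = 200  ->  *core_clock = 200000000 Hz
+ *	  mem field  = 0x0190 = 400  ->  *mem_clock  = 400000000 Hz
+ *
+ * The values 0, 0xbaadface and 0xffffffff are treated as "no valid info".
+ */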
+
+static int odin_hard_reset_fpga(struct tc_device *tc,
+				int core_clock, int mem_clock)
+{
+	int err = 0;
+
+	odin_fpga_update_dut_clk_freq(tc, &core_clock, &mem_clock);
+
+	err = odin_fpga_set_dut_core_clk(tc, ODN_INPUT_CLOCK_SPEED, core_clock);
+	if (err != 0)
+		goto err_out;
+
+	err = odin_fpga_set_dut_if_clk(tc, ODN_INPUT_CLOCK_SPEED, mem_clock);
+
+err_out:
+	return err;
+}
+
+static int odin_hard_reset_bonnie(struct tc_device *tc)
+{
+	int reset_cnt = 0;
+	bool aligned = false;
+	int alignment_found;
+
+	msleep(100);
+
+	/* It is essential to do an SPI reset once on power-up before
+	 * doing any DUT reads via the SPI interface.
+	 */
+	/* Set bit 1 low */
+	iowrite32(1, tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN);
+	msleep(20);
+
+	/* Set bit 1 high */
+	iowrite32(3, tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN);
+	msleep(20);
+
+	while (!aligned && (reset_cnt < 20)) {
+		int bank;
+
+		/* Reset the DUT to allow the SAI to retrain (set bit 0 low) */
+		iowrite32(2, tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN);
+
+		/* Hold the DUT in reset for 50 ms */
+		msleep(50);
+
+		/* Take the DUT out of reset (set bit 0 high) */
+		iowrite32(3, tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN);
+		reset_cnt++;
+
+		/* Wait 200 ms for the DUT to stabilise */
+		msleep(200);
+
+		/* Check the odin Multi Clocked bank Align status */
+		alignment_found = read_odin_mca_status(tc);
+		dev_info(&tc->pdev->dev, "Odin mca_status indicates %s\n",
+			 alignment_found ? "aligned" : "UNALIGNED");
+
+		/* Check the DUT MCA status */
+		alignment_found = read_dut_mca_status(tc);
+		dev_info(&tc->pdev->dev, "DUT mca_status indicates %s\n",
+			 alignment_found ? "aligned" : "UNALIGNED");
+
+		/* If all banks have aligned then the reset was successful */
+		for (bank = 0; bank < 10; bank++) {
+			int dut_aligned = 0;
+			int odin_aligned = 0;
+
+			odin_aligned = get_odin_sai_status(tc, bank);
+			dut_aligned = get_dut_sai_status(tc, bank);
+
+			if (dut_aligned == SAI_STATUS_ERROR)
+				return SAI_STATUS_ERROR;
+
+			if (!dut_aligned || !odin_aligned) {
+				aligned = false;
+				break;
+			}
+			aligned = true;
+		}
+
+		if (aligned) {
+			dev_info(&tc->pdev->dev,
+				"all banks have aligned\n");
+			break;
+		}
+
+		dev_warn(&tc->pdev->dev,
+			"Not all banks have aligned; trying again\n");
+	}
+
+	if (!aligned)
+		dev_warn(&tc->pdev->dev, "odin_hard_reset failed\n");
+
+	return (aligned) ? 0 : 1; /* return 0 for success */
+}
+
+static void odin_set_mem_latency(struct tc_device *tc,
+				 int mem_latency, int mem_wresp_latency)
+{
+	u32 regval = 0;
+
+	if (mem_latency <= 4) {
+		/* The total memory read latency cannot be lower than the
+		 * amount of cycles consumed by the hardware to do a read.
+		 * Set the memory read latency to 0 cycles.
+		 */
+		mem_latency = 0;
+	} else {
+		mem_latency -= 4;
+
+		dev_info(&tc->pdev->dev,
+			 "Setting memory read latency to %i cycles\n",
+			 mem_latency);
+	}
+
+	if (mem_wresp_latency <= 2) {
+		/* The total memory write latency cannot be lower than the
+		 * amount of cycles consumed by the hardware to do a write.
+		 * Set the memory write latency to 0 cycles.
+		 */
+		mem_wresp_latency = 0;
+	} else {
+		mem_wresp_latency -= 2;
+
+		dev_info(&tc->pdev->dev,
+			 "Setting memory write response latency to %i cycles\n",
+			 mem_wresp_latency);
+	}
+
+	mem_latency |= mem_wresp_latency << 16;
+
+	spi_write(tc, 0x1009, mem_latency);
+
+	if (spi_read(tc, 0x1009, &regval) != 0) {
+		dev_err(&tc->pdev->dev,
+			"Failed to read back memory latency register");
+		return;
+	}
+
+	if (mem_latency != regval) {
+		dev_err(&tc->pdev->dev,
+			"Memory latency register doesn't match requested value"
+			" (actual: %#08x, expected: %#08x)\n",
+			regval, mem_latency);
+	}
+}
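+
+/*
+ * Worked example for the packing above: requesting a read latency of 10 and a
+ * write response latency of 6 programs 10 - 4 = 6 and 6 - 2 = 4 respectively,
+ * so SPI register 0x1009 receives (4 << 16) | 6 = 0x00040006: read latency in
+ * the low half-word, write response latency in the high half-word.
+ */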
+
+#endif /* defined(SUPPORT_RGX) */
+
+static void odin_set_mem_mode(struct tc_device *tc)
+{
+	u32 val;
+
+	if (tc->version != ODIN_VERSION_FPGA)
+		return;
+
+	/* Enable memory offset to be applied to DUT and PDP1 */
+	iowrite32(0x80000A10, tc->tcf.registers + ODN_CORE_DUT_CTRL1);
+
+	/* Apply memory offset to GPU and PDP1 to point to DDR memory.
+	 * Enable HDMI.
+	 */
+	val = (0x4 << ODN_CORE_CONTROL_DUT_OFFSET_SHIFT) |
+	      (0x4 << ODN_CORE_CONTROL_PDP1_OFFSET_SHIFT) |
+	      (0x2 << ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT) |
+	      (0x1 << ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT);
+	iowrite32(val, tc->tcf.registers + ODN_CORE_CORE_CONTROL);
+}
+
+/* Do a hard reset on the DUT */
+static int odin_hard_reset(struct tc_device *tc, int core_clock, int mem_clock)
+{
+#if defined(SUPPORT_RGX)
+	if (tc->version == ODIN_VERSION_TCF_BONNIE)
+		return odin_hard_reset_bonnie(tc);
+	if (tc->version == ODIN_VERSION_FPGA)
+		return odin_hard_reset_fpga(tc, core_clock, mem_clock);
+
+	dev_err(&tc->pdev->dev, "Invalid Odin version");
+	return 1;
+#else /* defined(SUPPORT_RGX) */
+	return 0;
+#endif /* defined(SUPPORT_RGX) */
+}
+
+static int odin_hw_init(struct tc_device *tc, int core_clock, int mem_clock,
+			int mem_latency, int mem_wresp_latency)
+{
+	int err;
+
+	err = odin_hard_reset(tc, core_clock, mem_clock);
+	if (err) {
+		dev_err(&tc->pdev->dev, "Failed to initialise Odin");
+		goto err_out;
+	}
+
+	odin_set_mem_mode(tc);
+
+#if defined(SUPPORT_RGX)
+	if (tc->version == ODIN_VERSION_FPGA)
+		odin_set_mem_latency(tc, mem_latency, mem_wresp_latency);
+#endif /* defined(SUPPORT_RGX) */
+
+err_out:
+	return err;
+}
+
+static int odin_enable_irq(struct tc_device *tc)
+{
+	int err = 0;
+
+#if defined(TC_FAKE_INTERRUPTS)
+	setup_timer(&tc->timer, tc_irq_fake_wrapper,
+		(unsigned long)tc);
+	mod_timer(&tc->timer,
+		jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS));
+#else
+	iowrite32(0, tc->tcf.registers +
+		ODN_CORE_INTERRUPT_ENABLE);
+	iowrite32(0xffffffff, tc->tcf.registers +
+		ODN_CORE_INTERRUPT_CLR);
+
+	dev_info(&tc->pdev->dev,
+		"Registering IRQ %d for use by Odin\n",
+		tc->pdev->irq);
+
+	err = request_irq(tc->pdev->irq, odin_irq_handler,
+		IRQF_SHARED, DRV_NAME, tc);
+
+	if (err) {
+		dev_err(&tc->pdev->dev,
+			"Error - IRQ %d failed to register\n",
+			tc->pdev->irq);
+	} else {
+		dev_info(&tc->pdev->dev,
+			"IRQ %d was successfully registered for use by Odin\n",
+			tc->pdev->irq);
+	}
+#endif
+	return err;
+}
+
+static void odin_disable_irq(struct tc_device *tc)
+{
+#if defined(TC_FAKE_INTERRUPTS)
+	del_timer_sync(&tc->timer);
+#else
+	iowrite32(0, tc->tcf.registers +
+			ODN_CORE_INTERRUPT_ENABLE);
+	iowrite32(0xffffffff, tc->tcf.registers +
+			ODN_CORE_INTERRUPT_CLR);
+
+	free_irq(tc->pdev->irq, tc);
+#endif
+}
+
+static enum tc_version_t
+odin_detect_daughterboard_version(struct tc_device *tc)
+{
+	u32 reg = ioread32(tc->tcf.registers + ODN_REG_BANK_DB_TYPE_ID);
+	u32 val = reg;
+
+	val = (val & ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK) >>
+		ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT;
+
+	switch (val) {
+	default:
+		dev_err(&tc->pdev->dev,
+			"Unknown odin version ID type %#x "
+			"(DB_TYPE_ID: %#08x)\n",
+			val, reg);
+		return TC_INVALID_VERSION;
+	case 1:
+		dev_info(&tc->pdev->dev, "DUT: Bonnie TC\n");
+		return ODIN_VERSION_TCF_BONNIE;
+	case 2:
+	case 3:
+		dev_info(&tc->pdev->dev, "DUT: FPGA\n");
+		return ODIN_VERSION_FPGA;
+	}
+}
+
+static int odin_dev_init(struct tc_device *tc, struct pci_dev *pdev,
+			 int pdp_mem_size, int secure_mem_size)
+{
+	int err;
+	u32 val;
+
+	/* Reserve and map the tcf system registers */
+	err = setup_io_region(pdev, &tc->tcf,
+		ODN_SYS_BAR, ODN_SYS_REGS_OFFSET, ODN_SYS_REGS_SIZE);
+	if (err)
+		goto err_out;
+
+	tc->version = odin_detect_daughterboard_version(tc);
+	if (tc->version == TC_INVALID_VERSION) {
+		err = -EIO;
+		goto err_odin_unmap_sys_registers;
+	}
+
+	/* Setup card memory */
+	tc->tc_mem.base = pci_resource_start(pdev, ODN_DDR_BAR);
+	tc->tc_mem.size = pci_resource_len(pdev, ODN_DDR_BAR);
+
+	if (tc->tc_mem.size < pdp_mem_size) {
+		dev_err(&pdev->dev,
+			"Odin MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu",
+			ODN_DDR_BAR,
+			(unsigned long)tc->tc_mem.size,
+			(unsigned long)pdp_mem_size);
+
+		err = -EIO;
+		goto err_odin_unmap_sys_registers;
+	}
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	if (tc->tc_mem.size <
+	    (pdp_mem_size + secure_mem_size)) {
+		dev_err(&pdev->dev,
+			"Odin MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu plus the requested secure heap size %lu",
+			ODN_DDR_BAR,
+			(unsigned long)tc->tc_mem.size,
+			(unsigned long)pdp_mem_size,
+			(unsigned long)secure_mem_size);
+		err = -EIO;
+		goto err_odin_unmap_sys_registers;
+	}
+#endif
+
+	err = tc_mtrr_setup(tc);
+	if (err)
+		goto err_odin_unmap_sys_registers;
+
+	/* Setup ranges for the device heaps */
+	tc->pdp_heap_mem_size = pdp_mem_size;
+
+	/* We know ext_heap_mem_size won't underflow as we've compared
+	 * tc_mem.size against the pdp_mem_size value earlier
+	 */
+	tc->ext_heap_mem_size =
+		tc->tc_mem.size - tc->pdp_heap_mem_size;
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	tc->ext_heap_mem_size -= secure_mem_size;
+#endif
+
+	if (tc->ext_heap_mem_size < TC_EXT_MINIMUM_MEM_SIZE) {
+		dev_warn(&pdev->dev,
+			"Odin MEM region (bar 4) has size of %lu, with %lu pdp_mem_size only %lu bytes are left for ext device, which looks too small",
+			(unsigned long)tc->tc_mem.size,
+			(unsigned long)pdp_mem_size,
+			(unsigned long)tc->ext_heap_mem_size);
+		/* Continue as this is only a 'helpful warning' not a hard
+		 * requirement
+		 */
+	}
+	tc->ext_heap_mem_base = tc->tc_mem.base;
+	tc->pdp_heap_mem_base =
+		tc->tc_mem.base + tc->ext_heap_mem_size;
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	tc->secure_heap_mem_base = tc->pdp_heap_mem_base +
+		tc->pdp_heap_mem_size;
+	tc->secure_heap_mem_size = secure_mem_size;
+#endif
+
+#if defined(SUPPORT_ION)
+	err = tc_ion_init(tc, ODN_DDR_BAR);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialise ION\n");
+		goto err_odin_unmap_sys_registers;
+	}
+#endif
+
+	val = ioread32(tc->tcf.registers + ODN_CORE_REVISION);
+	dev_info(&pdev->dev, "ODN_CORE_REVISION = %08x\n", val);
+
+	val = ioread32(tc->tcf.registers + ODN_CORE_CHANGE_SET);
+	dev_info(&pdev->dev, "ODN_CORE_CHANGE_SET = %08x\n", val);
+
+	val = ioread32(tc->tcf.registers + ODN_CORE_USER_ID);
+	dev_info(&pdev->dev, "ODN_CORE_USER_ID = %08x\n", val);
+
+	val = ioread32(tc->tcf.registers + ODN_CORE_USER_BUILD);
+	dev_info(&pdev->dev, "ODN_CORE_USER_BUILD = %08x\n", val);
+
+err_out:
+	return err;
+
+err_odin_unmap_sys_registers:
+	dev_info(&pdev->dev,
+		 "%s: failed - unmapping the io regions.\n", __func__);
+
+	iounmap(tc->tcf.registers);
+	release_pci_io_addr(pdev, ODN_SYS_BAR,
+			 tc->tcf.region.base, tc->tcf.region.size);
+	goto err_out;
+}
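+
+/*
+ * Resulting card memory layout, as derived from the assignments above:
+ *
+ *	tc_mem.base
+ *	+------------------+------------------+---------------------------+
+ *	| ext heap         | PDP heap         | secure heap (optional)    |
+ *	| ext_heap_mem_*   | pdp_heap_mem_*   | secure_heap_mem_*         |
+ *	+------------------+------------------+---------------------------+
+ */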
+
+static void odin_dev_cleanup(struct tc_device *tc)
+{
+#if defined(SUPPORT_ION)
+	tc_ion_deinit(tc, ODN_DDR_BAR);
+#endif
+
+	tc_mtrr_cleanup(tc);
+
+	iounmap(tc->tcf.registers);
+
+	release_pci_io_addr(tc->pdev,
+			ODN_SYS_BAR,
+			tc->tcf.region.base,
+			tc->tcf.region.size);
+}
+
+static u32 odin_interrupt_id_to_flag(int interrupt_id)
+{
+	switch (interrupt_id) {
+	case TC_INTERRUPT_PDP:
+		return ODN_INTERRUPT_ENABLE_PDP1;
+	case TC_INTERRUPT_EXT:
+		return ODN_INTERRUPT_ENABLE_DUT;
+	default:
+		BUG();
+	}
+}
+
+int odin_init(struct tc_device *tc, struct pci_dev *pdev,
+	      int core_clock, int mem_clock,
+	      int pdp_mem_size, int secure_mem_size,
+	      int mem_latency, int mem_wresp_latency)
+{
+	int err = 0;
+
+	err = odin_dev_init(tc, pdev, pdp_mem_size, secure_mem_size);
+	if (err) {
+		dev_err(&pdev->dev, "odin_dev_init failed\n");
+		goto err_out;
+	}
+
+	err = odin_hw_init(tc, core_clock, mem_clock,
+			   mem_latency, mem_wresp_latency);
+	if (err) {
+		dev_err(&pdev->dev, "odin_hw_init failed\n");
+		goto err_dev_cleanup;
+	}
+
+	err = odin_enable_irq(tc);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Failed to initialise IRQ\n");
+		goto err_dev_cleanup;
+	}
+
+err_out:
+	return err;
+
+err_dev_cleanup:
+	odin_dev_cleanup(tc);
+	goto err_out;
+}
+
+int odin_cleanup(struct tc_device *tc)
+{
+	odin_disable_irq(tc);
+	odin_dev_cleanup(tc);
+
+	return 0;
+}
+
+int odin_register_pdp_device(struct tc_device *tc)
+{
+	int err = 0;
+	resource_size_t reg_start = pci_resource_start(tc->pdev, ODN_SYS_BAR);
+	struct resource pdp_resources_odin[] = {
+		DEFINE_RES_MEM_NAMED(reg_start +
+				ODN_PDP_REGS_OFFSET, /* start */
+				ODN_PDP_REGS_SIZE, /* size */
+				"pdp-regs"),
+		DEFINE_RES_MEM_NAMED(reg_start +
+				ODN_SYS_REGS_OFFSET +
+				ODN_REG_BANK_ODN_CLK_BLK +
+				ODN_PDP_P_CLK_OUT_DIVIDER_REG1, /* start */
+				ODN_PDP_P_CLK_IN_DIVIDER_REG -
+				ODN_PDP_P_CLK_OUT_DIVIDER_REG1 + 4, /* size */
+				"pll-regs"),
+		DEFINE_RES_MEM_NAMED(reg_start +
+				ODN_SYS_REGS_OFFSET +
+				ODN_REG_BANK_CORE, /* start */
+				ODN_CORE_MMCM_LOCK_STATUS + 4, /* size */
+				"odn-core"),
+	};
+
+	struct tc_pdp_platform_data pdata = {
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+		.ion_device = tc->ion_device,
+		.ion_heap_id = ION_HEAP_TC_PDP,
+#endif
+		.memory_base = tc->tc_mem.base,
+		.pdp_heap_memory_base = tc->pdp_heap_mem_base,
+		.pdp_heap_memory_size = tc->pdp_heap_mem_size,
+	};
+	struct platform_device_info pdp_device_info = {
+		.parent = &tc->pdev->dev,
+		.name = ODN_DEVICE_NAME_PDP,
+		.id = -2,
+		.data = &pdata,
+		.size_data = sizeof(pdata),
+#if (TC_MEMORY_CONFIG == TC_MEMORY_LOCAL) || \
+	(TC_MEMORY_CONFIG == TC_MEMORY_HYBRID)
+		/*
+		 * The PDP cannot address system memory, so there is no
+		 * DMA limitation.
+		 */
+		.dma_mask = DMA_BIT_MASK(64),
+#else
+		.dma_mask = DMA_BIT_MASK(32),
+#endif
+	};
+
+	pdp_device_info.res = pdp_resources_odin;
+	pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_odin);
+
+	tc->pdp_dev = platform_device_register_full(&pdp_device_info);
+	if (IS_ERR(tc->pdp_dev)) {
+		err = PTR_ERR(tc->pdp_dev);
+		dev_err(&tc->pdev->dev,
+			"Failed to register PDP device (%d)\n", err);
+		tc->pdp_dev = NULL;
+		goto err_out;
+	}
+
+err_out:
+	return err;
+}
+
+int odin_register_ext_device(struct tc_device *tc)
+{
+#if defined(SUPPORT_RGX)
+	int err = 0;
+	struct resource odin_rogue_resources[] = {
+		DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
+							ODN_DUT_SOCIF_BAR),
+				     ODN_DUT_SOCIF_SIZE, "rogue-regs"),
+	};
+	struct tc_rogue_platform_data pdata = {
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+		.ion_device = tc->ion_device,
+		.ion_heap_id = ION_HEAP_TC_ROGUE,
+#endif
+		.tc_memory_base = tc->tc_mem.base,
+		.pdp_heap_memory_base = tc->pdp_heap_mem_base,
+		.pdp_heap_memory_size = tc->pdp_heap_mem_size,
+		.rogue_heap_memory_base = tc->ext_heap_mem_base,
+		.rogue_heap_memory_size = tc->ext_heap_mem_size,
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+		.secure_heap_memory_base = tc->secure_heap_mem_base,
+		.secure_heap_memory_size = tc->secure_heap_mem_size,
+#endif
+	};
+	struct platform_device_info odin_rogue_dev_info = {
+		.parent = &tc->pdev->dev,
+		.name = TC_DEVICE_NAME_ROGUE,
+		.id = -2,
+		.res = odin_rogue_resources,
+		.num_res = ARRAY_SIZE(odin_rogue_resources),
+		.data = &pdata,
+		.size_data = sizeof(pdata),
+#if (TC_MEMORY_CONFIG == TC_MEMORY_LOCAL)
+		/*
+		 * The FPGA cannot address system memory, so there is no DMA
+		 * limitation.
+		 */
+		.dma_mask = DMA_BIT_MASK(64),
+#else
+		.dma_mask = DMA_BIT_MASK(32),
+#endif
+	};
+
+	tc->ext_dev = platform_device_register_full(&odin_rogue_dev_info);
+
+	if (IS_ERR(tc->ext_dev)) {
+		err = PTR_ERR(tc->ext_dev);
+		dev_err(&tc->pdev->dev,
+			"Failed to register rogue device (%d)\n", err);
+		tc->ext_dev = NULL;
+	}
+	return err;
+#else /* defined(SUPPORT_RGX) */
+	return 0;
+#endif /* defined(SUPPORT_RGX) */
+}
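+
+/*
+ * Usage note (based on generic platform-bus behaviour, not on this patch):
+ * the devices registered above are matched by name, so the PDP and RGX
+ * platform drivers are expected to register with .driver.name set to
+ * ODN_DEVICE_NAME_PDP / TC_DEVICE_NAME_ROGUE. An id of -2 is
+ * PLATFORM_DEVID_AUTO, which lets the core assign a unique device id.
+ */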
+
+void odin_enable_interrupt_register(struct tc_device *tc,
+				    int interrupt_id)
+{
+	u32 val;
+	u32 flag;
+
+	switch (interrupt_id) {
+	case TC_INTERRUPT_PDP:
+		dev_info(&tc->pdev->dev,
+			"Enabling Odin PDP interrupts\n");
+		break;
+	case TC_INTERRUPT_EXT:
+		dev_info(&tc->pdev->dev,
+			"Enabling Odin DUT interrupts\n");
+		break;
+	default:
+		dev_err(&tc->pdev->dev,
+			"Error - illegal interrupt id\n");
+		return;
+	}
+
+	val = ioread32(tc->tcf.registers +
+		       ODN_CORE_INTERRUPT_ENABLE);
+	flag = odin_interrupt_id_to_flag(interrupt_id);
+	val |= flag;
+	iowrite32(val, tc->tcf.registers +
+		  ODN_CORE_INTERRUPT_ENABLE);
+}
+
+void odin_disable_interrupt_register(struct tc_device *tc,
+				     int interrupt_id)
+{
+	u32 val;
+
+	switch (interrupt_id) {
+	case TC_INTERRUPT_PDP:
+		dev_info(&tc->pdev->dev,
+			"Disabling Odin PDP interrupts\n");
+		break;
+	case TC_INTERRUPT_EXT:
+		dev_info(&tc->pdev->dev,
+			"Disabling Odin DUT interrupts\n");
+		break;
+	default:
+		dev_err(&tc->pdev->dev,
+			"Error - illegal interrupt id\n");
+		return;
+	}
+	val = ioread32(tc->tcf.registers +
+		       ODN_CORE_INTERRUPT_ENABLE);
+	val &= ~(odin_interrupt_id_to_flag(interrupt_id));
+	iowrite32(val, tc->tcf.registers +
+		  ODN_CORE_INTERRUPT_ENABLE);
+}
+
+irqreturn_t odin_irq_handler(int irq, void *data)
+{
+	u32 interrupt_status;
+	u32 interrupt_clear = 0;
+	unsigned long flags;
+	irqreturn_t ret = IRQ_NONE;
+	struct tc_device *tc = (struct tc_device *)data;
+
+	spin_lock_irqsave(&tc->interrupt_handler_lock, flags);
+
+#if defined(TC_FAKE_INTERRUPTS)
+	/* If we're faking interrupts pretend we got both ext and PDP ints */
+	interrupt_status = ODN_INTERRUPT_STATUS_DUT
+		| ODN_INTERRUPT_STATUS_PDP1;
+#else
+	interrupt_status = ioread32(tc->tcf.registers +
+				    ODN_CORE_INTERRUPT_STATUS);
+#endif
+
+	if (interrupt_status & ODN_INTERRUPT_STATUS_DUT) {
+		struct tc_interrupt_handler *ext_int =
+			&tc->interrupt_handlers[TC_INTERRUPT_EXT];
+
+		if (ext_int->enabled && ext_int->handler_function) {
+			ext_int->handler_function(ext_int->handler_data);
+			interrupt_clear |= ODN_INTERRUPT_CLEAR_DUT;
+		}
+		ret = IRQ_HANDLED;
+	}
+	if (interrupt_status & ODN_INTERRUPT_STATUS_PDP1) {
+		struct tc_interrupt_handler *pdp_int =
+			&tc->interrupt_handlers[TC_INTERRUPT_PDP];
+
+		if (pdp_int->enabled && pdp_int->handler_function) {
+			pdp_int->handler_function(pdp_int->handler_data);
+			interrupt_clear |= ODN_INTERRUPT_CLEAR_PDP1;
+		}
+		ret = IRQ_HANDLED;
+	}
+
+	if (interrupt_clear)
+		iowrite32(interrupt_clear,
+			  tc->tcf.registers + ODN_CORE_INTERRUPT_CLR);
+
+	spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags);
+
+	return ret;
+}
+
+int odin_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll)
+{
+	*tmp = 0;
+	*pll = 0;
+	return 0;
+}
+
+int odin_sys_strings(struct tc_device *tc,
+		     char *str_fpga_rev, size_t size_fpga_rev,
+		     char *str_tcf_core_rev, size_t size_tcf_core_rev,
+		     char *str_tcf_core_target_build_id,
+		     size_t size_tcf_core_target_build_id,
+		     char *str_pci_ver, size_t size_pci_ver,
+		     char *str_macro_ver, size_t size_macro_ver)
+{
+	u32 val;
+	char temp_str[12];
+
+	/* Read the Odin major and minor revision ID register Rx-xx */
+	val = ioread32(tc->tcf.registers + ODN_CORE_REVISION);
+
+	snprintf(str_tcf_core_rev,
+		 size_tcf_core_rev,
+		 "%d.%d",
+		 HEX2DEC((val & ODN_REVISION_MAJOR_MASK)
+			 >> ODN_REVISION_MAJOR_SHIFT),
+		 HEX2DEC((val & ODN_REVISION_MINOR_MASK)
+			 >> ODN_REVISION_MINOR_SHIFT));
+
+	dev_info(&tc->pdev->dev, "Odin core revision %s\n",
+		 str_tcf_core_rev);
+
+	/* Read the Odin register containing the Perforce changelist
+	 * value that the FPGA build was generated from
+	 */
+	val = ioread32(tc->tcf.registers + ODN_CORE_CHANGE_SET);
+
+	snprintf(str_tcf_core_target_build_id,
+		 size_tcf_core_target_build_id,
+		 "%d",
+		 (val & ODN_CHANGE_SET_SET_MASK)
+		 >> ODN_CHANGE_SET_SET_SHIFT);
+
+	/* Read the Odin User_ID register containing the User ID for
+	 * identification of a modified build
+	 */
+	val = ioread32(tc->tcf.registers + ODN_CORE_USER_ID);
+
+	snprintf(temp_str,
+		 sizeof(temp_str),
+		 "%d",
+		 HEX2DEC((val & ODN_USER_ID_ID_MASK)
+			 >> ODN_USER_ID_ID_SHIFT));
+
+	/* Read the Odin User_Build register containing the User build
+	 * number for identification of modified builds
+	 */
+	val = ioread32(tc->tcf.registers + ODN_CORE_USER_BUILD);
+
+	snprintf(temp_str,
+		 sizeof(temp_str),
+		 "%d",
+		 HEX2DEC((val & ODN_USER_BUILD_BUILD_MASK)
+			 >> ODN_USER_BUILD_BUILD_SHIFT));
+
+	return 0;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_odin.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_odin.h
new file mode 100644
index 0000000..45f5801
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tc_odin.h
@@ -0,0 +1,75 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _ODIN_DRV_H
+#define _ODIN_DRV_H
+
+#include "tc_drv_internal.h"
+#include "odin_defs.h"
+
+int odin_init(struct tc_device *tc, struct pci_dev *pdev,
+	      int core_clock, int mem_clock,
+	      int pdp_mem_size, int secure_mem_size,
+	      int mem_latency, int mem_wresp_latency);
+int odin_cleanup(struct tc_device *tc);
+
+int odin_register_pdp_device(struct tc_device *tc);
+int odin_register_ext_device(struct tc_device *tc);
+
+void odin_enable_interrupt_register(struct tc_device *tc,
+				    int interrupt_id);
+void odin_disable_interrupt_register(struct tc_device *tc,
+				     int interrupt_id);
+
+irqreturn_t odin_irq_handler(int irq, void *data);
+
+int odin_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll);
+int odin_sys_strings(struct tc_device *tc,
+		     char *str_fpga_rev, size_t size_fpga_rev,
+		     char *str_tcf_core_rev, size_t size_tcf_core_rev,
+		     char *str_tcf_core_target_build_id,
+		     size_t size_tcf_core_target_build_id,
+		     char *str_pci_ver, size_t size_pci_ver,
+		     char *str_macro_ver, size_t size_macro_ver);
+
+#endif /* _ODIN_DRV_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tcf_clk_ctrl.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tcf_clk_ctrl.h
new file mode 100644
index 0000000..cc7b10f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tcf_clk_ctrl.h
@@ -0,0 +1,1018 @@
+/*************************************************************************/ /*!
+@Title          Test Chip Framework system control register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Autogenerated C -- do not edit
+                Generated from: tcf_clk_ctrl.def
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_TCF_CLK_CTRL_H_)
+#define _TCF_CLK_CTRL_H_
+
+/*
+ * The following register definitions are valid if register 0x28 has value 0.
+ */
+
+/*
+	Register FPGA_ID_REG
+*/
+#define TCF_CLK_CTRL_FPGA_ID_REG            0x0000
+#define FPGA_ID_REG_CORE_CFG_MASK           0x0000FFFFU
+#define FPGA_ID_REG_CORE_CFG_SHIFT          0
+#define FPGA_ID_REG_CORE_CFG_SIGNED         0
+
+#define FPGA_ID_REG_CORE_ID_MASK            0xFFFF0000U
+#define FPGA_ID_REG_CORE_ID_SHIFT           16
+#define FPGA_ID_REG_CORE_ID_SIGNED          0
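+
+/* Field access sketch: each field in this file is extracted as, e.g.,
+ *	core_id = (val & FPGA_ID_REG_CORE_ID_MASK) >> FPGA_ID_REG_CORE_ID_SHIFT;
+ * All the *_SIGNED annotations shown here are 0, i.e. the fields are unsigned.
+ */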
+
+/*
+	Register FPGA_REV_REG
+*/
+#define TCF_CLK_CTRL_FPGA_REV_REG           0x0008
+#define FPGA_REV_REG_MAINT_MASK             0x000000FFU
+#define FPGA_REV_REG_MAINT_SHIFT            0
+#define FPGA_REV_REG_MAINT_SIGNED           0
+
+#define FPGA_REV_REG_MINOR_MASK             0x0000FF00U
+#define FPGA_REV_REG_MINOR_SHIFT            8
+#define FPGA_REV_REG_MINOR_SIGNED           0
+
+#define FPGA_REV_REG_MAJOR_MASK             0x00FF0000U
+#define FPGA_REV_REG_MAJOR_SHIFT            16
+#define FPGA_REV_REG_MAJOR_SIGNED           0
+
+#define FPGA_REV_REG_DESIGNER_MASK          0xFF000000U
+#define FPGA_REV_REG_DESIGNER_SHIFT         24
+#define FPGA_REV_REG_DESIGNER_SIGNED        0
+
+/*
+	Register FPGA_DES_REV_1
+*/
+#define TCF_CLK_CTRL_FPGA_DES_REV_1         0x0010
+#define FPGA_DES_REV_1_MASK                 0xFFFFFFFFU
+#define FPGA_DES_REV_1_SHIFT                0
+#define FPGA_DES_REV_1_SIGNED               0
+
+/*
+	Register FPGA_DES_REV_2
+*/
+#define TCF_CLK_CTRL_FPGA_DES_REV_2         0x0018
+#define FPGA_DES_REV_2_MASK                 0xFFFFFFFFU
+#define FPGA_DES_REV_2_SHIFT                0
+#define FPGA_DES_REV_2_SIGNED               0
+
+/*
+	Register TCF_CORE_ID_REG
+*/
+#define TCF_CLK_CTRL_TCF_CORE_ID_REG        0x0020
+#define TCF_CORE_ID_REG_CORE_CFG_MASK       0x0000FFFFU
+#define TCF_CORE_ID_REG_CORE_CFG_SHIFT      0
+#define TCF_CORE_ID_REG_CORE_CFG_SIGNED     0
+
+#define TCF_CORE_ID_REG_CORE_ID_MASK        0xFFFF0000U
+#define TCF_CORE_ID_REG_CORE_ID_SHIFT       16
+#define TCF_CORE_ID_REG_CORE_ID_SIGNED      0
+
+/*
+	Register TCF_CORE_REV_REG
+*/
+#define TCF_CLK_CTRL_TCF_CORE_REV_REG       0x0028
+#define TCF_CORE_REV_REG_MAINT_MASK         0x000000FFU
+#define TCF_CORE_REV_REG_MAINT_SHIFT        0
+#define TCF_CORE_REV_REG_MAINT_SIGNED       0
+
+#define TCF_CORE_REV_REG_MINOR_MASK         0x0000FF00U
+#define TCF_CORE_REV_REG_MINOR_SHIFT        8
+#define TCF_CORE_REV_REG_MINOR_SIGNED       0
+
+#define TCF_CORE_REV_REG_MAJOR_MASK         0x00FF0000U
+#define TCF_CORE_REV_REG_MAJOR_SHIFT        16
+#define TCF_CORE_REV_REG_MAJOR_SIGNED       0
+
+#define TCF_CORE_REV_REG_DESIGNER_MASK      0xFF000000U
+#define TCF_CORE_REV_REG_DESIGNER_SHIFT     24
+#define TCF_CORE_REV_REG_DESIGNER_SIGNED    0
+
+/*
+	Register TCF_CORE_DES_REV_1
+*/
+#define TCF_CLK_CTRL_TCF_CORE_DES_REV_1     0x0030
+#define TCF_CORE_DES_REV_1_MASK             0xFFFFFFFFU
+#define TCF_CORE_DES_REV_1_SHIFT            0
+#define TCF_CORE_DES_REV_1_SIGNED           0
+
+/*
+	Register TCF_CORE_DES_REV_2
+*/
+#define TCF_CLK_CTRL_TCF_CORE_DES_REV_2     0x0038
+#define TCF_CORE_DES_REV_2_MASK             0xFFFFFFFFU
+#define TCF_CORE_DES_REV_2_SHIFT            0
+#define TCF_CORE_DES_REV_2_SIGNED           0
+
+
+/*
+ * The following register definitions are valid if register 0x28 has value 1.
+ */
+
+/*
+	Register ID
+*/
+#define TCF_CLK_CTRL_ID                     0x0000
+#define VARIANT_MASK                        0x0000FFFFU
+#define VARIANT_SHIFT                       0
+#define VARIANT_SIGNED                      0
+
+#define ID_MASK                             0xFFFF0000U
+#define ID_SHIFT                            16
+#define ID_SIGNED                           0
+
+/*
+	Register REL
+*/
+#define TCF_CLK_CTRL_REL                    0x0008
+#define MINOR_MASK                          0x0000FFFFU
+#define MINOR_SHIFT                         0
+#define MINOR_SIGNED                        0
+
+#define MAJOR_MASK                          0xFFFF0000U
+#define MAJOR_SHIFT                         16
+#define MAJOR_SIGNED                        0
+
+/*
+	Register CHANGE_SET
+*/
+#define TCF_CLK_CTRL_CHANGE_SET             0x0010
+#define SET_MASK                            0xFFFFFFFFU
+#define SET_SHIFT                           0
+#define SET_SIGNED                          0
+
+/*
+	Register USER_ID
+*/
+#define TCF_CLK_CTRL_USER_ID                0x0018
+#define USER_ID_MASK                        0x0000000FU
+#define USER_ID_SHIFT                       0
+#define USER_ID_SIGNED                      0
+
+/*
+	Register USER_BUILD
+*/
+#define TCF_CLK_CTRL_USER_BUILD             0x0020
+#define BUILD_MASK                          0xFFFFFFFFU
+#define BUILD_SHIFT                         0
+#define BUILD_SIGNED                        0
+
+/*
+	Register SW_IF_VERSION
+*/
+#define TCF_CLK_CTRL_SW_IF_VERSION          0x0028
+#define VERSION_MASK                        0x0000FFFFU
+#define VERSION_SHIFT                       0
+#define VERSION_SIGNED                      0
+
+/*
+ * The following register definitions are valid for all Apollo builds,
+ * even if some of the registers are not available for certain cores.
+ */
+
+/*
+	Register SCB_GENERAL_CONTROL
+*/
+#define TCF_CLK_CTRL_SCB_GENERAL_CONTROL    0x0040
+#define SCB_GC_TRANS_HALT_MASK              0x00000200U
+#define SCB_GC_TRANS_HALT_SHIFT             9
+#define SCB_GC_TRANS_HALT_SIGNED            0
+
+#define SCB_GC_CKD_REGS_MASK                0x00000100U
+#define SCB_GC_CKD_REGS_SHIFT               8
+#define SCB_GC_CKD_REGS_SIGNED              0
+
+#define SCB_GC_CKD_SLAVE_MASK               0x00000080U
+#define SCB_GC_CKD_SLAVE_SHIFT              7
+#define SCB_GC_CKD_SLAVE_SIGNED             0
+
+#define SCB_GC_CKD_MASTER_MASK              0x00000040U
+#define SCB_GC_CKD_MASTER_SHIFT             6
+#define SCB_GC_CKD_MASTER_SIGNED            0
+
+#define SCB_GC_CKD_XDATA_MASK               0x00000020U
+#define SCB_GC_CKD_XDATA_SHIFT              5
+#define SCB_GC_CKD_XDATA_SIGNED             0
+
+#define SCB_GC_SFR_REG_MASK                 0x00000010U
+#define SCB_GC_SFR_REG_SHIFT                4
+#define SCB_GC_SFR_REG_SIGNED               0
+
+#define SCB_GC_SFR_SLAVE_MASK               0x00000008U
+#define SCB_GC_SFR_SLAVE_SHIFT              3
+#define SCB_GC_SFR_SLAVE_SIGNED             0
+
+#define SCB_GC_SFR_MASTER_MASK              0x00000004U
+#define SCB_GC_SFR_MASTER_SHIFT             2
+#define SCB_GC_SFR_MASTER_SIGNED            0
+
+#define SCB_GC_SFR_DET_DATA_MASK            0x00000002U
+#define SCB_GC_SFR_DET_DATA_SHIFT           1
+#define SCB_GC_SFR_DET_DATA_SIGNED          0
+
+#define SCB_GC_SFR_GEN_DATA_MASK            0x00000001U
+#define SCB_GC_SFR_GEN_DATA_SHIFT           0
+#define SCB_GC_SFR_GEN_DATA_SIGNED          0
+
+/*
+	Register SCB_MASTER_READ_COUNT
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_READ_COUNT  0x0048
+#define MASTER_READ_COUNT_MASK              0x0000FFFFU
+#define MASTER_READ_COUNT_SHIFT             0
+#define MASTER_READ_COUNT_SIGNED            0
+
+/*
+	Register SCB_MASTER_READ_DATA
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_READ_DATA   0x0050
+#define MASTER_READ_DATA_MASK               0x000000FFU
+#define MASTER_READ_DATA_SHIFT              0
+#define MASTER_READ_DATA_SIGNED             0
+
+/*
+	Register SCB_MASTER_ADDRESS
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_ADDRESS     0x0058
+#define SCB_MASTER_ADDRESS_MASK             0x000003FFU
+#define SCB_MASTER_ADDRESS_SHIFT            0
+#define SCB_MASTER_ADDRESS_SIGNED           0
+
+/*
+	Register SCB_MASTER_WRITE_DATA
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_WRITE_DATA  0x0060
+#define MASTER_WRITE_DATA_MASK              0x000000FFU
+#define MASTER_WRITE_DATA_SHIFT             0
+#define MASTER_WRITE_DATA_SIGNED            0
+
+/*
+	Register SCB_MASTER_WRITE_COUNT
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_WRITE_COUNT 0x0068
+#define MASTER_WRITE_COUNT_MASK             0x0000FFFFU
+#define MASTER_WRITE_COUNT_SHIFT            0
+#define MASTER_WRITE_COUNT_SIGNED           0
+
+/*
+	Register SCB_BUS_SELECT
+*/
+#define TCF_CLK_CTRL_SCB_BUS_SELECT         0x0070
+#define BUS_SELECT_MASK                     0x00000003U
+#define BUS_SELECT_SHIFT                    0
+#define BUS_SELECT_SIGNED                   0
+
+/*
+	Register SCB_MASTER_FILL_STATUS
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_FILL_STATUS 0x0078
+#define MASTER_WRITE_FIFO_EMPTY_MASK        0x00000008U
+#define MASTER_WRITE_FIFO_EMPTY_SHIFT       3
+#define MASTER_WRITE_FIFO_EMPTY_SIGNED      0
+
+#define MASTER_WRITE_FIFO_FULL_MASK         0x00000004U
+#define MASTER_WRITE_FIFO_FULL_SHIFT        2
+#define MASTER_WRITE_FIFO_FULL_SIGNED       0
+
+#define MASTER_READ_FIFO_EMPTY_MASK         0x00000002U
+#define MASTER_READ_FIFO_EMPTY_SHIFT        1
+#define MASTER_READ_FIFO_EMPTY_SIGNED       0
+
+#define MASTER_READ_FIFO_FULL_MASK          0x00000001U
+#define MASTER_READ_FIFO_FULL_SHIFT         0
+#define MASTER_READ_FIFO_FULL_SIGNED        0
+
+/*
+	Register CLK_AND_RST_CTRL
+*/
+#define TCF_CLK_CTRL_CLK_AND_RST_CTRL       0x0080
+#define GLB_CLKG_EN_MASK                    0x00020000U
+#define GLB_CLKG_EN_SHIFT                   17
+#define GLB_CLKG_EN_SIGNED                  0
+
+#define CLK_GATE_CNTL_MASK                  0x00010000U
+#define CLK_GATE_CNTL_SHIFT                 16
+#define CLK_GATE_CNTL_SIGNED                0
+
+#define DUT_DCM_RESETN_MASK                 0x00000400U
+#define DUT_DCM_RESETN_SHIFT                10
+#define DUT_DCM_RESETN_SIGNED               0
+
+#define MEM_RESYNC_BYPASS_MASK              0x00000200U
+#define MEM_RESYNC_BYPASS_SHIFT             9
+#define MEM_RESYNC_BYPASS_SIGNED            0
+
+#define SYS_RESYNC_BYPASS_MASK              0x00000100U
+#define SYS_RESYNC_BYPASS_SHIFT             8
+#define SYS_RESYNC_BYPASS_SIGNED            0
+
+#define SCB_RESETN_MASK                     0x00000010U
+#define SCB_RESETN_SHIFT                    4
+#define SCB_RESETN_SIGNED                   0
+
+#define PDP2_RESETN_MASK                    0x00000008U
+#define PDP2_RESETN_SHIFT                   3
+#define PDP2_RESETN_SIGNED                  0
+
+#define PDP1_RESETN_MASK                    0x00000004U
+#define PDP1_RESETN_SHIFT                   2
+#define PDP1_RESETN_SIGNED                  0
+
+#define DDR_RESETN_MASK                     0x00000002U
+#define DDR_RESETN_SHIFT                    1
+#define DDR_RESETN_SIGNED                   0
+
+#define DUT_RESETN_MASK                     0x00000001U
+#define DUT_RESETN_SHIFT                    0
+#define DUT_RESETN_SIGNED                   0
+
+/*
+	Register TEST_REG_OUT
+*/
+#define TCF_CLK_CTRL_TEST_REG_OUT           0x0088
+#define TEST_REG_OUT_MASK                   0xFFFFFFFFU
+#define TEST_REG_OUT_SHIFT                  0
+#define TEST_REG_OUT_SIGNED                 0
+
+/*
+	Register TEST_REG_IN
+*/
+#define TCF_CLK_CTRL_TEST_REG_IN            0x0090
+#define TEST_REG_IN_MASK                    0xFFFFFFFFU
+#define TEST_REG_IN_SHIFT                   0
+#define TEST_REG_IN_SIGNED                  0
+
+/*
+	Register TEST_CTRL
+*/
+#define TCF_CLK_CTRL_TEST_CTRL              0x0098
+#define PCI_TEST_OFFSET_MASK                0xF8000000U
+#define PCI_TEST_OFFSET_SHIFT               27
+#define PCI_TEST_OFFSET_SIGNED              0
+
+#define PDP1_HOST_MEM_SELECT_MASK           0x00000200U
+#define PDP1_HOST_MEM_SELECT_SHIFT          9
+#define PDP1_HOST_MEM_SELECT_SIGNED         0
+
+#define HOST_PHY_MODE_MASK                  0x00000100U
+#define HOST_PHY_MODE_SHIFT                 8
+#define HOST_PHY_MODE_SIGNED                0
+
+#define HOST_ONLY_MODE_MASK                 0x00000080U
+#define HOST_ONLY_MODE_SHIFT                7
+#define HOST_ONLY_MODE_SIGNED               0
+
+#define PCI_TEST_MODE_MASK                  0x00000040U
+#define PCI_TEST_MODE_SHIFT                 6
+#define PCI_TEST_MODE_SIGNED                0
+
+#define TURN_OFF_DDR_MASK                   0x00000020U
+#define TURN_OFF_DDR_SHIFT                  5
+#define TURN_OFF_DDR_SIGNED                 0
+
+#define SYS_RD_CLK_INV_MASK                 0x00000010U
+#define SYS_RD_CLK_INV_SHIFT                4
+#define SYS_RD_CLK_INV_SIGNED               0
+
+#define MEM_REQ_CLK_INV_MASK                0x00000008U
+#define MEM_REQ_CLK_INV_SHIFT               3
+#define MEM_REQ_CLK_INV_SIGNED              0
+
+#define BURST_SPLIT_MASK                    0x00000004U
+#define BURST_SPLIT_SHIFT                   2
+#define BURST_SPLIT_SIGNED                  0
+
+#define CLK_INVERSION_MASK                  0x00000002U
+#define CLK_INVERSION_SHIFT                 1
+#define CLK_INVERSION_SIGNED                0
+
+#define ADDRESS_FORCE_MASK                  0x00000001U
+#define ADDRESS_FORCE_SHIFT                 0
+#define ADDRESS_FORCE_SIGNED                0
+
+/*
+	Register CLEAR_HOST_MEM_SIG
+*/
+#define TCF_CLK_CTRL_CLEAR_HOST_MEM_SIG     0x00A0
+#define SIGNATURE_TAG_ID_MASK               0x00000F00U
+#define SIGNATURE_TAG_ID_SHIFT              8
+#define SIGNATURE_TAG_ID_SIGNED             0
+
+#define CLEAR_HOST_MEM_SIGNATURE_MASK       0x00000001U
+#define CLEAR_HOST_MEM_SIGNATURE_SHIFT      0
+#define CLEAR_HOST_MEM_SIGNATURE_SIGNED     0
+
+/*
+	Register HOST_MEM_SIGNATURE
+*/
+#define TCF_CLK_CTRL_HOST_MEM_SIGNATURE     0x00A8
+#define HOST_MEM_SIGNATURE_MASK             0xFFFFFFFFU
+#define HOST_MEM_SIGNATURE_SHIFT            0
+#define HOST_MEM_SIGNATURE_SIGNED           0
+
+/*
+	Register INTERRUPT_STATUS
+*/
+#define TCF_CLK_CTRL_INTERRUPT_STATUS       0x00C8
+#define INTERRUPT_MASTER_STATUS_MASK        0x80000000U
+#define INTERRUPT_MASTER_STATUS_SHIFT       31
+#define INTERRUPT_MASTER_STATUS_SIGNED      0
+
+#define OTHER_INTS_MASK                     0x7FFE0000U
+#define OTHER_INTS_SHIFT                    17
+#define OTHER_INTS_SIGNED                   0
+
+#define HOST_MST_NORESPONSE_MASK            0x00010000U
+#define HOST_MST_NORESPONSE_SHIFT           16
+#define HOST_MST_NORESPONSE_SIGNED          0
+
+#define PDP2_INT_MASK                       0x00008000U
+#define PDP2_INT_SHIFT                      15
+#define PDP2_INT_SIGNED                     0
+
+#define PDP1_INT_MASK                       0x00004000U
+#define PDP1_INT_SHIFT                      14
+#define PDP1_INT_SIGNED                     0
+
+#define EXT_INT_MASK                        0x00002000U
+#define EXT_INT_SHIFT                       13
+#define EXT_INT_SIGNED                      0
+
+#define SCB_MST_HLT_BIT_MASK                0x00001000U
+#define SCB_MST_HLT_BIT_SHIFT               12
+#define SCB_MST_HLT_BIT_SIGNED              0
+
+#define SCB_SLV_EVENT_MASK                  0x00000800U
+#define SCB_SLV_EVENT_SHIFT                 11
+#define SCB_SLV_EVENT_SIGNED                0
+
+#define SCB_TDONE_RX_MASK                   0x00000400U
+#define SCB_TDONE_RX_SHIFT                  10
+#define SCB_TDONE_RX_SIGNED                 0
+
+#define SCB_SLV_WT_RD_DAT_MASK              0x00000200U
+#define SCB_SLV_WT_RD_DAT_SHIFT             9
+#define SCB_SLV_WT_RD_DAT_SIGNED            0
+
+#define SCB_SLV_WT_PRV_RD_MASK              0x00000100U
+#define SCB_SLV_WT_PRV_RD_SHIFT             8
+#define SCB_SLV_WT_PRV_RD_SIGNED            0
+
+#define SCB_SLV_WT_WR_DAT_MASK              0x00000080U
+#define SCB_SLV_WT_WR_DAT_SHIFT             7
+#define SCB_SLV_WT_WR_DAT_SIGNED            0
+
+#define SCB_MST_WT_RD_DAT_MASK              0x00000040U
+#define SCB_MST_WT_RD_DAT_SHIFT             6
+#define SCB_MST_WT_RD_DAT_SIGNED            0
+
+#define SCB_ADD_ACK_ERR_MASK                0x00000020U
+#define SCB_ADD_ACK_ERR_SHIFT               5
+#define SCB_ADD_ACK_ERR_SIGNED              0
+
+#define SCB_WR_ACK_ERR_MASK                 0x00000010U
+#define SCB_WR_ACK_ERR_SHIFT                4
+#define SCB_WR_ACK_ERR_SIGNED               0
+
+#define SCB_SDAT_LO_TIM_MASK                0x00000008U
+#define SCB_SDAT_LO_TIM_SHIFT               3
+#define SCB_SDAT_LO_TIM_SIGNED              0
+
+#define SCB_SCLK_LO_TIM_MASK                0x00000004U
+#define SCB_SCLK_LO_TIM_SHIFT               2
+#define SCB_SCLK_LO_TIM_SIGNED              0
+
+#define SCB_UNEX_START_BIT_MASK             0x00000002U
+#define SCB_UNEX_START_BIT_SHIFT            1
+#define SCB_UNEX_START_BIT_SIGNED           0
+
+#define SCB_BUS_INACTIVE_MASK               0x00000001U
+#define SCB_BUS_INACTIVE_SHIFT              0
+#define SCB_BUS_INACTIVE_SIGNED             0
+
+/*
+	Register INTERRUPT_OP_CFG
+*/
+#define TCF_CLK_CTRL_INTERRUPT_OP_CFG       0x00D0
+#define PULSE_NLEVEL_MASK                   0x80000000U
+#define PULSE_NLEVEL_SHIFT                  31
+#define PULSE_NLEVEL_SIGNED                 0
+
+#define INT_SENSE_MASK                      0x40000000U
+#define INT_SENSE_SHIFT                     30
+#define INT_SENSE_SIGNED                    0
+
+#define INTERRUPT_DEST_MASK                 0x0000000FU
+#define INTERRUPT_DEST_SHIFT                0
+#define INTERRUPT_DEST_SIGNED               0
+
+/*
+	Register INTERRUPT_ENABLE
+*/
+#define TCF_CLK_CTRL_INTERRUPT_ENABLE       0x00D8
+#define INTERRUPT_MASTER_ENABLE_MASK        0x80000000U
+#define INTERRUPT_MASTER_ENABLE_SHIFT       31
+#define INTERRUPT_MASTER_ENABLE_SIGNED      0
+
+#define INTERRUPT_ENABLE_MASK               0x7FFFFFFFU
+#define INTERRUPT_ENABLE_SHIFT              0
+#define INTERRUPT_ENABLE_SIGNED             0
+
+/*
+	Register INTERRUPT_CLEAR
+*/
+#define TCF_CLK_CTRL_INTERRUPT_CLEAR        0x00E0
+#define INTERRUPT_MASTER_CLEAR_MASK         0x80000000U
+#define INTERRUPT_MASTER_CLEAR_SHIFT        31
+#define INTERRUPT_MASTER_CLEAR_SIGNED       0
+
+#define INTERRUPT_CLEAR_MASK                0x7FFFFFFFU
+#define INTERRUPT_CLEAR_SHIFT               0
+#define INTERRUPT_CLEAR_SIGNED              0
+
+/*
+	Register YCC_RGB_CTRL
+*/
+#define TCF_CLK_CTRL_YCC_RGB_CTRL           0x00E8
+#define RGB_CTRL1_MASK                      0x000001FFU
+#define RGB_CTRL1_SHIFT                     0
+#define RGB_CTRL1_SIGNED                    0
+
+#define RGB_CTRL2_MASK                      0x01FF0000U
+#define RGB_CTRL2_SHIFT                     16
+#define RGB_CTRL2_SIGNED                    0
+
+/*
+	Register EXP_BRD_CTRL
+*/
+#define TCF_CLK_CTRL_EXP_BRD_CTRL           0x00F8
+#define PDP1_DATA_EN_MASK                   0x00000003U
+#define PDP1_DATA_EN_SHIFT                  0
+#define PDP1_DATA_EN_SIGNED                 0
+
+#define PDP2_DATA_EN_MASK                   0x00000030U
+#define PDP2_DATA_EN_SHIFT                  4
+#define PDP2_DATA_EN_SIGNED                 0
+
+#define EXP_BRD_OUTPUT_MASK                 0xFFFFFF00U
+#define EXP_BRD_OUTPUT_SHIFT                8
+#define EXP_BRD_OUTPUT_SIGNED               0
+
+/*
+	Register HOSTIF_CONTROL
+*/
+#define TCF_CLK_CTRL_HOSTIF_CONTROL         0x0100
+#define HOSTIF_CTRL_MASK                    0x000000FFU
+#define HOSTIF_CTRL_SHIFT                   0
+#define HOSTIF_CTRL_SIGNED                  0
+
+/*
+	Register DUT_CONTROL_1
+*/
+#define TCF_CLK_CTRL_DUT_CONTROL_1          0x0108
+#define DUT_CTRL_1_MASK                     0xFFFFFFFFU
+#define DUT_CTRL_1_SHIFT                    0
+#define DUT_CTRL_1_SIGNED                   0
+
+/* TC ES2 additionally needs these: */
+#define DUT_CTRL_TEST_MODE_SHIFT            0
+#define DUT_CTRL_TEST_MODE_MASK             0x3
+
+#define DUT_CTRL_VCC_0V9EN                  (1 << 12)
+#define DUT_CTRL_VCC_1V8EN                  (1 << 13)
+#define DUT_CTRL_VCC_IO_INH                 (1 << 14)
+#define DUT_CTRL_VCC_CORE_INH               (1 << 15)
+
+/*
+	Register DUT_STATUS_1
+*/
+#define TCF_CLK_CTRL_DUT_STATUS_1           0x0110
+#define DUT_STATUS_1_MASK                   0xFFFFFFFFU
+#define DUT_STATUS_1_SHIFT                  0
+#define DUT_STATUS_1_SIGNED                 0
+
+/*
+	Register DUT_CTRL_NOT_STAT_1
+*/
+#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_1    0x0118
+#define DUT_STAT_NOT_CTRL_1_MASK            0xFFFFFFFFU
+#define DUT_STAT_NOT_CTRL_1_SHIFT           0
+#define DUT_STAT_NOT_CTRL_1_SIGNED          0
+
+/*
+	Register DUT_CONTROL_2
+*/
+#define TCF_CLK_CTRL_DUT_CONTROL_2          0x0120
+#define DUT_CTRL_2_MASK                     0xFFFFFFFFU
+#define DUT_CTRL_2_SHIFT                    0
+#define DUT_CTRL_2_SIGNED                   0
+
+/*
+	Register DUT_STATUS_2
+*/
+#define TCF_CLK_CTRL_DUT_STATUS_2           0x0128
+#define DUT_STATUS_2_MASK                   0xFFFFFFFFU
+#define DUT_STATUS_2_SHIFT                  0
+#define DUT_STATUS_2_SIGNED                 0
+
+/*
+	Register DUT_CTRL_NOT_STAT_2
+*/
+#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_2    0x0130
+#define DUT_CTRL_NOT_STAT_2_MASK            0xFFFFFFFFU
+#define DUT_CTRL_NOT_STAT_2_SHIFT           0
+#define DUT_CTRL_NOT_STAT_2_SIGNED          0
+
+/*
+	Register BUS_CAP_BASE_ADDR
+*/
+#define TCF_CLK_CTRL_BUS_CAP_BASE_ADDR      0x0138
+#define BUS_CAP_BASE_ADDR_MASK              0xFFFFFFFFU
+#define BUS_CAP_BASE_ADDR_SHIFT             0
+#define BUS_CAP_BASE_ADDR_SIGNED            0
+
+/*
+	Register BUS_CAP_ENABLE
+*/
+#define TCF_CLK_CTRL_BUS_CAP_ENABLE         0x0140
+#define BUS_CAP_ENABLE_MASK                 0x00000001U
+#define BUS_CAP_ENABLE_SHIFT                0
+#define BUS_CAP_ENABLE_SIGNED               0
+
+/*
+	Register BUS_CAP_COUNT
+*/
+#define TCF_CLK_CTRL_BUS_CAP_COUNT          0x0148
+#define BUS_CAP_COUNT_MASK                  0xFFFFFFFFU
+#define BUS_CAP_COUNT_SHIFT                 0
+#define BUS_CAP_COUNT_SIGNED                0
+
+/*
+	Register DCM_LOCK_STATUS
+*/
+#define TCF_CLK_CTRL_DCM_LOCK_STATUS        0x0150
+#define DCM_LOCK_STATUS_MASK                0x00000007U
+#define DCM_LOCK_STATUS_SHIFT               0
+#define DCM_LOCK_STATUS_SIGNED              0
+
+/*
+	Register AUX_DUT_RESETNS
+*/
+#define TCF_CLK_CTRL_AUX_DUT_RESETNS        0x0158
+#define AUX_DUT_RESETNS_MASK                0x0000000FU
+#define AUX_DUT_RESETNS_SHIFT               0
+#define AUX_DUT_RESETNS_SIGNED              0
+
+/*
+	Register TCF_SPI_MST_ADDR_RDNWR
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR 0x0160
+#define TCF_SPI_MST_ADDR_MASK               0x0003FFFFU
+#define TCF_SPI_MST_ADDR_SHIFT              0
+#define TCF_SPI_MST_ADDR_SIGNED             0
+
+#define TCF_SPI_MST_RDNWR_MASK              0x00040000U
+#define TCF_SPI_MST_RDNWR_SHIFT             18
+#define TCF_SPI_MST_RDNWR_SIGNED            0
+
+#define TCF_SPI_MST_SLAVE_ID_MASK           0x00080000U
+#define TCF_SPI_MST_SLAVE_ID_SHIFT          19
+#define TCF_SPI_MST_SLAVE_ID_SIGNED         0
+
+#define TCF_SPI_MST_MASTER_ID_MASK          0x00300000U
+#define TCF_SPI_MST_MASTER_ID_SHIFT         20
+#define TCF_SPI_MST_MASTER_ID_SIGNED        0
+
+/*
+	Register TCF_SPI_MST_WDATA
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_WDATA      0x0168
+#define TCF_SPI_MST_WDATA_MASK              0xFFFFFFFFU
+#define TCF_SPI_MST_WDATA_SHIFT             0
+#define TCF_SPI_MST_WDATA_SIGNED            0
+
+/*
+	Register TCF_SPI_MST_RDATA
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_RDATA      0x0170
+#define TCF_SPI_MST_RDATA_MASK              0xFFFFFFFFU
+#define TCF_SPI_MST_RDATA_SHIFT             0
+#define TCF_SPI_MST_RDATA_SIGNED            0
+
+/*
+	Register TCF_SPI_MST_STATUS
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_STATUS     0x0178
+#define TCF_SPI_MST_STATUS_MASK             0x0000000FU
+#define TCF_SPI_MST_STATUS_SHIFT            0
+#define TCF_SPI_MST_STATUS_SIGNED           0
+
+/*
+	Register TCF_SPI_MST_GO
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_GO         0x0180
+#define TCF_SPI_MST_GO_MASK                 0x00000001U
+#define TCF_SPI_MST_GO_SHIFT                0
+#define TCF_SPI_MST_GO_SIGNED               0
+
+/*
+	Register EXT_SIG_CTRL
+*/
+#define TCF_CLK_CTRL_EXT_SIG_CTRL           0x0188
+#define EXT_SYS_REQ_SIG_START_MASK          0x00000001U
+#define EXT_SYS_REQ_SIG_START_SHIFT         0
+#define EXT_SYS_REQ_SIG_START_SIGNED        0
+
+#define EXT_SYS_RD_SIG_START_MASK           0x00000002U
+#define EXT_SYS_RD_SIG_START_SHIFT          1
+#define EXT_SYS_RD_SIG_START_SIGNED         0
+
+#define EXT_MEM_REQ_SIG_START_MASK          0x00000004U
+#define EXT_MEM_REQ_SIG_START_SHIFT         2
+#define EXT_MEM_REQ_SIG_START_SIGNED        0
+
+#define EXT_MEM_RD_SIG_START_MASK           0x00000008U
+#define EXT_MEM_RD_SIG_START_SHIFT          3
+#define EXT_MEM_RD_SIG_START_SIGNED         0
+
+/*
+	Register EXT_SYS_REQ_SIG
+*/
+#define TCF_CLK_CTRL_EXT_SYS_REQ_SIG        0x0190
+#define EXT_SYS_REQ_SIG_MASK                0xFFFFFFFFU
+#define EXT_SYS_REQ_SIG_SHIFT               0
+#define EXT_SYS_REQ_SIG_SIGNED              0
+
+/*
+	Register EXT_SYS_RD_SIG
+*/
+#define TCF_CLK_CTRL_EXT_SYS_RD_SIG         0x0198
+#define EXT_SYS_RD_SIG_MASK                 0xFFFFFFFFU
+#define EXT_SYS_RD_SIG_SHIFT                0
+#define EXT_SYS_RD_SIG_SIGNED               0
+
+/*
+	Register EXT_MEM_REQ_SIG
+*/
+#define TCF_CLK_CTRL_EXT_MEM_REQ_SIG        0x01A0
+#define EXT_MEM_REQ_SIG_MASK                0xFFFFFFFFU
+#define EXT_MEM_REQ_SIG_SHIFT               0
+#define EXT_MEM_REQ_SIG_SIGNED              0
+
+/*
+	Register EXT_MEM_RD_SIG
+*/
+#define TCF_CLK_CTRL_EXT_MEM_RD_SIG         0x01A8
+#define EXT_MEM_RD_SIG_MASK                 0xFFFFFFFFU
+#define EXT_MEM_RD_SIG_SHIFT                0
+#define EXT_MEM_RD_SIG_SIGNED               0
+
+/*
+	Register EXT_SYS_REQ_WR_CNT
+*/
+#define TCF_CLK_CTRL_EXT_SYS_REQ_WR_CNT     0x01B0
+#define EXT_SYS_REQ_WR_CNT_MASK             0xFFFFFFFFU
+#define EXT_SYS_REQ_WR_CNT_SHIFT            0
+#define EXT_SYS_REQ_WR_CNT_SIGNED           0
+
+/*
+	Register EXT_SYS_REQ_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_SYS_REQ_RD_CNT     0x01B8
+#define EXT_SYS_REQ_RD_CNT_MASK             0xFFFFFFFFU
+#define EXT_SYS_REQ_RD_CNT_SHIFT            0
+#define EXT_SYS_REQ_RD_CNT_SIGNED           0
+
+/*
+	Register EXT_SYS_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_SYS_RD_CNT         0x01C0
+#define EXT_SYS_RD_CNT_MASK                 0xFFFFFFFFU
+#define EXT_SYS_RD_CNT_SHIFT                0
+#define EXT_SYS_RD_CNT_SIGNED               0
+
+/*
+	Register EXT_MEM_REQ_WR_CNT
+*/
+#define TCF_CLK_CTRL_EXT_MEM_REQ_WR_CNT     0x01C8
+#define EXT_MEM_REQ_WR_CNT_MASK             0xFFFFFFFFU
+#define EXT_MEM_REQ_WR_CNT_SHIFT            0
+#define EXT_MEM_REQ_WR_CNT_SIGNED           0
+
+/*
+	Register EXT_MEM_REQ_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_MEM_REQ_RD_CNT     0x01D0
+#define EXT_MEM_REQ_RD_CNT_MASK             0xFFFFFFFFU
+#define EXT_MEM_REQ_RD_CNT_SHIFT            0
+#define EXT_MEM_REQ_RD_CNT_SIGNED           0
+
+/*
+	Register EXT_MEM_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_MEM_RD_CNT         0x01D8
+#define EXT_MEM_RD_CNT_MASK                 0xFFFFFFFFU
+#define EXT_MEM_RD_CNT_SHIFT                0
+#define EXT_MEM_RD_CNT_SIGNED               0
+
+/*
+	Register TCF_CORE_TARGET_BUILD_CFG
+*/
+#define TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG 0x01E0
+#define TCF_CORE_TARGET_BUILD_ID_MASK       0x000000FFU
+#define TCF_CORE_TARGET_BUILD_ID_SHIFT      0
+#define TCF_CORE_TARGET_BUILD_ID_SIGNED     0
+
+/*
+	Register MEM_THROUGH_SYS
+*/
+#define TCF_CLK_CTRL_MEM_THROUGH_SYS        0x01E8
+#define MEM_THROUGH_SYS_MASK                0x00000001U
+#define MEM_THROUGH_SYS_SHIFT               0
+#define MEM_THROUGH_SYS_SIGNED              0
+
+/*
+	Register HOST_PHY_OFFSET
+*/
+#define TCF_CLK_CTRL_HOST_PHY_OFFSET        0x01F0
+#define HOST_PHY_OFFSET_MASK                0xFFFFFFFFU
+#define HOST_PHY_OFFSET_SHIFT               0
+#define HOST_PHY_OFFSET_SIGNED              0
+
+/*
+	Register DEBUG_REG_SEL
+*/
+#define TCF_CLK_CTRL_DEBUG_REG_SEL          0x01F8
+#define DEBUG_REG_SELECT_MASK               0xFFFFFFFFU
+#define DEBUG_REG_SELECT_SHIFT              0
+#define DEBUG_REG_SELECT_SIGNED             0
+
+/*
+	Register DEBUG_REG
+*/
+#define TCF_CLK_CTRL_DEBUG_REG              0x0200
+#define DEBUG_REG_VALUE_MASK                0xFFFFFFFFU
+#define DEBUG_REG_VALUE_SHIFT               0
+#define DEBUG_REG_VALUE_SIGNED              0
+
+/*
+	Register JTAG_CTRL
+*/
+#define TCF_CLK_CTRL_JTAG_CTRL              0x0208
+#define JTAG_TRST_MASK                      0x00000001U
+#define JTAG_TRST_SHIFT                     0
+#define JTAG_TRST_SIGNED                    0
+
+#define JTAG_TMS_MASK                       0x00000002U
+#define JTAG_TMS_SHIFT                      1
+#define JTAG_TMS_SIGNED                     0
+
+#define JTAG_TCK_MASK                       0x00000004U
+#define JTAG_TCK_SHIFT                      2
+#define JTAG_TCK_SIGNED                     0
+
+#define JTAG_TDO_MASK                       0x00000008U
+#define JTAG_TDO_SHIFT                      3
+#define JTAG_TDO_SIGNED                     0
+
+#define JTAG_TDI_MASK                       0x00000010U
+#define JTAG_TDI_SHIFT                      4
+#define JTAG_TDI_SIGNED                     0
+
+#define JTAG_DASH_N_REG_MASK                0x40000000U
+#define JTAG_DASH_N_REG_SHIFT               30
+#define JTAG_DASH_N_REG_SIGNED              0
+
+#define JTAG_DISABLE_MASK                   0x80000000U
+#define JTAG_DISABLE_SHIFT                  31
+#define JTAG_DISABLE_SIGNED                 0
+
+/*
+	Register SAI_DEBUG_RDNWR
+*/
+#define TCF_CLK_CTRL_SAI_DEBUG_RDNWR        0x0300
+#define SAI_DEBUG_REG_ADDR_MASK             0x000001FFU
+#define SAI_DEBUG_REG_ADDR_SHIFT            0
+#define SAI_DEBUG_REG_ADDR_SIGNED           0
+
+#define SAI_DEBUG_REG_RDNWR_MASK            0x00000200U
+#define SAI_DEBUG_REG_RDNWR_SHIFT           9
+#define SAI_DEBUG_REG_RDNWR_SIGNED          0
+
+/*
+	Register SAI_DEBUG_WDATA
+*/
+#define TCF_CLK_CTRL_SAI_DEBUG_WDATA        0x0308
+#define SAI_DEBUG_REG_WDATA_MASK            0xFFFFFFFFU
+#define SAI_DEBUG_REG_WDATA_SHIFT           0
+#define SAI_DEBUG_REG_WDATA_SIGNED          0
+
+/*
+	Register SAI_DEBUG_RDATA
+*/
+#define TCF_CLK_CTRL_SAI_DEBUG_RDATA        0x0310
+#define SAI_DEBUG_REG_RDATA_MASK            0xFFFFFFFFU
+#define SAI_DEBUG_REG_RDATA_SHIFT           0
+#define SAI_DEBUG_REG_RDATA_SIGNED          0
+
+/*
+	Register SAI_DEBUG_GO
+*/
+#define TCF_CLK_CTRL_SAI_DEBUG_GO           0x0318
+#define SAI_DEBUG_REG_GO_MASK               0x00000001U
+#define SAI_DEBUG_REG_GO_SHIFT              0
+#define SAI_DEBUG_REG_GO_SIGNED             0
+
+/*
+	Register AUX_DUT_RESETS
+*/
+#define TCF_CLK_CTRL_AUX_DUT_RESETS         0x0320
+#define AUX_DUT_RESETS_MASK                 0x0000000FU
+#define AUX_DUT_RESETS_SHIFT                0
+#define AUX_DUT_RESETS_SIGNED               0
+
+/*
+	Register DUT_CLK_CTRL
+*/
+#define TCF_CLK_CTRL_DUT_CLK_CTRL           0x0328
+#define MEM_REQ_PHSE_MASK                   0x0000FFFFU
+#define MEM_REQ_PHSE_SHIFT                  0
+#define MEM_REQ_PHSE_SIGNED                 0
+
+/*
+	Register DUT_CLK_STATUS
+*/
+#define TCF_CLK_CTRL_DUT_CLK_STATUS         0x0330
+#define MEM_REQ_PHSE_SET_MASK               0x00000003U
+#define MEM_REQ_PHSE_SET_SHIFT              0
+#define MEM_REQ_PHSE_SET_SIGNED             0
+
+/*
+	Register DUT_CLK_INFO
+*/
+#define TCF_CLK_CTRL_DUT_CLK_INFO           0x0340
+#define CORE_MASK                           0x0000FFFFU
+#define CORE_SHIFT                          0
+#define CORE_SIGNED                         0
+
+#define MEM_MASK                            0xFFFF0000U
+#define MEM_SHIFT                           16
+#define MEM_SIGNED                          0
+
+/*
+	Register DUT_CLK_PHSE
+*/
+#define TCF_CLK_CTRL_DUT_CLK_PHSE           0x0348
+#define MEM_REQ_MASK                        0x0000FFFFU
+#define MEM_REQ_SHIFT                       0
+#define MEM_REQ_SIGNED                      0
+
+#define MEM_RD_MASK                         0xFFFF0000U
+#define MEM_RD_SHIFT                        16
+#define MEM_RD_SIGNED                       0
+
+#endif /* !defined(_TCF_CLK_CTRL_H_) */
+
+/*****************************************************************************
+ End of file (tcf_clk_ctrl.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tcf_pll.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tcf_pll.h
new file mode 100644
index 0000000..71eaf92
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tcf_pll.h
@@ -0,0 +1,311 @@
+/*************************************************************************/ /*!
+@Title          Test Chip Framework PLL register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Autogenerated C -- do not edit
+                Generated from tcf_pll.def
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_TCF_PLL_H_)
+#define _TCF_PLL_H_
+
+/*
+	Register PLL_DDR2_CLK0
+*/
+#define TCF_PLL_PLL_DDR2_CLK0               0x0000
+#define DDR2_PLL_CLK0_PHS_MASK              0x00300000U
+#define DDR2_PLL_CLK0_PHS_SHIFT             20
+#define DDR2_PLL_CLK0_PHS_SIGNED            0
+
+#define DDR2_PLL_CLK0_MS_MASK               0x00030000U
+#define DDR2_PLL_CLK0_MS_SHIFT              16
+#define DDR2_PLL_CLK0_MS_SIGNED             0
+
+#define DDR2_PLL_CLK0_FREQ_MASK             0x000001FFU
+#define DDR2_PLL_CLK0_FREQ_SHIFT            0
+#define DDR2_PLL_CLK0_FREQ_SIGNED           0
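+
+/* Illustrative use of the generated MASK/SHIFT/SIGNED triples (a sketch, not
+   part of the generated file; ui32RegVal and ui32Phase are hypothetical
+   names). An unsigned field is extracted with
+
+	ui32Phase = (ui32RegVal & DDR2_PLL_CLK0_PHS_MASK) >> DDR2_PLL_CLK0_PHS_SHIFT;
+
+   and updated with
+
+	ui32RegVal = (ui32RegVal & ~DDR2_PLL_CLK0_PHS_MASK) |
+	             ((ui32Phase << DDR2_PLL_CLK0_PHS_SHIFT) & DDR2_PLL_CLK0_PHS_MASK);
+
+   A _SIGNED value of 0 means no sign extension is needed after extraction. */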
+
+/*
+	Register PLL_DDR2_CLK1TO5
+*/
+#define TCF_PLL_PLL_DDR2_CLK1TO5            0x0008
+#define DDR2_PLL_CLK1TO5_PHS_MASK           0x3FF00000U
+#define DDR2_PLL_CLK1TO5_PHS_SHIFT          20
+#define DDR2_PLL_CLK1TO5_PHS_SIGNED         0
+
+#define DDR2_PLL_CLK1TO5_MS_MASK            0x000FFC00U
+#define DDR2_PLL_CLK1TO5_MS_SHIFT           10
+#define DDR2_PLL_CLK1TO5_MS_SIGNED          0
+
+#define DDR2_PLL_CLK1TO5_FREQ_MASK          0x000003FFU
+#define DDR2_PLL_CLK1TO5_FREQ_SHIFT         0
+#define DDR2_PLL_CLK1TO5_FREQ_SIGNED        0
+
+/*
+	Register PLL_DDR2_DRP_GO
+*/
+#define TCF_PLL_PLL_DDR2_DRP_GO             0x0010
+#define PLL_DDR2_DRP_GO_MASK                0x00000001U
+#define PLL_DDR2_DRP_GO_SHIFT               0
+#define PLL_DDR2_DRP_GO_SIGNED              0
+
+/*
+	Register PLL_PDP_CLK0
+*/
+#define TCF_PLL_PLL_PDP_CLK0                0x0018
+#define PDP_PLL_CLK0_PHS_MASK               0x00300000U
+#define PDP_PLL_CLK0_PHS_SHIFT              20
+#define PDP_PLL_CLK0_PHS_SIGNED             0
+
+#define PDP_PLL_CLK0_MS_MASK                0x00030000U
+#define PDP_PLL_CLK0_MS_SHIFT               16
+#define PDP_PLL_CLK0_MS_SIGNED              0
+
+#define PDP_PLL_CLK0_FREQ_MASK              0x000001FFU
+#define PDP_PLL_CLK0_FREQ_SHIFT             0
+#define PDP_PLL_CLK0_FREQ_SIGNED            0
+
+/*
+	Register PLL_PDP_CLK1TO5
+*/
+#define TCF_PLL_PLL_PDP_CLK1TO5             0x0020
+#define PDP_PLL_CLK1TO5_PHS_MASK            0x3FF00000U
+#define PDP_PLL_CLK1TO5_PHS_SHIFT           20
+#define PDP_PLL_CLK1TO5_PHS_SIGNED          0
+
+#define PDP_PLL_CLK1TO5_MS_MASK             0x000FFC00U
+#define PDP_PLL_CLK1TO5_MS_SHIFT            10
+#define PDP_PLL_CLK1TO5_MS_SIGNED           0
+
+#define PDP_PLL_CLK1TO5_FREQ_MASK           0x000003FFU
+#define PDP_PLL_CLK1TO5_FREQ_SHIFT          0
+#define PDP_PLL_CLK1TO5_FREQ_SIGNED         0
+
+/*
+	Register PLL_PDP_DRP_GO
+*/
+#define TCF_PLL_PLL_PDP_DRP_GO              0x0028
+#define PLL_PDP_DRP_GO_MASK                 0x00000001U
+#define PLL_PDP_DRP_GO_SHIFT                0
+#define PLL_PDP_DRP_GO_SIGNED               0
+
+/*
+	Register PLL_PDP2_CLK0
+*/
+#define TCF_PLL_PLL_PDP2_CLK0               0x0030
+#define PDP2_PLL_CLK0_PHS_MASK              0x00300000U
+#define PDP2_PLL_CLK0_PHS_SHIFT             20
+#define PDP2_PLL_CLK0_PHS_SIGNED            0
+
+#define PDP2_PLL_CLK0_MS_MASK               0x00030000U
+#define PDP2_PLL_CLK0_MS_SHIFT              16
+#define PDP2_PLL_CLK0_MS_SIGNED             0
+
+#define PDP2_PLL_CLK0_FREQ_MASK             0x000001FFU
+#define PDP2_PLL_CLK0_FREQ_SHIFT            0
+#define PDP2_PLL_CLK0_FREQ_SIGNED           0
+
+/*
+	Register PLL_PDP2_CLK1TO5
+*/
+#define TCF_PLL_PLL_PDP2_CLK1TO5            0x0038
+#define PDP2_PLL_CLK1TO5_PHS_MASK           0x3FF00000U
+#define PDP2_PLL_CLK1TO5_PHS_SHIFT          20
+#define PDP2_PLL_CLK1TO5_PHS_SIGNED         0
+
+#define PDP2_PLL_CLK1TO5_MS_MASK            0x000FFC00U
+#define PDP2_PLL_CLK1TO5_MS_SHIFT           10
+#define PDP2_PLL_CLK1TO5_MS_SIGNED          0
+
+#define PDP2_PLL_CLK1TO5_FREQ_MASK          0x000003FFU
+#define PDP2_PLL_CLK1TO5_FREQ_SHIFT         0
+#define PDP2_PLL_CLK1TO5_FREQ_SIGNED        0
+
+/*
+	Register PLL_PDP2_DRP_GO
+*/
+#define TCF_PLL_PLL_PDP2_DRP_GO             0x0040
+#define PLL_PDP2_DRP_GO_MASK                0x00000001U
+#define PLL_PDP2_DRP_GO_SHIFT               0
+#define PLL_PDP2_DRP_GO_SIGNED              0
+
+/*
+	Register PLL_CORE_CLK0
+*/
+#define TCF_PLL_PLL_CORE_CLK0               0x0048
+#define CORE_PLL_CLK0_PHS_MASK              0x00300000U
+#define CORE_PLL_CLK0_PHS_SHIFT             20
+#define CORE_PLL_CLK0_PHS_SIGNED            0
+
+#define CORE_PLL_CLK0_MS_MASK               0x00030000U
+#define CORE_PLL_CLK0_MS_SHIFT              16
+#define CORE_PLL_CLK0_MS_SIGNED             0
+
+#define CORE_PLL_CLK0_FREQ_MASK             0x000001FFU
+#define CORE_PLL_CLK0_FREQ_SHIFT            0
+#define CORE_PLL_CLK0_FREQ_SIGNED           0
+
+/*
+	Register PLL_CORE_CLK1TO5
+*/
+#define TCF_PLL_PLL_CORE_CLK1TO5            0x0050
+#define CORE_PLL_CLK1TO5_PHS_MASK           0x3FF00000U
+#define CORE_PLL_CLK1TO5_PHS_SHIFT          20
+#define CORE_PLL_CLK1TO5_PHS_SIGNED         0
+
+#define CORE_PLL_CLK1TO5_MS_MASK            0x000FFC00U
+#define CORE_PLL_CLK1TO5_MS_SHIFT           10
+#define CORE_PLL_CLK1TO5_MS_SIGNED          0
+
+#define CORE_PLL_CLK1TO5_FREQ_MASK          0x000003FFU
+#define CORE_PLL_CLK1TO5_FREQ_SHIFT         0
+#define CORE_PLL_CLK1TO5_FREQ_SIGNED        0
+
+/*
+	Register PLL_CORE_DRP_GO
+*/
+#define TCF_PLL_PLL_CORE_DRP_GO             0x0058
+#define PLL_CORE_DRP_GO_MASK                0x00000001U
+#define PLL_CORE_DRP_GO_SHIFT               0
+#define PLL_CORE_DRP_GO_SIGNED              0
+
+/*
+	Register PLL_SYSIF_CLK0
+*/
+#define TCF_PLL_PLL_SYSIF_CLK0              0x0060
+#define SYSIF_PLL_CLK0_PHS_MASK             0x00300000U
+#define SYSIF_PLL_CLK0_PHS_SHIFT            20
+#define SYSIF_PLL_CLK0_PHS_SIGNED           0
+
+#define SYSIF_PLL_CLK0_MS_MASK              0x00030000U
+#define SYSIF_PLL_CLK0_MS_SHIFT             16
+#define SYSIF_PLL_CLK0_MS_SIGNED            0
+
+#define SYSIF_PLL_CLK0_FREQ_MASK            0x000001FFU
+#define SYSIF_PLL_CLK0_FREQ_SHIFT           0
+#define SYSIF_PLL_CLK0_FREQ_SIGNED          0
+
+/*
+	Register PLL_SYSIF_CLK1TO5
+*/
+#define TCF_PLL_PLL_SYSIF_CLK1TO5           0x0068
+#define SYSIF_PLL_CLK1TO5_PHS_MASK          0x3FF00000U
+#define SYSIF_PLL_CLK1TO5_PHS_SHIFT         20
+#define SYSIF_PLL_CLK1TO5_PHS_SIGNED        0
+
+#define SYSIF_PLL_CLK1TO5_MS_MASK           0x000FFC00U
+#define SYSIF_PLL_CLK1TO5_MS_SHIFT          10
+#define SYSIF_PLL_CLK1TO5_MS_SIGNED         0
+
+#define SYSIF_PLL_CLK1TO5_FREQ_MASK         0x000003FFU
+#define SYSIF_PLL_CLK1TO5_FREQ_SHIFT        0
+#define SYSIF_PLL_CLK1TO5_FREQ_SIGNED       0
+
+/*
+	Register PLL_SYS_DRP_GO
+*/
+#define TCF_PLL_PLL_SYS_DRP_GO              0x0070
+#define PLL_SYS_DRP_GO_MASK                 0x00000001U
+#define PLL_SYS_DRP_GO_SHIFT                0
+#define PLL_SYS_DRP_GO_SIGNED               0
+
+/*
+	Register PLL_MEMIF_CLK0
+*/
+#define TCF_PLL_PLL_MEMIF_CLK0              0x0078
+#define MEMIF_PLL_CLK0_PHS_MASK             0x00300000U
+#define MEMIF_PLL_CLK0_PHS_SHIFT            20
+#define MEMIF_PLL_CLK0_PHS_SIGNED           0
+
+#define MEMIF_PLL_CLK0_MS_MASK              0x00030000U
+#define MEMIF_PLL_CLK0_MS_SHIFT             16
+#define MEMIF_PLL_CLK0_MS_SIGNED            0
+
+#define MEMIF_PLL_CLK0_FREQ_MASK            0x000001FFU
+#define MEMIF_PLL_CLK0_FREQ_SHIFT           0
+#define MEMIF_PLL_CLK0_FREQ_SIGNED          0
+
+/*
+	Register PLL_MEMIF_CLK1TO5
+*/
+#define TCF_PLL_PLL_MEMIF_CLK1TO5           0x0080
+#define MEMIF_PLL_CLK1TO5_PHS_MASK          0x3FF00000U
+#define MEMIF_PLL_CLK1TO5_PHS_SHIFT         20
+#define MEMIF_PLL_CLK1TO5_PHS_SIGNED        0
+
+#define MEMIF_PLL_CLK1TO5_MS_MASK           0x000FFC00U
+#define MEMIF_PLL_CLK1TO5_MS_SHIFT          10
+#define MEMIF_PLL_CLK1TO5_MS_SIGNED         0
+
+#define MEMIF_PLL_CLK1TO5_FREQ_MASK         0x000003FFU
+#define MEMIF_PLL_CLK1TO5_FREQ_SHIFT        0
+#define MEMIF_PLL_CLK1TO5_FREQ_SIGNED       0
+
+/*
+	Register PLL_MEM_DRP_GO
+*/
+#define TCF_PLL_PLL_MEM_DRP_GO              0x0088
+#define PLL_MEM_DRP_GO_MASK                 0x00000001U
+#define PLL_MEM_DRP_GO_SHIFT                0
+#define PLL_MEM_DRP_GO_SIGNED               0
+
+/*
+	Register PLL_ALL_DRP_GO
+*/
+#define TCF_PLL_PLL_ALL_DRP_GO              0x0090
+#define PLL_ALL_DRP_GO_MASK                 0x00000001U
+#define PLL_ALL_DRP_GO_SHIFT                0
+#define PLL_ALL_DRP_GO_SIGNED               0
+
+/*
+	Register PLL_DRP_STATUS
+*/
+#define TCF_PLL_PLL_DRP_STATUS              0x0098
+#define PLL_LOCKS_MASK                      0x00003F00U
+#define PLL_LOCKS_SHIFT                     8
+#define PLL_LOCKS_SIGNED                    0
+
+#define PLL_DRP_GOOD_MASK                   0x0000003FU
+#define PLL_DRP_GOOD_SHIFT                  0
+#define PLL_DRP_GOOD_SIGNED                 0
+
+#endif /* !defined(_TCF_PLL_H_) */
+
+/*****************************************************************************
+ End of file (tcf_pll.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tcf_rgbpdp_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tcf_rgbpdp_regs.h
new file mode 100644
index 0000000..e87ba61
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/apollo/tcf_rgbpdp_regs.h
@@ -0,0 +1,559 @@
+/*************************************************************************/ /*!
+@Title          Test Chip Framework PDP register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Autogenerated C -- do not edit
+                Generated from: tcf_rgbpdp_regs.def
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_TCF_RGBPDP_REGS_H_)
+#define _TCF_RGBPDP_REGS_H_
+
+/*
+	Register PVR_TCF_RGBPDP_STR1SURF
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF  0x0000
+#define STR1HEIGHT_MASK                     0x000007FFU
+#define STR1HEIGHT_SHIFT                    0
+#define STR1HEIGHT_SIGNED                   0
+
+#define STR1WIDTH_MASK                      0x003FF800U
+#define STR1WIDTH_SHIFT                     11
+#define STR1WIDTH_SIGNED                    0
+
+#define STR1PIXFMT_MASK                     0x0F000000U
+#define STR1PIXFMT_SHIFT                    24
+#define STR1PIXFMT_SIGNED                   0
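+
+/* Illustrative packing of the STR1SURF fields above (a sketch only; the
+   exact field semantics, e.g. size versus size-minus-one, are defined by
+   the hardware; ui32Str1Surf/uiHeight/uiWidth/uiPixFmt are hypothetical):
+
+	ui32Str1Surf = ((uiHeight << STR1HEIGHT_SHIFT) & STR1HEIGHT_MASK) |
+	               ((uiWidth  << STR1WIDTH_SHIFT)  & STR1WIDTH_MASK)  |
+	               ((uiPixFmt << STR1PIXFMT_SHIFT) & STR1PIXFMT_MASK);
+*/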
+
+/*
+	Register PVR_TCF_RGBPDP_STR1ADDRCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL 0x0004
+#define STR1BASE_MASK                       0x03FFFFFFU
+#define STR1BASE_SHIFT                      0
+#define STR1BASE_SIGNED                     0
+
+#define STR1INTFIELD_MASK                   0x40000000U
+#define STR1INTFIELD_SHIFT                  30
+#define STR1INTFIELD_SIGNED                 0
+
+#define STR1STREN_MASK                      0x80000000U
+#define STR1STREN_SHIFT                     31
+#define STR1STREN_SIGNED                    0
+
+/*
+	Register PVR_PDP_STR1POSN
+*/
+#define TCF_RGBPDP_PVR_PDP_STR1POSN         0x0008
+#define STR1STRIDE_MASK                     0x000003FFU
+#define STR1STRIDE_SHIFT                    0
+#define STR1STRIDE_SIGNED                   0
+
+/*
+	Register PVR_TCF_RGBPDP_MEMCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_MEMCTRL   0x000C
+#define MEMREFRESH_MASK                     0xC0000000U
+#define MEMREFRESH_SHIFT                    30
+#define MEMREFRESH_SIGNED                   0
+
+/*
+	Register PVR_TCF_RGBPDP_STRCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL   0x0010
+#define BURSTLEN_GFX_MASK                   0x000000FFU
+#define BURSTLEN_GFX_SHIFT                  0
+#define BURSTLEN_GFX_SIGNED                 0
+
+#define THRESHOLD_GFX_MASK                  0x0000FF00U
+#define THRESHOLD_GFX_SHIFT                 8
+#define THRESHOLD_GFX_SIGNED                0
+
+/*
+	Register PVR_TCF_RGBPDP_SYNCCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL  0x0014
+#define HSDIS_MASK                          0x00000001U
+#define HSDIS_SHIFT                         0
+#define HSDIS_SIGNED                        0
+
+#define HSPOL_MASK                          0x00000002U
+#define HSPOL_SHIFT                         1
+#define HSPOL_SIGNED                        0
+
+#define VSDIS_MASK                          0x00000004U
+#define VSDIS_SHIFT                         2
+#define VSDIS_SIGNED                        0
+
+#define VSPOL_MASK                          0x00000008U
+#define VSPOL_SHIFT                         3
+#define VSPOL_SIGNED                        0
+
+#define BLNKDIS_MASK                        0x00000010U
+#define BLNKDIS_SHIFT                       4
+#define BLNKDIS_SIGNED                      0
+
+#define BLNKPOL_MASK                        0x00000020U
+#define BLNKPOL_SHIFT                       5
+#define BLNKPOL_SIGNED                      0
+
+#define HS_SLAVE_MASK                       0x00000040U
+#define HS_SLAVE_SHIFT                      6
+#define HS_SLAVE_SIGNED                     0
+
+#define VS_SLAVE_MASK                       0x00000080U
+#define VS_SLAVE_SHIFT                      7
+#define VS_SLAVE_SIGNED                     0
+
+#define INTERLACE_MASK                      0x00000100U
+#define INTERLACE_SHIFT                     8
+#define INTERLACE_SIGNED                    0
+
+#define FIELDPOL_MASK                       0x00000200U
+#define FIELDPOL_SHIFT                      9
+#define FIELDPOL_SIGNED                     0
+
+#define CLKPOL_MASK                         0x00000800U
+#define CLKPOL_SHIFT                        11
+#define CLKPOL_SIGNED                       0
+
+#define CSYNC_EN_MASK                       0x00001000U
+#define CSYNC_EN_SHIFT                      12
+#define CSYNC_EN_SIGNED                     0
+
+#define FIELD_EN_MASK                       0x00002000U
+#define FIELD_EN_SHIFT                      13
+#define FIELD_EN_SIGNED                     0
+
+#define UPDWAIT_MASK                        0x000F0000U
+#define UPDWAIT_SHIFT                       16
+#define UPDWAIT_SIGNED                      0
+
+#define UPDCTRL_MASK                        0x01000000U
+#define UPDCTRL_SHIFT                       24
+#define UPDCTRL_SIGNED                      0
+
+#define UPDINTCTRL_MASK                     0x02000000U
+#define UPDINTCTRL_SHIFT                    25
+#define UPDINTCTRL_SIGNED                   0
+
+#define UPDSYNCTRL_MASK                     0x04000000U
+#define UPDSYNCTRL_SHIFT                    26
+#define UPDSYNCTRL_SIGNED                   0
+
+#define POWERDN_MASK                        0x10000000U
+#define POWERDN_SHIFT                       28
+#define POWERDN_SIGNED                      0
+
+#define DISP_RST_MASK                       0x20000000U
+#define DISP_RST_SHIFT                      29
+#define DISP_RST_SIGNED                     0
+
+#define SYNCACTIVE_MASK                     0x80000000U
+#define SYNCACTIVE_SHIFT                    31
+#define SYNCACTIVE_SIGNED                   0
+
+/*
+	Register PVR_TCF_RGBPDP_BORDCOL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL   0x0018
+#define BORDCOL_MASK                        0x00FFFFFFU
+#define BORDCOL_SHIFT                       0
+#define BORDCOL_SIGNED                      0
+
+/*
+	Register PVR_TCF_RGBPDP_UPDCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL   0x001C
+#define UPDFIELD_MASK                       0x00000001U
+#define UPDFIELD_SHIFT                      0
+#define UPDFIELD_SIGNED                     0
+
+/*
+	Register PVR_TCF_RGBPDP_HSYNC1
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1    0x0020
+#define HT_MASK                             0x00000FFFU
+#define HT_SHIFT                            0
+#define HT_SIGNED                           0
+
+#define HBPS_MASK                           0x0FFF0000U
+#define HBPS_SHIFT                          16
+#define HBPS_SIGNED                         0
+
+/*
+	Register PVR_TCF_RGBPDP_HSYNC2
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2    0x0024
+#define HLBS_MASK                           0x00000FFFU
+#define HLBS_SHIFT                          0
+#define HLBS_SIGNED                         0
+
+#define HAS_MASK                            0x0FFF0000U
+#define HAS_SHIFT                           16
+#define HAS_SIGNED                          0
+
+/*
+	Register PVR_TCF_RGBPDP_HSYNC3
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3    0x0028
+#define HRBS_MASK                           0x00000FFFU
+#define HRBS_SHIFT                          0
+#define HRBS_SIGNED                         0
+
+#define HFPS_MASK                           0x0FFF0000U
+#define HFPS_SHIFT                          16
+#define HFPS_SIGNED                         0
+
+/*
+	Register PVR_TCF_RGBPDP_VSYNC1
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1    0x002C
+#define VT_MASK                             0x00000FFFU
+#define VT_SHIFT                            0
+#define VT_SIGNED                           0
+
+#define VBPS_MASK                           0x0FFF0000U
+#define VBPS_SHIFT                          16
+#define VBPS_SIGNED                         0
+
+/*
+	Register PVR_TCF_RGBPDP_VSYNC2
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2    0x0030
+#define VTBS_MASK                           0x00000FFFU
+#define VTBS_SHIFT                          0
+#define VTBS_SIGNED                         0
+
+#define VAS_MASK                            0x0FFF0000U
+#define VAS_SHIFT                           16
+#define VAS_SIGNED                          0
+
+/*
+	Register PVR_TCF_RGBPDP_VSYNC3
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3    0x0034
+#define VBBS_MASK                           0x00000FFFU
+#define VBBS_SHIFT                          0
+#define VBBS_SIGNED                         0
+
+#define VFPS_MASK                           0x0FFF0000U
+#define VFPS_SHIFT                          16
+#define VFPS_SIGNED                         0
+
+/*
+	Register PVR_TCF_RGBPDP_HDECTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL   0x0038
+#define HDEF_MASK                           0x00000FFFU
+#define HDEF_SHIFT                          0
+#define HDEF_SIGNED                         0
+
+#define HDES_MASK                           0x0FFF0000U
+#define HDES_SHIFT                          16
+#define HDES_SIGNED                         0
+
+/*
+	Register PVR_TCF_RGBPDP_VDECTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL   0x003C
+#define VDEF_MASK                           0x00000FFFU
+#define VDEF_SHIFT                          0
+#define VDEF_SIGNED                         0
+
+#define VDES_MASK                           0x0FFF0000U
+#define VDES_SHIFT                          16
+#define VDES_SIGNED                         0
+
+/*
+	Register PVR_TCF_RGBPDP_VEVENT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT    0x0040
+#define VFETCH_MASK                         0x00000FFFU
+#define VFETCH_SHIFT                        0
+#define VFETCH_SIGNED                       0
+
+#define VEVENT_MASK                         0x0FFF0000U
+#define VEVENT_SHIFT                        16
+#define VEVENT_SIGNED                       0
+
+/*
+	Register PVR_TCF_RGBPDP_OPMASK
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_OPMASK    0x0044
+#define MASKR_MASK                          0x000000FFU
+#define MASKR_SHIFT                         0
+#define MASKR_SIGNED                        0
+
+#define MASKG_MASK                          0x0000FF00U
+#define MASKG_SHIFT                         8
+#define MASKG_SIGNED                        0
+
+#define MASKB_MASK                          0x00FF0000U
+#define MASKB_SHIFT                         16
+#define MASKB_SIGNED                        0
+
+#define BLANKLEVEL_MASK                     0x40000000U
+#define BLANKLEVEL_SHIFT                    30
+#define BLANKLEVEL_SIGNED                   0
+
+#define MASKLEVEL_MASK                      0x80000000U
+#define MASKLEVEL_SHIFT                     31
+#define MASKLEVEL_SIGNED                    0
+
+/*
+	Register PVR_TCF_RGBPDP_INTSTAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT   0x0048
+#define INTS_HBLNK0_MASK                    0x00000001U
+#define INTS_HBLNK0_SHIFT                   0
+#define INTS_HBLNK0_SIGNED                  0
+
+#define INTS_HBLNK1_MASK                    0x00000002U
+#define INTS_HBLNK1_SHIFT                   1
+#define INTS_HBLNK1_SIGNED                  0
+
+#define INTS_VBLNK0_MASK                    0x00000004U
+#define INTS_VBLNK0_SHIFT                   2
+#define INTS_VBLNK0_SIGNED                  0
+
+#define INTS_VBLNK1_MASK                    0x00000008U
+#define INTS_VBLNK1_SHIFT                   3
+#define INTS_VBLNK1_SIGNED                  0
+
+#define INTS_STR1URUN_MASK                  0x00000010U
+#define INTS_STR1URUN_SHIFT                 4
+#define INTS_STR1URUN_SIGNED                0
+
+#define INTS_STR1ORUN_MASK                  0x00000020U
+#define INTS_STR1ORUN_SHIFT                 5
+#define INTS_STR1ORUN_SIGNED                0
+
+#define INTS_DISPURUN_MASK                  0x00000040U
+#define INTS_DISPURUN_SHIFT                 6
+#define INTS_DISPURUN_SIGNED                0
+
+/*
+	Register PVR_TCF_RGBPDP_INTENAB
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB   0x004C
+#define INTEN_HBLNK0_MASK                   0x00000001U
+#define INTEN_HBLNK0_SHIFT                  0
+#define INTEN_HBLNK0_SIGNED                 0
+
+#define INTEN_HBLNK1_MASK                   0x00000002U
+#define INTEN_HBLNK1_SHIFT                  1
+#define INTEN_HBLNK1_SIGNED                 0
+
+#define INTEN_VBLNK0_MASK                   0x00000004U
+#define INTEN_VBLNK0_SHIFT                  2
+#define INTEN_VBLNK0_SIGNED                 0
+
+#define INTEN_VBLNK1_MASK                   0x00000008U
+#define INTEN_VBLNK1_SHIFT                  3
+#define INTEN_VBLNK1_SIGNED                 0
+
+#define INTEN_STR1URUN_MASK                 0x00000010U
+#define INTEN_STR1URUN_SHIFT                4
+#define INTEN_STR1URUN_SIGNED               0
+
+#define INTEN_STR1ORUN_MASK                 0x00000020U
+#define INTEN_STR1ORUN_SHIFT                5
+#define INTEN_STR1ORUN_SIGNED               0
+
+#define INTEN_DISPURUN_MASK                 0x00000040U
+#define INTEN_DISPURUN_SHIFT                6
+#define INTEN_DISPURUN_SIGNED               0
+
+/*
+	Register PVR_TCF_RGBPDP_INTCLEAR
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR  0x0050
+#define INTCLR_HBLNK0_MASK                  0x00000001U
+#define INTCLR_HBLNK0_SHIFT                 0
+#define INTCLR_HBLNK0_SIGNED                0
+
+#define INTCLR_HBLNK1_MASK                  0x00000002U
+#define INTCLR_HBLNK1_SHIFT                 1
+#define INTCLR_HBLNK1_SIGNED                0
+
+#define INTCLR_VBLNK0_MASK                  0x00000004U
+#define INTCLR_VBLNK0_SHIFT                 2
+#define INTCLR_VBLNK0_SIGNED                0
+
+#define INTCLR_VBLNK1_MASK                  0x00000008U
+#define INTCLR_VBLNK1_SHIFT                 3
+#define INTCLR_VBLNK1_SIGNED                0
+
+#define INTCLR_STR1URUN_MASK                0x00000010U
+#define INTCLR_STR1URUN_SHIFT               4
+#define INTCLR_STR1URUN_SIGNED              0
+
+#define INTCLR_STR1ORUN_MASK                0x00000020U
+#define INTCLR_STR1ORUN_SHIFT               5
+#define INTCLR_STR1ORUN_SIGNED              0
+
+#define INTCLR_DISPURUN_MASK                0x00000040U
+#define INTCLR_DISPURUN_SHIFT               6
+#define INTCLR_DISPURUN_SIGNED              0
+
+/*
+	Register PVR_TCF_RGBPDP_INTCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCTRL   0x0054
+#define HBLNK_LINENO_MASK                   0x00000FFFU
+#define HBLNK_LINENO_SHIFT                  0
+#define HBLNK_LINENO_SIGNED                 0
+
+#define HBLNK_LINE_MASK                     0x00010000U
+#define HBLNK_LINE_SHIFT                    16
+#define HBLNK_LINE_SIGNED                   0
+
+/*
+	Register PVR_TCF_RGBPDP_SIGNAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_SIGNAT    0x0058
+#define SIGNATURE_MASK                      0xFFFFFFFFU
+#define SIGNATURE_SHIFT                     0
+#define SIGNATURE_SIGNED                    0
+
+/*
+	Register PVR_TCF_RGBPDP_LINESTAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_LINESTAT  0x005C
+#define LINENO_MASK                         0x00000FFFU
+#define LINENO_SHIFT                        0
+#define LINENO_SIGNED                       0
+
+/*
+	Register PVR_TCF_RGBPDP_DBGCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGCTRL   0x0060
+#define DBG_ENAB_MASK                       0x00000001U
+#define DBG_ENAB_SHIFT                      0
+#define DBG_ENAB_SIGNED                     0
+
+#define DBG_READ_MASK                       0x00000002U
+#define DBG_READ_SHIFT                      1
+#define DBG_READ_SIGNED                     0
+
+/*
+	Register PVR_TCF_RGBPDP_DBGDATA
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGDATA   0x0064
+#define DBG_DATA_MASK                       0x00FFFFFFU
+#define DBG_DATA_SHIFT                      0
+#define DBG_DATA_SIGNED                     0
+
+/*
+	Register PVR_TCF_RGBPDP_DBGSIDE
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGSIDE   0x0068
+#define DBG_SIDE_MASK                       0x00000007U
+#define DBG_SIDE_SHIFT                      0
+#define DBG_SIDE_SIGNED                     0
+
+#define DBG_VAL_MASK                        0x00000008U
+#define DBG_VAL_SHIFT                       3
+#define DBG_VAL_SIGNED                      0
+
+/*
+	Register PVR_TCF_RGBPDP_REGLD_STAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_STAT 0x0070
+#define REGLD_ADDROUT_MASK                  0x00FFFFFFU
+#define REGLD_ADDROUT_SHIFT                 0
+#define REGLD_ADDROUT_SIGNED                0
+
+#define REGLD_ADDREN_MASK                   0x80000000U
+#define REGLD_ADDREN_SHIFT                  31
+#define REGLD_ADDREN_SIGNED                 0
+
+/*
+	Register PVR_TCF_RGBPDP_REGLD_CTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_CTRL 0x0074
+#define REGLD_ADDRIN_MASK                   0x00FFFFFFU
+#define REGLD_ADDRIN_SHIFT                  0
+#define REGLD_ADDRIN_SIGNED                 0
+
+#define REGLD_VAL_MASK                      0x01000000U
+#define REGLD_VAL_SHIFT                     24
+#define REGLD_VAL_SIGNED                    0
+
+#define REGLD_ADDRLEN_MASK                  0xFE000000U
+#define REGLD_ADDRLEN_SHIFT                 25
+#define REGLD_ADDRLEN_SIGNED                0
+
+/*
+	Register PVR_TCF_RGBPDP_CORE_ID
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_ID   0x0078
+#define CONFIG_ID_MASK                      0x0000FFFFU
+#define CONFIG_ID_SHIFT                     0
+#define CONFIG_ID_SIGNED                    0
+
+#define CORE_ID_MASK                        0x00FF0000U
+#define CORE_ID_SHIFT                       16
+#define CORE_ID_SIGNED                      0
+
+#define GROUP_ID_MASK                       0xFF000000U
+#define GROUP_ID_SHIFT                      24
+#define GROUP_ID_SIGNED                     0
+
+/*
+	Register PVR_TCF_RGBPDP_CORE_REV
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_REV  0x007C
+#define MAINT_REV_MASK                      0x000000FFU
+#define MAINT_REV_SHIFT                     0
+#define MAINT_REV_SIGNED                    0
+
+#define MINOR_REV_MASK                      0x0000FF00U
+#define MINOR_REV_SHIFT                     8
+#define MINOR_REV_SIGNED                    0
+
+#define MAJOR_REV_MASK                      0x00FF0000U
+#define MAJOR_REV_SHIFT                     16
+#define MAJOR_REV_SIGNED                    0
+
+#endif /* !defined(_TCF_RGBPDP_REGS_H_) */
+
+/*****************************************************************************
+ End of file (tcf_rgbpdp_regs.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/cache_km.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/cache_km.c
new file mode 100644
index 0000000..34142ed
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/cache_km.c
@@ -0,0 +1,3526 @@
+/*************************************************************************/ /*!
+@File           cache_km.c
+@Title          CPU d-cache maintenance operations framework
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements server side code for CPU d-cache maintenance taking
+                into account the idiosyncrasies of the various types of CPU
+                d-cache instruction-set architecture (ISA) maintenance
+                mechanisms.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined(LINUX)
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <asm/current.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#endif
+
+#include "pmr.h"
+#include "log2.h"
+#include "device.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "cache_km.h"
+#include "pvr_debug.h"
+#include "lock_types.h"
+#include "allocmem.h"
+#include "process_stats.h"
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+#include "ri_server.h"
+#endif
+#include "devicemem.h"
+#include "pvrsrv_apphint.h"
+#include "pvrsrv_sync_server.h"
+#include "km_apphint_defs.h"
+
+/* This header must always be included last */
+#if defined(LINUX)
+#include "kernel_compatibility.h"
+#endif
+
+/* Top-level file-local build definitions */
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS) && defined(LINUX)
+#define CACHEOP_DEBUG
+#define CACHEOP_STATS_ITEMS_MAX 			32
+#define INCR_WRAP(x)						((x+1) >= CACHEOP_STATS_ITEMS_MAX ? 0 : (x+1))
+#define DECR_WRAP(x)						((x-1) < 0 ? (CACHEOP_STATS_ITEMS_MAX-1) : (x-1))
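+/* e.g. (illustrative): with CACHEOP_STATS_ITEMS_MAX == 32, INCR_WRAP(31) == 0
+   and DECR_WRAP(0) == 31, so the stats write/read indices traverse
+   asStatsExecuted[] as a circular buffer. */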
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+/* Refer to CacheOpStatsExecLogHeader() for header item names */
+#define CACHEOP_RI_PRINTF_HEADER			"%-8s %-10s %-10s %-5s %-16s %-16s %-10s %-10s %-18s %-18s %-12s"
+#define CACHEOP_RI_PRINTF					"%-8d %-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-18llu %-18llu 0x%-10x\n"
+#else
+#define CACHEOP_PRINTF_HEADER				"%-8s %-10s %-10s %-5s %-10s %-10s %-18s %-18s %-12s"
+#define CACHEOP_PRINTF						"%-8d %-10s %-10s %-5s 0x%-8llx 0x%-8llx %-18llu %-18llu 0x%-10x\n"
+#endif
+#endif
+
+//#define CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING		/* Force OS page (not cache line) flush granularity */
+#define CACHEOP_PVR_ASSERT(x)							/* Define as PVR_ASSERT(x), enable for swdev & testing */
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+#define CACHEOP_THREAD_WAIT_TIMEOUT			0ULL		/* Wait indefinitely */
+#else
+#define CACHEOP_THREAD_WAIT_TIMEOUT			500000ULL	/* Wait 500ms between waits unless woken up on demand */
+#endif
+#define CACHEOP_FENCE_WAIT_TIMEOUT			1000ULL		/* Wait 1ms between wait events unless woken up */
+#define CACHEOP_FENCE_RETRY_ABORT			1000ULL		/* Number of fence retries after which the fence operation aborts */
+#define CACHEOP_SEQ_MIDPOINT (IMG_UINT32)	0x7FFFFFFF	/* Midpoint at which seqNum(s) are rebased and compared */
+#define CACHEOP_ABORT_FENCE_ERROR_STRING	"detected stalled client, retrying cacheop fence"
+#define CACHEOP_NO_GFLUSH_ERROR_STRING		"global flush requested on CPU without support"
+#define CACHEOP_DEVMEM_OOR_ERROR_STRING		"cacheop device memory request is out of range"
+#define CACHEOP_MAX_DEBUG_MESSAGE_LEN		160
+
+typedef struct _CACHEOP_WORK_ITEM_
+{
+	PMR *psPMR;
+	IMG_UINT32 ui32GFSeqNum;
+	IMG_UINT32 ui32OpSeqNum;
+	IMG_DEVMEM_SIZE_T uiSize;
+	PVRSRV_CACHE_OP uiCacheOp;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	PVRSRV_TIMELINE iTimeline;
+	SYNC_TIMELINE_OBJ sSWTimelineObj;
+	PVRSRV_DEVICE_NODE *psDevNode;
+#if defined(CACHEOP_DEBUG)
+	IMG_UINT64 ui64EnqueuedTime;
+	IMG_UINT64 ui64DequeuedTime;
+	IMG_UINT64 ui64ExecuteTime;
+	IMG_BOOL bDeferred;
+	IMG_BOOL bKMReq;
+	IMG_BOOL bRBF;
+	IMG_BOOL bUMF;
+	IMG_PID pid;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+	RGXFWIF_DM eFenceOpType;
+#endif
+#endif
+} CACHEOP_WORK_ITEM;
+
+typedef struct _CACHEOP_STATS_EXEC_ITEM_
+{
+	IMG_PID pid;
+	IMG_UINT32 ui32OpSeqNum;
+	PVRSRV_CACHE_OP uiCacheOp;
+	IMG_DEVMEM_SIZE_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT64 ui64EnqueuedTime;
+	IMG_UINT64 ui64DequeuedTime;
+	IMG_UINT64 ui64ExecuteTime;
+	IMG_BOOL bIsFence;
+	IMG_BOOL bKMReq;
+	IMG_BOOL bRBF;
+	IMG_BOOL bUMF;
+	IMG_BOOL bDeferred;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_DEV_PHYADDR sDevPAddr;
+	RGXFWIF_DM eFenceOpType;
+#endif
+} CACHEOP_STATS_EXEC_ITEM;
+
+typedef enum _CACHEOP_CONFIG_
+{
+	CACHEOP_CONFIG_DEFAULT = 0,
+	/* cache flush mechanism types */
+	CACHEOP_CONFIG_KRBF    = 1,
+	CACHEOP_CONFIG_KGF     = 2,
+	CACHEOP_CONFIG_URBF    = 4,
+	/* sw-emulated deferred flush mechanism */
+	CACHEOP_CONFIG_KDF     = 8,
+	/* pseudo configuration items */
+	CACHEOP_CONFIG_LAST    = 16,
+	CACHEOP_CONFIG_KLOG    = 16,
+	CACHEOP_CONFIG_ALL     = 31
+} CACHEOP_CONFIG;
+
+typedef struct _CACHEOP_WORK_QUEUE_
+{
+/*
+ * Init. state & the primary device node that the
+ * framework is anchored on.
+ */
+	IMG_BOOL bInit;
+/*
+  MMU page size/shift & d-cache line size
+ */
+	size_t uiPageSize;
+	IMG_UINT32 uiLineSize;
+	IMG_UINT32 uiLineShift;
+	IMG_UINT32 uiPageShift;
+	PVRSRV_CACHE_OP_ADDR_TYPE uiCacheOpAddrType;
+/*
+  CacheOp deferred queueing protocol
+  + Implementation geared for performance, atomic counter based
+	- Value Space is 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> n.
+	- Index Space is 0 -> 1 -> 2 -> 3 -> 0 -> 1 -> 2 -> 3 -> 0 -> m.
+		- Index = Value modulo CACHEOP_INDICES_MAX.
+  + Write counter never collides with read counter in index space
+	- Unless at start of day when both are initialised to zero.
+	- This means we sacrifice one entry when the queue is full.
+	- Incremented by producer
+		- Value space tracks total number of CacheOps queued.
+		- Index space identifies CacheOp CCB queue index.
+  + Read counter increments towards write counter in value space
+	- Empty queue occurs when read equals write counter.
+	- Wrap-round logic handled by consumer as/when needed.
+	- Incremented by consumer
+		- Value space tracks total # of CacheOps executed.
+		- Index space identifies CacheOp CCB queue index.
+  + Total queued size adjusted up/down during write/read activity
+	- Counter might overflow but does not compromise framework.
+ */
+	ATOMIC_T hReadCounter;
+	ATOMIC_T hWriteCounter;
+/*
+  CacheOp sequence numbers
+  + hCommonSeqNum:
+	- Common sequence, numbers every CacheOp operation in both UM/KM.
+	- In KM
+		- Every deferred CacheOp (on behalf of UM) gets a unique seqNum.
+		- Last executed deferred CacheOp updates gsCwq.hCompletedSeqNum.
+		- Every GF operation (if supported) also gets a unique seqNum.
+		- Last executed GF operation updates CACHEOP_INFO_GFSEQNUM0.
+		- Under debug, all CacheOp gets a unique seqNum for tracking.
+		- This includes all UM/KM synchronous non-deferred CacheOp(s)
+	- In UM
+		- If the processor architecture supports GF maintenance (in KM)
+		- All UM CacheOp samples CACHEOP_INFO_GFSEQNUM0 via info. page.
+		- CacheOp(s) discarded if another GF occurs before execution.
+		- CacheOp(s) discarding happens in both UM and KM space.
+  + hCompletedSeqNum:
+	- Tracks last executed KM/deferred RBF/Global<timeline> CacheOp(s)
+  + hDeferredSize:
+	- Running total of size of currently deferred CacheOp in queue.
+ */
+	ATOMIC_T hDeferredSize;
+	ATOMIC_T hCommonSeqNum;
+	ATOMIC_T hCompletedSeqNum;
+/*
+  CacheOp information page
+  + psInfoPagePMR:
+	- Single system-wide OS page that is multi-mapped in UM/KM.
+	- Mapped into clients using read-only memory protection.
+	- Mapped into server using read/write memory protection.
+	- Contains information pertaining to cache framework.
+  + pui32InfoPage:
+	- Server linear address pointer to said information page.
+	- Each info-page entry currently of sizeof(IMG_UINT32).
+ */
+	PMR *psInfoPagePMR;
+	IMG_UINT32 *pui32InfoPage;
+/*
+  CacheOp deferred work-item queue
+  + CACHEOP_INDICES_LOG2_SIZE
+	- Sized using GF/RBF ratio
+ */
+#define CACHEOP_INDICES_LOG2_SIZE	(4)
+#define CACHEOP_INDICES_MAX			(1 << CACHEOP_INDICES_LOG2_SIZE)
+#define CACHEOP_INDICES_MASK		(CACHEOP_INDICES_MAX-1)
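+/* e.g. (illustrative): with CACHEOP_INDICES_LOG2_SIZE == 4 the queue holds
+   16 work items, so a counter value of 18 maps to CCB index
+   (18 & CACHEOP_INDICES_MASK) == 2. */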
+	CACHEOP_WORK_ITEM asWorkItems[CACHEOP_INDICES_MAX];
+#if defined(CACHEOP_DEBUG)
+/*
+  CacheOp statistics
+ */
+	void *pvStatsEntry;
+	IMG_HANDLE hStatsExecLock;
+	IMG_UINT32 ui32ServerASync;
+	IMG_UINT32 ui32ServerSyncVA;
+	IMG_UINT32 ui32ServerSync;
+	IMG_UINT32 ui32ServerRBF;
+	IMG_UINT32 ui32ServerGF;
+	IMG_UINT32 ui32ServerDGF;
+	IMG_UINT32 ui32ServerDTL;
+	IMG_UINT32 ui32ClientSync;
+	IMG_UINT32 ui32ClientRBF;
+	IMG_UINT32 ui32KMDiscards;
+	IMG_UINT32 ui32UMDiscards;
+	IMG_UINT32 ui32TotalFenceOps;
+	IMG_UINT32 ui32TotalExecOps;
+	IMG_UINT32 ui32AvgExecTime;
+	IMG_UINT32 ui32AvgFenceTime;
+	IMG_INT32 i32StatsExecWriteIdx;
+	CACHEOP_STATS_EXEC_ITEM asStatsExecuted[CACHEOP_STATS_ITEMS_MAX];
+#endif
+/*
+  CacheOp (re)configuration
+ */
+	void *pvConfigTune;
+	IMG_HANDLE hConfigLock;
+/*
+  CacheOp deferred worker thread
+  + eConfig
+	- Runtime configuration
+  + hWorkerThread
+	- CacheOp thread handler
+  + hThreadWakeUpEvtObj
+	- Event object to drive CacheOp worker thread sleep/wake-ups.
+  + hClientWakeUpEvtObj
+	- Event object to unblock stalled clients waiting on queue.
+ */
+	CACHEOP_CONFIG	eConfig;
+	IMG_UINT32		ui32Config;
+	IMG_HANDLE		hWorkerThread;
+	IMG_HANDLE 		hDeferredLock;
+	IMG_HANDLE 		hGlobalFlushLock;
+	IMG_HANDLE		hThreadWakeUpEvtObj;
+	IMG_HANDLE		hClientWakeUpEvtObj;
+	IMG_UINT32		ui32FenceWaitTimeUs;
+	IMG_UINT32		ui32FenceRetryAbort;
+	IMG_BOOL		bNoGlobalFlushImpl;
+	IMG_BOOL		bSupportsUMFlush;
+} CACHEOP_WORK_QUEUE;
+
+/* Top-level CacheOp framework object */
+static CACHEOP_WORK_QUEUE gsCwq;
+
+#define CacheOpConfigSupports(e) ((gsCwq.eConfig & (e)) ? IMG_TRUE : IMG_FALSE)
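+/* e.g. (illustrative): CacheOpConfigSupports(CACHEOP_CONFIG_KDF) is IMG_TRUE
+   only if deferred flushing was enabled in gsCwq.eConfig at (re)configure
+   time; several CACHEOP_CONFIG_* bits may be set simultaneously. */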
+
+static INLINE IMG_UINT32 CacheOpIdxRead(ATOMIC_T *phCounter)
+{
+	IMG_UINT32 ui32Idx = OSAtomicRead(phCounter);
+	return ui32Idx & CACHEOP_INDICES_MASK;
+}
+
+static INLINE IMG_UINT32 CacheOpIdxIncrement(ATOMIC_T *phCounter)
+{
+	IMG_UINT32 ui32Idx = OSAtomicIncrement(phCounter);
+	return ui32Idx & CACHEOP_INDICES_MASK;
+}
+
+static INLINE IMG_UINT32 CacheOpIdxNext(ATOMIC_T *phCounter)
+{
+	IMG_UINT32 ui32Idx = OSAtomicRead(phCounter);
+	return ++ui32Idx & CACHEOP_INDICES_MASK;
+}
+
+static INLINE IMG_UINT32 CacheOpIdxSpan(ATOMIC_T *phLhs, ATOMIC_T *phRhs)
+{
+	return OSAtomicRead(phLhs) - OSAtomicRead(phRhs);
+}
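+
+/* A minimal sketch of how the index helpers above realise the queueing
+   protocol (illustrative only, not part of the driver): the queue is empty
+   when the read counter has caught up with the write counter in value
+   space, and is treated as full when the next write index would collide
+   with the read index, sacrificing one entry:
+
+	bEmpty = !CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter);
+	bFull  = CacheOpIdxNext(&gsCwq.hWriteCounter) == CacheOpIdxRead(&gsCwq.hReadCounter);
+*/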
+
+/* Divide by ten without a hardware divide: the shift-add series below
+   converges on uiNum * (8/10), the final right-shift by 3 then yields
+   approximately uiNum/10, and the remainder test corrects the slight
+   underestimate. */
+static INLINE IMG_UINT64 DivBy10(IMG_UINT64 uiNum)
+{
+	IMG_UINT64 uiQuot;
+	IMG_UINT64 uiRem;
+
+	/* uiQuot converges towards uiNum * 0.8 */
+	uiQuot = (uiNum >> 1) + (uiNum >> 2);
+	uiQuot = uiQuot + (uiQuot >> 4);
+	uiQuot = uiQuot + (uiQuot >> 8);
+	uiQuot = uiQuot + (uiQuot >> 16);
+	/* uiQuot is now ~uiNum * 0.8 / 8 == uiNum/10, rounded down */
+	uiQuot = uiQuot >> 3;
+	/* uiRem = uiNum - (uiQuot * 10) */
+	uiRem  = uiNum - (((uiQuot << 2) + uiQuot) << 1);
+
+	return uiQuot + (uiRem > 9);
+}
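+
+/* For instance (illustrative): DivBy10(1999) returns 199, since the series
+   yields uiQuot == 199 with uiRem == 9 and no correction is applied. The
+   statistics code below applies DivBy10() three times to convert nanosecond
+   timestamps to microseconds. */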
+
+/* Callback to dump info of cacheop thread in debug_dump */
+static void CacheOpThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+				  				void *pvDumpDebugFile)
+{
+	PVR_DUMPDEBUG_LOG("    Configuration: QSZ: %d, UKT: %d, KDFT: %d, "
+			  "KGFT: %d, LINESIZE: %d, PGSIZE: %d, KDF: %s, "
+			  "URBF: %s, KGF: %s, KRBF: %s",
+			  CACHEOP_INDICES_MAX,
+			  gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD],
+			  gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD],
+			  gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD],
+			  gsCwq.pui32InfoPage[CACHEOP_INFO_LINESIZE],
+			  gsCwq.pui32InfoPage[CACHEOP_INFO_PGSIZE],
+			  gsCwq.eConfig & CACHEOP_CONFIG_KDF  ? "Yes" : "No",
+			  gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No",
+			  gsCwq.eConfig & CACHEOP_CONFIG_KGF  ? "Yes" : "No",
+			  gsCwq.eConfig & CACHEOP_CONFIG_KRBF ? "Yes" : "No"
+			  );
+	PVR_DUMPDEBUG_LOG("    Pending deferred CacheOp entries : %u",
+			  CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter));
+}
+
+#if defined(CACHEOP_DEBUG)
+static INLINE void CacheOpStatsExecLogHeader(IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN])
+{
+	OSSNPrintf(szBuffer, CACHEOP_MAX_DEBUG_MESSAGE_LEN,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+				CACHEOP_RI_PRINTF_HEADER,
+#else
+				CACHEOP_PRINTF_HEADER,
+#endif
+				"Pid",
+				"CacheOp",
+				"  Type",
+				"Mode",
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+				"DevVAddr",
+				"DevPAddr",
+#endif
+				"Offset",
+				"Size",
+				"xTime (us)",
+				"qTime (us)",
+				"SeqNum");
+}
+
+static void CacheOpStatsExecLogWrite(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+	IMG_UINT64 ui64ExecuteTime;
+	IMG_UINT64 ui64EnqueuedTime;
+	IMG_INT32 i32WriteOffset;
+
+	if (!psCacheOpWorkItem->ui32OpSeqNum && !psCacheOpWorkItem->uiCacheOp)
+	{
+		/* Items with a zero sequence number and no CacheOp would break
+		   the read-out logic, so we do not queue them */
+		return;
+	}
+	else if (psCacheOpWorkItem->bKMReq && !CacheOpConfigSupports(CACHEOP_CONFIG_KLOG))
+	{
+		/* KM requests spam the history due to their frequency, so drop them entirely unless KLOG is enabled */
+		return;
+	}
+
+	OSLockAcquire(gsCwq.hStatsExecLock);
+
+	i32WriteOffset = gsCwq.i32StatsExecWriteIdx;
+	gsCwq.asStatsExecuted[i32WriteOffset].pid = psCacheOpWorkItem->pid;
+	gsCwq.i32StatsExecWriteIdx = INCR_WRAP(gsCwq.i32StatsExecWriteIdx);
+	gsCwq.asStatsExecuted[i32WriteOffset].bRBF = psCacheOpWorkItem->bRBF;
+	gsCwq.asStatsExecuted[i32WriteOffset].bUMF = psCacheOpWorkItem->bUMF;
+	gsCwq.asStatsExecuted[i32WriteOffset].uiSize = psCacheOpWorkItem->uiSize;
+	gsCwq.asStatsExecuted[i32WriteOffset].bKMReq = psCacheOpWorkItem->bKMReq;
+	gsCwq.asStatsExecuted[i32WriteOffset].uiOffset	= psCacheOpWorkItem->uiOffset;
+	gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp = psCacheOpWorkItem->uiCacheOp;
+	gsCwq.asStatsExecuted[i32WriteOffset].bDeferred = psCacheOpWorkItem->bDeferred;
+	gsCwq.asStatsExecuted[i32WriteOffset].ui32OpSeqNum	= psCacheOpWorkItem->ui32OpSeqNum;
+	gsCwq.asStatsExecuted[i32WriteOffset].ui64ExecuteTime = psCacheOpWorkItem->ui64ExecuteTime;
+	gsCwq.asStatsExecuted[i32WriteOffset].ui64EnqueuedTime = psCacheOpWorkItem->ui64EnqueuedTime;
+	gsCwq.asStatsExecuted[i32WriteOffset].ui64DequeuedTime = psCacheOpWorkItem->ui64DequeuedTime;
+	/* During early system initialisation, only non-fence & non-PMR CacheOps are processed */
+	gsCwq.asStatsExecuted[i32WriteOffset].bIsFence = gsCwq.bInit && !psCacheOpWorkItem->psPMR;
+	CACHEOP_PVR_ASSERT(gsCwq.asStatsExecuted[i32WriteOffset].pid);
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+	if (gsCwq.bInit && psCacheOpWorkItem->psPMR)
+	{
+		IMG_CPU_PHYADDR sDevPAddr;
+		PVRSRV_ERROR eError;
+		IMG_BOOL bValid;
+
+		/* Get more detailed information regarding the sub-allocations that the
+		   PMR has from the RI manager, for the process that requested the CacheOp */
+		eError = RIDumpProcessListKM(psCacheOpWorkItem->psPMR,
+									 gsCwq.asStatsExecuted[i32WriteOffset].pid,
+									 gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
+									 &gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+
+		/* (Re)lock here as some PMRs might not have been locked */
+		eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+
+		eError = PMR_CpuPhysAddr(psCacheOpWorkItem->psPMR,
+								 gsCwq.uiPageShift,
+								 1,
+								 gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
+								 &sDevPAddr,
+								 &bValid);
+		if (eError != PVRSRV_OK)
+		{
+			eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+			PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+			goto e0;
+		}
+
+		eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+		PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+
+		gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr.uiAddr = sDevPAddr.uiAddr;
+	}
+
+	if (gsCwq.asStatsExecuted[i32WriteOffset].bIsFence)
+	{
+		gsCwq.asStatsExecuted[i32WriteOffset].eFenceOpType = psCacheOpWorkItem->eFenceOpType;
+	}
+#endif
+
+	/* Convert timing from nano-seconds to micro-seconds */
+	ui64ExecuteTime = gsCwq.asStatsExecuted[i32WriteOffset].ui64ExecuteTime;
+	ui64EnqueuedTime = gsCwq.asStatsExecuted[i32WriteOffset].ui64EnqueuedTime;
+	ui64ExecuteTime = DivBy10(DivBy10(DivBy10(ui64ExecuteTime)));
+	ui64EnqueuedTime = DivBy10(DivBy10(DivBy10(ui64EnqueuedTime)));
+
+	/* Coalesced (to global) deferred CacheOps do not contribute to statistics,
+	   as their enqueue and execute times are identical */
+	if (!gsCwq.asStatsExecuted[i32WriteOffset].bIsFence)
+	{
+		/* Calculate the rolling approximate average execution time */
+		IMG_UINT32 ui32Time = ui64EnqueuedTime < ui64ExecuteTime ?
+									ui64ExecuteTime - ui64EnqueuedTime :
+									ui64EnqueuedTime - ui64ExecuteTime;
+		if (gsCwq.ui32TotalExecOps > 2 && ui32Time)
+		{
+			gsCwq.ui32AvgExecTime -= (gsCwq.ui32AvgExecTime / gsCwq.ui32TotalExecOps);
+			gsCwq.ui32AvgExecTime += (ui32Time / gsCwq.ui32TotalExecOps);
+		}
+		else if (ui32Time)
+		{
+			gsCwq.ui32AvgExecTime = (IMG_UINT32)ui32Time;
+		}
+	}
+
+	if (! gsCwq.asStatsExecuted[i32WriteOffset].bKMReq)
+	{
+		/* This operation queues only UM CacheOps in the per-PID process statistics database */
+		PVRSRVStatsUpdateCacheOpStats(gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp,
+						gsCwq.asStatsExecuted[i32WriteOffset].ui32OpSeqNum,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+						gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr,
+						gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr,
+						gsCwq.asStatsExecuted[i32WriteOffset].eFenceOpType,
+#endif
+						gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
+						gsCwq.asStatsExecuted[i32WriteOffset].uiSize,
+						ui64EnqueuedTime < ui64ExecuteTime ?
+							ui64ExecuteTime - ui64EnqueuedTime:
+							ui64EnqueuedTime - ui64ExecuteTime,
+						gsCwq.asStatsExecuted[i32WriteOffset].bRBF,
+						gsCwq.asStatsExecuted[i32WriteOffset].bUMF,
+						gsCwq.asStatsExecuted[i32WriteOffset].bIsFence,
+						psCacheOpWorkItem->pid);
+	}
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+e0:
+#endif
+	OSLockRelease(gsCwq.hStatsExecLock);
+}
+
+static void CacheOpStatsExecLogRead(void *pvFilePtr, void *pvData,
+								OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	IMG_CHAR *pszFlushype;
+	IMG_CHAR *pszCacheOpType;
+	IMG_CHAR *pszFlushSource;
+	IMG_INT32 i32ReadOffset;
+	IMG_INT32 i32WriteOffset;
+	IMG_UINT64 ui64EnqueuedTime;
+	IMG_UINT64 ui64DequeuedTime;
+	IMG_UINT64 ui64ExecuteTime;
+	IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN] = {0};
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	OSLockAcquire(gsCwq.hStatsExecLock);
+
+	pfnOSStatsPrintf(pvFilePtr,
+			"Primary CPU d-cache architecture: LSZ: %d, URBF: %s, KGF: %s, KRBF: %s\n",
+			gsCwq.uiLineSize,
+			gsCwq.bSupportsUMFlush ? "Yes" : "No",
+			!gsCwq.bNoGlobalFlushImpl ? "Yes" : "No",
+			"Yes" /* KRBF mechanism always available */
+		);
+
+	pfnOSStatsPrintf(pvFilePtr,
+			"Configuration: QSZ: %d, UKT: %d, KDFT: %d, KGFT: %d, KDF: %s, URBF: %s, KGF: %s, KRBF: %s\n",
+			CACHEOP_INDICES_MAX,
+			gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD],
+			gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD],
+			gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD],
+			gsCwq.eConfig & CACHEOP_CONFIG_KDF  ? "Yes" : "No",
+			gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No",
+			gsCwq.eConfig & CACHEOP_CONFIG_KGF  ? "Yes" : "No",
+			gsCwq.eConfig & CACHEOP_CONFIG_KRBF ? "Yes" : "No"
+		);
+
+	pfnOSStatsPrintf(pvFilePtr,
+			"Summary: OP[F][TL] (tot.avg): %d.%d/%d.%d/%d, [KM][UM][A]SYNC: %d.%d/%d/%d, RBF (um/km): %d/%d, [D]GF (km): %d/%d, DSC (um/km): %d/%d\n",
+			gsCwq.ui32TotalExecOps, gsCwq.ui32AvgExecTime, gsCwq.ui32TotalFenceOps, gsCwq.ui32AvgFenceTime, gsCwq.ui32ServerDTL,
+			gsCwq.ui32ServerSync, gsCwq.ui32ServerSyncVA, gsCwq.ui32ClientSync,	gsCwq.ui32ServerASync,
+			gsCwq.ui32ClientRBF,   gsCwq.ui32ServerRBF,
+			gsCwq.ui32ServerDGF,   gsCwq.ui32ServerGF,
+			gsCwq.ui32UMDiscards,  gsCwq.ui32KMDiscards
+		);
+
+	CacheOpStatsExecLogHeader(szBuffer);
+	pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+
+	i32WriteOffset = gsCwq.i32StatsExecWriteIdx;
+	for (i32ReadOffset = DECR_WRAP(i32WriteOffset);
+		 i32ReadOffset != i32WriteOffset;
+		 i32ReadOffset = DECR_WRAP(i32ReadOffset))
+	{
+		if (!gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum &&
+			!gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp)
+		{
+			break;
+		}
+
+		/* Convert from nano-seconds to micro-seconds */
+		ui64ExecuteTime = gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime;
+		ui64EnqueuedTime = gsCwq.asStatsExecuted[i32ReadOffset].ui64EnqueuedTime;
+		ui64DequeuedTime = gsCwq.asStatsExecuted[i32ReadOffset].ui64DequeuedTime;
+		ui64ExecuteTime = DivBy10(DivBy10(DivBy10(ui64ExecuteTime)));
+		ui64EnqueuedTime = DivBy10(DivBy10(DivBy10(ui64EnqueuedTime)));
+		ui64DequeuedTime = ui64DequeuedTime ? DivBy10(DivBy10(DivBy10(ui64DequeuedTime))) : 0;
+
+		if (gsCwq.asStatsExecuted[i32ReadOffset].bIsFence)
+		{
+			IMG_CHAR *pszMode = "";
+			IMG_CHAR *pszFenceType = "";
+			pszCacheOpType = "Fence";
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+			pszMode = gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp != PVRSRV_CACHE_OP_GLOBAL ? "" : "  GF  ";
+			switch (gsCwq.asStatsExecuted[i32ReadOffset].eFenceOpType)
+			{
+				case RGXFWIF_DM_GP:
+					pszFenceType = " GP/GF";
+					break;
+
+				case RGXFWIF_DM_TDM:
+					pszFenceType = "  TDM ";
+					break;
+
+				case RGXFWIF_DM_TA:
+					pszFenceType = "  TA ";
+					break;
+
+				case RGXFWIF_DM_3D:
+					pszFenceType = "  PDM ";
+					break;
+
+				case RGXFWIF_DM_CDM:
+					pszFenceType = "  CDM ";
+					break;
+
+				default:
+					CACHEOP_PVR_ASSERT(0);
+					break;
+			}
+#else
+			/* The CacheOp fence operation also triggered a global cache flush operation */
+			pszFenceType =
+				gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp != PVRSRV_CACHE_OP_GLOBAL ? "" : "   GF ";
+#endif
+			pfnOSStatsPrintf(pvFilePtr,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+							CACHEOP_RI_PRINTF,
+#else
+							CACHEOP_PRINTF,
+#endif
+							gsCwq.asStatsExecuted[i32ReadOffset].pid,
+							pszCacheOpType,
+							pszFenceType,
+							pszMode,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+							"",
+							"",
+#endif
+							gsCwq.asStatsExecuted[i32ReadOffset].uiOffset,
+							gsCwq.asStatsExecuted[i32ReadOffset].uiSize,
+							ui64EnqueuedTime < ui64ExecuteTime ?
+									ui64ExecuteTime - ui64EnqueuedTime
+										:
+									ui64EnqueuedTime - ui64ExecuteTime,
+							ui64EnqueuedTime < ui64DequeuedTime ?
+									ui64DequeuedTime - ui64EnqueuedTime
+										:
+									!ui64DequeuedTime ? 0 : ui64EnqueuedTime - ui64DequeuedTime,
+							gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum);
+		}
+		else
+		{
+			if (gsCwq.asStatsExecuted[i32ReadOffset].bRBF)
+			{
+				IMG_DEVMEM_SIZE_T ui64NumOfPages;
+
+				ui64NumOfPages = gsCwq.asStatsExecuted[i32ReadOffset].uiSize >> gsCwq.uiPageShift;
+				if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC)
+				{
+					pszFlushype = "RBF.Fast";
+				}
+				else
+				{
+					pszFlushype = "RBF.Slow";
+				}
+			}
+			else
+			{
+				pszFlushype = "   GF ";
+			}
+
+			if (gsCwq.asStatsExecuted[i32ReadOffset].bUMF)
+			{
+				pszFlushSource = " UM";
+			}
+			else
+			{
+				/*
+				   - Request originates directly from a KM thread or in KM (KM<), or
+				   - Request originates from a UM thread and is KM deferred (KM+), or
+				   - Request is/was discarded due to an 'else-[when,where]' GFlush
+				     - i.e. GF occurs either (a)sync to current UM/KM thread
+				*/
+				pszFlushSource =
+					gsCwq.asStatsExecuted[i32ReadOffset].bKMReq ? " KM<" :
+					gsCwq.asStatsExecuted[i32ReadOffset].bDeferred && gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime ? " KM+" :
+					!gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime ? " KM-" : " KM";
+			}
+
+			switch (gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp)
+			{
+				case PVRSRV_CACHE_OP_NONE:
+					pszCacheOpType = "None";
+					break;
+				case PVRSRV_CACHE_OP_CLEAN:
+					pszCacheOpType = "Clean";
+					break;
+				case PVRSRV_CACHE_OP_INVALIDATE:
+					pszCacheOpType = "Invalidate";
+					break;
+				case PVRSRV_CACHE_OP_FLUSH:
+					pszCacheOpType = "Flush";
+					break;
+				case PVRSRV_CACHE_OP_GLOBAL:
+					pszCacheOpType = "GFlush";
+					break;
+				case PVRSRV_CACHE_OP_TIMELINE:
+					pszCacheOpType = "Timeline";
+					pszFlushype = "      ";
+					break;
+				default:
+					if ((IMG_UINT32)gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp == (IMG_UINT32)(PVRSRV_CACHE_OP_GLOBAL|PVRSRV_CACHE_OP_TIMELINE))
+					{
+						pszCacheOpType = "Timeline";
+					}
+					else
+					{
+						pszCacheOpType = "Unknown";
+						gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum =
+								(IMG_UINT32) gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp;
+					}
+					break;
+			}
+
+			pfnOSStatsPrintf(pvFilePtr,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+							CACHEOP_RI_PRINTF,
+#else
+							CACHEOP_PRINTF,
+#endif
+							gsCwq.asStatsExecuted[i32ReadOffset].pid,
+							pszCacheOpType,
+							pszFlushType,
+							pszFlushSource,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+							gsCwq.asStatsExecuted[i32ReadOffset].sDevVAddr.uiAddr,
+							gsCwq.asStatsExecuted[i32ReadOffset].sDevPAddr.uiAddr,
+#endif
+							gsCwq.asStatsExecuted[i32ReadOffset].uiOffset,
+							gsCwq.asStatsExecuted[i32ReadOffset].uiSize,
+							ui64EnqueuedTime < ui64ExecuteTime ?
+										ui64ExecuteTime - ui64EnqueuedTime
+											:
+										ui64EnqueuedTime - ui64ExecuteTime,
+							ui64EnqueuedTime < ui64DequeuedTime ?
+									ui64DequeuedTime - ui64EnqueuedTime
+										:
+									!ui64DequeuedTime ? 0 : ui64EnqueuedTime - ui64DequeuedTime,
+							gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum);
+		}
+	}
+
+	OSLockRelease(gsCwq.hStatsExecLock);
+}
+#endif /* defined(CACHEOP_DEBUG) */
+
+static INLINE void CacheOpStatsReset(void)
+{
+#if defined(CACHEOP_DEBUG)
+	gsCwq.ui32KMDiscards    = 0;
+	gsCwq.ui32UMDiscards    = 0;
+	gsCwq.ui32TotalExecOps  = 0;
+	gsCwq.ui32TotalFenceOps = 0;
+	gsCwq.ui32AvgExecTime   = 0;
+	gsCwq.ui32AvgFenceTime  = 0;
+	gsCwq.ui32ClientRBF     = 0;
+	gsCwq.ui32ClientSync    = 0;
+	gsCwq.ui32ServerRBF     = 0;
+	gsCwq.ui32ServerASync   = 0;
+	gsCwq.ui32ServerSyncVA  = 0;
+	gsCwq.ui32ServerSync    = 0;
+	gsCwq.ui32ServerGF      = 0;
+	gsCwq.ui32ServerDGF     = 0;
+	gsCwq.ui32ServerDTL     = 0;
+	gsCwq.i32StatsExecWriteIdx = 0;
+	OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted));
+#endif
+}
+
+static void CacheOpConfigUpdate(IMG_UINT32 ui32Config)
+{
+	OSLockAcquire(gsCwq.hConfigLock);
+
+	/* Step 0, set the gsCwq.eConfig bits */
+	if (!(ui32Config & (CACHEOP_CONFIG_LAST - 1)))
+	{
+		gsCwq.eConfig = CACHEOP_CONFIG_KRBF | CACHEOP_CONFIG_KDF;
+		if (! gsCwq.bNoGlobalFlushImpl)
+		{
+			gsCwq.eConfig |= CACHEOP_CONFIG_KGF;
+		}
+		if (gsCwq.bSupportsUMFlush)
+		{
+			gsCwq.eConfig |= CACHEOP_CONFIG_URBF;
+		}
+	}
+	else
+	{
+		if (ui32Config & CACHEOP_CONFIG_KRBF)
+		{
+			gsCwq.eConfig |= CACHEOP_CONFIG_KRBF;
+		}
+		else
+		{
+			gsCwq.eConfig &= ~CACHEOP_CONFIG_KRBF;
+		}
+
+		if (ui32Config & CACHEOP_CONFIG_KDF)
+		{
+			gsCwq.eConfig |= CACHEOP_CONFIG_KDF;
+		}
+		else
+		{
+			gsCwq.eConfig &= ~CACHEOP_CONFIG_KDF;
+		}
+
+		if (!gsCwq.bNoGlobalFlushImpl && (ui32Config & CACHEOP_CONFIG_KGF))
+		{
+			gsCwq.eConfig |= CACHEOP_CONFIG_KGF;
+		}
+		else
+		{
+			gsCwq.eConfig &= ~CACHEOP_CONFIG_KGF;
+		}
+
+		if (gsCwq.bSupportsUMFlush && (ui32Config & CACHEOP_CONFIG_URBF))
+		{
+			gsCwq.eConfig |= CACHEOP_CONFIG_URBF;
+		}
+		else
+		{
+			gsCwq.eConfig &= ~CACHEOP_CONFIG_URBF;
+		}
+	}
+
+	if (ui32Config & CACHEOP_CONFIG_KLOG)
+	{
+		/* Suppress logs from KM caller */
+		gsCwq.eConfig |= CACHEOP_CONFIG_KLOG;
+	}
+	else
+	{
+		gsCwq.eConfig &= ~CACHEOP_CONFIG_KLOG;
+	}
+
+	/* Step 1, set gsCwq.ui32Config based on gsCwq.eConfig */
+	ui32Config = 0;
+	if (gsCwq.eConfig & CACHEOP_CONFIG_KRBF)
+	{
+		ui32Config |= CACHEOP_CONFIG_KRBF;
+	}
+	if (gsCwq.eConfig & CACHEOP_CONFIG_KDF)
+	{
+		ui32Config |= CACHEOP_CONFIG_KDF;
+	}
+	if (gsCwq.eConfig & CACHEOP_CONFIG_KGF)
+	{
+		ui32Config |= CACHEOP_CONFIG_KGF;
+	}
+	if (gsCwq.eConfig & CACHEOP_CONFIG_URBF)
+	{
+		ui32Config |= CACHEOP_CONFIG_URBF;
+	}
+	if (gsCwq.eConfig & CACHEOP_CONFIG_KLOG)
+	{
+		ui32Config |= CACHEOP_CONFIG_KLOG;
+	}
+	gsCwq.ui32Config = ui32Config;
+
+	/* Step 2, Bar RBF promotion to GF, unless a GF is implemented */
+	gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] = (IMG_UINT32)~0;
+	if (! gsCwq.bNoGlobalFlushImpl)
+	{
+		gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] = (IMG_UINT32)PVR_DIRTY_BYTES_FLUSH_THRESHOLD;
+	}
+
+	/* Step 3, in certain cases where a CacheOp/VA is provided, this threshold determines at what point
+	   the optimisation due to the presence of said VA (i.e. us not having to remap the PMR pages in KM)
+	   is clawed back because of the overhead of maintaining such a large request, which might stall the
+	   user thread; to hide this latency, have these CacheOps executed on the deferred CacheOp thread */
+	gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] = (IMG_UINT32)(PVR_DIRTY_BYTES_FLUSH_THRESHOLD >> 2);
+
+	/* Step 4, if there is no UM support, all requests are done in KM, so zero these, forcing all client
+	   requests to come down into the KM for maintenance */
+	gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = 0;
+	gsCwq.pui32InfoPage[CACHEOP_INFO_UMRBFONLY] = 0;
+	if (gsCwq.bSupportsUMFlush)
+	{
+		/* If URBF has been selected exclusively OR selected but there is no GF implementation */
+		if ((gsCwq.eConfig & CACHEOP_CONFIG_URBF) &&
+			(gsCwq.bNoGlobalFlushImpl || !((gsCwq.ui32Config & (CACHEOP_CONFIG_LAST-1)) & ~CACHEOP_CONFIG_URBF)))
+		{
+			/* If only URBF has been selected, simulate without GF support OR no GF support means all client
+			   requests should be done in UM. In both cases, set this threshold to the highest value to
+			   prevent any client requests coming down to the server for maintenance. */
+			gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = (IMG_UINT32)~0;
+			gsCwq.pui32InfoPage[CACHEOP_INFO_UMRBFONLY] = 1;
+		}
+		/* This is the default entry for setting the UM info. page entries */
+		else if ((gsCwq.eConfig & CACHEOP_CONFIG_URBF) && !gsCwq.bNoGlobalFlushImpl)
+		{
+			/* Set the UM/KM threshold; all request sizes above this go to the server for GF maintenance
+			   _only_. Because UM flushes already have the VA acquired, no cost is incurred in per-page
+			   (re)mapping of the to-be maintained PMR/page(s), as is the case with KM flushing, so
+			   disallow KDF */
+
+			/* Assume average UM flush performance; anything above this should be promoted to GF.
+			   For x86 UMA/LMA, we avoid KDF because remapping PMR/pages in KM might fail due to exhausted
+			   or fragmented VMALLOC kernel VA space */
+			gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD];
+		}
+	}
+
+	/* Step 5, reset stats. */
+	CacheOpStatsReset();
+
+	OSLockRelease(gsCwq.hConfigLock);
+}
+
+static INLINE void CacheOpConfigRead(void *pvFilePtr,
+									 void *pvData,
+									 OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+	pfnOSStatsPrintf(pvFilePtr,
+			"KDF: %s, URBF: %s, KGF: %s, KRBF: %s\n",
+			gsCwq.eConfig & CACHEOP_CONFIG_KDF  ? "Yes" : "No",
+			gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No",
+			gsCwq.eConfig & CACHEOP_CONFIG_KGF  ? "Yes" : "No",
+			gsCwq.eConfig & CACHEOP_CONFIG_KRBF ? "Yes" : "No"
+		);
+}
+
+static INLINE PVRSRV_ERROR CacheOpConfigQuery(const PVRSRV_DEVICE_NODE *psDevNode,
+											const void *psPrivate,
+											IMG_UINT32 *pui32Value)
+{
+	IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate;
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+	switch (ui32ID)
+	{
+		case APPHINT_ID_CacheOpConfig:
+			*pui32Value = gsCwq.ui32Config;
+			break;
+
+		case APPHINT_ID_CacheOpGFThresholdSize:
+			*pui32Value = gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD];
+			break;
+
+		case APPHINT_ID_CacheOpUMKMThresholdSize:
+			*pui32Value = gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD];
+			break;
+
+		default:
+			break;
+	}
+
+	return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR CacheOpConfigSet(const PVRSRV_DEVICE_NODE *psDevNode,
+											const void *psPrivate,
+											IMG_UINT32 ui32Value)
+{
+	IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate;
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+	switch (ui32ID)
+	{
+		case APPHINT_ID_CacheOpConfig:
+			CacheOpConfigUpdate(ui32Value & CACHEOP_CONFIG_ALL);
+			break;
+
+		case APPHINT_ID_CacheOpGFThresholdSize:
+		{
+			if (!ui32Value || gsCwq.bNoGlobalFlushImpl)
+			{
+				/* CPU ISA does not support GF, silently ignore request to adjust threshold */
+				PVR_ASSERT(gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] == (IMG_UINT32)~0);
+				break;
+			}
+			else if (ui32Value < gsCwq.uiPageSize)
+			{
+				/* Silently round-up to OS page size */
+				ui32Value = gsCwq.uiPageSize;
+			}
+
+			/* Align to OS page size */
+			ui32Value &= ~(gsCwq.uiPageSize - 1);
+
+			/* Adjust KM deferred threshold given this updated KM global threshold */
+			if (ui32Value == gsCwq.uiPageSize || ui32Value < gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD])
+			{
+				gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] = ui32Value >> 2;
+			}
+
+			gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] = ui32Value;
+
+			break;
+		}
+
+		case APPHINT_ID_CacheOpUMKMThresholdSize:
+		{
+			if (!ui32Value || !gsCwq.bSupportsUMFlush)
+			{
+				/* CPU ISA does not support UM flush, therefore every request goes down into
+				   the KM; silently ignore the request to adjust the threshold */
+				PVR_ASSERT(! gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD]);
+				break;
+			}
+			else if (ui32Value < gsCwq.uiPageSize)
+			{
+				/* Silently round-up to OS page size */
+				ui32Value = gsCwq.uiPageSize;
+			}
+
+			/* Align to OS page size */
+			ui32Value &= ~(gsCwq.uiPageSize - 1);
+
+			if (gsCwq.bNoGlobalFlushImpl || ui32Value < gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD])
+			{
+				/* The CPU ISA does not support GF either, so there is no benefit in re-routing
+				   this to KM as the request won't be promoted to GF; the request can, however,
+				   benefit from KM async. execution, so ensure the KM deferred threshold applies */
+				PVR_ASSERT(gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] == (IMG_UINT32)~0);
+				gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] = ui32Value >> 1;
+				break;
+			}
+
+			gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = ui32Value;
+
+			break;
+		}
+
+		default:
+			break;
+	}
+
+	return PVRSRV_OK;
+}
+
+static INLINE void CacheOpQItemRecycle(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+	PVRSRV_ERROR eError;
+	eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+	PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+	/* Set to max as a precaution should recycling this CacheOp index fail
+	   to reset it; this is purely to safeguard against having to discard
+	   subsequent deferred CacheOps or signal the SW sync timeline */
+	psCacheOpWorkItem->iTimeline = PVRSRV_NO_TIMELINE;
+	psCacheOpWorkItem->ui32GFSeqNum = (IMG_UINT32)~0;
+	psCacheOpWorkItem->ui32OpSeqNum = (IMG_UINT32)~0;
+#if defined(CACHEOP_DEBUG)
+	psCacheOpWorkItem->psPMR = (void *)(uintptr_t)~0;
+#endif
+}
+
+static INLINE void CacheOpQItemReadCheck(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+#if defined(CACHEOP_DEBUG)
+	CACHEOP_PVR_ASSERT(psCacheOpWorkItem->psPMR);
+	CACHEOP_PVR_ASSERT(psCacheOpWorkItem->psPMR != (void *)(uintptr_t)~0);
+	CACHEOP_PVR_ASSERT(psCacheOpWorkItem->ui32OpSeqNum != (IMG_UINT32)~0);
+	if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF))
+	{
+		CACHEOP_PVR_ASSERT(psCacheOpWorkItem->ui32GFSeqNum != (IMG_UINT32)~0);
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(psCacheOpWorkItem);
+#endif
+}
+
+static INLINE void CacheOpQItemWriteCheck(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+#if defined(CACHEOP_DEBUG)
+	CACHEOP_PVR_ASSERT(psCacheOpWorkItem->psPMR == (void *)(uintptr_t)~0);
+	CACHEOP_PVR_ASSERT(psCacheOpWorkItem->ui32OpSeqNum == (IMG_UINT32)~0);
+	CACHEOP_PVR_ASSERT(psCacheOpWorkItem->ui32GFSeqNum == (IMG_UINT32)~0);
+	CACHEOP_PVR_ASSERT(psCacheOpWorkItem->iTimeline == PVRSRV_NO_TIMELINE);
+#else
+	PVR_UNREFERENCED_PARAMETER(psCacheOpWorkItem);
+#endif
+}
+
+static INLINE IMG_UINT32 CacheOpGetNextCommonSeqNum(void)
+{
+	IMG_UINT32 ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum);
+	if (! ui32SeqNum)
+	{
+		/* Zero is _not_ a valid sequence value; this simplifies _all_
+		   subsequent fence checking when no cache maintenance operation
+		   is outstanding, as in that case a fence value of zero is supplied. */
+		if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF))
+		{
+			/* Also when seqNum wraps around/crosses zero, this requires us to
+			   ensure that GFSEQNUM is not erroneously higher than any/all client
+			   seqNum(s) in the system during this wrap-around transition so we
+			   disable both momentarily until the next GF comes along. This has
+			   the effect that all subsequent in-flight discards using ">" are
+			   never true, seeing zero is _not_ greater than anything, and all
+			   "<=" comparisons are always true, seeing zero is always less than
+			   all non-zero integers. The additional GF here is done mostly to
+			   account for race condition(s) during this transition for all
+			   pending seqNum(s) that are still behind zero. */
+			gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] = 0;
+			gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM1] = 0;
+			ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum);
+			(void) OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+		}
+		else
+		{
+			ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum);
+		}
+	}
+	return ui32SeqNum;
+}
+
+static INLINE IMG_BOOL CacheOpFenceCheck(IMG_UINT32 ui32CompletedSeqNum,
+										 IMG_UINT32 ui32FenceSeqNum)
+{
+	IMG_UINT32 ui32RebasedCompletedNum;
+	IMG_UINT32 ui32RebasedFenceNum;
+	IMG_UINT32 ui32Rebase;
+
+	if (ui32FenceSeqNum == 0)
+	{
+		return IMG_TRUE;
+	}
+
+	/*
+	   The problem statement is how to compare two values on
+	   a numerical sequentially incrementing timeline in the
+	   presence of wrap around arithmetic semantics using a
+	   single ui32 counter & atomic (increment) operations.
+
+	   The rationale for the solution here is to rebase the
+	   incoming values to the sequence midpoint and perform
+	   comparisons there; this allows us to handle overflow
+	   or underflow wrap-round using only a single integer.
+
+	   NOTE: Here we assume that the absolute value of the
+	   difference between the two incoming values is _not_
+	   greater than CACHEOP_SEQ_MIDPOINT. This assumption
+	   holds as it implies that it is very _unlikely_ that 2
+	   billion CacheOp requests could have been made between
+	   a single client's CacheOp request & the corresponding
+	   fence check. This code sequence is hopefully a _more_
+	   hand optimised (branchless) version of this:
+
+		   x = ui32CompletedOpSeqNum
+		   y = ui32FenceOpSeqNum
+
+		   if (|x - y| < CACHEOP_SEQ_MIDPOINT)
+			   return (x - y) >= 0 ? true : false
+		   else
+			   return (y - x) >= 0 ? true : false
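+
+	   Worked example, assuming CACHEOP_SEQ_MIDPOINT is 0x80000000:
+	   take x = 0xFFFFFFF0 and y = 0x00000010, i.e. the fence was issued
+	   just after the counter wrapped. ui32Rebase = 0x80000000 - 0xFFFFFFF0
+	   = 0x80000010, so the rebased completed value is the midpoint
+	   0x80000000 (by construction) and the rebased fence is 0x80000020;
+	   0x80000000 >= 0x80000020 is false, correctly reporting the fence as
+	   not yet crossed even though a plain unsigned compare of the raw
+	   values would say otherwise.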
+	 */
+	ui32Rebase = CACHEOP_SEQ_MIDPOINT - ui32CompletedSeqNum;
+
+	/* ui32Rebase could be either positive/negative, in
+	   any case we still perform operation using unsigned
+	   semantics as 2's complement notation always means
+	   we end up with the correct result */
+	ui32RebasedCompletedNum = ui32Rebase + ui32CompletedSeqNum;
+	ui32RebasedFenceNum = ui32Rebase + ui32FenceSeqNum;
+
+	return (ui32RebasedCompletedNum >= ui32RebasedFenceNum);
+}
+
+static INLINE PVRSRV_ERROR CacheOpTimelineBind(PVRSRV_DEVICE_NODE *psDevNode,
+											   CACHEOP_WORK_ITEM *psCacheOpWorkItem,
+											   PVRSRV_TIMELINE iTimeline)
+{
+	PVRSRV_ERROR eError;
+
+	/* Always default the incoming CacheOp work-item to safe values */
+	SyncClearTimelineObj(&psCacheOpWorkItem->sSWTimelineObj);
+	psCacheOpWorkItem->iTimeline = PVRSRV_NO_TIMELINE;
+	psCacheOpWorkItem->psDevNode = psDevNode;
+	if (iTimeline == PVRSRV_NO_TIMELINE)
+	{
+		return PVRSRV_OK;
+	}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	psCacheOpWorkItem->iTimeline = iTimeline;
+	eError = SyncSWGetTimelineObj(iTimeline, &psCacheOpWorkItem->sSWTimelineObj);
+	PVR_LOG_IF_ERROR(eError, "SyncSWGetTimelineObj");
+#else
+	eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+
+	return eError;
+}
+
+static INLINE PVRSRV_ERROR CacheOpTimelineExec(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+	PVRSRV_ERROR eError;
+
+	if (psCacheOpWorkItem->iTimeline == PVRSRV_NO_TIMELINE)
+	{
+		return PVRSRV_OK;
+	}
+	CACHEOP_PVR_ASSERT(psCacheOpWorkItem->sSWTimelineObj.pvTlObj);
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	eError = SyncSWTimelineAdvanceKM(psCacheOpWorkItem->psDevNode,
+	                                 &psCacheOpWorkItem->sSWTimelineObj);
+	(void) SyncSWTimelineReleaseKM(&psCacheOpWorkItem->sSWTimelineObj);
+#else
+	eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+
+	return eError;
+}
+
+static INLINE PVRSRV_ERROR CacheOpGlobalFlush(void)
+{
+#if !defined(CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH)
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#else
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32OpSeqNum = gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+
+	if (! CacheOpConfigSupports(CACHEOP_CONFIG_KGF))
+	{
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+	}
+
+	OSLockAcquire(gsCwq.hGlobalFlushLock);
+	if (ui32OpSeqNum < gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0])
+	{
+#if defined(CACHEOP_DEBUG)
+		gsCwq.ui32KMDiscards += 1;
+#endif
+		eError = PVRSRV_OK;
+		goto exit;
+	}
+
+	/* User space sampling the information-page seqNumbers after this point
+	   and before the corresponding GFSEQNUM0 update leads to an invalid
+	   sampling which must be discarded by UM. This implements a lockless
+	   critical region for a single KM(writer) & multiple UM/KM(readers) */
+	ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+	gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM1] = ui32OpSeqNum;
+
+	eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+	PVR_LOGG_IF_ERROR(eError, "OSCPUOperation(PVRSRV_CACHE_OP_FLUSH)", exit);
+
+	gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] = ui32OpSeqNum;
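+	/* A completed GF retires all outstanding deferred work, so the
+	   accumulated deferred-size byte counter can be reset */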
+	OSAtomicWrite(&gsCwq.hDeferredSize, 0);
+#if defined(CACHEOP_DEBUG)
+	gsCwq.ui32ServerGF += 1;
+#endif
+
+exit:
+	OSLockRelease(gsCwq.hGlobalFlushLock);
+	return eError;
+#endif
+}
+
+static INLINE void CacheOpExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode,
+										PVRSRV_CACHE_OP uiCacheOp,
+										IMG_BYTE *pbCpuVirtAddr,
+										IMG_CPU_PHYADDR sCpuPhyAddr,
+										IMG_DEVMEM_OFFSET_T uiPgAlignedOffset,
+										IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset,
+										IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset)
+{
+	IMG_BYTE *pbCpuVirtAddrEnd;
+	IMG_BYTE *pbCpuVirtAddrStart;
+	IMG_CPU_PHYADDR sCpuPhyAddrEnd;
+	IMG_CPU_PHYADDR sCpuPhyAddrStart;
+	IMG_DEVMEM_SIZE_T uiRelFlushSize;
+	IMG_DEVMEM_OFFSET_T uiRelFlushOffset;
+	IMG_DEVMEM_SIZE_T uiNextPgAlignedOffset;
+
+	/* These quantities allow us to perform cache operations
+	   at cache-line granularity, thereby ensuring we do not
+	   perform more than is necessary */
+	CACHEOP_PVR_ASSERT(uiPgAlignedOffset < uiCLAlignedEndOffset);
+	uiRelFlushSize = (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize;
+	uiRelFlushOffset = 0;
+
+	if (uiCLAlignedStartOffset > uiPgAlignedOffset)
+	{
+		/* Zero unless initially starting at an in-page offset */
+		uiRelFlushOffset = uiCLAlignedStartOffset - uiPgAlignedOffset;
+		uiRelFlushSize -= uiRelFlushOffset;
+	}
+
+	/* uiRelFlushSize is gsCwq.uiPageSize unless the current outstanding CacheOp
+	   size is smaller. The 1st case handles an in-page CacheOp range and
+	   the 2nd case handles a multiple-page CacheOp range with a last
+	   CacheOp size that is less than gsCwq.uiPageSize */
+	uiNextPgAlignedOffset = uiPgAlignedOffset + (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize;
+	if (uiNextPgAlignedOffset < uiPgAlignedOffset)
+	{
+		/* uiNextPgAlignedOffset is greater than uiCLAlignedEndOffset
+		   by implication of this wrap-round; this only happens when
+		   uiPgAlignedOffset is the last page aligned offset */
+		uiRelFlushSize = uiRelFlushOffset ?
+				uiCLAlignedEndOffset - uiCLAlignedStartOffset :
+				uiCLAlignedEndOffset - uiPgAlignedOffset;
+	}
+	else
+	{
+		if (uiNextPgAlignedOffset > uiCLAlignedEndOffset)
+		{
+			uiRelFlushSize = uiRelFlushOffset ?
+					uiCLAlignedEndOffset - uiCLAlignedStartOffset :
+					uiCLAlignedEndOffset - uiPgAlignedOffset;
+		}
+	}
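+
+	/* Worked example, assuming 4K pages and 64-byte cache-lines: a request
+	   at PMR offset 0x1040 of size 0x40 gives uiCLAlignedStartOffset 0x1040,
+	   uiCLAlignedEndOffset 0x1080 and uiPgAlignedOffset 0x1000, so
+	   uiRelFlushOffset becomes 0x40 and, as the next page offset 0x2000
+	   exceeds uiCLAlignedEndOffset, uiRelFlushSize is trimmed to
+	   0x1080 - 0x1040 = 0x40, i.e. exactly one cache-line is maintained. */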
+
+	/* More efficient to request cache maintenance operation for full
+	   relative range as opposed to multiple cache-aligned ranges */
+	sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + uiRelFlushOffset;
+	sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr + uiRelFlushSize;
+	if (pbCpuVirtAddr)
+	{
+		pbCpuVirtAddrStart = pbCpuVirtAddr + uiRelFlushOffset;
+		pbCpuVirtAddrEnd = pbCpuVirtAddrStart + uiRelFlushSize;
+	}
+	else
+	{
+		/* Some OS/Env layer support functions expect NULL(s) */
+		pbCpuVirtAddrStart = NULL;
+		pbCpuVirtAddrEnd = NULL;
+	}
+
+	/* Perform requested CacheOp on the CPU data cache for successive cache
+	   line worth of bytes up to page or in-page cache-line boundary */
+	switch (uiCacheOp)
+	{
+		case PVRSRV_CACHE_OP_CLEAN:
+			OSCPUCacheCleanRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+									sCpuPhyAddrStart, sCpuPhyAddrEnd);
+			break;
+		case PVRSRV_CACHE_OP_INVALIDATE:
+			OSCPUCacheInvalidateRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+									sCpuPhyAddrStart, sCpuPhyAddrEnd);
+			break;
+		case PVRSRV_CACHE_OP_FLUSH:
+			OSCPUCacheFlushRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+									sCpuPhyAddrStart, sCpuPhyAddrEnd);
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,	"%s: Invalid cache operation type %d",
+					__func__, uiCacheOp));
+			break;
+	}
+
+#if defined(CACHEOP_DEBUG)
+	/* Tracks the number of kernel-mode cacheline maintenance instructions */
+	gsCwq.ui32ServerRBF += (uiRelFlushSize & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift;
+#endif
+}
+
+static INLINE void CacheOpExecRangeBasedVA(PVRSRV_DEVICE_NODE *psDevNode,
+										 IMG_CPU_VIRTADDR pvAddress,
+										 IMG_DEVMEM_SIZE_T uiSize,
+										 PVRSRV_CACHE_OP uiCacheOp)
+{
+	IMG_CPU_PHYADDR sCpuPhyAddrUnused =
+		{ IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) };
+	IMG_BYTE *pbEnd = (IMG_BYTE*)((uintptr_t)pvAddress + (uintptr_t)uiSize);
+	IMG_BYTE *pbStart = (IMG_BYTE*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiLineSize-1));
+
+	/*
+	  If the start/end address isn't aligned to cache line size, round it up to the
+	  nearest multiple; this ensures that we flush all the cache lines affected by
+	  unaligned start/end addresses.
+	 */
+	pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)gsCwq.uiLineSize);
+	switch (uiCacheOp)
+	{
+		case PVRSRV_CACHE_OP_CLEAN:
+			OSCPUCacheCleanRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused);
+			break;
+		case PVRSRV_CACHE_OP_INVALIDATE:
+			OSCPUCacheInvalidateRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused);
+			break;
+		case PVRSRV_CACHE_OP_FLUSH:
+			OSCPUCacheFlushRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused);
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,	"%s: Invalid cache operation type %d",
+					 __func__, uiCacheOp));
+			break;
+	}
+
+#if defined(CACHEOP_DEBUG)
+	/* Tracks the number of kernel-mode cacheline maintenance instructions */
+	gsCwq.ui32ServerRBF += (uiSize & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift;
+#endif
+}
+
+static INLINE PVRSRV_ERROR CacheOpValidateVAOffset(PMR *psPMR,
+												IMG_CPU_VIRTADDR pvAddress,
+												IMG_DEVMEM_OFFSET_T uiOffset,
+												IMG_DEVMEM_SIZE_T uiSize,
+												void **ppvOutAddress)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+#if defined(LINUX) && !defined(CACHEFLUSH_NO_KMRBF_USING_UMVA)
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+#endif
+	void __user *pvAddr;
+
+	if (! pvAddress)
+	{
+		/* As pvAddress is optional, NULL is expected from UM/KM requests */
+		pvAddr = NULL;
+		goto e0;
+	}
+
+#if !defined(LINUX) || defined(CACHEFLUSH_NO_KMRBF_USING_UMVA)
+	pvAddr = NULL;
+#else
+	/* Validate VA, assume most basic address limit access_ok() check */
+	pvAddr = (void __user *)(uintptr_t)((uintptr_t)pvAddress + uiOffset);
+	if (!access_ok(pvAddr, uiSize))
+	{
+		pvAddr = NULL;
+		if (! mm)
+		{
+			/* Bad KM request, don't silently ignore */
+			eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+			goto e0;
+		}
+	}
+	else if (mm)
+	{
+		down_read(&mm->mmap_sem);
+
+		vma = find_vma(mm, (unsigned long)(uintptr_t)pvAddr);
+		if (!vma ||
+			vma->vm_start > (unsigned long)(uintptr_t)pvAddr ||
+			vma->vm_end - vma->vm_start > (unsigned long)(uintptr_t)uiSize)
+		{
+			/* Out of range mm_struct->vm_area VA */
+			pvAddr = NULL;
+		}
+		else if (vma->vm_private_data != psPMR)
+		{
+			/*
+			   Unknown mm_struct->vm_area VA; we can't risk d-cache maintenance using
+			   this VA as the client user space mapping could be removed without
+			   us knowing, which might induce a CPU fault during cache maintenance.
+			*/
+			pvAddr = NULL;
+		}
+		else if ((uintptr_t)pvAddress < (uintptr_t)gsCwq.uiPageSize)
+		{
+			/* Silently suppress UM NULL page pointers */
+			pvAddr = NULL;
+		}
+
+		up_read(&mm->mmap_sem);
+	}
+	else
+	{
+		pgd_t *pgd;
+		p4d_t *p4d;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *ptep;
+		mm = current->active_mm;
+
+		/*
+			For KM requests perform additional VA validation, so we walk the
+			kernel page-table structures to be sure VA is safe to use.
+		*/
+		pgd = pgd_offset(mm, (uintptr_t)pvAddr);
+		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		{
+			eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+			pvAddr = NULL;
+			goto e0;
+		}
+
+		p4d = p4d_offset(pgd, (uintptr_t)pvAddr);
+		if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
+		{
+			eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+			pvAddr = NULL;
+			goto e0;
+		}
+
+		pud = pud_offset(p4d, (uintptr_t)pvAddr);
+		if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+		{
+			eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+			pvAddr = NULL;
+			goto e0;
+		}
+
+		pmd = pmd_offset(pud, (uintptr_t)pvAddr);
+		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		{
+			eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+			pvAddr = NULL;
+			goto e0;
+		}
+
+		ptep = pte_offset_map(pmd, (uintptr_t)pvAddr);
+		if (!ptep || !pte_present(*ptep))
+		{
+			if (ptep)
+			{
+				pte_unmap(ptep);
+			}
+			eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+			pvAddr = NULL;
+			goto e0;
+		}
+		/* Balance the pte_offset_map() above (a no-op unless HIGHPTE) */
+		pte_unmap(ptep);
+	}
+#endif
+
+e0:
+	*ppvOutAddress = (IMG_CPU_VIRTADDR __force) pvAddr;
+	return eError;
+}
+
+static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR,
+									IMG_CPU_VIRTADDR pvAddress,
+									IMG_DEVMEM_OFFSET_T uiOffset,
+									IMG_DEVMEM_SIZE_T uiSize,
+									PVRSRV_CACHE_OP uiCacheOp,
+									IMG_UINT32 ui32GFlushSeqNum,
+									IMG_BOOL bIsRequestValidated,
+									IMG_BOOL *pbUsedGlobalFlush)
+{
+	IMG_HANDLE hPrivOut;
+	IMG_BOOL bPMRIsSparse;
+	IMG_UINT32 ui32PageIndex;
+	IMG_UINT32 ui32NumOfPages;
+	IMG_DEVMEM_SIZE_T uiOutSize;
+	PVRSRV_DEVICE_NODE *psDevNode;
+	IMG_DEVMEM_SIZE_T uiPgAlignedSize;
+	IMG_DEVMEM_OFFSET_T uiPgAlignedOffset;
+	IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset;
+	IMG_DEVMEM_OFFSET_T uiPgAlignedEndOffset;
+	IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset;
+	IMG_DEVMEM_OFFSET_T uiPgAlignedStartOffset;
+	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_CPU_PHYADDR asCpuPhyAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_CPU_PHYADDR *psCpuPhyAddr = asCpuPhyAddr;
+	IMG_BOOL bIsPMRInfoValid = IMG_FALSE;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BYTE *pbCpuVirtAddr = NULL;
+	IMG_BOOL *pbValid = abValid;
+	*pbUsedGlobalFlush = IMG_FALSE;
+
+	if (uiCacheOp == PVRSRV_CACHE_OP_NONE || uiCacheOp == PVRSRV_CACHE_OP_TIMELINE)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Check for explicitly requested KGF requests, or KRBF requests promoted to KGF */
+	if (uiCacheOp == PVRSRV_CACHE_OP_GLOBAL || uiSize == 0 ||
+	    (IMG_UINT32)uiSize > gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD])
+	{
+		/* Discard if an else-when KGF has occurred in the interim */
+		if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32GFlushSeqNum)
+		{
+#if defined(CACHEOP_DEBUG)
+			gsCwq.ui32KMDiscards += 1;
+#endif
+			return PVRSRV_OK;
+		}
+		/* Some CPU ISAs support KGF; if it fails, fall back to KRBF */
+		else if ((eError = CacheOpGlobalFlush()) == PVRSRV_OK)
+		{
+			*pbUsedGlobalFlush = IMG_TRUE;
+			return PVRSRV_OK;
+		}
+		/* Request with uiSize=0 is treated as a KGF request as well */
+		else if (uiCacheOp == PVRSRV_CACHE_OP_GLOBAL || uiSize == 0)
+		{
+			/* Cannot fall back to KRBF as an explicit KGF was erroneously requested */
+			PVR_LOGR_IF_ERROR(eError, CACHEOP_NO_GFLUSH_ERROR_STRING);
+			CACHEOP_PVR_ASSERT(0);
+		}
+	}
+
+	if (! bIsRequestValidated)
+	{
+		IMG_DEVMEM_SIZE_T uiLPhysicalSize;
+
+		/* Need to validate parameters before proceeding */
+		eError = PMR_PhysicalSize(psPMR, &uiLPhysicalSize);
+		PVR_LOGR_IF_ERROR(eError, "uiLPhysicalSize");
+
+		PVR_LOGR_IF_FALSE(((uiOffset+uiSize) <= uiLPhysicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE);
+
+		eError = PMRLockSysPhysAddresses(psPMR);
+		PVR_LOGR_IF_ERROR(eError, "PMRLockSysPhysAddresses");
+	}
+
+	/* Fast track the request if a CPU VA is provided and CPU ISA supports VA only maintenance */
+	eError = CacheOpValidateVAOffset(psPMR, pvAddress, uiOffset, uiSize, (void**)&pbCpuVirtAddr);
+	if (eError == PVRSRV_OK)
+	{
+		pvAddress = pbCpuVirtAddr;
+
+		if (pvAddress && gsCwq.uiCacheOpAddrType == PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+		{
+			CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pvAddress, uiSize, uiCacheOp);
+			if (! bIsRequestValidated)
+			{
+				eError = PMRUnlockSysPhysAddresses(psPMR);
+				PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+			}
+#if defined(CACHEOP_DEBUG)
+			gsCwq.ui32ServerSyncVA += 1;
+#endif
+			return PVRSRV_OK;
+		}
+		else if (pvAddress)
+		{
+			/* Round the incoming VA (if any) down to the nearest page-aligned VA */
+			pvAddress = (void*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1));
+#if defined(CACHEOP_DEBUG)
+			gsCwq.ui32ServerSyncVA += 1;
+#endif
+		}
+	}
+	else
+	{
+		/*
+		 * This validation pathway has been added to accommodate any/all requests that might
+		 * cause the kernel to Oops; essentially, KM requests should prevalidate cache maint.
+		 * parameters, but if this fails we would rather fail gracefully than Oops the kernel,
+		 * so instead we log the fact that an invalid KM virtual address was supplied together
+		 * with whatever action, if any, was taken to mitigate against the kernel Oops(ing).
+		 */
+		CACHEOP_PVR_ASSERT(pbCpuVirtAddr == NULL);
+
+		if (gsCwq.uiCacheOpAddrType == PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s: Invalid vaddress 0x%p in CPU d-cache maint. op, using paddress",
+					__func__,
+					pvAddress));
+
+			/* We can still proceed as kernel/cpu uses CPU PA for d-cache maintenance */
+			pvAddress = NULL;
+		}
+		else if (CacheOpGlobalFlush() == PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s: Invalid vaddress 0x%p in CPU d-cache maint. op, used global flush",
+					__func__,
+					pvAddress));
+
+			/* Saved by global flush impl. */
+			*pbUsedGlobalFlush = IMG_TRUE;
+			eError = PVRSRV_OK;
+			goto e0;
+		}
+		else
+		{
+			/*
+			 * The approach here is to attempt a reacquisition of the PMR kernel VA and see if
+			 * said VA corresponds to the parameter VA; if so, fail the requested cache maint.
+			 * op. because this indicates some kind of internal memory and/or meta-data
+			 * corruption, else reissue the request using this (re)acquired alias PMR kernel VA.
+			 */
+			if (PMR_IsSparse(psPMR))
+			{
+				eError = PMRAcquireSparseKernelMappingData(psPMR,
+														   0,
+														   gsCwq.uiPageSize,
+														   (void **)&pbCpuVirtAddr,
+														   (size_t*)&uiOutSize,
+														   &hPrivOut);
+				PVR_LOGG_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0);
+			}
+			else
+			{
+				eError = PMRAcquireKernelMappingData(psPMR,
+													 0,
+													 gsCwq.uiPageSize,
+													 (void **)&pbCpuVirtAddr,
+													 (size_t*)&uiOutSize,
+													 &hPrivOut);
+				PVR_LOGG_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0);
+			}
+
+			/* Here, we only compare these CPU virtual addresses at the granularity of the OS page size */
+			if ((uintptr_t)pbCpuVirtAddr == ((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1)))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Invalid vaddress 0x%p in CPU d-cache maint. op, no alt. so failing request",
+						__func__,
+						pvAddress));
+
+				eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+				PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+
+				eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+				goto e0;
+			}
+			else if (gsCwq.uiCacheOpAddrType == PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+			{
+				PVR_DPF((PVR_DBG_WARNING,
+						"%s: Bad vaddress 0x%p in CPU d-cache maint. op, using reacquired vaddress 0x%p",
+						__func__,
+						pvAddress,
+						pbCpuVirtAddr));
+
+				/* Note that this might still fail if there is kernel memory/meta-data corruption;
+				   there is not much we can do here but at the least we will be informed of this
+				   before the kernel Oops(ing) */
+				CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pbCpuVirtAddr, uiSize, uiCacheOp);
+
+				eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+				PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+
+				eError = PVRSRV_OK;
+				goto e0;
+			}
+			else
+			{
+				/* At this juncture, we have exhausted every possible work-around, but we do
+				   know that VA reacquisition returned another/alias page-aligned VA; so with this
+				   future expectation of PMRAcquireKernelMappingData(), we proceed */
+				PVR_DPF((PVR_DBG_WARNING,
+						"%s: Bad vaddress %p in CPU d-cache maint. op, will use reacquired vaddress",
+						__func__,
+						pvAddress));
+
+				eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+				PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+
+				/* NULL this to force per-page reacquisition down-stream */
+				pvAddress = NULL;
+			}
+		}
+	}
+
+	/* NULL clobbered var., OK to proceed */
+	pbCpuVirtAddr = NULL;
+	eError = PVRSRV_OK;
+
+	/* Need this for kernel mapping */
+	bPMRIsSparse = PMR_IsSparse(psPMR);
+	psDevNode = PMR_DeviceNode(psPMR);
+
+	/* Round the incoming start offset down (and the end offset up) to the nearest cache-line/page-aligned address */
+	uiCLAlignedEndOffset = uiOffset + uiSize;
+	uiCLAlignedEndOffset = PVR_ALIGN(uiCLAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiLineSize);
+	uiCLAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiLineSize-1));
+
+	uiPgAlignedEndOffset = uiCLAlignedEndOffset;
+	uiPgAlignedEndOffset = PVR_ALIGN(uiPgAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize);
+	uiPgAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiPageSize-1));
+	uiPgAlignedSize = uiPgAlignedEndOffset - uiPgAlignedStartOffset;
+
+#if defined(CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING)
+	/* For internal debug if cache-line optimised
+	   flushing is suspected of causing data corruption */
+	uiCLAlignedStartOffset = uiPgAlignedStartOffset;
+	uiCLAlignedEndOffset = uiPgAlignedEndOffset;
+#endif
+
+	/* Number of OS pages spanned by the page-aligned request */
+	ui32NumOfPages = uiPgAlignedSize >> gsCwq.uiPageShift;
+	if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		/* The pbValid array is allocated first as it is needed in
+		   both physical/virtual cache maintenance methods */
+		pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL));
+		if (! pbValid)
+		{
+			pbValid = abValid;
+		}
+		else if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+		{
+			psCpuPhyAddr = OSAllocZMem(ui32NumOfPages * sizeof(IMG_CPU_PHYADDR));
+			if (! psCpuPhyAddr)
+			{
+				psCpuPhyAddr = asCpuPhyAddr;
+				OSFreeMem(pbValid);
+				pbValid = abValid;
+			}
+		}
+	}
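+
+	/* If a dynamic allocation failed above, we fall back to the stack arrays
+	   and the per-page path in the loop below re-looks up each page instead */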
+
+	/* We always retrieve PMR data in bulk, up-front, if the number of pages is within
+	   the PMR_MAX_TRANSLATION_STACK_ALLOC limit; otherwise we check that a dynamic
+	   buffer has been allocated to satisfy requests outside that limit */
+	if (ui32NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC || pbValid != abValid)
+	{
+		if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+		{
+			/* Look-up PMR CpuPhyAddr once, if possible */
+			eError = PMR_CpuPhysAddr(psPMR,
+									 gsCwq.uiPageShift,
+									 ui32NumOfPages,
+									 uiPgAlignedStartOffset,
+									 psCpuPhyAddr,
+									 pbValid);
+			if (eError == PVRSRV_OK)
+			{
+				bIsPMRInfoValid = IMG_TRUE;
+			}
+		}
+		else
+		{
+			/* Look-up PMR per-page validity once, if possible */
+			eError = PMR_IsOffsetValid(psPMR,
+									   gsCwq.uiPageShift,
+									   ui32NumOfPages,
+									   uiPgAlignedStartOffset,
+									   pbValid);
+			bIsPMRInfoValid = (eError == PVRSRV_OK) ? IMG_TRUE : IMG_FALSE;
+		}
+	}
+
+	/* For each (possibly non-contiguous) PMR page(s), carry out the requested cache maint. op. */
+	for (uiPgAlignedOffset = uiPgAlignedStartOffset, ui32PageIndex = 0;
+		 uiPgAlignedOffset < uiPgAlignedEndOffset;
+		 uiPgAlignedOffset += (IMG_DEVMEM_OFFSET_T) gsCwq.uiPageSize, ui32PageIndex += 1)
+	{
+		/* Just before issuing the CacheOp RBF, check if it can be discarded */
+		if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32GFlushSeqNum)
+		{
+#if defined(CACHEOP_DEBUG)
+			gsCwq.ui32KMDiscards += 1;
+#endif
+			break;
+		}
+
+		if (! bIsPMRInfoValid)
+		{
+			/* Never cross a page boundary without looking up the corresponding PMR
+			   page physical address and/or page validity, if these were not looked
+			   up in bulk up-front */
+			ui32PageIndex = 0;
+			if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+			{
+				eError = PMR_CpuPhysAddr(psPMR,
+										 gsCwq.uiPageShift,
+										 1,
+										 uiPgAlignedOffset,
+										 psCpuPhyAddr,
+										 pbValid);
+				PVR_LOGG_IF_ERROR(eError, "PMR_CpuPhysAddr", e0);
+			}
+			else
+			{
+				eError = PMR_IsOffsetValid(psPMR,
+										  gsCwq.uiPageShift,
+										  1,
+										  uiPgAlignedOffset,
+										  pbValid);
+				PVR_LOGG_IF_ERROR(eError, "PMR_IsOffsetValid", e0);
+			}
+		}
+
+		/* Skip invalid PMR pages (i.e. sparse) */
+		if (pbValid[ui32PageIndex] == IMG_FALSE)
+		{
+			CACHEOP_PVR_ASSERT(bPMRIsSparse);
+			continue;
+		}
+
+		if (pvAddress)
+		{
+			/* The caller has supplied a KM/UM CpuVA, so use it unconditionally */
+			pbCpuVirtAddr =
+				(void *)(uintptr_t)((uintptr_t)pvAddress + (uintptr_t)(uiPgAlignedOffset-uiPgAlignedStartOffset));
+		}
+		/* Skip CpuVA acquire if CacheOp can be maintained entirely using CpuPA */
+		else if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL)
+		{
+			if (bPMRIsSparse)
+			{
+				eError =
+					PMRAcquireSparseKernelMappingData(psPMR,
+													  uiPgAlignedOffset,
+													  gsCwq.uiPageSize,
+													  (void **)&pbCpuVirtAddr,
+													  (size_t*)&uiOutSize,
+													  &hPrivOut);
+				PVR_LOGG_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0);
+			}
+			else
+			{
+				eError =
+					PMRAcquireKernelMappingData(psPMR,
+												uiPgAlignedOffset,
+												gsCwq.uiPageSize,
+												(void **)&pbCpuVirtAddr,
+												(size_t*)&uiOutSize,
+												&hPrivOut);
+				PVR_LOGG_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0);
+			}
+		}
+
+		/* Issue actual cache maintenance for PMR */
+		CacheOpExecRangeBased(psDevNode,
+							uiCacheOp,
+							pbCpuVirtAddr,
+							(gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL) ?
+								psCpuPhyAddr[ui32PageIndex] : psCpuPhyAddr[0],
+							uiPgAlignedOffset,
+							uiCLAlignedStartOffset,
+							uiCLAlignedEndOffset);
+
+		if (! pvAddress)
+		{
+			/* The caller has not supplied a KM/UM CpuVA; release the mapping */
+			if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL)
+			{
+				eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+				PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+			}
+		}
+	}
+
+e0:
+	if (psCpuPhyAddr != asCpuPhyAddr)
+	{
+		OSFreeMem(psCpuPhyAddr);
+	}
+
+	if (pbValid != abValid)
+	{
+		OSFreeMem(pbValid);
+	}
+
+	if (! bIsRequestValidated)
+	{
+		eError = PMRUnlockSysPhysAddresses(psPMR);
+		PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+	}
+
+	return eError;
+}
+
+static PVRSRV_ERROR CacheOpQListExecGlobal(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32NumOfEntries;
+	CACHEOP_WORK_ITEM *psCacheOpWorkItem;
+#if defined(CACHEOP_DEBUG)
+	IMG_UINT64 uiTimeNow = 0;
+	IMG_UINT64 ui64DequeuedTime;
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+#endif
+	CACHEOP_PVR_ASSERT(!gsCwq.bNoGlobalFlushImpl);
+
+	/* Take the current snapshot of queued CacheOps before we issue a global cache
+	   flush operation so that we retire the right number of CacheOps that have
+	   been affected by the to-be-executed global CacheOp */
+	ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter);
+	if (OSAtomicRead(&gsCwq.hWriteCounter) < OSAtomicRead(&gsCwq.hReadCounter))
+	{
+		/* This branch handles the write-counter having wrapped around in value space.
+		   The logic works because the read-counter does not change value for the
+		   duration of this function, so we don't run the risk of it too wrapping
+		   round whilst the number of entries is being determined here; that is to
+		   say, the consumer in this framework is single-threaded and this function
+		   is that consumer thread */
+		ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hReadCounter, &gsCwq.hWriteCounter);
+
+		/* Two's complement arithmetic gives the number of entries */
+		ui32NumOfEntries = CACHEOP_INDICES_MAX - ui32NumOfEntries;
+	}
+	if (! ui32NumOfEntries)
+	{
+		return PVRSRV_OK;
+	}
+#if defined(CACHEOP_DEBUG)
+	CACHEOP_PVR_ASSERT(ui32NumOfEntries < CACHEOP_INDICES_MAX);
+#endif
+
+	/* Use the current/latest queue-tail work-item's GF/SeqNum to predicate GF */
+	psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxRead(&gsCwq.hWriteCounter)];
+	CacheOpQItemReadCheck(psCacheOpWorkItem);
+#if defined(CACHEOP_DEBUG)
+	/* The time waiting in the queue to be serviced */
+	ui64DequeuedTime = OSClockns64();
+#endif
+
+	/* Check if items between [hRead/hWrite]Counter can be discarded before issuing GF */
+	if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > psCacheOpWorkItem->ui32GFSeqNum)
+	{
+		/* The currently discarded CacheOp item updates gsCwq.hCompletedSeqNum */
+		OSAtomicWrite(&gsCwq.hCompletedSeqNum, psCacheOpWorkItem->ui32OpSeqNum);
+#if defined(CACHEOP_DEBUG)
+		gsCwq.ui32KMDiscards += ui32NumOfEntries;
+#endif
+	}
+	else
+	{
+		eError = CacheOpGlobalFlush();
+		PVR_LOGR_IF_ERROR(eError, "CacheOpGlobalFlush");
+#if defined(CACHEOP_DEBUG)
+		uiTimeNow = OSClockns64();
+		sCacheOpWorkItem.bDeferred = IMG_TRUE;
+		sCacheOpWorkItem.ui64ExecuteTime = uiTimeNow;
+		sCacheOpWorkItem.psPMR = gsCwq.psInfoPagePMR;
+		sCacheOpWorkItem.pid = OSGetCurrentProcessID();
+		sCacheOpWorkItem.uiCacheOp = PVRSRV_CACHE_OP_GLOBAL;
+		sCacheOpWorkItem.ui64DequeuedTime = ui64DequeuedTime;
+		sCacheOpWorkItem.ui64EnqueuedTime = psCacheOpWorkItem->ui64EnqueuedTime;
+		sCacheOpWorkItem.ui32OpSeqNum = gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+#endif
+	}
+
+	while (ui32NumOfEntries)
+	{
+		psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hReadCounter)];
+		CacheOpQItemReadCheck(psCacheOpWorkItem);
+
+#if defined(CACHEOP_DEBUG)
+		if (psCacheOpWorkItem->uiCacheOp != PVRSRV_CACHE_OP_GLOBAL)
+		{
+			psCacheOpWorkItem->bRBF = IMG_FALSE;
+			if (! uiTimeNow)
+			{
+				/* Measure deferred queueing overhead only */
+				uiTimeNow = OSClockns64();
+				psCacheOpWorkItem->ui64ExecuteTime = uiTimeNow;
+			}
+			else
+			{
+				psCacheOpWorkItem->ui64ExecuteTime = uiTimeNow;
+			}
+			psCacheOpWorkItem->ui64DequeuedTime = ui64DequeuedTime;
+			CacheOpStatsExecLogWrite(psCacheOpWorkItem);
+		}
+		/* Something's gone horribly wrong if these 2 counters are identical at this point */
+		CACHEOP_PVR_ASSERT(OSAtomicRead(&gsCwq.hWriteCounter) != OSAtomicRead(&gsCwq.hReadCounter));
+#endif
+
+		/* If CacheOp is timeline(d), notify timeline waiters */
+		eError = CacheOpTimelineExec(psCacheOpWorkItem);
+		PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+
+		/* Mark index as ready for recycling for next CacheOp */
+		CacheOpQItemRecycle(psCacheOpWorkItem);
+		(void) CacheOpIdxIncrement(&gsCwq.hReadCounter);
+		ui32NumOfEntries = ui32NumOfEntries - 1;
+	}
+
+#if defined(CACHEOP_DEBUG)
+	if (uiTimeNow)
+	{
+		/* Only log GF that was actually executed */
+		CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+	}
+#endif
+
+	return eError;
+}
+
+static PVRSRV_ERROR CacheOpQListExecRangeBased(void)
+{
+	IMG_UINT32 ui32NumOfEntries;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32WriteCounter = ~0;
+	IMG_BOOL bUsedGlobalFlush = IMG_FALSE;
+	CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+#if defined(CACHEOP_DEBUG)
+	IMG_UINT64 uiTimeNow = 0;
+#endif
+
+	/* Take a snapshot of the current count of deferred entries at this junction */
+	ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter);
+	if (! ui32NumOfEntries)
+	{
+		return PVRSRV_OK;
+	}
+#if defined(CACHEOP_DEBUG)
+	CACHEOP_PVR_ASSERT(ui32NumOfEntries < CACHEOP_INDICES_MAX);
+#endif
+
+	while (ui32NumOfEntries)
+	{
+		if (! OSAtomicRead(&gsCwq.hReadCounter))
+		{
+			/* Normally, the read-counter trails the write-counter until the write-counter
+			   wraps round to zero. Under this condition we (re)calculate, as the
+			   read-counter too is wrapping around at this point */
+			ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter);
+		}
+#if defined(CACHEOP_DEBUG)
+		/* Something's gone horribly wrong if these 2 counters are identical at this point */
+		CACHEOP_PVR_ASSERT(OSAtomicRead(&gsCwq.hWriteCounter) != OSAtomicRead(&gsCwq.hReadCounter));
+#endif
+
+		/* Select the next pending deferred work-item for RBF cache maintenance */
+		psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hReadCounter)];
+		CacheOpQItemReadCheck(psCacheOpWorkItem);
+#if defined(CACHEOP_DEBUG)
+		/* The time waiting in the queue to be serviced */
+		psCacheOpWorkItem->ui64DequeuedTime = OSClockns64();
+#endif
+
+		/* The following CacheOpPMRExec() could trigger a GF, so we (re)read this
+		   counter just in case so that we know all such pending CacheOp(s) that will
+		   benefit from this soon-to-be-executed GF */
+		ui32WriteCounter = CacheOpConfigSupports(CACHEOP_CONFIG_KGF) ?
+								OSAtomicRead(&gsCwq.hWriteCounter) : ui32WriteCounter;
+
+		eError = CacheOpPMRExec(psCacheOpWorkItem->psPMR,
+								NULL, /* No UM virtual address */
+								psCacheOpWorkItem->uiOffset,
+								psCacheOpWorkItem->uiSize,
+								psCacheOpWorkItem->uiCacheOp,
+								psCacheOpWorkItem->ui32GFSeqNum,
+								IMG_TRUE, /* PMR is pre-validated */
+								&bUsedGlobalFlush);
+		if (eError != PVRSRV_OK)
+		{
+#if defined(CACHEOP_DEBUG)
+#define PID_FMTSPEC " PID:%u"
+#define CACHE_OP_WORK_PID psCacheOpWorkItem->pid
+#else
+#define PID_FMTSPEC "%s"
+#define CACHE_OP_WORK_PID ""
+#endif
+
+			PVR_LOG(("Deferred CacheOpPMRExec failed:"
+					 PID_FMTSPEC
+					 " PMR:%p"
+					 " Offset:%" IMG_UINT64_FMTSPECX
+					 " Size:%" IMG_UINT64_FMTSPECX
+					 " CacheOp:%d,"
+					 " error: %d",
+					CACHE_OP_WORK_PID,
+					psCacheOpWorkItem->psPMR,
+					psCacheOpWorkItem->uiOffset,
+					psCacheOpWorkItem->uiSize,
+					psCacheOpWorkItem->uiCacheOp,
+					eError));
+
+#undef PID_FMTSPEC
+#undef CACHE_OP_WORK_PID
+		}
+		else if (bUsedGlobalFlush)
+		{
+#if defined(CACHEOP_DEBUG)
+			psCacheOpWorkItem->ui32OpSeqNum = gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+#endif
+			break;
+		}
+
+#if defined(CACHEOP_DEBUG)
+		if (psCacheOpWorkItem->uiCacheOp != PVRSRV_CACHE_OP_GLOBAL)
+		{
+			psCacheOpWorkItem->bRBF = IMG_TRUE;
+			psCacheOpWorkItem->ui64ExecuteTime = OSClockns64();
+			CacheOpStatsExecLogWrite(psCacheOpWorkItem);
+		}
+		else
+		{
+			CACHEOP_PVR_ASSERT(!gsCwq.bNoGlobalFlushImpl);
+		}
+#endif
+
+		/* The currently executed CacheOp item updates gsCwq.hCompletedSeqNum.
+		   NOTE: This CacheOp item might be a discard item; if so, its seqNum
+		   still updates gsCwq.hCompletedSeqNum */
+		OSAtomicWrite(&gsCwq.hCompletedSeqNum, psCacheOpWorkItem->ui32OpSeqNum);
+		OSAtomicSubtract(&gsCwq.hDeferredSize, psCacheOpWorkItem->uiSize);
+
+		/* If CacheOp is timeline(d), notify timeline waiters */
+		eError = CacheOpTimelineExec(psCacheOpWorkItem);
+		PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+
+		/* Indicate that this CCB work-item slot is now free for (re)use */
+		CacheOpQItemRecycle(psCacheOpWorkItem);
+		(void) CacheOpIdxIncrement(&gsCwq.hReadCounter);
+		ui32NumOfEntries = ui32NumOfEntries - 1;
+	}
+
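+	/* A GF fired mid-drain: every item queued up to the snapshotted
+	   write-counter has already been maintained by that flush, so retire
+	   the remaining items without issuing further cache maintenance */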
+	if (bUsedGlobalFlush)
+	{
+#if defined(CACHEOP_DEBUG)
+		uiTimeNow = OSClockns64();
+		CACHEOP_PVR_ASSERT(OSAtomicRead(&gsCwq.hWriteCounter) != OSAtomicRead(&gsCwq.hReadCounter));
+#endif
+
+		/* Snapshot of queued CacheOps before the global cache flush was issued */
+		ui32NumOfEntries = ui32WriteCounter - OSAtomicRead(&gsCwq.hReadCounter);
+		if (ui32WriteCounter < OSAtomicRead(&gsCwq.hReadCounter))
+		{
+			/* Branch handles when the write-counter has wrapped-around in value space */
+			ui32NumOfEntries = OSAtomicRead(&gsCwq.hReadCounter) - ui32WriteCounter;
+			ui32NumOfEntries = CACHEOP_INDICES_MAX - ui32NumOfEntries;
+		}
+
+		while (ui32NumOfEntries)
+		{
+			CacheOpQItemReadCheck(psCacheOpWorkItem);
+
+#if defined(CACHEOP_DEBUG)
+			psCacheOpWorkItem->bRBF = IMG_FALSE;
+			psCacheOpWorkItem->ui64ExecuteTime = uiTimeNow;
+			if (psCacheOpWorkItem->uiCacheOp == PVRSRV_CACHE_OP_GLOBAL)
+			{
+				CACHEOP_PVR_ASSERT(!gsCwq.bNoGlobalFlushImpl);
+				psCacheOpWorkItem->pid = OSGetCurrentProcessID();
+			}
+			CacheOpStatsExecLogWrite(psCacheOpWorkItem);
+#endif
+
+			eError = CacheOpTimelineExec(psCacheOpWorkItem);
+			PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+
+			/* Mark index as ready for recycling for next CacheOp */
+			CacheOpQItemRecycle(psCacheOpWorkItem);
+			(void) CacheOpIdxIncrement(&gsCwq.hReadCounter);
+			ui32NumOfEntries = ui32NumOfEntries - 1;
+			psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hReadCounter)];
+		}
+	}
+
+	return eError;
+}
+
+static INLINE PVRSRV_ERROR CacheOpQListExec(void)
+{
+	PVRSRV_ERROR eError;
+
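+	/* Prefer a single global flush when range-based flushing is disabled or
+	   the accumulated deferred size exceeds the KM GF threshold; otherwise
+	   service each queued entry with range-based maintenance */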
+	if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF) &&
+		(!CacheOpConfigSupports(CACHEOP_CONFIG_KRBF)
+		 || OSAtomicRead(&gsCwq.hDeferredSize) > gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD]))
+	{
+		eError = CacheOpQListExecGlobal();
+		PVR_LOG_IF_ERROR(eError, "CacheOpQListExecGlobal");
+	}
+	else
+	{
+		eError = CacheOpQListExecRangeBased();
+		PVR_LOG_IF_ERROR(eError, "CacheOpQListExecRangeBased");
+	}
+
+	/* Signal any waiting threads blocked on CacheOp fence checks; the
+	   completed sequence number has been updated to the last queued work item */
+	eError = OSEventObjectSignal(gsCwq.hClientWakeUpEvtObj);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+	return eError;
+}
+
+static void CacheOpThread(void *pvData)
+{
+	PVRSRV_DATA *psPVRSRVData = pvData;
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR eError;
+
+	/* Open the CacheOp thread event object; log an error if the open fails */
+	eError = OSEventObjectOpen(gsCwq.hThreadWakeUpEvtObj, &hOSEvent);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectOpen");
+
+	/* While driver is in good state & loaded, perform pending cache maintenance */
+	while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && gsCwq.bInit)
+	{
+		/* Sleep-wait here until signalled for new queued CacheOp work items;
+		   when woken up, drain the deferred queue completely before the next event-wait */
+		(void) OSEventObjectWaitKernel(hOSEvent, CACHEOP_THREAD_WAIT_TIMEOUT);
+		while (CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter))
+		{
+			eError = CacheOpQListExec();
+			PVR_LOG_IF_ERROR(eError, "CacheOpQListExec");
+		}
+	}
+
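+	/* Perform a final drain of any work items still queued before exiting */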
+	eError = CacheOpQListExec();
+	PVR_LOG_IF_ERROR(eError, "CacheOpQListExec");
+
+	eError = OSEventObjectClose(hOSEvent);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+
+static PVRSRV_ERROR CacheOpBatchExecTimeline(PVRSRV_DEVICE_NODE *psDevNode,
+											 PVRSRV_TIMELINE iTimeline,
+											 IMG_BOOL bUsedGlobalFlush,
+											 IMG_UINT32 ui32CurrentFenceSeqNum,
+											 IMG_UINT32 *pui32NextFenceSeqNum)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32NextIdx;
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = { };
+	CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+
+	eError = CacheOpTimelineBind(psDevNode, &sCacheOpWorkItem, iTimeline);
+	PVR_LOGR_IF_ERROR(eError, "CacheOpTimelineBind");
+
+	OSLockAcquire(gsCwq.hDeferredLock);
+
+	/*
+	   Check that deferred queueing space is available and that nothing is
+	   currently queued. The second check is required because Android, where
+	   timelines are used, expects timelines to be signalled within a 1000ms
+	   deadline and complains otherwise. Seeing that we cannot be sure how
+	   long the CacheOp currently in the queue will take, we should not send
+	   this timeline down the queue as well.
+	 */
+	ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter);
+	if (!CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter) &&
+		CacheOpIdxRead(&gsCwq.hReadCounter) != ui32NextIdx)
+	{
+		psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx];
+		CacheOpQItemWriteCheck(psCacheOpWorkItem);
+
+		psCacheOpWorkItem->sSWTimelineObj = sCacheOpWorkItem.sSWTimelineObj;
+		psCacheOpWorkItem->iTimeline = sCacheOpWorkItem.iTimeline;
+		psCacheOpWorkItem->ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+		psCacheOpWorkItem->uiCacheOp = PVRSRV_CACHE_OP_TIMELINE;
+		psCacheOpWorkItem->uiOffset = (IMG_DEVMEM_OFFSET_T)0;
+		psCacheOpWorkItem->uiSize = (IMG_DEVMEM_SIZE_T)0;
+		psCacheOpWorkItem->ui32GFSeqNum = 0;
+		psCacheOpWorkItem->psDevNode = psDevNode;
+		/* Defer timeline using information page PMR */
+		psCacheOpWorkItem->psPMR = gsCwq.psInfoPagePMR;
+		eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+		PVR_LOGG_IF_ERROR(eError, "PMRLockSysPhysAddresses", e0);
+#if defined(CACHEOP_DEBUG)
+		psCacheOpWorkItem->pid = OSGetCurrentClientProcessIDKM();
+		psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64();
+		gsCwq.ui32ServerASync += 1;
+		gsCwq.ui32ServerDTL += 1;
+#endif
+
+		/* Mark index ready for cache maintenance */
+		(void) CacheOpIdxIncrement(&gsCwq.hWriteCounter);
+
+		OSLockRelease(gsCwq.hDeferredLock);
+
+		/* Signal the CacheOp thread to ensure this deferred timeline gets processed */
+		eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+	}
+	else
+	{
+		IMG_BOOL bExecTimeline = IMG_TRUE;
+		IMG_UINT32 ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum);
+
+		OSLockRelease(gsCwq.hDeferredLock);
+
+		/*
+		   This pathway requires careful handling as the client CacheOp(s) predicated on this
+		   timeline might have been broken up (i.e. batched) into several server requests by
+		   the client:
+		   1 - In the first case, a CacheOp from an earlier batch is still in-flight, so we
+		   check whether this is the case because, even though we might have executed all the
+		   CacheOps in this batch synchronously, we cannot be sure that an in-flight CacheOp
+		   pending on this client is not predicated on this timeline; hence we need to
+		   synchronise here for safety by fencing until all in-flight CacheOps are complete.
+		   NOTE: On Android, this might cause issues due to timeline notification deadlines,
+		   so we do not fence (i.e. cannot sleep or wait) here to synchronise; instead we
+		   nudge the services client to retry the request if there is no GF support.
+		   2 - In the second case, there is no in-flight CacheOp for this client, in which
+		   case we just continue processing as normal.
+		 */
+		if (!bUsedGlobalFlush && !CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32CurrentFenceSeqNum))
+		{
+#if defined(ANDROID)
+			bExecTimeline = IMG_TRUE;
+			if (CacheOpGlobalFlush() != PVRSRV_OK)
+			{
+				bExecTimeline = IMG_FALSE;
+				eError = PVRSRV_ERROR_RETRY;
+			}
+#else
+			eError = CacheOpFence ((RGXFWIF_DM)0, ui32CurrentFenceSeqNum);
+			PVR_LOG_IF_ERROR(eError, "CacheOpFence");
+
+			/* CacheOpFence() might have triggered a GF so we take advantage of it */
+			if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32CurrentFenceSeqNum)
+			{
+				*pui32NextFenceSeqNum = 0;
+			}
+#endif
+		}
+
+		if (bExecTimeline)
+		{
+			/* CacheOp fence requirement met, signal timeline */
+			eError = CacheOpTimelineExec(&sCacheOpWorkItem);
+			PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+		}
+	}
+
+	return eError;
+e0:
+	if (psCacheOpWorkItem)
+	{
+		/* Need to ensure we leave this CacheOp QItem in the proper recycled state */
+		CacheOpQItemRecycle(psCacheOpWorkItem);
+		OSLockRelease(gsCwq.hDeferredLock);
+	}
+
+	return eError;
+}
+
+static PVRSRV_ERROR CacheOpBatchExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode,
+											PMR **ppsPMR,
+											IMG_CPU_VIRTADDR *pvAddress,
+											IMG_DEVMEM_OFFSET_T *puiOffset,
+											IMG_DEVMEM_SIZE_T *puiSize,
+											PVRSRV_CACHE_OP *puiCacheOp,
+											IMG_UINT32 ui32NumCacheOps,
+											PVRSRV_TIMELINE uiTimeline,
+											IMG_UINT32 ui32GlobalFlushSeqNum,
+											IMG_UINT32 uiCurrentFenceSeqNum,
+											IMG_UINT32 *pui32NextFenceSeqNum)
+{
+	IMG_UINT32 ui32Idx;
+	IMG_UINT32 ui32NextIdx;
+	IMG_BOOL bBatchHasTimeline;
+	IMG_BOOL bCacheOpConfigKDF;
+	IMG_BOOL bCacheOpConfigKRBF;
+	IMG_DEVMEM_SIZE_T uiLogicalSize;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bUseGlobalFlush = IMG_FALSE;
+	CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+#if defined(CACHEOP_DEBUG)
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	IMG_UINT32 ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+	sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+#endif
+
+	/* Check if batch has an associated timeline update */
+	bBatchHasTimeline = puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_TIMELINE;
+	puiCacheOp[ui32NumCacheOps-1] &= ~(PVRSRV_CACHE_OP_GLOBAL | PVRSRV_CACHE_OP_TIMELINE);
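+	/* The last batch entry carries the batch-wide flags; strip them so the
+	   entry itself is processed as a plain CacheOp below */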
+
+	/* Check if config. supports kernel deferring of cacheops */
+	bCacheOpConfigKDF = CacheOpConfigSupports(CACHEOP_CONFIG_KDF);
+	bCacheOpConfigKRBF = CacheOpConfigSupports(CACHEOP_CONFIG_KRBF);
+
+	/*
+	   The client expects the next fence seqNum to be zero unless the server
+	   has deferred at least one CacheOp in the submitted queue, in which case
+	   the server informs the client of the last CacheOp seqNum deferred in
+	   this batch.
+	*/
+	for (*pui32NextFenceSeqNum = 0, ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+	{
+		if (! puiSize[ui32Idx])
+		{
+			/* Fail UM request, don't silently ignore */
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto e0;
+		}
+		else if (bCacheOpConfigKDF)
+		{
+			/* Check if there is deferred queueing space available */
+			ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter);
+			if (ui32NextIdx != CacheOpIdxRead(&gsCwq.hReadCounter))
+			{
+				psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx];
+			}
+		}
+
+		/*
+		   Normally we would like to defer client CacheOp(s), but we may not always be in a
+		   position to do so, or it may not be necessary, for the following reasons:
+		   0 - There is currently no queueing space left to enqueue this CacheOp; this might
+		       imply the system is queueing more requests than can be consumed by the CacheOp
+		       thread in time.
+		   1 - Batch has a timeline, action this now due to Android timeline signalling deadlines.
+		   2 - Configuration does not support deferring of cache maintenance operations, so we
+		       execute the batch synchronously/immediately.
+		   3 - CacheOp has an INVALIDATE; as this is used to transfer device memory buffer
+		       ownership back to the processor, we cannot defer it, so action it immediately.
+		   4 - CacheOp size is too small (a single OS page) to warrant the overhead of deferment;
+		       this is not considered if KRBF is not present, as that implies defer-all.
+		   5 - CacheOp size is OK for deferment, but a client virtual address is supplied, so we
+		       might as well just take advantage of said VA & flush immediately in UM context.
+		   6 - Prevent DoS attacks: if a malicious client queues something very large, say 1GiB,
+		       and the processor cache ISA does not have a global flush implementation, we
+		       upper-bound this threshold to PVR_DIRTY_BYTES_FLUSH_THRESHOLD.
+		   7 - Ensure QoS (load balancing) by not over-loading the queue with too many requests;
+		       here the (pseudo) alternate queue is the user context, so we execute directly
+		       on it if the processor cache ISA does not have a global flush implementation.
+		*/
+		if (!psCacheOpWorkItem  ||
+			bBatchHasTimeline   ||
+			!bCacheOpConfigKDF  ||
+			puiCacheOp[ui32Idx] & PVRSRV_CACHE_OP_INVALIDATE ||
+			(bCacheOpConfigKRBF && puiSize[ui32Idx] <= (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize) ||
+			(pvAddress[ui32Idx] && puiSize[ui32Idx] < (IMG_DEVMEM_SIZE_T)gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD]) ||
+			(gsCwq.bNoGlobalFlushImpl && puiSize[ui32Idx] >= (IMG_DEVMEM_SIZE_T)(gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] << 2)) ||
+			(gsCwq.bNoGlobalFlushImpl && OSAtomicRead(&gsCwq.hDeferredSize) >= gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] << CACHEOP_INDICES_LOG2_SIZE))
+		{
+			/* When the CacheOp thread is not keeping up, trash the d-cache */
+			bUseGlobalFlush = !psCacheOpWorkItem && bCacheOpConfigKDF ? IMG_TRUE : IMG_FALSE;
+#if defined(CACHEOP_DEBUG)
+			sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+			gsCwq.ui32ServerSync += 1;
+#endif
+			psCacheOpWorkItem = NULL;
+
+			eError = CacheOpPMRExec(ppsPMR[ui32Idx],
+									pvAddress[ui32Idx],
+									puiOffset[ui32Idx],
+									puiSize[ui32Idx],
+									puiCacheOp[ui32Idx],
+									ui32GlobalFlushSeqNum,
+									IMG_FALSE,
+									&bUseGlobalFlush);
+			PVR_LOGG_IF_ERROR(eError, "CacheOpPMRExec", e0);
+
+#if defined(CACHEOP_DEBUG)
+			sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+			sCacheOpWorkItem.bRBF = !bUseGlobalFlush;
+			sCacheOpWorkItem.ui32OpSeqNum = bUseGlobalFlush ?
+				gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] : ui32OpSeqNum;
+			sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx];
+			sCacheOpWorkItem.uiSize = puiSize[ui32Idx];
+			sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx];
+			sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx];
+			CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#endif
+
+			if (bUseGlobalFlush) break;
+			continue;
+		}
+
+		/* Need to validate request parameters here before enqueueing */
+		eError = PMR_LogicalSize(ppsPMR[ui32Idx], &uiLogicalSize);
+		PVR_LOGG_IF_ERROR(eError, "PMR_LogicalSize", e0);
+		eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+		PVR_LOGG_IF_FALSE(((puiOffset[ui32Idx]+puiSize[ui32Idx]) <= uiLogicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, e0);
+		eError = PVRSRV_OK;
+
+		/* For safety, take reference here in user context */
+		eError = PMRLockSysPhysAddresses(ppsPMR[ui32Idx]);
+		PVR_LOGG_IF_ERROR(eError, "PMRLockSysPhysAddresses", e0);
+
+		OSLockAcquire(gsCwq.hDeferredLock);
+
+		/* Select next item off the queue to defer with */
+		ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter);
+		if (ui32NextIdx != CacheOpIdxRead(&gsCwq.hReadCounter))
+		{
+			psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx];
+			CacheOpQItemWriteCheck(psCacheOpWorkItem);
+		}
+		else
+		{
+			/* Retry, disable KDF for this batch */
+			OSLockRelease(gsCwq.hDeferredLock);
+			bCacheOpConfigKDF = IMG_FALSE;
+			psCacheOpWorkItem = NULL;
+			ui32Idx = ui32Idx - 1;
+			continue;
+		}
+
+		/* The timeline needs to be looked-up (i.e. bound) in the user context
+		   before deferring into the CacheOp thread's kernel context */
+		eError = CacheOpTimelineBind(psDevNode, psCacheOpWorkItem, PVRSRV_NO_TIMELINE);
+		PVR_LOGG_IF_ERROR(eError, "CacheOpTimelineBind", e1);
+
+		/* Prepare & enqueue next deferred work item for CacheOp thread */
+		psCacheOpWorkItem->ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+		*pui32NextFenceSeqNum = psCacheOpWorkItem->ui32OpSeqNum;
+		psCacheOpWorkItem->ui32GFSeqNum = ui32GlobalFlushSeqNum;
+		psCacheOpWorkItem->uiCacheOp = puiCacheOp[ui32Idx];
+		psCacheOpWorkItem->uiOffset = puiOffset[ui32Idx];
+		psCacheOpWorkItem->uiSize = puiSize[ui32Idx];
+		psCacheOpWorkItem->psPMR = ppsPMR[ui32Idx];
+		psCacheOpWorkItem->psDevNode = psDevNode;
+#if defined(CACHEOP_DEBUG)
+		psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64();
+		psCacheOpWorkItem->pid = sCacheOpWorkItem.pid;
+		psCacheOpWorkItem->bDeferred = IMG_TRUE;
+		psCacheOpWorkItem->bKMReq = IMG_FALSE;
+		psCacheOpWorkItem->bUMF = IMG_FALSE;
+		gsCwq.ui32ServerASync += 1;
+#endif
+
+		/* Increment deferred size & mark index ready for cache maintenance */
+		OSAtomicAdd(&gsCwq.hDeferredSize, (IMG_UINT32)puiSize[ui32Idx]);
+		(void) CacheOpIdxIncrement(&gsCwq.hWriteCounter);
+
+		OSLockRelease(gsCwq.hDeferredLock);
+		psCacheOpWorkItem = NULL;
+	}
+
+	/* Signal the CacheOp thread to ensure these items get processed */
+	eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+	if (bUseGlobalFlush)
+	{
+#if defined(CACHEOP_DEBUG)
+		/* A GF was already logged in the loop above, so the rest, if any, are discards */
+		sCacheOpWorkItem.ui64ExecuteTime = sCacheOpWorkItem.ui64EnqueuedTime;
+		sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+		while (++ui32Idx < ui32NumCacheOps)
+		{
+			sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx];
+			sCacheOpWorkItem.uiSize = puiSize[ui32Idx];
+			sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx];
+			sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx];
+			CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+			gsCwq.ui32KMDiscards += 1;
+		}
+#endif
+
+		/* No next UM fence seqNum */
+		*pui32NextFenceSeqNum = 0;
+	}
+
+e1:
+	if (psCacheOpWorkItem)
+	{
+		/* Need to ensure we leave this CacheOp QItem in the proper recycled state */
+		CacheOpQItemRecycle(psCacheOpWorkItem);
+		OSLockRelease(gsCwq.hDeferredLock);
+	}
+e0:
+	if (bBatchHasTimeline)
+	{
+		PVRSRV_ERROR eError2;
+		eError2 = CacheOpBatchExecTimeline(psDevNode, uiTimeline, bUseGlobalFlush,
+										   uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
+		eError = (eError2 == PVRSRV_ERROR_RETRY) ? eError2 : eError;
+	}
+
+	return eError;
+}
+
+static PVRSRV_ERROR CacheOpBatchExecGlobal(PVRSRV_DEVICE_NODE *psDevNode,
+									PMR **ppsPMR,
+									IMG_CPU_VIRTADDR *pvAddress,
+									IMG_DEVMEM_OFFSET_T *puiOffset,
+									IMG_DEVMEM_SIZE_T *puiSize,
+									PVRSRV_CACHE_OP *puiCacheOp,
+									IMG_UINT32 ui32NumCacheOps,
+									PVRSRV_TIMELINE uiTimeline,
+									IMG_UINT32 ui32GlobalFlushSeqNum,
+									IMG_UINT32 uiCurrentFenceSeqNum,
+									IMG_UINT32 *pui32NextFenceSeqNum)
+{
+	IMG_UINT32 ui32Idx;
+	IMG_BOOL bBatchHasTimeline;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bUseGlobalFlush = IMG_FALSE;
+	CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+#if	defined(CACHEOP_DEBUG)
+	IMG_DEVMEM_SIZE_T uiTotalSize = 0;
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+#endif
+#if !defined(CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH)
+	PVR_LOGR_IF_ERROR(PVRSRV_ERROR_NOT_SUPPORTED, CACHEOP_NO_GFLUSH_ERROR_STRING);
+#endif
+	PVR_UNREFERENCED_PARAMETER(pvAddress);
+
+	/* Check if batch has an associated timeline update request */
+	bBatchHasTimeline = puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_TIMELINE;
+	puiCacheOp[ui32NumCacheOps-1] &= ~(PVRSRV_CACHE_OP_GLOBAL | PVRSRV_CACHE_OP_TIMELINE);
+
+	/* Skip operation if a GF has occurred elsewhere in the interim */
+	if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32GlobalFlushSeqNum)
+	{
+#if	defined(CACHEOP_DEBUG)
+		sCacheOpWorkItem.ui32OpSeqNum = ui32GlobalFlushSeqNum;
+#endif
+		bUseGlobalFlush = IMG_TRUE;
+		*pui32NextFenceSeqNum = 0;
+		goto exec_timeline;
+	}
+
+	/* Check whether the client batch contains an INVALIDATE CacheOp, which cannot be deferred */
+	for (*pui32NextFenceSeqNum = 0, ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+	{
+#if	defined(CACHEOP_DEBUG)
+		IMG_DEVMEM_SIZE_T uiLogicalSize;
+		uiTotalSize += puiSize[ui32Idx];
+		/* There is no need to validate request parameters as we are about
+		   to issue a GF, but skipping validation might make issues
+		   reproducible in one config but not the other, so validate in
+		   debug builds */
+		eError = PMR_LogicalSize(ppsPMR[ui32Idx], &uiLogicalSize);
+		PVR_LOGG_IF_ERROR(eError, "PMR_LogicalSize", e0);
+		eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+		PVR_LOGG_IF_FALSE(((puiOffset[ui32Idx]+puiSize[ui32Idx]) <= uiLogicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, e0);
+		eError = PVRSRV_OK;
+#endif
+		if (! puiSize[ui32Idx])
+		{
+			/* Fail UM request, don't silently ignore */
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto e0;
+		}
+		else if (puiCacheOp[ui32Idx] & PVRSRV_CACHE_OP_INVALIDATE)
+		{
+			/* Invalidates cannot be deferred */
+			bUseGlobalFlush = IMG_TRUE;
+		}
+	}
+
+	OSLockAcquire(gsCwq.hDeferredLock);
+
+	/*
+	   Normally we would like to defer client CacheOp(s), but we may not always be in a
+	   position to do so, for the following reasons:
+	   0 - Batch has an INVALIDATE; as this is used to transfer device memory buffer
+	       ownership back to the processor, we cannot defer it, so action it immediately.
+	   1 - Configuration does not support deferring of cache maintenance operations, so
+	       we execute synchronously/immediately.
+	   2 - There is currently no queueing space left to enqueue this CacheOp; this might
+	       imply the system is queueing more requests than can be consumed by the CacheOp
+	       thread in time.
+	   3 - Batch has a timeline and there is currently something queued; we cannot defer
+	       because the currently queued operation(s) might take quite a while to action,
+	       which might cause a timeline deadline timeout.
+	*/
+	if (bUseGlobalFlush ||
+		!CacheOpConfigSupports(CACHEOP_CONFIG_KDF) ||
+		CacheOpIdxNext(&gsCwq.hWriteCounter) == CacheOpIdxRead(&gsCwq.hReadCounter) ||
+		(bBatchHasTimeline && CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter)))
+	{
+		OSLockRelease(gsCwq.hDeferredLock);
+#if	defined(CACHEOP_DEBUG)
+		sCacheOpWorkItem.ui32OpSeqNum =	CacheOpGetNextCommonSeqNum();
+		sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+#endif
+		eError = CacheOpGlobalFlush();
+		PVR_LOGG_IF_ERROR(eError, "CacheOpGlobalFlush", e0);
+		bUseGlobalFlush = IMG_TRUE;
+#if	defined(CACHEOP_DEBUG)
+		sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+		gsCwq.ui32ServerSync += 1;
+#endif
+		goto exec_timeline;
+	}
+
+	/* Select the next item off the queue to defer this GF (and possibly the timeline) with */
+	psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hWriteCounter)];
+	CacheOpQItemWriteCheck(psCacheOpWorkItem);
+
+	/* Defer the GF using information page PMR */
+	psCacheOpWorkItem->psPMR = gsCwq.psInfoPagePMR;
+	eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+	PVR_LOGG_IF_ERROR(eError, "PMRLockSysPhysAddresses", e0);
+
+	/* Timeline object has to be looked-up here in user context */
+	eError = CacheOpTimelineBind(psDevNode, psCacheOpWorkItem, uiTimeline);
+	PVR_LOGG_IF_ERROR(eError, "CacheOpTimelineBind", e0);
+
+	/* Prepare & enqueue next deferred work item for CacheOp thread */
+	*pui32NextFenceSeqNum = CacheOpGetNextCommonSeqNum();
+	psCacheOpWorkItem->ui32OpSeqNum = *pui32NextFenceSeqNum;
+	psCacheOpWorkItem->ui32GFSeqNum = ui32GlobalFlushSeqNum;
+	psCacheOpWorkItem->uiCacheOp = PVRSRV_CACHE_OP_GLOBAL;
+	psCacheOpWorkItem->uiOffset = (IMG_DEVMEM_OFFSET_T)0;
+	psCacheOpWorkItem->uiSize = (IMG_DEVMEM_SIZE_T)0;
+#if defined(CACHEOP_DEBUG)
+	/* Note client pid & queueing time of deferred GF CacheOp */
+	psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64();
+	psCacheOpWorkItem->pid = sCacheOpWorkItem.pid;
+	OSAtomicAdd(&gsCwq.hDeferredSize, uiTotalSize);
+	psCacheOpWorkItem->uiSize = uiTotalSize;
+	psCacheOpWorkItem->bDeferred = IMG_TRUE;
+	psCacheOpWorkItem->bKMReq = IMG_FALSE;
+	psCacheOpWorkItem->bUMF = IMG_FALSE;
+	/* Client CacheOp is logged using the deferred seqNum */
+	sCacheOpWorkItem.ui32OpSeqNum =	*pui32NextFenceSeqNum;
+	sCacheOpWorkItem.ui64EnqueuedTime = psCacheOpWorkItem->ui64EnqueuedTime;
+	sCacheOpWorkItem.ui64ExecuteTime = psCacheOpWorkItem->ui64EnqueuedTime;
+	/* Update the CacheOp statistics */
+	gsCwq.ui32ServerASync += 1;
+	gsCwq.ui32ServerDGF += 1;
+#endif
+
+	/* Mark index ready for cache maintenance */
+	(void) CacheOpIdxIncrement(&gsCwq.hWriteCounter);
+
+	OSLockRelease(gsCwq.hDeferredLock);
+
+	/* Signal the CacheOp thread to ensure this GF gets processed */
+	eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+exec_timeline:
+	if (bUseGlobalFlush && bBatchHasTimeline)
+	{
+		eError = CacheOpBatchExecTimeline(psDevNode, uiTimeline, bUseGlobalFlush,
+										  uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
+	}
+
+#if	defined(CACHEOP_DEBUG)
+	for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+	{
+		sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx];
+		sCacheOpWorkItem.uiSize = puiSize[ui32Idx];
+		sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx];
+		sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx];
+		if (bUseGlobalFlush)
+		{
+			if (sCacheOpWorkItem.ui64ExecuteTime && ui32Idx)
+			{
+				/* Only the first item carries the real execution time; the rest are discards */
+				sCacheOpWorkItem.ui64EnqueuedTime = sCacheOpWorkItem.ui64ExecuteTime;
+			}
+			gsCwq.ui32KMDiscards += !sCacheOpWorkItem.ui64ExecuteTime ? 1 : ui32Idx ? 1 : 0;
+		}
+		CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+	}
+#endif
+
+	return eError;
+e0:
+	if (psCacheOpWorkItem)
+	{
+		/* Need to ensure we leave this CacheOp QItem in the proper recycled state */
+		CacheOpQItemRecycle(psCacheOpWorkItem);
+		OSLockRelease(gsCwq.hDeferredLock);
+	}
+
+	if (bBatchHasTimeline)
+	{
+		PVRSRV_ERROR eError2;
+		eError2 = CacheOpBatchExecTimeline(psDevNode, uiTimeline, IMG_FALSE,
+										   uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
+		eError = (eError2 == PVRSRV_ERROR_RETRY) ? eError2 : eError;
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR CacheOpExec (PPVRSRV_DEVICE_NODE psDevNode,
+						  void *pvVirtStart,
+						  void *pvVirtEnd,
+						  IMG_CPU_PHYADDR sCPUPhysStart,
+						  IMG_CPU_PHYADDR sCPUPhysEnd,
+						  PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_RETRY;
+#if	defined(CACHEOP_DEBUG)
+	IMG_BOOL bUsedGlobalFlush = IMG_FALSE;
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+#endif
+
+	if (gsCwq.bInit)
+	{
+		IMG_DEVMEM_SIZE_T uiSize = sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr;
+		if ((IMG_UINT32)uiSize > gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD])
+		{
+			eError = CacheOpGlobalFlush();
+		}
+	}
+
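+	/* A successful GF above supersedes any range-based maintenance; otherwise
+	   fall back to a ranged clean/invalidate/flush below */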
+	if (eError == PVRSRV_OK)
+	{
+#if	defined(CACHEOP_DEBUG)
+		bUsedGlobalFlush = IMG_TRUE;
+#endif
+	}
+	else
+	{
+		switch (uiCacheOp)
+		{
+			case PVRSRV_CACHE_OP_CLEAN:
+				OSCPUCacheCleanRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+				break;
+			case PVRSRV_CACHE_OP_INVALIDATE:
+				OSCPUCacheInvalidateRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+				break;
+			case PVRSRV_CACHE_OP_FLUSH:
+				OSCPUCacheFlushRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+				break;
+			default:
+				PVR_DPF((PVR_DBG_ERROR,	"%s: Invalid cache operation type %d",
+						 __func__, uiCacheOp));
+				break;
+		}
+		eError = PVRSRV_OK;
+	}
+
+#if	defined(CACHEOP_DEBUG)
+	if (! CacheOpConfigSupports(CACHEOP_CONFIG_KLOG))
+	{
+		if (bUsedGlobalFlush)
+		{
+			/* Undo the accounting for server GF done in CacheOpGlobalFlush() */
+			gsCwq.ui32ServerGF -= 1;
+		}
+	}
+	else
+	{
+		gsCwq.ui32TotalExecOps += 1;
+		if (! bUsedGlobalFlush)
+		{
+			gsCwq.ui32ServerSync += 1;
+			gsCwq.ui32ServerRBF +=
+				((sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr) & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift;
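+			/* i.e. the byte range rounded down to whole cache lines, then
+			   converted to a line count via the line-size shift */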
+		}
+		sCacheOpWorkItem.uiOffset = 0;
+		sCacheOpWorkItem.bKMReq = IMG_TRUE;
+		sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+		sCacheOpWorkItem.bRBF = !bUsedGlobalFlush;
+		/* Use information page PMR for logging KM request */
+		sCacheOpWorkItem.psPMR = gsCwq.psInfoPagePMR;
+		sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+		sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+		sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+		sCacheOpWorkItem.uiSize = (sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr);
+		CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+	}
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR CacheOpValExec(PMR *psPMR,
+						    IMG_UINT64 uiAddress,
+						    IMG_DEVMEM_OFFSET_T uiOffset,
+						    IMG_DEVMEM_SIZE_T uiSize,
+						    PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_ERROR eError;
+	IMG_CPU_VIRTADDR pvAddress = (IMG_CPU_VIRTADDR)(uintptr_t)uiAddress;
+	IMG_BOOL bUseGlobalFlush = (IMG_UINT32)uiSize > gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD];
+#if	defined(CACHEOP_DEBUG)
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	gsCwq.ui32TotalExecOps += 1;
+	gsCwq.ui32ServerSync += 1;
+	sCacheOpWorkItem.psPMR = psPMR;
+	sCacheOpWorkItem.uiSize = uiSize;
+	sCacheOpWorkItem.uiOffset = uiOffset;
+	sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+	sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+	sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+	sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+#endif
+
+	eError = CacheOpPMRExec(psPMR,
+							pvAddress,
+							uiOffset,
+							uiSize,
+							uiCacheOp,
+							gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0],
+							IMG_FALSE,
+							&bUseGlobalFlush);
+	PVR_LOGG_IF_ERROR(eError, "CacheOpPMRExec", e0);
+
+#if	defined(CACHEOP_DEBUG)
+	sCacheOpWorkItem.bRBF = !bUseGlobalFlush;
+	sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+	CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#endif
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR CacheOpQueue (CONNECTION_DATA *psConnection,
+						   PVRSRV_DEVICE_NODE *psDevNode,
+						   IMG_UINT32 ui32NumCacheOps,
+						   PMR **ppsPMR,
+						   IMG_UINT64 *puiAddress,
+						   IMG_DEVMEM_OFFSET_T *puiOffset,
+						   IMG_DEVMEM_SIZE_T *puiSize,
+						   PVRSRV_CACHE_OP *puiCacheOp,
+						   IMG_UINT32 ui32OpTimeline,
+						   IMG_UINT32 ui32ClientGFSeqNum,
+						   IMG_UINT32 uiCurrentFenceSeqNum,
+						   IMG_UINT32 *pui32NextFenceSeqNum)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_TIMELINE uiTimeline = (PVRSRV_TIMELINE)ui32OpTimeline;
+	IMG_CPU_VIRTADDR *pvAddress = (IMG_CPU_VIRTADDR*)(uintptr_t)puiAddress;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH)
+	PVR_LOGR_IF_FALSE((ui32ClientGFSeqNum == 0),
+					  "CacheOpQueue(ui32ClientGFSeqNum > 0)",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+#endif
+#if defined(CACHEOP_DEBUG)
+	gsCwq.ui32TotalExecOps += ui32NumCacheOps;
+#endif
+
+	if (! gsCwq.bInit)
+	{
+		PVR_LOG(("CacheOp framework not initialised, failing request"));
+		return PVRSRV_ERROR_NOT_INITIALISED;
+	}
+	else if (! ui32NumCacheOps)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	/* Ensure any single timeline CacheOp request is processed immediately */
+	else if (ui32NumCacheOps == 1 && puiCacheOp[0] == PVRSRV_CACHE_OP_TIMELINE)
+	{
+		eError = CacheOpBatchExecTimeline(psDevNode, uiTimeline, IMG_TRUE, uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
+	}
+	/* Services client explicitly requested a GF, or config is GF-only (i.e. no KRBF support); this takes priority */
+	else if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF) &&
+			 ((puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_GLOBAL) || !CacheOpConfigSupports(CACHEOP_CONFIG_KRBF)))
+	{
+		eError =
+			CacheOpBatchExecGlobal(psDevNode,
+								   ppsPMR,
+								   pvAddress,
+								   puiOffset,
+								   puiSize,
+								   puiCacheOp,
+								   ui32NumCacheOps,
+								   uiTimeline,
+								   ui32ClientGFSeqNum,
+								   uiCurrentFenceSeqNum,
+								   pui32NextFenceSeqNum);
+	}
+	/* This is the default entry for all client requests */
+	else
+	{
+		if (!(gsCwq.eConfig & (CACHEOP_CONFIG_LAST-1)))
+		{
+			/* default the configuration before execution */
+			CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT);
+		}
+
+		eError =
+			CacheOpBatchExecRangeBased(psDevNode,
+									   ppsPMR,
+									   pvAddress,
+									   puiOffset,
+									   puiSize,
+									   puiCacheOp,
+									   ui32NumCacheOps,
+									   uiTimeline,
+									   ui32ClientGFSeqNum,
+									   uiCurrentFenceSeqNum,
+									   pui32NextFenceSeqNum);
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR CacheOpFence (RGXFWIF_DM eFenceOpType, IMG_UINT32 ui32FenceOpSeqNum)
+{
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR eError2;
+	IMG_UINT32 ui32RetryAbort;
+	IMG_UINT32 ui32CompletedOpSeqNum;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+#if defined(CACHEOP_DEBUG)
+	IMG_UINT64 uiTimeNow;
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+	sCacheOpWorkItem.ui32OpSeqNum = ui32FenceOpSeqNum;
+	sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+	uiTimeNow = sCacheOpWorkItem.ui64EnqueuedTime;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+	sCacheOpWorkItem.eFenceOpType = eFenceOpType;
+#endif
+	sCacheOpWorkItem.uiSize = (uintptr_t) OSAtomicRead(&gsCwq.hCompletedSeqNum);
+	sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+#endif
+	PVR_UNREFERENCED_PARAMETER(eFenceOpType);
+
+	/* The CacheOp(s) this thread is fencing for have already been satisfied
+	   by a GF elsewhere in the interim. Put another way: if the last such GF
+	   is logically behind, or momentarily disabled (zero), then we have to
+	   flush the cache */
+	if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32FenceOpSeqNum)
+	{
+#if defined(CACHEOP_DEBUG)
+		sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+#endif
+		goto e0;
+	}
+
+	/* If initial fence check fails, then wait-and-retry in loop */
+	ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum);
+	if (CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum))
+	{
+#if defined(CACHEOP_DEBUG)
+		sCacheOpWorkItem.uiSize = (uintptr_t) ui32CompletedOpSeqNum;
+#endif
+		goto e0;
+	}
+
+	/* Open CacheOp update event object, if event open fails return error */
+	eError2 = OSEventObjectOpen(gsCwq.hClientWakeUpEvtObj, &hOSEvent);
+	PVR_LOGG_IF_ERROR(eError2, "OSEventObjectOpen", e0);
+
+	/* Linear back-off (exponential could be considered); upper-bounds the user wait */
+	for (ui32RetryAbort = gsCwq.ui32FenceRetryAbort; ;--ui32RetryAbort)
+	{
+		/* (Re)read completed CacheOp sequence number before waiting */
+		ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum);
+		if (CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum))
+		{
+#if defined(CACHEOP_DEBUG)
+			sCacheOpWorkItem.uiSize = (uintptr_t) ui32CompletedOpSeqNum;
+#endif
+			break;
+		}
+
+		/*
+		   For cache ISAs with GF support, the wait duration is set to roughly
+		   25% of the GF overhead; there is no point waiting longer, so we just
+		   perform a GF as it means the CacheOp thread is really lagging behind.
+		   Lastly, we cannot (or should not) hang the client thread indefinitely,
+		   so after a certain duration we just give up. What this duration should
+		   be is hard to state, but for now we set it to 1 second, which is the
+		   product of CACHEOP_FENCE_[WAIT_TIMEOUT * RETRY_ABORT]. We ask the
+		   client to retry the operation by exiting with PVRSRV_ERROR_RETRY.
+		*/
+		(void) OSEventObjectWaitTimeout(hOSEvent, gsCwq.ui32FenceWaitTimeUs);
+		if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32FenceOpSeqNum)
+		{
+#if defined(CACHEOP_DEBUG)
+			sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+			uiTimeNow = OSClockns64();
+#endif
+			break;
+		}
+		else if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF))
+		{
+			eError2 = CacheOpGlobalFlush();
+			PVR_LOG_IF_ERROR(eError2, "CacheOpGlobalFlush");
+#if defined(CACHEOP_DEBUG)
+			sCacheOpWorkItem.uiCacheOp = PVRSRV_CACHE_OP_GLOBAL;
+			sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+			uiTimeNow = OSClockns64();
+#endif
+			break;
+		}
+		else if (! ui32RetryAbort)
+		{
+#if defined(CACHEOP_DEBUG)
+			sCacheOpWorkItem.uiSize = (uintptr_t) OSAtomicRead(&gsCwq.hCompletedSeqNum);
+			sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+			uiTimeNow = OSClockns64();
+#endif
+			PVR_LOG(("CacheOpFence() event: "CACHEOP_ABORT_FENCE_ERROR_STRING));
+			eError = PVRSRV_ERROR_RETRY;
+			break;
+		}
+		else
+		{
+#if defined(CACHEOP_DEBUG)
+			uiTimeNow = OSClockns64();
+#endif
+		}
+	}
+
+	eError2 = OSEventObjectClose(hOSEvent);
+	PVR_LOG_IF_ERROR(eError2, "OSEventObjectClose");
+
+e0:
+#if defined(CACHEOP_DEBUG)
+	sCacheOpWorkItem.ui64ExecuteTime = uiTimeNow;
+	if (ui32FenceOpSeqNum)
+	{
+		/* Only fence(s) pending on CacheOp(s) contribute towards statistics,
+		   here we calculate the rolling approximate average waiting time
+		   for these fence(s) */
+		IMG_UINT64 ui64EnqueuedTime = sCacheOpWorkItem.ui64EnqueuedTime;
+		IMG_UINT64 ui64ExecuteTime = sCacheOpWorkItem.ui64ExecuteTime;
+		IMG_UINT32 ui32Time = (IMG_UINT32)(ui64EnqueuedTime < ui64ExecuteTime ?
+									ui64ExecuteTime - ui64EnqueuedTime :
+									ui64EnqueuedTime - ui64ExecuteTime);
+		ui32Time = DivBy10(DivBy10(DivBy10(ui32Time)));
+		gsCwq.ui32TotalFenceOps += 1;
+		if (gsCwq.ui32TotalFenceOps > 2)
+		{
+			gsCwq.ui32AvgFenceTime -= (gsCwq.ui32AvgFenceTime / gsCwq.ui32TotalFenceOps);
+			gsCwq.ui32AvgFenceTime += (ui32Time / gsCwq.ui32TotalFenceOps);
+		}
+		else if (ui32Time)
+		{
+			gsCwq.ui32AvgFenceTime = (IMG_UINT32)ui32Time;
+		}
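+		/* avg = avg - avg/N + t/N maintains an O(1) incremental
+		   approximation of the mean fence wait time without storing
+		   per-sample history */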
+	}
+	CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR CacheOpLog (PMR *psPMR,
+						 IMG_UINT64 puiAddress,
+						 IMG_DEVMEM_OFFSET_T uiOffset,
+						 IMG_DEVMEM_SIZE_T uiSize,
+						 IMG_UINT64 ui64EnqueuedTimeUs,
+						 IMG_UINT64 ui64ExecuteTimeUs,
+						 IMG_UINT32 ui32NumRBF,
+						 IMG_BOOL bIsDiscard,
+						 PVRSRV_CACHE_OP uiCacheOp)
+{
+#if defined(CACHEOP_DEBUG)
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	PVR_UNREFERENCED_PARAMETER(puiAddress);
+
+	sCacheOpWorkItem.psPMR = psPMR;
+	sCacheOpWorkItem.uiSize = uiSize;
+	sCacheOpWorkItem.uiOffset = uiOffset;
+	sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+	sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+	sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+
+	sCacheOpWorkItem.ui64EnqueuedTime = ui64EnqueuedTimeUs;
+	sCacheOpWorkItem.ui64ExecuteTime = ui64ExecuteTimeUs;
+	sCacheOpWorkItem.bUMF = IMG_TRUE;
+	sCacheOpWorkItem.bRBF = bIsDiscard ? IMG_FALSE : IMG_TRUE;
+	gsCwq.ui32UMDiscards += bIsDiscard ? 1 : 0;
+	gsCwq.ui32ClientRBF += bIsDiscard ? 0 : ui32NumRBF;
+	gsCwq.ui32ClientSync += 1;
+	gsCwq.ui32TotalExecOps += 1;
+
+	CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#else
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiCacheOp);
+	PVR_UNREFERENCED_PARAMETER(ui32NumRBF);
+	PVR_UNREFERENCED_PARAMETER(puiAddress);
+	PVR_UNREFERENCED_PARAMETER(ui64ExecuteTimeUs);
+	PVR_UNREFERENCED_PARAMETER(ui64EnqueuedTimeUs);
+#endif
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR CacheOpInit2 (void)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* Create an event object for pending CacheOp work items */
+	eError = OSEventObjectCreate("PVRSRV_CACHEOP_EVENTOBJECT", &gsCwq.hThreadWakeUpEvtObj);
+	PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", e0);
+
+	/* Create an event object for updating pending fence checks on CacheOp */
+	eError = OSEventObjectCreate("PVRSRV_CACHEOP_EVENTOBJECT", &gsCwq.hClientWakeUpEvtObj);
+	PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", e0);
+
+	/* Appending work-items must not be concurrent; this lock protects against that */
+	eError = OSLockCreate((POS_LOCK*)&gsCwq.hDeferredLock);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+	/* Apphint read/write must not be concurrent, so this lock protects against that */
+	eError = OSLockCreate((POS_LOCK*)&gsCwq.hConfigLock);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+	/* Determine which CPU cache ISA maintenance mechanisms are available (GF and UMF) */
+#if defined(__arm__) || defined(__arm64__) || defined(__aarch64__)
+	gsCwq.bNoGlobalFlushImpl = IMG_TRUE;
+#else
+	gsCwq.bNoGlobalFlushImpl = (OSCPUOperation(PVRSRV_CACHE_OP_FLUSH) != PVRSRV_OK) ? IMG_TRUE : IMG_FALSE;
+#endif
+	if (! gsCwq.bNoGlobalFlushImpl)
+	{
+		IMG_UINT64 uiIdx;
+		IMG_UINT64 uiTime = 0;
+		IMG_UINT64 uiTimeAfter;
+		IMG_UINT64 uiTimeBefore;
+
+		for (uiIdx = 0; uiIdx < 4; uiIdx++)
+		{
+			/* Take average of four GF */
+			uiTimeBefore = OSClockns64();
+			(void) OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+			uiTimeAfter = OSClockns64();
+
+			uiTimeBefore = DivBy10(DivBy10(DivBy10(uiTimeBefore)));
+			uiTimeAfter = DivBy10(DivBy10(DivBy10(uiTimeAfter)));
+			uiTime += uiTimeBefore < uiTimeAfter ?
+								uiTimeAfter  - uiTimeBefore :
+								uiTimeBefore - uiTimeAfter;
+		}
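+		/* The three DivBy10() calls scale each ns timestamp to us; the
+		   uiTime >> 2 below then averages the four sampled GF durations */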
+
+		gsCwq.ui32FenceWaitTimeUs = (IMG_UINT32)(uiTime >> 2);
+		gsCwq.ui32FenceRetryAbort = ~0;
+	}
+	else
+	{
+		gsCwq.ui32FenceWaitTimeUs = CACHEOP_FENCE_WAIT_TIMEOUT;
+		gsCwq.ui32FenceRetryAbort = CACHEOP_FENCE_RETRY_ABORT;
+	}
+#if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH)
+	gsCwq.bSupportsUMFlush = IMG_TRUE;
+#else
+	gsCwq.bSupportsUMFlush = IMG_FALSE;
+#endif
+
+	gsCwq.pui32InfoPage = psPVRSRVData->pui32InfoPage;
+	gsCwq.psInfoPagePMR = psPVRSRVData->psInfoPagePMR;
+
+	/* Normally, platforms should use their default configurations, put exceptions here */
+#if defined(__i386__) || defined(__x86_64__)
+#if !defined(TC_MEMORY_CONFIG)
+	CacheOpConfigUpdate(CACHEOP_CONFIG_URBF | CACHEOP_CONFIG_KGF | CACHEOP_CONFIG_KDF);
+#else
+	CacheOpConfigUpdate(CACHEOP_CONFIG_KGF | CACHEOP_CONFIG_KDF);
+#endif
+#else /* defined(__x86__) */
+	CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT);
+#endif
+
+	/* Initialise the remaining occupants of the CacheOp information page */
+	gsCwq.pui32InfoPage[CACHEOP_INFO_PGSIZE]    = (IMG_UINT32)gsCwq.uiPageSize;
+	gsCwq.pui32InfoPage[CACHEOP_INFO_LINESIZE]  = (IMG_UINT32)gsCwq.uiLineSize;
+	gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] = (IMG_UINT32)0;
+	gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM1] = (IMG_UINT32)0;
+
+	/* Set before spawning thread */
+	gsCwq.bInit = IMG_TRUE;
+
+	/* Create a thread which is used to execute the deferred CacheOp(s);
+	   these are CacheOp(s) executed by the server on behalf of clients
+	   asynchronously. All clients synchronise with the server before
+	   submitting any HW operation (i.e. device kicks) to ensure that
+	   client device work-load memory is coherent */
+	eError = OSThreadCreatePriority(&gsCwq.hWorkerThread,
+									"pvr_cacheop",
+									CacheOpThread,
+									CacheOpThreadDumpInfo,
+									IMG_TRUE,
+									psPVRSRVData,
+									OS_THREAD_HIGHEST_PRIORITY);
+	PVR_LOGG_IF_ERROR(eError, "OSThreadCreatePriority", e0);
+
+	/* Writing the unsigned integer binary encoding of CACHEOP_CONFIG
+	   into this file cycles through the available configuration(s) */
+	gsCwq.pvConfigTune = OSCreateStatisticEntry("cacheop_config",
+											NULL,
+											CacheOpConfigRead,
+											NULL);
+	PVR_LOGG_IF_FALSE(gsCwq.pvConfigTune, "OSCreateStatisticEntry", e0);
+
+	/* Register the CacheOp framework (re)configuration handlers */
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpConfig,
+										CacheOpConfigQuery,
+										CacheOpConfigSet,
+										APPHINT_OF_DRIVER_NO_DEVICE,
+										(void *) APPHINT_ID_CacheOpConfig);
+
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpGFThresholdSize,
+										CacheOpConfigQuery,
+										CacheOpConfigSet,
+										APPHINT_OF_DRIVER_NO_DEVICE,
+										(void *) APPHINT_ID_CacheOpGFThresholdSize);
+
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpUMKMThresholdSize,
+										CacheOpConfigQuery,
+										CacheOpConfigSet,
+										APPHINT_OF_DRIVER_NO_DEVICE,
+										(void *) APPHINT_ID_CacheOpUMKMThresholdSize);
+
+	return PVRSRV_OK;
+e0:
+	CacheOpDeInit2();
+	return eError;
+}
+
+void CacheOpDeInit2 (void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	gsCwq.bInit = IMG_FALSE;
+
+	if (gsCwq.hThreadWakeUpEvtObj)
+	{
+		eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+	}
+
+	if (gsCwq.hClientWakeUpEvtObj)
+	{
+		eError = OSEventObjectSignal(gsCwq.hClientWakeUpEvtObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+	}
+
+	if (gsCwq.hWorkerThread)
+	{
+		LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+		{
+			eError = OSThreadDestroy(gsCwq.hWorkerThread);
+			if (PVRSRV_OK == eError)
+			{
+				gsCwq.hWorkerThread = NULL;
+				break;
+			}
+			OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+		PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+		gsCwq.hWorkerThread = NULL;
+	}
+
+	if (gsCwq.hClientWakeUpEvtObj)
+	{
+		eError = OSEventObjectDestroy(gsCwq.hClientWakeUpEvtObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+		gsCwq.hClientWakeUpEvtObj = NULL;
+	}
+
+	if (gsCwq.hThreadWakeUpEvtObj)
+	{
+		eError = OSEventObjectDestroy(gsCwq.hThreadWakeUpEvtObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+		gsCwq.hThreadWakeUpEvtObj = NULL;
+	}
+
+	if (gsCwq.hConfigLock)
+	{
+		eError = OSLockDestroy(gsCwq.hConfigLock);
+		PVR_LOG_IF_ERROR(eError, "OSLockDestroy");
+		gsCwq.hConfigLock = NULL;
+	}
+
+	if (gsCwq.hDeferredLock)
+	{
+		eError = OSLockDestroy(gsCwq.hDeferredLock);
+		PVR_LOG_IF_ERROR(eError, "OSLockDestroy");
+		gsCwq.hDeferredLock = NULL;
+	}
+
+	if (gsCwq.pvConfigTune)
+	{
+		OSRemoveStatisticEntry(&gsCwq.pvConfigTune);
+	}
+
+	gsCwq.pui32InfoPage = NULL;
+	gsCwq.psInfoPagePMR = NULL;
+}
+
+PVRSRV_ERROR CacheOpInit (void)
+{
+	IMG_UINT32 idx;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* DDK initialisation is anticipated to be performed on the boot
+	   processor (the little core in big/little systems), though this may
+	   not always be the case. The value cached here is therefore the
+	   system-wide safe (i.e. smallest) L1 d-cache line size on any such
+	   platform with mismatched d-cache line sizes */
+	gsCwq.uiPageSize = OSGetPageSize();
+	gsCwq.uiPageShift = OSGetPageShift();
+	gsCwq.uiLineSize = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+	gsCwq.uiLineShift = ExactLog2(gsCwq.uiLineSize);
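+	/* e.g. a 64-byte cache line size gives uiLineShift == 6 */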
+	PVR_LOGR_IF_FALSE((gsCwq.uiLineSize && gsCwq.uiPageSize && gsCwq.uiPageShift), "", PVRSRV_ERROR_INIT_FAILURE);
+	gsCwq.uiCacheOpAddrType = OSCPUCacheOpAddressType();
+
+	/* More information regarding these atomic counters can be found
+	   in the CACHEOP_WORK_QUEUE type definition at top of file */
+	OSAtomicWrite(&gsCwq.hCompletedSeqNum, 0);
+	OSAtomicWrite(&gsCwq.hCommonSeqNum, 0);
+	OSAtomicWrite(&gsCwq.hDeferredSize, 0);
+	OSAtomicWrite(&gsCwq.hWriteCounter, 0);
+	OSAtomicWrite(&gsCwq.hReadCounter, 0);
+
+	for (idx = 0; idx < CACHEOP_INDICES_MAX; idx++)
+	{
+		gsCwq.asWorkItems[idx].iTimeline = PVRSRV_NO_TIMELINE;
+		gsCwq.asWorkItems[idx].psPMR = (void *)(uintptr_t)~0;
+		gsCwq.asWorkItems[idx].ui32OpSeqNum = (IMG_UINT32)~0;
+		gsCwq.asWorkItems[idx].ui32GFSeqNum = (IMG_UINT32)~0;
+	}
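+	/* The ~0 sentinel values mark queue slots as not yet in use until a
+	   deferred CacheOp is first written into them */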
+
+	/* Lock prevents multiple threads from issuing surplus to requirement GF */
+	eError = OSLockCreate((POS_LOCK*)&gsCwq.hGlobalFlushLock);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+#if defined(CACHEOP_DEBUG)
+	/* debugfs file read-out must not be concurrent, so this lock protects against that */
+	eError = OSLockCreate((POS_LOCK*)&gsCwq.hStatsExecLock);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+	gsCwq.i32StatsExecWriteIdx = 0;
+	OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted));
+
+	/* File captures the most recent subset of CacheOp(s) executed */
+	gsCwq.pvStatsEntry = OSCreateStatisticEntry("cacheop_history",
+												NULL,
+												CacheOpStatsExecLogRead,
+												NULL);
+	PVR_LOGG_IF_FALSE(gsCwq.pvStatsEntry, "OSCreateStatisticEntry", e0);
+#endif
+
+e0:
+	return eError;
+}
+
+void CacheOpDeInit (void)
+{
+#if defined(CACHEOP_DEBUG)
+	if (gsCwq.hStatsExecLock)
+	{
+		(void) OSLockDestroy(gsCwq.hStatsExecLock);
+		gsCwq.hStatsExecLock = NULL;
+	}
+
+	if (gsCwq.pvStatsEntry)
+	{
+		OSRemoveStatisticEntry(&gsCwq.pvStatsEntry);
+	}
+#endif
+	if (gsCwq.hGlobalFlushLock)
+	{
+		(void) OSLockDestroy(gsCwq.hGlobalFlushLock);
+		gsCwq.hGlobalFlushLock = NULL;
+	}
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/cache_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/cache_km.h
new file mode 100644
index 0000000..ef0f8e7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/cache_km.h
@@ -0,0 +1,167 @@
+/*************************************************************************/ /*!
+@File           cache_km.h
+@Title          CPU cache management header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _CACHE_KM_H_
+#define _CACHE_KM_H_
+
+#if defined(LINUX)
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION
+#endif
+
+#include "pvrsrv_error.h"
+#include "os_cpu_cache.h"
+#include "img_types.h"
+#include "cache_ops.h"
+#include "device.h"
+#include "pmr.h"
+
+typedef IMG_UINT32 PVRSRV_CACHE_OP_ADDR_TYPE;	/*!< Represents CPU address type required for CPU d-cache maintenance */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL	0x1	/*!< Operation requires CPU virtual address only */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL	0x2	/*!< Operation requires CPU physical address only */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_BOTH		0x3	/*!< Operation requires both CPU virtual & physical addresses */
+
+#include "connection_server.h"
+
+/*
+ * CacheOpInit() & CacheOpDeInit()
+ *
+ * This must be called to initialise the KM cache maintenance framework.
+ * This is called early during the driver/module (un)loading phase.
+ */
+PVRSRV_ERROR CacheOpInit(void);
+void CacheOpDeInit(void);
+
+/*
+ * CacheOpInit2() & CacheOpDeInit2()
+ *
+ * This must be called to initialise the UM cache maintenance framework.
+ * This is called when the driver is loaded/unloaded from the kernel.
+ */
+PVRSRV_ERROR CacheOpInit2(void);
+void CacheOpDeInit2(void);
+
+/*
+ * CacheOpExec()
+ *
+ * This is the primary CPU data-cache maintenance interface and it is
+ * always guaranteed to be synchronous; the arguments supplied must be
+ * pre-validated for performance reasons else the d-cache maintenance
+ * operation might cause the underlying OS kernel to fault.
+ */
+PVRSRV_ERROR CacheOpExec (PPVRSRV_DEVICE_NODE psDevNode,
+						void *pvVirtStart,
+						void *pvVirtEnd,
+						IMG_CPU_PHYADDR sCPUPhysStart,
+						IMG_CPU_PHYADDR sCPUPhysEnd,
+						PVRSRV_CACHE_OP uiCacheOp);
+
+/*
+ * CacheOpValExec()
+ *
+ * Same as CacheOpExec(), except arguments are _validated_ before being
+ * presented to the underlying OS kernel for CPU data-cache maintenance.
+ * uiAddress is the start CPU virtual address of the PMR to be d-cache
+ * maintained; it can be NULL, in which case a remap will be performed
+ * internally if required for cache maintenance. This is primarily used
+ * as the services client bridge call handler for synchronous user-mode
+ * cache maintenance requests.
+ */
+PVRSRV_ERROR CacheOpValExec(PMR *psPMR,
+							IMG_UINT64 uiAddress,
+							IMG_DEVMEM_OFFSET_T uiOffset,
+							IMG_DEVMEM_SIZE_T uiSize,
+							PVRSRV_CACHE_OP uiCacheOp);
+
+/*
+ * CacheOpQueue()
+ *
+ * This is the secondary cache maintenance interface and it is not
+ * guaranteed to be synchronous, in that requests could be deferred
+ * and executed asynchronously. This interface is primarily meant
+ * as the services client bridge call handler. Both uiInfoPgGFSeqNum
+ * and ui32[Current,Next]FenceSeqNum implement an internal client/
+ * server queueing protocol, so making use of this interface outside
+ * of the services client is not recommended and should not be done.
+ */
+PVRSRV_ERROR CacheOpQueue (CONNECTION_DATA *psConnection,
+					    PPVRSRV_DEVICE_NODE psDevNode,
+						IMG_UINT32 ui32OpCount,
+						PMR **ppsPMR,
+						IMG_UINT64 *puiAddress,
+						IMG_DEVMEM_OFFSET_T *puiOffset,
+						IMG_DEVMEM_SIZE_T *puiSize,
+						PVRSRV_CACHE_OP *puiCacheOp,
+						IMG_UINT32 ui32OpTimeline,
+						IMG_UINT32 uiOpInfoPgGFSeqNum,
+						IMG_UINT32 uiCurrentFenceSeqNum,
+						IMG_UINT32 *puiNextFenceSeqNum);
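+
+/*
+ * Illustrative usage sketch (non-normative; variable names here are
+ * hypothetical) for a single-entry batch:
+ *
+ *   IMG_UINT32 ui32NextSeq;
+ *   PVRSRV_CACHE_OP uiOp = PVRSRV_CACHE_OP_FLUSH;
+ *   eError = CacheOpQueue(psConnection, psDevNode, 1, &psPMR, &ui64Addr,
+ *                         &uiOffset, &uiSize, &uiOp, ui32Timeline,
+ *                         0, ui32CurrentFenceSeqNum, &ui32NextSeq);
+ *
+ * The caller later fences with CacheOpFence() before any HW kick.
+ */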
+
+/*
+ * CacheOpFence()
+ *
+ * This is used to fence on any in-flight client cache maintenance
+ * operations that might have been deferred by the use of CacheOpQueue().
+ * This should be called before any subsequent HW device kicks to ensure
+ * device memory is coherent with the HW before the kick.
+ */
+PVRSRV_ERROR CacheOpFence (RGXFWIF_DM eOpType, IMG_UINT32 ui32OpSeqNum);
+
+/*
+ * CacheOpLog()
+ *
+ * This is used for logging client cache maintenance operations that
+ * were executed in user-space.
+ */
+PVRSRV_ERROR CacheOpLog (PMR *psPMR,
+						IMG_UINT64 uiAddress,
+						IMG_DEVMEM_OFFSET_T uiOffset,
+						IMG_DEVMEM_SIZE_T uiSize,
+						IMG_UINT64 ui64QueuedTimeUs,
+						IMG_UINT64 ui64ExecuteTimeUs,
+						IMG_UINT32 ui32NumRBF,
+						IMG_BOOL bIsDiscard,
+						PVRSRV_CACHE_OP uiCacheOp);
+
+#endif	/* _CACHE_KM_H_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/cache_ops.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/cache_ops.h
new file mode 100644
index 0000000..3af4694
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/cache_ops.h
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services cache management header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines for cache management which are visible internally
+                and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _CACHE_OPS_H_
+#define _CACHE_OPS_H_
+#include "img_types.h"
+
+#define CACHE_BATCH_MAX (8)
+typedef IMG_UINT32 PVRSRV_CACHE_OP;				/*!< Type represents cache maintenance operation */
+#define PVRSRV_CACHE_OP_NONE				0x0	/*!< No operation */
+#define PVRSRV_CACHE_OP_CLEAN				0x1	/*!< Flush w/o invalidate */
+#define PVRSRV_CACHE_OP_INVALIDATE			0x2	/*!< Invalidate w/o flush */
+#define PVRSRV_CACHE_OP_FLUSH				0x3	/*!< Flush w/ invalidate */
+
+#endif	/* _CACHE_OPS_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_cache_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_cache_bridge.h
new file mode 100644
index 0000000..28b35ff
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_cache_bridge.h
@@ -0,0 +1,104 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for cache
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for cache
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_CACHE_BRIDGE_H
+#define CLIENT_CACHE_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_cache_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpQueue(IMG_HANDLE hBridge,
+							  IMG_UINT32
+							  ui32NumCacheOps,
+							  IMG_HANDLE * phPMR,
+							  IMG_UINT64 *
+							  pui64Address,
+							  IMG_DEVMEM_OFFSET_T *
+							  puiOffset,
+							  IMG_DEVMEM_SIZE_T *
+							  puiSize,
+							  PVRSRV_CACHE_OP *
+							  piuCacheOp,
+							  IMG_UINT32
+							  ui32OpTimeline,
+							  IMG_UINT32
+							  ui32OpInfoPgGFSeqNum,
+							  IMG_UINT32
+							  ui32CurrentFenceSeqNum,
+							  IMG_UINT32 *
+							  pui32NextFenceSeqNum);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpExec(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR,
+							 IMG_UINT64 ui64Address,
+							 IMG_DEVMEM_OFFSET_T
+							 uiOffset,
+							 IMG_DEVMEM_SIZE_T
+							 uiSize,
+							 PVRSRV_CACHE_OP
+							 iuCacheOp);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpLog(IMG_HANDLE hBridge,
+							IMG_HANDLE hPMR,
+							IMG_UINT64 ui64Address,
+							IMG_DEVMEM_OFFSET_T
+							uiOffset,
+							IMG_DEVMEM_SIZE_T
+							uiSize,
+							IMG_INT64
+							i64QueuedTimeUs,
+							IMG_INT64
+							i64ExecuteTimeUs,
+							IMG_INT32 i32NumRBF,
+							IMG_BOOL bIsDiscard,
+							PVRSRV_CACHE_OP
+							iuCacheOp);
+
+#endif /* CLIENT_CACHE_BRIDGE_H */
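
BridgeCacheOpQueue takes parallel arrays, one element per queued operation, plus fence sequence numbers that order the batch against in-flight GPU work. A sketch of a two-op batch against the declaration above, assuming valid hDevNode/hPmrA/hPmrB handles (those names are illustrative, not defined in this patch):

/* Sketch only: every identifier except the bridge function and the
 * PVRSRV_CACHE_OP_* codes is hypothetical. */
IMG_HANDLE ahPmr[2]           = { hPmrA, hPmrB };
IMG_UINT64 aui64Addr[2]       = { 0, 0 };        /* no CPU virtual addresses */
IMG_DEVMEM_OFFSET_T auiOff[2] = { 0, 0 };
IMG_DEVMEM_SIZE_T auiSize[2]  = { 4096, 4096 };
PVRSRV_CACHE_OP aeOp[2]       = { PVRSRV_CACHE_OP_CLEAN,
                                  PVRSRV_CACHE_OP_INVALIDATE };
IMG_UINT32 ui32NextSeq;
PVRSRV_ERROR eErr;

eErr = BridgeCacheOpQueue(hDevNode, 2, ahPmr, aui64Addr, auiOff, auiSize,
                          aeOp, 0 /* op timeline */,
                          0 /* info page GF seq */,
                          0 /* current fence seq */, &ui32NextSeq);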
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_cache_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_cache_direct_bridge.c
new file mode 100644
index 0000000..a589faf
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_cache_direct_bridge.c
@@ -0,0 +1,148 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for cache
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for cache
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_cache_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "cache_ops.h"
+
+#include "cache_km.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpQueue(IMG_HANDLE hBridge,
+							  IMG_UINT32
+							  ui32NumCacheOps,
+							  IMG_HANDLE * phPMR,
+							  IMG_UINT64 *
+							  pui64Address,
+							  IMG_DEVMEM_OFFSET_T *
+							  puiOffset,
+							  IMG_DEVMEM_SIZE_T *
+							  puiSize,
+							  PVRSRV_CACHE_OP *
+							  piuCacheOp,
+							  IMG_UINT32
+							  ui32OpTimeline,
+							  IMG_UINT32
+							  ui32OpInfoPgGFSeqNum,
+							  IMG_UINT32
+							  ui32CurrentFenceSeqNum,
+							  IMG_UINT32 *
+							  pui32NextFenceSeqNum)
+{
+	PVRSRV_ERROR eError;
+	PMR **psPMRInt;
+
+	psPMRInt = (PMR **) phPMR;
+
+	eError =
+	    CacheOpQueue(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+			 ui32NumCacheOps,
+			 psPMRInt,
+			 pui64Address,
+			 puiOffset,
+			 puiSize,
+			 piuCacheOp,
+			 ui32OpTimeline,
+			 ui32OpInfoPgGFSeqNum,
+			 ui32CurrentFenceSeqNum, pui32NextFenceSeqNum);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpExec(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR,
+							 IMG_UINT64 ui64Address,
+							 IMG_DEVMEM_OFFSET_T
+							 uiOffset,
+							 IMG_DEVMEM_SIZE_T
+							 uiSize,
+							 PVRSRV_CACHE_OP
+							 iuCacheOp)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    CacheOpValExec(psPMRInt, ui64Address, uiOffset, uiSize, iuCacheOp);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpLog(IMG_HANDLE hBridge,
+							IMG_HANDLE hPMR,
+							IMG_UINT64 ui64Address,
+							IMG_DEVMEM_OFFSET_T
+							uiOffset,
+							IMG_DEVMEM_SIZE_T
+							uiSize,
+							IMG_INT64
+							i64QueuedTimeUs,
+							IMG_INT64
+							i64ExecuteTimeUs,
+							IMG_INT32 i32NumRBF,
+							IMG_BOOL bIsDiscard,
+							PVRSRV_CACHE_OP
+							iuCacheOp)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    CacheOpLog(psPMRInt,
+		       ui64Address,
+		       uiOffset,
+		       uiSize,
+		       i64QueuedTimeUs,
+		       i64ExecuteTimeUs, i32NumRBF, bIsDiscard, iuCacheOp);
+
+	return eError;
+}
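
Each direct-bridge entry point above follows the same pattern: the opaque IMG_HANDLE arguments are cast back to their server-side types (PMR *, PVRSRV_DEVICE_NODE *) and the kernel-mode function is called directly, so server-context callers pay no marshalling cost; hBridge doubles as the device-node pointer where the KM call needs one and is otherwise unreferenced. A sketch of a synchronous caller, assuming valid hDevNode and hPmr handles (illustrative names, not defined in this patch):

/* Server-context caller sketch; all names except BridgeCacheOpExec and
 * PVRSRV_CACHE_OP_FLUSH are hypothetical. */
PVRSRV_ERROR eErr = BridgeCacheOpExec(hDevNode, hPmr,
                                      0,        /* ui64Address: none */
                                      0,        /* offset into the PMR */
                                      4096,     /* size in bytes */
                                      PVRSRV_CACHE_OP_FLUSH);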
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_devicememhistory_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_devicememhistory_bridge.h
new file mode 100644
index 0000000..4fc6ec1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_devicememhistory_bridge.h
@@ -0,0 +1,132 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for devicememhistory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+#define CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_devicememhistory_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hPMR,
+								 IMG_DEVMEM_SIZE_T
+								 uiOffset,
+								 IMG_DEV_VIRTADDR
+								 sDevVAddr,
+								 IMG_DEVMEM_SIZE_T
+								 uiSize,
+								 const IMG_CHAR
+								 * puiText,
+								 IMG_UINT32
+								 ui32Log2PageSize,
+								 IMG_UINT32
+								 ui32AllocationIndex,
+								 IMG_UINT32 *
+								 pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE
+								   hBridge,
+								   IMG_HANDLE
+								   hPMR,
+								   IMG_DEVMEM_SIZE_T
+								   uiOffset,
+								   IMG_DEV_VIRTADDR
+								   sDevVAddr,
+								   IMG_DEVMEM_SIZE_T
+								   uiSize,
+								   const
+								   IMG_CHAR *
+								   puiText,
+								   IMG_UINT32
+								   ui32Log2PageSize,
+								   IMG_UINT32
+								   ui32AllocationIndex,
+								   IMG_UINT32 *
+								   pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge,
+				IMG_DEV_VIRTADDR sBaseDevVAddr,
+				IMG_UINT32 ui32ui32StartPage,
+				IMG_UINT32 ui32NumPages,
+				IMG_DEVMEM_SIZE_T uiAllocSize,
+				const IMG_CHAR * puiText,
+				IMG_UINT32 ui32Log2PageSize,
+				IMG_UINT32 ui32AllocationIndex,
+				IMG_UINT32 * pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge,
+				  IMG_DEV_VIRTADDR sBaseDevVAddr,
+				  IMG_UINT32 ui32ui32StartPage,
+				  IMG_UINT32 ui32NumPages,
+				  IMG_DEVMEM_SIZE_T uiAllocSize,
+				  const IMG_CHAR * puiText,
+				  IMG_UINT32 ui32Log2PageSize,
+				  IMG_UINT32 ui32AllocationIndex,
+				  IMG_UINT32 * pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge, IMG_HANDLE hPMR,
+				   IMG_DEVMEM_SIZE_T uiOffset,
+				   IMG_DEV_VIRTADDR sDevVAddr,
+				   IMG_DEVMEM_SIZE_T uiSize,
+				   const IMG_CHAR * puiText,
+				   IMG_UINT32 ui32Log2PageSize,
+				   IMG_UINT32 ui32AllocPageCount,
+				   IMG_UINT32 * pui32AllocPageIndices,
+				   IMG_UINT32 ui32FreePageCount,
+				   IMG_UINT32 * pui32FreePageIndices,
+				   IMG_UINT32 ui32AllocationIndex,
+				   IMG_UINT32 * pui32AllocationIndexOut);
+
+#endif /* CLIENT_DEVICEMEMHISTORY_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_devicememhistory_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_devicememhistory_direct_bridge.c
new file mode 100644
index 0000000..33336f4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_devicememhistory_direct_bridge.c
@@ -0,0 +1,221 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for devicememhistory
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_devicememhistory_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "img_types.h"
+#include "img_defs.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem_history_server.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hPMR,
+								 IMG_DEVMEM_SIZE_T
+								 uiOffset,
+								 IMG_DEV_VIRTADDR
+								 sDevVAddr,
+								 IMG_DEVMEM_SIZE_T
+								 uiSize,
+								 const IMG_CHAR
+								 * puiText,
+								 IMG_UINT32
+								 ui32Log2PageSize,
+								 IMG_UINT32
+								 ui32AllocationIndex,
+								 IMG_UINT32 *
+								 pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    DevicememHistoryMapKM(psPMRInt,
+				  uiOffset,
+				  sDevVAddr,
+				  uiSize,
+				  puiText,
+				  ui32Log2PageSize,
+				  ui32AllocationIndex, pui32AllocationIndexOut);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE
+								   hBridge,
+								   IMG_HANDLE
+								   hPMR,
+								   IMG_DEVMEM_SIZE_T
+								   uiOffset,
+								   IMG_DEV_VIRTADDR
+								   sDevVAddr,
+								   IMG_DEVMEM_SIZE_T
+								   uiSize,
+								   const
+								   IMG_CHAR *
+								   puiText,
+								   IMG_UINT32
+								   ui32Log2PageSize,
+								   IMG_UINT32
+								   ui32AllocationIndex,
+								   IMG_UINT32 *
+								   pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    DevicememHistoryUnmapKM(psPMRInt,
+				    uiOffset,
+				    sDevVAddr,
+				    uiSize,
+				    puiText,
+				    ui32Log2PageSize,
+				    ui32AllocationIndex,
+				    pui32AllocationIndexOut);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge,
+				IMG_DEV_VIRTADDR sBaseDevVAddr,
+				IMG_UINT32 ui32ui32StartPage,
+				IMG_UINT32 ui32NumPages,
+				IMG_DEVMEM_SIZE_T uiAllocSize,
+				const IMG_CHAR * puiText,
+				IMG_UINT32 ui32Log2PageSize,
+				IMG_UINT32 ui32AllocationIndex,
+				IMG_UINT32 * pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	eError =
+	    DevicememHistoryMapVRangeKM(sBaseDevVAddr,
+					ui32ui32StartPage,
+					ui32NumPages,
+					uiAllocSize,
+					puiText,
+					ui32Log2PageSize,
+					ui32AllocationIndex,
+					pui32AllocationIndexOut);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge,
+				  IMG_DEV_VIRTADDR sBaseDevVAddr,
+				  IMG_UINT32 ui32ui32StartPage,
+				  IMG_UINT32 ui32NumPages,
+				  IMG_DEVMEM_SIZE_T uiAllocSize,
+				  const IMG_CHAR * puiText,
+				  IMG_UINT32 ui32Log2PageSize,
+				  IMG_UINT32 ui32AllocationIndex,
+				  IMG_UINT32 * pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	eError =
+	    DevicememHistoryUnmapVRangeKM(sBaseDevVAddr,
+					  ui32ui32StartPage,
+					  ui32NumPages,
+					  uiAllocSize,
+					  puiText,
+					  ui32Log2PageSize,
+					  ui32AllocationIndex,
+					  pui32AllocationIndexOut);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge, IMG_HANDLE hPMR,
+				   IMG_DEVMEM_SIZE_T uiOffset,
+				   IMG_DEV_VIRTADDR sDevVAddr,
+				   IMG_DEVMEM_SIZE_T uiSize,
+				   const IMG_CHAR * puiText,
+				   IMG_UINT32 ui32Log2PageSize,
+				   IMG_UINT32 ui32AllocPageCount,
+				   IMG_UINT32 * pui32AllocPageIndices,
+				   IMG_UINT32 ui32FreePageCount,
+				   IMG_UINT32 * pui32FreePageIndices,
+				   IMG_UINT32 ui32AllocationIndex,
+				   IMG_UINT32 * pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    DevicememHistorySparseChangeKM(psPMRInt,
+					   uiOffset,
+					   sDevVAddr,
+					   uiSize,
+					   puiText,
+					   ui32Log2PageSize,
+					   ui32AllocPageCount,
+					   pui32AllocPageIndices,
+					   ui32FreePageCount,
+					   pui32FreePageIndices,
+					   ui32AllocationIndex,
+					   pui32AllocationIndexOut);
+
+	return eError;
+}
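
The Map and Unmap recorders take the same parameter set and are paired through the allocation index returned by the first call. A hedged sketch of that pairing; every name here is hypothetical, and 0xFFFFFFFF is assumed to be the "no prior allocation index" sentinel used elsewhere in the driver:

/* Illustrative pairing of the history Map/Unmap recorders. */
IMG_UINT32 ui32AllocIdx;
PVRSRV_ERROR eErr;

eErr = BridgeDevicememHistoryMap(hDevNode, hPmr,
                                 0,            /* offset within the PMR */
                                 sDevVAddr,    /* mapped device VA */
                                 uiSize,
                                 "my-alloc",   /* annotation */
                                 12,           /* log2 page size: 4 KiB */
                                 0xFFFFFFFF,   /* no existing index */
                                 &ui32AllocIdx);

/* ... later, record the unmap against the same allocation record. */
eErr = BridgeDevicememHistoryUnmap(hDevNode, hPmr, 0, sDevVAddr, uiSize,
                                   "my-alloc", 12, ui32AllocIdx,
                                   &ui32AllocIdx);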
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_htbuffer_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_htbuffer_bridge.h
new file mode 100644
index 0000000..c32b759
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_htbuffer_bridge.h
@@ -0,0 +1,75 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for htbuffer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for htbuffer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_HTBUFFER_BRIDGE_H
+#define CLIENT_HTBUFFER_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_htbuffer_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBControl(IMG_HANDLE hBridge,
+							IMG_UINT32
+							ui32NumGroups,
+							IMG_UINT32 *
+							pui32GroupEnable,
+							IMG_UINT32 ui32LogLevel,
+							IMG_UINT32
+							ui32EnablePID,
+							IMG_UINT32 ui32LogMode,
+							IMG_UINT32 ui32OpMode);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBLog(IMG_HANDLE hBridge,
+						    IMG_UINT32 ui32PID,
+						    IMG_UINT64 ui64TimeStamp,
+						    IMG_UINT32 ui32SF,
+						    IMG_UINT32 ui32NumArgs,
+						    IMG_UINT32 * pui32Args);
+
+#endif /* CLIENT_HTBUFFER_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_htbuffer_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_htbuffer_direct_bridge.c
new file mode 100644
index 0000000..699da1f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_htbuffer_direct_bridge.c
@@ -0,0 +1,91 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for htbuffer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for htbuffer
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_htbuffer_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "htbuffer_types.h"
+
+#include "htbserver.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBControl(IMG_HANDLE hBridge,
+							IMG_UINT32
+							ui32NumGroups,
+							IMG_UINT32 *
+							pui32GroupEnable,
+							IMG_UINT32 ui32LogLevel,
+							IMG_UINT32
+							ui32EnablePID,
+							IMG_UINT32 ui32LogMode,
+							IMG_UINT32 ui32OpMode)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	eError =
+	    HTBControlKM(ui32NumGroups,
+			 pui32GroupEnable,
+			 ui32LogLevel, ui32EnablePID, ui32LogMode, ui32OpMode);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBLog(IMG_HANDLE hBridge,
+						    IMG_UINT32 ui32PID,
+						    IMG_UINT64 ui64TimeStamp,
+						    IMG_UINT32 ui32SF,
+						    IMG_UINT32 ui32NumArgs,
+						    IMG_UINT32 * pui32Args)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	eError =
+	    HTBLogKM(ui32PID, ui64TimeStamp, ui32SF, ui32NumArgs, pui32Args);
+
+	return eError;
+}
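
Both host-trace entry points drop hBridge and forward straight to HTBControlKM/HTBLogKM. A sketch of emitting one trace record through the log path; the PID, timestamp and string-format values are placeholders, not real SF codes:

/* Sketch only: every identifier except BridgeHTBLog is hypothetical. */
IMG_UINT32 aui32Args[2] = { 0xCAFEu, 42u };
PVRSRV_ERROR eErr;

eErr = BridgeHTBLog(hDevNode,     /* unreferenced by the direct bridge */
                    ui32Pid, ui64Timestamp, ui32SF,
                    2, aui32Args);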
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_mm_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_mm_bridge.h
new file mode 100644
index 0000000..134618a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_mm_bridge.h
@@ -0,0 +1,364 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for mm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_MM_BRIDGE_H
+#define CLIENT_MM_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_mm_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMR,
+							  IMG_HANDLE *
+							  phPMRExport,
+							  IMG_UINT64 *
+							  pui64Size,
+							  IMG_UINT32 *
+							  pui32Log2Contig,
+							  IMG_UINT64 *
+							  pui64Password);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge,
+							    IMG_HANDLE
+							    hPMRExport);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge,
+						       IMG_HANDLE hPMR,
+						       IMG_UINT64 * pui64UID);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeLocalImportHandle(IMG_HANDLE
+								      hBridge,
+								      IMG_HANDLE
+								      hBuffer,
+								      IMG_HANDLE
+								      *
+								      phExtMem);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMRExport,
+							  IMG_UINT64
+							  ui64uiPassword,
+							  IMG_UINT64 ui64uiSize,
+							  IMG_UINT32
+							  ui32uiLog2Contig,
+							  IMG_HANDLE * phPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE
+							       hBridge,
+							       IMG_HANDLE
+							       hExtHandle,
+							       IMG_HANDLE *
+							       phPMR,
+							       IMG_DEVMEM_SIZE_T
+							       * puiSize,
+							       IMG_DEVMEM_ALIGN_T
+							       * psAlign);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefUnlockPMR(IMG_HANDLE
+							       hBridge,
+							       IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE
+								    hBridge,
+								    IMG_DEVMEM_SIZE_T
+								    uiSize,
+								    IMG_DEVMEM_SIZE_T
+								    uiChunkSize,
+								    IMG_UINT32
+								    ui32NumPhysChunks,
+								    IMG_UINT32
+								    ui32NumVirtChunks,
+								    IMG_UINT32 *
+								    pui32MappingTable,
+								    IMG_UINT32
+								    ui32Log2PageSize,
+								    PVRSRV_MEMALLOCFLAGS_T
+								    uiFlags,
+								    IMG_UINT32
+								    ui32AnnotationLength,
+								    const
+								    IMG_CHAR *
+								    puiAnnotation,
+								    IMG_PID
+								    ui32PID,
+								    IMG_HANDLE *
+								    phPMRPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, IMG_DEVMEM_SIZE_T uiSize,
+				   IMG_DEVMEM_SIZE_T uiChunkSize,
+				   IMG_UINT32 ui32NumPhysChunks,
+				   IMG_UINT32 ui32NumVirtChunks,
+				   IMG_UINT32 * pui32MappingTable,
+				   IMG_UINT32 ui32Log2PageSize,
+				   PVRSRV_MEMALLOCFLAGS_T uiFlags,
+				   IMG_UINT32 ui32AnnotationLength,
+				   const IMG_CHAR * puiAnnotation,
+				   IMG_PID ui32PID, IMG_HANDLE * phPMRPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemImportSecBuf(IMG_HANDLE
+								 hBridge,
+								 IMG_DEVMEM_SIZE_T
+								 uiSize,
+								 IMG_UINT32
+								 ui32Log2Align,
+								 PVRSRV_MEMALLOCFLAGS_T
+								 uiFlags,
+								 IMG_HANDLE *
+								 phPMRPtr,
+								 IMG_UINT64 *
+								 pui64SecBufHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPin(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpin(IMG_HANDLE hBridge,
+							    IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPinValidate(IMG_HANDLE
+								  hBridge,
+								  IMG_HANDLE
+								  hMapping,
+								  IMG_HANDLE
+								  hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpinInvalidate(IMG_HANDLE
+								      hBridge,
+								      IMG_HANDLE
+								      hMapping,
+								      IMG_HANDLE
+								      hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE
+								hBridge,
+								IMG_BOOL
+								bbKernelMemoryCtx,
+								IMG_HANDLE *
+								phDevMemServerContext,
+								IMG_HANDLE *
+								phPrivData,
+								IMG_UINT32 *
+								pui32CPUCacheLineSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hDevmemServerContext);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hDevmemCtx,
+								 IMG_DEV_VIRTADDR
+								 sHeapBaseAddr,
+								 IMG_DEVMEM_SIZE_T
+								 uiHeapLength,
+								 IMG_UINT32
+								 ui32Log2DataPageSize,
+								 IMG_HANDLE *
+								 phDevmemHeapPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE
+								  hBridge,
+								  IMG_HANDLE
+								  hDevmemHeap);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+							     IMG_HANDLE
+							     hDevmemServerHeap,
+							     IMG_HANDLE
+							     hReservation,
+							     IMG_HANDLE hPMR,
+							     PVRSRV_MEMALLOCFLAGS_T
+							     uiMapFlags,
+							     IMG_HANDLE *
+							     phMapping);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE
+							       hBridge,
+							       IMG_HANDLE
+							       hMapping);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE
+								   hBridge,
+								   IMG_HANDLE
+								   hDevmemServerHeap,
+								   IMG_DEV_VIRTADDR
+								   sAddress,
+								   IMG_DEVMEM_SIZE_T
+								   uiLength,
+								   IMG_HANDLE *
+								   phReservation);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE
+								     hBridge,
+								     IMG_HANDLE
+								     hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeChangeSparseMem(IMG_HANDLE hBridge,
+							     IMG_HANDLE
+							     hSrvDevMemHeap,
+							     IMG_HANDLE hPMR,
+							     IMG_UINT32
+							     ui32AllocPageCount,
+							     IMG_UINT32 *
+							     pui32AllocPageIndices,
+							     IMG_UINT32
+							     ui32FreePageCount,
+							     IMG_UINT32 *
+							     pui32FreePageIndices,
+							     IMG_UINT32
+							     ui32SparseFlags,
+							     PVRSRV_MEMALLOCFLAGS_T
+							     uiFlags,
+							     IMG_DEV_VIRTADDR
+							     sDevVAddr,
+							     IMG_UINT64
+							     ui64CPUVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPages(IMG_HANDLE
+							       hBridge,
+							       IMG_HANDLE
+							       hReservation,
+							       IMG_HANDLE hPMR,
+							       IMG_UINT32
+							       ui32PageCount,
+							       IMG_UINT32
+							       ui32PhysicalPgOffset,
+							       PVRSRV_MEMALLOCFLAGS_T
+							       uiFlags,
+							       IMG_DEV_VIRTADDR
+							       sDevVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPages(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hReservation,
+								 IMG_DEV_VIRTADDR
+								 sDevVAddr,
+								 IMG_UINT32
+								 ui32PageCount);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE
+								   hBridge,
+								   IMG_HANDLE
+								   hDevmemCtx,
+								   IMG_DEV_VIRTADDR
+								   sAddress);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE
+								    hBridge,
+								    IMG_UINT32 *
+								    pui32NumHeapConfigs);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE
+							      hBridge,
+							      IMG_UINT32
+							      ui32HeapConfigIndex,
+							      IMG_UINT32 *
+							      pui32NumHeaps);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE
+								   hBridge,
+								   IMG_UINT32
+								   ui32HeapConfigIndex,
+								   IMG_UINT32
+								   ui32HeapConfigNameBufSz,
+								   IMG_CHAR *
+								   puiHeapConfigName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE
+								hBridge,
+								IMG_UINT32
+								ui32HeapConfigIndex,
+								IMG_UINT32
+								ui32HeapIndex,
+								IMG_UINT32
+								ui32HeapNameBufSz,
+								IMG_CHAR *
+								puiHeapNameOut,
+								IMG_DEV_VIRTADDR
+								*
+								psDevVAddrBase,
+								IMG_DEVMEM_SIZE_T
+								* puiHeapLength,
+								IMG_UINT32 *
+								pui32Log2DataPageSizeOut,
+								IMG_UINT32 *
+								pui32Log2ImportAlignmentOut,
+								IMG_UINT32 *
+								pui32Log2TilingStrideFactorOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx,
+				  IMG_UINT32 ui32PID, IMG_BOOL bRegister);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeGetMaxDevMemSize(IMG_HANDLE
+							      hBridge,
+							      IMG_DEVMEM_SIZE_T
+							      * puiLMASize,
+							      IMG_DEVMEM_SIZE_T
+							      * puiUMASize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemGetFaultAddress(IMG_HANDLE
+								   hBridge,
+								   IMG_HANDLE
+								   hDevmemCtx,
+								   IMG_DEV_VIRTADDR
+								   *
+								   psFaultAddress);
+
+#endif /* CLIENT_MM_BRIDGE_H */
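
The export/import declarations above form a round trip: exporting a PMR yields a size, contiguity and password that a second context presents to re-import it. A hedged sketch against those prototypes, assuming valid hDevNode and hPmr handles (illustrative names) and the standard PVRSRV_OK success code:

/* Sketch of the PMR export/import round trip declared above. */
IMG_HANDLE hExport, hImported;
IMG_UINT64 ui64Size, ui64Password;
IMG_UINT32 ui32Log2Contig;
PVRSRV_ERROR eErr;

eErr = BridgePMRExportPMR(hDevNode, hPmr, &hExport,
                          &ui64Size, &ui32Log2Contig, &ui64Password);
if (eErr == PVRSRV_OK)
{
	eErr = BridgePMRImportPMR(hDevNode, hExport, ui64Password,
				  ui64Size, ui32Log2Contig, &hImported);
	if (eErr == PVRSRV_OK)
		(void) BridgePMRUnrefPMR(hDevNode, hImported);

	(void) BridgePMRUnexportPMR(hDevNode, hExport);
}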
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_mm_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_mm_direct_bridge.c
new file mode 100644
index 0000000..6e82c1e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_mm_direct_bridge.c
@@ -0,0 +1,829 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for mm
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_mm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+#include "physmem_tdsecbuf.h"
+#include "devicemem_utils.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMR,
+							  IMG_HANDLE *
+							  phPMRExport,
+							  IMG_UINT64 *
+							  pui64Size,
+							  IMG_UINT32 *
+							  pui32Log2Contig,
+							  IMG_UINT64 *
+							  pui64Password)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PMR_EXPORT *psPMRExportInt = NULL;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    PMRExportPMR(psPMRInt,
+			 &psPMRExportInt,
+			 pui64Size, pui32Log2Contig, pui64Password);
+
+	*phPMRExport = psPMRExportInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge,
+							    IMG_HANDLE
+							    hPMRExport)
+{
+	PVRSRV_ERROR eError;
+	PMR_EXPORT *psPMRExportInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+	eError = PMRUnexportPMR(psPMRExportInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge,
+						       IMG_HANDLE hPMR,
+						       IMG_UINT64 * pui64UID)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError = PMRGetUID(psPMRInt, pui64UID);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeLocalImportHandle(IMG_HANDLE
+								      hBridge,
+								      IMG_HANDLE
+								      hBuffer,
+								      IMG_HANDLE
+								      *
+								      phExtMem)
+{
+	PVRSRV_ERROR eError;
+	PMR *psBufferInt;
+	PMR *psExtMemInt = NULL;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psBufferInt = (PMR *) hBuffer;
+
+	eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt);
+
+	*phExtMem = psExtMemInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem)
+{
+	PVRSRV_ERROR eError;
+	PMR *psExtMemInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psExtMemInt = (PMR *) hExtMem;
+
+	eError = PMRUnmakeLocalImportHandle(psExtMemInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMRExport,
+							  IMG_UINT64
+							  ui64uiPassword,
+							  IMG_UINT64 ui64uiSize,
+							  IMG_UINT32
+							  ui32uiLog2Contig,
+							  IMG_HANDLE * phPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR_EXPORT *psPMRExportInt;
+	PMR *psPMRInt = NULL;
+
+	psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+	eError =
+	    PhysmemImportPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+			     psPMRExportInt,
+			     ui64uiPassword,
+			     ui64uiSize, ui32uiLog2Contig, &psPMRInt);
+
+	*phPMR = psPMRInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE
+							       hBridge,
+							       IMG_HANDLE
+							       hExtHandle,
+							       IMG_HANDLE *
+							       phPMR,
+							       IMG_DEVMEM_SIZE_T
+							       * puiSize,
+							       IMG_DEVMEM_ALIGN_T
+							       * psAlign)
+{
+	PVRSRV_ERROR eError;
+	PMR *psExtHandleInt;
+	PMR *psPMRInt = NULL;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psExtHandleInt = (PMR *) hExtHandle;
+
+	eError = PMRLocalImportPMR(psExtHandleInt, &psPMRInt, puiSize, psAlign);
+
+	*phPMR = psPMRInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError = PMRUnrefPMR(psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefUnlockPMR(IMG_HANDLE
+							       hBridge,
+							       IMG_HANDLE hPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError = PMRUnrefUnlockPMR(psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE
+								    hBridge,
+								    IMG_DEVMEM_SIZE_T
+								    uiSize,
+								    IMG_DEVMEM_SIZE_T
+								    uiChunkSize,
+								    IMG_UINT32
+								    ui32NumPhysChunks,
+								    IMG_UINT32
+								    ui32NumVirtChunks,
+								    IMG_UINT32 *
+								    pui32MappingTable,
+								    IMG_UINT32
+								    ui32Log2PageSize,
+								    PVRSRV_MEMALLOCFLAGS_T
+								    uiFlags,
+								    IMG_UINT32
+								    ui32AnnotationLength,
+								    const
+								    IMG_CHAR *
+								    puiAnnotation,
+								    IMG_PID
+								    ui32PID,
+								    IMG_HANDLE *
+								    phPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRPtrInt = NULL;
+
+	eError =
+	    PhysmemNewRamBackedPMR(NULL,
+				   (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				   uiSize, uiChunkSize, ui32NumPhysChunks,
+				   ui32NumVirtChunks, pui32MappingTable,
+				   ui32Log2PageSize, uiFlags,
+				   ui32AnnotationLength, puiAnnotation, ui32PID,
+				   &psPMRPtrInt);
+
+	*phPMRPtr = psPMRPtrInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, IMG_DEVMEM_SIZE_T uiSize,
+				   IMG_DEVMEM_SIZE_T uiChunkSize,
+				   IMG_UINT32 ui32NumPhysChunks,
+				   IMG_UINT32 ui32NumVirtChunks,
+				   IMG_UINT32 * pui32MappingTable,
+				   IMG_UINT32 ui32Log2PageSize,
+				   PVRSRV_MEMALLOCFLAGS_T uiFlags,
+				   IMG_UINT32 ui32AnnotationLength,
+				   const IMG_CHAR * puiAnnotation,
+				   IMG_PID ui32PID, IMG_HANDLE * phPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRPtrInt = NULL;
+
+	eError =
+	    PhysmemNewRamBackedLockedPMR(NULL,
+					 (PVRSRV_DEVICE_NODE *) ((void *)
+								 hBridge),
+					 uiSize, uiChunkSize, ui32NumPhysChunks,
+					 ui32NumVirtChunks, pui32MappingTable,
+					 ui32Log2PageSize, uiFlags,
+					 ui32AnnotationLength, puiAnnotation,
+					 ui32PID, &psPMRPtrInt);
+
+	*phPMRPtr = psPMRPtrInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemImportSecBuf(IMG_HANDLE
+								 hBridge,
+								 IMG_DEVMEM_SIZE_T
+								 uiSize,
+								 IMG_UINT32
+								 ui32Log2Align,
+								 PVRSRV_MEMALLOCFLAGS_T
+								 uiFlags,
+								 IMG_HANDLE *
+								 phPMRPtr,
+								 IMG_UINT64 *
+								 pui64SecBufHandle)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRPtrInt = NULL;
+
+	eError =
+	    PhysmemImportSecBuf(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				uiSize,
+				ui32Log2Align,
+				uiFlags, &psPMRPtrInt, pui64SecBufHandle);
+
+	*phPMRPtr = psPMRPtrInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPin(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError = DevmemIntPin(psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpin(IMG_HANDLE hBridge,
+							    IMG_HANDLE hPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError = DevmemIntUnpin(psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPinValidate(IMG_HANDLE
+								  hBridge,
+								  IMG_HANDLE
+								  hMapping,
+								  IMG_HANDLE
+								  hPMR)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_MAPPING *psMappingInt;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+	psPMRInt = (PMR *) hPMR;
+
+	eError = DevmemIntPinValidate(psMappingInt, psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpinInvalidate(IMG_HANDLE
+								      hBridge,
+								      IMG_HANDLE
+								      hMapping,
+								      IMG_HANDLE
+								      hPMR)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_MAPPING *psMappingInt;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+	psPMRInt = (PMR *) hPMR;
+
+	eError = DevmemIntUnpinInvalidate(psMappingInt, psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE
+								hBridge,
+								IMG_BOOL
+								bbKernelMemoryCtx,
+								IMG_HANDLE *
+								phDevMemServerContext,
+								IMG_HANDLE *
+								phPrivData,
+								IMG_UINT32 *
+								pui32CPUCacheLineSize)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX *psDevMemServerContextInt = NULL;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	eError =
+	    DevmemIntCtxCreate(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+			       bbKernelMemoryCtx,
+			       &psDevMemServerContextInt,
+			       &hPrivDataInt, pui32CPUCacheLineSize);
+
+	*phDevMemServerContext = psDevMemServerContextInt;
+	*phPrivData = hPrivDataInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hDevmemServerContext)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX *psDevmemServerContextInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+	eError = DevmemIntCtxDestroy(psDevmemServerContextInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hDevmemCtx,
+								 IMG_DEV_VIRTADDR
+								 sHeapBaseAddr,
+								 IMG_DEVMEM_SIZE_T
+								 uiHeapLength,
+								 IMG_UINT32
+								 ui32Log2DataPageSize,
+								 IMG_HANDLE *
+								 phDevmemHeapPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX *psDevmemCtxInt;
+	DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+	eError =
+	    DevmemIntHeapCreate(psDevmemCtxInt,
+				sHeapBaseAddr,
+				uiHeapLength,
+				ui32Log2DataPageSize, &psDevmemHeapPtrInt);
+
+	*phDevmemHeapPtr = psDevmemHeapPtrInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE
+								  hBridge,
+								  IMG_HANDLE
+								  hDevmemHeap)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP *psDevmemHeapInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemHeapInt = (DEVMEMINT_HEAP *) hDevmemHeap;
+
+	eError = DevmemIntHeapDestroy(psDevmemHeapInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+							     IMG_HANDLE
+							     hDevmemServerHeap,
+							     IMG_HANDLE
+							     hReservation,
+							     IMG_HANDLE hPMR,
+							     PVRSRV_MEMALLOCFLAGS_T
+							     uiMapFlags,
+							     IMG_HANDLE *
+							     phMapping)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP *psDevmemServerHeapInt;
+	DEVMEMINT_RESERVATION *psReservationInt;
+	PMR *psPMRInt;
+	DEVMEMINT_MAPPING *psMappingInt = NULL;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+	psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    DevmemIntMapPMR(psDevmemServerHeapInt,
+			    psReservationInt,
+			    psPMRInt, uiMapFlags, &psMappingInt);
+
+	*phMapping = psMappingInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE
+							       hBridge,
+							       IMG_HANDLE
+							       hMapping)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_MAPPING *psMappingInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+
+	eError = DevmemIntUnmapPMR(psMappingInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE
+								   hBridge,
+								   IMG_HANDLE
+								   hDevmemServerHeap,
+								   IMG_DEV_VIRTADDR
+								   sAddress,
+								   IMG_DEVMEM_SIZE_T
+								   uiLength,
+								   IMG_HANDLE *
+								   phReservation)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP *psDevmemServerHeapInt;
+	DEVMEMINT_RESERVATION *psReservationInt = NULL;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+
+	eError =
+	    DevmemIntReserveRange(psDevmemServerHeapInt,
+				  sAddress, uiLength, &psReservationInt);
+
+	*phReservation = psReservationInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE
+								     hBridge,
+								     IMG_HANDLE
+								     hReservation)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_RESERVATION *psReservationInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+	eError = DevmemIntUnreserveRange(psReservationInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeChangeSparseMem(IMG_HANDLE hBridge,
+							     IMG_HANDLE
+							     hSrvDevMemHeap,
+							     IMG_HANDLE hPMR,
+							     IMG_UINT32
+							     ui32AllocPageCount,
+							     IMG_UINT32 *
+							     pui32AllocPageIndices,
+							     IMG_UINT32
+							     ui32FreePageCount,
+							     IMG_UINT32 *
+							     pui32FreePageIndices,
+							     IMG_UINT32
+							     ui32SparseFlags,
+							     PVRSRV_MEMALLOCFLAGS_T
+							     uiFlags,
+							     IMG_DEV_VIRTADDR
+							     sDevVAddr,
+							     IMG_UINT64
+							     ui64CPUVAddr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP *psSrvDevMemHeapInt;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap;
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    DevmemIntChangeSparse(psSrvDevMemHeapInt,
+				  psPMRInt,
+				  ui32AllocPageCount,
+				  pui32AllocPageIndices,
+				  ui32FreePageCount,
+				  pui32FreePageIndices,
+				  ui32SparseFlags,
+				  uiFlags, sDevVAddr, ui64CPUVAddr);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPages(IMG_HANDLE
+							       hBridge,
+							       IMG_HANDLE
+							       hReservation,
+							       IMG_HANDLE hPMR,
+							       IMG_UINT32
+							       ui32PageCount,
+							       IMG_UINT32
+							       ui32PhysicalPgOffset,
+							       PVRSRV_MEMALLOCFLAGS_T
+							       uiFlags,
+							       IMG_DEV_VIRTADDR
+							       sDevVAddr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_RESERVATION *psReservationInt;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    DevmemIntMapPages(psReservationInt,
+			      psPMRInt,
+			      ui32PageCount,
+			      ui32PhysicalPgOffset, uiFlags, sDevVAddr);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPages(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hReservation,
+								 IMG_DEV_VIRTADDR
+								 sDevVAddr,
+								 IMG_UINT32
+								 ui32PageCount)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_RESERVATION *psReservationInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+	eError =
+	    DevmemIntUnmapPages(psReservationInt, sDevVAddr, ui32PageCount);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE
+								   hBridge,
+								   IMG_HANDLE
+								   hDevmemCtx,
+								   IMG_DEV_VIRTADDR
+								   sAddress)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX *psDevmemCtxInt;
+
+	psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+	eError =
+	    DevmemIntIsVDevAddrValid(NULL,
+				     (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				     psDevmemCtxInt, sAddress);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE
+								    hBridge,
+								    IMG_UINT32 *
+								    pui32NumHeapConfigs)
+{
+	PVRSRV_ERROR eError;
+
+	eError =
+	    HeapCfgHeapConfigCount(NULL,
+				   (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				   pui32NumHeapConfigs);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE
+							      hBridge,
+							      IMG_UINT32
+							      ui32HeapConfigIndex,
+							      IMG_UINT32 *
+							      pui32NumHeaps)
+{
+	PVRSRV_ERROR eError;
+
+	eError =
+	    HeapCfgHeapCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+			     ui32HeapConfigIndex, pui32NumHeaps);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE
+								   hBridge,
+								   IMG_UINT32
+								   ui32HeapConfigIndex,
+								   IMG_UINT32
+								   ui32HeapConfigNameBufSz,
+								   IMG_CHAR *
+								   puiHeapConfigName)
+{
+	PVRSRV_ERROR eError;
+
+	eError =
+	    HeapCfgHeapConfigName(NULL,
+				  (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				  ui32HeapConfigIndex, ui32HeapConfigNameBufSz,
+				  puiHeapConfigName);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE
+								hBridge,
+								IMG_UINT32
+								ui32HeapConfigIndex,
+								IMG_UINT32
+								ui32HeapIndex,
+								IMG_UINT32
+								ui32HeapNameBufSz,
+								IMG_CHAR *
+								puiHeapNameOut,
+								IMG_DEV_VIRTADDR
+								*
+								psDevVAddrBase,
+								IMG_DEVMEM_SIZE_T
+								* puiHeapLength,
+								IMG_UINT32 *
+								pui32Log2DataPageSizeOut,
+								IMG_UINT32 *
+								pui32Log2ImportAlignmentOut,
+								IMG_UINT32 *
+								pui32Log2TilingStrideFactorOut)
+{
+	PVRSRV_ERROR eError;
+
+	eError =
+	    HeapCfgHeapDetails(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+			       ui32HeapConfigIndex,
+			       ui32HeapIndex,
+			       ui32HeapNameBufSz,
+			       puiHeapNameOut,
+			       psDevVAddrBase,
+			       puiHeapLength,
+			       pui32Log2DataPageSizeOut,
+			       pui32Log2ImportAlignmentOut,
+			       pui32Log2TilingStrideFactorOut);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx,
+				  IMG_UINT32 ui32PID, IMG_BOOL bRegister)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX *psDevmemCtxInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+	eError =
+	    DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, ui32PID, bRegister);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeGetMaxDevMemSize(IMG_HANDLE
+							      hBridge,
+							      IMG_DEVMEM_SIZE_T
+							      * puiLMASize,
+							      IMG_DEVMEM_SIZE_T
+							      * puiUMASize)
+{
+	PVRSRV_ERROR eError;
+
+	eError =
+	    PVRSRVGetMaxDevMemSizeKM(NULL,
+				     (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				     puiLMASize, puiUMASize);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemGetFaultAddress(IMG_HANDLE
+								   hBridge,
+								   IMG_HANDLE
+								   hDevmemCtx,
+								   IMG_DEV_VIRTADDR
+								   *
+								   psFaultAddress)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX *psDevmemCtxInt;
+
+	psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+	eError =
+	    DevmemIntGetFaultAddress(NULL,
+				     (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				     psDevmemCtxInt, psFaultAddress);
+
+	return eError;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_pvrtl_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_pvrtl_bridge.h
new file mode 100644
index 0000000..c3cc129
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_pvrtl_bridge.h
@@ -0,0 +1,113 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for pvrtl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PVRTL_BRIDGE_H
+#define CLIENT_PVRTL_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pvrtl_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge,
+							  const IMG_CHAR *
+							  puiName,
+							  IMG_UINT32 ui32Mode,
+							  IMG_HANDLE * phSD,
+							  IMG_HANDLE * phTLPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD,
+							   IMG_UINT32 *
+							   pui32ReadOffset,
+							   IMG_UINT32 *
+							   pui32ReadLen);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD,
+							   IMG_UINT32
+							   ui32ReadOffset,
+							   IMG_UINT32
+							   ui32ReadLen);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDiscoverStreams(IMG_HANDLE
+							       hBridge,
+							       const IMG_CHAR *
+							       puiNamePattern,
+							       IMG_UINT32
+							       ui32Size,
+							       IMG_CHAR *
+							       puiStreams,
+							       IMG_UINT32 *
+							       pui32NumFound);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReserveStream(IMG_HANDLE hBridge,
+							     IMG_HANDLE hSD,
+							     IMG_UINT32 *
+							     pui32BufferOffset,
+							     IMG_UINT32
+							     ui32Size,
+							     IMG_UINT32
+							     ui32SizeMin,
+							     IMG_UINT32 *
+							     pui32Available);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCommitStream(IMG_HANDLE hBridge,
+							    IMG_HANDLE hSD,
+							    IMG_UINT32
+							    ui32ReqSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLWriteData(IMG_HANDLE hBridge,
+							 IMG_HANDLE hSD,
+							 IMG_UINT32 ui32Size,
+							 IMG_BYTE * psData);
+
+#endif /* CLIENT_PVRTL_BRIDGE_H */
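
A minimal read-side sketch of the transport-layer lifecycle these prototypes imply — open, acquire, release, close. The stream name and mode value are illustrative assumptions, and in the direct implementation that follows hBridge is unused by the TL entry points:

static PVRSRV_ERROR ExampleDrainStream(IMG_HANDLE hBridge)
{
	IMG_HANDLE hSD, hTLPMR;
	IMG_UINT32 ui32ReadOffset, ui32ReadLen;
	PVRSRV_ERROR eError;

	/* "FWTRACE" and mode 0 are placeholders, not values from this patch. */
	eError = BridgeTLOpenStream(hBridge, "FWTRACE", 0, &hSD, &hTLPMR);
	if (eError != PVRSRV_OK)
		return eError;

	/* One acquire/release cycle; a real consumer loops until empty. */
	eError = BridgeTLAcquireData(hBridge, hSD, &ui32ReadOffset,
				     &ui32ReadLen);
	if (eError == PVRSRV_OK)
		eError = BridgeTLReleaseData(hBridge, hSD, ui32ReadOffset,
					     ui32ReadLen);

	(void) BridgeTLCloseStream(hBridge, hSD);
	return eError;
}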
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_pvrtl_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_pvrtl_direct_bridge.c
new file mode 100644
index 0000000..481ed79
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_pvrtl_direct_bridge.c
@@ -0,0 +1,200 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for pvrtl
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pvrtl_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+
+#include "tlserver.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge,
+							  const IMG_CHAR *
+							  puiName,
+							  IMG_UINT32 ui32Mode,
+							  IMG_HANDLE * phSD,
+							  IMG_HANDLE * phTLPMR)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC *psSDInt = NULL;
+	PMR *psTLPMRInt = NULL;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	eError = TLServerOpenStreamKM(puiName, ui32Mode, &psSDInt, &psTLPMRInt);
+
+	*phSD = psSDInt;
+	*phTLPMR = psTLPMRInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC *psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError = TLServerCloseStreamKM(psSDInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD,
+							   IMG_UINT32 *
+							   pui32ReadOffset,
+							   IMG_UINT32 *
+							   pui32ReadLen)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC *psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError = TLServerAcquireDataKM(psSDInt, pui32ReadOffset, pui32ReadLen);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD,
+							   IMG_UINT32
+							   ui32ReadOffset,
+							   IMG_UINT32
+							   ui32ReadLen)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC *psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError = TLServerReleaseDataKM(psSDInt, ui32ReadOffset, ui32ReadLen);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDiscoverStreams(IMG_HANDLE
+							       hBridge,
+							       const IMG_CHAR *
+							       puiNamePattern,
+							       IMG_UINT32
+							       ui32Size,
+							       IMG_CHAR *
+							       puiStreams,
+							       IMG_UINT32 *
+							       pui32NumFound)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	eError =
+	    TLServerDiscoverStreamsKM(puiNamePattern,
+				      ui32Size, puiStreams, pui32NumFound);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReserveStream(IMG_HANDLE hBridge,
+							     IMG_HANDLE hSD,
+							     IMG_UINT32 *
+							     pui32BufferOffset,
+							     IMG_UINT32
+							     ui32Size,
+							     IMG_UINT32
+							     ui32SizeMin,
+							     IMG_UINT32 *
+							     pui32Available)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC *psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError =
+	    TLServerReserveStreamKM(psSDInt,
+				    pui32BufferOffset,
+				    ui32Size, ui32SizeMin, pui32Available);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCommitStream(IMG_HANDLE hBridge,
+							    IMG_HANDLE hSD,
+							    IMG_UINT32
+							    ui32ReqSize)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC *psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError = TLServerCommitStreamKM(psSDInt, ui32ReqSize);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLWriteData(IMG_HANDLE hBridge,
+							 IMG_HANDLE hSD,
+							 IMG_UINT32 ui32Size,
+							 IMG_BYTE * psData)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC *psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError = TLServerWriteDataKM(psSDInt, ui32Size, psData);
+
+	return eError;
+}
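
Each wrapper above is a thin pass-through into the corresponding TLServer*KM call with no marshalling, so errors propagate unchanged. On the write side, BridgeTLReserveStream pairs with BridgeTLCommitStream; a sketch, assuming the caller fills the stream buffer itself at the returned offset:

static PVRSRV_ERROR ExampleReserveCommit(IMG_HANDLE hBridge, IMG_HANDLE hSD,
					 IMG_UINT32 ui32Size)
{
	IMG_UINT32 ui32BufferOffset, ui32Available;
	PVRSRV_ERROR eError;

	eError = BridgeTLReserveStream(hBridge, hSD, &ui32BufferOffset,
				       ui32Size, ui32Size /* min == max */,
				       &ui32Available);
	if (eError != PVRSRV_OK)
		return eError;

	/* ... the caller writes ui32Size bytes at ui32BufferOffset ... */

	return BridgeTLCommitStream(hBridge, hSD, ui32Size);
}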
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_ri_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_ri_bridge.h
new file mode 100644
index 0000000..6778fce
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_ri_bridge.h
@@ -0,0 +1,120 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for ri
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_RI_BRIDGE_H
+#define CLIENT_RI_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_ri_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge,
+							     IMG_HANDLE
+							     hPMRHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hPMRHandle,
+								 IMG_UINT32
+								 ui32TextBSize,
+								 const IMG_CHAR
+								 * puiTextB,
+								 IMG_UINT64
+								 ui64Offset,
+								 IMG_UINT64
+								 ui64Size,
+								 IMG_BOOL
+								 bIsImport,
+								 IMG_BOOL
+								 bIsSuballoc,
+								 IMG_HANDLE *
+								 phRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteProcListEntry(IMG_HANDLE
+								  hBridge,
+								  IMG_UINT32
+								  ui32TextBSize,
+								  const IMG_CHAR
+								  * puiTextB,
+								  IMG_UINT64
+								  ui64Size,
+								  IMG_UINT64
+								  ui64DevVAddr,
+								  IMG_HANDLE *
+								  phRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hRIHandle,
+								 IMG_DEV_VIRTADDR
+								 sAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE
+								  hBridge,
+								  IMG_HANDLE
+								  hRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge,
+							IMG_HANDLE hPMRHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge,
+							   IMG_PID ui32Pid);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntryWithOwner(IMG_HANDLE
+								      hBridge,
+								      IMG_HANDLE
+								      hPMRHandle,
+								      IMG_PID
+								      ui32Owner);
+
+#endif /* CLIENT_RI_BRIDGE_H */
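
A minimal sketch of the resource-information (RI) annotation flow these prototypes suggest: tag a PMR, attach one MEMDESC entry, then delete the entry. hPMR is assumed valid; the annotation text, offset and size are illustrative:

static PVRSRV_ERROR ExampleAnnotatePMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR)
{
	IMG_HANDLE hRIHandle;
	PVRSRV_ERROR eError;

	eError = BridgeRIWritePMREntry(hBridge, hPMR);
	if (eError != PVRSRV_OK)
		return eError;

	eError = BridgeRIWriteMEMDESCEntry(hBridge, hPMR,
					   sizeof("suballoc"), "suballoc",
					   0 /* offset */, 4096 /* size */,
					   IMG_FALSE /* import */,
					   IMG_TRUE /* suballoc */,
					   &hRIHandle);
	if (eError != PVRSRV_OK)
		return eError;

	return BridgeRIDeleteMEMDESCEntry(hBridge, hRIHandle);
}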
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_ri_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_ri_direct_bridge.c
new file mode 100644
index 0000000..d7c8dfb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_ri_direct_bridge.c
@@ -0,0 +1,217 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for ri
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_ri_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "ri_typedefs.h"
+
+#include "ri_server.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge,
+							     IMG_HANDLE
+							     hPMRHandle)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRHandleInt = (PMR *) hPMRHandle;
+
+	eError = RIWritePMREntryKM(psPMRHandleInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hPMRHandle,
+								 IMG_UINT32
+								 ui32TextBSize,
+								 const IMG_CHAR
+								 * puiTextB,
+								 IMG_UINT64
+								 ui64Offset,
+								 IMG_UINT64
+								 ui64Size,
+								 IMG_BOOL
+								 bIsImport,
+								 IMG_BOOL
+								 bIsSuballoc,
+								 IMG_HANDLE *
+								 phRIHandle)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRHandleInt;
+	RI_HANDLE psRIHandleInt = NULL;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRHandleInt = (PMR *) hPMRHandle;
+
+	eError =
+	    RIWriteMEMDESCEntryKM(psPMRHandleInt,
+				  ui32TextBSize,
+				  puiTextB,
+				  ui64Offset,
+				  ui64Size,
+				  bIsImport, bIsSuballoc, &psRIHandleInt);
+
+	*phRIHandle = psRIHandleInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteProcListEntry(IMG_HANDLE
+								  hBridge,
+								  IMG_UINT32
+								  ui32TextBSize,
+								  const IMG_CHAR
+								  * puiTextB,
+								  IMG_UINT64
+								  ui64Size,
+								  IMG_UINT64
+								  ui64DevVAddr,
+								  IMG_HANDLE *
+								  phRIHandle)
+{
+	PVRSRV_ERROR eError;
+	RI_HANDLE psRIHandleInt = NULL;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	eError =
+	    RIWriteProcListEntryKM(ui32TextBSize,
+				   puiTextB,
+				   ui64Size, ui64DevVAddr, &psRIHandleInt);
+
+	*phRIHandle = psRIHandleInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hRIHandle,
+								 IMG_DEV_VIRTADDR
+								 sAddr)
+{
+	PVRSRV_ERROR eError;
+	RI_HANDLE psRIHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+	eError = RIUpdateMEMDESCAddrKM(psRIHandleInt, sAddr);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE
+								  hBridge,
+								  IMG_HANDLE
+								  hRIHandle)
+{
+	PVRSRV_ERROR eError;
+	RI_HANDLE psRIHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+	eError = RIDeleteMEMDESCEntryKM(psRIHandleInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge,
+							IMG_HANDLE hPMRHandle)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRHandleInt = (PMR *) hPMRHandle;
+
+	eError = RIDumpListKM(psPMRHandleInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	eError = RIDumpAllKM();
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge,
+							   IMG_PID ui32Pid)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	eError = RIDumpProcessKM(ui32Pid);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntryWithOwner(IMG_HANDLE
+								      hBridge,
+								      IMG_HANDLE
+								      hPMRHandle,
+								      IMG_PID
+								      ui32Owner)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRHandleInt = (PMR *) hPMRHandle;
+
+	eError = RIWritePMREntryWithOwnerKM(psPMRHandleInt, ui32Owner);
+
+	return eError;
+}
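
Because the direct bridge treats handles as raw kernel pointers and ignores hBridge in the dump paths (note the PVR_UNREFERENCED_PARAMETER calls above), a server-side debug hook can drive the dump entry points directly. A sketch; the trigger mechanism is an assumption, not part of this file:

static void ExampleDumpForPid(IMG_PID uiPid)
{
	IMG_HANDLE hBridge = NULL;	/* ignored by both dump calls */

	if (uiPid != 0)
		(void) BridgeRIDumpProcess(hBridge, uiPid);
	else
		(void) BridgeRIDumpAll(hBridge);
}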
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_sync_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_sync_bridge.h
new file mode 100644
index 0000000..1de303a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_sync_bridge.h
@@ -0,0 +1,246 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for sync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_SYNC_BRIDGE_H
+#define CLIENT_SYNC_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_sync_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE
+								     hBridge,
+								     IMG_HANDLE
+								     *
+								     phSyncHandle,
+								     IMG_UINT32
+								     *
+								     pui32SyncPrimVAddr,
+								     IMG_UINT32
+								     *
+								     pui32SyncPrimBlockSize,
+								     IMG_HANDLE
+								     *
+								     phhSyncPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE
+								    hBridge,
+								    IMG_HANDLE
+								    hSyncHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge,
+							 IMG_HANDLE hSyncHandle,
+							 IMG_UINT32 ui32Index,
+							 IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncPrimSet(IMG_HANDLE
+							       hBridge,
+							       IMG_HANDLE
+							       hSyncHandle,
+							       IMG_UINT32
+							       ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncAlloc(IMG_HANDLE hBridge,
+							     IMG_HANDLE *
+							     phSyncHandle,
+							     IMG_UINT32 *
+							     pui32SyncPrimVAddr,
+							     IMG_UINT32
+							     ui32ClassNameSize,
+							     const IMG_CHAR *
+							     puiClassName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncFree(IMG_HANDLE hBridge,
+							    IMG_HANDLE
+							    hSyncHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncQueueHWOp(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hSyncHandle,
+								 IMG_BOOL
+								 bbUpdate,
+								 IMG_UINT32 *
+								 pui32FenceValue,
+								 IMG_UINT32 *
+								 pui32UpdateValue);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncGetStatus(IMG_HANDLE
+								 hBridge,
+								 IMG_UINT32
+								 ui32SyncCount,
+								 IMG_HANDLE *
+								 phSyncHandle,
+								 IMG_UINT32 *
+								 pui32UID,
+								 IMG_UINT32 *
+								 pui32FWAddr,
+								 IMG_UINT32 *
+								 pui32CurrentOp,
+								 IMG_UINT32 *
+								 pui32NextOp);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpCreate(IMG_HANDLE
+							      hBridge,
+							      IMG_UINT32
+							      ui32SyncBlockCount,
+							      IMG_HANDLE *
+							      phBlockList,
+							      IMG_UINT32
+							      ui32ClientSyncCount,
+							      IMG_UINT32 *
+							      pui32SyncBlockIndex,
+							      IMG_UINT32 *
+							      pui32Index,
+							      IMG_UINT32
+							      ui32ServerSyncCount,
+							      IMG_HANDLE *
+							      phServerSync,
+							      IMG_HANDLE *
+							      phServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpTake(IMG_HANDLE hBridge,
+							    IMG_HANDLE
+							    hServerCookie,
+							    IMG_UINT32
+							    ui32ClientSyncCount,
+							    IMG_UINT32 *
+							    pui32Flags,
+							    IMG_UINT32 *
+							    pui32FenceValue,
+							    IMG_UINT32 *
+							    pui32UpdateValue,
+							    IMG_UINT32
+							    ui32ServerSyncCount,
+							    IMG_UINT32 *
+							    pui32ServerFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpReady(IMG_HANDLE hBridge,
+							     IMG_HANDLE
+							     hServerCookie,
+							     IMG_BOOL *
+							     pbReady);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpComplete(IMG_HANDLE
+								hBridge,
+								IMG_HANDLE
+								hServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpDestroy(IMG_HANDLE
+							       hBridge,
+							       IMG_HANDLE
+							       hServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+							   IMG_HANDLE
+							   hSyncHandle,
+							   IMG_UINT32
+							   ui32Offset);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE
+								hBridge,
+								IMG_HANDLE
+								hSyncHandle,
+								IMG_UINT32
+								ui32Offset,
+								IMG_UINT32
+								ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE
+							      hBridge,
+							      IMG_HANDLE
+							      hSyncHandle,
+							      IMG_UINT32
+							      ui32Offset,
+							      IMG_UINT32
+							      ui32Value,
+							      IMG_UINT32
+							      ui32Mask,
+							      PDUMP_POLL_OPERATOR
+							      eOperator,
+							      PDUMP_FLAGS_T
+							      uiPDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpPDumpPol(IMG_HANDLE
+								hBridge,
+								IMG_HANDLE
+								hServerCookie,
+								PDUMP_POLL_OPERATOR
+								eOperator,
+								PDUMP_FLAGS_T
+								uiPDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE
+							      hBridge,
+							      IMG_HANDLE
+							      hSyncHandle,
+							      IMG_UINT32
+							      ui32Offset,
+							      IMG_DEVMEM_OFFSET_T
+							      uiWriteOffset,
+							      IMG_DEVMEM_SIZE_T
+							      uiPacketSize,
+							      IMG_DEVMEM_SIZE_T
+							      uiBufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncAllocEvent(IMG_HANDLE hBridge,
+							    IMG_BOOL
+							    bServerSync,
+							    IMG_UINT32
+							    ui32FWAddr,
+							    IMG_UINT32
+							    ui32ClassNameSize,
+							    const IMG_CHAR *
+							    puiClassName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncFreeEvent(IMG_HANDLE hBridge,
+							   IMG_UINT32
+							   ui32FWAddr);
+
+#endif /* CLIENT_SYNC_BRIDGE_H */
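
A minimal sketch of the sync-primitive block lifecycle declared above: allocate a block on a device, set the first primitive, free the block. psDeviceNode is hypothetical; in the direct bridge it is what hBridge actually carries for the device-scoped calls (see the .c file that follows):

static PVRSRV_ERROR ExampleSyncBlock(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	IMG_HANDLE hBridge = (IMG_HANDLE) psDeviceNode;
	IMG_HANDLE hSyncBlock, hSyncPMR;
	IMG_UINT32 ui32VAddr, ui32BlockSize;
	PVRSRV_ERROR eError;

	eError = BridgeAllocSyncPrimitiveBlock(hBridge, &hSyncBlock,
					       &ui32VAddr, &ui32BlockSize,
					       &hSyncPMR);
	if (eError != PVRSRV_OK)
		return eError;

	/* Index 0, value 0: initialise the first primitive. */
	eError = BridgeSyncPrimSet(hBridge, hSyncBlock, 0, 0);

	(void) BridgeFreeSyncPrimitiveBlock(hBridge, hSyncBlock);
	return eError;
}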
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_sync_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_sync_direct_bridge.c
new file mode 100644
index 0000000..b5e2728
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_sync_direct_bridge.c
@@ -0,0 +1,653 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for sync
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_sync_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+
+#include "sync.h"
+#include "sync_server.h"
+#include "pdump.h"
+#include "pvrsrv_sync_km.h"
+#include "sync_fallback_server.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE
+								     hBridge,
+								     IMG_HANDLE
+								     *
+								     phSyncHandle,
+								     IMG_UINT32
+								     *
+								     pui32SyncPrimVAddr,
+								     IMG_UINT32
+								     *
+								     pui32SyncPrimBlockSize,
+								     IMG_HANDLE
+								     *
+								     phhSyncPMR)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+	PMR *pshSyncPMRInt = NULL;
+
+	eError =
+	    PVRSRVAllocSyncPrimitiveBlockKM(NULL,
+					    (PVRSRV_DEVICE_NODE *) ((void *)
+								    hBridge),
+					    &psSyncHandleInt,
+					    pui32SyncPrimVAddr,
+					    pui32SyncPrimBlockSize,
+					    &pshSyncPMRInt);
+
+	*phSyncHandle = psSyncHandleInt;
+	*phhSyncPMR = pshSyncPMRInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE
+								    hBridge,
+								    IMG_HANDLE
+								    hSyncHandle)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError = PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge,
+							 IMG_HANDLE hSyncHandle,
+							 IMG_UINT32 ui32Index,
+							 IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError = PVRSRVSyncPrimSetKM(psSyncHandleInt, ui32Index, ui32Value);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncPrimSet(IMG_HANDLE
+							       hBridge,
+							       IMG_HANDLE
+							       hSyncHandle,
+							       IMG_UINT32
+							       ui32Value)
+{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE *psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+	eError = PVRSRVServerSyncPrimSetKM(psSyncHandleInt, ui32Value);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncAlloc(IMG_HANDLE hBridge,
+							     IMG_HANDLE *
+							     phSyncHandle,
+							     IMG_UINT32 *
+							     pui32SyncPrimVAddr,
+							     IMG_UINT32
+							     ui32ClassNameSize,
+							     const IMG_CHAR *
+							     puiClassName)
+{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE *psSyncHandleInt = NULL;
+
+	eError =
+	    PVRSRVServerSyncAllocKM(NULL,
+				    (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				    &psSyncHandleInt, pui32SyncPrimVAddr,
+				    ui32ClassNameSize, puiClassName);
+
+	*phSyncHandle = psSyncHandleInt;
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(phSyncHandle);
+	PVR_UNREFERENCED_PARAMETER(pui32SyncPrimVAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32ClassNameSize);
+	PVR_UNREFERENCED_PARAMETER(puiClassName);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncFree(IMG_HANDLE hBridge,
+							    IMG_HANDLE
+							    hSyncHandle)
+{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE *psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+	eError = PVRSRVServerSyncFreeKM(psSyncHandleInt);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncQueueHWOp(IMG_HANDLE
+								 hBridge,
+								 IMG_HANDLE
+								 hSyncHandle,
+								 IMG_BOOL
+								 bbUpdate,
+								 IMG_UINT32 *
+								 pui32FenceValue,
+								 IMG_UINT32 *
+								 pui32UpdateValue)
+{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE *psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+	eError =
+	    PVRSRVServerSyncQueueHWOpKM(psSyncHandleInt,
+					bbUpdate,
+					pui32FenceValue, pui32UpdateValue);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+	PVR_UNREFERENCED_PARAMETER(bbUpdate);
+	PVR_UNREFERENCED_PARAMETER(pui32FenceValue);
+	PVR_UNREFERENCED_PARAMETER(pui32UpdateValue);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncGetStatus(IMG_HANDLE
+								 hBridge,
+								 IMG_UINT32
+								 ui32SyncCount,
+								 IMG_HANDLE *
+								 phSyncHandle,
+								 IMG_UINT32 *
+								 pui32UID,
+								 IMG_UINT32 *
+								 pui32FWAddr,
+								 IMG_UINT32 *
+								 pui32CurrentOp,
+								 IMG_UINT32 *
+								 pui32NextOp)
+{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE **psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SERVER_SYNC_PRIMITIVE **) phSyncHandle;
+
+	eError =
+	    PVRSRVServerSyncGetStatusKM(ui32SyncCount,
+					psSyncHandleInt,
+					pui32UID,
+					pui32FWAddr,
+					pui32CurrentOp, pui32NextOp);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(ui32SyncCount);
+	PVR_UNREFERENCED_PARAMETER(phSyncHandle);
+	PVR_UNREFERENCED_PARAMETER(pui32UID);
+	PVR_UNREFERENCED_PARAMETER(pui32FWAddr);
+	PVR_UNREFERENCED_PARAMETER(pui32CurrentOp);
+	PVR_UNREFERENCED_PARAMETER(pui32NextOp);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpCreate(IMG_HANDLE
+							      hBridge,
+							      IMG_UINT32
+							      ui32SyncBlockCount,
+							      IMG_HANDLE *
+							      phBlockList,
+							      IMG_UINT32
+							      ui32ClientSyncCount,
+							      IMG_UINT32 *
+							      pui32SyncBlockIndex,
+							      IMG_UINT32 *
+							      pui32Index,
+							      IMG_UINT32
+							      ui32ServerSyncCount,
+							      IMG_HANDLE *
+							      phServerSync,
+							      IMG_HANDLE *
+							      phServerCookie)
+{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK **psBlockListInt;
+	SERVER_SYNC_PRIMITIVE **psServerSyncInt;
+	SERVER_OP_COOKIE *psServerCookieInt = NULL;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psBlockListInt = (SYNC_PRIMITIVE_BLOCK **) phBlockList;
+	psServerSyncInt = (SERVER_SYNC_PRIMITIVE **) phServerSync;
+
+	eError =
+	    PVRSRVSyncPrimOpCreateKM(ui32SyncBlockCount,
+				     psBlockListInt,
+				     ui32ClientSyncCount,
+				     pui32SyncBlockIndex,
+				     pui32Index,
+				     ui32ServerSyncCount,
+				     psServerSyncInt, &psServerCookieInt);
+
+	*phServerCookie = psServerCookieInt;
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(ui32SyncBlockCount);
+	PVR_UNREFERENCED_PARAMETER(phBlockList);
+	PVR_UNREFERENCED_PARAMETER(ui32ClientSyncCount);
+	PVR_UNREFERENCED_PARAMETER(pui32SyncBlockIndex);
+	PVR_UNREFERENCED_PARAMETER(pui32Index);
+	PVR_UNREFERENCED_PARAMETER(ui32ServerSyncCount);
+	PVR_UNREFERENCED_PARAMETER(phServerSync);
+	PVR_UNREFERENCED_PARAMETER(phServerCookie);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpTake(IMG_HANDLE hBridge,
+							    IMG_HANDLE
+							    hServerCookie,
+							    IMG_UINT32
+							    ui32ClientSyncCount,
+							    IMG_UINT32 *
+							    pui32Flags,
+							    IMG_UINT32 *
+							    pui32FenceValue,
+							    IMG_UINT32 *
+							    pui32UpdateValue,
+							    IMG_UINT32
+							    ui32ServerSyncCount,
+							    IMG_UINT32 *
+							    pui32ServerFlags)
+{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE *psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError =
+	    PVRSRVSyncPrimOpTakeKM(psServerCookieInt,
+				   ui32ClientSyncCount,
+				   pui32Flags,
+				   pui32FenceValue,
+				   pui32UpdateValue,
+				   ui32ServerSyncCount, pui32ServerFlags);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hServerCookie);
+	PVR_UNREFERENCED_PARAMETER(ui32ClientSyncCount);
+	PVR_UNREFERENCED_PARAMETER(pui32Flags);
+	PVR_UNREFERENCED_PARAMETER(pui32FenceValue);
+	PVR_UNREFERENCED_PARAMETER(pui32UpdateValue);
+	PVR_UNREFERENCED_PARAMETER(ui32ServerSyncCount);
+	PVR_UNREFERENCED_PARAMETER(pui32ServerFlags);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpReady(IMG_HANDLE hBridge,
+							     IMG_HANDLE
+							     hServerCookie,
+							     IMG_BOOL * pbReady)
+{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE *psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError = PVRSRVSyncPrimOpReadyKM(psServerCookieInt, pbReady);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hServerCookie);
+	PVR_UNREFERENCED_PARAMETER(pbReady);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpComplete(IMG_HANDLE
+								hBridge,
+								IMG_HANDLE
+								hServerCookie)
+{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE *psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError = PVRSRVSyncPrimOpCompleteKM(psServerCookieInt);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hServerCookie);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpDestroy(IMG_HANDLE
+							       hBridge,
+							       IMG_HANDLE
+							       hServerCookie)
+{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE *psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError = PVRSRVSyncPrimOpDestroyKM(psServerCookieInt);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hServerCookie);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+							   IMG_HANDLE
+							   hSyncHandle,
+							   IMG_UINT32
+							   ui32Offset)
+{
+#if defined(PDUMP)
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError = PVRSRVSyncPrimPDumpKM(psSyncHandleInt, ui32Offset);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE
+								hBridge,
+								IMG_HANDLE
+								hSyncHandle,
+								IMG_UINT32
+								ui32Offset,
+								IMG_UINT32
+								ui32Value)
+{
+#if defined(PDUMP)
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+	    PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt, ui32Offset, ui32Value);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE
+							      hBridge,
+							      IMG_HANDLE
+							      hSyncHandle,
+							      IMG_UINT32
+							      ui32Offset,
+							      IMG_UINT32
+							      ui32Value,
+							      IMG_UINT32
+							      ui32Mask,
+							      PDUMP_POLL_OPERATOR
+							      eOperator,
+							      PDUMP_FLAGS_T
+							      uiPDumpFlags)
+{
+#if defined(PDUMP)
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+	    PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt,
+				     ui32Offset,
+				     ui32Value,
+				     ui32Mask, eOperator, uiPDumpFlags);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpPDumpPol(IMG_HANDLE
+								hBridge,
+								IMG_HANDLE
+								hServerCookie,
+								PDUMP_POLL_OPERATOR
+								eOperator,
+								PDUMP_FLAGS_T
+								uiPDumpFlags)
+{
+#if defined(SUPPORT_SERVER_SYNC_IMPL_PDUMP)
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE *psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError =
+	    PVRSRVSyncPrimOpPDumpPolKM(psServerCookieInt,
+				       eOperator, uiPDumpFlags);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hServerCookie);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE
+							      hBridge,
+							      IMG_HANDLE
+							      hSyncHandle,
+							      IMG_UINT32
+							      ui32Offset,
+							      IMG_DEVMEM_OFFSET_T
+							      uiWriteOffset,
+							      IMG_DEVMEM_SIZE_T
+							      uiPacketSize,
+							      IMG_DEVMEM_SIZE_T
+							      uiBufferSize)
+{
+#if defined(PDUMP)
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+	    PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt,
+				     ui32Offset,
+				     uiWriteOffset, uiPacketSize, uiBufferSize);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncAllocEvent(IMG_HANDLE hBridge,
+							    IMG_BOOL
+							    bServerSync,
+							    IMG_UINT32
+							    ui32FWAddr,
+							    IMG_UINT32
+							    ui32ClassNameSize,
+							    const IMG_CHAR *
+							    puiClassName)
+{
+	PVRSRV_ERROR eError;
+
+	eError =
+	    PVRSRVSyncAllocEventKM(NULL,
+				   (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				   bServerSync, ui32FWAddr, ui32ClassNameSize,
+				   puiClassName);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncFreeEvent(IMG_HANDLE hBridge,
+							   IMG_UINT32
+							   ui32FWAddr)
+{
+	PVRSRV_ERROR eError;
+
+	eError =
+	    PVRSRVSyncFreeEventKM(NULL,
+				  (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				  ui32FWAddr);
+
+	return eError;
+}
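
Several entry points in this file compile to stubs returning PVRSRV_ERROR_NOT_IMPLEMENTED when SUPPORT_SERVER_SYNC_IMPL (or PDUMP) is not defined, so callers must tolerate that error. A sketch of the caller-side handling; treating the stub as a no-op is an assumption about caller policy, not something this file mandates:

static PVRSRV_ERROR ExampleSetServerSync(IMG_HANDLE hBridge,
					 IMG_HANDLE hServerSync,
					 IMG_UINT32 ui32Value)
{
	PVRSRV_ERROR eError = BridgeServerSyncPrimSet(hBridge, hServerSync,
						      ui32Value);

	/* Feature compiled out: treat as a no-op (policy assumption). */
	if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
		return PVRSRV_OK;

	return eError;
}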
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_synctracking_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_synctracking_bridge.h
new file mode 100644
index 0000000..d9316e7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_synctracking_bridge.h
@@ -0,0 +1,78 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for synctracking
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for synctracking
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_SYNCTRACKING_BRIDGE_H
+#define CLIENT_SYNCTRACKING_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_synctracking_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge, IMG_HANDLE hhRecord);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgeSyncRecordAdd(IMG_HANDLE hBridge, IMG_HANDLE *phhRecord,
+		    IMG_HANDLE hhServerSyncPrimBlock,
+		    IMG_UINT32 ui32ui32FwBlockAddr,
+		    IMG_UINT32 ui32ui32SyncOffset, IMG_BOOL bbServerSync,
+		    IMG_UINT32 ui32ClassNameSize, const IMG_CHAR *puiClassName);
+
+#endif /* CLIENT_SYNCTRACKING_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_synctracking_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_synctracking_direct_bridge.c
new file mode 100644
index 0000000..1a1efdb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/client_synctracking_direct_bridge.c
@@ -0,0 +1,92 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for synctracking
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for synctracking
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_synctracking_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+
+#include "sync.h"
+#include "sync_server.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge, IMG_HANDLE hhRecord)
+{
+	PVRSRV_ERROR eError;
+	SYNC_RECORD_HANDLE pshRecordInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	pshRecordInt = (SYNC_RECORD_HANDLE) hhRecord;
+
+	eError = PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV
+BridgeSyncRecordAdd(IMG_HANDLE hBridge, IMG_HANDLE *phhRecord,
+		    IMG_HANDLE hhServerSyncPrimBlock,
+		    IMG_UINT32 ui32ui32FwBlockAddr,
+		    IMG_UINT32 ui32ui32SyncOffset, IMG_BOOL bbServerSync,
+		    IMG_UINT32 ui32ClassNameSize, const IMG_CHAR *puiClassName)
+{
+	PVRSRV_ERROR eError;
+	SYNC_RECORD_HANDLE pshRecordInt = NULL;
+	SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt;
+
+	pshServerSyncPrimBlockInt =
+	    (SYNC_PRIMITIVE_BLOCK *) hhServerSyncPrimBlock;
+
+	eError =
+	    PVRSRVSyncRecordAddKM(NULL,
+				  (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				  &pshRecordInt, pshServerSyncPrimBlockInt,
+				  ui32ui32FwBlockAddr, ui32ui32SyncOffset,
+				  bbServerSync, ui32ClassNameSize,
+				  puiClassName);
+
+	*phhRecord = pshRecordInt;
+	return eError;
+}
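
For orientation, a minimal sketch of how server-context code might drive the
direct bridge above. The two Bridge* entry points and their signatures come
from this file; the helper name, device node, and sync block are illustrative
assumptions.

	#include "client_synctracking_bridge.h"
	#include "sync_server.h"

	/* Hypothetical caller: record a sync for debugging, then drop the
	 * record. The direct bridge treats hBridge as a PVRSRV_DEVICE_NODE
	 * pointer, as the cast inside BridgeSyncRecordAdd() shows. */
	static PVRSRV_ERROR RecordSyncForDebug(PVRSRV_DEVICE_NODE *psDevNode,
					       SYNC_PRIMITIVE_BLOCK *psSyncBlock,
					       IMG_UINT32 ui32FwBlockAddr,
					       IMG_UINT32 ui32SyncOffset)
	{
		IMG_HANDLE hRecord = NULL;
		static const IMG_CHAR acName[] = "debug-sync";
		PVRSRV_ERROR eError;

		eError = BridgeSyncRecordAdd((IMG_HANDLE)psDevNode, &hRecord,
					     (IMG_HANDLE)psSyncBlock,
					     ui32FwBlockAddr, ui32SyncOffset,
					     IMG_FALSE, /* bbServerSync */
					     sizeof(acName), acName);
		if (eError != PVRSRV_OK)
			return eError;

		return BridgeSyncRecordRemoveByHandle((IMG_HANDLE)psDevNode,
						      hRecord);
	}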
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_cache_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_cache_bridge.h
new file mode 100644
index 0000000..a62070b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_cache_bridge.h
@@ -0,0 +1,130 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for cache
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for cache
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_CACHE_BRIDGE_H
+#define COMMON_CACHE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "cache_ops.h"
+
+#define PVRSRV_BRIDGE_CACHE_CMD_FIRST			0
+#define PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE			PVRSRV_BRIDGE_CACHE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CACHE_CACHEOPEXEC			PVRSRV_BRIDGE_CACHE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CACHE_CACHEOPLOG			PVRSRV_BRIDGE_CACHE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CACHE_CMD_LAST			(PVRSRV_BRIDGE_CACHE_CMD_FIRST+2)
+
+/*******************************************
+            CacheOpQueue
+ *******************************************/
+
+/* Bridge in structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPQUEUE_TAG
+{
+	IMG_UINT32 ui32NumCacheOps;
+	IMG_HANDLE *phPMR;
+	IMG_UINT64 *pui64Address;
+	IMG_DEVMEM_OFFSET_T *puiOffset;
+	IMG_DEVMEM_SIZE_T *puiSize;
+	PVRSRV_CACHE_OP *piuCacheOp;
+	IMG_UINT32 ui32OpTimeline;
+	IMG_UINT32 ui32OpInfoPgGFSeqNum;
+	IMG_UINT32 ui32CurrentFenceSeqNum;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CACHEOPQUEUE;
+
+/* Bridge out structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPQUEUE_TAG
+{
+	IMG_UINT32 ui32NextFenceSeqNum;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CACHEOPQUEUE;
+
+/*******************************************
+            CacheOpExec
+ *******************************************/
+
+/* Bridge in structure for CacheOpExec */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPEXEC_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_UINT64 ui64Address;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiSize;
+	PVRSRV_CACHE_OP iuCacheOp;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CACHEOPEXEC;
+
+/* Bridge out structure for CacheOpExec */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPEXEC_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CACHEOPEXEC;
+
+/*******************************************
+            CacheOpLog
+ *******************************************/
+
+/* Bridge in structure for CacheOpLog */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPLOG_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_UINT64 ui64Address;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_INT64 i64QueuedTimeUs;
+	IMG_INT64 i64ExecuteTimeUs;
+	IMG_INT32 i32NumRBF;
+	IMG_BOOL bIsDiscard;
+	PVRSRV_CACHE_OP iuCacheOp;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CACHEOPLOG;
+
+/* Bridge out structure for CacheOpLog */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPLOG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CACHEOPLOG;
+
+#endif /* COMMON_CACHE_BRIDGE_H */
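
Each of these generated headers repeats the same CMD_FIRST/CMD_LAST pattern,
giving every bridge module a dense, zero-based command range. A hedged sketch
of the dispatch this enables; the handler type, table, and guard below are
illustrative, only the PVRSRV_BRIDGE_CACHE_* macros come from the header:

	#include "common_cache_bridge.h"

	/* Hypothetical per-module dispatch table sized from the command range. */
	typedef PVRSRV_ERROR (*CACHE_BRIDGE_FN)(void *pvBridgeIn, void *pvBridgeOut);

	static CACHE_BRIDGE_FN g_aCacheHandlers[PVRSRV_BRIDGE_CACHE_CMD_LAST -
						PVRSRV_BRIDGE_CACHE_CMD_FIRST + 1];

	static PVRSRV_ERROR DispatchCacheCommand(IMG_UINT32 ui32CmdID,
						 void *pvBridgeIn, void *pvBridgeOut)
	{
		/* CMD_FIRST is 0, so the command ID indexes the table directly. */
		if (ui32CmdID > PVRSRV_BRIDGE_CACHE_CMD_LAST ||
		    g_aCacheHandlers[ui32CmdID] == NULL)
			return PVRSRV_ERROR_INVALID_PARAMS;

		return g_aCacheHandlers[ui32CmdID](pvBridgeIn, pvBridgeOut);
	}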
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_cmm_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_cmm_bridge.h
new file mode 100644
index 0000000..52bdef5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_cmm_bridge.h
@@ -0,0 +1,113 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for cmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for cmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_CMM_BRIDGE_H
+#define COMMON_CMM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+
+#define PVRSRV_BRIDGE_CMM_CMD_FIRST			0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX			PVRSRV_BRIDGE_CMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX			PVRSRV_BRIDGE_CMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX			PVRSRV_BRIDGE_CMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CMM_CMD_LAST			(PVRSRV_BRIDGE_CMM_CMD_FIRST+2)
+
+/*******************************************
+            DevmemIntExportCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntExportCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX_TAG
+{
+	IMG_HANDLE hContext;
+	IMG_HANDLE hPMR;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX;
+
+/* Bridge out structure for DevmemIntExportCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX_TAG
+{
+	IMG_HANDLE hContextExport;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX;
+
+/*******************************************
+            DevmemIntUnexportCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnexportCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX_TAG
+{
+	IMG_HANDLE hContextExport;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX;
+
+/* Bridge out structure for DevmemIntUnexportCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX;
+
+/*******************************************
+            DevmemIntAcquireRemoteCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntAcquireRemoteCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX;
+
+/* Bridge out structure for DevmemIntAcquireRemoteCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX_TAG
+{
+	IMG_HANDLE hContext;
+	IMG_HANDLE hPrivData;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX;
+
+#endif /* COMMON_CMM_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_devicememhistory_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_devicememhistory_bridge.h
new file mode 100644
index 0000000..da7af1b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_devicememhistory_bridge.h
@@ -0,0 +1,183 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for devicememhistory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DEVICEMEMHISTORY_BRIDGE_H
+#define COMMON_DEVICEMEMHISTORY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "devicemem_typedefs.h"
+
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST			0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP			PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP			PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE			PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE			PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE			PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST			(PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4)
+
+/*******************************************
+            DevicememHistoryMap
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_SIZE_T uiOffset;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	const IMG_CHAR *puiText;
+	IMG_UINT32 ui32Log2PageSize;
+	IMG_UINT32 ui32AllocationIndex;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP;
+
+/* Bridge out structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP_TAG
+{
+	IMG_UINT32 ui32AllocationIndexOut;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP;
+
+/*******************************************
+            DevicememHistoryUnmap
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_SIZE_T uiOffset;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	const IMG_CHAR *puiText;
+	IMG_UINT32 ui32Log2PageSize;
+	IMG_UINT32 ui32AllocationIndex;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP;
+
+/* Bridge out structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP_TAG
+{
+	IMG_UINT32 ui32AllocationIndexOut;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP;
+
+/*******************************************
+            DevicememHistoryMapVRange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMapVRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE_TAG
+{
+	IMG_DEV_VIRTADDR sBaseDevVAddr;
+	IMG_UINT32 ui32ui32StartPage;
+	IMG_UINT32 ui32NumPages;
+	IMG_DEVMEM_SIZE_T uiAllocSize;
+	const IMG_CHAR *puiText;
+	IMG_UINT32 ui32Log2PageSize;
+	IMG_UINT32 ui32AllocationIndex;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE;
+
+/* Bridge out structure for DevicememHistoryMapVRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE_TAG
+{
+	IMG_UINT32 ui32AllocationIndexOut;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE;
+
+/*******************************************
+            DevicememHistoryUnmapVRange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmapVRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE_TAG
+{
+	IMG_DEV_VIRTADDR sBaseDevVAddr;
+	IMG_UINT32 ui32ui32StartPage;
+	IMG_UINT32 ui32NumPages;
+	IMG_DEVMEM_SIZE_T uiAllocSize;
+	const IMG_CHAR *puiText;
+	IMG_UINT32 ui32Log2PageSize;
+	IMG_UINT32 ui32AllocationIndex;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE;
+
+/* Bridge out structure for DevicememHistoryUnmapVRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE_TAG
+{
+	IMG_UINT32 ui32AllocationIndexOut;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE;
+
+/*******************************************
+            DevicememHistorySparseChange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistorySparseChange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_SIZE_T uiOffset;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	const IMG_CHAR *puiText;
+	IMG_UINT32 ui32Log2PageSize;
+	IMG_UINT32 ui32AllocPageCount;
+	IMG_UINT32 *pui32AllocPageIndices;
+	IMG_UINT32 ui32FreePageCount;
+	IMG_UINT32 *pui32FreePageIndices;
+	IMG_UINT32 ui32AllocationIndex;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE;
+
+/* Bridge out structure for DevicememHistorySparseChange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE_TAG
+{
+	IMG_UINT32 ui32AllocationIndexOut;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE;
+
+#endif /* COMMON_DEVICEMEMHISTORY_BRIDGE_H */
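
Each devicememhistory call carries ui32AllocationIndex in and returns
ui32AllocationIndexOut, which suggests (an inference from the field pairing,
not something this header states) that callers feed each returned index into
their next call so the server can correlate entries for the same allocation.
A sketch of filling the map request; the field names are from the header, the
helper and the 4K page-size choice are assumptions:

	#include "common_devicememhistory_bridge.h"

	/* Hypothetical helper building a DevicememHistoryMap request. */
	static void BuildHistoryMapRequest(PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psIn,
					   IMG_HANDLE hPMR,
					   IMG_DEV_VIRTADDR sDevVAddr,
					   IMG_DEVMEM_SIZE_T uiSize,
					   const IMG_CHAR *puiText,
					   IMG_UINT32 ui32PrevAllocationIndex)
	{
		psIn->hPMR = hPMR;
		psIn->uiOffset = 0;		/* whole-PMR mapping assumed */
		psIn->sDevVAddr = sDevVAddr;
		psIn->uiSize = uiSize;
		psIn->puiText = puiText;	/* annotation string */
		psIn->ui32Log2PageSize = 12;	/* 4K pages, an assumption */
		psIn->ui32AllocationIndex = ui32PrevAllocationIndex;
	}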
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_dmabuf_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_dmabuf_bridge.h
new file mode 100644
index 0000000..9c7e825
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_dmabuf_bridge.h
@@ -0,0 +1,126 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for dmabuf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for dmabuf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DMABUF_BRIDGE_H
+#define COMMON_DMABUF_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+
+#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST			0
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF			PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF			PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF			PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DMABUF_CMD_LAST			(PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2)
+
+/*******************************************
+            PhysmemImportDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF_TAG
+{
+	IMG_INT ifd;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+	IMG_UINT32 ui32NameSize;
+	const IMG_CHAR *puiName;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF;
+
+/* Bridge out structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_ALIGN_T sAlign;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF;
+
+/*******************************************
+            PhysmemExportDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemExportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF;
+
+/* Bridge out structure for PhysmemExportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG
+{
+	IMG_INT iFd;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF;
+
+/*******************************************
+            PhysmemImportSparseDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportSparseDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF_TAG
+{
+	IMG_INT ifd;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+	IMG_DEVMEM_SIZE_T uiChunkSize;
+	IMG_UINT32 ui32NumPhysChunks;
+	IMG_UINT32 ui32NumVirtChunks;
+	IMG_UINT32 *pui32MappingTable;
+	IMG_UINT32 ui32NameSize;
+	const IMG_CHAR *puiName;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF;
+
+/* Bridge out structure for PhysmemImportSparseDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_ALIGN_T sAlign;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF;
+
+#endif /* COMMON_DMABUF_BRIDGE_H */
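
A sketch of building the dma-buf import request from a file descriptor handed
over by an exporter; the field names are from the header, while the helper and
its use of the C string routines are assumptions for a host-side illustration:

	#include <string.h>	/* memset/strlen, host-side sketch only */
	#include "common_dmabuf_bridge.h"

	/* Hypothetical helper filling PhysmemImportDmaBuf's in-structure. */
	static void BuildDmaBufImport(PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psIn,
				      IMG_INT iDmaBufFd,
				      PVRSRV_MEMALLOCFLAGS_T uiFlags,
				      const IMG_CHAR *puiName)
	{
		memset(psIn, 0, sizeof(*psIn));
		psIn->ifd = iDmaBufFd;		/* fd from the dma-buf exporter */
		psIn->uiFlags = uiFlags;	/* PMR allocation flags */
		psIn->ui32NameSize = (IMG_UINT32)strlen(puiName) + 1; /* incl. NUL */
		psIn->puiName = puiName;	/* annotation string */
	}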
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_htbuffer_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_htbuffer_bridge.h
new file mode 100644
index 0000000..a936da3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_htbuffer_bridge.h
@@ -0,0 +1,102 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for htbuffer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for htbuffer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_HTBUFFER_BRIDGE_H
+#define COMMON_HTBUFFER_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "htbuffer_types.h"
+
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST			0
+#define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL			PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0
+#define PVRSRV_BRIDGE_HTBUFFER_HTBLOG			PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST			(PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1)
+
+/*******************************************
+            HTBControl
+ *******************************************/
+
+/* Bridge in structure for HTBControl */
+typedef struct PVRSRV_BRIDGE_IN_HTBCONTROL_TAG
+{
+	IMG_UINT32 ui32NumGroups;
+	IMG_UINT32 *pui32GroupEnable;
+	IMG_UINT32 ui32LogLevel;
+	IMG_UINT32 ui32EnablePID;
+	IMG_UINT32 ui32LogMode;
+	IMG_UINT32 ui32OpMode;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HTBCONTROL;
+
+/* Bridge out structure for HTBControl */
+typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HTBCONTROL;
+
+/*******************************************
+            HTBLog
+ *******************************************/
+
+/* Bridge in structure for HTBLog */
+typedef struct PVRSRV_BRIDGE_IN_HTBLOG_TAG
+{
+	IMG_UINT32 ui32PID;
+	IMG_UINT64 ui64TimeStamp;
+	IMG_UINT32 ui32SF;
+	IMG_UINT32 ui32NumArgs;
+	IMG_UINT32 *pui32Args;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HTBLOG;
+
+/* Bridge out structure for HTBLog */
+typedef struct PVRSRV_BRIDGE_OUT_HTBLOG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HTBLOG;
+
+#endif /* COMMON_HTBUFFER_BRIDGE_H */
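
All of these bridge structures are declared packed, presumably because they
form the payload that crosses the user/kernel bridge boundary and so must have
a layout that does not depend on compiler padding. A build-time guard in that
spirit (illustrative, not part of the header) would catch ABI drift:

	#include "common_htbuffer_bridge.h"

	/* With packed layout, sizeof is exactly the sum of the members; a
	 * negative array size turns any mismatch into a compile error. */
	typedef char HTBLOG_IN_SIZE_CHECK[
		(sizeof(PVRSRV_BRIDGE_IN_HTBLOG) ==
		 3 * sizeof(IMG_UINT32) + sizeof(IMG_UINT64) +
		 sizeof(IMG_UINT32 *)) ? 1 : -1];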
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_mm_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_mm_bridge.h
new file mode 100644
index 0000000..fbc1365
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_mm_bridge.h
@@ -0,0 +1,745 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for mm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_MM_BRIDGE_H
+#define COMMON_MM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#define PVRSRV_BRIDGE_MM_CMD_FIRST			0
+#define PVRSRV_BRIDGE_MM_PMREXPORTPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_MM_PMRGETUID			PVRSRV_BRIDGE_MM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE			PVRSRV_BRIDGE_MM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE			PVRSRV_BRIDGE_MM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_MM_PMRUNREFPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+8
+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+9
+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+10
+#define PVRSRV_BRIDGE_MM_PHYSMEMIMPORTSECBUF			PVRSRV_BRIDGE_MM_CMD_FIRST+11
+#define PVRSRV_BRIDGE_MM_DEVMEMINTPIN			PVRSRV_BRIDGE_MM_CMD_FIRST+12
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN			PVRSRV_BRIDGE_MM_CMD_FIRST+13
+#define PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE			PVRSRV_BRIDGE_MM_CMD_FIRST+14
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE			PVRSRV_BRIDGE_MM_CMD_FIRST+15
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE			PVRSRV_BRIDGE_MM_CMD_FIRST+16
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY			PVRSRV_BRIDGE_MM_CMD_FIRST+17
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE			PVRSRV_BRIDGE_MM_CMD_FIRST+18
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY			PVRSRV_BRIDGE_MM_CMD_FIRST+19
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+20
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+21
+#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE			PVRSRV_BRIDGE_MM_CMD_FIRST+22
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE			PVRSRV_BRIDGE_MM_CMD_FIRST+23
+#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM			PVRSRV_BRIDGE_MM_CMD_FIRST+24
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES			PVRSRV_BRIDGE_MM_CMD_FIRST+25
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES			PVRSRV_BRIDGE_MM_CMD_FIRST+26
+#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID			PVRSRV_BRIDGE_MM_CMD_FIRST+27
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT			PVRSRV_BRIDGE_MM_CMD_FIRST+28
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT			PVRSRV_BRIDGE_MM_CMD_FIRST+29
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME			PVRSRV_BRIDGE_MM_CMD_FIRST+30
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS			PVRSRV_BRIDGE_MM_CMD_FIRST+31
+#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM			PVRSRV_BRIDGE_MM_CMD_FIRST+32
+#define PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE			PVRSRV_BRIDGE_MM_CMD_FIRST+33
+#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS			PVRSRV_BRIDGE_MM_CMD_FIRST+34
+#define PVRSRV_BRIDGE_MM_CMD_LAST			(PVRSRV_BRIDGE_MM_CMD_FIRST+34)
+
+/*******************************************
+            PMRExportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMREXPORTPMR_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMREXPORTPMR;
+
+/* Bridge out structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMREXPORTPMR_TAG
+{
+	IMG_HANDLE hPMRExport;
+	IMG_UINT64 ui64Size;
+	IMG_UINT32 ui32Log2Contig;
+	IMG_UINT64 ui64Password;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMREXPORTPMR;
+
+/*******************************************
+            PMRUnexportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR_TAG
+{
+	IMG_HANDLE hPMRExport;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR;
+
+/* Bridge out structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR;
+
+/*******************************************
+            PMRGetUID
+ *******************************************/
+
+/* Bridge in structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_IN_PMRGETUID_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRGETUID;
+
+/* Bridge out structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_OUT_PMRGETUID_TAG
+{
+	IMG_UINT64 ui64UID;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRGETUID;
+
+/*******************************************
+            PMRMakeLocalImportHandle
+ *******************************************/
+
+/* Bridge in structure for PMRMakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE_TAG
+{
+	IMG_HANDLE hBuffer;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE;
+
+/* Bridge out structure for PMRMakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE_TAG
+{
+	IMG_HANDLE hExtMem;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE;
+
+/*******************************************
+            PMRUnmakeLocalImportHandle
+ *******************************************/
+
+/* Bridge in structure for PMRUnmakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE_TAG
+{
+	IMG_HANDLE hExtMem;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE;
+
+/* Bridge out structure for PMRUnmakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE;
+
+/*******************************************
+            PMRImportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRIMPORTPMR_TAG
+{
+	IMG_HANDLE hPMRExport;
+	IMG_UINT64 ui64uiPassword;
+	IMG_UINT64 ui64uiSize;
+	IMG_UINT32 ui32uiLog2Contig;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRIMPORTPMR;
+
+/* Bridge out structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRIMPORTPMR_TAG
+{
+	IMG_HANDLE hPMR;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRIMPORTPMR;
+
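
The export and import structures mirror each other: the handle, password,
size, and log2-contiguity returned by PMRExportPMR are exactly what
PMRImportPMR expects back, so the importing process can be validated against
the exporter. A sketch of that round trip (the transport that carries the
values between processes is not shown, and the helper is an assumption):

	#include "common_mm_bridge.h"

	/* Illustrative hand-off from export output to import input. */
	static void ExportToImport(const PVRSRV_BRIDGE_OUT_PMREXPORTPMR *psExportOut,
				   PVRSRV_BRIDGE_IN_PMRIMPORTPMR *psImportIn)
	{
		psImportIn->hPMRExport = psExportOut->hPMRExport;
		psImportIn->ui64uiPassword = psExportOut->ui64Password;
		psImportIn->ui64uiSize = psExportOut->ui64Size;
		psImportIn->ui32uiLog2Contig = psExportOut->ui32Log2Contig;
	}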
+/*******************************************
+            PMRLocalImportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR_TAG
+{
+	IMG_HANDLE hExtHandle;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR;
+
+/* Bridge out structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_ALIGN_T sAlign;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR;
+
+/*******************************************
+            PMRUnrefPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFPMR_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRUNREFPMR;
+
+/* Bridge out structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRUNREFPMR;
+
+/*******************************************
+            PMRUnrefUnlockPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefUnlockPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR;
+
+/* Bridge out structure for PMRUnrefUnlockPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR;
+
+/*******************************************
+            PhysmemNewRamBackedPMR
+ *******************************************/
+
+/* Bridge in structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_SIZE_T uiChunkSize;
+	IMG_UINT32 ui32NumPhysChunks;
+	IMG_UINT32 ui32NumVirtChunks;
+	IMG_UINT32 *pui32MappingTable;
+	IMG_UINT32 ui32Log2PageSize;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+	IMG_UINT32 ui32AnnotationLength;
+	const IMG_CHAR *puiAnnotation;
+	IMG_PID ui32PID;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR;
+
+/* Bridge out structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR;
+
+/*******************************************
+            PhysmemNewRamBackedLockedPMR
+ *******************************************/
+
+/* Bridge in structure for PhysmemNewRamBackedLockedPMR */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG
+{
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_SIZE_T uiChunkSize;
+	IMG_UINT32 ui32NumPhysChunks;
+	IMG_UINT32 ui32NumVirtChunks;
+	IMG_UINT32 *pui32MappingTable;
+	IMG_UINT32 ui32Log2PageSize;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+	IMG_UINT32 ui32AnnotationLength;
+	const IMG_CHAR *puiAnnotation;
+	IMG_PID ui32PID;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR;
+
+/* Bridge out structure for PhysmemNewRamBackedLockedPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR;
+
+/*******************************************
+            PhysmemImportSecBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportSecBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF_TAG
+{
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32 ui32Log2Align;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF;
+
+/* Bridge out structure for PhysmemImportSecBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	IMG_UINT64 ui64SecBufHandle;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF;
+
+/*******************************************
+            DevmemIntPin
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPin */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPIN_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPIN;
+
+/* Bridge out structure for DevmemIntPin */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPIN_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPIN;
+
+/*******************************************
+            DevmemIntUnpin
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnpin */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN;
+
+/* Bridge out structure for DevmemIntUnpin */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN;
+
+/*******************************************
+            DevmemIntPinValidate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPinValidate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE_TAG
+{
+	IMG_HANDLE hMapping;
+	IMG_HANDLE hPMR;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE;
+
+/* Bridge out structure for DevmemIntPinValidate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE;
+
+/*******************************************
+            DevmemIntUnpinInvalidate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnpinInvalidate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE_TAG
+{
+	IMG_HANDLE hMapping;
+	IMG_HANDLE hPMR;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE;
+
+/* Bridge out structure for DevmemIntUnpinInvalidate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE;
+
+/*******************************************
+            DevmemIntCtxCreate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE_TAG
+{
+	IMG_BOOL bbKernelMemoryCtx;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE;
+
+/* Bridge out structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE_TAG
+{
+	IMG_HANDLE hDevMemServerContext;
+	IMG_HANDLE hPrivData;
+	IMG_UINT32 ui32CPUCacheLineSize;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE;
+
+/*******************************************
+            DevmemIntCtxDestroy
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY_TAG
+{
+	IMG_HANDLE hDevmemServerContext;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY;
+
+/* Bridge out structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY;
+
+/*******************************************
+            DevmemIntHeapCreate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG
+{
+	IMG_HANDLE hDevmemCtx;
+	IMG_DEV_VIRTADDR sHeapBaseAddr;
+	IMG_DEVMEM_SIZE_T uiHeapLength;
+	IMG_UINT32 ui32Log2DataPageSize;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE;
+
+/* Bridge out structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE_TAG
+{
+	IMG_HANDLE hDevmemHeapPtr;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE;
+
+/*******************************************
+            DevmemIntHeapDestroy
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY_TAG
+{
+	IMG_HANDLE hDevmemHeap;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY;
+
+/* Bridge out structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY;
+
+/*******************************************
+            DevmemIntMapPMR
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG
+{
+	IMG_HANDLE hDevmemServerHeap;
+	IMG_HANDLE hReservation;
+	IMG_HANDLE hPMR;
+	PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR;
+
+/* Bridge out structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG
+{
+	IMG_HANDLE hMapping;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR;
+
+/*******************************************
+            DevmemIntUnmapPMR
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG
+{
+	IMG_HANDLE hMapping;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR;
+
+/* Bridge out structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR;
+
+/*******************************************
+            DevmemIntReserveRange
+ *******************************************/
+
+/* Bridge in structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG
+{
+	IMG_HANDLE hDevmemServerHeap;
+	IMG_DEV_VIRTADDR sAddress;
+	IMG_DEVMEM_SIZE_T uiLength;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE;
+
+/* Bridge out structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG
+{
+	IMG_HANDLE hReservation;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE;
+
+/*******************************************
+            DevmemIntUnreserveRange
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE_TAG
+{
+	IMG_HANDLE hReservation;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE;
+
+/* Bridge out structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE;
+
+/*******************************************
+            ChangeSparseMem
+ *******************************************/
+
+/* Bridge in structure for ChangeSparseMem */
+typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG
+{
+	IMG_HANDLE hSrvDevMemHeap;
+	IMG_HANDLE hPMR;
+	IMG_UINT32 ui32AllocPageCount;
+	IMG_UINT32 *pui32AllocPageIndices;
+	IMG_UINT32 ui32FreePageCount;
+	IMG_UINT32 *pui32FreePageIndices;
+	IMG_UINT32 ui32SparseFlags;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_UINT64 ui64CPUVAddr;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CHANGESPARSEMEM;
+
+/* Bridge out structure for ChangeSparseMem */
+typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM;
+
+/*******************************************
+            DevmemIntMapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES_TAG
+{
+	IMG_HANDLE hReservation;
+	IMG_HANDLE hPMR;
+	IMG_UINT32 ui32PageCount;
+	IMG_UINT32 ui32PhysicalPgOffset;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+	IMG_DEV_VIRTADDR sDevVAddr;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES;
+
+/* Bridge out structure for DevmemIntMapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES;
+
+/*******************************************
+            DevmemIntUnmapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES_TAG
+{
+	IMG_HANDLE hReservation;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_UINT32 ui32PageCount;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES;
+
+/* Bridge out structure for DevmemIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES;
+
+/*******************************************
+            DevmemIsVDevAddrValid
+ *******************************************/
+
+/* Bridge in structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID_TAG
+{
+	IMG_HANDLE hDevmemCtx;
+	IMG_DEV_VIRTADDR sAddress;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID;
+
+/* Bridge out structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID;
+
+/*******************************************
+            HeapCfgHeapConfigCount
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT;
+
+/* Bridge out structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+	IMG_UINT32 ui32NumHeapConfigs;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT;
+
+/*******************************************
+            HeapCfgHeapCount
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT_TAG
+{
+	IMG_UINT32 ui32HeapConfigIndex;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT;
+
+/* Bridge out structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT_TAG
+{
+	IMG_UINT32 ui32NumHeaps;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT;
+
+/*******************************************
+            HeapCfgHeapConfigName
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME_TAG
+{
+	IMG_UINT32 ui32HeapConfigIndex;
+	IMG_UINT32 ui32HeapConfigNameBufSz;
+	/* Output pointer puiHeapConfigName is also an implied input */
+	IMG_CHAR *puiHeapConfigName;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME;
+
+/* Bridge out structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME_TAG
+{
+	IMG_CHAR *puiHeapConfigName;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME;
+
+/*******************************************
+            HeapCfgHeapDetails
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS_TAG
+{
+	IMG_UINT32 ui32HeapConfigIndex;
+	IMG_UINT32 ui32HeapIndex;
+	IMG_UINT32 ui32HeapNameBufSz;
+	/* Output pointer puiHeapNameOut is also an implied input */
+	IMG_CHAR *puiHeapNameOut;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS;
+
+/* Bridge out structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG
+{
+	IMG_CHAR *puiHeapNameOut;
+	IMG_DEV_VIRTADDR sDevVAddrBase;
+	IMG_DEVMEM_SIZE_T uiHeapLength;
+	IMG_UINT32 ui32Log2DataPageSizeOut;
+	IMG_UINT32 ui32Log2ImportAlignmentOut;
+	IMG_UINT32 ui32Log2TilingStrideFactorOut;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS;
+
+/*******************************************
+            DevmemIntRegisterPFNotifyKM
+ *******************************************/
+
+/* Bridge in structure for DevmemIntRegisterPFNotifyKM */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG
+{
+	IMG_HANDLE hDevmemCtx;
+	IMG_UINT32 ui32PID;
+	IMG_BOOL bRegister;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM;
+
+/* Bridge out structure for DevmemIntRegisterPFNotifyKM */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM;
+
+/*******************************************
+            GetMaxDevMemSize
+ *******************************************/
+
+/* Bridge in structure for GetMaxDevMemSize */
+typedef struct PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE;
+
+/* Bridge out structure for GetMaxDevMemSize */
+typedef struct PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE_TAG
+{
+	IMG_DEVMEM_SIZE_T uiLMASize;
+	IMG_DEVMEM_SIZE_T uiUMASize;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE;
+
+/*******************************************
+            DevmemGetFaultAddress
+ *******************************************/
+
+/* Bridge in structure for DevmemGetFaultAddress */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS_TAG
+{
+	IMG_HANDLE hDevmemCtx;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS;
+
+/* Bridge out structure for DevmemGetFaultAddress */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS_TAG
+{
+	IMG_DEV_VIRTADDR sFaultAddress;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS;
+
+#endif /* COMMON_MM_BRIDGE_H */
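The three heap-configuration queries above are meant to compose: HeapCfgHeapConfigCount bounds the outer loop, HeapCfgHeapCount the inner one, and HeapCfgHeapDetails fills a caller-supplied name buffer that travels in the input structure, exactly as the "implied input" comments note. A minimal sketch of that flow, not part of this patch, assuming a hypothetical BridgeCall() dispatch helper in place of the generated client stubs, and assuming the PVRSRV_BRIDGE_MM_HEAPCFG* command IDs defined earlier in this header follow the same CMD_FIRST+n pattern as the other bridges:

extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *pvIn, void *pvOut); /* assumed */

static void ListHeaps(void)
{
	PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT sInCfg = { 0 };
	PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT sOutCfg;
	IMG_UINT32 i, j;

	if (BridgeCall(PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT,
		       &sInCfg, &sOutCfg) != PVRSRV_OK ||
	    sOutCfg.eError != PVRSRV_OK)
		return;

	for (i = 0; i < sOutCfg.ui32NumHeapConfigs; i++)
	{
		PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT sInCnt = { 0 };
		PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT sOutCnt;

		sInCnt.ui32HeapConfigIndex = i;
		if (BridgeCall(PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT,
			       &sInCnt, &sOutCnt) != PVRSRV_OK ||
		    sOutCnt.eError != PVRSRV_OK)
			continue;

		for (j = 0; j < sOutCnt.ui32NumHeaps; j++)
		{
			IMG_CHAR acName[64];	/* buffer size is arbitrary */
			PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS sInDet;
			PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS sOutDet;

			sInDet.ui32HeapConfigIndex = i;
			sInDet.ui32HeapIndex = j;
			sInDet.ui32HeapNameBufSz = sizeof(acName);
			sInDet.puiHeapNameOut = acName;	/* implied input */
			(void)BridgeCall(PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS,
					 &sInDet, &sOutDet);
		}
	}
}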
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_pvrtl_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_pvrtl_bridge.h
new file mode 100644
index 0000000..2047536
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_pvrtl_bridge.h
@@ -0,0 +1,214 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for pvrtl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PVRTL_BRIDGE_H
+#define COMMON_PVRTL_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+
+#define PVRSRV_BRIDGE_PVRTL_CMD_FIRST			0
+#define PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PVRTL_TLWRITEDATA			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7
+#define PVRSRV_BRIDGE_PVRTL_CMD_LAST			(PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7)
+
+/*******************************************
+            TLOpenStream
+ *******************************************/
+
+/* Bridge in structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_IN_TLOPENSTREAM_TAG
+{
+	const IMG_CHAR *puiName;
+	IMG_UINT32 ui32Mode;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLOPENSTREAM;
+
+/* Bridge out structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLOPENSTREAM_TAG
+{
+	IMG_HANDLE hSD;
+	IMG_HANDLE hTLPMR;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLOPENSTREAM;
+
+/*******************************************
+            TLCloseStream
+ *******************************************/
+
+/* Bridge in structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCLOSESTREAM_TAG
+{
+	IMG_HANDLE hSD;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLCLOSESTREAM;
+
+/* Bridge out structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCLOSESTREAM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLCLOSESTREAM;
+
+/*******************************************
+            TLAcquireData
+ *******************************************/
+
+/* Bridge in structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_IN_TLACQUIREDATA_TAG
+{
+	IMG_HANDLE hSD;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLACQUIREDATA;
+
+/* Bridge out structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_OUT_TLACQUIREDATA_TAG
+{
+	IMG_UINT32 ui32ReadOffset;
+	IMG_UINT32 ui32ReadLen;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLACQUIREDATA;
+
+/*******************************************
+            TLReleaseData
+ *******************************************/
+
+/* Bridge in structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_IN_TLRELEASEDATA_TAG
+{
+	IMG_HANDLE hSD;
+	IMG_UINT32 ui32ReadOffset;
+	IMG_UINT32 ui32ReadLen;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLRELEASEDATA;
+
+/* Bridge out structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_OUT_TLRELEASEDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLRELEASEDATA;
+
+/*******************************************
+            TLDiscoverStreams
+ *******************************************/
+
+/* Bridge in structure for TLDiscoverStreams */
+typedef struct PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS_TAG
+{
+	const IMG_CHAR *puiNamePattern;
+	IMG_UINT32 ui32Size;
+	/* Output pointer puiStreams is also an implied input */
+	IMG_CHAR *puiStreams;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS;
+
+/* Bridge out structure for TLDiscoverStreams */
+typedef struct PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS_TAG
+{
+	IMG_CHAR *puiStreams;
+	IMG_UINT32 ui32NumFound;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS;
+
+/*******************************************
+            TLReserveStream
+ *******************************************/
+
+/* Bridge in structure for TLReserveStream */
+typedef struct PVRSRV_BRIDGE_IN_TLRESERVESTREAM_TAG
+{
+	IMG_HANDLE hSD;
+	IMG_UINT32 ui32Size;
+	IMG_UINT32 ui32SizeMin;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLRESERVESTREAM;
+
+/* Bridge out structure for TLReserveStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLRESERVESTREAM_TAG
+{
+	IMG_UINT32 ui32BufferOffset;
+	IMG_UINT32 ui32Available;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLRESERVESTREAM;
+
+/*******************************************
+            TLCommitStream
+ *******************************************/
+
+/* Bridge in structure for TLCommitStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCOMMITSTREAM_TAG
+{
+	IMG_HANDLE hSD;
+	IMG_UINT32 ui32ReqSize;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLCOMMITSTREAM;
+
+/* Bridge out structure for TLCommitStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM;
+
+/*******************************************
+            TLWriteData
+ *******************************************/
+
+/* Bridge in structure for TLWriteData */
+typedef struct PVRSRV_BRIDGE_IN_TLWRITEDATA_TAG
+{
+	IMG_HANDLE hSD;
+	IMG_UINT32 ui32Size;
+	IMG_BYTE *psData;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_TLWRITEDATA;
+
+/* Bridge out structure for TLWriteData */
+typedef struct PVRSRV_BRIDGE_OUT_TLWRITEDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_TLWRITEDATA;
+
+#endif /* COMMON_PVRTL_BRIDGE_H */
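TLAcquireData and TLReleaseData above form a strict pair: the out structure of the former returns an offset/length window, and the in structure of the latter hands the same window back. The window indexes the stream buffer that TLOpenStream exposed through hTLPMR. A reader-loop sketch, not part of this patch, with BridgeCall() the same hypothetical stand-in as in the earlier sketch and ConsumeBytes() a placeholder for the caller's processing; how hTLPMR gets mapped to pcStreamBuf is outside this header:

extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *pvIn, void *pvOut); /* assumed */
extern void ConsumeBytes(const IMG_CHAR *pcData, IMG_UINT32 ui32Len);        /* placeholder */

static void DrainStream(IMG_HANDLE hSD, const IMG_CHAR *pcStreamBuf)
{
	for (;;)
	{
		PVRSRV_BRIDGE_IN_TLACQUIREDATA sInAcq = { 0 };
		PVRSRV_BRIDGE_OUT_TLACQUIREDATA sOutAcq;
		PVRSRV_BRIDGE_IN_TLRELEASEDATA sInRel;
		PVRSRV_BRIDGE_OUT_TLRELEASEDATA sOutRel;

		sInAcq.hSD = hSD;
		if (BridgeCall(PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA,
			       &sInAcq, &sOutAcq) != PVRSRV_OK ||
		    sOutAcq.eError != PVRSRV_OK)
			break;

		ConsumeBytes(pcStreamBuf + sOutAcq.ui32ReadOffset,
			     sOutAcq.ui32ReadLen);

		/* Release exactly the window that was acquired. */
		sInRel.hSD = hSD;
		sInRel.ui32ReadOffset = sOutAcq.ui32ReadOffset;
		sInRel.ui32ReadLen = sOutAcq.ui32ReadLen;
		if (BridgeCall(PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA,
			       &sInRel, &sOutRel) != PVRSRV_OK ||
		    sOutRel.eError != PVRSRV_OK)
			break;
	}
}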
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxbreakpoint_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxbreakpoint_bridge.h
new file mode 100644
index 0000000..880f354
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxbreakpoint_bridge.h
@@ -0,0 +1,148 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxbreakpoint
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxbreakpoint
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXBREAKPOINT_BRIDGE_H
+#define COMMON_RGXBREAKPOINT_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT			PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT			PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT			PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT			PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS			PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_LAST			(PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+4)
+
+/*******************************************
+            RGXSetBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXSetBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT_TAG
+{
+	IMG_HANDLE hPrivData;
+	IMG_UINT32 eFWDataMaster;
+	IMG_UINT32 ui32BreakpointAddr;
+	IMG_UINT32 ui32HandlerAddr;
+	IMG_UINT32 ui32DM;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT;
+
+/* Bridge out structure for RGXSetBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT;
+
+/*******************************************
+            RGXClearBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXClearBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT_TAG
+{
+	IMG_HANDLE hPrivData;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT;
+
+/* Bridge out structure for RGXClearBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT;
+
+/*******************************************
+            RGXEnableBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXEnableBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT_TAG
+{
+	IMG_HANDLE hPrivData;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT;
+
+/* Bridge out structure for RGXEnableBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT;
+
+/*******************************************
+            RGXDisableBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXDisableBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT_TAG
+{
+	IMG_HANDLE hPrivData;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT;
+
+/* Bridge out structure for RGXDisableBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT;
+
+/*******************************************
+            RGXOverallocateBPRegisters
+ *******************************************/
+
+/* Bridge in structure for RGXOverallocateBPRegisters */
+typedef struct PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS_TAG
+{
+	IMG_UINT32 ui32TempRegs;
+	IMG_UINT32 ui32SharedRegs;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS;
+
+/* Bridge out structure for RGXOverallocateBPRegisters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS;
+
+#endif /* COMMON_RGXBREAKPOINT_BRIDGE_H */
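Each of these headers pins its command range with a CMD_FIRST/CMD_LAST pair. One use of the pair, sketched here rather than taken from this patch, is sizing a server-side dispatch table at compile time; the handler signature is an assumption:

typedef PVRSRV_ERROR (*BRIDGE_HANDLER_FN)(void *pvIn, void *pvOut);	/* assumed shape */

#define RGXBREAKPOINT_CMD_COUNT \
	(PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_LAST - \
	 PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST + 1)

/* Five entries for this bridge: set, clear, enable, disable, overallocate. */
static BRIDGE_HANDLER_FN g_apfnBreakpointHandlers[RGXBREAKPOINT_CMD_COUNT];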
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxcmp_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxcmp_bridge.h
new file mode 100644
index 0000000..09d76c8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxcmp_bridge.h
@@ -0,0 +1,245 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxcmp
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxcmp
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXCMP_BRIDGE_H
+#define COMMON_RGXCMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXCMP_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST			(PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7)
+
+/*******************************************
+            RGXCreateComputeContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT_TAG
+{
+	IMG_UINT32 ui32Priority;
+	IMG_UINT32 ui32FrameworkCmdize;
+	IMG_BYTE *psFrameworkCmd;
+	IMG_HANDLE hPrivData;
+	IMG_DEV_VIRTADDR sResumeSignalAddr;
+	IMG_UINT32 ui32StaticComputecontextStateSize;
+	IMG_BYTE *psStaticComputecontextState;
+	IMG_UINT32 ui32PackedCCBSizeU88;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT;
+
+/* Bridge out structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT_TAG
+{
+	IMG_HANDLE hComputeContext;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT;
+
+/*******************************************
+            RGXDestroyComputeContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT;
+
+/* Bridge out structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT;
+
+/*******************************************
+            RGXKickCDM
+ *******************************************/
+
+/* Bridge in structure for RGXKickCDM */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM_TAG
+{
+	IMG_HANDLE hComputeContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientFenceCount;
+	IMG_HANDLE *phClientFenceUFOSyncPrimBlock;
+	IMG_UINT32 *pui32ClientFenceOffset;
+	IMG_UINT32 *pui32ClientFenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_HANDLE *phClientUpdateUFOSyncPrimBlock;
+	IMG_UINT32 *pui32ClientUpdateOffset;
+	IMG_UINT32 *pui32ClientUpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 *pui32ServerSyncFlags;
+	IMG_HANDLE *phServerSyncs;
+	PVRSRV_FENCE hCheckFenceFd;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_CHAR *puiUpdateFenceName;
+	IMG_UINT32 ui32CmdSize;
+	IMG_BYTE *psDMCmd;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_UINT32 ui32ExtJobRef;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXKICKCDM;
+
+/* Bridge out structure for RGXKickCDM */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM_TAG
+{
+	PVRSRV_FENCE hUpdateFence;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXKICKCDM;
+
+/*******************************************
+            RGXFlushComputeData
+ *******************************************/
+
+/* Bridge in structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA;
+
+/* Bridge out structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA;
+
+/*******************************************
+            RGXSetComputeContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hComputeContext;
+	IMG_UINT32 ui32Priority;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY;
+
+/*******************************************
+            RGXGetLastComputeContextResetReason
+ *******************************************/
+
+/* Bridge in structure for RGXGetLastComputeContextResetReason */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON;
+
+/* Bridge out structure for RGXGetLastComputeContextResetReason */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON_TAG
+{
+	IMG_UINT32 ui32LastResetReason;
+	IMG_UINT32 ui32LastResetJobRef;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed))
+    PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON;
+
+/*******************************************
+            RGXNotifyComputeWriteOffsetUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXNotifyComputeWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
+
+/* Bridge out structure for RGXNotifyComputeWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
+
+/*******************************************
+            RGXKickCDM2
+ *******************************************/
+
+/* Bridge in structure for RGXKickCDM2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG
+{
+	IMG_HANDLE hComputeContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientFenceCount;
+	IMG_HANDLE *phClientFenceUFOSyncPrimBlock;
+	IMG_UINT32 *pui32ClientFenceOffset;
+	IMG_UINT32 *pui32ClientFenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_HANDLE *phClientUpdateUFOSyncPrimBlock;
+	IMG_UINT32 *pui32ClientUpdateOffset;
+	IMG_UINT32 *pui32ClientUpdateValue;
+	PVRSRV_FENCE hCheckFenceFd;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_CHAR *puiUpdateFenceName;
+	IMG_UINT32 ui32CmdSize;
+	IMG_BYTE *psDMCmd;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_UINT32 ui32ExtJobRef;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXKICKCDM2;
+
+/* Bridge out structure for RGXKickCDM2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM2_TAG
+{
+	PVRSRV_FENCE hUpdateFence;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXKICKCDM2;
+
+#endif /* COMMON_RGXCMP_BRIDGE_H */
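Every structure in these headers is declared __attribute__((packed)), so its size is exactly the sum of its members with no implicit padding, which is what keeps the byte layout of the in/out blocks deterministic across compilers. A compile-time check of that property on the out structure just above; it holds by construction:

_Static_assert(sizeof(PVRSRV_BRIDGE_OUT_RGXKICKCDM2) ==
	       sizeof(PVRSRV_FENCE) + sizeof(PVRSRV_ERROR),
	       "packed bridge structures carry no padding");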
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxfwdbg_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxfwdbg_bridge.h
new file mode 100644
index 0000000..0d8d951
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxfwdbg_bridge.h
@@ -0,0 +1,183 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxfwdbg
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxfwdbg
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXFWDBG_BRIDGE_H
+#define COMMON_RGXFWDBG_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "rgx_bridge.h"
+#include "pvrsrv_memallocflags.h"
+
+#define PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSLCSETBYPASSSTATE			PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG			PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST			PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE			PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY			PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE			PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME			PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST			(PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6)
+
+/*******************************************
+            RGXFWDebugSLCSetBypassState
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSLCSetBypassState */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSLCSETBYPASSSTATE_TAG
+{
+	IMG_UINT32 ui32Flags;
+	IMG_BOOL bIsBypassed;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSLCSETBYPASSSTATE;
+
+/* Bridge out structure for RGXFWDebugSLCSetBypassState */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSLCSETBYPASSSTATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGSLCSETBYPASSSTATE;
+
+/*******************************************
+            RGXFWDebugSetFWLog
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetFWLog */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG_TAG
+{
+	IMG_UINT32 ui32RGXFWLogType;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG;
+
+/* Bridge out structure for RGXFWDebugSetFWLog */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG;
+
+/*******************************************
+            RGXFWDebugDumpFreelistPageList
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST;
+
+/* Bridge out structure for RGXFWDebugDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST;
+
+/*******************************************
+            RGXFWDebugSetHCSDeadline
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetHCSDeadline */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE_TAG
+{
+	IMG_UINT32 ui32RGXHCSDeadline;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE;
+
+/* Bridge out structure for RGXFWDebugSetHCSDeadline */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE;
+
+/*******************************************
+            RGXFWDebugSetOSidPriority
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetOSidPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY_TAG
+{
+	IMG_UINT32 ui32OSid;
+	IMG_UINT32 ui32Priority;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY;
+
+/* Bridge out structure for RGXFWDebugSetOSidPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY;
+
+/*******************************************
+            RGXFWDebugSetOSNewOnlineState
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetOSNewOnlineState */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE_TAG
+{
+	IMG_UINT32 ui32OSid;
+	IMG_UINT32 ui32OSNewState;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE;
+
+/* Bridge out structure for RGXFWDebugSetOSNewOnlineState */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE;
+
+/*******************************************
+            RGXCurrentTime
+ *******************************************/
+
+/* Bridge in structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCURRENTTIME;
+
+/* Bridge out structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCURRENTTIME_TAG
+{
+	IMG_UINT64 ui64Time;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCURRENTTIME;
+
+#endif /* COMMON_RGXFWDBG_BRIDGE_H */
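Commands that take no arguments, such as RGXCurrentTime and RGXFWDebugDumpFreelistPageList above, still carry a ui32EmptyStructPlaceholder member. ISO C requires a struct to have at least one named member; an empty struct is only a GNU extension (sizeof 0 in C, while an empty class has sizeof 1 in C++), so the placeholder keeps the wire size fixed and well-defined everywhere. A compile-time restatement:

_Static_assert(sizeof(PVRSRV_BRIDGE_IN_RGXCURRENTTIME) == sizeof(IMG_UINT32),
	       "the placeholder pins the no-argument command at four bytes");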
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxhwperf_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxhwperf_bridge.h
new file mode 100644
index 0000000..7c23eef
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxhwperf_bridge.h
@@ -0,0 +1,152 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxhwperf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxhwperf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXHWPERF_BRIDGE_H
+#define COMMON_RGXHWPERF_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "rgx_hwperf.h"
+
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST			(PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4)
+
+/*******************************************
+            RGXCtrlHWPerf
+ *******************************************/
+
+/* Bridge in structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG
+{
+	IMG_UINT32 ui32StreamId;
+	IMG_BOOL bToggle;
+	IMG_UINT64 ui64Mask;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERF;
+
+/* Bridge out structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF;
+
+/*******************************************
+            RGXConfigEnableHWPerfCounters
+ *******************************************/
+
+/* Bridge in structure for RGXConfigEnableHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS_TAG
+{
+	IMG_UINT32 ui32ArrayLen;
+	RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigs;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS;
+
+/* Bridge out structure for RGXConfigEnableHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS;
+
+/*******************************************
+            RGXCtrlHWPerfCounters
+ *******************************************/
+
+/* Bridge in structure for RGXCtrlHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS_TAG
+{
+	IMG_BOOL bEnable;
+	IMG_UINT32 ui32ArrayLen;
+	IMG_UINT16 *pui16BlockIDs;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS;
+
+/* Bridge out structure for RGXCtrlHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS;
+
+/*******************************************
+            RGXConfigCustomCounters
+ *******************************************/
+
+/* Bridge in structure for RGXConfigCustomCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS_TAG
+{
+	IMG_UINT16 ui16CustomBlockID;
+	IMG_UINT16 ui16NumCustomCounters;
+	IMG_UINT32 *pui32CustomCounterIDs;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS;
+
+/* Bridge out structure for RGXConfigCustomCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS;
+
+/*******************************************
+            RGXGetHWPerfBvncFeatureFlags
+ *******************************************/
+
+/* Bridge in structure for RGXGetHWPerfBvncFeatureFlags */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS;
+
+/* Bridge out structure for RGXGetHWPerfBvncFeatureFlags */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS_TAG
+{
+	RGX_HWPERF_BVNC sBVNC;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS;
+
+#endif /* COMMON_RGXHWPERF_BRIDGE_H */
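RGXCtrlHWPerfCounters follows the count-plus-pointer convention used throughout these bridges: the caller owns the array and reports its length in ui32ArrayLen. A minimal sketch, not part of this patch, reusing the hypothetical BridgeCall() from the earlier sketches; the block IDs below are placeholders, with real values defined in rgx_hwperf.h:

extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *pvIn, void *pvOut); /* assumed */

static void EnableCounterBlocks(void)
{
	IMG_UINT16 aui16Blocks[] = { 0, 1 };	/* placeholder block IDs */
	PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS sIn;
	PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS sOut;

	sIn.bEnable = IMG_TRUE;
	sIn.ui32ArrayLen = sizeof(aui16Blocks) / sizeof(aui16Blocks[0]);
	sIn.pui16BlockIDs = aui16Blocks;

	(void)BridgeCall(PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS,
			 &sIn, &sOut);
	/* Success is reported through sOut.eError, as with every bridge call. */
}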
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxkicksync_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxkicksync_bridge.h
new file mode 100644
index 0000000..c556184
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxkicksync_bridge.h
@@ -0,0 +1,161 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxkicksync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxkicksync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXKICKSYNC_BRIDGE_H
+#define COMMON_RGXKICKSYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT			PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT			PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC			PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2			PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST			(PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3)
+
+/*******************************************
+            RGXCreateKickSyncContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+	IMG_HANDLE hPrivData;
+	IMG_UINT32 ui32PackedCCBSizeU88;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+	IMG_HANDLE hKickSyncContext;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT;
+
+/*******************************************
+            RGXDestroyKickSyncContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+	IMG_HANDLE hKickSyncContext;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT;
+
+/*******************************************
+            RGXKickSync
+ *******************************************/
+
+/* Bridge in structure for RGXKickSync */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC_TAG
+{
+	IMG_HANDLE hKickSyncContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientFenceCount;
+	IMG_HANDLE *phFenceUFOSyncPrimBlock;
+	IMG_UINT32 *pui32FenceSyncOffset;
+	IMG_UINT32 *pui32FenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_HANDLE *phUpdateUFOSyncPrimBlock;
+	IMG_UINT32 *pui32UpdateSyncOffset;
+	IMG_UINT32 *pui32UpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 *pui32ServerSyncFlags;
+	IMG_HANDLE *phServerSync;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE hTimelineFenceFD;
+	IMG_CHAR *puiUpdateFenceName;
+	IMG_UINT32 ui32ExtJobRef;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXKICKSYNC;
+
+/* Bridge out structure for RGXKickSync */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC_TAG
+{
+	PVRSRV_FENCE hUpdateFenceFD;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXKICKSYNC;
+
+/*******************************************
+            RGXKickSync2
+ *******************************************/
+
+/* Bridge in structure for RGXKickSync2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC2_TAG
+{
+	IMG_HANDLE hKickSyncContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientFenceCount;
+	IMG_HANDLE *phFenceUFOSyncPrimBlock;
+	IMG_UINT32 *pui32FenceSyncOffset;
+	IMG_UINT32 *pui32FenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_HANDLE *phUpdateUFOSyncPrimBlock;
+	IMG_UINT32 *pui32UpdateSyncOffset;
+	IMG_UINT32 *pui32UpdateValue;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE hTimelineFenceFD;
+	IMG_CHAR *puiUpdateFenceName;
+	IMG_UINT32 ui32ExtJobRef;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXKICKSYNC2;
+
+/* Bridge out structure for RGXKickSync2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC2_TAG
+{
+	PVRSRV_FENCE hUpdateFenceFD;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXKICKSYNC2;
+
+#endif /* COMMON_RGXKICKSYNC_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxregconfig_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxregconfig_bridge.h
new file mode 100644
index 0000000..c1cab95
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxregconfig_bridge.h
@@ -0,0 +1,145 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxregconfig
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxregconfig
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXREGCONFIG_BRIDGE_H
+#define COMMON_RGXREGCONFIG_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE			PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG			PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG			PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG			PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG			PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST			(PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4)
+
+/*******************************************
+            RGXSetRegConfigType
+ *******************************************/
+
+/* Bridge in structure for RGXSetRegConfigType */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE_TAG
+{
+	IMG_UINT8 ui8RegPowerIsland;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE;
+
+/* Bridge out structure for RGXSetRegConfigType */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE;
+
+/*******************************************
+            RGXAddRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXAddRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXADDREGCONFIG_TAG
+{
+	IMG_UINT32 ui32RegAddr;
+	IMG_UINT64 ui64RegValue;
+	IMG_UINT64 ui64RegMask;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXADDREGCONFIG;
+
+/* Bridge out structure for RGXAddRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG;
+
+/*******************************************
+            RGXClearRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG;
+
+/* Bridge out structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG;
+
+/*******************************************
+            RGXEnableRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG;
+
+/* Bridge out structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG;
+
+/*******************************************
+            RGXDisableRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG;
+
+/* Bridge out structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG;
+
+#endif /* COMMON_RGXREGCONFIG_BRIDGE_H */
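The register-configuration commands above read as a small state machine: pick the power-island type, add address/value/mask triples, then enable the set. That ordering is inferred from the command names rather than stated in this header, so treat this sketch, which is not part of this patch, as an assumption throughout; BridgeCall() is the same hypothetical stand-in as before and the register address is a placeholder:

extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *pvIn, void *pvOut); /* assumed */

static void ApplyOneRegConfig(void)
{
	PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE sInType = { 0 };
	PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE sOutType;
	PVRSRV_BRIDGE_IN_RGXADDREGCONFIG sInAdd;
	PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG sOutAdd;
	PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG sInEn = { 0 };
	PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG sOutEn;

	(void)BridgeCall(PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE,
			 &sInType, &sOutType);

	sInAdd.ui32RegAddr = 0x0;		/* placeholder register address */
	sInAdd.ui64RegValue = 0x0;
	sInAdd.ui64RegMask = ~(IMG_UINT64)0;	/* update all bits */
	(void)BridgeCall(PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG,
			 &sInAdd, &sOutAdd);

	(void)BridgeCall(PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG,
			 &sInEn, &sOutEn);
}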
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxsignals_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxsignals_bridge.h
new file mode 100644
index 0000000..de523c2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxsignals_bridge.h
@@ -0,0 +1,76 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxsignals
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxsignals
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXSIGNALS_BRIDGE_H
+#define COMMON_RGXSIGNALS_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE			(PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_LAST			(PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0)
+
+/*******************************************
+            RGXNotifySignalUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXNotifySignalUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE_TAG
+{
+	IMG_HANDLE hPrivData;
+	IMG_DEV_VIRTADDR sDevSignalAddress;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE;
+
+/* Bridge out structure for RGXNotifySignalUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE;
+
+#endif /* COMMON_RGXSIGNALS_BRIDGE_H */
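Every structure in these headers is declared __attribute__ ((packed)) so the client and kernel sides agree byte-for-byte on the layout regardless of their default alignment rules. A compile-time check of that property, sketched with stand-in typedefs (the IMG_* definitions below are simplified assumptions, not the real img_types.h):

#include <stdint.h>

typedef void *IMG_HANDLE;                             /* stand-in */
typedef struct { uint64_t uiAddr; } IMG_DEV_VIRTADDR; /* stand-in */

typedef struct
{
	IMG_HANDLE hPrivData;
	IMG_DEV_VIRTADDR sDevSignalAddress;
} __attribute__ ((packed)) BRIDGE_IN_EXAMPLE;

/* Packing removes any padding between the handle and the address, so the
 * struct size is exactly the sum of its members' sizes on any build. */
_Static_assert(sizeof(BRIDGE_IN_EXAMPLE) ==
               sizeof(IMG_HANDLE) + sizeof(IMG_DEV_VIRTADDR),
               "bridge structs must carry no padding");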
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxta3d_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxta3d_bridge.h
new file mode 100644
index 0000000..e55376e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxta3d_bridge.h
@@ -0,0 +1,538 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxta3d
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxta3d
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTA3D_BRIDGE_H
+#define COMMON_RGXTA3D_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "rgx_fwif_shared.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATA			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATA			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERTARGET			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERTARGET			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXGETPARTIALRENDERCOUNT			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+15)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+16)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+17)
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+17)
+
+/*******************************************
+            RGXCreateHWRTData
+ *******************************************/
+
+/* Bridge in structure for RGXCreateHWRTData */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA_TAG
+{
+	IMG_UINT32 ui32RenderTarget;
+	IMG_DEV_VIRTADDR sPMMlistDevVAddr;
+	IMG_HANDLE *phapsFreeLists;
+	IMG_UINT32 ui32PPPScreen;
+	IMG_UINT64 ui64MultiSampleCtl;
+	IMG_UINT64 ui64FlippedMultiSampleCtl;
+	IMG_UINT32 ui32TPCStride;
+	IMG_DEV_VIRTADDR sTailPtrsDevVAddr;
+	IMG_UINT32 ui32TPCSize;
+	IMG_UINT32 ui32TEScreen;
+	IMG_UINT32 ui32TEAA;
+	IMG_UINT32 ui32TEMTILE1;
+	IMG_UINT32 ui32TEMTILE2;
+	IMG_UINT32 ui32MTileStride;
+	IMG_UINT32 ui32ui32ISPMergeLowerX;
+	IMG_UINT32 ui32ui32ISPMergeLowerY;
+	IMG_UINT32 ui32ui32ISPMergeUpperX;
+	IMG_UINT32 ui32ui32ISPMergeUpperY;
+	IMG_UINT32 ui32ui32ISPMergeScaleX;
+	IMG_UINT32 ui32ui32ISPMergeScaleY;
+	IMG_DEV_VIRTADDR ssMacrotileArrayDevVAddr;
+	IMG_DEV_VIRTADDR ssRgnHeaderDevVAddr;
+	IMG_DEV_VIRTADDR ssRTCDevVAddr;
+	IMG_UINT64 ui64uiRgnHeaderSize;
+	IMG_UINT32 ui32ui32ISPMtileSize;
+	IMG_UINT16 ui16MaxRTs;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA;
+
+/* Bridge out structure for RGXCreateHWRTData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+	IMG_HANDLE hsHWRTDataMemDesc;
+	IMG_UINT32 ui32FWHWRTData;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA;
+
+/*******************************************
+            RGXDestroyHWRTData
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyHWRTData */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA;
+
+/* Bridge out structure for RGXDestroyHWRTData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA;
+
+/*******************************************
+            RGXCreateRenderTarget
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRenderTarget */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET_TAG
+{
+	IMG_DEV_VIRTADDR spsVHeapTableDevVAddr;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET;
+
+/* Bridge out structure for RGXCreateRenderTarget */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET_TAG
+{
+	IMG_HANDLE hsRenderTargetMemDesc;
+	IMG_UINT32 ui32sRenderTargetFWDevVAddr;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET;
+
+/*******************************************
+            RGXDestroyRenderTarget
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRenderTarget */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET_TAG
+{
+	IMG_HANDLE hsRenderTargetMemDesc;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET;
+
+/* Bridge out structure for RGXDestroyRenderTarget */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET;
+
+/*******************************************
+            RGXCreateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER_TAG
+{
+	IMG_HANDLE hReservation;
+	IMG_HANDLE hPMR;
+	PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER;
+
+/* Bridge out structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER_TAG
+{
+	IMG_HANDLE hsZSBufferKM;
+	IMG_UINT32 ui32sZSBufferFWDevVAddr;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER;
+
+/*******************************************
+            RGXDestroyZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER_TAG
+{
+	IMG_HANDLE hsZSBufferMemDesc;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER;
+
+/* Bridge out structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER;
+
+/*******************************************
+            RGXPopulateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER_TAG
+{
+	IMG_HANDLE hsZSBufferKM;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER;
+
+/* Bridge out structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER_TAG
+{
+	IMG_HANDLE hsPopulation;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER;
+
+/*******************************************
+            RGXUnpopulateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER_TAG
+{
+	IMG_HANDLE hsPopulation;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER;
+
+/* Bridge out structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER;
+
+/*******************************************
+            RGXCreateFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG
+{
+	IMG_UINT32 ui32ui32MaxFLPages;
+	IMG_UINT32 ui32ui32InitFLPages;
+	IMG_UINT32 ui32ui32GrowFLPages;
+	IMG_UINT32 ui32ui32GrowParamThreshold;
+	IMG_HANDLE hsGlobalFreeList;
+	IMG_BOOL bbFreeListCheck;
+	IMG_DEV_VIRTADDR spsFreeListDevVAddr;
+	IMG_HANDLE hsFreeListPMR;
+	IMG_DEVMEM_OFFSET_T uiPMROffset;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATEFREELIST;
+
+/* Bridge out structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST;
+
+/*******************************************
+            RGXDestroyFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST;
+
+/* Bridge out structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST;
+
+/*******************************************
+            RGXCreateRenderContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG
+{
+	IMG_UINT32 ui32Priority;
+	IMG_DEV_VIRTADDR sVDMCallStackAddr;
+	IMG_UINT32 ui32FrameworkCmdize;
+	IMG_BYTE *psFrameworkCmd;
+	IMG_HANDLE hPrivData;
+	IMG_UINT32 ui32CtxSwitchSize;
+	IMG_BYTE *psRegs;
+	IMG_UINT32 ui32StaticRendercontextStateSize;
+	IMG_BYTE *psStaticRendercontextState;
+	IMG_UINT32 ui32PackedCCBSizeU8888;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT;
+
+/* Bridge out structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG
+{
+	IMG_HANDLE hRenderContext;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT;
+
+/*******************************************
+            RGXDestroyRenderContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT;
+
+/* Bridge out structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT;
+
+/*******************************************
+            RGXKickTA3D
+ *******************************************/
+
+/* Bridge in structure for RGXKickTA3D */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D_TAG
+{
+	IMG_HANDLE hRenderContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientTAFenceCount;
+	IMG_HANDLE *phClientTAFenceSyncPrimBlock;
+	IMG_UINT32 *pui32ClientTAFenceSyncOffset;
+	IMG_UINT32 *pui32ClientTAFenceValue;
+	IMG_UINT32 ui32ClientTAUpdateCount;
+	IMG_HANDLE *phClientTAUpdateSyncPrimBlock;
+	IMG_UINT32 *pui32ClientTAUpdateSyncOffset;
+	IMG_UINT32 *pui32ClientTAUpdateValue;
+	IMG_UINT32 ui32ServerTASyncPrims;
+	IMG_UINT32 *pui32ServerTASyncFlags;
+	IMG_HANDLE *phServerTASyncs;
+	IMG_UINT32 ui32Client3DFenceCount;
+	IMG_HANDLE *phClient3DFenceSyncPrimBlock;
+	IMG_UINT32 *pui32Client3DFenceSyncOffset;
+	IMG_UINT32 *pui32Client3DFenceValue;
+	IMG_UINT32 ui32Client3DUpdateCount;
+	IMG_HANDLE *phClient3DUpdateSyncPrimBlock;
+	IMG_UINT32 *pui32Client3DUpdateSyncOffset;
+	IMG_UINT32 *pui32Client3DUpdateValue;
+	IMG_UINT32 ui32Server3DSyncPrims;
+	IMG_UINT32 *pui32Server3DSyncFlags;
+	IMG_HANDLE *phServer3DSyncs;
+	IMG_HANDLE hPRFenceUFOSyncPrimBlock;
+	IMG_UINT32 ui32FRFenceUFOSyncOffset;
+	IMG_UINT32 ui32FRFenceValue;
+	PVRSRV_FENCE hCheckFence;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_CHAR *puiUpdateFenceName;
+	PVRSRV_FENCE hCheckFence3D;
+	PVRSRV_TIMELINE hUpdateTimeline3D;
+	IMG_CHAR *puiUpdateFenceName3D;
+	IMG_UINT32 ui32TACmdSize;
+	IMG_BYTE *psTACmd;
+	IMG_UINT32 ui323DPRCmdSize;
+	IMG_BYTE *ps3DPRCmd;
+	IMG_UINT32 ui323DCmdSize;
+	IMG_BYTE *ps3DCmd;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_BOOL bbLastTAInScene;
+	IMG_BOOL bbKickTA;
+	IMG_BOOL bbKickPR;
+	IMG_BOOL bbKick3D;
+	IMG_BOOL bbAbort;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_HANDLE hRTDataCleanup;
+	IMG_HANDLE hZBuffer;
+	IMG_HANDLE hSBuffer;
+	IMG_HANDLE hMSAAScratchBuffer;
+	IMG_UINT32 ui32SyncPMRCount;
+	IMG_UINT32 *pui32SyncPMRFlags;
+	IMG_HANDLE *phSyncPMRs;
+	IMG_UINT32 ui32RenderTargetSize;
+	IMG_UINT32 ui32NumberOfDrawCalls;
+	IMG_UINT32 ui32NumberOfIndices;
+	IMG_UINT32 ui32NumberOfMRTs;
+	IMG_UINT64 ui64Deadline;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXKICKTA3D;
+
+/* Bridge out structure for RGXKickTA3D */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D_TAG
+{
+	PVRSRV_FENCE hUpdateFence;
+	PVRSRV_FENCE hUpdateFence3D;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXKICKTA3D;
+
+/*******************************************
+            RGXSetRenderContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hRenderContext;
+	IMG_UINT32 ui32Priority;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY;
+
+/*******************************************
+            RGXGetLastRenderContextResetReason
+ *******************************************/
+
+/* Bridge in structure for RGXGetLastRenderContextResetReason */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON_TAG
+{
+	IMG_HANDLE hRenderContext;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON;
+
+/* Bridge out structure for RGXGetLastRenderContextResetReason */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON_TAG
+{
+	IMG_UINT32 ui32LastResetReason;
+	IMG_UINT32 ui32LastResetJobRef;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON;
+
+/*******************************************
+            RGXGetPartialRenderCount
+ *******************************************/
+
+/* Bridge in structure for RGXGetPartialRenderCount */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT_TAG
+{
+	IMG_HANDLE hHWRTDataMemDesc;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT;
+
+/* Bridge out structure for RGXGetPartialRenderCount */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT_TAG
+{
+	IMG_UINT32 ui32NumPartialRenders;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT;
+
+/*******************************************
+            RGXRenderContextStalled
+ *******************************************/
+
+/* Bridge in structure for RGXRenderContextStalled */
+typedef struct PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED_TAG
+{
+	IMG_HANDLE hRenderContext;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED;
+
+/* Bridge out structure for RGXRenderContextStalled */
+typedef struct PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED;
+
+/*******************************************
+            RGXKickTA3D2
+ *******************************************/
+
+/* Bridge in structure for RGXKickTA3D2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D2_TAG
+{
+	IMG_HANDLE hRenderContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientTAFenceCount;
+	IMG_HANDLE *phClientTAFenceSyncPrimBlock;
+	IMG_UINT32 *pui32ClientTAFenceSyncOffset;
+	IMG_UINT32 *pui32ClientTAFenceValue;
+	IMG_UINT32 ui32ClientTAUpdateCount;
+	IMG_HANDLE *phClientTAUpdateSyncPrimBlock;
+	IMG_UINT32 *pui32ClientTAUpdateSyncOffset;
+	IMG_UINT32 *pui32ClientTAUpdateValue;
+	IMG_UINT32 ui32Client3DFenceCount;
+	IMG_HANDLE *phClient3DFenceSyncPrimBlock;
+	IMG_UINT32 *pui32Client3DFenceSyncOffset;
+	IMG_UINT32 *pui32Client3DFenceValue;
+	IMG_UINT32 ui32Client3DUpdateCount;
+	IMG_HANDLE *phClient3DUpdateSyncPrimBlock;
+	IMG_UINT32 *pui32Client3DUpdateSyncOffset;
+	IMG_UINT32 *pui32Client3DUpdateValue;
+	IMG_HANDLE hPRFenceUFOSyncPrimBlock;
+	IMG_UINT32 ui32FRFenceUFOSyncOffset;
+	IMG_UINT32 ui32FRFenceValue;
+	PVRSRV_FENCE hCheckFence;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_CHAR *puiUpdateFenceName;
+	PVRSRV_FENCE hCheckFence3D;
+	PVRSRV_TIMELINE hUpdateTimeline3D;
+	IMG_CHAR *puiUpdateFenceName3D;
+	IMG_UINT32 ui32TACmdSize;
+	IMG_BYTE *psTACmd;
+	IMG_UINT32 ui323DPRCmdSize;
+	IMG_BYTE *ps3DPRCmd;
+	IMG_UINT32 ui323DCmdSize;
+	IMG_BYTE *ps3DCmd;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_BOOL bbLastTAInScene;
+	IMG_BOOL bbKickTA;
+	IMG_BOOL bbKickPR;
+	IMG_BOOL bbKick3D;
+	IMG_BOOL bbAbort;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_HANDLE hRTDataCleanup;
+	IMG_HANDLE hZBuffer;
+	IMG_HANDLE hSBuffer;
+	IMG_HANDLE hMSAAScratchBuffer;
+	IMG_UINT32 ui32SyncPMRCount;
+	IMG_UINT32 *pui32SyncPMRFlags;
+	IMG_HANDLE *phSyncPMRs;
+	IMG_UINT32 ui32RenderTargetSize;
+	IMG_UINT32 ui32NumberOfDrawCalls;
+	IMG_UINT32 ui32NumberOfIndices;
+	IMG_UINT32 ui32NumberOfMRTs;
+	IMG_UINT64 ui64Deadline;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXKICKTA3D2;
+
+/* Bridge out structure for RGXKickTA3D2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D2_TAG
+{
+	PVRSRV_FENCE hUpdateFence;
+	PVRSRV_FENCE hUpdateFence3D;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXKICKTA3D2;
+
+#endif /* COMMON_RGXTA3D_BRIDGE_H */
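The large kick structures above pass variable-length data as pointer members paired with explicit counts (for example phClientTAFenceSyncPrimBlock with ui32ClientTAFenceCount): only the fixed-size struct crosses the bridge as-is, and the arrays behind the pointers are copied separately against the stated counts. A reduced sketch of assembling one such group (the struct and helper names are illustrative, not the driver's):

#include <stdint.h>

typedef void *IMG_HANDLE;   /* stand-in for the real typedef */

struct fence_list {          /* reduced model of one count+pointers group */
	uint32_t count;
	IMG_HANDLE *sync_blocks; /* one sync-prim block handle per fence */
	uint32_t *sync_offsets;  /* byte offset of each UFO in its block */
	uint32_t *values;        /* value each fence must reach */
};

static void set_fence_list(struct fence_list *fl, uint32_t n,
                           IMG_HANDLE *blocks, uint32_t *offsets,
                           uint32_t *values)
{
	fl->count = n;             /* the server validates n before copying */
	fl->sync_blocks = blocks;  /* all three arrays must hold n entries */
	fl->sync_offsets = offsets;
	fl->values = values;
}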
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxtq2_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxtq2_bridge.h
new file mode 100644
index 0000000..9db446d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxtq2_bridge.h
@@ -0,0 +1,212 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxtq2
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxtq2
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTQ2_BRIDGE_H
+#define COMMON_RGXTQ2_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT			(PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT			(PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER			(PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY			(PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE			(PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4)
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2			(PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+5)
+#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST			(PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+5)
+
+/*******************************************
+            RGXTDMCreateTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXTDMCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG
+{
+	IMG_UINT32 ui32Priority;
+	IMG_UINT32 ui32FrameworkCmdize;
+	IMG_BYTE *psFrameworkCmd;
+	IMG_HANDLE hPrivData;
+	IMG_UINT32 ui32PackedCCBSizeU88;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT;
+
+/* Bridge out structure for RGXTDMCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT_TAG
+{
+	IMG_HANDLE hTransferContext;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT;
+
+/*******************************************
+            RGXTDMDestroyTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXTDMDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT_TAG
+{
+	IMG_HANDLE hTransferContext;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT;
+
+/* Bridge out structure for RGXTDMDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT;
+
+/*******************************************
+            RGXTDMSubmitTransfer
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientFenceCount;
+	IMG_HANDLE *phFenceUFOSyncPrimBlock;
+	IMG_UINT32 *pui32FenceSyncOffset;
+	IMG_UINT32 *pui32FenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_HANDLE *phUpdateUFOSyncPrimBlock;
+	IMG_UINT32 *pui32UpdateSyncOffset;
+	IMG_UINT32 *pui32UpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 *pui32ServerSyncFlags;
+	IMG_HANDLE *phServerSync;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_CHAR *puiUpdateFenceName;
+	IMG_UINT32 ui32CommandSize;
+	IMG_UINT8 *pui8FWCommand;
+	IMG_UINT32 ui32ExternalJobReference;
+	IMG_UINT32 ui32SyncPMRCount;
+	IMG_UINT32 *pui32SyncPMRFlags;
+	IMG_HANDLE *phSyncPMRs;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER;
+
+/* Bridge out structure for RGXTDMSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER_TAG
+{
+	PVRSRV_FENCE hUpdateFence;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER;
+
+/*******************************************
+            RGXTDMSetTransferContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32Priority;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXTDMSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY;
+
+/*******************************************
+            RGXTDMNotifyWriteOffsetUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXTDMNotifyWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE;
+
+/* Bridge out structure for RGXTDMNotifyWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE;
+
+/*******************************************
+            RGXTDMSubmitTransfer2
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSubmitTransfer2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientFenceCount;
+	IMG_HANDLE *phFenceUFOSyncPrimBlock;
+	IMG_UINT32 *pui32FenceSyncOffset;
+	IMG_UINT32 *pui32FenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_HANDLE *phUpdateUFOSyncPrimBlock;
+	IMG_UINT32 *pui32UpdateSyncOffset;
+	IMG_UINT32 *pui32UpdateValue;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_CHAR *puiUpdateFenceName;
+	IMG_UINT32 ui32CommandSize;
+	IMG_UINT8 *pui8FWCommand;
+	IMG_UINT32 ui32ExternalJobReference;
+	IMG_UINT32 ui32SyncPMRCount;
+	IMG_UINT32 *pui32SyncPMRFlags;
+	IMG_HANDLE *phSyncPMRs;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2;
+
+/* Bridge out structure for RGXTDMSubmitTransfer2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2_TAG
+{
+	PVRSRV_FENCE hUpdateFence;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2;
+
+#endif /* COMMON_RGXTQ2_BRIDGE_H */
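The ui32PackedCCBSizeU88 field above (and ui32PackedCCBSizeU8888 in the render-context structure earlier) packs several 8-bit size fields into one 32-bit word, as the U88/U8888 suffixes suggest. The exact bit positions are not stated in this header, so the layout in the sketch below is an assumption for illustration only:

#include <stdint.h>

/* Two 8-bit fields in the low 16 bits of a 32-bit word ("U88"). */
#define PACK_U88(a, b)    ((uint32_t)(((a) & 0xFFu) | (((b) & 0xFFu) << 8)))
#define UNPACK_U88_LO(v)  ((uint32_t)((v) & 0xFFu))
#define UNPACK_U88_HI(v)  ((uint32_t)(((v) >> 8) & 0xFFu))

/* Four 8-bit fields ("U8888") extend the same scheme across all 32 bits. */
#define PACK_U8888(a, b, c, d) \
	(PACK_U88(a, b) | (PACK_U88(c, d) << 16))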
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxtq_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxtq_bridge.h
new file mode 100644
index 0000000..e58ad48e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_rgxtq_bridge.h
@@ -0,0 +1,200 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxtq
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxtq
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTQ_BRIDGE_H
+#define COMMON_RGXTQ_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXTQ_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT			(PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT			(PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER			(PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY			(PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2			(PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4)
+#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST			(PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4)
+
+/*******************************************
+            RGXCreateTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT_TAG
+{
+	IMG_UINT32 ui32Priority;
+	IMG_UINT32 ui32FrameworkCmdize;
+	IMG_BYTE *psFrameworkCmd;
+	IMG_HANDLE hPrivData;
+	IMG_UINT32 ui32PackedCCBSizeU8888;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT;
+
+/* Bridge out structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT_TAG
+{
+	IMG_HANDLE hTransferContext;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT;
+
+/*******************************************
+            RGXDestroyTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+	IMG_HANDLE hTransferContext;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT;
+
+/* Bridge out structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT;
+
+/*******************************************
+            RGXSubmitTransfer
+ *******************************************/
+
+/* Bridge in structure for RGXSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32PrepareCount;
+	IMG_UINT32 *pui32ClientFenceCount;
+	IMG_HANDLE **phFenceUFOSyncPrimBlock;
+	IMG_UINT32 **pui32FenceSyncOffset;
+	IMG_UINT32 **pui32FenceValue;
+	IMG_UINT32 *pui32ClientUpdateCount;
+	IMG_HANDLE **phUpdateUFOSyncPrimBlock;
+	IMG_UINT32 **pui32UpdateSyncOffset;
+	IMG_UINT32 **pui32UpdateValue;
+	IMG_UINT32 *pui32ServerSyncCount;
+	IMG_UINT32 **pui32ServerSyncFlags;
+	IMG_HANDLE **phServerSync;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE h2DUpdateTimeline;
+	PVRSRV_TIMELINE h3DUpdateTimeline;
+	IMG_CHAR *puiUpdateFenceName;
+	IMG_UINT32 *pui32CommandSize;
+	IMG_UINT8 **pui8FWCommand;
+	IMG_UINT32 *pui32TQPrepareFlags;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_UINT32 ui32SyncPMRCount;
+	IMG_UINT32 *pui32SyncPMRFlags;
+	IMG_HANDLE *phSyncPMRs;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER;
+
+/* Bridge out structure for RGXSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER_TAG
+{
+	PVRSRV_FENCE h2DUpdateFence;
+	PVRSRV_FENCE h3DUpdateFence;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER;
+
+/*******************************************
+            RGXSetTransferContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32Priority;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY;
+
+/*******************************************
+            RGXSubmitTransfer2
+ *******************************************/
+
+/* Bridge in structure for RGXSubmitTransfer2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32PrepareCount;
+	IMG_UINT32 *pui32ClientFenceCount;
+	IMG_HANDLE **phFenceUFOSyncPrimBlock;
+	IMG_UINT32 **pui32FenceSyncOffset;
+	IMG_UINT32 **pui32FenceValue;
+	IMG_UINT32 *pui32ClientUpdateCount;
+	IMG_HANDLE **phUpdateUFOSyncPrimBlock;
+	IMG_UINT32 **pui32UpdateSyncOffset;
+	IMG_UINT32 **pui32UpdateValue;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE h2DUpdateTimeline;
+	PVRSRV_TIMELINE h3DUpdateTimeline;
+	IMG_CHAR *puiUpdateFenceName;
+	IMG_UINT32 *pui32CommandSize;
+	IMG_UINT8 **pui8FWCommand;
+	IMG_UINT32 *pui32TQPrepareFlags;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_UINT32 ui32SyncPMRCount;
+	IMG_UINT32 *pui32SyncPMRFlags;
+	IMG_HANDLE *phSyncPMRs;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2;
+
+/* Bridge out structure for RGXSubmitTransfer2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2_TAG
+{
+	PVRSRV_FENCE h2DUpdateFence;
+	PVRSRV_FENCE h3DUpdateFence;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2;
+
+#endif /* COMMON_RGXTQ_BRIDGE_H */
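Unlike the TDM variant above, RGXSubmitTransfer batches several prepares in one call: ui32PrepareCount sizes the outer arrays, and the double-pointer members (e.g. pui32FenceValue) each hold one inner array per prepare, sized by the matching per-prepare count. A sketch of walking that two-level layout (function and parameter names hypothetical):

#include <stdint.h>

/* Sum every fence value across all prepares of one submit.
 * fence_counts[i] gives the length of fence_values[i]. */
static uint64_t sum_fence_values(uint32_t prepare_count,
                                 const uint32_t *fence_counts,
                                 const uint32_t *const *fence_values)
{
	uint64_t total = 0;
	uint32_t i, j;

	for (i = 0; i < prepare_count; i++)           /* outer: per prepare */
		for (j = 0; j < fence_counts[i]; j++) /* inner: per fence   */
			total += fence_values[i][j];
	return total;
}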
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_ri_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_ri_bridge.h
new file mode 100644
index 0000000..e562671
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_ri_bridge.h
@@ -0,0 +1,224 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for ri
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RI_BRIDGE_H
+#define COMMON_RI_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "ri_typedefs.h"
+
+#define PVRSRV_BRIDGE_RI_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY			(PVRSRV_BRIDGE_RI_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY			(PVRSRV_BRIDGE_RI_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY			(PVRSRV_BRIDGE_RI_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR			(PVRSRV_BRIDGE_RI_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY			(PVRSRV_BRIDGE_RI_CMD_FIRST+4)
+#define PVRSRV_BRIDGE_RI_RIDUMPLIST			(PVRSRV_BRIDGE_RI_CMD_FIRST+5)
+#define PVRSRV_BRIDGE_RI_RIDUMPALL			(PVRSRV_BRIDGE_RI_CMD_FIRST+6)
+#define PVRSRV_BRIDGE_RI_RIDUMPPROCESS			(PVRSRV_BRIDGE_RI_CMD_FIRST+7)
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER			(PVRSRV_BRIDGE_RI_CMD_FIRST+8)
+#define PVRSRV_BRIDGE_RI_CMD_LAST			(PVRSRV_BRIDGE_RI_CMD_FIRST+8)
+
+/*******************************************
+            RIWritePMREntry
+ *******************************************/
+
+/* Bridge in structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY_TAG
+{
+	IMG_HANDLE hPMRHandle;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY;
+
+/* Bridge out structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY;
+
+/*******************************************
+            RIWriteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG
+{
+	IMG_HANDLE hPMRHandle;
+	IMG_UINT32 ui32TextBSize;
+	const IMG_CHAR *puiTextB;
+	IMG_UINT64 ui64Offset;
+	IMG_UINT64 ui64Size;
+	IMG_BOOL bIsImport;
+	IMG_BOOL bIsSuballoc;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY;
+
+/* Bridge out structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY_TAG
+{
+	IMG_HANDLE hRIHandle;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY;
+
+/*******************************************
+            RIWriteProcListEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteProcListEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY_TAG
+{
+	IMG_UINT32 ui32TextBSize;
+	const IMG_CHAR *puiTextB;
+	IMG_UINT64 ui64Size;
+	IMG_UINT64 ui64DevVAddr;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY;
+
+/* Bridge out structure for RIWriteProcListEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY_TAG
+{
+	IMG_HANDLE hRIHandle;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY;
+
+/*******************************************
+            RIUpdateMEMDESCAddr
+ *******************************************/
+
+/* Bridge in structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR_TAG
+{
+	IMG_HANDLE hRIHandle;
+	IMG_DEV_VIRTADDR sAddr;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR;
+
+/* Bridge out structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR;
+
+/*******************************************
+            RIDeleteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY_TAG
+{
+	IMG_HANDLE hRIHandle;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY;
+
+/* Bridge out structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY;
+
+/*******************************************
+            RIDumpList
+ *******************************************/
+
+/* Bridge in structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPLIST_TAG
+{
+	IMG_HANDLE hPMRHandle;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIDUMPLIST;
+
+/* Bridge out structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPLIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIDUMPLIST;
+
+/*******************************************
+            RIDumpAll
+ *******************************************/
+
+/* Bridge in structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPALL_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIDUMPALL;
+
+/* Bridge out structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPALL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIDUMPALL;
+
+/*******************************************
+            RIDumpProcess
+ *******************************************/
+
+/* Bridge in structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPPROCESS_TAG
+{
+	IMG_PID ui32Pid;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIDUMPPROCESS;
+
+/* Bridge out structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPPROCESS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIDUMPPROCESS;
+
+/*******************************************
+            RIWritePMREntryWithOwner
+ *******************************************/
+
+/* Bridge in structure for RIWritePMREntryWithOwner */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER_TAG
+{
+	IMG_HANDLE hPMRHandle;
+	IMG_PID ui32Owner;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER;
+
+/* Bridge out structure for RIWritePMREntryWithOwner */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER;
+
+#endif /* COMMON_RI_BRIDGE_H */
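The RI (resource information) calls above pass their annotation strings as an explicit (size, pointer) pair, e.g. ui32TextBSize with puiTextB, rather than relying on NUL termination across the user/kernel boundary. A small helper sketch for building such a pair (the helper name is hypothetical, and whether the size includes the terminator is an assumption):

#include <stdint.h>
#include <string.h>

static void set_text_b(uint32_t *size_out, const char **text_out,
                       const char *annotation)
{
	*text_out = annotation;
	/* Count the terminating NUL so the copied buffer stays a valid
	 * C string on the server side. */
	*size_out = (uint32_t)strlen(annotation) + 1;
}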
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_srvcore_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_srvcore_bridge.h
new file mode 100644
index 0000000..1a12d0f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_srvcore_bridge.h
@@ -0,0 +1,350 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for srvcore
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for srvcore
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SRVCORE_BRIDGE_H
+#define COMMON_SRVCORE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_device_types.h"
+#include "cache_ops.h"
+
+#define PVRSRV_BRIDGE_SRVCORE_CMD_FIRST			0
+#define PVRSRV_BRIDGE_SRVCORE_CONNECT			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_SRVCORE_DISCONNECT			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+4)
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+5)
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+6)
+#define PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+7)
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+8)
+#define PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+9)
+#define PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+10)
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+11)
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+12)
+#define PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13)
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+14)
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+15)
+#define PVRSRV_BRIDGE_SRVCORE_CMD_LAST			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+15)
+
+/*******************************************
+            Connect
+ *******************************************/
+
+/* Bridge in structure for Connect */
+typedef struct PVRSRV_BRIDGE_IN_CONNECT_TAG
+{
+	IMG_UINT32 ui32Flags;
+	IMG_UINT32 ui32ClientBuildOptions;
+	IMG_UINT32 ui32ClientDDKVersion;
+	IMG_UINT32 ui32ClientDDKBuild;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_CONNECT;
+
+/* Bridge out structure for Connect */
+typedef struct PVRSRV_BRIDGE_OUT_CONNECT_TAG
+{
+	IMG_UINT8 ui8KernelArch;
+	IMG_UINT32 ui32CapabilityFlags;
+	IMG_UINT32 ui32PVRBridges;
+	IMG_UINT32 ui32RGXBridges;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_CONNECT;
+
+/*******************************************
+            Disconnect
+ *******************************************/
+
+/* Bridge in structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_IN_DISCONNECT_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DISCONNECT;
+
+/* Bridge out structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_OUT_DISCONNECT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DISCONNECT;
+
+/*******************************************
+            AcquireGlobalEventObject
+ *******************************************/
+
+/* Bridge in structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT;
+
+/* Bridge out structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+	IMG_HANDLE hGlobalEventObject;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT;
+
+/*******************************************
+            ReleaseGlobalEventObject
+ *******************************************/
+
+/* Bridge in structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT_TAG
+{
+	IMG_HANDLE hGlobalEventObject;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT;
+
+/* Bridge out structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT;
+
+/*******************************************
+            EventObjectOpen
+ *******************************************/
+
+/* Bridge in structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN_TAG
+{
+	IMG_HANDLE hEventObject;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN;
+
+/* Bridge out structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN_TAG
+{
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN;
+
+/*******************************************
+            EventObjectWait
+ *******************************************/
+
+/* Bridge in structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT_TAG
+{
+	IMG_HANDLE hOSEventKM;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT;
+
+/* Bridge out structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT;
+
+/*******************************************
+            EventObjectClose
+ *******************************************/
+
+/* Bridge in structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE_TAG
+{
+	IMG_HANDLE hOSEventKM;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE;
+
+/* Bridge out structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE;
+
+/*******************************************
+            DumpDebugInfo
+ *******************************************/
+
+/* Bridge in structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_IN_DUMPDEBUGINFO_TAG
+{
+	IMG_UINT32 ui32ui32VerbLevel;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_DUMPDEBUGINFO;
+
+/* Bridge out structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO;
+
+/*******************************************
+            GetDevClockSpeed
+ *******************************************/
+
+/* Bridge in structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED;
+
+/* Bridge out structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED_TAG
+{
+	IMG_UINT32 ui32ui32ClockSpeed;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED;
+
+/*******************************************
+            HWOpTimeout
+ *******************************************/
+
+/* Bridge in structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_IN_HWOPTIMEOUT_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_HWOPTIMEOUT;
+
+/* Bridge out structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_HWOPTIMEOUT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_HWOPTIMEOUT;
+
+/*******************************************
+            AlignmentCheck
+ *******************************************/
+
+/* Bridge in structure for AlignmentCheck */
+typedef struct PVRSRV_BRIDGE_IN_ALIGNMENTCHECK_TAG
+{
+	IMG_UINT32 ui32AlignChecksSize;
+	IMG_UINT32 *pui32AlignChecks;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_ALIGNMENTCHECK;
+
+/* Bridge out structure for AlignmentCheck */
+typedef struct PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK;
+
+/*******************************************
+            GetDeviceStatus
+ *******************************************/
+
+/* Bridge in structure for GetDeviceStatus */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVICESTATUS_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_GETDEVICESTATUS;
+
+/* Bridge out structure for GetDeviceStatus */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVICESTATUS_TAG
+{
+	IMG_UINT32 ui32DeviceSatus;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_GETDEVICESTATUS;
+
+/*******************************************
+            EventObjectWaitTimeout
+ *******************************************/
+
+/* Bridge in structure for EventObjectWaitTimeout */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT_TAG
+{
+	IMG_HANDLE hOSEventKM;
+	IMG_UINT64 ui64uiTimeoutus;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT;
+
+/* Bridge out structure for EventObjectWaitTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT;
+
+/*******************************************
+            FindProcessMemStats
+ *******************************************/
+
+/* Bridge in structure for FindProcessMemStats */
+typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG
+{
+	IMG_UINT32 ui32PID;
+	IMG_UINT32 ui32ArrSize;
+	IMG_BOOL bbAllProcessStats;
+	/* Output pointer pui32MemStatsArray is also an implied input */
+	IMG_UINT32 *pui32MemStatsArray;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS;
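+
+/* Descriptive note (not part of the generated header): pointer members in
+ * these packed structures, such as pui32MemStatsArray above, carry client
+ * buffer addresses across the bridge; the paired count field (here
+ * ui32ArrSize) gives the buffer's element count. "Implied input" means the
+ * server writes its results back through the client-supplied buffer, which
+ * is why the same pointer reappears in the OUT structure below. */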
+
+/* Bridge out structure for FindProcessMemStats */
+typedef struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG
+{
+	IMG_UINT32 *pui32MemStatsArray;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS;
+
+/*******************************************
+            AcquireInfoPage
+ *******************************************/
+
+/* Bridge in structure for AcquireInfoPage */
+typedef struct PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE;
+
+/* Bridge out structure for AcquireInfoPage */
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE_TAG
+{
+	IMG_HANDLE hPMR;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE;
+
+/*******************************************
+            ReleaseInfoPage
+ *******************************************/
+
+/* Bridge in structure for ReleaseInfoPage */
+typedef struct PVRSRV_BRIDGE_IN_RELEASEINFOPAGE_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_RELEASEINFOPAGE;
+
+/* Bridge out structure for ReleaseInfoPage */
+typedef struct PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE;
+
+#endif /* COMMON_SRVCORE_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_sync_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_sync_bridge.h
new file mode 100644
index 0000000..3b6d262
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_sync_bridge.h
@@ -0,0 +1,458 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for sync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SYNC_BRIDGE_H
+#define COMMON_SYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_SYNC_CMD_FIRST			0
+#define PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK			PVRSRV_BRIDGE_SYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK			PVRSRV_BRIDGE_SYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMSET			PVRSRV_BRIDGE_SYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCPRIMSET			PVRSRV_BRIDGE_SYNC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCALLOC			PVRSRV_BRIDGE_SYNC_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCFREE			PVRSRV_BRIDGE_SYNC_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCQUEUEHWOP			PVRSRV_BRIDGE_SYNC_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCGETSTATUS			PVRSRV_BRIDGE_SYNC_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCREATE			PVRSRV_BRIDGE_SYNC_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPTAKE			PVRSRV_BRIDGE_SYNC_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPREADY			PVRSRV_BRIDGE_SYNC_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCOMPLETE			PVRSRV_BRIDGE_SYNC_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPDESTROY			PVRSRV_BRIDGE_SYNC_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP			PVRSRV_BRIDGE_SYNC_CMD_FIRST+13
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE			PVRSRV_BRIDGE_SYNC_CMD_FIRST+14
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL			PVRSRV_BRIDGE_SYNC_CMD_FIRST+15
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPPDUMPPOL			PVRSRV_BRIDGE_SYNC_CMD_FIRST+16
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP			PVRSRV_BRIDGE_SYNC_CMD_FIRST+17
+#define PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT			PVRSRV_BRIDGE_SYNC_CMD_FIRST+18
+#define PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT			PVRSRV_BRIDGE_SYNC_CMD_FIRST+19
+#define PVRSRV_BRIDGE_SYNC_CMD_LAST			(PVRSRV_BRIDGE_SYNC_CMD_FIRST+19)
+
+/*******************************************
+            AllocSyncPrimitiveBlock
+ *******************************************/
+
+/* Bridge in structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32SyncPrimVAddr;
+	IMG_UINT32 ui32SyncPrimBlockSize;
+	IMG_HANDLE hhSyncPMR;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK;
+
+/*******************************************
+            FreeSyncPrimitiveBlock
+ *******************************************/
+
+/* Bridge in structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK_TAG
+{
+	IMG_HANDLE hSyncHandle;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK;
+
+/*******************************************
+            SyncPrimSet
+ *******************************************/
+
+/* Bridge in structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSET_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Index;
+	IMG_UINT32 ui32Value;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMSET;
+
+/* Bridge out structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSET_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMSET;
+
+/*******************************************
+            ServerSyncPrimSet
+ *******************************************/
+
+/* Bridge in structure for ServerSyncPrimSet */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Value;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET;
+
+/* Bridge out structure for ServerSyncPrimSet */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET;
+
+/*******************************************
+            ServerSyncAlloc
+ *******************************************/
+
+/* Bridge in structure for ServerSyncAlloc */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCALLOC_TAG
+{
+	IMG_UINT32 ui32ClassNameSize;
+	const IMG_CHAR *puiClassName;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCALLOC;
+
+/* Bridge out structure for ServerSyncAlloc */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32SyncPrimVAddr;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC;
+
+/*******************************************
+            ServerSyncFree
+ *******************************************/
+
+/* Bridge in structure for ServerSyncFree */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCFREE_TAG
+{
+	IMG_HANDLE hSyncHandle;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCFREE;
+
+/* Bridge out structure for ServerSyncFree */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCFREE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCFREE;
+
+/*******************************************
+            ServerSyncQueueHWOp
+ *******************************************/
+
+/* Bridge in structure for ServerSyncQueueHWOp */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_BOOL bbUpdate;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP;
+
+/* Bridge out structure for ServerSyncQueueHWOp */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP_TAG
+{
+	IMG_UINT32 ui32FenceValue;
+	IMG_UINT32 ui32UpdateValue;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP;
+
+/*******************************************
+            ServerSyncGetStatus
+ *******************************************/
+
+/* Bridge in structure for ServerSyncGetStatus */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS_TAG
+{
+	IMG_UINT32 ui32SyncCount;
+	IMG_HANDLE *phSyncHandle;
+	/* Output pointer pui32UID is also an implied input */
+	IMG_UINT32 *pui32UID;
+	/* Output pointer pui32FWAddr is also an implied input */
+	IMG_UINT32 *pui32FWAddr;
+	/* Output pointer pui32CurrentOp is also an implied input */
+	IMG_UINT32 *pui32CurrentOp;
+	/* Output pointer pui32NextOp is also an implied input */
+	IMG_UINT32 *pui32NextOp;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS;
+
+/* Bridge out structure for ServerSyncGetStatus */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS_TAG
+{
+	IMG_UINT32 *pui32UID;
+	IMG_UINT32 *pui32FWAddr;
+	IMG_UINT32 *pui32CurrentOp;
+	IMG_UINT32 *pui32NextOp;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS;
+
+/*******************************************
+            SyncPrimOpCreate
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpCreate */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE_TAG
+{
+	IMG_UINT32 ui32SyncBlockCount;
+	IMG_HANDLE *phBlockList;
+	IMG_UINT32 ui32ClientSyncCount;
+	IMG_UINT32 *pui32SyncBlockIndex;
+	IMG_UINT32 *pui32Index;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_HANDLE *phServerSync;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE;
+
+/* Bridge out structure for SyncPrimOpCreate */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE_TAG
+{
+	IMG_HANDLE hServerCookie;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE;
+
+/*******************************************
+            SyncPrimOpTake
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpTake */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE_TAG
+{
+	IMG_HANDLE hServerCookie;
+	IMG_UINT32 ui32ClientSyncCount;
+	IMG_UINT32 *pui32Flags;
+	IMG_UINT32 *pui32FenceValue;
+	IMG_UINT32 *pui32UpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 *pui32ServerFlags;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE;
+
+/* Bridge out structure for SyncPrimOpTake */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE;
+
+/*******************************************
+            SyncPrimOpReady
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpReady */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY_TAG
+{
+	IMG_HANDLE hServerCookie;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY;
+
+/* Bridge out structure for SyncPrimOpReady */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY_TAG
+{
+	IMG_BOOL bReady;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY;
+
+/*******************************************
+            SyncPrimOpComplete
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpComplete */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE_TAG
+{
+	IMG_HANDLE hServerCookie;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE;
+
+/* Bridge out structure for SyncPrimOpComplete */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE;
+
+/*******************************************
+            SyncPrimOpDestroy
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpDestroy */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY_TAG
+{
+	IMG_HANDLE hServerCookie;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY;
+
+/* Bridge out structure for SyncPrimOpDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY;
+
+/*******************************************
+            SyncPrimPDump
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Offset;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP;
+
+/* Bridge out structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP;
+
+/*******************************************
+            SyncPrimPDumpValue
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Offset;
+	IMG_UINT32 ui32Value;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE;
+
+/* Bridge out structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE;
+
+/*******************************************
+            SyncPrimPDumpPol
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Offset;
+	IMG_UINT32 ui32Value;
+	IMG_UINT32 ui32Mask;
+	PDUMP_POLL_OPERATOR eOperator;
+	PDUMP_FLAGS_T uiPDumpFlags;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL;
+
+/* Bridge out structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL;
+
+/*******************************************
+            SyncPrimOpPDumpPol
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL_TAG
+{
+	IMG_HANDLE hServerCookie;
+	PDUMP_POLL_OPERATOR eOperator;
+	PDUMP_FLAGS_T uiPDumpFlags;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL;
+
+/* Bridge out structure for SyncPrimOpPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL;
+
+/*******************************************
+            SyncPrimPDumpCBP
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Offset;
+	IMG_DEVMEM_OFFSET_T uiWriteOffset;
+	IMG_DEVMEM_SIZE_T uiPacketSize;
+	IMG_DEVMEM_SIZE_T uiBufferSize;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP;
+
+/* Bridge out structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP;
+
+/*******************************************
+            SyncAllocEvent
+ *******************************************/
+
+/* Bridge in structure for SyncAllocEvent */
+typedef struct PVRSRV_BRIDGE_IN_SYNCALLOCEVENT_TAG
+{
+	IMG_BOOL bServerSync;
+	IMG_UINT32 ui32FWAddr;
+	IMG_UINT32 ui32ClassNameSize;
+	const IMG_CHAR *puiClassName;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCALLOCEVENT;
+
+/* Bridge out structure for SyncAllocEvent */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT;
+
+/*******************************************
+            SyncFreeEvent
+ *******************************************/
+
+/* Bridge in structure for SyncFreeEvent */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFREEEVENT_TAG
+{
+	IMG_UINT32 ui32FWAddr;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCFREEEVENT;
+
+/* Bridge out structure for SyncFreeEvent */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFREEEVENT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCFREEEVENT;
+
+#endif /* COMMON_SYNC_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_synctracking_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_synctracking_bridge.h
new file mode 100644
index 0000000..340f736
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/common_synctracking_bridge.h
@@ -0,0 +1,96 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for synctracking
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for synctracking
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SYNCTRACKING_BRIDGE_H
+#define COMMON_SYNCTRACKING_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST			0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE			PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD			PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST			(PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1)
+
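+/* Descriptive note: these two calls appear to back the sync-record debug
+ * facility; compare PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING and
+ * PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN in config_kernel.h from this same
+ * patch. */
+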
+/*******************************************
+            SyncRecordRemoveByHandle
+ *******************************************/
+
+/* Bridge in structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+	IMG_HANDLE hhRecord;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE;
+
+/* Bridge out structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE;
+
+/*******************************************
+            SyncRecordAdd
+ *******************************************/
+
+/* Bridge in structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDADD_TAG
+{
+	IMG_HANDLE hhServerSyncPrimBlock;
+	IMG_UINT32 ui32ui32FwBlockAddr;
+	IMG_UINT32 ui32ui32SyncOffset;
+	IMG_BOOL bbServerSync;
+	IMG_UINT32 ui32ClassNameSize;
+	const IMG_CHAR *puiClassName;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDADD;
+
+/* Bridge out structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDADD_TAG
+{
+	IMG_HANDLE hhRecord;
+	PVRSRV_ERROR eError;
+} __attribute__ ((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDADD;
+
+#endif /* COMMON_SYNCTRACKING_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/config_kernel.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/config_kernel.h
new file mode 100644
index 0000000..1ab127d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/config_kernel.h
@@ -0,0 +1,162 @@
+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES 8
+#define DISPLAY_CONTROLLER drm_pdp
+#define GPUVIRT_VALIDATION_NUM_OS 8
+#define GPUVIRT_VALIDATION_NUM_REGIONS 2
+#define HWR_DEFAULT_ENABLED
+#define LINUX
+#define LMA
+#define PDUMP_STREAMBUF_MAX_SIZE_MB 16
+#define PDVFS_COM PDVFS_COM_HOST
+#define PDVFS_COM_AP 2
+#define PDVFS_COM_HOST 1
+#define PDVFS_COM_PMC 3
+#define PVRSRV_APPHINT_ASSERTONHWRTRIGGER IMG_FALSE
+#define PVRSRV_APPHINT_ASSERTOUTOFMEMORY IMG_FALSE
+#define PVRSRV_APPHINT_BIFTILINGMODE 4
+#define PVRSRV_APPHINT_CACHEOPCONFIG 0
+#define PVRSRV_APPHINT_CACHEOPGFTHRESHOLDSIZE 0
+#define PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE 0
+#define PVRSRV_APPHINT_CHECKMLIST APPHNT_BLDVAR_DEBUG
+#define PVRSRV_APPHINT_CLEANUPTHREADPRIORITY 5
+#define PVRSRV_APPHINT_DISABLECLOCKGATING 0
+#define PVRSRV_APPHINT_DISABLEDMOVERLAP 0
+#define PVRSRV_APPHINT_DISABLEFEDLOGGING IMG_FALSE
+#define PVRSRV_APPHINT_DISABLEPDUMPPANIC IMG_FALSE
+#define PVRSRV_APPHINT_DRIVERMODE 0x7FFFFFFF
+#define PVRSRV_APPHINT_DUSTREQUESTINJECT IMG_FALSE
+#define PVRSRV_APPHINT_EMUMAXFREQ 0
+#define PVRSRV_APPHINT_ENABLEAPM RGX_ACTIVEPM_DEFAULT
+#define PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE 0
+#define PVRSRV_APPHINT_ENABLEFTRACEGPU IMG_FALSE
+#define PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING IMG_FALSE
+#define PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH RGXFWIF_INICFG_CTXSWITCH_DM_ALL
+#define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_FALSE
+#define PVRSRV_APPHINT_ENABLEHTBLOGGROUP 0
+#define PVRSRV_APPHINT_ENABLELOGGROUP RGXFWIF_LOG_TYPE_NONE
+#define PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG IMG_FALSE
+#define PVRSRV_APPHINT_ENABLERDPOWERISLAND RGX_RD_POWER_ISLAND_DEFAULT
+#define PVRSRV_APPHINT_ENABLESIGNATURECHECKS APPHNT_BLDVAR_ENABLESIGNATURECHECKS
+#define PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG IMG_FALSE
+#define PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE 0
+#define PVRSRV_APPHINT_FIRMWARELOGTYPE 0
+#define PVRSRV_APPHINT_FIRMWAREPERF FW_PERF_CONF_NONE
+#define PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN
+#define PVRSRV_APPHINT_FWPOISONONFREEVALUE 0xBD
+#define PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS
+#define PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE 0x4000
+#define PVRSRV_APPHINT_GPIOVALIDATIONMODE 0
+#define PVRSRV_APPHINT_HTBOPERATIONMODE HTB_OPMODE_DROPOLDEST
+#define PVRSRV_APPHINT_HTBUFFERSIZE 64
+#define PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE 786432
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN 0
+#define PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER 0
+#define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB 2048
+#define PVRSRV_APPHINT_HWPERFFWFILTER 0
+#define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB 2048
+#define PVRSRV_APPHINT_HWPERFHOSTFILTER 0
+#define PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS 50
+#define PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT APPHNT_BLDVAR_DBGDUMPLIMIT
+#define PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC IMG_FALSE
+#define PVRSRV_APPHINT_JONESDISABLEMASK 0
+#define PVRSRV_APPHINT_NEWFILTERINGMODE 1
+#define PVRSRV_APPHINT_OSIDREGION0MAX "0x3FFFFFFF 0x0FFFFFFF 0x17FFFFFF 0x1FFFFFFF 0x27FFFFFF 0x2FFFFFFF 0x37FFFFFF 0x3FFFFFFF"
+#define PVRSRV_APPHINT_OSIDREGION0MIN "0x00000000 0x04000000 0x10000000 0x18000000 0x20000000 0x28000000 0x30000000 0x38000000"
+#define PVRSRV_APPHINT_OSIDREGION1MAX "0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF"
+#define PVRSRV_APPHINT_OSIDREGION1MIN "0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000"
+#define PVRSRV_APPHINT_PHYSMEMTESTPASSES APPHNT_PHYSMEMTEST_ENABLE
+#define PVRSRV_APPHINT_RGXBVNC ""
+#define PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE RGXFW_SIG_BUFFER_SIZE_MIN
+#define PVRSRV_APPHINT_TIMECORRCLOCK 0
+#define PVRSRV_APPHINT_TRUNCATEMODE 0
+#define PVRSRV_APPHINT_USEMETAT1 RGX_META_T1_OFF
+#define PVRSRV_APPHINT_VALIDATEIRQ 0
+#define PVRSRV_APPHINT_VDMCONTEXTSWITCHMODE RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX
+#define PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY 0
+#define PVRSRV_APPHINT_ZEROFREELIST IMG_FALSE
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD 90
+#define PVRSRV_ENABLE_PROCESS_STATS
+#define PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN 256
+#define PVRSRV_MODNAME "pvrsrvkm"
+#define PVRSRV_NEED_PVR_DPF
+#define PVRSRV_NEED_PVR_STACKTRACE_NATIVE
+#define PVRSRV_POISON_ON_ALLOC_VALUE 0xd9
+#define PVRSRV_POISON_ON_FREE_VALUE 0x63
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D 17
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM 15
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA 16
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D 15
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D 15
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D 16
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA 15
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D 14
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D 14
+#define PVRSRV_STALLED_CCB_ACTION
+#define PVRSRV_SYNC_CHECKPOINT_CCB
+#define PVRSRV_VZ_NUM_OSID
+#define PVRSYNC_MODNAME "pvr_sync"
+#define PVR_BUILD_DIR "tc_linux"
+#define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 524288
+#define PVR_DRM_NAME "pvr"
+#define PVR_GPIO_MODE PVR_GPIO_MODE_GENERAL
+#define PVR_GPIO_MODE_GENERAL 1
+#define PVR_GPIO_MODE_POWMON_PIN 2
+#define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm"
+#define PVR_LDM_PLATFORM_PRE_REGISTERED
+#define PVR_LINUX_BLOB_CACHE_SIZE_MEGABYTES 20
+#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256
+#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384
+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2
+#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 20480
+#define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240
+#define PVR_POWER_ACTOR_MEASUREMENT_PERIOD_MS 10U
+#define PVR_POWER_MONITOR_HWPERF
+#define PVR_USE_FENCE_SYNC_MODEL 1
+#define RGX_BNC_CONFIG_KM_HEADER "configs/rgxconfig_km_1.V.4.5.h"
+#define RGX_BVNC_CORE_KM_HEADER "cores/rgxcore_km_1.82.4.5.h"
+#define RGX_FW_FILENAME "rgx.fw"
+#define RGX_FW_HEAP_SHIFT 25
+#define RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS 0
+#define SOC_TIMER_FREQ 20
+#define SUPPORT_BUFFER_SYNC 1
+#define SUPPORT_DBGDRV_EVENT_OBJECTS
+#define SUPPORT_LINUX_X86_PAT
+#define SUPPORT_LINUX_X86_WRITECOMBINE
+#define SUPPORT_MMU_PENDING_FAULT_PROTECTION
+#define SUPPORT_MULTIBVNC
+#define SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION
+#define SUPPORT_NATIVE_FENCE_SYNC
+#define SUPPORT_PERCONTEXT_FREELIST
+#define SUPPORT_PHYSMEM_TEST
+#define SUPPORT_RGX 1
+#define SUPPORT_SERVER_SYNC_IMPL
+#define TC_APOLLO_ES2
+#define TC_DISPLAY_MEM_SIZE 383
+#define TC_MEMORY_CONFIG TC_MEMORY_LOCAL
+#define TC_SECURE_MEM_SIZE 128
+#ifdef CONFIG_DRM_POWERVR_ROGUE_DEBUG
+#define DEBUG
+#define DEBUG_BRIDGE_KM
+#define DEBUG_HANDLEALLOC_KM
+#define DEBUG_LINUX_MEMORY_ALLOCATIONS
+#define DEBUG_LINUX_MEM_AREAS
+#define DEBUG_LINUX_MMAP_AREAS
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE
+#define PVRSRV_ENABLE_GPU_MEMORY_INFO
+#define PVRSRV_ENABLE_SYNC_POISONING
+#define PVR_ANNOTATION_MAX_LEN 96
+#define PVR_BUILD_TYPE "debug"
+#define RGXFW_ALIGNCHECKS
+#define TRACK_FW_BOOT
+#else
+#define PVR_ANNOTATION_MAX_LEN 63
+#define PVR_BUILD_TYPE "release"
+#define RELEASE
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/config_kernel.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/config_kernel.mk
new file mode 100644
index 0000000..4df8034
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/config_kernel.mk
@@ -0,0 +1,35 @@
+override DISPLAY_CONTROLLER := drm_pdp
+override LMA := 1
+override METAG_VERSION_NEEDED := 2.8.1.0.3
+override MIPS_VERSION_NEEDED := 2014.07-1
+override PDVFS_COM := PDVFS_COM_HOST
+override PDVFS_COM_AP := 2
+override PDVFS_COM_HOST := 1
+override PDVFS_COM_PMC := 3
+override PVRSRV_MODNAME := pvrsrvkm
+override PVRSYNC_MODNAME := pvr_sync
+override PVR_BUILD_DIR := tc_linux
+override PVR_GPIO_MODE := PVR_GPIO_MODE_GENERAL
+override PVR_GPIO_MODE_GENERAL := 1
+override PVR_GPIO_MODE_POWMON_PIN := 2
+override PVR_HANDLE_BACKEND := idr
+override PVR_SYSTEM := rgx_linux_tc
+override PVR_USE_FENCE_SYNC_MODEL := 1
+override RGX_TIMECORR_CLOCK := mono
+override SUPPORT_BUFFER_SYNC := 1
+override SUPPORT_DMABUF_BRIDGE := 1
+override SUPPORT_DMA_FENCE := 1
+override SUPPORT_NATIVE_FENCE_SYNC := 1
+override SUPPORT_PHYSMEM_TEST := 1
+override SUPPORT_RGX := 1
+override SUPPORT_SERVER_SYNC_IMPL := 1
+override VMM_TYPE := stub
+override undefine SUPPORT_DISPLAY_CLASS
+ifeq ($(CONFIG_DRM_POWERVR_ROGUE_DEBUG),y)
+override BUILD := debug
+override PVRSRV_ENABLE_GPU_MEMORY_INFO := 1
+override PVR_BUILD_TYPE := debug
+else
+override BUILD := release
+override PVR_BUILD_TYPE := release
+endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/configs/rgxconfig_km_1.V.4.5.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/configs/rgxconfig_km_1.V.4.5.h
new file mode 100644
index 0000000..9e7e5d3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/configs/rgxconfig_km_1.V.4.5.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.4.5
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_1_V_4_5_H
+#define RGXCONFIG_KM_1_V_4_5_H
+
+/* Automatically generated file (08/07/2019 09:00:54): Do not edit manually */
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 5
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4U)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128U * 1024U)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3U)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (1U)
+#define RGX_FEATURE_META_COREMEM_SIZE (0U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+
+
+#endif /* RGXCONFIG_KM_1_V_4_5_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/connection_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/connection_server.c
new file mode 100644
index 0000000..8f04396
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/connection_server.c
@@ -0,0 +1,519 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Handles connections coming from the client and the management
+                of connection-based information
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "handle.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "osconnection_server.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "sync_server.h"
+#include "process_stats.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "tlstream.h"
+
+/* PID associated with Connection currently being purged by Cleanup thread */
+static IMG_PID gCurrentPurgeConnectionPid;
+
+static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection)
+{
+	PVRSRV_ERROR eError;
+	PROCESS_HANDLE_BASE *psProcessHandleBase;
+	IMG_UINT64 ui64MaxBridgeTime;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if (psPVRSRVData->bUnload)
+	{
+		/* Driver is unloading, so do not allow the bridge lock to be released */
+		ui64MaxBridgeTime = 0;
+	}
+	else
+	{
+		ui64MaxBridgeTime = CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS;
+	}
+
+	if (psConnection == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing connection!", __func__));
+		PVR_ASSERT(0);
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Close HWPerfClient stream here even though we created it in
+	 * PVRSRVConnectKM(). */
+	if (psConnection->hClientTLStream)
+	{
+		TLStreamClose(psConnection->hClientTLStream);
+		psConnection->hClientTLStream = NULL;
+		PVR_DPF((PVR_DBG_MESSAGE, "Destroyed private stream."));
+	}
+
+	/* Get process handle base to decrement the refcount */
+	psProcessHandleBase = psConnection->psProcessHandleBase;
+
+	if (psProcessHandleBase != NULL)
+	{
+		/* Acquire the lock now to ensure that the unref and the removal
+		 * from the hash table are atomic.
+		 * If the refcount becomes zero, the lock must be held until the
+		 * entry has been removed from the hash table.
+		 */
+		OSLockAcquire(psPVRSRVData->hProcessHandleBase_Lock);
+
+		/* If the refcount drops to zero we can remove the process handle base */
+		if (OSAtomicDecrement(&psProcessHandleBase->iRefCount) == 0)
+		{
+			uintptr_t uiHashValue;
+
+			uiHashValue = HASH_Remove(psPVRSRVData->psProcessHandleBase_Table, psConnection->pid);
+			OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+
+			if (!uiHashValue)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Failed to remove handle base from hash table.",
+						__func__));
+				return PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE;
+			}
+
+			eError = PVRSRVFreeKernelHandles(psProcessHandleBase->psHandleBase);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Couldn't free kernel handles for process (%s)",
+						__func__, PVRSRVGetErrorString(eError)));
+
+				return eError;
+			}
+
+			eError = PVRSRVFreeHandleBase(psProcessHandleBase->psHandleBase, ui64MaxBridgeTime);
+			if (eError != PVRSRV_OK)
+			{
+				if (eError != PVRSRV_ERROR_RETRY)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Couldn't free handle base for process (%s)",
+						 __func__, PVRSRVGetErrorString(eError)));
+				}
+
+				return eError;
+			}
+
+			OSFreeMem(psProcessHandleBase);
+		}
+		else
+		{
+			OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+		}
+
+		psConnection->psProcessHandleBase = NULL;
+	}
+
+	/* Free handle base for this connection */
+	if (psConnection->psHandleBase != NULL)
+	{
+		eError = PVRSRVFreeHandleBase(psConnection->psHandleBase, ui64MaxBridgeTime);
+		if (eError != PVRSRV_OK)
+		{
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Couldn't free handle base for connection (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+			}
+
+			return eError;
+		}
+
+		psConnection->psHandleBase = NULL;
+	}
+
+	if (psConnection->psSyncConnectionData != NULL)
+	{
+		SyncUnregisterConnection(psConnection->psSyncConnectionData);
+		psConnection->psSyncConnectionData = NULL;
+	}
+
+	if (psConnection->psPDumpConnectionData != NULL)
+	{
+		PDumpUnregisterConnection(psConnection->psPDumpConnectionData);
+		psConnection->psPDumpConnectionData = NULL;
+	}
+
+	/* Call environment specific connection data deinit function */
+	if (psConnection->hOsPrivateData != NULL)
+	{
+		eError = OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+				 "%s: OSConnectionPrivateDataDeInit failed (%s)",
+				 __func__, PVRSRVGetErrorString(eError)));
+
+			return eError;
+		}
+
+		psConnection->hOsPrivateData = NULL;
+	}
+
+	/* Close the PID stats entry as late as possible to catch all frees */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	if (psConnection->hProcessStats != NULL)
+	{
+		PVRSRVStatsDeregisterProcess(psConnection->hProcessStats);
+		psConnection->hProcessStats = NULL;
+	}
+#endif
+
+	OSFreeMemNoStats(psConnection);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVConnectionConnect(void **ppvPrivData, void *pvOSData)
+{
+	CONNECTION_DATA *psConnection;
+	PVRSRV_ERROR eError;
+	PROCESS_HANDLE_BASE *psProcessHandleBase;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* Allocate the connection data area; no stats tracking yet, since the
+	 * process is not registered at this point */
+	psConnection = OSAllocZMemNoStats(sizeof(*psConnection));
+	if (psConnection == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Couldn't allocate connection data",
+			 __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Allocate process statistics as early as possible to catch all allocs */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	eError = PVRSRVStatsRegisterProcess(&psConnection->hProcessStats);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Couldn't register process statistics (%s)",
+			 __func__, PVRSRVGetErrorString(eError)));
+		goto failure;
+	}
+#endif
+
+	/* Call environment specific connection data init function */
+	eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, pvOSData);
+	PVR_LOGG_IF_ERROR(eError, "OSConnectionPrivateDataInit", failure);
+
+	psConnection->pid = OSGetCurrentClientProcessIDKM();
+	OSStringLCopy(psConnection->pszProcName, OSGetCurrentClientProcessNameKM(), PVRSRV_CONNECTION_PROCESS_NAME_LEN);
+
+#if defined(DEBUG) || defined(PDUMP)
+	PVR_LOG(("%s connected", psConnection->pszProcName));
+#endif
+
+	/* Register this connection with the sync core */
+	eError = SyncRegisterConnection(&psConnection->psSyncConnectionData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Couldn't register the sync data", __func__));
+		goto failure;
+	}
+
+	/*
+	 * Register this connection and Sync PDump callback with
+	 * the pdump core. Pass in the Sync connection data.
+	 */
+	eError = PDumpRegisterConnection(psConnection->psSyncConnectionData,
+	                                  SyncConnectionPDumpSyncBlocks,
+	                                  &psConnection->psPDumpConnectionData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Couldn't register the PDump data",
+			 __func__));
+		goto failure;
+	}
+
+	/* Allocate handle base for this connection */
+	eError = PVRSRVAllocHandleBase(&psConnection->psHandleBase,
+	                               PVRSRV_HANDLE_BASE_TYPE_CONNECTION);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Couldn't allocate handle base for connection (%s)",
+			 __func__, PVRSRVGetErrorString(eError)));
+		goto failure;
+	}
+
+	/* Try to get process handle base if it already exists */
+	OSLockAcquire(psPVRSRVData->hProcessHandleBase_Lock);
+	psProcessHandleBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(PVRSRVGetPVRSRVData()->psProcessHandleBase_Table,
+	                                                           psConnection->pid);
+
+	/* If there is no existing entry, allocate one */
+	if (psProcessHandleBase == NULL)
+	{
+		psProcessHandleBase = OSAllocZMem(sizeof(PROCESS_HANDLE_BASE));
+		if (psProcessHandleBase == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to allocate handle base, oom.",
+					__func__));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto failureLock;
+		}
+
+		OSAtomicWrite(&psProcessHandleBase->iRefCount, 0);
+
+		/* Allocate handle base for this process */
+		eError = PVRSRVAllocHandleBase(&psProcessHandleBase->psHandleBase,
+		                               PVRSRV_HANDLE_BASE_TYPE_PROCESS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Couldn't allocate handle base for process (%s)",
+			         __func__,
+			         PVRSRVGetErrorString(eError)));
+			OSFreeMem(psProcessHandleBase);
+			goto failureLock;
+		}
+
+		/* Insert the handle base into the global hash table */
+		if (!HASH_Insert(PVRSRVGetPVRSRVData()->psProcessHandleBase_Table,
+		                 psConnection->pid,
+		                 (uintptr_t) psProcessHandleBase))
+		{
+
+			eError = PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE;
+
+			PVRSRVFreeHandleBase(psProcessHandleBase->psHandleBase, 0);
+
+			OSFreeMem(psProcessHandleBase);
+			goto failureLock;
+		}
+	}
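+
+	/* This increment pairs with the OSAtomicDecrement() in
+	 * ConnectionDataDestroy(): the last connection for this PID frees the
+	 * per-process handle base and removes it from the hash table. */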
+	OSAtomicIncrement(&psProcessHandleBase->iRefCount);
+
+	OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+
+	OSLockAcquire(psPVRSRVData->hConnectionsLock);
+	dllist_add_to_tail(&psPVRSRVData->sConnections, &psConnection->sConnectionListNode);
+	OSLockRelease(psPVRSRVData->hConnectionsLock);
+
+	psConnection->psProcessHandleBase = psProcessHandleBase;
+
+	*ppvPrivData = psConnection;
+
+	return eError;
+
+failureLock:
+	OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+failure:
+	ConnectionDataDestroy(psConnection);
+
+	return eError;
+}
+
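+/*
+ * Connection lifecycle sketch (descriptive summary, not a normative API
+ * contract):
+ *
+ *   PVRSRVConnectionConnect()    - on open: allocate CONNECTION_DATA,
+ *                                  register stats/sync/PDump data, and take
+ *                                  a reference on the per-PID handle base
+ *   PVRSRVConnectionDisconnect() - on close: unlink from the connection
+ *                                  list and queue deferred destruction
+ *   _CleanupThreadPurgeConnectionData() - cleanup thread: calls
+ *                                  ConnectionDataDestroy(), which the thread
+ *                                  retries if it returns PVRSRV_ERROR_RETRY
+ */
+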
+static PVRSRV_ERROR _CleanupThreadPurgeConnectionData(void *pvConnectionData)
+{
+	PVRSRV_ERROR eErrorConnection, eErrorKernel;
+	CONNECTION_DATA *psConnectionData = pvConnectionData;
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#endif
+
+	gCurrentPurgeConnectionPid = psConnectionData->pid;
+
+	eErrorConnection = ConnectionDataDestroy(psConnectionData);
+	if (eErrorConnection != PVRSRV_OK)
+	{
+		if (eErrorConnection == PVRSRV_ERROR_RETRY)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+				 "%s: Failed to purge connection data %p "
+				 "(deferring destruction)",
+				 __func__,
+				 psConnectionData));
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,
+			 "%s: Connection data %p deferred destruction finished",
+			 __func__,
+			 psConnectionData));
+	}
+
+	/* Resize (purge) the global handle base if possible */
+	eErrorKernel = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
+	if (eErrorKernel != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Purge of global handle pool failed (%s)",
+			 __func__,
+			 PVRSRVGetErrorString(eErrorKernel)));
+	}
+
+	gCurrentPurgeConnectionPid = 0;
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+
+	return eErrorConnection;
+}
+
+void PVRSRVConnectionDisconnect(void *pvDataPtr)
+{
+	CONNECTION_DATA *psConnectionData = pvDataPtr;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	OSLockAcquire(psPVRSRVData->hConnectionsLock);
+	dllist_remove_node(&psConnectionData->sConnectionListNode);
+	OSLockRelease(psPVRSRVData->hConnectionsLock);
+
+	/* Notify the PDump core if the pdump control client is disconnecting */
+	if (psConnectionData->ui32ClientFlags & SRV_FLAGS_PDUMPCTRL)
+	{
+		PDumpDisconnectionNotify();
+	}
+
+#if defined(DEBUG) || defined(PDUMP)
+	PVR_LOG(("%s disconnected", psConnectionData->pszProcName));
+#endif
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+#endif
+	{
+		/* Defer the release of the connection data */
+		psConnectionData->sCleanupThreadFn.pfnFree = _CleanupThreadPurgeConnectionData;
+		psConnectionData->sCleanupThreadFn.pvData = psConnectionData;
+		psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_FALSE;
+		CLEANUP_THREAD_SET_RETRY_COUNT(&psConnectionData->sCleanupThreadFn,
+		                               CLEANUP_THREAD_RETRY_COUNT_DEFAULT);
+		PVRSRVCleanupThreadAddWork(&psConnectionData->sCleanupThreadFn);
+	}
+}
+
+IMG_PID PVRSRVGetPurgeConnectionPid(void)
+{
+	return gCurrentPurgeConnectionPid;
+}
+
+/* Prefix for debug messages about Active Connections */
+#define ACTIVE_PREFIX "Active connects:"
+
+void PVRSRVConnectionDebugNotify(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                 void *pvDumpDebugFile)
+{
+	PDLLIST_NODE pNext, pNode;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	OSLockAcquire(psPVRSRVData->hConnectionsLock);
+	if (dllist_is_empty(&psPVRSRVData->sConnections))
+	{
+		PVR_DUMPDEBUG_LOG(ACTIVE_PREFIX " No active connections");
+	}
+	else
+	{
+#define MAX_DEBUG_DUMP_STRING_LEN 150
+#define MAX_DEBUG_DUMP_CONNECTION_STR_LEN 26
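+		/* MAX_DEBUG_DUMP_CONNECTION_STR_LEN is the worst-case number of
+		 * characters appended per connection by the " %d (%s)," format
+		 * below; it decides when the current line must be flushed. */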
+		IMG_CHAR sActiveConnections[MAX_DEBUG_DUMP_STRING_LEN];
+		IMG_UINT16 i, uiPos = 0;
+		IMG_BOOL bPrinted = IMG_FALSE;
+		size_t uiSize = sizeof (sActiveConnections);
+
+		OSStringLCopy(sActiveConnections, ACTIVE_PREFIX, uiSize);
+		uiPos = sizeof (ACTIVE_PREFIX) - 1;	/* Next buffer location to fill */
+		uiSize -= uiPos;	/* Remaining space to use in sActiveConnections[] */
+
+		dllist_foreach_node(&psPVRSRVData->sConnections, pNode, pNext)
+		{
+			CONNECTION_DATA *sData = IMG_CONTAINER_OF(pNode, CONNECTION_DATA, sConnectionListNode);
+
+			IMG_CHAR sTmpBuff[MAX_DEBUG_DUMP_CONNECTION_STR_LEN];
+			i = OSSNPrintf(sTmpBuff, MAX_DEBUG_DUMP_CONNECTION_STR_LEN, " %d (%s),", sData->pid, sData->pszProcName);
+			i = MIN(MAX_DEBUG_DUMP_CONNECTION_STR_LEN, i);
+			bPrinted = IMG_FALSE;
+
+			OSStringLCopy(sActiveConnections+uiPos, sTmpBuff, uiSize);
+
+			// Move the write offset to the end of the current string
+			uiPos += i;
+			// Update the amount of remaining space available to copy into
+			uiSize -= i;
+
+			// If there is not enough space to add another connection to this line, output the line
+			if (uiSize <= MAX_DEBUG_DUMP_CONNECTION_STR_LEN)
+			{
+				PVR_DUMPDEBUG_LOG("%s", sActiveConnections);
+
+				/*
+				 * Remove the "Active connects:" prefix from the buffer.
+				 * Leave the subsequent buffer contents indented by the same
+				 * amount to aid in interpreting the debug output.
+				 */
+				OSCachedMemSet(sActiveConnections, ' ', sizeof (ACTIVE_PREFIX));
+				uiPos = sizeof (ACTIVE_PREFIX) - 1;
+				// Reset the amount of space available to copy into
+				uiSize = MAX_DEBUG_DUMP_STRING_LEN - uiPos;
+				bPrinted = IMG_TRUE;
+			}
+		}
+
+		// Only print the current line if it hasn't already been printed
+		if (!bPrinted)
+		{
+			// Strip off the final comma
+			sActiveConnections[OSStringNLength(sActiveConnections, MAX_DEBUG_DUMP_STRING_LEN) - 1] = '\0';
+			PVR_DUMPDEBUG_LOG("%s", sActiveConnections);
+		}
+#undef MAX_DEBUG_DUMP_STRING_LEN
+#undef MAX_DEBUG_DUMP_CONNECTION_STR_LEN
+	}
+	OSLockRelease(psPVRSRVData->hConnectionsLock);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/connection_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/connection_server.h
new file mode 100644
index 0000000..1423f18
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/connection_server.h
@@ -0,0 +1,114 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    API for server side connection management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(_CONNECTION_SERVER_H_)
+#define _CONNECTION_SERVER_H_
+
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "handle.h"
+#include "pvrsrv_cleanup.h"
+
+/* Holds the timeout (end time) of the current time slice */
+extern IMG_UINT64 gui64TimesliceLimit;
+/* Counts the handle data entries freed during the current time slice */
+extern IMG_UINT32 gui32HandleDataFreeCounter;
+/* Maximum time for which the freeing of resources may hold the lock */
+#define CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS (3000 * 1000) /* 3ms */
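+
+/* A minimal sketch (not the actual implementation) of how deferred cleanup
+ * is expected to use the globals above; OSClockns64() is assumed to be the
+ * monotonic clock helper from the OS abstraction layer, and the two helper
+ * functions are hypothetical:
+ *
+ *     gui64TimesliceLimit = OSClockns64() +
+ *                           CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS;
+ *     gui32HandleDataFreeCounter = 0;
+ *     while (HaveHandleDataToFree() && OSClockns64() < gui64TimesliceLimit)
+ *     {
+ *         FreeOneHandleData();
+ *         gui32HandleDataFreeCounter++;
+ *     }
+ */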
+
+typedef struct _CONNECTION_DATA_
+{
+	PVRSRV_HANDLE_BASE		*psHandleBase;
+	PROCESS_HANDLE_BASE		*psProcessHandleBase;
+	struct _SYNC_CONNECTION_DATA_	*psSyncConnectionData;
+	struct _PDUMP_CONNECTION_DATA_	*psPDumpConnectionData;
+
+	/* Holds the client flags supplied at connection time */
+	IMG_UINT32			ui32ClientFlags;
+
+	/*
+	 * OS specific data can be stored via this handle.
+	 * See osconnection_server.h for a generic mechanism
+	 * for initialising this field.
+	 */
+	IMG_HANDLE			hOsPrivateData;
+
+#define PVRSRV_CONNECTION_PROCESS_NAME_LEN (16)
+	IMG_PID				pid;
+	IMG_CHAR            pszProcName[PVRSRV_CONNECTION_PROCESS_NAME_LEN];
+
+	IMG_HANDLE			hProcessStats;
+
+	IMG_HANDLE			hClientTLStream;
+
+	/* Structure which is hooked into the cleanup thread work list */
+	PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
+
+	DLLIST_NODE         sConnectionListNode;
+
+	/* List navigation for deferred freeing of connection data */
+	struct _CONNECTION_DATA_	**ppsThis;
+	struct _CONNECTION_DATA_	*psNext;
+} CONNECTION_DATA;
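+
+/* Connections are linked into PVRSRV_DATA::sConnections through
+ * sConnectionListNode; a list walker recovers the owning structure with
+ * IMG_CONTAINER_OF, as connection_server.c does when dumping the list:
+ *
+ *     CONNECTION_DATA *psData =
+ *         IMG_CONTAINER_OF(psNode, CONNECTION_DATA, sConnectionListNode);
+ */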
+
+#include "osconnection_server.h"
+
+PVRSRV_ERROR PVRSRVConnectionConnect(void **ppvPrivData, void *pvOSData);
+void PVRSRVConnectionDisconnect(void *pvPrivData);
+
+IMG_PID PVRSRVGetPurgeConnectionPid(void);
+
+void PVRSRVConnectionDebugNotify(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                 void *pvDumpDebugFile);
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVConnectionPrivateData)
+#endif
+static INLINE
+IMG_HANDLE PVRSRVConnectionPrivateData(CONNECTION_DATA *psConnection)
+{
+	return (psConnection != NULL) ? psConnection->hOsPrivateData : NULL;
+}
+
+#endif /* !defined(_CONNECTION_SERVER_H_) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/cores/rgxcore_km_1.82.4.5.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/cores/rgxcore_km_1.82.4.5.h
new file mode 100644
index 0000000..fd888b4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/cores/rgxcore_km_1.82.4.5.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.82.4.5
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_1_82_4_5_H
+#define RGXCORE_KM_1_82_4_5_H
+
+/* Automatically generated file (14/12/2018 09:01:16): Do not edit manually */
+/* CS: @2503111 */
+
+/******************************************************************************
+ * BVNC = 1.82.4.5
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 82
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 5
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* RGXCORE_KM_1_82_4_5_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/device.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/device.h
new file mode 100644
index 0000000..6f24e236
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/device.h
@@ -0,0 +1,437 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common Device header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device related function templates and defines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICE_H__
+#define __DEVICE_H__
+
+
+#include "devicemem_heapcfg.h"
+#include "mmu_common.h"
+#include "ra.h"  		/* RA_ARENA */
+#include "pvrsrv_device.h"
+#include "sync_checkpoint.h"
+#include "srvkm.h"
+#include "physheap.h"
+#include <powervr/sync_external.h>
+#include "sysinfo.h"
+#include "dllist.h"
+
+#include "rgx_bvnc_defs_km.h"
+
+#include "lock.h"
+
+#include "power.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+typedef struct _PVRSRV_POWER_DEV_TAG_ *PPVRSRV_POWER_DEV;
+
+struct SYNC_RECORD;
+
+/*********************************************************************/ /*!
+ @Function      AllocUFOCallback
+ @Description   Device specific callback for allocation of a UFO block
+
+ @Input         psDeviceNode          Pointer to device node to allocate
+                                      the UFO for.
+ @Output        ppsMemDesc            Pointer to pointer for the memdesc of
+                                      the allocation
+ @Output        pui32SyncAddr         FW Base address of the UFO block
+ @Output        puiSyncPrimBlockSize  Size of the UFO block
+
+ @Return        PVRSRV_OK if allocation was successful
+ */
+/*********************************************************************/
+typedef PVRSRV_ERROR (*AllocUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+														DEVMEM_MEMDESC **ppsMemDesc,
+														IMG_UINT32 *pui32SyncAddr,
+														IMG_UINT32 *puiSyncPrimBlockSize);
+
+/*********************************************************************/ /*!
+ @Function      FreeUFOCallback
+ @Description   Device specific callback for freeing a UFO block
+
+ @Input         psDeviceNode    Pointer to device node that the UFO block was
+                                allocated from.
+ @Input         psMemDesc       Pointer to the memdesc of the UFO block
+                                to free.
+ */
+/*********************************************************************/
+typedef void (*FreeUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+									 DEVMEM_MEMDESC *psMemDesc);
+
+typedef struct _PVRSRV_DEVICE_IDENTIFIER_
+{
+	/* Pdump memory and register bank names */
+	IMG_CHAR				*pszPDumpDevName;
+	IMG_CHAR				*pszPDumpRegName;
+
+	/* Under Linux, this is the minor number of the RenderNode corresponding to this Device */
+	IMG_INT32				i32UMIdentifier;
+} PVRSRV_DEVICE_IDENTIFIER;
+
+typedef struct _DEVICE_MEMORY_INFO_
+{
+	/* heap count.  Doesn't include additional heaps from PVRSRVCreateDeviceMemHeap */
+	IMG_UINT32				ui32HeapCount;
+
+	/* Blueprints for creating new device memory contexts */
+	IMG_UINT32              uiNumHeapConfigs;
+	DEVMEM_HEAP_CONFIG      *psDeviceMemoryHeapConfigArray;
+	DEVMEM_HEAP_BLUEPRINT   *psDeviceMemoryHeap;
+} DEVICE_MEMORY_INFO;
+
+
+typedef struct _PG_HANDLE_
+{
+	union
+	{
+		void *pvHandle;
+		IMG_UINT64 ui64Handle;
+	} u;
+	/* Order of the corresponding allocation */
+	IMG_UINT32	ui32Order;
+} PG_HANDLE;
+
+#define MMU_BAD_PHYS_ADDR (0xbadbad00badULL)
+#define DUMMY_PAGE	("DUMMY_PAGE")
+#define DEV_ZERO_PAGE	("DEV_ZERO_PAGE")
+
+typedef struct __DEFAULT_PAGE__
+{
+	/* Page handle for the allocated page (UMA/LMA) */
+	PG_HANDLE	sPageHandle;
+	POS_LOCK	psPgLock;
+	ATOMIC_T	atRefCounter;
+	/* Default page size in terms of log2 */
+	IMG_UINT32	ui32Log2PgSize;
+	IMG_UINT64	ui64PgPhysAddr;
+#if defined(PDUMP)
+	IMG_HANDLE hPdumpPg;
+#endif
+} PVRSRV_DEF_PAGE;
+
+typedef enum _PVRSRV_DEVICE_STATE_
+{
+	PVRSRV_DEVICE_STATE_UNDEFINED = 0,
+	PVRSRV_DEVICE_STATE_INIT,
+	PVRSRV_DEVICE_STATE_ACTIVE,
+	PVRSRV_DEVICE_STATE_DEINIT,
+	PVRSRV_DEVICE_STATE_BAD,
+} PVRSRV_DEVICE_STATE;
+
+typedef enum _PVRSRV_DEVICE_HEALTH_STATUS_
+{
+	PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED = 0,
+	PVRSRV_DEVICE_HEALTH_STATUS_OK,
+	PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING,
+	PVRSRV_DEVICE_HEALTH_STATUS_DEAD,
+	PVRSRV_DEVICE_HEALTH_STATUS_FAULT
+} PVRSRV_DEVICE_HEALTH_STATUS;
+
+typedef enum _PVRSRV_DEVICE_HEALTH_REASON_
+{
+	PVRSRV_DEVICE_HEALTH_REASON_NONE = 0,
+	PVRSRV_DEVICE_HEALTH_REASON_ASSERTED,
+	PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING,
+	PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS,
+	PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT,
+	PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED,
+	PVRSRV_DEVICE_HEALTH_REASON_IDLING,
+	PVRSRV_DEVICE_HEALTH_REASON_RESTARTING
+} PVRSRV_DEVICE_HEALTH_REASON;
+
+typedef PVRSRV_ERROR (*FN_CREATERAMBACKEDPMR)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+										IMG_DEVMEM_SIZE_T uiSize,
+										IMG_DEVMEM_SIZE_T uiChunkSize,
+										IMG_UINT32 ui32NumPhysChunks,
+										IMG_UINT32 ui32NumVirtChunks,
+										IMG_UINT32 *pui32MappingTable,
+										IMG_UINT32 uiLog2PageSize,
+										PVRSRV_MEMALLOCFLAGS_T uiFlags,
+										const IMG_CHAR *pszAnnotation,
+										IMG_PID uiPid,
+										PMR **ppsPMRPtr);
+
+typedef struct _PVRSRV_DEVICE_NODE_
+{
+	PVRSRV_DEVICE_IDENTIFIER	sDevId;
+
+	PVRSRV_DEVICE_STATE			eDevState;
+	ATOMIC_T					eHealthStatus; /* Holds values from PVRSRV_DEVICE_HEALTH_STATUS */
+	ATOMIC_T					eHealthReason; /* Holds values from PVRSRV_DEVICE_HEALTH_REASON */
+
+	IMG_HANDLE						*hDebugTable;
+
+	/* device specific MMU attributes */
+	MMU_DEVICEATTRIBS      *psMMUDevAttrs;
+	/* device specific MMU firmware attributes, used only by some devices */
+	MMU_DEVICEATTRIBS      *psFirmwareMMUDevAttrs;
+
+	/* lock for power state transitions */
+	POS_LOCK				hPowerLock;
+	/* current system device power state */
+	PVRSRV_SYS_POWER_STATE	eCurrentSysPowerState;
+	PPVRSRV_POWER_DEV	psPowerDev;
+
+	/*
+		callbacks the device must support:
+	*/
+
+	FN_CREATERAMBACKEDPMR pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+	PVRSRV_ERROR (*pfnDevPxAlloc)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, size_t uiSize,
+									PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+	void (*pfnDevPxFree)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, PG_HANDLE *psMemHandle);
+
+	PVRSRV_ERROR (*pfnDevPxMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, PG_HANDLE *pshMemHandle,
+								size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+								void **pvPtr);
+
+	void (*pfnDevPxUnMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+						  PG_HANDLE *psMemHandle, void *pvPtr);
+
+	PVRSRV_ERROR (*pfnDevPxClean)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+								PG_HANDLE *pshMemHandle,
+								IMG_UINT32 uiOffset,
+								IMG_UINT32 uiLength);
+
+	IMG_UINT32 uiMMUPxLog2AllocGran;
+
+	void (*pfnMMUCacheInvalidate)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+								  IMG_HANDLE hDeviceData,
+								  MMU_LEVEL eLevel,
+								  IMG_BOOL bUnmap);
+
+	PVRSRV_ERROR (*pfnMMUCacheInvalidateKick)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+	                                          IMG_UINT16 *pui16NextMMUInvalidateUpdate,
+	                                          IMG_BOOL bInterrupt);
+
+	IMG_UINT32 (*pfnMMUCacheGetInvalidateCounter)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+
+	void (*pfnDumpDebugInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+	PVRSRV_ERROR (*pfnUpdateHealthStatus)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+	                                      IMG_BOOL bIsTimerPoll);
+
+	PVRSRV_ERROR (*pfnResetHWRLogs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+	/* Method to drain device HWPerf packets from firmware buffer to host buffer */
+	PVRSRV_ERROR (*pfnServiceHWPerf)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+	PVRSRV_ERROR (*pfnDeviceVersionString)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_CHAR **ppszVersionString);
+
+	PVRSRV_ERROR (*pfnDeviceClockSpeed)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_PUINT32 pui32RGXClockSpeed);
+
+	PVRSRV_ERROR (*pfnSoftReset)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2);
+
+	PVRSRV_ERROR (*pfnAlignmentCheck)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32FWAlignChecksSize, IMG_UINT32 aui32FWAlignChecks[]);
+	IMG_BOOL	(*pfnCheckDeviceFeature)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64FeatureMask);
+
+	IMG_INT32	(*pfnGetDeviceFeatureValue)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, enum _RGX_FEATURE_WITH_VALUE_INDEX_ eFeatureIndex);
+
+	IMG_BOOL (*pfnHasFBCDCVersion31)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+	PVRSRV_DEVICE_CONFIG	*psDevConfig;
+
+	/* device post-finalise compatibility check */
+	PVRSRV_ERROR			(*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*);
+
+	/* information about the device's address space and heaps */
+	DEVICE_MEMORY_INFO		sDevMemoryInfo;
+
+	/* device's shared-virtual-memory heap max virtual address */
+	IMG_UINT64				ui64GeneralSVMHeapTopVA;
+
+	ATOMIC_T				iNumClockSpeedChanges;
+
+	/* private device information */
+	void					*pvDevice;
+
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	RA_ARENA                *psOSidSubArena[GPUVIRT_VALIDATION_NUM_OS];
+#endif
+
+
+#define PVRSRV_MAX_RA_NAME_LENGTH (50)
+	RA_ARENA				**apsLocalDevMemArenas;
+	IMG_CHAR				**apszRANames;
+	IMG_UINT32				ui32NumOfLocalMemArenas;
+
+	IMG_CHAR				szKernelFwRawRAName[RGXFW_NUM_OS][PVRSRV_MAX_RA_NAME_LENGTH];
+	IMG_CHAR				szKernelFwMainRAName[RGXFW_NUM_OS][PVRSRV_MAX_RA_NAME_LENGTH];
+	IMG_CHAR				szKernelFwConfigRAName[RGXFW_NUM_OS][PVRSRV_MAX_RA_NAME_LENGTH];
+	RA_ARENA				*psKernelFwRawMemArena[RGXFW_NUM_OS];
+	RA_ARENA				*psKernelFwMainMemArena[RGXFW_NUM_OS];
+	RA_ARENA				*psKernelFwConfigMemArena[RGXFW_NUM_OS];
+	RA_BASE_T				ui64RABase[RGXFW_NUM_OS];
+	IMG_UINT32				uiKernelFwRAIdx;
+
+	IMG_UINT32				ui32RegisteredPhysHeaps;
+	PHYS_HEAP				**papsRegisteredPhysHeaps;
+
+	/*
+	 * Pointers to the device's physical memory heap(s)
+	 * The first entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]) will be used for allocations
+	 *  where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is not set. Normally this will be an LMA heap
+	 *  (but the device configuration could specify a UMA heap here, if desired)
+	 * The second entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) will be used for allocations
+	 *  where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is set. Normally this will be a UMA heap
+	 *  (but the configuration could specify an LMA heap here, if desired)
+	 * The third entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]) will be used for allocations
+	 *  where the PVRSRV_MEMALLOCFLAG_FW_LOCAL flag is set; this is used when virtualization is enabled
+	 * The device configuration will always specify two physical heap IDs - in the event of the device
+	 *  only using one physical heap, both of these IDs will be the same, and hence both pointers below
+	 *  will also be the same; when virtualization is enabled the device configuration specifies
+	 *  three physical heap IDs, the last being for PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL allocations
+	 */
+	PHYS_HEAP				*apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+#if defined(SUPPORT_DEDICATED_FW_MEMORY)
+	PHYS_HEAP				*psDedicatedFWMemHeap;
+	RA_ARENA				*psDedicatedFWMemArena;
+#endif
+
+	struct _PVRSRV_DEVICE_NODE_	*psNext;
+	struct _PVRSRV_DEVICE_NODE_	**ppsThis;
+
+	/* Functions for notification about memory contexts */
+	PVRSRV_ERROR			(*pfnRegisterMemoryContext)(struct _PVRSRV_DEVICE_NODE_	*psDeviceNode,
+														MMU_CONTEXT					*psMMUContext,
+														IMG_HANDLE					*hPrivData);
+	void					(*pfnUnregisterMemoryContext)(IMG_HANDLE hPrivData);
+
+	/* Functions for allocation/freeing of UFOs */
+	AllocUFOBlockCallback	pfnAllocUFOBlock;	/*!< Callback for allocation of a block of UFO memory */
+	FreeUFOBlockCallback	pfnFreeUFOBlock;	/*!< Callback for freeing of a block of UFO memory */
+
+	IMG_HANDLE				hSyncServerNotify;
+	POS_LOCK				hSyncServerListLock;
+	DLLIST_NODE				sSyncServerSyncsList;
+
+	IMG_HANDLE				hSyncServerRecordNotify;
+	POS_LOCK				hSyncServerRecordLock;
+	IMG_UINT32				ui32SyncServerRecordCount;
+	IMG_UINT32				ui32SyncServerRecordCountHighWatermark;
+	DLLIST_NODE				sSyncServerRecordList;
+	struct SYNC_RECORD		*apsSyncServerRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN];
+	IMG_UINT32				uiSyncServerRecordFreeIdx;
+
+	IMG_HANDLE				hSyncCheckpointRecordNotify;
+	POS_LOCK				hSyncCheckpointRecordLock;
+	IMG_UINT32				ui32SyncCheckpointRecordCount;
+	IMG_UINT32				ui32SyncCheckpointRecordCountHighWatermark;
+	DLLIST_NODE				sSyncCheckpointRecordList;
+	struct SYNC_CHECKPOINT_RECORD	*apsSyncCheckpointRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN];
+	IMG_UINT32				uiSyncCheckpointRecordFreeIdx;
+
+	IMG_HANDLE				hSyncCheckpointNotify;
+	POS_LOCK				hSyncCheckpointListLock;
+	DLLIST_NODE				sSyncCheckpointSyncsList;
+
+	PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext;
+	PSYNC_PRIM_CONTEXT		hSyncPrimContext;
+
+	PVRSRV_CLIENT_SYNC_PRIM	*psSyncPrim;
+	/* With this sync-prim we make sure the MMU cache is flushed
+	 * before we free the page table memory */
+	PVRSRV_CLIENT_SYNC_PRIM	*psMMUCacheSyncPrim;
+	IMG_UINT16				ui16NextMMUInvalidateUpdate;
+
+	IMG_HANDLE				hCmdCompNotify;
+	IMG_HANDLE				hDbgReqNotify;
+	IMG_HANDLE				hHtbDbgReqNotify;
+	IMG_HANDLE				hAppHintDbgReqNotify;
+	IMG_HANDLE				hThreadsDbgReqNotify;
+
+	PVRSRV_DEF_PAGE			sDummyPage;
+	PVRSRV_DEF_PAGE		    sDevZeroPage;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POSWR_LOCK				hMemoryContextPageFaultNotifyListLock;
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+	DLLIST_NODE				sMemoryContextPageFaultNotifyListHead;
+
+#if defined(PDUMP)
+	/* 	device-level callback which is called when pdump.exe starts.
+	 *	Should be implemented in device-specific init code, e.g. rgxinit.c
+	 */
+	PVRSRV_ERROR			(*pfnPDumpInitDevice)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+	/* device-level callback to return pdump ID associated to a memory context */
+	IMG_UINT32				(*pfnMMUGetContextID)(IMG_HANDLE hDevMemContext);
+#endif
+
+#if defined(SUPPORT_VALIDATION) && !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POS_LOCK				hValidationLock;
+#endif
+} PVRSRV_DEVICE_NODE;
+
+/*
+ * Macros to be used instead of calling directly the pfns since these macros
+ * will expand the feature passed as argument into the bitmask/index to work
+ * with the macros defined in rgx_bvnc_defs_km.h
+ */
+#define PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, Feature) \
+		psDevNode->pfnCheckDeviceFeature(psDevNode, RGX_FEATURE_##Feature##_BIT_MASK)
+#define PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, Feature) \
+		psDevNode->pfnGetDeviceFeatureValue(psDevNode, RGX_FEATURE_##Feature##_IDX)
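+
+/* Usage sketch (the feature name below is only an example; valid names come
+ * from rgx_bvnc_defs_km.h):
+ *
+ *     if (PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, AXI_ACELITE))
+ *     {
+ *         ...take the cache-coherent path...
+ *     }
+ */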
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode,
+											   IMG_BOOL bInitSuccessful);
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR IMG_CALLCONV RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions);
+
+
+#endif /* __DEVICE_H__ */
+
+/******************************************************************************
+ End of file (device.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/device_connection.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/device_connection.h
new file mode 100644
index 0000000..72793c3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/device_connection.h
@@ -0,0 +1,115 @@
+/*************************************************************************/ /*!
+@File           device_connection.h
+@Title          Device connection
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device connection types and capability flags shared between client and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__DEVICE_CONNECTION_H__)
+#define __DEVICE_CONNECTION_H__
+
+#include "img_types.h"
+#include "img_defs.h"
+
+#if defined(__KERNEL__)
+typedef struct _PVRSRV_DEVICE_NODE_ *SHARED_DEV_CONNECTION;
+#else
+#include "connection.h"
+typedef const struct _PVRSRV_DEV_CONNECTION_ *SHARED_DEV_CONNECTION;
+#endif
+
+/******************************************************************************
+ * Device capability flags and masks
+ *
+ * Following bitmask shows allocated ranges and values for our device
+ * capability settings:
+ *
+ * 31 27  23  19  15  11   7   3  0
+ * |...|...|...|...|...|...|...|...
+ *                              *** CACHE_COHERENT                  [0x1..0x4]
+ *                                x  PVRSRV_CACHE_COHERENT_DEVICE_FLAG
+ *                               x.  PVRSRV_CACHE_COHERENT_CPU_FLAG
+ *                              x..  PVRSRV_CACHE_COHERENT_EMULATE_FLAG
+ *                         *....... NONMAPPABLE_MEMORY                  [0x80]
+ *                         x.......  PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG
+ *                            *.... PDUMP_IS_RECORDING                   [0x10]
+ *                            x....  PVRSRV_PDUMP_IS_RECORDING
+ *                      ***........ DEVMEM_SVM_ALLOC             [0x100..0x400]
+ *                        x........  PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED
+ *                       x.........  PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED
+ *                      x..........  PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL
+ *                     *........... FBCDC_V3_1             [0x800]
+ *                     x...........  FBCDC_V3_1_USED
+ * |...|...|...|...|...|...|...|...
+ *****************************************************************************/
+
+/* Flags to be passed over the bridge during connection stating whether CPU/device cache coherency is available */
+#define PVRSRV_CACHE_COHERENT_SHIFT (0)
+#define	PVRSRV_CACHE_COHERENT_DEVICE_FLAG (1U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define	PVRSRV_CACHE_COHERENT_CPU_FLAG (2U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define	PVRSRV_CACHE_COHERENT_EMULATE_FLAG (4U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define PVRSRV_CACHE_COHERENT_MASK (7U << PVRSRV_CACHE_COHERENT_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating whether CPU non-mappable memory is present */
+#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT (7)
+#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG (1U << PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating SVM allocation availability */
+#define PVRSRV_DEVMEM_SVM_ALLOC_SHIFT (8)
+#define PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED (1U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+#define PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED (2U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+#define PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL (4U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+
+#define PVRSRV_PDUMP_IS_RECORDING_SHIFT (4)
+#define PVRSRV_PDUMP_IS_RECORDING (1U << PVRSRV_PDUMP_IS_RECORDING_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating whether GPU uses FBCDC v3.1 */
+#define PVRSRV_FBCDC_V3_1_USED_SHIFT (11)
+#define PVRSRV_FBCDC_V3_1_USED (1U << PVRSRV_FBCDC_V3_1_USED_SHIFT)
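+
+/* A minimal decoding sketch (ui32Caps is assumed to be the capability word
+ * received over the bridge at connection time; it is not part of this API):
+ *
+ *     IMG_BOOL bDevCoherent  = (ui32Caps & PVRSRV_CACHE_COHERENT_DEVICE_FLAG) != 0;
+ *     IMG_BOOL bSVMSupported = (ui32Caps & PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED) != 0;
+ */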
+
+static INLINE IMG_HANDLE GetBridgeHandle(SHARED_DEV_CONNECTION hDevConnection)
+{
+#if defined(__KERNEL__)
+    return hDevConnection;
+#else
+    return hDevConnection->hServices;
+#endif
+}
+
+
+#endif /* !defined(__DEVICE_CONNECTION_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem.c
new file mode 100644
index 0000000..c603da7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem.c
@@ -0,0 +1,3093 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Front End (nominally Client side part, but now invokable
+                from server too) of device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "devicemem.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "ra.h"
+#include "osfunc.h"
+#include "osmmap.h"
+#include "devicemem_utils.h"
+#include "client_mm_bridge.h"
+#include "client_cache_bridge.h"
+#include "services_km.h"
+
+#if defined(PDUMP)
+#if defined(__KERNEL__)
+#include "pdump_km.h"
+#else
+#include "pdump_um.h"
+#endif
+#include "devicemem_pdump.h"
+#endif
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "client_ri_bridge.h"
+#endif
+#include "client_devicememhistory_bridge.h"
+#include "info_page_client.h"
+
+#include "rgx_heaps.h"
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#include "rgxdefs_km.h"
+#include "rgx_bvnc_defs_km.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "pvr_ricommon.h"
+#if defined(LINUX)
+#include "linux/kernel.h"
+#endif
+#else
+#include "rgxdefs.h"
+#endif
+
+#if defined(__KERNEL__) && defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+extern PVRSRV_ERROR RIDumpAllKM(void);
+#endif
+
+#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError)
+
+#if defined(__KERNEL__)
+/* Derive the firmware heap virtual address from the PMR's physical address */
+static
+IMG_UINT64 _GuestFWHeapVA(PMR *psPMR, PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS;
+
+	IMG_DEV_PHYADDR sDevAddr;
+	IMG_BOOL bValid;
+
+	PHYS_HEAP *psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	IMG_DEV_PHYADDR sHeapAddr;
+
+	eError = PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sHeapAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_IF_ERROR(eError, "_GuestFWHeapVA: PMRLockSysPhysAddr");
+		goto fail;
+	}
+
+#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES)
+{
+	IMG_DEV_PHYADDR sDevPAddrCorrected;
+
+	PhysHeapCpuPAddrToDevPAddr(psPhysHeap, 1, &sDevPAddrCorrected, (IMG_CPU_PHYADDR *)&sHeapAddr);
+	sHeapAddr.uiAddr = sDevPAddrCorrected.uiAddr;
+}
+#endif
+
+	eError = PMRLockSysPhysAddresses(psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_IF_ERROR(eError, "_GuestFWHeapVA: PMRLockSysPhysAddr");
+		goto fail;
+	}
+
+	eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sDevAddr, &bValid);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_IF_ERROR(eError, "_GuestFWHeapVA: PMR_DevPhysAddr");
+		eError = PMRUnlockSysPhysAddresses(psPMR);
+		PVR_LOG_IF_ERROR(eError, "_GuestFWHeapVA: PMRUnlockSysPhysAddr");
+		goto fail;
+	}
+
+	eError = PMRUnlockSysPhysAddresses(psPMR);
+	PVR_LOG_IF_ERROR(eError, "_GuestFWHeapVA: PMRUnlockSysPhysAddr");
+
+	ui64OptionalMapAddress = RGX_FIRMWARE_RAW_HEAP_BASE | (sDevAddr.uiAddr - sHeapAddr.uiAddr);
+
+	PVR_DPF((PVR_DBG_ALLOC, "%s: RGX_FIRMWARE_RAW_HEAP_BASE = 0x%"IMG_UINT64_FMTSPECx" sDevAddr.uiAddr = 0x%"IMG_UINT64_FMTSPECx" sHeapAddr.uiAddr = 0x%"IMG_UINT64_FMTSPECx" => ui64OptionalMapAddress = 0x%"IMG_UINT64_FMTSPECx,
+	         __func__, (IMG_UINT64) RGX_FIRMWARE_RAW_HEAP_BASE, sDevAddr.uiAddr, sHeapAddr.uiAddr, ui64OptionalMapAddress));
+fail:
+	return ui64OptionalMapAddress;
+}
+#endif
+
+/*****************************************************************************
+ *                    Sub allocation internals                               *
+ *****************************************************************************/
+
+static INLINE void
+_CheckAnnotationLength(const IMG_CHAR *pszAnnotation)
+{
+	IMG_UINT32 length = OSStringLength(pszAnnotation);
+
+	if (length >= DEVMEM_ANNOTATION_MAX_LEN)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: Annotation \"%s\" has been truncated to %d characters from %d characters",
+				__func__, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN - 1, length));
+	}
+}
+
+static PVRSRV_ERROR
+_AllocateDeviceMemory(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_UINT32 uiLog2Quantum,
+		IMG_DEVMEM_SIZE_T uiSize,
+		IMG_DEVMEM_SIZE_T uiChunkSize,
+		IMG_UINT32 ui32NumPhysChunks,
+		IMG_UINT32 ui32NumVirtChunks,
+		IMG_UINT32 *pui32MappingTable,
+		IMG_DEVMEM_ALIGN_T uiAlign,
+		DEVMEM_FLAGS_T uiFlags,
+		IMG_BOOL bExportable,
+		const IMG_CHAR *pszAnnotation,
+		DEVMEM_IMPORT **ppsImport)
+{
+	DEVMEM_IMPORT *psImport;
+	DEVMEM_FLAGS_T uiPMRFlags;
+	IMG_HANDLE hPMR;
+	PVRSRV_ERROR eError;
+
+	eError = _DevmemImportStructAlloc(hDevConnection,
+			&psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failAlloc;
+	}
+
+	/* Check the shift value is not too big (must fit within the bits of 1ULL) */
+	PVR_ASSERT(uiLog2Quantum < sizeof(unsigned long long) * 8);
+	/* Check the size is a multiple of the quantum */
+	PVR_ASSERT((uiSize & ((1ULL<<uiLog2Quantum)-1)) == 0);
+
+	_CheckAnnotationLength(pszAnnotation);
+
+	/* Pass only the PMR flags down */
+	uiPMRFlags = uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK;
+	eError = BridgePhysmemNewRamBackedPMR(GetBridgeHandle(hDevConnection),
+			uiSize,
+			uiChunkSize,
+			ui32NumPhysChunks,
+			ui32NumVirtChunks,
+			pui32MappingTable,
+			uiLog2Quantum,
+			uiPMRFlags,
+			OSStringNLength(pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN - 1) + 1,
+			pszAnnotation,
+			OSGetCurrentProcessID(),
+			&hPMR);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate memory for %s (%s)",
+				__func__,
+				pszAnnotation,
+				PVRSRVGETERRORSTRING(eError)));
+		goto failPMR;
+	}
+
+	_DevmemImportStructInit(psImport,
+			uiSize,
+			uiAlign,
+			uiFlags,
+			hPMR,
+			bExportable ? DEVMEM_PROPERTIES_EXPORTABLE : 0);
+
+	*ppsImport = psImport;
+	return PVRSRV_OK;
+
+	failPMR:
+	_DevmemImportDiscard(psImport);
+	failAlloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+
+/*****************************************************************************
+ *                    Sub allocation internals                               *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc,
+		IMG_UINT32 ui32AllocPageCount,
+		IMG_UINT32 *paui32AllocPageIndices,
+		IMG_UINT32 ui32FreePageCount,
+		IMG_UINT32 *pauiFreePageIndices,
+		SPARSE_MEM_RESIZE_FLAGS uiSparseFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+	DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+	SHARED_DEV_CONNECTION hDevConnection;
+	IMG_HANDLE hPMR;
+	IMG_HANDLE hSrvDevMemHeap;
+	POS_LOCK hLock;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_CPU_VIRTADDR pvCpuVAddr;
+
+	if (NULL == psImport)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Sparse memory import", __func__));
+		goto e0;
+	}
+
+	hDevConnection = psImport->hDevConnection;
+	hPMR = psImport->hPMR;
+	hLock = psImport->hLock;
+	sDevVAddr = psImport->sDeviceImport.sDevVAddr;
+	pvCpuVAddr = psImport->sCPUImport.pvCPUVAddr;
+
+	if (NULL == hDevConnection)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Bridge handle", __func__));
+		goto e0;
+	}
+
+	if (NULL == hPMR)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid PMR handle", __func__));
+		goto e0;
+	}
+
+	if ((uiSparseFlags & SPARSE_RESIZE_BOTH) && (0 == sDevVAddr.uiAddr))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Device Virtual Map", __func__));
+		goto e0;
+	}
+
+	if ((uiSparseFlags & SPARSE_MAP_CPU_ADDR) && (NULL == pvCpuVAddr))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CPU Virtual Map", __func__));
+		goto e0;
+	}
+
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SECURE)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Secure buffers currently do not support sparse changes",
+				__func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: This memory descriptor doesn't support sparse changes",
+				__func__));
+		eError = PVRSRV_ERROR_INVALID_REQUEST;
+		goto e0;
+	}
+
+	hSrvDevMemHeap = psImport->sDeviceImport.psHeap->hDevMemServerHeap;
+
+	OSLockAcquire(hLock);
+
+	eError = BridgeChangeSparseMem(GetBridgeHandle(hDevConnection),
+			hSrvDevMemHeap,
+			hPMR,
+			ui32AllocPageCount,
+			paui32AllocPageIndices,
+			ui32FreePageCount,
+			pauiFreePageIndices,
+			uiSparseFlags,
+			psImport->uiFlags,
+			sDevVAddr,
+			(IMG_UINT64)((uintptr_t)pvCpuVAddr));
+
+	OSLockRelease(hLock);
+
+	if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+	{
+		BridgeDevicememHistorySparseChange(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+				psMemDesc->psImport->hPMR,
+				psMemDesc->uiOffset,
+				psMemDesc->sDeviceMemDesc.sDevVAddr,
+				psMemDesc->uiAllocSize,
+				psMemDesc->szText,
+				DevmemGetHeapLog2PageSize(psImport->sDeviceImport.psHeap),
+				ui32AllocPageCount,
+				paui32AllocPageIndices,
+				ui32FreePageCount,
+				pauiFreePageIndices,
+				psMemDesc->ui32AllocationIndex,
+				&psMemDesc->ui32AllocationIndex);
+	}
+
+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+	if ((PVRSRV_OK == eError) && (psMemDesc->sCPUMemDesc.ui32RefCount))
+	{
+		/*
+		 * Release the CPU virtual mapping here;
+		 * the caller is expected to map the entire range again
+		 */
+		DevmemReleaseCpuVirtAddr(psMemDesc);
+	}
+#endif
+
+	e0:
+	return eError;
+}
+
+static void
+_FreeDeviceMemory(DEVMEM_IMPORT *psImport)
+{
+	_DevmemImportStructRelease(psImport);
+}
+
+static PVRSRV_ERROR
+_SubAllocImportAlloc(RA_PERARENA_HANDLE hArena,
+		RA_LENGTH_T uiSize,
+		RA_FLAGS_T _flags,
+		const IMG_CHAR *pszAnnotation,
+		/* returned data */
+		RA_BASE_T *puiBase,
+		RA_LENGTH_T *puiActualSize,
+		RA_PERISPAN_HANDLE *phImport)
+{
+	/* When suballocations need a new lump of memory, the RA calls
+	   back here.  Later, in the kernel, we must construct a new PMR
+	   and a pairing between the new lump of virtual memory and the
+	   PMR (whether or not such PMR is backed by physical memory) */
+	DEVMEM_HEAP *psHeap;
+	DEVMEM_IMPORT *psImport;
+	IMG_DEVMEM_ALIGN_T uiAlign;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32MappingTable = 0;
+	DEVMEM_FLAGS_T uiFlags = (DEVMEM_FLAGS_T) _flags;
+	IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS;
+
+	/* Per-arena private handle is, for us, the heap */
+	psHeap = hArena;
+
+	/* align to the l.s.b. of the size...  e.g. 96kiB aligned to
+	   32kiB. NB: There is an argument to say that the RA should never
+	   ask us for Non-power-of-2 size anyway, but I don't want to make
+	   that restriction arbitrarily now */
+	uiAlign = uiSize & ~(uiSize-1);
+
+	/* Technically this is only required for guest drivers due to
+	   fw heaps being pre-allocated and pre-mapped resulting in
+	   a 1:1 (i.e. virtual : physical) offset correlation but we
+	   force this behaviour for all drivers to maintain consistency
+	   (i.e. heap->VA uiAlign <= heap->PA uiLog2Quantum) */
+	if (uiAlign > (IMG_DEVMEM_ALIGN_T)(1ULL << psHeap->uiLog2Quantum))
+	{
+		uiAlign = (IMG_DEVMEM_ALIGN_T)(1ULL << psHeap->uiLog2Quantum);
+	}
+
+	/* The RA should not have invoked us with a size that is not a
+	   multiple of the quantum anyway */
+	PVR_ASSERT((uiSize & ((1ULL<<psHeap->uiLog2Quantum)-1)) == 0);
+
+	eError = _AllocateDeviceMemory(psHeap->psCtx->hDevConnection,
+			psHeap->uiLog2Quantum,
+			uiSize,
+			uiSize,
+			1,
+			1,
+			&ui32MappingTable,
+			uiAlign,
+			uiFlags,
+			IMG_FALSE,
+			"PMR sub-allocated",
+			&psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failAlloc;
+	}
+
+#if defined(PDUMP) && defined(DEBUG)
+#if defined(__KERNEL__)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_CONT,
+			"Created PMR for sub-allocations with handle ID: 0x%p Annotation: \"%s\" (PID %u)",
+			psImport->hPMR, pszAnnotation, OSGetCurrentProcessID());
+#else
+	PDUMPCOMMENTF(psHeap->psCtx->hDevConnection, PDUMP_FLAGS_CONTINUOUS,
+			"Created PMR for sub-allocations with handle ID: %p Annotation: \"%s\" (PID %u)",
+			psImport->hPMR, pszAnnotation, OSGetCurrentProcessID());
+#endif
+#else
+	PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+	{
+#if defined(__KERNEL__)
+		PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection;
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice;
+
+		PVR_ASSERT(PVRSRV_CHECK_FW_LOCAL(uiFlags));
+
+		/* If allocation is made by the Kernel from the firmware heap, account for it
+		 * under the PVR_SYS_ALLOC_PID.
+		 */
+		if ((psHeap == psDevInfo->psFirmwareMainHeap) || (psHeap == psDevInfo->psFirmwareConfigHeap))
+		{
+			eError = BridgeRIWritePMREntryWithOwner (GetBridgeHandle(psImport->hDevConnection),
+					psImport->hPMR,
+					PVR_SYS_ALLOC_PID);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntryWithOwner failed (Error=%d)", __func__, eError));
+			}
+		}
+		else
+#endif
+		{
+			eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection),
+					psImport->hPMR);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (Error=%d)", __func__, eError));
+			}
+		}
+	}
+#endif
+
+#if defined(__KERNEL__)
+	{
+		PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection;
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice;
+
+		if (((psHeap == psDevInfo->psFirmwareMainHeap) ||
+		    (psHeap == psDevInfo->psFirmwareConfigHeap)) &&
+		    PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+		{
+			ui64OptionalMapAddress = _GuestFWHeapVA(psImport->hPMR, psDevNode);
+		}
+	}
+#endif
+
+	/*
+		Suballocations always get mapped into the device as we need to
+		key the RA off something, and since we can't export suballocations
+		there is no valid reason to request an allocation and not map it
+	 */
+	eError = _DevmemImportStructDevMap(psHeap,
+			IMG_TRUE,
+			psImport,
+			ui64OptionalMapAddress);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMap;
+	}
+
+	/* Mark this import struct as zeroed so we can save some PDump LDBs
+	 * and do not have to CPU map + memset() */
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+	{
+		psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_ZEROED;
+	}
+	else if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)
+	{
+		psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_POISONED;
+	}
+	psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_CLEAN;
+
+	*puiBase = psImport->sDeviceImport.sDevVAddr.uiAddr;
+	*puiActualSize = uiSize;
+	*phImport = psImport;
+
+	return PVRSRV_OK;
+
+	/* error exit paths follow */
+
+	failMap:
+	_FreeDeviceMemory(psImport);
+	failAlloc:
+
+	return eError;
+}
+
+static void
+_SubAllocImportFree(RA_PERARENA_HANDLE hArena,
+		RA_BASE_T uiBase,
+		RA_PERISPAN_HANDLE hImport)
+{
+	DEVMEM_IMPORT *psImport = hImport;
+
+	PVR_ASSERT(psImport != NULL);
+	PVR_ASSERT(hArena == psImport->sDeviceImport.psHeap);
+	PVR_ASSERT(uiBase == psImport->sDeviceImport.sDevVAddr.uiAddr);
+
+	_DevmemImportStructDevUnmap(psImport);
+	_DevmemImportStructRelease(psImport);
+}
+
+/*****************************************************************************
+ *                    Devmem context internals                               *
+ *****************************************************************************/
+
+static PVRSRV_ERROR
+_PopulateContextFromBlueprint(struct _DEVMEM_CONTEXT_ *psCtx,
+		DEVMEM_HEAPCFGID uiHeapBlueprintID)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eError2;
+	struct _DEVMEM_HEAP_ **ppsHeapArray;
+	IMG_UINT32 uiNumHeaps;
+	IMG_UINT32 uiHeapsToUnwindOnError;
+	IMG_UINT32 uiHeapIndex;
+	IMG_DEV_VIRTADDR sDevVAddrBase;
+	IMG_CHAR aszHeapName[DEVMEM_HEAPNAME_MAXLENGTH];
+	IMG_DEVMEM_SIZE_T uiHeapLength;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2DataPageSize;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2ImportAlignment;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2TilingStrideFactor;
+
+	eError = DevmemHeapCount(psCtx->hDevConnection,
+			uiHeapBlueprintID,
+			&uiNumHeaps);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	if (uiNumHeaps == 0)
+	{
+		ppsHeapArray = NULL;
+	}
+	else
+	{
+		ppsHeapArray = OSAllocMem(sizeof(*ppsHeapArray) * uiNumHeaps);
+		if (ppsHeapArray == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+	}
+
+	uiHeapsToUnwindOnError = 0;
+
+	for (uiHeapIndex = 0; uiHeapIndex < uiNumHeaps; uiHeapIndex++)
+	{
+		eError = DevmemHeapDetails(psCtx->hDevConnection,
+				uiHeapBlueprintID,
+				uiHeapIndex,
+				&aszHeapName[0],
+				sizeof(aszHeapName),
+				&sDevVAddrBase,
+				&uiHeapLength,
+				&uiLog2DataPageSize,
+				&uiLog2ImportAlignment,
+				&uiLog2TilingStrideFactor);
+		if (eError != PVRSRV_OK)
+		{
+			goto e1;
+		}
+
+		eError = DevmemCreateHeap(psCtx,
+				sDevVAddrBase,
+				uiHeapLength,
+				uiLog2DataPageSize,
+				uiLog2ImportAlignment,
+				uiLog2TilingStrideFactor,
+				aszHeapName,
+				uiHeapBlueprintID,
+				&ppsHeapArray[uiHeapIndex]);
+		if (eError != PVRSRV_OK)
+		{
+			goto e1;
+		}
+
+		uiHeapsToUnwindOnError = uiHeapIndex + 1;
+	}
+
+	psCtx->uiAutoHeapCount = uiNumHeaps;
+	psCtx->ppsAutoHeapArray = ppsHeapArray;
+
+	PVR_ASSERT(psCtx->uiNumHeaps >= psCtx->uiAutoHeapCount);
+	PVR_ASSERT(psCtx->uiAutoHeapCount == uiNumHeaps);
+
+	return PVRSRV_OK;
+
+	/* error exit paths */
+	e1:
+	for (uiHeapIndex = 0; uiHeapIndex < uiHeapsToUnwindOnError; uiHeapIndex++)
+	{
+		eError2 = DevmemDestroyHeap(ppsHeapArray[uiHeapIndex]);
+		PVR_ASSERT(eError2 == PVRSRV_OK);
+	}
+
+	if (uiNumHeaps != 0)
+	{
+		OSFreeMem(ppsHeapArray);
+	}
+
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static PVRSRV_ERROR
+_UnpopulateContextFromBlueprint(struct _DEVMEM_CONTEXT_ *psCtx)
+{
+	PVRSRV_ERROR eReturn = PVRSRV_OK;
+	PVRSRV_ERROR eError2;
+	IMG_UINT32 uiHeapIndex;
+	IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__)
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		bDoCheck = IMG_FALSE;
+	}
+#endif
+
+	for (uiHeapIndex = 0; uiHeapIndex < psCtx->uiAutoHeapCount; uiHeapIndex++)
+	{
+		if (!psCtx->ppsAutoHeapArray[uiHeapIndex])
+		{
+			continue;
+		}
+
+		eError2 = DevmemDestroyHeap(psCtx->ppsAutoHeapArray[uiHeapIndex]);
+		if (eError2 != PVRSRV_OK)
+		{
+			eReturn = eError2;
+		}
+		else
+		{
+			psCtx->ppsAutoHeapArray[uiHeapIndex] = NULL;
+		}
+	}
+
+	if ((!bDoCheck || (eReturn == PVRSRV_OK)) && psCtx->ppsAutoHeapArray)
+	{
+		OSFreeMem(psCtx->ppsAutoHeapArray);
+		psCtx->ppsAutoHeapArray = NULL;
+		psCtx->uiAutoHeapCount = 0;
+	}
+
+	return eReturn;
+}
+
+static PVRSRV_ERROR
+_AllocateMCUFenceAddress(struct _DEVMEM_CONTEXT_ *psCtx)
+{
+	PVRSRV_ERROR		eError;
+	DEVMEM_HEAP			*psGeneralHeap;
+	IMG_DEV_VIRTADDR	sTempMCUFenceAddr;
+
+	eError = DevmemFindHeapByName(psCtx, RGX_GENERAL_HEAP_IDENT, &psGeneralHeap);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: General Heap not found (%s)", __func__, GET_ERROR_STRING(eError)));
+		goto e0;
+	}
+
+	/* MCUFence: Fixed address reserved per Memory Context */
+	eError = DevmemAllocate(psGeneralHeap,
+			sizeof(IMG_UINT32),
+			RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE,
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+			"MCUFence",
+			&psCtx->psMCUFenceMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate MCU fence word (%s)", __func__, GET_ERROR_STRING(eError)));
+		goto e0;
+	}
+
+	/* This is the first memory allocation on the General Heap, so its virtual address
+	 * is always equal to the heap base address. Storing this address separately is not required. */
+	eError = DevmemMapToDevice(psCtx->psMCUFenceMemDesc, psGeneralHeap, &sTempMCUFenceAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map MCU fence word (%s)", __func__, GET_ERROR_STRING(eError)));
+		goto e1;
+	}
+	else if (sTempMCUFenceAddr.uiAddr != psGeneralHeap->sBaseAddress.uiAddr)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: MCU_FENCE address (%" IMG_UINT64_FMTSPECx ") "
+				 "not at the start of General Heap (%" IMG_UINT64_FMTSPECx ")",
+				__func__, sTempMCUFenceAddr.uiAddr,
+				 psGeneralHeap->sBaseAddress.uiAddr));
+		eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED;
+		goto e1;
+	}
+
+	e0:
+	return eError;
+
+	e1:
+	DevmemFree(psCtx->psMCUFenceMemDesc);
+	psCtx->psMCUFenceMemDesc = NULL;
+	return eError;
+}
+
+/*****************************************************************************
+ *                    Devmem context functions                               *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection,
+		DEVMEM_HEAPCFGID uiHeapBlueprintID,
+		IMG_BOOL bMCUFenceAllocation,
+		DEVMEM_CONTEXT **ppsCtxPtr)
+{
+	PVRSRV_ERROR		eError;
+	DEVMEM_CONTEXT		*psCtx;
+	/* handle to the server-side counterpart of the device memory
+	   context (specifically, for handling mapping to device MMU) */
+	IMG_HANDLE			hDevMemServerContext;
+	IMG_HANDLE			hPrivData;
+	IMG_BOOL			bHeapCfgMetaId = (uiHeapBlueprintID == DEVMEM_HEAPCFG_META);
+
+	if (ppsCtxPtr == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	psCtx = OSAllocMem(sizeof *psCtx);
+	if (psCtx == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	psCtx->uiNumHeaps = 0;
+
+	psCtx->hDevConnection = hDevConnection;
+
+	/* Create (server-side) Device Memory context */
+	eError = BridgeDevmemIntCtxCreate(GetBridgeHandle(psCtx->hDevConnection),
+			bHeapCfgMetaId,
+			&hDevMemServerContext,
+			&hPrivData,
+			&psCtx->ui32CPUCacheLineSize);
+	if (eError != PVRSRV_OK) goto e1;
+
+	psCtx->hDevMemServerContext = hDevMemServerContext;
+	psCtx->hPrivData = hPrivData;
+
+	/* automagic heap creation */
+	psCtx->uiAutoHeapCount = 0;
+
+	eError = _PopulateContextFromBlueprint(psCtx, uiHeapBlueprintID);
+	if (eError != PVRSRV_OK) goto e2;
+
+	/* Allocate a word at the start of the General heap to be used as MCU_FENCE Address */
+	if (uiHeapBlueprintID == DEVMEM_HEAPCFG_FORCLIENTS && bMCUFenceAllocation)
+	{
+		eError = _AllocateMCUFenceAddress(psCtx);
+		if (eError != PVRSRV_OK) goto e2;
+	}
+	else
+	{
+		psCtx->psMCUFenceMemDesc = NULL;
+	}
+
+	*ppsCtxPtr = psCtx;
+
+	PVR_ASSERT(psCtx->uiNumHeaps == psCtx->uiAutoHeapCount);
+	return PVRSRV_OK;
+
+	/* error exit paths follow */
+
+	e2:
+	PVR_ASSERT(psCtx->uiAutoHeapCount == 0);
+	PVR_ASSERT(psCtx->uiNumHeaps == 0);
+	BridgeDevmemIntCtxDestroy(GetBridgeHandle(psCtx->hDevConnection), hDevMemServerContext);
+
+	e1:
+	OSFreeMem(psCtx);
+
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
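+
+/* Typical life cycle (an illustrative sketch; error handling elided):
+ *
+ *     DEVMEM_CONTEXT *psCtx;
+ *     eError = DevmemCreateContext(hDevConnection,
+ *                                  DEVMEM_HEAPCFG_FORCLIENTS,
+ *                                  IMG_TRUE,  // allocate the MCU fence word
+ *                                  &psCtx);
+ *     // ... allocate and map memory against psCtx ...
+ *     eError = DevmemDestroyContext(psCtx);
+ */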
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
+		IMG_HANDLE *hPrivData)
+{
+	PVRSRV_ERROR eError;
+
+	if ((psCtx == NULL) || (hPrivData == NULL))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	*hPrivData = psCtx->hPrivData;
+	return PVRSRV_OK;
+
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx)
+{
+	PVRSRV_ERROR eError;
+
+	if (psCtx == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+	return PVRSRV_OK;
+
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemFindHeapByName(const struct _DEVMEM_CONTEXT_ *psCtx,
+		const IMG_CHAR *pszHeapName,
+		struct _DEVMEM_HEAP_ **ppsHeapRet)
+{
+	IMG_UINT32 uiHeapIndex;
+
+	/* N.B.  This func is only useful for finding "automagic" heaps by name */
+	for (uiHeapIndex = 0;
+			uiHeapIndex < psCtx->uiAutoHeapCount;
+			uiHeapIndex++)
+	{
+		if (!OSStringCompare(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName, pszHeapName))
+		{
+			*ppsHeapRet = psCtx->ppsAutoHeapArray[uiHeapIndex];
+			return PVRSRV_OK;
+		}
+	}
+
+	return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx)
+{
+	PVRSRV_ERROR eError;
+	IMG_BOOL bDoCheck = IMG_TRUE;
+
+#if defined(__KERNEL__)
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		bDoCheck = IMG_FALSE;
+	}
+#endif
+
+	if (psCtx == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (psCtx->psMCUFenceMemDesc != NULL)
+	{
+		DevmemReleaseDevVirtAddr(psCtx->psMCUFenceMemDesc);
+		DevmemFree(psCtx->psMCUFenceMemDesc);
+	}
+
+	eError = _UnpopulateContextFromBlueprint(psCtx);
+	if (bDoCheck && eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: _UnpopulateContextFromBlueprint failed (%d) leaving %d heaps",
+				__func__, eError, psCtx->uiNumHeaps));
+		goto e1;
+	}
+
+	eError = BridgeDevmemIntCtxDestroy(GetBridgeHandle(psCtx->hDevConnection),
+			psCtx->hDevMemServerContext);
+	if (bDoCheck && eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: BridgeDevmemIntCtxDestroy failed (%d)",
+				__func__, eError));
+		goto e1;
+	}
+
+	/* should be no more heaps left */
+	if (bDoCheck && psCtx->uiNumHeaps)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Additional heaps remain in DEVMEM_CONTEXT",
+				__func__));
+		eError = PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT;
+		goto e1;
+	}
+
+	OSDeviceMemSet(psCtx, 0, sizeof(*psCtx));
+	OSFreeMem(psCtx);
+
+	e1:
+	return eError;
+}
+
+/*****************************************************************************
+ *                 Devmem heap query functions                               *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_UINT32 *puiNumHeapConfigsOut)
+{
+	PVRSRV_ERROR eError;
+	eError = BridgeHeapCfgHeapConfigCount(GetBridgeHandle(hDevConnection),
+			puiNumHeapConfigsOut);
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_UINT32 uiHeapConfigIndex,
+		IMG_UINT32 *puiNumHeapsOut)
+{
+	PVRSRV_ERROR eError;
+	eError = BridgeHeapCfgHeapCount(GetBridgeHandle(hDevConnection),
+			uiHeapConfigIndex,
+			puiNumHeapsOut);
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_UINT32 uiHeapConfigIndex,
+		IMG_CHAR *pszConfigNameOut,
+		IMG_UINT32 uiConfigNameBufSz)
+{
+	PVRSRV_ERROR eError;
+	eError = BridgeHeapCfgHeapConfigName(GetBridgeHandle(hDevConnection),
+			uiHeapConfigIndex,
+			uiConfigNameBufSz,
+			pszConfigNameOut);
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_UINT32 uiHeapConfigIndex,
+		IMG_UINT32 uiHeapIndex,
+		IMG_CHAR *pszHeapNameOut,
+		IMG_UINT32 uiHeapNameBufSz,
+		IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+		IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+		IMG_UINT32 *puiLog2DataPageSizeOut,
+		IMG_UINT32 *puiLog2ImportAlignmentOut,
+		IMG_UINT32 *puiLog2TilingStrideFactor)
+{
+	PVRSRV_ERROR eError;
+
+	eError = BridgeHeapCfgHeapDetails(GetBridgeHandle(hDevConnection),
+			uiHeapConfigIndex,
+			uiHeapIndex,
+			uiHeapNameBufSz,
+			pszHeapNameOut,
+			psDevVAddrBaseOut,
+			puiHeapLengthOut,
+			puiLog2DataPageSizeOut,
+			puiLog2ImportAlignmentOut,
+			puiLog2TilingStrideFactor);
+
+	VG_MARK_INITIALIZED(pszHeapNameOut, uiHeapNameBufSz);
+
+	return eError;
+}
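+
+/* Illustrative enumeration of the available heap configurations (a sketch;
+ * the buffer size is an arbitrary choice for the example):
+ *
+ *     IMG_UINT32 uiNumCfgs, uiNumHeaps, i, j;
+ *     IMG_CHAR aszName[64];
+ *     IMG_DEV_VIRTADDR sBase; IMG_DEVMEM_SIZE_T uiLen;
+ *     IMG_UINT32 uiLog2Page, uiLog2Align, uiLog2Stride;
+ *
+ *     DevmemHeapConfigCount(hDevConnection, &uiNumCfgs);
+ *     for (i = 0; i < uiNumCfgs; i++)
+ *     {
+ *         DevmemHeapCount(hDevConnection, i, &uiNumHeaps);
+ *         for (j = 0; j < uiNumHeaps; j++)
+ *         {
+ *             DevmemHeapDetails(hDevConnection, i, j,
+ *                               aszName, sizeof(aszName),
+ *                               &sBase, &uiLen, &uiLog2Page,
+ *                               &uiLog2Align, &uiLog2Stride);
+ *         }
+ *     }
+ */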
+
+/*****************************************************************************
+ *                    Devmem heap functions                                  *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapInt(DEVMEM_HEAP *psHeap,
+		IMG_HANDLE *phDevmemHeap)
+{
+	if (psHeap == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	*phDevmemHeap = psHeap->hDevMemServerHeap;
+	return PVRSRV_OK;
+}
+
+/* See devicemem.h for important notes regarding the arguments
+   to this function */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemCreateHeap(DEVMEM_CONTEXT *psCtx,
+		IMG_DEV_VIRTADDR sBaseAddress,
+		IMG_DEVMEM_SIZE_T uiLength,
+		IMG_UINT32 ui32Log2Quantum,
+		IMG_UINT32 ui32Log2ImportAlignment,
+		IMG_UINT32 ui32Log2TilingStrideFactor,
+		const IMG_CHAR *pszName,
+		DEVMEM_HEAPCFGID uiHeapBlueprintID,
+		DEVMEM_HEAP **ppsHeapPtr)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_ERROR eError2;
+	DEVMEM_HEAP *psHeap;
+	/* handle to the server-side counterpart of the device memory heap
+	   (specifically, for handling mapping to the device MMU) */
+	IMG_HANDLE hDevMemServerHeap;
+	IMG_BOOL bRANoSplit = IMG_FALSE;
+
+	IMG_CHAR aszBuf[100];
+	IMG_CHAR *pszStr;
+
+	if (ppsHeapPtr == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	psHeap = OSAllocMem(sizeof *psHeap);
+	if (psHeap == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	/* Need to keep local copy of heap name, so caller may free theirs */
+	pszStr = OSAllocMem(OSStringLength(pszName)+1);
+	if (pszStr == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+	OSStringCopy(pszStr, pszName);
+	psHeap->pszName = pszStr;
+
+	psHeap->uiSize = uiLength;
+	psHeap->sBaseAddress = sBaseAddress;
+	OSAtomicWrite(&psHeap->hImportCount, 0);
+
+	OSSNPrintf(aszBuf, sizeof(aszBuf),
+			"NDM heap '%s' (suballocs) ctx:%p",
+			pszName, psCtx);
+	pszStr = OSAllocMem(OSStringLength(aszBuf)+1);
+	if (pszStr == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e2;
+	}
+	OSStringCopy(pszStr, aszBuf);
+	psHeap->pszSubAllocRAName = pszStr;
+
+#if defined(PDUMP)
+	/* The META heap is shared globally so a single physical memory import
+	 * may be used to satisfy allocations of different processes.
+	 * This is problematic when PDumping because the physical memory
+	 * import used to satisfy a new allocation may actually have been
+	 * imported (and thus the PDump MALLOC generated) before the PDump
+	 * client was started, leading to the MALLOC being missing.
+	 *
+	 * This is solved by disabling splitting of imports for the META physmem
+	 * RA, meaning that every firmware allocation gets its own import, thus
+	 * ensuring the MALLOC is present for every allocation made within the
+	 * PDump capture range.
+	 */
+	if (uiHeapBlueprintID == DEVMEM_HEAPCFG_META)
+	{
+		bRANoSplit = IMG_TRUE;
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(uiHeapBlueprintID);
+#endif
+
+	psHeap->psSubAllocRA = RA_Create(psHeap->pszSubAllocRAName,
+			/* Subsequent imports: */
+			ui32Log2Quantum,
+			RA_LOCKCLASS_2,
+			_SubAllocImportAlloc,
+			_SubAllocImportFree,
+			(RA_PERARENA_HANDLE) psHeap,
+			bRANoSplit);
+	if (psHeap->psSubAllocRA == NULL)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+		goto e3;
+	}
+
+	psHeap->uiLog2ImportAlignment = ui32Log2ImportAlignment;
+	psHeap->uiLog2TilingStrideFactor = ui32Log2TilingStrideFactor;
+	psHeap->uiLog2Quantum = ui32Log2Quantum;
+
+	if (!OSStringCompare(pszName, RGX_GENERAL_SVM_HEAP_IDENT))
+	{
+		/* The SVM heap normally starts out as this type though
+		   it may transition to DEVMEM_HEAP_TYPE_USER_MANAGED
+		   on platforms with more processor virtual address
+		   bits than device virtual address bits */
+		psHeap->eHeapType = DEVMEM_HEAP_TYPE_KERNEL_MANAGED;
+	}
+	else
+	{
+		psHeap->eHeapType = DEVMEM_HEAP_TYPE_UNKNOWN;
+	}
+
+	OSSNPrintf(aszBuf, sizeof(aszBuf),
+			"NDM heap '%s' (QVM) ctx:%p",
+			pszName, psCtx);
+	pszStr = OSAllocMem(OSStringLength(aszBuf)+1);
+	if (pszStr == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e4;
+	}
+	OSStringCopy(pszStr, aszBuf);
+	psHeap->pszQuantizedVMRAName = pszStr;
+
+	psHeap->psQuantizedVMRA = RA_Create(psHeap->pszQuantizedVMRAName,
+			/* Subsequent import: */
+			0, RA_LOCKCLASS_1, NULL, NULL,
+			(RA_PERARENA_HANDLE) psHeap,
+			IMG_FALSE);
+	if (psHeap->psQuantizedVMRA == NULL)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+		goto e5;
+	}
+
+	if (!RA_Add(psHeap->psQuantizedVMRA,
+			(RA_BASE_T)sBaseAddress.uiAddr,
+			(RA_LENGTH_T)uiLength,
+			(RA_FLAGS_T)0, /* This RA doesn't use or need flags */
+			NULL /* per ispan handle */))
+	{
+		RA_Delete(psHeap->psQuantizedVMRA);
+		eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+		goto e5;
+	}
+
+	psHeap->psCtx = psCtx;
+
+	/* Create server-side counterpart of Device Memory heap */
+	eError = BridgeDevmemIntHeapCreate(GetBridgeHandle(psCtx->hDevConnection),
+			psCtx->hDevMemServerContext,
+			sBaseAddress,
+			uiLength,
+			ui32Log2Quantum,
+			&hDevMemServerHeap);
+	if (eError != PVRSRV_OK)
+	{
+		goto e6;
+	}
+	psHeap->hDevMemServerHeap = hDevMemServerHeap;
+
+	eError = OSLockCreate(&psHeap->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto e7;
+	}
+
+	psHeap->psCtx->uiNumHeaps++;
+	*ppsHeapPtr = psHeap;
+
+#if defined(PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING)
+	psHeap->psMemDescList = NULL;
+#endif /* PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING */
+
+	return PVRSRV_OK;
+
+	/* error exit paths */
+	e7:
+	eError2 = BridgeDevmemIntHeapDestroy(GetBridgeHandle(psCtx->hDevConnection),
+			psHeap->hDevMemServerHeap);
+	PVR_ASSERT (eError2 == PVRSRV_OK);
+	e6:
+	if (psHeap->psQuantizedVMRA)
+		RA_Delete(psHeap->psQuantizedVMRA);
+	e5:
+	if (psHeap->pszQuantizedVMRAName)
+		OSFreeMem(psHeap->pszQuantizedVMRAName);
+	e4:
+	RA_Delete(psHeap->psSubAllocRA);
+	e3:
+	OSFreeMem(psHeap->pszSubAllocRAName);
+	e2:
+	OSFreeMem(psHeap->pszName);
+	e1:
+	OSFreeMem(psHeap);
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
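+
+/* Illustrative sketch: creating a heap from blueprint details obtained via
+ * DevmemHeapDetails() above (error handling elided):
+ *
+ *     DEVMEM_HEAP *psHeap;
+ *     eError = DevmemCreateHeap(psCtx, sBase, uiLen,
+ *                               uiLog2Page, uiLog2Align, uiLog2Stride,
+ *                               aszName, uiHeapBlueprintID, &psHeap);
+ *     // ... suballocate from psHeap ...
+ *     eError = DevmemDestroyHeap(psHeap);
+ */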
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapBaseDevVAddr(struct _DEVMEM_HEAP_ *psHeap,
+		IMG_DEV_VIRTADDR *pDevVAddr)
+{
+	if (psHeap == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pDevVAddr = psHeap->sBaseAddress;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum,
+		IMG_DEVMEM_SIZE_T *puiSize,
+		IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	IMG_DEVMEM_SIZE_T uiSize = *puiSize;
+	IMG_DEVMEM_ALIGN_T uiAlign = *puiAlign;
+
+	/* Just in case someone changes definition of IMG_DEVMEM_ALIGN_T. */
+	static_assert(sizeof(unsigned long long) == sizeof(uiAlign),
+	              "invalid uiAlign size");
+	/* This value is used for shifting so it cannot be greater than number
+	 * of bits in unsigned long long (sizeof(1ULL)). Using greater value is
+	 * undefined behaviour. */
+	if (uiLog2Quantum >= sizeof(unsigned long long) * 8)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if ((1ULL << uiLog2Quantum) > uiAlign)
+	{
+		uiAlign = 1ULL << uiLog2Quantum;
+	}
+	uiSize = (uiSize + uiAlign - 1) & ~(uiAlign - 1);
+
+	*puiSize = uiSize;
+	*puiAlign = uiAlign;
+
+	return PVRSRV_OK;
+}
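+
+/* Worked example (illustrative): with uiLog2Quantum = 12 (4 KiB pages),
+ * uiSize = 0x1234 and uiAlign = 0x100 become
+ * uiAlign = 0x1000 (raised to the quantum) and
+ * uiSize  = (0x1234 + 0xFFF) & ~0xFFF = 0x2000. */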
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemDestroyHeap(DEVMEM_HEAP *psHeap)
+{
+	PVRSRV_ERROR eError;
+	IMG_INT uiImportCount;
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__)
+	if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		bDoCheck = IMG_FALSE;
+	}
+#endif
+#endif
+
+	if (psHeap == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	uiImportCount = OSAtomicRead(&psHeap->hImportCount);
+	if (uiImportCount > 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%d leaked import(s) remain in heap '%s'", uiImportCount, psHeap->pszName));
+#if defined(__KERNEL__)
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+		PVR_DPF((PVR_DBG_ERROR, "Details of remaining allocated device memory (for all processes):"));
+		RIDumpAllKM();
+#else
+		PVR_DPF((PVR_DBG_ERROR, "Compile with PVRSRV_ENABLE_GPU_MEMORY_INFO=1 to get a full "
+				"list of all driver allocations."));
+#endif
+#endif
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+		if (bDoCheck)
+#endif
+		{
+			return PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+		}
+	}
+
+	eError = BridgeDevmemIntHeapDestroy(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+			psHeap->hDevMemServerHeap);
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	if (bDoCheck)
+#endif
+	{
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: BridgeDevmemIntHeapDestroy failed (%d)",
+					__func__, eError));
+			return eError;
+		}
+	}
+
+	PVR_ASSERT(psHeap->psCtx->uiNumHeaps > 0);
+	psHeap->psCtx->uiNumHeaps--;
+
+	OSLockDestroy(psHeap->hLock);
+
+	if (psHeap->psQuantizedVMRA)
+	{
+		RA_Delete(psHeap->psQuantizedVMRA);
+	}
+	if (psHeap->pszQuantizedVMRAName)
+	{
+		OSFreeMem(psHeap->pszQuantizedVMRAName);
+	}
+
+	RA_Delete(psHeap->psSubAllocRA);
+	OSFreeMem(psHeap->pszSubAllocRAName);
+
+	OSFreeMem(psHeap->pszName);
+
+	OSDeviceMemSet(psHeap, 0, sizeof(*psHeap));
+	OSFreeMem(psHeap);
+
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ *                Devmem allocation/free functions                           *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier,
+		DEVMEM_HEAP *psHeap,
+		IMG_DEVMEM_SIZE_T uiSize,
+		IMG_DEVMEM_ALIGN_T uiAlign,
+		DEVMEM_FLAGS_T uiFlags,
+		const IMG_CHAR *pszText,
+		DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	RA_BASE_T uiAllocatedAddr = 0;
+	RA_LENGTH_T uiAllocatedSize;
+	RA_PERISPAN_HANDLE hImport; /* the "import" from which this sub-allocation came */
+	PVRSRV_ERROR eError;
+	DEVMEM_MEMDESC *psMemDesc = NULL;
+	IMG_DEVMEM_OFFSET_T uiOffset = 0;
+	DEVMEM_IMPORT *psImport;
+	IMG_UINT32 ui32CPUCacheLineSize;
+	void *pvAddr = NULL;
+
+	IMG_BOOL bImportClean;
+	IMG_BOOL bCPUCleanFlag = PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags);
+	IMG_BOOL bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags);
+	IMG_BOOL bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags);
+	IMG_BOOL bCPUCached = (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) ||
+			PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags));
+	IMG_BOOL bGPUCached = (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) ||
+			PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags));
+	PVRSRV_CACHE_OP eOp = PVRSRV_CACHE_OP_INVALIDATE;
+	IMG_UINT32	ui32CacheLineSize = 0;
+
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+	{
+		/* Deferred Allocation not supported on SubAllocs*/
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+	if (psHeap == NULL || psHeap->psCtx == NULL || ppsMemDescPtr == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+#if defined(__KERNEL__)
+	{
+		/* The hDevConnection holds two different types of pointers depending on the
+		 * address space in which it is used.
+		 * In this instance the variable points to the device node in server */
+		PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection;
+		ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, SLC_CACHE_LINE_SIZE_BITS));
+	}
+#else
+	ui32CacheLineSize = ROGUE_CACHE_LINE_SIZE;
+#endif
+
+	/* The following logic makes sure that any cached memory is aligned to both the CPU and GPU.
+	 * To be aligned on both you have to take the Lowest Common Multiple (LCM) of the cache line sizes of each.
+	 * As the possibilities are all powers of 2 then simply the largest number can be picked as the LCM.
+	 * Therefore this algorithm just picks the highest from the CPU, GPU and given alignments.
+	 */
+	ui32CPUCacheLineSize = psHeap->psCtx->ui32CPUCacheLineSize;
+	/* If the CPU cache line size is larger than the alignment given then it is the lowest common multiple
+	 * Also checking if the allocation is going to be cached on the CPU
+	 * Currently there is no check for the validity of the cache coherent option.
+	 * In this case, the alignment could be applied but the mode could still fall back to uncached.
+	 */
+	if (ui32CPUCacheLineSize > uiAlign && bCPUCached)
+	{
+		uiAlign = ui32CPUCacheLineSize;
+	}
+
+	/* If the GPU cache line size is larger than the alignment given then it is the lowest common multiple
+	 * Also checking if the allocation is going to be cached on the GPU via checking for any of the cached options.
+	 * Currently there is no check for the validity of the cache coherent option.
+	 * In this case, the alignment could be applied but the mode could still fall back to uncached.
+	 */
+	if (ui32CacheLineSize > uiAlign && bGPUCached)
+	{
+		uiAlign = ui32CacheLineSize;
+	}
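+
+	/* Example (illustrative): with a 64-byte CPU cache line, a 128-byte GPU
+	 * (SLC) cache line and a requested uiAlign of 16, both caches enabled
+	 * raise uiAlign to 128, the LCM of the three power-of-two values. */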
+
+	eError = _DevmemValidateParams(uiSize,
+			uiAlign,
+			&uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto failParams;
+	}
+
+	eError = _DevmemMemDescAlloc(&psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMemDescAlloc;
+	}
+
+	/* No request for exportable memory so use the RA */
+	eError = RA_Alloc(psHeap->psSubAllocRA,
+			uiSize,
+			uiPreAllocMultiplier,
+			uiFlags,
+			uiAlign,
+			pszText,
+			&uiAllocatedAddr,
+			&uiAllocatedSize,
+			&hImport);
+	if (PVRSRV_OK != eError)
+	{
+		goto failDeviceMemAlloc;
+	}
+
+	psImport = hImport;
+
+	/* This assignment assumes the RA returns an hImport that suballocations
+	 * can be made from when uiSize is NOT a multiple of the heap page size.
+	 *
+	 * So we check whether uiSize is a page multiple and mark the import as
+	 * exportable if it is (and no pre-allocation multiplier was requested).
+	 */
+	if (!(uiSize & ((1ULL << psHeap->uiLog2Quantum) - 1)) &&
+	    (uiPreAllocMultiplier == RA_NO_IMPORT_MULTIPLIER))
+	{
+		psImport->uiProperties |= DEVMEM_PROPERTIES_EXPORTABLE;
+	}
+	psImport->uiProperties |= DEVMEM_PROPERTIES_SUBALLOCATABLE;
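+	/* Example (illustrative): with uiLog2Quantum = 12, a 0x2000-byte request
+	 * (a page multiple, no pre-alloc multiplier) is marked EXPORTABLE, while
+	 * a 0x100-byte request shares its import and stays suballocation-only. */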
+
+	uiOffset = uiAllocatedAddr - psImport->sDeviceImport.sDevVAddr.uiAddr;
+
+#if defined(PDUMP) && defined(DEBUG)
+#if defined(__KERNEL__)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_CONT,
+			"Suballocated %u Bytes for \"%s\" from PMR with handle ID: 0x%p (PID %u)",
+			(IMG_UINT32) uiSize, pszText, psImport->hPMR, OSGetCurrentProcessID());
+#else
+	PDUMPCOMMENTF(psHeap->psCtx->hDevConnection, PDUMP_FLAGS_CONTINUOUS,
+			"Suballocated %u Bytes for \"%s\" from PMR with handle ID: %p (PID %u)",
+			(IMG_UINT32) uiSize,
+			pszText,
+			psImport->hPMR,
+			OSGetCurrentProcessID());
+#endif
+#endif
+
+	_DevmemMemDescInit(psMemDesc,
+			uiOffset,
+			psImport,
+			uiSize);
+
+	bImportClean = ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_CLEAN) != 0);
+
+	/* Zero the memory */
+	if (bZero)
+	{
+		/* Has the import been zeroed on allocation and were no suballocations returned to it so far? */
+		bImportClean = bImportClean && ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_ZEROED) != 0);
+
+		if (!bImportClean)
+		{
+			eOp = PVRSRV_CACHE_OP_FLUSH;
+
+			eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr);
+			if (eError != PVRSRV_OK)
+			{
+				goto failMaintenance;
+			}
+
+			/* uiSize is a 64-bit quantity whereas the 3rd argument
+			 * to OSDeviceMemSet is a 32-bit quantity on 32-bit systems
+			 * hence a compiler warning of implicit cast and loss of data.
+			 * Added explicit cast and assert to remove warning.
+			 */
+			PVR_ASSERT(uiSize < IMG_UINT32_MAX);
+
+			OSDeviceMemSet(pvAddr, 0x0, (size_t) uiSize);
+#if defined(PDUMP)
+			DevmemPDumpLoadZeroMem(psMemDesc, 0, uiSize, PDUMP_FLAGS_CONTINUOUS);
+#endif
+		}
+	}
+	else if (bPoisonOnAlloc)
+	{
+		/* Has the import been poisoned on allocation and were no suballocations returned to it so far? */
+		bPoisonOnAlloc = (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_POISONED) != 0;
+
+		if (!bPoisonOnAlloc)
+		{
+			eOp = PVRSRV_CACHE_OP_FLUSH;
+
+			eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr);
+			if (eError != PVRSRV_OK)
+			{
+				goto failMaintenance;
+			}
+
+			if (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) ||
+					PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags))
+			{
+				OSDeviceMemSet(pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE,
+						uiSize);
+			}
+			else
+			{
+				OSCachedMemSet(pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE,
+						uiSize);
+			}
+
+			bPoisonOnAlloc = IMG_TRUE;
+		}
+	}
+
+	/* Flush or invalidate */
+	if (bCPUCached && !bImportClean && (bZero || bCPUCleanFlag || bPoisonOnAlloc))
+	{
+		/* BridgeCacheOpQueue _may_ be deferred so use BridgeCacheOpExec
+		   to ensure this cache maintenance is actioned immediately */
+		eError = BridgeCacheOpExec (GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+				psMemDesc->psImport->hPMR,
+				(IMG_UINT64)(uintptr_t)
+				pvAddr - psMemDesc->uiOffset,
+				psMemDesc->uiOffset,
+				psMemDesc->uiAllocSize,
+				eOp);
+		if (eError != PVRSRV_OK)
+		{
+			goto failMaintenance;
+		}
+	}
+
+	if (pvAddr)
+	{
+		DevmemReleaseCpuVirtAddr(psMemDesc);
+		pvAddr = NULL;
+	}
+
+	/* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+	 * the allocation gets mapped/unmapped
+	 */
+	_CheckAnnotationLength(pszText);
+#if defined(__KERNEL__)
+	OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+#else
+	OSStringNCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+	psMemDesc->szText[DEVMEM_ANNOTATION_MAX_LEN - 1] = '\0';
+#endif	/* if defined(__KERNEL__) */
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+	{
+		/* Attach RI information */
+		eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+				psMemDesc->psImport->hPMR,
+				OSStringNLength(psMemDesc->szText, DEVMEM_ANNOTATION_MAX_LEN),
+				psMemDesc->szText,
+				psMemDesc->uiOffset,
+				uiAllocatedSize,
+				IMG_FALSE,
+				IMG_TRUE,
+				&(psMemDesc->hRIHandle));
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (Error=%d)", __func__, eError));
+		}
+	}
+#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+	PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+	*ppsMemDescPtr = psMemDesc;
+
+	return PVRSRV_OK;
+
+	/* error exit paths follow */
+
+	failMaintenance:
+	if (pvAddr)
+	{
+		DevmemReleaseCpuVirtAddr(psMemDesc);
+		pvAddr = NULL;
+	}
+	_DevmemMemDescRelease(psMemDesc);
+	psMemDesc = NULL;	/* Make sure we don't do a discard after the release */
+	failDeviceMemAlloc:
+	if (psMemDesc)
+	{
+		_DevmemMemDescDiscard(psMemDesc);
+	}
+	failMemDescAlloc:
+	failParams:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed! Error is %s. Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC,
+			__func__,
+			PVRSRVGETERRORSTRING(eError),
+			uiSize));
+	return eError;
+}
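+
+/* Illustrative allocation sketch (the flags and annotation are assumptions
+ * chosen for the example, not mandated by the API):
+ *
+ *     DEVMEM_MEMDESC *psMemDesc;
+ *     eError = DevmemSubAllocate(RA_NO_IMPORT_MULTIPLIER, psHeap,
+ *                                uiSize, uiAlign,
+ *                                PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ *                                PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+ *                                "ExampleBuffer", &psMemDesc);
+ *     // ... use the memory ...
+ *     DevmemFree(psMemDesc);
+ */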
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_DEVMEM_SIZE_T uiSize,
+		IMG_DEVMEM_ALIGN_T uiAlign,
+		IMG_UINT32 uiLog2HeapPageSize,
+		DEVMEM_FLAGS_T uiFlags,
+		const IMG_CHAR *pszText,
+		DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_MEMDESC *psMemDesc = NULL;
+	DEVMEM_IMPORT *psImport;
+	IMG_UINT32 ui32MappingTable = 0;
+
+	eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize,
+			&uiSize,
+			&uiAlign);
+	if (eError != PVRSRV_OK)
+	{
+		goto failParams;
+	}
+
+	eError = _DevmemValidateParams(uiSize,
+			uiAlign,
+			&uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto failParams;
+	}
+
+	eError = _DevmemMemDescAlloc(&psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMemDescAlloc;
+	}
+
+	eError = _AllocateDeviceMemory(hDevConnection,
+			uiLog2HeapPageSize,
+			uiSize,
+			uiSize,
+			1,
+			1,
+			&ui32MappingTable,
+			uiAlign,
+			uiFlags,
+			IMG_TRUE,
+			pszText,
+			&psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failDeviceMemAlloc;
+	}
+
+	_DevmemMemDescInit(psMemDesc,
+			0,
+			psImport,
+			uiSize);
+
+	*ppsMemDescPtr = psMemDesc;
+
+	/* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+	 * the allocation gets mapped/unmapped
+	 */
+	_CheckAnnotationLength(pszText);
+#if defined(__KERNEL__)
+	OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+#else
+	OSStringNCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+	psMemDesc->szText[DEVMEM_ANNOTATION_MAX_LEN - 1] = '\0';
+#endif	/* if defined(__KERNEL__) */
+
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+	{
+		eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection),
+				psImport->hPMR);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (Error=%d)", __func__, eError));
+		}
+
+		/* Attach RI information */
+		eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psImport->hDevConnection),
+				psImport->hPMR,
+				sizeof("^"),
+				"^",
+				psMemDesc->uiOffset,
+				uiSize,
+				IMG_FALSE,
+				IMG_FALSE,
+				&psMemDesc->hRIHandle);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (Error=%d)", __func__, eError));
+		}
+	}
+#else  /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+	PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+	return PVRSRV_OK;
+
+	/* error exit paths follow */
+
+	failDeviceMemAlloc:
+	_DevmemMemDescDiscard(psMemDesc);
+
+	failMemDescAlloc:
+	failParams:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed! Error is %s. Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC,
+			__func__,
+			PVRSRVGETERRORSTRING(eError),
+			uiSize));
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_DEVMEM_SIZE_T uiSize,
+		IMG_DEVMEM_SIZE_T uiChunkSize,
+		IMG_UINT32 ui32NumPhysChunks,
+		IMG_UINT32 ui32NumVirtChunks,
+		IMG_UINT32 *pui32MappingTable,
+		IMG_DEVMEM_ALIGN_T uiAlign,
+		IMG_UINT32 uiLog2HeapPageSize,
+		DEVMEM_FLAGS_T uiFlags,
+		const IMG_CHAR *pszText,
+		DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_MEMDESC *psMemDesc = NULL;
+	DEVMEM_IMPORT *psImport;
+
+	eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize,
+			&uiSize,
+			&uiAlign);
+	if (eError != PVRSRV_OK)
+	{
+		goto failParams;
+	}
+
+	eError = _DevmemValidateParams(uiSize,
+			uiAlign,
+			&uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto failParams;
+	}
+
+	eError = _DevmemMemDescAlloc(&psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMemDescAlloc;
+	}
+
+	eError = _AllocateDeviceMemory(hDevConnection,
+			uiLog2HeapPageSize,
+			uiSize,
+			uiChunkSize,
+			ui32NumPhysChunks,
+			ui32NumVirtChunks,
+			pui32MappingTable,
+			uiAlign,
+			uiFlags,
+			IMG_TRUE,
+			pszText,
+			&psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failDeviceMemAlloc;
+	}
+
+	_DevmemMemDescInit(psMemDesc,
+			0,
+			psImport,
+			uiSize);
+
+	/* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+	 * the allocation gets mapped/unmapped
+	 */
+	_CheckAnnotationLength(pszText);
+#if defined(__KERNEL__)
+	OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+#else
+	OSStringNCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+	psMemDesc->szText[DEVMEM_ANNOTATION_MAX_LEN - 1] = '\0';
+#endif	/* if defined(__KERNEL__) */
+
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+	{
+		eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection),
+				psImport->hPMR);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (Error=%d)", __func__, eError));
+		}
+
+		/* Attach RI information */
+		eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+				psMemDesc->psImport->hPMR,
+				sizeof("^"),
+				"^",
+				psMemDesc->uiOffset,
+				uiSize,
+				IMG_FALSE,
+				IMG_FALSE,
+				&psMemDesc->hRIHandle);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (Error=%d)", __func__, eError));
+		}
+	}
+#else  /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+	PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+	*ppsMemDescPtr = psMemDesc;
+
+	return PVRSRV_OK;
+
+	/* error exit paths follow */
+
+	failDeviceMemAlloc:
+	_DevmemMemDescDiscard(psMemDesc);
+
+	failMemDescAlloc:
+	failParams:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed! Error is %s. Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC,
+			__func__,
+			PVRSRVGETERRORSTRING(eError),
+			uiSize));
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hServerHandle,
+		IMG_HANDLE *hLocalImportHandle)
+{
+	return BridgePMRMakeLocalImportHandle(GetBridgeHandle(hDevConnection),
+			hServerHandle,
+			hLocalImportHandle);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hLocalImportHandle)
+{
+	return BridgePMRUnmakeLocalImportHandle(GetBridgeHandle(hDevConnection), hLocalImportHandle);
+}
+
+/*****************************************************************************
+ *                Devmem unsecure export functions                           *
+ *****************************************************************************/
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+
+static PVRSRV_ERROR
+_Mapping_Export(DEVMEM_IMPORT *psImport,
+		DEVMEM_EXPORTHANDLE *phPMRExportHandlePtr,
+		DEVMEM_EXPORTKEY *puiExportKeyPtr,
+		DEVMEM_SIZE_T *puiSize,
+		DEVMEM_LOG2ALIGN_T *puiLog2Contig)
+{
+	/* Gets an export handle and key for the PMR used for this mapping */
+	/* Can only be done if there are no suballocations for this mapping */
+
+	PVRSRV_ERROR eError;
+	DEVMEM_EXPORTHANDLE hPMRExportHandle;
+	DEVMEM_EXPORTKEY uiExportKey;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig;
+
+	if (psImport == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+	if ((psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+		goto failParams;
+	}
+
+	eError = BridgePMRExportPMR(GetBridgeHandle(psImport->hDevConnection),
+			psImport->hPMR,
+			&hPMRExportHandle,
+			&uiSize,
+			&uiLog2Contig,
+			&uiExportKey);
+	if (eError != PVRSRV_OK)
+	{
+		goto failExport;
+	}
+
+	PVR_ASSERT(uiSize == psImport->uiSize);
+
+	*phPMRExportHandlePtr = hPMRExportHandle;
+	*puiExportKeyPtr = uiExportKey;
+	*puiSize = uiSize;
+	*puiLog2Contig = uiLog2Contig;
+
+	return PVRSRV_OK;
+
+	/* error exit paths follow */
+
+	failExport:
+	failParams:
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static void
+_Mapping_Unexport(DEVMEM_IMPORT *psImport,
+		DEVMEM_EXPORTHANDLE hPMRExportHandle)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT (psImport != NULL);
+
+	eError = BridgePMRUnexportPMR(GetBridgeHandle(psImport->hDevConnection),
+			hPMRExportHandle);
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemExport(DEVMEM_MEMDESC *psMemDesc,
+		DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+	/* Caller to provide storage for export cookie struct */
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hPMRExportHandle = 0;
+	IMG_UINT64 uiPMRExportPassword = 0;
+	IMG_DEVMEM_SIZE_T uiSize = 0;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig = 0;
+
+	if (psMemDesc == NULL || psExportCookie == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	if (DEVMEM_PROPERTIES_EXPORTABLE !=
+			(psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: This Memory (0x%p) cannot be exported!...",
+				__func__, psMemDesc));
+		eError = PVRSRV_ERROR_INVALID_REQUEST;
+		goto e0;
+	}
+
+	eError = _Mapping_Export(psMemDesc->psImport,
+			&hPMRExportHandle,
+			&uiPMRExportPassword,
+			&uiSize,
+			&uiLog2Contig);
+	if (eError != PVRSRV_OK)
+	{
+		psExportCookie->uiSize = 0;
+		goto e0;
+	}
+
+	psExportCookie->hPMRExportHandle = hPMRExportHandle;
+	psExportCookie->uiPMRExportPassword = uiPMRExportPassword;
+	psExportCookie->uiSize = uiSize;
+	psExportCookie->uiLog2ContiguityGuarantee = uiLog2Contig;
+
+	return PVRSRV_OK;
+
+	/* error exit paths follow */
+
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+IMG_INTERNAL void
+DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
+		DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+	_Mapping_Unexport(psMemDesc->psImport,
+			psExportCookie->hPMRExportHandle);
+
+	psExportCookie->uiSize = 0;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemImport(SHARED_DEV_CONNECTION hDevConnection,
+		DEVMEM_EXPORTCOOKIE *psCookie,
+		DEVMEM_FLAGS_T uiFlags,
+		DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	DEVMEM_MEMDESC *psMemDesc = NULL;
+	DEVMEM_IMPORT *psImport;
+	IMG_HANDLE hPMR;
+	PVRSRV_ERROR eError;
+
+	if (ppsMemDescPtr == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+	eError = _DevmemMemDescAlloc(&psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMemDescAlloc;
+	}
+
+	eError = _DevmemImportStructAlloc(hDevConnection,
+			&psImport);
+	if (eError != PVRSRV_OK)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto failImportAlloc;
+	}
+
+	/* Get a handle to the PMR (inc refcount) */
+	eError = BridgePMRImportPMR(GetBridgeHandle(hDevConnection),
+			psCookie->hPMRExportHandle,
+			psCookie->uiPMRExportPassword,
+			psCookie->uiSize, /* not trusted - just for sanity checks */
+			psCookie->uiLog2ContiguityGuarantee, /* not trusted - just for sanity checks */
+			&hPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto failImport;
+	}
+
+	_DevmemImportStructInit(psImport,
+			psCookie->uiSize,
+			1ULL << psCookie->uiLog2ContiguityGuarantee,
+			uiFlags,
+			hPMR,
+			DEVMEM_PROPERTIES_IMPORTED |
+			DEVMEM_PROPERTIES_EXPORTABLE);
+
+	_DevmemMemDescInit(psMemDesc,
+			0,
+			psImport,
+			psImport->uiSize);
+
+	*ppsMemDescPtr = psMemDesc;
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+	{
+		/* Attach RI information */
+		eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+				psMemDesc->psImport->hPMR,
+				sizeof("^"),
+				"^",
+				psMemDesc->uiOffset,
+				psMemDesc->psImport->uiSize,
+				IMG_TRUE,
+				IMG_TRUE,
+				&psMemDesc->hRIHandle);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (Error=%d)", __func__, eError));
+		}
+	}
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+	return PVRSRV_OK;
+
+	/* error exit paths follow */
+
+	failImport:
+	_DevmemImportDiscard(psImport);
+	failImportAlloc:
+	_DevmemMemDescDiscard(psMemDesc);
+	failMemDescAlloc:
+	failParams:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
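+
+/* Illustrative export/import round trip (a sketch; the cookie must be
+ * transported to the importing process by the caller, e.g. over IPC):
+ *
+ *     DEVMEM_EXPORTCOOKIE sCookie;
+ *     eError = DevmemExport(psMemDesc, &sCookie);
+ *     // ... pass sCookie to the importing process ...
+ *     eError = DevmemImport(hDevConnection, &sCookie, uiFlags,
+ *                           &psImportedMemDesc);
+ */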
+
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+/*****************************************************************************
+ *                   Common MemDesc functions                                *
+ *****************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnpin(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+
+	if (psImport->uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE)
+	{
+		eError = PVRSRV_ERROR_INVALID_REQUEST;
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: The passed allocation is not valid to unpin",
+				__func__));
+
+		goto e_exit;
+	}
+
+	/* Stop if the allocation might have suballocations. */
+	if (!(psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: The passed allocation is not valid to unpin because "
+				"there might be suballocations on it. Make sure you allocate a "
+				"multiple of the heap page size when using PVRSRVAllocDeviceMem()",
+				__func__));
+
+		goto e_exit;
+	}
+
+	/* Stop if the Import is still mapped to CPU */
+	if (psImport->sCPUImport.ui32RefCount)
+	{
+		eError = PVRSRV_ERROR_STILL_MAPPED;
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: There are still %u references on the CPU mapping. "
+				"Please remove all CPU mappings before unpinning.",
+				__func__,
+				psImport->sCPUImport.ui32RefCount));
+
+		goto e_exit;
+	}
+
+	/* Only unpin if it is not already unpinned;
+	 * otherwise simply return PVRSRV_OK */
+	if (psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+	{
+		goto e_exit;
+	}
+
+	/* Unpin it and invalidate mapping */
+	if (psImport->sDeviceImport.bMapped)
+	{
+		eError = BridgeDevmemIntUnpinInvalidate(GetBridgeHandle(psImport->hDevConnection),
+				psImport->sDeviceImport.hMapping,
+				psImport->hPMR);
+	}
+	else
+	{
+		/* Or just unpin it */
+		eError = BridgeDevmemIntUnpin(GetBridgeHandle(psImport->hDevConnection),
+				psImport->hPMR);
+	}
+
+	/* Update flags and RI when call was successful */
+	if (eError == PVRSRV_OK)
+	{
+		psImport->uiProperties |= DEVMEM_PROPERTIES_UNPINNED;
+	}
+	else
+	{
+		/* Or just show what went wrong */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Unpin aborted because of error %d",
+				__func__,
+				eError));
+	}
+
+	e_exit:
+	return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPin(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+
+	if (psImport->uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE)
+	{
+		eError = PVRSRV_ERROR_INVALID_REQUEST;
+		goto e_exit;
+	}
+
+	/* Only pin if it is unpinned */
+	if ((psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED) == 0)
+	{
+		goto e_exit;
+	}
+
+	/* Pin it and make mapping valid */
+	if (psImport->sDeviceImport.bMapped)
+	{
+		eError = BridgeDevmemIntPinValidate(GetBridgeHandle(psImport->hDevConnection),
+				psImport->sDeviceImport.hMapping,
+				psImport->hPMR);
+	}
+	else
+	{
+		/* Or just pin it */
+		eError = BridgeDevmemIntPin(GetBridgeHandle(psImport->hDevConnection),
+				psImport->hPMR);
+	}
+
+	if ((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_PMR_NEW_MEMORY))
+	{
+		psImport->uiProperties &= ~DEVMEM_PROPERTIES_UNPINNED;
+	}
+	else
+	{
+		/* Or just show what went wrong */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Pin aborted because of error %d",
+				__func__,
+				eError));
+	}
+
+	e_exit:
+	return eError;
+}
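+
+/* Illustrative pairing of the two calls above (a sketch; the interpretation
+ * of PVRSRV_ERROR_PMR_NEW_MEMORY follows the pin path, which clears the
+ * UNPINNED property even when the backing pages were replaced):
+ *
+ *     eError = DevmemUnpin(psMemDesc);   // contents may now be discarded
+ *     // ... memory pressure may reclaim the pages here ...
+ *     eError = DevmemPin(psMemDesc);
+ *     if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ *     {
+ *         // new pages were supplied: reinitialise the buffer contents
+ *     }
+ */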
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, IMG_DEVMEM_SIZE_T *puiSize)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	*puiSize = psMemDesc->uiAllocSize;
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc, IMG_CHAR **pszAnnotation)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	*pszAnnotation = psMemDesc->szText;
+
+	return eError;
+}
+
+/*
+	This function is called for freeing any class of memory
+ */
+IMG_INTERNAL IMG_BOOL
+DevmemFree(DEVMEM_MEMDESC *psMemDesc)
+{
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SECURE)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Please use methods dedicated to secure buffers.",
+				__func__));
+		return IMG_FALSE;
+	}
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+	{
+		if (psMemDesc->hRIHandle)
+		{
+			PVRSRV_ERROR eError;
+
+			eError = BridgeRIDeleteMEMDESCEntry(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+					psMemDesc->hRIHandle);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIDeleteMEMDESCEntry failed (Error=%d)", __func__, eError));
+			}
+		}
+	}
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+	return _DevmemMemDescRelease(psMemDesc);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
+		DEVMEM_HEAP *psHeap,
+		IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+	DEVMEM_IMPORT *psImport;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bMap = IMG_TRUE;
+	IMG_BOOL bDestroyed = IMG_FALSE;
+	IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS;
+
+	/* Do not try to map unpinned memory */
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+	{
+		eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+		goto failFlags;
+	}
+
+	OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+	if (psHeap == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+	if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+		goto failCheck;
+	}
+
+	/* Don't map memory for deferred allocations */
+	if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+	{
+		PVR_ASSERT(psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE);
+		bMap = IMG_FALSE;
+	}
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psMemDesc,
+			psMemDesc->sDeviceMemDesc.ui32RefCount,
+			psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+	psImport = psMemDesc->psImport;
+	_DevmemMemDescAcquire(psMemDesc);
+
+#if defined(__KERNEL__)
+	{
+		PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection;
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice;
+
+		if (((psHeap == psDevInfo->psFirmwareMainHeap) ||
+		     (psHeap == psDevInfo->psFirmwareConfigHeap)) &&
+		     PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+		{
+			ui64OptionalMapAddress = _GuestFWHeapVA(psImport->hPMR, psDevNode);
+		}
+	}
+#endif
+
+	eError = _DevmemImportStructDevMap(psHeap,
+			bMap,
+			psImport,
+			ui64OptionalMapAddress);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMap;
+	}
+
+	sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+	sDevVAddr.uiAddr += psMemDesc->uiOffset;
+	psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr;
+	psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+	*psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr;
+
+	OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+	if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+	{
+		BridgeDevicememHistoryMap(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+				psMemDesc->psImport->hPMR,
+				psMemDesc->uiOffset,
+				psMemDesc->sDeviceMemDesc.sDevVAddr,
+				psMemDesc->uiAllocSize,
+				psMemDesc->szText,
+				DevmemGetHeapLog2PageSize(psHeap),
+				psMemDesc->ui32AllocationIndex,
+				&psMemDesc->ui32AllocationIndex);
+	}
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+	{
+		if (psMemDesc->hRIHandle)
+		{
+			eError = BridgeRIUpdateMEMDESCAddr(GetBridgeHandle(psImport->hDevConnection),
+					psMemDesc->hRIHandle,
+					psImport->sDeviceImport.sDevVAddr);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCAddr failed (Error=%d)", __func__, eError));
+			}
+		}
+	}
+#endif
+
+	return PVRSRV_OK;
+
+	failMap:
+	bDestroyed = _DevmemMemDescRelease(psMemDesc);
+	failCheck:
+	failParams:
+	if (!bDestroyed)
+	{
+		OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+	}
+	PVR_ASSERT(eError != PVRSRV_OK);
+	failFlags:
+	return eError;
+}
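+
+/* Illustrative sketch: mapping a MemDesc into the device MMU and later
+ * dropping the reference (error handling elided):
+ *
+ *     IMG_DEV_VIRTADDR sDevVAddr;
+ *     eError = DevmemMapToDevice(psMemDesc, psHeap, &sDevVAddr);
+ *     // ... hand sDevVAddr to the GPU ...
+ *     DevmemReleaseDevVirtAddr(psMemDesc);
+ */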
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+		DEVMEM_HEAP *psHeap,
+		IMG_DEV_VIRTADDR sDevVirtAddr)
+{
+	DEVMEM_IMPORT *psImport;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bMap = IMG_TRUE;
+	IMG_BOOL bDestroyed = IMG_FALSE;
+
+	/* Do not try to map unpinned memory */
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+	{
+		eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+		goto failFlags;
+	}
+
+	OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+	if (psHeap == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+	if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+		goto failCheck;
+	}
+
+	/* Don't map memory for deferred allocations */
+	if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+	{
+		PVR_ASSERT(psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE);
+		bMap = IMG_FALSE;
+	}
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psMemDesc,
+			psMemDesc->sDeviceMemDesc.ui32RefCount,
+			psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+	psImport = psMemDesc->psImport;
+	_DevmemMemDescAcquire(psMemDesc);
+
+	eError = _DevmemImportStructDevMap(psHeap,
+			bMap,
+			psImport,
+			sDevVirtAddr.uiAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMap;
+	}
+
+	sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+	sDevVAddr.uiAddr += psMemDesc->uiOffset;
+	psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr;
+	psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+	OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+	if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+	{
+		BridgeDevicememHistoryMap(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+				psMemDesc->psImport->hPMR,
+				psMemDesc->uiOffset,
+				psMemDesc->sDeviceMemDesc.sDevVAddr,
+				psMemDesc->uiAllocSize,
+				psMemDesc->szText,
+				DevmemGetHeapLog2PageSize(psHeap),
+				psMemDesc->ui32AllocationIndex,
+				&psMemDesc->ui32AllocationIndex);
+	}
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+	{
+		if (psMemDesc->hRIHandle)
+		{
+			eError = BridgeRIUpdateMEMDESCAddr(GetBridgeHandle(psImport->hDevConnection),
+					psMemDesc->hRIHandle,
+					psImport->sDeviceImport.sDevVAddr);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCAddr failed (Error=%d)", __func__, eError));
+			}
+		}
+	}
+#endif
+
+	return PVRSRV_OK;
+
+	failMap:
+	bDestroyed = _DevmemMemDescRelease(psMemDesc);
+	failCheck:
+	failParams:
+	if (!bDestroyed)
+	{
+		OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+	}
+	PVR_ASSERT(eError != PVRSRV_OK);
+	failFlags:
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+		IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+	PVRSRV_ERROR eError;
+
+	/* Do not try to map unpinned memory */
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+	{
+		eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+		goto failCheck;
+	}
+
+	OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psMemDesc,
+			psMemDesc->sDeviceMemDesc.ui32RefCount,
+			psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+	if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_NO_MAPPING;
+		goto failRelease;
+	}
+	psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+	*psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr;
+	OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+	return PVRSRV_OK;
+
+	failRelease:
+	OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	failCheck:
+	return eError;
+}
+
+IMG_INTERNAL void
+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVR_ASSERT(psMemDesc != NULL);
+
+	OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psMemDesc,
+			psMemDesc->sDeviceMemDesc.ui32RefCount,
+			psMemDesc->sDeviceMemDesc.ui32RefCount-1);
+
+	PVR_ASSERT(psMemDesc->sDeviceMemDesc.ui32RefCount != 0);
+
+	if (--psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+	{
+		if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+		{
+			BridgeDevicememHistoryUnmap(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+					psMemDesc->psImport->hPMR,
+					psMemDesc->uiOffset,
+					psMemDesc->sDeviceMemDesc.sDevVAddr,
+					psMemDesc->uiAllocSize,
+					psMemDesc->szText,
+					DevmemGetHeapLog2PageSize(psMemDesc->psImport->sDeviceImport.psHeap),
+					psMemDesc->ui32AllocationIndex,
+					&psMemDesc->ui32AllocationIndex);
+		}
+
+		_DevmemImportStructDevUnmap(psMemDesc->psImport);
+		OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+		_DevmemMemDescRelease(psMemDesc);
+	}
+	else
+	{
+		OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+	}
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+		void **ppvCpuVirtAddr)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psMemDesc != NULL);
+	PVR_ASSERT(ppvCpuVirtAddr != NULL);
+
+	if (psMemDesc->psImport->uiProperties &
+			(DEVMEM_PROPERTIES_UNPINNED | DEVMEM_PROPERTIES_SECURE))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Allocation is currently unpinned or a secure buffer. "
+				"Not possible to map to CPU!",
+				__func__));
+		eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+		goto failFlags;
+	}
+
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_NO_CPU_MAPPING)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: CPU Mapping is not possible on this allocation!",
+				__func__));
+		eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+		goto failFlags;
+	}
+
+	OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psMemDesc,
+			psMemDesc->sCPUMemDesc.ui32RefCount,
+			psMemDesc->sCPUMemDesc.ui32RefCount+1);
+
+	if (psMemDesc->sCPUMemDesc.ui32RefCount++ == 0)
+	{
+		DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+		IMG_UINT8 *pui8CPUVAddr;
+
+		_DevmemMemDescAcquire(psMemDesc);
+		eError = _DevmemImportStructCPUMap(psImport);
+		if (eError != PVRSRV_OK)
+		{
+			goto failMap;
+		}
+
+		pui8CPUVAddr = psImport->sCPUImport.pvCPUVAddr;
+		pui8CPUVAddr += psMemDesc->uiOffset;
+		psMemDesc->sCPUMemDesc.pvCPUVAddr = pui8CPUVAddr;
+	}
+	*ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr;
+
+	VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize);
+
+	OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+
+	return PVRSRV_OK;
+
+	failMap:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	psMemDesc->sCPUMemDesc.ui32RefCount--;
+
+	if (!_DevmemMemDescRelease(psMemDesc))
+	{
+		OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+	}
+	failFlags:
+	return eError;
+}
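+
+/* Typical CPU access pattern (an illustrative sketch):
+ *
+ *     void *pvCpuVAddr;
+ *     if (DevmemAcquireCpuVirtAddr(psMemDesc, &pvCpuVAddr) == PVRSRV_OK)
+ *     {
+ *         OSDeviceMemSet(pvCpuVAddr, 0, (size_t) uiSize);  // touch the buffer
+ *         DevmemReleaseCpuVirtAddr(psMemDesc);
+ *     }
+ */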
+
+IMG_INTERNAL void
+DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+		void **ppvCpuVirtAddr)
+{
+	PVR_ASSERT(psMemDesc != NULL);
+	PVR_ASSERT(ppvCpuVirtAddr != NULL);
+
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_NO_CPU_MAPPING)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: CPU Mapping is not possible on this allocation!",
+				__func__));
+		return;
+	}
+
+	OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psMemDesc,
+			psMemDesc->sCPUMemDesc.ui32RefCount,
+			psMemDesc->sCPUMemDesc.ui32RefCount+1);
+
+	*ppvCpuVirtAddr = NULL;
+	if (psMemDesc->sCPUMemDesc.ui32RefCount)
+	{
+		*ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr;
+		psMemDesc->sCPUMemDesc.ui32RefCount += 1;
+	}
+
+	VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize);
+	OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+}
+
+IMG_INTERNAL void
+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVR_ASSERT(psMemDesc != NULL);
+
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_NO_CPU_MAPPING)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: CPU UnMapping is not possible on this allocation!",
+				__func__));
+		return;
+	}
+
+	OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psMemDesc,
+			psMemDesc->sCPUMemDesc.ui32RefCount,
+			psMemDesc->sCPUMemDesc.ui32RefCount-1);
+
+	PVR_ASSERT(psMemDesc->sCPUMemDesc.ui32RefCount != 0);
+
+	if (--psMemDesc->sCPUMemDesc.ui32RefCount == 0)
+	{
+		OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+		_DevmemImportStructCPUUnmap(psMemDesc->psImport);
+		_DevmemMemDescRelease(psMemDesc);
+	}
+	else
+	{
+		OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+	}
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+		IMG_HANDLE *phImport)
+{
+	if ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+	{
+		return PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+	}
+
+	*phImport = psMemDesc->psImport->hPMR;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
+		IMG_UINT64 *pui64UID)
+{
+	DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+	PVRSRV_ERROR eError;
+
+	if (!(psImport->uiProperties & (DEVMEM_PROPERTIES_IMPORTED |
+		                        DEVMEM_PROPERTIES_EXPORTABLE)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: This Memory (0x%p) doesn't support the functionality requested...",
+				__func__, psMemDesc));
+		return PVRSRV_ERROR_INVALID_REQUEST;
+	}
+
+	eError = BridgePMRGetUID(GetBridgeHandle(psImport->hDevConnection),
+			psImport->hPMR,
+			pui64UID);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
+		IMG_HANDLE *hReservation)
+{
+	DEVMEM_IMPORT *psImport;
+
+	PVR_ASSERT(psMemDesc);
+	psImport = psMemDesc->psImport;
+
+	PVR_ASSERT(psImport);
+	*hReservation = psImport->sDeviceImport.hReservation;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
+		IMG_HANDLE *phPMR,
+		IMG_DEVMEM_OFFSET_T *puiPMROffset)
+{
+	DEVMEM_IMPORT *psImport;
+
+	PVR_ASSERT(psMemDesc);
+	*puiPMROffset = psMemDesc->uiOffset;
+	psImport = psMemDesc->psImport;
+
+	PVR_ASSERT(psImport);
+	*phPMR = psImport->hPMR;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
+		DEVMEM_FLAGS_T *puiFlags)
+{
+	DEVMEM_IMPORT *psImport;
+
+	PVR_ASSERT(psMemDesc);
+	psImport = psMemDesc->psImport;
+
+	PVR_ASSERT(psImport);
+	*puiFlags = psImport->uiFlags;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL SHARED_DEV_CONNECTION
+DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc)
+{
+	return psMemDesc->psImport->hDevConnection;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hExtHandle,
+		DEVMEM_FLAGS_T uiFlags,
+		DEVMEM_MEMDESC **ppsMemDescPtr,
+		IMG_DEVMEM_SIZE_T *puiSizePtr,
+		const IMG_CHAR *pszAnnotation)
+{
+	DEVMEM_MEMDESC *psMemDesc = NULL;
+	DEVMEM_IMPORT *psImport;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_ALIGN_T uiAlign;
+	IMG_HANDLE hPMR;
+	PVRSRV_ERROR eError;
+
+	if (ppsMemDescPtr == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+	eError = _DevmemMemDescAlloc(&psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMemDescAlloc;
+	}
+
+	eError = _DevmemImportStructAlloc(hDevConnection,
+			&psImport);
+	if (eError != PVRSRV_OK)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto failImportAlloc;
+	}
+
+	/* Get the PMR handle and its size from the server */
+	eError = BridgePMRLocalImportPMR(GetBridgeHandle(hDevConnection),
+			hExtHandle,
+			&hPMR,
+			&uiSize,
+			&uiAlign);
+	if (eError != PVRSRV_OK)
+	{
+		goto failImport;
+	}
+
+	_DevmemImportStructInit(psImport,
+			uiSize,
+			uiAlign,
+			uiFlags,
+			hPMR,
+			DEVMEM_PROPERTIES_IMPORTED |
+			DEVMEM_PROPERTIES_EXPORTABLE);
+
+	_DevmemMemDescInit(psMemDesc,
+			0,
+			psImport,
+			uiSize);
+
+	*ppsMemDescPtr = psMemDesc;
+	if (puiSizePtr)
+		*puiSizePtr = uiSize;
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+	{
+		/* Attach RI information.
+		 * Set the backed size to 0 since this allocation was made by the
+		 * same process and has already been accounted for. */
+		eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+				psMemDesc->psImport->hPMR,
+				sizeof("^"),
+				"^",
+				psMemDesc->uiOffset,
+				psMemDesc->psImport->uiSize,
+				IMG_TRUE,
+				IMG_FALSE,
+				&(psMemDesc->hRIHandle));
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (Error=%d)", __func__, eError));
+		}
+	}
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+
+	/* Copy the allocation descriptive name and size so it can be passed
+	 * to DevicememHistory when the allocation gets mapped/unmapped
+	 */
+	_CheckAnnotationLength(pszAnnotation);
+#if defined(__KERNEL__)
+	OSStringLCopy(psMemDesc->szText, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN);
+#else
+	OSStringNCopy(psMemDesc->szText, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN);
+	psMemDesc->szText[DEVMEM_ANNOTATION_MAX_LEN - 1] = '\0';
+#endif	/* if defined(__KERNEL__) */
+
+	return PVRSRV_OK;
+
+failImport:
+	_DevmemImportDiscard(psImport);
+failImportAlloc:
+	_DevmemMemDescDiscard(psMemDesc);
+failMemDescAlloc:
+failParams:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
+		IMG_DEV_VIRTADDR sDevVAddr)
+{
+	return BridgeDevmemIsVDevAddrValid(GetBridgeHandle(psContext->hDevConnection),
+			psContext->hDevMemServerContext,
+			sDevVAddr);
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext,
+		IMG_DEV_VIRTADDR *psFaultAddress)
+{
+	return BridgeDevmemGetFaultAddress(GetBridgeHandle(psContext->hDevConnection),
+			psContext->hDevMemServerContext,
+			psFaultAddress);
+}
+
+IMG_INTERNAL IMG_UINT32
+DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap)
+{
+	return psHeap->uiLog2Quantum;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapTilingProperties(DEVMEM_HEAP *psHeap,
+		IMG_UINT32 *puiLog2ImportAlignment,
+		IMG_UINT32 *puiLog2TilingStrideFactor)
+{
+	*puiLog2ImportAlignment = psHeap->uiLog2ImportAlignment;
+	*puiLog2TilingStrideFactor = psHeap->uiLog2TilingStrideFactor;
+	return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function       RegisterDevmemPFNotify
+@Description    Registers that the application wants to be signaled when a page
+                fault occurs.
+
+@Input          psContext      Memory context of the process that would
+                               like to be notified.
+@Input          ui32PID        The PID of the calling process.
+@Input          bRegister      If true, register. If false, de-register.
+@Return         PVRSRV_ERROR:  PVRSRV_OK on success. Otherwise, an error
+                               code.
+ */ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
+		IMG_UINT32     ui32PID,
+		IMG_BOOL       bRegister)
+{
+	PVRSRV_ERROR eError;
+
+	eError = BridgeDevmemIntRegisterPFNotifyKM(GetBridgeHandle(psContext->hDevConnection),
+			psContext->hDevMemServerContext,
+			ui32PID,
+			bRegister);
+	if (eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Bridge Call Failed: This could suggest a UM/KM mismatch (%d)",
+				__func__,
+				(IMG_INT)(eError)));
+	}
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+GetMaxDevMemSize(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_DEVMEM_SIZE_T *puiLMASize,
+		IMG_DEVMEM_SIZE_T *puiUMASize)
+{
+	return BridgeGetMaxDevMemSize(GetBridgeHandle(hDevConnection),
+			puiLMASize,
+			puiUMASize);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem.h
new file mode 100644
index 0000000..b521846
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem.h
@@ -0,0 +1,689 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management core internal
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal interface to core device memory management
+                functions that are shared between client and server code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SRVCLIENT_DEVICEMEM_H
+#define SRVCLIENT_DEVICEMEM_H
+
+/********************************************************************************
+ *                                                                              *
+ *   +------------+   +------------+    +--------------+      +--------------+  *
+ *   | a   sub-   |   | a   sub-   |    |  an          |      | allocation   |  *
+ *   | allocation |   | allocation |    |  allocation  |      | also mapped  |  *
+ *   |            |   |            |    |  in proc 1   |      | into proc 2  |  *
+ *   +------------+   +------------+    +--------------+      +--------------+  *
+ *             |         |                     |                     |          *
+ *          +--------------+            +--------------+      +--------------+  *
+ *          | page   gran- |            | page   gran- |      | page   gran- |  *
+ *          | ular mapping |            | ular mapping |      | ular mapping |  *
+ *          +--------------+            +--------------+      +--------------+  *
+ *                 |                                 |          |               *
+ *                 |                                 |          |               *
+ *                 |                                 |          |               *
+ *          +--------------+                       +--------------+             *
+ *          |              |                       |              |             *
+ *          | A  "P.M.R."  |                       | A  "P.M.R."  |             *
+ *          |              |                       |              |             *
+ *          +--------------+                       +--------------+             *
+ *                                                                              *
+ ********************************************************************************/
+
+/*
+    All device memory allocations are ultimately a view upon (not
+    necessarily the whole of) a "PMR".
+
+    A PMR is a "Physical Memory Resource", which may be a
+    "pre-faulted" lump of physical memory, or it may be a
+    representation of some physical memory that will be instantiated
+    at some future time.
+
+    PMRs always represent multiples of some power-of-2 "contiguity"
+    promised by the PMR, which will allow them to be mapped in whole
+    pages into the device MMU.  As memory allocations may be smaller
+    than a page, these mappings may be suballocated and thus shared
+    between multiple allocations in one process.  A PMR may also be
+    mapped simultaneously into multiple device memory contexts
+    (cross-process scenario), however, for security reasons, it is not
+    legal to share a PMR "both ways" at once, that is, mapped into
+    multiple processes and divided up amongst several suballocations.
+
+    This PMR terminology is introduced here for background
+    information, but is generally of little concern to the caller of
+    this API.  This API handles suballocations and mappings, and the
+    caller thus deals primarily with MEMORY DESCRIPTORS representing
+    an allocation or suballocation, and HEAPS representing ranges of
+    virtual addresses in a CONTEXT.
+*/
+
+/*
+   |<---------------------------context------------------------------>|
+   |<-------heap------->|   |<-------heap------->|<-------heap------->|
+   |<-alloc->|          |   |<-alloc->|<-alloc->||   |<-alloc->|      |
+*/
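+
+/*
+   Illustrative sketch only (not part of this interface; error checking
+   is omitted for brevity): a typical client-side lifecycle built from
+   the functions declared below. The heap name "General" and the
+   uiSize/uiAlign/uiFlags values are assumptions for illustration.
+
+       DEVMEM_CONTEXT *psCtx;
+       DEVMEM_HEAP *psHeap;
+       DEVMEM_MEMDESC *psMemDesc;
+       IMG_DEV_VIRTADDR sDevVAddr;
+
+       DevmemCreateContext(hDevConnection, DEVMEM_HEAPCFG_FORCLIENTS,
+                           IMG_FALSE, &psCtx);
+       DevmemFindHeapByName(psCtx, "General", &psHeap);
+       DevmemAllocate(psHeap, uiSize, uiAlign, uiFlags,
+                      "ExampleAlloc", &psMemDesc);
+       DevmemMapToDevice(psMemDesc, psHeap, &sDevVAddr);
+
+       ...use the allocation...
+
+       DevmemReleaseDevVirtAddr(psMemDesc);
+       DevmemFree(psMemDesc);
+       DevmemDestroyContext(psCtx);
+*/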
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "pdump.h"
+
+#include "device_connection.h"
+
+
+typedef IMG_UINT32 DEVMEM_HEAPCFGID;
+#define DEVMEM_HEAPCFG_FORCLIENTS 0
+#define DEVMEM_HEAPCFG_META 1
+
+/*
+  In order to call the server side functions, we need a bridge handle.
+  We abstract that here, as we may wish to change its form.
+ */
+
+typedef IMG_HANDLE DEVMEM_BRIDGE_HANDLE;
+
+/**************************************************************************/ /*!
+@Function       DevmemUnpin
+@Description    This is the counterpart to DevmemPin(). It is meant to be
+                called before repinning an allocation.
+
+                For a detailed description see client API documentation.
+
+@Input          phMemDesc       The MemDesc that is going to be unpinned.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
+                                registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnpin(DEVMEM_MEMDESC *psMemDesc);
+
+/**************************************************************************/ /*!
+@Function       DevmemPin
+@Description    This is the counterpart to DevmemUnpin(). It is meant to be
+                called after unpinning an allocation.
+
+                For a detailed description see client API documentation.
+
+@Input          phMemDesc       The MemDesc that is going to be pinned.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
+                                was successfully restored.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+                                could not be restored and new physical memory
+                                was allocated.
+
+                                A different error otherwise.
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPin(DEVMEM_MEMDESC *psMemDesc);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapInt(DEVMEM_HEAP *psHeap,
+				 IMG_HANDLE *phDevmemHeap);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetSize(DEVMEM_MEMDESC *psMemDesc,
+			  IMG_DEVMEM_SIZE_T* puiSize);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc,
+			        IMG_CHAR **pszAnnotation);
+
+/*
+ * DevmemCreateContext()
+ *
+ * Create a device memory context
+ *
+ * This must be called before any heap is created in this context
+ *
+ * Caller to provide bridge handle which will be squirreled away
+ * internally and used for all future operations on items from this
+ * memory context.  Caller also to provide devicenode handle, as this
+ * is used for MMU configuration and also to determine the heap
+ * configuration for the auto-instantiated heaps.
+ *
+ * Note that when compiled in services/server, the hBridge is not used
+ * and is thrown away by the "fake" direct bridge.  (This may change.
+ * It is recommended that NULL be passed for the handle for now)
+ *
+ * hDeviceNode and uiHeapBlueprintID shall together dictate which
+ * heap-config to use. bMCUFenceAllocation specifies if the context
+ * requires a MCU Fence allocation setup.
+ *
+ * This will cause the server side counterpart to be created also.
+ *
+ * If you call DevmemCreateContext() (and the call succeeds) you
+ * are promising that you will later call DevmemDestroyContext(),
+ * except for abnormal process termination in which case it is
+ * expected it will be destroyed as part of handle clean up.
+ *
+ * Caller to provide storage for the pointer to the DEVMEM_CONTEXT
+ * object thus created.
+ */
+extern PVRSRV_ERROR
+DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection,
+                    DEVMEM_HEAPCFGID uiHeapBlueprintID,
+                    IMG_BOOL bMCUFenceAllocation,
+                    DEVMEM_CONTEXT **ppsCtxPtr);
+
+/*
+ * DevmemAcquireDevPrivData()
+ *
+ * Acquire the device private data for this memory context
+ */
+PVRSRV_ERROR
+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
+                         IMG_HANDLE *hPrivData);
+
+/*
+ * DevmemReleaseDevPrivData()
+ *
+ * Release the device private data for this memory context
+ */
+PVRSRV_ERROR
+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx);
+
+/*
+ * DevmemDestroyContext()
+ *
+ * Undoes that done by DevmemCreateContext()
+ */
+extern PVRSRV_ERROR
+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx);
+
+/*
+ * DevmemCreateHeap()
+ *
+ * Create a heap in the given context.
+ *
+ * N.B.  Not intended to be called directly, though it can be.
+ * Normally, heaps are instantiated at context creation time according
+ * to the specified blueprint.  See DevmemCreateContext() for details.
+ *
+ * This will cause MMU code to set up data structures for the heap,
+ * but may not cause page tables to be modified until allocations are
+ * made from the heap.
+ *
+ * The "Quantum" is both the device MMU page size to be configured for
+ * this heap, and the unit multiples of which "quantized" allocations
+ * are made (allocations smaller than this, known as "suballocations"
+ * will be made from a "sub alloc RA" and will "import" chunks
+ * according to this quantum)
+ *
+ * Where imported PMRs (or, for example, PMRs created by device class
+ * buffers) are mapped into this heap, it is important that the
+ * physical contiguity guarantee offered by the PMR is greater than or
+ * equal to the quantum size specified here, otherwise the attempt to
+ * map it will fail.  "Normal" allocations via DevmemAllocate()
+ * shall automatically meet this requirement, as each "import" will
+ * trigger the creation of a PMR with the desired contiguity.  The
+ * supported quantum sizes in that case shall be dictated by the OS
+ * specific implementation of PhysmemNewOSRamBackedPMR().
+ */
+extern PVRSRV_ERROR
+DevmemCreateHeap(DEVMEM_CONTEXT *psCtxPtr,
+                 /* base and length of heap */
+                 IMG_DEV_VIRTADDR sBaseAddress,
+                 IMG_DEVMEM_SIZE_T uiLength,
+                 /* log2 of allocation quantum, i.e. "page" size.
+                    All allocations (that go to server side) are
+                    multiples of this.  We use a client-side RA to
+                    make sub-allocations from this */
+                 IMG_UINT32 ui32Log2Quantum,
+                 /* The minimum import alignment for this heap */
+                 IMG_UINT32 ui32Log2ImportAlignment,
+                 /* (For tiling heaps) the factor to use to convert
+                    alignment to optimum buffer stride */
+                 IMG_UINT32 ui32Log2TilingStrideFactor,
+                 /* Name of heap for debug */
+                 /* N.B.  Okay to exist on caller's stack - this
+                    func takes a copy if it needs it. */
+                 const IMG_CHAR *pszName,
+                 DEVMEM_HEAPCFGID uiHeapBlueprintID,
+                 DEVMEM_HEAP **ppsHeapPtr);
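+
+/* Illustrative sketch only; all values below are assumptions. Creating
+ * a heap with a 4kB quantum (ui32Log2Quantum = 12) at a 1GB-aligned
+ * base, as recommended above for Rogue, might look like:
+ *
+ *     IMG_DEV_VIRTADDR sBase = { 0x0040000000ULL };
+ *     DEVMEM_HEAP *psHeap;
+ *
+ *     DevmemCreateHeap(psCtx, sBase, 0x0040000000ULL,
+ *                      12, 12, 0,
+ *                      "ExampleHeap", DEVMEM_HEAPCFG_FORCLIENTS,
+ *                      &psHeap);
+ *
+ * where 12, 12 and 0 are the log2 quantum, log2 import alignment and
+ * log2 tiling stride factor respectively.
+ */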
+/*
+ * DevmemDestroyHeap()
+ *
+ * Reverses DevmemCreateHeap()
+ *
+ * N.B. All allocations must have been freed and all mappings must
+ * have been unmapped before invoking this call
+ */
+extern PVRSRV_ERROR
+DevmemDestroyHeap(DEVMEM_HEAP *psHeap);
+
+/*
+ * DevmemExportalignAdjustSizeAndAlign()
+ * Adjust the given Size and Align so that the allocation avoids
+ * suballocation (used when allocating with
+ * PVRSRV_MEMALLOCFLAG_EXPORTALIGN).
+ *
+ * Returns PVRSRV_ERROR_INVALID_PARAMS if uiLog2Quantum has invalid value.
+ */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum,
+                                    IMG_DEVMEM_SIZE_T *puiSize,
+                                    IMG_DEVMEM_ALIGN_T *puiAlign);
+
+/*
+ * DevmemSubAllocate()
+ *
+ * Makes an allocation (possibly a "suballocation", as described
+ * below) of device virtual memory from this heap.
+ *
+ * The size and alignment of the allocation will be honoured by the RA
+ * that allocates the "suballocation".  The resulting allocation will
+ * be mapped into GPU virtual memory and the physical memory to back
+ * it will exist, by the time this call successfully completes.
+ *
+ * The size must be a positive integer multiple of the alignment.
+ * (i.e. the alignment specifies the alignment of both the start and
+ * the end of the resulting allocation.)
+ *
+ * Allocations made via this API are routed through a "suballocation
+ * RA" which is responsible for ensuring that small allocations can be
+ * made without wasting physical memory in the server.  Furthermore,
+ * such suballocations can be made entirely client side without
+ * needing to go to the server unless the allocation spills into a new
+ * page.
+ *
+ * Such suballocations cause many allocations to share the same "PMR".
+ * This happens only when the flags match exactly.
+ *
+ */
+
+PVRSRV_ERROR
+DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier,
+                  DEVMEM_HEAP *psHeap,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiAlign,
+                  DEVMEM_FLAGS_T uiFlags,
+                  const IMG_CHAR *pszText,
+                  DEVMEM_MEMDESC **ppsMemDescPtr);
+
+#define DevmemAllocate(...) \
+    DevmemSubAllocate(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__)
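+
+/* Illustrative sketch only (sizes, alignment and flags are assumptions):
+ * two small allocations made with identical flags may be carved from the
+ * same underlying PMR by the suballocation RA, e.g.
+ *
+ *     DevmemAllocate(psHeap, 0x100, 0x10, uiFlags, "AllocA", &psMemDescA);
+ *     DevmemAllocate(psHeap, 0x100, 0x10, uiFlags, "AllocB", &psMemDescB);
+ *
+ * Neither call need reach the server unless the RA has to import a fresh
+ * quantum-sized chunk.
+ */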
+
+PVRSRV_ERROR
+DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection,
+                         IMG_DEVMEM_SIZE_T uiSize,
+                         IMG_DEVMEM_ALIGN_T uiAlign,
+                         IMG_UINT32 uiLog2HeapPageSize,
+                         DEVMEM_FLAGS_T uiFlags,
+                         const IMG_CHAR *pszText,
+                         DEVMEM_MEMDESC **ppsMemDescPtr);
+
+PVRSRV_ERROR
+DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *paui32AllocPageIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pauiFreePageIndices,
+                      SPARSE_MEM_RESIZE_FLAGS uiFlags);
+
+PVRSRV_ERROR
+DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection,
+                     IMG_DEVMEM_SIZE_T uiSize,
+                     IMG_DEVMEM_SIZE_T uiChunkSize,
+                     IMG_UINT32 ui32NumPhysChunks,
+                     IMG_UINT32 ui32NumVirtChunks,
+                     IMG_UINT32 *pui32MappingTable,
+                     IMG_DEVMEM_ALIGN_T uiAlign,
+                     IMG_UINT32 uiLog2HeapPageSize,
+                     DEVMEM_FLAGS_T uiFlags,
+                     const IMG_CHAR *pszText,
+                     DEVMEM_MEMDESC **ppsMemDescPtr);
+
+/*
+ * DevmemFree()
+ *
+ * Reverses that done by DevmemSubAllocate().  N.B.  The underlying
+ * mapping and server side allocation _may_ not be torn down, for
+ * example, if the allocation has been exported, or if multiple
+ * allocations were suballocated from the same mapping, but this is
+ * properly refcounted, so the caller does not have to care.
+ */
+
+extern IMG_BOOL
+DevmemFree(DEVMEM_MEMDESC *psMemDesc);
+
+/*
+	DevmemMapToDevice:
+
+	Map an allocation to the device it was allocated from.
+	This function _must_ be called before any call to
+	DevmemAcquireDevVirtAddr is made as it binds the allocation
+	to the heap.
+	DevmemReleaseDevVirtAddr is used to release the reference
+	to the device mapping this function created, but it doesn't
+	mean that the memory will actually be unmapped from the
+	device as other references to the mapping obtained via
+	DevmemAcquireDevVirtAddr could still be active.
+*/
+PVRSRV_ERROR DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
+							   DEVMEM_HEAP *psHeap,
+							   IMG_DEV_VIRTADDR *psDevVirtAddr);
+
+/*
+	DevmemMapToDeviceAddress:
+
+	Same as DevmemMapToDevice but the caller chooses the address
+	to map to.
+*/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+                         DEVMEM_HEAP *psHeap,
+                         IMG_DEV_VIRTADDR sDevVirtAddr);
+
+/*
+	DevmemAcquireDevVirtAddr
+
+	Acquire the MemDesc's device virtual address.
+	This function _must_ be called after DevmemMapToDevice
+	and is expected to be used by functions which didn't allocate
+	the MemDesc but need to know its address
+ */
+PVRSRV_ERROR DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                                      IMG_DEV_VIRTADDR *psDevVirtAddrRet);
+/*
+ * DevmemReleaseDevVirtAddr()
+ *
+ * give up the licence to use the device virtual address that was
+ * acquired by "Acquire" or "MapToDevice"
+ */
+extern void
+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc);
+
+/*
+ * DevmemAcquireCpuVirtAddr()
+ *
+ * Acquires a license to use the cpu virtual address of this mapping.
+ * Note that the memory may not have been mapped into cpu virtual
+ * memory prior to this call.  On first "acquire" the memory will be
+ * mapped in (if it wasn't statically mapped in) and on the last
+ * release it _may_ become unmapped.  A later call to "Acquire" _may_
+ * map the memory at a different address.
+ */
+PVRSRV_ERROR DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                                      void **ppvCpuVirtAddr);
+
+/*
+ * DevmemReacquireCpuVirtAddr()
+ *
+ * (Re)acquires the license to use the cpu virtual address of this
+ * mapping if (and only if) there is already a pre-existing license to
+ * use it; returns NULL in *ppvCpuVirtAddr otherwise.
+ */
+void DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                                void **ppvCpuVirtAddr);
+
+/*
+ * DevmemReleaseCpuVirtAddr()
+ *
+ * give up the licence to use the cpu virtual address that was granted
+ * with the "Acquire" call.
+ */
+extern void
+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc);
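+
+/* Illustrative sketch only: a balanced acquire/release pair for the CPU
+ * mapping. As noted above, a later re-acquire may return a different
+ * address, so the pointer must not be cached across the release.
+ *
+ *     void *pvCpuVAddr;
+ *
+ *     DevmemAcquireCpuVirtAddr(psMemDesc, &pvCpuVAddr);
+ *     ...read/write through pvCpuVAddr...
+ *     DevmemReleaseCpuVirtAddr(psMemDesc);
+ */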
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+/*
+ * DevmemExport()
+ *
+ * Given a memory allocation allocated with DevmemAllocateExportable()
+ * create a "cookie" that can be passed intact by the caller's own choice
+ * of secure IPC to another process and used as the argument to "map"
+ * to map this memory into a heap in the target process.  N.B.  This can
+ * also be used to map into multiple heaps in one process, though that's not
+ * the intention.
+ *
+ * Note, the caller must later call Unexport before freeing the
+ * memory.
+ */
+PVRSRV_ERROR DevmemExport(DEVMEM_MEMDESC *psMemDesc,
+                          DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+
+void DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
+					DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+PVRSRV_ERROR
+DevmemImport(SHARED_DEV_CONNECTION hDevConnection,
+			 DEVMEM_EXPORTCOOKIE *psCookie,
+			 DEVMEM_FLAGS_T uiFlags,
+			 DEVMEM_MEMDESC **ppsMemDescPtr);
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+/*
+ * DevmemMakeLocalImportHandle()
+ *
+ * This is a "special case" function for making a server export cookie
+ * which went through the direct bridge into an export cookie that can
+ * be passed through the client bridge.
+ */
+PVRSRV_ERROR
+DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+                            IMG_HANDLE hServerExport,
+                            IMG_HANDLE *hClientExport);
+
+/*
+ * DevmemUnmakeLocalImportHandle()
+ *
+ * Free any resource associated with the Make operation
+ */
+PVRSRV_ERROR
+DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+                              IMG_HANDLE hClientExport);
+
+/*
+ *
+ * The following set of functions is specific to the heap "blueprint"
+ * stuff, for automatic creation of heaps when a context is created
+ *
+ */
+
+
+/* Devmem_HeapConfigCount: returns the number of heap configs that
+   this device has.  Note that there is no acquire/release semantics
+   required, as this data is guaranteed to be constant for the
+   lifetime of the device node */
+extern PVRSRV_ERROR
+DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection,
+                      IMG_UINT32 *puiNumHeapConfigsOut);
+
+/* Devmem_HeapCount: returns the number of heaps that a given heap
+   config on this device has.  Note that there is no acquire/release
+   semantics required, as this data is guaranteed to be constant for
+   the lifetime of the device node */
+extern PVRSRV_ERROR
+DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection,
+                IMG_UINT32 uiHeapConfigIndex,
+                IMG_UINT32 *puiNumHeapsOut);
+/* Devmem_HeapConfigName: return the name of the given heap config.
+   The caller is to provide the storage for the returned string and
+   indicate the number of bytes (including null terminator) for such
+   string in the BufSz arg.  Note that there is no acquire/release
+   semantics required, as this data is guaranteed to be constant for
+   the lifetime of the device node.
+ */
+extern PVRSRV_ERROR
+DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection,
+                     IMG_UINT32 uiHeapConfigIndex,
+                     IMG_CHAR *pszConfigNameOut,
+                     IMG_UINT32 uiConfigNameBufSz);
+
+/* Devmem_HeapDetails: fetches all the metadata that is recorded in
+   this heap "blueprint".  Namely: heap name (caller to provide
+   storage, and indicate buffer size (including null terminator) in
+   BufSz arg), device virtual address and length, log2 of data page
+   size (will be one of 12, 14, 16, 18, 20, 21, at time of writing).
+   Note that there is no acquire/release semantics required, as this
+   data is guaranteed to be constant for the lifetime of the device
+   node. */
+extern PVRSRV_ERROR
+DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection,
+                  IMG_UINT32 uiHeapConfigIndex,
+                  IMG_UINT32 uiHeapIndex,
+                  IMG_CHAR *pszHeapNameOut,
+                  IMG_UINT32 uiHeapNameBufSz,
+                  IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+                  IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+                  IMG_UINT32 *puiLog2DataPageSize,
+                  IMG_UINT32 *puiLog2ImportAlignmentOut,
+                  IMG_UINT32 *puiLog2TilingStrideFactor);
+
+/*
+ * Devmem_FindHeapByName()
+ *
+ * returns the heap handle for the named _automagic_ heap in this
+ * context.  "automagic" heaps are those that are born with the
+ * context from a blueprint
+ */
+extern PVRSRV_ERROR
+DevmemFindHeapByName(const DEVMEM_CONTEXT *psCtx,
+                     const IMG_CHAR *pszHeapName,
+                     DEVMEM_HEAP **ppsHeapRet);
+
+/*
+ * DevmemGetHeapBaseDevVAddr()
+ *
+ * returns the device virtual address of the base of the heap.
+ */
+
+PVRSRV_ERROR
+DevmemGetHeapBaseDevVAddr(DEVMEM_HEAP *psHeap,
+			  IMG_DEV_VIRTADDR *pDevVAddr);
+
+extern PVRSRV_ERROR
+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+			   IMG_HANDLE *phImport);
+
+extern PVRSRV_ERROR
+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
+						   IMG_UINT64 *pui64UID);
+
+PVRSRV_ERROR
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
+				IMG_HANDLE *hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
+		IMG_HANDLE *hPMR,
+		IMG_DEVMEM_OFFSET_T *puiPMROffset);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
+				DEVMEM_FLAGS_T *puiFlags);
+
+IMG_INTERNAL SHARED_DEV_CONNECTION
+DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc);
+
+PVRSRV_ERROR
+DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection,
+				  IMG_HANDLE hExtHandle,
+				  DEVMEM_FLAGS_T uiFlags,
+				  DEVMEM_MEMDESC **ppsMemDescPtr,
+				  IMG_DEVMEM_SIZE_T *puiSizePtr,
+				  const IMG_CHAR *pszAnnotation);
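+
+/* Illustrative sketch only, assuming psMemDesc was allocated exportable:
+ * DevmemLocalGetImportHandle() and DevmemLocalImport() may be paired to
+ * re-import an allocation over the same connection, e.g.
+ *
+ *     IMG_HANDLE hImport;
+ *     DEVMEM_MEMDESC *psImportedMemDesc;
+ *     IMG_DEVMEM_SIZE_T uiImportedSize;
+ *
+ *     DevmemLocalGetImportHandle(psMemDesc, &hImport);
+ *     DevmemLocalImport(hDevConnection, hImport, uiFlags,
+ *                       &psImportedMemDesc, &uiImportedSize,
+ *                       "ImportedAlloc");
+ */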
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
+                         IMG_DEV_VIRTADDR sDevVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext,
+                      IMG_DEV_VIRTADDR *psFaultAddress);
+
+/* DevmemGetHeapLog2PageSize()
+ *
+ * Get the page size used for a certain heap.
+ */
+IMG_UINT32
+DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap);
+
+/* DevmemGetHeapTilingProperties()
+ *
+ * Get the import alignment and tiling stride factor used for a certain heap.
+ */
+PVRSRV_ERROR
+DevmemGetHeapTilingProperties(DEVMEM_HEAP *psHeap,
+                              IMG_UINT32 *puiLog2ImportAlignment,
+                              IMG_UINT32 *puiLog2TilingStrideFactor);
+
+/**************************************************************************/ /*!
+@Function       RegisterDevmemPFNotify
+@Description    Registers that the application wants to be signaled when a page
+                fault occurs.
+
+@Input          psContext      Memory context of the process that would
+                               like to be notified.
+@Input          ui32PID        The PID of the calling process.
+@Input          bRegister      If true, register. If false, de-register.
+@Return         PVRSRV_ERROR:  PVRSRV_OK on success. Otherwise, an error
+                               code.
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
+                       IMG_UINT32     ui32PID,
+                       IMG_BOOL       bRegister);
+
+/**************************************************************************/ /*!
+@Function       GetMaxDevMemSize
+@Description    Get the amount of device memory on the current platform
+		(memory size in bytes)
+@Output         puiLMASize            LMA memory size
+@Output         puiUMASize            UMA memory size
+@Return         Error code
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+GetMaxDevMemSize(SHARED_DEV_CONNECTION hDevConnection,
+		 IMG_DEVMEM_SIZE_T *puiLMASize,
+		 IMG_DEVMEM_SIZE_T *puiUMASize);
+
+#endif /* #ifndef SRVCLIENT_DEVICEMEM_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_heapcfg.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_heapcfg.c
new file mode 100644
index 0000000..7ef7820f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_heapcfg.c
@@ -0,0 +1,137 @@
+/*************************************************************************/ /*!
+@File           devicemem_heapcfg.c
+@Title          Temporary Device Memory 2 stuff
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+/* our exported API */
+#include "devicemem_heapcfg.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "connection_server.h"
+
+PVRSRV_ERROR
+HeapCfgHeapConfigCount(CONNECTION_DATA * psConnection,
+					   const PVRSRV_DEVICE_NODE *psDeviceNode,
+					   IMG_UINT32 *puiNumHeapConfigsOut)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	*puiNumHeapConfigsOut = psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapCount(CONNECTION_DATA * psConnection,
+				 const PVRSRV_DEVICE_NODE *psDeviceNode,
+				 IMG_UINT32 uiHeapConfigIndex,
+				 IMG_UINT32 *puiNumHeapsOut)
+{
+	if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+	{
+		return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+	}
+
+	*puiNumHeapsOut = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps;
+
+	return PVRSRV_OK;
+}
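+
+/* Illustrative sketch only: a caller can enumerate every heap in every
+ * config with the two counting functions above, e.g.
+ *
+ *     IMG_UINT32 uiNumConfigs, uiNumHeaps, uiCfg;
+ *
+ *     HeapCfgHeapConfigCount(psConnection, psDeviceNode, &uiNumConfigs);
+ *     for (uiCfg = 0; uiCfg < uiNumConfigs; uiCfg++)
+ *     {
+ *         HeapCfgHeapCount(psConnection, psDeviceNode, uiCfg, &uiNumHeaps);
+ *         ...query each heap via HeapCfgHeapDetails()...
+ *     }
+ */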
+
+PVRSRV_ERROR
+HeapCfgHeapConfigName(CONNECTION_DATA * psConnection,
+					  const PVRSRV_DEVICE_NODE *psDeviceNode,
+					  IMG_UINT32 uiHeapConfigIndex,
+					  IMG_UINT32 uiHeapConfigNameBufSz,
+					  IMG_CHAR *pszHeapConfigNameOut)
+{
+	if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+	{
+		return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+	}
+
+	OSSNPrintf(pszHeapConfigNameOut, uiHeapConfigNameBufSz, "%s", psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].pszName);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapDetails(CONNECTION_DATA * psConnection,
+				   const PVRSRV_DEVICE_NODE *psDeviceNode,
+				   IMG_UINT32 uiHeapConfigIndex,
+				   IMG_UINT32 uiHeapIndex,
+				   IMG_UINT32 uiHeapNameBufSz,
+				   IMG_CHAR *pszHeapNameOut,
+				   IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+				   IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+				   IMG_UINT32 *puiLog2DataPageSizeOut,
+				   IMG_UINT32 *puiLog2ImportAlignmentOut,
+				   IMG_UINT32 *puiLog2TilingStrideFactorOut)
+{
+	DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint;
+
+	if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+	{
+		return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+	}
+
+	if (uiHeapIndex >= psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps)
+	{
+		return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+	}
+
+	psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex];
+
+	OSSNPrintf(pszHeapNameOut, uiHeapNameBufSz, "%s", psHeapBlueprint->pszName);
+	*psDevVAddrBaseOut = psHeapBlueprint->sHeapBaseAddr;
+	*puiHeapLengthOut = psHeapBlueprint->uiHeapLength;
+	*puiLog2DataPageSizeOut = psHeapBlueprint->uiLog2DataPageSize;
+	*puiLog2ImportAlignmentOut = psHeapBlueprint->uiLog2ImportAlignment;
+	*puiLog2TilingStrideFactorOut = psHeapBlueprint->uiLog2TilingStrideFactor;
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_heapcfg.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_heapcfg.h
new file mode 100644
index 0000000..8933831
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_heapcfg.h
@@ -0,0 +1,163 @@
+/**************************************************************************/ /*!
+@File
+@Title          Temporary Device Memory 2 stuff
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICEMEMHEAPCFG_H__
+#define __DEVICEMEMHEAPCFG_H__
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+
+/* FIXME: Find a better way of defining _PVRSRV_DEVICE_NODE_ */
+struct _PVRSRV_DEVICE_NODE_;
+/* FIXME: Find a better way of defining _CONNECTION_DATA_ */
+struct _CONNECTION_DATA_;
+
+
+/*
+  A "heap config" is a blueprint to be used for initial setting up of
+  heaps when a device memory context is created.
+
+  We define a data structure to describe this, but it's really down to
+  the caller to populate it.  This is all expected to be in-kernel.
+  We provide an API that client code can use to enquire about the
+  blueprint, such that it may do the heap setup during the context
+  creation call on behalf of the user */
+
+/* blueprint for a single heap */
+typedef struct _DEVMEM_HEAP_BLUEPRINT_
+{
+	/* Name of this heap - for debug purposes, and perhaps for lookup
+	by name? */
+	const IMG_CHAR *pszName;
+
+	/* Virtual address of the beginning of the heap.  This _must_ be a
+	multiple of the data page size for the heap.  It is
+	_recommended_ that it be coarser than that - especially, it
+	should begin on a boundary appropriate to the MMU for the
+	device.  For Rogue, this is a Page Directory boundary, or 1GB
+	(virtual address a multiple of 0x0040000000). */
+	IMG_DEV_VIRTADDR sHeapBaseAddr;
+
+	/* Length of the heap.  The END address of the heap has a
+	similar restriction to that of the _beginning_ of the heap:
+	the heap length _must_ be a whole number of data pages.
+	Again, the recommendation is that it ends on a 1GB boundary.
+	Again, this is not essential, but we do know that (at the time
+	of writing) the current implementation of mmu_common.c is such
+	that no two heaps may share a page directory, thus the
+	remaining virtual space would be wasted if the length were not
+	a multiple of 1GB */
+	IMG_DEVMEM_SIZE_T uiHeapLength;
+
+	/* Data page size.  This is the page size that is going to get
+	programmed into the MMU, so it needs to be a valid one for the
+	device.  Importantly, the start address and length _must_ be
+	multiples of this page size.  Note that the page size is
+	specified as the log 2 relative to 1 byte (e.g. 12 indicates
+	4kB) */
+	IMG_UINT32 uiLog2DataPageSize;
+
+	/* Import alignment.  Force imports to this heap to be
+	aligned to at least this value */
+	IMG_UINT32 uiLog2ImportAlignment;
+
+	/* Tiled heaps have an optimum byte-stride, this can be derived from
+	the heap alignment and tiling mode. This is abstracted here such that
+	Log2ByteStride = Log2Alignment - Log2TilingStrideFactor */
+	IMG_UINT32 uiLog2TilingStrideFactor;
+} DEVMEM_HEAP_BLUEPRINT;
+
+/* entire named heap config */
+typedef struct _DEVMEM_HEAP_CONFIG_
+{
+    /* Name of this heap config - for debug and maybe lookup */
+    const IMG_CHAR *pszName;
+
+    /* Number of heaps in this config */
+    IMG_UINT32 uiNumHeaps;
+
+    /* Array of individual heap blueprints as defined above */
+    DEVMEM_HEAP_BLUEPRINT *psHeapBlueprintArray;
+} DEVMEM_HEAP_CONFIG;
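+
+/* Illustrative sketch only; every value below is an assumption. A
+ * minimal config with a single 1GB heap of 4kB data pages, following
+ * the recommendations in the blueprint comments above, might be
+ * populated as:
+ *
+ *     static DEVMEM_HEAP_BLUEPRINT gasExampleBlueprints[] =
+ *     {
+ *         { "ExampleHeap", { 0x0040000000ULL }, 0x0040000000ULL,
+ *           12, 12, 0 },
+ *     };
+ *
+ *     static DEVMEM_HEAP_CONFIG gsExampleConfig =
+ *     {
+ *         "ExampleConfig", 1, gasExampleBlueprints
+ *     };
+ */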
+
+
+extern PVRSRV_ERROR
+HeapCfgHeapConfigCount(struct _CONNECTION_DATA_ * psConnection,
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 *puiNumHeapConfigsOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapCount(struct _CONNECTION_DATA_ * psConnection,
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 *puiNumHeapsOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapConfigName(struct _CONNECTION_DATA_ * psConnection,
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 uiHeapConfigNameBufSz,
+    IMG_CHAR *pszHeapConfigNameOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapDetails(struct _CONNECTION_DATA_ * psConnection,
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 uiHeapIndex,
+    IMG_UINT32 uiHeapNameBufSz,
+    IMG_CHAR *pszHeapNameOut,
+    IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+    IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+    IMG_UINT32 *puiLog2DataPageSizeOut,
+    IMG_UINT32 *puiLog2ImportAlignmentOut,
+    IMG_UINT32 *puiLog2TilingStrideFactorOut
+);
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_history_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_history_server.c
new file mode 100644
index 0000000..cb2c249
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_history_server.c
@@ -0,0 +1,1908 @@
+/*************************************************************************/ /*!
+@File
+@Title          Devicemem history functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Devicemem history functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h"
+#include "img_defs.h"
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+#include "pvr_debug.h"
+#include "devicemem_server.h"
+#include "lock.h"
+#include "devicemem_history_server.h"
+#include "pdump_km.h"
+
+#define ALLOCATION_LIST_NUM_ENTRIES 10000
+
+/* data type to hold an allocation index.
+ * we make it 16 bits wide if possible
+ */
+#if ALLOCATION_LIST_NUM_ENTRIES <= 0xFFFF
+typedef uint16_t ALLOC_INDEX_T;
+#else
+typedef uint32_t ALLOC_INDEX_T;
+#endif
+
+/* a record describing a single allocation known to DeviceMemHistory.
+ * this is an element in a doubly linked list of allocations
+ */
+typedef struct _RECORD_ALLOCATION_
+{
+	/* time when this RECORD_ALLOCATION was created/initialised */
+	IMG_UINT64 ui64CreationTime;
+	/* serial number of the PMR relating to this allocation */
+	IMG_UINT64 ui64Serial;
+	/* base DevVAddr of this allocation */
+	IMG_DEV_VIRTADDR sDevVAddr;
+	/* size in bytes of this allocation */
+	IMG_DEVMEM_SIZE_T uiSize;
+	/* Log2 page size of this allocation's GPU pages */
+	IMG_UINT32 ui32Log2PageSize;
+	/* Process ID (PID) this allocation belongs to */
+	IMG_PID uiPID;
+	/* index of previous allocation in the list */
+	ALLOC_INDEX_T ui32Prev;
+	/* index of next allocation in the list */
+	ALLOC_INDEX_T ui32Next;
+	/* annotation/name of this allocation */
+	IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN];
+} RECORD_ALLOCATION;
+
+/* each command in the circular buffer is prefixed with an 8-bit value
+ * denoting the command type
+ */
+typedef enum _COMMAND_TYPE_
+{
+	COMMAND_TYPE_NONE,
+	COMMAND_TYPE_TIMESTAMP,
+	COMMAND_TYPE_MAP_ALL,
+	COMMAND_TYPE_UNMAP_ALL,
+	COMMAND_TYPE_MAP_RANGE,
+	COMMAND_TYPE_UNMAP_RANGE,
+	/* sentinel value */
+	COMMAND_TYPE_COUNT,
+} COMMAND_TYPE;
+
+/* Timestamp command:
+ * This command is inserted into the circular buffer to provide an updated
+ * timestamp.
+ * The nanosecond-accuracy timestamp is packed into a 56-bit integer, in order
+ * for the whole command to fit into 8 bytes.
+ */
+typedef struct _COMMAND_TIMESTAMP_
+{
+	IMG_UINT8 aui8TimeNs[7];
+} COMMAND_TIMESTAMP;
+
+/* MAP_ALL command:
+ * This command denotes the allocation at the given index was wholly mapped
+ * into the GPU MMU
+ */
+typedef struct _COMMAND_MAP_ALL_
+{
+	ALLOC_INDEX_T uiAllocIndex;
+} COMMAND_MAP_ALL;
+
+/* UNMAP_ALL command:
+ * This command denotes the allocation at the given index was wholly unmapped
+ * from the GPU MMU
+ * Note: COMMAND_MAP_ALL and COMMAND_UNMAP_ALL commands have the same layout.
+ */
+typedef COMMAND_MAP_ALL COMMAND_UNMAP_ALL;
+
+/* packing attributes for the MAP_RANGE command */
+#define MAP_RANGE_MAX_START ((1 << 18) - 1)
+#define MAP_RANGE_MAX_RANGE ((1 << 12) - 1)
+
+/* MAP_RANGE command:
+ * Denotes a range of pages within the given allocation being mapped.
+ * The range is expressed as [Page Index] + [Page Count]
+ * This information is packed into a 40-bit integer, in order to make
+ * the command size 8 bytes.
+ */
+
+typedef struct _COMMAND_MAP_RANGE_
+{
+	IMG_UINT8 aui8Data[5];
+	ALLOC_INDEX_T uiAllocIndex;
+} COMMAND_MAP_RANGE;
+
+/* UNMAP_RANGE command:
+ * Denotes a range of pages within the given allocation being unmapped.
+ * The range is expressed as [Page Index] + [Page Count]
+ * This information is packed into a 40-bit integer, in order to make
+ * the command size 8 bytes.
+ * Note: COMMAND_MAP_RANGE and COMMAND_UNMAP_RANGE commands have the same layout.
+ */
+typedef COMMAND_MAP_RANGE COMMAND_UNMAP_RANGE;
+
+/* wrapper structure for a command */
+typedef struct _COMMAND_WRAPPER_
+{
+	IMG_UINT8 ui8Type;
+	union {
+		COMMAND_TIMESTAMP sTimeStamp;
+		COMMAND_MAP_ALL sMapAll;
+		COMMAND_UNMAP_ALL sUnmapAll;
+		COMMAND_MAP_RANGE sMapRange;
+		COMMAND_UNMAP_RANGE sUnmapRange;
+	} u;
+} COMMAND_WRAPPER;
+
+/* target size for the circular buffer of commands */
+#define CIRCULAR_BUFFER_SIZE_KB 2048
+/* turn the circular buffer target size into a number of commands */
+#define CIRCULAR_BUFFER_NUM_COMMANDS ((CIRCULAR_BUFFER_SIZE_KB * 1024) / sizeof(COMMAND_WRAPPER))
+
+/* index value denoting the end of a list */
+#define END_OF_LIST 0xFFFFFFFF
+#define ALLOC_INDEX_TO_PTR(idx) (&(gsDevicememHistoryData.sRecords.pasAllocations[(idx)]))
+#define CHECK_ALLOC_INDEX(idx) ((idx) < ALLOCATION_LIST_NUM_ENTRIES)
+
+/* wrapper structure for the allocation records and the commands circular buffer */
+typedef struct _RECORDS_
+{
+	RECORD_ALLOCATION *pasAllocations;
+	IMG_UINT32 ui32AllocationsListHead;
+
+	IMG_UINT32 ui32Head;
+	IMG_UINT32 ui32Tail;
+	COMMAND_WRAPPER *pasCircularBuffer;
+} RECORDS;
+
+typedef struct _DEVICEMEM_HISTORY_DATA_
+{
+	/* debugfs entry */
+	void *pvStatsEntry;
+
+	RECORDS sRecords;
+	POS_LOCK hLock;
+} DEVICEMEM_HISTORY_DATA;
+
+static DEVICEMEM_HISTORY_DATA gsDevicememHistoryData;
+
+static void DevicememHistoryLock(void)
+{
+	OSLockAcquire(gsDevicememHistoryData.hLock);
+}
+
+static void DevicememHistoryUnlock(void)
+{
+	OSLockRelease(gsDevicememHistoryData.hLock);
+}
+
+/* given a time stamp, calculate the age in nanoseconds */
+static IMG_UINT64 _CalculateAge(IMG_UINT64 ui64Now,
+						IMG_UINT64 ui64Then,
+						IMG_UINT64 ui64Max)
+{
+	if (ui64Now >= ui64Then)
+	{
+		/* no clock wrap */
+		return ui64Now - ui64Then;
+	}
+	else
+	{
+		/* clock has wrapped */
+		return (ui64Max - ui64Then) + ui64Now + 1;
+	}
+}
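+
+/* Worked example: with ui64Max = 0xFF, ui64Then = 0xFE and ui64Now = 0x01
+ * the clock has wrapped, so the age is (0xFF - 0xFE) + 0x01 + 1 = 3 ticks.
+ */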
+
+/* AcquireCBSlot:
+ * Acquire the next slot in the circular buffer and
+ * move the circular buffer head along by one
+ * Returns a pointer to the acquired slot.
+ */
+static COMMAND_WRAPPER *AcquireCBSlot(void)
+{
+	COMMAND_WRAPPER *psSlot;
+
+	psSlot = &gsDevicememHistoryData.sRecords.pasCircularBuffer[gsDevicememHistoryData.sRecords.ui32Head];
+
+	gsDevicememHistoryData.sRecords.ui32Head =
+		(gsDevicememHistoryData.sRecords.ui32Head + 1)
+				% CIRCULAR_BUFFER_NUM_COMMANDS;
+
+	return psSlot;
+}
+
+/* TimeStampPack:
+ * Packs the given timestamp value into the COMMAND_TIMESTAMP structure.
+ * This takes a 64-bit nanosecond timestamp and packs it in to a 56-bit
+ * integer in the COMMAND_TIMESTAMP command.
+ */
+static void TimeStampPack(COMMAND_TIMESTAMP *psTimeStamp, IMG_UINT64 ui64Now)
+{
+	IMG_UINT32 i;
+
+	for (i = 0; i < ARRAY_SIZE(psTimeStamp->aui8TimeNs); i++)
+	{
+		psTimeStamp->aui8TimeNs[i] = ui64Now & 0xFF;
+		ui64Now >>= 8;
+	}
+}
+
+/* packing a 64-bit nanosecond into a 7-byte integer loses the
+ * top 8 bits of data. This must be taken into account when
+ * comparing a full timestamp against an unpacked timestamp
+ */
+#define TIME_STAMP_MASK ((1LLU << 56) - 1)
+#define DO_TIME_STAMP_MASK(ns64) ((ns64) & TIME_STAMP_MASK)
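+
+/* Worked example: packing ui64Now = 0x0123456789ABCDEF keeps only the
+ * low 56 bits, so TimeStampUnpack() later returns 0x23456789ABCDEF,
+ * i.e. exactly DO_TIME_STAMP_MASK(0x0123456789ABCDEF).
+ */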
+
+/* TimeStampUnpack:
+ * Unpack the timestamp value from the given COMMAND_TIMESTAMP command
+ */
+static IMG_UINT64 TimeStampUnpack(COMMAND_TIMESTAMP *psTimeStamp)
+{
+	IMG_UINT64 ui64TimeNs = 0;
+	IMG_UINT32 i;
+
+	for (i = ARRAY_SIZE(psTimeStamp->aui8TimeNs); i > 0; i--)
+	{
+		ui64TimeNs <<= 8;
+		ui64TimeNs |= (IMG_UINT64) psTimeStamp->aui8TimeNs[i - 1];
+	}
+
+	return ui64TimeNs;
+}
+
+#if defined(PDUMP)
+
+static void EmitPDumpAllocation(IMG_UINT32 ui32AllocationIndex,
+					RECORD_ALLOCATION *psAlloc)
+{
+	PDUMPCOMMENT("[SrvPFD] Allocation: %u"
+			" Addr: " IMG_DEV_VIRTADDR_FMTSPEC
+			" Size: " IMG_DEVMEM_SIZE_FMTSPEC
+			" Page size: %u"
+			" PID: %u"
+			" Process: %s"
+			" Name: %s",
+			ui32AllocationIndex,
+			psAlloc->sDevVAddr.uiAddr,
+			psAlloc->uiSize,
+			1U << psAlloc->ui32Log2PageSize,
+			psAlloc->uiPID,
+			OSGetCurrentClientProcessNameKM(),
+			psAlloc->szName);
+}
+
+static void EmitPDumpMapUnmapAll(COMMAND_TYPE eType,
+					IMG_UINT32 ui32AllocationIndex)
+{
+	const IMG_CHAR *pszOpName;
+
+	switch (eType)
+	{
+		case COMMAND_TYPE_MAP_ALL:
+			pszOpName = "MAP_ALL";
+			break;
+		case COMMAND_TYPE_UNMAP_ALL:
+			pszOpName = "UNMAP_ALL";
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapAll: Invalid type: %u",
+										eType));
+			return;
+
+	}
+
+	PDUMPCOMMENT("[SrvPFD] Op: %s Allocation: %u",
+								pszOpName,
+								ui32AllocationIndex);
+}
+
+static void EmitPDumpMapUnmapRange(COMMAND_TYPE eType,
+					IMG_UINT32 ui32AllocationIndex,
+					IMG_UINT32 ui32StartPage,
+					IMG_UINT32 ui32Count)
+{
+	const IMG_CHAR *pszOpName;
+
+	switch (eType)
+	{
+		case COMMAND_TYPE_MAP_RANGE:
+			pszOpName = "MAP_RANGE";
+			break;
+		case COMMAND_TYPE_UNMAP_RANGE:
+			pszOpName = "UNMAP_RANGE";
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapRange: Invalid type: %u",
+										eType));
+			return;
+	}
+
+	PDUMPCOMMENT("[SrvPFD] Op: %s Allocation: %u Start Page: %u Count: %u",
+									pszOpName,
+									ui32AllocationIndex,
+									ui32StartPage,
+									ui32Count);
+}
+
+#endif
+
+/* InsertTimeStampCommand:
+ * Insert a timestamp command into the circular buffer.
+ */
+static void InsertTimeStampCommand(IMG_UINT64 ui64Now)
+{
+	COMMAND_WRAPPER *psCommand;
+
+	psCommand = AcquireCBSlot();
+
+	psCommand->ui8Type = COMMAND_TYPE_TIMESTAMP;
+
+	TimeStampPack(&psCommand->u.sTimeStamp, ui64Now);
+}
+
+/* InsertMapAllCommand:
+ * Insert a "MAP_ALL" command for the given allocation into the circular buffer
+ */
+static void InsertMapAllCommand(IMG_UINT32 ui32AllocIndex)
+{
+	COMMAND_WRAPPER *psCommand;
+
+	psCommand = AcquireCBSlot();
+
+	psCommand->ui8Type = COMMAND_TYPE_MAP_ALL;
+	psCommand->u.sMapAll.uiAllocIndex = ui32AllocIndex;
+
+#if defined(PDUMP)
+	EmitPDumpMapUnmapAll(COMMAND_TYPE_MAP_ALL, ui32AllocIndex);
+#endif
+}
+
+/* InsertUnmapAllCommand:
+ * Insert a "UNMAP_ALL" command for the given allocation into the circular buffer
+ */
+static void InsertUnmapAllCommand(IMG_UINT32 ui32AllocIndex)
+{
+	COMMAND_WRAPPER *psCommand;
+
+	psCommand = AcquireCBSlot();
+
+	psCommand->ui8Type = COMMAND_TYPE_UNMAP_ALL;
+	psCommand->u.sUnmapAll.uiAllocIndex = ui32AllocIndex;
+
+#if defined(PDUMP)
+	EmitPDumpMapUnmapAll(COMMAND_TYPE_UNMAP_ALL, ui32AllocIndex);
+#endif
+}
+
+/* MapRangePack:
+ * Pack the given StartPage and Count values into the 40-bit representation
+ * in the MAP_RANGE command.
+ */
+static void MapRangePack(COMMAND_MAP_RANGE *psMapRange,
+						IMG_UINT32 ui32StartPage,
+						IMG_UINT32 ui32Count)
+{
+	IMG_UINT64 ui64Data;
+	IMG_UINT32 i;
+
+	/* we must encode the data into 40 bits:
+	 *   18 bits for the start page index
+	 *   12 bits for the range
+	 */
+
+	PVR_ASSERT(ui32StartPage <= MAP_RANGE_MAX_START);
+	PVR_ASSERT(ui32Count <= MAP_RANGE_MAX_RANGE);
+
+	ui64Data = (((IMG_UINT64) ui32StartPage) << 12) | ui32Count;
+
+	for (i = 0; i < ARRAY_SIZE(psMapRange->aui8Data); i++)
+	{
+		psMapRange->aui8Data[i] = ui64Data & 0xFF;
+		ui64Data >>= 8;
+	}
+}
+
+/* MapRangeUnpack:
+ * Unpack the StartPage and Count values from the 40-bit representation
+ * in the MAP_RANGE command.
+ */
+static void MapRangeUnpack(COMMAND_MAP_RANGE *psMapRange,
+						IMG_UINT32 *pui32StartPage,
+						IMG_UINT32 *pui32Count)
+{
+	IMG_UINT64 ui64Data = 0;
+	IMG_UINT32 i;
+
+	for (i = ARRAY_SIZE(psMapRange->aui8Data); i > 0; i--)
+	{
+		ui64Data <<= 8;
+		ui64Data |= (IMG_UINT64) psMapRange->aui8Data[i - 1];
+	}
+
+	*pui32StartPage = (ui64Data >> 12);
+	*pui32Count = ui64Data & ((1 << 12) - 1);
+}
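+
+/* Illustrative round trip (a sketch, not part of the driver logic): any
+ * start page and count that fit the 18-bit and 12-bit fields survive the
+ * encoding unchanged:
+ *
+ *     COMMAND_MAP_RANGE sMapRange;
+ *     IMG_UINT32 ui32Start, ui32Count;
+ *
+ *     MapRangePack(&sMapRange, 0x105, 7);
+ *     MapRangeUnpack(&sMapRange, &ui32Start, &ui32Count);
+ *     PVR_ASSERT((ui32Start == 0x105) && (ui32Count == 7));
+ */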
+
+/* InsertMapRangeCommand:
+ * Insert a MAP_RANGE command into the circular buffer with the given
+ * StartPage and Count values.
+ */
+static void InsertMapRangeCommand(IMG_UINT32 ui32AllocIndex,
+						IMG_UINT32 ui32StartPage,
+						IMG_UINT32 ui32Count)
+{
+	COMMAND_WRAPPER *psCommand;
+
+	psCommand = AcquireCBSlot();
+
+	psCommand->ui8Type = COMMAND_TYPE_MAP_RANGE;
+	psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;
+
+	MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);
+
+#if defined(PDUMP)
+	EmitPDumpMapUnmapRange(COMMAND_TYPE_MAP_RANGE,
+							ui32AllocIndex,
+							ui32StartPage,
+							ui32Count);
+#endif
+}
+
+/* InsertUnmapRangeCommand:
+ * Insert a UNMAP_RANGE command into the circular buffer with the given
+ * StartPage and Count values.
+ */
+static void InsertUnmapRangeCommand(IMG_UINT32 ui32AllocIndex,
+						IMG_UINT32 ui32StartPage,
+						IMG_UINT32 ui32Count)
+{
+	COMMAND_WRAPPER *psCommand;
+
+	psCommand = AcquireCBSlot();
+
+	psCommand->ui8Type = COMMAND_TYPE_UNMAP_RANGE;
+	psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;
+
+	MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);
+
+#if defined(PDUMP)
+	EmitPDumpMapUnmapRange(COMMAND_TYPE_UNMAP_RANGE,
+							ui32AllocIndex,
+							ui32StartPage,
+							ui32Count);
+#endif
+}
+
+/* InsertAllocationToList:
+ * Helper function for the allocation list.
+ * Inserts the given allocation at the head of the list, whose current head is
+ * pointed to by pui32ListHead
+ */
+static void InsertAllocationToList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
+{
+	RECORD_ALLOCATION *psAlloc;
+
+	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+	if (*pui32ListHead == END_OF_LIST)
+	{
+		/* list is currently empty, so just replace it */
+		*pui32ListHead = ui32Alloc;
+		psAlloc->ui32Next = psAlloc->ui32Prev = *pui32ListHead;
+	}
+	else
+	{
+		RECORD_ALLOCATION *psHeadAlloc;
+		RECORD_ALLOCATION *psTailAlloc;
+
+		psHeadAlloc = ALLOC_INDEX_TO_PTR(*pui32ListHead);
+		psTailAlloc = ALLOC_INDEX_TO_PTR(psHeadAlloc->ui32Prev);
+
+		/* make the new alloc point forwards to the previous head */
+		psAlloc->ui32Next = *pui32ListHead;
+		/* make the new alloc point backwards to the previous tail */
+		psAlloc->ui32Prev = psHeadAlloc->ui32Prev;
+
+		/* the head is now our new alloc */
+		*pui32ListHead = ui32Alloc;
+
+		/* the old head now points back to the new head */
+		psHeadAlloc->ui32Prev = *pui32ListHead;
+
+		/* the tail now points forward to the new head */
+		psTailAlloc->ui32Next = ui32Alloc;
+	}
+}
+
+static void InsertAllocationToBusyList(IMG_UINT32 ui32Alloc)
+{
+	InsertAllocationToList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
+}
+
+/* RemoveAllocationFromList:
+ * Helper function for the allocation list.
+ * Removes the given allocation from the list, whose head is
+ * pointed to by pui32ListHead
+ */
+static void RemoveAllocationFromList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
+{
+	RECORD_ALLOCATION *psAlloc;
+
+	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+	/* if this is the only element in the list then just make the list empty */
+	if ((*pui32ListHead == ui32Alloc) && (psAlloc->ui32Next == ui32Alloc))
+	{
+		*pui32ListHead = END_OF_LIST;
+	}
+	else
+	{
+		RECORD_ALLOCATION *psPrev, *psNext;
+
+		psPrev = ALLOC_INDEX_TO_PTR(psAlloc->ui32Prev);
+		psNext = ALLOC_INDEX_TO_PTR(psAlloc->ui32Next);
+
+		/* remove the allocation from the list */
+		psPrev->ui32Next = psAlloc->ui32Next;
+		psNext->ui32Prev = psAlloc->ui32Prev;
+
+		/* if this allocation is the head then update the head */
+		if (*pui32ListHead == ui32Alloc)
+		{
+			*pui32ListHead = psAlloc->ui32Next;
+		}
+	}
+}
+
+static void RemoveAllocationFromBusyList(IMG_UINT32 ui32Alloc)
+{
+	RemoveAllocationFromList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
+}
+
+/* TouchBusyAllocation:
+ * Move the given allocation to the head of the list
+ */
+static void TouchBusyAllocation(IMG_UINT32 ui32Alloc)
+{
+	RemoveAllocationFromBusyList(ui32Alloc);
+	InsertAllocationToBusyList(ui32Alloc);
+}
+
+static INLINE IMG_BOOL IsAllocationListEmpty(IMG_UINT32 ui32ListHead)
+{
+	return ui32ListHead == END_OF_LIST;
+}
+
+/* GetOldestBusyAllocation:
+ * Returns the index of the oldest allocation in the MRU list
+ */
+static IMG_UINT32 GetOldestBusyAllocation(void)
+{
+	IMG_UINT32 ui32Alloc;
+	RECORD_ALLOCATION *psAlloc;
+
+	ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+
+	if (ui32Alloc == END_OF_LIST)
+	{
+		return END_OF_LIST;
+	}
+
+	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+	return psAlloc->ui32Prev;
+}
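+
+/* Worked example: the records form a circular doubly-linked ring with no
+ * separate tail pointer, so the oldest entry is always head->ui32Prev.
+ * After inserting indices 2, then 1, then 0 into an empty list, the ring
+ * reads 0 <-> 1 <-> 2 with head 0, and GetOldestBusyAllocation() returns 2.
+ * GetFreeAllocation() below relies on this: a "free" slot is simply the
+ * oldest record, which gets recycled.
+ */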
+
+static IMG_UINT32 GetFreeAllocation(void)
+{
+	IMG_UINT32 ui32Alloc;
+
+	ui32Alloc = GetOldestBusyAllocation();
+
+	return ui32Alloc;
+}
+
+/* InitialiseAllocation:
+ * Initialise the given allocation structure with the given properties
+ */
+static void InitialiseAllocation(RECORD_ALLOCATION *psAlloc,
+							const IMG_CHAR *pszName,
+							IMG_UINT64 ui64Serial,
+							IMG_PID uiPID,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_UINT32 ui32Log2PageSize)
+{
+	OSStringLCopy(psAlloc->szName, pszName, sizeof(psAlloc->szName));
+	psAlloc->ui64Serial = ui64Serial;
+	psAlloc->uiPID = uiPID;
+	psAlloc->sDevVAddr = sDevVAddr;
+	psAlloc->uiSize = uiSize;
+	psAlloc->ui32Log2PageSize = ui32Log2PageSize;
+	psAlloc->ui64CreationTime = OSClockns64();
+}
+
+/* CreateAllocation:
+ * Creates a new allocation with the given properties then outputs the
+ * index of the allocation
+ */
+static PVRSRV_ERROR CreateAllocation(const IMG_CHAR *pszName,
+							IMG_UINT64 ui64Serial,
+							IMG_PID uiPID,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_BOOL bAutoPurge,
+							IMG_UINT32 *puiAllocationIndex)
+{
+	IMG_UINT32 ui32Alloc;
+	RECORD_ALLOCATION *psAlloc;
+
+	ui32Alloc = GetFreeAllocation();
+
+	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+	InitialiseAllocation(ALLOC_INDEX_TO_PTR(ui32Alloc),
+						pszName,
+						ui64Serial,
+						uiPID,
+						sDevVAddr,
+						uiSize,
+						ui32Log2PageSize);
+
+	/* put the newly initialised allocation at the front of the MRU list */
+	TouchBusyAllocation(ui32Alloc);
+
+	*puiAllocationIndex = ui32Alloc;
+
+#if defined(PDUMP)
+	EmitPDumpAllocation(ui32Alloc, psAlloc);
+#endif
+
+	return PVRSRV_OK;
+}
+
+/* MatchAllocation:
+ * Tests if the allocation at the given index matches the supplied properties.
+ * Returns IMG_TRUE if it is a match, otherwise IMG_FALSE.
+ */
+static IMG_BOOL MatchAllocation(IMG_UINT32 ui32AllocationIndex,
+						IMG_UINT64 ui64Serial,
+						IMG_DEV_VIRTADDR sDevVAddr,
+						IMG_DEVMEM_SIZE_T uiSize,
+						const IMG_CHAR *pszName,
+						IMG_UINT32 ui32Log2PageSize,
+						IMG_PID uiPID)
+{
+	RECORD_ALLOCATION *psAlloc;
+
+	psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocationIndex);
+
+	return (psAlloc->ui64Serial == ui64Serial) &&
+			(psAlloc->sDevVAddr.uiAddr == sDevVAddr.uiAddr) &&
+			(psAlloc->uiSize == uiSize) &&
+			(psAlloc->ui32Log2PageSize == ui32Log2PageSize) &&
+			(OSStringCompare(psAlloc->szName, pszName) == 0);
+}
+
+/* FindOrCreateAllocation:
+ * Convenience function.
+ * Given a set of allocation properties (serial, DevVAddr, size, name, etc),
+ * this function will look for an existing record of this allocation and
+ * create the allocation if there is no existing record
+ */
+static PVRSRV_ERROR FindOrCreateAllocation(IMG_UINT32 ui32AllocationIndexHint,
+							IMG_UINT64 ui64Serial,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char *pszName,
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_PID uiPID,
+							IMG_BOOL bSparse,
+							IMG_UINT32 *pui32AllocationIndexOut,
+							IMG_BOOL *pbCreated)
+{
+	IMG_UINT32 ui32AllocationIndex;
+	PVRSRV_ERROR eError;
+
+	if (ui32AllocationIndexHint != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE)
+	{
+		IMG_BOOL bHaveAllocation;
+
+		/* first, try to match against the index given by the client.
+		 * if the caller provided a hint but the allocation record is no longer
+		 * there, it must have been purged, so go ahead and create a new allocation
+		 */
+		bHaveAllocation = MatchAllocation(ui32AllocationIndexHint,
+								ui64Serial,
+								sDevVAddr,
+								uiSize,
+								pszName,
+								ui32Log2PageSize,
+								uiPID);
+		if (bHaveAllocation)
+		{
+			*pbCreated = IMG_FALSE;
+			*pui32AllocationIndexOut = ui32AllocationIndexHint;
+			return PVRSRV_OK;
+		}
+	}
+
+	/* if there is no record of the allocation then we
+	 * create it now
+	 */
+	eError = CreateAllocation(pszName,
+					ui64Serial,
+					uiPID,
+					sDevVAddr,
+					uiSize,
+					ui32Log2PageSize,
+					IMG_TRUE,
+					&ui32AllocationIndex);
+
+	if (eError == PVRSRV_OK)
+	{
+		*pui32AllocationIndexOut = ui32AllocationIndex;
+		*pbCreated = IMG_TRUE;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed to create record for allocation %s",
+								__func__,
+								pszName));
+	}
+
+	return eError;
+}
+
+/* GenerateMapUnmapCommandsForSparsePMR:
+ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the PMR's
+ * current mapping table
+ *
+ * PMR: The PMR whose mapping table to read.
+ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP range commands to.
+ * bMap: Set to IMG_TRUE for mapping or IMG_FALSE for unmapping
+ *
+ * This function goes through every page in the PMR's mapping table and looks for
+ * virtually contiguous ranges to record as being mapped or unmapped.
+ */
+static void GenerateMapUnmapCommandsForSparsePMR(PMR *psPMR,
+							IMG_UINT32 ui32AllocIndex,
+							IMG_BOOL bMap)
+{
+	PMR_MAPPING_TABLE *psMappingTable;
+	IMG_UINT32 ui32DonePages = 0;
+	IMG_UINT32 ui32NumPages;
+	IMG_UINT32 i;
+	IMG_BOOL bInARun = IMG_FALSE;
+	IMG_UINT32 ui32CurrentStart = 0;
+	IMG_UINT32 ui32RunCount = 0;
+
+	psMappingTable = PMR_GetMappigTable(psPMR);
+	ui32NumPages = psMappingTable->ui32NumPhysChunks;
+
+	if (ui32NumPages == 0)
+	{
+		/* nothing to do */
+		return;
+	}
+
+	for (i = 0; i < psMappingTable->ui32NumVirtChunks; i++)
+	{
+		if (psMappingTable->aui32Translation[i] != TRANSLATION_INVALID)
+		{
+			if (!bInARun)
+			{
+				bInARun = IMG_TRUE;
+				ui32CurrentStart = i;
+				ui32RunCount = 1;
+			}
+			else
+			{
+				ui32RunCount++;
+			}
+		}
+
+		if (bInARun)
+		{
+			/* test if we need to end this current run and generate the command,
+			 * either because the next page is not virtually contiguous
+			 * to the current page, we have reached the maximum range,
+			 * or this is the last page in the mapping table
+			 */
+			if ((psMappingTable->aui32Translation[i] == TRANSLATION_INVALID) ||
+				(ui32RunCount == MAP_RANGE_MAX_RANGE) ||
+				(i == (psMappingTable->ui32NumVirtChunks - 1)))
+			{
+				if (bMap)
+				{
+					InsertMapRangeCommand(ui32AllocIndex,
+										ui32CurrentStart,
+										ui32RunCount);
+				}
+				else
+				{
+					InsertUnmapRangeCommand(ui32AllocIndex,
+										ui32CurrentStart,
+										ui32RunCount);
+				}
+
+				ui32DonePages += ui32RunCount;
+
+				if (ui32DonePages == ui32NumPages)
+				{
+					break;
+				}
+
+				bInARun = IMG_FALSE;
+			}
+		}
+	}
+
+}
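+
+/* Worked example (hypothetical mapping table): with ui32NumVirtChunks = 6
+ * and aui32Translation[] = { valid, valid, INVALID, valid, valid, valid },
+ * a bMap = IMG_TRUE call emits MAP_RANGE(start 0, count 2) followed by
+ * MAP_RANGE(start 3, count 3); each run of virtually contiguous mapped
+ * chunks becomes a single command.
+ */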
+
+/* GenerateMapUnmapCommandsForChangeList:
+ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the
+ * list of page change (page map or page unmap) indices given.
+ *
+ * ui32NumPages: Number of pages which have changed.
+ * pui32PageList: List of indices of the pages which have changed.
+ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP range commands to.
+ * bMap: Set to IMG_TRUE for mapping or IMG_FALSE for unmapping
+ *
+ * This function goes through every page in the list and looks for
+ * virtually contiguous ranges to record as being mapped or unmapped.
+ */
+static void GenerateMapUnmapCommandsForChangeList(IMG_UINT32 ui32NumPages,
+							IMG_UINT32 *pui32PageList,
+							IMG_UINT32 ui32AllocIndex,
+							IMG_BOOL bMap)
+{
+	IMG_UINT32 i;
+	IMG_BOOL bInARun = IMG_FALSE;
+	IMG_UINT32 ui32CurrentStart = 0;
+	IMG_UINT32 ui32RunCount = 0;
+
+	for (i = 0; i < ui32NumPages; i++)
+	{
+		if (!bInARun)
+		{
+			bInARun = IMG_TRUE;
+			ui32CurrentStart = pui32PageList[i];
+		}
+
+		ui32RunCount++;
+
+		/* we flush the current run and emit a command if:
+		 * - the next page in the list is not one greater than the current page
+		 * - this is the last page in the list
+		 * - we have reached the maximum range size
+		 */
+		if ((i == (ui32NumPages - 1)) ||
+			((pui32PageList[i] + 1) != pui32PageList[i + 1]) ||
+			(ui32RunCount == MAP_RANGE_MAX_RANGE))
+		{
+			if (bMap)
+			{
+				InsertMapRangeCommand(ui32AllocIndex,
+									ui32CurrentStart,
+									ui32RunCount);
+			}
+			else
+			{
+				InsertUnmapRangeCommand(ui32AllocIndex,
+									ui32CurrentStart,
+									ui32RunCount);
+			}
+
+			bInARun = IMG_FALSE;
+			ui32RunCount = 0;
+		}
+	}
+}
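+
+/* Worked example (hypothetical page list): ui32NumPages = 5 with
+ * pui32PageList[] = { 3, 4, 5, 9, 10 } coalesces into two commands,
+ * MAP_RANGE(start 3, count 3) and MAP_RANGE(start 9, count 2), because a
+ * run is flushed as soon as the next index is not exactly one greater
+ * than the current one.
+ */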
+
+/* DevicememHistoryMapKM:
+ * Entry point for when an allocation is mapped into the GPU MMU
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ *                      We will use this as a short-cut to find the allocation
+ *                      in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ *                          This may be a new value if we just created the
+ *                          allocation record.
+ */
+PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR,
+							IMG_UINT32 ui32Offset,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut)
+{
+	IMG_BOOL bSparse = PMR_IsSparse(psPMR);
+	IMG_UINT64 ui64Serial;
+	IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
+	PVRSRV_ERROR eError;
+	IMG_BOOL bCreated;
+
+	if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+		!CHECK_ALLOC_INDEX(ui32AllocationIndex))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+								__func__,
+								ui32AllocationIndex));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PMRGetUID(psPMR, &ui64Serial);
+
+	DevicememHistoryLock();
+
+	eError = FindOrCreateAllocation(ui32AllocationIndex,
+						ui64Serial,
+						sDevVAddr,
+						uiSize,
+						szName,
+						ui32Log2PageSize,
+						uiPID,
+						bSparse,
+						&ui32AllocationIndex,
+						&bCreated);
+
+	if ((eError == PVRSRV_OK) && !bCreated)
+	{
+		/* touch the allocation so it goes to the head of our MRU list */
+		TouchBusyAllocation(ui32AllocationIndex);
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+									__func__,
+									szName,
+									PVRSRVGETERRORSTRING(eError)));
+		goto out_unlock;
+	}
+
+	if (!bSparse)
+	{
+		InsertMapAllCommand(ui32AllocationIndex);
+	}
+	else
+	{
+		GenerateMapUnmapCommandsForSparsePMR(psPMR,
+								ui32AllocationIndex,
+								IMG_TRUE);
+	}
+
+	InsertTimeStampCommand(OSClockns64());
+
+	*pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+	DevicememHistoryUnlock();
+
+	return eError;
+}
+
+static void VRangeInsertMapUnmapCommands(IMG_BOOL bMap,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_DEV_VIRTADDR sBaseDevVAddr,
+							IMG_UINT32 ui32StartPage,
+							IMG_UINT32 ui32NumPages,
+							const IMG_CHAR *pszName)
+{
+	while (ui32NumPages > 0)
+	{
+		IMG_UINT32 ui32PagesToAdd;
+
+		ui32PagesToAdd = MIN(ui32NumPages, MAP_RANGE_MAX_RANGE);
+
+		if (ui32StartPage > MAP_RANGE_MAX_START)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "Cannot record %s range beginning at page "
+									"%u on allocation %s",
+									bMap ? "map" : "unmap",
+									ui32StartPage,
+									pszName));
+			return;
+		}
+
+		if (bMap)
+		{
+			InsertMapRangeCommand(ui32AllocationIndex,
+								ui32StartPage,
+								ui32PagesToAdd);
+		}
+		else
+		{
+			InsertUnmapRangeCommand(ui32AllocationIndex,
+								ui32StartPage,
+								ui32PagesToAdd);
+		}
+
+		ui32StartPage += ui32PagesToAdd;
+		ui32NumPages -= ui32PagesToAdd;
+	}
+}
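+
+/* Worked example (assuming MAP_RANGE_MAX_RANGE is (1 << 12) - 1 = 4095, as
+ * the 12-bit count field implies): recording a map of ui32NumPages = 10000
+ * starting at page 0 emits three commands covering 4095, 4095 and 1810
+ * pages respectively.
+ */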
+
+PVRSRV_ERROR DevicememHistoryMapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+						IMG_UINT32 ui32StartPage,
+						IMG_UINT32 ui32NumPages,
+						IMG_DEVMEM_SIZE_T uiAllocSize,
+						const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
+						IMG_UINT32 ui32Log2PageSize,
+						IMG_UINT32 ui32AllocationIndex,
+						IMG_UINT32 *pui32AllocationIndexOut)
+{
+	IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
+	PVRSRV_ERROR eError;
+	IMG_BOOL bCreated;
+
+	if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+		!CHECK_ALLOC_INDEX(ui32AllocationIndex))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+								__func__,
+							ui32AllocationIndex));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	DevicememHistoryLock();
+
+	eError = FindOrCreateAllocation(ui32AllocationIndex,
+						0,
+						sBaseDevVAddr,
+						uiAllocSize,
+						szName,
+						ui32Log2PageSize,
+						uiPID,
+						IMG_FALSE,
+						&ui32AllocationIndex,
+						&bCreated);
+
+	if ((eError == PVRSRV_OK) && !bCreated)
+	{
+		/* touch the allocation so it goes to the head of our MRU list */
+		TouchBusyAllocation(ui32AllocationIndex);
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+									__func__,
+									szName,
+									PVRSRVGETERRORSTRING(eError)));
+		goto out_unlock;
+	}
+
+	VRangeInsertMapUnmapCommands(IMG_TRUE,
+						ui32AllocationIndex,
+						sBaseDevVAddr,
+						ui32StartPage,
+						ui32NumPages,
+						szName);
+
+	*pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+	DevicememHistoryUnlock();
+
+	return eError;
+}
+
+PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+						IMG_UINT32 ui32StartPage,
+						IMG_UINT32 ui32NumPages,
+						IMG_DEVMEM_SIZE_T uiAllocSize,
+						const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
+						IMG_UINT32 ui32Log2PageSize,
+						IMG_UINT32 ui32AllocationIndex,
+						IMG_UINT32 *pui32AllocationIndexOut)
+{
+	IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
+	PVRSRV_ERROR eError;
+	IMG_BOOL bCreated;
+
+	if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+		!CHECK_ALLOC_INDEX(ui32AllocationIndex))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+								__func__,
+							ui32AllocationIndex));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	DevicememHistoryLock();
+
+	eError = FindOrCreateAllocation(ui32AllocationIndex,
+						0,
+						sBaseDevVAddr,
+						uiAllocSize,
+						szName,
+						ui32Log2PageSize,
+						uiPID,
+						IMG_FALSE,
+						&ui32AllocationIndex,
+						&bCreated);
+
+	if ((eError == PVRSRV_OK) && !bCreated)
+	{
+		/* touch the allocation so it goes to the head of our MRU list */
+		TouchBusyAllocation(ui32AllocationIndex);
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+									__func__,
+									szName,
+									PVRSRVGETERRORSTRING(eError)));
+		goto out_unlock;
+	}
+
+	VRangeInsertMapUnmapCommands(IMG_FALSE,
+						ui32AllocationIndex,
+						sBaseDevVAddr,
+						ui32StartPage,
+						ui32NumPages,
+						szName);
+
+	*pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+	DevicememHistoryUnlock();
+
+	return eError;
+}
+
+/* DevicememHistoryUnmapKM:
+ * Entry point for when an allocation is unmapped from the GPU MMU
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ *                      We will use this as a short-cut to find the allocation
+ *                      in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ *                          This may be a new value if we just created the
+ *                          allocation record.
+ */
+PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR,
+							IMG_UINT32 ui32Offset,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut)
+{
+	IMG_BOOL bSparse = PMR_IsSparse(psPMR);
+	IMG_UINT64 ui64Serial;
+	IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
+	PVRSRV_ERROR eError;
+	IMG_BOOL bCreated;
+
+	if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+		!CHECK_ALLOC_INDEX(ui32AllocationIndex))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+								__func__,
+								ui32AllocationIndex));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PMRGetUID(psPMR, &ui64Serial);
+
+	DevicememHistoryLock();
+
+	eError = FindOrCreateAllocation(ui32AllocationIndex,
+						ui64Serial,
+						sDevVAddr,
+						uiSize,
+						szName,
+						ui32Log2PageSize,
+						uiPID,
+						bSparse,
+						&ui32AllocationIndex,
+						&bCreated);
+
+	if ((eError == PVRSRV_OK) && !bCreated)
+	{
+		/* touch the allocation so it goes to the head of our MRU list */
+		TouchBusyAllocation(ui32AllocationIndex);
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+									__func__,
+									szName,
+									PVRSRVGETERRORSTRING(eError)));
+		goto out_unlock;
+	}
+
+	if (!bSparse)
+	{
+		InsertUnmapAllCommand(ui32AllocationIndex);
+	}
+	else
+	{
+		GenerateMapUnmapCommandsForSparsePMR(psPMR,
+								ui32AllocationIndex,
+								IMG_FALSE);
+	}
+
+	InsertTimeStampCommand(OSClockns64());
+
+	*pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+	DevicememHistoryUnlock();
+
+	return eError;
+}
+
+/* DevicememHistorySparseChangeKM:
+ * Entry point for when a sparse allocation is changed, such that some of the
+ * pages within the sparse allocation are mapped or unmapped.
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocPageCount: Number of pages which have been mapped.
+ * paui32AllocPageIndices: Indices of pages which have been mapped.
+ * ui32FreePageCount: Number of pages which have been unmapped.
+ * paui32FreePageIndices: Indices of pages which have been unmapped.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ *                      We will use this as a short-cut to find the allocation
+ *                      in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ *                          This may be a new value if we just created the
+ *                          allocation record.
+ */
+PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR,
+							IMG_UINT32 ui32Offset,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocPageCount,
+							IMG_UINT32 *paui32AllocPageIndices,
+							IMG_UINT32 ui32FreePageCount,
+							IMG_UINT32 *paui32FreePageIndices,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut)
+{
+	IMG_UINT64 ui64Serial;
+	IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
+	PVRSRV_ERROR eError;
+	IMG_BOOL bCreated;
+
+	if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+		!CHECK_ALLOC_INDEX(ui32AllocationIndex))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+								__func__,
+								ui32AllocationIndex));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PMRGetUID(psPMR, &ui64Serial);
+
+	DevicememHistoryLock();
+
+	eError = FindOrCreateAllocation(ui32AllocationIndex,
+						ui64Serial,
+						sDevVAddr,
+						uiSize,
+						szName,
+						ui32Log2PageSize,
+						uiPID,
+						IMG_TRUE /* bSparse */,
+						&ui32AllocationIndex,
+						&bCreated);
+
+	if ((eError == PVRSRV_OK) && !bCreated)
+	{
+		/* touch the allocation so it goes to the head of our MRU list */
+		TouchBusyAllocation(ui32AllocationIndex);
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+									__func__,
+									szName,
+									PVRSRVGETERRORSTRING(eError)));
+		goto out_unlock;
+	}
+
+	GenerateMapUnmapCommandsForChangeList(ui32AllocPageCount,
+							paui32AllocPageIndices,
+							ui32AllocationIndex,
+							IMG_TRUE);
+
+	GenerateMapUnmapCommandsForChangeList(ui32FreePageCount,
+							paui32FreePageIndices,
+							ui32AllocationIndex,
+							IMG_FALSE);
+
+	InsertTimeStampCommand(OSClockns64());
+
+	*pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+	DevicememHistoryUnlock();
+
+	return eError;
+}
+
+/* CircularBufferIterateStart:
+ * Initialise local state for iterating over the circular buffer
+ */
+static void CircularBufferIterateStart(IMG_UINT32 *pui32Head, IMG_UINT32 *pui32Iter)
+{
+	*pui32Head = gsDevicememHistoryData.sRecords.ui32Head;
+
+	if (*pui32Head != 0)
+	{
+		*pui32Iter = *pui32Head - 1;
+	}
+	else
+	{
+		*pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
+	}
+}
+
+/* CircularBufferIteratePrevious:
+ * Iterate to the previous item in the circular buffer.
+ * This is called repeatedly to iterate over the whole circular buffer.
+ */
+static COMMAND_WRAPPER *CircularBufferIteratePrevious(IMG_UINT32 ui32Head,
+							IMG_UINT32 *pui32Iter,
+							COMMAND_TYPE *peType,
+							IMG_BOOL *pbLast)
+{
+	IMG_UINT8 *pui8Header;
+	COMMAND_WRAPPER *psOut = NULL;
+
+	psOut = gsDevicememHistoryData.sRecords.pasCircularBuffer + *pui32Iter;
+
+	pui8Header = (IMG_UINT8 *) psOut;
+
+	/* Sanity-check that the command looks valid.
+	 * This should never fail, but check for it anyway and try to
+	 * handle it.
+	 */
+	if (*pui8Header >= COMMAND_TYPE_COUNT)
+	{
+		/* invalid header detected. Circular buffer corrupted? */
+		PVR_DPF((PVR_DBG_ERROR, "CircularBufferIteratePrevious: "
+							"Invalid header: %u",
+							*pui8Header));
+		*pbLast = IMG_TRUE;
+		return NULL;
+	}
+
+	*peType = *pui8Header;
+
+	if (*pui32Iter != 0)
+	{
+		(*pui32Iter)--;
+	}
+	else
+	{
+		*pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
+	}
+
+	/* inform the caller this is the last command if either we have reached
+	 * the head (where we started) or if we have reached an empty command,
+	 * which means we have covered all populated entries
+	 */
+	if ((*pui32Iter == ui32Head) || (*peType == COMMAND_TYPE_NONE))
+	{
+		/* this is the final iteration */
+		*pbLast = IMG_TRUE;
+	}
+
+	return psOut;
+}
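+
+/* Typical iteration pattern (a sketch of what the query and debug routines
+ * below do); commands are visited newest to oldest, and psCommand is NULL
+ * if a corrupt entry is encountered:
+ *
+ *     IMG_UINT32 ui32Head, ui32Iter;
+ *     COMMAND_TYPE eType = COMMAND_TYPE_NONE;
+ *     IMG_BOOL bLast = IMG_FALSE;
+ *
+ *     CircularBufferIterateStart(&ui32Head, &ui32Iter);
+ *     while (!bLast)
+ *     {
+ *         COMMAND_WRAPPER *psCommand =
+ *             CircularBufferIteratePrevious(ui32Head, &ui32Iter,
+ *                                           &eType, &bLast);
+ *         ...
+ *     }
+ */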
+
+/* MapUnmapCommandGetInfo:
+ * Helper function to get the address and mapping information from a MAP_ALL, UNMAP_ALL,
+ * MAP_RANGE or UNMAP_RANGE command
+ */
+static void MapUnmapCommandGetInfo(COMMAND_WRAPPER *psCommand,
+					COMMAND_TYPE eType,
+					IMG_DEV_VIRTADDR *psDevVAddrStart,
+					IMG_DEV_VIRTADDR *psDevVAddrEnd,
+					IMG_BOOL *pbMap,
+					IMG_UINT32 *pui32AllocIndex)
+{
+	if ((eType == COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL))
+	{
+		COMMAND_MAP_ALL *psMapAll = &psCommand->u.sMapAll;
+		RECORD_ALLOCATION *psAlloc;
+
+		*pbMap = (eType == COMMAND_TYPE_MAP_ALL);
+		*pui32AllocIndex = psMapAll->uiAllocIndex;
+
+		psAlloc = ALLOC_INDEX_TO_PTR(psMapAll->uiAllocIndex);
+
+		*psDevVAddrStart = psAlloc->sDevVAddr;
+		psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr + psAlloc->uiSize - 1;
+	}
+	else if ((eType == COMMAND_TYPE_MAP_RANGE) || (eType == COMMAND_TYPE_UNMAP_RANGE))
+	{
+		COMMAND_MAP_RANGE *psMapRange = &psCommand->u.sMapRange;
+		RECORD_ALLOCATION *psAlloc;
+		IMG_UINT32 ui32StartPage, ui32Count;
+
+		*pbMap = (eType == COMMAND_TYPE_MAP_RANGE);
+		*pui32AllocIndex = psMapRange->uiAllocIndex;
+
+		psAlloc = ALLOC_INDEX_TO_PTR(psMapRange->uiAllocIndex);
+
+		MapRangeUnpack(psMapRange, &ui32StartPage, &ui32Count);
+
+		psDevVAddrStart->uiAddr = psAlloc->sDevVAddr.uiAddr +
+				((1ULL << psAlloc->ui32Log2PageSize) * ui32StartPage);
+
+		psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr +
+				((1ULL << psAlloc->ui32Log2PageSize) * ui32Count) - 1;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid command type: %u",
+								__func__,
+								eType));
+	}
+}
+
+/* DevicememHistoryQuery:
+ * Entry point for rgxdebug to look up addresses relating to a page fault
+ */
+IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
+                               DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
+                               IMG_UINT32 ui32PageSizeBytes,
+                               IMG_BOOL bMatchAnyAllocInPage)
+{
+	IMG_UINT32 ui32Head, ui32Iter;
+	COMMAND_TYPE eType = COMMAND_TYPE_NONE;
+	COMMAND_WRAPPER *psCommand = NULL;
+	IMG_BOOL bLast = IMG_FALSE;
+	IMG_UINT64 ui64StartTime = OSClockns64();
+	IMG_UINT64 ui64TimeNs = 0;
+
+	/* initialise the results count for the caller */
+	psQueryOut->ui32NumResults = 0;
+
+	DevicememHistoryLock();
+
+	/* if the search is constrained to a particular PID then we
+	 * first search the list of allocations to see if this
+	 * PID is known to us
+	 */
+	if (psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY)
+	{
+		IMG_UINT32 ui32Alloc;
+		ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+
+		while (ui32Alloc != END_OF_LIST)
+		{
+			RECORD_ALLOCATION *psAlloc;
+
+			psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+			if (psAlloc->uiPID == psQueryIn->uiPID)
+			{
+				goto found_pid;
+			}
+
+			/* advance to the next allocation in the ring */
+			ui32Alloc = psAlloc->ui32Next;
+
+			if (ui32Alloc == gsDevicememHistoryData.sRecords.ui32AllocationsListHead)
+			{
+				/* gone through whole list */
+				break;
+			}
+		}
+
+		/* PID not found, so we do not have any suitable data for this
+		 * page fault
+		 */
+		goto out_unlock;
+	}
+
+found_pid:
+
+	CircularBufferIterateStart(&ui32Head, &ui32Iter);
+
+	while (!bLast)
+	{
+		psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast);
+
+		if (eType == COMMAND_TYPE_TIMESTAMP)
+		{
+			ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp);
+			continue;
+		}
+
+		if ((eType == COMMAND_TYPE_MAP_ALL) ||
+			(eType == COMMAND_TYPE_UNMAP_ALL) ||
+			(eType == COMMAND_TYPE_MAP_RANGE) ||
+			(eType == COMMAND_TYPE_UNMAP_RANGE))
+		{
+			RECORD_ALLOCATION *psAlloc;
+			IMG_DEV_VIRTADDR sAllocStartAddrOrig, sAllocEndAddrOrig;
+			IMG_DEV_VIRTADDR sAllocStartAddr, sAllocEndAddr;
+			IMG_BOOL bMap;
+			IMG_UINT32 ui32AllocIndex;
+
+			MapUnmapCommandGetInfo(psCommand,
+							eType,
+							&sAllocStartAddrOrig,
+							&sAllocEndAddrOrig,
+							&bMap,
+							&ui32AllocIndex);
+
+			sAllocStartAddr = sAllocStartAddrOrig;
+			sAllocEndAddr = sAllocEndAddrOrig;
+
+			psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex);
+
+			/* skip this command if we need to search within
+			 * a particular PID, and this allocation is not from
+			 * that PID
+			 */
+			if ((psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY) &&
+				(psAlloc->uiPID != psQueryIn->uiPID))
+			{
+				continue;
+			}
+
+			/* if the allocation was created after this event, then this
+			 * event must be for an old/removed allocation, so skip it
+			 */
+			if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs)
+			{
+				continue;
+			}
+
+			/* if the caller wants us to match any allocation in the
+			 * same page as the allocation then tweak the real start/end
+			 * addresses of the allocation here
+			 */
+			if (bMatchAnyAllocInPage)
+			{
+				sAllocStartAddr.uiAddr = sAllocStartAddr.uiAddr & ~(IMG_UINT64) (ui32PageSizeBytes - 1);
+				sAllocEndAddr.uiAddr = (sAllocEndAddr.uiAddr + ui32PageSizeBytes - 1) & ~(IMG_UINT64) (ui32PageSizeBytes - 1);
+			}
+
+			if ((psQueryIn->sDevVAddr.uiAddr >= sAllocStartAddr.uiAddr) &&
+				(psQueryIn->sDevVAddr.uiAddr <  sAllocEndAddr.uiAddr))
+			{
+				DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult = &psQueryOut->sResults[psQueryOut->ui32NumResults];
+
+				OSStringLCopy(psResult->szString, psAlloc->szName, sizeof(psResult->szString));
+				psResult->sBaseDevVAddr = psAlloc->sDevVAddr;
+				psResult->uiSize = psAlloc->uiSize;
+				psResult->bMap = bMap;
+				psResult->ui64Age = _CalculateAge(ui64StartTime, ui64TimeNs, TIME_STAMP_MASK);
+				psResult->ui64When = ui64TimeNs;
+				/* write the responsible PID in the placeholder */
+				psResult->sProcessInfo.uiPID = psAlloc->uiPID;
+
+				if ((eType == COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL))
+				{
+					psResult->bRange = IMG_FALSE;
+					psResult->bAll = IMG_TRUE;
+				}
+				else
+				{
+					psResult->bRange = IMG_TRUE;
+					MapRangeUnpack(&psCommand->u.sMapRange,
+										&psResult->ui32StartPage,
+										&psResult->ui32PageCount);
+					psResult->bAll = (psResult->ui32PageCount * (1U << psAlloc->ui32Log2PageSize))
+											== psAlloc->uiSize;
+					psResult->sMapStartAddr = sAllocStartAddrOrig;
+					psResult->sMapEndAddr = sAllocEndAddrOrig;
+				}
+
+				psQueryOut->ui32NumResults++;
+
+				if (psQueryOut->ui32NumResults == DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS)
+				{
+					break;
+				}
+			}
+		}
+	}
+
+out_unlock:
+	DevicememHistoryUnlock();
+
+	return psQueryOut->ui32NumResults > 0;
+}
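+
+/* Usage sketch (hypothetical fault address): look up the history of a
+ * faulting device-virtual address across all processes, matching any
+ * allocation that shares the faulting 4K page:
+ *
+ *     DEVICEMEM_HISTORY_QUERY_IN sQueryIn;
+ *     DEVICEMEM_HISTORY_QUERY_OUT sQueryOut;
+ *
+ *     sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY;
+ *     sQueryIn.sDevVAddr.uiAddr = 0x8800000ULL;
+ *
+ *     if (DevicememHistoryQuery(&sQueryIn, &sQueryOut, 4096, IMG_TRUE))
+ *     {
+ *         use sQueryOut.sResults[0..ui32NumResults-1], newest first
+ *     }
+ */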
+
+static void DeviceMemHistoryFmt(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN],
+							IMG_PID uiPID,
+							const IMG_CHAR *pszName,
+							const IMG_CHAR *pszAction,
+							IMG_DEV_VIRTADDR sDevVAddrStart,
+							IMG_DEV_VIRTADDR sDevVAddrEnd,
+							IMG_UINT64 ui64TimeNs)
+{
+	OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+				/* PID NAME ACTION ADDR-MIN ADDR-MAX SIZE ABS-NS */
+				"%04u %-40s %-10s "
+				IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC " "
+				"0x%08" IMG_UINT64_FMTSPECX " "
+				"%013" IMG_UINT64_FMTSPEC, /* 13 digits is over 2 hours of ns */
+				uiPID,
+				pszName,
+				pszAction,
+				sDevVAddrStart.uiAddr,
+				sDevVAddrEnd.uiAddr,
+				sDevVAddrEnd.uiAddr - sDevVAddrStart.uiAddr,
+				ui64TimeNs);
+}
+
+static void DeviceMemHistoryFmtHeader(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN])
+{
+	OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+				"%-4s %-40s %-6s   %10s   %10s   %8s %13s",
+				"PID",
+				"NAME",
+				"ACTION",
+				"ADDR MIN",
+				"ADDR MAX",
+				"SIZE",
+				"ABS NS");
+}
+
+static const char *CommandTypeToString(COMMAND_TYPE eType)
+{
+	switch (eType)
+	{
+		case COMMAND_TYPE_MAP_ALL:
+			return "MapAll";
+		case COMMAND_TYPE_UNMAP_ALL:
+			return "UnmapAll";
+		case COMMAND_TYPE_MAP_RANGE:
+			return "MapRange";
+		case COMMAND_TYPE_UNMAP_RANGE:
+			return "UnmapRange";
+		case COMMAND_TYPE_TIMESTAMP:
+			return "TimeStamp";
+		default:
+			return "???";
+	}
+}
+
+static void DevicememHistoryPrintAll(void *pvFilePtr, OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+	IMG_UINT32 ui32Iter;
+	IMG_UINT32 ui32Head;
+	IMG_BOOL bLast = IMG_FALSE;
+	IMG_UINT64 ui64TimeNs = 0;
+	IMG_UINT64 ui64StartTime = OSClockns64();
+
+	DeviceMemHistoryFmtHeader(szBuffer);
+	pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+
+	CircularBufferIterateStart(&ui32Head, &ui32Iter);
+
+	while (!bLast)
+	{
+		COMMAND_WRAPPER *psCommand;
+		COMMAND_TYPE eType = COMMAND_TYPE_NONE;
+
+		psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast);
+
+		if (eType == COMMAND_TYPE_TIMESTAMP)
+		{
+			ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp);
+			continue;
+		}
+
+		if ((eType == COMMAND_TYPE_MAP_ALL) ||
+			(eType == COMMAND_TYPE_UNMAP_ALL) ||
+			(eType == COMMAND_TYPE_MAP_RANGE) ||
+			(eType == COMMAND_TYPE_UNMAP_RANGE))
+		{
+			RECORD_ALLOCATION *psAlloc;
+			IMG_DEV_VIRTADDR sDevVAddrStart, sDevVAddrEnd;
+			IMG_BOOL bMap;
+			IMG_UINT32 ui32AllocIndex;
+
+			MapUnmapCommandGetInfo(psCommand,
+								eType,
+								&sDevVAddrStart,
+								&sDevVAddrEnd,
+								&bMap,
+								&ui32AllocIndex);
+
+			psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex);
+
+			if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs)
+			{
+				/* if this event relates to an allocation we
+				 * are no longer tracking then do not print it
+				 */
+				continue;
+			}
+
+			DeviceMemHistoryFmt(szBuffer,
+								psAlloc->uiPID,
+								psAlloc->szName,
+								CommandTypeToString(eType),
+								sDevVAddrStart,
+								sDevVAddrEnd,
+								ui64TimeNs);
+
+			pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+		}
+	}
+
+	pfnOSStatsPrintf(pvFilePtr, "\nTimestamp reference: %013" IMG_UINT64_FMTSPEC "\n", ui64StartTime);
+}
+
+static void DevicememHistoryPrintAllWrapper(void *pvFilePtr, void *pvData, OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+	DevicememHistoryLock();
+	DevicememHistoryPrintAll(pvFilePtr, pfnOSStatsPrintf);
+	DevicememHistoryUnlock();
+}
+
+static PVRSRV_ERROR CreateRecords(void)
+{
+	gsDevicememHistoryData.sRecords.pasAllocations =
+			OSAllocMem(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES);
+
+	if (gsDevicememHistoryData.sRecords.pasAllocations == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Allocate and zero-initialise the circular buffer so every entry
+	 * starts out as a command of type COMMAND_TYPE_NONE. */
+	gsDevicememHistoryData.sRecords.pasCircularBuffer =
+			OSAllocZMem(sizeof(COMMAND_WRAPPER) * CIRCULAR_BUFFER_NUM_COMMANDS);
+
+	if (gsDevicememHistoryData.sRecords.pasCircularBuffer == NULL)
+	{
+		OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations);
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	return PVRSRV_OK;
+}
+
+static void DestroyRecords(void)
+{
+	OSFreeMem(gsDevicememHistoryData.sRecords.pasCircularBuffer);
+	OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations);
+}
+
+static void InitialiseRecords(void)
+{
+	IMG_UINT32 i;
+
+	/* initialise the allocations list */
+
+	gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Prev = ALLOCATION_LIST_NUM_ENTRIES - 1;
+	gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Next = 1;
+
+	for (i = 1; i < ALLOCATION_LIST_NUM_ENTRIES; i++)
+	{
+		gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Prev = i - 1;
+		gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Next = i + 1;
+	}
+
+	gsDevicememHistoryData.sRecords.pasAllocations[ALLOCATION_LIST_NUM_ENTRIES - 1].ui32Next = 0;
+
+	gsDevicememHistoryData.sRecords.ui32AllocationsListHead = 0;
+}
+
+PVRSRV_ERROR DevicememHistoryInitKM(void)
+{
+	PVRSRV_ERROR eError;
+
+	eError = OSLockCreate(&gsDevicememHistoryData.hLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DevicememHistoryInitKM: Failed to create lock"));
+		goto err_lock;
+	}
+
+	eError = CreateRecords();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DevicememHistoryInitKM: Failed to create records"));
+		goto err_allocations;
+	}
+
+	InitialiseRecords();
+
+	gsDevicememHistoryData.pvStatsEntry = OSCreateStatisticEntry("devicemem_history",
+						NULL,
+						DevicememHistoryPrintAllWrapper,
+						NULL);
+
+	return PVRSRV_OK;
+
+err_allocations:
+	OSLockDestroy(gsDevicememHistoryData.hLock);
+err_lock:
+	return eError;
+}
+
+void DevicememHistoryDeInitKM(void)
+{
+	if (gsDevicememHistoryData.pvStatsEntry != NULL)
+	{
+		OSRemoveStatisticEntry(&gsDevicememHistoryData.pvStatsEntry);
+	}
+
+	DestroyRecords();
+
+	OSLockDestroy(gsDevicememHistoryData.hLock);
+}
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_history_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_history_server.h
new file mode 100644
index 0000000..40df0f5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_history_server.h
@@ -0,0 +1,154 @@
+/*************************************************************************/ /*!
+@File			devicemem_history_server.h
+@Title          Resource Information abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Devicemem History functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_HISTORY_SERVER_H_
+#define _DEVICEMEM_HISTORY_SERVER_H_
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "rgxmem.h"
+#include "devicemem_utils.h"
+
+extern PVRSRV_ERROR
+DevicememHistoryInitKM(void);
+
+extern void
+DevicememHistoryDeInitKM(void);
+
+PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR,
+							IMG_UINT32 ui32Offset,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR,
+							IMG_UINT32 ui32Offset,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryMapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+							IMG_UINT32 ui32StartPage,
+							IMG_UINT32 ui32NumPages,
+							IMG_DEVMEM_SIZE_T uiAllocSize,
+							const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+							IMG_UINT32 ui32StartPage,
+							IMG_UINT32 ui32NumPages,
+							IMG_DEVMEM_SIZE_T uiAllocSize,
+							const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR,
+							IMG_UINT32 ui32Offset,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocPageCount,
+							IMG_UINT32 *paui32AllocPageIndices,
+							IMG_UINT32 ui32FreePageCount,
+							IMG_UINT32 *paui32FreePageIndices,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut);
+
+/* used when the PID does not matter */
+#define DEVICEMEM_HISTORY_PID_ANY 0xFFFFFFFE
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_IN_
+{
+	IMG_PID uiPID;
+	IMG_DEV_VIRTADDR sDevVAddr;
+} DEVICEMEM_HISTORY_QUERY_IN;
+
+/* Store up to 4 results for a lookup. In the case of the faulting page being
+ * re-mapped between the page fault occurring on HW and the page fault analysis
+ * being done, the second result entry will show the allocation being unmapped.
+ * A further 2 entries are added to cater for multiple buffers in the same page.
+ */
+#define DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS 4
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_RESULT_
+{
+	IMG_CHAR szString[DEVMEM_ANNOTATION_MAX_LEN];
+	IMG_DEV_VIRTADDR sBaseDevVAddr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_BOOL bMap;
+	IMG_BOOL bRange;
+	IMG_BOOL bAll;
+	IMG_UINT64 ui64When;
+	IMG_UINT64 ui64Age;
+	/* info for sparse map/unmap operations (i.e. bRange=IMG_TRUE) */
+	IMG_UINT32 ui32StartPage;
+	IMG_UINT32 ui32PageCount;
+	IMG_DEV_VIRTADDR sMapStartAddr;
+	IMG_DEV_VIRTADDR sMapEndAddr;
+	RGXMEM_PROCESS_INFO sProcessInfo;
+} DEVICEMEM_HISTORY_QUERY_OUT_RESULT;
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_
+{
+	IMG_UINT32 ui32NumResults;
+	/* result 0 is the newest */
+	DEVICEMEM_HISTORY_QUERY_OUT_RESULT sResults[DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS];
+} DEVICEMEM_HISTORY_QUERY_OUT;
+
+extern IMG_BOOL
+DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
+                      DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
+                      IMG_UINT32 ui32PageSizeBytes,
+                      IMG_BOOL bMatchAnyAllocInPage);
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_pdump.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_pdump.h
new file mode 100644
index 0000000..1a1d690
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_pdump.h
@@ -0,0 +1,331 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management PDump internal
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal interface to PDump device memory management
+                functions that are shared between client and server code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_PDUMP_H_
+#define _DEVICEMEM_PDUMP_H_
+
+#include "devicemem.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+
+#if defined(PDUMP)
+/*
+ * DevmemPDumpLoadMem()
+ *
+ * Takes a memory descriptor, offset and size, writes the current
+ * contents of the memory at that location to the PRM PDump file,
+ * and emits a PDump LDB to load the data back from that file.
+ * The intention here is that the contents of the simulated buffer
+ * upon pdump playback will be made to be the same as they are when
+ * this command is run, enabling pdump of cases where the memory has
+ * been modified externally, i.e. by the host cpu or by a third
+ * party.
+ */
+extern void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadZeroMem()
+ *
+ * As DevmemPDumpLoadMem(), but the PDump allocation will be populated
+ * with zeros from the zero page in the parameter stream.
+ */
+extern void
+DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpMemValue()
+ *
+ * As above but dumps the value at a dword-aligned address in plain
+ * text to the pdump script2 file. Useful for patching a buffer at
+ * pdump playback by simply editing the script output file.
+ *
+ * (The same functionality can be achieved by the above function but
+ *  the binary PARAM file must be patched in that case.)
+ */
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT32 ui32Value,
+                        PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadMemValue64()
+ *
+ * As above but dumps the 64-bit value at a dword-aligned address in plain
+ * text to the pdump script2 file. Useful for patching a buffer at
+ * pdump playback by simply editing the script output file.
+ *
+ * (The same functionality can be achieved by the above function but
+ *  the binary PARAM file must be patched in that case.)
+ */
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT64 ui64Value,
+                        PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpPageCatBaseToSAddr()
+ *
+ * Returns the symbolic address of a piece of memory represented
+ * by an offset into the mem descriptor.
+ */
+extern PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC		*psMemDesc,
+							  IMG_DEVMEM_OFFSET_T	*puiMemOffset,
+							  IMG_CHAR				*pszName,
+							  IMG_UINT32			ui32Size);
+
+/*
+ * DevmemPDumpSaveToFile()
+ *
+ * emits a pdump SAB to cause the current contents of the memory to be
+ * written to the given file during playback
+ */
+extern void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_DEVMEM_SIZE_T uiSize,
+                      const IMG_CHAR *pszFilename,
+                      IMG_UINT32 uiFileOffset);
+
+/*
+ * DevmemPDumpSaveToFileVirtual()
+ *
+ * emits a pdump SAB, just like DevmemPDumpSaveToFile(), but uses the
+ * virtual address and device MMU context to cause the pdump player to
+ * traverse the MMU page tables itself.
+ */
+extern void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+                             IMG_DEVMEM_OFFSET_T uiOffset,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             const IMG_CHAR *pszFilename,
+							 IMG_UINT32 ui32FileOffset,
+							 IMG_UINT32 ui32PdumpFlags);
+
+
+/*
+ *
+ * DevmemPDumpDevmemPol32()
+ *
+ * writes a PDump 'POL' command to wait for a masked 32-bit memory
+ * location to become the specified value
+ */
+extern PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+                           IMG_DEVMEM_OFFSET_T uiOffset,
+                           IMG_UINT32 ui32Value,
+                           IMG_UINT32 ui32Mask,
+                           PDUMP_POLL_OPERATOR eOperator,
+                           PDUMP_FLAGS_T ui32PDumpFlags);
+
+/*
+ * DevmemPDumpCBP()
+ *
+ * Polls for space in the circular buffer. Reads the read offset
+ * from memory and waits until there is enough space to write
+ * the packet.
+ *
+ * psMemDesc     - MemDesc which contains the read offset
+ * uiReadOffset  - Offset into MemDesc to the read offset
+ * uiWriteOffset - Current write offset
+ * uiPacketSize  - Size of packet to write
+ * uiBufferSize  - Size of circular buffer
+ */
+extern PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+				IMG_DEVMEM_OFFSET_T uiReadOffset,
+				IMG_DEVMEM_OFFSET_T uiWriteOffset,
+				IMG_DEVMEM_SIZE_T uiPacketSize,
+				IMG_DEVMEM_SIZE_T uiBufferSize);
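+
+/* Usage sketch (psCCBCtlMemDesc and the sizes are hypothetical): wait for
+ * 128 bytes of space in a 4096-byte circular buffer whose read offset is
+ * stored at offset 0 of the MemDesc:
+ *
+ *     eError = DevmemPDumpCBP(psCCBCtlMemDesc, 0, uiWriteOffset, 128, 4096);
+ */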
+
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMem)
+#endif
+static INLINE void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue32)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT32 ui32Value,
+                        PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue64)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT64 ui64Value,
+                        PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui64Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpPageCatBaseToSAddr)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC		*psMemDesc,
+							  IMG_DEVMEM_OFFSET_T	*puiMemOffset,
+							  IMG_CHAR				*pszName,
+							  IMG_UINT32			ui32Size)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(puiMemOffset);
+	PVR_UNREFERENCED_PARAMETER(pszName);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpSaveToFile)
+#endif
+static INLINE void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_DEVMEM_SIZE_T uiSize,
+                      const IMG_CHAR *pszFilename,
+                      IMG_UINT32 uiFileOffset)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(pszFilename);
+	PVR_UNREFERENCED_PARAMETER(uiFileOffset);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpSaveToFileVirtual)
+#endif
+static INLINE void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+                             IMG_DEVMEM_OFFSET_T uiOffset,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             const IMG_CHAR *pszFilename,
+                             IMG_UINT32 ui32FileOffset,
+                             IMG_UINT32 ui32PdumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(pszFilename);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32PdumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpDevmemPol32)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+                           IMG_DEVMEM_OFFSET_T uiOffset,
+                           IMG_UINT32 ui32Value,
+                           IMG_UINT32 ui32Mask,
+                           PDUMP_POLL_OPERATOR eOperator,
+                           PDUMP_FLAGS_T ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpCBP)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+				IMG_DEVMEM_OFFSET_T uiReadOffset,
+				IMG_DEVMEM_OFFSET_T uiWriteOffset,
+				IMG_DEVMEM_SIZE_T uiPacketSize,
+				IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+
+	return PVRSRV_OK;
+}
+#endif	/* PDUMP */
+#endif	/* _DEVICEMEM_PDUMP_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_server.c
new file mode 100644
index 0000000..d136815
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_server.c
@@ -0,0 +1,1836 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Server-side component of the Device Memory Management.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* our exported API */
+#include "devicemem_server.h"
+#include "devicemem_utils.h"
+#include "devicemem.h"
+
+#include "device.h" /* For device node */
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "mmu_common.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "physmem.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "lock.h"
+
+#include "rgx_bvnc_defs_km.h"
+
+#define DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE (1 << 0)
+
+struct _DEVMEMINT_CTX_
+{
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	/* MMU common code needs to have a context. There's a one-to-one
+	   correspondence between device memory context and MMU context,
+	   but we have the abstraction here so that we don't need to care
+	   what the MMU does with its context, and the MMU code need not
+	   know about us at all. */
+	MMU_CONTEXT *psMMUContext;
+
+	ATOMIC_T hRefCount;
+
+	/* This handle is for devices that require notification when a new
+	   memory context is created and they need to store private data that
+	   is associated with the context. */
+	IMG_HANDLE hPrivData;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	/* Protects access to sProcessNotifyListHead */
+	POSWR_LOCK hListLock;
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+	/* The following tracks UM applications that need to be notified of a
+	 * page fault */
+	DLLIST_NODE sProcessNotifyListHead;
+	/* The following is a node for the list of registered devmem contexts */
+	DLLIST_NODE sPageFaultNotifyListElem;
+
+	/* Device virtual address of a page fault on this context */
+	IMG_DEV_VIRTADDR sFaultAddress;
+
+	/* General purpose flags */
+	IMG_UINT32 ui32Flags;
+};
+
+struct _DEVMEMINT_CTX_EXPORT_
+{
+	DEVMEMINT_CTX *psDevmemCtx;
+	PMR *psPMR;
+	ATOMIC_T hRefCount;
+	DLLIST_NODE sNode;
+};
+
+struct _DEVMEMINT_HEAP_
+{
+	struct _DEVMEMINT_CTX_ *psDevmemCtx;
+	IMG_UINT32 uiLog2PageSize;
+	ATOMIC_T hRefCount;
+};
+
+struct _DEVMEMINT_RESERVATION_
+{
+	struct _DEVMEMINT_HEAP_ *psDevmemHeap;
+	IMG_DEV_VIRTADDR sBase;
+	IMG_DEVMEM_SIZE_T uiLength;
+};
+
+struct _DEVMEMINT_MAPPING_
+{
+	struct _DEVMEMINT_RESERVATION_ *psReservation;
+	PMR *psPMR;
+	IMG_UINT32 uiNumPages;
+};
+
+struct _DEVMEMINT_PF_NOTIFY_
+{
+	IMG_UINT32  ui32PID;
+	DLLIST_NODE sProcessNotifyListElem;
+};
+
+/*************************************************************************/ /*!
+@Function       _DevmemIntCtxAcquire
+@Description    Acquire a reference to the provided device memory context.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntCtxAcquire(DEVMEMINT_CTX *psDevmemCtx)
+{
+	OSAtomicIncrement(&psDevmemCtx->hRefCount);
+}
+
+/*************************************************************************/ /*!
+@Function       _DevmemIntCtxRelease
+@Description    Release the reference to the provided device memory context.
+                If this is the last reference which was taken then the
+                memory context will be freed.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx)
+{
+	if (OSAtomicDecrement(&psDevmemCtx->hRefCount) == 0)
+	{
+		/* The last reference has gone, destroy the context */
+		PVRSRV_DEVICE_NODE *psDevNode = psDevmemCtx->psDevNode;
+		DLLIST_NODE *psNode, *psNodeNext;
+
+		/* If any PIDs are still registered for page fault notification,
+		 * loop through them and free each node */
+		dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+		{
+			DEVMEMINT_PF_NOTIFY *psNotifyNode =
+				IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+			dllist_remove_node(psNode);
+			OSFreeMem(psNotifyNode);
+		}
+
+		/* If this context is in the list registered for a debugger, remove
+		 * from that list */
+		if (dllist_node_is_in_list(&psDevmemCtx->sPageFaultNotifyListElem))
+		{
+			dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem);
+		}
+
+		if (psDevNode->pfnUnregisterMemoryContext)
+		{
+			psDevNode->pfnUnregisterMemoryContext(psDevmemCtx->hPrivData);
+		}
+		MMU_ContextDestroy(psDevmemCtx->psMMUContext);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSWRLockDestroy(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed memory context %p",
+				 __func__, psDevmemCtx));
+		OSFreeMem(psDevmemCtx);
+	}
+}
+
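+/*
+ * Usage sketch (illustration only): every object that holds a pointer to
+ * the context takes a reference, so teardown is deferred until the last
+ * user is gone:
+ *
+ *   _DevmemIntCtxAcquire(psDevmemCtx);  // e.g. heap or export created
+ *   ...
+ *   _DevmemIntCtxRelease(psDevmemCtx);  // frees the context at refcount 0
+ */
+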
+/*************************************************************************/ /*!
+@Function       _DevmemIntHeapAcquire
+@Description    Acquire a reference to the provided device memory heap.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntHeapAcquire(DEVMEMINT_HEAP *psDevmemHeap)
+{
+	OSAtomicIncrement(&psDevmemHeap->hRefCount);
+}
+
+/*************************************************************************/ /*!
+@Function       _DevmemIntHeapRelease
+@Description    Release a reference to the provided device memory heap.
+                Note that, unlike _DevmemIntCtxRelease, this only drops
+                the refcount; the heap itself is freed in
+                DevmemIntHeapDestroy.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntHeapRelease(DEVMEMINT_HEAP *psDevmemHeap)
+{
+	OSAtomicDecrement(&psDevmemHeap->hRefCount);
+}
+
+PVRSRV_ERROR
+DevmemIntUnpin(PMR *psPMR)
+{
+	PVRSRV_ERROR eError;
+
+	/* Unpin */
+	eError = PMRUnpinPMR(psPMR, IMG_FALSE);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
+{
+	PVRSRV_ERROR eError;
+
+	eError = PMRUnpinPMR(psPMR, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e_exit;
+	}
+
+	/* Invalidate mapping */
+	eError = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+	                            psDevmemMapping->psReservation->sBase,
+	                            psDevmemMapping->uiNumPages,
+	                            psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize,
+	                            IMG_FALSE, /* !< Choose to invalidate PT entries */
+	                            psPMR);
+
+e_exit:
+	return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPin(PMR *psPMR)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Start the pinning */
+	eError = PMRPinPMR(psPMR);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eErrorMMU = PVRSRV_OK;
+	IMG_UINT32 uiLog2PageSize = psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize;
+
+	/* Start the pinning */
+	eError = PMRPinPMR(psPMR);
+
+	if (eError == PVRSRV_OK)
+	{
+		/* Make mapping valid again */
+		eErrorMMU = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+		                            psDevmemMapping->psReservation->sBase,
+		                            psDevmemMapping->uiNumPages,
+		                            uiLog2PageSize,
+		                            IMG_TRUE, /* !< Choose to make PT entries valid again */
+		                            psPMR);
+	}
+	else if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+	{
+		/* If we lost the physical backing we have to map it again because
+		 * the old physical addresses are not valid anymore. */
+		IMG_UINT32 uiFlags;
+		uiFlags = PMR_Flags(psPMR);
+
+		eErrorMMU = MMU_MapPages(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+		                         uiFlags,
+		                         psDevmemMapping->psReservation->sBase,
+		                         psPMR,
+		                         0,
+		                         psDevmemMapping->uiNumPages,
+		                         NULL,
+		                         uiLog2PageSize);
+	}
+
+	/* Just overwrite eError if the mappings failed.
+	 * PMR_NEW_MEMORY has to be propagated to the user. */
+	if (eErrorMMU != PVRSRV_OK)
+	{
+		eError = eErrorMMU;
+	}
+
+	return eError;
+}
+
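+/*
+ * Caller-side sketch (illustration only): PVRSRV_ERROR_PMR_NEW_MEMORY is
+ * not a hard failure; it signals that the old contents were lost:
+ *
+ *   eError = DevmemIntPinValidate(psDevmemMapping, psPMR);
+ *   if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ *   {
+ *       // remapped to fresh pages: contents must be re-initialised
+ *   }
+ *   else if (eError != PVRSRV_OK)
+ *   {
+ *       // genuine failure
+ *   }
+ */
+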
+/*************************************************************************/ /*!
+@Function       DevmemServerGetImportHandle
+@Description    For a given exportable memory descriptor returns the PMR handle.
+@Return         PVRSRV_OK if the memory is exportable
+                PVRSRV_ERROR failure code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+                            IMG_HANDLE *phImport)
+{
+	PVRSRV_ERROR eError;
+
+	if ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+		goto e0;
+	}
+
+	*phImport = psMemDesc->psImport->hPMR;
+	return PVRSRV_OK;
+
+e0:
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemServerGetHeapHandle
+@Description    For a given reservation returns the heap handle.
+@Return         PVRSRV_OK on success, PVRSRV_ERROR failure code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
+                          IMG_HANDLE *phHeap)
+{
+	*phHeap = psReservation->psDevmemHeap;
+	return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxCreate
+@Description    Creates and initialises a device memory context.
+@Return         valid Device Memory context handle - Success
+                PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxCreate(CONNECTION_DATA *psConnection,
+                   PVRSRV_DEVICE_NODE *psDeviceNode,
+                   IMG_BOOL bKernelMemoryCtx,
+                   DEVMEMINT_CTX **ppsDevmemCtxPtr,
+                   IMG_HANDLE *hPrivData,
+                   IMG_UINT32 *pui32CPUCacheLineSize)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX *psDevmemCtx;
+	IMG_HANDLE hPrivDataInt = NULL;
+	MMU_DEVICEATTRIBS      *psMMUDevAttrs;
+
+	if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS))
+	{
+		psMMUDevAttrs = bKernelMemoryCtx ?
+		                psDeviceNode->psFirmwareMMUDevAttrs :
+		                psDeviceNode->psMMUDevAttrs;
+	}
+	else
+	{
+		psMMUDevAttrs = psDeviceNode->psMMUDevAttrs;
+		PVR_UNREFERENCED_PARAMETER(bKernelMemoryCtx);
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__));
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/*
+	 * Ensure that we are safe to perform unaligned accesses on memory
+	 * we mark write-combine, as the compiler might generate
+	 * instructions operating on this memory which require this
+	 * assumption to be true.
+	 */
+	PVR_ASSERT(OSIsWriteCombineUnalignedSafe());
+
+	/* allocate a Devmem context */
+	psDevmemCtx = OSAllocMem(sizeof *psDevmemCtx);
+	if (psDevmemCtx == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF ((PVR_DBG_ERROR, "%s: Alloc failed", __func__));
+		goto fail_alloc;
+	}
+
+	OSAtomicWrite(&psDevmemCtx->hRefCount, 1);
+	psDevmemCtx->psDevNode = psDeviceNode;
+
+	/* Call down to MMU context creation */
+
+	eError = MMU_ContextCreate(psDeviceNode,
+	                           &psDevmemCtx->psMMUContext,
+	                           psMMUDevAttrs);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: MMU_ContextCreate failed", __func__));
+		goto fail_mmucontext;
+	}
+
+	if (psDeviceNode->pfnRegisterMemoryContext)
+	{
+		eError = psDeviceNode->pfnRegisterMemoryContext(psDeviceNode, psDevmemCtx->psMMUContext, &hPrivDataInt);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register MMU context",
+					 __func__));
+			goto fail_register;
+		}
+	}
+
+	/* Store the private data as it is required to unregister the memory context */
+	psDevmemCtx->hPrivData = hPrivDataInt;
+	*hPrivData = hPrivDataInt;
+	*ppsDevmemCtxPtr = psDevmemCtx;
+
+	/* Pass the CPU cache line size through the bridge to user mode, as it can't be queried there. */
+	*pui32CPUCacheLineSize = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+
+	/* Initialise the PID notify list */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockCreate(&psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+	dllist_init(&(psDevmemCtx->sProcessNotifyListHead));
+	psDevmemCtx->sPageFaultNotifyListElem.psNextNode = NULL;
+	psDevmemCtx->sPageFaultNotifyListElem.psPrevNode = NULL;
+
+	/* Initialise page fault address */
+	psDevmemCtx->sFaultAddress.uiAddr = 0ULL;
+
+	/* Initialise flags */
+	psDevmemCtx->ui32Flags = 0;
+
+	return PVRSRV_OK;
+
+fail_register:
+	MMU_ContextDestroy(psDevmemCtx->psMMUContext);
+fail_mmucontext:
+	OSFreeMem(psDevmemCtx);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntHeapCreate
+@Description    Creates and initialises a device memory heap.
+@Return         valid Device Memory heap handle - Success
+                PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx,
+                    IMG_DEV_VIRTADDR sHeapBaseAddr,
+                    IMG_DEVMEM_SIZE_T uiHeapLength,
+                    IMG_UINT32 uiLog2DataPageSize,
+                    DEVMEMINT_HEAP **ppsDevmemHeapPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP *psDevmemHeap;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: DevmemIntHeap_Create", __func__));
+
+	/* allocate a Devmem heap */
+	psDevmemHeap = OSAllocMem(sizeof *psDevmemHeap);
+	if (psDevmemHeap == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF ((PVR_DBG_ERROR, "%s: Alloc failed", __func__));
+		goto fail_alloc;
+	}
+
+	psDevmemHeap->psDevmemCtx = psDevmemCtx;
+
+	_DevmemIntCtxAcquire(psDevmemHeap->psDevmemCtx);
+
+	OSAtomicWrite(&psDevmemHeap->hRefCount, 1);
+
+	psDevmemHeap->uiLog2PageSize = uiLog2DataPageSize;
+
+	*ppsDevmemHeapPtr = psDevmemHeap;
+
+	return PVRSRV_OK;
+
+fail_alloc:
+	return eError;
+}
+
+#define PVR_DUMMY_PAGE_INIT_VALUE	(0x0)
+#define PVR_ZERO_PAGE_INIT_VALUE	(0x0)
+
+static PVRSRV_ERROR DevmemIntAllocDefBackingPage(DEVMEMINT_CTX *psDevMemCtx,
+                                            PVRSRV_DEF_PAGE *psDefPage,
+                                            IMG_INT	uiInitValue,
+                                            IMG_CHAR *pcDefPageName,
+                                            IMG_BOOL bInitPage)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = psDevMemCtx->psDevNode;
+	IMG_UINT32 ui32RefCnt;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	OSLockAcquire(psDefPage->psPgLock);
+
+	/* We know there will never be anywhere near 4G sparse PMRs, so a 32-bit refcount is safe */
+	ui32RefCnt = OSAtomicIncrement(&psDefPage->atRefCounter);
+
+	if (1 == ui32RefCnt)
+	{
+		IMG_DEV_PHYADDR	sDevPhysAddr = {0};
+
+#if defined(PDUMP)
+		PDUMPCOMMENT("Alloc %s page object", pcDefPageName);
+#endif
+		/* Allocate the dummy page required for sparse backing */
+		eError = DevPhysMemAlloc(psDevNode,
+		                         (1 << psDefPage->ui32Log2PgSize),
+		                         0,
+		                         uiInitValue,
+		                         bInitPage,
+#if defined(PDUMP)
+		                         MMU_GetPxPDumpMemSpaceName(psDevMemCtx->psMMUContext),
+		                         pcDefPageName,
+		                         &psDefPage->hPdumpPg,
+#endif
+		                         &psDefPage->sPageHandle,
+		                         &sDevPhysAddr);
+		if (PVRSRV_OK != eError)
+		{
+			OSAtomicDecrement(&psDefPage->atRefCounter);
+		}
+		else
+		{
+			psDefPage->ui64PgPhysAddr = sDevPhysAddr.uiAddr;
+		}
+
+	}
+
+	OSLockRelease(psDefPage->psPgLock);
+
+	return eError;
+}
+
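+/*
+ * Behaviour sketch (illustration only; arguments abbreviated): the
+ * physical page is allocated on the first reference only,
+ *
+ *   DevmemIntAllocDefBackingPage(...);  // refcount 0 -> 1: page allocated
+ *   DevmemIntAllocDefBackingPage(...);  // refcount 1 -> 2: page reused
+ *
+ * and is freed again only when DevmemIntFreeDefBackingPage() brings the
+ * refcount back to zero.
+ */
+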
+static void DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
+                                   PVRSRV_DEF_PAGE *psDefPage,
+                                   IMG_CHAR *pcDefPageName)
+{
+	IMG_UINT32 ui32RefCnt;
+
+	ui32RefCnt = OSAtomicRead(&psDefPage->atRefCounter);
+
+	/* For the cases where the dummy page allocation fails due to lack of
+	 * memory, the refcount can still be 0 even for a sparse allocation */
+	if (0 != ui32RefCnt)
+	{
+		OSLockAcquire(psDefPage->psPgLock);
+
+		/* We know there will never be anywhere near 4G sparse PMRs, so a 32-bit refcount is safe */
+		ui32RefCnt = OSAtomicDecrement(&psDefPage->atRefCounter);
+
+		if (0 == ui32RefCnt)
+		{
+			PDUMPCOMMENT("Free %s page object", pcDefPageName);
+
+			/* Free the dummy page when refcount reaches zero */
+			DevPhysMemFree(psDevNode,
+#if defined(PDUMP)
+			               psDefPage->hPdumpPg,
+#endif
+			               &psDefPage->sPageHandle);
+
+#if defined(PDUMP)
+			psDefPage->hPdumpPg = NULL;
+#endif
+			psDefPage->ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
+		}
+
+		OSLockRelease(psDefPage->psPgLock);
+	}
+
+}
+
+PVRSRV_ERROR
+DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
+                  PMR *psPMR,
+                  IMG_UINT32 ui32PageCount,
+                  IMG_UINT32 ui32PhysicalPgOffset,
+                  PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                  IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+	PVRSRV_ERROR eError;
+
+	if (psReservation->psDevmemHeap->uiLog2PageSize > PMR_GetLog2Contiguity(psPMR))
+	{
+		PVR_DPF ((PVR_DBG_ERROR,
+				"%s: Device heap and PMR have incompatible Log2Contiguity (%u - %u). "
+				"PMR contiguity must be a multiple of the heap contiguity!",
+				__func__,
+				psReservation->psDevmemHeap->uiLog2PageSize,
+				PMR_GetLog2Contiguity(psPMR) ));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	eError = MMU_MapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+	                      uiFlags,
+	                      sDevVAddrBase,
+	                      psPMR,
+	                      ui32PhysicalPgOffset,
+	                      ui32PageCount,
+	                      NULL,
+	                      psReservation->psDevmemHeap->uiLog2PageSize);
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
+                    IMG_DEV_VIRTADDR sDevVAddrBase,
+                    IMG_UINT32 ui32PageCount)
+{
+	/* Unmap the pages and mark them invalid in the MMU PTE */
+	MMU_UnmapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+	               0,
+	               sDevVAddrBase,
+	               ui32PageCount,
+	               NULL,
+	               psReservation->psDevmemHeap->uiLog2PageSize,
+	               0);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
+                DEVMEMINT_RESERVATION *psReservation,
+                PMR *psPMR,
+                PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+                DEVMEMINT_MAPPING **ppsMappingPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_MAPPING *psMapping;
+	/* number of pages (device pages) that allocation spans */
+	IMG_UINT32 ui32NumDevPages;
+	/* device virtual address of start of allocation */
+	IMG_DEV_VIRTADDR sAllocationDevVAddr;
+	/* and its length */
+	IMG_DEVMEM_SIZE_T uiAllocationSize;
+	IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize;
+	IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PMR_FLAGS_T uiPMRFlags;
+	PVRSRV_DEF_PAGE *psDefPage;
+	IMG_CHAR *pszPageName;
+
+	if (uiLog2HeapContiguity > PMR_GetLog2Contiguity(psPMR))
+	{
+		PVR_DPF ((PVR_DBG_ERROR,
+				"%s: Device heap and PMR have incompatible contiguity (%u - %u). "
+				"Heap contiguity must be a multiple of the heap contiguity!",
+				__func__,
+				uiLog2HeapContiguity,
+				PMR_GetLog2Contiguity(psPMR) ));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+	psDevNode = psDevmemHeap->psDevmemCtx->psDevNode;
+
+	/* allocate memory to record the mapping info */
+	psMapping = OSAllocMem(sizeof *psMapping);
+	if (psMapping == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF ((PVR_DBG_ERROR, "DevmemIntMapPMR: Alloc failed"));
+		goto e0;
+	}
+
+	uiAllocationSize = psReservation->uiLength;
+
+	ui32NumDevPages = 0xffffffffU & ( ( (uiAllocationSize - 1) >> uiLog2HeapContiguity) + 1);
+	PVR_ASSERT((IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity == uiAllocationSize);
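+	/* Worked example (illustration only): for a 12KiB allocation on a
+	 * heap with 4KiB pages (uiLog2HeapContiguity == 12):
+	 *   ((12288 - 1) >> 12) + 1 == 3 device pages
+	 * and the assert holds because 3 << 12 == 12288; it would fire for
+	 * sizes that are not whole page multiples. */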
+
+	eError = PMRLockSysPhysAddresses(psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	sAllocationDevVAddr = psReservation->sBase;
+
+	/* Check if the PMR that needs to be mapped is sparse */
+	bIsSparse = PMR_IsSparse(psPMR);
+	if (bIsSparse)
+	{
+		/* Get the flags */
+		uiPMRFlags = PMR_Flags(psPMR);
+		bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+		if (bNeedBacking)
+		{
+			IMG_INT uiInitValue;
+
+			if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags))
+			{
+				psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage;
+				uiInitValue = PVR_ZERO_PAGE_INIT_VALUE;
+				pszPageName = DEV_ZERO_PAGE;
+			}
+			else
+			{
+				psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage;
+				uiInitValue = PVR_DUMMY_PAGE_INIT_VALUE;
+				pszPageName = DUMMY_PAGE;
+			}
+
+			/* Errors are logged within the function on any failure.
+			 * If the allocation fails we must fail the map request and
+			 * return an appropriate error.
+			 *
+			 * The dummy/zero page is allocated after physically locking
+			 * down the PMR pages, optimising for the common case where
+			 * that allocation succeeds. If it fails we have to unlock
+			 * the physical addresses again, which costs a little more
+			 * in the on-demand mode of operation. */
+			eError = DevmemIntAllocDefBackingPage(psDevmemHeap->psDevmemCtx,
+			                                      psDefPage,
+			                                      uiInitValue,
+			                                      pszPageName,
+			                                      IMG_TRUE);
+			if (PVRSRV_OK != eError)
+			{
+				goto e3;
+			}
+		}
+
+		/* N.B. We pass mapping permission flags to MMU_MapPages and let
+		 * it reject the mapping if the permissions on the PMR are not compatible. */
+		eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
+		                      uiMapFlags,
+		                      sAllocationDevVAddr,
+		                      psPMR,
+		                      0,
+		                      ui32NumDevPages,
+		                      NULL,
+		                      uiLog2HeapContiguity);
+		if (PVRSRV_OK != eError)
+		{
+			goto e4;
+		}
+	}
+	else
+	{
+		eError = MMU_MapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext,
+		                        sAllocationDevVAddr,
+		                        psPMR,
+		                        ui32NumDevPages << uiLog2HeapContiguity,
+		                        uiMapFlags,
+		                        uiLog2HeapContiguity);
+		if (PVRSRV_OK != eError)
+		{
+			goto e3;
+		}
+	}
+
+	psMapping->psReservation = psReservation;
+	psMapping->uiNumPages = ui32NumDevPages;
+	psMapping->psPMR = psPMR;
+
+	/* Don't bother with refcount on reservation, as a reservation
+	   only ever holds one mapping, so we directly increment the
+	   refcount on the heap instead */
+	_DevmemIntHeapAcquire(psMapping->psReservation->psDevmemHeap);
+
+	*ppsMappingPtr = psMapping;
+
+	return PVRSRV_OK;
+e4:
+	if (bNeedBacking)
+	{
+		/* If the mapping failed, the reference taken on the dummy page
+		 * must be dropped again */
+		DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode,
+		                            psDefPage,
+		                            pszPageName);
+	}
+e3:
+	{
+		PVRSRV_ERROR eError1=PVRSRV_OK;
+		eError1 = PMRUnlockSysPhysAddresses(psPMR);
+		if (PVRSRV_OK != eError1)
+		{
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Failed to unlock the physical addresses",__func__));
+		}
+		*ppsMappingPtr = NULL;
+	}
+e2:
+	OSFreeMem(psMapping);
+
+e0:
+	PVR_ASSERT (eError != PVRSRV_OK);
+	return eError;
+}
+
+
+PVRSRV_ERROR
+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP *psDevmemHeap = psMapping->psReservation->psDevmemHeap;
+	/* device virtual address of start of allocation */
+	IMG_DEV_VIRTADDR sAllocationDevVAddr;
+	/* number of pages (device pages) that allocation spans */
+	IMG_UINT32 ui32NumDevPages;
+	IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
+	PMR_FLAGS_T uiPMRFlags;
+
+	ui32NumDevPages = psMapping->uiNumPages;
+	sAllocationDevVAddr = psMapping->psReservation->sBase;
+
+	/* Check if the PMR that was mapped is sparse */
+	bIsSparse = PMR_IsSparse(psMapping->psPMR);
+
+	if (bIsSparse)
+	{
+		/* Get the flags */
+		uiPMRFlags = PMR_Flags(psMapping->psPMR);
+		bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+		if (bNeedBacking)
+		{
+			if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags))
+			{
+				DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode,
+											&psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage,
+											DEV_ZERO_PAGE);
+			}
+			else
+			{
+				DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode,
+											&psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage,
+											DUMMY_PAGE);
+			}
+		}
+
+		MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+				0,
+				sAllocationDevVAddr,
+				ui32NumDevPages,
+				NULL,
+				psMapping->psReservation->psDevmemHeap->uiLog2PageSize,
+				0);
+	}
+	else
+	{
+		MMU_UnmapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext,
+		                 sAllocationDevVAddr,
+		                 ui32NumDevPages,
+		                 psMapping->psReservation->psDevmemHeap->uiLog2PageSize);
+	}
+
+
+	eError = PMRUnlockSysPhysAddresses(psMapping->psPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Don't bother with refcount on reservation, as a reservation
+	   only ever holds one mapping, so we directly decrement the
+	   refcount on the heap instead */
+	_DevmemIntHeapRelease(psDevmemHeap);
+
+	OSFreeMem(psMapping);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+                      IMG_DEV_VIRTADDR sAllocationDevVAddr,
+                      IMG_DEVMEM_SIZE_T uiAllocationSize,
+                      DEVMEMINT_RESERVATION **ppsReservationPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_RESERVATION *psReservation;
+
+	/* allocate memory to record the reservation info */
+	psReservation = OSAllocMem(sizeof *psReservation);
+	if (psReservation == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF ((PVR_DBG_ERROR, "DevmemIntReserveRange: Alloc failed"));
+		goto e0;
+	}
+
+	psReservation->sBase = sAllocationDevVAddr;
+	psReservation->uiLength = uiAllocationSize;
+
+	eError = MMU_Alloc(psDevmemHeap->psDevmemCtx->psMMUContext,
+	                   uiAllocationSize,
+	                   &uiAllocationSize,
+	                   0, /* IMG_UINT32 uiProtFlags */
+	                   0, /* alignment is n/a since we supply devvaddr */
+	                   &sAllocationDevVAddr,
+	                   psDevmemHeap->uiLog2PageSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	/* since we supplied the virt addr, MMU_Alloc shouldn't have
+	   chosen a new one for us */
+	PVR_ASSERT(sAllocationDevVAddr.uiAddr == psReservation->sBase.uiAddr);
+
+	_DevmemIntHeapAcquire(psDevmemHeap);
+
+	psReservation->psDevmemHeap = psDevmemHeap;
+	*ppsReservationPtr = psReservation;
+
+	return PVRSRV_OK;
+
+	/*
+	 *  error exit paths follow
+	 */
+
+e1:
+	OSFreeMem(psReservation);
+
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psReservation)
+{
+	IMG_DEV_VIRTADDR sBase        = psReservation->sBase;
+	IMG_UINT32 uiLength           = psReservation->uiLength;
+	IMG_UINT32 uiLog2DataPageSize = psReservation->psDevmemHeap->uiLog2PageSize;
+
+	MMU_Free(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+	         sBase,
+	         uiLength,
+	         uiLog2DataPageSize);
+
+	_DevmemIntHeapRelease(psReservation->psDevmemHeap);
+	OSFreeMem(psReservation);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap)
+{
+	if (OSAtomicRead(&psDevmemHeap->hRefCount) != 1)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "BUG!  %s called but has too many references (%d) "
+		         "which probably means allocations have been made from the heap and not freed",
+		         __func__,
+		         OSAtomicRead(&psDevmemHeap->hRefCount)));
+
+		/*
+		 * Try again later when you've freed all the memory
+		 *
+		 * Note:
+		 * We don't expect the application to retry (after all this call would
+		 * succeed if the client had freed all the memory which it should have
+		 * done before calling this function). However, given there should be
+		 * an associated handle, when the handle base is destroyed it will free
+		 * any allocations leaked by the client and then it will retry this call,
+		 * which should then succeed.
+		 */
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	PVR_ASSERT(OSAtomicRead(&psDevmemHeap->hRefCount) == 1);
+
+	_DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed heap %p", __func__, psDevmemHeap));
+	OSFreeMem(psDevmemHeap);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
+                      PMR *psPMR,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *pai32AllocIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pai32FreeIndices,
+                      SPARSE_MEM_RESIZE_FLAGS uiSparseFlags,
+                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                      IMG_DEV_VIRTADDR sDevVAddrBase,
+                      IMG_UINT64 sCpuVAddrBase)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	IMG_UINT32 uiLog2PMRContiguity = PMR_GetLog2Contiguity(psPMR);
+	IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize;
+	IMG_UINT32 uiOrderDiff = uiLog2PMRContiguity - uiLog2HeapContiguity;
+	IMG_UINT32 uiPagesPerOrder = 1 << uiOrderDiff;
+
+	IMG_UINT32 *pai32MapIndices = pai32AllocIndices;
+	IMG_UINT32 *pai32UnmapIndices = pai32FreeIndices;
+	IMG_UINT32 uiMapPageCount = ui32AllocPageCount;
+	IMG_UINT32 uiUnmapPageCount = ui32FreePageCount;
+
+	/* Special case:
+	 * Adjust indices if we map into a heap that uses smaller page sizes
+	 * than the physical allocation itself.
+	 * The incoming parameters are all based on the page size of the PMR,
+	 * but the mapping functions expect parameters in terms of heap page sizes. */
+	if (uiOrderDiff != 0)
+	{
+		IMG_UINT32 uiPgIdx, uiPgOffset;
+
+		uiMapPageCount = (uiMapPageCount << uiOrderDiff);
+		uiUnmapPageCount = (uiUnmapPageCount << uiOrderDiff);
+
+		pai32MapIndices = OSAllocMem(uiMapPageCount * sizeof(*pai32MapIndices));
+		if (!pai32MapIndices)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		pai32UnmapIndices = OSAllocMem(uiUnmapPageCount * sizeof(*pai32UnmapIndices));
+		if (!pai32UnmapIndices)
+		{
+			OSFreeMem(pai32MapIndices);
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		/* Every chunk index needs to be translated from physical indices
+		 * into heap based indices. */
+		for (uiPgIdx = 0; uiPgIdx < ui32AllocPageCount; uiPgIdx++)
+		{
+			for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++)
+			{
+				pai32MapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] =
+						pai32AllocIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset;
+			}
+		}
+
+		for (uiPgIdx = 0; uiPgIdx < ui32FreePageCount; uiPgIdx++)
+		{
+			for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++)
+			{
+				pai32UnmapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] =
+						pai32FreeIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset;
+			}
+		}
+	}
+
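+	/* Worked example (illustration only): with 16KiB PMR pages and 4KiB
+	 * heap pages, uiOrderDiff == 2 and uiPagesPerOrder == 4, so PMR
+	 * chunk index 3 expands to heap page indices 12, 13, 14 and 15. */
+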
+	/*
+	 * This request is carried out in the following order; the ordering
+	 * of the operations is very important in this case:
+	 *
+	 * 1. The parameters are validated in PMR_ChangeSparseMem below.
+	 *    A successful response indicates all the parameters are correct.
+	 *    On failure we bail out without processing further.
+	 * 2. On success, the PMR-specific operations are carried out: page
+	 *    alloc, page free and the corresponding PMR status changes.
+	 *    If this call fails, the previous state of the PMR is guaranteed
+	 *    to be undisturbed. If it succeeds, we proceed with the
+	 *    subsequent steps.
+	 * 3. Invalidate the GPU page table entries for the pages to be freed.
+	 * 4. Write the GPU page table entries for the pages that got allocated.
+	 * 5. Change the corresponding CPU address space map.
+	 *
+	 * The above steps can be selectively controlled using flags.
+	 */
+	if (uiSparseFlags & (SPARSE_REMAP_MEM | SPARSE_RESIZE_BOTH))
+	{
+		/* Do the PMR specific changes first */
+		eError = PMR_ChangeSparseMem(psPMR,
+		                             ui32AllocPageCount,
+		                             pai32AllocIndices,
+		                             ui32FreePageCount,
+		                             pai32FreeIndices,
+		                             uiSparseFlags);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+					"%s: Failed to do PMR specific changes.",
+					__func__));
+			goto e1;
+		}
+
+		/* Invalidate the page table entries for the freed pages.
+		 * A later optimisation would be not to touch the ones that get re-mapped */
+		if ((0 != ui32FreePageCount) && (uiSparseFlags & SPARSE_RESIZE_FREE))
+		{
+			PMR_FLAGS_T uiPMRFlags;
+
+			/* Get the flags */
+			uiPMRFlags = PMR_Flags(psPMR);
+
+			if (SPARSE_REMAP_MEM != (uiSparseFlags & SPARSE_REMAP_MEM))
+			{
+				/* Unmap the pages and mark them invalid in the MMU PTE */
+				MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+				                uiFlags,
+				                sDevVAddrBase,
+				                uiUnmapPageCount,
+				                pai32UnmapIndices,
+				                uiLog2HeapContiguity,
+				                uiPMRFlags);
+			}
+		}
+
+		/* Wire up the page tables for the pages that got allocated */
+		if ((0 != ui32AllocPageCount) && (uiSparseFlags & SPARSE_RESIZE_ALLOC))
+		{
+			/* Map the pages and mark them Valid in the MMU PTE */
+			eError = MMU_MapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+			                       uiFlags,
+			                       sDevVAddrBase,
+			                       psPMR,
+			                       0,
+			                       uiMapPageCount,
+			                       pai32MapIndices,
+			                       uiLog2HeapContiguity);
+
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE,
+						"%s: Failed to map alloc indices.",
+						__func__));
+				goto e1;
+			}
+		}
+
+		/* Currently only used for debug */
+		if (SPARSE_REMAP_MEM == (uiSparseFlags & SPARSE_REMAP_MEM))
+		{
+			eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
+			                      uiFlags,
+			                      sDevVAddrBase,
+			                      psPMR,
+			                      0,
+			                      uiMapPageCount,
+			                      pai32UnmapIndices,
+			                      uiLog2HeapContiguity);
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE,
+						"%s: Failed to map Free indices.",
+						__func__));
+				goto e1;
+			}
+		}
+	}
+
+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+	/* Do the changes in sparse on to the CPU virtual map accordingly */
+	if (uiSparseFlags & SPARSE_MAP_CPU_ADDR)
+	{
+		if (sCpuVAddrBase != 0)
+		{
+			eError = PMR_ChangeSparseMemCPUMap(psPMR,
+			                                   sCpuVAddrBase,
+			                                   ui32AllocPageCount,
+			                                   pai32AllocIndices,
+			                                   ui32FreePageCount,
+			                                   pai32FreeIndices);
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE,
+				        "%s: Failed to map to CPU addr space.",
+				        __func__));
+				goto e0;
+			}
+		}
+	}
+#endif
+
+e1:
+	if (pai32MapIndices != pai32AllocIndices)
+	{
+		OSFreeMem(pai32MapIndices);
+	}
+	if (pai32UnmapIndices != pai32FreeIndices)
+	{
+		OSFreeMem(pai32UnmapIndices);
+	}
+e0:
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxDestroy
+@Description    Destroy a context created by DevmemIntCtxCreate
+@Input          psDevmemCtx   Device Memory context
+@Return         PVRSRV_OK (cannot fail)
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx)
+{
+	/*
+	   We can't determine here whether we should be freeing the context,
+	   as a refcount != 1 could mean either that heap(s) remain with
+	   allocations on them, or that this memory context has been
+	   exported. Since the client couldn't do anything useful with this
+	   information anyway, and since the refcount ensures we only free
+	   the context when _all_ references have been released, don't
+	   bother checking and just return OK regardless.
+	   */
+	_DevmemIntCtxRelease(psDevmemCtx);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      DEVMEMINT_CTX *psDevMemContext,
+                                      IMG_DEV_VIRTADDR sDevAddr)
+{
+	IMG_UINT32 i, j, uiLog2HeapPageSize = 0;
+	DEVICE_MEMORY_INFO *psDinfo = &psDevNode->sDevMemoryInfo;
+	DEVMEM_HEAP_CONFIG *psConfig = psDinfo->psDeviceMemoryHeapConfigArray;
+
+	IMG_BOOL bFound = IMG_FALSE;
+
+	for (i = 0;
+		 i < psDinfo->uiNumHeapConfigs && !bFound;
+		 i++)
+	{
+		for (j = 0;
+			 j < psConfig[i].uiNumHeaps  && !bFound;
+			 j++)
+		{
+			IMG_DEV_VIRTADDR uiBase =
+					psConfig[i].psHeapBlueprintArray[j].sHeapBaseAddr;
+			IMG_DEVMEM_SIZE_T uiSize =
+					psConfig[i].psHeapBlueprintArray[j].uiHeapLength;
+
+			if ((sDevAddr.uiAddr >= uiBase.uiAddr) &&
+				(sDevAddr.uiAddr < (uiBase.uiAddr + uiSize)))
+			{
+				uiLog2HeapPageSize =
+						psConfig[i].psHeapBlueprintArray[j].uiLog2DataPageSize;
+				bFound = IMG_TRUE;
+			}
+		}
+	}
+
+	if (uiLog2HeapPageSize == 0)
+	{
+		return PVRSRV_ERROR_INVALID_GPU_ADDR;
+	}
+
+	return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext,
+	                           uiLog2HeapPageSize,
+	                           sDevAddr) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR;
+}
+
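+/*
+ * Illustration of the two-stage check above (sketch only; the address is
+ * hypothetical): an address is reported valid only if it (a) falls inside
+ * one of the device's heap blueprints and (b) is actually mapped in the
+ * MMU:
+ *
+ *   sDevAddr.uiAddr = 0x8800000000ULL;
+ *   eError = DevmemIntIsVDevAddrValid(psConnection, psDevNode,
+ *                                     psDevMemContext, sDevAddr);
+ *   // PVRSRV_OK only if both the heap lookup and page-table walk succeed
+ */
+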
+PVRSRV_ERROR DevmemIntGetFaultAddress(CONNECTION_DATA * psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      DEVMEMINT_CTX *psDevMemContext,
+                                      IMG_DEV_VIRTADDR *psFaultAddress)
+{
+	if ((psDevMemContext->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0)
+	{
+		return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+	}
+
+	*psFaultAddress = psDevMemContext->sFaultAddress;
+	psDevMemContext->ui32Flags &= ~DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE;
+
+	return PVRSRV_OK;
+}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+static POSWR_LOCK g_hExportCtxListLock;
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+static DLLIST_NODE g_sExportCtxList;
+
+PVRSRV_ERROR
+DevmemIntInit(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	dllist_init(&g_sExportCtxList);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSWRLockCreate(&g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+	return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntDeInit(void)
+{
+	PVR_ASSERT(dllist_is_empty(&g_sExportCtxList));
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockDestroy(g_hExportCtxListLock);
+#endif
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntExportCtx(DEVMEMINT_CTX *psContext,
+                   PMR *psPMR,
+                   DEVMEMINT_CTX_EXPORT **ppsContextExport)
+{
+	DEVMEMINT_CTX_EXPORT *psCtxExport;
+
+	psCtxExport = OSAllocMem(sizeof(DEVMEMINT_CTX_EXPORT));
+	if (psCtxExport == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Failed to export context. System currently out of memory",
+		         __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	_DevmemIntCtxAcquire(psContext);
+	PMRRefPMR(psPMR);
+	psCtxExport->psDevmemCtx = psContext;
+	psCtxExport->psPMR = psPMR;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockAcquireWrite(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+	dllist_add_to_tail(&g_sExportCtxList, &psCtxExport->sNode);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockReleaseWrite(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+	*ppsContextExport = psCtxExport;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport)
+{
+	PMRUnrefPMR(psContextExport->psPMR);
+	_DevmemIntCtxRelease(psContextExport->psDevmemCtx);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockAcquireWrite(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+	dllist_remove_node(&psContextExport->sNode);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockReleaseWrite(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+	OSFreeMem(psContextExport);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntAcquireRemoteCtx(PMR *psPMR,
+                          DEVMEMINT_CTX **ppsContext,
+                          IMG_HANDLE *phPrivData)
+{
+	PDLLIST_NODE psListNode, psListNodeNext;
+	DEVMEMINT_CTX_EXPORT *psCtxExport;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockAcquireRead(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+	/* Find context from list using PMR as key */
+	dllist_foreach_node(&g_sExportCtxList, psListNode, psListNodeNext)
+	{
+		psCtxExport = IMG_CONTAINER_OF(psListNode, DEVMEMINT_CTX_EXPORT, sNode);
+		if (psCtxExport->psPMR == psPMR)
+		{
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSWRLockReleaseRead(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+			_DevmemIntCtxAcquire(psCtxExport->psDevmemCtx);
+			*ppsContext = psCtxExport->psDevmemCtx;
+			*phPrivData = psCtxExport->psDevmemCtx->hPrivData;
+			return PVRSRV_OK;
+		}
+	}
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockReleaseRead(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+	/* Unable to find exported context, return error */
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed to acquire remote context. Could not retrieve context with given PMR",
+			__func__));
+	return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntRegisterPFNotify
+@Description    Registers a PID to be notified when a page fault occurs on a
+                specific device memory context.
+@Input          psDevmemCtx    The context to be notified about.
+@Input          ui32PID        The PID of the process that would like to be
+                               notified.
+@Input          bRegister      If true, register. If false, de-register.
+@Return         PVRSRV_ERROR.
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
+                                         IMG_INT32     ui32PID,
+                                         IMG_BOOL      bRegister)
+{
+	PVRSRV_DEVICE_NODE *psDevNode;
+	DLLIST_NODE         *psNode, *psNodeNext;
+	DEVMEMINT_PF_NOTIFY *psNotifyNode;
+	IMG_BOOL            bPresent = IMG_FALSE;
+
+	if (psDevmemCtx == NULL)
+	{
+		PVR_ASSERT(!"Devmem Context Missing");
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevNode = psDevmemCtx->psDevNode;
+
+	if (bRegister)
+	{
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSWRLockAcquireRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+		/* If this is the first PID in the list, the device memory context
+		 * needs to be registered for notification */
+		if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead))
+		{
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSWRLockReleaseRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+			dllist_add_to_tail(&psDevNode->sMemoryContextPageFaultNotifyListHead,
+			                   &psDevmemCtx->sPageFaultNotifyListElem);
+		}
+		else
+		{
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSWRLockReleaseRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+		}
+	}
+
+	/* Loop through the registered PIDs and check whether this one is
+	 * present */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockAcquireRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+	dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+	{
+		psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+
+		if (psNotifyNode->ui32PID == ui32PID)
+		{
+			bPresent = IMG_TRUE;
+			break;
+		}
+	}
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockReleaseRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+	if (bRegister)
+	{
+		if (bPresent)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Trying to register a PID that is already registered",
+			         __func__));
+			return PVRSRV_ERROR_PID_ALREADY_REGISTERED;
+		}
+
+		psNotifyNode = OSAllocMem(sizeof(*psNotifyNode));
+		if (psNotifyNode == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Unable to allocate memory for the notify list",
+			          __func__));
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		psNotifyNode->ui32PID = ui32PID;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSWRLockAcquireWrite(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+		dllist_add_to_tail(&(psDevmemCtx->sProcessNotifyListHead), &(psNotifyNode->sProcessNotifyListElem));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSWRLockReleaseWrite(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+	}
+	else
+	{
+		if (!bPresent)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Trying to unregister a PID that is not registered",
+			         __func__));
+			return PVRSRV_ERROR_PID_NOT_REGISTERED;
+		}
+		dllist_remove_node(psNode);
+		psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+		OSFreeMem(psNotifyNode);
+	}
+
+	if (!bRegister)
+	{
+		/* If the last process in the list is being unregistered, then also
+		 * unregister the device memory context from the notify list. */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSWRLockAcquireWrite(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+		if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead))
+		{
+			dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem);
+		}
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSWRLockReleaseWrite(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntPFNotify
+@Description    Notifies any processes that have registered themselves to be
+                notified when a page fault happens on a specific device memory
+                context.
+@Input          psDevNode            The device node.
+@Input          ui64FaultedPCAddress The page catalogue address that faulted.
+@Input          sFaultAddress        The address that triggered the fault.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode,
+                               IMG_UINT64         ui64FaultedPCAddress,
+                               IMG_DEV_VIRTADDR   sFaultAddress)
+{
+	DLLIST_NODE         *psNode, *psNodeNext;
+	DEVMEMINT_PF_NOTIFY *psNotifyNode;
+	PVRSRV_ERROR        eError;
+	DEVMEMINT_CTX       *psDevmemCtx = NULL;
+	IMG_BOOL            bFailed = IMG_FALSE;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockAcquireRead(psDevNode->hMemoryContextPageFaultNotifyListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+	if (dllist_is_empty(&(psDevNode->sMemoryContextPageFaultNotifyListHead)))
+	{
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+		return PVRSRV_OK;
+	}
+
+	dllist_foreach_node(&(psDevNode->sMemoryContextPageFaultNotifyListHead), psNode, psNodeNext)
+	{
+		DEVMEMINT_CTX *psThisContext =
+			IMG_CONTAINER_OF(psNode, DEVMEMINT_CTX, sPageFaultNotifyListElem);
+		IMG_DEV_PHYADDR sPCDevPAddr;
+
+		eError = MMU_AcquireBaseAddr(psThisContext->psMMUContext, &sPCDevPAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Failed to Acquire Base Address (%s)",
+			         __func__,
+			         PVRSRVGetErrorString(eError)));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+			return eError;
+		}
+
+		if (sPCDevPAddr.uiAddr == ui64FaultedPCAddress)
+		{
+			psDevmemCtx = psThisContext;
+			break;
+		}
+	}
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+	if (psDevmemCtx == NULL)
+	{
+		/* Not found, just return */
+		return PVRSRV_OK;
+	}
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockAcquireRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+	/*
+	 * Store the first occurrence of a page fault address,
+	 * until that address is consumed by a client.
+	 */
+	if ((psDevmemCtx->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0)
+	{
+		psDevmemCtx->sFaultAddress = sFaultAddress;
+		psDevmemCtx->ui32Flags |= DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE;
+	}
+
+	/* Loop through each registered PID and send a signal to the process */
+	dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+	{
+		psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+
+		eError = OSDebugSignalPID(psNotifyNode->ui32PID);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Unable to signal process for PID: %u",
+			         __func__,
+			         psNotifyNode->ui32PID));
+
+			PVR_ASSERT(!"Unable to signal process");
+
+			bFailed = IMG_TRUE;
+		}
+	}
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockReleaseRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+	if (bFailed)
+	{
+		return PVRSRV_ERROR_SIGNAL_FAILED;
+	}
+
+	return PVRSRV_OK;
+}
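+
+/* Illustrative call site (a sketch, not part of this file): a device fault
+ * handler that has decoded the faulting page catalogue address from the
+ * fault status registers would forward it here, e.g.
+ *
+ *     eError = DevmemIntPFNotify(psDevNode, ui64PCAddress, sFaultDevVAddr);
+ *
+ * ui64PCAddress and sFaultDevVAddr are hypothetical locals. A PVRSRV_OK
+ * return also covers the "no context registered" case handled above.
+ */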
+
+#if defined(PDUMP)
+IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext)
+{
+	IMG_UINT32 ui32MMUContextID;
+	MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32MMUContextID);
+	return ui32MMUContextID;
+}
+
+PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+                                IMG_DEV_VIRTADDR sDevAddrStart,
+                                IMG_DEVMEM_SIZE_T uiSize,
+                                IMG_UINT32 ui32ArraySize,
+                                const IMG_CHAR *pszFilename,
+                                IMG_UINT32 ui32FileOffset,
+                                IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 uiPDumpMMUCtx;
+
+	PVR_UNREFERENCED_PARAMETER(ui32ArraySize);
+
+	eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext,
+			&uiPDumpMMUCtx);
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/*
+	   The following SYSMEM refers to the 'MMU Context'; hence it
+	   should be the MMU context, not the PMR, that determines the
+	   PDump MemSpace tag.
+	   From a PDump point of view it does not matter which namespace we
+	   use, as long as that MemSpace is used on the 'MMU Context' we are
+	   dumping from.
+	   */
+	eError = PDumpMMUSAB(psDevmemCtx->psDevNode->sDevId.pszPDumpDevName,
+	                     uiPDumpMMUCtx,
+	                     sDevAddrStart,
+	                     uiSize,
+	                     pszFilename,
+	                     ui32FileOffset,
+	                     ui32PDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext);
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection,
+                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                     IMG_CHAR *pszFileName,
+                     IMG_UINT32 ui32FileOffset,
+                     IMG_UINT32 ui32Width,
+                     IMG_UINT32 ui32Height,
+                     IMG_UINT32 ui32StrideInBytes,
+                     IMG_DEV_VIRTADDR sDevBaseAddr,
+                     DEVMEMINT_CTX *psDevMemContext,
+                     IMG_UINT32 ui32Size,
+                     PDUMP_PIXEL_FORMAT ePixelFormat,
+                     IMG_UINT32 ui32AddrMode,
+                     IMG_UINT32 ui32PDumpFlags)
+{
+	IMG_UINT32 ui32ContextID;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DevmemIntPDumpBitmap: Failed to acquire MMU context"));
+		return PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID;
+	}
+
+	eError = PDumpBitmapKM(psDeviceNode,
+	                       pszFileName,
+	                       ui32FileOffset,
+	                       ui32Width,
+	                       ui32Height,
+	                       ui32StrideInBytes,
+	                       sDevBaseAddr,
+	                       ui32ContextID,
+	                       ui32Size,
+	                       ePixelFormat,
+	                       ui32AddrMode,
+	                       ui32PDumpFlags);
+
+	/* Don't care about return value */
+	MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection,
+							  PVRSRV_DEVICE_NODE *psDeviceNode,
+							  DEVMEMINT_CTX *psDevMemContext,
+							  IMG_UINT32 ui32Size,
+							  const IMG_CHAR *pszFileName,
+							  IMG_DEV_VIRTADDR sData,
+							  IMG_UINT32 ui32DataSize,
+							  IMG_UINT32 ui32LogicalWidth,
+							  IMG_UINT32 ui32LogicalHeight,
+							  IMG_UINT32 ui32PhysicalWidth,
+							  IMG_UINT32 ui32PhysicalHeight,
+							  PDUMP_PIXEL_FORMAT ePixFmt,
+							  IMG_MEMLAYOUT eMemLayout,
+							  IMG_FB_COMPRESSION eFBCompression,
+							  const IMG_UINT32 *paui32FBCClearColour,
+							  PDUMP_FBC_SWIZZLE eFBCSwizzle,
+							  IMG_DEV_VIRTADDR sHeader,
+							  IMG_UINT32 ui32HeaderSize,
+							  IMG_UINT32 ui32PDumpFlags)
+{
+	IMG_UINT32 ui32ContextID;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+	eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID);
+	PVR_LOGR_IF_ERROR(eError, "MMU_AcquirePDumpMMUContext");
+
+	eError = PDumpImageDescriptor(psDeviceNode,
+									ui32ContextID,
+									(IMG_CHAR *)pszFileName,
+									sData,
+									ui32DataSize,
+									ui32LogicalWidth,
+									ui32LogicalHeight,
+									ui32PhysicalWidth,
+									ui32PhysicalHeight,
+									ePixFmt,
+									eMemLayout,
+									eFBCompression,
+									paui32FBCClearColour,
+									eFBCSwizzle,
+									sHeader,
+									ui32HeaderSize,
+									ui32PDumpFlags);
+	PVR_LOG_IF_ERROR(eError, "PDumpImageDescriptor");
+
+	/* Don't care about return value */
+	(void) MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection,
+							  PVRSRV_DEVICE_NODE *psDeviceNode,
+							  DEVMEMINT_CTX *psDevMemContext,
+							  IMG_UINT32 ui32Size,
+							  const IMG_CHAR *pszFileName,
+							  IMG_DEV_VIRTADDR sData,
+							  IMG_UINT32 ui32DataSize,
+							  IMG_UINT32 ui32ElementType,
+							  IMG_UINT32 ui32ElementCount,
+							  IMG_UINT32 ui32PDumpFlags)
+{
+	IMG_UINT32 ui32ContextID;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+	eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID);
+	PVR_LOGR_IF_ERROR(eError, "MMU_AcquirePDumpMMUContext");
+
+	eError = PDumpDataDescriptor(psDeviceNode,
+									ui32ContextID,
+									(IMG_CHAR *)pszFileName,
+									sData,
+									ui32DataSize,
+									ui32ElementType,
+									ui32ElementCount,
+									ui32PDumpFlags);
+	PVR_LOG_IF_ERROR(eError, "PDumpDataDescriptor");
+
+	/* Don't care about return value */
+	(void) MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext);
+
+	return eError;
+}
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_server.h
new file mode 100644
index 0000000..16ea65e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_server.h
@@ -0,0 +1,620 @@
+/**************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header file for server side component of device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICEMEM_SERVER_H__
+#define __DEVICEMEM_SERVER_H__
+
+#include "device.h" /* For device node */
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "connection_server.h"
+
+#include "pmr.h"
+
+
+typedef struct _DEVMEMINT_CTX_ DEVMEMINT_CTX;
+typedef struct _DEVMEMINT_CTX_EXPORT_ DEVMEMINT_CTX_EXPORT;
+typedef struct _DEVMEMINT_HEAP_ DEVMEMINT_HEAP;
+
+typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION;
+typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING;
+typedef struct _DEVMEMINT_PF_NOTIFY_ DEVMEMINT_PF_NOTIFY;
+
+
+/**************************************************************************/ /*!
+@Function       DevmemIntUnpin
+@Description    This is the counterpart to DevmemIntPin(). It is meant to be
+                called when the allocation is NOT mapped in the device virtual
+                space.
+
+@Input          psPMR           The physical memory to unpin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
+                                registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntUnpin(PMR *psPMR);
+
+/**************************************************************************/ /*!
+@Function       DevmemIntUnpinInvalidate
+@Description    This is the counterpart to DevmemIntPinValidate(). It is meant
+                to be called for allocations that ARE mapped in the device
+                virtual space and whose mapping has to be invalidated.
+
+@Input          psPMR           The physical memory to unpin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
+                                registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
+
+/**************************************************************************/ /*!
+@Function       DevmemIntPin
+@Description    This is the counterpart to DevmemIntUnpin().
+                It is meant to be called if there is NO device mapping present.
+
+@Input          psPMR           The physical memory to pin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
+                                was successfully restored.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+                                could not be restored and new physical memory
+                                was allocated.
+
+                                A different error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntPin(PMR *psPMR);
+
+/**************************************************************************/ /*!
+@Function       DevmemIntPinValidate
+@Description    This is the counterpart to DevmemIntUnpinInvalidate().
+                It is meant to be called if there IS a device mapping present
+                that needs to be taken care of.
+
+@Input          psDevmemMapping The mapping structure used for the passed PMR.
+
+@Input          psPMR           The physical memory to pin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
+                                was successfully restored.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+                                could not be restored and new physical memory
+                                was allocated.
+
+                                A different error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
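+
+/* A minimal pin/unpin sketch under assumptions: psPMR and psDevmemMapping
+ * are hypothetical handles from an earlier allocation and DevmemIntMapPMR()
+ * call, and error handling is trimmed:
+ *
+ *     eError = DevmemIntUnpinInvalidate(psDevmemMapping, psPMR);
+ *     // ... backing may be reclaimed while unpinned ...
+ *     eError = DevmemIntPinValidate(psDevmemMapping, psPMR);
+ *     if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ *     {
+ *         // Backing was reclaimed: the content is lost and must be
+ *         // regenerated by the caller.
+ *     }
+ */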
+/*
+ * DevmemServerGetImportHandle()
+ *
+ * For a given exportable memory descriptor, returns the PMR handle
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+                            IMG_HANDLE *phImport);
+
+/*
+ * DevmemServerGetHeapHandle()
+ *
+ * For a given reservation, returns the heap handle
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
+                          IMG_HANDLE *phHeap);
+
+/*
+ * DevmemIntCtxCreate()
+ *
+ * Create a Server-side Device Memory Context.  This is usually the
+ * counterpart of the client side memory context, and indeed is
+ * usually created at the same time.
+ *
+ * You must have one of these before creating any heaps.
+ *
+ * All heaps must have been destroyed before calling
+ * DevmemIntCtxDestroy()
+ *
+ * If you call DevmemIntCtxCreate() (and it succeeds) you are promising
+ * to later call DevmemIntCtxDestroy()
+ *
+ * Note that this call will cause the device MMU code to do some work
+ * for creating the device memory context, but it does not guarantee
+ * that a page catalogue will have been created, as this may be
+ * deferred until first allocation.
+ *
+ * Caller to provide storage for a pointer to the DEVMEM_CTX object
+ * that will be created by this call.
+ */
+extern PVRSRV_ERROR
+DevmemIntCtxCreate(CONNECTION_DATA *psConnection,
+                   PVRSRV_DEVICE_NODE *psDeviceNode,
+                   /* devnode / perproc etc */
+                   IMG_BOOL bKernelMemoryCtx,
+                   DEVMEMINT_CTX **ppsDevmemCtxPtr,
+                   IMG_HANDLE *hPrivData,
+                   IMG_UINT32 *pui32CPUCacheLineSize);
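+
+/* A create/destroy pairing sketch (psConnection and psDeviceNode are assumed
+ * to be in scope; hPriv and ui32CacheLineSize are hypothetical locals):
+ *
+ *     DEVMEMINT_CTX *psCtx;
+ *     IMG_HANDLE hPriv;
+ *     IMG_UINT32 ui32CacheLineSize;
+ *
+ *     eError = DevmemIntCtxCreate(psConnection, psDeviceNode, IMG_FALSE,
+ *                                 &psCtx, &hPriv, &ui32CacheLineSize);
+ *     // ... create heaps, allocate, then destroy all heaps ...
+ *     eError = DevmemIntCtxDestroy(psCtx);
+ */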
+/*
+ * DevmemIntCtxDestroy()
+ *
+ * Undoes a prior DevmemIntCtxCreate or DevmemIntCtxImport.
+ */
+extern PVRSRV_ERROR
+DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx);
+
+/*
+ * DevmemIntHeapCreate()
+ *
+ * Creates a new heap in this device memory context.  This will cause
+ * a call into the MMU code to allocate various data structures for
+ * managing this heap.  It will not necessarily cause any page tables
+ * to be set up, as this can be deferred until first allocation.
+ * (i.e. we shouldn't care - it's up to the MMU code)
+ *
+ * Note that the data page size must be specified (as log 2).  The
+ * data page size as specified here will be communicated to the mmu
+ * module, and thus may determine the page size configured in page
+ * directory entries for subsequent allocations from this heap.  It is
+ * essential that the page size here is less than or equal to the
+ * "minimum contiguity guarantee" of any PMR that you subsequently
+ * attempt to map to this heap.
+ *
+ * If you call DevmemIntHeapCreate() (and the call succeeds) you are
+ * promising that you shall subsequently call DevmemIntHeapDestroy()
+ *
+ * Caller to provide storage for a pointer to the DEVMEM_HEAP object
+ * that will be created by this call.
+ */
+extern PVRSRV_ERROR
+DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx,
+                    IMG_DEV_VIRTADDR sHeapBaseAddr,
+                    IMG_DEVMEM_SIZE_T uiHeapLength,
+                    IMG_UINT32 uiLog2DataPageSize,
+                    DEVMEMINT_HEAP **ppsDevmemHeapPtr);
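+
+/* A heap creation sketch (all values illustrative): a 4KB data page size is
+ * passed as its log2, i.e. 12, and must not exceed the minimum contiguity
+ * guarantee of any PMR later mapped into this heap:
+ *
+ *     DEVMEMINT_HEAP *psHeap;
+ *     IMG_DEV_VIRTADDR sBase = { 0x8000000ULL };  // hypothetical base
+ *     eError = DevmemIntHeapCreate(psCtx, sBase, 0x10000000ULL, 12, &psHeap);
+ *     // ... allocations ... then, once all are freed:
+ *     eError = DevmemIntHeapDestroy(psHeap);
+ */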
+/*
+ * DevmemIntHeapDestroy()
+ *
+ * Destroys a heap previously created with DevmemIntHeapCreate()
+ *
+ * All allocations from this heap must have been freed before this
+ * call.
+ */
+extern PVRSRV_ERROR
+DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap);
+
+/*
+ * DevmemIntMapPMR()
+ *
+ * Maps the given PMR to the virtual range previously allocated with
+ * DevmemIntReserveRange()
+ *
+ * If appropriate, the PMR must have had its physical backing
+ * committed, as this call will call into the MMU code to set up the
+ * page tables for this allocation, which shall in turn request the
+ * physical addresses from the PMR.  Alternatively, the PMR
+ * implementation can choose to do so off the back of the "lock"
+ * callback, which it will receive as a result (indirectly) of this
+ * call.
+ *
+ * This function makes no promise w.r.t. the circumstances that it can
+ * be called, and these would be "inherited" from the implementation
+ * of the PMR.  For example if the PMR "lock" callback causes pages to
+ * be pinned at that time (which may cause scheduling or disk I/O
+ * etc.) then it would not be legal to "Map" the PMR in a context
+ * where scheduling events are disallowed.
+ *
+ * If you call DevmemIntMapPMR() (and the call succeeds) then you are
+ * promising that you shall later call DevmemIntUnmapPMR()
+ */
+extern PVRSRV_ERROR
+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
+                DEVMEMINT_RESERVATION *psReservation,
+                PMR *psPMR,
+                PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+                DEVMEMINT_MAPPING **ppsMappingPtr);
+/*
+ * DevmemIntUnmapPMR()
+ *
+ * Reverses the mapping caused by DevmemIntMapPMR()
+ */
+extern PVRSRV_ERROR
+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping);
+
+/* DevmemIntMapPages()
+ *
+ * Maps an arbitrary number of pages from a PMR to a reserved range
+ *
+ * @input         psReservation      Reservation handle for the range
+ * @input         psPMR              PMR that is mapped
+ * @input         ui32PageCount      Number of consecutive pages that are mapped
+ * @input         ui32PhysicalPgOffset Physical page offset into the PMR
+ * @input         uiFlags            Mapping flags
+ * @input         sDevVAddrBase      Virtual address base to start the mapping from
+ */
+extern PVRSRV_ERROR
+DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
+                  PMR *psPMR,
+                  IMG_UINT32 ui32PageCount,
+                  IMG_UINT32 ui32PhysicalPgOffset,
+                  PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                  IMG_DEV_VIRTADDR sDevVAddrBase);
+
+/* DevmemIntUnmapPages()
+ *
+ * Unmaps an arbitrary number of pages from a reserved range
+ *
+ * @input         psReservation      Reservation handle for the range
+ * @input         sDevVAddrBase      Virtual address base to start from
+ * @input         ui32PageCount      Number of consecutive pages that are unmapped
+ */
+extern PVRSRV_ERROR
+DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
+                    IMG_DEV_VIRTADDR sDevVAddrBase,
+                    IMG_UINT32 ui32PageCount);
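+
+/* A paired map/unmap sketch (the reservation, PMR, flags and base address
+ * are assumed to exist already; counts are illustrative):
+ *
+ *     eError = DevmemIntMapPages(psReservation, psPMR,
+ *                                4,   // four consecutive pages
+ *                                0,   // from the start of the PMR
+ *                                uiFlags, sDevVAddrBase);
+ *     // ...
+ *     eError = DevmemIntUnmapPages(psReservation, sDevVAddrBase, 4);
+ */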
+
+/*
+ * DevmemIntReserveRange()
+ *
+ * Indicates that the specified range should be reserved from the
+ * given heap.
+ *
+ * In turn causes the page tables to be allocated to cover the
+ * specified range.
+ *
+ * If you call DevmemIntReserveRange() (and the call succeeds) then you
+ * are promising that you shall later call DevmemIntUnreserveRange()
+ */
+extern PVRSRV_ERROR
+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+                      IMG_DEV_VIRTADDR sAllocationDevVAddr,
+                      IMG_DEVMEM_SIZE_T uiAllocationSize,
+                      DEVMEMINT_RESERVATION **ppsReservationPtr);
+/*
+ * DevmemIntUnreserveRange()
+ *
+ * Undoes the state change caused by DevmemIntReserveRange()
+ */
+extern PVRSRV_ERROR
+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation);
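+
+/* The reserve/map pairing promised above, end to end (a sketch; all names
+ * are assumed to be in scope):
+ *
+ *     eError = DevmemIntReserveRange(psHeap, sDevVAddr, uiSize, &psResv);
+ *     eError = DevmemIntMapPMR(psHeap, psResv, psPMR, uiMapFlags, &psMapping);
+ *     // ... use the mapping ...
+ *     eError = DevmemIntUnmapPMR(psMapping);
+ *     eError = DevmemIntUnreserveRange(psResv);
+ */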
+
+/*************************************************************************/ /*!
+@Function       DevmemIntChangeSparse
+@Description    Changes the sparse allocations of a PMR by allocating and freeing
+                pages and changing their corresponding CPU and GPU mappings.
+
+@input          psDevmemHeap          Pointer to the heap we map on
+@input          psPMR                 The PMR we want to map
+@input          ui32AllocPageCount    Number of pages to allocate
+@input          pai32AllocIndices     The logical PMR indices where pages will
+                                      be allocated. May be NULL.
+@input          ui32FreePageCount     Number of pages to free
+@input          pai32FreeIndices      The logical PMR indices where pages will
+                                      be freed. May be NULL.
+@input          uiSparseFlags         Flags passed in to determine which kind
+                                      of sparse change the user wanted.
+                                      See devicemem_typedefs.h for details.
+@input          uiFlags               The memalloc flags for this virtual range.
+@input          sDevVAddrBase         The base address of the virtual range of
+                                      this sparse allocation.
+@input          sCpuVAddrBase         The CPU base address of this allocation.
+                                      May be 0 if not existing.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+extern PVRSRV_ERROR
+DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
+                      PMR *psPMR,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *pai32AllocIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pai32FreeIndices,
+                      SPARSE_MEM_RESIZE_FLAGS uiSparseFlags,
+                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                      IMG_DEV_VIRTADDR sDevVAddrBase,
+                      IMG_UINT64 sCpuVAddrBase);
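+
+/* An alloc-only sparse change sketch (indices, counts and names are
+ * illustrative): back logical PMR pages 0 and 2 and update the GPU mapping;
+ * 0 is passed for the CPU base address as no CPU mapping exists:
+ *
+ *     IMG_UINT32 aui32Alloc[] = { 0, 2 };
+ *     eError = DevmemIntChangeSparse(psHeap, psPMR,
+ *                                    2, aui32Alloc,
+ *                                    0, NULL,
+ *                                    SPARSE_RESIZE_ALLOC,
+ *                                    uiFlags, sDevVAddrBase, 0);
+ */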
+
+extern PVRSRV_ERROR
+DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE *psDevNode,
+                         DEVMEMINT_CTX *psDevMemContext,
+                         IMG_DEV_VIRTADDR sDevAddr);
+
+extern PVRSRV_ERROR
+DevmemIntGetFaultAddress(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE *psDevNode,
+                         DEVMEMINT_CTX *psDevMemContext,
+                         IMG_DEV_VIRTADDR *psFaultAddress);
+
+/*************************************************************************/ /*!
+@Function       DevmemIntRegisterPFNotifyKM
+@Description    Registers a PID to be notified when a page fault occurs on a
+                specific device memory context.
+@Input          psDevmemCtx    The context to be notified about.
+@Input          ui32PID        The PID of the process that would like to be
+                               notified.
+@Input          bRegister      If true, register. If false, de-register.
+@Return         PVRSRV_ERROR.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
+                            IMG_INT32     ui32PID,
+                            IMG_BOOL      bRegister);
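+
+/* A registration sketch (illustrative): a debugger-style process registers
+ * for fault notification on a context and later de-registers:
+ *
+ *     eError = DevmemIntRegisterPFNotifyKM(psDevmemCtx, i32MyPID, IMG_TRUE);
+ *     // ... on a page fault the process receives the debug signal ...
+ *     eError = DevmemIntRegisterPFNotifyKM(psDevmemCtx, i32MyPID, IMG_FALSE);
+ */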
+
+/*************************************************************************/ /*!
+@Function       DevmemIntPFNotify
+@Description    Notifies any processes that have registered themselves to be
+                notified when a page fault happens on a specific device memory
+                context.
+@Input          *psDevNode           The device node.
+@Input          ui64FaultedPCAddress The page catalogue address that faulted.
+@Input          sFaultAddress        The address that triggered the fault.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode,
+                               IMG_UINT64         ui64FaultedPCAddress,
+                               IMG_DEV_VIRTADDR   sFaultAddress);
+
+#if defined(PDUMP)
+/*
+ * DevmemIntPDumpSaveToFileVirtual()
+ *
+ * Writes out PDump "SAB" commands with the data found in memory at
+ * the given virtual address.
+ */
+extern PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+                                IMG_DEV_VIRTADDR sDevAddrStart,
+                                IMG_DEVMEM_SIZE_T uiSize,
+                                IMG_UINT32 uiArraySize,
+                                const IMG_CHAR *pszFilename,
+                                IMG_UINT32 ui32FileOffset,
+                                IMG_UINT32 ui32PDumpFlags);
+
+extern IMG_UINT32
+DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext);
+
+extern PVRSRV_ERROR
+DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection,
+                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                     IMG_CHAR *pszFileName,
+                     IMG_UINT32 ui32FileOffset,
+                     IMG_UINT32 ui32Width,
+                     IMG_UINT32 ui32Height,
+                     IMG_UINT32 ui32StrideInBytes,
+                     IMG_DEV_VIRTADDR sDevBaseAddr,
+                     DEVMEMINT_CTX *psDevMemContext,
+                     IMG_UINT32 ui32Size,
+                     PDUMP_PIXEL_FORMAT ePixelFormat,
+                     IMG_UINT32 ui32AddrMode,
+                     IMG_UINT32 ui32PDumpFlags);
+
+extern PVRSRV_ERROR
+DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection,
+                              PVRSRV_DEVICE_NODE *psDeviceNode,
+                              DEVMEMINT_CTX *psDevMemContext,
+                              IMG_UINT32 ui32Size,
+                              const IMG_CHAR *pszFileName,
+                              IMG_DEV_VIRTADDR sData,
+                              IMG_UINT32 ui32DataSize,
+                              IMG_UINT32 ui32LogicalWidth,
+                              IMG_UINT32 ui32LogicalHeight,
+                              IMG_UINT32 ui32PhysicalWidth,
+                              IMG_UINT32 ui32PhysicalHeight,
+                              PDUMP_PIXEL_FORMAT ePixFmt,
+                              IMG_MEMLAYOUT eMemLayout,
+                              IMG_FB_COMPRESSION eFBCompression,
+                              const IMG_UINT32 *paui32FBCClearColour,
+                              PDUMP_FBC_SWIZZLE eFBCSwizzle,
+                              IMG_DEV_VIRTADDR sHeader,
+                              IMG_UINT32 ui32HeaderSize,
+                              IMG_UINT32 ui32PDumpFlags);
+
+extern PVRSRV_ERROR
+DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection,
+                              PVRSRV_DEVICE_NODE *psDeviceNode,
+                              DEVMEMINT_CTX *psDevMemContext,
+                              IMG_UINT32 ui32Size,
+                              const IMG_CHAR *pszFileName,
+                              IMG_DEV_VIRTADDR sData,
+                              IMG_UINT32 ui32DataSize,
+                              IMG_UINT32 ui32ElementType,
+                              IMG_UINT32 ui32ElementCount,
+                              IMG_UINT32 ui32PDumpFlags);
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpSaveToFileVirtual)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+                                IMG_DEV_VIRTADDR sDevAddrStart,
+                                IMG_DEVMEM_SIZE_T uiSize,
+                                IMG_UINT32 uiArraySize,
+                                const IMG_CHAR *pszFilename,
+                                IMG_UINT32 ui32FileOffset,
+                                IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevmemCtx);
+	PVR_UNREFERENCED_PARAMETER(sDevAddrStart);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiArraySize);
+	PVR_UNREFERENCED_PARAMETER(pszFilename);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpBitmap)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection,
+                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                     IMG_CHAR *pszFileName,
+                     IMG_UINT32 ui32FileOffset,
+                     IMG_UINT32 ui32Width,
+                     IMG_UINT32 ui32Height,
+                     IMG_UINT32 ui32StrideInBytes,
+                     IMG_DEV_VIRTADDR sDevBaseAddr,
+                     DEVMEMINT_CTX *psDevMemContext,
+                     IMG_UINT32 ui32Size,
+                     PDUMP_PIXEL_FORMAT ePixelFormat,
+                     IMG_UINT32 ui32AddrMode,
+                     IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(pszFileName);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Width);
+	PVR_UNREFERENCED_PARAMETER(ui32Height);
+	PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes);
+	PVR_UNREFERENCED_PARAMETER(sDevBaseAddr);
+	PVR_UNREFERENCED_PARAMETER(psDevMemContext);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+	PVR_UNREFERENCED_PARAMETER(ePixelFormat);
+	PVR_UNREFERENCED_PARAMETER(ui32AddrMode);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpImageDescriptor)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection,
+                              PVRSRV_DEVICE_NODE *psDeviceNode,
+                              DEVMEMINT_CTX *psDevMemContext,
+                              IMG_UINT32 ui32Size,
+                              const IMG_CHAR *pszFileName,
+                              IMG_DEV_VIRTADDR sData,
+                              IMG_UINT32 ui32DataSize,
+                              IMG_UINT32 ui32LogicalWidth,
+                              IMG_UINT32 ui32LogicalHeight,
+                              IMG_UINT32 ui32PhysicalWidth,
+                              IMG_UINT32 ui32PhysicalHeight,
+                              PDUMP_PIXEL_FORMAT ePixFmt,
+                              IMG_MEMLAYOUT eMemLayout,
+                              IMG_FB_COMPRESSION eFBCompression,
+                              const IMG_UINT32 *paui32FBCClearColour,
+                              PDUMP_FBC_SWIZZLE eFBCSwizzle,
+                              IMG_DEV_VIRTADDR sHeader,
+                              IMG_UINT32 ui32HeaderSize,
+                              IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psDevMemContext);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+	PVR_UNREFERENCED_PARAMETER(pszFileName);
+	PVR_UNREFERENCED_PARAMETER(sData);
+	PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+	PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth);
+	PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight);
+	PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth);
+	PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight);
+	PVR_UNREFERENCED_PARAMETER(ePixFmt);
+	PVR_UNREFERENCED_PARAMETER(eMemLayout);
+	PVR_UNREFERENCED_PARAMETER(eFBCompression);
+	PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour);
+	PVR_UNREFERENCED_PARAMETER(eFBCSwizzle);
+	PVR_UNREFERENCED_PARAMETER(sHeader);
+	PVR_UNREFERENCED_PARAMETER(ui32HeaderSize);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpDataDescriptor)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection,
+                              PVRSRV_DEVICE_NODE *psDeviceNode,
+                              DEVMEMINT_CTX *psDevMemContext,
+                              IMG_UINT32 ui32Size,
+                              const IMG_CHAR *pszFileName,
+                              IMG_DEV_VIRTADDR sData,
+                              IMG_UINT32 ui32DataSize,
+                              IMG_UINT32 ui32ElementType,
+                              IMG_UINT32 ui32ElementCount,
+                              IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psDevMemContext);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+	PVR_UNREFERENCED_PARAMETER(pszFileName);
+	PVR_UNREFERENCED_PARAMETER(sData);
+	PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+	PVR_UNREFERENCED_PARAMETER(ui32ElementType);
+	PVR_UNREFERENCED_PARAMETER(ui32ElementCount);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#endif	/* PDUMP */
+
+PVRSRV_ERROR
+DevmemIntInit(void);
+
+PVRSRV_ERROR
+DevmemIntDeInit(void);
+
+PVRSRV_ERROR
+DevmemIntExportCtx(DEVMEMINT_CTX *psContext,
+                   PMR *psPMR,
+                   DEVMEMINT_CTX_EXPORT **ppsContextExport);
+
+PVRSRV_ERROR
+DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport);
+
+PVRSRV_ERROR
+DevmemIntAcquireRemoteCtx(PMR *psPMR,
+                          DEVMEMINT_CTX **ppsContext,
+                          IMG_HANDLE *phPrivData);
+
+#endif /* ifndef __DEVICEMEM_SERVER_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_server_utils.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_server_utils.h
new file mode 100644
index 0000000..484ce63
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_server_utils.h
@@ -0,0 +1,204 @@
+/**************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header file utilities that are specific to device memory functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "device.h"
+#include "pvrsrv_memallocflags.h"
+#include "pvrsrv.h"
+
+static INLINE PVRSRV_ERROR DevmemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode,
+											  PVRSRV_MEMALLOCFLAGS_T ulFlags,
+											  IMG_UINT32 *pui32Ret)
+{
+	IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags);
+	IMG_UINT32 ui32Ret;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags));
+
+	switch (ui32CPUCacheMode)
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT:
+
+			/*
+			 * If the system has no coherency but coherency has been requested
+			 * for both CPU and GPU, we currently have to fall back to uncached.
+			 *
+			 * Strictly this case should return an error, but as long as many
+			 * services allocations use both CPU/GPU coherency flags and rely
+			 * on the UNCACHED fallback, we have to leave it here.
+			 */
+			if ( (PVRSRV_GPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) &&
+				!(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) )
+			{
+				ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+			}
+			else
+			{
+				ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED;
+			}
+
+			break;
+
+		default:
+			PVR_LOG(("DevmemCPUCacheMode: Unknown CPU cache mode 0x%08x", ui32CPUCacheMode));
+			PVR_ASSERT(0);
+			/*
+				We should never get here, but if we do then setting the mode
+				to uncached is the safest thing to do.
+			*/
+			ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+			eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+			break;
+	}
+
+	*pui32Ret = ui32Ret;
+
+	return eError;
+}
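+
+/* Usage sketch (uiAllocFlags is a hypothetical flag word): resolve the
+ * effective CPU cache mode for an allocation request; note that even on
+ * PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE the output is still set (to UNCACHED),
+ * so the caller can decide whether to proceed or to fail the allocation:
+ *
+ *     IMG_UINT32 ui32CPUCacheMode;
+ *     PVRSRV_ERROR eErr = DevmemCPUCacheMode(psDeviceNode, uiAllocFlags,
+ *                                            &ui32CPUCacheMode);
+ */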
+
+static INLINE PVRSRV_ERROR DevmemDeviceCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode,
+												 PVRSRV_MEMALLOCFLAGS_T ulFlags,
+												 IMG_UINT32 *pui32Ret)
+{
+	IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags);
+	IMG_UINT32 ui32Ret;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags));
+
+	switch (ui32DeviceCacheMode)
+	{
+		case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT:
+
+			/*
+			 * If the system has no coherency but coherency has been requested
+			 * for both CPU and GPU, we currently have to fall back to uncached.
+			 *
+			 * Strictly this case should return an error, but as long as many
+			 * services allocations use both CPU/GPU coherency flags and rely
+			 * on the UNCACHED fallback, we have to leave it here.
+			 */
+			if ( (PVRSRV_CPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) &&
+				!(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) )
+			{
+				ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+			}
+			else
+			{
+				ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED;
+			}
+
+			break;
+
+		default:
+			PVR_LOG(("DevmemDeviceCacheMode: Unknown device cache mode 0x%08x", ui32DeviceCacheMode));
+			PVR_ASSERT(0);
+			/*
+				We should never get here, but if we do then setting the mode
+				to uncached is the safest thing to do.
+			*/
+			ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+			eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+			break;
+	}
+
+	*pui32Ret = ui32Ret;
+
+	return eError;
+}
+
+static INLINE IMG_BOOL DevmemCPUCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode,
+											   PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+	IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags);
+	IMG_BOOL bRet = IMG_FALSE;
+
+	PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags));
+
+	if (ui32CPUCacheMode == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+	{
+		bRet = PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig);
+	}
+	return bRet;
+}
+
+static INLINE IMG_BOOL DevmemDeviceCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode,
+												  PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+	IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags);
+	IMG_BOOL bRet = IMG_FALSE;
+
+	PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags));
+
+	if (ui32DeviceCacheMode == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT)
+	{
+		bRet = PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig);
+	}
+	return bRet;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_typedefs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_typedefs.h
new file mode 100644
index 0000000..ffcc9ad4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_typedefs.h
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Client side part of device memory management -- this file
+                is forked from new_devmem_allocation.h as this one has to
+                reside in the top level include so that client code is able
+                to make use of the typedefs.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_TYPEDEFS_H
+#define DEVICEMEM_TYPEDEFS_H
+
+#include <powervr/mem_types.h>
+#include "img_types.h"
+#include "pvrsrv_memallocflags.h"
+
+typedef struct _DEVMEM_CONTEXT_ DEVMEM_CONTEXT;		/*!< Convenience typedef for struct _DEVMEM_CONTEXT_ */
+typedef struct _DEVMEM_HEAP_ DEVMEM_HEAP;			/*!< Convenience typedef for struct _DEVMEM_HEAP_ */
+typedef struct _DEVMEM_MEMDESC_ DEVMEM_MEMDESC;		/*!< Convenience typedef for struct _DEVMEM_MEMDESC_ */
+typedef struct _DEVMEM_PAGELIST_ DEVMEM_PAGELIST;	/*!< Convenience typedef for struct _DEVMEM_PAGELIST_ */
+typedef PVRSRV_MEMALLOCFLAGS_T DEVMEM_FLAGS_T;		/*!< Convenience typedef for PVRSRV_MEMALLOCFLAGS_T */
+
+typedef IMG_HANDLE DEVMEM_EXPORTHANDLE;             /*!< Typedef for DeviceMem Export Handle */
+typedef IMG_UINT64 DEVMEM_EXPORTKEY;                /*!< Typedef for DeviceMem Export Key */
+typedef IMG_DEVMEM_SIZE_T DEVMEM_SIZE_T;            /*!< Typedef for DeviceMem SIZE_T */
+typedef IMG_DEVMEM_LOG2ALIGN_T DEVMEM_LOG2ALIGN_T;  /*!< Typedef for DeviceMem LOG2 Alignment */
+
+typedef struct _DEVMEMX_PHYS_MEMDESC_ DEVMEMX_PHYSDESC;    /*!< Convenience typedef for DevmemX physical */
+typedef struct _DEVMEMX_VIRT_MEMDESC_ DEVMEMX_VIRTDESC;    /*!< Convenience typedef for DevmemX virtual */
+
+/*! Calling code needs all the info in this struct to be able to pass it around */
+typedef struct
+{
+    /*! A handle to the PMR. */
+    IMG_HANDLE hPMRExportHandle;
+    /*! The "key" to prove we have authorization to use this PMR */
+    IMG_UINT64 uiPMRExportPassword;
+    /*! Size and alignment properties for this PMR.  Note, these
+       numbers are not trusted in kernel, but we need to cache them
+       client-side in order to allocate from the VM arena.  The kernel
+       will know the actual alignment and size of the PMR and thus
+       would prevent client code from breaching security here.  Ditto
+       for physmem granularity (aka page size) if this is different
+       from alignment */
+    IMG_DEVMEM_SIZE_T uiSize;
+    /*! We call this "contiguity guarantee" to be more precise than
+       calling it "alignment" or "page size", terms which may seem
+       similar but have different emphasis.  The number reported here
+       is the minimum contiguity guarantee from the creator of the
+       PMR.  Now, there is no requirement to allocate that coarsely
+       from the RA.  The alignment given to the RA simply needs to be
+       at least as coarse as the device page size for the heap we
+       ultimately intend to map into.  What is important is that the
+       device MMU data page size is not greater than the minimum
+       contiguity guarantee from the PMR.  This value is reported to
+       the client in order that it can choose to make early checks and
+       perhaps decide which heap (in a variable page size scenario) it
+       would be safe to map this PMR into.  For convenience, the
+       client may choose to use this argument as the alignment of the
+       virtual range he chooses to allocate, but this is _not_
+       necessary and in many cases would be able to get away with a
+       finer alignment, should the heap into which this PMR will be
+       mapped support it. */
+    IMG_DEVMEM_LOG2ALIGN_T uiLog2ContiguityGuarantee;
+} DEVMEM_EXPORTCOOKIE;
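+
+/* The early check described above might look like this sketch, where
+ * uiLog2HeapPageSize is a hypothetical per-heap value and psCookie points at
+ * a DEVMEM_EXPORTCOOKIE:
+ *
+ *     if (uiLog2HeapPageSize > psCookie->uiLog2ContiguityGuarantee)
+ *     {
+ *         // Unsafe: the device MMU data page size would exceed the
+ *         // minimum contiguity the PMR creator guarantees.
+ *         return PVRSRV_ERROR_INVALID_PARAMS;
+ *     }
+ */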
+
+/* Enum that describes the operation associated with changing sparse memory */
+typedef enum Resize {
+	SPARSE_RESIZE_NONE = 0,
+
+	/* This should be set to indicate the change needs allocation */
+	SPARSE_RESIZE_ALLOC = 1,
+
+	/* This should be set to indicate the change needs free */
+	SPARSE_RESIZE_FREE = 2,
+
+	SPARSE_RESIZE_BOTH = ((IMG_UINT8)SPARSE_RESIZE_ALLOC | (IMG_UINT8)SPARSE_RESIZE_FREE),
+
+	/* This should be set to silently swap the underlying physical memory
+	 * without disturbing its device or CPU virtual maps.
+	 * This flag is not supported in the case of PDUMP and could lead to a
+	 * PDUMP panic when used */
+	SPARSE_REMAP_MEM = 4,
+
+	/* Should be set to make the sparse changes appear in the CPU virtual map */
+	SPARSE_MAP_CPU_ADDR = 8
+} SPARSE_MEM_RESIZE_FLAGS;
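+
+/* These values combine as a bitmask; e.g. a change that both allocates and
+ * frees pages while also updating the CPU view would pass, as a sketch,
+ * (SPARSE_MEM_RESIZE_FLAGS)(SPARSE_RESIZE_BOTH | SPARSE_MAP_CPU_ADDR).
+ * SPARSE_REMAP_MEM should be avoided on PDUMP-enabled builds, as noted
+ * above. */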
+
+/* To be used with all the sparse allocations that get mapped to CPU Virtual space
+ * The sparse allocation CPU mapping is torn down and re-mapped every time the
+ * sparse allocation layout changes */
+#define PVRSRV_UNMAP_ON_SPARSE_CHANGE 1
+
+/* To use with DevmemSubAllocate() as the default factor if no
+ * over-allocation is desired. */
+#define DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER 1
+
+/* Defines the max length for PMR, MemDesc, Device memory
+ * History and RI debug annotations stored in memory, including
+ * the null terminator. */
+#define DEVMEM_ANNOTATION_MAX_LEN (PVR_ANNOTATION_MAX_LEN + 1)
+
+#endif /* #ifndef DEVICEMEM_TYPEDEFS_H */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_utils.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_utils.c
new file mode 100644
index 0000000..7ab62b0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_utils.c
@@ -0,0 +1,1068 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management internal utility functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally by device memory management
+                code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "allocmem.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "ra.h"
+#include "devicemem_utils.h"
+#include "client_mm_bridge.h"
+
+/*
+	SVM heap management support functions for CPU (un)mapping
+ */
+#define DEVMEM_MAP_SVM_USER_MANAGED_RETRY				2
+
+static inline PVRSRV_ERROR
+_DevmemCPUMapSVMKernelManaged(DEVMEM_HEAP *psHeap,
+		DEVMEM_IMPORT *psImport,
+		IMG_UINT64 *ui64MapAddress)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT64 ui64SvmMapAddr;
+	IMG_UINT64 ui64SvmMapAddrEnd;
+	IMG_UINT64 ui64SvmHeapAddrEnd;
+
+	/* SVM heap management is always XXX_KERNEL_MANAGED unless we
+	   have triggered the fall-back code-path, in which case we
+	   should not be calling into this function */
+	PVR_ASSERT(psHeap->eHeapType == DEVMEM_HEAP_TYPE_KERNEL_MANAGED);
+
+	/* By acquiring the CPU virtual address here, we essentially
+	   lock down the virtual address for the life-cycle of the
+	   allocation, until a de-allocation request comes in. Thus the
+	   allocation is guaranteed not to change its virtual address on
+	   the CPU during its life-time.
+	   NOTE: The import might already have been CPU mapped before now;
+	   normally this is not a problem, see the fall-back path */
+	eError = _DevmemImportStructCPUMap(psImport);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Unable to CPU map (lock-down) device memory for SVM use",
+				__func__));
+		eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED;
+		goto failSVM;
+	}
+
+	/* Supplied kernel mmap virtual address is also device virtual address;
+	   calculate the heap & kernel supplied mmap virtual address limits */
+	ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr;
+	ui64SvmHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize;
+	ui64SvmMapAddrEnd = ui64SvmMapAddr + psImport->uiSize;
+	PVR_ASSERT(ui64SvmMapAddr != (IMG_UINT64)0);
+
+	/* SVM limit test may fail if processor has more virtual address bits than device */
+	if ((ui64SvmMapAddr >= ui64SvmHeapAddrEnd || ui64SvmMapAddrEnd > ui64SvmHeapAddrEnd) ||
+		(ui64SvmMapAddr & ~(ui64SvmHeapAddrEnd - 1)))
+	{
+		/* Unmap incompatible SVM virtual address, this
+		   may not release address if it was elsewhere
+		   CPU Mapped before call into this function */
+		_DevmemImportStructCPUUnmap(psImport);
+
+		/* Flag incompatible SVM mapping */
+		eError = PVRSRV_ERROR_BAD_MAPPING;
+		goto failSVM;
+	}
+
+	*ui64MapAddress = ui64SvmMapAddr;
+	failSVM:
+	/* either OK, MAP_FAILED or BAD_MAPPING */
+	return eError;
+}
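+
+/* Worked example for the limit test above (numbers are illustrative): with a
+ * heap at base 0x0 of size 1 << 40, ui64SvmHeapAddrEnd is 1 << 40. A CPU
+ * mapping that landed at 0x7f0000000000 (a typical 47-bit user address)
+ * fails both the range test and the alignment-mask test, so the import is
+ * unmapped and PVRSRV_ERROR_BAD_MAPPING triggers the user-managed fall-back
+ * in _DevmemImportStructDevMapSVM().
+ */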
+
+static inline void
+_DevmemCPUUnmapSVMKernelManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+	PVR_UNREFERENCED_PARAMETER(psHeap);
+	_DevmemImportStructCPUUnmap(psImport);
+}
+
+static inline PVRSRV_ERROR
+_DevmemCPUMapSVMUserManaged(DEVMEM_HEAP *psHeap,
+		DEVMEM_IMPORT *psImport,
+		IMG_UINT uiAlign,
+		IMG_UINT64 *ui64MapAddress)
+{
+	RA_LENGTH_T uiAllocatedSize;
+	RA_BASE_T uiAllocatedAddr;
+	IMG_UINT64 ui64SvmMapAddr;
+	IMG_UINT uiRetry = 0;
+	PVRSRV_ERROR eError;
+
+	/* If SVM heap management has transitioned to XXX_USER_MANAGED,
+	   this is essentially a fall back approach that ensures we
+	   continue to satisfy SVM alloc. This approach is not without
+	   hazards in that we may specify a virtual address that is
+	   already in use by the user process */
+	PVR_ASSERT(psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED);
+
+	/* Normally, for SVM heap allocations, CPUMap _must_ be done
+	   before DevMap; ideally the initial CPUMap should be done by
+	   SVM functions though this is not a hard requirement as long
+	   as the prior elsewhere obtained CPUMap virtual address meets
+	   SVM address requirements. This is a fall-back code-pathway
+	   so we have to test that this assumption holds before we
+	   progress any further */
+	OSLockAcquire(psImport->sCPUImport.hLock);
+
+	if (psImport->sCPUImport.ui32RefCount)
+	{
+		/* This SVM heap allocation is already CPU mapped; the virtual
+		   address obtained elsewhere earlier is responsible for the
+		   XXX_KERNEL_MANAGED failure above. As we are not responsible
+		   for that mapping, we cannot progress any further and need
+		   to fail */
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Previously obtained CPU map address not SVM compatible"
+				, __func__));
+
+		/* Revert SVM heap to DEVMEM_HEAP_TYPE_KERNEL_MANAGED */
+		psHeap->eHeapType = DEVMEM_HEAP_TYPE_KERNEL_MANAGED;
+		PVR_DPF((PVR_DBG_MESSAGE,
+				"%s: Reverting SVM heap back to kernel managed",
+				__func__));
+
+		OSLockRelease(psImport->sCPUImport.hLock);
+
+		/* Do we need a more specific error code here? */
+		eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+		goto failSVM;
+	}
+
+	OSLockRelease(psImport->sCPUImport.hLock);
+
+	do
+	{
+		/* Next we proceed to instruct the kernel to use the RA_Alloc supplied
+		   virtual address to map-in this SVM import suballocation; there is no
+		   guarantee that this RA_Alloc virtual address may not collide with an
+		   already in-use VMA range in the process */
+		eError = RA_Alloc(psHeap->psQuantizedVMRA,
+				psImport->uiSize,
+				RA_NO_IMPORT_MULTIPLIER,
+				0, /* flags: this RA doesn't use flags */
+				uiAlign,
+				"SVM_Virtual_Alloc",
+				&uiAllocatedAddr,
+				&uiAllocatedSize,
+				NULL /* don't care about per-import priv data */);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Cannot RA allocate SVM compatible address",
+					__func__));
+			goto failSVM;
+		}
+
+		/* No reason for allocated virtual size to be different from
+		   the PMR's size */
+		psImport->sCPUImport.pvCPUVAddr = (void*)(uintptr_t)uiAllocatedAddr;
+		PVR_ASSERT(uiAllocatedSize == psImport->uiSize);
+
+		/* Map the import or allocation using the RA_Alloc virtual address;
+		   the kernel may fail the request if the supplied virtual address
+		   is already in-use in which case we re-try using another virtual
+		   address obtained from the RA_Alloc */
+		eError = _DevmemImportStructCPUMap(psImport);
+		if (eError != PVRSRV_OK)
+		{
+			/* For now we simply discard a failed RA_Alloc()-obtained
+			   virtual address (virtual space is plentiful); this
+			   prevents us from re-using it and essentially blacklists
+			   the address from future SVM consideration. We exit the
+			   fall-back attempt if the retry count exceeds the
+			   fall-back retry limit */
+			if (uiRetry++ > DEVMEM_MAP_SVM_USER_MANAGED_RETRY)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Cannot find SVM compatible address, bad mapping",
+						__func__));
+				eError = PVRSRV_ERROR_BAD_MAPPING;
+				goto failSVM;
+			}
+		}
+		else
+		{
+			/* Found compatible SVM virtual address, set as device virtual address */
+			ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr;
+		}
+	} while (eError != PVRSRV_OK);
+
+	*ui64MapAddress = ui64SvmMapAddr;
+	failSVM:
+	return eError;
+}
+
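+/*
+	Unmap a user-managed SVM import and return its virtual address
+	to the RA
+ */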
+static inline void
+_DevmemCPUUnmapSVMUserManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+	RA_BASE_T uiAllocatedAddr;
+
+	/* We only free SVM-compatible addresses; all addresses in the
+	   blacklist are essentially excluded from future RA_Alloc calls */
+	uiAllocatedAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+	RA_Free(psHeap->psQuantizedVMRA, uiAllocatedAddr);
+
+	_DevmemImportStructCPUUnmap(psImport);
+}
+
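+/*
+	Map an SVM import, dispatching on the heap management type
+ */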
+static inline PVRSRV_ERROR
+_DevmemImportStructDevMapSVM(DEVMEM_HEAP *psHeap,
+		DEVMEM_IMPORT *psImport,
+		IMG_UINT uiAlign,
+		IMG_UINT64 *ui64MapAddress)
+{
+	PVRSRV_ERROR eError;
+
+	switch (psHeap->eHeapType)
+	{
+	case DEVMEM_HEAP_TYPE_KERNEL_MANAGED:
+		eError = _DevmemCPUMapSVMKernelManaged(psHeap,
+				psImport,
+				ui64MapAddress);
+		if (eError == PVRSRV_ERROR_BAD_MAPPING)
+		{
+			/* If the SVM map address is outside of SVM heap limits,
+			   change heap type to DEVMEM_HEAP_TYPE_USER_MANAGED */
+			psHeap->eHeapType = DEVMEM_HEAP_TYPE_USER_MANAGED;
+
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s: Kernel managed SVM heap is now user managed",
+					__func__));
+
+			/* Retry using user managed fall-back approach */
+			eError = _DevmemCPUMapSVMUserManaged(psHeap,
+					psImport,
+					uiAlign,
+					ui64MapAddress);
+		}
+		break;
+
+	case DEVMEM_HEAP_TYPE_USER_MANAGED:
+		eError = _DevmemCPUMapSVMUserManaged(psHeap,
+				psImport,
+				uiAlign,
+				ui64MapAddress);
+		break;
+
+	default:
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		break;
+	}
+
+	return eError;
+}
+
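+/*
+	Unmap an SVM import, dispatching on the heap management type
+ */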
+static inline void
+_DevmemImportStructDevUnmapSVM(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+	switch (psHeap->eHeapType)
+	{
+	case DEVMEM_HEAP_TYPE_KERNEL_MANAGED:
+		_DevmemCPUUnmapSVMKernelManaged(psHeap, psImport);
+		break;
+
+	case DEVMEM_HEAP_TYPE_USER_MANAGED:
+		_DevmemCPUUnmapSVMUserManaged(psHeap, psImport);
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+	The Devmem import structure is the structure we use
+	to manage memory that is "imported" (which is page
+	granular) from the server into our process; this
+	includes allocations.
+
+	This allows memory to be imported without requiring
+	any CPU or device mapping. Memory can then be mapped
+	into the device or CPU on demand, but neither is
+	required.
+ */
+
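+/*
+	Take a reference on an import
+ */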
+IMG_INTERNAL
+void _DevmemImportStructAcquire(DEVMEM_IMPORT *psImport)
+{
+	IMG_INT iRefCount = OSAtomicIncrement(&psImport->hRefCount);
+	PVR_UNREFERENCED_PARAMETER(iRefCount);
+	PVR_ASSERT(iRefCount != 1);
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psImport,
+			iRefCount-1,
+			iRefCount);
+}
+
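+/*
+	Release a reference on an import; the import is destroyed on the
+	last release
+ */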
+IMG_INTERNAL
+IMG_BOOL _DevmemImportStructRelease(DEVMEM_IMPORT *psImport)
+{
+	IMG_INT iRefCount = OSAtomicDecrement(&psImport->hRefCount);
+	PVR_ASSERT(iRefCount >= 0);
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psImport,
+			iRefCount+1,
+			iRefCount);
+
+	if (iRefCount == 0)
+	{
+		BridgePMRUnrefPMR(GetBridgeHandle(psImport->hDevConnection),
+				psImport->hPMR);
+		OSLockDestroy(psImport->sCPUImport.hLock);
+		OSLockDestroy(psImport->sDeviceImport.hLock);
+		OSLockDestroy(psImport->hLock);
+		OSFreeMem(psImport);
+
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
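+/*
+	Discard a created but uninitialised import structure
+ */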
+IMG_INTERNAL
+void _DevmemImportDiscard(DEVMEM_IMPORT *psImport)
+{
+	PVR_ASSERT(OSAtomicRead(&psImport->hRefCount) == 0);
+	OSLockDestroy(psImport->sCPUImport.hLock);
+	OSLockDestroy(psImport->sDeviceImport.hLock);
+	OSLockDestroy(psImport->hLock);
+	OSFreeMem(psImport);
+}
+
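+/*
+	Allocate a zeroed MemDesc and create its locks
+ */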
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc)
+{
+	DEVMEM_MEMDESC *psMemDesc;
+	PVRSRV_ERROR eError;
+
+	/* Must be zeroed in case it needs to be freed before it is initialised */
+	psMemDesc = OSAllocZMem(sizeof(DEVMEM_MEMDESC));
+	if (psMemDesc == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto failAlloc;
+	}
+
+	eError = OSLockCreate(&psMemDesc->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMDLock;
+	}
+
+	eError = OSLockCreate(&psMemDesc->sDeviceMemDesc.hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto failDMDLock;
+	}
+
+	eError = OSLockCreate(&psMemDesc->sCPUMemDesc.hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto failCMDLock;
+	}
+
+	OSAtomicWrite(&psMemDesc->hRefCount, 0);
+
+	*ppsMemDesc = psMemDesc;
+
+	return PVRSRV_OK;
+
+	failCMDLock:
+	OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+	failDMDLock:
+	OSLockDestroy(psMemDesc->hLock);
+	failMDLock:
+	OSFreeMem(psMemDesc);
+	failAlloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+/*
+	Init the MemDesc structure
+ */
+IMG_INTERNAL
+void _DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc,
+		IMG_DEVMEM_OFFSET_T uiOffset,
+		DEVMEM_IMPORT *psImport,
+		IMG_DEVMEM_SIZE_T uiSize)
+{
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psMemDesc,
+			0,
+			1);
+
+	psMemDesc->psImport = psImport;
+	psMemDesc->uiOffset = uiOffset;
+
+	psMemDesc->sDeviceMemDesc.ui32RefCount = 0;
+	psMemDesc->sCPUMemDesc.ui32RefCount = 0;
+	psMemDesc->uiAllocSize = uiSize;
+	psMemDesc->hPrivData = NULL;
+	psMemDesc->ui32AllocationIndex = DEVICEMEM_HISTORY_ALLOC_INDEX_NONE;
+
+	OSAtomicWrite(&psMemDesc->hRefCount, 1);
+}
+
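+/*
+	Take a reference on a MemDesc
+ */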
+IMG_INTERNAL
+void _DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc)
+{
+	IMG_INT iRefCount = 0;
+
+	iRefCount = OSAtomicIncrement(&psMemDesc->hRefCount);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psMemDesc,
+			iRefCount-1,
+			iRefCount);
+
+	PVR_UNREFERENCED_PARAMETER(iRefCount);
+}
+
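+/*
+	Release a reference on a MemDesc; the MemDesc is destroyed on the
+	last release
+ */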
+IMG_INTERNAL
+IMG_BOOL _DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc)
+{
+	IMG_INT iRefCount;
+	PVR_ASSERT(psMemDesc != NULL);
+
+	iRefCount = OSAtomicDecrement(&psMemDesc->hRefCount);
+	PVR_ASSERT(iRefCount >= 0);
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psMemDesc,
+			iRefCount+1,
+			iRefCount);
+
+	if (iRefCount == 0)
+	{
+		if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SUBALLOCATABLE)
+		{
+			/* As soon as the first sub-allocation on the psImport is freed
+			 * we might get dirty memory when reusing it.
+			 * We have to delete the ZEROED, CLEAN & POISONED flag */
+
+			psMemDesc->psImport->uiProperties &=
+					~(DEVMEM_PROPERTIES_IMPORT_IS_ZEROED |
+							DEVMEM_PROPERTIES_IMPORT_IS_CLEAN |
+							DEVMEM_PROPERTIES_IMPORT_IS_POISONED);
+
+			RA_Free(psMemDesc->psImport->sDeviceImport.psHeap->psSubAllocRA,
+					psMemDesc->psImport->sDeviceImport.sDevVAddr.uiAddr +
+					psMemDesc->uiOffset);
+		}
+		else
+		{
+			_DevmemImportStructRelease(psMemDesc->psImport);
+		}
+
+		OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
+		OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+		OSLockDestroy(psMemDesc->hLock);
+		OSFreeMem(psMemDesc);
+
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
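+/*
+	Discard a created but uninitialised MemDesc structure
+ */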
+IMG_INTERNAL
+void _DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVR_ASSERT(OSAtomicRead(&psMemDesc->hRefCount) == 0);
+
+	OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
+	OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+	OSLockDestroy(psMemDesc->hLock);
+	OSFreeMem(psMemDesc);
+}
+
+
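+/*
+	Validate the size, alignment and flags for an allocation
+ */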
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize,
+		IMG_DEVMEM_ALIGN_T uiAlign,
+		DEVMEM_FLAGS_T *puiFlags)
+{
+	if ((*puiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &&
+			(*puiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Zero on Alloc and Poison on Alloc are mutually exclusive.",
+				__func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (uiAlign & (uiAlign-1))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: The requested alignment is not a power of two.",
+				__func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (uiSize == 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Please request a non-zero size value.",
+				__func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* If zero flag is set we have to have write access to the page. */
+	if (PVRSRV_CHECK_ZERO_ON_ALLOC(*puiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(*puiFlags))
+	{
+		(*puiFlags) |= PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+				PVRSRV_MEMALLOCFLAG_CPU_READABLE;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*
+	Allocate and init an import structure
+ */
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection,
+		DEVMEM_IMPORT **ppsImport)
+{
+	DEVMEM_IMPORT *psImport;
+	PVRSRV_ERROR eError;
+
+	psImport = OSAllocMem(sizeof *psImport);
+	if (psImport == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Setup some known bad values for things we don't have yet */
+	psImport->sDeviceImport.hReservation = LACK_OF_RESERVATION_POISON;
+	psImport->sDeviceImport.hMapping = LACK_OF_MAPPING_POISON;
+	psImport->sDeviceImport.psHeap = NULL;
+	psImport->sDeviceImport.bMapped = IMG_FALSE;
+
+	eError = OSLockCreate(&psImport->sDeviceImport.hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto failDIOSLockCreate;
+	}
+
+	psImport->sCPUImport.hOSMMapData = NULL;
+	psImport->sCPUImport.pvCPUVAddr = NULL;
+
+	eError = OSLockCreate(&psImport->sCPUImport.hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto failCIOSLockCreate;
+	}
+
+	/* Set up common elements */
+	psImport->hDevConnection = hDevConnection;
+
+	/* Setup properties */
+	psImport->uiProperties = 0;
+
+	/* Setup refcounts */
+	psImport->sDeviceImport.ui32RefCount = 0;
+	psImport->sCPUImport.ui32RefCount = 0;
+	OSAtomicWrite(&psImport->hRefCount, 0);
+
+	/* Create the lock */
+	eError = OSLockCreate(&psImport->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto failILockAlloc;
+	}
+
+	*ppsImport = psImport;
+
+	return PVRSRV_OK;
+
+	failILockAlloc:
+	OSLockDestroy(psImport->sCPUImport.hLock);
+	failCIOSLockCreate:
+	OSLockDestroy(psImport->sDeviceImport.hLock);
+	failDIOSLockCreate:
+	OSFreeMem(psImport);
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+/*
+	Initialise the import structure
+ */
+IMG_INTERNAL
+void _DevmemImportStructInit(DEVMEM_IMPORT *psImport,
+		IMG_DEVMEM_SIZE_T uiSize,
+		IMG_DEVMEM_ALIGN_T uiAlign,
+		DEVMEM_FLAGS_T uiFlags,
+		IMG_HANDLE hPMR,
+		DEVMEM_PROPERTIES_T uiProperties)
+{
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psImport,
+			0,
+			1);
+
+	psImport->uiSize = uiSize;
+	psImport->uiAlign = uiAlign;
+	psImport->uiFlags = uiFlags;
+	psImport->hPMR = hPMR;
+	psImport->uiProperties = uiProperties;
+	OSAtomicWrite(&psImport->hRefCount, 1);
+}
+
+/*
+	Map an import to the device
+ */
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
+		IMG_BOOL bMap,
+		DEVMEM_IMPORT *psImport,
+		IMG_UINT64 ui64OptionalMapAddress)
+{
+	DEVMEM_DEVICE_IMPORT *psDeviceImport;
+	RA_BASE_T uiAllocatedAddr;
+	RA_LENGTH_T uiAllocatedSize;
+	IMG_DEV_VIRTADDR sBase;
+	IMG_HANDLE hReservation;
+	PVRSRV_ERROR eError;
+	IMG_UINT uiAlign;
+	IMG_BOOL bDestroyed = IMG_FALSE;
+
+	/* Round the provided import alignment to the configured heap alignment */
+	uiAlign = 1ULL << psHeap->uiLog2ImportAlignment;
+	uiAlign = (psImport->uiAlign + uiAlign - 1) & ~(uiAlign-1);
+
+	psDeviceImport = &psImport->sDeviceImport;
+
+	OSLockAcquire(psDeviceImport->hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psImport,
+			psDeviceImport->ui32RefCount,
+			psDeviceImport->ui32RefCount+1);
+
+	if (psDeviceImport->ui32RefCount++ == 0)
+	{
+		_DevmemImportStructAcquire(psImport);
+
+		OSAtomicIncrement(&psHeap->hImportCount);
+
+		if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags))
+		{
+			/* SVM (shared virtual memory) imports or allocations
+			   always need to acquire the CPU virtual address first,
+			   as that address is used to map the allocation into the
+			   device virtual address space; i.e. the virtual address
+			   of the allocation must be identical for the CPU and
+			   the GPU. */
+			eError = _DevmemImportStructDevMapSVM(psHeap,
+					psImport,
+					uiAlign,
+					&ui64OptionalMapAddress);
+			if (eError != PVRSRV_OK)
+			{
+				goto failVMRAAlloc;
+			}
+		}
+
+		if (ui64OptionalMapAddress == 0)
+		{
+			if (psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED ||
+					psHeap->eHeapType == DEVMEM_HEAP_TYPE_KERNEL_MANAGED)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED ?
+								"%s: Heap is user managed, please use PVRSRVMapToDeviceAddress().":
+								"%s: Heap is kernel managed, use the right allocation flags (e.g. SVM).",
+								__func__));
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto failVMRAAlloc;
+			}
+			psHeap->eHeapType = DEVMEM_HEAP_TYPE_RA_MANAGED;
+
+			/* Allocate space in the VM */
+			eError = RA_Alloc(psHeap->psQuantizedVMRA,
+					psImport->uiSize,
+					RA_NO_IMPORT_MULTIPLIER,
+					0, /* flags: this RA doesn't use flags*/
+					uiAlign,
+					"Virtual_Alloc",
+					&uiAllocatedAddr,
+					&uiAllocatedSize,
+					NULL /* don't care about per-import priv data */
+			);
+			if (PVRSRV_OK != eError)
+			{
+				eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM;
+				goto failVMRAAlloc;
+			}
+
+			/* No reason for the allocated virtual size to be different from
+			   the PMR's size */
+			PVR_ASSERT(uiAllocatedSize == psImport->uiSize);
+
+			sBase.uiAddr = uiAllocatedAddr;
+
+		}
+		else
+		{
+			IMG_UINT64 uiHeapAddrEnd;
+
+			switch (psHeap->eHeapType)
+			{
+			case DEVMEM_HEAP_TYPE_UNKNOWN:
+				/* DEVMEM_HEAP_TYPE_USER_MANAGED can apply to _any_
+				   heap and can only be determined here. This heap
+				   type transitions from DEVMEM_HEAP_TYPE_UNKNOWN
+				   to DEVMEM_HEAP_TYPE_USER_MANAGED on 1st alloc */
+				psHeap->eHeapType = DEVMEM_HEAP_TYPE_USER_MANAGED;
+				break;
+
+			case DEVMEM_HEAP_TYPE_USER_MANAGED:
+			case DEVMEM_HEAP_TYPE_KERNEL_MANAGED:
+				if (!psHeap->uiSize)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED ?
+									"%s: Heap DEVMEM_HEAP_TYPE_USER_MANAGED is disabled.":
+									"%s: Heap DEVMEM_HEAP_TYPE_KERNEL_MANAGED is disabled."
+									, __func__));
+					eError = PVRSRV_ERROR_INVALID_HEAP;
+					goto failVMRAAlloc;
+				}
+				break;
+
+			case DEVMEM_HEAP_TYPE_RA_MANAGED:
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: This heap is managed by an RA, please use PVRSRVMapToDevice()"
+						" and don't use allocation flags that assume differently (e.g. SVM)."
+						, __func__));
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto failVMRAAlloc;
+
+			default:
+				break;
+			}
+
+			/* Ensure supplied ui64OptionalMapAddress is within heap range */
+			uiHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize;
+			if (ui64OptionalMapAddress >= uiHeapAddrEnd ||
+					ui64OptionalMapAddress + psImport->uiSize > uiHeapAddrEnd)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: ui64OptionalMapAddress %p is outside of heap limits <%p:%p>."
+						, __func__
+						, (void*)(uintptr_t)ui64OptionalMapAddress
+						, (void*)(uintptr_t)psHeap->sBaseAddress.uiAddr
+						, (void*)(uintptr_t)uiHeapAddrEnd));
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto failVMRAAlloc;
+			}
+
+			if (ui64OptionalMapAddress & ((1 << psHeap->uiLog2Quantum) - 1))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Invalid address to map to. Please provide an "
+						"address aligned to the heap's page size."
+						, __func__));
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto failVMRAAlloc;
+			}
+
+			uiAllocatedAddr = ui64OptionalMapAddress;
+
+			if (psImport->uiSize & ((1 << psHeap->uiLog2Quantum) - 1))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Invalid heap to map to. "
+						"Please choose a heap that can handle smaller page sizes."
+						, __func__));
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto failVMRAAlloc;
+			}
+			uiAllocatedSize = psImport->uiSize;
+			sBase.uiAddr = uiAllocatedAddr;
+		}
+
+		/* Setup page tables for the allocated VM space */
+		eError = BridgeDevmemIntReserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+				psHeap->hDevMemServerHeap,
+				sBase,
+				uiAllocatedSize,
+				&hReservation);
+		if (eError != PVRSRV_OK)
+		{
+			goto failReserve;
+		}
+
+		if (bMap)
+		{
+			DEVMEM_FLAGS_T uiMapFlags;
+
+			uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK;
+
+			/* Actually map the PMR to allocated VM space */
+			eError = BridgeDevmemIntMapPMR(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+					psHeap->hDevMemServerHeap,
+					hReservation,
+					psImport->hPMR,
+					uiMapFlags,
+					&psDeviceImport->hMapping);
+			if (eError != PVRSRV_OK)
+			{
+				goto failMap;
+			}
+			psDeviceImport->bMapped = IMG_TRUE;
+		}
+
+		/* Setup device mapping specific parts of the mapping info */
+		psDeviceImport->hReservation = hReservation;
+		psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr;
+		psDeviceImport->psHeap = psHeap;
+	}
+	else
+	{
+		/*
+			Check that we've been asked to map it into the
+			same heap 2nd time around
+		 */
+		if (psHeap != psDeviceImport->psHeap)
+		{
+			eError = PVRSRV_ERROR_INVALID_HEAP;
+			goto failParams;
+		}
+	}
+	OSLockRelease(psDeviceImport->hLock);
+
+	return PVRSRV_OK;
+
+	failMap:
+	BridgeDevmemIntUnreserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+			hReservation);
+	failReserve:
+	if (ui64OptionalMapAddress == 0)
+	{
+		RA_Free(psHeap->psQuantizedVMRA,
+				uiAllocatedAddr);
+	}
+	failVMRAAlloc:
+	bDestroyed = _DevmemImportStructRelease(psImport);
+	OSAtomicDecrement(&psHeap->hImportCount);
+	failParams:
+	if (!bDestroyed)
+	{
+		psDeviceImport->ui32RefCount--;
+		OSLockRelease(psDeviceImport->hLock);
+	}
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+	Unmap an import from the Device
+ */
+IMG_INTERNAL
+void _DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_DEVICE_IMPORT *psDeviceImport;
+
+	psDeviceImport = &psImport->sDeviceImport;
+
+	OSLockAcquire(psDeviceImport->hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psImport,
+			psDeviceImport->ui32RefCount,
+			psDeviceImport->ui32RefCount-1);
+
+	if (--psDeviceImport->ui32RefCount == 0)
+	{
+		DEVMEM_HEAP *psHeap = psDeviceImport->psHeap;
+
+		if (psDeviceImport->bMapped)
+		{
+			eError = BridgeDevmemIntUnmapPMR(GetBridgeHandle(psImport->hDevConnection),
+					psDeviceImport->hMapping);
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+
+		eError = BridgeDevmemIntUnreserveRange(GetBridgeHandle(psImport->hDevConnection),
+				psDeviceImport->hReservation);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		psDeviceImport->bMapped = IMG_FALSE;
+		psDeviceImport->hMapping = LACK_OF_MAPPING_POISON;
+		psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON;
+
+		if (psHeap->eHeapType == DEVMEM_HEAP_TYPE_RA_MANAGED)
+		{
+			RA_Free(psHeap->psQuantizedVMRA,
+					psDeviceImport->sDevVAddr.uiAddr);
+		}
+
+		if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags))
+		{
+			_DevmemImportStructDevUnmapSVM(psHeap, psImport);
+		}
+
+		OSLockRelease(psDeviceImport->hLock);
+
+		_DevmemImportStructRelease(psImport);
+
+		OSAtomicDecrement(&psHeap->hImportCount);
+	}
+	else
+	{
+		OSLockRelease(psDeviceImport->hLock);
+	}
+}
+
+/*
+	Map an import into the CPU
+ */
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_CPU_IMPORT *psCPUImport;
+	size_t uiMappingLength;
+
+	psCPUImport = &psImport->sCPUImport;
+
+	OSLockAcquire(psCPUImport->hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psImport,
+			psCPUImport->ui32RefCount,
+			psCPUImport->ui32RefCount+1);
+
+	if (psCPUImport->ui32RefCount++ == 0)
+	{
+		_DevmemImportStructAcquire(psImport);
+
+		eError = OSMMapPMR(GetBridgeHandle(psImport->hDevConnection),
+				psImport->hPMR,
+				psImport->uiSize,
+				psImport->uiFlags,
+				&psCPUImport->hOSMMapData,
+				&psCPUImport->pvCPUVAddr,
+				&uiMappingLength);
+		if (eError != PVRSRV_OK)
+		{
+			goto failMap;
+		}
+
+		/* MappingLength might be rounded up to page size */
+		PVR_ASSERT(uiMappingLength >= psImport->uiSize);
+	}
+	OSLockRelease(psCPUImport->hLock);
+
+	return PVRSRV_OK;
+
+	failMap:
+	psCPUImport->ui32RefCount--;
+	if (!_DevmemImportStructRelease(psImport))
+	{
+		OSLockRelease(psCPUImport->hLock);
+	}
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+	Unmap an import from the CPU
+ */
+IMG_INTERNAL
+void _DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport)
+{
+	DEVMEM_CPU_IMPORT *psCPUImport;
+
+	psCPUImport = &psImport->sCPUImport;
+
+	OSLockAcquire(psCPUImport->hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+			__func__,
+			psImport,
+			psCPUImport->ui32RefCount,
+			psCPUImport->ui32RefCount-1);
+
+	if (--psCPUImport->ui32RefCount == 0)
+	{
+		/* psImport->uiSize is a 64-bit quantity, whereas the 5th
+		 * argument to OSMUnmapPMR is a 32-bit quantity on 32-bit
+		 * systems, hence a compiler warning about an implicit cast
+		 * and loss of data. The assert below guards against genuine
+		 * loss of data.
+		 */
+#if defined(LINUX) && defined(__i386__)
+		PVR_ASSERT(psImport->uiSize < IMG_UINT32_MAX);
+#endif
+		OSMUnmapPMR(GetBridgeHandle(psImport->hDevConnection),
+				psImport->hPMR,
+				psCPUImport->hOSMMapData,
+				psCPUImport->pvCPUVAddr,
+				psImport->uiSize);
+
+		OSLockRelease(psCPUImport->hLock);
+
+		_DevmemImportStructRelease(psImport);
+	}
+	else
+	{
+		OSLockRelease(psCPUImport->hLock);
+	}
+}
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_utils.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_utils.h
new file mode 100644
index 0000000..9ddd1ac
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/devicemem_utils.h
@@ -0,0 +1,462 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management internal utility functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally by device memory management
+                code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_UTILS_H_
+#define _DEVICEMEM_UTILS_H_
+
+#include "devicemem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "ra.h"
+#include "osfunc.h"
+#include "lock.h"
+#include "osmmap.h"
+
+#define DEVMEM_HEAPNAME_MAXLENGTH 160
+
+#if defined(DEVMEM_DEBUG) && defined(REFCOUNT_DEBUG)
+#define DEVMEM_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_ERROR, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define DEVMEM_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+/* If we need a "hMapping" but we don't have a server-side mapping, we poison
+ * the entry with this value so that it's easily recognised in the debugger.
+ * Note that this is potentially a valid handle, but then so is NULL, which is
+ * no better, indeed worse, as it's not obvious in the debugger. The value
+ * doesn't matter. We _never_ use it (and because it's valid, we never assert
+ * it isn't this) but it's nice to have a value in the source code that we can
+ * grep for if things go wrong.
+ */
+#define LACK_OF_MAPPING_POISON ((IMG_HANDLE)0x6116dead)
+#define LACK_OF_RESERVATION_POISON ((IMG_HANDLE)0x7117dead)
+
+#define DEVICEMEM_HISTORY_ALLOC_INDEX_NONE 0xFFFFFFFF
+
+struct _DEVMEM_CONTEXT_ {
+
+	SHARED_DEV_CONNECTION hDevConnection;
+
+	/* Number of heaps that have been created in this context
+	 * (regardless of whether they have allocations)
+	 */
+	IMG_UINT32 uiNumHeaps;
+
+	/* Each "DEVMEM_CONTEXT" has a counterpart in the server, which
+	 * is responsible for handling the mapping into device MMU.
+	 * We have a handle to that here.
+	 */
+	IMG_HANDLE hDevMemServerContext;
+
+	/* Number of automagically created heaps in this context,
+	 *  i.e. those that are born at context creation time from the
+	 * chosen "heap config" or "blueprint"
+	 */
+	IMG_UINT32 uiAutoHeapCount;
+
+	/* Pointer to array of such heaps */
+	struct _DEVMEM_HEAP_ **ppsAutoHeapArray;
+
+	/* The cache line size for use when allocating memory,
+	 * as it is not queryable on the client side
+	 */
+	IMG_UINT32 ui32CPUCacheLineSize;
+
+	/* Private data handle for device specific data */
+	IMG_HANDLE hPrivData;
+
+	/* Memory allocated to be used for MCU fences */
+	DEVMEM_MEMDESC *psMCUFenceMemDesc;
+};
+
+
+typedef enum
+{
+	DEVMEM_HEAP_TYPE_UNKNOWN = 0,
+	DEVMEM_HEAP_TYPE_USER_MANAGED,
+	DEVMEM_HEAP_TYPE_KERNEL_MANAGED,
+	DEVMEM_HEAP_TYPE_RA_MANAGED,
+} DEVMEM_HEAP_TYPE;
+
+struct _DEVMEM_HEAP_ {
+	/* Name of heap - for debug and lookup purposes. */
+	IMG_CHAR *pszName;
+
+	/* Number of live imports in the heap */
+	ATOMIC_T hImportCount;
+
+	/* Base address and size of heap, required by clients due to some
+	 * requesters not being full range
+	 */
+	IMG_DEV_VIRTADDR sBaseAddress;
+	DEVMEM_SIZE_T uiSize;
+
+	/* The heap type, describing if the space is managed by the user or an RA
+	 */
+	DEVMEM_HEAP_TYPE eHeapType;
+
+	/* This RA is for managing sub-allocations in virtual space. Two more
+	 * RAs are used under the hood: one for managing the coarser
+	 * allocation of virtual space from the heap, and one for managing
+	 * the physical backing storage.
+	 */
+	RA_ARENA *psSubAllocRA;
+	IMG_CHAR *pszSubAllocRAName;
+
+	/* This RA is for the coarse allocation of virtual space from the heap */
+	RA_ARENA *psQuantizedVMRA;
+	IMG_CHAR *pszQuantizedVMRAName;
+
+	/* We also need to store a copy of the quantum size in order to feed
+	 * this down to the server.
+	 */
+	IMG_UINT32 uiLog2Quantum;
+
+	/* Store a copy of the minimum import alignment */
+	IMG_UINT32 uiLog2ImportAlignment;
+
+	/* The relationship between tiled heap alignment and heap byte-stride
+	 * (dependent on tiling mode, abstracted here)
+	 */
+	IMG_UINT32 uiLog2TilingStrideFactor;
+
+	/* The parent memory context for this heap */
+	struct _DEVMEM_CONTEXT_ *psCtx;
+
+	/* Lock to protect this structure */
+	POS_LOCK hLock;
+
+	/* Each "DEVMEM_HEAP" has a counterpart in the server, which is
+	 * responsible for handling the mapping into device MMU.
+	 * We have a handle to that here.
+	 */
+	IMG_HANDLE hDevMemServerHeap;
+};
+
+typedef IMG_UINT32 DEVMEM_PROPERTIES_T;                  /*!< Typedef for Devicemem properties */
+#define DEVMEM_PROPERTIES_EXPORTABLE         (1UL<<0)    /*!< Is it exportable? */
+#define DEVMEM_PROPERTIES_IMPORTED           (1UL<<1)    /*!< Is it imported from another process? */
+#define DEVMEM_PROPERTIES_SUBALLOCATABLE     (1UL<<2)    /*!< Is it suballocatable? */
+#define DEVMEM_PROPERTIES_UNPINNED           (1UL<<3)    /*!< Is it currently unpinned? */
+#define DEVMEM_PROPERTIES_IMPORT_IS_ZEROED   (1UL<<4)    /*!< Is the memory fully zeroed? */
+#define DEVMEM_PROPERTIES_IMPORT_IS_CLEAN    (1UL<<5)    /*!< Is the memory clean, i.e. not been used before? */
+#define DEVMEM_PROPERTIES_SECURE             (1UL<<6)    /*!< Is it a special secure buffer? No CPU maps allowed! */
+#define DEVMEM_PROPERTIES_IMPORT_IS_POISONED (1UL<<7)    /*!< Is the memory fully poisoned? */
+#define DEVMEM_PROPERTIES_NO_CPU_MAPPING     (1UL<<8)    /* No CPU Mapping is allowed, RW attributes
+                                                            are further derived from allocation memory flags */
+#define DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE	 (1UL<<9)    /* No sparse resizing allowed, once a memory
+                                                            layout is chosen, no change allowed later,
+                                                            This includes pinning and unpinning */
+
+
+typedef struct _DEVMEM_DEVICE_IMPORT_ {
+	DEVMEM_HEAP *psHeap;            /*!< Heap this import is bound to */
+	IMG_DEV_VIRTADDR sDevVAddr;     /*!< Device virtual address of the import */
+	IMG_UINT32 ui32RefCount;        /*!< Refcount of the device virtual address */
+	IMG_HANDLE hReservation;        /*!< Device memory reservation handle */
+	IMG_HANDLE hMapping;            /*!< Device mapping handle */
+	IMG_BOOL bMapped;               /*!< Is this import mapped? */
+	POS_LOCK hLock;                 /*!< Lock to protect the device import */
+} DEVMEM_DEVICE_IMPORT;
+
+typedef struct _DEVMEM_CPU_IMPORT_ {
+	void *pvCPUVAddr;               /*!< CPU virtual address of the import */
+	IMG_UINT32 ui32RefCount;        /*!< Refcount of the CPU virtual address */
+	IMG_HANDLE hOSMMapData;         /*!< CPU mapping handle */
+	POS_LOCK hLock;                 /*!< Lock to protect the CPU import */
+} DEVMEM_CPU_IMPORT;
+
+typedef struct _DEVMEM_IMPORT_ {
+	SHARED_DEV_CONNECTION hDevConnection;
+	IMG_DEVMEM_ALIGN_T uiAlign;         /*!< Alignment of the PMR */
+	DEVMEM_SIZE_T uiSize;               /*!< Size of import */
+	ATOMIC_T hRefCount;                 /*!< Refcount for this import */
+	DEVMEM_PROPERTIES_T uiProperties;   /*!< Stores properties of an import like if
+	                                         it is exportable, pinned or suballocatable */
+	IMG_HANDLE hPMR;                    /*!< Handle to the PMR */
+	DEVMEM_FLAGS_T uiFlags;             /*!< Flags for this import */
+	POS_LOCK hLock;                     /*!< Lock to protect the import */
+
+	DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the import */
+	DEVMEM_CPU_IMPORT sCPUImport;       /*!< CPU specifics of the import */
+} DEVMEM_IMPORT;
+
+typedef struct _DEVMEM_DEVICE_MEMDESC_ {
+	IMG_DEV_VIRTADDR sDevVAddr;     /*!< Device virtual address of the allocation */
+	IMG_UINT32 ui32RefCount;        /*!< Refcount of the device virtual address */
+	POS_LOCK hLock;                 /*!< Lock to protect device memdesc */
+} DEVMEM_DEVICE_MEMDESC;
+
+typedef struct _DEVMEM_CPU_MEMDESC_ {
+	void *pvCPUVAddr;           /*!< CPU virtual address of the allocation */
+	IMG_UINT32 ui32RefCount;    /*!< Refcount of the CPU virtual address */
+	POS_LOCK hLock;             /*!< Lock to protect CPU memdesc */
+} DEVMEM_CPU_MEMDESC;
+
+struct _DEVMEM_MEMDESC_ {
+	DEVMEM_IMPORT *psImport;                /*!< Import this memdesc is on */
+	IMG_DEVMEM_OFFSET_T uiOffset;           /*!< Offset into import where our allocation starts */
+	IMG_DEVMEM_SIZE_T uiAllocSize;          /*!< Size of the allocation */
+	ATOMIC_T hRefCount;                     /*!< Refcount of the memdesc */
+	POS_LOCK hLock;                         /*!< Lock to protect memdesc */
+	IMG_HANDLE hPrivData;
+
+	DEVMEM_DEVICE_MEMDESC sDeviceMemDesc;   /*!< Device specifics of the memdesc */
+	DEVMEM_CPU_MEMDESC sCPUMemDesc;         /*!< CPU specifics of the memdesc */
+
+	IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this memdesc */
+
+	IMG_UINT32 ui32AllocationIndex;
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	IMG_HANDLE hRIHandle;                   /*!< Handle to RI information */
+#endif
+};
+
+/* The physical descriptor used to store handles and information of device
+ * physical allocations.
+ */
+struct _DEVMEMX_PHYS_MEMDESC_ {
+	IMG_UINT32 uiNumPages;                  /*!< Number of pages that the import has*/
+	IMG_UINT32 uiLog2PageSize;              /*!< Page size */
+	ATOMIC_T hRefCount;                     /*!< Refcount of the memdesc */
+	DEVMEM_FLAGS_T uiFlags;                 /*!< Flags for this import */
+	IMG_HANDLE hPMR;                        /*!< Handle to the PMR */
+	DEVMEM_CPU_IMPORT sCPUImport;           /*!< CPU specifics of the memdesc */
+	DEVMEM_BRIDGE_HANDLE hBridge;           /*!< Bridge connection for the server */
+};
+
+/* The virtual descriptor used to store handles and information of a device
+ * virtual range and the mappings to it.
+ */
+struct _DEVMEMX_VIRT_MEMDESC_ {
+	IMG_UINT32 uiNumPages;                  /*!< Number of pages that the import has*/
+	DEVMEM_FLAGS_T uiFlags;                 /*!< Flags for this import */
+	DEVMEMX_PHYSDESC **apsPhysDescTable;    /*!< Table to store links to physical descs */
+	DEVMEM_DEVICE_IMPORT sDeviceImport;     /*!< Device specifics of the memdesc */
+
+	IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this virt memdesc */
+	IMG_UINT32 ui32AllocationIndex;         /*!< To track mappings in this range */
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	IMG_HANDLE hRIHandle;                   /*!< Handle to RI information */
+#endif
+};
+
+#define DEVICEMEM_UTILS_NO_ADDRESS 0
+
+/******************************************************************************
+@Function       _DevmemValidateParams
+@Description    Check that the flags do not conflict, that the alignment is
+                a power of two and that the size is non-zero.
+
+@Input          uiSize      Size of the import.
+@Input          uiAlign     Alignment of the import.
+@Input          puiFlags    Pointer to the flags for the import.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize,
+                                   IMG_DEVMEM_ALIGN_T uiAlign,
+                                   DEVMEM_FLAGS_T *puiFlags);
+
+/******************************************************************************
+@Function       _DevmemImportStructAlloc
+@Description    Allocates memory for an import struct. Does not allocate a PMR!
+                Create locks for CPU and Devmem mappings.
+
+@Input          hDevConnection  Connection to use for calls from the import.
+@Input          ppsImport       The import to allocate.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection,
+                                      DEVMEM_IMPORT **ppsImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructInit
+@Description    Initialises the import struct with the given parameters.
+                Sets its refcount to 1.
+
+@Input          psImport     The import to initialise.
+@Input          uiSize       Size of the import.
+@Input          uiAlign      Alignment of allocations in the import.
+@Input          uiMapFlags
+@Input          hPMR         Reference to the PMR of this import struct.
+@Input          uiProperties Properties of the import. Is it exportable,
+                              imported, suballocatable, unpinned?
+******************************************************************************/
+void _DevmemImportStructInit(DEVMEM_IMPORT *psImport,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             IMG_DEVMEM_ALIGN_T uiAlign,
+                             PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+                             IMG_HANDLE hPMR,
+                             DEVMEM_PROPERTIES_T uiProperties);
+
+/******************************************************************************
+@Function       _DevmemImportStructDevMap
+@Description    NEVER call after the last _DevmemMemDescRelease()
+                Maps the PMR referenced by the import struct to the device's
+                virtual address space.
+                Does nothing but increase the device mapping refcount if
+                the import struct was already mapped.
+
+@Input          psHeap    The heap to map to.
+@Input          bMap      Caller can choose if the import should be really
+                          mapped in the page tables or if just a virtual range
+                          should be reserved and the refcounts increased.
+@Input          psImport  The import we want to map.
+@Input          uiOptionalMapAddress  An optional address to map to.
+                                      Pass DEVICEMEM_UTILS_NO_ADDRESS if not used.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
+                                       IMG_BOOL bMap,
+                                       DEVMEM_IMPORT *psImport,
+                                       IMG_UINT64 uiOptionalMapAddress);
+
+/******************************************************************************
+@Function       _DevmemImportStructDevUnmap
+@Description    Unmaps the PMR referenced by the import struct from the
+                device's virtual address space.
+                If this was not the last remaining device mapping on the
+                import struct, only the device mapping refcount is decreased.
+******************************************************************************/
+void _DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructCPUMap
+@Description    NEVER call after the last _DevmemMemDescRelease()
+                Maps the PMR referenced by the import struct to the CPU's
+                virtual address space.
+                Does nothing but increase the cpu mapping refcount if the
+                import struct was already mapped.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructCPUUnmap
+@Description    Unmaps the PMR referenced by the import struct from the CPU's
+                virtual address space.
+                If this was not the last remaining CPU mapping on the import
+                struct only the cpu mapping refcount is decreased.
+******************************************************************************/
+void _DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport);
+
+
+/******************************************************************************
+@Function       _DevmemImportStructAcquire
+@Description    Acquire an import struct by increasing its refcount.
+******************************************************************************/
+void _DevmemImportStructAcquire(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructRelease
+@Description    Reduces the refcount of the import struct.
+                Destroys the import in the case it was the last reference.
+                Destroys underlying PMR if this import was the last reference
+                to it.
+@return         A boolean to signal if the import was destroyed. True = yes.
+******************************************************************************/
+IMG_BOOL _DevmemImportStructRelease(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportDiscard
+@Description    Discard a created, but uninitialised, import structure.
+                This must only be called before _DevmemImportStructInit,
+                after which _DevmemImportStructRelease must be used to
+                "free" the import structure.
+******************************************************************************/
+void _DevmemImportDiscard(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemMemDescAlloc
+@Description    Allocates a MemDesc and creates its various locks.
+                Zeroes the allocated memory.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc);
+
+/******************************************************************************
+@Function       _DevmemMemDescInit
+@Description    Sets the given offset and import struct fields in the MemDesc.
+                Initialises refcount to 1 and other values to 0.
+
+@Input          psMemDesc    MemDesc to initialise.
+@Input          uiOffset     Offset in the import structure.
+@Input          psImport     Import the MemDesc is on.
+@Input          uiAllocSize  Size of the allocation
+******************************************************************************/
+void _DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc,
+						IMG_DEVMEM_OFFSET_T uiOffset,
+						DEVMEM_IMPORT *psImport,
+						IMG_DEVMEM_SIZE_T uiAllocSize);
+
+/******************************************************************************
+@Function       _DevmemMemDescAcquire
+@Description    Acquires the MemDesc by increasing its refcount.
+******************************************************************************/
+void _DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc);
+
+/******************************************************************************
+@Function       _DevmemMemDescRelease
+@Description    Releases the MemDesc by reducing its refcount.
+                Destroys the MemDesc if its refcount is 0.
+                Destroys the import struct the MemDesc is on if that was
+                the last MemDesc on the import, which may in turn destroy
+                the underlying PMR.
+@return         A boolean to signal if the MemDesc was destroyed. True = yes.
+******************************************************************************/
+IMG_BOOL _DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc);
+
+/******************************************************************************
+@Function       _DevmemMemDescDiscard
+@Description    Discard a created, but uninitialised, MemDesc structure.
+                This must only be called before _DevmemMemDescInit, after
+                which _DevmemMemDescRelease must be used to "free" the
+                MemDesc structure.
+******************************************************************************/
+void _DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc);
+
+#endif /* _DEVICEMEM_UTILS_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/dllist.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/dllist.h
new file mode 100644
index 0000000..bd3aafa
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/dllist.h
@@ -0,0 +1,305 @@
+/*************************************************************************/ /*!
+@File
+@Title          Double linked list header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Double linked list interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DLLIST_H
+#define DLLIST_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+/*!
+	Pointer to a linked list node
+*/
+typedef struct DLLIST_NODE_	*PDLLIST_NODE;
+
+
+/*!
+	Node in a linked list
+*/
+/*
+ * Note: the following structure's size is architecture-dependent and
+ * clients may need to create a mirror the structure definition if it needs
+ * to be used in a structure shared between host and device. Consider such
+ * clients if any changes are made to this structure.
+ */
+typedef struct DLLIST_NODE_
+{
+	struct DLLIST_NODE_	*psPrevNode;
+	struct DLLIST_NODE_	*psNextNode;
+} DLLIST_NODE;
+
+
+/*!
+	Static initialiser
+*/
+#define DECLARE_DLLIST(n) \
+DLLIST_NODE n = {&n, &n}
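+
+/* Illustrative usage (the list name below is hypothetical):
+ *
+ *     DECLARE_DLLIST(gsWorkList);   // gsWorkList starts out empty
+ */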
+
+
+/*************************************************************************/ /*!
+@Function       dllist_init
+
+@Description    Initialize a new double linked list
+
+@Input          psListHead              List head Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_init(PDLLIST_NODE psListHead)
+{
+	psListHead->psPrevNode = psListHead;
+	psListHead->psNextNode = psListHead;
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_is_empty
+
+@Description    Returns whether the list is empty
+
+@Input          psListHead              List head Node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_BOOL dllist_is_empty(PDLLIST_NODE psListHead)
+{
+	return (IMG_BOOL) ((psListHead->psPrevNode == psListHead)
+	                   && (psListHead->psNextNode == psListHead));
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_add_to_head
+
+@Description    Add psNewNode to head of list psListHead
+
+@Input          psListHead             Head Node
+@Input          psNewNode              New Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_add_to_head(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
+{
+	PDLLIST_NODE psTmp;
+
+	psTmp = psListHead->psNextNode;
+
+	psListHead->psNextNode = psNewNode;
+	psNewNode->psNextNode = psTmp;
+
+	psTmp->psPrevNode = psNewNode;
+	psNewNode->psPrevNode = psListHead;
+}
+
+
+/*************************************************************************/ /*!
+@Function       dllist_add_to_tail
+
+@Description    Add psNewNode to tail of list psListHead
+
+@Input          psListHead             Head Node
+@Input          psNewNode              New Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_add_to_tail(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
+{
+	PDLLIST_NODE psTmp;
+
+	psTmp = psListHead->psPrevNode;
+
+	psListHead->psPrevNode = psNewNode;
+	psNewNode->psPrevNode = psTmp;
+
+	psTmp->psNextNode = psNewNode;
+	psNewNode->psNextNode = psListHead;
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_node_is_in_list
+
+@Description    Returns IMG_TRUE if psNode is in a list
+
+@Input          psNode             List node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_BOOL dllist_node_is_in_list(PDLLIST_NODE psNode)
+{
+	return (IMG_BOOL) (psNode->psNextNode != NULL);
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_get_next_node
+
+@Description    Returns the list node after psListHead, or NULL if
+                psListHead is the only element in the list.
+
+@Input          psListHead             List node to start the operation
+
+*/
+/*****************************************************************************/
+static INLINE
+PDLLIST_NODE dllist_get_next_node(PDLLIST_NODE psListHead)
+{
+	if (psListHead->psNextNode == psListHead)
+	{
+		return NULL;
+	}
+	else
+	{
+		return psListHead->psNextNode;
+	}
+}
+
+
+/*************************************************************************/ /*!
+@Function       dllist_remove_node
+
+@Description    Removes psListNode from the list where it currently belongs
+
+@Input          psListNode             List node to be removed
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_remove_node(PDLLIST_NODE psListNode)
+{
+	psListNode->psNextNode->psPrevNode = psListNode->psPrevNode;
+	psListNode->psPrevNode->psNextNode = psListNode->psNextNode;
+
+	/* Clear the node to show it's not on a list */
+	psListNode->psPrevNode = NULL;
+	psListNode->psNextNode = NULL;
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_replace_head
+
+@Description    Moves the list from psOldHead to psNewHead
+
+@Input          psOldHead       List node to be replaced. Will become a head
+                                node of an empty list.
+@Input          psNewHead       List node to be inserted. Must be an empty list
+                                head.
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_replace_head(PDLLIST_NODE psOldHead, PDLLIST_NODE psNewHead)
+{
+	if (dllist_is_empty(psOldHead))
+	{
+		psNewHead->psNextNode = psNewHead;
+		psNewHead->psPrevNode = psNewHead;
+	}
+	else
+	{
+		/* Change the neighbouring nodes */
+		psOldHead->psNextNode->psPrevNode = psNewHead;
+		psOldHead->psPrevNode->psNextNode = psNewHead;
+
+		/* Copy the old data to the new node */
+		psNewHead->psNextNode = psOldHead->psNextNode;
+		psNewHead->psPrevNode = psOldHead->psPrevNode;
+
+		/* Remove links to the previous list */
+		psOldHead->psNextNode = psOldHead;
+		psOldHead->psPrevNode = psOldHead;
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_foreach_node
+
+@Description    Walk through all the nodes on the list.
+                Safe against removal of (node).
+
+@Input          list_head			List node to start the operation
+@Input			node				Current list node
+@Input			next				Node after the current one
+
+*/
+/*****************************************************************************/
+#define dllist_foreach_node(list_head, node, next)						\
+	for (node = (list_head)->psNextNode, next = (node)->psNextNode;		\
+		 node != (list_head);											\
+		 node = next, next = (node)->psNextNode)
+
+#define dllist_foreach_node_backwards(list_head, node, prev)			\
+	for (node = (list_head)->psPrevNode, prev = (node)->psPrevNode;		\
+		 node != (list_head);											\
+		 node = prev, prev = (node)->psPrevNode)
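+
+/* Illustrative usage of dllist_foreach_node (MY_ITEM and its embedded
+ * sNode member are hypothetical; IMG_CONTAINER_OF recovers the
+ * enclosing structure):
+ *
+ *     PDLLIST_NODE psNode, psNext;
+ *     dllist_foreach_node(&sListHead, psNode, psNext)
+ *     {
+ *         MY_ITEM *psItem = IMG_CONTAINER_OF(psNode, MY_ITEM, sNode);
+ *         // psItem may safely be removed from the list here
+ *     }
+ */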
+
+
+/*************************************************************************/ /*!
+@Function       dllist_foreach
+
+@Description    Simplification of dllist_foreach_node.
+                Walk through all the nodes on the list.
+                Safe against removal of the currently-iterated node.
+
+                Adds utility-macro dllist_cur() to typecast the current
+                node.
+
+@Input          list_head			List node to start the operation
+
+*/
+/*****************************************************************************/
+#define dllist_foreach(list_head)	\
+	for (DLLIST_NODE *_DllNode = (list_head).psNextNode, *_DllNext = _DllNode->psNextNode;		\
+		 _DllNode != &(list_head);																\
+		 _DllNode = _DllNext, _DllNext = _DllNode->psNextNode)
+
+#define dllist_foreach_backwards(list_head)	\
+	for (DLLIST_NODE *_DllNode = (list_head).psPrevNode, *_DllPrev = _DllNode->psPrevNode;		\
+		 _DllNode != &(list_head);																\
+		 _DllNode = _DllPrev, _DllPrev = _DllNode->psPrevNode)
+
+#define dllist_cur(type, member)	IMG_CONTAINER_OF(_DllNode, type, member)
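+
+/* Illustrative usage of dllist_foreach()/dllist_cur() (MY_ITEM and its
+ * embedded sNode member are hypothetical):
+ *
+ *     dllist_foreach(sListHead)
+ *     {
+ *         MY_ITEM *psItem = dllist_cur(MY_ITEM, sNode);
+ *         // process psItem; removing the current node is safe
+ *     }
+ */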
+
+#endif	/* DLLIST_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/env_connection.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/env_connection.h
new file mode 100644
index 0000000..307ee29
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/env_connection.h
@@ -0,0 +1,90 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Linux specific server side connection management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_ENV_CONNECTION_H_)
+#define _ENV_CONNECTION_H_
+
+#include <linux/version.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include "handle.h"
+#include "pvr_debug.h"
+#include "device.h"
+
+#if defined(SUPPORT_ION)
+#include PVR_ANDROID_ION_HEADER
+#include "ion_sys.h"
+#include "allocmem.h"
+#endif
+
+typedef struct _ENV_CONNECTION_PRIVATE_DATA_
+{
+	struct file *psFile;
+	PVRSRV_DEVICE_NODE *psDevNode;
+} ENV_CONNECTION_PRIVATE_DATA;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+#define ION_CLIENT_NAME_SIZE	50
+
+typedef struct _ENV_ION_CONNECTION_DATA_
+{
+	IMG_CHAR azIonClientName[ION_CLIENT_NAME_SIZE];
+	struct ion_device *psIonDev;
+	struct ion_client *psIonClient;
+} ENV_ION_CONNECTION_DATA;
+#endif
+
+typedef struct _ENV_CONNECTION_DATA_
+{
+	pid_t owner;
+
+	struct file *psFile;
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	ENV_ION_CONNECTION_DATA *psIonData;
+#endif
+} ENV_CONNECTION_DATA;
+
+#endif /* !defined(_ENV_CONNECTION_H_) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/event.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/event.c
new file mode 100644
index 0000000..8752bd0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/event.c
@@ -0,0 +1,545 @@
+/*************************************************************************/ /*!
+@File
+@Title          Event Object
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)
+#include <linux/sched/signal.h>
+#endif
+#include <linux/interrupt.h>
+#include <asm/hardirq.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <linux/freezer.h>
+#include <linux/uaccess.h>
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "event.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "pvr_bridge_k.h"
+
+#include "osfunc.h"
+
+/* Uncomment to enable event object stats that are useful for debugging.
+ * The stats can be retrieved at any time during the lifetime of the event
+ * object using the OSEventObjectDumpdebugInfo API. */
+// #define LINUX_EVENT_OBJECT_STATS
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+/* Returns pointer to task_struct that belongs to thread which acquired
+ * bridge lock. */
+extern struct task_struct *BridgeLockGetOwner(void);
+extern IMG_BOOL BridgeLockIsLocked(void);
+#endif
+
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
+{
+	rwlock_t sLock;
+	/* Counts how many times the event object has been signalled, i.e. how
+	 * many times LinuxEventObjectSignal() was called on a given event
+	 * object. Used for detecting pending signals.
+	 * Note that this is in no way related to OS signals. */
+	atomic_t sEventSignalCount;
+	struct list_head sList;
+} PVRSRV_LINUX_EVENT_OBJECT_LIST;
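+
+/* How the count is used (a summary of the scheme implemented below): each
+ * waiter snapshots sEventSignalCount into its own
+ * ui32EventSignalCountPrevious; LinuxEventObjectSignal() increments the
+ * shared count before waking the queues, so a waiter that observes a count
+ * different from its snapshot knows a signal is pending and returns without
+ * sleeping. */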
+
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
+{
+	IMG_UINT32 ui32EventSignalCountPrevious;
+#if defined(DEBUG)
+	IMG_UINT ui32Stats;
+#endif
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+	POS_LOCK hLock;
+	IMG_UINT32 ui32ScheduleAvoided;
+	IMG_UINT32 ui32ScheduleCalled;
+	IMG_UINT32 ui32ScheduleSleptFully;
+	IMG_UINT32 ui32ScheduleSleptPartially;
+	IMG_UINT32 ui32ScheduleReturnedImmediately;
+#endif
+	wait_queue_head_t sWait;
+	struct list_head sList;
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
+} PVRSRV_LINUX_EVENT_OBJECT;
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectListCreate
+
+ @Description
+
+ Linux wait object list creation
+
+ @Output    phEventObjectList : Pointer to the event object list handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
+{
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psEventObjectList;
+
+	psEventObjectList = OSAllocMem(sizeof(*psEventObjectList));
+	if (psEventObjectList == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListCreate: failed to allocate memory for event list"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	INIT_LIST_HEAD(&psEventObjectList->sList);
+
+	rwlock_init(&psEventObjectList->sLock);
+	atomic_set(&psEventObjectList->sEventSignalCount, 0);
+
+	*phEventObjectList = (IMG_HANDLE) psEventObjectList;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectListDestroy
+
+ @Description
+
+ Linux wait object list destruction
+
+ @Input    hEventObjectList : Event object list handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
+{
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList;
+
+	if (psEventObjectList)
+	{
+		if (!list_empty(&psEventObjectList->sList))
+		{
+			 PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event list is not empty"));
+			 return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+		}
+		OSFreeMem(psEventObjectList);
+		/* Not NULLing the pointer: it is a local copy on the stack */
+	}
+	return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectDelete
+
+ @Description
+
+ Linux wait object removal
+
+ @Input    hOSEventObject : Event object handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject)
+{
+	if (hOSEventObject)
+	{
+		PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
+		PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
+
+		write_lock_bh(&psLinuxEventObjectList->sLock);
+		list_del(&psLinuxEventObject->sList);
+		write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+		OSLockDestroy(psLinuxEventObject->hLock);
+#endif
+
+#if defined(DEBUG)
+//		PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDelete: Event object waits: %u", psLinuxEventObject->ui32Stats));
+#endif
+
+		OSFreeMem(psLinuxEventObject);
+		/* Not NULLing the pointer: it is a local copy on the stack */
+
+		return PVRSRV_OK;
+	}
+	return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+}
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectAdd
+
+ @Description
+
+ Linux wait object addition
+
+ @Input    hOSEventObjectList : Event object list handle
+ @Output   phOSEventObject : Pointer to the event object handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
+{
+	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+
+	/* Allocate the event object */
+	psLinuxEventObject = OSAllocMem(sizeof(*psLinuxEventObject));
+	if (psLinuxEventObject == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	INIT_LIST_HEAD(&psLinuxEventObject->sList);
+
+	/* Snapshot the signal count at the time the event object is added to the list */
+	psLinuxEventObject->ui32EventSignalCountPrevious = atomic_read(&psLinuxEventObjectList->sEventSignalCount);
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+	PVR_LOGR_IF_ERROR(OSLockCreate(&psLinuxEventObject->hLock), "OSLockCreate");
+	psLinuxEventObject->ui32ScheduleAvoided = 0;
+	psLinuxEventObject->ui32ScheduleCalled = 0;
+	psLinuxEventObject->ui32ScheduleSleptFully = 0;
+	psLinuxEventObject->ui32ScheduleSleptPartially = 0;
+	psLinuxEventObject->ui32ScheduleReturnedImmediately = 0;
+#endif
+
+#if defined(DEBUG)
+	psLinuxEventObject->ui32Stats = 0;
+#endif
+	init_waitqueue_head(&psLinuxEventObject->sWait);
+
+	psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
+
+	write_lock_bh(&psLinuxEventObjectList->sLock);
+	list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
+	write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+	*phOSEventObject = psLinuxEventObject;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectSignal
+
+ @Description
+
+ Linux wait object signaling function
+
+ @Input    hOSEventObjectList : Event object list handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
+{
+	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+	struct list_head *psListEntry, *psListEntryTemp, *psList;
+	psList = &psLinuxEventObjectList->sList;
+
+	/* Advance the signal count for this call, so that a concurrent "Wait" on
+	 * any of the event objects sees the pending signal and returns without
+	 * blocking. Incrementing the count before the wake-ups reduces the window
+	 * in which a concurrent "Wait" call might block while this Signal call is
+	 * still being processed. */
+	atomic_inc(&psLinuxEventObjectList->sEventSignalCount);
+
+	read_lock_bh(&psLinuxEventObjectList->sLock);
+	list_for_each_safe(psListEntry, psListEntryTemp, psList)
+	{
+		psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);
+		wake_up_interruptible(&psLinuxEventObject->sWait);
+	}
+	read_unlock_bh(&psLinuxEventObjectList->sLock);
+
+	return PVRSRV_OK;
+}
+
+static void _TryToFreeze(void)
+{
+	/* if we reach zero it means that all of the threads called try_to_freeze */
+	LinuxBridgeNumActiveKernelThreadsDecrement();
+
+	/* try_to_freeze() returns true if the thread was frozen. Should we do
+	 * anything with this information? What do we return? Which one is the
+	 * error case? */
+	try_to_freeze();
+
+	LinuxBridgeNumActiveKernelThreadsIncrement();
+}
+
+void LinuxEventObjectDumpDebugInfo(IMG_HANDLE hOSEventObject)
+{
+#ifdef LINUX_EVENT_OBJECT_STATS
+	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
+
+	OSLockAcquire(psLinuxEventObject->hLock);
+	PVR_LOG(("%s: EvObj(%p) schedule: Avoided(%u) Called(%u) ReturnedImmediately(%u) SleptFully(%u) SleptPartially(%u)",
+	         __func__, psLinuxEventObject, psLinuxEventObject->ui32ScheduleAvoided,
+			 psLinuxEventObject->ui32ScheduleCalled, psLinuxEventObject->ui32ScheduleReturnedImmediately,
+			 psLinuxEventObject->ui32ScheduleSleptFully, psLinuxEventObject->ui32ScheduleSleptPartially));
+	OSLockRelease(psLinuxEventObject->hLock);
+#else
+	PVR_LOG(("%s: LINUX_EVENT_OBJECT_STATS disabled!", __func__));
+#endif
+}
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectWait
+
+ @Description
+
+ Linux wait object routine
+
+ @Input    hOSEventObject : Event object handle
+
+ @Input   ui64Timeoutus : Timeout value in microseconds
+
+ @Input   bHoldBridgeLock : IMG_TRUE to keep holding the bridge lock while
+          descheduled
+
+ @Input   bFreezable : IMG_TRUE if the wait should cooperate with the freezer
+          during system suspend
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject,
+                                  IMG_UINT64 ui64Timeoutus,
+                                  IMG_BOOL bHoldBridgeLock,
+                                  IMG_BOOL bFreezable)
+{
+	IMG_UINT32 ui32EventSignalCount;
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	IMG_BOOL bReleasePVRLock;
+#endif
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	IMG_UINT32 ui32Remainder;
+	long timeOutJiffies;
+#ifdef LINUX_EVENT_OBJECT_STATS
+	long totalTimeoutJiffies;
+	IMG_BOOL bScheduleCalled = IMG_FALSE;
+#endif
+
+	DEFINE_WAIT(sWait);
+
+	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
+
+	/* Check if the driver is in good shape */
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	/* usecs_to_jiffies() only takes an unsigned int. So if our timeout is
+	 * bigger than an unsigned int, use the msec version. With such a long
+	 * timeout we really don't need the high resolution of usecs. */
+	if (ui64Timeoutus > 0xffffffffULL)
+		timeOutJiffies = msecs_to_jiffies(OSDivide64(ui64Timeoutus, 1000, &ui32Remainder));
+	else
+		timeOutJiffies = usecs_to_jiffies(ui64Timeoutus);
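+	/* For example (illustrative values): a 5 second timeout (5,000,000 us)
+	 * fits in an unsigned int and takes the usecs_to_jiffies() path, whereas
+	 * a 2 hour timeout (7,200,000,000 us) exceeds 32 bits and is first
+	 * divided down to 7,200,000 ms for msecs_to_jiffies(). */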
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+	totalTimeoutJiffies = timeOutJiffies;
+#endif
+
+	do
+	{
+		prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
+		ui32EventSignalCount = (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount);
+
+		if (psLinuxEventObject->ui32EventSignalCountPrevious != ui32EventSignalCount)
+		{
+			/* There is a pending event signal i.e. LinuxEventObjectSignal()
+			 * was called on the event object since the last time we checked.
+			 * Return without waiting. */
+			break;
+		}
+
+		if (signal_pending(current))
+		{
+			/* There is an OS signal pending so return.
+			 * This allows to kill/interrupt user space processes which
+			 * are waiting on this event object. */
+			break;
+		}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		/* Check thread holds the current PVR/bridge lock before obeying the
+		 * 'release before deschedule' behaviour. Some threads choose not to
+		 * hold the bridge lock in their implementation.
+		 */
+		bReleasePVRLock = (!bHoldBridgeLock && BridgeLockIsLocked() && current == BridgeLockGetOwner());
+		if (bReleasePVRLock)
+		{
+			OSReleaseBridgeLock();
+		}
+#else
+		PVR_UNREFERENCED_PARAMETER(bHoldBridgeLock);
+#endif
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+		bScheduleCalled = IMG_TRUE;
+#endif
+		timeOutJiffies = schedule_timeout(timeOutJiffies);
+
+		if (bFreezable)
+		{
+			_TryToFreeze();
+		}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		if (bReleasePVRLock)
+		{
+			OSAcquireBridgeLock();
+		}
+#endif
+#if defined(DEBUG)
+		psLinuxEventObject->ui32Stats++;
+#endif
+
+	} while (timeOutJiffies);
+
+	finish_wait(&psLinuxEventObject->sWait, &sWait);
+
+	psLinuxEventObject->ui32EventSignalCountPrevious = ui32EventSignalCount;
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+	OSLockAcquire(psLinuxEventObject->hLock);
+	if (bScheduleCalled)
+	{
+		psLinuxEventObject->ui32ScheduleCalled++;
+		if (totalTimeoutJiffies == timeOutJiffies)
+		{
+			psLinuxEventObject->ui32ScheduleReturnedImmediately++;
+		}
+		else if (timeOutJiffies == 0)
+		{
+			psLinuxEventObject->ui32ScheduleSleptFully++;
+		}
+		else
+		{
+			psLinuxEventObject->ui32ScheduleSleptPartially++;
+		}
+	}
+	else
+	{
+		psLinuxEventObject->ui32ScheduleAvoided++;
+	}
+	OSLockRelease(psLinuxEventObject->hLock);
+#endif
+
+	if (signal_pending(current))
+	{
+		return PVRSRV_ERROR_INTERRUPTED;
+	}
+	else
+	{
+		return timeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
+	}
+}
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+
+PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	DEFINE_WAIT(sWait);
+
+	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject =
+			(PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList =
+			psLinuxEventObject->psLinuxEventObjectList;
+
+	/* Check if the driver is in good shape */
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
+
+	if (psLinuxEventObject->ui32EventSignalCountPrevious !=
+	    (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount))
+	{
+		/* There is a pending signal, so return without waiting */
+		goto finish;
+	}
+
+	schedule();
+
+	_TryToFreeze();
+
+finish:
+	finish_wait(&psLinuxEventObject->sWait, &sWait);
+
+	psLinuxEventObject->ui32EventSignalCountPrevious =
+			(IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount);
+
+	return PVRSRV_OK;
+}
+
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/event.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/event.h
new file mode 100644
index 0000000..f13a1f8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/event.h
@@ -0,0 +1,55 @@
+/*************************************************************************/ /*!
+@File
+@Title          Event Object
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject);
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject,
+                                  IMG_UINT64 ui64Timeoutus,
+                                  IMG_BOOL bHoldBridgeLock,
+                                  IMG_BOOL bFreezable);
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject);
+#endif
+void LinuxEventObjectDumpDebugInfo(IMG_HANDLE hOSEventObject);
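+
+/* Typical lifecycle (a sketch, not prescribed by this header): create a list
+ * once with LinuxEventObjectListCreate(); each waiter calls
+ * LinuxEventObjectAdd() to join the list, LinuxEventObjectWait() to block,
+ * and LinuxEventObjectDelete() when finished, while producers call
+ * LinuxEventObjectSignal() on the list handle to wake all current waiters. */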
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/fwtrace_string.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/fwtrace_string.h
new file mode 100644
index 0000000..5f0fdf0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/fwtrace_string.h
@@ -0,0 +1,57 @@
+/*************************************************************************/ /*!
+@File           fwtrace_string.h
+@Title          RGX Firmware trace strings for KM
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform       Generic
+@Description    This file defines SFs tuple.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _KM_TRACE_STRING_H_
+#define _KM_TRACE_STRING_H_
+
+#include "rgx_fwif_sf.h"
+
+/* The tuple pairs generated by the X-Macro below are stored here.
+ * This macro definition must match the definition of SFids in rgx_fwif_sf.h */
+static const RGXKM_STID_FMT SFs[] = {
+#define X(a, b, c, d, e) { RGXFW_LOG_CREATESFID(a, b, e), d },
+	RGXFW_LOG_SFIDLIST
+#undef X
+};
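+
+/* Illustrative expansion (hypothetical tuple values): a list entry such as
+ *     X(1, GROUP, CLASS, "FW msg: %u", 1)
+ * expands under the X() definition above to
+ *     { RGXFW_LOG_CREATESFID(1, GROUP, 1), "FW msg: %u" },
+ * i.e. a packed string ID paired with its format string. */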
+
+#endif /* _KM_TRACE_STRING_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle.c
new file mode 100644
index 0000000..129d8ac
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle.c
@@ -0,0 +1,2537 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Handle Manager
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provide resource handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+/* See handle.h for a description of the handle API. */
+
+/*
+ * The implementation supports movable handle structures, allowing the address
+ * of a handle structure to change without having to fix up pointers in
+ * any of the handle structures. For example, the linked list mechanism
+ * used to link subhandles together uses handle array indices rather than
+ * pointers to the structures themselves.
+ */
+
+#if defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "img_defs.h"
+#include "handle.h"
+#include "handle_impl.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvrsrv.h"
+
+#define	HANDLE_HASH_TAB_INIT_SIZE		32
+
+#define	SET_FLAG(v, f)				((void)((v) |= (f)))
+#define	CLEAR_FLAG(v, f)			((void)((v) &= (IMG_UINT)~(f)))
+#define	TEST_FLAG(v, f)				((IMG_BOOL)(((v) & (f)) != 0))
+
+#define	TEST_ALLOC_FLAG(psHandleData, f)	TEST_FLAG((psHandleData)->eFlag, f)
+
+
+/* Linked list structure. Used for both the list head and list items */
+typedef struct _HANDLE_LIST_
+{
+	IMG_HANDLE hPrev;
+	IMG_HANDLE hNext;
+	IMG_HANDLE hParent;
+} HANDLE_LIST;
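+
+/* A sketch of the linkage (hA and hB are hypothetical subhandles of a parent
+ * handle hP): links are handles rather than pointers, so after adding hA then
+ * hB the chain is
+ *     hP.sChildren:  hNext = hA, hPrev = hB, hParent = hP
+ *     hA.sSiblings:  hNext = hB, hPrev = hP, hParent = hP
+ *     hB.sSiblings:  hNext = hP, hPrev = hA, hParent = hP
+ * and every hop resolves the handle back to the current address of its
+ * structure, so structures may move without any pointer fix-up. */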
+
+typedef struct _HANDLE_DATA_
+{
+	/* The handle that represents this structure */
+	IMG_HANDLE hHandle;
+
+	/* Handle type */
+	PVRSRV_HANDLE_TYPE eType;
+
+	/* Flags specified when the handle was allocated */
+	PVRSRV_HANDLE_ALLOC_FLAG eFlag;
+
+	/* Pointer to the data that the handle represents */
+	void *pvData;
+
+	/*
+	 * Callback specified at handle allocation time to
+	 * release/destroy/free the data represented by the
+	 * handle when its reference count reaches 0. This
+	 * should always be NULL for subhandles.
+	 */
+	PFN_HANDLE_RELEASE pfnReleaseData;
+
+	/* List head for subhandles of this handle */
+	HANDLE_LIST sChildren;
+
+	/* List entry for sibling subhandles */
+	HANDLE_LIST sSiblings;
+
+	/* Reference count. The pfnReleaseData callback gets called when the
+	 * reference count hits zero
+	 */
+	IMG_UINT32 ui32RefCount;
+} HANDLE_DATA;
+
+struct _HANDLE_BASE_
+{
+	/* Pointer to a handle implementation's base structure */
+	HANDLE_IMPL_BASE *psImplBase;
+
+	/*
+	 * Pointer to handle hash table.
+	 * The hash table is used to do reverse lookups, converting data
+	 * pointers to handles.
+	 */
+	HASH_TABLE *psHashTab;
+
+	/* Type specific (connection/global/process) Lock handle */
+	POS_LOCK hLock;
+
+	/* Can be connection, process, global */
+	PVRSRV_HANDLE_BASE_TYPE eType;
+};
+
+/*
+ * The key for the handle hash table is an array of three elements, the
+ * pointer to the resource, the resource type and the parent handle (or
+ * NULL if there is no parent). The eHandKey enumeration gives the
+ * array indices of the elements making up the key.
+ */
+enum eHandKey
+{
+	HAND_KEY_DATA = 0,
+	HAND_KEY_TYPE,
+	HAND_KEY_PARENT,
+	HAND_KEY_LEN		/* Must be last item in list */
+};
+
+/* HAND_KEY is the type of the hash table key */
+typedef uintptr_t HAND_KEY[HAND_KEY_LEN];
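+
+/* A consequence of keying on (data, type, parent): the same pvData pointer
+ * registered under two different types, or under two different parents,
+ * hashes to two distinct keys and therefore maps to two distinct handles. */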
+
+typedef struct FREE_HANDLE_DATA_TAG
+{
+	PVRSRV_HANDLE_BASE *psBase;
+	PVRSRV_HANDLE_TYPE eHandleFreeType;
+	/* Timing data (ns) used to release the bridge lock once the deadline passes */
+	IMG_UINT64 ui64TimeStart;
+	IMG_UINT64 ui64MaxBridgeTime;
+} FREE_HANDLE_DATA;
+
+typedef struct FREE_KERNEL_HANDLE_DATA_TAG
+{
+	PVRSRV_HANDLE_BASE *psBase;
+	HANDLE_DATA *psProcessHandleData;
+	IMG_HANDLE hKernelHandle;
+} FREE_KERNEL_HANDLE_DATA;
+
+/* Stores a pointer to the function table of the handle back-end in use */
+static HANDLE_IMPL_FUNCTAB const *gpsHandleFuncs;
+
+static POS_LOCK gKernelHandleLock;
+static IMG_BOOL gbLockInitialised = IMG_FALSE;
+
+void LockHandle(PVRSRV_HANDLE_BASE *psBase)
+{
+	OSLockAcquire(psBase->hLock);
+}
+
+void UnlockHandle(PVRSRV_HANDLE_BASE *psBase)
+{
+	OSLockRelease(psBase->hLock);
+}
+
+/*
+ * Kernel handle base structure. This is used for handles that are not
+ * allocated on behalf of a particular process.
+ */
+PVRSRV_HANDLE_BASE *gpsKernelHandleBase = NULL;
+
+/* Increase the reference count on the given handle.
+ * The handle lock must already be acquired.
+ * Returns: the reference count after the increment
+ */
+static inline IMG_UINT32 _HandleRef(HANDLE_DATA *psHandleData)
+{
+#if defined(PVRSRV_DEBUG_HANDLE_LOCK)
+	FREE_HANDLE_DATA *psData = (FREE_HANDLE_DATA *)psHandleData->pvData;
+	if (!OSLockIsLocked(psData->psBase->hLock))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__));
+		OSDumpStack();
+	}
+#endif
+	psHandleData->ui32RefCount++;
+	return psHandleData->ui32RefCount;
+}
+
+/* Decrease the reference count on the given handle.
+ * The handle lock must already be acquired.
+ * Returns: the reference count after the decrement
+ */
+static inline IMG_UINT32 _HandleUnref(HANDLE_DATA *psHandleData)
+{
+#if defined(PVRSRV_DEBUG_HANDLE_LOCK)
+	FREE_HANDLE_DATA *psData = (FREE_HANDLE_DATA *)psHandleData->pvData;
+	if (!OSLockIsLocked(psData->psBase->hLock))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__));
+		OSDumpStack();
+	}
+#endif
+	PVR_ASSERT(psHandleData->ui32RefCount > 0);
+	psHandleData->ui32RefCount--;
+
+	return psHandleData->ui32RefCount;
+}
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+static const IMG_CHAR *HandleTypeToString(PVRSRV_HANDLE_TYPE eType)
+{
+	#define HANDLETYPE(x) \
+			case PVRSRV_HANDLE_TYPE_##x: \
+				return #x;
+	switch (eType)
+	{
+		#include "handle_types.h"
+		#undef HANDLETYPE
+
+		default:
+			return "INVALID";
+	}
+}
+#endif /* PVRSRV_NEED_PVR_DPF */
+
+/*!
+******************************************************************************
+
+ @Function	GetHandleData
+
+ @Description	Get the handle data structure for a given handle
+
+ @Input		psBase - pointer to handle base structure
+		ppsHandleData - location to return pointer to handle data structure
+		hHandle - handle from client
+		eType - handle type or PVRSRV_HANDLE_TYPE_NONE if the
+			handle type is not to be checked.
+
+ @Output	ppsHandleData - points to a pointer to the handle data structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleData)
+#endif
+static INLINE
+PVRSRV_ERROR GetHandleData(PVRSRV_HANDLE_BASE *psBase,
+			   HANDLE_DATA **ppsHandleData,
+			   IMG_HANDLE hHandle,
+			   PVRSRV_HANDLE_TYPE eType)
+{
+	HANDLE_DATA *psHandleData;
+	PVRSRV_ERROR eError;
+
+	eError = gpsHandleFuncs->pfnGetHandleData(psBase->psImplBase,
+						  hHandle,
+						  (void **)&psHandleData);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		return eError;
+	}
+
+	/*
+	 * Unless PVRSRV_HANDLE_TYPE_NONE was passed in to this function,
+	 * check handle is of the correct type.
+	 */
+	if (unlikely(eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandleData->eType))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "GetHandleData: Type mismatch. Lookup request: Handle %p, type: %s (%u) but stored handle is type %s (%u)",
+			 hHandle,
+			 HandleTypeToString(eType),
+			 eType,
+			 HandleTypeToString(psHandleData->eType),
+			 psHandleData->eType));
+		return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH;
+	}
+
+	/* Return the handle structure */
+	*ppsHandleData = psHandleData;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListInit
+
+ @Description	Initialise a linked list structure embedded in a handle
+		structure.
+
+ @Input		hHandle - handle containing the linked list structure
+		psList - pointer to linked list structure
+		hParent - parent handle or NULL
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInit)
+#endif
+static INLINE
+void HandleListInit(IMG_HANDLE hHandle, HANDLE_LIST *psList, IMG_HANDLE hParent)
+{
+	psList->hPrev = hHandle;
+	psList->hNext = hHandle;
+	psList->hParent = hParent;
+}
+
+/*!
+******************************************************************************
+
+ @Function	InitParentList
+
+ @Description	Initialise the children list head in a handle structure.
+		The children are the subhandles of this handle.
+
+ @Input		psHandleData - pointer to handle data structure
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitParentList)
+#endif
+static INLINE
+void InitParentList(HANDLE_DATA *psHandleData)
+{
+	IMG_HANDLE hParent = psHandleData->hHandle;
+
+	HandleListInit(hParent, &psHandleData->sChildren, hParent);
+}
+
+/*!
+******************************************************************************
+
+ @Function	InitChildEntry
+
+ @Description	Initialise the child list entry in a handle structure.
+		The list entry is used to link together subhandles of
+		a given handle.
+
+ @Input		psHandleData - pointer to handle data structure
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitChildEntry)
+#endif
+static INLINE
+void InitChildEntry(HANDLE_DATA *psHandleData)
+{
+	HandleListInit(psHandleData->hHandle, &psHandleData->sSiblings, NULL);
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListIsEmpty
+
+ @Description	Determine whether a given linked list is empty.
+
+ @Input		hHandle - handle containing the list head
+		psList - pointer to the list head
+
+ @Return	IMG_TRUE if the list is empty, IMG_FALSE if it isn't.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIsEmpty)
+#endif
+/* Instead of passing in the handle, can we not just do
+ * (psList->hPrev == psList->hNext) ? IMG_TRUE : IMG_FALSE ??? */
+static INLINE
+IMG_BOOL HandleListIsEmpty(IMG_HANDLE hHandle, HANDLE_LIST *psList)
+{
+	IMG_BOOL bIsEmpty;
+
+	bIsEmpty = (IMG_BOOL)(psList->hNext == hHandle);
+
+#ifdef	DEBUG
+	{
+		IMG_BOOL bIsEmpty2;
+
+		bIsEmpty2 = (IMG_BOOL)(psList->hPrev == hHandle);
+		PVR_ASSERT(bIsEmpty == bIsEmpty2);
+	}
+#endif
+
+	return bIsEmpty;
+}
+
+#ifdef DEBUG
+/*!
+******************************************************************************
+
+ @Function	NoChildren
+
+ @Description	Determine whether a handle has any subhandles
+
+ @Input		psHandleData - pointer to handle data structure
+
+ @Return	IMG_TRUE if the handle has no subhandles, IMG_FALSE if it does.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoChildren)
+#endif
+static INLINE
+IMG_BOOL NoChildren(HANDLE_DATA *psHandleData)
+{
+	PVR_ASSERT(psHandleData->sChildren.hParent == psHandleData->hHandle);
+
+	return HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sChildren);
+}
+
+/*!
+******************************************************************************
+
+ @Function	NoParent
+
+ @Description	Determine whether a handle is a subhandle
+
+ @Input		psHandleData - pointer to handle data structure
+
+ @Return	IMG_TRUE if the handle is not a subhandle, IMG_FALSE if it is.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoParent)
+#endif
+static INLINE
+IMG_BOOL NoParent(HANDLE_DATA *psHandleData)
+{
+	if (HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sSiblings))
+	{
+		PVR_ASSERT(psHandleData->sSiblings.hParent == NULL);
+
+		return IMG_TRUE;
+	}
+	else
+	{
+		PVR_ASSERT(psHandleData->sSiblings.hParent != NULL);
+	}
+	return IMG_FALSE;
+}
+#endif /*DEBUG*/
+
+/*!
+******************************************************************************
+
+ @Function	ParentHandle
+
+ @Description	Determine the parent of a handle
+
+ @Input		psHandleData - pointer to handle data structure
+
+ @Return	Parent handle, or NULL if the handle is not a subhandle.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentHandle)
+#endif
+static INLINE
+IMG_HANDLE ParentHandle(HANDLE_DATA *psHandleData)
+{
+	return psHandleData->sSiblings.hParent;
+}
+
+/*
+ * GetHandleListFromHandleAndOffset is used to generate either a
+ * pointer to the subhandle list head, or a pointer to the linked list
+ * structure of an item on a subhandle list.
+ * The list head is itself on the list, but is at a different offset
+ * in the handle structure to the linked list structure for items on
+ * the list. The two linked list structures are differentiated by
+ * the third parameter, containing the parent handle. The parent field
+ * in the list head structure references the handle structure that contains
+ * it. For items on the list, the parent field in the linked list structure
+ * references the parent handle, which will be different from the handle
+ * containing the linked list structure.
+ */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleListFromHandleAndOffset)
+#endif
+static INLINE
+HANDLE_LIST *GetHandleListFromHandleAndOffset(PVRSRV_HANDLE_BASE *psBase,
+					      IMG_HANDLE hEntry,
+					      IMG_HANDLE hParent,
+					      size_t uiParentOffset,
+					      size_t uiEntryOffset)
+{
+	HANDLE_DATA *psHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psBase != NULL);
+
+	eError = GetHandleData(psBase,
+			       &psHandleData,
+			       hEntry,
+			       PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		return NULL;
+	}
+
+	if (hEntry == hParent)
+	{
+		return (HANDLE_LIST *)((IMG_CHAR *)psHandleData + uiParentOffset);
+	}
+	else
+	{
+		return (HANDLE_LIST *)((IMG_CHAR *)psHandleData + uiEntryOffset);
+	}
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListInsertBefore
+
+ @Description	Insert a handle before a handle currently on the list.
+
+ @Input		hEntry - handle before which the new entry is inserted
+		psEntry - pointer to the handle structure before which the new
+			entry is inserted
+		uiParentOffset - offset to list head struct in handle structure
+		hNewEntry - handle to be inserted
+		psNewEntry - pointer to handle structure of item to be inserted
+		uiEntryOffset - offset of list item struct in handle structure
+		hParent - parent handle of hNewEntry
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInsertBefore)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase,
+				    IMG_HANDLE hEntry,
+				    HANDLE_LIST *psEntry,
+				    size_t uiParentOffset,
+				    IMG_HANDLE hNewEntry,
+				    HANDLE_LIST *psNewEntry,
+				    size_t uiEntryOffset,
+				    IMG_HANDLE hParent)
+{
+	HANDLE_LIST *psPrevEntry;
+
+	if (psBase == NULL || psEntry == NULL || psNewEntry == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psPrevEntry = GetHandleListFromHandleAndOffset(psBase,
+						       psEntry->hPrev,
+						       hParent,
+						       uiParentOffset,
+						       uiEntryOffset);
+	if (psPrevEntry == NULL)
+	{
+		return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+	}
+
+	PVR_ASSERT(psNewEntry->hParent == NULL);
+	PVR_ASSERT(hEntry == psPrevEntry->hNext);
+
+#if defined(DEBUG)
+	{
+		HANDLE_LIST *psParentList;
+
+		psParentList = GetHandleListFromHandleAndOffset(psBase,
+								hParent,
+								hParent,
+								uiParentOffset,
+								uiParentOffset);
+		PVR_ASSERT(psParentList && psParentList->hParent == hParent);
+	}
+#endif /* defined(DEBUG) */
+
+	psNewEntry->hPrev = psEntry->hPrev;
+	psEntry->hPrev = hNewEntry;
+
+	psNewEntry->hNext = hEntry;
+	psPrevEntry->hNext = hNewEntry;
+
+	psNewEntry->hParent = hParent;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	AdoptChild
+
+ @Description	Assign a subhandle to a handle
+
+ @Input		psParentData - pointer to handle structure of parent handle
+		psChildData - pointer to handle structure of child subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(AdoptChild)
+#endif
+static INLINE
+PVRSRV_ERROR AdoptChild(PVRSRV_HANDLE_BASE *psBase,
+			HANDLE_DATA *psParentData,
+			HANDLE_DATA *psChildData)
+{
+	IMG_HANDLE hParent = psParentData->sChildren.hParent;
+
+	PVR_ASSERT(hParent == psParentData->hHandle);
+
+	return HandleListInsertBefore(psBase,
+				      hParent,
+				      &psParentData->sChildren,
+				      offsetof(HANDLE_DATA, sChildren),
+				      psChildData->hHandle,
+				      &psChildData->sSiblings,
+				      offsetof(HANDLE_DATA, sSiblings),
+				      hParent);
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListRemove
+
+ @Description	Remove a handle from a list
+
+ @Input		hEntry - handle to be removed
+		psEntry - pointer to handle structure of item to be removed
+		uiEntryOffset - offset of list item struct in handle structure
+		uiParentOffset - offset to list head struct in handle structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListRemove)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListRemove(PVRSRV_HANDLE_BASE *psBase,
+			      IMG_HANDLE hEntry,
+			      HANDLE_LIST *psEntry,
+			      size_t uiEntryOffset,
+			      size_t uiParentOffset)
+{
+	if (unlikely(psBase == NULL || psEntry == NULL))
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (!HandleListIsEmpty(hEntry, psEntry))
+	{
+		HANDLE_LIST *psPrev;
+		HANDLE_LIST *psNext;
+
+		psPrev = GetHandleListFromHandleAndOffset(psBase,
+							  psEntry->hPrev,
+							  psEntry->hParent,
+							  uiParentOffset,
+							  uiEntryOffset);
+		if (psPrev == NULL)
+		{
+			return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+		}
+
+		psNext = GetHandleListFromHandleAndOffset(psBase,
+							  psEntry->hNext,
+							  psEntry->hParent,
+							  uiParentOffset,
+							  uiEntryOffset);
+		if (psNext == NULL)
+		{
+			return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+		}
+
+		/*
+		 * The list head is on the list, and we don't want to
+		 * remove it.
+		 */
+		PVR_ASSERT(psEntry->hParent != NULL);
+
+		psPrev->hNext = psEntry->hNext;
+		psNext->hPrev = psEntry->hPrev;
+
+		HandleListInit(hEntry, psEntry, NULL);
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	UnlinkFromParent
+
+ @Description	Remove a subhandle from its parents list
+
+ @Input		psHandleData - pointer to handle data structure of child subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(UnlinkFromParent)
+#endif
+static INLINE
+PVRSRV_ERROR UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase,
+			      HANDLE_DATA *psHandleData)
+{
+	return HandleListRemove(psBase,
+				psHandleData->hHandle,
+				&psHandleData->sSiblings,
+				offsetof(HANDLE_DATA, sSiblings),
+				offsetof(HANDLE_DATA, sChildren));
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListIterate
+
+ @Description	Iterate over the items in a list
+
+ @Input		psHead - pointer to list head
+		uiParentOffset - offset to list head struct in handle structure
+		uiEntryOffset - offset of list item struct in handle structure
+		pfnIterFunc - function to be called for each handle in the list
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIterate)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase,
+			       HANDLE_LIST *psHead,
+			       size_t uiParentOffset,
+			       size_t uiEntryOffset,
+			       PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
+{
+	IMG_HANDLE hHandle = psHead->hNext;
+	IMG_HANDLE hParent = psHead->hParent;
+	IMG_HANDLE hNext;
+
+	PVR_ASSERT(psHead->hParent != NULL);
+
+	/*
+	 * Follow the next chain from the list head until we reach
+	 * the list head again, which signifies the end of the list.
+	 */
+	while (hHandle != hParent)
+	{
+		HANDLE_LIST *psEntry;
+		PVRSRV_ERROR eError;
+
+		psEntry = GetHandleListFromHandleAndOffset(psBase,
+							   hHandle,
+							   hParent,
+							   uiParentOffset,
+							   uiEntryOffset);
+		if (psEntry == NULL)
+		{
+			return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+		}
+
+		PVR_ASSERT(psEntry->hParent == psHead->hParent);
+
+		/*
+		 * Get the next index now, in case the list item is
+		 * modified by the iteration function.
+		 */
+		hNext = psEntry->hNext;
+
+		eError = (*pfnIterFunc)(psBase, hHandle);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+
+		hHandle = hNext;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	IterateOverChildren
+
+ @Description	Iterate over the subhandles of a parent handle
+
+ @Input		psParentData - pointer to parent handle structure
+		pfnIterFunc - function to be called for each subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(IterateOverChildren)
+#endif
+static INLINE
+PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase,
+				 HANDLE_DATA *psParentData,
+				 PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
+{
+	 return HandleListIterate(psBase,
+				  &psParentData->sChildren,
+				  offsetof(HANDLE_DATA, sChildren),
+				  offsetof(HANDLE_DATA, sSiblings),
+				  pfnIterFunc);
+}
+
+/*!
+******************************************************************************
+
+ @Function	ParentIfPrivate
+
+ @Description	Return the parent handle if the handle was allocated
+		with PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, else return
+		NULL
+
+ @Input		psHandleData - pointer to handle data structure
+
+ @Return	Parent handle, or NULL
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentIfPrivate)
+#endif
+static INLINE
+IMG_HANDLE ParentIfPrivate(HANDLE_DATA *psHandleData)
+{
+	return TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
+			ParentHandle(psHandleData) : NULL;
+}
+
+/*!
+******************************************************************************
+
+ @Function	InitKey
+
+ @Description	Initialise a hash table key for the current process
+
+ @Input		psBase - pointer to handle base structure
+		aKey - pointer to key
+		pvData - pointer to the resource the handle represents
+		eType - type of resource
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitKey)
+#endif
+static INLINE
+void InitKey(HAND_KEY aKey,
+	     PVRSRV_HANDLE_BASE *psBase,
+	     void *pvData,
+	     PVRSRV_HANDLE_TYPE eType,
+	     IMG_HANDLE hParent)
+{
+	PVR_UNREFERENCED_PARAMETER(psBase);
+
+	aKey[HAND_KEY_DATA] = (uintptr_t)pvData;
+	aKey[HAND_KEY_TYPE] = (uintptr_t)eType;
+	aKey[HAND_KEY_PARENT] = (uintptr_t)hParent;
+}
+
+static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle);
+
+/*!
+******************************************************************************
+
+ @Function	FreeHandle
+
+ @Description	Free a handle data structure.
+
+ @Input		psBase - Pointer to handle base structure
+		hHandle - Handle to be freed
+		eType - Type of the handle to be freed
+		ppvData - Location for data associated with the freed handle
+
+ @Output 		ppvData - Points to data that was associated with the freed handle
+
+ @Return	PVRSRV_OK or PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase,
+			       IMG_HANDLE hHandle,
+			       PVRSRV_HANDLE_TYPE eType,
+			       void **ppvData)
+{
+	HANDLE_DATA *psHandleData = NULL;
+	HANDLE_DATA *psReleasedHandleData;
+	PVRSRV_ERROR eError;
+
+	eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		return eError;
+	}
+
+	if (_HandleUnref(psHandleData) > 0)
+	{
+		/* this handle still has references so do not destroy it
+		 * or the underlying object yet
+		 */
+		return PVRSRV_OK;
+	}
+
+	/* The last reference has been dropped, so call the release data callback */
+	if (psHandleData->pfnReleaseData != NULL)
+	{
+		eError = psHandleData->pfnReleaseData(psHandleData->pvData);
+		if (eError == PVRSRV_ERROR_RETRY)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+				 "%s: "
+				 "Got retry while calling release data callback for %p (type = %d)",
+				 __func__,
+				 hHandle,
+				 (IMG_UINT32)psHandleData->eType));
+
+			/* the caller should retry, so retain a reference on the handle */
+			_HandleRef(psHandleData);
+
+			return eError;
+		}
+		else if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		HAND_KEY aKey;
+		IMG_HANDLE hRemovedHandle;
+
+		InitKey(aKey, psBase, psHandleData->pvData, psHandleData->eType, ParentIfPrivate(psHandleData));
+
+		hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psBase->psHashTab, aKey);
+
+		PVR_ASSERT(hRemovedHandle != NULL);
+		PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
+		PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
+	}
+
+	eError = UnlinkFromParent(psBase, psHandleData);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Error whilst unlinking from parent handle (%s)",
+			 __func__,
+			 PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+	/* Free children */
+	eError = IterateOverChildren(psBase, psHandleData, FreeHandleWrapper);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Error whilst freeing subhandles (%s)",
+			 __func__,
+			 PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+	eError = gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase,
+						  psHandleData->hHandle,
+						  (void **)&psReleasedHandleData);
+	if (likely(eError == PVRSRV_OK))
+	{
+		PVR_ASSERT(psReleasedHandleData == psHandleData);
+	}
+
+	if (ppvData)
+	{
+		*ppvData = psHandleData->pvData;
+	}
+
+	OSFreeMem(psHandleData);
+
+	return eError;
+}
+
+static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle)
+{
+	return FreeHandle(psBase, hHandle, PVRSRV_HANDLE_TYPE_NONE, NULL);
+}
+
+/*!
+******************************************************************************
+
+ @Function	FindHandle
+
+ @Description	Find handle corresponding to a resource pointer
+
+ @Input		psBase - pointer to handle base structure
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+
+ @Return	the handle, or NULL if not found
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(FindHandle)
+#endif
+static INLINE
+IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase,
+		      void *pvData,
+		      PVRSRV_HANDLE_TYPE eType,
+		      IMG_HANDLE hParent)
+{
+	HAND_KEY aKey;
+
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+
+	InitKey(aKey, psBase, pvData, eType, hParent);
+
+	return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
+}
+
+/*!
+******************************************************************************
+
+ @Function	AllocHandle
+
+ @Description	Allocate a new handle
+
+ @Input		phHandle - location for new handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+		hParent - parent handle or NULL
+		pfnReleaseData - Function to release resource at handle release
+		                 time
+
+ @Output	phHandle - points to new handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase,
+				IMG_HANDLE *phHandle,
+				void *pvData,
+				PVRSRV_HANDLE_TYPE eType,
+				PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+				IMG_HANDLE hParent,
+				PFN_HANDLE_RELEASE pfnReleaseData)
+{
+	HANDLE_DATA *psNewHandleData;
+	IMG_HANDLE hHandle;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(psBase != NULL && psBase->psHashTab != NULL);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		/* Handle must not already exist */
+		PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == NULL);
+	}
+
+	psNewHandleData = OSAllocZMem(sizeof(*psNewHandleData));
+	if (psNewHandleData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Couldn't allocate handle data",
+			 __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	eError = gpsHandleFuncs->pfnAcquireHandle(psBase->psImplBase, &hHandle, psNewHandleData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Failed to acquire a handle",
+			 __func__));
+		goto ErrorFreeHandleData;
+	}
+
+	/*
+	 * If a data pointer can be associated with multiple handles, we
+	 * don't put the handle in the hash table, as the data pointer
+	 * may not map to a unique handle
+	 */
+	if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		HAND_KEY aKey;
+
+		/* Initialise hash key */
+		InitKey(aKey, psBase, pvData, eType, hParent);
+
+		/* Put the new handle in the hash table */
+		if (!HASH_Insert_Extended(psBase->psHashTab, aKey, (uintptr_t)hHandle))
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Couldn't add handle to hash table",
+				 __func__));
+			eError = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+			goto ErrorReleaseHandle;
+		}
+	}
+
+	psNewHandleData->hHandle = hHandle;
+	psNewHandleData->eType = eType;
+	psNewHandleData->eFlag = eFlag;
+	psNewHandleData->pvData = pvData;
+	psNewHandleData->pfnReleaseData = pfnReleaseData;
+	psNewHandleData->ui32RefCount = 1;
+
+	InitParentList(psNewHandleData);
+#if defined(DEBUG)
+	PVR_ASSERT(NoChildren(psNewHandleData));
+#endif
+
+	InitChildEntry(psNewHandleData);
+#if defined(DEBUG)
+	PVR_ASSERT(NoParent(psNewHandleData));
+#endif
+
+	/* Return the new handle to the client */
+	*phHandle = psNewHandleData->hHandle;
+
+	return PVRSRV_OK;
+
+ErrorReleaseHandle:
+	(void)gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, hHandle, NULL);
+
+ErrorFreeHandleData:
+	OSFreeMem(psNewHandleData);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocHandle
+
+ @Description	Allocate a handle
+
+ @Input		phHandle - location for new handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+		pfnReleaseData - Function to release resource at handle release
+		                 time
+
+ @Output	phHandle - points to new handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase,
+			       IMG_HANDLE *phHandle,
+			       void *pvData,
+			       PVRSRV_HANDLE_TYPE eType,
+			       PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+			       PFN_HANDLE_RELEASE pfnReleaseData)
+{
+	PVRSRV_ERROR eError;
+
+	LockHandle(psBase);
+	eError = PVRSRVAllocHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, pfnReleaseData);
+	UnlockHandle(psBase);
+
+	return eError;
+}
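+
+/* PVRSRVAllocHandle() above is a thin locking wrapper; the same locked vs
+ * *Unlocked split is used for the other public entry points below, so that
+ * callers already holding the handle base lock can use the *Unlocked
+ * variant directly, while external callers get the locking handled for
+ * them.
+ */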
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocHandleUnlocked
+
+ @Description	Allocate a handle without acquiring/releasing the handle
+		lock. The function assumes you hold the lock when called.
+
+ @Input		phHandle - location for new handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+		pfnReleaseData - Function to release resource at handle release
+		                 time
+
+ @Output	phHandle - points to new handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+			       IMG_HANDLE *phHandle,
+			       void *pvData,
+			       PVRSRV_HANDLE_TYPE eType,
+			       PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+			       PFN_HANDLE_RELEASE pfnReleaseData)
+{
+	PVRSRV_ERROR eError;
+
+	*phHandle = NULL;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing handle base", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Exit;
+	}
+
+	if (pfnReleaseData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing release function", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Exit;
+	}
+
+	eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, NULL, pfnReleaseData);
+
+Exit:
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocSubHandle
+
+ @Description	Allocate a subhandle
+
+ @Input		phHandle - location for new subhandle
+		pvData - pointer to resource to be associated with the subhandle
+		eType - the type of resource
+		hParent - parent handle
+
+ @Output	phHandle - points to new subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
+				  IMG_HANDLE *phHandle,
+				  void *pvData,
+				  PVRSRV_HANDLE_TYPE eType,
+				  PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+				  IMG_HANDLE hParent)
+{
+	PVRSRV_ERROR eError;
+
+	LockHandle(psBase);
+	eError = PVRSRVAllocSubHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, hParent);
+	UnlockHandle(psBase);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocSubHandleUnlocked
+
+ @Description	Allocate a subhandle without acquiring/releasing the
+		handle lock. The function assumes you hold the lock when called.
+
+ @Input		phHandle - location for new subhandle
+		pvData - pointer to resource to be associated with the subhandle
+		eType - the type of resource
+		hParent - parent handle
+
+ @Output	phHandle - points to new subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+				  IMG_HANDLE *phHandle,
+				  void *pvData,
+				  PVRSRV_HANDLE_TYPE eType,
+				  PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+				  IMG_HANDLE hParent)
+{
+	HANDLE_DATA *psPHandleData = NULL;
+	HANDLE_DATA *psCHandleData = NULL;
+	IMG_HANDLE hParentKey;
+	IMG_HANDLE hHandle;
+	PVRSRV_ERROR eError;
+
+	*phHandle = NULL;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing handle base", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Exit;
+	}
+
+	hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? hParent : NULL;
+
+	/* Lookup the parent handle */
+	eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Failed to get parent handle structure",
+			 __func__));
+		goto Exit;
+	}
+
+	eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey, NULL);
+	if (eError != PVRSRV_OK)
+	{
+		goto Exit;
+	}
+
+	eError = GetHandleData(psBase, &psCHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Failed to get subhandle structure",
+			 __func__));
+
+		/* If we were able to allocate the handle then there should be no reason why we
+		   can't also get its handle structure. Otherwise something has gone badly wrong. */
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		goto Exit;
+	}
+
+	/*
+	 * Get the parent handle structure again, in case the handle
+	 * structure has moved (depending on the implementation
+	 * of AllocHandle).
+	 */
+	eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Failed to get parent handle structure",
+			 __func__));
+
+		(void)FreeHandle(psBase, hHandle, eType, NULL);
+		goto Exit;
+	}
+
+	eError = AdoptChild(psBase, psPHandleData, psCHandleData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Parent handle failed to adopt subhandle",
+			 __func__));
+
+		(void)FreeHandle(psBase, hHandle, eType, NULL);
+		goto Exit;
+	}
+
+	*phHandle = hHandle;
+
+	eError = PVRSRV_OK;
+
+Exit:
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVFindHandle
+
+ @Description	Find handle corresponding to a resource pointer
+
+ @Input		phHandle - location for returned handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+
+ @Output	phHandle - points to handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase,
+			      IMG_HANDLE *phHandle,
+			      void *pvData,
+			      PVRSRV_HANDLE_TYPE eType)
+{
+	PVRSRV_ERROR eError;
+
+	LockHandle(psBase);
+	eError = PVRSRVFindHandleUnlocked(psBase, phHandle, pvData, eType);
+	UnlockHandle(psBase);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVFindHandleUnlocked
+
+ @Description	Find handle corresponding to a resource pointer without
+		acquiring/releasing the handle lock. The function assumes you hold
+		the lock when called.
+
+ @Input		phHandle - location for returned handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+
+ @Output	phHandle - points to handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+			      IMG_HANDLE *phHandle,
+			      void *pvData,
+			      PVRSRV_HANDLE_TYPE eType)
+{
+	IMG_HANDLE hHandle;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing handle base", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Exit;
+	}
+
+	/* See if there is a handle for this data pointer */
+	hHandle = FindHandle(psBase, pvData, eType, NULL);
+	if (hHandle == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Error finding handle. Type %u",
+			 __func__,
+			 eType));
+
+		eError = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+		goto Exit;
+	}
+
+	*phHandle = hHandle;
+
+	eError = PVRSRV_OK;
+
+Exit:
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVLookupHandle
+
+ @Description	Lookup the data pointer corresponding to a handle
+
+ @Input		ppvData - location to return data pointer
+		hHandle - handle from client
+		eType - handle type
+		bRef - If TRUE, a reference will be added on the handle if the
+		       lookup is successful.
+
+ @Output	ppvData - points to the data pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase,
+				void **ppvData,
+				IMG_HANDLE hHandle,
+				PVRSRV_HANDLE_TYPE eType,
+				IMG_BOOL bRef)
+{
+	PVRSRV_ERROR eError;
+
+	LockHandle(psBase);
+	eError = PVRSRVLookupHandleUnlocked(psBase, ppvData, hHandle, eType, bRef);
+	UnlockHandle(psBase);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVLookupHandleUnlocked
+
+ @Description	Lookup the data pointer corresponding to a handle without
+ 		acquiring/releasing the handle lock. The function assumes you
+		hold the lock when called.
+
+ @Input		ppvData - location to return data pointer
+		hHandle - handle from client
+		eType - handle type
+		bRef - If TRUE, a reference will be added on the handle if the
+		       lookup is successful.
+
+ @Output	ppvData - points to the data pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+				void **ppvData,
+				IMG_HANDLE hHandle,
+				PVRSRV_HANDLE_TYPE eType,
+				IMG_BOOL bRef)
+{
+	HANDLE_DATA *psHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (unlikely(psBase == NULL))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing handle base", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Exit;
+	}
+
+	eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Error looking up handle (%s). Handle %p, type %u",
+			 __func__,
+			 PVRSRVGetErrorString(eError),
+			 (void*) hHandle,
+			 eType));
+#if defined(DEBUG) || defined(PVRSRV_NEED_PVR_DPF)
+		OSDumpStack();
+#endif
+		goto Exit;
+	}
+
+	if (bRef)
+	{
+		_HandleRef(psHandleData);
+	}
+
+	*ppvData = psHandleData->pvData;
+
+	eError = PVRSRV_OK;
+
+Exit:
+	return eError;
+}
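+
+/* Usage sketch for the bRef parameter (illustrative only; the handle type is
+ * an arbitrary example): a lookup with bRef == IMG_TRUE takes an extra
+ * reference that keeps the handle alive until a matching release drops it:
+ *
+ *	eError = PVRSRVLookupHandleUnlocked(psBase, &pvData, hHandle,
+ *	                                    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ *	                                    IMG_TRUE);
+ *	... use pvData ...
+ *	eError = PVRSRVReleaseHandleUnlocked(psBase, hHandle,
+ *	                                     PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ */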
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVLookupSubHandle
+
+ @Description	Lookup the data pointer corresponding to a subhandle
+
+ @Input		ppvData - location to return data pointer
+		hHandle - handle from client
+		eType - handle type
+		hAncestor - ancestor handle
+
+ @Output	ppvData - points to the data pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase,
+				   void **ppvData,
+				   IMG_HANDLE hHandle,
+				   PVRSRV_HANDLE_TYPE eType,
+				   IMG_HANDLE hAncestor)
+{
+	HANDLE_DATA *psPHandleData = NULL;
+	HANDLE_DATA *psCHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing handle base", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	LockHandle(psBase);
+
+	eError = GetHandleData(psBase, &psCHandleData, hHandle, eType);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Error looking up subhandle (%s). Handle %p, type %u",
+			 __func__,
+			 PVRSRVGetErrorString(eError),
+			 (void*) hHandle,
+			 eType));
+		OSDumpStack();
+		goto ExitUnlock;
+	}
+
+	/* Look for hAncestor among the handle's ancestors */
+	for (psPHandleData = psCHandleData; ParentHandle(psPHandleData) != hAncestor; )
+	{
+		eError = GetHandleData(psBase, &psPHandleData, ParentHandle(psPHandleData), PVRSRV_HANDLE_TYPE_NONE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Subhandle doesn't belong to given ancestor",
+				 __func__));
+			eError = PVRSRV_ERROR_INVALID_SUBHANDLE;
+			goto ExitUnlock;
+		}
+	}
+
+	*ppvData = psCHandleData->pvData;
+
+	eError = PVRSRV_OK;
+
+ExitUnlock:
+	UnlockHandle(psBase);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVReleaseHandle
+
+ @Description	Release a handle that is no longer needed
+
+ @Input 	hHandle - handle from client
+		eType - handle type
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
+				 IMG_HANDLE hHandle,
+				 PVRSRV_HANDLE_TYPE eType)
+{
+	PVRSRV_ERROR eError;
+
+	LockHandle(psBase);
+	eError = PVRSRVReleaseHandleUnlocked(psBase, hHandle, eType);
+	UnlockHandle(psBase);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVReleaseHandleUnlocked
+
+ @Description	Release a handle that is no longer needed without
+ 		acquiring/releasing the handle lock. The function assumes you
+		hold the lock when called.
+
+ @Input 	hHandle - handle from client
+		eType - handle type
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+				 IMG_HANDLE hHandle,
+				 PVRSRV_HANDLE_TYPE eType)
+{
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (unlikely(psBase == NULL))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Exit;
+	}
+
+	eError = FreeHandle(psBase, hHandle, eType, NULL);
+
+Exit:
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPurgeHandles
+
+ @Description	Purge handles for a given handle base
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Missing handle base"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	LockHandle(psBase);
+
+	eError = gpsHandleFuncs->pfnPurgeHandles(psBase->psImplBase);
+
+	UnlockHandle(psBase);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocHandleBase
+
+ @Description	Allocate a handle base structure for a process
+
+ @Input 	ppsBase - pointer to handle base structure pointer
+
+ @Output	ppsBase - points to handle base structure pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase,
+                                   PVRSRV_HANDLE_BASE_TYPE eType)
+{
+	PVRSRV_HANDLE_BASE *psBase;
+	PVRSRV_ERROR eError;
+
+	if (gpsHandleFuncs == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Handle management not initialised",
+			 __func__));
+		return PVRSRV_ERROR_NOT_READY;
+	}
+
+	if (ppsBase == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psBase = OSAllocZMem(sizeof(*psBase));
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Couldn't allocate handle base",
+			 __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	eError = OSLockCreate(&psBase->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Creation of base handle lock failed (%s)",
+			 __func__,
+			 PVRSRVGetErrorString(eError)));
+		goto ErrorFreeHandleBase;
+	}
+
+	psBase->eType = eType;
+
+	LockHandle(psBase);
+
+	eError = gpsHandleFuncs->pfnCreateHandleBase(&psBase->psImplBase);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorUnlock;
+	}
+
+	psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE,
+						 sizeof(HAND_KEY),
+						 HASH_Func_Default,
+						 HASH_Key_Comp_Default);
+	if (psBase->psHashTab == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Couldn't create data pointer hash table",
+			 __func__));
+		eError = PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE;
+		goto ErrorDestroyHandleBase;
+	}
+
+	*ppsBase = psBase;
+
+	UnlockHandle(psBase);
+
+	return PVRSRV_OK;
+
+ErrorDestroyHandleBase:
+	(void)gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
+
+ErrorUnlock:
+	UnlockHandle(psBase);
+	OSLockDestroy(psBase->hLock);
+
+ErrorFreeHandleBase:
+	OSFreeMem(psBase);
+
+	return eError;
+}
+
+#if defined(DEBUG)
+typedef struct _COUNT_HANDLE_DATA_
+{
+	PVRSRV_HANDLE_BASE *psBase;
+	IMG_UINT32 uiHandleDataCount;
+} COUNT_HANDLE_DATA;
+
+/* Used to count the number of handles that have data associated with them */
+static PVRSRV_ERROR CountHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
+{
+	COUNT_HANDLE_DATA *psData = (COUNT_HANDLE_DATA *)pvData;
+	HANDLE_DATA *psHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psData == NULL ||
+	    psData->psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing free data", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = GetHandleData(psData->psBase,
+			       &psHandleData,
+			       hHandle,
+			       PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Couldn't get handle data for handle",
+			 __func__));
+		return eError;
+	}
+
+	if (psHandleData != NULL)
+	{
+		psData->uiHandleDataCount++;
+	}
+
+	return PVRSRV_OK;
+}
+
+/* Print a handle in the handle base. Used with the iterator callback. */
+static PVRSRV_ERROR ListHandlesInBase(IMG_HANDLE hHandle, void *pvData)
+{
+	PVRSRV_HANDLE_BASE *psBase = (PVRSRV_HANDLE_BASE*) pvData;
+	HANDLE_DATA *psHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing base", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = GetHandleData(psBase,
+			       &psHandleData,
+			       hHandle,
+			       PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't get handle data for handle", __func__));
+		return eError;
+	}
+
+	if (psHandleData != NULL)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "    Handle: %6u, Refs: %3u, Type: %s (%u), pvData<%p>",
+				(IMG_UINT32) (uintptr_t) psHandleData->hHandle,
+				psHandleData->ui32RefCount,
+				HandleTypeToString(psHandleData->eType),
+				psHandleData->eType,
+				psHandleData->pvData));
+	}
+
+	return PVRSRV_OK;
+}
+
+#endif /* defined(DEBUG) */
+
+static INLINE IMG_BOOL _CheckIfMaxTimeExpired(IMG_UINT64 ui64TimeStart, IMG_UINT64 ui64MaxBridgeTime)
+{
+	IMG_UINT64 ui64Diff;
+	IMG_UINT64 ui64Now = OSClockns64();
+
+	if (ui64Now >= ui64TimeStart)
+	{
+		ui64Diff = ui64Now - ui64TimeStart;
+	}
+	else
+	{
+		/* time has wrapped around */
+		ui64Diff = (UINT64_MAX - ui64TimeStart) + ui64Now;
+	}
+
+	return ui64Diff >= ui64MaxBridgeTime;
+}
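+
+/* Worked example for the wrap-around branch above: with ui64TimeStart ==
+ * UINT64_MAX - 10 and ui64Now == 5, ui64Diff = (UINT64_MAX -
+ * (UINT64_MAX - 10)) + 5 = 15. This is one tick short of the true elapsed
+ * time (the wrap itself is not counted), which is negligible at nanosecond
+ * resolution.
+ */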
+
+static PVRSRV_ERROR FreeKernelHandlesWrapperIterKernel(IMG_HANDLE hHandle, void *pvData)
+{
+	FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData;
+	HANDLE_DATA *psKernelHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	/* Get kernel handle data. */
+	eError = GetHandleData(KERNEL_HANDLE_BASE,
+			    &psKernelHandleData,
+			    hHandle,
+			    PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FreeKernelHandlesWrapperIterKernel: Couldn't get handle data for kernel handle"));
+		return eError;
+	}
+
+	if (psKernelHandleData->pvData == psData->psProcessHandleData->pvData)
+	{
+		/* This kernel handle belongs to our process handle. */
+		psData->hKernelHandle = hHandle;
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR FreeKernelHandlesWrapperIterProcess(IMG_HANDLE hHandle, void *pvData)
+{
+	FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	/* Get process handle data. */
+	eError = GetHandleData(psData->psBase,
+			    &psData->psProcessHandleData,
+			    hHandle,
+			    PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FreeKernelHandlesWrapperIterProcess: Couldn't get handle data for process handle"));
+		return eError;
+	}
+
+	if (psData->psProcessHandleData->eFlag == PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+#if defined(SUPPORT_INSECURE_EXPORT)
+		|| psData->psProcessHandleData->eType == PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT
+#endif
+		)
+	{
+		/* Only multi-alloc process handles might also be present in the kernel handle base. */
+		psData->hKernelHandle = NULL;
+		/* Iterate over kernel handles. */
+		eError = gpsHandleFuncs->pfnIterateOverHandles(KERNEL_HANDLE_BASE->psImplBase,
+									&FreeKernelHandlesWrapperIterKernel,
+									(void *)psData);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "FreeKernelHandlesWrapperIterProcess: Failed to iterate over kernel handles"));
+			return eError;
+		}
+
+		if (psData->hKernelHandle)
+		{
+			/* Release kernel handle which belongs to our process handle. */
+			eError = gpsHandleFuncs->pfnReleaseHandle(KERNEL_HANDLE_BASE->psImplBase,
+						psData->hKernelHandle,
+						NULL);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "FreeKernelHandlesWrapperIterProcess: Couldn't release kernel handle"));
+				return eError;
+			}
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR FreeHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
+{
+	FREE_HANDLE_DATA *psData = (FREE_HANDLE_DATA *)pvData;
+	HANDLE_DATA *psHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psData == NULL ||
+	    psData->psBase == NULL ||
+	    psData->eHandleFreeType == PVRSRV_HANDLE_TYPE_NONE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing free data", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = GetHandleData(psData->psBase,
+			       &psHandleData,
+			       hHandle,
+			       PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Couldn't get handle data for handle",
+			 __func__));
+		return eError;
+	}
+
+	if (psHandleData == NULL || psHandleData->eType != psData->eHandleFreeType)
+	{
+		return PVRSRV_OK;
+	}
+
+	PVR_ASSERT(psHandleData->ui32RefCount > 0);
+
+	while (psHandleData->ui32RefCount != 0)
+	{
+		if (psHandleData->pfnReleaseData != NULL)
+		{
+			eError = psHandleData->pfnReleaseData(psHandleData->pvData);
+			if (eError == PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE,
+					 "%s: "
+					 "Got retry while calling release data callback for %p (type = %d)",
+					 __func__,
+					 hHandle,
+					 (IMG_UINT32)psHandleData->eType));
+
+				return eError;
+			}
+			else if (eError != PVRSRV_OK)
+			{
+				return eError;
+			}
+		}
+
+		_HandleUnref(psHandleData);
+	}
+
+	if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		HAND_KEY aKey;
+		IMG_HANDLE hRemovedHandle;
+
+		InitKey(aKey,
+			psData->psBase,
+			psHandleData->pvData,
+			psHandleData->eType,
+			ParentIfPrivate(psHandleData));
+
+		hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psData->psBase->psHashTab, aKey);
+
+		PVR_ASSERT(hRemovedHandle != NULL);
+		PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
+		PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
+	}
+
+	eError = gpsHandleFuncs->pfnSetHandleData(psData->psBase->psImplBase, hHandle, NULL);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	OSFreeMem(psHandleData);
+
+	/* If we have reached the end of the time slice, release the global
+	 * lock, invoke the scheduler and then reacquire the lock */
+	if ((psData->ui64MaxBridgeTime != 0) && _CheckIfMaxTimeExpired(psData->ui64TimeStart, psData->ui64MaxBridgeTime))
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,
+			 "%s: Lock timeout (timeout: %" IMG_UINT64_FMTSPEC ")",
+			 __func__,
+			 psData->ui64MaxBridgeTime));
+		UnlockHandle(psData->psBase);
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSReleaseBridgeLock();
+#endif
+		/* Invoke the scheduler to check if other processes are waiting for the lock */
+		OSReleaseThreadQuanta();
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSAcquireBridgeLock();
+#endif
+		LockHandle(psData->psBase);
+		/* Restart the time slice from now */
+		psData->ui64TimeStart = OSClockns64();
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Lock acquired again", __func__));
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] =
+{
+	PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+	PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+	PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+	PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+	PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+	PVRSRV_HANDLE_TYPE_RGX_MEMORY_BLOCK,
+	PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+	PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+	PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RI_HANDLE,
+	PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+	PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+	PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+	PVRSRV_HANDLE_TYPE_SERVER_SYNC_EXPORT,
+	PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+	PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
+	PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT,
+	PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+	PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_PAGELIST,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+	PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+	PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+	PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE,
+	PVRSRV_HANDLE_TYPE_DC_BUFFER,
+	PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT,
+	PVRSRV_HANDLE_TYPE_DC_DEVICE,
+	PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+	PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP
+};
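+
+/* The array above lists handle types in what appears to be dependency order
+ * (e.g. firmware and sync objects before the device memory contexts and PMRs
+ * they reference), so release callbacks do not run against already-freed
+ * resources. Note that a handle type missing from this list is not swept by
+ * the typed free loop in PVRSRVFreeHandleBase() below.
+ */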
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVFreeKernelHandles
+
+ @Description	Free kernel handles which belong to process handles
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+	FREE_KERNEL_HANDLE_DATA sHandleData = { };
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle(psBase);
+
+	sHandleData.psBase = psBase;
+	/* Iterate over process handles. */
+	eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+								&FreeKernelHandlesWrapperIterProcess,
+								(void *)&sHandleData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVFreeKernelHandles: Failed to iterate over handles (%s)",
+			 PVRSRVGetErrorString(eError)));
+		goto ExitUnlock;
+	}
+
+	eError = PVRSRV_OK;
+
+ExitUnlock:
+	UnlockHandle(psBase);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVRetrieveProcessHandleBase
+
+ @Description	Returns a pointer to the process handle base for the current
+                process. If the current process is the cleanup thread, then
+                the process handle base for the process currently being
+                cleaned up is returned
+
+ @Return	Pointer to the process handle base, or NULL if not found.
+
+******************************************************************************/
+PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void)
+{
+	PVRSRV_HANDLE_BASE *psHandleBase = NULL;
+	PROCESS_HANDLE_BASE *psProcHandleBase = NULL;
+	PVRSRV_DATA *psPvrData = PVRSRVGetPVRSRVData();
+	IMG_PID ui32PurgePid = PVRSRVGetPurgeConnectionPid();
+
+	OSLockAcquire(psPvrData->hProcessHandleBase_Lock);
+
+	/* Check to see if we're being called from the cleanup thread... */
+	if ((OSGetCurrentClientProcessIDKM() == psPvrData->cleanupThreadPid) &&
+		(ui32PurgePid > 0))
+	{
+		/* Check to see if the cleanup thread has already removed the
+		 * process handle base from the HASH table.
+		 */
+		psHandleBase = psPvrData->psProcessHandleBaseBeingFreed;
+		/* psHandleBase should not be NULL, as the cleanup thread
+		 * removes this entry from the HASH table before we get
+		 * here, so assert if it is.
+		 */
+		PVR_ASSERT(psHandleBase);
+	}
+	else
+	{
+		/* Not being called from the cleanup thread, so return the process
+		 * handle base for the current process.
+		 */
+		psProcHandleBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(psPvrData->psProcessHandleBase_Table,
+																OSGetCurrentClientProcessIDKM());
+	}
+	OSLockRelease(psPvrData->hProcessHandleBase_Lock);
+
+	if (psHandleBase == NULL && psProcHandleBase != NULL)
+	{
+		psHandleBase = psProcHandleBase->psHandleBase;
+	}
+	return psHandleBase;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVFreeHandleBase
+
+ @Description	Free a handle base structure
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime)
+{
+#if defined(DEBUG)
+	COUNT_HANDLE_DATA sCountData = { };
+#endif
+	FREE_HANDLE_DATA sHandleData = { };
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	IMG_PID uiCleanupPid = psPVRSRVData->cleanupThreadPid;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle(psBase);
+
+	/* If this is a process handle base being freed by the cleanup
+	 * thread, store this in psPVRSRVData->psProcessHandleBaseBeingFreed
+	 */
+	if ((OSGetCurrentClientProcessIDKM() == uiCleanupPid) &&
+	    (psBase->eType == PVRSRV_HANDLE_BASE_TYPE_PROCESS))
+	{
+		psPVRSRVData->psProcessHandleBaseBeingFreed = psBase;
+	}
+
+	sHandleData.psBase = psBase;
+	sHandleData.ui64TimeStart = OSClockns64();
+	sHandleData.ui64MaxBridgeTime = ui64MaxBridgeTime;
+
+#if defined(DEBUG)
+
+	sCountData.psBase = psBase;
+
+	eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+						       &CountHandleDataWrapper,
+						       (void *)&sCountData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Failed to perform handle count (%s)",
+			 __func__,
+			 PVRSRVGetErrorString(eError)));
+		goto ExitUnlock;
+	}
+
+	if (sCountData.uiHandleDataCount != 0)
+	{
+		IMG_BOOL bList = sCountData.uiHandleDataCount < HANDLE_DEBUG_LISTING_MAX_NUM;
+
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: %u remaining handles in handle base 0x%p "
+			 "(PVRSRV_HANDLE_BASE_TYPE %u).%s",
+			 __func__,
+			 sCountData.uiHandleDataCount,
+			 psBase,
+			 psBase->eType,
+			 bList ? "": " Skipping details, too many items..."));
+
+		if (bList)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "-------- Listing Handles --------"));
+			(void) gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+			                                             &ListHandlesInBase,
+			                                             psBase);
+			PVR_DPF((PVR_DBG_WARNING, "-------- Done Listing    --------"));
+		}
+	}
+
+#endif /* defined(DEBUG) */
+
+	/*
+	 * As we're freeing handles based on type, make sure all
+	 * handles have actually had their data freed to avoid
+	 * resources being leaked
+	 */
+	for (i = 0; i < ARRAY_SIZE(g_aeOrderedFreeList); i++)
+	{
+		sHandleData.eHandleFreeType = g_aeOrderedFreeList[i];
+
+		/* Make sure all handles have been freed before destroying the handle base */
+		eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+							       &FreeHandleDataWrapper,
+							       (void *)&sHandleData);
+		if (eError != PVRSRV_OK)
+		{
+			goto ExitUnlock;
+		}
+	}
+
+	if (psBase->psHashTab != NULL)
+	{
+		HASH_Delete(psBase->psHashTab);
+	}
+
+	eError = gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
+	if (eError != PVRSRV_OK)
+	{
+		goto ExitUnlock;
+	}
+
+	UnlockHandle(psBase);
+	OSLockDestroy(psBase->hLock);
+	OSFreeMem(psBase);
+
+	return eError;
+
+ExitUnlock:
+	if (OSGetCurrentClientProcessIDKM() == uiCleanupPid)
+	{
+		psPVRSRVData->psProcessHandleBaseBeingFreed = NULL;
+	}
+	UnlockHandle(psBase);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVHandleInit
+
+ @Description	Initialise handle management
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVHandleInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsKernelHandleBase == NULL);
+	PVR_ASSERT(gpsHandleFuncs == NULL);
+	PVR_ASSERT(!gbLockInitialised);
+
+	eError = OSLockCreate(&gKernelHandleLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Creation of handle global lock failed (%s)",
+			 __func__,
+			 PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+	gbLockInitialised = IMG_TRUE;
+
+	eError = PVRSRVHandleGetFuncTable(&gpsHandleFuncs);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: PVRSRVHandleGetFuncTable failed (%s)",
+			 __func__,
+			 PVRSRVGetErrorString(eError)));
+		goto ErrorHandleDeinit;
+	}
+
+	eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase,
+	                               PVRSRV_HANDLE_BASE_TYPE_GLOBAL);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: PVRSRVAllocHandleBase failed (%s)",
+			 __func__,
+			 PVRSRVGetErrorString(eError)));
+		goto ErrorHandleDeinit;
+	}
+
+	eError = gpsHandleFuncs->pfnEnableHandlePurging(gpsKernelHandleBase->psImplBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: PVRSRVEnableHandlePurging failed (%s)",
+			 __func__,
+			 PVRSRVGetErrorString(eError)));
+		goto ErrorHandleDeinit;
+	}
+
+	return PVRSRV_OK;
+
+ErrorHandleDeinit:
+	(void) PVRSRVHandleDeInit();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVHandleDeInit
+
+ @Description	De-initialise handle management
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVHandleDeInit(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (gpsHandleFuncs != NULL)
+	{
+		if (gpsKernelHandleBase != NULL)
+		{
+			eError = PVRSRVFreeHandleBase(gpsKernelHandleBase, 0 /* do not release bridge lock */);
+			if (eError == PVRSRV_OK)
+			{
+				gpsKernelHandleBase = NULL;
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVHandleDeInit: FreeHandleBase failed (%s)",
+					 PVRSRVGetErrorString(eError)));
+			}
+		}
+
+		if (eError == PVRSRV_OK)
+		{
+			gpsHandleFuncs = NULL;
+		}
+	}
+	else
+	{
+		/* If we don't have a handle function table we shouldn't have a handle base either */
+		PVR_ASSERT(gpsKernelHandleBase == NULL);
+	}
+
+	if (gbLockInitialised)
+	{
+		OSLockDestroy(gKernelHandleLock);
+		gbLockInitialised = IMG_FALSE;
+	}
+
+	return eError;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle.h
new file mode 100644
index 0000000..a668e11
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle.h
@@ -0,0 +1,202 @@
+/**************************************************************************/ /*!
+@File
+@Title          Handle Manager API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provide handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__HANDLE_H__)
+#define __HANDLE_H__
+
+#include "lock_types.h"
+
+/*
+ * Handle API
+ * ----------
+ * The handle API is intended to provide handles for kernel resources,
+ * which can then be passed back to user space processes.
+ *
+ * The following functions comprise the API.  Each function takes a
+ * pointer to a PVRSRV_HANDLE_BASE structure, one of which is allocated
+ * for each process, and stored in the per-process data area.  Use
+ * KERNEL_HANDLE_BASE for handles not allocated for a particular process,
+ * or for handles that need to be allocated before the PVRSRV_HANDLE_BASE
+ * structure for the process is available.
+ *
+ * PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType,
+ * 	PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+ *
+ * Allocate a handle phHandle, for the resource of type eType pointed to by
+ * pvData.
+ *
+ * For handles that have a definite lifetime, where the corresponding
+ * resource is explicitly created and destroyed, eFlag should be zero.
+ *
+ * If a particular resource may be referenced multiple times by a
+ * given process, setting eFlag to PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ * will allow multiple handles to be allocated for the resource.
+ * Such handles cannot be found with PVRSRVFindHandle.
+ *
+ * PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType,
+ * 	PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+ *
+ * This function is similar to PVRSRVAllocHandle, except that the allocated
+ * handles are associated with a parent handle, hParent, that has been
+ * allocated previously.  Subhandles are automatically deallocated when their
+ * parent handle is deallocated.
+ * Subhandles can be treated as ordinary handles.  For example, they may
+ * have subhandles of their own, and may be explicitly deallocated using
+ * PVRSRVReleaseHandle (see below).
+ *
+ * PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Find the handle previously allocated for the resource pointed to by
+ * pvData, of type eType.  Handles allocated with the flag
+ * PVRSRV_HANDLE_ALLOC_FLAG_MULTI cannot be found using this
+ * function.
+ *
+ * PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType,
+ * 	IMG_BOOL bRef);
+ *
+ * Given a handle for a resource of type eType, return the pointer to the
+ * resource.
+ *
+ * PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType,
+ * 	IMG_HANDLE hAncestor);
+ *
+ * Similar to PVRSRVLookupHandle, but checks the handle is a descendant
+ * of hAncestor.
+ *
+ * PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Deallocate a handle of given type.
+ */
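+
+/* A minimal usage sketch (illustrative only: psBase, pvMyData and
+ * MyReleaseFunc are hypothetical, the handle type is an arbitrary example,
+ * and error handling is omitted):
+ *
+ *	IMG_HANDLE hHandle;
+ *	void *pvLookedUp;
+ *
+ *	eError = PVRSRVAllocHandle(psBase, &hHandle, pvMyData,
+ *	                           PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ *	                           PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ *	                           MyReleaseFunc);
+ *	eError = PVRSRVLookupHandle(psBase, &pvLookedUp, hHandle,
+ *	                            PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_FALSE);
+ *	eError = PVRSRVReleaseHandle(psBase, hHandle,
+ *	                             PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ */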
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "hash.h"
+
+typedef enum
+{
+	#define HANDLETYPE(x) PVRSRV_HANDLE_TYPE_##x,
+	#include "handle_types.h"
+	#undef HANDLETYPE
+} PVRSRV_HANDLE_TYPE;
+
+static_assert(PVRSRV_HANDLE_TYPE_NONE == 0, "PVRSRV_HANDLE_TYPE_NONE must be zero");
+
+typedef enum
+{
+	PVRSRV_HANDLE_BASE_TYPE_CONNECTION,
+	PVRSRV_HANDLE_BASE_TYPE_PROCESS,
+	PVRSRV_HANDLE_BASE_TYPE_GLOBAL
+} PVRSRV_HANDLE_BASE_TYPE;
+
+typedef enum
+{
+	/* No flags */
+	PVRSRV_HANDLE_ALLOC_FLAG_NONE = 		0,
+	/* Multiple handles can point at the given data pointer */
+	PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 		0x01,
+	/* Subhandles are allocated in a private handle space */
+	PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 		0x02
+} PVRSRV_HANDLE_ALLOC_FLAG;
+
+typedef struct _HANDLE_BASE_ PVRSRV_HANDLE_BASE;
+
+typedef struct _PROCESS_HANDLE_BASE_
+{
+	PVRSRV_HANDLE_BASE *psHandleBase;
+	ATOMIC_T iRefCount;
+} PROCESS_HANDLE_BASE;
+
+extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
+#define	KERNEL_HANDLE_BASE (gpsKernelHandleBase)
+
+#define HANDLE_DEBUG_LISTING_MAX_NUM 20
+
+typedef PVRSRV_ERROR (*PFN_HANDLE_RELEASE)(void *pvData);
+
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef);
+PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef);
+
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor);
+
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+PVRSRV_ERROR PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase,
+                                   PVRSRV_HANDLE_BASE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime);
+
+PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVHandleInit(void);
+
+PVRSRV_ERROR PVRSRVHandleDeInit(void);
+
+PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void);
+
+void LockHandle(PVRSRV_HANDLE_BASE *psBase);
+void UnlockHandle(PVRSRV_HANDLE_BASE *psBase);
+
+#endif /* !defined(__HANDLE_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle_idr.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle_idr.c
new file mode 100644
index 0000000..915abaa
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle_idr.c
@@ -0,0 +1,440 @@
+/*************************************************************************/ /*!
+@File
+@Title		Resource Handle Manager - IDR Back-end
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Provide IDR based resource handle management back-end
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+
+#include "handle_impl.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#define ID_VALUE_MIN	1
+#define ID_VALUE_MAX	INT_MAX
+
+#define	ID_TO_HANDLE(i) ((IMG_HANDLE)(uintptr_t)(i))
+#define	HANDLE_TO_ID(h) ((IMG_INT)(uintptr_t)(h))
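+
+/* With this back-end a handle is simply the IDR id widened to a
+ * pointer-sized opaque value (e.g. id 42 becomes (IMG_HANDLE)42); it is
+ * never a dereferenceable pointer.
+ */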
+
+struct _HANDLE_IMPL_BASE_
+{
+	struct idr sIdr;
+
+	IMG_UINT32 ui32MaxHandleValue;
+
+	IMG_UINT32 ui32TotalHandCount;
+};
+
+typedef struct _HANDLE_ITER_DATA_WRAPPER_
+{
+	PFN_HANDLE_ITER pfnHandleIter;
+	void *pvHandleIterData;
+} HANDLE_ITER_DATA_WRAPPER;
+
+
+static int HandleIterFuncWrapper(int id, void *data, void *iter_data)
+{
+	HANDLE_ITER_DATA_WRAPPER *psIterData = (HANDLE_ITER_DATA_WRAPPER *)iter_data;
+
+	PVR_UNREFERENCED_PARAMETER(data);
+
+	return (int)psIterData->pfnHandleIter(ID_TO_HANDLE(id), psIterData->pvHandleIterData);
+}
+
+/*!
+******************************************************************************
+
+ @Function	AcquireHandle
+
+ @Description	Acquire a new handle
+
+ @Input		psBase - Pointer to handle base structure
+		phHandle - Points to a handle pointer
+		pvData - Pointer to resource to be associated with the handle
+
+ @Output	phHandle - Points to a handle pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase,
+				  IMG_HANDLE *phHandle,
+				  void *pvData)
+{
+	int id;
+	int result;
+
+	PVR_ASSERT(psBase != NULL);
+	PVR_ASSERT(phHandle != NULL);
+	PVR_ASSERT(pvData != NULL);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+	idr_preload(GFP_KERNEL);
+	id = idr_alloc(&psBase->sIdr, pvData, ID_VALUE_MIN, psBase->ui32MaxHandleValue + 1, 0);
+	idr_preload_end();
+
+	result = id;
+#else
+	do
+	{
+		if (idr_pre_get(&psBase->sIdr, GFP_KERNEL) == 0)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+		result = idr_get_new_above(&psBase->sIdr, pvData, ID_VALUE_MIN, &id);
+	} while (result == -EAGAIN);
+
+	if ((IMG_UINT32)id > psBase->ui32MaxHandleValue)
+	{
+		idr_remove(&psBase->sIdr, id);
+		result = -ENOSPC;
+	}
+#endif
+
+	if (result < 0)
+	{
+		if (result == -ENOSPC)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Limit of %u handles reached",
+				 __func__, psBase->ui32MaxHandleValue));
+
+			return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+		}
+
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psBase->ui32TotalHandCount++;
+
+	*phHandle = ID_TO_HANDLE(id);
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	ReleaseHandle
+
+ @Description	Release a handle that is no longer needed.
+
+ @Input		psBase - Pointer to handle base structure
+		hHandle - Handle to release
+		ppvData - Points to a void data pointer
+
+ @Output	ppvData - Points to a void data pointer
+
+ @Return	PVRSRV_OK or PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR ReleaseHandle(HANDLE_IMPL_BASE *psBase,
+				  IMG_HANDLE hHandle,
+				  void **ppvData)
+{
+	int id = HANDLE_TO_ID(hHandle);
+	void *pvData;
+
+	PVR_ASSERT(psBase);
+
+	/* Get the data associated with the handle. If we get back NULL then
+	   it's an invalid handle */
+
+	pvData = idr_find(&psBase->sIdr, id);
+	if (likely(pvData))
+	{
+		idr_remove(&psBase->sIdr, id);
+		psBase->ui32TotalHandCount--;
+	}
+
+	if (unlikely(pvData == NULL))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Handle out of range (%u > %u)",
+			 __func__, id, psBase->ui32TotalHandCount));
+		return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+	}
+
+	if (ppvData)
+	{
+		*ppvData = pvData;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	GetHandleData
+
+ @Description	Get the data associated with the given handle
+
+ @Input		psBase - Pointer to handle base structure
+		hHandle - Handle from which data should be retrieved
+                ppvData - Points to a void data pointer
+
+ @Output	ppvData - Points to a void data pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR GetHandleData(HANDLE_IMPL_BASE *psBase,
+				  IMG_HANDLE hHandle,
+				  void **ppvData)
+{
+	int id = HANDLE_TO_ID(hHandle);
+	void *pvData;
+
+	PVR_ASSERT(psBase);
+	PVR_ASSERT(ppvData);
+
+	pvData = idr_find(&psBase->sIdr, id);
+	if (likely(pvData))
+	{
+		*ppvData = pvData;
+
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+	}
+}
+
+/*!
+******************************************************************************
+
+ @Function	SetHandleData
+
+ @Description	Set the data associated with the given handle
+
+ @Input		psBase - Pointer to handle base structure
+		hHandle - Handle for which data should be changed
+		pvData - Pointer to new data to be associated with the handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR SetHandleData(HANDLE_IMPL_BASE *psBase,
+				  IMG_HANDLE hHandle,
+				  void *pvData)
+{
+	int id = HANDLE_TO_ID(hHandle);
+	void *pvOldData;
+
+	PVR_ASSERT(psBase);
+
+	pvOldData = idr_replace(&psBase->sIdr, pvData, id);
+	if (IS_ERR(pvOldData))
+	{
+		if (PTR_ERR(pvOldData) == -ENOENT)
+		{
+			return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED;
+		}
+		else
+		{
+			return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR IterateOverHandles(HANDLE_IMPL_BASE *psBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData)
+{
+	HANDLE_ITER_DATA_WRAPPER sIterData;
+
+	PVR_ASSERT(psBase);
+	PVR_ASSERT(pfnHandleIter);
+
+	sIterData.pfnHandleIter = pfnHandleIter;
+	sIterData.pvHandleIterData = pvHandleIterData;
+
+	return (PVRSRV_ERROR)idr_for_each(&psBase->sIdr, HandleIterFuncWrapper, &sIterData);
+}
+
+/*!
+******************************************************************************
+
+ @Function	EnableHandlePurging
+
+ @Description	Enable purging for a given handle base
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR EnableHandlePurging(HANDLE_IMPL_BASE *psBase)
+{
+	PVR_UNREFERENCED_PARAMETER(psBase);
+	PVR_ASSERT(psBase);
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PurgeHandles
+
+ @Description	Purge handles for a given handle base
+
+ @Input 	psBase - Pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR PurgeHandles(HANDLE_IMPL_BASE *psBase)
+{
+	PVR_UNREFERENCED_PARAMETER(psBase);
+	PVR_ASSERT(psBase);
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	CreateHandleBase
+
+ @Description	Create a handle base structure
+
+ @Input 	ppsBase - pointer to handle base structure pointer
+
+ @Output	ppsBase - points to handle base structure pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR CreateHandleBase(HANDLE_IMPL_BASE **ppsBase)
+{
+	HANDLE_IMPL_BASE *psBase;
+
+	PVR_ASSERT(ppsBase);
+
+	psBase = OSAllocZMem(sizeof(*psBase));
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate generic handle base",
+				 __func__));
+
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	idr_init(&psBase->sIdr);
+
+	psBase->ui32MaxHandleValue = ID_VALUE_MAX;
+	psBase->ui32TotalHandCount = 0;
+
+	*ppsBase = psBase;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	DestroyHandleBase
+
+ @Description	Destroy a handle base structure
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase)
+{
+	PVR_ASSERT(psBase);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+	idr_remove_all(&psBase->sIdr);
+#endif
+
+	/* Finally destroy the idr */
+	idr_destroy(&psBase->sIdr);
+
+	OSFreeMem(psBase);
+
+	return PVRSRV_OK;
+}
+
+
+static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab =
+{
+	.pfnAcquireHandle = AcquireHandle,
+	.pfnReleaseHandle = ReleaseHandle,
+	.pfnGetHandleData = GetHandleData,
+	.pfnSetHandleData = SetHandleData,
+	.pfnIterateOverHandles = IterateOverHandles,
+	.pfnEnableHandlePurging = EnableHandlePurging,
+	.pfnPurgeHandles = PurgeHandles,
+	.pfnCreateHandleBase = CreateHandleBase,
+	.pfnDestroyHandleBase = DestroyHandleBase
+};
+
+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs)
+{
+	static IMG_BOOL bAcquired = IMG_FALSE;
+
+	if (bAcquired)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Function table already acquired",
+			 __func__));
+		return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+	}
+
+	if (ppsFuncs == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*ppsFuncs = &g_sHandleFuncTab;
+
+	bAcquired = IMG_TRUE;
+
+	return PVRSRV_OK;
+}
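+
+/*
+ * Usage sketch (illustrative only): a client of this backend is expected to
+ * fetch the function table once and drive all handle operations through it.
+ * The pvMyData pointer below is a placeholder, not a name from this driver.
+ *
+ *	HANDLE_IMPL_FUNCTAB const *psFuncs;
+ *	HANDLE_IMPL_BASE *psBase;
+ *	IMG_HANDLE hHandle;
+ *
+ *	if (PVRSRVHandleGetFuncTable(&psFuncs) == PVRSRV_OK &&
+ *	    psFuncs->pfnCreateHandleBase(&psBase) == PVRSRV_OK)
+ *	{
+ *		(void) psFuncs->pfnAcquireHandle(psBase, &hHandle, pvMyData);
+ *		(void) psFuncs->pfnReleaseHandle(psBase, hHandle, NULL);
+ *		(void) psFuncs->pfnDestroyHandleBase(psBase);
+ *	}
+ */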
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle_impl.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle_impl.h
new file mode 100644
index 0000000..ad35236
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle_impl.h
@@ -0,0 +1,89 @@
+/**************************************************************************/ /*!
+@File
+@Title          Implementation Callbacks for Handle Manager API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the handle manager API. This file is for declarations
+                and definitions that are private/internal to the handle manager
+                API but need to be shared between the generic handle manager
+                code and the various handle manager backends, i.e. the code that
+                implements the various callbacks.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__HANDLE_IMPL_H__)
+#define __HANDLE_IMPL_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+typedef struct _HANDLE_IMPL_BASE_ HANDLE_IMPL_BASE;
+
+typedef PVRSRV_ERROR (*PFN_HANDLE_ITER)(IMG_HANDLE hHandle, void *pvData);
+
+typedef struct _HANDLE_IMPL_FUNCTAB_
+{
+	/* Acquire a new handle which is associated with the given data */
+	PVRSRV_ERROR (*pfnAcquireHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE *phHandle, void *pvData);
+
+	/* Release the given handle (optionally returning the data associated with it) */
+	PVRSRV_ERROR (*pfnReleaseHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData);
+
+	/* Get the data associated with the given handle */
+	PVRSRV_ERROR (*pfnGetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData);
+
+	/* Set the data associated with the given handle */
+	PVRSRV_ERROR (*pfnSetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void *pvData);
+
+	PVRSRV_ERROR (*pfnIterateOverHandles)(HANDLE_IMPL_BASE *psHandleBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData);
+
+	/* Enable handle purging on the given handle base */
+	PVRSRV_ERROR (*pfnEnableHandlePurging)(HANDLE_IMPL_BASE *psHandleBase);
+
+	/* Purge handles on the given handle base */
+	PVRSRV_ERROR (*pfnPurgeHandles)(HANDLE_IMPL_BASE *psHandleBase);
+
+	/* Create handle base */
+	PVRSRV_ERROR (*pfnCreateHandleBase)(HANDLE_IMPL_BASE **psHandleBase);
+
+	/* Destroy handle base */
+	PVRSRV_ERROR (*pfnDestroyHandleBase)(HANDLE_IMPL_BASE *psHandleBase);
+} HANDLE_IMPL_FUNCTAB;
+
+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs);
+
+#endif /* !defined(__HANDLE_IMPL_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle_types.h
new file mode 100644
index 0000000..734234e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/handle_types.h
@@ -0,0 +1,87 @@
+/**************************************************************************/ /*!
+@File
+@Title          Handle Manager handle types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provide handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+/* NOTE: Do not add include guards to this file */
+
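+/*
+ * This file is an X-macro list: each consumer defines HANDLETYPE(x) before
+ * including it and undefines it afterwards. A minimal sketch (the enum name
+ * and prefix below are illustrative assumptions, not names from this file):
+ *
+ *	typedef enum _EXAMPLE_HANDLE_TYPE_
+ *	{
+ *	#define HANDLETYPE(x) EXAMPLE_HANDLE_TYPE_##x,
+ *	#include "handle_types.h"
+ *	#undef HANDLETYPE
+ *	} EXAMPLE_HANDLE_TYPE;
+ */
+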
+HANDLETYPE(NONE)
+HANDLETYPE(SHARED_EVENT_OBJECT)
+HANDLETYPE(EVENT_OBJECT_CONNECT)
+HANDLETYPE(PMR_LOCAL_EXPORT_HANDLE)
+HANDLETYPE(PHYSMEM_PMR)
+HANDLETYPE(PHYSMEM_PMR_EXPORT)
+HANDLETYPE(PHYSMEM_PMR_SECURE_EXPORT)
+HANDLETYPE(DEVMEMINT_CTX)
+HANDLETYPE(DEVMEMINT_CTX_EXPORT)
+HANDLETYPE(DEVMEMINT_HEAP)
+HANDLETYPE(DEVMEMINT_RESERVATION)
+HANDLETYPE(DEVMEMINT_MAPPING)
+HANDLETYPE(RGX_FW_MEMDESC)
+HANDLETYPE(RGX_RTDATA_CLEANUP)
+HANDLETYPE(RGX_FREELIST)
+HANDLETYPE(RGX_MEMORY_BLOCK)
+HANDLETYPE(RGX_SERVER_RENDER_CONTEXT)
+HANDLETYPE(RGX_SERVER_TQ_CONTEXT)
+HANDLETYPE(RGX_SERVER_TQ_TDM_CONTEXT)
+HANDLETYPE(RGX_SERVER_COMPUTE_CONTEXT)
+HANDLETYPE(RGX_SERVER_KICKSYNC_CONTEXT)
+HANDLETYPE(SYNC_PRIMITIVE_BLOCK)
+HANDLETYPE(SERVER_SYNC_PRIMITIVE)
+HANDLETYPE(SERVER_SYNC_EXPORT)
+HANDLETYPE(SERVER_OP_COOKIE)
+HANDLETYPE(SYNC_RECORD_HANDLE)
+HANDLETYPE(PVRSRV_TIMELINE_SERVER)
+HANDLETYPE(PVRSRV_FENCE_SERVER)
+HANDLETYPE(PVRSRV_FENCE_EXPORT)
+HANDLETYPE(RGX_FWIF_RENDERTARGET)
+HANDLETYPE(RGX_FWIF_ZSBUFFER)
+HANDLETYPE(RGX_POPULATION)
+HANDLETYPE(DC_DEVICE)
+HANDLETYPE(DC_DISPLAY_CONTEXT)
+HANDLETYPE(DC_BUFFER)
+HANDLETYPE(DC_PIN_HANDLE)
+HANDLETYPE(DEVMEM_MEM_IMPORT)
+HANDLETYPE(PHYSMEM_PMR_PAGELIST)
+HANDLETYPE(PVR_TL_SD)
+HANDLETYPE(RI_HANDLE)
+HANDLETYPE(DEV_PRIV_DATA)
+HANDLETYPE(MM_PLAT_CLEANUP)
+HANDLETYPE(WORKEST_RETURN_DATA)
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/hash.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/hash.c
new file mode 100644
index 0000000..9b074e5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/hash.c
@@ -0,0 +1,683 @@
+/*************************************************************************/ /*!
+@File
+@Title          Self scaling hash tables.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+   Implements simple self scaling hash tables. Hash collisions are
+   handled by chaining entries together. Hash tables are increased in
+   size when they become more than 75% full and decreased in size
+   when less than 25% full. Hash tables are never decreased below
+   their initial size.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* include/ */
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/shared/include/ */
+#include "hash.h"
+
+/* services/client/include/ or services/server/include/ */
+#include "osfunc.h"
+#include "allocmem.h"
+
+//#define PERF_DBG_RESIZE
+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE)
+#include <sys/time.h>
+#endif
+
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+#define	KEY_TO_INDEX(pHash, key, uSize) \
+	((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize))
+
+#define	KEY_COMPARE(pHash, pKey1, pKey2) \
+	((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2)))
+
+#if defined(__linux__) && defined(__KERNEL__)
+#define _AllocMem OSAllocMemNoStats
+#define _AllocZMem OSAllocZMemNoStats
+#define _FreeMem OSFreeMemNoStats
+#else
+#define _AllocMem OSAllocMem
+#define _AllocZMem OSAllocZMem
+#define _FreeMem OSFreeMem
+#endif
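+
+/*
+ * Editorial note (an assumption, not a statement from this file): in-kernel
+ * builds select the untracked OSAllocMemNoStats variants above, presumably
+ * so that allocations made by the hash table itself bypass the allocation
+ * statistics machinery that may in turn be built on these tables.
+ */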
+
+#define NO_SHRINK 0
+
+/* Each entry in a hash table is placed into a bucket */
+typedef struct _BUCKET_
+{
+	struct _BUCKET_ *pNext; /*!< the next bucket on the same chain */
+	uintptr_t v;            /*!< entry value */
+	uintptr_t k[];          /* PRQA S 0642 */
+	                        /* override dynamic array declaration warning */
+} BUCKET;
+
+struct _HASH_TABLE_
+{
+	IMG_UINT32 uSize;            /*!< current size of the hash table */
+	IMG_UINT32 uCount;           /*!< number of entries currently in the hash table */
+	IMG_UINT32 uMinimumSize;     /*!< the minimum size that the hash table should be re-sized to */
+	IMG_UINT32 uKeySize;         /*!< size of key in bytes */
+	IMG_UINT32 uShrinkThreshold; /*!< The threshold at which to trigger a shrink */
+	IMG_UINT32 uGrowThreshold;   /*!< The threshold at which to trigger a grow */
+	HASH_FUNC*     pfnHashFunc;  /*!< hash function */
+	HASH_KEY_COMP* pfnKeyComp;   /*!< key comparison function */
+	BUCKET**   ppBucketTable;    /*!< the hash table array */
+};
+
+/*************************************************************************/ /*!
+@Function       HASH_Func_Default
+@Description    Hash function intended for hashing keys composed of
+                uintptr_t arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey         A pointer to the key to hash.
+@Input          uHashTabLen  The length of the hash table.
+@Return         The hash value.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_UINT32
+HASH_Func_Default (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+	uintptr_t *p = (uintptr_t *)pKey;
+	IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t);
+	IMG_UINT32 ui;
+	IMG_UINT32 uHashKey = 0;
+
+	PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+	PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0);
+
+	for (ui = 0; ui < uKeyLen; ui++)
+	{
+		IMG_UINT32 uHashPart = (IMG_UINT32)*p++;
+
+		uHashPart += (uHashPart << 12);
+		uHashPart ^= (uHashPart >> 22);
+		uHashPart += (uHashPart << 4);
+		uHashPart ^= (uHashPart >> 9);
+		uHashPart += (uHashPart << 10);
+		uHashPart ^= (uHashPart >> 2);
+		uHashPart += (uHashPart << 7);
+		uHashPart ^= (uHashPart >> 12);
+
+		uHashKey += uHashPart;
+	}
+
+	return uHashKey;
+}
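+
+/*
+ * The shift/xor sequence above is a Jenkins-style integer avalanche mix
+ * (an editorial observation, not a claim from the original source): each
+ * word of the key is scrambled so that similar keys spread across buckets.
+ */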
+
+/*************************************************************************/ /*!
+@Function       HASH_Key_Comp_Default
+@Description    Compares keys composed of uintptr_t arrays.
+@Input          uKeySize    The size of the hash key, in bytes.
+@Input          pKey1       Pointer to first hash key to compare.
+@Input          pKey2       Pointer to second hash key to compare.
+@Return         IMG_TRUE    The keys match.
+                IMG_FALSE   The keys don't match.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Key_Comp_Default (size_t uKeySize, void *pKey1, void *pKey2)
+{
+	uintptr_t *p1 = (uintptr_t *)pKey1;
+	uintptr_t *p2 = (uintptr_t *)pKey2;
+	IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t);
+	IMG_UINT32 ui;
+
+	PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0);
+
+	for (ui = 0; ui < uKeyLen; ui++)
+	{
+		if (*p1++ != *p2++)
+			return IMG_FALSE;
+	}
+
+	return IMG_TRUE;
+}
+
+/*************************************************************************/ /*!
+@Function       _ChainInsert
+@Description    Insert a bucket into the appropriate hash table chain.
+@Input          pHash         The hash table
+@Input          pBucket       The bucket
+@Input          ppBucketTable The hash table array
+@Input          uSize         The size of the hash table array
+@Return         None
+*/ /**************************************************************************/
+static void
+_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize)
+{
+	IMG_UINT32 uIndex;
+
+	/* We assume that all parameters passed by the caller are valid. */
+	PVR_ASSERT (pBucket != NULL);
+	PVR_ASSERT (ppBucketTable != NULL);
+	PVR_ASSERT (uSize != 0);
+
+	uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize);	/* PRQA S 0432,0541 */ /* ignore dynamic array warning */
+	pBucket->pNext = ppBucketTable[uIndex];
+	ppBucketTable[uIndex] = pBucket;
+}
+
+/*************************************************************************/ /*!
+@Function       _Rehash
+@Description    Iterate over every entry in an old hash table and
+                rehash into the new table.
+@Input          ppOldTable   The old hash table
+@Input          uOldSize     The size of the old hash table
+@Input          ppNewTable   The new hash table
+@Input          uNewSize     The size of the new hash table
+@Return         None
+*/ /**************************************************************************/
+static void
+_Rehash (HASH_TABLE *pHash,
+		 BUCKET **ppOldTable, IMG_UINT32 uOldSize,
+		 BUCKET **ppNewTable, IMG_UINT32 uNewSize)
+{
+	IMG_UINT32 uIndex;
+	for (uIndex = 0; uIndex < uOldSize; uIndex++)
+	{
+		BUCKET *pBucket;
+		pBucket = ppOldTable[uIndex];
+		while (pBucket != NULL)
+		{
+			BUCKET *pNextBucket = pBucket->pNext;
+			_ChainInsert (pHash, pBucket, ppNewTable, uNewSize);
+			pBucket = pNextBucket;
+		}
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       _Resize
+@Description    Attempt to resize a hash table, failure to allocate a
+                new larger hash table is not considered a hard failure.
+                We simply continue and allow the table to fill up, the
+                effect is to allow hash chains to become longer.
+@Input          pHash      Hash table to resize.
+@Input          uNewSize   Required table size.
+@Return         IMG_TRUE Success
+                IMG_FALSE Failed
+*/ /**************************************************************************/
+static IMG_BOOL
+_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize)
+{
+	BUCKET **ppNewTable;
+	IMG_UINT32 uiThreshold = uNewSize >> 2;
+#if !defined(__KERNEL__)  && defined(PERF_DBG_RESIZE)
+	struct timeval start, end;
+#endif
+
+	if (uNewSize == pHash->uSize)
+	{
+		return IMG_TRUE;
+	}
+
+#if !defined(__KERNEL__)  && defined(PERF_DBG_RESIZE)
+	gettimeofday(&start, NULL);
+#endif
+
+	ppNewTable = _AllocZMem(sizeof(BUCKET *) * uNewSize);
+	if (ppNewTable == NULL)
+	{
+		return IMG_FALSE;
+	}
+
+	_Rehash(pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize);
+
+	_FreeMem(pHash->ppBucketTable);
+
+#if !defined(__KERNEL__)  && defined(PERF_DBG_RESIZE)
+	gettimeofday(&end, NULL);
+	if ( start.tv_usec > end.tv_usec )
+	{
+		end.tv_usec = 1000000 - start.tv_usec + end.tv_usec;
+	}
+	else
+	{
+		end.tv_usec -=  start.tv_usec;
+	}
+
+	PVR_DPF((PVR_DBG_ERROR, "%s: H:%p O:%d N:%d C:%d G:%d S:%d T:%06luus", __func__, pHash, pHash->uSize, uNewSize, pHash->uCount, pHash->uGrowThreshold, pHash->uShrinkThreshold, end.tv_usec));
+#endif
+
+	/*not nulling pointer, being reassigned just below*/
+	pHash->ppBucketTable = ppNewTable;
+	pHash->uSize = uNewSize;
+
+	pHash->uGrowThreshold = uiThreshold * 3;
+	pHash->uShrinkThreshold = (uNewSize <= pHash->uMinimumSize) ? NO_SHRINK : uiThreshold;
+
+	return IMG_TRUE;
+}
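+
+/*
+ * Worked example of the thresholds computed above (derived from the code):
+ * for uNewSize = 16, uiThreshold = 16 >> 2 = 4, so uGrowThreshold = 12,
+ * i.e. grow once the table is more than 75% full; if 16 is still above
+ * uMinimumSize, uShrinkThreshold = 4, i.e. shrink once it drops below 25%
+ * full. At the minimum size shrinking is disabled via NO_SHRINK.
+ */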
+
+
+/*************************************************************************/ /*!
+@Function       HASH_Create_Extended
+@Description    Create a self scaling hash table, using the supplied
+                key size, and the supplied hash and key comparison
+                functions.
+@Input          uInitialLen   Initial and minimum length of the
+                              hash table, where the length refers to the number
+                              of entries in the hash table, not its size in
+                              bytes.
+@Input          uKeySize      The size of the key, in bytes.
+@Input          pfnHashFunc   Pointer to hash function.
+@Input          pfnKeyComp    Pointer to key comparison function.
+@Return         NULL or hash table handle.
+*/ /**************************************************************************/
+IMG_INTERNAL
+HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp)
+{
+	HASH_TABLE *pHash;
+
+	if (uInitialLen == 0 || uKeySize == 0 || pfnHashFunc == NULL || pfnKeyComp == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HASH_Create_Extended: invalid input parameters"));
+		return NULL;
+	}
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen));
+
+	pHash = _AllocMem(sizeof(HASH_TABLE));
+	if (pHash == NULL)
+	{
+		return NULL;
+	}
+
+	pHash->uCount = 0;
+	pHash->uSize = uInitialLen;
+	pHash->uMinimumSize = uInitialLen;
+	pHash->uKeySize = uKeySize;
+	pHash->uGrowThreshold = (uInitialLen >> 2) * 3;
+	pHash->uShrinkThreshold = NO_SHRINK;
+	pHash->pfnHashFunc = pfnHashFunc;
+	pHash->pfnKeyComp = pfnKeyComp;
+
+	pHash->ppBucketTable = _AllocZMem(sizeof (BUCKET *) * pHash->uSize);
+	if (pHash->ppBucketTable == NULL)
+	{
+		_FreeMem(pHash);
+		/*not nulling pointer, out of scope*/
+		return NULL;
+	}
+
+	return pHash;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Create
+@Description    Create a self scaling hash table with a key
+                consisting of a single uintptr_t, and using
+                the default hash and key comparison functions.
+@Input          uInitialLen   Initial and minimum length of the
+                              hash table, where the length refers to the
+                              number of entries in the hash table, not its size
+                              in bytes.
+@Return         NULL or hash table handle.
+*/ /**************************************************************************/
+IMG_INTERNAL
+HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen)
+{
+	return HASH_Create_Extended(uInitialLen, sizeof(uintptr_t),
+								&HASH_Func_Default, &HASH_Key_Comp_Default);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Delete
+@Description    Delete a hash table created by HASH_Create_Extended or
+                HASH_Create.  All entries in the table must have been
+                removed before calling this function.
+@Input          pHash     Hash table
+@Return         None
+*/ /**************************************************************************/
+IMG_INTERNAL void
+HASH_Delete (HASH_TABLE *pHash)
+{
+	IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__) && !defined(__QNXNTO__)
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if (psPVRSRVData != NULL)
+	{
+		if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+		{
+			bDoCheck = IMG_FALSE;
+		}
+	}
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	else
+	{
+		bDoCheck = IMG_FALSE;
+	}
+#endif
+#endif
+	if (pHash != NULL)
+	{
+		PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Delete"));
+
+		if (bDoCheck)
+		{
+			PVR_ASSERT (pHash->uCount==0);
+		}
+		if (pHash->uCount != 0)
+		{
+			IMG_UINT32 i;
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Leak detected in hash table!", __func__));
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmemcontext", __func__));
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Removing remaining %u hash entries.", __func__, pHash->uCount));
+
+			for (i = 0; i < pHash->uSize; i++)
+			{
+				BUCKET *pBucket = pHash->ppBucketTable[i];
+				while (pBucket != NULL)
+				{
+					BUCKET *pNextBucket = pBucket->pNext;
+					_FreeMem(pBucket);
+					pBucket = pNextBucket;
+				}
+			}
+
+		}
+		_FreeMem(pHash->ppBucketTable);
+		pHash->ppBucketTable = NULL;
+		_FreeMem(pHash);
+		/*not nulling pointer, copy on stack*/
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert_Extended
+@Description    Insert a key value pair into a hash table created
+                with HASH_Create_Extended.
+@Input          pHash     Hash table
+@Input          pKey      Pointer to the key.
+@Input          v         The value associated with the key.
+@Return         IMG_TRUE  - success
+                IMG_FALSE  - failure
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Insert_Extended (HASH_TABLE *pHash, void *pKey, uintptr_t v)
+{
+	BUCKET *pBucket;
+
+	PVR_ASSERT (pHash != NULL);
+
+	if (pHash == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HASH_Insert_Extended: invalid parameter"));
+		return IMG_FALSE;
+	}
+
+	pBucket = _AllocMem(sizeof(BUCKET) + pHash->uKeySize);
+	if (pBucket == NULL)
+	{
+		return IMG_FALSE;
+	}
+
+	pBucket->v = v;
+	/* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k (linux)*/
+	OSCachedMemCopy(pBucket->k, pKey, pHash->uKeySize);
+
+	_ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize);
+
+	pHash->uCount++;
+
+	/* check if we need to think about re-balancing */
+	if (pHash->uCount > pHash->uGrowThreshold)
+	{
+		/* Ignore the return code from _Resize because the hash table is
+		   still in a valid state and although not ideally sized, it is still
+		   functional */
+		_Resize (pHash, pHash->uSize << 1);
+	}
+
+	return IMG_TRUE;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert
+@Description    Insert a key value pair into a hash table created with
+                HASH_Create.
+@Input          pHash     Hash table
+@Input          k         The key value.
+@Input          v         The value associated with the key.
+@Return         IMG_TRUE - success.
+                IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Insert (HASH_TABLE *pHash, uintptr_t k, uintptr_t v)
+{
+	return HASH_Insert_Extended(pHash, &k, v);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove_Extended
+@Description    Remove a key from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash     Hash table
+@Input          pKey      Pointer to key.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey)
+{
+	BUCKET **ppBucket;
+	IMG_UINT32 uIndex;
+
+	PVR_ASSERT (pHash != NULL);
+
+	if (pHash == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HASH_Remove_Extended: Null hash table"));
+		return 0;
+	}
+
+	uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+	for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext))
+	{
+		/* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
+		if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+		{
+			BUCKET *pBucket = *ppBucket;
+			uintptr_t v = pBucket->v;
+			(*ppBucket) = pBucket->pNext;
+
+			_FreeMem(pBucket);
+			/*not nulling original pointer, already overwritten*/
+
+			pHash->uCount--;
+
+			/* check if we need to think about re-balancing, when the shrink
+			 * threshold is 0 we are at the minimum size, no further shrink */
+			if (pHash->uCount < pHash->uShrinkThreshold)
+			{
+				/* Ignore the return code from _Resize because the
+				   hash table is still in a valid state and although
+				   not ideally sized, it is still functional */
+				_Resize(pHash, MAX(pHash->uSize >> 1, pHash->uMinimumSize));
+			}
+
+			return v;
+		}
+	}
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove
+@Description    Remove a key value pair from a hash table created
+                with HASH_Create.
+@Input          pHash     Hash table
+@Input          k         The key
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Remove (HASH_TABLE *pHash, uintptr_t k)
+{
+	return HASH_Remove_Extended(pHash, &k);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve_Extended
+@Description    Retrieve a value from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash     Hash table
+@Input          pKey      Pointer to the key.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Retrieve_Extended (HASH_TABLE *pHash, void *pKey)
+{
+	BUCKET **ppBucket;
+	IMG_UINT32 uIndex;
+
+	PVR_ASSERT (pHash != NULL);
+
+	if (pHash == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HASH_Retrieve_Extended: Null hash table"));
+		return 0;
+	}
+
+	uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+	for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext))
+	{
+		/* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
+		if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+		{
+			BUCKET *pBucket = *ppBucket;
+			uintptr_t v = pBucket->v;
+
+			return v;
+		}
+	}
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve
+@Description    Retrieve a value from a hash table created with
+                HASH_Create.
+@Input          pHash     Hash table
+@Input          k         The key
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Retrieve (HASH_TABLE *pHash, uintptr_t k)
+{
+	return HASH_Retrieve_Extended(pHash, &k);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Iterate
+@Description    Iterate over every entry in the hash table
+@Input          pHash         Hash table to iterate
+@Input          pfnCallback   Callback to call with the key and data for each
+                              entry in the hash table
+@Return         Callback error if any, otherwise PVRSRV_OK
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback)
+{
+	IMG_UINT32 uIndex;
+	for (uIndex=0; uIndex < pHash->uSize; uIndex++)
+	{
+		BUCKET *pBucket;
+		pBucket = pHash->ppBucketTable[uIndex];
+		while (pBucket != NULL)
+		{
+			PVRSRV_ERROR eError;
+			BUCKET *pNextBucket = pBucket->pNext;
+
+			eError = pfnCallback((uintptr_t) ((void *) *(pBucket->k)), pBucket->v);
+
+			/* The callback might want us to break out early */
+			if (eError != PVRSRV_OK)
+				return eError;
+
+			pBucket = pNextBucket;
+		}
+	}
+	return PVRSRV_OK;
+}
+
+#ifdef HASH_TRACE
+/*************************************************************************/ /*!
+@Function       HASH_Dump
+@Description    To dump the contents of a hash table in human readable
+                form.
+@Input          pHash     Hash table
+*/ /**************************************************************************/
+void
+HASH_Dump (HASH_TABLE *pHash)
+{
+	IMG_UINT32 uIndex;
+	IMG_UINT32 uMaxLength=0;
+	IMG_UINT32 uEmptyCount=0;
+
+	PVR_ASSERT (pHash != NULL);
+	for (uIndex=0; uIndex<pHash->uSize; uIndex++)
+	{
+		BUCKET *pBucket;
+		IMG_UINT32 uLength = 0;
+		if (pHash->ppBucketTable[uIndex] == NULL)
+		{
+			uEmptyCount++;
+		}
+		for (pBucket=pHash->ppBucketTable[uIndex];
+				pBucket != NULL;
+				pBucket = pBucket->pNext)
+		{
+			uLength++;
+		}
+		uMaxLength = MAX(uMaxLength, uLength);
+	}
+
+	PVR_TRACE(("hash table: uMinimumSize=%d  size=%d  count=%d",
+			   pHash->uMinimumSize, pHash->uSize, pHash->uCount));
+	PVR_TRACE(("  empty=%d  max=%d", uEmptyCount, uMaxLength));
+}
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/hash.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/hash.h
new file mode 100644
index 0000000..12e7814
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/hash.h
@@ -0,0 +1,228 @@
+/*************************************************************************/ /*!
+@File
+@Title          Self scaling hash tables
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements simple self scaling hash tables.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _HASH_H_
+#define _HASH_H_
+
+/* include5/ */
+#include "img_types.h"
+
+#include "pvrsrv_error.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Keys passed to the comparison function are only guaranteed to
+ * be aligned on an uintptr_t boundary.
+ */
+typedef IMG_UINT32 HASH_FUNC(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+typedef IMG_BOOL HASH_KEY_COMP(size_t uKeySize, void *pKey1, void *pKey2);
+
+typedef struct _HASH_TABLE_ HASH_TABLE;
+
+typedef PVRSRV_ERROR (*HASH_pfnCallback) (
+	uintptr_t k,
+	uintptr_t v
+);
+
+/*************************************************************************/ /*!
+@Function       HASH_Func_Default
+@Description    Hash function intended for hashing keys composed of
+                uintptr_t arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey         A pointer to the key to hash.
+@Input          uHashTabLen  The length of the hash table.
+@Return         The hash value.
+*/ /**************************************************************************/
+IMG_UINT32 HASH_Func_Default (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+
+/*************************************************************************/ /*!
+@Function       HASH_Key_Comp_Default
+@Description    Compares keys composed of uintptr_t arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey1        Pointer to first hash key to compare.
+@Input          pKey2        Pointer to second hash key to compare.
+@Return         IMG_TRUE  - the keys match.
+                IMG_FALSE - the keys don't match.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Key_Comp_Default (size_t uKeySize, void *pKey1, void *pKey2);
+
+/*************************************************************************/ /*!
+@Function       HASH_Create_Extended
+@Description    Create a self scaling hash table, using the supplied
+                key size, and the supplied hash and key comparison
+                functions.
+@Input          uInitialLen   Initial and minimum length of the
+                              hash table, where the length refers to the number
+                              of entries in the hash table, not its size in
+                              bytes.
+@Input          uKeySize      The size of the key, in bytes.
+@Input          pfnHashFunc   Pointer to hash function.
+@Input          pfnKeyComp    Pointer to key comparison function.
+@Return         NULL or hash table handle.
+*/ /**************************************************************************/
+HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
+
+/*************************************************************************/ /*!
+@Function       HASH_Create
+@Description    Create a self scaling hash table with a key
+                consisting of a single uintptr_t, and using
+                the default hash and key comparison functions.
+@Input          uInitialLen   Initial and minimum length of the
+                              hash table, where the length refers to the
+                              number of entries in the hash table, not its size
+                              in bytes.
+@Return         NULL or hash table handle.
+*/ /**************************************************************************/
+HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen);
+
+/*************************************************************************/ /*!
+@Function       HASH_Delete
+@Description    Delete a hash table created by HASH_Create_Extended or
+                HASH_Create.  All entries in the table must have been
+                removed before calling this function.
+@Input          pHash         Hash table
+*/ /**************************************************************************/
+void HASH_Delete (HASH_TABLE *pHash);
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert_Extended
+@Description    Insert a key value pair into a hash table created
+                with HASH_Create_Extended.
+@Input          pHash         The hash table.
+@Input          pKey          Pointer to the key.
+@Input          v             The value associated with the key.
+@Return         IMG_TRUE  - success
+                IMG_FALSE  - failure
+*/ /**************************************************************************/
+IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, void *pKey, uintptr_t v);
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert
+
+@Description    Insert a key value pair into a hash table created with
+                HASH_Create.
+@Input          pHash         The hash table.
+@Input          k             The key value.
+@Input          v             The value associated with the key.
+@Return         IMG_TRUE - success.
+                IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Insert (HASH_TABLE *pHash, uintptr_t k, uintptr_t v);
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove_Extended
+@Description    Remove a key from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash         The hash table.
+@Input          pKey          Pointer to key.
+@Return         0 if the key is missing, or the value associated
+                with the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey);
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove
+@Description    Remove a key value pair from a hash table created
+                with HASH_Create.
+@Input          pHash         The hash table.
+@Input          k             The key.
+@Return         0 if the key is missing, or the value associated
+                with the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Remove (HASH_TABLE *pHash, uintptr_t k);
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve_Extended
+@Description    Retrieve a value from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash         The hash table.
+@Input          pKey          Pointer to key.
+@Return         0 if the key is missing, or the value associated with
+                the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Retrieve_Extended (HASH_TABLE *pHash, void *pKey);
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve
+@Description    Retrieve a value from a hash table created with
+                HASH_Create.
+@Input          pHash         The hash table.
+@Input          k             The key.
+@Return         0 if the key is missing, or the value associated with
+                the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Retrieve (HASH_TABLE *pHash, uintptr_t k);
+
+/*************************************************************************/ /*!
+@Function       HASH_Iterate
+@Description    Iterate over every entry in the hash table
+@Input          pHash         Hash table to iterate
+@Input          pfnCallback   Callback to call with the key and data for
+                              each entry in the hash table
+@Return         Callback error if any, otherwise PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback);
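+
+/*
+ * Usage sketch for the single-word-key API (illustrative only; the key and
+ * payload values are arbitrary examples, not part of this interface):
+ *
+ *	HASH_TABLE *pTab = HASH_Create(16);
+ *	if (pTab != NULL)
+ *	{
+ *		(void) HASH_Insert(pTab, (uintptr_t) 0x1234, (uintptr_t) pvPayload);
+ *		pvPayload = (void *) HASH_Retrieve(pTab, (uintptr_t) 0x1234);
+ *		(void) HASH_Remove(pTab, (uintptr_t) 0x1234);
+ *		HASH_Delete(pTab);
+ *	}
+ *
+ * HASH_Delete expects the table to be empty, so all entries are removed
+ * before it is called.
+ */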
+
+#ifdef HASH_TRACE
+/*************************************************************************/ /*!
+@Function       HASH_Dump
+@Description    Dump out some information about a hash table.
+@Input          pHash         The hash table.
+*/ /**************************************************************************/
+void HASH_Dump (HASH_TABLE *pHash);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* _HASH_H_ */
+
+/******************************************************************************
+ End of file (hash.h)
+******************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htb_debug.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htb_debug.c
new file mode 100644
index 0000000..af7c554
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htb_debug.c
@@ -0,0 +1,1251 @@
+/*************************************************************************/ /*!
+@File           htb_debug.c
+@Title          Debug Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides kernel side debugFS Functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxdevice.h"
+#include "htbserver.h"
+#include "htbuffer.h"
+#include "htbuffer_types.h"
+#include "tlstream.h"
+#include "tlclient.h"
+#include "pvrsrv_tlcommon.h"
+#include "pvr_debugfs.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "htb_debug.h"
+#include "kernel_compatibility.h"
+
+// Global data handles for buffer manipulation and processing
+typedef struct
+{
+	PPVR_DEBUGFS_ENTRY_DATA psDumpHostDebugFSEntry;	/* debugFS entry hook */
+	IMG_HANDLE hStream;                 /* Stream handle for debugFS use */
+} HTB_DBG_INFO;
+
+static HTB_DBG_INFO g_sHTBData;
+
+// Enable for extra debug level
+//#define HTB_CHATTY	1
+
+/******************************************************************************
+ * debugFS display routines
+ *****************************************************************************/
+static int HTBDumpBuffer(DUMPDEBUG_PRINTF_FUNC *, void *, void *);
+static void _HBTraceSeqPrintf(void *, const IMG_CHAR *, ...);
+static int _DebugHBTraceSeqShow(struct seq_file *, void *);
+static void *_DebugHBTraceSeqStart(struct seq_file *, loff_t *);
+static void _DebugHBTraceSeqStop(struct seq_file *, void *);
+static void *_DebugHBTraceSeqNext(struct seq_file *, void *, loff_t *);
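+
+/*
+ * These hooks implement the standard Linux seq_file iteration protocol.
+ * A sketch of the calling order driven by seq_read() (illustrative, not
+ * code from this file):
+ *
+ *	pv = start(sf, &pos);
+ *	while (pv != NULL && show(sf, pv) == 0)
+ *		pv = next(sf, pv, &pos);
+ *	stop(sf, pv);
+ */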
+
+static void _HBTraceSeqPrintf(void *pvDumpDebugFile,
+                              const IMG_CHAR *pszFormat, ...)
+{
+	struct seq_file *psSeqFile = (struct seq_file *)pvDumpDebugFile;
+	va_list         ArgList;
+
+	va_start(ArgList, pszFormat);
+	seq_vprintf(psSeqFile, pszFormat, ArgList);
+	va_end(ArgList);
+}
+
+static int _DebugHBTraceSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	int	retVal;
+
+	PVR_ASSERT(NULL != psSeqFile);
+
+	/* psSeqFile should never be NULL */
+	if (psSeqFile == NULL)
+	{
+		return -1;
+	}
+
+	/*
+	 * Ensure that we have a valid address to use to dump info from. If NULL we
+	 * return a failure code to terminate the seq_read() call. pvData is either
+	 * SEQ_START_TOKEN (for the initial call) or an HTB buffer address for
+	 * subsequent calls [returned from the NEXT function].
+	 */
+	if (pvData == NULL)
+	{
+		return -1;
+	}
+
+
+	retVal = HTBDumpBuffer(_HBTraceSeqPrintf, psSeqFile, pvData);
+
+#ifdef HTB_CHATTY
+	PVR_DPF((PVR_DBG_WARNING, "%s: Returning %d", __func__, retVal));
+#endif	/* HTB_CHATTY */
+
+	return retVal;
+}
+
+typedef struct {
+	IMG_PBYTE	pBuf;		/* Raw data buffer from TL stream */
+	IMG_UINT32	uiBufLen;	/* Amount of data to process from 'pBuf' */
+	IMG_UINT32	uiTotal;	/* Total bytes processed */
+	IMG_UINT32	uiMsgLen;	/* Length of HTB message to be processed */
+	IMG_PBYTE	pCurr;		/* pointer to current message to be decoded */
+	IMG_CHAR	szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];	/* Output string */
+} HTB_Sentinel_t;
+
+static IMG_UINT32 idToLogIdx(IMG_UINT32);	/* Forward declaration */
+
+/*
+ * HTB_GetNextMessage
+ *
+ * Get next non-empty message block from the buffer held in pSentinel->pBuf
+ * If we exhaust the data buffer we refill it (after releasing the previous
+ * message(s) [only one non-NULL message, but PAD messages will get released
+ * as we traverse them]).
+ *
+ * Input:
+ *	pSentinel		references the already acquired data buffer
+ *
+ * Output:
+ *	pSentinel
+ *		-> uiMsglen updated to the size of the non-NULL message
+ *
+ * Returns:
+ *	Address of first non-NULL message in the buffer (if any)
+ *	NULL if there is no further data available from the stream and the buffer
+ *	contents have been drained.
+ */
+static IMG_PBYTE HTB_GetNextMessage(HTB_Sentinel_t *);
+static IMG_PBYTE HTB_GetNextMessage(HTB_Sentinel_t *pSentinel)
+{
+	IMG_PBYTE	pNext, pLast, pStart, pData = NULL;
+	IMG_PBYTE	pCurrent;		/* Current processing point within buffer */
+	PVRSRVTL_PPACKETHDR	ppHdr;	/* Current packet header */
+	IMG_UINT32	uiHdrType;		/* Packet header type */
+	IMG_UINT32	uiMsgSize;		/* Message size of current packet (bytes) */
+	IMG_UINT32	ui32DataSize;
+	IMG_UINT32	uiBufLen;
+	IMG_BOOL	bUnrecognizedErrorPrinted = IMG_FALSE;
+	IMG_UINT32	ui32Data;
+	IMG_UINT32	ui32LogIdx;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(NULL != pSentinel);
+
+	uiBufLen = pSentinel->uiBufLen;
+	/* Convert from byte to uint32 size */
+	ui32DataSize = pSentinel->uiBufLen / sizeof(IMG_UINT32);
+
+	pLast = pSentinel->pBuf + pSentinel->uiBufLen;
+
+	pStart = pSentinel->pBuf;
+
+	pNext = pStart;
+	pSentinel->uiMsgLen = 0;	// Reset count for this message
+	uiMsgSize = 0;				// nothing processed so far
+	ui32LogIdx = HTB_SF_LAST;	// Loop terminator condition
+
+	do
+	{
+		/*
+		 * If we've drained the buffer we must RELEASE and ACQUIRE some more.
+		 */
+		if (pNext >= pLast)
+		{
+			eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream);
+			PVR_ASSERT(eError == PVRSRV_OK);
+
+			eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+				g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
+
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'", __func__,
+					"TLClientAcquireData", PVRSRVGETERRORSTRING(eError)));
+				return NULL;
+			}
+
+			// Reset our limits - if we've returned an empty buffer we're done.
+			pLast = pSentinel->pBuf + pSentinel->uiBufLen;
+			pStart = pSentinel->pBuf;
+			pNext = pStart;
+
+			if (pStart == NULL || pLast == NULL)
+			{
+				return NULL;
+			}
+		}
+
+		/*
+		 * We should have a header followed by data block(s) in the stream.
+		 */
+
+		pCurrent = pNext;
+		ppHdr = GET_PACKET_HDR(pCurrent);
+
+		if (ppHdr == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Unexpected NULL packet in Host Trace buffer",
+			         __func__));
+			pSentinel->uiMsgLen += uiMsgSize;
+			return NULL;		// This should never happen
+		}
+
+		/*
+		 * This should *NEVER* fire. If it does it means we have got some
+		 * dubious packet header back from the HTB stream. In this case
+		 * the sensible thing is to abort processing and return to
+		 * the caller
+		 */
+		uiHdrType = GET_PACKET_TYPE(ppHdr);
+
+		PVR_ASSERT(uiHdrType < PVRSRVTL_PACKETTYPE_LAST &&
+			uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF);
+
+		if (uiHdrType < PVRSRVTL_PACKETTYPE_LAST &&
+			uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF)
+		{
+			/*
+			 * We have a (potentially) valid data header. We should see if
+			 * the associated packet header matches one of our expected
+			 * types.
+			 */
+			pNext = (IMG_PBYTE)GET_NEXT_PACKET_ADDR(ppHdr);
+
+			PVR_ASSERT(pNext != NULL);
+
+			uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr);
+
+			pSentinel->uiMsgLen += uiMsgSize;
+
+			pData = GET_PACKET_DATA_PTR(ppHdr);
+
+			/*
+			 * Handle non-DATA packet types. These include PAD fields which
+			 * may have data associated and other types. We simply discard
+			 * these as they have no decodable information within them.
+			 */
+			if (uiHdrType != PVRSRVTL_PACKETTYPE_DATA)
+			{
+				/*
+				 * Now release the current non-data packet and proceed to the
+				 * next entry (if any).
+				 */
+				eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE,
+				    g_sHTBData.hStream, uiMsgSize);
+
+#ifdef HTB_CHATTY
+				PVR_DPF((PVR_DBG_WARNING, "%s: Packet Type %x Length %u",
+					__func__, uiHdrType, uiMsgSize));
+#endif
+
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - '%s' message"
+						" size %u", __func__, "TLClientReleaseDataLess",
+						PVRSRVGETERRORSTRING(eError), uiMsgSize));
+				}
+
+				eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+					g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
+
+				if (PVRSRV_OK != eError)
+				{
+					PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s Giving up",
+						__func__, "TLClientAcquireData",
+						PVRSRVGETERRORSTRING(eError)));
+
+					return NULL;
+				}
+				pSentinel->uiMsgLen = 0;
+				// Reset our limits - if we've returned an empty buffer we're done.
+				pLast = pSentinel->pBuf + pSentinel->uiBufLen;
+				pStart = pSentinel->pBuf;
+				pNext = pStart;
+
+				if (pStart == NULL || pLast == NULL)
+				{
+					return NULL;
+				}
+				continue;
+			}
+			if (pData == NULL || pData >= pLast)
+			{
+				continue;
+			}
+			ui32Data = *(IMG_UINT32 *)pData;
+			ui32LogIdx = idToLogIdx(ui32Data);
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_WARNING, "Unexpected Header @%p value %x",
+				ppHdr, uiHdrType));
+
+			return NULL;
+		}
+
+		/*
+		 * Check if the unrecognized ID is valid and therefore, tracebuf
+		 * needs updating.
+		 */
+		if (HTB_SF_LAST == ui32LogIdx && HTB_LOG_VALIDID(ui32Data)
+			&& IMG_FALSE == bUnrecognizedErrorPrinted)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+			    "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'",
+			    __func__, ui32Data, HTB_SF_GID(ui32Data),
+			    HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData));
+			bUnrecognizedErrorPrinted = IMG_TRUE;
+		}
+
+	} while (HTB_SF_LAST == ui32LogIdx);
+
+#ifdef HTB_CHATTY
+	PVR_DPF((PVR_DBG_WARNING, "%s: Returning data @ %p Log value '%x'",
+		__func__, pCurrent, ui32Data));
+#endif	/* HTB_CHATTY */
+
+	return pCurrent;
+}
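+
+/*
+ * For reference, the ACQUIRE/RELEASE protocol used above follows this general
+ * shape (an illustrative sketch only, with error handling elided):
+ *
+ *	IMG_PBYTE pBuf; IMG_UINT32 uiLen;
+ *
+ *	TLClientAcquireData(DIRECT_BRIDGE_HANDLE, hStream, &pBuf, &uiLen);
+ *	...walk the packet headers in pBuf[0 .. uiLen)...
+ *	TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, hStream, uiConsumed);
+ *
+ * Releasing fewer bytes than were acquired leaves the unconsumed tail in the
+ * stream to be re-acquired on the next call, which is how partially decoded
+ * buffers survive a seq_file overflow.
+ */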
+
+/*
+ * HTB_GetFirstMessage
+ *
+ * Called from START to obtain the buffer address of the first message within
+ * pSentinel->pBuf. Will ACQUIRE data if the buffer is empty.
+ *
+ * Input:
+ *	pSentinel
+ *	puiPosition			Offset within the debugFS file
+ *
+ * Output:
+ *	pSentinel->pCurr	Set to reference the first valid non-NULL message within
+ *						the buffer. If no valid message is found set to NULL.
+ *	pSentinel
+ *		->pBuf		if unset on entry
+ *		->uiBufLen	if pBuf unset on entry
+ *
+ * Side-effects:
+ *	HTB TL stream will be updated to bypass any zero-length PAD messages before
+ *	the first non-NULL message (if any).
+ */
+static void HTB_GetFirstMessage(HTB_Sentinel_t *, loff_t *);
+static void HTB_GetFirstMessage(HTB_Sentinel_t *pSentinel, loff_t *puiPosition)
+{
+	PVRSRV_ERROR	eError;
+
+	PVR_UNREFERENCED_PARAMETER(puiPosition);
+
+	if (pSentinel == NULL)
+		return;
+
+	if (pSentinel->pBuf == NULL)
+	{
+		/* Acquire data */
+		pSentinel->uiMsgLen = 0;
+
+		eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+		    g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
+
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'",
+			    __func__, "TLClientAcquireData", PVRSRVGETERRORSTRING(eError)));
+
+			pSentinel->pBuf = NULL;
+			pSentinel->pCurr = NULL;
+		}
+		else
+		{
+			/*
+			 * If there is no data available we set pSentinel->pCurr to NULL
+			 * and return. This is expected behaviour if we've drained the
+			 * data and nothing else has yet been produced.
+			 */
+			if (pSentinel->uiBufLen == 0 || pSentinel->pBuf == NULL)
+			{
+#ifdef HTB_CHATTY
+				PVR_DPF((PVR_DBG_WARNING, "%s: Empty Buffer @ %p", __func__,
+					pSentinel->pBuf));
+#endif	/* HTB_CHATTY */
+				pSentinel->pCurr = NULL;
+				return;
+			}
+		}
+	}
+
+	/* Locate next message within buffer. NULL => no more data to process */
+	pSentinel->pCurr = HTB_GetNextMessage(pSentinel);
+}
+
+/*
+ * _DebugHBTraceSeqStart:
+ *
+ * Returns the address to use for subsequent 'Show', 'Next', 'Stop' file ops.
+ * Return SEQ_START_TOKEN for the very first call and allocate a sentinel for
+ * use by the 'Show' routine and its helpers.
+ * This is stored in the psSeqFile->private hook field.
+ *
+ * We obtain access to the TLstream associated with the HTB. If this doesn't
+ * exist (because no pvrdebug capture trace has been set) we simply return with
+ * a NULL value which will stop the seq_file traversal.
+ */
+static void *_DebugHBTraceSeqStart(struct seq_file *psSeqFile,
+                                   loff_t *puiPosition)
+{
+	HTB_Sentinel_t	*pSentinel = (HTB_Sentinel_t *)psSeqFile->private;
+	PVRSRV_ERROR	eError;
+	IMG_UINT32		uiTLMode;
+	void			*retVal;
+	IMG_HANDLE		hStream;
+
+	/* Open the stream in non-blocking mode so that we can determine if there
+	 * is no data to consume. Also disable the producer callback (if any) and
+	 * the open callback so that we do not generate spurious trace data when
+	 * accessing the stream.
+	 */
+	uiTLMode = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING|
+			   PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK|
+			   PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK;
+
+	/* If two or more processes try to read from this file at the same time
+	 * the TLClientOpenStream() function will handle this by allowing only
+	 * one of them to actually open the stream. The other process will get
+	 * an error stating that the stream is already open. The open function
+	 * is thread-safe. */
+	eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, HTB_STREAM_NAME, uiTLMode,
+	    &hStream);
+
+	if (PVRSRV_ERROR_ALREADY_OPEN == eError)
+	{
+		/* Stream allows only one reader so return error if it's already
+		 * opened. */
+#ifdef HTB_CHATTY
+		PVR_DPF((PVR_DBG_WARNING, "%s: Stream handle %p already exists for %s",
+		    __func__, g_sHTBData.hStream, HTB_STREAM_NAME));
+#endif
+		return ERR_PTR(-EBUSY);
+	}
+	else if (PVRSRV_OK != eError)
+	{
+		/*
+		 * No stream available so nothing to report
+		 */
+		return NULL;
+	}
+
+	/* There is a window where hStream can be NULL but the stream is already
+	 * opened. This shouldn't matter since the TLClientOpenStream() will make
+	 * sure that only one stream can be opened and only one process can reach
+	 * this place at a time. Also the .stop function will be always called
+	 * after this function returns so there should be no risk of stream
+	 * not being closed. */
+	PVR_ASSERT(g_sHTBData.hStream == NULL);
+	g_sHTBData.hStream = hStream;
+
+	/*
+	 * Ensure we have our debug-specific data store allocated and hooked from
+	 * our seq_file private data.
+	 * If the allocation fails we can safely return NULL which will stop
+	 * further calls from the seq_file routines (NULL return from START or NEXT
+	 * means we have no (more) data to process)
+	 */
+	if (pSentinel == NULL)
+	{
+		pSentinel = (HTB_Sentinel_t *)OSAllocZMem(sizeof(HTB_Sentinel_t));
+		psSeqFile->private = pSentinel;
+		if (pSentinel == NULL)
+		{
+			return NULL;	/* Allocation failed, nothing to process */
+		}
+	}
+
+	/*
+	 * Find the first message location within pSentinel->pBuf
+	 * => for SEQ_START_TOKEN we must issue our first ACQUIRE, also for the
+	 * subsequent re-START calls (if any).
+	 */
+
+	HTB_GetFirstMessage(pSentinel, puiPosition);
+
+	if (*puiPosition == 0)
+	{
+		retVal = SEQ_START_TOKEN;
+	}
+	else
+	{
+		if (pSentinel == NULL)
+		{
+			retVal = NULL;
+		}
+		else
+		{
+			retVal = (void *)pSentinel->pCurr;
+		}
+	}
+
+#ifdef HTB_CHATTY
+	PVR_DPF((PVR_DBG_WARNING, "%s: Returning %p, Stream %s @ %p", __func__,
+		 retVal, HTB_STREAM_NAME, g_sHTBData.hStream));
+#endif	/* HTB_CHATTY */
+
+	return retVal;
+
+}
+
+/*
+ * _DebugHBTraceSeqStop:
+ *
+ * Stop processing data collection and release any previously allocated private
+ * data structure if we have exhausted the previously filled data buffers.
+ */
+static void _DebugHBTraceSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	HTB_Sentinel_t	*pSentinel = (HTB_Sentinel_t *)psSeqFile->private;
+	IMG_UINT32		uiMsgLen;
+
+	if (NULL == pSentinel)
+		return;
+
+	uiMsgLen = pSentinel->uiMsgLen;
+
+#ifdef HTB_CHATTY
+	PVR_DPF((PVR_DBG_WARNING, "%s: MsgLen = %d", __func__, uiMsgLen));
+#endif	/* HTB_CHATTY */
+
+	/* If we get here the handle should never be NULL because
+	 * _DebugHBTraceSeqStart() shouldn't allow that. */
+	if (g_sHTBData.hStream != NULL)
+	{
+		PVRSRV_ERROR eError;
+
+		if (uiMsgLen != 0)
+		{
+			eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE,
+				g_sHTBData.hStream, uiMsgLen);
+
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s, nBytes %u",
+					__func__, "TLClientReleaseDataLess",
+					PVRSRVGETERRORSTRING(eError), uiMsgLen));
+			}
+		}
+
+		eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()",
+				"TLClientCloseStream", PVRSRVGETERRORSTRING(eError),
+				__func__));
+		}
+		g_sHTBData.hStream = NULL;
+	}
+
+	if (pSentinel != NULL)
+	{
+		psSeqFile->private = NULL;
+		OSFreeMem(pSentinel);
+	}
+}
+
+
+/*
+ * _DebugHBTraceSeqNext:
+ *
+ * This is where we release any acquired data which has been processed by the
+ * SeqShow routine. If we have encountered a seq_file overflow we stop
+ * processing and return NULL. Otherwise we release the message that we
+ * previously processed and simply update our position pointer to the next
+ * valid HTB message (if any)
+ */
+static void *_DebugHBTraceSeqNext(struct seq_file *psSeqFile,
+                                  void *pvData,
+                                  loff_t *puiPosition)
+{
+	loff_t			curPos;
+	HTB_Sentinel_t	*pSentinel = (HTB_Sentinel_t *)psSeqFile->private;
+	PVRSRV_ERROR	eError;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	if (puiPosition)
+	{
+		curPos = *puiPosition;
+		*puiPosition = curPos+1;
+	}
+
+	/*
+	 * Determine if we've had an overflow on the previous 'Show' call. If so
+	 * we leave the previously acquired data in the queue (by releasing 0 bytes)
+	 * and return NULL to end this seq_read() iteration.
+	 * If we have not overflowed we simply get the next HTB message and use that
+	 * for our display purposes
+	 */
+
+	if (seq_has_overflowed(psSeqFile))
+	{
+		(void)TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream, 0);
+
+#ifdef HTB_CHATTY
+		PVR_DPF((PVR_DBG_WARNING, "%s: OVERFLOW - returning NULL", __func__));
+#endif	/* HTB_CHATTY */
+
+		return (void *)NULL;
+	}
+	else
+	{
+		eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream,
+		    pSentinel->uiMsgLen);
+
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s' @ %p Length %d",
+			    __func__, "TLClientReleaseDataLess",
+			    PVRSRVGETERRORSTRING(eError), pSentinel->pCurr,
+			    pSentinel->uiMsgLen));
+			PVR_DPF((PVR_DBG_WARNING, "%s: Buffer @ %p..%p", __func__,
+			    pSentinel->pBuf,
+			    (IMG_PBYTE)(pSentinel->pBuf+pSentinel->uiBufLen)));
+
+		}
+
+		eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+		    g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
+
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'\nPrev message len %d",
+			    __func__, "TLClientAcquireData", PVRSRVGETERRORSTRING(eError),
+			    pSentinel->uiMsgLen));
+			pSentinel->pBuf = NULL;
+		}
+
+		pSentinel->uiMsgLen = 0;	// We don't (yet) know the message size
+	}
+
+#ifdef HTB_CHATTY
+	PVR_DPF((PVR_DBG_WARNING, "%s: Returning %p Msglen %d",
+		__func__, pSentinel->pBuf, pSentinel->uiMsgLen));
+#endif	/* HTB_CHATTY */
+
+	if (pSentinel->pBuf == NULL || pSentinel->uiBufLen == 0)
+	{
+		return NULL;
+	}
+
+	pSentinel->pCurr = HTB_GetNextMessage(pSentinel);
+
+	return pSentinel->pCurr;
+}
+
+static const struct seq_operations gsHTBReadOps = {
+	.start = _DebugHBTraceSeqStart,
+	.stop  = _DebugHBTraceSeqStop,
+	.next  = _DebugHBTraceSeqNext,
+	.show  = _DebugHBTraceSeqShow,
+};
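+
+/*
+ * gsHTBReadOps is handed to PVRDebugFSCreateFile() in HTB_CreateFSEntry()
+ * below. For reference, in a bare kernel driver the equivalent wiring would
+ * look roughly like this (an illustrative sketch only, not part of the
+ * driver):
+ *
+ *	// seq_read() drives the start/show/next/stop callbacks in turn
+ *	static int htb_open(struct inode *inode, struct file *file)
+ *	{
+ *		return seq_open(file, &gsHTBReadOps);
+ *	}
+ *
+ *	static const struct file_operations htb_fops = {
+ *		.owner   = THIS_MODULE,
+ *		.open    = htb_open,
+ *		.read    = seq_read,
+ *		.llseek  = seq_lseek,
+ *		.release = seq_release,
+ *	};
+ */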
+
+
+/******************************************************************************
+ * HTB Dumping routines and definitions
+ *****************************************************************************/
+#define IS_VALID_FMT_STRING(FMT) (strchr(FMT, '%') != NULL)
+#define MAX_STRING_SIZE (128)
+
+typedef enum
+{
+	TRACEBUF_ARG_TYPE_INT,
+	TRACEBUF_ARG_TYPE_ERR,
+	TRACEBUF_ARG_TYPE_NONE
+} TRACEBUF_ARG_TYPE;
+
+/*
+ * Array of all Host Trace log IDs used to convert the tracebuf data
+ */
+typedef struct _HTB_TRACEBUF_LOG_ {
+	HTB_LOG_SFids eSFId;
+	IMG_CHAR      *pszName;
+	IMG_CHAR      *pszFmt;
+	IMG_UINT32    ui32ArgNum;
+} HTB_TRACEBUF_LOG;
+
+static const HTB_TRACEBUF_LOG aLogs[] = {
+#define X(a, b, c, d, e) {HTB_LOG_CREATESFID(a,b,e), #c, d, e},
+	HTB_LOG_SFIDLIST
+#undef X
+};
+
+static const IMG_CHAR *aGroups[] = {
+#define X(A,B) #B,
+	HTB_LOG_SFGROUPLIST
+#undef X
+};
+static const IMG_UINT32 uiMax_aGroups = ARRAY_SIZE(aGroups) - 1;
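+
+/*
+ * aLogs[] and aGroups[] are generated with the X-macro technique: the list
+ * macros invoke X once per entry and each redefinition of X selects the
+ * fields to emit. As a hypothetical illustration, a two-entry list
+ *
+ *	#define HTB_LOG_SFGROUPLIST \
+ *		X(HTB_GID_CTRL, CTRL)   \
+ *		X(HTB_GID_MAIN, MAIN)
+ *
+ * expands under '#define X(A,B) #B,' to the initialiser "CTRL", "MAIN",
+ * giving a name table indexed by group ID.
+ */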
+
+static TRACEBUF_ARG_TYPE ExtractOneArgFmt(IMG_CHAR **, IMG_CHAR *);
+/*
+ * ExtractOneArgFmt
+ *
+ * Scan the input 'printf-like' string *ppszFmt and return the next
+ * value string to be displayed. If there is no '%' format field in the
+ * string we return 'TRACEBUF_ARG_TYPE_NONE' and leave the input string
+ * untouched.
+ *
+ * Input
+ *	ppszFmt          reference to format string to be decoded
+ *	pszOneArgFmt     single field format from *ppszFmt
+ *
+ * Returns
+ *	TRACEBUF_ARG_TYPE_ERR       unrecognised argument
+ *	TRACEBUF_ARG_TYPE_INT       variable is of numeric type
+ *	TRACEBUF_ARG_TYPE_NONE      no variable reference in *ppszFmt
+ *
+ * Side-effect
+ *	*ppszFmt is updated to reference the next part of the format string
+ *	to be scanned
+ */
+static TRACEBUF_ARG_TYPE ExtractOneArgFmt(
+	IMG_CHAR **ppszFmt,
+	IMG_CHAR *pszOneArgFmt)
+{
+	IMG_CHAR          *pszFmt;
+	IMG_CHAR          *psT;
+	IMG_UINT32        ui32Count = MAX_STRING_SIZE;
+	IMG_UINT32        ui32OneArgSize;
+	TRACEBUF_ARG_TYPE eRet = TRACEBUF_ARG_TYPE_ERR;
+
+	if (NULL == ppszFmt)
+		return TRACEBUF_ARG_TYPE_ERR;
+
+	pszFmt = *ppszFmt;
+	if (NULL == pszFmt)
+		return TRACEBUF_ARG_TYPE_ERR;
+
+	/*
+	 * Find the first '%'
+	 * NOTE: we can be passed a simple string to display which will have no
+	 * parameters embedded within it. In this case we simply return
+	 * TRACEBUF_ARG_TYPE_NONE and the string contents will be the full pszFmt
+	 */
+	psT = strchr(pszFmt, '%');
+	if (psT == NULL)
+	{
+		return TRACEBUF_ARG_TYPE_NONE;
+	}
+
+	/* Find next conversion identifier after the initial '%' */
+	while ((*psT++) && (ui32Count-- > 0))
+	{
+		switch (*psT)
+		{
+			case 'd':
+			case 'i':
+			case 'o':
+			case 'u':
+			case 'x':
+			case 'X':
+			{
+				eRet = TRACEBUF_ARG_TYPE_INT;
+				goto _found_arg;
+			}
+			case 's':
+			{
+				eRet = TRACEBUF_ARG_TYPE_ERR;
+				goto _found_arg;
+			}
+		}
+	}
+
+	if ((psT == NULL) || (ui32Count == 0))
+	{
+		return TRACEBUF_ARG_TYPE_ERR;
+	}
+
+_found_arg:
+	ui32OneArgSize = psT - pszFmt + 1;
+	OSCachedMemCopy(pszOneArgFmt, pszFmt, ui32OneArgSize);
+	pszOneArgFmt[ui32OneArgSize] = '\0';
+
+	*ppszFmt = psT + 1;
+
+	return eRet;
+}
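+
+/*
+ * Worked example (hypothetical format string): successive calls walk the
+ * string one conversion at a time,
+ *
+ *	IMG_CHAR *pszFmt = "Kick %d on ctx %x";
+ *	ExtractOneArgFmt(&pszFmt, aszOne);	aszOne = "Kick %d"    -> _INT
+ *	ExtractOneArgFmt(&pszFmt, aszOne);	aszOne = " on ctx %x" -> _INT
+ *	ExtractOneArgFmt(&pszFmt, aszOne);	no '%' remains        -> _NONE
+ *
+ * Each copied fragment keeps the literal text preceding its conversion so the
+ * caller can print fragment-plus-argument in one OSSNPrintf() call.
+ */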
+
+static IMG_UINT32 idToLogIdx(IMG_UINT32 ui32CheckData)
+{
+	IMG_UINT32	i = 0;
+	for (i = 0; aLogs[i].eSFId != HTB_SF_LAST; i++)
+	{
+		if ( ui32CheckData == aLogs[i].eSFId )
+			return i;
+	}
+	/* Nothing found, return max value */
+	return HTB_SF_LAST;
+}
+
+/*
+ * DecodeHTB
+ *
+ * Decode the data buffer message located at pBuf. This should be a valid
+ * HTB message as we are provided with the start of the buffer. If empty there
+ * is no message to process. We update the uiMsgLen field with the size of the
+ * HTB message that we have processed so that it can be returned to the system
+ * on successful logging of the message to the output file.
+ *
+ *	Input
+ *		pSentinel reference to newly read data and pending completion data
+ *		          from a previous invocation [handle seq_file buffer overflow]
+ *		 -> pBuf         reference to raw data that we are to parse
+ *		 -> uiBufLen     total number of bytes of data available
+ *		 -> pCurr        start of message to decode
+ *
+ *		pvDumpDebugFile     output file
+ *		pfnDumpDebugPrintf  output generating routine
+ *
+ * Output
+ *		pSentinel
+ *		 -> uiMsgLen	length of the decoded message which will be freed to
+ *						the system on successful completion of the seq_file
+ *						update via _DebugHBTraceSeqNext().
+ * Return Value
+ *		0				successful decode
+ *		-1				unsuccessful decode
+ */
+static int
+DecodeHTB(HTB_Sentinel_t *pSentinel,
+	void *pvDumpDebugFile, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf)
+{
+	IMG_UINT32	ui32Data, ui32LogIdx, ui32ArgsCur;
+	IMG_CHAR	*pszFmt = NULL;
+	IMG_CHAR	aszOneArgFmt[MAX_STRING_SIZE];
+	IMG_BOOL	bUnrecognizedErrorPrinted = IMG_FALSE;
+
+	IMG_UINT32	ui32DataSize;
+	IMG_UINT32	uiBufLen = pSentinel->uiBufLen;
+	size_t	nPrinted;
+
+	IMG_PBYTE	pNext, pLast, pStart, pData = NULL;
+	PVRSRVTL_PPACKETHDR	ppHdr;	/* Current packet header */
+	IMG_UINT32	uiHdrType;		/* Packet header type */
+	IMG_UINT32	uiMsgSize;		/* Message size of current packet (bytes) */
+	IMG_BOOL	bPacketsDropped;
+
+	/* Convert from byte to uint32 size */
+	ui32DataSize = uiBufLen / sizeof(IMG_UINT32);
+
+	pLast = pSentinel->pBuf + pSentinel->uiBufLen;
+	pStart = pSentinel->pCurr;
+
+	pNext = pStart;
+	pSentinel->uiMsgLen = 0;	// Reset count for this message
+	uiMsgSize = 0;				// nothing processed so far
+	ui32LogIdx = HTB_SF_LAST;	// Loop terminator condition
+
+#ifdef HTB_CHATTY
+	PVR_DPF((PVR_DBG_WARNING, "%s: Buf @ %p..%p, Length = %d", __func__,
+		pStart, pLast, uiBufLen));
+#endif	/* HTB_CHATTY */
+
+	/*
+	 * We should have a DATA header with the necessary information following
+	 */
+	ppHdr = GET_PACKET_HDR(pStart);
+
+	if (ppHdr == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		    "%s: Unexpected NULL packet in Host Trace buffer", __func__));
+		return -1;
+	}
+
+	uiHdrType = GET_PACKET_TYPE(ppHdr);
+	PVR_ASSERT(uiHdrType == PVRSRVTL_PACKETTYPE_DATA);
+
+	pNext = (IMG_PBYTE)GET_NEXT_PACKET_ADDR(ppHdr);
+
+	PVR_ASSERT(pNext != NULL);
+
+	uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr);
+
+	pSentinel->uiMsgLen += uiMsgSize;
+
+	pData = GET_PACKET_DATA_PTR(ppHdr);
+
+	if (pData == NULL || pData >= pLast)
+	{
+#ifdef HTB_CHATTY
+		PVR_DPF((PVR_DBG_WARNING, "%s: pData = %p, pLast = %p Returning 0",
+			__func__, pData, pLast));
+#endif	/* HTB_CHATTY */
+		return 0;
+	}
+
+	ui32Data = *(IMG_UINT32 *)pData;
+	ui32LogIdx = idToLogIdx(ui32Data);
+
+	/*
+	 * Check if the unrecognised ID is valid and therefore, tracebuf
+	 * needs updating.
+	 */
+	if (ui32LogIdx == HTB_SF_LAST)
+	{
+		if (HTB_LOG_VALIDID(ui32Data))
+		{
+			if (!bUnrecognizedErrorPrinted)
+			{
+				PVR_DPF((PVR_DBG_WARNING,
+				    "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'",
+				    __func__, ui32Data, HTB_SF_GID(ui32Data),
+				    HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData));
+				bUnrecognizedErrorPrinted = IMG_TRUE;
+			}
+
+			return 0;
+		}
+
+		PVR_DPF((PVR_DBG_ERROR,
+		    "%s: Unrecognised and invalid LOG value detected '%x'",
+		    __func__, ui32Data));
+
+		return -1;
+	}
+
+	/* The string format we are going to display */
+	/*
+	 * The display will show the header (log-ID, group-ID, number of params)
+	 * The maximum parameter list length = 15 (only 4bits used to encode)
+	 * so we need HEADER + 15 * sizeof(UINT32) and the displayed string
+	 * describing the event. We use a buffer in the per-process pSentinel
+	 * structure to hold the data.
+	 */
+	pszFmt = aLogs[ui32LogIdx].pszFmt;
+
+	/* add the message payload size to the running count */
+	ui32ArgsCur = HTB_SF_PARAMNUM(ui32Data);
+
+	/* Determine if we've over-filled the buffer and had to drop packets */
+	bPacketsDropped = CHECK_PACKETS_DROPPED(ppHdr);
+	if (bPacketsDropped ||
+		(uiHdrType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED))
+	{
+		/* Flag this as it is useful to know ... */
+
+		PVR_DUMPDEBUG_LOG("\n<========================== *** PACKETS DROPPED *** ======================>\n");
+	}
+
+	{
+		IMG_UINT32 ui32Timestampns, ui32PID;
+		IMG_UINT64 ui64Timestamp, ui64TimestampSec;
+		IMG_CHAR	*szBuffer = pSentinel->szBuffer;	// Buffer start
+		IMG_CHAR	*pszBuffer = pSentinel->szBuffer;	// Current place in buf
+		size_t		uBufBytesAvailable = sizeof(pSentinel->szBuffer);
+		IMG_UINT32	*pui32Data = (IMG_UINT32 *)pData;
+		IMG_UINT32	ui_aGroupIdx;
+
+		// Get PID field from data stream
+		pui32Data++;
+		ui32PID = *pui32Data;
+		// Get Timestamp part 1 from data stream
+		pui32Data++;
+		ui64Timestamp = (IMG_UINT64) *pui32Data << 32;
+		// Get Timestamp part 2 from data stream
+		pui32Data++;
+		ui64Timestamp |= (IMG_UINT64) *pui32Data;
+		// Move to start of message contents data
+		pui32Data++;
+
+		/*
+		 * We need to snprintf the data to a local in-kernel buffer
+		 * and then PVR_DUMPDEBUG_LOG() that in one shot
+		 */
+		ui_aGroupIdx = MIN(HTB_SF_GID(ui32Data), uiMax_aGroups);
+
+		/* Divide by 1e9 to get seconds; the remainder written to the output var gives nanosecond resolution */
+		ui64TimestampSec = OSDivide64r64(ui64Timestamp, 1000000000, &ui32Timestampns);
+
+		nPrinted = OSSNPrintf(szBuffer, uBufBytesAvailable, "%010"IMG_UINT64_FMTSPEC".%09u:%5u-%s> ",
+			ui64TimestampSec, ui32Timestampns, ui32PID, aGroups[ui_aGroupIdx]);
+		if (nPrinted >= uBufBytesAvailable)
+		{
+			PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed,"
+				" max space "IMG_SIZE_FMTSPEC"\n", nPrinted,
+				uBufBytesAvailable);
+
+			nPrinted = uBufBytesAvailable;	/* Ensure we don't overflow buffer */
+		}
+
+		PVR_DUMPDEBUG_LOG("%s", pszBuffer);
+		/* Update where our next 'output' point in the buffer is */
+		pszBuffer += nPrinted;
+		uBufBytesAvailable -= nPrinted;
+
+		/*
+		 * Print one argument at a time as this simplifies handling variable
+		 * number of arguments. Special case handling for no arguments.
+		 * This is the case for simple format strings such as
+		 * HTB_SF_MAIN_KICK_UNCOUNTED.
+		 */
+		if (ui32ArgsCur == 0)
+		{
+			if (pszFmt)
+			{
+				nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable);
+				if (nPrinted >= uBufBytesAvailable)
+				{
+					PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed,"
+						" max space "IMG_SIZE_FMTSPEC"\n", nPrinted,
+						uBufBytesAvailable);
+					nPrinted = uBufBytesAvailable;	/* Ensure we don't overflow buffer */
+				}
+				PVR_DUMPDEBUG_LOG("%s", pszBuffer);
+				pszBuffer += nPrinted;
+				/* Don't update the uBufBytesAvailable as we have finished this
+				 * message decode. pszBuffer - szBuffer is the total amount of
+				 * data we have decoded.
+				 */
+			}
+		}
+		else
+		{
+			if (HTB_SF_GID(ui32Data) == HTB_GID_CTRL && HTB_SF_ID(ui32Data) == HTB_ID_MARK_SCALE)
+			{
+				IMG_UINT32 i;
+				IMG_UINT32 ui32ArgArray[HTB_MARK_SCALE_ARG_ARRAY_SIZE];
+				IMG_UINT64 ui64OSTS = 0;
+				IMG_UINT32 ui32OSTSRem = 0;
+				IMG_UINT64 ui64CRTS = 0;
+
+				/* Retrieve 6 args to an array */
+				for (i = 0; i < ARRAY_SIZE(ui32ArgArray); i++)
+				{
+					ui32ArgArray[i] = *pui32Data;
+					pui32Data++;
+					--ui32ArgsCur;
+				}
+
+				ui64OSTS = (IMG_UINT64) ui32ArgArray[HTB_ARG_OSTS_PT1] << 32 | ui32ArgArray[HTB_ARG_OSTS_PT2];
+				ui64CRTS = (IMG_UINT64) ui32ArgArray[HTB_ARG_CRTS_PT1] << 32 | ui32ArgArray[HTB_ARG_CRTS_PT2];
+
+				/* Divide by 1B to get seconds, remainder in nano seconds*/
+				ui64OSTS = OSDivide64r64(ui64OSTS, 1000000000, &ui32OSTSRem);
+
+				nPrinted = OSSNPrintf(pszBuffer,
+						              uBufBytesAvailable,
+						              "HTBFWMkSync Mark=%u OSTS=%010" IMG_UINT64_FMTSPEC ".%09u CRTS=%" IMG_UINT64_FMTSPEC " CalcClkSpd=%u \n",
+						              ui32ArgArray[HTB_ARG_SYNCMARK],
+						              ui64OSTS,
+						              ui32OSTSRem,
+						              ui64CRTS,
+						              ui32ArgArray[HTB_ARG_CLKSPD]);
+
+				if (nPrinted >= uBufBytesAvailable)
+				{
+					PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed,"
+						" max space "IMG_SIZE_FMTSPEC"\n", nPrinted,
+						uBufBytesAvailable);
+					nPrinted = uBufBytesAvailable;	/* Ensure we don't overflow buffer */
+				}
+
+				PVR_DUMPDEBUG_LOG("%s", pszBuffer);
+				pszBuffer += nPrinted;
+				uBufBytesAvailable -= nPrinted;
+			}
+			else
+			{
+				while (IS_VALID_FMT_STRING(pszFmt) && (uBufBytesAvailable > 0))
+				{
+					IMG_UINT32 ui32TmpArg = *pui32Data;
+					TRACEBUF_ARG_TYPE eArgType;
+
+					eArgType = ExtractOneArgFmt(&pszFmt, aszOneArgFmt);
+
+					pui32Data++;
+					ui32ArgsCur--;
+
+					switch (eArgType)
+					{
+						case TRACEBUF_ARG_TYPE_INT:
+							nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable,
+								aszOneArgFmt, ui32TmpArg);
+							break;
+
+						case TRACEBUF_ARG_TYPE_NONE:
+							nPrinted = OSStringLCopy(pszBuffer, pszFmt,
+								uBufBytesAvailable);
+							break;
+
+						default:
+							nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable,
+								"Error processing arguments, type not "
+								"recognized (fmt: %s)", aszOneArgFmt);
+							break;
+					}
+					if (nPrinted >= uBufBytesAvailable)
+					{
+						PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed,"
+							" max space "IMG_SIZE_FMTSPEC"\n", nPrinted,
+							uBufBytesAvailable);
+						nPrinted = uBufBytesAvailable;	/* Ensure we don't overflow buffer */
+					}
+					PVR_DUMPDEBUG_LOG("%s", pszBuffer);
+					pszBuffer += nPrinted;
+					uBufBytesAvailable -= nPrinted;
+				}
+				/* Display any remaining text in pszFmt string */
+				if (pszFmt)
+				{
+					nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable);
+					if (nPrinted >= uBufBytesAvailable)
+					{
+						PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed,"
+							" max space "IMG_SIZE_FMTSPEC"\n", nPrinted,
+							uBufBytesAvailable);
+						nPrinted = uBufBytesAvailable;	/* Ensure we don't overflow buffer */
+					}
+					PVR_DUMPDEBUG_LOG("%s", pszBuffer);
+					pszBuffer += nPrinted;
+					/* Don't update the uBufBytesAvailable as we have finished this
+					 * message decode. pszBuffer - szBuffer is the total amount of
+					 * data we have decoded.
+					 */
+				}
+			}
+		}
+
+		/* Update total bytes processed */
+		pSentinel->uiTotal += (pszBuffer - szBuffer);
+	}
+	return 0;
+}
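+
+/*
+ * Each decoded line is prefixed per the OSSNPrintf format above, i.e.
+ * zero-padded seconds.nanoseconds, the PID and the group name (the values
+ * and message text below are hypothetical):
+ *
+ *	0000000042.123456789: 1234-MAIN> FW kick scheduled
+ */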
+
+/*
+ * HTBDumpBuffer: Dump the Host Trace Buffer using the TLClient API
+ *
+ * This routine just parses *one* message from the buffer.
+ * The stream will be opened by the Start() routine, closed by the Stop() and
+ * updated for data consumed by this routine once we have DebugPrintf'd it.
+ * We use the new TLReleaseDataLess() routine which enables us to update the
+ * HTB contents with just the amount of data we have successfully processed.
+ * If we need to leave the data available we can call this with a 0 count.
+ * This will happen in the case of a buffer overflow so that we can reprocess
+ * any data which wasn't handled before.
+ *
+ * In case of overflow or an error we return -1 otherwise 0
+ *
+ * Input:
+ *  pfnDumpDebugPrintf  output routine to display data
+ *  pvDumpDebugFile     seq_file handle (from kernel seq_read() call)
+ *  pvData              data address to start dumping from
+ *                      (set by Start() / Next())
+ */
+static int HTBDumpBuffer(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                          void *pvDumpDebugFile,
+                          void *pvData)
+{
+	struct seq_file *psSeqFile = (struct seq_file *)pvDumpDebugFile;
+	HTB_Sentinel_t  *pSentinel = (HTB_Sentinel_t *)psSeqFile->private;
+
+	PVR_ASSERT(NULL != pvData);
+
+	if (pvData == SEQ_START_TOKEN)
+	{
+		if (pSentinel->pCurr == NULL)
+		{
+#ifdef HTB_CHATTY
+			PVR_DPF((PVR_DBG_WARNING, "%s: SEQ_START_TOKEN, Empty buffer",
+				__func__));
+#endif	/* HTB_CHATTY */
+			return 0;
+		}
+		PVR_ASSERT(pSentinel->pCurr != NULL);
+
+		/* Display a Header as we have data to process */
+		seq_printf(psSeqFile, "%-20s:%-5s-%s  %s\n",
+			"Timestamp", "PID", "Group>", "Log Entry");
+	}
+	else
+	{
+		if (pvData != NULL)
+		{
+			PVR_ASSERT(pSentinel->pCurr == pvData);
+		}
+	}
+
+	return DecodeHTB(pSentinel, pvDumpDebugFile, pfnDumpDebugPrintf);
+}
+
+
+/******************************************************************************
+ * External Entry Point routines ...
+ *****************************************************************************/
+/*************************************************************************/ /*!
+ @Function     HTB_CreateFSEntry
+
+ @Description  Create the debugFS entry-point for the host-trace-buffer
+
+ @Returns      eError          internal error code, PVRSRV_OK on success
+
+ */ /*************************************************************************/
+PVRSRV_ERROR HTB_CreateFSEntry(void)
+{
+	PVRSRV_ERROR eError;
+
+	eError = PVRDebugFSCreateFile("host_trace", NULL,
+				      &gsHTBReadOps,
+				      NULL, NULL, NULL,
+				      &g_sHTBData.psDumpHostDebugFSEntry);
+
+	PVR_LOGR_IF_ERROR(eError, "PVRDebugFSCreateEntry");
+
+	return eError;
+}
+
+
+/*************************************************************************/ /*!
+ @Function     HTB_DestroyFSEntry
+
+ @Description  Destroy the debugFS entry-point created by earlier
+               HTB_CreateFSEntry() call.
+*/ /**************************************************************************/
+void HTB_DestroyFSEntry(void)
+{
+	if (g_sHTBData.psDumpHostDebugFSEntry)
+	{
+		PVRDebugFSRemoveFile(&g_sHTBData.psDumpHostDebugFSEntry);
+	}
+}
+
+/* EOF */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htb_debug.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htb_debug.h
new file mode 100644
index 0000000..92d7069
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htb_debug.h
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File           htb_debug.h
+@Title          Linux debugFS routine setup header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _HTB_DEBUG_H_
+#define _HTB_DEBUG_H_
+
+/**************************************************************************/ /*!
+ @Function     HTB_CreateFSEntry
+
+ @Description  Create the debugFS entry-point for the host-trace-buffer
+
+ @Returns      eError          internal error code, PVRSRV_OK on success
+
+ */ /**************************************************************************/
+PVRSRV_ERROR HTB_CreateFSEntry(void);
+
+/**************************************************************************/ /*!
+ @Function     HTB_DestroyFSEntry
+
+ @Description  Destroy the debugFS entry-point created by earlier
+               HTB_CreateFSEntry() call.
+*/ /**************************************************************************/
+void HTB_DestroyFSEntry(void);
+
+#endif /* _HTB_DEBUG_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbserver.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbserver.c
new file mode 100644
index 0000000..34f3bfb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbserver.c
@@ -0,0 +1,883 @@
+/*************************************************************************/ /*!
+@File           htbserver.c
+@Title          Host Trace Buffer server implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provides the mechanism to
+                retrieve the trace data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "htbserver.h"
+#include "htbuffer.h"
+#include "htbuffer_types.h"
+#include "tlstream.h"
+#include "pvrsrv_tlcommon.h"
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrsrv_apphint.h"
+#include "oskm_apphint.h"
+
+/* size of circular buffer controlling the maximum number of concurrent PIDs logged */
+#define HTB_MAX_NUM_PID 8
+
+/* number of times to try rewriting a log entry */
+#define HTB_LOG_RETRY_COUNT 5
+
+/*************************************************************************/ /*!
+  Host Trace Buffer control information structure
+*/ /**************************************************************************/
+typedef struct
+{
+	IMG_UINT32 ui32BufferSize;      /*!< Requested buffer size in bytes
+                                         Once set this may not be changed */
+
+	HTB_OPMODE_CTRL eOpMode;        /*!< Control what trace data is dropped if
+                                         the buffer is full.
+                                         Once set this may not be changed */
+
+/*	IMG_UINT32 ui32GroupEnable; */  /*!< Flags word controlling groups to be
+                                         logged */
+
+	IMG_UINT32 ui32LogLevel;        /*!< Log level to control messages logged */
+
+	IMG_UINT32 aui32EnablePID[HTB_MAX_NUM_PID]; /*!< PIDs to enable logging for
+                                                     a specific set of processes */
+
+	IMG_UINT32 ui32PIDCount;        /*!< Current number of PIDs being logged */
+
+	IMG_UINT32 ui32PIDHead;         /*!< Head of the PID circular buffer */
+
+	HTB_LOGMODE_CTRL eLogMode;      /*!< Logging mode control */
+
+	IMG_BOOL bLogDropSignalled;     /*!< Flag indicating if a log message has
+                                         been signalled as dropped */
+
+	/* synchronisation parameters */
+	IMG_UINT64 ui64SyncOSTS;
+	IMG_UINT64 ui64SyncCRTS;
+	IMG_UINT32 ui32SyncCalcClkSpd;
+	IMG_UINT32 ui32SyncMarker;
+
+	IMG_BOOL bInitDone;             /* Set by HTBInit, reset by HTBDeInit */
+
+	POS_SPINLOCK hRepeatMarkerLock;     /*!< Spinlock used in HTBLogKM to protect global variables
+	                                     (ByteCount, OSTS, CRTS ClkSpeed)
+	                                     from becoming inconsistent due to calls from
+	                                     both KM and UM */
+
+	IMG_UINT32 ui32ByteCount; /* Byte count used for triggering repeat sync point */
+	/* static variables containing details of previous sync point */
+	IMG_UINT64 ui64OSTS;
+	IMG_UINT64 ui64CRTS;
+	IMG_UINT32 ui32ClkSpeed;
+
+} HTB_CTRL_INFO;
+
+
+/*************************************************************************/ /*!
+*/ /**************************************************************************/
+static const IMG_UINT32 MapFlags[] =
+{
+	0,                    /* HTB_OPMODE_UNDEF = 0 */
+	TL_OPMODE_DROP_NEWER, /* HTB_OPMODE_DROPLATEST */
+	TL_OPMODE_DROP_OLDEST,/* HTB_OPMODE_DROPOLDEST */
+	TL_OPMODE_BLOCK       /* HTB_OPMODE_BLOCK */
+};
+
+static_assert(0 == HTB_OPMODE_UNDEF,      "Unexpected value for HTB_OPMODE_UNDEF");
+static_assert(1 == HTB_OPMODE_DROPLATEST, "Unexpected value for HTB_OPMODE_DROPLATEST");
+static_assert(2 == HTB_OPMODE_DROPOLDEST, "Unexpected value for HTB_OPMODE_DROPOLDEST");
+static_assert(3 == HTB_OPMODE_BLOCK,      "Unexpected value for HTB_OPMODE_BLOCK");
+
+static_assert(1 == TL_OPMODE_DROP_NEWER,  "Unexpected value for TL_OPMODE_DROP_NEWER");
+static_assert(2 == TL_OPMODE_DROP_OLDEST, "Unexpected value for TL_OPMODE_DROP_OLDEST");
+static_assert(3 == TL_OPMODE_BLOCK,       "Unexpected value for TL_OPMODE_BLOCK");
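+
+/*
+ * The static_asserts above pin the two enum encodings so that MapFlags[] can
+ * be indexed directly by the HTB operation mode; for example
+ * MapFlags[HTB_OPMODE_BLOCK] (index 3) yields TL_OPMODE_BLOCK.
+ */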
+
+static const IMG_UINT32 g_ui32TLBaseFlags; //TL_FLAG_NO_SIGNAL_ON_COMMIT
+
+/* Minimum TL buffer size.
+ * Large enough for around 60 worst case messages or 200 average messages
+ */
+#define HTB_TL_BUFFER_SIZE_MIN	(0x10000)
+
+/* Minimum concentration of HTB packets in a TL Stream is 60%
+ * If we just put the HTB header in the TL stream (12 bytes), the TL overhead
+ * is 8 bytes for its own header, so for the smallest possible (and most
+ * inefficient) packet we have 3/5 of the buffer used for actual HTB data.
+ * This shift is used as a conservative estimate of when to produce a repeat
+ * packet. By shifting the buffer size right by 1 we effectively halve it,
+ * keeping us under the 60% boundary beyond which the marker may have been
+ * overwritten, so a marker is guaranteed to remain in the stream */
+#define HTB_MARKER_PREDICTION_THRESHOLD(val) (val >> 1)
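+
+/*
+ * Worked example: for the minimum 0x10000 (64KiB) buffer the threshold is
+ * HTB_MARKER_PREDICTION_THRESHOLD(0x10000) = 0x8000, i.e. a repeat marker is
+ * produced after at most half a buffer of data, comfortably inside the 60%
+ * bound described above.
+ */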
+
+static HTB_CTRL_INFO g_sCtrl;
+static IMG_BOOL g_bConfigured = IMG_FALSE;
+static IMG_HANDLE g_hTLStream;
+
+
+/************************************************************************/ /*!
+ @Function      _LookupFlags
+ @Description   Convert HTBuffer Operation mode to TLStream flags
+
+ @Input         eModeHTBuffer   Operation Mode
+
+ @Return        IMG_UINT32      TLStream FLags
+*/ /**************************************************************************/
+static IMG_UINT32
+_LookupFlags( HTB_OPMODE_CTRL eMode )
+{
+	return (eMode < ARRAY_SIZE(MapFlags)) ? MapFlags[eMode] : 0;
+}
+
+
+/************************************************************************/ /*!
+ @Function      _HTBLogDebugInfo
+ @Description   Debug dump handler used to dump the state of the HTB module.
+                Called for each verbosity level during a debug dump. Function
+                only prints state when called for High verbosity.
+
+ @Input         hDebugRequestHandle See PFN_DBGREQ_NOTIFY
+
+ @Input         ui32VerbLevel       See PFN_DBGREQ_NOTIFY
+
+ @Input         pfnDumpDebugPrintf  See PFN_DBGREQ_NOTIFY
+
+ @Input         pvDumpDebugFile     See PFN_DBGREQ_NOTIFY
+
+*/ /**************************************************************************/
+static void _HTBLogDebugInfo(
+		PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+		IMG_UINT32 ui32VerbLevel,
+		DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+		void *pvDumpDebugFile
+)
+{
+	PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle);
+
+	if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
+	{
+
+		if (g_bConfigured)
+		{
+			IMG_INT i;
+
+			PVR_DUMPDEBUG_LOG("------[ HTB Log state: On ]------");
+
+			PVR_DUMPDEBUG_LOG("HTB Log mode: %d", g_sCtrl.eLogMode);
+			PVR_DUMPDEBUG_LOG("HTB Log level: %d", g_sCtrl.ui32LogLevel);
+			PVR_DUMPDEBUG_LOG("HTB Buffer Opmode: %d", g_sCtrl.eOpMode);
+
+			for (i=0; i < HTB_FLAG_NUM_EL; i++)
+			{
+				PVR_DUMPDEBUG_LOG("HTB Log group %d: %x", i, g_auiHTBGroupEnable[i]);
+			}
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG("------[ HTB Log state: Off ]------");
+		}
+	}
+}
+
+/************************************************************************/ /*!
+ @Function      HTBDeviceCreate
+ @Description   Initialisation actions for HTB at device creation.
+
+ @Input         psDeviceNode    Reference to the device node in context
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeviceCreate(
+		PVRSRV_DEVICE_NODE *psDeviceNode
+)
+{
+	PVRSRV_ERROR eError;
+
+	eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hHtbDbgReqNotify,
+			 psDeviceNode, &_HTBLogDebugInfo, DEBUG_REQUEST_HTB, NULL);
+	PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+	return eError;
+}
+
+/************************************************************************/ /*!
+ @Function      HTBDeviceDestroy
+ @Description   De-initialisation actions for HTB at device destruction.
+
+ @Input         psDeviceNode    Reference to the device node in context
+
+*/ /**************************************************************************/
+void
+HTBDeviceDestroy(
+		PVRSRV_DEVICE_NODE *psDeviceNode
+)
+{
+	if (psDeviceNode->hHtbDbgReqNotify)
+	{
+		/* Not much we can do if it fails; the driver is unloading */
+		(void)PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hHtbDbgReqNotify);
+		psDeviceNode->hHtbDbgReqNotify = NULL;
+	}
+}
+
+static IMG_UINT32 g_ui32HTBufferSize = HTB_TL_BUFFER_SIZE_MIN;
+
+/*
+ * AppHint access routine forward definitions
+ */
+static PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *, const void *,
+                                    IMG_UINT32);
+static PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *, const void *,
+                                    IMG_UINT32 *);
+
+static PVRSRV_ERROR	_HTBSetOpMode(const PVRSRV_DEVICE_NODE *, const void *,
+                                   IMG_UINT32);
+static PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *, const void *,
+                                    IMG_UINT32 *);
+
+static void _OnTLReaderOpenCallback(void *);
+
+/************************************************************************/ /*!
+ @Function      HTBInit
+ @Description   Allocate and initialise the Host Trace Buffer
+                The buffer size may be changed by specifying
+                HTBufferSizeInKB=xxxx
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBInit(void)
+{
+	void			*pvAppHintState = NULL;
+	IMG_UINT32		ui32AppHintDefault;
+	IMG_UINT32		ui32BufBytes;
+	PVRSRV_ERROR	eError;
+
+	if (g_sCtrl.bInitDone)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HTBInit: Driver already initialised"));
+		return PVRSRV_ERROR_ALREADY_EXISTS;
+	}
+
+	/*
+	 * Buffer Size can be configured by specifying a value in the AppHint
+	 * This will only take effect at module load time so there is no query
+	 * or setting mechanism available.
+	 */
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBufferSizeInKB,
+	                                    NULL,
+	                                    NULL,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    NULL);
+
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableHTBLogGroup,
+	                                    _HTBReadLogGroup,
+	                                    _HTBSetLogGroup,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    NULL);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBOperationMode,
+	                                    _HTBReadOpMode,
+	                                    _HTBSetOpMode,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    NULL);
+
+	/*
+	 * Now get whatever values have been configured for our AppHints
+	 */
+	OSCreateKMAppHintState(&pvAppHintState);
+	ui32AppHintDefault = HTB_TL_BUFFER_SIZE_MIN / 1024;
+	OSGetKMAppHintUINT32(pvAppHintState, HTBufferSizeInKB,
+						 &ui32AppHintDefault, &g_ui32HTBufferSize);
+	OSFreeKMAppHintState(pvAppHintState);
+
+	ui32BufBytes = g_ui32HTBufferSize * 1024;
+
+	/* initialise rest of state */
+	g_sCtrl.ui32BufferSize =
+		(ui32BufBytes < HTB_TL_BUFFER_SIZE_MIN)
+		? HTB_TL_BUFFER_SIZE_MIN
+		: ui32BufBytes;
+	g_sCtrl.eOpMode = HTB_OPMODE_DROPOLDEST;
+	g_sCtrl.ui32LogLevel = 0;
+	g_sCtrl.ui32PIDCount = 0;
+	g_sCtrl.ui32PIDHead = 0;
+	g_sCtrl.eLogMode = HTB_LOGMODE_ALLPID;
+	g_sCtrl.bLogDropSignalled = IMG_FALSE;
+
+	eError = OSSpinLockCreate(&g_sCtrl.hRepeatMarkerLock);
+	PVR_LOGR_IF_ERROR(eError, "OSSpinLockCreate");
+
+	g_sCtrl.bInitDone = IMG_TRUE;
+
+	/* Log the current driver parameter setting for the HTBufferSizeInKB.
+	 * We do this here as there is no other infrastructure for obtaining
+	 * the value.
+	 */
+	if (g_ui32HTBufferSize != ui32AppHintDefault)
+	{
+		PVR_LOG(("HTBufferSize set to %uKB", g_ui32HTBufferSize));
+	}
+
+	return PVRSRV_OK;
+}
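+
+/*
+ * The buffer size is only read here, so it must be supplied at driver load
+ * time. As a hypothetical example (module name and shell syntax are
+ * platform-specific):
+ *
+ *	modprobe <pvr_module> HTBufferSizeInKB=256
+ *
+ * would request a 256KiB trace buffer; values below 64KiB are clamped up to
+ * HTB_TL_BUFFER_SIZE_MIN.
+ */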
+
+/************************************************************************/ /*!
+ @Function      HTBDeInit
+ @Description   Close the Host Trace Buffer and free all resources. Must
+                perform a no-op if already de-initialised.
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeInit( void )
+{
+	if (!g_sCtrl.bInitDone)
+		return PVRSRV_OK;
+
+	if (g_hTLStream)
+	{
+		TLStreamClose( g_hTLStream );
+		g_hTLStream = NULL;
+	}
+
+	if (g_sCtrl.hRepeatMarkerLock != NULL)
+	{
+		OSSpinLockDestroy(g_sCtrl.hRepeatMarkerLock);
+		g_sCtrl.hRepeatMarkerLock = NULL;
+	}
+
+	g_sCtrl.bInitDone = IMG_FALSE;
+	return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+ AppHint interface functions
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                             const void *psPrivate,
+                             IMG_UINT32 ui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	return HTBControlKM(1, &ui32Value, 0, 0,
+	                    HTB_LOGMODE_UNDEF, HTB_OPMODE_UNDEF);
+}
+
+static
+PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const void *psPrivate,
+                              IMG_UINT32 *pui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	*pui32Value = g_auiHTBGroupEnable[0];
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                           const void *psPrivate,
+                           IMG_UINT32 ui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	return HTBControlKM(0, NULL, 0, 0, HTB_LOGMODE_UNDEF, ui32Value);
+}
+
+static
+PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                            const void *psPrivate,
+                            IMG_UINT32 *pui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	*pui32Value = (IMG_UINT32)g_sCtrl.eOpMode;
+	return PVRSRV_OK;
+}
+
+
+static void
+_OnTLReaderOpenCallback( void *pvArg )
+{
+	if ( g_hTLStream )
+	{
+		IMG_UINT64 ui64Time;
+		OSClockMonotonicns64(&ui64Time);
+		(void) HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+		              g_sCtrl.ui32SyncMarker,
+		              ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)),
+		              ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+		              ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)),
+		              ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+		              g_sCtrl.ui32SyncCalcClkSpd);
+	}
+
+	PVR_UNREFERENCED_PARAMETER(pvArg);
+}
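+
+/*
+ * The 64-bit OSTS/CRTS timestamps are logged as hi/lo 32-bit halves because
+ * HTB log parameters are IMG_UINT32. A decoder reassembles them as in this
+ * sketch (mirroring DecodeHTB() in htb_debug.c):
+ *
+ *	IMG_UINT32 ui32Hi = (IMG_UINT32)((ui64TS >> 32) & 0xffffffff);
+ *	IMG_UINT32 ui32Lo = (IMG_UINT32)(ui64TS & 0xffffffff);
+ *	IMG_UINT64 ui64Back = ((IMG_UINT64)ui32Hi << 32) | ui32Lo;
+ *
+ * ui64Back round-trips to the original ui64TS.
+ */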
+
+
+/*************************************************************************/ /*!
+ @Function      HTBControlKM
+ @Description   Update the configuration of the Host Trace Buffer
+
+ @Input         ui32NumFlagGroups Number of group enable flags words
+
+ @Input         aui32GroupEnable  Flags words controlling groups to be logged
+
+ @Input         ui32LogLevel    Log level to record
+
+ @Input         ui32EnablePID   PID to enable logging for a specific process
+
+ @Input         eLogMode        Enable logging for all or specific processes,
+
+ @Input         eOpMode         Control the behaviour of the data buffer
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBControlKM(
+	const IMG_UINT32 ui32NumFlagGroups,
+	const IMG_UINT32 * aui32GroupEnable,
+	const IMG_UINT32 ui32LogLevel,
+	const IMG_UINT32 ui32EnablePID,
+	const HTB_LOGMODE_CTRL eLogMode,
+	const HTB_OPMODE_CTRL eOpMode
+)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
+	IMG_UINT32 i;
+	IMG_UINT64 ui64Time;
+	OSClockMonotonicns64(&ui64Time);
+
+	if ( !g_bConfigured && ui32NumFlagGroups )
+	{
+		eError = TLStreamCreate(
+				&g_hTLStream,
+				PVRSRVGetPVRSRVData()->psHostMemDeviceNode,
+				HTB_STREAM_NAME,
+				g_sCtrl.ui32BufferSize,
+				_LookupFlags(HTB_OPMODE_DROPOLDEST) | g_ui32TLBaseFlags,
+				_OnTLReaderOpenCallback, NULL, NULL, NULL);
+		PVR_LOGR_IF_ERROR(eError, "TLStreamCreate");
+		g_bConfigured = IMG_TRUE;
+	}
+
+	if (HTB_OPMODE_UNDEF != eOpMode && g_sCtrl.eOpMode != eOpMode)
+	{
+		g_sCtrl.eOpMode = eOpMode;
+		eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode) | g_ui32TLBaseFlags);
+		while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
+		{
+			OSReleaseThreadQuanta();
+			eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode) | g_ui32TLBaseFlags);
+		}
+		PVR_LOGR_IF_ERROR(eError, "TLStreamReconfigure");
+	}
+
+	if ( ui32EnablePID )
+	{
+		g_sCtrl.aui32EnablePID[g_sCtrl.ui32PIDHead] = ui32EnablePID;
+		g_sCtrl.ui32PIDHead++;
+		g_sCtrl.ui32PIDHead %= HTB_MAX_NUM_PID;
+		g_sCtrl.ui32PIDCount++;
+		if ( g_sCtrl.ui32PIDCount > HTB_MAX_NUM_PID )
+		{
+			g_sCtrl.ui32PIDCount = HTB_MAX_NUM_PID;
+		}
+	}
+
+	/* HTB_LOGMODE_ALLPID overrides ui32EnablePID */
+	if ( HTB_LOGMODE_ALLPID == eLogMode )
+	{
+		OSCachedMemSet(g_sCtrl.aui32EnablePID, 0, sizeof(g_sCtrl.aui32EnablePID));
+		g_sCtrl.ui32PIDCount = 0;
+		g_sCtrl.ui32PIDHead = 0;
+	}
+	if ( HTB_LOGMODE_UNDEF != eLogMode )
+	{
+		g_sCtrl.eLogMode = eLogMode;
+	}
+
+	if ( ui32NumFlagGroups )
+	{
+		for (i = 0; i < HTB_FLAG_NUM_EL && i < ui32NumFlagGroups; i++)
+		{
+			g_auiHTBGroupEnable[i] = aui32GroupEnable[i];
+		}
+		for (; i < HTB_FLAG_NUM_EL; i++)
+		{
+			g_auiHTBGroupEnable[i] = 0;
+		}
+	}
+
+	if ( ui32LogLevel )
+	{
+		g_sCtrl.ui32LogLevel = ui32LogLevel;
+	}
+
+	/* Dump the current configuration state */
+	eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode);
+	PVR_LOG_IF_ERROR(eError, "HTBLog");
+	eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]);
+	PVR_LOG_IF_ERROR(eError, "HTBLog");
+	eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel);
+	PVR_LOG_IF_ERROR(eError, "HTBLog");
+	eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode);
+	PVR_LOG_IF_ERROR(eError, "HTBLog");
+	for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
+	{
+		eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]);
+		PVR_LOG_IF_ERROR(eError, "HTBLog");
+	}
+	/* The zero case should never be hit, as the clock speed is set whenever the power state is updated */
+	if (0 != g_sCtrl.ui32SyncMarker && 0 != g_sCtrl.ui32SyncCalcClkSpd)
+	{
+		eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+				g_sCtrl.ui32SyncMarker,
+				((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+				((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+				g_sCtrl.ui32SyncCalcClkSpd);
+		PVR_LOG_IF_ERROR(eError, "HTBLog");
+	}
+
+	return eError;
+}
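+
+/*
+ * Illustrative call (a sketch only; the group mask value is hypothetical):
+ * enable the first group-enable word, keep the current log level and PID
+ * filter, and switch the buffer to blocking mode:
+ *
+ *	IMG_UINT32 ui32Groups = 0x1;
+ *	eError = HTBControlKM(1, &ui32Groups, 0, 0,
+ *	                      HTB_LOGMODE_UNDEF, HTB_OPMODE_BLOCK);
+ */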
+
+/*************************************************************************/ /*!
+*/ /**************************************************************************/
+static IMG_BOOL
+_ValidPID( IMG_UINT32 PID )
+{
+	IMG_UINT32 i;
+
+	for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
+	{
+		if ( g_sCtrl.aui32EnablePID[i] == PID )
+		{
+			return IMG_TRUE;
+		}
+	}
+	return IMG_FALSE;
+}
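+
+/*
+ * Note the PID filter is a small circular buffer: once HTB_MAX_NUM_PID (8)
+ * slots are occupied, enabling a further PID in HTBControlKM() overwrites the
+ * oldest slot via ui32PIDHead, so _ValidPID() only ever matches against the
+ * eight most recently enabled PIDs.
+ */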
+
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncPartitionMarker
+ @Description   Write an HTB sync partition marker to the HTB log
+
+ @Input         ui32Marker      Marker value
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarker(
+	const IMG_UINT32 ui32Marker
+)
+{
+	g_sCtrl.ui32SyncMarker = ui32Marker;
+	if ( g_hTLStream )
+	{
+		PVRSRV_ERROR eError;
+		IMG_UINT64 ui64Time;
+		OSClockMonotonicns64(&ui64Time);
+
+		/* The zero case should never be hit, as the clock speed is set whenever the power state is updated */
+		if (0 != g_sCtrl.ui32SyncCalcClkSpd)
+		{
+			eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+					ui32Marker,
+					((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+					((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+					g_sCtrl.ui32SyncCalcClkSpd);
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "HTBLog", PVRSRVGETERRORSTRING(eError), __func__));
+			}
+		}
+	}
+}
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncPartitionMarkerRepeat
+ @Description   Write an HTB sync partition marker to the HTB log, given
+                the previous values to repeat.
+
+ @Input         ui32Marker      Marker value
+ @Input         ui64SyncOSTS    previous OSTS
+ @Input         ui64SyncCRTS    previous CRTS
+ @Input         ui32ClkSpeed    previous Clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarkerRepeat(
+	const IMG_UINT32 ui32Marker,
+	const IMG_UINT64 ui64SyncOSTS,
+	const IMG_UINT64 ui64SyncCRTS,
+	const IMG_UINT32 ui32ClkSpeed
+)
+{
+	if ( g_hTLStream )
+	{
+		PVRSRV_ERROR eError;
+		IMG_UINT64 ui64Time;
+		OSClockMonotonicns64(&ui64Time);
+
+		/* The 'else' case should never be hit as we set the clock speed when the power state is updated */
+		if (0 != ui32ClkSpeed)
+		{
+			eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+					ui32Marker,
+					((IMG_UINT32)((ui64SyncOSTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncOSTS&0xffffffffU)),
+					((IMG_UINT32)((ui64SyncCRTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncCRTS&0xffffffffU)),
+					ui32ClkSpeed);
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "HTBLog", PVRSRVGETERRORSTRING(eError), __func__));
+			}
+		}
+	}
+}
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncScale
+ @Description   Write FW-Host synchronisation data to the HTB log when clocks
+                change or are re-calibrated
+
+ @Input         bLogValues      IMG_TRUE if value should be immediately written
+                                out to the log
+
+ @Input         ui64OSTS        OS Timestamp
+
+ @Input         ui64CRTS        Rogue timestamp
+
+ @Input         ui32CalcClkSpd  Calculated clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncScale(
+	const IMG_BOOL bLogValues,
+	const IMG_UINT64 ui64OSTS,
+	const IMG_UINT64 ui64CRTS,
+	const IMG_UINT32 ui32CalcClkSpd
+)
+{
+	g_sCtrl.ui64SyncOSTS = ui64OSTS;
+	g_sCtrl.ui64SyncCRTS = ui64CRTS;
+	g_sCtrl.ui32SyncCalcClkSpd = ui32CalcClkSpd;
+	if (g_hTLStream && bLogValues)
+	{
+		PVRSRV_ERROR eError;
+		IMG_UINT64 ui64Time;
+		OSClockMonotonicns64(&ui64Time);
+		eError = HTBLog((IMG_HANDLE) NULL, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+				g_sCtrl.ui32SyncMarker,
+				((IMG_UINT32)((ui64OSTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64OSTS&0xffffffff)),
+				((IMG_UINT32)((ui64CRTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64CRTS&0xffffffff)),
+				ui32CalcClkSpd);
+		/*
+		 * Don't spam the log with non-failure cases
+		 */
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "HTBLog",
+				PVRSRVGETERRORSTRING(eError), __func__));
+		}
+	}
+}
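+/* Editorial sketch (not part of this patch): one plausible way a host-side
+ * post-processing tool could use a FWSYNC_MARK_SCALE record (OSTS, CRTS,
+ * CalcClkSpd) to project a raw CR timestamp onto the OS monotonic timeline.
+ * The assumption that the CR timer ticks at ui32CalcClkSpd Hz is ours and is
+ * not stated by this file; overflow handling is omitted.
+ */
+#if 0	/* illustration only */
+static inline IMG_UINT64
+_ExampleCRToOSTimeNs(IMG_UINT64 ui64CRT, IMG_UINT64 ui64SyncOSTS,
+                     IMG_UINT64 ui64SyncCRTS, IMG_UINT32 ui32CalcClkSpd)
+{
+	/* CR ticks elapsed since the sync point, scaled to nanoseconds */
+	IMG_UINT64 ui64Ticks = ui64CRT - ui64SyncCRTS;
+	return ui64SyncOSTS + (ui64Ticks * 1000000000ULL) / ui32CalcClkSpd;
+}
+#endif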
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLogKM
+ @Description   Record a Host Trace Buffer log event
+
+ @Input         PID             The PID of the process the event is associated
+                                with. This is provided as an argument rather
+                                than querying internally so that events associated
+                                with a particular process, but performed by
+                                another, can be logged correctly.
+
+ @Input         ui64TimeStamp   The timestamp to be associated with this log event
+
+ @Input         SF              The log event ID
+
+ @Input         ...             Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBLogKM(
+		IMG_UINT32 PID,
+		IMG_UINT64 ui64TimeStamp,
+		HTB_LOG_SFids SF,
+		IMG_UINT32 ui32NumArgs,
+		IMG_UINT32 * aui32Args
+)
+{
+	IMG_UINT64 ui64SpinLockFlags;
+	IMG_UINT32 ui32ReturnFlags = 0;
+
+	/* Local snapshot variables of global counters */
+	IMG_UINT64 ui64OSTSSnap;
+	IMG_UINT64 ui64CRTSSnap;
+	IMG_UINT32 ui32ClkSpeedSnap;
+
+	/* format of messages is: SF:PID:TIMEPT1:TIMEPT2:[PARn]*
+	 * Buffer is on the stack so we don't need a semaphore to guard it
+	 */
+	IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS];
+
+	/* Min HTB size is HTB_TL_BUFFER_SIZE_MIN : 10000 bytes and max message/
+	 * packet size is 4*(HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS) = 76 bytes,
+	 * hence with these constraints this design is unlikely to hit the
+	 * PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED error
+	 */
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
+	IMG_UINT32 * pui32Message = aui32MessageBuffer;
+	IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs);
+
+	eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+	PVR_LOGG_IF_FALSE(ui32NumArgs == HTB_SF_PARAMNUM(SF), "ui32NumArgs invalid", ReturnError);
+	PVR_LOGG_IF_FALSE(!(ui32NumArgs != 0 && aui32Args == NULL), "aui32Args invalid", ReturnError);
+
+	eError = PVRSRV_ERROR_NOT_ENABLED;
+
+	if ( g_hTLStream
+			&& ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) )
+/*			&& ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */
+/*			&& ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */
+			)
+	{
+		*pui32Message++ = SF;
+		*pui32Message++ = PID;
+		*pui32Message++ = ((IMG_UINT32)((ui64TimeStamp>>32)&0xffffffff));
+		*pui32Message++ = ((IMG_UINT32)(ui64TimeStamp&0xffffffff));
+		while ( ui32NumArgs )
+		{
+			ui32NumArgs--;
+			pui32Message[ui32NumArgs] = aui32Args[ui32NumArgs];
+		}
+
+		eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags );
+		while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
+		{
+			OSReleaseThreadQuanta();
+			eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags );
+		}
+
+		if ( PVRSRV_OK == eError )
+		{
+			g_sCtrl.bLogDropSignalled = IMG_FALSE;
+		}
+		else if ( PVRSRV_ERROR_STREAM_FULL != eError || !g_sCtrl.bLogDropSignalled )
+		{
+			PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__));
+		}
+		if ( PVRSRV_ERROR_STREAM_FULL == eError )
+		{
+			g_sCtrl.bLogDropSignalled = IMG_TRUE;
+		}
+
+	}
+
+	if (SF == HTB_SF_CTRL_FWSYNC_MARK_SCALE)
+	{
+		OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, &ui64SpinLockFlags);
+
+		/* If a marker is being placed reset byte count from last marker */
+		g_sCtrl.ui32ByteCount = 0;
+		g_sCtrl.ui64OSTS = (IMG_UINT64)aui32Args[HTB_ARG_OSTS_PT1] << 32 | aui32Args[HTB_ARG_OSTS_PT2];
+		g_sCtrl.ui64CRTS = (IMG_UINT64)aui32Args[HTB_ARG_CRTS_PT1] << 32 | aui32Args[HTB_ARG_CRTS_PT2];
+		g_sCtrl.ui32ClkSpeed = aui32Args[HTB_ARG_CLKSPD];
+
+		OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, ui64SpinLockFlags);
+	}
+	else
+	{
+		OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, &ui64SpinLockFlags);
+		/* Increase global count */
+		g_sCtrl.ui32ByteCount += ui32MessageSize;
+
+		/* Check if the packet has overwritten the last marker/rpt and
+		   the byte count since then is over half the size of the buffer */
+		if (ui32ReturnFlags & TL_FLAG_OVERWRITE_DETECTED &&
+				 g_sCtrl.ui32ByteCount > HTB_MARKER_PREDICTION_THRESHOLD(g_sCtrl.ui32BufferSize))
+		{
+			/* Take snapshot of global variables */
+			ui64OSTSSnap = g_sCtrl.ui64OSTS;
+			ui64CRTSSnap = g_sCtrl.ui64CRTS;
+			ui32ClkSpeedSnap = g_sCtrl.ui32ClkSpeed;
+			/* Reset global variable counter */
+			g_sCtrl.ui32ByteCount = 0;
+			OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, ui64SpinLockFlags);
+
+			/* Produce a repeat marker */
+			HTBSyncPartitionMarkerRepeat(g_sCtrl.ui32SyncMarker, ui64OSTSSnap, ui64CRTSSnap, ui32ClkSpeedSnap);
+		}
+		else
+		{
+			OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, ui64SpinLockFlags);
+		}
+	}
+
+ReturnError:
+	return eError;
+}
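+/* Editorial worked example (not part of this patch): for a one-parameter
+ * event the aui32MessageBuffer built above holds, in order,
+ * { SF, PID, TimeStamp[63:32], TimeStamp[31:0], Param0 }, giving
+ * ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE + 1) = 20 bytes.
+ */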
+
+/* EOF */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbserver.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbserver.h
new file mode 100644
index 0000000..e205212
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbserver.h
@@ -0,0 +1,237 @@
+/*************************************************************************/ /*!
+@File           htbserver.h
+@Title          Host Trace Buffer server implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provide the mechanism to
+                retrieve the trace data.
+
+                A Host Trace can be merged with a corresponding Firmware Trace.
+                This is achieved by inserting synchronisation data into both
+                traces and post processing to merge them.
+
+                The FW Trace will contain a "Sync Partition Marker". This is
+                updated every time the RGX is brought out of reset (RGX clock
+                timestamps reset at this point) and is repeated when the FW
+                Trace buffer wraps to ensure there is always at least 1
+                partition marker in the Firmware Trace buffer whenever it is
+                read.
+
+                The Host Trace will contain corresponding "Sync Partition
+                Markers" - #HTBSyncPartitionMarker(). Each partition is then
+                subdivided into "Sync Scale" sections - #HTBSyncScale(). The
+                "Sync Scale" data allows the timestamps from the two traces to
+                be correlated. The "Sync Scale" data is updated as part of the
+                standard RGX time correlation code (rgxtimecorr.c) and is
+                updated periodically including on power and clock changes.
+
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __HTBSERVER_H__
+#define __HTBSERVER_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv.h"
+#include "htbuffer.h"
+
+
+/************************************************************************/ /*!
+ @Function      HTBDeviceCreate
+ @Description   Initialisation actions for HTB at device creation.
+
+ @Input         psDeviceNode    Reference to the device node in context
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeviceCreate(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/************************************************************************/ /*!
+ @Function      HTBDeviceDestroy
+ @Description   De-initialisation actions for HTB at device destruction.
+
+ @Input         psDeviceNode    Reference to the device node in context
+
+*/ /**************************************************************************/
+void
+HTBDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/************************************************************************/ /*!
+ @Function      HTBInit
+ @Description   Initialise the Host Trace Buffer and allocate all resources
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBInit(void);
+
+/************************************************************************/ /*!
+ @Function      HTBDeInit
+ @Description   Close the Host Trace Buffer and free all resources
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeInit(void);
+
+/*************************************************************************/ /*!
+ @Function      HTBConfigureKM
+ @Description   Configure or update the configuration of the Host Trace Buffer
+
+ @Input         ui32NameSize    Size of the pszName string
+
+ @Input         pszName         Name to use for the underlying data buffer
+
+ @Input         ui32BufferSize  Size of the underlying data buffer
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBConfigureKM(IMG_UINT32 ui32NameSize, const IMG_CHAR * pszName,
+			   const IMG_UINT32 ui32BufferSize);
+
+
+/*************************************************************************/ /*!
+ @Function      HTBControlKM
+ @Description   Update the configuration of the Host Trace Buffer
+
+ @Input         ui32NumFlagGroups Number of group enable flags words
+
+ @Input         aui32GroupEnable  Flags words controlling groups to be logged
+
+ @Input         ui32LogLevel    Log level to record
+
+ @Input         ui32EnablePID   PID to enable logging for a specific process
+
+ @Input         eLogMode        Enable logging for all or specific processes
+
+ @Input         eOpMode         Control the behaviour of the data buffer
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBControlKM(const IMG_UINT32 ui32NumFlagGroups,
+			 const IMG_UINT32 *aui32GroupEnable,
+			 const IMG_UINT32 ui32LogLevel,
+			 const IMG_UINT32 ui32EnablePID,
+			 const HTB_LOGMODE_CTRL eLogMode,
+			 const HTB_OPMODE_CTRL eOpMode);
+
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncPartitionMarker
+ @Description   Write an HTB sync partition marker to the HTB log
+
+ @Input         ui32Marker      Marker value
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarker(const IMG_UINT32 ui32Marker);
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncPartitionMarkerRepeat
+ @Description   Write a HTB sync partition marker to the HTB log, given
+                the previous values to repeat.
+
+ @Input         ui32Marker      Marker value
+ @Input         ui64SyncOSTS    previous OSTS
+ @Input         ui64SyncCRTS    previous CRTS
+ @Input         ui32ClkSpeed    previous Clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarkerRepeat(const IMG_UINT32 ui32Marker,
+							 const IMG_UINT64 ui64SyncOSTS,
+							 const IMG_UINT64 ui64SyncCRTS,
+							 const IMG_UINT32 ui32ClkSpeed);
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncScale
+ @Description   Write FW-Host synchronisation data to the HTB log when clocks
+                change or are re-calibrated
+
+ @Input         bLogValues      IMG_TRUE if value should be immediately written
+                                out to the log
+
+ @Input         ui64OSTS        OS Timestamp
+
+ @Input         ui64CRTS        Rogue timestamp
+
+ @Input         ui32CalcClkSpd  Calculated clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncScale(const IMG_BOOL bLogValues, const IMG_UINT64 ui64OSTS,
+			 const IMG_UINT64 ui64CRTS, const IMG_UINT32 ui32CalcClkSpd);
+
+/*************************************************************************/ /*!
+ @Function      HTBLogKM
+ @Description   Record a Host Trace Buffer log event
+
+ @Input         PID             The PID of the process the event is associated
+                                with. This is provided as an argument rather
+                                than querying internally so that events associated
+                                with a particular process, but performed by
+                                another, can be logged correctly.
+
+ @Input         ui64TimeStamp   The timestamp to be associated with this log event
+
+ @Input         SF              The log event ID
+
+ @Input         ...             Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBLogKM(IMG_UINT32 PID, IMG_UINT64 ui64TimeStamp, HTB_LOG_SFids SF,
+		 IMG_UINT32 ui32NumArgs, IMG_UINT32 *aui32Args);
+
+#endif /* __HTBSERVER_H__ */
+
+/* EOF */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer.c
new file mode 100644
index 0000000..3bd211c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer.c
@@ -0,0 +1,197 @@
+/*************************************************************************/ /*!
+@File           htbuffer.c
+@Title          Host Trace Buffer shared API.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provide the mechanism to
+                retrieve the trace data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stdarg.h>
+#include "htbuffer.h"
+//#include "allocmem.h"
+#include "osfunc.h"
+#include "client_htbuffer_bridge.h"
+
+/* The group flags array of ints is large enough to store all the group flags.
+ * NB: This will only work while all logging is in the kernel.
+ */
+IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL] = {0};
+
+/*************************************************************************/ /*!
+ @Function      HTBControl
+ @Description   Update the configuration of the Host Trace Buffer
+
+ @Input         hSrvHandle      Server Handle
+
+ @Input         ui32NumFlagGroups Number of group enable flags words
+
+ @Input         aui32GroupEnable  Flags words controlling groups to be logged
+
+ @Input         ui32LogLevel    Log level to record
+
+ @Input         ui32EnablePID   PID to enable logging for a specific process
+
+ @Input         eLogPidMode     Enable logging for all or specific processes
+
+ @Input         eOpMode         Control what trace data is dropped if the TL
+                                buffer is full
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBControl(
+	IMG_HANDLE hSrvHandle,
+	IMG_UINT32 ui32NumFlagGroups,
+	IMG_UINT32 * aui32GroupEnable,
+	IMG_UINT32 ui32LogLevel,
+	IMG_UINT32 ui32EnablePID,
+	HTB_LOGMODE_CTRL eLogPidMode,
+	HTB_OPMODE_CTRL eOpMode
+)
+{
+	return BridgeHTBControl(
+			hSrvHandle,
+			ui32NumFlagGroups,
+			aui32GroupEnable,
+			ui32LogLevel,
+			ui32EnablePID,
+			eLogPidMode,
+			eOpMode
+			);
+}
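+/* Editorial usage sketch (hypothetical caller, not part of this patch):
+ * enable the CTRL and MAIN groups for all PIDs with drop-oldest semantics,
+ * leaving the current log level unchanged. The group flag names come from
+ * the HTB_LOG_SFGROUPLIST X-macro in htbuffer_sf.h.
+ */
+#if 0	/* illustration only */
+static PVRSRV_ERROR _ExampleEnableMainTrace(IMG_HANDLE hSrvHandle)
+{
+	IMG_UINT32 aui32Enable[HTB_FLAG_NUM_EL] =
+		{ HTB_LOG_TYPE_CTRL | HTB_LOG_TYPE_MAIN };
+
+	return HTBControl(hSrvHandle, HTB_FLAG_NUM_EL, aui32Enable,
+	                  0 /* keep log level */, 0 /* no PID filter */,
+	                  HTB_LOGMODE_ALLPID, HTB_OPMODE_DROPOLDEST);
+}
+#endif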
+
+
+/*************************************************************************/ /*!
+ @Function      _HTBLog
+ @Description   Unpack the variadic arguments and forward the log event over
+                the HTB bridge (kernel builds only)
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT64 ui64TimeStampns, HTB_LOG_SFids SF, va_list args)
+{
+#if defined(__KERNEL__)
+	IMG_UINT32 i;
+	IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF);
+	IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS];
+
+	PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS);
+	ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS)? HTB_LOG_MAX_PARAMS: ui32NumArgs;
+
+	/* unpack var args before sending over bridge */
+	for (i=0; i<ui32NumArgs; i++)
+	{
+		aui32Args[i] = va_arg(args, IMG_UINT32);
+	}
+
+	return BridgeHTBLog(hSrvHandle, PID, ui64TimeStampns, SF, ui32NumArgs, aui32Args);
+#else
+	PVR_UNREFERENCED_PARAMETER(hSrvHandle);
+	PVR_UNREFERENCED_PARAMETER(PID);
+	PVR_UNREFERENCED_PARAMETER(ui64TimeStampns);
+	PVR_UNREFERENCED_PARAMETER(SF);
+	PVR_UNREFERENCED_PARAMETER(args);
+
+	PVR_ASSERT(0=="HTB Logging in UM is not yet supported");
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLog
+ @Description   Record a Host Trace Buffer log event
+
+ @Input         PID             The PID of the process the event is associated
+                                with. This is provided as an argument rather
+                                than querying internally so that events associated
+                                with a particular process, but performed by
+                                another, can be logged correctly.
+
+ @Input         ui64TimeStampns The timestamp to be associated with this log event
+
+ @Input         SF              The log event ID
+
+ @Input         ...             Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT64 ui64TimeStampns, IMG_UINT32 SF, ...)
+{
+	PVRSRV_ERROR eError;
+	va_list args;
+	va_start(args, SF);
+	eError = _HTBLog(hSrvHandle, PID, ui64TimeStampns, SF, args);
+	va_end(args);
+	return eError;
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLogSimple
+ @Description   Record a Host Trace Buffer log event with implicit PID and Timestamp
+
+ @Input         SF              The log event ID
+
+ @Input         ...             Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT64 ui64Timestamp;
+	va_list args;
+	va_start(args, SF);
+	OSClockMonotonicns64(&ui64Timestamp);
+	eError = _HTBLog(hSrvHandle, OSGetCurrentProcessID(), ui64Timestamp, SF, args);
+	va_end(args);
+	return eError;
+}
+
+
+/* EOF */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer.h
new file mode 100644
index 0000000..65e3506
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer.h
@@ -0,0 +1,134 @@
+/*************************************************************************/ /*!
+@File           htbuffer.h
+@Title          Host Trace Buffer shared API.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provide the mechanism to
+                retrieve the trace data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_H__
+#define __HTBUFFER_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "htbuffer_sf.h"
+#include "htbuffer_types.h"
+#include "htbuffer_init.h"
+
+#if defined(__KERNEL__)
+#define HTBLOGK(SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple((IMG_HANDLE) NULL, SF, ## args); } while (0)
+
+/* Host Trace Buffer name */
+#define HTB_STREAM_NAME	"PVRHTBuffer"
+
+#else
+#define HTBLOG(handle, SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple(handle, SF, ## args); } while (0)
+#endif
+
+/* macros to cast 64 or 32-bit pointers into 32-bit integer components for Host Trace */
+#define HTBLOG_PTR_BITS_HIGH(p) ((IMG_UINT32)((((IMG_UINT64)((uintptr_t)p))>>32)&0xffffffff))
+#define HTBLOG_PTR_BITS_LOW(p)  ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff))
+
+/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */
+#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff))
+#define HTBLOG_U64_BITS_LOW(u)  ((IMG_UINT32)(u&0xffffffff))
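+/* Editorial example (not part of this patch): splitting a 64-bit device
+ * virtual address into the two 32-bit halves expected by the MMU events in
+ * htbuffer_sf.h; HTB_SF_MMU_PAGE_OP_UNMAP takes exactly two parameters.
+ */
+#if 0	/* illustration only, kernel-side */
+	IMG_UINT64 ui64DevVAddr = 0x12345678ABCDULL;
+	HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
+	        HTBLOG_U64_BITS_HIGH(ui64DevVAddr),
+	        HTBLOG_U64_BITS_LOW(ui64DevVAddr));
+#endif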
+
+/*************************************************************************/ /*!
+ @Function      HTBLog
+ @Description   Record a Host Trace Buffer log event
+
+ @Input         PID             The PID of the process the event is associated
+                                with. This is provided as an argument rather
+                                than querying internally so that events associated
+                                with a particular process, but performed by
+                                another, can be logged correctly.
+
+ @Input         ui64TimeStampns The timestamp in ns for this event
+
+ @Input         SF              The log event ID
+
+ @Input         ...             Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT64 ui64TimeStampns, IMG_UINT32 SF, ...);
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLogSimple
+ @Description   Record a Host Trace Buffer log event with implicit PID and Timestamp
+
+ @Input         SF              The log event ID
+
+ @Input         ...             Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...);
+
+
+
+/*  DEBUG log group enable */
+#if !defined(HTB_DEBUG_LOG_GROUP)
+#undef HTB_LOG_TYPE_DBG    /* No trace statements in this log group should be checked in */
+#define HTB_LOG_TYPE_DBG    __BUILDERROR__
+#endif
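+/* Editorial note (not part of this patch): the redefinition above makes any
+ * trace statement naming the DBG group fail to compile unless
+ * HTB_DEBUG_LOG_GROUP is defined, catching left-over debug logging before
+ * check-in.
+ */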
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_H__ */
+/*****************************************************************************
+ End of file (htbuffer.h)
+*****************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer_init.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer_init.h
new file mode 100644
index 0000000..4df9446
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer_init.h
@@ -0,0 +1,116 @@
+/*************************************************************************/ /*!
+@File           htbuffer_init.h
+@Title          Host Trace Buffer functions needed for Services initialisation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_INIT_H__
+#define __HTBUFFER_INIT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+
+/*************************************************************************/ /*!
+ @Function      HTBConfigure
+ @Description   Configure the Host Trace Buffer.
+                Once these parameters are set they may not be changed
+
+ @Input         hSrvHandle      Server Handle
+
+ @Input         pszBufferName   Name to use for the TL buffer, this will be
+                                required to request trace data from the TL
+
+ @Input         ui32BufferSize  Requested TL buffer size in bytes
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBConfigure(
+	IMG_HANDLE hSrvHandle,
+	IMG_CHAR * pszBufferName,
+	IMG_UINT32 ui32BufferSize
+);
+
+/*************************************************************************/ /*!
+ @Function      HTBControl
+ @Description   Update the configuration of the Host Trace Buffer
+
+ @Input         hSrvHandle      Server Handle
+
+ @Input         ui32NumFlagGroups Number of group enable flags words
+
+ @Input         aui32GroupEnable  Flags words controlling groups to be logged
+
+ @Input         ui32LogLevel    Log level to record
+
+ @Input         ui32EnablePID   PID to enable logging for a specific process
+
+ @Input         eLogMode        Enable logging for all or specific processes
+
+ @Input         eOpMode         Control what trace data is dropped if the TL
+                                buffer is full
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBControl(
+	IMG_HANDLE hSrvHandle,
+	IMG_UINT32 ui32NumFlagGroups,
+	IMG_UINT32 * aui32GroupEnable,
+	IMG_UINT32 ui32LogLevel,
+	IMG_UINT32 ui32EnablePID,
+	HTB_LOGMODE_CTRL eLogMode,
+	HTB_OPMODE_CTRL eOpMode
+);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_INIT_H__ */
+/*****************************************************************************
+ End of file (htbuffer_init.h)
+*****************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer_sf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer_sf.h
new file mode 100644
index 0000000..4f86d63
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer_sf.h
@@ -0,0 +1,236 @@
+/*************************************************************************/ /*!
+@File           htbuffer_sf.h
+@Title          Host Trace Buffer interface string format specifiers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the Host Trace Buffer logging messages. The
+                following list defines the messages the host driver prints.
+                Changing anything other than the first column (even to fix a
+                spelling mistake in a string) will break compatibility with
+                log files created with older/newer driver versions.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_SF_H__
+#define __HTBUFFER_SF_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*****************************************************************************
+ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you
+ *          WILL BREAK host tracing message compatibility with previous
+ *          driver versions. Only add new ones, if so required.
+ ****************************************************************************/
+
+
+/* String used in pvrdebug -h output */
+#define HTB_LOG_GROUPS_STRING_LIST   "ctrl,mmu,sync,main,brg"
+
+/* Used in print statements to display log group state, one %s per group defined */
+#define HTB_LOG_ENABLED_GROUPS_LIST_PFSPEC  "%s%s%s%s%s"
+
+/* Available log groups - Master template
+ *
+ * Group usage is as follows:
+ *    CTRL  - Internal Host Trace information and synchronisation data
+ *    MMU   - MMU page mapping information
+ *    SYNC  - Synchronisation debug
+ *    MAIN  - Data master kicks, etc. tying in with the MAIN group in FWTrace
+ *    DBG   - Temporary debugging group, logs not to be left in the driver
+ *
+ */
+#define HTB_LOG_SFGROUPLIST                               \
+	X( HTB_GROUP_NONE,     NONE  )                        \
+/*     gid,                group flag / apphint name */   \
+	X( HTB_GROUP_CTRL,     CTRL  )                        \
+	X( HTB_GROUP_MMU,      MMU   )                        \
+	X( HTB_GROUP_SYNC,     SYNC  )                        \
+	X( HTB_GROUP_MAIN,     MAIN  )                        \
+	X( HTB_GROUP_BRG,      BRG  )                         \
+/* Debug group HTB_GROUP_DBG must always be last */       \
+	X( HTB_GROUP_DBG,      DBG   )
+
+
+/* Table of String Format specifiers, the group they belong and the number of
+ * arguments each expects. Xmacro styled macros are used to generate what is
+ * needed without requiring hand editing.
+ *
+ * id		: unique id within a group
+ * gid		: group id as defined above
+ * sym name	: symbolic name of enumerations used to identify message strings
+ * string	: Actual string
+ * #args	: number of arguments the string format requires
+ */
+#define HTB_LOG_SFIDLIST \
+/*id,  gid,             sym name,                       string,                           # arguments */ \
+X( 0,  HTB_GROUP_NONE,  HTB_SF_FIRST,                   "You should not use this string", 0) \
+\
+X( 1,  HTB_GROUP_CTRL,  HTB_SF_CTRL_LOGMODE,            "HTB log mode set to %d (1- all PID, 2 - restricted PID)\n", 1) \
+X( 2,  HTB_GROUP_CTRL,  HTB_SF_CTRL_ENABLE_PID,         "HTB enable logging for PID %d\n", 1) \
+X( 3,  HTB_GROUP_CTRL,  HTB_SF_CTRL_ENABLE_GROUP,       "HTB enable logging groups 0x%08x\n", 1) \
+X( 4,  HTB_GROUP_CTRL,  HTB_SF_CTRL_LOG_LEVEL,          "HTB log level set to %d\n", 1) \
+X( 5,  HTB_GROUP_CTRL,  HTB_SF_CTRL_OPMODE,             "HTB operating mode set to %d (1 - droplatest, 2 - drop oldest, 3 - block)\n", 1) \
+X( 6,  HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_SCALE,       "HTBFWSync OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \
+X( 7,  HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_SCALE_RPT,   "FW Sync scale info OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \
+X( 8,  HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_MARK,        "FW Sync Partition marker: %d\n", 1) \
+X( 9,  HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_MARK_RPT,    "FW Sync Partition repeat: %d\n", 1) \
+X( 10, HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_MARK_SCALE,  "Text not used", 6)\
+\
+X( 1,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_TABLE,       "MMU page op table entry page_id=%08x%08x index=%d level=%d val=%08x%08x map=%d\n", 7) \
+X( 2,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_ALLOC,       "MMU allocating DevVAddr from %08x%08x to %08x%08x\n", 4) \
+X( 3,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_FREE,        "MMU freeing DevVAddr from %08x%08x to %08x%08x\n", 4) \
+X( 4,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_MAP,         "MMU mapping DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \
+X( 5,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_PMRMAP,      "MMU mapping PMR DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \
+X( 6,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_UNMAP,       "MMU unmapping DevVAddr %08x%08x\n", 2) \
+\
+X( 1,  HTB_GROUP_SYNC,  HTB_SF_SYNC_SERVER_ALLOC,       "Server sync allocation [%08X]\n", 1) \
+X( 2,  HTB_GROUP_SYNC,  HTB_SF_SYNC_SERVER_UNREF,       "Server sync unreferenced [%08X]\n", 1) \
+X( 3,  HTB_GROUP_SYNC,  HTB_SF_SYNC_PRIM_OP_CREATE,     "Sync OP create 0x%08x, block count=%d, server syncs=%d, client syncs=%d\n", 4) \
+X( 4,  HTB_GROUP_SYNC,  HTB_SF_SYNC_PRIM_OP_TAKE,       "Sync OP take 0x%08x server syncs=%d, client syncs=%d\n", 3) \
+X( 5,  HTB_GROUP_SYNC,  HTB_SF_SYNC_PRIM_OP_COMPLETE,   "Sync OP complete 0x%08x\n", 1) \
+X( 6,  HTB_GROUP_SYNC,  HTB_SF_SYNC_PRIM_OP_DESTROY,    "Sync OP destroy 0x%08x\n", 1) \
+\
+X( 1,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_TA,            "Kick TA: FWCtx %08X @ %d\n", 2) \
+X( 2,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_3D,            "Kick 3D: FWCtx %08X @ %d\n", 2) \
+X( 3,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_CDM,           "Kick CDM: FWCtx %08X @ %d\n", 2) \
+X( 4,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_RTU,           "Kick RTU: FWCtx %08X @ %d\n", 2) \
+X( 5,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_SHG,           "Kick SHG: FWCtx %08X @ %d\n", 2) \
+X( 6,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_2D,            "Kick 2D: FWCtx %08X @ %d\n", 2) \
+X( 7,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_UNCOUNTED,     "Kick (uncounted) for all DMs\n", 0) \
+X( 8,  HTB_GROUP_MAIN,  HTB_SF_MAIN_FWCCB_CMD,          "FW CCB Cmd: %d\n", 1) \
+X( 9,  HTB_GROUP_MAIN,  HTB_SF_MAIN_PRE_POWER,          "Pre-power duration @ phase [%d] (0-shutdown,1-startup) RGX: %llu ns SYS: %llu ns\n", 3) \
+X(10,  HTB_GROUP_MAIN,  HTB_SF_MAIN_POST_POWER,         "Post-power duration @ phase [%d] (0-shutdown,1-startup) SYS: %llu ns RGX: %llu ns\n", 3) \
+\
+X( 1,  HTB_GROUP_BRG,   HTB_SF_BRG_BRIDGE_CALL,         "Bridge call: start: %010u: bid %03d fid %d\n", 3) \
+X( 2,  HTB_GROUP_BRG,   HTB_SF_BRG_BRIDGE_CALL_ERR,     "Bridge call: start: %010u: bid %03d fid %d error %d\n", 4) \
+\
+X( 1,  HTB_GROUP_DBG,   HTB_SF_DBG_INTPAIR,             "0x%8.8x 0x%8.8x\n", 2) \
+\
+X( 65535, HTB_GROUP_NONE, HTB_SF_LAST,                  "You should not use this string\n", 15)
+
+
+
+/* gid - Group numbers */
+typedef enum _HTB_LOG_SFGROUPS {
+#define X(A,B) A,
+	HTB_LOG_SFGROUPLIST
+#undef X
+} HTB_LOG_SFGROUPS;
+
+
+/* Group flags are stored in an array of elements, each of which holds a
+ * fixed number of bits.
+ */
+#define HTB_FLAG_EL_T                   IMG_UINT32
+#define HTB_FLAG_NUM_BITS_IN_EL         ( sizeof(HTB_FLAG_EL_T) * 8 )
+
+#define HTB_LOG_GROUP_FLAG_GROUP(gid)   ( (gid-1) / HTB_FLAG_NUM_BITS_IN_EL )
+#define HTB_LOG_GROUP_FLAG(gid)         (gid? (0x1 << ((gid-1)%HTB_FLAG_NUM_BITS_IN_EL)): 0)
+#define HTB_LOG_GROUP_FLAG_NAME(gid)    HTB_LOG_TYPE_ ## gid
+
+/* group enable flags */
+typedef enum _HTB_LOG_TYPE {
+#define X(a, b) HTB_LOG_GROUP_FLAG_NAME(b) = HTB_LOG_GROUP_FLAG(a),
+	HTB_LOG_SFGROUPLIST
+#undef X
+} HTB_LOG_TYPE;
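+/* Editorial note (not part of this patch): with the group list above, the
+ * X-macro expansion yields, for example,
+ *   HTB_LOG_TYPE_NONE = 0            (gid 0 carries no flag)
+ *   HTB_LOG_TYPE_CTRL = 0x1 << 0     (gid 1)
+ *   HTB_LOG_TYPE_MAIN = 0x1 << 3     (gid 4)
+ * so all groups currently defined fit in a single 32-bit flag element.
+ */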
+
+
+
+/*  The symbolic names found in the table above are assigned a ui32 value in
+ *  the following format:
+ *
+ *     0-11: id number
+ *    12-15: group id number
+ *    16-19: number of parameters
+ *    20-27: unused
+ *    28-30: active: identify SF packet, otherwise regular int32
+ *       31: reserved for signed/unsigned compatibility
+ *
+ *  The following macro assigns those values to the enum-generated SF id list.
+ */
+#define HTB_LOG_IDMARKER            (0x70000000)
+#define HTB_LOG_CREATESFID(a,b,e)   (((a) | (b << 12) | (e << 16)) | HTB_LOG_IDMARKER)
+
+#define HTB_LOG_IDMASK              (0xFFF00000)
+#define HTB_LOG_VALIDID(I)          ( ((I) & HTB_LOG_IDMASK) == HTB_LOG_IDMARKER )
+
+typedef enum HTB_LOG_SFids {
+#define X(a, b, c, d, e) c = HTB_LOG_CREATESFID(a,b,e),
+	HTB_LOG_SFIDLIST
+#undef X
+} HTB_LOG_SFids;
+
+/* Return the group id that the given (enum generated) id belongs to */
+#define HTB_SF_GID(x) (((x)>>12) & 0xf)
+/* future improvement to support log levels */
+#define HTB_SF_LVL(x) (0)
+/* Returns how many arguments the SF(string format) for the given (enum generated) id requires */
+#define HTB_SF_PARAMNUM(x) (((x)>>16) & 0xf)
+/* Returns the id of given enum */
+#define HTB_SF_ID(x) (x & 0xfff)
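+/* Editorial worked example (not part of this patch): the table above gives
+ * HTB_SF_CTRL_FWSYNC_MARK_SCALE id 10, group HTB_GROUP_CTRL (gid 1) and six
+ * parameters, so HTB_LOG_CREATESFID(10, 1, 6) yields 0x7006100A.
+ * HTB_SF_ID() then recovers 10, HTB_SF_GID() 1, HTB_SF_PARAMNUM() 6, and
+ * HTB_LOG_VALIDID() holds because the top bits match HTB_LOG_IDMARKER.
+ */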
+
+/* format of messages is: SF:PID:TIMEPT1:TIMEPT2:[PARn]*
+ */
+#define HTB_LOG_HEADER_SIZE         4
+#define HTB_LOG_MAX_PARAMS          15
+
+#if defined (__cplusplus)
+}
+#endif
+
+/* Defines for handling MARK_SCALE special case */
+#define HTB_GID_CTRL 1
+#define HTB_ID_MARK_SCALE 10
+#define HTB_MARK_SCALE_ARG_ARRAY_SIZE 6
+
+/* Defines for extracting args from array for special case MARK_SCALE */
+#define HTB_ARG_SYNCMARK 0
+#define HTB_ARG_OSTS_PT1 1
+#define HTB_ARG_OSTS_PT2 2
+#define HTB_ARG_CRTS_PT1 3
+#define HTB_ARG_CRTS_PT2 4
+#define HTB_ARG_CLKSPD   5
+
+#endif /* __HTBUFFER_SF_H__ */
+/*****************************************************************************
+ End of file (htbuffer_sf.h)
+*****************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer_types.h
new file mode 100644
index 0000000..c4f19b3e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/htbuffer_types.h
@@ -0,0 +1,124 @@
+/*************************************************************************/ /*!
+@File           htbuffer_types.h
+@Title          Host Trace Buffer types.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provide the mechanism to
+                retrieve the trace data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_TYPES_H__
+#define __HTBUFFER_TYPES_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "htbuffer_sf.h"
+
+/* Number of flag array elements needed to store all the group flags */
+#define HTB_FLAG_NUM_EL ( ((HTB_GROUP_DBG-1) / HTB_FLAG_NUM_BITS_IN_EL) + 1 )
+extern IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL];
+
+#define HTB_GROUP_ENABLED(SF) (g_auiHTBGroupEnable[HTB_LOG_GROUP_FLAG_GROUP(HTB_SF_GID(SF))] & HTB_LOG_GROUP_FLAG(HTB_SF_GID(SF)))
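+/* Editorial worked example (not part of this patch): for an SF in the MAIN
+ * group (gid 4), HTB_LOG_GROUP_FLAG_GROUP(4) selects element (4-1)/32 = 0
+ * and HTB_LOG_GROUP_FLAG(4) is 0x1 << 3, so MAIN logging is enabled when
+ * bit 3 of g_auiHTBGroupEnable[0] is set.
+ */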
+
+/*************************************************************************/ /*!
+ Host Trace Buffer operation mode
+ Care must be taken if changing this enum to ensure the MapFlags[] array
+ in htbserver.c is kept in-step.
+*/ /**************************************************************************/
+typedef enum
+{
+	/*! Undefined operation mode */
+	HTB_OPMODE_UNDEF = 0,
+
+	/*! Drop latest, intended for continuous logging to a UM daemon.
+	 *  If the daemon does not keep up, the most recent log data
+	 *  will be dropped
+	 */
+	HTB_OPMODE_DROPLATEST,
+
+	/*! Drop oldest, intended for crash logging.
+	 *  Data will be continuously written to a circular buffer.
+	 *  After a crash the buffer will contain events leading up to the crash
+	 */
+	HTB_OPMODE_DROPOLDEST,
+
+	/*! Block write if buffer is full
+	 */
+	HTB_OPMODE_BLOCK,
+
+	HTB_OPMODE_LAST = HTB_OPMODE_BLOCK
+} HTB_OPMODE_CTRL;
+
+
+/*************************************************************************/ /*!
+ Host Trace Buffer log mode control
+*/ /**************************************************************************/
+typedef enum
+{
+	/*! Undefined log mode, used if update is not applied */
+	HTB_LOGMODE_UNDEF = 0,
+
+	/*! Log trace messages for all PIDs.
+	 */
+	HTB_LOGMODE_ALLPID,
+
+	/*! Log trace messages for specific PIDs only.
+	 */
+	HTB_LOGMODE_RESTRICTEDPID,
+
+	HTB_LOGMODE_LAST = HTB_LOGMODE_RESTRICTEDPID
+} HTB_LOGMODE_CTRL;
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_TYPES_H__ */
+
+/*****************************************************************************
+ End of file (htbuffer_types.h)
+*****************************************************************************/
+
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/img_3dtypes.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/img_3dtypes.h
new file mode 100644
index 0000000..2b8ad7b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/img_3dtypes.h
@@ -0,0 +1,248 @@
+/*************************************************************************/ /*!
+@File
+@Title          Global 3D types for use by IMG APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines 3D types for use by IMG APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __IMG_3DTYPES_H__
+#define __IMG_3DTYPES_H__
+
+#include <powervr/buffer_attribs.h>
+#include "img_types.h"
+#include "img_defs.h"
+
+/**
+ * Comparison functions
+ * This comparison function is defined as:
+ * A {CmpFunc} B
+ * A is a reference value, e.g., incoming depth etc.
+ * B is the sample value, e.g., value in depth buffer.
+ */
+typedef enum _IMG_COMPFUNC_
+{
+	IMG_COMPFUNC_NEVER,			/**< The comparison never succeeds */
+	IMG_COMPFUNC_LESS,			/**< The comparison is a less-than operation */
+	IMG_COMPFUNC_EQUAL,			/**< The comparison is an equal-to operation */
+	IMG_COMPFUNC_LESS_EQUAL,	/**< The comparison is a less-than or equal-to
+									 operation */
+	IMG_COMPFUNC_GREATER,		/**< The comparison is a greater-than operation
+								*/
+	IMG_COMPFUNC_NOT_EQUAL,		/**< The comparison is a not-equal-to operation
+								*/
+	IMG_COMPFUNC_GREATER_EQUAL,	/**< The comparison is a greater-than or
+									 equal-to operation */
+	IMG_COMPFUNC_ALWAYS,		/**< The comparison always succeeds */
+} IMG_COMPFUNC;
+
+/**
+ * Stencil op functions
+ */
+typedef enum _IMG_STENCILOP_
+{
+	IMG_STENCILOP_KEEP,		/**< Keep original value */
+	IMG_STENCILOP_ZERO,		/**< Set stencil to 0 */
+	IMG_STENCILOP_REPLACE,	/**< Replace stencil entry */
+	IMG_STENCILOP_INCR_SAT,	/**< Increment stencil entry, clamping to max */
+	IMG_STENCILOP_DECR_SAT,	/**< Decrement stencil entry, clamping to zero */
+	IMG_STENCILOP_INVERT,	/**< Invert bits in stencil entry */
+	IMG_STENCILOP_INCR,		/**< Increment stencil entry,
+								 wrapping if necessary */
+	IMG_STENCILOP_DECR,		/**< Decrement stencil entry,
+								 wrapping if necessary */
+} IMG_STENCILOP;
+
+/**
+ * Alpha blending allows colours and textures on one surface
+ * to be blended with transparency onto another surface.
+ * These definitions apply to both source and destination blending
+ * states
+ */
+typedef enum _IMG_BLEND_
+{
+	IMG_BLEND_ZERO = 0,        /**< Blend factor is (0,0,0,0) */
+	IMG_BLEND_ONE,             /**< Blend factor is (1,1,1,1) */
+	IMG_BLEND_SRC_COLOUR,      /**< Blend factor is the source colour */
+	IMG_BLEND_INV_SRC_COLOUR,  /**< Blend factor is the inverted source colour
+									(i.e. 1-src_col) */
+	IMG_BLEND_SRC_ALPHA,       /**< Blend factor is the source alpha */
+	IMG_BLEND_INV_SRC_ALPHA,   /**< Blend factor is the inverted source alpha
+									(i.e. 1-src_alpha) */
+	IMG_BLEND_DEST_ALPHA,      /**< Blend factor is the destination alpha */
+	IMG_BLEND_INV_DEST_ALPHA,  /**< Blend factor is the inverted destination
+									alpha */
+	IMG_BLEND_DEST_COLOUR,     /**< Blend factor is the destination colour */
+	IMG_BLEND_INV_DEST_COLOUR, /**< Blend factor is the inverted destination
+									colour */
+	IMG_BLEND_SRC_ALPHASAT,    /**< Blend factor is the alpha saturation (the
+									minimum of (Src alpha,
+									1 - destination alpha)) */
+	IMG_BLEND_BLEND_FACTOR,    /**< Blend factor is a constant */
+	IMG_BLEND_INVBLEND_FACTOR, /**< Blend factor is a constant (inverted)*/
+	IMG_BLEND_SRC1_COLOUR,     /**< Blend factor is the colour output from
+									the pixel shader */
+	IMG_BLEND_INV_SRC1_COLOUR, /**< Blend factor is the inverted colour
+									output from the pixel shader */
+	IMG_BLEND_SRC1_ALPHA,      /**< Blend factor is the alpha output from
+									the pixel shader */
+	IMG_BLEND_INV_SRC1_ALPHA   /**< Blend factor is the inverted alpha
+									output from the pixel shader */
+} IMG_BLEND;
+
+/**
+ * The arithmetic operation to perform when blending
+ */
+typedef enum _IMG_BLENDOP_
+{
+	IMG_BLENDOP_ADD = 0,      /**< Result = (Source + Destination) */
+	IMG_BLENDOP_SUBTRACT,     /**< Result = (Source - Destination) */
+	IMG_BLENDOP_REV_SUBTRACT, /**< Result = (Destination - Source) */
+	IMG_BLENDOP_MIN,          /**< Result = min (Source, Destination) */
+	IMG_BLENDOP_MAX           /**< Result = max (Source, Destination) */
+} IMG_BLENDOP;
+
+/**
+ * Logical operation to perform when logic ops are enabled
+ */
+typedef enum _IMG_LOGICOP_
+{
+	IMG_LOGICOP_CLEAR = 0,     /**< Result = 0 */
+	IMG_LOGICOP_SET,           /**< Result = -1 */
+	IMG_LOGICOP_COPY,          /**< Result = Source */
+	IMG_LOGICOP_COPY_INVERTED, /**< Result = ~Source */
+	IMG_LOGICOP_NOOP,          /**< Result = Destination */
+	IMG_LOGICOP_INVERT,        /**< Result = ~Destination */
+	IMG_LOGICOP_AND,           /**< Result = Source & Destination */
+	IMG_LOGICOP_NAND,          /**< Result = ~(Source & Destination) */
+	IMG_LOGICOP_OR,            /**< Result = Source | Destination */
+	IMG_LOGICOP_NOR,           /**< Result = ~(Source | Destination) */
+	IMG_LOGICOP_XOR,           /**< Result = Source ^ Destination */
+	IMG_LOGICOP_EQUIV,         /**< Result = ~(Source ^ Destination) */
+	IMG_LOGICOP_AND_REVERSE,   /**< Result = Source & ~Destination */
+	IMG_LOGICOP_AND_INVERTED,  /**< Result = ~Source & Destination */
+	IMG_LOGICOP_OR_REVERSE,    /**< Result = Source | ~Destination */
+	IMG_LOGICOP_OR_INVERTED    /**< Result = ~Source | Destination */
+} IMG_LOGICOP;
+
+/**
+ * Type of fog blending supported
+ */
+typedef enum _IMG_FOGMODE_
+{
+	IMG_FOGMODE_NONE, /**< No fog blending - fog calculations are
+					   *   based on the value output from the vertex phase */
+	IMG_FOGMODE_LINEAR, /**< Linear interpolation */
+	IMG_FOGMODE_EXP, /**< Exponential */
+	IMG_FOGMODE_EXP2, /**< Exponential squared */
+} IMG_FOGMODE;
+
+/**
+ * Types of filtering
+ */
+typedef enum _IMG_FILTER_
+{
+	IMG_FILTER_DONTCARE,	/**< Any filtering mode is acceptable */
+	IMG_FILTER_POINT,		/**< Point filtering */
+	IMG_FILTER_LINEAR,		/**< Bi-linear filtering */
+	IMG_FILTER_BICUBIC,		/**< Bi-cubic filtering */
+} IMG_FILTER;
+
+/**
+ * Addressing modes for textures
+ */
+typedef enum _IMG_ADDRESSMODE_
+{
+	IMG_ADDRESSMODE_REPEAT,	/**< Texture repeats continuously */
+	IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */
+	IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */
+	IMG_ADDRESSMODE_FLIPCLAMP, /**< Flipped once, then clamp */
+	IMG_ADDRESSMODE_CLAMPBORDER,
+	IMG_ADDRESSMODE_OGL_CLAMP,
+	IMG_ADDRESSMODE_OVG_TILEFILL,
+	IMG_ADDRESSMODE_DONTCARE,
+} IMG_ADDRESSMODE;
+
+/**
+ * Culling based on winding order of triangle.
+ */
+typedef enum _IMG_CULLMODE_
+{
+	IMG_CULLMODE_NONE,			/**< Don't cull */
+	IMG_CULLMODE_FRONTFACING,	/**< Front facing triangles */
+	IMG_CULLMODE_BACKFACING,	/**< Back facing triangles */
+} IMG_CULLMODE;
+
+/**
+  Colour for clearing surfaces.
+  The four elements of the 4 x 32 bit array will map to colour
+  R,G,B,A components, in order.
+  For YUV colour space the order is Y,U,V.
+  For Depth and Stencil formats D maps to R and S maps to G.
+*/
+typedef union _IMG_CLEAR_COLOUR_ {
+	IMG_UINT32        aui32[4];
+	IMG_INT32         ai32[4];
+	IMG_FLOAT         af32[4];
+} IMG_CLEAR_COLOUR;
+
+static_assert(sizeof(IMG_FLOAT) == sizeof(IMG_INT32), "Size of IMG_FLOAT is not 32 bits.");
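+
+/* Editor's sketch (compiled out): filling the union as floats while other
+ * code reads the same storage as raw 32-bit words; the static_assert above
+ * guarantees the two views are the same size. The helper name is assumed.
+ */
+#if 0
+static void SetClearColourRGBA(IMG_CLEAR_COLOUR *psColour, IMG_FLOAT fR,
+                               IMG_FLOAT fG, IMG_FLOAT fB, IMG_FLOAT fA)
+{
+	psColour->af32[0] = fR; /* R (or Y, or Depth)   */
+	psColour->af32[1] = fG; /* G (or U, or Stencil) */
+	psColour->af32[2] = fB; /* B (or V)             */
+	psColour->af32[3] = fA; /* A                    */
+}
+#endif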
+
+/*! ************************************************************************//**
+@brief          Specifies the MSAA resolve operation.
+*/ /**************************************************************************/
+typedef enum _IMG_RESOLVE_OP_
+{
+	IMG_RESOLVE_BLEND   = 0,          /*!< box filter on the samples */
+	IMG_RESOLVE_MIN     = 1,          /*!< minimum of the samples */
+	IMG_RESOLVE_MAX     = 2,          /*!< maximum of the samples */
+	IMG_RESOLVE_SAMPLE0 = 3,          /*!< choose sample 0 */
+	IMG_RESOLVE_SAMPLE1 = 4,          /*!< choose sample 1 */
+	IMG_RESOLVE_SAMPLE2 = 5,          /*!< choose sample 2 */
+	IMG_RESOLVE_SAMPLE3 = 6,          /*!< choose sample 3 */
+	IMG_RESOLVE_SAMPLE4 = 7,          /*!< choose sample 4 */
+	IMG_RESOLVE_SAMPLE5 = 8,          /*!< choose sample 5 */
+	IMG_RESOLVE_SAMPLE6 = 9,          /*!< choose sample 6 */
+	IMG_RESOLVE_SAMPLE7 = 10,         /*!< choose sample 7 */
+} IMG_RESOLVE_OP;
+
+
+#endif /* __IMG_3DTYPES_H__ */
+/******************************************************************************
+ End of file (img_3dtypes.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/img_defs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/img_defs.h
new file mode 100644
index 0000000..49e5b3d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/img_defs.h
@@ -0,0 +1,550 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common header containing type definitions for portability
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Contains variable and structure definitions. Any platform
+                specific types should be defined in this file.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef IMG_DEFS_H
+#define IMG_DEFS_H
+
+#if defined(LINUX) && defined(__KERNEL__)
+#include <linux/types.h>
+#else
+#include <stddef.h>
+#endif
+#if !(defined(LINUX) && defined(__KERNEL__))
+#include <assert.h>
+#endif
+
+#include "img_types.h"
+
+#if defined(NO_INLINE_FUNCS)
+	#define	INLINE
+	#define	FORCE_INLINE
+#else
+#if defined(__cplusplus) || defined(INTEGRITY_OS)
+	#if	!defined(INLINE)
+		#define INLINE				inline
+	#endif
+	#define	FORCE_INLINE			static inline
+#else
+#if	!defined(INLINE)
+	#define	INLINE					__inline
+#endif
+#if (defined(UNDER_WDDM) || defined(WINDOWS_WDF)) && defined(_X86_)
+	#define	FORCE_INLINE			__forceinline
+#else
+	#define	FORCE_INLINE			static __inline
+#endif
+#endif
+#endif
+
+/* True if the GCC version is at least the given version. False for older
+ * versions of GCC, or other compilers.
+ */
+#define GCC_VERSION_AT_LEAST(major, minor) \
+	(__GNUC__ > (major) || \
+	(__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
+
+/* Use Clang's __has_extension and __has_builtin macros if available. */
+#if defined(__has_extension)
+#define has_clang_extension(e) __has_extension(e)
+#else
+#define has_clang_extension(e) 0
+#endif
+
+#if defined(__has_builtin)
+#define has_clang_builtin(e) __has_builtin(e)
+#else
+#define has_clang_builtin(e) 0
+#endif
+
+/* Use this in any file, or use attributes under GCC - see below */
+#ifndef PVR_UNREFERENCED_PARAMETER
+#define	PVR_UNREFERENCED_PARAMETER(param) ((void)(param))
+#endif
+
+/* static_assert(condition, "message to print if it fails");
+ *
+ * Assert something at compile time. If the assertion fails, try to print
+ * the message, otherwise do nothing. static_assert is available if:
+ *
+ * - It's already defined as a macro (e.g. by <assert.h> in C11)
+ * - We're using MSVC which exposes static_assert unconditionally
+ * - We're using a C++ compiler that supports C++11
+ * - We're using GCC 4.6 and up in C mode (in which case it's available as
+ *   _Static_assert)
+ *
+ * In all other cases, fall back to an equivalent that makes an invalid
+ * declaration.
+ */
+#if !defined(static_assert) && !defined(_MSC_VER) && \
+		(!defined(__cplusplus) || __cplusplus < 201103L) || defined(__KLOCWORK__)
+	/* static_assert isn't already available */
+	#if !defined(__cplusplus) && (GCC_VERSION_AT_LEAST(4, 6) || \
+								  (defined(__clang__) && has_clang_extension(c_static_assert)))
+		#define static_assert _Static_assert
+	#else
+		#define static_assert(expr, message) \
+			extern int static_assert_failed[(expr) ? 1 : -1] __attribute__((unused))
+	#endif
+#else
+#if defined(CONFIG_L4)
+	/* Defined but not compatible with DDK usage, so undefine and ignore */
+	#undef static_assert
+	#define static_assert(expr, message)
+#endif
+#endif
+
+/*
+ * unreachable("explanation") can be used to indicate to the compiler that
+ * some parts of the code can never be reached, like the default branch
+ * of a switch that covers all real-world possibilities, even though the
+ * underlying type can hold other values.
+ *
+ * The message will be printed as an assert() when debugging.
+ *
+ * Note: there is no need to add a 'return' or any error handling after
+ * calling unreachable(), as this call will never return.
+ */
+#if defined(LINUX) && defined(__KERNEL__)
+/* Kernel has its own unreachable(), which is a simple infinite loop */
+#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable)
+	#define unreachable(msg) \
+		do { \
+			assert(!msg); \
+			__builtin_unreachable(); \
+		} while (0)
+#elif defined(_MSC_VER)
+	#define unreachable(msg) \
+		do { \
+			assert(!msg); \
+			__assume(0); \
+		} while (0)
+#else
+	#define unreachable(msg) \
+		do { \
+			assert(!msg); \
+			while (1); \
+		} while (0)
+#endif
+
+/*
+ * assume(x > 2 && x <= 7) works like an assert(), except it hints to the
+ * compiler what it can assume to optimise the code, like a limited range
+ * of parameter values.
+ */
+#if has_clang_builtin(__builtin_assume)
+	#define assume(expr) \
+		do { \
+			assert(expr); \
+			__builtin_assume(expr); \
+		} while (0)
+#elif defined(_MSC_VER)
+	#define assume(expr) \
+		do { \
+			assert(expr); \
+			__assume(expr); \
+		} while (0)
+#elif defined(LINUX) && defined(__KERNEL__)
+	#define assume(expr) ((void)(expr))
+#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable)
+	#define assume(expr) \
+		do { \
+			if (unlikely(!(expr))) \
+				unreachable("Assumption isn't true: " # expr); \
+		} while (0)
+#else
+	#define assume(expr) assert(expr)
+#endif
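+
+/* Editor's sketch (compiled out): typical combined use of assume() and
+ * unreachable() in a switch that covers every value the caller can pass.
+ * The function and its value set are hypothetical.
+ */
+#if 0
+static unsigned int ChannelsForFormat(unsigned int uiFormat)
+{
+	assume(uiFormat < 3U); /* caller has already validated uiFormat */
+
+	switch (uiFormat)
+	{
+		case 0: return 1U;
+		case 1: return 2U;
+		case 2: return 4U;
+		default: unreachable("uiFormat was validated above");
+	}
+}
+#endif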
+
+/*! Macro to calculate the n-byte aligned value from that supplied rounding up.
+ * n must be a power of two.
+ *
+ * Both arguments should be of a type with the same size otherwise the macro may
+ * cut off digits, e.g. imagine a 64 bit address in _x and a 32 bit value in _n.
+ */
+#define PVR_ALIGN(_x, _n)	(((_x)+((_n)-1U)) & ~((_n)-1U))
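+/* Editor's worked example: PVR_ALIGN(13U, 8U) computes (13 + 7) & ~7,
+ * i.e. 20 & ~7 = 16, while PVR_ALIGN(16U, 8U) stays 16. With a 64-bit _x
+ * and a 32-bit _n, the mask ~((_n)-1U) would zero the upper 32 bits of the
+ * address, which is why both arguments must be the same size.
+ */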
+
+#if defined(_WIN32)
+
+#if defined(WINDOWS_WDF)
+
+	/*
+	 * For WINDOWS_WDF drivers we don't want these defines to overwrite calling conventions propagated through the build system.
+	 * This 'empty' choice helps to resolve all the calling conv issues.
+	 *
+	 */
+	#define IMG_CALLCONV
+	#define C_CALLCONV
+
+	#define IMG_INTERNAL
+	#define IMG_RESTRICT __restrict
+
+	/*
+	 * The proper way of dll linking under MS compilers is made of two things:
+	 * - decorate implementation with __declspec(dllexport)
+	 *   this decoration helps compiler with making the so called
+	 *   'export library'
+	 * - decorate forward-declaration (in a source dependent on a dll) with
+	 *   __declspec(dllimport), this decoration helps the compiler to make
+	 *   faster and smaller code in terms of calling dll-imported functions
+	 *
+	 * Usually these decorations are performed by having a single macro
+	 * definition that expands to a proper __declspec() depending on the
+	 * translation unit, dllexport inside the dll source and dllimport outside
+	 * the dll source. Having IMG_EXPORT and IMG_IMPORT resolving to the same
+	 * __declspec() makes no sense, but at least works.
+	 */
+	#define IMG_IMPORT __declspec(dllexport)
+	#define IMG_EXPORT __declspec(dllexport)
+
+#else
+
+	#define IMG_CALLCONV __stdcall
+	#define IMG_INTERNAL
+	#define	IMG_EXPORT	__declspec(dllexport)
+	#define IMG_RESTRICT __restrict
+	#define C_CALLCONV	__cdecl
+
+	/*
+	 * IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations
+	 * match. Some compilers require the header to be declared IMPORT, while
+	 * the implementation is declared EXPORT.
+	 */
+	#define	IMG_IMPORT	IMG_EXPORT
+
+#endif
+
+#if defined(UNDER_WDDM)
+	#ifndef	_INC_STDLIB
+		#if defined(__mips)
+			/* do nothing */
+		#elif defined(UNDER_MSBUILD)
+			_CRTIMP __declspec(noreturn) void __cdecl abort(void);
+		#else
+			_CRTIMP void __cdecl abort(void);
+		#endif
+	#endif
+#endif /* UNDER_WDDM */
+#else
+	#if (defined(LINUX) || defined(__QNXNTO__)) && defined(__KERNEL__)
+		#define IMG_INTERNAL
+		#define IMG_EXPORT
+		#define IMG_CALLCONV
+	#elif defined(LINUX) || defined(__METAG) || defined(__mips) || defined(__QNXNTO__)
+		#define IMG_CALLCONV
+		#define C_CALLCONV
+
+		#if defined(__METAG)
+			#define IMG_INTERNAL
+		#else
+			#define IMG_INTERNAL    __attribute__((visibility("hidden")))
+		#endif
+
+		#define IMG_EXPORT      __attribute__((visibility("default")))
+		#define IMG_RESTRICT    __restrict__
+	#elif defined(INTEGRITY_OS)
+		#define IMG_CALLCONV
+		#define IMG_INTERNAL
+		#define IMG_EXPORT
+		#define IMG_RESTRICT
+		#define C_CALLCONV
+		#define __cdecl
+
+		#ifndef USE_CODE
+			#define IMG_ABORT() printf("IMG_ABORT was called.\n")
+		#endif
+	#else
+		#error("define an OS")
+	#endif
+
+#endif
+
+/* Use default definition if not overridden */
+#ifndef IMG_ABORT
+	#if defined(EXIT_ON_ABORT)
+		#define IMG_ABORT()	exit(1)
+	#else
+		#define IMG_ABORT()	abort()
+	#endif
+#endif
+
+/* The best way to suppress unused parameter warnings using GCC is to use a
+ * variable attribute. Place the __maybe_unused between the type and name of an
+ * unused parameter in a function parameter list e.g. 'int __maybe_unused var'.
+ * This should only be used in GCC build environments, for example, in files
+ * that compile only on Linux.
+ * Other files should use PVR_UNREFERENCED_PARAMETER
+ */
+
+/* Kernel macros for compiler attributes */
+/* Note: param positions start at 1 */
+#if defined(LINUX) && defined(__KERNEL__)
+	#include <linux/compiler.h>
+
+	#if !defined(__fallthrough)
+		#if defined(__GNUC__) && GCC_VERSION_AT_LEAST(7, 0)
+			#define __fallthrough __attribute__((__fallthrough__))
+		#else
+			#define __fallthrough
+		#endif
+	#endif
+#elif defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+	#define __must_check       __attribute__((warn_unused_result))
+	#define __maybe_unused     __attribute__((unused))
+	#define __malloc           __attribute__((malloc))
+
+	/* Bionic's <sys/cdefs.h> might have defined these already */
+	/* See https://android.googlesource.com/platform/bionic.git/+/master/libc/include/sys/cdefs.h */
+	#if !defined(__packed)
+		#define __packed           __attribute__((packed))
+	#endif
+	#if !defined(__aligned)
+		#define __aligned(n)       __attribute__((aligned(n)))
+	#endif
+	#if !defined(__noreturn)
+		#define __noreturn         __attribute__((noreturn))
+	#endif
+
+	/* That one compiler that supports attributes but doesn't support
+	 * the printf attribute... */
+	#if defined(__GNUC__)
+		#define __printf(fmt, va)  __attribute__((format(printf, fmt, va)))
+	#else
+		#define __printf(fmt, va)
+	#endif /* defined(__GNUC__) */
+
+	#if defined(__cplusplus) && (__cplusplus >= 201703L)
+		#define __fallthrough [[fallthrough]]
+	#elif defined(__GNUC__) && GCC_VERSION_AT_LEAST(7, 0)
+		#define __fallthrough __attribute__((__fallthrough__))
+	#else
+		#define __fallthrough
+	#endif
+
+	#define __user
+	#define __force
+	#define __iomem
+#else
+	/* Silently ignore those attributes */
+	#define __printf(fmt, va)
+	#define __packed
+	#define __aligned(n)
+	#define __must_check
+	#define __maybe_unused
+	#define __malloc
+
+	#if defined(_MSC_VER) || defined(CC_ARM)
+		#define __noreturn __declspec(noreturn)
+	#else
+		#define __noreturn
+	#endif
+
+	/* This may already have been defined, e.g. by SAL (Source Annotation Language) */
+	#if !defined(__fallthrough)
+		#define __fallthrough
+	#endif
+
+	#define __user
+	#define __force
+	#define __iomem
+#endif
+
+
+/* Other attributes, following the same style */
+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+	#define __const_function      __attribute__((const))
+#else
+	#define __const_function
+#endif
+
+
+/* GCC builtins */
+#if defined(LINUX) && defined(__KERNEL__)
+	#include <linux/compiler.h>
+#elif defined(__GNUC__)
+
+/* Klocwork does not support __builtin_expect, which makes the actual condition
+ * expressions hidden during analysis, affecting it negatively. */
+#if !defined(__KLOCWORK__) && !defined(DEBUG)
+	#define likely(x)   __builtin_expect(!!(x), 1)
+	#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+
+	/* Compiler memory barrier to prevent reordering */
+	#define barrier() __asm__ __volatile__("": : :"memory")
+#else
+	#define barrier() do { static_assert(0, "barrier() isn't supported by your compiler"); } while(0)
+#endif
+
+/* That one OS that defines one but not the other... */
+#ifndef likely
+	#define likely(x)   (x)
+#endif
+#ifndef unlikely
+	#define unlikely(x) (x)
+#endif
+
+/* These two macros are also provided by the kernel */
+#ifndef BIT
+#define BIT(b) (1UL << (b))
+#endif
+
+#ifndef BIT_ULL
+#define BIT_ULL(b) (1ULL << (b))
+#endif
+
+#define BIT_SET(f, b)     BITMASK_SET((f),    BIT_ULL(b))
+#define BIT_UNSET(f, b)   BITMASK_UNSET((f),  BIT_ULL(b))
+#define BIT_TOGGLE(f, b)  BITMASK_TOGGLE((f), BIT_ULL(b))
+#define BIT_ISSET(f, b)   BITMASK_HAS((f),    BIT_ULL(b))
+
+#define BITMASK_SET(f, m)     (void) ((f) |= (m))
+#define BITMASK_UNSET(f, m)   (void) ((f) &= ~(m))
+#define BITMASK_TOGGLE(f, m)  (void) ((f) ^= (m))
+#define BITMASK_HAS(f, m)     (((f) & (m)) == (m)) /* the bits from the mask are all set */
+
+#ifndef MAX
+#define MAX(a, b)	(((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a, b)	(((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef CLAMP
+#define CLAMP(min, max, n)  ((n) < (min) ? (min) : ((n) > (max) ? (max) : (n)))
+#endif
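+
+/* Editor's sketch (compiled out): the bit helpers mutate an lvalue flags
+ * word in place, BIT_ISSET/BITMASK_HAS only test, and CLAMP takes its
+ * arguments in (min, max, value) order.
+ */
+#if 0
+static void BitHelperExample(void)
+{
+	IMG_UINT64 uiFlags = 0;
+
+	BIT_SET(uiFlags, 3);          /* uiFlags == 0x8 */
+	BIT_TOGGLE(uiFlags, 0);       /* uiFlags == 0x9 */
+	BIT_UNSET(uiFlags, 3);        /* uiFlags == 0x1 */
+
+	(void) BIT_ISSET(uiFlags, 0); /* evaluates to 1 (true) */
+	(void) CLAMP(0, 255, 300);    /* yields 255 */
+}
+#endif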
+
+
+#if defined(LINUX) && defined(__KERNEL__)
+	#include <linux/kernel.h>
+	#include <linux/bug.h>
+#endif
+
+/* Get a structure's address from the address of a member */
+#define IMG_CONTAINER_OF(ptr, type, member) \
+	(type *) ((uintptr_t) (ptr) - offsetof(type, member))
+
+/* The number of elements in a fixed-sized array */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(ARR) (sizeof(ARR) / sizeof((ARR)[0]))
+#endif
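+
+/* Editor's sketch (compiled out): IMG_CONTAINER_OF recovers the enclosing
+ * structure from a pointer to one of its members, in the style of the
+ * kernel's container_of(). The structures here are hypothetical.
+ */
+#if 0
+struct ExampleNode { int iPayload; };
+struct ExampleList { int iCount; struct ExampleNode sNode; };
+
+static struct ExampleList *ListFromNode(struct ExampleNode *psNode)
+{
+	return IMG_CONTAINER_OF(psNode, struct ExampleList, sNode);
+}
+#endif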
+
+/* To guarantee that __func__ can be used, define it as a macro here if it
+   isn't already provided by the compiler. */
+#if defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus < 201103L)
+#define __func__ __FUNCTION__
+#endif
+
+#if defined(__cplusplus)
+/* C++ Specific:
+ * Disallow use of copy and assignment operator within a class.
+ * Should be placed under private. */
+#define IMG_DISALLOW_COPY_AND_ASSIGN(C) \
+	C(const C&); \
+	void operator=(const C&)
+#endif
+
+#if defined(SUPPORT_PVR_VALGRIND) && !defined(__METAG) && !defined(__mips)
+	#include "/usr/include/valgrind/memcheck.h"
+
+	#define VG_MARK_INITIALIZED(pvData,ui32Size)  VALGRIND_MAKE_MEM_DEFINED(pvData,ui32Size)
+	#define VG_MARK_NOACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_NOACCESS(pvData,ui32Size)
+	#define VG_MARK_ACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_UNDEFINED(pvData,ui32Size)
+#else
+	#if defined(_MSC_VER)
+	#	define PVR_MSC_SUPPRESS_4127 __pragma(warning(suppress:4127))
+	#else
+	#	define PVR_MSC_SUPPRESS_4127
+	#endif
+
+	#define VG_MARK_INITIALIZED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while(0)
+	#define VG_MARK_NOACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while(0)
+	#define VG_MARK_ACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while(0)
+#endif
+
+#define IMG_STRINGIFY_IMPL(x) # x
+#define IMG_STRINGIFY(x) IMG_STRINGIFY_IMPL(x)
+
+#if defined(INTEGRITY_OS)
+	/* Definitions not present in INTEGRITY. */
+	#define PATH_MAX	200
+#endif
+
+#if defined(__clang__) || defined(__GNUC__)
+	/* __SIZEOF_POINTER__ is defined already by these compilers */
+#elif defined(INTEGRITY_OS)
+	#if defined(__Ptr_Is_64)
+		#define __SIZEOF_POINTER__ 8
+	#else
+		#define __SIZEOF_POINTER__ 4
+	#endif
+#elif defined(_WIN32)
+	#define __SIZEOF_POINTER__ sizeof(char *)
+#else
+	#warning Unknown OS - using default method to determine whether CPU arch is 64-bit.
+	#define __SIZEOF_POINTER__ sizeof(char *)
+#endif
+
+/* RDI8567: gcc/clang/llvm load/store optimisations may cause issues with
+ * uncached device memory allocations. Some pointers are made 'volatile'
+ * to prevent those optimisations being applied to writes through those
+ * pointers.
+ */
+#if (GCC_VERSION_AT_LEAST(7, 0) || defined(__clang__)) && (defined(__arm64__) || defined(__aarch64__))
+#define NOLDSTOPT volatile
+/* after applying 'volatile' to a pointer, we may need to cast it to 'void *'
+ * to keep it compatible with its existing uses.
+ */
+#define NOLDSTOPT_VOID (void *)
+
+#define NOLDSTOPT_REQUIRED 1
+#else
+#define NOLDSTOPT
+#define NOLDSTOPT_VOID
+#endif
+
+#endif /* IMG_DEFS_H */
+/*****************************************************************************
+ End of file (img_defs.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/img_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/img_types.h
new file mode 100644
index 0000000..84ede6a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/img_types.h
@@ -0,0 +1,293 @@
+/*************************************************************************/ /*!
+@File
+@Title          Global types for use by IMG APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines type aliases for use by IMG APIs.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef IMG_TYPES_H
+#define IMG_TYPES_H
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* To use C99 types and definitions, there are two special cases we need to
+ * cater for:
+ *
+ * - Visual Studio: in VS2010 or later, some standard headers are available,
+ *   and MSVC has its own built-in sized types. We can define the C99 types
+ *   in terms of these.
+ *
+ * - Linux kernel code: C99 sized types are defined in <linux/types.h>, but
+ *   some other features (like macros for constants or printf format
+ *   strings) are missing, so we need to fill in the gaps ourselves.
+ *
+ * For other cases (userspace code under Linux, Android or Neutrino, or
+ * firmware code), we can include the standard headers.
+ */
+#if defined(_MSC_VER)
+	#include <stdbool.h>		/* bool */
+	#include "msvc_types.h"
+#elif defined(LINUX) && defined(__KERNEL__)
+	#include <linux/types.h>
+	#include "kernel_types.h"
+#elif defined(LINUX) || defined(__METAG) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+	#include <stddef.h>			/* NULL */
+	#include <stdint.h>
+	#include <inttypes.h>		/* intX_t/uintX_t, format specifiers */
+	#include <limits.h>			/* INT_MIN, etc */
+	#include <stdbool.h>		/* bool */
+#elif defined(__mips)
+	#include <stddef.h>			/* NULL */
+	#include <inttypes.h>		/* intX_t/uintX_t, format specifiers */
+	#include <stdbool.h>		/* bool */
+#else
+	#error C99 support not set up for this build
+#endif
+
+/*
+ * Due to a Klocwork bug, 'true'/'false' constants are not recognized to be of
+ * boolean type. This results in large number of false-positives being reported
+ * (MISRA.ETYPE.ASSIGN.2012: "An expression value of essential type 'signed char'
+ * is assigned to an object of essential type 'bool'"). Work around this by
+ * redefining those constants with cast to bool added.
+ */
+#if defined(__KLOCWORK__) && !defined(__cplusplus)
+#undef true
+#undef false
+#define true ((bool) 1)
+#define false ((bool) 0)
+#endif
+
+typedef unsigned int	IMG_UINT;
+typedef int				IMG_INT;
+
+typedef uint8_t			IMG_UINT8,	*IMG_PUINT8;
+typedef uint8_t			IMG_BYTE,	*IMG_PBYTE;
+typedef int8_t			IMG_INT8;
+typedef char			IMG_CHAR,	*IMG_PCHAR;
+
+typedef uint16_t		IMG_UINT16,	*IMG_PUINT16;
+typedef int16_t			IMG_INT16;
+typedef uint32_t		IMG_UINT32,	*IMG_PUINT32;
+typedef int32_t			IMG_INT32,	*IMG_PINT32;
+#define IMG_UINT32_C(c) ((IMG_UINT32)UINT32_C(c))
+
+typedef uint64_t		IMG_UINT64,	*IMG_PUINT64;
+typedef int64_t			IMG_INT64;
+#define IMG_INT64_C(c)	INT64_C(c)
+#define IMG_UINT64_C(c)	UINT64_C(c)
+#define IMG_UINT64_FMTSPEC PRIu64
+#define IMG_UINT64_FMTSPECX PRIX64
+#define IMG_UINT64_FMTSPECx PRIx64
+#define IMG_UINT64_FMTSPECo PRIo64
+#define IMG_INT64_FMTSPECd PRId64
+
+#define IMG_UINT16_MAX	UINT16_MAX
+#define IMG_UINT32_MAX	UINT32_MAX
+#define IMG_UINT64_MAX	UINT64_MAX
+
+#define IMG_INT16_MAX	INT16_MAX
+#define IMG_INT32_MAX	INT32_MAX
+#define IMG_INT64_MAX	INT64_MAX
+
+/* Linux kernel mode does not use floating point */
+typedef float			IMG_FLOAT,	*IMG_PFLOAT;
+typedef double			IMG_DOUBLE;
+
+typedef union
+{
+	IMG_UINT32 ui32;
+	IMG_FLOAT f;
+} IMG_UINT32_FLOAT;
+
+typedef int				IMG_SECURE_TYPE;
+
+typedef	enum tag_img_bool
+{
+	IMG_FALSE		= 0,
+	IMG_TRUE		= 1,
+	IMG_FORCE_ALIGN = 0x7FFFFFFF
+} IMG_BOOL, *IMG_PBOOL;
+
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+typedef IMG_CHAR const* IMG_PCCHAR;
+#endif
+
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#define IMG_SIZE_FMTSPEC  "%Iu"
+#define IMG_SIZE_FMTSPECX "%Ix"
+#else
+#define IMG_SIZE_FMTSPEC  "%zu"
+#define IMG_SIZE_FMTSPECX "%zx"
+#endif
+
+#if defined(LINUX) && defined(__KERNEL__)
+/* prints the function name when used with printk */
+#define IMG_PFN_FMTSPEC "%pf"
+#else
+#define IMG_PFN_FMTSPEC "%p"
+#endif
+
+typedef void           *IMG_HANDLE;
+
+/* services/stream ID */
+typedef IMG_UINT64      IMG_SID;
+
+/* Process IDs */
+typedef IMG_UINT32      IMG_PID;
+
+/* OS connection type */
+typedef int             IMG_OS_CONNECTION;
+
+
+/*
+ * Address types.
+ * All types used to refer to a block of memory are wrapped in structures
+ * to enforce some degree of type safety, i.e. an IMG_DEV_VIRTADDR cannot
+ * be assigned to a variable of type IMG_DEV_PHYADDR because they are not the
+ * same thing.
+ *
+ * There is an assumption that the system contains at most one non-cpu mmu,
+ * and a memory block is only mapped by the MMU once.
+ *
+ * Different devices could have offset views of the physical address space.
+ *
+ */
+
+
+/*
+ *
+ * +------------+    +------------+      +------------+        +------------+
+ * |    CPU     |    |    DEV     |      |    DEV     |        |    DEV     |
+ * +------------+    +------------+      +------------+        +------------+
+ *       |                 |                   |                     |
+ *       | void *          |IMG_DEV_VIRTADDR   |IMG_DEV_VIRTADDR     |
+ *       |                 \-------------------/                     |
+ *       |                          |                                |
+ * +------------+             +------------+                         |
+ * |    MMU     |             |    MMU     |                         |
+ * +------------+             +------------+                         |
+ *       |                          |                                |
+ *       |                          |                                |
+ *       |                          |                                |
+ *   +--------+                +---------+                      +--------+
+ *   | Offset |                | (Offset)|                      | Offset |
+ *   +--------+                +---------+                      +--------+
+ *       |                          |                IMG_DEV_PHYADDR |
+ *       |                          |                                |
+ *       |                          | IMG_DEV_PHYADDR                |
+ * +---------------------------------------------------------------------+
+ * |                         System Address bus                          |
+ * +---------------------------------------------------------------------+
+ *
+ */
+
+#define IMG_DEV_VIRTADDR_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_SIZE_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_ALIGN_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_OFFSET_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+
+/* cpu physical address */
+typedef struct
+{
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+	uintptr_t uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var)		(uintptr_t)(var)
+#elif defined(LINUX) && defined(__KERNEL__)
+	phys_addr_t uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var)		(phys_addr_t)(var)
+#else
+	IMG_UINT64 uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var)		(IMG_UINT64)(var)
+#endif
+} IMG_CPU_PHYADDR;
+
+/* device physical address */
+typedef struct
+{
+	IMG_UINT64 uiAddr;
+} IMG_DEV_PHYADDR;
+
+/* system physical address */
+typedef struct
+{
+	IMG_UINT64 uiAddr;
+} IMG_SYS_PHYADDR;
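+
+/* Editor's sketch (compiled out): wrapping each address space in its own
+ * struct turns accidental cross-assignments into compile errors, and the
+ * 64-bit field prints portably via the format-specifier macros above
+ * (printf and a hosted environment are assumed here).
+ */
+#if 0
+static void AddressTypeExample(void)
+{
+	IMG_DEV_PHYADDR sDevPAddr = { IMG_UINT64_C(0x80000000) };
+	IMG_CPU_PHYADDR sCpuPAddr;
+
+	/* sCpuPAddr = sDevPAddr; would not compile: distinct struct types */
+	sCpuPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(sDevPAddr.uiAddr);
+
+	printf("dev PA 0x%010" IMG_UINT64_FMTSPECX "\n", sDevPAddr.uiAddr);
+}
+#endif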
+
+/*
+	rectangle structure
+*/
+typedef struct
+{
+	IMG_INT32	x0;
+	IMG_INT32	y0;
+	IMG_INT32	x1;
+	IMG_INT32	y1;
+} IMG_RECT;
+
+typedef struct
+{
+	IMG_INT16	x0;
+	IMG_INT16	y0;
+	IMG_INT16	x1;
+	IMG_INT16	y1;
+} IMG_RECT_16;
+
+/*
+ * box structure
+ */
+typedef struct
+{
+	IMG_INT32	x0;
+	IMG_INT32	y0;
+	IMG_INT32	z0;
+	IMG_INT32	x1;
+	IMG_INT32	y1;
+	IMG_INT32	z1;
+} IMG_BOX;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif	/* IMG_TYPES_H */
+/******************************************************************************
+ End of file (img_types.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/info_page.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/info_page.h
new file mode 100644
index 0000000..a5f73eb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/info_page.h
@@ -0,0 +1,98 @@
+/*************************************************************************/ /*!
+@File
+@Title          Kernel/User mode general purpose shared memory.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    General purpose memory shared between kernel driver and user
+                mode.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _INFO_PAGE_KM_H_
+#define _INFO_PAGE_KM_H_
+
+#include "pvrsrv_error.h"
+
+#include "pmr.h"
+#include "pvrsrv.h"
+
+/**
+ * @Function InfoPageCreate
+ * @Description Allocates resources for global information page.
+ * @Input psData pointer to PVRSRV data
+ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error.
+ */
+PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData);
+
+/**
+ * @Function InfoPageDestroy
+ * @Description Frees all of the resources of the global information page.
+ * @Input psData pointer to PVRSRV data
+ * @Return None
+ */
+void InfoPageDestroy(PVRSRV_DATA *psData);
+
+/**
+ * @Function PVRSRVAcquireInfoPageKM()
+ * @Description This interface is used for obtaining the global information page
+ *              which acts as a general purpose shared memory between KM and UM.
+ *              The use of this information page outside of services is _not_
+ *              recommended.
+ * @Output ppsPMR handle to exported PMR
+ * @Return
+ */
+PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR);
+
+/**
+ * @Function PVRSRVReleaseInfoPageKM()
+ * @Description This function matches PVRSRVAcquireInfoPageKM().
+ * @Input psPMR handle to exported PMR
+ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error.
+ */
+PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *psPMR);
+
+/**
+ * @Function GetInfoPageDebugFlagsKM()
+ * @Description Return info page debug flags
+ * @Return info page debug flags
+ */
+static INLINE IMG_UINT32 GetInfoPageDebugFlagsKM(void)
+{
+	return (PVRSRVGetPVRSRVData())->pui32InfoPage[DEBUG_FEATURE_FLAGS];
+}
+
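+/* Editor's sketch (compiled out): the expected acquire/use/release pattern
+ * for the info page PMR; the caller's function name is hypothetical.
+ */
+#if 0
+static PVRSRV_ERROR ExportInfoPageExample(void)
+{
+	PMR *psPMR;
+	PVRSRV_ERROR eError = PVRSRVAcquireInfoPageKM(&psPMR);
+
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* ... export or map psPMR for the client here ... */
+
+	return PVRSRVReleaseInfoPageKM(psPMR);
+}
+#endif
+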
+#endif /* _INFO_PAGE_KM_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/info_page_client.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/info_page_client.h
new file mode 100644
index 0000000..9df2461
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/info_page_client.h
@@ -0,0 +1,89 @@
+/*************************************************************************/ /*!
+@File
+@Title          Kernel/User mode general purpose shared memory.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    General purpose shared memory (i.e. information page) mapped by
+                kernel space driver and user space clients. All info page
+                entries are sizeof(IMG_UINT32) on both 32/64-bit environments.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef INFO_PAGE_CLIENT_H
+#define INFO_PAGE_CLIENT_H
+
+#include "device_connection.h"
+#include "info_page_defs.h"
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+/*************************************************************************/ /*!
+@Function      GetInfoPage
+
+@Description   Return Info Page address
+
+@Input         hDevConnection - Services device connection
+
+@Return        Info Page address
+*/
+/*****************************************************************************/
+static INLINE IMG_PUINT32 GetInfoPage(SHARED_DEV_CONNECTION hDevConnection)
+{
+#if defined(__KERNEL__)
+	return (PVRSRVGetPVRSRVData())->pui32InfoPage;
+#else
+    return hDevConnection->pui32InfoPage;
+#endif
+}
+
+/*************************************************************************/ /*!
+@Function      GetInfoPageDebugFlags
+
+@Description   Return Info Page debug flags
+
+@Input         hDevConnection - Services device connection
+
+@Return        Info Page debug flags
+*/
+/*****************************************************************************/
+static INLINE IMG_UINT32 GetInfoPageDebugFlags(SHARED_DEV_CONNECTION hDevConnection)
+{
+	return GetInfoPage(hDevConnection)[DEBUG_FEATURE_FLAGS];
+}
+
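+/* Editor's sketch (compiled out): gating debug-only work on the shared
+ * flags word; DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED comes from
+ * info_page_defs.h. The helper name is hypothetical.
+ */
+#if 0
+static INLINE IMG_BOOL IsPageFaultDebugEnabled(SHARED_DEV_CONNECTION hDevConnection)
+{
+	return (GetInfoPageDebugFlags(hDevConnection) &
+	        DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) ? IMG_TRUE : IMG_FALSE;
+}
+#endif
+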
+#endif /* INFO_PAGE_CLIENT_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/info_page_defs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/info_page_defs.h
new file mode 100644
index 0000000..5aa493a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/info_page_defs.h
@@ -0,0 +1,96 @@
+/*************************************************************************/ /*!
+@File
+@Title          Kernel/User mode general purpose shared memory.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    General purpose shared memory (i.e. information page) mapped by
+                kernel space driver and user space clients. All information page
+                entries are sizeof(IMG_UINT32) on both 32/64-bit environments.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _INFO_PAGE_DEFS_H_
+#define _INFO_PAGE_DEFS_H_
+
+
+/* CacheOp information page entries */
+#define CACHEOP_INFO_IDX_START     0x00
+#define CACHEOP_INFO_GFSEQNUM0     (CACHEOP_INFO_IDX_START + 0) /*!< Current global flush sequence number */
+#define CACHEOP_INFO_GFSEQNUM1     (CACHEOP_INFO_IDX_START + 1) /*!< Validity global flush sequence number */
+#define CACHEOP_INFO_UMRBFONLY     (CACHEOP_INFO_IDX_START + 2) /*!< Use UM flush only (i.e no KM GF) */
+#define CACHEOP_INFO_UMKMTHRESHLD  (CACHEOP_INFO_IDX_START + 3) /*!< UM=>KM routing threshold in bytes */
+#define CACHEOP_INFO_KMGFTHRESHLD  (CACHEOP_INFO_IDX_START + 4) /*!< KM/GF threshold in bytes */
+#define CACHEOP_INFO_KMDFTHRESHLD  (CACHEOP_INFO_IDX_START + 5) /*!< KM/DF threshold in bytes */
+#define CACHEOP_INFO_LINESIZE      (CACHEOP_INFO_IDX_START + 6) /*!< CPU data cache line size */
+#define CACHEOP_INFO_PGSIZE        (CACHEOP_INFO_IDX_START + 7) /*!< CPU MMU page size */
+#define CACHEOP_INFO_IDX_END       (CACHEOP_INFO_IDX_START + 8)
+
+/* HWPerf information page entries */
+#define HWPERF_INFO_IDX_START      (CACHEOP_INFO_IDX_END)
+#define HWPERF_FILTER_SERVICES_IDX (HWPERF_INFO_IDX_START + 0)
+#define HWPERF_FILTER_EGL_IDX      (HWPERF_INFO_IDX_START + 1)
+#define HWPERF_FILTER_OPENGLES_IDX (HWPERF_INFO_IDX_START + 2)
+#define HWPERF_FILTER_OPENCL_IDX   (HWPERF_INFO_IDX_START + 3)
+#define HWPERF_FILTER_VULKAN_IDX   (HWPERF_INFO_IDX_START + 4)
+#define HWPERF_INFO_IDX_END        (HWPERF_INFO_IDX_START + 5)
+
+/* BVNC of the core */
+#define CORE_ID_IDX_START                   (HWPERF_INFO_IDX_END)
+#define CORE_ID_BRANCH                      (CORE_ID_IDX_START + 0)
+#define CORE_ID_VERSION                     (CORE_ID_IDX_START + 1)
+#define CORE_ID_NUMBER_OF_SCALABLE_UNITS    (CORE_ID_IDX_START + 2)
+#define CORE_ID_CONFIG                      (CORE_ID_IDX_START + 3)
+#define CORE_ID_IDX_END                     (CORE_ID_IDX_START + 4)
+
+/* timeout values */
+#define TIMEOUT_INFO_IDX_START               (CORE_ID_IDX_END)
+#define TIMEOUT_INFO_VALUE_RETRIES           (TIMEOUT_INFO_IDX_START + 0)
+#define TIMEOUT_INFO_VALUE_TIMEOUT_MS        (TIMEOUT_INFO_IDX_START + 1)
+#define TIMEOUT_INFO_CONDITION_RETRIES       (TIMEOUT_INFO_IDX_START + 2)
+#define TIMEOUT_INFO_CONDITION_TIMEOUT_MS    (TIMEOUT_INFO_IDX_START + 3)
+#define TIMEOUT_INFO_EVENT_OBJECT_RETRIES    (TIMEOUT_INFO_IDX_START + 4)
+#define TIMEOUT_INFO_EVENT_OBJECT_TIMEOUT_MS (TIMEOUT_INFO_IDX_START + 5)
+#define TIMEOUT_INFO_IDX_END                 (TIMEOUT_INFO_IDX_START + 6)
+
+/* Debug features */
+#define DEBUG_FEATURE_FLAGS                  (TIMEOUT_INFO_IDX_END)
+#define DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED	0x1
+#define DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED		0x2
+#define DEBUG_FEATURE_FLAGS_IDX_END          (DEBUG_FEATURE_FLAGS + 1)
+
+
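+/* Editor's note: each block chains off the previous *_IDX_END, keeping the
+ * page layout contiguous: CACHEOP occupies words 0..7, HWPERF words 8..12,
+ * CORE_ID words 13..16, TIMEOUT words 17..22, and DEBUG_FEATURE_FLAGS is
+ * word 23.
+ */
+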
+#endif /* _INFO_PAGE_DEFS_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/info_page_km.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/info_page_km.c
new file mode 100644
index 0000000..b9d5324
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/info_page_km.c
@@ -0,0 +1,133 @@
+/*************************************************************************/ /*!
+@File           info_page_km.c
+@Title          Kernel/User space shared memory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements general purpose shared memory between kernel driver
+                and user mode.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "info_page_defs.h"
+#include "info_page.h"
+#include "pvrsrv.h"
+#include "devicemem.h"
+#include "pmr.h"
+
+PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData)
+{
+    const DEVMEM_FLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                      PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                      PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                      PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
+                                      PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                      PVRSRV_MEMALLOCFLAG_CPU_LOCAL;
+    PVRSRV_ERROR eError;
+
+    PVR_ASSERT(psData != NULL);
+
+    /* Allocate single page of memory for driver information page */
+    eError = DevmemAllocateExportable(psData->psHostMemDeviceNode,
+                                      OSGetPageSize(),
+                                      OSGetPageSize(),
+                                      OSGetPageShift(),
+                                      uiMemFlags,
+                                      "PVRSRVInfoPage",
+                                      &psData->psInfoPageMemDesc);
+    PVR_LOGG_IF_ERROR(eError, "DevmemAllocateExportable", e0);
+
+    eError = DevmemAcquireCpuVirtAddr(psData->psInfoPageMemDesc,
+                                      (void **) &psData->pui32InfoPage);
+    PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e0);
+
+    /* Look-up the memory descriptor PMR handle */
+    eError = DevmemLocalGetImportHandle(psData->psInfoPageMemDesc,
+                                        (void **) &psData->psInfoPagePMR);
+    PVR_LOGG_IF_ERROR(eError, "DevmemLocalGetImportHandle", e0);
+
+    eError = OSLockCreate(&psData->hInfoPageLock);
+    PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+    return PVRSRV_OK;
+
+e0:
+    InfoPageDestroy(psData);
+    return eError;
+}
+
+void InfoPageDestroy(PVRSRV_DATA *psData)
+{
+    if (psData->psInfoPageMemDesc)
+    {
+        if (psData->pui32InfoPage != NULL)
+        {
+            DevmemReleaseCpuVirtAddr(psData->psInfoPageMemDesc);
+            psData->pui32InfoPage = NULL;
+        }
+
+        DevmemFree(psData->psInfoPageMemDesc);
+        psData->psInfoPageMemDesc = NULL;
+    }
+
+    if (psData->hInfoPageLock)
+    {
+        OSLockDestroy(psData->hInfoPageLock);
+        psData->hInfoPageLock = NULL;
+    }
+}
+
+PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR)
+{
+    PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+
+    PVR_LOGR_IF_FALSE(psData->psInfoPageMemDesc != NULL, "invalid MEMDESC"
+                      " handle", PVRSRV_ERROR_INVALID_PARAMS);
+    PVR_LOGR_IF_FALSE(psData->psInfoPagePMR != NULL, "invalid PMR handle",
+                      PVRSRV_ERROR_INVALID_PARAMS);
+
+    /* Copy the PMR import handle back */
+    *ppsPMR = psData->psInfoPagePMR;
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *ppsPMR)
+{
+    /* Nothing to do here as the PMR is a singleton */
+    PVR_UNREFERENCED_PARAMETER(ppsPMR);
+    return PVRSRV_OK;
+}
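
A minimal usage sketch of the create/destroy pair above (ExampleInit()/ExampleDeInit() are hypothetical names; only InfoPageCreate() and InfoPageDestroy() come from this file). Because InfoPageCreate() already invokes InfoPageDestroy() on its error path, and InfoPageDestroy() NULL-checks every resource, tear-down is safe even after a partial failure:

    static PVRSRV_ERROR ExampleInit(PVRSRV_DATA *psData)
    {
        PVRSRV_ERROR eError = InfoPageCreate(psData);
        if (eError != PVRSRV_OK)
        {
            return eError; /* partial state was already freed for us */
        }
        /* The info page is now zeroed and CPU-mapped at
         * psData->pui32InfoPage, guarded by psData->hInfoPageLock. */
        return PVRSRV_OK;
    }

    static void ExampleDeInit(PVRSRV_DATA *psData)
    {
        InfoPageDestroy(psData); /* releases the mapping, memory and lock */
    }
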
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/interrupt_support.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/interrupt_support.c
new file mode 100644
index 0000000..a2b3762
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/interrupt_support.c
@@ -0,0 +1,151 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/interrupt.h>
+
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "interrupt_support.h"
+
+typedef struct LISR_DATA_TAG
+{
+	IMG_UINT32	ui32IRQ;
+	PFN_SYS_LISR	pfnLISR;
+	void		*pvData;
+} LISR_DATA;
+
+static irqreturn_t SystemISRWrapper(int irq, void *dev_id)
+{
+	LISR_DATA *psLISRData = (LISR_DATA *)dev_id;
+
+	PVR_UNREFERENCED_PARAMETER(irq);
+
+	if (psLISRData)
+	{
+		if (psLISRData->pfnLISR(psLISRData->pvData))
+		{
+			return IRQ_HANDLED;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing interrupt data", __func__));
+	}
+
+	return IRQ_NONE;
+}
+
+PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR,
+				 IMG_UINT32 ui32IRQ,
+				 const IMG_CHAR *pszDevName,
+				 PFN_SYS_LISR pfnLISR,
+				 void *pvData,
+				 IMG_UINT32 ui32Flags)
+{
+	LISR_DATA *psLISRData;
+	unsigned long ulIRQFlags = 0;
+
+	if (pfnLISR == NULL || pvData == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32Flags & ~SYS_IRQ_FLAG_MASK)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	switch (ui32Flags & SYS_IRQ_FLAG_TRIGGER_MASK)
+	{
+		case SYS_IRQ_FLAG_TRIGGER_DEFAULT:
+			break;
+		case SYS_IRQ_FLAG_TRIGGER_LOW:
+			ulIRQFlags |= IRQF_TRIGGER_LOW;
+			break;
+		case SYS_IRQ_FLAG_TRIGGER_HIGH:
+			ulIRQFlags |= IRQF_TRIGGER_HIGH;
+			break;
+		default:
+			return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32Flags & SYS_IRQ_FLAG_SHARED)
+	{
+		ulIRQFlags |= IRQF_SHARED;
+	}
+
+	psLISRData = OSAllocMem(sizeof *psLISRData);
+	if (psLISRData == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psLISRData->ui32IRQ = ui32IRQ;
+	psLISRData->pfnLISR = pfnLISR;
+	psLISRData->pvData = pvData;
+
+	if (request_irq(ui32IRQ, SystemISRWrapper, ulIRQFlags, pszDevName, psLISRData))
+	{
+		OSFreeMem(psLISRData);
+
+		return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER;
+	}
+
+	*phLISR = (IMG_HANDLE)psLISRData;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISR)
+{
+	LISR_DATA *psLISRData = (LISR_DATA *)hLISR;
+
+	if (psLISRData == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	free_irq(psLISRData->ui32IRQ, psLISRData);
+
+	OSFreeMem(psLISRData);
+
+	return PVRSRV_OK;
+}
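
A brief usage sketch of this API (ExampleLISR(), EXAMPLE_DEV_DATA and the Example*Interrupt() helpers are hypothetical; the OSInstallSystemLISR()/OSUninstallSystemLISR() calls and SYS_IRQ_* flags come from these files). Returning IMG_FALSE from the LISR makes SystemISRWrapper() report IRQ_NONE, which matters when SYS_IRQ_FLAG_SHARED is set so the kernel can offer the interrupt to the other sharers:

    static IMG_BOOL ExampleLISR(void *pvData)
    {
        EXAMPLE_DEV_DATA *psDevData = pvData;       /* hypothetical context */

        if (!ExampleInterruptPending(psDevData))    /* hypothetical helper */
        {
            return IMG_FALSE;                       /* not ours -> IRQ_NONE */
        }

        ExampleClearInterrupt(psDevData);           /* hypothetical helper */
        return IMG_TRUE;                            /* handled -> IRQ_HANDLED */
    }

    /* Typical install/uninstall in the device init/deinit paths: */
    IMG_HANDLE hLISR;
    PVRSRV_ERROR eError = OSInstallSystemLISR(&hLISR, ui32IRQ, "example-dev",
                                              ExampleLISR, psDevData,
                                              SYS_IRQ_FLAG_TRIGGER_DEFAULT |
                                              SYS_IRQ_FLAG_SHARED);
    /* ... and once the device is quiesced: */
    OSUninstallSystemLISR(hLISR);
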
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/interrupt_support.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/interrupt_support.h
new file mode 100644
index 0000000..0cca1ac
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/interrupt_support.h
@@ -0,0 +1,103 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__INTERRUPT_SUPPORT_H__)
+#define __INTERRUPT_SUPPORT_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_device.h"
+
+/*! Default trigger type for the interrupt line. */
+#define SYS_IRQ_FLAG_TRIGGER_DEFAULT (0x0 << 0)
+/*! Interrupt triggered when interrupt line is low. */
+#define SYS_IRQ_FLAG_TRIGGER_LOW     (0x1 << 0)
+/*! Interrupt triggered when interrupt line is high. */
+#define SYS_IRQ_FLAG_TRIGGER_HIGH    (0x2 << 0)
+/*! Interrupt trigger mask. */
+#define SYS_IRQ_FLAG_TRIGGER_MASK    (SYS_IRQ_FLAG_TRIGGER_DEFAULT | \
+                                      SYS_IRQ_FLAG_TRIGGER_LOW | \
+                                      SYS_IRQ_FLAG_TRIGGER_HIGH)
+/*! The IRQ is allowed to be shared among several devices. */
+#define SYS_IRQ_FLAG_SHARED          (0x1 << 8)
+
+/*! Interrupt flags mask. */
+#define SYS_IRQ_FLAG_MASK            (SYS_IRQ_FLAG_TRIGGER_MASK | \
+                                      SYS_IRQ_FLAG_SHARED)
+
+/*************************************************************************/ /*!
+@Description    Pointer to a system Low-level Interrupt Service Routine (LISR).
+@Input  pvData  Private data provided to the LISR.
+@Return         IMG_TRUE if interrupt handled, IMG_FALSE otherwise.
+*/ /**************************************************************************/
+typedef IMG_BOOL (*PFN_SYS_LISR)(void *pvData);
+
+/*************************************************************************/ /*!
+@Function       OSInstallSystemLISR
+@Description    Installs a system low-level interrupt handler
+@Output         phLISR                  On return, contains a handle to the
+                                        installed LISR
+@Input          ui32IRQ                 The IRQ number for which the
+                                        interrupt handler should be installed
+@Input          pszDevName              Name of the device for which the handler
+                                        is being installed
+@Input          pfnLISR                 A pointer to an interrupt handler
+                                        function
+@Input          pvData                  A pointer to data that should be passed
+                                        to pfnLISR when it is called
+@Input          ui32Flags               Interrupt flags
+@Return         PVRSRV_OK on success, a failure code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR,
+				 IMG_UINT32 ui32IRQ,
+				 const IMG_CHAR *pszDevName,
+				 PFN_SYS_LISR pfnLISR,
+				 void *pvData,
+				 IMG_UINT32 ui32Flags);
+
+/*************************************************************************/ /*!
+@Function       OSUninstallSystemLISR
+@Description    Uninstalls a system low-level interrupt handler
+@Input          hLISRData              The handle to the LISR to uninstall
+@Return         PVRSRV_OK on success, a failure code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISRData);
+#endif /* !defined(__INTERRUPT_SUPPORT_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/kernel_compatibility.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/kernel_compatibility.h
new file mode 100644
index 0000000..21af7b3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/kernel_compatibility.h
@@ -0,0 +1,454 @@
+/*************************************************************************/ /*!
+@Title          Kernel versions compatibility macros
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Per-version macros to allow code to seamlessly use older kernel versions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KERNEL_COMPATIBILITY_H__
+#define __KERNEL_COMPATIBILITY_H__
+
+#include <linux/version.h>
+
+/*
+ * Stop supporting an old kernel? Remove the top block.
+ * New incompatible kernel?       Append a new block at the bottom.
+ *
+ * Please write your version test as `VERSION < X.Y`, and use the earliest
+ * possible version :)
+ */
+
+/* Linux 3.6 introduced seq_vprintf(). Earlier versions don't have it, so we
+ * work around the limitation with vsnprintf() + seq_puts().
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define seq_vprintf(seq_file, fmt, args) \
+do { \
+	char aszBuffer[512]; /* maximum message buffer size */ \
+	vsnprintf(aszBuffer, sizeof(aszBuffer), fmt, args); \
+	seq_puts(seq_file, aszBuffer); \
+} while(0)
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+
+/* Linux 3.7 split VM_RESERVED into VM_DONTDUMP and VM_DONTEXPAND */
+#define VM_DONTDUMP VM_RESERVED
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) */
+
+/*
+ * Note: this fix had to be written backwards because get_unused_fd_flags
+ * was already defined but not exported on kernels < 3.7
+ *
+ * When removing support for kernels < 3.7, this block should be removed
+ * and all `get_unused_fd()` calls should be manually replaced with
+ * `get_unused_fd_flags(0)`.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+
+/* Linux 3.19 removed get_unused_fd() */
+/* get_unused_fd_flags was introduced in 3.7 */
+#define get_unused_fd() get_unused_fd_flags(0)
+
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+
+/*
+ * Headers shouldn't normally be included by this file but this is a special
+ * case as it's not obvious from the name that devfreq_add_device needs this
+ * include.
+ */
+#include <linux/string.h>
+
+#define devfreq_add_device(dev, profile, name, data) \
+	({ \
+		struct devfreq *__devfreq; \
+		if (name && !strcmp(name, "simple_ondemand")) \
+			__devfreq = devfreq_add_device(dev, profile, \
+							   &devfreq_simple_ondemand, data); \
+		else \
+			__devfreq = ERR_PTR(-EINVAL); \
+		__devfreq; \
+	})
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+
+#define DRIVER_RENDER 0
+#define DRM_RENDER_ALLOW 0
+
+/* Linux 3.12 introduced a new shrinker API */
+#define SHRINK_STOP (~0UL)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+
+#define dev_pm_opp_get_opp_count(dev) opp_get_opp_count(dev)
+#define dev_pm_opp_get_freq(opp) opp_get_freq(opp)
+#define dev_pm_opp_get_voltage(opp) opp_get_voltage(opp)
+#define dev_pm_opp_add(dev, freq, u_volt) opp_add(dev, freq, u_volt)
+#define dev_pm_opp_find_freq_ceil(dev, freq) opp_find_freq_ceil(dev, freq)
+
+#if defined(CONFIG_ARM)
+/* Linux 3.13 renamed ioremap_cached to ioremap_cache */
+#define ioremap_cache(cookie, size) ioremap_cached(cookie, size)
+#endif /* defined(CONFIG_ARM) */
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+
+/* Linux 3.14 introduced a new set of sized min and max defines */
+#ifndef U32_MAX
+#define U32_MAX ((u32)UINT_MAX)
+#endif
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+
+/* Linux 3.17 changed the 3rd argument from a `struct page ***pages` to
+ * `struct page **pages` */
+#define map_vm_area(area, prot, pages) map_vm_area(area, prot, &pages)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
+
+/*
+ * Linux 4.7 removed drm_send_vblank_event(), but its replacement,
+ * drm_crtc_send_vblank_event(), has been available since 3.19.
+ */
+#define drm_crtc_send_vblank_event(crtc, e) drm_send_vblank_event((crtc)->dev, drm_crtc_index(crtc), e)
+
+/* seq_has_overflowed() was introduced in 3.19-rc1 but the structure elements
+ * have been available since 2.x
+ */
+#include <linux/seq_file.h>
+static inline bool seq_has_overflowed(struct seq_file *m)
+{
+	return m->count == m->size;
+}
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
+
+#define debugfs_create_file_size(name, mode, parent, data, fops, file_size) \
+	({ \
+		struct dentry *de; \
+		de = debugfs_create_file(name, mode, parent, data, fops); \
+		if (de) \
+			de->d_inode->i_size = file_size; \
+		de; \
+	})
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+
+/* Linux 4.4 renamed __GFP_WAIT to __GFP_RECLAIM */
+#define __GFP_RECLAIM __GFP_WAIT
+
+#if !defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+#define dev_pm_opp_of_add_table(dev) of_init_opp_table(dev)
+#define dev_pm_opp_of_remove_table(dev) of_free_opp_table(dev)
+#else
+#define sync_fence_create(data_name, sync_pt) sync_fence_create(data_name, &(sync_pt)->base)
+#endif
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \
+	(!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+
+/* Linux 4.5 added a printf-style 'name' parameter to several DRM init functions */
+
+#define drm_encoder_init(dev, encoder, funcs, encoder_type, name, ...) \
+	drm_encoder_init(dev, encoder, funcs, encoder_type)
+
+#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \
+	({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type); })
+
+#define drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs, name, ...) \
+	drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs)
+
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
+
+#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \
+	({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type, name, ##__VA_ARGS__); })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+
+/*
+ * Linux 4.6 removed the first two parameters: the "struct task_struct"
+ * pointer "current" is defined in asm/current.h, which makes it pointless
+ * to pass it on every function call.
+ */
+#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \
+	get_user_pages(current, current->mm, start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas)
+
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
+
+/* Linux 4.9 replaced the write/force parameters with "gup_flags" */
+#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \
+	get_user_pages(start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
+	(!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+
+/*
+ * Linux 4.6 removed the start and end arguments as it now always maps
+ * the entire DMA-BUF.
+ * Additionally, dma_buf_end_cpu_access() now returns an int error.
+ */
+#define dma_buf_begin_cpu_access(DMABUF, DIRECTION) dma_buf_begin_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION)
+#define dma_buf_end_cpu_access(DMABUF, DIRECTION) ({ dma_buf_end_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION); 0; })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
+		  (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
+
+/* Linux 4.7 removed the first argument as it was never used */
+#define drm_gem_object_lookup(filp, handle) drm_gem_object_lookup((filp)->minor->dev, filp, handle)
+
+/* Linux 4.7 replaced nla_put_u64 with nla_put_u64_64bit */
+#define nla_put_u64_64bit(skb, attrtype, value, padattr) nla_put_u64(skb, attrtype, value)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
+
+/* Linux 4.9 changed the second argument to a drm_file pointer */
+#define drm_vma_node_is_allowed(node, file_priv) drm_vma_node_is_allowed(node, (file_priv)->filp)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+#define refcount_read(r) atomic_read(r)
+#define drm_mm_insert_node(mm, node, size) drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT)
+
+#define drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd) drm_helper_mode_fill_fb_struct(fb, mode_cmd)
+
+/*
+ * Linux 4.12 added another level of page tables (p4d) on x86. The new
+ * level sits between pgd and pud, so on kernels where it doesn't exist,
+ * the pud_offset() function takes a pgd as a parameter instead of a p4d.
+ */
+#define p4d_t pgd_t
+#define p4d_offset(pgd, address) (pgd)
+#define p4d_none(p4d) (0)
+#define p4d_bad(p4d) (0)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+
+#define drm_mode_object_get(obj)          drm_mode_object_reference(obj)
+#define drm_mode_object_put(obj)          drm_mode_object_unreference(obj)
+#define drm_connector_get(obj)            drm_connector_reference(obj)
+#define drm_connector_put(obj)            drm_connector_unreference(obj)
+#define drm_framebuffer_get(obj)          drm_framebuffer_reference(obj)
+#define drm_framebuffer_put(obj)          drm_framebuffer_unreference(obj)
+#define drm_gem_object_get(obj)           drm_gem_object_reference(obj)
+#define drm_gem_object_put(obj)           drm_gem_object_unreference(obj)
+#define __drm_gem_object_put(obj)         __drm_gem_object_unreference(obj)
+#define drm_gem_object_put_unlocked(obj)  drm_gem_object_unreference_unlocked(obj)
+#define drm_property_blob_get(obj)        drm_property_reference_blob(obj)
+#define drm_property_blob_put(obj)        drm_property_unreference_blob(obj)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))
+
+#define drm_dev_put(dev) drm_dev_unref(dev)
+
+#define drm_mode_object_find(dev, file_priv, id, type) drm_mode_object_find(dev, id, type)
+#define drm_encoder_find(dev, file_priv, id) drm_encoder_find(dev, id)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
+
+#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \
+											min_scale, max_scale, \
+											can_position, can_update_disabled) \
+	({ \
+		const struct drm_rect __clip = { \
+			.x2 = crtc_state->crtc->mode.hdisplay, \
+			.y2 = crtc_state->crtc->mode.vdisplay, \
+		}; \
+		int __ret = drm_plane_helper_check_state(plane_state, \
+												 &__clip, \
+												 min_scale, max_scale, \
+												 can_position, \
+												 can_update_disabled); \
+		__ret; \
+	})
+
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0))
+
+#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \
+											min_scale, max_scale, \
+											can_position, can_update_disabled) \
+	({ \
+		const struct drm_rect __clip = { \
+			.x2 = crtc_state->crtc->mode.hdisplay, \
+			.y2 = crtc_state->crtc->mode.vdisplay, \
+		}; \
+		int __ret = drm_atomic_helper_check_plane_state(plane_state, \
+														crtc_state, \
+														&__clip, \
+														min_scale, max_scale, \
+														can_position, \
+														can_update_disabled); \
+		__ret; \
+	})
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+
+#define drm_connector_attach_encoder(connector, encoder) \
+	drm_mode_connector_attach_encoder(connector, encoder)
+
+#define drm_connector_update_edid_property(connector, edid) \
+	drm_mode_connector_update_edid_property(connector, edid)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
+
+/*
+ * Work around architectures, e.g. MIPS, that define copy_from_user and
+ * copy_to_user as macros that call access_ok, as this gets redefined below.
+ * As of kernel 4.12, these functions are no longer defined per-architecture,
+ * so this workaround isn't needed.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+#if defined(copy_from_user)
+ /*
+  * NOTE: This function should not be called directly as it exists simply to
+  * work around copy_from_user being defined as a macro that calls access_ok.
+  */
+static inline int
+__pvr_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	return copy_from_user(to, from, n);
+}
+
+#undef copy_from_user
+#define copy_from_user(to, from, n) __pvr_copy_from_user(to, from, n)
+#endif
+
+#if defined(copy_to_user)
+ /*
+  * NOTE: This function should not be called directly as it exists simply to
+  * work around copy_to_user being defined as a macro that calls access_ok.
+  */
+static inline int
+__pvr_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	return copy_to_user(to, from, n);
+}
+
+#undef copy_to_user
+#define copy_to_user(to, from, n) __pvr_copy_to_user(to, from, n)
+#endif
+#endif
+
+/*
+ * Linux 5.0 dropped the type argument from access_ok().
+ *
+ * This is unused in at least Linux 3.4 and above for all architectures other
+ * than 'um' (User Mode Linux), which stopped using it in 4.2.
+ */
+#if defined(access_ok)
+ /*
+  * NOTE: This function should not be called directly as it exists simply to
+  * work around access_ok being defined as a macro.
+  */
+static inline int
+__pvr_access_ok_compat(int type, const void __user * addr, unsigned long size)
+{
+	return access_ok(type, addr, size);
+}
+
+#undef access_ok
+#define access_ok(addr, size) __pvr_access_ok_compat(0, addr, size)
+#else
+#define access_ok(addr, size) access_ok(0, addr, size)
+#endif
+
+#endif
+
+#if defined(CONFIG_L4)
+
+/*
+ * Headers shouldn't normally be included by this file but this is a special
+ * case to access the memory translation API when running on the L4 ukernel
+ */
+#include <asm/api-l4env/api.h>
+
+#undef page_to_phys
+#define page_to_phys(x) l4x_virt_to_phys((void *)((phys_addr_t)page_to_pfn(x) << PAGE_SHIFT))
+
+#endif /* defined(CONFIG_L4) */
+
+#endif /* __KERNEL_COMPATIBILITY_H__ */
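
To illustrate what these shims buy the rest of the driver, here is a sketch of caller code written purely against the modern kernel API (example_copy_in() is a hypothetical helper, not DDK code; it assumes <linux/uaccess.h>). On pre-5.0 kernels the access_ok() wrapper above re-inserts the legacy type argument, and on pre-4.12 kernels the __pvr_copy_* wrappers preserve the original checked-copy semantics, so the same two-argument source compiles everywhere:

    #include <linux/uaccess.h>

    static int example_copy_in(void *to, const void __user *from, size_t n)
    {
        if (!access_ok(from, n))         /* two-argument form on all kernels */
            return -EFAULT;
        if (copy_from_user(to, from, n)) /* checked copy, even pre-4.12 */
            return -EFAULT;
        return 0;
    }
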
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/kernel_nospec.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/kernel_nospec.h
new file mode 100644
index 0000000..e27a3eb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/kernel_nospec.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          Macro to limit CPU speculative execution in kernel code
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Per-version macros to allow code to seamlessly use older kernel versions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KERNEL_NOSPEC_H__
+#define __KERNEL_NOSPEC_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 2) ||			\
+	(LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) &&		\
+	 LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 18)) ||		\
+	(LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) &&		\
+	 LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 81)) ||		\
+	(LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) &&		\
+	 LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 118)))
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/nospec.h>
+#else
+#define array_index_nospec(index, size) (index)
+#endif
+
+/*
+ * For Ubuntu kernels, the features available for a given Linux version code
+ * may not match those in upstream kernels. This is the case for the
+ * availability of the array_index_nospec macro.
+ */
+#if !defined(array_index_nospec)
+#define array_index_nospec(index, size) (index)
+#endif
+
+#endif /* __KERNEL_NOSPEC_H__ */
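
A sketch of the standard Spectre-v1 pattern this header enables (example_table and example_lookup() are hypothetical): bounds-check first, then clamp the index against speculative out-of-range use. On kernels without <linux/nospec.h> the fallback above reduces the clamp to a no-op, so the explicit bounds check must never be dropped:

    #include <linux/kernel.h>

    static const int example_table[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

    static int example_lookup(unsigned int idx)
    {
        if (idx >= ARRAY_SIZE(example_table))
            return -EINVAL;
        /* Stop the CPU speculatively reading past the bounds check */
        idx = array_index_nospec(idx, ARRAY_SIZE(example_table));
        return example_table[idx];
    }
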
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/kernel_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/kernel_types.h
new file mode 100644
index 0000000..c93b59e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/kernel_types.h
@@ -0,0 +1,138 @@
+/*************************************************************************/ /*!
+@Title          C99-compatible types and definitions for Linux kernel code
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+
+/* Limits of specified-width integer types */
+
+/* S8_MIN, etc. were added in kernel version 3.14. The fallback definitions
+ * below are for earlier kernels; they can be removed once older kernels no
+ * longer need to be supported.
+ */
+#ifdef S8_MIN
+	#define INT8_MIN	S8_MIN
+#else
+	#define INT8_MIN	(-128)
+#endif
+
+#ifdef S8_MAX
+	#define INT8_MAX	S8_MAX
+#else
+	#define INT8_MAX	127
+#endif
+
+#ifdef U8_MAX
+	#define UINT8_MAX	U8_MAX
+#else
+	#define UINT8_MAX	0xFF
+#endif
+
+#ifdef S16_MIN
+	#define INT16_MIN	S16_MIN
+#else
+	#define INT16_MIN	(-32768)
+#endif
+
+#ifdef S16_MAX
+	#define INT16_MAX	S16_MAX
+#else
+	#define INT16_MAX	32767
+#endif
+
+#ifdef U16_MAX
+	#define UINT16_MAX	U16_MAX
+#else
+	#define UINT16_MAX	0xFFFF
+#endif
+
+#ifdef S32_MIN
+	#define INT32_MIN	S32_MIN
+#else
+	#define INT32_MIN	(-2147483647 - 1)
+#endif
+
+#ifdef S32_MAX
+	#define INT32_MAX	S32_MAX
+#else
+	#define INT32_MAX	2147483647
+#endif
+
+#ifdef U32_MAX
+	#define UINT32_MAX	U32_MAX
+#else
+	#define UINT32_MAX	0xFFFFFFFF
+#endif
+
+#ifdef S64_MIN
+	#define INT64_MIN	S64_MIN
+#else
+	#define INT64_MIN	(-9223372036854775807LL)
+#endif
+
+#ifdef S64_MAX
+	#define INT64_MAX	S64_MAX
+#else
+	#define INT64_MAX	9223372036854775807LL
+#endif
+
+#ifdef U64_MAX
+	#define UINT64_MAX	U64_MAX
+#else
+	#define UINT64_MAX	0xFFFFFFFFFFFFFFFFULL
+#endif
+
+/* Macros for integer constants */
+#define INT8_C			S8_C
+#define UINT8_C			U8_C
+#define INT16_C			S16_C
+#define UINT16_C		U16_C
+#define INT32_C			S32_C
+#define UINT32_C		U32_C
+#define INT64_C			S64_C
+#define UINT64_C		U64_C
+
+/* Format conversion of integer types, as in <inttypes.h> */
+
+#define PRIX64		"llX"
+#define PRIx64		"llx"
+#define PRIu64		"llu"
+#define PRId64		"lld"
+
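
A quick sketch of why the PRI* mappings exist (example_print() is a hypothetical helper): they let shared C99-style code print fixed-width values with a single format string, since the kernel's u64/s64 are 'long long' on all supported architectures:

    #include <linux/kernel.h>

    static void example_print(u64 ui64Val)
    {
        /* Expands to: "value = 0x%llX (%llu)\n" */
        pr_info("value = 0x%" PRIX64 " (%" PRIu64 ")\n", ui64Val, ui64Val);
    }
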
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgx_bvnc_defs_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgx_bvnc_defs_km.h
new file mode 100644
index 0000000..0d8319c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgx_bvnc_defs_km.h
@@ -0,0 +1,299 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgx_bvnc_defs_km.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/******************************************************************************
+ *                 Auto generated file by rgxbvnc_tablegen.py                 *
+ *                  This file should not be edited manually                   *
+ *****************************************************************************/
+
+#ifndef RGX_BVNC_DEFS_KM_H
+#define RGX_BVNC_DEFS_KM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+#if defined(RGX_BVNC_DEFS_UM_H)
+#error "This file should not be included in conjunction with rgx_bvnc_defs_um.h"
+#endif
+
+#define BVNC_FIELD_WIDTH (16U)
+
+
+/******************************************************************************
+ * Mask and bit-position macros for features without values
+ *****************************************************************************/
+
+#define	RGX_FEATURE_AXI_ACELITE_POS                                 	(0U)
+#define	RGX_FEATURE_AXI_ACELITE_BIT_MASK                            	(IMG_UINT64_C(0x0000000000000001))
+
+#define	RGX_FEATURE_CLUSTER_GROUPING_POS                            	(1U)
+#define	RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK                       	(IMG_UINT64_C(0x0000000000000002))
+
+#define	RGX_FEATURE_COMPUTE_POS                                     	(2U)
+#define	RGX_FEATURE_COMPUTE_BIT_MASK                                	(IMG_UINT64_C(0x0000000000000004))
+
+#define	RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS                      	(3U)
+#define	RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK                 	(IMG_UINT64_C(0x0000000000000008))
+
+#define	RGX_FEATURE_COMPUTE_OVERLAP_POS                             	(4U)
+#define	RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK                        	(IMG_UINT64_C(0x0000000000000010))
+
+#define	RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS               	(5U)
+#define	RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK          	(IMG_UINT64_C(0x0000000000000020))
+
+#define	RGX_FEATURE_COREID_PER_OS_POS                               	(6U)
+#define	RGX_FEATURE_COREID_PER_OS_BIT_MASK                          	(IMG_UINT64_C(0x0000000000000040))
+
+#define	RGX_FEATURE_DUST_POWER_ISLAND_S7_POS                        	(7U)
+#define	RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK                   	(IMG_UINT64_C(0x0000000000000080))
+
+#define	RGX_FEATURE_DYNAMIC_DUST_POWER_POS                          	(8U)
+#define	RGX_FEATURE_DYNAMIC_DUST_POWER_BIT_MASK                     	(IMG_UINT64_C(0x0000000000000100))
+
+#define	RGX_FEATURE_FASTRENDER_DM_POS                               	(9U)
+#define	RGX_FEATURE_FASTRENDER_DM_BIT_MASK                          	(IMG_UINT64_C(0x0000000000000200))
+
+#define	RGX_FEATURE_GPU_VIRTUALISATION_POS                          	(10U)
+#define	RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK                     	(IMG_UINT64_C(0x0000000000000400))
+
+#define	RGX_FEATURE_GS_RTA_SUPPORT_POS                              	(11U)
+#define	RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK                         	(IMG_UINT64_C(0x0000000000000800))
+
+#define	RGX_FEATURE_META_DMA_POS                                    	(12U)
+#define	RGX_FEATURE_META_DMA_BIT_MASK                               	(IMG_UINT64_C(0x0000000000001000))
+
+#define	RGX_FEATURE_MIPS_POS                                        	(13U)
+#define	RGX_FEATURE_MIPS_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000002000))
+
+#define	RGX_FEATURE_PBE2_IN_XE_POS                                  	(14U)
+#define	RGX_FEATURE_PBE2_IN_XE_BIT_MASK                             	(IMG_UINT64_C(0x0000000000004000))
+
+#define	RGX_FEATURE_PBVNC_COREID_REG_POS                            	(15U)
+#define	RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK                       	(IMG_UINT64_C(0x0000000000008000))
+
+#define	RGX_FEATURE_PDS_PER_DUST_POS                                	(16U)
+#define	RGX_FEATURE_PDS_PER_DUST_BIT_MASK                           	(IMG_UINT64_C(0x0000000000010000))
+
+#define	RGX_FEATURE_PDS_TEMPSIZE8_POS                               	(17U)
+#define	RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK                          	(IMG_UINT64_C(0x0000000000020000))
+
+#define	RGX_FEATURE_PERFBUS_POS                                     	(18U)
+#define	RGX_FEATURE_PERFBUS_BIT_MASK                                	(IMG_UINT64_C(0x0000000000040000))
+
+#define	RGX_FEATURE_PERF_COUNTER_BATCH_POS                          	(19U)
+#define	RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK                     	(IMG_UINT64_C(0x0000000000080000))
+
+#define	RGX_FEATURE_RAY_TRACING_DEPRECATED_POS                      	(20U)
+#define	RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK                 	(IMG_UINT64_C(0x0000000000100000))
+
+#define	RGX_FEATURE_ROGUEXE_POS                                     	(21U)
+#define	RGX_FEATURE_ROGUEXE_BIT_MASK                                	(IMG_UINT64_C(0x0000000000200000))
+
+#define	RGX_FEATURE_S7_CACHE_HIERARCHY_POS                          	(22U)
+#define	RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK                     	(IMG_UINT64_C(0x0000000000400000))
+
+#define	RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS                       	(23U)
+#define	RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK                  	(IMG_UINT64_C(0x0000000000800000))
+
+#define	RGX_FEATURE_SCALABLE_VDM_GPP_POS                            	(24U)
+#define	RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK                       	(IMG_UINT64_C(0x0000000001000000))
+
+#define	RGX_FEATURE_SIGNAL_SNOOPING_POS                             	(25U)
+#define	RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK                        	(IMG_UINT64_C(0x0000000002000000))
+
+#define	RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_POS            	(26U)
+#define	RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_BIT_MASK       	(IMG_UINT64_C(0x0000000004000000))
+
+#define	RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_POS         	(27U)
+#define	RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_BIT_MASK    	(IMG_UINT64_C(0x0000000008000000))
+
+#define	RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_POS         	(28U)
+#define	RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_BIT_MASK    	(IMG_UINT64_C(0x0000000010000000))
+
+#define	RGX_FEATURE_SINGLE_BIF_POS                                  	(29U)
+#define	RGX_FEATURE_SINGLE_BIF_BIT_MASK                             	(IMG_UINT64_C(0x0000000020000000))
+
+#define	RGX_FEATURE_SLCSIZE8_POS                                    	(30U)
+#define	RGX_FEATURE_SLCSIZE8_BIT_MASK                               	(IMG_UINT64_C(0x0000000040000000))
+
+#define	RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_POS                 	(31U)
+#define	RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_BIT_MASK            	(IMG_UINT64_C(0x0000000080000000))
+
+#define	RGX_FEATURE_SLC_VIVT_POS                                    	(32U)
+#define	RGX_FEATURE_SLC_VIVT_BIT_MASK                               	(IMG_UINT64_C(0x0000000100000000))
+
+#define	RGX_FEATURE_SYS_BUS_SECURE_RESET_POS                        	(33U)
+#define	RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK                   	(IMG_UINT64_C(0x0000000200000000))
+
+#define	RGX_FEATURE_TDM_PDS_CHECKSUM_POS                            	(34U)
+#define	RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK                       	(IMG_UINT64_C(0x0000000400000000))
+
+#define	RGX_FEATURE_TESSELLATION_POS                                	(35U)
+#define	RGX_FEATURE_TESSELLATION_BIT_MASK                           	(IMG_UINT64_C(0x0000000800000000))
+
+#define	RGX_FEATURE_TLA_POS                                         	(36U)
+#define	RGX_FEATURE_TLA_BIT_MASK                                    	(IMG_UINT64_C(0x0000001000000000))
+
+#define	RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS         	(37U)
+#define	RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK    	(IMG_UINT64_C(0x0000002000000000))
+
+#define	RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS                     	(38U)
+#define	RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK                	(IMG_UINT64_C(0x0000004000000000))
+
+#define	RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_POS                  	(39U)
+#define	RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK             	(IMG_UINT64_C(0x0000008000000000))
+
+#define	RGX_FEATURE_VDM_DRAWINDIRECT_POS                            	(40U)
+#define	RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK                       	(IMG_UINT64_C(0x0000010000000000))
+
+#define	RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS                        	(41U)
+#define	RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK                   	(IMG_UINT64_C(0x0000020000000000))
+
+#define	RGX_FEATURE_XE_MEMORY_HIERARCHY_POS                         	(42U)
+#define	RGX_FEATURE_XE_MEMORY_HIERARCHY_BIT_MASK                    	(IMG_UINT64_C(0x0000040000000000))
+
+#define	RGX_FEATURE_XT_TOP_INFRASTRUCTURE_POS                       	(43U)
+#define	RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK                  	(IMG_UINT64_C(0x0000080000000000))
+
+
+/******************************************************************************
+ * Features with values indexes
+ *****************************************************************************/
+
+typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ {
+	RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_IDX,
+	RGX_FEATURE_FBCDC_ALGORITHM_IDX,
+	RGX_FEATURE_FBCDC_ARCHITECTURE_IDX,
+	RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_IDX,
+	RGX_FEATURE_META_IDX,
+	RGX_FEATURE_META_COREMEM_BANKS_IDX,
+	RGX_FEATURE_META_COREMEM_SIZE_IDX,
+	RGX_FEATURE_META_DMA_CHANNEL_COUNT_IDX,
+	RGX_FEATURE_NUM_CLUSTERS_IDX,
+	RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX,
+	RGX_FEATURE_NUM_RASTER_PIPES_IDX,
+	RGX_FEATURE_PHYS_BUS_WIDTH_IDX,
+	RGX_FEATURE_SCALABLE_TE_ARCH_IDX,
+	RGX_FEATURE_SCALABLE_VCE_IDX,
+	RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_IDX,
+	RGX_FEATURE_SLC_BANKS_IDX,
+	RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_IDX,
+	RGX_FEATURE_SLC_SIZE_IN_BYTES_IDX,
+	RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_IDX,
+	RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_IDX,
+	RGX_FEATURE_WITH_VALUES_MAX_IDX,
+} RGX_FEATURE_WITH_VALUE_INDEX;
+
+
+/******************************************************************************
+ * Mask and bit-position macros for ERNs and BRNs
+ *****************************************************************************/
+
+#define	FIX_HW_BRN_38344_POS                                        	(0U)
+#define	FIX_HW_BRN_38344_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000001))
+
+#define	HW_ERN_42290_POS                                            	(1U)
+#define	HW_ERN_42290_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000000002))
+
+#define	FIX_HW_BRN_42321_POS                                        	(2U)
+#define	FIX_HW_BRN_42321_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000004))
+
+#define	FIX_HW_BRN_42480_POS                                        	(3U)
+#define	FIX_HW_BRN_42480_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000008))
+
+#define	HW_ERN_42606_POS                                            	(4U)
+#define	HW_ERN_42606_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000000010))
+
+#define	FIX_HW_BRN_43276_POS                                        	(5U)
+#define	FIX_HW_BRN_43276_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000020))
+
+#define	FIX_HW_BRN_44871_POS                                        	(6U)
+#define	FIX_HW_BRN_44871_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000040))
+
+#define	HW_ERN_45914_POS                                            	(7U)
+#define	HW_ERN_45914_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000000080))
+
+#define	HW_ERN_46066_POS                                            	(8U)
+#define	HW_ERN_46066_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000000100))
+
+#define	HW_ERN_47025_POS                                            	(9U)
+#define	HW_ERN_47025_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000000200))
+
+#define	HW_ERN_50539_POS                                            	(10U)
+#define	HW_ERN_50539_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000000400))
+
+#define	FIX_HW_BRN_50767_POS                                        	(11U)
+#define	FIX_HW_BRN_50767_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000800))
+
+#define	HW_ERN_51468_POS                                            	(12U)
+#define	HW_ERN_51468_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000001000))
+
+#define	HW_ERN_57596_POS                                            	(13U)
+#define	HW_ERN_57596_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000002000))
+
+#define	FIX_HW_BRN_60084_POS                                        	(14U)
+#define	FIX_HW_BRN_60084_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000004000))
+
+#define	HW_ERN_61389_POS                                            	(15U)
+#define	HW_ERN_61389_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000008000))
+
+#define	FIX_HW_BRN_61450_POS                                        	(16U)
+#define	FIX_HW_BRN_61450_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000010000))
+
+#define	FIX_HW_BRN_63142_POS                                        	(17U)
+#define	FIX_HW_BRN_63142_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000020000))
+
+#define	FIX_HW_BRN_63553_POS                                        	(18U)
+#define	FIX_HW_BRN_63553_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000040000))
+
+#define	FIX_HW_BRN_65273_POS                                        	(19U)
+#define	FIX_HW_BRN_65273_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000080000))
+
+#define	HW_ERN_66622_POS                                            	(20U)
+#define	HW_ERN_66622_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000100000))
+
+/* Macro used for padding the unavailable values for features with values */
+#define RGX_FEATURE_VALUE_INVALID	(0xFFFFFFFEU)
+
+/* Macro used for marking a feature with value as disabled for a specific bvnc */
+#define RGX_FEATURE_VALUE_DISABLED	(0xFFFFFFFFU)
+
+#endif /* RGX_BVNC_DEFS_KM_H */
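
A short sketch of how these auto-generated masks are typically consumed (ExampleHasCompute() and its ui64Features argument are hypothetical; in the driver, the per-BVNC feature words come from the table in rgx_bvnc_table_km.h). Presence-only features occupy a single bit, tested via the *_BIT_MASK macros, with *_POS giving the equivalent shift:

    static IMG_BOOL ExampleHasCompute(IMG_UINT64 ui64Features)
    {
        /* ((ui64Features >> RGX_FEATURE_COMPUTE_POS) & 1) is the
         * equivalent shift form of the same test. */
        return (ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK) != 0;
    }
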
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgx_bvnc_table_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgx_bvnc_table_km.h
new file mode 100644
index 0000000..70b522e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgx_bvnc_table_km.h
@@ -0,0 +1,403 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgx_bvnc_table_km.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/******************************************************************************
+ *                 Auto generated file by rgxbvnc_tablegen.py                 *
+ *                  This file should not be edited manually                   *
+ *****************************************************************************/
+
+#ifndef RGX_BVNC_TABLE_KM_H
+#define RGX_BVNC_TABLE_KM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "rgxdefs_km.h"
+
+#ifndef _RGXBVNC_C_
+#error "This file should only be included from rgxbvnc.c"
+#endif
+
+#if defined(RGX_BVNC_TABLE_UM_H)
+#error "This file should not be included in conjunction with rgx_bvnc_table_um.h"
+#endif
+
+
+/******************************************************************************
+ * Defines and arrays for each feature with values used
+ * for handling the corresponding values
+ *****************************************************************************/
+
+#define	RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX	(2)
+static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, };
+
+#define	RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX	(5)
+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ALGORITHM_values[RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, };
+
+#define	RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX	(4)
+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, };
+
+#define	RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_MAX_VALUE_IDX	(2)
+static const IMG_UINT16 aui16_RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_values[RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, };
+
+#define	RGX_FEATURE_META_MAX_VALUE_IDX	(5)
+static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, LTP217, LTP218, MTP218, MTP219, };
+
+#define	RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX	(2)
+static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, };
+
+#define	RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX	(5)
+static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 32, 64, 256, };
+
+#define	RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX	(2)
+static const IMG_UINT16 aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, };
+
+#define	RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX	(7)
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, 8, 16, };
+
+#define	RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX	(11)
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 6, 7, 8, 12, 16, 32, };
+
+#define	RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX	(3)
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_RASTER_PIPES_values[RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, };
+
+#define	RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX	(4)
+static const IMG_UINT16 aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, 36, 40, };
+
+#define	RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX	(4)
+static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, };
+
+#define	RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX	(4)
+static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, };
+
+#define	RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX	(3)
+static const IMG_UINT16 aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values[RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, };
+
+#define	RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX	(4)
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, };
+
+#define	RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX	(2)
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 512, };
+
+#define	RGX_FEATURE_SLC_SIZE_IN_BYTES_MAX_VALUE_IDX	(5)
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_BYTES_values[RGX_FEATURE_SLC_SIZE_IN_BYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16, 64, 128, 256, };
+
+#define	RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX	(4)
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 128, 256, 512, };
+
+#define	RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX	(2)
+static const IMG_UINT16 aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values[RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, };
+
+
+/******************************************************************************
+ * Table of pointers to the value array of each feature that has
+ * values.
+ * Indexed using enum RGX_FEATURE_WITH_VALUE_INDEX from rgx_bvnc_defs_km.h.
+ *****************************************************************************/
+
+static const IMG_UINT16 *gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = {
+	aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values,
+	aui16_RGX_FEATURE_FBCDC_ALGORITHM_values,
+	aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values,
+	aui16_RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_values,
+	aui16_RGX_FEATURE_META_values,
+	aui16_RGX_FEATURE_META_COREMEM_BANKS_values,
+	aui16_RGX_FEATURE_META_COREMEM_SIZE_values,
+	aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values,
+	aui16_RGX_FEATURE_NUM_CLUSTERS_values,
+	aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values,
+	aui16_RGX_FEATURE_NUM_RASTER_PIPES_values,
+	aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values,
+	aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values,
+	aui16_RGX_FEATURE_SCALABLE_VCE_values,
+	aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values,
+	aui16_RGX_FEATURE_SLC_BANKS_values,
+	aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values,
+	aui16_RGX_FEATURE_SLC_SIZE_IN_BYTES_values,
+	aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values,
+	aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values,
+};
+
+
+/******************************************************************************
+ * Array containing the length of each of the value arrays above.
+ * Used when indexing the aui16_<FEATURE>_values arrays defined above.
+ *****************************************************************************/
+
+
+static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = {
+	RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX,
+	RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX,
+	RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX,
+	RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_MAX_VALUE_IDX,
+	RGX_FEATURE_META_MAX_VALUE_IDX,
+	RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX,
+	RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX,
+	RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX,
+	RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX,
+	RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX,
+	RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX,
+	RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX,
+	RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX,
+	RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX,
+	RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX,
+	RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX,
+	RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX,
+	RGX_FEATURE_SLC_SIZE_IN_BYTES_MAX_VALUE_IDX,
+	RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX,
+	RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX,
+};
+
+
+/******************************************************************************
+ * Bit-positions for features with values
+ *****************************************************************************/
+
+static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = {
+	(0U), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */
+	(2U), /* RGX_FEATURE_FBCDC_ALGORITHM_POS */
+	(5U), /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */
+	(8U), /* RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_POS */
+	(10U), /* RGX_FEATURE_META_POS */
+	(13U), /* RGX_FEATURE_META_COREMEM_BANKS_POS */
+	(15U), /* RGX_FEATURE_META_COREMEM_SIZE_POS */
+	(18U), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */
+	(20U), /* RGX_FEATURE_NUM_CLUSTERS_POS */
+	(23U), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */
+	(27U), /* RGX_FEATURE_NUM_RASTER_PIPES_POS */
+	(29U), /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */
+	(32U), /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */
+	(35U), /* RGX_FEATURE_SCALABLE_VCE_POS */
+	(38U), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_POS */
+	(40U), /* RGX_FEATURE_SLC_BANKS_POS */
+	(43U), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */
+	(45U), /* RGX_FEATURE_SLC_SIZE_IN_BYTES_POS */
+	(48U), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */
+	(51U), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */
+};
+
+
+/******************************************************************************
+ * Bit-masks for features with values
+ *****************************************************************************/
+
+static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = {
+	(IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_BIT_MASK */
+	(IMG_UINT64_C(0x000000000000001C)), /* RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK */
+	(IMG_UINT64_C(0x00000000000000E0)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */
+	(IMG_UINT64_C(0x0000000000000300)), /* RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_BIT_MASK */
+	(IMG_UINT64_C(0x0000000000001C00)), /* RGX_FEATURE_META_BIT_MASK */
+	(IMG_UINT64_C(0x0000000000006000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */
+	(IMG_UINT64_C(0x0000000000038000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */
+	(IMG_UINT64_C(0x00000000000C0000)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */
+	(IMG_UINT64_C(0x0000000000700000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */
+	(IMG_UINT64_C(0x0000000007800000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */
+	(IMG_UINT64_C(0x0000000018000000)), /* RGX_FEATURE_NUM_RASTER_PIPES_BIT_MASK */
+	(IMG_UINT64_C(0x00000000E0000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */
+	(IMG_UINT64_C(0x0000000700000000)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */
+	(IMG_UINT64_C(0x0000003800000000)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */
+	(IMG_UINT64_C(0x000000C000000000)), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_BIT_MASK */
+	(IMG_UINT64_C(0x0000070000000000)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */
+	(IMG_UINT64_C(0x0000180000000000)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */
+	(IMG_UINT64_C(0x0000E00000000000)), /* RGX_FEATURE_SLC_SIZE_IN_BYTES_BIT_MASK */
+	(IMG_UINT64_C(0x0007000000000000)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */
+	(IMG_UINT64_C(0x0018000000000000)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */
+};
+
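+/* Illustrative sketch, not part of the generated tables: rgxbvnc.c
+ * presumably recovers a feature's value by isolating its field in the
+ * packed 64-bit feature-value word with the bit position and mask above,
+ * then translating that index through gaFeaturesValues. The helper name
+ * below is hypothetical and the snippet is compiled out. */
+#if 0 /* example only */
+static IMG_UINT16 ExampleDecodeFeatureValue(IMG_UINT64 ui64PackedValues,
+                                            IMG_UINT32 ui32FeatureIdx)
+{
+	IMG_UINT32 ui32ValueIdx =
+	    (IMG_UINT32)((ui64PackedValues &
+	                  aui64FeaturesWithValuesBitMasks[ui32FeatureIdx]) >>
+	                 aui16FeaturesWithValuesBitPositions[ui32FeatureIdx]);
+
+	/* Reject indices beyond the per-feature value-array length. */
+	if (ui32ValueIdx >= gaFeaturesValuesMaxIndexes[ui32FeatureIdx])
+		return (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED;
+
+	return gaFeaturesValues[ui32FeatureIdx][ui32ValueIdx];
+}
+#endif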
+
+/******************************************************************************
+ * Table mapping each BVNC to its bitmasks for features and for features with values
+ *****************************************************************************/
+
+
+static const IMG_UINT64 gaFeatures[][3]=
+{
+	{ IMG_UINT64_C(0x000100000002001e), IMG_UINT64_C(0x0000001000040815), IMG_UINT64_C(0x0008680061a08c24) },	/* 1.0.2.30 */
+	{ IMG_UINT64_C(0x0001000000040005), IMG_UINT64_C(0x0000001000040814), IMG_UINT64_C(0x0008680061b08c24) },	/* 1.0.4.5 */
+	{ IMG_UINT64_C(0x0001000000040013), IMG_UINT64_C(0x0000001000040815), IMG_UINT64_C(0x0008680061b08c24) },	/* 1.0.4.19 */
+	{ IMG_UINT64_C(0x0004000000020033), IMG_UINT64_C(0x000008b0000c091f), IMG_UINT64_C(0x0008680063a10848) },	/* 4.0.2.51 */
+	{ IMG_UINT64_C(0x0004000000020039), IMG_UINT64_C(0x000008b0000c0d1f), IMG_UINT64_C(0x0008680063a10948) },	/* 4.0.2.57 */
+	{ IMG_UINT64_C(0x000400000002003a), IMG_UINT64_C(0x000008b0000c0d1f), IMG_UINT64_C(0x0008680063a10948) },	/* 4.0.2.58 */
+	{ IMG_UINT64_C(0x0004000000040037), IMG_UINT64_C(0x000008b0000c091e), IMG_UINT64_C(0x0008680063b10848) },	/* 4.0.4.55 */
+	{ IMG_UINT64_C(0x000400000006003e), IMG_UINT64_C(0x000008b0000c0d1f), IMG_UINT64_C(0x00086b0063c10948) },	/* 4.0.6.62 */
+	{ IMG_UINT64_C(0x000500000001002e), IMG_UINT64_C(0x0000000000240905), IMG_UINT64_C(0x0008290068908428) },	/* 5.0.1.46 */
+	{ IMG_UINT64_C(0x0006000000040023), IMG_UINT64_C(0x000008b0001c091f), IMG_UINT64_C(0x0008680063b10848) },	/* 6.0.4.35 */
+	{ IMG_UINT64_C(0x000f000000010040), IMG_UINT64_C(0x0000000000240d05), IMG_UINT64_C(0x0008490069108528) },	/* 15.0.1.64 */
+	{ IMG_UINT64_C(0x0016000000150010), IMG_UINT64_C(0x000000026c24ec05), IMG_UINT64_C(0x0008094029100100) },	/* 22.0.21.16 */
+	{ IMG_UINT64_C(0x0016000000160019), IMG_UINT64_C(0x000000022c24ec05), IMG_UINT64_C(0x0008294029100100) },	/* 22.0.22.25 */
+	{ IMG_UINT64_C(0x001600000016001d), IMG_UINT64_C(0x000000022c24ec05), IMG_UINT64_C(0x0008294029100100) },	/* 22.0.22.29 */
+	{ IMG_UINT64_C(0x0016000000360019), IMG_UINT64_C(0x000000022c24ec05), IMG_UINT64_C(0x0008494029900100) },	/* 22.0.54.25 */
+	{ IMG_UINT64_C(0x001600000036001e), IMG_UINT64_C(0x000000022c24ec05), IMG_UINT64_C(0x000849402a100100) },	/* 22.0.54.30 */
+	{ IMG_UINT64_C(0x0016000000360026), IMG_UINT64_C(0x000000022c24ec05), IMG_UINT64_C(0x000849404a100100) },	/* 22.0.54.38 */
+	{ IMG_UINT64_C(0x001600000036014a), IMG_UINT64_C(0x000000022c24ec05), IMG_UINT64_C(0x000849402a10012c) },	/* 22.0.54.330 */
+	{ IMG_UINT64_C(0x0016000000680012), IMG_UINT64_C(0x000000022c24ec05), IMG_UINT64_C(0x000849404b100100) },	/* 22.0.104.18 */
+	{ IMG_UINT64_C(0x00160000006800da), IMG_UINT64_C(0x000000022c24ec05), IMG_UINT64_C(0x000849404b10012c) },	/* 22.0.104.218 */
+	{ IMG_UINT64_C(0x0016000000d0013e), IMG_UINT64_C(0x000000022c24ec05), IMG_UINT64_C(0x00084a405420012c) },	/* 22.0.208.318 */
+	{ IMG_UINT64_C(0x00180000003600cc), IMG_UINT64_C(0x000004061424ee05), IMG_UINT64_C(0x000849804a10012c) },	/* 24.0.54.204 */
+	{ IMG_UINT64_C(0x00180000006801f8), IMG_UINT64_C(0x000004061424ee05), IMG_UINT64_C(0x000849804a90012c) },	/* 24.0.104.504 */
+	{ IMG_UINT64_C(0x0018000000d001f8), IMG_UINT64_C(0x000004061424ee05), IMG_UINT64_C(0x00086a805420012c) },	/* 24.0.208.504 */
+	{ IMG_UINT64_C(0x0018000000d001f9), IMG_UINT64_C(0x000004061424ee05), IMG_UINT64_C(0x00086a805420012c) },	/* 24.0.208.505 */
+	{ IMG_UINT64_C(0x001d0000003400ca), IMG_UINT64_C(0x000004661424ee45), IMG_UINT64_C(0x0008298049100130) },	/* 29.0.52.202 */
+	{ IMG_UINT64_C(0x001d0000006c00d0), IMG_UINT64_C(0x000004661424ee45), IMG_UINT64_C(0x00086a8053a00130) },	/* 29.0.108.208 */
+};
+
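+/* Worked example, derived from the row comments above: the first column
+ * packs B.V.N.C as four 16-bit fields, e.g. "22.0.208.318" is
+ * 0x0016000000d0013e (B=0x16=22, V=0, N=0xd0=208, C=0x13e=318).
+ * A minimal decode sketch, assuming that layout (macro names are
+ * hypothetical): */
+#if 0 /* example only */
+#define EXAMPLE_BVNC_B(ui64) ((IMG_UINT32)(((ui64) >> 48) & 0xFFFF))
+#define EXAMPLE_BVNC_V(ui64) ((IMG_UINT32)(((ui64) >> 32) & 0xFFFF))
+#define EXAMPLE_BVNC_N(ui64) ((IMG_UINT32)(((ui64) >> 16) & 0xFFFF))
+#define EXAMPLE_BVNC_C(ui64) ((IMG_UINT32)((ui64) & 0xFFFF))
+#endif
+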
+/******************************************************************************
+ * Table mapping bitmasks for ERNs/BRNs
+ *****************************************************************************/
+
+
+static const IMG_UINT64 gaErnsBrns[][2]=
+{
+	{ IMG_UINT64_C(0x0001002700040013), IMG_UINT64_C(0x0000000000000005) },	/* 1.39.4.19 */
+	{ IMG_UINT64_C(0x0001004b0002001e), IMG_UINT64_C(0x0000000000000004) },	/* 1.75.2.30 */
+	{ IMG_UINT64_C(0x0001005200040005), IMG_UINT64_C(0x0000000000000000) },	/* 1.82.4.5 */
+	{ IMG_UINT64_C(0x0004001d00020033), IMG_UINT64_C(0x0000000000020812) },	/* 4.29.2.51 */
+	{ IMG_UINT64_C(0x0004001f00040037), IMG_UINT64_C(0x0000000000020812) },	/* 4.31.4.55 */
+	{ IMG_UINT64_C(0x0004002800020033), IMG_UINT64_C(0x0000000000020812) },	/* 4.40.2.51 */
+	{ IMG_UINT64_C(0x0004002900020039), IMG_UINT64_C(0x0000000000020812) },	/* 4.41.2.57 */
+	{ IMG_UINT64_C(0x0004002b0006003e), IMG_UINT64_C(0x0000000000020812) },	/* 4.43.6.62 */
+	{ IMG_UINT64_C(0x0004002d0002003a), IMG_UINT64_C(0x0000000000020012) },	/* 4.45.2.58 */
+	{ IMG_UINT64_C(0x0004002e0006003e), IMG_UINT64_C(0x0000000000020812) },	/* 4.46.6.62 */
+	{ IMG_UINT64_C(0x000500090001002e), IMG_UINT64_C(0x0000000000000061) },	/* 5.9.1.46 */
+	{ IMG_UINT64_C(0x0005000b0001002e), IMG_UINT64_C(0x0000000000000064) },	/* 5.11.1.46 */
+	{ IMG_UINT64_C(0x0006002200040023), IMG_UINT64_C(0x0000000000020012) },	/* 6.34.4.35 */
+	{ IMG_UINT64_C(0x000f000500010040), IMG_UINT64_C(0x0000000000000060) },	/* 15.5.1.64 */
+	{ IMG_UINT64_C(0x0016001e00360019), IMG_UINT64_C(0x00000000000d6070) },	/* 22.30.54.25 */
+	{ IMG_UINT64_C(0x001600280036001e), IMG_UINT64_C(0x00000000000d6070) },	/* 22.40.54.30 */
+	{ IMG_UINT64_C(0x0016002c00160019), IMG_UINT64_C(0x00000000000d6070) },	/* 22.44.22.25 */
+	{ IMG_UINT64_C(0x0016002e0036014a), IMG_UINT64_C(0x00000000000d4072) },	/* 22.46.54.330 */
+	{ IMG_UINT64_C(0x0016003100150010), IMG_UINT64_C(0x00000000000d6070) },	/* 22.49.21.16 */
+	{ IMG_UINT64_C(0x0016003c0016001d), IMG_UINT64_C(0x00000000000ca070) },	/* 22.60.22.29 */
+	{ IMG_UINT64_C(0x001600430036001e), IMG_UINT64_C(0x00000000000ce070) },	/* 22.67.54.30 */
+	{ IMG_UINT64_C(0x001600440036001e), IMG_UINT64_C(0x00000000000ca070) },	/* 22.68.54.30 */
+	{ IMG_UINT64_C(0x0016004b00160019), IMG_UINT64_C(0x00000000000ca070) },	/* 22.75.22.25 */
+	{ IMG_UINT64_C(0x00160056006800da), IMG_UINT64_C(0x0000000000048070) },	/* 22.86.104.218 */
+	{ IMG_UINT64_C(0x0016005700680012), IMG_UINT64_C(0x000000000004a070) },	/* 22.87.104.18 */
+	{ IMG_UINT64_C(0x0016006600360026), IMG_UINT64_C(0x000000000004a070) },	/* 22.102.54.38 */
+	{ IMG_UINT64_C(0x0016006800d0013e), IMG_UINT64_C(0x0000000000048072) },	/* 22.104.208.318 */
+	{ IMG_UINT64_C(0x0018003200d001f8), IMG_UINT64_C(0x0000000000142072) },	/* 24.50.208.504 */
+	{ IMG_UINT64_C(0x0018003800d001f9), IMG_UINT64_C(0x0000000000142072) },	/* 24.56.208.505 */
+	{ IMG_UINT64_C(0x00180042003600cc), IMG_UINT64_C(0x0000000000142072) },	/* 24.66.54.204 */
+	{ IMG_UINT64_C(0x00180043006801f8), IMG_UINT64_C(0x0000000000142072) },	/* 24.67.104.504 */
+	{ IMG_UINT64_C(0x001d000e006c00d0), IMG_UINT64_C(0x0000000000142272) },	/* 29.14.108.208 */
+	{ IMG_UINT64_C(0x001d0011003400ca), IMG_UINT64_C(0x0000000000142272) },	/* 29.17.52.202 */
+};
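+
+/* Note, an assumption from context: the first column above uses the same
+ * B.V.N.C packing as gaFeatures (e.g. 0x0001002700040013 -> 1.39.4.19,
+ * V=0x27=39), and the second column is presumably a bitmask of the
+ * applicable ERNs/BRNs, whose IDs are listed in gaui64ErnsBrnsIDs below
+ * for DEBUG builds. */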
+
+#if defined(DEBUG)
+
+#define	FEATURE_NO_VALUES_NAMES_MAX_IDX	(44)
+
+static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_MAX_IDX] =
+{
+	"AXI_ACELITE",
+	"CLUSTER_GROUPING",
+	"COMPUTE",
+	"COMPUTE_MORTON_CAPABLE",
+	"COMPUTE_OVERLAP",
+	"COMPUTE_OVERLAP_WITH_BARRIERS",
+	"COREID_PER_OS",
+	"DUST_POWER_ISLAND_S7",
+	"DYNAMIC_DUST_POWER",
+	"FASTRENDER_DM",
+	"GPU_VIRTUALISATION",
+	"GS_RTA_SUPPORT",
+	"META_DMA",
+	"MIPS",
+	"PBE2_IN_XE",
+	"PBVNC_COREID_REG",
+	"PDS_PER_DUST",
+	"PDS_TEMPSIZE8",
+	"PERFBUS",
+	"PERF_COUNTER_BATCH",
+	"RAY_TRACING_DEPRECATED",
+	"ROGUEXE",
+	"S7_CACHE_HIERARCHY",
+	"S7_TOP_INFRASTRUCTURE",
+	"SCALABLE_VDM_GPP",
+	"SIGNAL_SNOOPING",
+	"SIMPLE_INTERNAL_PARAMETER_FORMAT",
+	"SIMPLE_INTERNAL_PARAMETER_FORMAT_V1",
+	"SIMPLE_INTERNAL_PARAMETER_FORMAT_V2",
+	"SINGLE_BIF",
+	"SLCSIZE8",
+	"SLC_HYBRID_CACHELINE_64_128",
+	"SLC_VIVT",
+	"SYS_BUS_SECURE_RESET",
+	"TDM_PDS_CHECKSUM",
+	"TESSELLATION",
+	"TLA",
+	"TPU_CEM_DATAMASTER_GLOBAL_REGISTERS",
+	"TPU_DM_GLOBAL_REGISTERS",
+	"TPU_FILTERING_MODE_CONTROL",
+	"VDM_DRAWINDIRECT",
+	"VDM_OBJECT_LEVEL_LLS",
+	"XE_MEMORY_HIERARCHY",
+	"XT_TOP_INFRASTRUCTURE",
+};
+
+#define	ERNSBRNS_IDS_MAX_IDX	(21)
+
+static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] =
+{
+	38344,
+	42290,
+	42321,
+	42480,
+	42606,
+	43276,
+	44871,
+	45914,
+	46066,
+	47025,
+	50539,
+	50767,
+	51468,
+	57596,
+	60084,
+	61389,
+	61450,
+	63142,
+	63553,
+	65273,
+	66622,
+};
+
+#endif /* defined(DEBUG) */
+#endif /* RGX_BVNC_TABLE_KM_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgx_cr_defs_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgx_cr_defs_km.h
new file mode 100644
index 0000000..35b7f97
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgx_cr_defs_km.h
@@ -0,0 +1,5789 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgx_cr_defs_km.h
+@Brief          The file contains auto-generated hardware definitions without
+                BVNC-specific compile time conditionals.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+
+#ifndef RGX_CR_DEFS_KM_H
+#define RGX_CR_DEFS_KM_H
+
+#if !defined(IMG_EXPLICIT_INCLUDE_HWDEFS)
+#error This file may only be included if explicitly defined
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+
+
+#define RGX_CR_DEFS_KM_REVISION 1
+
+/*
+    Register RGX_CR_RASTERISATION_INDIRECT
+*/
+#define RGX_CR_RASTERISATION_INDIRECT                     (0x8238U)
+#define RGX_CR_RASTERISATION_INDIRECT_MASKFULL            (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT       (0U)
+#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK      (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_PBE_INDIRECT
+*/
+#define RGX_CR_PBE_INDIRECT                               (0x83E0U)
+#define RGX_CR_PBE_INDIRECT_MASKFULL                      (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT                 (0U)
+#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK                (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_PBE_PERF_INDIRECT
+*/
+#define RGX_CR_PBE_PERF_INDIRECT                          (0x83D8U)
+#define RGX_CR_PBE_PERF_INDIRECT_MASKFULL                 (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT            (0U)
+#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK           (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_TPU_PERF_INDIRECT
+*/
+#define RGX_CR_TPU_PERF_INDIRECT                          (0x83F0U)
+#define RGX_CR_TPU_PERF_INDIRECT_MASKFULL                 (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT            (0U)
+#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK           (0xFFFFFFF8U)
+
+
+/*
+    Register RGX_CR_RASTERISATION_PERF_INDIRECT
+*/
+#define RGX_CR_RASTERISATION_PERF_INDIRECT                (0x8318U)
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_MASKFULL       (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT  (0U)
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_TPU_MCU_L0_PERF_INDIRECT
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT                   (0x8028U)
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL          (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT     (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK    (0xFFFFFFF8U)
+
+
+/*
+    Register RGX_CR_USC_PERF_INDIRECT
+*/
+#define RGX_CR_USC_PERF_INDIRECT                          (0x8030U)
+#define RGX_CR_USC_PERF_INDIRECT_MASKFULL                 (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT            (0U)
+#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK           (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_BLACKPEARL_INDIRECT
+*/
+#define RGX_CR_BLACKPEARL_INDIRECT                        (0x8388U)
+#define RGX_CR_BLACKPEARL_INDIRECT_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK         (0xFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_BLACKPEARL_PERF_INDIRECT
+*/
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT                   (0x83F8U)
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL          (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT     (0U)
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK    (0xFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_TEXAS3_PERF_INDIRECT
+*/
+#define RGX_CR_TEXAS3_PERF_INDIRECT                       (0x83D0U)
+#define RGX_CR_TEXAS3_PERF_INDIRECT_MASKFULL              (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT         (0U)
+#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK        (0xFFFFFFF8U)
+
+
+/*
+    Register RGX_CR_TEXAS_PERF_INDIRECT
+*/
+#define RGX_CR_TEXAS_PERF_INDIRECT                        (0x8288U)
+#define RGX_CR_TEXAS_PERF_INDIRECT_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT          (0U)
+#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK         (0xFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_BX_TU_PERF_INDIRECT
+*/
+#define RGX_CR_BX_TU_PERF_INDIRECT                        (0xC900U)
+#define RGX_CR_BX_TU_PERF_INDIRECT_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK         (0xFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_CLK_CTRL
+*/
+#define RGX_CR_CLK_CTRL                                   (0x0000U)
+#define RGX_CR_CLK_CTRL__PBE2_XE__MASKFULL                (IMG_UINT64_C(0xFFFFFF003F3FFFFF))
+#define RGX_CR_CLK_CTRL__S7_TOP__MASKFULL                 (IMG_UINT64_C(0xCFCF03000F3F3F0F))
+#define RGX_CR_CLK_CTRL_MASKFULL                          (IMG_UINT64_C(0xFFFFFF003F3FFFFF))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_SHIFT                   (62U)
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_CLRMSK                  (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_ON                      (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_AUTO                    (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_SHIFT                         (60U)
+#define RGX_CR_CLK_CTRL_IPP_CLRMSK                        (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_IPP_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_ON                            (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_AUTO                          (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_CLK_CTRL_FBC_SHIFT                         (58U)
+#define RGX_CR_CLK_CTRL_FBC_CLRMSK                        (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBC_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_FBC_ON                            (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_CLK_CTRL_FBC_AUTO                          (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_SHIFT                        (56U)
+#define RGX_CR_CLK_CTRL_FBDC_CLRMSK                       (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBDC_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_ON                           (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_AUTO                         (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_SHIFT                  (54U)
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_CLRMSK                 (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_ON                     (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_AUTO                   (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_CLK_CTRL_USCS_SHIFT                        (52U)
+#define RGX_CR_CLK_CTRL_USCS_CLRMSK                       (IMG_UINT64_C(0xFFCFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_USCS_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_USCS_ON                           (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_CLK_CTRL_USCS_AUTO                         (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_CLK_CTRL_PBE_SHIFT                         (50U)
+#define RGX_CR_CLK_CTRL_PBE_CLRMSK                        (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_PBE_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_PBE_ON                            (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_CLK_CTRL_PBE_AUTO                          (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_SHIFT                      (48U)
+#define RGX_CR_CLK_CTRL_MCU_L1_CLRMSK                     (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L1_OFF                        (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_ON                         (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_AUTO                       (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_CLK_CTRL_CDM_SHIFT                         (46U)
+#define RGX_CR_CLK_CTRL_CDM_CLRMSK                        (IMG_UINT64_C(0xFFFF3FFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_CDM_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_CDM_ON                            (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_CLK_CTRL_CDM_AUTO                          (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_SHIFT                    (44U)
+#define RGX_CR_CLK_CTRL_SIDEKICK_CLRMSK                   (IMG_UINT64_C(0xFFFFCFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_SIDEKICK_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_ON                       (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_AUTO                     (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT                (42U)
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK               (IMG_UINT64_C(0xFFFFF3FFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_OFF                  (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_ON                   (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_AUTO                 (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_CLK_CTRL_BIF_SHIFT                         (40U)
+#define RGX_CR_CLK_CTRL_BIF_CLRMSK                        (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_ON                            (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_CLK_CTRL_BIF_AUTO                          (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT               (28U)
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF                 (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_ON                  (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO                (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_SHIFT                      (26U)
+#define RGX_CR_CLK_CTRL_MCU_L0_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L0_OFF                        (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_ON                         (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_AUTO                       (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_CTRL_TPU_SHIFT                         (24U)
+#define RGX_CR_CLK_CTRL_TPU_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_TPU_ON                            (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_CTRL_TPU_AUTO                          (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_CTRL_USC_SHIFT                         (20U)
+#define RGX_CR_CLK_CTRL_USC_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_CTRL_USC_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_USC_ON                            (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_CTRL_USC_AUTO                          (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_CTRL_TLA_SHIFT                         (18U)
+#define RGX_CR_CLK_CTRL_TLA_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_CTRL_TLA_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_TLA_ON                            (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_CTRL_TLA_AUTO                          (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_CTRL_SLC_SHIFT                         (16U)
+#define RGX_CR_CLK_CTRL_SLC_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_CTRL_SLC_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_SLC_ON                            (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_CTRL_SLC_AUTO                          (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_CLK_CTRL_UVS_SHIFT                         (14U)
+#define RGX_CR_CLK_CTRL_UVS_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF))
+#define RGX_CR_CLK_CTRL_UVS_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_UVS_ON                            (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_CTRL_UVS_AUTO                          (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_CLK_CTRL_PDS_SHIFT                         (12U)
+#define RGX_CR_CLK_CTRL_PDS_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF))
+#define RGX_CR_CLK_CTRL_PDS_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_PDS_ON                            (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_CTRL_PDS_AUTO                          (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_CTRL_VDM_SHIFT                         (10U)
+#define RGX_CR_CLK_CTRL_VDM_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL_VDM_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_VDM_ON                            (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL_VDM_AUTO                          (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_CTRL_PM_SHIFT                          (8U)
+#define RGX_CR_CLK_CTRL_PM_CLRMSK                         (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL_PM_OFF                            (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_PM_ON                             (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL_PM_AUTO                           (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL_GPP_SHIFT                         (6U)
+#define RGX_CR_CLK_CTRL_GPP_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F))
+#define RGX_CR_CLK_CTRL_GPP_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_GPP_ON                            (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_CTRL_GPP_AUTO                          (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_CTRL_TE_SHIFT                          (4U)
+#define RGX_CR_CLK_CTRL_TE_CLRMSK                         (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL_TE_OFF                            (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_TE_ON                             (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_CTRL_TE_AUTO                           (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_CTRL_TSP_SHIFT                         (2U)
+#define RGX_CR_CLK_CTRL_TSP_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3))
+#define RGX_CR_CLK_CTRL_TSP_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_TSP_ON                            (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_CTRL_TSP_AUTO                          (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_CTRL_ISP_SHIFT                         (0U)
+#define RGX_CR_CLK_CTRL_ISP_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL_ISP_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_ISP_ON                            (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL_ISP_AUTO                          (IMG_UINT64_C(0x0000000000000002))
+
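+/* Field-programming sketch, an assumption about intended usage: a clock
+ * domain field is cleared with its _CLRMSK and then OR'd with one of the
+ * _OFF/_ON/_AUTO encodings. For example, to request automatic gating of
+ * the ISP clock (ui64ClkCtrl is a hypothetical copy of the register): */
+#if 0 /* example only */
+	ui64ClkCtrl = (ui64ClkCtrl & RGX_CR_CLK_CTRL_ISP_CLRMSK) |
+	              RGX_CR_CLK_CTRL_ISP_AUTO;
+#endif
+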
+
+/*
+    Register RGX_CR_CLK_STATUS
+*/
+#define RGX_CR_CLK_STATUS                                 (0x0008U)
+#define RGX_CR_CLK_STATUS__PBE2_XE__MASKFULL              (IMG_UINT64_C(0x00000001FFF077FF))
+#define RGX_CR_CLK_STATUS__S7_TOP__MASKFULL               (IMG_UINT64_C(0x00000001B3101773))
+#define RGX_CR_CLK_STATUS_MASKFULL                        (IMG_UINT64_C(0x00000001FFF077FF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_SHIFT                  (32U)
+#define RGX_CR_CLK_STATUS_MCU_FBTC_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_RUNNING                (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_SHIFT                 (31U)
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK                (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING               (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_STATUS_IPP_SHIFT                       (30U)
+#define RGX_CR_CLK_STATUS_IPP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_CLK_STATUS_IPP_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_IPP_RUNNING                     (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CLK_STATUS_FBC_SHIFT                       (29U)
+#define RGX_CR_CLK_STATUS_FBC_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBC_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_FBC_RUNNING                     (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_STATUS_FBDC_SHIFT                      (28U)
+#define RGX_CR_CLK_STATUS_FBDC_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBDC_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_FBDC_RUNNING                    (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_SHIFT                (27U)
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_RUNNING              (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_STATUS_USCS_SHIFT                      (26U)
+#define RGX_CR_CLK_STATUS_USCS_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_CLK_STATUS_USCS_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_USCS_RUNNING                    (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_STATUS_PBE_SHIFT                       (25U)
+#define RGX_CR_CLK_STATUS_PBE_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_CLK_STATUS_PBE_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_PBE_RUNNING                     (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_STATUS_MCU_L1_SHIFT                    (24U)
+#define RGX_CR_CLK_STATUS_MCU_L1_CLRMSK                   (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_L1_GATED                    (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_L1_RUNNING                  (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_STATUS_CDM_SHIFT                       (23U)
+#define RGX_CR_CLK_STATUS_CDM_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_CLK_STATUS_CDM_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_CDM_RUNNING                     (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_STATUS_SIDEKICK_SHIFT                  (22U)
+#define RGX_CR_CLK_STATUS_SIDEKICK_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_CLK_STATUS_SIDEKICK_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_SIDEKICK_RUNNING                (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT              (21U)
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_GATED              (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING            (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_STATUS_BIF_SHIFT                       (20U)
+#define RGX_CR_CLK_STATUS_BIF_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_RUNNING                     (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT             (14U)
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED             (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING           (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_STATUS_MCU_L0_SHIFT                    (13U)
+#define RGX_CR_CLK_STATUS_MCU_L0_CLRMSK                   (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_CLK_STATUS_MCU_L0_GATED                    (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_L0_RUNNING                  (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_STATUS_TPU_SHIFT                       (12U)
+#define RGX_CR_CLK_STATUS_TPU_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_CLK_STATUS_TPU_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_TPU_RUNNING                     (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_STATUS_USC_SHIFT                       (10U)
+#define RGX_CR_CLK_STATUS_USC_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_STATUS_USC_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_USC_RUNNING                     (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_STATUS_TLA_SHIFT                       (9U)
+#define RGX_CR_CLK_STATUS_TLA_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_STATUS_TLA_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_TLA_RUNNING                     (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_STATUS_SLC_SHIFT                       (8U)
+#define RGX_CR_CLK_STATUS_SLC_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_STATUS_SLC_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_SLC_RUNNING                     (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_STATUS_UVS_SHIFT                       (7U)
+#define RGX_CR_CLK_STATUS_UVS_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_STATUS_UVS_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_UVS_RUNNING                     (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_STATUS_PDS_SHIFT                       (6U)
+#define RGX_CR_CLK_STATUS_PDS_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_STATUS_PDS_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_PDS_RUNNING                     (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_STATUS_VDM_SHIFT                       (5U)
+#define RGX_CR_CLK_STATUS_VDM_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_STATUS_VDM_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_VDM_RUNNING                     (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_STATUS_PM_SHIFT                        (4U)
+#define RGX_CR_CLK_STATUS_PM_CLRMSK                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS_PM_GATED                        (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_PM_RUNNING                      (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS_GPP_SHIFT                       (3U)
+#define RGX_CR_CLK_STATUS_GPP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_STATUS_GPP_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_GPP_RUNNING                     (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_STATUS_TE_SHIFT                        (2U)
+#define RGX_CR_CLK_STATUS_TE_CLRMSK                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_STATUS_TE_GATED                        (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_TE_RUNNING                      (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_STATUS_TSP_SHIFT                       (1U)
+#define RGX_CR_CLK_STATUS_TSP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_STATUS_TSP_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_TSP_RUNNING                     (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_STATUS_ISP_SHIFT                       (0U)
+#define RGX_CR_CLK_STATUS_ISP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS_ISP_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_ISP_RUNNING                     (IMG_UINT64_C(0x0000000000000001))
+
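+/* Status-test sketch, an assumption: a domain's clock is running when its
+ * field, isolated with the inverted _CLRMSK, equals the _RUNNING encoding
+ * (ui64ClkStatus is a hypothetical copy of the register): */
+#if 0 /* example only */
+	IMG_BOOL bIspRunning =
+	    ((ui64ClkStatus & ~RGX_CR_CLK_STATUS_ISP_CLRMSK) ==
+	     RGX_CR_CLK_STATUS_ISP_RUNNING) ? IMG_TRUE : IMG_FALSE;
+#endif
+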
+
+/*
+    Register RGX_CR_CORE_ID
+*/
+#define RGX_CR_CORE_ID__PBVNC                             (0x0020U)
+#define RGX_CR_CORE_ID__PBVNC__MASKFULL                   (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT            (48U)
+#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK           (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT           (32U)
+#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK          (IMG_UINT64_C(0xFFFF0000FFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT (16U)
+#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF))
+#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT            (0U)
+#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_CORE_ID
+*/
+#define RGX_CR_CORE_ID                                    (0x0018U)
+#define RGX_CR_CORE_ID_MASKFULL                           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CORE_ID_ID_SHIFT                           (16U)
+#define RGX_CR_CORE_ID_ID_CLRMSK                          (0x0000FFFFU)
+#define RGX_CR_CORE_ID_CONFIG_SHIFT                       (0U)
+#define RGX_CR_CORE_ID_CONFIG_CLRMSK                      (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_CORE_REVISION
+*/
+#define RGX_CR_CORE_REVISION                              (0x0020U)
+#define RGX_CR_CORE_REVISION_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CORE_REVISION_DESIGNER_SHIFT               (24U)
+#define RGX_CR_CORE_REVISION_DESIGNER_CLRMSK              (0x00FFFFFFU)
+#define RGX_CR_CORE_REVISION_MAJOR_SHIFT                  (16U)
+#define RGX_CR_CORE_REVISION_MAJOR_CLRMSK                 (0xFF00FFFFU)
+#define RGX_CR_CORE_REVISION_MINOR_SHIFT                  (8U)
+#define RGX_CR_CORE_REVISION_MINOR_CLRMSK                 (0xFFFF00FFU)
+#define RGX_CR_CORE_REVISION_MAINTENANCE_SHIFT            (0U)
+#define RGX_CR_CORE_REVISION_MAINTENANCE_CLRMSK           (0xFFFFFF00U)
+
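+/* Decode sketch, an assumption: each sub-field is extracted by masking
+ * with the inverted _CLRMSK and shifting down, e.g. the major revision
+ * (ui32CoreRev is a hypothetical copy of the register): */
+#if 0 /* example only */
+	IMG_UINT32 ui32Major =
+	    (ui32CoreRev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK) >>
+	    RGX_CR_CORE_REVISION_MAJOR_SHIFT;
+#endif
+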
+
+/*
+    Register RGX_CR_DESIGNER_REV_FIELD1
+*/
+#define RGX_CR_DESIGNER_REV_FIELD1                        (0x0028U)
+#define RGX_CR_DESIGNER_REV_FIELD1_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT (0U)
+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_DESIGNER_REV_FIELD2
+*/
+#define RGX_CR_DESIGNER_REV_FIELD2                        (0x0030U)
+#define RGX_CR_DESIGNER_REV_FIELD2_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT (0U)
+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_CHANGESET_NUMBER
+*/
+#define RGX_CR_CHANGESET_NUMBER                           (0x0040U)
+#define RGX_CR_CHANGESET_NUMBER_MASKFULL                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT    (0U)
+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK   (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_CLK_XTPLUS_CTRL
+*/
+#define RGX_CR_CLK_XTPLUS_CTRL                            (0x0080U)
+#define RGX_CR_CLK_XTPLUS_CTRL_MASKFULL                   (IMG_UINT64_C(0x0000003FFFFF0000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_SHIFT                  (36U)
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK                 (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_ON                     (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_AUTO                   (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT                 (34U)
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK                (IMG_UINT64_C(0xFFFFFFF3FFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_OFF                   (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_ON                    (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_AUTO                  (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_SHIFT                  (32U)
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_ON                     (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_AUTO                   (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT              (30U)
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK             (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF                (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_ON                 (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO               (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT                (28U)
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_OFF                  (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_ON                   (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO                 (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT               (26U)
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_OFF                 (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_ON                  (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO                (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT                (24U)
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_OFF                  (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_ON                   (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_AUTO                 (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT           (22U)
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF             (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON              (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO            (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT       (20U)
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF         (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON          (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO        (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT           (18U)
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF             (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON              (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO            (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT             (16U)
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF               (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON                (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO              (IMG_UINT64_C(0x0000000000020000))
+
+
+/*
+    Register RGX_CR_CLK_XTPLUS_STATUS
+*/
+#define RGX_CR_CLK_XTPLUS_STATUS                          (0x0088U)
+#define RGX_CR_CLK_XTPLUS_STATUS_MASKFULL                 (IMG_UINT64_C(0x00000000000007FF))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_SHIFT                (10U)
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_RUNNING              (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_SHIFT                (9U)
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_RUNNING              (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT            (8U)
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED            (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING          (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT               (7U)
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_GATED               (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING             (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT              (6U)
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_GATED              (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING            (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT             (5U)
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_GATED             (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING           (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT              (4U)
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_GATED              (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING            (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT         (3U)
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED         (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING       (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT     (2U)
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED     (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING   (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT         (1U)
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED         (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING       (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT           (0U)
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING         (IMG_UINT64_C(0x0000000000000001))
+
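+/*
+    Illustrative sketch (same placeholder accessor as above): each STATUS
+    field is a single bit that reads back as _GATED (0) or _RUNNING (1), so
+    waiting for a clock to gate is a simple poll:
+
+    while (read_reg64(regs, RGX_CR_CLK_XTPLUS_STATUS) &
+           RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING)
+        ;  // spin until the GEOMETRY clock reports gated
+*/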
+
+/*
+    Register RGX_CR_SOFT_RESET
+*/
+#define RGX_CR_SOFT_RESET                                 (0x0100U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL              (IMG_UINT64_C(0xFFEFFFFFFFFFFC1D))
+#define RGX_CR_SOFT_RESET_MASKFULL                        (IMG_UINT64_C(0x00E7FFFFFFFFFC1D))
+#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_SHIFT             (63U)
+#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_CLRMSK            (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_EN                (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_SHIFT             (62U)
+#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_CLRMSK            (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_EN                (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_SOFT_RESET_BERNADO2_CORE_SHIFT             (61U)
+#define RGX_CR_SOFT_RESET_BERNADO2_CORE_CLRMSK            (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO2_CORE_EN                (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_SOFT_RESET_JONES_CORE_SHIFT                (60U)
+#define RGX_CR_SOFT_RESET_JONES_CORE_CLRMSK               (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_JONES_CORE_EN                   (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_SOFT_RESET_TILING_CORE_SHIFT               (59U)
+#define RGX_CR_SOFT_RESET_TILING_CORE_CLRMSK              (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TILING_CORE_EN                  (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_SOFT_RESET_TE3_SHIFT                       (58U)
+#define RGX_CR_SOFT_RESET_TE3_CLRMSK                      (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TE3_EN                          (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_SOFT_RESET_VCE_SHIFT                       (57U)
+#define RGX_CR_SOFT_RESET_VCE_CLRMSK                      (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_VCE_EN                          (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_SOFT_RESET_VBS_SHIFT                       (56U)
+#define RGX_CR_SOFT_RESET_VBS_CLRMSK                      (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_VBS_EN                          (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_SOFT_RESET_DPX1_CORE_SHIFT                 (55U)
+#define RGX_CR_SOFT_RESET_DPX1_CORE_CLRMSK                (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DPX1_CORE_EN                    (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_SOFT_RESET_DPX0_CORE_SHIFT                 (54U)
+#define RGX_CR_SOFT_RESET_DPX0_CORE_CLRMSK                (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DPX0_CORE_EN                    (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_SOFT_RESET_FBA_SHIFT                       (53U)
+#define RGX_CR_SOFT_RESET_FBA_CLRMSK                      (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FBA_EN                          (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_SOFT_RESET_FB_CDC_SHIFT                    (51U)
+#define RGX_CR_SOFT_RESET_FB_CDC_CLRMSK                   (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FB_CDC_EN                       (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_SOFT_RESET_SH_SHIFT                        (50U)
+#define RGX_CR_SOFT_RESET_SH_CLRMSK                       (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_SH_EN                           (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_SOFT_RESET_VRDM_SHIFT                      (49U)
+#define RGX_CR_SOFT_RESET_VRDM_CLRMSK                     (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_VRDM_EN                         (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_SOFT_RESET_MCU_FBTC_SHIFT                  (48U)
+#define RGX_CR_SOFT_RESET_MCU_FBTC_CLRMSK                 (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_MCU_FBTC_EN                     (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT             (47U)
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK            (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_EN                (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT             (46U)
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK            (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_EN                (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_SHIFT             (45U)
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK            (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_EN                (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_SHIFT             (44U)
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK            (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_EN                (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_SOFT_RESET_IPP_SHIFT                       (43U)
+#define RGX_CR_SOFT_RESET_IPP_CLRMSK                      (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_IPP_EN                          (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT                 (42U)
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK                (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN                    (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_SHIFT              (41U)
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_CLRMSK             (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_EN                 (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_SHIFT               (40U)
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_EN                  (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_SHIFT               (39U)
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_EN                  (IMG_UINT64_C(0x0000008000000000))
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_SHIFT               (38U)
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_EN                  (IMG_UINT64_C(0x0000004000000000))
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_SHIFT               (37U)
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_EN                  (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_SHIFT               (36U)
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_EN                  (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_SHIFT               (35U)
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_EN                  (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_SOFT_RESET_MMU_SHIFT                       (34U)
+#define RGX_CR_SOFT_RESET_MMU_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF))
+#define RGX_CR_SOFT_RESET_MMU_EN                          (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_SOFT_RESET_BIF1_SHIFT                      (33U)
+#define RGX_CR_SOFT_RESET_BIF1_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF1_EN                         (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_SOFT_RESET_GARTEN_SHIFT                    (32U)
+#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK                   (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_SOFT_RESET_GARTEN_EN                       (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_SHIFT               (31U)
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_EN                  (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_SHIFT               (30U)
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_EN                  (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_SHIFT               (29U)
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_EN                  (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_SHIFT                (28U)
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_EN                   (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_SOFT_RESET_SLC_SHIFT                       (27U)
+#define RGX_CR_SOFT_RESET_SLC_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_SOFT_RESET_SLC_EN                          (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_SOFT_RESET_TLA_SHIFT                       (26U)
+#define RGX_CR_SOFT_RESET_TLA_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_SOFT_RESET_TLA_EN                          (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_SOFT_RESET_UVS_SHIFT                       (25U)
+#define RGX_CR_SOFT_RESET_UVS_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_SOFT_RESET_UVS_EN                          (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_SOFT_RESET_TE_SHIFT                        (24U)
+#define RGX_CR_SOFT_RESET_TE_CLRMSK                       (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_SOFT_RESET_TE_EN                           (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_SOFT_RESET_GPP_SHIFT                       (23U)
+#define RGX_CR_SOFT_RESET_GPP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_SOFT_RESET_GPP_EN                          (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_SOFT_RESET_FBDC_SHIFT                      (22U)
+#define RGX_CR_SOFT_RESET_FBDC_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_SOFT_RESET_FBDC_EN                         (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_SOFT_RESET_FBC_SHIFT                       (21U)
+#define RGX_CR_SOFT_RESET_FBC_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_SOFT_RESET_FBC_EN                          (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SOFT_RESET_PM_SHIFT                        (20U)
+#define RGX_CR_SOFT_RESET_PM_CLRMSK                       (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_SOFT_RESET_PM_EN                           (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SOFT_RESET_PBE_SHIFT                       (19U)
+#define RGX_CR_SOFT_RESET_PBE_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_SOFT_RESET_PBE_EN                          (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_SOFT_RESET_USC_SHARED_SHIFT                (18U)
+#define RGX_CR_SOFT_RESET_USC_SHARED_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_SOFT_RESET_USC_SHARED_EN                   (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT                    (17U)
+#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK                   (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_SOFT_RESET_MCU_L1_EN                       (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_SOFT_RESET_BIF_SHIFT                       (16U)
+#define RGX_CR_SOFT_RESET_BIF_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_SOFT_RESET_BIF_EN                          (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SOFT_RESET_CDM_SHIFT                       (15U)
+#define RGX_CR_SOFT_RESET_CDM_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_SOFT_RESET_CDM_EN                          (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_SOFT_RESET_VDM_SHIFT                       (14U)
+#define RGX_CR_SOFT_RESET_VDM_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_SOFT_RESET_VDM_EN                          (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_SOFT_RESET_TESS_SHIFT                      (13U)
+#define RGX_CR_SOFT_RESET_TESS_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_SOFT_RESET_TESS_EN                         (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_SOFT_RESET_PDS_SHIFT                       (12U)
+#define RGX_CR_SOFT_RESET_PDS_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_SOFT_RESET_PDS_EN                          (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_SOFT_RESET_ISP_SHIFT                       (11U)
+#define RGX_CR_SOFT_RESET_ISP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_SOFT_RESET_ISP_EN                          (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_SOFT_RESET_TSP_SHIFT                       (10U)
+#define RGX_CR_SOFT_RESET_TSP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_SOFT_RESET_TSP_EN                          (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT             (4U)
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_EN                (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT                    (3U)
+#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK                   (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_SOFT_RESET_MCU_L0_EN                       (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_SOFT_RESET_TPU_SHIFT                       (2U)
+#define RGX_CR_SOFT_RESET_TPU_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_SOFT_RESET_TPU_EN                          (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_SOFT_RESET_USC_SHIFT                       (0U)
+#define RGX_CR_SOFT_RESET_USC_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_SOFT_RESET_USC_EN                          (IMG_UINT64_C(0x0000000000000001))
+
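+/*
+    Illustrative sketch, assuming the conventional assert/flush/deassert
+    reset sequencing; read_reg64()/write_reg64() remain placeholder
+    accessors, and only bits covered by the applicable MASKFULL value are
+    meaningful for a given core.
+
+    static void rgx_soft_reset_units(volatile void *regs, uint64_t units)
+    {
+        write_reg64(regs, RGX_CR_SOFT_RESET, units);  // assert reset
+        (void)read_reg64(regs, RGX_CR_SOFT_RESET);    // read back to flush
+        write_reg64(regs, RGX_CR_SOFT_RESET, 0);      // deassert
+        (void)read_reg64(regs, RGX_CR_SOFT_RESET);
+    }
+
+    Called, for example, with RGX_CR_SOFT_RESET_CDM_EN |
+    RGX_CR_SOFT_RESET_USC_EN to pulse the compute data master and USC.
+*/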
+
+/*
+    Register RGX_CR_SOFT_RESET2
+*/
+#define RGX_CR_SOFT_RESET2                                (0x0108U)
+#define RGX_CR_SOFT_RESET2_MASKFULL                       (IMG_UINT64_C(0x00000000001FFFFF))
+#define RGX_CR_SOFT_RESET2_SPFILTER_SHIFT                 (12U)
+#define RGX_CR_SOFT_RESET2_SPFILTER_CLRMSK                (0xFFE00FFFU)
+#define RGX_CR_SOFT_RESET2_TDM_SHIFT                      (11U)
+#define RGX_CR_SOFT_RESET2_TDM_CLRMSK                     (0xFFFFF7FFU)
+#define RGX_CR_SOFT_RESET2_TDM_EN                         (0x00000800U)
+#define RGX_CR_SOFT_RESET2_ASTC_SHIFT                     (10U)
+#define RGX_CR_SOFT_RESET2_ASTC_CLRMSK                    (0xFFFFFBFFU)
+#define RGX_CR_SOFT_RESET2_ASTC_EN                        (0x00000400U)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_SHIFT               (9U)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_CLRMSK              (0xFFFFFDFFU)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_EN                  (0x00000200U)
+#define RGX_CR_SOFT_RESET2_USCPS_SHIFT                    (8U)
+#define RGX_CR_SOFT_RESET2_USCPS_CLRMSK                   (0xFFFFFEFFU)
+#define RGX_CR_SOFT_RESET2_USCPS_EN                       (0x00000100U)
+#define RGX_CR_SOFT_RESET2_IPF_SHIFT                      (7U)
+#define RGX_CR_SOFT_RESET2_IPF_CLRMSK                     (0xFFFFFF7FU)
+#define RGX_CR_SOFT_RESET2_IPF_EN                         (0x00000080U)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_SHIFT                 (6U)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_CLRMSK                (0xFFFFFFBFU)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_EN                    (0x00000040U)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_SHIFT               (5U)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_CLRMSK              (0xFFFFFFDFU)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_EN                  (0x00000020U)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_SHIFT               (4U)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_CLRMSK              (0xFFFFFFEFU)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_EN                  (0x00000010U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT           (3U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK          (0xFFFFFFF7U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_EN              (0x00000008U)
+#define RGX_CR_SOFT_RESET2_PIXEL_SHIFT                    (2U)
+#define RGX_CR_SOFT_RESET2_PIXEL_CLRMSK                   (0xFFFFFFFBU)
+#define RGX_CR_SOFT_RESET2_PIXEL_EN                       (0x00000004U)
+#define RGX_CR_SOFT_RESET2_CDM_SHIFT                      (1U)
+#define RGX_CR_SOFT_RESET2_CDM_CLRMSK                     (0xFFFFFFFDU)
+#define RGX_CR_SOFT_RESET2_CDM_EN                         (0x00000002U)
+#define RGX_CR_SOFT_RESET2_VERTEX_SHIFT                   (0U)
+#define RGX_CR_SOFT_RESET2_VERTEX_CLRMSK                  (0xFFFFFFFEU)
+#define RGX_CR_SOFT_RESET2_VERTEX_EN                      (0x00000001U)
+
+
+/*
+    Register RGX_CR_EVENT_STATUS
+*/
+#define RGX_CR_EVENT_STATUS                               (0x0130U)
+#define RGX_CR_EVENT_STATUS__ROGUEXE__MASKFULL            (IMG_UINT64_C(0x00000000E005FFFF))
+#define RGX_CR_EVENT_STATUS__SIGNALS__MASKFULL            (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_STATUS_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT      (31U)
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK     (0x7FFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN         (0x80000000U)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT        (30U)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK       (0xBFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN           (0x40000000U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT  (29U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN     (0x20000000U)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT       (28U)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK      (0xEFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN          (0x10000000U)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT      (27U)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK     (0xF7FFFFFFU)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN         (0x08000000U)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT       (26U)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK      (0xFBFFFFFFU)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN          (0x04000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT        (25U)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK       (0xFDFFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN           (0x02000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT        (24U)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK       (0xFEFFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN           (0x01000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT        (23U)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK       (0xFF7FFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN           (0x00800000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT        (22U)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK       (0xFFBFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN           (0x00400000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT        (21U)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK       (0xFFDFFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN           (0x00200000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT        (20U)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK       (0xFFEFFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN           (0x00100000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT        (19U)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK       (0xFFF7FFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN           (0x00080000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT        (18U)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK       (0xFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN           (0x00040000U)
+#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_SHIFT            (17U)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK           (0xFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_EN               (0x00020000U)
+#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT  (17U)
+#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN     (0x00020000U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT    (16U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK   (0xFFFEFFFFU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN       (0x00010000U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT             (15U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK            (0xFFFF7FFFU)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN                (0x00008000U)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT            (14U)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK           (0xFFFFBFFFU)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_EN               (0x00004000U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT                (13U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK               (0xFFFFDFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN                   (0x00002000U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT                (12U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK               (0xFFFFEFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN                   (0x00001000U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT             (11U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK            (0xFFFFF7FFU)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN                (0x00000800U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT          (10U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK         (0xFFFFFBFFU)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN             (0x00000400U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT          (9U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK         (0xFFFFFDFFU)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN             (0x00000200U)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT          (8U)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK         (0xFFFFFEFFU)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN             (0x00000100U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT        (7U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK       (0xFFFFFF7FU)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN           (0x00000080U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT            (6U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK           (0xFFFFFFBFU)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN               (0x00000040U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT             (5U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK            (0xFFFFFFDFU)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN                (0x00000020U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT       (4U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK      (0xFFFFFFEFU)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN          (0x00000010U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT      (3U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK     (0xFFFFFFF7U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN         (0x00000008U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT        (2U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK       (0xFFFFFFFBU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN           (0x00000004U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT         (1U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK        (0xFFFFFFFDU)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN            (0x00000002U)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT            (0U)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK           (0xFFFFFFFEU)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_EN               (0x00000001U)
+
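+/*
+    Illustrative sketch: when this register is read, the _EN constants
+    double as test masks (placeholder accessor and handler):
+
+    uint64_t ev = read_reg64(regs, RGX_CR_EVENT_STATUS);
+    if (ev & RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN)
+        handle_mmu_page_fault();  // hypothetical handler
+
+    Note that some bit positions are defined twice (e.g. shift 18 for both
+    RDM_FC0_FINISHED and TDM_CONTEXT_STORE_FINISHED, shift 17 for both
+    SHG_FINISHED and SPFILTER_SIGNAL_UPDATE); which name applies presumably
+    depends on the core configuration.
+*/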
+
+/*
+    Register RGX_CR_TIMER
+*/
+#define RGX_CR_TIMER                                      (0x0160U)
+#define RGX_CR_TIMER_MASKFULL                             (IMG_UINT64_C(0x8000FFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_SHIFT                          (63U)
+#define RGX_CR_TIMER_BIT31_CLRMSK                         (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_EN                             (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_TIMER_VALUE_SHIFT                          (0U)
+#define RGX_CR_TIMER_VALUE_CLRMSK                         (IMG_UINT64_C(0xFFFF000000000000))
+
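+/*
+    Illustrative sketch: multi-bit fields carry no _EN constant, so a value
+    is extracted by masking with the inverted _CLRMSK and shifting down.
+    Note the top flag is named BIT31 but, per its SHIFT/EN values above,
+    actually occupies bit 63 of this 64-bit register.
+
+    uint64_t t     = read_reg64(regs, RGX_CR_TIMER);  // placeholder accessor
+    uint64_t ticks = (t & ~RGX_CR_TIMER_VALUE_CLRMSK)
+                         >> RGX_CR_TIMER_VALUE_SHIFT; // 48-bit counter value
+*/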
+
+/*
+    Register RGX_CR_TLA_STATUS
+*/
+#define RGX_CR_TLA_STATUS                                 (0x0178U)
+#define RGX_CR_TLA_STATUS_MASKFULL                        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_SHIFT                (39U)
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_CLRMSK               (IMG_UINT64_C(0x0000007FFFFFFFFF))
+#define RGX_CR_TLA_STATUS_REQUEST_SHIFT                   (7U)
+#define RGX_CR_TLA_STATUS_REQUEST_CLRMSK                  (IMG_UINT64_C(0xFFFFFF800000007F))
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT             (1U)
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFFF81))
+#define RGX_CR_TLA_STATUS_BUSY_SHIFT                      (0U)
+#define RGX_CR_TLA_STATUS_BUSY_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_TLA_STATUS_BUSY_EN                         (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_PM_PARTIAL_RENDER_ENABLE
+*/
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE                   (0x0338U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL          (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT          (0U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_SIDEKICK_IDLE
+*/
+#define RGX_CR_SIDEKICK_IDLE                              (0x03C8U)
+#define RGX_CR_SIDEKICK_IDLE_MASKFULL                     (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_SHIFT                 (6U)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK                (0xFFFFFFBFU)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_EN                    (0x00000040U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_SHIFT                    (5U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_CLRMSK                   (0xFFFFFFDFU)
+#define RGX_CR_SIDEKICK_IDLE_MMU_EN                       (0x00000020U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_SHIFT                 (4U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_CLRMSK                (0xFFFFFFEFU)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_EN                    (0x00000010U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_SHIFT                    (3U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_CLRMSK                   (0xFFFFFFF7U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_EN                       (0x00000008U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_SHIFT                 (2U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_CLRMSK                (0xFFFFFFFBU)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_EN                    (0x00000004U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_SHIFT                 (1U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK                (0xFFFFFFFDU)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_EN                    (0x00000002U)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_SHIFT                  (0U)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_CLRMSK                 (0xFFFFFFFEU)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_EN                     (0x00000001U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS                   (0x0430U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL          (IMG_UINT64_C(0x00000000000000F3))
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT   (4U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK  (0xFFFFFF0FU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN    (0x00000002U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT    (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK   (0xFFFFFFFEU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN       (0x00000001U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_TASK0
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0                    (0x0438U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT   (32U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT   (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK  (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_TASK1
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1                    (0x0440U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT   (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK  (0x00000000U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_TASK2
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2                    (0x0448U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT  (32U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT  (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_RESUME_TASK0
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0                   (0x0450U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT  (32U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT  (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_RESUME_TASK1
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1                   (0x0458U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT  (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_RESUME_TASK2
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2                   (0x0460U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT (32U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS                   (0x04A0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL          (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN    (0x00000002U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT    (0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK   (0xFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN       (0x00000001U)
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_PDS0                           (0x04A8U)
+#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL                  (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT           (36U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK          (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT      (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE       (16U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT           (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK          (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT      (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE       (16U)
+
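+/*
+    Illustrative sketch: the ALIGNSHIFT/ALIGNSIZE pairs suggest these fields
+    hold 16-byte-aligned addresses with the low four bits dropped. One
+    plausible packing (placeholder variables, interpretation assumed):
+
+    uint64_t pds0 =
+        (((uint64_t)data_addr >> RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT)
+             << RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT) |
+        (((uint64_t)code_addr >> RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT)
+             << RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT);
+*/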
+
+/*
+    Register RGX_CR_CDM_CONTEXT_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_PDS1                           (0x04B0U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL      (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL                  (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT         (29U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK        (0xDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN            (0x20000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT         (28U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK        (0xEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN            (0x10000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT  (28U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN     (0x10000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT              (27U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK             (0xF7FFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN                 (0x08000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT        (21U)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK       (0xF81FFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT       (20U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK      (0xFFEFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN          (0x00100000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT         (11U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK        (0xFFF007FFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT           (7U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK          (0xFFFFF87FU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT           (1U)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK          (0xFFFFFF81U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT               (0U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK              (0xFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_EN                  (0x00000001U)
+
+
+/*
+    Register RGX_CR_CDM_TERMINATE_PDS
+*/
+#define RGX_CR_CDM_TERMINATE_PDS                          (0x04B8U)
+#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL                 (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT          (36U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK         (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT     (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE      (16U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT          (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK         (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT     (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE      (16U)
+
+
+/*
+    Register RGX_CR_CDM_TERMINATE_PDS1
+*/
+#define RGX_CR_CDM_TERMINATE_PDS1                         (0x04C0U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL    (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL                (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT       (29U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK      (0xDFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN          (0x20000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT       (28U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK      (0xEFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN          (0x10000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN   (0x10000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT            (27U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK           (0xF7FFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_EN               (0x08000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT      (21U)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK     (0xF81FFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT     (20U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK    (0xFFEFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN        (0x00100000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT       (11U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK      (0xFFF007FFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT         (7U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK        (0xFFFFF87FU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT         (1U)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK        (0xFFFFFF81U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT             (0U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK            (0xFFFFFFFEU)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_EN                (0x00000001U)
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_LOAD_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0                      (0x04D8U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL             (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT      (36U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK     (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE  (16U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT      (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK     (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE  (16U)
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_LOAD_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1                      (0x04E0U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL             (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT    (29U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK   (0xDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN       (0x20000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT    (28U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK   (0xEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN       (0x10000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT         (27U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK        (0xF7FFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN            (0x08000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT   (21U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK  (0xF81FFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT  (20U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN     (0x00100000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT    (11U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK   (0xFFF007FFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT      (7U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK     (0xFFFFF87FU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT      (1U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK     (0xFFFFFF81U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT          (0U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_CONFIG
+*/
+#define RGX_CR_MIPS_WRAPPER_CONFIG                        (0x0810U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_MASKFULL               (IMG_UINT64_C(0x000001030F01FFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_SHIFT   (40U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_CLRMSK  (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_EN      (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_SHIFT     (33U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_CLRMSK    (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_EN        (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT     (32U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK    (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN        (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT            (25U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFF1FFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT          (24U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN             (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT    (16U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK   (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32   (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1                   (0x0818U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2                   (0x0820U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
+
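+/*
+    Illustrative sketch: going by the field names, CONFIG1 selects the
+    incoming window (BASE_ADDR_IN at shift 12 implies 4KB granularity) and
+    enables the remap, while CONFIG2 supplies the outgoing address plus the
+    OS_ID / TRUSTED / region-size attributes. A plausible setup, with
+    placeholder accessors and variables:
+
+    write_reg64(regs, RGX_CR_MIPS_ADDR_REMAP1_CONFIG2,
+        (addr_out & ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK) |
+        (size_pow2 << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT));
+    write_reg64(regs, RGX_CR_MIPS_ADDR_REMAP1_CONFIG1,
+        (addr_in & ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK) |
+        RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN);
+
+    The same layout repeats for REMAP2..REMAP5 below.
+*/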
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1                   (0x0828U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2                   (0x0830U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1                   (0x0838U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2                   (0x0840U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1                   (0x0848U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2                   (0x0850U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1                   (0x0858U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2                   (0x0860U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS            (0x0868U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL   (IMG_UINT64_C(0x00000001FFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT (32U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN   (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR             (0x0870U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL    (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN    (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG               (0x0878U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL      (IMG_UINT64_C(0xFFFFFFF7FFFFFFBF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT (36U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT   (32U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK  (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT (11U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN    (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT (7U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB (IMG_UINT64_C(0x0000000000000180))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB (IMG_UINT64_C(0x0000000000000280))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB (IMG_UINT64_C(0x0000000000000300))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB (IMG_UINT64_C(0x0000000000000380))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT   (1U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK  (IMG_UINT64_C(0xFFFFFFFFFFFFFFC1))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
+
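+/*
+    Usage sketch (illustrative, not part of the generated map): a remap
+    entry is programmed by shifting each field into place and OR-ing in
+    the enable bit. Field semantics (both addresses held as 4KB page
+    numbers) are an assumption here; WriteReg64() is a hypothetical MMIO
+    helper.
+
+        IMG_UINT64 ui64Cfg =
+            ((ui64AddrOut >> 12) << RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT) |
+            (ui64AddrIn & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK) |
+            RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB |
+            ((IMG_UINT64)ui32Entry << RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT) |
+            RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN;
+        WriteReg64(RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG, ui64Cfg);
+*/
+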
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_RANGE_READ
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ                 (0x0880U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL        (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT     (1U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK    (0xFFFFFFC1U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT   (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK  (0xFFFFFFFEU)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN      (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA                 (0x0888U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL        (IMG_UINT64_C(0xFFFFFFF7FFFFFF81))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT  (36U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT     (32U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK    (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT   (11U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK  (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN      (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT (7U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN  (IMG_UINT64_C(0x0000000000000001))
+
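+/*
+    Usage sketch (illustrative): an entry can be read back by writing its
+    index plus the REQUEST bit to RANGE_READ, then fetching RANGE_DATA,
+    which mirrors the RANGE_CONFIG field layout. ReadReg64()/WriteReg32()
+    are hypothetical MMIO helpers.
+
+        WriteReg32(RGX_CR_MIPS_ADDR_REMAP_RANGE_READ,
+                   (ui32Entry << RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT) |
+                   RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN);
+        ui64Data = ReadReg64(RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA);
+*/
+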
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_IRQ_ENABLE
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE                    (0x08A0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT        (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK       (0xFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN           (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_IRQ_STATUS
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS                    (0x08A8U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT        (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK       (0xFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN           (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_IRQ_CLEAR
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR                     (0x08B0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL            (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT         (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK        (0xFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN            (0x00000001U)
+
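+/*
+    Usage sketch (illustrative): the ENABLE/STATUS/CLEAR trio follows the
+    usual latched-event pattern: unmask once, then in the handler test the
+    status bit and acknowledge by writing the clear bit.
+    ReadReg32()/WriteReg32() are hypothetical MMIO helpers.
+
+        WriteReg32(RGX_CR_MIPS_WRAPPER_IRQ_ENABLE,
+                   RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN);
+        ...
+        if (ReadReg32(RGX_CR_MIPS_WRAPPER_IRQ_STATUS) &
+            RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN)
+        {
+            WriteReg32(RGX_CR_MIPS_WRAPPER_IRQ_CLEAR,
+                       RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN);
+        }
+*/
+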
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_NMI_ENABLE
+*/
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE                    (0x08B8U)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT        (0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK       (0xFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN           (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_NMI_EVENT
+*/
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT                     (0x08C0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL            (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT       (0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK      (0xFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN          (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_DEBUG_CONFIG
+*/
+#define RGX_CR_MIPS_DEBUG_CONFIG                          (0x08C8U)
+#define RGX_CR_MIPS_DEBUG_CONFIG_MASKFULL                 (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT (0U)
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN   (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_EXCEPTION_STATUS
+*/
+#define RGX_CR_MIPS_EXCEPTION_STATUS                      (0x08D0U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_MASKFULL             (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT       (5U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK      (0xFFFFFFDFU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN          (0x00000020U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT   (4U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK  (0xFFFFFFEFU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN      (0x00000010U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT    (3U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK   (0xFFFFFFF7U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN       (0x00000008U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT    (2U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK   (0xFFFFFFFBU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN       (0x00000004U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT         (1U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK        (0xFFFFFFFDU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN            (0x00000002U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT         (0U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK        (0xFFFFFFFEU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN            (0x00000001U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVDATAX
+*/
+#define RGX_CR_META_SP_MSLVDATAX                          (0x0A00U)
+#define RGX_CR_META_SP_MSLVDATAX_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT          (0U)
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVDATAT
+*/
+#define RGX_CR_META_SP_MSLVDATAT                          (0x0A08U)
+#define RGX_CR_META_SP_MSLVDATAT_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT          (0U)
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVCTRL0
+*/
+#define RGX_CR_META_SP_MSLVCTRL0                          (0x0A10U)
+#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT               (2U)
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK              (0x00000003U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT           (1U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK          (0xFFFFFFFDU)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN              (0x00000002U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT                 (0U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK                (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_EN                    (0x00000001U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVCTRL1
+*/
+#define RGX_CR_META_SP_MSLVCTRL1                          (0x0A18U)
+#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL                 (IMG_UINT64_C(0x00000000F7F4003F))
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT       (30U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK      (0x3FFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT    (29U)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK   (0xDFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN       (0x20000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT   (28U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK  (0xEFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN      (0x10000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT       (26U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK      (0xFBFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN          (0x04000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT       (25U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK      (0xFDFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN          (0x02000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT              (24U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK             (0xFEFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_EN                 (0x01000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT           (21U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK          (0xFF1FFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT             (20U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK            (0xFFEFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN                (0x00100000U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT          (18U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK         (0xFFFBFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN             (0x00040000U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT             (4U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK            (0xFFFFFFCFU)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT         (2U)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK        (0xFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT         (0U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK        (0xFFFFFFFCU)
+
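+/*
+    Usage sketch (illustrative): a read over the META slave port is issued
+    by writing the word-aligned address plus the RD bit to MSLVCTRL0,
+    polling MSLVCTRL1 until READY and GBLPORT_IDLE are both set, then
+    reading MSLVDATAX (defined above). Helpers are hypothetical.
+
+        WriteReg32(RGX_CR_META_SP_MSLVCTRL0,
+                   (ui32Addr & ~RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK) |
+                   RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+        do
+        {
+            ui32Ctrl = ReadReg32(RGX_CR_META_SP_MSLVCTRL1);
+        } while ((ui32Ctrl & (RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+                              RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN)) !=
+                             (RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+                              RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN));
+        ui32Val = ReadReg32(RGX_CR_META_SP_MSLVDATAX);
+*/
+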
+
+/*
+    Register RGX_CR_META_SP_MSLVHANDSHKE
+*/
+#define RGX_CR_META_SP_MSLVHANDSHKE                       (0x0A50U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL              (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT           (2U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK          (0xFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT          (0U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK         (0xFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT0KICK
+*/
+#define RGX_CR_META_SP_MSLVT0KICK                         (0x0A80U)
+#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK       (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT0KICKI
+*/
+#define RGX_CR_META_SP_MSLVT0KICKI                        (0x0A88U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK     (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT1KICK
+*/
+#define RGX_CR_META_SP_MSLVT1KICK                         (0x0A90U)
+#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK       (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT1KICKI
+*/
+#define RGX_CR_META_SP_MSLVT1KICKI                        (0x0A98U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK     (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT2KICK
+*/
+#define RGX_CR_META_SP_MSLVT2KICK                         (0x0AA0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK       (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT2KICKI
+*/
+#define RGX_CR_META_SP_MSLVT2KICKI                        (0x0AA8U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK     (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT3KICK
+*/
+#define RGX_CR_META_SP_MSLVT3KICK                         (0x0AB0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK       (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT3KICKI
+*/
+#define RGX_CR_META_SP_MSLVT3KICKI                        (0x0AB8U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK     (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVRST
+*/
+#define RGX_CR_META_SP_MSLVRST                            (0x0AC0U)
+#define RGX_CR_META_SP_MSLVRST_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT            (0U)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK           (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN               (0x00000001U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVIRQSTATUS
+*/
+#define RGX_CR_META_SP_MSLVIRQSTATUS                      (0x0AC8U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL             (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT      (3U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK     (0xFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN         (0x00000008U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT      (2U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK     (0xFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN         (0x00000004U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVIRQENABLE
+*/
+#define RGX_CR_META_SP_MSLVIRQENABLE                      (0x0AD0U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL             (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT         (3U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK        (0xFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN            (0x00000008U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT         (2U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK        (0xFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN            (0x00000004U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVIRQLEVEL
+*/
+#define RGX_CR_META_SP_MSLVIRQLEVEL                       (0x0AD8U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT            (0U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK           (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN               (0x00000001U)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE
+*/
+#define RGX_CR_MTS_SCHEDULE                               (0x0B00U)
+#define RGX_CR_MTS_SCHEDULE_MASKFULL                      (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT                    (8U)
+#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK                   (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER                 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_HOST_HOST                     (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT                (6U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK               (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0                 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1                 (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2                 (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3                 (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT                 (5U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK                (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX                 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX                (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT                    (4U)
+#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK                   (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED              (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED                  (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE_DM_SHIFT                      (0U)
+#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK                     (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM0                        (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM1                        (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM2                        (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM3                        (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM4                        (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM5                        (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM6                        (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM7                        (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL                     (0x0000000FU)
+
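+/*
+    Usage sketch (illustrative): a kick is a single write combining origin,
+    priority, context, task type and target data master; e.g. a host,
+    counted, interrupt-context kick of DM0 (WriteReg32() hypothetical):
+
+        WriteReg32(RGX_CR_MTS_SCHEDULE,
+                   RGX_CR_MTS_SCHEDULE_HOST_HOST |
+                   RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 |
+                   RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX |
+                   RGX_CR_MTS_SCHEDULE_TASK_COUNTED |
+                   RGX_CR_MTS_SCHEDULE_DM_DM0);
+*/
+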
+
+/*
+    Register RGX_CR_MTS_SCHEDULE1
+*/
+#define RGX_CR_MTS_SCHEDULE1                              (0x10B00U)
+#define RGX_CR_MTS_SCHEDULE1_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL                    (0x0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE2
+*/
+#define RGX_CR_MTS_SCHEDULE2                              (0x20B00U)
+#define RGX_CR_MTS_SCHEDULE2_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL                    (0x0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE3
+*/
+#define RGX_CR_MTS_SCHEDULE3                              (0x30B00U)
+#define RGX_CR_MTS_SCHEDULE3_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL                    (0x0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE4
+*/
+#define RGX_CR_MTS_SCHEDULE4                              (0x40B00U)
+#define RGX_CR_MTS_SCHEDULE4_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL                    (0x0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE5
+*/
+#define RGX_CR_MTS_SCHEDULE5                              (0x50B00U)
+#define RGX_CR_MTS_SCHEDULE5_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL                    (0x0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE6
+*/
+#define RGX_CR_MTS_SCHEDULE6                              (0x60B00U)
+#define RGX_CR_MTS_SCHEDULE6_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL                    (0x0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE7
+*/
+#define RGX_CR_MTS_SCHEDULE7                              (0x70B00U)
+#define RGX_CR_MTS_SCHEDULE7_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL                    (0x0000000FU)
+
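+/*
+    Note (illustrative): RGX_CR_MTS_SCHEDULE1..7 mirror RGX_CR_MTS_SCHEDULE
+    with identical field layouts at a 0x10000 stride (0x0B00, 0x10B00, ...,
+    0x70B00), one instance per OSID; a hypothetical helper:
+
+        #define RGX_CR_MTS_SCHEDULE_FOR_OS(os) \
+            (RGX_CR_MTS_SCHEDULE + ((os) * 0x10000U))
+*/
+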
+
+/*
+    Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC                 (0x0B30U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL        (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT  (0U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC                 (0x0B38U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL        (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT  (0U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC                (0x0B40U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL       (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC                (0x0B48U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL       (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG
+*/
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG                  (0x0B50U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__MASKFULL (IMG_UINT64_C(0x000FF0FFFFFFF701))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL         (IMG_UINT64_C(0x0000FFFFFFFFF001))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT (44U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFFF0FFFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT (44U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFF00FFFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT   (40U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK  (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT (12U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_SHIFT (9U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF9FF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT (8U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT  (0U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META   (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_MTS_INTCTX
+*/
+#define RGX_CR_MTS_INTCTX                                 (0x0B98U)
+#define RGX_CR_MTS_INTCTX_MASKFULL                        (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT          (22U)
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK         (0xC03FFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_PTR_SHIFT                    (18U)
+#define RGX_CR_MTS_INTCTX_DM_PTR_CLRMSK                   (0xFFC3FFFFU)
+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT             (16U)
+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK            (0xFFFCFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT         (8U)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK        (0xFFFF00FFU)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT     (0U)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK    (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX
+*/
+#define RGX_CR_MTS_BGCTX                                  (0x0BA0U)
+#define RGX_CR_MTS_BGCTX_MASKFULL                         (IMG_UINT64_C(0x0000000000003FFF))
+#define RGX_CR_MTS_BGCTX_DM_PTR_SHIFT                     (10U)
+#define RGX_CR_MTS_BGCTX_DM_PTR_CLRMSK                    (0xFFFFC3FFU)
+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT              (8U)
+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK             (0xFFFFFCFFU)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT     (0U)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK    (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE
+*/
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE                 (0x0BA8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT       (56U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK      (IMG_UINT64_C(0x00FFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT       (48U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK      (IMG_UINT64_C(0xFF00FFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT       (40U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK      (IMG_UINT64_C(0xFFFF00FFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT       (32U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK      (IMG_UINT64_C(0xFFFFFF00FFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT       (24U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK      (IMG_UINT64_C(0xFFFFFFFF00FFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT       (16U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFF00FFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT       (8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFF00FF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT       (0U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+    Register RGX_CR_MTS_GPU_INT_STATUS
+*/
+#define RGX_CR_MTS_GPU_INT_STATUS                         (0x0BB0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT            (0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK           (0x00000000U)
+
+
+/*
+    Register RGX_CR_IRQ_OS0_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS0_EVENT_STATUS                       (0x0BD8U)
+#define RGX_CR_IRQ_OS0_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS0_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR                        (0x0BE8U)
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS1_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS1_EVENT_STATUS                       (0x10BD8U)
+#define RGX_CR_IRQ_OS1_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS1_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR                        (0x10BE8U)
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS2_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS2_EVENT_STATUS                       (0x20BD8U)
+#define RGX_CR_IRQ_OS2_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS2_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR                        (0x20BE8U)
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS3_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS3_EVENT_STATUS                       (0x30BD8U)
+#define RGX_CR_IRQ_OS3_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS3_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR                        (0x30BE8U)
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS4_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS4_EVENT_STATUS                       (0x40BD8U)
+#define RGX_CR_IRQ_OS4_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS4_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR                        (0x40BE8U)
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS5_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS5_EVENT_STATUS                       (0x50BD8U)
+#define RGX_CR_IRQ_OS5_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS5_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR                        (0x50BE8U)
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS6_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS6_EVENT_STATUS                       (0x60BD8U)
+#define RGX_CR_IRQ_OS6_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS6_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR                        (0x60BE8U)
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS7_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS7_EVENT_STATUS                       (0x70BD8U)
+#define RGX_CR_IRQ_OS7_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS7_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR                        (0x70BE8U)
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
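+/*
+    Note (illustrative): the per-OS IRQ event registers likewise repeat at
+    a 0x10000 stride (STATUS at 0xBD8, CLEAR at 0xBE8 within each bank);
+    hypothetical helpers:
+
+        #define RGX_CR_IRQ_OS_EVENT_STATUS(os) \
+            (RGX_CR_IRQ_OS0_EVENT_STATUS + ((os) * 0x10000U))
+        #define RGX_CR_IRQ_OS_EVENT_CLEAR(os) \
+            (RGX_CR_IRQ_OS0_EVENT_CLEAR + ((os) * 0x10000U))
+*/
+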
+
+/*
+    Register RGX_CR_META_BOOT
+*/
+#define RGX_CR_META_BOOT                                  (0x0BF8U)
+#define RGX_CR_META_BOOT_MASKFULL                         (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_BOOT_MODE_SHIFT                       (0U)
+#define RGX_CR_META_BOOT_MODE_CLRMSK                      (0xFFFFFFFEU)
+#define RGX_CR_META_BOOT_MODE_EN                          (0x00000001U)
+
+
+/*
+    Register RGX_CR_GARTEN_SLC
+*/
+#define RGX_CR_GARTEN_SLC                                 (0x0BB8U)
+#define RGX_CR_GARTEN_SLC_MASKFULL                        (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT           (0U)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_PPP
+*/
+#define RGX_CR_PPP                                        (0x0CD0U)
+#define RGX_CR_PPP_MASKFULL                               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_CHECKSUM_SHIFT                         (0U)
+#define RGX_CR_PPP_CHECKSUM_CLRMSK                        (0x00000000U)
+
+
+#define RGX_CR_ISP_RENDER_DIR_TYPE_MASK                   (0x00000003U)
+/* Top-left to bottom-right */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_TL2BR                  (0x00000000U)
+/* Top-right to bottom-left */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_TR2BL                  (0x00000001U)
+/* Bottom-left to top-right */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_BL2TR                  (0x00000002U)
+/* Bottom-right to top-left */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_BR2TL                  (0x00000003U)
+
+
+#define RGX_CR_ISP_RENDER_MODE_TYPE_MASK                  (0x00000003U)
+/* Normal render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_NORM                  (0x00000000U)
+/* Fast 2D render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_2D               (0x00000002U)
+/* Fast scale render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE            (0x00000003U)
+
+
+/*
+    Register RGX_CR_ISP_RENDER
+*/
+#define RGX_CR_ISP_RENDER                                 (0x0F08U)
+#define RGX_CR_ISP_RENDER_MASKFULL                        (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_ISP_RENDER_RESUME_SHIFT                    (4U)
+#define RGX_CR_ISP_RENDER_RESUME_CLRMSK                   (0xFFFFFFEFU)
+#define RGX_CR_ISP_RENDER_RESUME_EN                       (0x00000010U)
+#define RGX_CR_ISP_RENDER_DIR_SHIFT                       (2U)
+#define RGX_CR_ISP_RENDER_DIR_CLRMSK                      (0xFFFFFFF3U)
+#define RGX_CR_ISP_RENDER_DIR_TL2BR                       (0x00000000U)
+#define RGX_CR_ISP_RENDER_DIR_TR2BL                       (0x00000004U)
+#define RGX_CR_ISP_RENDER_DIR_BL2TR                       (0x00000008U)
+#define RGX_CR_ISP_RENDER_DIR_BR2TL                       (0x0000000CU)
+#define RGX_CR_ISP_RENDER_MODE_SHIFT                      (0U)
+#define RGX_CR_ISP_RENDER_MODE_CLRMSK                     (0xFFFFFFFCU)
+#define RGX_CR_ISP_RENDER_MODE_NORM                       (0x00000000U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_2D                    (0x00000002U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE                 (0x00000003U)
+
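+/*
+    Usage sketch (illustrative): the two-bit DIR and MODE fields use the
+    RGX_CR_ISP_RENDER_DIR_TYPE_* / RGX_CR_ISP_RENDER_MODE_TYPE_* encodings
+    above; e.g. resuming a fast-scale render that sweeps bottom-right to
+    top-left (WriteReg32() hypothetical):
+
+        WriteReg32(RGX_CR_ISP_RENDER,
+                   RGX_CR_ISP_RENDER_RESUME_EN |
+                   RGX_CR_ISP_RENDER_DIR_BR2TL |
+                   RGX_CR_ISP_RENDER_MODE_FAST_SCALE);
+*/
+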
+
+/*
+    Register RGX_CR_ISP_CTL
+*/
+#define RGX_CR_ISP_CTL                                    (0x0F38U)
+#define RGX_CR_ISP_CTL_MASKFULL                           (IMG_UINT64_C(0x000000007FFFF3FF))
+#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT                   (30U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK                  (0xBFFFFFFFU)
+#define RGX_CR_ISP_CTL_LINE_STYLE_EN                      (0x40000000U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_SHIFT               (29U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_CLRMSK              (0xDFFFFFFFU)
+#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_EN                  (0x20000000U)
+#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_SHIFT              (28U)
+#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_CLRMSK             (0xEFFFFFFFU)
+#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_EN                 (0x10000000U)
+#define RGX_CR_ISP_CTL_PAIR_TILES_SHIFT                   (27U)
+#define RGX_CR_ISP_CTL_PAIR_TILES_CLRMSK                  (0xF7FFFFFFU)
+#define RGX_CR_ISP_CTL_PAIR_TILES_EN                      (0x08000000U)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_SHIFT                  (26U)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK                 (0xFBFFFFFFU)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_EN                     (0x04000000U)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_SHIFT                  (25U)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_CLRMSK                 (0xFDFFFFFFU)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_EN                     (0x02000000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT          (23U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK         (0xFE7FFFFFU)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9            (0x00000000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10           (0x00800000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL            (0x01000000U)
+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT            (21U)
+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK           (0xFF9FFFFFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT                 (20U)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK                (0xFFEFFFFFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN                    (0x00100000U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT           (19U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK          (0xFFF7FFFFU)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN              (0x00080000U)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT     (18U)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK    (0xFFFBFFFFU)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN        (0x00040000U)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT          (17U)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK         (0xFFFDFFFFU)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN             (0x00020000U)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_SHIFT                   (16U)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_CLRMSK                  (0xFFFEFFFFU)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_EN                      (0x00010000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_SHIFT                  (12U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_CLRMSK                 (0xFFFF0FFFU)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE               (0x00000000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO               (0x00001000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE             (0x00002000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR              (0x00003000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE              (0x00004000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX               (0x00005000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN             (0x00006000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT             (0x00007000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE              (0x00008000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN               (0x00009000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN            (0x0000A000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE            (0x0000B000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN          (0x0000C000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN          (0x0000D000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN           (0x0000E000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN           (0x0000F000U)
+#define RGX_CR_ISP_CTL_VALID_ID_SHIFT                     (4U)
+#define RGX_CR_ISP_CTL_VALID_ID_CLRMSK                    (0xFFFFFC0FU)
+#define RGX_CR_ISP_CTL_UPASS_START_SHIFT                  (0U)
+#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK                 (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_ISP_STATUS
+*/
+#define RGX_CR_ISP_STATUS                                 (0x1038U)
+#define RGX_CR_ISP_STATUS_MASKFULL                        (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_SHIFT                 (2U)
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_CLRMSK                (0xFFFFFFFBU)
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_EN                    (0x00000004U)
+#define RGX_CR_ISP_STATUS_ACTIVE_SHIFT                    (1U)
+#define RGX_CR_ISP_STATUS_ACTIVE_CLRMSK                   (0xFFFFFFFDU)
+#define RGX_CR_ISP_STATUS_ACTIVE_EN                       (0x00000002U)
+#define RGX_CR_ISP_STATUS_EOR_SHIFT                       (0U)
+#define RGX_CR_ISP_STATUS_EOR_CLRMSK                      (0xFFFFFFFEU)
+#define RGX_CR_ISP_STATUS_EOR_EN                          (0x00000001U)
+
+
+/*
+    Register group: RGX_CR_ISP_XTP_RESUME, with 64 repeats
+*/
+#define RGX_CR_ISP_XTP_RESUME_REPEATCOUNT                 (64)
+/*
+    Register RGX_CR_ISP_XTP_RESUME0
+*/
+#define RGX_CR_ISP_XTP_RESUME0                            (0x3A00U)
+#define RGX_CR_ISP_XTP_RESUME0_MASKFULL                   (IMG_UINT64_C(0x00000000003FF3FF))
+#define RGX_CR_ISP_XTP_RESUME0_TILE_X_SHIFT               (12U)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK              (0xFFC00FFFU)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT               (0U)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK              (0xFFFFFC00U)
+
+
+/*
+    Register group: RGX_CR_ISP_XTP_STORE, with 32 repeats
+*/
+#define RGX_CR_ISP_XTP_STORE_REPEATCOUNT                  (32)
+/*
+    Register RGX_CR_ISP_XTP_STORE0
+*/
+#define RGX_CR_ISP_XTP_STORE0                             (0x3C00U)
+#define RGX_CR_ISP_XTP_STORE0_MASKFULL                    (IMG_UINT64_C(0x000000007F3FF3FF))
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_SHIFT                (30U)
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK               (0xBFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_EN                   (0x40000000U)
+#define RGX_CR_ISP_XTP_STORE0_EOR_SHIFT                   (29U)
+#define RGX_CR_ISP_XTP_STORE0_EOR_CLRMSK                  (0xDFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_EOR_EN                      (0x20000000U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT             (28U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK            (0xEFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_EN                (0x10000000U)
+#define RGX_CR_ISP_XTP_STORE0_MT_SHIFT                    (24U)
+#define RGX_CR_ISP_XTP_STORE0_MT_CLRMSK                   (0xF0FFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_X_SHIFT                (12U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_X_CLRMSK               (0xFFC00FFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_Y_SHIFT                (0U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK               (0xFFFFFC00U)
+
+
+/*
+    Register group: RGX_CR_BIF_CAT_BASE, with 8 repeats
+*/
+#define RGX_CR_BIF_CAT_BASE_REPEATCOUNT                   (8)
+/*
+    Register RGX_CR_BIF_CAT_BASE0
+*/
+#define RGX_CR_BIF_CAT_BASE0                              (0x1200U)
+#define RGX_CR_BIF_CAT_BASE0_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE               (4096U)
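+
+/*
+    Usage sketch (editor's illustration): ADDR holds a 4 KB-aligned
+    address in bits 39..12, so ALIGNSHIFT and SHIFT are both 12. For a
+    page-aligned ui64PhysAddr the register value reduces to:
+
+        ui64Reg = (ui64PhysAddr >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
+                      << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT;
+
+    i.e. the double shift only strips the (already zero) alignment bits;
+    (ui64Reg & RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK) == 0 can then serve as
+    a range check.
+*/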
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE1
+*/
+#define RGX_CR_BIF_CAT_BASE1                              (0x1208U)
+#define RGX_CR_BIF_CAT_BASE1_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE1_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE1_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE2
+*/
+#define RGX_CR_BIF_CAT_BASE2                              (0x1210U)
+#define RGX_CR_BIF_CAT_BASE2_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE2_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE2_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE3
+*/
+#define RGX_CR_BIF_CAT_BASE3                              (0x1218U)
+#define RGX_CR_BIF_CAT_BASE3_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE3_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE3_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE4
+*/
+#define RGX_CR_BIF_CAT_BASE4                              (0x1220U)
+#define RGX_CR_BIF_CAT_BASE4_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE4_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE4_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE5
+*/
+#define RGX_CR_BIF_CAT_BASE5                              (0x1228U)
+#define RGX_CR_BIF_CAT_BASE5_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE5_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE5_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE6
+*/
+#define RGX_CR_BIF_CAT_BASE6                              (0x1230U)
+#define RGX_CR_BIF_CAT_BASE6_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE6_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE6_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE7
+*/
+#define RGX_CR_BIF_CAT_BASE7                              (0x1238U)
+#define RGX_CR_BIF_CAT_BASE7_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE7_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE7_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE_INDEX
+*/
+#define RGX_CR_BIF_CAT_BASE_INDEX                         (0x1240U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_MASKFULL                (IMG_UINT64_C(0x00070707073F0707))
+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT              (48U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK             (IMG_UINT64_C(0xFFF8FFFFFFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT               (40U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK              (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT              (32U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK             (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT               (24U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_SHIFT               (19U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFC7FFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT               (16U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFF8FFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT             (8U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFF8FF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_SHIFT                (0U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFFFFFF8))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_VCE0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0                       (0x1248U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL              (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT       (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK      (IMG_UINT64_C(0xF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT            (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK           (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT            (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN               (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT           (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN              (IMG_UINT64_C(0x0000000000000001))
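+
+/*
+    Usage sketch (editor's illustration): the PM catalogue bases pack an
+    initial page number, a 4 KB-aligned address and a valid bit into one
+    64-bit value; with ui64Addr assumed page aligned and ui32InitPage in
+    range, a value could be composed as:
+
+        ui64Reg = (((IMG_UINT64)ui32InitPage
+                       << RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT)
+                      & ~RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK)
+                | (ui64Addr & ~RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK)
+                | RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN;
+*/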
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_TE0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_TE0                        (0x1250U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_MASKFULL               (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT        (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK       (IMG_UINT64_C(0xF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT             (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK            (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT             (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN                (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT            (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_EN               (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_ALIST0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0                     (0x1260U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT     (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0xF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT          (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK         (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT          (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN             (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT         (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN            (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_VCE1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1                       (0x1268U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL              (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT       (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK      (IMG_UINT64_C(0xF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT            (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK           (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT            (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN               (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT           (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN              (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_TE1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_TE1                        (0x1270U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_MASKFULL               (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT        (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK       (IMG_UINT64_C(0xF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT             (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK            (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT             (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN                (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT            (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_EN               (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_ALIST1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1                     (0x1280U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT     (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0xF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT          (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK         (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT          (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN             (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT         (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN            (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_MMU_ENTRY_STATUS
+*/
+#define RGX_CR_BIF_MMU_ENTRY_STATUS                       (0x1288U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFF0F3))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT         (12U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK        (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT        (4U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT       (0U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+    Register RGX_CR_BIF_MMU_ENTRY
+*/
+#define RGX_CR_BIF_MMU_ENTRY                              (0x1290U)
+#define RGX_CR_BIF_MMU_ENTRY_MASKFULL                     (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_SHIFT                 (1U)
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK                (0xFFFFFFFDU)
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_EN                    (0x00000002U)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_SHIFT                (0U)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_CLRMSK               (0xFFFFFFFEU)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_EN                   (0x00000001U)
+
+
+/*
+    Register RGX_CR_BIF_CTRL_INVAL
+*/
+#define RGX_CR_BIF_CTRL_INVAL                             (0x12A0U)
+#define RGX_CR_BIF_CTRL_INVAL_MASKFULL                    (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_SHIFT                  (3U)
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_CLRMSK                 (0xFFFFFFF7U)
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_EN                     (0x00000008U)
+#define RGX_CR_BIF_CTRL_INVAL_PC_SHIFT                    (2U)
+#define RGX_CR_BIF_CTRL_INVAL_PC_CLRMSK                   (0xFFFFFFFBU)
+#define RGX_CR_BIF_CTRL_INVAL_PC_EN                       (0x00000004U)
+#define RGX_CR_BIF_CTRL_INVAL_PD_SHIFT                    (1U)
+#define RGX_CR_BIF_CTRL_INVAL_PD_CLRMSK                   (0xFFFFFFFDU)
+#define RGX_CR_BIF_CTRL_INVAL_PD_EN                       (0x00000002U)
+#define RGX_CR_BIF_CTRL_INVAL_PT_SHIFT                    (0U)
+#define RGX_CR_BIF_CTRL_INVAL_PT_CLRMSK                   (0xFFFFFFFEU)
+#define RGX_CR_BIF_CTRL_INVAL_PT_EN                       (0x00000001U)
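+
+/*
+    Usage sketch (editor's illustration): a full MMU cache invalidation
+    can be requested by setting all four bits in one write; WriteReg32 is
+    a hypothetical register-write helper, not part of this file:
+
+        WriteReg32(RGX_CR_BIF_CTRL_INVAL,
+                   RGX_CR_BIF_CTRL_INVAL_TLB1_EN |
+                   RGX_CR_BIF_CTRL_INVAL_PC_EN |
+                   RGX_CR_BIF_CTRL_INVAL_PD_EN |
+                   RGX_CR_BIF_CTRL_INVAL_PT_EN);
+*/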
+
+
+/*
+    Register RGX_CR_BIF_CTRL
+*/
+#define RGX_CR_BIF_CTRL                                   (0x12A8U)
+#define RGX_CR_BIF_CTRL__XE_MEM__MASKFULL                 (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_BIF_CTRL_MASKFULL                          (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_SHIFT              (8U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_CLRMSK             (0xFFFFFEFFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_EN                 (0x00000100U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT     (7U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK    (0xFFFFFF7FU)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN        (0x00000080U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT    (6U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK   (0xFFFFFFBFU)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN       (0x00000040U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT              (5U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK             (0xFFFFFFDFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN                 (0x00000020U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT              (4U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK             (0xFFFFFFEFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN                 (0x00000010U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_SHIFT                  (3U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK                 (0xFFFFFFF7U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_EN                     (0x00000008U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT                (2U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK               (0xFFFFFFFBU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_EN                   (0x00000004U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT              (1U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK             (0xFFFFFFFDU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN                 (0x00000002U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT              (0U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK             (0xFFFFFFFEU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN                 (0x00000001U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK0_MMU_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS                 (0x12B0U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL        (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT  (12U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT  (4U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN     (0x00000010U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT     (0U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK    (0xFFFFFFFEU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN        (0x00000001U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK0_REQ_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS                 (0x12B8U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__MASKFULL (IMG_UINT64_C(0x001FFFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL        (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_SHIFT (52U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT       (50U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK      (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN          (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT (46U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK (IMG_UINT64_C(0xFFF03FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT    (44U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK   (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT    (40U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK   (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT (40U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFC0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT   (4U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK  (IMG_UINT64_C(0xFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK1_MMU_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS                 (0x12C0U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL        (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT  (12U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT  (4U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN     (0x00000010U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT     (0U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK    (0xFFFFFFFEU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN        (0x00000001U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK1_REQ_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS                 (0x12C8U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL        (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT       (50U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK      (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN          (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT    (44U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK   (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT    (40U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK   (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT   (4U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK  (IMG_UINT64_C(0xFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register RGX_CR_BIF_MMU_STATUS
+*/
+#define RGX_CR_BIF_MMU_STATUS                             (0x12D0U)
+#define RGX_CR_BIF_MMU_STATUS__XE_MEM__MASKFULL           (IMG_UINT64_C(0x000000001FFFFFF7))
+#define RGX_CR_BIF_MMU_STATUS_MASKFULL                    (IMG_UINT64_C(0x000000001FFFFFF7))
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT              (28U)
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK             (0xEFFFFFFFU)
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_EN                 (0x10000000U)
+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT               (20U)
+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK              (0xF00FFFFFU)
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT               (12U)
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK              (0xFFF00FFFU)
+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT               (4U)
+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK              (0xFFFFF00FU)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_SHIFT               (2U)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_CLRMSK              (0xFFFFFFFBU)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_EN                  (0x00000004U)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_SHIFT                (1U)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK               (0xFFFFFFFDU)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_EN                   (0x00000002U)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_SHIFT                  (0U)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_CLRMSK                 (0xFFFFFFFEU)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_EN                     (0x00000001U)
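+
+/*
+    Usage sketch (editor's illustration): multi-bit status fields are
+    decoded by masking with the inverse of _CLRMSK and shifting down,
+    e.g. extracting the page-table request count from a raw 32-bit read
+    ui32Status:
+
+        ui32PtData = (ui32Status & ~RGX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK)
+                         >> RGX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT;
+*/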
+
+
+/*
+    Register RGX_CR_BIF_READS_EXT_STATUS
+*/
+#define RGX_CR_BIF_READS_EXT_STATUS                       (0x1320U)
+#define RGX_CR_BIF_READS_EXT_STATUS_MASKFULL              (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_SHIFT             (16U)
+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK            (0xF000FFFFU)
+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT           (0U)
+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK          (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIF_READS_INT_STATUS
+*/
+#define RGX_CR_BIF_READS_INT_STATUS                       (0x1328U)
+#define RGX_CR_BIF_READS_INT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000007FFFFFF))
+#define RGX_CR_BIF_READS_INT_STATUS_MMU_SHIFT             (16U)
+#define RGX_CR_BIF_READS_INT_STATUS_MMU_CLRMSK            (0xF800FFFFU)
+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_SHIFT           (0U)
+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK          (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIFPM_READS_INT_STATUS
+*/
+#define RGX_CR_BIFPM_READS_INT_STATUS                     (0x1330U)
+#define RGX_CR_BIFPM_READS_INT_STATUS_MASKFULL            (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT         (0U)
+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK        (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIFPM_READS_EXT_STATUS
+*/
+#define RGX_CR_BIFPM_READS_EXT_STATUS                     (0x1338U)
+#define RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL            (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT         (0U)
+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK        (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIFPM_STATUS_MMU
+*/
+#define RGX_CR_BIFPM_STATUS_MMU                           (0x1350U)
+#define RGX_CR_BIFPM_STATUS_MMU_MASKFULL                  (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT            (0U)
+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK           (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_BIF_STATUS_MMU
+*/
+#define RGX_CR_BIF_STATUS_MMU                             (0x1358U)
+#define RGX_CR_BIF_STATUS_MMU_MASKFULL                    (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_SHIFT              (0U)
+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK             (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_READ
+*/
+#define RGX_CR_BIF_FAULT_READ                             (0x13E0U)
+#define RGX_CR_BIF_FAULT_READ_MASKFULL                    (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_SHIFT               (4U)
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_CLRMSK              (IMG_UINT64_C(0xFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT          (4U)
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE           (16U)
+
+
+/*
+    Register RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS
+*/
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS           (0x1430U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL  (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0x00000010U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN  (0x00000001U)
+
+
+/*
+    Register RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS
+*/
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS           (0x1438U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL  (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN    (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register RGX_CR_MCU_FENCE
+*/
+#define RGX_CR_MCU_FENCE                                  (0x1740U)
+#define RGX_CR_MCU_FENCE_MASKFULL                         (IMG_UINT64_C(0x000007FFFFFFFFE0))
+#define RGX_CR_MCU_FENCE_DM_SHIFT                         (40U)
+#define RGX_CR_MCU_FENCE_DM_CLRMSK                        (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_MCU_FENCE_DM_VERTEX                        (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MCU_FENCE_DM_PIXEL                         (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_MCU_FENCE_DM_COMPUTE                       (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_MCU_FENCE_DM_RAY_VERTEX                    (IMG_UINT64_C(0x0000030000000000))
+#define RGX_CR_MCU_FENCE_DM_RAY                           (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_MCU_FENCE_DM_FASTRENDER                    (IMG_UINT64_C(0x0000050000000000))
+#define RGX_CR_MCU_FENCE_ADDR_SHIFT                       (5U)
+#define RGX_CR_MCU_FENCE_ADDR_CLRMSK                      (IMG_UINT64_C(0xFFFFFF000000001F))
+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT                  (5U)
+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE                   (32U)
+
+
+/*
+    Register RGX_CR_SPFILTER_SIGNAL_DESCR
+*/
+#define RGX_CR_SPFILTER_SIGNAL_DESCR                      (0x2700U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MASKFULL             (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT           (0U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK          (0xFFFF0000U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT      (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE       (16U)
+
+
+/*
+    Register RGX_CR_SPFILTER_SIGNAL_DESCR_MIN
+*/
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN                  (0x2708U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL         (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT       (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK      (IMG_UINT64_C(0xFFFFFF000000000F))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT  (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE   (16U)
+
+
+/*
+    Register RGX_CR_SLC_CTRL_MISC
+*/
+#define RGX_CR_SLC_CTRL_MISC                              (0x3800U)
+#define RGX_CR_SLC_CTRL_MISC_MASKFULL                     (IMG_UINT64_C(0xFFFFFFFF00FF010F))
+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT          (32U)
+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT       (16U)
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFF00FFFF))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 (IMG_UINT64_C(0x0000000000110000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1   (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE (IMG_UINT64_C(0x0000000000210000))
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_SHIFT                  (8U)
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_EN                     (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_SHIFT          (3U)
+#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_EN             (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT  (2U)
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN     (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT (1U)
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN   (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT  (0U)
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN     (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_SLC_CTRL_FLUSH_INVAL
+*/
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL                       (0x3818U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL              (IMG_UINT64_C(0x0000000080000FFF))
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT            (31U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK           (0x7FFFFFFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN               (0x80000000U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_SHIFT   (11U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_CLRMSK  (0xFFFFF7FFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_EN      (0x00000800U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT   (10U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK  (0xFFFFFBFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN      (0x00000400U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT          (9U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK         (0xFFFFFDFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN             (0x00000200U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT          (8U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK         (0xFFFFFEFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN             (0x00000100U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT          (7U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK         (0xFFFFFF7FU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN             (0x00000080U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT          (6U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK         (0xFFFFFFBFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN             (0x00000040U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT    (5U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK   (0xFFFFFFDFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN       (0x00000020U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT          (4U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK         (0xFFFFFFEFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN             (0x00000010U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT      (3U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK     (0xFFFFFFF7U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN         (0x00000008U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT        (2U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK       (0xFFFFFFFBU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN           (0x00000004U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT           (1U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK          (0xFFFFFFFDU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN              (0x00000002U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT             (0U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK            (0xFFFFFFFEU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN                (0x00000001U)
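+
+/*
+    Usage sketch (editor's illustration): a global SLC flush+invalidate
+    is requested via the ALL bit; judging by the field names, completion
+    can then be polled through RGX_CR_SLC_STATUS0 (defined below).
+    WriteReg32/ReadReg32 are hypothetical helpers:
+
+        WriteReg32(RGX_CR_SLC_CTRL_FLUSH_INVAL,
+                   RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN);
+        while (ReadReg32(RGX_CR_SLC_STATUS0) &
+               RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN)
+            ;  /* spin until the pending bit clears */
+*/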
+
+
+/*
+    Register RGX_CR_SLC_STATUS0
+*/
+#define RGX_CR_SLC_STATUS0                                (0x3820U)
+#define RGX_CR_SLC_STATUS0_MASKFULL                       (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT      (2U)
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK     (0xFFFFFFFBU)
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN         (0x00000004U)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_SHIFT            (1U)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK           (0xFFFFFFFDU)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_EN               (0x00000002U)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT            (0U)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK           (0xFFFFFFFEU)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN               (0x00000001U)
+
+
+/*
+    Register RGX_CR_SLC_CTRL_BYPASS
+*/
+#define RGX_CR_SLC_CTRL_BYPASS                            (0x3828U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__MASKFULL          (IMG_UINT64_C(0x001FFFFFFFFF7FFF))
+#define RGX_CR_SLC_CTRL_BYPASS_MASKFULL                   (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_SHIFT     (52U)
+#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_CLRMSK    (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_EN        (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_SHIFT       (51U)
+#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_CLRMSK      (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_EN          (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_SHIFT              (50U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_CLRMSK             (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_EN                 (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_SHIFT         (49U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_CLRMSK        (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_EN            (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_SHIFT         (48U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_CLRMSK        (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_EN            (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_SHIFT         (47U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_CLRMSK        (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_EN            (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_SHIFT         (46U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_CLRMSK        (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_EN            (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_SHIFT              (45U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_CLRMSK             (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_EN                 (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_SHIFT              (44U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_CLRMSK             (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_EN                 (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_SHIFT              (43U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_CLRMSK             (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_EN                 (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_SHIFT               (42U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_CLRMSK              (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_EN                  (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_SHIFT              (41U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_CLRMSK             (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_EN                 (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_SHIFT              (40U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_CLRMSK             (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_EN                 (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_SHIFT   (39U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_CLRMSK  (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_EN      (IMG_UINT64_C(0x0000008000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_SHIFT          (38U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_CLRMSK         (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_EN             (IMG_UINT64_C(0x0000004000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_SHIFT     (37U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_CLRMSK    (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_EN        (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_SHIFT              (36U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_CLRMSK             (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_EN                 (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_SHIFT    (35U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_CLRMSK   (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_EN       (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_SHIFT    (34U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_CLRMSK   (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_EN       (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_SHIFT           (33U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_CLRMSK          (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_EN              (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_SHIFT           (32U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_CLRMSK          (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_EN              (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_SHIFT        (31U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_CLRMSK       (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_EN           (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_SHIFT          (30U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_EN             (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_SHIFT             (29U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_EN                (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_SHIFT            (28U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_EN               (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT        (27U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN           (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT               (26U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_EN                  (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT          (25U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN             (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT              (24U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN                 (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT             (23U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN                (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT              (22U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_EN                 (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT             (21U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN                (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT               (20U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN                  (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT              (19U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_EN                 (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT              (18U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_EN                 (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT              (17U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_EN                 (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT           (16U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN              (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT          (15U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN             (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT              (14U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_EN                 (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT             (13U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_EN                (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT             (12U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_EN                (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT           (11U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN              (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT           (10U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN              (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT           (9U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN              (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT               (8U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_EN                  (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT               (7U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_EN                  (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT               (6U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_EN                  (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT         (5U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN            (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT               (4U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_EN                  (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT           (3U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN              (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT             (2U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN                (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT                (1U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_EN                   (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_SHIFT                  (0U)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_EN                     (IMG_UINT64_C(0x0000000000000001))
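+
+/*
+    Usage sketch (editor's illustration): the bypass controls are
+    independent single-bit enables, so several requesters can be routed
+    around the SLC with one 64-bit value, e.g. bypassing all MMU
+    page-walk traffic:
+
+        ui64Bypass = RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN |
+                     RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN |
+                     RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN;
+*/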
+
+
+/*
+    Register RGX_CR_SLC_STATUS1
+*/
+#define RGX_CR_SLC_STATUS1                                (0x3870U)
+#define RGX_CR_SLC_STATUS1_MASKFULL                       (IMG_UINT64_C(0x800003FF03FFFFFF))
+#define RGX_CR_SLC_STATUS1_PAUSED_SHIFT                   (63U)
+#define RGX_CR_SLC_STATUS1_PAUSED_CLRMSK                  (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF))
+#define RGX_CR_SLC_STATUS1_PAUSED_EN                      (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_SLC_STATUS1_READS1_SHIFT                   (32U)
+#define RGX_CR_SLC_STATUS1_READS1_CLRMSK                  (IMG_UINT64_C(0xFFFFFC00FFFFFFFF))
+#define RGX_CR_SLC_STATUS1_READS0_SHIFT                   (16U)
+#define RGX_CR_SLC_STATUS1_READS0_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFC00FFFF))
+#define RGX_CR_SLC_STATUS1_READS1_EXT_SHIFT               (8U)
+#define RGX_CR_SLC_STATUS1_READS1_EXT_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFF00FF))
+#define RGX_CR_SLC_STATUS1_READS0_EXT_SHIFT               (0U)
+#define RGX_CR_SLC_STATUS1_READS0_EXT_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
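+/*
+    Multi-bit fields carry no _EN constant; the value is recovered by
+    masking with the complement of _CLRMSK and shifting down by _SHIFT.
+    A sketch for the 10-bit READS0 field above (illustrative only):
+*/
+#if 0
+static inline IMG_UINT32 ExampleSLCStatus1Reads0(IMG_UINT64 ui64Status)
+{
+	/* bits 25:16 -> 10-bit read count */
+	return (IMG_UINT32)((ui64Status & ~RGX_CR_SLC_STATUS1_READS0_CLRMSK) >>
+	                    RGX_CR_SLC_STATUS1_READS0_SHIFT);
+}
+#endif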
+
+/*
+    Register RGX_CR_SLC_IDLE
+*/
+#define RGX_CR_SLC_IDLE                                   (0x3898U)
+#define RGX_CR_SLC_IDLE__XE_MEM__MASKFULL                 (IMG_UINT64_C(0x00000000000003FF))
+#define RGX_CR_SLC_IDLE_MASKFULL                          (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_SLC_IDLE_MH_SYSARB1_SHIFT                  (9U)
+#define RGX_CR_SLC_IDLE_MH_SYSARB1_CLRMSK                 (0xFFFFFDFFU)
+#define RGX_CR_SLC_IDLE_MH_SYSARB1_EN                     (0x00000200U)
+#define RGX_CR_SLC_IDLE_MH_SYSARB0_SHIFT                  (8U)
+#define RGX_CR_SLC_IDLE_MH_SYSARB0_CLRMSK                 (0xFFFFFEFFU)
+#define RGX_CR_SLC_IDLE_MH_SYSARB0_EN                     (0x00000100U)
+#define RGX_CR_SLC_IDLE_IMGBV4_SHIFT                      (7U)
+#define RGX_CR_SLC_IDLE_IMGBV4_CLRMSK                     (0xFFFFFF7FU)
+#define RGX_CR_SLC_IDLE_IMGBV4_EN                         (0x00000080U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT                 (6U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK                (0xFFFFFFBFU)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_EN                    (0x00000040U)
+#define RGX_CR_SLC_IDLE_RBOFIFO_SHIFT                     (5U)
+#define RGX_CR_SLC_IDLE_RBOFIFO_CLRMSK                    (0xFFFFFFDFU)
+#define RGX_CR_SLC_IDLE_RBOFIFO_EN                        (0x00000020U)
+#define RGX_CR_SLC_IDLE_FRC_CONV_SHIFT                    (4U)
+#define RGX_CR_SLC_IDLE_FRC_CONV_CLRMSK                   (0xFFFFFFEFU)
+#define RGX_CR_SLC_IDLE_FRC_CONV_EN                       (0x00000010U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_SHIFT                    (3U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_CLRMSK                   (0xFFFFFFF7U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_EN                       (0x00000008U)
+#define RGX_CR_SLC_IDLE_VXD_CONV_SHIFT                    (2U)
+#define RGX_CR_SLC_IDLE_VXD_CONV_CLRMSK                   (0xFFFFFFFBU)
+#define RGX_CR_SLC_IDLE_VXD_CONV_EN                       (0x00000004U)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_SHIFT                   (1U)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_CLRMSK                  (0xFFFFFFFDU)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_EN                      (0x00000002U)
+#define RGX_CR_SLC_IDLE_CBAR_SHIFT                        (0U)
+#define RGX_CR_SLC_IDLE_CBAR_CLRMSK                       (0xFFFFFFFEU)
+#define RGX_CR_SLC_IDLE_CBAR_EN                           (0x00000001U)
+
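+/*
+    _MASKFULL is the union of every valid bit in a register.  Assuming each
+    IDLE bit reads 1 when its unit is idle (as the _EN encoding suggests),
+    a whole-SLC idle test can compare the masked value against MASKFULL;
+    cores with the XE memory interface would use the wider
+    RGX_CR_SLC_IDLE__XE_MEM__MASKFULL instead.  Sketch only:
+*/
+#if 0
+static inline IMG_BOOL ExampleSLCIsIdle(IMG_UINT64 ui64Idle)
+{
+	return (ui64Idle & RGX_CR_SLC_IDLE_MASKFULL) == RGX_CR_SLC_IDLE_MASKFULL;
+}
+#endif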
+
+/*
+    Register RGX_CR_SLC_STATUS2
+*/
+#define RGX_CR_SLC_STATUS2                                (0x3908U)
+#define RGX_CR_SLC_STATUS2_MASKFULL                       (IMG_UINT64_C(0x000003FF03FFFFFF))
+#define RGX_CR_SLC_STATUS2_READS3_SHIFT                   (32U)
+#define RGX_CR_SLC_STATUS2_READS3_CLRMSK                  (IMG_UINT64_C(0xFFFFFC00FFFFFFFF))
+#define RGX_CR_SLC_STATUS2_READS2_SHIFT                   (16U)
+#define RGX_CR_SLC_STATUS2_READS2_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFC00FFFF))
+#define RGX_CR_SLC_STATUS2_READS3_EXT_SHIFT               (8U)
+#define RGX_CR_SLC_STATUS2_READS3_EXT_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFF00FF))
+#define RGX_CR_SLC_STATUS2_READS2_EXT_SHIFT               (0U)
+#define RGX_CR_SLC_STATUS2_READS2_EXT_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+    Register RGX_CR_SLC_CTRL_MISC2
+*/
+#define RGX_CR_SLC_CTRL_MISC2                             (0x3930U)
+#define RGX_CR_SLC_CTRL_MISC2_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT         (0U)
+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK        (0x00000000U)
+
+
+/*
+    Register RGX_CR_SLC_CROSSBAR_LOAD_BALANCE
+*/
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE                  (0x3938U)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL         (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT     (0U)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK    (0xFFFFFFFEU)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN        (0x00000001U)
+
+
+/*
+    Register RGX_CR_USC_UVS0_CHECKSUM
+*/
+#define RGX_CR_USC_UVS0_CHECKSUM                          (0x5000U)
+#define RGX_CR_USC_UVS0_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS1_CHECKSUM
+*/
+#define RGX_CR_USC_UVS1_CHECKSUM                          (0x5008U)
+#define RGX_CR_USC_UVS1_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS2_CHECKSUM
+*/
+#define RGX_CR_USC_UVS2_CHECKSUM                          (0x5010U)
+#define RGX_CR_USC_UVS2_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS3_CHECKSUM
+*/
+#define RGX_CR_USC_UVS3_CHECKSUM                          (0x5018U)
+#define RGX_CR_USC_UVS3_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_PPP_SIGNATURE
+*/
+#define RGX_CR_PPP_SIGNATURE                              (0x5020U)
+#define RGX_CR_PPP_SIGNATURE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_SIGNATURE_VALUE_SHIFT                  (0U)
+#define RGX_CR_PPP_SIGNATURE_VALUE_CLRMSK                 (0x00000000U)
+
+
+/*
+    Register RGX_CR_TE_SIGNATURE
+*/
+#define RGX_CR_TE_SIGNATURE                               (0x5028U)
+#define RGX_CR_TE_SIGNATURE_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TE_SIGNATURE_VALUE_SHIFT                   (0U)
+#define RGX_CR_TE_SIGNATURE_VALUE_CLRMSK                  (0x00000000U)
+
+
+/*
+    Register RGX_CR_TE_CHECKSUM
+*/
+#define RGX_CR_TE_CHECKSUM                                (0x5110U)
+#define RGX_CR_TE_CHECKSUM_MASKFULL                       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT                    (0U)
+#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVB_CHECKSUM
+*/
+#define RGX_CR_USC_UVB_CHECKSUM                           (0x5118U)
+#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_VCE_CHECKSUM
+*/
+#define RGX_CR_VCE_CHECKSUM                               (0x5030U)
+#define RGX_CR_VCE_CHECKSUM_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT                   (0U)
+#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK                  (0x00000000U)
+
+
+/*
+    Register RGX_CR_ISP_PDS_CHECKSUM
+*/
+#define RGX_CR_ISP_PDS_CHECKSUM                           (0x5038U)
+#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_ISP_TPF_CHECKSUM
+*/
+#define RGX_CR_ISP_TPF_CHECKSUM                           (0x5040U)
+#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_TFPU_PLANE0_CHECKSUM
+*/
+#define RGX_CR_TFPU_PLANE0_CHECKSUM                       (0x5048U)
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT           (0U)
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_TFPU_PLANE1_CHECKSUM
+*/
+#define RGX_CR_TFPU_PLANE1_CHECKSUM                       (0x5050U)
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT           (0U)
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_PBE_CHECKSUM
+*/
+#define RGX_CR_PBE_CHECKSUM                               (0x5058U)
+#define RGX_CR_PBE_CHECKSUM_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT                   (0U)
+#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK                  (0x00000000U)
+
+
+/*
+    Register RGX_CR_PDS_DOUTM_STM_SIGNATURE
+*/
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE                    (0x5060U)
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT        (0U)
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK       (0x00000000U)
+
+
+/*
+    Register RGX_CR_IFPU_ISP_CHECKSUM
+*/
+#define RGX_CR_IFPU_ISP_CHECKSUM                          (0x5068U)
+#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS4_CHECKSUM
+*/
+#define RGX_CR_USC_UVS4_CHECKSUM                          (0x5100U)
+#define RGX_CR_USC_UVS4_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS5_CHECKSUM
+*/
+#define RGX_CR_USC_UVS5_CHECKSUM                          (0x5108U)
+#define RGX_CR_USC_UVS5_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_PPP_CLIP_CHECKSUM
+*/
+#define RGX_CR_PPP_CLIP_CHECKSUM                          (0x5120U)
+#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_TA_PHASE
+*/
+#define RGX_CR_PERF_TA_PHASE                              (0x6008U)
+#define RGX_CR_PERF_TA_PHASE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_PHASE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_TA_PHASE_COUNT_CLRMSK                 (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_3D_PHASE
+*/
+#define RGX_CR_PERF_3D_PHASE                              (0x6010U)
+#define RGX_CR_PERF_3D_PHASE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_PHASE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_3D_PHASE_COUNT_CLRMSK                 (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_COMPUTE_PHASE
+*/
+#define RGX_CR_PERF_COMPUTE_PHASE                         (0x6018U)
+#define RGX_CR_PERF_COMPUTE_PHASE_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT             (0U)
+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK            (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_TA_CYCLE
+*/
+#define RGX_CR_PERF_TA_CYCLE                              (0x6020U)
+#define RGX_CR_PERF_TA_CYCLE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_CYCLE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_TA_CYCLE_COUNT_CLRMSK                 (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_3D_CYCLE
+*/
+#define RGX_CR_PERF_3D_CYCLE                              (0x6028U)
+#define RGX_CR_PERF_3D_CYCLE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_CYCLE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_3D_CYCLE_COUNT_CLRMSK                 (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_COMPUTE_CYCLE
+*/
+#define RGX_CR_PERF_COMPUTE_CYCLE                         (0x6030U)
+#define RGX_CR_PERF_COMPUTE_CYCLE_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT             (0U)
+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK            (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_TA_OR_3D_CYCLE
+*/
+#define RGX_CR_PERF_TA_OR_3D_CYCLE                        (0x6038U)
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT            (0U)
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK           (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_INITIAL_TA_CYCLE
+*/
+#define RGX_CR_PERF_INITIAL_TA_CYCLE                      (0x6040U)
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC0_READ_STALL
+*/
+#define RGX_CR_PERF_SLC0_READ_STALL                       (0x60B8U)
+#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC0_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC0_WRITE_STALL                      (0x60C0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC1_READ_STALL
+*/
+#define RGX_CR_PERF_SLC1_READ_STALL                       (0x60E0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC1_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC1_WRITE_STALL                      (0x60E8U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC2_READ_STALL
+*/
+#define RGX_CR_PERF_SLC2_READ_STALL                       (0x6158U)
+#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC2_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC2_WRITE_STALL                      (0x6160U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC3_READ_STALL
+*/
+#define RGX_CR_PERF_SLC3_READ_STALL                       (0x6180U)
+#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC3_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC3_WRITE_STALL                      (0x6188U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_3D_SPINUP
+*/
+#define RGX_CR_PERF_3D_SPINUP                             (0x6220U)
+#define RGX_CR_PERF_3D_SPINUP_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_SPINUP_CYCLES_SHIFT                (0U)
+#define RGX_CR_PERF_3D_SPINUP_CYCLES_CLRMSK               (0x00000000U)
+
+
+/*
+    Register RGX_CR_AXI_ACE_LITE_CONFIGURATION
+*/
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION                 (0x38C0U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL        (IMG_UINT64_C(0x00003FFFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT (45U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT (37U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0xFFFFE01FFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT (36U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT (35U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT (34U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT (30U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFC3FFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT (26U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC3FFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT (22U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC3FFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT (20U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT (18U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT (16U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT (14U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT (12U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT (10U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT (8U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT (4U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT (0U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF0))
+
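+/*
+    Writing a multi-bit field inverts the extraction idiom: clear the field
+    via _CLRMSK, then OR in the new value shifted up and masked to the
+    field's width.  A sketch for the 8-bit OSID_SECURITY field above
+    (illustrative only):
+*/
+#if 0
+static inline IMG_UINT64 ExampleSetOSIDSecurity(IMG_UINT64 ui64Reg, IMG_UINT32 ui32OSID)
+{
+	ui64Reg &= RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK;
+	ui64Reg |= ((IMG_UINT64)ui32OSID <<
+	            RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT) &
+	           ~RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK;
+	return ui64Reg;
+}
+#endif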
+
+/*
+    Register RGX_CR_POWER_ESTIMATE_RESULT
+*/
+#define RGX_CR_POWER_ESTIMATE_RESULT                      (0x6328U)
+#define RGX_CR_POWER_ESTIMATE_RESULT_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT          (0U)
+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_TA_PERF
+*/
+#define RGX_CR_TA_PERF                                    (0x7600U)
+#define RGX_CR_TA_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TA_PERF_CLR_3_SHIFT                        (4U)
+#define RGX_CR_TA_PERF_CLR_3_CLRMSK                       (0xFFFFFFEFU)
+#define RGX_CR_TA_PERF_CLR_3_EN                           (0x00000010U)
+#define RGX_CR_TA_PERF_CLR_2_SHIFT                        (3U)
+#define RGX_CR_TA_PERF_CLR_2_CLRMSK                       (0xFFFFFFF7U)
+#define RGX_CR_TA_PERF_CLR_2_EN                           (0x00000008U)
+#define RGX_CR_TA_PERF_CLR_1_SHIFT                        (2U)
+#define RGX_CR_TA_PERF_CLR_1_CLRMSK                       (0xFFFFFFFBU)
+#define RGX_CR_TA_PERF_CLR_1_EN                           (0x00000004U)
+#define RGX_CR_TA_PERF_CLR_0_SHIFT                        (1U)
+#define RGX_CR_TA_PERF_CLR_0_CLRMSK                       (0xFFFFFFFDU)
+#define RGX_CR_TA_PERF_CLR_0_EN                           (0x00000002U)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_CLRMSK                 (0xFFFFFFFEU)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_EN                     (0x00000001U)
+
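+/*
+    The *_PERF control registers in this file pair a CTRL_ENABLE bit with
+    per-counter CLR_n bits.  Assuming the CLR bits are write-to-clear
+    strobes (the names suggest so; the TRM is authoritative), a "reset and
+    start" value would simply OR the _EN constants together:
+*/
+#if 0
+#define EXAMPLE_TA_PERF_RESET_AND_START (RGX_CR_TA_PERF_CTRL_ENABLE_EN | \
+                                         RGX_CR_TA_PERF_CLR_0_EN | \
+                                         RGX_CR_TA_PERF_CLR_1_EN | \
+                                         RGX_CR_TA_PERF_CLR_2_EN | \
+                                         RGX_CR_TA_PERF_CLR_3_EN)
+#endif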
+
+/*
+    Register RGX_CR_TA_PERF_SELECT0
+*/
+#define RGX_CR_TA_PERF_SELECT0                            (0x7608U)
+#define RGX_CR_TA_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define RGX_CR_TA_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
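+/*
+    A SELECT0 value is composed field by field; masking each input with the
+    complement of its _CLRMSK keeps stray bits out of neighbouring fields.
+    Sketch (illustrative only):
+*/
+#if 0
+static inline IMG_UINT64 ExampleTAPerfSelect0(IMG_UINT32 ui32Group, IMG_UINT32 ui32Bits)
+{
+	IMG_UINT64 ui64Val = 0;
+	ui64Val |= ((IMG_UINT64)ui32Group << RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT) &
+	           ~RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK;
+	ui64Val |= ((IMG_UINT64)ui32Bits << RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT) &
+	           ~RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK;
+	return ui64Val;
+}
+#endif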
+
+/*
+    Register RGX_CR_TA_PERF_SELECT1
+*/
+#define RGX_CR_TA_PERF_SELECT1                            (0x7610U)
+#define RGX_CR_TA_PERF_SELECT1_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_MODE_SHIFT                 (21U)
+#define RGX_CR_TA_PERF_SELECT1_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_SELECT2
+*/
+#define RGX_CR_TA_PERF_SELECT2                            (0x7618U)
+#define RGX_CR_TA_PERF_SELECT2_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_MODE_SHIFT                 (21U)
+#define RGX_CR_TA_PERF_SELECT2_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_SELECT3
+*/
+#define RGX_CR_TA_PERF_SELECT3                            (0x7620U)
+#define RGX_CR_TA_PERF_SELECT3_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_MODE_SHIFT                 (21U)
+#define RGX_CR_TA_PERF_SELECT3_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_SELECTED_BITS
+*/
+#define RGX_CR_TA_PERF_SELECTED_BITS                      (0x7648U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT           (48U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK          (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT           (32U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK          (IMG_UINT64_C(0xFFFF0000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT           (16U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK          (IMG_UINT64_C(0xFFFFFFFF0000FFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_COUNTER_0
+*/
+#define RGX_CR_TA_PERF_COUNTER_0                          (0x7650U)
+#define RGX_CR_TA_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define RGX_CR_TA_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
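+/*
+    A _CLRMSK of 0x00000000, as for REG above, means the field spans the
+    whole 32-bit register; reading the counter is just masking the low
+    word, e.g. (IMG_UINT32)(ui64Reg & RGX_CR_TA_PERF_COUNTER_0_MASKFULL).
+    The same holds for the other *_COUNTER_n and *_CHECKSUM registers here.
+*/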
+
+/*
+    Register RGX_CR_TA_PERF_COUNTER_1
+*/
+#define RGX_CR_TA_PERF_COUNTER_1                          (0x7658U)
+#define RGX_CR_TA_PERF_COUNTER_1_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_1_REG_SHIFT                (0U)
+#define RGX_CR_TA_PERF_COUNTER_1_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register RGX_CR_TA_PERF_COUNTER_2
+*/
+#define RGX_CR_TA_PERF_COUNTER_2                          (0x7660U)
+#define RGX_CR_TA_PERF_COUNTER_2_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_2_REG_SHIFT                (0U)
+#define RGX_CR_TA_PERF_COUNTER_2_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register RGX_CR_TA_PERF_COUNTER_3
+*/
+#define RGX_CR_TA_PERF_COUNTER_3                          (0x7668U)
+#define RGX_CR_TA_PERF_COUNTER_3_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_3_REG_SHIFT                (0U)
+#define RGX_CR_TA_PERF_COUNTER_3_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register RGX_CR_RASTERISATION_PERF
+*/
+#define RGX_CR_RASTERISATION_PERF                         (0x7700U)
+#define RGX_CR_RASTERISATION_PERF_MASKFULL                (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_RASTERISATION_PERF_CLR_3_SHIFT             (4U)
+#define RGX_CR_RASTERISATION_PERF_CLR_3_CLRMSK            (0xFFFFFFEFU)
+#define RGX_CR_RASTERISATION_PERF_CLR_3_EN                (0x00000010U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_SHIFT             (3U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_CLRMSK            (0xFFFFFFF7U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_EN                (0x00000008U)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_SHIFT             (2U)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_CLRMSK            (0xFFFFFFFBU)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_EN                (0x00000004U)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_SHIFT             (1U)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_CLRMSK            (0xFFFFFFFDU)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_EN                (0x00000002U)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT       (0U)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK      (0xFFFFFFFEU)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_EN          (0x00000001U)
+
+
+/*
+    Register RGX_CR_RASTERISATION_PERF_SELECT0
+*/
+#define RGX_CR_RASTERISATION_PERF_SELECT0                 (0x7708U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MASKFULL        (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT      (21U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_EN         (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_RASTERISATION_PERF_COUNTER_0
+*/
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0               (0x7750U)
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT     (0U)
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK    (0x00000000U)
+
+
+/*
+    Register RGX_CR_HUB_BIFPMCACHE_PERF
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF                        (0x7800U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_MASKFULL               (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT            (4U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK           (0xFFFFFFEFU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN               (0x00000010U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT            (3U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK           (0xFFFFFFF7U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN               (0x00000008U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT            (2U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK           (0xFFFFFFFBU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN               (0x00000004U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT            (1U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK           (0xFFFFFFFDU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN               (0x00000002U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT      (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK     (0xFFFFFFFEU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN         (0x00000001U)
+
+
+/*
+    Register RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0                (0x7808U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL       (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT     (21U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN        (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0              (0x7850U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT    (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK   (0x00000000U)
+
+
+/*
+    Register RGX_CR_TPU_MCU_L0_PERF
+*/
+#define RGX_CR_TPU_MCU_L0_PERF                            (0x7900U)
+#define RGX_CR_TPU_MCU_L0_PERF_MASKFULL                   (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT                (4U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK               (0xFFFFFFEFU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_EN                   (0x00000010U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT                (3U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK               (0xFFFFFFF7U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_EN                   (0x00000008U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT                (2U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK               (0xFFFFFFFBU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_EN                   (0x00000004U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT                (1U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK               (0xFFFFFFFDU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_EN                   (0x00000002U)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT          (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_TPU_MCU_L0_PERF_SELECT0
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0                    (0x7908U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL           (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT    (48U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK   (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT    (32U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK   (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT         (21U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN            (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT   (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK  (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TPU_MCU_L0_PERF_COUNTER_0
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0                  (0x7950U)
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT        (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK       (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_PERF
+*/
+#define RGX_CR_USC_PERF                                   (0x8100U)
+#define RGX_CR_USC_PERF_MASKFULL                          (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_USC_PERF_CLR_3_SHIFT                       (4U)
+#define RGX_CR_USC_PERF_CLR_3_CLRMSK                      (0xFFFFFFEFU)
+#define RGX_CR_USC_PERF_CLR_3_EN                          (0x00000010U)
+#define RGX_CR_USC_PERF_CLR_2_SHIFT                       (3U)
+#define RGX_CR_USC_PERF_CLR_2_CLRMSK                      (0xFFFFFFF7U)
+#define RGX_CR_USC_PERF_CLR_2_EN                          (0x00000008U)
+#define RGX_CR_USC_PERF_CLR_1_SHIFT                       (2U)
+#define RGX_CR_USC_PERF_CLR_1_CLRMSK                      (0xFFFFFFFBU)
+#define RGX_CR_USC_PERF_CLR_1_EN                          (0x00000004U)
+#define RGX_CR_USC_PERF_CLR_0_SHIFT                       (1U)
+#define RGX_CR_USC_PERF_CLR_0_CLRMSK                      (0xFFFFFFFDU)
+#define RGX_CR_USC_PERF_CLR_0_EN                          (0x00000002U)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_SHIFT                 (0U)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_CLRMSK                (0xFFFFFFFEU)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_EN                    (0x00000001U)
+
+
+/*
+    Register RGX_CR_USC_PERF_SELECT0
+*/
+#define RGX_CR_USC_PERF_SELECT0                           (0x8108U)
+#define RGX_CR_USC_PERF_SELECT0_MASKFULL                  (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT           (48U)
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK          (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT           (32U)
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK          (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_MODE_SHIFT                (21U)
+#define RGX_CR_USC_PERF_SELECT0_MODE_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_MODE_EN                   (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT        (16U)
+#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT          (0U)
+#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_USC_PERF_COUNTER_0
+*/
+#define RGX_CR_USC_PERF_COUNTER_0                         (0x8150U)
+#define RGX_CR_USC_PERF_COUNTER_0_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_PERF_COUNTER_0_REG_SHIFT               (0U)
+#define RGX_CR_USC_PERF_COUNTER_0_REG_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_JONES_IDLE
+*/
+#define RGX_CR_JONES_IDLE                                 (0x8328U)
+#define RGX_CR_JONES_IDLE_MASKFULL                        (IMG_UINT64_C(0x0000000000007FFF))
+#define RGX_CR_JONES_IDLE_TDM_SHIFT                       (14U)
+#define RGX_CR_JONES_IDLE_TDM_CLRMSK                      (0xFFFFBFFFU)
+#define RGX_CR_JONES_IDLE_TDM_EN                          (0x00004000U)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT                (13U)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK               (0xFFFFDFFFU)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN                   (0x00002000U)
+#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT                    (12U)
+#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK                   (0xFFFFEFFFU)
+#define RGX_CR_JONES_IDLE_FB_CDC_EN                       (0x00001000U)
+#define RGX_CR_JONES_IDLE_MMU_SHIFT                       (11U)
+#define RGX_CR_JONES_IDLE_MMU_CLRMSK                      (0xFFFFF7FFU)
+#define RGX_CR_JONES_IDLE_MMU_EN                          (0x00000800U)
+#define RGX_CR_JONES_IDLE_TLA_SHIFT                       (10U)
+#define RGX_CR_JONES_IDLE_TLA_CLRMSK                      (0xFFFFFBFFU)
+#define RGX_CR_JONES_IDLE_TLA_EN                          (0x00000400U)
+#define RGX_CR_JONES_IDLE_GARTEN_SHIFT                    (9U)
+#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK                   (0xFFFFFDFFU)
+#define RGX_CR_JONES_IDLE_GARTEN_EN                       (0x00000200U)
+#define RGX_CR_JONES_IDLE_HOSTIF_SHIFT                    (8U)
+#define RGX_CR_JONES_IDLE_HOSTIF_CLRMSK                   (0xFFFFFEFFU)
+#define RGX_CR_JONES_IDLE_HOSTIF_EN                       (0x00000100U)
+#define RGX_CR_JONES_IDLE_SOCIF_SHIFT                     (7U)
+#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK                    (0xFFFFFF7FU)
+#define RGX_CR_JONES_IDLE_SOCIF_EN                        (0x00000080U)
+#define RGX_CR_JONES_IDLE_TILING_SHIFT                    (6U)
+#define RGX_CR_JONES_IDLE_TILING_CLRMSK                   (0xFFFFFFBFU)
+#define RGX_CR_JONES_IDLE_TILING_EN                       (0x00000040U)
+#define RGX_CR_JONES_IDLE_IPP_SHIFT                       (5U)
+#define RGX_CR_JONES_IDLE_IPP_CLRMSK                      (0xFFFFFFDFU)
+#define RGX_CR_JONES_IDLE_IPP_EN                          (0x00000020U)
+#define RGX_CR_JONES_IDLE_USCS_SHIFT                      (4U)
+#define RGX_CR_JONES_IDLE_USCS_CLRMSK                     (0xFFFFFFEFU)
+#define RGX_CR_JONES_IDLE_USCS_EN                         (0x00000010U)
+#define RGX_CR_JONES_IDLE_PM_SHIFT                        (3U)
+#define RGX_CR_JONES_IDLE_PM_CLRMSK                       (0xFFFFFFF7U)
+#define RGX_CR_JONES_IDLE_PM_EN                           (0x00000008U)
+#define RGX_CR_JONES_IDLE_CDM_SHIFT                       (2U)
+#define RGX_CR_JONES_IDLE_CDM_CLRMSK                      (0xFFFFFFFBU)
+#define RGX_CR_JONES_IDLE_CDM_EN                          (0x00000004U)
+#define RGX_CR_JONES_IDLE_VDM_SHIFT                       (1U)
+#define RGX_CR_JONES_IDLE_VDM_CLRMSK                      (0xFFFFFFFDU)
+#define RGX_CR_JONES_IDLE_VDM_EN                          (0x00000002U)
+#define RGX_CR_JONES_IDLE_BIF_SHIFT                       (0U)
+#define RGX_CR_JONES_IDLE_BIF_CLRMSK                      (0xFFFFFFFEU)
+#define RGX_CR_JONES_IDLE_BIF_EN                          (0x00000001U)
+
+
+/*
+    Register RGX_CR_TORNADO_PERF
+*/
+#define RGX_CR_TORNADO_PERF                               (0x8228U)
+#define RGX_CR_TORNADO_PERF_MASKFULL                      (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TORNADO_PERF_CLR_3_SHIFT                   (4U)
+#define RGX_CR_TORNADO_PERF_CLR_3_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_TORNADO_PERF_CLR_3_EN                      (0x00000010U)
+#define RGX_CR_TORNADO_PERF_CLR_2_SHIFT                   (3U)
+#define RGX_CR_TORNADO_PERF_CLR_2_CLRMSK                  (0xFFFFFFF7U)
+#define RGX_CR_TORNADO_PERF_CLR_2_EN                      (0x00000008U)
+#define RGX_CR_TORNADO_PERF_CLR_1_SHIFT                   (2U)
+#define RGX_CR_TORNADO_PERF_CLR_1_CLRMSK                  (0xFFFFFFFBU)
+#define RGX_CR_TORNADO_PERF_CLR_1_EN                      (0x00000004U)
+#define RGX_CR_TORNADO_PERF_CLR_0_SHIFT                   (1U)
+#define RGX_CR_TORNADO_PERF_CLR_0_CLRMSK                  (0xFFFFFFFDU)
+#define RGX_CR_TORNADO_PERF_CLR_0_EN                      (0x00000002U)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT             (0U)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK            (0xFFFFFFFEU)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_EN                (0x00000001U)
+
+
+/*
+    Register RGX_CR_TORNADO_PERF_SELECT0
+*/
+#define RGX_CR_TORNADO_PERF_SELECT0                       (0x8230U)
+#define RGX_CR_TORNADO_PERF_SELECT0_MASKFULL              (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT       (48U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK      (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT       (32U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK      (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_SHIFT            (21U)
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_EN               (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT    (16U)
+#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK   (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT      (0U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TORNADO_PERF_COUNTER_0
+*/
+#define RGX_CR_TORNADO_PERF_COUNTER_0                     (0x8268U)
+#define RGX_CR_TORNADO_PERF_COUNTER_0_MASKFULL            (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT           (0U)
+#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_TEXAS_PERF
+*/
+#define RGX_CR_TEXAS_PERF                                 (0x8290U)
+#define RGX_CR_TEXAS_PERF_MASKFULL                        (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_TEXAS_PERF_CLR_5_SHIFT                     (6U)
+#define RGX_CR_TEXAS_PERF_CLR_5_CLRMSK                    (0xFFFFFFBFU)
+#define RGX_CR_TEXAS_PERF_CLR_5_EN                        (0x00000040U)
+#define RGX_CR_TEXAS_PERF_CLR_4_SHIFT                     (5U)
+#define RGX_CR_TEXAS_PERF_CLR_4_CLRMSK                    (0xFFFFFFDFU)
+#define RGX_CR_TEXAS_PERF_CLR_4_EN                        (0x00000020U)
+#define RGX_CR_TEXAS_PERF_CLR_3_SHIFT                     (4U)
+#define RGX_CR_TEXAS_PERF_CLR_3_CLRMSK                    (0xFFFFFFEFU)
+#define RGX_CR_TEXAS_PERF_CLR_3_EN                        (0x00000010U)
+#define RGX_CR_TEXAS_PERF_CLR_2_SHIFT                     (3U)
+#define RGX_CR_TEXAS_PERF_CLR_2_CLRMSK                    (0xFFFFFFF7U)
+#define RGX_CR_TEXAS_PERF_CLR_2_EN                        (0x00000008U)
+#define RGX_CR_TEXAS_PERF_CLR_1_SHIFT                     (2U)
+#define RGX_CR_TEXAS_PERF_CLR_1_CLRMSK                    (0xFFFFFFFBU)
+#define RGX_CR_TEXAS_PERF_CLR_1_EN                        (0x00000004U)
+#define RGX_CR_TEXAS_PERF_CLR_0_SHIFT                     (1U)
+#define RGX_CR_TEXAS_PERF_CLR_0_CLRMSK                    (0xFFFFFFFDU)
+#define RGX_CR_TEXAS_PERF_CLR_0_EN                        (0x00000002U)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT               (0U)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK              (0xFFFFFFFEU)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_EN                  (0x00000001U)
+
+
+/*
+    Register RGX_CR_TEXAS_PERF_SELECT0
+*/
+#define RGX_CR_TEXAS_PERF_SELECT0                         (0x8298U)
+#define RGX_CR_TEXAS_PERF_SELECT0_MASKFULL                (IMG_UINT64_C(0x3FFF3FFF803FFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT         (48U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK        (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT         (32U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK        (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_SHIFT              (31U)
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK             (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_EN                 (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT      (16U)
+#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT        (0U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
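+/*
+    Note: unlike the TA/USC/TORNADO SELECT0 registers, the TEXAS (and
+    BLACKPEARL) variants place MODE at bit 31 rather than bit 21 -- see the
+    0x80000000 bit in their MASKFULL values -- and widen GROUP_SELECT from
+    5 to 6 bits.
+*/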
+
+/*
+    Register RGX_CR_TEXAS_PERF_COUNTER_0
+*/
+#define RGX_CR_TEXAS_PERF_COUNTER_0                       (0x82D8U)
+#define RGX_CR_TEXAS_PERF_COUNTER_0_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT             (0U)
+#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK            (0x00000000U)
+
+
+/*
+    Register RGX_CR_JONES_PERF
+*/
+#define RGX_CR_JONES_PERF                                 (0x8330U)
+#define RGX_CR_JONES_PERF_MASKFULL                        (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_JONES_PERF_CLR_3_SHIFT                     (4U)
+#define RGX_CR_JONES_PERF_CLR_3_CLRMSK                    (0xFFFFFFEFU)
+#define RGX_CR_JONES_PERF_CLR_3_EN                        (0x00000010U)
+#define RGX_CR_JONES_PERF_CLR_2_SHIFT                     (3U)
+#define RGX_CR_JONES_PERF_CLR_2_CLRMSK                    (0xFFFFFFF7U)
+#define RGX_CR_JONES_PERF_CLR_2_EN                        (0x00000008U)
+#define RGX_CR_JONES_PERF_CLR_1_SHIFT                     (2U)
+#define RGX_CR_JONES_PERF_CLR_1_CLRMSK                    (0xFFFFFFFBU)
+#define RGX_CR_JONES_PERF_CLR_1_EN                        (0x00000004U)
+#define RGX_CR_JONES_PERF_CLR_0_SHIFT                     (1U)
+#define RGX_CR_JONES_PERF_CLR_0_CLRMSK                    (0xFFFFFFFDU)
+#define RGX_CR_JONES_PERF_CLR_0_EN                        (0x00000002U)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_SHIFT               (0U)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_CLRMSK              (0xFFFFFFFEU)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_EN                  (0x00000001U)
+
+
+/*
+    Register RGX_CR_JONES_PERF_SELECT0
+*/
+#define RGX_CR_JONES_PERF_SELECT0                         (0x8338U)
+#define RGX_CR_JONES_PERF_SELECT0_MASKFULL                (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT         (48U)
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK        (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT         (32U)
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK        (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_MODE_SHIFT              (21U)
+#define RGX_CR_JONES_PERF_SELECT0_MODE_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_MODE_EN                 (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT      (16U)
+#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT        (0U)
+#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_JONES_PERF_COUNTER_0
+*/
+#define RGX_CR_JONES_PERF_COUNTER_0                       (0x8368U)
+#define RGX_CR_JONES_PERF_COUNTER_0_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_JONES_PERF_COUNTER_0_REG_SHIFT             (0U)
+#define RGX_CR_JONES_PERF_COUNTER_0_REG_CLRMSK            (0x00000000U)
+
+
+/*
+    Register RGX_CR_BLACKPEARL_PERF
+*/
+#define RGX_CR_BLACKPEARL_PERF                            (0x8400U)
+#define RGX_CR_BLACKPEARL_PERF_MASKFULL                   (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_SHIFT                (6U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_CLRMSK               (0xFFFFFFBFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_EN                   (0x00000040U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_SHIFT                (5U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_EN                   (0x00000020U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_SHIFT                (4U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_CLRMSK               (0xFFFFFFEFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_EN                   (0x00000010U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_SHIFT                (3U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_CLRMSK               (0xFFFFFFF7U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_EN                   (0x00000008U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_SHIFT                (2U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_CLRMSK               (0xFFFFFFFBU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_EN                   (0x00000004U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_SHIFT                (1U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_CLRMSK               (0xFFFFFFFDU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_EN                   (0x00000002U)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT          (0U)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_BLACKPEARL_PERF_SELECT0
+*/
+#define RGX_CR_BLACKPEARL_PERF_SELECT0                    (0x8408U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MASKFULL           (IMG_UINT64_C(0x3FFF3FFF803FFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT    (48U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK   (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT    (32U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK   (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT         (31U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK        (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_EN            (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT   (0U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK  (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_BLACKPEARL_PERF_COUNTER_0
+*/
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0                  (0x8448U)
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT        (0U)
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK       (0x00000000U)
+
+
+/*
+    Register RGX_CR_PBE_PERF
+*/
+#define RGX_CR_PBE_PERF                                   (0x8478U)
+#define RGX_CR_PBE_PERF_MASKFULL                          (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_PBE_PERF_CLR_3_SHIFT                       (4U)
+#define RGX_CR_PBE_PERF_CLR_3_CLRMSK                      (0xFFFFFFEFU)
+#define RGX_CR_PBE_PERF_CLR_3_EN                          (0x00000010U)
+#define RGX_CR_PBE_PERF_CLR_2_SHIFT                       (3U)
+#define RGX_CR_PBE_PERF_CLR_2_CLRMSK                      (0xFFFFFFF7U)
+#define RGX_CR_PBE_PERF_CLR_2_EN                          (0x00000008U)
+#define RGX_CR_PBE_PERF_CLR_1_SHIFT                       (2U)
+#define RGX_CR_PBE_PERF_CLR_1_CLRMSK                      (0xFFFFFFFBU)
+#define RGX_CR_PBE_PERF_CLR_1_EN                          (0x00000004U)
+#define RGX_CR_PBE_PERF_CLR_0_SHIFT                       (1U)
+#define RGX_CR_PBE_PERF_CLR_0_CLRMSK                      (0xFFFFFFFDU)
+#define RGX_CR_PBE_PERF_CLR_0_EN                          (0x00000002U)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_SHIFT                 (0U)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_CLRMSK                (0xFFFFFFFEU)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_EN                    (0x00000001U)
+
+
+/*
+    Register RGX_CR_PBE_PERF_SELECT0
+*/
+#define RGX_CR_PBE_PERF_SELECT0                           (0x8480U)
+#define RGX_CR_PBE_PERF_SELECT0_MASKFULL                  (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT           (48U)
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK          (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT           (32U)
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK          (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_MODE_SHIFT                (21U)
+#define RGX_CR_PBE_PERF_SELECT0_MODE_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_MODE_EN                   (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT        (16U)
+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT          (0U)
+#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_PBE_PERF_COUNTER_0
+*/
+#define RGX_CR_PBE_PERF_COUNTER_0                         (0x84B0U)
+#define RGX_CR_PBE_PERF_COUNTER_0_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_PERF_COUNTER_0_REG_SHIFT               (0U)
+#define RGX_CR_PBE_PERF_COUNTER_0_REG_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_OCP_REVINFO
+*/
+#define RGX_CR_OCP_REVINFO                                (0x9000U)
+#define RGX_CR_OCP_REVINFO_MASKFULL                       (IMG_UINT64_C(0x00000007FFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT            (33U)
+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK           (IMG_UINT64_C(0xFFFFFFF9FFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT            (32U)
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK           (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_EN               (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_OCP_REVINFO_REVISION_SHIFT                 (0U)
+#define RGX_CR_OCP_REVINFO_REVISION_CLRMSK                (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_OCP_SYSCONFIG
+*/
+#define RGX_CR_OCP_SYSCONFIG                              (0x9010U)
+#define RGX_CR_OCP_SYSCONFIG_MASKFULL                     (IMG_UINT64_C(0x0000000000000FFF))
+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT     (10U)
+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK    (0xFFFFF3FFU)
+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT     (8U)
+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK    (0xFFFFFCFFU)
+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT     (6U)
+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK    (0xFFFFFF3FU)
+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT     (4U)
+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK    (0xFFFFFFCFU)
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT           (2U)
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK          (0xFFFFFFF3U)
+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT              (0U)
+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK             (0xFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_RAW_0
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_0                        (0x9020U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_RAW_1
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_1                        (0x9028U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_RAW_2
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_2                        (0x9030U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT      (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK     (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN         (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_0
+*/
+#define RGX_CR_OCP_IRQSTATUS_0                            (0x9038U)
+#define RGX_CR_OCP_IRQSTATUS_0_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN  (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_1
+*/
+#define RGX_CR_OCP_IRQSTATUS_1                            (0x9040U)
+#define RGX_CR_OCP_IRQSTATUS_1_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_2
+*/
+#define RGX_CR_OCP_IRQSTATUS_2                            (0x9048U)
+#define RGX_CR_OCP_IRQSTATUS_2_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT       (0U)
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK      (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN          (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_SET_0
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_0                        (0x9050U)
+#define RGX_CR_OCP_IRQENABLE_SET_0_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_SET_1
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_1                        (0x9058U)
+#define RGX_CR_OCP_IRQENABLE_SET_1_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_SET_2
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_2                        (0x9060U)
+#define RGX_CR_OCP_IRQENABLE_SET_2_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT   (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK  (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN      (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_CLR_0
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_0                        (0x9068U)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_CLR_1
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_1                        (0x9070U)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_CLR_2
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_2                        (0x9078U)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT  (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN     (0x00000001U)
+
+
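+/*
+    Usage sketch (illustrative only): the OCP IRQ banks follow the common
+    SET/CLR idiom, so unmasking or masking the RGX interrupt on bank 2 is
+    a single-bit write; the register accessor is driver-specific and is
+    deliberately left to the caller.
+*/
+static inline IMG_UINT32 RGXExample_OCPIrq2EnableValue(void)
+{
+	/* Value for RGX_CR_OCP_IRQENABLE_SET_2 to unmask the RGX IRQ. */
+	return RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN;
+}
+
+static inline IMG_UINT32 RGXExample_OCPIrq2DisableValue(void)
+{
+	/* Value for RGX_CR_OCP_IRQENABLE_CLR_2 to mask the RGX IRQ again. */
+	return RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN;
+}
+
+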
+/*
+    Register RGX_CR_OCP_IRQ_EVENT
+*/
+#define RGX_CR_OCP_IRQ_EVENT                              (0x9080U)
+#define RGX_CR_OCP_IRQ_EVENT_MASKFULL                     (IMG_UINT64_C(0x00000000000FFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT (19U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT (18U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT (17U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT (16U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT (15U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT (14U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT (13U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT (12U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT (11U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT (10U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT (9U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT (8U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT (7U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT (6U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT (5U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT (4U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT (3U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT (2U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT (1U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT (0U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_OCP_DEBUG_CONFIG
+*/
+#define RGX_CR_OCP_DEBUG_CONFIG                           (0x9088U)
+#define RGX_CR_OCP_DEBUG_CONFIG_MASKFULL                  (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_SHIFT                 (0U)
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_CLRMSK                (0xFFFFFFFEU)
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_EN                    (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_DEBUG_STATUS
+*/
+#define RGX_CR_OCP_DEBUG_STATUS                           (0x9090U)
+#define RGX_CR_OCP_DEBUG_STATUS_MASKFULL                  (IMG_UINT64_C(0x001F1F77FFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT    (51U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK   (IMG_UINT64_C(0xFFE7FFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT    (50U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK   (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN       (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT    (48U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK   (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT    (43U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK   (IMG_UINT64_C(0xFFFFE7FFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT    (42U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK   (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN       (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT    (40U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK   (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT        (38U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK       (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN           (IMG_UINT64_C(0x0000004000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT (37U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN  (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT (36U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN    (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT        (34U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK       (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN           (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT (33U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN  (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT (32U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN    (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT      (31U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK     (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN         (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT         (30U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN            (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT      (29U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN         (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT      (27U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFE7FFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT      (26U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN         (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT      (24U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT      (23U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN         (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT         (22U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN            (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT      (21U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN         (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT      (19U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFE7FFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT      (18U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN         (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT      (16U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT      (15U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN         (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT         (14U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN            (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT      (13U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN         (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT      (11U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFE7FF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT      (10U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN         (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT      (8U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT      (7U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN         (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT         (6U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN            (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT      (5U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN         (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT      (3U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFFE7))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT      (2U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN         (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT      (0U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
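+/*
+    The DM-type field definitions below (and the 7-bit
+    RGX_CR_BIF_TRUST_DM_MASK) are emitted without a register header; they
+    appear to describe the per-data-master encoding consumed by the
+    RGX_CR_BIF_TRUST register defined further down.
+*/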
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT           (6U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK          (0xFFFFFFBFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN              (0x00000040U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT               (5U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK              (0xFFFFFFDFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_EN                  (0x00000020U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_SHIFT               (4U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_CLRMSK              (0xFFFFFFEFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_EN                  (0x00000010U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT             (3U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK            (0xFFFFFFF7U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN                (0x00000008U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT              (2U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK             (0xFFFFFFFBU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_EN                 (0x00000004U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT             (1U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK            (0xFFFFFFFDU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN                (0x00000002U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT                (0U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK               (0xFFFFFFFEU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_EN                   (0x00000001U)
+
+
+#define RGX_CR_BIF_TRUST_DM_MASK                          (0x0000007FU)
+
+
+/*
+    Register RGX_CR_BIF_TRUST
+*/
+#define RGX_CR_BIF_TRUST                                  (0xA000U)
+#define RGX_CR_BIF_TRUST_MASKFULL                         (IMG_UINT64_C(0x00000000001FFFFF))
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT (20U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN   (0x00100000U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT  (19U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN     (0x00080000U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT       (18U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK      (0xFFFBFFFFU)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN          (0x00040000U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT         (17U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK        (0xFFFDFFFFU)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN            (0x00020000U)
+#define RGX_CR_BIF_TRUST_ENABLE_SHIFT                     (16U)
+#define RGX_CR_BIF_TRUST_ENABLE_CLRMSK                    (0xFFFEFFFFU)
+#define RGX_CR_BIF_TRUST_ENABLE_EN                        (0x00010000U)
+#define RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT                 (9U)
+#define RGX_CR_BIF_TRUST_DM_TRUSTED_CLRMSK                (0xFFFF01FFU)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT   (8U)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK  (0xFFFFFEFFU)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN      (0x00000100U)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT     (7U)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK    (0xFFFFFF7FU)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN        (0x00000080U)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT     (6U)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK    (0xFFFFFFBFU)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN        (0x00000040U)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT     (5U)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK    (0xFFFFFFDFU)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN        (0x00000020U)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT       (4U)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK      (0xFFFFFFEFU)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN          (0x00000010U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT       (3U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK      (0xFFFFFFF7U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN          (0x00000008U)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT    (2U)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK   (0xFFFFFFFBU)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN       (0x00000004U)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT      (1U)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK     (0xFFFFFFFDU)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN         (0x00000002U)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT      (0U)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK     (0xFFFFFFFEU)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN         (0x00000001U)
+
+
+/*
+    Register RGX_CR_SYS_BUS_SECURE
+*/
+#define RGX_CR_SYS_BUS_SECURE                             (0xA100U)
+#define RGX_CR_SYS_BUS_SECURE__SECR__MASKFULL             (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_MASKFULL                    (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT                (0U)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK               (0xFFFFFFFEU)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN                   (0x00000001U)
+
+
+/*
+    Register RGX_CR_FBA_FC0_CHECKSUM
+*/
+#define RGX_CR_FBA_FC0_CHECKSUM                           (0xD170U)
+#define RGX_CR_FBA_FC0_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_FBA_FC1_CHECKSUM
+*/
+#define RGX_CR_FBA_FC1_CHECKSUM                           (0xD178U)
+#define RGX_CR_FBA_FC1_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_FBA_FC2_CHECKSUM
+*/
+#define RGX_CR_FBA_FC2_CHECKSUM                           (0xD180U)
+#define RGX_CR_FBA_FC2_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_FBA_FC3_CHECKSUM
+*/
+#define RGX_CR_FBA_FC3_CHECKSUM                           (0xD188U)
+#define RGX_CR_FBA_FC3_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_CLK_CTRL2
+*/
+#define RGX_CR_CLK_CTRL2                                  (0xD200U)
+#define RGX_CR_CLK_CTRL2_MASKFULL                         (IMG_UINT64_C(0x0000000000000F33))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_SHIFT                   (10U)
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_ON                      (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_AUTO                    (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_CTRL2_VRDM_SHIFT                       (8U)
+#define RGX_CR_CLK_CTRL2_VRDM_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL2_VRDM_OFF                         (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_VRDM_ON                          (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL2_VRDM_AUTO                        (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL2_SH_SHIFT                         (4U)
+#define RGX_CR_CLK_CTRL2_SH_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL2_SH_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_SH_ON                            (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_CTRL2_SH_AUTO                          (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_CTRL2_FBA_SHIFT                        (0U)
+#define RGX_CR_CLK_CTRL2_FBA_CLRMSK                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL2_FBA_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_FBA_ON                           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL2_FBA_AUTO                         (IMG_UINT64_C(0x0000000000000002))
+
+
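+/*
+    Usage sketch (illustrative only): each clock domain in CLK_CTRL2 is a
+    two-bit OFF/ON/AUTO field, so switching a domain is the usual
+    clear-then-OR sequence; FBA is shown here, and the other domains
+    follow the same pattern.
+*/
+static inline IMG_UINT64 RGXExample_ClkCtrl2FbaAuto(IMG_UINT64 ui64Ctrl)
+{
+	/* Clear the FBA field, then request automatic clock gating. */
+	return (ui64Ctrl & RGX_CR_CLK_CTRL2_FBA_CLRMSK) |
+	       RGX_CR_CLK_CTRL2_FBA_AUTO;
+}
+
+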
+/*
+    Register RGX_CR_CLK_STATUS2
+*/
+#define RGX_CR_CLK_STATUS2                                (0xD208U)
+#define RGX_CR_CLK_STATUS2_MASKFULL                       (IMG_UINT64_C(0x0000000000000015))
+#define RGX_CR_CLK_STATUS2_VRDM_SHIFT                     (4U)
+#define RGX_CR_CLK_STATUS2_VRDM_CLRMSK                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS2_VRDM_GATED                     (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS2_VRDM_RUNNING                   (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS2_SH_SHIFT                       (2U)
+#define RGX_CR_CLK_STATUS2_SH_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_STATUS2_SH_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS2_SH_RUNNING                     (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_STATUS2_FBA_SHIFT                      (0U)
+#define RGX_CR_CLK_STATUS2_FBA_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS2_FBA_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS2_FBA_RUNNING                    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_RPM_SHF_FPL
+*/
+#define RGX_CR_RPM_SHF_FPL                                (0xD520U)
+#define RGX_CR_RPM_SHF_FPL_MASKFULL                       (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC))
+#define RGX_CR_RPM_SHF_FPL_SIZE_SHIFT                     (40U)
+#define RGX_CR_RPM_SHF_FPL_SIZE_CLRMSK                    (IMG_UINT64_C(0xC00000FFFFFFFFFF))
+#define RGX_CR_RPM_SHF_FPL_BASE_SHIFT                     (2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_CLRMSK                    (IMG_UINT64_C(0xFFFFFF0000000003))
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT                (2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSIZE                 (4U)
+
+
+/*
+    Register RGX_CR_RPM_SHF_FPL_READ
+*/
+#define RGX_CR_RPM_SHF_FPL_READ                           (0xD528U)
+#define RGX_CR_RPM_SHF_FPL_READ_MASKFULL                  (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT              (22U)
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK             (0xFFBFFFFFU)
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_EN                 (0x00400000U)
+#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT              (0U)
+#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK             (0xFFC00000U)
+
+
+/*
+    Register RGX_CR_RPM_SHF_FPL_WRITE
+*/
+#define RGX_CR_RPM_SHF_FPL_WRITE                          (0xD530U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_MASKFULL                 (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT             (22U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK            (0xFFBFFFFFU)
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN                (0x00400000U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT             (0U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK            (0xFFC00000U)
+
+
+/*
+    Register RGX_CR_RPM_SHG_FPL
+*/
+#define RGX_CR_RPM_SHG_FPL                                (0xD538U)
+#define RGX_CR_RPM_SHG_FPL_MASKFULL                       (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC))
+#define RGX_CR_RPM_SHG_FPL_SIZE_SHIFT                     (40U)
+#define RGX_CR_RPM_SHG_FPL_SIZE_CLRMSK                    (IMG_UINT64_C(0xC00000FFFFFFFFFF))
+#define RGX_CR_RPM_SHG_FPL_BASE_SHIFT                     (2U)
+#define RGX_CR_RPM_SHG_FPL_BASE_CLRMSK                    (IMG_UINT64_C(0xFFFFFF0000000003))
+#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT                (2U)
+#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSIZE                 (4U)
+
+
+/*
+    Register RGX_CR_RPM_SHG_FPL_READ
+*/
+#define RGX_CR_RPM_SHG_FPL_READ                           (0xD540U)
+#define RGX_CR_RPM_SHG_FPL_READ_MASKFULL                  (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT              (22U)
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK             (0xFFBFFFFFU)
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_EN                 (0x00400000U)
+#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT              (0U)
+#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK             (0xFFC00000U)
+
+
+/*
+    Register RGX_CR_RPM_SHG_FPL_WRITE
+*/
+#define RGX_CR_RPM_SHG_FPL_WRITE                          (0xD548U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_MASKFULL                 (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT             (22U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK            (0xFFBFFFFFU)
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN                (0x00400000U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT             (0U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK            (0xFFC00000U)
+
+
+/*
+    Register RGX_CR_SH_PERF
+*/
+#define RGX_CR_SH_PERF                                    (0xD5F8U)
+#define RGX_CR_SH_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_SH_PERF_CLR_3_SHIFT                        (4U)
+#define RGX_CR_SH_PERF_CLR_3_CLRMSK                       (0xFFFFFFEFU)
+#define RGX_CR_SH_PERF_CLR_3_EN                           (0x00000010U)
+#define RGX_CR_SH_PERF_CLR_2_SHIFT                        (3U)
+#define RGX_CR_SH_PERF_CLR_2_CLRMSK                       (0xFFFFFFF7U)
+#define RGX_CR_SH_PERF_CLR_2_EN                           (0x00000008U)
+#define RGX_CR_SH_PERF_CLR_1_SHIFT                        (2U)
+#define RGX_CR_SH_PERF_CLR_1_CLRMSK                       (0xFFFFFFFBU)
+#define RGX_CR_SH_PERF_CLR_1_EN                           (0x00000004U)
+#define RGX_CR_SH_PERF_CLR_0_SHIFT                        (1U)
+#define RGX_CR_SH_PERF_CLR_0_CLRMSK                       (0xFFFFFFFDU)
+#define RGX_CR_SH_PERF_CLR_0_EN                           (0x00000002U)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_CLRMSK                 (0xFFFFFFFEU)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_EN                     (0x00000001U)
+
+
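+/*
+    Usage sketch (illustrative only): the per-block PERF registers pair a
+    CTRL_ENABLE bit with per-counter CLR bits, so a typical "clear and
+    start" value enables counting and clears counter 0 in a single write.
+    Whether the CLR bits self-clear is hardware behaviour that this
+    header does not capture.
+*/
+static inline IMG_UINT32 RGXExample_ShPerfEnableClr0(void)
+{
+	return RGX_CR_SH_PERF_CTRL_ENABLE_EN | RGX_CR_SH_PERF_CLR_0_EN;
+}
+
+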
+/*
+    Register RGX_CR_SH_PERF_SELECT0
+*/
+#define RGX_CR_SH_PERF_SELECT0                            (0xD600U)
+#define RGX_CR_SH_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define RGX_CR_SH_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_SH_PERF_COUNTER_0
+*/
+#define RGX_CR_SH_PERF_COUNTER_0                          (0xD628U)
+#define RGX_CR_SH_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SH_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define RGX_CR_SH_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHF_SHG_CHECKSUM
+*/
+#define RGX_CR_SHF_SHG_CHECKSUM                           (0xD1C0U)
+#define RGX_CR_SHF_SHG_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHF_VERTEX_BIF_CHECKSUM
+*/
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM                    (0xD1C8U)
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT        (0U)
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHF_VARY_BIF_CHECKSUM
+*/
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM                      (0xD1D0U)
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT          (0U)
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_RPM_BIF_CHECKSUM
+*/
+#define RGX_CR_RPM_BIF_CHECKSUM                           (0xD1D8U)
+#define RGX_CR_RPM_BIF_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHG_BIF_CHECKSUM
+*/
+#define RGX_CR_SHG_BIF_CHECKSUM                           (0xD1E0U)
+#define RGX_CR_SHG_BIF_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHG_FE_BE_CHECKSUM
+*/
+#define RGX_CR_SHG_FE_BE_CHECKSUM                         (0xD1E8U)
+#define RGX_CR_SHG_FE_BE_CHECKSUM_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT             (0U)
+#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK            (0x00000000U)
+
+
+/*
+    Register DPX_CR_BF_PERF
+*/
+#define DPX_CR_BF_PERF                                    (0xC458U)
+#define DPX_CR_BF_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BF_PERF_CLR_3_SHIFT                        (4U)
+#define DPX_CR_BF_PERF_CLR_3_CLRMSK                       (0xFFFFFFEFU)
+#define DPX_CR_BF_PERF_CLR_3_EN                           (0x00000010U)
+#define DPX_CR_BF_PERF_CLR_2_SHIFT                        (3U)
+#define DPX_CR_BF_PERF_CLR_2_CLRMSK                       (0xFFFFFFF7U)
+#define DPX_CR_BF_PERF_CLR_2_EN                           (0x00000008U)
+#define DPX_CR_BF_PERF_CLR_1_SHIFT                        (2U)
+#define DPX_CR_BF_PERF_CLR_1_CLRMSK                       (0xFFFFFFFBU)
+#define DPX_CR_BF_PERF_CLR_1_EN                           (0x00000004U)
+#define DPX_CR_BF_PERF_CLR_0_SHIFT                        (1U)
+#define DPX_CR_BF_PERF_CLR_0_CLRMSK                       (0xFFFFFFFDU)
+#define DPX_CR_BF_PERF_CLR_0_EN                           (0x00000002U)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK                 (0xFFFFFFFEU)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_EN                     (0x00000001U)
+
+
+/*
+    Register DPX_CR_BF_PERF_SELECT0
+*/
+#define DPX_CR_BF_PERF_SELECT0                            (0xC460U)
+#define DPX_CR_BF_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register DPX_CR_BF_PERF_COUNTER_0
+*/
+#define DPX_CR_BF_PERF_COUNTER_0                          (0xC488U)
+#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register DPX_CR_BT_PERF
+*/
+#define DPX_CR_BT_PERF                                    (0xC3D0U)
+#define DPX_CR_BT_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BT_PERF_CLR_3_SHIFT                        (4U)
+#define DPX_CR_BT_PERF_CLR_3_CLRMSK                       (0xFFFFFFEFU)
+#define DPX_CR_BT_PERF_CLR_3_EN                           (0x00000010U)
+#define DPX_CR_BT_PERF_CLR_2_SHIFT                        (3U)
+#define DPX_CR_BT_PERF_CLR_2_CLRMSK                       (0xFFFFFFF7U)
+#define DPX_CR_BT_PERF_CLR_2_EN                           (0x00000008U)
+#define DPX_CR_BT_PERF_CLR_1_SHIFT                        (2U)
+#define DPX_CR_BT_PERF_CLR_1_CLRMSK                       (0xFFFFFFFBU)
+#define DPX_CR_BT_PERF_CLR_1_EN                           (0x00000004U)
+#define DPX_CR_BT_PERF_CLR_0_SHIFT                        (1U)
+#define DPX_CR_BT_PERF_CLR_0_CLRMSK                       (0xFFFFFFFDU)
+#define DPX_CR_BT_PERF_CLR_0_EN                           (0x00000002U)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK                 (0xFFFFFFFEU)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_EN                     (0x00000001U)
+
+
+/*
+    Register DPX_CR_BT_PERF_SELECT0
+*/
+#define DPX_CR_BT_PERF_SELECT0                            (0xC3D8U)
+#define DPX_CR_BT_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register DPX_CR_BT_PERF_COUNTER_0
+*/
+#define DPX_CR_BT_PERF_COUNTER_0                          (0xC420U)
+#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register DPX_CR_RQ_USC_DEBUG
+*/
+#define DPX_CR_RQ_USC_DEBUG                               (0xC110U)
+#define DPX_CR_RQ_USC_DEBUG_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT                (0U)
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK               (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS
+*/
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS                  (0xC5C8U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL         (IMG_UINT64_C(0x000000000000F775))
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT   (12U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK  (0xFFFF0FFFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT  (8U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT  (5U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT   (4U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK  (0xFFFFFFEFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN      (0x00000010U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT      (0U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK     (0xFFFFFFFEU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN         (0x00000001U)
+
+
+/*
+    Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS
+*/
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS                  (0xC5D0U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL         (IMG_UINT64_C(0x03FFFFFFFFFFFFF0))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT        (57U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK       (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN           (IMG_UINT64_C(0x0200000000000000))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT     (44U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK    (IMG_UINT64_C(0xFE000FFFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT     (40U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK    (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT    (4U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK   (IMG_UINT64_C(0xFFFFFF000000000F))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register DPX_CR_BIF_MMU_STATUS
+*/
+#define DPX_CR_BIF_MMU_STATUS                             (0xC5D8U)
+#define DPX_CR_BIF_MMU_STATUS_MASKFULL                    (IMG_UINT64_C(0x000000000FFFFFF7))
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT               (20U)
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK              (0xF00FFFFFU)
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT               (12U)
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK              (0xFFF00FFFU)
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT               (4U)
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK              (0xFFFFF00FU)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT               (2U)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK              (0xFFFFFFFBU)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_EN                  (0x00000004U)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT                (1U)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK               (0xFFFFFFFDU)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN                   (0x00000002U)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT                  (0U)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK                 (0xFFFFFFFEU)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_EN                     (0x00000001U)
+
+
+/*
+    Register DPX_CR_RT_PERF
+*/
+#define DPX_CR_RT_PERF                                    (0xC700U)
+#define DPX_CR_RT_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_RT_PERF_CLR_3_SHIFT                        (4U)
+#define DPX_CR_RT_PERF_CLR_3_CLRMSK                       (0xFFFFFFEFU)
+#define DPX_CR_RT_PERF_CLR_3_EN                           (0x00000010U)
+#define DPX_CR_RT_PERF_CLR_2_SHIFT                        (3U)
+#define DPX_CR_RT_PERF_CLR_2_CLRMSK                       (0xFFFFFFF7U)
+#define DPX_CR_RT_PERF_CLR_2_EN                           (0x00000008U)
+#define DPX_CR_RT_PERF_CLR_1_SHIFT                        (2U)
+#define DPX_CR_RT_PERF_CLR_1_CLRMSK                       (0xFFFFFFFBU)
+#define DPX_CR_RT_PERF_CLR_1_EN                           (0x00000004U)
+#define DPX_CR_RT_PERF_CLR_0_SHIFT                        (1U)
+#define DPX_CR_RT_PERF_CLR_0_CLRMSK                       (0xFFFFFFFDU)
+#define DPX_CR_RT_PERF_CLR_0_EN                           (0x00000002U)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK                 (0xFFFFFFFEU)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_EN                     (0x00000001U)
+
+
+/*
+    Register DPX_CR_RT_PERF_SELECT0
+*/
+#define DPX_CR_RT_PERF_SELECT0                            (0xC708U)
+#define DPX_CR_RT_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register DPX_CR_RT_PERF_COUNTER_0
+*/
+#define DPX_CR_RT_PERF_COUNTER_0                          (0xC730U)
+#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register DPX_CR_BX_TU_PERF
+*/
+#define DPX_CR_BX_TU_PERF                                 (0xC908U)
+#define DPX_CR_BX_TU_PERF_MASKFULL                        (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT                     (4U)
+#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK                    (0xFFFFFFEFU)
+#define DPX_CR_BX_TU_PERF_CLR_3_EN                        (0x00000010U)
+#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT                     (3U)
+#define DPX_CR_BX_TU_PERF_CLR_2_CLRMSK                    (0xFFFFFFF7U)
+#define DPX_CR_BX_TU_PERF_CLR_2_EN                        (0x00000008U)
+#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT                     (2U)
+#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK                    (0xFFFFFFFBU)
+#define DPX_CR_BX_TU_PERF_CLR_1_EN                        (0x00000004U)
+#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT                     (1U)
+#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK                    (0xFFFFFFFDU)
+#define DPX_CR_BX_TU_PERF_CLR_0_EN                        (0x00000002U)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT               (0U)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK              (0xFFFFFFFEU)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN                  (0x00000001U)
+
+
+/*
+    Register DPX_CR_BX_TU_PERF_SELECT0
+*/
+#define DPX_CR_BX_TU_PERF_SELECT0                         (0xC910U)
+#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL                (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT         (48U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK        (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT         (32U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK        (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT              (21U)
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN                 (IMG_UINT64_C(0x0000000000200000))
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT      (16U)
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT        (0U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register DPX_CR_BX_TU_PERF_COUNTER_0
+*/
+#define DPX_CR_BX_TU_PERF_COUNTER_0                       (0xC938U)
+#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT             (0U)
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK            (0x00000000U)
+
+
+/*
+    Register DPX_CR_RS_PDS_RR_CHECKSUM
+*/
+#define DPX_CR_RS_PDS_RR_CHECKSUM                         (0xC0F0U)
+#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT             (0U)
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK            (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT                  (0xE140U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL         (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT         (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK        (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MMU_CBASE_MAPPING
+*/
+#define RGX_CR_MMU_CBASE_MAPPING                          (0xE148U)
+#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL                 (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT          (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK         (0xF0000000U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT     (12U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE      (4096U)
+
+
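+/*
+    Usage sketch (illustrative only): BASE_ADDR carries a 4KB-aligned
+    physical address, as the ALIGNSHIFT (12) / ALIGNSIZE (4096) pair
+    suggests, so the address is pre-shifted down before being packed into
+    bits 27:0.
+*/
+static inline IMG_UINT32 RGXExample_MMUCBaseMapping(IMG_UINT64 ui64PhysAddr)
+{
+	return (IMG_UINT32)(ui64PhysAddr >>
+	                    RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT) &
+	       ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK;
+}
+
+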
+/*
+    Register RGX_CR_MMU_FAULT_STATUS
+*/
+#define RGX_CR_MMU_FAULT_STATUS                           (0xE150U)
+#define RGX_CR_MMU_FAULT_STATUS_MASKFULL                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT             (28U)
+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK            (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT             (20U)
+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFF00FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT              (12U)
+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFF00FFF))
+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT              (6U)
+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFFF03F))
+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT               (4U)
+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF))
+#define RGX_CR_MMU_FAULT_STATUS_RNW_SHIFT                 (3U)
+#define RGX_CR_MMU_FAULT_STATUS_RNW_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_RNW_EN                    (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT                (1U)
+#define RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_SHIFT               (0U)
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_EN                  (IMG_UINT64_C(0x0000000000000001))
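+
+/* Usage sketch (illustrative): each field of the fault status is extracted
+ * by masking with the inverse of its CLRMSK and shifting down by its SHIFT.
+ * The guard, helper and variable names are hypothetical.
+ */
+#if defined(RGX_EXAMPLE_SKETCHES)
+static inline IMG_UINT32 RGXExampleFaultStatusContext(IMG_UINT64 ui64FaultStatus)
+{
+	/* CONTEXT occupies bits [27:20] of the 64-bit status value. */
+	return (IMG_UINT32)((ui64FaultStatus & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK)
+	                    >> RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT);
+}
+#endif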
+
+
+/*
+    Register RGX_CR_MMU_FAULT_STATUS_META
+*/
+#define RGX_CR_MMU_FAULT_STATUS_META                      (0xE158U)
+#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT        (28U)
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK       (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT        (20U)
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFF00FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT         (12U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFF00FFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT         (6U)
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFF03F))
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT          (4U)
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT            (3U)
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN               (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT           (1U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT          (0U)
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN             (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_SLC3_CTRL_MISC
+*/
+#define RGX_CR_SLC3_CTRL_MISC                             (0xE200U)
+#define RGX_CR_SLC3_CTRL_MISC_MASKFULL                    (IMG_UINT64_C(0x0000000000000107))
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT        (8U)
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK       (0xFFFFFEFFU)
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN           (0x00000100U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT      (0U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK     (0xFFFFFFF8U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR     (0x00000000U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH (0x00000001U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH (0x00000002U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH (0x00000003U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH (0x00000004U)
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE
+*/
+#define RGX_CR_SLC3_SCRAMBLE                              (0xE208U)
+#define RGX_CR_SLC3_SCRAMBLE_MASKFULL                     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE_BITS_SHIFT                   (0U)
+#define RGX_CR_SLC3_SCRAMBLE_BITS_CLRMSK                  (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE2
+*/
+#define RGX_CR_SLC3_SCRAMBLE2                             (0xE210U)
+#define RGX_CR_SLC3_SCRAMBLE2_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE2_BITS_SHIFT                  (0U)
+#define RGX_CR_SLC3_SCRAMBLE2_BITS_CLRMSK                 (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE3
+*/
+#define RGX_CR_SLC3_SCRAMBLE3                             (0xE218U)
+#define RGX_CR_SLC3_SCRAMBLE3_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE3_BITS_SHIFT                  (0U)
+#define RGX_CR_SLC3_SCRAMBLE3_BITS_CLRMSK                 (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE4
+*/
+#define RGX_CR_SLC3_SCRAMBLE4                             (0xE260U)
+#define RGX_CR_SLC3_SCRAMBLE4_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE4_BITS_SHIFT                  (0U)
+#define RGX_CR_SLC3_SCRAMBLE4_BITS_CLRMSK                 (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_STATUS
+*/
+#define RGX_CR_SLC3_STATUS                                (0xE220U)
+#define RGX_CR_SLC3_STATUS_MASKFULL                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_STATUS_WRITES1_SHIFT                  (48U)
+#define RGX_CR_SLC3_STATUS_WRITES1_CLRMSK                 (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_SLC3_STATUS_WRITES0_SHIFT                  (32U)
+#define RGX_CR_SLC3_STATUS_WRITES0_CLRMSK                 (IMG_UINT64_C(0xFFFF0000FFFFFFFF))
+#define RGX_CR_SLC3_STATUS_READS1_SHIFT                   (16U)
+#define RGX_CR_SLC3_STATUS_READS1_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFF0000FFFF))
+#define RGX_CR_SLC3_STATUS_READS0_SHIFT                   (0U)
+#define RGX_CR_SLC3_STATUS_READS0_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_SLC3_IDLE
+*/
+#define RGX_CR_SLC3_IDLE                                  (0xE228U)
+#define RGX_CR_SLC3_IDLE_MASKFULL                         (IMG_UINT64_C(0x00000000000FFFFF))
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT               (18U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK              (0xFFF3FFFFU)
+#define RGX_CR_SLC3_IDLE_MMU_SHIFT                        (17U)
+#define RGX_CR_SLC3_IDLE_MMU_CLRMSK                       (0xFFFDFFFFU)
+#define RGX_CR_SLC3_IDLE_MMU_EN                           (0x00020000U)
+#define RGX_CR_SLC3_IDLE_RDI_SHIFT                        (16U)
+#define RGX_CR_SLC3_IDLE_RDI_CLRMSK                       (0xFFFEFFFFU)
+#define RGX_CR_SLC3_IDLE_RDI_EN                           (0x00010000U)
+#define RGX_CR_SLC3_IDLE_IMGBV4_SHIFT                     (12U)
+#define RGX_CR_SLC3_IDLE_IMGBV4_CLRMSK                    (0xFFFF0FFFU)
+#define RGX_CR_SLC3_IDLE_CACHE_BANKS_SHIFT                (4U)
+#define RGX_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK               (0xFFFFF00FU)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT                (2U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK               (0xFFFFFFF3U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT               (1U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK              (0xFFFFFFFDU)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_EN                  (0x00000002U)
+#define RGX_CR_SLC3_IDLE_XBAR_SHIFT                       (0U)
+#define RGX_CR_SLC3_IDLE_XBAR_CLRMSK                      (0xFFFFFFFEU)
+#define RGX_CR_SLC3_IDLE_XBAR_EN                          (0x00000001U)
+
+
+/*
+    Register RGX_CR_SLC3_FAULT_STOP_STATUS
+*/
+#define RGX_CR_SLC3_FAULT_STOP_STATUS                     (0xE248U)
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_MASKFULL            (IMG_UINT64_C(0x0000000000001FFF))
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT           (0U)
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK          (0xFFFFE000U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_MODE
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_MODE                     (0xF048U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MASKFULL            (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT          (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK         (0xFFFFFFFCU)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX          (0x00000000U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE       (0x00000001U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST           (0x00000002U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING0
+*/
+#define RGX_CR_CONTEXT_MAPPING0                           (0xF078U)
+#define RGX_CR_CONTEXT_MAPPING0_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT                  (24U)
+#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK                 (0x00FFFFFFU)
+#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT                 (16U)
+#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK                (0xFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT                  (8U)
+#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK                 (0xFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING0_TA_SHIFT                  (0U)
+#define RGX_CR_CONTEXT_MAPPING0_TA_CLRMSK                 (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING1
+*/
+#define RGX_CR_CONTEXT_MAPPING1                           (0xF080U)
+#define RGX_CR_CONTEXT_MAPPING1_MASKFULL                  (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_CONTEXT_MAPPING1_HOST_SHIFT                (8U)
+#define RGX_CR_CONTEXT_MAPPING1_HOST_CLRMSK               (0xFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING1_TLA_SHIFT                 (0U)
+#define RGX_CR_CONTEXT_MAPPING1_TLA_CLRMSK                (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING2
+*/
+#define RGX_CR_CONTEXT_MAPPING2                           (0xF088U)
+#define RGX_CR_CONTEXT_MAPPING2_MASKFULL                  (IMG_UINT64_C(0x0000000000FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT              (16U)
+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK             (0xFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT                 (8U)
+#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK                (0xFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT                (0U)
+#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK               (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING3
+*/
+#define RGX_CR_CONTEXT_MAPPING3                           (0xF090U)
+#define RGX_CR_CONTEXT_MAPPING3_MASKFULL                  (IMG_UINT64_C(0x0000000000FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT              (16U)
+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK             (0xFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT                 (8U)
+#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK                (0xFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT                (0U)
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK               (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_BIF_JONES_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ                 (0xF098U)
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL        (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT   (0U)
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK  (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ            (0xF0A0U)
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL   (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT (0U)
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIF_DUST_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ                  (0xF0A8U)
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL         (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT    (0U)
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK   (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING4
+*/
+#define RGX_CR_CONTEXT_MAPPING4                           (0xF210U)
+#define RGX_CR_CONTEXT_MAPPING4_MASKFULL                  (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT        (40U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK       (IMG_UINT64_C(0xFFFF00FFFFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT          (32U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK         (IMG_UINT64_C(0xFFFFFF00FFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT           (24U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK          (IMG_UINT64_C(0xFFFFFFFF00FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT        (16U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFF00FFFF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT          (8U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFF00FF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT           (0U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+#endif /* RGX_CR_DEFS_KM_H */
+
+/*****************************************************************************
+ End of file (rgx_cr_defs_km.h)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgxdefs_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgxdefs_km.h
new file mode 100644
index 0000000..8f7ab7e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgxdefs_km.h
@@ -0,0 +1,299 @@
+/*************************************************************************/ /*!
+@Title          Rogue hw definitions (kernel mode)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXDEFS_KM_H
+#define RGXDEFS_KM_H
+
+#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER)
+#include RGX_BVNC_CORE_KM_HEADER
+#include RGX_BNC_CONFIG_KM_HEADER
+#endif
+
+#define IMG_EXPLICIT_INCLUDE_HWDEFS
+#if defined(__KERNEL__)
+#include "rgx_cr_defs_km.h"
+#endif
+#undef IMG_EXPLICIT_INCLUDE_HWDEFS
+
+#include "rgx_heap_firmware.h"
+
+/* The following macros are picked up through the BVNC headers for PDUMP and
+ * NO_HARDWARE builds, to remain compatible with the old build infrastructure.
+ */
+#if defined(PDUMP) || defined(NO_HARDWARE) || !defined(SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION)
+/******************************************************************************
+ * Check for valid B.X.N.C
+ *****************************************************************************/
+#if !defined(RGX_BVNC_KM_B) || !defined(RGX_BVNC_KM_V) || !defined(RGX_BVNC_KM_N) || !defined(RGX_BVNC_KM_C)
+#error "Need to specify BVNC (RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N and RGX_BVNC_C)"
+#endif
+#endif
+
+#if defined(PDUMP) || defined(NO_HARDWARE)
+/* Check core/config compatibility */
+#if (RGX_BVNC_KM_B != RGX_BNC_KM_B) || (RGX_BVNC_KM_N != RGX_BNC_KM_N) || (RGX_BVNC_KM_C != RGX_BNC_KM_C)
+#error "BVNC headers are mismatching (KM core/config)"
+#endif
+
+#endif
+
+/******************************************************************************
+ * RGX Version name
+ *****************************************************************************/
+#define RGX_BVNC_ST2(S)	#S
+#define RGX_BVNC_ST(S)		RGX_BVNC_ST2(S)
+#define RGX_BVNC_KM			RGX_BVNC_ST(RGX_BVNC_KM_B) "." RGX_BVNC_ST(RGX_BVNC_KM_V) "." RGX_BVNC_ST(RGX_BVNC_KM_N) "." RGX_BVNC_ST(RGX_BVNC_KM_C)
+#define RGX_BVNC_KM_V_ST	RGX_BVNC_ST(RGX_BVNC_KM_V)
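+/* e.g. with RGX_BVNC_KM_B=4, V=40, N=2 and C=51 (illustrative values only),
+ * RGX_BVNC_KM expands to the string literal "4.40.2.51". */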
+
+/* Maximum string size is [bb.vvvp.nnnn.cccc\0], including the null terminator */
+#define RGX_BVNC_STR_SIZE_MAX (2+1+4+1+4+1+4+1)
+#define RGX_BVNC_STR_FMTSPEC  "%u.%u.%u.%u"
+#define RGX_BVNC_STRP_FMTSPEC "%u.%up.%u.%u"
+
+
+/******************************************************************************
+ * RGX Defines
+ *****************************************************************************/
+
+#define BVNC_FIELD_MASK     ((1 << BVNC_FIELD_WIDTH) - 1)
+#define C_POSITION          (0)
+#define N_POSITION          ((C_POSITION) + (BVNC_FIELD_WIDTH))
+#define V_POSITION          ((N_POSITION) + (BVNC_FIELD_WIDTH))
+#define B_POSITION          ((V_POSITION) + (BVNC_FIELD_WIDTH))
+
+#define B_POSITION_MASK     (((IMG_UINT64)(BVNC_FIELD_MASK) << (B_POSITION)))
+#define V_POSITION_MASK     (((IMG_UINT64)(BVNC_FIELD_MASK) << (V_POSITION)))
+#define N_POSITION_MASK     (((IMG_UINT64)(BVNC_FIELD_MASK) << (N_POSITION)))
+#define C_POSITION_MASK     (((IMG_UINT64)(BVNC_FIELD_MASK) << (C_POSITION)))
+
+#define GET_B(x)            (((x) & (B_POSITION_MASK)) >> (B_POSITION))
+#define GET_V(x)            (((x) & (V_POSITION_MASK)) >> (V_POSITION))
+#define GET_N(x)            (((x) & (N_POSITION_MASK)) >> (N_POSITION))
+#define GET_C(x)            (((x) & (C_POSITION_MASK)) >> (C_POSITION))
+
+#define BVNC_PACK(B,V,N,C)  ((((IMG_UINT64)B)) << (B_POSITION) | \
+                             (((IMG_UINT64)V)) << (V_POSITION) | \
+                             (((IMG_UINT64)N)) << (N_POSITION) | \
+                             (((IMG_UINT64)C)) << (C_POSITION) \
+                            )
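+
+/* Usage sketch (illustrative): values round-trip through BVNC_PACK()/GET_*(),
+ * e.g. with ui64BVNC = BVNC_PACK(4, 40, 2, 51) (illustrative values only):
+ *   GET_B(ui64BVNC) == 4, GET_V(ui64BVNC) == 40,
+ *   GET_N(ui64BVNC) == 2, GET_C(ui64BVNC) == 51
+ * provided each value fits in BVNC_FIELD_WIDTH bits.
+ */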
+
+#define RGX_CR_CORE_ID_CONFIG_N_SHIFT    (8U)
+#define RGX_CR_CORE_ID_CONFIG_C_SHIFT    (0U)
+
+#define RGX_CR_CORE_ID_CONFIG_N_CLRMSK   (0XFFFF00FFU)
+#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK   (0XFFFFFF00U)
+
+/* The default number of OSIDs is 1; a higher number implies VZ-enabled firmware */
+#if !defined(RGXFW_NATIVE) && defined(PVRSRV_VZ_NUM_OSID) && (PVRSRV_VZ_NUM_OSID + 1U > 1U)
+#define RGXFW_NUM_OS PVRSRV_VZ_NUM_OSID
+#else
+#define RGXFW_NUM_OS 1U
+#endif
+
+#define RGXFW_MAX_NUM_OS                                  (8U)
+#define RGXFW_HYPERVISOR_OS                               (0U)
+#define RGXFW_GUEST_OSID_START                            (1U)
+
+#define RGXFW_THREAD_0                                    (0U)
+#define RGXFW_THREAD_1                                    (1U)
+
+/* META core variants (possible values of RGX_FEATURE_META) */
+#define MTP218   (1)
+#define MTP219   (2)
+#define LTP218   (3)
+#define LTP217   (4)
+
+/* META core memory (CoreMem) sizes, which depend on the META variant */
+#define RGX_META_COREMEM_32K      (32*1024)
+#define RGX_META_COREMEM_48K      (48*1024)
+#define RGX_META_COREMEM_64K      (64*1024)
+#define RGX_META_COREMEM_96K      (96*1024)
+#define RGX_META_COREMEM_128K     (128*1024)
+#define RGX_META_COREMEM_256K     (256*1024)
+
+#if !defined(SUPPORT_MULTIBVNC)
+#if (!defined(SUPPORT_TRUSTED_DEVICE) || defined(RGX_FEATURE_META_DMA)) && \
+    (defined(RGX_FEATURE_META_COREMEM_SIZE) && RGX_FEATURE_META_COREMEM_SIZE != 0)
+#define RGX_META_COREMEM_SIZE     (RGX_FEATURE_META_COREMEM_SIZE*1024U)
+#define RGX_META_COREMEM          (1)
+#define RGX_META_COREMEM_CODE     (1)
+#if !defined(FIX_HW_BRN_50767) && (RGXFW_NUM_OS == 1)
+#define RGX_META_COREMEM_DATA     (1)
+#endif
+#else
+#undef SUPPORT_META_COREMEM
+#undef RGX_FEATURE_META_COREMEM_SIZE
+#undef RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_META_COREMEM_SIZE         (0)
+#endif
+#endif
+
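+/* Convert a cache line size given in bits to bytes (0 if the size is not positive) */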
+#define GET_ROGUE_CACHE_LINE_SIZE(x)    ((((IMG_INT32)x) > 0) ? ((x)/8) : (0))
+
+
+#define MAX_HW_TA3DCONTEXTS	2U
+
+#define RGX_CR_SOFT_RESET_DUST_n_CORE_EN	(RGX_CR_SOFT_RESET_DUST_A_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_B_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_C_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_D_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_E_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_F_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_G_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_H_CORE_EN)
+
+/* SOFT_RESET Rascal and DUSTs bits */
+#define RGX_CR_SOFT_RESET_RASCALDUSTS_EN	(RGX_CR_SOFT_RESET_RASCAL_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+
+
+
+/* SOFT_RESET steps as defined in the TRM */
+#define RGX_S7_SOFT_RESET_DUSTS (RGX_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+#define RGX_S7_SOFT_RESET_JONES (RGX_CR_SOFT_RESET_PM_EN  | \
+                                 RGX_CR_SOFT_RESET_VDM_EN | \
+                                 RGX_CR_SOFT_RESET_ISP_EN)
+
+#define RGX_S7_SOFT_RESET_JONES_ALL (RGX_S7_SOFT_RESET_JONES  | \
+                                     RGX_CR_SOFT_RESET_BIF_EN | \
+                                     RGX_CR_SOFT_RESET_SLC_EN | \
+                                     RGX_CR_SOFT_RESET_GARTEN_EN)
+
+#define RGX_S7_SOFT_RESET2 (RGX_CR_SOFT_RESET2_BLACKPEARL_EN | \
+                            RGX_CR_SOFT_RESET2_PIXEL_EN | \
+                            RGX_CR_SOFT_RESET2_CDM_EN | \
+                            RGX_CR_SOFT_RESET2_VERTEX_EN)
+
+
+
+#define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT		(12)
+#define RGX_BIF_PM_PHYSICAL_PAGE_SIZE			(1 << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+
+#define RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT		(14)
+#define RGX_BIF_PM_VIRTUAL_PAGE_SIZE			(1 << RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT)
+
+#define RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE	(16U)
+
+/* To get the number of required Dusts, divide the number of
+ * clusters by 2 and round up
+ */
+#define RGX_REQ_NUM_DUSTS(CLUSTERS)    ((CLUSTERS + 1U) / 2U)
+
+/* To get the number of required Bernado/Phantom(s), divide
+ * the number of clusters by 4 and round up
+ */
+#define RGX_REQ_NUM_PHANTOMS(CLUSTERS) ((CLUSTERS + 3U) / 4U)
+#define RGX_REQ_NUM_BERNADOS(CLUSTERS) ((CLUSTERS + 3U) / 4U)
+#define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) ((CLUSTERS + 3U) / 4U)
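+/* e.g. a 6-cluster core requires RGX_REQ_NUM_DUSTS(6) = 3 Dusts and
+ * RGX_REQ_NUM_PHANTOMS(6) = 2 Phantoms. */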
+
+#if !defined(SUPPORT_MULTIBVNC)
+# define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS))
+#endif
+
+
+/* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT is not defined for format 1 cores (so define it now). */
+#if !defined(RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT)
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1)
+#endif
+
+/* The META second-thread feature depends on the META variant and on the
+ * amount of CoreMem available.
+ */
+#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && (RGX_FEATURE_META_COREMEM_SIZE == 256)
+#define RGXFW_META_SUPPORT_2ND_THREAD
+#endif
+
+
+/******************************************************************************
+ * WA HWBRNs
+ *****************************************************************************/
+#if defined(FIX_HW_BRN_36492)
+
+#undef RGX_CR_SOFT_RESET_SLC_EN
+#undef RGX_CR_SOFT_RESET_SLC_CLRMSK
+#undef RGX_CR_SOFT_RESET_SLC_SHIFT
+
+/* Remove the SOFT_RESET_SLC_EN bit from SOFT_RESET_MASKFULL */
+#undef RGX_CR_SOFT_RESET_MASKFULL
+#define RGX_CR_SOFT_RESET_MASKFULL IMG_UINT64_C(0x000001FFF7FFFC1D)
+
+#endif /* FIX_HW_BRN_36492 */
+
+
+#if defined(RGX_CR_JONES_IDLE_MASKFULL)
+/* Workaround for HW BRN 57289 */
+#if (RGX_CR_JONES_IDLE_MASKFULL != 0x0000000000007FFF)
+#error This WA must be updated if RGX_CR_JONES_IDLE is expanded!!!
+#endif
+#undef RGX_CR_JONES_IDLE_MASKFULL
+#undef RGX_CR_JONES_IDLE_TDM_SHIFT
+#undef RGX_CR_JONES_IDLE_TDM_CLRMSK
+#undef RGX_CR_JONES_IDLE_TDM_EN
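+/* Re-define the idle mask without bit 14, the TDM idle bit undefined above */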
+#define RGX_CR_JONES_IDLE_MASKFULL                        (IMG_UINT64_C(0x0000000000003FFF))
+#endif
+
+#if !defined(SUPPORT_MULTIBVNC)
+#if !defined(RGX_FEATURE_SLC_SIZE_IN_BYTES)
+#if defined(RGX_FEATURE_SLC_SIZE_IN_KILOBYTES)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (RGX_FEATURE_SLC_SIZE_IN_KILOBYTES * 1024)
+#else
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (0)
+#endif
+#endif
+#endif
+
+#if !defined(SUPPORT_MULTIBVNC)
+
+#if defined(RGX_FEATURE_ROGUEXE)
+#define RGX_NUM_RASTERISATION_MODULES	RGX_FEATURE_NUM_CLUSTERS
+#else
+#define RGX_NUM_RASTERISATION_MODULES	RGX_NUM_PHANTOMS
+#endif
+
+#endif /* !defined(SUPPORT_MULTIBVNC) */
+
+/* GPU CR timer tick in GPU cycles */
+#define RGX_CRTIME_TICK_IN_CYCLES (256U)
+
+#endif /* RGXDEFS_KM_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgxmhdefs_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgxmhdefs_km.h
new file mode 100644
index 0000000..600380c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgxmhdefs_km.h
@@ -0,0 +1,339 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgxmhdefs_km.h
+@Brief          The file contains auto-generated hardware definitions without
+                BVNC-specific compile time conditionals.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ *      rogue_mh.def
+ */
+
+
+#ifndef RGXMHDEFS_KM_H
+#define RGXMHDEFS_KM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+
+#define RGXMHDEFS_KM_REVISION 0
+
+/*
+Encoding of MH_TAG_SB for TDM CTL
+*/
+#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE  (0x00000000U)
+#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT (0x00000001U)
+#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE  (0x00000002U)
+
+
+/*
+Encoding of MH_TAG_SB for TDM DMA
+*/
+#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM (0x00000000U)
+#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER (0x00000001U)
+#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL (0x00000002U)
+
+
+/*
+Encoding of MH_TAG_SB for PMD
+*/
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK    (0x00000008U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST     (0x00000009U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK    (0x0000000aU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST     (0x0000000bU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0      (0x0000000cU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1      (0x0000002dU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK     (0x0000000fU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK  (0x00000012U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK  (0x00000013U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK   (0x00000016U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK   (0x00000017U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP       (0x00000019U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP       (0x0000001aU)
+
+
+/*
+Encoding of MH_TAG_SB for PMA
+*/
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK    (0x00000000U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST     (0x00000001U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK    (0x00000002U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST     (0x00000003U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0      (0x00000004U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1      (0x00000025U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP        (0x00000006U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK     (0x00000007U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK  (0x00000008U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK  (0x00000009U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK   (0x00000014U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK   (0x00000015U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP       (0x00000018U)
+
+
+/*
+Encoding of MH_TAG_SB for TA
+*/
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PPP              (0x00000008U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCERTC           (0x00000007U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TEACRTC          (0x00000006U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGRTC           (0x00000005U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGR             (0x00000004U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGS             (0x00000003U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TPC              (0x00000002U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCE              (0x00000001U)
+
+
+/*
+Encoding of MH_TAG_SB for IPF when there are 2 IPF pipes
+*/
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ00 (0x00000000U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ01 (0x00000001U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ00 (0x00000002U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ01 (0x00000003U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_RREQ (0x00000004U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DBSC (0x00000005U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CPF (0x00000006U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DELTA (0x00000007U)
+
+
+/*
+Encoding of MH_TAG_SB for IPF when there are 4 IPF pipes
+*/
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ00 (0x00000000U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ01 (0x00000001U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ02 (0x00000002U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ03 (0x00000003U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ00 (0x00000004U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ01 (0x00000005U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ02 (0x00000006U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ03 (0x00000007U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_RREQ (0x00000008U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DBSC (0x00000009U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CPF (0x0000000aU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DELTA (0x0000000bU)
+
+
+/*
+Encoding of MH_TAG_SB for IPF when there are 7 IPF pipes
+*/
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ00 (0x00000000U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ01 (0x00000001U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ02 (0x00000002U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ03 (0x00000003U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ04 (0x00000004U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ05 (0x00000005U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ06 (0x00000006U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ00 (0x00000007U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ01 (0x00000008U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ02 (0x00000009U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ03 (0x0000000aU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ04 (0x0000000bU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ05 (0x0000000cU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ06 (0x0000000dU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_RREQ (0x0000000eU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DBSC (0x0000000fU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CPF (0x00000010U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DELTA (0x00000011U)
+
+
+/*
+Encoding of MH_TAG_SB for IPF when there are 14 IPF pipes
+*/
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ00 (0x00000000U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ01 (0x00000001U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ02 (0x00000002U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ03 (0x00000003U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ04 (0x00000004U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ05 (0x00000005U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ06 (0x00000006U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ07 (0x00000007U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ08 (0x00000008U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ09 (0x00000009U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ10 (0x0000000aU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ11 (0x0000000bU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ12 (0x0000000cU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ13 (0x0000000dU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ00 (0x0000000eU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ01 (0x0000000fU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ02 (0x00000010U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ03 (0x00000011U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ04 (0x00000012U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ05 (0x00000013U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ06 (0x00000014U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ07 (0x00000015U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ08 (0x00000016U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ09 (0x00000017U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ10 (0x00000018U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ11 (0x00000019U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ12 (0x0000001aU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ13 (0x0000001bU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_RREQ (0x0000001cU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DBSC (0x0000001dU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CPF (0x0000001eU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DELTA (0x0000001fU)
+
+
+/*
+Encoding of MH_TAG_SB for TPF
+*/
+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE      (0x00000000U)
+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS     (0x00000001U)
+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA (0x00000002U)
+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA     (0x00000003U)
+
+
+/*
+Encoding of MH_TAG_SB for ISP
+*/
+#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_ZLS            (0x00000000U)
+#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_DS             (0x00000001U)
+
+
+/*
+Encoding of MH_TAG_SB for VDM
+*/
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL        (0x00000000U)
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE          (0x00000001U)
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX          (0x00000002U)
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK          (0x00000004U)
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT        (0x00000008U)
+
+
+/*
+Encoding of MH_TAG_SB for CDM
+*/
+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM (0x00000000U)
+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA  (0x00000001U)
+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA     (0x00000002U)
+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE  (0x00000003U)
+
+
+/*
+Encoding of MH_TAG_SB for MIPS
+*/
+#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH (0x00000002U)
+#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS  (0x00000003U)
+
+
+/*
+Encoding of MH_TAG_SB for MMU
+*/
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST     (0x00000000U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST     (0x00000001U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST     (0x00000002U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST  (0x00000003U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST  (0x00000004U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST  (0x00000005U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST (0x00000006U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST (0x00000007U)
+
+
+/*
+Encoding of MH TAG
+*/
+#define RGX_MH_TAG_ENCODING_MH_TAG_MMU_PT                 (0x00000000U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MMU_PD                 (0x00000001U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MMU_PC                 (0x00000002U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MMU_PM                 (0x00000003U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MIPS                   (0x00000004U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0               (0x00000005U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1               (0x00000006U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2               (0x00000007U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3               (0x00000008U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0               (0x00000009U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1               (0x0000000aU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2               (0x0000000bU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3               (0x0000000cU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4               (0x0000000dU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_0                  (0x0000000eU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_1                  (0x0000000fU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA               (0x00000010U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB               (0x00000011U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC               (0x00000012U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD               (0x00000013U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA           (0x00000014U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB           (0x00000015U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC           (0x00000016U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD           (0x00000017U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW              (0x00000018U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_0                  (0x00000019U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_1                  (0x0000001aU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0                (0x0000001bU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1                (0x0000001cU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_USC                    (0x0000001fU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS                (0x00000020U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS                 (0x00000021U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF                    (0x00000022U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS           (0x00000023U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF                (0x00000024U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ               (0x00000025U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS             (0x00000026U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5               (0x00000027U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP                 (0x00000028U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC              (0x00000029U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC             (0x0000002aU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC              (0x0000002bU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION           (0x0000002cU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM           (0x0000002dU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW                 (0x0000002eU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC                 (0x0000002fU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC               (0x00000030U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC             (0x00000031U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA                (0x00000032U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL                (0x00000033U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PBE0                   (0x00000034U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PBE1                   (0x00000035U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_IPP                    (0x00000038U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_ISP1_ZLS               (0x00000039U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_ISP1_DS                (0x0000003aU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF1                   (0x0000003bU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF1_PBCDBIAS          (0x0000003cU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF1_SPF               (0x0000003dU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_IPF1_CREQ              (0x0000003eU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_IPF1_OTHERS            (0x0000003fU)
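+
+/* Illustrative note: these MH tags typically identify the requesting unit in
+ * fault reports (e.g. the REQ_ID field of RGX_CR_MMU_FAULT_STATUS), with the
+ * per-unit RGX_MH_TAG_SB_* tables above decoding the TAG_SB sub-block. */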
+
+#endif /* RGXMHDEFS_KM_H */
+
+/*****************************************************************************
+ End of file (rgxmhdefs_km.h)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgxmmudefs_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgxmmudefs_km.h
new file mode 100644
index 0000000..1b04267
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/rgxmmudefs_km.h
@@ -0,0 +1,350 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgxmmudefs_km.h
+@Brief          The file contains auto-generated hardware definitions without
+                BVNC-specific compile time conditionals.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ *      rogue_bif.def
+ */
+
+
+#ifndef RGXMMUDEFS_KM_H
+#define RGXMMUDEFS_KM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+
+#define RGXMMUDEFS_KM_REVISION 0
+
+/*
+Encoding of DM (note value 0x6 not used)
+*/
+#define RGX_BIF_DM_ENCODING_VERTEX                        (0x00000000U)
+#define RGX_BIF_DM_ENCODING_PIXEL                         (0x00000001U)
+#define RGX_BIF_DM_ENCODING_COMPUTE                       (0x00000002U)
+#define RGX_BIF_DM_ENCODING_TLA                           (0x00000003U)
+#define RGX_BIF_DM_ENCODING_PB_VCE                        (0x00000004U)
+#define RGX_BIF_DM_ENCODING_PB_TE                         (0x00000005U)
+#define RGX_BIF_DM_ENCODING_META                          (0x00000007U)
+#define RGX_BIF_DM_ENCODING_HOST                          (0x00000008U)
+#define RGX_BIF_DM_ENCODING_PM_ALIST                      (0x00000009U)
+
+
+/*
+Labelling of fields within virtual address
+*/
+/*
+Page Catalogue entry #
+*/
+#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT                  (30U)
+#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK                 (IMG_UINT64_C(0xFFFFFF003FFFFFFF))
+/*
+Page Directory entry #
+*/
+#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT                  (21U)
+#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFFC01FFFFF))
+/*
+Page Table entry #
+*/
+#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT                  (12U)
+#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFFFFE00FFF))
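+
+/* Usage sketch (illustrative): splitting a device virtual address into its
+ * catalogue/directory/table indices using the fields above. The guard,
+ * helper and parameter names are hypothetical.
+ */
+#if defined(RGX_EXAMPLE_SKETCHES)
+static inline void RGXExampleSplitVAddr(IMG_UINT64 ui64VAddr,
+                                        IMG_UINT32 *pui32PCIdx,
+                                        IMG_UINT32 *pui32PDIdx,
+                                        IMG_UINT32 *pui32PTIdx)
+{
+	*pui32PCIdx = (IMG_UINT32)((ui64VAddr & ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK)
+	                           >> RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT);
+	*pui32PDIdx = (IMG_UINT32)((ui64VAddr & ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK)
+	                           >> RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT);
+	*pui32PTIdx = (IMG_UINT32)((ui64VAddr & ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK)
+	                           >> RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT);
+}
+#endif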
+
+
+/*
+Number of entries in a PC
+*/
+#define RGX_MMUCTRL_ENTRIES_PC_VALUE                      (0x00000400U)
+
+
+/*
+Number of entries in a PD
+*/
+#define RGX_MMUCTRL_ENTRIES_PD_VALUE                      (0x00000200U)
+
+
+/*
+Number of entries in a PT
+*/
+#define RGX_MMUCTRL_ENTRIES_PT_VALUE                      (0x00000200U)
+
+
+/*
+Size in bits of the PC entries in memory
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE                   (0x00000020U)
+
+
+/*
+Size in bits of the PD entries in memory
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE                   (0x00000040U)
+
+
+/*
+Size in bits of the PT entries in memory
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE                   (0x00000040U)
+
+
+/*
+Encoding of page size field
+*/
+#define RGX_MMUCTRL_PAGE_SIZE_MASK                        (0x00000007U)
+#define RGX_MMUCTRL_PAGE_SIZE_4KB                         (0x00000000U)
+#define RGX_MMUCTRL_PAGE_SIZE_16KB                        (0x00000001U)
+#define RGX_MMUCTRL_PAGE_SIZE_64KB                        (0x00000002U)
+#define RGX_MMUCTRL_PAGE_SIZE_256KB                       (0x00000003U)
+#define RGX_MMUCTRL_PAGE_SIZE_1MB                         (0x00000004U)
+#define RGX_MMUCTRL_PAGE_SIZE_2MB                         (0x00000005U)
+
+
+/*
+Range of bits used for 4KB Physical Page
+*/
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT                  (12U)
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK                 (IMG_UINT64_C(0xFFFFFF0000000FFF))
+
+
+/*
+Range of bits used for 16KB Physical Page
+*/
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT                 (14U)
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK                (IMG_UINT64_C(0xFFFFFF0000003FFF))
+
+
+/*
+Range of bits used for 64KB Physical Page
+*/
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT                 (16U)
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK                (IMG_UINT64_C(0xFFFFFF000000FFFF))
+
+
+/*
+Range of bits used for 256KB Physical Page
+*/
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT                (18U)
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK               (IMG_UINT64_C(0xFFFFFF000003FFFF))
+
+
+/*
+Range of bits used for 1MB Physical Page
+*/
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT                  (20U)
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK                 (IMG_UINT64_C(0xFFFFFF00000FFFFF))
+
+
+/*
+Range of bits used for 2MB Physical Page
+*/
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT                  (21U)
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK                 (IMG_UINT64_C(0xFFFFFF00001FFFFF))
+
+
+/*
+Range of bits used for PT Base Address for 4KB Physical Page
+*/
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT               (12U)
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK              (IMG_UINT64_C(0xFFFFFF0000000FFF))
+
+
+/*
+Range of bits used for PT Base Address for 16KB Physical Page
+*/
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT              (10U)
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK             (IMG_UINT64_C(0xFFFFFF00000003FF))
+
+
+/*
+Range of bits used for PT Base Address for 64KB Physical Page
+*/
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT              (8U)
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK             (IMG_UINT64_C(0xFFFFFF00000000FF))
+
+
+/*
+Range of bits used for PT Base Address for 256KB Physical Page
+*/
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT             (6U)
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK            (IMG_UINT64_C(0xFFFFFF000000003F))
+
+
+/*
+Range of bits used for PT Base Address for 1MB Physical Page
+*/
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT               (5U)
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK              (IMG_UINT64_C(0xFFFFFF000000001F))
+
+
+/*
+Range of bits used for PT Base Address for 2MB Physical Page
+*/
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT               (5U)
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK              (IMG_UINT64_C(0xFFFFFF000000001F))
+
+
+/*
+Format of Page Table data
+*/
+/*
+PM/Meta protect bit
+*/
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT         (62U)
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK        (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN            (IMG_UINT64_C(0x4000000000000000))
+/*
+Upper part of vp page field
+*/
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT              (40U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK             (IMG_UINT64_C(0xC00000FFFFFFFFFF))
+/*
+Physical page address
+*/
+#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT                    (12U)
+#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK                   (IMG_UINT64_C(0xFFFFFF0000000FFF))
+/*
+Lower part of vp page field
+*/
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT              (6U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFFF03F))
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT           (5U)
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN              (IMG_UINT64_C(0x0000000000000020))
+/*
+PM Src
+*/
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT                  (4U)
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN                     (IMG_UINT64_C(0x0000000000000010))
+/*
+SLC Bypass Ctrl
+*/
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT         (3U)
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN            (IMG_UINT64_C(0x0000000000000008))
+/*
+Cache Coherency bit
+*/
+#define RGX_MMUCTRL_PT_DATA_CC_SHIFT                      (2U)
+#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_MMUCTRL_PT_DATA_CC_EN                         (IMG_UINT64_C(0x0000000000000004))
+/*
+Read only
+*/
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT               (1U)
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN                  (IMG_UINT64_C(0x0000000000000002))
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT                   (0U)
+#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PT_DATA_VALID_EN                      (IMG_UINT64_C(0x0000000000000001))
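+
+/* Usage sketch (illustrative): a minimal valid 4KB page-table entry is the
+ * 4KB-aligned physical address (already in field position, since PAGE_SHIFT
+ * is 12) plus the VALID bit, with READ_ONLY optional. The guard, helper and
+ * parameter names are hypothetical.
+ */
+#if defined(RGX_EXAMPLE_SKETCHES)
+static inline IMG_UINT64 RGXExampleMakePTE(IMG_UINT64 ui64PhysAddr,
+                                           IMG_BOOL bReadOnly)
+{
+	IMG_UINT64 ui64PTE = (ui64PhysAddr & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK)
+	                     | RGX_MMUCTRL_PT_DATA_VALID_EN;
+	if (bReadOnly)
+	{
+		ui64PTE |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN;
+	}
+	return ui64PTE;
+}
+#endif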
+
+
+/*
+Format of Page Directory data
+*/
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT           (40U)
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK          (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN              (IMG_UINT64_C(0x0000010000000000))
+/*
+Page Table base address
+*/
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT                 (5U)
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK                (IMG_UINT64_C(0xFFFFFF000000001F))
+/*
+Page Size
+*/
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT               (1U)
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFFF1))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB                 (IMG_UINT64_C(0x0000000000000000))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB                (IMG_UINT64_C(0x0000000000000002))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB                (IMG_UINT64_C(0x0000000000000004))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB               (IMG_UINT64_C(0x0000000000000006))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB                 (IMG_UINT64_C(0x0000000000000008))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB                 (IMG_UINT64_C(0x000000000000000a))
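+/* Note: these are the RGX_MMUCTRL_PAGE_SIZE_* encodings placed at
+ * PAGE_SIZE_SHIFT, i.e. (encoding << 1). */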
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT                   (0U)
+#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PD_DATA_VALID_EN                      (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+Format of Page Catalogue data
+*/
+/*
+Page Catalogue base address
+*/
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT                 (4U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK                (0x0000000FU)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT            (12U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE             (4096U)
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT           (1U)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK          (0xFFFFFFFDU)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN              (0x00000002U)
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT                   (0U)
+#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK                  (0xFFFFFFFEU)
+#define RGX_MMUCTRL_PC_DATA_VALID_EN                      (0x00000001U)
+
+
+#endif /* RGXMMUDEFS_KM_H */
+
+/*****************************************************************************
+ End of file (rgxmmudefs_km.h)
+*****************************************************************************/
+
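The CLRMSK/SHIFT/EN triplets above follow the usual convention: AND with the
CLRMSK to clear a field, then OR in the shifted value or the _EN flag to set
it. A minimal illustrative sketch (not part of the patch; make_pt_entry and
phys_addr are hypothetical):

	/* Build a valid, cache-coherent 4KB page table entry from a
	 * page-aligned physical address. */
	static IMG_UINT64 make_pt_entry(IMG_UINT64 phys_addr)
	{
		return (phys_addr & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK)
		       | RGX_MMUCTRL_PT_DATA_CC_EN
		       | RGX_MMUCTRL_PT_DATA_VALID_EN;
	}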
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/tpu_cacheability_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/tpu_cacheability_km.h
new file mode 100644
index 0000000..6c5b789
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km/tpu_cacheability_km.h
@@ -0,0 +1,50 @@
+/*************************************************************************/ /*!
+@Title          Policy for when to enable SLC caching for the TPU.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef TPU_CACHEABILITY_KM_H
+#define TPU_CACHEABILITY_KM_H
+
+/*
+  If the size of the SLC is less than this value then the TPU bypasses the SLC.
+ */
+#define RGX_TPU_CACHED_SLC_SIZE_THRESHOLD			(128*1024)
+
+#endif /* TPU_CACHEABILITY_KM_H */
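A hedged sketch of how the threshold above might be applied at device
initialisation (slc_size_in_bytes is hypothetical):

	/* Bypass the SLC for TPU traffic on small SLC configurations. */
	IMG_BOOL bTPUCacheable =
		(slc_size_in_bytes >= RGX_TPU_CACHED_SLC_SIZE_THRESHOLD)
		? IMG_TRUE : IMG_FALSE;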
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km_apphint.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km_apphint.c
new file mode 100644
index 0000000..dd76384
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km_apphint.c
@@ -0,0 +1,1485 @@
+/*************************************************************************/ /*!
+@File           km_apphint.c
+@Title          Apphint routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Linux kernel AppHint routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debugfs.h"
+#include "pvr_uaccess.h"
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <stdbool.h>
+
+/* Common and SO layer */
+#include "img_defs.h"
+#include "sofunc_pvr.h"
+
+/* for action device access */
+#include "pvrsrv.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgxhwperf.h"
+#include "htbserver.h"
+#include "rgxutils.h"
+#include "rgxapi_km.h"
+
+
+/* defines for default values */
+#include "rgx_fwif_km.h"
+#include "htbuffer_types.h"
+
+#include "pvr_notifier.h"
+
+#include "km_apphint_defs.h"
+#include "km_apphint.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#include "pdump_km.h"
+#endif
+
+/* Size of temporary buffers used to read and write AppHint data.
+ * Must be large enough to contain any string read or written, but no larger
+ * than 4096 (the buffer size for the kernel_param_ops .get function), and
+ * below 1024 to keep the stack frame size within bounds.
+ */
+#define APPHINT_BUFFER_SIZE 512
+
+#define APPHINT_DEVICES_MAX 16
+
+/*
+*******************************************************************************
+ * AppHint mnemonic data type helper tables
+******************************************************************************/
+struct apphint_lookup {
+	const char *name;
+	int value;
+};
+
+static const struct apphint_lookup fwt_logtype_tbl[] = {
+	{ "trace", 2},
+	{ "tbi", 1},
+	{ "none", 0}
+};
+
+static const struct apphint_lookup fwt_loggroup_tbl[] = {
+	RGXFWIF_LOG_GROUP_NAME_VALUE_MAP
+};
+
+static const struct apphint_lookup htb_loggroup_tbl[] = {
+#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) },
+	HTB_LOG_SFGROUPLIST
+#undef X
+};
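+/* Illustrative expansion (names hypothetical): an HTB_LOG_SFGROUPLIST
+ * entry X(CTRL, ctrl) becomes { "ctrl", HTB_LOG_GROUP_FLAG(CTRL) },
+ * i.e. the stringified name is what user space writes to select a group.
+ */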
+
+static const struct apphint_lookup htb_opmode_tbl[] = {
+	{ "droplatest", HTB_OPMODE_DROPLATEST},
+	{ "dropoldest", HTB_OPMODE_DROPOLDEST},
+	{ "block", HTB_OPMODE_BLOCK}
+};
+
+__maybe_unused
+static const struct apphint_lookup htb_logmode_tbl[] = {
+	{ "all", HTB_LOGMODE_ALLPID},
+	{ "restricted", HTB_LOGMODE_RESTRICTEDPID}
+};
+
+static const struct apphint_lookup timecorr_clk_tbl[] = {
+	{ "mono", 0 },
+	{ "mono_raw", 1 },
+	{ "sched", 2 }
+};
+
+/*
+*******************************************************************************
+ Data types
+******************************************************************************/
+union apphint_value {
+	IMG_UINT64 UINT64;
+	IMG_UINT32 UINT32;
+	IMG_BOOL BOOL;
+	IMG_CHAR *STRING;
+};
+
+struct apphint_action {
+	union {
+		PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value);
+		PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value);
+		PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value);
+		PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value);
+	} query;
+	union {
+		PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value);
+		PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value);
+		PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value);
+		PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value);
+	} set;
+	const PVRSRV_DEVICE_NODE *device;
+	const void *private_data;
+	union apphint_value stored;
+	bool free;
+};
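+/* Only one member of each union above is meaningful for a given hint;
+ * param_lookup[].data_type selects which. 'free' marks a kmalloc'd
+ * STRING owned by this module that must be kfree'd on update or deinit.
+ */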
+
+struct apphint_param {
+	IMG_UINT32 id;
+	APPHINT_DATA_TYPE data_type;
+	const void *data_type_helper;
+	IMG_UINT32 helper_size;
+};
+
+struct apphint_init_data {
+	IMG_UINT32 id;			/* index into AppHint Table */
+	APPHINT_CLASS class;
+	const IMG_CHAR *name;
+	union apphint_value default_value;
+};
+
+struct apphint_class_state {
+	APPHINT_CLASS class;
+	IMG_BOOL enabled;
+};
+
+struct apphint_work {
+	struct work_struct work;
+	union apphint_value new_value;
+	struct apphint_action *action;
+};
+
+/*
+*******************************************************************************
+ Initialization / configuration table data
+******************************************************************************/
+#define UINT32Bitfield UINT32
+#define UINT32List UINT32
+
+static const struct apphint_init_data init_data_buildvar[] = {
+#define X(a, b, c, d, e) \
+	{APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+	APPHINT_LIST_BUILDVAR
+#undef X
+};
+
+static const struct apphint_init_data init_data_modparam[] = {
+#define X(a, b, c, d, e) \
+	{APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+	APPHINT_LIST_MODPARAM
+#undef X
+};
+
+static const struct apphint_init_data init_data_debugfs[] = {
+#define X(a, b, c, d, e) \
+	{APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+	APPHINT_LIST_DEBUGFS
+#undef X
+};
+
+static const struct apphint_init_data init_data_debugfs_device[] = {
+#define X(a, b, c, d, e) \
+	{APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+	APPHINT_LIST_DEBUGFS_DEVICE
+#undef X
+};
+
+#undef UINT32Bitfield
+#undef UINT32List
+
+__maybe_unused static const char NO_PARAM_TABLE[] = {};
+
+static const struct apphint_param param_lookup[] = {
+#define X(a, b, c, d, e) \
+	{APPHINT_ID_ ## a, APPHINT_DATA_TYPE_ ## b, e, ARRAY_SIZE(e) },
+	APPHINT_LIST_ALL
+#undef X
+};
+
+static const struct apphint_class_state class_state[] = {
+#define X(a) {APPHINT_CLASS_ ## a, APPHINT_ENABLED_CLASS_ ## a},
+	APPHINT_CLASS_LIST
+#undef X
+};
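+/* Illustrative expansion: an APPHINT_CLASS_LIST entry X(ALWAYS) becomes
+ * {APPHINT_CLASS_ALWAYS, APPHINT_ENABLED_CLASS_ALWAYS}, pairing each
+ * class with its build-time enable state.
+ */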
+
+/*
+*******************************************************************************
+ Global state
+******************************************************************************/
+/* If the union apphint_value becomes such that it is not possible to read
+ * and write atomically, a mutex may be desirable to prevent a read returning
+ * a partially written state.
+ * This would require a statically initialized mutex outside of the
+ * struct apphint_state to prevent use of an uninitialized mutex when
+ * module_params are provided on the command line.
+ *     static DEFINE_MUTEX(apphint_mutex);
+ */
+static struct apphint_state
+{
+	struct workqueue_struct *workqueue;
+	PPVR_DEBUGFS_DIR_DATA debugfs_device_rootdir[APPHINT_DEVICES_MAX];
+	PPVR_DEBUGFS_ENTRY_DATA debugfs_device_entry[APPHINT_DEVICES_MAX][APPHINT_DEBUGFS_DEVICE_ID_MAX];
+	PPVR_DEBUGFS_DIR_DATA debugfs_rootdir;
+	PPVR_DEBUGFS_ENTRY_DATA debugfs_entry[APPHINT_DEBUGFS_ID_MAX];
+	PPVR_DEBUGFS_DIR_DATA buildvar_rootdir;
+	PPVR_DEBUGFS_ENTRY_DATA buildvar_entry[APPHINT_BUILDVAR_ID_MAX];
+
+	int num_devices;
+	PVRSRV_DEVICE_NODE *devices[APPHINT_DEVICES_MAX];
+	int initialized;
+
+	/* Array contains value space for 1 copy of all apphint values defined
+	 * (for device 1) and N copies of device specific apphint values for
+	 * multi-device platforms.
+	 */
+	struct apphint_action val[APPHINT_ID_MAX + ((APPHINT_DEVICES_MAX-1)*APPHINT_DEBUGFS_DEVICE_ID_MAX)];
+
+} apphint = {
+/* statically initialise default values to ensure that any module_params
+ * provided on the command line are not overwritten by defaults.
+ */
+	.val = {
+#define UINT32Bitfield UINT32
+#define UINT32List UINT32
+#define X(a, b, c, d, e) \
+	{ {NULL}, {NULL}, NULL, NULL, {.b=d}, false },
+	APPHINT_LIST_ALL
+#undef X
+#undef UINT32Bitfield
+#undef UINT32List
+	},
+	.initialized = 0,
+	.num_devices = 0
+};
+
+#define APPHINT_DEBUGFS_DEVICE_ID_OFFSET (APPHINT_ID_MAX-APPHINT_DEBUGFS_DEVICE_ID_MAX)
+
+static inline void
+get_apphint_id_from_action_addr(const struct apphint_action * const addr,
+                                APPHINT_ID * const id)
+{
+	*id = (APPHINT_ID)(addr - apphint.val);
+	if (*id >= APPHINT_ID_MAX) {
+		*id -= APPHINT_DEBUGFS_DEVICE_ID_OFFSET;
+		*id %= APPHINT_DEBUGFS_DEVICE_ID_MAX;
+		*id += APPHINT_DEBUGFS_DEVICE_ID_OFFSET;
+	}
+}
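+/* Worked example (numbers illustrative): with APPHINT_ID_MAX == 40 and
+ * APPHINT_DEBUGFS_DEVICE_ID_MAX == 8, the offset is 32, so the action at
+ * val[45] (a device-1 copy) maps back to id 32 + ((45 - 32) % 8) == 37.
+ */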
+
+static inline void
+get_value_offset_from_device(const PVRSRV_DEVICE_NODE * const device,
+                             int * const offset)
+{
+	int i;
+
+	/* No device offset if not a device specific apphint */
+	if (APPHINT_OF_DRIVER_NO_DEVICE == device) {
+		*offset = 0;
+		return;
+	}
+
+	for (i = 0; device && i < APPHINT_DEVICES_MAX; i++) {
+		if (apphint.devices[i] == device)
+			break;
+	}
+	if (APPHINT_DEVICES_MAX == i) {
+		PVR_DPF((PVR_DBG_WARNING, "%s: Unregistered device", __func__));
+		i = 0;
+	}
+	*offset = i * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+}
+
+/**
+ * apphint_action_worker - perform the action requested by a UM process
+ *                    after an AppHint update, and record the new active
+ *                    value
+ */
+static void apphint_action_worker(struct work_struct *work)
+{
+	struct apphint_work *work_pkt = container_of(work,
+	                                             struct apphint_work,
+	                                             work);
+	struct apphint_action *a = work_pkt->action;
+	union apphint_value value = work_pkt->new_value;
+	APPHINT_ID id;
+	PVRSRV_ERROR result = PVRSRV_OK;
+
+	get_apphint_id_from_action_addr(a, &id);
+
+	if (a->set.UINT64) {
+		switch (param_lookup[id].data_type) {
+		case APPHINT_DATA_TYPE_UINT64:
+			result = a->set.UINT64(a->device,
+			                       a->private_data,
+			                       value.UINT64);
+			break;
+
+		case APPHINT_DATA_TYPE_UINT32:
+		case APPHINT_DATA_TYPE_UINT32Bitfield:
+		case APPHINT_DATA_TYPE_UINT32List:
+			result = a->set.UINT32(a->device,
+			                       a->private_data,
+			                       value.UINT32);
+			break;
+
+		case APPHINT_DATA_TYPE_BOOL:
+			result = a->set.BOOL(a->device,
+			                     a->private_data,
+			                     value.BOOL);
+			break;
+
+		case APPHINT_DATA_TYPE_STRING:
+			result = a->set.STRING(a->device,
+								   a->private_data,
+								   value.STRING);
+			kfree(value.STRING);
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: unrecognised data type (%d), index (%d)",
+			         __func__, param_lookup[id].data_type, id));
+		}
+
+		if (PVRSRV_OK != result) {
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: failed (%s)",
+			         __func__, PVRSRVGetErrorString(result)));
+		}
+	} else {
+		if (a->free) {
+			kfree(a->stored.STRING);
+		}
+		a->stored = value;
+		if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) {
+			a->free = true;
+		}
+		PVR_DPF((PVR_DBG_MESSAGE,
+		         "%s: AppHint value updated before handler is registered, ID(%d)",
+		         __func__, id));
+	}
+	kfree((void *)work_pkt);
+}
+
+static void apphint_action(union apphint_value new_value,
+                           struct apphint_action *action)
+{
+	struct apphint_work *work_pkt = kmalloc(sizeof(*work_pkt), GFP_KERNEL);
+
+	/* queue apphint update on a serialized workqueue to avoid races */
+	if (work_pkt) {
+		work_pkt->new_value = new_value;
+		work_pkt->action = action;
+		INIT_WORK(&work_pkt->work, apphint_action_worker);
+		if (0 == queue_work(apphint.workqueue, &work_pkt->work)) {
+			PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed to queue apphint change request",
+				__func__));
+			goto err_exit;
+		}
+	} else {
+		PVR_DPF((PVR_DBG_ERROR,
+			"%s: failed to alloc memory for apphint change request",
+			__func__));
+		goto err_exit;
+	}
+	return;
+err_exit:
+	kfree(new_value.STRING);
+}
+
+/**
+ * apphint_read - parse input for the different AppHint data types
+ * Returns -errno, or the buffer size (count) on success
+ */
+static int apphint_read(char *buffer, size_t count, APPHINT_ID ue,
+			 union apphint_value *value)
+{
+	APPHINT_DATA_TYPE data_type = param_lookup[ue].data_type;
+	int result = 0;
+
+	switch (data_type) {
+	case APPHINT_DATA_TYPE_UINT64:
+		if (kstrtou64(buffer, 0, &value->UINT64) < 0) {
+			PVR_DPF((PVR_DBG_ERROR,
+				"%s: Invalid UINT64 input data for id %d: %s",
+				__func__, ue, buffer));
+			result = -EINVAL;
+			goto err_exit;
+		}
+		break;
+	case APPHINT_DATA_TYPE_UINT32:
+		if (kstrtou32(buffer, 0, &value->UINT32) < 0) {
+			PVR_DPF((PVR_DBG_ERROR,
+				"%s: Invalid UINT32 input data for id %d: %s",
+				__func__, ue, buffer));
+			result = -EINVAL;
+			goto err_exit;
+		}
+		break;
+	case APPHINT_DATA_TYPE_BOOL:
+		switch (buffer[0]) {
+		case '0':
+		case 'n':
+		case 'N':
+		case 'f':
+		case 'F':
+			value->BOOL = IMG_FALSE;
+			break;
+		case '1':
+		case 'y':
+		case 'Y':
+		case 't':
+		case 'T':
+			value->BOOL = IMG_TRUE;
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+				"%s: Invalid BOOL input data for id %d: %s",
+				__func__, ue, buffer));
+			result = -EINVAL;
+			goto err_exit;
+		}
+		break;
+	case APPHINT_DATA_TYPE_UINT32List:
+	{
+		int i;
+		struct apphint_lookup *lookup =
+			(struct apphint_lookup *)
+			param_lookup[ue].data_type_helper;
+		int size = param_lookup[ue].helper_size;
+		/* buffer may include '\n', remove it */
+		char *arg = strsep(&buffer, "\n");
+
+		if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) {
+			result = -EINVAL;
+			goto err_exit;
+		}
+
+		for (i = 0; i < size; i++) {
+			if (strcasecmp(lookup[i].name, arg) == 0) {
+				value->UINT32 = lookup[i].value;
+				break;
+			}
+		}
+		if (i == size) {
+			if (strlen(arg) == 0) {
+				PVR_DPF((PVR_DBG_ERROR,
+					"%s: No value set for AppHint",
+					__func__));
+			} else {
+				PVR_DPF((PVR_DBG_ERROR,
+					"%s: Unrecognised AppHint value (%s)",
+					__func__, arg));
+			}
+			result = -EINVAL;
+		}
+		break;
+	}
+	case APPHINT_DATA_TYPE_UINT32Bitfield:
+	{
+		int i;
+		struct apphint_lookup *lookup =
+			(struct apphint_lookup *)
+			param_lookup[ue].data_type_helper;
+		int size = param_lookup[ue].helper_size;
+		/* buffer may include '\n', remove it */
+		char *string = strsep(&buffer, "\n");
+		char *token = strsep(&string, ",");
+
+		if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) {
+			result = -EINVAL;
+			goto err_exit;
+		}
+
+		value->UINT32 = 0;
+		/* empty string is valid to clear the bitfield */
+		while (token && *token) {
+			for (i = 0; i < size; i++) {
+				if (strcasecmp(lookup[i].name, token) == 0) {
+					value->UINT32 |= lookup[i].value;
+					break;
+				}
+			}
+			if (i == size) {
+				PVR_DPF((PVR_DBG_ERROR,
+					"%s: Unrecognised AppHint value (%s)",
+					__func__, token));
+				result = -EINVAL;
+				goto err_exit;
+			}
+			token = strsep(&string, ",");
+		}
+		break;
+	}
+	case APPHINT_DATA_TYPE_STRING:
+	{
+		/* buffer may include '\n', remove it */
+		char *string = strsep(&buffer, "\n");
+		size_t len = strlen(string);
+
+		if (!len) {
+			result = -EINVAL;
+			goto err_exit;
+		}
+
+		++len;
+
+		value->STRING = kmalloc(len, GFP_KERNEL);
+		if (!value->STRING) {
+			result = -ENOMEM;
+			goto err_exit;
+		}
+
+		strlcpy(value->STRING, string, len);
+		break;
+	}
+	default:
+		result = -EINVAL;
+		goto err_exit;
+	}
+
+err_exit:
+	return (result < 0) ? result : count;
+}
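+/* Example (group names hypothetical): for a UINT32Bitfield hint the
+ * input "groupA,groupB\n" ORs the looked-up values for "groupA" and
+ * "groupB" into value->UINT32; an empty string clears the bitfield.
+ */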
+
+static PVRSRV_ERROR get_apphint_value_from_action(const struct apphint_action * const action,
+												  union apphint_value * const value)
+{
+	APPHINT_ID id;
+	APPHINT_DATA_TYPE data_type;
+	PVRSRV_ERROR result = PVRSRV_OK;
+
+	get_apphint_id_from_action_addr(action, &id);
+	data_type = param_lookup[id].data_type;
+
+	if (action->query.UINT64) {
+		switch (data_type) {
+		case APPHINT_DATA_TYPE_UINT64:
+			result = action->query.UINT64(action->device,
+										  action->private_data,
+										  &value->UINT64);
+			break;
+
+		case APPHINT_DATA_TYPE_UINT32:
+		case APPHINT_DATA_TYPE_UINT32Bitfield:
+		case APPHINT_DATA_TYPE_UINT32List:
+			result = action->query.UINT32(action->device,
+										  action->private_data,
+										  &value->UINT32);
+			break;
+
+		case APPHINT_DATA_TYPE_BOOL:
+			result = action->query.BOOL(action->device,
+										action->private_data,
+										&value->BOOL);
+			break;
+
+		case APPHINT_DATA_TYPE_STRING:
+			result = action->query.STRING(action->device,
+										  action->private_data,
+										  &value->STRING);
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: unrecognised data type (%d), index (%d)",
+			         __func__, data_type, id));
+		}
+	} else {
+		*value = action->stored;
+	}
+
+	if (PVRSRV_OK != result) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: failed (%d), index (%d)", __func__, result, id));
+	}
+
+	return result;
+}
+
+/**
+ * apphint_write - write the current AppHint data to a buffer
+ *
+ * Returns length written or -errno
+ */
+static int apphint_write(char *buffer, const size_t size,
+                         const struct apphint_action *a)
+{
+	const struct apphint_param *hint;
+	int result = 0;
+	APPHINT_ID id;
+	union apphint_value value;
+
+	get_apphint_id_from_action_addr(a, &id);
+	hint = &param_lookup[id];
+
+	result = get_apphint_value_from_action(a, &value);
+
+	switch (hint->data_type) {
+	case APPHINT_DATA_TYPE_UINT64:
+		result += snprintf(buffer + result, size - result,
+				"0x%016llx",
+				value.UINT64);
+		break;
+	case APPHINT_DATA_TYPE_UINT32:
+		result += snprintf(buffer + result, size - result,
+				"0x%08x",
+				value.UINT32);
+		break;
+	case APPHINT_DATA_TYPE_BOOL:
+		result += snprintf(buffer + result, size - result,
+			"%s",
+			value.BOOL ? "Y" : "N");
+		break;
+	case APPHINT_DATA_TYPE_STRING:
+		if (value.STRING) {
+			result += snprintf(buffer + result, size - result,
+				"%s",
+				*value.STRING ? value.STRING : "(none)");
+		} else {
+			result += snprintf(buffer + result, size - result,
+			"(none)");
+		}
+		break;
+	case APPHINT_DATA_TYPE_UINT32List:
+	{
+		struct apphint_lookup *lookup =
+			(struct apphint_lookup *) hint->data_type_helper;
+		IMG_UINT32 i;
+
+		if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) {
+			result = -EINVAL;
+			goto err_exit;
+		}
+
+		for (i = 0; i < hint->helper_size; i++) {
+			if (lookup[i].value == value.UINT32) {
+				result += snprintf(buffer + result,
+						size - result,
+						"%s",
+						lookup[i].name);
+				break;
+			}
+		}
+		break;
+	}
+	case APPHINT_DATA_TYPE_UINT32Bitfield:
+	{
+		struct apphint_lookup *lookup =
+			(struct apphint_lookup *) hint->data_type_helper;
+		IMG_UINT32 i;
+
+		if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) {
+			result = -EINVAL;
+			goto err_exit;
+		}
+
+		for (i = 0; i < hint->helper_size; i++) {
+			if (lookup[i].value & value.UINT32) {
+				result += snprintf(buffer + result,
+						size - result,
+						"%s,",
+						lookup[i].name);
+			}
+		}
+		if (result) {
+			/* remove any trailing ',' */
+			--result;
+			*(buffer + result) = '\0';
+		} else {
+			result += snprintf(buffer + result,
+					size - result, "none");
+		}
+		break;
+	}
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: unrecognised data type (%d), index (%d)",
+			 __func__, hint->data_type, id));
+		result = -EINVAL;
+	}
+
+err_exit:
+	return result;
+}
+
+/*
+*******************************************************************************
+ Module parameters initialization - different from debugfs
+******************************************************************************/
+/**
+ * apphint_kparam_set - Handle an update of a module parameter
+ *
+ * Returns 0, or -errno.  arg is in kp->arg.
+ */
+static int apphint_kparam_set(const char *val, const struct kernel_param *kp)
+{
+	char val_copy[APPHINT_BUFFER_SIZE];
+	APPHINT_ID id;
+	union apphint_value value;
+	int result;
+
+	/* copy into a mutable buffer: 'val' is const and apphint_read()
+	 * modifies the string while parsing it
+	 */
+	result = strlcpy(val_copy, val, APPHINT_BUFFER_SIZE);
+
+	get_apphint_id_from_action_addr(kp->arg, &id);
+	if (result < APPHINT_BUFFER_SIZE) {
+		result = apphint_read(val_copy, result, id, &value);
+		if (result >= 0) {
+			((struct apphint_action *)kp->arg)->stored = value;
+			if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) {
+				((struct apphint_action *)kp->arg)->free = true;
+			}
+		}
+	} else {
+		PVR_DPF((PVR_DBG_ERROR, "%s: String too long", __func__));
+	}
+	return (result > 0) ? 0 : result;
+}
+
+/**
+ * apphint_kparam_get - handle a read of a module parameter
+ *
+ * Returns length written or -errno.  Buffer is 4k (i.e. be short!)
+ */
+static int apphint_kparam_get(char *buffer, const struct kernel_param *kp)
+{
+	return apphint_write(buffer, PAGE_SIZE, kp->arg);
+}
+
+__maybe_unused
+static const struct kernel_param_ops apphint_kparam_fops = {
+	.set = apphint_kparam_set,
+	.get = apphint_kparam_get,
+};
+
+/*
+ * call module_param_cb() for all AppHints listed in APPHINT_LIST_MODPARAM.
+ * apphint_modparam_class_<class> resolves to apphint_modparam_enable(),
+ * except for AppHint classes that have been disabled.
+ */
+
+#define apphint_modparam_enable(name, number, perm) \
+	module_param_cb(name, &apphint_kparam_fops, &apphint.val[number], perm);
+
+#define X(a, b, c, d, e) \
+	apphint_modparam_class_ ##c(a, APPHINT_ID_ ## a, 0444)
+	APPHINT_LIST_MODPARAM
+#undef X
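+/* Illustrative expansion for the DriverMode entry of APPHINT_LIST_MODPARAM
+ * (assuming apphint_modparam_class_ALWAYS maps to apphint_modparam_enable):
+ *     module_param_cb(DriverMode, &apphint_kparam_fops,
+ *                     &apphint.val[APPHINT_ID_DriverMode], 0444);
+ */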
+
+/*
+*******************************************************************************
+ Debugfs get (seq file) operations - supporting functions
+******************************************************************************/
+static void *apphint_seq_start(struct seq_file *s, loff_t *pos)
+{
+	if (*pos == 0) {
+		/* We want only one entry in the sequence, one call to show() */
+		return (void *) 1;
+	}
+
+	PVR_UNREFERENCED_PARAMETER(s);
+
+	return NULL;
+}
+
+static void apphint_seq_stop(struct seq_file *s, void *v)
+{
+	PVR_UNREFERENCED_PARAMETER(s);
+	PVR_UNREFERENCED_PARAMETER(v);
+}
+
+static void *apphint_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	PVR_UNREFERENCED_PARAMETER(s);
+	PVR_UNREFERENCED_PARAMETER(v);
+	PVR_UNREFERENCED_PARAMETER(pos);
+	return NULL;
+}
+
+static int apphint_seq_show(struct seq_file *s, void *v)
+{
+	IMG_CHAR km_buffer[APPHINT_BUFFER_SIZE];
+	int result;
+
+	PVR_UNREFERENCED_PARAMETER(v);
+
+	result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, s->private);
+	if (result < 0) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: failure", __func__));
+	} else {
+		/* debugfs requires a trailing \n, module_params don't */
+		result += snprintf(km_buffer + result,
+				APPHINT_BUFFER_SIZE - result,
+				"\n");
+		seq_puts(s, km_buffer);
+	}
+
+	/* have to return 0 to see output */
+	return (result < 0) ? result : 0;
+}
+
+static const struct seq_operations apphint_seq_fops = {
+	.start = apphint_seq_start,
+	.stop  = apphint_seq_stop,
+	.next  = apphint_seq_next,
+	.show  = apphint_seq_show,
+};
+
+/*
+*******************************************************************************
+ Debugfs supporting functions
+******************************************************************************/
+/**
+ * apphint_set - Handle a debugfs value update
+ */
+static ssize_t apphint_set(const char __user *buffer,
+			    size_t count,
+			    loff_t *ppos,
+			    void *data)
+{
+	APPHINT_ID id;
+	union apphint_value value;
+	struct apphint_action *action = data;
+	char km_buffer[APPHINT_BUFFER_SIZE];
+	int result = 0;
+
+	if (ppos == NULL)
+		return -EIO;
+
+	if (count >= APPHINT_BUFFER_SIZE) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: String too long (%zd)",
+			__func__, count));
+		result = -EINVAL;
+		goto err_exit;
+	}
+
+	if (pvr_copy_from_user(km_buffer, buffer, count)) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Copy of user data failed",
+			__func__));
+		result = -EFAULT;
+		goto err_exit;
+	}
+	km_buffer[count] = '\0';
+
+	get_apphint_id_from_action_addr(action, &id);
+	result = apphint_read(km_buffer, count, id, &value);
+	if (result >= 0)
+		apphint_action(value, action);
+
+	*ppos += count;
+err_exit:
+	return result;
+}
+
+/**
+ * apphint_debugfs_init - Create the specified debugfs entries
+ */
+static int apphint_debugfs_init(const char *sub_dir,
+		int device_num,
+		unsigned init_data_size,
+		const struct apphint_init_data *init_data,
+		PPVR_DEBUGFS_DIR_DATA parentdir,
+		PPVR_DEBUGFS_DIR_DATA *rootdir, PPVR_DEBUGFS_ENTRY_DATA *entry)
+{
+	int result = 0;
+	unsigned i;
+	int device_value_offset = device_num * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+
+	if (*rootdir) {
+		PVR_DPF((PVR_DBG_WARNING,
+			"AppHint DebugFS already created, skipping"));
+		result = -EEXIST;
+		goto err_exit;
+	}
+
+	result = PVRDebugFSCreateEntryDir(sub_dir, parentdir,
+					  rootdir);
+	if (result < 0) {
+		PVR_DPF((PVR_DBG_WARNING,
+			"Failed to create \"%s\" DebugFS directory.", sub_dir));
+		goto err_exit;
+	}
+
+	for (i = 0; i < init_data_size; i++) {
+		if (!class_state[init_data[i].class].enabled)
+			continue;
+
+		result = PVRDebugFSCreateFile(init_data[i].name,
+				*rootdir,
+				&apphint_seq_fops,
+				apphint_set,
+				NULL,
+				(void *) &apphint.val[init_data[i].id + device_value_offset],
+				&entry[i]);
+		if (result < 0) {
+			PVR_DPF((PVR_DBG_WARNING,
+				"Failed to create \"%s/%s\" DebugFS entry.",
+				sub_dir, init_data[i].name));
+		}
+	}
+
+err_exit:
+	return result;
+}
+
+/**
+ * apphint_debugfs_deinit - destroy the debugfs entries
+ */
+static void apphint_debugfs_deinit(unsigned num_entries,
+		PPVR_DEBUGFS_DIR_DATA *rootdir, PPVR_DEBUGFS_ENTRY_DATA *entry)
+{
+	unsigned i;
+
+	for (i = 0; i < num_entries; i++) {
+		if (entry[i]) {
+			PVRDebugFSRemoveFile(&entry[i]);
+		}
+	}
+
+	if (*rootdir) {
+		PVRDebugFSRemoveEntryDir(rootdir);
+		*rootdir = NULL;
+	}
+}
+
+/*
+*******************************************************************************
+ AppHint status dump implementation
+******************************************************************************/
+#if defined(PDUMP)
+static void apphint_pdump_values(void *flags, const IMG_CHAR *format, ...)
+{
+	char km_buffer[APPHINT_BUFFER_SIZE];
+	IMG_UINT32 ui32Flags = *(IMG_UINT32 *)flags;
+	va_list ap;
+
+	va_start(ap, format);
+	(void)vsnprintf(km_buffer, APPHINT_BUFFER_SIZE, format, ap);
+	va_end(ap);
+
+	PDumpCommentKM(km_buffer, ui32Flags);
+}
+#endif
+
+static IMG_BOOL is_apphint_value_equal(const APPHINT_DATA_TYPE data_type,
+									const union apphint_value * const left,
+									const union apphint_value * const right)
+{
+		switch (data_type) {
+		case APPHINT_DATA_TYPE_UINT64:
+			return left->UINT64 == right->UINT64;
+		case APPHINT_DATA_TYPE_UINT32:
+		case APPHINT_DATA_TYPE_UINT32List:
+		case APPHINT_DATA_TYPE_UINT32Bitfield:
+			return left->UINT32 == right->UINT32;
+		case APPHINT_DATA_TYPE_BOOL:
+			return left->BOOL == right->BOOL;
+		case APPHINT_DATA_TYPE_STRING:
+			return (strcmp(left->STRING, right->STRING) == 0 ? IMG_TRUE : IMG_FALSE);
+		default:
+			PVR_DPF((PVR_DBG_WARNING, "%s: unhandled data type (%d)", __func__, data_type));
+			return IMG_FALSE;
+		}
+}
+
+static void apphint_dump_values(const char *group_name,
+			int device_num,
+			const struct apphint_init_data *group_data,
+			int group_size,
+			DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile,
+			bool list_all)
+{
+	int i, result;
+	int device_value_offset = device_num * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+	char km_buffer[APPHINT_BUFFER_SIZE];
+	char count = 0;
+
+	PVR_DUMPDEBUG_LOG("  %s", group_name);
+	for (i = 0; i < group_size; i++)
+	{
+		IMG_UINT32 id = group_data[i].id;
+		APPHINT_DATA_TYPE data_type = param_lookup[id].data_type;
+		const struct apphint_action *action = &apphint.val[id + device_value_offset];
+		union apphint_value value;
+
+		result = get_apphint_value_from_action(action, &value);
+
+		if (PVRSRV_OK != result) {
+			continue;
+		}
+
+		/* List only apphints with non-default values */
+		if (!list_all &&
+			is_apphint_value_equal(data_type, &value, &group_data[i].default_value)) {
+			continue;
+		}
+
+		result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, action);
+		count++;
+
+		if (result <= 0) {
+			PVR_DUMPDEBUG_LOG("    %s: <Error>",
+				group_data[i].name);
+		} else {
+			PVR_DUMPDEBUG_LOG("    %s: %s",
+				group_data[i].name, km_buffer);
+		}
+	}
+
+	if (count == 0) {
+		PVR_DUMPDEBUG_LOG("    none");
+	}
+}
+
+/**
+ * Callback for debug dump
+ */
+static void apphint_dump_state(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+			IMG_UINT32 ui32VerbLevel,
+			DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile)
+{
+	int i, result;
+	char km_buffer[APPHINT_BUFFER_SIZE];
+	PVRSRV_DEVICE_NODE *device = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+
+	if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) {
+		PVR_DUMPDEBUG_LOG("------[ AppHint Settings ]------");
+
+		apphint_dump_values("Build Vars", 0,
+			init_data_buildvar, ARRAY_SIZE(init_data_buildvar),
+			pfnDumpDebugPrintf, pvDumpDebugFile, true);
+
+		apphint_dump_values("Module Params", 0,
+			init_data_modparam, ARRAY_SIZE(init_data_modparam),
+			pfnDumpDebugPrintf, pvDumpDebugFile, false);
+
+		apphint_dump_values("Debugfs Params", 0,
+			init_data_debugfs, ARRAY_SIZE(init_data_debugfs),
+			pfnDumpDebugPrintf, pvDumpDebugFile, false);
+
+		for (i = 0; i < APPHINT_DEVICES_MAX; i++) {
+			if (!apphint.devices[i]
+			    || (device && device != apphint.devices[i]))
+				continue;
+
+			result = snprintf(km_buffer,
+					  APPHINT_BUFFER_SIZE,
+					  "Debugfs Params Device ID: %d",
+					  i);
+			if (0 > result)
+				continue;
+
+			apphint_dump_values(km_buffer, i,
+					    init_data_debugfs_device,
+					    ARRAY_SIZE(init_data_debugfs_device),
+					    pfnDumpDebugPrintf,
+					    pvDumpDebugFile,
+						false);
+		}
+	}
+}
+
+/*
+*******************************************************************************
+ Public interface
+******************************************************************************/
+int pvr_apphint_init(void)
+{
+	int result, i;
+
+	if (apphint.initialized) {
+		result = -EEXIST;
+		goto err_out;
+	}
+
+	for (i = 0; i < APPHINT_DEVICES_MAX; i++)
+		apphint.devices[i] = NULL;
+
+	/* create workqueue with strict execution ordering to ensure no
+	 * race conditions when setting/updating apphints from different
+	 * contexts
+	 */
+	apphint.workqueue = alloc_workqueue("apphint_workqueue",
+	                                    WQ_UNBOUND | WQ_FREEZABLE, 1);
+	if (!apphint.workqueue) {
+		result = -ENOMEM;
+		goto err_out;
+	}
+
+	result = apphint_debugfs_init("apphint", 0,
+		ARRAY_SIZE(init_data_debugfs), init_data_debugfs,
+		NULL,
+		&apphint.debugfs_rootdir, apphint.debugfs_entry);
+	if (0 != result)
+		goto err_out;
+
+	result = apphint_debugfs_init("buildvar", 0,
+		ARRAY_SIZE(init_data_buildvar), init_data_buildvar,
+		NULL,
+		&apphint.buildvar_rootdir, apphint.buildvar_entry);
+
+	apphint.initialized = 1;
+
+err_out:
+	return result;
+}
+
+int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device)
+{
+	int result, i;
+	char device_num[APPHINT_BUFFER_SIZE];
+	int device_value_offset;
+
+	if (!apphint.initialized) {
+		result = -EAGAIN;
+		goto err_out;
+	}
+
+	if (apphint.num_devices+1 >= APPHINT_DEVICES_MAX) {
+		result = -EMFILE;
+		goto err_out;
+	}
+
+	result = snprintf(device_num, APPHINT_BUFFER_SIZE, "%d", apphint.num_devices);
+	if (result < 0) {
+		PVR_DPF((PVR_DBG_WARNING,
+			"snprintf failed (%d)", result));
+		result = -EINVAL;
+		goto err_out;
+	}
+
+	/* Set the default values for the new device */
+	device_value_offset = apphint.num_devices * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+	for (i = 0; i < APPHINT_DEBUGFS_DEVICE_ID_MAX; i++) {
+		apphint.val[init_data_debugfs_device[i].id + device_value_offset].stored
+			= init_data_debugfs_device[i].default_value;
+	}
+
+	result = apphint_debugfs_init(device_num, apphint.num_devices,
+	                              ARRAY_SIZE(init_data_debugfs_device),
+	                              init_data_debugfs_device,
+	                              apphint.debugfs_rootdir,
+	                              &apphint.debugfs_device_rootdir[apphint.num_devices],
+	                              apphint.debugfs_device_entry[apphint.num_devices]);
+	if (0 != result)
+		goto err_out;
+
+	apphint.devices[apphint.num_devices] = device;
+	apphint.num_devices++;
+
+	(void)SOPvrDbgRequestNotifyRegister(
+			&device->hAppHintDbgReqNotify,
+			device,
+			apphint_dump_state,
+			DEBUG_REQUEST_APPHINT,
+			device);
+
+err_out:
+	return result;
+}
+
+void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device)
+{
+	int i;
+
+	if (!apphint.initialized)
+		return;
+
+	/* find the device */
+	for (i = 0; i < APPHINT_DEVICES_MAX; i++) {
+		if (apphint.devices[i] == device)
+			break;
+	}
+
+	if (APPHINT_DEVICES_MAX == i)
+		return;
+
+	if (device->hAppHintDbgReqNotify) {
+		(void)SOPvrDbgRequestNotifyUnregister(
+			device->hAppHintDbgReqNotify);
+		device->hAppHintDbgReqNotify = NULL;
+	}
+
+	apphint_debugfs_deinit(APPHINT_DEBUGFS_DEVICE_ID_MAX,
+	                       &apphint.debugfs_device_rootdir[i],
+	                       apphint.debugfs_device_entry[i]);
+
+	apphint.devices[i] = NULL;
+	apphint.num_devices--;
+}
+
+void pvr_apphint_deinit(void)
+{
+	int i;
+
+	if (!apphint.initialized)
+		return;
+
+	/* remove any remaining device data */
+	for (i = 0; apphint.num_devices && i < APPHINT_DEVICES_MAX; i++) {
+		if (apphint.devices[i])
+			pvr_apphint_device_unregister(apphint.devices[i]);
+	}
+
+	/* free all alloc'd string apphints and set to NULL */
+	for (i = 0; i < ARRAY_SIZE(apphint.val); i++) {
+		if (apphint.val[i].free && apphint.val[i].stored.STRING) {
+			kfree(apphint.val[i].stored.STRING);
+			apphint.val[i].stored.STRING = NULL;
+			apphint.val[i].free = false;
+		}
+	}
+
+	apphint_debugfs_deinit(APPHINT_DEBUGFS_ID_MAX,
+			&apphint.debugfs_rootdir, apphint.debugfs_entry);
+	apphint_debugfs_deinit(APPHINT_BUILDVAR_ID_MAX,
+			&apphint.buildvar_rootdir, apphint.buildvar_entry);
+
+	destroy_workqueue(apphint.workqueue);
+
+	apphint.initialized = 0;
+}
+
+void pvr_apphint_dump_state(void)
+{
+#if defined(PDUMP)
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+	apphint_dump_state(NULL, DEBUG_REQUEST_VERBOSITY_HIGH,
+	                   apphint_pdump_values, (void *)&ui32Flags);
+#endif
+	apphint_dump_state(NULL, DEBUG_REQUEST_VERBOSITY_HIGH,
+	                   NULL, NULL);
+}
+
+int pvr_apphint_get_uint64(APPHINT_ID ue, IMG_UINT64 *pVal)
+{
+	int error = -ERANGE;
+
+	if (ue < APPHINT_ID_MAX) {
+		*pVal = apphint.val[ue].stored.UINT64;
+		error = 0;
+	}
+	return error;
+}
+
+int pvr_apphint_get_uint32(APPHINT_ID ue, IMG_UINT32 *pVal)
+{
+	int error = -ERANGE;
+
+	if (ue < APPHINT_ID_MAX) {
+		*pVal = apphint.val[ue].stored.UINT32;
+		error = 0;
+	}
+	return error;
+}
+
+int pvr_apphint_get_bool(APPHINT_ID ue, IMG_BOOL *pVal)
+{
+	int error = -ERANGE;
+
+	if (ue < APPHINT_ID_MAX) {
+		error = 0;
+		*pVal = apphint.val[ue].stored.BOOL;
+	}
+	return error;
+}
+
+int pvr_apphint_get_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size)
+{
+	int error = -ERANGE;
+	if (ue < APPHINT_ID_MAX && apphint.val[ue].stored.STRING) {
+		if (strlcpy(pBuffer, apphint.val[ue].stored.STRING, size) < size) {
+			error = 0;
+		}
+	}
+	return error;
+}
+
+void pvr_apphint_register_handlers_uint64(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data)
+{
+	int device_value_offset;
+
+	if (id >= APPHINT_ID_MAX) {
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: AppHint ID (%d) is out of range, max (%d)",
+		         __func__, id, APPHINT_ID_MAX-1));
+		return;
+	}
+
+	get_value_offset_from_device(device, &device_value_offset);
+
+	switch (param_lookup[id].data_type) {
+	case APPHINT_DATA_TYPE_UINT64:
+		break;
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Does not match AppHint data type for ID (%d)",
+		         __func__, id));
+		return;
+	}
+
+	apphint.val[id + device_value_offset] = (struct apphint_action){
+		.query.UINT64 = query,
+		.set.UINT64 = set,
+		.device = device,
+		.private_data = private_data,
+		.stored = apphint.val[id + device_value_offset].stored
+	};
+}
+
+void pvr_apphint_register_handlers_uint32(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data)
+{
+	int device_value_offset;
+
+	if (id >= APPHINT_ID_MAX) {
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: AppHint ID (%d) is out of range, max (%d)",
+		         __func__, id, APPHINT_ID_MAX-1));
+		return;
+	}
+
+	get_value_offset_from_device(device, &device_value_offset);
+
+	switch (param_lookup[id].data_type) {
+	case APPHINT_DATA_TYPE_UINT32:
+	case APPHINT_DATA_TYPE_UINT32Bitfield:
+	case APPHINT_DATA_TYPE_UINT32List:
+		break;
+
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Does not match AppHint data type for ID (%d)",
+		         __func__, id));
+		return;
+	}
+
+	apphint.val[id + device_value_offset] = (struct apphint_action){
+		.query.UINT32 = query,
+		.set.UINT32 = set,
+		.device = device,
+		.private_data = private_data,
+		.stored = apphint.val[id + device_value_offset].stored
+	};
+}
+
+void pvr_apphint_register_handlers_bool(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data)
+{
+	int device_value_offset;
+
+	if (id >= APPHINT_ID_MAX) {
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: AppHint ID (%d) is out of range, max (%d)",
+		         __func__, id, APPHINT_ID_MAX-1));
+		return;
+	}
+
+	get_value_offset_from_device(device, &device_value_offset);
+
+	switch (param_lookup[id].data_type) {
+	case APPHINT_DATA_TYPE_BOOL:
+		break;
+
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Does not match AppHint data type for ID (%d)",
+		         __func__, id));
+		return;
+	}
+
+	apphint.val[id + device_value_offset] = (struct apphint_action){
+		.query.BOOL = query,
+		.set.BOOL = set,
+		.device = device,
+		.private_data = private_data,
+		.stored = apphint.val[id + device_value_offset].stored
+	};
+}
+
+void pvr_apphint_register_handlers_string(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data)
+{
+	int device_value_offset;
+
+	if (id >= APPHINT_ID_MAX) {
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: AppHint ID (%d) is out of range, max (%d)",
+		         __func__, id, APPHINT_ID_MAX-1));
+		return;
+	}
+
+	get_value_offset_from_device(device, &device_value_offset);
+
+	switch (param_lookup[id].data_type) {
+	case APPHINT_DATA_TYPE_STRING:
+		break;
+
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Does not match AppHint data type for ID (%d)",
+		         __func__, id));
+		return;
+	}
+
+	apphint.val[id + device_value_offset] = (struct apphint_action){
+		.query.STRING = query,
+		.set.STRING = set,
+		.device = device,
+		.private_data = private_data,
+		.stored = apphint.val[id + device_value_offset].stored
+	};
+}
+
+/* EOF */
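A hedged usage sketch of the handler registration implemented above (the
callbacks, psDeviceNode and pvPrivData are hypothetical):

	static PVRSRV_ERROR QueryMode(const PVRSRV_DEVICE_NODE *device,
	                              const void *private_data,
	                              IMG_UINT32 *value)
	{
		*value = 0; /* report the currently applied mode */
		return PVRSRV_OK;
	}

	static PVRSRV_ERROR SetMode(const PVRSRV_DEVICE_NODE *device,
	                            const void *private_data,
	                            IMG_UINT32 value)
	{
		return PVRSRV_OK; /* apply the new mode */
	}

	pvr_apphint_register_handlers_uint32(APPHINT_ID_DriverMode,
	                                     QueryMode, SetMode,
	                                     psDeviceNode, pvPrivData);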
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km_apphint.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km_apphint.h
new file mode 100644
index 0000000..7f48ef8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km_apphint.h
@@ -0,0 +1,95 @@
+/*************************************************************************/ /*!
+@File           km_apphint.h
+@Title          Apphint internal header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Linux kernel AppHint control
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KM_APPHINT_H__
+#define __KM_APPHINT_H__
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "pvrsrv_apphint.h"
+#include "km_apphint_defs.h"
+#include "device.h"
+
+int pvr_apphint_init(void);
+void pvr_apphint_deinit(void);
+int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device);
+void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device);
+void pvr_apphint_dump_state(void);
+
+int pvr_apphint_get_uint64(APPHINT_ID ue, IMG_UINT64 *pVal);
+int pvr_apphint_get_uint32(APPHINT_ID ue, IMG_UINT32 *pVal);
+int pvr_apphint_get_bool(APPHINT_ID ue, IMG_BOOL *pVal);
+int pvr_apphint_get_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size);
+
+void pvr_apphint_register_handlers_uint64(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data);
+void pvr_apphint_register_handlers_uint32(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data);
+void pvr_apphint_register_handlers_bool(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data);
+void pvr_apphint_register_handlers_string(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data);
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* __KM_APPHINT_H__ */
+
+/******************************************************************************
+ End of file (km_apphint.h)
+******************************************************************************/
+
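Reading a stored hint through the accessors declared above (a minimal
sketch; pvr_apphint_get_uint32() returns 0 on success):

	IMG_UINT32 ui32Mode;

	if (pvr_apphint_get_uint32(APPHINT_ID_DriverMode, &ui32Mode) == 0) {
		/* ui32Mode now holds the stored AppHint value */
	}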
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km_apphint_defs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km_apphint_defs.h
new file mode 100644
index 0000000..ab06768
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/km_apphint_defs.h
@@ -0,0 +1,321 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services AppHint definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    AppHint ID, class and default-value definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#ifndef __KM_APPHINT_DEFS_H__
+#define __KM_APPHINT_DEFS_H__
+
+/* NB: The 'DEVICE' AppHints must be last in this list as they will be
+ * duplicated in the case of a driver supporting multiple devices
+ */
+#define APPHINT_LIST_ALL \
+	APPHINT_LIST_BUILDVAR \
+	APPHINT_LIST_MODPARAM \
+	APPHINT_LIST_DEBUGFS \
+	APPHINT_LIST_DEBUGFS_DEVICE
+
+
+/*
+*******************************************************************************
+ Build variables
+ All of these should be configurable only through the 'default' value
+******************************************************************************/
+#define APPHINT_LIST_BUILDVAR \
+/* name,                            type,           class,       default,                                         helper,         */ \
+X(HWRDebugDumpLimit,                UINT32,         ALWAYS,      PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT,                NO_PARAM_TABLE   ) \
+X(EnableTrustedDeviceAceConfig,     BOOL,           GPUVIRT_VAL, PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG,     NO_PARAM_TABLE   ) \
+X(CleanupThreadPriority,            UINT32,         NEVER,       PVRSRV_APPHINT_CLEANUPTHREADPRIORITY,            NO_PARAM_TABLE   ) \
+X(WatchdogThreadPriority,           UINT32,         NEVER,       PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY,           NO_PARAM_TABLE   ) \
+X(HWPerfClientBufferSize,           UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE,           NO_PARAM_TABLE   ) \
+
+/*
+*******************************************************************************
+ Module parameters
+******************************************************************************/
+#define APPHINT_LIST_MODPARAM \
+/* name,                            type,           class,       default,                                         helper,         */ \
+X(EnableSignatureChecks,            BOOL,           PDUMP,       PVRSRV_APPHINT_ENABLESIGNATURECHECKS,            NO_PARAM_TABLE   ) \
+X(SignatureChecksBufSize,           UINT32,         PDUMP,       PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE,           NO_PARAM_TABLE   ) \
+\
+X(DisableClockGating,               BOOL,           ALWAYS,      PVRSRV_APPHINT_DISABLECLOCKGATING,               NO_PARAM_TABLE   ) \
+X(BIFTilingMode,                    UINT32,         VALIDATION,  PVRSRV_APPHINT_BIFTILINGMODE,                    NO_PARAM_TABLE   ) \
+X(DisableDMOverlap,                 BOOL,           ALWAYS,      PVRSRV_APPHINT_DISABLEDMOVERLAP,                 NO_PARAM_TABLE   ) \
+\
+X(EnableCDMKillingRandMode,         BOOL,           VALIDATION,  PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE,         NO_PARAM_TABLE   ) \
+X(EnableFWContextSwitch,            UINT32,         ALWAYS,      PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH,            NO_PARAM_TABLE   ) \
+X(VDMContextSwitchMode,             UINT32,         VALIDATION,  PVRSRV_APPHINT_VDMCONTEXTSWITCHMODE,             NO_PARAM_TABLE   ) \
+X(EnableRDPowerIsland,              UINT32,         ALWAYS,      PVRSRV_APPHINT_ENABLERDPOWERISLAND,              NO_PARAM_TABLE   ) \
+\
+X(GeneralNon4KHeapPageSize,         UINT32,         ALWAYS,      PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE,     NO_PARAM_TABLE   ) \
+\
+X(DriverMode,                       UINT32,         ALWAYS,      PVRSRV_APPHINT_DRIVERMODE,                       NO_PARAM_TABLE   ) \
+\
+X(FirmwarePerf,                     UINT32,         VALIDATION,  PVRSRV_APPHINT_FIRMWAREPERF,                     NO_PARAM_TABLE   ) \
+X(FWContextSwitchProfile,           UINT32,         VALIDATION,  PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE,           NO_PARAM_TABLE   ) \
+X(HWPerfDisableCustomCounterFilter, BOOL,           VALIDATION,  PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER, NO_PARAM_TABLE   ) \
+X(HWPerfFWBufSizeInKB,              UINT32,         VALIDATION,  PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB,              NO_PARAM_TABLE   ) \
+X(HWPerfHostBufSizeInKB,            UINT32,         VALIDATION,  PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB,            NO_PARAM_TABLE   ) \
+X(HWPerfHostThreadTimeoutInMS,      UINT32,         VALIDATION,  PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS,      NO_PARAM_TABLE   ) \
+\
+X(JonesDisableMask,                 UINT32,         VALIDATION,  PVRSRV_APPHINT_JONESDISABLEMASK,                 NO_PARAM_TABLE   ) \
+X(NewFilteringMode,                 BOOL,           VALIDATION,  PVRSRV_APPHINT_NEWFILTERINGMODE,                 NO_PARAM_TABLE   ) \
+X(TruncateMode,                     UINT32,         VALIDATION,  PVRSRV_APPHINT_TRUNCATEMODE,                     NO_PARAM_TABLE   ) \
+X(UseMETAT1,                        UINT32,         VALIDATION,  PVRSRV_APPHINT_USEMETAT1,                        NO_PARAM_TABLE   ) \
+X(EmuMaxFreq,                       UINT32,         ALWAYS,      PVRSRV_APPHINT_EMUMAXFREQ,                       NO_PARAM_TABLE   ) \
+X(GPIOValidationMode,               UINT32,         VALIDATION,  PVRSRV_APPHINT_GPIOVALIDATIONMODE,               NO_PARAM_TABLE   ) \
+X(RGXBVNC,                          STRING,         ALWAYS,      PVRSRV_APPHINT_RGXBVNC,                          NO_PARAM_TABLE   ) \
+\
+X(FWContextSwitchCrossDM,           UINT32,         ALWAYS,      0,                                               NO_PARAM_TABLE   ) \
+\
+X(OSidRegion0Min,                   STRING,         GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION0MIN,                   NO_PARAM_TABLE   ) \
+X(OSidRegion0Max,                   STRING,         GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION0MAX,                   NO_PARAM_TABLE   ) \
+X(OSidRegion1Min,                   STRING,         GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION1MIN,                   NO_PARAM_TABLE   ) \
+X(OSidRegion1Max,                   STRING,         GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION1MAX,                   NO_PARAM_TABLE   ) \
+\
+X(TPUTrilinearFracMaskPDM,          UINT32,         VALIDATION,  0xF,                                             NO_PARAM_TABLE   ) \
+X(TPUTrilinearFracMaskVDM,          UINT32,         VALIDATION,  0,                                               NO_PARAM_TABLE   ) \
+X(TPUTrilinearFracMaskCDM,          UINT32,         VALIDATION,  0,                                               NO_PARAM_TABLE   ) \
+X(TPUTrilinearFracMaskTDM,          UINT32,         VALIDATION,  0,                                               NO_PARAM_TABLE   ) \
+X(HTBufferSizeInKB,                 UINT32,         ALWAYS,      PVRSRV_APPHINT_HTBUFFERSIZE,                     NO_PARAM_TABLE   ) \
+X(FWTraceBufSizeInDWords,           UINT32,         ALWAYS,      PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS,           NO_PARAM_TABLE   ) \
+\
+X(EnablePageFaultDebug,             BOOL,           ALWAYS,      PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG,             NO_PARAM_TABLE   ) \
+X(EnableFullSyncTracking,           BOOL,           ALWAYS,      PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING,           NO_PARAM_TABLE   ) \
+X(IgnoreHWReportedBVNC,             BOOL,           ALWAYS,      PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC,             NO_PARAM_TABLE   ) \
+X(ValidateIrq,                      BOOL,           VALIDATION,  PVRSRV_APPHINT_VALIDATEIRQ,                      NO_PARAM_TABLE   ) \
+\
+X(PhysMemTestPasses,                UINT32,         ALWAYS,      PVRSRV_APPHINT_PHYSMEMTESTPASSES,                NO_PARAM_TABLE   ) \
+X(FBCDCVersionOverride,             UINT32,         VALIDATION,  PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE,             NO_PARAM_TABLE   )
+
+/*
+*******************************************************************************
+ Debugfs parameters - driver configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGFS \
+/* name,                            type,           class,       default,                                         helper,         */ \
+X(EnableHTBLogGroup,                UINT32Bitfield, ALWAYS,      PVRSRV_APPHINT_ENABLEHTBLOGGROUP,                htb_loggroup_tbl ) \
+X(HTBOperationMode,                 UINT32List,     ALWAYS,      PVRSRV_APPHINT_HTBOPERATIONMODE,                 htb_opmode_tbl   ) \
+X(EnableFTraceGPU,                  BOOL,           ALWAYS,      PVRSRV_APPHINT_ENABLEFTRACEGPU,                  NO_PARAM_TABLE   ) \
+X(HWPerfFWFilter,                   UINT64,         ALWAYS,      PVRSRV_APPHINT_HWPERFFWFILTER,                   NO_PARAM_TABLE   ) \
+X(HWPerfHostFilter,                 UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFHOSTFILTER,                 NO_PARAM_TABLE   ) \
+X(HWPerfClientFilter_Services,      UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES,      NO_PARAM_TABLE   ) \
+X(HWPerfClientFilter_EGL,           UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL,           NO_PARAM_TABLE   ) \
+X(HWPerfClientFilter_OpenGLES,      UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES,      NO_PARAM_TABLE   ) \
+X(HWPerfClientFilter_OpenCL,        UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL,        NO_PARAM_TABLE   ) \
+X(HWPerfClientFilter_Vulkan,        UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN,        NO_PARAM_TABLE   ) \
+X(CacheOpConfig,                    UINT32,         ALWAYS,      PVRSRV_APPHINT_CACHEOPCONFIG,                    NO_PARAM_TABLE   ) \
+X(CacheOpGFThresholdSize,           UINT32,         ALWAYS,      PVRSRV_APPHINT_CACHEOPGFTHRESHOLDSIZE,           NO_PARAM_TABLE   ) \
+X(CacheOpUMKMThresholdSize,         UINT32,         ALWAYS,      PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE,          NO_PARAM_TABLE   ) \
+X(TimeCorrClock,                    UINT32List,     ALWAYS,      PVRSRV_APPHINT_TIMECORRCLOCK,                    timecorr_clk_tbl )
+
+/*
+*******************************************************************************
+ Debugfs parameters - device configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGFS_DEVICE \
+/* name,                            type,           class,       default,                                         helper,         */ \
+/* Device Firmware config */\
+X(AssertOnHWRTrigger,               BOOL,           ALWAYS,      PVRSRV_APPHINT_ASSERTONHWRTRIGGER,               NO_PARAM_TABLE   ) \
+X(AssertOutOfMemory,                BOOL,           ALWAYS,      PVRSRV_APPHINT_ASSERTOUTOFMEMORY,                NO_PARAM_TABLE   ) \
+X(CheckMList,                       BOOL,           ALWAYS,      PVRSRV_APPHINT_CHECKMLIST,                       NO_PARAM_TABLE   ) \
+X(EnableHWR,                        BOOL,           ALWAYS,      APPHNT_BLDVAR_ENABLEHWR,                         NO_PARAM_TABLE   ) \
+X(EnableLogGroup,                   UINT32Bitfield, ALWAYS,      PVRSRV_APPHINT_ENABLELOGGROUP,                   fwt_loggroup_tbl ) \
+X(FirmwareLogType,                  UINT32List,     ALWAYS,      PVRSRV_APPHINT_FIRMWARELOGTYPE,                  fwt_logtype_tbl  ) \
+/* Device host config */ \
+X(EnableAPM,                        UINT32,         ALWAYS,      PVRSRV_APPHINT_ENABLEAPM,                        NO_PARAM_TABLE   ) \
+X(DisableFEDLogging,                BOOL,           ALWAYS,      PVRSRV_APPHINT_DISABLEFEDLOGGING,                NO_PARAM_TABLE   ) \
+X(ZeroFreelist,                     BOOL,           ALWAYS,      PVRSRV_APPHINT_ZEROFREELIST,                     NO_PARAM_TABLE   ) \
+X(DustRequestInject,                BOOL,           VALIDATION,  PVRSRV_APPHINT_DUSTREQUESTINJECT,                NO_PARAM_TABLE   ) \
+X(DisablePDumpPanic,                BOOL,           PDUMP,       PVRSRV_APPHINT_DISABLEPDUMPPANIC,                NO_PARAM_TABLE   ) \
+X(EnableFWPoisonOnFree,             BOOL,           ALWAYS,      PVRSRV_APPHINT_ENABLEFWPOISONONFREE,             NO_PARAM_TABLE   ) \
+X(FWPoisonOnFreeValue,              UINT32,         ALWAYS,      PVRSRV_APPHINT_FWPOISONONFREEVALUE,              NO_PARAM_TABLE   ) \
+
+/*
+*******************************************************************************
+ Types used in the APPHINT_LIST_<GROUP> lists must be defined here.
+ New types require specific handling code to be added.
+******************************************************************************/
+#define APPHINT_DATA_TYPE_LIST \
+X(BOOL) \
+X(UINT64) \
+X(UINT32) \
+X(UINT32Bitfield) \
+X(UINT32List) \
+X(STRING)
+
+#define APPHINT_CLASS_LIST \
+X(ALWAYS) \
+X(NEVER) \
+X(DEBUG) \
+X(PDUMP) \
+X(VALIDATION) \
+X(GPUVIRT_VAL)
+
+/*
+*******************************************************************************
+ Visibility control for module parameters
+ These bind build variables to AppHint Visibility Groups.
+******************************************************************************/
+#define APPHINT_ENABLED_CLASS_ALWAYS IMG_TRUE
+#define APPHINT_ENABLED_CLASS_NEVER IMG_FALSE
+#define apphint_modparam_class_ALWAYS(a, b, c) apphint_modparam_enable(a, b, c)
+#if defined(DEBUG)
+	#define APPHINT_ENABLED_CLASS_DEBUG IMG_TRUE
+	#define apphint_modparam_class_DEBUG(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+	#define APPHINT_ENABLED_CLASS_DEBUG IMG_FALSE
+	#define apphint_modparam_class_DEBUG(a, b, c)
+#endif
+#if defined(PDUMP)
+	#define APPHINT_ENABLED_CLASS_PDUMP IMG_TRUE
+	#define apphint_modparam_class_PDUMP(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+	#define APPHINT_ENABLED_CLASS_PDUMP IMG_FALSE
+	#define apphint_modparam_class_PDUMP(a, b, c)
+#endif
+#if defined(SUPPORT_VALIDATION)
+	#define APPHINT_ENABLED_CLASS_VALIDATION IMG_TRUE
+	#define apphint_modparam_class_VALIDATION(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+	#define APPHINT_ENABLED_CLASS_VALIDATION IMG_FALSE
+	#define apphint_modparam_class_VALIDATION(a, b, c)
+#endif
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	#define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_TRUE
+	#define apphint_modparam_class_GPUVIRT_VAL(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+	#define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_FALSE
+	#define apphint_modparam_class_GPUVIRT_VAL(a, b, c)
+#endif
+
+/*
+*******************************************************************************
+ AppHint defaults based on other build parameters
+******************************************************************************/
+#if defined(HWR_DEFAULT_ENABLED)
+	#define APPHNT_BLDVAR_ENABLEHWR         1
+#else
+	#define APPHNT_BLDVAR_ENABLEHWR         0
+#endif
+#if defined(DEBUG)
+	#define APPHNT_BLDVAR_DEBUG             1
+	#define APPHNT_BLDVAR_DBGDUMPLIMIT      RGXFWIF_HWR_DEBUG_DUMP_ALL
+#else
+	#define APPHNT_BLDVAR_DEBUG             0
+	#define APPHNT_BLDVAR_DBGDUMPLIMIT      1
+#endif
+#if defined(DEBUG) || defined(PDUMP)
+#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS     IMG_TRUE
+#else
+#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS     IMG_FALSE
+#endif
+
+#if defined(DEBUG)
+	#define APPHNT_PHYSMEMTEST_ENABLE             1
+#else
+	#define APPHNT_PHYSMEMTEST_ENABLE             0
+#endif
+/*
+*******************************************************************************
+ Table-generated enums
+******************************************************************************/
+/* Unique ID for all AppHints */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_ID_ ## a,
+	APPHINT_LIST_ALL
+#undef X
+	APPHINT_ID_MAX
+} APPHINT_ID;
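+
+/* Illustrative note (added for clarity, not part of the original source):
+ * with the X-macro pattern used here, each list entry expands once per
+ * definition of X, so the enum above expands (in part) to:
+ *
+ *   typedef enum {
+ *       APPHINT_ID_HWRDebugDumpLimit,
+ *       APPHINT_ID_EnableTrustedDeviceAceConfig,
+ *       ...
+ *       APPHINT_ID_MAX
+ *   } APPHINT_ID;
+ *
+ * Adding a hint to one of the APPHINT_LIST_<GROUP> tables therefore extends
+ * every enum and structure generated from that list automatically.
+ */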
+
+/* ID for build variable Apphints - used for build variable only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_BUILDVAR_ID_ ## a,
+	APPHINT_LIST_BUILDVAR
+#undef X
+	APPHINT_BUILDVAR_ID_MAX
+} APPHINT_BUILDVAR_ID;
+
+/* ID for Modparam Apphints - used for modparam only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_MODPARAM_ID_ ## a,
+	APPHINT_LIST_MODPARAM
+#undef X
+	APPHINT_MODPARAM_ID_MAX
+} APPHINT_MODPARAM_ID;
+
+/* ID for Debugfs Apphints - used for debugfs only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_DEBUGFS_ID_ ## a,
+	APPHINT_LIST_DEBUGFS
+#undef X
+	APPHINT_DEBUGFS_ID_MAX
+} APPHINT_DEBUGFS_ID;
+
+/* ID for Debugfs Device Apphints - used for debugfs device only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_DEBUGFS_DEVICE_ID_ ## a,
+	APPHINT_LIST_DEBUGFS_DEVICE
+#undef X
+	APPHINT_DEBUGFS_DEVICE_ID_MAX
+} APPHINT_DEBUGFS_DEVICE_ID;
+
+/* data types and actions */
+typedef enum {
+	APPHINT_DATA_TYPE_INVALID = 0,
+#define X(a) APPHINT_DATA_TYPE_ ## a,
+	APPHINT_DATA_TYPE_LIST
+#undef X
+	APPHINT_DATA_TYPE_MAX
+} APPHINT_DATA_TYPE;
+
+typedef enum {
+#define X(a) APPHINT_CLASS_ ## a,
+	APPHINT_CLASS_LIST
+#undef X
+	APPHINT_CLASS_MAX
+} APPHINT_CLASS;
+
+#endif /* __KM_APPHINT_DEFS_H__ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/linkage.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/linkage.h
new file mode 100644
index 0000000..27c1092
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/linkage.h
@@ -0,0 +1,55 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux specific Services code internal interfaces
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Interfaces between various parts of the Linux specific
+                Services code, that don't have any other obvious
+                header file to go into.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__LINKAGE_H__)
+#define __LINKAGE_H__
+
+PVRSRV_ERROR PVROSFuncInit(void);
+void PVROSFuncDeInit(void);
+
+int PVRDebugCreateDebugFSEntries(void);
+void PVRDebugRemoveDebugFSEntries(void);
+
+#endif /* !defined(__LINKAGE_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/linux_sw_sync.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/linux_sw_sync.h
new file mode 100644
index 0000000..fc66e98
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/linux_sw_sync.h
@@ -0,0 +1,66 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _UAPI_LINUX_PVR_SW_SYNC_H
+#define _UAPI_LINUX_PVR_SW_SYNC_H
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+
+#include <linux/types.h>
+
+#include "pvrsrv_sync_km.h"
+
+struct pvr_sw_sync_create_fence_data {
+	char name[PVRSRV_SYNC_NAME_LENGTH];
+	__s32 fence;
+	__u32 pad;
+	__u64 sync_pt_idx;
+};
+
+struct pvr_sw_timeline_advance_data {
+	__u64 sync_pt_idx;
+};
+
+#define PVR_SW_SYNC_IOC_MAGIC 'W'
+#define PVR_SW_SYNC_IOC_CREATE_FENCE _IOWR(PVR_SW_SYNC_IOC_MAGIC, 0, struct pvr_sw_sync_create_fence_data)
+#define PVR_SW_SYNC_IOC_INC _IOR(PVR_SW_SYNC_IOC_MAGIC, 1, struct pvr_sw_timeline_advance_data)
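+
+/* Illustrative user-space sketch (added for clarity, not part of the original
+ * source; the device node path is an assumption for illustration only):
+ *
+ *   struct pvr_sw_sync_create_fence_data data = { .fence = -1 };
+ *   int fd = open("/dev/pvr_sync", O_RDWR);   // hypothetical device node
+ *   strncpy(data.name, "my-fence", sizeof(data.name) - 1);
+ *   if (fd >= 0 && ioctl(fd, PVR_SW_SYNC_IOC_CREATE_FENCE, &data) == 0)
+ *   {
+ *       // data.fence now holds a new fence fd created at data.sync_pt_idx
+ *   }
+ */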
+
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/lists.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/lists.c
new file mode 100644
index 0000000..e8e7088
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/lists.c
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linked list shared functions implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implementation of the list iterators for types shared among
+                more than one file in the services code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "lists.h"
+
+/*===================================================================
+  LIST ITERATOR FUNCTIONS USED IN MORE THAN ONE FILE (those used just
+  once are implemented locally).
+  ===================================================================*/
+
+IMPLEMENT_LIST_ANY(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE)
+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE)
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/lists.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/lists.h
new file mode 100644
index 0000000..fd25c45
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/lists.h
@@ -0,0 +1,355 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linked list shared functions templates.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Definition of the linked list function templates.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __LISTS_UTILS__
+#define __LISTS_UTILS__
+
+/* instruct QAC to ignore warnings about the following custom formatted macros */
+/* PRQA S 0881,3410 ++ */
+#include <stdarg.h>
+#include "img_types.h"
+#include "device.h"
+#include "power.h"
+
+/*
+ - USAGE -
+
+ The list functions work with any structure that provides the fields psNext and
+ ppsThis. In order to make a function available for a given type, the
+ corresponding function template macro must be used to generate the actual code.
+
+ There are 5 main types of functions:
+ - INSERT      : given a pointer to the head pointer of the list and a pointer
+                 to the node, inserts it as the new head.
+ - INSERT TAIL : given a pointer to the head pointer of the list and a pointer
+                 to the node, inserts the node at the tail of the list.
+ - REMOVE      : given a pointer to a node, removes it from its list.
+ - FOR EACH    : apply a function over all the elements of a list.
+ - ANY         : apply a function over the elements of a list, until one of them
+                 returns a non-null value, which is then returned.
+
+ The last two functions have a variable-argument form, which allows additional
+ parameters to be passed to the callback function. In order to do this, the
+ callback function must take two arguments: the first is the current node and
+ the second is a list of variable arguments (va_list).
+
+ The ANY functions also have another form, which specifies the return type of
+ the callback function and the value that, when returned by the callback,
+ causes the iteration to continue.
+
+*/
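+
+/* Illustrative sketch (added for clarity, not part of the original source):
+ * a structure is eligible for these templates if it embeds the two intrusive
+ * link fields. MY_NODE and the calls below are hypothetical:
+ *
+ *   typedef struct _MY_NODE_
+ *   {
+ *       struct _MY_NODE_  *psNext;   // next node in the list
+ *       struct _MY_NODE_ **ppsThis;  // address of the pointer to this node
+ *       IMG_UINT32         ui32Data;
+ *   } MY_NODE;
+ *
+ *   DECLARE_LIST_INSERT(MY_NODE);    // in a header
+ *   IMPLEMENT_LIST_INSERT(MY_NODE)   // in exactly one source file
+ *
+ *   MY_NODE *psHead = NULL, sNode;
+ *   List_MY_NODE_Insert(&psHead, &sNode);
+ */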
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_ForEach
+@Description    Apply a callback function to all the elements of a list.
+@Input          psHead        The head of the list to be processed.
+@Input          pfnCallBack   The function to be applied to each element of the list.
+*/ /**************************************************************************/
+#define DECLARE_LIST_FOR_EACH(TYPE) \
+void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH(TYPE) \
+void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
+{\
+	while (psHead)\
+	{\
+		pfnCallBack(psHead);\
+		psHead = psHead->psNext;\
+	}\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_ForEachSafe
+@Description    Apply a callback function to all the elements of a list. Do it
+                in a safe way that handles the fact that a node might remove
+                itself from the list during the iteration.
+@Input          psHead        The head of the list to be processed.
+@Input          pfnCallBack   The function to be applied to each element of the list.
+*/ /**************************************************************************/
+#define DECLARE_LIST_FOR_EACH_SAFE(TYPE) \
+void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH_SAFE(TYPE) \
+void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
+{\
+	TYPE *psNext;\
+\
+	while (psHead)\
+	{\
+		psNext = psHead->psNext; \
+		pfnCallBack(psHead);\
+		psHead = psNext;\
+	}\
+}
+
+
+#define DECLARE_LIST_FOR_EACH_VA(TYPE) \
+void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \
+void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) \
+{\
+	va_list ap;\
+	while (psHead)\
+	{\
+		va_start(ap, pfnCallBack);\
+		pfnCallBack(psHead, ap);\
+		psHead = psHead->psNext;\
+		va_end(ap);\
+	}\
+}
+
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Any
+@Description    Applies a callback function to the elements of a list until
+                the function returns a non-null value, which is then returned.
+@Input          psHead        The head of the list to be processed.
+@Input          pfnCallBack   The function to be applied to each element of the list.
+@Return         The first non-null value returned by the callback function.
+*/ /**************************************************************************/
+#define DECLARE_LIST_ANY(TYPE) \
+void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY(TYPE) \
+void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))\
+{ \
+	void *pResult;\
+	TYPE *psNextNode;\
+	pResult = NULL;\
+	psNextNode = psHead;\
+	while (psHead && !pResult)\
+	{\
+		psNextNode = psNextNode->psNext;\
+		pResult = pfnCallBack(psHead);\
+		psHead = psNextNode;\
+	}\
+	return pResult;\
+}
+
+
+/* With variable arguments, which will be passed as a va_list to the callback function. */
+
+#define DECLARE_LIST_ANY_VA(TYPE) \
+void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA(TYPE) \
+void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+	va_list ap;\
+	TYPE *psNextNode;\
+	void* pResult = NULL;\
+	while (psHead && !pResult)\
+	{\
+		psNextNode = psHead->psNext;\
+		va_start(ap, pfnCallBack);\
+		pResult = pfnCallBack(psHead, ap);\
+		va_end(ap);\
+		psHead = psNextNode;\
+	}\
+	return pResult;\
+}
+
+/* These variants give extra type safety, so there is no need to cast the results. */
+
+#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\
+{ \
+	RTYPE result;\
+	TYPE *psNextNode;\
+	result = CONTINUE;\
+	psNextNode = psHead;\
+	while (psHead && result == CONTINUE)\
+	{\
+		psNextNode = psNextNode->psNext;\
+		result = pfnCallBack(psHead);\
+		psHead = psNextNode;\
+	}\
+	return result;\
+}
+
+
+#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+	va_list ap;\
+	TYPE *psNextNode;\
+	RTYPE result = CONTINUE;\
+	while (psHead && result == CONTINUE)\
+	{\
+		psNextNode = psHead->psNext;\
+		va_start(ap, pfnCallBack);\
+		result = pfnCallBack(psHead, ap);\
+		va_end(ap);\
+		psHead = psNextNode;\
+	}\
+	return result;\
+}
+
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Remove
+@Description    Removes a given node from the list.
+@Input          psNode      The pointer to the node to be removed.
+*/ /**************************************************************************/
+#define DECLARE_LIST_REMOVE(TYPE) \
+void List_##TYPE##_Remove(TYPE *psNode)
+
+#define IMPLEMENT_LIST_REMOVE(TYPE) \
+void List_##TYPE##_Remove(TYPE *psNode)\
+{\
+	(*psNode->ppsThis)=psNode->psNext;\
+	if (psNode->psNext)\
+	{\
+		psNode->psNext->ppsThis = psNode->ppsThis;\
+	}\
+}
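+
+/* Note (added for clarity, not part of the original source): removal needs no
+ * head pointer and no list walk because ppsThis always holds the address of
+ * the pointer that points at this node (either the head pointer itself or the
+ * previous node's psNext), so a node can unlink itself in O(1). */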
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Insert
+@Description    Inserts a given node at the beginning of the list.
+@Input          ppsHead    The pointer to the pointer to the head node.
+@Input          psNewNode  The pointer to the node to be inserted.
+*/ /**************************************************************************/
+#define DECLARE_LIST_INSERT(TYPE) \
+void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)
+
+#define IMPLEMENT_LIST_INSERT(TYPE) \
+void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\
+{\
+	psNewNode->ppsThis = ppsHead;\
+	psNewNode->psNext = *ppsHead;\
+	*ppsHead = psNewNode;\
+	if (psNewNode->psNext)\
+	{\
+		psNewNode->psNext->ppsThis = &(psNewNode->psNext);\
+	}\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_InsertTail
+@Description    Inserts a given node at the end of the list.
+@Input          ppsHead    The pointer to the pointer to the head node.
+@Input          psNewNode  The pointer to the node to be inserted.
+*/ /**************************************************************************/
+#define DECLARE_LIST_INSERT_TAIL(TYPE) \
+void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)
+
+#define IMPLEMENT_LIST_INSERT_TAIL(TYPE) \
+void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)\
+{\
+	TYPE *psTempNode = *ppsHead;\
+	if (psTempNode != NULL)\
+	{\
+		while (psTempNode->psNext)\
+			psTempNode = psTempNode->psNext;\
+		ppsHead = &psTempNode->psNext;\
+	}\
+	psNewNode->ppsThis = ppsHead;\
+	psNewNode->psNext = NULL;\
+	*ppsHead = psNewNode;\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Reverse
+@Description    Reverses a list in place.
+@Input          ppsHead    The pointer to the pointer to the head node.
+*/ /**************************************************************************/
+#define DECLARE_LIST_REVERSE(TYPE) \
+void List_##TYPE##_Reverse(TYPE **ppsHead)
+
+#define IMPLEMENT_LIST_REVERSE(TYPE) \
+void List_##TYPE##_Reverse(TYPE **ppsHead)\
+{\
+	TYPE *psTmpNode1; \
+	TYPE *psTmpNode2; \
+	TYPE *psCurNode; \
+	psTmpNode1 = NULL; \
+	psCurNode = *ppsHead; \
+	while (psCurNode) { \
+		psTmpNode2 = psCurNode->psNext; \
+		psCurNode->psNext = psTmpNode1; \
+		psTmpNode1 = psCurNode; \
+		psCurNode = psTmpNode2; \
+		if (psCurNode) \
+		{ \
+			psTmpNode1->ppsThis = &(psCurNode->psNext); \
+		} \
+		else \
+		{ \
+			psTmpNode1->ppsThis = ppsHead; \
+		} \
+	} \
+	*ppsHead = psTmpNode1; \
+}
+
+#define IS_LAST_ELEMENT(x) ((x)->psNext == NULL)
+
+
+DECLARE_LIST_ANY(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE);
+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
+
+#undef DECLARE_LIST_ANY_2
+#undef DECLARE_LIST_ANY_VA
+#undef DECLARE_LIST_ANY_VA_2
+#undef DECLARE_LIST_FOR_EACH
+#undef DECLARE_LIST_FOR_EACH_VA
+#undef DECLARE_LIST_INSERT
+#undef DECLARE_LIST_REMOVE
+
+#endif
+
+/* re-enable warnings */
+/* PRQA S 0881,3410 -- */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/lock.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/lock.h
new file mode 100644
index 0000000..ebae79d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/lock.h
@@ -0,0 +1,355 @@
+/*************************************************************************/ /*!
+@File           lock.h
+@Title          Locking interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal locking interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LOCK_H_
+#define _LOCK_H_
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#include "lock_types.h"
+
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include "allocmem.h"
+#include <linux/atomic.h>
+
+#define OSLockCreateNoStats(phLock) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(phLock) = OSAllocMemNoStats(sizeof(struct mutex)); \
+	if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+	e;})
+#define OSLockCreate(phLock) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(phLock) = OSAllocMem(sizeof(struct mutex)); \
+	if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+	e;})
+#define OSLockDestroy(hLock) ({mutex_destroy((hLock)); OSFreeMem((hLock)); PVRSRV_OK;})
+#define OSLockDestroyNoStats(hLock) ({mutex_destroy((hLock)); OSFreeMemNoStats((hLock)); PVRSRV_OK;})
+
+#define OSLockAcquire(hLock) ({mutex_lock((hLock)); PVRSRV_OK;})
+#define OSLockAcquireNested(hLock, subclass) ({mutex_lock_nested((hLock), (subclass)); PVRSRV_OK;})
+#define OSLockRelease(hLock) ({mutex_unlock((hLock)); PVRSRV_OK;})
+
+#define OSLockIsLocked(hLock) ((mutex_is_locked((hLock)) == 1) ? IMG_TRUE : IMG_FALSE)
+#define OSTryLockAcquire(hLock) ((mutex_trylock(hLock) == 1) ? IMG_TRUE : IMG_FALSE)
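+
+/* Illustrative usage sketch (added for clarity, not part of the original
+ * source):
+ *
+ *   POS_LOCK hLock;
+ *   if (OSLockCreate(&hLock) == PVRSRV_OK)
+ *   {
+ *       OSLockAcquire(hLock);
+ *       // ... critical section ...
+ *       OSLockRelease(hLock);
+ *       OSLockDestroy(hLock);
+ *   }
+ */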
+
+/* These _may_ be reordered or optimized away entirely by the compiler/hw */
+#define OSAtomicRead(pCounter)	atomic_read(pCounter)
+#define OSAtomicWrite(pCounter, i)	atomic_set(pCounter, i)
+
+/* The following atomic operations, in addition to being SMP-safe, also
+   imply a memory barrier around the operation. */
+#define OSAtomicIncrement(pCounter) atomic_inc_return(pCounter)
+#define OSAtomicDecrement(pCounter) atomic_dec_return(pCounter)
+#define OSAtomicCompareExchange(pCounter, oldv, newv) atomic_cmpxchg(pCounter,oldv,newv)
+
+#define OSAtomicAdd(pCounter, incr) atomic_add_return(incr,pCounter)
+#define OSAtomicAddUnless(pCounter, incr, test) __atomic_add_unless(pCounter,incr,test)
+
+#define OSAtomicSubtract(pCounter, incr) atomic_add_return(-(incr),pCounter)
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+/* INT mapping pattern:
+ * -2147483648 to -1 ---> 0 to +2147483647
+ * 0 to +2147483647  ---> +2147483648 to +4294967295 */
+
+/*! Used for mapping UINT32 to INT32 for implementation that
+ * use unsigned integers as atomic types.
+ */
+#define MAP_UNSIGNED32_TO_SIGNED32(x)   ((x) - 0x80000000)
+/*! Used for mapping INT32 to UINT32 for implementation that
+ * use signed integers as atomic types.
+ */
+#define MAP_SIGNED32_TO_UNSIGNED32(x)   ((x) + 0x80000000)
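+
+/* Worked example (added for clarity): MAP_SIGNED32_TO_UNSIGNED32(-2147483648)
+ * yields 0 and MAP_SIGNED32_TO_UNSIGNED32(-1) yields 2147483647, matching the
+ * pattern above; MAP_UNSIGNED32_TO_SIGNED32 is the inverse mapping. */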
+
+
+/**************************************************************************/ /*!
+@Function       OSLockCreate
+@Description    Creates an operating system lock object.
+@Output         phLock           The created lock.
+@Return         PVRSRV_OK on success. PVRSRV_ERROR_OUT_OF_MEMORY if the driver
+                cannot allocate CPU memory needed for the lock.
+                PVRSRV_ERROR_INIT_FAILURE if the Operating System fails to
+                allocate the lock.
+ */ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR OSLockCreate(POS_LOCK *phLock);
+#if defined(INTEGRITY_OS)
+#define OSLockCreateNoStats OSLockCreate
+#endif
+
+/**************************************************************************/ /*!
+@Function       OSLockDestroy
+@Description    Destroys an operating system lock object.
+@Input          hLock            The lock to be destroyed.
+@Return         None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR OSLockDestroy(POS_LOCK hLock);
+
+#if defined(INTEGRITY_OS)
+#define OSLockDestroyNoStats OSLockDestroy
+#endif
+/**************************************************************************/ /*!
+@Function       OSLockAcquire
+@Description    Acquires an operating system lock.
+                NB. This function must not return until the lock is acquired
+                (meaning the implementation should not time out or return with
+                an error, as the caller will assume they have the lock).
+@Input          hLock            The lock to be acquired.
+@Return         None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+void OSLockAcquire(POS_LOCK hLock);
+
+/**************************************************************************/ /*!
+@Function       OSTryLockAcquire
+@Description    Try to acquire an operating system lock.
+                NB. If the lock is acquired successfully on the first attempt,
+                the function returns IMG_TRUE; otherwise it returns IMG_FALSE.
+@Input          hLock            The lock to be acquired.
+@Return         IMG_TRUE if lock acquired successfully,
+                IMG_FALSE otherwise.
+ */ /**************************************************************************/
+IMG_INTERNAL
+IMG_BOOL OSTryLockAcquire(POS_LOCK hLock);
+
+/* Nested notation isn't used in UM or other OS's */
+/**************************************************************************/ /*!
+@Function       OSLockAcquireNested
+@Description    For operating systems other than Linux, this equates to an
+                OSLockAcquire() call. On Linux, this function wraps a call
+                to mutex_lock_nested(). This recognises the scenario where
+                there may be multiple subclasses within a particular class
+                of lock. In such cases, the order in which the locks belonging
+                to these various subclasses are acquired is important and must
+                be validated.
+@Input          hLock            The lock to be acquired.
+@Input          subclass         The subclass of the lock.
+@Return         None.
+ */ /**************************************************************************/
+#define OSLockAcquireNested(hLock, subclass) OSLockAcquire((hLock))
+
+/**************************************************************************/ /*!
+@Function       OSLockRelease
+@Description    Releases an operating system lock.
+@Input          hLock            The lock to be released.
+@Return         None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+void OSLockRelease(POS_LOCK hLock);
+
+/**************************************************************************/ /*!
+@Function       OSLockIsLocked
+@Description    Tests whether or not an operating system lock is currently
+                locked.
+@Input          hLock            The lock to be tested.
+@Return         IMG_TRUE if locked, IMG_FALSE if not locked.
+ */ /**************************************************************************/
+IMG_INTERNAL
+IMG_BOOL OSLockIsLocked(POS_LOCK hLock);
+
+#if defined(LINUX)
+
+/* Use GCC intrinsics (read/write semantics consistent with kernel-side implementation) */
+#define OSAtomicRead(pCounter) (*(volatile IMG_INT32 *)&(pCounter)->counter)
+#define OSAtomicWrite(pCounter, i) ((pCounter)->counter = (IMG_INT32) i)
+#define OSAtomicIncrement(pCounter) __sync_add_and_fetch((&(pCounter)->counter), 1)
+#define OSAtomicDecrement(pCounter) __sync_sub_and_fetch((&(pCounter)->counter), 1)
+#define OSAtomicCompareExchange(pCounter, oldv, newv) \
+	__sync_val_compare_and_swap((&(pCounter)->counter), oldv, newv)
+
+#define OSAtomicAdd(pCounter, incr) __sync_add_and_fetch((&(pCounter)->counter), incr)
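+/* Compare-and-swap retry loop (comment added for clarity): re-read the counter
+ * until it either equals 'test' (no change is made) or the exchange succeeds;
+ * in both cases the value observed before any modification is returned. */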
+#define OSAtomicAddUnless(pCounter, incr, test) ({ \
+	IMG_INT32 c; IMG_INT32 old; \
+	c = OSAtomicRead(pCounter); \
+	while (1) { \
+		if (c == (test)) break; \
+		old = OSAtomicCompareExchange(pCounter, c, c+(incr)); \
+		if (old == c) break; \
+		c = old; \
+	} c; })
+
+#define OSAtomicSubtract(pCounter, incr) OSAtomicAdd(pCounter, -(incr))
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
+
+#else
+
+/*************************************************************************/ /*!
+@Function       OSAtomicRead
+@Description    Read the value of a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to read
+@Return         The value of the atomic variable
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicRead(const ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicWrite
+@Description    Write the value of a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be written to
+@Input          v               The value to write
+@Return         None
+*/ /**************************************************************************/
+IMG_INTERNAL
+void OSAtomicWrite(ATOMIC_T *pCounter, IMG_INT32 v);
+
+/* The following atomic operations, in addition to being SMP-safe, should
+   also have a memory barrier around each operation. */
+/*************************************************************************/ /*!
+@Function       OSAtomicIncrement
+@Description    Increment the value of a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be incremented
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicIncrement(ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicDecrement
+@Description    Decrement the value of a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be decremented
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicDecrement(ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicAdd
+@Description    Add a specified value to a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to add the value to
+@Input          v               The value to be added
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicAdd(ATOMIC_T *pCounter, IMG_INT32 v);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicAddUnless
+@Description    Add a specified value to a variable atomically unless it
+                already equals a particular value.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to add the value to
+@Input          v               The value to be added to 'pCounter'
+@Input          t               The test value. If 'pCounter' equals this,
+                                its value will not be adjusted
+@Return         The old value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicAddUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicSubtract
+@Description    Subtract a specified value from a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to subtract the value from
+@Input          v               The value to be subtracted
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicSubtract(ATOMIC_T *pCounter, IMG_INT32 v);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicSubtractUnless
+@Description    Subtract a specified value from a variable atomically unless
+                it already equals a particular value.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to subtract the value from
+@Input          v               The value to be subtracted from 'pCounter'
+@Input          t               The test value. If 'pCounter' equals this,
+                                its value will not be adjusted
+@Return         The old value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicSubtractUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicCompareExchange
+@Description    Set a variable to a given value only if it is currently
+                equal to a specified value. The whole operation must be atomic.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be checked and
+                                possibly updated
+@Input          oldv            The value the atomic variable must have in
+                                order to be modified
+@Input          newv            The value to write to the atomic variable if
+                                it equals 'oldv'
+@Return         The old value of *pCounter
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicCompareExchange(ATOMIC_T *pCounter, IMG_INT32 oldv, IMG_INT32 newv);
+
+#endif /* defined(LINUX) */
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+
+#endif	/* _LOCK_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/lock_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/lock_types.h
new file mode 100644
index 0000000..e35c8b0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/lock_types.h
@@ -0,0 +1,91 @@
+/*************************************************************************/ /*!
+@File           lock_types.h
+@Title          Locking types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Locking specific enums, defines and structures
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LOCK_TYPES_H_
+#define _LOCK_TYPES_H_
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+/* The mutex is defined as a pointer so that this header stays source-compatible
+ * with the non-kernel implementations below. This isn't ideal and wouldn't
+ * normally be done in kernel code. */
+typedef struct mutex *POS_LOCK;
+typedef struct rw_semaphore *POSWR_LOCK;
+typedef atomic_t ATOMIC_T;
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+#include "img_types.h" /* needed for IMG_INT */
+typedef struct _OS_LOCK_ *POS_LOCK;
+
+#if defined(LINUX) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+typedef struct _OSWR_LOCK_ *POSWR_LOCK;
+#else /* defined(LINUX) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+typedef struct _OSWR_LOCK_ {
+	IMG_UINT32 ui32Dummy;
+} *POSWR_LOCK;
+#endif /* defined(LINUX) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+
+#if defined(LINUX)
+	typedef struct _OS_ATOMIC {IMG_INT32 counter;} ATOMIC_T;
+#elif defined(__QNXNTO__)
+	typedef struct _OS_ATOMIC {IMG_UINT32 counter;} ATOMIC_T;
+#elif defined(_WIN32)
+	/*
+	 * Dummy definition. WDDM doesn't use Services, but some headers
+	 * still have to be shared. This is one such case.
+	 */
+	typedef struct _OS_ATOMIC {IMG_INT32 counter;} ATOMIC_T;
+#elif defined(INTEGRITY_OS)
+	/* Only the lower 32 bits are used in the OS atomic APIs so that behaviour is consistent across all OSes */
+	typedef struct _OS_ATOMIC {IMG_UINT64 counter;} ATOMIC_T;
+#else
+	#error "Please type-define an atomic lock for this environment"
+#endif
+
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+
+#endif /* _LOCK_TYPES_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/log2.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/log2.h
new file mode 100644
index 0000000..36abf5f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/log2.h
@@ -0,0 +1,409 @@
+/*************************************************************************/ /*!
+@Title          Integer log2 and related functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef LOG2_H
+#define LOG2_H
+
+#include "img_defs.h"
+
+/*************************************************************************/ /*!
+@Description    Determine if a number is a power of two.
+@Input          n
+@Return         True if n is a power of 2, false otherwise. Note that this
+                also returns true for n == 0, although 0 is not a power of two.
+*/ /**************************************************************************/
+static INLINE IMG_BOOL __const_function IsPower2(uint32_t n)
+{
+	/* C++ needs this cast. */
+	return (IMG_BOOL)((n & (n - 1)) == 0);
+}
+
+/*************************************************************************/ /*!
+@Description    Determine if a number is a power of two.
+@Input          n
+@Return         True if n is a power of 2, false otherwise. Note that this
+                also returns true for n == 0, although 0 is not a power of two.
+*/ /**************************************************************************/
+static INLINE IMG_BOOL __const_function IsPower2_64(uint64_t n)
+{
+	/* C++ needs this cast. */
+	return (IMG_BOOL)((n & (n - 1)) == 0);
+}
+
+/* Code using GNU GCC intrinsics */
+#if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER))
+
+/* CHAR_BIT is typically found in <limits.h>. For platforms where CHAR_BIT
+ * is not available, define it here under the assumption that a byte is
+ * 8 bits wide. */
+#ifndef CHAR_BIT
+#define CHAR_BIT 8U
+#endif
+
+/*************************************************************************/ /*!
+@Description    Compute floor(log2(n))
+@Input          n
+@Return         log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function FloorLog2(uint32_t n)
+{
+	if (unlikely(n == 0))
+	{
+		return 0;
+	}
+	else
+	{
+		uint32_t uNumBits = CHAR_BIT * sizeof(n);
+		return uNumBits - (uint32_t)__builtin_clz(n) - 1U;
+	}
+}
+
+/*************************************************************************/ /*!
+@Description    Compute floor(log2(n))
+@Input          n
+@Return         log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function FloorLog2_64(uint64_t n)
+{
+	if (unlikely(n == 0))
+	{
+		return 0;
+	}
+	else
+	{
+		uint32_t uNumBits = CHAR_BIT * sizeof(n);
+		return uNumBits - (uint32_t)__builtin_clzll(n) - 1U;
+	}
+}
+
+/*************************************************************************/ /*!
+@Description    Compute ceil(log2(n))
+@Input          n
+@Return         log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function CeilLog2(uint32_t n)
+{
+	if (unlikely(n == 0 || n == 1))
+	{
+		return 0;
+	}
+	else
+	{
+		uint32_t uNumBits = CHAR_BIT * sizeof(n);
+
+		n--; /* Handle powers of 2 */
+		return uNumBits - (uint32_t)__builtin_clz(n);
+	}
+}
+
+/*************************************************************************/ /*!
+@Description    Compute ceil(log2(n))
+@Input          n
+@Return         log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function CeilLog2_64(uint64_t n)
+{
+	if (unlikely(n == 0 || n == 1))
+	{
+		return 0;
+	}
+	else
+	{
+		uint32_t uNumBits = CHAR_BIT * sizeof(n);
+
+		n--; /* Handle powers of 2 */
+		return uNumBits - (uint32_t)__builtin_clzll(n);
+	}
+}
+
+/*************************************************************************/ /*!
+@Description    Compute log2(n) for exact powers of two only
+@Input          n                   Must be a power of two
+@Return         log2(n)
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function ExactLog2(uint32_t n)
+{
+	return (uint32_t)(CHAR_BIT * sizeof(n)) - (uint32_t)__builtin_clz(n) - 1U;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute log2(n) for exact powers of two only
+@Input          n                   Must be a power of two
+@Return         log2(n)
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function ExactLog2_64(uint64_t n)
+{
+	return (uint32_t)(CHAR_BIT * sizeof(n)) - (uint32_t)__builtin_clzll(n) - 1U;
+}
+
+/*************************************************************************/ /*!
+@Description    Round a non-power-of-two number up to the next power of two.
+@Input          n
+@Return         n rounded up to the next power of two. If n is zero or
+                already a power of two, n is returned unmodified. Returns 0
+                if the result would not fit in 32 bits (n > 2^31).
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n)
+{
+	/* Cases where n is greater than 2^31 need separate handling,
+	 * as the result of (1 << 32) is undefined. */
+	if (unlikely( n == 0 || n > (uint32_t)1 << (CHAR_BIT * sizeof(n) - 1)))
+	{
+		return 0;
+	}
+
+	/* Return n if it is already a power of 2 */
+	if ((IMG_BOOL)((n & (n - 1)) == 0))
+	{
+		return n;
+	}
+
+	return (uint32_t)1 << ((uint32_t)(CHAR_BIT * sizeof(n)) - (uint32_t)__builtin_clz(n));
+}
+
+/*************************************************************************/ /*!
+@Description    Round a non-power-of-two number up to the next power of two.
+@Input          n
+@Return         n rounded up to the next power of two. If n is zero or
+                already a power of two, n is returned unmodified. Returns 0
+                if the result would not fit in 64 bits (n > 2^63).
+*/ /**************************************************************************/
+static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n)
+{
+	/* Cases where n is greater than 2^63 need separate handling,
+	 * as the result of (1 << 64) is undefined. */
+	if (unlikely( n == 0 || n > (uint64_t)1 << (CHAR_BIT * sizeof(n) - 1)))
+	{
+		return 0;
+	}
+
+	/* Return n if it is already a power of 2 */
+	if ((IMG_BOOL)((n & (n - 1)) == 0))
+	{
+		return n;
+	}
+
+	return (uint64_t)1 << ((uint32_t)(CHAR_BIT * sizeof(n)) - (uint32_t)__builtin_clzll(n));
+}
+
+#else /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */
+
+/*************************************************************************/ /*!
+@Description    Round a non-power-of-two number up to the next power of two.
+@Input          n
+@Return         n rounded up to the next power of two. If n is zero or
+                already a power of two, n is returned unmodified. Returns 0
+                if the result would not fit in 32 bits (n > 2^31).
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n)
+{
+	n--;
+	n |= n >> 1;  /* handle  2 bit numbers */
+	n |= n >> 2;  /* handle  4 bit numbers */
+	n |= n >> 4;  /* handle  8 bit numbers */
+	n |= n >> 8;  /* handle 16 bit numbers */
+	n |= n >> 16; /* handle 32 bit numbers */
+	n++;
+
+	return n;
+}
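+
+/* Worked example of the bit-smearing above (illustrative): for n = 37
+ * (0b100101), n-1 = 36 (0b100100); the shift/OR cascade sets every bit
+ * below the top set bit, giving 0b111111 (63); the final increment yields
+ * 64, the next power of two. */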
+
+/*************************************************************************/ /*!
+@Description    Round a non-power-of-two number up to the next power of two.
+@Input          n
+@Return         n rounded up to the next power of two. If n is zero or
+                already a power of two, n is returned unmodified. Returns 0
+                if the result would not fit in 64 bits (n > 2^63).
+*/ /**************************************************************************/
+static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n)
+{
+	n--;
+	n |= n >> 1;  /* handle  2 bit numbers */
+	n |= n >> 2;  /* handle  4 bit numbers */
+	n |= n >> 4;  /* handle  8 bit numbers */
+	n |= n >> 8;  /* handle 16 bit numbers */
+	n |= n >> 16; /* handle 32 bit numbers */
+	n |= n >> 32; /* handle 64 bit numbers */
+	n++;
+
+	return n;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute floor(log2(n))
+@Input          n
+@Return         log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function FloorLog2(uint32_t n)
+{
+	uint32_t log2 = 0;
+
+	while (n >>= 1)
+		log2++;
+
+	return log2;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute floor(log2(n))
+@Input          n
+@Return         log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function FloorLog2_64(uint64_t n)
+{
+	uint32_t log2 = 0;
+
+	while (n >>= 1)
+		log2++;
+
+	return log2;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute ceil(log2(n))
+@Input          n
+@Return         log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function CeilLog2(uint32_t n)
+{
+	uint32_t log2 = 0;
+
+	if (n == 0)
+		return 0;
+
+	n--; /* Handle powers of 2 */
+
+	while (n)
+	{
+		log2++;
+		n >>= 1;
+	}
+
+	return log2;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute ceil(log2(n))
+@Input          n
+@Return         log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function CeilLog2_64(uint64_t n)
+{
+	uint32_t log2 = 0;
+
+	if (n == 0)
+		return 0;
+
+	n--; /* Handle powers of 2 */
+
+	while (n)
+	{
+		log2++;
+		n >>= 1;
+	}
+
+	return log2;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute log2(n) for exact powers of two only
+@Input          n                   Must be a power of two
+@Return         log2(n)
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function ExactLog2(uint32_t n)
+{
+	static const uint32_t b[] =
+		{0xAAAAAAAA, 0xCCCCCCCC, 0xF0F0F0F0, 0xFF00FF00, 0xFFFF0000};
+	uint32_t r = (n & b[0]) != 0;
+
+	r |= (uint32_t) ((n & b[4]) != 0) << 4;
+	r |= (uint32_t) ((n & b[3]) != 0) << 3;
+	r |= (uint32_t) ((n & b[2]) != 0) << 2;
+	r |= (uint32_t) ((n & b[1]) != 0) << 1;
+
+	return r;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute log2(n) for exact powers of two only
+@Input          n                   Must be a power of two
+@Return         log2(n)
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function ExactLog2_64(uint64_t n)
+{
+	static const uint64_t b[] =
+		{0xAAAAAAAAAAAAAAAAULL, 0xCCCCCCCCCCCCCCCCULL,
+		 0xF0F0F0F0F0F0F0F0ULL, 0xFF00FF00FF00FF00ULL,
+		 0xFFFF0000FFFF0000ULL, 0xFFFFFFFF00000000ULL};
+	uint32_t r = (n & b[0]) != 0;
+
+	r |= (uint32_t) ((n & b[5]) != 0) << 5;
+	r |= (uint32_t) ((n & b[4]) != 0) << 4;
+	r |= (uint32_t) ((n & b[3]) != 0) << 3;
+	r |= (uint32_t) ((n & b[2]) != 0) << 2;
+	r |= (uint32_t) ((n & b[1]) != 0) << 1;
+
+	return r;
+}
+
+#endif /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */
+
+/*************************************************************************/ /*!
+@Description    Compute the number of mipmap levels, i.e.
+                floor(log2(size)) + 1, where size is the maximum of the
+                three dimensions. This is one of the very few valid uses of
+                FloorLog2; usually CeilLog2() should be used instead.
+                For a 5x5x1 texture, the 3 miplevels are:
+                    0:  5x5x1
+                    1:  2x2x1
+                    2:  1x1x1
+
+                For an 8x8x1 texture, the 4 miplevels are:
+                    0:  8x8x1
+                    1:  4x4x1
+                    2:  2x2x1
+                    3:  1x1x1
+
+@Input          sizeX, sizeY, sizeZ
+@Return         Count of mipmap levels for the given dimensions
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function NumMipLevels(uint32_t sizeX, uint32_t sizeY, uint32_t sizeZ)
+{
+	uint32_t maxSize = MAX(MAX(sizeX, sizeY), sizeZ);
+	return FloorLog2(maxSize) + 1;
+}
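+
+/* A minimal self-check sketch for the helpers above. It is not part of the
+ * original driver; LOG2_SELF_TEST is a hypothetical guard used here only so
+ * that the sketch compiles out by default. It illustrates expected results. */
+#if defined(LOG2_SELF_TEST)
+static INLINE IMG_BOOL Log2SelfTest(void)
+{
+	return (IMG_BOOL)(IsPower2(8)                     && /* 8 is a power of two */
+	                  FloorLog2(5) == 2               && /* 5 rounds down to 2^2 */
+	                  CeilLog2(5) == 3                && /* 5 rounds up to 2^3 */
+	                  ExactLog2(16) == 4              && /* exact powers only */
+	                  RoundUpToNextPowerOfTwo(5) == 8 &&
+	                  NumMipLevels(5, 5, 1) == 3);       /* 5x5, 2x2, 1x1 */
+}
+#endif /* LOG2_SELF_TEST */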
+
+#endif /* LOG2_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mem_utils.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mem_utils.c
new file mode 100644
index 0000000..4d605ec
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mem_utils.c
@@ -0,0 +1,382 @@
+/*************************************************************************/ /*!
+@File
+@Title          Memory manipulation functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Memory related functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "osfunc_common.h"
+
+/* This workaround is only *required* on ARM64. Avoid building or including
+ * it by default on other architectures, unless the 'safe memcpy' test flag
+ * is enabled. (The code should work on other architectures.)
+ */
+
+#if defined(__arm64__) || defined(__aarch64__) || defined (PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)
+
+/* NOTE: This C file is compiled with -ffreestanding to avoid pattern matching
+ *       by the compiler to stdlib functions, and it must only use the below
+ *       headers. Do not include any IMG or services headers in this file.
+ */
+#if defined(__KERNEL__) && defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+/* The attribute "vector_size" will generate floating point instructions
+ * and use FPU registers. In kernel OS, the FPU registers might be corrupted
+ * when CPU is doing context switch because FPU registers are not expected to
+ * be stored.
+ * GCC enables compiler option, -mgeneral-regs-only, by default.
+ * This option restricts the generated code to use general registers only
+ * so that we don't have issues on that.
+ */
+#if defined(__KERNEL__) && defined(__clang__)
+
+#define DEVICE_MEMSETCPY_NON_VECTOR_KM
+#if !defined(BITS_PER_BYTE)
+#define BITS_PER_BYTE (8)
+#endif /* BITS_PER_BYTE */
+
+typedef __uint128_t uint128_t;
+
+typedef struct
+{
+	uint128_t ui128DataFields[2];
+}
+uint256_t;
+
+#endif
+
+/* This file is only intended to be used on platforms which use GCC or Clang,
+ * due to its requirement on __attribute__((vector_size(n))), typeof() and
+ * the __SIZEOF_*__ macros.
+ */
+#if defined(__GNUC__)
+
+#define MIN(a, b) \
+ ({__typeof(a) _a = (a); __typeof(b) _b = (b); _a > _b ? _b : _a;})
+
+#if !defined(DEVICE_MEMSETCPY_ALIGN_IN_BYTES)
+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES __SIZEOF_LONG__
+#endif
+#if (DEVICE_MEMSETCPY_ALIGN_IN_BYTES & (DEVICE_MEMSETCPY_ALIGN_IN_BYTES - 1)) != 0
+#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be a power of 2"
+#endif
+#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES < 4
+#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be equal or greater than 4"
+#endif
+
+#if __SIZEOF_POINTER__ != __SIZEOF_LONG__
+#error No support for architectures where void* and long are sized differently
+#endif
+
+#if   __SIZEOF_LONG__ >  DEVICE_MEMSETCPY_ALIGN_IN_BYTES
+/* Meaningless, and harder to do correctly */
+# error Cannot handle DEVICE_MEMSETCPY_ALIGN_IN_BYTES < sizeof(long)
+typedef unsigned long block_t;
+#elif __SIZEOF_LONG__ <= DEVICE_MEMSETCPY_ALIGN_IN_BYTES
+# if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM)
+#  if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8
+    typedef uint64_t block_t;
+#  elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16
+    typedef uint128_t block_t;
+#  elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32
+    typedef uint256_t block_t;
+#  endif
+# else
+typedef unsigned int block_t
+	__attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES)));
+# endif
+# if defined(__arm64__) || defined(__aarch64__)
+#  if   DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8
+#   define DEVICE_MEMSETCPY_ARM64
+#   define REGSZ "w"
+#   define REGCL "w"
+#   define BVCLB "r"
+#  elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16
+#   define DEVICE_MEMSETCPY_ARM64
+#   define REGSZ "x"
+#   define REGCL "x"
+#   define BVCLB "r"
+#  elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32
+#   if defined(__ARM_NEON_FP)
+#    define DEVICE_MEMSETCPY_ARM64
+#    define REGSZ "q"
+#    define REGCL "v"
+#    define BVCLB "w"
+#   endif
+#  endif
+#  if defined(DEVICE_MEMSETCPY_ARM64)
+#   if defined(DEVICE_MEMSETCPY_ARM64_NON_TEMPORAL)
+#    define NSHLD() __asm__ ("dmb nshld")
+#    define NSHST() __asm__ ("dmb nshst")
+#    define LDP "ldnp"
+#    define STP "stnp"
+#   else
+#    define NSHLD()
+#    define NSHST()
+#    define LDP "ldp"
+#    define STP "stp"
+#   endif
+#   if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM)
+#    if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8
+typedef uint32_t block_half_t;
+#    elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16
+typedef uint64_t block_half_t;
+#    elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32
+typedef uint128_t block_half_t;
+#    endif
+#   else
+typedef unsigned int block_half_t
+	__attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES / 2)));
+#   endif
+#  endif
+# endif
+#endif
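+
+/* Illustrative example of the selection above: with
+ * DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 on arm64, block_t is a 16-byte
+ * vector type (or uint128_t in the non-vector kernel build), block_half_t
+ * is half that size, and the copy loop below moves one block per LDP/STP
+ * pair. */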
+
+__attribute__((visibility("hidden")))
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize)
+{
+	volatile const char *pcSrc = pvSrc;
+	volatile char *pcDst = pvDst;
+	size_t uPreambleBytes;
+	int bBlockCopy = 0;
+
+	size_t uSrcUnaligned = (size_t)pcSrc % sizeof(block_t);
+	size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t);
+
+	if (!uSrcUnaligned && !uDstUnaligned)
+	{
+		/* Neither pointer is unaligned. Optimal case. */
+		bBlockCopy = 1;
+	}
+	else
+	{
+		if (uSrcUnaligned == uDstUnaligned)
+		{
+			/* Neither pointer is usefully aligned, but they are misaligned in
+			 * the same way, so we can copy a preamble in a slow way, then
+			 * optimize the rest.
+			 */
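+			/* Illustrative example: with sizeof(block_t) == 16 and both
+			 * pointers at offset 12 within a block, the 4-byte preamble
+			 * below realigns both pointers and the remainder is
+			 * block-copied. */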
+			uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize);
+			uSize -= uPreambleBytes;
+			while (uPreambleBytes)
+			{
+				*pcDst++ = *pcSrc++;
+				uPreambleBytes--;
+			}
+
+			bBlockCopy = 1;
+		}
+		else if ((uSrcUnaligned | uDstUnaligned) % sizeof(int) == 0)
+		{
+			/* Both pointers are at least 32-bit aligned, and we assume that
+			 * the processor must handle all kinds of 32-bit load-stores.
+			 * NOTE: Could we optimize this with a non-temporal version?
+			 */
+			if (uSize >= sizeof(int))
+			{
+				volatile int *piSrc = (int *)pcSrc;
+				volatile int *piDst = (int *)pcDst;
+
+				while (uSize >= sizeof(int))
+				{
+					*piDst++ = *piSrc++;
+					uSize -= sizeof(int);
+				}
+
+				pcSrc = (char *)piSrc;
+				pcDst = (char *)piDst;
+			}
+		}
+	}
+
+	if (bBlockCopy && uSize >= sizeof(block_t))
+	{
+		volatile block_t *pSrc = (block_t *)pcSrc;
+		volatile block_t *pDst = (block_t *)pcDst;
+
+#if defined(DEVICE_MEMSETCPY_ARM64)
+		NSHLD();
+#endif
+
+		while (uSize >= sizeof(block_t))
+		{
+#if defined(DEVICE_MEMSETCPY_ARM64)
+			__asm__ (LDP " " REGSZ "0, " REGSZ "1, [%[pSrc]]\n\t"
+			         STP " " REGSZ "0, " REGSZ "1, [%[pDst]]"
+						:
+						: [pSrc] "r" (pSrc), [pDst] "r" (pDst)
+						: "memory", REGCL "0", REGCL "1");
+#else
+			*pDst = *pSrc;
+#endif
+			pDst++;
+			pSrc++;
+			uSize -= sizeof(block_t);
+		}
+
+#if defined(DEVICE_MEMSETCPY_ARM64)
+		NSHST();
+#endif
+
+		pcSrc = (char *)pSrc;
+		pcDst = (char *)pDst;
+	}
+
+	while (uSize)
+	{
+		*pcDst++ = *pcSrc++;
+		uSize--;
+	}
+}
+
+__attribute__((visibility("hidden")))
+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize)
+{
+	volatile char *pcDst = pvDst;
+	size_t uPreambleBytes;
+
+	size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t);
+
+	if (uDstUnaligned)
+	{
+		uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize);
+		uSize -= uPreambleBytes;
+		while (uPreambleBytes)
+		{
+			*pcDst++ = ui8Value;
+			uPreambleBytes--;
+		}
+	}
+
+	if (uSize >= sizeof(block_t))
+	{
+		volatile block_t *pDst = (block_t *)pcDst;
+		size_t i, uBlockSize;
+#if defined(DEVICE_MEMSETCPY_ARM64)
+		typedef block_half_t BLK_t;
+#else
+		typedef block_t BLK_t;
+#endif /* defined(DEVICE_MEMSETCPY_ARM64) */
+
+#if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM)
+		BLK_t bValue = 0;
+
+		uBlockSize = sizeof(BLK_t) / sizeof(ui8Value);
+
+		for (i = 0; i < uBlockSize; i++)
+		{
+			bValue |= (BLK_t)ui8Value << ((uBlockSize - i - 1) * BITS_PER_BYTE);
+		}
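+
+		/* Illustrative example: for ui8Value == 0xAB and an 8-byte BLK_t,
+		 * the loop above yields bValue == 0xABABABABABABABAB. */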
+#else
+		BLK_t bValue = {0};
+
+		uBlockSize = sizeof(bValue) / sizeof(unsigned int);
+		for (i = 0; i < uBlockSize; i++)
+			bValue[i] = (unsigned int)ui8Value << 24U |
+			            (unsigned int)ui8Value << 16U |
+			            (unsigned int)ui8Value <<  8U |
+			            (unsigned int)ui8Value;
+#endif /* defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) */
+
+#if defined(DEVICE_MEMSETCPY_ARM64)
+		NSHLD();
+#endif
+
+		while (uSize >= sizeof(block_t))
+		{
+#if defined(DEVICE_MEMSETCPY_ARM64)
+			__asm__ (STP " %" REGSZ "[bValue], %" REGSZ "[bValue], [%[pDst]]"
+						:
+						: [bValue] BVCLB (bValue), [pDst] "r" (pDst)
+						: "memory");
+#else
+			*pDst = bValue;
+#endif
+			pDst++;
+			uSize -= sizeof(block_t);
+		}
+
+#if defined(DEVICE_MEMSETCPY_ARM64)
+		NSHST();
+#endif
+
+		pcDst = (char *)pDst;
+	}
+
+	while (uSize)
+	{
+		*pcDst++ = ui8Value;
+		uSize--;
+	}
+}
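+
+/* Minimal usage sketch (illustrative; the names are hypothetical): both
+ * helpers are drop-in replacements for memcpy()/memset() on device memory,
+ * e.g.
+ *
+ *     DeviceMemCopy(pvDevDst, pvHostSrc, uiLen);
+ *     DeviceMemSet(pvDevDst, 0x00, uiLen);
+ *
+ * Alignment of source and destination is handled internally, so callers do
+ * not need to special-case unaligned buffers. */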
+
+#else /* !defined(__GNUC__) */
+
+/* Potentially very slow (but safe) fallbacks for non-GNU C compilers */
+
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize)
+{
+	volatile const char *pcSrc = pvSrc;
+	volatile char *pcDst = pvDst;
+
+	while (uSize)
+	{
+		*pcDst++ = *pcSrc++;
+		uSize--;
+	}
+}
+
+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize)
+{
+	volatile char *pcDst = pvDst;
+
+	while (uSize)
+	{
+		*pcDst++ = ui8Value;
+		uSize--;
+	}
+}
+
+#endif /* !defined(__GNUC__) */
+
+#endif /* defined(__arm64__) || defined(__aarch64__) || defined (PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mmu_common.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mmu_common.c
new file mode 100644
index 0000000..2f3709c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mmu_common.c
@@ -0,0 +1,4358 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common MMU Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /***************************************************************************/
+
+#include "devicemem_server_utils.h"
+
+/* Our own interface */
+#include "mmu_common.h"
+
+#include "rgx_bvnc_defs_km.h"
+#include "rgxmmudefs_km.h"
+/*
+Interfaces to other modules:
+
+Let's keep this graph up-to-date:
+
+   +-----------+
+   | devicemem |
+   +-----------+
+         |
+   +============+
+   | mmu_common |
+   +============+
+         |
+         +-----------------+
+         |                 |
+    +---------+      +----------+
+    |   pmr   |      |  device  |
+    +---------+      +----------+
+ */
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#if defined(PDUMP)
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#endif
+#include "pmr.h"
+/* include/ */
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv.h"
+#include "htbuffer.h"
+
+#include "rgxdevice.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "physmem_lma.h"
+#endif
+
+#include "dllist.h"
+
+// #define MMU_OBJECT_REFCOUNT_DEBUGGING 1
+#if defined(MMU_OBJECT_REFCOUNT_DEBUGGING)
+#define MMU_OBJ_DBG(x)	PVR_DPF(x)
+#else
+#define MMU_OBJ_DBG(x)
+#endif
+
+/*!
+ * Refcounted structure that is shared between the context and
+ * the cleanup thread items.
+ * It is used to keep track of all cleanup items and whether the creating
+ * MMU context has been destroyed and therefore is not allowed to be
+ * accessed any more.
+ *
+ * The cleanup thread is used to defer the freeing of the page tables
+ * because we have to make sure that the MMU cache has been invalidated.
+ * If we don't take care of this, the MMU might access a mixture of cached
+ * and uncached tables, which can lead to inconsistencies and, in the
+ * worst case, to MMU pending faults on random memory.
+ */
+typedef struct _MMU_CTX_CLEANUP_DATA_
+{
+	/*! Refcount to know when this structure can be destroyed */
+	ATOMIC_T iRef;
+	/*! Protect items in this structure, especially the refcount */
+	POS_LOCK hCleanupLock;
+	/*! List of all cleanup items currently in flight */
+	DLLIST_NODE sMMUCtxCleanupItemsHead;
+	/*! Was the MMU context destroyed and should not be accessed any more? */
+	IMG_BOOL bMMUContextExists;
+} MMU_CTX_CLEANUP_DATA;
+
+
+/*!
+ * Structure holding one or more page tables that need to be
+ * freed after the MMU cache has been flushed which is signalled when
+ * the stored sync has a value that is <= the required value.
+ */
+typedef struct _MMU_CLEANUP_ITEM_
+{
+	/*! Cleanup thread data */
+	PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
+	/*! List to hold all the MMU_MEMORY_MAPPINGs, i.e. page tables */
+	DLLIST_NODE sMMUMappingHead;
+	/*! Node of the cleanup item list for the context */
+	DLLIST_NODE sMMUCtxCleanupItem;
+	/*! Pointer to the cleanup metadata */
+	MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData;
+	/*! Sync to query whether the MMU cache was flushed */
+	PVRSRV_CLIENT_SYNC_PRIM *psSync;
+	/*! The update value of the sync to signal that the cache was flushed */
+	IMG_UINT16 uiRequiredSyncVal;
+	/*! The device node needed to free the page tables */
+	PVRSRV_DEVICE_NODE *psDevNode;
+} MMU_CLEANUP_ITEM;
+
+/*!
+	All physical allocations and frees are relative to this context, so
+	we would get all the allocations of PCs, PDs, and PTs from the same
+	RA.
+
+	We have one per MMU context in case we have mixed UMA/LMA devices
+	within the same system.
+ */
+typedef struct _MMU_PHYSMEM_CONTEXT_
+{
+	/*! Parent device node */
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	/*! Refcount so we know when to free up the arena */
+	IMG_UINT32 uiNumAllocations;
+
+	/*! Arena from which physical memory is derived */
+	RA_ARENA *psPhysMemRA;
+	/*! Arena name */
+	IMG_CHAR *pszPhysMemRAName;
+	/*! Size of arena name string */
+	size_t uiPhysMemRANameAllocSize;
+
+	/*! Meta data for deferred cleanup */
+	MMU_CTX_CLEANUP_DATA *psCleanupData;
+	/*! Temporary list of all deferred MMU_MEMORY_MAPPINGs. */
+	DLLIST_NODE sTmpMMUMappingHead;
+
+} MMU_PHYSMEM_CONTEXT;
+
+/*!
+	Mapping structure for MMU memory allocation
+ */
+typedef struct _MMU_MEMORY_MAPPING_
+{
+	/*! Physmem context to allocate from */
+	MMU_PHYSMEM_CONTEXT		*psContext;
+	/*! OS/system Handle for this allocation */
+	PG_HANDLE				sMemHandle;
+	/*! CPU virtual address of this allocation */
+	void					*pvCpuVAddr;
+	/*! Device physical address of this allocation */
+	IMG_DEV_PHYADDR			sDevPAddr;
+	/*! Size of this allocation */
+	size_t					uiSize;
+	/*! Number of current mappings of this allocation */
+	IMG_UINT32				uiCpuVAddrRefCount;
+	/*! Node for the defer free list */
+	DLLIST_NODE				sMMUMappingItem;
+} MMU_MEMORY_MAPPING;
+
+/*!
+	Memory descriptor for MMU objects. There can be more than one memory
+	descriptor per MMU memory allocation.
+ */
+typedef struct _MMU_MEMORY_DESC_
+{
+	/* NB: bValid is set if this descriptor describes physical
+	   memory.  This allows "empty" descriptors to exist, such that we
+	   can allocate them in batches. */
+	/*! Does this MMU object have physical backing */
+	IMG_BOOL				bValid;
+	/*! Device Physical address of physical backing */
+	IMG_DEV_PHYADDR			sDevPAddr;
+	/*! CPU virtual address of physical backing */
+	void					*pvCpuVAddr;
+	/*! Mapping data for this MMU object */
+	MMU_MEMORY_MAPPING		*psMapping;
+	/*! Memdesc offset into the psMapping */
+	IMG_UINT32 uiOffset;
+	/*! Size of the Memdesc */
+	IMG_UINT32 uiSize;
+} MMU_MEMORY_DESC;
+
+/*!
+	MMU levelx structure. This is generic and is used
+	for all levels (PC, PD, PT).
+ */
+typedef struct _MMU_Levelx_INFO_
+{
+	/*! The Number of entries in this level */
+	IMG_UINT32 ui32NumOfEntries;
+
+	/*! Number of times this level has been referenced. Note: For Level1 (PTE)
+	    we still take/drop the reference when setting up the page tables rather
+	    than at map/unmap time, as this simplifies things */
+	IMG_UINT32 ui32RefCount;
+
+	/*! MemDesc for this level */
+	MMU_MEMORY_DESC sMemDesc;
+
+	/*! Array of infos for the next level. Must be last member in structure */
+	struct _MMU_Levelx_INFO_ *apsNextLevel[1];
+} MMU_Levelx_INFO;
+
+/*!
+	MMU context structure
+ */
+struct _MMU_CONTEXT_
+{
+	/*! Parent device node */
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	MMU_DEVICEATTRIBS *psDevAttrs;
+
+	/*! For allocation and deallocation of the physical memory where
+	    the pagetables live */
+	struct _MMU_PHYSMEM_CONTEXT_ *psPhysMemCtx;
+
+#if defined(PDUMP)
+	/*! PDump context ID (required for PDump commands with virtual addresses) */
+	IMG_UINT32 uiPDumpContextID;
+
+	/*! The refcount of the PDump context ID */
+	IMG_UINT32 ui32PDumpContextIDRefCount;
+#endif
+
+	/*! Data that is passed back during device specific callbacks */
+	IMG_HANDLE hDevData;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	IMG_UINT32  ui32OSid;
+	IMG_UINT32	ui32OSidReg;
+	IMG_BOOL   bOSidAxiProt;
+#endif
+
+	/*! Lock to ensure exclusive access when manipulating the MMU context or
+	 * reading and using its content
+	 */
+	POS_LOCK hLock;
+
+	/*! Base level info structure. Must be last member in structure */
+	MMU_Levelx_INFO sBaseLevelInfo;
+	/* NO OTHER MEMBERS AFTER THIS STRUCTURE ! */
+};
+
+static const IMG_DEV_PHYADDR gsBadDevPhyAddr = {MMU_BAD_PHYS_ADDR};
+
+#if defined(DEBUG)
+#include "log2.h"
+#endif
+
+
+/*****************************************************************************
+ *                          Utility functions                                *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _FreeMMUMapping
+
+@Description    Free a given dllist of MMU_MEMORY_MAPPINGs and the page tables
+                they represent.
+
+@Input          psDevNode           Device node
+
+@Input          psTmpMMUMappingHead List of MMU_MEMORY_MAPPINGs to free
+ */
+/*****************************************************************************/
+static void
+_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode,
+                PDLLIST_NODE psTmpMMUMappingHead)
+{
+	PDLLIST_NODE psNode, psNextNode;
+
+	/* Free the current list unconditionally */
+	dllist_foreach_node(psTmpMMUMappingHead,
+	                    psNode,
+	                    psNextNode)
+	{
+		MMU_MEMORY_MAPPING *psMapping = IMG_CONTAINER_OF(psNode,
+		                                                 MMU_MEMORY_MAPPING,
+		                                                 sMMUMappingItem);
+
+		psDevNode->pfnDevPxFree(psDevNode, &psMapping->sMemHandle);
+		dllist_remove_node(psNode);
+		OSFreeMem(psMapping);
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       _CleanupThread_FreeMMUMapping
+
+@Description    Function to be executed by the cleanup thread to free
+                MMU_MEMORY_MAPPINGs after the MMU cache has been invalidated.
+
+                This function will request a MMU cache invalidate once and
+                retry to free the MMU_MEMORY_MAPPINGs until the invalidate
+                has been executed.
+
+                If the memory context that created this cleanup item has been
+                destroyed in the meantime this function will directly free the
+                MMU_MEMORY_MAPPINGs without waiting for any MMU cache
+                invalidation.
+
+@Input          pvData           Cleanup data in form of a MMU_CLEANUP_ITEM
+
+@Return         PVRSRV_OK if successful otherwise PVRSRV_ERROR_RETRY
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR
+_CleanupThread_FreeMMUMapping(void* pvData)
+{
+	PVRSRV_ERROR eError;
+	MMU_CLEANUP_ITEM *psCleanup = (MMU_CLEANUP_ITEM *) pvData;
+	MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData = psCleanup->psMMUCtxCleanupData;
+	PVRSRV_DEVICE_NODE *psDevNode = psCleanup->psDevNode;
+	IMG_BOOL bFreeNow;
+	IMG_UINT32 uiSyncCurrent;
+	IMG_UINT32 uiSyncReq;
+
+	OSLockAcquire(psMMUCtxCleanupData->hCleanupLock);
+
+	/* Don't attempt to free anything when the context has been destroyed.
+	 * Especially don't access any device-specific structures any more! */
+	if (!psMMUCtxCleanupData->bMMUContextExists)
+	{
+		OSFreeMem(psCleanup);
+		eError = PVRSRV_OK;
+		goto e0;
+	}
+
+	if (psCleanup->psSync == NULL)
+	{
+		/* Kick to invalidate the MMU caches and get sync info */
+		psDevNode->pfnMMUCacheInvalidateKick(psDevNode,
+		                                     &psCleanup->uiRequiredSyncVal,
+		                                     IMG_TRUE);
+		psCleanup->psSync = psDevNode->psMMUCacheSyncPrim;
+	}
+
+	uiSyncCurrent = OSReadDeviceMem32(psCleanup->psSync->pui32LinAddr);
+	uiSyncReq = psCleanup->uiRequiredSyncVal;
+
+	/* Either the invalidate has been executed ... */
+	bFreeNow = (uiSyncCurrent >= uiSyncReq) ? IMG_TRUE :
+			/* ... with the counter wrapped around ... */
+			(uiSyncReq - uiSyncCurrent) > 0xEFFFFFFFUL ? IMG_TRUE :
+					/* ... or are we still waiting for the invalidate? */
+					IMG_FALSE;
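+
+	/* Illustrative example of the wrap-around case: with uiSyncReq ==
+	 * 0xFFFFFFF0 and uiSyncCurrent == 0x00000005 (the counter wrapped),
+	 * uiSyncReq - uiSyncCurrent == 0xFFFFFFEB > 0xEFFFFFFF, so the
+	 * invalidate is treated as completed and the tables are freed now. */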
+
+#if defined(NO_HARDWARE)
+	/* In NOHW the syncs will never be updated so just free the tables */
+	bFreeNow = IMG_TRUE;
+#endif
+
+	if (bFreeNow)
+	{
+		_FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead);
+
+		dllist_remove_node(&psCleanup->sMMUCtxCleanupItem);
+		OSFreeMem(psCleanup);
+
+		eError = PVRSRV_OK;
+	}
+	else
+	{
+		eError = PVRSRV_ERROR_RETRY;
+	}
+
+	e0:
+
+	/* If this cleanup task has been successfully executed we can
+	 * decrease the context cleanup data refcount. Successful here
+	 * means that the MMU_MEMORY_MAPPINGs have been freed, either by
+	 * this cleanup task or when the MMU context was destroyed. */
+	if (eError == PVRSRV_OK)
+	{
+		OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
+
+		if (OSAtomicDecrement(&psMMUCtxCleanupData->iRef) == 0)
+		{
+			OSLockDestroy(psMMUCtxCleanupData->hCleanupLock);
+			OSFreeMem(psMMUCtxCleanupData);
+		}
+	}
+	else
+	{
+		OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
+	}
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _SetupCleanup_FreeMMUMapping
+
+@Description    Setup a cleanup item for the cleanup thread that will
+                kick off a MMU invalidate request and free the associated
+                MMU_MEMORY_MAPPINGs when the invalidate was successful.
+
+@Input          psDevNode           Device node
+
+@Input          psPhysMemCtx        The current MMU physmem context
+ */
+/*****************************************************************************/
+static void
+_SetupCleanup_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode,
+                             MMU_PHYSMEM_CONTEXT *psPhysMemCtx)
+{
+
+	MMU_CLEANUP_ITEM *psCleanupItem;
+	MMU_CTX_CLEANUP_DATA *psCleanupData = psPhysMemCtx->psCleanupData;
+
+	if (dllist_is_empty(&psPhysMemCtx->sTmpMMUMappingHead))
+	{
+		goto e0;
+	}
+
+#if !defined(SUPPORT_MMU_PENDING_FAULT_PROTECTION)
+	/* If users deactivated this we immediately free the page tables */
+	goto e1;
+#endif
+
+	/* Don't defer the freeing if we are currently unloading the driver
+	 * or if the sync has been destroyed */
+	if (PVRSRVGetPVRSRVData()->bUnload ||
+			psDevNode->psMMUCacheSyncPrim == NULL)
+	{
+		goto e1;
+	}
+
+	/* Allocate a cleanup item */
+	psCleanupItem = OSAllocMem(sizeof(*psCleanupItem));
+	if (!psCleanupItem)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to get memory for deferred page table cleanup. "
+				"Freeing tables immediately",
+				__func__));
+		goto e1;
+	}
+
+	/* Set sync to NULL to indicate we did not interact with
+	 * the FW yet. Kicking off an MMU cache invalidate should
+	 * be done in the cleanup thread to not waste time here. */
+	psCleanupItem->psSync = NULL;
+	psCleanupItem->uiRequiredSyncVal = 0;
+	psCleanupItem->psDevNode = psDevNode;
+	psCleanupItem->psMMUCtxCleanupData = psCleanupData;
+
+	OSAtomicIncrement(&psCleanupData->iRef);
+
+	/* Move the page tables to free to the cleanup item */
+	dllist_replace_head(&psPhysMemCtx->sTmpMMUMappingHead,
+	                    &psCleanupItem->sMMUMappingHead);
+
+	/* Add the cleanup item itself to the context list */
+	dllist_add_to_tail(&psCleanupData->sMMUCtxCleanupItemsHead,
+	                   &psCleanupItem->sMMUCtxCleanupItem);
+
+	/* Setup the cleanup thread data and add the work item */
+	psCleanupItem->sCleanupThreadFn.pfnFree = _CleanupThread_FreeMMUMapping;
+	psCleanupItem->sCleanupThreadFn.pvData = psCleanupItem;
+	psCleanupItem->sCleanupThreadFn.bDependsOnHW = IMG_TRUE;
+	CLEANUP_THREAD_SET_RETRY_TIMEOUT(&psCleanupItem->sCleanupThreadFn,
+	                                 CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT);
+
+	PVRSRVCleanupThreadAddWork(&psCleanupItem->sCleanupThreadFn);
+
+	return;
+
+	e1:
+	/* Free the page tables now */
+	_FreeMMUMapping(psDevNode, &psPhysMemCtx->sTmpMMUMappingHead);
+	e0:
+	return;
+}
+
+/*************************************************************************/ /*!
+@Function       _CalcPCEIdx
+
+@Description    Calculate the page catalogue index
+
+@Input          sDevVAddr           Device virtual address
+
+@Input          psDevVAddrConfig    Configuration of the virtual address
+
+@Input          bRoundUp            Round up the index
+
+@Return         The page catalogue index
+ */
+/*****************************************************************************/
+static IMG_UINT32 _CalcPCEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	IMG_UINT32 ui32RetVal;
+
+	sTmpDevVAddr = sDevVAddr;
+
+	if (bRoundUp)
+	{
+		sTmpDevVAddr.uiAddr --;
+	}
+	ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPCIndexMask)
+			>> psDevVAddrConfig->uiPCIndexShift);
+
+	if (bRoundUp)
+	{
+		ui32RetVal ++;
+	}
+
+	return ui32RetVal;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _CalcPDEIdx
+
+@Description    Calculate the page directory index
+
+@Input          sDevVAddr           Device virtual address
+
+@Input          psDevVAddrConfig    Configuration of the virtual address
+
+@Input          bRoundUp            Round up the index
+
+@Return         The page directory index
+ */
+/*****************************************************************************/
+static IMG_UINT32 _CalcPDEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	IMG_UINT32 ui32RetVal;
+
+	sTmpDevVAddr = sDevVAddr;
+
+	if (bRoundUp)
+	{
+		sTmpDevVAddr.uiAddr --;
+	}
+	ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPDIndexMask)
+			>> psDevVAddrConfig->uiPDIndexShift);
+
+	if (bRoundUp)
+	{
+		ui32RetVal ++;
+	}
+
+	return ui32RetVal;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _CalcPTEIdx
+
+@Description    Calculate the page entry index
+
+@Input          sDevVAddr           Device virtual address
+
+@Input          psDevVAddrConfig    Configuration of the virtual address
+
+@Input          bRoundUp            Round up the index
+
+@Return         The page entry index
+ */
+/*****************************************************************************/
+static IMG_UINT32 _CalcPTEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	IMG_UINT32 ui32RetVal;
+
+	sTmpDevVAddr = sDevVAddr;
+	sTmpDevVAddr.uiAddr -= psDevVAddrConfig->uiOffsetInBytes;
+	if (bRoundUp)
+	{
+		sTmpDevVAddr.uiAddr --;
+	}
+	ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPTIndexMask)
+			>> psDevVAddrConfig->uiPTIndexShift);
+
+	if (bRoundUp)
+	{
+		ui32RetVal ++;
+	}
+
+	return ui32RetVal;
+}
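+
+/* Illustrative example (the mask/shift values are hypothetical, not a real
+ * RGX layout): with uiPTIndexShift == 12 and uiPTIndexMask == 0x1FF000, a
+ * device virtual address of 0x103000 yields PTE index
+ * (0x103000 & 0x1FF000) >> 12 == 0x103. */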
+
+/*****************************************************************************
+ *         MMU memory allocation/management functions (mem desc)             *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMem_RAImportAlloc
+
+@Description    Imports MMU Px memory into the RA. This is where the
+                actual allocation of physical memory happens.
+
+@Input          hArenaHandle    Handle that was passed in during the
+                                creation of the RA
+
+@Input          uiSize          Size of the memory to import
+
+@Input          uiFlags         Flags that were passed in with the allocation.
+
+@Output         puiBase         The address of where to insert this import
+
+@Output         puiActualSize   The actual size of the import
+
+@Output         phPriv          Handle which will be passed back when
+                                this import is freed
+
+@Return         PVRSRV_OK if import alloc was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle,
+                                               RA_LENGTH_T uiSize,
+                                               RA_FLAGS_T uiFlags,
+                                               const IMG_CHAR *pszAnnotation,
+                                               RA_BASE_T *puiBase,
+                                               RA_LENGTH_T *puiActualSize,
+                                               RA_PERISPAN_HANDLE *phPriv)
+{
+	MMU_PHYSMEM_CONTEXT *psCtx = (MMU_PHYSMEM_CONTEXT *) hArenaHandle;
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) psCtx->psDevNode;
+	MMU_MEMORY_MAPPING *psMapping;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+	psMapping = OSAllocMem(sizeof(MMU_MEMORY_MAPPING));
+	if (psMapping == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	eError = psDevNode->pfnDevPxAlloc(psDevNode, TRUNCATE_64BITS_TO_SIZE_T(uiSize), &psMapping->sMemHandle,
+	                                  &psMapping->sDevPAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	psMapping->psContext = psCtx;
+	psMapping->uiSize = TRUNCATE_64BITS_TO_SIZE_T(uiSize);
+
+	psMapping->uiCpuVAddrRefCount = 0;
+
+	*phPriv = (RA_PERISPAN_HANDLE) psMapping;
+
+	/* Note: This assumes this memory never gets paged out */
+	*puiBase = (RA_BASE_T)psMapping->sDevPAddr.uiAddr;
+	*puiActualSize = uiSize;
+
+	return PVRSRV_OK;
+
+	e1:
+	OSFreeMem(psMapping);
+	e0:
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMem_RAImportFree
+
+@Description    Frees MMU Px memory that was imported into the RA. The
+                mapping is added to the defer-free list rather than being
+                freed immediately.
+
+@Input          hArenaHandle    Handle that was passed in during the
+                                creation of the RA
+
+@Input          uiBase          Base address of the import to be freed
+
+@Input          hPriv           Private data that the import alloc provided
+
+@Return         None
+ */
+/*****************************************************************************/
+static void _MMU_PhysMem_RAImportFree(RA_PERARENA_HANDLE hArenaHandle,
+                                      RA_BASE_T uiBase,
+                                      RA_PERISPAN_HANDLE hPriv)
+{
+	MMU_MEMORY_MAPPING *psMapping = (MMU_MEMORY_MAPPING *) hPriv;
+	MMU_PHYSMEM_CONTEXT *psCtx = (MMU_PHYSMEM_CONTEXT *) hArenaHandle;
+
+	PVR_UNREFERENCED_PARAMETER(uiBase);
+
+	/* Check we have dropped all CPU mappings */
+	PVR_ASSERT(psMapping->uiCpuVAddrRefCount == 0);
+
+	/* Add mapping to defer free list */
+	psMapping->psContext = NULL;
+	dllist_add_to_tail(&psCtx->sTmpMMUMappingHead, &psMapping->sMMUMappingItem);
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMemAlloc
+
+@Description    Allocates physical memory for MMU objects
+
+@Input          psCtx           Physmem context to do the allocation from
+
+@Output         psMemDesc       Allocation description
+
+@Input          uiBytes         Size of the allocation in bytes
+
+@Input          uiAlignment     Alignment requirement of this allocation
+
+@Return         PVRSRV_OK if allocation was successful
+ */
+/*****************************************************************************/
+
+static PVRSRV_ERROR _MMU_PhysMemAlloc(MMU_PHYSMEM_CONTEXT *psCtx,
+                                      MMU_MEMORY_DESC *psMemDesc,
+                                      size_t uiBytes,
+                                      size_t uiAlignment)
+{
+	PVRSRV_ERROR eError;
+	RA_BASE_T uiPhysAddr;
+
+	if (!psMemDesc || psMemDesc->bValid)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = RA_Alloc(psCtx->psPhysMemRA,
+	                  uiBytes,
+	                  RA_NO_IMPORT_MULTIPLIER,
+	                  0, /* flags */
+	                  uiAlignment,
+	                  "",
+	                  &uiPhysAddr,
+	                  NULL,
+	                  (RA_PERISPAN_HANDLE *) &psMemDesc->psMapping);
+
+	PVR_LOGR_IF_ERROR(eError, "RA_Alloc");
+
+	psMemDesc->bValid = IMG_TRUE;
+	psMemDesc->pvCpuVAddr = NULL;
+	psMemDesc->sDevPAddr.uiAddr = (IMG_UINT64) uiPhysAddr;
+
+	if (psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
+	{
+		eError = psCtx->psDevNode->pfnDevPxMap(psCtx->psDevNode,
+		                                       &psMemDesc->psMapping->sMemHandle,
+		                                       psMemDesc->psMapping->uiSize,
+		                                       &psMemDesc->psMapping->sDevPAddr,
+		                                       &psMemDesc->psMapping->pvCpuVAddr);
+		if (eError != PVRSRV_OK)
+		{
+			RA_Free(psCtx->psPhysMemRA, psMemDesc->sDevPAddr.uiAddr);
+			return eError;
+		}
+	}
+
+	psMemDesc->psMapping->uiCpuVAddrRefCount++;
+	psMemDesc->uiOffset = (psMemDesc->sDevPAddr.uiAddr - psMemDesc->psMapping->sDevPAddr.uiAddr);
+	psMemDesc->pvCpuVAddr = (IMG_UINT8 *) psMemDesc->psMapping->pvCpuVAddr + psMemDesc->uiOffset;
+	psMemDesc->uiSize = uiBytes;
+	PVR_ASSERT(psMemDesc->pvCpuVAddr != NULL);
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMemFree
+
+@Description    Frees physical memory for MMU objects
+
+@Input          psCtx           Physmem context to do the free on
+
+@Input          psMemDesc       Allocation description
+
+@Return         None
+ */
+/*****************************************************************************/
+static void _MMU_PhysMemFree(MMU_PHYSMEM_CONTEXT *psCtx,
+                             MMU_MEMORY_DESC *psMemDesc)
+{
+	RA_BASE_T uiPhysAddr;
+
+	PVR_ASSERT(psMemDesc->bValid);
+
+	if (--psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
+	{
+		psCtx->psDevNode->pfnDevPxUnMap(psCtx->psDevNode, &psMemDesc->psMapping->sMemHandle,
+		                                psMemDesc->psMapping->pvCpuVAddr);
+	}
+
+	psMemDesc->pvCpuVAddr = NULL;
+
+	uiPhysAddr = psMemDesc->sDevPAddr.uiAddr;
+	RA_Free(psCtx->psPhysMemRA, uiPhysAddr);
+
+	psMemDesc->bValid = IMG_FALSE;
+}
+
+
+/*****************************************************************************
+ *              MMU object allocation/management functions                   *
+ *****************************************************************************/
+
+static INLINE PVRSRV_ERROR _MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate,
+                                                   PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+                                                   MMU_PROTFLAGS_T *uiMMUProtFlags,
+                                                   MMU_CONTEXT *psMMUContext)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 uiGPUCacheMode;
+
+	/* Do flag conversion between devmem flags and MMU generic flags */
+	if (bInvalidate == IMG_FALSE)
+	{
+		*uiMMUProtFlags |= ((uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
+				>> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET)
+				<< MMU_PROTFLAGS_DEVICE_OFFSET;
+
+		if (PVRSRV_CHECK_GPU_READABLE(uiMappingFlags))
+		{
+			*uiMMUProtFlags |= MMU_PROTFLAGS_READABLE;
+		}
+		if (PVRSRV_CHECK_GPU_WRITEABLE(uiMappingFlags))
+		{
+			*uiMMUProtFlags |= MMU_PROTFLAGS_WRITEABLE;
+		}
+
+		eError = DevmemDeviceCacheMode(psMMUContext->psDevNode,
+		                               uiMappingFlags,
+		                               &uiGPUCacheMode);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+
+		switch (uiGPUCacheMode)
+		{
+			case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
+			case PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE:
+				break;
+			case PVRSRV_MEMALLOCFLAG_GPU_CACHED:
+				*uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
+				break;
+			default:
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Wrong parameters",
+						__func__));
+				return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		if (DevmemDeviceCacheCoherency(psMMUContext->psDevNode, uiMappingFlags))
+		{
+			*uiMMUProtFlags |= MMU_PROTFLAGS_CACHE_COHERENT;
+		}
+
+#if defined(SUPPORT_RGX)
+		if ((psMMUContext->psDevNode->pfnCheckDeviceFeature) &&
+			 PVRSRV_IS_FEATURE_SUPPORTED(psMMUContext->psDevNode, MIPS))
+		{
+			/*
+				If we are allocating on the MMU of the firmware processor, the cached/uncached attributes
+				must depend on the FIRMWARE_CACHED allocation flag.
+			 */
+			if (psMMUContext->psDevAttrs == psMMUContext->psDevNode->psFirmwareMMUDevAttrs)
+			{
+				if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED))
+				{
+					*uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
+				}
+				else
+				{
+					*uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHED;
+				}
+				*uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHE_COHERENT;
+			}
+		}
+#endif
+	}
+	else
+	{
+		*uiMMUProtFlags |= MMU_PROTFLAGS_INVALID;
+	}
+
+	return PVRSRV_OK;
+}
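+
+/*
+ * Illustrative sketch only: how a caller might derive MMU protection flags
+ * for a normal (non-invalidating) mapping with the helper above. The
+ * function name is hypothetical and error handling is elided.
+ */
+#if defined(MMU_COMMON_EXAMPLES)
+static MMU_PROTFLAGS_T _ExampleDeriveProtFlags(MMU_CONTEXT *psMMUContext,
+                                               PVRSRV_MEMALLOCFLAGS_T uiFlags)
+{
+	MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+
+	/* bInvalidate = IMG_FALSE: build flags for a real mapping, so the
+	   readable/writeable/cached bits are derived from uiFlags */
+	(void) _MMU_ConvertDevMemFlags(IMG_FALSE, uiFlags,
+	                               &uiMMUProtFlags, psMMUContext);
+
+	return uiMMUProtFlags;
+}
+#endif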
+
+/*************************************************************************/ /*!
+@Function       _PxMemAlloc
+
+@Description    Allocates physical memory for MMU objects, initialises
+                and PDumps it.
+
+@Input          psMMUContext    MMU context
+
+@Input          uiNumEntries    Number of entries to allocate
+
+@Input          psConfig        MMU Px config
+
+@Input          eMMULevel       MMU level that the allocation is for
+
+@Output         psMemDesc       Description of allocation
+
+@Return         PVRSRV_OK if allocation was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext,
+                                IMG_UINT32 uiNumEntries,
+                                const MMU_PxE_CONFIG *psConfig,
+                                MMU_LEVEL eMMULevel,
+                                MMU_MEMORY_DESC *psMemDesc,
+                                IMG_UINT32 uiLog2Align)
+{
+	PVRSRV_ERROR eError;
+	size_t uiBytes;
+	size_t uiAlign;
+
+	PVR_ASSERT(psConfig->uiBytesPerEntry != 0);
+
+	uiBytes = uiNumEntries * psConfig->uiBytesPerEntry;
+	/* We need the alignment of the level above here because that is the alignment of the entry that points at this object */
+	uiAlign = 1 << uiLog2Align;
+
+	/*
+	 * If the hardware specifies an alignment requirement for a page table then
+	 * it also requires that all memory up to the next aligned address is
+	 * zeroed.
+	 *
+	 * Failing to do this can result in uninitialised data outside of the actual
+	 * page table range being read by the MMU and treated as valid, e.g. the
+	 * pending flag.
+	 *
+	 * Typically this will affect 1MiB, 2MiB PT pages which have a size of 16
+	 * and 8 bytes respectively but an alignment requirement of 64 bytes each.
+	 */
+	uiBytes = PVR_ALIGN(uiBytes, uiAlign);
+
+	/* allocate the object */
+	eError = _MMU_PhysMemAlloc(psMMUContext->psPhysMemCtx,
+	                           psMemDesc, uiBytes, uiAlign);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_PxMemAlloc: failed to allocate memory for the MMU object"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	/*
+		Clear the object
+		Note: if any MMUs are cleared with non-zero values then we will need
+		a custom clear function
+		Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is
+		unlikely
+	 */
+	OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, uiBytes);
+
+	eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+	                                                &psMemDesc->psMapping->sMemHandle,
+	                                                psMemDesc->uiOffset,
+	                                                psMemDesc->uiSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Alloc MMU object");
+
+	PDumpMMUMalloc(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+	               eMMULevel,
+	               &psMemDesc->sDevPAddr,
+	               uiBytes,
+	               uiAlign,
+	               psMMUContext->psDevAttrs->eMMUType);
+
+	PDumpMMUDumpPxEntries(eMMULevel,
+	                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+	                      psMemDesc->pvCpuVAddr,
+	                      psMemDesc->sDevPAddr,
+	                      0,
+	                      uiNumEntries,
+	                      NULL, NULL, 0, /* pdump symbolic info is irrelevant here */
+	                      psConfig->uiBytesPerEntry,
+	                      uiLog2Align,
+	                      psConfig->uiAddrShift,
+	                      psConfig->uiAddrMask,
+	                      psConfig->uiProtMask,
+	                      psConfig->uiValidEnMask,
+	                      0,
+	                      psMMUContext->psDevAttrs->eMMUType);
+#endif
+
+	return PVRSRV_OK;
+	e1:
+	_MMU_PhysMemFree(psMMUContext->psPhysMemCtx,
+	                 psMemDesc);
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
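+
+/*
+ * Worked example for the padding above, with the sizes quoted in the
+ * comment (illustrative only): a 2MiB-page PT holds one 8-byte entry, so
+ * uiBytes is 8, but a 64-byte alignment requirement makes PVR_ALIGN pad
+ * the allocation, and OSCachedMemSet then zeroes the whole padded range.
+ */
+#if defined(MMU_COMMON_EXAMPLES)
+static size_t _ExamplePaddedPxSize(void)
+{
+	return PVR_ALIGN(8, 64); /* == 64 */
+}
+#endif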
+
+/*************************************************************************/ /*!
+@Function       _PxMemFree
+
+@Description    Frees physical memory for MMU objects, de-initialises
+                and PDumps it.
+
+@Input          psMemDesc       Description of allocation
+
+@Return         None
+ */
+/*****************************************************************************/
+
+static void _PxMemFree(MMU_CONTEXT *psMMUContext,
+                       MMU_MEMORY_DESC *psMemDesc, MMU_LEVEL eMMULevel)
+{
+#if defined(MMU_CLEARMEM_ON_FREE)
+	PVRSRV_ERROR eError;
+
+	/*
+		Clear the MMU object
+		Note: if any MMUs are cleared with non-zero values then we will need
+		a custom clear function
+		Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is
+		unlikely
+	 */
+	OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, psMemDesc->uiSize);
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Clear MMU object before freeing it");
+#endif
+#endif/* MMU_CLEARMEM_ON_FREE */
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Free MMU object");
+	{
+		PDumpMMUFree(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+		             eMMULevel,
+		             &psMemDesc->sDevPAddr,
+		             psMMUContext->psDevAttrs->eMMUType);
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(eMMULevel);
+#endif
+	/* free the PC */
+	_MMU_PhysMemFree(psMMUContext->psPhysMemCtx, psMemDesc);
+}
+
+static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext,
+                                     MMU_Levelx_INFO *psLevel,
+                                     IMG_UINT32 uiIndex,
+                                     const MMU_PxE_CONFIG *psConfig,
+                                     const IMG_DEV_PHYADDR *psDevPAddr,
+                                     IMG_BOOL bUnmap,
+#if defined(PDUMP)
+                                     const IMG_CHAR *pszMemspaceName,
+                                     const IMG_CHAR *pszSymbolicAddr,
+                                     IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
+#endif
+                                     IMG_UINT64 uiProtFlags)
+{
+	MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
+	IMG_UINT64 ui64PxE64;
+	IMG_UINT64 uiAddr = psDevPAddr->uiAddr;
+
+	if (PVRSRV_IS_FEATURE_SUPPORTED(psMMUContext->psDevNode, MIPS))
+	{
+		/*
+		 * If mapping for the MIPS FW context, check for sensitive PAs
+		 */
+		if (psMMUContext->psDevAttrs == psMMUContext->psDevNode->psFirmwareMMUDevAttrs)
+		{
+			PVRSRV_RGXDEV_INFO *psDevice = (PVRSRV_RGXDEV_INFO *)psMMUContext->psDevNode->pvDevice;
+
+			if (RGXMIPSFW_SENSITIVE_ADDR(uiAddr))
+			{
+				uiAddr = psDevice->psTrampoline->sPhysAddr.uiAddr + RGXMIPSFW_TRAMPOLINE_OFFSET(uiAddr);
+			}
+			/* FIX_HW_BRN_63553 is mainlined for all MIPS cores */
+			else if (uiAddr == 0x0 && !psDevice->sLayerParams.bDevicePA0IsValid)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s attempt to map addr 0x0 in the FW but 0x0 is not considered valid.", __func__));
+				return PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE;
+			}
+		}
+	}
+
+	/* Calculate Entry */
+	ui64PxE64 =    uiAddr /* Calculate the offset to that base */
+			>> psConfig->uiAddrLog2Align /* Shift away the useless bits, because the alignment is very coarse and we address by alignment */
+			<< psConfig->uiAddrShift /* Shift back to fit address in the Px entry */
+			& psConfig->uiAddrMask; /* Delete unused bits */
+	ui64PxE64 |= uiProtFlags;
+
+	/* Set the entry */
+	if (psConfig->uiBytesPerEntry == 8)
+	{
+		IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+		pui64Px[uiIndex] = ui64PxE64;
+	}
+	else if (psConfig->uiBytesPerEntry == 4)
+	{
+		IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+		/* assert that the result fits into 32 bits before writing
+		   it into the 32-bit array with a cast */
+		PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
+
+		pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
+	}
+	else
+	{
+		return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+	}
+
+
+	/* Log modification */
+	HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+	        HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+	        uiIndex, MMU_LEVEL_1,
+	        HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
+	        !bUnmap);
+
+#if defined (PDUMP)
+	PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+	                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+	                      psMemDesc->pvCpuVAddr,
+	                      psMemDesc->sDevPAddr,
+	                      uiIndex,
+	                      1,
+	                      pszMemspaceName,
+	                      pszSymbolicAddr,
+	                      uiSymbolicAddrOffset,
+	                      psConfig->uiBytesPerEntry,
+	                      psConfig->uiAddrLog2Align,
+	                      psConfig->uiAddrShift,
+	                      psConfig->uiAddrMask,
+	                      psConfig->uiProtMask,
+	                      psConfig->uiValidEnMask,
+	                      0,
+	                      psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+	return PVRSRV_OK;
+}
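+
+/*
+ * Illustrative sketch only: the address-field arithmetic used to pack a Px
+ * entry, lifted out with hypothetical example config values
+ * (uiAddrLog2Align = 12, uiAddrShift = 12, 40-bit address mask).
+ */
+#if defined(MMU_COMMON_EXAMPLES)
+static IMG_UINT64 _ExamplePackPxE(IMG_UINT64 ui64PhysAddr,
+                                  IMG_UINT64 ui64ProtFlags)
+{
+	const IMG_UINT32 uiAddrLog2Align = 12;          /* 4KiB-aligned addresses */
+	const IMG_UINT32 uiAddrShift     = 12;          /* field starts at bit 12 */
+	const IMG_UINT64 ui64AddrMask    = 0xFFFFFFF000ULL;
+
+	/* 0x80403000 >> 12 = 0x80403, << 12 = 0x80403000, mask leaves it as-is */
+	return (((ui64PhysAddr >> uiAddrLog2Align) << uiAddrShift)
+	        & ui64AddrMask) | ui64ProtFlags;
+}
+#endif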
+
+/*************************************************************************/ /*!
+@Function       _SetupPxE
+
+@Description    Sets up an entry of an MMU object to point to the
+                provided address
+
+@Input          psMMUContext    MMU context to operate on
+
+@Input          psLevel         Level info for MMU object
+
+@Input          uiIndex         Index into the MMU object to setup
+
+@Input          psConfig        MMU Px config
+
+@Input          eMMULevel       Level of MMU object
+
+@Input          psDevPAddr      Address to setup the MMU object to point to
+
+@Input          pszMemspaceName Name of the PDump memory space that the entry
+                                will point to
+
+@Input          pszSymbolicAddr PDump symbolic address that the entry will
+                                point to
+
+@Input          uiProtFlags     MMU protection flags
+
+@Return         PVRSRV_OK if the setup was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext,
+                              MMU_Levelx_INFO *psLevel,
+                              IMG_UINT32 uiIndex,
+                              const MMU_PxE_CONFIG *psConfig,
+                              MMU_LEVEL eMMULevel,
+                              const IMG_DEV_PHYADDR *psDevPAddr,
+#if defined(PDUMP)
+                              const IMG_CHAR *pszMemspaceName,
+                              const IMG_CHAR *pszSymbolicAddr,
+                              IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
+#endif
+                              MMU_PROTFLAGS_T uiProtFlags,
+                              IMG_UINT32 uiLog2DataPageSize)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psDevNode;
+	MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
+
+	IMG_UINT32 (*pfnDerivePxEProt4)(IMG_UINT32);
+	IMG_UINT64 (*pfnDerivePxEProt8)(IMG_UINT32, IMG_UINT32);
+
+	if (!psDevPAddr)
+	{
+		/* Invalidate entry */
+		if (~uiProtFlags & MMU_PROTFLAGS_INVALID)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Error, no physical address specified, but not invalidating entry"));
+			uiProtFlags |= MMU_PROTFLAGS_INVALID;
+		}
+		psDevPAddr = &gsBadDevPhyAddr;
+	}
+	else
+	{
+		if (uiProtFlags & MMU_PROTFLAGS_INVALID)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "A physical address was specified when requesting invalidation of entry"));
+			uiProtFlags |= MMU_PROTFLAGS_INVALID;
+		}
+	}
+
+	switch (eMMULevel)
+	{
+		case MMU_LEVEL_3:
+			pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePCEProt4;
+			pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePCEProt8;
+			break;
+
+		case MMU_LEVEL_2:
+			pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePDEProt4;
+			pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePDEProt8;
+			break;
+
+		case MMU_LEVEL_1:
+			pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePTEProt4;
+			pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePTEProt8;
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "%s: invalid MMU level", __func__));
+			return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* How big is a PxE in bytes? */
+	/* Filling the actual Px entry with an address */
+	switch (psConfig->uiBytesPerEntry)
+	{
+		case 4:
+		{
+			IMG_UINT32 *pui32Px;
+			IMG_UINT64 ui64PxE64;
+
+			pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+			ui64PxE64 = psDevPAddr->uiAddr               /* Calculate the offset to that base */
+					>> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */
+					<< psConfig->uiAddrShift     /* Shift back to fit address in the Px entry */
+					& psConfig->uiAddrMask;      /* Delete unused higher bits */
+
+			ui64PxE64 |= (IMG_UINT64)pfnDerivePxEProt4(uiProtFlags);
+			/* assert that the result fits into 32 bits before writing
+			   it into the 32-bit array with a cast */
+			PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
+
+			/* We should never invalidate an invalid page */
+			if (uiProtFlags & MMU_PROTFLAGS_INVALID)
+			{
+				PVR_ASSERT(pui32Px[uiIndex] != ui64PxE64);
+			}
+			pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
+			HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+			        HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+			        uiIndex, eMMULevel,
+			        HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
+			        (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1);
+			break;
+		}
+		case 8:
+		{
+			IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+			pui64Px[uiIndex] = psDevPAddr->uiAddr             /* Calculate the offset to that base */
+					>> psConfig->uiAddrLog2Align  /* Shift away the unnecessary bits of the address */
+					<< psConfig->uiAddrShift      /* Shift back to fit address in the Px entry */
+					& psConfig->uiAddrMask;       /* Delete unused higher bits */
+			pui64Px[uiIndex] |= pfnDerivePxEProt8(uiProtFlags, uiLog2DataPageSize);
+
+			HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+			        HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+			        uiIndex, eMMULevel,
+			        HTBLOG_U64_BITS_HIGH(pui64Px[uiIndex]), HTBLOG_U64_BITS_LOW(pui64Px[uiIndex]),
+			        (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1);
+			break;
+		}
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "%s: PxE size not supported (%d) for level %d",
+					__func__, psConfig->uiBytesPerEntry, eMMULevel));
+
+			return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+	}
+
+#if defined (PDUMP)
+	PDumpMMUDumpPxEntries(eMMULevel,
+	                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+	                      psMemDesc->pvCpuVAddr,
+	                      psMemDesc->sDevPAddr,
+	                      uiIndex,
+	                      1,
+	                      pszMemspaceName,
+	                      pszSymbolicAddr,
+	                      uiSymbolicAddrOffset,
+	                      psConfig->uiBytesPerEntry,
+	                      psConfig->uiAddrLog2Align,
+	                      psConfig->uiAddrShift,
+	                      psConfig->uiAddrMask,
+	                      psConfig->uiProtMask,
+	                      psConfig->uiValidEnMask,
+	                      0,
+	                      psMMUContext->psDevAttrs->eMMUType);
+#endif
+
+	psDevNode->pfnMMUCacheInvalidate(psDevNode, psMMUContext->hDevData,
+	                                 eMMULevel,
+	                                 (uiProtFlags & MMU_PROTFLAGS_INVALID)?IMG_TRUE:IMG_FALSE);
+
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ *                   MMU host control functions (Level Info)                 *
+ *****************************************************************************/
+
+
+/*************************************************************************/ /*!
+@Function       _MMU_FreeLevel
+
+@Description    Recursively frees the specified range of Px entries. If any
+                level has its last reference dropped then the MMU object
+                memory and the MMU_Levelx_INFO will be freed.
+
+				At each level we might be crossing a boundary from one Px to
+				another. The values in auiStartArray should be used for
+				the first call into each level and the values in auiEndArray
+				should only be used in the last call for each level.
+				In order to determine if this is the first/last call we pass
+				in bFirst and bLast.
+				When one level calls down to the next, bFirst/bLast is only
+				set for the next recursion if it is set at this level and
+				this is the first/last iteration of the loop at this level.
+				This way each recursion knows whether it is handling the
+				first/last part of the range at the level above, which is
+				required.
+
+@Input          psMMUContext    MMU context to operate on
+
+@Input          psLevel                 Level info on which to free the
+                                        specified range
+
+@Input          auiStartArray           Array of start indexes (one for each level)
+
+@Input          auiEndArray             Array of end indexes (one for each level)
+
+@Input          auiEntriesPerPxArray    Array of number of entries for the Px
+                                        (one for each level)
+
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+
+@Input          pui32CurrentLevel       Pointer to a variable which is set to our
+                                        current level
+
+@Input          uiStartIndex            Start index of the range to free
+
+@Input          uiEndIndex              End index of the range to free
+
+@Input			bFirst                  This is the first call for this level
+
+@Input			bLast                   This is the last call for this level
+
+@Return         IMG_TRUE if the last reference to psLevel was dropped
+ */
+/*****************************************************************************/
+static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext,
+                               MMU_Levelx_INFO *psLevel,
+                               IMG_UINT32 auiStartArray[],
+                               IMG_UINT32 auiEndArray[],
+                               IMG_UINT32 auiEntriesPerPxArray[],
+                               const MMU_PxE_CONFIG *apsConfig[],
+                               MMU_LEVEL aeMMULevel[],
+                               IMG_UINT32 *pui32CurrentLevel,
+                               IMG_UINT32 uiStartIndex,
+                               IMG_UINT32 uiEndIndex,
+                               IMG_BOOL bFirst,
+                               IMG_BOOL bLast,
+                               IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_UINT32 uiThisLevel = *pui32CurrentLevel;
+	const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel];
+	IMG_UINT32 i;
+	IMG_BOOL bFreed = IMG_FALSE;
+
+	/* Sanity check */
+	PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
+	PVR_ASSERT(psLevel != NULL);
+
+	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel: level = %d, range %d - %d, refcount = %d",
+			aeMMULevel[uiThisLevel], uiStartIndex,
+			uiEndIndex, psLevel->ui32RefCount));
+
+	for (i = uiStartIndex;(i < uiEndIndex) && (psLevel != NULL);i++)
+	{
+		if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+		{
+			MMU_Levelx_INFO *psNextLevel = psLevel->apsNextLevel[i];
+			IMG_UINT32 uiNextStartIndex;
+			IMG_UINT32 uiNextEndIndex;
+			IMG_BOOL bNextFirst;
+			IMG_BOOL bNextLast;
+
+			/* If we're crossing a Px then the start index changes */
+			if (bFirst && (i == uiStartIndex))
+			{
+				uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+				bNextFirst = IMG_TRUE;
+			}
+			else
+			{
+				uiNextStartIndex = 0;
+				bNextFirst = IMG_FALSE;
+			}
+
+			/* If we're crossing a Px then the end index changes */
+			if (bLast && (i == (uiEndIndex - 1)))
+			{
+				uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+				bNextLast = IMG_TRUE;
+			}
+			else
+			{
+				uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+				bNextLast = IMG_FALSE;
+			}
+
+			/* Recurse into the next level */
+			(*pui32CurrentLevel)++;
+			if (_MMU_FreeLevel(psMMUContext, psNextLevel, auiStartArray,
+			                   auiEndArray, auiEntriesPerPxArray,
+			                   apsConfig, aeMMULevel, pui32CurrentLevel,
+			                   uiNextStartIndex, uiNextEndIndex,
+			                   bNextFirst, bNextLast, uiLog2DataPageSize))
+			{
+				PVRSRV_ERROR eError;
+
+				/* Un-wire the entry */
+				eError = _SetupPxE(psMMUContext,
+				                   psLevel,
+				                   i,
+				                   psConfig,
+				                   aeMMULevel[uiThisLevel],
+				                   NULL,
+#if defined(PDUMP)
+				                   NULL,	/* Only required for data page */
+				                   NULL,	/* Only required for data page */
+				                   0,      /* Only required for data page */
+#endif
+				                   MMU_PROTFLAGS_INVALID,
+				                   uiLog2DataPageSize);
+
+				PVR_ASSERT(eError == PVRSRV_OK);
+
+				/* Free table of the level below, pointed to by this table entry.
+				 * We don't destroy the table inside the above _MMU_FreeLevel call because we
+				 * first have to set the table entry of the level above to invalid. */
+				_PxMemFree(psMMUContext, &psNextLevel->sMemDesc, aeMMULevel[*pui32CurrentLevel]);
+				OSFreeMem(psNextLevel);
+
+				/* The level below us is empty, drop the refcount and clear the pointer */
+				psLevel->ui32RefCount--;
+				psLevel->apsNextLevel[i] = NULL;
+
+				/* Check we haven't wrapped around */
+				PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+			}
+			(*pui32CurrentLevel)--;
+		}
+		else
+		{
+			psLevel->ui32RefCount--;
+		}
+
+		/*
+		   Free this level if it is no longer referenced, unless it's the base
+		   level in which case it's part of the MMU context and should be freed
+		   when the MMU context is freed
+		 */
+		if ((psLevel->ui32RefCount == 0) && (psLevel != &psMMUContext->sBaseLevelInfo))
+		{
+			bFreed = IMG_TRUE;
+		}
+	}
+
+	/* Level one flushing is done when we actually write the table entries */
+	if ((aeMMULevel[uiThisLevel] != MMU_LEVEL_1) && (psLevel != NULL))
+	{
+		psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+		                                       &psLevel->sMemDesc.psMapping->sMemHandle,
+		                                       uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+		                                       (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry);
+	}
+
+	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel end: level = %d, refcount = %d",
+			aeMMULevel[uiThisLevel], bFreed?0: (psLevel)?psLevel->ui32RefCount:-1));
+
+	return bFreed;
+}
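+
+/*
+ * Illustrative sketch only: the bFirst/bLast index-selection rule shared by
+ * _MMU_FreeLevel and _MMU_AllocLevel, lifted out as a helper with
+ * hypothetical names. Only the first call at a level may use that level's
+ * start index and only the last call may use its end index; every other
+ * call covers a whole Px, i.e. 0..uiEntriesPerPx.
+ */
+#if defined(MMU_COMMON_EXAMPLES)
+static void _ExampleNextLevelRange(IMG_BOOL bFirst, IMG_BOOL bLast,
+                                   IMG_UINT32 i,
+                                   IMG_UINT32 uiStartIndex,
+                                   IMG_UINT32 uiEndIndex,
+                                   IMG_UINT32 uiLevelStart,
+                                   IMG_UINT32 uiLevelEnd,
+                                   IMG_UINT32 uiEntriesPerPx,
+                                   IMG_UINT32 *puiNextStart,
+                                   IMG_UINT32 *puiNextEnd)
+{
+	*puiNextStart = (bFirst && (i == uiStartIndex)) ? uiLevelStart : 0;
+	*puiNextEnd = (bLast && (i == (uiEndIndex - 1))) ? uiLevelEnd
+	                                                 : uiEntriesPerPx;
+}
+#endif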
+
+/*************************************************************************/ /*!
+@Function       _MMU_AllocLevel
+
+@Description    Recursively allocates the specified range of Px entries. If any
+                level is not present yet then the MMU object memory and the
+                MMU_Levelx_INFO will be allocated.
+
+				At each level we might be crossing a boundary from one Px to
+				another. The values in auiStartArray should be used for
+				the first call into each level and the values in auiEndArray
+				should only be used in the last call for each level.
+				In order to determine if this is the first/last call we pass
+				in bFirst and bLast.
+				When one level calls down to the next, bFirst/bLast is only
+				set for the next recursion if it is set at this level and
+				this is the first/last iteration of the loop at this level.
+				This way each recursion knows whether it is handling the
+				first/last part of the range at the level above, which is
+				required.
+
+@Input          psMMUContext    MMU context to operate on
+
+@Input          psLevel                 Level info on which to allocate the
+                                        specified range
+
+@Input          auiStartArray           Array of start indexes (one for each level)
+
+@Input          auiEndArray             Array of end indexes (one for each level)
+
+@Input          auiEntriesPerPxArray    Array of number of entries for the Px
+                                        (one for each level)
+
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+
+@Input          pui32CurrentLevel       Pointer to a variable which is set to our
+                                        current level
+
+@Input          uiStartIndex            Start index of the range to allocate
+
+@Input          uiEndIndex              End index of the range to allocate
+
+@Input			bFirst                  This is the first call for this level
+
+@Input			bLast                   This is the last call for this level
+
+@Return         PVRSRV_OK if the allocation was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext,
+                                    MMU_Levelx_INFO *psLevel,
+                                    IMG_UINT32 auiStartArray[],
+                                    IMG_UINT32 auiEndArray[],
+                                    IMG_UINT32 auiEntriesPerPxArray[],
+                                    const MMU_PxE_CONFIG *apsConfig[],
+                                    MMU_LEVEL aeMMULevel[],
+                                    IMG_UINT32 *pui32CurrentLevel,
+                                    IMG_UINT32 uiStartIndex,
+                                    IMG_UINT32 uiEndIndex,
+                                    IMG_BOOL bFirst,
+                                    IMG_BOOL bLast,
+                                    IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_UINT32 uiThisLevel = *pui32CurrentLevel; /* Starting with 0 */
+	const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; /* The table config for the current level */
+	PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	IMG_UINT32 uiAllocState = 99; /* Debug info to check what progress was made in the function. Updated during this function. */
+	IMG_UINT32 i;
+
+	/* Sanity check */
+	PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
+
+	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel: level = %d, range %d - %d, refcount = %d",
+			aeMMULevel[uiThisLevel], uiStartIndex,
+			uiEndIndex, psLevel->ui32RefCount));
+
+	/* Go from uiStartIndex to uiEndIndex through the Px */
+	for (i = uiStartIndex;i < uiEndIndex;i++)
+	{
+		/* Only try an allocation if this is not the last level, because a PT
+		   allocation is already done while setting the entry in the PD */
+		if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+		{
+			IMG_UINT32 uiNextStartIndex;
+			IMG_UINT32 uiNextEndIndex;
+			IMG_BOOL bNextFirst;
+			IMG_BOOL bNextLast;
+
+			/* If there is already a next Px level existing, do not allocate it */
+			if (!psLevel->apsNextLevel[i])
+			{
+				MMU_Levelx_INFO *psNextLevel;
+				IMG_UINT32 ui32AllocSize;
+				IMG_UINT32 uiNextEntries;
+
+				/* Allocate and setup the next level */
+				uiNextEntries = auiEntriesPerPxArray[uiThisLevel + 1];
+				ui32AllocSize = sizeof(MMU_Levelx_INFO);
+				if (aeMMULevel[uiThisLevel + 1] != MMU_LEVEL_1)
+				{
+					ui32AllocSize += sizeof(MMU_Levelx_INFO *) * (uiNextEntries - 1);
+				}
+				psNextLevel = OSAllocZMem(ui32AllocSize);
+				if (psNextLevel == NULL)
+				{
+					uiAllocState = 0;
+					goto e0;
+				}
+
+				/* Hook in this level for next time */
+				psLevel->apsNextLevel[i] = psNextLevel;
+
+				psNextLevel->ui32NumOfEntries = uiNextEntries;
+				psNextLevel->ui32RefCount = 0;
+				/* Allocate Px memory for a sub level*/
+				eError = _PxMemAlloc(psMMUContext, uiNextEntries, apsConfig[uiThisLevel + 1],
+				                     aeMMULevel[uiThisLevel + 1],
+				                     &psNextLevel->sMemDesc,
+				                     psConfig->uiAddrLog2Align);
+				if (eError != PVRSRV_OK)
+				{
+					uiAllocState = 1;
+					goto e0;
+				}
+
+				/* Wire up the entry */
+				eError = _SetupPxE(psMMUContext,
+				                   psLevel,
+				                   i,
+				                   psConfig,
+				                   aeMMULevel[uiThisLevel],
+				                   &psNextLevel->sMemDesc.sDevPAddr,
+#if defined(PDUMP)
+				                   NULL, /* Only required for data page */
+				                   NULL, /* Only required for data page */
+				                   0,    /* Only required for data page */
+#endif
+				                   0,
+				                   uiLog2DataPageSize);
+
+				if (eError != PVRSRV_OK)
+				{
+					uiAllocState = 2;
+					goto e0;
+				}
+
+				psLevel->ui32RefCount++;
+			}
+
+			/* If we're crossing a Px then the start index changes */
+			if (bFirst && (i == uiStartIndex))
+			{
+				uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+				bNextFirst = IMG_TRUE;
+			}
+			else
+			{
+				uiNextStartIndex = 0;
+				bNextFirst = IMG_FALSE;
+			}
+
+			/* If we're crossing a Px then the end index changes */
+			if (bLast && (i == (uiEndIndex - 1)))
+			{
+				uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+				bNextLast = IMG_TRUE;
+			}
+			else
+			{
+				uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+				bNextLast = IMG_FALSE;
+			}
+
+			/* Recurse into the next level */
+			(*pui32CurrentLevel)++;
+			eError = _MMU_AllocLevel(psMMUContext, psLevel->apsNextLevel[i],
+			                         auiStartArray,
+			                         auiEndArray,
+			                         auiEntriesPerPxArray,
+			                         apsConfig,
+			                         aeMMULevel,
+			                         pui32CurrentLevel,
+			                         uiNextStartIndex,
+			                         uiNextEndIndex,
+			                         bNextFirst,
+			                         bNextLast,
+			                         uiLog2DataPageSize);
+			(*pui32CurrentLevel)--;
+			if (eError != PVRSRV_OK)
+			{
+				uiAllocState = 2;
+				goto e0;
+			}
+		}
+		else
+		{
+			/* All we need to do for level 1 is bump the refcount */
+			psLevel->ui32RefCount++;
+		}
+		PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+	}
+
+	/* Level one flushing is done when we actually write the table entries */
+	if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+	{
+		eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+		                                                &psLevel->sMemDesc.psMapping->sMemHandle,
+		                                                uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+		                                                (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry);
+		if (eError != PVRSRV_OK)
+			goto e0;
+	}
+
+	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel end: level = %d, refcount = %d",
+			aeMMULevel[uiThisLevel], psLevel->ui32RefCount));
+	return PVRSRV_OK;
+
+	e0:
+	/* Sanity check that we've not come down this route unexpectedly */
+	PVR_ASSERT(uiAllocState!=99);
+	PVR_DPF((PVR_DBG_ERROR, "_MMU_AllocLevel: Error %d allocating Px for level %d in stage %d"
+			,eError, aeMMULevel[uiThisLevel], uiAllocState));
+
+	/* The start value of the index variable i is deliberately not
+	   re-initialised: this loop de-initialises, in reverse order, what was
+	   initialised just before the failure, so i already holds the right
+	   value. */
+	for (/* i already set */; i>= uiStartIndex && i< uiEndIndex; i--)
+	{
+		switch (uiAllocState)
+		{
+			IMG_UINT32 uiNextStartIndex;
+			IMG_UINT32 uiNextEndIndex;
+			IMG_BOOL bNextFirst;
+			IMG_BOOL bNextLast;
+
+			case 3:
+				/* If we're crossing a Px then the start index changes */
+				if (bFirst && (i == uiStartIndex))
+				{
+					uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+					bNextFirst = IMG_TRUE;
+				}
+				else
+				{
+					uiNextStartIndex = 0;
+					bNextFirst = IMG_FALSE;
+				}
+
+				/* If we're crossing a Px then the end index changes */
+				if (bLast && (i == (uiEndIndex - 1)))
+				{
+					uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+					bNextLast = IMG_TRUE;
+				}
+				else
+				{
+					uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+					bNextLast = IMG_FALSE;
+				}
+
+				if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+				{
+					(*pui32CurrentLevel)++;
+					if (_MMU_FreeLevel(psMMUContext, psLevel->apsNextLevel[i],
+					                   auiStartArray, auiEndArray,
+					                   auiEntriesPerPxArray, apsConfig,
+					                   aeMMULevel, pui32CurrentLevel,
+					                   uiNextStartIndex, uiNextEndIndex,
+					                   bNextFirst, bNextLast, uiLog2DataPageSize))
+					{
+						psLevel->ui32RefCount--;
+						psLevel->apsNextLevel[i] = NULL;
+
+						/* Check we haven't wrapped around */
+						PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+					}
+					(*pui32CurrentLevel)--;
+				}
+				else
+				{
+					/* We should never come down this path, but it's here
+						   for completeness */
+					psLevel->ui32RefCount--;
+
+					/* Check we haven't wrapped around */
+					PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+				}
+
+				__fallthrough;
+			case 2:
+				if (psLevel->apsNextLevel[i] != NULL  &&
+						psLevel->apsNextLevel[i]->ui32RefCount == 0)
+				{
+					/* Free the Px memory of the partially set up level below,
+					   which was allocated at aeMMULevel[uiThisLevel + 1] */
+					_PxMemFree(psMMUContext, &psLevel->apsNextLevel[i]->sMemDesc,
+					           aeMMULevel[uiThisLevel + 1]);
+				}
+
+				__fallthrough;
+			case 1:
+				if (psLevel->apsNextLevel[i] != NULL  &&
+						psLevel->apsNextLevel[i]->ui32RefCount == 0)
+				{
+					OSFreeMem(psLevel->apsNextLevel[i]);
+					psLevel->apsNextLevel[i] = NULL;
+				}
+
+				__fallthrough;
+			case 0:
+				uiAllocState = 3;
+				break;
+		}
+	}
+	return eError;
+}
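+
+/*
+ * Summary of the unwind states above (added for reference):
+ * uiAllocState 0 - OSAllocZMem of the next-level info failed,
+ *              1 - _PxMemAlloc for the next level failed,
+ *              2 - _SetupPxE or the recursion into the next level failed,
+ *              3 - this index was fully set up; used for all earlier
+ *                  indices once the failing index has been unwound.
+ */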
+
+/*****************************************************************************
+ *                   MMU page table functions                                *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _MMU_GetLevelData
+
+@Description    Gets all the level data and calculates the indexes for the
+                specified address range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrStart          Start device virtual address
+
+@Input          sDevVAddrEnd            End device virtual address
+
+@Input          uiLog2DataPageSize      Log2 of the page size to use
+
+@Input          auiStartArray           Array of start indexes (one for each level)
+
+@Input          auiEndArray             Array of end indexes (one for each level)
+
+@Input          uiEntriesPerPxArray     Array of number of entries for the Px
+                                        (one for each level)
+
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+
+@Input          ppsMMUDevVAddrConfig    Device virtual address config
+
+@Input			phPriv					Private data of page size config
+
+@Return         None
+ */
+/*****************************************************************************/
+static void _MMU_GetLevelData(MMU_CONTEXT *psMMUContext,
+                              IMG_DEV_VIRTADDR sDevVAddrStart,
+                              IMG_DEV_VIRTADDR sDevVAddrEnd,
+                              IMG_UINT32 uiLog2DataPageSize,
+                              IMG_UINT32 auiStartArray[],
+                              IMG_UINT32 auiEndArray[],
+                              IMG_UINT32 auiEntriesPerPx[],
+                              const MMU_PxE_CONFIG *apsConfig[],
+                              MMU_LEVEL aeMMULevel[],
+                              const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+                              IMG_HANDLE *phPriv)
+{
+	const MMU_PxE_CONFIG *psMMUPDEConfig;
+	const MMU_PxE_CONFIG *psMMUPTEConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i = 0;
+
+	eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
+	                                                 &psMMUPDEConfig,
+	                                                 &psMMUPTEConfig,
+	                                                 ppsMMUDevVAddrConfig,
+	                                                 phPriv);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	psDevVAddrConfig = *ppsMMUDevVAddrConfig;
+
+	if (psDevVAddrConfig->uiPCIndexMask != 0)
+	{
+		auiStartArray[i] = _CalcPCEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+		auiEndArray[i] = _CalcPCEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+		auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPC;
+		apsConfig[i] = psDevAttrs->psBaseConfig;
+		aeMMULevel[i] = MMU_LEVEL_3;
+		i++;
+	}
+
+	if (psDevVAddrConfig->uiPDIndexMask != 0)
+	{
+		auiStartArray[i] = _CalcPDEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+		auiEndArray[i] = _CalcPDEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+		auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPD;
+		if (i == 0)
+		{
+			apsConfig[i] = psDevAttrs->psBaseConfig;
+		}
+		else
+		{
+			apsConfig[i] = psMMUPDEConfig;
+		}
+		aeMMULevel[i] = MMU_LEVEL_2;
+		i++;
+	}
+
+	/*
+		There is always a PTE entry so we have a slightly different behaviour than above.
+		E.g. for 2 MB RGX pages the uiPTIndexMask is 0x0000000000 but still there
+		is a PT with one entry.
+	 */
+	auiStartArray[i] = _CalcPTEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+	if (psDevVAddrConfig->uiPTIndexMask !=0)
+	{
+		auiEndArray[i] = _CalcPTEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+	}
+	else
+	{
+		/*
+			If the PTE mask is zero it means there is only 1 PTE and thus, as
+			an exclusive bound, the end array index is equal to the start index + 1.
+		 */
+
+		auiEndArray[i] = auiStartArray[i] + 1;
+	}
+
+	auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPT;
+
+	if (i == 0)
+	{
+		apsConfig[i] = psDevAttrs->psBaseConfig;
+	}
+	else
+	{
+		apsConfig[i] = psMMUPTEConfig;
+	}
+	aeMMULevel[i] = MMU_LEVEL_1;
+}
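+
+/*
+ * Worked example (illustrative, assuming a typical 3-level configuration
+ * with 4KiB data pages): _MMU_GetLevelData fills the arrays as
+ *   i = 0: PC level, aeMMULevel[0] = MMU_LEVEL_3, base config,
+ *   i = 1: PD level, aeMMULevel[1] = MMU_LEVEL_2, PDE config,
+ *   i = 2: PT level, aeMMULevel[2] = MMU_LEVEL_1, PTE config,
+ * with auiStartArray/auiEndArray holding the PCE/PDE/PTE indexes of the
+ * start and end addresses. For 2MiB pages uiPTIndexMask is 0 and the PT
+ * range degenerates to a single entry (end = start + 1), as noted above.
+ */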
+
+static void _MMU_PutLevelData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hPriv)
+{
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+
+	psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+}
+
+/*************************************************************************/ /*!
+@Function       _AllocPageTables
+
+@Description    Allocate page tables and any higher level MMU objects required
+                for the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrStart          Start device virtual address
+
+@Input          sDevVAddrEnd            End device virtual address
+
+@Input          uiLog2DataPageSize      Page size of the data pages
+
+@Return         PVRSRV_OK if the allocation was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR
+_AllocPageTables(MMU_CONTEXT *psMMUContext,
+                 IMG_DEV_VIRTADDR sDevVAddrStart,
+                 IMG_DEV_VIRTADDR sDevVAddrEnd,
+                 IMG_UINT32 uiLog2DataPageSize)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
+	IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
+	IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
+	MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
+	const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
+	const MMU_DEVVADDR_CONFIG	*psDevVAddrConfig;
+	IMG_HANDLE hPriv;
+	IMG_UINT32 ui32CurrentLevel = 0;
+
+	PVR_DPF((PVR_DBG_ALLOC,
+			"_AllocPageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
+			sDevVAddrStart.uiAddr,
+			sDevVAddrEnd.uiAddr
+	));
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Allocating page tables for %"IMG_UINT64_FMTSPEC" bytes virtual range: "
+	             IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
+	             (IMG_UINT64)sDevVAddrEnd.uiAddr - (IMG_UINT64)sDevVAddrStart.uiAddr,
+	             (IMG_UINT64)sDevVAddrStart.uiAddr,
+	             (IMG_UINT64)sDevVAddrEnd.uiAddr);
+#endif
+
+	_MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
+	                  (IMG_UINT32) uiLog2DataPageSize, auiStartArray, auiEndArray,
+	                  auiEntriesPerPx, apsConfig, aeMMULevel,
+	                  &psDevVAddrConfig, &hPriv);
+
+	HTBLOGK(HTB_SF_MMU_PAGE_OP_ALLOC,
+	        HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr),
+	        HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr));
+
+	eError = _MMU_AllocLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
+	                         auiStartArray, auiEndArray, auiEntriesPerPx,
+	                         apsConfig, aeMMULevel, &ui32CurrentLevel,
+	                         auiStartArray[0], auiEndArray[0],
+	                         IMG_TRUE, IMG_TRUE, uiLog2DataPageSize);
+
+	_MMU_PutLevelData(psMMUContext, hPriv);
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _FreePageTables
+
+@Description    Free page tables and any higher level MMU objects that are no
+                longer referenced for the specified virtual range.
+                This will fill the temporary free list of the MMU context which
+                needs cleanup after the call.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrStart          Start device virtual address
+
+@Input          sDevVAddrEnd            End device virtual address
+
+@Input          uiLog2DataPageSize      Page size of the data pages
+
+@Return         None
+ */
+/*****************************************************************************/
+static void _FreePageTables(MMU_CONTEXT *psMMUContext,
+                            IMG_DEV_VIRTADDR sDevVAddrStart,
+                            IMG_DEV_VIRTADDR sDevVAddrEnd,
+                            IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
+	IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
+	IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
+	MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
+	const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
+	const MMU_DEVVADDR_CONFIG	*psDevVAddrConfig;
+	IMG_UINT32 ui32CurrentLevel = 0;
+	IMG_HANDLE hPriv;
+
+	PVR_DPF((PVR_DBG_ALLOC,
+			"_FreePageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
+			sDevVAddrStart.uiAddr,
+			sDevVAddrEnd.uiAddr
+	));
+
+	_MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
+	                  uiLog2DataPageSize, auiStartArray, auiEndArray,
+	                  auiEntriesPerPx, apsConfig, aeMMULevel,
+	                  &psDevVAddrConfig, &hPriv);
+
+	HTBLOGK(HTB_SF_MMU_PAGE_OP_FREE,
+	        HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr),
+	        HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr));
+
+	/* ignoring return code, in this case there should be no references
+	 * to the level anymore, and at this stage there is nothing to do with
+	 * the return status */
+	(void) _MMU_FreeLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
+	                      auiStartArray, auiEndArray, auiEntriesPerPx,
+	                      apsConfig, aeMMULevel, &ui32CurrentLevel,
+	                      auiStartArray[0], auiEndArray[0],
+	                      IMG_TRUE, IMG_TRUE, uiLog2DataPageSize);
+
+	_MMU_PutLevelData(psMMUContext, hPriv);
+}
+
+
+/*************************************************************************/ /*!
+@Function       _MMU_GetPTInfo
+
+@Description    Get the PT level information and PT entry index for the specified
+                virtual address
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          psDevVAddr              Device virtual address to get the PTE info
+                                        from.
+
+@Input          psDevVAddrConfig        The current virtual address config obtained
+                                        by another function call before.
+
+@Output         psLevel                 Level info of the PT
+
+@Output         pui32PTEIndex           Index into the PT the address corresponds to
+
+@Return         None
+ */
+/*****************************************************************************/
+static INLINE void _MMU_GetPTInfo(MMU_CONTEXT                *psMMUContext,
+                                  IMG_DEV_VIRTADDR            sDevVAddr,
+                                  const MMU_DEVVADDR_CONFIG  *psDevVAddrConfig,
+                                  MMU_Levelx_INFO           **psLevel,
+                                  IMG_UINT32                 *pui32PTEIndex)
+{
+	MMU_Levelx_INFO *psLocalLevel = NULL;
+	MMU_LEVEL eMMULevel = psMMUContext->psDevAttrs->eTopLevel;
+	IMG_UINT32 uiPCEIndex;
+	IMG_UINT32 uiPDEIndex;
+
+	if ((eMMULevel <= MMU_LEVEL_0) || (eMMULevel >= MMU_LEVEL_LAST))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTEInfo: Invalid MMU level"));
+		psLevel = NULL;
+		return;
+	}
+
+	for (; eMMULevel > MMU_LEVEL_0; eMMULevel--)
+	{
+		if (eMMULevel == MMU_LEVEL_3)
+		{
+			/* find the page directory containing the PCE */
+			uiPCEIndex = _CalcPCEIdx (sDevVAddr, psDevVAddrConfig,
+			                          IMG_FALSE);
+			psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPCEIndex];
+		}
+
+		if (eMMULevel == MMU_LEVEL_2)
+		{
+			/* find the page table containing the PDE */
+			uiPDEIndex = _CalcPDEIdx (sDevVAddr, psDevVAddrConfig,
+			                          IMG_FALSE);
+			if (psLocalLevel != NULL)
+			{
+				psLocalLevel = psLocalLevel->apsNextLevel[uiPDEIndex];
+			}
+			else
+			{
+				psLocalLevel =
+						psMMUContext->sBaseLevelInfo.apsNextLevel[uiPDEIndex];
+			}
+		}
+
+		if (eMMULevel == MMU_LEVEL_1)
+		{
+			/* find PTE index into page table */
+			*pui32PTEIndex = _CalcPTEIdx (sDevVAddr, psDevVAddrConfig,
+			                              IMG_FALSE);
+			if (psLocalLevel == NULL)
+			{
+				psLocalLevel = &psMMUContext->sBaseLevelInfo;
+			}
+		}
+	}
+	*psLevel = psLocalLevel;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_GetPTConfig
+
+@Description    Get the level config. Call _MMU_PutPTConfig after use!
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uiLog2DataPageSize      Log 2 of the page size
+
+@Output         ppsConfig               Config of the PTE
+
+@Output         phPriv                  Private data handle to be passed back
+                                        when the info is put
+
+@Output         ppsDevVAddrConfig       Config of the device virtual addresses
+
+@Return         None
+ */
+/*****************************************************************************/
+static INLINE void _MMU_GetPTConfig(MMU_CONTEXT               *psMMUContext,
+                                    IMG_UINT32                  uiLog2DataPageSize,
+                                    const MMU_PxE_CONFIG      **ppsConfig,
+                                    IMG_HANDLE                 *phPriv,
+                                    const MMU_DEVVADDR_CONFIG **ppsDevVAddrConfig)
+{
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	const MMU_PxE_CONFIG *psPDEConfig;
+	const MMU_PxE_CONFIG *psPTEConfig;
+
+	if (psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
+	                                            &psPDEConfig,
+	                                            &psPTEConfig,
+	                                            &psDevVAddrConfig,
+	                                            phPriv) != PVRSRV_OK)
+	{
+		/*
+		   There should be no way we got here unless uiLog2DataPageSize
+		   has changed after the MMU_Alloc call (in which case it's a bug in
+		   the MM code)
+		 */
+		PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTConfig: Could not get valid page size config"));
+		PVR_ASSERT(0);
+	}
+
+	*ppsConfig = psPTEConfig;
+	*ppsDevVAddrConfig = psDevVAddrConfig;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PutPTConfig
+
+@Description    Put the level info. Has to be called after _MMU_GetPTConfig to
+                ensure correct refcounting.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          phPriv                  Private data handle created by
+                                        _MMU_GetPTConfig.
+
+@Return         None
+ */
+/*****************************************************************************/
+static INLINE void _MMU_PutPTConfig(MMU_CONTEXT *psMMUContext,
+                                    IMG_HANDLE hPriv)
+{
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+
+	if (psDevAttrs->pfnPutPageSizeConfiguration(hPriv) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Could not put page size config",
+				__func__));
+		PVR_ASSERT(0);
+	}
+}
+
+
+/*****************************************************************************
+ *                     Public interface functions                            *
+ *****************************************************************************/
+
+/*
+	MMU_ContextCreate
+ */
+PVRSRV_ERROR
+MMU_ContextCreate(PVRSRV_DEVICE_NODE *psDevNode,
+                  MMU_CONTEXT **ppsMMUContext,
+                  MMU_DEVICEATTRIBS *psDevAttrs)
+{
+	MMU_CONTEXT *psMMUContext;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	const MMU_PxE_CONFIG *psConfig;
+	MMU_PHYSMEM_CONTEXT *psCtx;
+	IMG_UINT32 ui32BaseObjects;
+	IMG_UINT32 ui32Size;
+	IMG_CHAR sBuf[40];
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	psConfig = psDevAttrs->psBaseConfig;
+	psDevVAddrConfig = psDevAttrs->psTopLevelDevVAddrConfig;
+
+	switch (psDevAttrs->eTopLevel)
+	{
+		case MMU_LEVEL_3:
+			ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPC;
+			break;
+
+		case MMU_LEVEL_2:
+			ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPD;
+			break;
+
+		case MMU_LEVEL_1:
+			ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPT;
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Invalid MMU config", __func__));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto e0;
+	}
+
+	/* Allocate the MMU context with the Level 1 Px info's */
+	ui32Size = sizeof(MMU_CONTEXT) +
+			((ui32BaseObjects - 1) * sizeof(MMU_Levelx_INFO *));
+
+	psMMUContext = OSAllocZMem(ui32Size);
+	if (psMMUContext == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Call to OSAllocZMem failed", __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+#if defined(PDUMP)
+	/* Clear the refcount */
+	psMMUContext->ui32PDumpContextIDRefCount = 0;
+#endif
+	/* Record Device specific attributes in the context for subsequent use */
+	psMMUContext->psDevAttrs = psDevAttrs;
+	psMMUContext->psDevNode = psDevNode;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	{
+		IMG_UINT32 ui32OSid, ui32OSidReg;
+		IMG_BOOL bOSidAxiProt;
+
+		RetrieveOSidsfromPidList(OSGetCurrentClientProcessIDKM(), &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+		MMU_SetOSids(psMMUContext, ui32OSid, ui32OSidReg, bOSidAxiProt);
+	}
+#endif
+
+	/*
+	  Allocate physmem context and set it up
+	 */
+	psCtx = OSAllocZMem(sizeof(MMU_PHYSMEM_CONTEXT));
+	if (psCtx == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Call to OSAllocZMem failed", __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+	psMMUContext->psPhysMemCtx = psCtx;
+
+	psCtx->psDevNode = psDevNode;
+
+	OSSNPrintf(sBuf, sizeof(sBuf), "pgtables %p", psCtx);
+	psCtx->uiPhysMemRANameAllocSize = OSStringLength(sBuf)+1;
+	psCtx->pszPhysMemRAName = OSAllocMem(psCtx->uiPhysMemRANameAllocSize);
+	if (psCtx->pszPhysMemRAName == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Out of memory", __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e2;
+	}
+
+	OSStringCopy(psCtx->pszPhysMemRAName, sBuf);
+
+	psCtx->psPhysMemRA = RA_Create(psCtx->pszPhysMemRAName,
+	                               /* subsequent import */
+	                               psDevNode->uiMMUPxLog2AllocGran,
+	                               RA_LOCKCLASS_1,
+	                               _MMU_PhysMem_RAImportAlloc,
+	                               _MMU_PhysMem_RAImportFree,
+	                               psCtx, /* priv */
+	                               IMG_FALSE);
+	if (psCtx->psPhysMemRA == NULL)
+	{
+		OSFreeMem(psCtx->pszPhysMemRAName);
+		psCtx->pszPhysMemRAName = NULL;
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e3;
+	}
+
+	/* Setup cleanup meta data to check if a MMU context
+	 * has been destroyed and should not be accessed anymore */
+	psCtx->psCleanupData = OSAllocMem(sizeof(*(psCtx->psCleanupData)));
+	if (psCtx->psCleanupData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Call to OSAllocMem failed", __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e4;
+	}
+
+	OSLockCreate(&psCtx->psCleanupData->hCleanupLock);
+	psCtx->psCleanupData->bMMUContextExists = IMG_TRUE;
+	dllist_init(&psCtx->psCleanupData->sMMUCtxCleanupItemsHead);
+	OSAtomicWrite(&psCtx->psCleanupData->iRef, 1);
+
+	/* allocate the base level object */
+	/*
+	   Note: Although this is not required by this file until
+	         the 1st allocation is made, a device specific callback
+	         might request the base object address so we allocate
+	         it up front.
+	 */
+	if (_PxMemAlloc(psMMUContext,
+	                ui32BaseObjects,
+	                psConfig,
+	                psDevAttrs->eTopLevel,
+	                &psMMUContext->sBaseLevelInfo.sMemDesc,
+	                psDevAttrs->ui32BaseAlign))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to alloc level 1 object", __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e5;
+	}
+
+	dllist_init(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead);
+
+	psMMUContext->sBaseLevelInfo.ui32NumOfEntries = ui32BaseObjects;
+	psMMUContext->sBaseLevelInfo.ui32RefCount = 0;
+
+	eError = OSLockCreate(&psMMUContext->hLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to create lock for MMU_CONTEXT", __func__));
+		goto e6;
+	}
+
+	/* return context */
+	*ppsMMUContext = psMMUContext;
+
+	return PVRSRV_OK;
+
+	e6:
+	_PxMemFree(psMMUContext, &psMMUContext->sBaseLevelInfo.sMemDesc, psDevAttrs->eTopLevel);
+	e5:
+	OSFreeMem(psCtx->psCleanupData);
+	e4:
+	RA_Delete(psCtx->psPhysMemRA);
+	e3:
+	OSFreeMem(psCtx->pszPhysMemRAName);
+	e2:
+	OSFreeMem(psCtx);
+	e1:
+	OSFreeMem(psMMUContext);
+	e0:
+	return eError;
+}
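+
+/*
+ * Illustrative usage sketch only (the device node and attributes are
+ * assumed to come from the caller): create an MMU context and destroy it
+ * again. Real callers keep the context alive for the lifetime of the
+ * address space.
+ */
+#if defined(MMU_COMMON_EXAMPLES)
+static PVRSRV_ERROR _ExampleContextLifetime(PVRSRV_DEVICE_NODE *psDevNode,
+                                            MMU_DEVICEATTRIBS *psDevAttrs)
+{
+	MMU_CONTEXT *psMMUContext;
+	PVRSRV_ERROR eError;
+
+	eError = MMU_ContextCreate(psDevNode, &psMMUContext, psDevAttrs);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* ... map and unmap ranges here ... */
+
+	MMU_ContextDestroy(psMMUContext);
+	return PVRSRV_OK;
+}
+#endif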
+
+/*
+	MMU_ContextDestroy
+ */
+void
+MMU_ContextDestroy (MMU_CONTEXT *psMMUContext)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PDLLIST_NODE psNode, psNextNode;
+
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) psMMUContext->psDevNode;
+	MMU_CTX_CLEANUP_DATA *psCleanupData = psMMUContext->psPhysMemCtx->psCleanupData;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Enter", __func__));
+
+	if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
+	{
+		/* There should be no way to get here with live pages unless
+		   there is a bug in this module or the MM code */
+		PVR_ASSERT(psMMUContext->sBaseLevelInfo.ui32RefCount == 0);
+	}
+
+	/* Cleanup lock must be acquired before MMUContext lock. Reverse order
+	 * may lead to a deadlock and is reported by lockdep. */
+	OSLockAcquire(psCleanupData->hCleanupLock);
+	OSLockAcquire(psMMUContext->hLock);
+
+	/* Free the top level MMU object - will be put on defer free list.
+	 * This has to be done before the step below that will empty the
+	 * defer-free list. */
+	_PxMemFree(psMMUContext,
+	           &psMMUContext->sBaseLevelInfo.sMemDesc,
+	           psMMUContext->psDevAttrs->eTopLevel);
+
+	/* Empty the temporary defer-free list of Px */
+	_FreeMMUMapping(psDevNode, &psMMUContext->psPhysMemCtx->sTmpMMUMappingHead);
+	PVR_ASSERT(dllist_is_empty(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead));
+
+	/* Empty the defer free list so the cleanup thread will
+	 * not have to access any MMU context related structures anymore */
+	dllist_foreach_node(&psCleanupData->sMMUCtxCleanupItemsHead,
+	                    psNode,
+	                    psNextNode)
+	{
+		MMU_CLEANUP_ITEM *psCleanup = IMG_CONTAINER_OF(psNode,
+		                                               MMU_CLEANUP_ITEM,
+		                                               sMMUCtxCleanupItem);
+
+		_FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead);
+
+		dllist_remove_node(psNode);
+	}
+	PVR_ASSERT(dllist_is_empty(&psCleanupData->sMMUCtxCleanupItemsHead));
+
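+	/* Any cleanup work still in flight checks this flag under
+	 * hCleanupLock before touching MMU context structures. */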
+	psCleanupData->bMMUContextExists = IMG_FALSE;
+
+	/* Free physmem context */
+	RA_Delete(psMMUContext->psPhysMemCtx->psPhysMemRA);
+	psMMUContext->psPhysMemCtx->psPhysMemRA = NULL;
+	OSFreeMem(psMMUContext->psPhysMemCtx->pszPhysMemRAName);
+	psMMUContext->psPhysMemCtx->pszPhysMemRAName = NULL;
+
+	OSFreeMem(psMMUContext->psPhysMemCtx);
+
+	OSLockRelease(psMMUContext->hLock);
+
+	OSLockRelease(psCleanupData->hCleanupLock);
+
+	if (OSAtomicDecrement(&psCleanupData->iRef) == 0)
+	{
+		OSLockDestroy(psCleanupData->hCleanupLock);
+		OSFreeMem(psCleanupData);
+	}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	RemovePidOSidCoupling(OSGetCurrentClientProcessIDKM());
+#endif
+
+	OSLockDestroy(psMMUContext->hLock);
+
+	/* free the context itself. */
+	OSFreeMem(psMMUContext);
+	/* Not NULLing the pointer; it is a copy on the caller's stack */
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Exit", __func__));
+}
+
+/*
+	MMU_Alloc
+ */
+PVRSRV_ERROR
+MMU_Alloc (MMU_CONTEXT *psMMUContext,
+           IMG_DEVMEM_SIZE_T uSize,
+           IMG_DEVMEM_SIZE_T *puActualSize,
+           IMG_UINT32 uiProtFlags,
+           IMG_DEVMEM_SIZE_T uDevVAddrAlignment,
+           IMG_DEV_VIRTADDR *psDevVAddr,
+           IMG_UINT32 uiLog2PageSize)
+{
+	PVRSRV_ERROR eError;
+	IMG_DEV_VIRTADDR sDevVAddrEnd;
+
+	const MMU_PxE_CONFIG *psPDEConfig;
+	const MMU_PxE_CONFIG *psPTEConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+
+	MMU_DEVICEATTRIBS *psDevAttrs;
+	IMG_HANDLE hPriv;
+
+#if !defined (DEBUG)
+	PVR_UNREFERENCED_PARAMETER(uDevVAddrAlignment);
+#endif
+	PVR_DPF((PVR_DBG_MESSAGE,
+			"%s: uSize=" IMG_DEVMEM_SIZE_FMTSPEC
+			", uiProtFlags=0x%x, align="IMG_DEVMEM_ALIGN_FMTSPEC,
+			__func__, uSize, uiProtFlags, uDevVAddrAlignment));
+
+	/* check params */
+	if (!psMMUContext || !psDevVAddr || !puActualSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid parameter", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevAttrs = psMMUContext->psDevAttrs;
+
+	eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2PageSize,
+	                                                 &psPDEConfig,
+	                                                 &psPTEConfig,
+	                                                 &psDevVAddrConfig,
+	                                                 &hPriv);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to get config info (%s)",
+				__func__, PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+	/* size and alignment must be datapage granular */
+	if (((psDevVAddr->uiAddr & psDevVAddrConfig->uiPageOffsetMask) != 0)
+			|| ((uSize & psDevVAddrConfig->uiPageOffsetMask) != 0))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: invalid address or size granularity",
+				__func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	sDevVAddrEnd = *psDevVAddr;
+	sDevVAddrEnd.uiAddr += uSize;
+
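+	/* Make sure every page table level backing [base, end) exists;
+	 * the PTEs themselves are programmed later, at map time. */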
+	OSLockAcquire(psMMUContext->hLock);
+	eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize);
+	OSLockRelease(psMMUContext->hLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: _AllocPageTables failed",
+				__func__));
+		/* Balance the earlier pfnGetPageSizeConfiguration() */
+		psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+		return PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES;
+	}
+
+	psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+
+	return PVRSRV_OK;
+}
+
+/*
+	MMU_Free
+ */
+void
+MMU_Free (MMU_CONTEXT *psMMUContext,
+          IMG_DEV_VIRTADDR sDevVAddr,
+          IMG_DEVMEM_SIZE_T uiSize,
+          IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_DEV_VIRTADDR sDevVAddrEnd;
+
+	if (psMMUContext == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid parameter", __func__));
+		return;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Freeing DevVAddr " IMG_DEV_VIRTADDR_FMTSPEC,
+			__func__, sDevVAddr.uiAddr));
+
+	/* ensure the address range to free is inside the heap */
+	sDevVAddrEnd = sDevVAddr;
+	sDevVAddrEnd.uiAddr += uiSize;
+
+	/* The Cleanup lock has to be taken before the MMUContext hLock to
+	 * prevent deadlock scenarios. It is necessary only for parts of
+	 * _SetupCleanup_FreeMMUMapping though.*/
+	OSLockAcquire(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock);
+
+	OSLockAcquire(psMMUContext->hLock);
+
+	_FreePageTables(psMMUContext,
+	                sDevVAddr,
+	                sDevVAddrEnd,
+	                uiLog2DataPageSize);
+
+	_SetupCleanup_FreeMMUMapping(psMMUContext->psDevNode,
+	                             psMMUContext->psPhysMemCtx);
+
+	OSLockRelease(psMMUContext->hLock);
+
+	OSLockRelease(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock);
+
+}
+
+PVRSRV_ERROR
+MMU_MapPages(MMU_CONTEXT *psMMUContext,
+             PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+             IMG_DEV_VIRTADDR sDevVAddrBase,
+             PMR *psPMR,
+             IMG_UINT32 ui32PhysPgOffset,
+             IMG_UINT32 ui32MapPageCount,
+             IMG_UINT32 *paui32MapIndices,
+             IMG_UINT32 uiLog2HeapPageSize)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hPriv;
+
+	MMU_Levelx_INFO *psLevel = NULL;
+
+	MMU_Levelx_INFO *psPrevLevel = NULL;
+
+	IMG_UINT32 uiPTEIndex = 0;
+	IMG_UINT32 uiPageSize = (1 << uiLog2HeapPageSize);
+	IMG_UINT32 uiLoop = 0;
+	IMG_UINT32 ui32MappedCount = 0;
+	IMG_DEVMEM_OFFSET_T uiPgOffset = 0;
+	IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0;
+
+	IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0, uiDefProtFlags=0;
+	MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+
+	IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+
+	IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEV_PHYADDR *psDevPAddr;
+	IMG_DEV_PHYADDR sDevPAddr;
+	IMG_BOOL *pbValid;
+	IMG_BOOL bValid;
+	IMG_BOOL bDummyBacking = IMG_FALSE, bZeroBacking = IMG_FALSE;
+	IMG_BOOL bNeedBacking = IMG_FALSE;
+
+#if defined(PDUMP)
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+
+	PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages (%"IMG_INT64_FMTSPECd" bytes)",
+	             (IMG_UINT64)(ui32MapPageCount * uiPageSize));
+#endif /*PDUMP*/
+
+#if defined(TC_MEMORY_CONFIG) || defined(PLATO_MEMORY_CONFIG)
+	/* We're aware that on TC based platforms, accesses from GPU to CPU_LOCAL
+	 * allocated DevMem fail, so we forbid mapping such a PMR into device mmu */
+	if (PMR_Flags(psPMR) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Mapping a CPU_LOCAL PMR to device is forbidden on this platform", __func__));
+		return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+	}
+#endif
+
+	/* Validate the most essential parameters */
+	if ((NULL == psMMUContext) || (0 == sDevVAddrBase.uiAddr) || (NULL == psPMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Invalid mapping parameter issued",
+				__func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	/* Allocate memory for page-frame-numbers and validity states;
+	   small counts reuse the on-stack arrays declared above */
+	if (ui32MapPageCount > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		psDevPAddr = OSAllocMem(ui32MapPageCount * sizeof(IMG_DEV_PHYADDR));
+		if (psDevPAddr == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN list"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		pbValid = OSAllocMem(ui32MapPageCount * sizeof(IMG_BOOL));
+		if (pbValid == NULL)
+		{
+			/* Should allocation fail, clean-up here before exit */
+			PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN state"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			OSFreeMem(psDevPAddr);
+			goto e0;
+		}
+	}
+	else
+	{
+		psDevPAddr = asDevPAddr;
+		pbValid	= abValid;
+	}
+
+	/* Get the device physical addresses of the pages we are trying to map.
+	 * In the case of a non-indexed mapping we can get all addresses at once */
+	if (NULL == paui32MapIndices)
+	{
+		eError = PMR_DevPhysAddr(psPMR,
+		                         uiLog2HeapPageSize,
+		                         ui32MapPageCount,
+		                         (ui32PhysPgOffset << uiLog2HeapPageSize),
+		                         psDevPAddr,
+		                         pbValid);
+		if (eError != PVRSRV_OK)
+		{
+			goto e1;
+		}
+	}
+
+	/* Get the page table level configuration */
+	_MMU_GetPTConfig(psMMUContext,
+	                 (IMG_UINT32) uiLog2HeapPageSize,
+	                 &psConfig,
+	                 &hPriv,
+	                 &psDevVAddrConfig);
+
+	eError = _MMU_ConvertDevMemFlags(IMG_FALSE,
+	                                 uiMappingFlags,
+	                                 &uiMMUProtFlags,
+	                                 psMMUContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	/* Callback to get device specific protection flags */
+	if (psConfig->uiBytesPerEntry == 8)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize);
+		uiMMUProtFlags |= MMU_PROTFLAGS_READABLE;
+		uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE),
+		                                                                  uiLog2HeapPageSize);
+	}
+	else if (psConfig->uiBytesPerEntry == 4)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+		uiMMUProtFlags |= MMU_PROTFLAGS_READABLE;
+		uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: The page table entry byte length is not supported",
+				__func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e2;
+	}
+
+	if (PMR_IsSparse(psPMR))
+	{
+		/* We know there will not be anywhere near 4G PMRs */
+		bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(PMR_Flags(psPMR));
+		if (bDummyBacking)
+		{
+			bZeroBacking = PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(PMR_Flags(psPMR));
+		}
+	}
+
+	OSLockAcquire(psMMUContext->hLock);
+
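+	/* Walk the pages one PTE at a time. Page table cache maintenance is
+	 * batched: uiFlushStart/uiFlushEnd track the dirty PTE range and are
+	 * flushed whenever the walk crosses into another table, plus once
+	 * more after the loop. */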
+	for (uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++)
+	{
+
+#if defined(PDUMP)
+		IMG_DEVMEM_OFFSET_T uiNextSymName;
+#endif /*PDUMP*/
+
+		if (NULL != paui32MapIndices)
+		{
+			uiPgOffset = paui32MapIndices[uiLoop];
+
+			/* Calculate the device virtual address of the page */
+			sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + (uiPgOffset * uiPageSize);
+			/* Get the physical address to map */
+			eError = PMR_DevPhysAddr(psPMR,
+			                         uiLog2HeapPageSize,
+			                         1,
+			                         uiPgOffset * uiPageSize,
+			                         &sDevPAddr,
+			                         &bValid);
+			if (eError != PVRSRV_OK)
+			{
+				goto e3;
+			}
+		}
+		else
+		{
+			uiPgOffset = uiLoop + ui32PhysPgOffset;
+			sDevPAddr = psDevPAddr[uiLoop];
+			bValid = pbValid[uiLoop];
+		}
+
+		uiDefProtFlags = uiProtFlags;
+		/*
+			The default value of the entry is invalid, so we don't need to
+			mark it as such if the page wasn't valid; we just advance past
+			that address
+		 */
+		if (bValid || bDummyBacking)
+		{
+
+			if (!bValid)
+			{
+				if (bZeroBacking)
+				{
+					sDevPAddr.uiAddr = psMMUContext->psDevNode->sDevZeroPage.ui64PgPhysAddr;
+					/* Ensure the zero back page PTE is read only */
+					uiDefProtFlags = uiProtFlagsReadOnly;
+				}
+				else
+				{
+					sDevPAddr.uiAddr = psMMUContext->psDevNode->sDummyPage.ui64PgPhysAddr;
+				}
+			}
+			else
+			{
+				/* check the physical alignment of the memory to map */
+				PVR_ASSERT((sDevPAddr.uiAddr & (uiPageSize-1)) == 0);
+			}
+
+#if defined(DEBUG)
+			{
+				IMG_INT32	i32FeatureVal = 0;
+				IMG_UINT32 ui32BitLength = FloorLog2(sDevPAddr.uiAddr);
+
+				i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psMMUContext->psDevNode, PHYS_BUS_WIDTH);
+				do {
+					/* i32FeatureVal can be negative for cases where this feature is undefined
+					 * In that situation we need to bail out than go ahead with debug comparison */
+					if (0 > i32FeatureVal)
+						break;
+
+					if (ui32BitLength > i32FeatureVal)
+					{
+						PVR_DPF((PVR_DBG_ERROR,
+								"%s Failed. The physical address bitlength (%d)"
+								" is greater than the chip can handle (%d).",
+								__func__, ui32BitLength, i32FeatureVal));
+
+						PVR_ASSERT(ui32BitLength <= i32FeatureVal);
+						eError = PVRSRV_ERROR_INVALID_PARAMS;
+						goto e3;
+					}
+				}while(0);
+			}
+#endif /*DEBUG*/
+
+#if defined(PDUMP)
+			if (bValid)
+			{
+				eError = PMR_PDumpSymbolicAddr(psPMR, uiPgOffset * uiPageSize,
+				                               sizeof(aszMemspaceName), &aszMemspaceName[0],
+				                               sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+				                               &uiSymbolicAddrOffset,
+				                               &uiNextSymName);
+				PVR_ASSERT(eError == PVRSRV_OK);
+			}
+#endif /*PDUMP*/
+
+			psPrevLevel = psLevel;
+			/* Calculate PT index and get new table descriptor */
+			_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+			               &psLevel, &uiPTEIndex);
+
+			if (psPrevLevel == psLevel)
+			{
+				/*
+				 * Sparse allocations may have page offsets which
+				 * decrement as well as increment, so make sure we
+				 * update the range we will flush correctly.
+				 */
+				if (uiPTEIndex > uiFlushEnd)
+					uiFlushEnd = uiPTEIndex;
+				else if (uiPTEIndex < uiFlushStart)
+					uiFlushStart = uiPTEIndex;
+			}
+			else
+			{
+				/* Flush if we moved to another psLevel, i.e. page table */
+				if (psPrevLevel != NULL)
+				{
+					eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+					                                                &psPrevLevel->sMemDesc.psMapping->sMemHandle,
+					                                                uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset,
+					                                                (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+					if (eError != PVRSRV_OK)
+						goto e3;
+				}
+
+				uiFlushStart = uiPTEIndex;
+				uiFlushEnd = uiFlushStart;
+			}
+
+			HTBLOGK(HTB_SF_MMU_PAGE_OP_MAP,
+			        HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr),
+			        HTBLOG_U64_BITS_HIGH(sDevPAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevPAddr.uiAddr));
+
+			eError = _SetupPTE(psMMUContext,
+			                   psLevel,
+			                   uiPTEIndex,
+			                   psConfig,
+			                   &sDevPAddr,
+			                   IMG_FALSE,
+#if defined(PDUMP)
+			                   (bValid)?aszMemspaceName:(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName),
+			                   (bValid)?aszSymbolicAddress:((bZeroBacking)?DEV_ZERO_PAGE:DUMMY_PAGE),
+			                   (bValid)?uiSymbolicAddrOffset:0,
+#endif /*PDUMP*/
+			                   uiDefProtFlags);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Mapping failed", __func__));
+				goto e3;
+			}
+
+			if (bValid)
+			{
+				PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+				PVR_DPF ((PVR_DBG_MESSAGE,
+						"%s: devVAddr=" IMG_DEV_VIRTADDR_FMTSPEC ", "
+						"size=" IMG_DEVMEM_OFFSET_FMTSPEC,
+						__func__,
+						sDevVAddr.uiAddr,
+						uiPgOffset * uiPageSize));
+
+				ui32MappedCount++;
+			}
+		}
+
+		sDevVAddr.uiAddr += uiPageSize;
+	}
+
+	/* Flush the last level we touched */
+	if (psLevel != NULL)
+	{
+		eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+		                                                &psLevel->sMemDesc.psMapping->sMemHandle,
+		                                                uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+		                                                (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+		if (eError != PVRSRV_OK)
+			goto e3;
+	}
+
+	OSLockRelease(psMMUContext->hLock);
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	if (psDevPAddr != asDevPAddr)
+	{
+		OSFreeMem(pbValid);
+		OSFreeMem(psDevPAddr);
+	}
+
+	/* Flush TLB for PTs*/
+	psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+	                                               psMMUContext->hDevData,
+	                                               MMU_LEVEL_1,
+	                                               IMG_FALSE);
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, ui32MapPageCount);
+#endif /*PDUMP*/
+
+	return PVRSRV_OK;
+
+	e3:
+	OSLockRelease(psMMUContext->hLock);
+
+	if (PMR_IsSparse(psPMR) && PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags))
+	{
+		bNeedBacking = IMG_TRUE;
+	}
+
+	MMU_UnmapPages(psMMUContext,
+	               (bNeedBacking)?uiMappingFlags:0,
+	               sDevVAddrBase,
+	               uiLoop,
+	               paui32MapIndices,
+	               uiLog2HeapPageSize,
+	               PMR_IsSparse(psPMR));
+	e2:
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+	e1:
+	if (psDevPAddr != asDevPAddr)
+	{
+		OSFreeMem(pbValid);
+		OSFreeMem(psDevPAddr);
+	}
+	e0:
+	return eError;
+}
+
+/*
+	MMU_UnmapPages
+ */
+void
+MMU_UnmapPages (MMU_CONTEXT *psMMUContext,
+                PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+                IMG_DEV_VIRTADDR sDevVAddrBase,
+                IMG_UINT32 ui32PageCount,
+                IMG_UINT32 *pai32FreeIndices,
+                IMG_UINT32 uiLog2PageSize,
+                PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags)
+{
+	IMG_UINT32 uiPTEIndex = 0, ui32Loop=0;
+	IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+	IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0;
+	MMU_Levelx_INFO *psLevel = NULL;
+	MMU_Levelx_INFO *psPrevLevel = NULL;
+	IMG_HANDLE hPriv;
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0;
+	MMU_PROTFLAGS_T uiMMUProtFlags = 0, uiMMUReadOnlyProtFlags = 0;
+	IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+	IMG_DEV_PHYADDR sBackingPgDevPhysAddr;
+	IMG_BOOL bUnmap = IMG_TRUE, bDummyBacking = IMG_FALSE, bZeroBacking = IMG_FALSE;
+	IMG_CHAR *pcBackingPageName = NULL;
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX,
+	             ui32PageCount,
+	             (IMG_UINT64)sDevVAddr.uiAddr,
+	             ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1);
+#endif
+	bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMemAllocFlags);
+	bZeroBacking = PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiMemAllocFlags);
+
+	if (bZeroBacking)
+	{
+		sBackingPgDevPhysAddr.uiAddr = psMMUContext->psDevNode->sDevZeroPage.ui64PgPhysAddr;
+		pcBackingPageName = DEV_ZERO_PAGE;
+	}
+	else
+	{
+		sBackingPgDevPhysAddr.uiAddr = psMMUContext->psDevNode->sDummyPage.ui64PgPhysAddr;
+		pcBackingPageName = DUMMY_PAGE;
+	}
+
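+	/* For sparse allocations with dummy/zero backing the PTEs are
+	 * re-pointed at the backing page instead of being invalidated;
+	 * a plain unmap poisons them with the bad physical address. */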
+	bUnmap = (uiMappingFlags)? !bDummyBacking : IMG_TRUE;
+	/* Get PT and address configs */
+	_MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+	                 &psConfig, &hPriv, &psDevVAddrConfig);
+
+	if (_MMU_ConvertDevMemFlags(bUnmap,
+	                            uiMappingFlags,
+	                            &uiMMUProtFlags,
+	                            psMMUContext) != PVRSRV_OK)
+	{
+		return;
+	}
+
+	uiMMUReadOnlyProtFlags = (uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE) | MMU_PROTFLAGS_READABLE;
+
+	/* Callback to get device specific protection flags */
+	if (psConfig->uiBytesPerEntry == 4)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+		uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUReadOnlyProtFlags);
+	}
+	else if (psConfig->uiBytesPerEntry == 8)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+		uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUReadOnlyProtFlags, uiLog2PageSize);
+	}
+
+
+	OSLockAcquire(psMMUContext->hLock);
+
+	/* Unmap page by page */
+	while (ui32Loop < ui32PageCount)
+	{
+		if (NULL != pai32FreeIndices)
+		{
+			/* Calculate the device virtual address of the page */
+			sDevVAddr.uiAddr = sDevVAddrBase.uiAddr +
+					pai32FreeIndices[ui32Loop] * (IMG_UINT64) uiPageSize;
+		}
+
+		psPrevLevel = psLevel;
+		/* Calculate PT index and get new table descriptor */
+		_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+		               &psLevel, &uiPTEIndex);
+
+		if (psPrevLevel == psLevel)
+		{
+			/*
+			 * Sparse allocations may have page offsets which
+			 * decrement as well as increment, so make sure we
+			 * update the range we will flush correctly.
+			 */
+			if (uiPTEIndex > uiFlushEnd)
+				uiFlushEnd = uiPTEIndex;
+			else if (uiPTEIndex < uiFlushStart)
+				uiFlushStart = uiPTEIndex;
+		}
+		else
+		{
+			/* Flush if we moved to another psLevel, i.e. page table */
+			if (psPrevLevel != NULL)
+			{
+				psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+				                                       &psPrevLevel->sMemDesc.psMapping->sMemHandle,
+				                                       uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset,
+				                                       (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+			}
+
+			uiFlushStart = uiPTEIndex;
+			uiFlushEnd = uiFlushStart;
+		}
+
+		HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
+		        HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr));
+
+		if (_SetupPTE(psMMUContext,
+		              psLevel,
+		              uiPTEIndex,
+		              psConfig,
+		              (bDummyBacking)? &sBackingPgDevPhysAddr : &gsBadDevPhyAddr,
+		              bUnmap,
+#if defined(PDUMP)
+		              (bDummyBacking)? (psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName): NULL,
+		              (bDummyBacking)? pcBackingPageName: NULL,
+		              0U,
+#endif
+		              (bZeroBacking)? uiProtFlagsReadOnly: uiProtFlags) != PVRSRV_OK)
+		{
+			goto e0;
+		}
+
+		/* Check we haven't wrapped around */
+		PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+		ui32Loop++;
+		sDevVAddr.uiAddr += uiPageSize;
+	}
+
+	/* Flush the last level we touched */
+	if (psLevel != NULL)
+	{
+		psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+		                                       &psLevel->sMemDesc.psMapping->sMemHandle,
+		                                       uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+		                                       (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+	}
+
+	OSLockRelease(psMMUContext->hLock);
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	/* Flush TLB for PTs*/
+	psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+	                                               psMMUContext->hDevData,
+	                                               MMU_LEVEL_1,
+	                                               IMG_TRUE);
+
+	return;
+
+	e0:
+	OSLockRelease(psMMUContext->hLock);
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+	PVR_DPF((PVR_DBG_ERROR, "%s: Failed to unmap page table", __func__));
+	PVR_ASSERT(0);
+	return;
+}
+
+PVRSRV_ERROR
+MMU_MapPMRFast (MMU_CONTEXT *psMMUContext,
+                IMG_DEV_VIRTADDR sDevVAddrBase,
+                const PMR *psPMR,
+                IMG_DEVMEM_SIZE_T uiSizeBytes,
+                PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+                IMG_UINT32 uiLog2HeapPageSize)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 uiCount, i;
+	IMG_UINT32 uiPageSize = 1 << uiLog2HeapPageSize;
+	IMG_UINT32 uiPTEIndex = 0;
+	IMG_UINT64 uiProtFlags;
+	MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+	MMU_Levelx_INFO *psLevel = NULL;
+	IMG_HANDLE hPriv;
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+	IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEV_PHYADDR *psDevPAddr;
+	IMG_BOOL *pbValid;
+	IMG_UINT32 uiFlushStart = 0;
+
+#if defined(PDUMP)
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+	IMG_UINT32 ui32MappedCount = 0;
+	PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages (%"IMG_INT64_FMTSPECd" bytes)", uiSizeBytes);
+#endif /*PDUMP*/
+
+	/* We should verify the size and contiguity when supporting variable page size */
+
+	PVR_ASSERT (psMMUContext != NULL);
+	PVR_ASSERT (psPMR != NULL);
+
+#if defined(TC_MEMORY_CONFIG) || defined(PLATO_MEMORY_CONFIG)
+	/* We're aware that on TC based platforms, accesses from GPU to CPU_LOCAL
+	 * allocated DevMem fail, so we forbid mapping such a PMR into device mmu */
+	if (PMR_Flags(psPMR) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Mapping a CPU_LOCAL PMR to device is forbidden on this platform", __func__));
+		return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+	}
+#endif
+
+	/* Allocate memory for page-frame-numbers and validity states,
+	   N.B. assert could be triggered by an illegal uiSizeBytes */
+	uiCount = uiSizeBytes >> uiLog2HeapPageSize;
+	PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiCount << uiLog2HeapPageSize == uiSizeBytes);
+	if (uiCount > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		psDevPAddr = OSAllocMem(uiCount * sizeof(IMG_DEV_PHYADDR));
+		if (psDevPAddr == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN list"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto return_error;
+		}
+
+		pbValid = OSAllocMem(uiCount * sizeof(IMG_BOOL));
+		if (pbValid == NULL)
+		{
+			/* Should allocation fail, exit via the common path below;
+			   free_paddr_array frees psDevPAddr exactly once (freeing it
+			   here as well would be a double free) */
+			PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN state"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto free_paddr_array;
+		}
+	}
+	else
+	{
+		psDevPAddr = asDevPAddr;
+		pbValid	= abValid;
+	}
+
+	/* Get general PT and address configs */
+	_MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2HeapPageSize,
+	                 &psConfig, &hPriv, &psDevVAddrConfig);
+
+	eError = _MMU_ConvertDevMemFlags(IMG_FALSE,
+	                                 uiMappingFlags,
+	                                 &uiMMUProtFlags,
+	                                 psMMUContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto put_mmu_context;
+	}
+
+	/* Callback to get device specific protection flags */
+
+	if (psConfig->uiBytesPerEntry == 8)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize);
+	}
+	else if (psConfig->uiBytesPerEntry == 4)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: The page table entry byte length is not supported",
+				__func__));
+		eError = PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+		goto put_mmu_context;
+	}
+
+
+	/* "uiSize" is the amount of contiguity in the underlying
+	   page.  Normally this would be constant for the system, but,
+	   that constant needs to be communicated, in case it's ever
+	   different; caller guarantees that PMRLockSysPhysAddr() has
+	   already been called */
+	eError = PMR_DevPhysAddr(psPMR,
+	                         uiLog2HeapPageSize,
+	                         uiCount,
+	                         0,
+	                         psDevPAddr,
+	                         pbValid);
+	if (eError != PVRSRV_OK)
+	{
+		goto put_mmu_context;
+	}
+
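+	/* Fast path: all physical addresses were fetched above in a single
+	 * PMR_DevPhysAddr() call, so the loop below only writes PTEs and
+	 * flushes each page table as it fills. */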
+	OSLockAcquire(psMMUContext->hLock);
+
+	_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+	               &psLevel, &uiPTEIndex);
+	uiFlushStart = uiPTEIndex;
+
+	/* Map in all pages of that PMR page by page*/
+	for (i=0, uiCount=0; uiCount < uiSizeBytes; i++)
+	{
+#if defined(DEBUG)
+		{
+			IMG_INT32	i32FeatureVal = 0;
+			IMG_UINT32 ui32BitLength = FloorLog2(psDevPAddr[i].uiAddr);
+			i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psMMUContext->psDevNode, PHYS_BUS_WIDTH);
+			do {
+				if (0 > i32FeatureVal)
+					break;
+
+				if (ui32BitLength > i32FeatureVal)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							"%s Failed. The physical address bitlength (%d)"
+							" is greater than the chip can handle (%d).",
+							__func__, ui32BitLength, i32FeatureVal));
+
+					PVR_ASSERT(ui32BitLength <= i32FeatureVal);
+					eError = PVRSRV_ERROR_INVALID_PARAMS;
+					OSLockRelease(psMMUContext->hLock);
+					goto put_mmu_context;
+				}
+			}while(0);
+		}
+#endif /*DEBUG*/
+#if defined(PDUMP)
+		{
+			IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+			eError = PMR_PDumpSymbolicAddr(psPMR, uiCount,
+			                               sizeof(aszMemspaceName), &aszMemspaceName[0],
+			                               sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+			                               &uiSymbolicAddrOffset,
+			                               &uiNextSymName);
+			PVR_ASSERT(eError == PVRSRV_OK);
+			ui32MappedCount++;
+		}
+#endif /*PDUMP*/
+
+		HTBLOGK(HTB_SF_MMU_PAGE_OP_PMRMAP,
+		        HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr),
+		        HTBLOG_U64_BITS_HIGH(psDevPAddr[i].uiAddr), HTBLOG_U64_BITS_LOW(psDevPAddr[i].uiAddr));
+
+		/* Set the PT entry with the specified address and protection flags */
+		eError = _SetupPTE(psMMUContext, psLevel, uiPTEIndex,
+		                   psConfig, &psDevPAddr[i], IMG_FALSE,
+#if defined(PDUMP)
+		                   aszMemspaceName,
+		                   aszSymbolicAddress,
+		                   uiSymbolicAddrOffset,
+#endif /*PDUMP*/
+		                   uiProtFlags);
+		if (eError != PVRSRV_OK)
+			goto unlock_mmu_context;
+
+		sDevVAddr.uiAddr += uiPageSize;
+		uiCount += uiPageSize;
+
+		/* Calculate PT index and get new table descriptor */
+		if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (uiCount != uiSizeBytes))
+		{
+			uiPTEIndex++;
+		}
+		else
+		{
+			eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+			                                                &psLevel->sMemDesc.psMapping->sMemHandle,
+			                                                uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+			                                                (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+			if (eError != PVRSRV_OK)
+				goto unlock_mmu_context;
+
+
+			_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+			               &psLevel, &uiPTEIndex);
+			uiFlushStart = uiPTEIndex;
+		}
+	}
+
+	OSLockRelease(psMMUContext->hLock);
+
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	if (psDevPAddr != asDevPAddr)
+	{
+		OSFreeMem(pbValid);
+		OSFreeMem(psDevPAddr);
+	}
+
+	/* Flush TLB for PTs*/
+	psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+	                                               psMMUContext->hDevData,
+	                                               MMU_LEVEL_1,
+	                                               IMG_FALSE);
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, i);
+#endif /*PDUMP*/
+
+	return PVRSRV_OK;
+
+unlock_mmu_context:
+	OSLockRelease(psMMUContext->hLock);
+	MMU_UnmapPMRFast(psMMUContext,
+	                 sDevVAddrBase,
+	                 uiSizeBytes >> uiLog2HeapPageSize,
+	                 uiLog2HeapPageSize);
+put_mmu_context:
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	if (pbValid != abValid)
+	{
+		OSFreeMem(pbValid);
+	}
+
+free_paddr_array:
+	if (psDevPAddr != asDevPAddr)
+	{
+		OSFreeMem(psDevPAddr);
+	}
+
+return_error:
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+/*
+	MMU_UnmapPMRFast
+ */
+void
+MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext,
+                 IMG_DEV_VIRTADDR sDevVAddrBase,
+                 IMG_UINT32 ui32PageCount,
+                 IMG_UINT32 uiLog2PageSize)
+{
+	IMG_UINT32 uiPTEIndex = 0, ui32Loop=0;
+	IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+	MMU_Levelx_INFO *psLevel = NULL;
+	IMG_HANDLE hPriv;
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+	IMG_UINT64 uiProtFlags = 0;
+	MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+	IMG_UINT64 uiEntry = 0;
+	IMG_UINT32 uiFlushStart = 0;
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX,
+	             ui32PageCount,
+	             (IMG_UINT64)sDevVAddr.uiAddr,
+	             ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1);
+#endif
+
+	/* Get PT and address configs */
+	_MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+	                 &psConfig, &hPriv, &psDevVAddrConfig);
+
+	if (_MMU_ConvertDevMemFlags(IMG_TRUE,
+	                            0,
+	                            &uiMMUProtFlags,
+	                            psMMUContext) != PVRSRV_OK)
+	{
+		return;
+	}
+
+	/* Callback to get device specific protection flags */
+
+	if (psConfig->uiBytesPerEntry == 8)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+
+		/* Fill the entry with a bad address but leave space for protection flags */
+		uiEntry = (gsBadDevPhyAddr.uiAddr & ~psConfig->uiProtMask) | uiProtFlags;
+	}
+	else if (psConfig->uiBytesPerEntry == 4)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+
+		/* Fill the entry with a bad address but leave space for protection flags */
+		uiEntry = (((IMG_UINT32) gsBadDevPhyAddr.uiAddr) & ~psConfig->uiProtMask) | (IMG_UINT32) uiProtFlags;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: The page table entry byte length is not supported",
+				__func__));
+		goto e0;
+	}
+
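+	/* uiEntry (bad address plus protection bits) was precomputed above so
+	 * the loop below only has to store it per PTE. */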
+	OSLockAcquire(psMMUContext->hLock);
+
+	_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+	               &psLevel, &uiPTEIndex);
+	uiFlushStart = uiPTEIndex;
+
+	/* Unmap page by page and keep the loop as quick as possible.
+	 * Only use parts of _SetupPTE that need to be executed. */
+	while (ui32Loop < ui32PageCount)
+	{
+
+		/* Set the PT entry to invalid and poison it with a bad address */
+		if (psConfig->uiBytesPerEntry == 8)
+		{
+			((IMG_UINT64*) psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = uiEntry;
+		}
+		else if (psConfig->uiBytesPerEntry == 4)
+		{
+			((IMG_UINT32*) psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = (IMG_UINT32) uiEntry;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: The page table entry byte length is not supported",
+					__func__));
+			goto e1;
+		}
+
+		/* Log modifications */
+		HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
+		        HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr));
+
+		HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+		        HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+		        uiPTEIndex, MMU_LEVEL_1,
+		        HTBLOG_U64_BITS_HIGH(uiEntry), HTBLOG_U64_BITS_LOW(uiEntry),
+		        IMG_FALSE);
+
+#if defined (PDUMP)
+		PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+		                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+		                      psLevel->sMemDesc.pvCpuVAddr,
+		                      psLevel->sMemDesc.sDevPAddr,
+		                      uiPTEIndex,
+		                      1,
+		                      NULL,
+		                      NULL,
+		                      0,
+		                      psConfig->uiBytesPerEntry,
+		                      psConfig->uiAddrLog2Align,
+		                      psConfig->uiAddrShift,
+		                      psConfig->uiAddrMask,
+		                      psConfig->uiProtMask,
+		                      psConfig->uiValidEnMask,
+		                      0,
+		                      psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+		sDevVAddr.uiAddr += uiPageSize;
+		ui32Loop++;
+
+		/* Calculate PT index and get new table descriptor */
+		if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (ui32Loop != ui32PageCount))
+		{
+			uiPTEIndex++;
+		}
+		else
+		{
+			psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+			                                       &psLevel->sMemDesc.psMapping->sMemHandle,
+			                                       uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+			                                       (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+
+			_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+			               &psLevel, &uiPTEIndex);
+			uiFlushStart = uiPTEIndex;
+		}
+	}
+
+	OSLockRelease(psMMUContext->hLock);
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	/* Flush TLB for PTs*/
+	psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+	                                               psMMUContext->hDevData,
+	                                               MMU_LEVEL_1,
+	                                               IMG_TRUE);
+
+	return;
+
+	e1:
+	OSLockRelease(psMMUContext->hLock);
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+	e0:
+	PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map/unmap page table", __func__));
+	PVR_ASSERT(0);
+	return;
+}
+
+/*
+	MMU_ChangeValidity
+ */
+PVRSRV_ERROR
+MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
+                   IMG_DEV_VIRTADDR sDevVAddr,
+                   IMG_DEVMEM_SIZE_T uiNumPages,
+                   IMG_UINT32 uiLog2PageSize,
+                   IMG_BOOL bMakeValid,
+                   PMR *psPMR)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	IMG_HANDLE hPriv;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	const MMU_PxE_CONFIG *psConfig;
+	MMU_Levelx_INFO *psLevel = NULL;
+	IMG_UINT32 uiFlushStart = 0;
+	IMG_UINT32 uiPTIndex = 0;
+	IMG_UINT32 i;
+	IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+	IMG_BOOL bValid;
+
+#if defined(PDUMP)
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+	PDUMPCOMMENT("Change valid bit of the data pages to %d (0x%"IMG_UINT64_FMTSPECX" - 0x%"IMG_UINT64_FMTSPECX")",
+	             bMakeValid,
+	             sDevVAddr.uiAddr,
+	             sDevVAddr.uiAddr + (uiNumPages<<uiLog2PageSize) - 1);
+#endif /*PDUMP*/
+
+	/* We should verify the size and contiguity when supporting variable page size */
+	PVR_ASSERT (psMMUContext != NULL);
+	PVR_ASSERT (psPMR != NULL);
+
+	/* Get general PT and address configs */
+	_MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+	                 &psConfig, &hPriv, &psDevVAddrConfig);
+
+	_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+	               &psLevel, &uiPTIndex);
+	uiFlushStart = uiPTIndex;
+
+	/* Do a page table walk and change attribute for every page in range. */
+	for (i=0; i < uiNumPages;)
+	{
+		/* Set the entry */
+		if (bMakeValid)
+		{
+			/* Only set valid if physical address exists (sparse allocs might have none)*/
+			eError = PMR_IsOffsetValid(psPMR, uiLog2PageSize, 1, (IMG_DEVMEM_OFFSET_T) i << uiLog2PageSize, &bValid);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Cannot determine validity of page table entries page"));
+				goto e_exit;
+			}
+
+			if (bValid)
+			{
+				if (psConfig->uiBytesPerEntry == 8)
+				{
+					((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
+				}
+				else if (psConfig->uiBytesPerEntry == 4)
+				{
+					((IMG_UINT32 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
+				}
+				else
+				{
+					eError = PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+					PVR_DPF((PVR_DBG_ERROR, "Cannot change page table entries due to wrong configuration"));
+					goto e_exit;
+				}
+			}
+		}
+		else
+		{
+			if (psConfig->uiBytesPerEntry == 8)
+			{
+				((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
+			}
+			else if (psConfig->uiBytesPerEntry == 4)
+			{
+				((IMG_UINT32 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
+			}
+			else
+			{
+				eError = PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+				PVR_DPF((PVR_DBG_ERROR, "Cannot change page table entries due to wrong configuration"));
+				goto e_exit;
+			}
+		}
+
+#if defined(PDUMP)
+		PMR_PDumpSymbolicAddr(psPMR, i<<uiLog2PageSize,
+		                      sizeof(aszMemspaceName), &aszMemspaceName[0],
+		                      sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+		                      &uiSymbolicAddrOffset,
+		                      &uiNextSymName);
+
+		PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+		                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+		                      psLevel->sMemDesc.pvCpuVAddr,
+		                      psLevel->sMemDesc.sDevPAddr,
+		                      uiPTIndex,
+		                      1,
+		                      aszMemspaceName,
+		                      aszSymbolicAddress,
+		                      uiSymbolicAddrOffset,
+		                      psConfig->uiBytesPerEntry,
+		                      psConfig->uiAddrLog2Align,
+		                      psConfig->uiAddrShift,
+		                      psConfig->uiAddrMask,
+		                      psConfig->uiProtMask,
+		                      psConfig->uiValidEnMask,
+		                      0,
+		                      psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+		sDevVAddr.uiAddr += uiPageSize;
+		i++;
+
+		/* Calculate PT index and get new table descriptor */
+		if (uiPTIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (i != uiNumPages))
+		{
+			uiPTIndex++;
+		}
+		else
+		{
+
+			eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+			                                                &psLevel->sMemDesc.psMapping->sMemHandle,
+			                                                uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+			                                                (uiPTIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+			if (eError != PVRSRV_OK)
+				goto e_exit;
+
+			_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+			               &psLevel, &uiPTIndex);
+			uiFlushStart = uiPTIndex;
+		}
+	}
+
+	e_exit:
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	/* Flush TLB for PTs*/
+	psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+	                                               psMMUContext->hDevData,
+	                                               MMU_LEVEL_1,
+	                                               !bMakeValid);
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+	MMU_AcquireBaseAddr
+ */
+PVRSRV_ERROR
+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr)
+{
+	if (!psMMUContext)
+	{
+		psPhysAddr->uiAddr = 0;
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*psPhysAddr = psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr;
+	return PVRSRV_OK;
+}
+
+/*
+	MMU_ReleaseBaseAddr
+ */
+void
+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext)
+{
+	PVR_UNREFERENCED_PARAMETER(psMMUContext);
+}
+
+/*
+	MMU_SetDeviceData
+ */
+void MMU_SetDeviceData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hDevData)
+{
+	psMMUContext->hDevData = hDevData;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+	MMU_SetOSids, MMU_GetOSids
+ */
+
+void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt)
+{
+	psMMUContext->ui32OSid     = ui32OSid;
+	psMMUContext->ui32OSidReg  = ui32OSidReg;
+	psMMUContext->bOSidAxiProt = bOSidAxiProt;
+
+	return;
+}
+
+void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt)
+{
+	*pui32OSid     = psMMUContext->ui32OSid;
+	*pui32OSidReg  = psMMUContext->ui32OSidReg;
+	*pbOSidAxiProt = psMMUContext->bOSidAxiProt;
+
+	return;
+}
+
+#endif
+
+/*
+	MMU_CheckFaultAddress
+ */
+void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext,
+                           IMG_DEV_VIRTADDR *psDevVAddr,
+                           DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                           void *pvDumpDebugFile,
+                           MMU_FAULT_DATA *psOutFaultData)
+{
+	/* Ideally the RGX defs should be via callbacks, but the function is only called from RGX. */
+#define MMU_VALID_STR(entry,level) \
+		(apszMMUValidStr[((((entry)&(RGX_MMUCTRL_##level##_DATA_ENTRY_PENDING_EN))!=0) << 1)| \
+		                 ((((entry)&(RGX_MMUCTRL_##level##_DATA_VALID_EN))!=0) << 0)])
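+	/* The string index is ((pending != 0) << 1) | (valid != 0). */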
+	static const IMG_PCHAR apszMMUValidStr[1<<2] = {/*--*/ "not valid",
+			/*-V*/ "valid",
+			/*P-*/ "pending",
+	/*PV*/ "inconsistent (pending and valid)"};
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+	MMU_LEVEL	eMMULevel = psDevAttrs->eTopLevel;
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_PxE_CONFIG *psMMUPDEConfig;
+	const MMU_PxE_CONFIG *psMMUPTEConfig;
+	const MMU_DEVVADDR_CONFIG *psMMUDevVAddrConfig;
+	IMG_HANDLE hPriv;
+	MMU_Levelx_INFO *psLevel = NULL;
+	PVRSRV_ERROR eError;
+	IMG_UINT64 uiIndex;
+	IMG_UINT32 ui32PCIndex;
+	IMG_UINT32 ui32PDIndex;
+	IMG_UINT32 ui32PTIndex;
+	IMG_UINT32 ui32Log2PageSize = 12; /* assume 4K until the PDE tells us otherwise */
+	MMU_FAULT_DATA sMMUFaultData = {0};
+	MMU_LEVEL_DATA *psMMULevelData;
+
+	OSLockAcquire(psMMUContext->hLock);
+
+	/*
+		At this point we don't know the page size so assume it's 4K.
+		When we get the PD level (MMU_LEVEL_2) we can check to see
+		if this assumption is correct.
+	 */
+	eError = psDevAttrs->pfnGetPageSizeConfiguration(12,
+	                                                 &psMMUPDEConfig,
+	                                                 &psMMUPTEConfig,
+	                                                 &psMMUDevVAddrConfig,
+	                                                 &hPriv);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("Failed to get the page size info for log2 page sizeof 12"));
+	}
+
+	psLevel = &psMMUContext->sBaseLevelInfo;
+	psConfig = psDevAttrs->psBaseConfig;
+
+	sMMUFaultData.eTopLevel = psDevAttrs->eTopLevel;
+	sMMUFaultData.eType = MMU_FAULT_TYPE_NON_PM;
+
+
+	for (; eMMULevel > MMU_LEVEL_0; eMMULevel--)
+	{
+		if (eMMULevel == MMU_LEVEL_3)
+		{
+			/* Determine the PC index */
+			uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexMask;
+			uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexShift;
+			ui32PCIndex = (IMG_UINT32) uiIndex;
+			PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PCIndex));
+
+			psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_3];
+			psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry;
+			psMMULevelData->ui32Index = ui32PCIndex;
+
+			if (ui32PCIndex >= psLevel->ui32NumOfEntries)
+			{
+				PVR_DUMPDEBUG_LOG("PC index (%d) out of bounds (%d)", ui32PCIndex, psLevel->ui32NumOfEntries);
+				psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries;
+				break;
+			}
+
+			if (psConfig->uiBytesPerEntry == 4)
+			{
+				IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_DUMPDEBUG_LOG("PCE for index %d = 0x%08x and is %s",
+				                  ui32PCIndex,
+				                  pui32Ptr[ui32PCIndex],
+				                  MMU_VALID_STR(pui32Ptr[ui32PCIndex], PC));
+
+				psMMULevelData->ui64Address = pui32Ptr[ui32PCIndex];
+				psMMULevelData->psDebugStr  = MMU_VALID_STR(pui32Ptr[ui32PCIndex], PC);
+			}
+			else
+			{
+				IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_DUMPDEBUG_LOG("PCE for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+				                  ui32PCIndex,
+				                  pui64Ptr[ui32PCIndex],
+				                  MMU_VALID_STR(pui64Ptr[ui32PCIndex], PC));
+
+				psMMULevelData->ui64Address = pui64Ptr[ui32PCIndex];
+				psMMULevelData->psDebugStr  = MMU_VALID_STR(pui64Ptr[ui32PCIndex], PC);
+			}
+
+			psLevel = psLevel->apsNextLevel[ui32PCIndex];
+			if (!psLevel)
+			{
+				break;
+			}
+			psConfig = psMMUPDEConfig;
+		}
+
+
+		if (eMMULevel == MMU_LEVEL_2)
+		{
+			/* Determine the PD index */
+			uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexMask;
+			uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexShift;
+			ui32PDIndex = (IMG_UINT32) uiIndex;
+			PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PDIndex));
+
+			psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_2];
+			psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry;
+			psMMULevelData->ui32Index = ui32PDIndex;
+
+			if (ui32PDIndex >= psLevel->ui32NumOfEntries)
+			{
+				PVR_DUMPDEBUG_LOG("PD index (%d) out of bounds (%d)", ui32PDIndex, psLevel->ui32NumOfEntries);
+				psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries;
+				break;
+			}
+
+			if (psConfig->uiBytesPerEntry == 4)
+			{
+				IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				/* FIXME: MMU_VALID_STR doesn't work here because
+				 *        RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN is wider than
+				 *        32 bits */
+
+				PVR_DUMPDEBUG_LOG("PDE for index %d = 0x%08x and is %s",
+				                  ui32PDIndex,
+				                  pui32Ptr[ui32PDIndex],
+				                  MMU_VALID_STR(pui32Ptr[ui32PDIndex], PD));
+
+				psMMULevelData->ui64Address = pui32Ptr[ui32PDIndex];
+				psMMULevelData->psDebugStr  = MMU_VALID_STR(pui32Ptr[ui32PDIndex], PD);
+
+				if (psDevAttrs->pfnGetPageSizeFromPDE4(pui32Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
+				{
+					PVR_LOG(("Failed to get the page size from the PDE"));
+				}
+			}
+			else
+			{
+				IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_DUMPDEBUG_LOG("PDE for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+				                  ui32PDIndex,
+				                  pui64Ptr[ui32PDIndex],
+				                  MMU_VALID_STR(pui64Ptr[ui32PDIndex], PD));
+
+				psMMULevelData->ui64Address = pui64Ptr[ui32PDIndex];
+				psMMULevelData->psDebugStr  = MMU_VALID_STR(pui64Ptr[ui32PDIndex], PD);
+
+				if (psDevAttrs->pfnGetPageSizeFromPDE8(pui64Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
+				{
+					PVR_LOG(("Failed to get the page size from the PDE"));
+				}
+			}
+
+			/*
+			   We assumed the page size was 4K; now that we have the actual
+			   size from the PDE we can confirm whether that assumption was
+			   correct. Until now it hasn't mattered, as the PC and PD are
+			   the same regardless of the page size
+			 */
+			if (ui32Log2PageSize != 12)
+			{
+				/* Put the 4K page size data */
+				psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+
+				/* Get the correct size data */
+				eError = psDevAttrs->pfnGetPageSizeConfiguration(ui32Log2PageSize,
+				                                                 &psMMUPDEConfig,
+				                                                 &psMMUPTEConfig,
+				                                                 &psMMUDevVAddrConfig,
+				                                                 &hPriv);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_LOG(("Failed to get the page size info for log2 page sizeof %d", ui32Log2PageSize));
+					break;
+				}
+			}
+			psLevel = psLevel->apsNextLevel[ui32PDIndex];
+			if (!psLevel)
+			{
+				break;
+			}
+			psConfig = psMMUPTEConfig;
+		}
+
+
+		if (eMMULevel == MMU_LEVEL_1)
+		{
+			/* Determine the PT index */
+			uiIndex = psDevVAddr->uiAddr & psMMUDevVAddrConfig->uiPTIndexMask;
+			uiIndex = uiIndex >> psMMUDevVAddrConfig->uiPTIndexShift;
+			ui32PTIndex = (IMG_UINT32) uiIndex;
+			PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PTIndex));
+
+			psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_1];
+			psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry;
+			psMMULevelData->ui32Index = ui32PTIndex;
+
+			if (ui32PTIndex >= psLevel->ui32NumOfEntries)
+			{
+				PVR_DUMPDEBUG_LOG("PT index (%d) out of bounds (%d)", ui32PTIndex, psLevel->ui32NumOfEntries);
+				psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries;
+				break;
+			}
+
+			if (psConfig->uiBytesPerEntry == 4)
+			{
+				IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_DUMPDEBUG_LOG("PTE for index %d = 0x%08x and is %s",
+				                  ui32PTIndex,
+				                  pui32Ptr[ui32PTIndex],
+				                  MMU_VALID_STR(pui32Ptr[ui32PTIndex], PT));
+
+				psMMULevelData->ui64Address = pui32Ptr[ui32PTIndex];
+				psMMULevelData->psDebugStr  = MMU_VALID_STR(pui32Ptr[ui32PTIndex], PT);
+			}
+			else
+			{
+				IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_DUMPDEBUG_LOG("PTE for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+				                  ui32PTIndex,
+				                  pui64Ptr[ui32PTIndex],
+				                  MMU_VALID_STR(pui64Ptr[ui32PTIndex], PT));
+
+				psMMULevelData->ui64Address = pui64Ptr[ui32PTIndex];
+				psMMULevelData->psDebugStr  = MMU_VALID_STR(pui64Ptr[ui32PTIndex], PT);
+			}
+			goto e1;
+		}
+
+		PVR_LOG(("Unsupported MMU setup"));
+	}
+
+	e1:
+	/* Put the page size data back */
+	psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+	OSLockRelease(psMMUContext->hLock);
+
+	if (psOutFaultData)
+	{
+		*psOutFaultData = sMMUFaultData;
+	}
+}
+
+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
+                             IMG_UINT32 uiLog2PageSize,
+                             IMG_DEV_VIRTADDR sDevVAddr)
+{
+	MMU_Levelx_INFO *psLevel = NULL;
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	IMG_HANDLE hPriv = NULL;
+	IMG_UINT32 uiIndex = 0;
+	IMG_BOOL bStatus = IMG_FALSE;
+
+	_MMU_GetPTConfig(psMMUContext, uiLog2PageSize, &psConfig, &hPriv, &psDevVAddrConfig);
+
+	OSLockAcquire(psMMUContext->hLock);
+
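+	/* Walk from the configured top level down to the page table, falling
+	 * through the cases; a missing intermediate level means the address
+	 * is not mapped. */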
+	switch (psMMUContext->psDevAttrs->eTopLevel)
+	{
+		case MMU_LEVEL_3:
+			uiIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+			psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
+			if (psLevel == NULL)
+				break;
+
+			__fallthrough;
+		case MMU_LEVEL_2:
+			uiIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+
+			if (psLevel != NULL)
+				psLevel = psLevel->apsNextLevel[uiIndex];
+			else
+				psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
+
+			if (psLevel == NULL)
+				break;
+
+			__fallthrough;
+		case MMU_LEVEL_1:
+			uiIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+
+			if (psLevel == NULL)
+				psLevel = &psMMUContext->sBaseLevelInfo;
+
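+			/* N.B. the entry is read as 64-bit here regardless of
+			 * psConfig->uiBytesPerEntry */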
+			bStatus = ((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiIndex]
+			          & psConfig->uiValidEnMask;
+			break;
+		default:
+			PVR_LOG(("MMU_IsVDevAddrValid: Unsupported MMU setup"));
+			break;
+	}
+
+	OSLockRelease(psMMUContext->hLock);
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	return bStatus;
+}
+
+#if defined(PDUMP)
+IMG_CHAR *MMU_GetPxPDumpMemSpaceName(MMU_CONTEXT *psMMUContext)
+{
+	return psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName;
+}
+
+/*
+	MMU_ContextDerivePCPDumpSymAddr
+ */
+PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
+                                             IMG_CHAR *pszPDumpSymbolicNameBuffer,
+                                             size_t uiPDumpSymbolicNameBufferSize)
+{
+	size_t uiCount;
+	IMG_UINT64 ui64PhysAddr;
+	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+	if (!psMMUContext->sBaseLevelInfo.sMemDesc.bValid)
+	{
+		/* We don't have any allocations. You're not allowed to ask
+		   for the page catalogue base address until you've made at
+		   least one allocation */
+		return PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR;
+	}
+
+	ui64PhysAddr = (IMG_UINT64)psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr.uiAddr;
+
+	PVR_ASSERT(uiPDumpSymbolicNameBufferSize >= (IMG_UINT32)(21 + OSStringLength(psDevId->pszPDumpDevName)));
+
+	/* The page catalogue symbolic name is formed from the PC phys addr
+	   prefixed with "MMUPC_". */
+
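+	/* e.g. ":<PDumpDevName>:MMUPC_00000000DEADB000" (device name and
+	   address purely illustrative) */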
+	uiCount = OSSNPrintf(pszPDumpSymbolicNameBuffer,
+	                     uiPDumpSymbolicNameBufferSize,
+	                     ":%s:%s%016"IMG_UINT64_FMTSPECX,
+	                     psDevId->pszPDumpDevName,
+	                     psMMUContext->sBaseLevelInfo.sMemDesc.bValid ? "MMUPC_" : "XXX",
+	                     ui64PhysAddr);
+
+	if (uiCount + 1 > uiPDumpSymbolicNameBufferSize)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*
+	MMU_PDumpWritePageCatBase
+ */
+PVRSRV_ERROR
+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+                          const IMG_CHAR *pszSpaceName,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT32 ui32WordSize,
+                          IMG_UINT32 ui32AlignShift,
+                          IMG_UINT32 ui32Shift,
+                          PDUMP_FLAGS_T uiPdumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszPageCatBaseSymbolicAddr[100];
+	const IMG_CHAR *pszPDumpDevName = psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName;
+
+	eError = MMU_ContextDerivePCPDumpSymAddr(psMMUContext,
+	                                         &aszPageCatBaseSymbolicAddr[0],
+	                                         sizeof(aszPageCatBaseSymbolicAddr));
+	if (eError == PVRSRV_OK)
+	{
+		eError = PDumpWriteSymbAddress(pszSpaceName,
+		                               uiOffset,
+		                               aszPageCatBaseSymbolicAddr,
+		                               0, /* offset -- Could be non-zero for var. pgsz */
+		                               pszPDumpDevName,
+		                               ui32WordSize,
+		                               ui32AlignShift,
+		                               ui32Shift,
+		                               uiPdumpFlags | PDUMP_FLAGS_CONTINUOUS);
+	}
+
+	return eError;
+}
+
+/*
+	MMU_AcquirePDumpMMUContext
+ */
+PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext,
+                                        IMG_UINT32 *pui32PDumpMMUContextID)
+{
+	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+	if (!psMMUContext->ui32PDumpContextIDRefCount)
+	{
+		PDUMP_MMU_ALLOC_MMUCONTEXT(psDevId->pszPDumpDevName,
+		                           psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr,
+		                           psMMUContext->psDevAttrs->eMMUType,
+		                           &psMMUContext->uiPDumpContextID);
+	}
+
+	psMMUContext->ui32PDumpContextIDRefCount++;
+	*pui32PDumpMMUContextID = psMMUContext->uiPDumpContextID;
+
+	return PVRSRV_OK;
+}
+
+/*
+	MMU_ReleasePDumpMMUContext
+ */
+PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext)
+{
+	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+	PVR_ASSERT(psMMUContext->ui32PDumpContextIDRefCount != 0);
+	psMMUContext->ui32PDumpContextIDRefCount--;
+
+	if (psMMUContext->ui32PDumpContextIDRefCount == 0)
+	{
+		PDUMP_MMU_FREE_MMUCONTEXT(psDevId->pszPDumpDevName,
+		                          psMMUContext->uiPDumpContextID);
+	}
+
+	return PVRSRV_OK;
+}
+#endif
+
+/******************************************************************************
+ End of file (mmu_common.c)
+ ******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mmu_common.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mmu_common.h
new file mode 100644
index 0000000..4e80208
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mmu_common.h
@@ -0,0 +1,755 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common MMU Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef MMU_COMMON_H
+#define MMU_COMMON_H
+
+/*
+	The Memory Management Unit (MMU) performs device virtual to physical
+	translation.
+
+	Terminology:
+	 - page catalogue, PC	(optional, 3 tier MMU)
+	 - page directory, PD
+	 - page table, PT (can be variable sized)
+	 - data page, DP (can be variable sized)
+	Note: PD and PC are fixed size and can't be larger than the native
+	      physical (CPU) page size
+	Shifts and AlignShift variables:
+	 - 'xxxShift' represent the number of bits a bitfield is shifted left from bit0
+	 - 'xxxAlignShift' is used to convert a bitfield (based at bit0) into byte units
+	   by applying a bit shift left by 'xxxAlignShift' bits
+*/
+
+/*
+	Device Virtual Address Config:
+
+	Incoming Device Virtual Address is deconstructed into up to 4
+	fields, where the virtual address is up to 64bits:
+	MSB-----------------------------------------------LSB
+	| PC Index:   | PD Index:  | PT Index: | DP offset: |
+	| d bits      | c bits     | b-v bits  |  a+v bits  |
+	-----------------------------------------------------
+	where v is the variable page table modifier, e.g.
+			v == 0 -> 4KB DP
+			v == 2 -> 16KB DP
+			v == 4 -> 64KB DP
+			v == 6 -> 256KB DP
+			v == 8 -> 1MB DP
+			v == 10 -> 4MB DP
+*/
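+
+/*
+	Illustrative example (hypothetical numbers, not a real configuration):
+	for a 40-bit virtual address with 4KB data pages (v == 0), one
+	possible split is d = 10, c = 9, b = 9 and a = 12 bits. An index is
+	then recovered by masking and shifting, e.g.
+
+		uiPTIndex = (IMG_UINT32)((ui64DevVAddr & uiPTIndexMask)
+		                         >> uiPTIndexShift);
+
+	which is exactly what the masks and shifts in MMU_DEVVADDR_CONFIG
+	below describe.
+*/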
+
+/* services/server/include/ */
+#include "pmr.h"
+
+/* include/ */
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_error.h"
+#include "servicesext.h"
+
+
+/*!
+	The level of the MMU
+*/
+typedef enum
+{
+	MMU_LEVEL_0 = 0,	/* Level 0 = Page */
+
+	MMU_LEVEL_1,
+	MMU_LEVEL_2,
+	MMU_LEVEL_3,
+	MMU_LEVEL_LAST
+} MMU_LEVEL;
+
+/* moved after declaration of MMU_LEVEL, as pdump_mmu.h references it */
+#include "pdump_mmu.h"
+
+#define MMU_MAX_LEVEL 3
+
+typedef struct _MMU_LEVEL_DATA_
+{
+	IMG_UINT32	ui32Index;
+	IMG_UINT32	ui32NumOfEntries;
+	IMG_CHAR const	*psDebugStr;
+	IMG_UINT8	uiBytesPerEntry;
+	IMG_UINT64	ui64Address;
+} MMU_LEVEL_DATA;
+
+typedef enum _MMU_FAULT_TYPE_
+{
+	MMU_FAULT_TYPE_UNKNOWN = 0, /* If fault is not analysed by Host */
+	MMU_FAULT_TYPE_PM,
+	MMU_FAULT_TYPE_NON_PM,
+} MMU_FAULT_TYPE;
+
+typedef struct _MMU_FAULT_DATA_
+{
+	MMU_LEVEL	eTopLevel;
+	MMU_FAULT_TYPE	eType;
+	MMU_LEVEL_DATA	sLevelData[MMU_LEVEL_LAST];
+} MMU_FAULT_DATA;
+
+struct _MMU_DEVVADDR_CONFIG_;
+
+/*!
+	MMU device attributes. This structure is the interface between the generic
+	MMU code and the device specific MMU code.
+*/
+typedef struct _MMU_DEVICEATTRIBS_
+{
+	PDUMP_MMU_TYPE eMMUType;
+
+	IMG_CHAR *pszMMUPxPDumpMemSpaceName;
+
+	/*! The type of the top level object */
+	MMU_LEVEL eTopLevel;
+
+	/*! Alignment requirement of the base object */
+	IMG_UINT32 ui32BaseAlign;
+
+	/*! HW config of the base object */
+	struct _MMU_PxE_CONFIG_ *psBaseConfig;
+
+	/*! Address split for the base object */
+	const struct _MMU_DEVVADDR_CONFIG_ *psTopLevelDevVAddrConfig;
+
+	/*! Callback for creating protection bits for the page catalogue entry with 8 byte entry */
+	IMG_UINT64 (*pfnDerivePCEProt8)(IMG_UINT32, IMG_UINT32);
+	/*! Callback for creating protection bits for the page catalogue entry with 4 byte entry */
+	IMG_UINT32 (*pfnDerivePCEProt4)(IMG_UINT32);
+	/*! Callback for creating protection bits for the page directory entry with 8 byte entry */
+	IMG_UINT64 (*pfnDerivePDEProt8)(IMG_UINT32, IMG_UINT32);
+	/*! Callback for creating protection bits for the page directory entry with 4 byte entry */
+	IMG_UINT32 (*pfnDerivePDEProt4)(IMG_UINT32);
+	/*! Callback for creating protection bits for the page table entry with 8 byte entry */
+	IMG_UINT64 (*pfnDerivePTEProt8)(IMG_UINT32, IMG_UINT32);
+	/*! Callback for creating protection bits for the page table entry with 4 byte entry */
+	IMG_UINT32 (*pfnDerivePTEProt4)(IMG_UINT32);
+
+	/*! Callback for getting the MMU configuration based on the specified page size */
+	PVRSRV_ERROR (*pfnGetPageSizeConfiguration)(IMG_UINT32 ui32DataPageSize,
+												const struct _MMU_PxE_CONFIG_ **ppsMMUPDEConfig,
+												const struct _MMU_PxE_CONFIG_ **ppsMMUPTEConfig,
+												const struct _MMU_DEVVADDR_CONFIG_ **ppsMMUDevVAddrConfig,
+												IMG_HANDLE *phPriv2);
+	/*! Callback for putting the MMU configuration obtained from pfnGetPageSizeConfiguration */
+	PVRSRV_ERROR (*pfnPutPageSizeConfiguration)(IMG_HANDLE hPriv);
+
+	/*! Callback for getting the page size from the PDE for the page table entry with 4 byte entry */
+	PVRSRV_ERROR (*pfnGetPageSizeFromPDE4)(IMG_UINT32, IMG_UINT32 *);
+	/*! Callback for getting the page size from the PDE for the page table entry with 8 byte entry */
+	PVRSRV_ERROR (*pfnGetPageSizeFromPDE8)(IMG_UINT64, IMG_UINT32 *);
+
+	/*! Private data handle */
+	IMG_HANDLE hGetPageSizeFnPriv;
+} MMU_DEVICEATTRIBS;
+
+/*!
+	MMU virtual address split
+*/
+typedef struct _MMU_DEVVADDR_CONFIG_
+{
+	/*! Page catalogue index mask */
+	IMG_UINT64	uiPCIndexMask;
+	/*! Page catalogue index shift */
+	IMG_UINT8	uiPCIndexShift;
+	/*! Total number of PC entries */
+	IMG_UINT32  uiNumEntriesPC;
+	/*! Page directory index mask */
+	IMG_UINT64	uiPDIndexMask;
+	/*! Page directory index shift */
+	IMG_UINT8	uiPDIndexShift;
+	/*! Total number of PD entries */
+	IMG_UINT32  uiNumEntriesPD;
+	/*! Page table index mask */
+	IMG_UINT64	uiPTIndexMask;
+	/*! Page table index shift */
+	IMG_UINT8	uiPTIndexShift;
+	/*! Total number of PT entries */
+	IMG_UINT32  uiNumEntriesPT;
+	/*! Page offset mask */
+	IMG_UINT64	uiPageOffsetMask;
+	/*! Page offset shift */
+	IMG_UINT8	uiPageOffsetShift;
+	/*! First virtual address mappable for this config */
+	IMG_UINT64  uiOffsetInBytes;
+
+} MMU_DEVVADDR_CONFIG;
+
+/*
+	P(C/D/T) Entry Config:
+
+	MSB-----------------------------------------------LSB
+	| PT Addr:   | variable PT ctrl | protection flags: |
+	| bits c+v   | b bits           | a bits            |
+	-----------------------------------------------------
+	where v is the variable page table modifier and is optional
+*/
+/*!
+	Generic MMU entry description. This is used to describe PC, PD and PT entries.
+*/
+typedef struct _MMU_PxE_CONFIG_
+{
+	IMG_UINT8	uiBytesPerEntry;  /*! Size of an entry in bytes */
+
+	IMG_UINT64	 uiAddrMask;      /*! Physical address mask */
+	IMG_UINT8	 uiAddrShift;     /*! Physical address shift */
+	IMG_UINT8	 uiAddrLog2Align; /*! Physical address Log 2 alignment */
+
+	IMG_UINT64	 uiVarCtrlMask;	  /*! Variable control mask */
+	IMG_UINT8	 uiVarCtrlShift;  /*! Variable control shift */
+
+	IMG_UINT64	 uiProtMask;      /*! Protection flags mask */
+	IMG_UINT8	 uiProtShift;     /*! Protection flags shift */
+
+	IMG_UINT64   uiValidEnMask;   /*! Entry valid bit mask */
+	IMG_UINT8    uiValidEnShift;  /*! Entry valid bit shift */
+} MMU_PxE_CONFIG;
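+
+/*
+	Illustrative sketch (assumed field layout, not a real configuration):
+	for an 8-byte PTE whose physical address field occupies bits 39..12,
+	uiAddrMask would be 0xFFFFFFF000 with uiAddrShift == 12 and
+	uiAddrLog2Align == 12, so the backing physical address of an entry
+	is recovered as
+
+		ui64Addr = ((ui64PxE & uiAddrMask) >> uiAddrShift)
+		           << uiAddrLog2Align;
+
+	i.e. mask out the field, shift it down to bit 0, then shift left by
+	the log2 alignment to get back to byte units.
+*/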
+
+/* MMU Protection flags */
+
+
+/* These are specified generically and in a h/w independent way, and
+   are interpreted at each level (PC/PD/PT) separately. */
+
+/* The following flags are for internal use only, and should not
+   traverse the API */
+#define MMU_PROTFLAGS_INVALID 0x80000000U
+
+typedef IMG_UINT32 MMU_PROTFLAGS_T;
+
+/* The following flags should be supplied by the caller: */
+#define MMU_PROTFLAGS_READABLE	   				(1U<<0)
+#define MMU_PROTFLAGS_WRITEABLE		   		    (1U<<1)
+#define MMU_PROTFLAGS_CACHE_COHERENT			(1U<<2)
+#define MMU_PROTFLAGS_CACHED					(1U<<3)
+
+/* Device specific flags*/
+#define MMU_PROTFLAGS_DEVICE_OFFSET		16
+#define MMU_PROTFLAGS_DEVICE_MASK		0x000f0000UL
+#define MMU_PROTFLAGS_DEVICE(n)	\
+			(((n) << MMU_PROTFLAGS_DEVICE_OFFSET) & \
+			MMU_PROTFLAGS_DEVICE_MASK)
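+
+/*
+	Example (illustrative only): a cached, readable and writeable mapping
+	carrying device-specific value 2 would be described as
+
+		MMU_PROTFLAGS_READABLE | MMU_PROTFLAGS_WRITEABLE |
+		MMU_PROTFLAGS_CACHED | MMU_PROTFLAGS_DEVICE(2)
+
+	and translated into hardware entry bits by the per-level
+	pfnDerivePCEProt/pfnDerivePDEProt/pfnDerivePTEProt callbacks in
+	MMU_DEVICEATTRIBS.
+*/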
+
+
+typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
+
+struct _PVRSRV_DEVICE_NODE_;
+
+typedef struct _MMU_PAGESIZECONFIG_
+{
+	const MMU_PxE_CONFIG *psPDEConfig;
+	const MMU_PxE_CONFIG *psPTEConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	IMG_UINT32 uiRefCount;
+	IMG_UINT32 uiMaxRefCount;
+} MMU_PAGESIZECONFIG;
+
+/*************************************************************************/ /*!
+@Function       MMU_ContextCreate
+
+@Description    Create a new MMU context
+
+@Input          psDevNode               Device node of the device to create the
+                                        MMU context for
+
+@Output         ppsMMUContext           The created MMU context
+
+@Return         PVRSRV_OK if the MMU context was successfully created
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_ContextCreate (struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                   MMU_CONTEXT **ppsMMUContext,
+                   MMU_DEVICEATTRIBS *psDevAttrs);
+
+
+/*************************************************************************/ /*!
+@Function       MMU_ContextDestroy
+
+@Description    Destroy a MMU context
+
+@Input          psMMUContext            MMU context to destroy
+
+@Return         None
+*/
+/*****************************************************************************/
+extern void
+MMU_ContextDestroy (MMU_CONTEXT *psMMUContext);
+
+/*************************************************************************/ /*!
+@Function       MMU_Alloc
+
+@Description    Allocate the page tables required for the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uSize                   The size of the allocation
+
+@Output         puActualSize            Actual size of allocation
+
+@Input          uiProtFlags             Generic MMU protection flags
+
+@Input          uDevVAddrAlignment      Alignment requirement of the virtual
+                                        allocation
+
+@Input          psDevVAddr              Virtual address to start the allocation
+                                        from
+
+@Return         PVRSRV_OK if the allocation of the page tables was successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_Alloc (MMU_CONTEXT *psMMUContext,
+           IMG_DEVMEM_SIZE_T uSize,
+           IMG_DEVMEM_SIZE_T *puActualSize,
+           IMG_UINT32 uiProtFlags,
+           IMG_DEVMEM_SIZE_T uDevVAddrAlignment,
+           IMG_DEV_VIRTADDR *psDevVAddr,
+           IMG_UINT32 uiLog2PageSize);
+
+
+/*************************************************************************/ /*!
+@Function       MMU_Free
+
+@Description    Free the page tables of the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          psDevVAddr              Virtual address to start the free
+                                        from
+
+@Input          uSize                   The size of the allocation
+
+@Return         None
+*/
+/*****************************************************************************/
+extern void
+MMU_Free (MMU_CONTEXT *psMMUContext,
+          IMG_DEV_VIRTADDR sDevVAddr,
+          IMG_DEVMEM_SIZE_T uiSize,
+          IMG_UINT32 uiLog2DataPageSize);
+
+
+/*************************************************************************/ /*!
+@Function       MMU_MapPages
+
+@Description    Map pages to the MMU.
+                Two modes of operation: One requires a list of physical page
+                indices that are going to be mapped, the other just takes
+                the PMR and a possible offset to map parts of it.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uiMappingFlags          Memalloc flags for the mapping
+
+@Input          sDevVAddrBase           Device virtual address of the 1st page
+
+@Input          psPMR                   PMR to map
+
+@Input          ui32PhysPgOffset        Physical offset into the PMR
+
+@Input          ui32MapPageCount        Number of pages to map
+
+@Input          paui32MapIndices        List of page indices to map,
+                                         can be NULL
+
+@Input          uiLog2PageSize          Log2 page size of the pages to map
+
+@Return         PVRSRV_OK if the mapping was successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_MapPages(MMU_CONTEXT *psMMUContext,
+             PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+             IMG_DEV_VIRTADDR sDevVAddrBase,
+             PMR *psPMR,
+             IMG_UINT32 ui32PhysPgOffset,
+             IMG_UINT32 ui32MapPageCount,
+             IMG_UINT32 *paui32MapIndices,
+             IMG_UINT32 uiLog2PageSize);
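+
+/* Usage note: with paui32MapIndices == NULL the call maps
+   ui32MapPageCount pages of the PMR starting at physical page
+   ui32PhysPgOffset; with a non-NULL index list it maps exactly the
+   listed pages instead. */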
+
+/*************************************************************************/ /*!
+@Function       MMU_UnmapPages
+
+@Description    Unmap pages from the MMU.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uiMappingFlags          Memalloc flags for the mapping
+
+@Input          psDevVAddr              Device virtual address of the 1st page
+
+@Input          ui32PageCount           Number of pages to unmap
+
+@Input          pai32UnmapIndicies      Array of page indices to be unmapped
+
+@Input          uiLog2PageSize          log2 size of the page
+
+
+@Input          uiMemAllocFlags         Indicates if the unmapped regions need
+                                        to be backed by dummy or zero page
+
+@Return         None
+*/
+/*****************************************************************************/
+extern void
+MMU_UnmapPages (MMU_CONTEXT *psMMUContext,
+                PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+                IMG_DEV_VIRTADDR sDevVAddr,
+                IMG_UINT32 ui32PageCount,
+                IMG_UINT32 *pai32UnmapIndicies,
+                IMG_UINT32 uiLog2PageSize,
+                PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags);
+
+/*************************************************************************/ /*!
+@Function       MMU_MapPMRFast
+
+@Description    Map a PMR into the MMU. The PMR must not be sparse.
+                This is supposed to cover most mappings and, as the name suggests,
+                should be as fast as possible.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddr               Device virtual address to map the PMR
+                                        into
+
+@Input          psPMR                   PMR to map
+
+@Input          uiSizeBytes             Size in bytes to map
+
+@Input          uiMappingFlags          Memalloc flags for the mapping
+
+@Return         PVRSRV_OK if the PMR was successfully mapped
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_MapPMRFast (MMU_CONTEXT *psMMUContext,
+                IMG_DEV_VIRTADDR sDevVAddr,
+                const PMR *psPMR,
+                IMG_DEVMEM_SIZE_T uiSizeBytes,
+                PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+                IMG_UINT32 uiLog2PageSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_UnmapPMRFast
+
+@Description    Unmap pages from the MMU as fast as possible.
+                PMR must be non-sparse!
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrBase           Device virtual address of the 1st page
+
+@Input          ui32PageCount           Number of pages to unmap
+
+@Input          uiLog2PageSize          log2 size of the page
+
+@Return         None
+*/
+/*****************************************************************************/
+extern void
+MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext,
+                 IMG_DEV_VIRTADDR sDevVAddrBase,
+                 IMG_UINT32 ui32PageCount,
+                 IMG_UINT32 uiLog2PageSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_ChangeValidity
+
+@Description    Sets or unsets the valid bit of page table entries for a given
+                address range.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddr               The device virtual base address of
+                                        the range we want to modify
+
+@Input          uiSizeBytes             The size of the range in bytes
+
+@Input          uiLog2PageSize          Log2 of the used page size
+
+@Input          bMakeValid              Choose to set or unset the valid bit.
+                                        (bMakeValid == IMG_TRUE ) -> SET
+                                        (bMakeValid == IMG_FALSE) -> UNSET
+
+@Input          psPMR                   The PMR backing the allocation.
+                                        Needed in case we have sparse memory
+                                        where we have to check whether a physical
+                                        address actually backs the virtual.
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
+                   IMG_DEV_VIRTADDR sDevVAddr,
+                   IMG_DEVMEM_SIZE_T uiSizeBytes,
+                   IMG_UINT32 uiLog2PageSize,
+                   IMG_BOOL bMakeValid,
+                   PMR *psPMR);
+
+/*************************************************************************/ /*!
+@Function       MMU_AcquireBaseAddr
+
+@Description    Acquire the device physical address of the base level MMU object
+
+@Input          psMMUContext            MMU context to operate on
+
+@Output         psPhysAddr              Device physical address of the base level
+                                        MMU object
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr);
+
+/*************************************************************************/ /*!
+@Function       MMU_ReleaseBaseAddr
+
+@Description    Release the device physical address of the base level MMU object
+
+@Input          psMMUContext            MMU context to operate on
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+void
+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/***********************************************************************************/ /*!
+@Function       MMU_SetOSid
+
+@Description    Set the OSid associated with the application (and the MMU Context)
+
+@Input          psMMUContext            MMU context to store the OSid on
+
+@Input          ui32OSid                the OSid in question
+
+@Input          ui32OSidReg             The value that the firmware will assign to the
+                                        registers.
+
+@Input          bOSidAxiProt            Toggles whether the AXI prot bit will be set or
+                                        not.
+@Return None
+*/
+/***********************************************************************************/
+
+void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid,
+                  IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt);
+
+/***********************************************************************************/ /*!
+@Function       MMU_GetOSid
+
+@Description    Retrieve the OSid associated with the MMU context.
+
+@Input          psMMUContext            MMU context in which the OSid is stored
+
+@Output         pui32OSid               The OSid in question
+
+@Output         pui32OSidReg            The OSid that the firmware will assign to the
+                                        registers.
+
+@Output         pbOSidAxiProt           Toggles whether the AXI prot bit will be set or
+                                        not.
+@Return None
+*/
+/***********************************************************************************/
+
+void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 * pui32OSid,
+                  IMG_UINT32 * pui32OSidReg, IMG_BOOL *pbOSidAxiProt);
+#endif
+
+/*************************************************************************/ /*!
+@Function       MMU_SetDeviceData
+
+@Description    Set the device specific callback data
+
+@Input          psMMUContext            MMU context to store the data on
+
+@Input          hDevData                Device data
+
+@Return         None
+*/
+/*****************************************************************************/
+void MMU_SetDeviceData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hDevData);
+
+/*************************************************************************/ /*!
+@Function       MMU_CheckFaultAddress
+
+@Description    Check the specified MMU context to see if the provided address
+                should be valid
+
+@Input          psMMUContext            MMU context to check the address against
+
+@Input          psDevVAddr              Address to check
+
+@Input          pfnDumpDebugPrintf      Debug print function
+
+@Input          pvDumpDebugFile         Optional file identifier to be passed
+                                        to the debug print function if required
+
+@Output         psOutFaultData          To store fault details after checking
+
+@Return         None
+*/
+/*****************************************************************************/
+void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext,
+                           IMG_DEV_VIRTADDR *psDevVAddr,
+                           DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                           void *pvDumpDebugFile,
+                           MMU_FAULT_DATA *psOutFaultData);
+
+/*************************************************************************/ /*!
+@Function       MMU_IsVDevAddrValid
+@Description    Checks if the given address is valid.
+@Input          psMMUContext MMU context to operate on
+@Input          uiLog2PageSize Log2 page size of the mapping
+@Input          sDevVAddr Address to check
+@Return         IMG_TRUE if the address is valid
+*/ /**************************************************************************/
+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
+                             IMG_UINT32 uiLog2PageSize,
+                             IMG_DEV_VIRTADDR sDevVAddr);
+
+
+#if defined(PDUMP)
+IMG_CHAR *MMU_GetPxPDumpMemSpaceName(MMU_CONTEXT *psMMUContext);
+
+/*************************************************************************/ /*!
+@Function       MMU_ContextDerivePCPDumpSymAddr
+
+@Description    Derives a PDump Symbolic address for the top level MMU object
+
+@Input          psMMUContext                    MMU context to operate on
+
+@Input          pszPDumpSymbolicNameBuffer      Buffer to write the PDump symbolic
+                                                address to
+
+@Input          uiPDumpSymbolicNameBufferSize   Size of the buffer
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
+                                                    IMG_CHAR *pszPDumpSymbolicNameBuffer,
+                                                    size_t uiPDumpSymbolicNameBufferSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_PDumpWritePageCatBase
+
+@Description    PDump write of the top level MMU object to a device register
+
+@Input          psMMUContext        MMU context to operate on
+
+@Input          pszSpaceName        PDump name of the mem/reg space
+
+@Input          uiOffset            Offset to write the address to
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+                                       const IMG_CHAR *pszSpaceName,
+                                       IMG_DEVMEM_OFFSET_T uiOffset,
+                                       IMG_UINT32 ui32WordSize,
+                                       IMG_UINT32 ui32AlignShift,
+                                       IMG_UINT32 ui32Shift,
+                                       PDUMP_FLAGS_T uiPdumpFlags);
+
+/*************************************************************************/ /*!
+@Function       MMU_AcquirePDumpMMUContext
+
+@Description    Acquire a reference to the PDump MMU context for this MMU
+                context
+
+@Input          psMMUContext            MMU context to operate on
+
+@Output         pui32PDumpMMUContextID  PDump MMU context ID
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext,
+                                        IMG_UINT32 *pui32PDumpMMUContextID);
+
+/*************************************************************************/ /*!
+@Function       MMU_ReleasePDumpMMUContext
+
+@Description    Release a reference to the PDump MMU context for this MMU context
+
+@Input          psMMUContext            MMU context to operate on
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext);
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(MMU_PDumpWritePageCatBase)
+#endif
+static INLINE PVRSRV_ERROR
+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+                          const IMG_CHAR *pszSpaceName,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT32 ui32WordSize,
+                          IMG_UINT32 ui32AlignShift,
+                          IMG_UINT32 ui32Shift,
+                          PDUMP_FLAGS_T uiPdumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMMUContext);
+	PVR_UNREFERENCED_PARAMETER(pszSpaceName);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32WordSize);
+	PVR_UNREFERENCED_PARAMETER(ui32AlignShift);
+	PVR_UNREFERENCED_PARAMETER(ui32Shift);
+	PVR_UNREFERENCED_PARAMETER(uiPdumpFlags);
+
+	return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+#endif /* MMU_COMMON_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/module_common.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/module_common.c
new file mode 100644
index 0000000..e60e06d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/module_common.c
@@ -0,0 +1,522 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common linux module setup
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+
+#include "pvr_debugfs.h"
+#include "private_data.h"
+#include "linkage.h"
+#include "power.h"
+#include "env_connection.h"
+#include "process_stats.h"
+#include "module_common.h"
+#include "pvrsrv.h"
+#include "srvcore.h"
+#if defined(SUPPORT_RGX)
+#include "rgxdevice.h"
+#endif
+#include "pvrsrv_error.h"
+#include "pvr_drv.h"
+#include "pvr_bridge_k.h"
+
+#include <pvr_fence.h>
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+#include "ospvr_gputrace.h"
+
+#include "km_apphint.h"
+#include "srvinit.h"
+
+#include "htb_debug.h"
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+/* Display class interface */
+#include "kerneldisplay.h"
+EXPORT_SYMBOL(DCRegisterDevice);
+EXPORT_SYMBOL(DCUnregisterDevice);
+EXPORT_SYMBOL(DCDisplayConfigurationRetired);
+EXPORT_SYMBOL(DCDisplayHasPendingCommand);
+EXPORT_SYMBOL(DCImportBufferAcquire);
+EXPORT_SYMBOL(DCImportBufferRelease);
+
+/* Physmem interface (required by LMA DC drivers) */
+#include "physheap.h"
+EXPORT_SYMBOL(PhysHeapAcquire);
+EXPORT_SYMBOL(PhysHeapRelease);
+EXPORT_SYMBOL(PhysHeapGetType);
+EXPORT_SYMBOL(PhysHeapRegionGetCpuPAddr);
+EXPORT_SYMBOL(PhysHeapRegionGetSize);
+EXPORT_SYMBOL(PhysHeapCpuPAddrToDevPAddr);
+
+EXPORT_SYMBOL(PVRSRVGetDriverStatus);
+EXPORT_SYMBOL(PVRSRVSystemInstallDeviceLISR);
+EXPORT_SYMBOL(PVRSRVSystemUninstallDeviceLISR);
+
+#include "pvr_notifier.h"
+EXPORT_SYMBOL(PVRSRVCheckStatus);
+
+#include "pvr_debug.h"
+EXPORT_SYMBOL(PVRSRVGetErrorString);
+#endif /* defined(SUPPORT_DISPLAY_CLASS) */
+
+#include "rgxapi_km.h"
+#if defined(SUPPORT_SHARED_SLC)
+EXPORT_SYMBOL(RGXInitSLC);
+#endif
+
+#if defined(SUPPORT_RGX)
+EXPORT_SYMBOL(RGXHWPerfConnect);
+EXPORT_SYMBOL(RGXHWPerfDisconnect);
+EXPORT_SYMBOL(RGXHWPerfControl);
+EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCounters);
+EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCustomCounters);
+EXPORT_SYMBOL(RGXHWPerfDisableCounters);
+EXPORT_SYMBOL(RGXHWPerfAcquireEvents);
+EXPORT_SYMBOL(RGXHWPerfReleaseEvents);
+EXPORT_SYMBOL(RGXHWPerfConvertCRTimeStamp);
+#if defined(SUPPORT_KERNEL_HWPERF_TEST)
+EXPORT_SYMBOL(OSAddTimer);
+EXPORT_SYMBOL(OSEnableTimer);
+EXPORT_SYMBOL(OSDisableTimer);
+EXPORT_SYMBOL(OSRemoveTimer);
+#endif
+#endif
+
+CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile)
+{
+	if (pFile)
+	{
+		struct drm_file *psDRMFile = pFile->private_data;
+
+		return psDRMFile->driver_priv;
+	}
+
+	return NULL;
+}
+
+struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection)
+{
+	ENV_CONNECTION_DATA *psEnvConnection;
+
+	psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+	PVR_ASSERT(psEnvConnection != NULL);
+
+	return psEnvConnection->psFile;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDriverInit
+@Description  Common one time driver initialisation
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDriverInit(void)
+{
+	PVRSRV_ERROR pvrerr;
+	int error = 0;
+
+	error = PVRDebugFSInit();
+	if (error != 0)
+	{
+		return error;
+	}
+
+	if (HTB_CreateFSEntry() != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	if (PVRSRVStatsInitialise() != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+#endif
+
+	if (PVROSFuncInit() != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+
+	error = pvr_apphint_init();
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: failed AppHint setup(%d)",
+			 __func__, error));
+	}
+
+#if defined(SUPPORT_RGX)
+	pvrerr = PVRGpuTraceSupportInit();
+	if (pvrerr != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+#endif
+
+	pvrerr = PVRSRVDriverInit();
+	if (pvrerr != PVRSRV_OK)
+	{
+		return -ENODEV;
+	}
+
+#if defined(SUPPORT_RGX)
+	/* Called here because we need to handle apphint input from the file
+	 * even before the devices are initialised.
+	 * Note: we're not passing a device node because the apphint
+	 * callbacks don't need it. */
+	PVRGpuTraceInitAppHintCallbacks(NULL);
+#endif
+
+	return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDriverDeinit
+@Description  Common one time driver de-initialisation
+@Return       void
+*/ /***************************************************************************/
+void PVRSRVCommonDriverDeinit(void)
+{
+	PVRSRVDriverDeInit();
+
+#if defined(SUPPORT_RGX)
+	PVRGpuTraceSupportDeInit();
+#endif
+
+	pvr_apphint_deinit();
+
+	PVROSFuncDeInit();
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	PVRSRVStatsDestroy();
+#endif
+
+	HTB_DestroyFSEntry();
+
+	PVRDebugFSDeInit();
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceInit
+@Description  Common device related initialisation.
+@Input        psDeviceNode  The device node for which initialisation should be
+                            performed
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	int error = 0;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	{
+		PVRSRV_ERROR eError = pvr_sync_init(psDeviceNode->psDevConfig->pvOSDevice);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: unable to create sync (%d)",
+					 __func__, eError));
+			return -EBUSY;
+		}
+	}
+#endif
+
+	error = PVRDebugCreateDebugFSEntries();
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: failed to create default debugfs entries (%d)",
+			 __func__, error));
+	}
+
+#if defined(SUPPORT_RGX)
+	error = PVRGpuTraceInitDevice(psDeviceNode);
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: failed to initialise PVR GPU Tracing on device %d (%d)",
+			 __func__, psDeviceNode->sDevId.i32UMIdentifier, error));
+	}
+#endif
+
+	/* register the AppHint device control before device initialisation
+	 * so individual AppHints can be configured during the init phase
+	 */
+	error = pvr_apphint_device_register(psDeviceNode);
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: failed to initialise device AppHints (%d)",
+			 __func__, error));
+	}
+
+	return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceDeinit
+@Description  Common device related de-initialisation.
+@Input        psDeviceNode  The device node for which de-initialisation should
+                            be performed
+@Return       void
+*/ /***************************************************************************/
+void PVRSRVCommonDeviceDeinit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	pvr_apphint_device_unregister(psDeviceNode);
+
+#if defined(SUPPORT_RGX)
+	PVRGpuTraceDeInitDevice(psDeviceNode);
+#endif
+
+	PVRDebugRemoveDebugFSEntries();
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	pvr_sync_deinit();
+#endif
+
+	pvr_fence_cleanup();
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceShutdown
+@Description  Common device shutdown.
+@Input        psDeviceNode  The device node representing the device that should
+                            be shutdown
+@Return       void
+*/ /***************************************************************************/
+
+void PVRSRVCommonDeviceShutdown(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+
+	/*
+	 * Disable the bridge to stop processes trying to use the driver
+	 * after it has been shut down.
+	 */
+	eError = LinuxBridgeBlockClientsAccess(IMG_TRUE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed to suspend driver (%d)",
+			__func__, eError));
+		return;
+	}
+
+	(void) PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+										   PVRSRV_SYS_POWER_STATE_OFF);
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceSuspend
+@Description  Common device suspend.
+@Input        psDeviceNode  The device node representing the device that should
+                            be suspended
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceSuspend(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	/*
+	 * LinuxBridgeBlockClientsAccess prevents processes from using the driver
+	 * while it's suspended (this is needed for Android). Acquire the bridge
+	 * lock first to ensure the driver isn't currently in use.
+	 */
+
+	LinuxBridgeBlockClientsAccess(IMG_FALSE);
+
+	if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+										PVRSRV_SYS_POWER_STATE_OFF) != PVRSRV_OK)
+	{
+		LinuxBridgeUnblockClientsAccess();
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceResume
+@Description  Common device resume.
+@Input        psDeviceNode  The device node representing the device that should
+                            be resumed
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceResume(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+										PVRSRV_SYS_POWER_STATE_ON) != PVRSRV_OK)
+	{
+		return -EINVAL;
+	}
+
+	LinuxBridgeUnblockClientsAccess();
+
+	/*
+	 * Reprocess the device queues in case commands were blocked during
+	 * suspend.
+	 */
+	if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE)
+	{
+		PVRSRVCheckStatus(NULL);
+	}
+
+	return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceOpen
+@Description  Common device open.
+@Input        psDeviceNode  The device node representing the device being
+                            opened by a user mode process
+@Input        psDRMFile     The DRM file data that backs the file handle
+                            returned to the user mode process
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceOpen(PVRSRV_DEVICE_NODE *psDeviceNode,
+						   struct drm_file *psDRMFile)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	ENV_CONNECTION_PRIVATE_DATA sPrivData;
+	void *pvConnectionData;
+	PVRSRV_ERROR eError;
+	int iErr = 0;
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#endif
+
+	if (!psPVRSRVData)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: No device data", __func__));
+		iErr = -ENODEV;
+		goto e1;
+	}
+
+	/*
+	 * If a previous attempt already set the state to bad, there is no
+	 * point in trying a second time, so get out.
+	 */
+	if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_BAD)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Driver already in bad state. Device open failed.",
+				 __func__));
+		iErr = -ENODEV;
+		goto e1;
+	}
+
+	if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
+	{
+		eError = PVRSRVDeviceInitialise(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise device (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+			iErr = -ENODEV;
+			goto e1;
+		}
+
+#if defined(SUPPORT_RGX)
+		PVRGpuTraceInitIfEnabled(psDeviceNode);
+#endif
+	}
+
+	sPrivData.psDevNode = psDeviceNode;
+	sPrivData.psFile = psDRMFile->filp;
+
+	/*
+	 * Here we pass the file pointer, which will be passed through to our
+	 * OSConnectionPrivateDataInit function where we can save it, so
+	 * we can back-reference the file structure from its connection.
+	 */
+	eError = PVRSRVConnectionConnect(&pvConnectionData, (void *) &sPrivData);
+	if (eError != PVRSRV_OK)
+	{
+		iErr = -ENOMEM;
+		goto e1;
+	}
+
+	psDRMFile->driver_priv = pvConnectionData;
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+
+out:
+	return iErr;
+e1:
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+	goto out;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceRelease
+@Description  Common device release.
+@Input        psDeviceNode  The device node for the device that the given file
+                            represents
+@Input        psDRMFile     The DRM file data that's being released
+@Return       void
+*/ /***************************************************************************/
+void PVRSRVCommonDeviceRelease(PVRSRV_DEVICE_NODE *psDeviceNode,
+							   struct drm_file *psDRMFile)
+{
+	void *pvConnectionData;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#endif
+
+	pvConnectionData = psDRMFile->driver_priv;
+	if (pvConnectionData)
+	{
+		PVRSRVConnectionDisconnect(pvConnectionData);
+		psDRMFile->driver_priv = NULL;
+	}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/module_common.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/module_common.h
new file mode 100644
index 0000000..69d55e4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/module_common.h
@@ -0,0 +1,67 @@
+/*************************************************************************/ /*!
+@File           module_common.h
+@Title          Common linux module setup header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _MODULE_COMMON_H_
+#define _MODULE_COMMON_H_
+
+/* DRVNAME is the name we use to register our driver. */
+#define DRVNAME PVR_LDM_DRIVER_REGISTRATION_NAME
+
+struct _PVRSRV_DEVICE_NODE_;
+struct drm_file;
+
+int PVRSRVCommonDriverInit(void);
+void PVRSRVCommonDriverDeinit(void);
+
+int PVRSRVCommonDeviceInit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+void PVRSRVCommonDeviceDeinit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+void PVRSRVCommonDeviceShutdown(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+int PVRSRVCommonDeviceSuspend(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+int PVRSRVCommonDeviceResume(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+int PVRSRVCommonDeviceOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+						   struct drm_file *psDRMFile);
+void PVRSRVCommonDeviceRelease(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+							   struct drm_file *psDRMFile);
+
+#endif /* _MODULE_COMMON_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mtk_gpufreq.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mtk_gpufreq.c
new file mode 100644
index 0000000..5aba40c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mtk_gpufreq.c
@@ -0,0 +1,3247 @@
+/*
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/jiffies.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/input.h>
+#include <linux/sched/rt.h>
+
+#ifdef CONFIG_OF
+#include <linux/of.h>
+#include <linux/of_address.h>
+#endif
+
+#include <linux/uaccess.h>
+
+#include "mtk_gpufreq.h"
+
+/*
+#include "mtk_static_power.h"
+*/
+
+
+#if 0  /* pedro */
+
+#include <mt-plat/upmu_common.h>
+#include "mt-plat/sync_write.h"
+#include "mt-plat/mtk_pmic_wrap.h"
+#include "mt-plat/mtk_chip.h"
+
+#include "mach/mtk_fhreg.h"
+#include "mach/mtk_freqhopping.h"
+
+/* TODO: check this! */
+/* #include "mach/mt_static_power.h" */
+#include "mach/mtk_thermal.h"
+#include "mt8167/include/mach/upmu_sw.h"
+
+#endif  /* pedro */
+
+#include <linux/regulator/consumer.h>
+
+#define DRIVER_NOT_READY	-1
+/*
+ * CONFIG
+ */
+/**************************************************
+ * GPU DVFS input boost feature
+ ***************************************************/
+#define MT_GPUFREQ_INPUT_BOOST
+
+/***************************
+ * Define for dynamic power table update
+ ****************************/
+#define MT_GPUFREQ_DYNAMIC_POWER_TABLE_UPDATE
+
+/* there is no PBM feature in mt8167 */
+#define DISABLE_PBM_FEATURE
+
+#define VGPU_SET_BY_PMIC
+
+/**************************************************
+ * Define register write function
+ ***************************************************/
+#define mt_gpufreq_reg_write(val, addr)		mt_reg_sync_writel((val), ((void *)addr))
+
+/***************************
+ * Operating Point Definition
+ ****************************/
+#define GPUOP(khz, volt, idx)	\
+{				\
+	.gpufreq_khz = khz,	\
+	.gpufreq_volt = volt,	\
+	.gpufreq_idx = idx,	\
+}
+
+/**************************
+ * GPU DVFS OPP table setting
+ ***************************/
+#if 0 /* for E2 */
+#define GPU_DVFS_FREQ0	(598000) /* KHz */
+#define GPU_DVFS_FREQ1	(494000) /* KHz */
+#define GPU_DVFS_FREQ2	(445000) /* KHz */
+#define GPU_DVFS_FREQ3	(396500) /* KHz */
+#define GPU_DVFS_FREQ4	(299000) /* KHz */
+#define GPU_DVFS_FREQ5	(253500) /* KHz */
+#else /* for E1 */
+#define GPU_DVFS_FREQ0_e1	(574000) /* KHz */
+#define GPU_DVFS_FREQ0		(598000) /* KHz */
+#define GPU_DVFS_FREQ1_e1	(468000) /* KHz */
+#define GPU_DVFS_FREQ1		(500500) /* KHz */
+#define GPU_DVFS_FREQ2		(430000) /* KHz */
+#define GPU_DVFS_FREQ3_e1	(390000) /* KHz */
+#define GPU_DVFS_FREQ3		(390000) /* KHz */
+#define GPU_DVFS_FREQ4		(299000) /* KHz */
+#define GPU_DVFS_FREQ5		(253500) /* KHz */
+#endif
+
+#define GPUFREQ_LAST_FREQ_LEVEL	(GPU_DVFS_FREQ5)
+
+#define GPU_DVFS_VOLT0	(130000)    /* mV x 100 */
+#define GPU_DVFS_VOLT1	(125000)    /* mV x 100 */
+#define GPU_DVFS_VOLT2	(115000)    /* mV x 100 */
+
+#define GPU_DVFS_PTPOD_DISABLE_VOLT	GPU_DVFS_VOLT2
+
+#define UNIVPLL_FREQ	GPU_DVFS_FREQ3 /* KHz */
+/*****************************************
+ * PMIC settle time (us), should not be changed
+ ******************************************/
+#ifdef VGPU_SET_BY_PMIC
+#define PMIC_MIN_VGPU GPU_DVFS_VOLT2
+#define PMIC_MAX_VGPU GPU_DVFS_VOLT0
+
+/* mt6799 us * 100 BEGIN */
+#define DELAY_FACTOR 1250
+#define HALF_DELAY_FACTOR 625
+#define BUCK_INIT_US 750
+/* mt6799 END */
+
+#define PMIC_CMD_DELAY_TIME	 5
+#define MIN_PMIC_SETTLE_TIME	25
+#define PMIC_VOLT_UP_SETTLE_TIME(old_volt, new_volt)	\
+	(((((new_volt) - (old_volt)) + 1250 - 1) / 1250) + PMIC_CMD_DELAY_TIME)
+#define PMIC_VOLT_DOWN_SETTLE_TIME(old_volt, new_volt)	\
+	(((((old_volt) - (new_volt)) * 2)  / 625) + PMIC_CMD_DELAY_TIME)
+#define PMIC_VOLT_ON_OFF_DELAY_US	   (200)
+#define INVALID_SLEW_RATE	(0)
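+
+/*
+ * Worked example (values from above): raising VGPU from GPU_DVFS_VOLT2
+ * (115000, i.e. 1.15V) to GPU_DVFS_VOLT0 (130000, i.e. 1.30V) gives
+ *   PMIC_VOLT_UP_SETTLE_TIME(115000, 130000)
+ *     = ceil(15000 / 1250) + PMIC_CMD_DELAY_TIME = 12 + 5 = 17 (us),
+ * with MIN_PMIC_SETTLE_TIME (25us) presumably applied as a lower bound
+ * by the caller.
+ */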
+/* #define GPU_DVFS_PMIC_SETTLE_TIME (40) // us */
+
+/* register val -> mV x 100 */
+#define GPU_VOLT_TO_MV(volt)            ((((volt)*625)/100+700)*100)
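+/* e.g. a register value of 32: ((32*625)/100+700)*100 = 90000,
+ * i.e. 900mV expressed in the "mV x 100" unit used throughout
+ * this file */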
+
+#define PMIC_BUCK_VGPU_VOSEL_ON		MT6351_PMIC_BUCK_VGPU_VOSEL_ON
+#define PMIC_ADDR_VGPU_VOSEL_ON		MT6351_PMIC_BUCK_VGPU_VOSEL_ON_ADDR
+#define PMIC_ADDR_VGPU_VOSEL_ON_MASK	MT6351_PMIC_BUCK_VGPU_VOSEL_ON_MASK
+#define PMIC_ADDR_VGPU_VOSEL_ON_SHIFT	MT6351_PMIC_BUCK_VGPU_VOSEL_ON_SHIFT
+#define PMIC_ADDR_VGPU_VOSEL_CTRL	MT6351_PMIC_BUCK_VGPU_VOSEL_CTRL_ADDR
+#define PMIC_ADDR_VGPU_VOSEL_CTRL_MASK	MT6351_PMIC_BUCK_VGPU_VOSEL_CTRL_MASK
+#define PMIC_ADDR_VGPU_VOSEL_CTRL_SHIFT	MT6351_PMIC_BUCK_VGPU_VOSEL_CTRL_SHIFT
+#define PMIC_ADDR_VGPU_EN		MT6351_PMIC_BUCK_VGPU_EN_ADDR
+#define PMIC_ADDR_VGPU_EN_MASK		MT6351_PMIC_BUCK_VGPU_EN_MASK
+#define PMIC_ADDR_VGPU_EN_SHIFT		MT6351_PMIC_BUCK_VGPU_EN_SHIFT
+#define PMIC_ADDR_VGPU_EN_CTRL		MT6351_PMIC_BUCK_VGPU_EN_CTRL_ADDR
+#define PMIC_ADDR_VGPU_EN_CTRL_MASK	MT6351_PMIC_BUCK_VGPU_EN_CTRL_MASK
+#define PMIC_ADDR_VGPU_EN_CTRL_SHIFT	MT6351_PMIC_BUCK_VGPU_EN_CTRL_SHIFT
+#elif defined(VGPU_SET_BY_EXTIC)
+#define GPU_LDO_BASE			0x10001000
+#define EXTIC_VSEL0			0x0		/* [0]	 */
+#define EXTIC_BUCK_EN0_MASK		0x1
+#define EXTIC_BUCK_EN0_SHIFT		0x7
+#define EXTIC_VSEL1			0x1	/* [0]	 */
+#define EXTIC_BUCK_EN1_MASK		0x1
+#define EXTIC_BUCK_EN1_SHIFT		0x7
+#define EXTIC_VGPU_CTRL			0x2
+#define EXTIC_VGPU_SLEW_MASK		0x7
+#define EXTIC_VGPU_SLEW_SHIFT		0x4
+
+#define EXTIC_VOLT_ON_OFF_DELAY_US		350
+#define EXTIC_VOLT_STEP			12826	/* 12.826mV per step */
+#define EXTIC_SLEW_STEP			100	/* 10.000mV per step */
+#define EXTIC_VOLT_UP_SETTLE_TIME(old_volt, new_volt, slew_rate)	\
+	(((((new_volt) - (old_volt)) * EXTIC_SLEW_STEP) /  EXTIC_VOLT_STEP) / (slew_rate))	/* us */
+#define EXTIC_VOLT_DOWN_SETTLE_TIME(old_volt, new_volt, slew_rate)	\
+	(((((old_volt) - (new_volt)) * EXTIC_SLEW_STEP) /  EXTIC_VOLT_STEP) / (slew_rate))	/* us */
+#endif
+/* efuse */
+#define GPUFREQ_EFUSE_INDEX		(4)
+#define EFUSE_MFG_SPD_BOND_SHIFT	(22)
+#define EFUSE_MFG_SPD_BOND_MASK		(0x3)
+/*
+ * LOG
+ */
+#if 1
+#define TAG	 "[Power/gpufreq] "
+
+#define gpufreq_err(fmt, args...)	\
+	pr_err(fmt, ##args)
+#define gpufreq_warn(fmt, args...)	\
+	pr_warn(fmt, ##args)
+#define gpufreq_info(fmt, args...)	\
+	pr_debug(fmt, ##args)
+#define gpufreq_dbg(fmt, args...)	\
+	pr_debug(fmt, ##args)
+#define gpufreq_ver(fmt, args...)	\
+	pr_debug(fmt, ##args)
+#else
+#define gpufreq_err(fmt, args...)
+#define gpufreq_warn(fmt, args...)
+#define gpufreq_info(fmt, args...)
+#define gpufreq_dbg(fmt, args...)
+#define gpufreq_ver(fmt, args...)
+#endif
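+/*
+ * Note: gpufreq_info() and gpufreq_ver() compile down to pr_debug(), so their
+ * output only shows up when dynamic debug is enabled for this file.
+ */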
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static struct early_suspend mt_gpufreq_early_suspend_handler = {
+	.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 200,
+	.suspend = NULL,
+	.resume = NULL,
+};
+#endif
+
+static sampler_func g_pFreqSampler;
+static sampler_func g_pVoltSampler;
+
+static gpufreq_power_limit_notify g_pGpufreq_power_limit_notify;
+#ifdef MT_GPUFREQ_INPUT_BOOST
+static gpufreq_input_boost_notify g_pGpufreq_input_boost_notify;
+#endif
+static gpufreq_ptpod_update_notify g_pGpufreq_ptpod_update_notify;
+
+
+/***************************
+ * GPU DVFS OPP Table
+ ****************************/
+/* Full Yield */
+static struct mt_gpufreq_table_info mt_gpufreq_opp_tbl_e1_0[] = {
+/*	GPUOP(GPU_DVFS_FREQ0, GPU_DVFS_VOLT0, 0), */
+/*	GPUOP(GPU_DVFS_FREQ1, GPU_DVFS_VOLT1, 1), */
+/*	GPUOP(GPU_DVFS_FREQ2, GPU_DVFS_VOLT1, 2), */
+	GPUOP(GPU_DVFS_FREQ3, GPU_DVFS_VOLT2, 3),
+	GPUOP(GPU_DVFS_FREQ4, GPU_DVFS_VOLT2, 4),
+	GPUOP(GPU_DVFS_FREQ5, GPU_DVFS_VOLT2, 5),
+};
+
+static struct mt_gpufreq_table_info mt_gpufreq_opp_tbl_e1_1[] = {
+/*	GPUOP(GPU_DVFS_FREQ0, GPU_DVFS_VOLT0, 0), */
+	GPUOP(GPU_DVFS_FREQ1, GPU_DVFS_VOLT1, 1),
+/*	GPUOP(GPU_DVFS_FREQ2, GPU_DVFS_VOLT1, 2), */
+	GPUOP(GPU_DVFS_FREQ3, GPU_DVFS_VOLT2, 3),
+	GPUOP(GPU_DVFS_FREQ4, GPU_DVFS_VOLT2, 4),
+	GPUOP(GPU_DVFS_FREQ5, GPU_DVFS_VOLT2, 5),
+};
+
+
+static struct mt_gpufreq_table_info mt_gpufreq_opp_tbl_e1_2[] = {
+	GPUOP(GPU_DVFS_FREQ0, GPU_DVFS_VOLT0, 0),
+	GPUOP(GPU_DVFS_FREQ1, GPU_DVFS_VOLT1, 1),
+/*	GPUOP(GPU_DVFS_FREQ2, GPU_DVFS_VOLT1, 2), */
+	GPUOP(GPU_DVFS_FREQ3, GPU_DVFS_VOLT2, 3),
+/*	GPUOP(GPU_DVFS_FREQ4, GPU_DVFS_VOLT2, 4), */
+	GPUOP(GPU_DVFS_FREQ5, GPU_DVFS_VOLT2, 5),
+};
+
+
+
+/**************************
+ * PTPOD enable/disable GPU power domain
+ ***************************/
+static gpufreq_mfgclock_notify g_pGpufreq_mfgclock_enable_notify;
+static gpufreq_mfgclock_notify g_pGpufreq_mfgclock_disable_notify;
+
+
+/*
+ * AEE (SRAM debug)
+ */
+#ifdef MT_GPUFREQ_AEE_RR_REC
+enum gpu_dvfs_state {
+	GPU_DVFS_IS_DOING_DVFS = 0,
+	GPU_DVFS_IS_VGPU_ENABLED,
+};
+
+static void _mt_gpufreq_aee_init(void)
+{
+	aee_rr_rec_gpu_dvfs_vgpu(0xFF);
+	aee_rr_rec_gpu_dvfs_oppidx(0xFF);
+	aee_rr_rec_gpu_dvfs_status(0xFC);
+}
+#endif
+
+/**************************
+ * enable GPU DVFS count
+ ***************************/
+static int g_gpufreq_dvfs_disable_count;
+
+static unsigned int g_cur_gpu_freq = GPU_DVFS_FREQ5;	/* initial value, 253.5 MHz */
+static unsigned int g_cur_gpu_volt = GPU_DVFS_VOLT2;	/* initial value, 1.15 V */
+static unsigned int g_cur_gpu_idx = 0xFF;
+static unsigned int g_cur_gpu_OPPidx = 0xFF;
+
+
+static unsigned int g_cur_freq_init_keep;
+
+static bool mt_gpufreq_ready;
+
+/* By default, freq_table[0] is the max frequency and freq_table[num-1] the min frequency. */
+static unsigned int g_gpufreq_max_id;
+
+/* If not limited, it should be set to freq_table[0] (MAX frequency) */
+static unsigned int g_limited_max_id;
+static unsigned int g_limited_min_id;
+
+static bool mt_gpufreq_debug;
+static bool mt_gpufreq_pause = true;
+static bool mt_gpufreq_keep_max_frequency_state;
+static bool mt_gpufreq_keep_opp_frequency_state;
+#if 1
+static unsigned int mt_gpufreq_keep_opp_frequency;
+#endif
+static unsigned int mt_gpufreq_keep_opp_index;
+static bool mt_gpufreq_fixed_freq_volt_state;
+static unsigned int mt_gpufreq_fixed_frequency;
+static unsigned int mt_gpufreq_fixed_voltage;
+
+#if 1
+static unsigned int mt_gpufreq_volt_enable;
+#endif
+static unsigned int mt_gpufreq_volt_enable_state;
+#ifdef MT_GPUFREQ_INPUT_BOOST
+static unsigned int mt_gpufreq_input_boost_state = 1;
+#endif
+/* static bool g_limited_power_ignore_state = false; */
+static bool g_limited_thermal_ignore_state;
+#ifdef MT_GPUFREQ_LOW_BATT_VOLT_PROTECT
+static bool g_limited_low_batt_volt_ignore_state;
+#endif
+#ifdef MT_GPUFREQ_LOW_BATT_VOLUME_PROTECT
+static bool g_limited_low_batt_volume_ignore_state;
+#endif
+#ifdef MT_GPUFREQ_OC_PROTECT
+static bool g_limited_oc_ignore_state;
+#endif
+
+static bool mt_gpufreq_opp_max_frequency_state;
+static unsigned int mt_gpufreq_opp_max_frequency;
+static unsigned int mt_gpufreq_opp_max_index;
+
+static unsigned int mt_gpufreq_dvfs_table_type;
+
+/* static DEFINE_SPINLOCK(mt_gpufreq_lock); */
+static DEFINE_MUTEX(mt_gpufreq_lock);
+static DEFINE_MUTEX(mt_gpufreq_power_lock);
+
+static unsigned int mt_gpufreqs_num;
+static struct mt_gpufreq_table_info *mt_gpufreqs;
+static struct mt_gpufreq_table_info *mt_gpufreqs_default;
+static struct mt_gpufreq_power_table_info *mt_gpufreqs_power;
+static struct mtk_gpu_power_info *mt_gpufreqs_power_info;
+
+static struct mt_gpufreq_pmic_t *mt_gpufreq_pmic;
+
+
+/* static struct mt_gpufreq_power_table_info *mt_gpufreqs_default_power; */
+
+static bool mt_gpufreq_ptpod_disable;
+static int mt_gpufreq_ptpod_disable_idx;
+
+static void mt_gpufreq_clock_switch(unsigned int freq_new);
+static void mt_gpufreq_volt_switch(unsigned int volt_old, unsigned int volt_new);
+static void mt_gpufreq_set(unsigned int freq_old, unsigned int freq_new,
+			   unsigned int volt_old, unsigned int volt_new);
+static unsigned int _mt_gpufreq_get_cur_volt(void);
+static unsigned int _mt_gpufreq_get_cur_freq(void);
+static void _mt_gpufreq_kick_pbm(int enable);
+
+
+#ifndef DISABLE_PBM_FEATURE
+static bool g_limited_pbm_ignore_state;
+static unsigned int mt_gpufreq_pbm_limited_gpu_power;	/* PBM limit power */
+static unsigned int mt_gpufreq_pbm_limited_index;	/* Limited frequency index for PBM */
+#define GPU_OFF_SETTLE_TIME_MS		(100)
+struct delayed_work notify_pbm_gpuoff_work;
+#endif
+
+#if 0 /* pedro */
+int __attribute__ ((weak))
+get_immediate_gpu_wrap(void)
+{
+	pr_err("get_immediate_gpu_wrap doesn't exist");
+	return 0;
+}
+#endif /* pedro */
+
+/*************************************************************************************
+ * Check GPU DVFS Efuse
+ **************************************************************************************/
+static unsigned int mt_gpufreq_get_dvfs_table_type(void)
+{
+	unsigned int gpu_speed_bounding = 0;
+	unsigned int type = 0;
+
+	/* if (mt_get_chip_sw_ver() == 0) { -- E1 check removed */
+	{ /* pedro: assume E1 */
+		mt_gpufreq_opp_tbl_e1_0[0].gpufreq_khz = GPU_DVFS_FREQ3_e1;
+
+		mt_gpufreq_opp_tbl_e1_1[0].gpufreq_khz = GPU_DVFS_FREQ1_e1;
+		mt_gpufreq_opp_tbl_e1_1[1].gpufreq_khz = GPU_DVFS_FREQ3_e1;
+
+		mt_gpufreq_opp_tbl_e1_2[0].gpufreq_khz = GPU_DVFS_FREQ0_e1;
+		mt_gpufreq_opp_tbl_e1_2[1].gpufreq_khz = GPU_DVFS_FREQ1_e1;
+		mt_gpufreq_opp_tbl_e1_2[2].gpufreq_khz = GPU_DVFS_FREQ3_e1;
+	}
+
+	/* Read the GPU efuse data */
+	gpu_speed_bounding = get_devinfo_with_index(GPUFREQ_EFUSE_INDEX);
+	gpufreq_info("GPU other bounding from efuse = %x\n", gpu_speed_bounding);
+
+	gpu_speed_bounding = (get_devinfo_with_index(GPUFREQ_EFUSE_INDEX) >>
+				EFUSE_MFG_SPD_BOND_SHIFT) & EFUSE_MFG_SPD_BOND_MASK;
+	gpufreq_info("GPU frequency bounding from efuse = %x\n", gpu_speed_bounding);
+
+	/* No efuse or free run? use clock-frequency from device tree to determine GPU table type! */
+	if (gpu_speed_bounding == 0) {
+#ifdef CONFIG_OF
+		static const struct of_device_id gpu_ids[] = {
+			{.compatible = "mediatek,mt8167-clark"},
+			{ /* sentinel */ }
+		};
+		struct device_node *node;
+		unsigned int gpu_speed = 0;
+
+		node = of_find_matching_node(NULL, gpu_ids);
+		if (!node) {
+			gpufreq_err("@%s: find GPU node failed\n", __func__);
+			gpu_speed = 500000;	/* default speed */
+		} else {
+			if (!of_property_read_u32(node, "clock-frequency", &gpu_speed)) {
+				gpu_speed = gpu_speed / 1000;	/* KHz */
+			} else {
+				gpufreq_err
+					("@%s: missing clock-frequency property, use default GPU level\n",
+					 __func__);
+				gpu_speed = 500000;	/* default speed */
+			}
+		}
+		gpufreq_info("GPU clock-frequency from DT = %d KHz\n", gpu_speed);
+
+		if (gpu_speed >= GPU_DVFS_FREQ0)
+			type = 2;	/* 600M */
+		else if (gpu_speed >= GPU_DVFS_FREQ1)
+			type = 1;	/* 500M */
+
+#else
+		gpufreq_err("@%s: Cannot get GPU speed from DT!\n", __func__);
+		type = 0;
+#endif
+		return type;
+	}
+
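+	/*
+	 * Speed-bond value to table-type mapping:
+	 *   0 -> free run (handled above via DT), 1 -> type 2 (600 MHz),
+	 *   2 -> type 1 (500 MHz), 3 and others -> type 0 (400 MHz).
+	 */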
+	switch (gpu_speed_bounding) {
+	case 0:
+		type = 0;	/* free run */
+		break;
+	case 1:
+		type = 2;	/* 600M */
+		break;
+	case 2:
+		type = 1;	/* 500M */
+		break;
+	case 3:
+	default:
+		type = 0;	/* 400M */
+	}
+
+	return type;
+}
+
+#ifdef MT_GPUFREQ_INPUT_BOOST
+static struct task_struct *mt_gpufreq_up_task;
+
+static int mt_gpufreq_input_boost_task(void *data)
+{
+	while (1) {
+		gpufreq_dbg("@%s: begin\n", __func__);
+
+		if (g_pGpufreq_input_boost_notify != NULL) {
+			gpufreq_dbg("@%s: g_pGpufreq_input_boost_notify\n", __func__);
+			g_pGpufreq_input_boost_notify(g_gpufreq_max_id);
+		}
+
+		gpufreq_dbg("@%s: end\n", __func__);
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+
+		if (kthread_should_stop())
+			break;
+	}
+
+	return 0;
+}
+
+
+/*************************************************************************************
+ * Input boost
+ **************************************************************************************/
+static void mt_gpufreq_input_event(struct input_handle *handle, unsigned int type,
+				   unsigned int code, int value)
+{
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		return;
+	}
+
+	if ((type == EV_KEY) && (code == BTN_TOUCH) && (value == 1)
+		&& (mt_gpufreq_input_boost_state == 1)) {
+		gpufreq_dbg("@%s: accept.\n", __func__);
+
+		wake_up_process(mt_gpufreq_up_task);
+	}
+}
+
+static int mt_gpufreq_input_connect(struct input_handler *handler,
+					struct input_dev *dev, const struct input_device_id *id)
+{
+	struct input_handle *handle;
+	int error;
+
+	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = "gpufreq_ib";
+
+	error = input_register_handle(handle);
+	if (error)
+		goto err2;
+
+	error = input_open_device(handle);
+	if (error)
+		goto err1;
+
+	return 0;
+err1:
+	input_unregister_handle(handle);
+err2:
+	kfree(handle);
+	return error;
+}
+
+static void mt_gpufreq_input_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+static const struct input_device_id mt_gpufreq_ids[] = {
+	{.driver_info = 1},
+	{},
+};
+
+static struct input_handler mt_gpufreq_input_handler = {
+	.event = mt_gpufreq_input_event,
+	.connect = mt_gpufreq_input_connect,
+	.disconnect = mt_gpufreq_input_disconnect,
+	.name = "gpufreq_ib",
+	.id_table = mt_gpufreq_ids,
+};
+#endif
+
+/*
+ * Power table calculation
+ */
+static void mt_gpufreq_power_calculation(unsigned int idx, unsigned int freq,
+					  unsigned int volt, unsigned int temp)
+{
+#define GPU_ACT_REF_POWER		845	/* mW  */
+#define GPU_ACT_REF_FREQ		850000	/* KHz */
+#define GPU_ACT_REF_VOLT		90000	/* mV x 100 */
+
+	unsigned int p_total = 0, p_dynamic = 0, ref_freq = 0, ref_volt = 0;
+	int p_leakage = 0;
+
+	p_dynamic = GPU_ACT_REF_POWER;
+	ref_freq = GPU_ACT_REF_FREQ;
+	ref_volt = GPU_ACT_REF_VOLT;
+
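+	/*
+	 * Dynamic power scales linearly with frequency and with the square of
+	 * voltage: P_dyn ~= P_ref * (f / f_ref) * (V / V_ref)^2. With the
+	 * integer scaling below, e.g. freq = 425000 KHz at volt = 90000
+	 * (mV x 100): 845 * 50 * 100 * 100 / (100 * 100 * 100) = 422 mW.
+	 */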
+	p_dynamic = p_dynamic *
+		((freq * 100) / ref_freq) *
+		((volt * 100) / ref_volt) * ((volt * 100) / ref_volt) / (100 * 100 * 100);
+
+#ifdef STATIC_PWR_READY2USE
+	p_leakage =
+		mt_spower_get_leakage(MT_SPOWER_GPU, (volt / 100), temp);
+	if (!mt_gpufreq_volt_enable_state || p_leakage < 0)
+		p_leakage = 0;
+#else
+	p_leakage = 71;
+#endif
+
+	p_total = p_dynamic + p_leakage;
+
+	gpufreq_ver("%d: p_dynamic = %d, p_leakage = %d, p_total = %d, temp = %d\n",
+			idx, p_dynamic, p_leakage, p_total, temp);
+
+	mt_gpufreqs_power[idx].gpufreq_power = p_total;
+}
+
+/**************************************
+ * Random seed generated for test
+ ***************************************/
+#ifdef MT_GPU_DVFS_RANDOM_TEST
+static int mt_gpufreq_idx_get(int num)
+{
+	int random = 0, mult = 0, idx;
+
+	random = jiffies & 0xF;
+
+	while (1) {
+		if ((mult * num) >= random) {
+			idx = (mult * num) - random;
+			break;
+		}
+		mult++;
+	}
+	return idx;
+}
+#endif
+
+
+/* Set frequency and voltage at driver probe function */
+static void mt_gpufreq_set_initial(void)
+{
+	unsigned int cur_volt = 0, cur_freq = 0;
+	int i = 0;
+
+	mutex_lock(&mt_gpufreq_lock);
+
+#ifdef MT_GPUFREQ_AEE_RR_REC
+	aee_rr_rec_gpu_dvfs_status(aee_rr_curr_gpu_dvfs_status() | (1 << GPU_DVFS_IS_DOING_DVFS));
+#endif
+
+	cur_volt = _mt_gpufreq_get_cur_volt();
+	cur_freq = _mt_gpufreq_get_cur_freq();
+
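+	/*
+	 * Note: the voltage-matching search below is commented out, so the
+	 * first table entry (i == 0) is always programmed and the LPM fallback
+	 * further down can never trigger as written.
+	 */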
+//	for (i = 0; i < mt_gpufreqs_num; i++) {
+//		if (cur_volt >= mt_gpufreqs[i].gpufreq_volt) {
+			mt_gpufreq_set(cur_freq, mt_gpufreqs[i].gpufreq_khz,
+				       cur_volt, mt_gpufreqs[i].gpufreq_volt);
+			g_cur_gpu_OPPidx = i;
+			gpufreq_dbg("init_idx = %d\n", g_cur_gpu_OPPidx);
+			_mt_gpufreq_kick_pbm(1);
+//			break;
+//		}
+//	}
+
+	/* Not found, set to LPM */
+	if (i == mt_gpufreqs_num) {
+		gpufreq_err
+			("Set to LPM since GPU idx not found according to current Vcore = %d mV\n",
+			 cur_volt / 100);
+		g_cur_gpu_OPPidx = mt_gpufreqs_num - 1;
+		gpufreq_err
+			("mt_gpufreq_set_initial freq index = %d\n", g_cur_gpu_OPPidx);
+		mt_gpufreq_set(cur_freq, mt_gpufreqs[g_cur_gpu_OPPidx].gpufreq_khz,
+			       cur_volt, mt_gpufreqs[g_cur_gpu_OPPidx].gpufreq_volt);
+	}
+
+	g_cur_gpu_freq = mt_gpufreqs[g_cur_gpu_OPPidx].gpufreq_khz;
+	g_cur_gpu_volt = mt_gpufreqs[g_cur_gpu_OPPidx].gpufreq_volt;
+	g_cur_gpu_idx = mt_gpufreqs[g_cur_gpu_OPPidx].gpufreq_idx;
+
+
+#ifdef MT_GPUFREQ_AEE_RR_REC
+	aee_rr_rec_gpu_dvfs_oppidx(g_cur_gpu_OPPidx);
+	aee_rr_rec_gpu_dvfs_status(aee_rr_curr_gpu_dvfs_status() & ~(1 << GPU_DVFS_IS_DOING_DVFS));
+#endif
+
+	mutex_unlock(&mt_gpufreq_lock);
+}
+
+
+
+#ifndef DISABLE_PBM_FEATURE
+static void mt_gpufreq_notify_pbm_gpuoff(struct work_struct *work)
+{
+	mutex_lock(&mt_gpufreq_lock);
+	if (!mt_gpufreq_volt_enable_state)
+		_mt_gpufreq_kick_pbm(0);
+
+	mutex_unlock(&mt_gpufreq_lock);
+}
+#endif
+
+/* Set VGPU enable/disable when GPU clock be switched on/off */
+unsigned int mt_gpufreq_voltage_enable_set(unsigned int enable)
+{
+	/* On mt8167 the GPU is supplied from Vcore, so the rail is effectively always on */
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		return DRIVER_NOT_READY;
+	}
+
+	if (mt_gpufreq_ptpod_disable == true) {
+		if (enable == 0) {
+			gpufreq_info("mt_gpufreq_ptpod_disable == true\n");
+			return DRIVER_NOT_READY;
+		}
+	}
+	mt_gpufreq_volt_enable_state = enable;
+
+	return 0;
+}
+EXPORT_SYMBOL(mt_gpufreq_voltage_enable_set);
+
+/************************************************
+ * DVFS enable API for PTPOD
+ *************************************************/
+
+void mt_gpufreq_enable_by_ptpod(void)
+{
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		return;
+	}
+
+	mt_gpufreq_voltage_enable_set(0);
+#ifdef MTK_GPU_SPM
+	if (mt_gpufreq_ptpod_disable)
+		mtk_gpu_spm_resume();
+#endif
+
+	mt_gpufreq_ptpod_disable = false;
+	gpufreq_info("mt_gpufreq enabled by ptpod\n");
+
+	if (g_pGpufreq_mfgclock_disable_notify)
+		g_pGpufreq_mfgclock_disable_notify();
+	else
+		pr_err("mt_gpufreq_enable_by_ptpod: no callback!\n");
+
+	/* pmic auto mode: the variance of voltage is wide but saves more power. */
+	regulator_set_mode(mt_gpufreq_pmic->reg_vgpu, REGULATOR_MODE_NORMAL);
+	if (regulator_get_mode(mt_gpufreq_pmic->reg_vgpu) != REGULATOR_MODE_NORMAL)
+		pr_err("Vgpu should be REGULATOR_MODE_NORMAL(%d), but mode = %d\n",
+			REGULATOR_MODE_NORMAL, regulator_get_mode(mt_gpufreq_pmic->reg_vgpu));
+}
+EXPORT_SYMBOL(mt_gpufreq_enable_by_ptpod);
+
+/************************************************
+ * DVFS disable API for PTPOD
+ *************************************************/
+void mt_gpufreq_disable_by_ptpod(void)
+{
+	int i = 0, target_idx = 0;
+
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		return;
+	}
+
+#ifdef MTK_GPU_SPM
+	mtk_gpu_spm_pause();
+
+	g_cur_gpu_volt = _mt_gpufreq_get_cur_volt();
+	g_cur_gpu_freq = _mt_gpufreq_get_cur_freq();
+#endif
+
+	mt_gpufreq_ptpod_disable = true;
+	gpufreq_info("mt_gpufreq disabled by ptpod\n");
+
+	for (i = 0; i < mt_gpufreqs_num; i++) {
+		/* VBoot = 0.85v for PTPOD */
+		target_idx = i;
+		if (mt_gpufreqs_default[i].gpufreq_volt <= GPU_DVFS_PTPOD_DISABLE_VOLT)
+			break;
+	}
+
+	if (g_pGpufreq_mfgclock_enable_notify)
+		g_pGpufreq_mfgclock_enable_notify();
+	else
+		pr_err("mt_gpufreq_disable_by_ptpod: no callback!\n");
+
+	/* pmic PWM mode: the variance of voltage is narrow but consumes more power. */
+	regulator_set_mode(mt_gpufreq_pmic->reg_vgpu, REGULATOR_MODE_FAST);
+
+	if (regulator_get_mode(mt_gpufreq_pmic->reg_vgpu) != REGULATOR_MODE_FAST)
+		pr_err("Vgpu should be REGULATOR_MODE_FAST(%d), but mode = %d\n",
+			REGULATOR_MODE_FAST, regulator_get_mode(mt_gpufreq_pmic->reg_vgpu));
+
+
+	mt_gpufreq_ptpod_disable_idx = target_idx;
+
+	mt_gpufreq_voltage_enable_set(1);
+	mt_gpufreq_target(target_idx);
+}
+EXPORT_SYMBOL(mt_gpufreq_disable_by_ptpod);
+
+
+bool mt_gpufreq_IsPowerOn(void)
+{
+	return (mt_gpufreq_volt_enable_state == 1);
+}
+EXPORT_SYMBOL(mt_gpufreq_IsPowerOn);
+
+
+/************************************************
+ * API to switch back default voltage setting for GPU PTPOD disabled
+ *************************************************/
+void mt_gpufreq_restore_default_volt(void)
+{
+	int i;
+
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		return;
+	}
+
+	mutex_lock(&mt_gpufreq_lock);
+
+	for (i = 0; i < mt_gpufreqs_num; i++) {
+		mt_gpufreqs[i].gpufreq_volt = mt_gpufreqs_default[i].gpufreq_volt;
+		gpufreq_dbg("@%s: mt_gpufreqs[%d].gpufreq_volt = %x\n", __func__, i,
+				mt_gpufreqs[i].gpufreq_volt);
+	}
+
+#ifndef MTK_GPU_SPM
+	mt_gpufreq_volt_switch(g_cur_gpu_volt, mt_gpufreqs[g_cur_gpu_OPPidx].gpufreq_volt);
+#endif
+
+	g_cur_gpu_volt = mt_gpufreqs[g_cur_gpu_OPPidx].gpufreq_volt;
+
+	mutex_unlock(&mt_gpufreq_lock);
+}
+EXPORT_SYMBOL(mt_gpufreq_restore_default_volt);
+
+/* Set voltage because PTP-OD modified voltage table by PMIC wrapper */
+unsigned int mt_gpufreq_update_volt(unsigned int pmic_volt[], unsigned int array_size)
+{
+	int i;			/* , idx; */
+	/* unsigned long flags; */
+	unsigned int volt = 0;
+
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		return DRIVER_NOT_READY;
+	}
+
+	if (array_size > mt_gpufreqs_num) {
+		gpufreq_err("mt_gpufreq_update_volt: array_size = %d, Over-Boundary!\n", array_size);
+		return 0;	/*-ENOSYS;*/
+	}
+
+	mutex_lock(&mt_gpufreq_lock);
+	for (i = 0; i < array_size; i++) {
+		volt = GPU_VOLT_TO_MV(pmic_volt[i]);
+		if ((volt > 95000) && (volt < 130000)) {	/* between 950mv~1300mv */
+			mt_gpufreqs[i].gpufreq_volt = volt;
+			gpufreq_dbg("@%s: mt_gpufreqs[%d].gpufreq_volt = %x\n", __func__, i,
+					mt_gpufreqs[i].gpufreq_volt);
+		} else {
+			gpufreq_err("@%s: index[%d]._volt = %x Over-Boundary\n", __func__, i, volt);
+		}
+	}
+
+
+#ifndef MTK_GPU_SPM
+	mt_gpufreq_volt_switch(g_cur_gpu_volt, mt_gpufreqs[g_cur_gpu_OPPidx].gpufreq_volt);
+#endif
+
+	g_cur_gpu_volt = mt_gpufreqs[g_cur_gpu_OPPidx].gpufreq_volt;
+	if (g_pGpufreq_ptpod_update_notify != NULL)
+		g_pGpufreq_ptpod_update_notify();
+	mutex_unlock(&mt_gpufreq_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(mt_gpufreq_update_volt);
+
+unsigned int mt_gpufreq_get_dvfs_table_num(void)
+{
+	return mt_gpufreqs_num;
+}
+EXPORT_SYMBOL(mt_gpufreq_get_dvfs_table_num);
+
+unsigned int mt_gpufreq_get_freq_by_idx(unsigned int idx)
+{
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		return DRIVER_NOT_READY;
+	}
+
+	if (idx < mt_gpufreqs_num) {
+		gpufreq_dbg("@%s: idx = %d, frequency= %d\n", __func__, idx,
+				mt_gpufreqs[idx].gpufreq_khz);
+		return mt_gpufreqs[idx].gpufreq_khz;
+	}
+
+	gpufreq_dbg("@%s: idx = %d, NOT found! return 0!\n", __func__, idx);
+	return 0;
+}
+EXPORT_SYMBOL(mt_gpufreq_get_freq_by_idx);
+
+unsigned int mt_gpufreq_get_volt_by_idx(unsigned int idx)
+{
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		return DRIVER_NOT_READY;
+	}
+
+	if (idx < mt_gpufreqs_num) {
+		gpufreq_dbg("@%s: idx = %d, voltage= %d\n", __func__, idx,
+				mt_gpufreqs[idx].gpufreq_volt);
+		return mt_gpufreqs[idx].gpufreq_volt;
+	}
+
+	gpufreq_dbg("@%s: idx = %d, NOT found! return 0!\n", __func__, idx);
+	return 0;
+}
+EXPORT_SYMBOL(mt_gpufreq_get_volt_by_idx);
+
+#if 0 /* pedro */
+#ifdef MT_GPUFREQ_DYNAMIC_POWER_TABLE_UPDATE
+static void mt_update_gpufreqs_power_table(void)
+{
+	int i = 0, temp = 0;
+	unsigned int freq = 0, volt = 0;
+
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready\n", __func__);
+		return;
+	}
+
+#ifdef CONFIG_THERMAL
+	temp = get_immediate_gpu_wrap() / 1000;
+#else
+	temp = 40;
+#endif
+
+	gpufreq_dbg("@%s, temp = %d\n", __func__, temp);
+
+	mutex_lock(&mt_gpufreq_lock);
+
+	if ((temp >= -20) && (temp <= 125)) {
+		for (i = 0; i < mt_gpufreqs_num; i++) {
+			freq = mt_gpufreqs_power[i].gpufreq_khz;
+			volt = mt_gpufreqs_power[i].gpufreq_volt;
+
+			mt_gpufreq_power_calculation(i, freq, volt, temp);
+
+			gpufreq_ver("update mt_gpufreqs_power[%d].gpufreq_khz = %d\n", i,
+					mt_gpufreqs_power[i].gpufreq_khz);
+			gpufreq_ver("update mt_gpufreqs_power[%d].gpufreq_volt = %d\n", i,
+					mt_gpufreqs_power[i].gpufreq_volt);
+			gpufreq_ver("update mt_gpufreqs_power[%d].gpufreq_power = %d\n", i,
+					mt_gpufreqs_power[i].gpufreq_power);
+		}
+	} else
+		gpufreq_err("@%s: temp < -20 or temp > 125, power table NOT updated!\n", __func__);
+
+	mutex_unlock(&mt_gpufreq_lock);
+}
+#endif
+#endif  /* pedro */
+
+
+static void mt_setup_gpufreqs_power_table(int num)
+{
+	int i = 0, temp = 0;
+
+	mt_gpufreqs_power = kzalloc((num) * sizeof(struct mt_gpufreq_power_table_info), GFP_KERNEL);
+	mt_gpufreqs_power_info = kzalloc((num) * sizeof(struct mtk_gpu_power_info), GFP_KERNEL);
+	if (mt_gpufreqs_power == NULL || mt_gpufreqs_power_info == NULL)
+		return;
+
+#ifdef CONFIG_THERMAL
+	temp = get_immediate_gpu_wrap() / 1000;
+#else
+	temp = 40;
+#endif
+
+	gpufreq_dbg("@%s: temp = %d\n", __func__, temp);
+
+	if ((temp < -20) || (temp > 125)) {
+		gpufreq_dbg("@%s: temp < -20 or temp > 125!\n", __func__);
+		temp = 65;
+	}
+
+	for (i = 0; i < num; i++) {
+		/* fill-in freq and volt in power table */
+		mt_gpufreqs_power[i].gpufreq_khz = mt_gpufreqs[i].gpufreq_khz;
+		mt_gpufreqs_power[i].gpufreq_volt = mt_gpufreqs[i].gpufreq_volt;
+
+		/* CJ: is this mt_gpufreq_power_calculation() call needed here? */
+		mt_gpufreq_power_calculation(i,
+						 mt_gpufreqs_power[i].gpufreq_khz,
+						 mt_gpufreqs_power[i].gpufreq_volt,
+						 temp);
+
+		mt_gpufreqs_power_info[i].gpufreq_khz = mt_gpufreqs_power[i].gpufreq_khz;
+		mt_gpufreqs_power_info[i].gpufreq_power = mt_gpufreqs_power[i].gpufreq_power;
+		gpufreq_info("mt_gpufreqs_power[%d].gpufreq_khz = %u\n", i,
+				 mt_gpufreqs_power[i].gpufreq_khz);
+		gpufreq_info("mt_gpufreqs_power[%d].gpufreq_volt = %u\n", i,
+				 mt_gpufreqs_power[i].gpufreq_volt);
+		gpufreq_info("mt_gpufreqs_power[%d].gpufreq_power = %u\n", i,
+				 mt_gpufreqs_power[i].gpufreq_power);
+	}
+
+#ifdef CONFIG_THERMAL
+	mtk_gpufreq_register(mt_gpufreqs_power_info, num);
+#endif
+}
+
+/***********************************************
+ * register frequency table to gpufreq subsystem
+ ************************************************/
+static int mt_setup_gpufreqs_table(struct mt_gpufreq_table_info *freqs, int num)
+{
+	int i = 0;
+
+	mt_gpufreqs = kzalloc((num) * sizeof(*freqs), GFP_KERNEL);
+	mt_gpufreqs_default = kzalloc((num) * sizeof(*freqs), GFP_KERNEL);
+	if ((mt_gpufreqs == NULL) || (mt_gpufreqs_default == NULL))
+		return -ENOMEM;
+
+	for (i = 0; i < num; i++) {
+		mt_gpufreqs[i].gpufreq_khz = freqs[i].gpufreq_khz;
+		mt_gpufreqs[i].gpufreq_volt = freqs[i].gpufreq_volt;
+		mt_gpufreqs[i].gpufreq_idx = freqs[i].gpufreq_idx;
+
+		mt_gpufreqs_default[i].gpufreq_khz = freqs[i].gpufreq_khz;
+		mt_gpufreqs_default[i].gpufreq_volt = freqs[i].gpufreq_volt;
+		mt_gpufreqs_default[i].gpufreq_idx = freqs[i].gpufreq_idx;
+
+		gpufreq_dbg("freqs[%d].gpufreq_khz = %u\n", i, freqs[i].gpufreq_khz);
+		gpufreq_dbg("freqs[%d].gpufreq_volt = %u\n", i, freqs[i].gpufreq_volt);
+		gpufreq_dbg("freqs[%d].gpufreq_idx = %u\n", i, freqs[i].gpufreq_idx);
+	}
+
+	mt_gpufreqs_num = num;
+
+	g_limited_max_id = 0;
+	g_limited_min_id = mt_gpufreqs_num - 1;
+
+	gpufreq_info("@%s: g_cur_gpu_freq = %d, g_cur_gpu_volt = %d\n", __func__, g_cur_gpu_freq,
+			g_cur_gpu_volt);
+
+	mt_setup_gpufreqs_power_table(num);
+
+	return 0;
+}
+
+/**************************************
+ * check if maximum frequency is needed
+ ***************************************/
+static int mt_gpufreq_keep_max_freq(unsigned int freq_old, unsigned int freq_new)
+{
+	if (mt_gpufreq_keep_max_frequency_state == true)
+		return 1;
+
+	return 0;
+}
+
+/*****************************
+ * set GPU DVFS status
+ ******************************/
+int mt_gpufreq_state_set(int enabled)
+{
+	if (enabled) {
+		if (!mt_gpufreq_pause) {
+			gpufreq_dbg("gpufreq already enabled\n");
+			return 0;
+		}
+
+		/*****************
+		 * enable GPU DVFS
+		 ******************/
+		g_gpufreq_dvfs_disable_count--;
+		gpufreq_dbg("enable GPU DVFS: g_gpufreq_dvfs_disable_count = %d\n",
+				g_gpufreq_dvfs_disable_count);
+
+		/***********************************************
+		 * enable DVFS if no any module still disable it
+		 ************************************************/
+		if (g_gpufreq_dvfs_disable_count <= 0)
+			mt_gpufreq_pause = false;
+		else
+			gpufreq_warn("someone still disable gpufreq, cannot enable it\n");
+	} else {
+		/******************
+		 * disable GPU DVFS
+		 *******************/
+		g_gpufreq_dvfs_disable_count++;
+		gpufreq_dbg("disable GPU DVFS: g_gpufreq_dvfs_disable_count = %d\n",
+				g_gpufreq_dvfs_disable_count);
+
+		if (mt_gpufreq_pause) {
+			gpufreq_dbg("gpufreq already disabled\n");
+			return 0;
+		}
+
+		mt_gpufreq_pause = true;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mt_gpufreq_state_set);
+static unsigned int mt_gpufreq_dds_calc(unsigned int freq_khz, enum post_div_order_enum post_div_order)
+{
+	unsigned int dds = 0;
+
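+	/*
+	 * Compute the DDS word for the MMPLL frequency-hopping request,
+	 * assuming a 26 MHz reference and a fixed post divider of 4 (the
+	 * post_div_order argument is only used for logging here).
+	 * Example: 598000 KHz -> ((598000 * 4 / 1000) * 0x4000) / 26 = 0x170000.
+	 */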
+	dds = (((freq_khz * 4) / 1000) * 0x4000) / 26;
+
+	gpufreq_dbg("@%s: request freq = %d, div_order = %d, dds = %x\n",
+			__func__, freq_khz, post_div_order, dds);
+
+	return dds;
+}
+
+
+static void mt_gpufreq_clock_switch(unsigned int freq_new)
+{
+	unsigned int dds;
+
+	if (freq_new == g_cur_gpu_freq)
+		return;
+
+	dds = mt_gpufreq_dds_calc(freq_new, POST_DIV4);
+
+#ifdef CONFIG_MTK_FREQ_HOPPING
+	mt_dfs_mmpll(dds);
+#endif
+
+	g_cur_gpu_freq = freq_new;
+
+	if (g_pFreqSampler != NULL)
+		g_pFreqSampler(freq_new);
+
+	gpufreq_dbg("mt_gpu_clock_switch, freq_new = %d (KHz)\n", freq_new);
+}
+
+static void mt_gpufreq_volt_switch(unsigned int volt_old, unsigned int volt_new)
+{
+	/* unsigned int delay_unit_us = 100; */
+
+	gpufreq_dbg("@%s: volt_new = %d\n", __func__, volt_new);
+
+/* #ifdef VGPU_SET_BY_PMIC */
+#if 0
+	if (volt_new > volt_old)
+		regulator_set_voltage(mt_gpufreq_pmic->reg_vgpu,
+					volt_new*10, (PMIC_MAX_VGPU*10) + 125);
+	else
+		regulator_set_voltage(mt_gpufreq_pmic->reg_vgpu,
+					volt_new*10, volt_old*10);
+	udelay(delay_unit_us);
+#endif
+	if (g_pVoltSampler != NULL)
+		g_pVoltSampler(volt_new);
+}
+
+static unsigned int _mt_gpufreq_get_cur_freq(void)
+{
+	return g_cur_gpu_freq;
+}
+
+static unsigned int _mt_gpufreq_get_cur_volt(void)
+{
+	unsigned int gpu_volt = 0;
+#if defined(VGPU_SET_BY_PMIC)
+	/* WARNING: regulator_get_voltage() returns uV; convert to mV x 100 */
+	gpu_volt = regulator_get_voltage(mt_gpufreq_pmic->reg_vgpu) / 10;
+	gpufreq_dbg("gpu_dvfs_get_cur_volt:[PMIC] volt = %d\n", gpu_volt);
+#else
+	gpufreq_dbg("gpu_dvfs_get_cur_volt: [WARN] no voltage reading available\n");
+#endif
+
+	return gpu_volt;
+}
+
+static void _mt_gpufreq_kick_pbm(int enable)
+{
+#ifndef DISABLE_PBM_FEATURE
+	int i;
+	int tmp_idx = -1;
+	unsigned int found = 0;
+	unsigned int power;
+	unsigned int cur_volt = _mt_gpufreq_get_cur_volt();
+	unsigned int cur_freq = _mt_gpufreq_get_cur_freq();
+
+	if (enable) {
+		for (i = 0; i < mt_gpufreqs_num; i++) {
+			if (mt_gpufreqs_power[i].gpufreq_khz == cur_freq) {
+				/* record idx since current voltage may not in DVFS table */
+				tmp_idx = i;
+
+				if (mt_gpufreqs_power[i].gpufreq_volt == cur_volt) {
+					power = mt_gpufreqs_power[i].gpufreq_power;
+					found = 1;
+					kicker_pbm_by_gpu(true, power, cur_volt / 100);
+					gpufreq_dbg
+						("@%s: request GPU power = %d, cur_volt = %d, cur_freq = %d\n",
+						 __func__, power, cur_volt / 100, cur_freq);
+					return;
+				}
+			}
+		}
+
+		if (!found) {
+			gpufreq_dbg("@%s: tmp_idx = %d\n", __func__, tmp_idx);
+
+			if (tmp_idx != -1 && tmp_idx < mt_gpufreqs_num) {
+				/* use freq to find the corresponding power budget */
+				power = mt_gpufreqs_power[tmp_idx].gpufreq_power;
+				kicker_pbm_by_gpu(true, power, cur_volt / 100);
+				gpufreq_dbg
+					("@%s: request GPU power = %d, cur_volt = %d, cur_freq = %d\n",
+					 __func__, power, cur_volt / 100, cur_freq);
+			} else {
+				gpufreq_warn("@%s: cannot find requested power in power table!\n",
+						 __func__);
+				gpufreq_warn("cur_freq = %dKHz, cur_volt = %dmV\n", cur_freq,
+						 cur_volt / 100);
+			}
+		}
+	} else {
+		kicker_pbm_by_gpu(false, 0, cur_volt / 100);
+	}
+#endif
+}
+
+/*****************************************
+ * frequency ramp up and ramp down handler
+ ******************************************/
+/***********************************************************
+ * [note]
+ * 1. frequency ramp up need to wait voltage settle
+ * 2. frequency ramp down do not need to wait voltage settle
+ ************************************************************/
+static void mt_gpufreq_set(unsigned int freq_old, unsigned int freq_new,
+			   unsigned int volt_old, unsigned int volt_new)
+{
+	gpufreq_dbg("mt_gpufreq_set: volt %x -> %x, freq %x -> %x\n",
+		    volt_old, volt_new, freq_old, freq_new);
+
+	if (freq_new > freq_old) {
+		/* if(volt_old != volt_new) // ??? */
+		/* { */
+		mt_gpufreq_volt_switch(volt_old, volt_new);
+		/* } */
+
+		mt_gpufreq_clock_switch(freq_new);
+	} else {
+		mt_gpufreq_clock_switch(freq_new);
+
+		/* if(volt_old != volt_new) */
+		/* { */
+		mt_gpufreq_volt_switch(volt_old, volt_new);
+		/* } */
+	}
+
+	g_cur_gpu_freq = freq_new;
+	g_cur_gpu_volt = volt_new;
+
+	_mt_gpufreq_kick_pbm(1);
+}
+
+/**********************************
+ * gpufreq target callback function
+ ***********************************/
+/*************************************************
+ * [note]
+ * 1. handle frequency change request
+ * 2. call mt_gpufreq_set to set target frequency
+ **************************************************/
+unsigned int mt_gpufreq_target(unsigned int idx)
+{
+	/* unsigned long flags; */
+	unsigned int target_freq, target_volt, target_idx, target_OPPidx;
+
+#ifdef MT_GPUFREQ_PERFORMANCE_TEST
+	return 0;
+#endif
+
+	mutex_lock(&mt_gpufreq_lock);
+
+	if (mt_gpufreq_pause == true) {
+		gpufreq_warn("GPU DVFS pause!\n");
+		mutex_unlock(&mt_gpufreq_lock);
+		return DRIVER_NOT_READY;
+	}
+
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("GPU DVFS not ready!\n");
+		mutex_unlock(&mt_gpufreq_lock);
+		return DRIVER_NOT_READY;
+	}
+
+	if (mt_gpufreq_volt_enable_state == 0) {
+		gpufreq_dbg("mt_gpufreq_volt_enable_state == 0! return\n");
+		mutex_unlock(&mt_gpufreq_lock);
+		return DRIVER_NOT_READY;
+	}
+#ifdef MT_GPU_DVFS_RANDOM_TEST
+	idx = mt_gpufreq_idx_get(5);
+	gpufreq_dbg("@%s: random test index is %d !\n", __func__, idx);
+#endif
+
+	if (idx > (mt_gpufreqs_num - 1)) {
+		mutex_unlock(&mt_gpufreq_lock);
+		gpufreq_err("@%s: idx out of range! idx = %d\n", __func__, idx);
+		return DRIVER_NOT_READY;
+	}
+
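+	/*
+	 * The override checks below are applied in sequence to target_freq and
+	 * target_volt, so a later clamp overwrites an earlier one; the PTPOD
+	 * override, applied last, always wins.
+	 */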
+	/**********************************
+	 * look up for the target GPU OPP
+	 ***********************************/
+	target_freq = mt_gpufreqs[idx].gpufreq_khz;
+	target_volt = mt_gpufreqs[idx].gpufreq_volt;
+	target_idx = mt_gpufreqs[idx].gpufreq_idx;
+	target_OPPidx = idx;
+
+	gpufreq_dbg("@%s: begin, receive freq: %d, OPPidx: %d\n", __func__, target_freq,
+			target_OPPidx);
+
+	/**********************************
+	 * Check if need to keep max frequency
+	 ***********************************/
+	if (mt_gpufreq_keep_max_freq(g_cur_gpu_freq, target_freq)) {
+		target_freq = mt_gpufreqs[g_gpufreq_max_id].gpufreq_khz;
+		target_volt = mt_gpufreqs[g_gpufreq_max_id].gpufreq_volt;
+		target_idx = mt_gpufreqs[g_gpufreq_max_id].gpufreq_idx;
+		target_OPPidx = g_gpufreq_max_id;
+		gpufreq_dbg("Keep MAX frequency %d !\n", target_freq);
+	}
+
+	/************************************************
+	 * If /proc command keep opp frequency.
+	 *************************************************/
+	if (mt_gpufreq_keep_opp_frequency_state == true) {
+		target_freq = mt_gpufreqs[mt_gpufreq_keep_opp_index].gpufreq_khz;
+		target_volt = mt_gpufreqs[mt_gpufreq_keep_opp_index].gpufreq_volt;
+		target_idx = mt_gpufreqs[mt_gpufreq_keep_opp_index].gpufreq_idx;
+		target_OPPidx = mt_gpufreq_keep_opp_index;
+		gpufreq_dbg("Keep opp! opp frequency %d, opp voltage %d, opp idx %d\n", target_freq,
+				target_volt, target_OPPidx);
+	}
+
+	/************************************************
+	 * If /proc command fix the frequency.
+	 *************************************************/
+	if (mt_gpufreq_fixed_freq_volt_state == true) {
+		target_freq = mt_gpufreq_fixed_frequency;
+		target_volt = mt_gpufreq_fixed_voltage;
+		target_idx = 0;
+		target_OPPidx = 0;
+		gpufreq_dbg("Fixed! fixed frequency %d, fixed voltage %d\n", target_freq,
+				target_volt);
+	}
+
+	/************************************************
+	 * If /proc command keep opp max frequency.
+	 *************************************************/
+	if (mt_gpufreq_opp_max_frequency_state == true) {
+		if (target_freq > mt_gpufreq_opp_max_frequency) {
+			target_freq = mt_gpufreqs[mt_gpufreq_opp_max_index].gpufreq_khz;
+			target_volt = mt_gpufreqs[mt_gpufreq_opp_max_index].gpufreq_volt;
+			target_idx = mt_gpufreqs[mt_gpufreq_opp_max_index].gpufreq_idx;
+			target_OPPidx = mt_gpufreq_opp_max_index;
+
+			gpufreq_dbg
+				("opp max freq! opp max frequency %d, opp max voltage %d, opp max idx %d\n",
+				 target_freq, target_volt, target_OPPidx);
+		}
+	}
+
+	/************************************************
+	 * PBM limit
+	 *************************************************/
+#ifndef DISABLE_PBM_FEATURE
+	if (mt_gpufreq_pbm_limited_index != 0) {
+		if (target_freq > mt_gpufreqs[mt_gpufreq_pbm_limited_index].gpufreq_khz) {
+			/*********************************************
+			 * target_freq > limited_freq, need to adjust
+			 **********************************************/
+			target_freq = mt_gpufreqs[mt_gpufreq_pbm_limited_index].gpufreq_khz;
+			target_volt = mt_gpufreqs[mt_gpufreq_pbm_limited_index].gpufreq_volt;
+			target_OPPidx = mt_gpufreq_pbm_limited_index;
+			gpufreq_dbg("Limit! Thermal/Power limit gpu frequency %d\n",
+					mt_gpufreqs[mt_gpufreq_pbm_limited_index].gpufreq_khz);
+		}
+	}
+#endif
+
+	/************************************************
+	 * Thermal/Power limit
+	 *************************************************/
+	if (g_limited_max_id != 0) {
+		if (target_freq > mt_gpufreqs[g_limited_max_id].gpufreq_khz) {
+			/*********************************************
+			 * target_freq > limited_freq, need to adjust
+			 **********************************************/
+			target_freq = mt_gpufreqs[g_limited_max_id].gpufreq_khz;
+			target_volt = mt_gpufreqs[g_limited_max_id].gpufreq_volt;
+			target_idx = mt_gpufreqs[g_limited_max_id].gpufreq_idx;
+			target_OPPidx = g_limited_max_id;
+			gpufreq_info("Limit! Thermal/Power limit gpu frequency %d\n",
+					 mt_gpufreqs[g_limited_max_id].gpufreq_khz);
+		}
+	}
+
+	/************************************************
+	 * DVFS keep at max freq when PTPOD initial
+	 *************************************************/
+	if (mt_gpufreq_ptpod_disable == true) {
+#if 1
+		target_freq = mt_gpufreqs[mt_gpufreq_ptpod_disable_idx].gpufreq_khz;
+		target_volt = GPU_DVFS_PTPOD_DISABLE_VOLT;
+		target_idx = mt_gpufreqs[mt_gpufreq_ptpod_disable_idx].gpufreq_idx;
+		target_OPPidx = mt_gpufreq_ptpod_disable_idx;
+		gpufreq_dbg("PTPOD disable dvfs, mt_gpufreq_ptpod_disable_idx = %d\n",
+				mt_gpufreq_ptpod_disable_idx);
+#else
+		mutex_unlock(&mt_gpufreq_lock);
+		gpufreq_dbg("PTPOD disable dvfs, return\n");
+		return 0;
+#endif
+	}
+
+	/************************************************
+	 * target frequency == current frequency, skip it
+	 *************************************************/
+	if (g_cur_gpu_freq == target_freq && g_cur_gpu_volt == target_volt) {
+		mutex_unlock(&mt_gpufreq_lock);
+		gpufreq_dbg("GPU frequency from %d KHz to %d KHz (skipped) due to same frequency\n",
+				g_cur_gpu_freq, target_freq);
+		return 0;
+	}
+
+	gpufreq_dbg("GPU current frequency %d KHz, target frequency %d KHz\n", g_cur_gpu_freq,
+			target_freq);
+
+#ifdef MT_GPUFREQ_AEE_RR_REC
+	aee_rr_rec_gpu_dvfs_status(aee_rr_curr_gpu_dvfs_status() | (1 << GPU_DVFS_IS_DOING_DVFS));
+	aee_rr_rec_gpu_dvfs_oppidx(target_OPPidx);
+#endif
+
+	/******************************
+	 * set to the target frequency
+	 *******************************/
+	mt_gpufreq_set(g_cur_gpu_freq, target_freq, g_cur_gpu_volt, target_volt);
+
+	g_cur_gpu_idx = target_idx;
+	g_cur_gpu_OPPidx = target_OPPidx;
+
+#ifdef MT_GPUFREQ_AEE_RR_REC
+	aee_rr_rec_gpu_dvfs_status(aee_rr_curr_gpu_dvfs_status() & ~(1 << GPU_DVFS_IS_DOING_DVFS));
+#endif
+
+	mutex_unlock(&mt_gpufreq_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(mt_gpufreq_target);
+
+
+/********************************************
+ *	   POWER LIMIT RELATED
+ ********************************************/
+enum {
+	IDX_THERMAL_LIMITED,
+	IDX_LOW_BATT_VOLT_LIMITED,
+	IDX_LOW_BATT_VOLUME_LIMITED,
+	IDX_OC_LIMITED,
+
+	NR_IDX_POWER_LIMITED,
+};
+
+/* over-current (OC) protection */
+#ifdef MT_GPUFREQ_OC_PROTECT
+static unsigned int mt_gpufreq_oc_level;
+
+#define MT_GPUFREQ_OC_LIMIT_FREQ_1	 GPU_DVFS_FREQ4	/* limit GPU to 299 MHz on over-current */
+static unsigned int mt_gpufreq_oc_limited_index_0;	/* unlimit frequency, index = 0. */
+static unsigned int mt_gpufreq_oc_limited_index_1;
+static unsigned int mt_gpufreq_oc_limited_index;	/* Limited frequency index for oc */
+#endif
+
+#ifdef MT_GPUFREQ_LOW_BATT_VOLUME_PROTECT
+static unsigned int mt_gpufreq_low_battery_volume;
+
+#define MT_GPUFREQ_LOW_BATT_VOLUME_LIMIT_FREQ_1	 GPU_DVFS_FREQ0
+static unsigned int mt_gpufreq_low_bat_volume_limited_index_0;	/* unlimit frequency, index = 0. */
+static unsigned int mt_gpufreq_low_bat_volume_limited_index_1;
+static unsigned int mt_gpufreq_low_batt_volume_limited_index;	/* Limited frequency index for low battery volume */
+#endif
+
+#ifdef MT_GPUFREQ_LOW_BATT_VOLT_PROTECT
+static unsigned int mt_gpufreq_low_battery_level;
+
+#define MT_GPUFREQ_LOW_BATT_VOLT_LIMIT_FREQ_1	 GPU_DVFS_FREQ0	/* no need to throttle when LV1 */
+#define MT_GPUFREQ_LOW_BATT_VOLT_LIMIT_FREQ_2	 GPU_DVFS_FREQ4
+static unsigned int mt_gpufreq_low_bat_volt_limited_index_0;	/* unlimit frequency, index = 0. */
+static unsigned int mt_gpufreq_low_bat_volt_limited_index_1;
+static unsigned int mt_gpufreq_low_bat_volt_limited_index_2;
+static unsigned int mt_gpufreq_low_batt_volt_limited_index;	/* Limited frequency index for low battery voltage */
+#endif
+
+static unsigned int mt_gpufreq_thermal_limited_gpu_power;	/* thermal limit power */
+static unsigned int mt_gpufreq_prev_thermal_limited_freq;	/* thermal limited freq */
+/* limit frequency index array */
+static unsigned int mt_gpufreq_power_limited_index_array[NR_IDX_POWER_LIMITED] = { 0 };
+
+/************************************************
+ * Apply the most restrictive (lowest frequency)
+ * index among all active power limiters
+ *************************************************/
+static int mt_gpufreq_power_throttle_protect(void)
+{
+	int ret = 0;
+	int i = 0;
+	unsigned int limited_index = 0;
+
+	/* Check lowest frequency in all limitation */
+	for (i = 0; i < NR_IDX_POWER_LIMITED; i++) {
+		if (mt_gpufreq_power_limited_index_array[i] != 0 && limited_index == 0)
+			limited_index = mt_gpufreq_power_limited_index_array[i];
+		else if (mt_gpufreq_power_limited_index_array[i] != 0 && limited_index != 0) {
+			if (mt_gpufreq_power_limited_index_array[i] > limited_index)
+				limited_index = mt_gpufreq_power_limited_index_array[i];
+		}
+	}
+
+	g_limited_max_id = limited_index;
+
+	if (g_pGpufreq_power_limit_notify != NULL)
+		g_pGpufreq_power_limit_notify(g_limited_max_id);
+
+	return ret;
+}
+
+#ifdef MT_GPUFREQ_OC_PROTECT
+/************************************************
+ * GPU frequency adjust interface for oc protect
+ *************************************************/
+static void mt_gpufreq_oc_protect(unsigned int limited_index)
+{
+	mutex_lock(&mt_gpufreq_power_lock);
+
+	gpufreq_dbg("@%s: limited_index = %d\n", __func__, limited_index);
+
+	mt_gpufreq_power_limited_index_array[IDX_OC_LIMITED] = limited_index;
+	mt_gpufreq_power_throttle_protect();
+
+	mutex_unlock(&mt_gpufreq_power_lock);
+}
+
+void mt_gpufreq_oc_callback(enum BATTERY_OC_LEVEL oc_level)
+{
+	gpufreq_dbg("@%s: oc_level = %d\n", __func__, oc_level);
+
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		return;
+	}
+
+	if (g_limited_oc_ignore_state == true) {
+		gpufreq_info("@%s: g_limited_oc_ignore_state == true!\n", __func__);
+		return;
+	}
+
+	mt_gpufreq_oc_level = oc_level;
+
+	/* BATTERY_OC_LEVEL_1: >= 5.5A  */
+	if (oc_level == BATTERY_OC_LEVEL_1) {
+		if (mt_gpufreq_oc_limited_index != mt_gpufreq_oc_limited_index_1) {
+			mt_gpufreq_oc_limited_index = mt_gpufreq_oc_limited_index_1;
+			mt_gpufreq_oc_protect(mt_gpufreq_oc_limited_index_1);	/* limit GPU to 299 MHz */
+		}
+	}
+	/* unlimit gpu */
+	else {
+		if (mt_gpufreq_oc_limited_index != mt_gpufreq_oc_limited_index_0) {
+			mt_gpufreq_oc_limited_index = mt_gpufreq_oc_limited_index_0;
+			mt_gpufreq_oc_protect(mt_gpufreq_oc_limited_index_0);	/* Unlimit */
+		}
+	}
+}
+#endif
+
+#ifdef MT_GPUFREQ_LOW_BATT_VOLUME_PROTECT
+/************************************************
+ * GPU frequency adjust interface for low bat_volume protect
+ *************************************************/
+static void mt_gpufreq_low_batt_volume_protect(unsigned int limited_index)
+{
+	mutex_lock(&mt_gpufreq_power_lock);
+
+	gpufreq_dbg("@%s: limited_index = %d\n", __func__, limited_index);
+
+	mt_gpufreq_power_limited_index_array[IDX_LOW_BATT_VOLUME_LIMITED] = limited_index;
+	mt_gpufreq_power_throttle_protect();
+
+	mutex_unlock(&mt_gpufreq_power_lock);
+}
+
+void mt_gpufreq_low_batt_volume_callback(enum BATTERY_PERCENT_LEVEL low_battery_volume)
+{
+	gpufreq_dbg("@%s: low_battery_volume = %d\n", __func__, low_battery_volume);
+
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		return;
+	}
+
+	if (g_limited_low_batt_volume_ignore_state == true) {
+		gpufreq_info("@%s: g_limited_low_batt_volume_ignore_state == true!\n", __func__);
+		return;
+	}
+
+	mt_gpufreq_low_battery_volume = low_battery_volume;
+
+	/* BATTERY_PERCENT_LEVEL_1: <= 15%, BATTERY_PERCENT_LEVEL_0: > 15% */
+	if (low_battery_volume == BATTERY_PERCENT_LEVEL_1) {
+		if (mt_gpufreq_low_batt_volume_limited_index !=
+			mt_gpufreq_low_bat_volume_limited_index_1) {
+			mt_gpufreq_low_batt_volume_limited_index =
+				mt_gpufreq_low_bat_volume_limited_index_1;
+
+			/* Unlimited */
+			mt_gpufreq_low_batt_volume_protect(mt_gpufreq_low_bat_volume_limited_index_1);
+		}
+	}
+	/* unlimit gpu */
+	else {
+		if (mt_gpufreq_low_batt_volume_limited_index !=
+			mt_gpufreq_low_bat_volume_limited_index_0) {
+			mt_gpufreq_low_batt_volume_limited_index =
+				mt_gpufreq_low_bat_volume_limited_index_0;
+			mt_gpufreq_low_batt_volume_protect(mt_gpufreq_low_bat_volume_limited_index_0);	/* Unlimit */
+		}
+	}
+}
+#endif
+
+
+#ifdef MT_GPUFREQ_LOW_BATT_VOLT_PROTECT
+/************************************************
+ * GPU frequency adjust interface for low bat_volt protect
+ *************************************************/
+static void mt_gpufreq_low_batt_volt_protect(unsigned int limited_index)
+{
+	mutex_lock(&mt_gpufreq_power_lock);
+
+	gpufreq_dbg("@%s: limited_index = %d\n", __func__, limited_index);
+	mt_gpufreq_power_limited_index_array[IDX_LOW_BATT_VOLT_LIMITED] = limited_index;
+	mt_gpufreq_power_throttle_protect();
+
+	mutex_unlock(&mt_gpufreq_power_lock);
+}
+
+/******************************************************
+ * parameter: low_battery_level
+ *******************************************************/
+void mt_gpufreq_low_batt_volt_callback(enum LOW_BATTERY_LEVEL low_battery_level)
+{
+	gpufreq_dbg("@%s: low_battery_level = %d\n", __func__, low_battery_level);
+
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		return;
+	}
+
+	if (g_limited_low_batt_volt_ignore_state == true) {
+		gpufreq_info("@%s: g_limited_low_batt_volt_ignore_state == true!\n", __func__);
+		return;
+	}
+
+	mt_gpufreq_low_battery_level = low_battery_level;
+
+	/* is_low_battery=1:need limit HW, is_low_battery=0:no limit */
+	/* 3.25V HW issue int and is_low_battery=1,
+	 * 3.0V HW issue int and is_low_battery=2,
+	 * 3.5V HW issue int and is_low_battery=0
+	 */
+
+	/* no need to throttle when LV1 */
+#if 0
+	if (low_battery_level == LOW_BATTERY_LEVEL_1) {
+		if (mt_gpufreq_low_batt_volt_limited_index !=
+			mt_gpufreq_low_bat_volt_limited_index_1) {
+			mt_gpufreq_low_batt_volt_limited_index =
+				mt_gpufreq_low_bat_volt_limited_index_1;
+			/* Limit GPU 416Mhz */
+			mt_gpufreq_low_batt_volt_protect(mt_gpufreq_low_bat_volt_limited_index_1);
+		}
+	} else
+#endif
+
+	if (low_battery_level == LOW_BATTERY_LEVEL_2) {
+		if (mt_gpufreq_low_batt_volt_limited_index !=
+			mt_gpufreq_low_bat_volt_limited_index_2) {
+			mt_gpufreq_low_batt_volt_limited_index =
+				mt_gpufreq_low_bat_volt_limited_index_2;
+			/* Limit GPU to 299 MHz */
+			mt_gpufreq_low_batt_volt_protect(mt_gpufreq_low_bat_volt_limited_index_2);
+		}
+	} else {		/* unlimit gpu */
+		if (mt_gpufreq_low_batt_volt_limited_index !=
+			mt_gpufreq_low_bat_volt_limited_index_0) {
+			mt_gpufreq_low_batt_volt_limited_index =
+				mt_gpufreq_low_bat_volt_limited_index_0;
+			/* Unlimit */
+			mt_gpufreq_low_batt_volt_protect(mt_gpufreq_low_bat_volt_limited_index_0);
+		}
+	}
+}
+#endif
+
+/************************************************
+ * frequency adjust interface for thermal protect
+ *************************************************/
+/******************************************************
+ * parameter: target power
+ *******************************************************/
+static unsigned int _mt_gpufreq_get_limited_freq(unsigned int limited_power)
+{
+	int i = 0;
+	unsigned int limited_freq = 0;
+	unsigned int found = 0;
+
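+	/*
+	 * The power table is ordered from highest to lowest frequency, so the
+	 * first entry that fits the budget is the fastest allowed OPP.
+	 */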
+	for (i = 0; i < mt_gpufreqs_num; i++) {
+		if (mt_gpufreqs_power[i].gpufreq_power <= limited_power) {
+			limited_freq = mt_gpufreqs_power[i].gpufreq_khz;
+			found = 1;
+			break;
+		}
+	}
+
+	/* not found */
+	if (!found)
+		limited_freq = mt_gpufreqs_power[mt_gpufreqs_num - 1].gpufreq_khz;
+
+	gpufreq_dbg("@%s: limited_freq = %d\n", __func__, limited_freq);
+
+	return limited_freq;
+}
+
+void mt_gpufreq_thermal_protect(unsigned int limited_power)
+{
+	int i = 0;
+	unsigned int limited_freq = 0;
+
+	mutex_lock(&mt_gpufreq_power_lock);
+
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		mutex_unlock(&mt_gpufreq_power_lock);
+		return;
+	}
+
+	if (mt_gpufreqs_num == 0) {
+		gpufreq_warn("@%s: mt_gpufreqs_num == 0!\n", __func__);
+		mutex_unlock(&mt_gpufreq_power_lock);
+		return;
+	}
+
+	if (g_limited_thermal_ignore_state == true) {
+		gpufreq_info("@%s: g_limited_thermal_ignore_state == true!\n", __func__);
+		mutex_unlock(&mt_gpufreq_power_lock);
+		return;
+	}
+
+	mt_gpufreq_thermal_limited_gpu_power = limited_power;
+
+#ifdef MT_GPUFREQ_DYNAMIC_POWER_TABLE_UPDATE
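+	/*
+	 * Note: mt_update_gpufreqs_power_table() is compiled out above
+	 * (pedro, "#if 0"), so this call -- and the one in
+	 * mt_gpufreq_set_power_limit_by_pbm() -- will not link as written
+	 * unless the function is restored or provided elsewhere.
+	 */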
+	mt_update_gpufreqs_power_table();
+#endif
+
+	if (limited_power == 0)
+		mt_gpufreq_power_limited_index_array[IDX_THERMAL_LIMITED] = 0;
+	else {
+		limited_freq = _mt_gpufreq_get_limited_freq(limited_power);
+
+		for (i = 0; i < mt_gpufreqs_num; i++) {
+			if (mt_gpufreqs[i].gpufreq_khz <= limited_freq) {
+				mt_gpufreq_power_limited_index_array[IDX_THERMAL_LIMITED] = i;
+				break;
+			}
+		}
+	}
+
+	if (mt_gpufreq_prev_thermal_limited_freq != limited_freq) {
+		mt_gpufreq_prev_thermal_limited_freq = limited_freq;
+		mt_gpufreq_power_throttle_protect();
+		if (limited_freq < GPU_DVFS_FREQ5)
+			gpufreq_info("@%s: p %u f %u i %u\n", __func__, limited_power, limited_freq,
+				mt_gpufreq_power_limited_index_array[IDX_THERMAL_LIMITED]);
+	}
+
+	mutex_unlock(&mt_gpufreq_power_lock);
+}
+EXPORT_SYMBOL(mt_gpufreq_thermal_protect);
+
+/* for thermal to update power budget */
+unsigned int mt_gpufreq_get_max_power(void)
+{
+	if (!mt_gpufreqs_power)
+		return 0;
+	else
+		return mt_gpufreqs_power[0].gpufreq_power;
+}
+
+/* for thermal to update power budget */
+unsigned int mt_gpufreq_get_min_power(void)
+{
+	if (!mt_gpufreqs_power)
+		return 0;
+	else
+		return mt_gpufreqs_power[mt_gpufreqs_num - 1].gpufreq_power;
+}
+
+void mt_gpufreq_set_power_limit_by_pbm(unsigned int limited_power)
+{
+#ifndef DISABLE_PBM_FEATURE
+	int i = 0;
+	unsigned int limited_freq = 0;
+
+	mutex_lock(&mt_gpufreq_power_lock);
+
+	if (mt_gpufreq_ready == false) {
+		gpufreq_warn("@%s: GPU DVFS not ready!\n", __func__);
+		mutex_unlock(&mt_gpufreq_power_lock);
+		return;
+	}
+
+	if (mt_gpufreqs_num == 0) {
+		gpufreq_warn("@%s: mt_gpufreqs_num == 0!\n", __func__);
+		mutex_unlock(&mt_gpufreq_power_lock);
+		return;
+	}
+
+	if (g_limited_pbm_ignore_state == true) {
+		gpufreq_info("@%s: g_limited_pbm_ignore_state == true!\n", __func__);
+		mutex_unlock(&mt_gpufreq_power_lock);
+		return;
+	}
+
+	if (limited_power == mt_gpufreq_pbm_limited_gpu_power) {
+		gpufreq_dbg("@%s: limited_power(%d mW) not changed, skip it!\n",
+				__func__, limited_power);
+		mutex_unlock(&mt_gpufreq_power_lock);
+		return;
+	}
+
+	mt_gpufreq_pbm_limited_gpu_power = limited_power;
+
+	gpufreq_dbg("@%s: limited_power = %d\n", __func__, limited_power);
+
+#ifdef MT_GPUFREQ_DYNAMIC_POWER_TABLE_UPDATE
+	mt_update_gpufreqs_power_table();	/* TODO: need to check overhead? */
+#endif
+
+	if (limited_power == 0)
+		mt_gpufreq_pbm_limited_index = 0;
+	else {
+		limited_freq = _mt_gpufreq_get_limited_freq(limited_power);
+
+		for (i = 0; i < mt_gpufreqs_num; i++) {
+			if (mt_gpufreqs[i].gpufreq_khz <= limited_freq) {
+				mt_gpufreq_pbm_limited_index = i;
+				break;
+			}
+		}
+	}
+
+	gpufreq_dbg("PBM limit frequency upper bound to id = %d\n", mt_gpufreq_pbm_limited_index);
+
+	if (g_pGpufreq_power_limit_notify != NULL)
+		g_pGpufreq_power_limit_notify(mt_gpufreq_pbm_limited_index);
+
+	mutex_unlock(&mt_gpufreq_power_lock);
+#endif
+}
+
+#if 0 /* pedro */
+unsigned int mt_gpufreq_get_leakage_mw(void)
+{
+#ifndef DISABLE_PBM_FEATURE
+	int temp = 0;
+#ifdef STATIC_PWR_READY2USE
+	unsigned int cur_vcore = _mt_gpufreq_get_cur_volt() / 100;
+	int leak_power;
+#endif
+
+#ifdef CONFIG_THERMAL
+	temp = get_immediate_gpu_wrap() / 1000;
+#else
+	temp = 40;
+#endif
+
+#ifdef STATIC_PWR_READY2USE
+	leak_power = mt_spower_get_leakage(MT_SPOWER_GPU, cur_vcore, temp);
+	if (mt_gpufreq_volt_enable_state && leak_power > 0)
+		return leak_power;
+	else
+		return 0;
+#else
+	return 130;
+#endif
+
+#else /* DISABLE_PBM_FEATURE */
+	return 0;
+#endif
+}
+#endif
+
+/************************************************
+ * return current GPU thermal limit index
+ *************************************************/
+unsigned int mt_gpufreq_get_thermal_limit_index(void)
+{
+	gpufreq_dbg("current GPU thermal limit index is %d\n", g_limited_max_id);
+	return g_limited_max_id;
+}
+EXPORT_SYMBOL(mt_gpufreq_get_thermal_limit_index);
+
+/************************************************
+ * return current GPU thermal limit frequency
+ *************************************************/
+unsigned int mt_gpufreq_get_thermal_limit_freq(void)
+{
+	gpufreq_dbg("current GPU thermal limit freq is %d MHz\n",
+			mt_gpufreqs[g_limited_max_id].gpufreq_khz / 1000);
+	return mt_gpufreqs[g_limited_max_id].gpufreq_khz;
+}
+EXPORT_SYMBOL(mt_gpufreq_get_thermal_limit_freq);
+
+/************************************************
+ * return current GPU frequency index
+ *************************************************/
+unsigned int mt_gpufreq_get_cur_freq_index(void)
+{
+	gpufreq_dbg("current GPU frequency OPP index is %d\n", g_cur_gpu_OPPidx);
+	return g_cur_gpu_OPPidx;
+}
+EXPORT_SYMBOL(mt_gpufreq_get_cur_freq_index);
+
+/************************************************
+ * return current GPU frequency
+ *************************************************/
+unsigned int mt_gpufreq_get_cur_freq(void)
+{
+#ifdef MTK_GPU_SPM
+	return _mt_gpufreq_get_cur_freq();
+#else
+	gpufreq_dbg("current GPU frequency is %d MHz\n", g_cur_gpu_freq / 1000);
+	return g_cur_gpu_freq;
+#endif
+}
+EXPORT_SYMBOL(mt_gpufreq_get_cur_freq);
+
+/************************************************
+ * return current GPU voltage
+ *************************************************/
+unsigned int mt_gpufreq_get_cur_volt(void)
+{
+#if 0
+	return g_cur_gpu_volt;
+#else
+	return _mt_gpufreq_get_cur_volt();
+#endif
+}
+EXPORT_SYMBOL(mt_gpufreq_get_cur_volt);
+
+/************************************************
+ * register / unregister GPU input boost notifiction CB
+ *************************************************/
+void mt_gpufreq_input_boost_notify_registerCB(gpufreq_input_boost_notify pCB)
+{
+#ifdef MT_GPUFREQ_INPUT_BOOST
+	g_pGpufreq_input_boost_notify = pCB;
+#endif
+}
+EXPORT_SYMBOL(mt_gpufreq_input_boost_notify_registerCB);
+
+/************************************************
+ * register / unregister GPU power limit notifiction CB
+ *************************************************/
+void mt_gpufreq_power_limit_notify_registerCB(gpufreq_power_limit_notify pCB)
+{
+	g_pGpufreq_power_limit_notify = pCB;
+}
+EXPORT_SYMBOL(mt_gpufreq_power_limit_notify_registerCB);
+
+/************************************************
+ * register / unregister ptpod update GPU volt CB
+ *************************************************/
+void mt_gpufreq_update_volt_registerCB(gpufreq_ptpod_update_notify pCB)
+{
+	g_pGpufreq_ptpod_update_notify = pCB;
+}
+EXPORT_SYMBOL(mt_gpufreq_update_volt_registerCB);
+
+/************************************************
+ * register / unregister set GPU freq CB
+ *************************************************/
+void mt_gpufreq_setfreq_registerCB(sampler_func pCB)
+{
+	g_pFreqSampler = pCB;
+}
+EXPORT_SYMBOL(mt_gpufreq_setfreq_registerCB);
+
+/************************************************
+ * register / unregister set GPU volt CB
+ *************************************************/
+void mt_gpufreq_setvolt_registerCB(sampler_func pCB)
+{
+	g_pVoltSampler = pCB;
+}
+EXPORT_SYMBOL(mt_gpufreq_setvolt_registerCB);
+
+/************************************************
+ * for PTPOD to enable/disable GPU external/internal power
+ *************************************************/
+void mt_gpufreq_mfgclock_notify_registerCB(gpufreq_mfgclock_notify pEnableCB,
+					   gpufreq_mfgclock_notify pDisableCB)
+{
+	g_pGpufreq_mfgclock_enable_notify = pEnableCB;
+	g_pGpufreq_mfgclock_disable_notify = pDisableCB;
+}
+EXPORT_SYMBOL(mt_gpufreq_mfgclock_notify_registerCB);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*********************************
+ * early suspend callback function
+ **********************************/
+void mt_gpufreq_early_suspend(struct early_suspend *h)
+{
+	/* mt_gpufreq_state_set(0); */
+
+}
+
+/*******************************
+ * late resume callback function
+ ********************************/
+void mt_gpufreq_late_resume(struct early_suspend *h)
+{
+	/* mt_gpufreq_check_freq_and_set_pll(); */
+
+	/* mt_gpufreq_state_set(1); */
+}
+#endif
+
+static int mt_gpufreq_pm_restore_early(struct device *dev)
+{
+	int i = 0;
+	int found = 0;
+
+	g_cur_gpu_freq = _mt_gpufreq_get_cur_freq();
+
+	for (i = 0; i < mt_gpufreqs_num; i++) {
+		if (g_cur_gpu_freq == mt_gpufreqs[i].gpufreq_khz) {
+			g_cur_gpu_idx = mt_gpufreqs[i].gpufreq_idx;
+			g_cur_gpu_volt = mt_gpufreqs[i].gpufreq_volt;
+			g_cur_gpu_OPPidx = i;
+			found = 1;
+			gpufreq_dbg("match g_cur_gpu_OPPidx: %d\n", g_cur_gpu_OPPidx);
+			break;
+		}
+	}
+
+	if (found == 0) {
+		g_cur_gpu_idx = mt_gpufreqs[0].gpufreq_idx;
+		g_cur_gpu_volt = mt_gpufreqs[0].gpufreq_volt;
+		g_cur_gpu_OPPidx = 0;
+		gpufreq_err("gpu freq not found, set parameter to max freq\n");
+	}
+
+	gpufreq_dbg("GPU freq SW/HW: %d/%d\n", g_cur_gpu_freq, _mt_gpufreq_get_cur_freq());
+	gpufreq_dbg("g_cur_gpu_OPPidx: %d\n", g_cur_gpu_OPPidx);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mt_gpufreq_of_match[] = {
+	{.compatible = "mediatek,mt8167-gpufreq",},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt_gpufreq_of_match);
+#endif
+
+static int mt_gpufreq_pdrv_probe(struct platform_device *pdev)
+{
+#if defined(MT_GPUFREQ_LOW_BATT_VOLT_PROTECT) || \
+	defined(MT_GPUFREQ_LOW_BATT_VOLUME_PROTECT) || defined(MT_GPUFREQ_OC_PROTECT)
+	int i;
+#endif
+
+	mt_gpufreq_dvfs_table_type = mt_gpufreq_get_dvfs_table_type();
+
+	/**********************
+	 * Initialize leakage power usage
+	 ***********************/
+#ifdef STATIC_PWR_READY2USE
+	mt_spower_init();
+#endif
+
+	/**********************
+	 * setup gpufreq table
+	 ***********************/
+	gpufreq_info("setup gpufreqs table\n");
+
+	switch (mt_gpufreq_dvfs_table_type) {
+	case 0:		/* 400MHz */
+		mt_setup_gpufreqs_table(mt_gpufreq_opp_tbl_e1_0,
+					ARRAY_SIZE(mt_gpufreq_opp_tbl_e1_0));
+		break;
+	case 1:		/* 500MHz */
+		mt_setup_gpufreqs_table(mt_gpufreq_opp_tbl_e1_1,
+					ARRAY_SIZE(mt_gpufreq_opp_tbl_e1_1));
+		break;
+	case 2:		/* 600MHz */
+		mt_setup_gpufreqs_table(mt_gpufreq_opp_tbl_e1_2,
+					ARRAY_SIZE(mt_gpufreq_opp_tbl_e1_2));
+		break;
+	default:	/* 400MHz */
+		mt_setup_gpufreqs_table(mt_gpufreq_opp_tbl_e1_0,
+					ARRAY_SIZE(mt_gpufreq_opp_tbl_e1_0));
+		break;
+	}
+
+	/**********************
+	 * setup PMIC init value
+	 ***********************/
+#ifdef VGPU_SET_BY_PMIC
+
+	gpufreq_info("VGPU Enabled (%d) %d mV\n",
+		regulator_is_enabled(mt_gpufreq_pmic->reg_vgpu),
+		_mt_gpufreq_get_cur_volt());
+
+	mt_gpufreq_volt_enable_state = 1;
+#endif /* VGPU_SET_BY_PMIC */
+
+	/**********************
+	 * setup initial frequency
+	 ***********************/
+	mt_gpufreq_set_initial();
+
+	g_cur_freq_init_keep = g_cur_gpu_freq;
+
+	gpufreq_info("GPU current frequency = %dKHz\n", _mt_gpufreq_get_cur_freq());
+	gpufreq_info("Current Vcore = %dmV\n", _mt_gpufreq_get_cur_volt() / 100);
+	gpufreq_info("g_cur_gpu_freq = %d, g_cur_gpu_volt = %d\n", g_cur_gpu_freq, g_cur_gpu_volt);
+	gpufreq_info("g_cur_gpu_idx = %d, g_cur_gpu_OPPidx = %d\n", g_cur_gpu_idx,
+			 g_cur_gpu_OPPidx);
+
+	mt_gpufreq_ready = true;
+
+#ifdef MT_GPUFREQ_LOW_BATT_VOLT_PROTECT
+	for (i = 0; i < mt_gpufreqs_num; i++) {
+		if (mt_gpufreqs[i].gpufreq_khz == MT_GPUFREQ_LOW_BATT_VOLT_LIMIT_FREQ_1) {
+			mt_gpufreq_low_bat_volt_limited_index_1 = i;
+			break;
+		}
+	}
+
+	for (i = 0; i < mt_gpufreqs_num; i++) {
+		if (mt_gpufreqs[i].gpufreq_khz == MT_GPUFREQ_LOW_BATT_VOLT_LIMIT_FREQ_2) {
+			mt_gpufreq_low_bat_volt_limited_index_2 = i;
+			break;
+		}
+	}
+
+	/* register_low_battery_notify(&mt_gpufreq_low_batt_volt_callback, LOW_BATTERY_PRIO_GPU); */
+#endif
+
+#ifdef MT_GPUFREQ_LOW_BATT_VOLUME_PROTECT
+	pr_err("[ERROR] mt_gpufreq_pdrv_probe 17");
+	for (i = 0; i < mt_gpufreqs_num; i++) {
+		if (mt_gpufreqs[i].gpufreq_khz == MT_GPUFREQ_LOW_BATT_VOLUME_LIMIT_FREQ_1) {
+			mt_gpufreq_low_bat_volume_limited_index_1 = i;
+			break;
+		}
+	}
+
+	/* register_battery_percent_notify(&mt_gpufreq_low_batt_volume_callback,*/
+	/*				BATTERY_PERCENT_PRIO_GPU);		*/
+#endif
+
+#ifdef MT_GPUFREQ_OC_PROTECT
+	pr_err("[ERROR] mt_gpufreq_pdrv_probe 18");
+	for (i = 0; i < mt_gpufreqs_num; i++) {
+		if (mt_gpufreqs[i].gpufreq_khz == MT_GPUFREQ_OC_LIMIT_FREQ_1) {
+			mt_gpufreq_oc_limited_index_1 = i;
+			break;
+		}
+	}
+
+	/* register_battery_oc_notify(&mt_gpufreq_oc_callback, BATTERY_OC_PRIO_GPU); */
+#endif
+
+#ifndef DISABLE_PBM_FEATURE
+	INIT_DEFERRABLE_WORK(&notify_pbm_gpuoff_work, mt_gpufreq_notify_pbm_gpuoff);
+#endif
+
+	return 0;
+}
+
+/***************************************
+ * this function should never be called
+ ****************************************/
+static int mt_gpufreq_pdrv_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops mt_gpufreq_pm_ops = {
+	.suspend = NULL,
+	.resume = NULL,
+	.restore_early = mt_gpufreq_pm_restore_early,
+};
+static struct platform_driver mt_gpufreq_pdrv = {
+	.probe = mt_gpufreq_pdrv_probe,
+	.remove = mt_gpufreq_pdrv_remove,
+	.driver = {
+		   .name = "gpufreq",
+		   .pm = &mt_gpufreq_pm_ops,
+		   .owner = THIS_MODULE,
+#ifdef CONFIG_OF
+		   .of_match_table = mt_gpufreq_of_match,
+#endif
+		   },
+};
+
+
+#ifdef CONFIG_PROC_FS
+/*
+ * PROC
+ */
+
+/***************************
+ * show current debug status
+ ****************************/
+static int mt_gpufreq_debug_proc_show(struct seq_file *m, void *v)
+{
+	if (mt_gpufreq_debug)
+		seq_puts(m, "gpufreq debug enabled\n");
+	else
+		seq_puts(m, "gpufreq debug disabled\n");
+
+	return 0;
+}
+
+/***********************
+ * enable debug message
+ ************************/
+static ssize_t mt_gpufreq_debug_proc_write(struct file *file, const char __user *buffer,
+					   size_t count, loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	int debug = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtoint(desc, 0, &debug) == 0) {
+		if (debug == 0)
+			mt_gpufreq_debug = 0;
+		else if (debug == 1)
+			mt_gpufreq_debug = 1;
+		else
+			gpufreq_warn("bad argument!! should be 0 or 1 [0: disable, 1: enable]\n");
+	} else
+		gpufreq_warn("bad argument!! should be 0 or 1 [0: disable, 1: enable]\n");
+
+	return count;
+}
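+
+/*
+ * Illustrative usage (path as created in mt_gpufreq_create_procfs() below):
+ *   echo 1 > /proc/gpufreq/gpufreq_debug    enables the debug messages
+ *   echo 0 > /proc/gpufreq/gpufreq_debug    disables them
+ */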
+
+#ifdef MT_GPUFREQ_OC_PROTECT
+/****************************
+ * show current limited by over current (OC)
+ *****************************/
+static int mt_gpufreq_limited_oc_ignore_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "g_limited_max_id = %d, g_limited_oc_ignore_state = %d\n", g_limited_max_id,
+		   g_limited_oc_ignore_state);
+
+	return 0;
+}
+
+/**********************************
+ * limited for over current protect
+ ***********************************/
+static ssize_t mt_gpufreq_limited_oc_ignore_proc_write(struct file *file,
+							   const char __user *buffer, size_t count,
+							   loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+	unsigned int ignore = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtouint(desc, 0, &ignore) == 0) {
+		if (ignore == 1)
+			g_limited_oc_ignore_state = true;
+		else if (ignore == 0)
+			g_limited_oc_ignore_state = false;
+		else
+			gpufreq_warn
+				("bad argument!! should be 0 or 1 [0: not ignore, 1: ignore]\n");
+	} else
+		gpufreq_warn("bad argument!! should be 0 or 1 [0: not ignore, 1: ignore]\n");
+
+	return count;
+}
+#endif
+
+#ifdef MT_GPUFREQ_LOW_BATT_VOLUME_PROTECT
+/****************************
+ * show current limited by low batt volume
+ *****************************/
+static int mt_gpufreq_limited_low_batt_volume_ignore_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "g_limited_max_id = %d, g_limited_low_batt_volume_ignore_state = %d\n",
+		   g_limited_max_id, g_limited_low_batt_volume_ignore_state);
+
+	return 0;
+}
+
+/**********************************
+ * limited for low batt volume protect
+ ***********************************/
+static ssize_t mt_gpufreq_limited_low_batt_volume_ignore_proc_write(struct file *file,
+									const char __user *buffer,
+									size_t count, loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	unsigned int ignore = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtouint(desc, 0, &ignore) == 0) {
+		if (ignore == 1)
+			g_limited_low_batt_volume_ignore_state = true;
+		else if (ignore == 0)
+			g_limited_low_batt_volume_ignore_state = false;
+		else
+			gpufreq_warn
+				("bad argument!! should be 0 or 1 [0: not ignore, 1: ignore]\n");
+	} else
+		gpufreq_warn("bad argument!! should be 0 or 1 [0: not ignore, 1: ignore]\n");
+
+	return count;
+}
+#endif
+
+#ifdef MT_GPUFREQ_LOW_BATT_VOLT_PROTECT
+/****************************
+ * show current limited by low batt volt
+ *****************************/
+static int mt_gpufreq_limited_low_batt_volt_ignore_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "g_limited_max_id = %d, g_limited_low_batt_volt_ignore_state = %d\n",
+		   g_limited_max_id, g_limited_low_batt_volt_ignore_state);
+
+	return 0;
+}
+
+/**********************************
+ * limited for low batt volt protect
+ ***********************************/
+static ssize_t mt_gpufreq_limited_low_batt_volt_ignore_proc_write(struct file *file,
+								  const char __user *buffer,
+								  size_t count, loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	unsigned int ignore = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtouint(desc, 0, &ignore) == 0) {
+		if (ignore == 1)
+			g_limited_low_batt_volt_ignore_state = true;
+		else if (ignore == 0)
+			g_limited_low_batt_volt_ignore_state = false;
+		else
+			gpufreq_warn
+				("bad argument!! should be 0 or 1 [0: not ignore, 1: ignore]\n");
+	} else
+		gpufreq_warn("bad argument!! should be 0 or 1 [0: not ignore, 1: ignore]\n");
+
+	return count;
+}
+#endif
+
+/****************************
+ * show current limited by thermal
+ *****************************/
+static int mt_gpufreq_limited_thermal_ignore_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "g_limited_max_id = %d, g_limited_thermal_ignore_state = %d\n",
+		   g_limited_max_id, g_limited_thermal_ignore_state);
+
+	return 0;
+}
+
+/**********************************
+ * limited for thermal protect
+ ***********************************/
+static ssize_t mt_gpufreq_limited_thermal_ignore_proc_write(struct file *file,
+								const char __user *buffer,
+								size_t count, loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	unsigned int ignore = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtouint(desc, 0, &ignore) == 0) {
+		if (ignore == 1)
+			g_limited_thermal_ignore_state = true;
+		else if (ignore == 0)
+			g_limited_thermal_ignore_state = false;
+		else
+			gpufreq_warn
+				("bad argument!! should be 0 or 1 [0: not ignore, 1: ignore]\n");
+	} else
+		gpufreq_warn("bad argument!! should be 0 or 1 [0: not ignore, 1: ignore]\n");
+
+	return count;
+}
+
+#ifndef DISABLE_PBM_FEATURE
+/****************************
+ * show current limited by PBM
+ *****************************/
+static int mt_gpufreq_limited_pbm_ignore_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "g_limited_max_id = %d, g_limited_pbm_ignore_state = %d\n", g_limited_max_id,
+		   g_limited_pbm_ignore_state);
+
+	return 0;
+}
+
+/**********************************
+ * limited for PBM protect
+ ***********************************/
+static ssize_t mt_gpufreq_limited_pbm_ignore_proc_write(struct file *file,
+							const char __user *buffer, size_t count,
+							loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	unsigned int ignore = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtouint(desc, 0, &ignore) == 0) {
+		if (ignore == 1)
+			g_limited_pbm_ignore_state = true;
+		else if (ignore == 0)
+			g_limited_pbm_ignore_state = false;
+		else
+			gpufreq_warn
+				("bad argument!! should be 0 or 1 [0: not ignore, 1: ignore]\n");
+	} else
+		gpufreq_warn("bad argument!! should be 0 or 1 [0: not ignore, 1: ignore]\n");
+
+	return count;
+}
+#endif
+
+/****************************
+ * show current limited power
+ *****************************/
+static int mt_gpufreq_limited_power_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "g_limited_max_id = %d, limit frequency = %d\n",
+		   g_limited_max_id, mt_gpufreqs[g_limited_max_id].gpufreq_khz);
+
+	return 0;
+}
+
+/**********************************
+ * limited power for thermal protect
+ ***********************************/
+static ssize_t mt_gpufreq_limited_power_proc_write(struct file *file,
+						   const char __user *buffer,
+						   size_t count, loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	unsigned int power = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtouint(desc, 0, &power) == 0)
+		mt_gpufreq_thermal_protect(power);
+	else
+		gpufreq_warn("bad argument!! please provide the maximum limited power\n");
+
+	return count;
+}
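+
+/*
+ * Illustrative usage: "echo 600 > /proc/gpufreq/gpufreq_limited_power" asks
+ * mt_gpufreq_thermal_protect() to cap the GPU at the highest OPP whose power
+ * fits the given budget (600 is an example value, assumed to be in the same
+ * unit as gpufreq_power in the power table).
+ */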
+
+/****************************
+ * show current limited power by PBM
+ *****************************/
+#ifndef DISABLE_PBM_FEATURE
+static int mt_gpufreq_limited_by_pbm_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "pbm_limited_power = %d, limit index = %d\n",
+		   mt_gpufreq_pbm_limited_gpu_power, mt_gpufreq_pbm_limited_index);
+
+	return 0;
+}
+
+/**********************************
+ * limited power by PBM
+ ***********************************/
+static ssize_t mt_gpufreq_limited_by_pbm_proc_write(struct file *file, const char __user *buffer,
+							size_t count, loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	unsigned int power = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtouint(desc, 0, &power) == 0)
+		mt_gpufreq_set_power_limit_by_pbm(power);
+	else
+		gpufreq_warn("bad argument!! please provide the maximum limited power\n");
+
+	return count;
+}
+#endif
+
+/******************************
+ * show current GPU DVFS status
+ *******************************/
+static int mt_gpufreq_state_proc_show(struct seq_file *m, void *v)
+{
+	if (!mt_gpufreq_pause)
+		seq_puts(m, "GPU DVFS enabled\n");
+	else
+		seq_puts(m, "GPU DVFS disabled\n");
+
+	return 0;
+}
+
+/****************************************
+ * set GPU DVFS status via procfs interface
+ *****************************************/
+static ssize_t mt_gpufreq_state_proc_write(struct file *file,
+					   const char __user *buffer, size_t count, loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	int enabled = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtoint(desc, 0, &enabled) == 0) {
+		if (enabled == 1) {
+			mt_gpufreq_keep_max_frequency_state = false;
+			mt_gpufreq_state_set(1);
+		} else if (enabled == 0) {
+			/* Keep MAX frequency when GPU DVFS disabled. */
+			mt_gpufreq_keep_max_frequency_state = true;
+			mt_gpufreq_voltage_enable_set(1);
+			mt_gpufreq_target(g_gpufreq_max_id);
+			mt_gpufreq_state_set(0);
+		} else
+			gpufreq_warn("bad argument!! argument should be \"1\" or \"0\"\n");
+	} else
+		gpufreq_warn("bad argument!! argument should be \"1\" or \"0\"\n");
+
+	return count;
+}
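+
+/*
+ * Illustrative usage: "echo 0 > /proc/gpufreq/gpufreq_state" disables GPU DVFS
+ * and, per the handler above, pins the maximum frequency; "echo 1" re-enables
+ * DVFS.
+ */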
+
+/********************
+ * show GPU OPP table
+ *********************/
+static int mt_gpufreq_opp_dump_proc_show(struct seq_file *m, void *v)
+{
+	int i = 0;
+
+	for (i = 0; i < mt_gpufreqs_num; i++) {
+		seq_printf(m, "[%d] ", i);
+		seq_printf(m, "freq = %d, ", mt_gpufreqs[i].gpufreq_khz);
+		seq_printf(m, "volt = %d, ", mt_gpufreqs[i].gpufreq_volt);
+		seq_printf(m, "idx = %d\n", mt_gpufreqs[i].gpufreq_idx);
+	}
+
+	return 0;
+}
+
+/********************
+ * show GPU power table
+ *********************/
+static int mt_gpufreq_power_dump_proc_show(struct seq_file *m, void *v)
+{
+	int i = 0;
+
+	for (i = 0; i < mt_gpufreqs_num; i++) {
+		seq_printf(m, "mt_gpufreqs_power[%d].gpufreq_khz = %d\n", i,
+			   mt_gpufreqs_power[i].gpufreq_khz);
+		seq_printf(m, "mt_gpufreqs_power[%d].gpufreq_volt = %d\n", i,
+			   mt_gpufreqs_power[i].gpufreq_volt);
+		seq_printf(m, "mt_gpufreqs_power[%d].gpufreq_power = %d\n", i,
+			   mt_gpufreqs_power[i].gpufreq_power);
+	}
+
+	return 0;
+}
+
+/***************************
+ * show current specific frequency status
+ ****************************/
+static int mt_gpufreq_opp_freq_proc_show(struct seq_file *m, void *v)
+{
+	if (mt_gpufreq_keep_opp_frequency_state) {
+		seq_puts(m, "gpufreq keep opp frequency enabled\n");
+		seq_printf(m, "freq = %d\n", mt_gpufreqs[mt_gpufreq_keep_opp_index].gpufreq_khz);
+		seq_printf(m, "volt = %d\n", mt_gpufreqs[mt_gpufreq_keep_opp_index].gpufreq_volt);
+	} else
+		seq_puts(m, "gpufreq keep opp frequency disabled\n");
+	return 0;
+}
+
+/***********************
+ * enable specific frequency
+ ************************/
+static ssize_t mt_gpufreq_opp_freq_proc_write(struct file *file, const char __user *buffer,
+						  size_t count, loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	int i = 0;
+	int fixed_freq = 0;
+	int found = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtoint(desc, 0, &fixed_freq) == 0) {
+		if (fixed_freq == 0) {
+			mt_gpufreq_keep_opp_frequency_state = false;
+#ifdef MTK_GPU_SPM
+			mtk_gpu_spm_reset_fix();
+#endif
+		} else {
+			for (i = 0; i < mt_gpufreqs_num; i++) {
+				if (fixed_freq == mt_gpufreqs[i].gpufreq_khz) {
+					mt_gpufreq_keep_opp_index = i;
+					found = 1;
+					break;
+				}
+			}
+
+			if (found == 1) {
+				mt_gpufreq_keep_opp_frequency_state = true;
+				mt_gpufreq_keep_opp_frequency = fixed_freq;
+
+#ifndef MTK_GPU_SPM
+				mt_gpufreq_voltage_enable_set(1);
+				mt_gpufreq_target(mt_gpufreq_keep_opp_index);
+#else
+				mtk_gpu_spm_fix_by_idx(mt_gpufreq_keep_opp_index);
+#endif
+			}
+
+		}
+	} else
+		gpufreq_warn("bad argument!! please provide the fixed frequency\n");
+
+	return count;
+}
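+
+/*
+ * Illustrative usage: "echo 500500 > /proc/gpufreq/gpufreq_opp_freq" pins the
+ * OPP whose frequency (in KHz) matches exactly, and "echo 0" releases the pin.
+ * 500500 is only an example; the value must match an entry listed by
+ * gpufreq_opp_dump.
+ */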
+
+/***************************
+ * show current specific frequency status
+ ****************************/
+static int mt_gpufreq_opp_max_freq_proc_show(struct seq_file *m, void *v)
+{
+	if (mt_gpufreq_opp_max_frequency_state) {
+		seq_puts(m, "gpufreq opp max frequency enabled\n");
+		seq_printf(m, "freq = %d\n", mt_gpufreqs[mt_gpufreq_opp_max_index].gpufreq_khz);
+		seq_printf(m, "volt = %d\n", mt_gpufreqs[mt_gpufreq_opp_max_index].gpufreq_volt);
+	} else
+		seq_puts(m, "gpufreq opp max frequency disabled\n");
+
+	return 0;
+}
+
+/***********************
+ * enable specific frequency
+ ************************/
+static ssize_t mt_gpufreq_opp_max_freq_proc_write(struct file *file, const char __user *buffer,
+						  size_t count, loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	int i = 0;
+	int max_freq = 0;
+	int found = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtoint(desc, 0, &max_freq) == 0) {
+		if (max_freq == 0) {
+			mt_gpufreq_opp_max_frequency_state = false;
+		} else {
+			for (i = 0; i < mt_gpufreqs_num; i++) {
+				if (mt_gpufreqs[i].gpufreq_khz <= max_freq) {
+					mt_gpufreq_opp_max_index = i;
+					found = 1;
+					break;
+				}
+			}
+
+			if (found == 1) {
+				mt_gpufreq_opp_max_frequency_state = true;
+				mt_gpufreq_opp_max_frequency =
+					mt_gpufreqs[mt_gpufreq_opp_max_index].gpufreq_khz;
+
+				mt_gpufreq_voltage_enable_set(1);
+				mt_gpufreq_target(mt_gpufreq_opp_max_index);
+			}
+		}
+	} else
+		gpufreq_warn("bad argument!! please provide the maximum limited frequency\n");
+
+	return count;
+}
+
+/********************
+ * show variable dump
+ *********************/
+static int mt_gpufreq_var_dump_proc_show(struct seq_file *m, void *v)
+{
+	int i = 0;
+
+#ifdef MTK_GPU_SPM
+	seq_puts(m, "DVFS_GPU SPM is on\n");
+#endif
+	seq_printf(m, "g_cur_gpu_freq = %d, g_cur_gpu_volt = %d\n", mt_gpufreq_get_cur_freq(),
+		   mt_gpufreq_get_cur_volt());
+	seq_printf(m, "g_cur_gpu_idx = %d, g_cur_gpu_OPPidx = %d\n", g_cur_gpu_idx,
+		   g_cur_gpu_OPPidx);
+	seq_printf(m, "g_limited_max_id = %d\n", g_limited_max_id);
+
+	for (i = 0; i < NR_IDX_POWER_LIMITED; i++)
+		seq_printf(m, "mt_gpufreq_power_limited_index_array[%d] = %d\n", i,
+			   mt_gpufreq_power_limited_index_array[i]);
+
+	seq_printf(m, "_mt_gpufreq_get_cur_freq = %d\n", _mt_gpufreq_get_cur_freq());
+	seq_printf(m, "mt_gpufreq_volt_enable_state = %d\n", mt_gpufreq_volt_enable_state);
+	seq_printf(m, "mt_gpufreq_dvfs_table_type = %d\n", mt_gpufreq_dvfs_table_type);
+	seq_printf(m, "mt_gpufreq_ptpod_disable_idx = %d\n", mt_gpufreq_ptpod_disable_idx);
+
+	return 0;
+}
+
+/***************************
+ * show current voltage enable status
+ ****************************/
+static int mt_gpufreq_volt_enable_proc_show(struct seq_file *m, void *v)
+{
+	if (mt_gpufreq_volt_enable)
+		seq_puts(m, "gpufreq voltage enabled\n");
+	else
+		seq_puts(m, "gpufreq voltage disabled\n");
+
+	return 0;
+}
+
+/***********************
+ * enable/disable gpufreq voltage
+ ************************/
+static ssize_t mt_gpufreq_volt_enable_proc_write(struct file *file, const char __user *buffer,
+						 size_t count, loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	int enable = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtoint(desc, 0, &enable) == 0) {
+		if (enable == 0) {
+			mt_gpufreq_voltage_enable_set(0);
+			mt_gpufreq_volt_enable = false;
+		} else if (enable == 1) {
+			mt_gpufreq_voltage_enable_set(1);
+			mt_gpufreq_volt_enable = true;
+		} else
+			gpufreq_warn("bad argument!! should be 0 or 1 [0: disable, 1: enable]\n");
+	} else
+		gpufreq_warn("bad argument!! should be 0 or 1 [0: disable, 1: enable]\n");
+
+	return count;
+}
+
+/***************************
+ * show current specific frequency status
+ ****************************/
+static int mt_gpufreq_fixed_freq_volt_proc_show(struct seq_file *m, void *v)
+{
+	if (mt_gpufreq_fixed_freq_volt_state) {
+		seq_puts(m, "gpufreq fixed frequency enabled\n");
+		seq_printf(m, "fixed frequency = %d\n", mt_gpufreq_fixed_frequency);
+		seq_printf(m, "fixed voltage = %d\n", mt_gpufreq_fixed_voltage);
+	} else
+		seq_puts(m, "gpufreq fixed frequency disabled\n");
+	return 0;
+}
+
+/***********************
+ * fix GPU frequency and voltage
+ ************************/
+static void _mt_gpufreq_fixed_freq(int fixed_freq)
+{
+	/* freq (KHz) */
+	if ((fixed_freq >= GPUFREQ_LAST_FREQ_LEVEL)
+		&& (fixed_freq <= GPU_DVFS_FREQ0)) {
+		gpufreq_dbg("@ %s, mt_gpufreq_clock_switch1 fix frq = %d, fix volt = %d, volt = %d\n",
+			__func__, mt_gpufreq_fixed_frequency, mt_gpufreq_fixed_voltage, g_cur_gpu_volt);
+		mt_gpufreq_fixed_freq_volt_state = true;
+		mt_gpufreq_fixed_frequency = fixed_freq;
+		mt_gpufreq_fixed_voltage = g_cur_gpu_volt;
+		mt_gpufreq_voltage_enable_set(1);
+		gpufreq_dbg("@ %s, mt_gpufreq_clock_switch2 fix frq = %d, fix volt = %d, volt = %d\n",
+			__func__, mt_gpufreq_fixed_frequency, mt_gpufreq_fixed_voltage, g_cur_gpu_volt);
+		mt_gpufreq_clock_switch(mt_gpufreq_fixed_frequency);
+		g_cur_gpu_freq = mt_gpufreq_fixed_frequency;
+	}
+}
+
+static void _mt_gpufreq_fixed_volt(int fixed_volt)
+{
+	/* volt (mV) */
+#ifdef VGPU_SET_BY_PMIC
+	if (fixed_volt >= (PMIC_MIN_VGPU / 100) &&
+		fixed_volt <= (PMIC_MAX_VGPU / 100)) {
+#endif
+		gpufreq_dbg("@ %s, mt_gpufreq_volt_switch1 fix frq = %d, fix volt = %d, volt = %d\n",
+			__func__, mt_gpufreq_fixed_frequency, mt_gpufreq_fixed_voltage, g_cur_gpu_volt);
+		mt_gpufreq_fixed_freq_volt_state = true;
+		mt_gpufreq_fixed_frequency = g_cur_gpu_freq;
+		mt_gpufreq_fixed_voltage = fixed_volt * 100;
+		mt_gpufreq_voltage_enable_set(1);
+		gpufreq_dbg("@ %s, mt_gpufreq_volt_switch2 fix frq = %d, fix volt = %d, volt = %d\n",
+			__func__, mt_gpufreq_fixed_frequency, mt_gpufreq_fixed_voltage, g_cur_gpu_volt);
+		mt_gpufreq_volt_switch(g_cur_gpu_volt, mt_gpufreq_fixed_voltage);
+		g_cur_gpu_volt = mt_gpufreq_fixed_voltage;
+#ifdef VGPU_SET_BY_PMIC
+	}
+#endif
+}
+
+static ssize_t mt_gpufreq_fixed_freq_volt_proc_write(struct file *file, const char __user *buffer,
+							 size_t count, loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	int fixed_freq = 0;
+	int fixed_volt = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (sscanf(desc, "%d %d", &fixed_freq, &fixed_volt) == 2) {
+		if ((fixed_freq == 0) && (fixed_volt == 0)) {
+			mt_gpufreq_fixed_freq_volt_state = false;
+			mt_gpufreq_fixed_frequency = 0;
+			mt_gpufreq_fixed_voltage = 0;
+#ifdef MTK_GPU_SPM
+			mtk_gpu_spm_reset_fix();
+#endif
+		} else {
+			g_cur_gpu_freq = _mt_gpufreq_get_cur_freq();
+#ifndef MTK_GPU_SPM
+			if (fixed_freq > g_cur_gpu_freq) {
+				_mt_gpufreq_fixed_volt(fixed_volt);
+				_mt_gpufreq_fixed_freq(fixed_freq);
+			} else {
+				_mt_gpufreq_fixed_freq(fixed_freq);
+				_mt_gpufreq_fixed_volt(fixed_volt);
+			}
+#else
+			{
+				int i;
+
+				for (i = 0; i < mt_gpufreqs_num; i++) {
+					if (fixed_freq == mt_gpufreqs[i].gpufreq_khz) {
+						mt_gpufreq_keep_opp_index = i;
+						break;
+					}
+				}
+				mt_gpufreq_fixed_frequency = fixed_freq;
+				mt_gpufreq_fixed_voltage = fixed_volt * 100;
+				mtk_gpu_spm_fix_by_idx(mt_gpufreq_keep_opp_index);
+			}
+#endif
+		}
+	} else
+		gpufreq_warn("bad argument!! should be [enable fixed_freq fixed_volt]\n");
+
+	return count;
+}
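+
+/*
+ * Illustrative usage: "echo 500500 1000 > /proc/gpufreq/gpufreq_fixed_freq_volt"
+ * fixes the GPU at 500500 KHz / 1000 mV (the handler scales the voltage by 100
+ * into PMIC units), and "echo 0 0" releases the fix. Both values are examples
+ * only.
+ */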
+
+#ifdef MT_GPUFREQ_INPUT_BOOST
+/*****************************
+ * show current input boost status
+ ******************************/
+static int mt_gpufreq_input_boost_proc_show(struct seq_file *m, void *v)
+{
+	if (mt_gpufreq_input_boost_state == 1)
+		seq_puts(m, "gpufreq input boost is enabled\n");
+	else
+		seq_puts(m, "gpufreq input boost is disabled\n");
+
+	return 0;
+}
+
+/***************************
+ * enable/disable input boost
+ ****************************/
+static ssize_t mt_gpufreq_input_boost_proc_write(struct file *file, const char __user *buffer,
+						 size_t count, loff_t *data)
+{
+	char desc[32];
+	int len = 0;
+
+	int debug = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtoint(desc, 0, &debug) == 0) {
+		if (debug == 0)
+			mt_gpufreq_input_boost_state = 0;
+		else if (debug == 1)
+			mt_gpufreq_input_boost_state = 1;
+		else
+			gpufreq_warn("bad argument!! should be 0 or 1 [0: disable, 1: enable]\n");
+	} else
+		gpufreq_warn("bad argument!! should be 0 or 1 [0: disable, 1: enable]\n");
+
+	return count;
+}
+
+#endif /* MT_GPUFREQ_INPUT_BOOST */
+
+/***************************
+ * show lowpower frequency opp enable status
+ ****************************/
+static int mt_gpufreq_lpt_enable_proc_show(struct seq_file *m, void *v)
+{
+	seq_puts(m, "not implemented\n");
+
+	return 0;
+}
+
+/***********************
+ * enable lowpower frequency opp
+ ************************/
+static ssize_t mt_gpufreq_lpt_enable_proc_write(struct file *file, const char __user *buffer,
+						 size_t count, loff_t *data)
+{
+
+	gpufreq_warn("not implemented\n");
+#if 0
+	char desc[32];
+	int len = 0;
+
+	int enable = 0;
+
+	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
+	if (copy_from_user(desc, buffer, len))
+		return 0;
+	desc[len] = '\0';
+
+	if (kstrtoint(desc, 0, &enable) == 0) {
+		if (enable == 0)
+			mt_gpufreq_low_power_test_enable = false;
+		else if (enable == 1)
+			mt_gpufreq_low_power_test_enable = true;
+		else
+			gpufreq_warn("bad argument!! should be 0 or 1 [0: disable, 1: enable]\n");
+	} else
+		gpufreq_warn("bad argument!! should be 0 or 1 [0: disable, 1: enable]\n");
+#endif
+
+	return count;
+}
+
+#define PROC_FOPS_RW(name)								\
+	static int mt_ ## name ## _proc_open(struct inode *inode, struct file *file)	\
+{											\
+	return single_open(file, mt_ ## name ## _proc_show, PDE_DATA(inode));		\
+}											\
+static const struct file_operations mt_ ## name ## _proc_fops = {			\
+	.owner		= THIS_MODULE,							\
+	.open		= mt_ ## name ## _proc_open,					\
+	.read		= seq_read,							\
+	.llseek		= seq_lseek,							\
+	.release	= single_release,						\
+	.write		= mt_ ## name ## _proc_write,					\
+}
+
+#define PROC_FOPS_RO(name)								\
+	static int mt_ ## name ## _proc_open(struct inode *inode, struct file *file)	\
+{											\
+	return single_open(file, mt_ ## name ## _proc_show, PDE_DATA(inode));		\
+}											\
+static const struct file_operations mt_ ## name ## _proc_fops = {			\
+	.owner		= THIS_MODULE,							\
+	.open		= mt_ ## name ## _proc_open,					\
+	.read		= seq_read,							\
+	.llseek		= seq_lseek,							\
+	.release	= single_release,						\
+}
+
+#define PROC_ENTRY(name)	{__stringify(name), &mt_ ## name ## _proc_fops}
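+
+/*
+ * For reference: PROC_FOPS_RW(gpufreq_debug) expands to an
+ * mt_gpufreq_debug_proc_open() helper plus an mt_gpufreq_debug_proc_fops that
+ * wires the mt_gpufreq_debug_proc_show()/_write() handlers above through
+ * seq_file; PROC_ENTRY(gpufreq_debug) then pairs the entry name with those
+ * fops for mt_gpufreq_create_procfs() below.
+ */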
+
+PROC_FOPS_RW(gpufreq_debug);
+PROC_FOPS_RW(gpufreq_limited_power);
+#ifdef MT_GPUFREQ_OC_PROTECT
+PROC_FOPS_RW(gpufreq_limited_oc_ignore);
+#endif
+#ifdef MT_GPUFREQ_LOW_BATT_VOLUME_PROTECT
+PROC_FOPS_RW(gpufreq_limited_low_batt_volume_ignore);
+#endif
+#ifdef MT_GPUFREQ_LOW_BATT_VOLT_PROTECT
+PROC_FOPS_RW(gpufreq_limited_low_batt_volt_ignore);
+#endif
+PROC_FOPS_RW(gpufreq_limited_thermal_ignore);
+#ifndef DISABLE_PBM_FEATURE
+PROC_FOPS_RW(gpufreq_limited_pbm_ignore);
+PROC_FOPS_RW(gpufreq_limited_by_pbm);
+#endif
+PROC_FOPS_RW(gpufreq_state);
+PROC_FOPS_RO(gpufreq_opp_dump);
+PROC_FOPS_RO(gpufreq_power_dump);
+PROC_FOPS_RW(gpufreq_opp_freq);
+PROC_FOPS_RW(gpufreq_opp_max_freq);
+PROC_FOPS_RO(gpufreq_var_dump);
+PROC_FOPS_RW(gpufreq_volt_enable);
+PROC_FOPS_RW(gpufreq_fixed_freq_volt);
+#ifdef MT_GPUFREQ_INPUT_BOOST
+PROC_FOPS_RW(gpufreq_input_boost);
+#endif
+PROC_FOPS_RW(gpufreq_lpt_enable);
+
+static int mt_gpufreq_create_procfs(void)
+{
+	struct proc_dir_entry *dir = NULL;
+	int i;
+
+	struct pentry {
+		const char *name;
+		const struct file_operations *fops;
+	};
+
+	const struct pentry entries[] = {
+		PROC_ENTRY(gpufreq_debug),
+		PROC_ENTRY(gpufreq_limited_power),
+#ifdef MT_GPUFREQ_OC_PROTECT
+		PROC_ENTRY(gpufreq_limited_oc_ignore),
+#endif
+#ifdef MT_GPUFREQ_LOW_BATT_VOLUME_PROTECT
+		PROC_ENTRY(gpufreq_limited_low_batt_volume_ignore),
+#endif
+#ifdef MT_GPUFREQ_LOW_BATT_VOLT_PROTECT
+		PROC_ENTRY(gpufreq_limited_low_batt_volt_ignore),
+#endif
+		PROC_ENTRY(gpufreq_limited_thermal_ignore),
+#ifndef DISABLE_PBM_FEATURE
+		PROC_ENTRY(gpufreq_limited_pbm_ignore),
+		PROC_ENTRY(gpufreq_limited_by_pbm),
+#endif
+		PROC_ENTRY(gpufreq_state),
+		PROC_ENTRY(gpufreq_opp_dump),
+		PROC_ENTRY(gpufreq_power_dump),
+		PROC_ENTRY(gpufreq_opp_freq),
+		PROC_ENTRY(gpufreq_opp_max_freq),
+		PROC_ENTRY(gpufreq_var_dump),
+		PROC_ENTRY(gpufreq_volt_enable),
+		PROC_ENTRY(gpufreq_fixed_freq_volt),
+#ifdef MT_GPUFREQ_INPUT_BOOST
+		PROC_ENTRY(gpufreq_input_boost),
+#endif
+		PROC_ENTRY(gpufreq_lpt_enable),
+	};
+
+
+	dir = proc_mkdir("gpufreq", NULL);
+
+	if (!dir) {
+		gpufreq_err("fail to create /proc/gpufreq @ %s()\n", __func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(entries); i++) {
+		if (!proc_create
+			(entries[i].name, S_IRUGO | S_IWUSR | S_IWGRP, dir, entries[i].fops))
+			gpufreq_err("@%s: create /proc/gpufreq/%s failed\n", __func__,
+					entries[i].name);
+	}
+
+	return 0;
+}
+#endif				/* CONFIG_PROC_FS */
+
+/**********************************
+ * mediatek gpufreq initialization
+ ***********************************/
+static int __init mt_gpufreq_init(void)
+{
+	int ret = 0;
+
+#ifdef BRING_UP
+	/* Skip driver init in bring up stage */
+	return 0;
+#endif
+	gpufreq_info("@%s\n", __func__);
+
+#ifdef CONFIG_PROC_FS
+
+	/* init proc */
+	if (mt_gpufreq_create_procfs())
+		goto out;
+
+#endif				/* CONFIG_PROC_FS */
+
+	/* register platform device/driver */
+#if !defined(CONFIG_OF)
+	ret = platform_device_register(&mt_gpufreq_pdev);
+	if (ret) {
+		gpufreq_err("fail to register gpufreq device @ %s()\n", __func__);
+		goto out;
+	}
+#endif
+	ret = platform_driver_register(&mt_gpufreq_pdrv);
+	if (ret) {
+		gpufreq_err("fail to register gpufreq driver @ %s()\n", __func__);
+#if !defined(CONFIG_OF)
+		platform_device_unregister(&mt_gpufreq_pdev);
+#endif
+	}
+
+out:
+	return ret;
+}
+
+static void __exit mt_gpufreq_exit(void)
+{
+	platform_driver_unregister(&mt_gpufreq_pdrv);
+#if !defined(CONFIG_OF)
+	platform_device_unregister(&mt_gpufreq_pdev);
+#endif
+}
+
+module_init(mt_gpufreq_init);
+module_exit(mt_gpufreq_exit);
+
+MODULE_DESCRIPTION("MediaTek GPU Frequency Scaling driver");
+MODULE_LICENSE("GPL");
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mtk_gpufreq.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mtk_gpufreq.h
new file mode 100644
index 0000000..a516ff2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mtk_gpufreq.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT_GPUFREQ_H
+#define _MT_GPUFREQ_H
+
+#include <linux/module.h>
+#include <linux/clk.h>
+
+#define MAX_VCO_VALUE	4000000
+#define MIN_VCO_VALUE	2000000
+
+#define DIV4_MAX_FREQ	994500
+#define DIV4_MIN_FREQ	500500
+#define DIV8_MAX_FREQ   397800
+#define DIV8_MIN_FREQ   250000
+#define DIV16_MAX_FREQ   248625
+#define DIV16_MIN_FREQ   125215
+
+#define TO_MHz_HEAD 100
+#define TO_MHz_TAIL 10
+#define ROUNDING_VALUE 5
+#define DDS_SHIFT 14
+#define POST_DIV_SHIFT 28
+#define POST_DIV_MASK 0x70000000
+#define GPUPLL_FIN 26
+
+
+enum post_div_order_enum {
+	POST_DIV2 = 1,
+	POST_DIV4,
+	POST_DIV8,
+	POST_DIV16,
+};
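+
+/*
+ * Rough relationship between the constants above (an assumption based on the
+ * usual MediaTek PLL layout, not spelled out in this file):
+ *   fout = GPUPLL_FIN(26 MHz) * DDS / 2^DDS_SHIFT / 2^post_div_order
+ * with the usable output range per divider bounded by the DIVn_MIN/MAX_FREQ
+ * values.
+ */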
+
+struct mt_gpufreq_table_info {
+	unsigned int gpufreq_khz;
+	unsigned int gpufreq_volt;
+	unsigned int gpufreq_idx;
+};
+
+struct mt_gpufreq_power_table_info {
+	unsigned int gpufreq_khz;
+	unsigned int gpufreq_volt;
+	unsigned int gpufreq_power;
+};
+
+struct mt_gpufreq_clk_t {
+	struct clk *clk_mux;          /* main clock for mfg setting */
+	struct clk *clk_main_parent;	 /* substitution clock for mfg transient mux setting */
+	struct clk *clk_sub_parent;	 /* substitution clock for mfg transient parent setting */
+};
+
+struct mt_gpufreq_pmic_t {
+	struct regulator *reg_vgpu;		/* vgpu regulator */
+};
+
+/*****************
+ * extern function
+ ******************/
+extern int mt_gpufreq_state_set(int enabled);
+extern void mt_gpufreq_thermal_protect(unsigned int limited_power);
+extern unsigned int mt_gpufreq_get_cur_freq_index(void);
+extern unsigned int mt_gpufreq_get_cur_freq(void);
+extern unsigned int mt_gpufreq_get_cur_volt(void);
+extern unsigned int mt_gpufreq_get_dvfs_table_num(void);
+extern unsigned int mt_gpufreq_target(unsigned int idx);
+extern unsigned int mt_gpufreq_voltage_enable_set(unsigned int enable);
+extern unsigned int mt_gpufreq_update_volt(unsigned int pmic_volt[], unsigned int array_size);
+extern unsigned int mt_gpufreq_get_freq_by_idx(unsigned int idx);
+extern unsigned int mt_gpufreq_get_volt_by_idx(unsigned int idx);
+extern void mt_gpufreq_restore_default_volt(void);
+extern void mt_gpufreq_enable_by_ptpod(void);
+extern void mt_gpufreq_disable_by_ptpod(void);
+extern unsigned int mt_gpufreq_get_max_power(void);
+extern unsigned int mt_gpufreq_get_min_power(void);
+extern unsigned int mt_gpufreq_get_thermal_limit_index(void);
+extern unsigned int mt_gpufreq_get_thermal_limit_freq(void);
+extern void mt_gpufreq_set_power_limit_by_pbm(unsigned int limited_power);
+
+extern unsigned int mt_get_mfgclk_freq(void);	/* Freq Meter API */
+extern u32 get_devinfo_with_index(u32 index);
+extern int mt_gpufreq_fan53555_init(void);
+/* #ifdef MT_GPUFREQ_AEE_RR_REC */
+extern void aee_rr_rec_gpu_dvfs_vgpu(u8 val);
+extern void aee_rr_rec_gpu_dvfs_oppidx(u8 val);
+extern void aee_rr_rec_gpu_dvfs_status(u8 val);
+extern u8 aee_rr_curr_gpu_dvfs_status(void);
+/* #endif */
+
+/*****************
+ * power limit notification
+ ******************/
+typedef void (*gpufreq_power_limit_notify)(unsigned int);
+extern void mt_gpufreq_power_limit_notify_registerCB(gpufreq_power_limit_notify pCB);
+
+/*****************
+ * input boost notification
+ ******************/
+typedef void (*gpufreq_input_boost_notify)(unsigned int);
+extern void mt_gpufreq_input_boost_notify_registerCB(gpufreq_input_boost_notify pCB);
+
+/*****************
+ * update voltage notification
+ ******************/
+typedef void (*gpufreq_ptpod_update_notify)(void);
+extern void mt_gpufreq_update_volt_registerCB(gpufreq_ptpod_update_notify pCB);
+
+/*****************
+ * profiling purpose
+ ******************/
+typedef void (*sampler_func)(unsigned int);
+extern void mt_gpufreq_setfreq_registerCB(sampler_func pCB);
+extern void mt_gpufreq_setvolt_registerCB(sampler_func pCB);
+
+extern void switch_mfg_clk(int src);
+
+/*****************
+ * PTPOD enable/disable GPU power domain
+ ******************/
+typedef void (*gpufreq_mfgclock_notify)(void);
+extern void mt_gpufreq_mfgclock_notify_registerCB(
+		gpufreq_mfgclock_notify pEnableCB, gpufreq_mfgclock_notify pDisableCB);
+extern bool mt_gpucore_ready(void);
+
+
+#ifdef MTK_GPU_SPM
+void mtk_gpu_spm_fix_by_idx(unsigned int idx);
+void mtk_gpu_spm_reset_fix(void);
+void mtk_gpu_spm_pause(void);
+void mtk_gpu_spm_resume(void);
+#endif
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mtk_mfgsys.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mtk_mfgsys.c
new file mode 100644
index 0000000..761040c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mtk_mfgsys.c
@@ -0,0 +1,823 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/reboot.h>
+#include <linux/version.h>
+#include <linux/notifier.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeup.h>
+
+#include "mtk_mfgsys.h"
+/* pedro  #include <mtk_boot.h>     */
+/* pedro  #include <mtk_gpufreq.h>  */
+/* pedro  #include "pvr_gputrace.h" */
+#include "rgxdevice.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "rgxhwperf.h"
+#include "device.h"
+#include "rgxinit.h"
+/* pedro  #include "mtk_chip.h"    */
+
+#ifdef CONFIG_MTK_HIBERNATION
+#include "sysconfig.h"
+#include <mach/mtk_hibernate_dpm.h>
+#include <mach/mt_irq.h>
+#include <mach/irqs.h>
+#endif
+
+/* pedro  #include <trace/events/mtk_events.h> */
+/* pedro  #include <mtk_gpu_utility.h> */
+
+
+#ifdef MTK_CAL_POWER_INDEX
+static IMG_PVOID g_pvRegsBaseKM;
+#define MTK_WAIT_FW_RESPONSE_TIMEOUT_US 5000
+#define MTK_GPIO_REG_OFFSET             0x30
+#define MTK_RGX_DEVICE_INDEX_INVALID    -1
+#endif
+
+static IMG_UINT32 gpu_debug_enable;
+static IMG_BOOL g_bDeviceInit;
+
+static IMG_BOOL g_bUnsync;
+static IMG_UINT32 g_ui32_unsync_freq_id;
+static IMG_BOOL bCoreinitSucceeded;
+
+static struct platform_device *sPVRLDMDev;
+static struct platform_device *sMFGASYNCDev;
+static struct platform_device *sMFG2DDev;
+#define GET_MTK_MFG_BASE(x) (struct mtk_mfg_base *)(x->dev.platform_data)
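+/* mtk_mfg_base is stashed in pdev->dev.platform_data by MTKRGXDeviceInit() below */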
+
+static const char * const top_mfg_clk_sel_name[] = {
+	"mfg_slow_in_sel",
+	"mfg_axi_in_sel",
+	"mfg_mm_in_sel",
+};
+
+static const char * const top_mfg_clk_sel_parent_name[] = {
+	"slow_clk26m",
+	"bus_mainpll_d11",
+	"engine_csw_mux",
+};
+
+static const char * const top_mfg_clk_name[] = {
+	"top_slow",
+	"top_axi",
+	"top_mm",
+};
+
+#define TOP_MFG_CLK_SLOW    0
+#define TOP_MFG_CLK_AXI     1
+#define TOP_MFG_CLK_MM      2
+#define MAX_TOP_MFG_CLK ARRAY_SIZE(top_mfg_clk_name)
+
+#define REG_MFG_AXI BIT(0)
+#define REG_MFG_MEM BIT(1)
+#define REG_MFG_G3D BIT(2)
+#define REG_MFG_26M BIT(3)
+#define REG_MFG_ALL (REG_MFG_AXI | REG_MFG_MEM | REG_MFG_G3D | REG_MFG_26M)
+
+#define REG_MFG_CG_STA 0x00
+#define REG_MFG_CG_SET 0x04
+#define REG_MFG_CG_CLR 0x08
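+
+/*
+ * The MFG clock-gate block follows the common set/clear register pattern:
+ * writing a REG_MFG_* bit to REG_MFG_CG_SET gates that clock, writing it to
+ * REG_MFG_CG_CLR un-gates it, and REG_MFG_CG_STA holds the current state (see
+ * mtk_mfg_set_clock_gating()/mtk_mfg_clr_clock_gating() below).
+ */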
+
+#ifdef CONFIG_MTK_HIBERNATION
+int gpu_pm_restore_noirq(struct device *device)
+{
+#if defined(MTK_CONFIG_OF) && defined(CONFIG_OF)
+	int irq = MTKSysGetIRQ();
+#else
+	int irq = SYS_MTK_RGX_IRQ;
+#endif
+	mt_irq_set_sens(irq, MT_LEVEL_SENSITIVE);
+	mt_irq_set_polarity(irq, MT_POLARITY_LOW);
+	return 0;
+}
+#endif
+
+static PVRSRV_DEVICE_NODE *MTKGetRGXDevNode(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	IMG_UINT32 i;
+
+	for (i = 0; i < psPVRSRVData->ui32RegisteredDevices; i++) {
+		PVRSRV_DEVICE_NODE *psDeviceNode = &psPVRSRVData->psDeviceNodeList[i];
+
+		if (psDeviceNode && psDeviceNode->psDevConfig)
+			return psDeviceNode;
+	}
+	return NULL;
+}
+
+#define MTKCLK_prepare_enable(clk)								\
+	do {											\
+		if (clk) {									\
+			if (clk_prepare_enable(clk))						\
+				pr_debug("PVR_K: clk_prepare_enable failed when enabling " #clk);\
+		}										\
+	} while (0)
+
+#define MTKCLK_disable_unprepare(clk)			\
+	do {						\
+		if (clk)				\
+			clk_disable_unprepare(clk);	\
+	} while (0)
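+
+/* Both helpers tolerate a NULL clk, so optional clocks can be passed unconditionally. */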
+
+
+static void mtk_mfg_set_clock_gating(void __iomem *reg)
+{
+	writel(REG_MFG_ALL, reg + REG_MFG_CG_SET);
+}
+
+static void mtk_mfg_clr_clock_gating(void __iomem *reg)
+{
+	writel(REG_MFG_ALL, reg + REG_MFG_CG_CLR);
+}
+
+#if defined(MTK_USE_HW_APM)
+static void mtk_mfg_enable_hw_apm(void)
+{
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	writel(0x01a80000, mfg_base->reg_base + 0x504);
+	writel(0x00080010, mfg_base->reg_base + 0x508);
+	writel(0x00080010, mfg_base->reg_base + 0x50c);
+	writel(0x00b800b8, mfg_base->reg_base + 0x510);
+	writel(0x00b000b0, mfg_base->reg_base + 0x514);
+	writel(0x00c000c8, mfg_base->reg_base + 0x518);
+	writel(0x00c000c8, mfg_base->reg_base + 0x51c);
+	writel(0x00d000d8, mfg_base->reg_base + 0x520);
+	writel(0x00d800d8, mfg_base->reg_base + 0x524);
+	writel(0x00d800d8, mfg_base->reg_base + 0x528);
+	writel(0x9000001b, mfg_base->reg_base + 0x24);
+	writel(0x8000001b, mfg_base->reg_base + 0x24);
+}
+static void mtk_mfg_disable_hw_apm(void) {}
+#else
+static void mtk_mfg_enable_hw_apm(void) {}
+static void mtk_mfg_disable_hw_apm(void) {}
+#endif /* MTK_USE_HW_APM */
+
+static void mtk_mfg_enable_clock(void)
+{
+	int i;
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	/*
+	 * Hold a wakelock while MFG is powered on to prevent suspend while the
+	 * GPU is active. Once the system suspend flow has started, power-domain
+	 * control must be forbidden; otherwise the async/2d/mfg domains may be
+	 * toggled out of sequence.
+	 */
+	pm_stay_awake(&mfg_base->mfg_async_pdev->dev);
+
+	/* Resume mfg power domain */
+	pm_runtime_get_sync(&mfg_base->mfg_async_pdev->dev);
+	pm_runtime_get_sync(&mfg_base->mfg_2d_pdev->dev);
+#if !defined(MTK_USE_HW_APM)
+	pm_runtime_get_sync(&mfg_base->pdev->dev);
+#endif
+
+	/* Prepare and enable mfg top clock */
+	for (i = 0; i < MAX_TOP_MFG_CLK; i++)
+		MTKCLK_prepare_enable(mfg_base->top_clk[i]);
+
+	/* Enable(un-gated) mfg clock */
+	mtk_mfg_clr_clock_gating(mfg_base->reg_base);
+}
+
+static void mtk_mfg_disable_clock(void)
+{
+	int i;
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	/* Disable(gated) mfg clock */
+	mtk_mfg_set_clock_gating(mfg_base->reg_base);
+
+	/* Disable and unprepare mfg top clock */
+	for (i = MAX_TOP_MFG_CLK - 1; i >= 0; i--)
+		MTKCLK_disable_unprepare(mfg_base->top_clk[i]);
+
+	/* Suspend mfg power domain */
+#if !defined(MTK_USE_HW_APM)
+	pm_runtime_put_sync(&mfg_base->pdev->dev);
+#endif
+	pm_runtime_put_sync(&mfg_base->mfg_2d_pdev->dev);
+	pm_runtime_put_sync(&mfg_base->mfg_async_pdev->dev);
+
+	/* Release wakelock when mfg power-off */
+	pm_relax(&mfg_base->mfg_async_pdev->dev);
+}
+
+static int mfg_notify_handler(struct notifier_block *this, unsigned long code,
+			      void *unused)
+{
+	struct mtk_mfg_base *mfg_base = container_of(this,
+						     typeof(*mfg_base),
+						     mfg_notifier);
+	if ((code != SYS_RESTART) && (code != SYS_POWER_OFF))
+		return 0;
+
+	pr_info("PVR_K: shutdown notified, code=%x\n", (unsigned int)code);
+
+	mutex_lock(&mfg_base->set_power_state);
+
+	/* Workaround: keep the clocks running across shutdown, since the GPU may still have unfinished commands */
+	mtk_mfg_enable_clock();
+	mtk_mfg_enable_hw_apm();
+
+	mfg_base->shutdown = true;
+
+	mutex_unlock(&mfg_base->set_power_state);
+
+	return 0;
+}
+
+static void MTKEnableMfgClock(void)
+{
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	mutex_lock(&mfg_base->set_power_state);
+
+	if (!mfg_base->shutdown) {
+		mtk_mfg_enable_clock();
+		mtk_mfg_enable_hw_apm();
+	}
+
+	mutex_unlock(&mfg_base->set_power_state);
+}
+
+static void MTKDisableMfgClock(void)
+{
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	mutex_lock(&mfg_base->set_power_state);
+
+	if (!mfg_base->shutdown) {
+		mtk_mfg_disable_hw_apm();
+		mtk_mfg_disable_clock();
+	}
+
+	mutex_unlock(&mfg_base->set_power_state);
+}
+
+#ifdef MTK_CAL_POWER_INDEX
+static IMG_UINT32 MTKGetRGXDevIdx(void)
+{
+	static IMG_UINT32 ms_ui32RGXDevIdx = MTK_RGX_DEVICE_INDEX_INVALID;
+
+	if (ms_ui32RGXDevIdx == MTK_RGX_DEVICE_INDEX_INVALID) {
+		PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+		IMG_UINT32 i;
+
+		for (i = 0; i < psPVRSRVData->ui32RegisteredDevices; i++) {
+			PVRSRV_DEVICE_NODE *psDeviceNode = &psPVRSRVData->psDeviceNodeList[i];
+
+			if (psDeviceNode && psDeviceNode->psDevConfig) {
+				ms_ui32RGXDevIdx = i;
+				break;
+			}
+		}
+	}
+
+	return ms_ui32RGXDevIdx;
+}
+
+static void MTKStartPowerIndex(void)
+{
+	if (!g_pvRegsBaseKM) {
+		PVRSRV_DEVICE_NODE *psDevNode = MTKGetRGXDevNode();
+
+		if (psDevNode) {
+			PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+
+			if (psDevInfo)
+				g_pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+		}
+	}
+
+	if (g_pvRegsBaseKM)
+		DRV_WriteReg32(g_pvRegsBaseKM + (uintptr_t)0x6320, 0x1);
+}
+
+static void MTKReStartPowerIndex(void)
+{
+	if (g_pvRegsBaseKM)
+		DRV_WriteReg32(g_pvRegsBaseKM + (uintptr_t)0x6320, 0x1);
+}
+
+static void MTKStopPowerIndex(void)
+{
+	if (g_pvRegsBaseKM)
+		DRV_WriteReg32(g_pvRegsBaseKM + (uintptr_t)0x6320, 0x0);
+}
+
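+/*
+ * Summary of the power-index handshake with the RGX firmware implemented
+ * below:
+ *   1. set GPIO_INPUT_REQ (bit[0]) and poll GPIO_OUTPUT_REQ (bit[16]) until
+ *      the firmware responds;
+ *   2. read the state (bit[24]) and the POWER_ESTIMATE_RESULT register;
+ *   3. ack via GPIO_OUTPUT_ACK (bit[17]) and wait for bit[16] to clear.
+ * A timeout (MTK_WAIT_FW_RESPONSE_TIMEOUT_US) at any step aborts and returns 0.
+ */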
+static IMG_UINT32 MTKCalPowerIndex(void)
+{
+	IMG_UINT32 ui32State, ui32Result;
+	PVRSRV_DEV_POWER_STATE  ePowerState;
+	IMG_BOOL bTimeout;
+	IMG_UINT32 u32Deadline;
+	IMG_PVOID pvGPIO_REG = g_pvRegsKM + (uintptr_t)MTK_GPIO_REG_OFFSET;
+	IMG_PVOID pvPOWER_ESTIMATE_RESULT = g_pvRegsBaseKM + (uintptr_t)0x6328; /* assumed hex offset, cf. 0x6320 above */
+
+	if ((!g_pvRegsKM) || (!g_pvRegsBaseKM))
+		return 0;
+
+	if (PVRSRVPowerLock() != PVRSRV_OK)
+		return 0;
+
+	PVRSRVGetDevicePowerState(MTKGetRGXDevIdx(), &ePowerState);
+	if (ePowerState != PVRSRV_DEV_POWER_STATE_ON) {
+		PVRSRVPowerUnlock();
+		return 0;
+	}
+
+	/* writes 1 to GPIO_INPUT_REQ, bit[0] */
+	DRV_WriteReg32(pvGPIO_REG, DRV_Reg32(pvGPIO_REG) | 0x1);
+
+	/* wait for 1 in GPIO_OUTPUT_REQ, bit[16] */
+	bTimeout = IMG_TRUE;
+	u32Deadline = OSClockus() + MTK_WAIT_FW_RESPONSE_TIMEOUT_US;
+	while (OSClockus() < u32Deadline) {
+		if (0x10000 & DRV_Reg32(pvGPIO_REG)) {
+			bTimeout = IMG_FALSE;
+			break;
+		}
+	}
+
+	/* writes 0 to GPIO_INPUT_REQ, bit[0] */
+	DRV_WriteReg32(pvGPIO_REG, DRV_Reg32(pvGPIO_REG) & (~0x1));
+	if (bTimeout) {
+		PVRSRVPowerUnlock();
+		return 0;
+	}
+
+	/* read GPIO_OUTPUT_DATA, bit[24] */
+	ui32State = DRV_Reg32(pvGPIO_REG) >> 24;
+
+	/* read POWER_ESTIMATE_RESULT */
+	ui32Result = DRV_Reg32(pvPOWER_ESTIMATE_RESULT);
+
+	/*writes 1 to GPIO_OUTPUT_ACK, bit[17]*/
+	DRV_WriteReg32(pvGPIO_REG, DRV_Reg32(pvGPIO_REG)|0x20000);
+
+	/* wait for 0 in GPIO_OUTPUT_REG, bit[16] */
+	bTimeout = IMG_TRUE;
+	u32Deadline = OSClockus() + MTK_WAIT_FW_RESPONSE_TIMEOUT_US;
+	while (OSClockus() < u32Deadline) {
+		if (!(0x10000 & DRV_Reg32(pvGPIO_REG))) {
+			bTimeout = IMG_FALSE;
+			break;
+		}
+	}
+
+	/* writes 0 to GPIO_OUTPUT_ACK, bit[17] */
+	DRV_WriteReg32(pvGPIO_REG, DRV_Reg32(pvGPIO_REG) & (~0x20000));
+	if (bTimeout) {
+		PVRSRVPowerUnlock();
+		return 0;
+	}
+
+	MTKReStartPowerIndex();
+	PVRSRVPowerUnlock();
+	return (ui32State == 1) ? ui32Result : 0;
+}
+#endif
+
+static bool MTKCheckDeviceInit(void)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = MTKGetRGXDevNode();
+	bool ret = false;
+
+	if (psDevNode) {
+		if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE)
+			ret = true;
+	}
+
+	return ret;
+}
+
+PVRSRV_ERROR MTKDevPrePowerState(IMG_HANDLE hSysData, PVRSRV_DEV_POWER_STATE eNewPowerState,
+				 PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+				 IMG_BOOL bForced)
+{
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	mutex_lock(&mfg_base->set_power_state);
+
+	if ((eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) &&
+	    (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)) {
+		if (g_bDeviceInit) {
+#ifdef MTK_CAL_POWER_INDEX
+			MTKStopPowerIndex();
+#endif
+		} else
+			g_bDeviceInit = MTKCheckDeviceInit();
+
+		mtk_mfg_disable_hw_apm();
+		mtk_mfg_disable_clock();
+	}
+
+	mutex_unlock(&mfg_base->set_power_state);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR MTKDevPostPowerState(IMG_HANDLE hSysData, PVRSRV_DEV_POWER_STATE eNewPowerState,
+				  PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+				  IMG_BOOL bForced)
+{
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	mutex_lock(&mfg_base->set_power_state);
+
+	if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) &&
+	    (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON)) {
+		mtk_mfg_enable_clock();
+		mtk_mfg_enable_hw_apm();
+
+		if (g_bDeviceInit) {
+#ifdef MTK_CAL_POWER_INDEX
+			MTKStartPowerIndex();
+#endif
+		} else
+			g_bDeviceInit = MTKCheckDeviceInit();
+
+		if (g_bUnsync == IMG_TRUE) {
+			// pedro  mt_gpufreq_target(g_ui32_unsync_freq_id);
+			g_bUnsync = IMG_FALSE;
+		}
+	}
+
+	mutex_unlock(&mfg_base->set_power_state);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR MTKSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
+{
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR MTKSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
+{
+	return PVRSRV_OK;
+}
+
+#ifdef SUPPORT_PDVFS
+#include "rgxpdvfs.h"
+static IMG_OPP *gpasOPPTable;
+
+static int MTKMFGOppUpdate(int ui32ThrottlePoint)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = MTKGetRGXDevNode();
+	int i, ui32OPPTableSize;
+
+	static RGXFWIF_PDVFS_OPP sPDFVSOppInfo;
+	static int bNotReady = 1;
+
+	if (bNotReady) {
+		ui32OPPTableSize = mt_gpufreq_get_dvfs_table_num();
+		gpasOPPTable = (IMG_OPP *)OSAllocZMem(sizeof(IMG_OPP) * ui32OPPTableSize);
+		if (!gpasOPPTable)
+			return -ENOMEM;
+
+		for (i = 0; i < ui32OPPTableSize; i++) {
+			gpasOPPTable[i].ui32Volt = mt_gpufreq_get_volt_by_idx(i);
+			gpasOPPTable[i].ui32Freq = mt_gpufreq_get_freq_by_idx(i) * 1000;
+		}
+
+		if (psDevNode) {
+			PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+			PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+
+			psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = gpasOPPTable;
+			psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = ui32OPPTableSize;
+
+			/* sPDFVSOppInfo.ui32ThrottlePoint = ui32ThrottlePoint; */
+			/* PDVFSSendOPPPoints(psDevInfo, sPDFVSOppInfo); */
+			bNotReady = 0;
+
+			PVR_DPF((PVR_DBG_ERROR, "PDVFS opptab=%p size=%d init completed",
+				 psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable, ui32OPPTableSize));
+		} else {
+			if (gpasOPPTable)
+				OSFreeMem(gpasOPPTable);
+		}
+	}
+
+	return 0;
+}
+
+static void MTKFakeGpuLoading(unsigned int *pui32Loading, unsigned int *pui32Block, unsigned int *pui32Idle)
+{
+	*pui32Loading = 0;
+	*pui32Block = 0;
+	*pui32Idle = 0;
+}
+#endif
+
+PVRSRV_ERROR MTKMFGSystemInit(void)
+{
+	/* Set the CB for ptpod use */
+	// pedro mt_gpufreq_mfgclock_notify_registerCB(MTKEnableMfgClock, MTKDisableMfgClock);
+
+#ifdef CONFIG_MTK_HIBERNATION
+	register_swsusp_restore_noirq_func(ID_M_GPU, gpu_pm_restore_noirq, NULL);
+#endif
+
+	return PVRSRV_OK;
+}
+
+void MTKMFGSystemDeInit(void)
+{
+#ifdef SUPPORT_PDVFS
+	if (gpasOPPTable)
+		OSFreeMem(gpasOPPTable);
+#endif
+
+#ifdef CONFIG_MTK_HIBERNATION
+	unregister_swsusp_restore_noirq_func(ID_M_GPU);
+#endif
+
+#ifdef MTK_CAL_POWER_INDEX
+	g_pvRegsBaseKM = NULL;
+#endif
+}
+
+static void mfg_clk_set_parent(struct mtk_mfg_base *mfg_base)
+{
+	/* mfg_slow_in_sel/mfg_axi_in_sel are non-glitch-free muxes: disable the mux
+	 * before changing its parent. mfg_mm_in_sel is a glitch-free mux: enable it
+	 * before changing its parent.
+	 */
+	clk_set_parent(mfg_base->top_clk_sel[TOP_MFG_CLK_SLOW], mfg_base->top_clk_sel_parent[TOP_MFG_CLK_SLOW]);
+	clk_set_parent(mfg_base->top_clk_sel[TOP_MFG_CLK_AXI], mfg_base->top_clk_sel_parent[TOP_MFG_CLK_AXI]);
+
+	clk_prepare_enable(mfg_base->top_clk_sel[TOP_MFG_CLK_MM]);
+	clk_set_parent(mfg_base->top_clk_sel[TOP_MFG_CLK_MM], mfg_base->top_clk_sel_parent[TOP_MFG_CLK_MM]);
+	clk_disable_unprepare(mfg_base->top_clk_sel[TOP_MFG_CLK_MM]);
+}
+
+static int mtk_mfg_bind_device_resource(struct platform_device *pdev,
+				 struct mtk_mfg_base *mfg_base)
+{
+	int i, err;
+	int len_clk = sizeof(struct clk *) * MAX_TOP_MFG_CLK;
+
+	if (!sMFGASYNCDev || !sMFG2DDev) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get pm_domain", __func__));
+		return -EPROBE_DEFER;
+	}
+
+	mfg_base->top_clk_sel = devm_kzalloc(&pdev->dev, len_clk, GFP_KERNEL);
+	if (!mfg_base->top_clk_sel)
+		return -ENOMEM;
+
+	mfg_base->top_clk_sel_parent = devm_kzalloc(&pdev->dev, len_clk, GFP_KERNEL);
+	if (!mfg_base->top_clk_sel_parent)
+		return -ENOMEM;
+
+	mfg_base->top_clk = devm_kzalloc(&pdev->dev, len_clk, GFP_KERNEL);
+	if (!mfg_base->top_clk)
+		return -ENOMEM;
+
+	mfg_base->reg_base = of_iomap(pdev->dev.of_node, 1);
+	if (!mfg_base->reg_base) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Unable to ioremap registers pdev %p", __func__, pdev));
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < MAX_TOP_MFG_CLK; i++) {
+		mfg_base->top_clk_sel_parent[i] = devm_clk_get(&pdev->dev,
+					top_mfg_clk_sel_parent_name[i]);
+		if (IS_ERR(mfg_base->top_clk_sel_parent[i])) {
+			err = PTR_ERR(mfg_base->top_clk_sel_parent[i]);
+			PVR_DPF((PVR_DBG_ERROR, "%s: devm_clk_get %s failed", __func__,
+				top_mfg_clk_sel_parent_name[i]));
+			goto err_iounmap_reg_base;
+		}
+	}
+
+	for (i = 0; i < MAX_TOP_MFG_CLK; i++) {
+		mfg_base->top_clk_sel[i] = devm_clk_get(&pdev->dev,
+						    top_mfg_clk_sel_name[i]);
+		if (IS_ERR(mfg_base->top_clk_sel[i])) {
+			err = PTR_ERR(mfg_base->top_clk_sel[i]);
+			PVR_DPF((PVR_DBG_ERROR, "%s: devm_clk_get %s failed", __func__, top_mfg_clk_sel_name[i]));
+			goto err_iounmap_reg_base;
+		}
+	}
+
+	for (i = 0; i < MAX_TOP_MFG_CLK; i++) {
+		mfg_base->top_clk[i] = devm_clk_get(&pdev->dev,
+						    top_mfg_clk_name[i]);
+		if (IS_ERR(mfg_base->top_clk[i])) {
+			err = PTR_ERR(mfg_base->top_clk[i]);
+			PVR_DPF((PVR_DBG_ERROR, "%s: devm_clk_get %s failed", __func__, top_mfg_clk_name[i]));
+			goto err_iounmap_reg_base;
+		}
+	}
+
+	mfg_clk_set_parent(mfg_base);
+
+	mfg_base->mfg_2d_pdev = sMFG2DDev;
+	mfg_base->mfg_async_pdev = sMFGASYNCDev;
+
+	mfg_base->mfg_notifier.notifier_call = mfg_notify_handler;
+	register_reboot_notifier(&mfg_base->mfg_notifier);
+
+	pm_runtime_enable(&pdev->dev);
+
+	mfg_base->pdev = pdev;
+	return 0;
+
+err_iounmap_reg_base:
+	iounmap(mfg_base->reg_base);
+	return err;
+}
+
+int MTKRGXDeviceInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	struct platform_device *pdev;
+	struct mtk_mfg_base *mfg_base;
+	int err;
+
+	pdev = to_platform_device((struct device *)psDevConfig->pvOSDevice);
+
+	sPVRLDMDev = pdev;
+	mfg_base = devm_kzalloc(&pdev->dev, sizeof(*mfg_base), GFP_KERNEL);
+	if (!mfg_base)
+		return -ENOMEM;
+
+	err = mtk_mfg_bind_device_resource(pdev, mfg_base);
+	if (err != 0)
+		return err;
+
+	mutex_init(&mfg_base->set_power_state);
+	pdev->dev.platform_data = mfg_base;
+
+	bCoreinitSucceeded = IMG_TRUE;
+	return 0;
+}
+
+int MTKRGXDeviceDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	return 0;
+}
+
+void MTKDisablePowerDomain(void)
+{
+	if (sMFG2DDev)
+		pm_runtime_put_sync(&sMFG2DDev->dev);
+
+	if (sMFGASYNCDev)
+		pm_runtime_put_sync(&sMFGASYNCDev->dev);
+}
+
+bool mt_gpucore_ready(void)
+{
+	return (bCoreinitSucceeded == IMG_TRUE);
+}
+EXPORT_SYMBOL(mt_gpucore_ready);
+
+module_param(gpu_debug_enable, uint, 0644);
+
+
+static int mtk_mfg_async_probe(struct platform_device *pdev)
+{
+#ifdef MTK_DEBUG
+	pr_info("mtk_mfg_async_probe\n");
+#endif
+
+	if (!pdev->dev.pm_domain) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dev->pm_domain", __func__));
+		return -EPROBE_DEFER;
+	}
+
+	sMFGASYNCDev = pdev;
+	pm_runtime_enable(&pdev->dev);
+
+	/* Use async power domain as a system suspend indicator. */
+	device_init_wakeup(&pdev->dev, true);
+	return 0;
+}
+
+static int mtk_mfg_async_remove(struct platform_device *pdev)
+{
+	device_init_wakeup(&pdev->dev, false);
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id mtk_mfg_async_of_ids[] = {
+	{ .compatible = "mediatek,mt8167-mfg-async",},
+	{}
+};
+
+static struct platform_driver mtk_mfg_async_driver = {
+	.probe  = mtk_mfg_async_probe,
+	.remove = mtk_mfg_async_remove,
+	.driver = {
+		.name = "mfg-async",
+		.of_match_table = mtk_mfg_async_of_ids,
+	}
+};
+
+#if defined(MODULE)
+int mtk_mfg_async_init(void)
+#else
+static int __init mtk_mfg_async_init(void)
+#endif
+{
+	int ret;
+
+	ret = platform_driver_register(&mtk_mfg_async_driver);
+	if (ret != 0) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register mfg driver", __func__));
+		return ret;
+	}
+
+	if (sMFGASYNCDev)
+		pm_runtime_get_sync(&sMFGASYNCDev->dev);
+	else
+		PVR_DPF((PVR_DBG_ERROR, "%s: Enable power domain failed", __func__));
+
+	return ret;
+}
+
+static int mtk_mfg_2d_probe(struct platform_device *pdev)
+{
+#ifdef MTK_DEBUG
+	pr_info("mtk_mfg_2d_probe\n");
+#endif
+
+	if (!pdev->dev.pm_domain) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dev->pm_domain", __func__));
+		return -EPROBE_DEFER;
+	}
+
+	sMFG2DDev = pdev;
+	pm_runtime_enable(&pdev->dev);
+	return 0;
+}
+
+static int mtk_mfg_2d_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id mtk_mfg_2d_of_ids[] = {
+	{ .compatible = "mediatek,mt8167-mfg-2d",},
+	{}
+};
+
+static struct platform_driver mtk_mfg_2d_driver = {
+	.probe  = mtk_mfg_2d_probe,
+	.remove = mtk_mfg_2d_remove,
+	.driver = {
+		.name = "mfg-2d",
+		.of_match_table = mtk_mfg_2d_of_ids,
+	}
+};
+
+#if defined(MODULE)
+int mtk_mfg_2d_init(void)
+#else
+static int __init mtk_mfg_2d_init(void)
+#endif
+{
+	int ret;
+
+	ret = platform_driver_register(&mtk_mfg_2d_driver);
+	if (ret != 0) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register mfg driver", __func__));
+		return ret;
+	}
+
+	if (sMFG2DDev)
+		pm_runtime_get_sync(&sMFG2DDev->dev);
+	else
+		PVR_DPF((PVR_DBG_ERROR, "%s: Enable power domain failed", __func__));
+
+	return ret;
+}
+
+#ifndef MODULE
+subsys_initcall(mtk_mfg_async_init);
+subsys_initcall(mtk_mfg_2d_init);
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mtk_mfgsys.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mtk_mfgsys.h
new file mode 100644
index 0000000..ab7de36
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/mtk_mfgsys.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_MFGSYS_H
+#define MTK_MFGSYS_H
+
+#include "servicesext.h"
+#include "rgxdevice.h"
+#include <linux/regulator/consumer.h>
+
+/* Controls whether SW APM is enabled */
+#ifndef MTK_BRINGUP
+#define MTK_PM_SUPPORT 1
+#else
+#define MTK_PM_SUPPORT 0
+#endif
+
+struct mtk_mfg_base {
+	struct platform_device *pdev;
+	struct platform_device *mfg_2d_pdev;
+	struct platform_device *mfg_async_pdev;
+
+	struct clk **top_clk_sel;
+	struct clk **top_clk_sel_parent;
+	struct clk **top_clk;
+	void __iomem *reg_base;
+
+	/* mutex protecting power state transitions */
+	struct mutex set_power_state;
+	bool shutdown;
+	struct notifier_block mfg_notifier;
+};
+
+PVRSRV_ERROR MTKMFGSystemInit(void);
+void MTKMFGSystemDeInit(void);
+void MTKDisablePowerDomain(void);
+void MTKFWDump(void);
+
+/* below register interface in RGX sysconfig.c */
+PVRSRV_ERROR MTKDevPrePowerState(IMG_HANDLE hSysData, PVRSRV_DEV_POWER_STATE eNewPowerState,
+				 PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+				 IMG_BOOL bForced);
+
+PVRSRV_ERROR MTKDevPostPowerState(IMG_HANDLE hSysData, PVRSRV_DEV_POWER_STATE eNewPowerState,
+				  PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+				  IMG_BOOL bForced);
+
+PVRSRV_ERROR MTKSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
+
+PVRSRV_ERROR MTKSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
+
+int MTKRGXDeviceInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+int MTKRGXDeviceDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+#ifdef CONFIG_MTK_HIBERNATION
+extern void mt_irq_set_sens(unsigned int irq, unsigned int sens);
+extern void mt_irq_set_polarity(unsigned int irq, unsigned int polarity);
+int gpu_pm_restore_noirq(struct device *device);
+#endif
+
+#ifdef SUPPORT_PDVFS
+extern unsigned int mt_gpufreq_get_volt_by_idx(unsigned int idx);
+#endif
+
+#if defined(MODULE)
+int mtk_mfg_async_init(void);
+int mtk_mfg_2d_init(void);
+#endif
+
+#endif
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/opaque_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/opaque_types.h
new file mode 100644
index 0000000..766bc22
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/opaque_types.h
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File
+@Title          Opaque Types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines opaque types for various services types
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef SERVICES_OPAQUE_TYPES_H
+#define SERVICES_OPAQUE_TYPES_H
+
+#include "img_defs.h"
+#include "img_types.h"
+
+typedef struct _PVRSRV_DEVICE_NODE_ *PPVRSRV_DEVICE_NODE;
+typedef const struct _PVRSRV_DEVICE_NODE_ *PCPVRSRV_DEVICE_NODE;
+
+#endif /* SERVICES_OPAQUE_TYPES_H */
+
+/******************************************************************************
+ End of file (opaque_types.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/os_cpu_cache.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/os_cpu_cache.h
new file mode 100644
index 0000000..1c47632
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/os_cpu_cache.h
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS and CPU d-cache maintenance mechanisms
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines for cache management which are visible internally only
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _OS_CPU_CACHE_H_
+#define _OS_CPU_CACHE_H_
+
+#include "info_page_defs.h"
+
+#define PVRSRV_CACHE_OP_GLOBAL				0x4 /*!< Extends cache_ops.h with explicit global flush w/ invalidate */
+#define PVRSRV_CACHE_OP_TIMELINE			0x8 /*!< Request SW_SYNC timeline notification when executed */
+
+#define CACHEFLUSH_ISA_X86					0x1	/*!< x86/x64 specific UM range-based cache flush */
+#define CACHEFLUSH_ISA_ARM64				0x2	/*!< Aarch64 specific UM range-based cache flush */
+#define CACHEFLUSH_ISA_GENERIC				0x3	/*!< Other ISA's without UM range-based cache flush */
+#ifndef CACHEFLUSH_ISA_TYPE
+	#if defined(__i386__) || defined(__x86_64__)
+		#define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_X86
+	#elif defined(__arm64__) || defined(__aarch64__)
+		#define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_ARM64
+	#else
+		#define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_GENERIC
+	#endif
+#endif
+
+#if (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_X86) || (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_ARM64)
+#define CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH		/*!< x86/x86_64/ARM64 supports user-mode d-cache flush */
+#endif
+
+#if !defined(__mips__)
+#define CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH	/*!< Defined when the ISA supports a global d-cache flush (MIPS32/64 does not) */
+#endif
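+
+/* Illustrative only (not part of the original header): consumers would
+ * typically branch on these feature macros at compile time, e.g.
+ *
+ *   #if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH)
+ *       // perform range-based d-cache maintenance from user mode
+ *   #elif defined(CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH)
+ *       // fall back to a kernel-mediated global d-cache flush
+ *   #endif
+ */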
+
+#endif	/* _OS_CPU_CACHE_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/os_srvinit_param.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/os_srvinit_param.h
new file mode 100644
index 0000000..d5b8e0a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/os_srvinit_param.h
@@ -0,0 +1,322 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services initialisation parameters header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services initialisation parameter support for the Linux kernel.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef OS_SRVINIT_PARAM_H
+#define OS_SRVINIT_PARAM_H
+
+#if defined(LINUX) && defined(__KERNEL__)
+#include "km_apphint.h"
+#include "km_apphint_defs.h"
+
+#define SrvInitParamOpen() NULL
+#define SrvInitParamClose(pvState) ((void)(pvState))
+
+#define	SrvInitParamGetBOOL(state, name, value) \
+	((void) pvr_apphint_get_bool(APPHINT_ID_ ## name, &value))
+
+#define	SrvInitParamGetUINT32(state, name, value) \
+	((void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value))
+
+#define	SrvInitParamGetUINT64(state, name, value) \
+	((void) pvr_apphint_get_uint64(APPHINT_ID_ ## name, &value))
+
+#define SrvInitParamGetSTRING(state, name, buffer, size) \
+	((void) pvr_apphint_get_string(APPHINT_ID_ ## name, buffer, size))
+
+#define	SrvInitParamGetUINT32BitField(state, name, value) \
+	((void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value))
+
+#define	SrvInitParamGetUINT32List(state, name, value) \
+	((void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value))
+
+#else	/* defined(LINUX) && defined(__KERNEL__) */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "img_types.h"
+
+/*! Lookup item. */
+typedef struct
+{
+	const IMG_CHAR *pszValue;       /*!< looked up name */
+	IMG_UINT32 ui32Value;           /*!< looked up value */
+} SRV_INIT_PARAM_UINT32_LOOKUP;
+
+/*************************************************************************/ /*!
+@Brief          SrvInitParamOpen
+
+@Description    Establish a connection to the Parameter resource store which is
+                used to hold configuration information associated with the
+                server instance.
+
+@Return         (void *) Handle to Parameter resource store to be used for
+                subsequent parameter value queries
+
+*/ /**************************************************************************/
+void *SrvInitParamOpen(void);
+
+/*************************************************************************/ /*!
+@Brief          SrvInitParamClose
+
+@Description    Remove a pre-existing connection to the Parameter resource store
+                given by 'pvState' and release any temporary storage associated
+                with the 'pvState' mapping handle
+
+@Input          pvState             Handle to Parameter resource store
+
+*/ /**************************************************************************/
+void SrvInitParamClose(void *pvState);
+
+/*************************************************************************/ /*!
+@Brief          _SrvInitParamGetBOOL
+
+@Description    Get the current BOOL value for parameter 'pszName' from the
+                Parameter resource store attached to 'pvState'
+
+@Input          pvState             Handle to Parameter resource store
+
+@Input          pszName             Name of parameter to look-up
+
+@Input          pbDefault           Value to return if parameter not found
+
+@Output         pbValue             Value of parameter 'pszName' or 'pbDefault'
+                                    if not found
+
+*/ /**************************************************************************/
+void _SrvInitParamGetBOOL(
+	void *pvState,
+	const IMG_CHAR *pszName,
+	const IMG_BOOL *pbDefault,
+	IMG_BOOL *pbValue
+);
+
+/*! Get the BOOL value for parameter 'name' from the parameter resource store
+ *  attached to 'state'. */
+#define	SrvInitParamGetBOOL(state, name, value) \
+		_SrvInitParamGetBOOL(state, # name, & __SrvInitParam_ ## name, &(value))
+
+/*! Initialise FLAG type parameter identified by 'name'. */
+#define	SrvInitParamInitFLAG(name, defval, dummy) \
+	static const IMG_BOOL __SrvInitParam_ ## name = defval;
+
+/*! Initialise BOOL type parameter identified by 'name'. */
+#define	SrvInitParamInitBOOL(name, defval, dummy) \
+	static const IMG_BOOL __SrvInitParam_ ## name = defval;
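+
+/* Illustrative usage (not part of the original header; the parameter name is
+ * invented for the example):
+ *
+ *   SrvInitParamInitBOOL(EnableFeatureX, IMG_FALSE, 0);
+ *
+ *   void *pvState = SrvInitParamOpen();
+ *   IMG_BOOL bEnableFeatureX;
+ *   SrvInitParamGetBOOL(pvState, EnableFeatureX, bEnableFeatureX);
+ *   SrvInitParamClose(pvState);
+ */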
+
+/*************************************************************************/ /*!
+@Brief          _SrvInitParamGetUINT32
+
+@Description    Get the current IMG_UINT32 value for parameter 'pszName'
+                from the Parameter resource store attached to 'pvState'
+
+@Input          pvState             Handle to Parameter resource store
+
+@Input          pszName             Name of parameter to look-up
+
+@Input          pui32Default        Value to return if parameter not found
+
+@Output         pui32Value          Value of parameter 'pszName' or
+                                    'pui32Default' if not found
+
+*/ /**************************************************************************/
+void _SrvInitParamGetUINT32(
+	void *pvState,
+	const IMG_CHAR *pszName,
+	const IMG_UINT32 *pui32Default,
+	IMG_UINT32 *pui32Value
+);
+
+/*! Get the UINT32 value for parameter 'name' from the parameter resource store
+ *  attached to 'state'. */
+#define	SrvInitParamGetUINT32(state, name, value) \
+		_SrvInitParamGetUINT32(state, # name, & __SrvInitParam_ ## name, &(value))
+
+/*! Initialise UINT32 type parameter identified by 'name'. */
+#define	SrvInitParamInitUINT32(name, defval, dummy) \
+	static const IMG_UINT32 __SrvInitParam_ ## name = defval;
+
+/*! Initialise UINT64 type parameter identified by 'name'. */
+#define	SrvInitParamInitUINT64(name, defval, dummy) \
+	static const IMG_UINT64 __SrvInitParam_ ## name = defval;
+
+/*! @cond Doxygen_Suppress */
+#define	SrvInitParamUnreferenced(name) \
+		PVR_UNREFERENCED_PARAMETER( __SrvInitParam_ ## name )
+/*! @endcond */
+
+/*************************************************************************/ /*!
+@Brief          _SrvInitParamGetUINT32BitField
+
+@Description    Get the current IMG_UINT32 bitfield value for parameter
+                'pszBaseName' from the Parameter resource store
+                attached to 'pvState'
+
+@Input          pvState             Handle to Parameter resource store
+
+@Input          pszBaseName         Bitfield parameter name to search for
+
+@Input          uiDefault           Default return value if parameter not found
+
+@Input          psLookup            Bitfield array to traverse
+
+@Input          uiSize              Number of elements in 'psLookup'
+
+@Output         puiValue            Value of bitfield or 'uiDefault' if
+                                    parameter not found
+*/ /**************************************************************************/
+void _SrvInitParamGetUINT32BitField(
+	void *pvState,
+	const IMG_CHAR *pszBaseName,
+	IMG_UINT32 uiDefault,
+	const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup,
+	IMG_UINT32 uiSize,
+	IMG_UINT32 *puiValue
+);
+
+/*! Initialise UINT32 bitfield type parameter identified by 'name' with
+ *  'inival' value and 'lookup' look up array. */
+#define	SrvInitParamInitUINT32Bitfield(name, inival, lookup) \
+	static IMG_UINT32 __SrvInitParam_ ## name = inival; \
+	static SRV_INIT_PARAM_UINT32_LOOKUP * \
+		__SrvInitParamLookup_ ## name = &lookup[0]; \
+	static const IMG_UINT32 __SrvInitParamSize_ ## name = \
+					ARRAY_SIZE(lookup);
+
+/*! Get the UINT32 bitfield value for parameter 'name' from the parameter
+ *  resource store attached to 'state'. */
+#define	SrvInitParamGetUINT32BitField(state, name, value) \
+		_SrvInitParamGetUINT32BitField(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value))
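+
+/* Illustrative usage (not part of the original header; the parameter name and
+ * lookup entries are invented for the example):
+ *
+ *   static SRV_INIT_PARAM_UINT32_LOOKUP asGroupLookup[] = {
+ *       { "main", 0x1 },
+ *       { "mmu",  0x2 },
+ *   };
+ *   SrvInitParamInitUINT32Bitfield(LogGroups, 0, asGroupLookup);
+ *
+ *   IMG_UINT32 ui32LogGroups;
+ *   SrvInitParamGetUINT32BitField(pvState, LogGroups, ui32LogGroups);
+ */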
+
+/*************************************************************************/ /*!
+@Brief          _SrvInitParamGetUINT32List
+
+@Description    Get the current IMG_UINT32 list value for the specified
+                parameter 'pszName' from the Parameter resource store
+                attached to 'pvState'
+
+@Input          pvState             Handle to Parameter resource store
+
+@Input          pszName             Parameter list name to search for
+
+@Input          uiDefault           Default value to return if 'pszName' is
+                                    not set within 'pvState'
+
+@Input          psLookup            Parameter list to traverse
+
+@Input          uiSize              Number of elements in 'psLookup' list
+
+@Output         puiValue            Value of located list element or
+                                    'uiDefault' if parameter not found
+
+*/ /**************************************************************************/
+void _SrvInitParamGetUINT32List(
+	void *pvState,
+	const IMG_CHAR *pszName,
+	IMG_UINT32 uiDefault,
+	const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup,
+	IMG_UINT32 uiSize,
+	IMG_UINT32 *puiValue
+);
+
+/*! Get the UINT32 list value for parameter 'name' from the parameter
+ *  resource store attached to 'state'. */
+#define	SrvInitParamGetUINT32List(state, name, value) \
+		_SrvInitParamGetUINT32List(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value))
+
+/*! Initialise UINT32 list type parameter identified by 'name' with
+ *  'defval' default value and 'lookup' look up list. */
+#define	SrvInitParamInitUINT32List(name, defval, lookup) \
+	static IMG_UINT32 __SrvInitParam_ ## name = defval; \
+	static SRV_INIT_PARAM_UINT32_LOOKUP * \
+		__SrvInitParamLookup_ ## name = &lookup[0]; \
+	static const IMG_UINT32 __SrvInitParamSize_ ## name = \
+					ARRAY_SIZE(lookup);
+
+/*************************************************************************/ /*!
+@Brief          _SrvInitParamGetSTRING
+
+@Description    Get the contents of the specified parameter string 'pszName'
+                from the Parameter resource store attached to 'pvState'
+
+@Input          pvState             Handle to Parameter resource store
+
+@Input          pszName             Parameter string name to search for
+
+@Input          psDefault           Default string to return if 'pszName' is
+                                    not set within 'pvState'
+
+@Input          size                Size of output 'pBuffer'
+
+@Output         pBuffer             Output copy of 'pszName' contents or
+                                    copy of 'psDefault' if 'pszName' is not
+                                    set within 'pvState'
+
+*/ /**************************************************************************/
+void _SrvInitParamGetSTRING(
+	void *pvState,
+	const IMG_CHAR *pszName,
+	const IMG_CHAR **psDefault,
+	IMG_CHAR *pBuffer,
+	size_t size
+);
+
+/*! Initialise STRING type parameter identified by 'name' with 'defval' default
+ *  value. */
+#define	SrvInitParamInitSTRING(name, defval, dummy) \
+	static const IMG_CHAR *__SrvInitParam_ ## name = defval;
+
+/*! Get the STRING value for parameter 'name' from the parameter resource store
+ *  attached to 'state'. */
+#define	SrvInitParamGetSTRING(state, name, buffer, size) \
+		_SrvInitParamGetSTRING(state, # name, & __SrvInitParam_ ## name, buffer, size)
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif	/* defined(LINUX) && defined(__KERNEL__) */
+
+#endif /* OS_SRVINIT_PARAM_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osconnection_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osconnection_server.c
new file mode 100644
index 0000000..6f567ed
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osconnection_server.c
@@ -0,0 +1,155 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux specific per process data functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include "connection_server.h"
+#include "osconnection_server.h"
+
+#include "env_connection.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+#include <linux/sched.h>
+
+#if defined (SUPPORT_ION)
+#include <linux/err.h>
+#include PVR_ANDROID_ION_HEADER
+
+/*
+	The ion device (the base object for all requests)
+	gets created by the system and we acquire it via
+	Linux-specific functions provided by the system layer
+*/
+#include "ion_sys.h"
+#endif
+
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData)
+{
+	ENV_CONNECTION_PRIVATE_DATA *psPrivData = pvOSData;
+	ENV_CONNECTION_DATA *psEnvConnection;
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	ENV_ION_CONNECTION_DATA *psIonConnection;
+#endif
+
+	*phOsPrivateData = OSAllocZMem(sizeof(ENV_CONNECTION_DATA));
+
+	if (*phOsPrivateData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psEnvConnection = (ENV_CONNECTION_DATA *)*phOsPrivateData;
+
+	psEnvConnection->owner = current->tgid;
+
+	/* Save the pointer to our struct file */
+	psEnvConnection->psFile = psPrivData->psFile;
+	psEnvConnection->psDevNode = psPrivData->psDevNode;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	psIonConnection = (ENV_ION_CONNECTION_DATA *)OSAllocZMem(sizeof(ENV_ION_CONNECTION_DATA));
+	if (psIonConnection == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psEnvConnection->psIonData = psIonConnection;
+	/*
+		We can have more than one connection per process, so we need more than
+		the PID to form a unique name
+	*/
+	psEnvConnection->psIonData->psIonDev = IonDevAcquire();
+	OSSNPrintf(psEnvConnection->psIonData->azIonClientName, ION_CLIENT_NAME_SIZE, "pvr_ion_client-%p-%d", *phOsPrivateData, OSGetCurrentClientProcessIDKM());
+	psEnvConnection->psIonData->psIonClient =
+		ion_client_create(psEnvConnection->psIonData->psIonDev,
+						  psEnvConnection->psIonData->azIonClientName);
+
+	if (IS_ERR_OR_NULL(psEnvConnection->psIonData->psIonClient))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSConnectionPrivateDataInit: Couldn't create "
+								"ion client for per connection data"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+#endif /* SUPPORT_ION && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+	ENV_CONNECTION_DATA *psEnvConnection;
+
+	if (hOsPrivateData == NULL)
+	{
+		return PVRSRV_OK;
+	}
+
+	psEnvConnection = hOsPrivateData;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	PVR_ASSERT(psEnvConnection->psIonData != NULL);
+
+	PVR_ASSERT(psEnvConnection->psIonData->psIonClient != NULL);
+	ion_client_destroy(psEnvConnection->psIonData->psIonClient);
+
+	IonDevRelease(psEnvConnection->psIonData->psIonDev);
+	OSFreeMem(psEnvConnection->psIonData);
+#endif
+
+	OSFreeMem(hOsPrivateData);
+	/* Not NULLing the pointer; the caller only holds a copy on its stack */
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_DEVICE_NODE *OSGetDevData(CONNECTION_DATA *psConnection)
+{
+	ENV_CONNECTION_DATA *psEnvConnection;
+
+	psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+	PVR_ASSERT(psEnvConnection);
+
+	return psEnvConnection->psDevNode;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osconnection_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osconnection_server.h
new file mode 100644
index 0000000..456157b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osconnection_server.h
@@ -0,0 +1,121 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    API for OS specific callbacks from server side connection
+                management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#ifndef _OSCONNECTION_SERVER_H_
+#define _OSCONNECTION_SERVER_H_
+
+#include "handle.h"
+#include "osfunc.h"
+
+
+#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData);
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData);
+
+PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
+
+PVRSRV_DEVICE_NODE* OSGetDevData(CONNECTION_DATA *psConnection);
+
+#else	/* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionPrivateDataInit)
+#endif
+/*************************************************************************/ /*!
+@Function       OSConnectionPrivateDataInit
+@Description    Allocates and initialises any OS-specific private data
+                relating to a connection.
+                Called from PVRSRVConnectionConnect().
+@Input          pvOSData            pointer to any OS private data
+@Output         phOsPrivateData     handle to the created connection
+                                    private data
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData)
+{
+	PVR_UNREFERENCED_PARAMETER(phOsPrivateData);
+	PVR_UNREFERENCED_PARAMETER(pvOSData);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionPrivateDataDeInit)
+#endif
+/*************************************************************************/ /*!
+@Function       OSConnectionPrivateDataDeInit
+@Description    Frees previously allocated OS-specific private data
+                relating to a connection.
+@Input          hOsPrivateData      handle to the connection private data
+                                    to be freed
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+	PVR_UNREFERENCED_PARAMETER(hOsPrivateData);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionSetHandleOptions)
+#endif
+static INLINE PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
+{
+	PVR_UNREFERENCED_PARAMETER(psHandleBase);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSGetDevData)
+#endif
+static INLINE PVRSRV_DEVICE_NODE* OSGetDevData(CONNECTION_DATA *psConnection)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	return NULL;
+}
+#endif	/* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+
+
+#endif /* _OSCONNECTION_SERVER_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc.c
new file mode 100644
index 0000000..6bc652b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc.c
@@ -0,0 +1,1906 @@
+/*************************************************************************/ /*!
+@File
+@Title          Environment related functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <asm/page.h>
+#include <asm/div64.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/hugetlb.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+#include <linux/string.h>
+#include <linux/freezer.h>
+#include <asm/hardirq.h>
+#include <asm/tlbflush.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/utsname.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+#include <linux/pfn_t.h>
+#include <linux/pfn.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+#include <linux/sched/clock.h>
+#include <linux/sched/signal.h>
+#else
+#include <linux/sched.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */
+
+#include "log2.h"
+#include "osfunc.h"
+#include "cache_km.h"
+#include "img_defs.h"
+#include "img_types.h"
+#include "allocmem.h"
+#include "devicemem_server_utils.h"
+#include "pvr_debugfs.h"
+#include "event.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+#include "pvr_debug.h"
+#include "pvr_bridge_k.h"
+#include "pvrsrv_memallocflags.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#include "physmem_osmem_linux.h"
+#include "dma_support.h"
+#include "kernel_compatibility.h"
+
+#if defined(VIRTUAL_PLATFORM)
+#define EVENT_OBJECT_TIMEOUT_US		(120000000ULL)
+#else
+#if defined(EMULATOR) || defined(TC_APOLLO_TCF5)
+#define EVENT_OBJECT_TIMEOUT_US		(2000000ULL)
+#else
+#define EVENT_OBJECT_TIMEOUT_US		(100000ULL)
+#endif /* EMULATOR */
+#endif
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+/*
+ * Main driver lock, used to ensure driver code is single threaded. There are
+ * some places where this lock must not be taken, such as in the mmap related
+ * driver entry points.
+ */
+static DEFINE_MUTEX(gPVRSRVLock);
+
+static void *g_pvBridgeBuffers;
+
+struct task_struct *BridgeLockGetOwner(void);
+IMG_BOOL BridgeLockIsLocked(void);
+#endif
+
+typedef struct {
+	struct task_struct *kthread;
+	PFN_THREAD pfnThread;
+	void *hData;
+	IMG_CHAR *pszThreadName;
+	IMG_BOOL   bIsThreadRunning;
+	IMG_BOOL   bIsSupportingThread;
+	PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB;
+	DLLIST_NODE sNode;
+} OSThreadData;
+
+static DLLIST_NODE gsThreadListHead;
+
+static void _ThreadListAddEntry(OSThreadData *psThreadListNode)
+{
+	dllist_add_to_tail(&gsThreadListHead, &(psThreadListNode->sNode));
+}
+
+static void _ThreadListRemoveEntry(OSThreadData *psThreadListNode)
+{
+	dllist_remove_node(&(psThreadListNode->sNode));
+}
+
+static void _ThreadSetStopped(OSThreadData *psOSThreadData)
+{
+	psOSThreadData->bIsThreadRunning = IMG_FALSE;
+}
+
+static void _OSInitThreadList(void)
+{
+	dllist_init(&gsThreadListHead);
+}
+
+void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+                      void *pvDumpDebugFile)
+{
+	PDLLIST_NODE psNodeCurr, psNodeNext;
+
+	dllist_foreach_node(&gsThreadListHead, psNodeCurr, psNodeNext)
+	{
+		OSThreadData *psThreadListNode;
+		psThreadListNode = IMG_CONTAINER_OF(psNodeCurr, OSThreadData, sNode);
+
+		PVR_DUMPDEBUG_LOG("  %s : %s",
+				  psThreadListNode->pszThreadName,
+				  (psThreadListNode->bIsThreadRunning) ? "Running" : "Stopped");
+
+		if (psThreadListNode->pfnDebugDumpCB)
+		{
+			psThreadListNode->pfnDebugDumpCB(pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+	}
+}
+
+PVRSRV_ERROR OSPhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+							PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr)
+{
+	struct device *psDev = psDevNode->psDevConfig->pvOSDevice;
+	IMG_CPU_PHYADDR sCpuPAddr;
+	struct page *psPage;
+	IMG_UINT32	ui32Order=0;
+	gfp_t gfp_flags;
+
+	PVR_ASSERT(uiSize != 0);
+	/* Align the size to page granularity */
+	uiSize = PAGE_ALIGN(uiSize);
+
+	/* Get the order to be used with the allocation */
+	ui32Order = get_order(uiSize);
+
+	gfp_flags = GFP_KERNEL;
+
+#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY)
+	if (psDev)
+	{
+		if (*psDev->dma_mask == DMA_BIT_MASK(32))
+		{
+			/* Limit to 32 bit.
+			 * Achieved by setting __GFP_DMA32 for 64 bit systems */
+			gfp_flags |= __GFP_DMA32;
+		}
+		else if (*psDev->dma_mask < DMA_BIT_MASK(32))
+		{
+			/* Limit to whatever the size of DMA zone is. */
+			gfp_flags |= __GFP_DMA;
+		}
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(psDev);
+#endif
+
+	/* Allocate the pages */
+	psPage = alloc_pages(gfp_flags, ui32Order);
+	if (psPage == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	uiSize = (1 << ui32Order) * PAGE_SIZE;
+
+	psMemHandle->u.pvHandle = psPage;
+	psMemHandle->ui32Order = ui32Order;
+	sCpuPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(psPage));
+
+	/*
+	 * Even when more pages are allocated as base MMU object we still need one single physical address because
+	 * they are physically contiguous.
+	 */
+	PhysHeapCpuPAddrToDevPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL], 1, psDevPAddr, &sCpuPAddr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+										uiSize,
+										(IMG_UINT64)(uintptr_t) psPage,
+										OSGetCurrentClientProcessIDKM());
+#else
+	PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+	                             psPage,
+								 sCpuPAddr,
+								 uiSize,
+								 NULL,
+								 OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+	return PVRSRV_OK;
+}
+
+void OSPhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle)
+{
+	struct page *psPage = (struct page*) psMemHandle->u.pvHandle;
+	IMG_UINT32	uiSize, uiPageCount=0;
+
+	uiPageCount = (1 << psMemHandle->ui32Order);
+	uiSize = (uiPageCount * PAGE_SIZE);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+	                                      (IMG_UINT64)(uintptr_t) psPage);
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+	                                (IMG_UINT64)(uintptr_t) psPage,
+	                                OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+	__free_pages(psPage, psMemHandle->ui32Order);
+	psMemHandle->ui32Order = 0;
+}
+
+PVRSRV_ERROR OSPhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+						size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+						void **pvPtr)
+{
+	size_t actualSize = 1 << (PAGE_SHIFT + psMemHandle->ui32Order);
+	*pvPtr = kmap((struct page*)psMemHandle->u.pvHandle);
+
+	PVR_UNREFERENCED_PARAMETER(psDevPAddr);
+
+	PVR_UNREFERENCED_PARAMETER(actualSize); /* If we don't take an #ifdef path */
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, actualSize, OSGetCurrentClientProcessIDKM());
+#else
+	{
+		IMG_CPU_PHYADDR sCpuPAddr;
+		sCpuPAddr.uiAddr = 0;
+
+		PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
+									 *pvPtr,
+									 sCpuPAddr,
+									 actualSize,
+									 NULL,
+									 OSGetCurrentClientProcessIDKM());
+	}
+#endif
+#endif
+
+	return PVRSRV_OK;
+}
+
+void OSPhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, void *pvPtr)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	/* Mapping is done a page at a time */
+	PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
+	                            (1 << (PAGE_SHIFT + psMemHandle->ui32Order)),
+	                            OSGetCurrentClientProcessIDKM());
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
+	                                (IMG_UINT64)(uintptr_t)pvPtr,
+	                                OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(pvPtr);
+
+	kunmap((struct page*) psMemHandle->u.pvHandle);
+}
+
+PVRSRV_ERROR OSPhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+                                   PG_HANDLE *psMemHandle,
+                                   IMG_UINT32 uiOffset,
+                                   IMG_UINT32 uiLength)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	struct page* psPage = (struct page*) psMemHandle->u.pvHandle;
+
+	void* pvVirtAddrStart = kmap(psPage) + uiOffset;
+	IMG_CPU_PHYADDR sPhysStart, sPhysEnd;
+
+	if (uiLength == 0)
+	{
+		goto e0;
+	}
+
+	if ((uiOffset + uiLength) > ((1 << psMemHandle->ui32Order) * PAGE_SIZE))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Invalid size params, uiOffset %u, uiLength %u",
+				__func__,
+				uiOffset,
+				uiLength));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	sPhysStart.uiAddr = page_to_phys(psPage) + uiOffset;
+	sPhysEnd.uiAddr = sPhysStart.uiAddr + uiLength;
+
+	CacheOpExec(psDevNode,
+				pvVirtAddrStart,
+				pvVirtAddrStart + uiLength,
+				sPhysStart,
+				sPhysEnd,
+				PVRSRV_CACHE_OP_CLEAN);
+
+e0:
+	kunmap(psPage);
+
+	return eError;
+}
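+
+/* Illustrative lifecycle (not part of the original source): callers pair
+ * these operations as follows when backing MMU objects with UMA pages:
+ *
+ *   PG_HANDLE sMemHandle;
+ *   IMG_DEV_PHYADDR sDevPAddr;
+ *   void *pvCpuVAddr;
+ *
+ *   OSPhyContigPagesAlloc(psDevNode, uiSize, &sMemHandle, &sDevPAddr);
+ *   OSPhyContigPagesMap(psDevNode, &sMemHandle, uiSize, &sDevPAddr, &pvCpuVAddr);
+ *   // ... populate the pages through pvCpuVAddr ...
+ *   OSPhyContigPagesClean(psDevNode, &sMemHandle, 0, uiSize);
+ *   OSPhyContigPagesUnmap(psDevNode, &sMemHandle, pvCpuVAddr);
+ *   OSPhyContigPagesFree(psDevNode, &sMemHandle);
+ */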
+
+#if defined(__GNUC__)
+#define PVRSRV_MEM_ALIGN __attribute__ ((aligned (0x8)))
+#define PVRSRV_MEM_ALIGN_MASK (0x7)
+#else
+#error "PVRSRV Alignment macros need to be defined for this compiler"
+#endif
+
+IMG_UINT32 OSCPUCacheAttributeSize(IMG_DCACHE_ATTRIBUTE eCacheAttribute)
+{
+	IMG_UINT32 uiSize = 0;
+
+	switch (eCacheAttribute)
+	{
+		case PVR_DCACHE_LINE_SIZE:
+			uiSize = cache_line_size();
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache attribute type %d",
+					__func__, (IMG_UINT32)eCacheAttribute));
+			PVR_ASSERT(0);
+			break;
+	}
+
+	return uiSize;
+}
+
+IMG_UINT32 OSVSScanf(IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...)
+{
+	va_list argList;
+	IMG_INT32 iCount = 0;
+
+	va_start(argList, pszFormat);
+	iCount = vsscanf(pStr, pszFormat, argList);
+	va_end(argList);
+
+	return iCount;
+}
+
+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen)
+{
+	return (IMG_INT) memcmp(pvBufA, pvBufB, uiLen);
+}
+
+size_t OSStringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uSize)
+{
+	size_t	uSrcSize = strlcpy(pszDest, pszSrc, uSize);
+
+#if defined(PVR_DEBUG_STRLCPY) && defined(DEBUG)
+	/* Handle truncation by dumping calling stack if debug allows */
+	if (uSrcSize >= uSize)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			"%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'",
+			__func__, pszSrc, (long)uSize, pszDest));
+		OSDumpStack();
+	}
+#endif	/* defined (PVR_DEBUG_STRLCPY) && defined(DEBUG) */
+
+	return uSrcSize;
+}
+
+IMG_CHAR *OSStringNCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uSize)
+{
+	/*
+	 * Let strlcpy handle any truncation cases correctly.
+	 * We will definitely get a NUL-terminated string set in pszDest
+	 */
+	(void) OSStringLCopy(pszDest, pszSrc, uSize);
+
+	return pszDest;
+}
+
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...)
+{
+	va_list argList;
+	IMG_INT32 iCount;
+
+	va_start(argList, pszFormat);
+	iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
+	va_end(argList);
+
+	return iCount;
+}
+
+IMG_INT32 OSVSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR* pszFormat, va_list vaArgs)
+{
+	return vsnprintf(pStr, ui32Size, pszFormat, vaArgs);
+}
+
+size_t OSStringLength(const IMG_CHAR *pStr)
+{
+	return strlen(pStr);
+}
+
+size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount)
+{
+	return strnlen(pStr, uiCount);
+}
+
+IMG_INT32 OSStringCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2)
+{
+	return strcmp(pStr1, pStr2);
+}
+
+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
+                          size_t uiSize)
+{
+	return strncmp(pStr1, pStr2, uiSize);
+}
+
+PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base,
+                              IMG_UINT32 *ui32Result)
+{
+	if (kstrtou32(pStr, ui32Base, ui32Result) != 0)
+		return PVRSRV_ERROR_CONVERSION_FAILED;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSInitEnvData(void)
+{
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	/* allocate memory for the bridge buffers to be used during an ioctl */
+	g_pvBridgeBuffers = OSAllocMem(PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE);
+	if (g_pvBridgeBuffers == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+#endif
+
+	LinuxInitPhysmem();
+
+	_OSInitThreadList();
+
+	return PVRSRV_OK;
+}
+
+
+void OSDeInitEnvData(void)
+{
+
+	LinuxDeinitPhysmem();
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	if (g_pvBridgeBuffers)
+	{
+		/* free-up the memory allocated for bridge buffers */
+		OSFreeMem(g_pvBridgeBuffers);
+		g_pvBridgeBuffers = NULL;
+	}
+#endif
+}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+PVRSRV_ERROR OSGetGlobalBridgeBuffers(void **ppvBridgeInBuffer,
+									  void **ppvBridgeOutBuffer)
+{
+	PVR_ASSERT (ppvBridgeInBuffer && ppvBridgeOutBuffer);
+
+	*ppvBridgeInBuffer = g_pvBridgeBuffers;
+	*ppvBridgeOutBuffer = *ppvBridgeInBuffer + PVRSRV_MAX_BRIDGE_IN_SIZE;
+
+	return PVRSRV_OK;
+}
+#endif
+
+void OSReleaseThreadQuanta(void)
+{
+	schedule();
+}
+
+/* This API is deliberately not aligned with the Clockus() API above, so that
+ * calling code avoids needless multiplication/division operations.
+ */
+static inline IMG_UINT64 Clockns64(void)
+{
+	IMG_UINT64 timenow;
+
+	/* Kernel thread preempt protection. Some architecture implementations
+	 * (ARM) of sched_clock are not preempt safe when the kernel is configured
+	 * as such e.g. CONFIG_PREEMPT and others.
+	 */
+	preempt_disable();
+
+	/* Using sched_clock instead of ktime_get since we need a timestamp that
+	 * correlates with the one shown in kernel logs and trace data, not one
+	 * that lags slightly behind. */
+	timenow = sched_clock();
+
+	preempt_enable();
+
+	return timenow;
+}
+
+IMG_UINT64 OSClockns64(void)
+{
+	return Clockns64();
+}
+
+IMG_UINT64 OSClockus64(void)
+{
+	IMG_UINT64 timenow = Clockns64();
+	IMG_UINT32 remainder;
+
+	return OSDivide64r64(timenow, 1000, &remainder);
+}
+
+IMG_UINT32 OSClockus(void)
+{
+	return (IMG_UINT32) OSClockus64();
+}
+
+IMG_UINT32 OSClockms(void)
+{
+	IMG_UINT64 timenow = Clockns64();
+	IMG_UINT32 remainder;
+
+	return OSDivide64(timenow, 1000000, &remainder);
+}
+
+static inline IMG_UINT64 KClockns64(void)
+{
+	ktime_t sTime = ktime_get();
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	return sTime;
+#else
+	return sTime.tv64;
+#endif
+}
+
+PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time)
+{
+	*pui64Time = KClockns64();
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time)
+{
+	IMG_UINT64 timenow = KClockns64();
+	IMG_UINT32 remainder;
+
+	*pui64Time = OSDivide64r64(timenow, 1000, &remainder);
+	return PVRSRV_OK;
+}
+
+IMG_UINT64 OSClockMonotonicRawns64(void)
+{
+	struct timespec ts;
+
+	getrawmonotonic(&ts);
+	return (IMG_UINT64) ts.tv_sec * 1000000000 + ts.tv_nsec;
+}
+
+IMG_UINT64 OSClockMonotonicRawus64(void)
+{
+	IMG_UINT32 rem;
+	return OSDivide64r64(OSClockMonotonicRawns64(), 1000, &rem);
+}
+
+/*
+	OSWaitus
+*/
+void OSWaitus(IMG_UINT32 ui32Timeus)
+{
+	udelay(ui32Timeus);
+}
+
+
+/*
+	OSSleepms
+*/
+void OSSleepms(IMG_UINT32 ui32Timems)
+{
+	msleep(ui32Timems);
+}
+
+
+INLINE IMG_UINT64 OSGetCurrentProcessVASpaceSize(void)
+{
+	return (IMG_UINT64)TASK_SIZE;
+}
+
+INLINE IMG_PID OSGetCurrentProcessID(void)
+{
+	if (in_interrupt())
+	{
+		return KERNEL_ID;
+	}
+
+	return (IMG_PID)task_tgid_nr(current);
+}
+
+INLINE IMG_CHAR *OSGetCurrentProcessName(void)
+{
+	return current->comm;
+}
+
+INLINE uintptr_t OSGetCurrentThreadID(void)
+{
+	if (in_interrupt())
+	{
+		return KERNEL_ID;
+	}
+
+	return current->pid;
+}
+
+IMG_PID OSGetCurrentClientProcessIDKM(void)
+{
+	return OSGetCurrentProcessID();
+}
+
+IMG_CHAR *OSGetCurrentClientProcessNameKM(void)
+{
+	return OSGetCurrentProcessName();
+}
+
+uintptr_t OSGetCurrentClientThreadIDKM(void)
+{
+	return OSGetCurrentThreadID();
+}
+
+size_t OSGetPageSize(void)
+{
+	return PAGE_SIZE;
+}
+
+size_t OSGetPageShift(void)
+{
+	return PAGE_SHIFT;
+}
+
+size_t OSGetPageMask(void)
+{
+	return (OSGetPageSize()-1);
+}
+
+size_t OSGetOrder(size_t uSize)
+{
+	return get_order(PAGE_ALIGN(uSize));
+}
+
+IMG_UINT64 OSGetRAMSize(void)
+{
+	struct sysinfo SI;
+	si_meminfo(&SI);
+
+	return (PAGE_SIZE * SI.totalram);
+}
+
+typedef struct
+{
+	int os_error;
+	PVRSRV_ERROR pvr_error;
+} error_map_t;
+
+/* return -ve versions of POSIX errors as they are used in this form */
+static const error_map_t asErrorMap[] =
+{
+	{-EFAULT, PVRSRV_ERROR_BRIDGE_EFAULT},
+	{-EINVAL, PVRSRV_ERROR_BRIDGE_EINVAL},
+	{-ENOMEM, PVRSRV_ERROR_BRIDGE_ENOMEM},
+	{-ERANGE, PVRSRV_ERROR_BRIDGE_ERANGE},
+	{-EPERM,  PVRSRV_ERROR_BRIDGE_EPERM},
+	{-ENOTTY, PVRSRV_ERROR_BRIDGE_ENOTTY},
+	{-ENOTTY, PVRSRV_ERROR_BRIDGE_CALL_FAILED},
+	{-ERANGE, PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL},
+	{-ENOMEM, PVRSRV_ERROR_OUT_OF_MEMORY},
+	{-EINVAL, PVRSRV_ERROR_INVALID_PARAMS},
+
+	{0,       PVRSRV_OK}
+};
+
+int PVRSRVToNativeError(PVRSRV_ERROR e)
+{
+	int os_error = -EFAULT;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(asErrorMap); i++)
+	{
+		if (e == asErrorMap[i].pvr_error)
+		{
+			os_error = asErrorMap[i].os_error;
+			break;
+		}
+	}
+	return os_error;
+}
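+
+/* Usage sketch (hypothetical bridge handler): translate a services error
+ * into the negative-errno convention returned to the OS; for example,
+ * PVRSRV_ERROR_OUT_OF_MEMORY maps to -ENOMEM via the table above.
+ *
+ *	PVRSRV_ERROR eError = DoBridgeCall();   // hypothetical helper
+ *	return PVRSRVToNativeError(eError);
+ */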
+
+typedef struct  _MISR_DATA_ {
+	struct workqueue_struct *psWorkQueue;
+	struct work_struct sMISRWork;
+	const IMG_CHAR* pszName;
+	PFN_MISR pfnMISR;
+	void *hData;
+} MISR_DATA;
+
+/*
+	MISRWrapper
+*/
+static void MISRWrapper(struct work_struct *data)
+{
+	MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Waking up '%s' MISR %p", psMISRData->pszName, psMISRData));
+
+	psMISRData->pfnMISR(psMISRData->hData);
+}
+
+/*
+	OSInstallMISR
+*/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR,
+							void *hData, const IMG_CHAR *pszMisrName)
+{
+	MISR_DATA *psMISRData;
+
+	psMISRData = OSAllocMem(sizeof(*psMISRData));
+	if (psMISRData == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psMISRData->hData = hData;
+	psMISRData->pfnMISR = pfnMISR;
+	psMISRData->pszName = pszMisrName;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Installing MISR with cookie %p", psMISRData));
+
+	psMISRData->psWorkQueue = create_singlethread_workqueue("pvr_misr");
+
+	if (psMISRData->psWorkQueue == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethread_workqueue failed"));
+		OSFreeMem(psMISRData);
+		return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+	}
+
+	INIT_WORK(&psMISRData->sMISRWork, MISRWrapper);
+
+	*hMISRData = (IMG_HANDLE) psMISRData;
+
+	return PVRSRV_OK;
+}
+
+/*
+	OSUninstallMISR
+*/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Uninstalling MISR with cookie %p", psMISRData));
+
+	destroy_workqueue(psMISRData->psWorkQueue);
+	OSFreeMem(psMISRData);
+
+	return PVRSRV_OK;
+}
+
+/*
+	OSScheduleMISR
+*/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+	/*
+		Note:
+
+		In the case of NO_HARDWARE we want the driver to be synchronous so
+		that we don't have to worry about waiting for previous operations
+		to complete
+	*/
+#if defined(NO_HARDWARE)
+	psMISRData->pfnMISR(psMISRData->hData);
+	return PVRSRV_OK;
+#else
+	{
+		bool rc = queue_work(psMISRData->psWorkQueue, &psMISRData->sMISRWork);
+		return (rc ? PVRSRV_OK : PVRSRV_ERROR_ALREADY_EXISTS);
+	}
+#endif
+}
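+
+/* Typical MISR lifecycle, as a sketch; MyMISRHandler and hDevData are
+ * hypothetical names.
+ *
+ *	IMG_HANDLE hMISR;
+ *	if (OSInstallMISR(&hMISR, MyMISRHandler, hDevData, "MyMISR") == PVRSRV_OK)
+ *	{
+ *		OSScheduleMISR(hMISR);   // normally called from the LISR
+ *		...
+ *		OSUninstallMISR(hMISR);
+ *	}
+ */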
+
+/* OS specific values for thread priority */
+static const IMG_INT32 ai32OSPriorityValues[OS_THREAD_LAST_PRIORITY] =
+{
+	  0, /* OS_THREAD_NOSET_PRIORITY */
+	-20, /* OS_THREAD_HIGHEST_PRIORITY */
+	-10, /* OS_THREAD_HIGH_PRIORITY */
+	  0, /* OS_THREAD_NORMAL_PRIORITY */
+	  9, /* OS_THREAD_LOW_PRIORITY */
+	 19, /* OS_THREAD_LOWEST_PRIORITY */
+};
+
+static int OSThreadRun(void *data)
+{
+	OSThreadData *psOSThreadData = data;
+
+	/* count freezable threads */
+	LinuxBridgeNumActiveKernelThreadsIncrement();
+
+	/* Mark the thread as freezable so the kernel freezer can put it to
+	 * sleep, e.g. around driver suspend. The freezer reports whether a
+	 * thread was actually frozen, but nothing needs to be done with that
+	 * information here. */
+	set_freezable();
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Starting Thread '%s'...", psOSThreadData->pszThreadName));
+
+	/* Call the client's kernel thread with the client's data pointer */
+	psOSThreadData->pfnThread(psOSThreadData->hData);
+
+	if (psOSThreadData->bIsSupportingThread)
+	{
+		_ThreadSetStopped(psOSThreadData);
+	}
+
+	/* Wait for OSThreadDestroy() to call kthread_stop() */
+	while (!kthread_freezable_should_stop(NULL))
+	{
+		 schedule();
+	}
+
+	LinuxBridgeNumActiveKernelThreadsDecrement();
+
+	return 0;
+}
+
+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
+                            IMG_CHAR *pszThreadName,
+                            PFN_THREAD pfnThread,
+                            PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB,
+                            IMG_BOOL bIsSupportingThread,
+                            void *hData)
+{
+	return OSThreadCreatePriority(phThread, pszThreadName, pfnThread,
+	                              pfnDebugDumpCB, bIsSupportingThread, hData,
+	                              OS_THREAD_NOSET_PRIORITY);
+}
+
+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
+                                    IMG_CHAR *pszThreadName,
+                                    PFN_THREAD pfnThread,
+                                    PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB,
+                                    IMG_BOOL bIsSupportingThread,
+                                    void *hData,
+                                    OS_THREAD_LEVEL eThreadPriority)
+{
+	OSThreadData *psOSThreadData;
+	PVRSRV_ERROR eError;
+
+	psOSThreadData = OSAllocZMem(sizeof(*psOSThreadData));
+	if (psOSThreadData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	psOSThreadData->pfnThread = pfnThread;
+	psOSThreadData->hData = hData;
+	psOSThreadData->kthread = kthread_run(OSThreadRun, psOSThreadData, "%s", pszThreadName);
+
+	if (IS_ERR(psOSThreadData->kthread))
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_kthread;
+	}
+
+	if (bIsSupportingThread)
+	{
+		psOSThreadData->pszThreadName = pszThreadName;
+		psOSThreadData->pfnDebugDumpCB = pfnDebugDumpCB;
+		psOSThreadData->bIsThreadRunning = IMG_TRUE;
+		psOSThreadData->bIsSupportingThread = IMG_TRUE;
+
+		_ThreadListAddEntry(psOSThreadData);
+	}
+
+	if (eThreadPriority != OS_THREAD_NOSET_PRIORITY &&
+	    eThreadPriority < OS_THREAD_LAST_PRIORITY)
+	{
+		set_user_nice(psOSThreadData->kthread,
+		              ai32OSPriorityValues[eThreadPriority]);
+	}
+
+	*phThread = psOSThreadData;
+
+	return PVRSRV_OK;
+
+fail_kthread:
+	OSFreeMem(psOSThreadData);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread)
+{
+	OSThreadData *psOSThreadData = hThread;
+	int ret;
+
+	/* Let the thread know we are ready for it to end and wait for it. */
+	ret = kthread_stop(psOSThreadData->kthread);
+	if (0 != ret)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "kthread_stop failed(%d)", ret));
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	if (psOSThreadData->bIsSupportingThread)
+	{
+		_ThreadListRemoveEntry(psOSThreadData);
+	}
+
+	OSFreeMem(psOSThreadData);
+
+	return PVRSRV_OK;
+}
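+
+/* Thread lifecycle sketch (MyThreadFunc is a hypothetical PFN_THREAD). The
+ * entry point must return for OSThreadDestroy() to complete, since
+ * kthread_stop() blocks until OSThreadRun() exits.
+ *
+ *	IMG_HANDLE hThread;
+ *	eError = OSThreadCreatePriority(&hThread, "my_thread", MyThreadFunc,
+ *	                                NULL, IMG_FALSE, hData,
+ *	                                OS_THREAD_NORMAL_PRIORITY);
+ *	...
+ *	eError = OSThreadDestroy(hThread);
+ */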
+
+void OSPanic(void)
+{
+	BUG();
+
+#if defined(__KLOCWORK__)
+	/* Klocworks does not understand that BUG is terminal... */
+	abort();
+#endif
+}
+
+void *
+OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
+			   size_t ui32Bytes,
+			   IMG_UINT32 ui32MappingFlags)
+{
+	void __iomem *pvLinAddr;
+
+	if (ui32MappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK))
+	{
+		PVR_ASSERT(!"Found non-cpu cache mode flag when mapping to the cpu");
+		return NULL;
+	}
+
+	if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE))
+	{
+		/*
+		  This is required to support DMA physheaps for GPU virtualization.
+		  Unfortunately, if a region of kernel managed memory is turned into
+		  a DMA buffer, conflicting mappings can come about easily on Linux
+		  as the original memory is mapped by the kernel as normal cached
+		  memory whilst DMA buffers are mapped mostly as uncached device or
+		  cache-coherent device memory. In both cases the system will have
+		  two conflicting mappings for the same memory region and will have
+		  "undefined behaviour" for most processors notably ARMv6 onwards
+		  and some x86 micro-architectures. As a result, perform ioremapping
+		  manually for DMA physheap allocations by translating from CPU/VA
+		  to BUS/PA thereby preventing the creation of conflicting mappings.
+		*/
+		pvLinAddr = (void __iomem *) SysDmaDevPAddrToCpuVAddr(BasePAddr.uiAddr, ui32Bytes);
+		if (pvLinAddr != NULL)
+		{
+			return (void __force *) pvLinAddr;
+		}
+	}
+
+	switch (ui32MappingFlags)
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+			pvLinAddr = (void __iomem *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
+			break;
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+#if defined(CONFIG_X86) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+			pvLinAddr = (void __iomem *)ioremap_wc(BasePAddr.uiAddr, ui32Bytes);
+#else
+			pvLinAddr = (void __iomem *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
+#endif
+			break;
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+#if defined(CONFIG_X86) || defined(CONFIG_ARM)
+			pvLinAddr = (void __iomem *)ioremap_cache(BasePAddr.uiAddr, ui32Bytes);
+#else
+			pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes);
+#endif
+			break;
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT:
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT:
+			PVR_ASSERT(!"Unexpected cpu cache mode");
+			pvLinAddr = NULL;
+			break;
+		default:
+			PVR_ASSERT(!"Unsupported cpu cache mode");
+			pvLinAddr = NULL;
+			break;
+	}
+
+	return (void __force *) pvLinAddr;
+}
+
+
+IMG_BOOL
+OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes, IMG_UINT32 ui32MappingFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+
+	if (ui32MappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK))
+	{
+		PVR_ASSERT(!"Found non-cpu cache mode flag when unmapping from the cpu");
+		return IMG_FALSE;
+	}
+
+	if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE))
+	{
+		if (SysDmaCpuVAddrToDevPAddr(pvLinAddr))
+		{
+			return IMG_TRUE;
+		}
+	}
+
+	iounmap((void __iomem *) pvLinAddr);
+
+	return IMG_TRUE;
+}
+
+#define	OS_MAX_TIMERS	8
+
+/* Timer callback structure used by OSAddTimer */
+typedef struct TIMER_CALLBACK_DATA_TAG
+{
+	IMG_BOOL			bInUse;
+	PFN_TIMER_FUNC		pfnTimerFunc;
+	void				*pvData;
+	struct timer_list	sTimer;
+	IMG_UINT32			ui32Delay;
+	IMG_BOOL			bActive;
+	struct work_struct	sWork;
+} TIMER_CALLBACK_DATA;
+
+static struct workqueue_struct	*psTimerWorkQueue;
+
+static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
+
+static DEFINE_MUTEX(sTimerStructLock);
+
+static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData)
+{
+	if (!psTimerCBData->bActive)
+		return;
+
+	/* call timer callback */
+	psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
+
+	/* reset timer */
+	mod_timer(&psTimerCBData->sTimer, psTimerCBData->sTimer.expires + psTimerCBData->ui32Delay);
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+/*************************************************************************/ /*!
+@Function       OSTimerCallbackWrapper
+@Description    OS specific timer callback wrapper function
+@Input          psTimer    Timer list structure
+*/ /**************************************************************************/
+static void OSTimerCallbackWrapper(struct timer_list *psTimer)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = from_timer(psTimerCBData, psTimer, sTimer);
+#else
+/*************************************************************************/ /*!
+@Function       OSTimerCallbackWrapper
+@Description    OS specific timer callback wrapper function
+@Input          uData    Timer callback data
+*/ /**************************************************************************/
+static void OSTimerCallbackWrapper(uintptr_t uData)
+{
+	TIMER_CALLBACK_DATA	*psTimerCBData = (TIMER_CALLBACK_DATA*)uData;
+#endif
+	int res;
+
+	res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
+	if (res == 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued"));
+	}
+}
+
+
+static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork);
+
+	OSTimerCallbackBody(psTimerCBData);
+}
+
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout)
+{
+	TIMER_CALLBACK_DATA	*psTimerCBData;
+	IMG_UINT32		ui32i;
+
+	/* check callback */
+	if (!pfnTimerFunc)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));
+		return NULL;
+	}
+
+	/* Allocate timer callback data structure */
+	mutex_lock(&sTimerStructLock);
+	for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+	{
+		psTimerCBData = &sTimers[ui32i];
+		if (!psTimerCBData->bInUse)
+		{
+			psTimerCBData->bInUse = IMG_TRUE;
+			break;
+		}
+	}
+	mutex_unlock(&sTimerStructLock);
+	if (ui32i >= OS_MAX_TIMERS)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use"));
+		return NULL;
+	}
+
+	psTimerCBData->pfnTimerFunc = pfnTimerFunc;
+	psTimerCBData->pvData = pvData;
+	psTimerCBData->bActive = IMG_FALSE;
+
+	/*
+		HZ = ticks per second
+		ui32MsTimeout = required ms delay
+		ticks = (HZ * ui32MsTimeout) / 1000
+	*/
+	psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
+								?	1
+								:	((HZ * ui32MsTimeout) / 1000);
+
+	/* initialise object */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+	timer_setup(&psTimerCBData->sTimer, OSTimerCallbackWrapper, 0);
+#else
+	init_timer(&psTimerCBData->sTimer);
+
+	/* setup timer object */
+	psTimerCBData->sTimer.function = (void *)OSTimerCallbackWrapper;
+	psTimerCBData->sTimer.data = (uintptr_t)psTimerCBData;
+#endif
+
+	return (IMG_HANDLE)(uintptr_t)(ui32i + 1);
+}
+
+
+static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer)
+{
+	IMG_UINT32 ui32i = (IMG_UINT32)((uintptr_t)hTimer) - 1;
+
+	PVR_ASSERT(ui32i < OS_MAX_TIMERS);
+
+	return &sTimers[ui32i];
+}
+
+PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+	PVR_ASSERT(psTimerCBData->bInUse);
+	PVR_ASSERT(!psTimerCBData->bActive);
+
+	/* free timer callback data struct */
+	psTimerCBData->bInUse = IMG_FALSE;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+	PVR_ASSERT(psTimerCBData->bInUse);
+	PVR_ASSERT(!psTimerCBData->bActive);
+
+	/* Start timer arming */
+	psTimerCBData->bActive = IMG_TRUE;
+
+	/* set the expire time */
+	psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
+
+	/* Add the timer to the list */
+	add_timer(&psTimerCBData->sTimer);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+	PVR_ASSERT(psTimerCBData->bInUse);
+	PVR_ASSERT(psTimerCBData->bActive);
+
+	/* Stop timer from arming */
+	psTimerCBData->bActive = IMG_FALSE;
+	smp_mb();
+
+	flush_workqueue(psTimerWorkQueue);
+
+	/* remove timer */
+	del_timer_sync(&psTimerCBData->sTimer);
+
+	/*
+	 * This second flush is to catch the case where the timer ran
+	 * before we managed to delete it, in which case it will have
+	 * queued more work for the workqueue. Since the bActive flag
+	 * has been cleared, this second flush won't result in the
+	 * timer being rearmed.
+	 */
+	flush_workqueue(psTimerWorkQueue);
+
+	return PVRSRV_OK;
+}
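+
+/* Timer lifecycle sketch (MyTimerFunc and pvData are hypothetical). The
+ * callback runs on the pvr_timer workqueue roughly every ui32MsTimeout
+ * milliseconds until the timer is disabled.
+ *
+ *	IMG_HANDLE hTimer = OSAddTimer(MyTimerFunc, pvData, 100);
+ *	if (hTimer != NULL)
+ *	{
+ *		OSEnableTimer(hTimer);
+ *		...
+ *		OSDisableTimer(hTimer);
+ *		OSRemoveTimer(hTimer);
+ *	}
+ */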
+
+
+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, IMG_HANDLE *hEventObject)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVR_UNREFERENCED_PARAMETER(pszName);
+
+	if (hEventObject)
+	{
+		if (LinuxEventObjectListCreate(hEventObject) != PVRSRV_OK)
+		{
+			 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: hEventObject is not a valid pointer"));
+		eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT;
+	}
+
+	return eError;
+}
+
+
+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (hEventObject)
+	{
+		LinuxEventObjectListDestroy(hEventObject);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hEventObject is not a valid pointer"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+#define _FREEZABLE IMG_TRUE
+#define _NON_FREEZABLE IMG_FALSE
+
+/*
+ * EventObjectWaitTimeout()
+ */
+static PVRSRV_ERROR EventObjectWaitTimeout(IMG_HANDLE hOSEventKM,
+										   IMG_UINT64 uiTimeoutus,
+										   IMG_BOOL bHoldBridgeLock)
+{
+	PVRSRV_ERROR eError;
+
+	if (hOSEventKM && uiTimeoutus > 0)
+	{
+		eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, bHoldBridgeLock, _NON_FREEZABLE);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid arguments %p, %lld", __func__, hOSEventKM, uiTimeoutus));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus)
+{
+	return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus, IMG_FALSE);
+}
+
+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM)
+{
+	return OSEventObjectWaitTimeout(hOSEventKM, EVENT_OBJECT_TIMEOUT_US);
+}
+
+PVRSRV_ERROR OSEventObjectWaitTimeoutAndHoldBridgeLock(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus)
+{
+	return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus, IMG_TRUE);
+}
+
+PVRSRV_ERROR OSEventObjectWaitAndHoldBridgeLock(IMG_HANDLE hOSEventKM)
+{
+	return OSEventObjectWaitTimeoutAndHoldBridgeLock(hOSEventKM, EVENT_OBJECT_TIMEOUT_US);
+}
+
+PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM,
+                                     IMG_UINT64 uiTimeoutus)
+{
+	PVRSRV_ERROR eError;
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+	if (hOSEventKM)
+	{
+		if (uiTimeoutus > 0)
+			eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, IMG_FALSE,
+			                              _FREEZABLE);
+		else
+			eError = LinuxEventObjectWaitUntilSignalled(hOSEventKM);
+	}
+#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+	if (hOSEventKM && uiTimeoutus > 0)
+	{
+		eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, IMG_FALSE,
+		                              _FREEZABLE);
+	}
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWaitKernel: invalid arguments %p",
+		        hOSEventKM));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+void OSEventObjectDumpDebugInfo(IMG_HANDLE hOSEventKM)
+{
+	LinuxEventObjectDumpDebugInfo(hOSEventKM);
+}
+
+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject, IMG_HANDLE *phOSEvent)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_LOGR_IF_FALSE(phOSEvent != NULL, "Invalid phOSEvent handle pointer",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+	PVR_LOGG_IF_FALSE(hEventObject != NULL, "Invalid hEventObject handle",
+	                  error_invalid_params);
+
+	eError = LinuxEventObjectAdd(hEventObject, phOSEvent);
+	PVR_LOGG_IF_ERROR(eError, "LinuxEventObjectAdd", error);
+
+	return PVRSRV_OK;
+
+error_invalid_params:
+	eError = PVRSRV_ERROR_INVALID_PARAMS;
+	goto error;
+
+error:
+	*phOSEvent = NULL;
+	return eError;
+}
+
+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_LOGR_IF_FALSE(hOSEventKM != NULL, "Invalid hOSEventKM handle",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+
+	eError = LinuxEventObjectDelete(hOSEventKM);
+	PVR_LOGR_IF_ERROR(eError, "LinuxEventObjectDelete");
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject)
+{
+	PVRSRV_ERROR eError;
+
+	if (hEventObject)
+	{
+		eError = LinuxEventObjectSignal(hEventObject);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignal: hOSEventKM is not a valid handle"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
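+
+/* Event object usage sketch: one context opens and waits, another signals.
+ * hEventObj is a hypothetical handle created with OSEventObjectCreate().
+ *
+ *	IMG_HANDLE hOSEvent;
+ *	if (OSEventObjectOpen(hEventObj, &hOSEvent) == PVRSRV_OK)
+ *	{
+ *		eError = OSEventObjectWait(hOSEvent);   // waiting context
+ *		OSEventObjectClose(hOSEvent);
+ *	}
+ *
+ *	OSEventObjectSignal(hEventObj);             // signalling context
+ */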
+
+PVRSRV_ERROR OSCopyToUser(void *pvProcess,
+						  void __user *pvDest,
+						  const void *pvSrc,
+						  size_t ui32Bytes)
+{
+	PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+	if (pvr_copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
+		return PVRSRV_OK;
+	else
+		return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+PVRSRV_ERROR OSCopyFromUser(void *pvProcess,
+							void *pvDest,
+							const void __user *pvSrc,
+							size_t ui32Bytes)
+{
+	PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+	if (likely(pvr_copy_from_user(pvDest, pvSrc, ui32Bytes)==0))
+		return PVRSRV_OK;
+	else
+		return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
+{
+	*pui32Remainder = do_div(ui64Divident, ui32Divisor);
+
+	return ui64Divident;
+}
+
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
+{
+	*pui32Remainder = do_div(ui64Divident, ui32Divisor);
+
+	return (IMG_UINT32) ui64Divident;
+}
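+
+/* Note: do_div() divides its first argument in place and returns the
+ * remainder, which is why the dividend is taken by value above. Sketch
+ * (ui64Ns is hypothetical):
+ *
+ *	IMG_UINT32 ui32Rem;
+ *	IMG_UINT64 ui64Ms = OSDivide64r64(ui64Ns, 1000000, &ui32Rem);
+ */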
+
+/* One time osfunc initialisation */
+PVRSRV_ERROR PVROSFuncInit(void)
+{
+	{
+		PVR_ASSERT(!psTimerWorkQueue);
+
+		psTimerWorkQueue = create_freezable_workqueue("pvr_timer");
+		if (psTimerWorkQueue == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue",
+					 __func__));
+			return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+		}
+	}
+
+	{
+		IMG_UINT32 ui32i;
+
+		for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+		{
+			TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i];
+
+			INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack);
+		}
+	}
+	return PVRSRV_OK;
+}
+
+/*
+ * Osfunc deinitialisation.
+ * Note that PVROSFuncInit may not have been called
+ */
+void PVROSFuncDeInit(void)
+{
+	if (psTimerWorkQueue != NULL)
+	{
+		destroy_workqueue(psTimerWorkQueue);
+		psTimerWorkQueue = NULL;
+	}
+}
+
+void OSDumpStack(void)
+{
+	dump_stack();
+}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+
+static struct task_struct *gsOwner;
+
+void OSAcquireBridgeLock(void)
+{
+	mutex_lock(&gPVRSRVLock);
+	gsOwner = current;
+}
+
+void OSReleaseBridgeLock(void)
+{
+	gsOwner = NULL;
+	mutex_unlock(&gPVRSRVLock);
+}
+
+struct task_struct *BridgeLockGetOwner(void)
+{
+	return gsOwner;
+}
+
+IMG_BOOL BridgeLockIsLocked(void)
+{
+	return OSLockIsLocked(&gPVRSRVLock);
+}
+
+#endif
+
+/*************************************************************************/ /*!
+@Function		OSCreateStatisticEntry
+@Description	Create a statistic entry in the specified folder.
+@Input			pszName		   String containing the name for the entry.
+@Input			pvFolder	   Reference from OSCreateStatisticFolder() of the
+							   folder to create the entry in, or NULL for the
+							   root.
+@Input			pfnStatsPrint  Pointer to function that can be used to print the
+							   values of all the statistics.
+@Input			pvData		   OS specific reference that can be used by
+							   pfnStatsPrint.
+@Return			Pointer void reference to the entry created, which can be
+				passed to OSRemoveStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+void *OSCreateStatisticEntry(IMG_CHAR* pszName, void *pvFolder,
+							 OS_STATS_PRINT_FUNC* pfnStatsPrint,
+							 void *pvData)
+{
+	PPVR_DEBUGFS_ENTRY_DATA psNewFile;
+	int iResult;
+
+	iResult = PVRDebugFSCreateFile( pszName,
+					(PPVR_DEBUGFS_DIR_DATA)pvFolder,
+					NULL,
+					NULL,
+					pfnStatsPrint,
+					pvData,
+					&psNewFile );
+
+	return (iResult != 0) ? NULL : psNewFile;
+
+} /* OSCreateStatisticEntry */
+
+
+/*************************************************************************/ /*!
+@Function		OSRemoveStatisticEntry
+@Description	Removes a statistic entry.
+@Input			ppvEntry  Double pointer to the void reference of the entry
+						  created by OSCreateStatisticEntry().
+*/ /**************************************************************************/
+void OSRemoveStatisticEntry(void **ppvEntry)
+{
+	PPVR_DEBUGFS_ENTRY_DATA psStatEntry = (PPVR_DEBUGFS_ENTRY_DATA)(*ppvEntry);
+	PVRDebugFSRemoveFile(&psStatEntry);
+	*ppvEntry = psStatEntry;
+} /* OSRemoveStatisticEntry */
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+void *OSCreateRawStatisticEntry(const IMG_CHAR *pszFileName, void *pvParentDir,
+                                OS_STATS_PRINT_FUNC *pfStatsPrint)
+{
+	PPVR_DEBUGFS_ENTRY_DATA psNewFile;
+	int iResult;
+
+	iResult = PVRDebugFSCreateFile( pszFileName,
+					pvParentDir,
+					NULL,
+					NULL,
+					pfStatsPrint,
+					NULL,
+					&psNewFile );
+
+	return (iResult != 0) ? NULL : psNewFile;
+}
+
+void OSRemoveRawStatisticEntry(void **ppvEntry)
+{
+	PPVR_DEBUGFS_ENTRY_DATA psStatEntry = (PPVR_DEBUGFS_ENTRY_DATA)(*ppvEntry);
+	PVRDebugFSRemoveFile(&psStatEntry);
+	*ppvEntry = psStatEntry;
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function		OSCreateStatisticFolder
+@Description	Create a statistic folder to hold statistic entries.
+@Input			pszName   String containing the name for the folder.
+@Input			pvFolder  Reference from OSCreateStatisticFolder() of the folder
+						  to create the folder in, or NULL for the root.
+@Return			Pointer void reference to the folder created, which can be
+				passed to OSRemoveStatisticFolder() to remove the folder.
+*/ /**************************************************************************/
+void *OSCreateStatisticFolder(IMG_CHAR *pszName, void *pvFolder)
+{
+	PPVR_DEBUGFS_DIR_DATA psNewStatFolder = NULL;
+	int iResult;
+
+	iResult = PVRDebugFSCreateEntryDir(pszName, (PPVR_DEBUGFS_DIR_DATA)pvFolder, &psNewStatFolder);
+	return (iResult == 0) ? (void *)psNewStatFolder : NULL;
+} /* OSCreateStatisticFolder */
+
+
+/*************************************************************************/ /*!
+@Function		OSRemoveStatisticFolder
+@Description	Removes a statistic folder.
+@Input          ppvFolder  Reference from OSCreateStatisticFolder() of the
+                           folder that should be removed.
+                           This needs to be double pointer because it has to
+                           be NULLed right after memory is freed to avoid
+                           possible races and use-after-free situations.
+*/ /**************************************************************************/
+void OSRemoveStatisticFolder(void **ppvFolder)
+{
+	PVRDebugFSRemoveEntryDir((PPVR_DEBUGFS_DIR_DATA *)ppvFolder);
+} /* OSRemoveStatisticFolder */
+
+
+PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray,
+                                         IMG_UINT64 sCpuVAddrBase,
+                                         IMG_CPU_PHYADDR sCpuPAHeapBase,
+                                         IMG_UINT32 ui32AllocPageCount,
+                                         IMG_UINT32 *pai32AllocIndices,
+                                         IMG_UINT32 ui32FreePageCount,
+                                         IMG_UINT32 *pai32FreeIndices,
+                                         IMG_BOOL bIsLMA)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+	pfn_t sPFN;
+#else
+	IMG_UINT64 uiPFN;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+
+	PVRSRV_ERROR eError;
+
+	struct mm_struct *psMM = current->mm;
+	struct vm_area_struct *psVMA = NULL;
+	struct address_space *psMapping = NULL;
+	struct page *psPage = NULL;
+
+	IMG_UINT64 uiCPUVirtAddr = 0;
+	IMG_UINT32 ui32Loop = 0;
+	IMG_UINT32 ui32PageSize = OSGetPageSize();
+	IMG_BOOL bMixedMap = IMG_FALSE;
+
+	/*
+	 * Acquire the lock before manipulating the VMA. In this case the
+	 * mmap_sem lock alone suffices, as the pages associated with this VMA
+	 * are never meant to be swapped out.
+	 *
+	 * In the future, should these pages be marked as swappable,
+	 * page_table_lock will need to be acquired in conjunction with it to
+	 * disable page swapping.
+	 */
+
+	/* Find the Virtual Memory Area associated with the user base address */
+	psVMA = find_vma(psMM, (uintptr_t)sCpuVAddrBase);
+	if (NULL == psVMA)
+	{
+		eError = PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND;
+		return eError;
+	}
+
+	/* Acquire the memory sem */
+	down_write(&psMM->mmap_sem);
+
+	psMapping = psVMA->vm_file->f_mapping;
+
+	/* Set the page offset to the correct value as this is disturbed in MMAP_PMR func */
+	psVMA->vm_pgoff = (psVMA->vm_start >>  PAGE_SHIFT);
+
+	/* Delete the entries for the pages that got freed */
+	if (ui32FreePageCount && (pai32FreeIndices != NULL))
+	{
+		for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+		{
+			uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32FreeIndices[ui32Loop] * ui32PageSize));
+
+			unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1);
+
+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+			/*
+			 * Still need to map pages in case remap flag is set.
+			 * That is not done until the remap case succeeds
+			 */
+#endif
+		}
+		eError = PVRSRV_OK;
+	}
+
+	if ((psVMA->vm_flags & VM_MIXEDMAP) || bIsLMA)
+	{
+		psVMA->vm_flags |= VM_MIXEDMAP;
+		bMixedMap = IMG_TRUE;
+	}
+	else
+	{
+		if (ui32AllocPageCount && (NULL != pai32AllocIndices))
+		{
+			for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++)
+			{
+
+				psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+				sPFN = page_to_pfn_t(psPage);
+
+				if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
+#else
+				uiPFN = page_to_pfn(psPage);
+
+				if (!pfn_valid(uiPFN) || (page_count(pfn_to_page(uiPFN)) == 0))
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+				{
+					bMixedMap = IMG_TRUE;
+					psVMA->vm_flags |= VM_MIXEDMAP;
+					break;
+				}
+			}
+		}
+	}
+
+	/* Map the pages that got allocated */
+	if (ui32AllocPageCount && (NULL != pai32AllocIndices))
+	{
+		for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++)
+		{
+			int err;
+
+			uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32AllocIndices[ui32Loop] * ui32PageSize));
+			unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1);
+
+			if (bIsLMA)
+			{
+				phys_addr_t uiAddr = sCpuPAHeapBase.uiAddr +
+				                     ((IMG_DEV_PHYADDR *)psPageArray)[pai32AllocIndices[ui32Loop]].uiAddr;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+				sPFN = phys_to_pfn_t(uiAddr, 0);
+				psPage = pfn_t_to_page(sPFN);
+#else
+				uiPFN = uiAddr >> PAGE_SHIFT;
+				psPage = pfn_to_page(uiPFN);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+			}
+			else
+			{
+				psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+				sPFN = page_to_pfn_t(psPage);
+#else
+				uiPFN = page_to_pfn(psPage);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+			}
+
+			if (bMixedMap)
+			{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
+				vm_fault_t vmf;
+
+				vmf = vmf_insert_mixed(psVMA, uiCPUVirtAddr, sPFN);
+				if (vmf & VM_FAULT_ERROR)
+				{
+					err = vm_fault_to_errno(vmf, 0);
+				}
+				else
+				{
+					err = 0;
+				}
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+				err = vm_insert_mixed(psVMA, uiCPUVirtAddr, sPFN);
+#else
+				err = vm_insert_mixed(psVMA, uiCPUVirtAddr, uiPFN);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) */
+			}
+			else
+			{
+				err = vm_insert_page(psVMA, uiCPUVirtAddr, psPage);
+			}
+
+			if (err)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "Remap failure error code: %d", err));
+				eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
+				goto eFailed;
+			}
+		}
+	}
+
+	eError = PVRSRV_OK;
+eFailed:
+	up_write(&psMM->mmap_sem);
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       OSDebugSignalPID
+@Description    Sends a SIGTRAP signal to a specific PID in user mode for
+                debugging purposes. The user mode process can register a handler
+                against this signal.
+                This is necessary to support the Rogue debugger. If the Rogue
+                debugger is not used then this function may be implemented as
+                a stub.
+@Input          ui32PID    The PID for the signal.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID)
+{
+	int err;
+	struct pid *psPID;
+
+	psPID = find_vpid(ui32PID);
+	if (psPID == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get PID struct.", __func__));
+		return PVRSRV_ERROR_NOT_FOUND;
+	}
+
+	err = kill_pid(psPID, SIGTRAP, 0);
+	if (err != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Signal Failure %d", __func__, err));
+		return PVRSRV_ERROR_SIGNAL_FAILED;
+	}
+
+	return PVRSRV_OK;
+}
+
+void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					   void *pvDumpDebugFile)
+{
+	PVR_DUMPDEBUG_LOG("OS kernel info: %s %s %s %s",
+					utsname()->sysname,
+					utsname()->release,
+					utsname()->version,
+					utsname()->machine);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc.h
new file mode 100644
index 0000000..9915a99
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc.h
@@ -0,0 +1,1833 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS functions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS specific API definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifdef DEBUG_RELEASE_BUILD
+#pragma optimize( "", off )
+#define DEBUG		1
+#endif
+
+#ifndef __OSFUNC_H__
+/*! @cond Doxygen_Suppress */
+#define __OSFUNC_H__
+/*! @endcond */
+
+#if defined(LINUX) && defined(__KERNEL__)
+#include "kernel_nospec.h"
+#if !defined(NO_HARDWARE)
+#include <linux/io.h>
+#endif
+#endif
+
+#include <stdarg.h>
+
+#if defined(__QNXNTO__)
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#if defined(INTEGRITY_OS)
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "device.h"
+#include "pvrsrv_device.h"
+#include "cache_km.h"
+#include "osfunc_common.h"
+
+/******************************************************************************
+ * Static defines
+ *****************************************************************************/
+/*!
+ * Returned by OSGetCurrentProcessID() and OSGetCurrentThreadID() if the OS
+ * is currently operating in the interrupt context.
+ */
+#define KERNEL_ID			0xffffffffL
+
+#if defined(LINUX) && defined(__KERNEL__)
+#define OSConfineArrayIndexNoSpeculation(index, size) array_index_nospec((index), (size))
+#elif defined(__QNXNTO__)
+#define OSConfineArrayIndexNoSpeculation(index, size) (index)
+#define PVRSRV_MISSING_NO_SPEC_IMPL
+#elif defined(INTEGRITY_OS)
+#define OSConfineArrayIndexNoSpeculation(index, size) (index)
+#define PVRSRV_MISSING_NO_SPEC_IMPL
+#else
+/*************************************************************************/ /*!
+@Function       OSConfineArrayIndexNoSpeculation
+@Description    This macro aims to avoid code exposure to Cache Timing
+                Side-Channel Mechanisms which rely on speculative code
+                execution (Variant 1). It does so by ensuring a value to be
+                used as an array index will be set to zero if outside of the
+                bounds of the array, meaning any speculative execution of code
+                which uses this suitably adjusted index value will not then
+                attempt to load data from memory outside of the array bounds.
+                Code calling this macro must still first verify that the
+                original unmodified index value is within the bounds of the
+                array, and should then only use the modified value returned
+                by this function when accessing the array itself.
+                NB. If no OS-specific implementation of this macro is
+                defined, the original index is returned unmodified and no
+                protection against the potential exploit is provided.
+@Input          index    The original array index value that would be used to
+                         access the array.
+@Input          size     The number of elements in the array being accessed.
+@Return         The value to use for the array index, modified so that it
+                remains within array bounds.
+*/ /**************************************************************************/
+#define OSConfineArrayIndexNoSpeculation(index, size) (index)
+#if !defined(DOXYGEN)
+#define PVRSRV_MISSING_NO_SPEC_IMPL
+#endif
+#endif
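+
+/* Usage sketch: validate the index first, then confine it before the array
+ * access (aui32Table and ui32Idx are hypothetical):
+ *
+ *	if (ui32Idx < ARRAY_SIZE(aui32Table))
+ *	{
+ *		ui32Idx = OSConfineArrayIndexNoSpeculation(ui32Idx,
+ *		                                           ARRAY_SIZE(aui32Table));
+ *		ui32Val = aui32Table[ui32Idx];
+ *	}
+ */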
+
+/*************************************************************************/ /*!
+@Function       OSClockns64
+@Description    This function returns the number of ticks since system boot
+                expressed in nanoseconds. Unlike OSClockns, OSClockns64 has
+                a near 64-bit range.
+@Return         The 64-bit clock value, in nanoseconds.
+*/ /**************************************************************************/
+IMG_UINT64 OSClockns64(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockus64
+@Description    This function returns the number of ticks since system boot
+                expressed in microseconds. Unlike OSClockus, OSClockus64 has
+                a near 64-bit range.
+@Return         The 64-bit clock value, in microseconds.
+*/ /**************************************************************************/
+IMG_UINT64 OSClockus64(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockus
+@Description    This function returns the number of ticks since system boot
+                in microseconds.
+@Return         The 32-bit clock value, in microseconds.
+*/ /**************************************************************************/
+IMG_UINT32 OSClockus(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockms
+@Description    This function returns the number of ticks since system boot
+                in milliseconds.
+@Return         The 32-bit clock value, in milliseconds.
+*/ /**************************************************************************/
+IMG_UINT32 OSClockms(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicns64
+@Description    This function returns a clock value based on the system
+                monotonic clock.
+@Output         pui64Time     The 64-bit clock value, in nanoseconds.
+@Return         Error Code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicus64
+@Description    This function returns a clock value based on the system
+                monotonic clock.
+@Output         pui64Time     The 64-bit clock value, in microseconds.
+@Return         Error Code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicRawns64
+@Description    This function returns a clock value based on the system
+                monotonic raw clock.
+@Return         64bit ns timestamp
+*/ /**************************************************************************/
+IMG_UINT64 OSClockMonotonicRawns64(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicRawus64
+@Description    This function returns a clock value based on the system
+                monotonic raw clock.
+@Return         64bit us timestamp
+*/ /**************************************************************************/
+IMG_UINT64 OSClockMonotonicRawus64(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetPageSize
+@Description    This function returns the page size.
+                If the OS is not using memory mappings it should return a
+                default value of 4096.
+@Return         The size of a page, in bytes.
+*/ /**************************************************************************/
+size_t OSGetPageSize(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetPageShift
+@Description    This function returns the page size expressed as a power of
+                two. A number of pages, left-shifted by this value, gives the
+                equivalent size in bytes.
+                If the OS is not using memory mappings it should return a
+                default value of 12.
+@Return         The page size expressed as a power of two.
+*/ /**************************************************************************/
+size_t OSGetPageShift(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetPageMask
+@Description    This function returns a bitmask that may be applied to an
+                address to mask off the least-significant bits so as to
+                leave the start address of the page containing that address.
+@Return         The page mask.
+*/ /**************************************************************************/
+size_t OSGetPageMask(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetOrder
+@Description    This function returns the allocation order for a given size,
+                i.e. log2 of the number of pages required to hold uSize
+                bytes. Eg. for a uSize of 4096 bytes on a system with 4KB
+                pages the function would return 0 (a single page).
+@Input          uSize     The size in bytes.
+@Return         The order of power of two.
+*/ /**************************************************************************/
+size_t OSGetOrder(size_t uSize);
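+
+/* Example, assuming the Linux implementation (get_order of the page-aligned
+ * size) and 4KB pages: OSGetOrder(4096) returns 0 (one page) and
+ * OSGetOrder(16384) returns 2 (2^2 = 4 pages).
+ */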
+
+/*************************************************************************/ /*!
+@Function       OSGetRAMSize
+@Description    This function returns the total amount of GPU-addressable
+                memory provided by the system. In other words, after loading
+                the driver this would be the largest allocation an
+                application would reasonably expect to be able to make.
+                Note that this function is not expected to return the
+                current available memory but the amount which would be
+                available on startup.
+@Return         Total GPU-addressable memory size, in bytes.
+*/ /**************************************************************************/
+IMG_UINT64 OSGetRAMSize(void);
+
+/*************************************************************************/ /*!
+@Description    Pointer to a Mid-level Interrupt Service Routine (MISR).
+@Input  pvData  Pointer to MISR specific data.
+*/ /**************************************************************************/
+typedef void (*PFN_MISR)(void *pvData);
+
+/*************************************************************************/ /*!
+@Description    Pointer to a thread entry point function.
+@Input  pvData  Pointer to thread specific data.
+*/ /**************************************************************************/
+typedef void (*PFN_THREAD)(void *pvData);
+
+/**************************************************************************/ /*!
+@Function       OSChangeSparseMemCPUAddrMap
+@Description    This function changes the CPU mapping of the underlying
+                sparse allocation. It is used by a PMR 'factory'
+                implementation if that factory supports sparse
+                allocations.
+@Input          psPageArray        array representing the pages in the
+                                   sparse allocation
+@Input          sCpuVAddrBase      the virtual base address of the sparse
+                                   allocation ('first' page)
+@Input          sCpuPAHeapBase     the physical address of the virtual
+                                   base address 'sCpuVAddrBase'
+@Input          ui32AllocPageCount the number of pages referenced in
+                                   'pai32AllocIndices'
+@Input          pai32AllocIndices  list of indices of pages within
+                                   'psPageArray' that we now want to
+                                   allocate and map
+@Input          ui32FreePageCount  the number of pages referenced in
+                                   'pai32FreeIndices'
+@Input          pai32FreeIndices   list of indices of pages within
+                                   'psPageArray' we now want to
+                                   unmap and free
+@Input          bIsLMA             flag indicating if the sparse allocation
+                                   is from LMA or UMA memory
+@Return         PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray,
+                                         IMG_UINT64 sCpuVAddrBase,
+                                         IMG_CPU_PHYADDR sCpuPAHeapBase,
+                                         IMG_UINT32 ui32AllocPageCount,
+                                         IMG_UINT32 *pai32AllocIndices,
+                                         IMG_UINT32 ui32FreePageCount,
+                                         IMG_UINT32 *pai32FreeIndices,
+                                         IMG_BOOL bIsLMA);
+
+/*************************************************************************/ /*!
+@Function       OSInstallMISR
+@Description    Installs a Mid-level Interrupt Service Routine (MISR)
+                which handles higher-level processing of interrupts from
+                the device (GPU).
+                An MISR runs outside of interrupt context, and so may be
+                descheduled. This means it can contain code that would
+                not be permitted in the LISR.
+                An MISR is invoked when OSScheduleMISR() is called. This
+                call should be made by the installed LISR once it has
+                completed its interrupt processing.
+                Multiple MISRs may be installed by the driver to handle
+                different causes of interrupt.
+@Input          pfnMISR       pointer to the function to be installed
+                              as the MISR
+@Input          hData         private data provided to the MISR
+@Input          pszMisrName   Name describing purpose of MISR worker thread
+                              (Must be a string literal).
+@Output         hMISRData     handle to the installed MISR (to be used
+                              for a subsequent uninstall)
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData,
+							PFN_MISR pfnMISR,
+							void *hData,
+							const IMG_CHAR *pszMisrName);
+
+/*************************************************************************/ /*!
+@Function       OSUninstallMISR
+@Description    Uninstalls a Mid-level Interrupt Service Routine (MISR).
+@Input          hMISRData     handle to the installed MISR
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData);
+
+/*************************************************************************/ /*!
+@Function       OSScheduleMISR
+@Description    Schedules a Mid-level Interrupt Service Routine (MISR) to be
+                executed. An MISR should be executed outside of interrupt
+                context, for example in a work queue.
+@Input          hMISRData     handle to the installed MISR
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData);
+
+/*************************************************************************/ /*!
+@Description    Pointer to a function implementing debug dump of thread-specific
+                data.
+@Input          pfnDumpDebugPrintf      Used to specify the print function used
+                                        to dump any debug information. If this
+                                        argument is NULL then a default print
+                                        function will be used.
+@Input          pvDumpDebugFile         File identifier to be passed to the
+                                        print function if specified.
+*/ /**************************************************************************/
+
+typedef void (*PFN_THREAD_DEBUG_DUMP)(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+                                      void *pvDumpDebugFile);
+
+/*************************************************************************/ /*!
+@Function       OSThreadCreate
+@Description    Creates a kernel thread and starts it running. The caller
+                is responsible for informing the thread that it must finish
+                and return from the pfnThread function. It is not possible
+                to kill or terminate it. The new thread runs with the default
+                priority provided by the Operating System.
+                Note: Kernel threads are freezable, which means that they
+                can be frozen by the kernel, for example on driver suspend.
+                Because of that, only the OSEventObjectWaitKernel() function
+                should be used to put kernel threads into a waiting state.
+@Output         phThread            Returned handle to the thread.
+@Input          pszThreadName       Name to assign to the thread.
+@Input          pfnThread           Thread entry point function.
+@Input          pfnDebugDumpCB      Used to dump info of the created thread
+@Input          bIsSupportingThread Set, if summary of this thread needs to
+                                    be dumped in debug_dump
+@Input          hData               Thread specific data pointer for pfnThread().
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+
+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
+                            IMG_CHAR *pszThreadName,
+                            PFN_THREAD pfnThread,
+                            PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB,
+                            IMG_BOOL bIsSupportingThread,
+                            void *hData);
+
+/*! Available priority levels for the creation of a new Kernel Thread. */
+typedef enum priority_levels
+{
+	OS_THREAD_NOSET_PRIORITY = 0,   /* With this option the priority level is the default for the given OS */
+	OS_THREAD_HIGHEST_PRIORITY,
+	OS_THREAD_HIGH_PRIORITY,
+	OS_THREAD_NORMAL_PRIORITY,
+	OS_THREAD_LOW_PRIORITY,
+	OS_THREAD_LOWEST_PRIORITY,
+	OS_THREAD_LAST_PRIORITY     /* This must be always the last entry */
+} OS_THREAD_LEVEL;
+
+/*************************************************************************/ /*!
+@Function       OSThreadCreatePriority
+@Description    As OSThreadCreate, this function creates a kernel thread and
+                starts it running. The difference is that with this function
+                it is possible to specify the priority used to schedule the
+                new thread.
+
+@Output         phThread            Returned handle to the thread.
+@Input          pszThreadName       Name to assign to the thread.
+@Input          pfnThread           Thread entry point function.
+@Input          pfnDebugDumpCB      Used to dump info of the created thread
+@Input          bIsSupportingThread Set, if summary of this thread needs to
+                                    be dumped in debug_dump
+@Input          hData               Thread specific data pointer for pfnThread().
+@Input          eThreadPriority     Priority level to assign to the new thread.
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
+                                    IMG_CHAR *pszThreadName,
+                                    PFN_THREAD pfnThread,
+                                    PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB,
+                                    IMG_BOOL bIsSupportingThread,
+                                    void *hData,
+                                    OS_THREAD_LEVEL eThreadPriority);
+
+/*************************************************************************/ /*!
+@Function       OSThreadDestroy
+@Description    Waits for the thread to end and then destroys the thread
+                handle memory. This function will block and wait for the
+                thread to finish successfully, thereby providing a sync point
+                for the thread completing its work. No attempt is made to kill
+                or otherwise terminate the thread.
+@Input          hThread   The thread handle returned by OSThreadCreate().
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread);
+
+/**************************************************************************/ /*!
+@Function       OSMapPhysToLin
+@Description    Maps physical memory into a linear address range.
+@Input          BasePAddr    physical CPU address
+@Input          ui32Bytes    number of bytes to be mapped
+@Input          ui32Flags    flags denoting the caching mode to be employed
+                             for the mapping (uncached/write-combined,
+                             cached coherent or cached incoherent).
+                             See pvrsrv_memallocflags.h for full flag bit
+                             definitions.
+@Return         Pointer to the new mapping if successful, NULL otherwise.
+ */ /**************************************************************************/
+void *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, size_t ui32Bytes, IMG_UINT32 ui32Flags);
+
+/**************************************************************************/ /*!
+@Function       OSUnMapPhysToLin
+@Description    Unmaps physical memory previously mapped by OSMapPhysToLin().
+@Input          pvLinAddr    the linear mapping to be unmapped
+@Input          ui32Bytes    number of bytes to be unmapped
+@Input          ui32Flags    flags denoting the caching mode that was employed
+                             for the original mapping.
+@Return         IMG_TRUE if unmapping was successful, IMG_FALSE otherwise.
+ */ /**************************************************************************/
+IMG_BOOL OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes, IMG_UINT32 ui32Flags);
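+
+/* Illustrative pairing of the map/unmap calls above (an editor's sketch;
+ * sPAddr and ui32Size are assumed inputs, and the flag name used here is an
+ * assumption based on pvrsrv_memallocflags.h):
+ *
+ *     void *pvLin = OSMapPhysToLin(sPAddr, ui32Size,
+ *                                  PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+ *     if (pvLin != NULL)
+ *     {
+ *         // ... access the memory through pvLin ...
+ *         OSUnMapPhysToLin(pvLin, ui32Size,
+ *                          PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+ *     }
+ */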
+
+/**************************************************************************/ /*!
+@Function       OSCPUOperation
+@Description    Perform the specified cache operation on the CPU.
+@Input          eCacheOp      the type of cache operation to be performed
+@Return         PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP eCacheOp);
+
+/**************************************************************************/ /*!
+@Function       OSCPUCacheFlushRangeKM
+@Description    Clean and invalidate the CPU cache for the specified
+                address range.
+@Input          psDevNode     device on which the allocation was made
+@Input          pvVirtStart   virtual start address of the range to be
+                              flushed
+@Input          pvVirtEnd     virtual end address of the range to be
+                              flushed
+@Input          sCPUPhysStart physical start address of the range to be
+                              flushed
+@Input          sCPUPhysEnd   physical end address of the range to be
+                              flushed
+@Return         None
+ */ /**************************************************************************/
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/**************************************************************************/ /*!
+@Function       OSCPUCacheCleanRangeKM
+@Description    Clean the CPU cache for the specified address range.
+                This writes out the contents of the cache and clears the
+                'dirty' bit (indicating that the physical memory is now
+                consistent with the cache contents).
+@Input          psDevNode     device on which the allocation was made
+@Input          pvVirtStart   virtual start address of the range to be
+                              cleaned
+@Input          pvVirtEnd     virtual end address of the range to be
+                              cleaned
+@Input          sCPUPhysStart physical start address of the range to be
+                              cleaned
+@Input          sCPUPhysEnd   physical end address of the range to be
+                              cleaned
+@Return         None
+ */ /**************************************************************************/
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/**************************************************************************/ /*!
+@Function       OSCPUCacheInvalidateRangeKM
+@Description    Invalidate the CPU cache for the specified address range.
+                The cache must reload data from those addresses if they
+                are accessed.
+@Input          psDevNode     device on which the allocation was made
+@Input          pvVirtStart   virtual start address of the range to be
+                              invalidated
+@Input          pvVirtEnd     virtual end address of the range to be
+                              invalidated
+@Input          sCPUPhysStart physical start address of the range to be
+                              invalidated
+@Input          sCPUPhysEnd   physical end address of the range to be
+                              invalidated
+@Return         None
+ */ /**************************************************************************/
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                 void *pvVirtStart,
+                                 void *pvVirtEnd,
+                                 IMG_CPU_PHYADDR sCPUPhysStart,
+                                 IMG_CPU_PHYADDR sCPUPhysEnd);
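+
+/* Illustrative clean-before-device-read using the range operations above
+ * (an editor's sketch; the virtual and physical pairs must describe the
+ * same range, and one of them may be ignored depending on the address type
+ * reported by OSCPUCacheOpAddressType() below):
+ *
+ *     OSCPUCacheCleanRangeKM(psDevNode,
+ *                            pvBuf, (IMG_BYTE *)pvBuf + uiLen,
+ *                            sPhysStart, sPhysEnd);
+ */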
+
+/**************************************************************************/ /*!
+@Function       OSCPUCacheOpAddressType
+@Description    Returns the address type (i.e. virtual/physical/both) that
+                the OS uses to perform cache maintenance on the CPU. This is
+                used to infer whether the virtual or physical address supplied
+                to the OSCPUCacheXXXRangeKM functions can be omitted when
+                called.
+@Return         PVRSRV_CACHE_OP_ADDR_TYPE
+ */ /**************************************************************************/
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void);
+
+/*!
+ ******************************************************************************
+ * Cache attribute size type
+ *****************************************************************************/
+typedef enum _IMG_DCACHE_ATTRIBUTE_
+{
+	PVR_DCACHE_LINE_SIZE = 0,    /*!< The cache line size */
+	PVR_DCACHE_ATTRIBUTE_COUNT   /*!< The number of attributes (must be last) */
+} IMG_DCACHE_ATTRIBUTE;
+
+/**************************************************************************/ /*!
+@Function       OSCPUCacheAttributeSize
+@Description    Returns the size of a given cache attribute.
+                Typically this function is used to return the cache line
+                size, but may be extended to return the size of other
+                cache attributes.
+@Input          eCacheAttribute   the cache attribute whose size should
+                                  be returned.
+@Return         The size of the specified cache attribute, in bytes.
+ */ /**************************************************************************/
+IMG_UINT32 OSCPUCacheAttributeSize(IMG_DCACHE_ATTRIBUTE eCacheAttribute);
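+
+/* Illustrative use (an editor's sketch): round an allocation size up to a
+ * whole number of cache lines, assuming the line size is a power of two:
+ *
+ *     IMG_UINT32 ui32Line = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+ *     size_t uiRounded = (uiSize + ui32Line - 1) & ~((size_t)ui32Line - 1);
+ */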
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentProcessID
+@Description    Returns ID of current process (thread group)
+@Return         ID of current process
+*****************************************************************************/
+IMG_PID OSGetCurrentProcessID(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentProcessName
+@Description    Gets the name of current process
+@Return         Process name
+*****************************************************************************/
+IMG_CHAR *OSGetCurrentProcessName(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentProcessVASpaceSize
+@Description    Returns the CPU virtual address space size of the current
+                process
+@Return         Process VA space size
+IMG_UINT64 OSGetCurrentProcessVASpaceSize(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentThreadID
+@Description    Returns ID for current thread
+@Return         ID of current thread
+*****************************************************************************/
+uintptr_t OSGetCurrentThreadID(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentClientProcessIDKM
+@Description    Returns ID of current client process (thread group) which
+                has made a bridge call into the server.
+                For some operating systems, this may simply be the current
+                process id. For others, it may be that a dedicated thread
+                is used to handle the processing of bridge calls and that
+                some additional processing is required to obtain the ID of
+                the client process making the bridge call.
+@Return         ID of current client process
+*****************************************************************************/
+IMG_PID OSGetCurrentClientProcessIDKM(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentClientProcessNameKM
+@Description    Gets the name of current client process
+@Return         Client process name
+*****************************************************************************/
+IMG_CHAR *OSGetCurrentClientProcessNameKM(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentClientThreadIDKM
+@Description    Returns ID for current client thread
+                For some operating systems, this may simply be the current
+                thread id. For others, it may be that a dedicated thread
+                is used to handle the processing of bridge calls and that
+                some additional processing is required to obtain the ID of
+                the client thread making the bridge call.
+@Return         ID of current client thread
+*****************************************************************************/
+uintptr_t OSGetCurrentClientThreadIDKM(void);
+
+/**************************************************************************/ /*!
+@Function       OSMemCmp
+@Description    Compares two blocks of memory for equality.
+@Input          pvBufA      Pointer to the first block of memory
+@Input          pvBufB      Pointer to the second block of memory
+@Input          uiLen       The number of bytes to be compared
+@Return         Value < 0 if pvBufA is less than pvBufB.
+                Value > 0 if pvBufB is less than pvBufA.
+                Value = 0 if pvBufA is equal to pvBufB.
+*****************************************************************************/
+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesAlloc
+@Description    Allocates a number of contiguous physical pages.
+                If allocations made by this function are CPU cached then
+                OSPhyContigPagesClean has to be implemented to write the
+                cached data to memory.
+@Input          psDevNode     the device for which the allocation is
+                              required
+@Input          uiSize        the size of the required allocation (in bytes)
+@Output         psMemHandle   a returned handle to be used to refer to this
+                              allocation
+@Output         psDevPAddr    the physical address of the allocation
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+							PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesFree
+@Description    Frees a previous allocation of contiguous physical pages
+@Input          psDevNode     the device on which the allocation was made
+@Input          psMemHandle   the handle of the allocation to be freed
+@Return         None.
+*****************************************************************************/
+void OSPhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesMap
+@Description    Maps the specified allocation of contiguous physical pages
+                to a kernel virtual address
+@Input          psDevNode     the device on which the allocation was made
+@Input          psMemHandle   the handle of the allocation to be mapped
+@Input          uiSize        the size of the allocation (in bytes)
+@Input          psDevPAddr    the physical address of the allocation
+@Output         pvPtr         the virtual kernel address to which the
+                              allocation is now mapped
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+						size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+						void **pvPtr);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesUnmap
+@Description    Unmaps the kernel mapping for the specified allocation of
+                contiguous physical pages
+@Input          psDevNode     the device on which the allocation was made
+@Input          psMemHandle   the handle of the allocation to be unmapped
+@Input          pvPtr         the virtual kernel address to which the
+                              allocation is currently mapped
+@Return         None.
+*****************************************************************************/
+void OSPhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, void *pvPtr);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesClean
+@Description    Write the contents of the specified allocation from the CPU
+                cache to memory, from (start + uiOffset) to
+                (start + uiOffset + uiLength).
+                It is expected to be implemented as a cache clean operation
+                but it is allowed to fall back to a cache clean + invalidate
+                (i.e. flush).
+                If allocations returned by OSPhyContigPagesAlloc are always
+                uncached this can be implemented as a no-op.
+@Input          psDevNode     device on which the allocation was made
+@Input          psMemHandle   the handle of the allocation to be flushed
+@Input          uiOffset      the offset in bytes from the start of the
+                              allocation from where to start flushing
+@Input          uiLength      the amount to flush from the offset in bytes
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+                                   PG_HANDLE *psMemHandle,
+                                   IMG_UINT32 uiOffset,
+                                   IMG_UINT32 uiLength);
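+
+/* Illustrative lifecycle of the contiguous-page calls above (an editor's
+ * sketch; psDevNode and uiSize are assumed inputs):
+ *
+ *     PG_HANDLE sMemHandle;
+ *     IMG_DEV_PHYADDR sDevPAddr;
+ *     void *pvPtr;
+ *
+ *     if (OSPhyContigPagesAlloc(psDevNode, uiSize,
+ *                               &sMemHandle, &sDevPAddr) == PVRSRV_OK)
+ *     {
+ *         if (OSPhyContigPagesMap(psDevNode, &sMemHandle, uiSize,
+ *                                 &sDevPAddr, &pvPtr) == PVRSRV_OK)
+ *         {
+ *             // ... fill pvPtr ...
+ *             OSPhyContigPagesClean(psDevNode, &sMemHandle,
+ *                                   0, (IMG_UINT32)uiSize);
+ *             OSPhyContigPagesUnmap(psDevNode, &sMemHandle, pvPtr);
+ *         }
+ *         OSPhyContigPagesFree(psDevNode, &sMemHandle);
+ *     }
+ */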
+
+
+/**************************************************************************/ /*!
+@Function       OSInitEnvData
+@Description    Called to initialise any environment-specific data. This
+                could include initialising the bridge calling infrastructure
+                or device memory management infrastructure.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSInitEnvData(void);
+
+/**************************************************************************/ /*!
+@Function       OSDeInitEnvData
+@Description    The counterpart to OSInitEnvData(). Called to free any
+                resources which may have been allocated by OSInitEnvData().
+@Return         None.
+ */ /**************************************************************************/
+void OSDeInitEnvData(void);
+
+/**************************************************************************/ /*!
+@Function       OSVSScanf
+@Description    OS function to support the standard C sscanf() function.
+ */ /**************************************************************************/
+IMG_UINT32 OSVSScanf(IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...);
+
+/**************************************************************************/ /*!
+@Function       OSStringNCopy
+@Description    OS function to support the standard C strncpy() function.
+ */ /**************************************************************************/
+IMG_CHAR* OSStringNCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uSize);
+
+/**************************************************************************/ /*!
+@Function       OSStringLCopy
+@Description    OS function to support the BSD C strlcpy() function.
+ */ /**************************************************************************/
+size_t OSStringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uSize);
+
+/**************************************************************************/ /*!
+@Function       OSSNPrintf
+@Description    OS function to support the standard C snprintf() function.
+@Output         pStr        char array to print into
+@Input          ui32Size    maximum size of data to write (chars)
+@Input          pszFormat   format string
+ */ /**************************************************************************/
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) __printf(3, 4);
+
+/**************************************************************************/ /*!
+@Function       OSVSNPrintf
+@Description    Printf to IMG string using variable args (see stdarg.h).
+                This is necessary because the '...' notation does not
+                support nested function calls.
+@Input          ui32Size           maximum size of data to write (chars)
+@Input          pszFormat          format string
+@Input          vaArgs             variable args structure (from stdarg.h)
+@Output         pStr               char array to print into
+@Return         Number of characters written to the buffer if successful,
+                otherwise -1 on error.
+*/ /**************************************************************************/
+IMG_INT32 OSVSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR* pszFormat, va_list vaArgs) __printf(3, 0);
+
+/**************************************************************************/ /*!
+@Function       OSStringLength
+@Description    OS function to support the standard C strlen() function.
+ */ /**************************************************************************/
+size_t OSStringLength(const IMG_CHAR *pStr);
+
+/**************************************************************************/ /*!
+@Function       OSStringNLength
+@Description    Return the length of a string, excluding the terminating null
+                byte ('\0'), but return at most 'uiCount' bytes. Only the first
+                'uiCount' bytes of 'pStr' are interrogated.
+@Input          pStr     pointer to the string
+@Input          uiCount  the maximum length to return
+@Return         Length of the string if less than 'uiCount' bytes, otherwise
+                'uiCount'.
+ */ /**************************************************************************/
+size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount);
+
+/**************************************************************************/ /*!
+@Function       OSStringCompare
+@Description    OS function to support the standard C strcmp() function.
+ */ /**************************************************************************/
+IMG_INT32 OSStringCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2);
+
+/**************************************************************************/ /*!
+@Function       OSStringNCompare
+@Description    OS function to support the standard C strncmp() function.
+ */ /**************************************************************************/
+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
+                           size_t uiSize);
+
+/**************************************************************************/ /*!
+@Function       OSStringToUINT32
+@Description    Changes string to IMG_UINT32.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base,
+                              IMG_UINT32 *ui32Result);
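+
+/* Illustrative use of the conversion above (an editor's sketch; strtoul-like
+ * behaviour for ui32Base, including base 0 inferring the radix from a "0x"
+ * prefix, is an assumption and not documented above):
+ *
+ *     IMG_UINT32 ui32Val;
+ *     if (OSStringToUINT32("0x1000", 0, &ui32Val) == PVRSRV_OK)
+ *     {
+ *         // ui32Val == 0x1000
+ *     }
+ */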
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectCreate
+@Description    Create an event object.
+@Input          pszName         name to assign to the new event object.
+@Output         EventObject     the created event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
+								 IMG_HANDLE *EventObject);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectDestroy
+@Description    Destroy an event object.
+@Input          hEventObject    the event object to destroy.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectSignal
+@Description    Signal an event object. Any thread waiting on that event
+                object will be woken.
+@Input          hEventObject    the event object to signal.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWait
+@Description    Wait for an event object to signal. The function is passed
+                an OS event object handle (which allows the OS to have the
+                calling thread wait on the associated event object).
+                The calling thread will be rescheduled when the associated
+                event object signals.
+                If the event object has not signalled after a default timeout
+                period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
+                will return with the result code PVRSRV_ERROR_TIMEOUT.
+
+                Note: If use of the global bridge lock is supported (if the
+                DDK has been built with PVRSRV_USE_BRIDGE_LOCK defined), the
+                global bridge lock should be released while waiting for the
+                event object to signal (if held by the current thread).
+                The following logic should be implemented in the OS
+                implementation:
+                ...
+                bReleasePVRLock = (!bHoldBridgeLock &&
+                                   BridgeLockIsLocked() &&
+                                   current == BridgeLockGetOwner());
+                if (bReleasePVRLock == IMG_TRUE) OSReleaseBridgeLock();
+                ...
+                / * sleep & reschedule - wait for signal * /
+                ...
+                / * if lock was previously held, re-acquire it * /
+                if (bReleasePVRLock == IMG_TRUE) OSAcquireBridgeLock();
+                ...
+
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWaitKernel
+@Description    Wait for an event object to signal. The function is passed
+                an OS event object handle (which allows the OS to have the
+                calling thread wait on the associated event object).
+                The calling thread will be rescheduled when the associated
+                event object signals.
+                If the event object has not signalled after a default timeout
+                period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
+                will return with the result code PVRSRV_ERROR_TIMEOUT.
+
+                Note: This function should be used only by kernel threads.
+                This is because all kernel threads are freezable and
+                this function allows the kernel to freeze the threads
+                when waiting.
+
+                See OSEventObjectWait() for more details.
+
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+#if defined(LINUX) && defined(__KERNEL__)
+PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
+#else
+#define OSEventObjectWaitKernel OSEventObjectWaitTimeout
+#endif
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWaitTimeout
+@Description    Wait for an event object to signal or timeout. The function
+                is passed an OS event object handle (which allows the OS to
+                have the calling thread wait on the associated event object).
+                The calling thread will be rescheduled when the associated
+                event object signals.
+                If the event object has not signalled after the specified
+                timeout period (passed in 'uiTimeoutus'), the function
+                will return with the result code PVRSRV_ERROR_TIMEOUT.
+                NB. If use of the global bridge lock is supported (if
+                PVRSRV_USE_BRIDGE_LOCK is defined) it should be released while
+                waiting for the event object to signal (if held by the current
+                thread).
+                See OSEventObjectWait() for details.
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Input          uiTimeoutus   the timeout period (in usecs)
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWaitAndHoldBridgeLock
+@Description    Wait for an event object to signal. The function is passed
+                an OS event object handle (which allows the OS to have the
+                calling thread wait on the associated event object).
+                The calling thread will be rescheduled when the associated
+                event object signals.
+                If the event object has not signalled after a default timeout
+                period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
+                will return with the result code PVRSRV_ERROR_TIMEOUT.
+                If use of the global bridge lock is supported (if
+                PVRSRV_USE_BRIDGE_LOCK is defined), it will be held while
+                waiting for the event object to signal (this will prevent
+                other bridge calls from being serviced during this time).
+                See OSEventObjectWait() for details.
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitAndHoldBridgeLock(IMG_HANDLE hOSEventKM);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWaitTimeoutAndHoldBridgeLock
+@Description    Wait for an event object to signal or timeout. The function
+                is passed an OS event object handle (which allows the OS to
+                have the calling thread wait on the associated event object).
+                The calling thread will be rescheduled when the associated
+                event object signals.
+                If the event object has not signalled after the specified
+                timeout period (passed in 'uiTimeoutus'), the function
+                will return with the result code PVRSRV_ERROR_TIMEOUT.
+                If use of the global bridge lock is supported (if
+                PVRSRV_USE_BRIDGE_LOCK is defined) it will be held while
+                waiting for the event object to signal (this will prevent
+                other bridge calls from being serviced during this time).
+                See OSEventObjectWait() for details.
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Input          uiTimeoutus   the timeout period (in usecs)
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitTimeoutAndHoldBridgeLock(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectDumpDebugInfo
+@Description    Emits debug counters/stats related to the event object passed
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Return         None.
+*/ /**************************************************************************/
+void OSEventObjectDumpDebugInfo(IMG_HANDLE hOSEventKM);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectOpen
+@Description    Open an OS handle on the specified event object.
+                This OS handle may then be used to make a thread wait for
+                that event object to signal.
+@Input          hEventObject    Event object handle.
+@Output         phOSEvent       OS handle to the returned event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject,
+											IMG_HANDLE *phOSEvent);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectClose
+@Description    Close an OS handle previously opened for an event object.
+@Input          hOSEventKM      OS event object handle to close.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM);
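+
+/* Illustrative pairing of the event-object calls above (an editor's sketch;
+ * error handling omitted for brevity):
+ *
+ *     IMG_HANDLE hEvent, hOSEvent;
+ *
+ *     OSEventObjectCreate("my_event", &hEvent);
+ *     OSEventObjectOpen(hEvent, &hOSEvent);
+ *
+ *     // waiting thread:
+ *     if (OSEventObjectWait(hOSEvent) == PVRSRV_ERROR_TIMEOUT)
+ *     {
+ *         // no signal within EVENT_OBJECT_TIMEOUT_MS
+ *     }
+ *
+ *     // signalling thread:
+ *     OSEventObjectSignal(hEvent);
+ *
+ *     OSEventObjectClose(hOSEvent);
+ *     OSEventObjectDestroy(hEvent);
+ */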
+
+/**************************************************************************/ /*!
+@Function       OSStringCopy
+@Description    OS function to support the standard C strcpy() function.
+ */ /**************************************************************************/
+/* Avoid macros so we don't evaluate pszSrc twice */
+static INLINE IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc)
+{
+	return OSStringNCopy(pszDest, pszSrc, OSStringLength(pszSrc) + 1);
+}
+
+/*************************************************************************/ /*!
+@Function      OSWaitus
+@Description   Implements a busy wait of the specified number of microseconds.
+               This function does NOT release thread quanta.
+@Input         ui32Timeus     The duration of the wait period (in us)
+@Return        None.
+*/ /**************************************************************************/
+void OSWaitus(IMG_UINT32 ui32Timeus);
+
+/*************************************************************************/ /*!
+@Function       OSSleepms
+@Description    Implements a sleep of the specified number of milliseconds.
+                This function may allow pre-emption, meaning the thread
+                may potentially not be rescheduled for a longer period.
+@Input          ui32Timems    The duration of the sleep (in ms)
+@Return         None.
+*/ /**************************************************************************/
+void OSSleepms(IMG_UINT32 ui32Timems);
+
+/*************************************************************************/ /*!
+@Function       OSReleaseThreadQuanta
+@Description    Relinquishes the current thread's execution time-slice,
+                permitting the OS scheduler to schedule another thread.
+@Return         None.
+*/ /**************************************************************************/
+void OSReleaseThreadQuanta(void);
+
+
+
+/* The access method is dependent on the location of the physical memory that
+ * makes up the PhyHeaps defined for the system and the CPU architecture. These
+ * macros may change in future to accommodate different access requirements.
+ */
+/*! Performs a 32 bit word read from the device memory. */
+#define OSReadDeviceMem32(addr)        (*((volatile IMG_UINT32 __force *)(addr)))
+/*! Performs a 32 bit word write to the device memory. */
+#define OSWriteDeviceMem32(addr, val)  (*((volatile IMG_UINT32 __force *)(addr)) = (IMG_UINT32)(val))
+
+#if defined(LINUX) && defined(__KERNEL__) && !defined(NO_HARDWARE)
+	#define OSReadHWReg8(addr, off)  ((IMG_UINT8)readb((IMG_BYTE __iomem *)(addr) + (off)))
+	#define OSReadHWReg16(addr, off) ((IMG_UINT16)readw((IMG_BYTE __iomem *)(addr) + (off)))
+	#define OSReadHWReg32(addr, off) ((IMG_UINT32)readl((IMG_BYTE __iomem *)(addr) + (off)))
+
+	/* Little endian support only */
+	#define OSReadHWReg64(addr, off) \
+			({ \
+				__typeof__(addr) _addr = addr; \
+				__typeof__(off) _off = off; \
+				(IMG_UINT64) \
+				( \
+					( (IMG_UINT64)(readl((IMG_BYTE __iomem *)(_addr) + (_off) + 4)) << 32) \
+					| readl((IMG_BYTE __iomem *)(_addr) + (_off)) \
+				); \
+			})
+
+	#define OSWriteHWReg8(addr, off, val)  writeb((IMG_UINT8)(val), (IMG_BYTE __iomem *)(addr) + (off))
+	#define OSWriteHWReg16(addr, off, val) writew((IMG_UINT16)(val), (IMG_BYTE __iomem *)(addr) + (off))
+	#define OSWriteHWReg32(addr, off, val) writel((IMG_UINT32)(val), (IMG_BYTE __iomem *)(addr) + (off))
+	/* Little endian support only */
+	#define OSWriteHWReg64(addr, off, val) do \
+			{ \
+				__typeof__(addr) _addr = addr; \
+				__typeof__(off) _off = off; \
+				__typeof__(val) _val = val; \
+				writel((IMG_UINT32)((_val) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off));	\
+				writel((IMG_UINT32)(((IMG_UINT64)(_val) >> 32) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off) + 4); \
+			} while (0)
+
+
+#elif defined(NO_HARDWARE)
+	/* FIXME: OSReadHWReg should not exist in no hardware builds */
+	#define OSReadHWReg8(addr, off)  (0x4eU)
+	#define OSReadHWReg16(addr, off) (0x3a4eU)
+	#define OSReadHWReg32(addr, off) (0x30f73a4eU)
+	#define OSReadHWReg64(addr, off) ((IMG_UINT64)0x5b376c9d30f73a4eU)
+
+	#define OSWriteHWReg8(addr, off, val)
+	#define OSWriteHWReg16(addr, off, val)
+	#define OSWriteHWReg32(addr, off, val)
+	#define OSWriteHWReg64(addr, off, val)
+
+#else
+/*************************************************************************/ /*!
+@Function       OSReadHWReg8
+@Description    Read from an 8-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to read from a location
+                but instead returns a constant value.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be read.
+@Return         The byte read.
+*/ /**************************************************************************/
+	IMG_UINT8 OSReadHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function       OSReadHWReg16
+@Description    Read from a 16-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to read from a location
+                but instead returns a constant value.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be read.
+@Return         The word read.
+*/ /**************************************************************************/
+	IMG_UINT16 OSReadHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function       OSReadHWReg32
+@Description    Read from a 32-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to read from a location
+                but instead returns a constant value.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be read.
+@Return         The long word read.
+*/ /**************************************************************************/
+	IMG_UINT32 OSReadHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function       OSReadHWReg64
+@Description    Read from a 64-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to read from a location
+                but instead returns a constant value.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be read.
+@Return         The long long word read.
+*/ /**************************************************************************/
+	IMG_UINT64 OSReadHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function       OSWriteHWReg8
+@Description    Write to an 8-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to write to a location.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be written to.
+@Input          ui8Value           The byte to be written to the register.
+@Return         None.
+*/ /**************************************************************************/
+	void OSWriteHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 ui8Value);
+
+/*************************************************************************/ /*!
+@Function       OSWriteHWReg16
+@Description    Write to a 16-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to write to a location.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be written to.
+@Input          ui16Value          The word to be written to the register.
+@Return         None.
+*/ /**************************************************************************/
+	void OSWriteHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT16 ui16Value);
+
+/*************************************************************************/ /*!
+@Function       OSWriteHWReg32
+@Description    Write to a 32-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to write to a location.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be written to.
+@Input          ui32Value          The long word to be written to the register.
+@Return         None.
+*/ /**************************************************************************/
+	void OSWriteHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+
+/*************************************************************************/ /*!
+@Function       OSWriteHWReg64
+@Description    Write to a 64-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to write to a location.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be written to.
+@Input          ui64Value          The long long word to be written to the
+                                   register.
+@Return         None.
+*/ /**************************************************************************/
+	void OSWriteHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value);
+#endif
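+
+/* Illustrative read-modify-write using the register accessors above (an
+ * editor's sketch; pvRegBase and the offset/bit values are hypothetical):
+ *
+ *     IMG_UINT32 ui32Reg = OSReadHWReg32(pvRegBase, 0x10);
+ *     ui32Reg |= 0x1;                    // set a hypothetical enable bit
+ *     OSWriteHWReg32(pvRegBase, 0x10, ui32Reg);
+ */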
+
+/*************************************************************************/ /*!
+@Description    Pointer to a timer callback function.
+@Input          pvData  Pointer to timer specific data.
+*/ /**************************************************************************/
+typedef void (*PFN_TIMER_FUNC)(void* pvData);
+
+/*************************************************************************/ /*!
+@Function       OSAddTimer
+@Description    OS specific function to install a timer callback. The
+                timer will then need to be enabled, as it is disabled by
+                default.
+                When enabled, the callback will be invoked once the specified
+                timeout has elapsed.
+@Input          pfnTimerFunc    Timer callback
+@Input          *pvData         Callback data
+@Input          ui32MsTimeout   Callback period
+@Return         Valid handle on success, NULL on failure
+*/ /**************************************************************************/
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout);
+
+/*************************************************************************/ /*!
+@Function       OSRemoveTimer
+@Description    Removes the specified timer. The handle becomes invalid and
+                should no longer be used.
+@Input          hTimer          handle of the timer to be removed
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSRemoveTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function       OSEnableTimer
+@Description    Enable the specified timer. After enabling, the timer will
+                invoke the associated callback at an interval determined by
+                the configured timeout period until disabled.
+@Input          hTimer          handle of the timer to be enabled
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function       OSDisableTimer
+@Description    Disable the specified timer.
+@Input          hTimer          handle of the timer to be disabled
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer);
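+
+/* Illustrative timer lifecycle using the calls above (an editor's sketch;
+ * MyTimerCB is hypothetical):
+ *
+ *     static void MyTimerCB(void *pvData) { ... }
+ *
+ *     IMG_HANDLE hTimer = OSAddTimer(MyTimerCB, NULL, 100);  // 100 ms period
+ *     if (hTimer != NULL)
+ *     {
+ *         OSEnableTimer(hTimer);   // MyTimerCB now fires every ~100 ms
+ *         ...
+ *         OSDisableTimer(hTimer);
+ *         OSRemoveTimer(hTimer);
+ *     }
+ */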
+
+
+/*************************************************************************/ /*!
+@Function       OSPanic
+@Description    Take action in response to an unrecoverable driver error
+@Return         None
+*/ /**************************************************************************/
+void OSPanic(void);
+
+/*************************************************************************/ /*!
+@Function       OSCopyToUser
+@Description    Copy data to user-addressable memory from kernel-addressable
+                memory.
+                Note that pvDest may be an invalid address or NULL and the
+                function should return an error in this case.
+                For operating systems that do not have a user/kernel space
+                distinction, this function should be implemented as a stub
+                which simply returns PVRSRV_ERROR_NOT_SUPPORTED.
+@Input          pvProcess        handle of the connection
+@Input          pvDest           pointer to the destination User memory
+@Input          pvSrc            pointer to the source Kernel memory
+@Input          ui32Bytes        size of the data to be copied
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSCopyToUser(void *pvProcess, void __user *pvDest, const void *pvSrc, size_t ui32Bytes);
+
+/*************************************************************************/ /*!
+@Function       OSCopyFromUser
+@Description    Copy data from user-addressable memory to kernel-addressable
+                memory.
+                Note that pvSrc may be an invalid address or NULL and the
+                function should return an error in this case.
+                For operating systems that do not have a user/kernel space
+                distinction, this function should be implemented as a stub
+                which simply returns PVRSRV_ERROR_NOT_SUPPORTED.
+@Input          pvProcess        handle of the connection
+@Input          pvDest           pointer to the destination Kernel memory
+@Input          pvSrc            pointer to the source User memory
+@Input          ui32Bytes        size of the data to be copied
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSCopyFromUser(void *pvProcess, void *pvDest, const void __user *pvSrc, size_t ui32Bytes);
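+
+/* Illustrative bridge-style use of the copy helpers above (an editor's
+ * sketch; pvProcess and pvUserSrc are assumed inputs, and the error code
+ * returned on failure is an assumption):
+ *
+ *     IMG_UINT32 ui32In;
+ *     if (OSCopyFromUser(pvProcess, &ui32In, pvUserSrc,
+ *                        sizeof(ui32In)) != PVRSRV_OK)
+ *     {
+ *         return PVRSRV_ERROR_INVALID_PARAMS;  // bad user pointer
+ *     }
+ */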
+
+#if defined(__linux__) || defined(INTEGRITY_OS)
+#define OSBridgeCopyFromUser OSCopyFromUser
+#define OSBridgeCopyToUser OSCopyToUser
+#else
+/*************************************************************************/ /*!
+@Function       OSBridgeCopyFromUser
+@Description    Copy data from user-addressable memory into kernel-addressable
+                memory as part of a bridge call operation.
+                For operating systems that do not have a user/kernel space
+                distinction, this function will require whatever implementation
+                is needed to pass data for making the bridge function call.
+                For operating systems which do have a user/kernel space
+                distinction (such as Linux) this function may be defined so
+                as to equate to a call to OSCopyFromUser().
+@Input          pvProcess        handle of the connection
+@Input          pvDest           pointer to the destination Kernel memory
+@Input          pvSrc            pointer to the source User memory
+@Input          ui32Bytes        size of the data to be copied
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSBridgeCopyFromUser (void *pvProcess,
+						void *pvDest,
+						const void *pvSrc,
+						size_t ui32Bytes);
+
+/*************************************************************************/ /*!
+@Function       OSBridgeCopyToUser
+@Description    Copy data to user-addressable memory from kernel-addressable
+                memory as part of a bridge call operation.
+                For operating systems that do not have a user/kernel space
+                distinction, this function will require whatever implementation
+                is needed to pass data for making the bridge function call.
+                For operating systems which do have a user/kernel space
+                distinction (such as Linux) this function may be defined so
+                as to equate to a call to OSCopyToUser().
+@Input          pvProcess        handle of the connection
+@Input          pvDest           pointer to the destination User memory
+@Input          pvSrc            pointer to the source Kernel memory
+@Input          ui32Bytes        size of the data to be copied
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSBridgeCopyToUser (void *pvProcess,
+						void *pvDest,
+						const void *pvSrc,
+						size_t ui32Bytes);
+#endif
+
+/* To be increased if required in future */
+#define PVRSRV_MAX_BRIDGE_IN_SIZE      0x2000    /*!< Size of the memory block used to hold data passed in to a bridge call */
+#define PVRSRV_MAX_BRIDGE_OUT_SIZE     0x1000    /*!< Size of the memory block used to hold data returned from a bridge call */
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK) || defined(DOXYGEN)
+/*************************************************************************/ /*!
+@Function       OSGetGlobalBridgeBuffers
+@Description    Returns the addresses and sizes of the buffers used to pass
+                data into and out of bridge function calls.
+@Output         ppvBridgeInBuffer         pointer to the input bridge data buffer
+                                          of size PVRSRV_MAX_BRIDGE_IN_SIZE.
+@Output         ppvBridgeOutBuffer        pointer to the output bridge data buffer
+                                          of size PVRSRV_MAX_BRIDGE_OUT_SIZE.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSGetGlobalBridgeBuffers (void **ppvBridgeInBuffer,
+									   void **ppvBridgeOutBuffer);
+#endif
+
+/*************************************************************************/ /*!
+@Function       OSPlatformBridgeInit
+@Description    Called during device creation to allow the OS port to register
+                other bridge modules and related resources that it requires.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPlatformBridgeInit(void);
+
+/*************************************************************************/ /*!
+@Function       OSPlatformBridgeDeInit
+@Description    Called during device destruction to allow the OS port to
+                deregister its OS specific bridges and clean up other
+                related resources.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPlatformBridgeDeInit(void);
+
+
+#if defined(LINUX) && defined(__KERNEL__)
+#define OSWriteMemoryBarrier() wmb()
+#define OSReadMemoryBarrier() rmb()
+#define OSMemoryBarrier() mb()
+#else
+/*************************************************************************/ /*!
+@Function       OSWriteMemoryBarrier
+@Description    Insert a write memory barrier.
+                The write memory barrier guarantees that all store operations
+                (writes) specified before the barrier will appear to happen
+                before all of the store operations specified after the barrier.
+@Return         None.
+*/ /**************************************************************************/
+void OSWriteMemoryBarrier(void);
+/*************************************************************************/ /*!
+@def            OSReadMemoryBarrier
+@Description    Insert a read memory barrier.
+                The read memory barrier guarantees that all load (read)
+                operations specified before the barrier will appear to happen
+                before all of the load operations specified after the barrier.
+*/ /**************************************************************************/
+#define OSReadMemoryBarrier() OSMemoryBarrier()
+/*************************************************************************/ /*!
+@Function       OSMemoryBarrier
+@Description    Insert a read/write memory barrier.
+                The read and write memory barrier guarantees that all load
+                (read) and all store (write) operations specified before the
+                barrier will appear to happen before all of the load/store
+                operations specified after the barrier.
+@Return         None.
+*/ /**************************************************************************/
+void OSMemoryBarrier(void);
+#endif
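+
+/* Illustrative publish/consume ordering with the barriers above (an
+ * editor's sketch; psShared and its fields are hypothetical):
+ *
+ *     // writer: make the payload visible before the flag that publishes it
+ *     psShared->ui32Payload = ui32Value;
+ *     OSWriteMemoryBarrier();
+ *     psShared->ui32Ready = 1;
+ *
+ *     // reader: order the flag read before the payload read
+ *     if (psShared->ui32Ready)
+ *     {
+ *         OSReadMemoryBarrier();
+ *         ui32Value = psShared->ui32Payload;
+ *     }
+ */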
+
+/*************************************************************************/ /*!
+@Function       PVRSRVToNativeError
+@Description    Returns the OS-specific equivalent error number/code for
+                the specified PVRSRV_ERROR value.
+                If there is no equivalent, or the PVRSRV_ERROR value is
+                PVRSRV_OK (no error), 0 is returned.
+@Return         The OS equivalent error code.
+*/ /**************************************************************************/
+int PVRSRVToNativeError(PVRSRV_ERROR e);
+/** See PVRSRVToNativeError(). */
+#define OSPVRSRVToNativeError(e) ((PVRSRV_OK == (e)) ? 0 : PVRSRVToNativeError(e))
+
+
+#if defined(LINUX) && defined(__KERNEL__)
+
+/* Provide LockDep friendly definitions for Services RW locks */
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "allocmem.h"
+
+#define OSWRLockCreate(ppsLock) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(ppsLock) = OSAllocMem(sizeof(struct rw_semaphore)); \
+	if (*(ppsLock)) { init_rwsem(*(ppsLock)); e = PVRSRV_OK; } \
+	e;})
+#define OSWRLockDestroy(psLock) ({OSFreeMem(psLock); PVRSRV_OK;})
+
+#define OSWRLockAcquireRead(psLock) ({down_read(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseRead(psLock) ({up_read(psLock); PVRSRV_OK;})
+#define OSWRLockAcquireWrite(psLock) ({down_write(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseWrite(psLock) ({up_write(psLock); PVRSRV_OK;})
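+
+/* Illustrative sketch (not part of the original header): typical WR lock
+ * usage under the definitions above. 'gpsExampleLock' and 'ExampleReader'
+ * are hypothetical; error handling is abbreviated.
+ */
+#if 0	/* example only, never compiled */
+static POSWR_LOCK gpsExampleLock;
+
+static PVRSRV_ERROR ExampleReader(void)
+{
+	PVRSRV_ERROR eError = OSWRLockCreate(&gpsExampleLock);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	OSWRLockAcquireRead(gpsExampleLock);	/* many readers may hold this... */
+	/* ... read shared state ... */
+	OSWRLockReleaseRead(gpsExampleLock);	/* ...but writers get exclusivity */
+	OSWRLockDestroy(gpsExampleLock);
+	return PVRSRV_OK;
+}
+#endif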
+
+typedef spinlock_t *POS_SPINLOCK;
+
+#define OSSpinLockCreate(_ppsLock) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(_ppsLock) = OSAllocMem(sizeof(spinlock_t)); \
+	if (*(_ppsLock)) {spin_lock_init(*(_ppsLock)); e = PVRSRV_OK;} \
+	e;})
+#define OSSpinLockDestroy(_psLock) ({OSFreeMem(_psLock);})
+
+#define OSSpinLockAcquire(_pLock, _pFlags) {unsigned long *p = (unsigned long *)_pFlags; spin_lock_irqsave(_pLock, *p);}
+#define OSSpinLockRelease(_pLock, _flags)  {spin_unlock_irqrestore(_pLock, _flags);}
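+
+/* Illustrative sketch (not part of the original header): the spin lock
+ * macros above expect the caller to provide storage for the saved IRQ
+ * flags, passed by pointer to acquire and by value to release.
+ * 'ExampleSpinLockUse' is hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static void ExampleSpinLockUse(POS_SPINLOCK psLock)
+{
+	unsigned long uiFlags;			/* storage assumed by the macros above */
+
+	OSSpinLockAcquire(psLock, &uiFlags);	/* IRQs saved and disabled */
+	/* ... touch state shared with interrupt context ... */
+	OSSpinLockRelease(psLock, uiFlags);	/* IRQs restored */
+}
+#endif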
+
+#elif defined(LINUX) || defined(__QNXNTO__) || defined (INTEGRITY_OS)
+/* User-mode unit tests use these definitions on Linux */
+
+PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock);
+void OSWRLockDestroy(POSWR_LOCK psLock);
+void OSWRLockAcquireRead(POSWR_LOCK psLock);
+void OSWRLockReleaseRead(POSWR_LOCK psLock);
+void OSWRLockAcquireWrite(POSWR_LOCK psLock);
+void OSWRLockReleaseWrite(POSWR_LOCK psLock);
+
+/* For now, spin-locks are required on Linux only, so other platforms fake
+ * spinlocks with normal mutex locks */
+#define POS_SPINLOCK POS_LOCK
+#define OSSpinLockCreate(ppLock) OSLockCreate(ppLock)
+#define OSSpinLockDestroy(pLock) OSLockDestroy(pLock)
+#define OSSpinLockAcquire(pLock, pFlags) {PVR_UNREFERENCED_PARAMETER(pFlags); OSLockAcquire(pLock);}
+#define OSSpinLockRelease(pLock, flags) {PVR_UNREFERENCED_PARAMETER(flags); OSLockRelease(pLock);}
+
+#else
+
+/*************************************************************************/ /*!
+@Function       OSWRLockCreate
+@Description    Create a writer/reader lock.
+                This type of lock allows multiple concurrent readers but
+                only a single writer, allowing for optimized performance.
+@Output         ppsLock     A handle to the created WR lock.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock)
+{
+	PVR_UNREFERENCED_PARAMETER(ppsLock);
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockDestroy
+@Description    Destroys a writer/reader lock.
+@Input          psLock     The handle of the WR lock to be destroyed.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockDestroy(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockAcquireRead
+@Description    Acquire a writer/reader read lock.
+                If the write lock is already acquired, the caller will
+                block until it is released.
+@Input          psLock     The handle of the WR lock to be acquired for
+                           reading.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockAcquireRead(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockReleaseRead
+@Description    Release a writer/reader read lock.
+@Input          psLock     The handle of the WR lock whose read lock is to
+                           be released.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockReleaseRead(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockAcquireWrite
+@Description    Acquire a writer/reader write lock.
+                If the write lock or any read lock are already acquired,
+                the caller will block until all are released.
+@Input          psLock     The handle of the WR lock to be acquired for
+                           writing.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockAcquireWrite(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockReleaseWrite
+@Description    Release a writer/reader write lock.
+@Input          psLock     The handle of the WR lock whose write lock is to
+                           be released.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void OSWRLockReleaseWrite(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       OSDivide64r64
+@Description    Divide a 64-bit value by a 32-bit value. Return the 64-bit
+                quotient.
+                The remainder is also returned in 'pui32Remainder'.
+@Input          ui64Divident        The number to be divided.
+@Input          ui32Divisor         The 32-bit value 'ui64Divident' is to
+                                    be divided by.
+@Output         pui32Remainder      The remainder of the division.
+@Return         The 64-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
+
+/*************************************************************************/ /*!
+@Function       OSDivide64
+@Description    Divide a 64-bit value by a 32-bit value. Return a 32-bit
+                quotient.
+                The remainder is also returned in 'pui32Remainder'.
+                This function allows for a more optimal implementation
+                of a 64-bit division when the result is known to be
+                representable in 32 bits.
+@Input          ui64Divident        The number to be divided.
+@Input          ui32Divisor         The 32-bit value 'ui64Divident' is to
+                                    be divided by.
+@Output         pui32Remainder      The remainder of the division.
+@Return         The 32-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
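+
+/* Illustrative sketch (not part of the original header): e.g. converting a
+ * 64-bit byte count into 4K pages. 0x100002100 / 0x1000 yields a quotient
+ * of 0x100002 with a remainder of 0x100. OSDivide64() would be preferred
+ * when the quotient is known to fit in 32 bits.
+ */
+#if 0	/* example only, never compiled */
+static void ExampleDivide(void)
+{
+	IMG_UINT32 ui32Remainder;
+	IMG_UINT64 ui64Pages = OSDivide64r64(0x100002100ULL, 0x1000,
+	                                     &ui32Remainder);
+	/* ui64Pages == 0x100002, ui32Remainder == 0x100 */
+	PVR_UNREFERENCED_PARAMETER(ui64Pages);
+}
+#endif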
+
+/*************************************************************************/ /*!
+@Function       OSDumpStack
+@Description    Dump the current task information and its stack trace.
+@Return         None
+*/ /**************************************************************************/
+void OSDumpStack(void);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK) || defined(DOXYGEN)
+/*************************************************************************/ /*!
+@Function       OSAcquireBridgeLock
+@Description    Acquire the global bridge lock.
+                This prevents another bridge call from being actioned while
+                we are still servicing the current bridge call.
+                NB. This function must not return until the lock is acquired
+                (meaning the implementation should not timeout or return with
+                an error, as the caller will assume they have the lock).
+                This function has an OS-specific implementation rather than
+                an abstracted implementation for efficiency reasons, as it
+                is called frequently.
+@Return         None
+*/ /**************************************************************************/
+void OSAcquireBridgeLock(void);
+/*************************************************************************/ /*!
+@Function       OSReleaseBridgeLock
+@Description    Release the global bridge lock.
+                This function has an OS-specific implementation rather than
+                an abstracted implementation for efficiency reasons, as it
+                is called frequently.
+@Return         None
+*/ /**************************************************************************/
+void OSReleaseBridgeLock(void);
+#endif
+
+/*
+ *  Functions for providing support for PID statistics.
+ */
+
+/*************************************************************************/ /*!
+@Description    Pointer to a function for printing statistics.
+@Input  pvFilePtr   File identifier.
+@Input  pszFormat   Text to be printed including format specifiers.
+@Input  ...         Additional arguments depending on the pszFormat string.
+*/ /**************************************************************************/
+typedef void (OS_STATS_PRINTF_FUNC)(void *pvFilePtr, const IMG_CHAR *pszFormat, ...);
+
+/*************************************************************************/ /*!
+@Description    Pointer to a function responsible for parsing and printing of
+                formatted process statistics. Actual output should be done by
+                the function pointed to by the pfnOSGetStatsPrintf variable.
+@Input  pvFilePtr            File identifier passed to the pfnOSGetStatsPrintf function.
+@Input  pvStatPtr            Pointer to statistics structure.
+@Input  pfnOSGetStatsPrintf  Pointer to a function for printing the statistics.
+*/ /**************************************************************************/
+typedef void (OS_STATS_PRINT_FUNC)(void *pvFilePtr,
+								   void *pvStatPtr,
+								   OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+/*************************************************************************/ /*!
+@Description    Pointer to a function used to atomically increment a reference
+                count on the memory backing the statistic entry.
+@Input  pvStatPtr   Pointer to the statistics structure.
+@Return         Reference count after the operation.
+*/ /**************************************************************************/
+typedef IMG_UINT32 (OS_INC_STATS_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+
+/*************************************************************************/ /*!
+@Description    Pointer to a function used to atomically decrement a reference
+                count on the memory backing the statistic entry.
+@Input  pvStatPtr   Pointer to the statistics structure.
+@Return         Reference count after the operation.
+*/ /**************************************************************************/
+typedef IMG_UINT32 (OS_DEC_STATS_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+
+/*************************************************************************/ /*!
+@Function       OSCreateStatisticEntry
+@Description    Create a statistic entry in the specified folder.
+                Where the operating system does not support a debugfs
+                file system, this function may be implemented as a stub.
+@Input          pszName        String containing the name for the entry.
+@Input          pvFolder       Reference from OSCreateStatisticFolder() of the
+                               folder to create the entry in, or NULL for the
+                               root.
+@Input          pfnStatsPrint  Pointer to function that can be used to print the
+                               values of all the statistics.
+@Input          pvData         OS specific reference that can be used by
+                               pfnGetElement.
+@Return         Void pointer reference to the entry created, which can be
+                passed to OSRemoveStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+void *OSCreateStatisticEntry(IMG_CHAR* pszName, void *pvFolder,
+							 OS_STATS_PRINT_FUNC* pfnStatsPrint,
+							 void *pvData);
+
+/*************************************************************************/ /*!
+@Function       OSRemoveStatisticEntry
+@Description    Removes a statistic entry.
+                Where the operating system does not support a debugfs
+                file system, this function may be implemented as a stub.
+@Input          ppvEntry  Double void pointer reference to the entry created by
+                          OSCreateStatisticEntry().
+                          Double pointer is used so that it can be NULLed
+                          right after memory is freed to avoid possible races
+                          and use-after-free situations.
+*/ /**************************************************************************/
+void OSRemoveStatisticEntry(void **ppvEntry);
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+/*************************************************************************/ /*!
+@Function       OSCreateRawStatisticEntry
+@Description    Create a raw statistic entry in the specified folder.
+                Where the operating system does not support a debugfs
+                file system, this function may be implemented as a stub.
+@Input          pszFileName    String containing the name for the entry.
+@Input          pvParentDir    Reference from OSCreateStatisticFolder() of the
+                               folder to create the entry in, or NULL for the
+                               root.
+@Input          pfnStatsPrint  Pointer to function that can be used to print the
+                               values of all the statistics.
+@Return         Void pointer reference to the entry created, which can be
+                passed to OSRemoveRawStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+void *OSCreateRawStatisticEntry(const IMG_CHAR *pszFileName, void *pvParentDir,
+                                OS_STATS_PRINT_FUNC *pfnStatsPrint);
+
+/*************************************************************************/ /*!
+@Function       OSRemoveRawStatisticEntry
+@Description    Removes a raw statistic entry.
+                Where the operating system does not support a debugfs
+                file system, this function may be implemented as a stub.
+@Input          ppvEntry  Double void pointer reference to the entry created by
+                          OSCreateRawStatisticEntry().
+                          Double pointer is used so that it can be NULLed
+                          right after memory is freed to avoid possible races
+                          and use-after-free situations.
+*/ /**************************************************************************/
+void OSRemoveRawStatisticEntry(void **ppvEntry);
+#endif
+
+/*************************************************************************/ /*!
+@Function       OSCreateStatisticFolder
+@Description    Create a statistic folder to hold statistic entries.
+                Where the operating system does not support a debugfs
+                file system, this function may be implemented as a stub.
+@Input          pszName   String containing the name for the folder.
+@Input          pvFolder  Reference from OSCreateStatisticFolder() of the folder
+                          to create the folder in, or NULL for the root.
+@Return         Pointer void reference to the folder created, which can be
+                passed to OSRemoveStatisticFolder() to remove the folder.
+*/ /**************************************************************************/
+void *OSCreateStatisticFolder(IMG_CHAR *pszName, void *pvFolder);
+
+/*************************************************************************/ /*!
+@Function       OSRemoveStatisticFolder
+@Description    Removes a statistic folder.
+                Where the operating system does not support a debugfs
+                file system, this function may be implemented as a stub.
+@Input          ppvFolder  Reference from OSCreateStatisticFolder() of the
+                           folder that should be removed.
+                           This needs to be double pointer because it has to
+                           be NULLed right after memory is freed to avoid
+                           possible races and use-after-free situations.
+*/ /**************************************************************************/
+void OSRemoveStatisticFolder(void **ppvFolder);
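+
+/* Illustrative sketch (not part of the original header): wiring the
+ * statistics hooks above together. 'ExampleStatsPrint', the folder/entry
+ * names and the counter are all hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static void ExampleStatsPrint(void *pvFilePtr, void *pvStatPtr,
+                              OS_STATS_PRINTF_FUNC *pfnOSStatsPrintf)
+{
+	IMG_UINT32 *pui32Counter = pvStatPtr;
+	pfnOSStatsPrintf(pvFilePtr, "Counter: %u\n", *pui32Counter);
+}
+
+static IMG_UINT32 gui32ExampleCounter;
+static void *gpvFolder, *gpvEntry;
+
+static void ExampleStatsSetup(void)
+{
+	gpvFolder = OSCreateStatisticFolder((IMG_CHAR *)"example", NULL);
+	gpvEntry  = OSCreateStatisticEntry((IMG_CHAR *)"counter", gpvFolder,
+	                                   ExampleStatsPrint,
+	                                   &gui32ExampleCounter);
+}
+
+static void ExampleStatsTeardown(void)
+{
+	OSRemoveStatisticEntry(&gpvEntry);	/* NULLs the pointer on removal */
+	OSRemoveStatisticFolder(&gpvFolder);
+}
+#endif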
+
+/*************************************************************************/ /*!
+@Function       OSUserModeAccessToPerfCountersEn
+@Description    Permit User-mode access to CPU performance counter
+                registers.
+                This function is called during device initialisation.
+                Certain CPU architectures may need to explicitly permit
+                User mode access to performance counters - if this is
+                required, the necessary code should be implemented inside
+                this function.
+@Return         None.
+*/ /**************************************************************************/
+void OSUserModeAccessToPerfCountersEn(void);
+
+/*************************************************************************/ /*!
+@Function       OSDebugSignalPID
+@Description    Sends a SIGTRAP signal to a specific PID in user mode for
+                debugging purposes. The user mode process can register a handler
+                against this signal.
+                This is necessary to support the Rogue debugger. If the Rogue
+                debugger is not used then this function may be implemented as
+                a stub.
+@Input          ui32PID    The PID for the signal.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID);
+
+#if defined(LINUX) && defined(__KERNEL__) && !defined(DOXYGEN)
+#define OSWarnOn(a) WARN_ON(a)
+#else
+/*************************************************************************/ /*!
+@Function       OSWarnOn
+@Description    This API allows the driver to emit a special token and stack
+                dump to the server log when an issue is detected of which the
+                OS needs to be notified. The token or call may be used to
+                trigger log collection by the OS environment.
+                PVR_DPF log messages will have been emitted prior to this call.
+@Input          a    Expression to evaluate; if true, the warning is triggered.
+@Return         None
+*/ /**************************************************************************/
+#define OSWarnOn(a) do { if ((a)) { OSDumpStack(); } } while(0)
+#endif
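+
+/* Illustrative usage (not part of the original header): OSWarnOn() is
+ * typically used to flag 'should never happen' conditions without stopping
+ * the driver, e.g.
+ *
+ *     OSWarnOn(psDeviceNode == NULL);  -- dumps a stack trace if true
+ *
+ * where 'psDeviceNode' is a hypothetical pointer being sanity-checked.
+ */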
+
+/*************************************************************************/ /*!
+@Function       OSThreadDumpInfo
+@Description    Traverse the thread list and call each of the stored
+                callbacks to dump the info in debug_dump.
+@Input          pfnDumpDebugPrintf  The 'printf' function to be called to
+                                    display the debug info
+@Input          pvDumpDebugFile     Optional file identifier to be passed to
+                                    the 'printf' function if required
+*/ /**************************************************************************/
+void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+                      void *pvDumpDebugFile);
+
+/*************************************************************************/ /*!
+@Function       OSDumpVersionInfo
+@Description    Store OS version information in debug dump.
+@Input          pfnDumpDebugPrintf  The 'printf' function to be called to
+                                    display the debug info
+@Input          pvDumpDebugFile     Optional file identifier to be passed to
+                                    the 'printf' function if required
+*/ /**************************************************************************/
+void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                       void *pvDumpDebugFile);
+
+/*************************************************************************/ /*!
+@Function       OSIsWriteCombineUnalignedSafe
+@Description    Determine if unaligned accesses to write-combine memory are
+                safe to perform, i.e. whether we are safe from a CPU fault
+                occurring. This test is specifically aimed at ARM64 platforms
+                which cannot provide this guarantee if the memory is 'device'
+                memory rather than 'normal' under the ARM memory architecture.
+@Return         IMG_TRUE if safe, IMG_FALSE otherwise.
+*/ /**************************************************************************/
+IMG_BOOL OSIsWriteCombineUnalignedSafe(void);
+
+#endif /* __OSFUNC_H__ */
+
+/******************************************************************************
+ End of file (osfunc.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_arm.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_arm.c
new file mode 100644
index 0000000..43245c8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_arm.c
@@ -0,0 +1,231 @@
+/*************************************************************************/ /*!
+@File
+@Title          arm specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS functions whose implementations are processor specific
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
+ #include <asm/system.h>
+#endif
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+
+
+#if defined(CONFIG_OUTER_CACHE)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0))
+
+	/* Since 3.16 the outer_xxx() functions require irqs to be disabled and no
+	 * other cache masters must operate on the outer cache. */
+	static DEFINE_SPINLOCK(gsCacheFlushLock);
+
+	#define OUTER_CLEAN_RANGE() { \
+		unsigned long uiLockFlags; \
+		\
+		spin_lock_irqsave(&gsCacheFlushLock, uiLockFlags); \
+		outer_clean_range(0, ULONG_MAX); \
+		spin_unlock_irqrestore(&gsCacheFlushLock, uiLockFlags); \
+	}
+
+	#define OUTER_FLUSH_ALL() { \
+		unsigned long uiLockFlags; \
+		\
+		spin_lock_irqsave(&gsCacheFlushLock, uiLockFlags); \
+		outer_flush_all(); \
+		spin_unlock_irqrestore(&gsCacheFlushLock, uiLockFlags); \
+	}
+
+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
+
+	/* No need to disable IRQs for older kernels */
+	#define OUTER_CLEAN_RANGE() outer_clean_range(0, ULONG_MAX)
+	#define OUTER_FLUSH_ALL()   outer_flush_all()
+#endif /*(LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
+
+#else /* CONFIG_OUTER_CACHE */
+
+	/* Don't do anything if we have no outer cache */
+	#define OUTER_CLEAN_RANGE()
+	#define OUTER_FLUSH_ALL()
+#endif /* CONFIG_OUTER_CACHE */
+
+static void per_cpu_cache_flush(void *arg)
+{
+	PVR_UNREFERENCED_PARAMETER(arg);
+	flush_cache_all();
+}
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	switch (uiCacheOp)
+	{
+		case PVRSRV_CACHE_OP_CLEAN:
+			on_each_cpu(per_cpu_cache_flush, NULL, 1);
+			OUTER_CLEAN_RANGE();
+			break;
+
+		case PVRSRV_CACHE_OP_INVALIDATE:
+		case PVRSRV_CACHE_OP_FLUSH:
+			on_each_cpu(per_cpu_cache_flush, NULL, 1);
+			OUTER_FLUSH_ALL();
+			break;
+
+		case PVRSRV_CACHE_OP_NONE:
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Global cache operation type %d is invalid",
+					__func__, uiCacheOp));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_ASSERT(0);
+			break;
+	}
+
+	return eError;
+}
+
+static inline size_t pvr_dmac_range_len(const void *pvStart, const void *pvEnd)
+{
+	return (size_t)((char *)pvEnd - (char *)pvStart);
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+	PVR_UNREFERENCED_PARAMETER(pvVirtStart);
+	PVR_UNREFERENCED_PARAMETER(pvVirtEnd);
+
+	arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+	arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+#else	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+	/* Inner cache */
+	dmac_flush_range(pvVirtStart, pvVirtEnd);
+
+	/* Outer cache */
+	outer_flush_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+	PVR_UNREFERENCED_PARAMETER(pvVirtStart);
+	PVR_UNREFERENCED_PARAMETER(pvVirtEnd);
+
+	arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+#else	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+	/* Inner cache */
+	dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_TO_DEVICE);
+
+	/* Outer cache */
+	outer_clean_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                 void *pvVirtStart,
+                                 void *pvVirtEnd,
+                                 IMG_CPU_PHYADDR sCPUPhysStart,
+                                 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+	PVR_UNREFERENCED_PARAMETER(pvVirtStart);
+	PVR_UNREFERENCED_PARAMETER(pvVirtEnd);
+
+	arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+#else	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+	/* Inner cache */
+	dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_FROM_DEVICE);
+
+	/* Outer cache */
+	outer_inv_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+	return PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL;
+#else
+	return PVRSRV_CACHE_OP_ADDR_TYPE_BOTH;
+#endif
+}
+
+/* User Enable Register */
+#define PMUSERENR_EN      0x00000001 /* enable user access to the counters */
+
+static void per_cpu_perf_counter_user_access_en(void *data)
+{
+	PVR_UNREFERENCED_PARAMETER(data);
+#if !defined(CONFIG_L4)
+	/* Enable user-mode access to counters. */
+	asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(PMUSERENR_EN));
+#endif
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+	on_each_cpu(per_cpu_perf_counter_user_access_en, NULL, 1);
+}
+
+IMG_BOOL OSIsWriteCombineUnalignedSafe(void)
+{
+	/*
+	 * The kernel appears to have always used normal memory for
+	 * write-combine mappings under ARM32.
+	 * See the osfunc_arm64.c implementation for more details.
+	 */
+	return IMG_TRUE;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_arm64.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_arm64.c
new file mode 100644
index 0000000..8a4c29e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_arm64.c
@@ -0,0 +1,268 @@
+/*************************************************************************/ /*!
+@File
+@Title          arm64 specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS functions whose implementations are processor specific
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/cpumask.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#if defined(CONFIG_OUTER_CACHE)
+  /* If you encounter a 64-bit ARM system with an outer cache, you'll need
+   * to add the necessary code to manage that cache. See osfunc_arm.c
+   * for an example of how to do so.
+   */
+	#error "CONFIG_OUTER_CACHE not supported on arm64."
+#endif
+
+static void per_cpu_cache_flush(void *arg)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
+	unsigned long irqflags;
+	signed long Clidr, Csselr, LoC, Assoc, Nway, Nsets, Level, Lsize, Var;
+	static DEFINE_SPINLOCK(spinlock);
+
+	spin_lock_irqsave(&spinlock, irqflags);
+
+	/* Read cache level ID register */
+	asm volatile (
+		"dmb sy\n\t"
+		"mrs %[rc], clidr_el1\n\t"
+		: [rc] "=r" (Clidr));
+
+	/* Exit if there is no cache level of coherency */
+	LoC = (Clidr & (((1UL << 3)-1) << 24)) >> 23;
+	if (! LoC)
+	{
+		goto e0;
+	}
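+
+	/* NB: the right shift by 23 (not 24) above leaves LoC scaled by 2,
+	   matching the CSSELR_EL1 level field (bits [3:1]) that the Level
+	   loop below writes directly. */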
+
+	/*
+		Walk the cache hierarchy up to the LLC/LoC; at each level, skip
+		instruction-only caches and determine the attributes of the dcache
+		at that level.
+	*/
+	for (Level = 0; LoC > Level; Level += 2)
+	{
+		/* Extract this level's CtypeN field; skip the level unless it has
+		   a data or unified cache (CtypeN >= 2) */
+		Var = (Clidr >> (Level + (Level >> 1))) & ((1UL << 3) - 1);
+		if (Var < 2)
+		{
+			continue;
+		}
+
+		/* Select this dcache level for query */
+		asm volatile (
+			"msr csselr_el1, %[val]\n\t"
+			"isb\n\t"
+			"mrs %[rc], ccsidr_el1\n\t"
+			: [rc] "=r" (Csselr) : [val] "r" (Level));
+
+		/* Look-up this dcache organisation attributes */
+		Nsets = (Csselr >> 13) & ((1UL << 15) - 1);
+		Assoc = (Csselr >> 3) & ((1UL << 10) - 1);
+		Lsize = (Csselr & ((1UL << 3) - 1)) + 4;
+		Nway = 0;
+
+		/* For performance, do these in assembly; for each dcache set and
+		   each way within it, construct the "DC CISW" operand (way in the
+		   top bits via a CLZ of the associativity, set shifted by the
+		   log2 line size, level in bits [3:1]) and issue the instruction */
+		asm volatile (
+			"mov x6, %[val0]\n\t"
+			"mov x9, %[rc1]\n\t"
+			"clz w9, w6\n\t"
+			"mov %[rc1], x9\n\t"
+			"lsetloop:\n\t"
+			"mov %[rc5], %[val0]\n\t"
+			"swayloop:\n\t"
+			"lsl x6, %[rc5], %[rc1]\n\t"
+			"orr x9, %[val2], x6\n\t"
+			"lsl x6, %[rc3], %[val4]\n\t"
+			"orr x9, x9, x6\n\t"
+			"dc	cisw, x9\n\t"
+			"subs %[rc5], %[rc5], #1\n\t"
+			"b.ge swayloop\n\t"
+			"subs %[rc3], %[rc3], #1\n\t"
+			"b.ge lsetloop\n\t"
+			: [rc1] "+r" (Nway), [rc3] "+r" (Nsets), [rc5] "+r" (Var)
+			: [val0] "r" (Assoc), [val2] "r" (Level), [val4] "r" (Lsize)
+			: "x6", "x9", "cc");
+	}
+
+e0:
+	/* Re-select L0 d-cache as active level, issue barrier before exit */
+	Var = 0;
+	asm volatile (
+		"msr csselr_el1, %[val]\n\t"
+		"dsb sy\n\t"
+		"isb\n\t"
+		: : [val] "r" (Var));
+
+	spin_unlock_irqrestore(&spinlock, irqflags);
+#else
+	flush_cache_all();
+#endif
+	PVR_UNREFERENCED_PARAMETER(arg);
+}
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	switch (uiCacheOp)
+	{
+		case PVRSRV_CACHE_OP_CLEAN:
+		case PVRSRV_CACHE_OP_FLUSH:
+		case PVRSRV_CACHE_OP_INVALIDATE:
+			on_each_cpu(per_cpu_cache_flush, NULL, 1);
+			break;
+
+		case PVRSRV_CACHE_OP_NONE:
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Global cache operation type %d is invalid",
+					__func__, uiCacheOp));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_ASSERT(0);
+			break;
+	}
+
+	return eError;
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+							void *pvVirtStart,
+							void *pvVirtEnd,
+							IMG_CPU_PHYADDR sCPUPhysStart,
+							IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	struct device *dev;
+	const struct dma_map_ops *dma_ops;
+
+	PVR_UNREFERENCED_PARAMETER(pvVirtStart);
+	PVR_UNREFERENCED_PARAMETER(pvVirtEnd);
+
+	dev = psDevNode->psDevConfig->pvOSDevice;
+
+	dma_ops = get_dma_ops(dev);
+	dma_ops->sync_single_for_device(dev, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+	dma_ops->sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+							void *pvVirtStart,
+							void *pvVirtEnd,
+							IMG_CPU_PHYADDR sCPUPhysStart,
+							IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	struct device *dev;
+	const struct dma_map_ops *dma_ops;
+
+	PVR_UNREFERENCED_PARAMETER(pvVirtStart);
+	PVR_UNREFERENCED_PARAMETER(pvVirtEnd);
+
+	dev = psDevNode->psDevConfig->pvOSDevice;
+
+	dma_ops = get_dma_ops(dev);
+	dma_ops->sync_single_for_device(dev, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+								 void *pvVirtStart,
+								 void *pvVirtEnd,
+								 IMG_CPU_PHYADDR sCPUPhysStart,
+								 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	struct device *dev;
+	const struct dma_map_ops *dma_ops;
+
+	PVR_UNREFERENCED_PARAMETER(pvVirtStart);
+	PVR_UNREFERENCED_PARAMETER(pvVirtEnd);
+
+	dev = psDevNode->psDevConfig->pvOSDevice;
+
+	dma_ops = get_dma_ops(dev);
+	dma_ops->sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+	return PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+	/* FIXME: implement similarly to __arm__ */
+}
+
+IMG_BOOL OSIsWriteCombineUnalignedSafe(void)
+{
+	/*
+	 * Under ARM64 there is the concept of 'device' [0] and 'normal' [1] memory.
+	 * Unaligned access on device memory is explicitly disallowed [2]:
+	 *
+	 * 'Further, unaligned accesses are only allowed to regions marked as Normal
+	 *  memory type.
+	 *  ...
+	 *  Attempts to perform unaligned accesses when not allowed will cause an
+	 *  alignment fault (data abort).'
+	 *
+	 * Write-combine on ARM64 can be implemented as either normal non-cached
+	 * memory (NORMAL_NC) or as device memory with gathering enabled
+	 * (DEVICE_GRE). Kernel 3.13 changed this from the latter to the former.
+	 *
+	 * [0]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/CHDBDIDF.html
+	 * [1]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/ch13s01s01.html
+	 * [2]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
+	 */
+
+	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+	return (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_NC);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_common.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_common.h
new file mode 100644
index 0000000..24b9f23
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_common.h
@@ -0,0 +1,158 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS functions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS specific API definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __OSFUNC_COMMON_H__
+/*! @cond Doxygen_Suppress */
+#define __OSFUNC_COMMON_H__
+/*! @endcond */
+
+#include "img_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#if defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)
+
+/* Workarounds for assumptions made that memory will not be mapped uncached
+ * in kernel or user address spaces on arm64 platforms (or other testing).
+ */
+
+/**************************************************************************/ /*!
+@Function       DeviceMemSet
+@Description    Set memory, whose mapping may be uncached, to a given value.
+                On some architectures, additional processing may be needed
+                if the mapping is uncached. In such cases, OSDeviceMemSet()
+                is defined as a call to this function.
+@Input          pvDest     void pointer to the memory to be set
+@Input          ui8Value   byte containing the value to be set
+@Input          ui32Size   the number of bytes to be set to the given value
+@Return         None
+ */ /**************************************************************************/
+void DeviceMemSet(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size);
+
+/**************************************************************************/ /*!
+@Function       DeviceMemCopy
+@Description    Copy values from one area of memory, to another, when one
+                or both mappings may be uncached.
+                On some architectures, additional processing may be needed
+                if mappings are uncached. In such cases, OSDeviceMemCopy()
+                is defined as a call to this function.
+@Input          pvDst      void pointer to the destination memory
+@Input          pvSrc      void pointer to the source memory
+@Input          ui32Size   the number of bytes to be copied
+@Return         None
+ */ /**************************************************************************/
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t ui32Size);
+
+#define OSDeviceMemSet(a,b,c)  DeviceMemSet((a), (b), (c))
+#define OSDeviceMemCopy(a,b,c) DeviceMemCopy((a), (b), (c))
+#define OSCachedMemSet(a,b,c)  memset((a), (b), (c))
+#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c))
+
+#else /* !(defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */
+
+/* Everything else */
+
+/**************************************************************************/ /*!
+@Function       OSDeviceMemSet
+@Description    Set memory, whose mapping may be uncached, to a given value.
+                On some architectures, additional processing may be needed
+                if the mapping is uncached.
+@Input          a     void pointer to the memory to be set
+@Input          b     byte containing the value to be set
+@Input          c     the number of bytes to be set to the given value
+@Return         Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSDeviceMemSet(a,b,c) memset((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function       OSDeviceMemCopy
+@Description    Copy values from one area of memory, to another, when one
+                or both mappings may be uncached.
+                On some architectures, additional processing may be needed
+                if mappings are uncached.
+@Input          a     void pointer to the destination memory
+@Input          b     void pointer to the source memory
+@Input          c     the number of bytes to be copied
+@Return         Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSDeviceMemCopy(a,b,c) memcpy((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function       OSCachedMemSet
+@Description    Set memory, where the mapping is known to be cached, to a
+                given value. This function exists to allow an optimal memset
+                to be performed when memory is known to be cached.
+@Input          a     void pointer to the memory to be set
+@Input          b     byte containing the value to be set
+@Input          c     the number of bytes to be set to the given value
+@Return         Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSCachedMemSet(a,b,c)  memset((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function       OSCachedMemCopy
+@Description    Copy values from one area of memory, to another, when both
+                mappings are known to be cached.
+                This function exists to allow an optimal memcpy to be
+                performed when memory is known to be cached.
+@Input          a     void pointer to the destination memory
+@Input          b     void pointer to the source memory
+@Input          c     the number of bytes to be copied
+@Return         Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c))
+
+#endif /* !(defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */
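+
+/* Illustrative sketch (not part of the original header): callers pick the
+ * 'Device' variants whenever a mapping may be uncached (e.g. a
+ * write-combined buffer shared with the GPU), and the 'Cached' variants
+ * when the mapping is known to be cached. 'ExampleCopyToDevice' and
+ * 'pvWCBuffer' are hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static void ExampleCopyToDevice(void *pvWCBuffer,
+                                const void *pvSrc, size_t uiSize)
+{
+	/* Safe for uncached/write-combined destinations on arm64 */
+	OSDeviceMemCopy(pvWCBuffer, pvSrc, uiSize);
+}
+#endif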
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __OSFUNC_COMMON_H__ */
+
+/******************************************************************************
+ End of file (osfunc_common.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_mips.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_mips.c
new file mode 100644
index 0000000..1d0a408
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_mips.c
@@ -0,0 +1,140 @@
+/*************************************************************************/ /*!
+@File
+@Title          mips specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS functions whose implementations are processor specific
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	switch (uiCacheOp)
+	{
+		case PVRSRV_CACHE_OP_CLEAN:
+		case PVRSRV_CACHE_OP_FLUSH:
+		case PVRSRV_CACHE_OP_INVALIDATE:
+			eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+			break;
+
+		case PVRSRV_CACHE_OP_NONE:
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Global cache operation type %d is invalid",
+					__func__, uiCacheOp));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_ASSERT(0);
+			break;
+	}
+
+	return eError;
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	unsigned long len;
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	PVR_ASSERT((uintptr_t) pvVirtEnd >= (uintptr_t) pvVirtStart);
+
+	len = (unsigned long) pvVirtEnd - (unsigned long) pvVirtStart;
+	dma_cache_sync(psDevNode->psDevConfig->pvOSDevice, (void *)pvVirtStart, len, DMA_BIDIRECTIONAL);
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	unsigned long len;
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	PVR_ASSERT((uintptr_t) pvVirtEnd >= (uintptr_t) pvVirtStart);
+
+	len = (unsigned long) pvVirtEnd - (unsigned long) pvVirtStart;
+	dma_cache_sync(psDevNode->psDevConfig->pvOSDevice, (void *)pvVirtStart, len, DMA_TO_DEVICE);
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                 void *pvVirtStart,
+                                 void *pvVirtEnd,
+                                 IMG_CPU_PHYADDR sCPUPhysStart,
+                                 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	unsigned long len;
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	PVR_ASSERT((uintptr_t) pvVirtEnd >= (uintptr_t) pvVirtStart);
+
+	len = (unsigned long) pvVirtEnd - (unsigned long) pvVirtStart;
+	dma_cache_sync(psDevNode->psDevConfig->pvOSDevice, (void *)pvVirtStart, len, DMA_FROM_DEVICE);
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+	return PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+	/* Not applicable to MIPS architecture. */
+}
+
+IMG_BOOL OSIsWriteCombineUnalignedSafe(void)
+{
+	return IMG_TRUE;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_x86.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_x86.c
new file mode 100644
index 0000000..9753293
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osfunc_x86.c
@@ -0,0 +1,161 @@
+/*************************************************************************/ /*!
+@File
+@Title          x86 specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS functions whose implementations are processor specific
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/smp.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+
+static void per_cpu_cache_flush(void *arg)
+{
+	PVR_UNREFERENCED_PARAMETER(arg);
+#if !defined(CONFIG_L4)
+	wbinvd();
+#endif
+}
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	switch (uiCacheOp)
+	{
+		case PVRSRV_CACHE_OP_CLEAN:
+		case PVRSRV_CACHE_OP_FLUSH:
+		case PVRSRV_CACHE_OP_INVALIDATE:
+			on_each_cpu(per_cpu_cache_flush, NULL, 1);
+			break;
+
+		case PVRSRV_CACHE_OP_NONE:
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Global cache operation type %d is invalid",
+					__func__, uiCacheOp));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_ASSERT(0);
+			break;
+	}
+
+	return eError;
+}
+
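+/* Flush a virtual address range one cache line at a time. Only the end
+ * address needs aligning up: clflush affects the whole line containing the
+ * given address, so stepping by the line size from an unaligned start
+ * still covers every line in the range.
+ */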
+static void x86_flush_cache_range(const void *pvStart, const void *pvEnd)
+{
+	IMG_BYTE *pbStart = (IMG_BYTE *)pvStart;
+	IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd;
+	IMG_BYTE *pbBase;
+
+	pbEnd = (IMG_BYTE *)PVR_ALIGN((uintptr_t)pbEnd,
+	                              (uintptr_t)boot_cpu_data.x86_clflush_size);
+
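+	/* pbEnd was rounded up to a cache-line boundary above; clflush on any
+	 * address evicts the whole line containing it, so an unaligned start is
+	 * fine. The mb() fences order the flushes against surrounding accesses. */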
+	mb();
+	for (pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size)
+	{
+#if !defined(CONFIG_L4)
+		clflush(pbBase);
+#endif
+	}
+	mb();
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	/* No clean feature on x86 */
+	x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                 void *pvVirtStart,
+                                 void *pvVirtEnd,
+                                 IMG_CPU_PHYADDR sCPUPhysStart,
+                                 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	/* No invalidate-only support */
+	x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+	return PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+	/* Not applicable to x86 architecture. */
+}
+
+IMG_BOOL OSIsWriteCombineUnalignedSafe(void)
+{
+	return IMG_TRUE;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/oskm_apphint.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/oskm_apphint.h
new file mode 100644
index 0000000..af4ccb1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/oskm_apphint.h
@@ -0,0 +1,176 @@
+/*************************************************************************/ /*!
+@File           oskm_apphint.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS-independent interface for retrieving KM apphints
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_defs.h"
+#if defined(LINUX)
+#include "km_apphint.h"
+#else
+#include "services_client_porting.h"
+#endif
+#if !defined(__OSKM_APPHINT_H__)
+#define __OSKM_APPHINT_H__
+
+
+#if defined(LINUX) && !defined(DOXYGEN)
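+/* These wrappers negate the return value on the assumption that the
+ * pvr_apphint_get_*() helpers follow the kernel convention of returning 0 on
+ * success, so callers receive a non-zero result when the hint was found and
+ * can fall back to the supplied default otherwise. */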
+static INLINE IMG_UINT os_get_km_apphint_UINT32(void *state, APPHINT_ID id, IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) {
+	return !pvr_apphint_get_uint32(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_UINT64(void *state, APPHINT_ID id, IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) {
+	return !pvr_apphint_get_uint64(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_BOOL(void *state, APPHINT_ID id, IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) {
+	return !pvr_apphint_get_bool(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_STRING(void *state, APPHINT_ID id, IMG_CHAR **pAppHintDefault, IMG_CHAR *buffer, size_t size) {
+	return !pvr_apphint_get_string(id, buffer, size);
+}
+
+#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \
+	os_get_km_apphint_UINT32(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintUINT64(state, name, appHintDefault, value) \
+	os_get_km_apphint_UINT64(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \
+	os_get_km_apphint_BOOL(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \
+	os_get_km_apphint_STRING(state, APPHINT_ID_ ## name, appHintDefault, buffer, size)
+
+
+#define OSCreateKMAppHintState(state) \
+	PVR_UNREFERENCED_PARAMETER(state)
+
+#define OSFreeKMAppHintState(state) \
+	PVR_UNREFERENCED_PARAMETER(state)
+
+#else /* #if defined(LINUX) && !defined(DOXYGEN) */
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintUINT32(state, name, appHintDefault, value)
+@Description    Interface for retrieval of uint32 km app hint.
+				For non-linux operating systems, this macro implements a call
+				from server code to PVRSRVGetAppHint() declared in
+				services_client_porting.h, effectively making it 'shared' code.
+@Input          state             App hint state
+@Input          name              Name used to identify app hint
+@Input          appHintDefault    Default value to be returned if no
+								  app hint is found.
+@Output         value             Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \
+	PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintUINT64(state, name, appHintDefault, value)
+@Description    Interface for retrieval of uint64 km app hint.
+				For non-linux operating systems, this macro implements a call
+				from server code to PVRSRVGetAppHint() declared in
+				services_client_porting.h, effectively making it 'shared' code.
+@Input          state             App hint state
+@Input          name              Name used to identify app hint
+@Input          appHintDefault    Default value to be returned if no
+								  app hint is found.
+@Output         value             Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintUINT64(state, name, appHintDefault, value) \
+	PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintBOOL(state, name, appHintDefault, value)
+@Description    Interface for retrieval of IMG_BOOL km app hint.
+				For non-linux operating systems, this macro implements a call
+				from server code to PVRSRVGetAppHint() declared in
+				services_client_porting.h, effectively making it 'shared' code.
+@Input          state             App hint state
+@Input          name              Name used to identify app hint
+@Input          appHintDefault    Default value to be returned if no
+								  app hint is found.
+@Output         value             Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \
+	PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size)
+@Description    Interface for retrieval of string km app hint.
+				For non-linux operating systems, this macro implements a call
+				from server code to PVRSRVGetAppHint() declared in
+				services_client_porting.h, effectively making it 'shared' code.
+@Input          state             App hint state
+@Input          name              Name used to identify app hint
+@Input          appHintDefault    Default value to be returned if no
+								  app hint is found.
+@Output         buffer            Buffer used to return app hint string.
+@Input          size              Size of the buffer.
+ */ /**************************************************************************/
+#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \
+	(PVR_UNREFERENCED_PARAMETER(size), PVRSRVGetAppHint(state, # name, IMG_STRING_TYPE, appHintDefault, buffer))
+
+/**************************************************************************/ /*!
+@def OSCreateKMAppHintState(state)
+@Description    Creates the app hint state.
+				For non-linux operating systems, this macro implements a call
+				from server code to PVRSRVCreateAppHintState() declared in
+				services_client_porting.h, effectively making it 'shared' code.
+@Output          state             App hint state
+ */ /**************************************************************************/
+#define OSCreateKMAppHintState(state) \
+	PVRSRVCreateAppHintState(IMG_SRV_UM, 0, state)
+
+/**************************************************************************/ /*!
+@def OSFreeKMAppHintState(state)
+@Description    Frees the app hint state.
+				For non-linux operating systems, this macro implements a call
+				from server code to PVRSRVFreeAppHintState() declared in
+				services_client_porting.h, effectively making it 'shared' code.
+@Input          state             App hint state
+ */ /**************************************************************************/
+#define OSFreeKMAppHintState(state) \
+	PVRSRVFreeAppHintState(IMG_SRV_UM, state)
+
+#endif /* #if defined(LINUX) && !defined(DOXYGEN) */
+
+#endif /* __OSKM_APPHINT_H__ */
+
+/******************************************************************************
+ End of file (oskm_apphint.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osmmap.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osmmap.h
new file mode 100644
index 0000000..bc83151
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osmmap.h
@@ -0,0 +1,123 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS Interface for mapping PMRs into CPU space.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS abstraction for the mmap2 interface for mapping PMRs into
+                User Mode memory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _OSMMAP_H_
+#define _OSMMAP_H_
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/**************************************************************************/ /*!
+@Function       OSMMapPMR
+@Description    Maps the specified PMR into CPU memory so that it may be
+                accessed by the user process.
+                Whether the memory is mapped read only, read/write, or not at
+                all, is dependent on the PMR itself.
+                The PMR handle is opaque to the user, and lower levels of this
+                stack ensure that the handle is private to this process, so
+                that this API cannot be abused to gain access to other
+                processes' PMRs.
+                The OS implementation of this function should return the
+                virtual address and length for the user to use. The "PrivData"
+                is to be stored opaquely by the caller (N.B. the caller should
+                make no assumptions about it; in particular, NULL is a valid
+                handle) and passed back in the call to OSMUnmapPMR.
+                The OS implementation is free to use the PrivData handle for any
+                purpose it sees fit.
+@Input          hBridge              The bridge handle.
+@Input          hPMR                 The handle of the PMR to be mapped.
+@Input          uiPMRLength          The size of the PMR.
+@Input          uiFlags              Flags indicating how the mapping should
+                                     be done (read-only, etc). These may not
+                                     be honoured if the PMR does not permit
+                                     them.
+@Output         phOSMMapPrivDataOut  Returned private data.
+@Output         ppvMappingAddressOut The returned mapping.
+@Output         puiMappingLengthOut  The size of the returned mapping.
+@Return         PVRSRV_OK on success, failure code otherwise.
+ */ /**************************************************************************/
+extern PVRSRV_ERROR
+OSMMapPMR(IMG_HANDLE hBridge,
+          IMG_HANDLE hPMR,
+          IMG_DEVMEM_SIZE_T uiPMRLength,
+          IMG_UINT32 uiFlags,
+          IMG_HANDLE *phOSMMapPrivDataOut,
+          void **ppvMappingAddressOut,
+          size_t *puiMappingLengthOut);
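+
+/* Illustrative call sequence (handles hypothetical, error handling omitted):
+ *
+ *     IMG_HANDLE hPriv;
+ *     void *pvAddr;
+ *     size_t uiLen;
+ *
+ *     eError = OSMMapPMR(hBridge, hPMR, uiPMRLength, uiFlags,
+ *                        &hPriv, &pvAddr, &uiLen);
+ *     ... access up to uiLen bytes at pvAddr ...
+ *     OSMUnmapPMR(hBridge, hPMR, hPriv, pvAddr, uiLen);
+ */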
+
+/**************************************************************************/ /*!
+@Function       OSMUnmapPMR
+@Description    Unmaps the specified PMR from CPU memory.
+                This function is the counterpart to OSMMapPMR.
+                The caller is required to pass the PMR handle back in along
+                with the same 3-tuple of information that was returned by the
+                call to OSMMapPMR in phOSMMapPrivDataOut.
+                It is possible to unmap only part of the original mapping
+                with this call, by specifying only the address range to be
+                unmapped in pvMappingAddress and uiMappingLength.
+@Input          hBridge              The bridge handle.
+@Input          hPMR                 The handle of the PMR to be unmapped.
+@Input          hOSMMapPrivData      The OS private data of the mapping.
+@Input          pvMappingAddress     The address to be unmapped.
+@Input          uiMappingLength      The size to be unmapped.
+@Return         PVRSRV_OK on success, failure code otherwise.
+ */ /**************************************************************************/
+/*
+   FIXME:
+   perhaps this function should take _only_ the hOSMMapPrivData arg,
+   and the implementation is required to store any of the other data
+   items that it requires to do the unmap?
+*/
+extern void
+OSMUnmapPMR(IMG_HANDLE hBridge,
+            IMG_HANDLE hPMR,
+            IMG_HANDLE hOSMMapPrivData,
+            void *pvMappingAddress,
+            size_t uiMappingLength);
+
+
+#endif /* _OSMMAP_H_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osmmap_stub.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osmmap_stub.c
new file mode 100644
index 0000000..77756c8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/osmmap_stub.c
@@ -0,0 +1,146 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS abstraction for the mmap2 interface for mapping PMRs into
+                User Mode memory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* our exported API */
+#include "osmmap.h"
+
+/* include/ */
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/include/ */
+
+/* services/include/srvhelper/ */
+#include "ra.h"
+
+#include "pmr.h"
+
+PVRSRV_ERROR
+OSMMapPMR(IMG_HANDLE hBridge,
+          IMG_HANDLE hPMR,
+          IMG_DEVMEM_SIZE_T uiPMRSize,
+          IMG_UINT32 uiFlags,
+          IMG_HANDLE *phOSMMapPrivDataOut,
+          void **ppvMappingAddressOut,
+          size_t *puiMappingLengthOut)
+{
+    PVRSRV_ERROR eError;
+    PMR *psPMR;
+    void *pvKernelAddress;
+    size_t uiLength;
+    IMG_HANDLE hPriv;
+
+    PVR_UNREFERENCED_PARAMETER(hBridge);
+    PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+    /*
+      Normally this function would mmap a PMR into the memory space of
+      user process, but in this case we're taking a PMR and mapping it
+      into kernel virtual space.  We keep the same function name for
+      symmetry as this allows the higher layers of the software stack
+      to not care whether they are user mode or kernel
+    */
+
+    psPMR = hPMR;
+
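+    /* Sparse PMRs are only partially backed, so they need the sparse mapping
+       variant. Passing 0 for the offset and size arguments is assumed to
+       request a mapping of the whole PMR. */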
+    if (PMR_IsSparse(psPMR))
+    {
+        eError = PMRAcquireSparseKernelMappingData(psPMR,
+                                            0,
+                                            0,
+                                            &pvKernelAddress,
+                                            &uiLength,
+                                            &hPriv);
+    }
+    else
+    {
+        eError = PMRAcquireKernelMappingData(psPMR,
+                                            0,
+                                            0,
+                                            &pvKernelAddress,
+                                            &uiLength,
+                                            &hPriv);
+    }
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    *phOSMMapPrivDataOut = hPriv;
+    *ppvMappingAddressOut = pvKernelAddress;
+    *puiMappingLengthOut = uiLength;
+
+    /* MappingLength might be rounded up to page size */
+    PVR_ASSERT(*puiMappingLengthOut >= uiPMRSize);
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+void
+OSMUnmapPMR(IMG_HANDLE hBridge,
+            IMG_HANDLE hPMR,
+            IMG_HANDLE hOSMMapPrivData,
+            void *pvMappingAddress,
+            size_t uiMappingLength)
+{
+    PMR *psPMR;
+
+    PVR_UNREFERENCED_PARAMETER(hBridge);
+    PVR_UNREFERENCED_PARAMETER(pvMappingAddress);
+    PVR_UNREFERENCED_PARAMETER(uiMappingLength);
+
+    psPMR = hPMR;
+    PMRReleaseKernelMappingData(psPMR,
+                                hOSMMapPrivData);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ospvr_gputrace.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ospvr_gputrace.h
new file mode 100644
index 0000000..1b239ef
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ospvr_gputrace.h
@@ -0,0 +1,167 @@
+/*************************************************************************/ /*!
+@File           ospvr_gputrace.h
+@Title          PVR GPU Trace module common environment interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_GPUTRACE_H_
+#define PVR_GPUTRACE_H_
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "rgx_hwperf.h"
+#include "device.h"
+
+#if defined(LINUX)
+
+void PVRGpuTraceEnqueueEvent(
+		PVRSRV_DEVICE_NODE *psDevNode,
+		IMG_UINT32 ui32FirmwareCtx,
+		IMG_UINT32 ui32ExternalJobRef,
+		IMG_UINT32 ui32InternalJobRef,
+		RGX_HWPERF_KICK_TYPE eKickType);
+
+/* Early initialisation of GPU Trace events logic.
+ * This function is called on *driver* initialisation. */
+PVRSRV_ERROR PVRGpuTraceSupportInit(void);
+
+/* GPU Trace resources final cleanup.
+ * This function is called on driver de-initialisation. */
+void PVRGpuTraceSupportDeInit(void);
+
+/* Initialisation for AppHints callbacks.
+ * This function is called during the late stage of driver initialisation,
+ * after the debugfs sub-system has been initialised but before device
+ * initialisation. */
+void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/* Per-device initialisation of the GPU Trace resources */
+PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/* Per-device cleanup for the GPU Trace resources. */
+void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/* Enables the gpu trace sub-system for a given device. */
+PVRSRV_ERROR PVRGpuTraceSetEnabled(
+		PVRSRV_DEVICE_NODE *psDeviceNode,
+		IMG_BOOL bNewValue);
+
+/* Returns IMG_TRUE if the gpu trace sub-system has been enabled (but not
+ * necessarily initialised). */
+IMG_BOOL PVRGpuTraceIsEnabled(void);
+
+/* Performs some initialisation steps if the feature was enabled
+ * on driver startup. */
+void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/* FTrace events callbacks interface */
+
+void PVRGpuTraceEnableUfoCallback(void);
+void PVRGpuTraceDisableUfoCallback(void);
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void);
+void PVRGpuTraceDisableFirmwareActivityCallback(void);
+
+#else /* defined(LINUX) */
+
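+/* On non-Linux builds the GPU trace feature is compiled out: every entry
+ * point below is a no-op inline stub, so callers never need their own
+ * #if defined(LINUX) guards. */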
+static inline void PVRGpuTraceEnqueueEvent(
+		PVRSRV_DEVICE_NODE *psDevNode,
+		IMG_UINT32 ui32FirmwareCtx,
+		IMG_UINT32 ui32ExternalJobRef,
+		IMG_UINT32 ui32InternalJobRef,
+		RGX_HWPERF_KICK_TYPE eKickType)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(ui32FirmwareCtx);
+	PVR_UNREFERENCED_PARAMETER(ui32ExternalJobRef);
+	PVR_UNREFERENCED_PARAMETER(ui32InternalJobRef);
+	PVR_UNREFERENCED_PARAMETER(eKickType);
+}
+
+static inline PVRSRV_ERROR PVRGpuTraceSupportInit(void) {
+	return PVRSRV_OK;
+}
+
+static inline void PVRGpuTraceSupportDeInit(void) {}
+
+static inline void PVRGpuTraceInitAppHintCallbacks(
+		const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+static inline PVRSRV_ERROR PVRGpuTraceInitDevice(
+		PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	return PVRSRV_OK;
+}
+
+static inline void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+static inline PVRSRV_ERROR PVRGpuTraceSetEnabled(
+		PVRSRV_DEVICE_NODE *psDeviceNode,
+		IMG_BOOL bNewValue)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(bNewValue);
+	return PVRSRV_OK;
+}
+
+static inline IMG_BOOL PVRGpuTraceIsEnabled(void)
+{
+	return IMG_FALSE;
+}
+
+static inline void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+static inline void PVRGpuTraceEnableUfoCallback(void) {}
+static inline void PVRGpuTraceDisableUfoCallback(void) {}
+
+static inline void PVRGpuTraceEnableFirmwareActivityCallback(void) {}
+static inline void PVRGpuTraceDisableFirmwareActivityCallback(void) {}
+
+#endif /* defined(LINUX) */
+
+#endif /* PVR_GPUTRACE_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdp/drm_pdp.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdp/drm_pdp.mk
new file mode 100644
index 0000000..953a137
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdp/drm_pdp.mk
@@ -0,0 +1,12 @@
+drm_pdp-y += \
+ ../apollo/drm_pdp_crtc.o \
+ ../apollo/drm_pdp_debugfs.o \
+ ../apollo/drm_pdp_drv.o \
+ ../apollo/drm_pdp_dvi.o \
+ ../apollo/drm_pdp_gem.o \
+ ../apollo/drm_pdp_modeset.o \
+ ../apollo/drm_pdp_plane.o \
+ ../apollo/drm_pdp_tmds.o \
+ ../apollo/pdp_apollo.o \
+ ../apollo/pdp_odin.o \
+ ../apollo/pdp_plato.o
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdp2_mmu_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdp2_mmu_regs.h
new file mode 100644
index 0000000..6164c58
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdp2_mmu_regs.h
@@ -0,0 +1,764 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#ifndef _PDP2_MMU_REGS_H
+#define _PDP2_MMU_REGS_H
+
+/* Hardware register definitions */
+
+#define PDP_BIF_DIR_BASE_ADDR_OFFSET		(0x0020)
+#define PDP_BIF_DIR_BASE_ADDR_STRIDE		(4)
+#define PDP_BIF_DIR_BASE_ADDR_NO_ENTRIES		(4)
+
+/* PDP_BIF, DIR_BASE_ADDR, MMU_DIR_BASE_ADDR
+Base address in physical memory for MMU Directory n Entries. When MMU_ENABLE_EXT_ADDRESSING is '1', the bits 31:0 are assigned to the address 31+EXT_ADDR_RANGE:0+EXT_ADDR_RANGE, but then any address offset within a page is forced to 0. When MMU_ENABLE_EXT_ADDRESSING is '0', bits 31:12 are assigned to address 31:12
+*/
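+/* For illustration (derived from the description above, not a separate spec):
+   with EXT_ADDR_RANGE = 4, register bits 31:0 supply physical address bits
+   35:4, extending the reachable physical range to 36 bits. */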
+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_MASK		(0xFFFFFFFF)
+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_SHIFT		(0)
+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_LENGTH		(32)
+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_SIGNED_FIELD	IMG_FALSE
+
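+/* Every register field below follows the same macro pattern. Illustrative
+   use (not part of this header):
+       val = (reg & PDP_BIF_<REG>_<FIELD>_MASK) >> PDP_BIF_<REG>_<FIELD>_SHIFT;
+       reg = (reg & ~PDP_BIF_<REG>_<FIELD>_MASK) |
+             ((val << PDP_BIF_<REG>_<FIELD>_SHIFT) & PDP_BIF_<REG>_<FIELD>_MASK);
+*/
+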
+#define PDP_BIF_TILE_CFG_OFFSET		(0x0040)
+#define PDP_BIF_TILE_CFG_STRIDE		(4)
+#define PDP_BIF_TILE_CFG_NO_ENTRIES		(4)
+
+/* PDP_BIF, TILE_CFG, TILE_128INTERLEAVE
+*/
+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_MASK		(0x00000010)
+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_LSBMASK		(0x00000001)
+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_SHIFT		(4)
+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_LENGTH		(1)
+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, TILE_CFG, TILE_ENABLE
+*/
+#define PDP_BIF_TILE_CFG_TILE_ENABLE_MASK		(0x00000008)
+#define PDP_BIF_TILE_CFG_TILE_ENABLE_LSBMASK		(0x00000001)
+#define PDP_BIF_TILE_CFG_TILE_ENABLE_SHIFT		(3)
+#define PDP_BIF_TILE_CFG_TILE_ENABLE_LENGTH		(1)
+#define PDP_BIF_TILE_CFG_TILE_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, TILE_CFG, TILE_STRIDE
+*/
+#define PDP_BIF_TILE_CFG_TILE_STRIDE_MASK		(0x00000007)
+#define PDP_BIF_TILE_CFG_TILE_STRIDE_LSBMASK		(0x00000007)
+#define PDP_BIF_TILE_CFG_TILE_STRIDE_SHIFT		(0)
+#define PDP_BIF_TILE_CFG_TILE_STRIDE_LENGTH		(3)
+#define PDP_BIF_TILE_CFG_TILE_STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_TILE_MIN_ADDR_OFFSET		(0x0050)
+#define PDP_BIF_TILE_MIN_ADDR_STRIDE		(4)
+#define PDP_BIF_TILE_MIN_ADDR_NO_ENTRIES		(4)
+
+/* PDP_BIF, TILE_MIN_ADDR, TILE_MIN_ADDR
+*/
+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_MASK		(0xFFFFFFFF)
+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_SHIFT		(0)
+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_LENGTH		(32)
+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_TILE_MAX_ADDR_OFFSET		(0x0060)
+#define PDP_BIF_TILE_MAX_ADDR_STRIDE		(4)
+#define PDP_BIF_TILE_MAX_ADDR_NO_ENTRIES		(4)
+
+/* PDP_BIF, TILE_MAX_ADDR, TILE_MAX_ADDR
+*/
+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_MASK		(0xFFFFFFFF)
+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_SHIFT		(0)
+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_LENGTH		(32)
+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_CONTROL0_OFFSET		(0x0000)
+
+/* PDP_BIF, CONTROL0, MMU_TILING_SCHEME
+*/
+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_MASK		(0x00000001)
+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_SHIFT		(0)
+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_LENGTH		(1)
+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL0, MMU_CACHE_POLICY
+*/
+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_MASK		(0x00000100)
+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_SHIFT		(8)
+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_LENGTH		(1)
+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL0, FORCE_CACHE_POLICY_BYPASS
+*/
+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_MASK		(0x00000200)
+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_SHIFT		(9)
+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_LENGTH		(1)
+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL0, STALL_ON_PROTOCOL_FAULT
+*/
+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_MASK		(0x00001000)
+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_SHIFT		(12)
+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_LENGTH		(1)
+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_CONTROL1_OFFSET		(0x0008)
+
+/* PDP_BIF, CONTROL1, MMU_FLUSH0
+*/
+#define PDP_BIF_CONTROL1_MMU_FLUSH0_MASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_FLUSH0_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_FLUSH0_SHIFT		(0)
+#define PDP_BIF_CONTROL1_MMU_FLUSH0_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_FLUSH0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_FLUSH1
+*/
+#define PDP_BIF_CONTROL1_MMU_FLUSH1_MASK		(0x00000002)
+#define PDP_BIF_CONTROL1_MMU_FLUSH1_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_FLUSH1_SHIFT		(1)
+#define PDP_BIF_CONTROL1_MMU_FLUSH1_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_FLUSH1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_FLUSH2
+*/
+#define PDP_BIF_CONTROL1_MMU_FLUSH2_MASK		(0x00000004)
+#define PDP_BIF_CONTROL1_MMU_FLUSH2_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_FLUSH2_SHIFT		(2)
+#define PDP_BIF_CONTROL1_MMU_FLUSH2_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_FLUSH2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_FLUSH3
+*/
+#define PDP_BIF_CONTROL1_MMU_FLUSH3_MASK		(0x00000008)
+#define PDP_BIF_CONTROL1_MMU_FLUSH3_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_FLUSH3_SHIFT		(3)
+#define PDP_BIF_CONTROL1_MMU_FLUSH3_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_FLUSH3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_INVALDC0
+*/
+#define PDP_BIF_CONTROL1_MMU_INVALDC0_MASK		(0x00000100)
+#define PDP_BIF_CONTROL1_MMU_INVALDC0_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_INVALDC0_SHIFT		(8)
+#define PDP_BIF_CONTROL1_MMU_INVALDC0_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_INVALDC0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_INVALDC1
+*/
+#define PDP_BIF_CONTROL1_MMU_INVALDC1_MASK		(0x00000200)
+#define PDP_BIF_CONTROL1_MMU_INVALDC1_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_INVALDC1_SHIFT		(9)
+#define PDP_BIF_CONTROL1_MMU_INVALDC1_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_INVALDC1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_INVALDC2
+*/
+#define PDP_BIF_CONTROL1_MMU_INVALDC2_MASK		(0x00000400)
+#define PDP_BIF_CONTROL1_MMU_INVALDC2_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_INVALDC2_SHIFT		(10)
+#define PDP_BIF_CONTROL1_MMU_INVALDC2_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_INVALDC2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_INVALDC3
+*/
+#define PDP_BIF_CONTROL1_MMU_INVALDC3_MASK		(0x00000800)
+#define PDP_BIF_CONTROL1_MMU_INVALDC3_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_INVALDC3_SHIFT		(11)
+#define PDP_BIF_CONTROL1_MMU_INVALDC3_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_INVALDC3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_FAULT_CLEAR
+*/
+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_MASK		(0x00010000)
+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_SHIFT		(16)
+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, PROTOCOL_FAULT_CLEAR
+*/
+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_MASK		(0x00100000)
+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_SHIFT		(20)
+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_LENGTH		(1)
+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_PAUSE_SET
+*/
+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_MASK		(0x01000000)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_SHIFT		(24)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_PAUSE_CLEAR
+*/
+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_MASK		(0x02000000)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_SHIFT		(25)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_SOFT_RESET
+*/
+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_MASK		(0x10000000)
+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_SHIFT		(28)
+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_BANK_INDEX_OFFSET		(0x0010)
+
+/* PDP_BIF, BANK_INDEX, MMU_BANK_INDEX
+*/
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_MASK		(0xC0000000)
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_LSBMASK		(0x00000003)
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SHIFT		(30)
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_LENGTH		(2)
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SIGNED_FIELD	IMG_FALSE
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_NO_REPS		(16)
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SIZE		(2)
+
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_OFFSET		(0x0018)
+
+/* PDP_BIF, REQUEST_PRIORITY_ENABLE, CMD_PRIORITY_ENABLE
+*/
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_MASK		(0x00008000)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_LSBMASK		(0x00000001)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SHIFT		(15)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_LENGTH		(1)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SIGNED_FIELD	IMG_FALSE
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_NO_REPS		(16)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SIZE		(1)
+
+/* PDP_BIF, REQUEST_PRIORITY_ENABLE, CMD_MMU_PRIORITY_ENABLE
+*/
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_MASK		(0x00010000)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_LSBMASK		(0x00000001)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_SHIFT		(16)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_LENGTH		(1)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_OFFSET		(0x001C)
+
+/* PDP_BIF, REQUEST_LIMITED_THROUGHPUT, LIMITED_WORDS
+*/
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_MASK		(0x000003FF)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_LSBMASK		(0x000003FF)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_SHIFT		(0)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_LENGTH		(10)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, REQUEST_LIMITED_THROUGHPUT, REQUEST_GAP
+*/
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_MASK		(0x0FFF0000)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_LSBMASK		(0x00000FFF)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_SHIFT		(16)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_LENGTH		(12)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_ADDRESS_CONTROL_OFFSET		(0x0070)
+
+/* PDP_BIF, ADDRESS_CONTROL, MMU_BYPASS
+*/
+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_MASK		(0x00000001)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_LSBMASK		(0x00000001)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SHIFT		(0)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_LENGTH		(1)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, ADDRESS_CONTROL, MMU_ENABLE_EXT_ADDRESSING
+*/
+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_MASK		(0x00000010)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_LSBMASK		(0x00000001)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SHIFT		(4)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_LENGTH		(1)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, ADDRESS_CONTROL, UPPER_ADDRESS_FIXED
+*/
+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_MASK		(0x00FF0000)
+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_LSBMASK		(0x000000FF)
+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SHIFT		(16)
+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_LENGTH		(8)
+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_CONFIG0_OFFSET		(0x0080)
+
+/* PDP_BIF, CONFIG0, NUM_REQUESTORS
+*/
+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_MASK		(0x0000000F)
+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_LSBMASK		(0x0000000F)
+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_SHIFT		(0)
+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_LENGTH		(4)
+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, EXTENDED_ADDR_RANGE
+*/
+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_MASK		(0x000000F0)
+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_LSBMASK		(0x0000000F)
+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_SHIFT		(4)
+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_LENGTH		(4)
+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, GROUP_OVERRIDE_SIZE
+*/
+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_MASK		(0x00000700)
+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_LSBMASK		(0x00000007)
+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_SHIFT		(8)
+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_LENGTH		(3)
+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, ADDR_COHERENCY_SUPPORTED
+*/
+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_MASK		(0x00001000)
+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_SHIFT		(12)
+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_LENGTH		(1)
+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, MMU_SUPPORTED
+*/
+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_MASK		(0x00002000)
+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_SHIFT		(13)
+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_LENGTH		(1)
+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, TILE_ADDR_GRANULARITY
+*/
+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_MASK		(0x001F0000)
+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_LSBMASK		(0x0000001F)
+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_SHIFT		(16)
+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_LENGTH		(5)
+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, NO_READ_REORDER
+*/
+#define PDP_BIF_CONFIG0_NO_READ_REORDER_MASK		(0x00200000)
+#define PDP_BIF_CONFIG0_NO_READ_REORDER_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG0_NO_READ_REORDER_SHIFT		(21)
+#define PDP_BIF_CONFIG0_NO_READ_REORDER_LENGTH		(1)
+#define PDP_BIF_CONFIG0_NO_READ_REORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, TAGS_SUPPORTED
+*/
+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_MASK		(0xFFC00000)
+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_LSBMASK		(0x000003FF)
+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_SHIFT		(22)
+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_LENGTH		(10)
+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_CONFIG1_OFFSET		(0x0084)
+
+/* PDP_BIF, CONFIG1, PAGE_SIZE
+*/
+#define PDP_BIF_CONFIG1_PAGE_SIZE_MASK		(0x0000000F)
+#define PDP_BIF_CONFIG1_PAGE_SIZE_LSBMASK		(0x0000000F)
+#define PDP_BIF_CONFIG1_PAGE_SIZE_SHIFT		(0)
+#define PDP_BIF_CONFIG1_PAGE_SIZE_LENGTH		(4)
+#define PDP_BIF_CONFIG1_PAGE_SIZE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG1, PAGE_CACHE_ENTRIES
+*/
+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_MASK		(0x0000FF00)
+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_LSBMASK		(0x000000FF)
+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_SHIFT		(8)
+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_LENGTH		(8)
+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG1, DIR_CACHE_ENTRIES
+*/
+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_MASK		(0x001F0000)
+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_LSBMASK		(0x0000001F)
+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SHIFT		(16)
+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_LENGTH		(5)
+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG1, BANDWIDTH_COUNT_SUPPORTED
+*/
+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_MASK		(0x01000000)
+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_SHIFT		(24)
+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_LENGTH		(1)
+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG1, STALL_COUNT_SUPPORTED
+*/
+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_MASK		(0x02000000)
+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_SHIFT		(25)
+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_LENGTH		(1)
+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG1, LATENCY_COUNT_SUPPORTED
+*/
+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_MASK		(0x04000000)
+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_SHIFT		(26)
+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_LENGTH		(1)
+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG1, SUPPORT_READ_INTERLEAVE
+*/
+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_MASK		(0x10000000)
+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_SHIFT		(28)
+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_LENGTH		(1)
+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_STATUS0_OFFSET		(0x0088)
+
+/* PDP_BIF, STATUS0, MMU_PF_N_RW
+*/
+#define PDP_BIF_STATUS0_MMU_PF_N_RW_MASK		(0x00000001)
+#define PDP_BIF_STATUS0_MMU_PF_N_RW_LSBMASK		(0x00000001)
+#define PDP_BIF_STATUS0_MMU_PF_N_RW_SHIFT		(0)
+#define PDP_BIF_STATUS0_MMU_PF_N_RW_LENGTH		(1)
+#define PDP_BIF_STATUS0_MMU_PF_N_RW_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, STATUS0, MMU_FAULT_ADDR
+*/
+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_MASK		(0xFFFFF000)
+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_LSBMASK		(0x000FFFFF)
+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_SHIFT		(12)
+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_LENGTH		(20)
+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_STATUS1_OFFSET		(0x008C)
+
+/* PDP_BIF, STATUS1, MMU_FAULT_REQ_STAT
+*/
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_MASK		(0x0000FFFF)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_LSBMASK		(0x0000FFFF)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_SHIFT		(0)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_LENGTH		(16)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, STATUS1, MMU_FAULT_REQ_ID
+*/
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_MASK		(0x000F0000)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_LSBMASK		(0x0000000F)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_SHIFT		(16)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_LENGTH		(4)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, STATUS1, MMU_FAULT_INDEX
+*/
+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_MASK		(0x03000000)
+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_LSBMASK		(0x00000003)
+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_SHIFT		(24)
+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_LENGTH		(2)
+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, STATUS1, MMU_FAULT_RNW
+*/
+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_MASK		(0x10000000)
+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_LSBMASK		(0x00000001)
+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_SHIFT		(28)
+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_LENGTH		(1)
+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_MEM_REQ_OFFSET		(0x0090)
+
+/* PDP_BIF, MEM_REQ, TAG_OUTSTANDING
+*/
+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_MASK		(0x000003FF)
+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_LSBMASK		(0x000003FF)
+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_SHIFT		(0)
+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_LENGTH		(10)
+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, MEM_REQ, EXT_WRRESP_FAULT
+*/
+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_MASK		(0x00001000)
+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_LSBMASK		(0x00000001)
+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_SHIFT		(12)
+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_LENGTH		(1)
+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, MEM_REQ, EXT_RDRESP_FAULT
+*/
+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_MASK		(0x00002000)
+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_LSBMASK		(0x00000001)
+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_SHIFT		(13)
+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_LENGTH		(1)
+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, MEM_REQ, EXT_READ_BURST_FAULT
+*/
+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_MASK		(0x00004000)
+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_LSBMASK		(0x00000001)
+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_SHIFT		(14)
+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_LENGTH		(1)
+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, MEM_REQ, INT_PROTOCOL_FAULT
+*/
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_MASK		(0x80000000)
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_LSBMASK		(0x00000001)
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SHIFT		(31)
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_LENGTH		(1)
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SIGNED_FIELD	IMG_FALSE
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_NO_REPS		(16)
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SIZE		(1)
+
+#define PDP_BIF_MEM_EXT_OUTSTANDING_OFFSET		(0x0094)
+
+/* PDP_BIF, MEM_EXT_OUTSTANDING, READ_WORDS_OUTSTANDING
+*/
+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_MASK		(0x0000FFFF)
+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_LSBMASK		(0x0000FFFF)
+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_SHIFT		(0)
+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_LENGTH		(16)
+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_FAULT_SELECT_OFFSET		(0x00A0)
+
+/* PDP_BIF, FAULT_SELECT, MMU_FAULT_SELECT
+*/
+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_MASK		(0x0000000F)
+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_LSBMASK		(0x0000000F)
+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_SHIFT		(0)
+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_LENGTH		(4)
+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_PROTOCOL_FAULT_OFFSET		(0x00A8)
+
+/* PDP_BIF, PROTOCOL_FAULT, FAULT_PAGE_BREAK
+*/
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_MASK		(0x00000001)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_LSBMASK		(0x00000001)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_SHIFT		(0)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_LENGTH		(1)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, PROTOCOL_FAULT, FAULT_WRITE
+*/
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_MASK		(0x00000010)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_LSBMASK		(0x00000001)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_SHIFT		(4)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_LENGTH		(1)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, PROTOCOL_FAULT, FAULT_READ
+*/
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_MASK		(0x00000020)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_LSBMASK		(0x00000001)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_SHIFT		(5)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_LENGTH		(1)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_TOTAL_READ_REQ_OFFSET		(0x0100)
+
+/* PDP_BIF, TOTAL_READ_REQ, TOTAL_READ_REQ
+*/
+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_MASK		(0xFFFFFFFF)
+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_SHIFT		(0)
+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_LENGTH		(32)
+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_TOTAL_WRITE_REQ_OFFSET		(0x0104)
+
+/* PDP_BIF, TOTAL_WRITE_REQ, TOTAL_WRITE_REQ
+*/
+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_MASK		(0xFFFFFFFF)
+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_SHIFT		(0)
+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_LENGTH		(32)
+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_READS_LESS_64_REQ_OFFSET		(0x0108)
+
+/* PDP_BIF, READS_LESS_64_REQ, READS_LESS_64_REQ
+*/
+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_MASK		(0xFFFFFFFF)
+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_SHIFT		(0)
+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_LENGTH		(32)
+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_WRITES_LESS_64_REQ_OFFSET		(0x010C)
+
+/* PDP_BIF, WRITES_LESS_64_REQ, WRITES_LESS_64_REQ
+*/
+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_MASK		(0xFFFFFFFF)
+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_SHIFT		(0)
+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_LENGTH		(32)
+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_EXT_CMD_STALL_OFFSET		(0x0120)
+
+/* PDP_BIF, EXT_CMD_STALL, EXT_CMD_STALL
+*/
+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_MASK		(0xFFFFFFFF)
+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_SHIFT		(0)
+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_LENGTH		(32)
+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_WRITE_REQ_STALL_OFFSET		(0x0124)
+
+/* PDP_BIF, WRITE_REQ_STALL, WRITE_REQ_STALL
+*/
+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_MASK		(0xFFFFFFFF)
+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_SHIFT		(0)
+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_LENGTH		(32)
+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_MISS_STALL_OFFSET		(0x0128)
+
+/* PDP_BIF, MISS_STALL, MMU_MISS_STALL
+*/
+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_MASK		(0xFFFFFFFF)
+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_SHIFT		(0)
+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_LENGTH		(32)
+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_ADDRESS_STALL_OFFSET		(0x012C)
+
+/* PDP_BIF, ADDRESS_STALL, ADDRESS_STALL
+*/
+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_MASK		(0xFFFFFFFF)
+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_SHIFT		(0)
+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_LENGTH		(32)
+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_TAG_STALL_OFFSET		(0x0130)
+
+/* PDP_BIF, TAG_STALL, TAG_STALL
+*/
+#define PDP_BIF_TAG_STALL_TAG_STALL_MASK		(0xFFFFFFFF)
+#define PDP_BIF_TAG_STALL_TAG_STALL_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_TAG_STALL_TAG_STALL_SHIFT		(0)
+#define PDP_BIF_TAG_STALL_TAG_STALL_LENGTH		(32)
+#define PDP_BIF_TAG_STALL_TAG_STALL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_PEAK_READ_OUTSTANDING_OFFSET		(0x0140)
+
+/* PDP_BIF, PEAK_READ_OUTSTANDING, PEAK_TAG_OUTSTANDING
+*/
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_MASK		(0x000003FF)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_LSBMASK		(0x000003FF)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_SHIFT		(0)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_LENGTH		(10)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, PEAK_READ_OUTSTANDING, PEAK_READ_LATENCY
+*/
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_MASK		(0xFFFF0000)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_LSBMASK		(0x0000FFFF)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_SHIFT		(16)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_LENGTH		(16)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_AVERAGE_READ_LATENCY_OFFSET		(0x0144)
+
+/* PDP_BIF, AVERAGE_READ_LATENCY, AVERAGE_READ_LATENCY
+*/
+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_MASK		(0xFFFFFFFF)
+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_SHIFT		(0)
+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_LENGTH		(32)
+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_STATISTICS_CONTROL_OFFSET		(0x0160)
+
+/* PDP_BIF, STATISTICS_CONTROL, BANDWIDTH_STATS_INIT
+*/
+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_MASK		(0x00000001)
+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_LSBMASK		(0x00000001)
+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_SHIFT		(0)
+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_LENGTH		(1)
+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, STATISTICS_CONTROL, STALL_STATS_INIT
+*/
+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_MASK		(0x00000002)
+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_LSBMASK		(0x00000001)
+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_SHIFT		(1)
+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_LENGTH		(1)
+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, STATISTICS_CONTROL, LATENCY_STATS_INIT
+*/
+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_MASK		(0x00000004)
+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_LSBMASK		(0x00000001)
+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_SHIFT		(2)
+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_LENGTH		(1)
+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_VERSION_OFFSET		(0x01D0)
+
+/* PDP_BIF, VERSION, MMU_MAJOR_REV
+*/
+#define PDP_BIF_VERSION_MMU_MAJOR_REV_MASK		(0x00FF0000)
+#define PDP_BIF_VERSION_MMU_MAJOR_REV_LSBMASK		(0x000000FF)
+#define PDP_BIF_VERSION_MMU_MAJOR_REV_SHIFT		(16)
+#define PDP_BIF_VERSION_MMU_MAJOR_REV_LENGTH		(8)
+#define PDP_BIF_VERSION_MMU_MAJOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, VERSION, MMU_MINOR_REV
+*/
+#define PDP_BIF_VERSION_MMU_MINOR_REV_MASK		(0x0000FF00)
+#define PDP_BIF_VERSION_MMU_MINOR_REV_LSBMASK		(0x000000FF)
+#define PDP_BIF_VERSION_MMU_MINOR_REV_SHIFT		(8)
+#define PDP_BIF_VERSION_MMU_MINOR_REV_LENGTH		(8)
+#define PDP_BIF_VERSION_MMU_MINOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, VERSION, MMU_MAINT_REV
+*/
+#define PDP_BIF_VERSION_MMU_MAINT_REV_MASK		(0x000000FF)
+#define PDP_BIF_VERSION_MMU_MAINT_REV_LSBMASK		(0x000000FF)
+#define PDP_BIF_VERSION_MMU_MAINT_REV_SHIFT		(0)
+#define PDP_BIF_VERSION_MMU_MAINT_REV_LENGTH		(8)
+#define PDP_BIF_VERSION_MMU_MAINT_REV_SIGNED_FIELD	IMG_FALSE
+
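+/*
+ * Decoding sketch (illustrative only, not generated content): given a raw
+ * value `ver` read from the register at PDP_BIF_VERSION_OFFSET, the revision
+ * fields unpack with the mask/shift pairs defined above:
+ *
+ *	major = (ver & PDP_BIF_VERSION_MMU_MAJOR_REV_MASK)
+ *			>> PDP_BIF_VERSION_MMU_MAJOR_REV_SHIFT;
+ *	minor = (ver & PDP_BIF_VERSION_MMU_MINOR_REV_MASK)
+ *			>> PDP_BIF_VERSION_MMU_MINOR_REV_SHIFT;
+ */
+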
+#endif /* _PDP2_MMU_REGS_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdp2_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdp2_regs.h
new file mode 100644
index 0000000..bf85386
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdp2_regs.h
@@ -0,0 +1,8565 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#ifndef _PDP2_REGS_H
+#define _PDP2_REGS_H
+
+/*
+ * Bitfield operations
+ * For each argument field, the following preprocessor macros must exist
+ * field##_MASK - the bit mask of the field, in its register position
+ * field##_SHIFT - the bit offset of the field's least significant bit
+ */
+#define PLACE_FIELD(field, val) \
+	(((u32)(val) << (field##_SHIFT)) & (field##_MASK))
+
+#define ADJ_FIELD(x, field, val) \
+	(((x) & ~(field##_MASK)) \
+	| PLACE_FIELD(field, val))
+
+#define SET_FIELD(x, field, val) \
+	(x) = ADJ_FIELD(x, field, val)
+
+#define GET_FIELD(x, field) \
+	(((x) & (field##_MASK)) >> (field##_SHIFT))
+
+/* Move a field of length l1 at bit offset o1 to offset o2 with length l2,
+ * keeping the most significant bits when the destination is narrower.
+ */
+#define MOVE_FIELD(x, o1, l1, o2, l2) \
+	(((x) >> ((o1) + (l1) - (l2))) << (o2))
+
+#define MAX_FIELD_VALUE(field) \
+	((field##_MASK) >> (field##_SHIFT))
+
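+/*
+ * Usage sketch (illustrative only, not part of the register map): the
+ * SET_FIELD/GET_FIELD macros above pair with the *_MASK/*_SHIFT defines
+ * below, e.g. for the GRPH1 pixel format field:
+ *
+ *	u32 reg = 0;
+ *	SET_FIELD(reg, PDP_GRPH1SURF_GRPH1PIXFMT, 0x1F);
+ *	GET_FIELD(reg, PDP_GRPH1SURF_GRPH1PIXFMT) now yields 0x1F.
+ */
+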
+/* Hardware register definitions */
+
+#define PDP_GRPH1SURF_OFFSET		(0x0000)
+
+/* PDP, GRPH1SURF, GRPH1PIXFMT
+*/
+#define PDP_GRPH1SURF_GRPH1PIXFMT_MASK		(0xF8000000)
+#define PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT		(27)
+#define PDP_GRPH1SURF_GRPH1PIXFMT_LENGTH		(5)
+#define PDP_GRPH1SURF_GRPH1PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USEGAMMA
+*/
+#define PDP_GRPH1SURF_GRPH1USEGAMMA_MASK		(0x04000000)
+#define PDP_GRPH1SURF_GRPH1USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_GRPH1SURF_GRPH1USEGAMMA_SHIFT		(26)
+#define PDP_GRPH1SURF_GRPH1USEGAMMA_LENGTH		(1)
+#define PDP_GRPH1SURF_GRPH1USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USECSC
+*/
+#define PDP_GRPH1SURF_GRPH1USECSC_MASK		(0x02000000)
+#define PDP_GRPH1SURF_GRPH1USECSC_LSBMASK		(0x00000001)
+#define PDP_GRPH1SURF_GRPH1USECSC_SHIFT		(25)
+#define PDP_GRPH1SURF_GRPH1USECSC_LENGTH		(1)
+#define PDP_GRPH1SURF_GRPH1USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1LUTRWCHOICE
+*/
+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_MASK		(0x01000000)
+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LSBMASK		(0x00000001)
+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SHIFT		(24)
+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LENGTH		(1)
+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USELUT
+*/
+#define PDP_GRPH1SURF_GRPH1USELUT_MASK		(0x00800000)
+#define PDP_GRPH1SURF_GRPH1USELUT_LSBMASK		(0x00000001)
+#define PDP_GRPH1SURF_GRPH1USELUT_SHIFT		(23)
+#define PDP_GRPH1SURF_GRPH1USELUT_LENGTH		(1)
+#define PDP_GRPH1SURF_GRPH1USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2SURF_OFFSET		(0x0004)
+
+/* PDP, GRPH2SURF, GRPH2PIXFMT
+*/
+#define PDP_GRPH2SURF_GRPH2PIXFMT_MASK		(0xF8000000)
+#define PDP_GRPH2SURF_GRPH2PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT		(27)
+#define PDP_GRPH2SURF_GRPH2PIXFMT_LENGTH		(5)
+#define PDP_GRPH2SURF_GRPH2PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USEGAMMA
+*/
+#define PDP_GRPH2SURF_GRPH2USEGAMMA_MASK		(0x04000000)
+#define PDP_GRPH2SURF_GRPH2USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_GRPH2SURF_GRPH2USEGAMMA_SHIFT		(26)
+#define PDP_GRPH2SURF_GRPH2USEGAMMA_LENGTH		(1)
+#define PDP_GRPH2SURF_GRPH2USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USECSC
+*/
+#define PDP_GRPH2SURF_GRPH2USECSC_MASK		(0x02000000)
+#define PDP_GRPH2SURF_GRPH2USECSC_LSBMASK		(0x00000001)
+#define PDP_GRPH2SURF_GRPH2USECSC_SHIFT		(25)
+#define PDP_GRPH2SURF_GRPH2USECSC_LENGTH		(1)
+#define PDP_GRPH2SURF_GRPH2USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2LUTRWCHOICE
+*/
+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_MASK		(0x01000000)
+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LSBMASK		(0x00000001)
+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SHIFT		(24)
+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LENGTH		(1)
+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USELUT
+*/
+#define PDP_GRPH2SURF_GRPH2USELUT_MASK		(0x00800000)
+#define PDP_GRPH2SURF_GRPH2USELUT_LSBMASK		(0x00000001)
+#define PDP_GRPH2SURF_GRPH2USELUT_SHIFT		(23)
+#define PDP_GRPH2SURF_GRPH2USELUT_LENGTH		(1)
+#define PDP_GRPH2SURF_GRPH2USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3SURF_OFFSET		(0x0008)
+
+/* PDP, GRPH3SURF, GRPH3PIXFMT
+*/
+#define PDP_GRPH3SURF_GRPH3PIXFMT_MASK		(0xF8000000)
+#define PDP_GRPH3SURF_GRPH3PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_GRPH3SURF_GRPH3PIXFMT_SHIFT		(27)
+#define PDP_GRPH3SURF_GRPH3PIXFMT_LENGTH		(5)
+#define PDP_GRPH3SURF_GRPH3PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USEGAMMA
+*/
+#define PDP_GRPH3SURF_GRPH3USEGAMMA_MASK		(0x04000000)
+#define PDP_GRPH3SURF_GRPH3USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_GRPH3SURF_GRPH3USEGAMMA_SHIFT		(26)
+#define PDP_GRPH3SURF_GRPH3USEGAMMA_LENGTH		(1)
+#define PDP_GRPH3SURF_GRPH3USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USECSC
+*/
+#define PDP_GRPH3SURF_GRPH3USECSC_MASK		(0x02000000)
+#define PDP_GRPH3SURF_GRPH3USECSC_LSBMASK		(0x00000001)
+#define PDP_GRPH3SURF_GRPH3USECSC_SHIFT		(25)
+#define PDP_GRPH3SURF_GRPH3USECSC_LENGTH		(1)
+#define PDP_GRPH3SURF_GRPH3USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3LUTRWCHOICE
+*/
+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_MASK		(0x01000000)
+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LSBMASK		(0x00000001)
+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SHIFT		(24)
+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LENGTH		(1)
+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USELUT
+*/
+#define PDP_GRPH3SURF_GRPH3USELUT_MASK		(0x00800000)
+#define PDP_GRPH3SURF_GRPH3USELUT_LSBMASK		(0x00000001)
+#define PDP_GRPH3SURF_GRPH3USELUT_SHIFT		(23)
+#define PDP_GRPH3SURF_GRPH3USELUT_LENGTH		(1)
+#define PDP_GRPH3SURF_GRPH3USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4SURF_OFFSET		(0x000C)
+
+/* PDP, GRPH4SURF, GRPH4PIXFMT
+*/
+#define PDP_GRPH4SURF_GRPH4PIXFMT_MASK		(0xF8000000)
+#define PDP_GRPH4SURF_GRPH4PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT		(27)
+#define PDP_GRPH4SURF_GRPH4PIXFMT_LENGTH		(5)
+#define PDP_GRPH4SURF_GRPH4PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USEGAMMA
+*/
+#define PDP_GRPH4SURF_GRPH4USEGAMMA_MASK		(0x04000000)
+#define PDP_GRPH4SURF_GRPH4USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_GRPH4SURF_GRPH4USEGAMMA_SHIFT		(26)
+#define PDP_GRPH4SURF_GRPH4USEGAMMA_LENGTH		(1)
+#define PDP_GRPH4SURF_GRPH4USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USECSC
+*/
+#define PDP_GRPH4SURF_GRPH4USECSC_MASK		(0x02000000)
+#define PDP_GRPH4SURF_GRPH4USECSC_LSBMASK		(0x00000001)
+#define PDP_GRPH4SURF_GRPH4USECSC_SHIFT		(25)
+#define PDP_GRPH4SURF_GRPH4USECSC_LENGTH		(1)
+#define PDP_GRPH4SURF_GRPH4USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4LUTRWCHOICE
+*/
+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_MASK		(0x01000000)
+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LSBMASK		(0x00000001)
+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SHIFT		(24)
+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LENGTH		(1)
+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USELUT
+*/
+#define PDP_GRPH4SURF_GRPH4USELUT_MASK		(0x00800000)
+#define PDP_GRPH4SURF_GRPH4USELUT_LSBMASK		(0x00000001)
+#define PDP_GRPH4SURF_GRPH4USELUT_SHIFT		(23)
+#define PDP_GRPH4SURF_GRPH4USELUT_LENGTH		(1)
+#define PDP_GRPH4SURF_GRPH4USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1SURF_OFFSET		(0x0010)
+
+/* PDP, VID1SURF, VID1PIXFMT
+*/
+#define PDP_VID1SURF_VID1PIXFMT_MASK		(0xF8000000)
+#define PDP_VID1SURF_VID1PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_VID1SURF_VID1PIXFMT_SHIFT		(27)
+#define PDP_VID1SURF_VID1PIXFMT_LENGTH		(5)
+#define PDP_VID1SURF_VID1PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEGAMMA
+*/
+#define PDP_VID1SURF_VID1USEGAMMA_MASK		(0x04000000)
+#define PDP_VID1SURF_VID1USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_VID1SURF_VID1USEGAMMA_SHIFT		(26)
+#define PDP_VID1SURF_VID1USEGAMMA_LENGTH		(1)
+#define PDP_VID1SURF_VID1USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USECSC
+*/
+#define PDP_VID1SURF_VID1USECSC_MASK		(0x02000000)
+#define PDP_VID1SURF_VID1USECSC_LSBMASK		(0x00000001)
+#define PDP_VID1SURF_VID1USECSC_SHIFT		(25)
+#define PDP_VID1SURF_VID1USECSC_LENGTH		(1)
+#define PDP_VID1SURF_VID1USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEI2P
+*/
+#define PDP_VID1SURF_VID1USEI2P_MASK		(0x01000000)
+#define PDP_VID1SURF_VID1USEI2P_LSBMASK		(0x00000001)
+#define PDP_VID1SURF_VID1USEI2P_SHIFT		(24)
+#define PDP_VID1SURF_VID1USEI2P_LENGTH		(1)
+#define PDP_VID1SURF_VID1USEI2P_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1COSITED
+*/
+#define PDP_VID1SURF_VID1COSITED_MASK		(0x00800000)
+#define PDP_VID1SURF_VID1COSITED_LSBMASK		(0x00000001)
+#define PDP_VID1SURF_VID1COSITED_SHIFT		(23)
+#define PDP_VID1SURF_VID1COSITED_LENGTH		(1)
+#define PDP_VID1SURF_VID1COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEHQCD
+*/
+#define PDP_VID1SURF_VID1USEHQCD_MASK		(0x00400000)
+#define PDP_VID1SURF_VID1USEHQCD_LSBMASK		(0x00000001)
+#define PDP_VID1SURF_VID1USEHQCD_SHIFT		(22)
+#define PDP_VID1SURF_VID1USEHQCD_LENGTH		(1)
+#define PDP_VID1SURF_VID1USEHQCD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEINSTREAM
+*/
+#define PDP_VID1SURF_VID1USEINSTREAM_MASK		(0x00200000)
+#define PDP_VID1SURF_VID1USEINSTREAM_LSBMASK		(0x00000001)
+#define PDP_VID1SURF_VID1USEINSTREAM_SHIFT		(21)
+#define PDP_VID1SURF_VID1USEINSTREAM_LENGTH		(1)
+#define PDP_VID1SURF_VID1USEINSTREAM_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2SURF_OFFSET		(0x0014)
+
+/* PDP, VID2SURF, VID2PIXFMT
+*/
+#define PDP_VID2SURF_VID2PIXFMT_MASK		(0xF8000000)
+#define PDP_VID2SURF_VID2PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_VID2SURF_VID2PIXFMT_SHIFT		(27)
+#define PDP_VID2SURF_VID2PIXFMT_LENGTH		(5)
+#define PDP_VID2SURF_VID2PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SURF, VID2COSITED
+*/
+#define PDP_VID2SURF_VID2COSITED_MASK		(0x00800000)
+#define PDP_VID2SURF_VID2COSITED_LSBMASK		(0x00000001)
+#define PDP_VID2SURF_VID2COSITED_SHIFT		(23)
+#define PDP_VID2SURF_VID2COSITED_LENGTH		(1)
+#define PDP_VID2SURF_VID2COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SURF, VID2USEGAMMA
+*/
+#define PDP_VID2SURF_VID2USEGAMMA_MASK		(0x04000000)
+#define PDP_VID2SURF_VID2USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_VID2SURF_VID2USEGAMMA_SHIFT		(26)
+#define PDP_VID2SURF_VID2USEGAMMA_LENGTH		(1)
+#define PDP_VID2SURF_VID2USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SURF, VID2USECSC
+*/
+#define PDP_VID2SURF_VID2USECSC_MASK		(0x02000000)
+#define PDP_VID2SURF_VID2USECSC_LSBMASK		(0x00000001)
+#define PDP_VID2SURF_VID2USECSC_SHIFT		(25)
+#define PDP_VID2SURF_VID2USECSC_LENGTH		(1)
+#define PDP_VID2SURF_VID2USECSC_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3SURF_OFFSET		(0x0018)
+
+/* PDP, VID3SURF, VID3PIXFMT
+*/
+#define PDP_VID3SURF_VID3PIXFMT_MASK		(0xF8000000)
+#define PDP_VID3SURF_VID3PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_VID3SURF_VID3PIXFMT_SHIFT		(27)
+#define PDP_VID3SURF_VID3PIXFMT_LENGTH		(5)
+#define PDP_VID3SURF_VID3PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SURF, VID3COSITED
+*/
+#define PDP_VID3SURF_VID3COSITED_MASK		(0x00800000)
+#define PDP_VID3SURF_VID3COSITED_LSBMASK		(0x00000001)
+#define PDP_VID3SURF_VID3COSITED_SHIFT		(23)
+#define PDP_VID3SURF_VID3COSITED_LENGTH		(1)
+#define PDP_VID3SURF_VID3COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SURF, VID3USEGAMMA
+*/
+#define PDP_VID3SURF_VID3USEGAMMA_MASK		(0x04000000)
+#define PDP_VID3SURF_VID3USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_VID3SURF_VID3USEGAMMA_SHIFT		(26)
+#define PDP_VID3SURF_VID3USEGAMMA_LENGTH		(1)
+#define PDP_VID3SURF_VID3USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SURF, VID3USECSC
+*/
+#define PDP_VID3SURF_VID3USECSC_MASK		(0x02000000)
+#define PDP_VID3SURF_VID3USECSC_LSBMASK		(0x00000001)
+#define PDP_VID3SURF_VID3USECSC_SHIFT		(25)
+#define PDP_VID3SURF_VID3USECSC_LENGTH		(1)
+#define PDP_VID3SURF_VID3USECSC_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4SURF_OFFSET		(0x001C)
+
+/* PDP, VID4SURF, VID4PIXFMT
+*/
+#define PDP_VID4SURF_VID4PIXFMT_MASK		(0xF8000000)
+#define PDP_VID4SURF_VID4PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_VID4SURF_VID4PIXFMT_SHIFT		(27)
+#define PDP_VID4SURF_VID4PIXFMT_LENGTH		(5)
+#define PDP_VID4SURF_VID4PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SURF, VID4COSITED
+*/
+#define PDP_VID4SURF_VID4COSITED_MASK		(0x00800000)
+#define PDP_VID4SURF_VID4COSITED_LSBMASK		(0x00000001)
+#define PDP_VID4SURF_VID4COSITED_SHIFT		(23)
+#define PDP_VID4SURF_VID4COSITED_LENGTH		(1)
+#define PDP_VID4SURF_VID4COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SURF, VID4USEGAMMA
+*/
+#define PDP_VID4SURF_VID4USEGAMMA_MASK		(0x04000000)
+#define PDP_VID4SURF_VID4USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_VID4SURF_VID4USEGAMMA_SHIFT		(26)
+#define PDP_VID4SURF_VID4USEGAMMA_LENGTH		(1)
+#define PDP_VID4SURF_VID4USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SURF, VID4USECSC
+*/
+#define PDP_VID4SURF_VID4USECSC_MASK		(0x02000000)
+#define PDP_VID4SURF_VID4USECSC_LSBMASK		(0x00000001)
+#define PDP_VID4SURF_VID4USECSC_SHIFT		(25)
+#define PDP_VID4SURF_VID4USECSC_LENGTH		(1)
+#define PDP_VID4SURF_VID4USECSC_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1CTRL_OFFSET		(0x0020)
+
+/* PDP, GRPH1CTRL, GRPH1STREN
+*/
+#define PDP_GRPH1CTRL_GRPH1STREN_MASK		(0x80000000)
+#define PDP_GRPH1CTRL_GRPH1STREN_LSBMASK		(0x00000001)
+#define PDP_GRPH1CTRL_GRPH1STREN_SHIFT		(31)
+#define PDP_GRPH1CTRL_GRPH1STREN_LENGTH		(1)
+#define PDP_GRPH1CTRL_GRPH1STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1CKEYEN
+*/
+#define PDP_GRPH1CTRL_GRPH1CKEYEN_MASK		(0x40000000)
+#define PDP_GRPH1CTRL_GRPH1CKEYEN_LSBMASK		(0x00000001)
+#define PDP_GRPH1CTRL_GRPH1CKEYEN_SHIFT		(30)
+#define PDP_GRPH1CTRL_GRPH1CKEYEN_LENGTH		(1)
+#define PDP_GRPH1CTRL_GRPH1CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1CKEYSRC
+*/
+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_MASK		(0x20000000)
+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_SHIFT		(29)
+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_LENGTH		(1)
+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1BLEND
+*/
+#define PDP_GRPH1CTRL_GRPH1BLEND_MASK		(0x18000000)
+#define PDP_GRPH1CTRL_GRPH1BLEND_LSBMASK		(0x00000003)
+#define PDP_GRPH1CTRL_GRPH1BLEND_SHIFT		(27)
+#define PDP_GRPH1CTRL_GRPH1BLEND_LENGTH		(2)
+#define PDP_GRPH1CTRL_GRPH1BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1BLENDPOS
+*/
+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK		(0x07000000)
+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT		(24)
+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_LENGTH		(3)
+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1DITHEREN
+*/
+#define PDP_GRPH1CTRL_GRPH1DITHEREN_MASK		(0x00800000)
+#define PDP_GRPH1CTRL_GRPH1DITHEREN_LSBMASK		(0x00000001)
+#define PDP_GRPH1CTRL_GRPH1DITHEREN_SHIFT		(23)
+#define PDP_GRPH1CTRL_GRPH1DITHEREN_LENGTH		(1)
+#define PDP_GRPH1CTRL_GRPH1DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2CTRL_OFFSET		(0x0024)
+
+/* PDP, GRPH2CTRL, GRPH2STREN
+*/
+#define PDP_GRPH2CTRL_GRPH2STREN_MASK		(0x80000000)
+#define PDP_GRPH2CTRL_GRPH2STREN_LSBMASK		(0x00000001)
+#define PDP_GRPH2CTRL_GRPH2STREN_SHIFT		(31)
+#define PDP_GRPH2CTRL_GRPH2STREN_LENGTH		(1)
+#define PDP_GRPH2CTRL_GRPH2STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2CKEYEN
+*/
+#define PDP_GRPH2CTRL_GRPH2CKEYEN_MASK		(0x40000000)
+#define PDP_GRPH2CTRL_GRPH2CKEYEN_LSBMASK		(0x00000001)
+#define PDP_GRPH2CTRL_GRPH2CKEYEN_SHIFT		(30)
+#define PDP_GRPH2CTRL_GRPH2CKEYEN_LENGTH		(1)
+#define PDP_GRPH2CTRL_GRPH2CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2CKEYSRC
+*/
+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_MASK		(0x20000000)
+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_SHIFT		(29)
+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_LENGTH		(1)
+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2BLEND
+*/
+#define PDP_GRPH2CTRL_GRPH2BLEND_MASK		(0x18000000)
+#define PDP_GRPH2CTRL_GRPH2BLEND_LSBMASK		(0x00000003)
+#define PDP_GRPH2CTRL_GRPH2BLEND_SHIFT		(27)
+#define PDP_GRPH2CTRL_GRPH2BLEND_LENGTH		(2)
+#define PDP_GRPH2CTRL_GRPH2BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2BLENDPOS
+*/
+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK		(0x07000000)
+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT		(24)
+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_LENGTH		(3)
+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2DITHEREN
+*/
+#define PDP_GRPH2CTRL_GRPH2DITHEREN_MASK		(0x00800000)
+#define PDP_GRPH2CTRL_GRPH2DITHEREN_LSBMASK		(0x00000001)
+#define PDP_GRPH2CTRL_GRPH2DITHEREN_SHIFT		(23)
+#define PDP_GRPH2CTRL_GRPH2DITHEREN_LENGTH		(1)
+#define PDP_GRPH2CTRL_GRPH2DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3CTRL_OFFSET		(0x0028)
+
+/* PDP, GRPH3CTRL, GRPH3STREN
+*/
+#define PDP_GRPH3CTRL_GRPH3STREN_MASK		(0x80000000)
+#define PDP_GRPH3CTRL_GRPH3STREN_LSBMASK		(0x00000001)
+#define PDP_GRPH3CTRL_GRPH3STREN_SHIFT		(31)
+#define PDP_GRPH3CTRL_GRPH3STREN_LENGTH		(1)
+#define PDP_GRPH3CTRL_GRPH3STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3CKEYEN
+*/
+#define PDP_GRPH3CTRL_GRPH3CKEYEN_MASK		(0x40000000)
+#define PDP_GRPH3CTRL_GRPH3CKEYEN_LSBMASK		(0x00000001)
+#define PDP_GRPH3CTRL_GRPH3CKEYEN_SHIFT		(30)
+#define PDP_GRPH3CTRL_GRPH3CKEYEN_LENGTH		(1)
+#define PDP_GRPH3CTRL_GRPH3CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3CKEYSRC
+*/
+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_MASK		(0x20000000)
+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_SHIFT		(29)
+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_LENGTH		(1)
+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3BLEND
+*/
+#define PDP_GRPH3CTRL_GRPH3BLEND_MASK		(0x18000000)
+#define PDP_GRPH3CTRL_GRPH3BLEND_LSBMASK		(0x00000003)
+#define PDP_GRPH3CTRL_GRPH3BLEND_SHIFT		(27)
+#define PDP_GRPH3CTRL_GRPH3BLEND_LENGTH		(2)
+#define PDP_GRPH3CTRL_GRPH3BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3BLENDPOS
+*/
+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_MASK		(0x07000000)
+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_SHIFT		(24)
+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_LENGTH		(3)
+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3DITHEREN
+*/
+#define PDP_GRPH3CTRL_GRPH3DITHEREN_MASK		(0x00800000)
+#define PDP_GRPH3CTRL_GRPH3DITHEREN_LSBMASK		(0x00000001)
+#define PDP_GRPH3CTRL_GRPH3DITHEREN_SHIFT		(23)
+#define PDP_GRPH3CTRL_GRPH3DITHEREN_LENGTH		(1)
+#define PDP_GRPH3CTRL_GRPH3DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4CTRL_OFFSET		(0x002C)
+
+/* PDP, GRPH4CTRL, GRPH4STREN
+*/
+#define PDP_GRPH4CTRL_GRPH4STREN_MASK		(0x80000000)
+#define PDP_GRPH4CTRL_GRPH4STREN_LSBMASK		(0x00000001)
+#define PDP_GRPH4CTRL_GRPH4STREN_SHIFT		(31)
+#define PDP_GRPH4CTRL_GRPH4STREN_LENGTH		(1)
+#define PDP_GRPH4CTRL_GRPH4STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4CKEYEN
+*/
+#define PDP_GRPH4CTRL_GRPH4CKEYEN_MASK		(0x40000000)
+#define PDP_GRPH4CTRL_GRPH4CKEYEN_LSBMASK		(0x00000001)
+#define PDP_GRPH4CTRL_GRPH4CKEYEN_SHIFT		(30)
+#define PDP_GRPH4CTRL_GRPH4CKEYEN_LENGTH		(1)
+#define PDP_GRPH4CTRL_GRPH4CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4CKEYSRC
+*/
+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_MASK		(0x20000000)
+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_SHIFT		(29)
+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_LENGTH		(1)
+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4BLEND
+*/
+#define PDP_GRPH4CTRL_GRPH4BLEND_MASK		(0x18000000)
+#define PDP_GRPH4CTRL_GRPH4BLEND_LSBMASK		(0x00000003)
+#define PDP_GRPH4CTRL_GRPH4BLEND_SHIFT		(27)
+#define PDP_GRPH4CTRL_GRPH4BLEND_LENGTH		(2)
+#define PDP_GRPH4CTRL_GRPH4BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4BLENDPOS
+*/
+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK		(0x07000000)
+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT		(24)
+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_LENGTH		(3)
+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4DITHEREN
+*/
+#define PDP_GRPH4CTRL_GRPH4DITHEREN_MASK		(0x00800000)
+#define PDP_GRPH4CTRL_GRPH4DITHEREN_LSBMASK		(0x00000001)
+#define PDP_GRPH4CTRL_GRPH4DITHEREN_SHIFT		(23)
+#define PDP_GRPH4CTRL_GRPH4DITHEREN_LENGTH		(1)
+#define PDP_GRPH4CTRL_GRPH4DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1CTRL_OFFSET		(0x0030)
+
+/* PDP, VID1CTRL, VID1STREN
+*/
+#define PDP_VID1CTRL_VID1STREN_MASK		(0x80000000)
+#define PDP_VID1CTRL_VID1STREN_LSBMASK		(0x00000001)
+#define PDP_VID1CTRL_VID1STREN_SHIFT		(31)
+#define PDP_VID1CTRL_VID1STREN_LENGTH		(1)
+#define PDP_VID1CTRL_VID1STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1CKEYEN
+*/
+#define PDP_VID1CTRL_VID1CKEYEN_MASK		(0x40000000)
+#define PDP_VID1CTRL_VID1CKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID1CTRL_VID1CKEYEN_SHIFT		(30)
+#define PDP_VID1CTRL_VID1CKEYEN_LENGTH		(1)
+#define PDP_VID1CTRL_VID1CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1CKEYSRC
+*/
+#define PDP_VID1CTRL_VID1CKEYSRC_MASK		(0x20000000)
+#define PDP_VID1CTRL_VID1CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_VID1CTRL_VID1CKEYSRC_SHIFT		(29)
+#define PDP_VID1CTRL_VID1CKEYSRC_LENGTH		(1)
+#define PDP_VID1CTRL_VID1CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1BLEND
+*/
+#define PDP_VID1CTRL_VID1BLEND_MASK		(0x18000000)
+#define PDP_VID1CTRL_VID1BLEND_LSBMASK		(0x00000003)
+#define PDP_VID1CTRL_VID1BLEND_SHIFT		(27)
+#define PDP_VID1CTRL_VID1BLEND_LENGTH		(2)
+#define PDP_VID1CTRL_VID1BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1BLENDPOS
+*/
+#define PDP_VID1CTRL_VID1BLENDPOS_MASK		(0x07000000)
+#define PDP_VID1CTRL_VID1BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_VID1CTRL_VID1BLENDPOS_SHIFT		(24)
+#define PDP_VID1CTRL_VID1BLENDPOS_LENGTH		(3)
+#define PDP_VID1CTRL_VID1BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1DITHEREN
+*/
+#define PDP_VID1CTRL_VID1DITHEREN_MASK		(0x00800000)
+#define PDP_VID1CTRL_VID1DITHEREN_LSBMASK		(0x00000001)
+#define PDP_VID1CTRL_VID1DITHEREN_SHIFT		(23)
+#define PDP_VID1CTRL_VID1DITHEREN_LENGTH		(1)
+#define PDP_VID1CTRL_VID1DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2CTRL_OFFSET		(0x0034)
+
+/* PDP, VID2CTRL, VID2STREN
+*/
+#define PDP_VID2CTRL_VID2STREN_MASK		(0x80000000)
+#define PDP_VID2CTRL_VID2STREN_LSBMASK		(0x00000001)
+#define PDP_VID2CTRL_VID2STREN_SHIFT		(31)
+#define PDP_VID2CTRL_VID2STREN_LENGTH		(1)
+#define PDP_VID2CTRL_VID2STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2CKEYEN
+*/
+#define PDP_VID2CTRL_VID2CKEYEN_MASK		(0x40000000)
+#define PDP_VID2CTRL_VID2CKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID2CTRL_VID2CKEYEN_SHIFT		(30)
+#define PDP_VID2CTRL_VID2CKEYEN_LENGTH		(1)
+#define PDP_VID2CTRL_VID2CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2CKEYSRC
+*/
+#define PDP_VID2CTRL_VID2CKEYSRC_MASK		(0x20000000)
+#define PDP_VID2CTRL_VID2CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_VID2CTRL_VID2CKEYSRC_SHIFT		(29)
+#define PDP_VID2CTRL_VID2CKEYSRC_LENGTH		(1)
+#define PDP_VID2CTRL_VID2CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2BLEND
+*/
+#define PDP_VID2CTRL_VID2BLEND_MASK		(0x18000000)
+#define PDP_VID2CTRL_VID2BLEND_LSBMASK		(0x00000003)
+#define PDP_VID2CTRL_VID2BLEND_SHIFT		(27)
+#define PDP_VID2CTRL_VID2BLEND_LENGTH		(2)
+#define PDP_VID2CTRL_VID2BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2BLENDPOS
+*/
+#define PDP_VID2CTRL_VID2BLENDPOS_MASK		(0x07000000)
+#define PDP_VID2CTRL_VID2BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_VID2CTRL_VID2BLENDPOS_SHIFT		(24)
+#define PDP_VID2CTRL_VID2BLENDPOS_LENGTH		(3)
+#define PDP_VID2CTRL_VID2BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2DITHEREN
+*/
+#define PDP_VID2CTRL_VID2DITHEREN_MASK		(0x00800000)
+#define PDP_VID2CTRL_VID2DITHEREN_LSBMASK		(0x00000001)
+#define PDP_VID2CTRL_VID2DITHEREN_SHIFT		(23)
+#define PDP_VID2CTRL_VID2DITHEREN_LENGTH		(1)
+#define PDP_VID2CTRL_VID2DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3CTRL_OFFSET		(0x0038)
+
+/* PDP, VID3CTRL, VID3STREN
+*/
+#define PDP_VID3CTRL_VID3STREN_MASK		(0x80000000)
+#define PDP_VID3CTRL_VID3STREN_LSBMASK		(0x00000001)
+#define PDP_VID3CTRL_VID3STREN_SHIFT		(31)
+#define PDP_VID3CTRL_VID3STREN_LENGTH		(1)
+#define PDP_VID3CTRL_VID3STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3CKEYEN
+*/
+#define PDP_VID3CTRL_VID3CKEYEN_MASK		(0x40000000)
+#define PDP_VID3CTRL_VID3CKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID3CTRL_VID3CKEYEN_SHIFT		(30)
+#define PDP_VID3CTRL_VID3CKEYEN_LENGTH		(1)
+#define PDP_VID3CTRL_VID3CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3CKEYSRC
+*/
+#define PDP_VID3CTRL_VID3CKEYSRC_MASK		(0x20000000)
+#define PDP_VID3CTRL_VID3CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_VID3CTRL_VID3CKEYSRC_SHIFT		(29)
+#define PDP_VID3CTRL_VID3CKEYSRC_LENGTH		(1)
+#define PDP_VID3CTRL_VID3CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3BLEND
+*/
+#define PDP_VID3CTRL_VID3BLEND_MASK		(0x18000000)
+#define PDP_VID3CTRL_VID3BLEND_LSBMASK		(0x00000003)
+#define PDP_VID3CTRL_VID3BLEND_SHIFT		(27)
+#define PDP_VID3CTRL_VID3BLEND_LENGTH		(2)
+#define PDP_VID3CTRL_VID3BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3BLENDPOS
+*/
+#define PDP_VID3CTRL_VID3BLENDPOS_MASK		(0x07000000)
+#define PDP_VID3CTRL_VID3BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_VID3CTRL_VID3BLENDPOS_SHIFT		(24)
+#define PDP_VID3CTRL_VID3BLENDPOS_LENGTH		(3)
+#define PDP_VID3CTRL_VID3BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3DITHEREN
+*/
+#define PDP_VID3CTRL_VID3DITHEREN_MASK		(0x00800000)
+#define PDP_VID3CTRL_VID3DITHEREN_LSBMASK		(0x00000001)
+#define PDP_VID3CTRL_VID3DITHEREN_SHIFT		(23)
+#define PDP_VID3CTRL_VID3DITHEREN_LENGTH		(1)
+#define PDP_VID3CTRL_VID3DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4CTRL_OFFSET		(0x003C)
+
+/* PDP, VID4CTRL, VID4STREN
+*/
+#define PDP_VID4CTRL_VID4STREN_MASK		(0x80000000)
+#define PDP_VID4CTRL_VID4STREN_LSBMASK		(0x00000001)
+#define PDP_VID4CTRL_VID4STREN_SHIFT		(31)
+#define PDP_VID4CTRL_VID4STREN_LENGTH		(1)
+#define PDP_VID4CTRL_VID4STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4CKEYEN
+*/
+#define PDP_VID4CTRL_VID4CKEYEN_MASK		(0x40000000)
+#define PDP_VID4CTRL_VID4CKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID4CTRL_VID4CKEYEN_SHIFT		(30)
+#define PDP_VID4CTRL_VID4CKEYEN_LENGTH		(1)
+#define PDP_VID4CTRL_VID4CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4CKEYSRC
+*/
+#define PDP_VID4CTRL_VID4CKEYSRC_MASK		(0x20000000)
+#define PDP_VID4CTRL_VID4CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_VID4CTRL_VID4CKEYSRC_SHIFT		(29)
+#define PDP_VID4CTRL_VID4CKEYSRC_LENGTH		(1)
+#define PDP_VID4CTRL_VID4CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4BLEND
+*/
+#define PDP_VID4CTRL_VID4BLEND_MASK		(0x18000000)
+#define PDP_VID4CTRL_VID4BLEND_LSBMASK		(0x00000003)
+#define PDP_VID4CTRL_VID4BLEND_SHIFT		(27)
+#define PDP_VID4CTRL_VID4BLEND_LENGTH		(2)
+#define PDP_VID4CTRL_VID4BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4BLENDPOS
+*/
+#define PDP_VID4CTRL_VID4BLENDPOS_MASK		(0x07000000)
+#define PDP_VID4CTRL_VID4BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_VID4CTRL_VID4BLENDPOS_SHIFT		(24)
+#define PDP_VID4CTRL_VID4BLENDPOS_LENGTH		(3)
+#define PDP_VID4CTRL_VID4BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4DITHEREN
+*/
+#define PDP_VID4CTRL_VID4DITHEREN_MASK		(0x00800000)
+#define PDP_VID4CTRL_VID4DITHEREN_LSBMASK		(0x00000001)
+#define PDP_VID4CTRL_VID4DITHEREN_SHIFT		(23)
+#define PDP_VID4CTRL_VID4DITHEREN_LENGTH		(1)
+#define PDP_VID4CTRL_VID4DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1UCTRL_OFFSET		(0x0050)
+
+/* PDP, VID1UCTRL, VID1UVHALFSTR
+*/
+#define PDP_VID1UCTRL_VID1UVHALFSTR_MASK		(0xC0000000)
+#define PDP_VID1UCTRL_VID1UVHALFSTR_LSBMASK		(0x00000003)
+#define PDP_VID1UCTRL_VID1UVHALFSTR_SHIFT		(30)
+#define PDP_VID1UCTRL_VID1UVHALFSTR_LENGTH		(2)
+#define PDP_VID1UCTRL_VID1UVHALFSTR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2UCTRL_OFFSET		(0x0054)
+
+/* PDP, VID2UCTRL, VID2UVHALFSTR
+*/
+#define PDP_VID2UCTRL_VID2UVHALFSTR_MASK		(0xC0000000)
+#define PDP_VID2UCTRL_VID2UVHALFSTR_LSBMASK		(0x00000003)
+#define PDP_VID2UCTRL_VID2UVHALFSTR_SHIFT		(30)
+#define PDP_VID2UCTRL_VID2UVHALFSTR_LENGTH		(2)
+#define PDP_VID2UCTRL_VID2UVHALFSTR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3UCTRL_OFFSET		(0x0058)
+
+/* PDP, VID3UCTRL, VID3UVHALFSTR
+*/
+#define PDP_VID3UCTRL_VID3UVHALFSTR_MASK		(0xC0000000)
+#define PDP_VID3UCTRL_VID3UVHALFSTR_LSBMASK		(0x00000003)
+#define PDP_VID3UCTRL_VID3UVHALFSTR_SHIFT		(30)
+#define PDP_VID3UCTRL_VID3UVHALFSTR_LENGTH		(2)
+#define PDP_VID3UCTRL_VID3UVHALFSTR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4UCTRL_OFFSET		(0x005C)
+
+/* PDP, VID4UCTRL, VID4UVHALFSTR
+*/
+#define PDP_VID4UCTRL_VID4UVHALFSTR_MASK		(0xC0000000)
+#define PDP_VID4UCTRL_VID4UVHALFSTR_LSBMASK		(0x00000003)
+#define PDP_VID4UCTRL_VID4UVHALFSTR_SHIFT		(30)
+#define PDP_VID4UCTRL_VID4UVHALFSTR_LENGTH		(2)
+#define PDP_VID4UCTRL_VID4UVHALFSTR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1STRIDE_OFFSET		(0x0060)
+
+/* PDP, GRPH1STRIDE, GRPH1STRIDE
+*/
+#define PDP_GRPH1STRIDE_GRPH1STRIDE_MASK		(0xFFC00000)
+#define PDP_GRPH1STRIDE_GRPH1STRIDE_LSBMASK		(0x000003FF)
+#define PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT		(22)
+#define PDP_GRPH1STRIDE_GRPH1STRIDE_LENGTH		(10)
+#define PDP_GRPH1STRIDE_GRPH1STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2STRIDE_OFFSET		(0x0064)
+
+/* PDP, GRPH2STRIDE, GRPH2STRIDE
+*/
+#define PDP_GRPH2STRIDE_GRPH2STRIDE_MASK		(0xFFC00000)
+#define PDP_GRPH2STRIDE_GRPH2STRIDE_LSBMASK		(0x000003FF)
+#define PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT		(22)
+#define PDP_GRPH2STRIDE_GRPH2STRIDE_LENGTH		(10)
+#define PDP_GRPH2STRIDE_GRPH2STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3STRIDE_OFFSET		(0x0068)
+
+/* PDP, GRPH3STRIDE, GRPH3STRIDE
+*/
+#define PDP_GRPH3STRIDE_GRPH3STRIDE_MASK		(0xFFC00000)
+#define PDP_GRPH3STRIDE_GRPH3STRIDE_LSBMASK		(0x000003FF)
+#define PDP_GRPH3STRIDE_GRPH3STRIDE_SHIFT		(22)
+#define PDP_GRPH3STRIDE_GRPH3STRIDE_LENGTH		(10)
+#define PDP_GRPH3STRIDE_GRPH3STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4STRIDE_OFFSET		(0x006C)
+
+/* PDP, GRPH4STRIDE, GRPH4STRIDE
+*/
+#define PDP_GRPH4STRIDE_GRPH4STRIDE_MASK		(0xFFC00000)
+#define PDP_GRPH4STRIDE_GRPH4STRIDE_LSBMASK		(0x000003FF)
+#define PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT		(22)
+#define PDP_GRPH4STRIDE_GRPH4STRIDE_LENGTH		(10)
+#define PDP_GRPH4STRIDE_GRPH4STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1STRIDE_OFFSET		(0x0070)
+
+/* PDP, VID1STRIDE, VID1STRIDE
+*/
+#define PDP_VID1STRIDE_VID1STRIDE_MASK		(0xFFC00000)
+#define PDP_VID1STRIDE_VID1STRIDE_LSBMASK		(0x000003FF)
+#define PDP_VID1STRIDE_VID1STRIDE_SHIFT		(22)
+#define PDP_VID1STRIDE_VID1STRIDE_LENGTH		(10)
+#define PDP_VID1STRIDE_VID1STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2STRIDE_OFFSET		(0x0074)
+
+/* PDP, VID2STRIDE, VID2STRIDE
+*/
+#define PDP_VID2STRIDE_VID2STRIDE_MASK		(0xFFC00000)
+#define PDP_VID2STRIDE_VID2STRIDE_LSBMASK		(0x000003FF)
+#define PDP_VID2STRIDE_VID2STRIDE_SHIFT		(22)
+#define PDP_VID2STRIDE_VID2STRIDE_LENGTH		(10)
+#define PDP_VID2STRIDE_VID2STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3STRIDE_OFFSET		(0x0078)
+
+/* PDP, VID3STRIDE, VID3STRIDE
+*/
+#define PDP_VID3STRIDE_VID3STRIDE_MASK		(0xFFC00000)
+#define PDP_VID3STRIDE_VID3STRIDE_LSBMASK		(0x000003FF)
+#define PDP_VID3STRIDE_VID3STRIDE_SHIFT		(22)
+#define PDP_VID3STRIDE_VID3STRIDE_LENGTH		(10)
+#define PDP_VID3STRIDE_VID3STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4STRIDE_OFFSET		(0x007C)
+
+/* PDP, VID4STRIDE, VID4STRIDE
+*/
+#define PDP_VID4STRIDE_VID4STRIDE_MASK		(0xFFC00000)
+#define PDP_VID4STRIDE_VID4STRIDE_LSBMASK		(0x000003FF)
+#define PDP_VID4STRIDE_VID4STRIDE_SHIFT		(22)
+#define PDP_VID4STRIDE_VID4STRIDE_LENGTH		(10)
+#define PDP_VID4STRIDE_VID4STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1SIZE_OFFSET		(0x0080)
+
+/* PDP, GRPH1SIZE, GRPH1WIDTH
+*/
+#define PDP_GRPH1SIZE_GRPH1WIDTH_MASK		(0x0FFF0000)
+#define PDP_GRPH1SIZE_GRPH1WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT		(16)
+#define PDP_GRPH1SIZE_GRPH1WIDTH_LENGTH		(12)
+#define PDP_GRPH1SIZE_GRPH1WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SIZE, GRPH1HEIGHT
+*/
+#define PDP_GRPH1SIZE_GRPH1HEIGHT_MASK		(0x00000FFF)
+#define PDP_GRPH1SIZE_GRPH1HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT		(0)
+#define PDP_GRPH1SIZE_GRPH1HEIGHT_LENGTH		(12)
+#define PDP_GRPH1SIZE_GRPH1HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2SIZE_OFFSET		(0x0084)
+
+/* PDP, GRPH2SIZE, GRPH2WIDTH
+*/
+#define PDP_GRPH2SIZE_GRPH2WIDTH_MASK		(0x0FFF0000)
+#define PDP_GRPH2SIZE_GRPH2WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT		(16)
+#define PDP_GRPH2SIZE_GRPH2WIDTH_LENGTH		(12)
+#define PDP_GRPH2SIZE_GRPH2WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SIZE, GRPH2HEIGHT
+*/
+#define PDP_GRPH2SIZE_GRPH2HEIGHT_MASK		(0x00000FFF)
+#define PDP_GRPH2SIZE_GRPH2HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT		(0)
+#define PDP_GRPH2SIZE_GRPH2HEIGHT_LENGTH		(12)
+#define PDP_GRPH2SIZE_GRPH2HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3SIZE_OFFSET		(0x0088)
+
+/* PDP, GRPH3SIZE, GRPH3WIDTH
+*/
+#define PDP_GRPH3SIZE_GRPH3WIDTH_MASK		(0x0FFF0000)
+#define PDP_GRPH3SIZE_GRPH3WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_GRPH3SIZE_GRPH3WIDTH_SHIFT		(16)
+#define PDP_GRPH3SIZE_GRPH3WIDTH_LENGTH		(12)
+#define PDP_GRPH3SIZE_GRPH3WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SIZE, GRPH3HEIGHT
+*/
+#define PDP_GRPH3SIZE_GRPH3HEIGHT_MASK		(0x00000FFF)
+#define PDP_GRPH3SIZE_GRPH3HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_GRPH3SIZE_GRPH3HEIGHT_SHIFT		(0)
+#define PDP_GRPH3SIZE_GRPH3HEIGHT_LENGTH		(12)
+#define PDP_GRPH3SIZE_GRPH3HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4SIZE_OFFSET		(0x008C)
+
+/* PDP, GRPH4SIZE, GRPH4WIDTH
+*/
+#define PDP_GRPH4SIZE_GRPH4WIDTH_MASK		(0x0FFF0000)
+#define PDP_GRPH4SIZE_GRPH4WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT		(16)
+#define PDP_GRPH4SIZE_GRPH4WIDTH_LENGTH		(12)
+#define PDP_GRPH4SIZE_GRPH4WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SIZE, GRPH4HEIGHT
+*/
+#define PDP_GRPH4SIZE_GRPH4HEIGHT_MASK		(0x00000FFF)
+#define PDP_GRPH4SIZE_GRPH4HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT		(0)
+#define PDP_GRPH4SIZE_GRPH4HEIGHT_LENGTH		(12)
+#define PDP_GRPH4SIZE_GRPH4HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1SIZE_OFFSET		(0x0090)
+
+/* PDP, VID1SIZE, VID1WIDTH
+*/
+#define PDP_VID1SIZE_VID1WIDTH_MASK		(0x0FFF0000)
+#define PDP_VID1SIZE_VID1WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID1SIZE_VID1WIDTH_SHIFT		(16)
+#define PDP_VID1SIZE_VID1WIDTH_LENGTH		(12)
+#define PDP_VID1SIZE_VID1WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SIZE, VID1HEIGHT
+*/
+#define PDP_VID1SIZE_VID1HEIGHT_MASK		(0x00000FFF)
+#define PDP_VID1SIZE_VID1HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID1SIZE_VID1HEIGHT_SHIFT		(0)
+#define PDP_VID1SIZE_VID1HEIGHT_LENGTH		(12)
+#define PDP_VID1SIZE_VID1HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2SIZE_OFFSET		(0x0094)
+
+/* PDP, VID2SIZE, VID2WIDTH
+*/
+#define PDP_VID2SIZE_VID2WIDTH_MASK		(0x0FFF0000)
+#define PDP_VID2SIZE_VID2WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID2SIZE_VID2WIDTH_SHIFT		(16)
+#define PDP_VID2SIZE_VID2WIDTH_LENGTH		(12)
+#define PDP_VID2SIZE_VID2WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SIZE, VID2HEIGHT
+*/
+#define PDP_VID2SIZE_VID2HEIGHT_MASK		(0x00000FFF)
+#define PDP_VID2SIZE_VID2HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID2SIZE_VID2HEIGHT_SHIFT		(0)
+#define PDP_VID2SIZE_VID2HEIGHT_LENGTH		(12)
+#define PDP_VID2SIZE_VID2HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3SIZE_OFFSET		(0x0098)
+
+/* PDP, VID3SIZE, VID3WIDTH
+*/
+#define PDP_VID3SIZE_VID3WIDTH_MASK		(0x0FFF0000)
+#define PDP_VID3SIZE_VID3WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID3SIZE_VID3WIDTH_SHIFT		(16)
+#define PDP_VID3SIZE_VID3WIDTH_LENGTH		(12)
+#define PDP_VID3SIZE_VID3WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SIZE, VID3HEIGHT
+*/
+#define PDP_VID3SIZE_VID3HEIGHT_MASK		(0x00000FFF)
+#define PDP_VID3SIZE_VID3HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID3SIZE_VID3HEIGHT_SHIFT		(0)
+#define PDP_VID3SIZE_VID3HEIGHT_LENGTH		(12)
+#define PDP_VID3SIZE_VID3HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4SIZE_OFFSET		(0x009C)
+
+/* PDP, VID4SIZE, VID4WIDTH
+*/
+#define PDP_VID4SIZE_VID4WIDTH_MASK		(0x0FFF0000)
+#define PDP_VID4SIZE_VID4WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID4SIZE_VID4WIDTH_SHIFT		(16)
+#define PDP_VID4SIZE_VID4WIDTH_LENGTH		(12)
+#define PDP_VID4SIZE_VID4WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SIZE, VID4HEIGHT
+*/
+#define PDP_VID4SIZE_VID4HEIGHT_MASK		(0x00000FFF)
+#define PDP_VID4SIZE_VID4HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID4SIZE_VID4HEIGHT_SHIFT		(0)
+#define PDP_VID4SIZE_VID4HEIGHT_LENGTH		(12)
+#define PDP_VID4SIZE_VID4HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1POSN_OFFSET		(0x00A0)
+
+/* PDP, GRPH1POSN, GRPH1XSTART
+*/
+#define PDP_GRPH1POSN_GRPH1XSTART_MASK		(0x0FFF0000)
+#define PDP_GRPH1POSN_GRPH1XSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH1POSN_GRPH1XSTART_SHIFT		(16)
+#define PDP_GRPH1POSN_GRPH1XSTART_LENGTH		(12)
+#define PDP_GRPH1POSN_GRPH1XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1POSN, GRPH1YSTART
+*/
+#define PDP_GRPH1POSN_GRPH1YSTART_MASK		(0x00000FFF)
+#define PDP_GRPH1POSN_GRPH1YSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH1POSN_GRPH1YSTART_SHIFT		(0)
+#define PDP_GRPH1POSN_GRPH1YSTART_LENGTH		(12)
+#define PDP_GRPH1POSN_GRPH1YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2POSN_OFFSET		(0x00A4)
+
+/* PDP, GRPH2POSN, GRPH2XSTART
+*/
+#define PDP_GRPH2POSN_GRPH2XSTART_MASK		(0x0FFF0000)
+#define PDP_GRPH2POSN_GRPH2XSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH2POSN_GRPH2XSTART_SHIFT		(16)
+#define PDP_GRPH2POSN_GRPH2XSTART_LENGTH		(12)
+#define PDP_GRPH2POSN_GRPH2XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2POSN, GRPH2YSTART
+*/
+#define PDP_GRPH2POSN_GRPH2YSTART_MASK		(0x00000FFF)
+#define PDP_GRPH2POSN_GRPH2YSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH2POSN_GRPH2YSTART_SHIFT		(0)
+#define PDP_GRPH2POSN_GRPH2YSTART_LENGTH		(12)
+#define PDP_GRPH2POSN_GRPH2YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3POSN_OFFSET		(0x00A8)
+
+/* PDP, GRPH3POSN, GRPH3XSTART
+*/
+#define PDP_GRPH3POSN_GRPH3XSTART_MASK		(0x0FFF0000)
+#define PDP_GRPH3POSN_GRPH3XSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH3POSN_GRPH3XSTART_SHIFT		(16)
+#define PDP_GRPH3POSN_GRPH3XSTART_LENGTH		(12)
+#define PDP_GRPH3POSN_GRPH3XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3POSN, GRPH3YSTART
+*/
+#define PDP_GRPH3POSN_GRPH3YSTART_MASK		(0x00000FFF)
+#define PDP_GRPH3POSN_GRPH3YSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH3POSN_GRPH3YSTART_SHIFT		(0)
+#define PDP_GRPH3POSN_GRPH3YSTART_LENGTH		(12)
+#define PDP_GRPH3POSN_GRPH3YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4POSN_OFFSET		(0x00AC)
+
+/* PDP, GRPH4POSN, GRPH4XSTART
+*/
+#define PDP_GRPH4POSN_GRPH4XSTART_MASK		(0x0FFF0000)
+#define PDP_GRPH4POSN_GRPH4XSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH4POSN_GRPH4XSTART_SHIFT		(16)
+#define PDP_GRPH4POSN_GRPH4XSTART_LENGTH		(12)
+#define PDP_GRPH4POSN_GRPH4XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4POSN, GRPH4YSTART
+*/
+#define PDP_GRPH4POSN_GRPH4YSTART_MASK		(0x00000FFF)
+#define PDP_GRPH4POSN_GRPH4YSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH4POSN_GRPH4YSTART_SHIFT		(0)
+#define PDP_GRPH4POSN_GRPH4YSTART_LENGTH		(12)
+#define PDP_GRPH4POSN_GRPH4YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1POSN_OFFSET		(0x00B0)
+
+/* PDP, VID1POSN, VID1XSTART
+*/
+#define PDP_VID1POSN_VID1XSTART_MASK		(0x0FFF0000)
+#define PDP_VID1POSN_VID1XSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID1POSN_VID1XSTART_SHIFT		(16)
+#define PDP_VID1POSN_VID1XSTART_LENGTH		(12)
+#define PDP_VID1POSN_VID1XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1POSN, VID1YSTART
+*/
+#define PDP_VID1POSN_VID1YSTART_MASK		(0x00000FFF)
+#define PDP_VID1POSN_VID1YSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID1POSN_VID1YSTART_SHIFT		(0)
+#define PDP_VID1POSN_VID1YSTART_LENGTH		(12)
+#define PDP_VID1POSN_VID1YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2POSN_OFFSET		(0x00B4)
+
+/* PDP, VID2POSN, VID2XSTART
+*/
+#define PDP_VID2POSN_VID2XSTART_MASK		(0x0FFF0000)
+#define PDP_VID2POSN_VID2XSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID2POSN_VID2XSTART_SHIFT		(16)
+#define PDP_VID2POSN_VID2XSTART_LENGTH		(12)
+#define PDP_VID2POSN_VID2XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2POSN, VID2YSTART
+*/
+#define PDP_VID2POSN_VID2YSTART_MASK		(0x00000FFF)
+#define PDP_VID2POSN_VID2YSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID2POSN_VID2YSTART_SHIFT		(0)
+#define PDP_VID2POSN_VID2YSTART_LENGTH		(12)
+#define PDP_VID2POSN_VID2YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3POSN_OFFSET		(0x00B8)
+
+/* PDP, VID3POSN, VID3XSTART
+*/
+#define PDP_VID3POSN_VID3XSTART_MASK		(0x0FFF0000)
+#define PDP_VID3POSN_VID3XSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID3POSN_VID3XSTART_SHIFT		(16)
+#define PDP_VID3POSN_VID3XSTART_LENGTH		(12)
+#define PDP_VID3POSN_VID3XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3POSN, VID3YSTART
+*/
+#define PDP_VID3POSN_VID3YSTART_MASK		(0x00000FFF)
+#define PDP_VID3POSN_VID3YSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID3POSN_VID3YSTART_SHIFT		(0)
+#define PDP_VID3POSN_VID3YSTART_LENGTH		(12)
+#define PDP_VID3POSN_VID3YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4POSN_OFFSET		(0x00BC)
+
+/* PDP, VID4POSN, VID4XSTART
+*/
+#define PDP_VID4POSN_VID4XSTART_MASK		(0x0FFF0000)
+#define PDP_VID4POSN_VID4XSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID4POSN_VID4XSTART_SHIFT		(16)
+#define PDP_VID4POSN_VID4XSTART_LENGTH		(12)
+#define PDP_VID4POSN_VID4XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4POSN, VID4YSTART
+*/
+#define PDP_VID4POSN_VID4YSTART_MASK		(0x00000FFF)
+#define PDP_VID4POSN_VID4YSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID4POSN_VID4YSTART_SHIFT		(0)
+#define PDP_VID4POSN_VID4YSTART_LENGTH		(12)
+#define PDP_VID4POSN_VID4YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1GALPHA_OFFSET		(0x00C0)
+
+/* PDP, GRPH1GALPHA, GRPH1GALPHA
+*/
+#define PDP_GRPH1GALPHA_GRPH1GALPHA_MASK		(0x000003FF)
+#define PDP_GRPH1GALPHA_GRPH1GALPHA_LSBMASK		(0x000003FF)
+#define PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT		(0)
+#define PDP_GRPH1GALPHA_GRPH1GALPHA_LENGTH		(10)
+#define PDP_GRPH1GALPHA_GRPH1GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2GALPHA_OFFSET		(0x00C4)
+
+/* PDP, GRPH2GALPHA, GRPH2GALPHA
+*/
+#define PDP_GRPH2GALPHA_GRPH2GALPHA_MASK		(0x000003FF)
+#define PDP_GRPH2GALPHA_GRPH2GALPHA_LSBMASK		(0x000003FF)
+#define PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT		(0)
+#define PDP_GRPH2GALPHA_GRPH2GALPHA_LENGTH		(10)
+#define PDP_GRPH2GALPHA_GRPH2GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3GALPHA_OFFSET		(0x00C8)
+
+/* PDP, GRPH3GALPHA, GRPH3GALPHA
+*/
+#define PDP_GRPH3GALPHA_GRPH3GALPHA_MASK		(0x000003FF)
+#define PDP_GRPH3GALPHA_GRPH3GALPHA_LSBMASK		(0x000003FF)
+#define PDP_GRPH3GALPHA_GRPH3GALPHA_SHIFT		(0)
+#define PDP_GRPH3GALPHA_GRPH3GALPHA_LENGTH		(10)
+#define PDP_GRPH3GALPHA_GRPH3GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4GALPHA_OFFSET		(0x00CC)
+
+/* PDP, GRPH4GALPHA, GRPH4GALPHA
+*/
+#define PDP_GRPH4GALPHA_GRPH4GALPHA_MASK		(0x000003FF)
+#define PDP_GRPH4GALPHA_GRPH4GALPHA_LSBMASK		(0x000003FF)
+#define PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT		(0)
+#define PDP_GRPH4GALPHA_GRPH4GALPHA_LENGTH		(10)
+#define PDP_GRPH4GALPHA_GRPH4GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1GALPHA_OFFSET		(0x00D0)
+
+/* PDP, VID1GALPHA, VID1GALPHA
+*/
+#define PDP_VID1GALPHA_VID1GALPHA_MASK		(0x000003FF)
+#define PDP_VID1GALPHA_VID1GALPHA_LSBMASK		(0x000003FF)
+#define PDP_VID1GALPHA_VID1GALPHA_SHIFT		(0)
+#define PDP_VID1GALPHA_VID1GALPHA_LENGTH		(10)
+#define PDP_VID1GALPHA_VID1GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2GALPHA_OFFSET		(0x00D4)
+
+/* PDP, VID2GALPHA, VID2GALPHA
+*/
+#define PDP_VID2GALPHA_VID2GALPHA_MASK		(0x000003FF)
+#define PDP_VID2GALPHA_VID2GALPHA_LSBMASK		(0x000003FF)
+#define PDP_VID2GALPHA_VID2GALPHA_SHIFT		(0)
+#define PDP_VID2GALPHA_VID2GALPHA_LENGTH		(10)
+#define PDP_VID2GALPHA_VID2GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3GALPHA_OFFSET		(0x00D8)
+
+/* PDP, VID3GALPHA, VID3GALPHA
+*/
+#define PDP_VID3GALPHA_VID3GALPHA_MASK		(0x000003FF)
+#define PDP_VID3GALPHA_VID3GALPHA_LSBMASK		(0x000003FF)
+#define PDP_VID3GALPHA_VID3GALPHA_SHIFT		(0)
+#define PDP_VID3GALPHA_VID3GALPHA_LENGTH		(10)
+#define PDP_VID3GALPHA_VID3GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4GALPHA_OFFSET		(0x00DC)
+
+/* PDP, VID4GALPHA, VID4GALPHA
+*/
+#define PDP_VID4GALPHA_VID4GALPHA_MASK		(0x000003FF)
+#define PDP_VID4GALPHA_VID4GALPHA_LSBMASK		(0x000003FF)
+#define PDP_VID4GALPHA_VID4GALPHA_SHIFT		(0)
+#define PDP_VID4GALPHA_VID4GALPHA_LENGTH		(10)
+#define PDP_VID4GALPHA_VID4GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1CKEY_R_OFFSET		(0x00E0)
+
+/* PDP, GRPH1CKEY_R, GRPH1CKEY_R
+*/
+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_MASK		(0x000003FF)
+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_SHIFT		(0)
+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_LENGTH		(10)
+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1CKEY_GB_OFFSET		(0x00E4)
+
+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_G
+*/
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK		(0x03FF0000)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT		(16)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LENGTH		(10)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_B
+*/
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_MASK		(0x000003FF)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SHIFT		(0)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LENGTH		(10)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2CKEY_R_OFFSET		(0x00E8)
+
+/* PDP, GRPH2CKEY_R, GRPH2CKEY_R
+*/
+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_MASK		(0x000003FF)
+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_SHIFT		(0)
+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_LENGTH		(10)
+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2CKEY_GB_OFFSET		(0x00EC)
+
+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_G
+*/
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_MASK		(0x03FF0000)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SHIFT		(16)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LENGTH		(10)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_B
+*/
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_MASK		(0x000003FF)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SHIFT		(0)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LENGTH		(10)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3CKEY_R_OFFSET		(0x00F0)
+
+/* PDP, GRPH3CKEY_R, GRPH3CKEY_R
+*/
+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_MASK		(0x000003FF)
+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_SHIFT		(0)
+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_LENGTH		(10)
+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3CKEY_GB_OFFSET		(0x00F4)
+
+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_G
+*/
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_MASK		(0x03FF0000)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SHIFT		(16)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LENGTH		(10)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_B
+*/
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_MASK		(0x000003FF)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SHIFT		(0)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LENGTH		(10)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4CKEY_R_OFFSET		(0x00F8)
+
+/* PDP, GRPH4CKEY_R, GRPH4CKEY_R
+*/
+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_MASK		(0x000003FF)
+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_SHIFT		(0)
+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_LENGTH		(10)
+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4CKEY_GB_OFFSET		(0x00FC)
+
+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_G
+*/
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_MASK		(0x03FF0000)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SHIFT		(16)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LENGTH		(10)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_B
+*/
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_MASK		(0x000003FF)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SHIFT		(0)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LENGTH		(10)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1CKEY_R_OFFSET		(0x0100)
+
+/* PDP, VID1CKEY_R, VID1CKEY_R
+*/
+#define PDP_VID1CKEY_R_VID1CKEY_R_MASK		(0x000003FF)
+#define PDP_VID1CKEY_R_VID1CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_VID1CKEY_R_VID1CKEY_R_SHIFT		(0)
+#define PDP_VID1CKEY_R_VID1CKEY_R_LENGTH		(10)
+#define PDP_VID1CKEY_R_VID1CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1CKEY_GB_OFFSET		(0x0104)
+
+/* PDP, VID1CKEY_GB, VID1CKEY_G
+*/
+#define PDP_VID1CKEY_GB_VID1CKEY_G_MASK		(0x03FF0000)
+#define PDP_VID1CKEY_GB_VID1CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_VID1CKEY_GB_VID1CKEY_G_SHIFT		(16)
+#define PDP_VID1CKEY_GB_VID1CKEY_G_LENGTH		(10)
+#define PDP_VID1CKEY_GB_VID1CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CKEY_GB, VID1CKEY_B
+*/
+#define PDP_VID1CKEY_GB_VID1CKEY_B_MASK		(0x000003FF)
+#define PDP_VID1CKEY_GB_VID1CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_VID1CKEY_GB_VID1CKEY_B_SHIFT		(0)
+#define PDP_VID1CKEY_GB_VID1CKEY_B_LENGTH		(10)
+#define PDP_VID1CKEY_GB_VID1CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2CKEY_R_OFFSET		(0x0108)
+
+/* PDP, VID2CKEY_R, VID2CKEY_R
+*/
+#define PDP_VID2CKEY_R_VID2CKEY_R_MASK		(0x000003FF)
+#define PDP_VID2CKEY_R_VID2CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_VID2CKEY_R_VID2CKEY_R_SHIFT		(0)
+#define PDP_VID2CKEY_R_VID2CKEY_R_LENGTH		(10)
+#define PDP_VID2CKEY_R_VID2CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2CKEY_GB_OFFSET		(0x010C)
+
+/* PDP, VID2CKEY_GB, VID2CKEY_G
+*/
+#define PDP_VID2CKEY_GB_VID2CKEY_G_MASK		(0x03FF0000)
+#define PDP_VID2CKEY_GB_VID2CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_VID2CKEY_GB_VID2CKEY_G_SHIFT		(16)
+#define PDP_VID2CKEY_GB_VID2CKEY_G_LENGTH		(10)
+#define PDP_VID2CKEY_GB_VID2CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CKEY_GB, VID2CKEY_B
+*/
+#define PDP_VID2CKEY_GB_VID2CKEY_B_MASK		(0x000003FF)
+#define PDP_VID2CKEY_GB_VID2CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_VID2CKEY_GB_VID2CKEY_B_SHIFT		(0)
+#define PDP_VID2CKEY_GB_VID2CKEY_B_LENGTH		(10)
+#define PDP_VID2CKEY_GB_VID2CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3CKEY_R_OFFSET		(0x0110)
+
+/* PDP, VID3CKEY_R, VID3CKEY_R
+*/
+#define PDP_VID3CKEY_R_VID3CKEY_R_MASK		(0x000003FF)
+#define PDP_VID3CKEY_R_VID3CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_VID3CKEY_R_VID3CKEY_R_SHIFT		(0)
+#define PDP_VID3CKEY_R_VID3CKEY_R_LENGTH		(10)
+#define PDP_VID3CKEY_R_VID3CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3CKEY_GB_OFFSET		(0x0114)
+
+/* PDP, VID3CKEY_GB, VID3CKEY_G
+*/
+#define PDP_VID3CKEY_GB_VID3CKEY_G_MASK		(0x03FF0000)
+#define PDP_VID3CKEY_GB_VID3CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_VID3CKEY_GB_VID3CKEY_G_SHIFT		(16)
+#define PDP_VID3CKEY_GB_VID3CKEY_G_LENGTH		(10)
+#define PDP_VID3CKEY_GB_VID3CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CKEY_GB, VID3CKEY_B
+*/
+#define PDP_VID3CKEY_GB_VID3CKEY_B_MASK		(0x000003FF)
+#define PDP_VID3CKEY_GB_VID3CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_VID3CKEY_GB_VID3CKEY_B_SHIFT		(0)
+#define PDP_VID3CKEY_GB_VID3CKEY_B_LENGTH		(10)
+#define PDP_VID3CKEY_GB_VID3CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4CKEY_R_OFFSET		(0x0118)
+
+/* PDP, VID4CKEY_R, VID4CKEY_R
+*/
+#define PDP_VID4CKEY_R_VID4CKEY_R_MASK		(0x000003FF)
+#define PDP_VID4CKEY_R_VID4CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_VID4CKEY_R_VID4CKEY_R_SHIFT		(0)
+#define PDP_VID4CKEY_R_VID4CKEY_R_LENGTH		(10)
+#define PDP_VID4CKEY_R_VID4CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4CKEY_GB_OFFSET		(0x011C)
+
+/* PDP, VID4CKEY_GB, VID4CKEY_G
+*/
+#define PDP_VID4CKEY_GB_VID4CKEY_G_MASK		(0x03FF0000)
+#define PDP_VID4CKEY_GB_VID4CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT		(16)
+#define PDP_VID4CKEY_GB_VID4CKEY_G_LENGTH		(10)
+#define PDP_VID4CKEY_GB_VID4CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CKEY_GB, VID4CKEY_B
+*/
+#define PDP_VID4CKEY_GB_VID4CKEY_B_MASK		(0x000003FF)
+#define PDP_VID4CKEY_GB_VID4CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_VID4CKEY_GB_VID4CKEY_B_SHIFT		(0)
+#define PDP_VID4CKEY_GB_VID4CKEY_B_LENGTH		(10)
+#define PDP_VID4CKEY_GB_VID4CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1BLND2_R_OFFSET		(0x0120)
+
+/* PDP, GRPH1BLND2_R, GRPH1PIXDBL
+*/
+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_MASK		(0x80000000)
+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_SHIFT		(31)
+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_LENGTH		(1)
+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1BLND2_R, GRPH1LINDBL
+*/
+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_MASK		(0x20000000)
+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_SHIFT		(29)
+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_LENGTH		(1)
+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1BLND2_R, GRPH1CKEYMASK_R
+*/
+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SHIFT		(0)
+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LENGTH		(10)
+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1BLND2_GB_OFFSET		(0x0124)
+
+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_G
+*/
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SHIFT		(16)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LENGTH		(10)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_B
+*/
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SHIFT		(0)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LENGTH		(10)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2BLND2_R_OFFSET		(0x0128)
+
+/* PDP, GRPH2BLND2_R, GRPH2PIXDBL
+*/
+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_MASK		(0x80000000)
+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_SHIFT		(31)
+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_LENGTH		(1)
+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2BLND2_R, GRPH2LINDBL
+*/
+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_MASK		(0x20000000)
+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_SHIFT		(29)
+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_LENGTH		(1)
+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2BLND2_R, GRPH2CKEYMASK_R
+*/
+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SHIFT		(0)
+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LENGTH		(10)
+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2BLND2_GB_OFFSET		(0x012C)
+
+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_G
+*/
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SHIFT		(16)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LENGTH		(10)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_B
+*/
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SHIFT		(0)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LENGTH		(10)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3BLND2_R_OFFSET		(0x0130)
+
+/* PDP, GRPH3BLND2_R, GRPH3PIXDBL
+*/
+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_MASK		(0x80000000)
+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_SHIFT		(31)
+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_LENGTH		(1)
+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3BLND2_R, GRPH3LINDBL
+*/
+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_MASK		(0x20000000)
+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_SHIFT		(29)
+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_LENGTH		(1)
+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3BLND2_R, GRPH3CKEYMASK_R
+*/
+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SHIFT		(0)
+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LENGTH		(10)
+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3BLND2_GB_OFFSET		(0x0134)
+
+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_G
+*/
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SHIFT		(16)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LENGTH		(10)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_B
+*/
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SHIFT		(0)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LENGTH		(10)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4BLND2_R_OFFSET		(0x0138)
+
+/* PDP, GRPH4BLND2_R, GRPH4PIXDBL
+*/
+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_MASK		(0x80000000)
+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_SHIFT		(31)
+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_LENGTH		(1)
+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4BLND2_R, GRPH4LINDBL
+*/
+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_MASK		(0x20000000)
+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_SHIFT		(29)
+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_LENGTH		(1)
+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4BLND2_R, GRPH4CKEYMASK_R
+*/
+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SHIFT		(0)
+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LENGTH		(10)
+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4BLND2_GB_OFFSET		(0x013C)
+
+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_G
+*/
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SHIFT		(16)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LENGTH		(10)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_B
+*/
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SHIFT		(0)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LENGTH		(10)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1BLND2_R_OFFSET		(0x0140)
+
+/* PDP, VID1BLND2_R, VID1CKEYMASK_R
+*/
+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_SHIFT		(0)
+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_LENGTH		(10)
+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1BLND2_GB_OFFSET		(0x0144)
+
+/* PDP, VID1BLND2_GB, VID1CKEYMASK_G
+*/
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_SHIFT		(16)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_LENGTH		(10)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1BLND2_GB, VID1CKEYMASK_B
+*/
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_SHIFT		(0)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_LENGTH		(10)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2BLND2_R_OFFSET		(0x0148)
+
+/* PDP, VID2BLND2_R, VID2CKEYMASK_R
+*/
+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_SHIFT		(0)
+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_LENGTH		(10)
+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2BLND2_GB_OFFSET		(0x014C)
+
+/* PDP, VID2BLND2_GB, VID2CKEYMASK_G
+*/
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_SHIFT		(16)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_LENGTH		(10)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2BLND2_GB, VID2CKEYMASK_B
+*/
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_SHIFT		(0)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_LENGTH		(10)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3BLND2_R_OFFSET		(0x0150)
+
+/* PDP, VID3BLND2_R, VID3CKEYMASK_R
+*/
+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_SHIFT		(0)
+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_LENGTH		(10)
+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3BLND2_GB_OFFSET		(0x0154)
+
+/* PDP, VID3BLND2_GB, VID3CKEYMASK_G
+*/
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_SHIFT		(16)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_LENGTH		(10)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3BLND2_GB, VID3CKEYMASK_B
+*/
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_SHIFT		(0)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_LENGTH		(10)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4BLND2_R_OFFSET		(0x0158)
+
+/* PDP, VID4BLND2_R, VID4CKEYMASK_R
+*/
+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_SHIFT		(0)
+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_LENGTH		(10)
+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4BLND2_GB_OFFSET		(0x015C)
+
+/* PDP, VID4BLND2_GB, VID4CKEYMASK_G
+*/
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_SHIFT		(16)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_LENGTH		(10)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4BLND2_GB, VID4CKEYMASK_B
+*/
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_SHIFT		(0)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_LENGTH		(10)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1INTERLEAVE_CTRL_OFFSET		(0x0160)
+
+/* PDP, GRPH1INTERLEAVE_CTRL, GRPH1INTFIELD
+*/
+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK		(0x00000001)
+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LSBMASK		(0x00000001)
+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT		(0)
+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LENGTH		(1)
+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2INTERLEAVE_CTRL_OFFSET		(0x0164)
+
+/* PDP, GRPH2INTERLEAVE_CTRL, GRPH2INTFIELD
+*/
+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK		(0x00000001)
+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LSBMASK		(0x00000001)
+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT		(0)
+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LENGTH		(1)
+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3INTERLEAVE_CTRL_OFFSET		(0x0168)
+
+/* PDP, GRPH3INTERLEAVE_CTRL, GRPH3INTFIELD
+*/
+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_MASK		(0x00000001)
+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LSBMASK		(0x00000001)
+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SHIFT		(0)
+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LENGTH		(1)
+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4INTERLEAVE_CTRL_OFFSET		(0x016C)
+
+/* PDP, GRPH4INTERLEAVE_CTRL, GRPH4INTFIELD
+*/
+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK		(0x00000001)
+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LSBMASK		(0x00000001)
+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT		(0)
+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LENGTH		(1)
+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1INTERLEAVE_CTRL_OFFSET		(0x0170)
+
+/* PDP, VID1INTERLEAVE_CTRL, VID1INTFIELD
+*/
+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK		(0x00000001)
+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LSBMASK		(0x00000001)
+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT		(0)
+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LENGTH		(1)
+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2INTERLEAVE_CTRL_OFFSET		(0x0174)
+
+/* PDP, VID2INTERLEAVE_CTRL, VID2INTFIELD
+*/
+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_MASK		(0x00000001)
+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LSBMASK		(0x00000001)
+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SHIFT		(0)
+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LENGTH		(1)
+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3INTERLEAVE_CTRL_OFFSET		(0x0178)
+
+/* PDP, VID3INTERLEAVE_CTRL, VID3INTFIELD
+*/
+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_MASK		(0x00000001)
+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LSBMASK		(0x00000001)
+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SHIFT		(0)
+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LENGTH		(1)
+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4INTERLEAVE_CTRL_OFFSET		(0x017C)
+
+/* PDP, VID4INTERLEAVE_CTRL, VID4INTFIELD
+*/
+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_MASK		(0x00000001)
+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LSBMASK		(0x00000001)
+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SHIFT		(0)
+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LENGTH		(1)
+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1BASEADDR_OFFSET		(0x0180)
+
+/* PDP, GRPH1BASEADDR, GRPH1BASEADDR
+*/
+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT		(5)
+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_LENGTH		(27)
+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2BASEADDR_OFFSET		(0x0184)
+
+/* PDP, GRPH2BASEADDR, GRPH2BASEADDR
+*/
+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_SHIFT		(5)
+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_LENGTH		(27)
+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3BASEADDR_OFFSET		(0x0188)
+
+/* PDP, GRPH3BASEADDR, GRPH3BASEADDR
+*/
+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_SHIFT		(5)
+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_LENGTH		(27)
+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4BASEADDR_OFFSET		(0x018C)
+
+/* PDP, GRPH4BASEADDR, GRPH4BASEADDR
+*/
+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_SHIFT		(5)
+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_LENGTH		(27)
+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1BASEADDR_OFFSET		(0x0190)
+
+/* PDP, VID1BASEADDR, VID1BASEADDR
+*/
+#define PDP_VID1BASEADDR_VID1BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID1BASEADDR_VID1BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID1BASEADDR_VID1BASEADDR_SHIFT		(5)
+#define PDP_VID1BASEADDR_VID1BASEADDR_LENGTH		(27)
+#define PDP_VID1BASEADDR_VID1BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2BASEADDR_OFFSET		(0x0194)
+
+/* PDP, VID2BASEADDR, VID2BASEADDR
+*/
+#define PDP_VID2BASEADDR_VID2BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID2BASEADDR_VID2BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID2BASEADDR_VID2BASEADDR_SHIFT		(5)
+#define PDP_VID2BASEADDR_VID2BASEADDR_LENGTH		(27)
+#define PDP_VID2BASEADDR_VID2BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3BASEADDR_OFFSET		(0x0198)
+
+/* PDP, VID3BASEADDR, VID3BASEADDR
+*/
+#define PDP_VID3BASEADDR_VID3BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID3BASEADDR_VID3BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID3BASEADDR_VID3BASEADDR_SHIFT		(5)
+#define PDP_VID3BASEADDR_VID3BASEADDR_LENGTH		(27)
+#define PDP_VID3BASEADDR_VID3BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4BASEADDR_OFFSET		(0x019C)
+
+/* PDP, VID4BASEADDR, VID4BASEADDR
+*/
+#define PDP_VID4BASEADDR_VID4BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID4BASEADDR_VID4BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID4BASEADDR_VID4BASEADDR_SHIFT		(5)
+#define PDP_VID4BASEADDR_VID4BASEADDR_LENGTH		(27)
+#define PDP_VID4BASEADDR_VID4BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1UBASEADDR_OFFSET		(0x01B0)
+
+/* PDP, VID1UBASEADDR, VID1UBASEADDR
+*/
+#define PDP_VID1UBASEADDR_VID1UBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID1UBASEADDR_VID1UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID1UBASEADDR_VID1UBASEADDR_SHIFT		(5)
+#define PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH		(27)
+#define PDP_VID1UBASEADDR_VID1UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2UBASEADDR_OFFSET		(0x01B4)
+
+/* PDP, VID2UBASEADDR, VID2UBASEADDR
+*/
+#define PDP_VID2UBASEADDR_VID2UBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID2UBASEADDR_VID2UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID2UBASEADDR_VID2UBASEADDR_SHIFT		(5)
+#define PDP_VID2UBASEADDR_VID2UBASEADDR_LENGTH		(27)
+#define PDP_VID2UBASEADDR_VID2UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3UBASEADDR_OFFSET		(0x01B8)
+
+/* PDP, VID3UBASEADDR, VID3UBASEADDR
+*/
+#define PDP_VID3UBASEADDR_VID3UBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID3UBASEADDR_VID3UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID3UBASEADDR_VID3UBASEADDR_SHIFT		(5)
+#define PDP_VID3UBASEADDR_VID3UBASEADDR_LENGTH		(27)
+#define PDP_VID3UBASEADDR_VID3UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4UBASEADDR_OFFSET		(0x01BC)
+
+/* PDP, VID4UBASEADDR, VID4UBASEADDR
+*/
+#define PDP_VID4UBASEADDR_VID4UBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID4UBASEADDR_VID4UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID4UBASEADDR_VID4UBASEADDR_SHIFT		(5)
+#define PDP_VID4UBASEADDR_VID4UBASEADDR_LENGTH		(27)
+#define PDP_VID4UBASEADDR_VID4UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VBASEADDR_OFFSET		(0x01D0)
+
+/* PDP, VID1VBASEADDR, VID1VBASEADDR
+*/
+#define PDP_VID1VBASEADDR_VID1VBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID1VBASEADDR_VID1VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID1VBASEADDR_VID1VBASEADDR_SHIFT		(5)
+#define PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH		(27)
+#define PDP_VID1VBASEADDR_VID1VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VBASEADDR_OFFSET		(0x01D4)
+
+/* PDP, VID2VBASEADDR, VID2VBASEADDR
+*/
+#define PDP_VID2VBASEADDR_VID2VBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID2VBASEADDR_VID2VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID2VBASEADDR_VID2VBASEADDR_SHIFT		(5)
+#define PDP_VID2VBASEADDR_VID2VBASEADDR_LENGTH		(27)
+#define PDP_VID2VBASEADDR_VID2VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VBASEADDR_OFFSET		(0x01D8)
+
+/* PDP, VID3VBASEADDR, VID3VBASEADDR
+*/
+#define PDP_VID3VBASEADDR_VID3VBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID3VBASEADDR_VID3VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID3VBASEADDR_VID3VBASEADDR_SHIFT		(5)
+#define PDP_VID3VBASEADDR_VID3VBASEADDR_LENGTH		(27)
+#define PDP_VID3VBASEADDR_VID3VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VBASEADDR_OFFSET		(0x01DC)
+
+/* PDP, VID4VBASEADDR, VID4VBASEADDR
+*/
+#define PDP_VID4VBASEADDR_VID4VBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID4VBASEADDR_VID4VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID4VBASEADDR_VID4VBASEADDR_SHIFT		(5)
+#define PDP_VID4VBASEADDR_VID4VBASEADDR_LENGTH		(27)
+#define PDP_VID4VBASEADDR_VID4VBASEADDR_SIGNED_FIELD	IMG_FALSE
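+
+/*
+ * Editor's illustrative sketch (not part of the generated register map):
+ * the *_MASK / *_SHIFT / *_LSBMASK triplets above are typically combined
+ * in a read-modify-write helper such as the hypothetical one below. Note
+ * that the shift of 5 and mask of 0xFFFFFFE0 on the base address fields
+ * imply the programmed buffer address must be 32-byte aligned.
+ */
+static inline unsigned int pdp_field_write(unsigned int reg,
+					   unsigned int mask,
+					   unsigned int shift,
+					   unsigned int lsbmask,
+					   unsigned int value)
+{
+	/* Clear the field, then insert the new value, clamped to its width */
+	return (reg & ~mask) | (((value & lsbmask) << shift) & mask);
+}
+
+/*
+ * Hypothetical usage: program a 32-byte-aligned buffer address into the
+ * GRPH1 base address field (the field holds address bits [31:5]):
+ *
+ *	reg = pdp_field_write(0, PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK,
+ *			      PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT,
+ *			      PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK,
+ *			      buf_addr >> PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT);
+ */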
+
+#define PDP_VID1POSTSKIPCTRL_OFFSET		(0x0230)
+
+/* PDP, VID1POSTSKIPCTRL, VID1HPOSTCLIP
+*/
+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK		(0x007F0000)
+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LSBMASK		(0x0000007F)
+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT		(16)
+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LENGTH		(7)
+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1POSTSKIPCTRL, VID1VPOSTCLIP
+*/
+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_MASK		(0x0000003F)
+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LSBMASK		(0x0000003F)
+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SHIFT		(0)
+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LENGTH		(6)
+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2POSTSKIPCTRL_OFFSET		(0x0234)
+
+/* PDP, VID2POSTSKIPCTRL, VID2HPOSTCLIP
+*/
+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_MASK		(0x007F0000)
+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LSBMASK		(0x0000007F)
+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SHIFT		(16)
+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LENGTH		(7)
+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2POSTSKIPCTRL, VID2VPOSTCLIP
+*/
+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_MASK		(0x0000003F)
+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LSBMASK		(0x0000003F)
+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SHIFT		(0)
+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LENGTH		(6)
+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3POSTSKIPCTRL_OFFSET		(0x0238)
+
+/* PDP, VID3POSTSKIPCTRL, VID3HPOSTCLIP
+*/
+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_MASK		(0x007F0000)
+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LSBMASK		(0x0000007F)
+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SHIFT		(16)
+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LENGTH		(7)
+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3POSTSKIPCTRL, VID3VPOSTCLIP
+*/
+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_MASK		(0x0000003F)
+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LSBMASK		(0x0000003F)
+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SHIFT		(0)
+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LENGTH		(6)
+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4POSTSKIPCTRL_OFFSET		(0x023C)
+
+/* PDP, VID4POSTSKIPCTRL, VID4HPOSTCLIP
+*/
+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_MASK		(0x007F0000)
+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LSBMASK		(0x0000007F)
+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SHIFT		(16)
+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LENGTH		(7)
+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4POSTSKIPCTRL, VID4VPOSTCLIP
+*/
+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_MASK		(0x0000003F)
+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LSBMASK		(0x0000003F)
+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SHIFT		(0)
+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LENGTH		(6)
+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1DECIMATE_CTRL_OFFSET		(0x0240)
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_PIXEL_HALVE
+*/
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_EN
+*/
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_MASK		(0x00000001)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SHIFT		(0)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LENGTH		(1)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2DECIMATE_CTRL_OFFSET		(0x0244)
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_PIXEL_HALVE
+*/
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_EN
+*/
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_MASK		(0x00000001)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SHIFT		(0)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LENGTH		(1)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3DECIMATE_CTRL_OFFSET		(0x0248)
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_PIXEL_HALVE
+*/
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_EN
+*/
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_MASK		(0x00000001)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SHIFT		(0)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LENGTH		(1)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4DECIMATE_CTRL_OFFSET		(0x024C)
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_PIXEL_HALVE
+*/
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_EN
+*/
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_MASK		(0x00000001)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SHIFT		(0)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LENGTH		(1)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1DECIMATE_CTRL_OFFSET		(0x0250)
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_PIXEL_HALVE
+*/
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_EN
+*/
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK		(0x00000001)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SHIFT		(0)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LENGTH		(1)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2DECIMATE_CTRL_OFFSET		(0x0254)
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_PIXEL_HALVE
+*/
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_EN
+*/
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_MASK		(0x00000001)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SHIFT		(0)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LENGTH		(1)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3DECIMATE_CTRL_OFFSET		(0x0258)
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_PIXEL_HALVE
+*/
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_EN
+*/
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_MASK		(0x00000001)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SHIFT		(0)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LENGTH		(1)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4DECIMATE_CTRL_OFFSET		(0x025C)
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_PIXEL_HALVE
+*/
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_EN
+*/
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_MASK		(0x00000001)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SHIFT		(0)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LENGTH		(1)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1SKIPCTRL_OFFSET		(0x0270)
+
+/* PDP, VID1SKIPCTRL, VID1HSKIP
+*/
+#define PDP_VID1SKIPCTRL_VID1HSKIP_MASK		(0x0FFF0000)
+#define PDP_VID1SKIPCTRL_VID1HSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT		(16)
+#define PDP_VID1SKIPCTRL_VID1HSKIP_LENGTH		(12)
+#define PDP_VID1SKIPCTRL_VID1HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SKIPCTRL, VID1VSKIP
+*/
+#define PDP_VID1SKIPCTRL_VID1VSKIP_MASK		(0x00000FFF)
+#define PDP_VID1SKIPCTRL_VID1VSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT		(0)
+#define PDP_VID1SKIPCTRL_VID1VSKIP_LENGTH		(12)
+#define PDP_VID1SKIPCTRL_VID1VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2SKIPCTRL_OFFSET		(0x0274)
+
+/* PDP, VID2SKIPCTRL, VID2HSKIP
+*/
+#define PDP_VID2SKIPCTRL_VID2HSKIP_MASK		(0x0FFF0000)
+#define PDP_VID2SKIPCTRL_VID2HSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID2SKIPCTRL_VID2HSKIP_SHIFT		(16)
+#define PDP_VID2SKIPCTRL_VID2HSKIP_LENGTH		(12)
+#define PDP_VID2SKIPCTRL_VID2HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SKIPCTRL, VID2VSKIP
+*/
+#define PDP_VID2SKIPCTRL_VID2VSKIP_MASK		(0x00000FFF)
+#define PDP_VID2SKIPCTRL_VID2VSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID2SKIPCTRL_VID2VSKIP_SHIFT		(0)
+#define PDP_VID2SKIPCTRL_VID2VSKIP_LENGTH		(12)
+#define PDP_VID2SKIPCTRL_VID2VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3SKIPCTRL_OFFSET		(0x0278)
+
+/* PDP, VID3SKIPCTRL, VID3HSKIP
+*/
+#define PDP_VID3SKIPCTRL_VID3HSKIP_MASK		(0x0FFF0000)
+#define PDP_VID3SKIPCTRL_VID3HSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID3SKIPCTRL_VID3HSKIP_SHIFT		(16)
+#define PDP_VID3SKIPCTRL_VID3HSKIP_LENGTH		(12)
+#define PDP_VID3SKIPCTRL_VID3HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SKIPCTRL, VID3VSKIP
+*/
+#define PDP_VID3SKIPCTRL_VID3VSKIP_MASK		(0x00000FFF)
+#define PDP_VID3SKIPCTRL_VID3VSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID3SKIPCTRL_VID3VSKIP_SHIFT		(0)
+#define PDP_VID3SKIPCTRL_VID3VSKIP_LENGTH		(12)
+#define PDP_VID3SKIPCTRL_VID3VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4SKIPCTRL_OFFSET		(0x027C)
+
+/* PDP, VID4SKIPCTRL, VID4HSKIP
+*/
+#define PDP_VID4SKIPCTRL_VID4HSKIP_MASK		(0x0FFF0000)
+#define PDP_VID4SKIPCTRL_VID4HSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID4SKIPCTRL_VID4HSKIP_SHIFT		(16)
+#define PDP_VID4SKIPCTRL_VID4HSKIP_LENGTH		(12)
+#define PDP_VID4SKIPCTRL_VID4HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SKIPCTRL, VID4VSKIP
+*/
+#define PDP_VID4SKIPCTRL_VID4VSKIP_MASK		(0x00000FFF)
+#define PDP_VID4SKIPCTRL_VID4VSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID4SKIPCTRL_VID4VSKIP_SHIFT		(0)
+#define PDP_VID4SKIPCTRL_VID4VSKIP_LENGTH		(12)
+#define PDP_VID4SKIPCTRL_VID4VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1SCALECTRL_OFFSET		(0x0460)
+
+/* PDP, VID1SCALECTRL, VID1HSCALEBP
+*/
+#define PDP_VID1SCALECTRL_VID1HSCALEBP_MASK		(0x80000000)
+#define PDP_VID1SCALECTRL_VID1HSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID1SCALECTRL_VID1HSCALEBP_SHIFT		(31)
+#define PDP_VID1SCALECTRL_VID1HSCALEBP_LENGTH		(1)
+#define PDP_VID1SCALECTRL_VID1HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VSCALEBP
+*/
+#define PDP_VID1SCALECTRL_VID1VSCALEBP_MASK		(0x40000000)
+#define PDP_VID1SCALECTRL_VID1VSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID1SCALECTRL_VID1VSCALEBP_SHIFT		(30)
+#define PDP_VID1SCALECTRL_VID1VSCALEBP_LENGTH		(1)
+#define PDP_VID1SCALECTRL_VID1VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1HSBEFOREVS
+*/
+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_MASK		(0x20000000)
+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_LSBMASK		(0x00000001)
+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_SHIFT		(29)
+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_LENGTH		(1)
+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VSURUNCTRL
+*/
+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_MASK		(0x08000000)
+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_LSBMASK		(0x00000001)
+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_SHIFT		(27)
+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_LENGTH		(1)
+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1PAN_EN
+*/
+#define PDP_VID1SCALECTRL_VID1PAN_EN_MASK		(0x00040000)
+#define PDP_VID1SCALECTRL_VID1PAN_EN_LSBMASK		(0x00000001)
+#define PDP_VID1SCALECTRL_VID1PAN_EN_SHIFT		(18)
+#define PDP_VID1SCALECTRL_VID1PAN_EN_LENGTH		(1)
+#define PDP_VID1SCALECTRL_VID1PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VORDER
+*/
+#define PDP_VID1SCALECTRL_VID1VORDER_MASK		(0x00030000)
+#define PDP_VID1SCALECTRL_VID1VORDER_LSBMASK		(0x00000003)
+#define PDP_VID1SCALECTRL_VID1VORDER_SHIFT		(16)
+#define PDP_VID1SCALECTRL_VID1VORDER_LENGTH		(2)
+#define PDP_VID1SCALECTRL_VID1VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VPITCH
+*/
+#define PDP_VID1SCALECTRL_VID1VPITCH_MASK		(0x0000FFFF)
+#define PDP_VID1SCALECTRL_VID1VPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID1SCALECTRL_VID1VPITCH_SHIFT		(0)
+#define PDP_VID1SCALECTRL_VID1VPITCH_LENGTH		(16)
+#define PDP_VID1SCALECTRL_VID1VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VSINIT_OFFSET		(0x0464)
+
+/* PDP, VID1VSINIT, VID1VINITIAL1
+*/
+#define PDP_VID1VSINIT_VID1VINITIAL1_MASK		(0xFFFF0000)
+#define PDP_VID1VSINIT_VID1VINITIAL1_LSBMASK		(0x0000FFFF)
+#define PDP_VID1VSINIT_VID1VINITIAL1_SHIFT		(16)
+#define PDP_VID1VSINIT_VID1VINITIAL1_LENGTH		(16)
+#define PDP_VID1VSINIT_VID1VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1VSINIT, VID1VINITIAL0
+*/
+#define PDP_VID1VSINIT_VID1VINITIAL0_MASK		(0x0000FFFF)
+#define PDP_VID1VSINIT_VID1VINITIAL0_LSBMASK		(0x0000FFFF)
+#define PDP_VID1VSINIT_VID1VINITIAL0_SHIFT		(0)
+#define PDP_VID1VSINIT_VID1VINITIAL0_LENGTH		(16)
+#define PDP_VID1VSINIT_VID1VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF0_OFFSET		(0x0468)
+
+/* PDP, VID1VCOEFF0, VID1VCOEFF0
+*/
+#define PDP_VID1VCOEFF0_VID1VCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF0_VID1VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF0_VID1VCOEFF0_SHIFT		(0)
+#define PDP_VID1VCOEFF0_VID1VCOEFF0_LENGTH		(32)
+#define PDP_VID1VCOEFF0_VID1VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF1_OFFSET		(0x046C)
+
+/* PDP, VID1VCOEFF1, VID1VCOEFF1
+*/
+#define PDP_VID1VCOEFF1_VID1VCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF1_VID1VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF1_VID1VCOEFF1_SHIFT		(0)
+#define PDP_VID1VCOEFF1_VID1VCOEFF1_LENGTH		(32)
+#define PDP_VID1VCOEFF1_VID1VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF2_OFFSET		(0x0470)
+
+/* PDP, VID1VCOEFF2, VID1VCOEFF2
+*/
+#define PDP_VID1VCOEFF2_VID1VCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF2_VID1VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF2_VID1VCOEFF2_SHIFT		(0)
+#define PDP_VID1VCOEFF2_VID1VCOEFF2_LENGTH		(32)
+#define PDP_VID1VCOEFF2_VID1VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF3_OFFSET		(0x0474)
+
+/* PDP, VID1VCOEFF3, VID1VCOEFF3
+*/
+#define PDP_VID1VCOEFF3_VID1VCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF3_VID1VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF3_VID1VCOEFF3_SHIFT		(0)
+#define PDP_VID1VCOEFF3_VID1VCOEFF3_LENGTH		(32)
+#define PDP_VID1VCOEFF3_VID1VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF4_OFFSET		(0x0478)
+
+/* PDP, VID1VCOEFF4, VID1VCOEFF4
+*/
+#define PDP_VID1VCOEFF4_VID1VCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF4_VID1VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF4_VID1VCOEFF4_SHIFT		(0)
+#define PDP_VID1VCOEFF4_VID1VCOEFF4_LENGTH		(32)
+#define PDP_VID1VCOEFF4_VID1VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF5_OFFSET		(0x047C)
+
+/* PDP, VID1VCOEFF5, VID1VCOEFF5
+*/
+#define PDP_VID1VCOEFF5_VID1VCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF5_VID1VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF5_VID1VCOEFF5_SHIFT		(0)
+#define PDP_VID1VCOEFF5_VID1VCOEFF5_LENGTH		(32)
+#define PDP_VID1VCOEFF5_VID1VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF6_OFFSET		(0x0480)
+
+/* PDP, VID1VCOEFF6, VID1VCOEFF6
+*/
+#define PDP_VID1VCOEFF6_VID1VCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF6_VID1VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF6_VID1VCOEFF6_SHIFT		(0)
+#define PDP_VID1VCOEFF6_VID1VCOEFF6_LENGTH		(32)
+#define PDP_VID1VCOEFF6_VID1VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF7_OFFSET		(0x0484)
+
+/* PDP, VID1VCOEFF7, VID1VCOEFF7
+*/
+#define PDP_VID1VCOEFF7_VID1VCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF7_VID1VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF7_VID1VCOEFF7_SHIFT		(0)
+#define PDP_VID1VCOEFF7_VID1VCOEFF7_LENGTH		(32)
+#define PDP_VID1VCOEFF7_VID1VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF8_OFFSET		(0x0488)
+
+/* PDP, VID1VCOEFF8, VID1VCOEFF8
+*/
+#define PDP_VID1VCOEFF8_VID1VCOEFF8_MASK		(0x000000FF)
+#define PDP_VID1VCOEFF8_VID1VCOEFF8_LSBMASK		(0x000000FF)
+#define PDP_VID1VCOEFF8_VID1VCOEFF8_SHIFT		(0)
+#define PDP_VID1VCOEFF8_VID1VCOEFF8_LENGTH		(8)
+#define PDP_VID1VCOEFF8_VID1VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HSINIT_OFFSET		(0x048C)
+
+/* PDP, VID1HSINIT, VID1HINITIAL
+*/
+#define PDP_VID1HSINIT_VID1HINITIAL_MASK		(0xFFFF0000)
+#define PDP_VID1HSINIT_VID1HINITIAL_LSBMASK		(0x0000FFFF)
+#define PDP_VID1HSINIT_VID1HINITIAL_SHIFT		(16)
+#define PDP_VID1HSINIT_VID1HINITIAL_LENGTH		(16)
+#define PDP_VID1HSINIT_VID1HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1HSINIT, VID1HPITCH
+*/
+#define PDP_VID1HSINIT_VID1HPITCH_MASK		(0x0000FFFF)
+#define PDP_VID1HSINIT_VID1HPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID1HSINIT_VID1HPITCH_SHIFT		(0)
+#define PDP_VID1HSINIT_VID1HPITCH_LENGTH		(16)
+#define PDP_VID1HSINIT_VID1HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF0_OFFSET		(0x0490)
+
+/* PDP, VID1HCOEFF0, VID1HCOEFF0
+*/
+#define PDP_VID1HCOEFF0_VID1HCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF0_VID1HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF0_VID1HCOEFF0_SHIFT		(0)
+#define PDP_VID1HCOEFF0_VID1HCOEFF0_LENGTH		(32)
+#define PDP_VID1HCOEFF0_VID1HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF1_OFFSET		(0x0494)
+
+/* PDP, VID1HCOEFF1, VID1HCOEFF1
+*/
+#define PDP_VID1HCOEFF1_VID1HCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF1_VID1HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF1_VID1HCOEFF1_SHIFT		(0)
+#define PDP_VID1HCOEFF1_VID1HCOEFF1_LENGTH		(32)
+#define PDP_VID1HCOEFF1_VID1HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF2_OFFSET		(0x0498)
+
+/* PDP, VID1HCOEFF2, VID1HCOEFF2
+*/
+#define PDP_VID1HCOEFF2_VID1HCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF2_VID1HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF2_VID1HCOEFF2_SHIFT		(0)
+#define PDP_VID1HCOEFF2_VID1HCOEFF2_LENGTH		(32)
+#define PDP_VID1HCOEFF2_VID1HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF3_OFFSET		(0x049C)
+
+/* PDP, VID1HCOEFF3, VID1HCOEFF3
+*/
+#define PDP_VID1HCOEFF3_VID1HCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF3_VID1HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF3_VID1HCOEFF3_SHIFT		(0)
+#define PDP_VID1HCOEFF3_VID1HCOEFF3_LENGTH		(32)
+#define PDP_VID1HCOEFF3_VID1HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF4_OFFSET		(0x04A0)
+
+/* PDP, VID1HCOEFF4, VID1HCOEFF4
+*/
+#define PDP_VID1HCOEFF4_VID1HCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF4_VID1HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF4_VID1HCOEFF4_SHIFT		(0)
+#define PDP_VID1HCOEFF4_VID1HCOEFF4_LENGTH		(32)
+#define PDP_VID1HCOEFF4_VID1HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF5_OFFSET		(0x04A4)
+
+/* PDP, VID1HCOEFF5, VID1HCOEFF5
+*/
+#define PDP_VID1HCOEFF5_VID1HCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF5_VID1HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF5_VID1HCOEFF5_SHIFT		(0)
+#define PDP_VID1HCOEFF5_VID1HCOEFF5_LENGTH		(32)
+#define PDP_VID1HCOEFF5_VID1HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF6_OFFSET		(0x04A8)
+
+/* PDP, VID1HCOEFF6, VID1HCOEFF6
+*/
+#define PDP_VID1HCOEFF6_VID1HCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF6_VID1HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF6_VID1HCOEFF6_SHIFT		(0)
+#define PDP_VID1HCOEFF6_VID1HCOEFF6_LENGTH		(32)
+#define PDP_VID1HCOEFF6_VID1HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF7_OFFSET		(0x04AC)
+
+/* PDP, VID1HCOEFF7, VID1HCOEFF7
+*/
+#define PDP_VID1HCOEFF7_VID1HCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF7_VID1HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF7_VID1HCOEFF7_SHIFT		(0)
+#define PDP_VID1HCOEFF7_VID1HCOEFF7_LENGTH		(32)
+#define PDP_VID1HCOEFF7_VID1HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF8_OFFSET		(0x04B0)
+
+/* PDP, VID1HCOEFF8, VID1HCOEFF8
+*/
+#define PDP_VID1HCOEFF8_VID1HCOEFF8_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF8_VID1HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF8_VID1HCOEFF8_SHIFT		(0)
+#define PDP_VID1HCOEFF8_VID1HCOEFF8_LENGTH		(32)
+#define PDP_VID1HCOEFF8_VID1HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF9_OFFSET		(0x04B4)
+
+/* PDP, VID1HCOEFF9, VID1HCOEFF9
+*/
+#define PDP_VID1HCOEFF9_VID1HCOEFF9_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF9_VID1HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF9_VID1HCOEFF9_SHIFT		(0)
+#define PDP_VID1HCOEFF9_VID1HCOEFF9_LENGTH		(32)
+#define PDP_VID1HCOEFF9_VID1HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF10_OFFSET		(0x04B8)
+
+/* PDP, VID1HCOEFF10, VID1HCOEFF10
+*/
+#define PDP_VID1HCOEFF10_VID1HCOEFF10_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF10_VID1HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF10_VID1HCOEFF10_SHIFT		(0)
+#define PDP_VID1HCOEFF10_VID1HCOEFF10_LENGTH		(32)
+#define PDP_VID1HCOEFF10_VID1HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF11_OFFSET		(0x04BC)
+
+/* PDP, VID1HCOEFF11, VID1HCOEFF11
+*/
+#define PDP_VID1HCOEFF11_VID1HCOEFF11_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF11_VID1HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF11_VID1HCOEFF11_SHIFT		(0)
+#define PDP_VID1HCOEFF11_VID1HCOEFF11_LENGTH		(32)
+#define PDP_VID1HCOEFF11_VID1HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF12_OFFSET		(0x04C0)
+
+/* PDP, VID1HCOEFF12, VID1HCOEFF12
+*/
+#define PDP_VID1HCOEFF12_VID1HCOEFF12_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF12_VID1HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF12_VID1HCOEFF12_SHIFT		(0)
+#define PDP_VID1HCOEFF12_VID1HCOEFF12_LENGTH		(32)
+#define PDP_VID1HCOEFF12_VID1HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF13_OFFSET		(0x04C4)
+
+/* PDP, VID1HCOEFF13, VID1HCOEFF13
+*/
+#define PDP_VID1HCOEFF13_VID1HCOEFF13_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF13_VID1HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF13_VID1HCOEFF13_SHIFT		(0)
+#define PDP_VID1HCOEFF13_VID1HCOEFF13_LENGTH		(32)
+#define PDP_VID1HCOEFF13_VID1HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF14_OFFSET		(0x04C8)
+
+/* PDP, VID1HCOEFF14, VID1HCOEFF14
+*/
+#define PDP_VID1HCOEFF14_VID1HCOEFF14_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF14_VID1HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF14_VID1HCOEFF14_SHIFT		(0)
+#define PDP_VID1HCOEFF14_VID1HCOEFF14_LENGTH		(32)
+#define PDP_VID1HCOEFF14_VID1HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF15_OFFSET		(0x04CC)
+
+/* PDP, VID1HCOEFF15, VID1HCOEFF15
+*/
+#define PDP_VID1HCOEFF15_VID1HCOEFF15_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF15_VID1HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF15_VID1HCOEFF15_SHIFT		(0)
+#define PDP_VID1HCOEFF15_VID1HCOEFF15_LENGTH		(32)
+#define PDP_VID1HCOEFF15_VID1HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF16_OFFSET		(0x04D0)
+
+/* PDP, VID1HCOEFF16, VID1HCOEFF16
+*/
+#define PDP_VID1HCOEFF16_VID1HCOEFF16_MASK		(0x000000FF)
+#define PDP_VID1HCOEFF16_VID1HCOEFF16_LSBMASK		(0x000000FF)
+#define PDP_VID1HCOEFF16_VID1HCOEFF16_SHIFT		(0)
+#define PDP_VID1HCOEFF16_VID1HCOEFF16_LENGTH		(8)
+#define PDP_VID1HCOEFF16_VID1HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1SCALESIZE_OFFSET		(0x04D4)
+
+/* PDP, VID1SCALESIZE, VID1SCALEWIDTH
+*/
+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_MASK		(0x0FFF0000)
+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_SHIFT		(16)
+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_LENGTH		(12)
+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALESIZE, VID1SCALEHEIGHT
+*/
+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_MASK		(0x00000FFF)
+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SHIFT		(0)
+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LENGTH		(12)
+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CORE_ID_OFFSET		(0x04E0)
+
+/* PDP, PVR_PDP_CORE_ID, GROUP_ID
+*/
+#define PDP_CORE_ID_GROUP_ID_MASK		(0xFF000000)
+#define PDP_CORE_ID_GROUP_ID_LSBMASK		(0x000000FF)
+#define PDP_CORE_ID_GROUP_ID_SHIFT		(24)
+#define PDP_CORE_ID_GROUP_ID_LENGTH		(8)
+#define PDP_CORE_ID_GROUP_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_PDP_CORE_ID, CORE_ID
+*/
+#define PDP_CORE_ID_CORE_ID_MASK		(0x00FF0000)
+#define PDP_CORE_ID_CORE_ID_LSBMASK		(0x000000FF)
+#define PDP_CORE_ID_CORE_ID_SHIFT		(16)
+#define PDP_CORE_ID_CORE_ID_LENGTH		(8)
+#define PDP_CORE_ID_CORE_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_PDP_CORE_ID, CONFIG_ID
+*/
+#define PDP_CORE_ID_CONFIG_ID_MASK		(0x0000FFFF)
+#define PDP_CORE_ID_CONFIG_ID_LSBMASK		(0x0000FFFF)
+#define PDP_CORE_ID_CONFIG_ID_SHIFT		(0)
+#define PDP_CORE_ID_CONFIG_ID_LENGTH		(16)
+#define PDP_CORE_ID_CONFIG_ID_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CORE_REV_OFFSET		(0x04F0)
+
+/* PDP, PVR_PDP_CORE_REV, MAJOR_REV
+*/
+#define PDP_CORE_REV_MAJOR_REV_MASK		(0x00FF0000)
+#define PDP_CORE_REV_MAJOR_REV_LSBMASK		(0x000000FF)
+#define PDP_CORE_REV_MAJOR_REV_SHIFT		(16)
+#define PDP_CORE_REV_MAJOR_REV_LENGTH		(8)
+#define PDP_CORE_REV_MAJOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_PDP_CORE_REV, MINOR_REV
+*/
+#define PDP_CORE_REV_MINOR_REV_MASK		(0x0000FF00)
+#define PDP_CORE_REV_MINOR_REV_LSBMASK		(0x000000FF)
+#define PDP_CORE_REV_MINOR_REV_SHIFT		(8)
+#define PDP_CORE_REV_MINOR_REV_LENGTH		(8)
+#define PDP_CORE_REV_MINOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_PDP_CORE_REV, MAINT_REV
+*/
+#define PDP_CORE_REV_MAINT_REV_MASK		(0x000000FF)
+#define PDP_CORE_REV_MAINT_REV_LSBMASK		(0x000000FF)
+#define PDP_CORE_REV_MAINT_REV_SHIFT		(0)
+#define PDP_CORE_REV_MAINT_REV_LENGTH		(8)
+#define PDP_CORE_REV_MAINT_REV_SIGNED_FIELD	IMG_FALSE
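+
+/*
+ * Illustrative sketch only -- not part of the original generated header.
+ * The _MASK/_SHIFT pairs above are typically combined as below to decode a
+ * register word; pdp_read_reg() and `base' are hypothetical stand-ins for
+ * whatever MMIO accessor and mapped register base the driver provides.
+ */
+#if 0	/* example, compiled out */
+static inline u32 pdp_get_field(u32 reg_val, u32 mask, u32 shift)
+{
+	/* Isolate the field in place, then right-align it. */
+	return (reg_val & mask) >> shift;
+}
+
+/* Decoding the core identification register: */
+u32 val    = pdp_read_reg(base, PDP_CORE_ID_OFFSET);
+u32 group  = pdp_get_field(val, PDP_CORE_ID_GROUP_ID_MASK,
+			   PDP_CORE_ID_GROUP_ID_SHIFT);
+u32 core   = pdp_get_field(val, PDP_CORE_ID_CORE_ID_MASK,
+			   PDP_CORE_ID_CORE_ID_SHIFT);
+u32 config = pdp_get_field(val, PDP_CORE_ID_CONFIG_ID_MASK,
+			   PDP_CORE_ID_CONFIG_ID_SHIFT);
+#endif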
+
+#define PDP_VID2SCALECTRL_OFFSET		(0x0500)
+
+/* PDP, VID2SCALECTRL, VID2HSCALEBP
+*/
+#define PDP_VID2SCALECTRL_VID2HSCALEBP_MASK		(0x80000000)
+#define PDP_VID2SCALECTRL_VID2HSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID2SCALECTRL_VID2HSCALEBP_SHIFT		(31)
+#define PDP_VID2SCALECTRL_VID2HSCALEBP_LENGTH		(1)
+#define PDP_VID2SCALECTRL_VID2HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VSCALEBP
+*/
+#define PDP_VID2SCALECTRL_VID2VSCALEBP_MASK		(0x40000000)
+#define PDP_VID2SCALECTRL_VID2VSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID2SCALECTRL_VID2VSCALEBP_SHIFT		(30)
+#define PDP_VID2SCALECTRL_VID2VSCALEBP_LENGTH		(1)
+#define PDP_VID2SCALECTRL_VID2VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2HSBEFOREVS
+*/
+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_MASK		(0x20000000)
+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_LSBMASK		(0x00000001)
+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_SHIFT		(29)
+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_LENGTH		(1)
+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VSURUNCTRL
+*/
+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_MASK		(0x08000000)
+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_LSBMASK		(0x00000001)
+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_SHIFT		(27)
+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_LENGTH		(1)
+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2PAN_EN
+*/
+#define PDP_VID2SCALECTRL_VID2PAN_EN_MASK		(0x00040000)
+#define PDP_VID2SCALECTRL_VID2PAN_EN_LSBMASK		(0x00000001)
+#define PDP_VID2SCALECTRL_VID2PAN_EN_SHIFT		(18)
+#define PDP_VID2SCALECTRL_VID2PAN_EN_LENGTH		(1)
+#define PDP_VID2SCALECTRL_VID2PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VORDER
+*/
+#define PDP_VID2SCALECTRL_VID2VORDER_MASK		(0x00030000)
+#define PDP_VID2SCALECTRL_VID2VORDER_LSBMASK		(0x00000003)
+#define PDP_VID2SCALECTRL_VID2VORDER_SHIFT		(16)
+#define PDP_VID2SCALECTRL_VID2VORDER_LENGTH		(2)
+#define PDP_VID2SCALECTRL_VID2VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VPITCH
+*/
+#define PDP_VID2SCALECTRL_VID2VPITCH_MASK		(0x0000FFFF)
+#define PDP_VID2SCALECTRL_VID2VPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID2SCALECTRL_VID2VPITCH_SHIFT		(0)
+#define PDP_VID2SCALECTRL_VID2VPITCH_LENGTH		(16)
+#define PDP_VID2SCALECTRL_VID2VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VSINIT_OFFSET		(0x0504)
+
+/* PDP, VID2VSINIT, VID2VINITIAL1
+*/
+#define PDP_VID2VSINIT_VID2VINITIAL1_MASK		(0xFFFF0000)
+#define PDP_VID2VSINIT_VID2VINITIAL1_LSBMASK		(0x0000FFFF)
+#define PDP_VID2VSINIT_VID2VINITIAL1_SHIFT		(16)
+#define PDP_VID2VSINIT_VID2VINITIAL1_LENGTH		(16)
+#define PDP_VID2VSINIT_VID2VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2VSINIT, VID2VINITIAL0
+*/
+#define PDP_VID2VSINIT_VID2VINITIAL0_MASK		(0x0000FFFF)
+#define PDP_VID2VSINIT_VID2VINITIAL0_LSBMASK		(0x0000FFFF)
+#define PDP_VID2VSINIT_VID2VINITIAL0_SHIFT		(0)
+#define PDP_VID2VSINIT_VID2VINITIAL0_LENGTH		(16)
+#define PDP_VID2VSINIT_VID2VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF0_OFFSET		(0x0508)
+
+/* PDP, VID2VCOEFF0, VID2VCOEFF0
+*/
+#define PDP_VID2VCOEFF0_VID2VCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF0_VID2VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF0_VID2VCOEFF0_SHIFT		(0)
+#define PDP_VID2VCOEFF0_VID2VCOEFF0_LENGTH		(32)
+#define PDP_VID2VCOEFF0_VID2VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF1_OFFSET		(0x050C)
+
+/* PDP, VID2VCOEFF1, VID2VCOEFF1
+*/
+#define PDP_VID2VCOEFF1_VID2VCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF1_VID2VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF1_VID2VCOEFF1_SHIFT		(0)
+#define PDP_VID2VCOEFF1_VID2VCOEFF1_LENGTH		(32)
+#define PDP_VID2VCOEFF1_VID2VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF2_OFFSET		(0x0510)
+
+/* PDP, VID2VCOEFF2, VID2VCOEFF2
+*/
+#define PDP_VID2VCOEFF2_VID2VCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF2_VID2VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF2_VID2VCOEFF2_SHIFT		(0)
+#define PDP_VID2VCOEFF2_VID2VCOEFF2_LENGTH		(32)
+#define PDP_VID2VCOEFF2_VID2VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF3_OFFSET		(0x0514)
+
+/* PDP, VID2VCOEFF3, VID2VCOEFF3
+*/
+#define PDP_VID2VCOEFF3_VID2VCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF3_VID2VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF3_VID2VCOEFF3_SHIFT		(0)
+#define PDP_VID2VCOEFF3_VID2VCOEFF3_LENGTH		(32)
+#define PDP_VID2VCOEFF3_VID2VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF4_OFFSET		(0x0518)
+
+/* PDP, VID2VCOEFF4, VID2VCOEFF4
+*/
+#define PDP_VID2VCOEFF4_VID2VCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF4_VID2VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF4_VID2VCOEFF4_SHIFT		(0)
+#define PDP_VID2VCOEFF4_VID2VCOEFF4_LENGTH		(32)
+#define PDP_VID2VCOEFF4_VID2VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF5_OFFSET		(0x051C)
+
+/* PDP, VID2VCOEFF5, VID2VCOEFF5
+*/
+#define PDP_VID2VCOEFF5_VID2VCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF5_VID2VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF5_VID2VCOEFF5_SHIFT		(0)
+#define PDP_VID2VCOEFF5_VID2VCOEFF5_LENGTH		(32)
+#define PDP_VID2VCOEFF5_VID2VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF6_OFFSET		(0x0520)
+
+/* PDP, VID2VCOEFF6, VID2VCOEFF6
+*/
+#define PDP_VID2VCOEFF6_VID2VCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF6_VID2VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF6_VID2VCOEFF6_SHIFT		(0)
+#define PDP_VID2VCOEFF6_VID2VCOEFF6_LENGTH		(32)
+#define PDP_VID2VCOEFF6_VID2VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF7_OFFSET		(0x0524)
+
+/* PDP, VID2VCOEFF7, VID2VCOEFF7
+*/
+#define PDP_VID2VCOEFF7_VID2VCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF7_VID2VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF7_VID2VCOEFF7_SHIFT		(0)
+#define PDP_VID2VCOEFF7_VID2VCOEFF7_LENGTH		(32)
+#define PDP_VID2VCOEFF7_VID2VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF8_OFFSET		(0x0528)
+
+/* PDP, VID2VCOEFF8, VID2VCOEFF8
+*/
+#define PDP_VID2VCOEFF8_VID2VCOEFF8_MASK		(0x000000FF)
+#define PDP_VID2VCOEFF8_VID2VCOEFF8_LSBMASK		(0x000000FF)
+#define PDP_VID2VCOEFF8_VID2VCOEFF8_SHIFT		(0)
+#define PDP_VID2VCOEFF8_VID2VCOEFF8_LENGTH		(8)
+#define PDP_VID2VCOEFF8_VID2VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HSINIT_OFFSET		(0x052C)
+
+/* PDP, VID2HSINIT, VID2HINITIAL
+*/
+#define PDP_VID2HSINIT_VID2HINITIAL_MASK		(0xFFFF0000)
+#define PDP_VID2HSINIT_VID2HINITIAL_LSBMASK		(0x0000FFFF)
+#define PDP_VID2HSINIT_VID2HINITIAL_SHIFT		(16)
+#define PDP_VID2HSINIT_VID2HINITIAL_LENGTH		(16)
+#define PDP_VID2HSINIT_VID2HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2HSINIT, VID2HPITCH
+*/
+#define PDP_VID2HSINIT_VID2HPITCH_MASK		(0x0000FFFF)
+#define PDP_VID2HSINIT_VID2HPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID2HSINIT_VID2HPITCH_SHIFT		(0)
+#define PDP_VID2HSINIT_VID2HPITCH_LENGTH		(16)
+#define PDP_VID2HSINIT_VID2HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF0_OFFSET		(0x0530)
+
+/* PDP, VID2HCOEFF0, VID2HCOEFF0
+*/
+#define PDP_VID2HCOEFF0_VID2HCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF0_VID2HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF0_VID2HCOEFF0_SHIFT		(0)
+#define PDP_VID2HCOEFF0_VID2HCOEFF0_LENGTH		(32)
+#define PDP_VID2HCOEFF0_VID2HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF1_OFFSET		(0x0534)
+
+/* PDP, VID2HCOEFF1, VID2HCOEFF1
+*/
+#define PDP_VID2HCOEFF1_VID2HCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF1_VID2HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF1_VID2HCOEFF1_SHIFT		(0)
+#define PDP_VID2HCOEFF1_VID2HCOEFF1_LENGTH		(32)
+#define PDP_VID2HCOEFF1_VID2HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF2_OFFSET		(0x0538)
+
+/* PDP, VID2HCOEFF2, VID2HCOEFF2
+*/
+#define PDP_VID2HCOEFF2_VID2HCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF2_VID2HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF2_VID2HCOEFF2_SHIFT		(0)
+#define PDP_VID2HCOEFF2_VID2HCOEFF2_LENGTH		(32)
+#define PDP_VID2HCOEFF2_VID2HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF3_OFFSET		(0x053C)
+
+/* PDP, VID2HCOEFF3, VID2HCOEFF3
+*/
+#define PDP_VID2HCOEFF3_VID2HCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF3_VID2HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF3_VID2HCOEFF3_SHIFT		(0)
+#define PDP_VID2HCOEFF3_VID2HCOEFF3_LENGTH		(32)
+#define PDP_VID2HCOEFF3_VID2HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF4_OFFSET		(0x0540)
+
+/* PDP, VID2HCOEFF4, VID2HCOEFF4
+*/
+#define PDP_VID2HCOEFF4_VID2HCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF4_VID2HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF4_VID2HCOEFF4_SHIFT		(0)
+#define PDP_VID2HCOEFF4_VID2HCOEFF4_LENGTH		(32)
+#define PDP_VID2HCOEFF4_VID2HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF5_OFFSET		(0x0544)
+
+/* PDP, VID2HCOEFF5, VID2HCOEFF5
+*/
+#define PDP_VID2HCOEFF5_VID2HCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF5_VID2HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF5_VID2HCOEFF5_SHIFT		(0)
+#define PDP_VID2HCOEFF5_VID2HCOEFF5_LENGTH		(32)
+#define PDP_VID2HCOEFF5_VID2HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF6_OFFSET		(0x0548)
+
+/* PDP, VID2HCOEFF6, VID2HCOEFF6
+*/
+#define PDP_VID2HCOEFF6_VID2HCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF6_VID2HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF6_VID2HCOEFF6_SHIFT		(0)
+#define PDP_VID2HCOEFF6_VID2HCOEFF6_LENGTH		(32)
+#define PDP_VID2HCOEFF6_VID2HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF7_OFFSET		(0x054C)
+
+/* PDP, VID2HCOEFF7, VID2HCOEFF7
+*/
+#define PDP_VID2HCOEFF7_VID2HCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF7_VID2HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF7_VID2HCOEFF7_SHIFT		(0)
+#define PDP_VID2HCOEFF7_VID2HCOEFF7_LENGTH		(32)
+#define PDP_VID2HCOEFF7_VID2HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF8_OFFSET		(0x0550)
+
+/* PDP, VID2HCOEFF8, VID2HCOEFF8
+*/
+#define PDP_VID2HCOEFF8_VID2HCOEFF8_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF8_VID2HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF8_VID2HCOEFF8_SHIFT		(0)
+#define PDP_VID2HCOEFF8_VID2HCOEFF8_LENGTH		(32)
+#define PDP_VID2HCOEFF8_VID2HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF9_OFFSET		(0x0554)
+
+/* PDP, VID2HCOEFF9, VID2HCOEFF9
+*/
+#define PDP_VID2HCOEFF9_VID2HCOEFF9_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF9_VID2HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF9_VID2HCOEFF9_SHIFT		(0)
+#define PDP_VID2HCOEFF9_VID2HCOEFF9_LENGTH		(32)
+#define PDP_VID2HCOEFF9_VID2HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF10_OFFSET		(0x0558)
+
+/* PDP, VID2HCOEFF10, VID2HCOEFF10
+*/
+#define PDP_VID2HCOEFF10_VID2HCOEFF10_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF10_VID2HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF10_VID2HCOEFF10_SHIFT		(0)
+#define PDP_VID2HCOEFF10_VID2HCOEFF10_LENGTH		(32)
+#define PDP_VID2HCOEFF10_VID2HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF11_OFFSET		(0x055C)
+
+/* PDP, VID2HCOEFF11, VID2HCOEFF11
+*/
+#define PDP_VID2HCOEFF11_VID2HCOEFF11_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF11_VID2HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF11_VID2HCOEFF11_SHIFT		(0)
+#define PDP_VID2HCOEFF11_VID2HCOEFF11_LENGTH		(32)
+#define PDP_VID2HCOEFF11_VID2HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF12_OFFSET		(0x0560)
+
+/* PDP, VID2HCOEFF12, VID2HCOEFF12
+*/
+#define PDP_VID2HCOEFF12_VID2HCOEFF12_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF12_VID2HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF12_VID2HCOEFF12_SHIFT		(0)
+#define PDP_VID2HCOEFF12_VID2HCOEFF12_LENGTH		(32)
+#define PDP_VID2HCOEFF12_VID2HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF13_OFFSET		(0x0564)
+
+/* PDP, VID2HCOEFF13, VID2HCOEFF13
+*/
+#define PDP_VID2HCOEFF13_VID2HCOEFF13_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF13_VID2HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF13_VID2HCOEFF13_SHIFT		(0)
+#define PDP_VID2HCOEFF13_VID2HCOEFF13_LENGTH		(32)
+#define PDP_VID2HCOEFF13_VID2HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF14_OFFSET		(0x0568)
+
+/* PDP, VID2HCOEFF14, VID2HCOEFF14
+*/
+#define PDP_VID2HCOEFF14_VID2HCOEFF14_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF14_VID2HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF14_VID2HCOEFF14_SHIFT		(0)
+#define PDP_VID2HCOEFF14_VID2HCOEFF14_LENGTH		(32)
+#define PDP_VID2HCOEFF14_VID2HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF15_OFFSET		(0x056C)
+
+/* PDP, VID2HCOEFF15, VID2HCOEFF15
+*/
+#define PDP_VID2HCOEFF15_VID2HCOEFF15_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF15_VID2HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF15_VID2HCOEFF15_SHIFT		(0)
+#define PDP_VID2HCOEFF15_VID2HCOEFF15_LENGTH		(32)
+#define PDP_VID2HCOEFF15_VID2HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF16_OFFSET		(0x0570)
+
+/* PDP, VID2HCOEFF16, VID2HCOEFF16
+*/
+#define PDP_VID2HCOEFF16_VID2HCOEFF16_MASK		(0x000000FF)
+#define PDP_VID2HCOEFF16_VID2HCOEFF16_LSBMASK		(0x000000FF)
+#define PDP_VID2HCOEFF16_VID2HCOEFF16_SHIFT		(0)
+#define PDP_VID2HCOEFF16_VID2HCOEFF16_LENGTH		(8)
+#define PDP_VID2HCOEFF16_VID2HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2SCALESIZE_OFFSET		(0x0574)
+
+/* PDP, VID2SCALESIZE, VID2SCALEWIDTH
+*/
+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_MASK		(0x0FFF0000)
+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_SHIFT		(16)
+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_LENGTH		(12)
+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALESIZE, VID2SCALEHEIGHT
+*/
+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_MASK		(0x00000FFF)
+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SHIFT		(0)
+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LENGTH		(12)
+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3SCALECTRL_OFFSET		(0x0578)
+
+/* PDP, VID3SCALECTRL, VID3HSCALEBP
+*/
+#define PDP_VID3SCALECTRL_VID3HSCALEBP_MASK		(0x80000000)
+#define PDP_VID3SCALECTRL_VID3HSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID3SCALECTRL_VID3HSCALEBP_SHIFT		(31)
+#define PDP_VID3SCALECTRL_VID3HSCALEBP_LENGTH		(1)
+#define PDP_VID3SCALECTRL_VID3HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VSCALEBP
+*/
+#define PDP_VID3SCALECTRL_VID3VSCALEBP_MASK		(0x40000000)
+#define PDP_VID3SCALECTRL_VID3VSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID3SCALECTRL_VID3VSCALEBP_SHIFT		(30)
+#define PDP_VID3SCALECTRL_VID3VSCALEBP_LENGTH		(1)
+#define PDP_VID3SCALECTRL_VID3VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3HSBEFOREVS
+*/
+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_MASK		(0x20000000)
+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_LSBMASK		(0x00000001)
+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_SHIFT		(29)
+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_LENGTH		(1)
+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VSURUNCTRL
+*/
+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_MASK		(0x08000000)
+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_LSBMASK		(0x00000001)
+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_SHIFT		(27)
+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_LENGTH		(1)
+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3PAN_EN
+*/
+#define PDP_VID3SCALECTRL_VID3PAN_EN_MASK		(0x00040000)
+#define PDP_VID3SCALECTRL_VID3PAN_EN_LSBMASK		(0x00000001)
+#define PDP_VID3SCALECTRL_VID3PAN_EN_SHIFT		(18)
+#define PDP_VID3SCALECTRL_VID3PAN_EN_LENGTH		(1)
+#define PDP_VID3SCALECTRL_VID3PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VORDER
+*/
+#define PDP_VID3SCALECTRL_VID3VORDER_MASK		(0x00030000)
+#define PDP_VID3SCALECTRL_VID3VORDER_LSBMASK		(0x00000003)
+#define PDP_VID3SCALECTRL_VID3VORDER_SHIFT		(16)
+#define PDP_VID3SCALECTRL_VID3VORDER_LENGTH		(2)
+#define PDP_VID3SCALECTRL_VID3VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VPITCH
+*/
+#define PDP_VID3SCALECTRL_VID3VPITCH_MASK		(0x0000FFFF)
+#define PDP_VID3SCALECTRL_VID3VPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID3SCALECTRL_VID3VPITCH_SHIFT		(0)
+#define PDP_VID3SCALECTRL_VID3VPITCH_LENGTH		(16)
+#define PDP_VID3SCALECTRL_VID3VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VSINIT_OFFSET		(0x057C)
+
+/* PDP, VID3VSINIT, VID3VINITIAL1
+*/
+#define PDP_VID3VSINIT_VID3VINITIAL1_MASK		(0xFFFF0000)
+#define PDP_VID3VSINIT_VID3VINITIAL1_LSBMASK		(0x0000FFFF)
+#define PDP_VID3VSINIT_VID3VINITIAL1_SHIFT		(16)
+#define PDP_VID3VSINIT_VID3VINITIAL1_LENGTH		(16)
+#define PDP_VID3VSINIT_VID3VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3VSINIT, VID3VINITIAL0
+*/
+#define PDP_VID3VSINIT_VID3VINITIAL0_MASK		(0x0000FFFF)
+#define PDP_VID3VSINIT_VID3VINITIAL0_LSBMASK		(0x0000FFFF)
+#define PDP_VID3VSINIT_VID3VINITIAL0_SHIFT		(0)
+#define PDP_VID3VSINIT_VID3VINITIAL0_LENGTH		(16)
+#define PDP_VID3VSINIT_VID3VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF0_OFFSET		(0x0580)
+
+/* PDP, VID3VCOEFF0, VID3VCOEFF0
+*/
+#define PDP_VID3VCOEFF0_VID3VCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF0_VID3VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF0_VID3VCOEFF0_SHIFT		(0)
+#define PDP_VID3VCOEFF0_VID3VCOEFF0_LENGTH		(32)
+#define PDP_VID3VCOEFF0_VID3VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF1_OFFSET		(0x0584)
+
+/* PDP, VID3VCOEFF1, VID3VCOEFF1
+*/
+#define PDP_VID3VCOEFF1_VID3VCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF1_VID3VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF1_VID3VCOEFF1_SHIFT		(0)
+#define PDP_VID3VCOEFF1_VID3VCOEFF1_LENGTH		(32)
+#define PDP_VID3VCOEFF1_VID3VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF2_OFFSET		(0x0588)
+
+/* PDP, VID3VCOEFF2, VID3VCOEFF2
+*/
+#define PDP_VID3VCOEFF2_VID3VCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF2_VID3VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF2_VID3VCOEFF2_SHIFT		(0)
+#define PDP_VID3VCOEFF2_VID3VCOEFF2_LENGTH		(32)
+#define PDP_VID3VCOEFF2_VID3VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF3_OFFSET		(0x058C)
+
+/* PDP, VID3VCOEFF3, VID3VCOEFF3
+*/
+#define PDP_VID3VCOEFF3_VID3VCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF3_VID3VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF3_VID3VCOEFF3_SHIFT		(0)
+#define PDP_VID3VCOEFF3_VID3VCOEFF3_LENGTH		(32)
+#define PDP_VID3VCOEFF3_VID3VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF4_OFFSET		(0x0590)
+
+/* PDP, VID3VCOEFF4, VID3VCOEFF4
+*/
+#define PDP_VID3VCOEFF4_VID3VCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF4_VID3VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF4_VID3VCOEFF4_SHIFT		(0)
+#define PDP_VID3VCOEFF4_VID3VCOEFF4_LENGTH		(32)
+#define PDP_VID3VCOEFF4_VID3VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF5_OFFSET		(0x0594)
+
+/* PDP, VID3VCOEFF5, VID3VCOEFF5
+*/
+#define PDP_VID3VCOEFF5_VID3VCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF5_VID3VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF5_VID3VCOEFF5_SHIFT		(0)
+#define PDP_VID3VCOEFF5_VID3VCOEFF5_LENGTH		(32)
+#define PDP_VID3VCOEFF5_VID3VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF6_OFFSET		(0x0598)
+
+/* PDP, VID3VCOEFF6, VID3VCOEFF6
+*/
+#define PDP_VID3VCOEFF6_VID3VCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF6_VID3VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF6_VID3VCOEFF6_SHIFT		(0)
+#define PDP_VID3VCOEFF6_VID3VCOEFF6_LENGTH		(32)
+#define PDP_VID3VCOEFF6_VID3VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF7_OFFSET		(0x059C)
+
+/* PDP, VID3VCOEFF7, VID3VCOEFF7
+*/
+#define PDP_VID3VCOEFF7_VID3VCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF7_VID3VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF7_VID3VCOEFF7_SHIFT		(0)
+#define PDP_VID3VCOEFF7_VID3VCOEFF7_LENGTH		(32)
+#define PDP_VID3VCOEFF7_VID3VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF8_OFFSET		(0x05A0)
+
+/* PDP, VID3VCOEFF8, VID3VCOEFF8
+*/
+#define PDP_VID3VCOEFF8_VID3VCOEFF8_MASK		(0x000000FF)
+#define PDP_VID3VCOEFF8_VID3VCOEFF8_LSBMASK		(0x000000FF)
+#define PDP_VID3VCOEFF8_VID3VCOEFF8_SHIFT		(0)
+#define PDP_VID3VCOEFF8_VID3VCOEFF8_LENGTH		(8)
+#define PDP_VID3VCOEFF8_VID3VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HSINIT_OFFSET		(0x05A4)
+
+/* PDP, VID3HSINIT, VID3HINITIAL
+*/
+#define PDP_VID3HSINIT_VID3HINITIAL_MASK		(0xFFFF0000)
+#define PDP_VID3HSINIT_VID3HINITIAL_LSBMASK		(0x0000FFFF)
+#define PDP_VID3HSINIT_VID3HINITIAL_SHIFT		(16)
+#define PDP_VID3HSINIT_VID3HINITIAL_LENGTH		(16)
+#define PDP_VID3HSINIT_VID3HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3HSINIT, VID3HPITCH
+*/
+#define PDP_VID3HSINIT_VID3HPITCH_MASK		(0x0000FFFF)
+#define PDP_VID3HSINIT_VID3HPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID3HSINIT_VID3HPITCH_SHIFT		(0)
+#define PDP_VID3HSINIT_VID3HPITCH_LENGTH		(16)
+#define PDP_VID3HSINIT_VID3HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF0_OFFSET		(0x05A8)
+
+/* PDP, VID3HCOEFF0, VID3HCOEFF0
+*/
+#define PDP_VID3HCOEFF0_VID3HCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF0_VID3HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF0_VID3HCOEFF0_SHIFT		(0)
+#define PDP_VID3HCOEFF0_VID3HCOEFF0_LENGTH		(32)
+#define PDP_VID3HCOEFF0_VID3HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF1_OFFSET		(0x05AC)
+
+/* PDP, VID3HCOEFF1, VID3HCOEFF1
+*/
+#define PDP_VID3HCOEFF1_VID3HCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF1_VID3HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF1_VID3HCOEFF1_SHIFT		(0)
+#define PDP_VID3HCOEFF1_VID3HCOEFF1_LENGTH		(32)
+#define PDP_VID3HCOEFF1_VID3HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF2_OFFSET		(0x05B0)
+
+/* PDP, VID3HCOEFF2, VID3HCOEFF2
+*/
+#define PDP_VID3HCOEFF2_VID3HCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF2_VID3HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF2_VID3HCOEFF2_SHIFT		(0)
+#define PDP_VID3HCOEFF2_VID3HCOEFF2_LENGTH		(32)
+#define PDP_VID3HCOEFF2_VID3HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF3_OFFSET		(0x05B4)
+
+/* PDP, VID3HCOEFF3, VID3HCOEFF3
+*/
+#define PDP_VID3HCOEFF3_VID3HCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF3_VID3HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF3_VID3HCOEFF3_SHIFT		(0)
+#define PDP_VID3HCOEFF3_VID3HCOEFF3_LENGTH		(32)
+#define PDP_VID3HCOEFF3_VID3HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF4_OFFSET		(0x05B8)
+
+/* PDP, VID3HCOEFF4, VID3HCOEFF4
+*/
+#define PDP_VID3HCOEFF4_VID3HCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF4_VID3HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF4_VID3HCOEFF4_SHIFT		(0)
+#define PDP_VID3HCOEFF4_VID3HCOEFF4_LENGTH		(32)
+#define PDP_VID3HCOEFF4_VID3HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF5_OFFSET		(0x05BC)
+
+/* PDP, VID3HCOEFF5, VID3HCOEFF5
+*/
+#define PDP_VID3HCOEFF5_VID3HCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF5_VID3HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF5_VID3HCOEFF5_SHIFT		(0)
+#define PDP_VID3HCOEFF5_VID3HCOEFF5_LENGTH		(32)
+#define PDP_VID3HCOEFF5_VID3HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF6_OFFSET		(0x05C0)
+
+/* PDP, VID3HCOEFF6, VID3HCOEFF6
+*/
+#define PDP_VID3HCOEFF6_VID3HCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF6_VID3HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF6_VID3HCOEFF6_SHIFT		(0)
+#define PDP_VID3HCOEFF6_VID3HCOEFF6_LENGTH		(32)
+#define PDP_VID3HCOEFF6_VID3HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF7_OFFSET		(0x05C4)
+
+/* PDP, VID3HCOEFF7, VID3HCOEFF7
+*/
+#define PDP_VID3HCOEFF7_VID3HCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF7_VID3HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF7_VID3HCOEFF7_SHIFT		(0)
+#define PDP_VID3HCOEFF7_VID3HCOEFF7_LENGTH		(32)
+#define PDP_VID3HCOEFF7_VID3HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF8_OFFSET		(0x05C8)
+
+/* PDP, VID3HCOEFF8, VID3HCOEFF8
+*/
+#define PDP_VID3HCOEFF8_VID3HCOEFF8_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF8_VID3HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF8_VID3HCOEFF8_SHIFT		(0)
+#define PDP_VID3HCOEFF8_VID3HCOEFF8_LENGTH		(32)
+#define PDP_VID3HCOEFF8_VID3HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF9_OFFSET		(0x05CC)
+
+/* PDP, VID3HCOEFF9, VID3HCOEFF9
+*/
+#define PDP_VID3HCOEFF9_VID3HCOEFF9_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF9_VID3HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF9_VID3HCOEFF9_SHIFT		(0)
+#define PDP_VID3HCOEFF9_VID3HCOEFF9_LENGTH		(32)
+#define PDP_VID3HCOEFF9_VID3HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF10_OFFSET		(0x05D0)
+
+/* PDP, VID3HCOEFF10, VID3HCOEFF10
+*/
+#define PDP_VID3HCOEFF10_VID3HCOEFF10_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF10_VID3HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF10_VID3HCOEFF10_SHIFT		(0)
+#define PDP_VID3HCOEFF10_VID3HCOEFF10_LENGTH		(32)
+#define PDP_VID3HCOEFF10_VID3HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF11_OFFSET		(0x05D4)
+
+/* PDP, VID3HCOEFF11, VID3HCOEFF11
+*/
+#define PDP_VID3HCOEFF11_VID3HCOEFF11_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF11_VID3HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF11_VID3HCOEFF11_SHIFT		(0)
+#define PDP_VID3HCOEFF11_VID3HCOEFF11_LENGTH		(32)
+#define PDP_VID3HCOEFF11_VID3HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF12_OFFSET		(0x05D8)
+
+/* PDP, VID3HCOEFF12, VID3HCOEFF12
+*/
+#define PDP_VID3HCOEFF12_VID3HCOEFF12_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF12_VID3HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF12_VID3HCOEFF12_SHIFT		(0)
+#define PDP_VID3HCOEFF12_VID3HCOEFF12_LENGTH		(32)
+#define PDP_VID3HCOEFF12_VID3HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF13_OFFSET		(0x05DC)
+
+/* PDP, VID3HCOEFF13, VID3HCOEFF13
+*/
+#define PDP_VID3HCOEFF13_VID3HCOEFF13_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF13_VID3HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF13_VID3HCOEFF13_SHIFT		(0)
+#define PDP_VID3HCOEFF13_VID3HCOEFF13_LENGTH		(32)
+#define PDP_VID3HCOEFF13_VID3HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF14_OFFSET		(0x05E0)
+
+/* PDP, VID3HCOEFF14, VID3HCOEFF14
+*/
+#define PDP_VID3HCOEFF14_VID3HCOEFF14_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF14_VID3HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF14_VID3HCOEFF14_SHIFT		(0)
+#define PDP_VID3HCOEFF14_VID3HCOEFF14_LENGTH		(32)
+#define PDP_VID3HCOEFF14_VID3HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF15_OFFSET		(0x05E4)
+
+/* PDP, VID3HCOEFF15, VID3HCOEFF15
+*/
+#define PDP_VID3HCOEFF15_VID3HCOEFF15_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF15_VID3HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF15_VID3HCOEFF15_SHIFT		(0)
+#define PDP_VID3HCOEFF15_VID3HCOEFF15_LENGTH		(32)
+#define PDP_VID3HCOEFF15_VID3HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF16_OFFSET		(0x05E8)
+
+/* PDP, VID3HCOEFF16, VID3HCOEFF16
+*/
+#define PDP_VID3HCOEFF16_VID3HCOEFF16_MASK		(0x000000FF)
+#define PDP_VID3HCOEFF16_VID3HCOEFF16_LSBMASK		(0x000000FF)
+#define PDP_VID3HCOEFF16_VID3HCOEFF16_SHIFT		(0)
+#define PDP_VID3HCOEFF16_VID3HCOEFF16_LENGTH		(8)
+#define PDP_VID3HCOEFF16_VID3HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3SCALESIZE_OFFSET		(0x05EC)
+
+/* PDP, VID3SCALESIZE, VID3SCALEWIDTH
+*/
+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK		(0x0FFF0000)
+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT		(16)
+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_LENGTH		(12)
+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALESIZE, VID3SCALEHEIGHT
+*/
+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_MASK		(0x00000FFF)
+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SHIFT		(0)
+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LENGTH		(12)
+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4SCALECTRL_OFFSET		(0x05F0)
+
+/* PDP, VID4SCALECTRL, VID4HSCALEBP
+*/
+#define PDP_VID4SCALECTRL_VID4HSCALEBP_MASK		(0x80000000)
+#define PDP_VID4SCALECTRL_VID4HSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID4SCALECTRL_VID4HSCALEBP_SHIFT		(31)
+#define PDP_VID4SCALECTRL_VID4HSCALEBP_LENGTH		(1)
+#define PDP_VID4SCALECTRL_VID4HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VSCALEBP
+*/
+#define PDP_VID4SCALECTRL_VID4VSCALEBP_MASK		(0x40000000)
+#define PDP_VID4SCALECTRL_VID4VSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID4SCALECTRL_VID4VSCALEBP_SHIFT		(30)
+#define PDP_VID4SCALECTRL_VID4VSCALEBP_LENGTH		(1)
+#define PDP_VID4SCALECTRL_VID4VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4HSBEFOREVS
+*/
+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_MASK		(0x20000000)
+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_LSBMASK		(0x00000001)
+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_SHIFT		(29)
+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_LENGTH		(1)
+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VSURUNCTRL
+*/
+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_MASK		(0x08000000)
+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_LSBMASK		(0x00000001)
+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_SHIFT		(27)
+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_LENGTH		(1)
+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4PAN_EN
+*/
+#define PDP_VID4SCALECTRL_VID4PAN_EN_MASK		(0x00040000)
+#define PDP_VID4SCALECTRL_VID4PAN_EN_LSBMASK		(0x00000001)
+#define PDP_VID4SCALECTRL_VID4PAN_EN_SHIFT		(18)
+#define PDP_VID4SCALECTRL_VID4PAN_EN_LENGTH		(1)
+#define PDP_VID4SCALECTRL_VID4PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VORDER
+*/
+#define PDP_VID4SCALECTRL_VID4VORDER_MASK		(0x00030000)
+#define PDP_VID4SCALECTRL_VID4VORDER_LSBMASK		(0x00000003)
+#define PDP_VID4SCALECTRL_VID4VORDER_SHIFT		(16)
+#define PDP_VID4SCALECTRL_VID4VORDER_LENGTH		(2)
+#define PDP_VID4SCALECTRL_VID4VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VPITCH
+*/
+#define PDP_VID4SCALECTRL_VID4VPITCH_MASK		(0x0000FFFF)
+#define PDP_VID4SCALECTRL_VID4VPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID4SCALECTRL_VID4VPITCH_SHIFT		(0)
+#define PDP_VID4SCALECTRL_VID4VPITCH_LENGTH		(16)
+#define PDP_VID4SCALECTRL_VID4VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VSINIT_OFFSET		(0x05F4)
+
+/* PDP, VID4VSINIT, VID4VINITIAL1
+*/
+#define PDP_VID4VSINIT_VID4VINITIAL1_MASK		(0xFFFF0000)
+#define PDP_VID4VSINIT_VID4VINITIAL1_LSBMASK		(0x0000FFFF)
+#define PDP_VID4VSINIT_VID4VINITIAL1_SHIFT		(16)
+#define PDP_VID4VSINIT_VID4VINITIAL1_LENGTH		(16)
+#define PDP_VID4VSINIT_VID4VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4VSINIT, VID4VINITIAL0
+*/
+#define PDP_VID4VSINIT_VID4VINITIAL0_MASK		(0x0000FFFF)
+#define PDP_VID4VSINIT_VID4VINITIAL0_LSBMASK		(0x0000FFFF)
+#define PDP_VID4VSINIT_VID4VINITIAL0_SHIFT		(0)
+#define PDP_VID4VSINIT_VID4VINITIAL0_LENGTH		(16)
+#define PDP_VID4VSINIT_VID4VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF0_OFFSET		(0x05F8)
+
+/* PDP, VID4VCOEFF0, VID4VCOEFF0
+*/
+#define PDP_VID4VCOEFF0_VID4VCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF0_VID4VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF0_VID4VCOEFF0_SHIFT		(0)
+#define PDP_VID4VCOEFF0_VID4VCOEFF0_LENGTH		(32)
+#define PDP_VID4VCOEFF0_VID4VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF1_OFFSET		(0x05FC)
+
+/* PDP, VID4VCOEFF1, VID4VCOEFF1
+*/
+#define PDP_VID4VCOEFF1_VID4VCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF1_VID4VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF1_VID4VCOEFF1_SHIFT		(0)
+#define PDP_VID4VCOEFF1_VID4VCOEFF1_LENGTH		(32)
+#define PDP_VID4VCOEFF1_VID4VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF2_OFFSET		(0x0600)
+
+/* PDP, VID4VCOEFF2, VID4VCOEFF2
+*/
+#define PDP_VID4VCOEFF2_VID4VCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF2_VID4VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF2_VID4VCOEFF2_SHIFT		(0)
+#define PDP_VID4VCOEFF2_VID4VCOEFF2_LENGTH		(32)
+#define PDP_VID4VCOEFF2_VID4VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF3_OFFSET		(0x0604)
+
+/* PDP, VID4VCOEFF3, VID4VCOEFF3
+*/
+#define PDP_VID4VCOEFF3_VID4VCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF3_VID4VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF3_VID4VCOEFF3_SHIFT		(0)
+#define PDP_VID4VCOEFF3_VID4VCOEFF3_LENGTH		(32)
+#define PDP_VID4VCOEFF3_VID4VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF4_OFFSET		(0x0608)
+
+/* PDP, VID4VCOEFF4, VID4VCOEFF4
+*/
+#define PDP_VID4VCOEFF4_VID4VCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF4_VID4VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF4_VID4VCOEFF4_SHIFT		(0)
+#define PDP_VID4VCOEFF4_VID4VCOEFF4_LENGTH		(32)
+#define PDP_VID4VCOEFF4_VID4VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF5_OFFSET		(0x060C)
+
+/* PDP, VID4VCOEFF5, VID4VCOEFF5
+*/
+#define PDP_VID4VCOEFF5_VID4VCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF5_VID4VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF5_VID4VCOEFF5_SHIFT		(0)
+#define PDP_VID4VCOEFF5_VID4VCOEFF5_LENGTH		(32)
+#define PDP_VID4VCOEFF5_VID4VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF6_OFFSET		(0x0610)
+
+/* PDP, VID4VCOEFF6, VID4VCOEFF6
+*/
+#define PDP_VID4VCOEFF6_VID4VCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF6_VID4VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF6_VID4VCOEFF6_SHIFT		(0)
+#define PDP_VID4VCOEFF6_VID4VCOEFF6_LENGTH		(32)
+#define PDP_VID4VCOEFF6_VID4VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF7_OFFSET		(0x0614)
+
+/* PDP, VID4VCOEFF7, VID4VCOEFF7
+*/
+#define PDP_VID4VCOEFF7_VID4VCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF7_VID4VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF7_VID4VCOEFF7_SHIFT		(0)
+#define PDP_VID4VCOEFF7_VID4VCOEFF7_LENGTH		(32)
+#define PDP_VID4VCOEFF7_VID4VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF8_OFFSET		(0x0618)
+
+/* PDP, VID4VCOEFF8, VID4VCOEFF8
+*/
+#define PDP_VID4VCOEFF8_VID4VCOEFF8_MASK		(0x000000FF)
+#define PDP_VID4VCOEFF8_VID4VCOEFF8_LSBMASK		(0x000000FF)
+#define PDP_VID4VCOEFF8_VID4VCOEFF8_SHIFT		(0)
+#define PDP_VID4VCOEFF8_VID4VCOEFF8_LENGTH		(8)
+#define PDP_VID4VCOEFF8_VID4VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HSINIT_OFFSET		(0x061C)
+
+/* PDP, VID4HSINIT, VID4HINITIAL
+*/
+#define PDP_VID4HSINIT_VID4HINITIAL_MASK		(0xFFFF0000)
+#define PDP_VID4HSINIT_VID4HINITIAL_LSBMASK		(0x0000FFFF)
+#define PDP_VID4HSINIT_VID4HINITIAL_SHIFT		(16)
+#define PDP_VID4HSINIT_VID4HINITIAL_LENGTH		(16)
+#define PDP_VID4HSINIT_VID4HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4HSINIT, VID4HPITCH
+*/
+#define PDP_VID4HSINIT_VID4HPITCH_MASK		(0x0000FFFF)
+#define PDP_VID4HSINIT_VID4HPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID4HSINIT_VID4HPITCH_SHIFT		(0)
+#define PDP_VID4HSINIT_VID4HPITCH_LENGTH		(16)
+#define PDP_VID4HSINIT_VID4HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF0_OFFSET		(0x0620)
+
+/* PDP, VID4HCOEFF0, VID4HCOEFF0
+*/
+#define PDP_VID4HCOEFF0_VID4HCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF0_VID4HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF0_VID4HCOEFF0_SHIFT		(0)
+#define PDP_VID4HCOEFF0_VID4HCOEFF0_LENGTH		(32)
+#define PDP_VID4HCOEFF0_VID4HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF1_OFFSET		(0x0624)
+
+/* PDP, VID4HCOEFF1, VID4HCOEFF1
+*/
+#define PDP_VID4HCOEFF1_VID4HCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF1_VID4HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF1_VID4HCOEFF1_SHIFT		(0)
+#define PDP_VID4HCOEFF1_VID4HCOEFF1_LENGTH		(32)
+#define PDP_VID4HCOEFF1_VID4HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF2_OFFSET		(0x0628)
+
+/* PDP, VID4HCOEFF2, VID4HCOEFF2
+*/
+#define PDP_VID4HCOEFF2_VID4HCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF2_VID4HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF2_VID4HCOEFF2_SHIFT		(0)
+#define PDP_VID4HCOEFF2_VID4HCOEFF2_LENGTH		(32)
+#define PDP_VID4HCOEFF2_VID4HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF3_OFFSET		(0x062C)
+
+/* PDP, VID4HCOEFF3, VID4HCOEFF3
+*/
+#define PDP_VID4HCOEFF3_VID4HCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF3_VID4HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF3_VID4HCOEFF3_SHIFT		(0)
+#define PDP_VID4HCOEFF3_VID4HCOEFF3_LENGTH		(32)
+#define PDP_VID4HCOEFF3_VID4HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF4_OFFSET		(0x0630)
+
+/* PDP, VID4HCOEFF4, VID4HCOEFF4
+*/
+#define PDP_VID4HCOEFF4_VID4HCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF4_VID4HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF4_VID4HCOEFF4_SHIFT		(0)
+#define PDP_VID4HCOEFF4_VID4HCOEFF4_LENGTH		(32)
+#define PDP_VID4HCOEFF4_VID4HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF5_OFFSET		(0x0634)
+
+/* PDP, VID4HCOEFF5, VID4HCOEFF5
+*/
+#define PDP_VID4HCOEFF5_VID4HCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF5_VID4HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF5_VID4HCOEFF5_SHIFT		(0)
+#define PDP_VID4HCOEFF5_VID4HCOEFF5_LENGTH		(32)
+#define PDP_VID4HCOEFF5_VID4HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF6_OFFSET		(0x0638)
+
+/* PDP, VID4HCOEFF6, VID4HCOEFF6
+*/
+#define PDP_VID4HCOEFF6_VID4HCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF6_VID4HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF6_VID4HCOEFF6_SHIFT		(0)
+#define PDP_VID4HCOEFF6_VID4HCOEFF6_LENGTH		(32)
+#define PDP_VID4HCOEFF6_VID4HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF7_OFFSET		(0x063C)
+
+/* PDP, VID4HCOEFF7, VID4HCOEFF7
+*/
+#define PDP_VID4HCOEFF7_VID4HCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF7_VID4HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF7_VID4HCOEFF7_SHIFT		(0)
+#define PDP_VID4HCOEFF7_VID4HCOEFF7_LENGTH		(32)
+#define PDP_VID4HCOEFF7_VID4HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF8_OFFSET		(0x0640)
+
+/* PDP, VID4HCOEFF8, VID4HCOEFF8
+*/
+#define PDP_VID4HCOEFF8_VID4HCOEFF8_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF8_VID4HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF8_VID4HCOEFF8_SHIFT		(0)
+#define PDP_VID4HCOEFF8_VID4HCOEFF8_LENGTH		(32)
+#define PDP_VID4HCOEFF8_VID4HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF9_OFFSET		(0x0644)
+
+/* PDP, VID4HCOEFF9, VID4HCOEFF9
+*/
+#define PDP_VID4HCOEFF9_VID4HCOEFF9_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF9_VID4HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF9_VID4HCOEFF9_SHIFT		(0)
+#define PDP_VID4HCOEFF9_VID4HCOEFF9_LENGTH		(32)
+#define PDP_VID4HCOEFF9_VID4HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF10_OFFSET		(0x0648)
+
+/* PDP, VID4HCOEFF10, VID4HCOEFF10
+*/
+#define PDP_VID4HCOEFF10_VID4HCOEFF10_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF10_VID4HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF10_VID4HCOEFF10_SHIFT		(0)
+#define PDP_VID4HCOEFF10_VID4HCOEFF10_LENGTH		(32)
+#define PDP_VID4HCOEFF10_VID4HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF11_OFFSET		(0x064C)
+
+/* PDP, VID4HCOEFF11, VID4HCOEFF11
+*/
+#define PDP_VID4HCOEFF11_VID4HCOEFF11_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF11_VID4HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF11_VID4HCOEFF11_SHIFT		(0)
+#define PDP_VID4HCOEFF11_VID4HCOEFF11_LENGTH		(32)
+#define PDP_VID4HCOEFF11_VID4HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF12_OFFSET		(0x0650)
+
+/* PDP, VID4HCOEFF12, VID4HCOEFF12
+*/
+#define PDP_VID4HCOEFF12_VID4HCOEFF12_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF12_VID4HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF12_VID4HCOEFF12_SHIFT		(0)
+#define PDP_VID4HCOEFF12_VID4HCOEFF12_LENGTH		(32)
+#define PDP_VID4HCOEFF12_VID4HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF13_OFFSET		(0x0654)
+
+/* PDP, VID4HCOEFF13, VID4HCOEFF13
+*/
+#define PDP_VID4HCOEFF13_VID4HCOEFF13_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF13_VID4HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF13_VID4HCOEFF13_SHIFT		(0)
+#define PDP_VID4HCOEFF13_VID4HCOEFF13_LENGTH		(32)
+#define PDP_VID4HCOEFF13_VID4HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF14_OFFSET		(0x0658)
+
+/* PDP, VID4HCOEFF14, VID4HCOEFF14
+*/
+#define PDP_VID4HCOEFF14_VID4HCOEFF14_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF14_VID4HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF14_VID4HCOEFF14_SHIFT		(0)
+#define PDP_VID4HCOEFF14_VID4HCOEFF14_LENGTH		(32)
+#define PDP_VID4HCOEFF14_VID4HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF15_OFFSET		(0x065C)
+
+/* PDP, VID4HCOEFF15, VID4HCOEFF15
+*/
+#define PDP_VID4HCOEFF15_VID4HCOEFF15_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF15_VID4HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF15_VID4HCOEFF15_SHIFT		(0)
+#define PDP_VID4HCOEFF15_VID4HCOEFF15_LENGTH		(32)
+#define PDP_VID4HCOEFF15_VID4HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF16_OFFSET		(0x0660)
+
+/* PDP, VID4HCOEFF16, VID4HCOEFF16
+*/
+#define PDP_VID4HCOEFF16_VID4HCOEFF16_MASK		(0x000000FF)
+#define PDP_VID4HCOEFF16_VID4HCOEFF16_LSBMASK		(0x000000FF)
+#define PDP_VID4HCOEFF16_VID4HCOEFF16_SHIFT		(0)
+#define PDP_VID4HCOEFF16_VID4HCOEFF16_LENGTH		(8)
+#define PDP_VID4HCOEFF16_VID4HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4SCALESIZE_OFFSET		(0x0664)
+
+/* PDP, VID4SCALESIZE, VID4SCALEWIDTH
+*/
+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK		(0x0FFF0000)
+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT		(16)
+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_LENGTH		(12)
+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALESIZE, VID4SCALEHEIGHT
+*/
+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK		(0x00000FFF)
+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT		(0)
+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LENGTH		(12)
+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND0_OFFSET		(0x0668)
+
+/* PDP, PORTER_BLND0, BLND0BLENDTYPE
+*/
+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND0, BLND0PORTERMODE
+*/
+#define PDP_PORTER_BLND0_BLND0PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND0_BLND0PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND0_BLND0PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND1_OFFSET		(0x066C)
+
+/* PDP, PORTER_BLND1, BLND1BLENDTYPE
+*/
+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND1, BLND1PORTERMODE
+*/
+#define PDP_PORTER_BLND1_BLND1PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND1_BLND1PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND1_BLND1PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND1_BLND1PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND1_BLND1PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND2_OFFSET		(0x0670)
+
+/* PDP, PORTER_BLND2, BLND2BLENDTYPE
+*/
+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND2, BLND2PORTERMODE
+*/
+#define PDP_PORTER_BLND2_BLND2PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND2_BLND2PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND2_BLND2PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND2_BLND2PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND2_BLND2PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND3_OFFSET		(0x0674)
+
+/* PDP, PORTER_BLND3, BLND3BLENDTYPE
+*/
+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND3, BLND3PORTERMODE
+*/
+#define PDP_PORTER_BLND3_BLND3PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND3_BLND3PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND3_BLND3PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND3_BLND3PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND3_BLND3PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND4_OFFSET		(0x0678)
+
+/* PDP, PORTER_BLND4, BLND4BLENDTYPE
+*/
+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND4, BLND4PORTERMODE
+*/
+#define PDP_PORTER_BLND4_BLND4PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND4_BLND4PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND4_BLND4PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND4_BLND4PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND4_BLND4PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND5_OFFSET		(0x067C)
+
+/* PDP, PORTER_BLND5, BLND5BLENDTYPE
+*/
+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND5, BLND5PORTERMODE
+*/
+#define PDP_PORTER_BLND5_BLND5PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND5_BLND5PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND5_BLND5PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND5_BLND5PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND5_BLND5PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND6_OFFSET		(0x0680)
+
+/* PDP, PORTER_BLND6, BLND6BLENDTYPE
+*/
+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND6, BLND6PORTERMODE
+*/
+#define PDP_PORTER_BLND6_BLND6PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND6_BLND6PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND6_BLND6PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND6_BLND6PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND6_BLND6PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND7_OFFSET		(0x0684)
+
+/* PDP, PORTER_BLND7, BLND7BLENDTYPE
+*/
+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND7, BLND7PORTERMODE
+*/
+#define PDP_PORTER_BLND7_BLND7PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND7_BLND7PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND7_BLND7PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND7_BLND7PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND7_BLND7PORTERMODE_SIGNED_FIELD	IMG_FALSE
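+
+/*
+ * Illustrative sketch only -- not part of the original generated header.
+ * Writing a field is the inverse read-modify-write; the _LSBMASK value
+ * bounds the right-aligned field before it is shifted into place.
+ * pdp_read_reg(), pdp_write_reg(), `base' and `mode' are hypothetical
+ * stand-ins for the driver's MMIO helpers and caller-supplied value.
+ */
+#if 0	/* example, compiled out */
+static inline u32 pdp_set_field(u32 reg_val, u32 mask, u32 shift, u32 field)
+{
+	/* Clear the old field, then merge in the new right-aligned value. */
+	return (reg_val & ~mask) | ((field << shift) & mask);
+}
+
+/* Selecting a Porter-Duff mode on blender 0: */
+u32 val = pdp_read_reg(base, PDP_PORTER_BLND0_OFFSET);
+val = pdp_set_field(val, PDP_PORTER_BLND0_BLND0PORTERMODE_MASK,
+		    PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT,
+		    mode & PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK);
+pdp_write_reg(base, PDP_PORTER_BLND0_OFFSET, val);
+#endif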
+
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06C8)
+
+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_TRANS
+*/
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_OPAQUE
+*/
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_OFFSET		(0x06CC)
+
+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMAX
+*/
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_MASK		(0x03FF0000)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LSBMASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SHIFT		(16)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LENGTH		(10)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMIN
+*/
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_MASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LSBMASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SHIFT		(0)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LENGTH		(10)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1LUMAKEY_C_RG_OFFSET		(0x06D0)
+
+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_R
+*/
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_MASK		(0x0FFF0000)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SHIFT		(16)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LENGTH		(12)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_G
+*/
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_MASK		(0x00000FFF)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SHIFT		(0)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LENGTH		(12)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1LUMAKEY_C_B_OFFSET		(0x06D4)
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYALPHAMULT
+*/
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SHIFT		(29)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LENGTH		(1)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYEN
+*/
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK		(0x10000000)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SHIFT		(28)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LENGTH		(1)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYOUTOFF
+*/
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LSBMASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT		(16)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LENGTH		(10)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYC_B
+*/
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK		(0x00000FFF)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT		(0)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LENGTH		(12)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06D8)
+
+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_TRANS
+*/
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_OPAQUE
+*/
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_OFFSET		(0x06DC)
+
+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMAX
+*/
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_MASK		(0x03FF0000)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LSBMASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SHIFT		(16)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LENGTH		(10)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMIN
+*/
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_MASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LSBMASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SHIFT		(0)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LENGTH		(10)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2LUMAKEY_C_RG_OFFSET		(0x06E0)
+
+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_R
+*/
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_MASK		(0x0FFF0000)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SHIFT		(16)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LENGTH		(12)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_G
+*/
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_MASK		(0x00000FFF)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SHIFT		(0)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LENGTH		(12)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2LUMAKEY_C_B_OFFSET		(0x06E4)
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYALPHAMULT
+*/
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SHIFT		(29)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LENGTH		(1)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYEN
+*/
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_MASK		(0x10000000)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SHIFT		(28)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LENGTH		(1)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYOUTOFF
+*/
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LSBMASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SHIFT		(16)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LENGTH		(10)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYC_B
+*/
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_MASK		(0x00000FFF)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SHIFT		(0)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LENGTH		(12)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06E8)
+
+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_TRANS
+*/
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_OPAQUE
+*/
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_OFFSET		(0x06EC)
+
+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMAX
+*/
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_MASK		(0x03FF0000)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LSBMASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SHIFT		(16)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LENGTH		(10)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMIN
+*/
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_MASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LSBMASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SHIFT		(0)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LENGTH		(10)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3LUMAKEY_C_RG_OFFSET		(0x06F0)
+
+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_R
+*/
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_MASK		(0x0FFF0000)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SHIFT		(16)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LENGTH		(12)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_G
+*/
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_MASK		(0x00000FFF)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SHIFT		(0)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LENGTH		(12)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3LUMAKEY_C_B_OFFSET		(0x06F4)
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYALPHAMULT
+*/
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SHIFT		(29)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LENGTH		(1)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYEN
+*/
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_MASK		(0x10000000)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SHIFT		(28)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LENGTH		(1)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYOUTOFF
+*/
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LSBMASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SHIFT		(16)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LENGTH		(10)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYC_B
+*/
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_MASK		(0x00000FFF)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SHIFT		(0)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LENGTH		(12)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06F8)
+
+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_TRANS
+*/
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_OPAQUE
+*/
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET		(0x06FC)
+
+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMAX
+*/
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK		(0x03FF0000)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LSBMASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT		(16)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LENGTH		(10)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMIN
+*/
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LSBMASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT		(0)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LENGTH		(10)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4LUMAKEY_C_RG_OFFSET		(0x0700)
+
+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_R
+*/
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_MASK		(0x0FFF0000)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SHIFT		(16)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LENGTH		(12)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_G
+*/
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_MASK		(0x00000FFF)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SHIFT		(0)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LENGTH		(12)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4LUMAKEY_C_B_OFFSET		(0x0704)
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYALPHAMULT
+*/
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SHIFT		(29)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LENGTH		(1)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYEN
+*/
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_MASK		(0x10000000)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SHIFT		(28)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LENGTH		(1)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYOUTOFF
+*/
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LSBMASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SHIFT		(16)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LENGTH		(10)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYC_B
+*/
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_MASK		(0x00000FFF)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SHIFT		(0)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LENGTH		(12)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CSCCOEFF0_OFFSET		(0x0708)
+
+/* PDP, CSCCOEFF0, CSCCOEFFRU
+*/
+#define PDP_CSCCOEFF0_CSCCOEFFRU_MASK		(0x003FF800)
+#define PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT		(11)
+#define PDP_CSCCOEFF0_CSCCOEFFRU_LENGTH		(11)
+#define PDP_CSCCOEFF0_CSCCOEFFRU_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF0, CSCCOEFFRY
+*/
+#define PDP_CSCCOEFF0_CSCCOEFFRY_MASK		(0x000007FF)
+#define PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT		(0)
+#define PDP_CSCCOEFF0_CSCCOEFFRY_LENGTH		(11)
+#define PDP_CSCCOEFF0_CSCCOEFFRY_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CSCCOEFF1_OFFSET		(0x070C)
+
+/* PDP, CSCCOEFF1, CSCCOEFFGY
+*/
+#define PDP_CSCCOEFF1_CSCCOEFFGY_MASK		(0x003FF800)
+#define PDP_CSCCOEFF1_CSCCOEFFGY_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF1_CSCCOEFFGY_SHIFT		(11)
+#define PDP_CSCCOEFF1_CSCCOEFFGY_LENGTH		(11)
+#define PDP_CSCCOEFF1_CSCCOEFFGY_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF1, CSCCOEFFRV
+*/
+#define PDP_CSCCOEFF1_CSCCOEFFRV_MASK		(0x000007FF)
+#define PDP_CSCCOEFF1_CSCCOEFFRV_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF1_CSCCOEFFRV_SHIFT		(0)
+#define PDP_CSCCOEFF1_CSCCOEFFRV_LENGTH		(11)
+#define PDP_CSCCOEFF1_CSCCOEFFRV_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CSCCOEFF2_OFFSET		(0x0710)
+
+/* PDP, CSCCOEFF2, CSCCOEFFGV
+*/
+#define PDP_CSCCOEFF2_CSCCOEFFGV_MASK		(0x003FF800)
+#define PDP_CSCCOEFF2_CSCCOEFFGV_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF2_CSCCOEFFGV_SHIFT		(11)
+#define PDP_CSCCOEFF2_CSCCOEFFGV_LENGTH		(11)
+#define PDP_CSCCOEFF2_CSCCOEFFGV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF2, CSCCOEFFGU
+*/
+#define PDP_CSCCOEFF2_CSCCOEFFGU_MASK		(0x000007FF)
+#define PDP_CSCCOEFF2_CSCCOEFFGU_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF2_CSCCOEFFGU_SHIFT		(0)
+#define PDP_CSCCOEFF2_CSCCOEFFGU_LENGTH		(11)
+#define PDP_CSCCOEFF2_CSCCOEFFGU_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CSCCOEFF3_OFFSET		(0x0714)
+
+/* PDP, CSCCOEFF3, CSCCOEFFBU
+*/
+#define PDP_CSCCOEFF3_CSCCOEFFBU_MASK		(0x003FF800)
+#define PDP_CSCCOEFF3_CSCCOEFFBU_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF3_CSCCOEFFBU_SHIFT		(11)
+#define PDP_CSCCOEFF3_CSCCOEFFBU_LENGTH		(11)
+#define PDP_CSCCOEFF3_CSCCOEFFBU_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF3, CSCCOEFFBY
+*/
+#define PDP_CSCCOEFF3_CSCCOEFFBY_MASK		(0x000007FF)
+#define PDP_CSCCOEFF3_CSCCOEFFBY_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF3_CSCCOEFFBY_SHIFT		(0)
+#define PDP_CSCCOEFF3_CSCCOEFFBY_LENGTH		(11)
+#define PDP_CSCCOEFF3_CSCCOEFFBY_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CSCCOEFF4_OFFSET		(0x0718)
+
+/* PDP, CSCCOEFF4, CSCCOEFFBV
+*/
+#define PDP_CSCCOEFF4_CSCCOEFFBV_MASK		(0x000007FF)
+#define PDP_CSCCOEFF4_CSCCOEFFBV_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF4_CSCCOEFFBV_SHIFT		(0)
+#define PDP_CSCCOEFF4_CSCCOEFFBV_LENGTH		(11)
+#define PDP_CSCCOEFF4_CSCCOEFFBV_SIGNED_FIELD	IMG_FALSE
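+
+/*
+ * Illustrative sketch only: CSCCOEFF0..4 hold the nine 11-bit
+ * colour-space-conversion matrix coefficients, packed two per register
+ * (CSCCOEFF4 carries the last one alone). A hypothetical example
+ * packing CSCCOEFF0 from the RY/RU coefficients with the
+ * pdp_field_set() sketch above:
+ *
+ *	csc0 = pdp_field_set(0, PDP_CSCCOEFF0_CSCCOEFFRY_MASK,
+ *			     PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT, ry);
+ *	csc0 = pdp_field_set(csc0, PDP_CSCCOEFF0_CSCCOEFFRU_MASK,
+ *			     PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT, ru);
+ */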
+
+#define PDP_BGNDCOL_AR_OFFSET		(0x071C)
+
+/* PDP, BGNDCOL_AR, BGNDCOL_A
+*/
+#define PDP_BGNDCOL_AR_BGNDCOL_A_MASK		(0x03FF0000)
+#define PDP_BGNDCOL_AR_BGNDCOL_A_LSBMASK		(0x000003FF)
+#define PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT		(16)
+#define PDP_BGNDCOL_AR_BGNDCOL_A_LENGTH		(10)
+#define PDP_BGNDCOL_AR_BGNDCOL_A_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, BGNDCOL_AR, BGNDCOL_R
+*/
+#define PDP_BGNDCOL_AR_BGNDCOL_R_MASK		(0x000003FF)
+#define PDP_BGNDCOL_AR_BGNDCOL_R_LSBMASK		(0x000003FF)
+#define PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT		(0)
+#define PDP_BGNDCOL_AR_BGNDCOL_R_LENGTH		(10)
+#define PDP_BGNDCOL_AR_BGNDCOL_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BGNDCOL_GB_OFFSET		(0x0720)
+
+/* PDP, BGNDCOL_GB, BGNDCOL_G
+*/
+#define PDP_BGNDCOL_GB_BGNDCOL_G_MASK		(0x03FF0000)
+#define PDP_BGNDCOL_GB_BGNDCOL_G_LSBMASK		(0x000003FF)
+#define PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT		(16)
+#define PDP_BGNDCOL_GB_BGNDCOL_G_LENGTH		(10)
+#define PDP_BGNDCOL_GB_BGNDCOL_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, BGNDCOL_GB, BGNDCOL_B
+*/
+#define PDP_BGNDCOL_GB_BGNDCOL_B_MASK		(0x000003FF)
+#define PDP_BGNDCOL_GB_BGNDCOL_B_LSBMASK		(0x000003FF)
+#define PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT		(0)
+#define PDP_BGNDCOL_GB_BGNDCOL_B_LENGTH		(10)
+#define PDP_BGNDCOL_GB_BGNDCOL_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BORDCOL_R_OFFSET		(0x0724)
+
+/* PDP, BORDCOL_R, BORDCOL_R
+*/
+#define PDP_BORDCOL_R_BORDCOL_R_MASK		(0x000003FF)
+#define PDP_BORDCOL_R_BORDCOL_R_LSBMASK		(0x000003FF)
+#define PDP_BORDCOL_R_BORDCOL_R_SHIFT		(0)
+#define PDP_BORDCOL_R_BORDCOL_R_LENGTH		(10)
+#define PDP_BORDCOL_R_BORDCOL_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BORDCOL_GB_OFFSET		(0x0728)
+
+/* PDP, BORDCOL_GB, BORDCOL_G
+*/
+#define PDP_BORDCOL_GB_BORDCOL_G_MASK		(0x03FF0000)
+#define PDP_BORDCOL_GB_BORDCOL_G_LSBMASK		(0x000003FF)
+#define PDP_BORDCOL_GB_BORDCOL_G_SHIFT		(16)
+#define PDP_BORDCOL_GB_BORDCOL_G_LENGTH		(10)
+#define PDP_BORDCOL_GB_BORDCOL_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, BORDCOL_GB, BORDCOL_B
+*/
+#define PDP_BORDCOL_GB_BORDCOL_B_MASK		(0x000003FF)
+#define PDP_BORDCOL_GB_BORDCOL_B_LSBMASK		(0x000003FF)
+#define PDP_BORDCOL_GB_BORDCOL_B_SHIFT		(0)
+#define PDP_BORDCOL_GB_BORDCOL_B_LENGTH		(10)
+#define PDP_BORDCOL_GB_BORDCOL_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_LINESTAT_OFFSET		(0x0734)
+
+/* PDP, LINESTAT, LINENO
+*/
+#define PDP_LINESTAT_LINENO_MASK		(0x00001FFF)
+#define PDP_LINESTAT_LINENO_LSBMASK		(0x00001FFF)
+#define PDP_LINESTAT_LINENO_SHIFT		(0)
+#define PDP_LINESTAT_LINENO_LENGTH		(13)
+#define PDP_LINESTAT_LINENO_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_C11C12_OFFSET		(0x0738)
+
+/* PDP, CR_PDP_PROCAMP_C11C12, CR_PROCAMP_C12
+*/
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_MASK		(0x3FFF0000)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_C11C12, CR_PROCAMP_C11
+*/
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_MASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_C13C21_OFFSET		(0x073C)
+
+/* PDP, CR_PDP_PROCAMP_C13C21, CR_PROCAMP_C21
+*/
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_MASK		(0x3FFF0000)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_C13C21, CR_PROCAMP_C13
+*/
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_MASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_C22C23_OFFSET		(0x0740)
+
+/* PDP, CR_PDP_PROCAMP_C22C23, CR_PROCAMP_C23
+*/
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_MASK		(0x3FFF0000)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_C22C23, CR_PROCAMP_C22
+*/
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_MASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_C31C32_OFFSET		(0x0744)
+
+/* PDP, CR_PDP_PROCAMP_C31C32, CR_PROCAMP_C32
+*/
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_MASK		(0x3FFF0000)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_C31C32, CR_PROCAMP_C31
+*/
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_MASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_C33_OFFSET		(0x0748)
+
+/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_C33
+*/
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_MASK		(0x3FFF0000)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_RANGE
+*/
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_MASK		(0x00000030)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LSBMASK		(0x00000003)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SHIFT		(4)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LENGTH		(2)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_EN
+*/
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK		(0x00000001)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_LSBMASK		(0x00000001)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_LENGTH		(1)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_OFFSET		(0x074C)
+
+/* PDP, CR_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_G
+*/
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_MASK		(0x0FFF0000)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LSBMASK		(0x00000FFF)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LENGTH		(12)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_B
+*/
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_MASK		(0x00000FFF)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LSBMASK		(0x00000FFF)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LENGTH		(12)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_OFFSET		(0x0750)
+
+/* PDP, CR_PDP_PROCAMP_OUTOFFSET_R, CR_PROCAMP_OUTOFF_R
+*/
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_MASK		(0x00000FFF)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LSBMASK		(0x00000FFF)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LENGTH		(12)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_OFFSET		(0x0754)
+
+/* PDP, CR_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_G
+*/
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_MASK		(0x03FF0000)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LSBMASK		(0x000003FF)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LENGTH		(10)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_B
+*/
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_MASK		(0x000003FF)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LSBMASK		(0x000003FF)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LENGTH		(10)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_OFFSET		(0x0758)
+
+/* PDP, CR_PDP_PROCAMP_INOFFSET_R, CR_PROCAMP_INOFF_R
+*/
+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_MASK		(0x000003FF)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LSBMASK		(0x000003FF)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LENGTH		(10)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_SIGNAT_R_OFFSET		(0x075C)
+
+/* PDP, SIGNAT_R, SIGNATURE_R
+*/
+#define PDP_SIGNAT_R_SIGNATURE_R_MASK		(0x000003FF)
+#define PDP_SIGNAT_R_SIGNATURE_R_LSBMASK		(0x000003FF)
+#define PDP_SIGNAT_R_SIGNATURE_R_SHIFT		(0)
+#define PDP_SIGNAT_R_SIGNATURE_R_LENGTH		(10)
+#define PDP_SIGNAT_R_SIGNATURE_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_SIGNAT_GB_OFFSET		(0x0760)
+
+/* PDP, SIGNAT_GB, SIGNATURE_G
+*/
+#define PDP_SIGNAT_GB_SIGNATURE_G_MASK		(0x03FF0000)
+#define PDP_SIGNAT_GB_SIGNATURE_G_LSBMASK		(0x000003FF)
+#define PDP_SIGNAT_GB_SIGNATURE_G_SHIFT		(16)
+#define PDP_SIGNAT_GB_SIGNATURE_G_LENGTH		(10)
+#define PDP_SIGNAT_GB_SIGNATURE_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SIGNAT_GB, SIGNATURE_B
+*/
+#define PDP_SIGNAT_GB_SIGNATURE_B_MASK		(0x000003FF)
+#define PDP_SIGNAT_GB_SIGNATURE_B_LSBMASK		(0x000003FF)
+#define PDP_SIGNAT_GB_SIGNATURE_B_SHIFT		(0)
+#define PDP_SIGNAT_GB_SIGNATURE_B_LENGTH		(10)
+#define PDP_SIGNAT_GB_SIGNATURE_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_REGISTER_UPDATE_CTRL_OFFSET		(0x0764)
+
+/* PDP, REGISTER_UPDATE_CTRL, BYPASS_DOUBLE_BUFFERING
+*/
+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_MASK		(0x00000004)
+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LSBMASK		(0x00000001)
+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SHIFT		(2)
+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LENGTH		(1)
+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, REGISTER_UPDATE_CTRL, REGISTERS_VALID
+*/
+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK		(0x00000002)
+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LSBMASK		(0x00000001)
+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT		(1)
+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LENGTH		(1)
+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, REGISTER_UPDATE_CTRL, USE_VBLANK
+*/
+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK		(0x00000001)
+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LSBMASK		(0x00000001)
+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT		(0)
+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LENGTH		(1)
+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_REGISTER_UPDATE_STATUS_OFFSET		(0x0768)
+
+/* PDP, REGISTER_UPDATE_STATUS, REGISTERS_UPDATED
+*/
+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_MASK		(0x00000002)
+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LSBMASK		(0x00000001)
+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SHIFT		(1)
+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LENGTH		(1)
+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DBGCTRL_OFFSET		(0x076C)
+
+/* PDP, DBGCTRL, DBG_READ
+*/
+#define PDP_DBGCTRL_DBG_READ_MASK		(0x00000002)
+#define PDP_DBGCTRL_DBG_READ_LSBMASK		(0x00000001)
+#define PDP_DBGCTRL_DBG_READ_SHIFT		(1)
+#define PDP_DBGCTRL_DBG_READ_LENGTH		(1)
+#define PDP_DBGCTRL_DBG_READ_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DBGCTRL, DBG_ENAB
+*/
+#define PDP_DBGCTRL_DBG_ENAB_MASK		(0x00000001)
+#define PDP_DBGCTRL_DBG_ENAB_LSBMASK		(0x00000001)
+#define PDP_DBGCTRL_DBG_ENAB_SHIFT		(0)
+#define PDP_DBGCTRL_DBG_ENAB_LENGTH		(1)
+#define PDP_DBGCTRL_DBG_ENAB_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DBGDATA_R_OFFSET		(0x0770)
+
+/* PDP, DBGDATA_R, DBG_DATA_R
+*/
+#define PDP_DBGDATA_R_DBG_DATA_R_MASK		(0x000003FF)
+#define PDP_DBGDATA_R_DBG_DATA_R_LSBMASK		(0x000003FF)
+#define PDP_DBGDATA_R_DBG_DATA_R_SHIFT		(0)
+#define PDP_DBGDATA_R_DBG_DATA_R_LENGTH		(10)
+#define PDP_DBGDATA_R_DBG_DATA_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DBGDATA_GB_OFFSET		(0x0774)
+
+/* PDP, DBGDATA_GB, DBG_DATA_G
+*/
+#define PDP_DBGDATA_GB_DBG_DATA_G_MASK		(0x03FF0000)
+#define PDP_DBGDATA_GB_DBG_DATA_G_LSBMASK		(0x000003FF)
+#define PDP_DBGDATA_GB_DBG_DATA_G_SHIFT		(16)
+#define PDP_DBGDATA_GB_DBG_DATA_G_LENGTH		(10)
+#define PDP_DBGDATA_GB_DBG_DATA_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DBGDATA_GB, DBG_DATA_B
+*/
+#define PDP_DBGDATA_GB_DBG_DATA_B_MASK		(0x000003FF)
+#define PDP_DBGDATA_GB_DBG_DATA_B_LSBMASK		(0x000003FF)
+#define PDP_DBGDATA_GB_DBG_DATA_B_SHIFT		(0)
+#define PDP_DBGDATA_GB_DBG_DATA_B_LENGTH		(10)
+#define PDP_DBGDATA_GB_DBG_DATA_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DBGSIDE_OFFSET		(0x0778)
+
+/* PDP, DBGSIDE, DBG_VAL
+*/
+#define PDP_DBGSIDE_DBG_VAL_MASK		(0x00000008)
+#define PDP_DBGSIDE_DBG_VAL_LSBMASK		(0x00000001)
+#define PDP_DBGSIDE_DBG_VAL_SHIFT		(3)
+#define PDP_DBGSIDE_DBG_VAL_LENGTH		(1)
+#define PDP_DBGSIDE_DBG_VAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DBGSIDE, DBG_SIDE
+*/
+#define PDP_DBGSIDE_DBG_SIDE_MASK		(0x00000007)
+#define PDP_DBGSIDE_DBG_SIDE_LSBMASK		(0x00000007)
+#define PDP_DBGSIDE_DBG_SIDE_SHIFT		(0)
+#define PDP_DBGSIDE_DBG_SIDE_LENGTH		(3)
+#define PDP_DBGSIDE_DBG_SIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_OUTPUT_OFFSET		(0x077C)
+
+/* PDP, OUTPUT, EIGHT_BIT_OUTPUT
+*/
+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_MASK		(0x00000002)
+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_LSBMASK		(0x00000001)
+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_SHIFT		(1)
+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_LENGTH		(1)
+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OUTPUT, OUTPUT_CONFIG
+*/
+#define PDP_OUTPUT_OUTPUT_CONFIG_MASK		(0x00000001)
+#define PDP_OUTPUT_OUTPUT_CONFIG_LSBMASK		(0x00000001)
+#define PDP_OUTPUT_OUTPUT_CONFIG_SHIFT		(0)
+#define PDP_OUTPUT_OUTPUT_CONFIG_LENGTH		(1)
+#define PDP_OUTPUT_OUTPUT_CONFIG_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_SYNCCTRL_OFFSET		(0x0780)
+
+/* PDP, SYNCCTRL, SYNCACTIVE
+*/
+#define PDP_SYNCCTRL_SYNCACTIVE_MASK		(0x80000000)
+#define PDP_SYNCCTRL_SYNCACTIVE_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_SYNCACTIVE_SHIFT		(31)
+#define PDP_SYNCCTRL_SYNCACTIVE_LENGTH		(1)
+#define PDP_SYNCCTRL_SYNCACTIVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, PDP_RST
+*/
+#define PDP_SYNCCTRL_PDP_RST_MASK		(0x20000000)
+#define PDP_SYNCCTRL_PDP_RST_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_PDP_RST_SHIFT		(29)
+#define PDP_SYNCCTRL_PDP_RST_LENGTH		(1)
+#define PDP_SYNCCTRL_PDP_RST_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, POWERDN
+*/
+#define PDP_SYNCCTRL_POWERDN_MASK		(0x10000000)
+#define PDP_SYNCCTRL_POWERDN_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_POWERDN_SHIFT		(28)
+#define PDP_SYNCCTRL_POWERDN_LENGTH		(1)
+#define PDP_SYNCCTRL_POWERDN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, LOWPWRMODE
+*/
+#define PDP_SYNCCTRL_LOWPWRMODE_MASK		(0x08000000)
+#define PDP_SYNCCTRL_LOWPWRMODE_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_LOWPWRMODE_SHIFT		(27)
+#define PDP_SYNCCTRL_LOWPWRMODE_LENGTH		(1)
+#define PDP_SYNCCTRL_LOWPWRMODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDSYNCTRL
+*/
+#define PDP_SYNCCTRL_UPDSYNCTRL_MASK		(0x04000000)
+#define PDP_SYNCCTRL_UPDSYNCTRL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_UPDSYNCTRL_SHIFT		(26)
+#define PDP_SYNCCTRL_UPDSYNCTRL_LENGTH		(1)
+#define PDP_SYNCCTRL_UPDSYNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDINTCTRL
+*/
+#define PDP_SYNCCTRL_UPDINTCTRL_MASK		(0x02000000)
+#define PDP_SYNCCTRL_UPDINTCTRL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_UPDINTCTRL_SHIFT		(25)
+#define PDP_SYNCCTRL_UPDINTCTRL_LENGTH		(1)
+#define PDP_SYNCCTRL_UPDINTCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDCTRL
+*/
+#define PDP_SYNCCTRL_UPDCTRL_MASK		(0x01000000)
+#define PDP_SYNCCTRL_UPDCTRL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_UPDCTRL_SHIFT		(24)
+#define PDP_SYNCCTRL_UPDCTRL_LENGTH		(1)
+#define PDP_SYNCCTRL_UPDCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDWAIT
+*/
+#define PDP_SYNCCTRL_UPDWAIT_MASK		(0x000F0000)
+#define PDP_SYNCCTRL_UPDWAIT_LSBMASK		(0x0000000F)
+#define PDP_SYNCCTRL_UPDWAIT_SHIFT		(16)
+#define PDP_SYNCCTRL_UPDWAIT_LENGTH		(4)
+#define PDP_SYNCCTRL_UPDWAIT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, FIELD_EN
+*/
+#define PDP_SYNCCTRL_FIELD_EN_MASK		(0x00002000)
+#define PDP_SYNCCTRL_FIELD_EN_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_FIELD_EN_SHIFT		(13)
+#define PDP_SYNCCTRL_FIELD_EN_LENGTH		(1)
+#define PDP_SYNCCTRL_FIELD_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, CSYNC_EN
+*/
+#define PDP_SYNCCTRL_CSYNC_EN_MASK		(0x00001000)
+#define PDP_SYNCCTRL_CSYNC_EN_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_CSYNC_EN_SHIFT		(12)
+#define PDP_SYNCCTRL_CSYNC_EN_LENGTH		(1)
+#define PDP_SYNCCTRL_CSYNC_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, CLKPOL
+*/
+#define PDP_SYNCCTRL_CLKPOL_MASK		(0x00000800)
+#define PDP_SYNCCTRL_CLKPOL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_CLKPOL_SHIFT		(11)
+#define PDP_SYNCCTRL_CLKPOL_LENGTH		(1)
+#define PDP_SYNCCTRL_CLKPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, VS_SLAVE
+*/
+#define PDP_SYNCCTRL_VS_SLAVE_MASK		(0x00000080)
+#define PDP_SYNCCTRL_VS_SLAVE_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_VS_SLAVE_SHIFT		(7)
+#define PDP_SYNCCTRL_VS_SLAVE_LENGTH		(1)
+#define PDP_SYNCCTRL_VS_SLAVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, HS_SLAVE
+*/
+#define PDP_SYNCCTRL_HS_SLAVE_MASK		(0x00000040)
+#define PDP_SYNCCTRL_HS_SLAVE_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_HS_SLAVE_SHIFT		(6)
+#define PDP_SYNCCTRL_HS_SLAVE_LENGTH		(1)
+#define PDP_SYNCCTRL_HS_SLAVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, BLNKPOL
+*/
+#define PDP_SYNCCTRL_BLNKPOL_MASK		(0x00000020)
+#define PDP_SYNCCTRL_BLNKPOL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_BLNKPOL_SHIFT		(5)
+#define PDP_SYNCCTRL_BLNKPOL_LENGTH		(1)
+#define PDP_SYNCCTRL_BLNKPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, BLNKDIS
+*/
+#define PDP_SYNCCTRL_BLNKDIS_MASK		(0x00000010)
+#define PDP_SYNCCTRL_BLNKDIS_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_BLNKDIS_SHIFT		(4)
+#define PDP_SYNCCTRL_BLNKDIS_LENGTH		(1)
+#define PDP_SYNCCTRL_BLNKDIS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, VSPOL
+*/
+#define PDP_SYNCCTRL_VSPOL_MASK		(0x00000008)
+#define PDP_SYNCCTRL_VSPOL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_VSPOL_SHIFT		(3)
+#define PDP_SYNCCTRL_VSPOL_LENGTH		(1)
+#define PDP_SYNCCTRL_VSPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, VSDIS
+*/
+#define PDP_SYNCCTRL_VSDIS_MASK		(0x00000004)
+#define PDP_SYNCCTRL_VSDIS_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_VSDIS_SHIFT		(2)
+#define PDP_SYNCCTRL_VSDIS_LENGTH		(1)
+#define PDP_SYNCCTRL_VSDIS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, HSPOL
+*/
+#define PDP_SYNCCTRL_HSPOL_MASK		(0x00000002)
+#define PDP_SYNCCTRL_HSPOL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_HSPOL_SHIFT		(1)
+#define PDP_SYNCCTRL_HSPOL_LENGTH		(1)
+#define PDP_SYNCCTRL_HSPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, HSDIS
+*/
+#define PDP_SYNCCTRL_HSDIS_MASK		(0x00000001)
+#define PDP_SYNCCTRL_HSDIS_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_HSDIS_SHIFT		(0)
+#define PDP_SYNCCTRL_HSDIS_LENGTH		(1)
+#define PDP_SYNCCTRL_HSDIS_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_HSYNC1_OFFSET		(0x0784)
+
+/* PDP, HSYNC1, HBPS
+*/
+#define PDP_HSYNC1_HBPS_MASK		(0x1FFF0000)
+#define PDP_HSYNC1_HBPS_LSBMASK		(0x00001FFF)
+#define PDP_HSYNC1_HBPS_SHIFT		(16)
+#define PDP_HSYNC1_HBPS_LENGTH		(13)
+#define PDP_HSYNC1_HBPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HSYNC1, HT
+*/
+#define PDP_HSYNC1_HT_MASK		(0x00001FFF)
+#define PDP_HSYNC1_HT_LSBMASK		(0x00001FFF)
+#define PDP_HSYNC1_HT_SHIFT		(0)
+#define PDP_HSYNC1_HT_LENGTH		(13)
+#define PDP_HSYNC1_HT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_HSYNC2_OFFSET		(0x0788)
+
+/* PDP, HSYNC2, HAS
+*/
+#define PDP_HSYNC2_HAS_MASK		(0x1FFF0000)
+#define PDP_HSYNC2_HAS_LSBMASK		(0x00001FFF)
+#define PDP_HSYNC2_HAS_SHIFT		(16)
+#define PDP_HSYNC2_HAS_LENGTH		(13)
+#define PDP_HSYNC2_HAS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HSYNC2, HLBS
+*/
+#define PDP_HSYNC2_HLBS_MASK		(0x00001FFF)
+#define PDP_HSYNC2_HLBS_LSBMASK		(0x00001FFF)
+#define PDP_HSYNC2_HLBS_SHIFT		(0)
+#define PDP_HSYNC2_HLBS_LENGTH		(13)
+#define PDP_HSYNC2_HLBS_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_HSYNC3_OFFSET		(0x078C)
+
+/* PDP, HSYNC3, HFPS
+*/
+#define PDP_HSYNC3_HFPS_MASK		(0x1FFF0000)
+#define PDP_HSYNC3_HFPS_LSBMASK		(0x00001FFF)
+#define PDP_HSYNC3_HFPS_SHIFT		(16)
+#define PDP_HSYNC3_HFPS_LENGTH		(13)
+#define PDP_HSYNC3_HFPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HSYNC3, HRBS
+*/
+#define PDP_HSYNC3_HRBS_MASK		(0x00001FFF)
+#define PDP_HSYNC3_HRBS_LSBMASK		(0x00001FFF)
+#define PDP_HSYNC3_HRBS_SHIFT		(0)
+#define PDP_HSYNC3_HRBS_LENGTH		(13)
+#define PDP_HSYNC3_HRBS_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VSYNC1_OFFSET		(0x0790)
+
+/* PDP, VSYNC1, VBPS
+*/
+#define PDP_VSYNC1_VBPS_MASK		(0x1FFF0000)
+#define PDP_VSYNC1_VBPS_LSBMASK		(0x00001FFF)
+#define PDP_VSYNC1_VBPS_SHIFT		(16)
+#define PDP_VSYNC1_VBPS_LENGTH		(13)
+#define PDP_VSYNC1_VBPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VSYNC1, VT
+*/
+#define PDP_VSYNC1_VT_MASK		(0x00001FFF)
+#define PDP_VSYNC1_VT_LSBMASK		(0x00001FFF)
+#define PDP_VSYNC1_VT_SHIFT		(0)
+#define PDP_VSYNC1_VT_LENGTH		(13)
+#define PDP_VSYNC1_VT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VSYNC2_OFFSET		(0x0794)
+
+/* PDP, VSYNC2, VAS
+*/
+#define PDP_VSYNC2_VAS_MASK		(0x1FFF0000)
+#define PDP_VSYNC2_VAS_LSBMASK		(0x00001FFF)
+#define PDP_VSYNC2_VAS_SHIFT		(16)
+#define PDP_VSYNC2_VAS_LENGTH		(13)
+#define PDP_VSYNC2_VAS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VSYNC2, VTBS
+*/
+#define PDP_VSYNC2_VTBS_MASK		(0x00001FFF)
+#define PDP_VSYNC2_VTBS_LSBMASK		(0x00001FFF)
+#define PDP_VSYNC2_VTBS_SHIFT		(0)
+#define PDP_VSYNC2_VTBS_LENGTH		(13)
+#define PDP_VSYNC2_VTBS_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VSYNC3_OFFSET		(0x0798)
+
+/* PDP, VSYNC3, VFPS
+*/
+#define PDP_VSYNC3_VFPS_MASK		(0x1FFF0000)
+#define PDP_VSYNC3_VFPS_LSBMASK		(0x00001FFF)
+#define PDP_VSYNC3_VFPS_SHIFT		(16)
+#define PDP_VSYNC3_VFPS_LENGTH		(13)
+#define PDP_VSYNC3_VFPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VSYNC3, VBBS
+*/
+#define PDP_VSYNC3_VBBS_MASK		(0x00001FFF)
+#define PDP_VSYNC3_VBBS_LSBMASK		(0x00001FFF)
+#define PDP_VSYNC3_VBBS_SHIFT		(0)
+#define PDP_VSYNC3_VBBS_LENGTH		(13)
+#define PDP_VSYNC3_VBBS_SIGNED_FIELD	IMG_FALSE
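+
+/*
+ * Illustrative sketch only: HSYNC1..3 and VSYNC1..3 each pack two
+ * 13-bit timing values (totals and porch/active start positions). A
+ * hypothetical example composing HSYNC1 from a horizontal total (ht)
+ * and a horizontal back-porch start (hbps) via pdp_field_set() above:
+ *
+ *	hsync1 = pdp_field_set(0, PDP_HSYNC1_HT_MASK,
+ *			       PDP_HSYNC1_HT_SHIFT, ht);
+ *	hsync1 = pdp_field_set(hsync1, PDP_HSYNC1_HBPS_MASK,
+ *			       PDP_HSYNC1_HBPS_SHIFT, hbps);
+ */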
+
+#define PDP_INTSTAT_OFFSET		(0x079C)
+
+/* PDP, INTSTAT, INTS_VID4ORUN
+*/
+#define PDP_INTSTAT_INTS_VID4ORUN_MASK		(0x00080000)
+#define PDP_INTSTAT_INTS_VID4ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID4ORUN_SHIFT		(19)
+#define PDP_INTSTAT_INTS_VID4ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID3ORUN
+*/
+#define PDP_INTSTAT_INTS_VID3ORUN_MASK		(0x00040000)
+#define PDP_INTSTAT_INTS_VID3ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID3ORUN_SHIFT		(18)
+#define PDP_INTSTAT_INTS_VID3ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID2ORUN
+*/
+#define PDP_INTSTAT_INTS_VID2ORUN_MASK		(0x00020000)
+#define PDP_INTSTAT_INTS_VID2ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID2ORUN_SHIFT		(17)
+#define PDP_INTSTAT_INTS_VID2ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID1ORUN
+*/
+#define PDP_INTSTAT_INTS_VID1ORUN_MASK		(0x00010000)
+#define PDP_INTSTAT_INTS_VID1ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID1ORUN_SHIFT		(16)
+#define PDP_INTSTAT_INTS_VID1ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH4ORUN
+*/
+#define PDP_INTSTAT_INTS_GRPH4ORUN_MASK		(0x00008000)
+#define PDP_INTSTAT_INTS_GRPH4ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH4ORUN_SHIFT		(15)
+#define PDP_INTSTAT_INTS_GRPH4ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH3ORUN
+*/
+#define PDP_INTSTAT_INTS_GRPH3ORUN_MASK		(0x00004000)
+#define PDP_INTSTAT_INTS_GRPH3ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH3ORUN_SHIFT		(14)
+#define PDP_INTSTAT_INTS_GRPH3ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH2ORUN
+*/
+#define PDP_INTSTAT_INTS_GRPH2ORUN_MASK		(0x00002000)
+#define PDP_INTSTAT_INTS_GRPH2ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH2ORUN_SHIFT		(13)
+#define PDP_INTSTAT_INTS_GRPH2ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH1ORUN
+*/
+#define PDP_INTSTAT_INTS_GRPH1ORUN_MASK		(0x00001000)
+#define PDP_INTSTAT_INTS_GRPH1ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH1ORUN_SHIFT		(12)
+#define PDP_INTSTAT_INTS_GRPH1ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID4URUN
+*/
+#define PDP_INTSTAT_INTS_VID4URUN_MASK		(0x00000800)
+#define PDP_INTSTAT_INTS_VID4URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID4URUN_SHIFT		(11)
+#define PDP_INTSTAT_INTS_VID4URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID3URUN
+*/
+#define PDP_INTSTAT_INTS_VID3URUN_MASK		(0x00000400)
+#define PDP_INTSTAT_INTS_VID3URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID3URUN_SHIFT		(10)
+#define PDP_INTSTAT_INTS_VID3URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID2URUN
+*/
+#define PDP_INTSTAT_INTS_VID2URUN_MASK		(0x00000200)
+#define PDP_INTSTAT_INTS_VID2URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID2URUN_SHIFT		(9)
+#define PDP_INTSTAT_INTS_VID2URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID1URUN
+*/
+#define PDP_INTSTAT_INTS_VID1URUN_MASK		(0x00000100)
+#define PDP_INTSTAT_INTS_VID1URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID1URUN_SHIFT		(8)
+#define PDP_INTSTAT_INTS_VID1URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH4URUN
+*/
+#define PDP_INTSTAT_INTS_GRPH4URUN_MASK		(0x00000080)
+#define PDP_INTSTAT_INTS_GRPH4URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH4URUN_SHIFT		(7)
+#define PDP_INTSTAT_INTS_GRPH4URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH3URUN
+*/
+#define PDP_INTSTAT_INTS_GRPH3URUN_MASK		(0x00000040)
+#define PDP_INTSTAT_INTS_GRPH3URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH3URUN_SHIFT		(6)
+#define PDP_INTSTAT_INTS_GRPH3URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH2URUN
+*/
+#define PDP_INTSTAT_INTS_GRPH2URUN_MASK		(0x00000020)
+#define PDP_INTSTAT_INTS_GRPH2URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH2URUN_SHIFT		(5)
+#define PDP_INTSTAT_INTS_GRPH2URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH1URUN
+*/
+#define PDP_INTSTAT_INTS_GRPH1URUN_MASK		(0x00000010)
+#define PDP_INTSTAT_INTS_GRPH1URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH1URUN_SHIFT		(4)
+#define PDP_INTSTAT_INTS_GRPH1URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VBLNK1
+*/
+#define PDP_INTSTAT_INTS_VBLNK1_MASK		(0x00000008)
+#define PDP_INTSTAT_INTS_VBLNK1_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VBLNK1_SHIFT		(3)
+#define PDP_INTSTAT_INTS_VBLNK1_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VBLNK0
+*/
+#define PDP_INTSTAT_INTS_VBLNK0_MASK		(0x00000004)
+#define PDP_INTSTAT_INTS_VBLNK0_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VBLNK0_SHIFT		(2)
+#define PDP_INTSTAT_INTS_VBLNK0_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VBLNK0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_HBLNK1
+*/
+#define PDP_INTSTAT_INTS_HBLNK1_MASK		(0x00000002)
+#define PDP_INTSTAT_INTS_HBLNK1_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_HBLNK1_SHIFT		(1)
+#define PDP_INTSTAT_INTS_HBLNK1_LENGTH		(1)
+#define PDP_INTSTAT_INTS_HBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_HBLNK0
+*/
+#define PDP_INTSTAT_INTS_HBLNK0_MASK		(0x00000001)
+#define PDP_INTSTAT_INTS_HBLNK0_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_HBLNK0_SHIFT		(0)
+#define PDP_INTSTAT_INTS_HBLNK0_LENGTH		(1)
+#define PDP_INTSTAT_INTS_HBLNK0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_INTENAB_OFFSET		(0x07A0)
+
+/* PDP, INTENAB, INTEN_VID4ORUN
+*/
+#define PDP_INTENAB_INTEN_VID4ORUN_MASK		(0x00080000)
+#define PDP_INTENAB_INTEN_VID4ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID4ORUN_SHIFT		(19)
+#define PDP_INTENAB_INTEN_VID4ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID3ORUN
+*/
+#define PDP_INTENAB_INTEN_VID3ORUN_MASK		(0x00040000)
+#define PDP_INTENAB_INTEN_VID3ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID3ORUN_SHIFT		(18)
+#define PDP_INTENAB_INTEN_VID3ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID2ORUN
+*/
+#define PDP_INTENAB_INTEN_VID2ORUN_MASK		(0x00020000)
+#define PDP_INTENAB_INTEN_VID2ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID2ORUN_SHIFT		(17)
+#define PDP_INTENAB_INTEN_VID2ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID1ORUN
+*/
+#define PDP_INTENAB_INTEN_VID1ORUN_MASK		(0x00010000)
+#define PDP_INTENAB_INTEN_VID1ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID1ORUN_SHIFT		(16)
+#define PDP_INTENAB_INTEN_VID1ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH4ORUN
+*/
+#define PDP_INTENAB_INTEN_GRPH4ORUN_MASK		(0x00008000)
+#define PDP_INTENAB_INTEN_GRPH4ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH4ORUN_SHIFT		(15)
+#define PDP_INTENAB_INTEN_GRPH4ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH3ORUN
+*/
+#define PDP_INTENAB_INTEN_GRPH3ORUN_MASK		(0x00004000)
+#define PDP_INTENAB_INTEN_GRPH3ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH3ORUN_SHIFT		(14)
+#define PDP_INTENAB_INTEN_GRPH3ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH2ORUN
+*/
+#define PDP_INTENAB_INTEN_GRPH2ORUN_MASK		(0x00002000)
+#define PDP_INTENAB_INTEN_GRPH2ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH2ORUN_SHIFT		(13)
+#define PDP_INTENAB_INTEN_GRPH2ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH1ORUN
+*/
+#define PDP_INTENAB_INTEN_GRPH1ORUN_MASK		(0x00001000)
+#define PDP_INTENAB_INTEN_GRPH1ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH1ORUN_SHIFT		(12)
+#define PDP_INTENAB_INTEN_GRPH1ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID4URUN
+*/
+#define PDP_INTENAB_INTEN_VID4URUN_MASK		(0x00000800)
+#define PDP_INTENAB_INTEN_VID4URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID4URUN_SHIFT		(11)
+#define PDP_INTENAB_INTEN_VID4URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID3URUN
+*/
+#define PDP_INTENAB_INTEN_VID3URUN_MASK		(0x00000400)
+#define PDP_INTENAB_INTEN_VID3URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID3URUN_SHIFT		(10)
+#define PDP_INTENAB_INTEN_VID3URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID2URUN
+*/
+#define PDP_INTENAB_INTEN_VID2URUN_MASK		(0x00000200)
+#define PDP_INTENAB_INTEN_VID2URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID2URUN_SHIFT		(9)
+#define PDP_INTENAB_INTEN_VID2URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID1URUN
+*/
+#define PDP_INTENAB_INTEN_VID1URUN_MASK		(0x00000100)
+#define PDP_INTENAB_INTEN_VID1URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID1URUN_SHIFT		(8)
+#define PDP_INTENAB_INTEN_VID1URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH4URUN
+*/
+#define PDP_INTENAB_INTEN_GRPH4URUN_MASK		(0x00000080)
+#define PDP_INTENAB_INTEN_GRPH4URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH4URUN_SHIFT		(7)
+#define PDP_INTENAB_INTEN_GRPH4URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH3URUN
+*/
+#define PDP_INTENAB_INTEN_GRPH3URUN_MASK		(0x00000040)
+#define PDP_INTENAB_INTEN_GRPH3URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH3URUN_SHIFT		(6)
+#define PDP_INTENAB_INTEN_GRPH3URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH2URUN
+*/
+#define PDP_INTENAB_INTEN_GRPH2URUN_MASK		(0x00000020)
+#define PDP_INTENAB_INTEN_GRPH2URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH2URUN_SHIFT		(5)
+#define PDP_INTENAB_INTEN_GRPH2URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH1URUN
+*/
+#define PDP_INTENAB_INTEN_GRPH1URUN_MASK		(0x00000010)
+#define PDP_INTENAB_INTEN_GRPH1URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH1URUN_SHIFT		(4)
+#define PDP_INTENAB_INTEN_GRPH1URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VBLNK1
+*/
+#define PDP_INTENAB_INTEN_VBLNK1_MASK		(0x00000008)
+#define PDP_INTENAB_INTEN_VBLNK1_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VBLNK1_SHIFT		(3)
+#define PDP_INTENAB_INTEN_VBLNK1_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VBLNK0
+*/
+#define PDP_INTENAB_INTEN_VBLNK0_MASK		(0x00000004)
+#define PDP_INTENAB_INTEN_VBLNK0_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VBLNK0_SHIFT		(2)
+#define PDP_INTENAB_INTEN_VBLNK0_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VBLNK0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_HBLNK1
+*/
+#define PDP_INTENAB_INTEN_HBLNK1_MASK		(0x00000002)
+#define PDP_INTENAB_INTEN_HBLNK1_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_HBLNK1_SHIFT		(1)
+#define PDP_INTENAB_INTEN_HBLNK1_LENGTH		(1)
+#define PDP_INTENAB_INTEN_HBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_HBLNK0
+*/
+#define PDP_INTENAB_INTEN_HBLNK0_MASK		(0x00000001)
+#define PDP_INTENAB_INTEN_HBLNK0_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_HBLNK0_SHIFT		(0)
+#define PDP_INTENAB_INTEN_HBLNK0_LENGTH		(1)
+#define PDP_INTENAB_INTEN_HBLNK0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_INTCLR_OFFSET		(0x07A4)
+
+/* PDP, INTCLR, INTCLR_VID4ORUN
+*/
+#define PDP_INTCLR_INTCLR_VID4ORUN_MASK		(0x00080000)
+#define PDP_INTCLR_INTCLR_VID4ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID4ORUN_SHIFT		(19)
+#define PDP_INTCLR_INTCLR_VID4ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID3ORUN
+*/
+#define PDP_INTCLR_INTCLR_VID3ORUN_MASK		(0x00040000)
+#define PDP_INTCLR_INTCLR_VID3ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID3ORUN_SHIFT		(18)
+#define PDP_INTCLR_INTCLR_VID3ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID2ORUN
+*/
+#define PDP_INTCLR_INTCLR_VID2ORUN_MASK		(0x00020000)
+#define PDP_INTCLR_INTCLR_VID2ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID2ORUN_SHIFT		(17)
+#define PDP_INTCLR_INTCLR_VID2ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID1ORUN
+*/
+#define PDP_INTCLR_INTCLR_VID1ORUN_MASK		(0x00010000)
+#define PDP_INTCLR_INTCLR_VID1ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID1ORUN_SHIFT		(16)
+#define PDP_INTCLR_INTCLR_VID1ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH4ORUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH4ORUN_MASK		(0x00008000)
+#define PDP_INTCLR_INTCLR_GRPH4ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH4ORUN_SHIFT		(15)
+#define PDP_INTCLR_INTCLR_GRPH4ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH3ORUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH3ORUN_MASK		(0x00004000)
+#define PDP_INTCLR_INTCLR_GRPH3ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH3ORUN_SHIFT		(14)
+#define PDP_INTCLR_INTCLR_GRPH3ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH2ORUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH2ORUN_MASK		(0x00002000)
+#define PDP_INTCLR_INTCLR_GRPH2ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH2ORUN_SHIFT		(13)
+#define PDP_INTCLR_INTCLR_GRPH2ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH1ORUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH1ORUN_MASK		(0x00001000)
+#define PDP_INTCLR_INTCLR_GRPH1ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH1ORUN_SHIFT		(12)
+#define PDP_INTCLR_INTCLR_GRPH1ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID4URUN
+*/
+#define PDP_INTCLR_INTCLR_VID4URUN_MASK		(0x00000800)
+#define PDP_INTCLR_INTCLR_VID4URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID4URUN_SHIFT		(11)
+#define PDP_INTCLR_INTCLR_VID4URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID3URUN
+*/
+#define PDP_INTCLR_INTCLR_VID3URUN_MASK		(0x00000400)
+#define PDP_INTCLR_INTCLR_VID3URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID3URUN_SHIFT		(10)
+#define PDP_INTCLR_INTCLR_VID3URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID2URUN
+*/
+#define PDP_INTCLR_INTCLR_VID2URUN_MASK		(0x00000200)
+#define PDP_INTCLR_INTCLR_VID2URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID2URUN_SHIFT		(9)
+#define PDP_INTCLR_INTCLR_VID2URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID1URUN
+*/
+#define PDP_INTCLR_INTCLR_VID1URUN_MASK		(0x00000100)
+#define PDP_INTCLR_INTCLR_VID1URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID1URUN_SHIFT		(8)
+#define PDP_INTCLR_INTCLR_VID1URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH4URUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH4URUN_MASK		(0x00000080)
+#define PDP_INTCLR_INTCLR_GRPH4URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH4URUN_SHIFT		(7)
+#define PDP_INTCLR_INTCLR_GRPH4URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH3URUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH3URUN_MASK		(0x00000040)
+#define PDP_INTCLR_INTCLR_GRPH3URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH3URUN_SHIFT		(6)
+#define PDP_INTCLR_INTCLR_GRPH3URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH2URUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH2URUN_MASK		(0x00000020)
+#define PDP_INTCLR_INTCLR_GRPH2URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH2URUN_SHIFT		(5)
+#define PDP_INTCLR_INTCLR_GRPH2URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH1URUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH1URUN_MASK		(0x00000010)
+#define PDP_INTCLR_INTCLR_GRPH1URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH1URUN_SHIFT		(4)
+#define PDP_INTCLR_INTCLR_GRPH1URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VBLNK1
+*/
+#define PDP_INTCLR_INTCLR_VBLNK1_MASK		(0x00000008)
+#define PDP_INTCLR_INTCLR_VBLNK1_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VBLNK1_SHIFT		(3)
+#define PDP_INTCLR_INTCLR_VBLNK1_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VBLNK0
+*/
+#define PDP_INTCLR_INTCLR_VBLNK0_MASK		(0x00000004)
+#define PDP_INTCLR_INTCLR_VBLNK0_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VBLNK0_SHIFT		(2)
+#define PDP_INTCLR_INTCLR_VBLNK0_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VBLNK0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_HBLNK1
+*/
+#define PDP_INTCLR_INTCLR_HBLNK1_MASK		(0x00000002)
+#define PDP_INTCLR_INTCLR_HBLNK1_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_HBLNK1_SHIFT		(1)
+#define PDP_INTCLR_INTCLR_HBLNK1_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_HBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_HBLNK0
+*/
+#define PDP_INTCLR_INTCLR_HBLNK0_MASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_HBLNK0_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_HBLNK0_SHIFT		(0)
+#define PDP_INTCLR_INTCLR_HBLNK0_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_HBLNK0_SIGNED_FIELD	IMG_FALSE
+
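+/*
+ * Illustrative sketch, not part of the original register list: the
+ * generated _MASK/_LSBMASK/_SHIFT triplets above are normally consumed
+ * by generic field helpers along these lines.  The helper names are
+ * hypothetical, chosen only for this example.
+ */
+#define PDP_FIELD_GET(val, reg, field) \
+	(((val) & PDP_##reg##_##field##_MASK) >> \
+	 PDP_##reg##_##field##_SHIFT)
+#define PDP_FIELD_SET(val, reg, field, x) \
+	(((val) & ~PDP_##reg##_##field##_MASK) | \
+	 (((x) & PDP_##reg##_##field##_LSBMASK) << \
+	  PDP_##reg##_##field##_SHIFT))
+
+/*
+ * e.g. build an INTCLR value that clears both vertical-blank interrupts
+ * (writel() as in <linux/io.h>; 'base' is a hypothetical mapping):
+ *
+ *	u32 intclr = 0;
+ *
+ *	intclr = PDP_FIELD_SET(intclr, INTCLR, INTCLR_VBLNK0, 1);
+ *	intclr = PDP_FIELD_SET(intclr, INTCLR, INTCLR_VBLNK1, 1);
+ *	writel(intclr, base + PDP_INTCLR_OFFSET);
+ */
+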
+#define PDP_MEMCTRL_OFFSET		(0x07A8)
+
+/* PDP, MEMCTRL, MEMREFRESH
+*/
+#define PDP_MEMCTRL_MEMREFRESH_MASK		(0xC0000000)
+#define PDP_MEMCTRL_MEMREFRESH_LSBMASK		(0x00000003)
+#define PDP_MEMCTRL_MEMREFRESH_SHIFT		(30)
+#define PDP_MEMCTRL_MEMREFRESH_LENGTH		(2)
+#define PDP_MEMCTRL_MEMREFRESH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, MEMCTRL, BURSTLEN
+*/
+#define PDP_MEMCTRL_BURSTLEN_MASK		(0x000000FF)
+#define PDP_MEMCTRL_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_MEMCTRL_BURSTLEN_SHIFT		(0)
+#define PDP_MEMCTRL_BURSTLEN_LENGTH		(8)
+#define PDP_MEMCTRL_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_MEM_THRESH_OFFSET		(0x07AC)
+
+/* PDP, MEM_THRESH, UVTHRESHOLD
+*/
+#define PDP_MEM_THRESH_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_MEM_THRESH_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_MEM_THRESH_UVTHRESHOLD_SHIFT		(24)
+#define PDP_MEM_THRESH_UVTHRESHOLD_LENGTH		(8)
+#define PDP_MEM_THRESH_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, MEM_THRESH, YTHRESHOLD
+*/
+#define PDP_MEM_THRESH_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_MEM_THRESH_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_MEM_THRESH_YTHRESHOLD_SHIFT		(12)
+#define PDP_MEM_THRESH_YTHRESHOLD_LENGTH		(9)
+#define PDP_MEM_THRESH_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, MEM_THRESH, THRESHOLD
+*/
+#define PDP_MEM_THRESH_THRESHOLD_MASK		(0x000001FF)
+#define PDP_MEM_THRESH_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_MEM_THRESH_THRESHOLD_SHIFT		(0)
+#define PDP_MEM_THRESH_THRESHOLD_LENGTH		(9)
+#define PDP_MEM_THRESH_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_ALTERNATE_3D_CTRL_OFFSET		(0x07B0)
+
+/* PDP, ALTERNATE_3D_CTRL, ALT3D_ON
+*/
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_MASK		(0x00000010)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LSBMASK		(0x00000001)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SHIFT		(4)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LENGTH		(1)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, ALTERNATE_3D_CTRL, ALT3D_BLENDSEL
+*/
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_MASK		(0x00000007)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LSBMASK		(0x00000007)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SHIFT		(0)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LENGTH		(3)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA0_R_OFFSET		(0x07B4)
+
+/* PDP, GAMMA0_R, GAMMA0_R
+*/
+#define PDP_GAMMA0_R_GAMMA0_R_MASK		(0x000003FF)
+#define PDP_GAMMA0_R_GAMMA0_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA0_R_GAMMA0_R_SHIFT		(0)
+#define PDP_GAMMA0_R_GAMMA0_R_LENGTH		(10)
+#define PDP_GAMMA0_R_GAMMA0_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA0_GB_OFFSET		(0x07B8)
+
+/* PDP, GAMMA0_GB, GAMMA0_G
+*/
+#define PDP_GAMMA0_GB_GAMMA0_G_MASK		(0x03FF0000)
+#define PDP_GAMMA0_GB_GAMMA0_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA0_GB_GAMMA0_G_SHIFT		(16)
+#define PDP_GAMMA0_GB_GAMMA0_G_LENGTH		(10)
+#define PDP_GAMMA0_GB_GAMMA0_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA0_GB, GAMMA0_B
+*/
+#define PDP_GAMMA0_GB_GAMMA0_B_MASK		(0x000003FF)
+#define PDP_GAMMA0_GB_GAMMA0_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA0_GB_GAMMA0_B_SHIFT		(0)
+#define PDP_GAMMA0_GB_GAMMA0_B_LENGTH		(10)
+#define PDP_GAMMA0_GB_GAMMA0_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA1_R_OFFSET		(0x07BC)
+
+/* PDP, GAMMA1_R, GAMMA1_R
+*/
+#define PDP_GAMMA1_R_GAMMA1_R_MASK		(0x000003FF)
+#define PDP_GAMMA1_R_GAMMA1_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA1_R_GAMMA1_R_SHIFT		(0)
+#define PDP_GAMMA1_R_GAMMA1_R_LENGTH		(10)
+#define PDP_GAMMA1_R_GAMMA1_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA1_GB_OFFSET		(0x07C0)
+
+/* PDP, GAMMA1_GB, GAMMA1_G
+*/
+#define PDP_GAMMA1_GB_GAMMA1_G_MASK		(0x03FF0000)
+#define PDP_GAMMA1_GB_GAMMA1_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA1_GB_GAMMA1_G_SHIFT		(16)
+#define PDP_GAMMA1_GB_GAMMA1_G_LENGTH		(10)
+#define PDP_GAMMA1_GB_GAMMA1_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA1_GB, GAMMA1_B
+*/
+#define PDP_GAMMA1_GB_GAMMA1_B_MASK		(0x000003FF)
+#define PDP_GAMMA1_GB_GAMMA1_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA1_GB_GAMMA1_B_SHIFT		(0)
+#define PDP_GAMMA1_GB_GAMMA1_B_LENGTH		(10)
+#define PDP_GAMMA1_GB_GAMMA1_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA2_R_OFFSET		(0x07C4)
+
+/* PDP, GAMMA2_R, GAMMA2_R
+*/
+#define PDP_GAMMA2_R_GAMMA2_R_MASK		(0x000003FF)
+#define PDP_GAMMA2_R_GAMMA2_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA2_R_GAMMA2_R_SHIFT		(0)
+#define PDP_GAMMA2_R_GAMMA2_R_LENGTH		(10)
+#define PDP_GAMMA2_R_GAMMA2_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA2_GB_OFFSET		(0x07C8)
+
+/* PDP, GAMMA2_GB, GAMMA2_G
+*/
+#define PDP_GAMMA2_GB_GAMMA2_G_MASK		(0x03FF0000)
+#define PDP_GAMMA2_GB_GAMMA2_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA2_GB_GAMMA2_G_SHIFT		(16)
+#define PDP_GAMMA2_GB_GAMMA2_G_LENGTH		(10)
+#define PDP_GAMMA2_GB_GAMMA2_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA2_GB, GAMMA2_B
+*/
+#define PDP_GAMMA2_GB_GAMMA2_B_MASK		(0x000003FF)
+#define PDP_GAMMA2_GB_GAMMA2_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA2_GB_GAMMA2_B_SHIFT		(0)
+#define PDP_GAMMA2_GB_GAMMA2_B_LENGTH		(10)
+#define PDP_GAMMA2_GB_GAMMA2_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA3_R_OFFSET		(0x07CC)
+
+/* PDP, GAMMA3_R, GAMMA3_R
+*/
+#define PDP_GAMMA3_R_GAMMA3_R_MASK		(0x000003FF)
+#define PDP_GAMMA3_R_GAMMA3_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA3_R_GAMMA3_R_SHIFT		(0)
+#define PDP_GAMMA3_R_GAMMA3_R_LENGTH		(10)
+#define PDP_GAMMA3_R_GAMMA3_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA3_GB_OFFSET		(0x07D0)
+
+/* PDP, GAMMA3_GB, GAMMA3_G
+*/
+#define PDP_GAMMA3_GB_GAMMA3_G_MASK		(0x03FF0000)
+#define PDP_GAMMA3_GB_GAMMA3_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA3_GB_GAMMA3_G_SHIFT		(16)
+#define PDP_GAMMA3_GB_GAMMA3_G_LENGTH		(10)
+#define PDP_GAMMA3_GB_GAMMA3_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA3_GB, GAMMA3_B
+*/
+#define PDP_GAMMA3_GB_GAMMA3_B_MASK		(0x000003FF)
+#define PDP_GAMMA3_GB_GAMMA3_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA3_GB_GAMMA3_B_SHIFT		(0)
+#define PDP_GAMMA3_GB_GAMMA3_B_LENGTH		(10)
+#define PDP_GAMMA3_GB_GAMMA3_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA4_R_OFFSET		(0x07D4)
+
+/* PDP, GAMMA4_R, GAMMA4_R
+*/
+#define PDP_GAMMA4_R_GAMMA4_R_MASK		(0x000003FF)
+#define PDP_GAMMA4_R_GAMMA4_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA4_R_GAMMA4_R_SHIFT		(0)
+#define PDP_GAMMA4_R_GAMMA4_R_LENGTH		(10)
+#define PDP_GAMMA4_R_GAMMA4_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA4_GB_OFFSET		(0x07D8)
+
+/* PDP, GAMMA4_GB, GAMMA4_G
+*/
+#define PDP_GAMMA4_GB_GAMMA4_G_MASK		(0x03FF0000)
+#define PDP_GAMMA4_GB_GAMMA4_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA4_GB_GAMMA4_G_SHIFT		(16)
+#define PDP_GAMMA4_GB_GAMMA4_G_LENGTH		(10)
+#define PDP_GAMMA4_GB_GAMMA4_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA4_GB, GAMMA4_B
+*/
+#define PDP_GAMMA4_GB_GAMMA4_B_MASK		(0x000003FF)
+#define PDP_GAMMA4_GB_GAMMA4_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA4_GB_GAMMA4_B_SHIFT		(0)
+#define PDP_GAMMA4_GB_GAMMA4_B_LENGTH		(10)
+#define PDP_GAMMA4_GB_GAMMA4_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA5_R_OFFSET		(0x07DC)
+
+/* PDP, GAMMA5_R, GAMMA5_R
+*/
+#define PDP_GAMMA5_R_GAMMA5_R_MASK		(0x000003FF)
+#define PDP_GAMMA5_R_GAMMA5_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA5_R_GAMMA5_R_SHIFT		(0)
+#define PDP_GAMMA5_R_GAMMA5_R_LENGTH		(10)
+#define PDP_GAMMA5_R_GAMMA5_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA5_GB_OFFSET		(0x07E0)
+
+/* PDP, GAMMA5_GB, GAMMA5_G
+*/
+#define PDP_GAMMA5_GB_GAMMA5_G_MASK		(0x03FF0000)
+#define PDP_GAMMA5_GB_GAMMA5_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA5_GB_GAMMA5_G_SHIFT		(16)
+#define PDP_GAMMA5_GB_GAMMA5_G_LENGTH		(10)
+#define PDP_GAMMA5_GB_GAMMA5_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA5_GB, GAMMA5_B
+*/
+#define PDP_GAMMA5_GB_GAMMA5_B_MASK		(0x000003FF)
+#define PDP_GAMMA5_GB_GAMMA5_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA5_GB_GAMMA5_B_SHIFT		(0)
+#define PDP_GAMMA5_GB_GAMMA5_B_LENGTH		(10)
+#define PDP_GAMMA5_GB_GAMMA5_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA6_R_OFFSET		(0x07E4)
+
+/* PDP, GAMMA6_R, GAMMA6_R
+*/
+#define PDP_GAMMA6_R_GAMMA6_R_MASK		(0x000003FF)
+#define PDP_GAMMA6_R_GAMMA6_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA6_R_GAMMA6_R_SHIFT		(0)
+#define PDP_GAMMA6_R_GAMMA6_R_LENGTH		(10)
+#define PDP_GAMMA6_R_GAMMA6_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA6_GB_OFFSET		(0x07E8)
+
+/* PDP, GAMMA6_GB, GAMMA6_G
+*/
+#define PDP_GAMMA6_GB_GAMMA6_G_MASK		(0x03FF0000)
+#define PDP_GAMMA6_GB_GAMMA6_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA6_GB_GAMMA6_G_SHIFT		(16)
+#define PDP_GAMMA6_GB_GAMMA6_G_LENGTH		(10)
+#define PDP_GAMMA6_GB_GAMMA6_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA6_GB, GAMMA6_B
+*/
+#define PDP_GAMMA6_GB_GAMMA6_B_MASK		(0x000003FF)
+#define PDP_GAMMA6_GB_GAMMA6_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA6_GB_GAMMA6_B_SHIFT		(0)
+#define PDP_GAMMA6_GB_GAMMA6_B_LENGTH		(10)
+#define PDP_GAMMA6_GB_GAMMA6_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA7_R_OFFSET		(0x07EC)
+
+/* PDP, GAMMA7_R, GAMMA7_R
+*/
+#define PDP_GAMMA7_R_GAMMA7_R_MASK		(0x000003FF)
+#define PDP_GAMMA7_R_GAMMA7_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA7_R_GAMMA7_R_SHIFT		(0)
+#define PDP_GAMMA7_R_GAMMA7_R_LENGTH		(10)
+#define PDP_GAMMA7_R_GAMMA7_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA7_GB_OFFSET		(0x07F0)
+
+/* PDP, GAMMA7_GB, GAMMA7_G
+*/
+#define PDP_GAMMA7_GB_GAMMA7_G_MASK		(0x03FF0000)
+#define PDP_GAMMA7_GB_GAMMA7_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA7_GB_GAMMA7_G_SHIFT		(16)
+#define PDP_GAMMA7_GB_GAMMA7_G_LENGTH		(10)
+#define PDP_GAMMA7_GB_GAMMA7_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA7_GB, GAMMA7_B
+*/
+#define PDP_GAMMA7_GB_GAMMA7_B_MASK		(0x000003FF)
+#define PDP_GAMMA7_GB_GAMMA7_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA7_GB_GAMMA7_B_SHIFT		(0)
+#define PDP_GAMMA7_GB_GAMMA7_B_LENGTH		(10)
+#define PDP_GAMMA7_GB_GAMMA7_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA8_R_OFFSET		(0x07F4)
+
+/* PDP, GAMMA8_R, GAMMA8_R
+*/
+#define PDP_GAMMA8_R_GAMMA8_R_MASK		(0x000003FF)
+#define PDP_GAMMA8_R_GAMMA8_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA8_R_GAMMA8_R_SHIFT		(0)
+#define PDP_GAMMA8_R_GAMMA8_R_LENGTH		(10)
+#define PDP_GAMMA8_R_GAMMA8_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA8_GB_OFFSET		(0x07F8)
+
+/* PDP, GAMMA8_GB, GAMMA8_G
+*/
+#define PDP_GAMMA8_GB_GAMMA8_G_MASK		(0x03FF0000)
+#define PDP_GAMMA8_GB_GAMMA8_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA8_GB_GAMMA8_G_SHIFT		(16)
+#define PDP_GAMMA8_GB_GAMMA8_G_LENGTH		(10)
+#define PDP_GAMMA8_GB_GAMMA8_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA8_GB, GAMMA8_B
+*/
+#define PDP_GAMMA8_GB_GAMMA8_B_MASK		(0x000003FF)
+#define PDP_GAMMA8_GB_GAMMA8_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA8_GB_GAMMA8_B_SHIFT		(0)
+#define PDP_GAMMA8_GB_GAMMA8_B_LENGTH		(10)
+#define PDP_GAMMA8_GB_GAMMA8_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA9_R_OFFSET		(0x07FC)
+
+/* PDP, GAMMA9_R, GAMMA9_R
+*/
+#define PDP_GAMMA9_R_GAMMA9_R_MASK		(0x000003FF)
+#define PDP_GAMMA9_R_GAMMA9_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA9_R_GAMMA9_R_SHIFT		(0)
+#define PDP_GAMMA9_R_GAMMA9_R_LENGTH		(10)
+#define PDP_GAMMA9_R_GAMMA9_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA9_GB_OFFSET		(0x0800)
+
+/* PDP, GAMMA9_GB, GAMMA9_G
+*/
+#define PDP_GAMMA9_GB_GAMMA9_G_MASK		(0x03FF0000)
+#define PDP_GAMMA9_GB_GAMMA9_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA9_GB_GAMMA9_G_SHIFT		(16)
+#define PDP_GAMMA9_GB_GAMMA9_G_LENGTH		(10)
+#define PDP_GAMMA9_GB_GAMMA9_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA9_GB, GAMMA9_B
+*/
+#define PDP_GAMMA9_GB_GAMMA9_B_MASK		(0x000003FF)
+#define PDP_GAMMA9_GB_GAMMA9_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA9_GB_GAMMA9_B_SHIFT		(0)
+#define PDP_GAMMA9_GB_GAMMA9_B_LENGTH		(10)
+#define PDP_GAMMA9_GB_GAMMA9_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA10_R_OFFSET		(0x0804)
+
+/* PDP, GAMMA10_R, GAMMA10_R
+*/
+#define PDP_GAMMA10_R_GAMMA10_R_MASK		(0x000003FF)
+#define PDP_GAMMA10_R_GAMMA10_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA10_R_GAMMA10_R_SHIFT		(0)
+#define PDP_GAMMA10_R_GAMMA10_R_LENGTH		(10)
+#define PDP_GAMMA10_R_GAMMA10_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA10_GB_OFFSET		(0x0808)
+
+/* PDP, GAMMA10_GB, GAMMA10_G
+*/
+#define PDP_GAMMA10_GB_GAMMA10_G_MASK		(0x03FF0000)
+#define PDP_GAMMA10_GB_GAMMA10_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA10_GB_GAMMA10_G_SHIFT		(16)
+#define PDP_GAMMA10_GB_GAMMA10_G_LENGTH		(10)
+#define PDP_GAMMA10_GB_GAMMA10_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA10_GB, GAMMA10_B
+*/
+#define PDP_GAMMA10_GB_GAMMA10_B_MASK		(0x000003FF)
+#define PDP_GAMMA10_GB_GAMMA10_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA10_GB_GAMMA10_B_SHIFT		(0)
+#define PDP_GAMMA10_GB_GAMMA10_B_LENGTH		(10)
+#define PDP_GAMMA10_GB_GAMMA10_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA11_R_OFFSET		(0x080C)
+
+/* PDP, GAMMA11_R, GAMMA11_R
+*/
+#define PDP_GAMMA11_R_GAMMA11_R_MASK		(0x000003FF)
+#define PDP_GAMMA11_R_GAMMA11_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA11_R_GAMMA11_R_SHIFT		(0)
+#define PDP_GAMMA11_R_GAMMA11_R_LENGTH		(10)
+#define PDP_GAMMA11_R_GAMMA11_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA11_GB_OFFSET		(0x0810)
+
+/* PDP, GAMMA11_GB, GAMMA11_G
+*/
+#define PDP_GAMMA11_GB_GAMMA11_G_MASK		(0x03FF0000)
+#define PDP_GAMMA11_GB_GAMMA11_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA11_GB_GAMMA11_G_SHIFT		(16)
+#define PDP_GAMMA11_GB_GAMMA11_G_LENGTH		(10)
+#define PDP_GAMMA11_GB_GAMMA11_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA11_GB, GAMMA11_B
+*/
+#define PDP_GAMMA11_GB_GAMMA11_B_MASK		(0x000003FF)
+#define PDP_GAMMA11_GB_GAMMA11_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA11_GB_GAMMA11_B_SHIFT		(0)
+#define PDP_GAMMA11_GB_GAMMA11_B_LENGTH		(10)
+#define PDP_GAMMA11_GB_GAMMA11_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA12_R_OFFSET		(0x0814)
+
+/* PDP, GAMMA12_R, GAMMA12_R
+*/
+#define PDP_GAMMA12_R_GAMMA12_R_MASK		(0x000003FF)
+#define PDP_GAMMA12_R_GAMMA12_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA12_R_GAMMA12_R_SHIFT		(0)
+#define PDP_GAMMA12_R_GAMMA12_R_LENGTH		(10)
+#define PDP_GAMMA12_R_GAMMA12_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA12_GB_OFFSET		(0x0818)
+
+/* PDP, GAMMA12_GB, GAMMA12_G
+*/
+#define PDP_GAMMA12_GB_GAMMA12_G_MASK		(0x03FF0000)
+#define PDP_GAMMA12_GB_GAMMA12_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA12_GB_GAMMA12_G_SHIFT		(16)
+#define PDP_GAMMA12_GB_GAMMA12_G_LENGTH		(10)
+#define PDP_GAMMA12_GB_GAMMA12_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA12_GB, GAMMA12_B
+*/
+#define PDP_GAMMA12_GB_GAMMA12_B_MASK		(0x000003FF)
+#define PDP_GAMMA12_GB_GAMMA12_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA12_GB_GAMMA12_B_SHIFT		(0)
+#define PDP_GAMMA12_GB_GAMMA12_B_LENGTH		(10)
+#define PDP_GAMMA12_GB_GAMMA12_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA13_R_OFFSET		(0x081C)
+
+/* PDP, GAMMA13_R, GAMMA13_R
+*/
+#define PDP_GAMMA13_R_GAMMA13_R_MASK		(0x000003FF)
+#define PDP_GAMMA13_R_GAMMA13_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA13_R_GAMMA13_R_SHIFT		(0)
+#define PDP_GAMMA13_R_GAMMA13_R_LENGTH		(10)
+#define PDP_GAMMA13_R_GAMMA13_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA13_GB_OFFSET		(0x0820)
+
+/* PDP, GAMMA13_GB, GAMMA13_G
+*/
+#define PDP_GAMMA13_GB_GAMMA13_G_MASK		(0x03FF0000)
+#define PDP_GAMMA13_GB_GAMMA13_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA13_GB_GAMMA13_G_SHIFT		(16)
+#define PDP_GAMMA13_GB_GAMMA13_G_LENGTH		(10)
+#define PDP_GAMMA13_GB_GAMMA13_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA13_GB, GAMMA13_B
+*/
+#define PDP_GAMMA13_GB_GAMMA13_B_MASK		(0x000003FF)
+#define PDP_GAMMA13_GB_GAMMA13_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA13_GB_GAMMA13_B_SHIFT		(0)
+#define PDP_GAMMA13_GB_GAMMA13_B_LENGTH		(10)
+#define PDP_GAMMA13_GB_GAMMA13_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA14_R_OFFSET		(0x0824)
+
+/* PDP, GAMMA14_R, GAMMA14_R
+*/
+#define PDP_GAMMA14_R_GAMMA14_R_MASK		(0x000003FF)
+#define PDP_GAMMA14_R_GAMMA14_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA14_R_GAMMA14_R_SHIFT		(0)
+#define PDP_GAMMA14_R_GAMMA14_R_LENGTH		(10)
+#define PDP_GAMMA14_R_GAMMA14_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA14_GB_OFFSET		(0x0828)
+
+/* PDP, GAMMA14_GB, GAMMA14_G
+*/
+#define PDP_GAMMA14_GB_GAMMA14_G_MASK		(0x03FF0000)
+#define PDP_GAMMA14_GB_GAMMA14_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA14_GB_GAMMA14_G_SHIFT		(16)
+#define PDP_GAMMA14_GB_GAMMA14_G_LENGTH		(10)
+#define PDP_GAMMA14_GB_GAMMA14_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA14_GB, GAMMA14_B
+*/
+#define PDP_GAMMA14_GB_GAMMA14_B_MASK		(0x000003FF)
+#define PDP_GAMMA14_GB_GAMMA14_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA14_GB_GAMMA14_B_SHIFT		(0)
+#define PDP_GAMMA14_GB_GAMMA14_B_LENGTH		(10)
+#define PDP_GAMMA14_GB_GAMMA14_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA15_R_OFFSET		(0x082C)
+
+/* PDP, GAMMA15_R, GAMMA15_R
+*/
+#define PDP_GAMMA15_R_GAMMA15_R_MASK		(0x000003FF)
+#define PDP_GAMMA15_R_GAMMA15_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA15_R_GAMMA15_R_SHIFT		(0)
+#define PDP_GAMMA15_R_GAMMA15_R_LENGTH		(10)
+#define PDP_GAMMA15_R_GAMMA15_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA15_GB_OFFSET		(0x0830)
+
+/* PDP, GAMMA15_GB, GAMMA15_G
+*/
+#define PDP_GAMMA15_GB_GAMMA15_G_MASK		(0x03FF0000)
+#define PDP_GAMMA15_GB_GAMMA15_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA15_GB_GAMMA15_G_SHIFT		(16)
+#define PDP_GAMMA15_GB_GAMMA15_G_LENGTH		(10)
+#define PDP_GAMMA15_GB_GAMMA15_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA15_GB, GAMMA15_B
+*/
+#define PDP_GAMMA15_GB_GAMMA15_B_MASK		(0x000003FF)
+#define PDP_GAMMA15_GB_GAMMA15_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA15_GB_GAMMA15_B_SHIFT		(0)
+#define PDP_GAMMA15_GB_GAMMA15_B_LENGTH		(10)
+#define PDP_GAMMA15_GB_GAMMA15_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA16_R_OFFSET		(0x0834)
+
+/* PDP, GAMMA16_R, GAMMA16_R
+*/
+#define PDP_GAMMA16_R_GAMMA16_R_MASK		(0x000003FF)
+#define PDP_GAMMA16_R_GAMMA16_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA16_R_GAMMA16_R_SHIFT		(0)
+#define PDP_GAMMA16_R_GAMMA16_R_LENGTH		(10)
+#define PDP_GAMMA16_R_GAMMA16_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA16_GB_OFFSET		(0x0838)
+
+/* PDP, GAMMA16_GB, GAMMA16_G
+*/
+#define PDP_GAMMA16_GB_GAMMA16_G_MASK		(0x03FF0000)
+#define PDP_GAMMA16_GB_GAMMA16_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA16_GB_GAMMA16_G_SHIFT		(16)
+#define PDP_GAMMA16_GB_GAMMA16_G_LENGTH		(10)
+#define PDP_GAMMA16_GB_GAMMA16_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA16_GB, GAMMA16_B
+*/
+#define PDP_GAMMA16_GB_GAMMA16_B_MASK		(0x000003FF)
+#define PDP_GAMMA16_GB_GAMMA16_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA16_GB_GAMMA16_B_SHIFT		(0)
+#define PDP_GAMMA16_GB_GAMMA16_B_LENGTH		(10)
+#define PDP_GAMMA16_GB_GAMMA16_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA17_R_OFFSET		(0x083C)
+
+/* PDP, GAMMA17_R, GAMMA17_R
+*/
+#define PDP_GAMMA17_R_GAMMA17_R_MASK		(0x000003FF)
+#define PDP_GAMMA17_R_GAMMA17_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA17_R_GAMMA17_R_SHIFT		(0)
+#define PDP_GAMMA17_R_GAMMA17_R_LENGTH		(10)
+#define PDP_GAMMA17_R_GAMMA17_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA17_GB_OFFSET		(0x0840)
+
+/* PDP, GAMMA17_GB, GAMMA17_G
+*/
+#define PDP_GAMMA17_GB_GAMMA17_G_MASK		(0x03FF0000)
+#define PDP_GAMMA17_GB_GAMMA17_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA17_GB_GAMMA17_G_SHIFT		(16)
+#define PDP_GAMMA17_GB_GAMMA17_G_LENGTH		(10)
+#define PDP_GAMMA17_GB_GAMMA17_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA17_GB, GAMMA17_B
+*/
+#define PDP_GAMMA17_GB_GAMMA17_B_MASK		(0x000003FF)
+#define PDP_GAMMA17_GB_GAMMA17_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA17_GB_GAMMA17_B_SHIFT		(0)
+#define PDP_GAMMA17_GB_GAMMA17_B_LENGTH		(10)
+#define PDP_GAMMA17_GB_GAMMA17_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA18_R_OFFSET		(0x0844)
+
+/* PDP, GAMMA18_R, GAMMA18_R
+*/
+#define PDP_GAMMA18_R_GAMMA18_R_MASK		(0x000003FF)
+#define PDP_GAMMA18_R_GAMMA18_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA18_R_GAMMA18_R_SHIFT		(0)
+#define PDP_GAMMA18_R_GAMMA18_R_LENGTH		(10)
+#define PDP_GAMMA18_R_GAMMA18_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA18_GB_OFFSET		(0x0848)
+
+/* PDP, GAMMA18_GB, GAMMA18_G
+*/
+#define PDP_GAMMA18_GB_GAMMA18_G_MASK		(0x03FF0000)
+#define PDP_GAMMA18_GB_GAMMA18_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA18_GB_GAMMA18_G_SHIFT		(16)
+#define PDP_GAMMA18_GB_GAMMA18_G_LENGTH		(10)
+#define PDP_GAMMA18_GB_GAMMA18_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA18_GB, GAMMA18_B
+*/
+#define PDP_GAMMA18_GB_GAMMA18_B_MASK		(0x000003FF)
+#define PDP_GAMMA18_GB_GAMMA18_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA18_GB_GAMMA18_B_SHIFT		(0)
+#define PDP_GAMMA18_GB_GAMMA18_B_LENGTH		(10)
+#define PDP_GAMMA18_GB_GAMMA18_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA19_R_OFFSET		(0x084C)
+
+/* PDP, GAMMA19_R, GAMMA19_R
+*/
+#define PDP_GAMMA19_R_GAMMA19_R_MASK		(0x000003FF)
+#define PDP_GAMMA19_R_GAMMA19_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA19_R_GAMMA19_R_SHIFT		(0)
+#define PDP_GAMMA19_R_GAMMA19_R_LENGTH		(10)
+#define PDP_GAMMA19_R_GAMMA19_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA19_GB_OFFSET		(0x0850)
+
+/* PDP, GAMMA19_GB, GAMMA19_G
+*/
+#define PDP_GAMMA19_GB_GAMMA19_G_MASK		(0x03FF0000)
+#define PDP_GAMMA19_GB_GAMMA19_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA19_GB_GAMMA19_G_SHIFT		(16)
+#define PDP_GAMMA19_GB_GAMMA19_G_LENGTH		(10)
+#define PDP_GAMMA19_GB_GAMMA19_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA19_GB, GAMMA19_B
+*/
+#define PDP_GAMMA19_GB_GAMMA19_B_MASK		(0x000003FF)
+#define PDP_GAMMA19_GB_GAMMA19_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA19_GB_GAMMA19_B_SHIFT		(0)
+#define PDP_GAMMA19_GB_GAMMA19_B_LENGTH		(10)
+#define PDP_GAMMA19_GB_GAMMA19_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA20_R_OFFSET		(0x0854)
+
+/* PDP, GAMMA20_R, GAMMA20_R
+*/
+#define PDP_GAMMA20_R_GAMMA20_R_MASK		(0x000003FF)
+#define PDP_GAMMA20_R_GAMMA20_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA20_R_GAMMA20_R_SHIFT		(0)
+#define PDP_GAMMA20_R_GAMMA20_R_LENGTH		(10)
+#define PDP_GAMMA20_R_GAMMA20_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA20_GB_OFFSET		(0x0858)
+
+/* PDP, GAMMA20_GB, GAMMA20_G
+*/
+#define PDP_GAMMA20_GB_GAMMA20_G_MASK		(0x03FF0000)
+#define PDP_GAMMA20_GB_GAMMA20_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA20_GB_GAMMA20_G_SHIFT		(16)
+#define PDP_GAMMA20_GB_GAMMA20_G_LENGTH		(10)
+#define PDP_GAMMA20_GB_GAMMA20_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA20_GB, GAMMA20_B
+*/
+#define PDP_GAMMA20_GB_GAMMA20_B_MASK		(0x000003FF)
+#define PDP_GAMMA20_GB_GAMMA20_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA20_GB_GAMMA20_B_SHIFT		(0)
+#define PDP_GAMMA20_GB_GAMMA20_B_LENGTH		(10)
+#define PDP_GAMMA20_GB_GAMMA20_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA21_R_OFFSET		(0x085C)
+
+/* PDP, GAMMA21_R, GAMMA21_R
+*/
+#define PDP_GAMMA21_R_GAMMA21_R_MASK		(0x000003FF)
+#define PDP_GAMMA21_R_GAMMA21_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA21_R_GAMMA21_R_SHIFT		(0)
+#define PDP_GAMMA21_R_GAMMA21_R_LENGTH		(10)
+#define PDP_GAMMA21_R_GAMMA21_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA21_GB_OFFSET		(0x0860)
+
+/* PDP, GAMMA21_GB, GAMMA21_G
+*/
+#define PDP_GAMMA21_GB_GAMMA21_G_MASK		(0x03FF0000)
+#define PDP_GAMMA21_GB_GAMMA21_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA21_GB_GAMMA21_G_SHIFT		(16)
+#define PDP_GAMMA21_GB_GAMMA21_G_LENGTH		(10)
+#define PDP_GAMMA21_GB_GAMMA21_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA21_GB, GAMMA21_B
+*/
+#define PDP_GAMMA21_GB_GAMMA21_B_MASK		(0x000003FF)
+#define PDP_GAMMA21_GB_GAMMA21_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA21_GB_GAMMA21_B_SHIFT		(0)
+#define PDP_GAMMA21_GB_GAMMA21_B_LENGTH		(10)
+#define PDP_GAMMA21_GB_GAMMA21_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA22_R_OFFSET		(0x0864)
+
+/* PDP, GAMMA22_R, GAMMA22_R
+*/
+#define PDP_GAMMA22_R_GAMMA22_R_MASK		(0x000003FF)
+#define PDP_GAMMA22_R_GAMMA22_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA22_R_GAMMA22_R_SHIFT		(0)
+#define PDP_GAMMA22_R_GAMMA22_R_LENGTH		(10)
+#define PDP_GAMMA22_R_GAMMA22_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA22_GB_OFFSET		(0x0868)
+
+/* PDP, GAMMA22_GB, GAMMA22_G
+*/
+#define PDP_GAMMA22_GB_GAMMA22_G_MASK		(0x03FF0000)
+#define PDP_GAMMA22_GB_GAMMA22_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA22_GB_GAMMA22_G_SHIFT		(16)
+#define PDP_GAMMA22_GB_GAMMA22_G_LENGTH		(10)
+#define PDP_GAMMA22_GB_GAMMA22_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA22_GB, GAMMA22_B
+*/
+#define PDP_GAMMA22_GB_GAMMA22_B_MASK		(0x000003FF)
+#define PDP_GAMMA22_GB_GAMMA22_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA22_GB_GAMMA22_B_SHIFT		(0)
+#define PDP_GAMMA22_GB_GAMMA22_B_LENGTH		(10)
+#define PDP_GAMMA22_GB_GAMMA22_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA23_R_OFFSET		(0x086C)
+
+/* PDP, GAMMA23_R, GAMMA23_R
+*/
+#define PDP_GAMMA23_R_GAMMA23_R_MASK		(0x000003FF)
+#define PDP_GAMMA23_R_GAMMA23_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA23_R_GAMMA23_R_SHIFT		(0)
+#define PDP_GAMMA23_R_GAMMA23_R_LENGTH		(10)
+#define PDP_GAMMA23_R_GAMMA23_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA23_GB_OFFSET		(0x0870)
+
+/* PDP, GAMMA23_GB, GAMMA23_G
+*/
+#define PDP_GAMMA23_GB_GAMMA23_G_MASK		(0x03FF0000)
+#define PDP_GAMMA23_GB_GAMMA23_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA23_GB_GAMMA23_G_SHIFT		(16)
+#define PDP_GAMMA23_GB_GAMMA23_G_LENGTH		(10)
+#define PDP_GAMMA23_GB_GAMMA23_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA23_GB, GAMMA23_B
+*/
+#define PDP_GAMMA23_GB_GAMMA23_B_MASK		(0x000003FF)
+#define PDP_GAMMA23_GB_GAMMA23_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA23_GB_GAMMA23_B_SHIFT		(0)
+#define PDP_GAMMA23_GB_GAMMA23_B_LENGTH		(10)
+#define PDP_GAMMA23_GB_GAMMA23_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA24_R_OFFSET		(0x0874)
+
+/* PDP, GAMMA24_R, GAMMA24_R
+*/
+#define PDP_GAMMA24_R_GAMMA24_R_MASK		(0x000003FF)
+#define PDP_GAMMA24_R_GAMMA24_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA24_R_GAMMA24_R_SHIFT		(0)
+#define PDP_GAMMA24_R_GAMMA24_R_LENGTH		(10)
+#define PDP_GAMMA24_R_GAMMA24_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA24_GB_OFFSET		(0x0878)
+
+/* PDP, GAMMA24_GB, GAMMA24_G
+*/
+#define PDP_GAMMA24_GB_GAMMA24_G_MASK		(0x03FF0000)
+#define PDP_GAMMA24_GB_GAMMA24_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA24_GB_GAMMA24_G_SHIFT		(16)
+#define PDP_GAMMA24_GB_GAMMA24_G_LENGTH		(10)
+#define PDP_GAMMA24_GB_GAMMA24_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA24_GB, GAMMA24_B
+*/
+#define PDP_GAMMA24_GB_GAMMA24_B_MASK		(0x000003FF)
+#define PDP_GAMMA24_GB_GAMMA24_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA24_GB_GAMMA24_B_SHIFT		(0)
+#define PDP_GAMMA24_GB_GAMMA24_B_LENGTH		(10)
+#define PDP_GAMMA24_GB_GAMMA24_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA25_R_OFFSET		(0x087C)
+
+/* PDP, GAMMA25_R, GAMMA25_R
+*/
+#define PDP_GAMMA25_R_GAMMA25_R_MASK		(0x000003FF)
+#define PDP_GAMMA25_R_GAMMA25_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA25_R_GAMMA25_R_SHIFT		(0)
+#define PDP_GAMMA25_R_GAMMA25_R_LENGTH		(10)
+#define PDP_GAMMA25_R_GAMMA25_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA25_GB_OFFSET		(0x0880)
+
+/* PDP, GAMMA25_GB, GAMMA25_G
+*/
+#define PDP_GAMMA25_GB_GAMMA25_G_MASK		(0x03FF0000)
+#define PDP_GAMMA25_GB_GAMMA25_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA25_GB_GAMMA25_G_SHIFT		(16)
+#define PDP_GAMMA25_GB_GAMMA25_G_LENGTH		(10)
+#define PDP_GAMMA25_GB_GAMMA25_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA25_GB, GAMMA25_B
+*/
+#define PDP_GAMMA25_GB_GAMMA25_B_MASK		(0x000003FF)
+#define PDP_GAMMA25_GB_GAMMA25_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA25_GB_GAMMA25_B_SHIFT		(0)
+#define PDP_GAMMA25_GB_GAMMA25_B_LENGTH		(10)
+#define PDP_GAMMA25_GB_GAMMA25_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA26_R_OFFSET		(0x0884)
+
+/* PDP, GAMMA26_R, GAMMA26_R
+*/
+#define PDP_GAMMA26_R_GAMMA26_R_MASK		(0x000003FF)
+#define PDP_GAMMA26_R_GAMMA26_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA26_R_GAMMA26_R_SHIFT		(0)
+#define PDP_GAMMA26_R_GAMMA26_R_LENGTH		(10)
+#define PDP_GAMMA26_R_GAMMA26_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA26_GB_OFFSET		(0x0888)
+
+/* PDP, GAMMA26_GB, GAMMA26_G
+*/
+#define PDP_GAMMA26_GB_GAMMA26_G_MASK		(0x03FF0000)
+#define PDP_GAMMA26_GB_GAMMA26_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA26_GB_GAMMA26_G_SHIFT		(16)
+#define PDP_GAMMA26_GB_GAMMA26_G_LENGTH		(10)
+#define PDP_GAMMA26_GB_GAMMA26_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA26_GB, GAMMA26_B
+*/
+#define PDP_GAMMA26_GB_GAMMA26_B_MASK		(0x000003FF)
+#define PDP_GAMMA26_GB_GAMMA26_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA26_GB_GAMMA26_B_SHIFT		(0)
+#define PDP_GAMMA26_GB_GAMMA26_B_LENGTH		(10)
+#define PDP_GAMMA26_GB_GAMMA26_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA27_R_OFFSET		(0x088C)
+
+/* PDP, GAMMA27_R, GAMMA27_R
+*/
+#define PDP_GAMMA27_R_GAMMA27_R_MASK		(0x000003FF)
+#define PDP_GAMMA27_R_GAMMA27_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA27_R_GAMMA27_R_SHIFT		(0)
+#define PDP_GAMMA27_R_GAMMA27_R_LENGTH		(10)
+#define PDP_GAMMA27_R_GAMMA27_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA27_GB_OFFSET		(0x0890)
+
+/* PDP, GAMMA27_GB, GAMMA27_G
+*/
+#define PDP_GAMMA27_GB_GAMMA27_G_MASK		(0x03FF0000)
+#define PDP_GAMMA27_GB_GAMMA27_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA27_GB_GAMMA27_G_SHIFT		(16)
+#define PDP_GAMMA27_GB_GAMMA27_G_LENGTH		(10)
+#define PDP_GAMMA27_GB_GAMMA27_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA27_GB, GAMMA27_B
+*/
+#define PDP_GAMMA27_GB_GAMMA27_B_MASK		(0x000003FF)
+#define PDP_GAMMA27_GB_GAMMA27_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA27_GB_GAMMA27_B_SHIFT		(0)
+#define PDP_GAMMA27_GB_GAMMA27_B_LENGTH		(10)
+#define PDP_GAMMA27_GB_GAMMA27_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA28_R_OFFSET		(0x0894)
+
+/* PDP, GAMMA28_R, GAMMA28_R
+*/
+#define PDP_GAMMA28_R_GAMMA28_R_MASK		(0x000003FF)
+#define PDP_GAMMA28_R_GAMMA28_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA28_R_GAMMA28_R_SHIFT		(0)
+#define PDP_GAMMA28_R_GAMMA28_R_LENGTH		(10)
+#define PDP_GAMMA28_R_GAMMA28_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA28_GB_OFFSET		(0x0898)
+
+/* PDP, GAMMA28_GB, GAMMA28_G
+*/
+#define PDP_GAMMA28_GB_GAMMA28_G_MASK		(0x03FF0000)
+#define PDP_GAMMA28_GB_GAMMA28_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA28_GB_GAMMA28_G_SHIFT		(16)
+#define PDP_GAMMA28_GB_GAMMA28_G_LENGTH		(10)
+#define PDP_GAMMA28_GB_GAMMA28_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA28_GB, GAMMA28_B
+*/
+#define PDP_GAMMA28_GB_GAMMA28_B_MASK		(0x000003FF)
+#define PDP_GAMMA28_GB_GAMMA28_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA28_GB_GAMMA28_B_SHIFT		(0)
+#define PDP_GAMMA28_GB_GAMMA28_B_LENGTH		(10)
+#define PDP_GAMMA28_GB_GAMMA28_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA29_R_OFFSET		(0x089C)
+
+/* PDP, GAMMA29_R, GAMMA29_R
+*/
+#define PDP_GAMMA29_R_GAMMA29_R_MASK		(0x000003FF)
+#define PDP_GAMMA29_R_GAMMA29_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA29_R_GAMMA29_R_SHIFT		(0)
+#define PDP_GAMMA29_R_GAMMA29_R_LENGTH		(10)
+#define PDP_GAMMA29_R_GAMMA29_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA29_GB_OFFSET		(0x08A0)
+
+/* PDP, GAMMA29_GB, GAMMA29_G
+*/
+#define PDP_GAMMA29_GB_GAMMA29_G_MASK		(0x03FF0000)
+#define PDP_GAMMA29_GB_GAMMA29_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA29_GB_GAMMA29_G_SHIFT		(16)
+#define PDP_GAMMA29_GB_GAMMA29_G_LENGTH		(10)
+#define PDP_GAMMA29_GB_GAMMA29_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA29_GB, GAMMA29_B
+*/
+#define PDP_GAMMA29_GB_GAMMA29_B_MASK		(0x000003FF)
+#define PDP_GAMMA29_GB_GAMMA29_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA29_GB_GAMMA29_B_SHIFT		(0)
+#define PDP_GAMMA29_GB_GAMMA29_B_LENGTH		(10)
+#define PDP_GAMMA29_GB_GAMMA29_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA30_R_OFFSET		(0x08A4)
+
+/* PDP, GAMMA30_R, GAMMA30_R
+*/
+#define PDP_GAMMA30_R_GAMMA30_R_MASK		(0x000003FF)
+#define PDP_GAMMA30_R_GAMMA30_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA30_R_GAMMA30_R_SHIFT		(0)
+#define PDP_GAMMA30_R_GAMMA30_R_LENGTH		(10)
+#define PDP_GAMMA30_R_GAMMA30_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA30_GB_OFFSET		(0x08A8)
+
+/* PDP, GAMMA30_GB, GAMMA30_G
+*/
+#define PDP_GAMMA30_GB_GAMMA30_G_MASK		(0x03FF0000)
+#define PDP_GAMMA30_GB_GAMMA30_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA30_GB_GAMMA30_G_SHIFT		(16)
+#define PDP_GAMMA30_GB_GAMMA30_G_LENGTH		(10)
+#define PDP_GAMMA30_GB_GAMMA30_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA30_GB, GAMMA30_B
+*/
+#define PDP_GAMMA30_GB_GAMMA30_B_MASK		(0x000003FF)
+#define PDP_GAMMA30_GB_GAMMA30_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA30_GB_GAMMA30_B_SHIFT		(0)
+#define PDP_GAMMA30_GB_GAMMA30_B_LENGTH		(10)
+#define PDP_GAMMA30_GB_GAMMA30_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA31_R_OFFSET		(0x08AC)
+
+/* PDP, GAMMA31_R, GAMMA31_R
+*/
+#define PDP_GAMMA31_R_GAMMA31_R_MASK		(0x000003FF)
+#define PDP_GAMMA31_R_GAMMA31_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA31_R_GAMMA31_R_SHIFT		(0)
+#define PDP_GAMMA31_R_GAMMA31_R_LENGTH		(10)
+#define PDP_GAMMA31_R_GAMMA31_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA31_GB_OFFSET		(0x08B0)
+
+/* PDP, GAMMA31_GB, GAMMA31_G
+*/
+#define PDP_GAMMA31_GB_GAMMA31_G_MASK		(0x03FF0000)
+#define PDP_GAMMA31_GB_GAMMA31_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA31_GB_GAMMA31_G_SHIFT		(16)
+#define PDP_GAMMA31_GB_GAMMA31_G_LENGTH		(10)
+#define PDP_GAMMA31_GB_GAMMA31_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA31_GB, GAMMA31_B
+*/
+#define PDP_GAMMA31_GB_GAMMA31_B_MASK		(0x000003FF)
+#define PDP_GAMMA31_GB_GAMMA31_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA31_GB_GAMMA31_B_SHIFT		(0)
+#define PDP_GAMMA31_GB_GAMMA31_B_LENGTH		(10)
+#define PDP_GAMMA31_GB_GAMMA31_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA32_R_OFFSET		(0x08B4)
+
+/* PDP, GAMMA32_R, GAMMA32_R
+*/
+#define PDP_GAMMA32_R_GAMMA32_R_MASK		(0x000003FF)
+#define PDP_GAMMA32_R_GAMMA32_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA32_R_GAMMA32_R_SHIFT		(0)
+#define PDP_GAMMA32_R_GAMMA32_R_LENGTH		(10)
+#define PDP_GAMMA32_R_GAMMA32_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA32_GB_OFFSET		(0x08B8)
+
+/* PDP, GAMMA32_GB, GAMMA32_G
+*/
+#define PDP_GAMMA32_GB_GAMMA32_G_MASK		(0x03FF0000)
+#define PDP_GAMMA32_GB_GAMMA32_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA32_GB_GAMMA32_G_SHIFT		(16)
+#define PDP_GAMMA32_GB_GAMMA32_G_LENGTH		(10)
+#define PDP_GAMMA32_GB_GAMMA32_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA32_GB, GAMMA32_B
+*/
+#define PDP_GAMMA32_GB_GAMMA32_B_MASK		(0x000003FF)
+#define PDP_GAMMA32_GB_GAMMA32_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA32_GB_GAMMA32_B_SHIFT		(0)
+#define PDP_GAMMA32_GB_GAMMA32_B_LENGTH		(10)
+#define PDP_GAMMA32_GB_GAMMA32_B_SIGNED_FIELD	IMG_FALSE
+
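+/*
+ * Illustrative sketch, not part of the original register list: GAMMA0
+ * through GAMMA32 form a 33-entry, 10-bit-per-channel lookup table.
+ * Each entry is an 8-byte pair of registers (red alone at +0, green and
+ * blue packed at +4), so the whole ramp can be written with one loop
+ * over that stride.  Assumes kernel types and I/O helpers
+ * (<linux/types.h>, <linux/io.h>); the function name and 'lut' layout
+ * are hypothetical.
+ */
+static inline void pdp_write_gamma_ramp(void __iomem *base,
+					const u16 lut[33][3])
+{
+	unsigned int i;
+
+	for (i = 0; i < 33; i++) {
+		u32 r = lut[i][0] & PDP_GAMMA0_R_GAMMA0_R_LSBMASK;
+		u32 gb = ((u32)(lut[i][1] & PDP_GAMMA0_GB_GAMMA0_G_LSBMASK)
+			  << PDP_GAMMA0_GB_GAMMA0_G_SHIFT) |
+			 (lut[i][2] & PDP_GAMMA0_GB_GAMMA0_B_LSBMASK);
+
+		/* successive entries are spaced 8 bytes apart */
+		writel(r, base + PDP_GAMMA0_R_OFFSET + i * 8);
+		writel(gb, base + PDP_GAMMA0_GB_OFFSET + i * 8);
+	}
+}
+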
+#define PDP_VEVENT_OFFSET		(0x08BC)
+
+/* PDP, VEVENT, VEVENT
+*/
+#define PDP_VEVENT_VEVENT_MASK		(0x1FFF0000)
+#define PDP_VEVENT_VEVENT_LSBMASK		(0x00001FFF)
+#define PDP_VEVENT_VEVENT_SHIFT		(16)
+#define PDP_VEVENT_VEVENT_LENGTH		(13)
+#define PDP_VEVENT_VEVENT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VEVENT, VFETCH
+*/
+#define PDP_VEVENT_VFETCH_MASK		(0x00001FFF)
+#define PDP_VEVENT_VFETCH_LSBMASK		(0x00001FFF)
+#define PDP_VEVENT_VFETCH_SHIFT		(0)
+#define PDP_VEVENT_VFETCH_LENGTH		(13)
+#define PDP_VEVENT_VFETCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_HDECTRL_OFFSET		(0x08C0)
+
+/* PDP, HDECTRL, HDES
+*/
+#define PDP_HDECTRL_HDES_MASK		(0x1FFF0000)
+#define PDP_HDECTRL_HDES_LSBMASK		(0x00001FFF)
+#define PDP_HDECTRL_HDES_SHIFT		(16)
+#define PDP_HDECTRL_HDES_LENGTH		(13)
+#define PDP_HDECTRL_HDES_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HDECTRL, HDEF
+*/
+#define PDP_HDECTRL_HDEF_MASK		(0x00001FFF)
+#define PDP_HDECTRL_HDEF_LSBMASK		(0x00001FFF)
+#define PDP_HDECTRL_HDEF_SHIFT		(0)
+#define PDP_HDECTRL_HDEF_LENGTH		(13)
+#define PDP_HDECTRL_HDEF_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VDECTRL_OFFSET		(0x08C4)
+
+/* PDP, VDECTRL, VDES
+*/
+#define PDP_VDECTRL_VDES_MASK		(0x1FFF0000)
+#define PDP_VDECTRL_VDES_LSBMASK		(0x00001FFF)
+#define PDP_VDECTRL_VDES_SHIFT		(16)
+#define PDP_VDECTRL_VDES_LENGTH		(13)
+#define PDP_VDECTRL_VDES_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VDECTRL, VDEF
+*/
+#define PDP_VDECTRL_VDEF_MASK		(0x00001FFF)
+#define PDP_VDECTRL_VDEF_LSBMASK		(0x00001FFF)
+#define PDP_VDECTRL_VDEF_SHIFT		(0)
+#define PDP_VDECTRL_VDEF_LENGTH		(13)
+#define PDP_VDECTRL_VDEF_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_OPMASK_R_OFFSET		(0x08C8)
+
+/* PDP, OPMASK_R, MASKLEVEL
+*/
+#define PDP_OPMASK_R_MASKLEVEL_MASK		(0x80000000)
+#define PDP_OPMASK_R_MASKLEVEL_LSBMASK		(0x00000001)
+#define PDP_OPMASK_R_MASKLEVEL_SHIFT		(31)
+#define PDP_OPMASK_R_MASKLEVEL_LENGTH		(1)
+#define PDP_OPMASK_R_MASKLEVEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OPMASK_R, BLANKLEVEL
+*/
+#define PDP_OPMASK_R_BLANKLEVEL_MASK		(0x40000000)
+#define PDP_OPMASK_R_BLANKLEVEL_LSBMASK		(0x00000001)
+#define PDP_OPMASK_R_BLANKLEVEL_SHIFT		(30)
+#define PDP_OPMASK_R_BLANKLEVEL_LENGTH		(1)
+#define PDP_OPMASK_R_BLANKLEVEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OPMASK_R, MASKR
+*/
+#define PDP_OPMASK_R_MASKR_MASK		(0x000003FF)
+#define PDP_OPMASK_R_MASKR_LSBMASK		(0x000003FF)
+#define PDP_OPMASK_R_MASKR_SHIFT		(0)
+#define PDP_OPMASK_R_MASKR_LENGTH		(10)
+#define PDP_OPMASK_R_MASKR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_OPMASK_GB_OFFSET		(0x08CC)
+
+/* PDP, OPMASK_GB, MASKG
+*/
+#define PDP_OPMASK_GB_MASKG_MASK		(0x03FF0000)
+#define PDP_OPMASK_GB_MASKG_LSBMASK		(0x000003FF)
+#define PDP_OPMASK_GB_MASKG_SHIFT		(16)
+#define PDP_OPMASK_GB_MASKG_LENGTH		(10)
+#define PDP_OPMASK_GB_MASKG_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OPMASK_GB, MASKB
+*/
+#define PDP_OPMASK_GB_MASKB_MASK		(0x000003FF)
+#define PDP_OPMASK_GB_MASKB_LSBMASK		(0x000003FF)
+#define PDP_OPMASK_GB_MASKB_SHIFT		(0)
+#define PDP_OPMASK_GB_MASKB_LENGTH		(10)
+#define PDP_OPMASK_GB_MASKB_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_REGLD_ADDR_CTRL_OFFSET		(0x08D0)
+
+/* PDP, REGLD_ADDR_CTRL, REGLD_ADDRIN
+*/
+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_MASK		(0xFFFFFFF0)
+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LSBMASK		(0x0FFFFFFF)
+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SHIFT		(4)
+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LENGTH		(28)
+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_REGLD_ADDR_STAT_OFFSET		(0x08D4)
+
+/* PDP, REGLD_ADDR_STAT, REGLD_ADDROUT
+*/
+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_MASK		(0xFFFFFFF0)
+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LSBMASK		(0x0FFFFFFF)
+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SHIFT		(4)
+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LENGTH		(28)
+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_REGLD_STAT_OFFSET		(0x08D8)
+
+/* PDP, REGLD_STAT, REGLD_ADDREN
+*/
+#define PDP_REGLD_STAT_REGLD_ADDREN_MASK		(0x00800000)
+#define PDP_REGLD_STAT_REGLD_ADDREN_LSBMASK		(0x00000001)
+#define PDP_REGLD_STAT_REGLD_ADDREN_SHIFT		(23)
+#define PDP_REGLD_STAT_REGLD_ADDREN_LENGTH		(1)
+#define PDP_REGLD_STAT_REGLD_ADDREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_REGLD_CTRL_OFFSET		(0x08DC)
+
+/* PDP, REGLD_CTRL, REGLD_ADDRLEN
+*/
+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_MASK		(0xFF000000)
+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_LSBMASK		(0x000000FF)
+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_SHIFT		(24)
+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_LENGTH		(8)
+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, REGLD_CTRL, REGLD_VAL
+*/
+#define PDP_REGLD_CTRL_REGLD_VAL_MASK		(0x00800000)
+#define PDP_REGLD_CTRL_REGLD_VAL_LSBMASK		(0x00000001)
+#define PDP_REGLD_CTRL_REGLD_VAL_SHIFT		(23)
+#define PDP_REGLD_CTRL_REGLD_VAL_LENGTH		(1)
+#define PDP_REGLD_CTRL_REGLD_VAL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_UPDCTRL_OFFSET		(0x08E0)
+
+/* PDP, UPDCTRL, UPDFIELD
+*/
+#define PDP_UPDCTRL_UPDFIELD_MASK		(0x00000001)
+#define PDP_UPDCTRL_UPDFIELD_LSBMASK		(0x00000001)
+#define PDP_UPDCTRL_UPDFIELD_SHIFT		(0)
+#define PDP_UPDCTRL_UPDFIELD_LENGTH		(1)
+#define PDP_UPDCTRL_UPDFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_INTCTRL_OFFSET		(0x08E4)
+
+/* PDP, INTCTRL, HBLNK_LINE
+*/
+#define PDP_INTCTRL_HBLNK_LINE_MASK		(0x00010000)
+#define PDP_INTCTRL_HBLNK_LINE_LSBMASK		(0x00000001)
+#define PDP_INTCTRL_HBLNK_LINE_SHIFT		(16)
+#define PDP_INTCTRL_HBLNK_LINE_LENGTH		(1)
+#define PDP_INTCTRL_HBLNK_LINE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCTRL, HBLNK_LINENO
+*/
+#define PDP_INTCTRL_HBLNK_LINENO_MASK		(0x00001FFF)
+#define PDP_INTCTRL_HBLNK_LINENO_LSBMASK		(0x00001FFF)
+#define PDP_INTCTRL_HBLNK_LINENO_SHIFT		(0)
+#define PDP_INTCTRL_HBLNK_LINENO_LENGTH		(13)
+#define PDP_INTCTRL_HBLNK_LINENO_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PDISETUP_OFFSET		(0x0900)
+
+/* PDP, PDISETUP, PDI_BLNKLVL
+*/
+#define PDP_PDISETUP_PDI_BLNKLVL_MASK		(0x00000040)
+#define PDP_PDISETUP_PDI_BLNKLVL_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_BLNKLVL_SHIFT		(6)
+#define PDP_PDISETUP_PDI_BLNKLVL_LENGTH		(1)
+#define PDP_PDISETUP_PDI_BLNKLVL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_BLNK
+*/
+#define PDP_PDISETUP_PDI_BLNK_MASK		(0x00000020)
+#define PDP_PDISETUP_PDI_BLNK_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_BLNK_SHIFT		(5)
+#define PDP_PDISETUP_PDI_BLNK_LENGTH		(1)
+#define PDP_PDISETUP_PDI_BLNK_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_PWR
+*/
+#define PDP_PDISETUP_PDI_PWR_MASK		(0x00000010)
+#define PDP_PDISETUP_PDI_PWR_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_PWR_SHIFT		(4)
+#define PDP_PDISETUP_PDI_PWR_LENGTH		(1)
+#define PDP_PDISETUP_PDI_PWR_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_EN
+*/
+#define PDP_PDISETUP_PDI_EN_MASK		(0x00000008)
+#define PDP_PDISETUP_PDI_EN_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_EN_SHIFT		(3)
+#define PDP_PDISETUP_PDI_EN_LENGTH		(1)
+#define PDP_PDISETUP_PDI_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_GDEN
+*/
+#define PDP_PDISETUP_PDI_GDEN_MASK		(0x00000004)
+#define PDP_PDISETUP_PDI_GDEN_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_GDEN_SHIFT		(2)
+#define PDP_PDISETUP_PDI_GDEN_LENGTH		(1)
+#define PDP_PDISETUP_PDI_GDEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_NFEN
+*/
+#define PDP_PDISETUP_PDI_NFEN_MASK		(0x00000002)
+#define PDP_PDISETUP_PDI_NFEN_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_NFEN_SHIFT		(1)
+#define PDP_PDISETUP_PDI_NFEN_LENGTH		(1)
+#define PDP_PDISETUP_PDI_NFEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_CR
+*/
+#define PDP_PDISETUP_PDI_CR_MASK		(0x00000001)
+#define PDP_PDISETUP_PDI_CR_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_CR_SHIFT		(0)
+#define PDP_PDISETUP_PDI_CR_LENGTH		(1)
+#define PDP_PDISETUP_PDI_CR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PDITIMING0_OFFSET		(0x0904)
+
+/* PDP, PDITIMING0, PDI_PWRSVGD
+*/
+#define PDP_PDITIMING0_PDI_PWRSVGD_MASK		(0x0F000000)
+#define PDP_PDITIMING0_PDI_PWRSVGD_LSBMASK		(0x0000000F)
+#define PDP_PDITIMING0_PDI_PWRSVGD_SHIFT		(24)
+#define PDP_PDITIMING0_PDI_PWRSVGD_LENGTH		(4)
+#define PDP_PDITIMING0_PDI_PWRSVGD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDITIMING0, PDI_LSDEL
+*/
+#define PDP_PDITIMING0_PDI_LSDEL_MASK		(0x007F0000)
+#define PDP_PDITIMING0_PDI_LSDEL_LSBMASK		(0x0000007F)
+#define PDP_PDITIMING0_PDI_LSDEL_SHIFT		(16)
+#define PDP_PDITIMING0_PDI_LSDEL_LENGTH		(7)
+#define PDP_PDITIMING0_PDI_LSDEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDITIMING0, PDI_PWRSV2GD2
+*/
+#define PDP_PDITIMING0_PDI_PWRSV2GD2_MASK		(0x000003FF)
+#define PDP_PDITIMING0_PDI_PWRSV2GD2_LSBMASK		(0x000003FF)
+#define PDP_PDITIMING0_PDI_PWRSV2GD2_SHIFT		(0)
+#define PDP_PDITIMING0_PDI_PWRSV2GD2_LENGTH		(10)
+#define PDP_PDITIMING0_PDI_PWRSV2GD2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PDITIMING1_OFFSET		(0x0908)
+
+/* PDP, PDITIMING1, PDI_NLDEL
+*/
+#define PDP_PDITIMING1_PDI_NLDEL_MASK		(0x000F0000)
+#define PDP_PDITIMING1_PDI_NLDEL_LSBMASK		(0x0000000F)
+#define PDP_PDITIMING1_PDI_NLDEL_SHIFT		(16)
+#define PDP_PDITIMING1_PDI_NLDEL_LENGTH		(4)
+#define PDP_PDITIMING1_PDI_NLDEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDITIMING1, PDI_ACBDEL
+*/
+#define PDP_PDITIMING1_PDI_ACBDEL_MASK		(0x000003FF)
+#define PDP_PDITIMING1_PDI_ACBDEL_LSBMASK		(0x000003FF)
+#define PDP_PDITIMING1_PDI_ACBDEL_SHIFT		(0)
+#define PDP_PDITIMING1_PDI_ACBDEL_LENGTH		(10)
+#define PDP_PDITIMING1_PDI_ACBDEL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PDICOREID_OFFSET		(0x090C)
+
+/* PDP, PDICOREID, PDI_GROUP_ID
+*/
+#define PDP_PDICOREID_PDI_GROUP_ID_MASK		(0xFF000000)
+#define PDP_PDICOREID_PDI_GROUP_ID_LSBMASK		(0x000000FF)
+#define PDP_PDICOREID_PDI_GROUP_ID_SHIFT		(24)
+#define PDP_PDICOREID_PDI_GROUP_ID_LENGTH		(8)
+#define PDP_PDICOREID_PDI_GROUP_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREID, PDI_CORE_ID
+*/
+#define PDP_PDICOREID_PDI_CORE_ID_MASK		(0x00FF0000)
+#define PDP_PDICOREID_PDI_CORE_ID_LSBMASK		(0x000000FF)
+#define PDP_PDICOREID_PDI_CORE_ID_SHIFT		(16)
+#define PDP_PDICOREID_PDI_CORE_ID_LENGTH		(8)
+#define PDP_PDICOREID_PDI_CORE_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREID, PDI_CONFIG_ID
+*/
+#define PDP_PDICOREID_PDI_CONFIG_ID_MASK		(0x0000FFFF)
+#define PDP_PDICOREID_PDI_CONFIG_ID_LSBMASK		(0x0000FFFF)
+#define PDP_PDICOREID_PDI_CONFIG_ID_SHIFT		(0)
+#define PDP_PDICOREID_PDI_CONFIG_ID_LENGTH		(16)
+#define PDP_PDICOREID_PDI_CONFIG_ID_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PDICOREREV_OFFSET		(0x0910)
+
+/* PDP, PDICOREREV, PDI_MAJOR_REV
+*/
+#define PDP_PDICOREREV_PDI_MAJOR_REV_MASK		(0x00FF0000)
+#define PDP_PDICOREREV_PDI_MAJOR_REV_LSBMASK		(0x000000FF)
+#define PDP_PDICOREREV_PDI_MAJOR_REV_SHIFT		(16)
+#define PDP_PDICOREREV_PDI_MAJOR_REV_LENGTH		(8)
+#define PDP_PDICOREREV_PDI_MAJOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREREV, PDI_MINOR_REV
+*/
+#define PDP_PDICOREREV_PDI_MINOR_REV_MASK		(0x0000FF00)
+#define PDP_PDICOREREV_PDI_MINOR_REV_LSBMASK		(0x000000FF)
+#define PDP_PDICOREREV_PDI_MINOR_REV_SHIFT		(8)
+#define PDP_PDICOREREV_PDI_MINOR_REV_LENGTH		(8)
+#define PDP_PDICOREREV_PDI_MINOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREREV, PDI_MAINT_REV
+*/
+#define PDP_PDICOREREV_PDI_MAINT_REV_MASK		(0x000000FF)
+#define PDP_PDICOREREV_PDI_MAINT_REV_LSBMASK		(0x000000FF)
+#define PDP_PDICOREREV_PDI_MAINT_REV_SHIFT		(0)
+#define PDP_PDICOREREV_PDI_MAINT_REV_LENGTH		(8)
+#define PDP_PDICOREREV_PDI_MAINT_REV_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX2_OFFSET		(0x0920)
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y1
+*/
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_MASK		(0x000000C0)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LSBMASK		(0x00000003)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT		(6)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LENGTH		(2)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y1
+*/
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_MASK		(0x00000030)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LSBMASK		(0x00000003)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT		(4)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LENGTH		(2)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y0
+*/
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_MASK		(0x0000000C)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LSBMASK		(0x00000003)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT		(2)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LENGTH		(2)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y0
+*/
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_MASK		(0x00000003)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LSBMASK		(0x00000003)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT		(0)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LENGTH		(2)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SIGNED_FIELD	IMG_FALSE
+
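+/*
+ * Illustrative note, not part of the original register list: the 2x2
+ * dither matrix packs four 2-bit cells into the low byte of a single
+ * register, with cell (x, y) at bit position 2 * (2 * y + x), as the
+ * shifts above (0, 2, 4, 6) show.  A hypothetical helper:
+ *
+ *	#define PDP_DITHER2_CELL_SHIFT(x, y)	(2 * (2 * (y) + (x)))
+ */
+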
+#define PDP_DITHERMATRIX4_0_OFFSET		(0x0924)
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y1
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_MASK		(0xF0000000)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SHIFT		(28)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y1
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_MASK		(0x0F000000)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SHIFT		(24)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y1
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_MASK		(0x00F00000)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SHIFT		(20)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y1
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_MASK		(0x000F0000)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SHIFT		(16)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y0
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_MASK		(0x0000F000)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SHIFT		(12)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y0
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_MASK		(0x00000F00)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SHIFT		(8)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y0
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_MASK		(0x000000F0)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SHIFT		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y0
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_MASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SHIFT		(0)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX4_1_OFFSET		(0x0928)
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y3
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_MASK		(0xF0000000)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SHIFT		(28)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y3
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_MASK		(0x0F000000)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SHIFT		(24)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y3
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_MASK		(0x00F00000)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SHIFT		(20)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y3
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_MASK		(0x000F0000)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SHIFT		(16)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y2
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_MASK		(0x0000F000)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SHIFT		(12)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y2
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_MASK		(0x00000F00)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SHIFT		(8)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y2
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_MASK		(0x000000F0)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SHIFT		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y2
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_MASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SHIFT		(0)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_0_OFFSET		(0x092C)
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X4Y0
+*/
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SHIFT		(24)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X3Y0
+*/
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SHIFT		(18)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X2Y0
+*/
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SHIFT		(12)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X1Y0
+*/
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SHIFT		(6)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X0Y0
+*/
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SHIFT		(0)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_1_OFFSET		(0x0930)
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X1Y1
+*/
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SHIFT		(24)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X0Y1
+*/
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SHIFT		(18)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X7Y0
+*/
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SHIFT		(12)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X6Y0
+*/
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SHIFT		(6)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X5Y0
+*/
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SHIFT		(0)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_2_OFFSET		(0x0934)
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X6Y1
+*/
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SHIFT		(24)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X5Y1
+*/
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SHIFT		(18)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X4Y1
+*/
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SHIFT		(12)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X3Y1
+*/
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SHIFT		(6)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X2Y1
+*/
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SHIFT		(0)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_3_OFFSET		(0x0938)
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X3Y2
+*/
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SHIFT		(24)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X2Y2
+*/
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SHIFT		(18)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X1Y2
+*/
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SHIFT		(12)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X0Y2
+*/
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SHIFT		(6)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X7Y1
+*/
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SHIFT		(0)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_4_OFFSET		(0x093C)
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X0Y3
+*/
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SHIFT		(24)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X7Y2
+*/
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SHIFT		(18)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X6Y2
+*/
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SHIFT		(12)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X5Y2
+*/
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SHIFT		(6)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X4Y2
+*/
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SHIFT		(0)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_5_OFFSET		(0x0940)
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X5Y3
+*/
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SHIFT		(24)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X4Y3
+*/
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SHIFT		(18)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X3Y3
+*/
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SHIFT		(12)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X2Y3
+*/
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SHIFT		(6)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X1Y3
+*/
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SHIFT		(0)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_6_OFFSET		(0x0944)
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X2Y4
+*/
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT		(24)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X1Y4
+*/
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SHIFT		(18)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X0Y4
+*/
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SHIFT		(12)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X7Y3
+*/
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SHIFT		(6)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X6Y3
+*/
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SHIFT		(0)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_7_OFFSET		(0x0948)
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X7Y4
+*/
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SHIFT		(24)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X6Y4
+*/
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SHIFT		(18)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X5Y4
+*/
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SHIFT		(12)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X4Y4
+*/
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SHIFT		(6)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X3Y4
+*/
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SHIFT		(0)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_8_OFFSET		(0x094C)
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X4Y5
+*/
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SHIFT		(24)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X3Y5
+*/
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SHIFT		(18)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X2Y5
+*/
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SHIFT		(12)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X1Y5
+*/
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SHIFT		(6)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X0Y5
+*/
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SHIFT		(0)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_9_OFFSET		(0x0950)
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X1Y6
+*/
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SHIFT		(24)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X0Y6
+*/
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SHIFT		(18)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X7Y5
+*/
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SHIFT		(12)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X6Y5
+*/
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SHIFT		(6)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X5Y5
+*/
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SHIFT		(0)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_10_OFFSET		(0x0954)
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X6Y6
+*/
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SHIFT		(24)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X5Y6
+*/
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SHIFT		(18)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X4Y6
+*/
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SHIFT		(12)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X3Y6
+*/
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SHIFT		(6)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X2Y6
+*/
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SHIFT		(0)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_11_OFFSET		(0x0958)
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X3Y7
+*/
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SHIFT		(24)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X2Y7
+*/
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SHIFT		(18)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X1Y7
+*/
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SHIFT		(12)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X0Y7
+*/
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SHIFT		(6)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X7Y6
+*/
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SHIFT		(0)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_12_OFFSET		(0x095C)
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X7Y7
+*/
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT		(18)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X6Y7
+*/
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SHIFT		(12)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X5Y7
+*/
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SHIFT		(6)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X4Y7
+*/
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SHIFT		(0)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SIGNED_FIELD	IMG_FALSE
+
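+/* Illustrative note (not part of the generated definitions): the 8x8
+ * dither matrix entries above are packed row-major, five 6-bit fields per
+ * 32-bit register, so a hypothetical lookup for entry (x, y) would be:
+ *
+ *	index  = x + 8 * y;
+ *	offset = PDP_DITHERMATRIX8_0_OFFSET + 4 * (index / 5);
+ *	shift  = 6 * (index % 5);
+ *
+ * Only four entries remain for the thirteenth register, which is why
+ * DITHERMATRIX8_12 holds just X4Y7..X7Y7 with its top field at shift 18.
+ */
+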
+#define PDP_GRPH1_MEMCTRL_OFFSET		(0x0960)
+
+/* PDP, GRPH1_MEMCTRL, GRPH1_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_MEMCTRL, GRPH1_BURSTLEN
+*/
+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_MASK		(0x000000FF)
+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SHIFT		(0)
+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LENGTH		(8)
+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1_MEM_THRESH_OFFSET		(0x0964)
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_UVTHRESHOLD
+*/
+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SHIFT		(24)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LENGTH		(8)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_YTHRESHOLD
+*/
+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT		(12)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LENGTH		(9)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_THRESHOLD
+*/
+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_MASK		(0x000001FF)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SHIFT		(0)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LENGTH		(9)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2_MEMCTRL_OFFSET		(0x0968)
+
+/* PDP, GRPH2_MEMCTRL, GRPH2_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_MEMCTRL, GRPH2_BURSTLEN
+*/
+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_MASK		(0x000000FF)
+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SHIFT		(0)
+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LENGTH		(8)
+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2_MEM_THRESH_OFFSET		(0x096C)
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_UVTHRESHOLD
+*/
+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SHIFT		(24)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LENGTH		(8)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_YTHRESHOLD
+*/
+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SHIFT		(12)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LENGTH		(9)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_THRESHOLD
+*/
+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_MASK		(0x000001FF)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SHIFT		(0)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LENGTH		(9)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3_MEMCTRL_OFFSET		(0x0970)
+
+/* PDP, GRPH3_MEMCTRL, GRPH3_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_MEMCTRL, GRPH3_BURSTLEN
+*/
+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_MASK		(0x000000FF)
+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SHIFT		(0)
+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LENGTH		(8)
+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3_MEM_THRESH_OFFSET		(0x0974)
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_UVTHRESHOLD
+*/
+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SHIFT		(24)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LENGTH		(8)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_YTHRESHOLD
+*/
+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SHIFT		(12)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LENGTH		(9)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_THRESHOLD
+*/
+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_MASK		(0x000001FF)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SHIFT		(0)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LENGTH		(9)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4_MEMCTRL_OFFSET		(0x0978)
+
+/* PDP, GRPH4_MEMCTRL, GRPH4_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_MEMCTRL, GRPH4_BURSTLEN
+*/
+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_MASK		(0x000000FF)
+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SHIFT		(0)
+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LENGTH		(8)
+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4_MEM_THRESH_OFFSET		(0x097C)
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_UVTHRESHOLD
+*/
+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SHIFT		(24)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LENGTH		(8)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_YTHRESHOLD
+*/
+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SHIFT		(12)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LENGTH		(9)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_THRESHOLD
+*/
+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_MASK		(0x000001FF)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SHIFT		(0)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LENGTH		(9)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1_MEMCTRL_OFFSET		(0x0980)
+
+/* PDP, VID1_MEMCTRL, VID1_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_MEMCTRL, VID1_BURSTLEN
+*/
+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_MASK		(0x000000FF)
+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_SHIFT		(0)
+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_LENGTH		(8)
+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1_MEM_THRESH_OFFSET		(0x0984)
+
+/* PDP, VID1_MEM_THRESH, VID1_UVTHRESHOLD
+*/
+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SHIFT		(24)
+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LENGTH		(8)
+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_MEM_THRESH, VID1_YTHRESHOLD
+*/
+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SHIFT		(12)
+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LENGTH		(9)
+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_MEM_THRESH, VID1_THRESHOLD
+*/
+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_MASK		(0x000001FF)
+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SHIFT		(0)
+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LENGTH		(9)
+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2_MEMCTRL_OFFSET		(0x0988)
+
+/* PDP, VID2_MEMCTRL, VID2_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_MEMCTRL, VID2_BURSTLEN
+*/
+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_MASK		(0x000000FF)
+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_SHIFT		(0)
+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_LENGTH		(8)
+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2_MEM_THRESH_OFFSET		(0x098C)
+
+/* PDP, VID2_MEM_THRESH, VID2_UVTHRESHOLD
+*/
+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SHIFT		(24)
+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LENGTH		(8)
+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_MEM_THRESH, VID2_YTHRESHOLD
+*/
+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SHIFT		(12)
+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LENGTH		(9)
+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_MEM_THRESH, VID2_THRESHOLD
+*/
+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_MASK		(0x000001FF)
+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SHIFT		(0)
+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LENGTH		(9)
+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3_MEMCTRL_OFFSET		(0x0990)
+
+/* PDP, VID3_MEMCTRL, VID3_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_MEMCTRL, VID3_BURSTLEN
+*/
+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_MASK		(0x000000FF)
+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_SHIFT		(0)
+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_LENGTH		(8)
+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3_MEM_THRESH_OFFSET		(0x0994)
+
+/* PDP, VID3_MEM_THRESH, VID3_UVTHRESHOLD
+*/
+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SHIFT		(24)
+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LENGTH		(8)
+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_MEM_THRESH, VID3_YTHRESHOLD
+*/
+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SHIFT		(12)
+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LENGTH		(9)
+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_MEM_THRESH, VID3_THRESHOLD
+*/
+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_MASK		(0x000001FF)
+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SHIFT		(0)
+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LENGTH		(9)
+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4_MEMCTRL_OFFSET		(0x0998)
+
+/* PDP, VID4_MEMCTRL, VID4_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_MEMCTRL, VID4_BURSTLEN
+*/
+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_MASK		(0x000000FF)
+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_SHIFT		(0)
+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_LENGTH		(8)
+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4_MEM_THRESH_OFFSET		(0x099C)
+
+/* PDP, VID4_MEM_THRESH, VID4_UVTHRESHOLD
+*/
+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SHIFT		(24)
+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LENGTH		(8)
+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_MEM_THRESH, VID4_YTHRESHOLD
+*/
+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SHIFT		(12)
+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LENGTH		(9)
+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_MEM_THRESH, VID4_THRESHOLD
+*/
+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_MASK		(0x000001FF)
+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SHIFT		(0)
+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LENGTH		(9)
+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1_PANIC_THRESH_OFFSET		(0x09A0)
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_ENABLE
+*/
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_ENABLE
+*/
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2_PANIC_THRESH_OFFSET		(0x09A4)
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_ENABLE
+*/
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_ENABLE
+*/
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3_PANIC_THRESH_OFFSET		(0x09A8)
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_ENABLE
+*/
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_ENABLE
+*/
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4_PANIC_THRESH_OFFSET		(0x09AC)
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_ENABLE
+*/
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_ENABLE
+*/
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1_PANIC_THRESH_OFFSET		(0x09B0)
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_ENABLE
+*/
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_ENABLE
+*/
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2_PANIC_THRESH_OFFSET		(0x09B4)
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_ENABLE
+*/
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_ENABLE
+*/
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3_PANIC_THRESH_OFFSET		(0x09B8)
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_ENABLE
+*/
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_ENABLE
+*/
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4_PANIC_THRESH_OFFSET		(0x09BC)
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_ENABLE
+*/
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_ENABLE
+*/
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BURST_BOUNDARY_OFFSET		(0x09C0)
+
+/* PDP, BURST_BOUNDARY, BURST_BOUNDARY
+*/
+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK		(0x0000003F)
+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_LSBMASK		(0x0000003F)
+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT		(0)
+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_LENGTH		(6)
+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_SIGNED_FIELD	IMG_FALSE
+
+
+/* ------------------------ End of register definitions ------------------------ */
+
+/*
+ * NUMREG defines the extent of register address space.
+ */
+
+#define		PDP_NUMREG	   ((0x09C0 >> 2)+1)
+
+/* Info about video plane addresses */
+#define PDP_YADDR_BITS		(PDP_VID1BASEADDR_VID1BASEADDR_LENGTH)
+#define PDP_YADDR_ALIGN		5
+#define PDP_UADDR_BITS		(PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH)
+#define PDP_UADDR_ALIGN		5
+#define PDP_VADDR_BITS		(PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH)
+#define PDP_VADDR_ALIGN		5
+
+#define PDP_YSTRIDE_BITS	(PDP_VID1STRIDE_VID1STRIDE_LENGTH)
+#define PDP_YSTRIDE_ALIGN	5
+
+#define PDP_MAX_INPUT_WIDTH (PDP_VID1SIZE_VID1WIDTH_LSBMASK + 1)
+#define PDP_MAX_INPUT_HEIGHT (PDP_VID1SIZE_VID1HEIGHT_LSBMASK + 1)
+
+/* Maximum 6 bytes per pixel for RGB161616 */
+#define PDP_MAX_IMAGE_BYTES (PDP_MAX_INPUT_WIDTH * PDP_MAX_INPUT_HEIGHT * 6)
+
+/* Round up */
+#define PDP_MAX_IMAGE_PAGES ((PDP_MAX_IMAGE_BYTES+PAGE_SIZE-1)/PAGE_SIZE)
+
+#define PDP_YADDR_MAX		(((1 << PDP_YADDR_BITS) - 1) << PDP_YADDR_ALIGN)
+#define PDP_UADDR_MAX		(((1 << PDP_UADDR_BITS) - 1) << PDP_UADDR_ALIGN)
+#define PDP_VADDR_MAX		(((1 << PDP_VADDR_BITS) - 1) << PDP_VADDR_ALIGN)
+#define PDP_YSTRIDE_MAX		((1 << PDP_YSTRIDE_BITS) << PDP_YSTRIDE_ALIGN)
+#define PDP_YADDR_ALIGNMASK	((1 << PDP_YADDR_ALIGN) - 1)
+#define PDP_UADDR_ALIGNMASK	((1 << PDP_UADDR_ALIGN) - 1)
+#define PDP_VADDR_ALIGNMASK	((1 << PDP_VADDR_ALIGN) - 1)
+#define PDP_YSTRIDE_ALIGNMASK	((1 << PDP_YSTRIDE_ALIGN) - 1)
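
As a quick illustration of how the limits above combine, here is a hedged sketch of an address check (the helper name pdp_check_yaddr and its return convention are assumptions, not part of this header): a luma base address must be 32-byte aligned (PDP_YADDR_ALIGN == 5) and must fit in the register field before it can be programmed into VID1BASEADDR.

/* Illustrative sketch only: validate a luma base address against the
 * alignment and range macros defined above. */
static int pdp_check_yaddr(unsigned int yaddr)
{
	/* Must be 32-byte aligned: the low PDP_YADDR_ALIGN bits are zero. */
	if (yaddr & PDP_YADDR_ALIGNMASK)
		return -1;

	/* Must fit within the PDP_YADDR_BITS-wide register field. */
	if (yaddr > PDP_YADDR_MAX)
		return -1;

	return 0;
}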
+
+/* Field Values */
+#define PDP_SURF_PIXFMT_RGB332					  0x3
+#define PDP_SURF_PIXFMT_ARGB4444				  0x4
+#define PDP_SURF_PIXFMT_ARGB1555				  0x5
+#define PDP_SURF_PIXFMT_RGB888					  0x6
+#define PDP_SURF_PIXFMT_RGB565					  0x7
+#define PDP_SURF_PIXFMT_ARGB8888				  0x8
+#define PDP_SURF_PIXFMT_420_PL8					  0x9
+#define PDP_SURF_PIXFMT_420_PL8IVU				  0xA
+#define PDP_SURF_PIXFMT_420_PL8IUV				  0xB
+#define PDP_SURF_PIXFMT_422_UY0VY1_8888			  0xC
+#define PDP_SURF_PIXFMT_422_VY0UY1_8888			  0xD
+#define PDP_SURF_PIXFMT_422_Y0UY1V_8888			  0xE
+#define PDP_SURF_PIXFMT_422_Y0VY1U_8888			  0xF
+#define PDP_SURF_PIXFMT_AYUV8888				  0x10
+#define PDP_SURF_PIXFMT_YUV101010				  0x15
+#define PDP_SURF_PIXFMT_RGB101010				  0x17
+#define PDP_SURF_PIXFMT_420_PL10IUV				  0x18
+#define PDP_SURF_PIXFMT_420_PL10IVU				  0x19
+#define PDP_SURF_PIXFMT_422_PL10IUV				  0x1A
+#define PDP_SURF_PIXFMT_422_PL10IVU				  0x1B
+#define PDP_SURF_PIXFMT_RGB121212				  0x1E
+#define PDP_SURF_PIXFMT_RGB161616				  0x1F
+
+#define PDP_CTRL_CKEYSRC_PREV					  0x0
+#define PDP_CTRL_CKEYSRC_CUR					  0x1
+
+#define PDP_MEMCTRL_MEMREFRESH_ALWAYS			  0x0
+#define PDP_MEMCTRL_MEMREFRESH_HBLNK			  0x1
+#define PDP_MEMCTRL_MEMREFRESH_VBLNK			  0x2
+#define PDP_MEMCTRL_MEMREFRESH_BOTH				  0x3
+
+#define PDP_3D_CTRL_BLENDSEL_BGND_WITH_POS0		  0x0
+#define PDP_3D_CTRL_BLENDSEL_POS0_WITH_POS1		  0x1
+#define PDP_3D_CTRL_BLENDSEL_POS1_WITH_POS2		  0x2
+#define PDP_3D_CTRL_BLENDSEL_POS2_WITH_POS3		  0x3
+#define PDP_3D_CTRL_BLENDSEL_POS3_WITH_POS4		  0x4
+#define PDP_3D_CTRL_BLENDSEL_POS4_WITH_POS5		  0x5
+#define PDP_3D_CTRL_BLENDSEL_POS5_WITH_POS6		  0x6
+#define PDP_3D_CTRL_BLENDSEL_POS6_WITH_POS7		  0x7
+
+#define PDP_UADDR_UV_STRIDE_EQUAL_TO_Y_STRIDE		  0x0
+#define PDP_UADDR_UV_STRIDE_EQUAL_TO_DOUBLE_Y_STRIDE  0x1
+#define PDP_UADDR_UV_STRIDE_EQUAL_TO_HALF_Y_STRIDE	  0x2
+
+#define PDP_PROCAMP_OUTPUT_OFFSET_FRACTIONAL_BITS 1
+#define PDP_PROCAMP_COEFFICIENT_FRACTIONAL_BITS	  10
+
+/*-------------------------------------------------------------------------------*/
+
+#endif /* _PDP2_REGS_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdp_drm.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdp_drm.h
new file mode 100644
index 0000000..ff2afb0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdp_drm.h
@@ -0,0 +1,107 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PDP DRM definitions shared between kernel and user space.
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_DRM_H__)
+#define __PDP_DRM_H__
+
+#if defined(__KERNEL__)
+#include <drm/drm.h>
+#else
+#include <drm.h>
+#endif
+
+struct drm_pdp_gem_create {
+	__u64 size;	/* in */
+	__u32 flags;	/* in */
+	__u32 handle;	/* out */
+};
+
+struct drm_pdp_gem_mmap {
+	__u32 handle;	/* in */
+	__u32 pad;
+	__u64 offset;	/* out */
+};
+
+#define PDP_GEM_CPU_PREP_READ	(1 << 0)
+#define PDP_GEM_CPU_PREP_WRITE	(1 << 1)
+#define PDP_GEM_CPU_PREP_NOWAIT	(1 << 2)
+
+struct drm_pdp_gem_cpu_prep {
+	__u32 handle;	/* in */
+	__u32 flags;	/* in */
+};
+
+struct drm_pdp_gem_cpu_fini {
+	__u32 handle;	/* in */
+	__u32 pad;
+};
+
+/*
+ * DRM command numbers, relative to DRM_COMMAND_BASE.
+ * These defines must be prefixed with "DRM_".
+ */
+#define DRM_PDP_GEM_CREATE		0x00
+#define DRM_PDP_GEM_MMAP		0x01
+#define DRM_PDP_GEM_CPU_PREP		0x02
+#define DRM_PDP_GEM_CPU_FINI		0x03
+
+/* These defines must be prefixed with "DRM_IOCTL_". */
+#define DRM_IOCTL_PDP_GEM_CREATE \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_CREATE, \
+		 struct drm_pdp_gem_create)
+
+#define DRM_IOCTL_PDP_GEM_MMAP\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_MMAP, \
+		 struct drm_pdp_gem_mmap)
+
+#define DRM_IOCTL_PDP_GEM_CPU_PREP \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_PREP, \
+		struct drm_pdp_gem_cpu_prep)
+
+#define DRM_IOCTL_PDP_GEM_CPU_FINI \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_FINI, \
+		struct drm_pdp_gem_cpu_fini)
+
+#endif /* defined(__PDP_DRM_H__) */
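
A minimal user-space sketch of the ioctl flow defined above may help: create a GEM object, request its mmap offset, then map it through the DRM file descriptor. The buffer size, the zero flags value and the helper name are illustrative assumptions; error handling is reduced to one check per call.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

/* Hypothetical helper: create and map a 4 KiB PDP GEM buffer. */
static void *pdp_gem_create_and_map(int drm_fd)
{
	struct drm_pdp_gem_create create;
	struct drm_pdp_gem_mmap map_req;
	void *ptr;

	memset(&create, 0, sizeof(create));
	create.size = 4096;	/* assumed size */
	create.flags = 0;	/* assumed: no special placement flags */
	if (ioctl(drm_fd, DRM_IOCTL_PDP_GEM_CREATE, &create) != 0)
		return NULL;

	memset(&map_req, 0, sizeof(map_req));
	map_req.handle = create.handle;
	if (ioctl(drm_fd, DRM_IOCTL_PDP_GEM_MMAP, &map_req) != 0)
		return NULL;

	/* The returned offset is a fake offset for mmap() on the DRM fd. */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, map_req.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}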
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump.h
new file mode 100644
index 0000000..5c066c2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump.h
@@ -0,0 +1,232 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _SERVICES_PDUMP_H_
+#define _SERVICES_PDUMP_H_
+
+#include "img_types.h"
+#include "services_km.h"
+
+
+/* A PDump out2.txt script is made up of three sections from three buffers:
+ *
+ *  - Init phase buffer   - holds PDump data written during driver
+ *                          initialisation, non-volatile.
+ *  - Main phase buffer   - holds PDump data written after driver init,
+ *                          volatile.
+ *  - Deinit phase buffer - holds PDump data needed to shut down HW/play back,
+ *                          written only during driver initialisation using
+ *                          the DEINIT flag.
+ *
+ * Volatile in this sense means that the buffer is drained and cleared when
+ * the pdump capture application connects and transfers the data to file.
+ *
+ * The PDump sub-system uses the driver state (init/post-init), whether the
+ * pdump capture application is connected or not (capture range set/unset)
+ * and, if connected, whether the current frame is within the capture range,
+ * to decide which of the three buffers the PDump data is written to. Hence
+ * there are several key time periods in the lifetime of a kernel driver
+ * built with PDUMP=1 (the flag labels, e.g. CT, appear in the time line below):
+ *
+ * Events:load              init        pdump       enter          exit         pdump
+ *       driver             done       connects     range          range     disconnects
+ *         |__________________|____________|__________|______________|____________|______ . . .
+ * State:  |   init phase     | no capture | <- capture client connected ->       | no capture
+ *         |                  |            |                                      |
+ *         |__________________|____________|______________________________________|_____ . . .
+ * Flag:   | CT,DI            | NONE,CT,PR | NONE,CT,PR                           | See no
+ *         | Never NONE or PR | Never DI   | Never DI                             |   capture
+ *         |__________________|____________|______________________________________|_____ . . .
+ * Write   | NONE -undef      | -No write  | -No write | -Main buf    | -No write | See no
+ * buffer  | CT -Init buf     | -Main buf  | -Main buf | -Main buf    | -Main buf |   capture
+ *         | PR -undef        | -Init buf  | -undef    | -Init & Main | -undef    |
+ *         | DI -Deinit buf   | -undef     | -undef    | -undef       | -undef    |
+ *         |__________________|____________|___________|______________|___________|_____ . . .
+ *
+ * Note: The time line could repeat if the pdump capture application is
+ * disconnected and reconnected without unloading the driver module.
+ *
+ * The DEINIT (DI), CONTINUOUS (CT) and PERSISTENT (PR) flags must never
+ * be OR'd together in a single PDump call, since undefined behaviour may
+ * result and produce an invalid PDump which does not play back cleanly.
+ *
+ * The decision on which flag to use comes down to which time period the
+ * client or server driver makes the PDump write call AND the nature/purpose
+ * of the data.
+ *
+ * Note: This is a simplified time line; not all conditions are represented.
+ *
+ */
+
+typedef IMG_UINT32 PDUMP_FLAGS_T;
+
+#define PDUMP_FLAGS_NONE            PDUMP_NONE     /*!< Output this entry with no special treatment i.e. output
+                                                          only if in frame range. */
+#define PDUMP_FLAGS_BLKDATA         PDUMP_BLKDATA  /*!< This flag indicates block-mode PDump data to be recorded
+                                                          in Block script stream in addition to Main script stream,
+                                                          if capture mode is set to BLOCKED */
+
+#define PDUMP_FLAGS_DEINIT          0x20000000UL   /*!< Output this entry to the de-initialisation section, must
+                                                          only be used by the initialisation code in the Server. */
+
+#define PDUMP_FLAGS_POWER           0x08000000UL   /*!< Output this entry even when a power transition is ongoing,
+                                                          as directed by other PDUMP flags. */
+
+#define PDUMP_FLAGS_CONTINUOUS      PDUMP_CONT     /*!< Output this entry always regardless of framed capture range,
+                                                          used by client applications being dumped.
+                                                          During init phase of driver such data carrying this flag
+                                                          will be recorded and present for all PDump client
+                                                          connections.
+                                                          Never combine with the PERSIST flag. */
+
+#define PDUMP_FLAGS_PERSISTENT      PDUMP_PERSIST  /*!< Output this entry always regardless of app and range,
+                                                          used by persistent resources created *after* driver
+                                                          initialisation that must appear in all PDump captures
+                                                          (i.e. current capture regardless of frame range (CONT)
+                                                          and all future PDump captures) for that driver
+                                                          instantiation/session.
+                                                          Effectively this is data that is not forgotten
+                                                          for the second and subsequent PDump client connections.
+                                                          Never combine with the CONTINUOUS flag. */
+
+#define PDUMP_FLAGS_DEBUG           0x00010000U    /*!< For internal debugging use */
+
+#define PDUMP_FLAGS_NOHW            0x00000001U    /* For internal use: Skip sending instructions to the hardware
+                                                        when NO_HARDWARE=0 AND PDUMP=1 */
+
+#define PDUMP_FLAGS_FORCESPLIT      0x00000002U	   /* Forces Main and Block script streams to split - Internal
+                                                        flag used in Block mode of PDump */
+
+#define PDUMP_FLAGS_PDUMP_LOCK_HELD 0x00000004U    /* This flag denotes that PDUMP_LOCK is already held, so
+						      further calls to PDUMP_LOCK with this flag set will not
+						      try to take the pdump lock. If PDUMP_LOCK is called
+						      without this flag by some other thread, that thread will
+						      try to take the lock and sleep. This flag was introduced
+						      to enforce the ordering of pdumping after bridge lock
+						      removal. */
+
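
To make the buffer-selection rules above concrete, here is a hedged sketch of how a caller might pick exactly one of the special flags per write; the helper name is invented, and PDumpCommentWithFlags() is declared in pdump_km.h rather than in this header.

/* Illustrative only: choose one phase-appropriate flag, never OR'ing
 * DEINIT, CONTINUOUS and PERSISTENT together. */
static void pdump_comment_for_phase(IMG_BOOL bDeinitScript,
				    IMG_BOOL bPersistentResource,
				    IMG_BOOL bDriverInitPhase)
{
	PDUMP_FLAGS_T ui32Flags;

	if (bDeinitScript)
		ui32Flags = PDUMP_FLAGS_DEINIT;      /* init phase only */
	else if (bPersistentResource)
		ui32Flags = PDUMP_FLAGS_PERSISTENT;  /* post-init resources */
	else if (bDriverInitPhase)
		ui32Flags = PDUMP_FLAGS_CONTINUOUS;  /* kept for every client */
	else
		ui32Flags = PDUMP_FLAGS_NONE;        /* frame range only */

	PDumpCommentWithFlags(ui32Flags, "phase-tagged example comment");
}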
+#define PDUMP_FILEOFFSET_FMTSPEC    "0x%08X"
+typedef IMG_UINT32 PDUMP_FILEOFFSET_T;
+
+/* PDump stream macros */
+
+/* Parameter stream */
+#define PDUMP_PARAM_INIT_STREAM_NAME       "paramInit"
+#define PDUMP_PARAM_MAIN_STREAM_NAME       "paramMain"
+#define PDUMP_PARAM_DEINIT_STREAM_NAME     "paramDeinit"
+#define PDUMP_PARAM_BLOCK_STREAM_NAME      "paramBlock"
+
+/*** Parameter stream sizes ***/
+
+/* Parameter Init Stream */
+
+/* The stream buffer sizes are tunable in core.mk, where the size of each
+ * buffer can be specified. If no size is specified there, the default is used.
+ */
+#if defined(PDUMP_PARAM_INIT_STREAM_SIZE)
+	#if (PDUMP_PARAM_INIT_STREAM_SIZE < (700 * 1024))
+		#error PDUMP_PARAM_INIT_STREAM_SIZE must be at least 700 KB
+	#endif
+#endif
+
+/* Parameter Main Stream */
+#if defined(PDUMP_PARAM_MAIN_STREAM_SIZE)
+	#if (PDUMP_PARAM_MAIN_STREAM_SIZE < (2 * 1024 * 1024))
+		#error PDUMP_PARAM_MAIN_STREAM_SIZE must be at least 2 MB
+	#endif
+#endif
+
+/* Parameter Deinit Stream */
+#if defined(PDUMP_PARAM_DEINIT_STREAM_SIZE)
+	#if (PDUMP_PARAM_DEINIT_STREAM_SIZE < (64 * 1024))
+		#error PDUMP_PARAM_DEINIT_STREAM_SIZE must be at least 64 KB
+	#endif
+#endif
+
+/* Parameter Block Stream */
+/* There is no separate parameter Block stream as the Block script stream is
+ * just a filtered Main script stream. Hence it will refer to the Main stream
+ * parameters themselves.
+ */
+
+/* Script stream */
+#define PDUMP_SCRIPT_INIT_STREAM_NAME      "scriptInit"
+#define PDUMP_SCRIPT_MAIN_STREAM_NAME      "scriptMain"
+#define PDUMP_SCRIPT_DEINIT_STREAM_NAME    "scriptDeinit"
+#define PDUMP_SCRIPT_BLOCK_STREAM_NAME     "scriptBlock"
+
+/*** Script stream sizes ***/
+
+/* Script Init Stream */
+#if defined(PDUMP_SCRIPT_INIT_STREAM_SIZE)
+	#if (PDUMP_SCRIPT_INIT_STREAM_SIZE < (256 * 1024))
+		#error PDUMP_SCRIPT_INIT_STREAM_SIZE must be at least 256 KB
+	#endif
+#endif
+
+/* Script Main Stream */
+#if defined(PDUMP_SCRIPT_MAIN_STREAM_SIZE)
+	#if (PDUMP_SCRIPT_MAIN_STREAM_SIZE < (2 * 1024 * 1024))
+		#error PDUMP_SCRIPT_MAIN_STREAM_SIZE must be at least 2 MB
+	#endif
+#endif
+
+/* Script Deinit Stream */
+#if defined(PDUMP_SCRIPT_DEINIT_STREAM_SIZE)
+	#if (PDUMP_SCRIPT_DEINIT_STREAM_SIZE < (64 * 1024))
+		#error PDUMP_SCRIPT_DEINIT_STREAM_SIZE must be at least 64 KB
+	#endif
+#endif
+
+/* Script Block Stream */
+#if defined(PDUMP_SCRIPT_BLOCK_STREAM_SIZE)
+	#if (PDUMP_SCRIPT_BLOCK_STREAM_SIZE < (2 * 1024 * 1024))
+		#error PDUMP_SCRIPT_BLOCK_STREAM_SIZE must be at least 2 MB
+	#endif
+#endif
+
+
+#define PDUMP_PARAM_0_FILE_NAME     "%%0%%.prm"      /*!< Initial Param filename used in PDump capture */
+#define PDUMP_PARAM_N_FILE_NAME     "%%0%%_%02u.prm" /*!< Param filename used when PRM file split */
+#define PDUMP_PARAM_MAX_FILE_NAME   32               /*!< Max Size of parameter name used in out2.txt */
+
+#define PDUMP_IS_CONTINUOUS(flags) ((flags & PDUMP_FLAGS_CONTINUOUS) != 0)
+
+#endif /* _SERVICES_PDUMP_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump_km.h
new file mode 100644
index 0000000..abafc5e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump_km.h
@@ -0,0 +1,1057 @@
+/*************************************************************************/ /*!
+@File
+@Title          pdump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for pdump functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PDUMP_KM_H
+#define PDUMP_KM_H
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#endif
+
+/* services/srvkm/include/ */
+#include "device.h"
+
+/* include/ */
+#include "pvrsrv_error.h"
+
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "connection_server.h"
+/*
+ *	Pull in pdump flags from services include
+ */
+#include "pdump.h"
+#include "pdumpdefs.h"
+
+/* Define this to enable the PDUMP_HERE trace in the server */
+#undef PDUMP_TRACE
+
+#if defined(PDUMP_TRACE)
+#define PDUMP_HERE_VAR  IMG_UINT32 here = 0;
+#define PDUMP_HERE(a)	{ here = (a); if (ui32Flags & PDUMP_FLAGS_DEBUG) PVR_DPF((PVR_DBG_WARNING, "HERE %d", (a))); }
+#define PDUMP_HEREA(a)	{ here = (a); PVR_DPF((PVR_DBG_WARNING, "HERE ALWAYS %d", (a))); }
+#else
+#define PDUMP_HERE_VAR  IMG_UINT32 here = 0;
+#define PDUMP_HERE(a)	here = (a);
+#define PDUMP_HEREA(a)	here = (a);
+#endif
+
+#define PDUMP_PD_UNIQUETAG	(IMG_HANDLE)0
+#define PDUMP_PT_UNIQUETAG	(IMG_HANDLE)0
+
+/* Invalid value for PDump block number */
+#define PDUMP_BLOCKNUM_INVALID      IMG_UINT32_MAX
+
+typedef struct _PDUMP_CONNECTION_DATA_ PDUMP_CONNECTION_DATA;
+
+/* PDump transition events */
+typedef enum _PDUMP_TRANSITION_EVENT_
+{
+	PDUMP_TRANSITION_EVENT_NONE,              /* No event */
+	PDUMP_TRANSITION_EVENT_BLOCK_FINISHED,    /* Block mode event, current PDump-block has finished */
+	PDUMP_TRANSITION_EVENT_BLOCK_STARTED,     /* Block mode event, new PDump-block has started */
+	PDUMP_TRANSITION_EVENT_RANGE_ENTERED,     /* Transition into capture range */
+	PDUMP_TRANSITION_EVENT_RANGE_EXITED,      /* Transition out of capture range */
+} PDUMP_TRANSITION_EVENT;
+
+typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION)(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags);
+typedef void (*PFN_PDUMP_SYNCBLOCKS)(void *pvData, PDUMP_TRANSITION_EVENT eEvent);
+
+
+#ifdef PDUMP
+
+/*! Macro used to record a panic in the PDump script stream */
+#define PDUMP_PANIC(_id, _msg) do \
+		{ PVRSRV_ERROR _eE;\
+			_eE = PDumpPanic(((RGX_PDUMP_PANIC_ ## _id) & 0xFFFF), _msg, __func__, __LINE__);	\
+			PVR_LOG_IF_ERROR(_eE, "PDumpPanic");\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+/*! Macro used to record a driver error in the PDump script stream to invalidate the capture */
+#define PDUMP_ERROR(_err, _msg) \
+	(void)PDumpCaptureError(_err, _msg, __func__, __LINE__)
+
+#define SZ_MSG_SIZE_MAX			PVRSRV_PDUMP_MAX_COMMENT_SIZE
+#define SZ_SCRIPT_SIZE_MAX		PVRSRV_PDUMP_MAX_COMMENT_SIZE
+#define SZ_FILENAME_SIZE_MAX	(PVRSRV_PDUMP_MAX_FILENAME_SIZE+sizeof(PDUMP_PARAM_N_FILE_NAME))
+
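A usage note on the two macros above: the _id argument of PDUMP_PANIC is token-pasted onto RGX_PDUMP_PANIC_, so callers pass a bare identifier defined in the RGX headers. The identifiers below are hypothetical, purely to show the shape of a call.

/* Illustrative only: RGX_PDUMP_PANIC_EXAMPLE would have to exist. */
PDUMP_PANIC(EXAMPLE, "Example: unrecoverable script condition");
PDUMP_ERROR(PVRSRV_ERROR_OUT_OF_MEMORY, "Example: invalidate the capture");
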
+#define PDUMP_GET_SCRIPT_STRING()																			\
+	IMG_HANDLE hScript;																						\
+	void *pvScriptAlloc;																					\
+	IMG_UINT32 ui32MaxLen = SZ_SCRIPT_SIZE_MAX-1;															\
+	pvScriptAlloc = OSAllocMem( SZ_SCRIPT_SIZE_MAX );														\
+	if (!pvScriptAlloc)																						\
+	{																										\
+		PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_STRING() failed to allocate memory for script buffer"));	\
+		return PVRSRV_ERROR_OUT_OF_MEMORY;																	\
+	}																										\
+																											\
+	hScript = (IMG_HANDLE) pvScriptAlloc;
+
+#define PDUMP_GET_MSG_STRING()																				\
+	IMG_CHAR *pszMsg;																						\
+	void *pvMsgAlloc;																						\
+	IMG_UINT32 ui32MaxLen = SZ_MSG_SIZE_MAX-1;																\
+	pvMsgAlloc = OSAllocMem( SZ_MSG_SIZE_MAX );																\
+	if (!pvMsgAlloc)																						\
+	{																										\
+		PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_MSG_STRING() failed to allocate memory for message buffer"));	\
+		return PVRSRV_ERROR_OUT_OF_MEMORY;																	\
+	}																										\
+	pszMsg = (IMG_CHAR *)pvMsgAlloc;
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING()																	\
+	IMG_HANDLE hScript;																						\
+	IMG_CHAR *pszFileName;																					\
+	IMG_UINT32 ui32MaxLenScript = SZ_SCRIPT_SIZE_MAX-1;														\
+	void *pvScriptAlloc;																					\
+	void *pvFileAlloc;																						\
+	pvScriptAlloc = OSAllocMem( SZ_SCRIPT_SIZE_MAX );														\
+	if (!pvScriptAlloc)																						\
+	{																										\
+		PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_AND_FILE_STRING() failed to allocate memory for script buffer"));		\
+		return PVRSRV_ERROR_OUT_OF_MEMORY;																	\
+	}																										\
+																											\
+	hScript = (IMG_HANDLE) pvScriptAlloc;																	\
+	pvFileAlloc = OSAllocMem( SZ_FILENAME_SIZE_MAX );														\
+	if (!pvFileAlloc)																						\
+	{																										\
+		PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_AND_FILE_STRING() failed to allocate memory for filename buffer"));	\
+		OSFreeMem(pvScriptAlloc);																			\
+		return PVRSRV_ERROR_OUT_OF_MEMORY;																	\
+	}																										\
+	pszFileName = (IMG_CHAR *)pvFileAlloc;
+
+#define PDUMP_RELEASE_SCRIPT_STRING()																		\
+	if (pvScriptAlloc)																						\
+	{																										\
+		OSFreeMem(pvScriptAlloc);																			\
+		pvScriptAlloc = NULL;																				\
+	}
+
+#define PDUMP_RELEASE_MSG_STRING()																			\
+	if (pvMsgAlloc)																							\
+	{																										\
+		OSFreeMem(pvMsgAlloc);																				\
+		pvMsgAlloc = NULL;																					\
+	}
+
+#define PDUMP_RELEASE_FILE_STRING()																			\
+	if (pvFileAlloc)																						\
+	{																										\
+		OSFreeMem(pvFileAlloc);																				\
+		pvFileAlloc = NULL;																					\
+	}
+
+#define PDUMP_RELEASE_SCRIPT_AND_FILE_STRING()																\
+	if (pvScriptAlloc)																						\
+	{																										\
+		OSFreeMem(pvScriptAlloc);																			\
+		pvScriptAlloc = NULL;																				\
+	}																										\
+	if (pvFileAlloc)																						\
+	{																										\
+		OSFreeMem(pvFileAlloc);																				\
+		pvFileAlloc = NULL;																					\
+	}
+
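The allocation and release macros above are meant to bracket a single script emission. Below is a minimal sketch of the expected pairing; the function name is invented, and PDumpSNPrintf() and PDumpWriteScript() are declared further down in this header.

static PVRSRV_ERROR PDumpExampleScriptWrite(IMG_UINT32 ui32Flags)
{
	PVRSRV_ERROR eError;
	/* Declares hScript/ui32MaxLen and may return on allocation failure,
	 * so it must come before any other statement. */
	PDUMP_GET_SCRIPT_STRING();

	eError = PDumpSNPrintf(hScript, ui32MaxLen, "-- example script line");
	if (eError == PVRSRV_OK)
		(void) PDumpWriteScript(hScript, ui32Flags);

	PDUMP_RELEASE_SCRIPT_STRING();
	return eError;
}
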
+
+/* Shared across pdump_x files */
+PVRSRV_ERROR PDumpInitCommon(void);
+void PDumpDeInitCommon(void);
+PVRSRV_ERROR PDumpReady(void);
+void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset,
+				size_t *puiZeroPageSize,
+				const IMG_CHAR **ppszZeroPageFilename);
+
+void PDumpConnectionNotify(void);
+void PDumpDisconnectionNotify(void);
+
+void PDumpStopInitPhase(void);
+PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+				PVRSRV_DEVICE_NODE * psDeviceNode,
+				IMG_UINT32 ui32Frame);
+PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+				PVRSRV_DEVICE_NODE * psDeviceNode,
+				IMG_UINT32* pui32Frame);
+PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+				IMG_UINT32 ui32Start,
+				IMG_UINT32 ui32End,
+				IMG_UINT32 ui32Interval,
+				IMG_UINT32 ui32MaxParamFileSize);
+
+
+PVRSRV_ERROR PDumpReg32(IMG_CHAR	*pszPDumpRegName,
+				IMG_UINT32	ui32RegAddr,
+				IMG_UINT32	ui32RegValue,
+				IMG_UINT32	ui32Flags);
+
+PVRSRV_ERROR PDumpReg64(IMG_CHAR	*pszPDumpRegName,
+				IMG_UINT32	ui32RegAddr,
+				IMG_UINT64	ui64RegValue,
+				IMG_UINT32	ui32Flags);
+
+PVRSRV_ERROR PDumpRegLabelToReg64(IMG_CHAR *pszPDumpRegName,
+				IMG_UINT32 ui32RegDst,
+				IMG_UINT32 ui32RegSrc,
+				IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpPhysHandleToInternalVar64(IMG_CHAR *pszInternalVar,
+				IMG_HANDLE hPdumpPages,
+				IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpMemLabelToInternalVar64(IMG_CHAR *pszInternalVar,
+				PMR *psPMR,
+				IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+				IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpWriteVarORValueOp(const IMG_CHAR *pszInternalVariable,
+				const IMG_UINT64 ui64Value,
+				const IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR PDumpWriteVarANDValueOp(const IMG_CHAR *pszInternalVariable,
+				const IMG_UINT64 ui64Value,
+				const IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR PDumpWriteVarSHRValueOp(const IMG_CHAR *pszInternalVariable,
+				const IMG_UINT64 ui64Value,
+				const IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR PDumpInternalVarToReg32(IMG_CHAR *pszPDumpRegName,
+				IMG_UINT32	ui32Reg,
+				IMG_CHAR *pszInternalVar,
+				IMG_UINT32	ui32Flags);
+
+PVRSRV_ERROR PDumpInternalVarToReg64(IMG_CHAR *pszPDumpRegName,
+				IMG_UINT32	ui32Reg,
+				IMG_CHAR *pszInternalVar,
+				IMG_UINT32	ui32Flags);
+
+PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource,
+				PMR *psPMRDest,
+				IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+				IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+				IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource,
+				PMR *psPMRDest,
+				IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+				IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+				IMG_UINT32	ui32Flags);
+
+PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName,
+				IMG_UINT32 ui32Reg,
+				PMR *psPMR,
+				IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+				IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName,
+				IMG_UINT32 ui32Reg,
+				PMR *psPMR,
+				IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+				IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpRegLabelToInternalVar(IMG_CHAR *pszPDumpRegName,
+				IMG_UINT32 ui32Reg,
+				IMG_CHAR *pszInternalVar,
+				IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpSAW(IMG_CHAR      *pszDevSpaceName,
+				IMG_UINT32    ui32HPOffsetBytes,
+				IMG_UINT32    ui32NumSaveBytes,
+				IMG_CHAR      *pszOutfileName,
+				IMG_UINT32    ui32OutfileOffsetByte,
+				PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR				*pszPDumpRegName,
+				IMG_UINT32			ui32RegAddr,
+				IMG_UINT32			ui32RegValue,
+				IMG_UINT32			ui32Mask,
+				IMG_UINT32			ui32Flags,
+				PDUMP_POLL_OPERATOR	eOperator);
+
+PVRSRV_ERROR PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+							  IMG_CHAR *pszFileName,
+							  IMG_UINT32 ui32FileOffset,
+							  IMG_UINT32 ui32Width,
+							  IMG_UINT32 ui32Height,
+							  IMG_UINT32 ui32StrideInBytes,
+							  IMG_DEV_VIRTADDR sDevBaseAddr,
+							  IMG_UINT32 ui32MMUContextID,
+							  IMG_UINT32 ui32Size,
+							  PDUMP_PIXEL_FORMAT ePixelFormat,
+							  IMG_UINT32 ui32AddrMode,
+							  IMG_UINT32 ui32PDumpFlags);
+
+
+/**************************************************************************/ /*!
+@Function       PDumpImageDescriptor
+@Description    PDumps image data out as an IMGBv2 data section
+@Input          psDeviceNode         Pointer to device node.
+@Input          ui32MMUContextID     PDUMP MMU context ID.
+@Input          pszSABFileName       Pointer to string containing file name of
+                                     Image being SABed
+@Input          sData                GPU virtual address of this surface.
+@Input          ui32DataSize         Image data size
+@Input          ui32LogicalWidth     Image logical width
+@Input          ui32LogicalHeight    Image logical height
+@Input          ui32PhysicalWidth    Image physical width
+@Input          ui32PhysicalHeight   Image physical height
+@Input          ePixFmt              Image pixel format
+@Input          eFBCompression       FB compression mode
+@Input          paui32FBCClearColour FB clear colour (Only applicable to FBC surfaces)
+@Input          eFBCSwizzle          FBC channel swizzle (Only applicable to FBC surfaces)
+@Input          sHeader              GPU virtual address of the headers of this
+                                     surface (Only applicable to FBC surfaces)
+@Input          ui32HeaderSize       Header size (Only applicable to FBC surfaces)
+@Input          ui32PDumpFlags       PDUMP flags
+@Return         PVRSRV_ERROR:        PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                     error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
+									IMG_UINT32 ui32MMUContextID,
+									IMG_CHAR *pszSABFileName,
+									IMG_DEV_VIRTADDR sData,
+									IMG_UINT32 ui32DataSize,
+									IMG_UINT32 ui32LogicalWidth,
+									IMG_UINT32 ui32LogicalHeight,
+									IMG_UINT32 ui32PhysicalWidth,
+									IMG_UINT32 ui32PhysicalHeight,
+									PDUMP_PIXEL_FORMAT ePixFmt,
+									IMG_MEMLAYOUT eMemLayout,
+									IMG_FB_COMPRESSION eFBCompression,
+									const IMG_UINT32 *paui32FBCClearColour,
+									PDUMP_FBC_SWIZZLE eFBCSwizzle,
+									IMG_DEV_VIRTADDR sHeader,
+									IMG_UINT32 ui32HeaderSize,
+									IMG_UINT32 ui32PDumpFlags);
+
+/**************************************************************************/ /*!
+@Function       PDumpDataDescriptor
+@Description    PDumps non-image data out as an IMGCv1 data section
+@Input          psDeviceNode         Pointer to device node.
+@Input          ui32MMUContextID     PDUMP MMU context ID.
+@Input          pszSABFileName       Pointer to string containing file name of
+                                     Data being SABed
+@Input          sData                GPU virtual address of this data.
+@Input          ui32DataSize         Data size
+@Input          ui32ElementType      Data element type
+@Input          ui32ElementCount     Number of data elements
+@Input          ui32PDumpFlags       PDUMP flags
+@Return         PVRSRV_ERROR:        PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                     error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
+									IMG_UINT32 ui32MMUContextID,
+									IMG_CHAR *pszSABFileName,
+									IMG_DEV_VIRTADDR sData,
+									IMG_UINT32 ui32DataSize,
+									IMG_UINT32 ui32ElementType,
+									IMG_UINT32 ui32ElementCount,
+									IMG_UINT32 ui32PDumpFlags);
+
+
+PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszPDumpRegName,
+				IMG_CHAR *pszFileName,
+				IMG_UINT32 ui32FileOffset,
+				IMG_UINT32 ui32Address,
+				IMG_UINT32 ui32Size,
+				IMG_UINT32 ui32PDumpFlags);
+
+__printf(2, 3)
+PVRSRV_ERROR PDumpCommentWithFlagsNoLock(IMG_UINT32	ui32Flags,
+				IMG_CHAR*	pszFormat,
+				...);
+
+PVRSRV_ERROR PDumpCommentWithFlagsNoLockVA(IMG_UINT32 ui32Flags,
+				const IMG_CHAR * pszFormat,
+				va_list args);
+
+__printf(2, 3)
+PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32	ui32Flags,
+				IMG_CHAR*	pszFormat,
+				...);
+
+PVRSRV_ERROR PDumpCommentWithFlagsVA(IMG_UINT32 ui32Flags,
+				const IMG_CHAR * pszFormat,
+				va_list args);
+
+PVRSRV_ERROR PDumpPanic(IMG_UINT32      ui32PanicNo,
+				IMG_CHAR*       pszPanicMsg,
+				const IMG_CHAR* pszPPFunc,
+				IMG_UINT32      ui32PPline);
+
+PVRSRV_ERROR PDumpCaptureError(PVRSRV_ERROR    ui32ErrorNo,
+				IMG_CHAR*       pszErrorMsg,
+				const IMG_CHAR* pszPPFunc,
+				IMG_UINT32      ui32PPline);
+
+PVRSRV_ERROR PDumpPDReg(PDUMP_MMU_ATTRIB *psMMUAttrib,
+				IMG_UINT32	ui32Reg,
+				IMG_UINT32	ui32dwData,
+				IMG_HANDLE	hUniqueTag);
+PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib,
+				IMG_UINT32		ui32Reg,
+				IMG_UINT32		ui32Data,
+				IMG_UINT32		ui32Flags,
+				IMG_HANDLE		hUniqueTag);
+
+PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame);
+
+PVRSRV_ERROR PDumpGetStateKM(IMG_UINT64 *ui64State);
+
+PVRSRV_ERROR PDumpGetCurrentBlockKM(IMG_UINT32 *pui32CurrentBlock);
+
+PVRSRV_ERROR PDumpForceCaptureStopKM(void);
+
+PVRSRV_ERROR PDumpIsCaptureFrameKM(IMG_BOOL *bIsCaptureRange);
+
+PVRSRV_ERROR PDumpRegRead32(IMG_CHAR *pszPDumpRegName,
+				const IMG_UINT32 dwRegOffset,
+				IMG_UINT32	ui32Flags);
+PVRSRV_ERROR PDumpRegRead64(IMG_CHAR *pszPDumpRegName,
+				const IMG_UINT32 dwRegOffset,
+				IMG_UINT32	ui32Flags);
+PVRSRV_ERROR PDumpRegRead64ToInternalVar(IMG_CHAR	*pszPDumpRegName,
+						IMG_CHAR *pszInternalVar,
+						const IMG_UINT32 dwRegOffset,
+						IMG_UINT32	ui32Flags);
+
+PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
+PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks);
+
+PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR		*pszPDumpRegName,
+				IMG_UINT32	ui32RegOffset,
+				IMG_UINT32	ui32WPosVal,
+				IMG_UINT32	ui32PacketSize,
+				IMG_UINT32	ui32BufferSize,
+				IMG_UINT32	ui32Flags);
+
+PVRSRV_ERROR PDumpTRG(IMG_CHAR *pszMemSpace,
+				IMG_UINT32 ui32MMUCtxID,
+				IMG_UINT32 ui32RegionID,
+				IMG_BOOL bEnable,
+				IMG_UINT64 ui64VAddr,
+				IMG_UINT64 ui64LenBytes,
+				IMG_UINT32 ui32XStride,
+				IMG_UINT32 ui32Flags);
+
+void PDumpLock(void);
+void PDumpUnlock(void);
+PVRSRV_ERROR PDumpIfKM(IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags);
+PVRSRV_ERROR PDumpElseKM(IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags);
+PVRSRV_ERROR PDumpFiKM(IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags);
+
+void PDumpPowerTransitionStart(void);
+void PDumpPowerTransitionEnd(void);
+IMG_BOOL PDumpInPowerTransition(void);
+IMG_BOOL PDumpIsContCaptureOn(void);
+
+/*!
+ * @name	PDumpWriteParameter
+ * @brief	General function for writing to the PDump stream. Used
+ *		mainly for memory dumps to the parameter stream. It is
+ *		usually more convenient to use PDumpWriteScript below
+ *		for the script stream.
+ * @param	psui8Data - data to write
+ * @param	ui32Size - size of write
+ * @param	ui32Flags - PDump flags
+ * @param   pui32FileOffset - on return contains the file offset to
+ *                            the start of the parameter data
+ * @param   aszFilenameStr - pointer to at least a 20 char buffer to
+ *                           return the parameter filename
+ * @return	error
+ */
+PVRSRV_ERROR PDumpWriteParameter(IMG_UINT8 *psui8Data, IMG_UINT32 ui32Size,
+				IMG_UINT32 ui32Flags, IMG_UINT32* pui32FileOffset,
+				IMG_CHAR* aszFilenameStr);
+
+/*!
+ * @name	PDumpWriteScript
+ * @brief	Write a PDumpOS-created string to the "script" output stream
+ * @param	hString - PDump OS layer handle of string buffer to write
+ * @param	ui32Flags - PDump flags
+ * @return	IMG_TRUE on success.
+ */
+IMG_BOOL PDumpWriteScript(IMG_HANDLE hString, IMG_UINT32 ui32Flags);
+
+/**************************************************************************/ /*!
+@Function       PDumpSNPrintf
+@Description    Printf to OS-specific PDump state buffer. This function is
+                only called if PDUMP is defined.
+@Input          hBuf               handle of buffer to write into
+@Input          ui32ScriptSizeMax  maximum size of data to write (chars)
+@Input          pszFormat          format string
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+__printf(3, 4)
+PVRSRV_ERROR PDumpSNPrintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...);
+
+
+/*
+   PDumpWriteShiftedMaskedValue():
+
+   loads the "reference" address into an internal PDump register,
+   optionally shifts it right,
+   optionally shifts it left,
+   optionally masks it
+   then finally writes the computed value to the given destination address
+
+   i.e. it emits pdump language equivalent to this expression:
+
+   dest = ((&ref) >> SHRamount << SHLamount) & MASK
+*/
+extern PVRSRV_ERROR
+PDumpWriteShiftedMaskedValue(const IMG_CHAR *pszDestRegspaceName,
+                             const IMG_CHAR *pszDestSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiDestOffset,
+                             const IMG_CHAR *pszRefRegspaceName,
+                             const IMG_CHAR *pszRefSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiRefOffset,
+                             IMG_UINT32 uiSHRAmount,
+                             IMG_UINT32 uiSHLAmount,
+                             IMG_UINT32 uiMask,
+                             IMG_DEVMEM_SIZE_T uiWordSize,
+                             IMG_UINT32 uiPDumpFlags);
+
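As a worked instance of that expression: with uiSHRAmount = 12, uiSHLAmount = 0 and uiMask = 0x0FFFFFFF (values chosen purely for illustration), the emitted script computes dest = ((&ref) >> 12) & 0x0FFFFFFF, i.e. it stores the 4 KiB page-frame number of the reference address into the destination.
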
+/*
+  PDumpWriteSymbAddress():
+  writes the address of the "reference" to the offset given
+*/
+extern PVRSRV_ERROR
+PDumpWriteSymbAddress(const IMG_CHAR *pszDestSpaceName,
+                      IMG_DEVMEM_OFFSET_T uiDestOffset,
+                      const IMG_CHAR *pszRefSymbolicName,
+                      IMG_DEVMEM_OFFSET_T uiRefOffset,
+                      const IMG_CHAR *pszPDumpDevName,
+                      IMG_UINT32 ui32WordSize,
+                      IMG_UINT32 ui32AlignShift,
+                      IMG_UINT32 ui32Shift,
+                      IMG_UINT32 uiPDumpFlags);
+
+/* Register the connection with the PDump subsystem */
+extern PVRSRV_ERROR PDumpRegisterConnection(void *hSyncPrivData,
+	                                         PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks,
+	                                         PDUMP_CONNECTION_DATA **ppsPDumpConnectionData);
+
+/* Unregister the connection with the PDump subsystem */
+extern void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData);
+
+/* Register for notification of PDump Transition into/out of capture range */
+extern PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+													 PFN_PDUMP_TRANSITION pfnCallback,
+													 void *hPrivData,
+													 void *pvDevice,
+													 void **ppvHandle);
+
+/* Unregister notification of PDump Transition */
+extern void PDumpUnregisterTransitionCallback(void *pvHandle);
+
+/* Notify PDump of a Transition into/out of capture range */
+extern PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags);
+
+
+	#define PDUMP_LOCK(_ui32PDumpFlags) do \
+		{ if ((_ui32PDumpFlags & PDUMP_FLAGS_PDUMP_LOCK_HELD) == 0)\
+			{\
+				PDumpLock();\
+			}\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PDUMP_UNLOCK(_ui32PDumpFlags) do \
+		{ if ((_ui32PDumpFlags & PDUMP_FLAGS_PDUMP_LOCK_HELD) == 0)\
+			{\
+				PDumpUnlock();\
+			}\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PDUMPINIT				PDumpInitCommon
+	#define PDUMPDEINIT				PDumpDeInitCommon
+	#define PDUMPREG32				PDumpReg32
+	#define PDUMPREG64				PDumpReg64
+	#define PDUMPREGREAD32			PDumpRegRead32
+	#define PDUMPREGREAD64			PDumpRegRead64
+	#define PDUMPCOMMENT(...)		PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, __VA_ARGS__)
+	#define PDUMPCOMMENTWITHFLAGS	PDumpCommentWithFlags
+	#define PDUMPREGPOL				PDumpRegPolKM
+	#define PDUMPPDREG				PDumpPDReg
+	#define PDUMPPDREGWITHFLAGS		PDumpPDRegWithFlags
+	#define PDUMPREGBASEDCBP		PDumpRegBasedCBP
+	#define PDUMPENDINITPHASE		PDumpStopInitPhase
+	#define PDUMPIDLWITHFLAGS		PDumpIDLWithFlags
+	#define PDUMPIDL				PDumpIDL
+	#define PDUMPPOWCMDSTART		PDumpPowerTransitionStart
+	#define PDUMPPOWCMDEND			PDumpPowerTransitionEnd
+	#define PDUMPPOWCMDINTRANS		PDumpInPowerTransition
+
+#define PDUMP_BLKSTART(_ui32PDumpFlags) do \
+		{ PDUMP_LOCK(_ui32PDumpFlags);\
+		_ui32PDumpFlags |= PDUMP_FLAGS_PDUMP_LOCK_HELD;\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+#define PDUMP_BLKEND(_ui32PDumpFlags) do \
+		{ _ui32PDumpFlags &= ~PDUMP_FLAGS_PDUMP_LOCK_HELD;\
+		 PDUMP_UNLOCK(_ui32PDumpFlags);\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+#define PDUMPIF(_msg,_ui32PDumpFlags) do \
+		{PDUMP_BLKSTART(_ui32PDumpFlags);\
+		PDumpIfKM(_msg,_ui32PDumpFlags);\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PDUMPELSE				PDumpElseKM
+
+	#define PDUMPFI(_msg,_ui32PDumpFlags) do \
+		{ PDumpFiKM(_msg,_ui32PDumpFlags);\
+		PDUMP_BLKEND(_ui32PDumpFlags);\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
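The way PDUMP_FLAGS_PDUMP_LOCK_HELD threads through these macros is easiest to see in an IF/FI pair, which must reach the script stream atomically. A hedged sketch follows (the condition string and function name are arbitrary): PDUMPIF takes the lock and sets the held flag in the caller's variable, and PDUMPFI clears the flag and releases the lock.

static void PDumpExampleConditional(void)
{
	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;

	PDUMPIF("EXAMPLE_COND", ui32Flags);   /* lock taken, LOCK_HELD set */
	/* ...script writes made here with ui32Flags skip re-locking... */
	PDUMPFI("EXAMPLE_COND", ui32Flags);   /* FI emitted, lock released */
}
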
+#else
+	/*
+		We should be clearer about which functions can be called
+		across the bridge as this looks rather unbalanced
+	*/
+
+/*! Macro used to record a panic in the PDump script stream */
+#define PDUMP_PANIC(_id, _msg)  ((void)0)
+
+/*! Macro used to record a driver error in the PDump script stream to invalidate the capture */
+#define PDUMP_ERROR(_err, _msg) ((void)0)
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpInitCommon)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpInitCommon(void)
+{
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpConnectionNotify)
+#endif
+static INLINE void
+PDumpConnectionNotify(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDisconnectionNotify)
+#endif
+static INLINE void
+PDumpDisconnectionNotify(void)
+{
+}
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpLock)
+#endif
+static INLINE void
+PDumpLock(void)
+{
+}
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnlock)
+#endif
+static INLINE void
+PDumpUnlock(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpStopInitPhase)
+#endif
+static INLINE void
+PDumpStopInitPhase(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE *psDevNode,
+                IMG_UINT32 ui32Frame)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(ui32Frame);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE *psDeviceNode,
+                IMG_UINT32* pui32Frame)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(pui32Frame);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCommentKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+	PVR_UNREFERENCED_PARAMETER(pszComment);
+	PVR_UNREFERENCED_PARAMETER(ui32Flags);
+	return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetDefaultCaptureParamsKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+                              IMG_UINT32 ui32Start,
+                              IMG_UINT32 ui32End,
+                              IMG_UINT32 ui32Interval,
+                              IMG_UINT32 ui32MaxParamFileSize)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32Mode);
+	PVR_UNREFERENCED_PARAMETER(ui32Start);
+	PVR_UNREFERENCED_PARAMETER(ui32End);
+	PVR_UNREFERENCED_PARAMETER(ui32Interval);
+	PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize);
+
+	return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPanic)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpPanic(IMG_UINT32      ui32PanicNo,
+		   IMG_CHAR*       pszPanicMsg,
+		   const IMG_CHAR* pszPPFunc,
+		   IMG_UINT32      ui32PPline)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32PanicNo);
+	PVR_UNREFERENCED_PARAMETER(pszPanicMsg);
+	PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+	PVR_UNREFERENCED_PARAMETER(ui32PPline);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCaptureError)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCaptureError(PVRSRV_ERROR    ui32ErrorNo,
+                  IMG_CHAR*       pszErrorMsg,
+                  const IMG_CHAR* pszPPFunc,
+                  IMG_UINT32      ui32PPline)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32ErrorNo);
+	PVR_UNREFERENCED_PARAMETER(pszErrorMsg);
+	PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+	PVR_UNREFERENCED_PARAMETER(ui32PPline);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsLastCaptureFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame)
+{
+	*pbIsLastCaptureFrame = IMG_FALSE;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetStateKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetStateKM(IMG_UINT64 *ui64State)
+{
+	*ui64State = 0;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsCaptureFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing)
+{
+	*bIsCapturing = IMG_FALSE;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetCurrentBlockKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetCurrentBlockKM(IMG_UINT32 *pui32BlockNum)
+{
+	*pui32BlockNum = PDUMP_BLOCKNUM_INVALID;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpForceCaptureStopKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpForceCaptureStopKM(void)
+{
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpBitmapKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+										  IMG_CHAR *pszFileName,
+										  IMG_UINT32 ui32FileOffset,
+										  IMG_UINT32 ui32Width,
+										  IMG_UINT32 ui32Height,
+										  IMG_UINT32 ui32StrideInBytes,
+										  IMG_DEV_VIRTADDR sDevBaseAddr,
+										  IMG_UINT32 ui32MMUContextID,
+										  IMG_UINT32 ui32Size,
+										  PDUMP_PIXEL_FORMAT ePixelFormat,
+										  IMG_UINT32 ui32AddrMode,
+										  IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(pszFileName);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Width);
+	PVR_UNREFERENCED_PARAMETER(ui32Height);
+	PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes);
+	PVR_UNREFERENCED_PARAMETER(sDevBaseAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+	PVR_UNREFERENCED_PARAMETER(ePixelFormat);
+	PVR_UNREFERENCED_PARAMETER(ui32AddrMode);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpImageDescriptor)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
+					   IMG_UINT32 ui32MMUContextID,
+					   IMG_CHAR *pszSABFileName,
+					   IMG_DEV_VIRTADDR sData,
+					   IMG_UINT32 ui32DataSize,
+					   IMG_UINT32 ui32LogicalWidth,
+					   IMG_UINT32 ui32LogicalHeight,
+					   IMG_UINT32 ui32PhysicalWidth,
+					   IMG_UINT32 ui32PhysicalHeight,
+					   PDUMP_PIXEL_FORMAT ePixFmt,
+					   IMG_MEMLAYOUT eMemLayout,
+					   IMG_FB_COMPRESSION eFBCompression,
+					   const IMG_UINT32 *paui32FBCClearColour,
+					   PDUMP_FBC_SWIZZLE eFBCSwizzle,
+					   IMG_DEV_VIRTADDR sHeader,
+					   IMG_UINT32 ui32HeaderSize,
+					   IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+	PVR_UNREFERENCED_PARAMETER(pszSABFileName);
+	PVR_UNREFERENCED_PARAMETER(sData);
+	PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+	PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth);
+	PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight);
+	PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth);
+	PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight);
+	PVR_UNREFERENCED_PARAMETER(ePixFmt);
+	PVR_UNREFERENCED_PARAMETER(eMemLayout);
+	PVR_UNREFERENCED_PARAMETER(eFBCompression);
+	PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour);
+	PVR_UNREFERENCED_PARAMETER(eFBCSwizzle);
+	PVR_UNREFERENCED_PARAMETER(sHeader);
+	PVR_UNREFERENCED_PARAMETER(ui32HeaderSize);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDataDescriptor)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
+					   IMG_UINT32 ui32MMUContextID,
+					   IMG_CHAR *pszSABFileName,
+					   IMG_DEV_VIRTADDR sData,
+					   IMG_UINT32 ui32DataSize,
+					   IMG_UINT32 ui32ElementType,
+					   IMG_UINT32 ui32ElementCount,
+					   IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+	PVR_UNREFERENCED_PARAMETER(pszSABFileName);
+	PVR_UNREFERENCED_PARAMETER(sData);
+	PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+	PVR_UNREFERENCED_PARAMETER(ui32ElementType);
+	PVR_UNREFERENCED_PARAMETER(ui32ElementCount);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterConnection)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpRegisterConnection(void *hSyncPrivData,
+		                 PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks,
+	                     PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
+{
+	PVR_UNREFERENCED_PARAMETER(hSyncPrivData);
+	PVR_UNREFERENCED_PARAMETER(pfnPDumpSyncBlocks);
+	PVR_UNREFERENCED_PARAMETER(ppsPDumpConnectionData);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterConnection)
+#endif
+static INLINE
+void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterTransitionCallback)
+#endif
+static INLINE
+PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+											  PFN_PDUMP_TRANSITION pfnCallback,
+											  void *hPrivData,
+											  void *pvDevice,
+											  void **ppvHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+	PVR_UNREFERENCED_PARAMETER(pfnCallback);
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+	PVR_UNREFERENCED_PARAMETER(pvDevice);
+	PVR_UNREFERENCED_PARAMETER(ppvHandle);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterTransitionCallback)
+#endif
+static INLINE
+void PDumpUnregisterTransitionCallback(void *pvHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(pvHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpTransition)
+#endif
+static INLINE
+PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+	PVR_UNREFERENCED_PARAMETER(eEvent);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+	#if defined LINUX || defined GCC_IA32 || defined GCC_ARM || defined __QNXNTO__ || defined(INTEGRITY_OS)
+		#define PDUMPINIT	PDumpInitCommon
+		#define PDUMPDEINIT(args...)
+		#define PDUMPREG32(args...)
+		#define PDUMPREG64(args...)
+		#define PDUMPREGREAD32(args...)
+		#define PDUMPREGREAD64(args...)
+		#define PDUMPCOMMENT(args...)
+		#define PDUMPREGPOL(args...)
+		#define PDUMPPDREG(args...)
+		#define PDUMPPDREGWITHFLAGS(args...)
+		#define PDUMPSYNC(args...)
+		#define PDUMPCOPYTOMEM(args...)
+		#define PDUMPWRITE(args...)
+		#define PDUMPREGBASEDCBP(args...)
+		#define PDUMPCOMMENTWITHFLAGS(args...)
+		#define PDUMPENDINITPHASE(args...)
+		#define PDUMPIDLWITHFLAGS(args...)
+		#define PDUMPIDL(args...)
+		#define PDUMPPOWCMDSTART(args...)
+		#define PDUMPPOWCMDEND(args...)
+		#define PDUMP_LOCK(args...)
+		#define PDUMP_UNLOCK(args...)
+
+	#else
+		#error Compiler not specified
+	#endif
+#endif
+
+
+#endif /* PDUMP_KM_H */
+
+/******************************************************************************
+ End of file (pdump_km.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump_mmu.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump_mmu.h
new file mode 100644
index 0000000..ebc9821
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump_mmu.h
@@ -0,0 +1,164 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common MMU Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_PDUMP_MMU_H
+#define SRVKM_PDUMP_MMU_H
+
+/* services/server/include/ */
+#include "pdump_symbolicaddr.h"
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "mmu_common.h"
+
+/*
+	PDUMP MMU attributes
+*/
+typedef struct _PDUMP_MMU_ATTRIB_DEVICE_
+{
+    /* Per-Device Pdump attribs */
+
+	/*!< Pdump memory bank name */
+	IMG_CHAR				*pszPDumpMemDevName;
+
+	/*!< Pdump register bank name */
+	IMG_CHAR				*pszPDumpRegDevName;
+
+} PDUMP_MMU_ATTRIB_DEVICE;
+
+typedef struct _PDUMP_MMU_ATTRIB_CONTEXT_
+{
+	IMG_UINT32 ui32Dummy;
+} PDUMP_MMU_ATTRIB_CONTEXT;
+
+typedef struct _PDUMP_MMU_ATTRIB_HEAP_
+{
+	/* data page info */
+	IMG_UINT32 ui32DataPageMask;
+} PDUMP_MMU_ATTRIB_HEAP;
+
+typedef struct _PDUMP_MMU_ATTRIB_
+{
+    /* FIXME: would these be better as pointers rather than copies? */
+    struct _PDUMP_MMU_ATTRIB_DEVICE_ sDevice;
+    struct _PDUMP_MMU_ATTRIB_CONTEXT_ sContext;
+    struct _PDUMP_MMU_ATTRIB_HEAP_ sHeap;
+} PDUMP_MMU_ATTRIB;
+
+#if defined(PDUMP)
+	extern PVRSRV_ERROR PDumpMMUMalloc(const IMG_CHAR			*pszPDumpDevName,
+                                           MMU_LEVEL 				eMMULevel,
+                                           IMG_DEV_PHYADDR			*psDevPAddr,
+                                           IMG_UINT32				ui32Size,
+                                           IMG_UINT32				ui32Align,
+                                           PDUMP_MMU_TYPE          eMMUType);
+
+    extern PVRSRV_ERROR PDumpMMUFree(const IMG_CHAR				*pszPDumpDevName,
+                                     MMU_LEVEL					eMMULevel,
+                                     IMG_DEV_PHYADDR			        *psDevPAddr,
+                                     PDUMP_MMU_TYPE             eMMUType);
+
+	extern PVRSRV_ERROR PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName,
+                                                     PMR *psPMRDest,
+                                                     IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+                                                     IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+                                                     IMG_UINT32 ui32Flags,
+                                                     MMU_LEVEL eMMULevel,
+                                                     IMG_UINT64 ui64PxSymAddr);
+
+    extern PVRSRV_ERROR PDumpMMUDumpPxEntries(MMU_LEVEL eMMULevel,
+    								   const IMG_CHAR *pszPDumpDevName,
+                                       void *pvPxMem,
+                                       IMG_DEV_PHYADDR sPxDevPAddr,
+                                       IMG_UINT32 uiFirstEntry,
+                                       IMG_UINT32 uiNumEntries,
+                                       const IMG_CHAR *pszMemspaceName,
+                                       const IMG_CHAR *pszSymbolicAddr,
+                                       IMG_UINT64 uiSymbolicAddrOffset,
+                                       IMG_UINT32 uiBytesPerEntry,
+                                       IMG_UINT32 uiLog2Align,
+                                       IMG_UINT32 uiAddrShift,
+                                       IMG_UINT64 uiAddrMask,
+                                       IMG_UINT64 uiPxEProtMask,
+                                       IMG_UINT64 uiDataValidEnable,
+                                       IMG_UINT32 ui32Flags,
+                                       PDUMP_MMU_TYPE eMMUType);
+
+
+    extern PVRSRV_ERROR PDumpMMUAllocMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+                                                IMG_DEV_PHYADDR sPCDevPAddr,
+                                                PDUMP_MMU_TYPE eMMUType,
+                                                IMG_UINT32 *pui32MMUContextID);
+
+    extern PVRSRV_ERROR PDumpMMUFreeMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+                                               IMG_UINT32 ui32MMUContextID);
+
+	/* FIXME: split to separate file... (debatable whether this is anything to do with MMU) */
+extern PVRSRV_ERROR
+PDumpMMUSAB(const IMG_CHAR *pszPDumpMemNamespace,
+            IMG_UINT32 uiPDumpMMUCtx,
+            IMG_DEV_VIRTADDR sDevAddrStart,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFilename,
+            IMG_UINT32 uiFileOffset,
+            IMG_UINT32 ui32PDumpFlags);
+
+	#define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID) \
+        PDumpMMUAllocMMUContext(pszPDumpMemDevName,                     \
+                                sPCDevPAddr,                            \
+                                eMMUType,								\
+                                puiPDumpCtxID)
+
+    #define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID) \
+        PDumpMMUFreeMMUContext(pszPDumpMemDevName, uiPDumpCtxID)
+#else
+
+	#define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID) \
+        ((void)0)
+    #define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID) \
+        ((void)0)
+
+#endif // defined(PDUMP)
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump_physmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump_physmem.h
new file mode 100644
index 0000000..5e36134
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump_physmem.h
@@ -0,0 +1,243 @@
+/**************************************************************************/ /*!
+@File
+@Title          pdump functions to assist with physmem allocations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVSRV_PDUMP_PHYSMEM_H
+#define SRVSRV_PDUMP_PHYSMEM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pmr.h"
+
+#define PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH 40
+#define PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH 60
+#define PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH (PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH)
+
+typedef struct _PDUMP_PHYSMEM_INFO_T_ PDUMP_PHYSMEM_INFO_T;
+
+#if defined(PDUMP)
+extern PVRSRV_ERROR
+PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle,
+                     IMG_CHAR **ppszSymbolicAddress);
+
+extern PVRSRV_ERROR
+PDumpMalloc(const IMG_CHAR *pszDevSpace,
+               const IMG_CHAR *pszSymbolicAddress,
+               IMG_UINT64 ui64Size,
+               /* alignment is alignment of start of buffer _and_
+                  minimum contiguity - i.e. smallest allowable
+                  page-size. */
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               IMG_BOOL bInitialise,
+               IMG_UINT32 ui32InitValue,
+               IMG_HANDLE *phHandlePtr,
+               IMG_UINT32 ui32PDumpFlags);
+
+extern
+PVRSRV_ERROR PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle);
+
+void
+PDumpMakeStringValid(IMG_CHAR *pszString,
+                     IMG_UINT32 ui32StrLen);
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetSymbolicAddr)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle,
+                     IMG_CHAR **ppszSymbolicAddress)
+{
+	PVR_UNREFERENCED_PARAMETER(hPhysmemPDumpHandle);
+	PVR_UNREFERENCED_PARAMETER(ppszSymbolicAddress);
+	return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpMalloc(const IMG_CHAR *pszDevSpace,
+               const IMG_CHAR *pszSymbolicAddress,
+               IMG_UINT64 ui64Size,
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               IMG_BOOL bInitialise,
+               IMG_UINT32 ui32InitValue,
+               IMG_HANDLE *phHandlePtr,
+               IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(pszDevSpace);
+	PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress);
+	PVR_UNREFERENCED_PARAMETER(ui64Size);
+	PVR_UNREFERENCED_PARAMETER(uiAlign);
+	PVR_UNREFERENCED_PARAMETER(bInitialise);
+	PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+	PVR_UNREFERENCED_PARAMETER(phHandlePtr);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
+	return PVRSRV_OK;
+}
+#endif	/* PDUMP */
+
+#define PMR_DEFAULT_PREFIX "PMR"
+#define PMR_SYMBOLICADDR_FMTSPEC "%s%"IMG_UINT64_FMTSPEC"_%"IMG_UINT64_FMTSPEC"_%s"
+#define PMR_MEMSPACE_FMTSPEC "%s"
+#define PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC PMR_MEMSPACE_FMTSPEC
+
+#if defined(PDUMP)
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
+    PDumpMalloc(pszPDumpMemDevName, PMR_OSALLOCPAGES_PREFIX, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr, PDUMP_NONE)
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+    PDumpFree(hHandle)
+#else
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
+    ((void)(*phHandlePtr=NULL))
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+    ((void)(0))
+#endif // defined(PDUMP)
+
+extern PVRSRV_ERROR
+PDumpPMRWRW32(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT32 ui32Value,
+            PDUMP_FLAGS_T uiPDumpFlags);
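+
+/*
+ * Illustrative sketch (not part of the driver): emitting a 32-bit
+ * write of value 0x1 at offset 0 of an allocation. The memspace and
+ * symbolic names here are hypothetical.
+ *
+ *     eError = PDumpPMRWRW32("SYSMEM", "PMR1_0_Flags", 0, 0x1,
+ *                            PDUMP_FLAGS_CONTINUOUS);
+ */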
+
+extern PVRSRV_ERROR
+PDumpPMRWRW32InternalVarToMem(const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              const IMG_CHAR *pszInternalVar,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRRDW32MemToInternalVar(const IMG_CHAR *pszInternalVar,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRWRW64(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT64 ui64Value,
+            PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRWRW64InternalVarToMem(const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              const IMG_CHAR *pszInternalVar,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRRDW64MemToInternalVar(const IMG_CHAR *pszInternalVar,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRLDB(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFilename,
+            IMG_UINT32 uiFileOffset,
+            PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRSAB(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFileName,
+            IMG_UINT32 uiFileOffset);
+
+/*
+  PDumpPMRPOL()
+
+  Emits a POL (poll) command to the PDump script.
+*/
+extern PVRSRV_ERROR
+PDumpPMRPOL(const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT32 ui32Value,
+            IMG_UINT32 ui32Mask,
+            PDUMP_POLL_OPERATOR eOperator,
+            IMG_UINT32 uiCount,
+            IMG_UINT32 uiDelay,
+            PDUMP_FLAGS_T uiPDumpFlags);
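+
+/*
+ * Illustrative sketch (not part of the driver): polling a 32-bit sync
+ * value for equality, retrying up to 1000 times with a delay of 10
+ * between polls. The memspace and symbolic names are hypothetical.
+ *
+ *     eError = PDumpPMRPOL("SYSMEM", "PMR2_0_SyncBlock", 0,
+ *                          0xDEADBEEF, 0xFFFFFFFF,
+ *                          PDUMP_POLL_OPERATOR_EQUAL,
+ *                          1000, 10, PDUMP_FLAGS_CONTINUOUS);
+ */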
+
+extern PVRSRV_ERROR
+PDumpPMRCBP(const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize);
+
+/*
+ * PDumpWriteParameterBlob()
+ *
+ * Writes a binary blob containing the current contents of the memory
+ * to the PDump parameter stream, and returns the filename and offset
+ * at which that blob is located (for use in a subsequent LDB, for
+ * example).
+ *
+ * The caller provides the buffer that receives the filename and
+ * declares its size.
+ */
+extern PVRSRV_ERROR
+PDumpWriteParameterBlob(IMG_UINT8 *pcBuffer,
+                 size_t uiNumBytes,
+                 PDUMP_FLAGS_T uiPDumpFlags,
+                 IMG_CHAR *pszFilenameOut,
+                 size_t uiFilenameBufSz,
+                 PDUMP_FILEOFFSET_T *puiOffsetOut);
+
+#endif /* #ifndef SRVSRV_PDUMP_PHYSMEM_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump_symbolicaddr.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump_symbolicaddr.h
new file mode 100644
index 0000000..ed912a5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdump_symbolicaddr.h
@@ -0,0 +1,55 @@
+/**************************************************************************/ /*!
+@File
+@Title          Abstraction of PDUMP symbolic address derivation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Allows pdump functions to derive symbolic addresses on-the-fly
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_PDUMP_SYMBOLICADDR_H
+#define SRVKM_PDUMP_SYMBOLICADDR_H
+
+#include "img_types.h"
+
+#include "pvrsrv_error.h"
+
+/* pdump symbolic addresses are generated on-the-fly with a callback */
+
+typedef PVRSRV_ERROR (*PVRSRV_SYMADDRFUNCPTR)(IMG_HANDLE hPriv, IMG_UINT32 uiOffset, IMG_CHAR *pszSymbolicAddr, IMG_UINT32 ui32SymbolicAddrLen, IMG_UINT32 *pui32NewOffset);
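+
+/*
+ * Illustrative sketch (hypothetical, not part of the driver): a
+ * provider could format the symbolic address of the 4K page containing
+ * uiOffset and return the remaining offset within that page. This
+ * assumes an OSSNPrintf-style formatter from osfunc.h.
+ *
+ *     static PVRSRV_ERROR ExampleSymAddr(IMG_HANDLE hPriv,
+ *                                        IMG_UINT32 uiOffset,
+ *                                        IMG_CHAR *pszSymbolicAddr,
+ *                                        IMG_UINT32 ui32SymbolicAddrLen,
+ *                                        IMG_UINT32 *pui32NewOffset)
+ *     {
+ *         PVR_UNREFERENCED_PARAMETER(hPriv);
+ *         OSSNPrintf(pszSymbolicAddr, ui32SymbolicAddrLen,
+ *                    ":SYSMEM:ExampleBlock_%u", uiOffset >> 12);
+ *         *pui32NewOffset = uiOffset & 0xFFF;
+ *         return PVRSRV_OK;
+ *     }
+ */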
+
+#endif /* #ifndef SRVKM_PDUMP_SYMBOLICADDR_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdumpdefs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdumpdefs.h
new file mode 100644
index 0000000..264a0ad
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pdumpdefs.h
@@ -0,0 +1,244 @@
+/*************************************************************************/ /*!
+@File
+@Title          PDUMP definitions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    PDUMP definitions header
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PDUMPDEFS_H
+#define PDUMPDEFS_H
+
+/*! PDump Pixel Format Enumeration */
+typedef enum _PDUMP_PIXEL_FORMAT_
+{
+	PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
+	PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
+/*	PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10, */
+	PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
+	PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
+	PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
+	PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
+	PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
+	PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
+	PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
+	PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
+	PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
+	PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
+	PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
+	PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
+	PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
+	PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31,
+	PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36,
+	PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37,
+	PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888 = 39,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444 = 40,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444 = 41,
+	PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444 = 42,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555 = 43,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551 = 44,
+	PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551 = 45,
+	PVRSRV_PDUMP_PIXEL_FORMAT_BGR565 = 46,
+	PVRSRV_PDUMP_PIXEL_FORMAT_A8 = 47,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16F16 = 49,
+	PVRSRV_PDUMP_PIXEL_FORMAT_A4 = 50,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB2101010 = 51,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RSGSBS888 = 52,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32F32 = 53,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F16F16 = 54,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F32F32 = 55,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16 = 56,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32 = 57,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U8 = 58,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U8U8 = 59,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U16 = 60,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U16U16 = 61,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U16U16U16U16 = 62,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U32 = 63,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U32U32 = 64,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U32U32U32U32 = 65,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32 = 66,
+
+	PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
+
+} PDUMP_PIXEL_FORMAT;
+
+typedef enum _PDUMP_FBC_SWIZZLE_
+{
+	PVRSRV_PDUMP_FBC_SWIZZLE_ARGB = 0x0,
+	PVRSRV_PDUMP_FBC_SWIZZLE_ARBG = 0x1,
+	PVRSRV_PDUMP_FBC_SWIZZLE_AGRB = 0x2,
+	PVRSRV_PDUMP_FBC_SWIZZLE_AGBR = 0x3,
+	PVRSRV_PDUMP_FBC_SWIZZLE_ABGR = 0x4,
+	PVRSRV_PDUMP_FBC_SWIZZLE_ABRG = 0x5,
+	PVRSRV_PDUMP_FBC_SWIZZLE_RGBA = 0x8,
+	PVRSRV_PDUMP_FBC_SWIZZLE_RBGA = 0x9,
+	PVRSRV_PDUMP_FBC_SWIZZLE_GRBA = 0xA,
+	PVRSRV_PDUMP_FBC_SWIZZLE_GBRA = 0xB,
+	PVRSRV_PDUMP_FBC_SWIZZLE_BGRA = 0xC,
+	PVRSRV_PDUMP_FBC_SWIZZLE_BRGA = 0xD,
+} PDUMP_FBC_SWIZZLE;
+
+/*! PDump addrmode */
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT			0
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_MASK			0x000000FF
+
+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT			8
+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE		(1U << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT		12
+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK			0x000FF000
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT				20
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK				0x00F00000
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT			24
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT			25
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT			28
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK			0xF0000000
+
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE			(0U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE1 (1U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE2 (2U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE3 (3U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE4 (4U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE5 (5U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE6 (6U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE7 (7U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED		(9U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED		(11U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE				(0U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT		(1U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT		(2U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_32X2_DIRECT		(3U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT		(4U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT		(5U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE	(6U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE	(7U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR					(1U << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBC_LOSSY					(1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_BASE			(1U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_ENHANCED		(2U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2				(3U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_SURFACE		(4U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_RESOURCE		(5U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
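+
+/*
+ * Illustrative sketch: an address-mode word is built by OR-ing one
+ * value from each field above, e.g. a twiddled surface with direct
+ * 8x8 frame buffer compression:
+ *
+ *     IMG_UINT32 ui32AddrMode = PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED |
+ *                               PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT;
+ *
+ * A field can be recovered with its mask, e.g.
+ * (ui32AddrMode & PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK).
+ */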
+
+/*! PDump Poll Operator */
+typedef enum _PDUMP_POLL_OPERATOR
+{
+	PDUMP_POLL_OPERATOR_EQUAL = 0,
+	PDUMP_POLL_OPERATOR_LESS = 1,
+	PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
+	PDUMP_POLL_OPERATOR_GREATER = 3,
+	PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
+	PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
+} PDUMP_POLL_OPERATOR;
+
+
+#define PVRSRV_PDUMP_MAX_FILENAME_SIZE			75  /*!< Max length of a pdump log file name */
+#define PVRSRV_PDUMP_MAX_COMMENT_SIZE			350 /*!< Max length of a pdump comment */
+
+/*!
+	PDump MMU type
+	(Maps to values listed in "PowerVR Tools.Pdump2 Script Functions.doc" Sec 2.13)
+*/
+typedef enum
+{
+	PDUMP_MMU_TYPE_4KPAGE_32BIT_STDTILE  = 1,
+	PDUMP_MMU_TYPE_VARPAGE_32BIT_STDTILE = 2,
+	PDUMP_MMU_TYPE_4KPAGE_36BIT_EXTTILE  = 3,
+	PDUMP_MMU_TYPE_4KPAGE_32BIT_EXTTILE  = 4,
+	PDUMP_MMU_TYPE_4KPAGE_36BIT_STDTILE  = 5,
+	PDUMP_MMU_TYPE_VARPAGE_40BIT         = 6,
+	PDUMP_MMU_TYPE_VIDEO_40BIT_STDTILE   = 7,
+	PDUMP_MMU_TYPE_VIDEO_40BIT_EXTTILE   = 8,
+	PDUMP_MMU_TYPE_MIPS_MICROAPTIV       = 9,
+	PDUMP_MMU_TYPE_LAST
+} PDUMP_MMU_TYPE;
+
+/*!
+	PDump states
+	These values are used by the bridge call PVRSRVPDumpGetState
+*/
+#define PDUMP_STATE_CAPTURE_FRAME	(1)		/*!< Flag representing whether PDump is in the capture range */
+#define PDUMP_STATE_CONNECTED		(2)		/*!< Flag representing whether the PDump client app is connected */
+#define PDUMP_STATE_SUSPENDED		(4)		/*!< Flag representing whether PDump is suspended */
+
+/*!
+	PDump Capture modes
+	Values used with calls to PVRSRVPDumpSetDefaultCaptureParams
+*/
+#define PDUMP_CAPMODE_UNSET                     0x00000000UL
+#define PDUMP_CAPMODE_FRAMED                    0x00000001UL
+#define PDUMP_CAPMODE_CONTINUOUS                0x00000002UL
+#define PDUMP_CAPMODE_BLOCKED                   0x00000003UL
+
+#define PDUMP_CAPMODE_MAX                       PDUMP_CAPMODE_BLOCKED
+
+
+#endif /* PDUMPDEFS_H */
+
+/*****************************************************************************
+ End of file (pdumpdefs.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physheap.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physheap.c
new file mode 100644
index 0000000..4343e5b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physheap.c
@@ -0,0 +1,352 @@
+/*************************************************************************/ /*!
+@File           physheap.c
+@Title          Physical heap management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Management functions for the physical heap(s). A heap contains
+                all the information required by services when using memory from
+                that heap (such as CPU <> Device physical address translation).
+                A system must register at least one heap but may have more
+                than one, which is why each heap registers with a
+                (system-wide) unique ID.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#include "img_types.h"
+#include "img_defs.h"
+#include "physheap.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+
+struct _PHYS_HEAP_
+{
+	/*! ID of this physical memory heap */
+	IMG_UINT32					ui32PhysHeapID;
+	/*! The type of this heap */
+	PHYS_HEAP_TYPE			eType;
+
+	/*! PDump name of this physical memory heap */
+	IMG_CHAR					*pszPDumpMemspaceName;
+	/*! Private data for the translate routines */
+	IMG_HANDLE					hPrivData;
+	/*! Function callbacks */
+	PHYS_HEAP_FUNCTIONS			*psMemFuncs;
+
+	/*! Array of sub-regions of the heap */
+	PHYS_HEAP_REGION			*pasRegions;
+	IMG_UINT32					ui32NumOfRegions;
+
+	/*! Refcount */
+	IMG_UINT32					ui32RefCount;
+	/*! Pointer to next physical heap */
+	struct _PHYS_HEAP_		*psNext;
+};
+
+static PHYS_HEAP *g_psPhysHeapList;
+static POS_LOCK g_hPhysHeapLock;
+
+#if defined(REFCOUNT_DEBUG)
+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...)	\
+	PVRSRVDebugPrintf(PVR_DBG_WARNING,	\
+			  __FILE__,		\
+			  __LINE__,		\
+			  fmt,			\
+			  __VA_ARGS__)
+#else
+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+
+PVRSRV_ERROR PhysHeapRegister(PHYS_HEAP_CONFIG *psConfig,
+							  PHYS_HEAP **ppsPhysHeap)
+{
+	PHYS_HEAP *psNew;
+	PHYS_HEAP *psTmp;
+
+	PVR_DPF_ENTERED;
+
+	if (psConfig->eType == PHYS_HEAP_TYPE_UNKNOWN)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Check this heap ID isn't already in use */
+	psTmp = g_psPhysHeapList;
+	while (psTmp)
+	{
+		if (psTmp->ui32PhysHeapID == psConfig->ui32PhysHeapID)
+		{
+			return PVRSRV_ERROR_PHYSHEAP_ID_IN_USE;
+		}
+		psTmp = psTmp->psNext;
+	}
+
+	psNew = OSAllocMem(sizeof(PHYS_HEAP));
+	if (psNew == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psNew->ui32PhysHeapID = psConfig->ui32PhysHeapID;
+	psNew->eType = psConfig->eType;
+	psNew->psMemFuncs = psConfig->psMemFuncs;
+	psNew->hPrivData = psConfig->hPrivData;
+	psNew->ui32RefCount = 0;
+	psNew->pszPDumpMemspaceName = psConfig->pszPDumpMemspaceName;
+
+	psNew->pasRegions = psConfig->pasRegions;
+	psNew->ui32NumOfRegions = psConfig->ui32NumOfRegions;
+
+	psNew->psNext = g_psPhysHeapList;
+	g_psPhysHeapList = psNew;
+
+	*ppsPhysHeap = psNew;
+
+	PVR_DPF_RETURN_RC1(PVRSRV_OK, *ppsPhysHeap);
+}
+
+void PhysHeapUnregister(PHYS_HEAP *psPhysHeap)
+{
+	PVR_DPF_ENTERED1(psPhysHeap);
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+#endif
+	{
+		PVR_ASSERT(psPhysHeap->ui32RefCount == 0);
+	}
+
+	if (g_psPhysHeapList == psPhysHeap)
+	{
+		g_psPhysHeapList = psPhysHeap->psNext;
+	}
+	else
+	{
+		PHYS_HEAP *psTmp = g_psPhysHeapList;
+
+		while (psTmp->psNext != psPhysHeap)
+		{
+			psTmp = psTmp->psNext;
+		}
+		psTmp->psNext = psPhysHeap->psNext;
+	}
+
+	OSFreeMem(psPhysHeap);
+
+	PVR_DPF_RETURN;
+}
+
+PVRSRV_ERROR PhysHeapAcquire(IMG_UINT32 ui32PhysHeapID,
+							 PHYS_HEAP **ppsPhysHeap)
+{
+	PHYS_HEAP *psTmp = g_psPhysHeapList;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_DPF_ENTERED1(ui32PhysHeapID);
+
+	OSLockAcquire(g_hPhysHeapLock);
+
+	while (psTmp)
+	{
+		if (psTmp->ui32PhysHeapID == ui32PhysHeapID)
+		{
+			break;
+		}
+		psTmp = psTmp->psNext;
+	}
+
+	if (psTmp == NULL)
+	{
+		eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
+	}
+	else
+	{
+		psTmp->ui32RefCount++;
+		PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d",
+								__func__, psTmp, psTmp->ui32RefCount);
+	}
+
+	OSLockRelease(g_hPhysHeapLock);
+
+	*ppsPhysHeap = psTmp;
+	PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap);
+}
+
+void PhysHeapRelease(PHYS_HEAP *psPhysHeap)
+{
+	PVR_DPF_ENTERED1(psPhysHeap);
+
+	OSLockAcquire(g_hPhysHeapLock);
+	psPhysHeap->ui32RefCount--;
+	PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d",
+							__func__, psPhysHeap, psPhysHeap->ui32RefCount);
+	OSLockRelease(g_hPhysHeapLock);
+
+	PVR_DPF_RETURN;
+}
+
+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap)
+{
+	return psPhysHeap->eType;
+}
+
+/*
+ * Sets *psDevPAddr to whatever the system layer configured for the
+ * referenced region. Only an out-of-range region ID is reported as an
+ * error; the address value itself is not validated.
+ */
+PVRSRV_ERROR PhysHeapRegionGetDevPAddr(PHYS_HEAP *psPhysHeap,
+								IMG_UINT32 ui32RegionId,
+								IMG_DEV_PHYADDR *psDevPAddr)
+{
+	if (ui32RegionId < psPhysHeap->ui32NumOfRegions)
+	{
+		*psDevPAddr = psPhysHeap->pasRegions[ui32RegionId].sCardBase;
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+}
+
+/*
+ * Sets *psCpuPAddr to whatever the system layer configured for the
+ * referenced region. Only an out-of-range region ID is reported as an
+ * error; the address value itself is not validated.
+ */
+PVRSRV_ERROR PhysHeapRegionGetCpuPAddr(PHYS_HEAP *psPhysHeap,
+								IMG_UINT32 ui32RegionId,
+								IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	if (ui32RegionId < psPhysHeap->ui32NumOfRegions)
+	{
+		*psCpuPAddr = psPhysHeap->pasRegions[ui32RegionId].sStartAddr;
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+}
+
+PVRSRV_ERROR PhysHeapRegionGetSize(PHYS_HEAP *psPhysHeap,
+								   IMG_UINT32 ui32RegionId,
+								   IMG_UINT64 *puiSize)
+{
+	if (ui32RegionId < psPhysHeap->ui32NumOfRegions)
+	{
+		*puiSize = psPhysHeap->pasRegions[ui32RegionId].uiSize;
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+}
+
+void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
+								IMG_UINT32 ui32NumOfAddr,
+								IMG_DEV_PHYADDR *psDevPAddr,
+								IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	psPhysHeap->psMemFuncs->pfnCpuPAddrToDevPAddr(psPhysHeap->hPrivData,
+												 ui32NumOfAddr,
+												 psDevPAddr,
+												 psCpuPAddr);
+}
+
+void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap,
+								IMG_UINT32 ui32NumOfAddr,
+								IMG_CPU_PHYADDR *psCpuPAddr,
+								IMG_DEV_PHYADDR *psDevPAddr)
+{
+	psPhysHeap->psMemFuncs->pfnDevPAddrToCpuPAddr(psPhysHeap->hPrivData,
+												 ui32NumOfAddr,
+												 psCpuPAddr,
+												 psDevPAddr);
+}
+
+IMG_UINT32 PhysHeapGetRegionId(PHYS_HEAP *psPhysHeap,
+								PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+	if (psPhysHeap->psMemFuncs->pfnGetRegionId == NULL)
+	{
+		return 0;
+	}
+
+	return psPhysHeap->psMemFuncs->pfnGetRegionId(psPhysHeap->hPrivData,
+												 uiAllocFlags);
+}
+
+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap)
+{
+	return psPhysHeap->pszPDumpMemspaceName;
+}
+
+PVRSRV_ERROR PhysHeapInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	g_psPhysHeapList = NULL;
+
+	eError = OSLockCreate(&g_hPhysHeapLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PhysHeapLock: %s",
+										__func__,
+										PVRSRVGETERRORSTRING(eError)));
+		return eError;
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PhysHeapDeinit(void)
+{
+	PVR_ASSERT(g_psPhysHeapList == NULL);
+
+	OSLockDestroy(g_hPhysHeapLock);
+
+	return PVRSRV_OK;
+}
+
+IMG_UINT32 PhysHeapNumberOfRegions(PHYS_HEAP *psPhysHeap)
+{
+	return psPhysHeap->ui32NumOfRegions;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physheap.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physheap.h
new file mode 100644
index 0000000..1e6d637
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physheap.h
@@ -0,0 +1,160 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physical heap management header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the interface for the physical heap management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#ifndef _PHYSHEAP_H_
+#define _PHYSHEAP_H_
+
+typedef struct _PHYS_HEAP_ PHYS_HEAP;
+
+typedef void (*CpuPAddrToDevPAddr)(IMG_HANDLE hPrivData,
+								   IMG_UINT32 ui32NumOfAddr,
+								   IMG_DEV_PHYADDR *psDevPAddr,
+								   IMG_CPU_PHYADDR *psCpuPAddr);
+
+typedef void (*DevPAddrToCpuPAddr)(IMG_HANDLE hPrivData,
+								   IMG_UINT32 ui32NumOfAddr,
+								   IMG_CPU_PHYADDR *psCpuPAddr,
+								   IMG_DEV_PHYADDR *psDevPAddr);
+
+typedef IMG_UINT32 (*GetRegionId)(IMG_HANDLE hPrivData,
+								   PVRSRV_MEMALLOCFLAGS_T uiAllocationFlags);
+
+typedef struct _PHYS_HEAP_FUNCTIONS_
+{
+	/*! Translate CPU physical address to device physical address */
+	CpuPAddrToDevPAddr	pfnCpuPAddrToDevPAddr;
+	/*! Translate device physical address to CPU physical address */
+	DevPAddrToCpuPAddr	pfnDevPAddrToCpuPAddr;
+	/*! Return id of heap region to allocate from */
+	GetRegionId			pfnGetRegionId;
+} PHYS_HEAP_FUNCTIONS;
+
+typedef enum _PHYS_HEAP_TYPE_
+{
+	PHYS_HEAP_TYPE_UNKNOWN = 0,
+	PHYS_HEAP_TYPE_UMA,
+	PHYS_HEAP_TYPE_LMA,
+	PHYS_HEAP_TYPE_DMA,
+#if defined(SUPPORT_WRAP_EXTMEMOBJECT)
+	PHYS_HEAP_TYPE_WRAP,
+#endif
+} PHYS_HEAP_TYPE;
+
+typedef struct _PHYS_HEAP_REGION_
+{
+	IMG_CPU_PHYADDR			sStartAddr;
+	IMG_DEV_PHYADDR			sCardBase;
+	IMG_UINT64				uiSize;
+
+	IMG_HANDLE				hPrivData;
+} PHYS_HEAP_REGION;
+
+typedef struct _PHYS_HEAP_CONFIG_
+{
+	IMG_UINT32				ui32PhysHeapID;
+	PHYS_HEAP_TYPE			eType;
+	IMG_CHAR				*pszPDumpMemspaceName;
+	PHYS_HEAP_FUNCTIONS		*psMemFuncs;
+
+	PHYS_HEAP_REGION		*pasRegions;
+	IMG_UINT32				ui32NumOfRegions;
+	IMG_BOOL				bDynAlloc;
+
+	IMG_HANDLE				hPrivData;
+} PHYS_HEAP_CONFIG;
+
+PVRSRV_ERROR PhysHeapRegister(PHYS_HEAP_CONFIG *psConfig,
+							  PHYS_HEAP **ppsPhysHeap);
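+
+/*
+ * Illustrative sketch (hypothetical system-layer code, not part of
+ * this file): registering a single-region UMA heap. The addresses,
+ * names, and the gsHeapFuncs table are examples only; real values
+ * come from the platform's system layer.
+ *
+ *     static PHYS_HEAP_REGION gsRegion = {
+ *         .sStartAddr = { .uiAddr = 0x80000000 },
+ *         .sCardBase  = { .uiAddr = 0x80000000 },
+ *         .uiSize     = 0x10000000,
+ *     };
+ *     static PHYS_HEAP_CONFIG gsHeapConfig = {
+ *         .ui32PhysHeapID       = 0,
+ *         .eType                = PHYS_HEAP_TYPE_UMA,
+ *         .pszPDumpMemspaceName = "SYSMEM",
+ *         .psMemFuncs           = &gsHeapFuncs,
+ *         .pasRegions           = &gsRegion,
+ *         .ui32NumOfRegions     = 1,
+ *     };
+ *     PHYS_HEAP *psHeap;
+ *     eError = PhysHeapRegister(&gsHeapConfig, &psHeap);
+ */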
+
+void PhysHeapUnregister(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapAcquire(IMG_UINT32 ui32PhysHeapID,
+							 PHYS_HEAP **ppsPhysHeap);
+
+void PhysHeapRelease(PHYS_HEAP *psPhysHeap);
+
+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapRegionGetCpuPAddr(PHYS_HEAP *psPhysHeap,
+									   IMG_UINT32 ui32RegionId,
+								IMG_CPU_PHYADDR *psCpuPAddr);
+
+
+PVRSRV_ERROR PhysHeapRegionGetSize(PHYS_HEAP *psPhysHeap,
+								   IMG_UINT32 ui32RegionId,
+								   IMG_UINT64 *puiSize);
+
+PVRSRV_ERROR PhysHeapRegionGetDevPAddr(PHYS_HEAP *psPhysHeap,
+									   IMG_UINT32 ui32RegionId,
+									   IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_UINT32 PhysHeapNumberOfRegions(PHYS_HEAP *psPhysHeap);
+
+void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
+								IMG_UINT32 ui32NumOfAddr,
+								IMG_DEV_PHYADDR *psDevPAddr,
+								IMG_CPU_PHYADDR *psCpuPAddr);
+
+void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap,
+								IMG_UINT32 ui32NumOfAddr,
+								IMG_CPU_PHYADDR *psCpuPAddr,
+								IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_UINT32 PhysHeapGetRegionId(PHYS_HEAP *psPhysHeap,
+						PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
+
+
+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapInit(void);
+PVRSRV_ERROR PhysHeapDeinit(void);
+
+#endif /* _PHYSHEAP_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem.c
new file mode 100644
index 0000000..d224eaa
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem.c
@@ -0,0 +1,635 @@
+/*************************************************************************/ /*!
+@File           physmem.c
+@Title          Physmem
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common entry point for creation of RAM-backed PMRs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "device.h"
+#include "physmem.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "pdump_physmem.h"
+#include "pdump_km.h"
+#include "rgx_heaps.h"
+
+#if defined(DEBUG)
+static IMG_UINT32 gPMRAllocFail;
+
+#if defined(LINUX)
+#include <linux/moduleparam.h>
+
+module_param(gPMRAllocFail, uint, 0644);
+MODULE_PARM_DESC(gPMRAllocFail, "When the number of PMR allocations reaches "
+				 "this value, the allocation fails (the default of 0 "
+				 "means allocations behave normally).");
+#endif /* defined(LINUX) */
+#endif /* defined(DEBUG) */
+
+PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE	*psDevNode,
+                             IMG_UINT32 ui32MemSize,
+                             IMG_UINT32 ui32Log2Align,
+                             const IMG_UINT8 u8Value,
+                             IMG_BOOL bInitPage,
+#if defined(PDUMP)
+                             const IMG_CHAR *pszDevSpace,
+                             const IMG_CHAR *pszSymbolicAddress,
+                             IMG_HANDLE *phHandlePtr,
+#endif
+                             IMG_HANDLE hMemHandle,
+                             IMG_DEV_PHYADDR *psDevPhysAddr)
+{
+	void *pvCpuVAddr;
+	PVRSRV_ERROR eError;
+#if defined(PDUMP)
+	IMG_CHAR szFilenameOut[PDUMP_PARAM_MAX_FILE_NAME];
+	PDUMP_FILEOFFSET_T uiOffsetOut;
+	IMG_UINT32 ui32PageSize;
+	IMG_UINT32 ui32PDumpMemSize = ui32MemSize;
+#endif
+	PG_HANDLE *psMemHandle;
+	IMG_UINT64 uiMask;
+	IMG_DEV_PHYADDR sDevPhysAddr_int;
+
+	psMemHandle = hMemHandle;
+
+	/* Allocate the pages */
+	eError = psDevNode->pfnDevPxAlloc(psDevNode,
+	                                  TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize),
+	                                  psMemHandle,
+	                                  &sDevPhysAddr_int);
+	if (PVRSRV_OK != eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Unable to allocate the pages"));
+		return eError;
+	}
+
+	/* Check whether the page allocator returned pages with the desired
+	 * alignment; if not, over-allocate and align the address manually.
+	 */
+	uiMask = ((IMG_UINT64)1 << ui32Log2Align) - 1;
+	if (ui32Log2Align && (sDevPhysAddr_int.uiAddr & uiMask))
+	{
+		/* use over allocation instead */
+		psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+
+		ui32MemSize += (IMG_UINT32) uiMask;
+		eError = psDevNode->pfnDevPxAlloc(psDevNode,
+		                                  TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize),
+		                                  psMemHandle,
+		                                  &sDevPhysAddr_int);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Unable to over-allocate the pages"));
+			return eError;
+		}
+
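+		/* e.g. with ui32Log2Align == 12 (4K pages): uiMask == 0xFFF, so a
+		 * start address of 0x1234 rounds up to (0x1234 + 0xFFF) & ~0xFFF
+		 * == 0x2000, the next 4K boundary inside the over-allocation. */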
+		sDevPhysAddr_int.uiAddr += uiMask;
+		sDevPhysAddr_int.uiAddr &= ~uiMask;
+	}
+	*psDevPhysAddr = sDevPhysAddr_int;
+
+#if defined(PDUMP)
+	ui32PageSize = ui32Log2Align? (1 << ui32Log2Align) : OSGetPageSize();
+	eError = PDumpMalloc(pszDevSpace,
+								pszSymbolicAddress,
+								ui32PDumpMemSize,
+								ui32PageSize,
+								IMG_FALSE,
+								0,
+								phHandlePtr,
+								PDUMP_NONE);
+	if (PVRSRV_OK != eError)
+	{
+		PDUMPCOMMENT("Allocating pages failed");
+		*phHandlePtr = NULL;
+	}
+#endif
+
+	if (bInitPage)
+	{
+		/* Map the page into the CPU VA space */
+		eError = psDevNode->pfnDevPxMap(psDevNode,
+		                                psMemHandle,
+		                                ui32MemSize,
+		                                &sDevPhysAddr_int,
+		                                &pvCpuVAddr);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Unable to map the allocated page"));
+			psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+			return eError;
+		}
+
+		/* Fill the memory with the given content */
+		OSDeviceMemSet(pvCpuVAddr, u8Value, ui32MemSize);
+
+		/* Flush the CPU cache so the device sees the initialised contents */
+		eError = psDevNode->pfnDevPxClean(psDevNode,
+		                                  psMemHandle,
+		                                  0,
+		                                  ui32MemSize);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Unable to clean the allocated page"));
+			psDevNode->pfnDevPxUnMap(psDevNode, psMemHandle, pvCpuVAddr);
+			psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+			return eError;
+		}
+
+#if defined(PDUMP)
+		/* PDumping of the page contents can be done in two ways:
+		 * 1. Store the single byte init value to the .prm file
+		 *    and load that value to the entire dummy page buffer.
+		 *    This method requires many LDBs to be inserted into out2.txt.
+		 *
+		 * 2. Store the entire contents of the buffer to the .prm file
+		 *    and load them back. This needs only a single LDB instruction
+		 *    in the script file, so this method is used here. The size of
+		 *    the .prm file may grow, but not significantly for this
+		 *    allocation.
+		 */
+		/* Write the buffer contents to the .prm file */
+		eError = PDumpWriteParameterBlob(pvCpuVAddr,
+		                          ui32PDumpMemSize,
+		                          PDUMP_FLAGS_CONTINUOUS,
+		                          szFilenameOut,
+		                          sizeof(szFilenameOut),
+		                          &uiOffsetOut);
+		if (PVRSRV_OK == eError)
+		{
+			/* Load the buffer back to the allocated memory when playing the pdump */
+			eError = PDumpPMRLDB(pszDevSpace,
+			                     pszSymbolicAddress,
+			                     0,
+			                     ui32PDumpMemSize,
+			                     szFilenameOut,
+			                     uiOffsetOut,
+			                     PDUMP_FLAGS_CONTINUOUS);
+			if (PVRSRV_OK != eError)
+			{
+				PDUMP_ERROR(eError, "Failed to write LDB statement to script file");
+				PVR_DPF((PVR_DBG_ERROR, "Failed to write LDB statement to script file, error %d", eError));
+			}
+		}
+		else if (eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+		{
+			PDUMP_ERROR(eError, "Failed to write device allocation to parameter file");
+			PVR_DPF((PVR_DBG_ERROR, "Failed to write device allocation to parameter file, error %d", eError));
+		}
+		else
+		{
+			/* Write to the parameter file was prevented under the flags and
+			 * current state of the driver, so skip the script write and the
+			 * error IF. This is normal, e.g. when not in capture range.
+			 */
+			eError = PVRSRV_OK;
+		}
+#endif
+
+		/* Unmap the page */
+		psDevNode->pfnDevPxUnMap(psDevNode,
+		                         psMemHandle,
+		                         pvCpuVAddr);
+	}
+
+	return PVRSRV_OK;
+}
+
+void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode,
+#if defined(PDUMP)
+							IMG_HANDLE hPDUMPMemHandle,
+#endif
+							IMG_HANDLE	hMemHandle)
+{
+	PG_HANDLE *psMemHandle;
+
+	psMemHandle = hMemHandle;
+	psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+#if defined(PDUMP)
+	if (NULL != hPDUMPMemHandle)
+	{
+		PDumpFree(hPDUMPMemHandle);
+	}
+#endif
+
+}
+
+
+/* Checks the input parameters and adjusts them if possible and necessary */
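+/* Example (illustrative): on a 4KB-page system, a sparse request with
+ * uiLog2AllocPageSize = 12, uiChunkSize = 4096, ui32NumVirtChunks = 8,
+ * ui32NumPhysChunks = 4 and uiSize = 8 * 4096 passes all of the checks
+ * below: the virtual space covers 8 pages, of which only 4 are physically
+ * backed.
+ */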
+static inline PVRSRV_ERROR _ValidateParams(IMG_UINT32 ui32NumPhysChunks,
+                                           IMG_UINT32 ui32NumVirtChunks,
+                                           PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                           IMG_UINT32 *puiLog2AllocPageSize,
+                                           IMG_DEVMEM_SIZE_T *puiSize,
+                                           PMR_SIZE_T *puiChunkSize)
+{
+	IMG_UINT32 uiLog2AllocPageSize = *puiLog2AllocPageSize;
+	IMG_DEVMEM_SIZE_T uiSize = *puiSize;
+	PMR_SIZE_T uiChunkSize = *puiChunkSize;
+	/* An allocation is sparse if the numbers of virtual and physical chunks
+	 * differ or, in general, if there is more than one virtual chunk */
+	IMG_BOOL bIsSparse = (ui32NumVirtChunks != ui32NumPhysChunks ||
+			ui32NumVirtChunks > 1) ? IMG_TRUE : IMG_FALSE;
+
+	/* Protect against ridiculous page sizes */
+	if (uiLog2AllocPageSize > RGX_HEAP_2MB_PAGE_SHIFT)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Page size is too big: 2^%u.", uiLog2AllocPageSize));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Sanity check of the alloc size */
+	if (uiSize >= 0x1000000000ULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Cancelling allocation request of over 64 GB. "
+				 "This is likely a bug."
+				, __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Fail if requesting coherency on one side but uncached on the other */
+	if ( (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) &&
+	         (PVRSRV_CHECK_GPU_UNCACHED(uiFlags) || PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags))) )
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Request for CPU coherency but specifying GPU uncached. "
+				"Please use GPU cached flags for coherency."));
+		return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+	}
+
+	if ( (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) &&
+	         (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags))) )
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Request for GPU coherency but specifying CPU uncached. "
+				"Please use CPU cached flags for coherency."));
+		return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+	}
+
+	if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) && PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Zero on Alloc and Poison on Alloc are mutually exclusive.",
+				__func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (bIsSparse)
+	{
+		/* Sparse allocations need stricter parameter checks, starting with a suitable page size ... */
+		if (OSGetPageShift() > uiLog2AllocPageSize)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Invalid log2-contiguity for sparse allocation. "
+					"Requested %u, required minimum %zd",
+					__func__,
+					uiLog2AllocPageSize,
+					OSGetPageShift() ));
+
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		/* ... chunk size must be equal to the page size ... */
+		if ( uiChunkSize != (1 << uiLog2AllocPageSize) )
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Invalid chunk size for sparse allocation. Requested "
+					 "%#" IMG_UINT64_FMTSPECx ", must be same as page size %#x.",
+					__func__, uiChunkSize, 1 << uiLog2AllocPageSize));
+
+			return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+		}
+
+		if (ui32NumVirtChunks * uiChunkSize != uiSize)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Total alloc size (%#" IMG_UINT64_FMTSPECx ") "
+					 "is not equal to virtual chunks * chunk size "
+					 "(%#" IMG_UINT64_FMTSPECx ")",
+					__func__, uiSize, ui32NumVirtChunks * uiChunkSize));
+
+			return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+		}
+
+		if (ui32NumPhysChunks > ui32NumVirtChunks)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Number of physical chunks (%u) must not be greater "
+					"than number of virtual chunks (%u)",
+					__func__,
+					ui32NumPhysChunks,
+					ui32NumVirtChunks));
+
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+	else
+	{
+		/*
+		 * Silently round up the alignment/page size if the request was less
+		 * than PAGE_SHIFT, because it can never be harmful for memory to be
+		 * _more_ contiguous than was desired.
+		 */
+		uiLog2AllocPageSize = OSGetPageShift() > uiLog2AllocPageSize ?
+				OSGetPageShift() : uiLog2AllocPageSize;
+
+		/* Same for total size */
+		uiSize = PVR_ALIGN(uiSize, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+		*puiChunkSize = uiSize;
+	}
+
+	if ((uiSize & ((1ULL << uiLog2AllocPageSize) - 1)) != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Total size (%#" IMG_UINT64_FMTSPECx ") "
+				 "must be a multiple of the requested contiguity (%u)",
+				 __func__, uiSize, 1 << uiLog2AllocPageSize));
+		return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+	}
+
+	*puiLog2AllocPageSize = uiLog2AllocPageSize;
+	*puiSize = uiSize;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection,
+                       PVRSRV_DEVICE_NODE *psDevNode,
+                       IMG_DEVMEM_SIZE_T uiSize,
+                       PMR_SIZE_T uiChunkSize,
+                       IMG_UINT32 ui32NumPhysChunks,
+                       IMG_UINT32 ui32NumVirtChunks,
+                       IMG_UINT32 *pui32MappingTable,
+                       IMG_UINT32 uiLog2AllocPageSize,
+                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                       IMG_UINT32 uiAnnotationLength,
+                       const IMG_CHAR *pszAnnotation,
+                       IMG_PID uiPid,
+                       PMR **ppsPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx;
+	PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize =
+		psDevNode->psDevConfig->pfnCheckMemAllocSize;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(uiAnnotationLength);
+
+	eError = _ValidateParams(ui32NumPhysChunks,
+	                         ui32NumVirtChunks,
+	                         uiFlags,
+	                         &uiLog2AllocPageSize,
+	                         &uiSize,
+	                         &uiChunkSize);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* Lookup the requested physheap index to use for this PMR allocation */
+	if (PVRSRV_CHECK_FW_LOCAL(uiFlags))
+	{
+		if (PVRSRV_CHECK_FW_GUEST(uiFlags))
+		{
+			ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST;
+			if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST))
+			{
+				/* Shouldn't be reaching this code */
+				return PVRSRV_ERROR_INTERNAL_ERROR;
+			}
+		}
+		else
+		{
+			ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+		}
+	}
+	else if (PVRSRV_CHECK_CPU_LOCAL(uiFlags))
+	{
+		ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL;
+	}
+	else
+	{
+		ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL;
+	}
+
+	if (NULL == psDevNode->apsPhysHeap[ePhysHeapIdx])
+	{
+		/* In case a heap hasn't been acquired for this type, return invalid heap error */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Requested allocation on device node (%p) from "
+		        "an invalid heap (HeapIndex=%d)",
+		        __func__, psDevNode, ePhysHeapIdx));
+		return PVRSRV_ERROR_INVALID_HEAP;
+	}
+
+	/* Apply memory budgeting policy */
+	if (pfnCheckMemAllocSize)
+	{
+		IMG_UINT64 uiMemSize = (IMG_UINT64)uiChunkSize * ui32NumPhysChunks;
+		PVRSRV_ERROR eError;
+
+		eError = pfnCheckMemAllocSize(psDevNode->psDevConfig->hSysData, uiMemSize);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+#if defined(DEBUG)
+	if (gPMRAllocFail > 0)
+	{
+		static IMG_UINT32 ui32AllocCount = 1;
+
+		if (ui32AllocCount < gPMRAllocFail)
+		{
+			ui32AllocCount++;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s failed on allocation %d.",
+			         __func__, ui32AllocCount));
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+	}
+#endif /* defined(DEBUG) */
+
+	return psDevNode->pfnCreateRamBackedPMR[ePhysHeapIdx](psDevNode,
+											uiSize,
+											uiChunkSize,
+											ui32NumPhysChunks,
+											ui32NumVirtChunks,
+											pui32MappingTable,
+											uiLog2AllocPageSize,
+											uiFlags,
+											pszAnnotation,
+											uiPid,
+											ppsPMRPtr);
+}
+
+PVRSRV_ERROR
+PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection,
+							PVRSRV_DEVICE_NODE *psDevNode,
+							IMG_DEVMEM_SIZE_T uiSize,
+							PMR_SIZE_T uiChunkSize,
+							IMG_UINT32 ui32NumPhysChunks,
+							IMG_UINT32 ui32NumVirtChunks,
+							IMG_UINT32 *pui32MappingTable,
+							IMG_UINT32 uiLog2PageSize,
+							PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							IMG_UINT32 uiAnnotationLength,
+							const IMG_CHAR *pszAnnotation,
+							IMG_PID uiPid,
+							PMR **ppsPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	eError = PhysmemNewRamBackedPMR(psConnection,
+									psDevNode,
+									uiSize,
+									uiChunkSize,
+									ui32NumPhysChunks,
+									ui32NumVirtChunks,
+									pui32MappingTable,
+									uiLog2PageSize,
+									uiFlags,
+									uiAnnotationLength,
+									pszAnnotation,
+									uiPid,
+									ppsPMRPtr);
+
+	if (eError == PVRSRV_OK)
+	{
+		eError = PMRLockSysPhysAddresses(*ppsPMRPtr);
+	}
+
+	return eError;
+}
+
+static void GetLMASize( IMG_DEVMEM_SIZE_T *puiLMASize,
+			PVRSRV_DEVICE_NODE *psDevNode )
+{
+	IMG_UINT uiRegionIndex = 0, uiNumRegions = 0;
+	PVR_ASSERT(psDevNode);
+
+	uiNumRegions = psDevNode->psDevConfig->pasPhysHeaps[0].ui32NumOfRegions;
+
+	for (uiRegionIndex = 0; uiRegionIndex < uiNumRegions; ++uiRegionIndex)
+	{
+		*puiLMASize += psDevNode->psDevConfig->pasPhysHeaps[0].pasRegions[uiRegionIndex].uiSize;
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVGetMaxDevMemSizeKM( CONNECTION_DATA * psConnection,
+			  PVRSRV_DEVICE_NODE *psDevNode,
+			  IMG_DEVMEM_SIZE_T *puiLMASize,
+			  IMG_DEVMEM_SIZE_T *puiUMASize )
+{
+	IMG_BOOL bLMA = IMG_FALSE, bUMA = IMG_FALSE;
+
+	*puiLMASize = 0;
+	*puiUMASize = 0;
+
+#if defined(TC_MEMORY_CONFIG)			/* For TC2 */
+#if (TC_MEMORY_CONFIG == TC_MEMORY_LOCAL)
+	bLMA = IMG_TRUE;
+#elif (TC_MEMORY_CONFIG == TC_MEMORY_HOST)
+	bUMA = IMG_TRUE;
+#else
+	bUMA = IMG_TRUE;
+	bLMA = IMG_TRUE;
+#endif
+
+#elif defined(PLATO_MEMORY_CONFIG)		/* For Plato TC */
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL)
+	bLMA = IMG_TRUE;
+#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST)
+	bUMA = IMG_TRUE;
+#else
+	bUMA = IMG_TRUE;
+	bLMA = IMG_TRUE;
+#endif
+
+#elif defined(LMA)				/* For emu, vp_linux */
+	bLMA = IMG_TRUE;
+
+#else						/* For all other platforms */
+	bUMA = IMG_TRUE;
+#endif
+
+	if (bLMA) { GetLMASize(puiLMASize, psDevNode); }
+	if (bUMA) { *puiUMASize = OSGetRAMSize(); }
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	return PVRSRV_OK;
+}
+
+/* 'Wrapper' function to call PMRImportPMR(), which
+ * first checks the PMR is for the current device.
+ * This avoids the need to do this in pmr.c, which
+ * would then need PVRSRV_DEVICE_NODE (defining this
+ * type in pmr.h causes a typedef redefinition issue).
+ */
+PVRSRV_ERROR
+PhysmemImportPMR(CONNECTION_DATA *psConnection,
+             PVRSRV_DEVICE_NODE *psDevNode,
+             PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (PMRGetExportDeviceNode(psPMRExport) != psDevNode)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", __func__));
+		return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+	}
+
+	return PMRImportPMR(psPMRExport,
+	                    uiPassword,
+	                    uiSize,
+	                    uiLog2Contig,
+	                    ppsPMR);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem.h
new file mode 100644
index 0000000..4a839de
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem.h
@@ -0,0 +1,239 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physmem header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for common entry point for creation of RAM backed PMRs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SRVSRV_PHYSMEM_H_
+#define _SRVSRV_PHYSMEM_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "connection_server.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/* Valid values for TC_MEMORY_CONFIG configuration option */
+#define TC_MEMORY_LOCAL			(1)
+#define TC_MEMORY_HOST			(2)
+#define TC_MEMORY_HYBRID		(3)
+
+/* Valid values for the PLATO_MEMORY_CONFIG configuration option */
+#define PLATO_MEMORY_LOCAL		(1)
+#define PLATO_MEMORY_HOST		(2)
+#define PLATO_MEMORY_HYBRID		(3)
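+
+/* These are normally chosen at build time, e.g. (illustrative):
+ *
+ *     ccflags-y += -DTC_MEMORY_CONFIG=TC_MEMORY_LOCAL
+ *
+ * PVRSRVGetMaxDevMemSizeKM() uses the chosen value to decide whether the
+ * LMA size, the UMA size, or both are reported.
+ */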
+
+/*************************************************************************/ /*!
+@Function       DevPhysMemAlloc
+
+@Description    Allocate memory from device specific heaps directly.
+
+@Input          psDevNode               device node to operate on
+@Input          ui32MemSize             Size of the memory to be allocated
+@Input          ui32Log2Align           Log2 of the required alignment
+@Input          u8Value                 Value the memory is initialised to
+@Input          bInitPage               Flag to control initialisation
+@Input          pszDevSpace             PDUMP memory space in which the
+                                          allocation is to be done
+@Input          pszSymbolicAddress      Symbolic name of the allocation
+@Output         phHandlePtr             PDUMP handle to the allocation
+@Output         hMemHandle              Handle to the allocated memory
+@Output         psDevPhysAddr           Device physical address of the
+                                          allocated page
+
+@Return         PVRSRV_OK if the allocation is successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+                                    IMG_UINT32 ui32MemSize,
+                                    IMG_UINT32 ui32Log2Align,
+                                    const IMG_UINT8 u8Value,
+                                    IMG_BOOL bInitPage,
+#if defined(PDUMP)
+                                    const IMG_CHAR *pszDevSpace,
+                                    const IMG_CHAR *pszSymbolicAddress,
+                                    IMG_HANDLE *phHandlePtr,
+#endif
+                                    IMG_HANDLE hMemHandle,
+                                    IMG_DEV_PHYADDR *psDevPhysAddr);
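+
+/* Usage sketch (illustrative, non-PDUMP build; the local variable names are
+ * assumptions): allocate one zero-initialised, page-aligned device page and
+ * later return it to the heap.
+ *
+ *     PG_HANDLE sMemHandle;
+ *     IMG_DEV_PHYADDR sDevPAddr;
+ *     PVRSRV_ERROR eError = DevPhysMemAlloc(psDevNode,
+ *                                           OSGetPageSize(),
+ *                                           OSGetPageShift(),
+ *                                           0x00, IMG_TRUE,
+ *                                           &sMemHandle, &sDevPAddr);
+ *     ...
+ *     DevPhysMemFree(psDevNode, &sMemHandle);
+ */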
+
+/*************************************************************************/ /*!
+@Function       DevPhysMemFree
+
+@Description    Free memory to device specific heaps directly.
+
+@Input          psDevNode               device node to operate on
+@Input          hPDUMPMemHandle         PDump handle to the allocated memory
+@Input          hMemHandle              Devmem handle to the allocated memory
+
+@Return         None
+*/
+/*****************************************************************************/
+extern void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode,
+#if defined(PDUMP)
+		IMG_HANDLE	hPDUMPMemHandle,
+#endif
+		IMG_HANDLE	hMemHandle);
+
+/*
+ * PhysmemNewRamBackedPMR
+ *
+ * This function will create a RAM backed PMR using the device specific
+ * callback, this allows control at a per-devicenode level to select the
+ * memory source thus supporting mixed UMA/LMA systems.
+ *
+ * The size must be a multiple of the page size.  The page size is
+ * specified in log2.  It should be regarded as a minimum contiguity
+ * of which the resulting memory must be a multiple.  It may
+ * be that this should be a fixed number.  It may be that the
+ * allocation size needs to be a multiple of some coarser "page size"
+ * than that specified in the page size argument.  For example, take
+ * an OS whose page granularity is a fixed 16kB, but the caller
+ * requests memory in page sizes of 4kB.  The request can be satisfied
+ * if and only if the SIZE requested is a multiple of 16kB.  If the
+ * arguments supplied are such that this OS cannot grant the request,
+ * PVRSRV_ERROR_INVALID_PARAMS will be returned.
+ *
+ * The caller should supply storage of a pointer.  Upon successful
+ * return a PMR object will have been created and a pointer to it
+ * returned in the PMROut argument.
+ *
+ * A PMR thusly created should be destroyed with PhysmemUnrefPMR.
+ *
+ * Note that this function may cause memory allocations and on some
+ * OSes this may cause scheduling events, so it is important that this
+ * function be called with interrupts enabled and in a context where
+ * scheduling events and memory allocations are permitted.
+ *
+ * The flags may be used by the implementation to change its behaviour
+ * if required.  The flags will also be stored in the PMR as immutable
+ * metadata and returned to mmu_common when it asks for it.
+ *
+ * The PID specified is used to tie this allocation to the process context
+ * that the allocation is made on behalf of.
+ */
+extern PVRSRV_ERROR
+PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection,
+                       PVRSRV_DEVICE_NODE *psDevNode,
+                       IMG_DEVMEM_SIZE_T uiSize,
+                       IMG_DEVMEM_SIZE_T uiChunkSize,
+                       IMG_UINT32 ui32NumPhysChunks,
+                       IMG_UINT32 ui32NumVirtChunks,
+                       IMG_UINT32 *pui32MappingTable,
+                       IMG_UINT32 uiLog2PageSize,
+                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                       IMG_UINT32 uiAnnotationLength,
+                       const IMG_CHAR *pszAnnotation,
+                       IMG_PID uiPid,
+                       PMR **ppsPMROut);
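+
+/* Usage sketch (illustrative; the flag and helper names are assumptions from
+ * pvrsrv_memallocflags.h and osfunc.h): a simple, non-sparse, GPU-local
+ * allocation of one OS page, with a single chunk covering the whole
+ * allocation and an identity mapping table.
+ *
+ *     IMG_UINT32 ui32MappingTable = 0;
+ *     PMR *psPMR;
+ *     PVRSRV_ERROR eError =
+ *         PhysmemNewRamBackedPMR(psConnection, psDevNode,
+ *                                OSGetPageSize(), OSGetPageSize(),
+ *                                1, 1, &ui32MappingTable,
+ *                                OSGetPageShift(),
+ *                                PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ *                                PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+ *                                sizeof("ExamplePMR"), "ExamplePMR",
+ *                                OSGetCurrentClientProcessIDKM(),
+ *                                &psPMR);
+ */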
+
+/*
+ * PhysmemNewRamBackedLockedPMR
+ *
+ * Same as the function above, but additionally locks down the PMR.
+ *
+ * Get the physical memory and lock down the PMR directly; we do not want to
+ * defer the actual allocation to mapping time.
+ *
+ * In general the concept of on-demand allocation is not useful for
+ * allocations where the user is free to map and unmap memory at will. The
+ * user does not expect the memory contents to suddenly vanish just because
+ * the buffer was unmapped.
+ * Even if the user knew and was OK with it, we do not want to check, for
+ * every page we unmap, whether we have to unlock the underlying PMR.
+*/
+extern PVRSRV_ERROR
+PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection,
+                             PVRSRV_DEVICE_NODE *psDevNode,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             PMR_SIZE_T uiChunkSize,
+                             IMG_UINT32 ui32NumPhysChunks,
+                             IMG_UINT32 ui32NumVirtChunks,
+                             IMG_UINT32 *pui32MappingTable,
+                             IMG_UINT32 uiLog2PageSize,
+                             PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                             IMG_UINT32 uiAnnotationLength,
+                             const IMG_CHAR *pszAnnotation,
+                             IMG_PID uiPid,
+                             PMR **ppsPMRPtr);
+
+/**************************************************************************/ /*!
+@Function       PhysmemImportPMR
+@Description    Import a previously exported PMR
+@Input          psPMRExport           The exported PMR token
+@Input          uiPassword            Authorisation password
+                                      for the PMR being imported
+@Input          uiSize                Size of the PMR being imported
+                                      (for verification)
+@Input          uiLog2Contig          Log2 contiguity of the PMR being
+                                      imported (for verification)
+@Output         ppsPMR                The imported PMR
+@Return         PVRSRV_ERROR_PMR_NOT_PERMITTED if not for the same device
+                PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR if password incorrect
+                PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES if size or contiguity incorrect
+                PVRSRV_OK if successful
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PhysmemImportPMR(CONNECTION_DATA *psConnection,
+                 PVRSRV_DEVICE_NODE *psDevNode,
+                 PMR_EXPORT *psPMRExport,
+                 PMR_PASSWORD_T uiPassword,
+                 PMR_SIZE_T uiSize,
+                 PMR_LOG2ALIGN_T uiLog2Contig,
+                 PMR **ppsPMR);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetMaxDevMemSizeKM
+@Description    Get the amount of device memory on the current platform
+@Output         puiLMASize            LMA memory size
+@Output         puiUMASize            UMA memory size
+@Return         PVRSRV_OK on success
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVGetMaxDevMemSizeKM( CONNECTION_DATA * psConnection,
+		                   PVRSRV_DEVICE_NODE *psDevNode,
+		                   IMG_DEVMEM_SIZE_T *puiLMASize,
+		                   IMG_DEVMEM_SIZE_T *puiUMASize);
+
+#endif /* _SRVSRV_PHYSMEM_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_dmabuf.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_dmabuf.c
new file mode 100644
index 0000000..cf3d7aca
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_dmabuf.c
@@ -0,0 +1,1182 @@
+/*************************************************************************/ /*!
+@File           physmem_dmabuf.c
+@Title          dmabuf memory allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for dmabuf memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include "physmem_dmabuf.h"
+#include "pvrsrv.h"
+#include "pmr.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP)
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pmr_impl.h"
+#include "hash.h"
+#include "private_data.h"
+#include "module_common.h"
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "ri_server.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+#include "mmap_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+/*
+ * dma_buf_ops
+ *
+ * These all return errors if used.
+ * The point is to prevent anyone outside of our driver from importing
+ * and using our dmabuf.
+ */
+
+static int PVRDmaBufOpsAttach(struct dma_buf *psDmaBuf,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+							  struct device *psDev,
+#endif
+							  struct dma_buf_attachment *psAttachment)
+{
+	return -ENOSYS;
+}
+
+static struct sg_table *PVRDmaBufOpsMap(struct dma_buf_attachment *psAttachment,
+                                      enum dma_data_direction eDirection)
+{
+	/* Attach hasn't been called yet */
+	return ERR_PTR(-EINVAL);
+}
+
+static void PVRDmaBufOpsUnmap(struct dma_buf_attachment *psAttachment,
+                           struct sg_table *psTable,
+                           enum dma_data_direction eDirection)
+{
+}
+
+static void PVRDmaBufOpsRelease(struct dma_buf *psDmaBuf)
+{
+	PMR *psPMR = (PMR *) psDmaBuf->priv;
+
+	PMRUnrefPMR(psPMR);
+}
+
+static void *PVRDmaBufOpsKMap(struct dma_buf *psDmaBuf, unsigned long uiPageNum)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static int PVRDmaBufOpsMMap(struct dma_buf *psDmaBuf, struct vm_area_struct *psVMA)
+{
+	return -ENOSYS;
+}
+
+static const struct dma_buf_ops sPVRDmaBufOps =
+{
+	.attach        = PVRDmaBufOpsAttach,
+	.map_dma_buf   = PVRDmaBufOpsMap,
+	.unmap_dma_buf = PVRDmaBufOpsUnmap,
+	.release       = PVRDmaBufOpsRelease,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+	.map_atomic    = PVRDmaBufOpsKMap,
+#endif
+	.map           = PVRDmaBufOpsKMap,
+#else
+	.kmap_atomic   = PVRDmaBufOpsKMap,
+	.kmap          = PVRDmaBufOpsKMap,
+#endif
+	.mmap          = PVRDmaBufOpsMMap,
+};
+
+/* end of dma_buf_ops */
+
+
+typedef struct _PMR_DMA_BUF_DATA_
+{
+	/* Filled in at PMR create time */
+	PHYS_HEAP *psPhysHeap;
+	struct dma_buf_attachment *psAttachment;
+	PFN_DESTROY_DMABUF_PMR pfnDestroy;
+	IMG_BOOL bPoisonOnFree;
+
+	/* Modified by PMR lock/unlock */
+	struct sg_table *psSgTable;
+	IMG_DEV_PHYADDR *pasDevPhysAddr;
+	IMG_UINT32 ui32PhysPageCount;
+	IMG_UINT32 ui32VirtPageCount;
+} PMR_DMA_BUF_DATA;
+
+/* Start size of the g_psDmaBufHash hash table */
+#define DMA_BUF_HASH_SIZE 20
+
+static DEFINE_MUTEX(g_HashLock);
+
+static HASH_TABLE *g_psDmaBufHash;
+static IMG_UINT32 g_ui32HashRefCount;
+
+#if defined(PVR_ANDROID_ION_USE_SG_LENGTH)
+#define pvr_sg_length(sg) ((sg)->length)
+#else
+#define pvr_sg_length(sg) sg_dma_len(sg)
+#endif
+
+/*****************************************************************************
+ *                       PMR callback functions                              *
+ *****************************************************************************/
+
+static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
+	struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+	struct sg_table *psSgTable = psPrivData->psSgTable;
+	PMR *psPMR;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (psDmaBuf->ops != &sPVRDmaBufOps)
+	{
+		if (g_psDmaBufHash)
+		{
+			/* We have a hash table so check if we've seen this dmabuf before */
+			psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+
+			if (psPMR)
+			{
+				if (!PMRIsPMRLive(psPMR))
+				{
+					HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+					g_ui32HashRefCount--;
+
+					if (g_ui32HashRefCount == 0)
+					{
+						HASH_Delete(g_psDmaBufHash);
+						g_psDmaBufHash = NULL;
+					}
+				}
+				else
+				{
+					eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+				}
+			}
+		}
+	}
+	else
+	{
+		psPMR = (PMR *) psDmaBuf->priv;
+		if (PMRIsPMRLive(psPMR))
+		{
+			eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+		}
+	}
+
+	if (PVRSRV_OK != eError)
+	{
+		return eError;
+	}
+
+	psPrivData->ui32PhysPageCount = 0;
+
+	dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL);
+
+	if (psPrivData->bPoisonOnFree)
+	{
+		void *pvKernAddr;
+		int i, err;
+
+		err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE);
+		if (err)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to begin cpu access for free poisoning (err=%d)",
+					 __func__, err));
+			PVR_ASSERT(IMG_FALSE);
+			goto exit;
+		}
+
+		for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
+		{
+			pvKernAddr = dma_buf_kmap(psDmaBuf, i);
+			if (IS_ERR_OR_NULL(pvKernAddr))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to poison allocation before free (err=%ld)",
+						 __func__, pvKernAddr ? PTR_ERR(pvKernAddr) : -ENOMEM));
+				PVR_ASSERT(IMG_FALSE);
+				goto exit_end_access;
+			}
+
+			memset(pvKernAddr, PVRSRV_POISON_ON_FREE_VALUE, PAGE_SIZE);
+
+			dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
+		}
+
+exit_end_access:
+		do {
+			err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+		} while (err == -EAGAIN || err == -EINTR);
+	}
+
+exit:
+	if (psPrivData->pfnDestroy)
+	{
+		eError = psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	OSFreeMem(psPrivData->pasDevPhysAddr);
+	OSFreeMem(psPrivData);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVR_UNREFERENCED_PARAMETER(pvPriv);
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVR_UNREFERENCED_PARAMETER(pvPriv);
+	return PVRSRV_OK;
+}
+
+static void PMRGetFactoryLock(void)
+{
+	mutex_lock(&g_HashLock);
+}
+
+static void PMRReleaseFactoryLock(void)
+{
+	mutex_unlock(&g_HashLock);
+}
+
+static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+					 IMG_UINT32 ui32Log2PageSize,
+					 IMG_UINT32 ui32NumOfPages,
+					 IMG_DEVMEM_OFFSET_T *puiOffset,
+					 IMG_BOOL *pbValid,
+					 IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	IMG_UINT32 ui32PageIndex;
+	IMG_UINT32 idx;
+
+	if (ui32Log2PageSize != PAGE_SHIFT)
+	{
+		return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+	}
+
+	for (idx=0; idx < ui32NumOfPages; idx++)
+	{
+		if (pbValid[idx])
+		{
+			IMG_UINT32 ui32InPageOffset;
+
+			ui32PageIndex = puiOffset[idx] >> PAGE_SHIFT;
+			ui32InPageOffset = puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)ui32PageIndex << PAGE_SHIFT);
+
+			PVR_ASSERT(ui32PageIndex < psPrivData->ui32VirtPageCount);
+			PVR_ASSERT(ui32InPageOffset < PAGE_SIZE);
+			psDevPAddr[idx].uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset;
+		}
+	}
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+				  size_t uiOffset,
+				  size_t uiSize,
+				  void **ppvKernelAddressOut,
+				  IMG_HANDLE *phHandleOut,
+				  PMR_FLAGS_T ulFlags)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+	void *pvKernAddr;
+	PVRSRV_ERROR eError;
+	int err;
+
+	if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Kernel mappings for sparse DMABufs "
+				"are not allowed!", __func__));
+		eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+		goto fail;
+	}
+
+	err = dma_buf_begin_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+	if (err)
+	{
+		eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+		goto fail;
+	}
+
+	pvKernAddr = dma_buf_vmap(psDmaBuf);
+	if (IS_ERR_OR_NULL(pvKernAddr))
+	{
+		eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+		goto fail_kmap;
+	}
+
+	*ppvKernelAddressOut = pvKernAddr + uiOffset;
+	*phHandleOut = pvKernAddr;
+
+	return PVRSRV_OK;
+
+fail_kmap:
+	do {
+		err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+	} while (err == -EAGAIN || err == -EINTR);
+
+fail:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static void PMRReleaseKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+					      IMG_HANDLE hHandle)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+	void *pvKernAddr = hHandle;
+	int err;
+
+	dma_buf_vunmap(psDmaBuf, pvKernAddr);
+
+	do {
+		err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+	} while (err == -EAGAIN || err == -EINTR);
+}
+
+static PVRSRV_ERROR PMRMMapDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+                                  PMR *psPMR,
+                                  PMR_MMAP_DATA pOSMMapData)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+	struct vm_area_struct *psVma = pOSMMapData;
+	int err;
+
+	if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Not possible to MMAP sparse DMABufs",
+				__func__));
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	err = dma_buf_mmap(psDmaBuf, psVma, 0);
+	if (err)
+	{
+		return (err == -EINVAL) ? PVRSRV_ERROR_NOT_SUPPORTED : PVRSRV_ERROR_BAD_MAPPING;
+	}
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+	MMapStatsAddOrUpdatePMR(psPMR, psVma->vm_end - psVma->vm_start);
+#endif
+
+	return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab =
+{
+	.pfnLockPhysAddresses		= PMRLockPhysAddressesDmaBuf,
+	.pfnUnlockPhysAddresses		= PMRUnlockPhysAddressesDmaBuf,
+	.pfnDevPhysAddr			= PMRDevPhysAddrDmaBuf,
+	.pfnAcquireKernelMappingData	= PMRAcquireKernelMappingDataDmaBuf,
+	.pfnReleaseKernelMappingData	= PMRReleaseKernelMappingDataDmaBuf,
+	.pfnMMap			= PMRMMapDmaBuf,
+	.pfnFinalize			= PMRFinalizeDmaBuf,
+	.pfnGetPMRFactoryLock = PMRGetFactoryLock,
+	.pfnReleasePMRFactoryLock = PMRReleaseFactoryLock,
+};
+
+/*****************************************************************************
+ *                       Public facing interface                             *
+ *****************************************************************************/
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                                PHYS_HEAP *psHeap,
+                                struct dma_buf_attachment *psAttachment,
+                                PFN_DESTROY_DMABUF_PMR pfnDestroy,
+                                PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                IMG_DEVMEM_SIZE_T uiChunkSize,
+                                IMG_UINT32 ui32NumPhysChunks,
+                                IMG_UINT32 ui32NumVirtChunks,
+                                IMG_UINT32 *pui32MappingTable,
+                                IMG_UINT32 ui32NameSize,
+                                const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                                PMR **ppsPMRPtr)
+{
+	struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+	PMR_DMA_BUF_DATA *psPrivData;
+	PMR_FLAGS_T uiPMRFlags;
+	IMG_BOOL bZeroOnAlloc;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bPoisonOnFree;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i, j;
+	IMG_UINT32 uiPagesPerChunk = uiChunkSize >> PAGE_SHIFT;
+	IMG_UINT32 ui32PageCount = 0;
+	struct scatterlist *sg;
+	struct sg_table *table;
+	IMG_UINT32 uiSglOffset;
+	IMG_CHAR pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN];
+
+	bZeroOnAlloc = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags);
+	bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags);
+	bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags);
+
+	if (bZeroOnAlloc && bPoisonOnAlloc)
+	{
+		/* Zero on Alloc and Poison on Alloc are mutually exclusive */
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errReturn;
+	}
+
+	psPrivData = OSAllocZMem(sizeof(*psPrivData));
+	if (psPrivData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errReturn;
+	}
+
+	psPrivData->psPhysHeap = psHeap;
+	psPrivData->psAttachment = psAttachment;
+	psPrivData->pfnDestroy = pfnDestroy;
+	psPrivData->bPoisonOnFree = bPoisonOnFree;
+	psPrivData->ui32VirtPageCount =
+			(ui32NumVirtChunks * uiChunkSize) >> PAGE_SHIFT;
+
+	psPrivData->pasDevPhysAddr =
+			OSAllocZMem(sizeof(*(psPrivData->pasDevPhysAddr)) *
+			            psPrivData->ui32VirtPageCount);
+	if (!psPrivData->pasDevPhysAddr)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate buffer for physical addresses (oom)",
+				 __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errFreePrivData;
+	}
+
+	if (bZeroOnAlloc || bPoisonOnAlloc)
+	{
+		void *pvKernAddr;
+		int i, err;
+
+		err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE);
+		if (err)
+		{
+			eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+			goto errFreePhysAddr;
+		}
+
+		for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
+		{
+			pvKernAddr = dma_buf_kmap(psDmaBuf, i);
+			if (IS_ERR_OR_NULL(pvKernAddr))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to map page for %s (err=%ld)",
+						 __func__, bZeroOnAlloc ? "zeroing" : "poisoning",
+						 pvKernAddr ? PTR_ERR(pvKernAddr) : -ENOMEM));
+				eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+
+				do {
+					err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+				} while (err == -EAGAIN || err == -EINTR);
+
+				goto errFreePhysAddr;
+			}
+
+			if (bZeroOnAlloc)
+			{
+				memset(pvKernAddr, 0, PAGE_SIZE);
+			}
+			else
+			{
+				memset(pvKernAddr, PVRSRV_POISON_ON_ALLOC_VALUE, PAGE_SIZE);
+			}
+
+			dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
+		}
+
+		do {
+			err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+		} while (err == -EAGAIN || err == -EINTR);
+	}
+
+	table = dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(table))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errFreePhysAddr;
+	}
+
+	/*
+	 * We do a two-pass process: first work out how many pages there
+	 * are, and second fill in the data.
+	 */
+	for_each_sg(table->sgl, sg, table->nents, i)
+	{
+		ui32PageCount += PAGE_ALIGN(pvr_sg_length(sg)) / PAGE_SIZE;
+	}
+
+	if (WARN_ON(!ui32PageCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Number of phys. pages must not be zero",
+				 __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errUnmap;
+	}
+
+	if (WARN_ON(ui32PageCount != ui32NumPhysChunks * uiPagesPerChunk))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Requested physical chunks and actual "
+				"number of physical dma buf pages don't match",
+				 __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errUnmap;
+	}
+
+	psPrivData->ui32PhysPageCount = ui32PageCount;
+	psPrivData->psSgTable = table;
+	ui32PageCount = 0;
+	sg = table->sgl;
+	uiSglOffset = 0;
+
+	/* Fill physical address array */
+	for (i = 0; i < ui32NumPhysChunks; i++)
+	{
+		for (j = 0; j < uiPagesPerChunk; j++)
+		{
+			IMG_UINT32 uiIdx = pui32MappingTable[i] * uiPagesPerChunk + j;
+
+			psPrivData->pasDevPhysAddr[uiIdx].uiAddr =
+					sg_dma_address(sg) + uiSglOffset;
+
+			/* Get the next offset for the current sgl or the next sgl */
+			uiSglOffset += PAGE_SIZE;
+			if (uiSglOffset >= pvr_sg_length(sg))
+			{
+				sg = sg_next(sg);
+				uiSglOffset = 0;
+
+				/* Check that we haven't looped */
+				if (WARN_ON(sg == table->sgl))
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: Failed to fill phys. address "
+							"array",
+							 __func__));
+					eError = PVRSRV_ERROR_INVALID_PARAMS;
+					goto errUnmap;
+				}
+			}
+		}
+	}
+
+	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+	/*
+	 * Check no significant bits were lost in cast due to different
+	 * bit widths for flags
+	 */
+	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+	if (OSSNPrintf((IMG_CHAR *)pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN, "ImpDmaBuf:%s", (IMG_CHAR *)pszName) < 0)
+	{
+		pszAnnotation[0] = '\0';
+	}
+	else
+	{
+		pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN-1] = '\0';
+	}
+
+	eError = PMRCreatePMR(psDevNode,
+			      psHeap,
+			      ui32NumVirtChunks * uiChunkSize,
+			      uiChunkSize,
+			      ui32NumPhysChunks,
+			      ui32NumVirtChunks,
+			      pui32MappingTable,
+			      PAGE_SHIFT,
+			      uiPMRFlags,
+			      pszAnnotation,
+			      &_sPMRDmaBufFuncTab,
+			      psPrivData,
+			      PMR_TYPE_DMABUF,
+			      ppsPMRPtr,
+			      PDUMP_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PMR (%s)",
+				 __func__, PVRSRVGetErrorString(eError)));
+		goto errFreePhysAddr;
+	}
+
+	return PVRSRV_OK;
+
+errUnmap:
+	dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL);
+errFreePhysAddr:
+	OSFreeMem(psPrivData->pasDevPhysAddr);
+errFreePrivData:
+	OSFreeMem(psPrivData);
+errReturn:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static PVRSRV_ERROR PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap,
+					 struct dma_buf_attachment *psAttachment)
+{
+	struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+
+	PVR_UNREFERENCED_PARAMETER(psHeap);
+
+	dma_buf_detach(psDmaBuf, psAttachment);
+	dma_buf_put(psDmaBuf);
+
+	return PVRSRV_OK;
+}
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR)
+{
+	PMR_DMA_BUF_DATA *psPrivData;
+
+	psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab);
+	if (psPrivData)
+	{
+		return psPrivData->psAttachment->dmabuf;
+	}
+
+	return NULL;
+}
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    PMR *psPMR,
+                    IMG_INT *piFd)
+{
+	struct dma_buf *psDmaBuf;
+	IMG_DEVMEM_SIZE_T uiPMRSize;
+	PVRSRV_ERROR eError;
+	IMG_INT iFd;
+
+	mutex_lock(&g_HashLock);
+
+	PMRRefPMR(psPMR);
+
+	eError = PMR_LogicalSize(psPMR, &uiPMRSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_pmr_ref;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	{
+		DEFINE_DMA_BUF_EXPORT_INFO(sDmaBufExportInfo);
+
+		sDmaBufExportInfo.priv  = psPMR;
+		sDmaBufExportInfo.ops   = &sPVRDmaBufOps;
+		sDmaBufExportInfo.size  = uiPMRSize;
+		sDmaBufExportInfo.flags = O_RDWR;
+
+		psDmaBuf = dma_buf_export(&sDmaBufExportInfo);
+	}
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
+	psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps,
+	                          uiPMRSize, O_RDWR, NULL);
+#else
+	psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps,
+	                          uiPMRSize, O_RDWR);
+#endif
+
+	if (IS_ERR_OR_NULL(psDmaBuf))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to export buffer (err=%ld)",
+		         __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_pmr_ref;
+	}
+
+	iFd = dma_buf_fd(psDmaBuf, O_RDWR);
+	if (iFd < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf fd (err=%d)",
+		         __func__, iFd));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_dma_buf;
+	}
+
+	mutex_unlock(&g_HashLock);
+	*piFd = iFd;
+	return PVRSRV_OK;
+
+fail_dma_buf:
+	dma_buf_put(psDmaBuf);
+
+fail_pmr_ref:
+	mutex_unlock(&g_HashLock);
+	PMRUnrefPMR(psPMR);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    IMG_INT fd,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    IMG_UINT32 ui32NameSize,
+                    const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                    PMR **ppsPMRPtr,
+                    IMG_DEVMEM_SIZE_T *puiSize,
+                    IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32 ui32MappingTable = 0;
+	struct dma_buf *psDmaBuf;
+	PVRSRV_ERROR eError;
+
+	/* Get the buffer handle */
+	psDmaBuf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(psDmaBuf))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)",
+				 __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+		return PVRSRV_ERROR_BAD_MAPPING;
+	}
+
+	uiSize = psDmaBuf->size;
+
+	eError = PhysmemImportSparseDmaBuf(psConnection,
+	                                 psDevNode,
+	                                 fd,
+	                                 uiFlags,
+	                                 uiSize,
+	                                 1,
+	                                 1,
+	                                 &ui32MappingTable,
+	                                 ui32NameSize,
+	                                 pszName,
+	                                 ppsPMRPtr,
+	                                 puiSize,
+	                                 puiAlign);
+
+	dma_buf_put(psDmaBuf);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_INT fd,
+                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                          IMG_DEVMEM_SIZE_T uiChunkSize,
+                          IMG_UINT32 ui32NumPhysChunks,
+                          IMG_UINT32 ui32NumVirtChunks,
+                          IMG_UINT32 *pui32MappingTable,
+                          IMG_UINT32 ui32NameSize,
+                          const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                          PMR **ppsPMRPtr,
+                          IMG_DEVMEM_SIZE_T *puiSize,
+                          IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	PMR *psPMR = NULL;
+	struct dma_buf_attachment *psAttachment;
+	struct dma_buf *psDmaBuf;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bHashTableCreated = IMG_FALSE;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (!psDevNode)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errReturn;
+	}
+
+	/* Terminate string from bridge to prevent corrupt annotations in RI */
+	if (pszName != NULL)
+	{
+		IMG_CHAR* pszName0 = (IMG_CHAR*) pszName;
+		pszName0[ui32NameSize-1] = '\0';
+	}
+
+	mutex_lock(&g_HashLock);
+
+	/* Get the buffer handle */
+	psDmaBuf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(psDmaBuf))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)",
+				 __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+		eError = PVRSRV_ERROR_BAD_MAPPING;
+		goto errReturn;
+	}
+
+	if (psDmaBuf->ops == &sPVRDmaBufOps)
+	{
+		PVRSRV_DEVICE_NODE *psPMRDevNode;
+
+		/* We exported this dma_buf, so we can just get its PMR */
+		psPMR = (PMR *) psDmaBuf->priv;
+
+		/* However, we can't import it if it belongs to a different device */
+		psPMRDevNode = PMR_DeviceNode(psPMR);
+		if (psPMRDevNode != psDevNode)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device",
+					 __func__));
+			eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+			goto err;
+		}
+	}
+	else
+	{
+		if (g_psDmaBufHash)
+		{
+			/* We have a hash table so check if we've seen this dmabuf before */
+			psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+		}
+		else
+		{
+			/*
+			 * As different processes may import the same dmabuf we need to
+			 * create a hash table so we don't generate a duplicate PMR but
+			 * rather just take a reference on an existing one.
+			 */
+			g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE);
+			if (!g_psDmaBufHash)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto err;
+			}
+			bHashTableCreated = IMG_TRUE;
+		}
+	}
+
+	if (psPMR)
+	{
+		/* Reuse the PMR we already created */
+		PMRRefPMR(psPMR);
+
+		*ppsPMRPtr = psPMR;
+		PMR_LogicalSize(psPMR, puiSize);
+		*puiAlign = PAGE_SIZE;
+	}
+	/* No errors so far */
+	eError = PVRSRV_OK;
+
+err:
+	if (psPMR || (PVRSRV_OK != eError))
+	{
+		mutex_unlock(&g_HashLock);
+		dma_buf_put(psDmaBuf);
+		return eError;
+	}
+
+	/* Do we want this to be a sparse PMR? */
+	if (ui32NumVirtChunks > 1)
+	{
+		IMG_UINT32 i;
+
+		/* Parameter validation */
+		if (psDmaBuf->size != (uiChunkSize * ui32NumPhysChunks) ||
+		    uiChunkSize != PAGE_SIZE ||
+		    ui32NumPhysChunks > ui32NumVirtChunks)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Requesting sparse buffer: "
+					"uiChunkSize ("IMG_DEVMEM_SIZE_FMTSPEC") must be equal to "
+					"OS page size (%lu). uiChunkSize * ui32NumPhysChunks "
+					"("IMG_DEVMEM_SIZE_FMTSPEC") must"
+					" be equal to the buffer size ("IMG_SIZE_FMTSPEC"). "
+					"ui32NumPhysChunks (%u) must be less than or equal to "
+					"ui32NumVirtChunks (%u)",
+					 __func__,
+					uiChunkSize,
+					PAGE_SIZE,
+					uiChunkSize * ui32NumPhysChunks,
+					psDmaBuf->size,
+					ui32NumPhysChunks,
+					ui32NumVirtChunks));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto errUnlockAndDMAPut;
+		}
+
+		/* Parameter validation - Mapping table entries*/
+		for (i = 0; i < ui32NumPhysChunks; i++)
+		{
+			if (pui32MappingTable[i] >= ui32NumVirtChunks)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Requesting sparse buffer: "
+						"Entry in mapping table (%u) is out of allocation "
+						"bounds (%u)",
+						 __func__,
+						 (IMG_UINT32) pui32MappingTable[i],
+						 (IMG_UINT32) ui32NumVirtChunks));
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto errUnlockAndDMAPut;
+			}
+		}
+	}
+	else
+	{
+		/* If ui32NumPhysChunks == 0 then pui32MappingTable is NULL; because
+		 * ui32NumPhysChunks is set to 1 below, we don't allow a NULL array */
+		if (pui32MappingTable == NULL)
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto errUnlockAndDMAPut;
+		}
+
+		/* Make sure parameters are valid for non-sparse allocations as well */
+		uiChunkSize = psDmaBuf->size;
+		ui32NumPhysChunks = 1;
+		ui32NumVirtChunks = 1;
+	}
+
+	psAttachment = dma_buf_attach(psDmaBuf, psDevNode->psDevConfig->pvOSDevice);
+	if (IS_ERR_OR_NULL(psAttachment))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to attach to dma-buf (err=%ld)",
+				 __func__, psAttachment? PTR_ERR(psAttachment) : -ENOMEM));
+		eError = PVRSRV_ERROR_BAD_MAPPING;
+		goto errUnlockAndDMAPut;
+	}
+
+	/*
+	 * Note:
+	 * Since we have no way to determine the type of the buffer, we just
+	 * assume that all dmabufs come from the same physical heap.
+	 */
+	eError = PhysmemCreateNewDmaBufBackedPMR(psDevNode,
+	                                         psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL],
+	                                         psAttachment,
+	                                         PhysmemDestroyDmaBuf,
+	                                         uiFlags,
+	                                         uiChunkSize,
+	                                         ui32NumPhysChunks,
+	                                         ui32NumVirtChunks,
+	                                         pui32MappingTable,
+	                                         ui32NameSize,
+	                                         pszName,
+	                                         &psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto errDMADetach;
+	}
+
+	/* First time we've seen this dmabuf so store it in the hash table */
+	HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR);
+	g_ui32HashRefCount++;
+
+	mutex_unlock(&g_HashLock);
+
+	*ppsPMRPtr = psPMR;
+	*puiSize = ui32NumVirtChunks * uiChunkSize;
+	*puiAlign = PAGE_SIZE;
+
+	return PVRSRV_OK;
+
+errDMADetach:
+	dma_buf_detach(psDmaBuf, psAttachment);
+
+errUnlockAndDMAPut:
+	if (IMG_TRUE == bHashTableCreated)
+	{
+		HASH_Delete(g_psDmaBufHash);
+		g_psDmaBufHash = NULL;
+	}
+	mutex_unlock(&g_HashLock);
+	dma_buf_put(psDmaBuf);
+
+errReturn:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) */
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                                PHYS_HEAP *psHeap,
+                                struct dma_buf_attachment *psAttachment,
+                                PFN_DESTROY_DMABUF_PMR pfnDestroy,
+                                PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                IMG_DEVMEM_SIZE_T uiChunkSize,
+                                IMG_UINT32 ui32NumPhysChunks,
+                                IMG_UINT32 ui32NumVirtChunks,
+                                IMG_UINT32 *pui32MappingTable,
+                                IMG_UINT32 ui32NameSize,
+                                const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                                PMR **ppsPMRPtr)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(psHeap);
+	PVR_UNREFERENCED_PARAMETER(psAttachment);
+	PVR_UNREFERENCED_PARAMETER(pfnDestroy);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+	PVR_UNREFERENCED_PARAMETER(uiChunkSize);
+	PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+	PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+	PVR_UNREFERENCED_PARAMETER(pui32MappingTable);
+	PVR_UNREFERENCED_PARAMETER(ui32NameSize);
+	PVR_UNREFERENCED_PARAMETER(pszName);
+	PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+
+	return NULL;
+}
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    PMR *psPMR,
+                    IMG_INT *piFd)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(piFd);
+
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    IMG_INT fd,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    IMG_UINT32 ui32NameSize,
+                    const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                    PMR **ppsPMRPtr,
+                    IMG_DEVMEM_SIZE_T *puiSize,
+                    IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(fd);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+	PVR_UNREFERENCED_PARAMETER(ui32NameSize);
+	PVR_UNREFERENCED_PARAMETER(pszName);
+	PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+	PVR_UNREFERENCED_PARAMETER(puiSize);
+	PVR_UNREFERENCED_PARAMETER(puiAlign);
+
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_INT fd,
+                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                          IMG_DEVMEM_SIZE_T uiChunkSize,
+                          IMG_UINT32 ui32NumPhysChunks,
+                          IMG_UINT32 ui32NumVirtChunks,
+                          IMG_UINT32 *pui32MappingTable,
+                          IMG_UINT32 ui32NameSize,
+                          const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                          PMR **ppsPMRPtr,
+                          IMG_DEVMEM_SIZE_T *puiSize,
+                          IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(fd);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+	PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+	PVR_UNREFERENCED_PARAMETER(puiSize);
+	PVR_UNREFERENCED_PARAMETER(puiAlign);
+	PVR_UNREFERENCED_PARAMETER(uiChunkSize);
+	PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+	PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+	PVR_UNREFERENCED_PARAMETER(pui32MappingTable);
+	PVR_UNREFERENCED_PARAMETER(ui32NameSize);
+	PVR_UNREFERENCED_PARAMETER(pszName);
+
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_dmabuf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_dmabuf.h
new file mode 100644
index 0000000..3398178
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_dmabuf.h
@@ -0,0 +1,114 @@
+/**************************************************************************/ /*!
+@File           physmem_dmabuf.h
+@Title          Header for dmabuf PMR factory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for importing dma-buf/Ion allocations
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(_PHYSMEM_DMABUF_H_)
+#define _PHYSMEM_DMABUF_H_
+
+#include <linux/dma-buf.h>
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "connection_server.h"
+
+#include "pmr.h"
+
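+/* Callback invoked when a dma-buf backed PMR is destroyed; implementations
+ * are expected to release the dma-buf attachment passed at creation time. */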
+typedef PVRSRV_ERROR (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap,
+                                               struct dma_buf_attachment *psAttachment);
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                                PHYS_HEAP *psHeap,
+                                struct dma_buf_attachment *psAttachment,
+                                PFN_DESTROY_DMABUF_PMR pfnDestroy,
+                                PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                IMG_DEVMEM_SIZE_T uiChunkSize,
+                                IMG_UINT32 ui32NumPhysChunks,
+                                IMG_UINT32 ui32NumVirtChunks,
+                                IMG_UINT32 *pui32MappingTable,
+                                IMG_UINT32 ui32NameSize,
+                                const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                                PMR **ppsPMRPtr);
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR);
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    PMR *psPMR,
+                    IMG_INT *piFd);
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    IMG_INT fd,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    IMG_UINT32 ui32NameSize,
+                    const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                    PMR **ppsPMRPtr,
+                    IMG_DEVMEM_SIZE_T *puiSize,
+                    IMG_DEVMEM_ALIGN_T *puiAlign);
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_INT fd,
+                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                          IMG_DEVMEM_SIZE_T uiChunkSize,
+                          IMG_UINT32 ui32NumPhysChunks,
+                          IMG_UINT32 ui32NumVirtChunks,
+                          IMG_UINT32 *pui32MappingTable,
+                          IMG_UINT32 ui32NameSize,
+                          const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                          PMR **ppsPMRPtr,
+                          IMG_DEVMEM_SIZE_T *puiSize,
+                          IMG_DEVMEM_ALIGN_T *puiAlign);
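+
+/* Illustrative usage sketch (hypothetical names and values, not part of the
+ * API contract): importing a sparse allocation that spans two virtual chunks,
+ * of which only the first is physically backed by a one-page dma-buf:
+ *
+ *   IMG_UINT32 aui32Map[] = { 0 };   // back virtual chunk 0 only
+ *   PMR *psPMR;
+ *   IMG_DEVMEM_SIZE_T uiSize;
+ *   IMG_DEVMEM_ALIGN_T uiAlign;
+ *
+ *   eError = PhysmemImportSparseDmaBuf(psConnection, psDevNode, fd, uiFlags,
+ *                                      uiPageSize,  // uiChunkSize == OS page size
+ *                                      1,           // ui32NumPhysChunks
+ *                                      2,           // ui32NumVirtChunks
+ *                                      aui32Map,
+ *                                      sizeof("import"), "import",
+ *                                      &psPMR, &uiSize, &uiAlign);
+ */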
+
+#endif /* !defined(_PHYSMEM_DMABUF_H_) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_hostmem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_hostmem.c
new file mode 100644
index 0000000..178c486
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_hostmem.c
@@ -0,0 +1,145 @@
+/*************************************************************************/ /*!
+@File           physmem_hostmem.c
+@Title          Host memory device node functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Functions relevant to device memory allocations made from host
+                mem device node.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "physmem_hostmem.h"
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "physheap.h"
+#include "pvrsrv_device.h"
+
+static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr);
+
+/* heap callbacks for host driver's device's heap */
+static PHYS_HEAP_FUNCTIONS gsHostMemDevPhysHeapFuncs =
+{
+	/* pfnCpuPAddrToDevPAddr */
+	HostMemCpuPAddrToDevPAddr,
+	/* pfnDevPAddrToCpuPAddr */
+	HostMemDevPAddrToCpuPAddr,
+	/* pfnGetRegionId */
+	NULL,
+};
+
+static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[];
+
+/* heap configuration for host driver's device */
+static PHYS_HEAP_CONFIG gsPhysHeapConfigHostMemDevice[] =
+{
+	{
+		PHYS_HEAP_ID_HOSTMEM,
+		PHYS_HEAP_TYPE_UMA,
+		"SYSMEM",
+		&gsHostMemDevPhysHeapFuncs,
+		NULL,
+		0,
+		IMG_FALSE,
+		(IMG_HANDLE)&gsHostMemDevConfig[0],
+	}
+};
+
+/* device configuration for host driver's device */
+static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[] =
+{
+	{
+		.pszName = "HostMemDevice",
+		.eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE,
+		.pasPhysHeaps = &gsPhysHeapConfigHostMemDevice[0],
+		.ui32PhysHeapCount = ARRAY_SIZE(gsPhysHeapConfigHostMemDevice),
+		.aui32PhysHeapID = {
+			PHYS_HEAP_ID_HOSTMEM,
+			PHYS_HEAP_ID_HOSTMEM,
+			PHYS_HEAP_ID_HOSTMEM
+		},
+	}
+};
+
+static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
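+	/* Host memory is UMA, so device physical addresses are identical to CPU
+	 * physical addresses; the conversion below is a straight copy. */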
+	/* Optimise common case */
+	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+		}
+	}
+}
+
+static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+	/* Optimise common case */
+	psCpuPAddr[0].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[0].uiAddr);
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psCpuPAddr[ui32Idx].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[ui32Idx].uiAddr);
+		}
+	}
+}
+
+PVRSRV_DEVICE_CONFIG* HostMemGetDeviceConfig(void)
+{
+	return &gsHostMemDevConfig[0];
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_hostmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_hostmem.h
new file mode 100644
index 0000000..883ca2a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_hostmem.h
@@ -0,0 +1,54 @@
+/*************************************************************************/ /*!
+@File           physmem_hostmem.h
+@Title          Host memory device node header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__PHYSMEM_HOSTMEM_H__)
+#define __PHYSMEM_HOSTMEM_H__
+
+#include "pvrsrv_device.h"
+
+/*! Heap ID of the host driver's device heap */
+#define PHYS_HEAP_ID_HOSTMEM (~((IMG_UINT32)0))
+
+PVRSRV_DEVICE_CONFIG* HostMemGetDeviceConfig(void);
+
+#endif /* !defined (__PHYSMEM_HOSTMEM_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_lma.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_lma.c
new file mode 100644
index 0000000..f129fd2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_lma.c
@@ -0,0 +1,1691 @@
+/*************************************************************************/ /*!
+@File           physmem_lma.c
+@Title          Local card memory allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for local card memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "rgx_pdump_panics.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "devicemem_server_utils.h"
+#include "physmem_lma.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "rgxutils.h"
+#endif
+
+/* Since 0x0 is a valid DevPAddr, we rely on the maximum 64-bit value being an
+ * invalid page address */
+#define INVALID_PAGE_ADDR ~((IMG_UINT64)0x0)
+
+typedef struct _PMR_LMALLOCARRAY_DATA_ {
+	PVRSRV_DEVICE_NODE *psDevNode;
+	IMG_PID uiPid;
+	IMG_INT32 iNumPagesAllocated;
+	/*
+	 * uiTotalNumPages:
+	 * Total number of pages supported by this PMR (fixed for now due to the
+	 * fixed page table array size).
+	 */
+	IMG_UINT32 uiTotalNumPages;
+	IMG_UINT32 uiPagesToAlloc;
+
+	IMG_UINT32 uiLog2AllocSize;
+	IMG_UINT32 uiContigAllocSize;
+	IMG_DEV_PHYADDR *pasDevPAddr;
+
+	IMG_BOOL bZeroOnAlloc;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bFwLocalAlloc;
+	IMG_BOOL bFwConfigAlloc;
+	IMG_BOOL bFwGuestAlloc;
+
+	IMG_BOOL bOnDemand;
+
+	/*
+	  record at alloc time whether poisoning will be required when the
+	  PMR is freed.
+	*/
+	IMG_BOOL bPoisonOnFree;
+
+	/* Physical heap and arena pointers for this allocation */
+	PHYS_HEAP* psPhysHeap;
+	RA_ARENA* psArena;
+	PVRSRV_MEMALLOCFLAGS_T uiAllocFlags;
+
+} PMR_LMALLOCARRAY_DATA;
+
+static PVRSRV_ERROR _MapAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+							  IMG_DEV_PHYADDR *psDevPAddr,
+							  size_t uiSize,
+							  IMG_BOOL bFwLocalAlloc,
+							  PMR_FLAGS_T ulFlags,
+							  void **pvPtr)
+{
+	IMG_UINT32 ui32CPUCacheFlags;
+	IMG_CPU_PHYADDR sCpuPAddr;
+	PHYS_HEAP *psPhysHeap;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemCPUCacheMode(psDevNode, ulFlags, &ui32CPUCacheFlags);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	if (bFwLocalAlloc)
+	{
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	}
+	else
+	{
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+	}
+
+	PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr);
+
+	*pvPtr = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags);
+	if (*pvPtr == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	else
+	{
+		return PVRSRV_OK;
+	}
+}
+
+static void _UnMapAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+						size_t uiSize,
+						IMG_BOOL bFwLocalAlloc,
+						PMR_FLAGS_T ulFlags,
+						void *pvPtr)
+{
+	OSUnMapPhysToLin(pvPtr, uiSize, PVRSRV_CPU_CACHE_MODE(ulFlags));
+}
+
+static PVRSRV_ERROR
+_PoisonAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+			 IMG_DEV_PHYADDR *psDevPAddr,
+			 IMG_BOOL bFwLocalAlloc,
+			 IMG_UINT32 uiContigAllocSize,
+			 IMG_BYTE ui8PoisonValue)
+{
+	PVRSRV_ERROR eError;
+	void *pvKernLin = NULL;
+
+	eError = _MapAlloc(psDevNode,
+					   psDevPAddr,
+					   uiContigAllocSize,
+					   bFwLocalAlloc,
+					   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+					   &pvKernLin);
+	if (eError != PVRSRV_OK)
+	{
+		goto map_failed;
+	}
+
+	OSDeviceMemSet(pvKernLin, ui8PoisonValue, uiContigAllocSize);
+
+	_UnMapAlloc(psDevNode, uiContigAllocSize, bFwLocalAlloc, 0, pvKernLin);
+
+	return PVRSRV_OK;
+
+map_failed:
+	PVR_DPF((PVR_DBG_ERROR, "Failed to poison allocation"));
+	return eError;
+}
+
+static PVRSRV_ERROR
+_ZeroAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+		   IMG_DEV_PHYADDR *psDevPAddr,
+		   IMG_BOOL bFwLocalAlloc,
+		   IMG_UINT32 uiContigAllocSize)
+{
+	void *pvKernLin = NULL;
+	PVRSRV_ERROR eError;
+
+	eError = _MapAlloc(psDevNode,
+					   psDevPAddr,
+					   uiContigAllocSize,
+					   bFwLocalAlloc,
+					   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+					   &pvKernLin);
+	if (eError != PVRSRV_OK)
+	{
+		goto map_failed;
+	}
+
+	OSDeviceMemSet(pvKernLin, 0, uiContigAllocSize);
+
+	_UnMapAlloc(psDevNode, uiContigAllocSize, bFwLocalAlloc, 0, pvKernLin);
+
+	return PVRSRV_OK;
+
+map_failed:
+	PVR_DPF((PVR_DBG_ERROR, "Failed to zero allocation"));
+	return eError;
+}
+
+static PVRSRV_ERROR
+_AllocLMPageArray(PVRSRV_DEVICE_NODE *psDevNode,
+			  PMR_SIZE_T uiSize,
+			  PMR_SIZE_T uiChunkSize,
+			  IMG_UINT32 ui32NumPhysChunks,
+			  IMG_UINT32 ui32NumVirtChunks,
+			  IMG_UINT32 *pabMappingTable,
+			  IMG_UINT32 uiLog2AllocPageSize,
+			  IMG_BOOL bZero,
+			  IMG_BOOL bPoisonOnAlloc,
+			  IMG_BOOL bPoisonOnFree,
+			  IMG_BOOL bContig,
+			  IMG_BOOL bOnDemand,
+			  IMG_BOOL bFwLocalAlloc,
+			  IMG_BOOL bFwConfigAlloc,
+			  IMG_BOOL bFwGuestAlloc,
+			  PHYS_HEAP* psPhysHeap,
+			  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags,
+			  IMG_PID uiPid,
+			  PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr
+			  )
+{
+	PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL;
+	IMG_UINT32 ui32Index;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(!bZero || !bPoisonOnAlloc);
+	PVR_ASSERT(OSGetPageShift() <= uiLog2AllocPageSize);
+
+	psPageArrayData = OSAllocZMem(sizeof(PMR_LMALLOCARRAY_DATA));
+	if (psPageArrayData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errorOnAllocArray;
+	}
+
+	if (bContig)
+	{
+		/*
+			Some allocations require kernel mappings, in which case, in order
+			to be virtually contiguous, we must also be physically contiguous.
+		*/
+		psPageArrayData->uiTotalNumPages = 1;
+		psPageArrayData->uiPagesToAlloc = psPageArrayData->uiTotalNumPages;
+		psPageArrayData->uiContigAllocSize = TRUNCATE_64BITS_TO_32BITS(uiSize);
+		psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize;
+	}
+	else
+	{
+		IMG_UINT32 uiNumPages;
+
+		/* The cast below is justified by the assertion that follows, which
+		proves that no significant bits have been truncated */
+		uiNumPages = (IMG_UINT32) ( ((uiSize - 1) >> uiLog2AllocPageSize) + 1);
+		PVR_ASSERT( ((PMR_SIZE_T) uiNumPages << uiLog2AllocPageSize) == uiSize);
+
+		psPageArrayData->uiTotalNumPages = uiNumPages;
+
+		if ((ui32NumVirtChunks != ui32NumPhysChunks) || (1 < ui32NumVirtChunks))
+		{
+			psPageArrayData->uiPagesToAlloc = ui32NumPhysChunks;
+		}
+		else
+		{
+			psPageArrayData->uiPagesToAlloc = uiNumPages;
+		}
+		psPageArrayData->uiContigAllocSize = 1 << uiLog2AllocPageSize;
+		psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize;
+	}
+	psPageArrayData->psDevNode = psDevNode;
+	psPageArrayData->uiPid = uiPid;
+	psPageArrayData->pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR) *
+												psPageArrayData->uiTotalNumPages);
+	if (psPageArrayData->pasDevPAddr == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errorOnAllocAddr;
+	}
+
+	/* Since no pages are allocated yet, initialise page addresses to INVALID_PAGE_ADDR */
+	for (ui32Index = 0; ui32Index < psPageArrayData->uiTotalNumPages; ui32Index++)
+	{
+		psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+	}
+
+	psPageArrayData->iNumPagesAllocated = 0;
+	psPageArrayData->bZeroOnAlloc = bZero;
+	psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
+	psPageArrayData->bPoisonOnFree = bPoisonOnFree;
+	psPageArrayData->bOnDemand = bOnDemand;
+	psPageArrayData->bFwLocalAlloc = bFwLocalAlloc;
+	psPageArrayData->bFwConfigAlloc = bFwConfigAlloc;
+	psPageArrayData->psPhysHeap = psPhysHeap;
+	psPageArrayData->uiAllocFlags = uiAllocFlags;
+	psPageArrayData->bFwGuestAlloc = bFwGuestAlloc;
+
+	*ppsPageArrayDataPtr = psPageArrayData;
+
+	return PVRSRV_OK;
+
+	/*
+	  error exit paths follow:
+	*/
+
+errorOnAllocAddr:
+	OSFreeMem(psPageArrayData);
+
+errorOnAllocArray:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+static PVRSRV_ERROR
+_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable)
+{
+	PVRSRV_ERROR eError;
+	RA_BASE_T uiCardAddr;
+	RA_LENGTH_T uiActualSize;
+	IMG_UINT32 i, ui32Index = 0;
+	IMG_UINT32 uiContigAllocSize;
+	IMG_UINT32 uiLog2AllocSize;
+	IMG_UINT32 uiRegionId;
+	PVRSRV_DEVICE_NODE *psDevNode;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bZeroOnAlloc;
+	RA_ARENA *pArena;
+
+	PVR_ASSERT(NULL != psPageArrayData);
+	PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+
+	uiContigAllocSize = psPageArrayData->uiContigAllocSize;
+	uiLog2AllocSize = psPageArrayData->uiLog2AllocSize;
+	psDevNode = psPageArrayData->psDevNode;
+	bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc;
+	bZeroOnAlloc = psPageArrayData->bZeroOnAlloc;
+
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) && psPageArrayData->bFwLocalAlloc)
+	{
+		if (!psPageArrayData->bFwGuestAlloc)
+		{
+			pArena = psPageArrayData->bFwConfigAlloc ?
+									psDevNode->psKernelFwConfigMemArena[0] :
+									psDevNode->psKernelFwMainMemArena[0];
+		}
+		else
+		{
+			PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+			PVR_ASSERT(PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST));
+			PVR_ASSERT(psDevNode->uiKernelFwRAIdx && psDevNode->uiKernelFwRAIdx < RGXFW_NUM_OS);
+
+			SysVzGetPhysHeapOrigin(psDevNode->psDevConfig,
+								   PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+								   &eHeapOrigin);
+
+			if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+			{
+				pArena = psDevNode->psKernelFwRawMemArena[psDevNode->uiKernelFwRAIdx];
+			}
+			else
+			{
+				pArena = psPageArrayData->bFwConfigAlloc ?
+							psDevNode->psKernelFwConfigMemArena[psDevNode->uiKernelFwRAIdx] :
+							psDevNode->psKernelFwMainMemArena[psDevNode->uiKernelFwRAIdx];
+			}
+
+			psDevNode->uiKernelFwRAIdx = 0;
+			PVR_ASSERT(pArena != NULL);
+		}
+	}
+	else
+	{
+		/* Get suitable local memory region for this allocation */
+		uiRegionId = PhysHeapGetRegionId(psPageArrayData->psPhysHeap,
+		                                 psPageArrayData->uiAllocFlags);
+
+		PVR_ASSERT(uiRegionId < psDevNode->ui32NumOfLocalMemArenas);
+		pArena = psDevNode->apsLocalDevMemArenas[uiRegionId];
+	}
+
+	if (psPageArrayData->uiTotalNumPages <
+			(psPageArrayData->iNumPagesAllocated + psPageArrayData->uiPagesToAlloc))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Pages requested to allocate don't fit PMR alloc Size. "
+				"Allocated: %u + Requested: %u > Total Allowed: %u",
+				psPageArrayData->iNumPagesAllocated,
+				psPageArrayData->uiPagesToAlloc,
+				psPageArrayData->uiTotalNumPages));
+		eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+		return eError;
+	}
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	{
+		IMG_UINT32  ui32OSid=0, ui32OSidReg=0;
+		IMG_BOOL    bOSidAxiProt;
+		IMG_PID     pId;
+
+		pId=OSGetCurrentClientProcessIDKM();
+		RetrieveOSidsfromPidList(pId, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+		pArena=psDevNode->psOSidSubArena[ui32OSid];
+		PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Giving from OS slot %d",ui32OSid));
+	}
+#endif
+
+	psPageArrayData->psArena = pArena;
+
+	for (i = 0; i < psPageArrayData->uiPagesToAlloc; i++)
+	{
+
+		/* The target index must be determined before allocating the page,
+		 * to keep the error paths simple */
+		if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
+		{
+			ui32Index = i;
+		}
+		else
+		{
+			if (NULL == pui32MapTable)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Mapping table cannot be null"));
+				eError = PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY;
+				goto errorOnRAAlloc;
+			}
+
+			ui32Index = pui32MapTable[i];
+			if (ui32Index >= psPageArrayData->uiTotalNumPages)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Page alloc request Index out of bounds for PMR @0x%p",
+						__func__,
+						psPageArrayData));
+				eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+				goto errorOnRAAlloc;
+			}
+
+			if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Mapping already exists"));
+				eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
+				goto errorOnRAAlloc;
+			}
+		}
+
+		eError = RA_Alloc(pArena,
+		                  uiContigAllocSize,
+		                  RA_NO_IMPORT_MULTIPLIER,
+		                  0,                       /* No flags */
+		                  1ULL << uiLog2AllocSize,
+		                  "LMA_Page_Alloc",
+		                  &uiCardAddr,
+		                  &uiActualSize,
+		                  NULL);                   /* No private handle */
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"Failed to Allocate the page @index:%d",
+					ui32Index));
+			eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+			goto errorOnRAAlloc;
+		}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+					"(GPU Virtualization Validation): Address: %llu",
+					uiCardAddr));
+		}
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+		/* Allocation is done a page at a time */
+		PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiActualSize, psPageArrayData->uiPid);
+#else
+		{
+			IMG_CPU_PHYADDR sLocalCpuPAddr;
+
+			sLocalCpuPAddr.uiAddr = (IMG_UINT64)uiCardAddr;
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+									 NULL,
+									 sLocalCpuPAddr,
+									 uiActualSize,
+									 NULL,
+									 psPageArrayData->uiPid);
+		}
+#endif
+#endif
+
+		psPageArrayData->pasDevPAddr[ui32Index].uiAddr = uiCardAddr;
+		if (bPoisonOnAlloc)
+		{
+			eError = _PoisonAlloc(psDevNode,
+								  &psPageArrayData->pasDevPAddr[ui32Index],
+								  psPageArrayData->bFwLocalAlloc,
+								  uiContigAllocSize,
+								  PVRSRV_POISON_ON_ALLOC_VALUE);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Failed to poison the page"));
+				goto errorOnPoison;
+			}
+		}
+
+		if (bZeroOnAlloc)
+		{
+			eError = _ZeroAlloc(psDevNode,
+								&psPageArrayData->pasDevPAddr[ui32Index],
+								psPageArrayData->bFwLocalAlloc,
+								uiContigAllocSize);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Failed to zero the page"));
+				goto errorOnZero;
+			}
+		}
+	}
+	psPageArrayData->iNumPagesAllocated += psPageArrayData->uiPagesToAlloc;
+
+	return PVRSRV_OK;
+
+	/*
+	  error exit paths follow:
+	*/
+errorOnZero:
+errorOnPoison:
+	eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+errorOnRAAlloc:
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s: alloc_pages failed to honour request %d @index: %d of %d pages: (%s)",
+			__func__,
+			ui32Index,
+			i,
+			psPageArrayData->uiPagesToAlloc,
+			PVRSRVGetErrorString(eError)));
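+	/* Unwind any pages allocated so far. i is unsigned, so once the decrement
+	 * wraps past zero the loop condition fails and the loop terminates. */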
+	while (--i < psPageArrayData->uiPagesToAlloc)
+	{
+		if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
+		{
+			ui32Index = i;
+		}
+		else
+		{
+			if (NULL == pui32MapTable)
+			{
+				break;
+			}
+
+			ui32Index = pui32MapTable[i];
+		}
+
+		if (ui32Index < psPageArrayData->uiTotalNumPages)
+		{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			/* Allocation is done a page at a time */
+			PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+			                            uiContigAllocSize,
+			                            psPageArrayData->uiPid);
+#else
+			{
+				PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+				                                psPageArrayData->pasDevPAddr[ui32Index].uiAddr,
+				                                psPageArrayData->uiPid);
+			}
+#endif
+#endif
+			RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+			psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+		}
+	}
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static PVRSRV_ERROR
+_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData)
+{
+	OSFreeMem(psPageArrayData->pasDevPAddr);
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+			"physmem_lma.c: freed local memory array structure for PMR @0x%p",
+			psPageArrayData));
+
+	OSFreeMem(psPageArrayData);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData,
+             IMG_UINT32 *pui32FreeIndices,
+             IMG_UINT32 ui32FreePageCount)
+{
+	IMG_UINT32 uiContigAllocSize;
+	IMG_UINT32 i, ui32PagesToFree=0, ui32PagesFreed=0, ui32Index=0;
+	RA_ARENA *pArena = psPageArrayData->psArena;
+
+	PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);
+
+	uiContigAllocSize = psPageArrayData->uiContigAllocSize;
+
+	ui32PagesToFree = (NULL == pui32FreeIndices) ?
+			psPageArrayData->uiTotalNumPages : ui32FreePageCount;
+
+	for (i = 0; i < ui32PagesToFree; i++)
+	{
+		if (NULL == pui32FreeIndices)
+		{
+			ui32Index = i;
+		}
+		else
+		{
+			ui32Index = pui32FreeIndices[i];
+		}
+
+		if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
+		{
+			ui32PagesFreed++;
+			if (psPageArrayData->bPoisonOnFree)
+			{
+				_PoisonAlloc(psPageArrayData->psDevNode,
+							 &psPageArrayData->pasDevPAddr[ui32Index],
+							 psPageArrayData->bFwLocalAlloc,
+							 uiContigAllocSize,
+							 PVRSRV_POISON_ON_FREE_VALUE);
+			}
+
+			RA_Free(pArena,	psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			/* Allocation is done a page at a time */
+			PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+			                            uiContigAllocSize,
+			                            psPageArrayData->uiPid);
+#else
+			{
+				PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+				                                psPageArrayData->pasDevPAddr[ui32Index].uiAddr,
+				                                psPageArrayData->uiPid);
+			}
+#endif
+#endif
+			psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+		}
+	}
+	psPageArrayData->iNumPagesAllocated -= ui32PagesFreed;
+
+	PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+			"%s: freed %d local memory for PMR @0x%p",
+			__func__,
+			(ui32PagesFreed * uiContigAllocSize),
+			psPageArrayData));
+
+	return PVRSRV_OK;
+}
+
+/*
+ *
+ * Implementation of callback functions
+ *
+ */
+
+/* destructor func is called after last reference disappears, but
+   before PMR itself is freed. */
+static PVRSRV_ERROR
+PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVRSRV_ERROR eError;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+
+	psLMAllocArrayData = pvPriv;
+
+	/* Pages can only be freed now, once the last reference has gone. */
+	if (psLMAllocArrayData->iNumPagesAllocated != 0)
+	{
+		eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
+		PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+	}
+
+	eError = _FreeLMPageArray(psLMAllocArrayData);
+	PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+
+	return PVRSRV_OK;
+}
+
+/* callback function for locking the system physical page addresses.
+   As we are LMA there is nothing to do as we control physical memory. */
+static PVRSRV_ERROR
+PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+
+	PVRSRV_ERROR eError;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+
+	psLMAllocArrayData = pvPriv;
+
+	if (psLMAllocArrayData->bOnDemand)
+	{
+		/* Allocate Memory for deferred allocation */
+		eError = _AllocLMPages(psLMAllocArrayData, NULL);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	return PVRSRV_OK;
+
+}
+
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+
+	psLMAllocArrayData = pvPriv;
+
+	if (psLMAllocArrayData->bOnDemand)
+	{
+		/* Free Memory for deferred allocation */
+		eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+/* N.B.  It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function! */
+static PVRSRV_ERROR
+PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+					   IMG_UINT32 ui32Log2PageSize,
+					   IMG_UINT32 ui32NumOfPages,
+					   IMG_DEVMEM_OFFSET_T *puiOffset,
+					   IMG_BOOL *pbValid,
+					   IMG_DEV_PHYADDR *psDevPAddr)
+{
+	IMG_UINT32 idx;
+	IMG_UINT32 uiLog2AllocSize;
+	IMG_UINT32 uiNumAllocs;
+	IMG_UINT64 uiAllocIndex;
+	IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv;
+
+	if (psLMAllocArrayData->uiLog2AllocSize < ui32Log2PageSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Requested physical addresses from PMR "
+		         "for incompatible contiguity %u!",
+		         __func__,
+		         ui32Log2PageSize));
+		return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+	}
+
+	uiNumAllocs = psLMAllocArrayData->uiTotalNumPages;
+	if (uiNumAllocs > 1)
+	{
+		PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
+		uiLog2AllocSize = psLMAllocArrayData->uiLog2AllocSize;
+
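+		/* Split each PMR offset into an allocation index and an offset within
+		 * that allocation */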
+		for (idx=0; idx < ui32NumOfPages; idx++)
+		{
+			if (pbValid[idx])
+			{
+				uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize;
+				uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize);
+
+				PVR_ASSERT(uiAllocIndex < uiNumAllocs);
+				PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize));
+
+				psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[uiAllocIndex].uiAddr + uiInAllocOffset;
+			}
+		}
+	}
+	else
+	{
+		for (idx=0; idx < ui32NumOfPages; idx++)
+		{
+			if (pbValid[idx])
+			{
+				psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[0].uiAddr + puiOffset[idx];
+			}
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+								 size_t uiOffset,
+								 size_t uiSize,
+								 void **ppvKernelAddressOut,
+								 IMG_HANDLE *phHandleOut,
+								 PMR_FLAGS_T ulFlags)
+{
+	PVRSRV_ERROR eError;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+	void *pvKernLinAddr = NULL;
+	IMG_UINT32 ui32PageIndex = 0;
+	size_t uiOffsetMask = uiOffset;
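+	/* By default the mask passes uiOffset through unchanged (x & x == x); it
+	 * is narrowed below for PMRs made of more than one allocation */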
+
+	psLMAllocArrayData = pvPriv;
+
+	/* Check that we can map this in contiguously */
+	if (psLMAllocArrayData->uiTotalNumPages != 1)
+	{
+		size_t uiStart = uiOffset;
+		size_t uiEnd = uiOffset + uiSize - 1;
+		size_t uiPageMask = ~((1 << psLMAllocArrayData->uiLog2AllocSize) - 1);
+
+		/* We can still map if only one page is required */
+		if ((uiStart & uiPageMask) != (uiEnd & uiPageMask))
+		{
+			eError = PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+			goto e0;
+		}
+
+		/* Locate the desired physical page to map in */
+		ui32PageIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
+		uiOffsetMask = (1U << psLMAllocArrayData->uiLog2AllocSize) - 1;
+	}
+
+	PVR_ASSERT(ui32PageIndex < psLMAllocArrayData->uiTotalNumPages);
+
+	eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+						&psLMAllocArrayData->pasDevPAddr[ui32PageIndex],
+						psLMAllocArrayData->uiContigAllocSize,
+						psLMAllocArrayData->bFwLocalAlloc,
+						ulFlags,
+						&pvKernLinAddr);
+
+	*ppvKernelAddressOut = ((IMG_CHAR *) pvKernLinAddr) + (uiOffset & uiOffsetMask);
+	*phHandleOut = pvKernLinAddr;
+
+	return eError;
+
+	/*
+	  error exit paths follow
+	*/
+
+ e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+												 IMG_HANDLE hHandle)
+{
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+	void *pvKernLinAddr = NULL;
+
+	psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv;
+	pvKernLinAddr = (void *) hHandle;
+
+	_UnMapAlloc(psLMAllocArrayData->psDevNode,
+				psLMAllocArrayData->uiContigAllocSize,
+				psLMAllocArrayData->bFwLocalAlloc,
+				0,
+				pvKernLinAddr);
+}
+
+
+static PVRSRV_ERROR
+CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+				  IMG_DEVMEM_OFFSET_T uiOffset,
+				  IMG_UINT8 *pcBuffer,
+				  size_t uiBufSz,
+				  size_t *puiNumBytes,
+				  void (*pfnCopyBytes)(IMG_UINT8 *pcBuffer,
+									   IMG_UINT8 *pcPMR,
+									   size_t uiSize))
+{
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+	size_t uiBytesCopied;
+	size_t uiBytesToCopy;
+	size_t uiBytesCopyableFromAlloc;
+	void *pvMapping = NULL;
+	IMG_UINT8 *pcKernelPointer = NULL;
+	size_t uiBufferOffset;
+	IMG_UINT64 uiAllocIndex;
+	IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+	PVRSRV_ERROR eError;
+
+	psLMAllocArrayData = pvPriv;
+
+	uiBytesCopied = 0;
+	uiBytesToCopy = uiBufSz;
+	uiBufferOffset = 0;
+
+	if (psLMAllocArrayData->uiTotalNumPages > 1)
+	{
+		while (uiBytesToCopy > 0)
+		{
+			/* we have to map one alloc in at a time */
+			PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
+			uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
+			uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2AllocSize);
+			uiBytesCopyableFromAlloc = uiBytesToCopy;
+			if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2AllocSize))
+			{
+				uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2AllocSize)-uiInAllocOffset);
+			}
+
+			PVR_ASSERT(uiBytesCopyableFromAlloc != 0);
+			PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumPages);
+			PVR_ASSERT(uiInAllocOffset < (1ULL << psLMAllocArrayData->uiLog2AllocSize));
+
+			eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+								&psLMAllocArrayData->pasDevPAddr[uiAllocIndex],
+								psLMAllocArrayData->uiContigAllocSize,
+								psLMAllocArrayData->bFwLocalAlloc,
+								PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+								&pvMapping);
+			if (eError != PVRSRV_OK)
+			{
+				goto e0;
+			}
+			pcKernelPointer = pvMapping;
+			pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc);
+
+			_UnMapAlloc(psLMAllocArrayData->psDevNode,
+						psLMAllocArrayData->uiContigAllocSize,
+						psLMAllocArrayData->bFwLocalAlloc,
+						0,
+						pvMapping);
+
+			uiBufferOffset += uiBytesCopyableFromAlloc;
+			uiBytesToCopy -= uiBytesCopyableFromAlloc;
+			uiOffset += uiBytesCopyableFromAlloc;
+			uiBytesCopied += uiBytesCopyableFromAlloc;
+		}
+	}
+	else
+	{
+		PVR_ASSERT((uiOffset + uiBufSz) <= psLMAllocArrayData->uiContigAllocSize);
+		PVR_ASSERT(psLMAllocArrayData->uiContigAllocSize != 0);
+		eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+						   &psLMAllocArrayData->pasDevPAddr[0],
+						   psLMAllocArrayData->uiContigAllocSize,
+						   psLMAllocArrayData->bFwLocalAlloc,
+						   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+						   &pvMapping);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+		pcKernelPointer = pvMapping;
+		pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz);
+
+		_UnMapAlloc(psLMAllocArrayData->psDevNode,
+					psLMAllocArrayData->uiContigAllocSize,
+					psLMAllocArrayData->bFwLocalAlloc,
+					0,
+					pvMapping);
+
+		uiBytesCopied = uiBufSz;
+	}
+	*puiNumBytes = uiBytesCopied;
+	return PVRSRV_OK;
+e0:
+	*puiNumBytes = uiBytesCopied;
+	return eError;
+}
+
+static void ReadLocalMem(IMG_UINT8 *pcBuffer,
+						 IMG_UINT8 *pcPMR,
+						 size_t uiSize)
+{
+	/* NOTE: 'CachedMemCopy' means the operating system default memcpy, which
+	 *       we *assume* in the LMA code will be faster, and doesn't need to
+	 *       worry about ARM64.
+	 */
+	OSCachedMemCopy(pcBuffer, pcPMR, uiSize);
+}
+
+static PVRSRV_ERROR
+PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+				  IMG_DEVMEM_OFFSET_T uiOffset,
+				  IMG_UINT8 *pcBuffer,
+				  size_t uiBufSz,
+				  size_t *puiNumBytes)
+{
+	return CopyBytesLocalMem(pvPriv,
+							 uiOffset,
+							 pcBuffer,
+							 uiBufSz,
+							 puiNumBytes,
+							 ReadLocalMem);
+}
+
+static void WriteLocalMem(IMG_UINT8 *pcBuffer,
+						  IMG_UINT8 *pcPMR,
+						  size_t uiSize)
+{
+	/* NOTE: 'CachedMemCopy' means the operating system default memcpy, which
+	 *       we *assume* in the LMA code will be faster, and doesn't need to
+	 *       worry about ARM64.
+	 */
+	OSCachedMemCopy(pcPMR, pcBuffer, uiSize);
+}
+
+static PVRSRV_ERROR
+PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+					  IMG_DEVMEM_OFFSET_T uiOffset,
+					  IMG_UINT8 *pcBuffer,
+					  size_t uiBufSz,
+					  size_t *puiNumBytes)
+{
+	return CopyBytesLocalMem(pvPriv,
+							 uiOffset,
+							 pcBuffer,
+							 uiBufSz,
+							 puiNumBytes,
+							 WriteLocalMem);
+}
+
+/*************************************************************************/ /*!
+@Function       PMRChangeSparseMemLocalMem
+@Description    This function changes the sparse mapping by allocating and
+				freeing pages. It also updates the GPU mappings accordingly.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv,
+                           const PMR *psPMR,
+                           IMG_UINT32 ui32AllocPageCount,
+                           IMG_UINT32 *pai32AllocIndices,
+                           IMG_UINT32 ui32FreePageCount,
+                           IMG_UINT32 *pai32FreeIndices,
+                           IMG_UINT32 uiFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+	IMG_UINT32 ui32AdtnlAllocPages = 0;
+	IMG_UINT32 ui32AdtnlFreePages = 0;
+	IMG_UINT32 ui32CommonRequstCount = 0;
+	IMG_UINT32 ui32Loop = 0;
+	IMG_UINT32 ui32Index = 0;
+	IMG_UINT32 uiAllocpgidx;
+	IMG_UINT32 uiFreepgidx;
+
+	PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
+	IMG_DEV_PHYADDR sPhyAddr;
+
+#if defined(DEBUG)
+	IMG_BOOL bPoisonFail = IMG_FALSE;
+	IMG_BOOL bZeroFail = IMG_FALSE;
+#endif
+
+	/* Fetch the Page table array represented by the PMR */
+	IMG_DEV_PHYADDR *psPageArray = psPMRPageArrayData->pasDevPAddr;
+	PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR);
+
+	/* The incoming request is classified into two operations independent of
+	 * each other: alloc & free pages.
+	 * These operations can be combined with two mapping operations as well
+	 * which are GPU & CPU space mappings.
+	 *
+	 * From the alloc and free page requests, the net amount of pages to be
+	 * allocated or freed is computed. Pages that were requested to be freed
+	 * will be reused to fulfil alloc requests.
+	 *
+	 * The order of operations is:
+	 * 1. Allocate new pages from the OS
+	 * 2. Move the free pages from free request to alloc positions.
+	 * 3. Free the rest of the pages not used for alloc
+	 *
+	 * Alloc parameters are validated at the time of allocation
+	 * and any error will be handled then. */
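+
+	/* Worked example (illustrative values only): with both resize flags set
+	 * (note the PDUMP_PANIC below for pdump-enabled builds),
+	 * pai32AllocIndices = {2, 5, 7} and pai32FreeIndices = {1, 4} give
+	 * ui32CommonRequstCount = min(3, 2) = 2, ui32AdtnlAllocPages = 1 and
+	 * ui32AdtnlFreePages = 0: one new page is allocated at index 2, then the
+	 * pages at free indices 1 and 4 are moved to alloc indices 5 and 7. */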
+
+	if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
+	{
+		ui32CommonRequstCount = (ui32AllocPageCount > ui32FreePageCount) ?
+				ui32FreePageCount : ui32AllocPageCount;
+
+		PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
+	}
+
+	if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
+	{
+		ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequstCount;
+	}
+	else
+	{
+		ui32AllocPageCount = 0;
+	}
+
+	if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
+	{
+		ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequstCount;
+	}
+	else
+	{
+		ui32FreePageCount = 0;
+	}
+
+	if (0 == (ui32CommonRequstCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		return eError;
+	}
+
+	{
+		/* Validate the free page indices */
+		if (ui32FreePageCount)
+		{
+			if (NULL != pai32FreeIndices)
+			{
+				for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+				{
+					uiFreepgidx = pai32FreeIndices[ui32Loop];
+
+					if (uiFreepgidx > psPMRPageArrayData->uiTotalNumPages)
+					{
+						eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+						goto e0;
+					}
+
+					if (INVALID_PAGE_ADDR == psPageArray[uiFreepgidx].uiAddr)
+					{
+						eError = PVRSRV_ERROR_INVALID_PARAMS;
+						goto e0;
+					}
+				}
+			}
+			else
+			{
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				return eError;
+			}
+		}
+
+		/* The following block of code checks the common alloc page indices for any issues */
+		for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
+		{
+			uiAllocpgidx = pai32AllocIndices[ui32Loop];
+			if (uiAllocpgidx > psPMRPageArrayData->uiTotalNumPages)
+			{
+				eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+				goto e0;
+			}
+
+			if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+			{
+				if ((INVALID_PAGE_ADDR != psPageArray[uiAllocpgidx].uiAddr) ||
+						(TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
+				{
+					eError = PVRSRV_ERROR_INVALID_PARAMS;
+					goto e0;
+				}
+			}
+			else
+			{
+				if ((INVALID_PAGE_ADDR ==  psPageArray[uiAllocpgidx].uiAddr) ||
+				    (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]))
+				{
+					eError = PVRSRV_ERROR_INVALID_PARAMS;
+					goto e0;
+				}
+			}
+		}
+
+
+		ui32Loop = 0;
+
+		/* Allocate new pages */
+		if (0 != ui32AdtnlAllocPages)
+		{
+			/* Say how many pages to allocate */
+			psPMRPageArrayData->uiPagesToAlloc = ui32AdtnlAllocPages;
+
+			eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices);
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				         "%s: New Addtl Allocation of pages failed",
+				         __func__));
+				goto e0;
+			}
+
+			/* Mark the corresponding pages of translation table as valid */
+			for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
+			{
+				psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
+			}
+
+			psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages;
+		}
+
+		ui32Index = ui32Loop;
+
+		/* Move the corresponding free pages to alloc request */
+		for (ui32Loop = 0; ui32Loop < ui32CommonRequstCount; ui32Loop++, ui32Index++)
+		{
+
+			uiAllocpgidx = pai32AllocIndices[ui32Index];
+			uiFreepgidx  = pai32FreeIndices[ui32Loop];
+			sPhyAddr = psPageArray[uiAllocpgidx];
+			psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
+
+			/* Is remap mem used in any real-world scenario? Should it become a
+			 * debug-only feature? The condition check should be hoisted out of
+			 * the loop; that will be done later, after some analysis */
+			if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+			{
+				psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
+				psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+				psPageArray[uiFreepgidx].uiAddr = INVALID_PAGE_ADDR;
+			}
+			else
+			{
+				psPageArray[uiFreepgidx] = sPhyAddr;
+				psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
+				psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+			}
+
+			/* Be sure to honour the attributes associated with the allocation
+			 * such as zeroing, poisoning etc. */
+			if (psPMRPageArrayData->bPoisonOnAlloc)
+			{
+				eError = _PoisonAlloc(psPMRPageArrayData->psDevNode,
+				                      &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
+				                      psPMRPageArrayData->bFwLocalAlloc,
+				                      psPMRPageArrayData->uiContigAllocSize,
+				                      PVRSRV_POISON_ON_ALLOC_VALUE);
+
+				/* Consider this as a soft failure and go ahead but log error to kernel log */
+				if (eError != PVRSRV_OK)
+				{
+#if defined(DEBUG)
+					bPoisonFail = IMG_TRUE;
+#endif
+				}
+			}
+			else
+			{
+				if (psPMRPageArrayData->bZeroOnAlloc)
+				{
+					eError = _ZeroAlloc(psPMRPageArrayData->psDevNode,
+					                    &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
+					                    psPMRPageArrayData->bFwLocalAlloc,
+					                    psPMRPageArrayData->uiContigAllocSize);
+					/* Consider this as a soft failure and go ahead but log error to kernel log */
+					if (eError != PVRSRV_OK)
+					{
+#if defined(DEBUG)
+						/* We probably don't need to zero any further pages */
+						bZeroFail = IMG_TRUE;
+#endif
+					}
+				}
+			}
+		}
+
+		/* Free the additional free pages */
+		if (0 != ui32AdtnlFreePages)
+		{
+			ui32Index = ui32Loop;
+			_FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages);
+			ui32Loop = 0;
+
+			while (ui32Loop++ < ui32AdtnlFreePages)
+			{
+				/* Set the corresponding mapping table entry to an invalid address */
+				psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID;
+			}
+
+			psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages;
+		}
+
+	}
+
+#if defined(DEBUG)
+	if (IMG_TRUE == bPoisonFail)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the page", __func__));
+	}
+
+	if (IMG_TRUE == bZeroFail)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the page", __func__));
+	}
+#endif
+
+	/* Update the PMR memory holding information */
+	eError = PVRSRV_OK;
+
+e0:
+	return eError;
+
+}
+
+/*************************************************************************/ /*!
+@Function       PMRChangeSparseMemCPUMapLocalMem
+@Description    This function changes the CPU mappings of the sparse allocation accordingly
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR PMRChangeSparseMemCPUMapLocalMem(PMR_IMPL_PRIVDATA pPriv,
+                                              const PMR *psPMR,
+                                              IMG_UINT64 sCpuVAddrBase,
+                                              IMG_UINT32 ui32AllocPageCount,
+                                              IMG_UINT32 *pai32AllocIndices,
+                                              IMG_UINT32 ui32FreePageCount,
+                                              IMG_UINT32 *pai32FreeIndices)
+{
+	PVRSRV_ERROR eError;
+	IMG_DEV_PHYADDR *psPageArray;
+	PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
+	uintptr_t sCpuVABase = sCpuVAddrBase;
+	IMG_CPU_PHYADDR sCpuAddrPtr;
+	IMG_BOOL bValid = IMG_FALSE;
+
+	/* Get the base address of the heap */
+	eError = PMR_CpuPhysAddr(psPMR,
+	                         psPMRPageArrayData->uiLog2AllocSize,
+	                         1,
+	                         0,	/* offset zero here means the first page in the PMR */
+	                         &sCpuAddrPtr,
+	                         &bValid);
+	PVR_LOGR_IF_ERROR(eError, "PMR_CpuPhysAddr");
+
+	/* The physical address of the heap is computed here by subtracting the
+	 * offset of this page, since the physical address of any page equals the
+	 * base address of the heap plus the offset of that page. */
+	sCpuAddrPtr.uiAddr -= psPMRPageArrayData->pasDevPAddr[0].uiAddr;
+	psPageArray = psPMRPageArrayData->pasDevPAddr;
+
+	return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
+	                                   sCpuVABase,
+	                                   sCpuAddrPtr,
+	                                   ui32AllocPageCount,
+	                                   pai32AllocIndices,
+	                                   ui32FreePageCount,
+	                                   pai32FreeIndices,
+	                                   IMG_TRUE);
+}
+
+
+static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = {
+	/* pfnLockPhysAddresses */
+	&PMRLockSysPhysAddressesLocalMem,
+	/* pfnUnlockPhysAddresses */
+	&PMRUnlockSysPhysAddressesLocalMem,
+	/* pfnDevPhysAddr */
+	&PMRSysPhysAddrLocalMem,
+	/* pfnAcquireKernelMappingData */
+	&PMRAcquireKernelMappingDataLocalMem,
+	/* pfnReleaseKernelMappingData */
+	&PMRReleaseKernelMappingDataLocalMem,
+#if defined(INTEGRITY_OS)
+	/* pfnMapMemoryObject */
+	NULL,
+	/* pfnUnmapMemoryObject */
+	NULL,
+#endif
+	/* pfnReadBytes */
+	&PMRReadBytesLocalMem,
+	/* pfnWriteBytes */
+	&PMRWriteBytesLocalMem,
+	/* .pfnUnpinMem */
+	NULL,
+	/* .pfnPinMem */
+	NULL,
+	/* pfnChangeSparseMem*/
+	&PMRChangeSparseMemLocalMem,
+	/* pfnChangeSparseMemCPUMap */
+	&PMRChangeSparseMemCPUMapLocalMem,
+	/* pfnMMap */
+	NULL,
+	/* pfnFinalize */
+	&PMRFinalizeLocalMem
+};
+
+PVRSRV_ERROR
+PhysmemNewLocalRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_DEVMEM_SIZE_T uiChunkSize,
+							IMG_UINT32 ui32NumPhysChunks,
+							IMG_UINT32 ui32NumVirtChunks,
+							IMG_UINT32 *pui32MappingTable,
+							IMG_UINT32 uiLog2AllocPageSize,
+							PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							const IMG_CHAR *pszAnnotation,
+							IMG_PID uiPid,
+							PMR **ppsPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eError2;
+	PMR *psPMR = NULL;
+	PMR_LMALLOCARRAY_DATA *psPrivData = NULL;
+	PMR_FLAGS_T uiPMRFlags;
+	PHYS_HEAP *psPhysHeap;
+	IMG_BOOL bZero;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bPoisonOnFree;
+	IMG_BOOL bOnDemand;
+	IMG_BOOL bContig;
+	IMG_BOOL bFwLocalAlloc;
+	IMG_BOOL bFwConfigAlloc;
+	IMG_BOOL bCpuLocalAlloc;
+	IMG_BOOL bFwGuestAlloc;
+
+	/* For sparse requests we have to do the allocation
+	 * in chunks rather than requesting one contiguous block */
+	if (ui32NumPhysChunks != ui32NumVirtChunks || ui32NumVirtChunks > 1)
+	{
+		if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags))
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: LMA kernel mapping functions currently "
+					"don't work with discontiguous memory.",
+					__func__));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto errorOnParam;
+		}
+		bContig = IMG_FALSE;
+	}
+	else
+	{
+		bContig = IMG_TRUE;
+	}
+
+	bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bFwLocalAlloc = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bFwConfigAlloc = PVRSRV_CHECK_FW_CONFIG(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bCpuLocalAlloc = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bFwGuestAlloc = PVRSRV_CHECK_FW_GUEST(uiFlags) ? IMG_TRUE : IMG_FALSE;
+
+	if (bFwLocalAlloc)
+	{
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	}
+	else if (bCpuLocalAlloc)
+	{
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL];
+	}
+	else
+	{
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+	}
+
+	/* Create Array structure that holds the physical pages */
+	eError = _AllocLMPageArray(psDevNode,
+	                           uiChunkSize * ui32NumVirtChunks,
+	                           uiChunkSize,
+	                           ui32NumPhysChunks,
+	                           ui32NumVirtChunks,
+	                           pui32MappingTable,
+	                           uiLog2AllocPageSize,
+	                           bZero,
+	                           bPoisonOnAlloc,
+	                           bPoisonOnFree,
+	                           bContig,
+	                           bOnDemand,
+	                           bFwLocalAlloc,
+	                           bFwConfigAlloc,
+	                           bFwGuestAlloc,
+	                           psPhysHeap,
+	                           uiFlags,
+	                           uiPid,
+	                           &psPrivData);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnAllocPageArray;
+	}
+
+	if (!bOnDemand)
+	{
+		/* Allocate the physical pages */
+		eError = _AllocLMPages(psPrivData, pui32MappingTable);
+		if (eError != PVRSRV_OK)
+		{
+			goto errorOnAllocPages;
+		}
+	}
+
+	/* In this instance, we simply pass flags straight through.
+
+	   Generically, uiFlags can include things that control the PMR
+	   factory, but we don't need any such thing (at the time of
+	   writing!), and our caller specifies all PMR flags so we don't
+	   need to meddle with what was given to us.
+	*/
+	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+	/* check no significant bits were lost in cast due to different
+	   bit widths for flags */
+	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+	if (bOnDemand)
+	{
+		PDUMPCOMMENT("Deferred Allocation PMR (LMA)");
+	}
+
+
+	eError = PMRCreatePMR(psDevNode,
+						  psPhysHeap,
+						  uiSize,
+						  uiChunkSize,
+						  ui32NumPhysChunks,
+						  ui32NumVirtChunks,
+						  pui32MappingTable,
+						  uiLog2AllocPageSize,
+						  uiPMRFlags,
+						  pszAnnotation,
+						  &_sPMRLMAFuncTab,
+						  psPrivData,
+						  PMR_TYPE_LMA,
+						  &psPMR,
+						  PDUMP_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"PhysmemNewLocalRamBackedPMR: Unable to create PMR (status=%d)",
+				eError));
+		goto errorOnCreate;
+	}
+
+	*ppsPMRPtr = psPMR;
+	return PVRSRV_OK;
+
+errorOnCreate:
+	if (!bOnDemand && psPrivData->iNumPagesAllocated)
+	{
+		eError2 = _FreeLMPages(psPrivData, NULL, 0);
+		PVR_ASSERT(eError2 == PVRSRV_OK);
+	}
+
+errorOnAllocPages:
+	eError2 = _FreeLMPageArray(psPrivData);
+	PVR_ASSERT(eError2 == PVRSRV_OK);
+
+errorOnAllocPageArray:
+errorOnParam:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+
+struct PidOSidCouplingList
+{
+	IMG_PID     pId;
+	IMG_UINT32  ui32OSid;
+	IMG_UINT32	ui32OSidReg;
+	IMG_BOOL    bOSidAxiProt;
+
+	struct PidOSidCouplingList *psNext;
+};
+typedef struct PidOSidCouplingList PidOSidCouplingList;
+
+static PidOSidCouplingList *psPidOSidHead;
+static PidOSidCouplingList *psPidOSidTail;
+
+void InsertPidOSidsCoupling(IMG_PID pId, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt)
+{
+	PidOSidCouplingList *psTmp;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "(GPU Virtualization Validation): Inserting (PID/ OSid/ OSidReg/ IsSecure) (%d/ %d/ %d/ %s) into list",
+	         pId, ui32OSid, ui32OSidReg, (bOSidAxiProt) ? "Yes" : "No"));
+
+	psTmp = OSAllocMem(sizeof(PidOSidCouplingList));
+
+	if (psTmp == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "(GPU Virtualization Validation): Memory allocation failed. No list insertion => program will execute normally."));
+		return;
+	}
+
+	psTmp->pId = pId;
+	psTmp->ui32OSid = ui32OSid;
+	psTmp->ui32OSidReg = ui32OSidReg;
+	psTmp->bOSidAxiProt = bOSidAxiProt;
+
+	psTmp->psNext = NULL;
+	if (psPidOSidHead == NULL)
+	{
+		psPidOSidHead = psTmp;
+		psPidOSidTail = psTmp;
+	}
+	else
+	{
+		psPidOSidTail->psNext = psTmp;
+		psPidOSidTail = psTmp;
+	}
+
+	return;
+}
+
+void RetrieveOSidsfromPidList(IMG_PID pId, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt)
+{
+	PidOSidCouplingList *psTmp;
+
+	for (psTmp = psPidOSidHead; psTmp != NULL; psTmp = psTmp->psNext)
+	{
+		if (psTmp->pId == pId)
+		{
+			(*pui32OSid)     = psTmp->ui32OSid;
+			(*pui32OSidReg)  = psTmp->ui32OSidReg;
+			(*pbOSidAxiProt) = psTmp->bOSidAxiProt;
+
+			return;
+		}
+	}
+
+	(*pui32OSid) = 0;
+	(*pui32OSidReg) = 0;
+	(*pbOSidAxiProt) = IMG_FALSE;
+
+	return;
+}
+
+void RemovePidOSidCoupling(IMG_PID pId)
+{
+	PidOSidCouplingList *psTmp, *psPrev = NULL;
+
+	for (psTmp = psPidOSidHead; psTmp != NULL; psTmp = psTmp->psNext)
+	{
+		if (psTmp->pId == pId) break;
+		psPrev = psTmp;
+	}
+
+	if (psTmp == NULL)
+	{
+		return;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "(GPU Virtualization Validation): Deleting Pairing %d / (%d - %d) from list", psTmp->pId, psTmp->ui32OSid, psTmp->ui32OSidReg));
+
+	if (psTmp == psPidOSidHead)
+	{
+		if (psPidOSidHead->psNext == NULL)
+		{
+			psPidOSidHead = NULL;
+			psPidOSidTail = NULL;
+			OSFreeMem(psTmp);
+
+			return;
+		}
+
+		psPidOSidHead = psPidOSidHead->psNext;
+		OSFreeMem(psTmp);
+		return;
+	}
+
+	if (psPrev == NULL) return;
+
+	psPrev->psNext = psTmp->psNext;
+	if (psTmp == psPidOSidTail)
+	{
+		psPidOSidTail = psPrev;
+	}
+	}
+
+	OSFreeMem(psTmp);
+
+	return;
+}
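+
+/* Illustrative usage sketch (not part of the driver): the three helpers
+ * above behave like a small keyed store. With a placeholder process ID
+ * uiPid:
+ *
+ *     IMG_UINT32 ui32OSid, ui32OSidReg;
+ *     IMG_BOOL bAxiProt;
+ *
+ *     InsertPidOSidsCoupling(uiPid, 1, 1, IMG_TRUE);
+ *     RetrieveOSidsfromPidList(uiPid, &ui32OSid, &ui32OSidReg, &bAxiProt);
+ *     RemovePidOSidCoupling(uiPid);
+ *
+ * After the retrieve call ui32OSid == 1; an unknown PID yields
+ * 0 / 0 / IMG_FALSE instead.
+ */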
+
+#endif
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_lma.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_lma.h
new file mode 100644
index 0000000..4b1ff73
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_lma.h
@@ -0,0 +1,86 @@
+/**************************************************************************/ /*!
+@File
+@Title          Header for local card memory allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for local card memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PHYSMEM_LMA_H_
+#define _SRVSRV_PHYSMEM_LMA_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*
+ * PhysmemNewLocalRamBackedPMR
+ *
+ * This function will create a PMR using the local card memory and is OS
+ * agnostic.
+ */
+PVRSRV_ERROR
+PhysmemNewLocalRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_DEVMEM_SIZE_T uiChunkSize,
+							IMG_UINT32 ui32NumPhysChunks,
+							IMG_UINT32 ui32NumVirtChunks,
+							IMG_UINT32 *pui32MappingTable,
+							IMG_UINT32 uiLog2PageSize,
+							PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							const IMG_CHAR *pszAnnotation,
+							IMG_PID uiPid,
+							PMR **ppsPMRPtr);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+ * Define some helper list functions for the virtualization validation code
+ */
+
+void	InsertPidOSidsCoupling(IMG_PID pId, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt);
+void	RetrieveOSidsfromPidList(IMG_PID pId, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt);
+void	RemovePidOSidCoupling(IMG_PID pId);
+#endif
+
+#endif /* #ifndef _SRVSRV_PHYSMEM_LMA_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_osmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_osmem.h
new file mode 100644
index 0000000..4698cb0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_osmem.h
@@ -0,0 +1,124 @@
+/*************************************************************************/ /*!
+@File
+@Title          PhysmemNewOSRamBackedPMR function declaration header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of Services memory management.  This file defines the
+                OS memory PMR factory API that must be defined so that the
+                common & device layer code in the Services Server can allocate
+                new PMRs back with pages from the OS page allocator. Applicable
+                new PMRs backed with pages from the OS page allocator. Applicable
+                in the OS Porting layer, in the "env" directory for that
+                system.
+
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PHYSMEM_OSMEM_H_
+#define _PHYSMEM_OSMEM_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*************************************************************************/ /*!
+@Function       PhysmemNewOSRamBackedPMR
+@Description    Rogue Services will call this function to allocate GPU device
+                memory from the PMR factory supported by the OS DDK port. This
+                factory typically obtains physical memory from the kernel/OS
+                API that allocates memory from the default heap of shared system
+                memory available on the platform. The allocated memory must be
+                page-aligned and be a whole number of pages.
+                After allocating the required memory, the implementation must
+                then call PMRCreatePMR() to obtain the PMR structure that
+                describes this allocation to the upper layers of the Services
+                memory management sub-system.
+                NB. Implementation of this function is mandatory. If shared
+                system memory is not to be used in the OS port then the
+                implementation must return PVRSRV_ERROR_NOT_SUPPORTED.
+
+@Input          psDevNode        the device node
+@Input          uiSize           the size of the allocation
+                                 (must be a multiple of page size)
+@Input          uiChunkSize      when sparse allocations are requested,
+                                 this is the allocated chunk size.
+                                 For regular allocations, this will be
+                                 the same as uiSize.
+                                 (must be a multiple of page size)
+@Input          ui32NumPhysChunks  when sparse allocations are requested,
+                                   this is the number of physical chunks
+                                   to be allocated.
+                                   For regular allocations, this will be 1.
+@Input          ui32NumVirtChunks  when sparse allocations are requested,
+                                   this is the number of virtual chunks
+                                   covering the sparse allocation.
+                                   For regular allocations, this will be 1.
+@Input          pui32MappingTable  when sparse allocations are requested,
+                                   this is the list of the indices of
+                                   each physically-backed virtual chunk.
+                                   For regular allocations, this will
+                                   be NULL.
+@Input          uiLog2PageSize   the physical pagesize in log2(bytes).
+@Input          uiFlags          the allocation flags.
+@Input          pszAnnotation    string describing the PMR (for debug).
+                                 This should be passed into the function
+                                 PMRCreatePMR().
+@Input          uiPid            The process ID that this allocation should
+                                 be associated with.
+@Output         ppsPMROut        pointer to the PMR created for the
+                                 new allocation
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+extern PVRSRV_ERROR
+PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                         IMG_DEVMEM_SIZE_T uiSize,
+                         IMG_DEVMEM_SIZE_T uiChunkSize,
+                         IMG_UINT32 ui32NumPhysChunks,
+                         IMG_UINT32 ui32NumVirtChunks,
+                         IMG_UINT32 *pui32MappingTable,
+                         IMG_UINT32 uiLog2PageSize,
+                         PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                         const IMG_CHAR *pszAnnotation,
+                         IMG_PID uiPid,
+                         PMR **ppsPMROut);
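+
+/* Illustrative call sketch (hypothetical values, not an API contract):
+ * a sparse allocation of 4 virtual chunks with only chunks 0 and 2
+ * physically backed might look like
+ *
+ *     IMG_UINT32 aui32Mapping[] = { 0, 2 };
+ *     PMR *psPMR;
+ *
+ *     eError = PhysmemNewOSRamBackedPMR(psDevNode,
+ *                                       4 * OSGetPageSize(),
+ *                                       OSGetPageSize(),
+ *                                       2,
+ *                                       4,
+ *                                       aui32Mapping,
+ *                                       OSGetPageShift(),
+ *                                       uiFlags,
+ *                                       "SparseExample",
+ *                                       uiPid,
+ *                                       &psPMR);
+ *
+ * OSGetPageSize()/OSGetPageShift(), uiFlags and uiPid are assumed to be
+ * provided by the caller's environment.
+ */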
+
+#endif /* #ifndef _PHYSMEM_OSMEM_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_osmem_linux.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_osmem_linux.c
new file mode 100644
index 0000000..d9554d5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_osmem_linux.c
@@ -0,0 +1,3855 @@
+/*************************************************************************/ /*!
+@File
+@Title          Implementation of PMR functions for OS managed memory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management.  This module is responsible for
+                implementing the function callbacks for physical memory
+                borrowed from the memory normally managed by the operating
+                system.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/vmalloc.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+
+#if defined(CONFIG_X86)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
+#include <asm/set_memory.h>
+#else
+#include <asm/cacheflush.h>
+#endif
+#endif
+
+/* include/ */
+#include "rgx_heaps.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "rgx_pdump_panics.h"
+/* services/server/include/ */
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#include "cache_km.h"
+#include "devicemem_server_utils.h"
+
+/* ourselves */
+#include "physmem_osmem.h"
+#include "physmem_osmem_linux.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM;
+#else
+/* split_page not available on older kernels */
+#undef PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM
+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0
+static IMG_UINT32 g_uiMaxOrder;
+#endif
+
+/*
+   These correspond to the MMU min/max page sizes and associated PTE
+   alignment that can be used on the device for an allocation. They are
+   4KB (min) and 2MB (max) respectively.
+*/
+#define PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ	RGX_HEAP_4KB_PAGE_SHIFT
+#define PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ	RGX_HEAP_2MB_PAGE_SHIFT
+
+/* Defines how many pages should be mapped at once to the kernel */
+#define PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES 1024 /* 4 MB */
+
+/*
+	These are used to get/set/mask lower-order bits in a dma_addr_t
+	to provide side-band information associated with that address.
+	This includes whether the address was obtained via alloc_page
+	or dma_alloc and whether the address came pre-aligned from the
+	allocator or was adjusted manually to align it.
+*/
+#define DMA_SET_ADJUSTED_ADDR(x)		((x) | ((dma_addr_t)0x02))
+#define DMA_IS_ADDR_ADJUSTED(x)			((x) & ((dma_addr_t)0x02))
+#define DMA_SET_ALLOCPG_ADDR(x)			((x) | ((dma_addr_t)0x01))
+#define DMA_IS_ALLOCPG_ADDR(x)			((x) & ((dma_addr_t)0x01))
+#define DMA_GET_ALIGN_ADJUSTMENT(x)		((x>>2) & ((dma_addr_t)0x3ff))
+#define DMA_SET_ALIGN_ADJUSTMENT(x,y)	((x) | (((dma_addr_t)y)<<0x02))
+#define DMA_GET_ADDR(x)					(((dma_addr_t)x) & ((dma_addr_t)~0xfff))
+#define DMA_VADDR_NOT_IN_USE			0xCAFEF00DDEADBEEFULL
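+
+/* Illustrative round trip (a sketch, assuming a page-aligned bus address):
+ *
+ *     dma_addr_t sAddr = 0x80042000;
+ *     sAddr = DMA_SET_ALLOCPG_ADDR(sAddr);          set bit 0: from alloc_page
+ *     sAddr = DMA_SET_ADJUSTED_ADDR(sAddr);         set bit 1: manually adjusted
+ *     sAddr = DMA_SET_ALIGN_ADJUSTMENT(sAddr, 3);   record adjustment of 3
+ *
+ *     DMA_GET_ADDR(sAddr)             yields 0x80042000
+ *     DMA_IS_ALLOCPG_ADDR(sAddr)      is non-zero
+ *     DMA_IS_ADDR_ADJUSTED(sAddr)     is non-zero
+ *     DMA_GET_ALIGN_ADJUSTMENT(sAddr) yields 3
+ *
+ * This works only because the low 12 bits of a page-aligned dma_addr_t
+ * are guaranteed to be zero.
+ */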
+
+typedef struct _PMR_OSPAGEARRAY_DATA_ {
+	/* Device for which this allocation has been made */
+	PVRSRV_DEVICE_NODE *psDevNode;
+	/* The pid that made this allocation */
+	IMG_PID uiPid;
+
+	/*
+	 * iNumOSPagesAllocated:
+	 * Number of pages allocated in this PMR so far.
+	 * This allows for up to (2^31 - 1) pages. With 4KB pages, that's 8TB of memory for each PMR.
+	 */
+	IMG_INT32 iNumOSPagesAllocated;
+
+	/*
+	 * uiTotalNumOSPages:
+	 * Total number of pages supported by this PMR (currently fixed, due to
+	 * the fixed page table array size), i.e. the number of "pages"
+	 * (a.k.a. macro pages, compound pages, higher-order pages, etc.)
+	 */
+	IMG_UINT32 uiTotalNumOSPages;
+
+	/*
+	  uiLog2AllocPageSize;
+
+	  size of each "page" -- this would normally be the same as
+	  PAGE_SHIFT, but we support the idea that we may allocate pages
+	  in larger chunks for better contiguity, using order>0 in the
+	  call to alloc_pages()
+	*/
+	IMG_UINT32 uiLog2AllocPageSize;
+
+	/*
+	  ui64DmaMask;
+
+	  The DMA address mask of the device this allocation was made for.
+	*/
+	IMG_UINT64 ui64DmaMask;
+
+	/*
+	  For non DMA/CMA allocation, pagearray references the pages
+	  thus allocated; one entry per compound page when compound
+	  pages are used. In addition, for DMA/CMA allocations, we
+	  track the returned cpu virtual and device bus address.
+	*/
+	struct page **pagearray;
+	dma_addr_t *dmaphysarray;
+	void **dmavirtarray;
+
+	/*
+	  Record at alloc time whether poisoning will be required when the
+	  PMR is freed.
+	*/
+	IMG_BOOL bZero;
+	IMG_BOOL bPoisonOnFree;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bOnDemand;
+	IMG_BOOL bUnpinned; /* Should be protected by page pool lock */
+	IMG_BOOL bIsCMA; /* Is CMA memory allocated via DMA framework */
+
+	/*
+	  The cache mode of the PMR. Additionally carrying the CPU-Cache-Clean
+	  flag, advising us to do cache maintenance on behalf of the caller.
+	  Boolean used to track if we need to revert the cache attributes
+	  of the pages used in this allocation. Depends on OS/architecture.
+	*/
+	IMG_UINT32 ui32CPUCacheFlags;
+	IMG_BOOL bUnsetMemoryType;
+} PMR_OSPAGEARRAY_DATA;
+
+/***********************************
+ * Page pooling for uncached pages *
+ ***********************************/
+
+static INLINE void
+_FreeOSPage_CMA(struct device *dev,
+				size_t alloc_size,
+				IMG_UINT32 uiOrder,
+				void *virt_addr,
+				dma_addr_t dev_addr,
+				struct page *psPage);
+
+static void
+_FreeOSPage(IMG_UINT32 uiOrder,
+			IMG_BOOL bUnsetMemoryType,
+			struct page *psPage);
+
+static PVRSRV_ERROR
+_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+			IMG_UINT32 *pai32FreeIndices,
+			IMG_UINT32 ui32FreePageCount);
+
+static PVRSRV_ERROR
+_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
+						   IMG_UINT32 *puiPagesFreed);
+
+/* A struct for our page pool holding an array of zeroed (!) pages.
+ * We always put units of page arrays to the pool but are
+ * able to take individual pages */
+typedef	struct
+{
+	/* Linkage for page pool LRU list */
+	struct list_head sPagePoolItem;
+
+	/* How many items are still in the page array */
+	IMG_UINT32 uiItemsRemaining;
+	/* Array of the actual pages */
+	struct page **ppsPageArray;
+
+} LinuxPagePoolEntry;
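+
+/* Illustrative state sketch (psEntry is a hypothetical pointer): an entry
+ * whose array originally held 8 zeroed pages and from which 3 pages have
+ * since been handed out looks like
+ *
+ *     psEntry->uiItemsRemaining == 5
+ *     psEntry->ppsPageArray[0..4]  still hold zeroed pages
+ *     psEntry->ppsPageArray[5..7]  were taken (consumed from the tail down)
+ *
+ * Once uiItemsRemaining reaches 0 the entry is unlinked from its pool list
+ * and the array and entry are freed.
+ */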
+
+/* CleanupThread structure to put allocation in page pool */
+typedef struct
+{
+	PVRSRV_CLEANUP_THREAD_WORK sCleanupWork;
+	IMG_UINT32 ui32CPUCacheMode;
+	LinuxPagePoolEntry *psPoolEntry;
+} LinuxCleanupData;
+
+/* A struct for the unpinned items */
+typedef struct
+{
+	struct list_head sUnpinPoolItem;
+	PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr;
+} LinuxUnpinEntry;
+
+
+/* Caches to hold page pool and page array structures */
+static struct kmem_cache *g_psLinuxPagePoolCache;
+static struct kmem_cache *g_psLinuxPageArray;
+
+/* Track what is live, all protected by pool lock.
+ * x86 needs two page pools because we have to change the memory attributes
+ * of the pages which is expensive due to an implicit flush.
+ * See set_pages_array_uc/wc/wb. */
+static IMG_UINT32 g_ui32UnpinPageCount;
+static IMG_UINT32 g_ui32PagePoolUCCount;
+#if defined(CONFIG_X86)
+static IMG_UINT32 g_ui32PagePoolWCCount;
+#endif
+/* Tracks asynchronous tasks currently accessing the page pool.
+ * It is incremented when a deferred-free task is created and decremented
+ * when that task has finished its work.
+ * The atomic prevents piling up of deferred work in case the deferred thread
+ * cannot keep up with the application.*/
+static ATOMIC_T g_iPoolCleanTasks;
+/* We don't want too many asynchronous threads trying to access the page pool
+ * at the same time */
+#define PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS 128
+
+/* Defines how many pages the page cache should hold. */
+#if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES)
+static const IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES;
+#else
+static const IMG_UINT32 g_ui32PagePoolMaxEntries;
+#endif
+
+/*	We double check if we would exceed this limit if we are below MAX_POOL_PAGES
+	and want to add an allocation to the pool.
+	This prevents big allocations being given back to the OS just because they
+	exceed the MAX_POOL_PAGES limit even though the pool is currently empty. */
+#if defined(PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES)
+static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES;
+#else
+static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries;
+#endif
+
+#if defined(CONFIG_X86)
+#define PHYSMEM_OSMEM_NUM_OF_POOLS 2
+static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
+	PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+	PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE
+};
+#else
+#define PHYSMEM_OSMEM_NUM_OF_POOLS 1
+static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
+	PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
+};
+#endif
+
+/* Global structures we use to manage the page pool */
+static DEFINE_MUTEX(g_sPagePoolMutex);
+
+/* List holding the page array pointers: */
+static LIST_HEAD(g_sPagePoolList_WC);
+static LIST_HEAD(g_sPagePoolList_UC);
+static LIST_HEAD(g_sUnpinList);
+
+static inline IMG_UINT32
+_PagesInPoolUnlocked(void)
+{
+	IMG_UINT32 uiCnt = g_ui32PagePoolUCCount;
+#if defined(CONFIG_X86)
+	uiCnt += g_ui32PagePoolWCCount;
+#endif
+	return uiCnt;
+}
+
+static inline void
+_PagePoolLock(void)
+{
+	mutex_lock(&g_sPagePoolMutex);
+}
+
+static inline int
+_PagePoolTrylock(void)
+{
+	return mutex_trylock(&g_sPagePoolMutex);
+}
+
+static inline void
+_PagePoolUnlock(void)
+{
+	mutex_unlock(&g_sPagePoolMutex);
+}
+
+static PVRSRV_ERROR
+_AddUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
+{
+	LinuxUnpinEntry *psUnpinEntry;
+
+	psUnpinEntry = OSAllocMem(sizeof(*psUnpinEntry));
+	if (!psUnpinEntry)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: OSAllocMem failed. Cannot add entry to unpin list.",
+				__func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psUnpinEntry->psPageArrayDataPtr = psOSPageArrayData;
+
+	/* Add into pool that the shrinker can access easily*/
+	list_add_tail(&psUnpinEntry->sUnpinPoolItem, &g_sUnpinList);
+
+	g_ui32UnpinPageCount += psOSPageArrayData->iNumOSPagesAllocated;
+
+	return PVRSRV_OK;
+}
+
+static void
+_RemoveUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
+{
+	LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
+
+	/* Remove from pool */
+	list_for_each_entry_safe(psUnpinEntry,
+	                         psTempUnpinEntry,
+	                         &g_sUnpinList,
+	                         sUnpinPoolItem)
+	{
+		if (psUnpinEntry->psPageArrayDataPtr == psOSPageArrayData)
+		{
+			list_del(&psUnpinEntry->sUnpinPoolItem);
+			break;
+		}
+	}
+
+	OSFreeMem(psUnpinEntry);
+
+	g_ui32UnpinPageCount -= psOSPageArrayData->iNumOSPagesAllocated;
+}
+
+static inline IMG_BOOL
+_GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags,
+				 struct list_head **ppsPoolHead,
+				 IMG_UINT32 **ppuiCounter)
+{
+	switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+#if defined(CONFIG_X86)
+		/*
+			For x86 we need to keep different lists for uncached
+			and write-combined as we must always honour the PAT
+			setting which cares about this difference.
+		*/
+
+			*ppsPoolHead = &g_sPagePoolList_WC;
+			*ppuiCounter = &g_ui32PagePoolWCCount;
+			break;
+#endif
+
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+			*ppsPoolHead = &g_sPagePoolList_UC;
+			*ppuiCounter = &g_ui32PagePoolUCCount;
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Unknown CPU caching mode. "
+					 "Using default UC pool.",
+					 __func__));
+			*ppsPoolHead = &g_sPagePoolList_UC;
+			*ppuiCounter = &g_ui32PagePoolUCCount;
+			PVR_ASSERT(0);
+			return IMG_FALSE;
+	}
+	return IMG_TRUE;
+}
+
+static struct shrinker g_sShrinker;
+
+/* Return the number of pages that still reside in the page pool. */
+static unsigned long
+_GetNumberOfPagesInPoolUnlocked(void)
+{
+	return _PagesInPoolUnlocked() + g_ui32UnpinPageCount;
+}
+
+/* Linux shrinker function that informs the OS how many cached pages we are
+ * holding that it is able to reclaim. */
+static unsigned long
+_CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+	int remain;
+
+	PVR_ASSERT(psShrinker == &g_sShrinker);
+	(void)psShrinker;
+	(void)psShrinkControl;
+
+	/* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
+	if (_PagePoolTrylock() == 0)
+		return 0;
+	remain = _GetNumberOfPagesInPoolUnlocked();
+	_PagePoolUnlock();
+
+	return remain;
+}
+
+/* Linux shrinker function to reclaim the pages from our page pool */
+static unsigned long
+_ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+	unsigned long uNumToScan = psShrinkControl->nr_to_scan;
+	unsigned long uSurplus = 0;
+	LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
+	IMG_UINT32 uiPagesFreed;
+
+	PVR_ASSERT(psShrinker == &g_sShrinker);
+	(void)psShrinker;
+
+	/* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
+	if (_PagePoolTrylock() == 0)
+		return SHRINK_STOP;
+
+	_FreePagesFromPoolUnlocked(uNumToScan,
+							   &uiPagesFreed);
+	uNumToScan -= uiPagesFreed;
+
+	if (uNumToScan == 0)
+	{
+		goto e_exit;
+	}
+
+	/* Free unpinned memory, starting with LRU entries */
+	list_for_each_entry_safe(psUnpinEntry,
+							 psTempUnpinEntry,
+							 &g_sUnpinList,
+							 sUnpinPoolItem)
+	{
+		PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr = psUnpinEntry->psPageArrayDataPtr;
+		IMG_UINT32 uiNumPages = (psPageArrayDataPtr->uiTotalNumOSPages > psPageArrayDataPtr->iNumOSPagesAllocated) ?
+		                        psPageArrayDataPtr->iNumOSPagesAllocated : psPageArrayDataPtr->uiTotalNumOSPages;
+		PVRSRV_ERROR eError;
+
+		/* Free associated pages */
+		eError = _FreeOSPages(psPageArrayDataPtr,
+							  NULL,
+							  0);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Shrinker is unable to free unpinned pages. Error: %s (%d)",
+					 __func__,
+					 PVRSRVGetErrorString(eError),
+					 eError));
+			goto e_exit;
+		}
+
+		/* Remove item from pool */
+		list_del(&psUnpinEntry->sUnpinPoolItem);
+
+		g_ui32UnpinPageCount -= uiNumPages;
+
+		/* Check if there is more to free or if we already surpassed the limit */
+		if (uiNumPages < uNumToScan)
+		{
+			uNumToScan -= uiNumPages;
+
+		}
+		else if (uiNumPages > uNumToScan)
+		{
+			uSurplus += uiNumPages - uNumToScan;
+			uNumToScan = 0;
+			goto e_exit;
+		}
+		else
+		{
+			uNumToScan -= uiNumPages;
+			goto e_exit;
+		}
+	}
+
+e_exit:
+	if (list_empty(&g_sUnpinList))
+	{
+		PVR_ASSERT(g_ui32UnpinPageCount == 0);
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+	{
+		int remain;
+		remain = _GetNumberOfPagesInPoolUnlocked();
+		_PagePoolUnlock();
+		return remain;
+	}
+#else
+	/* Returning the number of pages freed during the scan */
+	_PagePoolUnlock();
+	return psShrinkControl->nr_to_scan - uNumToScan + uSurplus;
+#endif
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+static int
+_ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+	if (psShrinkControl->nr_to_scan != 0)
+	{
+		return _ScanObjectsInPagePool(psShrinker, psShrinkControl);
+	}
+	else
+	{
+		/* No pages are being reclaimed so just return the page count */
+		return _CountObjectsInPagePool(psShrinker, psShrinkControl);
+	}
+}
+
+static struct shrinker g_sShrinker =
+{
+	.shrink = _ShrinkPagePool,
+	.seeks = DEFAULT_SEEKS
+};
+#else
+static struct shrinker g_sShrinker =
+{
+	.count_objects = _CountObjectsInPagePool,
+	.scan_objects = _ScanObjectsInPagePool,
+	.seeks = DEFAULT_SEEKS
+};
+#endif
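+
+/* A sketch of how the kernel drives the split API above (simplified,
+ * not driver code):
+ *
+ *     freeable = shrinker->count_objects(shrinker, &sc);
+ *     sc.nr_to_scan = batch_size;
+ *     freed = shrinker->scan_objects(shrinker, &sc);
+ *
+ * Returning SHRINK_STOP from the scan callback (as done above when the
+ * pool mutex cannot be taken) tells the VM to skip this shrinker for the
+ * current reclaim pass.
+ */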
+
+/* Register the shrinker so Linux can reclaim cached pages */
+void LinuxInitPhysmem(void)
+{
+	g_psLinuxPageArray = kmem_cache_create("pvr-pa", sizeof(PMR_OSPAGEARRAY_DATA), 0, 0, NULL);
+
+	_PagePoolLock();
+	g_psLinuxPagePoolCache = kmem_cache_create("pvr-pp", sizeof(LinuxPagePoolEntry), 0, 0, NULL);
+	if (g_psLinuxPagePoolCache)
+	{
+		/* Only create the shrinker if we created the cache OK */
+		register_shrinker(&g_sShrinker);
+	}
+	_PagePoolUnlock();
+
+	OSAtomicWrite(&g_iPoolCleanTasks, 0);
+}
+
+/* Unregister the shrinker and remove all pages from the pool that are still left */
+void LinuxDeinitPhysmem(void)
+{
+	IMG_UINT32 uiPagesFreed;
+
+	if (OSAtomicRead(&g_iPoolCleanTasks) > 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "Still deferred cleanup tasks running "
+				"while deinitialising memory subsystem."));
+	}
+
+	_PagePoolLock();
+	if (_FreePagesFromPoolUnlocked(IMG_UINT32_MAX, &uiPagesFreed) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when "
+				"deinitialising memory subsystem."));
+		PVR_ASSERT(0);
+	}
+
+	PVR_ASSERT(_PagesInPoolUnlocked() == 0);
+
+	/* Free the page cache */
+	kmem_cache_destroy(g_psLinuxPagePoolCache);
+
+	unregister_shrinker(&g_sShrinker);
+	_PagePoolUnlock();
+
+	kmem_cache_destroy(g_psLinuxPageArray);
+}
+
+static void EnableOOMKiller(void)
+{
+	current->flags &= ~PF_DUMPCORE;
+}
+
+static void DisableOOMKiller(void)
+{
+	/* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled.
+	 *
+	 * As oom_killer_disable() is an inline, non-exported function, we
+	 * can't use it from a modular driver. Furthermore, the OOM killer
+	 * API doesn't look thread safe, which 'current' is.
+	 */
+	WARN_ON(current->flags & PF_DUMPCORE);
+	current->flags |= PF_DUMPCORE;
+}
+
+/* Prints out the addresses in a page array for debugging purposes.
+ * Define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY locally to activate: */
+/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY 1 */
+static inline void
+_DumpPageArray(struct page **pagearray, IMG_UINT32 uiPagesToPrint)
+{
+#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY)
+	IMG_UINT32 i;
+	if (pagearray)
+	{
+		printk("Array %p:\n", pagearray);
+		for (i = 0; i < uiPagesToPrint; i++)
+		{
+			printk("%p | ", (pagearray)[i]);
+		}
+		printk("\n");
+	}
+	else
+	{
+		printk("Array is NULL:\n");
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(pagearray);
+	PVR_UNREFERENCED_PARAMETER(uiPagesToPrint);
+#endif
+}
+
+/* Debugging function that dumps out the number of pages for every
+ * page array that is currently in the page pool.
+ * Not defined by default. Define locally to activate feature: */
+/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL 1 */
+static void
+_DumpPoolStructure(void)
+{
+#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL)
+	LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+	struct list_head *psPoolHead = NULL;
+	IMG_UINT32 j;
+	IMG_UINT32 *puiCounter;
+
+	printk("\n");
+	/* Empty all pools */
+	for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
+	{
+
+		printk("pool = %u\n", j);
+
+		/* Get the correct list for this caching mode */
+		if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter))
+		{
+			break;
+		}
+
+		list_for_each_entry_safe(psPagePoolEntry,
+								 psTempPoolEntry,
+								 psPoolHead,
+								 sPagePoolItem)
+		{
+			printk("%u | ", psPagePoolEntry->uiItemsRemaining);
+		}
+		printk("\n");
+	}
+#endif
+}
+
+/* Free a certain number of pages from the page pool.
+ * Mainly used in error paths or at deinitialisation to
+ * empty the whole pool. */
+static PVRSRV_ERROR
+_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
+						   IMG_UINT32 *puiPagesFreed)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+	struct list_head *psPoolHead = NULL;
+	IMG_UINT32 i, j;
+	IMG_UINT32 *puiCounter;
+
+	*puiPagesFreed = uiMaxPagesToFree;
+
+	/* Empty all pools */
+	for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
+	{
+
+		/* Get the correct list for this caching mode */
+		if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter))
+		{
+			break;
+		}
+
+		/* Free the pages and remove page arrays from the pool if they are exhausted */
+		list_for_each_entry_safe(psPagePoolEntry,
+								 psTempPoolEntry,
+								 psPoolHead,
+								 sPagePoolItem)
+		{
+			IMG_UINT32 uiItemsToFree;
+			struct page **ppsPageArray;
+
+			/* Check if we are going to free the whole page array or just parts */
+			if (psPagePoolEntry->uiItemsRemaining <= uiMaxPagesToFree)
+			{
+				uiItemsToFree = psPagePoolEntry->uiItemsRemaining;
+				ppsPageArray = psPagePoolEntry->ppsPageArray;
+			}
+			else
+			{
+				uiItemsToFree = uiMaxPagesToFree;
+				ppsPageArray = &(psPagePoolEntry->ppsPageArray[psPagePoolEntry->uiItemsRemaining - uiItemsToFree]);
+			}
+
+#if defined(CONFIG_X86)
+			/* Set the correct page caching attributes on x86 */
+			if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[j]))
+			{
+				int ret;
+				ret = set_pages_array_wb(ppsPageArray, uiItemsToFree);
+				if (ret)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s: Failed to reset page attributes",
+							 __func__));
+					eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES;
+					goto e_exit;
+				}
+			}
+#endif
+
+			/* Free the actual pages */
+			for (i = 0; i < uiItemsToFree; i++)
+			{
+				__free_pages(ppsPageArray[i], 0);
+				ppsPageArray[i] = NULL;
+			}
+
+			/* Reduce counters */
+			uiMaxPagesToFree -= uiItemsToFree;
+			*puiCounter -= uiItemsToFree;
+			psPagePoolEntry->uiItemsRemaining -= uiItemsToFree;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+			/*
+			 * MemStats usually relies on having the bridge lock held, however
+			 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+			 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+			 * the page pool lock is used to ensure these calls are mutually
+			 * exclusive
+			 */
+			PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * uiItemsToFree);
+#endif
+
+			/* If this pool entry is exhausted, delete it */
+			if (psPagePoolEntry->uiItemsRemaining == 0)
+			{
+				OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+				list_del(&psPagePoolEntry->sPagePoolItem);
+				kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+			}
+
+			/* Return if we have all our pages */
+			if (uiMaxPagesToFree == 0)
+			{
+				goto e_exit;
+			}
+		}
+	}
+
+e_exit:
+	*puiPagesFreed -= uiMaxPagesToFree;
+	_DumpPoolStructure();
+	return eError;
+}
+
+/* Get a certain number of pages from the page pool and
+ * copy them directly into a given page array. */
+static void
+_GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags,
+						  IMG_UINT32 uiMaxNumPages,
+						  struct page **ppsPageArray,
+						  IMG_UINT32 *puiNumReceivedPages)
+{
+	LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+	struct list_head *psPoolHead = NULL;
+	IMG_UINT32 i;
+	IMG_UINT32 *puiCounter;
+
+	*puiNumReceivedPages = 0;
+
+	/* Get the correct list for this caching mode */
+	if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter))
+	{
+		return;
+	}
+
+	/* Check if there are actually items in the list */
+	if (list_empty(psPoolHead))
+	{
+		return;
+	}
+
+	PVR_ASSERT(*puiCounter > 0);
+
+	/* Receive pages from the pool */
+	list_for_each_entry_safe(psPagePoolEntry,
+							 psTempPoolEntry,
+							 psPoolHead,
+							 sPagePoolItem)
+	{
+		/* Get the pages from this pool entry */
+		for (i = psPagePoolEntry->uiItemsRemaining; i != 0 && *puiNumReceivedPages < uiMaxNumPages; i--)
+		{
+			ppsPageArray[*puiNumReceivedPages] = psPagePoolEntry->ppsPageArray[i-1];
+			(*puiNumReceivedPages)++;
+			psPagePoolEntry->uiItemsRemaining--;
+		}
+
+		/* If this pool entry is exhausted, delete it */
+		if (psPagePoolEntry->uiItemsRemaining == 0)
+		{
+			OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+			list_del(&psPagePoolEntry->sPagePoolItem);
+			kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+		}
+
+		/* Return if we have all our pages */
+		if (*puiNumReceivedPages == uiMaxNumPages)
+		{
+			goto exit_ok;
+		}
+	}
+
+exit_ok:
+
+	/* Update counters */
+	*puiCounter -= *puiNumReceivedPages;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	/* MemStats usually relies on having the bridge lock held, however
+	 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+	 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+	 * the page pool lock is used to ensure these calls are mutually
+	 * exclusive
+	 */
+	PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * (*puiNumReceivedPages));
+#endif
+
+	_DumpPoolStructure();
+	return;
+}
+
+/* Same as _GetPagesFromPoolUnlocked but handles locking and
+ * checks first whether pages from the pool are a valid option. */
+static inline void
+_GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode,
+						IMG_UINT32 ui32CPUCacheFlags,
+						IMG_UINT32 uiPagesToAlloc,
+						IMG_UINT32 uiOrder,
+						IMG_BOOL bZero,
+						struct page **ppsPageArray,
+						IMG_UINT32 *puiPagesFromPool)
+{
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+	PVR_UNREFERENCED_PARAMETER(bZero);
+#else
+	/* Don't get pages from pool if it doesn't provide zeroed pages */
+	if (bZero)
+	{
+		return;
+	}
+#endif
+
+	/* The page pool stores only order 0 pages. If we need zeroed memory we
+	 * directly allocate from the OS because it is faster than
+	 * doing it within the driver. */
+	if (uiOrder == 0 &&
+	    !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags))
+	{
+
+		_PagePoolLock();
+		_GetPagesFromPoolUnlocked(ui32CPUCacheFlags,
+								  uiPagesToAlloc,
+								  ppsPageArray,
+								  puiPagesFromPool);
+		_PagePoolUnlock();
+	}
+
+	return;
+}
+
+/* Takes a page array and maps it into the kernel to write zeros */
+static PVRSRV_ERROR
+_ZeroPageArray(IMG_UINT32 uiNumToClean,
+               struct page **ppsCleanArray,
+               pgprot_t pgprot)
+{
+	IMG_CPU_VIRTADDR pvAddr;
+	IMG_UINT32 uiMaxPagesToMap = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES,
+	                                 uiNumToClean);
+
+	/* Map and fill the pages with zeros.
+	 * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES
+	 * pages at a time. */
+	while (uiNumToClean != 0)
+	{
+		IMG_UINT32 uiToClean = (uiNumToClean >= uiMaxPagesToMap) ?
+		                        uiMaxPagesToMap :
+		                        uiNumToClean;
+
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+		pvAddr = vmap(ppsCleanArray, uiToClean, VM_WRITE, pgprot);
+#else
+		pvAddr = vm_map_ram(ppsCleanArray, uiToClean, -1, pgprot);
+#endif
+		if (!pvAddr)
+		{
+			if (uiMaxPagesToMap <= 1)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Out of vmalloc memory, "
+						"unable to map pages for zeroing.",
+						__func__));
+				return PVRSRV_ERROR_OUT_OF_MEMORY;
+			}
+			else
+			{
+				/* Halve the pages to map at once and try again. */
+				uiMaxPagesToMap = uiMaxPagesToMap >> 1;
+				continue;
+			}
+		}
+
+		OSDeviceMemSet(pvAddr, 0, PAGE_SIZE * uiToClean);
+
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+		vunmap(pvAddr);
+#else
+		vm_unmap_ram(pvAddr, uiToClean);
+#endif
+
+		ppsCleanArray = &(ppsCleanArray[uiToClean]);
+		uiNumToClean -= uiToClean;
+	}
+
+	return PVRSRV_OK;
+}
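+
+/* For example (a sketch): with 3000 pages to zero and the initial cap of
+ * PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES (1024), the loop above maps and zeroes
+ * 1024 + 1024 + 952 pages in three passes. If a mapping of 1024 pages ever
+ * fails, the cap halves to 512, 256, ... down to 1 before giving up with
+ * PVRSRV_ERROR_OUT_OF_MEMORY.
+ */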
+
+static PVRSRV_ERROR
+_CleanupThread_CleanPages(void *pvData)
+{
+	LinuxCleanupData *psCleanupData = (LinuxCleanupData*) pvData;
+	LinuxPagePoolEntry *psPagePoolEntry = psCleanupData->psPoolEntry;
+	struct list_head *psPoolHead = NULL;
+	IMG_UINT32 *puiCounter = NULL;
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+	PVRSRV_ERROR eError;
+	pgprot_t pgprot;
+	IMG_UINT32 i;
+#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */
+
+	/* Get the correct pool for this caching mode. */
+	_GetPoolListHead(psCleanupData->ui32CPUCacheMode, &psPoolHead, &puiCounter);
+
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+	switch (PVRSRV_CPU_CACHE_MODE(psCleanupData->ui32CPUCacheMode))
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+#if defined(CONFIG_X86)
+			/* For x86 we can only map with the same attributes
+			 * as in the PAT settings */
+			pgprot = pgprot_noncached(PAGE_KERNEL);
+			break;
+#endif
+
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+			pgprot = pgprot_writecombine(PAGE_KERNEL);
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Unknown caching mode to set page protection flags.",
+					__func__));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto eExit;
+	}
+
+	/* Map and fill the pages with zeros.
+	 * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES
+	 * pages at a time. */
+	eError = _ZeroPageArray(psPagePoolEntry->uiItemsRemaining,
+	                        psPagePoolEntry->ppsPageArray,
+	                        pgprot);
+	if (eError != PVRSRV_OK)
+	{
+		goto eExit;
+	}
+#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */
+
+	/* Lock down pool and add item */
+	_PagePoolLock();
+
+	/* Pool counters were already updated, so don't do it here again */
+
+	/* The pages are all zeroed so return them to the pool. */
+	list_add_tail(&psPagePoolEntry->sPagePoolItem, psPoolHead);
+
+	_DumpPoolStructure();
+	_PagePoolUnlock();
+
+	OSFreeMem(pvData);
+	OSAtomicDecrement(&g_iPoolCleanTasks);
+
+	return PVRSRV_OK;
+
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+eExit:
+	/* We failed to zero the pages, so return the error to allow a
+	 * retry during the next spin */
+	if ((psCleanupData->sCleanupWork.ui32RetryCount - 1) > 0)
+	{
+		return eError;
+	}
+
+	/* this was the last retry, give up and free pages to OS */
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s: Deferred task error, freeing pages to OS.",
+			__func__));
+	_PagePoolLock();
+
+	*puiCounter -= psPagePoolEntry->uiItemsRemaining;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	/* MemStats usually relies on having the bridge lock held, however
+	 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+	 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+	 * the page pool lock is used to ensure these calls are mutually
+	 * exclusive
+	 */
+	PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * psCleanupData->psPoolEntry->uiItemsRemaining);
+#endif
+
+	_PagePoolUnlock();
+
+	for (i = 0; i < psCleanupData->psPoolEntry->uiItemsRemaining; i++)
+	{
+		_FreeOSPage(0, IMG_TRUE, psPagePoolEntry->ppsPageArray[i]);
+	}
+	OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+	kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+	OSFreeMem(psCleanupData);
+
+	OSAtomicDecrement(&g_iPoolCleanTasks);
+
+	return PVRSRV_OK;
+#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */
+}
+
+
+/* Put page array to the page pool.
+ * Handles locking and checks whether the pages are
+ * suitable to be stored in the pool. */
+static inline IMG_BOOL
+_PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags,
+					  struct page **ppsPageArray,
+					  IMG_BOOL bUnpinned,
+					  IMG_UINT32 uiOrder,
+					  IMG_UINT32 uiNumPages)
+{
+	LinuxCleanupData *psCleanupData;
+	PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn;
+#if defined(SUPPORT_PHYSMEM_TEST)
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+#endif
+
+	if (uiOrder == 0 &&
+		!bUnpinned &&
+		!PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags))
+	{
+		IMG_UINT32 uiEntries;
+		IMG_UINT32 *puiCounter;
+		struct list_head *psPoolHead;
+
+
+		_PagePoolLock();
+
+		uiEntries = _PagesInPoolUnlocked();
+
+		/* Check for number of current page pool entries and whether
+		 * we have other asynchronous tasks in-flight */
+		if ( (uiEntries < g_ui32PagePoolMaxEntries) &&
+		     ((uiEntries + uiNumPages) <
+		      (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) ))
+		{
+			if (OSAtomicIncrement(&g_iPoolCleanTasks) <=
+					PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS)
+			{
+#if defined(SUPPORT_PHYSMEM_TEST)
+				if (!psPVRSRVData->hCleanupThread)
+				{
+					goto eDecrement;
+				}
+#endif
+
+				psCleanupData = OSAllocMem(sizeof(*psCleanupData));
+
+				if (!psCleanupData)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s: Failed to get memory for deferred page pool cleanup. "
+							 "Trying to free pages immediately",
+							 __func__));
+					goto eDecrement;
+				}
+
+				psCleanupThreadFn = &psCleanupData->sCleanupWork;
+				psCleanupData->ui32CPUCacheMode = ui32CPUCacheFlags;
+				psCleanupData->psPoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL);
+
+				if (!psCleanupData->psPoolEntry)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s: Failed to get memory for deferred page pool cleanup. "
+							 "Trying to free pages immediately",
+							 __func__));
+					goto eFreeCleanupData;
+				}
+
+				if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter))
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s: Failed to get correct page pool",
+							 __func__));
+					goto eFreePoolEntry;
+				}
+
+				/* Increase counter here to avoid deferred cleanup tasks piling up */
+				*puiCounter = *puiCounter + uiNumPages;
+
+				psCleanupData->psPoolEntry->ppsPageArray = ppsPageArray;
+				psCleanupData->psPoolEntry->uiItemsRemaining = uiNumPages;
+
+				psCleanupThreadFn->pfnFree = _CleanupThread_CleanPages;
+				psCleanupThreadFn->pvData = psCleanupData;
+				psCleanupThreadFn->bDependsOnHW = IMG_FALSE;
+				CLEANUP_THREAD_SET_RETRY_COUNT(psCleanupThreadFn,
+				                               CLEANUP_THREAD_RETRY_COUNT_DEFAULT);
+
+	#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+				/* MemStats usually relies on having the bridge lock held, however
+				 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+				 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+				 * the page pool lock is used to ensure these calls are mutually
+				 * exclusive
+				 */
+				PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * uiNumPages);
+	#endif
+
+				/* We must not hold the pool lock when calling AddWork because
+				 * it might call us back to free pooled pages directly when
+				 * unloading the driver */
+				_PagePoolUnlock();
+
+				PVRSRVCleanupThreadAddWork(psCleanupThreadFn);
+
+
+			}
+			else
+			{
+				goto eDecrement;
+			}
+
+		}
+		else
+		{
+			goto eUnlock;
+		}
+	}
+	else
+	{
+		goto eExitFalse;
+	}
+
+	return IMG_TRUE;
+
+eFreePoolEntry:
+	OSFreeMem(psCleanupData->psPoolEntry);
+eFreeCleanupData:
+	OSFreeMem(psCleanupData);
+eDecrement:
+	OSAtomicDecrement(&g_iPoolCleanTasks);
+eUnlock:
+	_PagePoolUnlock();
+eExitFalse:
+	return IMG_FALSE;
+}
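+
+/* Illustrative caller pattern (a sketch, not a new API; see the real call
+ * sites in _FreeOSPages_Sparse/_FreeOSPages_Fast further down): ownership of
+ * the page array transfers to the pool only on an IMG_TRUE return, so the
+ * caller must forget its pointer in that case:
+ *
+ *     if (_PutPagesToPoolLocked(ui32CPUCacheFlags, ppsPageArray,
+ *                               bUnpinned, uiOrder, uiNumPages))
+ *     {
+ *         ppsPageArray = NULL;  // now owned by the deferred cleanup task
+ *     }
+ */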
+
+/* Get the GFP flags that we pass to the page allocator */
+static inline gfp_t
+_GetGFPFlags(IMG_BOOL bZero,
+             PVRSRV_DEVICE_NODE *psDevNode)
+{
+	struct device *psDev = psDevNode->psDevConfig->pvOSDevice;
+	gfp_t gfp_flags = GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC;
+
+#if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY)
+	/* Force use of HIGHMEM */
+	gfp_flags |= __GFP_HIGHMEM;
+
+	PVR_UNREFERENCED_PARAMETER(psDev);
+#else
+	if (psDev)
+	{
+#if defined(CONFIG_64BIT) || defined(CONFIG_ARM_LPAE) || defined(CONFIG_X86_PAE)
+		if (*psDev->dma_mask > DMA_BIT_MASK(32))
+		{
+			/* If our system is able to handle large addresses use highmem */
+			gfp_flags |= __GFP_HIGHMEM;
+		}
+		else if (*psDev->dma_mask == DMA_BIT_MASK(32))
+		{
+			/* Limit to 32 bit.
+			 * Achieved by setting __GFP_DMA32 for 64 bit systems */
+			gfp_flags |= __GFP_DMA32;
+		}
+		else
+		{
+			/* Limit to size of DMA zone. */
+			gfp_flags |= __GFP_DMA;
+		}
+#else
+		if (*psDev->dma_mask < DMA_BIT_MASK(32))
+		{
+			gfp_flags |= __GFP_DMA;
+		}
+		else
+		{
+			gfp_flags |= __GFP_HIGHMEM;
+		}
+#endif /* if defined(CONFIG_64BIT) || defined(CONFIG_ARM_LPAE) || defined(CONFIG_X86_PAE) */
+	}
+
+#endif /* if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) */
+
+	if (bZero)
+	{
+		gfp_flags |= __GFP_ZERO;
+	}
+
+	return gfp_flags;
+}
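+
+/* Worked example (illustrative only, assuming PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY
+ * is not defined): on a CONFIG_64BIT system whose device dma_mask is exactly
+ * DMA_BIT_MASK(32), _GetGFPFlags(IMG_TRUE, psDevNode) returns
+ * GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_DMA32 | __GFP_ZERO,
+ * i.e. zeroed pages constrained to the low 4GB of physical memory. */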
+
+/*
+ * @Function _PoisonDevicePage
+ *
+ * @Description  Poisons a device page. In the normal case the device page has
+ *               the same size as the OS page, so ui32DevPageOrder will be
+ *               equal to 0 and the page argument will point to one OS page
+ *               structure. In the case of Non4K pages the order will be
+ *               greater than 0 and the page argument will point to an array
+ *               of OS allocated pages.
+ *
+ * @Input psDevNode          pointer to the device object
+ * @Input page               array of pages allocated from the OS
+ * @Input ui32DevPageOrder   order of the page (same as the one used to allocate
+ *                           the page array by alloc_pages())
+ * @Input ui32CPUCacheFlags  CPU cache flags applied to the page
+ * @Input ui8PoisonValue     value used to poison the page
+ */
+static void
+_PoisonDevicePage(PVRSRV_DEVICE_NODE *psDevNode,
+                  struct page *page,
+                  IMG_UINT32 ui32DevPageOrder,
+                  IMG_UINT32 ui32CPUCacheFlags,
+                  IMG_BYTE ui8PoisonValue)
+{
+	IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd;
+	IMG_UINT32 ui32OsPageIdx;
+
+	for (ui32OsPageIdx = 0;
+	     ui32OsPageIdx < (1U << ui32DevPageOrder);
+	     ui32OsPageIdx++)
+	{
+		struct page *current_page = page + ui32OsPageIdx;
+		void *kvaddr = kmap_atomic(current_page);
+
+		if (PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags) ||
+		    PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags))
+		{
+			OSDeviceMemSet(kvaddr, ui8PoisonValue, PAGE_SIZE);
+		}
+		else
+		{
+			OSCachedMemSet(kvaddr, ui8PoisonValue, PAGE_SIZE);
+		}
+
+		sCPUPhysAddrStart.uiAddr = page_to_phys(current_page);
+		sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE;
+
+		OSCPUCacheFlushRangeKM(psDevNode,
+		                       kvaddr, kvaddr + PAGE_SIZE,
+		                       sCPUPhysAddrStart, sCPUPhysAddrEnd);
+
+		kunmap_atomic(kvaddr);
+	}
+}
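+
+/* Example: a 16KB device page built from 4KB OS pages has ui32DevPageOrder
+ * == 2, so the loop above kmaps and poisons (1U << 2) = 4 consecutive OS
+ * pages, flushing each PAGE_SIZE range so the poison pattern reaches RAM. */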
+
+/* Allocate and initialise the structure to hold the metadata of the allocation */
+static PVRSRV_ERROR
+_AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode,
+				  PMR_SIZE_T uiChunkSize,
+				  IMG_UINT32 ui32NumPhysChunks,
+				  IMG_UINT32 ui32NumVirtChunks,
+				  IMG_UINT32 uiLog2AllocPageSize,
+				  IMG_BOOL bZero,
+				  IMG_BOOL bIsCMA,
+				  IMG_BOOL bPoisonOnAlloc,
+				  IMG_BOOL bPoisonOnFree,
+				  IMG_BOOL bOnDemand,
+				  IMG_UINT32 ui32CPUCacheFlags,
+				  IMG_PID uiPid,
+				  PMR_OSPAGEARRAY_DATA **ppsPageArrayDataPtr)
+{
+	PVRSRV_ERROR eError;
+	PMR_SIZE_T uiSize = uiChunkSize * ui32NumVirtChunks;
+	IMG_UINT32 uiNumOSPageSizeVirtPages;
+	IMG_UINT32 uiNumDevPageSizeVirtPages;
+	PMR_OSPAGEARRAY_DATA *psPageArrayData;
+	IMG_UINT64 ui64DmaMask = 0;
+	PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+
+	/* Use of cast below is justified by the assertion that follows to
+	 * prove that no significant bits have been truncated */
+	uiNumOSPageSizeVirtPages = (IMG_UINT32) (((uiSize - 1) >> PAGE_SHIFT) + 1);
+	PVR_ASSERT(((PMR_SIZE_T) uiNumOSPageSizeVirtPages << PAGE_SHIFT) == uiSize);
+
+	uiNumDevPageSizeVirtPages = uiNumOSPageSizeVirtPages >> (uiLog2AllocPageSize - PAGE_SHIFT);
+
+	/* Allocate the struct to hold the metadata */
+	psPageArrayData = kmem_cache_alloc(g_psLinuxPageArray, GFP_KERNEL);
+	if (psPageArrayData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: OS refused the memory allocation for the private data.",
+				 __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e_freed_none;
+	}
+
+	/*
+	 * Allocate the page array
+	 *
+	 * We avoid tracking this memory because this structure might go into the page pool.
+	 * The OS can drain the pool asynchronously and when doing that we have to avoid
+	 * any potential deadlocks.
+	 *
+	 * In one scenario the process stats vmalloc hash table lock is held and then
+	 * the oom-killer softirq is trying to call _ScanObjectsInPagePool(), it must not
+	 * try to acquire the vmalloc hash table lock again.
+	 */
+	psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumDevPageSizeVirtPages);
+	if (psPageArrayData->pagearray == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e_free_kmem_cache;
+	}
+	else
+	{
+		if (bIsCMA)
+		{
+			/* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */
+			psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumDevPageSizeVirtPages);
+			if (psPageArrayData->dmavirtarray == NULL)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e_free_pagearray;
+			}
+
+			psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumDevPageSizeVirtPages);
+			if (psPageArrayData->dmaphysarray == NULL)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e_free_cpuvirtaddrarray;
+			}
+		}
+	}
+
+	if (psDevNode->psDevConfig && psDevNode->psDevConfig->pvOSDevice)
+	{
+		struct device *psDev = psDevNode->psDevConfig->pvOSDevice;
+		ui64DmaMask = *psDev->dma_mask;
+	}
+
+	/* Init metadata */
+	psPageArrayData->psDevNode = psDevNode;
+	psPageArrayData->uiPid = uiPid;
+	psPageArrayData->iNumOSPagesAllocated = 0;
+	psPageArrayData->uiTotalNumOSPages = uiNumOSPageSizeVirtPages;
+	psPageArrayData->uiLog2AllocPageSize = uiLog2AllocPageSize;
+	psPageArrayData->ui64DmaMask = ui64DmaMask;
+	psPageArrayData->bZero = bZero;
+	psPageArrayData->bIsCMA = bIsCMA;
+	psPageArrayData->bOnDemand = bOnDemand;
+	psPageArrayData->bUnpinned = IMG_FALSE;
+	psPageArrayData->bPoisonOnFree = bPoisonOnFree;
+	psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
+	psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags;
+
+	/* Indicate whether this is an allocation with default caching attribute (i.e. cached) or not */
+	if (PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags) ||
+		PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags))
+	{
+		psPageArrayData->bUnsetMemoryType = IMG_TRUE;
+	}
+	else
+	{
+		psPageArrayData->bUnsetMemoryType = IMG_FALSE;
+	}
+
+	*ppsPageArrayDataPtr = psPageArrayData;
+	return PVRSRV_OK;
+
+/* Error path */
+e_free_cpuvirtaddrarray:
+	OSFreeMemNoStats(psPageArrayData->dmavirtarray);
+
+e_free_pagearray:
+	OSFreeMemNoStats(psPageArrayData->pagearray);
+
+e_free_kmem_cache:
+	kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
+	PVR_DPF((PVR_DBG_ERROR,
+			 "%s: OS refused the memory allocation for the page pointer table. "
+			 "Did you ask for too much?",
+			 __func__));
+
+e_freed_none:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
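+
+/* Sizing example (illustrative numbers): uiChunkSize = 64KB with
+ * ui32NumVirtChunks = 32 gives uiSize = 2MB; with a 4KB OS page
+ * (PAGE_SHIFT = 12) that is uiNumOSPageSizeVirtPages = 512, and with
+ * uiLog2AllocPageSize = 16 (64KB device pages) the page array holds
+ * uiNumDevPageSizeVirtPages = 512 >> 4 = 32 entries. */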
+
+static inline void
+_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode,
+					   struct page **ppsPage,
+					   IMG_UINT32 uiNumPages,
+					   IMG_BOOL bFlush)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_RETRY;
+	void * pvAddr;
+
+#if !defined(__arm__) && !defined(__arm64__) && !defined(__aarch64__)
+	if ((uiNumPages << PAGE_SHIFT) >= PVR_DIRTY_BYTES_FLUSH_THRESHOLD)
+	{
+		/* May fail so fallback to range-based flush */
+		eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+	}
+#endif
+
+	if (eError != PVRSRV_OK)
+	{
+
+		if (OSCPUCacheOpAddressType() == PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+		{
+			pgprot_t pgprot = PAGE_KERNEL;
+
+			IMG_UINT32 uiNumToClean = uiNumPages;
+			struct page **ppsCleanArray = ppsPage;
+
+			/* Map and flush the pages.
+			 * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES
+			 * pages at a time. */
+			while (uiNumToClean != 0)
+			{
+				IMG_UINT32 uiToClean = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES,
+				                           uiNumToClean);
+				IMG_CPU_PHYADDR sUnused =
+					{ IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) };
+
+				pvAddr = vm_map_ram(ppsCleanArray, uiToClean, -1, pgprot);
+				if (!pvAddr)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							"Unable to flush page cache for new allocation, skipping flush."));
+					return;
+				}
+
+				CacheOpExec(psDevNode,
+							pvAddr,
+							pvAddr + uiToClean * PAGE_SIZE,
+							sUnused,
+							sUnused,
+							PVRSRV_CACHE_OP_FLUSH);
+
+				vm_unmap_ram(pvAddr, uiToClean);
+
+				ppsCleanArray = &(ppsCleanArray[uiToClean]);
+				uiNumToClean -= uiToClean;
+			}
+		}
+		else
+		{
+			IMG_UINT32 ui32Idx;
+
+			for (ui32Idx = 0; ui32Idx < uiNumPages; ++ui32Idx)
+			{
+				IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd;
+
+				pvAddr = kmap(ppsPage[ui32Idx]);
+				sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]);
+				sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE;
+
+				/* If we're zeroing, we need to make sure the cleared memory is pushed out
+				 * of the cache before the cache lines are invalidated */
+				CacheOpExec(psDevNode,
+							pvAddr,
+							pvAddr + PAGE_SIZE,
+							sCPUPhysAddrStart,
+							sCPUPhysAddrEnd,
+							PVRSRV_CACHE_OP_FLUSH);
+
+				kunmap(ppsPage[ui32Idx]);
+			}
+		}
+
+	}
+}
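+
+/* Summary of the strategy above: for dirty ranges of at least
+ * PVR_DIRTY_BYTES_FLUSH_THRESHOLD bytes a single global cache flush is
+ * attempted first (not on ARM, where that path is compiled out); only if
+ * that fails does the code fall back to ranged maintenance, virtually
+ * mapped in batches of PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES pages or, on
+ * physically-addressed cache ops, one kmap'd page at a time. */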
+
+/* Changes the caching attribute of pages on x86 systems and takes care of
+ * cache maintenance. This function is supposed to be called once for pages that
+ * came from alloc_pages(). It expects an array of OS page sized pages!
+ *
+ * Flush/Invalidate pages in case the allocation is not cached. Necessary to
+ * remove pages from the cache that might be flushed later and corrupt memory. */
+static inline PVRSRV_ERROR
+_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode,
+					   struct page **ppsPage,
+					   IMG_UINT32 uiNumPages,
+					   IMG_BOOL bFlush,
+					   IMG_UINT32 ui32CPUCacheFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bCPUCached = PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags);
+	IMG_BOOL bCPUUncached = PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags);
+	IMG_BOOL bCPUWriteCombine = PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags);
+
+	if (ppsPage != NULL && uiNumPages != 0)
+	{
+#if defined(CONFIG_X86)
+		/* On x86 we have to set page cache attributes for non-cached pages.
+		 * The call is implicitly taking care of all flushing/invalidating
+		 * and therefore we can skip the usual cache maintenance after this. */
+		if (bCPUUncached || bCPUWriteCombine)
+		{
+			/* On x86 if we already have a mapping (e.g. low memory) we need
+			 * to change the mode of the current mapping before we map it
+			 * ourselves */
+			int ret = IMG_FALSE;
+			PVR_UNREFERENCED_PARAMETER(bFlush);
+
+			switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
+			{
+				case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+					ret = set_pages_array_uc(ppsPage, uiNumPages);
+					if (ret)
+					{
+						eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+						PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to UC failed, returned %d", ret));
+					}
+					break;
+
+				case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+					ret = set_pages_array_wc(ppsPage, uiNumPages);
+					if (ret)
+					{
+						eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+						PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to WC failed, returned %d", ret));
+					}
+					break;
+
+				case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+					break;
+
+				default:
+					break;
+			}
+		}
+		else
+#endif
+		{
+			if ( bFlush ||
+				 bCPUUncached || bCPUWriteCombine ||
+				 (bCPUCached && PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags)) )
+			{
+				/*  We can be given pages which still remain in the cache.
+					In order to make sure that the data we write through our mappings
+					doesn't get overwritten by later cache evictions we invalidate the
+					pages that are given to us.
+
+					Note:
+					This still seems to be true if we request cold pages, it's just less
+					likely to be in the cache. */
+				_ApplyCacheMaintenance(psDevNode,
+									   ppsPage,
+									   uiNumPages,
+									   bFlush);
+			}
+		}
+	}
+
+	return eError;
+}
+
+/* Same as _AllocOSPage except it uses the DMA framework to perform the allocation.
+ * uiPageIndex is expected to be the pagearray index at which to store the higher-order page. */
+static PVRSRV_ERROR
+_AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+				gfp_t gfp_flags,
+				IMG_UINT32 ui32AllocOrder,
+				IMG_UINT32 ui32MinOrder,
+				IMG_UINT32 uiPageIndex)
+{
+	void *virt_addr;
+	struct page *page;
+	dma_addr_t bus_addr;
+	IMG_UINT32 uiAllocIsMisaligned;
+	size_t alloc_size = PAGE_SIZE << ui32AllocOrder;
+	struct device *dev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice;
+	PVR_ASSERT(ui32AllocOrder == ui32MinOrder);
+
+	do
+	{
+		DisableOOMKiller();
+#if defined(CONFIG_L4) || defined(PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC)
+		virt_addr = NULL;
+#else
+		virt_addr = dma_alloc_coherent(dev, alloc_size, &bus_addr, gfp_flags);
+#endif
+		if (virt_addr == NULL)
+		{
+			/* The idea here is primarily to support some older kernels with
+			   broken or non-functioning DMA/CMA implementations (< Linux-3.4)
+			   and also to handle DMA/CMA allocation failures by attempting a
+			   normal page allocation. We expect dma_alloc_coherent() to have
+			   already attempted this internally before failing, but it does
+			   no harm to retry the allocation ourselves */
+			page = alloc_pages(gfp_flags, ui32AllocOrder);
+			if (page)
+			{
+				/* Taint bus_addr as alloc_page, needed when freeing;
+				   also acquire the low memory page address only, this
+				   prevents mapping possible high memory pages into
+				   kernel virtual address space which might exhaust
+				   the VMALLOC address space */
+				bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page));
+				virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE;
+			}
+			else
+			{
+				EnableOOMKiller();
+				return PVRSRV_ERROR_OUT_OF_MEMORY;
+			}
+		}
+		else
+		{
+#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
+			page = pfn_to_page(bus_addr >> PAGE_SHIFT);
+#else
+			/* Assumes bus address space is identical to physical address space */
+			page = phys_to_page(bus_addr);
+#endif
+		}
+		EnableOOMKiller();
+
+		/* Physical allocation alignment is handled transparently behind the
+		   scenes: if the allocated buffer address does not meet its alignment
+		   requirement, we over-allocate using the next power-of-two order and
+		   report aligned-adjusted values back to meet the requested alignment
+		   constraint. Evidently we waste memory by doing this, so we should
+		   only do so if we do not initially meet the alignment constraint. */
+		uiAllocIsMisaligned = DMA_GET_ADDR(bus_addr) & ((PAGE_SIZE<<ui32MinOrder)-1);
+		if (uiAllocIsMisaligned || ui32AllocOrder > ui32MinOrder)
+		{
+			IMG_BOOL bUsedAllocPages = DMA_IS_ALLOCPG_ADDR(bus_addr);
+			if (ui32AllocOrder == ui32MinOrder)
+			{
+				if (bUsedAllocPages)
+				{
+					__free_pages(page, ui32AllocOrder);
+				}
+				else
+				{
+					dma_free_coherent(dev, alloc_size, virt_addr, bus_addr);
+				}
+
+				ui32AllocOrder = ui32AllocOrder + 1;
+				alloc_size = PAGE_SIZE << ui32AllocOrder;
+
+				PVR_ASSERT(uiAllocIsMisaligned != 0);
+			}
+			else
+			{
+				size_t align_adjust = PAGE_SIZE << ui32MinOrder;
+
+				/* Adjust virtual/bus addresses to meet alignment */
+				bus_addr = bUsedAllocPages ? page_to_phys(page) : bus_addr;
+				align_adjust = PVR_ALIGN((size_t)bus_addr, align_adjust);
+				align_adjust -= (size_t)bus_addr;
+
+				if (align_adjust)
+				{
+					if (bUsedAllocPages)
+					{
+						page += align_adjust >> PAGE_SHIFT;
+						bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page));
+						virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE;
+					}
+					else
+					{
+						bus_addr += align_adjust;
+						virt_addr += align_adjust;
+#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
+						page = pfn_to_page(bus_addr >> PAGE_SHIFT);
+#else
+						/* Assumes bus address space is identical to physical address space */
+						page = phys_to_page(bus_addr);
+#endif
+					}
+
+					/* Store adjustments in PAGE_SIZE counts */
+					align_adjust = align_adjust >> PAGE_SHIFT;
+					bus_addr = DMA_SET_ALIGN_ADJUSTMENT(bus_addr, align_adjust);
+				}
+
+				/* Taint bus_addr due to over-allocation, allows us to free
+				 * memory correctly */
+				bus_addr = DMA_SET_ADJUSTED_ADDR(bus_addr);
+				uiAllocIsMisaligned = 0;
+			}
+		}
+	} while (uiAllocIsMisaligned);
+
+	/* Convert OSPageSize-based index into DevicePageSize-based index */
+	psPageArrayData->dmavirtarray[uiPageIndex] = virt_addr;
+	psPageArrayData->dmaphysarray[uiPageIndex] = bus_addr;
+	psPageArrayData->pagearray[uiPageIndex] = page;
+
+	return PVRSRV_OK;
+}
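+
+/* Alignment example (illustrative values): with ui32MinOrder = 4 (64KB
+ * device pages) a bus address of 0x8001C000 fails the
+ * ((PAGE_SIZE << 4) - 1) mask test above, so the buffer is released and
+ * re-requested at ui32AllocOrder = 5 (128KB); the retry then rounds the
+ * address up with PVR_ALIGN() and records the pages skipped via
+ * DMA_SET_ALIGN_ADJUSTMENT() so that _FreeOSPage_CMA() can undo the
+ * adjustment when freeing. */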
+
+/* Allocates a page of order uiAllocOrder and stores it in the page array ppsPage at
+ * position uiPageIndex.
+ *
+ * If the order is higher than 0, the page is split into multiple order-0 pages which
+ * are stored at positions uiPageIndex to uiPageIndex + (1 << uiAllocOrder) - 1.
+ *
+ * This function is supposed to be used for uiMinOrder == 0 only! */
+static PVRSRV_ERROR
+_AllocOSPage(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+			gfp_t gfp_flags,
+			IMG_UINT32 uiAllocOrder,
+			IMG_UINT32 uiMinOrder,
+			IMG_UINT32 uiPageIndex)
+{
+	struct page *psPage;
+	IMG_UINT32 ui32Count;
+
+	/* Sanity check. If it fails we write into the wrong places in the array. */
+	PVR_ASSERT(uiMinOrder == 0);
+
+	/* Allocate the page */
+	DisableOOMKiller();
+	psPage = alloc_pages(gfp_flags, uiAllocOrder);
+	EnableOOMKiller();
+
+	if (psPage == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+	/* In case we need to, split the higher order page;
+	   this should only be used for order-0 allocations
+	   as higher order allocations should use DMA/CMA */
+	if (uiAllocOrder != 0)
+	{
+		split_page(psPage, uiAllocOrder);
+	}
+#endif
+
+	/* Store the page (or multiple split pages) in the page array */
+	for (ui32Count = 0; ui32Count < (1 << uiAllocOrder); ui32Count++)
+	{
+		psPageArrayData->pagearray[uiPageIndex + ui32Count] = &(psPage[ui32Count]);
+	}
+
+	return PVRSRV_OK;
+}
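+
+/* Example: a successful call with uiAllocOrder = 2 splits the order-2 page
+ * and stores the four resulting order-0 struct page pointers at
+ * pagearray[uiPageIndex] .. pagearray[uiPageIndex + 3]. */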
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+
+static inline void _AddMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+                                               struct page *psPage)
+{
+	IMG_CPU_PHYADDR sCPUPhysAddr = { page_to_phys(psPage) };
+	PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+	                             NULL, sCPUPhysAddr,
+	                             1 << psPageArrayData->uiLog2AllocPageSize,
+	                             NULL, psPageArrayData->uiPid);
+}
+
+static inline void _RemoveMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+                                                  struct page *psPage)
+{
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+	                                (IMG_UINT64) page_to_phys(psPage),
+	                                psPageArrayData->uiPid);
+}
+
+#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
+
+static inline void _IncrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid)
+{
+	PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+	                            uiSize, uiPid);
+}
+
+static inline void _DecrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid)
+{
+	PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+	                            uiSize, uiPid);
+}
+
+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
+#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */
+
+/* Allocation of OS pages: We may allocate 2^N order pages at a time for two reasons.
+ *
+ * Firstly to support device pages which are larger than OS pages. By asking the OS
+ * for 2^N order OS pages at a time we guarantee the device page is contiguous.
+ *
+ * Secondly for performance, where we may ask for 2^N order pages to reduce the number
+ * of calls to alloc_pages, and thus reduce time for huge allocations.
+ *
+ * Regardless of the page order requested, we need to break them down to track
+ * individual OS pages. The maximum order requested is increased if all max order
+ * allocations were successful. If any request fails we reduce the max order.
+ */
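+
+/* Example of the adaptive order (illustrative, assuming the pool is empty and
+ * the request exceeds PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD): with
+ * ui32MinOrder = 0 and g_uiMaxOrder = 2, a 1024-page request is served by
+ * 256 order-2 alloc_pages() calls; if one fails, the order is halved via
+ * MAX(ui32Order >> 1, ui32MinOrder) and g_uiMaxOrder shrinks with it, while
+ * a fully successful pass lets g_uiMaxOrder grow by one, capped at
+ * PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM. */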
+static PVRSRV_ERROR
+_AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 uiArrayIndex = 0;
+	IMG_UINT32 ui32Order;
+	IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+	IMG_BOOL bIncreaseMaxOrder = IMG_TRUE;
+
+	IMG_UINT32 ui32NumPageReq;
+	IMG_UINT32 uiPagesToAlloc;
+	IMG_UINT32 uiPagesFromPool = 0;
+
+	gfp_t gfp_flags = _GetGFPFlags(ui32MinOrder ? psPageArrayData->bZero : IMG_FALSE, /* Zero all pages later as batch */
+	                                      psPageArrayData->psDevNode);
+	gfp_t ui32GfpFlags;
+	gfp_t ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY);
+
+	struct page **ppsPageArray = psPageArrayData->pagearray;
+	struct page **ppsPageAttributeArray = NULL;
+
+	uiPagesToAlloc = psPageArrayData->uiTotalNumOSPages;
+
+	/* Try to get pages from the pool since it is faster;
+	   the page pool currently only supports zero-order pages
+	   thus currently excludes all DMA/CMA allocated memory */
+	_GetPagesFromPoolLocked(psPageArrayData->psDevNode,
+							psPageArrayData->ui32CPUCacheFlags,
+							uiPagesToAlloc,
+							ui32MinOrder,
+							psPageArrayData->bZero,
+							ppsPageArray,
+							&uiPagesFromPool);
+
+	uiArrayIndex = uiPagesFromPool;
+
+	if ((uiPagesToAlloc - uiPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD)
+	{	/* Small allocations: ask for one device page at a time */
+		ui32Order = ui32MinOrder;
+		bIncreaseMaxOrder = IMG_FALSE;
+	}
+	else
+	{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+		/* Large zero-order or non-zero-order allocations: ask for
+		   MAX(max-order, min-order) order pages at a time; alloc
+		   failures throttle this down to min-order allocations */
+		ui32Order = MAX(g_uiMaxOrder, ui32MinOrder);
+#else
+		/* Because split_page() is not available on older kernels
+		   we cannot mix-and-match any-order pages in the PMR;
+		   only same-order pages must be present in the page array.
+		   So we unconditionally force the use of ui32MinOrder on
+		   these older kernels */
+		ui32Order = ui32MinOrder;
+#if defined(DEBUG)
+		if (! psPageArrayData->bIsCMA)
+		{
+			/* Sanity check that this is zero */
+			PVR_ASSERT(! ui32Order);
+		}
+#endif
+#endif
+	}
+
+	/* Only when asking for more contiguity than we actually need do we let the allocation fail */
+	ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+	ui32NumPageReq = (1 << ui32Order);
+
+	while (uiArrayIndex < uiPagesToAlloc)
+	{
+		IMG_UINT32 ui32PageRemain = uiPagesToAlloc - uiArrayIndex;
+
+		while (ui32NumPageReq > ui32PageRemain)
+		{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+			/* The number of pages to request is larger than the number
+			   remaining, so ask for fewer and never over-allocate */
+			ui32Order = MAX(ui32Order >> 1, ui32MinOrder);
+#else
+			/* The number of pages to request is larger than the number
+			   remaining; do nothing and thus over-allocate, as we do not
+			   support mixing any-order pages in the PMR page array on
+			   older kernels (this simplifies the page free logic) */
+			PVR_ASSERT(ui32Order == ui32MinOrder);
+#endif
+			ui32NumPageReq = (1 << ui32Order);
+			ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+		}
+
+		if (psPageArrayData->bIsCMA)
+		{
+			/* As the DMA/CMA framework rounds-up request to the
+			   next power-of-two, we request multiple uiMinOrder
+			   pages to satisfy allocation request in order to
+			   minimise wasting memory */
+			eError = _AllocOSPage_CMA(psPageArrayData,
+									  ui32GfpFlags,
+									  ui32Order,
+									  ui32MinOrder,
+									  uiArrayIndex >> ui32MinOrder);
+		}
+		else
+		{
+			/* Allocate (1 << ui32Order) pages at uiArrayIndex */
+			eError = _AllocOSPage(psPageArrayData,
+								  ui32GfpFlags,
+								  ui32Order,
+								  ui32MinOrder,
+								  uiArrayIndex);
+		}
+
+		if (eError == PVRSRV_OK)
+		{
+			/* Successful request. Move onto next. */
+			uiArrayIndex += ui32NumPageReq;
+		}
+		else
+		{
+			if (ui32Order > ui32MinOrder)
+			{
+				/* Last request failed. Let's ask for less next time */
+				ui32Order = MAX(ui32Order >> 1, ui32MinOrder);
+				bIncreaseMaxOrder = IMG_FALSE;
+				ui32NumPageReq = (1 << ui32Order);
+				ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+				g_uiMaxOrder = ui32Order;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
+				/* We should not trigger this code path in older kernels,
+				   this is enforced by ensuring ui32Order == ui32MinOrder */
+				PVR_ASSERT(ui32Order == ui32MinOrder);
+#endif
+			}
+			else
+			{
+				/* Failed to alloc pages at required contiguity. Failed allocation */
+				PVR_DPF((PVR_DBG_ERROR, "%s: %s failed to honour request at %u of %u, flags = %x, order = %u (%s)",
+								__func__,
+								psPageArrayData->bIsCMA ? "dma_alloc_coherent" : "alloc_pages",
+								uiArrayIndex,
+								uiPagesToAlloc,
+								ui32GfpFlags,
+								ui32Order,
+								PVRSRVGetErrorString(eError)));
+				eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+				goto e_free_pages;
+			}
+		}
+	}
+
+	if (bIncreaseMaxOrder && (g_uiMaxOrder < PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM))
+	{	/* All successful allocations on max order. Let's ask for more next time */
+		g_uiMaxOrder++;
+	}
+
+	/* Construct table of page pointers to apply attributes */
+	ppsPageAttributeArray = &ppsPageArray[uiPagesFromPool];
+	if (psPageArrayData->bIsCMA)
+	{
+		IMG_UINT32 uiIdx, uiIdy, uiIdz;
+
+		ppsPageAttributeArray = OSAllocMem(sizeof(struct page *) * uiPagesToAlloc);
+		if (ppsPageAttributeArray == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed OSAllocMem() for page attributes table"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e_free_pages;
+		}
+
+		for (uiIdx = 0; uiIdx < uiPagesToAlloc; uiIdx += ui32NumPageReq)
+		{
+			uiIdy = uiIdx >> ui32Order;
+			for (uiIdz = 0; uiIdz < ui32NumPageReq; uiIdz++)
+			{
+				ppsPageAttributeArray[uiIdx+uiIdz] = psPageArrayData->pagearray[uiIdy];
+				ppsPageAttributeArray[uiIdx+uiIdz] += uiIdz;
+			}
+		}
+	}
+
+	if (psPageArrayData->bZero && ui32MinOrder == 0)
+	{
+		eError = _ZeroPageArray(uiPagesToAlloc - uiPagesFromPool,
+					   ppsPageAttributeArray,
+					   PAGE_KERNEL);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (fast)"));
+			goto e_free_pages;
+		}
+	}
+
+
+	/* Do the cache management as required */
+	eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
+									ppsPageAttributeArray,
+									uiPagesToAlloc - uiPagesFromPool,
+									psPageArrayData->bZero,
+									psPageArrayData->ui32CPUCacheFlags);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
+		goto e_free_pages;
+	}
+	else
+	{
+		if (psPageArrayData->bIsCMA)
+		{
+			OSFreeMem(ppsPageAttributeArray);
+		}
+	}
+
+	/* Update metadata */
+	psPageArrayData->iNumOSPagesAllocated = psPageArrayData->uiTotalNumOSPages;
+
+	{
+		IMG_UINT32 ui32NumPages =
+		        psPageArrayData->iNumOSPagesAllocated >> ui32MinOrder;
+		IMG_UINT32 i;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+		for (i = 0; i < ui32NumPages; i++)
+		{
+			_AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]);
+		}
+#else
+		_IncrMemAllocStat_UmaPages(uiPagesToAlloc * PAGE_SIZE,
+		                           psPageArrayData->uiPid);
+#endif
+#endif
+
+		if (psPageArrayData->bPoisonOnAlloc)
+		{
+			for (i = 0; i < ui32NumPages; i++)
+			{
+				_PoisonDevicePage(psPageArrayData->psDevNode,
+				                  ppsPageArray[i],
+				                  ui32MinOrder,
+				                  psPageArrayData->ui32CPUCacheFlags,
+				                  PVRSRV_POISON_ON_ALLOC_VALUE);
+			}
+		}
+	}
+
+	return PVRSRV_OK;
+
+/* Error path */
+e_free_pages:
+	{
+		IMG_UINT32 ui32PageToFree;
+
+		if (psPageArrayData->bIsCMA)
+		{
+			IMG_UINT32 uiDevArrayIndex = uiArrayIndex >> ui32Order;
+			IMG_UINT32 uiDevPageSize = PAGE_SIZE << ui32Order;
+			PVR_ASSERT(ui32Order == ui32MinOrder);
+
+			if (ppsPageAttributeArray)
+			{
+				OSFreeMem(ppsPageAttributeArray);
+			}
+
+			for (ui32PageToFree = 0; ui32PageToFree < uiDevArrayIndex; ui32PageToFree++)
+			{
+				_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+								uiDevPageSize,
+								ui32MinOrder,
+								psPageArrayData->dmavirtarray[ui32PageToFree],
+								psPageArrayData->dmaphysarray[ui32PageToFree],
+								ppsPageArray[ui32PageToFree]);
+				psPageArrayData->dmaphysarray[ui32PageToFree]= (dma_addr_t)0;
+				psPageArrayData->dmavirtarray[ui32PageToFree] = NULL;
+				ppsPageArray[ui32PageToFree] = NULL;
+			}
+		}
+		else
+		{
+			/* Free the pages we got from the pool */
+			for (ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
+			{
+				_FreeOSPage(ui32MinOrder,
+							psPageArrayData->bUnsetMemoryType,
+							ppsPageArray[ui32PageToFree]);
+				ppsPageArray[ui32PageToFree] = NULL;
+			}
+
+			for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < uiArrayIndex; ui32PageToFree++)
+			{
+				_FreeOSPage(ui32MinOrder, IMG_FALSE, ppsPageArray[ui32PageToFree]);
+				ppsPageArray[ui32PageToFree] = NULL;
+			}
+		}
+
+		return eError;
+	}
+}
+
+/* Allocation of OS pages: This function is used for sparse allocations.
+ *
+ * Sparse allocations provide only a proportion of sparse physical backing within the total
+ * virtual range. */
+static PVRSRV_ERROR
+_AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+					 IMG_UINT32 *puiAllocIndices,
+					 IMG_UINT32 uiPagesToAlloc)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+	struct page **ppsPageArray = psPageArrayData->pagearray;
+	IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+	IMG_UINT32 uiPagesFromPool = 0;
+	IMG_UINT32 uiNumOSPagesToAlloc = uiPagesToAlloc * (1 << uiOrder);
+	IMG_UINT32 uiTotalNumAllocPages = psPageArrayData->uiTotalNumOSPages >> uiOrder;
+	gfp_t ui32GfpFlags = _GetGFPFlags(uiOrder ? psPageArrayData->bZero :
+									  IMG_FALSE, /* Zero pages later as batch */
+									  psPageArrayData->psDevNode);
+
+	/* We use this page array to receive pages from the pool and then reuse it
+	 * afterwards to store pages that need their cache attribute changed on x86 */
+	struct page **ppsTempPageArray;
+	IMG_UINT32 uiTempPageArrayIndex = 0;
+
+	/* Allocate the temporary page array that we need here to receive pages
+	 * from the pool and to store pages that need their caching attributes changed.
+	 * Allocate number of OS pages to be able to use the attribute function later. */
+	ppsTempPageArray = OSAllocMem(sizeof(struct page*) * uiNumOSPagesToAlloc);
+	if (ppsTempPageArray == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed metadata allocation", __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e_exit;
+	}
+
+	/* Check that the requested number of pages fits in the page array */
+	if (uiTotalNumAllocPages <
+			((psPageArrayData->iNumOSPagesAllocated >> uiOrder) + uiPagesToAlloc) )
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Trying to allocate more pages (Order %u) than this buffer can handle, "
+				 "Request + Allocated < Max! Request %u, Allocated %u, Max %u.",
+				 __func__,
+				 uiOrder,
+				 uiPagesToAlloc,
+				 psPageArrayData->iNumOSPagesAllocated >> uiOrder,
+				 uiTotalNumAllocPages));
+		eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+		goto e_free_temp_array;
+	}
+
+	/* Try to get pages from the pool since it is faster */
+	_GetPagesFromPoolLocked(psPageArrayData->psDevNode,
+							psPageArrayData->ui32CPUCacheFlags,
+							uiPagesToAlloc,
+							uiOrder,
+							psPageArrayData->bZero,
+							ppsTempPageArray,
+							&uiPagesFromPool);
+
+	/* Allocate pages from the OS or move the pages that we got from the pool
+	 * to the page array */
+	for (i = 0; i < uiPagesToAlloc; i++)
+	{
+		/* Check if the indices we are allocating are in range */
+		if (puiAllocIndices[i] >= uiTotalNumAllocPages)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Given alloc index %u at %u is larger than page array %u.",
+					 __func__,
+					 i,
+					 puiAllocIndices[i],
+					 uiTotalNumAllocPages));
+			eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+			goto e_free_pages;
+		}
+
+		/* Check that there is not already a page allocated at this position */
+		if (NULL != ppsPageArray[puiAllocIndices[i]])
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Mapping number %u at page array index %u already exists. "
+					 "Page struct %p",
+					 __func__,
+					 i,
+					 puiAllocIndices[i],
+					 ppsPageArray[puiAllocIndices[i]]));
+			eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
+			goto e_free_pages;
+		}
+
+		/* Finally assign a page to the array.
+		 * Either from the pool or allocate a new one. */
+		if (uiPagesFromPool != 0)
+		{
+			uiPagesFromPool--;
+			ppsPageArray[puiAllocIndices[i]] = ppsTempPageArray[uiPagesFromPool];
+		}
+		else
+		{
+			if (psPageArrayData->bIsCMA)
+			{
+
+				/* As the DMA/CMA framework rounds-up request to the
+				   next power-of-two, we request multiple uiMinOrder
+				   pages to satisfy allocation request in order to
+				   minimise wasting memory */
+				eError = _AllocOSPage_CMA(psPageArrayData,
+										  ui32GfpFlags,
+										  uiOrder,
+										  uiOrder,
+										  puiAllocIndices[i]);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "Failed to alloc CMA pages"));
+					goto e_free_pages;
+				}
+			}
+			else
+			{
+				DisableOOMKiller();
+				ppsPageArray[puiAllocIndices[i]] = alloc_pages(ui32GfpFlags, uiOrder);
+				EnableOOMKiller();
+			}
+
+			if (ppsPageArray[puiAllocIndices[i]] != NULL)
+			{
+				/* Reuse the temp page array to record the new pages; by now all
+				 * pool pages have been consumed so its slots are free again */
+
+				if (psPageArrayData->bIsCMA)
+				{
+					IMG_UINT32 idx;
+					struct page* psPageAddr;
+
+					psPageAddr = ppsPageArray[puiAllocIndices[i]];
+
+					for (idx = 0; idx < (1 << uiOrder); idx++)
+					{
+						ppsTempPageArray[uiTempPageArrayIndex + idx] = psPageAddr;
+						psPageAddr++;
+					}
+					uiTempPageArrayIndex += (1 << uiOrder);
+				}
+				else
+				{
+					ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[puiAllocIndices[i]];
+					uiTempPageArrayIndex++;
+				}
+			}
+			else
+			{
+				/* Failed to alloc pages at required contiguity. Failed allocation */
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u",
+						 __func__,
+						 i,
+						 uiPagesToAlloc,
+						 ui32GfpFlags,
+						 uiOrder));
+				eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+				goto e_free_pages;
+			}
+		}
+	}
+
+	if (psPageArrayData->bZero && uiOrder == 0)
+	{
+		eError = _ZeroPageArray(uiTempPageArrayIndex,
+		                        ppsTempPageArray,
+		                        PAGE_KERNEL);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (sparse)"));
+			goto e_free_pages;
+		}
+	}
+
+	/* Do the cache management as required */
+	eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
+									ppsTempPageArray,
+									uiTempPageArrayIndex,
+									psPageArrayData->bZero,
+									psPageArrayData->ui32CPUCacheFlags);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
+		goto e_free_pages;
+	}
+
+	/* Update metadata */
+	psPageArrayData->iNumOSPagesAllocated += uiNumOSPagesToAlloc;
+
+	/* Free temporary page array */
+	OSFreeMem(ppsTempPageArray);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	for (i = 0; i < uiPagesToAlloc; i++)
+	{
+		_AddMemAllocRecord_UmaPages(psPageArrayData,
+		                            ppsPageArray[puiAllocIndices[i]]);
+	}
+#else
+	_IncrMemAllocStat_UmaPages(uiNumOSPagesToAlloc * PAGE_SIZE,
+	                           psPageArrayData->uiPid);
+#endif
+#endif
+
+	if (psPageArrayData->bPoisonOnAlloc)
+	{
+		for (i = 0; i < uiPagesToAlloc; i++)
+		{
+			_PoisonDevicePage(psPageArrayData->psDevNode,
+			                  ppsPageArray[puiAllocIndices[i]],
+			                  uiOrder,
+			                  psPageArrayData->ui32CPUCacheFlags,
+			                  PVRSRV_POISON_ON_ALLOC_VALUE);
+		}
+	}
+
+	return PVRSRV_OK;
+
+/* Error path */
+e_free_pages:
+	{
+		IMG_UINT32 ui32PageToFree;
+
+		if (psPageArrayData->bIsCMA)
+		{
+			IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;
+
+			for (ui32PageToFree = 0; ui32PageToFree < i; ui32PageToFree++)
+			{
+				_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+								uiDevPageSize,
+								uiOrder,
+								psPageArrayData->dmavirtarray[puiAllocIndices[ui32PageToFree]],
+								psPageArrayData->dmaphysarray[puiAllocIndices[ui32PageToFree]],
+								ppsPageArray[puiAllocIndices[ui32PageToFree]]);
+				psPageArrayData->dmaphysarray[puiAllocIndices[ui32PageToFree]]= (dma_addr_t)0;
+				psPageArrayData->dmavirtarray[puiAllocIndices[ui32PageToFree]] = NULL;
+				ppsPageArray[puiAllocIndices[ui32PageToFree]] = NULL;
+			}
+		}
+		else
+		{
+			/* Free the pages we got from the pool */
+			for (ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
+			{
+				_FreeOSPage(0,
+							psPageArrayData->bUnsetMemoryType,
+							ppsTempPageArray[ui32PageToFree]);
+			}
+
+			/* Free the pages we just allocated from the OS */
+			for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < i; ui32PageToFree++)
+			{
+				_FreeOSPage(0,
+							IMG_FALSE,
+							ppsPageArray[puiAllocIndices[ui32PageToFree]]);
+			}
+
+			/* Reset all page array entries that have been set so far*/
+			for (ui32PageToFree = 0; ui32PageToFree < i; ui32PageToFree++)
+			{
+				ppsPageArray[puiAllocIndices[ui32PageToFree]] = NULL;
+			}
+		}
+	}
+
+e_free_temp_array:
+	OSFreeMem(ppsTempPageArray);
+
+e_exit:
+	return eError;
+}
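+
+/* Sparse example (illustrative): with puiAllocIndices = {1, 5, 9} and
+ * uiPagesToAlloc = 3 on a 16-slot PMR, only ppsPageArray[1], [5] and [9]
+ * are populated. Pool pages are consumed from the back of ppsTempPageArray
+ * first; once the pool is exhausted, the remainder comes from alloc_pages()
+ * (or the DMA/CMA path) and is recorded in the temp array for batch zeroing
+ * and cache maintenance. */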
+
+/* Allocate pages for a given page array.
+ *
+ * The executed allocation path depends whether an array with allocation
+ * indices has been passed or not */
+static PVRSRV_ERROR
+_AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+			  IMG_UINT32 *puiAllocIndices,
+			  IMG_UINT32 uiPagesToAlloc)
+{
+	PVRSRV_ERROR eError;
+	struct page **ppsPageArray;
+
+	/* Sanity checks */
+	PVR_ASSERT(NULL != psPageArrayData);
+	if (psPageArrayData->bIsCMA)
+	{
+		PVR_ASSERT(psPageArrayData->dmaphysarray != NULL);
+		PVR_ASSERT(psPageArrayData->dmavirtarray != NULL);
+	}
+	PVR_ASSERT(psPageArrayData->pagearray != NULL);
+	PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated);
+
+	ppsPageArray = psPageArrayData->pagearray;
+
+	/* Go the sparse alloc path if we have an array with alloc indices.*/
+	if (puiAllocIndices != NULL)
+	{
+		eError = _AllocOSPages_Sparse(psPageArrayData,
+									  puiAllocIndices,
+									  uiPagesToAlloc);
+	}
+	else
+	{
+		eError = _AllocOSPages_Fast(psPageArrayData);
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e_exit;
+	}
+
+	_DumpPageArray(ppsPageArray,
+	               psPageArrayData->uiTotalNumOSPages >>
+	               (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) );
+
+	PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData));
+	return PVRSRV_OK;
+
+e_exit:
+	return eError;
+}
+
+/* Same as _FreeOSPage except free memory using DMA framework */
+static INLINE void
+_FreeOSPage_CMA(struct device *dev,
+				size_t alloc_size,
+				IMG_UINT32 uiOrder,
+				void *virt_addr,
+				dma_addr_t dev_addr,
+				struct page *psPage)
+{
+	if (DMA_IS_ALLOCPG_ADDR(dev_addr))
+	{
+#if defined(CONFIG_X86)
+		void *pvPageVAddr = page_address(psPage);
+		if (pvPageVAddr)
+		{
+			int ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
+			if (ret)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Failed to reset page attribute",
+						__func__));
+			}
+		}
+#endif
+
+		if (DMA_IS_ADDR_ADJUSTED(dev_addr))
+		{
+			psPage -= DMA_GET_ALIGN_ADJUSTMENT(dev_addr);
+			uiOrder += 1;
+		}
+
+		__free_pages(psPage, uiOrder);
+	}
+	else
+	{
+		if (DMA_IS_ADDR_ADJUSTED(dev_addr))
+		{
+			size_t align_adjust;
+
+			align_adjust = DMA_GET_ALIGN_ADJUSTMENT(dev_addr);
+			alloc_size = alloc_size << 1;
+
+			dev_addr = DMA_GET_ADDR(dev_addr);
+			dev_addr -= align_adjust << PAGE_SHIFT;
+			virt_addr -= align_adjust << PAGE_SHIFT;
+		}
+
+		dma_free_coherent(dev, alloc_size, virt_addr, DMA_GET_ADDR(dev_addr));
+	}
+}
+
+/* Free a single page back to the OS.
+ * Make sure the cache type is set back to the default value.
+ *
+ * Note:
+ * We must _only_ check bUnsetMemoryType in the case where we need to free
+ * the page back to the OS since we may have to revert the cache properties
+ * of the page to the default as given by the OS when it was allocated. */
+static void
+_FreeOSPage(IMG_UINT32 uiOrder,
+			IMG_BOOL bUnsetMemoryType,
+			struct page *psPage)
+{
+
+#if defined(CONFIG_X86)
+	void *pvPageVAddr;
+	pvPageVAddr = page_address(psPage);
+
+	if (pvPageVAddr && bUnsetMemoryType)
+	{
+		int ret;
+
+		ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
+		if (ret)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute",
+					 __func__));
+		}
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType);
+#endif
+	__free_pages(psPage, uiOrder);
+}
+
+/* Free the struct holding the metadata */
+static PVRSRV_ERROR
+_FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+	PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData));
+
+	/* Check if the page array actually still exists.
+	 * It might be the case that it has been moved to the page pool */
+	if (psPageArrayData->pagearray != NULL)
+	{
+		OSFreeMemNoStats(psPageArrayData->pagearray);
+	}
+
+	kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
+
+	return PVRSRV_OK;
+}
+
+/* Free all or some pages from a sparse page array */
+static PVRSRV_ERROR
+_FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+					IMG_UINT32 *pai32FreeIndices,
+					IMG_UINT32 ui32FreePageCount)
+{
+	IMG_BOOL bSuccess;
+	IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+	IMG_UINT32 uiPageIndex, i, j, uiTempIdx = 0;
+	struct page **ppsPageArray = psPageArrayData->pagearray;
+	IMG_UINT32 uiNumPages;
+
+	struct page **ppsTempPageArray;
+	IMG_UINT32 uiTempArraySize;
+
+	/* We really should have something to free before we call this */
+	PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0);
+
+	if (pai32FreeIndices == NULL)
+	{
+		uiNumPages = psPageArrayData->uiTotalNumOSPages >> uiOrder;
+		uiTempArraySize = psPageArrayData->iNumOSPagesAllocated;
+	}
+	else
+	{
+		uiNumPages = ui32FreePageCount;
+		uiTempArraySize = ui32FreePageCount << uiOrder;
+	}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	for (i = 0; i < uiNumPages; i++)
+	{
+		IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
+
+		if (NULL != ppsPageArray[idx])
+		{
+			_RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[idx]);
+		}
+	}
+#else
+	_DecrMemAllocStat_UmaPages(uiTempArraySize * PAGE_SIZE,
+	                           psPageArrayData->uiPid);
+#endif
+#endif
+
+	if (psPageArrayData->bPoisonOnFree)
+	{
+		for (i = 0; i < uiNumPages; i++)
+		{
+			IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
+
+			if (NULL != ppsPageArray[idx])
+			{
+				_PoisonDevicePage(psPageArrayData->psDevNode,
+				                  ppsPageArray[idx],
+				                  uiOrder,
+				                  psPageArrayData->ui32CPUCacheFlags,
+				                  PVRSRV_POISON_ON_FREE_VALUE);
+			}
+		}
+	}
+
+	if (psPageArrayData->bIsCMA)
+	{
+		IMG_UINT32 uiDevNumPages = uiNumPages;
+		IMG_UINT32 uiDevPageSize = 1<<psPageArrayData->uiLog2AllocPageSize;
+
+		for (i = 0; i < uiDevNumPages; i++)
+		{
+			IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
+			if (NULL != ppsPageArray[idx])
+			{
+				_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+								uiDevPageSize,
+								uiOrder,
+								psPageArrayData->dmavirtarray[idx],
+								psPageArrayData->dmaphysarray[idx],
+								ppsPageArray[idx]);
+				psPageArrayData->dmaphysarray[idx] = (dma_addr_t)0;
+				psPageArrayData->dmavirtarray[idx] = NULL;
+				ppsPageArray[idx] = NULL;
+				uiTempIdx++;
+			}
+		}
+		uiTempIdx <<= uiOrder;
+	}
+	else
+	{
+
+		/* OSAllocMemNoStats required because this code may be run without the bridge lock held */
+		ppsTempPageArray = OSAllocMemNoStats(sizeof(struct page*) * uiTempArraySize);
+		if (ppsTempPageArray == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __func__));
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+		/* Put pages in a contiguous array so further processing is easier */
+		for (i = 0; i < uiNumPages; i++)
+		{
+			uiPageIndex = pai32FreeIndices ? pai32FreeIndices[i] : i;
+			if (NULL != ppsPageArray[uiPageIndex])
+			{
+				struct page *psPage = ppsPageArray[uiPageIndex];
+
+				for (j = 0; j < (1<<uiOrder); j++)
+				{
+					ppsTempPageArray[uiTempIdx] = psPage;
+					uiTempIdx++;
+					psPage++;
+				}
+
+				ppsPageArray[uiPageIndex] = NULL;
+			}
+		}
+
+		/* Try to move the temp page array to the pool */
+		bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
+										 ppsTempPageArray,
+										 psPageArrayData->bUnpinned,
+										 0,
+										 uiTempIdx);
+		if (bSuccess)
+		{
+			goto exit_ok;
+		}
+
+		/* Free pages and reset page caching attributes on x86 */
+#if defined(CONFIG_X86)
+		if (uiTempIdx != 0 && psPageArrayData->bUnsetMemoryType)
+		{
+			int iError;
+			iError = set_pages_array_wb(ppsTempPageArray, uiTempIdx);
+
+			if (iError)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __func__));
+			}
+		}
+#endif
+
+		/* Free the pages */
+		for (i = 0; i < uiTempIdx; i++)
+		{
+			__free_pages(ppsTempPageArray[i], 0);
+		}
+
+		/* Free the temp page array here if it did not move to the pool */
+		OSFreeMemNoStats(ppsTempPageArray);
+	}
+
+exit_ok:
+	/* Update metadata */
+	psPageArrayData->iNumOSPagesAllocated -= uiTempIdx;
+	PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated);
+	return PVRSRV_OK;
+}
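+
+/* Example: freeing 3 device pages of order 2 from a sparse PMR expands to a
+ * 12-entry ppsTempPageArray. If the pool declines the pages (e.g. pool full,
+ * unpinned allocation or CPU-cached mode) they are handed back to the OS
+ * with __free_pages() after any x86 cache-attribute reset. */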
+
+/* Free all the pages in a page array */
+static PVRSRV_ERROR
+_FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+	IMG_BOOL bSuccess;
+	IMG_UINT32 i;
+	IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumOSPages;
+	IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+	IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder;
+	IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;
+	struct page **ppsPageArray = psPageArrayData->pagearray;
+
+	/* We really should have something to free before we call this */
+	PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	for (i = 0; i < uiDevNumPages; i++)
+	{
+		_RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]);
+	}
+#else
+	_DecrMemAllocStat_UmaPages(uiNumPages * PAGE_SIZE,
+	                           psPageArrayData->uiPid);
+#endif
+#endif
+
+	if (psPageArrayData->bPoisonOnFree)
+	{
+		for (i = 0; i < uiDevNumPages; i++)
+		{
+			_PoisonDevicePage(psPageArrayData->psDevNode,
+			                  ppsPageArray[i],
+			                  uiOrder,
+			                  psPageArrayData->ui32CPUCacheFlags,
+			                  PVRSRV_POISON_ON_FREE_VALUE);
+		}
+	}
+
+	/* Try to move the page array to the pool */
+	bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
+									 ppsPageArray,
+									 psPageArrayData->bUnpinned,
+									 uiOrder,
+									 uiNumPages);
+	if (bSuccess)
+	{
+		psPageArrayData->pagearray = NULL;
+		goto exit_ok;
+	}
+
+	if (psPageArrayData->bIsCMA)
+	{
+		for (i = 0; i < uiDevNumPages; i++)
+		{
+			_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+							uiDevPageSize,
+							uiOrder,
+							psPageArrayData->dmavirtarray[i],
+							psPageArrayData->dmaphysarray[i],
+							ppsPageArray[i]);
+			psPageArrayData->dmaphysarray[i] = (dma_addr_t)0;
+			psPageArrayData->dmavirtarray[i] = NULL;
+			ppsPageArray[i] = NULL;
+		}
+	}
+	else
+	{
+#if defined(CONFIG_X86)
+		if (psPageArrayData->bUnsetMemoryType)
+		{
+			int ret;
+
+			ret = set_pages_array_wb(ppsPageArray, uiNumPages);
+			if (ret)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes",
+						 __func__));
+			}
+		}
+#endif
+
+		for (i = 0; i < uiNumPages; i++)
+		{
+			_FreeOSPage(uiOrder, IMG_FALSE, ppsPageArray[i]);
+			ppsPageArray[i] = NULL;
+		}
+	}
+
+exit_ok:
+	/* Update metadata */
+	psPageArrayData->iNumOSPagesAllocated = 0;
+	return PVRSRV_OK;
+}
+
+/* Free pages from a page array.
+ * Takes care of mem stats and chooses correct free path depending on parameters. */
+static PVRSRV_ERROR
+_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+			 IMG_UINT32 *pai32FreeIndices,
+			 IMG_UINT32 ui32FreePageCount)
+{
+	PVRSRV_ERROR eError;
+
+	/* Go the sparse or non-sparse path */
+	if (psPageArrayData->iNumOSPagesAllocated != psPageArrayData->uiTotalNumOSPages
+		|| pai32FreeIndices != NULL)
+	{
+		eError = _FreeOSPages_Sparse(psPageArrayData,
+									 pai32FreeIndices,
+									 ui32FreePageCount);
+	}
+	else
+	{
+		eError = _FreeOSPages_Fast(psPageArrayData);
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_FreeOSPages_FreePages failed"));
+	}
+
+	_DumpPageArray(psPageArrayData->pagearray,
+	               psPageArrayData->uiTotalNumOSPages >>
+	              (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) );
+
+	return eError;
+}
+
+/*
+ *
+ * Implementation of callback functions
+ *
+ */
+
+/* The destructor function is called after the last reference disappears, but
+ * before the PMR itself is freed. */
+static PVRSRV_ERROR
+PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVRSRV_ERROR eError;
+	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+
+	/* We can't free pages until now. */
+	if (psOSPageArrayData->iNumOSPagesAllocated != 0)
+	{
+		_PagePoolLock();
+		if (psOSPageArrayData->bUnpinned)
+		{
+			_RemoveUnpinListEntryUnlocked(psOSPageArrayData);
+		}
+		_PagePoolUnlock();
+
+		eError = _FreeOSPages(psOSPageArrayData,
+							  NULL,
+							  0);
+		PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+	}
+
+	eError = _FreeOSPagesArray(psOSPageArrayData);
+	PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+	return PVRSRV_OK;
+}
+
+/* Callback function for locking the system physical page addresses.
+ * This function must be called before the lookup address func. */
+static PVRSRV_ERROR
+PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVRSRV_ERROR eError;
+	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+	if (psOSPageArrayData->bOnDemand)
+	{
+		/* Allocate Memory for deferred allocation */
+		eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	eError = PVRSRV_OK;
+	return eError;
+}
+
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	/* Just drops the refcount. */
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+	if (psOSPageArrayData->bOnDemand)
+	{
+		/* Free Memory for deferred allocation */
+		eError = _FreeOSPages(psOSPageArrayData,
+							  NULL,
+							  0);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	PVR_ASSERT (eError == PVRSRV_OK);
+	return eError;
+}
+
+/* N.B. It is assumed that PMRLockSysPhysAddressesOSMem() is called _before_ this function! */
+static PVRSRV_ERROR
+PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv,
+					IMG_UINT32 ui32Log2PageSize,
+					IMG_UINT32 ui32NumOfPages,
+					IMG_DEVMEM_OFFSET_T *puiOffset,
+					IMG_BOOL *pbValid,
+					IMG_DEV_PHYADDR *psDevPAddr)
+{
+	const PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+	IMG_UINT32 uiPageSize = 1U << psOSPageArrayData->uiLog2AllocPageSize;
+	IMG_UINT32 uiInPageOffset;
+	IMG_UINT32 uiPageIndex;
+	IMG_UINT32 uiIdx;
+
+	if (psOSPageArrayData->uiLog2AllocPageSize < ui32Log2PageSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Requested physical addresses from PMR "
+		         "for incompatible contiguity %u!",
+		         __func__,
+		         ui32Log2PageSize));
+		return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+	}
+
+	for (uiIdx=0; uiIdx < ui32NumOfPages; uiIdx++)
+	{
+		if (pbValid[uiIdx])
+		{
+			uiPageIndex = puiOffset[uiIdx] >> psOSPageArrayData->uiLog2AllocPageSize;
+			uiInPageOffset = puiOffset[uiIdx] - ((IMG_DEVMEM_OFFSET_T)uiPageIndex << psOSPageArrayData->uiLog2AllocPageSize);
+
+			PVR_ASSERT(uiPageIndex < psOSPageArrayData->uiTotalNumOSPages);
+			PVR_ASSERT(uiInPageOffset < uiPageSize);
+
+			psDevPAddr[uiIdx].uiAddr = page_to_phys(psOSPageArrayData->pagearray[uiPageIndex]);
+			psDevPAddr[uiIdx].uiAddr += uiInPageOffset;
+
+#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY)
+			/* This is just a precaution; normally this should always
+			 * be available */
+			if (psOSPageArrayData->ui64DmaMask)
+			{
+				if (psDevPAddr[uiIdx].uiAddr > psOSPageArrayData->ui64DmaMask)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: physical address"
+							" (%" IMG_UINT64_FMTSPECX ") out of allowable range"
+							" [0; %" IMG_UINT64_FMTSPECX "]", __func__,
+							psDevPAddr[uiIdx].uiAddr,
+							psOSPageArrayData->ui64DmaMask));
+					BUG();
+				}
+			}
+#endif
+		}
+	}
+
+	return PVRSRV_OK;
+}
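+
+/* Address translation example: with uiLog2AllocPageSize = 12, an offset of
+ * 0x12345 resolves to uiPageIndex = 0x12 and uiInPageOffset = 0x345, so the
+ * returned address is page_to_phys(pagearray[0x12]) + 0x345. */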
+
+typedef struct _PMR_OSPAGEARRAY_KERNMAP_DATA_ {
+	void *pvBase;
+	IMG_UINT32 ui32PageCount;
+} PMR_OSPAGEARRAY_KERNMAP_DATA;
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
+								 size_t uiOffset,
+								 size_t uiSize,
+								 void **ppvKernelAddressOut,
+								 IMG_HANDLE *phHandleOut,
+								 PMR_FLAGS_T ulFlags)
+{
+	PVRSRV_ERROR eError;
+	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+	void *pvAddress;
+	pgprot_t prot = PAGE_KERNEL;
+	IMG_UINT32 ui32PageOffset = 0;
+	size_t uiMapOffset = 0;
+	IMG_UINT32 ui32PageCount = 0;
+	IMG_UINT32 uiLog2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize;
+	IMG_UINT32 uiOSPageShift = OSGetPageShift();
+	IMG_UINT32 uiPageSizeDiff = 0;
+	struct page **pagearray;
+	PMR_OSPAGEARRAY_KERNMAP_DATA *psData;
+
+	/* For cases where the device page size is greater than the OS page size,
+	 * multiple physically contiguous OS pages constitute one device page.
+	 * However, only the first page address of such an ensemble is stored
+	 * as part of the mapping table in the driver. Hence, when mapping the PMR
+	 * in part or in full, all OS pages that constitute the device page
+	 * must also be mapped to the kernel.
+	 *
+	 * For the case where the device page size is less than the OS page size,
+	 * treat it the same way as if the page sizes were equal */
+	if (uiLog2AllocPageSize > uiOSPageShift)
+	{
+		uiPageSizeDiff = uiLog2AllocPageSize - uiOSPageShift;
+	}
+
+	/*
+		A zero offset and size have a special meaning: map the whole PMR.
+		This is because the callers of this callback might not be able to
+		determine the physical size.
+	*/
+	if ((uiOffset == 0) && (uiSize == 0))
+	{
+		ui32PageOffset = 0;
+		uiMapOffset = 0;
+		/* Page count = number of OS pages */
+		ui32PageCount = psOSPageArrayData->iNumOSPagesAllocated;
+	}
+	else
+	{
+		size_t uiEndoffset;
+
+		ui32PageOffset = uiOffset >> uiLog2AllocPageSize;
+		uiMapOffset = uiOffset - (ui32PageOffset << uiLog2AllocPageSize);
+		uiEndoffset = uiOffset + uiSize - 1;
+		/* Add one as we want the count, not the offset */
+		/* Page count = number of device pages (note uiLog2AllocPageSize being used) */
+		ui32PageCount = (uiEndoffset >> uiLog2AllocPageSize) + 1;
+		ui32PageCount -= ui32PageOffset;
+
+		/* The OS page count to be mapped might be different if the
+		 * OS page size is smaller than the device page size */
+		ui32PageCount <<= uiPageSizeDiff;
+	}
+
+	switch (PVRSRV_CPU_CACHE_MODE(psOSPageArrayData->ui32CPUCacheFlags))
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+				prot = pgprot_noncached(prot);
+				break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+				prot = pgprot_writecombine(prot);
+				break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+				break;
+
+		default:
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto e0;
+	}
+
+	if (uiPageSizeDiff)
+	{
+		/* Each device page can be broken down into ui32SubPageCount OS pages */
+		IMG_UINT32 ui32SubPageCount = 1 << uiPageSizeDiff;
+		IMG_UINT32 i;
+		struct page **psPage = &psOSPageArrayData->pagearray[ui32PageOffset];
+
+		/* Allocate enough memory for the OS page pointers for this mapping */
+		pagearray = OSAllocMem(ui32PageCount * sizeof(pagearray[0]));
+
+		if (pagearray == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		/* Construct the array that holds the page pointers constituting the
+		 * requested mapping */
+		for (i = 0; i < ui32PageCount; i++)
+		{
+			IMG_UINT32 ui32OSPageArrayIndex  = i / ui32SubPageCount;
+			IMG_UINT32 ui32OSPageArrayOffset = i % ui32SubPageCount;
+
+			/*
+			 * The driver only stores OS page pointers for the first OS page
+			 * within each device page (psPage[ui32OSPageArrayIndex]).
+			 * Get the next OS page structure at device page granularity,
+			 * then calculate OS page pointers for all the other pages.
+			 */
+			pagearray[i] = psPage[ui32OSPageArrayIndex] + ui32OSPageArrayOffset;
+		}
+	}
+	else
+	{
+		pagearray = &psOSPageArrayData->pagearray[ui32PageOffset];
+	}
+
+	psData = OSAllocMem(sizeof(*psData));
+	if (psData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+	pvAddress = vmap(pagearray, ui32PageCount, VM_READ | VM_WRITE, prot);
+#else
+	pvAddress = vm_map_ram(pagearray, ui32PageCount, -1, prot);
+#endif
+	if (pvAddress == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e2;
+	}
+
+	*ppvKernelAddressOut = pvAddress + uiMapOffset;
+	psData->pvBase = pvAddress;
+	psData->ui32PageCount = ui32PageCount;
+	*phHandleOut = psData;
+
+	if (uiPageSizeDiff)
+	{
+		OSFreeMem(pagearray);
+	}
+
+	return PVRSRV_OK;
+
+	/* Error exit paths follow */
+ e2:
+	OSFreeMem(psData);
+ e1:
+	if (uiPageSizeDiff)
+	{
+		OSFreeMem(pagearray);
+	}
+ e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
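+
+/* Illustrative sketch (helper name hypothetical) of the sub-page expansion
+ * performed in PMRAcquireKernelMappingDataOSMem() above: with 16 KiB device
+ * pages over 4 KiB OS pages, uiSubPageCount == 4, and since struct page
+ * entries for physically contiguous pages are adjacent, OS page i of the
+ * mapping is the first page of device page i/4 advanced by i%4.
+ */
+static inline struct page *
+_ExampleSubPage(struct page **ppsFirstOSPages,
+                IMG_UINT32 uiOSPageIdx,
+                IMG_UINT32 uiSubPageCount)
+{
+	return ppsFirstOSPages[uiOSPageIdx / uiSubPageCount] +
+	       (uiOSPageIdx % uiSubPageCount);
+}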
+
+static void PMRReleaseKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
+											 IMG_HANDLE hHandle)
+{
+	PMR_OSPAGEARRAY_KERNMAP_DATA *psData = hHandle;
+	PVR_UNREFERENCED_PARAMETER(pvPriv);
+
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+	vunmap(psData->pvBase);
+#else
+	vm_unmap_ram(psData->pvBase, psData->ui32PageCount);
+#endif
+	OSFreeMem(psData);
+}
+
+static
+PVRSRV_ERROR PMRUnpinOSMem(PMR_IMPL_PRIVDATA pPriv)
+{
+	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Lock down the pool and add the array to the unpin list */
+	_PagePoolLock();
+
+	/* Sanity check */
+	PVR_ASSERT(psOSPageArrayData->bUnpinned == IMG_FALSE);
+	PVR_ASSERT(psOSPageArrayData->bOnDemand == IMG_FALSE);
+
+	eError = _AddUnpinListEntryUnlocked(psOSPageArrayData);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Unable to add allocation to unpinned list (%d).",
+		         __func__,
+		         eError));
+
+		goto e_exit;
+	}
+
+	psOSPageArrayData->bUnpinned = IMG_TRUE;
+
+e_exit:
+	_PagePoolUnlock();
+	return eError;
+}
+
+static
+PVRSRV_ERROR PMRPinOSMem(PMR_IMPL_PRIVDATA pPriv,
+						PMR_MAPPING_TABLE *psMappingTable)
+{
+	PVRSRV_ERROR eError;
+	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
+	IMG_UINT32  *pui32MapTable = NULL;
+	IMG_UINT32 i, j = 0, ui32Temp = 0;
+
+	_PagePoolLock();
+
+	/* Sanity check */
+	PVR_ASSERT(psOSPageArrayData->bUnpinned);
+
+	psOSPageArrayData->bUnpinned = IMG_FALSE;
+
+	/* If there are still pages in the array, remove the entry from the unpin list */
+	if (psOSPageArrayData->iNumOSPagesAllocated != 0)
+	{
+		_RemoveUnpinListEntryUnlocked(psOSPageArrayData);
+		_PagePoolUnlock();
+
+		eError = PVRSRV_OK;
+		goto e_exit_mapalloc_failure;
+	}
+	_PagePoolUnlock();
+
+	/* If pages were reclaimed we allocate new ones and
+	 * return PVRSRV_ERROR_PMR_NEW_MEMORY */
+	if (psMappingTable->ui32NumVirtChunks == 1)
+	{
+		eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages);
+	}
+	else
+	{
+		pui32MapTable = (IMG_UINT32 *)OSAllocMem(sizeof(*pui32MapTable) * psMappingTable->ui32NumPhysChunks);
+		if (NULL == pui32MapTable)
+		{
+			eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Unable to Alloc Map Table.",
+					 __func__));
+			goto e_exit_mapalloc_failure;
+		}
+
+		for (i = 0, j = 0; i < psMappingTable->ui32NumVirtChunks; i++)
+		{
+			ui32Temp = psMappingTable->aui32Translation[i];
+			if (TRANSLATION_INVALID != ui32Temp)
+			{
+				pui32MapTable[j++] = ui32Temp;
+			}
+		}
+		eError = _AllocOSPages(psOSPageArrayData, pui32MapTable, psMappingTable->ui32NumPhysChunks);
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Unable to get new pages for unpinned allocation.",
+				 __func__));
+
+		eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+		goto e_exit;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+			 "%s: Allocating new pages for unpinned allocation. "
+			 "Old content is lost!",
+			 __func__));
+
+	eError = PVRSRV_ERROR_PMR_NEW_MEMORY;
+
+e_exit:
+	OSFreeMem(pui32MapTable);
+e_exit_mapalloc_failure:
+	return eError;
+}
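+
+/* Illustrative sketch (helper name hypothetical) of the mapping-table
+ * compaction done in PMRPinOSMem() above: valid translations are copied
+ * into a dense array, skipping TRANSLATION_INVALID entries, and the number
+ * of physically backed chunks is returned.
+ */
+static inline IMG_UINT32
+_ExampleCompactTranslation(const IMG_UINT32 *paui32Translation,
+                           IMG_UINT32 ui32NumVirtChunks,
+                           IMG_UINT32 *pui32MapTableOut)
+{
+	IMG_UINT32 i, j = 0;
+
+	for (i = 0; i < ui32NumVirtChunks; i++)
+	{
+		if (TRANSLATION_INVALID != paui32Translation[i])
+		{
+			pui32MapTableOut[j++] = paui32Translation[i];
+		}
+	}
+
+	return j; /* Count of valid (physically backed) chunks */
+}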
+
+/*************************************************************************/ /*!
+@Function       PMRChangeSparseMemOSMem
+@Description    This function changes the sparse mapping by allocating and
+				freeing pages. It also changes the GPU and CPU mappings
+				accordingly.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv,
+						const PMR *psPMR,
+						IMG_UINT32 ui32AllocPageCount,
+						IMG_UINT32 *pai32AllocIndices,
+						IMG_UINT32 ui32FreePageCount,
+						IMG_UINT32 *pai32FreeIndices,
+						IMG_UINT32 uiFlags)
+{
+	PVRSRV_ERROR eError;
+
+	PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR);
+	PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
+	struct page **psPageArray = psPMRPageArrayData->pagearray;
+	void **psDMAVirtArray = psPMRPageArrayData->dmavirtarray;
+	dma_addr_t *psDMAPhysArray = psPMRPageArrayData->dmaphysarray;
+
+	struct page *psPage;
+	dma_addr_t psDMAPAddr;
+	void *pvDMAVAddr;
+
+	IMG_UINT32 ui32AdtnlAllocPages = 0; /*!< Number of pages to alloc from the OS */
+	IMG_UINT32 ui32AdtnlFreePages = 0; /*!< Number of pages to free back to the OS */
+	IMG_UINT32 ui32CommonRequestCount = 0; /*!< Number of pages to move position in the page array */
+	IMG_UINT32 ui32Loop = 0;
+	IMG_UINT32 ui32Index = 0;
+	IMG_UINT32 uiAllocpgidx;
+	IMG_UINT32 uiFreepgidx;
+	IMG_UINT32 uiOrder = psPMRPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+	IMG_BOOL bCMA = psPMRPageArrayData->bIsCMA;
+
+
+	/* Check SPARSE flags and calculate pages to allocate and free */
+	if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
+	{
+		ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ?
+				ui32FreePageCount : ui32AllocPageCount;
+
+		PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
+	}
+
+	if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
+	{
+		ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount;
+	}
+	else
+	{
+		ui32AllocPageCount = 0;
+	}
+
+	if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
+	{
+		ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount;
+	}
+	else
+	{
+		ui32FreePageCount = 0;
+	}
+
+	if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Missing parameters for number of pages to alloc/free",
+		         __func__));
+		return eError;
+	}
+
+	/* The incoming request is classified into two operations independent of
+	 * each other: alloc & free pages.
+	 * These operations can also be combined with two mapping operations,
+	 * namely GPU and CPU space mappings.
+	 *
+	 * From the alloc and free page requests, the net amount of pages to be
+	 * allocated or freed is computed. Pages that were requested to be freed
+	 * will be reused to fulfil alloc requests.
+	 *
+	 * The order of operations is:
+	 * 1. Allocate new pages from the OS
+	 * 2. Move the free pages from free request to alloc positions.
+	 * 3. Free the rest of the pages not used for alloc
+	 *
+	 * Alloc parameters are validated at the time of allocation
+	 * and any error will be handled then. */
+
+	/* Validate the free indices */
+	if (ui32FreePageCount)
+	{
+		if (NULL != pai32FreeIndices)
+		{
+			for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+			{
+				uiFreepgidx = pai32FreeIndices[ui32Loop];
+
+				if (uiFreepgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder))
+				{
+					eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+					goto e0;
+				}
+
+				if (NULL == psPageArray[uiFreepgidx])
+				{
+					eError = PVRSRV_ERROR_INVALID_PARAMS;
+					PVR_DPF((PVR_DBG_ERROR,
+					         "%s: Trying to free non-allocated page",
+					         __func__));
+					goto e0;
+				}
+			}
+		}
+		else
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Given non-zero free count but missing indices array",
+			         __func__));
+			return eError;
+		}
+	}
+
+	/* Validate the alloc indices */
+	for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
+	{
+		uiAllocpgidx = pai32AllocIndices[ui32Loop];
+
+		if (uiAllocpgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder))
+		{
+			eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+			goto e0;
+		}
+
+		if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+		{
+			if ((NULL != psPageArray[uiAllocpgidx]) ||
+			    (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
+			{
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				PVR_DPF((PVR_DBG_ERROR,
+				         "%s: Trying to allocate already allocated page again",
+				         __func__));
+				goto e0;
+			}
+		}
+		else
+		{
+			if ((NULL == psPageArray[uiAllocpgidx]) ||
+			    (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]) )
+			{
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				PVR_DPF((PVR_DBG_ERROR,
+				         "%s: Unable to remap memory due to missing page",
+				         __func__));
+				goto e0;
+			}
+		}
+	}
+
+	ui32Loop = 0;
+
+	/* Allocate new pages from the OS */
+	if (0 != ui32AdtnlAllocPages)
+	{
+		eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+			         "%s: New additional allocation of pages failed",
+			         __func__));
+			goto e0;
+		}
+
+		psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages;
+		/* Mark the corresponding pages of the translation table as valid */
+		for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
+		{
+			psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
+		}
+	}
+
+	ui32Index = ui32Loop;
+
+	/* Move the corresponding free pages to alloc request */
+	for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++)
+	{
+		uiAllocpgidx = pai32AllocIndices[ui32Index];
+		uiFreepgidx  = pai32FreeIndices[ui32Loop];
+
+		psPage = psPageArray[uiAllocpgidx];
+		psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
+
+		if (bCMA)
+		{
+			pvDMAVAddr = psDMAVirtArray[uiAllocpgidx];
+			psDMAPAddr = psDMAPhysArray[uiAllocpgidx];
+			psDMAVirtArray[uiAllocpgidx] = psDMAVirtArray[uiFreepgidx];
+			psDMAPhysArray[uiAllocpgidx] = psDMAPhysArray[uiFreepgidx];
+		}
+
+		/* Is remap mem used in any real-world scenario? Should it be turned
+		 * into a debug feature? The condition check should be moved out of
+		 * the loop; this will be done at a later point after some analysis. */
+		if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+		{
+			psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
+			psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+			psPageArray[uiFreepgidx] = NULL;
+			if (bCMA)
+			{
+				psDMAVirtArray[uiFreepgidx] = NULL;
+				psDMAPhysArray[uiFreepgidx] = (dma_addr_t)0;
+			}
+		}
+		else
+		{
+			psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
+			psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+			psPageArray[uiFreepgidx] = psPage;
+			if (bCMA)
+			{
+				psDMAVirtArray[uiFreepgidx] = pvDMAVAddr;
+				psDMAPhysArray[uiFreepgidx] = psDMAPAddr;
+			}
+		}
+	}
+
+	/* Free the additional free pages */
+	if (0 != ui32AdtnlFreePages)
+	{
+		eError = _FreeOSPages(psPMRPageArrayData,
+		                      &pai32FreeIndices[ui32Loop],
+		                      ui32AdtnlFreePages);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+		psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages;
+		while (ui32Loop < ui32FreePageCount)
+		{
+			psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Loop]] = TRANSLATION_INVALID;
+			ui32Loop++;
+		}
+	}
+
+	eError = PVRSRV_OK;
+
+e0:
+	return eError;
+}
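+
+/* Worked sketch, illustration only (helper name hypothetical): how the
+ * resize request above splits into the three page counts, assuming both
+ * resize flags are set. With ui32AllocPageCount == 7 and
+ * ui32FreePageCount == 3, the common count is min(7, 3) == 3 pages moved
+ * within the array, 4 pages come fresh from the OS, and 0 go back to it.
+ */
+static inline void
+_ExampleSplitSparseResize(IMG_UINT32 ui32AllocCount,
+                          IMG_UINT32 ui32FreeCount,
+                          IMG_UINT32 *pui32Common,
+                          IMG_UINT32 *pui32AdtnlAlloc,
+                          IMG_UINT32 *pui32AdtnlFree)
+{
+	*pui32Common = (ui32AllocCount > ui32FreeCount) ? ui32FreeCount : ui32AllocCount;
+	*pui32AdtnlAlloc = ui32AllocCount - *pui32Common;
+	*pui32AdtnlFree = ui32FreeCount - *pui32Common;
+}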
+
+/*************************************************************************/ /*!
+@Function       PMRChangeSparseMemCPUMapOSMem
+@Description    This function changes the CPU mappings accordingly
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR PMRChangeSparseMemCPUMapOSMem(PMR_IMPL_PRIVDATA pPriv,
+                                           const PMR *psPMR,
+                                           IMG_UINT64 sCpuVAddrBase,
+                                           IMG_UINT32 ui32AllocPageCount,
+                                           IMG_UINT32 *pai32AllocIndices,
+                                           IMG_UINT32 ui32FreePageCount,
+                                           IMG_UINT32 *pai32FreeIndices)
+{
+	struct page **psPageArray;
+	PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
+	IMG_CPU_PHYADDR sCPUPAddr;
+
+	sCPUPAddr.uiAddr = 0;
+	psPageArray = psPMRPageArrayData->pagearray;
+
+	return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
+	                                   sCpuVAddrBase,
+	                                   sCPUPAddr,
+	                                   ui32AllocPageCount,
+	                                   pai32AllocIndices,
+	                                   ui32FreePageCount,
+	                                   pai32FreeIndices,
+	                                   IMG_FALSE);
+}
+
+static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = {
+	.pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem,
+	.pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem,
+	.pfnDevPhysAddr = &PMRSysPhysAddrOSMem,
+	.pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataOSMem,
+	.pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem,
+	.pfnReadBytes = NULL,
+	.pfnWriteBytes = NULL,
+	.pfnUnpinMem = &PMRUnpinOSMem,
+	.pfnPinMem = &PMRPinOSMem,
+	.pfnChangeSparseMem = &PMRChangeSparseMemOSMem,
+	.pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem,
+	.pfnFinalize = &PMRFinalizeOSMem,
+};
+
+PVRSRV_ERROR
+PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+						 IMG_DEVMEM_SIZE_T uiSize,
+						 IMG_DEVMEM_SIZE_T uiChunkSize,
+						 IMG_UINT32 ui32NumPhysChunks,
+						 IMG_UINT32 ui32NumVirtChunks,
+						 IMG_UINT32 *puiAllocIndices,
+						 IMG_UINT32 uiLog2AllocPageSize,
+						 PVRSRV_MEMALLOCFLAGS_T uiFlags,
+						 const IMG_CHAR *pszAnnotation,
+						 IMG_PID uiPid,
+						 PMR **ppsPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eError2;
+	PMR *psPMR;
+	struct _PMR_OSPAGEARRAY_DATA_ *psPrivData;
+	PMR_FLAGS_T uiPMRFlags;
+	PHYS_HEAP *psPhysHeap;
+	IMG_UINT32 ui32CPUCacheFlags;
+	IMG_BOOL bZero;
+	IMG_BOOL bIsCMA;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bPoisonOnFree;
+	IMG_BOOL bOnDemand;
+	IMG_BOOL bCpuLocal;
+	IMG_BOOL bFwLocal;
+
+	/*
+	 * The host driver (but not guest) can still use this factory for firmware
+	 * allocations
+	 */
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) && PVRSRV_CHECK_FW_LOCAL(uiFlags))
+	{
+		PVR_ASSERT(0);
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errorOnParam;
+	}
+
+	/* Select correct caching mode */
+	eError = DevmemCPUCacheMode(psDevNode, uiFlags, &ui32CPUCacheFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnParam;
+	}
+
+	if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags))
+	{
+		ui32CPUCacheFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN;
+	}
+
+	/*
+	 * Use the CMA framework if the allocation page size is greater than the
+	 * OS page size; note that OSMMapPMRGeneric() has the same expectation.
+	 */
+	bIsCMA = uiLog2AllocPageSize > PAGE_SHIFT ? IMG_TRUE : IMG_FALSE;
+	bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bCpuLocal = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bFwLocal = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE;
+
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+	/* Overwrite flags and always zero pages that could go back to UM */
+	bZero = IMG_TRUE;
+	bPoisonOnAlloc = IMG_FALSE;
+#endif
+
+	/* Physical allocation alignment is generally not supported except under
+	   very restrictive conditions; there is also a maximum alignment value,
+	   which must not exceed the largest device page size. If these conditions
+	   are not met, fail the alignment-requested allocation */
+	if (bIsCMA)
+	{
+		IMG_UINT32 uiAlign = 1 << uiLog2AllocPageSize;
+		if (uiAlign > uiSize || uiAlign > (1 << PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ))
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Invalid PA alignment: size 0x%llx, align 0x%x",
+					__func__, uiSize, uiAlign));
+			eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+			goto errorOnParam;
+		}
+		PVR_ASSERT(uiLog2AllocPageSize > PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ);
+	}
+
+	/* Create the array structure that holds the physical pages */
+	eError = _AllocOSPageArray(psDevNode,
+							   uiChunkSize,
+							   ui32NumPhysChunks,
+							   ui32NumVirtChunks,
+							   uiLog2AllocPageSize,
+							   bZero,
+							   bIsCMA,
+							   bPoisonOnAlloc,
+							   bPoisonOnFree,
+							   bOnDemand,
+							   ui32CPUCacheFlags,
+							   uiPid,
+							   &psPrivData);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnAllocPageArray;
+	}
+
+	if (!bOnDemand)
+	{
+		/* Do we fill the whole page array or just parts (sparse)? */
+		if (ui32NumPhysChunks == ui32NumVirtChunks)
+		{
+			/* Allocate the physical pages */
+			eError = _AllocOSPages(psPrivData,
+			                       NULL,
+			                       psPrivData->uiTotalNumOSPages >> (uiLog2AllocPageSize - PAGE_SHIFT));
+		}
+		else if (ui32NumPhysChunks != 0)
+		{
+			/* Calculate the number of pages we want to allocate */
+			IMG_UINT32 uiPagesToAlloc =
+				(IMG_UINT32)((((ui32NumPhysChunks * uiChunkSize) - 1) >> uiLog2AllocPageSize) + 1);
+
+			/* Make sure calculation is correct */
+			PVR_ASSERT(((PMR_SIZE_T) uiPagesToAlloc << uiLog2AllocPageSize) ==
+			           (ui32NumPhysChunks * uiChunkSize));
+
+			/* Allocate the physical pages */
+			eError = _AllocOSPages(psPrivData, puiAllocIndices,
+			                       uiPagesToAlloc);
+		}
+
+		if (eError != PVRSRV_OK)
+		{
+			goto errorOnAllocPages;
+		}
+	}
+
+	/*
+	 * In this instance, we simply pass flags straight through.
+	 *
+	 * Generically, uiFlags can include things that control the PMR factory, but
+	 * we don't need any such thing (at the time of writing!), and our caller
+	 * specifies all PMR flags so we don't need to meddle with what was given to
+	 * us.
+	 */
+	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+	/*
+	 * Check no significant bits were lost in cast due to different bit widths
+	 * for flags
+	 */
+	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+	if (bOnDemand)
+	{
+		PDUMPCOMMENT("Deferred Allocation PMR (UMA)");
+	}
+
+	if (bFwLocal)
+	{
+		PDUMPCOMMENT("FW_LOCAL allocation requested");
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	}
+	else if (bCpuLocal)
+	{
+		PDUMPCOMMENT("CPU_LOCAL allocation requested");
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL];
+	}
+	else
+	{
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+	}
+
+	eError = PMRCreatePMR(psDevNode,
+						  psPhysHeap,
+						  uiSize,
+						  uiChunkSize,
+						  ui32NumPhysChunks,
+						  ui32NumVirtChunks,
+						  puiAllocIndices,
+						  uiLog2AllocPageSize,
+						  uiPMRFlags,
+						  pszAnnotation,
+						  &_sPMROSPFuncTab,
+						  psPrivData,
+						  PMR_TYPE_OSMEM,
+						  &psPMR,
+						  PDUMP_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnCreate;
+	}
+
+	*ppsPMRPtr = psPMR;
+
+	return PVRSRV_OK;
+
+errorOnCreate:
+	if (!bOnDemand)
+	{
+		eError2 = _FreeOSPages(psPrivData, NULL, 0);
+		PVR_ASSERT(eError2 == PVRSRV_OK);
+	}
+
+errorOnAllocPages:
+	eError2 = _FreeOSPagesArray(psPrivData);
+	PVR_ASSERT(eError2 == PVRSRV_OK);
+
+errorOnAllocPageArray:
+errorOnParam:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
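+
+/* Worked sketch, illustration only (helper name hypothetical): the sparse
+ * page-count calculation used above. With ui32NumPhysChunks == 3,
+ * uiChunkSize == 4096 and uiLog2AllocPageSize == 12, this gives
+ * (((3 * 4096) - 1) >> 12) + 1 == (12287 >> 12) + 1 == 3 pages, i.e. one
+ * allocation page per 4 KiB chunk.
+ */
+static inline IMG_UINT32
+_ExamplePagesForChunks(IMG_UINT32 ui32NumPhysChunks,
+                       IMG_DEVMEM_SIZE_T uiChunkSize,
+                       IMG_UINT32 uiLog2AllocPageSize)
+{
+	/* Round the total byte size up to whole allocation pages */
+	return (IMG_UINT32)((((ui32NumPhysChunks * uiChunkSize) - 1) >> uiLog2AllocPageSize) + 1);
+}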
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_osmem_linux.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_osmem_linux.h
new file mode 100644
index 0000000..3fac82d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_osmem_linux.h
@@ -0,0 +1,49 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux OS physmem implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PHYSMEM_OSMEM_LINUX_H__
+#define __PHYSMEM_OSMEM_LINUX_H__
+
+void LinuxInitPhysmem(void);
+void LinuxDeinitPhysmem(void);
+
+#endif /* __PHYSMEM_OSMEM_LINUX_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_tdsecbuf.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_tdsecbuf.c
new file mode 100644
index 0000000..52243c3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_tdsecbuf.c
@@ -0,0 +1,587 @@
+/*************************************************************************/ /*!
+@File
+@Title          Implementation of PMR functions for Trusted Device secure memory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for physical memory imported
+                from a trusted environment. The driver cannot acquire CPU
+                mappings for this secure memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "physmem_tdsecbuf.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "ri_server.h"
+#endif
+
+
+#if defined (SUPPORT_TRUSTED_DEVICE)
+
+#if !defined(NO_HARDWARE)
+
+typedef struct _PMR_TDSECBUF_DATA_ {
+	PVRSRV_DEVICE_NODE    *psDevNode;
+	PHYS_HEAP             *psTDSecBufPhysHeap;
+	IMG_CPU_PHYADDR       sCpuPAddr;
+	IMG_DEV_PHYADDR       sDevPAddr;
+	IMG_UINT64            ui64Size;
+	IMG_UINT32            ui32Log2PageSize;
+	IMG_UINT64            ui64SecBufHandle;
+} PMR_TDSECBUF_DATA;
+
+
+/*
+ * Implementation of callback functions
+ */
+
+static PVRSRV_ERROR PMRSysPhysAddrTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+                                              IMG_UINT32 ui32Log2PageSize,
+                                              IMG_UINT32 ui32NumOfPages,
+                                              IMG_DEVMEM_OFFSET_T *puiOffset,
+                                              IMG_BOOL *pbValid,
+                                              IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+	IMG_UINT32 i;
+
+	if (psPrivData->ui32Log2PageSize != ui32Log2PageSize)
+	{
+		return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+	}
+
+	for (i = 0; i < ui32NumOfPages; i++)
+	{
+		psDevPAddr[i].uiAddr = psPrivData->sDevPAddr.uiAddr + puiOffset[i];
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRFinalizeTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+	PVRSRV_DEVICE_CONFIG *psDevConfig = psPrivData->psDevNode->psDevConfig;
+	PVRSRV_ERROR eError;
+
+	eError = psDevConfig->pfnTDSecureBufFree(psDevConfig->hSysData,
+											 psPrivData->ui64SecBufHandle);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PMRFinalizeTDSecBufMem: TDSecBufFree not implemented on the Trusted Device!"));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PMRFinalizeTDSecBufMem: TDSecBufFree cannot free the resource!"));
+		}
+		return eError;
+	}
+
+	PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+	OSFreeMem(psPrivData);
+
+	return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRTDSecBufFuncTab = {
+	.pfnDevPhysAddr = &PMRSysPhysAddrTDSecBufMem,
+	.pfnFinalize = &PMRFinalizeTDSecBufMem,
+};
+
+
+/*
+ * Public functions
+ */
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      IMG_DEVMEM_SIZE_T uiSize,
+                                      PMR_LOG2ALIGN_T uiLog2Align,
+                                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                      PMR **ppsPMRPtr,
+                                      IMG_UINT64 *pui64SecBufHandle)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+	RGX_DATA *psRGXData = (RGX_DATA *)(psDevConfig->hDevData);
+	PMR_TDSECBUF_DATA *psPrivData = NULL;
+	PMR *psPMR = NULL;
+	IMG_UINT32 uiMappingTable = 0;
+	PMR_FLAGS_T uiPMRFlags;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+	/* In this instance, we simply pass flags straight through.
+	 * Generically, uiFlags can include things that control the PMR
+	 * factory, but we don't need any such thing (at the time of
+	 * writing!), and our caller specifies all PMR flags so we don't
+	 * need to meddle with what was given to us.
+	 */
+	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+	/* Check no significant bits were lost in cast due to different bit widths for flags */
+	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+	/* Many flags can be dropped as the driver cannot access this memory
+	 * and it is assumed that the trusted zone is physically contiguous
+	 */
+	uiPMRFlags &= ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+	                PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+	                PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC |
+	                PVRSRV_MEMALLOCFLAG_POISON_ON_FREE |
+	                PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK);
+
+	psPrivData = OSAllocZMem(sizeof(PMR_TDSECBUF_DATA));
+	if (psPrivData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errorOnAllocData;
+	}
+
+	/* Get required info for the TD Secure Buffer physical heap */
+	if (!psRGXData->bHasTDSecureBufPhysHeap)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Trusted Device physical heap not available!"));
+		eError = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+		goto errorOnAcquireHeap;
+	}
+
+	eError = PhysHeapAcquire(psRGXData->uiTDSecureBufPhysHeapID,
+	                         &psPrivData->psTDSecBufPhysHeap);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnAcquireHeap;
+	}
+
+	psPrivData->ui64Size = uiSize;
+
+	if (psDevConfig->pfnTDSecureBufAlloc && psDevConfig->pfnTDSecureBufFree)
+	{
+		PVRSRV_TD_SECBUF_PARAMS sTDSecBufParams;
+
+		psPrivData->psDevNode = psDevNode;
+
+		/* Ask the Trusted Device to allocate secure memory */
+		sTDSecBufParams.uiSize = uiSize;
+		sTDSecBufParams.uiAlign = 1 << uiLog2Align;
+
+		/* These will be returned by pfnTDSecureBufAlloc on success */
+		sTDSecBufParams.psSecBufAddr = &psPrivData->sCpuPAddr;
+		sTDSecBufParams.pui64SecBufHandle = &psPrivData->ui64SecBufHandle;
+
+		eError = psDevConfig->pfnTDSecureBufAlloc(psDevConfig->hSysData,
+												  &sTDSecBufParams);
+		if (eError != PVRSRV_OK)
+		{
+			if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufAlloc not implemented on the Trusted Device!"));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufAlloc cannot allocate the resource!"));
+			}
+			goto errorOnAlloc;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufAlloc/Free not implemented!"));
+		eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+		goto errorOnAlloc;
+	}
+
+	PhysHeapCpuPAddrToDevPAddr(psPrivData->psTDSecBufPhysHeap,
+	                           1,
+	                           &psPrivData->sDevPAddr,
+	                           &psPrivData->sCpuPAddr);
+
+	/* Check that the secure buffer has the requested alignment
+	 * (i.e. that it is aligned to a Rogue cache line) */
+	if ((((1ULL << uiLog2Align) - 1) & psPrivData->sCpuPAddr.uiAddr) != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "Trusted Device physical heap has the wrong alignment! "
+				 "Physical address 0x%llx, alignment mask 0x%llx",
+				 (unsigned long long) psPrivData->sCpuPAddr.uiAddr,
+				 ((1ULL << uiLog2Align) - 1)));
+		eError = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+		goto errorOnCheckAlign;
+	}
+
+	psPrivData->ui32Log2PageSize = uiLog2Align;
+
+	eError = PMRCreatePMR(psDevNode,
+	                      psPrivData->psTDSecBufPhysHeap,
+	                      psPrivData->ui64Size,
+	                      psPrivData->ui64Size,
+	                      1,                 /* ui32NumPhysChunks */
+	                      1,                 /* ui32NumVirtChunks */
+	                      &uiMappingTable,   /* pui32MappingTable (not used) */
+	                      uiLog2Align,
+	                      uiPMRFlags,
+	                      "TDSECUREBUF_PMR",
+	                      &_sPMRTDSecBufFuncTab,
+	                      psPrivData,
+	                      PMR_TYPE_TDSECBUF,
+	                      &psPMR,
+	                      PDUMP_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnCreatePMR;
+	}
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	eError = RIWritePMREntryKM(psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+		         "%s: Failed to write PMR entry (%s)",
+		         __func__, PVRSRVGetErrorString(eError)));
+	}
+#endif
+
+	*ppsPMRPtr = psPMR;
+	*pui64SecBufHandle = psPrivData->ui64SecBufHandle;
+
+	return PVRSRV_OK;
+
+
+errorOnCreatePMR:
+errorOnCheckAlign:
+	eError = psDevConfig->pfnTDSecureBufFree(psDevConfig->hSysData,
+											 psPrivData->ui64SecBufHandle);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufFree not implemented on the Trusted Device!"));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufFree cannot free the resource!"));
+		}
+	}
+errorOnAlloc:
+	PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+errorOnAcquireHeap:
+	OSFreeMem(psPrivData);
+
+errorOnAllocData:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
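+
+/* Worked sketch, illustration only (helper name hypothetical): the
+ * alignment-mask test used above. With uiLog2Align == 12 the mask is
+ * (1ULL << 12) - 1 == 0xFFF; 0x80001200 & 0xFFF == 0x200 != 0, so such a
+ * buffer would be rejected, while 0x80001000 would pass.
+ */
+static inline IMG_BOOL
+_ExampleIsAligned(IMG_UINT64 ui64Addr, IMG_UINT32 uiLog2Align)
+{
+	return ((ui64Addr & ((1ULL << uiLog2Align) - 1)) == 0) ? IMG_TRUE : IMG_FALSE;
+}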
+
+#else /* NO_HARDWARE */
+
+#include "physmem_osmem.h"
+
+typedef struct _PMR_TDSECBUF_DATA_ {
+	PHYS_HEAP  *psTDSecBufPhysHeap;
+	PMR        *psOSMemPMR;
+	IMG_UINT32 ui32Log2PageSize;
+} PMR_TDSECBUF_DATA;
+
+
+/*
+ * Implementation of callback functions
+ */
+
+static PVRSRV_ERROR
+PMRLockPhysAddressesTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+	return PMRLockSysPhysAddresses(psPrivData->psOSMemPMR);
+}
+
+static PVRSRV_ERROR
+PMRUnlockPhysAddressesTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+	return PMRUnlockSysPhysAddresses(psPrivData->psOSMemPMR);
+}
+
+static PVRSRV_ERROR
+PMRSysPhysAddrTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+                          IMG_UINT32 ui32Log2PageSize,
+                          IMG_UINT32 ui32NumOfPages,
+                          IMG_DEVMEM_OFFSET_T *puiOffset,
+                          IMG_BOOL *pbValid,
+                          IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+	/* On the assumption that this PMR was created with
+	 * NumPhysChunks == NumVirtChunks,
+	 * puiOffset[0] == uiLogicalOffset.
+	 */
+
+	return PMR_DevPhysAddr(psPrivData->psOSMemPMR,
+	                       ui32Log2PageSize,
+	                       ui32NumOfPages,
+	                       puiOffset[0],
+	                       psDevPAddr,
+	                       pbValid);
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+                                       size_t uiOffset,
+                                       size_t uiSize,
+                                       void **ppvKernelAddressOut,
+                                       IMG_HANDLE *phHandleOut,
+                                       PMR_FLAGS_T ulFlags)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+	size_t uiLengthOut;
+
+	PVR_UNREFERENCED_PARAMETER(ulFlags);
+
+	return PMRAcquireKernelMappingData(psPrivData->psOSMemPMR,
+	                                   uiOffset,
+	                                   uiSize,
+	                                   ppvKernelAddressOut,
+	                                   &uiLengthOut,
+	                                   phHandleOut);
+}
+
+static void
+PMRReleaseKernelMappingDataTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+                                       IMG_HANDLE hHandle)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+	PMRReleaseKernelMappingData(psPrivData->psOSMemPMR, hHandle);
+}
+
+static PVRSRV_ERROR PMRFinalizeTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+	PMRUnrefPMR(psPrivData->psOSMemPMR);
+	PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+	OSFreeMem(psPrivData);
+
+	return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRTDSecBufFuncTab = {
+	.pfnLockPhysAddresses = &PMRLockPhysAddressesTDSecBufMem,
+	.pfnUnlockPhysAddresses = &PMRUnlockPhysAddressesTDSecBufMem,
+	.pfnDevPhysAddr = &PMRSysPhysAddrTDSecBufMem,
+	.pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataTDSecBufMem,
+	.pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataTDSecBufMem,
+	.pfnFinalize = &PMRFinalizeTDSecBufMem,
+};
+
+
+/*
+ * Public functions
+ */
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      IMG_DEVMEM_SIZE_T uiSize,
+                                      PMR_LOG2ALIGN_T uiLog2Align,
+                                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                      PMR **ppsPMRPtr,
+                                      IMG_UINT64 *pui64SecBufHandle)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+	RGX_DATA *psRGXData = (RGX_DATA *)(psDevConfig->hDevData);
+	PMR_TDSECBUF_DATA *psPrivData = NULL;
+	PMR *psPMR = NULL;
+	PMR *psOSPMR = NULL;
+	IMG_UINT32 uiMappingTable = 0;
+	PMR_FLAGS_T uiPMRFlags;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* In this instance, we simply pass flags straight through.
+	 * Generically, uiFlags can include things that control the PMR
+	 * factory, but we don't need any such thing (at the time of
+	 * writing!), and our caller specifies all PMR flags so we don't
+	 * need to meddle with what was given to us.
+	 */
+	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+	/* Check no significant bits were lost in cast due to different bit widths for flags */
+	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+	psPrivData = OSAllocZMem(sizeof(PMR_TDSECBUF_DATA));
+	if (psPrivData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errorOnAllocData;
+	}
+
+	/* Get required info for the TD Secure Buffer physical heap */
+	if (!psRGXData->bHasTDSecureBufPhysHeap)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Trusted Device physical heap not available!"));
+		eError = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+		goto errorOnAcquireHeap;
+	}
+
+	eError = PhysHeapAcquire(psRGXData->uiTDSecureBufPhysHeapID,
+	                         &psPrivData->psTDSecBufPhysHeap);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnAcquireHeap;
+	}
+
+	/*
+	 * The alignment requested by the caller is only used to generate the
+	 * secure FW allocation pdump command with the correct alignment.
+	 * Internally we use another PMR with OS page alignment.
+	 */
+	psPrivData->ui32Log2PageSize = OSGetPageShift();
+
+	eError = PhysmemNewOSRamBackedPMR(psDevNode,
+	                                  uiSize,
+	                                  uiSize,
+	                                  1,                 /* ui32NumPhysChunks */
+	                                  1,                 /* ui32NumVirtChunks */
+	                                  &uiMappingTable,
+	                                  psPrivData->ui32Log2PageSize,
+	                                  uiFlags,
+	                                  "TDSECUREBUF_OSMEM",
+	                                  OSGetCurrentClientProcessIDKM(),
+	                                  &psOSPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnCreateOSPMR;
+	}
+
+	/* This is the primary PMR dumped with correct memspace and alignment */
+	eError = PMRCreatePMR(psDevNode,
+	                      psPrivData->psTDSecBufPhysHeap,
+	                      uiSize,
+	                      uiSize,
+	                      1,               /* ui32NumPhysChunks */
+	                      1,               /* ui32NumVirtChunks */
+	                      &uiMappingTable, /* pui32MappingTable (not used) */
+	                      uiLog2Align,
+	                      uiPMRFlags,
+	                      "TDSECUREBUF_PMR",
+	                      &_sPMRTDSecBufFuncTab,
+	                      psPrivData,
+	                      PMR_TYPE_TDSECBUF,
+	                      &psPMR,
+	                      PDUMP_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnCreateTDPMR;
+	}
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	eError = RIWritePMREntryKM(psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+		         "%s: Failed to write PMR entry (%s)",
+		         __func__, PVRSRVGetErrorString(eError)));
+	}
+#endif
+
+	psPrivData->psOSMemPMR = psOSPMR;
+	*ppsPMRPtr = psPMR;
+	*pui64SecBufHandle = 0x0ULL;
+
+	return PVRSRV_OK;
+
+errorOnCreateTDPMR:
+	PMRUnrefPMR(psOSPMR);
+
+errorOnCreateOSPMR:
+	PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+
+errorOnAcquireHeap:
+	OSFreeMem(psPrivData);
+
+errorOnAllocData:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+#endif /* NO_HARDWARE */
+
+#else /* SUPPORT_TRUSTED_DEVICE */
+
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      IMG_DEVMEM_SIZE_T uiSize,
+                                      PMR_LOG2ALIGN_T uiLog2Align,
+                                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                      PMR **ppsPMRPtr,
+                                      IMG_UINT64 *pui64SecBufHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiLog2Align);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+	PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+	PVR_UNREFERENCED_PARAMETER(pui64SecBufHandle);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+#endif
+
+PVRSRV_ERROR PhysmemImportSecBuf(CONNECTION_DATA *psConnection,
+                                 PVRSRV_DEVICE_NODE *psDevNode,
+                                 IMG_DEVMEM_SIZE_T uiSize,
+                                 IMG_UINT32 ui32Log2Align,
+                                 PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                 PMR **ppsPMRPtr,
+                                 IMG_UINT64 *pui64SecBufHandle)
+{
+	return PhysmemNewTDSecureBufPMR(psConnection,
+	                                psDevNode,
+	                                uiSize,
+	                                (PMR_LOG2ALIGN_T)ui32Log2Align,
+	                                uiFlags,
+	                                ppsPMRPtr,
+	                                pui64SecBufHandle);
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_tdsecbuf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_tdsecbuf.h
new file mode 100644
index 0000000..6d13802
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_tdsecbuf.h
@@ -0,0 +1,84 @@
+/**************************************************************************/ /*!
+@File
+@Title          Header for secure buffer PMR factory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks importing secure buffer
+                allocations.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _PHYSMEM_TDSECBUF_H_
+#define _PHYSMEM_TDSECBUF_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "pmr.h"
+
+/*
+ * PhysmemNewTDSecureBufPMR
+ *
+ * This function is used as part of the facility to provide secure buffer
+ * memory. A default implementation is provided but it can be replaced by
+ * the SoC implementor if necessary.
+ *
+ * Calling this function will create a PMR for a memory allocation made
+ * in "secure buffer memory". It will only be writable by a trusted
+ * entity and when the feature is enabled on the SoC the GPU will only
+ * be able to perform operations permitted by security rules.
+ */
+
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      IMG_DEVMEM_SIZE_T uiSize,
+                                      PMR_LOG2ALIGN_T uiLog2Align,
+                                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                      PMR **ppsPMRPtr,
+                                      IMG_UINT64 *pui64SecBufHandle);
+
+PVRSRV_ERROR PhysmemImportSecBuf(CONNECTION_DATA *psConnection,
+                                 PVRSRV_DEVICE_NODE *psDevNode,
+                                 IMG_DEVMEM_SIZE_T uiSize,
+                                 IMG_UINT32 ui32Log2Align,
+                                 PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                 PMR **ppsPMRPtr,
+                                 IMG_UINT64 *pui64SecBufHandle);
+
+#endif /* _PHYSMEM_TDSECBUF_H_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_test.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_test.c
new file mode 100644
index 0000000..a303838
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_test.c
@@ -0,0 +1,693 @@
+/*************************************************************************/ /*!
+@Title          Physmem_test
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Single entry point for testing of page factories
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#if defined(SUPPORT_PHYSMEM_TEST)
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "physmem_test.h"
+#include "device.h"
+#include "syscommon.h"
+#include "pmr.h"
+#include "osfunc.h"
+#include "physmem.h"
+#include "physmem_osmem.h"
+#include "physmem_lma.h"
+#include "pvrsrv.h"
+
+#define PHYSMEM_TEST_PAGES        2     /* Mem test pages */
+#define PHYSMEM_TEST_PASSES_MAX   1000  /* Limit number of passes to some reasonable value */
+
+
+/* Test patterns for mem test */
+
+static const IMG_UINT64 gui64Patterns[] = {
+	0,
+	0xffffffffffffffffULL,
+	0x5555555555555555ULL,
+	0xaaaaaaaaaaaaaaaaULL,
+	0x1111111111111111ULL,
+	0x2222222222222222ULL,
+	0x4444444444444444ULL,
+	0x8888888888888888ULL,
+	0x3333333333333333ULL,
+	0x6666666666666666ULL,
+	0x9999999999999999ULL,
+	0xccccccccccccccccULL,
+	0x7777777777777777ULL,
+	0xbbbbbbbbbbbbbbbbULL,
+	0xddddddddddddddddULL,
+	0xeeeeeeeeeeeeeeeeULL,
+	0x7a6c7258554e494cULL,
+};
+
+static const IMG_UINT32 gui32Patterns[] = {
+	0,
+	0xffffffffU,
+	0x55555555U,
+	0xaaaaaaaaU,
+	0x11111111U,
+	0x22222222U,
+	0x44444444U,
+	0x88888888U,
+	0x33333333U,
+	0x66666666U,
+	0x99999999U,
+	0xccccccccU,
+	0x77777777U,
+	0xbbbbbbbbU,
+	0xddddddddU,
+	0xeeeeeeeeU,
+	0x7a6c725cU,
+};
+
+static const IMG_UINT16 gui16Patterns[] = {
+	0,
+	0xffffU,
+	0x5555U,
+	0xaaaaU,
+	0x1111U,
+	0x2222U,
+	0x4444U,
+	0x8888U,
+	0x3333U,
+	0x6666U,
+	0x9999U,
+	0xccccU,
+	0x7777U,
+	0xbbbbU,
+	0xddddU,
+	0xeeeeU,
+	0x7a6cU,
+};
+
+static const IMG_UINT8 gui8Patterns[] = {
+	0,
+	0xffU,
+	0x55U,
+	0xaaU,
+	0x11U,
+	0x22U,
+	0x44U,
+	0x88U,
+	0x33U,
+	0x66U,
+	0x99U,
+	0xccU,
+	0x77U,
+	0xbbU,
+	0xddU,
+	0xeeU,
+	0x6cU,
+};
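+
+/* Illustrative sketch, not part of the test below (helper name hypothetical):
+ * how a single pass over one of the pattern tables can be structured,
+ * assuming a kernel-mapped buffer of uiWords 64-bit words. The pattern is
+ * written across the buffer, then read back and compared.
+ */
+static inline IMG_BOOL
+_ExamplePatternPass(volatile IMG_UINT64 *pui64Buf,
+                    size_t uiWords,
+                    IMG_UINT64 ui64Pattern)
+{
+	size_t i;
+
+	for (i = 0; i < uiWords; i++)
+	{
+		pui64Buf[i] = ui64Pattern;
+	}
+
+	for (i = 0; i < uiWords; i++)
+	{
+		if (pui64Buf[i] != ui64Pattern)
+		{
+			return IMG_FALSE; /* Mismatch: failing word at index i */
+		}
+	}
+
+	return IMG_TRUE;
+}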
+
+
+/* The following function does the minimal initialisation required for the memory test, using a dummy device node */
+static PVRSRV_ERROR
+PhysMemTestInit(PVRSRV_DEVICE_NODE **ppsDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+
+	/* Dummy device node */
+	psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode));
+	PVR_LOGR_IF_NOMEM(psDeviceNode, "OSAllocZMem");
+
+	psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT;
+	psDeviceNode->psDevConfig = psDevConfig;
+	psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON;
+
+	/* Initialise Phys mem heaps */
+	eError = PVRSRVPhysMemHeapsInit(psDeviceNode, psDevConfig);
+	PVR_LOGG_IF_ERROR(eError, "PVRSRVPhysMemHeapsInit", ErrorSysDevDeInit);
+
+	psDeviceNode->uiMMUPxLog2AllocGran = OSGetPageShift();
+
+	*ppsDeviceNode = psDeviceNode;
+
+	return PVRSRV_OK;
+
+ErrorSysDevDeInit:
+	psDevConfig->psDevNode = NULL;
+	OSFreeMem(psDeviceNode);
+	return eError;
+}
+
+/* Undo initialisation done for mem test */
+static void
+PhysMemTestDeInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	/* Deinitialise Phys mem heaps */
+	PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+
+	OSFreeMem(psDeviceNode);
+}
+
+/* Test for PMR factory validation */
+static PVRSRV_ERROR
+PMRValidationTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags)
+{
+	PVRSRV_ERROR eError, eError1;
+	IMG_UINT32 i = 0, j = 0, ui32Index = 0;
+	IMG_UINT32 *pui32MappingTable = NULL;
+	PMR *psPMR = NULL;
+	IMG_BOOL *pbValid;
+	IMG_DEV_PHYADDR *apsDevPAddr;
+	IMG_UINT32 ui32NumOfPages = 10, ui32NumOfPhysPages = 5;
+	size_t uiMappedSize, uiPageSize;
+	IMG_UINT8 *pcWriteBuffer, *pcReadBuffer;
+	IMG_HANDLE hPrivData = NULL;
+	void *pvKernAddr = NULL;
+
+	uiPageSize = OSGetPageSize();
+
+	/* Allocate OS memory for PMR page list */
+	apsDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR));
+	PVR_LOGR_IF_NOMEM(apsDevPAddr, "OSAllocMem");
+
+	/* Allocate OS memory for PMR page state */
+	pbValid = OSAllocMem(ui32NumOfPages * sizeof(IMG_BOOL));
+	PVR_LOGG_IF_NOMEM(pbValid, "OSAllocMem", eError, ErrorFreePMRPageListMem);
+	OSCachedMemSet(pbValid, 0, ui32NumOfPages * sizeof(IMG_BOOL));
+
+	/* Allocate OS memory for write buffer */
+	pcWriteBuffer = OSAllocMem(uiPageSize);
+	PVR_LOGG_IF_NOMEM(pcWriteBuffer, "OSAllocMem", eError, ErrorFreePMRPageStateMem);
+	OSCachedMemSet(pcWriteBuffer, 0xF, uiPageSize);
+
+	/* Allocate OS memory for read buffer */
+	pcReadBuffer = OSAllocMem(uiPageSize);
+	PVR_LOGG_IF_NOMEM(pcWriteBuffer, "OSAllocMem", eError, ErrorFreeWriteBuffer);
+
+	/* Allocate OS memory for mapping table */
+	pui32MappingTable = (IMG_UINT32 *)OSAllocMem(ui32NumOfPhysPages * sizeof(*pui32MappingTable));
+	PVR_LOGG_IF_NOMEM(pui32MappingTable, "OSAllocMem", eError, ErrorFreeReadBuffer);
+
+	/* Pages with an even index will have physical backing in the PMR */
+	for (ui32Index=0; ui32Index < ui32NumOfPages; ui32Index+=2)
+	{
+		pui32MappingTable[i++] = ui32Index;
+	}
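+
+	/*
+	 * Example: with ui32NumOfPages = 10 and ui32NumOfPhysPages = 5, the
+	 * loop above fills pui32MappingTable with { 0, 2, 4, 6, 8 }, so only
+	 * the even-indexed logical pages receive physical backing.
+	 */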
+
+	/* Allocate Sparse PMR with SPARSE | READ | WRITE | UNCACHED attributes */
+	uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
+				PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+				PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+				PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+
+	/* Allocate a sparse PMR from given physical heap - CPU/GPU/FW */
+	eError = PhysmemNewRamBackedPMR(NULL,
+									psDeviceNode,
+									ui32NumOfPages * uiPageSize,
+									uiPageSize,
+									ui32NumOfPhysPages,
+									ui32NumOfPages,
+									pui32MappingTable,
+									OSGetPageShift(),
+									uiFlags,
+									(strlen("PMR ValidationTest") + 1),
+									"PMR ValidationTest",
+									OSGetCurrentClientProcessIDKM(),
+									&psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR"));
+		goto ErrorFreeMappingTable;
+	}
+
+	/* Check whether the allocated PMR can be locked, and obtain the physical addresses of the underlying memory pages */
+	eError = PMRLockSysPhysAddresses(psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to lock PMR"));
+		goto ErrorUnrefPMR;
+	}
+
+	/* Get the Device physical addresses of the pages */
+	eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), ui32NumOfPages, 0, apsDevPAddr, pbValid);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses"));
+		goto ErrorUnlockPhysAddresses;
+	}
+
+	/* Check whether device address of each physical page is OS PAGE_SIZE aligned */
+	for (i = 0; i < ui32NumOfPages; i++)
+	{
+		if (pbValid[i])
+		{
+			if ((apsDevPAddr[i].uiAddr & OSGetPageMask()) != 0)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Physical memory of PMR is not page aligned"));
+				eError = PVRSRV_ERROR_MEMORY_TEST_FAILED;
+				goto ErrorUnlockPhysAddresses;
+			}
+		}
+	}
+
+	/* Acquire the kernel virtual address of each physical page, write to it, then release it */
+	for (i = 0; i < ui32NumOfPages; i++)
+	{
+		if (pbValid[i])
+		{
+			eError = PMRAcquireSparseKernelMappingData(psPMR, (i * uiPageSize), uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR"));
+				goto ErrorUnlockPhysAddresses;
+			}
+			OSDeviceMemCopy(pvKernAddr, pcWriteBuffer, OSGetPageSize());
+
+			eError = PMRReleaseKernelMappingData(psPMR, hPrivData);
+			PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+		}
+	}
+
+	/* Acquire the kernel virtual address of each physical page, read from it, and check whether the contents are intact */
+	for (i = 0; i < ui32NumOfPages; i++)
+	{
+		if (pbValid[i])
+		{
+			eError = PMRAcquireSparseKernelMappingData(psPMR, (i * uiPageSize), uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR"));
+				goto ErrorUnlockPhysAddresses;
+			}
+			OSCachedMemSet(pcReadBuffer, 0x0, uiPageSize);
+			OSDeviceMemCopy(pcReadBuffer, pvKernAddr, uiMappedSize);
+
+			eError = PMRReleaseKernelMappingData(psPMR, hPrivData);
+			PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+
+			for (j = 0; j < uiPageSize; j++)
+			{
+				if (pcReadBuffer[j] != pcWriteBuffer[j])
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: Test failed. Got (0x%hhx), expected (0x%hhx)!", __func__, pcReadBuffer[j], pcWriteBuffer[j]));
+					eError = PVRSRV_ERROR_MEMORY_TEST_FAILED;
+					goto ErrorUnlockPhysAddresses;
+				}
+			}
+		}
+	}
+
+ErrorUnlockPhysAddresses:
+	/* Unlock and Unref the PMR to destroy it */
+	eError1 = PMRUnlockSysPhysAddresses(psPMR);
+	if (eError1 != PVRSRV_OK)
+	{
+		eError = (eError == PVRSRV_OK)? eError1 : eError;
+		PVR_DPF((PVR_DBG_ERROR, "Failed to unlock PMR"));
+	}
+
+ErrorUnrefPMR:
+	eError1 = PMRUnrefPMR(psPMR);
+	if (eError1 != PVRSRV_OK)
+	{
+		eError = (eError == PVRSRV_OK)? eError1 : eError;
+		PVR_DPF((PVR_DBG_ERROR, "Failed to free PMR"));
+	}
+ErrorFreeMappingTable:
+	OSFreeMem(pui32MappingTable);
+ErrorFreeReadBuffer:
+	OSFreeMem(pcReadBuffer);
+ErrorFreeWriteBuffer:
+	OSFreeMem(pcWriteBuffer);
+ErrorFreePMRPageStateMem:
+	OSFreeMem(pbValid);
+ErrorFreePMRPageListMem:
+	OSFreeMem(apsDevPAddr);
+
+	return eError;
+}
+
+#define DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, Patterns, NumOfPatterns, Error, ptr, i) \
+	for (i = 0; i < NumOfPatterns; i++) \
+	{ \
+		/* Write pattern */ \
+		for (ptr = StartAddr; ptr < EndAddr; ptr++) \
+		{ \
+			*ptr = Patterns[i]; \
+		} \
+		\
+		/* Read back and validate pattern */ \
+		for (ptr = StartAddr; ptr < EndAddr ; ptr++) \
+		{ \
+			if (*ptr != Patterns[i]) \
+			{ \
+				Error = PVRSRV_ERROR_MEMORY_TEST_FAILED; \
+				break; \
+			} \
+		} \
+		\
+		if (Error != PVRSRV_OK) \
+		{ \
+			break; \
+		} \
+	}
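+
+/*
+ * On failure the macro leaves `ptr` pointing at the first mismatching
+ * word and `i` at the index of the failing pattern before setting Error
+ * to PVRSRV_ERROR_MEMORY_TEST_FAILED; the callers below rely on this
+ * for their diagnostics.
+ */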
+
+static PVRSRV_ERROR
+TestPatternU8(void *pvKernAddr, size_t uiMappedSize)
+{
+	IMG_UINT8 *StartAddr = (IMG_UINT8 *) pvKernAddr;
+	IMG_UINT8 *EndAddr = ((IMG_UINT8 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT8));
+	IMG_UINT8 *p;
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT8)) == 0);
+
+	DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui8Patterns, sizeof(gui8Patterns)/sizeof(IMG_UINT8), eError, p, i);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Test failed. Got (0x%hhx), expected (0x%hhx)!", __func__, *p, gui8Patterns[i])); \
+	}
+
+	return eError;
+}
+
+
+static PVRSRV_ERROR
+TestPatternU16(void *pvKernAddr, size_t uiMappedSize)
+{
+	IMG_UINT16 *StartAddr = (IMG_UINT16 *) pvKernAddr;
+	IMG_UINT16 *EndAddr = ((IMG_UINT16 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT16));
+	IMG_UINT16 *p;
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT16)) == 0);
+
+	DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui16Patterns, sizeof(gui16Patterns)/sizeof(IMG_UINT16), eError, p, i);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Test failed. Got (0x%hx), expected (0x%hx)!", __func__, *p, gui16Patterns[i])); \
+	}
+
+	return eError;
+}
+
+static PVRSRV_ERROR
+TestPatternU32(void *pvKernAddr, size_t uiMappedSize)
+{
+	IMG_UINT32 *StartAddr = (IMG_UINT32 *) pvKernAddr;
+	IMG_UINT32 *EndAddr = ((IMG_UINT32 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT32));
+	IMG_UINT32 *p;
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT32)) == 0);
+
+	DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui32Patterns, sizeof(gui32Patterns)/sizeof(IMG_UINT32), eError, p, i);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Test failed. Got (0x%x), expected (0x%x)!", __func__, *p, gui32Patterns[i])); \
+	}
+
+	return eError;
+}
+
+static PVRSRV_ERROR
+TestPatternU64(void *pvKernAddr, size_t uiMappedSize)
+{
+	IMG_UINT64 *StartAddr = (IMG_UINT64 *) pvKernAddr;
+	IMG_UINT64 *EndAddr = ((IMG_UINT64 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT64));
+	IMG_UINT64 *p;
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT64)) == 0);
+
+	DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui64Patterns, sizeof(gui64Patterns)/sizeof(IMG_UINT64), eError, p, i);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Test failed. Got (0x%llx), expected (0x%llx)!", __func__, *p, gui64Patterns[i])); \
+	}
+
+	return eError;
+}
+
+static PVRSRV_ERROR
+TestSplitCacheline(void *pvKernAddr, size_t uiMappedSize)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	size_t uiCacheLineSize;
+	size_t uiBlockSize;
+	size_t j;
+	IMG_UINT8 *pcWriteBuffer, *pcReadBuffer;
+	IMG_UINT8 *StartAddr = (IMG_UINT8 *) pvKernAddr;
+	IMG_UINT8 *EndAddr, *p;
+
+	uiCacheLineSize = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+
+	if (uiCacheLineSize > 0)
+	{
+		uiBlockSize = (uiCacheLineSize * 2)/3; /* split cacheline */
+
+		pcWriteBuffer = OSAllocMem(uiBlockSize);
+		PVR_LOGR_IF_NOMEM(pcWriteBuffer, "OSAllocMem");
+
+		/* Fill the write buffer with test data, 0xAB*/
+		OSCachedMemSet(pcWriteBuffer, 0xAB, uiBlockSize);
+
+		pcReadBuffer = OSAllocMem(uiBlockSize);
+		PVR_LOGG_IF_NOMEM(pcWriteBuffer, "OSAllocMem", eError, ErrorFreeWriteBuffer);
+
+		/* Fit only complete blocks in uiMappedSize, ignore leftover bytes */
+		EndAddr = StartAddr + (uiBlockSize * (uiMappedSize / uiBlockSize));
+
+		/* Write blocks into the memory */
+		for (p = StartAddr; p < EndAddr; p += uiBlockSize)
+		{
+			OSCachedMemCopy(p, pcWriteBuffer, uiBlockSize);
+		}
+
+		/* Read back blocks and check */
+		for (p = StartAddr; p < EndAddr; p += uiBlockSize)
+		{
+			OSCachedMemCopy(pcReadBuffer, p, uiBlockSize);
+
+			for (j = 0; j < uiBlockSize; j++)
+			{
+				if (pcReadBuffer[j] != pcWriteBuffer[j])
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: Test failed. Got (0x%hhx), expected (0x%hhx)!", __func__, pcReadBuffer[j], pcWriteBuffer[j]));
+					eError = PVRSRV_ERROR_MEMORY_TEST_FAILED;
+					goto ErrorMemTestFailed;
+				}
+			}
+		}
+
+ErrorMemTestFailed:
+		OSFreeMem(pcReadBuffer);
+ErrorFreeWriteBuffer:
+		OSFreeMem(pcWriteBuffer);
+	}
+
+	return eError;
+}
+
+/* Memory test - writes different patterns to memory, reads them back and validates the contents */
+static PVRSRV_ERROR
+MemTestPatterns(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32MappingTable = 0;
+	PMR *psPMR = NULL;
+	size_t uiMappedSize, uiPageSize;
+	IMG_HANDLE hPrivData = NULL;
+	void *pvKernAddr = NULL;
+
+	uiPageSize = OSGetPageSize();
+
+	/* Allocate PMR with READ | WRITE | WRITE_COMBINE attributes */
+	uiFlags |= PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+			   PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+			   PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE;
+
+	/* Allocate a PMR from the given physical heap */
+	eError = PhysmemNewRamBackedPMR(NULL,
+									psDeviceNode,
+									uiPageSize * PHYSMEM_TEST_PAGES,
+									uiPageSize * PHYSMEM_TEST_PAGES,
+									1,
+									1,
+									&ui32MappingTable,
+									OSGetPageShift(),
+									uiFlags,
+									(strlen("PMR PhysMemTest") + 1),
+									"PMR PhysMemTest",
+									OSGetCurrentClientProcessIDKM(),
+									&psPMR);
+	PVR_LOGR_IF_ERROR(eError, "PhysmemNewRamBackedPMR");
+
+	/* Check whether the allocated PMR can be locked, and obtain the physical addresses of the underlying memory pages */
+	eError = PMRLockSysPhysAddresses(psPMR);
+	PVR_LOGG_IF_ERROR(eError, "PMRLockSysPhysAddresses", ErrorUnrefPMR);
+
+	/* Map the physical page(s) into kernel space, acquire kernel mapping for PMR */
+	eError = PMRAcquireKernelMappingData(psPMR, 0, uiPageSize * PHYSMEM_TEST_PAGES, &pvKernAddr, &uiMappedSize, &hPrivData);
+	PVR_LOGG_IF_ERROR(eError, "PMRAcquireKernelMappingData", ErrorUnlockPhysAddresses);
+
+	PVR_ASSERT((uiPageSize * PHYSMEM_TEST_PAGES) == uiMappedSize);
+
+	/* Test various patterns */
+	eError = TestPatternU64(pvKernAddr, uiMappedSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorReleaseKernelMappingData;
+	}
+
+	eError = TestPatternU32(pvKernAddr, uiMappedSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorReleaseKernelMappingData;
+	}
+
+	eError = TestPatternU16(pvKernAddr, uiMappedSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorReleaseKernelMappingData;
+	}
+
+	eError = TestPatternU8(pvKernAddr, uiMappedSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorReleaseKernelMappingData;
+	}
+
+	/* Test split cachelines */
+	eError = TestSplitCacheline(pvKernAddr, uiMappedSize);
+
+ErrorReleaseKernelMappingData:
+	(void) PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+ErrorUnlockPhysAddresses:
+	/* Unlock and Unref the PMR to destroy it, ignore returned value */
+	(void) PMRUnlockSysPhysAddresses(psPMR);
+ErrorUnrefPMR:
+	(void) PMRUnrefPMR(psPMR);
+
+	return eError;
+}
+
+static PVRSRV_ERROR
+PhysMemTestRun(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags, IMG_UINT32 ui32Passes)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+
+	/* PMR validation test */
+	eError = PMRValidationTest(psDeviceNode, uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: PMR validation test failed!", __func__));
+		return eError;
+	}
+
+	for (i = 0; i < ui32Passes; i++)
+	{
+		/* Mem test */
+		eError = MemTestPatterns(psDeviceNode, uiFlags);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: [Pass#%u] MemTestPatterns failed!", __func__, i));
+			break;
+		}
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	PVRSRV_DEVICE_CONFIG *psDevConfig = pvDevConfig;
+
+	/* Clamp the number of mem test passes requested to a sane maximum */
+	ui32MemTestPasses = (ui32MemTestPasses > PHYSMEM_TEST_PASSES_MAX)? PHYSMEM_TEST_PASSES_MAX : ui32MemTestPasses;
+
+	/* Do minimal initialisation before test */
+	eError = PhysMemTestInit(&psDeviceNode, psDevConfig);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Test failed to initialize", __func__));
+		return eError;
+	}
+
+	/* GPU local mem */
+	eError = PhysMemTestRun(psDeviceNode, 0, ui32MemTestPasses);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "GPU local memory test failed!"));
+		goto ErrorPhysMemTestDeinit;
+	}
+
+	/* FW local mem */
+	eError = PhysMemTestRun(psDeviceNode, PVRSRV_MEMALLOCFLAG_FW_LOCAL, ui32MemTestPasses);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FW local memory test failed!"));
+		goto ErrorPhysMemTestDeinit;
+	}
+
+	/* CPU local mem */
+	eError = PhysMemTestRun(psDeviceNode, PVRSRV_MEMALLOCFLAG_CPU_LOCAL, ui32MemTestPasses);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "CPU local memory test failed!"));
+		goto ErrorPhysMemTestDeinit;
+	}
+
+	PVR_DPF((PVR_DBG_ERROR, "PhysMemTest: Passed."));
+	goto PhysMemTestPassed;
+
+ErrorPhysMemTestDeinit:
+	PVR_DPF((PVR_DBG_ERROR, "PhysMemTest: Failed."));
+PhysMemTestPassed:
+	PhysMemTestDeInit(psDeviceNode);
+
+	return eError;
+}
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_test.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_test.h
new file mode 100644
index 0000000..5509dcd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/physmem_test.h
@@ -0,0 +1,52 @@
+/*************************************************************************/ /*!
+@Title          Physmem test header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for single entry point for testing of page factories
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SRVSRV_PHYSMEM_TEST_H_
+#define _SRVSRV_PHYSMEM_TEST_H_
+/*
+ * PhysMemTest
+ *
+ * Single entry point for testing the page factories: runs basic
+ * physical memory tests against the device's physical heaps.
+ */
+extern PVRSRV_ERROR
+PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses);
+#endif /* _SRVSRV_PHYSMEM_TEST_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/plato_drv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/plato_drv.h
new file mode 100644
index 0000000..4d1a671
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/plato_drv.h
@@ -0,0 +1,411 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File           plato_drv.h
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PLATO_DRV_H
+#define _PLATO_DRV_H
+
+/*
+ * This contains the hooks for the plato pci driver, as used by the
+ * Rogue and PDP sub-devices, and the platform data passed to each of their
+ * drivers
+ */
+
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+
+// Debug output:
+// Sometimes we will want to output info or errors even in release mode;
+// in that case use dev_info/dev_err directly.
+#if defined(PLATO_DRM_DEBUG)
+	#define plato_dev_info(dev, fmt, ...) \
+		dev_info(dev, fmt, ##__VA_ARGS__)
+	#define plato_dev_warn(dev, fmt, ...) \
+		dev_warn(dev, fmt, ##__VA_ARGS__)
+	#define plato_dev_error(dev, fmt, ...) \
+		dev_err(dev, fmt, ##__VA_ARGS__)
+	#define PLATO_DRM_CHECKPOINT            pr_info("line %d\n", __LINE__)
+#else
+	#define plato_dev_info(dev, fmt, ...)
+	#define plato_dev_warn(dev, fmt, ...)
+	#define plato_dev_error(dev, fmt, ...)
+	#define PLATO_DRM_CHECKPOINT
+#endif
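+
+// Illustrative use (the wrappers take the same arguments as the dev_*
+// helpers they expand to; pdev and name here are caller-supplied):
+//
+//	plato_dev_info(&pdev->dev, "sub-device %s probed\n", name);
+//
+// All of these compile to nothing unless PLATO_DRM_DEBUG is defined.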
+
+#define PLATO_INIT_SUCCESS	0
+#define PLATO_INIT_FAILURE	1
+#define PLATO_INIT_RETRY	2
+
+#define PCI_VENDOR_ID_PLATO				(0x1AEE)
+#define PCI_DEVICE_ID_PLATO				(0x0003)
+
+#define PLATO_SYSTEM_NAME				"Plato"
+
+/* Interrupt defines */
+enum PLATO_INTERRUPT {
+	PLATO_INTERRUPT_GPU = 0,
+	PLATO_INTERRUPT_PDP,
+	PLATO_INTERRUPT_HDMI,
+	PLATO_INTERRUPT_MAX,
+};
+
+#define PLATO_INT_SHIFT_GPU				(0)
+#define PLATO_INT_SHIFT_PDP				(8)
+#define PLATO_INT_SHIFT_HDMI			(9)
+#define PLATO_INT_SHIFT_HDMI_WAKEUP		(11)
+#define PLATO_INT_SHIFT_TEMP_A			(12)
+
+
+struct plato_region {
+	resource_size_t base;
+	resource_size_t size;
+};
+
+struct plato_io_region {
+	struct plato_region region;
+	void __iomem *registers;
+};
+
+/* The following structs are initialised and passed down by the parent plato
+ * driver to the respective sub-drivers
+ */
+
+#define PLATO_DEVICE_NAME_PDP			"plato_pdp"
+#define PLATO_PDP_RESOURCE_REGS			"pdp-regs"
+#define PLATO_PDP_RESOURCE_BIF_REGS		"pdp-bif-regs"
+
+#define PLATO_DEVICE_NAME_HDMI			"plato_hdmi"
+#define PLATO_HDMI_RESOURCE_REGS		"hdmi-regs"
+
+struct plato_pdp_platform_data {
+	resource_size_t memory_base;
+
+	/* The following is used by the drm_pdp driver as it manages the
+	 * pdp memory
+	 */
+	resource_size_t pdp_heap_memory_base;
+	resource_size_t pdp_heap_memory_size;
+};
+
+struct plato_hdmi_platform_data {
+	resource_size_t plato_memory_base;
+};
+
+
+#define PLATO_DEVICE_NAME_ROGUE			"plato_rogue"
+#define PLATO_ROGUE_RESOURCE_REGS		"rogue-regs"
+
+struct plato_rogue_platform_data {
+
+	/* The base address of the plato memory (CPU physical address) -
+	 * used to convert from CPU-Physical to device-physical addresses
+	 */
+	resource_size_t plato_memory_base;
+
+	/* The following is used to setup the services heaps */
+	int has_nonmappable;
+	struct plato_region rogue_heap_mappable;
+	resource_size_t rogue_heap_dev_addr;
+	struct plato_region rogue_heap_nonmappable;
+#if defined(SUPPORT_PLATO_DISPLAY)
+	struct plato_region pdp_heap;
+#endif
+};
+
+struct plato_interrupt_handler {
+	bool enabled;
+	void (*handler_function)(void *);
+	void *handler_data;
+};
+
+struct plato_device {
+	struct pci_dev *pdev;
+
+	struct plato_io_region sys_io;
+	struct plato_io_region aon_regs;
+
+	spinlock_t interrupt_handler_lock;
+	spinlock_t interrupt_enable_lock;
+
+	struct plato_interrupt_handler interrupt_handlers[PLATO_INTERRUPT_MAX];
+
+	struct plato_region rogue_mem;
+	struct plato_region rogue_heap_mappable;
+	struct plato_region rogue_heap_nonmappable;
+	int has_nonmappable;
+
+	resource_size_t dev_mem_base; /* Pointer to device memory base */
+
+	struct platform_device *rogue_dev;
+
+#if defined(SUPPORT_PLATO_DISPLAY)
+	struct platform_device *pdp_dev;
+	struct plato_region pdp_heap;
+
+	struct platform_device *hdmi_dev;
+#endif
+
+#if defined(CONFIG_MTRR) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	int mtrr;
+#endif
+};
+
+#if defined(PLATO_LOG_CHECKPOINTS)
+#define PLATO_CHECKPOINT(p) dev_info(&p->pdev->dev, \
+					"- %s: %d", __func__, __LINE__)
+#else
+#define PLATO_CHECKPOINT(p)
+#endif
+
+#define plato_write_reg32(base, offset, value) \
+	iowrite32(value, (base) + (offset))
+#define plato_read_reg32(base, offset) ioread32(base + offset)
+#define plato_sleep_ms(x) msleep(x)
+#define plato_sleep_us(x) msleep(x/1000)
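+
+/*
+ * Illustrative use, assuming a struct plato_device *plato whose system
+ * register bank has been mapped:
+ *
+ *	u32 v = plato_read_reg32(plato->sys_io.registers,
+ *				 SYS_PLATO_REG_AON_OFFSET);
+ *	plato_write_reg32(plato->sys_io.registers,
+ *			  SYS_PLATO_REG_AON_OFFSET, v);
+ */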
+
+/* Valid values for the PLATO_MEMORY_CONFIG configuration option */
+#define PLATO_MEMORY_LOCAL			(1)
+#define PLATO_MEMORY_HOST			(2)
+#define PLATO_MEMORY_HYBRID			(3)
+
+#if defined(PLATO_MEMORY_CONFIG)
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+#define PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL 2
+#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL)
+#define PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL 1
+#endif
+#endif /* PLATO_MEMORY_CONFIG */
+
+#define DCPDP_PHYS_HEAP_ID PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL
+
+#define PLATO_PDP_MEM_SIZE			(384 * 1024 * 1024)
+
+#define SYS_PLATO_REG_PCI_BASENUM	(1)
+#define SYS_PLATO_REG_REGION_SIZE	(4 * 1024 * 1024)
+
+/*
+ * Give system region a whole span of the reg space including
+ * RGX registers. That's because there are sys register segments
+ * both before and after the RGX segment.
+ */
+#define SYS_PLATO_REG_SYS_OFFSET			(0x0)
+#define SYS_PLATO_REG_SYS_SIZE				(4 * 1024 * 1024)
+
+/* Entire Peripheral region */
+#define SYS_PLATO_REG_PERIP_OFFSET			(0x20000)
+#define SYS_PLATO_REG_PERIP_SIZE			(164 * 1024)
+
+/* Chip level registers */
+#define SYS_PLATO_REG_CHIP_LEVEL_OFFSET		(SYS_PLATO_REG_PERIP_OFFSET)
+#define SYS_PLATO_REG_CHIP_LEVEL_SIZE		(64 * 1024)
+
+#define SYS_PLATO_REG_TEMPA_OFFSET			(0x80000)
+#define SYS_PLATO_REG_TEMPA_SIZE			(64 * 1024)
+
+/* USB, DMA not included */
+
+#define SYS_PLATO_REG_DDR_A_CTRL_OFFSET		(0x120000)
+#define SYS_PLATO_REG_DDR_A_CTRL_SIZE		(64 * 1024)
+
+#define SYS_PLATO_REG_DDR_B_CTRL_OFFSET		(0x130000)
+#define SYS_PLATO_REG_DDR_B_CTRL_SIZE		(64 * 1024)
+
+#define SYS_PLATO_REG_DDR_A_PUBL_OFFSET		(0x140000)
+#define SYS_PLATO_REG_DDR_A_PUBL_SIZE		(64 * 1024)
+
+#define SYS_PLATO_REG_DDR_B_PUBL_OFFSET		(0x150000)
+#define SYS_PLATO_REG_DDR_B_PUBL_SIZE		(64 * 1024)
+
+#define SYS_PLATO_REG_NOC_OFFSET			(0x160000)
+#define SYS_PLATO_REG_NOC_SIZE		        (64 * 1024)
+
+/* Debug NOC registers */
+#define SYS_PLATO_REG_NOC_DBG_DDR_A_CTRL_OFFSET (0x1500)
+#define SYS_PLATO_REG_NOC_DBG_DDR_A_DATA_OFFSET (0x1580)
+#define SYS_PLATO_REG_NOC_DBG_DDR_A_PUBL_OFFSET (0x1600)
+#define SYS_PLATO_REG_NOC_DBG_DDR_B_CTRL_OFFSET (0x1680)
+#define SYS_PLATO_REG_NOC_DBG_DDR_B_DATA_OFFSET (0x1700)
+#define SYS_PLATO_REG_NOC_DBG_DDR_B_PUBL_OFFSET (0x1780)
+#define SYS_PLATO_REG_NOC_DBG_DISPLAY_S_OFFSET  (0x1800)
+#define SYS_PLATO_REG_NOC_DBG_GPIO_0_S_OFFSET   (0x1900)
+#define SYS_PLATO_REG_NOC_DBG_GPIO_1_S_OFFSET   (0x1980)
+#define SYS_PLATO_REG_NOC_DBG_GPU_S_OFFSET      (0x1A00)
+#define SYS_PLATO_REG_NOC_DBG_PCI_PHY_OFFSET    (0x1A80)
+#define SYS_PLATO_REG_NOC_DBG_PCI_REG_OFFSET    (0x1B00)
+#define SYS_PLATO_REG_NOC_DBG_PCI_S_OFFSET      (0x1B80)
+#define SYS_PLATO_REG_NOC_DBG_PERIPH_S_OFFSET   (0x1c00)
+#define SYS_PLATO_REG_NOC_DBG_RET_REG_OFFSET    (0x1D00)
+#define SYS_PLATO_REG_NOC_DBG_SERVICE_OFFSET    (0x1E00)
+
+#define SYS_PLATO_REG_RGX_OFFSET			(0x170000)
+#define SYS_PLATO_REG_RGX_SIZE				(64 * 1024)
+
+#define SYS_PLATO_REG_AON_OFFSET			(0x180000)
+#define SYS_PLATO_REG_AON_SIZE				(64 * 1024)
+
+#define SYS_PLATO_REG_PDP_OFFSET			(0x200000)
+#define SYS_PLATO_REG_PDP_SIZE				(0x1000)
+
+#define SYS_PLATO_REG_PDP_BIF_OFFSET \
+	(SYS_PLATO_REG_PDP_OFFSET + SYS_PLATO_REG_PDP_SIZE)
+#define SYS_PLATO_REG_PDP_BIF_SIZE          (0x200)
+
+#define SYS_PLATO_REG_HDMI_OFFSET \
+	(SYS_PLATO_REG_PDP_OFFSET + 0x20000)
+#define SYS_PLATO_REG_HDMI_SIZE             (128 * 1024)
+
+/* Device memory (including HP mapping) on base register 4 */
+#define SYS_DEV_MEM_PCI_BASENUM		(4)
+
+/* Device memory size */
+#define ONE_GB_IN_BYTES					(0x40000000ULL)
+#define SYS_DEV_MEM_REGION_SIZE \
+	(PLATO_MEMORY_SIZE_GIGABYTES * ONE_GB_IN_BYTES)
+
+/* Plato DDR offset in device memory map at 32GB */
+#define PLATO_DDR_DEV_PHYSICAL_BASE		(0x800000000)
+
+/* DRAM is split at 48GB */
+#define PLATO_DRAM_SPLIT_ADDR			(0xc00000000)
+
+/*
+ * Plato DDR region is aliased if less than 32GB memory is present.
+ * This defines memory base closest to the DRAM split point.
+ * If 32GB is present this is equal to PLATO_DDR_DEV_PHYSICAL_BASE
+ */
+#define PLATO_DDR_ALIASED_DEV_PHYSICAL_BASE \
+	(PLATO_DRAM_SPLIT_ADDR - (SYS_DEV_MEM_REGION_SIZE >> 1))
+
+#define PLATO_DDR_ALIASED_DEV_PHYSICAL_END \
+	(PLATO_DRAM_SPLIT_ADDR + (SYS_DEV_MEM_REGION_SIZE >> 1))
+
+#define PLATO_DDR_ALIASED_DEV_SEGMENT_SIZE \
+	((32ULL / PLATO_MEMORY_SIZE_GIGABYTES) * ONE_GB_IN_BYTES)
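+
+/*
+ * Worked example: with PLATO_MEMORY_SIZE_GIGABYTES = 8 the region size
+ * is 8GB, giving an aliased base of 0xC00000000 - 0x100000000 =
+ * 0xB00000000 and an aliased end of 0xD00000000, i.e. the DDR window is
+ * centred on the 48GB split point. With 32GB present the base works out
+ * to 0x800000000, matching PLATO_DDR_DEV_PHYSICAL_BASE as stated above.
+ */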
+
+/* Plato Host memory offset in device memory map at 512GB */
+#define PLATO_HOSTRAM_DEV_PHYSICAL_BASE (0x8000000000)
+
+/* Plato PLL, DDR/GPU, PDP and HDMI-SFR/CEC clocks */
+#define PLATO_PLL_REF_CLOCK_SPEED	(19200000)
+
+/* 600 MHz */
+#define PLATO_MEM_CLOCK_SPEED		(600000000)
+#define PLATO_MIN_MEM_CLOCK_SPEED	(600000000)
+#define PLATO_MAX_MEM_CLOCK_SPEED	(800000000)
+
+/* 396 MHz (~400 MHz) on HW, around 1MHz on the emulator */
+#if defined(EMULATOR) || defined(VIRTUAL_PLATFORM)
+#define	PLATO_RGX_CORE_CLOCK_SPEED	(1000000)
+#else
+
+#define	PLATO_RGX_CORE_CLOCK_SPEED	(396000000)
+#define	PLATO_RGX_MIN_CORE_CLOCK_SPEED	(396000000)
+#define	PLATO_RGX_MAX_CORE_CLOCK_SPEED	(742500000)
+#endif
+
+#define PLATO_MIN_PDP_CLOCK_SPEED		(165000000)
+#define PLATO_TARGET_HDMI_SFR_CLOCK_SPEED	(27000000)
+#define PLATO_TARGET_HDMI_CEC_CLOCK_SPEED	(32768)
+
+#define REG_TO_CELSIUS(reg)			(((reg) * 352/4096) - 109)
+#define CELSIUS_TO_REG(temp)		((((temp) + 109) * 4096) / 352)
+#define PLATO_MAX_TEMP_CELSIUS		(100)
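+
+/*
+ * Example: a raw reading of 2048 converts to (2048 * 352 / 4096) - 109
+ * = 67 degrees Celsius, and CELSIUS_TO_REG(PLATO_MAX_TEMP_CELSIUS) is
+ * ((100 + 109) * 4096) / 352 = 2432, the register value for the 100C
+ * limit.
+ */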
+
+#define PLATO_LMA_HEAP_REGION_MAPPABLE			0
+#define PLATO_LMA_HEAP_REGION_NONMAPPABLE		1
+
+struct plato_debug_register {
+	char *description;
+	unsigned int offset;
+	unsigned int value;
+};
+
+#if defined(ENABLE_PLATO_HDMI)
+
+#if defined(HDMI_PDUMP)
+/* Hard coded video formats for pdump type run only */
+#define VIDEO_FORMAT_1280_720p          0
+#define VIDEO_FORMAT_1920_1080p         1
+#define DC_DEFAULT_VIDEO_FORMAT     (VIDEO_FORMAT_1920_1080p)
+#endif
+
+#endif /* ENABLE_PLATO_HDMI */
+
+/* Exposed APIs */
+int plato_enable(struct device *dev);
+void plato_disable(struct device *dev);
+
+int plato_enable_interrupt(struct device *dev,
+			   enum PLATO_INTERRUPT interrupt_id);
+int plato_disable_interrupt(struct device *dev,
+			   enum PLATO_INTERRUPT interrupt_id);
+
+int plato_set_interrupt_handler(struct device *dev,
+				enum PLATO_INTERRUPT interrupt_id,
+				void (*handler_function)(void *),
+				void *handler_data);
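+
+/*
+ * Typical sequence for a sub-driver (illustrative; pdp_irq_handler and
+ * its pdp context are caller-supplied, and dev is assumed to be the
+ * parent Plato device):
+ *
+ *	plato_set_interrupt_handler(dev, PLATO_INTERRUPT_PDP,
+ *				    pdp_irq_handler, pdp);
+ *	plato_enable_interrupt(dev, PLATO_INTERRUPT_PDP);
+ */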
+unsigned int plato_core_clock_speed(struct device *dev);
+unsigned int plato_mem_clock_speed(struct device *dev);
+unsigned int plato_pll_clock_speed(struct device *dev,
+				   unsigned int clock_speed);
+void plato_enable_pdp_clock(struct device *dev);
+void plato_enable_pixel_clock(struct device *dev, u32 pixel_clock);
+
+int plato_debug_info(struct device *dev,
+		     struct plato_debug_register *noc_dbg_regs,
+		     struct plato_debug_register *aon_dbg_regs);
+
+/* Internal */
+int plato_memory_init(struct plato_device *plato);
+void plato_memory_deinit(struct plato_device *plato);
+int plato_cfg_init(struct plato_device *plato);
+int request_pci_io_addr(struct pci_dev *pdev, u32 index,
+			resource_size_t offset, resource_size_t length);
+void release_pci_io_addr(struct pci_dev *pdev, u32 index,
+			resource_size_t start, resource_size_t length);
+
+#endif /* _PLATO_DRV_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr.c
new file mode 100644
index 0000000..ee0bf21
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr.c
@@ -0,0 +1,3550 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physmem (PMR) abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management.  This module is responsible for
+                the "PMR" abstraction.  A PMR (Physical Memory Resource)
+                represents some unit of physical memory which is
+                allocated/freed/mapped/unmapped as an indivisible unit
+                (higher software levels provide an abstraction above that
+                to deal with dividing this down into smaller manageable units).
+                Importantly, this module knows nothing of virtual memory, or
+                of MMUs etc., with one excusable exception.  We have the
+                concept of a "page size", which really means nothing in
+                physical memory, but represents a "contiguity quantum" such
+                that the higher level modules which map this memory are able
+                to verify that it matches the needs of the page size for the
+                virtual realm into which it is being mapped.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "devicemem_server_utils.h"
+
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "pmr_impl.h"
+#include "pmr_os.h"
+#include "pvrsrv.h"
+
+#include "allocmem.h"
+#include "lock.h"
+
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "secure_export.h"
+#include "ossecure_export.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "ri_server.h"
+#endif
+
+/* ourselves */
+#include "pmr.h"
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+#include "mmap_stats.h"
+#endif
+
+/* A "context" for the physical memory block resource allocator.
+
+   Context is probably the wrong word.
+
+   There is almost certainly only one of these, ever, in the system.
+   But, let's keep the notion of a context anyway, "just-in-case".
+ */
+static struct _PMR_CTX_
+{
+	/* For debugging, and PDump, etc., let's issue a forever
+       incrementing serial number to each allocation. */
+	IMG_UINT64 uiNextSerialNum;
+
+	/* For security, we only allow a PMR to be mapped if the caller
+       knows its key.  We can pseudo-randomly generate keys */
+	IMG_UINT64 uiNextKey;
+
+	/* For debugging only, I guess:  Number of live PMRs */
+	IMG_UINT32 uiNumLivePMRs;
+
+	/* Lock for this structure */
+	POS_LOCK hLock;
+
+	/* In order to seed the uiNextKey, we enforce initialisation at
+       driver load time.  Also, we can debug check at driver unload
+       that the PMR count is zero. */
+	IMG_BOOL bModuleInitialised;
+} _gsSingletonPMRContext = { 1, 0, 0, NULL, IMG_FALSE };
+
+
+/* A PMR. One per physical allocation.  May be "shared".
+
+   "shared" is ambiguous.  We need to be careful with terminology.
+   There are two ways in which a PMR may be "shared" and we need to be
+   sure that we are clear which we mean.
+
+   i)   multiple small allocations living together inside one PMR;
+
+   ii)  one single allocation filling a PMR but mapped into multiple
+        memory contexts.
+
+   This is more important further up the stack - at this level, all we
+   care is that the PMR is being referenced multiple times.
+ */
+struct _PMR_
+{
+	/* This object is strictly refcounted.  References include:
+       - mapping
+       - live handles (to this object)
+       - live export handles
+       (thus it is normal for allocated and exported memory to have a refcount of 3)
+       The object is destroyed when and only when the refcount reaches 0
+	 */
+
+	/* Device node on which this PMR was created and is valid */
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	/*
+       Physical address translation (device <> cpu) is done on a per device
+       basis which means we need the physical heap info
+	 */
+	PHYS_HEAP *psPhysHeap;
+
+	ATOMIC_T iRefCount;
+
+	/* lock count - this is the number of times
+       PMRLockSysPhysAddresses() has been called, less the number of
+       PMRUnlockSysPhysAddresses() calls.  This is arguably here for
+       debug reasons only, as the refcount is already incremented as a
+       matter of course.  Really, this just allows us to trap protocol
+       errors: i.e. calling PMRSysPhysAddr(),
+       without a lock, or calling PMRUnlockSysPhysAddresses() too many
+       or too few times. */
+	ATOMIC_T iLockCount;
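+
+	/* In sketch form, the protocol this lock count traps:
+	 *
+	 *	PMRLockSysPhysAddresses(psPMR);
+	 *	PMR_DevPhysAddr(psPMR, ...);	// addresses valid here
+	 *	PMRUnlockSysPhysAddresses(psPMR);
+	 *
+	 * Calls must balance, and querying physical addresses without
+	 * holding a lock is a protocol error.
+	 */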
+
+	/* Lock for this structure */
+	POS_LOCK hLock;
+
+	/* Incrementing serial number to each allocation. */
+	IMG_UINT64 uiSerialNum;
+
+	/* For security, we only allow a PMR to be mapped if the caller
+       knows its key.  We can pseudo-randomly generate keys */
+	PMR_PASSWORD_T uiKey;
+
+	/* Callbacks for per-flavour functions */
+	const PMR_IMPL_FUNCTAB *psFuncTab;
+
+	/* Data associated with the "subtype" */
+	PMR_IMPL_PRIVDATA pvFlavourData;
+
+	/* What kind of PMR do we have? */
+	PMR_IMPL_TYPE eFlavour;
+
+	/* And for pdump */
+	const IMG_CHAR *pszPDumpDefaultMemspaceName;
+
+	/* Allocation annotation */
+	IMG_CHAR szAnnotation[DEVMEM_ANNOTATION_MAX_LEN];
+
+#if defined(PDUMP)
+
+	IMG_HANDLE hPDumpAllocHandle;
+
+	IMG_UINT32 uiNumPDumpBlocks;
+#endif
+
+	/* Logical size of allocation.  "logical", because a PMR can
+       represent memory that will never physically exist.  This is the
+       amount of virtual space that the PMR would consume when it's
+       mapped into a virtual allocation. */
+	PMR_SIZE_T uiLogicalSize;
+
+	/* Mapping table for the allocation.
+	   PMRs can be sparse, in which case not all the "logical" addresses
+	   in them are valid. We need to know which addresses are and aren't
+	   valid when mapping or reading the PMR.
+	   The mapping table translates "logical" offsets into physical
+	   offsets, which is what we always pass to the PMR factory
+	   (so it doesn't have to be concerned about sparseness issues) */
+	PMR_MAPPING_TABLE *psMappingTable;
+
+	/* Indicates whether this PMR has been allocated as sparse.
+	 * The condition for this variable to be set at allocation time is:
+	 * (numVirtChunks != numPhysChunks) || (numVirtChunks > 1)
+	 */
+	IMG_BOOL bSparseAlloc;
+
+	/* Indicates whether this PMR has been unpinned.
+	 * By default, all PMRs are pinned at creation.
+	 */
+	IMG_BOOL bIsUnpinned;
+
+	/* Minimum Physical Contiguity Guarantee.  Might be called "page
+       size", but that would be incorrect, as page size is something
+       meaningful only in virtual realm.  This contiguity guarantee
+       provides an inequality that can be verified/asserted/whatever
+       to ensure that this PMR conforms to the page size requirement
+       of the place the PMR gets mapped.  (May be used to select an
+       appropriate heap in variable page size systems)
+
+       The absolutely necessary condition is this:
+
+       device MMU page size <= actual physical contiguity.
+
+       We go one step further in order to be able to provide an early warning / early compatibility check and say this:
+
+       device MMU page size <= 2**(uiLog2ContiguityGuarantee) <= actual physical contiguity.
+
+       In this way, it is possible to make the page table reservation
+       in the device MMU without even knowing the granularity of the
+       physical memory (i.e. useful for being able to allocate virtual
+       before physical)
+	 */
+	PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee;
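+
+	/* Example: for a device MMU using 4KB pages, any PMR with
+	   uiLog2ContiguityGuarantee >= 12 satisfies the inequality, since
+	   4096 <= 2**12 <= actual physical contiguity; a PMR built from
+	   64KB-contiguous chunks could advertise a guarantee of up to 16. */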
+
+	/* Flags.  We store a copy of the "PMR flags" (usually a subset of
+       the flags given at allocation time) and return them to any
+       caller of PMR_Flags().  The intention of these flags is that
+       the ones stored here are used to represent permissions, such
+       that no one is able to map a PMR in a mode in which they are not
+       allowed, e.g. writeable for a read-only PMR, etc. */
+	PMR_FLAGS_T uiFlags;
+
+	/* Do we really need this? For now we'll keep it, until we know we don't. */
+	/* NB: this is not the "memory context" in client terms - this is
+       _purely_ the "PMR" context, of which there is almost certainly only
+       ever one per system as a whole, but we'll keep the concept
+       anyway, just-in-case. */
+	struct _PMR_CTX_ *psContext;
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	/*
+	 * Stored handle to PMR RI entry
+	 */
+	void		*hRIHandle;
+#endif
+};
+
+/* do we need a struct for the export handle?  I'll use one for now, but if nothing goes in it, we'll lose it */
+struct _PMR_EXPORT_
+{
+	struct _PMR_ *psPMR;
+};
+
+struct _PMR_PAGELIST_
+{
+	struct _PMR_ *psReferencePMR;
+};
+
+PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR)
+{
+	PPVRSRV_DEVICE_NODE psReturnedDeviceNode = NULL;
+
+	PVR_ASSERT(psExportPMR != NULL);
+	if (psExportPMR)
+	{
+		PVR_ASSERT(psExportPMR->psPMR != NULL);
+		if (psExportPMR->psPMR)
+		{
+			PVR_ASSERT(OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0);
+			if (OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0)
+			{
+				psReturnedDeviceNode = PMR_DeviceNode(psExportPMR->psPMR);
+			}
+		}
+	}
+
+	return psReturnedDeviceNode;
+}
+
+static PVRSRV_ERROR
+_PMRCreate(PMR_SIZE_T uiLogicalSize,
+           PMR_SIZE_T uiChunkSize,
+           IMG_UINT32 ui32NumPhysChunks,
+           IMG_UINT32 ui32NumVirtChunks,
+           IMG_UINT32 *pui32MappingTable,
+           PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+           PMR_FLAGS_T uiFlags,
+           PMR **ppsPMR)
+{
+	void *pvPMRLinAddr;
+	PMR *psPMR;
+	PMR_MAPPING_TABLE *psMappingTable;
+	struct _PMR_CTX_ *psContext;
+	IMG_UINT32 i, ui32Temp = 0;
+	IMG_UINT32 ui32Remainder;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bSparse = IMG_FALSE;
+
+	psContext = &_gsSingletonPMRContext;
+
+	/* Do we have a sparse allocation? */
+	if ( (ui32NumVirtChunks != ui32NumPhysChunks) ||
+			(ui32NumVirtChunks > 1) )
+	{
+		bSparse = IMG_TRUE;
+	}
+
+	/* Extra checks required for sparse PMRs */
+	if (uiLogicalSize != uiChunkSize)
+	{
+		/* Check the logical size and chunk information agree with each other */
+		if (uiLogicalSize != (uiChunkSize * ui32NumVirtChunks))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Bad mapping size (uiLogicalSize = 0x%llx, uiChunkSize = 0x%llx, ui32NumVirtChunks = %d)",
+					__func__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumVirtChunks));
+			return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+		}
+
+		/* Check that the chunk size is a multiple of the contiguity */
+		OSDivide64(uiChunkSize, (1<< uiLog2ContiguityGuarantee), &ui32Remainder);
+		if (ui32Remainder)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Bad chunk size, must be a multiple of the contiguity "
+					"(uiChunkSize = 0x%llx, uiLog2ContiguityGuarantee = %u)",
+					__func__,
+					(unsigned long long) uiChunkSize,
+					uiLog2ContiguityGuarantee));
+			return PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE;
+		}
+	}
+
+	pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + sizeof(IMG_UINT32) * ui32NumVirtChunks);
+
+	if (pvPMRLinAddr == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psPMR = (PMR *) pvPMRLinAddr;
+	psMappingTable = (PMR_MAPPING_TABLE *) (((IMG_CHAR *) pvPMRLinAddr) + sizeof(*psPMR));
+
+	eError = OSLockCreate(&psPMR->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		OSFreeMem(psPMR);
+		return eError;
+	}
+
+	/* Setup the mapping table */
+	psMappingTable->uiChunkSize = uiChunkSize;
+	psMappingTable->ui32NumVirtChunks = ui32NumVirtChunks;
+	psMappingTable->ui32NumPhysChunks = ui32NumPhysChunks;
+	OSCachedMemSet(&psMappingTable->aui32Translation[0], 0xFF, sizeof(psMappingTable->aui32Translation[0])*
+	               ui32NumVirtChunks);
+	for (i=0; i<ui32NumPhysChunks; i++)
+	{
+		ui32Temp = pui32MappingTable[i];
+		psMappingTable->aui32Translation[ui32Temp] = ui32Temp;
+	}
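+
+	/*
+	 * Example: for a sparse allocation with ui32NumVirtChunks = 4 and
+	 * pui32MappingTable = { 0, 2 }, the table above ends up as
+	 * { 0, 0xFFFFFFFF, 2, 0xFFFFFFFF }: valid chunks translate to
+	 * themselves and unbacked chunks keep the 0xFF.. invalid marker.
+	 */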
+
+	/* Setup the PMR */
+	OSAtomicWrite(&psPMR->iRefCount, 0);
+
+	/* If allocation is not made on demand, it will be backed now and
+	 * backing will not be removed until the PMR is destroyed, therefore
+	 * we can initialise the iLockCount to 1 rather than 0.
+	 */
+	OSAtomicWrite(&psPMR->iLockCount, (PVRSRV_CHECK_ON_DEMAND(uiFlags) ? 0 : 1));
+
+	psPMR->psContext = psContext;
+	psPMR->uiLogicalSize = uiLogicalSize;
+	psPMR->uiLog2ContiguityGuarantee = uiLog2ContiguityGuarantee;
+	psPMR->uiFlags = uiFlags;
+	psPMR->psMappingTable = psMappingTable;
+	psPMR->bSparseAlloc = bSparse;
+	psPMR->bIsUnpinned = IMG_FALSE;
+	psPMR->szAnnotation[0] = '\0';
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	psPMR->hRIHandle = NULL;
+#endif
+
+	OSLockAcquire(psContext->hLock);
+	psPMR->uiKey = psContext->uiNextKey;
+	psPMR->uiSerialNum = psContext->uiNextSerialNum;
+	psContext->uiNextKey = (0x80200003 * psContext->uiNextKey)
+								^ (0xf00f0081 * (uintptr_t)pvPMRLinAddr);
+	psContext->uiNextSerialNum ++;
+	*ppsPMR = psPMR;
+	PVR_DPF((PVR_DBG_MESSAGE, "pmr.c: created PMR @0x%p", psPMR));
+	/* Increment live PMR count */
+	psContext->uiNumLivePMRs ++;
+	OSLockRelease(psContext->hLock);
+
+	return PVRSRV_OK;
+}
+
+/* This function returns true if the PMR is in use and false otherwise.
+ * It is not thread safe, so the caller must ensure thread safety,
+ * either by explicitly taking the lock on the PMR or by other means. */
+IMG_BOOL PMRIsPMRLive(PMR *psPMR)
+{
+	return (OSAtomicRead(&psPMR->iRefCount) > 0);
+}
+
+static IMG_UINT32
+_Ref(PMR *psPMR)
+{
+	PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) >= 0);
+	/* We need to ensure that this function is always executed under
+	 * PMRLock. The only exception acceptable is the unloading of the driver.
+	 */
+	return OSAtomicIncrement(&psPMR->iRefCount);
+}
+
+static IMG_UINT32
+_Unref(PMR *psPMR)
+{
+	PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) > 0);
+	/* We need to ensure that this function is always executed under
+	 * PMRLock. The only exception acceptable is the unloading of the driver.
+	 */
+	return OSAtomicDecrement(&psPMR->iRefCount);
+}
+
+static void
+_UnrefAndMaybeDestroy(PMR *psPMR)
+{
+	PVRSRV_ERROR eError2;
+	struct _PMR_CTX_ *psCtx;
+	IMG_INT iRefCount;
+
+	PVR_ASSERT(psPMR != NULL);
+
+	/* Acquire PMR factory lock if provided */
+	if(psPMR->psFuncTab->pfnGetPMRFactoryLock)
+	{
+		psPMR->psFuncTab->pfnGetPMRFactoryLock();
+	}
+
+	iRefCount = _Unref(psPMR);
+
+	if (iRefCount == 0)
+	{
+		if (psPMR->psFuncTab->pfnFinalize != NULL)
+		{
+			eError2 = psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData);
+
+			/* PMR unref can be called asynchronously by the kernel or other
+			 * third party modules (eg. display) which doesn't go through the
+			 * usual services bridge. The same PMR can be referenced simultaneously
+			 * in a different path that results in a race condition.
+			 * Hence depending on the race condition, a factory may refuse to destroy
+			 * the resource associated with this PMR if a reference on it was taken
+			 * prior to unref. In that case the PMR factory function returns the error.
+			 *
+			 * When such an error is encountered, the factory needs to ensure the state
+			 * associated with PMR is undisturbed. At this point we just bail out from
+			 * freeing the PMR itself. The PMR handle will then be freed at a later point
+			 * when the same PMR is unreferenced.
+			 */
+			if (PVRSRV_ERROR_PMR_STILL_REFERENCED == eError2)
+			{
+				if(psPMR->psFuncTab->pfnReleasePMRFactoryLock)
+				{
+					psPMR->psFuncTab->pfnReleasePMRFactoryLock();
+				}
+				return;
+			}
+			PVR_ASSERT (eError2 == PVRSRV_OK); /* can we do better? */
+		}
+#if defined(PDUMP)
+		PDumpPMRFreePMR(psPMR,
+		                psPMR->uiLogicalSize,
+		                (1 << psPMR->uiLog2ContiguityGuarantee),
+		                psPMR->uiLog2ContiguityGuarantee,
+		                psPMR->hPDumpAllocHandle);
+#endif
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+		/* This PMR is about to be destroyed, update its mmap stats record (if present)
+		 * to avoid dangling pointer. Additionally, this is required because mmap stats
+		 * are identified by PMRs and a new PMR down the line "might" get the same address
+		 * as the one we're about to free and we'd like 2 different entries in mmaps
+		 * stats for such cases */
+		MMapStatsRemovePMR(psPMR);
+#endif
+
+#ifdef PVRSRV_NEED_PVR_ASSERT
+		/* If not backed on demand, iLockCount should be 1 otherwise it should be 0 */
+		PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1));
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+		{
+			PVRSRV_ERROR eError;
+
+			/* Delete RI entry */
+			if (psPMR->hRIHandle)
+			{
+				eError = RIDeletePMREntryKM (psPMR->hRIHandle);
+
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: RIDeletePMREntryKM failed: %s",
+							__func__,
+							PVRSRVGetErrorString(eError)));
+					/* continue destroying the PMR */
+				}
+			}
+		}
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+		psCtx = psPMR->psContext;
+
+		OSLockDestroy(psPMR->hLock);
+
+		/* Release PMR factory lock acquired if any */
+		if(psPMR->psFuncTab->pfnReleasePMRFactoryLock)
+		{
+			psPMR->psFuncTab->pfnReleasePMRFactoryLock();
+		}
+
+		OSFreeMem(psPMR);
+
+		/* Decrement live PMR count.  Probably only of interest for debugging */
+		PVR_ASSERT(psCtx->uiNumLivePMRs > 0);
+
+		OSLockAcquire(psCtx->hLock);
+		psCtx->uiNumLivePMRs --;
+		OSLockRelease(psCtx->hLock);
+	}
+	else
+	{
+		/* Release PMR factory lock acquired if any */
+		if(psPMR->psFuncTab->pfnReleasePMRFactoryLock)
+		{
+			psPMR->psFuncTab->pfnReleasePMRFactoryLock();
+		}
+	}
+}
+
+static IMG_BOOL _PMRIsSparse(const PMR *psPMR)
+{
+	return psPMR->bSparseAlloc;
+}
+
+PVRSRV_ERROR
+PMRCreatePMR(PVRSRV_DEVICE_NODE *psDevNode,
+             PHYS_HEAP *psPhysHeap,
+             PMR_SIZE_T uiLogicalSize,
+             PMR_SIZE_T uiChunkSize,
+             IMG_UINT32 ui32NumPhysChunks,
+             IMG_UINT32 ui32NumVirtChunks,
+             IMG_UINT32 *pui32MappingTable,
+             PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+             PMR_FLAGS_T uiFlags,
+             const IMG_CHAR *pszAnnotation,
+             const PMR_IMPL_FUNCTAB *psFuncTab,
+             PMR_IMPL_PRIVDATA pvPrivData,
+             PMR_IMPL_TYPE eType,
+             PMR **ppsPMRPtr,
+             IMG_UINT32 ui32PDumpFlags)
+{
+	PMR *psPMR = NULL;
+	PVRSRV_ERROR eError;
+
+	eError = _PMRCreate(uiLogicalSize,
+	                    uiChunkSize,
+	                    ui32NumPhysChunks,
+	                    ui32NumVirtChunks,
+	                    pui32MappingTable,
+	                    uiLog2ContiguityGuarantee,
+	                    uiFlags,
+	                    &psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	psPMR->psDevNode = psDevNode;
+	psPMR->psPhysHeap = psPhysHeap;
+	psPMR->psFuncTab = psFuncTab;
+	psPMR->pszPDumpDefaultMemspaceName = PhysHeapPDumpMemspaceName(psPhysHeap);
+	psPMR->pvFlavourData = pvPrivData;
+	psPMR->eFlavour = eType;
+	OSAtomicWrite(&psPMR->iRefCount, 1);
+
+	OSStringLCopy(psPMR->szAnnotation, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN);
+
+#if defined(PDUMP)
+	{
+		PMR_FLAGS_T uiFlags = psPMR->uiFlags;
+		IMG_BOOL bInitialise =  IMG_FALSE;
+		IMG_UINT32 ui32InitValue = 0;
+
+		if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags))
+		{
+			bInitialise = IMG_TRUE;
+		}
+		else if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
+		{
+			ui32InitValue = 0xDEADBEEF;
+			bInitialise = IMG_TRUE;
+		}
+
+		PDumpPMRMallocPMR(psPMR,
+		                  (uiChunkSize * ui32NumVirtChunks),
+		                  1ULL<<uiLog2ContiguityGuarantee,
+		                  uiChunkSize,
+		                  ui32NumPhysChunks,
+		                  ui32NumVirtChunks,
+		                  pui32MappingTable,
+		                  uiLog2ContiguityGuarantee,
+		                  bInitialise,
+		                  ui32InitValue,
+		                  &psPMR->hPDumpAllocHandle,
+		                  ui32PDumpFlags);
+	}
+#endif
+
+	*ppsPMRPtr = psPMR;
+
+	return PVRSRV_OK;
+
+	/*
+	 * error exit paths follow
+	 */
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR,
+                                           IMG_UINT32 ui32NestingLevel)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psPMR != NULL);
+
+	/* Note: taking this lock is not required to protect the PMR reference count,
+	 * because the PMR reference count is atomic.
+	 * Rather, taking the lock here guarantees that no caller will exit this function
+	 * without the underlying physical addresses being locked.
+	 */
+	OSLockAcquireNested(psPMR->hLock, ui32NestingLevel);
+	/* We also count the locks as references, so that the PMR is not
+       freed while someone is using a physical address. */
+	/* "lock" here simply means incrementing the refcount.  It means
+       the refcount is multipurpose, but that's okay.  We only have to
+       promise that physical addresses are valid after this point, and
+       remain valid until the corresponding
+       PMRUnlockSysPhysAddressesOSMem() */
+	_Ref(psPMR);
+
+	/* Also count locks separately from other types of references, to
+       allow for debug assertions */
+
+	/* Only call callback if lockcount transitions from 0 to 1 (or 1 to 2 if not backed on demand) */
+	if (OSAtomicIncrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 1 : 2))
+	{
+		if (psPMR->psFuncTab->pfnLockPhysAddresses != NULL)
+		{
+			/* must always have lock and unlock in pairs! */
+			PVR_ASSERT(psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL);
+
+			eError = psPMR->psFuncTab->pfnLockPhysAddresses(psPMR->pvFlavourData);
+
+			if (eError != PVRSRV_OK)
+			{
+				goto e1;
+			}
+		}
+	}
+	OSLockRelease(psPMR->hLock);
+
+	return PVRSRV_OK;
+
+	e1:
+	OSAtomicDecrement(&psPMR->iLockCount);
+	_Unref(psPMR);
+	PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) != 0);
+	OSLockRelease(psPMR->hLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRLockSysPhysAddresses(PMR *psPMR)
+{
+	return PMRLockSysPhysAddressesNested(psPMR, 0);
+}
+
+PVRSRV_ERROR
+PMRUnlockSysPhysAddresses(PMR *psPMR)
+{
+	return PMRUnlockSysPhysAddressesNested(psPMR, 2);
+}
+
+PVRSRV_ERROR
+PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psPMR != NULL);
+
+	/* Acquiring the lock here, as well as during the Lock operation ensures
+	 * the lock count hitting zero and the unlocking of the phys addresses is
+	 * an atomic operation
+	 */
+	OSLockAcquireNested(psPMR->hLock, ui32NestingLevel);
+	PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1));
+
+	if (OSAtomicDecrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1))
+	{
+		if (psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL)
+		{
+			PVR_ASSERT(psPMR->psFuncTab->pfnLockPhysAddresses != NULL);
+
+			eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData);
+			/* must never fail */
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+	}
+
+	OSLockRelease(psPMR->hLock);
+
+	/* We also count the locks as references, so that the PMR is not
+       freed while someone is using a physical address. */
+	_UnrefAndMaybeDestroy(psPMR);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT(psPMR != NULL);
+
+	OSLockAcquire(psPMR->hLock);
+	/* Stop if we still have references on the PMR */
+	if ((bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 2))
+			|| (!bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 1)))
+	{
+		OSLockRelease(psPMR->hLock);
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: PMR is still referenced %u times. "
+				"This PMR is probably exported or used somewhere else. "
+				"At most 2 references are allowed if it is mapped to a device, otherwise 1.",
+				__func__,
+				OSAtomicRead(&psPMR->iRefCount)));
+
+		eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+		goto e_exit;
+	}
+	OSLockRelease(psPMR->hLock);
+
+	if (psPMR->psFuncTab->pfnUnpinMem != NULL)
+	{
+		eError = psPMR->psFuncTab->pfnUnpinMem(psPMR->pvFlavourData);
+		if (eError == PVRSRV_OK)
+		{
+			psPMR->bIsUnpinned = IMG_TRUE;
+		}
+	}
+
+	e_exit:
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRPinPMR(PMR *psPMR)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT(psPMR != NULL);
+
+	if (psPMR->psFuncTab->pfnPinMem != NULL)
+	{
+		eError = psPMR->psFuncTab->pfnPinMem(psPMR->pvFlavourData,
+		                                     psPMR->psMappingTable);
+		if (eError == PVRSRV_OK)
+		{
+			psPMR->bIsUnpinned = IMG_FALSE;
+		}
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRMakeLocalImportHandle(PMR *psPMR,
+                         PMR **ppsPMR)
+{
+	PMRRefPMR(psPMR);
+	*ppsPMR = psPMR;
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnmakeLocalImportHandle(PMR *psPMR)
+{
+	PMRUnrefPMR(psPMR);
+	return PVRSRV_OK;
+}
+
+/*
+	Note:
+	We pass back the PMR as it was passed in as a different handle type
+	(DEVMEM_MEM_IMPORT) and it allows us to change the import structure
+	type if we should need to embed any meta data in it.
+ */
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+                  PMR **ppsPMR,
+                  IMG_DEVMEM_SIZE_T *puiSize,
+                  IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	_Ref(psPMR);
+
+	/* Return the PMR */
+	*ppsPMR = psPMR;
+	*puiSize = psPMR->uiLogicalSize;
+	*puiAlign = 1ULL << psPMR->uiLog2ContiguityGuarantee;
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRGetUID(PMR *psPMR,
+          IMG_UINT64 *pui64UID)
+{
+	PVR_ASSERT(psPMR != NULL);
+
+	*pui64UID = psPMR->uiSerialNum;
+
+	return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+             PMR_EXPORT **ppsPMRExportPtr,
+             PMR_SIZE_T *puiSize,
+             PMR_LOG2ALIGN_T *puiLog2Contig,
+             PMR_PASSWORD_T *puiPassword)
+{
+	IMG_UINT64 uiPassword;
+	PMR_EXPORT *psPMRExport;
+
+	uiPassword = psPMR->uiKey;
+
+	psPMRExport = OSAllocMem(sizeof(*psPMRExport));
+	if (psPMRExport == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psPMRExport->psPMR = psPMR;
+	_Ref(psPMR);
+
+	*ppsPMRExportPtr = psPMRExport;
+	*puiSize = psPMR->uiLogicalSize;
+	*puiLog2Contig = psPMR->uiLog2ContiguityGuarantee;
+	*puiPassword = uiPassword;
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport)
+{
+	PVR_ASSERT(psPMRExport != NULL);
+	PVR_ASSERT(psPMRExport->psPMR != NULL);
+	PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0);
+
+	_UnrefAndMaybeDestroy(psPMRExport->psPMR);
+
+	OSFreeMem(psPMRExport);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR)
+{
+	PMR *psPMR;
+
+	PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0);
+
+	psPMR = psPMRExport->psPMR;
+
+
+	if (psPMR->uiKey != uiPassword)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"PMRImport: Import failed, password specified does not match the export"));
+		return PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR;
+	}
+
+	if (psPMR->uiLogicalSize != uiSize || psPMR->uiLog2ContiguityGuarantee != uiLog2Contig)
+	{
+		return PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES;
+	}
+
+	_Ref(psPMR);
+
+	*ppsPMR = psPMR;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR)
+{
+	_UnrefAndMaybeDestroy(psPMR);
+
+	return PVRSRV_OK;
+}
+
+#else /* if defined(SUPPORT_INSECURE_EXPORT) */
+
+PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+             PMR_EXPORT **ppsPMRExportPtr,
+             PMR_SIZE_T *puiSize,
+             PMR_LOG2ALIGN_T *puiLog2Contig,
+             PMR_PASSWORD_T *puiPassword)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(ppsPMRExportPtr);
+	PVR_UNREFERENCED_PARAMETER(puiSize);
+	PVR_UNREFERENCED_PARAMETER(puiLog2Contig);
+	PVR_UNREFERENCED_PARAMETER(puiPassword);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMRExport);
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMRExport);
+	PVR_UNREFERENCED_PARAMETER(uiPassword);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiLog2Contig);
+	PVR_UNREFERENCED_PARAMETER(ppsPMR);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	return PVRSRV_OK;
+}
+#endif /* if defined(SUPPORT_INSECURE_EXPORT) */
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR)
+{
+	_UnrefAndMaybeDestroy(psPMR);
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _ReleaseSecurePMR(void *psExport)
+{
+	return PMRSecureUnexportPMR(psExport);
+}
+
+PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection,
+                                PVRSRV_DEVICE_NODE *psDevNode,
+                                PMR *psPMR,
+                                IMG_SECURE_TYPE *phSecure,
+                                PMR **ppsPMR,
+                                CONNECTION_DATA **ppsSecureConnection)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(ppsSecureConnection);
+
+	/* We are acquiring reference to PMR here because OSSecureExport
+	 * releases bridge lock and PMR lock for a moment and we don't want PMR
+	 * to be removed by other thread in the meantime. */
+	_Ref(psPMR);
+
+	eError = OSSecureExport("secure_pmr",
+	                        _ReleaseSecurePMR,
+	                        (void *) psPMR,
+	                        phSecure);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	*ppsPMR = psPMR;
+
+	return PVRSRV_OK;
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	_UnrefAndMaybeDestroy(psPMR);
+	return eError;
+}
+
+PVRSRV_ERROR PMRSecureImportPMR(CONNECTION_DATA *psConnection,
+                                PVRSRV_DEVICE_NODE *psDevNode,
+                                IMG_SECURE_TYPE hSecure,
+                                PMR **ppsPMR,
+                                IMG_DEVMEM_SIZE_T *puiSize,
+                                IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMR;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	eError = OSSecureImport(hSecure, (void **) &psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	if (psPMR->psDevNode != psDevNode)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", __func__));
+		return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+	}
+
+	_Ref(psPMR);
+
+	/* Return the PMR */
+	*ppsPMR = psPMR;
+	*puiSize = psPMR->uiLogicalSize;
+	*puiAlign = 1 << psPMR->uiLog2ContiguityGuarantee;
+	return PVRSRV_OK;
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR)
+{
+	_UnrefAndMaybeDestroy(psPMR);
+	return PVRSRV_OK;
+}
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+PVRSRV_ERROR
+PMRStoreRIHandle(PMR *psPMR,
+                 void *hRIHandle)
+{
+	PVR_ASSERT(psPMR != NULL);
+
+	psPMR->hRIHandle = hRIHandle;
+	return PVRSRV_OK;
+}
+#endif
+
+static PVRSRV_ERROR
+_PMRAcquireKernelMappingData(PMR *psPMR,
+                             size_t uiLogicalOffset,
+                             size_t uiSize,
+                             void **ppvKernelAddressOut,
+                             size_t *puiLengthOut,
+                             IMG_HANDLE *phPrivOut,
+                             IMG_BOOL bMapSparse)
+{
+	PVRSRV_ERROR eError;
+	void *pvKernelAddress;
+	IMG_HANDLE hPriv;
+
+	PVR_ASSERT(psPMR != NULL);
+
+	if (_PMRIsSparse(psPMR) && !bMapSparse)
+	{
+		/* Mapping a sparse allocation must be explicitly requested via bMapSparse. */
+		return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+	}
+
+	/* Acquire/Release functions must be overridden in pairs */
+	if (psPMR->psFuncTab->pfnAcquireKernelMappingData == NULL)
+	{
+		PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData == NULL);
+
+		/* If the PMR implementation does not supply this pair of
+           functions, it does not permit the PMR to be mapped
+           into kernel memory at all */
+		eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+		goto e0;
+	}
+	PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL);
+
+	eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+	                                                       uiLogicalOffset,
+	                                                       uiSize,
+	                                                       &pvKernelAddress,
+	                                                       &hPriv,
+	                                                       psPMR->uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
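+	/* Illustrative sizing (assuming a 4KB contiguity guarantee):
+	 * uiSize == 0 maps the whole PMR, uiSize == 0x1800 maps exactly
+	 * 0x1800 bytes, and any other uiSize <= 0x1000 maps a single
+	 * 4KB page. */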
+	*ppvKernelAddressOut = pvKernelAddress;
+	if (uiSize == 0)
+	{
+		/* Zero size means map the whole PMR in ...*/
+		*puiLengthOut = (size_t)psPMR->uiLogicalSize;
+	}
+	else if (uiSize > (1 << psPMR->uiLog2ContiguityGuarantee))
+	{
+		/* ... map in the requested pages ...*/
+		*puiLengthOut = uiSize;
+	}
+	else
+	{
+		/* ... otherwise we just map in one page */
+		*puiLengthOut = 1 << psPMR->uiLog2ContiguityGuarantee;
+	}
+	*phPrivOut = hPriv;
+
+	return PVRSRV_OK;
+
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRAcquireKernelMappingData(PMR *psPMR,
+                            size_t uiLogicalOffset,
+                            size_t uiSize,
+                            void **ppvKernelAddressOut,
+                            size_t *puiLengthOut,
+                            IMG_HANDLE *phPrivOut)
+{
+	return _PMRAcquireKernelMappingData(psPMR,
+	                                    uiLogicalOffset,
+	                                    uiSize,
+	                                    ppvKernelAddressOut,
+	                                    puiLengthOut,
+	                                    phPrivOut,
+	                                    IMG_FALSE);
+}
+
+PVRSRV_ERROR
+PMRAcquireSparseKernelMappingData(PMR *psPMR,
+                                  size_t uiLogicalOffset,
+                                  size_t uiSize,
+                                  void **ppvKernelAddressOut,
+                                  size_t *puiLengthOut,
+                                  IMG_HANDLE *phPrivOut)
+{
+	return _PMRAcquireKernelMappingData(psPMR,
+	                                    uiLogicalOffset,
+	                                    uiSize,
+	                                    ppvKernelAddressOut,
+	                                    puiLengthOut,
+	                                    phPrivOut,
+	                                    IMG_TRUE);
+}
+
+PVRSRV_ERROR
+PMRReleaseKernelMappingData(PMR *psPMR,
+                            IMG_HANDLE hPriv)
+{
+	PVR_ASSERT (psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL);
+	PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL);
+
+	psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+	                                              hPriv);
+
+	return PVRSRV_OK;
+}
+
+#if defined(INTEGRITY_OS)
+
+PVRSRV_ERROR
+PMRMapMemoryObject(PMR *psPMR,
+                   IMG_HANDLE *phMemObj,
+                   void **pvClientAddr,
+                   IMG_HANDLE *phPrivOut)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_HANDLE hPriv = *phPrivOut;
+
+	PVR_ASSERT (psPMR->psFuncTab->pfnMapMemoryObject != NULL);
+
+	eError = psPMR->psFuncTab->pfnMapMemoryObject(hPriv, phMemObj, pvClientAddr, phPrivOut);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRUnmapMemoryObject(PMR *psPMR,
+                     IMG_HANDLE hPriv)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT (psPMR->psFuncTab->pfnUnmapMemoryObject != NULL);
+
+	eError = psPMR->psFuncTab->pfnUnmapMemoryObject(hPriv);
+
+	return eError;
+}
+
+#if defined(USING_HYPERVISOR)
+IMG_HANDLE PMRGetPmr(PMR *psPMR, size_t ulOffset)
+{
+	PVR_ASSERT(psPMR->psFuncTab->pfnGetPmr != NULL);
+	return psPMR->psFuncTab->pfnGetPmr(psPMR->pvFlavourData, ulOffset);
+}
+#endif
+#endif /* INTEGRITY_OS */
+
+/*
+	_PMRLogicalOffsetToPhysicalOffset
+
+	Translate between the "logical" offset which the upper levels
+	provide and the physical offset which the PMR factories work on.
+
+	As well as returning the physical offset, we return the number of
+	bytes remaining until the next chunk and whether this chunk is
+	valid.
+
+	For multi-page operations, upper layers communicate their
+	Log2PageSize; otherwise the argument is redundant (set to zero).
+ */
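+
+/*
+	Worked example (illustrative values, not from the original source):
+	with uiChunkSize = 0x10000 and aui32Translation[2] = 0, a logical
+	offset of 0x21000 falls in virtual chunk 2 with remainder 0x1000,
+	so the physical offset is (0 * 0x10000) + 0x1000 = 0x1000 and
+	*pui32BytesRemain is 0x10000 - 0x1000 = 0xF000.
+ */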
+
+static void
+_PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR,
+                                  IMG_UINT32 ui32Log2PageSize,
+                                  IMG_UINT32 ui32NumOfPages,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                  IMG_DEVMEM_OFFSET_T *puiPhysicalOffset,
+                                  IMG_UINT32 *pui32BytesRemain,
+                                  IMG_BOOL *bValid)
+{
+	PMR_MAPPING_TABLE *psMappingTable = psPMR->psMappingTable;
+	IMG_DEVMEM_OFFSET_T uiPageSize = 1ULL << ui32Log2PageSize;
+	IMG_DEVMEM_OFFSET_T uiOffset = uiLogicalOffset;
+	IMG_UINT64 ui64ChunkIndex;
+	IMG_UINT32 ui32Remain;
+	IMG_UINT32 idx;
+
+	/* Must be translating at least a page */
+	PVR_ASSERT(ui32NumOfPages);
+
+	if (psMappingTable->ui32NumPhysChunks == psMappingTable->ui32NumVirtChunks)
+	{
+		/* Fast path for the common case: logical and physical offsets
+			are equal, so we assume the whole ui32NumOfPages span is valid */
+		*pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiOffset);
+		puiPhysicalOffset[0] = uiOffset;
+		bValid[0] = IMG_TRUE;
+
+		if (ui32NumOfPages > 1)
+		{
+			/* initial offset may not be page aligned, round down */
+			uiOffset &= ~(uiPageSize-1);
+			for (idx=1; idx < ui32NumOfPages; idx++)
+			{
+				uiOffset += uiPageSize;
+				puiPhysicalOffset[idx] = uiOffset;
+				bValid[idx] = IMG_TRUE;
+			}
+		}
+	}
+	else
+	{
+		for (idx=0; idx < ui32NumOfPages; idx++)
+		{
+			ui64ChunkIndex = OSDivide64r64(
+					uiOffset,
+					TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize),
+					&ui32Remain);
+
+			if (psMappingTable->aui32Translation[ui64ChunkIndex] == TRANSLATION_INVALID)
+			{
+				bValid[idx] = IMG_FALSE;
+			}
+			else
+			{
+				bValid[idx] = IMG_TRUE;
+			}
+
+			if (idx == 0)
+			{
+				if (ui32Remain == 0)
+				{
+					/* Start of chunk so return the chunk size */
+					*pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize);
+				}
+				else
+				{
+					*pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize - ui32Remain);
+				}
+
+				puiPhysicalOffset[idx] = (psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize) + ui32Remain;
+
+				/* initial offset may not be page aligned, round down */
+				uiOffset &= ~(uiPageSize-1);
+			}
+			else
+			{
+				puiPhysicalOffset[idx] = psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize + ui32Remain;
+			}
+			uiOffset += uiPageSize;
+		}
+	}
+}
+
+static PVRSRV_ERROR
+_PMR_ReadBytesPhysical(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+                       IMG_UINT8 *pcBuffer,
+                       size_t uiBufSz,
+                       size_t *puiNumBytes)
+{
+	PVRSRV_ERROR eError;
+
+	if (psPMR->psFuncTab->pfnReadBytes != NULL)
+	{
+		/* defer to callback if present */
+
+		eError = PMRLockSysPhysAddresses(psPMR);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+
+		eError = psPMR->psFuncTab->pfnReadBytes(psPMR->pvFlavourData,
+		                                        uiPhysicalOffset,
+		                                        pcBuffer,
+		                                        uiBufSz,
+		                                        puiNumBytes);
+		PMRUnlockSysPhysAddresses(psPMR);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+	}
+	else if (psPMR->psFuncTab->pfnAcquireKernelMappingData)
+	{
+		/* "default" handler for reading bytes */
+
+		IMG_HANDLE hKernelMappingHandle;
+		IMG_UINT8 *pcKernelAddress;
+
+		eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+		                                                       (size_t) uiPhysicalOffset,
+		                                                       uiBufSz,
+		                                                       (void **)&pcKernelAddress,
+		                                                       &hKernelMappingHandle,
+		                                                       psPMR->uiFlags);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+
+		/* Use the conservative 'DeviceMemCopy' here because we can't know
+		 * if this PMR will be mapped cached.
+		 */
+
+		OSDeviceMemCopy(&pcBuffer[0], pcKernelAddress, uiBufSz);
+		*puiNumBytes = uiBufSz;
+
+		psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+		                                              hKernelMappingHandle);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PMR_ReadBytes: can't read from this PMR"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		OSPanic();
+		goto e0;
+	}
+
+	return PVRSRV_OK;
+
+	/*
+      error exit paths follow
+	 */
+
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	*puiNumBytes = 0;
+	return eError;
+}
+
+PVRSRV_ERROR
+PMR_ReadBytes(PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT8 *pcBuffer,
+              size_t uiBufSz,
+              size_t *puiNumBytes)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+	size_t uiBytesCopied = 0;
+
+	if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize)
+	{
+		uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset);
+	}
+	PVR_ASSERT(uiBufSz > 0);
+	PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize);
+
+	/*
+      PMR implementations can override this.  If they don't, a
+      "default" handler uses kernel virtual mappings.  If the kernel
+      can't provide a kernel virtual mapping, this function fails
+	 */
+	PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL ||
+	           psPMR->psFuncTab->pfnReadBytes != NULL);
+
+	while (uiBytesCopied != uiBufSz)
+	{
+		IMG_UINT32 ui32Remain;
+		size_t uiBytesToCopy;
+		size_t uiRead;
+		IMG_BOOL bValid;
+
+		_PMRLogicalOffsetToPhysicalOffset(psPMR,
+		                                  0,
+		                                  1,
+		                                  uiLogicalOffset,
+		                                  &uiPhysicalOffset,
+		                                  &ui32Remain,
+		                                  &bValid);
+		/*
+			Copy up to either the end of the chunk or the end of the
+			buffer, whichever comes first (e.g. if 100KB remain in the
+			buffer but only 60KB remain in the current chunk, copy
+			60KB on this pass)
+		 */
+		uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain);
+
+		if (bValid)
+		{
+			/* Read the data from the PMR */
+			eError = _PMR_ReadBytesPhysical(psPMR,
+			                                uiPhysicalOffset,
+			                                &pcBuffer[uiBytesCopied],
+			                                uiBytesToCopy,
+			                                &uiRead);
+			if ((eError != PVRSRV_OK) || (uiRead != uiBytesToCopy))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Failed to read chunk (eError = %s, uiRead = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")",
+						__func__,
+						PVRSRVGetErrorString(eError),
+						uiRead,
+						uiBytesToCopy));
+				/* Bail out as soon as we hit an error */
+				break;
+			}
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s: Invalid phys offset at logical offset (" IMG_DEVMEM_OFFSET_FMTSPEC ") logical size (" IMG_DEVMEM_OFFSET_FMTSPEC ")",
+					__func__,
+					uiLogicalOffset,
+					psPMR->uiLogicalSize));
+			/* Fill invalid chunks with 0 */
+			OSCachedMemSet(&pcBuffer[uiBytesCopied], 0, uiBytesToCopy);
+			uiRead = uiBytesToCopy;
+			eError = PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR;
+		}
+		uiLogicalOffset += uiRead;
+		uiBytesCopied += uiRead;
+	}
+
+	*puiNumBytes = uiBytesCopied;
+	return eError;
+}
+
+static PVRSRV_ERROR
+_PMR_WriteBytesPhysical(PMR *psPMR,
+                        IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+                        IMG_UINT8 *pcBuffer,
+                        size_t uiBufSz,
+                        size_t *puiNumBytes)
+{
+	PVRSRV_ERROR eError;
+
+	if (psPMR->psFuncTab->pfnWriteBytes != NULL)
+	{
+		/* defer to callback if present */
+
+		eError = PMRLockSysPhysAddresses(psPMR);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+
+		eError = psPMR->psFuncTab->pfnWriteBytes(psPMR->pvFlavourData,
+		                                         uiPhysicalOffset,
+		                                         pcBuffer,
+		                                         uiBufSz,
+		                                         puiNumBytes);
+		PMRUnlockSysPhysAddresses(psPMR);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+	}
+	else if (psPMR->psFuncTab->pfnAcquireKernelMappingData)
+	{
+		/* "default" handler for writing bytes */
+
+		IMG_HANDLE hKernelMappingHandle;
+		IMG_UINT8 *pcKernelAddress;
+
+		eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+		                                                       (size_t) uiPhysicalOffset,
+		                                                       uiBufSz,
+		                                                       (void **)&pcKernelAddress,
+		                                                       &hKernelMappingHandle,
+		                                                       psPMR->uiFlags);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+
+		/* Use the conservative 'DeviceMemCopy' here because we can't know
+		 * if this PMR will be mapped cached.
+		 */
+
+		OSDeviceMemCopy(pcKernelAddress, &pcBuffer[0], uiBufSz);
+		*puiNumBytes = uiBufSz;
+
+		psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+		                                              hKernelMappingHandle);
+	}
+	else
+	{
+		/*
+			The write callback is optional as it's only required by the debug
+			tools
+		 */
+		PVR_DPF((PVR_DBG_ERROR, "_PMR_WriteBytesPhysical: can't write to this PMR"));
+		eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+		OSPanic();
+		goto e0;
+	}
+
+	return PVRSRV_OK;
+
+	/*
+      error exit paths follow
+	 */
+
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	*puiNumBytes = 0;
+	return eError;
+}
+
+PVRSRV_ERROR
+PMR_WriteBytes(PMR *psPMR,
+               IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+               IMG_UINT8 *pcBuffer,
+               size_t uiBufSz,
+               size_t *puiNumBytes)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+	size_t uiBytesCopied = 0;
+
+	if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize)
+	{
+		uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset);
+	}
+	PVR_ASSERT(uiBufSz > 0);
+	PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize);
+
+	/*
+      PMR implementations can override this.  If they don't, a
+      "default" handler uses kernel virtual mappings.  If the kernel
+      can't provide a kernel virtual mapping, this function fails
+	 */
+	PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL ||
+	           psPMR->psFuncTab->pfnWriteBytes != NULL);
+
+	while (uiBytesCopied != uiBufSz)
+	{
+		IMG_UINT32 ui32Remain;
+		size_t uiBytesToCopy;
+		size_t uiWrite;
+		IMG_BOOL bValid;
+
+		_PMRLogicalOffsetToPhysicalOffset(psPMR,
+		                                  0,
+		                                  1,
+		                                  uiLogicalOffset,
+		                                  &uiPhysicalOffset,
+		                                  &ui32Remain,
+		                                  &bValid);
+
+		/*
+			Copy up to either the end of the chunk or the end of the
+			buffer, whichever comes first
+		 */
+		uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain);
+
+		if (bValid)
+		{
+			/* Write the data to the PMR */
+			eError = _PMR_WriteBytesPhysical(psPMR,
+			                                 uiPhysicalOffset,
+			                                 &pcBuffer[uiBytesCopied],
+			                                 uiBytesToCopy,
+			                                 &uiWrite);
+			if ((eError != PVRSRV_OK) || (uiWrite != uiBytesToCopy))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Failed to write chunk (eError = %s, uiWrite = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")",
+						__func__,
+						PVRSRVGetErrorString(eError),
+						uiWrite,
+						uiBytesToCopy));
+				/* Bail out as soon as we hit an error */
+				break;
+			}
+		}
+		else
+		{
+			/* Ignore writes to invalid pages */
+			uiWrite = uiBytesToCopy;
+		}
+		uiLogicalOffset += uiWrite;
+		uiBytesCopied += uiWrite;
+	}
+
+	*puiNumBytes = uiBytesCopied;
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
+{
+	if (psPMR->psFuncTab->pfnMMap)
+	{
+		return psPMR->psFuncTab->pfnMMap(psPMR->pvFlavourData, psPMR, pOSMMapData);
+	}
+
+	return OSMMapPMRGeneric(psPMR, pOSMMapData);
+}
+
+void
+PMRRefPMR(PMR *psPMR)
+{
+	PVR_ASSERT(psPMR != NULL);
+	_Ref(psPMR);
+}
+
+PVRSRV_ERROR
+PMRUnrefPMR(PMR *psPMR)
+{
+	_UnrefAndMaybeDestroy(psPMR);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnrefUnlockPMR(PMR *psPMR)
+{
+	PMRUnlockSysPhysAddresses(psPMR);
+
+	PMRUnrefPMR(psPMR);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_DEVICE_NODE *
+PMR_DeviceNode(const PMR *psPMR)
+{
+	PVR_ASSERT(psPMR != NULL);
+
+	return psPMR->psDevNode;
+}
+
+PMR_FLAGS_T
+PMR_Flags(const PMR *psPMR)
+{
+	PVR_ASSERT(psPMR != NULL);
+
+	return psPMR->uiFlags;
+}
+
+IMG_BOOL
+PMR_IsSparse(const PMR *psPMR)
+{
+	PVR_ASSERT(psPMR != NULL);
+
+	return _PMRIsSparse(psPMR);
+}
+
+IMG_BOOL
+PMR_IsUnpinned(const PMR *psPMR)
+{
+	PVR_ASSERT(psPMR != NULL);
+
+	return psPMR->bIsUnpinned;
+}
+
+PVRSRV_ERROR
+PMR_LogicalSize(const PMR *psPMR,
+                IMG_DEVMEM_SIZE_T *puiLogicalSize)
+{
+	PVR_ASSERT(psPMR != NULL);
+
+	*puiLogicalSize = psPMR->uiLogicalSize;
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMR_PhysicalSize(const PMR *psPMR,
+                 IMG_DEVMEM_SIZE_T *puiPhysicalSize)
+{
+	PVR_ASSERT(psPMR != NULL);
+
+	/* iLockCount will be > 0 for any backed PMR (backed on demand or not) */
+	if ((OSAtomicRead(&psPMR->iLockCount) > 0) && !psPMR->bIsUnpinned)
+	{
+		if (psPMR->bSparseAlloc)
+		{
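+			/* Illustrative (assumed values): 0x10000-byte chunks with
+			 * 3 physically backed chunks report 0x30000 bytes here,
+			 * independent of the logical (virtual) size */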
+			*puiPhysicalSize = psPMR->psMappingTable->uiChunkSize * psPMR->psMappingTable->ui32NumPhysChunks;
+		}
+		else
+		{
+			*puiPhysicalSize = psPMR->uiLogicalSize;
+		}
+	}
+	else
+	{
+		*puiPhysicalSize = 0;
+	}
+	return PVRSRV_OK;
+}
+
+PHYS_HEAP *
+PMR_PhysHeap(const PMR *psPMR)
+{
+	return psPMR->psPhysHeap;
+}
+
+PVRSRV_ERROR
+PMR_IsOffsetValid(const PMR *psPMR,
+                  IMG_UINT32 ui32Log2PageSize,
+                  IMG_UINT32 ui32NumOfPages,
+                  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                  IMG_BOOL *pbValid)
+{
+	IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_UINT32 aui32BytesRemain[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset;
+	IMG_UINT32 *pui32BytesRemain = aui32BytesRemain;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT(psPMR != NULL);
+	PVR_ASSERT(psPMR->uiLogicalSize >= uiLogicalOffset);
+
+	if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T));
+		if (puiPhysicalOffset == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		pui32BytesRemain = OSAllocMem(ui32NumOfPages * sizeof(IMG_UINT32));
+		if (pui32BytesRemain == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+	}
+
+	_PMRLogicalOffsetToPhysicalOffset(psPMR,
+	                                  ui32Log2PageSize,
+	                                  ui32NumOfPages,
+	                                  uiLogicalOffset,
+	                                  puiPhysicalOffset,
+	                                  pui32BytesRemain,
+	                                  pbValid);
+
+	e0:
+	if (puiPhysicalOffset != auiPhysicalOffset && puiPhysicalOffset != NULL)
+	{
+		OSFreeMem(puiPhysicalOffset);
+	}
+
+	if (pui32BytesRemain != aui32BytesRemain && pui32BytesRemain != NULL)
+	{
+		OSFreeMem(pui32BytesRemain);
+	}
+
+	return eError;
+}
+
+PMR_MAPPING_TABLE *
+PMR_GetMappigTable(const PMR *psPMR)
+{
+	PVR_ASSERT(psPMR != NULL);
+	return psPMR->psMappingTable;
+}
+
+IMG_UINT32
+PMR_GetLog2Contiguity(const PMR *psPMR)
+{
+	PVR_ASSERT(psPMR != NULL);
+	return psPMR->uiLog2ContiguityGuarantee;
+}
+
+const IMG_CHAR *
+PMR_GetAnnotation(const PMR *psPMR)
+{
+	PVR_ASSERT(psPMR != NULL);
+	return psPMR->szAnnotation;
+}
+
+PMR_IMPL_TYPE
+PMR_GetType(const PMR *psPMR)
+{
+	PVR_ASSERT(psPMR != NULL);
+	return psPMR->eFlavour;
+}
+
+/* must have called PMRLockSysPhysAddresses() before calling this! */
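+/*
+ * Illustrative call sequence (a sketch, not from the original source):
+ *
+ *	IMG_DEV_PHYADDR sDevPAddr;
+ *	IMG_BOOL bValid;
+ *
+ *	eError = PMRLockSysPhysAddresses(psPMR);
+ *	if (eError != PVRSRV_OK) { ... }
+ *
+ *	eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, 1, 0,
+ *	                         &sDevPAddr, &bValid);
+ *	...
+ *	PMRUnlockSysPhysAddresses(psPMR);
+ */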
+PVRSRV_ERROR
+PMR_DevPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEV_PHYADDR *psDevAddrPtr,
+                IMG_BOOL *pbValid)
+{
+	IMG_UINT32 ui32Remain;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset;
+
+	PVR_ASSERT(psPMR != NULL);
+	PVR_ASSERT(ui32NumOfPages > 0);
+	PVR_ASSERT(psPMR->psFuncTab->pfnDevPhysAddr != NULL);
+
+#ifdef PVRSRV_NEED_PVR_ASSERT
+	PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1));
+#endif
+
+	if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T));
+		if (puiPhysicalOffset == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+	}
+
+	_PMRLogicalOffsetToPhysicalOffset(psPMR,
+	                                  ui32Log2PageSize,
+	                                  ui32NumOfPages,
+	                                  uiLogicalOffset,
+	                                  puiPhysicalOffset,
+	                                  &ui32Remain,
+	                                  pbValid);
+	if (*pbValid || _PMRIsSparse(psPMR))
+	{
+		/* Sparse PMR may not always have the first page valid */
+		eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData,
+		                                          ui32Log2PageSize,
+		                                          ui32NumOfPages,
+		                                          puiPhysicalOffset,
+		                                          pbValid,
+		                                          psDevAddrPtr);
+#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES)
+		/* Currently excluded from the default build because of performance concerns.
+		 * This is not needed on systems where the GPU has the same address view of system RAM as the CPU.
+		 * Alternatively this could be implemented directly in the PMR factories */
+
+		if (PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_UMA ||
+				PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_DMA)
+		{
+			IMG_UINT32 i;
+			IMG_DEV_PHYADDR sDevPAddrCorrected;
+
+			/* Copy the translated addresses to the correct array */
+			for (i = 0; i < ui32NumOfPages; i++)
+			{
+				PhysHeapCpuPAddrToDevPAddr(psPMR->psPhysHeap,
+				                           1,
+				                           &sDevPAddrCorrected,
+				                           (IMG_CPU_PHYADDR *) &psDevAddrPtr[i]);
+				psDevAddrPtr[i].uiAddr = sDevPAddrCorrected.uiAddr;
+			}
+		}
+#endif
+	}
+
+	if (puiPhysicalOffset != auiPhysicalOffset)
+	{
+		OSFreeMem(puiPhysicalOffset);
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	return PVRSRV_OK;
+
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PMR_CpuPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_CPU_PHYADDR *psCpuAddrPtr,
+                IMG_BOOL *pbValid)
+{
+	PVRSRV_ERROR eError;
+	IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEV_PHYADDR *psDevPAddr = asDevPAddr;
+
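+	/* Use the on-stack array for small translations; fall back to a heap
+	 * allocation when more than PMR_MAX_TRANSLATION_STACK_ALLOC pages
+	 * are requested */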
+	if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		psDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR));
+		if (psDevPAddr == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+	}
+
+	eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, ui32NumOfPages,
+	                         uiLogicalOffset, psDevPAddr, pbValid);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+	PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, ui32NumOfPages, psCpuAddrPtr, psDevPAddr);
+
+	if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		OSFreeMem(psDevPAddr);
+	}
+
+	return PVRSRV_OK;
+	e1:
+	if (psDevPAddr != asDevPAddr)
+	{
+		OSFreeMem(psDevPAddr);
+	}
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
+                                 IMG_UINT32 ui32AllocPageCount,
+                                 IMG_UINT32 *pai32AllocIndices,
+                                 IMG_UINT32 ui32FreePageCount,
+                                 IMG_UINT32 *pai32FreeIndices,
+                                 IMG_UINT32 uiSparseFlags)
+{
+	PVRSRV_ERROR eError;
+
+	if (NULL == psPMR->psFuncTab->pfnChangeSparseMem)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: This type of sparse PMR cannot be changed.",
+				__func__));
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	eError = psPMR->psFuncTab->pfnChangeSparseMem(psPMR->pvFlavourData,
+	                                              psPMR,
+	                                              ui32AllocPageCount,
+	                                              pai32AllocIndices,
+	                                              ui32FreePageCount,
+	                                              pai32FreeIndices,
+	                                              uiSparseFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+#if defined(PDUMP)
+	{
+		IMG_BOOL bInitialise = IMG_FALSE;
+		IMG_UINT32 ui32InitValue = 0;
+
+		if (PVRSRV_CHECK_ZERO_ON_ALLOC(PMR_Flags(psPMR)))
+		{
+			bInitialise = IMG_TRUE;
+		}
+		else if (PVRSRV_CHECK_POISON_ON_ALLOC(PMR_Flags(psPMR)))
+		{
+			ui32InitValue = 0xDEADBEEF;
+			bInitialise = IMG_TRUE;
+		}
+
+		PDumpPMRChangeSparsePMR(psPMR,
+		                        1 << psPMR->uiLog2ContiguityGuarantee,
+		                        ui32AllocPageCount,
+		                        pai32AllocIndices,
+		                        ui32FreePageCount,
+		                        pai32FreeIndices,
+		                        bInitialise,
+		                        ui32InitValue,
+		                        &psPMR->hPDumpAllocHandle);
+	}
+
+#endif
+
+	e0:
+	return eError;
+}
+
+
+PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR,
+                                       IMG_UINT64 sCpuVAddrBase,
+                                       IMG_UINT32 ui32AllocPageCount,
+                                       IMG_UINT32 *pai32AllocIndices,
+                                       IMG_UINT32 ui32FreePageCount,
+                                       IMG_UINT32 *pai32FreeIndices)
+{
+	PVRSRV_ERROR eError;
+
+	if ((NULL == psPMR->psFuncTab) ||
+			(NULL == psPMR->psFuncTab->pfnChangeSparseMemCPUMap))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: This type of sparse PMR cannot be changed.",
+				__func__));
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	eError = psPMR->psFuncTab->pfnChangeSparseMemCPUMap(psPMR->pvFlavourData,
+	                                                    psPMR,
+	                                                    sCpuVAddrBase,
+	                                                    ui32AllocPageCount,
+	                                                    pai32AllocIndices,
+	                                                    ui32FreePageCount,
+	                                                    pai32FreeIndices);
+
+	return eError;
+}
+
+#if defined(PDUMP)
+
+static PVRSRV_ERROR
+_PMR_PDumpSymbolicAddrPhysical(const PMR *psPMR,
+                               IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+                               IMG_UINT32 ui32MemspaceNameLen,
+                               IMG_CHAR *pszMemspaceName,
+                               IMG_UINT32 ui32SymbolicAddrLen,
+                               IMG_CHAR *pszSymbolicAddr,
+                               IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                               IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+	if (DevmemCPUCacheCoherency(psPMR->psDevNode, psPMR->uiFlags) ||
+			DevmemDeviceCacheCoherency(psPMR->psDevNode, psPMR->uiFlags))
+	{
+		OSSNPrintf(pszMemspaceName,
+		           ui32MemspaceNameLen,
+		           PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC,
+		           psPMR->pszPDumpDefaultMemspaceName);
+	}
+	else
+	{
+		OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC,
+		           psPMR->pszPDumpDefaultMemspaceName);
+	}
+
+	OSSNPrintf(pszSymbolicAddr,
+	           ui32SymbolicAddrLen,
+	           PMR_SYMBOLICADDR_FMTSPEC,
+	           PMR_DEFAULT_PREFIX,
+	           psPMR->uiSerialNum,
+	           uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR),
+	           psPMR->szAnnotation);
+	PDumpMakeStringValid(pszSymbolicAddr, OSStringLength(pszSymbolicAddr));
+
+
+	*puiNewOffset = uiPhysicalOffset & ((1 << PMR_GetLog2Contiguity(psPMR))-1);
+	*puiNextSymName = (IMG_DEVMEM_OFFSET_T) (((uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR))+1)
+			<< PMR_GetLog2Contiguity(psPMR));
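+
+	/* Illustrative (assumed values): with a log2 contiguity of 12 (4KB),
+	 * uiPhysicalOffset = 0x5234 falls in the symbolic block for page 5,
+	 * giving *puiNewOffset = 0x234 and *puiNextSymName = 0x6000 */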
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                      IMG_UINT32 ui32MemspaceNameLen,
+                      IMG_CHAR *pszMemspaceName,
+                      IMG_UINT32 ui32SymbolicAddrLen,
+                      IMG_CHAR *pszSymbolicAddr,
+                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                      IMG_DEVMEM_OFFSET_T *puiNextSymName
+)
+{
+	IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+	IMG_UINT32 ui32Remain;
+	IMG_BOOL bValid;
+
+	PVR_ASSERT(uiLogicalOffset < psPMR->uiLogicalSize);
+
+	_PMRLogicalOffsetToPhysicalOffset(psPMR,
+	                                  0,
+	                                  1,
+	                                  uiLogicalOffset,
+	                                  &uiPhysicalOffset,
+	                                  &ui32Remain,
+	                                  &bValid);
+
+	if (!bValid)
+	{
+		/*	For sparse allocations there may be no physical backing at a
+		 *	given logical address, but the virtual range can still be valid.
+		 */
+		uiPhysicalOffset = uiLogicalOffset;
+	}
+
+	return _PMR_PDumpSymbolicAddrPhysical(psPMR,
+	                                      uiPhysicalOffset,
+	                                      ui32MemspaceNameLen,
+	                                      pszMemspaceName,
+	                                      ui32SymbolicAddrLen,
+	                                      pszSymbolicAddr,
+	                                      puiNewOffset,
+	                                      puiNextSymName);
+}
+
+/*!
+ * @brief Writes a WRW command to the script2 buffer, representing a
+ *        dword write to a physical allocation. Size is always
+ *        sizeof(IMG_UINT32).
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - offset
+ * @param ui32Value - value to write
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT32 ui32Value,
+                       PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+	PVR_ASSERT(uiLogicalOffset + sizeof(ui32Value) <= psPMR->uiLogicalSize);
+	/* In particular, make sure not to cross a block boundary */
+	PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value))
+			<= uiPMRPageSize));
+
+	eError = PMRLockSysPhysAddresses(psPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Get the symbolic address of the PMR */
+	eError = PMR_PDumpSymbolicAddr(psPMR,
+	                               uiLogicalOffset,
+	                               sizeof(aszMemspaceName),
+	                               &aszMemspaceName[0],
+	                               sizeof(aszSymbolicName),
+	                               &aszSymbolicName[0],
+	                               &uiPDumpSymbolicOffset,
+	                               &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Write the WRW script command */
+	eError = PDumpPMRWRW32(aszMemspaceName,
+	                       aszSymbolicName,
+	                       uiPDumpSymbolicOffset,
+	                       ui32Value,
+	                       uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	eError = PMRUnlockSysPhysAddresses(psPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	return PVRSRV_OK;
+}
+
+/*!
+ * @brief Writes a RDW followed by a WRW command to the pdump script to perform
+ *        an effective copy from memory to memory. Memory copied is of size
+ *        sizeof (IMG_UINT32)
+ *
+ * @param psDstPMR - PMR object representing allocation of destination
+ * @param uiDstLogicalOffset - destination offset
+ * @param psSrcPMR - PMR object representing allocation of source
+ * @param uiSrcLogicalOffset - source offset
+ * @param pszTmpVar - pdump temporary variable used during the copy
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpCopyMem32(PMR *psDstPMR,
+                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+                  PMR *psSrcPMR,
+                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+                  const IMG_CHAR *pszTmpVar,
+                  PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee;
+	const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee;
+
+	PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize);
+	/* In particular, make sure not to cross a block boundary */
+	PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32))
+			<= uiSrcPMRPageSize));
+
+	PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize);
+	/* In particular, make sure not to cross a block boundary */
+	PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32))
+			<= uiDstPMRPageSize));
+
+
+	eError = PMRLockSysPhysAddresses(psSrcPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Get the symbolic address of the source PMR */
+	eError = PMR_PDumpSymbolicAddr(psSrcPMR,
+	                               uiSrcLogicalOffset,
+	                               sizeof(aszMemspaceName),
+	                               &aszMemspaceName[0],
+	                               sizeof(aszSymbolicName),
+	                               &aszSymbolicName[0],
+	                               &uiPDumpSymbolicOffset,
+	                               &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Issue PDump read command */
+	eError = PDumpPMRRDW32MemToInternalVar(pszTmpVar,
+	                                       aszMemspaceName,
+	                                       aszSymbolicName,
+	                                       uiPDumpSymbolicOffset,
+	                                       uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	eError = PMRUnlockSysPhysAddresses(psSrcPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	eError = PMRLockSysPhysAddresses(psDstPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	/* Get the symbolic address of the destination PMR */
+	eError = PMR_PDumpSymbolicAddr(psDstPMR,
+	                               uiDstLogicalOffset,
+	                               sizeof(aszMemspaceName),
+	                               &aszMemspaceName[0],
+	                               sizeof(aszSymbolicName),
+	                               &aszSymbolicName[0],
+	                               &uiPDumpSymbolicOffset,
+	                               &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	/* Write the WRW script command */
+	eError = PDumpPMRWRW32InternalVarToMem(aszMemspaceName,
+	                                       aszSymbolicName,
+	                                       uiPDumpSymbolicOffset,
+	                                       pszTmpVar,
+	                                       uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	eError = PMRUnlockSysPhysAddresses(psDstPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	return PVRSRV_OK;
+}
+
+/*!
+ * @brief Writes a WRW64 command to the script2 buffer, representing a
+ *        qword write to a physical allocation. Size is always
+ *        sizeof(IMG_UINT64).
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - offset
+ * @param ui64Value - value to write
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT64 ui64Value,
+                       PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+
+	PVR_ASSERT(uiLogicalOffset + sizeof(ui64Value) <= psPMR->uiLogicalSize);
+	/* In particular, make sure not to cross a block boundary */
+	PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui64Value))
+			<= uiPMRPageSize));
+
+	eError = PMRLockSysPhysAddresses(psPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Get the symbolic address of the PMR */
+	eError = PMR_PDumpSymbolicAddr(psPMR,
+	                               uiLogicalOffset,
+	                               sizeof(aszMemspaceName),
+	                               &aszMemspaceName[0],
+	                               sizeof(aszSymbolicName),
+	                               &aszSymbolicName[0],
+	                               &uiPDumpSymbolicOffset,
+	                               &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Write the WRW script command */
+	eError = PDumpPMRWRW64(aszMemspaceName,
+	                       aszSymbolicName,
+	                       uiPDumpSymbolicOffset,
+	                       ui64Value,
+	                       uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	eError = PMRUnlockSysPhysAddresses(psPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	return PVRSRV_OK;
+}
+
+/*!
+ * @brief Writes a RDW64 followed by a WRW64 command to the pdump script to
+ *        perform an effective copy from memory to memory. Memory copied is of
+ *        size sizeof(IMG_UINT64)
+ *
+ * @param psDstPMR - PMR object representing allocation of destination
+ * @param uiDstLogicalOffset - destination offset
+ * @param psSrcPMR - PMR object representing allocation of source
+ * @param uiSrcLogicalOffset - source offset
+ * @param pszTmpVar - pdump temporary variable used during the copy
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpCopyMem64(PMR *psDstPMR,
+                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+                  PMR *psSrcPMR,
+                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+                  const IMG_CHAR *pszTmpVar,
+                  PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee;
+	const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee;
+
+	PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize);
+	/* In particular, make sure not to cross a block boundary */
+	PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32))
+			<= uiSrcPMRPageSize));
+
+	PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize);
+	/* In particular, make sure not to cross a block boundary */
+	PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32))
+			<= uiDstPMRPageSize));
+
+
+	eError = PMRLockSysPhysAddresses(psSrcPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Get the symbolic address of the source PMR */
+	eError = PMR_PDumpSymbolicAddr(psSrcPMR,
+	                               uiSrcLogicalOffset,
+	                               sizeof(aszMemspaceName),
+	                               &aszMemspaceName[0],
+	                               sizeof(aszSymbolicName),
+	                               &aszSymbolicName[0],
+	                               &uiPDumpSymbolicOffset,
+	                               &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Issue PDump read command */
+	eError = PDumpPMRRDW64MemToInternalVar(pszTmpVar,
+	                                       aszMemspaceName,
+	                                       aszSymbolicName,
+	                                       uiPDumpSymbolicOffset,
+	                                       uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	eError = PMRUnlockSysPhysAddresses(psSrcPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	eError = PMRLockSysPhysAddresses(psDstPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	/* Get the symbolic address of the destination PMR */
+	eError = PMR_PDumpSymbolicAddr(psDstPMR,
+	                               uiDstLogicalOffset,
+	                               sizeof(aszMemspaceName),
+	                               &aszMemspaceName[0],
+	                               sizeof(aszSymbolicName),
+	                               &aszSymbolicName[0],
+	                               &uiPDumpSymbolicOffset,
+	                               &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	/* Write the WRW script command */
+	eError = PDumpPMRWRW64InternalVarToMem(aszMemspaceName,
+	                                       aszSymbolicName,
+	                                       uiPDumpSymbolicOffset,
+	                                       pszTmpVar,
+	                                       uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	eError = PMRUnlockSysPhysAddresses(psDstPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	return PVRSRV_OK;
+}
+
+/*!
+ * @brief PDumps the contents of the given allocation.
+ * If bZero is IMG_TRUE then the zero page in the parameter stream is used
+ * as the source of data, rather than the allocation's actual backing.
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - Offset to write at
+ * @param uiSize - Number of bytes to write
+ * @param uiPDumpFlags - PDump flags
+ * @param bZero - Use the PDump zero page as the source
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEVMEM_SIZE_T uiSize,
+                PDUMP_FLAGS_T uiPDumpFlags,
+                IMG_BOOL bZero)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiOutOffset;
+	IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName = 0;
+	const IMG_CHAR *pszParamStreamFileName;
+	PDUMP_FILEOFFSET_T uiParamStreamFileOffset;
+
+	/* required when !bZero */
+#define PMR_MAX_PDUMP_BUFSZ (1<<21)
+	IMG_CHAR aszParamStreamFilename[PDUMP_PARAM_MAX_FILE_NAME];
+	IMG_UINT8 *pcBuffer = NULL;
+	size_t uiBufSz;
+	IMG_BOOL bValid;
+	IMG_DEVMEM_SIZE_T uiSizeRemain = uiSize;
+
+	PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
+
+	/* Get the correct PDump stream file name */
+	if (bZero)
+	{
+		PDumpCommentWithFlags(uiPDumpFlags,
+		                      "Zeroing allocation (" IMG_DEVMEM_SIZE_FMTSPEC " bytes)",
+		                      uiSize);
+
+		/* get the zero page information. it is constant for this function */
+		PDumpGetParameterZeroPageInfo(&uiParamStreamFileOffset,
+		                              &uiBufSz,
+		                              &pszParamStreamFileName);
+	}
+	else
+	{
+		uiBufSz = 1 << PMR_GetLog2Contiguity(psPMR);
+		PVR_ASSERT((1 << PMR_GetLog2Contiguity(psPMR)) <= PMR_MAX_PDUMP_BUFSZ);
+
+		pcBuffer = OSAllocMem(uiBufSz);
+
+		PVR_LOGR_IF_NOMEM(pcBuffer, "OSAllocMem");
+
+		eError = PMRLockSysPhysAddresses(psPMR);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		pszParamStreamFileName = aszParamStreamFilename;
+	}
+
+	/* Loop over all touched symbolic addresses of the PMR and
+	 * emit LDBs to load the contents. */
+	while (uiCurrentOffset < (uiLogicalOffset + uiSize))
+	{
+		/* Get the correct symbolic name for the current offset */
+		eError = PMR_PDumpSymbolicAddr(psPMR,
+		                               uiCurrentOffset,
+		                               sizeof(aszMemspaceName),
+		                               &aszMemspaceName[0],
+		                               sizeof(aszSymbolicName),
+		                               &aszSymbolicName[0],
+		                               &uiOutOffset,
+		                               &uiNextSymName);
+		PVR_ASSERT(eError == PVRSRV_OK);
+		PVR_ASSERT((uiNextSymName - uiCurrentOffset) <= uiBufSz);
+
+		PMR_IsOffsetValid(psPMR,
+		                  0,
+		                  1,
+		                  uiCurrentOffset,
+		                  &bValid);
+
+		/* Either just LDB the zeros or read from the PMR and store that
+		 * in the pdump stream */
+		if (bValid)
+		{
+			size_t uiNumBytes;
+
+			if (bZero)
+			{
+				uiNumBytes = MIN(uiSizeRemain, uiNextSymName - uiCurrentOffset);
+			}
+			else
+			{
+				IMG_DEVMEM_OFFSET_T uiReadOffset;
+				uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ?
+						uiLogicalOffset + uiSize - uiCurrentOffset :
+						uiNextSymName - uiCurrentOffset);
+
+				eError = PMR_ReadBytes(psPMR,
+				                       uiCurrentOffset,
+				                       pcBuffer,
+				                       uiReadOffset,
+				                       &uiNumBytes);
+				PVR_ASSERT(eError == PVRSRV_OK);
+
+				eError = PDumpWriteParameterBlob(pcBuffer,
+				                          uiNumBytes,
+				                          uiPDumpFlags,
+				                          &aszParamStreamFilename[0],
+				                          sizeof(aszParamStreamFilename),
+				                          &uiParamStreamFileOffset);
+				if (eError == PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+				{
+					/* Write to parameter file prevented under the flags and
+					 * current state of the driver so skip further writes.
+					 */
+					eError = PVRSRV_OK;
+				}
+				else if (eError != PVRSRV_OK)
+				{
+					PDUMP_ERROR(eError, "Failed to write PMR memory to parameter file");
+				}
+			}
+
+			/* Emit the LDB command to the current symbolic address */
+			eError = PDumpPMRLDB(aszMemspaceName,
+			                     aszSymbolicName,
+			                     uiOutOffset,
+			                     uiNumBytes,
+			                     pszParamStreamFileName,
+			                     uiParamStreamFileOffset,
+			                     uiPDumpFlags);
+			uiSizeRemain = uiSizeRemain - uiNumBytes;
+		}
+		uiCurrentOffset = uiNextSymName;
+	}
+
+	if (!bZero)
+	{
+		eError = PMRUnlockSysPhysAddresses(psPMR);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		OSFreeMem(pcBuffer);
+	}
+
+	return PVRSRV_OK;
+}
+
+
+
+PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+                   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   IMG_UINT32 uiArraySize,
+                   const IMG_CHAR *pszFilename,
+                   IMG_UINT32 uiFileOffset)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiOutOffset;
+	IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName = 0;
+	IMG_UINT32 uiCurrentFileOffset = uiFileOffset;
+
+	PVR_UNREFERENCED_PARAMETER(uiArraySize);
+
+	PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
+
+	while (uiCurrentOffset < (uiLogicalOffset + uiSize))
+	{
+		IMG_DEVMEM_OFFSET_T uiReadOffset;
+
+		eError = PMR_PDumpSymbolicAddr(psPMR,
+		                               uiCurrentOffset,
+		                               sizeof(aszMemspaceName),
+		                               &aszMemspaceName[0],
+		                               sizeof(aszSymbolicName),
+		                               &aszSymbolicName[0],
+		                               &uiOutOffset,
+		                               &uiNextSymName);
+		PVR_ASSERT(eError == PVRSRV_OK);
+		PVR_ASSERT(uiNextSymName <= psPMR->uiLogicalSize);
+
+		uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ?
+				uiLogicalOffset + uiSize - uiCurrentOffset :
+				uiNextSymName - uiCurrentOffset);
+
+		eError = PDumpPMRSAB(aszMemspaceName,
+		                     aszSymbolicName,
+		                     uiOutOffset,
+		                     uiReadOffset,
+		                     pszFilename,
+		                     uiCurrentFileOffset);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		uiCurrentFileOffset += uiNextSymName - uiCurrentOffset;
+		uiCurrentOffset = uiNextSymName;
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT32 ui32Value,
+              IMG_UINT32 ui32Mask,
+              PDUMP_POLL_OPERATOR eOperator,
+              PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+	/* Make sure to not cross a block boundary */
+	PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value))
+			<= uiPMRPageSize));
+
+	eError = PMR_PDumpSymbolicAddr(psPMR,
+	                               uiLogicalOffset,
+	                               sizeof(aszMemspaceName),
+	                               &aszMemspaceName[0],
+	                               sizeof(aszSymbolicName),
+	                               &aszSymbolicName[0],
+	                               &uiPDumpOffset,
+	                               &uiNextSymName);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+#define _MEMPOLL_DELAY		(1000)
+#define _MEMPOLL_COUNT		(2000000000 / _MEMPOLL_DELAY)
+
+	eError = PDumpPMRPOL(aszMemspaceName,
+	                     aszSymbolicName,
+	                     uiPDumpOffset,
+	                     ui32Value,
+	                     ui32Mask,
+	                     eOperator,
+	                     _MEMPOLL_COUNT,
+	                     _MEMPOLL_DELAY,
+	                     uiPDumpFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	return PVRSRV_OK;
+
+	/* error exit paths follow */
+
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+	eError = PMR_PDumpSymbolicAddr(psPMR,
+	                               uiReadOffset,
+	                               sizeof(aszMemspaceName),
+	                               &aszMemspaceName[0],
+	                               sizeof(aszSymbolicName),
+	                               &aszSymbolicName[0],
+	                               &uiPDumpOffset,
+	                               &uiNextSymName);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	eError = PDumpPMRCBP(aszMemspaceName,
+	                     aszSymbolicName,
+	                     uiPDumpOffset,
+	                     uiWriteOffset,
+	                     uiPacketSize,
+	                     uiBufferSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	return PVRSRV_OK;
+
+	/* error exit paths follow */
+
+	e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+                        IMG_UINT32 uiBlockSize,
+                        IMG_UINT32 ui32AllocPageCount,
+                        IMG_UINT32 *pai32AllocIndices,
+                        IMG_UINT32 ui32FreePageCount,
+                        IMG_UINT32 *pai32FreeIndices,
+                        IMG_BOOL bInitialise,
+                        IMG_UINT32 ui32InitValue,
+                        IMG_HANDLE *phPDumpAllocInfoOut)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE *phPDumpAllocInfo = (IMG_HANDLE*) psPMR->hPDumpAllocHandle;
+
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	IMG_UINT32 i, uiIndex;
+
+	/* Remove pages from the PMR */
+	for (i = 0; i < ui32FreePageCount; i++)
+	{
+		uiIndex = pai32FreeIndices[i];
+
+		eError = PDumpFree(phPDumpAllocInfo[uiIndex]);
+		PVR_ASSERT(eError == PVRSRV_OK);
+		phPDumpAllocInfo[uiIndex] = NULL;
+	}
+
+	/* Add new pages to the PMR */
+	for (i = 0; i < ui32AllocPageCount; i++)
+	{
+		uiIndex = pai32AllocIndices[i];
+
+		PVR_ASSERT(phPDumpAllocInfo[uiIndex] == NULL);
+
+		eError = PMR_PDumpSymbolicAddr(psPMR,
+		                               uiIndex * uiBlockSize,
+		                               sizeof(aszMemspaceName),
+		                               &aszMemspaceName[0],
+		                               sizeof(aszSymbolicName),
+		                               &aszSymbolicName[0],
+		                               &uiOffset,
+		                               &uiNextSymName);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		eError = PDumpMalloc(aszMemspaceName,
+		                     aszSymbolicName,
+		                     uiBlockSize,
+		                     uiBlockSize,
+		                     bInitialise,
+		                     ui32InitValue,
+		                     &phPDumpAllocInfo[uiIndex],
+		                     PDUMP_NONE);
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+
+	/* (IMG_HANDLE) <- (IMG_HANDLE*) */
+	*phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo;
+}
+
+void
+PDumpPMRFreePMR(PMR *psPMR,
+                IMG_DEVMEM_SIZE_T uiSize,
+                IMG_DEVMEM_ALIGN_T uiBlockSize,
+                IMG_UINT32 uiLog2Contiguity,
+                IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+
+	/* (IMG_HANDLE*) <- (IMG_HANDLE) */
+	IMG_HANDLE *ahPDumpAllocHandleArray = (IMG_HANDLE*) hPDumpAllocationInfoHandle;
+
+	for (i = 0; i < psPMR->uiNumPDumpBlocks; i++)
+	{
+		if (ahPDumpAllocHandleArray[i] != NULL)
+		{
+			eError = PDumpFree(ahPDumpAllocHandleArray[i]);
+			PVR_ASSERT(eError == PVRSRV_OK);
+			ahPDumpAllocHandleArray[i] = NULL;
+		}
+	}
+
+	OSFreeMem(ahPDumpAllocHandleArray);
+}
+
+
+void
+PDumpPMRMallocPMR(PMR *psPMR,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiBlockSize,
+                  IMG_UINT32 ui32ChunkSize,
+                  IMG_UINT32 ui32NumPhysChunks,
+                  IMG_UINT32 ui32NumVirtChunks,
+                  IMG_UINT32 *puiMappingTable,
+                  IMG_UINT32 uiLog2Contiguity,
+                  IMG_BOOL bInitialise,
+                  IMG_UINT32 ui32InitValue,
+                  IMG_HANDLE *phPDumpAllocInfoOut,
+                  IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE *phPDumpAllocInfo;
+
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	IMG_UINT32 uiNumPhysBlocks;
+	IMG_UINT32 uiNumVirtBlocks;
+	IMG_UINT32 i, uiIndex;
+
+
+	if (PMR_IsSparse(psPMR))
+	{
+		uiNumPhysBlocks = (ui32ChunkSize * ui32NumPhysChunks) >> uiLog2Contiguity;
+		/* Make sure we did not cut off anything */
+		PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == (ui32ChunkSize * ui32NumPhysChunks));
+	}
+	else
+	{
+		uiNumPhysBlocks = uiSize >> uiLog2Contiguity;
+		/* Make sure we did not cut off anything */
+		PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == uiSize);
+	}
+
+	uiNumVirtBlocks = uiSize >> uiLog2Contiguity;
+	PVR_ASSERT(uiNumVirtBlocks << uiLog2Contiguity == uiSize);
+
+	psPMR->uiNumPDumpBlocks = uiNumVirtBlocks;
+
+	phPDumpAllocInfo = (IMG_HANDLE*) OSAllocZMem(uiNumVirtBlocks * sizeof(IMG_HANDLE));
+	if (phPDumpAllocInfo == NULL)
+	{
+		/* Without this check a failed allocation would be dereferenced below */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate PDump alloc info array", __func__));
+		*phPDumpAllocInfoOut = NULL;
+		return;
+	}
+
+	for (i = 0; i < uiNumPhysBlocks; i++)
+	{
+		uiIndex = PMR_IsSparse(psPMR) ? puiMappingTable[i] : i;
+
+		eError = PMR_PDumpSymbolicAddr(psPMR,
+		                               uiIndex * uiBlockSize,
+		                               sizeof(aszMemspaceName),
+		                               &aszMemspaceName[0],
+		                               sizeof(aszSymbolicName),
+		                               &aszSymbolicName[0],
+		                               &uiOffset,
+		                               &uiNextSymName);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		eError = PDumpMalloc(aszMemspaceName,
+		                     aszSymbolicName,
+		                     uiBlockSize,
+		                     uiBlockSize,
+		                     bInitialise,
+		                     ui32InitValue,
+		                     &phPDumpAllocInfo[uiIndex],
+		                     ui32PDumpFlags);
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+
+	/* (IMG_HANDLE) <- (IMG_HANDLE*) */
+	*phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo;
+
+}
+#endif	/* PDUMP */
+
+
+void *PMRGetPrivateData(const PMR *psPMR,
+                        const PMR_IMPL_FUNCTAB *psFuncTab)
+{
+	return (psFuncTab == psPMR->psFuncTab) ? psPMR->pvFlavourData : NULL;
+}
+
+#define PMR_PM_WORD_SIZE 4
+
+PVRSRV_ERROR
+PMRWritePMPageList(/* Target PMR, offset, and length */
+		PMR *psPageListPMR,
+		IMG_DEVMEM_OFFSET_T uiTableOffset,
+		IMG_DEVMEM_SIZE_T  uiTableLength,
+		/* Referenced PMR, and "page" granularity */
+		PMR *psReferencePMR,
+		IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+		PMR_PAGELIST **ppsPageList)
+{
+	PVRSRV_ERROR eError;
+	IMG_DEVMEM_SIZE_T uiWordSize;
+	IMG_UINT32 uiNumPages;
+	IMG_UINT32 uiPageIndex;
+	PMR_FLAGS_T uiFlags = psPageListPMR->uiFlags;
+	PMR_PAGELIST *psPageList;
+#if defined(PDUMP)
+	IMG_CHAR aszTableEntryMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszTableEntrySymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiTableEntryPDumpOffset;
+	IMG_CHAR aszPageMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszPageSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPagePDumpOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+#endif
+#if !defined(NO_HARDWARE)
+	IMG_UINT32 uiPageListPageSize = 1 << psPageListPMR->uiLog2ContiguityGuarantee;
+	IMG_UINT64 uiPageListPMRPage = 0;
+	IMG_UINT64 uiPrevPageListPMRPage = 0;
+	IMG_HANDLE hPrivData = NULL;
+	void *pvKernAddr = NULL;
+	IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEV_PHYADDR *pasDevAddrPtr;
+	IMG_UINT32 *pui32DataPtr = NULL;
+	IMG_BOOL *pbPageIsValid;
+#endif
+
+	uiWordSize = PMR_PM_WORD_SIZE;
+
+	/* check we're being asked to write the same number of 4-byte units as there are pages */
+	uiNumPages = (IMG_UINT32)(psReferencePMR->uiLogicalSize >> uiLog2PageSize);
+
+	if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psReferencePMR->uiLogicalSize)
+	{
+		/* Strictly speaking, it's possible to provoke this error in two ways:
+		 * (i) if it's not a whole multiple of the page size; or
+		 * (ii) if there are more than 4 billion pages.
+		 * The latter is unlikely, but the check is required in order to
+		 * justify the cast.
+		 */
+		eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+		goto return_error;
+	}
+	uiWordSize = (IMG_UINT32)uiTableLength / uiNumPages;
+	if (uiNumPages * uiWordSize != uiTableLength)
+	{
+		eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+		goto return_error;
+	}
+
+	/* Check we're not being asked to write off the end of the PMR */
+	if (uiTableOffset + uiTableLength > psPageListPMR->uiLogicalSize)
+	{
+		/* table memory insufficient to store addresses for the whole block */
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto return_error;
+	}
+
+	/* the PMR into which we are writing must not be user CPU mappable: */
+	if (PVRSRV_CHECK_CPU_READABLE(uiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(uiFlags))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "Masked flags = 0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC,
+		         (uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE))));
+		PVR_DPF((PVR_DBG_ERROR,
+		         "Page list PMR allows CPU mapping (0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC ")",
+		         uiFlags));
+		eError = PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS;
+		goto return_error;
+	}
+
+	if (_PMRIsSparse(psPageListPMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PageList PMR is sparse"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto return_error;
+	}
+
+	if (_PMRIsSparse(psReferencePMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Reference PMR is sparse"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto return_error;
+	}
+
+	psPageList = OSAllocMem(sizeof(PMR_PAGELIST));
+	if (psPageList == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page list"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto return_error;
+	}
+	psPageList->psReferencePMR = psReferencePMR;
+
+	/* Need to lock down the physical addresses of the reference PMR */
+	/* N.B.  This also checks that the requested "contiguity" is achievable */
+	eError = PMRLockSysPhysAddresses(psReferencePMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto free_page_list;
+	}
+
+#if !defined(NO_HARDWARE)
+	if (uiNumPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		pasDevAddrPtr = OSAllocMem(uiNumPages * sizeof(IMG_DEV_PHYADDR));
+		if (pasDevAddrPtr == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page list"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto unlock_phys_addrs;
+		}
+
+		pbPageIsValid = OSAllocMem(uiNumPages * sizeof(IMG_BOOL));
+		if (pbPageIsValid == NULL)
+		{
+			/* Clean-up before exit */
+			OSFreeMem(pasDevAddrPtr);
+
+			PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page state"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto free_devaddr_array;
+		}
+	}
+	else
+	{
+		pasDevAddrPtr = asDevPAddr;
+		pbPageIsValid = abValid;
+	}
+
+	eError = PMR_DevPhysAddr(psReferencePMR, uiLog2PageSize, uiNumPages, 0,
+	                         pasDevAddrPtr, pbPageIsValid);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses"));
+		goto free_valid_array;
+	}
+#endif
+
+	for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+	{
+		IMG_DEVMEM_OFFSET_T uiPMROffset = uiTableOffset + (uiWordSize * uiPageIndex);
+#if defined(PDUMP)
+		eError = PMR_PDumpSymbolicAddr(psPageListPMR,
+		                               uiPMROffset,
+		                               sizeof(aszTableEntryMemspaceName),
+		                               &aszTableEntryMemspaceName[0],
+		                               sizeof(aszTableEntrySymbolicName),
+		                               &aszTableEntrySymbolicName[0],
+		                               &uiTableEntryPDumpOffset,
+		                               &uiNextSymName);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		eError = PMR_PDumpSymbolicAddr(psReferencePMR,
+		                               (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
+		                               sizeof(aszPageMemspaceName),
+		                               &aszPageMemspaceName[0],
+		                               sizeof(aszPageSymbolicName),
+		                               &aszPageSymbolicName[0],
+		                               &uiPagePDumpOffset,
+		                               &uiNextSymName);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		eError = PDumpWriteShiftedMaskedValue(/* destination */
+				aszTableEntryMemspaceName,
+				aszTableEntrySymbolicName,
+				uiTableEntryPDumpOffset,
+				/* source */
+				aszPageMemspaceName,
+				aszPageSymbolicName,
+				uiPagePDumpOffset,
+				/* shift right */
+				uiLog2PageSize,
+				/* shift left */
+				0,
+				/* mask */
+				0xffffffff,
+				/* word size */
+				uiWordSize,
+				/* flags */
+				PDUMP_FLAGS_CONTINUOUS);
+		PVR_ASSERT(eError == PVRSRV_OK);
+#else
+		PVR_UNREFERENCED_PARAMETER(uiPMROffset);
+#endif
+#if !defined(NO_HARDWARE)
+
+		/* We check for sparse PMRs at function entry, but since we are
+		 * able to here, also check that every page is valid.
+		 */
+		PVR_ASSERT(pbPageIsValid[uiPageIndex]);
+		PVR_ASSERT(pasDevAddrPtr[uiPageIndex].uiAddr != 0);
+		PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0);
+
+		uiPageListPMRPage = uiPMROffset >> psReferencePMR->uiLog2ContiguityGuarantee;
+
+		if ((pui32DataPtr == NULL) || (uiPageListPMRPage != uiPrevPageListPMRPage))
+		{
+			size_t uiMappingOffset = uiPMROffset & (~(uiPageListPageSize - 1));
+			size_t uiMappedSize;
+
+			/* If we already had a page list mapped, we need to unmap it... */
+			if (pui32DataPtr != NULL)
+			{
+				PMRReleaseKernelMappingData(psPageListPMR, hPrivData);
+			}
+
+			eError = PMRAcquireKernelMappingData(psPageListPMR,
+			                                     uiMappingOffset,
+			                                     uiPageListPageSize,
+			                                     &pvKernAddr,
+			                                     &uiMappedSize,
+			                                     &hPrivData);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Error mapping page list PMR page (%" IMG_UINT64_FMTSPEC ") into kernel (%d)",
+						uiPageListPMRPage, eError));
+				goto free_valid_array;
+			}
+
+			uiPrevPageListPMRPage = uiPageListPMRPage;
+			PVR_ASSERT(uiMappedSize >= uiPageListPageSize);
+			PVR_ASSERT(pvKernAddr != NULL);
+
+			pui32DataPtr = (IMG_UINT32 *) (((IMG_CHAR *) pvKernAddr) + (uiPMROffset & (uiPageListPageSize - 1)));
+		}
+
+		PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0);
+
+		/* Write the physical page index into the page list PMR */
+		*pui32DataPtr++ = TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize);
+
+		/* Last page so unmap */
+		if (uiPageIndex == (uiNumPages - 1))
+		{
+			PMRReleaseKernelMappingData(psPageListPMR, hPrivData);
+		}
+#endif
+	}
+
+#if !defined(NO_HARDWARE)
+	if (pasDevAddrPtr != asDevPAddr)
+	{
+		OSFreeMem(pbPageIsValid);
+		OSFreeMem(pasDevAddrPtr);
+	}
+#endif
+	*ppsPageList = psPageList;
+	return PVRSRV_OK;
+
+	/* error exit paths follow */
+#if !defined(NO_HARDWARE)
+
+free_valid_array:
+	if (pbPageIsValid != abValid)
+	{
+		OSFreeMem(pbPageIsValid);
+	}
+
+free_devaddr_array:
+	if (pasDevAddrPtr != asDevPAddr)
+	{
+		OSFreeMem(pasDevAddrPtr);
+	}
+
+unlock_phys_addrs:
+	PMRUnlockSysPhysAddresses(psReferencePMR);
+#endif
+
+free_page_list:
+	OSFreeMem(psPageList);
+
+return_error:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+PVRSRV_ERROR /* FIXME: should be void */
+PMRUnwritePMPageList(PMR_PAGELIST *psPageList)
+{
+	PVRSRV_ERROR eError2;
+
+	eError2 = PMRUnlockSysPhysAddresses(psPageList->psReferencePMR);
+	PVR_ASSERT(eError2 == PVRSRV_OK);
+	OSFreeMem(psPageList);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRZeroingPMR(PMR *psPMR,
+              IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
+{
+	IMG_UINT32 uiNumPages;
+	IMG_UINT32 uiPageIndex;
+	IMG_UINT32 ui32PageSize = 1 << uiLog2PageSize;
+	IMG_HANDLE hPrivData = NULL;
+	void *pvKernAddr = NULL;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	size_t uiMappedSize;
+
+	PVR_ASSERT(psPMR);
+
+	/* Calculate number of pages in this PMR */
+	uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
+
+	/* Verify the logical size is a multiple of the physical page size */
+	if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: PMR is not a multiple of %u", ui32PageSize));
+		eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+		goto MultiPage_Error;
+	}
+
+	if (_PMRIsSparse(psPMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: PMR is sparse"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Sparse_Error;
+	}
+
+	/* Scan through all pages of the PMR */
+	for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+	{
+		/* map the physical page (for a given PMR offset) into kernel space */
+		eError = PMRAcquireKernelMappingData(psPMR,
+		                                     (size_t)uiPageIndex << uiLog2PageSize,
+		                                     ui32PageSize,
+		                                     &pvKernAddr,
+		                                     &uiMappedSize,
+		                                     &hPrivData);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: AcquireKernelMapping failed with error %u", eError));
+			goto AcquireKernelMapping_Error;
+		}
+
+		/* ensure the mapped page size is the same as the physical page size */
+		if (uiMappedSize != ui32PageSize)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: Physical Page size = 0x%08x, Size of Mapping = 0x%016" IMG_UINT64_FMTSPECx,
+					ui32PageSize,
+					(IMG_UINT64)uiMappedSize));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto MappingSize_Error;
+		}
+
+		/* Use the conservative 'DeviceMemSet' here because we can't know
+		 * if this PMR will be mapped cached.
+		 */
+
+		OSDeviceMemSet(pvKernAddr, 0, ui32PageSize);
+
+		/* release mapping */
+		PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "PMRZeroingPMR: Zeroing PMR %p done (num pages %u, page size %u)",
+			psPMR,
+			uiNumPages,
+			ui32PageSize));
+
+	return PVRSRV_OK;
+
+
+	/* Error handling */
+
+	MappingSize_Error:
+	PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+	AcquireKernelMapping_Error:
+	Sparse_Error:
+	MultiPage_Error:
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRDumpPageList(PMR *psPMR,
+                IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
+{
+	IMG_DEV_PHYADDR sDevAddrPtr;
+	IMG_UINT32 uiNumPages;
+	IMG_UINT32 uiPageIndex;
+	IMG_BOOL bPageIsValid;
+	IMG_UINT32 ui32Col = 16;
+	IMG_UINT32 ui32SizePerCol = 11;
+	IMG_UINT32 ui32ByteCount = 0;
+	IMG_CHAR pszBuffer[16 /* ui32Col */ * 11 /* ui32SizePerCol */ + 1];
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Get number of pages */
+	uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
+
+	/* Verify the logical size is a multiple of the physical page size */
+	if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PMRPrintPageList: PMR is not a multiple of %u", 1 << uiLog2PageSize));
+		eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+		goto MultiPage_Error;
+	}
+
+	if (_PMRIsSparse(psPMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PMRPrintPageList: PMR is sparse"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Sparse_Error;
+	}
+
+	PVR_LOG(("    PMR %p, Number of pages %u, Log2PageSize %d", psPMR, uiNumPages, uiLog2PageSize));
+
+	/* Print the address of the physical pages */
+	for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+	{
+		/* Get Device physical Address */
+		eError = PMR_DevPhysAddr(psPMR,
+		                         uiLog2PageSize,
+		                         1,
+		                         (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
+		                         &sDevAddrPtr,
+		                         &bPageIsValid);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PMRPrintPageList: PMR %p failed to get DevPhysAddr with error %u",
+					psPMR,
+					eError));
+			goto DevPhysAddr_Error;
+		}
+
+		ui32ByteCount += OSSNPrintf(pszBuffer + ui32ByteCount, ui32SizePerCol + 1, "%08x ", (IMG_UINT32)(sDevAddrPtr.uiAddr >> uiLog2PageSize));
+		PVR_ASSERT(ui32ByteCount < ui32Col * ui32SizePerCol);
+
+		if (uiPageIndex % ui32Col == ui32Col-1)
+		{
+			PVR_LOG(("      Phys Page: %s", pszBuffer));
+			ui32ByteCount = 0;
+		}
+	}
+	if (ui32ByteCount > 0)
+	{
+		PVR_LOG(("      Phys Page: %s", pszBuffer));
+	}
+
+	return PVRSRV_OK;
+
+	/* Error handling */
+	DevPhysAddr_Error:
+	Sparse_Error:
+	MultiPage_Error:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	if (_gsSingletonPMRContext.bModuleInitialised)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error: Singleton PMR context already initialized", __func__));
+		eError = PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+		goto out;
+	}
+
+	eError = OSLockCreate(&_gsSingletonPMRContext.hLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error: failed to create lock", __func__));
+		goto out;
+	}
+
+	_gsSingletonPMRContext.uiNextSerialNum = 1;
+
+	_gsSingletonPMRContext.uiNextKey = 0x8300f001 * (uintptr_t)&_gsSingletonPMRContext;
+
+	_gsSingletonPMRContext.bModuleInitialised = IMG_TRUE;
+
+	_gsSingletonPMRContext.uiNumLivePMRs = 0;
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+	eError = MMapStatsInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: MMap stats initialisation failed", __func__));
+		goto out;
+	}
+#endif
+
+	out:
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRDeInit(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		goto out;
+	}
+
+	if (!_gsSingletonPMRContext.bModuleInitialised)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error: Singleton PMR context is not initialized", __func__));
+		eError = PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+		goto out;
+	}
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+	MMapStatsDeInit();
+#endif
+
+	if (_gsSingletonPMRContext.uiNumLivePMRs != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error: %d live PMRs remain",
+				__func__,
+				_gsSingletonPMRContext.uiNumLivePMRs));
+		PVR_DPF((PVR_DBG_ERROR, "%s: This is an unrecoverable error; a subsequent crash is inevitable",
+				__func__));
+		eError = PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+		goto out;
+	}
+
+	OSLockDestroy(_gsSingletonPMRContext.hLock);
+
+	_gsSingletonPMRContext.bModuleInitialised = IMG_FALSE;
+
+	/* FIXME: should deinitialise the mutex here */
+	out:
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr.h
new file mode 100644
index 0000000..c8ff307
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr.h
@@ -0,0 +1,1108 @@
+/**************************************************************************/ /*!
+@File
+@Title		Physmem (PMR) abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Part of the memory management.  This module is responsible for
+                the "PMR" abstraction.  A PMR (Physical Memory Resource)
+                represents some unit of physical memory which is
+                allocated/freed/mapped/unmapped as an indivisible unit
+                (higher software levels provide an abstraction above that
+                to deal with dividing this down into smaller manageable units).
+                Importantly, this module knows nothing of virtual memory, or
+                of MMUs etc., with one excusable exception.  We have the
+                concept of a "page size", which really means nothing in
+                physical memory, but represents a "contiguity quantum" such
+                that the higher level modules which map this memory are able
+                to verify that it matches the needs of the page size for the
+                virtual realm into which it is being mapped.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PMR_H_
+#define _SRVSRV_PMR_H_
+
+/* include/ */
+#include "img_types.h"
+#include "img_defs.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"			/* Required for export DEVMEM_EXPORTCOOKIE */
+
+/* services/include */
+#include "pdump.h"
+
+/* services/server/include/ */
+#include "pmr_impl.h"
+#include "physheap.h"
+#include "opaque_types.h"
+
+#define PMR_MAX_TRANSLATION_STACK_ALLOC				(32)
+
+/* Maximum number of pages a PMR can have, i.e. 1GB of memory with 4kB pages */
+#define PMR_MAX_SUPPORTED_PAGE_COUNT				(262144)
+
+typedef IMG_UINT64 PMR_BASE_T;
+typedef IMG_UINT64 PMR_SIZE_T;
+#define PMR_SIZE_FMTSPEC "0x%010"IMG_UINT64_FMTSPECX
+#define PMR_VALUE32_FMTSPEC "0x%08X"
+#define PMR_VALUE64_FMTSPEC "0x%016"IMG_UINT64_FMTSPECX
+typedef IMG_UINT32 PMR_LOG2ALIGN_T;
+typedef IMG_UINT64 PMR_PASSWORD_T;
+
+struct _PMR_MAPPING_TABLE_
+{
+	PMR_SIZE_T	uiChunkSize;			/*!< Size of a "chunk" */
+	IMG_UINT32 	ui32NumPhysChunks;		/*!< Number of physical chunks that are valid */
+	IMG_UINT32 	ui32NumVirtChunks;		/*!< Number of virtual chunks in the mapping */
+	/* Must be last */
+	IMG_UINT32 	aui32Translation[1];    /*!< Translation mapping for "logical" to physical */
+};
+
+#define TRANSLATION_INVALID 0xFFFFFFFFUL
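+
+/* Illustrative sketch (hypothetical caller, not part of this API): resolving
+ * a logical chunk to its physical chunk via the mapping table above. A hole
+ * in a sparse allocation is reported as TRANSLATION_INVALID.
+ *
+ *     IMG_UINT32 ui32PhysChunk = psMappingTable->aui32Translation[ui32LogicalChunk];
+ *     if (ui32PhysChunk == TRANSLATION_INVALID)
+ *     {
+ *         ... this logical chunk has no physical backing ...
+ *     }
+ */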
+
+typedef struct _PMR_EXPORT_ PMR_EXPORT;
+
+typedef struct _PMR_PAGELIST_ PMR_PAGELIST;
+
+//typedef struct _PVRSRV_DEVICE_NODE_ *PPVRSRV_DEVICE_NODE;
+
+/*
+ * PMRCreatePMR
+ *
+ * Not to be called directly, only via implementations of PMR
+ * factories, e.g. in physmem_osmem.c, deviceclass.c, etc.
+ *
+ * Creates a PMR object, with callbacks and private data as per the
+ * FuncTab/PrivData args.
+ *
+ * Note that at creation time the PMR must set in stone the "logical
+ * size" and the "contiguity guarantee"
+ *
+ * Flags are also set at this time.  (T.B.D.  flags also immutable for
+ * the life of the PMR?)
+ *
+ * Logical size is the amount of Virtual space this allocation would
+ * take up when mapped.  Note that this does not have to be the same
+ * as the actual physical size of the memory.  For example, consider
+ * the sparsely allocated non-power-of-2 texture case.  In this
+ * instance, the "logical size" would be the virtual size of the
+ * rounded-up power-of-2 texture.  That some pages of physical memory
+ * may not exist does not affect the logical size calculation.
+ *
+ * The PMR must also supply the "contiguity guarantee" which is the
+ * finest granularity of alignment and size of physical pages that the
+ * PMR will provide after LockSysPhysAddresses is called.  Note that
+ * the calling code may choose to call PMRSysPhysAddr with a finer
+ * granularity than this, for example if it were to map into a device
+ * MMU with a smaller page size, and it's also OK for the PMR to
+ * supply physical memory in larger chunks than this.  But
+ * importantly, never the other way around.
+ *
+ * More precisely, the following inequality must be maintained
+ * whenever mappings and/or physical addresses exist:
+ *
+ *       (device MMU page size) <= 2**(uiLog2ContiguityGuarantee) <= (actual contiguity of physical memory)
+ *
+ * The function table will contain the following callbacks which may
+ * be overridden by the PMR implementation:
+ *
+ * pfnLockPhysAddresses
+ *
+ *      Called when someone requests that physical pages be locked
+ *      down via the PMRLockSysPhysAddresses() API.  Note
+ *      that if physical pages are prefaulted at PMR creation time and
+ *      therefore static, it would not be necessary to override this
+ *      function, in which case NULL may be supplied.
+ *
+ * pfnUnlockPhysAddresses
+ *
+ *      The reverse of pfnLockPhysAddresses.  Note that this should be
+ *      NULL if and only if pfnLockPhysAddresses is NULL
+ *
+ * pfnSysPhysAddr
+ *
+ *      This function is mandatory.  This is the one which returns the
+ *      system physical address for a given offset into this PMR.  The
+ *      "lock" function will have been called, if overridden, before
+ *      this function, thus the implementation should not increase any
+ *      refcount when answering this call.  Refcounting, if necessary,
+ *      should be done in the lock/unlock calls.  Refcounting would
+ *      not be necessary in the prefaulted/static scenario, as the
+ *      pmr.c abstraction will handle the refcounting for the whole
+ *      PMR.
+ *
+ * pfnFinalize
+ *
+ *      Called when the PMR's refcount reaches zero and it gets
+ *      destroyed.  This allows the implementation to free up any
+ *      resource acquired during creation time.
+ *
+ */
+extern PVRSRV_ERROR
+PMRCreatePMR(PPVRSRV_DEVICE_NODE psDevNode,
+             PHYS_HEAP *psPhysHeap,
+             PMR_SIZE_T uiLogicalSize,
+             PMR_SIZE_T uiChunkSize,
+             IMG_UINT32 ui32NumPhysChunks,
+             IMG_UINT32 ui32NumVirtChunks,
+             IMG_UINT32 *pui32MappingTable,
+             PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+             PMR_FLAGS_T uiFlags,
+             const IMG_CHAR *pszAnnotation,
+             const PMR_IMPL_FUNCTAB *psFuncTab,
+             PMR_IMPL_PRIVDATA pvPrivData,
+             PMR_IMPL_TYPE eType,
+             PMR **ppsPMRPtr,
+             IMG_UINT32 ui32PDumpFlags);
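+
+/* Illustrative factory sketch (all names below are hypothetical and error
+ * handling is elided; the full PMR_IMPL_FUNCTAB layout lives in pmr_impl.h).
+ * A PMR factory fills in a function table and forwards to PMRCreatePMR();
+ * callbacks that are not needed for prefaulted/static memory stay NULL, as
+ * described above.
+ *
+ *     static const PMR_IMPL_FUNCTAB gsMyFuncTab = {
+ *         .pfnLockPhysAddresses   = NULL,          -- static physical memory
+ *         .pfnUnlockPhysAddresses = NULL,          -- must match the lock callback
+ *         .pfnSysPhysAddr         = MySysPhysAddr, -- mandatory
+ *         .pfnFinalize            = MyFinalize,
+ *     };
+ *
+ *     eError = PMRCreatePMR(psDevNode, psPhysHeap,
+ *                           uiSize, uiSize, 1, 1, &ui32MappingTable,
+ *                           uiLog2PageSize, uiFlags, "MyAlloc",
+ *                           &gsMyFuncTab, pvPrivData, eMyImplType,
+ *                           &psPMR, PDUMP_NONE);
+ */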
+
+/*
+ * PMRLockSysPhysAddresses()
+ *
+ * Calls the relevant callback to lock down the system physical addresses of the memory that makes up the whole PMR.
+ *
+ * Before this call, it is not valid to use any of the information-
+ * getting APIs such as PMR_Flags() or PMR_SysPhysAddr()
+ * [ see note below about lock/unlock semantics ].
+ *
+ * The caller of this function does not have to care about how the PMR
+ * is implemented; the caller only has to know that access to the
+ * physical addresses is permitted _after_ calling this function and
+ * _until_ calling PMRUnlockSysPhysAddresses().
+ *
+ *
+ * Notes to callback implementers (authors of PMR Factories):
+ *
+ * Some PMR implementations will be such that the physical memory
+ * exists for the lifetime of the PMR, with a static address, (and
+ * normally flags and symbolic address are static too) and so it is
+ * legal for a PMR implementation to not provide an implementation for
+ * the lock callback.
+ *
+ * Some PMR implementations may wish to page memory in from secondary
+ * storage on demand.  The lock/unlock callbacks _may_ be the place to
+ * do this.  (more likely, there would be a separate API for doing
+ * this, but this API provides a useful place to assert that it has
+ * been done)
+ */
+
+extern PVRSRV_ERROR
+PMRLockSysPhysAddresses(PMR *psPMR);
+
+extern PVRSRV_ERROR
+PMRLockSysPhysAddressesNested(PMR *psPMR,
+                        IMG_UINT32 ui32NestingLevel);
+
+/*
+ * PMRUnlockSysPhysAddresses()
+ *
+ * the reverse of PMRLockSysPhysAddresses()
+ */
+extern PVRSRV_ERROR
+PMRUnlockSysPhysAddresses(PMR *psPMR);
+
+extern PVRSRV_ERROR
+PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel);
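+
+/* Illustrative lock/use/unlock sketch (hypothetical caller; error handling
+ * elided). Physical addresses may only be queried between the two calls:
+ *
+ *     eError = PMRLockSysPhysAddresses(psPMR);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         eError = PMR_DevPhysAddr(psPMR, 0, 1, uiOffset, &sDevPAddr, &bValid);
+ *         ... use sDevPAddr only while the lock is held ...
+ *         PMRUnlockSysPhysAddresses(psPMR);
+ *     }
+ */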
+
+
+/**************************************************************************/ /*!
+@Function       PMRUnpinPMR
+@Description    This is the counterpart to PMRPinPMR(). It is meant to be
+                called before repinning an allocation.
+
+                For a detailed description see client API documentation.
+
+@Input          psPMR           The physical memory to unpin.
+
+@Input          bDevMapped      A flag that indicates if this PMR has been
+                                mapped to device virtual space.
+                                Needed to check if this PMR is allowed to be
+                                unpinned or not.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
+                                registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped);
+
+/**************************************************************************/ /*!
+@Function       PMRPinPMR
+@Description    This is the counterpart to PMRUnpinPMR(). It is meant to be
+                called after unpinning an allocation.
+
+                For a detailed description see client API documentation.
+
+@Input          psPMR           The physical memory to pin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
+                                was successfully restored.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+                                could not be restored and new physical memory
+                                was allocated.
+
+                                A different error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR PMRPinPMR(PMR *psPMR);
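+
+/* Illustrative unpin/repin sketch (hypothetical caller): an allocation whose
+ * contents may be discarded is handed back to the OS and later restored.
+ * PVRSRV_ERROR_PMR_NEW_MEMORY signals that the old contents were lost.
+ *
+ *     eError = PMRUnpinPMR(psPMR, IMG_FALSE);    -- not mapped to device VA
+ *     ...
+ *     eError = PMRPinPMR(psPMR);
+ *     if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ *     {
+ *         ... reinitialise the allocation contents ...
+ *     }
+ */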
+
+
+/*
+ * PhysmemPMRExport()
+ *
+ * Given a PMR, creates a PMR "Export", which is a handle that
+ * provides sufficient data to be able to "import" this PMR elsewhere.
+ * The PMR Export is an object in its own right, whose existence
+ * implies a reference on the PMR, thus the PMR cannot be destroyed
+ * while the PMR Export exists.  The intention is that the PMR Export
+ * will be wrapped in the devicemem layer by a cross process handle,
+ * and some IPC by which to communicate the handle value and password
+ * to other processes.  The receiving process is able to unwrap this
+ * to gain access to the same PMR Export in this layer, and, via
+ * PhysmemPMRImport(), obtain a reference to the original PMR.
+ *
+ * The caller receives, along with the PMR Export object, information
+ * about the size and contiguity guarantee for the PMR, and also the
+ * PMR's secret password, in order to authenticate the subsequent
+ * import.
+ *
+ * N.B.  If you call PMRExportPMR() (and it succeeds), you are
+ * promising to later call PMRUnexportPMR()
+ */
+extern PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+             PMR_EXPORT **ppsPMRExport,
+             PMR_SIZE_T *puiSize,
+             PMR_LOG2ALIGN_T *puiLog2Contig,
+             PMR_PASSWORD_T *puiPassword);
+
+/*!
+*******************************************************************************
+
+ @Function	PMRMakeLocalImportHandle
+
+ @Description
+
+ Transform a general handle type into one that we are able to import.
+ Takes a PMR reference.
+
+ @Input   psPMR     The input PMR.
+ @Output  ppsPMR    The output PMR that is going to be transformed to the
+                    correct handle type.
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+extern PVRSRV_ERROR
+PMRMakeLocalImportHandle(PMR *psPMR,
+                         PMR **ppsPMR);
+
+/*!
+*******************************************************************************
+
+ @Function	PMRUnmakeLocalImportHandle
+
+ @Description
+
+ Take a PMR, destroy the handle and release a reference.
+ Counterpart to PMRMakeLocalImportHandle().
+
+ @Input   psPMR       PMR to destroy.
+                      Created by PMRMakeLocalImportHandle().
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+extern PVRSRV_ERROR
+PMRUnmakeLocalImportHandle(PMR *psPMR);
+
+/*
+ * PMRUnexportPMR()
+ *
+ * The reverse of PMRExportPMR().  This causes the PMR to no
+ * longer be exported.  If the PMR has already been imported, the
+ * imported PMR reference will still be valid, but no further imports
+ * will be possible.
+ */
+extern PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport);
+
+/*
+ * PMRImportPMR()
+ *
+ * Takes a PMR Export object, as obtained by PMRExportPMR(), and
+ * obtains a reference to the original PMR.
+ *
+ * The password must match, and is assumed to have been (by whatever
+ * means, IPC etc.) preserved intact from the former call to
+ * PMRExportPMR()
+ *
+ * The size and contiguity arguments are entirely irrelevant for the
+ * import, however they are verified in order to trap bugs.
+ *
+ * N.B.  If you call PhysmemPMRImport() (and it succeeds), you are
+ * promising to later call PhysmemPMRUnimport()
+ */
+extern PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR);
+
+/*
+ * PMRUnimportPMR()
+ *
+ * releases the reference on the PMR as obtained by PMRImportPMR()
+ */
+extern PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR);
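+
+/* Illustrative export/import round trip (hypothetical caller; the IPC that
+ * carries the handle and password between processes is elided):
+ *
+ *     eError = PMRExportPMR(psPMR, &psExport, &uiSize, &uiLog2Contig, &uiPassword);
+ *     ... communicate the export handle and uiPassword to the importer ...
+ *     eError = PMRImportPMR(psExport, uiPassword, uiSize, uiLog2Contig, &psImportedPMR);
+ *     ...
+ *     eError = PMRUnimportPMR(psImportedPMR);
+ *     eError = PMRUnexportPMR(psExport);
+ */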
+
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+				  PMR **ppsPMR,
+				  IMG_DEVMEM_SIZE_T *puiSize,
+				  IMG_DEVMEM_ALIGN_T *puiAlign);
+
+/*
+ * Equivalent mapping functions when in kernel mode - TODO: should
+ * unify this and the PMRAcquireMMapArgs API with a suitable
+ * abstraction
+ */
+extern PVRSRV_ERROR
+PMRAcquireKernelMappingData(PMR *psPMR,
+                            size_t uiLogicalOffset,
+                            size_t uiSize,
+                            void **ppvKernelAddressOut,
+                            size_t *puiLengthOut,
+                            IMG_HANDLE *phPrivOut);
+
+extern PVRSRV_ERROR
+PMRAcquireSparseKernelMappingData(PMR *psPMR,
+                                  size_t uiLogicalOffset,
+                                  size_t uiSize,
+                                  void **ppvKernelAddressOut,
+                                  size_t *puiLengthOut,
+                                  IMG_HANDLE *phPrivOut);
+
+extern PVRSRV_ERROR
+PMRReleaseKernelMappingData(PMR *psPMR,
+                            IMG_HANDLE hPriv);
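+
+/* Illustrative kernel-mapping sketch (hypothetical caller; mirrors the
+ * pattern used by PMRZeroingPMR() in pmr.c):
+ *
+ *     eError = PMRAcquireKernelMappingData(psPMR, 0, uiSize,
+ *                                          &pvKernAddr, &uiMappedSize, &hPriv);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         OSDeviceMemSet(pvKernAddr, 0, uiMappedSize);
+ *         PMRReleaseKernelMappingData(psPMR, hPriv);
+ *     }
+ */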
+
+#if defined(INTEGRITY_OS)
+extern PVRSRV_ERROR
+PMRMapMemoryObject(PMR *psPMR,
+                  IMG_HANDLE *phMemObj,
+                  void **pvClientAddr,
+                  IMG_HANDLE *phPrivOut);
+extern PVRSRV_ERROR
+PMRUnmapMemoryObject(PMR *psPMR,
+                     IMG_HANDLE hPriv);
+#endif
+
+/*
+ * PMR_ReadBytes()
+ *
+ * calls into the PMR implementation to read up to uiBufSz bytes,
+ * returning the actual number read in *puiNumBytes
+ *
+ * this will read up to the end of the PMR, or the next symbolic name
+ * boundary, or until the requested number of bytes is read, whichever
+ * comes first
+ *
+ * In the case of sparse PMRs the caller doesn't know which offsets are
+ * valid and which aren't, so we will just write 0 for invalid offsets
+ */
+extern PVRSRV_ERROR
+PMR_ReadBytes(PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT8 *pcBuffer,
+              size_t uiBufSz,
+              size_t *puiNumBytes);
+
+/*
+ * PMR_WriteBytes()
+ *
+ * calls into the PMR implementation to write up to uiBufSz bytes,
+ * returning the actual number read in *puiNumBytes
+ *
+ * this will write up to the end of the PMR, or the next symbolic name
+ * boundary, or until the requested number of bytes is written, whichever
+ * comes first
+ *
+ * In the case of sparse PMRs the caller doesn't know which offsets are
+ * valid and which aren't, so we will just ignore data at invalid offsets
+ */
+extern PVRSRV_ERROR
+PMR_WriteBytes(PMR *psPMR,
+			   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+               IMG_UINT8 *pcBuffer,
+               size_t uiBufSz,
+               size_t *puiNumBytes);
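+
+/* Illustrative sketch (hypothetical caller): copying a small header out of,
+ * and back into, a PMR. *puiNumBytes may be smaller than the request if the
+ * end of the PMR or a symbolic-name boundary is hit first, as noted above.
+ *
+ *     IMG_UINT8 aui8Header[64];
+ *     size_t uiNumBytes;
+ *
+ *     eError = PMR_ReadBytes(psPMR, 0, aui8Header, sizeof(aui8Header), &uiNumBytes);
+ *     ... patch the header in aui8Header ...
+ *     eError = PMR_WriteBytes(psPMR, 0, aui8Header, uiNumBytes, &uiNumBytes);
+ */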
+
+/**************************************************************************/ /*!
+@Function       PMRMMapPMR
+@Description    Performs the necessary steps to map the PMR into a user process
+                address space. The caller does not need to call
+                PMRLockSysPhysAddresses before calling this function.
+
+@Input          psPMR           PMR to map.
+
+@Input          pOSMMapData     OS specific data needed to create a mapping.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success or an error otherwise.
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData);
+
+/*
+ * PMRRefPMR()
+ *
+ * Take a reference on the passed in PMR
+ */
+extern void
+PMRRefPMR(PMR *psPMR);
+
+/*
+ * PMRUnrefPMR()
+ *
+ * This undoes a call to any of the PhysmemNew* family of APIs
+ * (i.e. any PMR factory "constructor")
+ *
+ * This relinquishes a reference to the PMR, and, where the refcount
+ * reaches 0, causes the PMR to be destroyed (calling the finalizer
+ * callback on the PMR, if there is one)
+ */
+extern PVRSRV_ERROR
+PMRUnrefPMR(PMR *psPMR);
+
+/*
+ * PMRUnrefUnlockPMR()
+ *
+ * Same as above but also unlocks the PMR.
+ */
+extern PVRSRV_ERROR
+PMRUnrefUnlockPMR(PMR *psPMR);
+
+extern PPVRSRV_DEVICE_NODE
+PMR_DeviceNode(const PMR *psPMR);
+
+/*
+ * PMRIsPMRLive()
+ *
+ * This function returns true if the PMR is in use and false otherwise.
+ * This function is not thread safe, hence the caller needs to ensure
+ * thread safety by explicitly taking the lock on the PMR or through
+ * other means.
+ */
+IMG_BOOL PMRIsPMRLive(PMR *psPMR);
+
+/*
+ * PMR_Flags()
+ *
+ * Flags are static and guaranteed for the life of the PMR.  Thus this
+ * function is idempotent and acquire/release semantics is not
+ * required.
+ *
+ * Returns the flags as specified on the PMR.  The flags are to be
+ * interpreted as mapping permissions
+ */
+extern PMR_FLAGS_T
+PMR_Flags(const PMR *psPMR);
+
+extern IMG_BOOL
+PMR_IsSparse(const PMR *psPMR);
+
+extern IMG_BOOL
+PMR_IsUnpinned(const PMR *psPMR);
+
+extern PVRSRV_ERROR
+PMR_LogicalSize(const PMR *psPMR,
+				IMG_DEVMEM_SIZE_T *puiLogicalSize);
+
+extern PVRSRV_ERROR
+PMR_PhysicalSize(const PMR *psPMR,
+				 IMG_DEVMEM_SIZE_T *puiPhysicalSize);
+
+extern PHYS_HEAP *
+PMR_PhysHeap(const PMR *psPMR);
+
+extern PMR_MAPPING_TABLE *
+PMR_GetMappigTable(const PMR *psPMR);
+
+extern IMG_UINT32
+PMR_GetLog2Contiguity(const PMR *psPMR);
+
+extern const IMG_CHAR *
+PMR_GetAnnotation(const PMR *psPMR);
+
+/*
+ * PMR_IsOffsetValid()
+ *
+ * Returns if an address offset inside a PMR has a valid
+ * physical backing.
+ */
+extern PVRSRV_ERROR
+PMR_IsOffsetValid(const PMR *psPMR,
+				IMG_UINT32 ui32Log2PageSize,
+				IMG_UINT32 ui32NumOfPages,
+				IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+				IMG_BOOL *pbValid);
+
+extern PMR_IMPL_TYPE
+PMR_GetType(const PMR *psPMR);
+
+/*
+ * PMR_SysPhysAddr()
+ *
+ * A note regarding Lock/Unlock semantics
+ * ======================================
+ *
+ * PMR_SysPhysAddr may only be called after PMRLockSysPhysAddresses()
+ * has been called.  The data returned may be used only until
+ * PMRUnlockSysPhysAddresses() is called after which time the licence
+ * to use the data is revoked and the information may be invalid.
+ *
+ * Given an offset, this function returns the device physical address of the
+ * corresponding page in the PMR.  It may be called multiple times
+ * until the address of all relevant pages has been determined.
+ *
+ * If caller only wants one physical address it is sufficient to pass in:
+ * ui32Log2PageSize==0 and ui32NumOfPages==1
+ */
+extern PVRSRV_ERROR
+PMR_DevPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEV_PHYADDR *psDevAddr,
+                IMG_BOOL *pbValid);
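+
+/* Illustrative batch lookup (hypothetical caller; mirrors the pattern used
+ * by PMRWritePMPageList() in pmr.c): small page counts can use stack arrays,
+ * anything larger than PMR_MAX_TRANSLATION_STACK_ALLOC should be heap
+ * allocated.
+ *
+ *     IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ *     IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ *
+ *     eError = PMR_DevPhysAddr(psPMR, uiLog2PageSize, uiNumPages, 0,
+ *                              asDevPAddr, abValid);
+ */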
+
+/*
+ * PMR_CpuPhysAddr()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * Given an offset, this function returns the CPU physical address of the
+ * corresponding page in the PMR.  It may be called multiple times
+ * until the address of all relevant pages has been determined.
+ *
+ */
+extern PVRSRV_ERROR
+PMR_CpuPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_CPU_PHYADDR *psCpuAddrPtr,
+                IMG_BOOL *pbValid);
+
+PVRSRV_ERROR
+PMRGetUID(PMR *psPMR,
+          IMG_UINT64 *pui64UID);
+/*
+ * PMR_ChangeSparseMem()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * This function alters the memory map of the given PMR in device space by adding/deleting the pages
+ * as requested.
+ *
+ */
+PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
+                                 IMG_UINT32 ui32AllocPageCount,
+                                 IMG_UINT32 *pai32AllocIndices,
+                                 IMG_UINT32 ui32FreePageCount,
+                                 IMG_UINT32 *pai32FreeIndices,
+                                 IMG_UINT32	uiSparseFlags);
+
+/*
+ * PMR_ChangeSparseMemCPUMap()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * This function alters the memory map of the given PMR in CPU space by adding/deleting the pages
+ * as requested.
+ *
+ */
+PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR,
+                                       IMG_UINT64 sCpuVAddrBase,
+                                       IMG_UINT32 ui32AllocPageCount,
+                                       IMG_UINT32 *pai32AllocIndices,
+                                       IMG_UINT32 ui32FreePageCount,
+                                       IMG_UINT32 *pai32FreeIndices);
+
+#if defined(PDUMP)
+
+extern void
+PDumpPMRMallocPMR(PMR *psPMR,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiBlockSize,
+                  IMG_UINT32 ui32ChunkSize,
+                  IMG_UINT32 ui32NumPhysChunks,
+                  IMG_UINT32 ui32NumVirtChunks,
+                  IMG_UINT32 *puiMappingTable,
+                  IMG_UINT32 uiLog2Contiguity,
+                  IMG_BOOL bInitialise,
+                  IMG_UINT32 ui32InitValue,
+                  IMG_HANDLE *phPDumpAllocInfoPtr,
+                  IMG_UINT32 ui32PDumpFlags);
+
+extern void
+PDumpPMRFreePMR(PMR *psPMR,
+                IMG_DEVMEM_SIZE_T uiSize,
+                IMG_DEVMEM_ALIGN_T uiBlockSize,
+                IMG_UINT32 uiLog2Contiguity,
+                IMG_HANDLE hPDumpAllocationInfoHandle);
+
+extern void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+                        IMG_UINT32 uiBlockSize,
+                        IMG_UINT32 ui32AllocPageCount,
+                        IMG_UINT32 *pai32AllocIndices,
+                        IMG_UINT32 ui32FreePageCount,
+                        IMG_UINT32 *pai32FreeIndices,
+                        IMG_BOOL bInitialise,
+                        IMG_UINT32 ui32InitValue,
+                        IMG_HANDLE *phPDumpAllocInfoOut);
+/*
+ * PMR_PDumpSymbolicAddr()
+ *
+ * Given an offset, returns the pdump memspace name and symbolic
+ * address of the corresponding page in the PMR.
+ *
+ * Note that PDump memspace names and symbolic addresses are static
+ * and valid for the lifetime of the PMR, therefore we don't require
+ * acquire/release semantics here.
+ *
+ * Note that it is expected that the pdump "mapping" code will call
+ * this function multiple times as each page is mapped in turn
+ *
+ * Note that NextSymName is the offset from the base of the PMR to the
+ * next pdump symbolic address (or the end of the PMR if the PMR only
+ * had one PDUMPMALLOC).
+ */
+extern PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                      IMG_UINT32 ui32NamespaceNameLen,
+                      IMG_CHAR *pszNamespaceName,
+                      IMG_UINT32 ui32SymbolicAddrLen,
+                      IMG_CHAR *pszSymbolicAddr,
+                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                      IMG_DEVMEM_OFFSET_T *puiNextSymName);
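+
+/* Illustrative walk over the symbolic spans of a PMR (hypothetical caller;
+ * this is the loop shape used by PMRPDumpSaveToFile() in pmr.c):
+ *
+ *     while (uiOffset < uiEnd)
+ *     {
+ *         eError = PMR_PDumpSymbolicAddr(psPMR, uiOffset,
+ *                                        sizeof(aszMemspace), aszMemspace,
+ *                                        sizeof(aszSymbolic), aszSymbolic,
+ *                                        &uiSymOffset, &uiNextSymName);
+ *         ... emit a PDump command for [uiOffset, uiNextSymName) ...
+ *         uiOffset = uiNextSymName;
+ *     }
+ */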
+
+/*
+ * PMRPDumpLoadMemValue32()
+ *
+ * writes the current contents of a dword in PMR memory to the pdump
+ * script stream. Useful for patching a buffer by simply editing the
+ * script output file in ASCII plain text.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT32 ui32Value,
+                       PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpCopyMem32
+ *
+ * Adds in the pdump script stream a copy of a dword in one PMR memory location
+ * to another PMR memory location.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpCopyMem32(PMR *psDstPMR,
+                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+                  PMR *psSrcPMR,
+                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+                  const IMG_CHAR *pszTmpVar,
+                  PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpLoadMemValue64()
+ *
+ * Writes the current contents of a qword in PMR memory to the pdump
+ * script stream. Useful for patching a buffer by simply editing the
+ * script output file in ASCII plain text.
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT64 ui64Value,
+                       PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpCopyMem64()
+ *
+ * Emits PDump script to copy a qword from one PMR memory location to
+ * another PMR memory location.
+ */
+extern PVRSRV_ERROR
+PMRPDumpCopyMem64(PMR *psDstPMR,
+                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+                  PMR *psSrcPMR,
+                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+                  const IMG_CHAR *pszTmpVar,
+                  PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpLoadMem()
+ *
+ * Writes the current contents of the PMR memory to the pdump PRM
+ * stream, and emits PDump code to the script stream to LDB those
+ * bytes back from that file. If bZero is IMG_TRUE then the PDump zero
+ * page is used as the source for the LDB.
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEVMEM_SIZE_T uiSize,
+                PDUMP_FLAGS_T uiPDumpFlags,
+                IMG_BOOL bZero);
+
+/*
+ * PMRPDumpSaveToFile()
+ *
+ * Emits PDump that does an SAB (save bytes) using the PDump symbolic
+ * address of the PMR. Note that this is generally not the preferred
+ * way to dump the buffer contents. There is an equivalent function in
+ * devicemem_server.h which also emits SAB but using the virtual
+ * address, which is the "right" way to dump the buffer contents to a
+ * file. This function exists just to aid testing by providing a means
+ * to dump the PMR directly by symbolic address as well.
+ */
+extern PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+                   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   IMG_UINT32 uiArraySize,
+                   const IMG_CHAR *pszFilename,
+                   IMG_UINT32 uiFileOffset);
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPMRMallocPMR)
+#endif
+static INLINE void
+PDumpPMRMallocPMR(PMR *psPMR,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiBlockSize,
+                  IMG_UINT32 ui32ChunkSize,
+                  IMG_UINT32 ui32NumPhysChunks,
+                  IMG_UINT32 ui32NumVirtChunks,
+                  IMG_UINT32 *puiMappingTable,
+                  IMG_UINT32 uiLog2Contiguity,
+                  IMG_BOOL bInitialise,
+                  IMG_UINT32 ui32InitValue,
+                  IMG_HANDLE *phPDumpAllocInfoPtr,
+                  IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+	/* ui32ChunkSize added so this stub matches the PDUMP declaration above */
+	PVR_UNREFERENCED_PARAMETER(ui32ChunkSize);
+	PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+	PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+	PVR_UNREFERENCED_PARAMETER(puiMappingTable);
+	PVR_UNREFERENCED_PARAMETER(uiLog2Contiguity);
+	PVR_UNREFERENCED_PARAMETER(bInitialise);
+	PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+	PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoPtr);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPMRFreePMR)
+#endif
+static INLINE void
+PDumpPMRFreePMR(PMR *psPMR,
+                IMG_DEVMEM_SIZE_T uiSize,
+                IMG_DEVMEM_ALIGN_T uiBlockSize,
+                IMG_UINT32 uiLog2Contiguity,
+                IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+	PVR_UNREFERENCED_PARAMETER(uiLog2Contiguity);
+	PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPMRChangeSparsePMR)
+#endif
+static INLINE void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+                        IMG_UINT32 uiBlockSize,
+                        IMG_UINT32 ui32AllocPageCount,
+                        IMG_UINT32 *pai32AllocIndices,
+                        IMG_UINT32 ui32FreePageCount,
+                        IMG_UINT32 *pai32FreeIndices,
+                        IMG_BOOL bInitialise,
+                        IMG_UINT32 ui32InitValue,
+                        IMG_HANDLE *phPDumpAllocInfoOut)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+	PVR_UNREFERENCED_PARAMETER(ui32AllocPageCount);
+	PVR_UNREFERENCED_PARAMETER(pai32AllocIndices);
+	PVR_UNREFERENCED_PARAMETER(ui32FreePageCount);
+	PVR_UNREFERENCED_PARAMETER(pai32FreeIndices);
+	PVR_UNREFERENCED_PARAMETER(bInitialise);
+	PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+	PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoOut);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMR_PDumpSymbolicAddr)
+#endif
+static INLINE PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                      IMG_UINT32 ui32NamespaceNameLen,
+                      IMG_CHAR *pszNamespaceName,
+                      IMG_UINT32 ui32SymbolicAddrLen,
+                      IMG_CHAR *pszSymbolicAddr,
+                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                      IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32NamespaceNameLen);
+	PVR_UNREFERENCED_PARAMETER(pszNamespaceName);
+	PVR_UNREFERENCED_PARAMETER(ui32SymbolicAddrLen);
+	PVR_UNREFERENCED_PARAMETER(pszSymbolicAddr);
+	PVR_UNREFERENCED_PARAMETER(puiNewOffset);
+	PVR_UNREFERENCED_PARAMETER(puiNextSymName);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMemValue32)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT32 ui32Value,
+                       PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMemValue64)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT64 ui64Value,
+                       PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(ui64Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMem)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEVMEM_SIZE_T uiSize,
+                PDUMP_FLAGS_T uiPDumpFlags,
+                IMG_BOOL bZero)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+	PVR_UNREFERENCED_PARAMETER(bZero);
+	return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpSaveToFile)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+                   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   IMG_UINT32 uiArraySize,
+                   const IMG_CHAR *pszFilename,
+                   IMG_UINT32 uiFileOffset)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiArraySize);
+	PVR_UNREFERENCED_PARAMETER(pszFilename);
+	PVR_UNREFERENCED_PARAMETER(uiFileOffset);
+	return PVRSRV_OK;
+}
+
+#endif	/* PDUMP */
+
+/* This function returns the private data that a pmr subtype
+   squirrelled away at creation time. We use the function table pointer
+   as "authorization" that this function is being called by the pmr
+   subtype implementation. We can assume (and assert) that. It would be
+   a bug in the implementation of the pmr subtype if this assertion
+   ever fails. */
+extern void *
+PMRGetPrivateData(const PMR *psPMR,
+                  const PMR_IMPL_FUNCTAB *psFuncTab);
+
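+/*
+ * PMRZeroingPMR()
+ *
+ * Zeroes the whole PMR, page by page, at the given page-size granularity.
+ */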
+extern PVRSRV_ERROR
+PMRZeroingPMR(PMR *psPMR,
+              IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize);
+
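+/*
+ * PMRDumpPageList()
+ *
+ * Logs the list of physical pages backing the given PMR, for debugging.
+ */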
+extern PVRSRV_ERROR
+PMRDumpPageList(PMR *psReferencePMR,
+                IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize);
+
+extern PVRSRV_ERROR
+PMRWritePMPageList(/* Target PMR, offset, and length */
+                   PMR *psPageListPMR,
+                   IMG_DEVMEM_OFFSET_T uiTableOffset,
+                   IMG_DEVMEM_SIZE_T  uiTableLength,
+                   /* Referenced PMR, and "page" granularity */
+                   PMR *psReferencePMR,
+                   IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+                   PMR_PAGELIST **ppsPageList);
+
+/* Doesn't actually erase the page list - just releases the appropriate
+   refcounts. (Arguably this should return void.) */
+extern PVRSRV_ERROR
+PMRUnwritePMPageList(PMR_PAGELIST *psPageList);
+
+#if defined(PDUMP)
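+/*
+ * PMRPDumpPol32()
+ *
+ * Emits a PDump POL command that polls the dword at the given logical
+ * offset in the PMR until (value & ui32Mask) compares true against
+ * ui32Value under eOperator.
+ */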
+extern PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT32 ui32Value,
+              IMG_UINT32 ui32Mask,
+              PDUMP_POLL_OPERATOR eOperator,
+              PDUMP_FLAGS_T uiFlags);
+
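+/*
+ * PMRPDumpCBP()
+ *
+ * Emits a PDump CBP (circular-buffer poll) command that waits until
+ * uiPacketSize bytes are available in the circular buffer described by
+ * the read/write offsets and uiBufferSize.
+ */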
+extern PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize);
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpPol32)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT32 ui32Value,
+              IMG_UINT32 ui32Mask,
+              PDUMP_POLL_OPERATOR eOperator,
+              PDUMP_FLAGS_T uiFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpCBP)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+	return PVRSRV_OK;
+}
+#endif
+
+extern PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR);
+
+/*
+ * PMRInit()
+ *
+ * To be called once and only once to initialise the internal data in
+ * the PMR module (mutexes and such).
+ *
+ * Not for general use. Only PVRSRVInit() should call this.
+ */
+extern PVRSRV_ERROR
+PMRInit(void);
+
+/*
+ * PMRDeInit()
+ *
+ * To be called once and only once to deinitialise the internal data in
+ * the PMR module (mutexes and such) and to run debug checks.
+ *
+ * Not for general use. Only PVRSRVDeInit() should call this.
+ */
+extern PVRSRV_ERROR
+PMRDeInit(void);
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+extern PVRSRV_ERROR
+PMRStoreRIHandle(PMR *psPMR,
+                 void *hRIHandle);
+#endif
+
+#endif /* #ifdef _SRVSRV_PMR_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr_impl.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr_impl.h
new file mode 100644
index 0000000..a753b69
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr_impl.h
@@ -0,0 +1,594 @@
+/**************************************************************************/ /*!
+@File
+@Title          Implementation Callbacks for Physmem (PMR) abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This file is for definitions
+                that are private to the world of PMRs, but that need to be
+                shared between pmr.c itself and the modules that implement the
+                callbacks for the PMR.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVSRV_PMR_IMPL_H
+#define SRVSRV_PMR_IMPL_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+/*! Physical Memory Resource type.
+ */
+typedef struct _PMR_ PMR;
+
+/*! Opaque private data created by the PMR factory and handed back to its
+ * per-flavour callbacks by the generic implementation (pmr.c).
+ */
+typedef void *PMR_IMPL_PRIVDATA;
+
+/*! Type for holding flags passed to the PMR factory.
+ */
+typedef PVRSRV_MEMALLOCFLAGS_T PMR_FLAGS_T;
+
+/*! Mapping table for the allocation.
+ *
+ * PMRs can be sparse, in which case not all of their logical addresses
+ * are valid. The mapping table translates logical offsets into
+ * physical offsets.
+ *
+ * This table is always passed to the PMR factory regardless of whether
+ * the memory is sparse or not. In the case of non-sparse memory, all
+ * virtual offsets are mapped to physical offsets.
+ */
+typedef struct _PMR_MAPPING_TABLE_ PMR_MAPPING_TABLE;
+
+/*! Private data passed to the ::PFN_MMAP_FN function.
+ */
+typedef void *PMR_MMAP_DATA;
+
+/*! PMR factory type.
+ */
+typedef enum _PMR_IMPL_TYPE_
+{
+	PMR_TYPE_NONE = 0,
+	PMR_TYPE_OSMEM,
+	PMR_TYPE_LMA,
+	PMR_TYPE_DMABUF,
+	PMR_TYPE_EXTMEM,
+	PMR_TYPE_DC,
+	PMR_TYPE_TDFWCODE,
+	PMR_TYPE_TDSECBUF
+} PMR_IMPL_TYPE;
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_LOCK_PHYS_ADDRESSES_FN
+
+@Description    Called to lock down the physical addresses for all pages
+                allocated for a PMR.
+                The default implementation is to simply increment a
+                lock-count for debugging purposes.
+                If overridden, the PFN_LOCK_PHYS_ADDRESSES_FN function will
+                be called when someone first requires a physical address,
+                and the PFN_UNLOCK_PHYS_ADDRESSES_FN counterpart will be
+                called when the last such reference is released.
+                The PMR implementation may assume that physical addresses
+                will have been "locked" in this manner before any call is
+                made to the pfnDevPhysAddr() callback.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+
+@Return         PVRSRV_OK if the operation was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_LOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_UNLOCK_PHYS_ADDRESSES_FN
+
+@Description    Called to release the lock taken on the physical addresses
+                for all pages allocated for a PMR.
+                The default implementation is to simply decrement a
+                lock-count for debugging purposes.
+                If overridden, the PFN_UNLOCK_PHYS_ADDRESSES_FN will be
+                called when the last reference taken on the PMR is
+                released.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+
+@Return         PVRSRV_OK if the operation was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_DEV_PHYS_ADDR_FN
+
+@Description    Called to obtain one or more physical addresses for given
+                offsets within a PMR.
+
+                The PFN_LOCK_PHYS_ADDRESSES_FN callback (if overridden) is
+                guaranteed to have been called prior to calling the
+                PFN_DEV_PHYS_ADDR_FN callback and the caller promises not to
+                rely on the physical address thus obtained after the
+                PFN_UNLOCK_PHYS_ADDRESSES_FN callback is called.
+
+   Implementation of this callback is mandatory.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          ui32Log2PageSize          The log2 page size.
+@Input          ui32NumOfAddr             The number of addresses to be
+                                          returned
+@Input          puiOffset                 The offset from the start of the
+                                          PMR (in bytes) for which the
+                                          physical address is required.
+                                          Where multiple addresses are
+                                          requested, this will contain a
+                                          list of offsets.
+@Output         pbValid                   List of boolean flags indicating
+                                          which addresses in the returned
+                                          list (psDevAddrPtr) are valid
+                                          (for sparse allocations, not all
+                                          pages may have a physical backing)
+@Output         psDevAddrPtr              Returned list of physical addresses
+
+@Return         PVRSRV_OK if the operation was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_DEV_PHYS_ADDR_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      IMG_UINT32 ui32Log2PageSize,
+                      IMG_UINT32 ui32NumOfAddr,
+                      IMG_DEVMEM_OFFSET_T *puiOffset,
+                      IMG_BOOL *pbValid,
+                      IMG_DEV_PHYADDR *psDevAddrPtr);
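+
+/* Illustrative sketch (not part of the original header): a minimal
+ * PFN_DEV_PHYS_ADDR_FN for a hypothetical factory whose backing is a single
+ * physically contiguous block starting at psPriv->sBase. Every page is
+ * valid, so sparse handling is omitted; MY_PRIV and its field are invented
+ * for this example.
+ *
+ *     static PVRSRV_ERROR _MyDevPhysAddr(PMR_IMPL_PRIVDATA pvPriv,
+ *                                        IMG_UINT32 ui32Log2PageSize,
+ *                                        IMG_UINT32 ui32NumOfAddr,
+ *                                        IMG_DEVMEM_OFFSET_T *puiOffset,
+ *                                        IMG_BOOL *pbValid,
+ *                                        IMG_DEV_PHYADDR *psDevAddrPtr)
+ *     {
+ *         MY_PRIV *psPriv = pvPriv;
+ *         IMG_UINT32 i;
+ *
+ *         PVR_UNREFERENCED_PARAMETER(ui32Log2PageSize);
+ *         for (i = 0; i < ui32NumOfAddr; i++)
+ *         {
+ *             psDevAddrPtr[i].uiAddr = psPriv->sBase.uiAddr + puiOffset[i];
+ *             pbValid[i] = IMG_TRUE;
+ *         }
+ *         return PVRSRV_OK;
+ *     }
+ */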
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+
+@Description    Called to obtain a kernel-accessible address (mapped to a
+                virtual address if required) for the PMR for use internally
+                in Services.
+
+    Implementation of this function for the (default) PMR factory providing
+    OS-allocations is mandatory (the driver will expect to be able to call
+    this function for OS-provided allocations).
+    For other PMR factories, implementation of this function is only necessary
+    where an MMU mapping is required for the Kernel to be able to access the
+    allocated memory.
+    If no mapping is needed, this function can remain unimplemented and the
+    pfn may be set to NULL.
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          uiOffset                  Offset from the beginning of
+                                          the PMR at which mapping is to
+                                          start
+@Input          uiSize                    Size of mapping (in bytes)
+@Output         ppvKernelAddressOut       Mapped kernel address
+@Output         phHandleOut               Returned handle of the new mapping
+@Input          ulFlags                   Mapping flags
+
+@Return         PVRSRV_OK if the mapping was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      size_t uiOffset,
+                      size_t uiSize,
+                      void **ppvKernelAddressOut,
+                      IMG_HANDLE *phHandleOut,
+                      PMR_FLAGS_T ulFlags);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_RELEASE_KERNEL_MAPPING_DATA_FN
+
+@Description    Called to release a mapped kernel virtual address
+
+   Implementation of this callback is mandatory if PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+   is provided for the PMR factory, otherwise this function can remain unimplemented
+   and the pfn may be set to NULL.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          hHandle                   Handle of the mapping to be
+                                          released
+
+@Return         None
+*/
+/*****************************************************************************/
+typedef void (*PFN_RELEASE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv,
+              IMG_HANDLE hHandle);
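+
+/* Illustrative sketch (not part of the original header): how the generic
+ * code might pair the acquire/release mapping callbacks. psFuncTab, uiSize
+ * and uiFlags are hypothetical here.
+ *
+ *     void *pvKernAddr;
+ *     IMG_HANDLE hMapping;
+ *
+ *     eError = psFuncTab->pfnAcquireKernelMappingData(pvPriv, 0, uiSize,
+ *                                                     &pvKernAddr,
+ *                                                     &hMapping, uiFlags);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         ... access the allocation through pvKernAddr ...
+ *         psFuncTab->pfnReleaseKernelMappingData(pvPriv, hMapping);
+ *     }
+ */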
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_READ_BYTES_FN
+
+@Description    Called to read bytes from an unmapped allocation
+
+   Implementation of this callback is optional -
+   where it is not provided, the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+   to map the entire PMR (if an MMU mapping is required for the Kernel to be
+   able to access the allocated memory).
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          uiOffset                  Offset from the beginning of
+                                          the PMR at which to begin
+                                          reading
+@Output         pcBuffer                  Buffer in which to return the
+                                          read data
+@Input          uiBufSz                   Number of bytes to be read
+@Output         puiNumBytes               Number of bytes actually read
+                                          (may be less than uiBufSz)
+
+@Return         PVRSRV_OK if the read was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_READ_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_UINT8 *pcBuffer,
+                      size_t uiBufSz,
+                      size_t *puiNumBytes);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_WRITE_BYTES_FN
+
+@Description    Called to write bytes into an unmapped allocation
+
+   Implementation of this callback is optional -
+   where it is not provided, the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+   to map the entire PMR (if an MMU mapping is required for the Kernel to be
+   able to access the allocated memory).
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          uiOffset                  Offset from the beginning of
+                                          the PMR at which to begin
+                                          writing
+@Input          pcBuffer                  Buffer containing the data to be
+                                          written
+@Input          uiBufSz                   Number of bytes to be written
+@Output         puiNumBytes               Number of bytes actually written
+                                          (may be less than uiBufSz)
+
+@Return         PVRSRV_OK if the write was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_WRITE_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_UINT8 *pcBuffer,
+                      size_t uiBufSz,
+                      size_t *puiNumBytes);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_UNPIN_MEM_FN
+
+@Description    Called to unpin an allocation.
+                Once unpinned, the pages backing the allocation may be
+                re-used by the Operating System for another purpose.
+                When the pages are required again, they may be re-pinned
+                (by calling PFN_PIN_MEM_FN). The driver will try to return
+                same pages as before. The caller will be told if the
+                content of these returned pages has been modified or if
+                the pages returned are not the original pages.
+
+   Implementation of this callback is optional.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+
+@Return         PVRSRV_OK if the unpin was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_UNPIN_MEM_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_PIN_MEM_FN
+
+@Description    Called to pin a previously unpinned allocation.
+                The driver will try to return same pages as were previously
+                assigned to the allocation. The caller will be told if the
+                content of these returned pages has been modified or if
+                the pages returned are not the original pages.
+
+   Implementation of this callback is optional.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+
+@Input          psMappingTable            Mapping table, which describes how
+                                          virtual 'chunks' are to be mapped to
+                                          physical 'chunks' for the allocation.
+
+@Return         PVRSRV_OK if the original pages were returned unmodified.
+                PVRSRV_ERROR_PMR_NEW_MEMORY if the memory returned was modified
+                or different pages were returned.
+                Another PVRSRV_ERROR code on failure.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_PIN_MEM_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      PMR_MAPPING_TABLE *psMappingTable);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_CHANGE_SPARSE_MEM_FN
+
+@Description    Called to modify the physical backing for a given sparse
+                allocation.
+                The caller provides a list of the pages within the sparse
+                allocation which should be backed with a physical allocation
+                and a list of the pages which do not require backing.
+
+                Implementation of this callback is mandatory.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          psPMR                     The PMR of the sparse allocation
+                                          to be modified
+@Input          ui32AllocPageCount        The number of pages specified in
+                                          pai32AllocIndices
+@Input          pai32AllocIndices         The list of pages in the sparse
+                                          allocation that should be backed
+                                          with a physical allocation. Pages
+                                          are referenced by their index
+                                          within the sparse allocation
+                                          (e.g. in a 10 page allocation, pages
+                                          are denoted by indices 0 to 9)
+@Input          ui32FreePageCount         The number of pages specified in
+                                          pai32FreeIndices
+@Input          pai32FreeIndices          The list of pages in the sparse
+                                          allocation that do not require
+                                          a physical allocation.
+@Input          uiFlags                   Allocation flags
+
+@Return         PVRSRV_OK if the sparse allocation physical backing was updated
+                successfully, an error code otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      const PMR *psPMR,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *pai32AllocIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pai32FreeIndices,
+                      IMG_UINT32 uiFlags);
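+
+/* Illustrative sketch (not part of the original header): backing two pages
+ * of a sparse allocation while releasing one, as the generic code might
+ * invoke this callback. The index values and uiFlags are hypothetical.
+ *
+ *     IMG_UINT32 aui32Alloc[] = { 0, 3 };
+ *     IMG_UINT32 aui32Free[]  = { 9 };
+ *
+ *     eError = psFuncTab->pfnChangeSparseMem(pvPriv, psPMR,
+ *                                            2, aui32Alloc,
+ *                                            1, aui32Free,
+ *                                            uiFlags);
+ */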
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN
+
+@Description    Called to modify which pages are mapped for a given sparse
+                allocation.
+                The caller provides a list of the pages within the sparse
+                allocation which should be given a CPU mapping and a list
+                of the pages which do not require a CPU mapping.
+
+   Implementation of this callback is mandatory.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          psPMR                     The PMR of the sparse allocation
+                                          to be modified
+@Input          sCpuVAddrBase             The virtual base address of the
+                                          sparse allocation
+@Input          ui32AllocPageCount        The number of pages specified in
+                                          pai32AllocIndices
+@Input          pai32AllocIndices         The list of pages in the sparse
+                                          allocation that should be given
+                                          a CPU mapping. Pages are referenced
+                                          by their index within the sparse
+                                          allocation (e.g. in a 10 page
+                                          allocation, pages are denoted by
+                                          indices 0 to 9)
+@Input          ui32FreePageCount         The number of pages specified in
+                                          pai32FreeIndices
+@Input          pai32FreeIndices          The list of pages in the sparse
+                                          allocation that do not require a CPU
+                                          mapping.
+
+@Return         PVRSRV_OK if the page mappings were updated successfully, an
+                error code otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      const PMR *psPMR,
+                      IMG_UINT64 sCpuVAddrBase,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *pai32AllocIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pai32FreeIndices);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_MMAP_FN
+
+@Description    Called to map pages in the specified PMR.
+
+   Implementation of this callback is optional.
+   Where it is provided, it will be used in place of OSMMapPMRGeneric().
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          psPMR                     The PMR of the allocation to be
+                                          mapped
+@Input          pMMapData                 OS-specific data to describe how
+                                          mapping should be performed
+
+@Return         PVRSRV_OK if the mapping was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_MMAP_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                                    PMR *psPMR,
+                                    PMR_MMAP_DATA pMMapData);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_FINALIZE_FN
+
+@Description    Called to destroy the PMR.
+                This callback will be called only when all references to
+                the PMR have been dropped.
+                The PMR was created via a call to PhysmemNewRamBackedPMR()
+                and is destroyed via this callback.
+
+   Implementation of this callback is mandatory.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+
+@Return         PVRSRV_OK if the PMR destruction was successful, an error
+                code otherwise.
+                Currently PVRSRV_ERROR_PMR_STILL_REFERENCED is the only
+                error returned from physmem_dmabuf.c layer and on this
+                error, destroying of the PMR is aborted without disturbing
+                the PMR state.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_ACQUIRE_PMR_FACTORY_LOCK_FN
+
+@Description    Called to acquire the PMR factory's global lock, if it has
+                one. Factories that support entry points, in addition to the
+                normal bridge calls, that manipulate the PMR reference count
+                (for example, from the native OS) should create a factory
+                lock and implement these callbacks.
+
+                Implementation of this callback is optional.
+
+@Return         None
+*/
+/*****************************************************************************/
+typedef void (*PFN_ACQUIRE_PMR_FACTORY_LOCK_FN)(void);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_RELEASE_PMR_FACTORY_LOCK_FN
+
+@Description    Called to release the PMR factory's global lock acquired by
+                the PFN_ACQUIRE_PMR_FACTORY_LOCK_FN callback.
+
+                Implementation of this callback is optional.
+
+@Return         None
+*/
+/*****************************************************************************/
+typedef void (*PFN_RELEASE_PMR_FACTORY_LOCK_FN)(void);
+
+/*! PMR factory callback table.
+ */
+struct _PMR_IMPL_FUNCTAB_ {
+    /*! Callback function pointer, see ::PFN_LOCK_PHYS_ADDRESSES_FN */
+    PFN_LOCK_PHYS_ADDRESSES_FN pfnLockPhysAddresses;
+    /*! Callback function pointer, see ::PFN_UNLOCK_PHYS_ADDRESSES_FN */
+    PFN_UNLOCK_PHYS_ADDRESSES_FN pfnUnlockPhysAddresses;
+
+    /*! Callback function pointer, see ::PFN_DEV_PHYS_ADDR_FN */
+    PFN_DEV_PHYS_ADDR_FN pfnDevPhysAddr;
+
+    /*! Callback function pointer, see ::PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN */
+    PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN pfnAcquireKernelMappingData;
+    /*! Callback function pointer, see ::PFN_RELEASE_KERNEL_MAPPING_DATA_FN */
+    PFN_RELEASE_KERNEL_MAPPING_DATA_FN pfnReleaseKernelMappingData;
+
+#if defined(INTEGRITY_OS)
+    /*
+     * MapMemoryObject()/UnmapMemoryObject()
+     *
+     * Called to map/unmap memory objects in Integrity OS
+     */
+
+    PVRSRV_ERROR (*pfnMapMemoryObject)(PMR_IMPL_PRIVDATA pvPriv,
+                                       IMG_HANDLE *phMemObj,
+                                       void **pvClientAddr,
+                                       IMG_HANDLE *phHandleOut);
+    PVRSRV_ERROR (*pfnUnmapMemoryObject)(PMR_IMPL_PRIVDATA pvPriv);
+
+#if defined(USING_HYPERVISOR)
+    IMG_HANDLE (*pfnGetPmr)(PMR_IMPL_PRIVDATA pvPriv, size_t ulOffset);
+#endif
+#endif
+
+    /*! Callback function pointer, see ::PFN_READ_BYTES_FN */
+    PFN_READ_BYTES_FN pfnReadBytes;
+    /*! Callback function pointer, see ::PFN_WRITE_BYTES_FN */
+    PFN_WRITE_BYTES_FN pfnWriteBytes;
+
+    /*! Callback function pointer, see ::PFN_UNPIN_MEM_FN */
+    PFN_UNPIN_MEM_FN pfnUnpinMem;
+    /*! Callback function pointer, see ::PFN_PIN_MEM_FN */
+    PFN_PIN_MEM_FN pfnPinMem;
+
+    /*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_FN */
+    PFN_CHANGE_SPARSE_MEM_FN pfnChangeSparseMem;
+    /*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN */
+    PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN pfnChangeSparseMemCPUMap;
+
+    /*! Callback function pointer, see ::PFN_MMAP_FN */
+    PFN_MMAP_FN pfnMMap;
+
+    /*! Callback function pointer, see ::PFN_FINALIZE_FN */
+    PFN_FINALIZE_FN pfnFinalize;
+
+    /*! Callback function pointer, see ::PFN_ACQUIRE_PMR_FACTORY_LOCK_FN */
+    PFN_ACQUIRE_PMR_FACTORY_LOCK_FN	pfnGetPMRFactoryLock;
+
+    /*! Callback function pointer, see ::PFN_RELEASE_PMR_FACTORY_LOCK_FN */
+    PFN_RELEASE_PMR_FACTORY_LOCK_FN	pfnReleasePMRFactoryLock;
+};
+
+/*! PMR factory callback table.
+ */
+typedef struct _PMR_IMPL_FUNCTAB_ PMR_IMPL_FUNCTAB;
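+
+/* Illustrative sketch (not part of the original header): how a hypothetical
+ * PMR factory might populate its callback table. Only a subset of callbacks
+ * is shown; optional entries may be left NULL.
+ *
+ *     static const PMR_IMPL_FUNCTAB _sMyFuncTab = {
+ *         .pfnLockPhysAddresses   = _MyLockPhysAddresses,
+ *         .pfnUnlockPhysAddresses = _MyUnlockPhysAddresses,
+ *         .pfnDevPhysAddr         = _MyDevPhysAddr,
+ *         .pfnFinalize            = _MyFinalize,
+ *     };
+ */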
+
+#endif /* SRVSRV_PMR_IMPL_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr_os.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr_os.c
new file mode 100644
index 0000000..00bb901
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr_os.c
@@ -0,0 +1,638 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux OS PMR functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#if defined(CONFIG_L4)
+#include <asm/api-l4env/api.h>
+#endif
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+#include <linux/pfn_t.h>
+#include <linux/pfn.h>
+#endif
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "devicemem_server_utils.h"
+#include "pmr.h"
+#include "pmr_os.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+#include "mmap_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+/*
+ * x86_32:
+ * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM
+ * pages with default memory attributes; these HIGHMEM pages are skipped in
+ * set_pages_array_[uc,wc] during allocation; see reserve_pfn_range().
+ * Also vm_insert_page is faster.
+ *
+ * x86_64:
+ * Use vm_insert_page because it is faster.
+ *
+ * Other platforms:
+ * Use remap_pfn_range by default because it does not issue a cache flush.
+ * It is known that ARM32 benefits from this. As other platforms become
+ * available, it should be investigated whether this assumption holds for
+ * them as well.
+ *
+ * Since vm_insert_page does more precise memory accounting we have the build
+ * flag PVR_MMAP_USE_VM_INSERT that forces its use. This is useful as a debug
+ * feature.
+ *
+ */
+#if defined(CONFIG_X86) || defined(PVR_MMAP_USE_VM_INSERT)
+#define PMR_OS_USE_VM_INSERT_PAGE 1
+#endif
+
+static void MMapPMROpen(struct vm_area_struct *ps_vma)
+{
+	PMR *psPMR = ps_vma->vm_private_data;
+
+	/* Our VM flags should ensure this function never gets called */
+	PVR_DPF((PVR_DBG_WARNING,
+			 "%s: Unexpected mmap open call, this is probably an application bug.",
+			 __func__));
+	PVR_DPF((PVR_DBG_WARNING,
+			 "%s: vma struct: 0x%p, vAddr: %#lX, length: %#lX, PMR pointer: 0x%p",
+			 __func__,
+			 ps_vma,
+			 ps_vma->vm_start,
+			 ps_vma->vm_end - ps_vma->vm_start,
+			 psPMR));
+
+	/* In case we get called anyway let's do things right by increasing the refcount and
+	 * locking down the physical addresses. */
+	PMRRefPMR(psPMR);
+
+	if (PMRLockSysPhysAddresses(psPMR) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__));
+		PMRUnrefPMR(psPMR);
+	}
+}
+
+static void MMapPMRClose(struct vm_area_struct *ps_vma)
+{
+	PMR *psPMR = ps_vma->vm_private_data;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if	defined(PVRSRV_ENABLE_MEMORY_STATS)
+	{
+		uintptr_t vAddr = ps_vma->vm_start;
+
+		while (vAddr < ps_vma->vm_end)
+		{
+			/* USER MAPPING */
+			PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
+			                                (IMG_UINT64)vAddr,
+			                                OSGetCurrentClientProcessIDKM());
+			vAddr += PAGE_SIZE;
+		}
+	}
+#else
+	PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
+	                            ps_vma->vm_end - ps_vma->vm_start,
+	                            OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+	PMRUnlockSysPhysAddresses(psPMR);
+	PMRUnrefPMR(psPMR);
+}
+
+/*
+ * This vma operation is used to read data from mmap regions. It is called
+ * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace
+ * requests and reads from /proc/<pid>/mem.
+ */
+static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
+		       void *buf, int len, int write)
+{
+	PMR *psPMR = ps_vma->vm_private_data;
+	unsigned long ulOffset = addr - ps_vma->vm_start;
+	size_t uiBytesCopied;
+	PVRSRV_ERROR eError;
+	int iRetVal = -EINVAL;
+
+	if (write)
+	{
+		eError = PMR_WriteBytes(psPMR,
+					(IMG_DEVMEM_OFFSET_T) ulOffset,
+					buf,
+					len,
+					&uiBytesCopied);
+	}
+	else
+	{
+		eError = PMR_ReadBytes(psPMR,
+				       (IMG_DEVMEM_OFFSET_T) ulOffset,
+				       buf,
+				       len,
+				       &uiBytesCopied);
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)",
+			 __func__,
+			 write ? "PMR_WriteBytes" : "PMR_ReadBytes",
+			 eError));
+	}
+	else
+	{
+		iRetVal = uiBytesCopied;
+	}
+
+	return iRetVal;
+}
+
+static const struct vm_operations_struct gsMMapOps =
+{
+	.open = &MMapPMROpen,
+	.close = &MMapPMRClose,
+	.access = MMapVAccess,
+};
+
+static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode,
+							struct vm_area_struct *ps_vma,
+							IMG_DEVMEM_OFFSET_T uiOffset,
+							IMG_CPU_PHYADDR *psCpuPAddr,
+							IMG_UINT32 uiLog2PageSize,
+							IMG_BOOL bUseVMInsertPage,
+							IMG_BOOL bUseMixedMap)
+{
+	IMG_INT32 iStatus;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+	pfn_t sPFN;
+#else
+	unsigned long uiPFN;
+#endif
+
+#if defined(CONFIG_L4)
+	size_t size;
+	IMG_CPU_VIRTADDR pvVAddr;
+#if defined(ARM)
+	struct device *dev = psDevNode->psDevConfig->pvOSDevice;
+#endif
+
+	/* In L4 remaps from KM into UM is done via VA */
+	pvVAddr = l4x_phys_to_virt(psCpuPAddr->uiAddr);
+	if (pvVAddr == NULL)
+	{
+		return -1;
+	}
+
+	for (size = 0; size < 1ULL << uiLog2PageSize; size += PAGE_SIZE)
+	{
+		/* Fault-in pages now; the volatile accesses ensure the compiler
+		 * does not optimise this out. Byte-based arithmetic keeps the
+		 * stride at PAGE_SIZE (int-pointer arithmetic would over-step). */
+		*(volatile int *)((IMG_UINT8 *)pvVAddr + size) =
+			*(volatile int *)((IMG_UINT8 *)pvVAddr + size);
+	}
+
+#if defined(ARM)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+	sPFN = pfn_to_pfn_t(dma_to_pfn(dev, psCpuPAddr->uiAddr));
+#else
+	uiPFN = dma_to_pfn(dev, psCpuPAddr->uiAddr);
+#endif
+#else /* defined(ARM) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+	sPFN =  pfn_to_pfn_t(((uintptr_t) pvVAddr) >> PAGE_SHIFT);
+#else
+	uiPFN = ((uintptr_t) pvVAddr) >> PAGE_SHIFT;
+	PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == (IMG_UINT64)(uintptr_t)pvVAddr);
+#endif
+#endif
+	PVR_ASSERT(bUseVMInsertPage == IMG_FALSE);
+#else /* defined(CONFIG_L4) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+	sPFN = phys_to_pfn_t(psCpuPAddr->uiAddr, 0);
+#else
+	uiPFN = psCpuPAddr->uiAddr >> PAGE_SHIFT;
+	PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr->uiAddr);
+#endif
+#endif
+
+	/*
+	 * vm_insert_page() allows insertion of individual pages into user
+	 * VMA space _only_ if page is a order-zero allocated page
+	 */
+	if (bUseVMInsertPage)
+	{
+		if (bUseMixedMap)
+		{
+			/*
+			 * This path is just for debugging. It should be
+			 * equivalent to the remap_pfn_range() path.
+			 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
+			vm_fault_t vmf;
+
+			vmf = vmf_insert_mixed(ps_vma,
+			                       ps_vma->vm_start + uiOffset,
+			                       sPFN);
+			if (vmf & VM_FAULT_ERROR)
+			{
+				iStatus = vm_fault_to_errno(vmf, 0);
+			}
+			else
+			{
+				iStatus = 0;
+			}
+#else
+			iStatus = vm_insert_mixed(ps_vma,
+									  ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+									  sPFN);
+#else
+									  uiPFN);
+#endif
+#endif
+		}
+		else
+		{
+			/* Since kernel 3.7 this sets VM_MIXEDMAP internally */
+			iStatus = vm_insert_page(ps_vma,
+									 ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+									 pfn_t_to_page(sPFN));
+#else
+									 pfn_to_page(uiPFN));
+#endif
+		}
+	}
+	else
+	{
+		/*
+		   NOTE: Regarding absence of dma_mmap_coherent() in _OSMMapPMR()
+
+		   The current services mmap model maps in a PMR's full-length size
+		   into the user VMA & applies any user specified offset to the kernel
+		   returned zero-offset based VA in services client; this essentially
+		   means services server ignores ps_vma->vm_pgoff (this houses hPMR)
+		   during a mmap call.
+
+		   Furthermore, during a DMA/CMA memory allocation, multiple order-n
+		   pages are used to satisfy an allocation request due to DMA/CMA
+		   framework rounding-up allocation size to next power-of-two which
+		   can lead to wasted memory (so we don't allocate using single call).
+
+		   The combination of the above two issues mean that we cannot use the
+		   dma_mmap_coherent() for a number of reasons outlined below:
+
+		     - Services mmap semantics do not fit dma_mmap_coherent(), which
+		       requires a proper ps_vma->vm_pgoff; since this houses an hPMR
+		       handle value, calls into dma_mmap_coherent() fail. This could
+		       be avoided by forcing ps_vma->vm_pgoff to zero, but
+		       ps_vma->vm_pgoff is applied to the DMA bus address PFN and not
+		       the user VMA, which is always mapped at ps_vma->vm_start.
+
+		     - As multiple order-n pages are used for DMA/CMA allocations, a
+		       single dma_mmap_coherent() call with vma->vm_pgoff set to
+		       zero cannot be used because there is no guarantee that all of
+		       the multiple order-n pages in the PMR are physically
+		       contiguous from the first entry to the last. Whilst this is
+		       highly likely to be the case, there is no guarantee, so we
+		       cannot depend on it.
+
+		   The solution is to manually mmap DMA/CMA pages into user VMA
+		   using remap_pfn_range() directly. Furthermore, accounting is
+		   always compromised for DMA/CMA allocations.
+		*/
+		size_t uiNumContiguousBytes = 1ULL << uiLog2PageSize;
+
+		iStatus = remap_pfn_range(ps_vma,
+								  ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+								  pfn_t_to_pfn(sPFN),
+#else
+								  uiPFN,
+#endif
+								  uiNumContiguousBytes,
+								  ps_vma->vm_page_prot);
+	}
+
+	return iStatus;
+}
+
+PVRSRV_ERROR
+OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
+{
+	struct vm_area_struct *ps_vma = pOSMMapData;
+	PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR);
+	PVRSRV_ERROR eError;
+	size_t uiLength;
+	IMG_INT32 iStatus;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_UINT32 ui32CPUCacheFlags;
+	pgprot_t sPageProt;
+	IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_UINT32 uiOffsetIdx;
+	IMG_UINT32 uiNumOfPFNs;
+	IMG_UINT32 uiLog2PageSize;
+	IMG_CPU_PHYADDR *psCpuPAddr;
+	IMG_BOOL *pbValid;
+	IMG_BOOL bUseMixedMap = IMG_FALSE;
+	IMG_BOOL bUseVMInsertPage = IMG_FALSE;
+
+	eError = PMRLockSysPhysAddresses(psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
+		((ps_vma->vm_flags & VM_SHARED) == 0))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e1;
+	}
+
+	sPageProt = vm_get_page_prot(ps_vma->vm_flags);
+
+	eError = DevmemCPUCacheMode(psDevNode,
+	                            PMR_Flags(psPMR),
+	                            &ui32CPUCacheFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	switch (ui32CPUCacheFlags)
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+				sPageProt = pgprot_noncached(sPageProt);
+				break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+				sPageProt = pgprot_writecombine(sPageProt);
+				break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+		{
+/* Do not set to write-combine for Plato */
+#if !defined(PLATO_MEMORY_CONFIG)
+				PHYS_HEAP *psPhysHeap = PMR_PhysHeap(psPMR);
+
+				if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA)
+					sPageProt = pgprot_writecombine(sPageProt);
+#endif
+				break;
+		}
+
+		default:
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto e1;
+	}
+	ps_vma->vm_page_prot = sPageProt;
+
+	ps_vma->vm_flags |= VM_IO;
+
+	/* Don't include the mapping in core dumps */
+	ps_vma->vm_flags |= VM_DONTDUMP;
+
+	/*
+	 * Disable mremap because our nopage handler assumes all
+	 * page requests have already been validated.
+	 */
+	ps_vma->vm_flags |= VM_DONTEXPAND;
+
+	/* Don't allow mapping to be inherited across a process fork */
+	ps_vma->vm_flags |= VM_DONTCOPY;
+
+	uiLength = ps_vma->vm_end - ps_vma->vm_start;
+
+	/* Is this mmap targeting non order-zero pages or does it use pfn mappings?
+	 * If yes, don't use vm_insert_page */
+	uiLog2PageSize = PMR_GetLog2Contiguity(psPMR);
+#if defined(PMR_OS_USE_VM_INSERT_PAGE)
+	bUseVMInsertPage = (uiLog2PageSize == PAGE_SHIFT) && (PMR_GetType(psPMR) != PMR_TYPE_EXTMEM);
+#if defined(CONFIG_L4)
+	/* L4 uses CMA allocations */
+	bUseVMInsertPage = IMG_FALSE;
+#endif
+#endif
+
+	/* Can we use stack allocations */
+	uiNumOfPFNs = uiLength >> uiLog2PageSize;
+	if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(*psCpuPAddr));
+		if (psCpuPAddr == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e1;
+		}
+
+		/* Should this second allocation fail, free the first before exiting */
+		pbValid = OSAllocMem(uiNumOfPFNs * sizeof(*pbValid));
+		if (pbValid == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			OSFreeMem(psCpuPAddr);
+			goto e2;
+		}
+	}
+	else
+	{
+		psCpuPAddr = asCpuPAddr;
+		pbValid = abValid;
+	}
+
+	/* Obtain map range pfns */
+	eError = PMR_CpuPhysAddr(psPMR,
+				 uiLog2PageSize,
+				 uiNumOfPFNs,
+				 0,
+				 psCpuPAddr,
+				 pbValid);
+	if (eError != PVRSRV_OK)
+	{
+		goto e3;
+	}
+
+	/*
+	 * Scan the map range for pfns without struct page* handling. If
+	 * we find one, this is a mixed map, and we can't use vm_insert_page()
+	 * NOTE: vm_insert_page() allows insertion of individual pages into user
+	 * VMA space _only_ if said page is an order-zero allocated page.
+	 */
+	if (bUseVMInsertPage)
+	{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+		pfn_t sPFN;
+#else
+		unsigned long uiPFN;
+#endif
+
+		for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx)
+		{
+			if (pbValid[uiOffsetIdx])
+			{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+				sPFN = phys_to_pfn_t(psCpuPAddr[uiOffsetIdx].uiAddr, 0);
+
+				if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
+#else
+				uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
+				PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);
+
+				if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+				{
+					bUseMixedMap = IMG_TRUE;
+					break;
+				}
+			}
+		}
+
+		if (bUseMixedMap)
+		{
+			ps_vma->vm_flags |= VM_MIXEDMAP;
+		}
+	}
+	else
+	{
+		ps_vma->vm_flags |= VM_PFNMAP;
+	}
+
+	/* For each PMR page-size contiguous bytes, map page(s) into user VMA */
+	for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<uiLog2PageSize)
+	{
+		uiOffsetIdx = uiOffset >> uiLog2PageSize;
+		/*
+		 * Only map in pages that are valid, any that aren't will be
+		 * picked up by the nopage handler which will return a zeroed
+		 * page for us.
+		 */
+		if (pbValid[uiOffsetIdx])
+		{
+			iStatus = _OSMMapPMR(psDevNode,
+								 ps_vma,
+								 uiOffset,
+								 &psCpuPAddr[uiOffsetIdx],
+								 uiLog2PageSize,
+								 bUseVMInsertPage,
+								 bUseMixedMap);
+			if (iStatus)
+			{
+				/* The OS mapping failure code is not propagated; report a generic mapping error */
+				eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
+				PVR_ASSERT(0);
+				goto e3;
+			}
+		}
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS)
+#define PMR_OS_BAD_CPUADDR 0x0BAD0BAD
+		{
+			IMG_CPU_PHYADDR sPAddr;
+			sPAddr.uiAddr = pbValid[uiOffsetIdx] ?
+					psCpuPAddr[uiOffsetIdx].uiAddr :
+					IMG_CAST_TO_CPUPHYADDR_UINT(PMR_OS_BAD_CPUADDR);
+
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
+										(void*)(uintptr_t)(ps_vma->vm_start + uiOffset),
+										sPAddr,
+										1<<uiLog2PageSize,
+										NULL,
+										OSGetCurrentClientProcessIDKM());
+		}
+#undef PMR_OS_BAD_CPUADDR
+#endif
+	}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, uiNumOfPFNs * PAGE_SIZE, OSGetCurrentClientProcessIDKM());
+#endif
+
+	if (psCpuPAddr != asCpuPAddr)
+	{
+		OSFreeMem(psCpuPAddr);
+		OSFreeMem(pbValid);
+	}
+
+	/* Stash the PMR so it can be unlocked later */
+	ps_vma->vm_private_data = psPMR;
+
+	/* Install open and close handlers for ref-counting */
+	ps_vma->vm_ops = &gsMMapOps;
+
+	/*
+	 * Take a reference on the PMR so that it can't be freed while mapped
+	 * into the user process.
+	 */
+	PMRRefPMR(psPMR);
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+	/* record the stats */
+	MMapStatsAddOrUpdatePMR(psPMR, uiLength);
+#endif
+
+	return PVRSRV_OK;
+
+	/* Error exit paths follow */
+ e3:
+	if (pbValid != abValid)
+	{
+		OSFreeMem(pbValid);
+	}
+ e2:
+	if (psCpuPAddr != asCpuPAddr)
+	{
+		OSFreeMem(psCpuPAddr);
+	}
+ e1:
+	PMRUnlockSysPhysAddresses(psPMR);
+ e0:
+	return eError;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr_os.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr_os.h
new file mode 100644
index 0000000..0dfbd49
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pmr_os.h
@@ -0,0 +1,62 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS PMR functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS specific PMR functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PMR_OS_H__)
+#define __PMR_OS_H__
+
+#include "pmr_impl.h"
+
+/*************************************************************************/ /*!
+@Function       OSMMapPMRGeneric
+@Description    Implements a generic PMR mapping function, which is used
+                to CPU map a PMR where the PMR does not have a mapping
+                function defined by the creating PMR factory.
+@Input          psPMR               the PMR to be mapped
+@Output         pOSMMapData         pointer to any private data
+                                    needed by the generic mapping function
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData);
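+
+/* Usage sketch (editor's illustration, not part of the DDK): an OS mmap
+ * entry point can fall back to this generic implementation when the PMR
+ * factory has not supplied its own mapping function. The wrapper name
+ * below is hypothetical.
+ *
+ *     static PVRSRV_ERROR MMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
+ *     {
+ *         return OSMMapPMRGeneric(psPMR, pOSMMapData);
+ *     }
+ */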
+
+#endif /* !defined(__PMR_OS_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/power.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/power.c
new file mode 100644
index 0000000..bf7a832
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/power.c
@@ -0,0 +1,1073 @@
+/*************************************************************************/ /*!
+@File           power.c
+@Title          Power management functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for power management functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "allocmem.h"
+#include "osfunc.h"
+
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "process_stats.h"
+
+
+struct _PVRSRV_POWER_DEV_TAG_
+{
+	PFN_PRE_POWER					pfnDevicePrePower;
+	PFN_POST_POWER					pfnDevicePostPower;
+	PFN_SYS_DEV_PRE_POWER			pfnSystemPrePower;
+	PFN_SYS_DEV_POST_POWER			pfnSystemPostPower;
+	PFN_PRE_CLOCKSPEED_CHANGE		pfnPreClockSpeedChange;
+	PFN_POST_CLOCKSPEED_CHANGE		pfnPostClockSpeedChange;
+	PFN_FORCED_IDLE_REQUEST			pfnForcedIdleRequest;
+	PFN_FORCED_IDLE_CANCEL_REQUEST	pfnForcedIdleCancelRequest;
+	PFN_DUST_COUNT_REQUEST			pfnDustCountRequest;
+	IMG_HANDLE						hSysData;
+	IMG_HANDLE						hDevCookie;
+	PVRSRV_DEV_POWER_STATE 			eDefaultPowerState;
+	PVRSRV_DEV_POWER_STATE 			eCurrentPowerState;
+};
+
+/*!
+  Typedef for a pointer to a function used to re-acquire the device
+  power-lock after it has been temporarily released for a timeout period
+  in _PVRSRVDeviceIdleRequestKM
+ */
+typedef PVRSRV_ERROR (*PFN_POWER_LOCK_ACQUIRE) (PCPVRSRV_DEVICE_NODE psDevNode);
+
+static inline IMG_UINT64 PVRSRVProcessStatsGetTimeNs(void)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	return OSClockns64();
+#else
+	return 0;
+#endif
+}
+
+static inline IMG_UINT64 PVRSRVProcessStatsGetTimeUs(void)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	return OSClockus();
+#else
+	return 0;
+#endif
+}
+
+/*!
+******************************************************************************
+
+ @Function	_IsSystemStatePowered
+
+ @Description	Tests whether a given system state represents powered-up.
+
+ @Input		eSystemPowerState : a system power state
+
+ @Return	IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState)
+{
+	return (eSystemPowerState == PVRSRV_SYS_POWER_STATE_ON);
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPowerLock
+
+ @Description	Obtain the mutex for power transitions. Only allowed when
+                system power is on.
+
+ @Return	PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPowerLock(PCPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	OSLockAcquire(psDeviceNode->hPowerLock);
+
+	/* Only allow the power-lock to be taken when the system power is on */
+	if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState))
+	{
+		return PVRSRV_OK;
+	}
+
+	OSLockRelease(psDeviceNode->hPowerLock);
+
+	return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPowerTryLock
+
+ @Description	Try to obtain the mutex for power transitions. Only allowed when
+		system power is on.
+
+ @Return	PVRSRV_ERROR_RETRY or PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or
+		PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPowerTryLock(PCPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	if (!(OSTryLockAcquire(psDeviceNode->hPowerLock)))
+	{
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	/* Only allow the power-lock to be taken when the system power is on */
+	if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState))
+	{
+		/* System is powered ON, return OK */
+		return PVRSRV_OK;
+	}
+	else
+	{
+		/* System is powered OFF, release the lock and return error */
+		OSLockRelease(psDeviceNode->hPowerLock);
+		return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF;
+	}
+}
+
+/*!
+******************************************************************************
+
+ @Function     _PVRSRVForcedPowerLock
+
+ @Description  Obtain the mutex for power transitions regardless of system
+               power state
+
+ @Return       Always returns PVRSRV_OK. The prototype is required to match
+               PFN_POWER_LOCK_ACQUIRE
+
+******************************************************************************/
+static PVRSRV_ERROR _PVRSRVForcedPowerLock(PCPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	OSLockAcquire(psDeviceNode->hPowerLock);
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPowerUnlock
+
+ @Description	Release the mutex for power transitions
+
+ @Return	None
+
+******************************************************************************/
+void PVRSRVPowerUnlock(PCPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	OSLockRelease(psDeviceNode->hPowerLock);
+}
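+
+/* Usage sketch (editor's illustration): the lock is only granted while the
+ * system is powered, so callers must check the return value before assuming
+ * the lock is held.
+ *
+ *     eError = PVRSRVPowerLock(psDeviceNode);
+ *     if (eError != PVRSRV_OK)
+ *     {
+ *         return eError;              // powered off - lock not held
+ *     }
+ *     // ... perform the power transition ...
+ *     PVRSRVPowerUnlock(psDeviceNode);
+ */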
+
+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice)
+{
+	return (psPowerDevice->eDefaultPowerState == PVRSRV_DEV_POWER_STATE_OFF);
+}
+
+/*!
+******************************************************************************
+
+ @Function      PVRSRVSetDeviceDefaultPowerState
+
+ @Description   Set the default device power state to eNewPowerState
+
+ @Input		    psDeviceNode : Device node
+ @Input         eNewPowerState : New power state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+					PVRSRV_DEV_POWER_STATE eNewPowerState)
+{
+	PVRSRV_POWER_DEV *psPowerDevice;
+
+	psPowerDevice = psDeviceNode->psPowerDev;
+	if (psPowerDevice == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_DEVICE;
+	}
+
+	psPowerDevice->eDefaultPowerState = eNewPowerState;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function    _PVRSRVDeviceIdleRequestKM
+
+ @Description Perform device-specific processing required to force the device
+              idle. The device power-lock might be temporarily released (and
+              re-acquired) during the course of this call; to maintain lock
+              ordering, the power-lock should therefore be the last lock
+              acquired before calling this function
+
+ @Input       psDeviceNode         : Device node
+
+ @Input       pfnIsDefaultStateOff : When specified, the idle request is only
+                                     processed if this predicate returns true.
+
+ @Input       bDeviceOffPermitted  : IMG_TRUE if the transition should not fail
+                                       if device off
+                                     IMG_FALSE if the transition should fail if
+                                       device off
+
+ @Input       pfnPowerLockAcquire  : Function to re-acquire power-lock in-case
+                                     it was necessary to release it.
+
+ @Return      PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED
+                                     When re-acquisition of power-lock failed.
+                                     This error NEEDS EXPLICIT HANDLING at call
+                                     site as it signifies the caller needs to
+                                     AVOID calling PVRSRVPowerUnlock, since
+                                     power-lock is no longer "possessed" by
+                                     this context.
+
+              PVRSRV_OK              When idle request succeeded.
+              PVRSRV_ERROR           Other system errors.
+
+******************************************************************************/
+static PVRSRV_ERROR _PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+					PFN_SYS_DEV_IS_DEFAULT_STATE_OFF    pfnIsDefaultStateOff,
+					IMG_BOOL                            bDeviceOffPermitted,
+					PFN_POWER_LOCK_ACQUIRE              pfnPowerLockAcquire)
+{
+	PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
+	PVRSRV_ERROR eError;
+
+	if ((psPowerDev && psPowerDev->pfnForcedIdleRequest) &&
+	    (!pfnIsDefaultStateOff || pfnIsDefaultStateOff(psPowerDev)))
+	{
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = psPowerDev->pfnForcedIdleRequest(psPowerDev->hDevCookie,
+			                                          bDeviceOffPermitted);
+			if (eError == PVRSRV_OK)
+			{
+				/* Idle request was successful */
+				break;
+			}
+			else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+			{
+				PVRSRV_ERROR eErrPwrLockAcq;
+				/* FW denied idle request */
+				PVRSRVPowerUnlock(psDeviceNode);
+
+				OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+
+				eErrPwrLockAcq = pfnPowerLockAcquire(psDeviceNode);
+				if (eErrPwrLockAcq != PVRSRV_OK)
+				{
+					/* We only understand PVRSRV_ERROR_RETRY, so assert on others.
+					 * Moreover, we have ended up releasing the power-lock that was
+					 * originally held by the caller before calling this function -
+					 * since this needs vigilant handling at the call-site, we pass
+					 * back an explicit error so that callers know to avoid calling
+					 * PVRSRVPowerUnlock */
+					PVR_ASSERT(eErrPwrLockAcq == PVRSRV_ERROR_RETRY);
+					PVR_DPF((PVR_DBG_ERROR, "%s: Failed to re-acquire power-lock "
+					         "(%s) after releasing it for a time-out",
+							 __func__, PVRSRVGetErrorString(eErrPwrLockAcq)));
+					return PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED;
+				}
+			}
+			else
+			{
+				/* some other error occurred, return failure */
+				break;
+			}
+		} END_LOOP_UNTIL_TIMEOUT();
+	}
+	else
+	{
+		return PVRSRV_OK;
+	}
+
+	return eError;
+}
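+
+/* Call-site sketch (editor's illustration) of the explicit handling that
+ * PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED demands: on that error the
+ * power-lock has already been released, so the caller must not unlock.
+ * This mirrors the pattern used by PVRSRVDevicePreClockSpeedChange below.
+ *
+ *     eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_FALSE);
+ *     if (eError != PVRSRV_OK)
+ *     {
+ *         if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+ *         {
+ *             PVRSRVPowerUnlock(psDeviceNode);  // lock still held - release
+ *         }
+ *         return eError;
+ *     }
+ */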
+
+/*
+ * Wrapper around _PVRSRVDeviceIdleRequestKM that spares callers from having
+ * to supply the additional PFN_POWER_LOCK_ACQUIRE argument
+ */
+inline PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+					PFN_SYS_DEV_IS_DEFAULT_STATE_OFF      pfnIsDefaultStateOff,
+					IMG_BOOL                              bDeviceOffPermitted)
+{
+	return _PVRSRVDeviceIdleRequestKM(psDeviceNode,
+	                                  pfnIsDefaultStateOff,
+	                                  bDeviceOffPermitted,
+	                                  PVRSRVPowerLock);
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDeviceIdleCancelRequestKM
+
+ @Description
+
+ Perform device-specific processing required to cancel the forced idle state
+ on the device, returning to normal operation.
+
+ @Input		psDeviceNode : Device node
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
+
+	if (psPowerDev && psPowerDev->pfnForcedIdleCancelRequest)
+	{
+		return psPowerDev->pfnForcedIdleCancelRequest(psPowerDev->hDevCookie);
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDevicePrePowerStateKM
+
+ @Description
+
+ Perform device-specific processing required before a power transition
+
+ @Input		psPowerDevice : Power device
+ @Input		eNewPowerState : New power state
+ @Input		bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static
+PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV		*psPowerDevice,
+										 PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										 IMG_BOOL				bForced)
+{
+	IMG_UINT64 ui64SysTimer1 = 0;
+	IMG_UINT64 ui64SysTimer2 = 0;
+	IMG_UINT64 ui64DevTimer1 = 0;
+	IMG_UINT64 ui64DevTimer2 = 0;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+	if (psPowerDevice->pfnDevicePrePower != NULL)
+	{
+		ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+		/* Call the device's power callback. */
+		eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie,
+												  eNewPowerState,
+												  psPowerDevice->eCurrentPowerState,
+												  bForced);
+
+		ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	/* Do any required system-layer processing. */
+	if (psPowerDevice->pfnSystemPrePower != NULL)
+	{
+		ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+		eError = psPowerDevice->pfnSystemPrePower(psPowerDevice->hSysData,
+												  eNewPowerState,
+												  psPowerDevice->eCurrentPowerState,
+												  bForced);
+
+		ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2,
+							 ui64DevTimer1, ui64DevTimer2,
+							 bForced,
+							 eNewPowerState == PVRSRV_DEV_POWER_STATE_ON,
+							 IMG_TRUE);
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDevicePostPowerStateKM
+
+ @Description
+
+ Perform device-specific processing required after a power transition
+
+ @Input		psPowerDevice : Power device
+ @Input		eNewPowerState : New power state
+ @Input		bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static
+PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(PVRSRV_POWER_DEV			*psPowerDevice,
+										  PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										  IMG_BOOL					bForced)
+{
+	IMG_UINT64 ui64SysTimer1 = 0;
+	IMG_UINT64 ui64SysTimer2 = 0;
+	IMG_UINT64 ui64DevTimer1 = 0;
+	IMG_UINT64 ui64DevTimer2 = 0;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+	/* Do any required system-layer processing. */
+	if (psPowerDevice->pfnSystemPostPower != NULL)
+	{
+		ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+		eError = psPowerDevice->pfnSystemPostPower(psPowerDevice->hSysData,
+												   eNewPowerState,
+												   psPowerDevice->eCurrentPowerState,
+												   bForced);
+
+		ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	if (psPowerDevice->pfnDevicePostPower != NULL)
+	{
+		ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+		/* Call the device's power callback. */
+		eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie,
+												   eNewPowerState,
+												   psPowerDevice->eCurrentPowerState,
+												   bForced);
+
+		ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2,
+							 ui64DevTimer1, ui64DevTimer2,
+							 bForced,
+							 eNewPowerState == PVRSRV_DEV_POWER_STATE_ON,
+							 IMG_FALSE);
+
+	psPowerDevice->eCurrentPowerState = eNewPowerState;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVSetDevicePowerStateKM
+
+ @Description	Set the Device into a new state
+
+ @Input		psDeviceNode : Device node
+ @Input		eNewPowerState : New power state
+ @Input		bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+										 PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										 IMG_BOOL				bForced)
+{
+	PVRSRV_ERROR	eError;
+	PVRSRV_DATA*    psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_POWER_DEV *psPowerDevice;
+
+	psPowerDevice = psDeviceNode->psPowerDev;
+	if (!psPowerDevice)
+	{
+		return PVRSRV_OK;
+	}
+
+	if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
+	{
+		eNewPowerState = psPowerDevice->eDefaultPowerState;
+	}
+
+	if (psPowerDevice->eCurrentPowerState != eNewPowerState)
+	{
+		eError = PVRSRVDevicePrePowerStateKM(psPowerDevice,
+											 eNewPowerState,
+											 bForced);
+		if (eError != PVRSRV_OK)
+		{
+			goto ErrorExit;
+		}
+
+		eError = PVRSRVDevicePostPowerStateKM(psPowerDevice,
+											  eNewPowerState,
+											  bForced);
+		if (eError != PVRSRV_OK)
+		{
+			goto ErrorExit;
+		}
+
+		/* Signal Device Watchdog Thread about power mode change. */
+		if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON)
+		{
+			psPVRSRVData->ui32DevicesWatchdogPwrTrans++;
+#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+			if (psPVRSRVData->ui32DevicesWatchdogTimeout == DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT)
+#endif
+			{
+				eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj);
+				PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+			}
+		}
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+		else if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+		{
+			/* signal watchdog thread and give it a chance to switch to
+			 * longer / infinite wait time */
+			eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj);
+			PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+		}
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+	}
+
+	return PVRSRV_OK;
+
+ErrorExit:
+
+	if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,
+				 "%s: Transition to %d was denied, Forced=%d",
+				 __func__, eNewPowerState, bForced));
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: Transition to %d FAILED (%s)",
+				 __func__, eNewPowerState, PVRSRVGetErrorString(eError)));
+	}
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function     PVRSRVSetDeviceSystemPowerState
+@Description  Set the device into a new power state based on the systems power
+              state
+@Input        psDeviceNode          Device node
+@Input        eNewSysPowerState  New system power state
+@Return       PVRSRV_ERROR       PVRSRV_OK on success or an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode,
+											 PVRSRV_SYS_POWER_STATE eNewSysPowerState)
+{
+	PVRSRV_ERROR	eError;
+	IMG_UINT        uiStage = 0;
+
+	PVRSRV_DEV_POWER_STATE eNewDevicePowerState =
+	  _IsSystemStatePowered(eNewSysPowerState)? PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF;
+
+	/* If setting devices to default state, force idle all devices whose default state is off */
+	PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff =
+	  (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? PVRSRVDeviceIsDefaultStateOFF : NULL;
+
+	/* require a proper power state */
+	if (eNewSysPowerState == PVRSRV_SYS_POWER_STATE_Unspecified)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Prevent simultaneous SetPowerStateKM calls */
+	_PVRSRVForcedPowerLock(psDeviceNode);
+
+	/* no power transition requested, so do nothing */
+	if (eNewSysPowerState == psDeviceNode->eCurrentSysPowerState)
+	{
+		PVRSRVPowerUnlock(psDeviceNode);
+		return PVRSRV_OK;
+	}
+
+	eError = _PVRSRVDeviceIdleRequestKM(psDeviceNode, pfnIsDefaultStateOff,
+	                                    IMG_TRUE, _PVRSRVForcedPowerLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle request failure (%s)",
+		                        __func__, PVRSRVGetErrorString(eError)));
+		uiStage++;
+		goto ErrorExit;
+	}
+
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, eNewDevicePowerState,
+										 IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		uiStage++;
+		goto ErrorExit;
+	}
+
+	psDeviceNode->eCurrentSysPowerState = eNewSysPowerState;
+
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	return PVRSRV_OK;
+
+ErrorExit:
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Transition from %d to %d FAILED (%s) at stage %u. Dumping debug info.",
+			 __func__, psDeviceNode->eCurrentSysPowerState, eNewSysPowerState,
+			 PVRSRVGetErrorString(eError), uiStage));
+
+	PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+
+	return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode,
+									   PFN_PRE_POWER				pfnDevicePrePower,
+									   PFN_POST_POWER				pfnDevicePostPower,
+									   PFN_SYS_DEV_PRE_POWER		pfnSystemPrePower,
+									   PFN_SYS_DEV_POST_POWER		pfnSystemPostPower,
+									   PFN_PRE_CLOCKSPEED_CHANGE	pfnPreClockSpeedChange,
+									   PFN_POST_CLOCKSPEED_CHANGE	pfnPostClockSpeedChange,
+									   PFN_FORCED_IDLE_REQUEST	pfnForcedIdleRequest,
+									   PFN_FORCED_IDLE_CANCEL_REQUEST	pfnForcedIdleCancelRequest,
+									   PFN_DUST_COUNT_REQUEST	pfnDustCountRequest,
+									   IMG_HANDLE					hDevCookie,
+									   PVRSRV_DEV_POWER_STATE		eCurrentPowerState,
+									   PVRSRV_DEV_POWER_STATE		eDefaultPowerState)
+{
+	PVRSRV_POWER_DEV *psPowerDevice;
+
+	PVR_ASSERT(!psDeviceNode->psPowerDev);
+
+	PVR_ASSERT(eCurrentPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+	PVR_ASSERT(eDefaultPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+	psPowerDevice = OSAllocMem(sizeof(PVRSRV_POWER_DEV));
+	if (psPowerDevice == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to alloc PVRSRV_POWER_DEV", __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* setup device for power manager */
+	psPowerDevice->pfnDevicePrePower = pfnDevicePrePower;
+	psPowerDevice->pfnDevicePostPower = pfnDevicePostPower;
+	psPowerDevice->pfnSystemPrePower = pfnSystemPrePower;
+	psPowerDevice->pfnSystemPostPower = pfnSystemPostPower;
+	psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange;
+	psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange;
+	psPowerDevice->pfnForcedIdleRequest = pfnForcedIdleRequest;
+	psPowerDevice->pfnForcedIdleCancelRequest = pfnForcedIdleCancelRequest;
+	psPowerDevice->pfnDustCountRequest = pfnDustCountRequest;
+	psPowerDevice->hSysData = psDeviceNode->psDevConfig->hSysData;
+	psPowerDevice->hDevCookie = hDevCookie;
+	psPowerDevice->eCurrentPowerState = eCurrentPowerState;
+	psPowerDevice->eDefaultPowerState = eDefaultPowerState;
+
+	psDeviceNode->psPowerDev = psPowerDevice;
+
+	return PVRSRV_OK;
+}
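+
+/* Registration sketch (editor's illustration): a device driver registers its
+ * power callbacks once at initialisation time. All callback and handle names
+ * below are hypothetical.
+ *
+ *     eError = PVRSRVRegisterPowerDevice(psDeviceNode,
+ *                                        DevPrePower, DevPostPower,
+ *                                        NULL, NULL,        // no system hooks
+ *                                        DevPreClockChange, DevPostClockChange,
+ *                                        DevForcedIdle, DevForcedIdleCancel,
+ *                                        NULL,              // no dust-count hook
+ *                                        hDevData,
+ *                                        PVRSRV_DEV_POWER_STATE_OFF,
+ *                                        PVRSRV_DEV_POWER_STATE_ON);
+ */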
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVRemovePowerDevice
+
+ @Description
+
+ Removes the device, identified by its device node, from power management registration
+
+ @Input		psDeviceNode : Device node
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	if (psDeviceNode->psPowerDev)
+	{
+		OSFreeMem(psDeviceNode->psPowerDev);
+		psDeviceNode->psPowerDev = NULL;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVGetDevicePowerState
+
+ @Description
+
+	Return the device power state
+
+ @Input		psDeviceNode : Device node
+ @Output	pePowerState : Current power state
+
+ @Return	PVRSRV_ERROR_UNKNOWN_POWER_STATE if device could not be found. PVRSRV_OK otherwise.
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+									   PPVRSRV_DEV_POWER_STATE pePowerState)
+{
+	PVRSRV_POWER_DEV *psPowerDevice;
+
+	psPowerDevice = psDeviceNode->psPowerDev;
+	if (psPowerDevice == NULL)
+	{
+		return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+	}
+
+	*pePowerState = psPowerDevice->eCurrentPowerState;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVIsDevicePowered
+
+ @Description
+
+	Whether the device is powered, for the purposes of lockup detection.
+
+ @Input		psDeviceNode : Device node
+
+ @Return	IMG_BOOL
+
+******************************************************************************/
+IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	PVRSRV_DEV_POWER_STATE ePowerState;
+
+	if (OSLockIsLocked(psDeviceNode->hPowerLock))
+	{
+		return IMG_FALSE;
+	}
+
+	if (PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState) != PVRSRV_OK)
+	{
+		return IMG_FALSE;
+	}
+
+	return (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+}
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDevicePreClockSpeedChange
+
+@Description    This function is called before a voltage/frequency change is
+                made to the GPU HW. It informs the host driver of the intention
+                to make a DVFS change. It allows the host driver to idle
+                the GPU and begin a hold-off period that blocks new work
+                from starting on the GPU.
+                When this call succeeds the caller *must* call
+                PVRSRVDevicePostClockSpeedChange() to end the hold off period
+                to allow new work to be submitted to the GPU.
+
+                Called from the system layer or OS layer implementation that
+                is responsible for triggering a GPU DVFS transition.
+
+@Input          psDeviceNode pointer to the device affected by DVFS transition.
+@Input          bIdleDevice  when True, the driver will wait for the GPU to
+                             reach an idle state before the call returns.
+@Input          pvInfo       unused
+
+@Return         PVRSRV_OK    on success, power lock acquired and held on exit,
+                             GPU idle.
+                PVRSRV_ERROR on failure, power lock not held on exit, do not
+                             call PVRSRVDevicePostClockSpeedChange().
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                IMG_BOOL            bIdleDevice,
+                                void*               pvInfo)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_POWER_DEV	*psPowerDevice;
+	IMG_UINT64			ui64StartTimer, ui64StopTimer;
+
+	PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+	ui64StartTimer = PVRSRVProcessStatsGetTimeUs();
+
+	/* This mutex is released in PVRSRVDevicePostClockSpeedChange. */
+	eError = PVRSRVPowerLock(psDeviceNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: failed to acquire lock (%s)",
+				 __func__, PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+	psPowerDevice = psDeviceNode->psPowerDev;
+	if (psPowerDevice)
+	{
+		if ((psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+		{
+			/* We can change the clock speed if the device is either IDLE or OFF */
+			eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle request failed (%s)",
+				                        __func__, PVRSRVGetErrorString(eError)));
+				if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+				{
+					PVRSRVPowerUnlock(psDeviceNode);
+				}
+				return eError;
+			}
+		}
+
+		eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie,
+		                                               psPowerDevice->eCurrentPowerState);
+	}
+
+	ui64StopTimer = PVRSRVProcessStatsGetTimeUs();
+
+	InsertPowerTimeStatisticExtraPre(ui64StartTimer, ui64StopTimer);
+
+	return eError;
+}
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDevicePostClockSpeedChange
+
+@Description    This function is called after a voltage/frequency change has
+                been made to the GPU HW following a call to
+                PVRSRVDevicePreClockSpeedChange().
+                Before calling this function the caller must ensure the system
+                data RGX_DATA->RGX_TIMING_INFORMATION->ui32CoreClockSpeed has
+                been updated with the new frequency set, measured in Hz.
+                The function informs the host driver that the DVFS change has
+                completed. The driver will end the work hold off period, cancel
+                the device idle period and update its time data records.
+                When this call returns, work submissions are unblocked and
+                are submitted to the GPU as normal.
+                This function *must* not be called if the preceding call to
+                PVRSRVDevicePreClockSpeedChange() failed.
+
+                Called from the system layer or OS layer implementation that
+                is responsible for triggering a GPU DVFS transition.
+
+@Input          psDeviceNode pointer to the device affected by DVFS transition.
+@Input          bIdleDevice  when True, the driver will cancel the GPU
+                             device idle state before the call returns. Value
+                             given must match that used in the call to
+                             PVRSRVDevicePreClockSpeedChange() otherwise
+                             undefined behaviour will result.
+@Input          pvInfo       unused
+
+@Return         void         power lock released, no longer held on exit.
+*/ /**************************************************************************/
+void
+PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                 IMG_BOOL            bIdleDevice,
+                                 void*               pvInfo)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_POWER_DEV	*psPowerDevice;
+	IMG_UINT64			ui64StartTimer, ui64StopTimer;
+
+	PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+	ui64StartTimer = PVRSRVProcessStatsGetTimeUs();
+
+	psPowerDevice = psDeviceNode->psPowerDev;
+	if (psPowerDevice)
+	{
+		eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie,
+														psPowerDevice->eCurrentPowerState);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)",
+					 __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+		}
+
+		if ((psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+		{
+			eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to cancel forced IDLE.", __func__));
+			}
+		}
+	}
+
+	/* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. */
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	OSAtomicIncrement(&psDeviceNode->iNumClockSpeedChanges);
+
+	ui64StopTimer = PVRSRVProcessStatsGetTimeUs();
+
+	InsertPowerTimeStatisticExtraPost(ui64StartTimer, ui64StopTimer);
+}
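+
+/* DVFS sequence sketch (editor's illustration): a successful pre-change call
+ * must always be paired with a post-change call using the same bIdleDevice
+ * value; the platform frequency-change function is hypothetical.
+ *
+ *     eError = PVRSRVDevicePreClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         SetGPUFrequency(ui32NewClockSpeed);  // hypothetical platform call
+ *         PVRSRVDevicePostClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);
+ *     }
+ */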
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDeviceDustCountChange
+
+ @Description
+
+	Request from the system layer that the dust count be changed.
+
+ @Input		psDeviceNode : Device node
+ @Input		ui32DustCount : dust count to be set
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceDustCountChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+						IMG_UINT32	ui32DustCount)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_POWER_DEV	*psPowerDevice;
+
+	psPowerDevice = psDeviceNode->psPowerDev;
+	if (psPowerDevice)
+	{
+		PVRSRV_DEV_POWER_STATE eDevicePowerState;
+
+		eError = PVRSRVPowerLock(psDeviceNode);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+			return eError;
+		}
+
+		eDevicePowerState = psPowerDevice->eCurrentPowerState;
+		if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON)
+		{
+			/* Device must be idle to change dust count */
+			eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_FALSE);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle request failure (%s)",
+				                        __func__, PVRSRVGetErrorString(eError)));
+				if (eError == PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+				{
+					goto ErrorExit;
+				}
+				goto ErrorUnlockAndExit;
+			}
+		}
+
+		if (psPowerDevice->pfnDustCountRequest != NULL)
+		{
+			PVRSRV_ERROR	eError2 = psPowerDevice->pfnDustCountRequest(psPowerDevice->hDevCookie, ui32DustCount);
+
+			if (eError2 != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)",
+						 __func__, psDeviceNode,
+						 PVRSRVGetErrorString(eError2)));
+			}
+		}
+
+		if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON)
+		{
+			eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to cancel forced IDLE.", __func__));
+				goto ErrorUnlockAndExit;
+			}
+		}
+
+		PVRSRVPowerUnlock(psDeviceNode);
+	}
+
+	return eError;
+
+ErrorUnlockAndExit:
+	PVRSRVPowerUnlock(psDeviceNode);
+ErrorExit:
+	return eError;
+}
+
+/******************************************************************************
+ End of file (power.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/power.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/power.h
new file mode 100644
index 0000000..4c8e69b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/power.h
@@ -0,0 +1,123 @@
+/*************************************************************************/ /*!
+@File
+@Title          Power Management Functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for power management functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef POWER_H
+#define POWER_H
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_device.h"
+#include "pvrsrv_error.h"
+#include "servicesext.h"
+#include "opaque_types.h"
+
+/*!
+ *****************************************************************************
+ *	Power management
+ *****************************************************************************/
+
+typedef struct _PVRSRV_POWER_DEV_TAG_ PVRSRV_POWER_DEV;
+
+typedef IMG_BOOL (*PFN_SYS_DEV_IS_DEFAULT_STATE_OFF)(PVRSRV_POWER_DEV *psPowerDevice);
+
+
+PVRSRV_ERROR PVRSRVPowerLock(PCPVRSRV_DEVICE_NODE psDeviceNode);
+void PVRSRVPowerUnlock(PCPVRSRV_DEVICE_NODE psDeviceNode);
+PVRSRV_ERROR PVRSRVPowerTryLock(PCPVRSRV_DEVICE_NODE psDeviceNode);
+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice);
+
+
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE	psDeviceNode,
+										 PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										 IMG_BOOL				bForced);
+
+PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode,
+											 PVRSRV_SYS_POWER_STATE ePVRState);
+
+PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+					PVRSRV_DEV_POWER_STATE eNewPowerState);
+
+/* Type PFN_DC_REGISTER_POWER */
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE		psDeviceNode,
+									   PFN_PRE_POWER				pfnDevicePrePower,
+									   PFN_POST_POWER				pfnDevicePostPower,
+									   PFN_SYS_DEV_PRE_POWER		pfnSystemPrePower,
+									   PFN_SYS_DEV_POST_POWER		pfnSystemPostPower,
+									   PFN_PRE_CLOCKSPEED_CHANGE	pfnPreClockSpeedChange,
+									   PFN_POST_CLOCKSPEED_CHANGE	pfnPostClockSpeedChange,
+									   PFN_FORCED_IDLE_REQUEST		pfnForcedIdleRequest,
+									   PFN_FORCED_IDLE_CANCEL_REQUEST	pfnForcedIdleCancelRequest,
+									   PFN_DUST_COUNT_REQUEST	pfnDustCountRequest,
+									   IMG_HANDLE					hDevCookie,
+									   PVRSRV_DEV_POWER_STATE		eCurrentPowerState,
+									   PVRSRV_DEV_POWER_STATE		eDefaultPowerState);
+
+PVRSRV_ERROR PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+									   PPVRSRV_DEV_POWER_STATE pePowerState);
+
+IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+											 IMG_BOOL	bIdleDevice,
+											 void	*pvInfo);
+
+void PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+									  IMG_BOOL		bIdleDevice,
+									  void		*pvInfo);
+
+PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+					PFN_SYS_DEV_IS_DEFAULT_STATE_OFF	pfnCheckIdleReq,
+					IMG_BOOL				bDeviceOffPermitted);
+
+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+PVRSRV_ERROR PVRSRVDeviceDustCountChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+						IMG_UINT32	ui32DustCount);
+
+
+#endif /* POWER_H */
+
+/******************************************************************************
+ End of file (power.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/powervr/buffer_attribs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/powervr/buffer_attribs.h
new file mode 100644
index 0000000..6e8d6b5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/powervr/buffer_attribs.h
@@ -0,0 +1,94 @@
+/*************************************************************************/ /*!
+@File
+@Title          3D types for use by IMG APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _POWERVR_BUFFER_ATTRIBS_H_
+#define _POWERVR_BUFFER_ATTRIBS_H_
+
+/**
+ * Memory layouts
+ * Defines how pixels are laid out within a surface.
+ */
+typedef enum
+{
+	IMG_MEMLAYOUT_STRIDED,       /**< Resource is strided, one row at a time */
+	IMG_MEMLAYOUT_TWIDDLED,      /**< Resource is 2D twiddled, classic style */
+	IMG_MEMLAYOUT_3DTWIDDLED,    /**< Resource is 3D twiddled, classic style */
+	IMG_MEMLAYOUT_TILED,         /**< Resource is tiled, tiling config specified elsewhere. */
+	IMG_MEMLAYOUT_PAGETILED,     /**< Resource is pagetiled */
+} IMG_MEMLAYOUT;
+
+/**
+ * Rotation types
+ */
+typedef enum
+{
+	IMG_ROTATION_0DEG = 0,
+	IMG_ROTATION_90DEG = 1,
+	IMG_ROTATION_180DEG = 2,
+	IMG_ROTATION_270DEG = 3,
+	IMG_ROTATION_FLIP_Y = 4,
+
+	IMG_ROTATION_BAD = 255,
+} IMG_ROTATION;
+
+/**
+ * Colour space formats.
+ */
+typedef enum
+{
+	IMG_COLOURSPACE_FORMAT_UNKNOWN                 =  0x0 << 16,
+	IMG_COLOURSPACE_FORMAT_LINEAR                  =  0x1 << 16,
+	IMG_COLOURSPACE_FORMAT_SRGB                    =  0x2 << 16,
+	IMG_COLOURSPACE_FORMAT_SCRGB                   =  0x3 << 16,
+	IMG_COLOURSPACE_FORMAT_SCRGB_LINEAR            =  0x4 << 16,
+	IMG_COLOURSPACE_FORMAT_DISPLAY_P3_LINEAR       =  0x5 << 16,
+	IMG_COLOURSPACE_FORMAT_DISPLAY_P3              =  0x6 << 16,
+	IMG_COLOURSPACE_FORMAT_BT2020_PQ               =  0x7 << 16,
+	IMG_COLOURSPACE_FORMAT_BT2020_LINEAR           =  0x8 << 16,
+	IMG_COLOURSPACE_FORMAT_DISPLAY_P3_PASSTHROUGH  =  0x9 << 16,
+	IMG_COLOURSPACE_FORMAT_MASK                    =  0xF << 16,
+} IMG_COLOURSPACE_FORMAT;
+
+/**
+ * Types of framebuffer compression
+ */
+typedef enum
+{
+	IMG_FB_COMPRESSION_NONE,
+	IMG_FB_COMPRESSION_DIRECT_8x8,
+	IMG_FB_COMPRESSION_DIRECT_16x4,
+	IMG_FB_COMPRESSION_DIRECT_32x2,
+	IMG_FB_COMPRESSION_INDIRECT_8x8,
+	IMG_FB_COMPRESSION_INDIRECT_16x4,
+	IMG_FB_COMPRESSION_INDIRECT_4TILE_8x8,
+	IMG_FB_COMPRESSION_INDIRECT_4TILE_16x4,
+	IMG_FB_COMPRESSION_DIRECT_LOSSY_8x8,
+	IMG_FB_COMPRESSION_DIRECT_LOSSY_16x4,
+	IMG_FB_COMPRESSION_DIRECT_LOSSY_32x2,
+} IMG_FB_COMPRESSION;
+
+
+#endif /* _POWERVR_BUFFER_ATTRIBS_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/powervr/mem_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/powervr/mem_types.h
new file mode 100644
index 0000000..8fe959e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/powervr/mem_types.h
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File
+@Title          Public types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef POWERVR_TYPES_H
+#define POWERVR_TYPES_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(_MSC_VER)
+	#include "msvc_types.h"
+#elif defined(LINUX) && defined(__KERNEL__)
+	#include <linux/types.h>
+	#include <linux/compiler.h>
+#else
+	#include <stdint.h>
+	#define __iomem
+#endif
+
+typedef void *IMG_CPU_VIRTADDR;
+
+/* device virtual address */
+typedef struct
+{
+	uint64_t  uiAddr;
+#define IMG_CAST_TO_DEVVADDR_UINT(var)		(uint64_t)(var)
+
+} IMG_DEV_VIRTADDR;
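+
+/* Usage sketch (editor's illustration): the cast macro widens any integer
+ * value into the 64-bit device virtual address field; uiByteOffset is a
+ * hypothetical caller variable.
+ *
+ *     IMG_DEV_VIRTADDR sDevVAddr;
+ *     sDevVAddr.uiAddr = IMG_CAST_TO_DEVVADDR_UINT(uiByteOffset);
+ */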
+
+typedef uint64_t IMG_DEVMEM_SIZE_T;
+typedef uint64_t IMG_DEVMEM_ALIGN_T;
+typedef uint64_t IMG_DEVMEM_OFFSET_T;
+typedef uint32_t IMG_DEVMEM_LOG2ALIGN_T;
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/powervr/pvrsrv_sync_ext.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/powervr/pvrsrv_sync_ext.h
new file mode 100644
index 0000000..a15baab
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/powervr/pvrsrv_sync_ext.h
@@ -0,0 +1,57 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services external synchronisation interface header
+@Description    Defines synchronisation structures that are visible internally
+                and externally
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef POWERVR_SYNC_EXT_H
+#define POWERVR_SYNC_EXT_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*! Implementation independent types for passing fence/timeline to Services.
+ */
+typedef int32_t PVRSRV_FENCE;
+typedef int32_t PVRSRV_TIMELINE;
+
+/*! Maximum length for an annotation name string for fence sync model objects.
+ */
+#define PVRSRV_SYNC_NAME_LENGTH 32
+
+/* Macros for API callers using the fence sync model
+ */
+#define PVRSRV_NO_TIMELINE      ((PVRSRV_TIMELINE) -1)
+#define PVRSRV_NO_FENCE         ((PVRSRV_FENCE)    -1)
+#define PVRSRV_NO_FENCE_PTR     NULL
+#define PVRSRV_NO_TIMELINE_PTR  NULL
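+
+/* Usage sketch (editor's illustration): callers with no fence or timeline to
+ * pass should hand in the 'NO' values rather than an arbitrary integer.
+ *
+ *     PVRSRV_FENCE    hCheckFence = PVRSRV_NO_FENCE;
+ *     PVRSRV_TIMELINE hTimeline   = PVRSRV_NO_TIMELINE;
+ */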
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/powervr/sync_external.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/powervr/sync_external.h
new file mode 100644
index 0000000..d7b906e3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/powervr/sync_external.h
@@ -0,0 +1,86 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services external synchronisation interface header
+@Description    Defines synchronisation structures that are visible internally
+                and externally
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_EXTERNAL_
+#define _SYNC_EXTERNAL_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include <powervr/mem_types.h>
+
+/*!
+ * Maximum byte length for a sync prim name
+ */
+#define SYNC_MAX_CLASS_NAME_LEN 32
+
+/*!
+ * Maximum number of sync primitives in operations
+ */
+#define	PVRSRV_MAX_SYNC_PRIMS 32
+
+typedef void* PVRSRV_CLIENT_SYNC_PRIM_HANDLE;
+typedef void* SYNC_BRIDGE_HANDLE;
+typedef struct SYNC_PRIM_CONTEXT *PSYNC_PRIM_CONTEXT;
+typedef struct _SYNC_OP_COOKIE_ *PSYNC_OP_COOKIE;
+
+/*!
+ * Client sync prim definition holding a CPU accessible address
+ *
+ *   Structure: #PVRSRV_CLIENT_SYNC_PRIM
+ *   Typedef: ::PVRSRV_CLIENT_SYNC_PRIM
+ */
+typedef struct PVRSRV_CLIENT_SYNC_PRIM
+{
+	volatile uint32_t __iomem *pui32LinAddr;	/*!< User pointer to the primitive */
+} PVRSRV_CLIENT_SYNC_PRIM;
+
+/*!
+ * Bundled information for a sync prim operation
+ *
+ *   Structure: #PVRSRV_CLIENT_SYNC_PRIM_OP
+ *   Typedef: ::PVRSRV_CLIENT_SYNC_PRIM_OP
+ */
+typedef struct PVRSRV_CLIENT_SYNC_PRIM_OP
+{
+	#define PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK	(1 << 0)
+	#define PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE	(1 << 1)
+	#define PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE (PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE | (1<<2))
+	uint32_t                    ui32Flags;       /*!< Operation flags: PVRSRV_CLIENT_SYNC_PRIM_OP_XXX */
+	PVRSRV_CLIENT_SYNC_PRIM    *psSync;          /*!< Pointer to the client sync primitive */
+	uint32_t                    ui32FenceValue;  /*!< The Fence value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK is set) */
+	uint32_t                    ui32UpdateValue; /*!< The Update value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE is set) */
+} PVRSRV_CLIENT_SYNC_PRIM_OP;
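+
+/* Usage sketch (illustrative only; psSyncPrim is a hypothetical,
+ * already-allocated primitive): a combined check/update operation that
+ * waits for the prim to reach 1 and then advances it to 2.
+ *
+ *     PVRSRV_CLIENT_SYNC_PRIM_OP sOp = { 0 };
+ *     sOp.ui32Flags       = PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK |
+ *                           PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE;
+ *     sOp.psSync          = psSyncPrim;
+ *     sOp.ui32FenceValue  = 1;
+ *     sOp.ui32UpdateValue = 2;
+ */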
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* _SYNC_EXTERNAL_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/private_data.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/private_data.h
new file mode 100644
index 0000000..6d63f15
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/private_data.h
@@ -0,0 +1,53 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux private data structure
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__INCLUDED_PRIVATE_DATA_H_)
+#define __INCLUDED_PRIVATE_DATA_H_
+
+#include <linux/fs.h>
+
+#include "connection_server.h"
+
+CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile);
+struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection);
+
+#endif /* !defined(__INCLUDED_PRIVATE_DATA_H_) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/proc_stats.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/proc_stats.h
new file mode 100644
index 0000000..f352d66
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/proc_stats.h
@@ -0,0 +1,153 @@
+/*************************************************************************/ /*!
+@File
+@Title          Process and driver statistic definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PROC_STATS_H
+#define PROC_STATS_H
+
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#define PVRSRV_PROCESS_STAT_KMALLOC_STAT_KEY \
+	X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \
+	X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \
+	X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax")
+#else
+/* Empty strings if these stats are not logged */
+#define PVRSRV_PROCESS_STAT_KMALLOC_STAT_KEY \
+	X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC, "") \
+	X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX, "") \
+	X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC, "") \
+	X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX, "")
+#endif
+
+
+/* X-Macro for Process stat keys */
+#define PVRSRV_PROCESS_STAT_KEY \
+	X(PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS, "Connections") \
+	X(PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS, "ConnectionsMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_OOMS, "RenderContextOutOfMemoryEvents") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_PRS, "RenderContextPartialRenders") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_GROWS, "RenderContextGrows") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS, "RenderContextPushGrows") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES, "RenderContextTAStores") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES, "RenderContext3DStores") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES, "RenderContextSHStores") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES, "RenderContextCDMStores") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP, "ZSBufferRequestsByApp") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW, "ZSBufferRequestsByFirmware") \
+	X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP, "FreeListGrowRequestsByApp") \
+	X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW, "FreeListGrowRequestsByFirmware") \
+	X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT, "FreeListInitialPages") \
+	X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES, "FreeListMaxPages") \
+	PVRSRV_PROCESS_STAT_KMALLOC_STAT_KEY \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, "MemoryUsageAllocPTMemoryUMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, "MemoryUsageAllocPTMemoryLMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, "MemoryUsageAllocGPUMemLMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES_MAX, "MemoryUsageAllocGPUMemLMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, "MemoryUsageAllocGPUMemUMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES_MAX, "MemoryUsageAllocGPUMemUMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, "MemoryUsageMappedGPUMemUMA/LMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_TOTAL, "MemoryUsageTotal") \
+	X(PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX, "MemoryUsageTotalMax")
+
+
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#define PVRSRV_DRIVER_STAT_KMALLOC_STAT_KEY \
+	X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \
+	X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \
+	X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax")
+#else
+/* Empty strings if these stats are not logged */
+#define PVRSRV_DRIVER_STAT_KMALLOC_STAT_KEY \
+	X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC, "") \
+	X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX, "") \
+	X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC, "") \
+	X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC_MAX, "")
+#endif
+
+/* X-Macro for Driver stat keys */
+#define PVRSRV_DRIVER_STAT_KEY \
+	PVRSRV_DRIVER_STAT_KMALLOC_STAT_KEY \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, "MemoryUsageAllocPTMemoryUMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, "MemoryUsageAllocPTMemoryLMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, "MemoryUsageAllocGPUMemLMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA_MAX, "MemoryUsageAllocGPUMemLMAMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, "MemoryUsageAllocGPUMemUMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_MAX, "MemoryUsageAllocGPUMemUMAMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, "MemoryUsageAllocGPUMemUMAPool") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL_MAX, "MemoryUsageAllocGPUMemUMAPoolMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, "MemoryUsageMappedGPUMemUMA/LMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax")
+
+
+typedef enum {
+#define X(stat_type, stat_str) stat_type,
+	PVRSRV_PROCESS_STAT_KEY
+#undef X
+	PVRSRV_PROCESS_STAT_TYPE_COUNT
+} PVRSRV_PROCESS_STAT_TYPE;
+
+typedef enum {
+#define X(stat_type, stat_str) stat_type,
+	PVRSRV_DRIVER_STAT_KEY
+#undef X
+	PVRSRV_DRIVER_STAT_TYPE_COUNT
+} PVRSRV_DRIVER_STAT_TYPE;
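+
+/* Expansion sketch (illustrative only): with
+ *     #define X(stat_type, stat_str) stat_type,
+ * PVRSRV_PROCESS_STAT_KEY unrolls to
+ *     PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS,
+ *     PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS,
+ *     ...
+ * while a string-emitting definition of X (see process_stats.c) yields the
+ * matching "Connections", "ConnectionsMax", ... so the enum indices and the
+ * name tables stay in lockstep by construction.
+ */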
+
+extern const IMG_CHAR *const pszProcessStatType[];
+
+extern const IMG_CHAR *const pszDriverStatType[];
+
+#endif // PROC_STATS_H
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/process_stats.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/process_stats.c
new file mode 100644
index 0000000..7b5193a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/process_stats.c
@@ -0,0 +1,3316 @@
+/*************************************************************************/ /*!
+@File
+@Title          Process based statistics
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Manages a collection of statistics based around a process
+                and referenced via OS agnostic methods.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "lock.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "lists.h"
+#include "process_stats.h"
+#include "ri_server.h"
+#include "hash.h"
+#include "connection_server.h"
+#include "pvrsrv.h"
+#include "proc_stats.h"
+#include "htbuffer.h"
+#include "pvr_ricommon.h"
+
+/* Enabled OS Statistics entries: DEBUGFS on Linux, undefined for other OSs */
+#if defined(LINUX) && ( \
+	defined(PVRSRV_ENABLE_PERPID_STATS) || \
+	defined(PVRSRV_ENABLE_CACHEOP_STATS) || \
+	defined(PVRSRV_ENABLE_MEMORY_STATS) || \
+	defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) )
+#define ENABLE_DEBUGFS_PIDS
+#endif
+
+/*
+ *  Maximum history of process statistics that will be kept.
+ */
+#define MAX_DEAD_LIST_PROCESSES  (10)
+
+/*
+ * Definition of all the strings used to format process based statistics.
+ */
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+/* Array of Process stat type defined using the X-Macro */
+#define X(stat_type, stat_str) stat_str,
+const IMG_CHAR *const pszProcessStatType[PVRSRV_PROCESS_STAT_TYPE_COUNT] = { PVRSRV_PROCESS_STAT_KEY };
+#undef X
+#endif
+
+/* Array of Driver stat type defined using the X-Macro */
+#define X(stat_type, stat_str) stat_str,
+const IMG_CHAR *const pszDriverStatType[PVRSRV_DRIVER_STAT_TYPE_COUNT] = { PVRSRV_DRIVER_STAT_KEY };
+#undef X
+
+/* structure used in hash table to track statistic entries */
+typedef struct {
+	size_t	   uiSizeInBytes;
+	IMG_PID	   uiPid;
+} _PVR_STATS_TRACKING_HASH_ENTRY;
+
+/* Function used internally to decrement tracked per-process statistic entries */
+static void _StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry,
+                                    PVRSRV_MEM_ALLOC_TYPE eAllocType);
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+void RawProcessStatsPrintElements(void *pvFile,
+				  void *pvStatPtr,
+				  OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+#endif
+
+void  PowerStatsPrintElements(void *pvFile,
+							  void *pvStatPtr,
+							  OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void  GlobalStatsPrintElements(void *pvFile,
+							   void *pvStatPtr,
+							   OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+/* Note: all accesses to the global stats must be protected by the
+ * gsGlobalStats.hGlobalStatsLock lock. This applies to every invocation
+ * of the *_GLOBAL_STAT_VALUE macros. */
+
+/* Macro for fetching stat values */
+#define GET_GLOBAL_STAT_VALUE(idx) gsGlobalStats.ui32StatValue[idx]
+/*
+ *  Macros for updating stat values.
+ */
+#define UPDATE_MAX_VALUE(a,b)					do { if ((b) > (a)) {(a) = (b);} } while(0)
+#define INCREASE_STAT_VALUE(ptr,var,val)		do { (ptr)->i32StatValue[(var)] += (val); if ((ptr)->i32StatValue[(var)] > (ptr)->i32StatValue[(var##_MAX)]) {(ptr)->i32StatValue[(var##_MAX)] = (ptr)->i32StatValue[(var)];} } while(0)
+#define INCREASE_GLOBAL_STAT_VALUE(var,idx,val)		do { (var).ui32StatValue[(idx)] += (val); if ((var).ui32StatValue[(idx)] > (var).ui32StatValue[(idx##_MAX)]) {(var).ui32StatValue[(idx##_MAX)] = (var).ui32StatValue[(idx)];} } while(0)
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+/* Allow stats to go negative */
+#define DECREASE_STAT_VALUE(ptr,var,val)		do { (ptr)->i32StatValue[(var)] -= (val); } while(0)
+#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val)		do { (var).ui32StatValue[(idx)] -= (val); } while(0)
+#else
+#define DECREASE_STAT_VALUE(ptr,var,val)		do { if ((ptr)->i32StatValue[(var)] >= (val)) { (ptr)->i32StatValue[(var)] -= (val); } else { (ptr)->i32StatValue[(var)] = 0; } } while(0)
+#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val)		do { if ((var).ui32StatValue[(idx)] >= (val)) { (var).ui32StatValue[(idx)] -= (val); } else { (var).ui32StatValue[(idx)] = 0; } } while(0)
+#endif
+#define MAX_CACHEOP_STAT 16
+#define INCREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x+1) >= MAX_CACHEOP_STAT ? 0 : (x+1))
+#define DECREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x-1) < 0 ? (MAX_CACHEOP_STAT-1) : (x-1))
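+
+/* Index arithmetic sketch (illustrative only): with MAX_CACHEOP_STAT == 16,
+ * INCREMENT_CACHEOP_STAT_IDX_WRAP(15) == 0 and
+ * DECREMENT_CACHEOP_STAT_IDX_WRAP(0) == 15, so the asCacheOp[] array below
+ * behaves as a 16-entry ring buffer without any modulo operation.
+ */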
+
+/*
+ * Structures for holding statistics...
+ */
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+typedef struct _PVRSRV_MEM_ALLOC_REC_
+{
+	PVRSRV_MEM_ALLOC_TYPE           eAllocType;
+	IMG_UINT64                      ui64Key;
+	void*                           pvCpuVAddr;
+	IMG_CPU_PHYADDR	                sCpuPAddr;
+	size_t                          uiBytes;
+	void*                           pvPrivateData;
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+	void*                           pvAllocdFromFile;
+	IMG_UINT32                      ui32AllocdFromLine;
+#endif
+	IMG_PID	                        pid;
+	struct _PVRSRV_MEM_ALLOC_REC_*  psNext;
+	struct _PVRSRV_MEM_ALLOC_REC_** ppsThis;
+} PVRSRV_MEM_ALLOC_REC;
+#endif
+
+typedef struct _PVRSRV_PROCESS_STATS_ {
+
+	/* Linked list pointers */
+	struct _PVRSRV_PROCESS_STATS_* psNext;
+	struct _PVRSRV_PROCESS_STATS_* psPrev;
+
+	/* Per-process lock that must be held
+	 * when editing this structure's members */
+	POS_LOCK                       hLock;
+
+	/* OS level process ID */
+	IMG_PID	                       pid;
+	IMG_UINT32                     ui32RefCount;
+
+	/* Stats... */
+	IMG_INT32                      i32StatValue[PVRSRV_PROCESS_STAT_TYPE_COUNT];
+	IMG_UINT32                     ui32StatAllocFlags;
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+	struct _CACHEOP_STRUCT_  {
+		PVRSRV_CACHE_OP        uiCacheOp;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+		IMG_DEV_VIRTADDR       sDevVAddr;
+		IMG_DEV_PHYADDR        sDevPAddr;
+		RGXFWIF_DM             eFenceOpType;
+#endif
+		IMG_DEVMEM_SIZE_T      uiOffset;
+		IMG_DEVMEM_SIZE_T      uiSize;
+		IMG_UINT64             ui64ExecuteTime;
+		IMG_BOOL               bRangeBasedFlush;
+		IMG_BOOL               bUserModeFlush;
+		IMG_UINT32             ui32OpSeqNum;
+		IMG_BOOL               bIsFence;
+		IMG_PID                ownerPid;
+	}                              asCacheOp[MAX_CACHEOP_STAT];
+	IMG_INT32                      uiCacheOpWriteIndex;
+#endif
+
+	/* Other statistics structures */
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRV_MEM_ALLOC_REC*          psMemoryRecords;
+#endif
+} PVRSRV_PROCESS_STATS;
+
+#if defined (ENABLE_DEBUGFS_PIDS)
+
+typedef struct _PVRSRV_OS_STAT_ENTRY_
+{
+	void *pvOSStatsFolderData;
+	void *pvOSProcessStatsEntryData;
+	void *pvOSMemStatsEntryData;
+	void *pvOSRIMemStatsEntryData;
+	void *pvOSCacheOpStatsEntryData;
+
+} PVRSRV_OS_STAT_ENTRY;
+
+static PVRSRV_OS_STAT_ENTRY gsLiveStatEntries;
+static PVRSRV_OS_STAT_ENTRY gsRetiredStatEntries;
+
+void  GenericStatsPrintElementsLive(void *pvFile,
+				    void *pvStatPtr,
+				    OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void  GenericStatsPrintElementsRetired(void *pvFile,
+				       void *pvStatPtr,
+				       OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+/*
+ *  Functions for printing the information stored...
+ */
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+void  ProcessStatsPrintElements(void *pvFile,
+				PVRSRV_PROCESS_STATS *psProcessStats,
+				OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+void  MemStatsPrintElements(void *pvFile,
+			    PVRSRV_PROCESS_STATS *psProcessStats,
+			    OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+void  RIMemStatsPrintElements(void *pvFile,
+			      PVRSRV_PROCESS_STATS *psProcessStats,
+			      OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+void  CacheOpStatsPrintElements(void *pvFile,
+				PVRSRV_PROCESS_STATS *psProcessStats,
+				OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+#endif
+
+typedef void (PVRSRV_STATS_PRINT_ELEMENTS)(void *pvFile,
+					   PVRSRV_PROCESS_STATS *psProcessStats,
+					   OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+typedef enum
+{
+	PVRSRV_STAT_TYPE_PROCESS,
+	PVRSRV_STAT_TYPE_MEMORY,
+	PVRSRV_STAT_TYPE_RIMEMORY,
+	PVRSRV_STAT_TYPE_CACHEOP,
+	PVRSRV_STAT_TYPE_LAST
+} PVRSRV_STAT_TYPE;
+
+#define SEPARATOR_STR_LEN 166
+
+typedef struct _PVRSRV_STAT_PV_DATA_ {
+
+	PVRSRV_STAT_TYPE eStatType;
+	PVRSRV_STATS_PRINT_ELEMENTS* pfnStatsPrintElements;
+	IMG_CHAR szLiveStatsHeaderStr[SEPARATOR_STR_LEN + 1];
+	IMG_CHAR szRetiredStatsHeaderStr[SEPARATOR_STR_LEN + 1];
+
+} PVRSRV_STAT_PV_DATA;
+
+static PVRSRV_STAT_PV_DATA g_StatPvDataArr[] = {
+						{ PVRSRV_STAT_TYPE_PROCESS,  NULL, " Process"               , " Process"               },
+						{ PVRSRV_STAT_TYPE_MEMORY,   NULL, " Memory Allocation"     , " Memory Allocation"     },
+						{ PVRSRV_STAT_TYPE_RIMEMORY, NULL, " Resource Allocation"   , " Resource Allocation"   },
+						{ PVRSRV_STAT_TYPE_CACHEOP,  NULL, " Cache Maintenance Ops" , " Cache Maintenance Ops" }
+					      };
+
+#define GET_STAT_ENTRY_ID(STAT_TYPE) &g_StatPvDataArr[(STAT_TYPE)]
+
+/* Generic header strings */
+static const IMG_CHAR g_szLiveHeaderStr[]    = " Statistics for LIVE Processes ";
+static const IMG_CHAR g_szRetiredHeaderStr[] = " Statistics for RETIRED Processes ";
+
+/* Separator string used for separating stats for different PIDs */
+static IMG_CHAR g_szSeparatorStr[SEPARATOR_STR_LEN + 1] = "";
+
+static inline void
+_prepareStatsHeaderString(IMG_CHAR *pszStatsSpecificStr, const IMG_CHAR* pszGenericHeaderStr)
+{
+	IMG_UINT32 ui32NumSeparators;
+	IMG_CHAR szStatsHeaderFooterStr[75];
+
+	/* Prepare text content of the header in a local string */
+	strcpy(szStatsHeaderFooterStr, pszStatsSpecificStr);
+	strcat(szStatsHeaderFooterStr, pszGenericHeaderStr);
+
+	/* Write all '-' characters to the header string */
+	memset(pszStatsSpecificStr, '-', SEPARATOR_STR_LEN);
+	pszStatsSpecificStr[SEPARATOR_STR_LEN] = '\0';
+
+	/* Find the spot for text content in the header string */
+	ui32NumSeparators = (SEPARATOR_STR_LEN - strlen(szStatsHeaderFooterStr)) >> 1;
+
+	/* Finally write the text content */
+	OSSNPrintf(pszStatsSpecificStr + ui32NumSeparators,
+		   strlen(szStatsHeaderFooterStr),
+		   "%s", szStatsHeaderFooterStr);
+
+	/* Overwrite the '\0' character added by OSSNPrintf() */
+	if (strlen(szStatsHeaderFooterStr) > 0)
+	{
+		pszStatsSpecificStr[ui32NumSeparators + strlen(szStatsHeaderFooterStr) - 1] = ' ';
+	}
+}
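+
+/* Layout sketch (illustrative only): after _prepareStatsHeaderString() the
+ * buffer holds SEPARATOR_STR_LEN '-' characters with the combined text
+ * centred in the middle, e.g. for the process entry:
+ *     "-----...----- Process Statistics for LIVE Processes -----...-----"
+ */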
+
+static inline void
+_prepareSeparatorStrings(void)
+{
+	IMG_UINT32 i;
+
+	/* Prepare header strings for each stat type */
+	for (i = 0; i < PVRSRV_STAT_TYPE_LAST; ++i)
+	{
+		_prepareStatsHeaderString(g_StatPvDataArr[i].szLiveStatsHeaderStr, g_szLiveHeaderStr);
+		_prepareStatsHeaderString(g_StatPvDataArr[i].szRetiredStatsHeaderStr, g_szRetiredHeaderStr);
+	}
+
+	/* Prepare separator string to separate stats for different PIDs */
+	memset(g_szSeparatorStr, '-', SEPARATOR_STR_LEN);
+	g_szSeparatorStr[SEPARATOR_STR_LEN] = '\0';
+}
+
+static inline void
+_prepareStatsPrivateData(void)
+{
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+	g_StatPvDataArr[PVRSRV_STAT_TYPE_PROCESS].pfnStatsPrintElements = ProcessStatsPrintElements;
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	g_StatPvDataArr[PVRSRV_STAT_TYPE_MEMORY].pfnStatsPrintElements = MemStatsPrintElements;
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	g_StatPvDataArr[PVRSRV_STAT_TYPE_RIMEMORY].pfnStatsPrintElements = RIMemStatsPrintElements;
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+	g_StatPvDataArr[PVRSRV_STAT_TYPE_CACHEOP].pfnStatsPrintElements = CacheOpStatsPrintElements;
+#endif
+
+	_prepareSeparatorStrings();
+}
+
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+static IMPLEMENT_LIST_INSERT(PVRSRV_MEM_ALLOC_REC)
+static IMPLEMENT_LIST_REMOVE(PVRSRV_MEM_ALLOC_REC)
+#endif
+
+/*
+ *  Global Boolean to flag when the statistics are ready to monitor
+ *  memory allocations.
+ */
+static  IMG_BOOL  bProcessStatsInitialised = IMG_FALSE;
+
+/*
+ * Linked lists for process stats. Live stats are for processes which are still running
+ * and the dead list holds those that have exited.
+ */
+static PVRSRV_PROCESS_STATS *g_psLiveList;
+static PVRSRV_PROCESS_STATS *g_psDeadList;
+
+static POS_LOCK g_psLinkedListLock;
+/* The lockdep feature in the kernel cannot differentiate between different
+ * instances of the same lock type, so it groups all such instances under one
+ * class. The consequence of this is that, if lock acquisition is nested on
+ * different instances, it generates a false warning about possible deadlock
+ * due to recursive lock acquisition. Hence we create the following subclasses
+ * to explicitly apprise lockdep of such safe lock nesting. */
+#define PROCESS_LOCK_SUBCLASS_CURRENT	1
+#define PROCESS_LOCK_SUBCLASS_PREV 		2
+#define PROCESS_LOCK_SUBCLASS_NEXT 		3
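+
+/* Nesting sketch (illustrative only; psStats is hypothetical): when relinking
+ * neighbouring list entries, each instance is taken with a distinct subclass
+ * so lockdep does not report recursive acquisition of the same lock class:
+ *
+ *     OSLockAcquireNested(psStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ *     OSLockAcquireNested(psStats->psPrev->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+ *     // ... relink psStats and psStats->psPrev ...
+ *     OSLockRelease(psStats->psPrev->hLock);
+ *     OSLockRelease(psStats->hLock);
+ */
+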
+#if defined(ENABLE_DEBUGFS_PIDS)
+/*
+ * Pointer to OS folder to hold PID folders.
+ */
+static void *pvOSProcStatsFolder;
+#endif
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+static void *pvOSProcStats;
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+/* global driver PID stats registration handle */
+static IMG_HANDLE g_hDriverProcessStats;
+#endif
+
+/* Global driver-wide statistics */
+typedef struct _GLOBAL_STATS_
+{
+	IMG_UINT32  ui32StatValue[PVRSRV_DRIVER_STAT_TYPE_COUNT];
+	POS_LOCK   hGlobalStatsLock;
+} GLOBAL_STATS;
+
+static void *pvOSGlobalMemEntryRef;
+static IMG_CHAR* const pszDriverStatFilename = "driver_stats";
+static GLOBAL_STATS gsGlobalStats;
+
+#define HASH_INITIAL_SIZE 5
+/* A hash table used to store the size of any vmalloc'd allocation
+ * against its address (not needed for kmallocs as we can use ksize()) */
+static HASH_TABLE* gpsSizeTrackingHashTable;
+static POS_LOCK	 gpsSizeTrackingHashTableLock;
+
+static PVRSRV_ERROR _RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid);
+
+static void _AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats);
+static void _AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats);
+static void _RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats);
+
+static void _DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats);
+
+static void _DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                   PVRSRV_PROCESS_STATS* psProcessStats,
+                                   IMG_UINT32 uiBytes);
+/*
+ * Power statistics related definitions
+ */
+
+/* For the time being, use an exponentially weighted moving average with a
+ * 1/4 weighting for the new measurement.
+ */
+#define MEAN_TIME(A, B)     ( ((3*(A))/4) + ((1 * (B))/4) )
+
+#define UPDATE_TIME(time, newtime) \
+	((time) > 0 ? MEAN_TIME((time),(newtime)) : (newtime))
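+
+/* Worked example (illustrative only): an existing average of 100us and a new
+ * sample of 200us give
+ *     MEAN_TIME(100, 200) = (3*100)/4 + (1*200)/4 = 75 + 50 = 125
+ * while UPDATE_TIME(0, 200) simply seeds the average with 200.
+ */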
+
+/* Enum to be used as input to GET_POWER_STAT_INDEX */
+typedef enum
+{
+	DEVICE     = 0,
+	SYSTEM     = 1,
+	POST_POWER = 0,
+	PRE_POWER  = 2,
+	POWER_OFF  = 0,
+	POWER_ON   = 4,
+	NOT_FORCED = 0,
+	FORCED     = 8,
+} PVRSRV_POWER_STAT_TYPE;
+
+/* Macro used to access one of the power timing statistics inside an array */
+#define GET_POWER_STAT_INDEX(forced,powon,prepow,system) \
+	((forced) + (powon) + (prepow) + (system))
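+
+/* Index sketch (illustrative only): the enum values are disjoint bit weights,
+ * so, for example,
+ *     GET_POWER_STAT_INDEX(FORCED, POWER_ON, PRE_POWER, SYSTEM)
+ *         == 8 + 4 + 2 + 1 == 15
+ * while the NOT_FORCED/POWER_OFF/POST_POWER/DEVICE combination maps to 0,
+ * covering all 16 slots of the stats array declared below.
+ */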
+
+/* For the power timing stats we need 16 variables to store all the
+ * combinations of forced/not forced, power-on/power-off, pre-power/post-power
+ * and device/system statistics
+ */
+#define NUM_POWER_STATS        (16)
+static IMG_UINT32 aui32PowerTimingStats[NUM_POWER_STATS];
+
+static void *pvOSPowerStatsEntryData;
+
+typedef struct _EXTRA_POWER_STATS_
+{
+	IMG_UINT64	ui64PreClockSpeedChangeDuration;
+	IMG_UINT64	ui64BetweenPreEndingAndPostStartingDuration;
+	IMG_UINT64	ui64PostClockSpeedChangeDuration;
+} EXTRA_POWER_STATS;
+
+#define NUM_EXTRA_POWER_STATS	10
+
+static EXTRA_POWER_STATS asClockSpeedChanges[NUM_EXTRA_POWER_STATS];
+static IMG_UINT32 ui32ClockSpeedIndexStart, ui32ClockSpeedIndexEnd;
+
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+                              IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+                              IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower)
+{
+	IMG_UINT32 *pui32Stat;
+	IMG_UINT64 ui64DeviceDiff = ui64DevEndTime - ui64DevStartTime;
+	IMG_UINT64 ui64SystemDiff = ui64SysEndTime - ui64SysStartTime;
+	IMG_UINT32 ui32Index;
+
+	if (bPrePower)
+	{
+		HTBLOGK(HTB_SF_MAIN_PRE_POWER, bPowerOn, ui64DeviceDiff, ui64SystemDiff);
+	}
+	else
+	{
+		HTBLOGK(HTB_SF_MAIN_POST_POWER, bPowerOn, ui64SystemDiff, ui64DeviceDiff);
+	}
+
+	ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
+	                                 bPowerOn ? POWER_ON : POWER_OFF,
+	                                 bPrePower ? PRE_POWER : POST_POWER,
+	                                 DEVICE);
+	pui32Stat = &aui32PowerTimingStats[ui32Index];
+	*pui32Stat = UPDATE_TIME(*pui32Stat, ui64DeviceDiff);
+
+	ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
+	                                 bPowerOn ? POWER_ON : POWER_OFF,
+	                                 bPrePower ? PRE_POWER : POST_POWER,
+	                                 SYSTEM);
+	pui32Stat = &aui32PowerTimingStats[ui32Index];
+	*pui32Stat = UPDATE_TIME(*pui32Stat, ui64SystemDiff);
+}
+
+static IMG_UINT64 ui64PreClockSpeedChangeMark;
+
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
+{
+	asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PreClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
+
+	ui64PreClockSpeedChangeMark = OSClockus();
+}
+
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
+{
+	IMG_UINT64 ui64Duration = ui64StartTimer - ui64PreClockSpeedChangeMark;
+
+	PVR_ASSERT(ui64PreClockSpeedChangeMark > 0);
+
+	asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64BetweenPreEndingAndPostStartingDuration = ui64Duration;
+	asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PostClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
+
+	ui32ClockSpeedIndexEnd = (ui32ClockSpeedIndexEnd + 1) % NUM_EXTRA_POWER_STATS;
+
+	if (ui32ClockSpeedIndexEnd == ui32ClockSpeedIndexStart)
+	{
+		ui32ClockSpeedIndexStart = (ui32ClockSpeedIndexStart + 1) % NUM_EXTRA_POWER_STATS;
+	}
+
+	ui64PreClockSpeedChangeMark = 0;
+}
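+
+/* Buffer behaviour sketch (illustrative only): with NUM_EXTRA_POWER_STATS at
+ * 10, the eleventh record overwrites the oldest: the end index wraps from 9
+ * back to 0 and, because it then equals the start index, the start index
+ * advances to 1, so readers always see the ten most recent clock speed
+ * changes.
+ */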
+#endif
+
+/*************************************************************************/ /*!
+@Function       _FindProcessStatsInLiveList
+@Description    Searches the Live Process List for a statistics structure that
+                matches the PID given.
+@Input          pid  Process to search for.
+@Return         Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStatsInLiveList(IMG_PID pid)
+{
+	PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList;
+
+	while (psProcessStats != NULL)
+	{
+		if (psProcessStats->pid == pid)
+		{
+			return psProcessStats;
+		}
+
+		psProcessStats = psProcessStats->psNext;
+	}
+
+	return NULL;
+} /* _FindProcessStatsInLiveList */
+
+/*************************************************************************/ /*!
+@Function       _FindProcessStatsInDeadList
+@Description    Searches the Dead Process List for a statistics structure that
+                matches the PID given.
+@Input          pid  Process to search for.
+@Return         Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStatsInDeadList(IMG_PID pid)
+{
+	PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList;
+
+	while (psProcessStats != NULL)
+	{
+		if (psProcessStats->pid == pid)
+		{
+			return psProcessStats;
+		}
+
+		psProcessStats = psProcessStats->psNext;
+	}
+
+	return NULL;
+} /* _FindProcessStatsInDeadList */
+
+/*************************************************************************/ /*!
+@Function       _FindProcessStats
+@Description    Searches the Live and Dead Process Lists for a statistics
+                structure that matches the PID given.
+@Input          pid  Process to search for.
+@Return         Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStats(IMG_PID pid)
+{
+	PVRSRV_PROCESS_STATS* psProcessStats = _FindProcessStatsInLiveList(pid);
+
+	if (psProcessStats == NULL)
+	{
+		psProcessStats = _FindProcessStatsInDeadList(pid);
+	}
+
+	return psProcessStats;
+} /* _FindProcessStats */
+
+/*************************************************************************/ /*!
+@Function       _CompressMemoryUsage
+@Description    Reduces memory usage by deleting old statistics data.
+                This function requires that the list lock is not held!
+*/ /**************************************************************************/
+static void
+_CompressMemoryUsage(void)
+{
+	PVRSRV_PROCESS_STATS* psProcessStats;
+	PVRSRV_PROCESS_STATS* psProcessStatsToBeFreed;
+	IMG_UINT32 ui32ItemsRemaining;
+
+	/*
+	 * We hold the lock whilst checking the list, but we'll release it
+	 * before freeing memory (as that will require the lock too)!
+	 */
+	OSLockAcquire(g_psLinkedListLock);
+
+	/* Check that the dead list is not bigger than the max size... */
+	psProcessStats          = g_psDeadList;
+	psProcessStatsToBeFreed = NULL;
+	ui32ItemsRemaining      = MAX_DEAD_LIST_PROCESSES;
+
+	while (psProcessStats != NULL  &&  ui32ItemsRemaining > 0)
+	{
+		ui32ItemsRemaining--;
+		if (ui32ItemsRemaining == 0)
+		{
+			/* This is the last allowed process, cut the linked list here! */
+			psProcessStatsToBeFreed = psProcessStats->psNext;
+			psProcessStats->psNext  = NULL;
+		}
+		else
+		{
+			psProcessStats = psProcessStats->psNext;
+		}
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+
+	/* Any processes stats remaining will need to be destroyed... */
+	while (psProcessStatsToBeFreed != NULL)
+	{
+		PVRSRV_PROCESS_STATS* psNextProcessStats = psProcessStatsToBeFreed->psNext;
+
+		psProcessStatsToBeFreed->psNext = NULL;
+		_DestroyProcessStat(psProcessStatsToBeFreed);
+		psProcessStatsToBeFreed = psNextProcessStats;
+	}
+} /* _CompressMemoryUsage */
+
+/* These functions move the process stats from the live to the dead list.
+ * _MoveProcessToDeadList moves the entry in the global lists and
+ * it needs to be protected by g_psLinkedListLock.
+ * _MoveProcessToDeadListDebugFS performs the OS calls and it
+ * shouldn't be used under g_psLinkedListLock because this could generate a
+ * lockdep warning. */
+static void
+_MoveProcessToDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	/* Take the element out of the live list and add it to the front of the dead list... */
+	_RemoveProcessStatsFromList(psProcessStats);
+	_AddProcessStatsToFrontOfDeadList(psProcessStats);
+} /* _MoveProcessToDeadList */
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+/* These functions move the process stats from the dead to the live list.
+ * _MoveProcessToLiveList moves the entry in the global lists and
+ * it needs to be protected by g_psLinkedListLock.
+ * _MoveProcessToLiveListDebugFS performs the OS calls and it
+ * shouldn't be used under g_psLinkedListLock because this could generate a
+ * lockdep warning. */
+static void
+_MoveProcessToLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	/* Take the element out of the dead list and add it to the front of the live list... */
+	_RemoveProcessStatsFromList(psProcessStats);
+	_AddProcessStatsToFrontOfLiveList(psProcessStats);
+} /* _MoveProcessToLiveList */
+#endif
+
+/*************************************************************************/ /*!
+@Function       _AddProcessStatsToFrontOfLiveList
+@Description    Add a statistic to the live list head.
+@Input          psProcessStats  Process stats to add.
+*/ /**************************************************************************/
+static void
+_AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	/* This function should always be called under global list lock g_psLinkedListLock.
+	 */
+	PVR_ASSERT(psProcessStats != NULL);
+
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+	if (g_psLiveList != NULL)
+	{
+		PVR_ASSERT(psProcessStats != g_psLiveList);
+		OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+		g_psLiveList->psPrev = psProcessStats;
+		OSLockRelease(g_psLiveList->hLock);
+		psProcessStats->psNext = g_psLiveList;
+	}
+
+	g_psLiveList = psProcessStats;
+
+	OSLockRelease(psProcessStats->hLock);
+} /* _AddProcessStatsToFrontOfLiveList */
+
+/*************************************************************************/ /*!
+@Function       _AddProcessStatsToFrontOfDeadList
+@Description    Add a statistic to the dead list head.
+@Input          psProcessStats  Process stats to add.
+*/ /**************************************************************************/
+static void
+_AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	PVR_ASSERT(psProcessStats != NULL);
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+	if (g_psDeadList != NULL)
+	{
+		PVR_ASSERT(psProcessStats != g_psDeadList);
+		OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+		g_psDeadList->psPrev = psProcessStats;
+		OSLockRelease(g_psDeadList->hLock);
+		psProcessStats->psNext = g_psDeadList;
+	}
+
+	g_psDeadList = psProcessStats;
+
+	OSLockRelease(psProcessStats->hLock);
+} /* _AddProcessStatsToFrontOfDeadList */
+
+/*************************************************************************/ /*!
+@Function       _RemoveProcessStatsFromList
+@Description    Detaches a process from either the live or dead list.
+@Input          psProcessStats  Process stats to remove.
+*/ /**************************************************************************/
+static void
+_RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	PVR_ASSERT(psProcessStats != NULL);
+
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+	/* Remove the item from the linked lists... */
+	if (g_psLiveList == psProcessStats)
+	{
+		g_psLiveList = psProcessStats->psNext;
+
+		if (g_psLiveList != NULL)
+		{
+			PVR_ASSERT(psProcessStats != g_psLiveList);
+			OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+			g_psLiveList->psPrev = NULL;
+			OSLockRelease(g_psLiveList->hLock);
+
+		}
+	}
+	else if (g_psDeadList == psProcessStats)
+	{
+		g_psDeadList = psProcessStats->psNext;
+
+		if (g_psDeadList != NULL)
+		{
+			PVR_ASSERT(psProcessStats != g_psDeadList);
+			OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+			g_psDeadList->psPrev = NULL;
+			OSLockRelease(g_psDeadList->hLock);
+		}
+	}
+	else
+	{
+		PVRSRV_PROCESS_STATS* psNext = psProcessStats->psNext;
+		PVRSRV_PROCESS_STATS* psPrev = psProcessStats->psPrev;
+
+		if (psProcessStats->psNext != NULL)
+		{
+			PVR_ASSERT(psProcessStats != psNext);
+			OSLockAcquireNested(psNext->hLock, PROCESS_LOCK_SUBCLASS_NEXT);
+			psProcessStats->psNext->psPrev = psPrev;
+			OSLockRelease(psNext->hLock);
+		}
+		if (psProcessStats->psPrev != NULL)
+		{
+			PVR_ASSERT(psProcessStats != psPrev);
+			OSLockAcquireNested(psPrev->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+			psProcessStats->psPrev->psNext = psNext;
+			OSLockRelease(psPrev->hLock);
+		}
+	}
+
+	/* Reset the pointers in this cell, as it is not attached to anything */
+	psProcessStats->psNext = NULL;
+	psProcessStats->psPrev = NULL;
+
+	OSLockRelease(psProcessStats->hLock);
+
+} /* _RemoveProcessStatsFromList */
+
+static PVRSRV_ERROR
+_AllocateProcessStats(PVRSRV_PROCESS_STATS **ppsProcessStats, IMG_PID ownerPid)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_PROCESS_STATS *psProcessStats;
+
+	psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
+	if (psProcessStats == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psProcessStats->pid             = ownerPid;
+	psProcessStats->ui32RefCount    = 1;
+
+	psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]     = 1;
+	psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+
+	eError = OSLockCreateNoStats(&psProcessStats->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	*ppsProcessStats = psProcessStats;
+	return PVRSRV_OK;
+
+e0:
+	OSFreeMemNoStats(psProcessStats);
+	return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+/*************************************************************************/ /*!
+@Function       _DestroyProcessStat
+@Description    Frees memory and resources held by a process statistic.
+@Input          psProcessStats  Process stats to destroy.
+*/ /**************************************************************************/
+static void
+_DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	PVR_ASSERT(psProcessStats != NULL);
+
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+	/* Free the memory statistics... */
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	while (psProcessStats->psMemoryRecords)
+	{
+		List_PVRSRV_MEM_ALLOC_REC_Remove(psProcessStats->psMemoryRecords);
+	}
+#endif
+	OSLockRelease(psProcessStats->hLock);
+
+	/* Destroy the lock */
+	OSLockDestroyNoStats(psProcessStats->hLock);
+
+	/* Free the memory... */
+	OSFreeMemNoStats(psProcessStats);
+} /* _DestroyProcessStat */
+
+#if defined (ENABLE_DEBUGFS_PIDS)
+static inline void
+_createStatsFiles(PVRSRV_OS_STAT_ENTRY* psStatsEntries,
+		  OS_STATS_PRINT_FUNC* pfnStatsPrint)
+{
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+	psStatsEntries->pvOSProcessStatsEntryData = OSCreateStatisticEntry("process_stats",
+									  psStatsEntries->pvOSStatsFolderData,
+									  pfnStatsPrint,
+									  GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_PROCESS));
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+	psStatsEntries->pvOSCacheOpStatsEntryData = OSCreateStatisticEntry("cache_ops_exec",
+									  psStatsEntries->pvOSStatsFolderData,
+									  pfnStatsPrint,
+									  GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_CACHEOP));
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	psStatsEntries->pvOSMemStatsEntryData = OSCreateStatisticEntry("mem_area",
+								      psStatsEntries->pvOSStatsFolderData,
+								      pfnStatsPrint,
+								      GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_MEMORY));
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	psStatsEntries->pvOSRIMemStatsEntryData = OSCreateStatisticEntry("gpu_mem_area",
+									psStatsEntries->pvOSStatsFolderData,
+									pfnStatsPrint,
+									GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_RIMEMORY));
+#endif
+}
+
+static inline void
+_createStatisticsEntries(void)
+{
+	pvOSProcStatsFolder                     = OSCreateStatisticFolder("proc_stats", NULL);
+	gsLiveStatEntries.pvOSStatsFolderData    = OSCreateStatisticFolder("live_pids_stats", pvOSProcStatsFolder);
+	gsRetiredStatEntries.pvOSStatsFolderData = OSCreateStatisticFolder("retired_pids_stats", pvOSProcStatsFolder);
+
+	_createStatsFiles(&gsLiveStatEntries, GenericStatsPrintElementsLive);
+	_createStatsFiles(&gsRetiredStatEntries, GenericStatsPrintElementsRetired);
+
+	_prepareStatsPrivateData();
+}
+
+static inline void
+_removeStatsFiles(PVRSRV_OS_STAT_ENTRY* psStatsEntries)
+{
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+	OSRemoveStatisticEntry(&psStatsEntries->pvOSProcessStatsEntryData);
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+	OSRemoveStatisticEntry(&psStatsEntries->pvOSCacheOpStatsEntryData);
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	OSRemoveStatisticEntry(&psStatsEntries->pvOSMemStatsEntryData);
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	OSRemoveStatisticEntry(&psStatsEntries->pvOSRIMemStatsEntryData);
+#endif
+}
+
+static inline void
+_removeStatisticsEntries(void)
+{
+	_removeStatsFiles(&gsLiveStatEntries);
+	_removeStatsFiles(&gsRetiredStatEntries);
+
+	OSRemoveStatisticFolder(&(gsLiveStatEntries.pvOSStatsFolderData));
+	OSRemoveStatisticFolder(&(gsRetiredStatEntries.pvOSStatsFolderData));
+	OSRemoveStatisticFolder(&pvOSProcStatsFolder);
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsInitialise
+@Description    Entry point for initialising the statistics module.
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVStatsInitialise(void)
+{
+	PVRSRV_ERROR error;
+
+	PVR_ASSERT(g_psLiveList == NULL);
+	PVR_ASSERT(g_psDeadList == NULL);
+	PVR_ASSERT(g_psLinkedListLock == NULL);
+	PVR_ASSERT(gpsSizeTrackingHashTable == NULL);
+	PVR_ASSERT(bProcessStatsInitialised == IMG_FALSE);
+
+	/* We need a lock to protect the linked lists... */
+	error = OSLockCreate(&g_psLinkedListLock);
+	if (error == PVRSRV_OK)
+	{
+		/* We also need a lock to protect the hash table used for size tracking.. */
+		error = OSLockCreate(&gpsSizeTrackingHashTableLock);
+
+		if (error != PVRSRV_OK)
+		{
+			goto e0;
+		}
+
+		/* We also need a lock to protect the GlobalStat counters */
+		error = OSLockCreate(&gsGlobalStats.hGlobalStatsLock);
+		if (error != PVRSRV_OK)
+		{
+			goto e1;
+		}
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+		_createStatisticsEntries();
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+		pvOSProcStats = OSCreateRawStatisticEntry("memtrack_stats", NULL,
+		                                          RawProcessStatsPrintElements);
+#endif
+
+		/* Create power stats entry... */
+		pvOSPowerStatsEntryData = OSCreateStatisticEntry("power_timing_stats",
+														 NULL,
+														 PowerStatsPrintElements,
+														 NULL);
+
+		pvOSGlobalMemEntryRef = OSCreateStatisticEntry(pszDriverStatFilename,
+													   NULL,
+													   GlobalStatsPrintElements,
+													   NULL);
+
+		/* Flag that we are ready to start monitoring memory allocations. */
+
+		gpsSizeTrackingHashTable = HASH_Create(HASH_INITIAL_SIZE);
+
+		OSCachedMemSet(asClockSpeedChanges, 0, sizeof(asClockSpeedChanges));
+
+		bProcessStatsInitialised = IMG_TRUE;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+		/* Register our 'system' PID to hold driver-wide alloc stats */
+		_RegisterProcess(&g_hDriverProcessStats, PVR_SYS_ALLOC_PID);
+#endif
+	}
+	return error;
+e1:
+	OSLockDestroy(gpsSizeTrackingHashTableLock);
+	gpsSizeTrackingHashTableLock = NULL;
+e0:
+	OSLockDestroy(g_psLinkedListLock);
+	g_psLinkedListLock = NULL;
+	return error;
+
+} /* PVRSRVStatsInitialise */
+
+static PVRSRV_ERROR _DumpAllVMallocEntries (uintptr_t k, uintptr_t v);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsDestroy
+@Description    Method for destroying the statistics module data.
+*/ /**************************************************************************/
+void
+PVRSRVStatsDestroy(void)
+{
+	PVR_ASSERT(bProcessStatsInitialised);
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	/* Deregister our 'system' PID which holds driver-wide alloc stats */
+	PVRSRVStatsDeregisterProcess(g_hDriverProcessStats);
+#endif
+
+	/* Stop monitoring memory allocations... */
+	bProcessStatsInitialised = IMG_FALSE;
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+	if (pvOSProcStats)
+	{
+		OSRemoveRawStatisticEntry(&pvOSProcStats);
+	}
+#endif
+
+	/* Destroy the power stats entry... */
+	if (pvOSPowerStatsEntryData != NULL)
+	{
+		OSRemoveStatisticEntry(&pvOSPowerStatsEntryData);
+	}
+
+	/* Destroy the global data entry */
+	if (pvOSGlobalMemEntryRef != NULL)
+	{
+		OSRemoveStatisticEntry(&pvOSGlobalMemEntryRef);
+	}
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+	_removeStatisticsEntries();
+#endif
+
+	/* Destroy the locks... */
+	if (g_psLinkedListLock != NULL)
+	{
+		OSLockDestroy(g_psLinkedListLock);
+		g_psLinkedListLock = NULL;
+	}
+
+	/* Free the live and dead lists... */
+	while (g_psLiveList != NULL)
+	{
+		PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList;
+		_RemoveProcessStatsFromList(psProcessStats);
+		_DestroyProcessStat(psProcessStats);
+	}
+
+	while (g_psDeadList != NULL)
+	{
+		PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList;
+		_RemoveProcessStatsFromList(psProcessStats);
+		_DestroyProcessStat(psProcessStats);
+	}
+
+	if (gpsSizeTrackingHashTable != NULL)
+	{
+		/* Dump all remaining entries in HASH table (list any remaining vmallocs) */
+		HASH_Iterate(gpsSizeTrackingHashTable, (HASH_pfnCallback)_DumpAllVMallocEntries);
+		HASH_Delete(gpsSizeTrackingHashTable);
+	}
+	if (gpsSizeTrackingHashTableLock != NULL)
+	{
+		OSLockDestroy(gpsSizeTrackingHashTableLock);
+		gpsSizeTrackingHashTableLock = NULL;
+	}
+
+	if (NULL != gsGlobalStats.hGlobalStatsLock)
+	{
+		OSLockDestroy(gsGlobalStats.hGlobalStatsLock);
+		gsGlobalStats.hGlobalStatsLock = NULL;
+	}
+
+} /* PVRSRVStatsDestroy */
+
+static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								  size_t uiBytes)
+{
+	OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+	switch (eAllocType)
+	{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes);
+			break;
+#else
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			break;
+#endif
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes);
+			break;
+
+		default:
+			PVR_ASSERT(0);
+			break;
+	}
+	OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+}
+
+static void _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								  size_t uiBytes)
+{
+	OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+	switch (eAllocType)
+	{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes);
+			break;
+#else
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			break;
+#endif
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes);
+			break;
+
+		default:
+			PVR_ASSERT(0);
+			break;
+	}
+	OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+}
+
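+/*
+ * _RegisterProcess() either resurrects an existing entry from the dead
+ * list, bumps the reference count of an entry already on the live list,
+ * or allocates a fresh process statistics node. The returned handle is
+ * the one PVRSRVStatsDeregisterProcess() later expects.
+ */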
+static PVRSRV_ERROR
+_RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid)
+{
+	PVRSRV_PROCESS_STATS*	psProcessStats = NULL;
+	PVRSRV_ERROR			eError;
+
+	PVR_ASSERT(phProcessStats != NULL);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Register process PID %d [%s]",
+			__func__, ownerPid, (ownerPid == PVR_SYS_ALLOC_PID)
+			? "system" : OSGetCurrentClientProcessNameKM()));
+
+	/* Check the PID has not already moved to the dead list... */
+	OSLockAcquire(g_psLinkedListLock);
+	psProcessStats = _FindProcessStatsInDeadList(ownerPid);
+	if (psProcessStats != NULL)
+	{
+		/* Move it back onto the live list! */
+		_RemoveProcessStatsFromList(psProcessStats);
+		_AddProcessStatsToFrontOfLiveList(psProcessStats);
+	}
+	else
+	{
+		/* Check the PID is not already registered in the live list... */
+		psProcessStats = _FindProcessStatsInLiveList(ownerPid);
+	}
+
+	/* If the PID is on the live list then just increment the ref count and return... */
+	if (psProcessStats != NULL)
+	{
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+		psProcessStats->ui32RefCount++;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
+		UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS],
+		                 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]);
+		OSLockRelease(psProcessStats->hLock);
+		OSLockRelease(g_psLinkedListLock);
+
+		*phProcessStats = psProcessStats;
+
+		return PVRSRV_OK;
+	}
+	OSLockRelease(g_psLinkedListLock);
+
+	/* Allocate a new node structure and initialise it... */
+	eError = _AllocateProcessStats(&psProcessStats, ownerPid);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	/* Add it to the live list... */
+	OSLockAcquire(g_psLinkedListLock);
+	_AddProcessStatsToFrontOfLiveList(psProcessStats);
+	OSLockRelease(g_psLinkedListLock);
+
+	/* Done */
+	*phProcessStats = (IMG_HANDLE) psProcessStats;
+
+	return PVRSRV_OK;
+
+e0:
+	*phProcessStats = (IMG_HANDLE) NULL;
+	return eError;
+} /* _RegisterProcess */
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsRegisterProcess
+@Description    Register a process in the statistics list.
+@Output         phProcessStats  Handle to the process to be used to deregister.
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats)
+{
+	return _RegisterProcess(phProcessStats, OSGetCurrentClientProcessIDKM());
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsDeregisterProcess
+@Input          hProcessStats  Handle to the process returned when registered.
+@Description    Method for deregistering a process from the statistics list.
+*/ /**************************************************************************/
+void
+PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats)
+{
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Deregister process entered PID %d [%s]",
+			__func__, OSGetCurrentClientProcessIDKM(),
+			OSGetCurrentProcessName()));
+
+	if (hProcessStats != (IMG_HANDLE) NULL)
+	{
+		PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) hProcessStats;
+
+		/* Lower the reference count, if zero then move it to the dead list */
+		OSLockAcquire(g_psLinkedListLock);
+		if (psProcessStats->ui32RefCount > 0)
+		{
+			OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+			psProcessStats->ui32RefCount--;
+			psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
+
+#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+			if (psProcessStats->ui32RefCount == 0)
+			{
+				OSLockRelease(psProcessStats->hLock);
+				_MoveProcessToDeadList(psProcessStats);
+			} else
+#endif
+			{
+				OSLockRelease(psProcessStats->hLock);
+			}
+		}
+		OSLockRelease(g_psLinkedListLock);
+
+		/* Check if the dead list needs to be reduced */
+		_CompressMemoryUsage();
+	}
+} /* PVRSRVStatsDeregisterProcess */
+
+void
+PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+							 void *pvCpuVAddr,
+							 IMG_CPU_PHYADDR sCpuPAddr,
+							 size_t uiBytes,
+							 void *pvPrivateData,
+							 IMG_PID currentPid)
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+{
+	_PVRSRVStatsAddMemAllocRecord(eAllocType, pvCpuVAddr, sCpuPAddr, uiBytes, pvPrivateData, currentPid, NULL, 0);
+}
+void
+_PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+							  void *pvCpuVAddr,
+							  IMG_CPU_PHYADDR sCpuPAddr,
+							  size_t uiBytes,
+							  void *pvPrivateData,
+							  IMG_PID currentPid,
+							  void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine)
+#endif
+{
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	IMG_PID				   currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+	PVRSRV_DATA*		   psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_MEM_ALLOC_REC*  psRecord = NULL;
+	PVRSRV_PROCESS_STATS*  psProcessStats;
+	enum { PVRSRV_PROC_NOTFOUND,
+	       PVRSRV_PROC_FOUND,
+	       PVRSRV_PROC_RESURRECTED
+	     } eProcSearch = PVRSRV_PROC_FOUND;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: Called when process statistics module is not initialised",
+				 __func__));
+#endif
+		return;
+	}
+
+	/*
+	 * To prevent a recursive loop, we make the memory allocations for our
+	 * memstat records via OSAllocMemNoStats(), which does not try to
+	 * create a memstat record entry.
+	 */
+
+	/* Allocate the memory record... */
+	psRecord = OSAllocZMemNoStats(sizeof(PVRSRV_MEM_ALLOC_REC));
+	if (psRecord == NULL)
+	{
+		return;
+	}
+
+	psRecord->eAllocType       = eAllocType;
+	psRecord->pvCpuVAddr       = pvCpuVAddr;
+	psRecord->sCpuPAddr.uiAddr = sCpuPAddr.uiAddr;
+	psRecord->uiBytes          = uiBytes;
+	psRecord->pvPrivateData    = pvPrivateData;
+
+	psRecord->pid = currentPid;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+	psRecord->pvAllocdFromFile = pvAllocFromFile;
+	psRecord->ui32AllocdFromLine = ui32AllocFromLine;
+#endif
+
+	_increase_global_stat(eAllocType, uiBytes);
+	/* Lock while we find the correct process... */
+	OSLockAcquire(g_psLinkedListLock);
+
+	if (psPVRSRVData)
+	{
+		if ((currentPid == psPVRSRVData->cleanupThreadPid) &&
+		    (currentCleanupPid != 0))
+		{
+			psProcessStats = _FindProcessStats(currentCleanupPid);
+		}
+		else
+		{
+			psProcessStats = _FindProcessStatsInLiveList(currentPid);
+			if (!psProcessStats)
+			{
+				psProcessStats = _FindProcessStatsInDeadList(currentPid);
+				eProcSearch = PVRSRV_PROC_RESURRECTED;
+			}
+		}
+	}
+	else
+	{
+		psProcessStats = _FindProcessStatsInLiveList(currentPid);
+		if (!psProcessStats)
+		{
+			psProcessStats = _FindProcessStatsInDeadList(currentPid);
+			eProcSearch = PVRSRV_PROC_RESURRECTED;
+		}
+	}
+
+	if (psProcessStats == NULL)
+	{
+		eProcSearch = PVRSRV_PROC_NOTFOUND;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: Process stat increment called for 'unknown' process PID(%d)",
+				 __func__, currentPid));
+
+		if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK)
+		{
+			OSLockRelease(g_psLinkedListLock);
+			goto e0;
+		}
+
+		/* Add it to the live list... */
+		_AddProcessStatsToFrontOfLiveList(psProcessStats);
+
+		OSLockRelease(g_psLinkedListLock);
+
+#else  /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+		OSLockRelease(g_psLinkedListLock);
+#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+
+		if (psProcessStats == NULL)
+		{
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s UNABLE TO CREATE process_stats entry for pid %d [%s] (" IMG_SIZE_FMTSPEC " bytes)",
+					 __func__, currentPid, OSGetCurrentProcessName(), uiBytes));
+#endif
+			if (psRecord != NULL)
+			{
+				OSFreeMemNoStats(psRecord);
+			}
+			return;
+		}
+	}
+	else
+	{
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		if (eProcSearch == PVRSRV_PROC_RESURRECTED)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+				 "%s: Process stat incremented on 'dead' process PID(%d)",
+				 __func__, currentPid));
+			/* Move process from dead list to live list */
+			_MoveProcessToLiveList(psProcessStats);
+		}
+#endif
+		OSLockRelease(g_psLinkedListLock);
+	}
+
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+	/* Insert the memory record... */
+	if (psRecord != NULL)
+	{
+		List_PVRSRV_MEM_ALLOC_REC_Insert(&psProcessStats->psMemoryRecords, psRecord);
+	}
+
+	/* Update the memory watermarks... */
+	switch (eAllocType)
+	{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		{
+			if (psRecord != NULL)
+			{
+				if (pvCpuVAddr == NULL)
+				{
+					break;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+		{
+			if (psRecord != NULL)
+			{
+				if (pvCpuVAddr == NULL)
+				{
+					break;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+#else
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+		break;
+#endif
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+		{
+			if (psRecord != NULL)
+			{
+				if (pvCpuVAddr == NULL)
+				{
+					break;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+		{
+			if (psRecord != NULL)
+			{
+				if (pvCpuVAddr == NULL)
+				{
+					break;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+		{
+			if (psRecord != NULL)
+			{
+				psRecord->ui64Key = sCpuPAddr.uiAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+		{
+			if (psRecord != NULL)
+			{
+				if (pvCpuVAddr == NULL)
+				{
+					break;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+		{
+			if (psRecord != NULL)
+			{
+				psRecord->ui64Key = sCpuPAddr.uiAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+		{
+			if (psRecord != NULL)
+			{
+				psRecord->ui64Key = sCpuPAddr.uiAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+		{
+			if (psRecord != NULL)
+			{
+				if (pvCpuVAddr == NULL)
+				{
+					break;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		default:
+		{
+			PVR_ASSERT(0);
+		}
+		break;
+	}
+	OSLockRelease(psProcessStats->hLock);
+
+	return;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+e0:
+	OSFreeMemNoStats(psRecord);
+	return;
+#endif
+#endif
+} /* PVRSRVStatsAddMemAllocRecord */
+
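+/*
+ * Records are matched on the (ui64Key, eAllocType) pair: the key is the
+ * CPU virtual address for host allocations and mappings, and the CPU
+ * physical address for LMA/UMA page allocations, mirroring how the keys
+ * were assigned when the records were added above.
+ */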
+void
+PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								IMG_UINT64 ui64Key,
+								IMG_PID currentPid)
+{
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	IMG_PID				   currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+	PVRSRV_DATA*		   psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_PROCESS_STATS*  psProcessStats = NULL;
+	PVRSRV_MEM_ALLOC_REC*  psRecord		  = NULL;
+	IMG_BOOL			   bFound	      = IMG_FALSE;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: Called when process statistics module is not initialised",
+				 __func__));
+#endif
+		return;
+	}
+
+	/* Lock while we find the correct process and remove this record... */
+	OSLockAcquire(g_psLinkedListLock);
+
+	if (psPVRSRVData)
+	{
+		if ((currentPid == psPVRSRVData->cleanupThreadPid) &&
+		    (currentCleanupPid != 0))
+		{
+			psProcessStats = _FindProcessStats(currentCleanupPid);
+		}
+		else
+		{
+			psProcessStats = _FindProcessStats(currentPid);
+		}
+	}
+	else
+	{
+		psProcessStats = _FindProcessStats(currentPid);
+	}
+	if (psProcessStats != NULL)
+	{
+		psRecord      = psProcessStats->psMemoryRecords;
+		while (psRecord != NULL)
+		{
+			if (psRecord->ui64Key == ui64Key  &&  psRecord->eAllocType == eAllocType)
+			{
+				bFound = IMG_TRUE;
+				break;
+			}
+
+			psRecord = psRecord->psNext;
+		}
+	}
+
+	/* If not found, we need to do a full search in case it was allocated to a different PID... */
+	if (!bFound)
+	{
+		PVRSRV_PROCESS_STATS* psProcessStatsAlreadyChecked = psProcessStats;
+
+		/* Search the live list first... */
+		psProcessStats = g_psLiveList;
+		while (psProcessStats != NULL)
+		{
+			if (psProcessStats != psProcessStatsAlreadyChecked)
+			{
+				psRecord      = psProcessStats->psMemoryRecords;
+				while (psRecord != NULL)
+				{
+					if (psRecord->ui64Key == ui64Key  &&  psRecord->eAllocType == eAllocType)
+					{
+						bFound = IMG_TRUE;
+						break;
+					}
+
+					psRecord = psRecord->psNext;
+				}
+			}
+
+			if (bFound)
+			{
+				break;
+			}
+
+			psProcessStats = psProcessStats->psNext;
+		}
+
+		/* If not found, then search the dead list next... */
+		if (!bFound)
+		{
+			psProcessStats = g_psDeadList;
+			while (psProcessStats != NULL)
+			{
+				if (psProcessStats != psProcessStatsAlreadyChecked)
+				{
+					psRecord      = psProcessStats->psMemoryRecords;
+					while (psRecord != NULL)
+					{
+						if (psRecord->ui64Key == ui64Key  &&  psRecord->eAllocType == eAllocType)
+						{
+							bFound = IMG_TRUE;
+							break;
+						}
+
+						psRecord = psRecord->psNext;
+					}
+				}
+
+				if (bFound)
+				{
+					break;
+				}
+
+				psProcessStats = psProcessStats->psNext;
+			}
+		}
+	}
+
+	/* Update the watermark and remove this record... */
+	if (bFound)
+	{
+		_decrease_global_stat(eAllocType, psRecord->uiBytes);
+
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+		_DecreaseProcStatValue(eAllocType,
+		                       psProcessStats,
+		                       psRecord->uiBytes);
+
+		List_PVRSRV_MEM_ALLOC_REC_Remove(psRecord);
+		OSLockRelease(psProcessStats->hLock);
+		OSLockRelease(g_psLinkedListLock);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		/* If all stats are now zero, move the entry for this process to the dead list */
+		if (psProcessStats->ui32StatAllocFlags == 0)
+		{
+			OSLockAcquire(g_psLinkedListLock);
+			_MoveProcessToDeadList(psProcessStats);
+			OSLockRelease(g_psLinkedListLock);
+
+			/* Check if the dead list needs to be reduced */
+			_CompressMemoryUsage();
+		}
+#endif
+		/*
+		 * Free the record outside the lock so we don't deadlock and so we
+		 * reduce the time the lock is held.
+		 */
+		OSFreeMemNoStats(psRecord);
+	}
+	else
+	{
+		OSLockRelease(g_psLinkedListLock);
+	}
+
+#else
+	PVR_UNREFERENCED_PARAMETER(eAllocType);
+	PVR_UNREFERENCED_PARAMETER(ui64Key);
+	PVR_UNREFERENCED_PARAMETER(currentPid);
+#endif
+} /* PVRSRVStatsRemoveMemAllocRecord */
+
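+/*
+ * HASH_Iterate() callback used by PVRSRVStatsDestroy() to report any
+ * size-tracking entries that were never untracked (i.e. allocations
+ * still outstanding) before the hash table is deleted.
+ */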
+static PVRSRV_ERROR _DumpAllVMallocEntries(uintptr_t k, uintptr_t v)
+{
+#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN)
+	_PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)(uintptr_t)v;
+	IMG_UINT64 uiCpuVAddr = (IMG_UINT64)k;
+
+	PVR_DPF((PVR_DBG_ERROR, "%s: " IMG_SIZE_FMTSPEC " bytes @ 0x%" IMG_UINT64_FMTSPECx " (PID %u)", __func__,
+	         psNewTrackingHashEntry->uiSizeInBytes,
+	         uiCpuVAddr,
+	         psNewTrackingHashEntry->uiPid));
+#endif
+	return PVRSRV_OK;
+}
+
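+/*
+ * Tracked variant of the stat increment: the CPU virtual address is used
+ * as the hash key and the size/PID are stashed in the tracking table, so
+ * that the matching PVRSRVStatsDecrMemAllocStatAndUntrack() call can
+ * credit the right process without the caller having to remember either.
+ * A hypothetical caller might pair the two as follows:
+ *
+ *     pvAddr = OSAllocMemNoStats(uiBytes);
+ *     PVRSRVStatsIncrMemAllocStatAndTrack(eType, uiBytes,
+ *                                         (IMG_UINT64)(uintptr_t)pvAddr,
+ *                                         uiPid);
+ *     ...
+ *     PVRSRVStatsDecrMemAllocStatAndUntrack(eType,
+ *                                           (IMG_UINT64)(uintptr_t)pvAddr);
+ */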
+void
+PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+									size_t uiBytes,
+									IMG_UINT64 uiCpuVAddr,
+									IMG_PID uiPid)
+{
+	IMG_BOOL bRes = IMG_FALSE;
+	_PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = NULL;
+
+	if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL))
+	{
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: Called when process statistics module is not initialised",
+				 __func__));
+#endif
+		return;
+	}
+
+	/* Alloc untracked memory for the new hash table entry */
+	psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)OSAllocMemNoStats(sizeof(*psNewTrackingHashEntry));
+	if (psNewTrackingHashEntry)
+	{
+		/* Fill-in the size of the allocation and PID of the allocating process */
+		psNewTrackingHashEntry->uiSizeInBytes = uiBytes;
+		psNewTrackingHashEntry->uiPid = uiPid;
+		OSLockAcquire(gpsSizeTrackingHashTableLock);
+		/* Insert address of the new struct into the hash table */
+		bRes = HASH_Insert(gpsSizeTrackingHashTable, uiCpuVAddr, (uintptr_t)psNewTrackingHashEntry);
+		OSLockRelease(gpsSizeTrackingHashTableLock);
+	}
+
+	if (psNewTrackingHashEntry)
+	{
+		if (bRes)
+		{
+			PVRSRVStatsIncrMemAllocStat(eAllocType, uiBytes, uiPid);
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d HASH_Insert() failed!",
+					 __func__, __LINE__));
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "*** %s : @ line %d Failed to alloc memory for psNewTrackingHashEntry!",
+				 __func__, __LINE__));
+	}
+}
+
+void
+PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                            size_t uiBytes,
+                            IMG_PID currentPid)
+{
+	IMG_PID				  currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+	PVRSRV_DATA* 		  psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_PROCESS_STATS* psProcessStats = NULL;
+	enum { PVRSRV_PROC_NOTFOUND,
+	       PVRSRV_PROC_FOUND,
+	       PVRSRV_PROC_RESURRECTED
+	     } eProcSearch = PVRSRV_PROC_FOUND;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: Called when process statistics module is not initialised",
+				 __func__));
+#endif
+		return;
+	}
+
+	_increase_global_stat(eAllocType, uiBytes);
+	OSLockAcquire(g_psLinkedListLock);
+	if (psPVRSRVData)
+	{
+		if ((currentPid == psPVRSRVData->cleanupThreadPid) &&
+		    (currentCleanupPid != 0))
+		{
+			psProcessStats = _FindProcessStats(currentCleanupPid);
+		}
+		else
+		{
+			psProcessStats = _FindProcessStatsInLiveList(currentPid);
+			if (!psProcessStats)
+			{
+				psProcessStats = _FindProcessStatsInDeadList(currentPid);
+				eProcSearch = PVRSRV_PROC_RESURRECTED;
+			}
+		}
+	}
+	else
+	{
+		psProcessStats = _FindProcessStatsInLiveList(currentPid);
+		if (!psProcessStats)
+		{
+			psProcessStats = _FindProcessStatsInDeadList(currentPid);
+			eProcSearch = PVRSRV_PROC_RESURRECTED;
+		}
+	}
+
+	if (psProcessStats == NULL)
+	{
+		eProcSearch = PVRSRV_PROC_NOTFOUND;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: Process stat increment called for 'unknown' process PID(%d)",
+				 __func__, currentPid));
+
+		if (bProcessStatsInitialised)
+		{
+			if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK)
+			{
+				/* Don't return with g_psLinkedListLock still held */
+				OSLockRelease(g_psLinkedListLock);
+				return;
+			}
+
+			/* Add it to the live list... */
+			_AddProcessStatsToFrontOfLiveList(psProcessStats);
+		}
+#else
+		OSLockRelease(g_psLinkedListLock);
+#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+
+	}
+
+	if (psProcessStats != NULL)
+	{
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		if (eProcSearch == PVRSRV_PROC_RESURRECTED)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					 "%s: Process stat incremented on 'dead' process PID(%d)",
+					 __func__, currentPid));
+
+			/* Move process from dead list to live list */
+			_MoveProcessToLiveList(psProcessStats);
+		}
+#endif
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		/* Release the list lock as soon as we acquire the process lock;
+		 * this ensures that if the process is on the dead list its entry
+		 * cannot be deleted or modified. */
+		OSLockRelease(g_psLinkedListLock);
+		/* Update the memory watermarks... */
+		switch (eAllocType)
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+			case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+#else
+			case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			break;
+#endif
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			default:
+			{
+				PVR_ASSERT(0);
+			}
+			break;
+		}
+		OSLockRelease(psProcessStats->hLock);
+	}
+
+}
+
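+/*
+ * _DecreaseProcStatValue() expects the process statistics lock to be held
+ * by the caller. When a stat drops to zero its bit in ui32StatAllocFlags
+ * is cleared; once all bits are clear the callers above may retire the
+ * entry to the dead list.
+ */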
+static void
+_DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                       PVRSRV_PROCESS_STATS* psProcessStats,
+                       IMG_UINT32 uiBytes)
+{
+	switch (eAllocType)
+	{
+	#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+	#else
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+		break;
+	#endif
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		default:
+		{
+			PVR_ASSERT(0);
+		}
+		break;
+	}
+
+}
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+void RawProcessStatsPrintElements(void *pvFile,
+                                  void *pvStatPtr,
+                                  OS_STATS_PRINTF_FUNC *pfnOSStatsPrintf)
+{
+	PVRSRV_PROCESS_STATS *psProcessStats;
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: pfnOSStatsPrintf not set", __func__));
+		return;
+	}
+
+	pfnOSStatsPrintf(pvFile, "%s,%s,%s,%s,%s,%s\n",
+	                 "PID",
+	                 "MemoryUsageKMalloc",           // PVRSRV_PROCESS_STAT_TYPE_KMALLOC
+	                 "MemoryUsageAllocPTMemoryUMA",  // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA
+	                 "MemoryUsageAllocPTMemoryLMA",  // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA
+	                 "MemoryUsageAllocGPUMemLMA",    // PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES
+	                 "MemoryUsageAllocGPUMemUMA"     // PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES
+	                 );
+
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = g_psLiveList;
+
+	while (psProcessStats != NULL)
+	{
+		if (psProcessStats->pid != PVR_SYS_ALLOC_PID)
+		{
+			pfnOSStatsPrintf(pvFile, "%d,%d,%d,%d,%d,%d\n",
+							 psProcessStats->pid,
+							 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC],
+							 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA],
+							 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA],
+							 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES],
+							 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES]
+							 );
+		}
+
+		psProcessStats = psProcessStats->psNext;
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+} /* RawProcessStatsPrintElements */
+#endif
+
+void
+PVRSRVStatsDecrMemKAllocStat(size_t uiBytes,
+                             IMG_PID decrPID)
+{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+	PVRSRV_PROCESS_STATS*  psProcessStats;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	_decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, uiBytes);
+
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(decrPID);
+
+	if (psProcessStats != NULL)
+	{
+		/* Decrement the kmalloc memory stat... */
+		DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes);
+		DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+#endif
+}
+
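+/*
+ * _StatsDecrMemTrackedStat() reverses one tracked allocation: the global
+ * stat is decreased by the recorded size and, if the owning process is
+ * still known, its per-process value is decreased under its lock.
+ */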
+static void
+_StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry,
+                        PVRSRV_MEM_ALLOC_TYPE eAllocType)
+{
+	PVRSRV_PROCESS_STATS*  psProcessStats;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	_decrease_global_stat(eAllocType, psTrackingHashEntry->uiSizeInBytes);
+
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(psTrackingHashEntry->uiPid);
+
+	if (psProcessStats != NULL)
+	{
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		/* Decrement the memory stat... */
+		_DecreaseProcStatValue(eAllocType,
+		                       psProcessStats,
+		                       psTrackingHashEntry->uiSizeInBytes);
+		OSLockRelease(psProcessStats->hLock);
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+}
+
+void
+PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+									  IMG_UINT64 uiCpuVAddr)
+{
+	_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry = NULL;
+
+	if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL))
+	{
+		return;
+	}
+
+	OSLockAcquire(gpsSizeTrackingHashTableLock);
+	psTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)HASH_Remove(gpsSizeTrackingHashTable, uiCpuVAddr);
+	OSLockRelease(gpsSizeTrackingHashTableLock);
+	if (psTrackingHashEntry)
+	{
+		_StatsDecrMemTrackedStat(psTrackingHashEntry, eAllocType);
+		OSFreeMemNoStats(psTrackingHashEntry);
+	}
+}
+
+void
+PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+							size_t uiBytes,
+							IMG_PID currentPid)
+{
+	IMG_PID				   currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+	PVRSRV_DATA* 		   psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_PROCESS_STATS*  psProcessStats = NULL;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	_decrease_global_stat(eAllocType, uiBytes);
+
+	OSLockAcquire(g_psLinkedListLock);
+	if (psPVRSRVData)
+	{
+		if ((currentPid == psPVRSRVData->cleanupThreadPid) &&
+		    (currentCleanupPid != 0))
+		{
+			psProcessStats = _FindProcessStats(currentCleanupPid);
+		}
+		else
+		{
+			psProcessStats = _FindProcessStats(currentPid);
+		}
+	}
+	else
+	{
+		psProcessStats = _FindProcessStats(currentPid);
+	}
+
+	if (psProcessStats != NULL)
+	{
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		/* Release the list lock as soon as we acquire the process lock;
+		 * this ensures that if the process is on the dead list its entry
+		 * cannot be deleted or modified. */
+		OSLockRelease(g_psLinkedListLock);
+		/* Update the memory watermarks... */
+		_DecreaseProcStatValue(eAllocType,
+		                       psProcessStats,
+		                       uiBytes);
+		OSLockRelease(psProcessStats->hLock);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		/* If all stats are now zero, move the entry for this process to the dead list */
+		if (psProcessStats->ui32StatAllocFlags == 0)
+		{
+			OSLockAcquire(g_psLinkedListLock);
+			_MoveProcessToDeadList(psProcessStats);
+			OSLockRelease(g_psLinkedListLock);
+
+			/* Check if the dead list needs to be reduced */
+			_CompressMemoryUsage();
+		}
+#endif
+	}
+	else
+	{
+		OSLockRelease(g_psLinkedListLock);
+	}
+}
+
+/* For now we do not want to expose the global stats API,
+ * so we wrap it in these functions specific to pooled pages.
+ * If the global stats ever need to be modified directly somewhere else,
+ * these functions should be replaced with more general ones.
+ */
+void
+PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes)
+{
+	_increase_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
+}
+
+void
+PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes)
+{
+	_decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
+}
+
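+/*
+ * Example (hypothetical page-pool caller): a UMA page pool could call
+ * PVRSRVStatsIncrMemAllocPoolStat(uiNumPages << OSGetPageShift()) as
+ * pages enter the pool and the matching Decr call as the pool is
+ * drained, keeping the UMA pool watermark in step with the pool size.
+ */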
+void
+PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+									IMG_UINT32 ui32TotalNumOutOfMemory,
+									IMG_UINT32 ui32NumTAStores,
+									IMG_UINT32 ui32Num3DStores,
+									IMG_UINT32 ui32NumSHStores,
+									IMG_UINT32 ui32NumCDMStores,
+									IMG_PID pidOwner)
+{
+	IMG_PID	pidCurrent = pidOwner;
+
+	PVRSRV_PROCESS_STATS* psProcessStats;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	/* Lock while we find the correct process and update the record... */
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(pidCurrent);
+	if (psProcessStats != NULL)
+	{
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_PRS]       += ui32TotalNumPartialRenders;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_OOMS]      += ui32TotalNumOutOfMemory;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES] += ui32NumTAStores;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES] += ui32NumSHStores;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores;
+		OSLockRelease(psProcessStats->hLock);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateRenderContextStats: Null process. Pid=%d", pidCurrent));
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateRenderContextStats */
+
+void
+PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+							   IMG_UINT32 ui32NumReqByFW,
+							   IMG_PID owner)
+{
+	IMG_PID				  currentPid = (owner == 0) ? OSGetCurrentClientProcessIDKM() : owner;
+	PVRSRV_PROCESS_STATS* psProcessStats;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	/* Lock while we find the correct process and update the record... */
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(currentPid);
+	if (psProcessStats != NULL)
+	{
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP] += ui32NumReqByApp;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW]  += ui32NumReqByFW;
+		OSLockRelease(psProcessStats->hLock);
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateZSBufferStats */
+
+void
+PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+							   IMG_UINT32 ui32NumGrowReqByFW,
+							   IMG_UINT32 ui32InitFLPages,
+							   IMG_UINT32 ui32NumHighPages,
+							   IMG_PID ownerPid)
+{
+	IMG_PID				  currentPid = (ownerPid != 0) ? ownerPid : OSGetCurrentClientProcessIDKM();
+	PVRSRV_PROCESS_STATS* psProcessStats;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	/* Lock while we find the correct process and update the record... */
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(currentPid);
+
+	if (psProcessStats != NULL)
+	{
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP] += ui32NumGrowReqByApp;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW]  += ui32NumGrowReqByFW;
+
+		UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT],
+				(IMG_INT32) ui32InitFLPages);
+
+		UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES],
+				(IMG_INT32) ui32NumHighPages);
+
+		OSLockRelease(psProcessStats->hLock);
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateFreelistStats */
+
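+/*
+ * The GenericStatsPrintElements*() helpers below walk the live and dead
+ * process lists under g_psLinkedListLock and delegate the per-entry
+ * formatting to the pfnStatsPrintElements callback registered for the
+ * debugfs file being read.
+ */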
+#if defined(ENABLE_DEBUGFS_PIDS)
+
+void
+GenericStatsPrintElementsLive(void *pvFile,
+			      void *pvStatPtr,
+			      OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVRSRV_STAT_PV_DATA *psStatType = (PVRSRV_STAT_PV_DATA *)pvStatPtr;
+	PVRSRV_PROCESS_STATS* psProcessStats;
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		return;
+	}
+	PVR_ASSERT(psStatType->pfnStatsPrintElements != NULL);
+
+	pfnOSStatsPrintf(pvFile, "%s\n", psStatType->szLiveStatsHeaderStr);
+
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = g_psLiveList;
+
+	if (psProcessStats == NULL)
+	{
+		pfnOSStatsPrintf(pvFile, "No Stats to display\n%s\n", g_szSeparatorStr);
+	}
+	else
+	{
+		while (psProcessStats != NULL)
+		{
+			psStatType->pfnStatsPrintElements(pvFile, psProcessStats, pfnOSStatsPrintf);
+			psProcessStats = psProcessStats->psNext;
+			pfnOSStatsPrintf(pvFile, "%s\n", g_szSeparatorStr);
+		}
+	}
+	OSLockRelease(g_psLinkedListLock);
+}
+
+void
+GenericStatsPrintElementsRetired(void *pvFile,
+				 void *pvStatPtr,
+				 OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVRSRV_STAT_PV_DATA *psStatType = (PVRSRV_STAT_PV_DATA *)pvStatPtr;
+	PVRSRV_PROCESS_STATS* psProcessStats;
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		return;
+	}
+	PVR_ASSERT(psStatType->pfnStatsPrintElements != NULL);
+
+	pfnOSStatsPrintf(pvFile, "%s\n", psStatType->szRetiredStatsHeaderStr);
+
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = g_psDeadList;
+
+	if (psProcessStats == NULL)
+	{
+		pfnOSStatsPrintf(pvFile, "No Stats to display\n%s\n", g_szSeparatorStr);
+	}
+	else
+	{
+		while (psProcessStats != NULL)
+		{
+			psStatType->pfnStatsPrintElements(pvFile, psProcessStats, pfnOSStatsPrintf);
+			psProcessStats = psProcessStats->psNext;
+			pfnOSStatsPrintf(pvFile, "%s\n", g_szSeparatorStr);
+		}
+	}
+	OSLockRelease(g_psLinkedListLock);
+}
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+/*************************************************************************/ /*!
+@Function       ProcessStatsPrintElements
+@Description    Prints all elements for this process statistic record.
+@Input          pvStatPtr         Pointer to statistics structure.
+@Input          pfnOSStatsPrintf  Printf function to use for output.
+*/ /**************************************************************************/
+void
+ProcessStatsPrintElements(void *pvFile,
+			  PVRSRV_PROCESS_STATS* psProcessStats,
+			  OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	IMG_UINT32 ui32StatNumber;
+
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+	pfnOSStatsPrintf(pvFile, "PID %u\n", psProcessStats->pid);
+
+	/* Loop through all the values and print them... */
+	for (ui32StatNumber = 0;
+	     ui32StatNumber < ARRAY_SIZE(pszProcessStatType);
+	     ui32StatNumber++)
+	{
+		if (OSStringCompare(pszProcessStatType[ui32StatNumber], "") != 0)
+		{
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+			if ((ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) ||
+			    (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES))
+			{
+				/* get the stat from RI */
+				IMG_INT32 ui32Total = RITotalAllocProcessKM(psProcessStats->pid,
+									    (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES)
+									    ? PHYS_HEAP_TYPE_LMA : PHYS_HEAP_TYPE_UMA);
+
+				pfnOSStatsPrintf(pvFile, "%-34s%10d %8dK\n",
+						 pszProcessStatType[ui32StatNumber], ui32Total, ui32Total>>10);
+			}
+			else
+#endif
+			{
+				if (ui32StatNumber >= PVRSRV_PROCESS_STAT_TYPE_KMALLOC &&
+					ui32StatNumber <= PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX)
+				{
+					pfnOSStatsPrintf(pvFile, "%-34s%10d %8dK\n",
+							 pszProcessStatType[ui32StatNumber],
+							 psProcessStats->i32StatValue[ui32StatNumber],
+							 psProcessStats->i32StatValue[ui32StatNumber] >> 10);
+				}
+				else
+				{
+					pfnOSStatsPrintf(pvFile, "%-34s%10d\n",
+							 pszProcessStatType[ui32StatNumber],
+							 psProcessStats->i32StatValue[ui32StatNumber]);
+				}
+			}
+		}
+	}
+
+	OSLockRelease(psProcessStats->hLock);
+} /* ProcessStatsPrintElements */
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+void
+PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp,
+							IMG_UINT32 ui32OpSeqNum,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEV_PHYADDR sDevPAddr,
+							IMG_UINT32 eFenceOpType,
+#endif
+							IMG_DEVMEM_SIZE_T uiOffset,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_UINT64 ui64ExecuteTime,
+							IMG_BOOL bRangeBasedFlush,
+							IMG_BOOL bUserModeFlush,
+							IMG_BOOL bIsFence,
+							IMG_PID ownerPid)
+{
+	IMG_PID				  currentPid = (ownerPid != 0) ? ownerPid : OSGetCurrentClientProcessIDKM();
+	PVRSRV_PROCESS_STATS* psProcessStats;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	/* Lock while we find the correct process and update the record... */
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(currentPid);
+
+	if (psProcessStats != NULL)
+	{
+		IMG_INT32 Idx;
+
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+		/* Look-up next buffer write index */
+		Idx = psProcessStats->uiCacheOpWriteIndex;
+		psProcessStats->uiCacheOpWriteIndex = INCREMENT_CACHEOP_STAT_IDX_WRAP(Idx);
+
+		/* Store all CacheOp meta-data */
+		psProcessStats->asCacheOp[Idx].uiCacheOp = uiCacheOp;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+		psProcessStats->asCacheOp[Idx].sDevVAddr = sDevVAddr;
+		psProcessStats->asCacheOp[Idx].sDevPAddr = sDevPAddr;
+		psProcessStats->asCacheOp[Idx].eFenceOpType = eFenceOpType;
+#endif
+		psProcessStats->asCacheOp[Idx].uiOffset = uiOffset;
+		psProcessStats->asCacheOp[Idx].uiSize = uiSize;
+		psProcessStats->asCacheOp[Idx].bRangeBasedFlush = bRangeBasedFlush;
+		psProcessStats->asCacheOp[Idx].bUserModeFlush = bUserModeFlush;
+		psProcessStats->asCacheOp[Idx].ui64ExecuteTime = ui64ExecuteTime;
+		psProcessStats->asCacheOp[Idx].ui32OpSeqNum = ui32OpSeqNum;
+		psProcessStats->asCacheOp[Idx].bIsFence = bIsFence;
+
+		OSLockRelease(psProcessStats->hLock);
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateCacheOpStats */
+
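+/*
+ * asCacheOp[] is a fixed-size circular buffer: the writer above advances
+ * uiCacheOpWriteIndex with INCREMENT_CACHEOP_STAT_IDX_WRAP() and the
+ * reader below walks backwards from a snapshot of that index until it
+ * meets an unused slot (ui32OpSeqNum == 0) or wraps back to the start.
+ */
+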
+/*************************************************************************/ /*!
+@Function       CacheOpStatsPrintElements
+@Description    Prints all elements for this process statistic CacheOp record.
+@Input          pvStatPtr         Pointer to statistics structure.
+@Input          pfnOSStatsPrintf  Printf function to use for output.
+*/ /**************************************************************************/
+void
+CacheOpStatsPrintElements(void *pvFile,
+			  PVRSRV_PROCESS_STATS* psProcessStats,
+			  OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	IMG_CHAR  *pszCacheOpType, *pszFlushType, *pszFlushMode;
+	IMG_INT32 i32WriteIdx, i32ReadIdx;
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+	#define CACHEOP_RI_PRINTF_HEADER \
+		"%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12s %-12s\n"
+	#define CACHEOP_RI_PRINTF_FENCE	 \
+		"%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12llu 0x%-10x\n"
+	#define CACHEOP_RI_PRINTF		\
+		"%-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-12llu 0x%-10x\n"
+#else
+	#define CACHEOP_PRINTF_HEADER	\
+		"%-10s %-10s %-5s %-10s %-10s %-12s %-12s\n"
+	#define CACHEOP_PRINTF_FENCE	 \
+		"%-10s %-10s %-5s %-10s %-10s %-12llu 0x%-10x\n"
+	#define CACHEOP_PRINTF		 	\
+		"%-10s %-10s %-5s 0x%-8llx 0x%-8llx %-12llu 0x%-10x\n"
+#endif
+
+	pfnOSStatsPrintf(pvFile, "PID %u\n", psProcessStats->pid);
+
+	/* File header info */
+	pfnOSStatsPrintf(pvFile,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+					CACHEOP_RI_PRINTF_HEADER,
+#else
+					CACHEOP_PRINTF_HEADER,
+#endif
+					"CacheOp",
+					"Type",
+					"Mode",
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+					"DevVAddr",
+					"DevPAddr",
+#endif
+					"Offset",
+					"Size",
+					"Time (us)",
+					"SeqNo");
+
+	/* Take a snapshot of write index, read backwards in buffer
+	   and wrap round at boundary */
+	i32WriteIdx = psProcessStats->uiCacheOpWriteIndex;
+	for (i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32WriteIdx);
+		 i32ReadIdx != i32WriteIdx;
+		 i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32ReadIdx))
+	{
+		IMG_UINT64 ui64ExecuteTime;
+
+		if (!psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum)
+		{
+			break;
+		}
+
+		ui64ExecuteTime = psProcessStats->asCacheOp[i32ReadIdx].ui64ExecuteTime;
+
+		if (psProcessStats->asCacheOp[i32ReadIdx].bIsFence)
+		{
+			IMG_CHAR *pszFenceType = "";
+			pszCacheOpType = "Fence";
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+			switch (psProcessStats->asCacheOp[i32ReadIdx].eFenceOpType)
+			{
+				case RGXFWIF_DM_GP:
+					pszFenceType = "GP";
+					break;
+
+				case RGXFWIF_DM_TDM:
+					/* Also case RGXFWIF_DM_2D: */
+					pszFenceType = "TDM/2D";
+					break;
+
+				case RGXFWIF_DM_TA:
+					pszFenceType = "TA";
+					break;
+
+				case RGXFWIF_DM_3D:
+					pszFenceType = "3D";
+					break;
+
+				case RGXFWIF_DM_CDM:
+					pszFenceType = "CDM";
+					break;
+
+				default:
+					PVR_ASSERT(0);
+					break;
+			}
+#endif
+
+			pfnOSStatsPrintf(pvFile,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+							CACHEOP_RI_PRINTF_FENCE,
+#else
+							CACHEOP_PRINTF_FENCE,
+#endif
+							pszCacheOpType,
+							pszFenceType,
+							"",
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+							"",
+							"",
+#endif
+							"",
+							"",
+							ui64ExecuteTime,
+							psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum);
+		}
+		else
+		{
+			if (psProcessStats->asCacheOp[i32ReadIdx].bRangeBasedFlush)
+			{
+				IMG_DEVMEM_SIZE_T ui64NumOfPages;
+
+				ui64NumOfPages = psProcessStats->asCacheOp[i32ReadIdx].uiSize >> OSGetPageShift();
+				if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC)
+				{
+					pszFlushType = "RBF.Fast";
+				}
+				else
+				{
+					pszFlushType = "RBF.Slow";
+				}
+			}
+			else
+			{
+				pszFlushType = "GF";
+			}
+
+			if (psProcessStats->asCacheOp[i32ReadIdx].bUserModeFlush)
+			{
+				pszFlushMode = "UM";
+			}
+			else
+			{
+				pszFlushMode = "KM";
+			}
+
+			switch (psProcessStats->asCacheOp[i32ReadIdx].uiCacheOp)
+			{
+				case PVRSRV_CACHE_OP_NONE:
+					pszCacheOpType = "None";
+					break;
+				case PVRSRV_CACHE_OP_CLEAN:
+					pszCacheOpType = "Clean";
+					break;
+				case PVRSRV_CACHE_OP_INVALIDATE:
+					pszCacheOpType = "Invalidate";
+					break;
+				case PVRSRV_CACHE_OP_FLUSH:
+					pszCacheOpType = "Flush";
+					break;
+				default:
+					pszCacheOpType = "Unknown";
+					break;
+			}
+
+			pfnOSStatsPrintf(pvFile,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+							CACHEOP_RI_PRINTF,
+#else
+							CACHEOP_PRINTF,
+#endif
+							pszCacheOpType,
+							pszFlushType,
+							pszFlushMode,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+							psProcessStats->asCacheOp[i32ReadIdx].sDevVAddr.uiAddr,
+							psProcessStats->asCacheOp[i32ReadIdx].sDevPAddr.uiAddr,
+#endif
+							psProcessStats->asCacheOp[i32ReadIdx].uiOffset,
+							psProcessStats->asCacheOp[i32ReadIdx].uiSize,
+							ui64ExecuteTime,
+							psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum);
+		}
+	}
+} /* CacheOpStatsPrintElements */
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+/*************************************************************************/ /*!
+@Function       MemStatsPrintElements
+@Description    Prints all elements for the memory statistic record.
+@Input          pvStatPtr         Pointer to statistics structure.
+@Input          pfnOSStatsPrintf  Printf function to use for output.
+*/ /**************************************************************************/
+void
+MemStatsPrintElements(void *pvFile,
+                      PVRSRV_PROCESS_STATS* psProcessStats,
+                      OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	IMG_UINT32	ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32);
+	IMG_UINT32	ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32);
+	PVRSRV_MEM_ALLOC_REC *psRecord;
+	IMG_UINT32 ui32ItemNumber;
+
+	/* Write the header... */
+	pfnOSStatsPrintf(pvFile, "PID    ");
+
+	pfnOSStatsPrintf(pvFile, "Type                VAddress");
+	for (ui32ItemNumber = 1;  ui32ItemNumber < ui32VAddrFields;  ui32ItemNumber++)
+	{
+		pfnOSStatsPrintf(pvFile, "        ");
+	}
+
+	pfnOSStatsPrintf(pvFile, "  PAddress");
+	for (ui32ItemNumber = 1;  ui32ItemNumber < ui32PAddrFields;  ui32ItemNumber++)
+	{
+		pfnOSStatsPrintf(pvFile, "        ");
+	}
+
+    pfnOSStatsPrintf(pvFile, "  Size(bytes)\n");
+
+	psRecord = psProcessStats->psMemoryRecords;
+	if (psRecord == NULL)
+	{
+		pfnOSStatsPrintf(pvFile, "%-5d\n", psProcessStats->pid);
+	}
+
+	while (psRecord != NULL)
+	{
+		IMG_BOOL bPrintStat = IMG_TRUE;
+
+		pfnOSStatsPrintf(pvFile, "%-5d  ", psProcessStats->pid);
+
+		switch (psRecord->eAllocType)
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:      		pfnOSStatsPrintf(pvFile, "KMALLOC             "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:      		pfnOSStatsPrintf(pvFile, "VMALLOC             "); break;
+#else
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:      		bPrintStat = IMG_FALSE; break;
+#endif
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:  pfnOSStatsPrintf(pvFile, "ALLOC_PAGES_PT_LMA  "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:  pfnOSStatsPrintf(pvFile, "ALLOC_PAGES_PT_UMA  "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:		pfnOSStatsPrintf(pvFile, "IOREMAP_PT_LMA      "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:			pfnOSStatsPrintf(pvFile, "VMAP_PT_UMA         "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: 	pfnOSStatsPrintf(pvFile, "ALLOC_LMA_PAGES     "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: 	pfnOSStatsPrintf(pvFile, "ALLOC_UMA_PAGES     "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: 	pfnOSStatsPrintf(pvFile, "MAP_UMA_LMA_PAGES   "); break;
+		default:										pfnOSStatsPrintf(pvFile, "INVALID             "); break;
+		}
+
+		if (bPrintStat)
+		{
+			for (ui32ItemNumber = 0; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++)
+			{
+				pfnOSStatsPrintf(pvFile, "%08x", *(((IMG_UINT32*) &psRecord->pvCpuVAddr) + ui32VAddrFields - ui32ItemNumber - 1));
+			}
+			pfnOSStatsPrintf(pvFile, "  ");
+
+			for (ui32ItemNumber = 0; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++)
+			{
+				pfnOSStatsPrintf(pvFile, "%08x", *(((IMG_UINT32*) &psRecord->sCpuPAddr.uiAddr) + ui32PAddrFields - ui32ItemNumber - 1));
+			}
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+			pfnOSStatsPrintf(pvFile, "  %u", psRecord->uiBytes);
+
+			pfnOSStatsPrintf(pvFile, "  %s", (IMG_CHAR*)psRecord->pvAllocdFromFile);
+
+			pfnOSStatsPrintf(pvFile, "  %d\n", psRecord->ui32AllocdFromLine);
+#else
+			pfnOSStatsPrintf(pvFile, "  %u\n", psRecord->uiBytes);
+#endif
+		}
+		/* Move to next record... */
+		psRecord = psRecord->psNext;
+	}
+} /* MemStatsPrintElements */
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+/*************************************************************************/ /*!
+@Function       RIMemStatsPrintElements
+@Description    Prints all elements for the RI Memory record.
+@Input          pvFile            File context to print to.
+@Input          psProcessStats    Pointer to process statistics structure.
+@Input          pfnOSStatsPrintf  Printf function to use for output.
+*/ /**************************************************************************/
+void
+RIMemStatsPrintElements(void *pvFile,
+                        PVRSRV_PROCESS_STATS* psProcessStats,
+                        OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	IMG_CHAR   *pszStatFmtText  = NULL;
+	IMG_HANDLE *pRIHandle       = NULL;
+
+	/* Acquire RI lock */
+	RILockAcquireKM();
+
+	/*
+	 *  Loop through the RI system to get each line of text.
+	 */
+	while (RIGetListEntryKM(psProcessStats->pid,
+							&pRIHandle,
+							&pszStatFmtText))
+	{
+		pfnOSStatsPrintf(pvFile, "%s", pszStatFmtText);
+	}
+
+	/* Release RI lock */
+	RILockReleaseKM();
+
+} /* RIMemStatsPrintElements */
+#endif
+
+#endif
+
+static IMG_UINT32	ui32FirmwareStartTimestamp;
+static IMG_UINT64	ui64FirmwareIdleDuration;
+
+void SetFirmwareStartTime(IMG_UINT32 ui32Time)
+{
+	ui32FirmwareStartTimestamp = UPDATE_TIME(ui32FirmwareStartTimestamp, ui32Time);
+}
+
+void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration)
+{
+	ui64FirmwareIdleDuration = UPDATE_TIME(ui64FirmwareIdleDuration, ui64Duration);
+}
+
+static INLINE void PowerStatsPrintGroup(IMG_UINT32 *pui32Stats,
+                                        void *pvFile,
+                                        OS_STATS_PRINTF_FUNC *pfnPrintf,
+                                        PVRSRV_POWER_STAT_TYPE eForced,
+                                        PVRSRV_POWER_STAT_TYPE ePowerOn)
+{
+	IMG_UINT32 ui32Index;
+
+	ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, DEVICE);
+	pfnPrintf(pvFile, "  Pre-Device:  %9u\n", pui32Stats[ui32Index]);
+
+	ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, SYSTEM);
+	pfnPrintf(pvFile, "  Pre-System:  %9u\n", pui32Stats[ui32Index]);
+
+	ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, SYSTEM);
+	pfnPrintf(pvFile, "  Post-System: %9u\n", pui32Stats[ui32Index]);
+
+	ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, DEVICE);
+	pfnPrintf(pvFile, "  Post-Device: %9u\n", pui32Stats[ui32Index]);
+}
+
+void PowerStatsPrintElements(void *pvFile,
+							 void *pvStatPtr,
+							 OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	IMG_UINT32 *pui32Stats = &aui32PowerTimingStats[0];
+	IMG_UINT32 ui32Idx;
+
+	PVR_UNREFERENCED_PARAMETER(pvStatPtr);
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		return;
+	}
+
+	pfnOSStatsPrintf(pvFile, "Forced Power-on Transition (nanoseconds):\n");
+	PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, FORCED, POWER_ON);
+	pfnOSStatsPrintf(pvFile, "\n");
+
+	pfnOSStatsPrintf(pvFile, "Forced Power-off Transition (nanoseconds):\n");
+	PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, FORCED, POWER_OFF);
+	pfnOSStatsPrintf(pvFile, "\n");
+
+	pfnOSStatsPrintf(pvFile, "Not Forced Power-on Transition (nanoseconds):\n");
+	PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, NOT_FORCED, POWER_ON);
+	pfnOSStatsPrintf(pvFile, "\n");
+
+	pfnOSStatsPrintf(pvFile, "Not Forced Power-off Transition (nanoseconds):\n");
+	PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, NOT_FORCED, POWER_OFF);
+	pfnOSStatsPrintf(pvFile, "\n");
+
+
+	pfnOSStatsPrintf(pvFile, "FW bootup time (timer ticks): %u\n", ui32FirmwareStartTimestamp);
+	pfnOSStatsPrintf(pvFile, "Host Acknowledge Time for FW Idle Signal (timer ticks): %u\n", (IMG_UINT32)(ui64FirmwareIdleDuration));
+	pfnOSStatsPrintf(pvFile, "\n");
+
+	pfnOSStatsPrintf(pvFile, "Last %d Clock Speed Change Timers (nanoseconds):\n", NUM_EXTRA_POWER_STATS);
+	pfnOSStatsPrintf(pvFile, "Prepare DVFS\tDVFS Change\tPost DVFS\n");
+
+	for (ui32Idx = ui32ClockSpeedIndexStart; ui32Idx != ui32ClockSpeedIndexEnd; ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS)
+	{
+		pfnOSStatsPrintf(pvFile, "%12llu\t%11llu\t%9llu\n", asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration,
+						 asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration,
+						 asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration);
+	}
+	}
+
+
+} /* PowerStatsPrintElements */
+
+void GlobalStatsPrintElements(void *pvFile,
+			      void *pvStatPtr,
+			      OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf)
+{
+	PVR_UNREFERENCED_PARAMETER(pvStatPtr);
+
+	if (pfnOSGetStatsPrintf != NULL)
+	{
+		IMG_UINT32 ui32StatNumber;
+
+		OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+		for (ui32StatNumber = 0;
+		     ui32StatNumber < ARRAY_SIZE(pszDriverStatType);
+		     ui32StatNumber++)
+		{
+			if (OSStringCompare(pszDriverStatType[ui32StatNumber], "") != 0)
+			{
+				pfnOSGetStatsPrintf(pvFile, "%-34s%10d\n",
+					    pszDriverStatType[ui32StatNumber],
+					    GET_GLOBAL_STAT_VALUE(ui32StatNumber));
+			}
+		}
+
+		OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVFindProcessMemStats
+@Description    Using the provided PID, find memory stats for that process.
+                Memstats will be provided for live/connected processes only.
+                Memstat values provided by this API relate only to the
+                physical memory allocated by the process and do not cover
+                any of the mapped or imported memory.
+@Input          pid                 Process to search for.
+@Input          ui32ArrSize         Size of the array where memstat
+                                    records will be stored.
+@Input          bAllProcessStats    Flag to denote whether stats for an
+                                    individual process or for all
+                                    processes are requested.
+@Output         pui32MemoryStats    Handle to the memory where memstats
+                                    are stored.
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats)
+{
+	IMG_INT i;
+	PVRSRV_PROCESS_STATS* psProcessStats;
+
+	PVR_LOGR_IF_FALSE(pui32MemoryStats != NULL,
+	                  "pui32MemoryStats is NULL",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+
+	if (bAllProcessStats)
+	{
+		PVR_LOGR_IF_FALSE(ui32ArrSize == PVRSRV_DRIVER_STAT_TYPE_COUNT,
+				  "MemStats array size is incorrect",
+				  PVRSRV_ERROR_INVALID_PARAMS);
+
+		OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+		for (i = 0; i < ui32ArrSize; i++)
+		{
+			pui32MemoryStats[i] = GET_GLOBAL_STAT_VALUE(i);
+		}
+
+		OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+
+		return PVRSRV_OK;
+	}
+
+	PVR_LOGR_IF_FALSE(ui32ArrSize == PVRSRV_PROCESS_STAT_TYPE_COUNT,
+			  "MemStats array size is incorrect",
+			  PVRSRV_ERROR_INVALID_PARAMS);
+
+	OSLockAcquire(g_psLinkedListLock);
+
+	/* Search for the given PID in the Live List */
+	psProcessStats = _FindProcessStatsInLiveList(pid);
+
+	if (psProcessStats == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Process %d not found. This process may not be live anymore.", (IMG_INT)pid));
+		OSLockRelease(g_psLinkedListLock);
+
+		return PVRSRV_ERROR_PROCESS_NOT_FOUND;
+	}
+
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+	for (i = 0; i < ui32ArrSize; i++)
+	{
+		pui32MemoryStats[i] = psProcessStats->i32StatValue[i];
+	}
+	OSLockRelease(psProcessStats->hLock);
+
+	OSLockRelease(g_psLinkedListLock);
+
+	return PVRSRV_OK;
+
+} /* PVRSRVFindProcessMemStats */
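+
+/* Example (illustrative sketch, not part of the driver): fetching the
+ * memstats of a single process. uiPid is a hypothetical caller-supplied PID.
+ *
+ *	IMG_UINT32 aui32Stats[PVRSRV_PROCESS_STAT_TYPE_COUNT];
+ *
+ *	if (PVRSRVFindProcessMemStats(uiPid, PVRSRV_PROCESS_STAT_TYPE_COUNT,
+ *	                              IMG_FALSE, &aui32Stats[0]) == PVRSRV_OK)
+ *	{
+ *		... read aui32Stats[PVRSRV_PROCESS_STAT_TYPE_KMALLOC], etc. ...
+ *	}
+ */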
+
+/*************************************************************************/ /*!
+@Function       PVRSRVGetProcessMemUsage
+@Description    Calculate allocated kernel and graphics memory for all
+                live/connected processes. Memstat values provided by this API
+                relate only to the physical memory allocated by the process
+                and do not cover any of the mapped or imported memory.
+@Output         pui32TotalMem               Total memory usage for all live
+                                            PIDs connected to the driver.
+@Output         pui32NumberOfLivePids       Number of live pids currently
+                                            connected to the server.
+@Output         ppsPerProcessMemUsageData   Handle to an array of
+                                            PVRSRV_PER_PROCESS_MEM_USAGE,
+                                            number of elements defined by
+                                            pui32NumberOfLivePids.
+@Return         PVRSRV_OK                       Success
+                PVRSRV_ERROR_PROCESS_NOT_FOUND  No live processes.
+                PVRSRV_ERROR_OUT_OF_MEMORY      Failed to allocate memory for
+                                                ppsPerProcessMemUsageData.
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem,
+									  IMG_UINT32 *pui32NumberOfLivePids,
+									  PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData)
+{
+	IMG_UINT32 ui32Counter = 0;
+	IMG_UINT32 ui32NumberOfLivePids = 0;
+	PVRSRV_ERROR eError = PVRSRV_ERROR_PROCESS_NOT_FOUND;
+	PVRSRV_PROCESS_STATS* psProcessStats = NULL;
+	PVRSRV_PER_PROCESS_MEM_USAGE* psPerProcessMemUsageData = NULL;
+
+	OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+	*pui32TotalMem = GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC) +
+		GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC) +
+		GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) +
+		GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) +
+		GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) +
+		GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA);
+
+	OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+
+	OSLockAcquire(g_psLinkedListLock);
+	psProcessStats = g_psLiveList;
+
+	while (psProcessStats != NULL)
+	{
+		psProcessStats = psProcessStats->psNext;
+		ui32NumberOfLivePids++;
+	}
+
+	if (ui32NumberOfLivePids > 0)
+	{
+		/* Use OSAllocZMemNoStats to prevent deadlock. */
+		psPerProcessMemUsageData = OSAllocZMemNoStats(ui32NumberOfLivePids * sizeof(*psPerProcessMemUsageData));
+
+		if (psPerProcessMemUsageData)
+		{
+			psProcessStats = g_psLiveList;
+
+			while (psProcessStats != NULL)
+			{
+				OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+				psPerProcessMemUsageData[ui32Counter].ui32Pid = (IMG_UINT32)psProcessStats->pid;
+
+				psPerProcessMemUsageData[ui32Counter].ui32KernelMemUsage = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] +
+				psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC];
+
+				psPerProcessMemUsageData[ui32Counter].ui32GraphicsMemUsage = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] +
+				psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] +
+				psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] +
+				psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES];
+
+				OSLockRelease(psProcessStats->hLock);
+				psProcessStats = psProcessStats->psNext;
+				ui32Counter++;
+			}
+			eError = PVRSRV_OK;
+		}
+		else
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+	*pui32NumberOfLivePids = ui32NumberOfLivePids;
+	*ppsPerProcessMemUsageData = psPerProcessMemUsageData;
+
+	return eError;
+
+} /* PVRSRVGetProcessMemUsage */
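+
+/* Example (illustrative sketch, not part of the driver): consuming the
+ * per-process usage array. OSFreeMemNoStats is assumed to be the matching
+ * free for the OSAllocZMemNoStats allocation made above.
+ *
+ *	IMG_UINT32 ui32Total, ui32NumPids, i;
+ *	PVRSRV_PER_PROCESS_MEM_USAGE *psUsage = NULL;
+ *
+ *	if (PVRSRVGetProcessMemUsage(&ui32Total, &ui32NumPids, &psUsage) == PVRSRV_OK)
+ *	{
+ *		for (i = 0; i < ui32NumPids; i++)
+ *		{
+ *			... report psUsage[i].ui32Pid, psUsage[i].ui32GraphicsMemUsage ...
+ *		}
+ *		OSFreeMemNoStats(psUsage);
+ *	}
+ */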
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/process_stats.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/process_stats.h
new file mode 100644
index 0000000..3441efd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/process_stats.h
@@ -0,0 +1,224 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for creating and reading proc filesystem entries.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PROCESS_STATS_H__
+#define __PROCESS_STATS_H__
+
+#include <powervr/mem_types.h>
+
+#include "pvrsrv_error.h"
+#include "cache_ops.h"
+
+/*
+ *  The publishing of Process Stats is controlled by the
+ *  PVRSRV_ENABLE_PROCESS_STATS build option. The recording of all Memory
+ *  allocations is controlled by the PVRSRV_ENABLE_MEMORY_STATS build option.
+ *
+ *  Note: There will be a performance degradation with memory allocation
+ *        recording enabled!
+ */
+
+
+/*
+ *  Memory types which can be tracked...
+ */
+typedef enum {
+	PVRSRV_MEM_ALLOC_TYPE_KMALLOC,				/* memory allocated by kmalloc() */
+	PVRSRV_MEM_ALLOC_TYPE_VMALLOC,				/* memory allocated by vmalloc() */
+	PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,	/* pages allocated from UMA to hold page table information */
+	PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,			/* ALLOC_PAGES_PT_UMA mapped to kernel address space */
+	PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,	/* pages allocated from LMA to hold page table information */
+	PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,		/* ALLOC_PAGES_PT_LMA mapped to kernel address space */
+	PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,		/* pages allocated from LMA */
+	PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,		/* pages allocated from UMA */
+	PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,	/* mapped UMA/LMA pages */
+	PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES,		/* pages in the page pool */
+
+	/* Must be the last enum... */
+	PVRSRV_MEM_ALLOC_TYPE_COUNT
+} PVRSRV_MEM_ALLOC_TYPE;
+
+
+/*
+ * Functions for managing the processes recorded...
+ */
+PVRSRV_ERROR  PVRSRVStatsInitialise(void);
+
+void  PVRSRVStatsDestroy(void);
+
+PVRSRV_ERROR  PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats);
+
+void  PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats);
+
+#define MAX_POWER_STAT_ENTRIES		51
+
+/*
+ * Functions for recording the statistics...
+ */
+void  PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								   void *pvCpuVAddr,
+								   IMG_CPU_PHYADDR sCpuPAddr,
+								   size_t uiBytes,
+								   void *pvPrivateData,
+								   IMG_PID uiPid);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+void  _PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+									void *pvCpuVAddr,
+									IMG_CPU_PHYADDR sCpuPAddr,
+									size_t uiBytes,
+									void *pvPrivateData,
+									IMG_PID uiPid,
+									void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine);
+#endif
+void  PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+									  IMG_UINT64 ui64Key,
+									  IMG_PID uiPid);
+
+void PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								 size_t uiBytes,
+								 IMG_PID uiPid);
+
+/*
+ * Increases the memory stat for eAllocType. Tracks the allocation size value
+ * by inserting a value into a hash table with uiCpuVAddr as key.
+ * Pair with PVRSRVStatsDecrMemAllocStatAndUntrack().
+ */
+void PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+										 size_t uiBytes,
+										 IMG_UINT64 uiCpuVAddr,
+										 IMG_PID uiPid);
+
+void PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								 size_t uiBytes,
+								 IMG_PID uiPid);
+
+void PVRSRVStatsDecrMemKAllocStat(size_t uiBytes,
+								  IMG_PID decrPID);
+
+/*
+ * Decrease the memory stat for eAllocType. Takes the allocation size value from the
+ * hash table with uiCpuVAddr as key. Pair with PVRSRVStatsIncrMemAllocStatAndTrack().
+ */
+void PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+										   IMG_UINT64 uiCpuVAddr);
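+
+/* Example (illustrative sketch): the intended pairing of the two calls above.
+ * pvVirt and uiLen are hypothetical names for an allocation's address and
+ * size.
+ *
+ *	PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, uiLen,
+ *	                                    (IMG_UINT64)(uintptr_t)pvVirt, uiPid);
+ *	...
+ *	PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ *	                                      (IMG_UINT64)(uintptr_t)pvVirt);
+ */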
+
+void
+PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes);
+
+void
+PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes);
+
+void  PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+										  IMG_UINT32 ui32TotalNumOutOfMemory,
+										  IMG_UINT32 ui32TotalTAStores,
+										  IMG_UINT32 ui32Total3DStores,
+										  IMG_UINT32 ui32TotalSHStores,
+										  IMG_UINT32 ui32TotalCDMStores,
+										  IMG_PID owner);
+
+void  PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+									 IMG_UINT32 ui32NumReqByFW,
+									 IMG_PID owner);
+
+void  PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+									 IMG_UINT32 ui32NumGrowReqByFW,
+									 IMG_UINT32 ui32InitFLPages,
+									 IMG_UINT32 ui32NumHighPages,
+									 IMG_PID	ownerPid);
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+void  PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp,
+									IMG_UINT32 ui32OpSeqNum,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)  && defined(DEBUG)
+									IMG_DEV_VIRTADDR sDevVAddr,
+									IMG_DEV_PHYADDR sDevPAddr,
+									IMG_UINT32 eFenceOpType,
+#endif
+									IMG_DEVMEM_SIZE_T uiOffset,
+									IMG_DEVMEM_SIZE_T uiSize,
+									IMG_UINT64 ui64ExecuteTimeMs,
+									IMG_BOOL bRangeBasedFlush,
+									IMG_BOOL bUserModeFlush,
+									IMG_BOOL bIsFence,
+									IMG_PID ownerPid);
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+/* Update pre/post power transition timing statistics */
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+                              IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+                              IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower);
+
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer);
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer);
+#else
+/* Update pre/post power transition timing statistics */
+static inline
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+                              IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+                              IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower) {}
+static inline
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) {}
+
+static inline
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) {}
+#endif
+
+void SetFirmwareStartTime(IMG_UINT32 ui32TimeStamp);
+
+void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration);
+
+/* Functions used for calculating the memory usage statistics of a process */
+PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize,
+                                       IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats);
+
+typedef struct {
+	IMG_UINT32 ui32Pid;
+	IMG_UINT32 ui32KernelMemUsage;
+	IMG_UINT32 ui32GraphicsMemUsage;
+} PVRSRV_PER_PROCESS_MEM_USAGE;
+
+PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem,
+									  IMG_UINT32 *pui32NumberOfLivePids,
+									  PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData);
+
+#endif /* __PROCESS_STATS_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_bridge.h
new file mode 100644
index 0000000..b83af91
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_bridge.h
@@ -0,0 +1,449 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the PVR Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_BRIDGE_H
+#define PVR_BRIDGE_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "pvrsrv_error.h"
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "common_dc_bridge.h"
+#  if defined(SUPPORT_DCPLAT_BRIDGE)
+#    include "common_dcplat_bridge.h"
+#  endif
+#endif
+#include "common_mm_bridge.h"
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+#include "common_mmplat_bridge.h"
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+#include "common_mmextmem_bridge.h"
+#endif
+#if !defined(EXCLUDE_CMM_BRIDGE)
+#include "common_cmm_bridge.h"
+#endif
+#if defined(LINUX)
+#include "common_dmabuf_bridge.h"
+#endif
+#if defined(PDUMP)
+#include "common_pdump_bridge.h"
+#include "common_pdumpctrl_bridge.h"
+#include "common_pdumpmm_bridge.h"
+#endif
+#include "common_cache_bridge.h"
+#include "common_srvcore_bridge.h"
+#include "common_sync_bridge.h"
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+#include "common_syncexport_bridge.h"
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "common_syncsexport_bridge.h"
+#endif
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "common_smm_bridge.h"
+#endif
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+#include "common_htbuffer_bridge.h"
+#endif
+#include "common_pvrtl_bridge.h"
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "common_ri_bridge.h"
+#endif
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+#include "common_validation_bridge.h"
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+#include "common_tutils_bridge.h"
+#endif
+
+#include "common_devicememhistory_bridge.h"
+#include "common_synctracking_bridge.h"
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#include "common_syncfallback_bridge.h"
+#endif
+
+/*
+ * Bridge Cmd Ids
+ */
+
+
+/* Note: The pattern
+ *   #define PVRSRV_BRIDGE_FEATURE (PVRSRV_BRIDGE_PREVFEATURE + 1)
+ *   #if defined(SUPPORT_FEATURE)
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST + 1)
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST  (PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST + PVRSRV_BRIDGE_FEATURE_CMD_LAST)
+ *   #else
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST 0
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST  (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST)
+ *   #endif
+ * is used in the macro definitions below to make PVRSRV_BRIDGE_FEATURE_*
+ * take up no space in the dispatch table if SUPPORT_FEATURE is disabled.
+ *
+ * Note however that a bridge always defines PVRSRV_BRIDGE_FEATURE, even where
+ * the feature is not enabled (each bridge group retains its own ioctl number).
+ */
+
+#define PVRSRV_BRIDGE_FIRST					0UL
+
+/*   0: Default handler */
+#define PVRSRV_BRIDGE_DEFAULT				0UL
+#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST 0UL
+#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST  (PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST)
+/*   1: CORE functions */
+#define PVRSRV_BRIDGE_SRVCORE				1UL
+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST (PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST+1)
+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST  (PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST + PVRSRV_BRIDGE_SRVCORE_CMD_LAST)
+
+/*   2: SYNC functions */
+#define PVRSRV_BRIDGE_SYNC					2UL
+#define PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNC_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNC_CMD_LAST)
+
+/*   3: SYNCEXPORT functions */
+#define PVRSRV_BRIDGE_SYNCEXPORT			3UL
+#if defined(SUPPORT_INSECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCEXPORT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST)
+#endif
+
+/*   4: SYNCSEXPORT functions */
+#define PVRSRV_BRIDGE_SYNCSEXPORT			4UL
+#if defined(SUPPORT_SECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCSEXPORT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST)
+#endif
+
+/*   5: PDUMP CTRL layer functions */
+#define PVRSRV_BRIDGE_PDUMPCTRL				5UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST  (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST)
+#endif
+
+/*   6: Memory Management functions */
+#define PVRSRV_BRIDGE_MM					6UL
+#define PVRSRV_BRIDGE_MM_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MM_DISPATCH_LAST  (PVRSRV_BRIDGE_MM_DISPATCH_FIRST + PVRSRV_BRIDGE_MM_CMD_LAST)
+
+/*   7: Non-Linux Memory Management functions */
+#define PVRSRV_BRIDGE_MMPLAT				7UL
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_MM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST  (PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_MMPLAT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST  (PVRSRV_BRIDGE_MM_DISPATCH_LAST)
+#endif
+
+/*   8: Context Memory Management functions */
+#define PVRSRV_BRIDGE_CMM					8UL
+#if !defined(EXCLUDE_CMM_BRIDGE)
+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST  (PVRSRV_BRIDGE_CMM_DISPATCH_FIRST + PVRSRV_BRIDGE_CMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST  (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST)
+#endif
+
+/*   9: PDUMP Memory Management functions */
+#define PVRSRV_BRIDGE_PDUMPMM				9UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST  (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST  (PVRSRV_BRIDGE_CMM_DISPATCH_LAST)
+#endif
+
+/*   10: PDUMP functions */
+#define PVRSRV_BRIDGE_PDUMP					10UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST  (PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMP_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST  (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST)
+#endif
+
+/*  11: DMABUF functions */
+#define PVRSRV_BRIDGE_DMABUF				11UL
+#if defined(LINUX)
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST  (PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST + PVRSRV_BRIDGE_DMABUF_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST  (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST)
+#endif
+
+/*  12: Display Class functions */
+#define PVRSRV_BRIDGE_DC					12UL
+#if defined(SUPPORT_DISPLAY_CLASS)
+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST  (PVRSRV_BRIDGE_DC_DISPATCH_FIRST + PVRSRV_BRIDGE_DC_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST  (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST)
+#endif
+
+/*  13: Cache interface functions */
+#define PVRSRV_BRIDGE_CACHE					13UL
+#define PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST (PVRSRV_BRIDGE_DC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_CACHE_DISPATCH_LAST  (PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST + PVRSRV_BRIDGE_CACHE_CMD_LAST)
+
+/*  14: Secure Memory Management functions */
+#define PVRSRV_BRIDGE_SMM					14UL
+#if defined(SUPPORT_SECURE_EXPORT)
+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST  (PVRSRV_BRIDGE_SMM_DISPATCH_FIRST + PVRSRV_BRIDGE_SMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST  (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST)
+#endif
+
+/*  15: Transport Layer interface functions */
+#define PVRSRV_BRIDGE_PVRTL					15UL
+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST (PVRSRV_BRIDGE_SMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST  (PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST + PVRSRV_BRIDGE_PVRTL_CMD_LAST)
+
+/*  16: Resource Information (RI) interface functions */
+#define PVRSRV_BRIDGE_RI					16UL
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST  (PVRSRV_BRIDGE_RI_DISPATCH_FIRST + PVRSRV_BRIDGE_RI_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST  (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST)
+#endif
+
+/*  17: Validation interface functions */
+#define PVRSRV_BRIDGE_VALIDATION			17UL
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST (PVRSRV_BRIDGE_RI_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST  (PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST + PVRSRV_BRIDGE_VALIDATION_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST  (PVRSRV_BRIDGE_RI_DISPATCH_LAST)
+#endif
+
+/*  18: TUTILS interface functions */
+#define PVRSRV_BRIDGE_TUTILS				18UL
+#if defined(PVR_TESTING_UTILS)
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST  (PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST + PVRSRV_BRIDGE_TUTILS_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST  (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST)
+#endif
+
+/*  19: DevMem history interface functions */
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY		19UL
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST  (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST + PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST)
+
+/*  20: Host Trace Buffer interface functions */
+#define PVRSRV_BRIDGE_HTBUFFER				20UL
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST  (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST + PVRSRV_BRIDGE_HTBUFFER_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST  (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST)
+#endif
+
+/*  21: Non-Linux Display functions */
+#define PVRSRV_BRIDGE_DCPLAT				21UL
+#if defined(SUPPORT_DISPLAY_CLASS) && defined (SUPPORT_DCPLAT_BRIDGE)
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST  (PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_DCPLAT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST  (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST)
+#endif
+
+/*  22: Extmem functions */
+#define PVRSRV_BRIDGE_MMEXTMEM				22UL
+#if defined(SUPPORT_WRAP_EXTMEM)
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST  (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST + PVRSRV_BRIDGE_MMEXTMEM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST)
+#endif
+
+/*  23: Sync tracking functions */
+#define PVRSRV_BRIDGE_SYNCTRACKING			23UL
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST)
+
+/*  24: Sync fallback functions */
+#define PVRSRV_BRIDGE_SYNCFALLBACK			24UL
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCFALLBACK_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST)
+#endif
+
+/* NB PVRSRV_BRIDGE_LAST below must be the last bridge group defined above (PVRSRV_BRIDGE_FEATURE) */
+#define PVRSRV_BRIDGE_LAST					(PVRSRV_BRIDGE_SYNCFALLBACK)
+/* NB PVRSRV_BRIDGE_DISPATCH LAST below must be the last dispatch entry defined above (PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST) */
+#define PVRSRV_BRIDGE_DISPATCH_LAST			(PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST)
+
+/* bit mask representing the enabled PVR bridges */
+
+static const IMG_UINT32 gui32PVRBridges =
+	  (1U << (PVRSRV_BRIDGE_DEFAULT - PVRSRV_BRIDGE_FIRST))
+	| (1U << (PVRSRV_BRIDGE_SRVCORE - PVRSRV_BRIDGE_FIRST))
+	| (1U << (PVRSRV_BRIDGE_SYNC - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_INSECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+	| (1U << (PVRSRV_BRIDGE_SYNCEXPORT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_SECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+	| (1U << (PVRSRV_BRIDGE_SYNCSEXPORT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PDUMP)
+	| (1U << (PVRSRV_BRIDGE_PDUMPCTRL - PVRSRV_BRIDGE_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_MM - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+	| (1U << (PVRSRV_BRIDGE_MMPLAT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_CMM)
+	| (1U << (PVRSRV_BRIDGE_CMM - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PDUMP)
+	| (1U << (PVRSRV_BRIDGE_PDUMPMM - PVRSRV_BRIDGE_FIRST))
+	| (1U << (PVRSRV_BRIDGE_PDUMP - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(LINUX)
+	| (1U << (PVRSRV_BRIDGE_DMABUF - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS)
+	| (1U << (PVRSRV_BRIDGE_DC - PVRSRV_BRIDGE_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_CACHE - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_SECURE_EXPORT)
+	| (1U << (PVRSRV_BRIDGE_SMM - PVRSRV_BRIDGE_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_PVRTL - PVRSRV_BRIDGE_FIRST))
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	| (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_VALIDATION)
+	| (1U << (PVRSRV_BRIDGE_VALIDATION - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PVR_TESTING_UTILS)
+	| (1U << (PVRSRV_BRIDGE_TUTILS - PVRSRV_BRIDGE_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_HTBUFFER)
+	| (1U << (PVRSRV_BRIDGE_HTBUFFER - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS) && defined (SUPPORT_DCPLAT_BRIDGE)
+	| (1U << (PVRSRV_BRIDGE_DCPLAT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+	| (1U << (PVRSRV_BRIDGE_MMEXTMEM - PVRSRV_BRIDGE_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_SYNCTRACKING - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	| (1U << (PVRSRV_BRIDGE_SYNCFALLBACK - PVRSRV_BRIDGE_FIRST))
+#endif
+	;
+
+/* bit field representing which PVR bridge groups may optionally not
+ * be present in the server
+ */
+#define PVR_BRIDGES_OPTIONAL \
+	( \
+		(1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST)) | \
+		(1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST)) | \
+		(1U << (PVRSRV_BRIDGE_SYNCTRACKING - PVRSRV_BRIDGE_FIRST)) \
+	)
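+
+/* Example (illustrative sketch): a build can test whether a given bridge
+ * group is compiled in by checking its bit, using the same layout as
+ * gui32PVRBridges above.
+ *
+ *	if (gui32PVRBridges & (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST)))
+ *	{
+ *		... the RI bridge group is enabled in this build ...
+ *	}
+ */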
+
+/******************************************************************************
+ * Generic bridge structures
+ *****************************************************************************/
+
+
+/******************************************************************************
+ * bridge packaging structure
+ *****************************************************************************/
+typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
+{
+	IMG_UINT32		ui32BridgeID;			/*!< ioctl bridge group */
+	IMG_UINT32		ui32FunctionID;			/*!< ioctl function index */
+	IMG_UINT32		ui32Size;				/*!< size of structure */
+	void __user		*pvParamIn;				/*!< input data buffer */
+	IMG_UINT32		ui32InBufferSize;		/*!< size of input data buffer */
+	void __user		*pvParamOut;			/*!< output data buffer */
+	IMG_UINT32		ui32OutBufferSize;		/*!< size of output data buffer */
+} PVRSRV_BRIDGE_PACKAGE;
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* PVR_BRIDGE_H */
+
+/******************************************************************************
+ End of file (pvr_bridge.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_bridge_k.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_bridge_k.c
new file mode 100644
index 0000000..4286af2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_bridge_k.c
@@ -0,0 +1,630 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Bridge Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Receives calls from the user portion of services and
+                despatches them to functions in the kernel portion.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/mm_types.h>
+
+#include "img_defs.h"
+#include "pvr_bridge.h"
+#include "pvr_bridge_k.h"
+#include "connection_server.h"
+#include "syscommon.h"
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "private_data.h"
+#include "linkage.h"
+#include "pmr.h"
+#include "rgx_bvnc_defs_km.h"
+#include "pvrsrv_bridge_init.h"
+
+#include <drm/drmP.h>
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+
+#include "env_connection.h"
+#include <linux/sched.h>
+#include <linux/freezer.h>
+
+/* RGX: */
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+
+#include "srvcore.h"
+#include "common_srvcore_bridge.h"
+
+PVRSRV_ERROR InitDMABUFBridge(void);
+PVRSRV_ERROR DeinitDMABUFBridge(void);
+
+#if defined(MODULE_TEST)
+/************************************************************************/
+// additional includes for services testing
+/************************************************************************/
+#include "pvr_test_bridge.h"
+#include "kern_test.h"
+/************************************************************************/
+// end of additional includes
+/************************************************************************/
+#endif
+
+/* WARNING!
+ * The mmap code has its own mutex, to prevent a possible deadlock
+ * when using gPVRSRVLock.
+ * The Linux kernel takes mm->mmap_sem before calling the mmap
+ * entry points (PVRMMap, MMapVOpen, MMapVClose), but the ioctl
+ * entry point may take mm->mmap_sem during fault handling, or
+ * before calling get_user_pages. If gPVRSRVLock were used in the
+ * mmap entry points, a deadlock could result, due to the ioctl
+ * and mmap code taking the two locks in different orders.
+ * As a corollary to this, the mmap entry points must not call
+ * any driver code that relies on gPVRSRVLock being held.
+ */
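+/* Illustrative lock orders the note above guards against (assumed, for
+ * exposition only):
+ *   ioctl path: gPVRSRVLock -> mm->mmap_sem (fault handling/get_user_pages)
+ *   mmap path:  mm->mmap_sem -> gPVRSRVLock (if mmap took gPVRSRVLock)
+ * Taking the two locks in both orders can deadlock; hence g_sMMapMutex. */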
+static DEFINE_MUTEX(g_sMMapMutex);
+
+#if defined(DEBUG_BRIDGE_KM)
+static PPVR_DEBUGFS_ENTRY_DATA gpsPVRDebugFSBridgeStatsEntry;
+static struct seq_operations gsBridgeStatsReadOps;
+static ssize_t BridgeStatsWrite(const char __user *pszBuffer,
+								size_t uiCount,
+								loff_t *puiPosition,
+								void *pvData);
+#endif
+
+#define _DRIVER_SUSPENDED 1
+#define _DRIVER_NOT_SUSPENDED 0
+static ATOMIC_T g_iDriverSuspended;
+static ATOMIC_T g_iNumActiveDriverThreads;
+static ATOMIC_T g_iNumActiveKernelThreads;
+static IMG_HANDLE g_hDriverThreadEventObject;
+
+PVRSRV_ERROR OSPlatformBridgeInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	eError = InitDMABUFBridge();
+	PVR_LOG_IF_ERROR(eError, "InitDMABUFBridge");
+
+	OSAtomicWrite(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED);
+	OSAtomicWrite(&g_iNumActiveDriverThreads, 0);
+	OSAtomicWrite(&g_iNumActiveKernelThreads, 0);
+
+	eError = OSEventObjectCreate("Global driver thread event object",
+	                             &g_hDriverThreadEventObject);
+	PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", error_);
+
+#if defined(DEBUG_BRIDGE_KM)
+	{
+		IMG_INT iResult;
+		iResult = PVRDebugFSCreateFile("bridge_stats",
+					NULL,
+					&gsBridgeStatsReadOps,
+					BridgeStatsWrite,
+					NULL,
+					&g_BridgeDispatchTable[0],
+					&gpsPVRDebugFSBridgeStatsEntry);
+		if (iResult != 0)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto error_;
+		}
+	}
+#endif
+
+	return PVRSRV_OK;
+
+error_:
+	if (g_hDriverThreadEventObject)
+	{
+		OSEventObjectDestroy(g_hDriverThreadEventObject);
+		g_hDriverThreadEventObject = NULL;
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR OSPlatformBridgeDeInit(void)
+{
+	PVRSRV_ERROR eError;
+
+#if defined(DEBUG_BRIDGE_KM)
+	if (gpsPVRDebugFSBridgeStatsEntry != NULL)
+	{
+		PVRDebugFSRemoveFile(&gpsPVRDebugFSBridgeStatsEntry);
+	}
+#endif
+
+	eError = DeinitDMABUFBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitDMABUFBridge");
+
+	if (g_hDriverThreadEventObject != NULL)
+	{
+		OSEventObjectDestroy(g_hDriverThreadEventObject);
+		g_hDriverThreadEventObject = NULL;
+	}
+
+	return eError;
+}
+
+#if defined(DEBUG_BRIDGE_KM)
+static void *BridgeStatsSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)psSeqFile->private;
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#else
+	BridgeGlobalStatsLock();
+#endif
+
+	if (psDispatchTable == NULL || (*puiPosition) > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+	{
+		return NULL;
+	}
+
+	if ((*puiPosition) == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return &(psDispatchTable[(*puiPosition) - 1]);
+}
+
+static void BridgeStatsSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#else
+	BridgeGlobalStatsUnlock();
+#endif
+}
+
+static void *BridgeStatsSeqNext(struct seq_file *psSeqFile,
+			       void *pvData,
+			       loff_t *puiPosition)
+{
+	PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)psSeqFile->private;
+	loff_t uiItemAskedFor = *puiPosition; /* puiPosition on entry is the index to return */
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	/* Is the item asked for (starts at 0) a valid table index? */
+	if (uiItemAskedFor < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+	{
+		(*puiPosition)++; /* on exit it is the next seq index to ask for */
+		return &(psDispatchTable[uiItemAskedFor]);
+	}
+
+	/* Now past the end of the table; return NULL to indicate stop */
+	return NULL;
+}
+
+static int BridgeStatsSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData == SEQ_START_TOKEN)
+	{
+		seq_printf(psSeqFile,
+			   "Total ioctl call count = %u\n"
+			   "Total number of bytes copied via copy_from_user = %u\n"
+			   "Total number of bytes copied via copy_to_user = %u\n"
+			   "Total number of bytes copied via copy_*_user = %u\n\n"
+			   "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s\n",
+			   g_BridgeGlobalStats.ui32IOCTLCount,
+			   g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
+			   g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+			   g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+			   "#",
+			   "Bridge Name",
+			   "Wrapper Function",
+			   "Call Count",
+			   "copy_from_user (B)",
+			   "copy_to_user (B)",
+			   "Total Time (us)",
+			   "Max Time (us)");
+	}
+	else if (pvData != NULL)
+	{
+		PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)pvData;
+		IMG_UINT32 ui32Remainder;
+
+		seq_printf(psSeqFile,
+			   "%3d: %-60s   %-48s   %-10u   %-20u   %-20u   %-20" IMG_UINT64_FMTSPEC "   %-20" IMG_UINT64_FMTSPEC "\n",
+			   (IMG_UINT32)(((size_t)psEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)),
+			   psEntry->pszIOCName,
+			   (psEntry->pfFunction != NULL) ? psEntry->pszFunctionName : "(null)",
+			   psEntry->ui32CallCount,
+			   psEntry->ui32CopyFromUserTotalBytes,
+			   psEntry->ui32CopyToUserTotalBytes,
+			   OSDivide64r64(psEntry->ui64TotalTimeNS, 1000, &ui32Remainder),
+			   OSDivide64r64(psEntry->ui64MaxTimeNS, 1000, &ui32Remainder));
+	}
+
+	return 0;
+}
+
+static struct seq_operations gsBridgeStatsReadOps =
+{
+	.start = BridgeStatsSeqStart,
+	.stop = BridgeStatsSeqStop,
+	.next = BridgeStatsSeqNext,
+	.show = BridgeStatsSeqShow,
+};
+
+static ssize_t BridgeStatsWrite(const char __user *pszBuffer,
+								size_t uiCount,
+								loff_t *puiPosition,
+								void *pvData)
+{
+	IMG_UINT32 i;
+	/* We only care if a '0' is written to the file, if so we reset results. */
+	char buf[1];
+	ssize_t iResult = simple_write_to_buffer(&buf[0], sizeof(buf), puiPosition, pszBuffer, uiCount);
+
+	if (iResult < 0)
+	{
+		return iResult;
+	}
+
+	if (iResult == 0 || buf[0] != '0')
+	{
+		return -EINVAL;
+	}
+
+	/* Reset stats. */
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#else
+	BridgeGlobalStatsLock();
+#endif
+
+	g_BridgeGlobalStats.ui32IOCTLCount = 0;
+	g_BridgeGlobalStats.ui32TotalCopyFromUserBytes = 0;
+	g_BridgeGlobalStats.ui32TotalCopyToUserBytes = 0;
+
+	for (i = 0; i < ARRAY_SIZE(g_BridgeDispatchTable); i++)
+	{
+		g_BridgeDispatchTable[i].ui32CallCount = 0;
+		g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
+		g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
+		g_BridgeDispatchTable[i].ui64TotalTimeNS = 0;
+		g_BridgeDispatchTable[i].ui64MaxTimeNS = 0;
+	}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#else
+	BridgeGlobalStatsUnlock();
+#endif
+
+	return uiCount;
+}
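+
+/* Usage sketch: the stats can be reset from user space by writing '0' to the
+ * debugfs file created above, e.g.
+ *   echo 0 > /sys/kernel/debug/<pvr debugfs root>/bridge_stats
+ * (the exact root directory depends on where PVRDebugFSCreateFile roots its
+ * entries; the path shown here is an assumption). */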
+
+#endif /* defined(DEBUG_BRIDGE_KM) */
+
+PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hEvent;
+
+	eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__));
+		return eError;
+	}
+
+	if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED,
+	                            _DRIVER_SUSPENDED) == _DRIVER_SUSPENDED)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Driver is already suspended", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto out_put;
+	}
+
+	/* now wait for any threads currently in the server to exit */
+	while (OSAtomicRead(&g_iNumActiveDriverThreads) != 0 ||
+		   (OSAtomicRead(&g_iNumActiveKernelThreads) != 0 && !bShutdown))
+	{
+		if (OSAtomicRead(&g_iNumActiveDriverThreads) != 0)
+		{
+			PVR_LOG(("%s: waiting for user threads (%d)", __func__,
+			        OSAtomicRead(&g_iNumActiveDriverThreads)));
+		}
+		if (OSAtomicRead(&g_iNumActiveKernelThreads) != 0)
+		{
+			PVR_LOG(("%s: waiting for kernel threads (%d)", __func__,
+			        OSAtomicRead(&g_iNumActiveKernelThreads)));
+		}
+		/* Regular wait is called here (and not OSEventObjectWaitKernel)
+		 * because this code is executed by the caller of the
+		 * .suspend/.shutdown callbacks, which is most likely the PM (or
+		 * another actor responsible for the suspend process). Because of
+		 * that, this thread shouldn't be frozen, and most likely even
+		 * cannot be. */
+		OSEventObjectWait(hEvent);
+	}
+
+out_put:
+	OSEventObjectClose(hEvent);
+
+	return eError;
+}
+
+PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void)
+{
+	PVRSRV_ERROR eError;
+
+	/* resume the driver and then signal so any waiting threads wake up */
+	if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_SUSPENDED,
+	                            _DRIVER_NOT_SUSPENDED) == _DRIVER_NOT_SUSPENDED)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Driver is not suspended", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = OSEventObjectSignal(g_hDriverThreadEventObject);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSEventObjectSignal failed: %s",
+		        __func__, PVRSRVGetErrorString(eError)));
+	}
+
+	return eError;
+}
+
+static PVRSRV_ERROR LinuxBridgeSignalIfSuspended(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
+	{
+		/* Don't shadow the outer eError here, or a signalling failure
+		 * would never be returned to the caller. */
+		eError = OSEventObjectSignal(g_hDriverThreadEventObject);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal driver thread event"
+			        " object: %s", __func__, PVRSRVGetErrorString(eError)));
+		}
+	}
+
+	return eError;
+}
+
+void LinuxBridgeNumActiveKernelThreadsIncrement(void)
+{
+	OSAtomicIncrement(&g_iNumActiveKernelThreads);
+}
+
+void LinuxBridgeNumActiveKernelThreadsDecrement(void)
+{
+	OSAtomicDecrement(&g_iNumActiveKernelThreads);
+	PVR_ASSERT(OSAtomicRead(&g_iNumActiveKernelThreads) >= 0);
+
+	/* Signal on every decrement in case LinuxBridgeBlockClientsAccess() is
+	 * waiting for the threads to freeze.
+	 * (error is logged in called function so ignore, we can't do much with
+	 * it anyway) */
+	(void) LinuxBridgeSignalIfSuspended();
+}
+
+static PVRSRV_ERROR _WaitForDriverUnsuspend(void)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hEvent;
+
+	eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__));
+		return eError;
+	}
+
+	while (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
+	{
+		/* we should be able to use normal (not kernel) wait here since
+		 * we were just unfrozen and most likely we're not going to
+		 * be frozen again (?) */
+		OSEventObjectWait(hEvent);
+	}
+
+	OSEventObjectClose(hEvent);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVDriverThreadEnter(void)
+{
+	PVRSRV_ERROR eError;
+
+	/* increment first so there is no race between this value and
+	 * g_iDriverSuspended in LinuxBridgeBlockClientsAccess() */
+	OSAtomicIncrement(&g_iNumActiveDriverThreads);
+
+	if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
+	{
+		/* decrement here because the driver is going to be suspended and
+		 * this thread is going to be frozen so we don't want to wait for
+		 * it in LinuxBridgeBlockClientsAccess() */
+		OSAtomicDecrement(&g_iNumActiveDriverThreads);
+
+		/* during suspend procedure this will put the current thread to
+		 * the freezer but during shutdown this will just return */
+		try_to_freeze();
+
+		/* If the thread was unfrozen but the flag is not yet set to
+		 * _DRIVER_NOT_SUSPENDED, wait for it.
+		 * If this is a shutdown the thread was not frozen, so we'll wait
+		 * here indefinitely, but this is OK (and in fact what we want)
+		 * because no thread should be entering the driver in such a
+		 * case. */
+		eError = _WaitForDriverUnsuspend();
+
+		/* Increment here because at this point the thread has entered
+		 * the driver. */
+		OSAtomicIncrement(&g_iNumActiveDriverThreads);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to wait for driver"
+			        " unsuspend: %s", __func__,
+			        PVRSRVGetErrorString(eError)));
+			return eError;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+void PVRSRVDriverThreadExit(void)
+{
+	OSAtomicDecrement(&g_iNumActiveDriverThreads);
+	/* If the driver is being suspended we need to signal the event object,
+	 * as the thread suspending the driver is waiting for active threads to
+	 * exit. The error is logged in the called function, so ignore the
+	 * returned error. */
+	(void) LinuxBridgeSignalIfSuspended();
+}
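+
+/* Illustrative sketch (not part of the driver): any entry point that runs
+ * driver code is expected to bracket its work with the Enter/Exit pair so
+ * that suspend can account for it; do_bridge_work() is a hypothetical
+ * stand-in for the caller's actual work:
+ *
+ *	if (PVRSRVDriverThreadEnter() == PVRSRV_OK)
+ *	{
+ *		do_bridge_work();
+ *		PVRSRVDriverThreadExit();
+ *	}
+ */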
+
+int
+PVRSRV_BridgeDispatchKM(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *pDRMFile)
+{
+	struct drm_pvr_srvkm_cmd *psSrvkmCmd = (struct drm_pvr_srvkm_cmd *) arg;
+	PVRSRV_BRIDGE_PACKAGE sBridgePackageKM = { 0 };
+	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pDRMFile->filp);
+	PVRSRV_ERROR error;
+
+	if (psConnection == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Connection is closed", __func__));
+		return -EFAULT;
+	}
+
+	PVR_ASSERT(psSrvkmCmd != NULL);
+
+	DRM_DEBUG("tgid=%d, tgid_connection=%d, bridge_id=%d, func_id=%d",
+			  task_tgid_nr(current),
+			  ((ENV_CONNECTION_DATA *)PVRSRVConnectionPrivateData(psConnection))->owner,
+			  psSrvkmCmd->bridge_id,
+			  psSrvkmCmd->bridge_func_id);
+
+	if ((error = PVRSRVDriverThreadEnter()) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s",
+		        __func__,
+		        PVRSRVGetErrorString(error)));
+		goto e0;
+	}
+
+	sBridgePackageKM.ui32BridgeID = psSrvkmCmd->bridge_id;
+	sBridgePackageKM.ui32FunctionID = psSrvkmCmd->bridge_func_id;
+	sBridgePackageKM.ui32Size = sizeof(sBridgePackageKM);
+	sBridgePackageKM.pvParamIn = (void __user *)(uintptr_t)psSrvkmCmd->in_data_ptr;
+	sBridgePackageKM.ui32InBufferSize = psSrvkmCmd->in_data_size;
+	sBridgePackageKM.pvParamOut = (void __user *)(uintptr_t)psSrvkmCmd->out_data_ptr;
+	sBridgePackageKM.ui32OutBufferSize = psSrvkmCmd->out_data_size;
+
+	error = BridgedDispatchKM(psConnection, &sBridgePackageKM);
+
+	PVRSRVDriverThreadExit();
+
+e0:
+	return OSPVRSRVToNativeError(error);
+}
+
+int
+PVRSRV_MMap(struct file *pFile, struct vm_area_struct *ps_vma)
+{
+	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
+	IMG_HANDLE hSecurePMRHandle = (IMG_HANDLE)((uintptr_t)ps_vma->vm_pgoff);
+	PMR *psPMR;
+	PVRSRV_ERROR eError;
+
+	if (psConnection == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Invalid connection data"));
+		return -ENOENT;
+	}
+
+	/*
+	 * The bridge lock that used to protect PVRSRVLookupHandle here has been
+	 * replaced by a dedicated lock, now that the handle functions have their
+	 * own locking. This change was necessary to resolve the lockdep issues
+	 * related to PVRSRV_MMap.
+	 */
+	mutex_lock(&g_sMMapMutex);
+
+	eError = PVRSRVLookupHandle(psConnection->psHandleBase,
+								(void **)&psPMR,
+								hSecurePMRHandle,
+								PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+								IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	/* Note: PMRMMapPMR will take a reference on the PMR.
+	 * Unref the handle immediately, because we have now done
+	 * the required operation on the PMR (whether it succeeded or not)
+	 */
+	eError = PMRMMapPMR(psPMR, ps_vma);
+	PVRSRVReleaseHandle(psConnection->psHandleBase, hSecurePMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PMRMMapPMR failed (%s)",
+				__func__, PVRSRVGetErrorString(eError)));
+		goto e0;
+	}
+
+	mutex_unlock(&g_sMMapMutex);
+
+	return 0;
+
+e0:
+	mutex_unlock(&g_sMMapMutex);
+
+	PVR_DPF((PVR_DBG_ERROR, "Unable to translate error %d", eError));
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	/* No exact native errno exists for this failure; return -ENOENT as the
+	 * closest match. */
+	return -ENOENT;
+}
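+
+/* Illustrative user-space view of the mapping path above (a sketch; fd and
+ * pmr_handle are hypothetical names). PVRSRV_MMap reads the secure PMR
+ * handle from vm_pgoff, so the caller encodes it in the mmap offset:
+ *
+ *	void *va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *	                fd, (off_t)pmr_handle << PAGE_SHIFT);
+ */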
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_bridge_k.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_bridge_k.h
new file mode 100644
index 0000000..10680ea
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_bridge_k.h
@@ -0,0 +1,103 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Bridge Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Receives calls from the user portion of services and
+                despatches them to functions in the kernel portion.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_BRIDGE_K_H_
+#define _PVR_BRIDGE_K_H_
+
+#include "pvrsrv_error.h"
+
+/*!
+******************************************************************************
+ @Function      LinuxBridgeBlockClientsAccess
+ @Description   This function will wait for any existing threads in the Server
+                to exit and then disable access to the driver. New threads will
+                not be allowed to enter the Server until the driver is
+                unsuspended (see LinuxBridgeUnblockClientsAccess).
+ @Input         bShutdown this flag indicates that the function was called
+                          from a shutdown callback and therefore it will
+                          not wait for the kernel threads to be frozen
+                          (because this does not happen during the shutdown
+                          procedure)
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown);
+
+/*!
+******************************************************************************
+ @Function      LinuxBridgeUnblockClientsAccess
+ @Description   This function will re-enable the bridge and allow any threads
+                waiting to enter the Server to continue.
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void);
+
+void LinuxBridgeNumActiveKernelThreadsIncrement(void);
+void LinuxBridgeNumActiveKernelThreadsDecrement(void);
+
+/*!
+******************************************************************************
+ @Function      PVRSRVDriverThreadEnter
+ @Description   Increments number of client threads currently operating
+                in the driver's context.
+                If the driver is currently being suspended this function
+                will call try_to_freeze() on behalf of the client thread.
+                When the driver is resumed the function will exit and allow
+                the thread into the driver.
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDriverThreadEnter(void);
+
+/*!
+******************************************************************************
+ @Function      PVRSRVDriverThreadExit
+ @Description   Decrements the number of client threads currently operating
+                in the driver's context to match the call to
+                PVRSRVDriverThreadEnter().
+                The function also signals the driver that a thread left the
+                driver context so if it's waiting to suspend it knows that
+                the number of threads decreased.
+******************************************************************************/
+void PVRSRVDriverThreadExit(void);
+
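+/* Illustrative pairing (a sketch, not part of this interface): a PM suspend
+ * callback would block clients before powering down and unblock them on
+ * resume; pvr_pm_suspend/pvr_pm_resume are hypothetical names:
+ *
+ *	static int pvr_pm_suspend(struct device *dev)
+ *	{
+ *		return (LinuxBridgeBlockClientsAccess(IMG_FALSE) == PVRSRV_OK) ?
+ *		       0 : -EBUSY;
+ *	}
+ *
+ *	static int pvr_pm_resume(struct device *dev)
+ *	{
+ *		return (LinuxBridgeUnblockClientsAccess() == PVRSRV_OK) ?
+ *		       0 : -EIO;
+ *	}
+ */
+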
+#endif /* _PVR_BRIDGE_K_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_buffer_sync.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_buffer_sync.c
new file mode 100644
index 0000000..e0323fb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_buffer_sync.c
@@ -0,0 +1,603 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          Linux buffer sync interface
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "services_kernel_client.h"
+#include "pvr_buffer_sync.h"
+#include "pvr_buffer_sync_shared.h"
+#include "pvr_drv.h"
+#include "pvr_fence.h"
+
+
+struct pvr_buffer_sync_context {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	struct mutex ctx_lock;
+#endif
+	struct pvr_fence_context *fence_ctx;
+	struct ww_acquire_ctx acquire_ctx;
+};
+
+struct pvr_buffer_sync_check_data {
+	struct dma_fence_cb base;
+
+	u32 nr_fences;
+	struct pvr_fence **fences;
+};
+
+struct pvr_buffer_sync_append_data {
+	struct pvr_buffer_sync_context *ctx;
+
+	u32 nr_pmrs;
+	struct _PMR_ **pmrs;
+	u32 *pmr_flags;
+
+	struct pvr_fence *update_fence;
+	struct pvr_buffer_sync_check_data *check_data;
+};
+
+
+static struct reservation_object *
+pmr_reservation_object_get(struct _PMR_ *pmr)
+{
+	struct dma_buf *dmabuf;
+
+	dmabuf = PhysmemGetDmaBuf(pmr);
+	if (dmabuf)
+		return dmabuf->resv;
+
+	return NULL;
+}
+
+static int
+pvr_buffer_sync_pmrs_lock(struct pvr_buffer_sync_context *ctx,
+			  u32 nr_pmrs,
+			  struct _PMR_ **pmrs)
+{
+	struct reservation_object *resv, *cresv = NULL, *lresv = NULL;
+	int i, err;
+	struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	mutex_lock(&ctx->ctx_lock);
+#endif
+
+	ww_acquire_init(acquire_ctx, &reservation_ww_class);
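+	/* Standard wound/wait backoff: on -EDEADLK every lock taken so far is
+	 * dropped, the contended reservation is slow-locked, and the loop
+	 * retries with that reservation already held (tracked via lresv). */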
+retry:
+	for (i = 0; i < nr_pmrs; i++) {
+		resv = pmr_reservation_object_get(pmrs[i]);
+		if (!resv) {
+			pr_err("%s: Failed to get reservation object from pmr %p\n",
+			       __func__, pmrs[i]);
+			err = -EINVAL;
+			goto fail;
+		}
+
+		if (resv != lresv) {
+			err = ww_mutex_lock_interruptible(&resv->lock,
+							  acquire_ctx);
+			if (err) {
+				cresv = (err == -EDEADLK) ? resv : NULL;
+				goto fail;
+			}
+		} else {
+			lresv = NULL;
+		}
+	}
+
+	ww_acquire_done(acquire_ctx);
+
+	return 0;
+
+fail:
+	while (i--) {
+		resv = pmr_reservation_object_get(pmrs[i]);
+		if (WARN_ON_ONCE(!resv))
+			continue;
+		ww_mutex_unlock(&resv->lock);
+	}
+
+	if (lresv)
+		ww_mutex_unlock(&lresv->lock);
+
+	if (cresv) {
+		err = ww_mutex_lock_slow_interruptible(&cresv->lock,
+						       acquire_ctx);
+		if (!err) {
+			lresv = cresv;
+			cresv = NULL;
+			goto retry;
+		}
+	}
+
+	ww_acquire_fini(acquire_ctx);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	mutex_unlock(&ctx->ctx_lock);
+#endif
+	return err;
+}
+
+static void
+pvr_buffer_sync_pmrs_unlock(struct pvr_buffer_sync_context *ctx,
+			    u32 nr_pmrs,
+			    struct _PMR_ **pmrs)
+{
+	struct reservation_object *resv;
+	int i;
+	struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx;
+
+	for (i = 0; i < nr_pmrs; i++) {
+		resv = pmr_reservation_object_get(pmrs[i]);
+		if (WARN_ON_ONCE(!resv))
+			continue;
+		ww_mutex_unlock(&resv->lock);
+	}
+
+	ww_acquire_fini(acquire_ctx);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	mutex_unlock(&ctx->ctx_lock);
+#endif
+}
+
+static u32
+pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, struct _PMR_ **pmrs,
+				 u32 *pmr_flags)
+{
+	struct reservation_object *resv;
+	struct reservation_object_list *resv_list;
+	struct dma_fence *fence;
+	u32 fence_count = 0;
+	bool exclusive;
+	int i;
+
+	for (i = 0; i < nr_pmrs; i++) {
+		exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
+
+		resv = pmr_reservation_object_get(pmrs[i]);
+		if (WARN_ON_ONCE(!resv))
+			continue;
+
+		resv_list = reservation_object_get_list(resv);
+		fence = reservation_object_get_excl(resv);
+
+		if (fence &&
+		    (!exclusive || !resv_list || !resv_list->shared_count))
+			fence_count++;
+
+		if (exclusive && resv_list)
+			fence_count += resv_list->shared_count;
+	}
+
+	return fence_count;
+}
+
+static struct pvr_buffer_sync_check_data *
+pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx,
+				    PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx,
+				    u32 nr_pmrs,
+				    struct _PMR_ **pmrs,
+				    u32 *pmr_flags)
+{
+	struct pvr_buffer_sync_check_data *data;
+	struct reservation_object *resv;
+	struct reservation_object_list *resv_list;
+	struct dma_fence *fence;
+	u32 fence_count;
+	bool exclusive;
+	int i, j;
+	int err;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	fence_count = pvr_buffer_sync_pmrs_fence_count(nr_pmrs, pmrs,
+						       pmr_flags);
+	if (fence_count) {
+		data->fences = kcalloc(fence_count, sizeof(*data->fences),
+				       GFP_KERNEL);
+		if (!data->fences)
+			goto err_check_data_free;
+	}
+
+	for (i = 0; i < nr_pmrs; i++) {
+		resv = pmr_reservation_object_get(pmrs[i]);
+		if (WARN_ON_ONCE(!resv))
+			continue;
+
+		exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
+		if (!exclusive) {
+			err = reservation_object_reserve_shared(resv
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
+								, 1
+#endif
+				);
+			if (err)
+				goto err_destroy_fences;
+		}
+
+		resv_list = reservation_object_get_list(resv);
+		fence = reservation_object_get_excl(resv);
+
+		if (fence &&
+		    (!exclusive || !resv_list || !resv_list->shared_count)) {
+			data->fences[data->nr_fences++] =
+				pvr_fence_create_from_fence(fence_ctx,
+							    sync_checkpoint_ctx,
+							    fence,
+							    PVRSRV_NO_FENCE,
+							    "exclusive check fence");
+			if (!data->fences[data->nr_fences - 1]) {
+				data->nr_fences--;
+				PVR_FENCE_TRACE(fence,
+						"waiting on exclusive fence\n");
+				WARN_ON(dma_fence_wait(fence, true) <= 0);
+			}
+		}
+
+		if (exclusive && resv_list) {
+			for (j = 0; j < resv_list->shared_count; j++) {
+				fence = rcu_dereference_protected(resv_list->shared[j],
+								  reservation_object_held(resv));
+				data->fences[data->nr_fences++] =
+					pvr_fence_create_from_fence(fence_ctx,
+								    sync_checkpoint_ctx,
+								    fence,
+								    PVRSRV_NO_FENCE,
+								    "check fence");
+				if (!data->fences[data->nr_fences - 1]) {
+					data->nr_fences--;
+					PVR_FENCE_TRACE(fence,
+							"waiting on non-exclusive fence\n");
+					WARN_ON(dma_fence_wait(fence, true) <= 0);
+				}
+			}
+		}
+	}
+
+	WARN_ON((i != nr_pmrs) || (data->nr_fences != fence_count));
+
+	return data;
+
+err_destroy_fences:
+	for (i = 0; i < data->nr_fences; i++)
+		pvr_fence_destroy(data->fences[i]);
+	kfree(data->fences);
+err_check_data_free:
+	kfree(data);
+	return NULL;
+}
+
+static void
+pvr_buffer_sync_check_fences_destroy(struct pvr_buffer_sync_check_data *data)
+{
+	int i;
+
+	for (i = 0; i < data->nr_fences; i++)
+		pvr_fence_destroy(data->fences[i]);
+
+	kfree(data->fences);
+	kfree(data);
+}
+
+struct pvr_buffer_sync_context *
+pvr_buffer_sync_context_create(struct device *dev, const char *name)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct pvr_drm_private *priv = ddev->dev_private;
+	struct pvr_buffer_sync_context *ctx;
+	int err;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		err = -ENOMEM;
+		goto err_exit;
+	}
+
+	ctx->fence_ctx = pvr_fence_context_create(priv->dev_node,
+						  priv->fence_status_wq,
+						  name);
+	if (!ctx->fence_ctx) {
+		err = -ENOMEM;
+		goto err_free_ctx;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	mutex_init(&ctx->ctx_lock);
+#endif
+
+	return ctx;
+
+err_free_ctx:
+	kfree(ctx);
+err_exit:
+	return ERR_PTR(err);
+}
+
+void
+pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx)
+{
+	pvr_fence_context_destroy(ctx->fence_ctx);
+	kfree(ctx);
+}
+
+int
+pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx,
+					  PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx,
+					  u32 nr_pmrs,
+					  struct _PMR_ **pmrs,
+					  u32 *pmr_flags,
+					  u32 *nr_fence_checkpoints_out,
+					  PSYNC_CHECKPOINT **fence_checkpoints_out,
+					  PSYNC_CHECKPOINT *update_checkpoints_out,
+					  struct pvr_buffer_sync_append_data **data_out)
+{
+	struct pvr_buffer_sync_append_data *data;
+	PSYNC_CHECKPOINT *fence_checkpoints;
+	const size_t data_size = sizeof(*data);
+	const size_t pmrs_size = sizeof(*pmrs) * nr_pmrs;
+	const size_t pmr_flags_size = sizeof(*pmr_flags) * nr_pmrs;
+	int i;
+	int j;
+	int err;
+
+	if (unlikely((nr_pmrs && !(pmrs && pmr_flags)) ||
+	    !nr_fence_checkpoints_out || !fence_checkpoints_out ||
+	    !update_checkpoints_out))
+		return -EINVAL;
+
+	for (i = 0; i < nr_pmrs; i++) {
+		if (unlikely(!(pmr_flags[i] & PVR_BUFFER_FLAG_MASK))) {
+			pr_err("%s: Invalid flags %#08x for pmr %p\n",
+			       __func__, pmr_flags[i], pmrs[i]);
+			return -EINVAL;
+		}
+	}
+
+#if defined(NO_HARDWARE)
+	/*
+	 * For NO_HARDWARE there's no checking or updating of sync checkpoints
+	 * which means SW waits on our fences will cause a deadlock (since they
+	 * will never be signalled). Avoid this by not creating any fences.
+	 */
+	nr_pmrs = 0;
+#endif
+
+	if (!nr_pmrs) {
+		*nr_fence_checkpoints_out = 0;
+		*fence_checkpoints_out = NULL;
+		*update_checkpoints_out = NULL;
+		*data_out = NULL;
+
+		return 0;
+	}
+
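+	/* A single allocation is carved into three contiguous regions: the
+	 * append data struct itself, the PMR array, and the flags array. */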
+	data = kzalloc(data_size + pmrs_size + pmr_flags_size, GFP_KERNEL);
+	if (unlikely(!data))
+		return -ENOMEM;
+
+	data->ctx = ctx;
+	data->pmrs = (struct _PMR_ **)((char *)data + data_size);
+	data->pmr_flags = (u32 *)((char *)data->pmrs + pmrs_size);
+
+	/*
+	 * It's expected that user space will provide a set of unique PMRs
+	 * but, as a PMR can have multiple handles, it's still possible to
+	 * end up here with duplicates. Take this opportunity to filter out
+	 * any remaining duplicates (updating flags when necessary) before
+	 * trying to process them further.
+	 */
+	for (i = 0; i < nr_pmrs; i++) {
+		for (j = 0; j < data->nr_pmrs; j++) {
+			if (data->pmrs[j] == pmrs[i]) {
+				data->pmr_flags[j] |= pmr_flags[i];
+				break;
+			}
+		}
+
+		if (j == data->nr_pmrs) {
+			data->pmrs[j] = pmrs[i];
+			data->pmr_flags[j] = pmr_flags[i];
+			data->nr_pmrs++;
+		}
+	}
+
+	err = pvr_buffer_sync_pmrs_lock(ctx, data->nr_pmrs, data->pmrs);
+	if (unlikely(err)) {
+		pr_err("%s: failed to lock pmrs (errno=%d)\n",
+		       __func__, err);
+		goto err_free_data;
+	}
+
+	/* create the check data */
+	data->check_data = pvr_buffer_sync_check_fences_create(ctx->fence_ctx,
+							 sync_checkpoint_ctx,
+							 data->nr_pmrs,
+							 data->pmrs,
+							 data->pmr_flags);
+	if (unlikely(!data->check_data)) {
+		err = -ENOMEM;
+		goto err_pmrs_unlock;
+	}
+
+	fence_checkpoints = kcalloc(data->check_data->nr_fences,
+				    sizeof(*fence_checkpoints),
+				    GFP_KERNEL);
+	if (fence_checkpoints) {
+		pvr_fence_get_checkpoints(data->check_data->fences,
+					  data->check_data->nr_fences,
+					  fence_checkpoints);
+	} else {
+		if (unlikely(data->check_data->nr_fences)) {
+			err = -ENOMEM;
+			goto err_free_check_data;
+		}
+	}
+
+	/* create the update fence */
+	data->update_fence = pvr_fence_create(ctx->fence_ctx,
+			sync_checkpoint_ctx,
+			SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, "update fence");
+	if (unlikely(!data->update_fence)) {
+		err = -ENOMEM;
+		goto err_free_fence_checkpoints;
+	}
+
+	/*
+	 * We need to clean up the fences once the HW has finished with them.
+	 * We can do this using fence callbacks. However, instead of adding a
+	 * callback to every fence, which would result in more work, we can
+	 * simply add one to the update fence since this will be the last fence
+	 * to be signalled. This callback can do all the necessary clean up.
+	 *
+	 * Note: we take an additional reference on the update fence in case
+	 * it signals before we can add it to a reservation object.
+	 */
+	PVR_FENCE_TRACE(&data->update_fence->base,
+			"create fence calling dma_fence_get\n");
+	dma_fence_get(&data->update_fence->base);
+
+	*nr_fence_checkpoints_out = data->check_data->nr_fences;
+	*fence_checkpoints_out = fence_checkpoints;
+	*update_checkpoints_out = pvr_fence_get_checkpoint(data->update_fence);
+	*data_out = data;
+
+	return err;
+
+err_free_fence_checkpoints:
+	kfree(fence_checkpoints);
+err_free_check_data:
+	pvr_buffer_sync_check_fences_destroy(data->check_data);
+err_pmrs_unlock:
+	pvr_buffer_sync_pmrs_unlock(ctx, data->nr_pmrs, data->pmrs);
+err_free_data:
+	kfree(data);
+	return err;
+}
+
+void
+pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data)
+{
+	struct reservation_object *resv;
+	int i;
+
+	dma_fence_enable_sw_signaling(&data->update_fence->base);
+
+	for (i = 0; i < data->nr_pmrs; i++) {
+		resv = pmr_reservation_object_get(data->pmrs[i]);
+		if (WARN_ON_ONCE(!resv))
+			continue;
+
+		if (data->pmr_flags[i] & PVR_BUFFER_FLAG_WRITE) {
+			PVR_FENCE_TRACE(&data->update_fence->base,
+					"added exclusive fence (%s) to resv %p\n",
+					data->update_fence->name, resv);
+			reservation_object_add_excl_fence(resv,
+							  &data->update_fence->base);
+		} else if (data->pmr_flags[i] & PVR_BUFFER_FLAG_READ) {
+			PVR_FENCE_TRACE(&data->update_fence->base,
+					"added non-exclusive fence (%s) to resv %p\n",
+					data->update_fence->name, resv);
+			reservation_object_add_shared_fence(resv,
+								&data->update_fence->base);
+		}
+	}
+
+	/*
+	 * Now that the fence has been added to the necessary
+	 * reservation objects we can safely drop the extra reference
+	 * we took in pvr_buffer_sync_resolve_and_create_fences().
+	 */
+	dma_fence_put(&data->update_fence->base);
+	pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs,
+					data->pmrs);
+
+	/* destroy the check fences */
+	pvr_buffer_sync_check_fences_destroy(data->check_data);
+	/* destroy the update fence */
+	pvr_fence_destroy(data->update_fence);
+
+	/* free the append data */
+	kfree(data);
+}
+
+void
+pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data)
+{
+	/*
+	 * Drop the extra reference we took on the update fence in
+	 * pvr_buffer_sync_resolve_and_create_fences().
+	 */
+	dma_fence_put(&data->update_fence->base);
+
+	if (data->nr_pmrs > 0)
+		pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs,
+					    data->pmrs);
+
+	/* destroy the check fences */
+	pvr_buffer_sync_check_fences_destroy(data->check_data);
+	/* destroy the update fence */
+	pvr_fence_destroy(data->update_fence);
+
+	/* free the append data */
+	kfree(data);
+}
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+enum tag_img_bool
+pvr_buffer_sync_checkpoint_ufo_has_signalled(u32 fwaddr, u32 value)
+{
+	return pvr_fence_checkpoint_ufo_has_signalled(fwaddr, value);
+}
+
+void
+pvr_buffer_sync_check_state(void)
+{
+	pvr_fence_check_state();
+}
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_buffer_sync.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_buffer_sync.h
new file mode 100644
index 0000000..2668f93
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_buffer_sync.h
@@ -0,0 +1,143 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File           pvr_buffer_sync.h
+@Title          PowerVR Linux buffer sync interface
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_BUFFER_SYNC_H
+#define PVR_BUFFER_SYNC_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct _PMR_;
+struct pvr_buffer_sync_context;
+struct pvr_buffer_sync_append_data;
+
+/**
+ * pvr_buffer_sync_context_create - creates a buffer sync context
+ * @dev: Linux device
+ * @name: context name (used for debugging)
+ *
+ * pvr_buffer_sync_context_destroy() should be used to clean up the buffer
+ * sync context.
+ *
+ * Return: A buffer sync context or NULL if it fails for any reason.
+ */
+struct pvr_buffer_sync_context *
+pvr_buffer_sync_context_create(struct device *dev, const char *name);
+
+/**
+ * pvr_buffer_sync_context_destroy() - frees a buffer sync context
+ * @ctx: buffer sync context
+ */
+void
+pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx);
+
+/**
+ * pvr_buffer_sync_resolve_and_create_fences() - create checkpoints from
+ *                                               buffers
+ * @ctx: buffer sync context
+ * @sync_checkpoint_ctx: context in which to create sync checkpoints
+ * @nr_pmrs: number of buffer objects (PMRs)
+ * @pmrs: buffer array
+ * @pmr_flags: internal flags
+ * @nr_fence_checkpoints_out: returned number of fence sync checkpoints
+ * @fence_checkpoints_out: returned array of fence sync checkpoints
+ * @update_checkpoint_out: returned update sync checkpoint
+ * @data_out: returned buffer sync data
+ *
+ * After this call, either pvr_buffer_sync_kick_succeeded() or
+ * pvr_buffer_sync_kick_failed() must be called.
+ *
+ * Return: 0 on success or an error code otherwise.
+ */
+int
+pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx,
+					  PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx,
+					  u32 nr_pmrs,
+					  struct _PMR_ **pmrs,
+					  u32 *pmr_flags,
+					  u32 *nr_fence_checkpoints_out,
+					  PSYNC_CHECKPOINT **fence_checkpoints_out,
+					  PSYNC_CHECKPOINT *update_checkpoint_out,
+					  struct pvr_buffer_sync_append_data **data_out);
+
+/**
+ * pvr_buffer_sync_kick_succeeded() - cleans up after a successful kick
+ *                                    operation
+ * @data: buffer sync data returned by
+ *        pvr_buffer_sync_resolve_and_create_fences()
+ *
+ * Should only be called following pvr_buffer_sync_resolve_and_create_fences().
+ */
+void
+pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data);
+
+/**
+ * pvr_buffer_sync_kick_failed() - cleans up after a failed kick operation
+ * @data: buffer sync data returned by
+ *        pvr_buffer_sync_resolve_and_create_fences()
+ *
+ * Should only be called following pvr_buffer_sync_resolve_and_create_fences().
+ */
+void
+pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data);
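+
+/* Typical call flow (an illustrative sketch; submit_to_hw() is a
+ * hypothetical stand-in for the caller's command submission, and data is
+ * NULL when no PMRs were supplied):
+ *
+ *	err = pvr_buffer_sync_resolve_and_create_fences(ctx, checkpoint_ctx,
+ *		nr_pmrs, pmrs, flags, &nr_checkpoints, &checkpoints,
+ *		&update_checkpoint, &data);
+ *	if (!err && data) {
+ *		if (submit_to_hw() == 0)
+ *			pvr_buffer_sync_kick_succeeded(data);
+ *		else
+ *			pvr_buffer_sync_kick_failed(data);
+ *	}
+ */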
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+/**
+ * pvr_buffer_sync_checkpoint_ufo_has_signalled() - signals that a checkpoint's
+ *                                                  state has been updated
+ * @fwaddr: firmware address of the updated checkpoint
+ * @value: the new value of the checkpoint
+ */
+enum tag_img_bool
+pvr_buffer_sync_checkpoint_ufo_has_signalled(u32 fwaddr, u32 value);
+
+/**
+ * pvr_buffer_sync_check_state() - performs a full sync state check
+ */
+void
+pvr_buffer_sync_check_state(void);
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB)*/
+#endif /* PVR_BUFFER_SYNC_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_buffer_sync_shared.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_buffer_sync_shared.h
new file mode 100644
index 0000000..9258a45
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_buffer_sync_shared.h
@@ -0,0 +1,52 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR buffer sync shared
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared definitions between client and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_BUFFER_SYNC_SHARED_H__
+#define __PVR_BUFFER_SYNC_SHARED_H__
+
+#define PVR_BUFFER_FLAG_READ		(1 << 0)
+#define PVR_BUFFER_FLAG_WRITE		(1 << 1)
+#define PVR_BUFFER_FLAG_MASK		(PVR_BUFFER_FLAG_READ | \
+					 PVR_BUFFER_FLAG_WRITE)
+
+#endif /* __PVR_BUFFER_SYNC_SHARED_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_counting_timeline.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_counting_timeline.c
new file mode 100644
index 0000000..3100859
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_counting_timeline.c
@@ -0,0 +1,309 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR Linux software "counting" timeline fence implementation
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+
+#include "services_kernel_client.h"
+#include "pvr_counting_timeline.h"
+#include "pvr_sw_fence.h"
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \
+	do {                                                             \
+		if (pfnDumpDebugPrintf)                                  \
+			pfnDumpDebugPrintf(pvDumpDebugFile, fmt,         \
+					   ## __VA_ARGS__);              \
+		else                                                     \
+			pr_err(fmt "\n", ## __VA_ARGS__);                \
+	} while (0)
+
+struct pvr_counting_fence_timeline {
+	struct pvr_sw_fence_context *context;
+
+	void *dbg_request_handle;
+
+	spinlock_t active_fences_lock;
+	u64 current_value; /* guarded by active_fences_lock */
+	u64 next_value; /* guarded by active_fences_lock */
+	struct list_head active_fences;
+
+	struct kref kref;
+};
+
+struct pvr_counting_fence {
+	u64 value;
+	struct dma_fence *fence;
+	struct list_head active_list_entry;
+};
+
+void pvr_counting_fence_timeline_dump_timeline(
+	void *data,
+	DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+	void *dump_debug_file)
+{
+	struct pvr_counting_fence_timeline *timeline =
+		(struct pvr_counting_fence_timeline *) data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&timeline->active_fences_lock, flags);
+
+	PVR_DUMPDEBUG_LOG(dump_debug_printf, dump_debug_file,
+			  "TL:%s SeqNum: %llu/%llu",
+			  pvr_sw_fence_context_name(timeline->context),
+			  timeline->current_value,
+			  timeline->next_value);
+
+	spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+}
+
+static void
+pvr_counting_fence_timeline_debug_request(void *data, u32 verbosity,
+			DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile)
+{
+	struct pvr_counting_fence_timeline *timeline =
+		(struct pvr_counting_fence_timeline *)data;
+	struct pvr_counting_fence *obj;
+	unsigned long flags;
+	char value[128];
+
+	if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) {
+		spin_lock_irqsave(&timeline->active_fences_lock, flags);
+		pvr_sw_fence_context_value_str(timeline->context, value,
+					       sizeof(value));
+		PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+				  "sw: %s @%s cur=%llu",
+				  pvr_sw_fence_context_name(timeline->context),
+				  value, timeline->current_value);
+		list_for_each_entry(obj, &timeline->active_fences,
+				    active_list_entry) {
+			obj->fence->ops->fence_value_str(obj->fence,
+							 value, sizeof(value));
+			PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+					  " @%s: val=%llu", value, obj->value);
+		}
+		spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+	}
+}
+
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create(
+	void *dev_cookie,
+	const char *name)
+{
+	PVRSRV_ERROR srv_err;
+	struct pvr_counting_fence_timeline *timeline =
+		kmalloc(sizeof(*timeline), GFP_KERNEL);
+
+	if (!timeline)
+		goto err_out;
+
+	timeline->context = pvr_sw_fence_context_create(name,
+							"pvr_sw_sync");
+	if (!timeline->context)
+		goto err_free_timeline;
+
+	srv_err = PVRSRVRegisterDbgRequestNotify(&timeline->dbg_request_handle,
+				dev_cookie,
+				pvr_counting_fence_timeline_debug_request,
+				DEBUG_REQUEST_LINUXFENCE,
+				timeline);
+	if (srv_err != PVRSRV_OK) {
+		pr_err("%s: failed to register debug request callback (%s)\n",
+		       __func__, PVRSRVGetErrorString(srv_err));
+		goto err_free_timeline_ctx;
+	}
+
+	timeline->current_value = 0;
+	timeline->next_value = 1;
+	kref_init(&timeline->kref);
+	spin_lock_init(&timeline->active_fences_lock);
+	INIT_LIST_HEAD(&timeline->active_fences);
+
+err_out:
+	return timeline;
+
+err_free_timeline_ctx:
+	pvr_sw_fence_context_destroy(timeline->context);
+
+err_free_timeline:
+	kfree(timeline);
+	timeline = NULL;
+	goto err_out;
+}
+
+void pvr_counting_fence_timeline_force_complete(
+	struct pvr_counting_fence_timeline *timeline)
+{
+	struct list_head *entry, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&timeline->active_fences_lock, flags);
+
+	/* This is just a safety measure. Normally we should never see any
+	 * unsignalled SW fences when we get here, so warn if we do. */
+	WARN_ON(!list_empty(&timeline->active_fences));
+
+	list_for_each_safe(entry, tmp, &timeline->active_fences) {
+		struct pvr_counting_fence *fence =
+			list_entry(entry, struct pvr_counting_fence,
+			active_list_entry);
+		dma_fence_signal(fence->fence);
+		dma_fence_put(fence->fence);
+		fence->fence = NULL;
+		list_del(&fence->active_list_entry);
+		kfree(fence);
+	}
+	spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+}
+
+static void pvr_counting_fence_timeline_destroy(
+	struct kref *kref)
+{
+	struct pvr_counting_fence_timeline *timeline =
+		container_of(kref, struct pvr_counting_fence_timeline, kref);
+
+	WARN_ON(!list_empty(&timeline->active_fences));
+
+	PVRSRVUnregisterDbgRequestNotify(timeline->dbg_request_handle);
+
+	pvr_sw_fence_context_destroy(timeline->context);
+	kfree(timeline);
+}
+
+void pvr_counting_fence_timeline_put(
+	struct pvr_counting_fence_timeline *timeline)
+{
+	kref_put(&timeline->kref, pvr_counting_fence_timeline_destroy);
+}
+
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get(
+	struct pvr_counting_fence_timeline *timeline)
+{
+	if (!timeline)
+		return NULL;
+	kref_get(&timeline->kref);
+	return timeline;
+}
+
+struct dma_fence *pvr_counting_fence_create(
+	struct pvr_counting_fence_timeline *timeline, u64 *sync_pt_idx)
+{
+	unsigned long flags;
+	struct dma_fence *sw_fence;
+	struct pvr_counting_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+
+	if (!fence)
+		return NULL;
+
+	sw_fence = pvr_sw_fence_create(timeline->context);
+	if (!sw_fence)
+		goto err_free_fence;
+
+	fence->fence = dma_fence_get(sw_fence);
+
+	spin_lock_irqsave(&timeline->active_fences_lock, flags);
+
+	fence->value = timeline->next_value++;
+	if (sync_pt_idx)
+		*sync_pt_idx = fence->value;
+
+	list_add_tail(&fence->active_list_entry, &timeline->active_fences);
+
+	spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+
+	/* Counting fences can be signalled any time after creation */
+	dma_fence_enable_sw_signaling(sw_fence);
+
+	return sw_fence;
+
+err_free_fence:
+	kfree(fence);
+	return NULL;
+}
+
+bool pvr_counting_fence_timeline_inc(
+	struct pvr_counting_fence_timeline *timeline, u64 *sync_pt_idx)
+{
+	struct list_head *entry, *tmp;
+	unsigned long flags;
+	bool res;
+
+	spin_lock_irqsave(&timeline->active_fences_lock, flags);
+
+	if (timeline->current_value == timeline->next_value - 1) {
+		res = false;
+		goto exit_unlock;
+	}
+
+	timeline->current_value++;
+
+	if (sync_pt_idx) {
+		*sync_pt_idx = timeline->current_value;
+	}
+
+	list_for_each_safe(entry, tmp, &timeline->active_fences) {
+		struct pvr_counting_fence *fence =
+			list_entry(entry, struct pvr_counting_fence,
+			active_list_entry);
+		if (fence->value <= timeline->current_value) {
+			dma_fence_signal(fence->fence);
+			dma_fence_put(fence->fence);
+			fence->fence = NULL;
+			list_del(&fence->active_list_entry);
+			kfree(fence);
+		}
+	}
+
+	res = true;
+
+exit_unlock:
+	spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+
+	return res;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_counting_timeline.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_counting_timeline.h
new file mode 100644
index 0000000..f7c6f04
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_counting_timeline.h
@@ -0,0 +1,71 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_COUNTING_TIMELINE_H__)
+#define __PVR_COUNTING_TIMELINE_H__
+
+#include "pvr_linux_fence.h"
+
+struct pvr_counting_fence_timeline;
+
+void pvr_counting_fence_timeline_dump_timeline(
+	void *data,
+	DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+	void *dump_debug_file);
+
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create(
+	void *dev_cookie,
+	const char *name);
+void pvr_counting_fence_timeline_put(
+	struct pvr_counting_fence_timeline *fence_timeline);
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get(
+	struct pvr_counting_fence_timeline *fence_timeline);
+struct dma_fence *pvr_counting_fence_create(
+	struct pvr_counting_fence_timeline *fence_timeline, u64 *sync_pt_idx);
+bool pvr_counting_fence_timeline_inc(
+	struct pvr_counting_fence_timeline *fence_timeline, u64 *sync_pt_idx);
+void pvr_counting_fence_timeline_force_complete(
+	struct pvr_counting_fence_timeline *fence_timeline);
+
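+/* Illustrative use (a sketch; tl is a hypothetical timeline pointer): each
+ * created fence is assigned the next sequence value, and advancing the
+ * timeline signals every active fence at or below the new value:
+ *
+ *	u64 idx;
+ *	struct dma_fence *f = pvr_counting_fence_create(tl, &idx);
+ *	...
+ *	pvr_counting_fence_timeline_inc(tl, NULL);
+ */
+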
+#endif /* !defined(__PVR_COUNTING_TIMELINE_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_debug.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_debug.c
new file mode 100644
index 0000000..b30db80
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_debug.c
@@ -0,0 +1,1831 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides kernel side debug functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <stdarg.h>
+
+#include "allocmem.h"
+#include "pvrversion.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "servicesext.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "pvr_debugfs.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+#include "pvrsrv.h"
+#include "lists.h"
+#include "osfunc.h"
+
+#include "rgx_options.h"
+
+#if defined(SUPPORT_RGX)
+#include "rgxdevice.h"
+#include "rgxdebug.h"
+#include "rgxinit.h"
+#include "rgxfwutils.h"
+#include "sofunc_rgx.h"
+/* Handle used by DebugFS to get GPU utilisation stats */
+static IMG_HANDLE ghGpuUtilUserDebugFS;
+#endif
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/******** BUFFERED LOG MESSAGES ********/
+
+/* Because we don't want to have to handle CCB wrapping, each buffered
+ * message is rounded up to PVRSRV_DEBUG_CCB_MESG_MAX bytes. This means
+ * there is the same fixed number of messages that can be stored,
+ * regardless of message length.
+ */
+
+#if defined(PVRSRV_DEBUG_CCB_MAX)
+
+#define PVRSRV_DEBUG_CCB_MESG_MAX	PVR_MAX_DEBUG_MESSAGE_LEN
+
+#include <linux/syscalls.h>
+#include <linux/time.h>
+
+typedef struct
+{
+	const IMG_CHAR *pszFile;
+	IMG_INT iLine;
+	IMG_UINT32 ui32TID;
+	IMG_UINT32 ui32PID;
+	IMG_CHAR pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX];
+	struct timeval sTimeVal;
+}
+PVRSRV_DEBUG_CCB;
+
+static PVRSRV_DEBUG_CCB gsDebugCCB[PVRSRV_DEBUG_CCB_MAX];
+
+static IMG_UINT giOffset;
+
+static DEFINE_MUTEX(gsDebugCCBMutex);
+
+static void
+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
+			   const IMG_CHAR *szBuffer)
+{
+	mutex_lock(&gsDebugCCBMutex);
+
+	gsDebugCCB[giOffset].pszFile = pszFileName;
+	gsDebugCCB[giOffset].iLine   = ui32Line;
+	gsDebugCCB[giOffset].ui32TID = current->pid;
+	gsDebugCCB[giOffset].ui32PID = current->tgid;
+
+	do_gettimeofday(&gsDebugCCB[giOffset].sTimeVal);
+
+	strncpy(gsDebugCCB[giOffset].pcMesg, szBuffer, PVRSRV_DEBUG_CCB_MESG_MAX - 1);
+	gsDebugCCB[giOffset].pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX - 1] = 0;
+
+	giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX;
+
+	mutex_unlock(&gsDebugCCBMutex);
+}
+
+void PVRSRVDebugPrintfDumpCCB(void)
+{
+	int i;
+
+	mutex_lock(&gsDebugCCBMutex);
+
+	for (i = 0; i < PVRSRV_DEBUG_CCB_MAX; i++)
+	{
+		PVRSRV_DEBUG_CCB *psDebugCCBEntry =
+			&gsDebugCCB[(giOffset + i) % PVRSRV_DEBUG_CCB_MAX];
+
+		/* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */
+		if (!psDebugCCBEntry->pszFile)
+		{
+			continue;
+		}
+
+		printk(KERN_ERR "%s:%d: (%ld.%ld, tid=%u, pid=%u) %s\n",
+			   psDebugCCBEntry->pszFile,
+			   psDebugCCBEntry->iLine,
+			   (long)psDebugCCBEntry->sTimeVal.tv_sec,
+			   (long)psDebugCCBEntry->sTimeVal.tv_usec,
+			   psDebugCCBEntry->ui32TID,
+			   psDebugCCBEntry->ui32PID,
+			   psDebugCCBEntry->pcMesg);
+
+		/* Clear this entry so it doesn't get printed the next time again. */
+		psDebugCCBEntry->pszFile = NULL;
+	}
+
+	mutex_unlock(&gsDebugCCBMutex);
+}
+
+#else /* defined(PVRSRV_DEBUG_CCB_MAX) */
+static INLINE void
+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
+			   const IMG_CHAR *szBuffer)
+{
+	(void)pszFileName;
+	(void)szBuffer;
+	(void)ui32Line;
+}
+
+void PVRSRVDebugPrintfDumpCCB(void)
+{
+	/* Not available */
+}
+
+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+#define PVR_MAX_FILEPATH_LEN 256
+
+#if !defined(PVR_TESTING_UTILS)
+static
+#endif
+IMG_UINT32 gPVRDebugLevel =
+	(
+	 DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING
+
+#if defined(PVRSRV_DEBUG_CCB_MAX)
+	 | DBGPRIV_BUFFERED
+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
+
+#if defined(PVR_DPF_ADHOC_DEBUG_ON)
+	 | DBGPRIV_DEBUG
+#endif /* defined(PVR_DPF_ADHOC_DEBUG_ON) */
+	);
+
+module_param(gPVRDebugLevel, uint, 0644);
+MODULE_PARM_DESC(gPVRDebugLevel,
+				 "Sets the level of debug output (default 0x7)");
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+#define	PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
+
+/* Message buffer for non-IRQ messages */
+static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1];
+
+/* Message buffer for IRQ messages */
+static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1];
+
+/* The lock is used to control access to gszBufferNonIRQ */
+static DEFINE_MUTEX(gsDebugMutexNonIRQ);
+
+/* The lock is used to control access to gszBufferIRQ */
+static DEFINE_SPINLOCK(gsDebugLockIRQ);
+
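+/* Use the IRQ-safe spinlock (and the IRQ buffer) when running in interrupt
+ * context or with preemption disabled, where a mutex must not be taken
+ * because it may sleep; otherwise use the mutex-protected buffer.
+ */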
+#define	USE_SPIN_LOCK (in_interrupt() || !preemptible())
+
+static inline void GetBufferLock(unsigned long *pulLockFlags)
+{
+	if (USE_SPIN_LOCK)
+	{
+		spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags);
+	}
+	else
+	{
+		__acquire(&gsDebugLockIRQ);
+		mutex_lock(&gsDebugMutexNonIRQ);
+	}
+}
+
+static inline void ReleaseBufferLock(unsigned long ulLockFlags)
+{
+	if (USE_SPIN_LOCK)
+	{
+		spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
+	}
+	else
+	{
+		__release(&gsDebugLockIRQ);
+		mutex_unlock(&gsDebugMutexNonIRQ);
+	}
+}
+
+static inline void SelectBuffer(IMG_CHAR **ppszBuf, IMG_UINT32 *pui32BufSiz)
+{
+	if (USE_SPIN_LOCK)
+	{
+		*ppszBuf = gszBufferIRQ;
+		*pui32BufSiz = sizeof(gszBufferIRQ);
+	}
+	else
+	{
+		*ppszBuf = gszBufferNonIRQ;
+		*pui32BufSiz = sizeof(gszBufferNonIRQ);
+	}
+}
+
+/*
+ * Append a string to a buffer using formatted conversion.
+ * The variable arguments are supplied via the va_list parameter.
+ */
+__printf(3, 0)
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, va_list VArgs)
+{
+	IMG_UINT32 ui32Used;
+	IMG_UINT32 ui32Space;
+	IMG_INT32 i32Len;
+
+	ui32Used = strlen(pszBuf);
+	BUG_ON(ui32Used >= ui32BufSiz);
+	ui32Space = ui32BufSiz - ui32Used;
+
+	i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
+	pszBuf[ui32BufSiz - 1] = 0;
+
+	/* Return true if string was truncated */
+	return i32Len < 0 || i32Len >= (IMG_INT32)ui32Space;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVReleasePrintf
+@Description    To output an important message to the user in release builds
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...)
+{
+	va_list vaArgs;
+	unsigned long ulLockFlags = 0;
+	IMG_CHAR *pszBuf;
+	IMG_UINT32 ui32BufSiz;
+	IMG_INT32  result;
+
+	SelectBuffer(&pszBuf, &ui32BufSiz);
+
+	va_start(vaArgs, pszFormat);
+
+	GetBufferLock(&ulLockFlags);
+
+	result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR_K:  %u: ", current->pid);
+	PVR_ASSERT(result > 0);
+	ui32BufSiz -= result;
+
+	if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+	{
+		printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+	}
+	else
+	{
+		printk(KERN_ERR "%s\n", pszBuf);
+	}
+
+	ReleaseBufferLock(ulLockFlags);
+	va_end(vaArgs);
+}
+
+#if defined(PVRSRV_NEED_PVR_TRACE)
+
+/*************************************************************************/ /*!
+@Function       PVRSRVTrace
+@Description    To output a debug message to the user
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVTrace(const IMG_CHAR *pszFormat, ...)
+{
+	va_list VArgs;
+	unsigned long ulLockFlags = 0;
+	IMG_CHAR *pszBuf;
+	IMG_UINT32 ui32BufSiz;
+	IMG_INT32  result;
+
+	SelectBuffer(&pszBuf, &ui32BufSiz);
+
+	va_start(VArgs, pszFormat);
+
+	GetBufferLock(&ulLockFlags);
+
+	result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR: %u: ", current->pid);
+	PVR_ASSERT(result > 0);
+	ui32BufSiz -= result;
+
+	if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs))
+	{
+		printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+	}
+	else
+	{
+		printk(KERN_ERR "%s\n", pszBuf);
+	}
+
+	ReleaseBufferLock(ulLockFlags);
+
+	va_end(VArgs);
+}
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/*
+ * Append a string to a buffer using formatted conversion.
+ * The function takes a variable number of arguments, calling
+ * VBAppend to do the actual work.
+ */
+__printf(3, 4)
+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...)
+{
+	va_list VArgs;
+	IMG_BOOL bTrunc;
+
+	va_start (VArgs, pszFormat);
+
+	bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
+
+	va_end (VArgs);
+
+	return bTrunc;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugPrintf
+@Description    To output a debug message to the user
+@Input          uDebugLevel The current debug level
+@Input          pszFile     The source file generating the message
+@Input          uLine       The line of the source file
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+			   const IMG_CHAR *pszFullFileName,
+			   IMG_UINT32 ui32Line,
+			   const IMG_CHAR *pszFormat,
+			   ...)
+{
+	const IMG_CHAR *pszFileName = pszFullFileName;
+	IMG_CHAR *pszLeafName;
+	va_list vaArgs;
+	unsigned long ulLockFlags = 0;
+	IMG_CHAR *pszBuf;
+	IMG_UINT32 ui32BufSiz;
+
+	if (!(gPVRDebugLevel & ui32DebugLevel))
+	{
+		return;
+	}
+
+	SelectBuffer(&pszBuf, &ui32BufSiz);
+
+	va_start(vaArgs, pszFormat);
+
+	GetBufferLock(&ulLockFlags);
+
+	switch (ui32DebugLevel)
+	{
+		case DBGPRIV_FATAL:
+		{
+			strncpy(pszBuf, "PVR_K:(Fatal): ", (ui32BufSiz - 2));
+			break;
+		}
+		case DBGPRIV_ERROR:
+		{
+			strncpy(pszBuf, "PVR_K:(Error): ", (ui32BufSiz - 2));
+			break;
+		}
+		case DBGPRIV_WARNING:
+		{
+			strncpy(pszBuf, "PVR_K:(Warn):  ", (ui32BufSiz - 2));
+			break;
+		}
+		case DBGPRIV_MESSAGE:
+		{
+			strncpy(pszBuf, "PVR_K:(Mesg):  ", (ui32BufSiz - 2));
+			break;
+		}
+		case DBGPRIV_VERBOSE:
+		{
+			strncpy(pszBuf, "PVR_K:(Verb):  ", (ui32BufSiz - 2));
+			break;
+		}
+		case DBGPRIV_DEBUG:
+		{
+			strncpy(pszBuf, "PVR_K:(Debug): ", (ui32BufSiz - 2));
+			break;
+		}
+		case DBGPRIV_CALLTRACE:
+		case DBGPRIV_ALLOC:
+		case DBGPRIV_BUFFERED:
+		default:
+		{
+			strncpy(pszBuf, "PVR_K: ", (ui32BufSiz - 2));
+			break;
+		}
+	}
+	pszBuf[ui32BufSiz - 1] = '\0';
+
+	if (current->pid == task_tgid_nr(current))
+	{
+		(void) BAppend(pszBuf, ui32BufSiz, "%5u: ", current->pid);
+	}
+	else
+	{
+		(void) BAppend(pszBuf, ui32BufSiz, "%5u-%5u: ", task_tgid_nr(current) /* process id */, current->pid /* thread id */);
+	}
+
+	if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+	{
+		printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+	}
+	else
+	{
+		IMG_BOOL bTruncated = IMG_FALSE;
+
+#if !defined(__sh__)
+		pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '/');
+
+		if (pszLeafName)
+		{
+			pszFileName = pszLeafName+1;
+		}
+#endif /* __sh__ */
+
+#if defined(DEBUG)
+		{
+			static const IMG_CHAR *lastFile;
+
+			if (lastFile == pszFileName)
+			{
+				bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line);
+			}
+			else
+			{
+				bTruncated = BAppend(pszBuf, ui32BufSiz, " [%s:%u]", pszFileName, ui32Line);
+				lastFile = pszFileName;
+			}
+		}
+#endif
+
+		if (bTruncated)
+		{
+			printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+		}
+		else
+		{
+			if (ui32DebugLevel & DBGPRIV_BUFFERED)
+			{
+				AddToBufferCCB(pszFileName, ui32Line, pszBuf);
+			}
+			else
+			{
+				printk(KERN_ERR "%s\n", pszBuf);
+			}
+		}
+	}
+
+	ReleaseBufferLock(ulLockFlags);
+
+	va_end (vaArgs);
+}
+
+#endif /* PVRSRV_NEED_PVR_DPF */
+
+
+/*************************************************************************/ /*!
+ Version DebugFS entry
+*/ /**************************************************************************/
+
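+/* The version entry is implemented with the kernel seq_file interface:
+ * _DebugVersionSeqStart returns SEQ_START_TOKEN for position 0 (the
+ * driver-wide header) and _DebugVersionSeqNext walks the device node
+ * list, so _DebugVersionSeqShow is invoked once per device.
+ */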
+static void *_DebugVersionCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+					  va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugVersionSeqStart(struct seq_file *psSeqFile,
+				   loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	if (*puiPosition == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugVersionCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _DebugVersionSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugVersionSeqNext(struct seq_file *psSeqFile,
+				  void *pvData,
+				  loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugVersionCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+#define SEQ_PRINT_VERSION_FMTSPEC "%s Version: %u.%u @ %u (%s) build options: 0x%08x %s\n"
+#define STR_DEBUG   "debug"
+#define STR_RELEASE "release"
+
+static int _DebugVersionSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if (pvData == SEQ_START_TOKEN)
+	{
+		if (psPVRSRVData->sDriverInfo.bIsNoMatch)
+		{
+			const BUILD_INFO *psBuildInfo;
+
+			psBuildInfo = &psPVRSRVData->sDriverInfo.sUMBuildInfo;
+			seq_printf(psSeqFile, SEQ_PRINT_VERSION_FMTSPEC,
+			           "UM Driver",
+				       PVRVERSION_UNPACK_MAJ(psBuildInfo->ui32BuildVersion),
+				       PVRVERSION_UNPACK_MIN(psBuildInfo->ui32BuildVersion),
+				       psBuildInfo->ui32BuildRevision,
+				       (psBuildInfo->ui32BuildType == BUILD_TYPE_DEBUG) ? STR_DEBUG : STR_RELEASE,
+				       psBuildInfo->ui32BuildOptions,
+					   PVR_BUILD_DIR);
+
+			psBuildInfo = &psPVRSRVData->sDriverInfo.sKMBuildInfo;
+			seq_printf(psSeqFile, SEQ_PRINT_VERSION_FMTSPEC,
+			           "KM Driver",
+				       PVRVERSION_UNPACK_MAJ(psBuildInfo->ui32BuildVersion),
+				       PVRVERSION_UNPACK_MIN(psBuildInfo->ui32BuildVersion),
+				       psBuildInfo->ui32BuildRevision,
+				       (psBuildInfo->ui32BuildType == BUILD_TYPE_DEBUG) ? STR_DEBUG : STR_RELEASE,
+				       psBuildInfo->ui32BuildOptions,
+					   PVR_BUILD_DIR);
+		}
+		else
+		{
+			/*
+			 * bIsNoMatch is `false` in one of the following cases:
+			 * - UM & KM version parameters actually match.
+			 * - A comparison between UM & KM has not been made yet, because no
+			 *   client ever connected.
+			 *
+			 * In both cases, available (KM) version info is the best output we
+			 * can provide.
+			 */
+			seq_printf(psSeqFile, "Driver Version: %s (%s) build options: 0x%08lx %s\n",
+			           PVRVERSION_STRING, PVR_BUILD_TYPE, RGX_BUILD_OPTIONS_KM, PVR_BUILD_DIR);
+		}
+	}
+	else if (pvData != NULL)
+	{
+		PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)pvData;
+#if defined(SUPPORT_RGX)
+		PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+		RGXFWIF_INIT *psRGXFWInit;
+		PVRSRV_ERROR eError;
+#endif
+		IMG_BOOL bFwVersionInfoPrinted = IMG_FALSE;
+
+		seq_printf(psSeqFile, "\nDevice Name: %s\n", psDevNode->psDevConfig->pszName);
+
+		if (psDevNode->psDevConfig->pszVersion)
+		{
+			seq_printf(psSeqFile, "Device Version: %s\n", psDevNode->psDevConfig->pszVersion);
+		}
+
+		if (psDevNode->pfnDeviceVersionString)
+		{
+			IMG_CHAR *pszDeviceVersionString;
+
+			if (psDevNode->pfnDeviceVersionString(psDevNode, &pszDeviceVersionString) == PVRSRV_OK)
+			{
+				seq_printf(psSeqFile, "%s\n", pszDeviceVersionString);
+
+				OSFreeMem(pszDeviceVersionString);
+			}
+		}
+#if defined(SUPPORT_RGX)
+		/* print device's firmware version info */
+		if (psDevInfo->psRGXFWIfInitMemDesc != NULL)
+		{
+			eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc, (void**)&psRGXFWInit);
+			if (eError == PVRSRV_OK)
+			{
+				if (psRGXFWInit->sRGXCompChecks.bUpdated)
+				{
+					const RGXFWIF_COMPCHECKS *psRGXCompChecks = &psRGXFWInit->sRGXCompChecks;
+
+					seq_printf(psSeqFile, SEQ_PRINT_VERSION_FMTSPEC,
+							   "Firmware",
+							   PVRVERSION_UNPACK_MAJ(psRGXCompChecks->ui32DDKVersion),
+							   PVRVERSION_UNPACK_MIN(psRGXCompChecks->ui32DDKVersion),
+							   psRGXCompChecks->ui32DDKBuild,
+							   ((psRGXCompChecks->ui32BuildOptions & OPTIONS_DEBUG_MASK) ?
+								   STR_DEBUG : STR_RELEASE),
+							   psRGXCompChecks->ui32BuildOptions,
+							   PVR_BUILD_DIR);
+					bFwVersionInfoPrinted = IMG_TRUE;
+				}
+				DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Error acquiring CPU virtual address of FWInitMemDesc",
+						 __func__));
+			}
+		}
+#endif
+
+		if (!bFwVersionInfoPrinted)
+		{
+			seq_printf(psSeqFile, "Firmware Version: Info unavailable %s\n",
+#if defined(NO_HARDWARE)
+			           "on NoHW driver"
+#else
+                       "(Is INIT complete?)"
+#endif
+                       );
+		}
+	}
+
+	return 0;
+}
+
+static struct seq_operations gsDebugVersionReadOps =
+{
+	.start = _DebugVersionSeqStart,
+	.stop = _DebugVersionSeqStop,
+	.next = _DebugVersionSeqNext,
+	.show = _DebugVersionSeqShow,
+};
+
+#if defined(SUPPORT_RGX) && defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+/*************************************************************************/ /*!
+ Power data DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugPowerDataCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+					  va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugPowerDataSeqStart(struct seq_file *psSeqFile,
+									 loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 0;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugPowerDataCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _DebugPowerDataSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugPowerDataSeqNext(struct seq_file *psSeqFile,
+									void *pvData,
+									loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 0;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugPowerDataCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
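+/* Queue a counter dump command (start, stop or sample) to the firmware
+ * on the general-purpose data master for the given device.
+ */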
+static PVRSRV_ERROR SendPowerCounterCommand(PVRSRV_DEVICE_NODE* psDeviceNode,
+											RGXFWIF_COUNTER_DUMP_REQUEST eRequestType)
+{
+	PVRSRV_ERROR eError;
+
+	RGXFWIF_KCCB_CMD sCounterDumpCmd;
+
+	sCounterDumpCmd.eCmdType = RGXFWIF_KCCB_CMD_COUNTER_DUMP;
+	sCounterDumpCmd.uCmdData.sCounterDumpConfigData.eCounterDumpRequest = eRequestType;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sCounterDumpCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SendPowerCounterCommand: RGXScheduleCommand failed. Error:%u", eError));
+	}
+
+	return eError;
+}
+
+static void *_IsDevNodeNotInitialised(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	return psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE ? NULL : psDeviceNode;
+}
+
+static void _SendPowerCounterCommand(PVRSRV_DEVICE_NODE* psDeviceNode,
+									 va_list va)
+{
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	OSLockAcquire(psDevInfo->hCounterDumpingLock);
+
+	SendPowerCounterCommand(psDeviceNode, va_arg(va, RGXFWIF_COUNTER_DUMP_REQUEST));
+
+	OSLockRelease(psDevInfo->hCounterDumpingLock);
+}
+
+static int _DebugPowerDataSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (pvData != NULL)
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+		PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+
+		if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Not all device nodes were initialised when power counter data was requested!"));
+			return -EIO;
+		}
+
+		OSLockAcquire(psDevInfo->hCounterDumpingLock);
+
+		eError = SendPowerCounterCommand(psDeviceNode, RGXFWIF_PWR_COUNTER_DUMP_SAMPLE);
+
+		if (eError != PVRSRV_OK)
+		{
+			OSLockRelease(psDevInfo->hCounterDumpingLock);
+			return -EIO;
+		}
+
+		/* Create update command to notify the host that the copy is finished. */
+		{
+			PVRSRV_CLIENT_SYNC_PRIM* psCopySyncPrim;
+			RGXFWIF_DEV_VIRTADDR sSyncFWAddr;
+			RGXFWIF_KCCB_CMD sSyncCmd;
+			eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+								&psCopySyncPrim,
+								"power counter dump sync prim");
+			if (eError != PVRSRV_OK)
+			{
+				OSLockRelease(psDevInfo->hCounterDumpingLock);
+				return -EIO;
+			}
+
+			SyncPrimSet(psCopySyncPrim, 0);
+
+			SyncPrimGetFirmwareAddr(psCopySyncPrim, &sSyncFWAddr.ui32Addr);
+
+			sSyncCmd.eCmdType = RGXFWIF_KCCB_CMD_SYNC;
+			sSyncCmd.uCmdData.sSyncData.sSyncObjDevVAddr = sSyncFWAddr;
+			sSyncCmd.uCmdData.sSyncData.uiUpdateVal = 1;
+
+			eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+						RGXFWIF_DM_GP,
+						&sSyncCmd,
+						0,
+						PDUMP_FLAGS_CONTINUOUS);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "_DebugPowerDataSeqShow: RGXScheduleCommand failed. Error:%u", eError));
+				OSLockRelease(psDevInfo->hCounterDumpingLock);
+				return -EIO;
+			}
+
+			eError = PVRSRVWaitForValueKM(psCopySyncPrim->pui32LinAddr, 1, 0xffffffff);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "_DebugPowerDataSeqShow: PVRSRVWaitForValueKM failed. Error:%u", eError));
+				OSLockRelease(psDevInfo->hCounterDumpingLock);
+				return -EIO;
+			}
+
+			eError = SyncPrimFree(psCopySyncPrim);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "_DebugPowerDataSeqShow: SyncPrimFree failed. Error:%u", eError));
+				OSLockRelease(psDevInfo->hCounterDumpingLock);
+				return -EIO;
+			}
+		}
+
+		/* Read back the buffer */
+		{
+			IMG_UINT32* pui32PowerBuffer;
+			IMG_UINT32 ui32NumOfRegs, ui32SamplePeriod;
+			IMG_UINT32 i,j;
+
+			eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCounterBufferMemDesc, (void**)&pui32PowerBuffer);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"_DebugPowerDataSeqShow: Failed to acquire buffer memory mapping (%u)", eError));
+				OSLockRelease(psDevInfo->hCounterDumpingLock);
+				return -EIO;
+			}
+
+			ui32NumOfRegs = *pui32PowerBuffer++;
+			ui32SamplePeriod = *pui32PowerBuffer++;
+
+			if (ui32NumOfRegs)
+			{
+				seq_printf(psSeqFile, "Power counter data for device id: %d\n", psDeviceNode->sDevId.i32UMIdentifier);
+				seq_printf(psSeqFile, "Sample period: 0x%08x\n", ui32SamplePeriod);
+
+				for (i = 0; i < ui32NumOfRegs; i++)
+				{
+					IMG_UINT32 ui32High, ui32Low;
+					IMG_UINT32 ui32RegOffset = *pui32PowerBuffer++;
+					IMG_UINT32 ui32NumOfInstances = *pui32PowerBuffer++;
+
+					PVR_ASSERT(ui32NumOfInstances);
+
+					seq_printf(psSeqFile, "0x%08x:", ui32RegOffset);
+
+					for (j = 0; j < ui32NumOfInstances; j++)
+					{
+						ui32Low = *pui32PowerBuffer++;
+						ui32High = *pui32PowerBuffer++;
+
+						seq_printf(psSeqFile, " 0x%016llx", (IMG_UINT64)ui32Low | (IMG_UINT64)ui32High << 32);
+					}
+
+					seq_printf(psSeqFile, "\n");
+				}
+			}
+
+			DevmemReleaseCpuVirtAddr(psDevInfo->psCounterBufferMemDesc);
+		}
+
+		OSLockRelease(psDevInfo->hCounterDumpingLock);
+	}
+
+	return 0;
+}
+
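+/* Handle writes to the power_data DebugFS entry: writing "1" starts and
+ * writing "0" stops power counter dumping on all (initialised) devices.
+ */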
+static IMG_INT PowerDataSet(const char __user *pcBuffer,
+							 size_t uiCount,
+							 loff_t *puiPosition,
+							 void *pvData)
+{
+	IMG_CHAR acDataBuffer[2];
+	PVRSRV_DATA* psPVRSRVData = (PVRSRV_DATA*) pvData;
+
+	if (puiPosition == NULL || *puiPosition != 0)
+	{
+		return -EIO;
+	}
+
+	if (uiCount == 0 || uiCount > ARRAY_SIZE(acDataBuffer))
+	{
+		return -EINVAL;
+	}
+
+	if (pvr_copy_from_user(acDataBuffer, pcBuffer, uiCount))
+	{
+		return -EINVAL;
+	}
+
+	if (acDataBuffer[uiCount - 1] != '\n')
+	{
+		return -EINVAL;
+	}
+
+	if (List_PVRSRV_DEVICE_NODE_Any(psPVRSRVData->psDeviceNodeList, _IsDevNodeNotInitialised))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Not all device nodes were initialised when power counter data was requested!"));
+		return -EIO;
+	}
+
+	if ((acDataBuffer[0] == '1') && uiCount == 2)
+	{
+		List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+										   _SendPowerCounterCommand,
+										   RGXFWIF_PWR_COUNTER_DUMP_START);
+
+	}
+	else if ((acDataBuffer[0] == '0') && uiCount == 2)
+	{
+
+		List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+										   _SendPowerCounterCommand,
+										   RGXFWIF_PWR_COUNTER_DUMP_STOP);
+	}
+	else
+	{
+
+		return -EINVAL;
+	}
+
+	*puiPosition += uiCount;
+	return uiCount;
+}
+
+static struct seq_operations gsDebugPowerDataReadOps =
+{
+	.start = _DebugPowerDataSeqStart,
+	.stop =  _DebugPowerDataSeqStop,
+	.next =  _DebugPowerDataSeqNext,
+	.show =  _DebugPowerDataSeqShow,
+};
+
+#endif /* SUPPORT_RGX && SUPPORT_POWER_SAMPLING_VIA_DEBUGFS*/
+/*************************************************************************/ /*!
+ Status DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugStatusCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+										 va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugStatusSeqStart(struct seq_file *psSeqFile,
+								  loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	if (*puiPosition == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugStatusCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _DebugStatusSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugStatusSeqNext(struct seq_file *psSeqFile,
+								 void *pvData,
+								 loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugStatusCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static int _DebugStatusSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData == SEQ_START_TOKEN)
+	{
+		PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+
+		if (psPVRSRVData != NULL)
+		{
+			switch (psPVRSRVData->eServicesState)
+			{
+				case PVRSRV_SERVICES_STATE_OK:
+					seq_printf(psSeqFile, "Driver Status:   OK\n");
+					break;
+				case PVRSRV_SERVICES_STATE_BAD:
+					seq_printf(psSeqFile, "Driver Status:   BAD\n");
+					break;
+				case PVRSRV_SERVICES_STATE_UNDEFINED:
+					seq_printf(psSeqFile, "Driver Status:   UNDEFINED\n");
+					break;
+				default:
+					seq_printf(psSeqFile, "Driver Status:   UNKNOWN (%d)\n", psPVRSRVData->eServicesState);
+					break;
+			}
+		}
+	}
+	else if (pvData != NULL)
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+		IMG_CHAR           *pszStatus = "";
+		IMG_CHAR           *pszReason = "";
+		PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus;
+		PVRSRV_DEVICE_HEALTH_REASON eHealthReason;
+
+		/* Update the health status now if possible... */
+		if (psDeviceNode->pfnUpdateHealthStatus)
+		{
+			psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_FALSE);
+		}
+		eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus);
+		eHealthReason = OSAtomicRead(&psDeviceNode->eHealthReason);
+
+		switch (eHealthStatus)
+		{
+			case PVRSRV_DEVICE_HEALTH_STATUS_OK:  pszStatus = "OK";  break;
+			case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:  pszStatus = "NOT RESPONDING";  break;
+			case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:  pszStatus = "DEAD";  break;
+			case PVRSRV_DEVICE_HEALTH_STATUS_FAULT:  pszStatus = "FAULT";  break;
+			case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED:  pszStatus = "UNDEFINED";  break;
+			default:  pszStatus = "UNKNOWN";  break;
+		}
+
+		switch (eHealthReason)
+		{
+			case PVRSRV_DEVICE_HEALTH_REASON_NONE:  pszReason = "";  break;
+			case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED:  pszReason = " (Asserted)";  break;
+			case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING:  pszReason = " (Poll failure)";  break;
+			case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS:  pszReason = " (Global Event Object timeouts rising)";  break;
+			case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT:  pszReason = " (KCCB offset invalid)";  break;
+			case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED:  pszReason = " (KCCB stalled)";  break;
+			case PVRSRV_DEVICE_HEALTH_REASON_IDLING:  pszReason = " (Idling)";  break;
+			case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING:  pszReason = " (Restarting)";  break;
+			default:  pszReason = " (Unknown reason)";  break;
+		}
+
+		seq_printf(psSeqFile, "Firmware Status: %s%s\n", pszStatus, pszReason);
+
+		if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+		{
+			/*
+			 * Guest drivers do not support the following functionality:
+			 *	- Perform actual on-chip fw tracing.
+			 *	- Collect actual on-chip GPU utilization stats.
+			 *	- Perform actual on-chip GPU power/dvfs management.
+			 *	- As a result no more information can be provided.
+			 */
+			return 0;
+		}
+
+		/* Write other useful stats to aid the test cycle... */
+		if (psDeviceNode->pvDevice != NULL)
+		{
+#if defined(SUPPORT_RGX)
+			PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+			RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+			/* Calculate the number of HWR events in total across all the DMs... */
+			if (psRGXFWIfTraceBufCtl != NULL)
+			{
+				IMG_UINT32 ui32HWREventCount = 0;
+				IMG_UINT32 ui32CRREventCount = 0;
+				IMG_UINT32 ui32DMIndex;
+
+				for (ui32DMIndex = 0; ui32DMIndex < RGXFWIF_DM_MAX; ui32DMIndex++)
+				{
+					ui32HWREventCount += psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[ui32DMIndex];
+					ui32CRREventCount += psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[ui32DMIndex];
+				}
+
+				seq_printf(psSeqFile, "HWR Event Count: %u\n", ui32HWREventCount);
+				seq_printf(psSeqFile, "CRR Event Count: %u\n", ui32CRREventCount);
+				seq_printf(psSeqFile, "FWF Event Count: %u\n", psRGXFWIfTraceBufCtl->ui32FWFaults);
+			}
+
+			/* Write the number of APM events... */
+			seq_printf(psSeqFile, "APM Event Count: %u\n", psDevInfo->ui32ActivePMReqTotal);
+
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+			if ((psRGXFWIfTraceBufCtl != NULL) &&
+				(psRGXFWIfTraceBufCtl->ui32TracebufFlags & RGXFWIF_TRACEBUFCFG_SLR_LOG))
+			{
+				/* Write the number of Sync Lockup Recovery (SLR) events... */
+				seq_printf(psSeqFile, "SLR Event Count: %u\n", psRGXFWIfTraceBufCtl->ui32ForcedUpdatesRequested);
+			}
+#endif
+
+			/* Write the current GPU Utilisation values... */
+			if (psDevInfo->pfnGetGpuUtilStats &&
+				eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK)
+			{
+				RGXFWIF_GPU_UTIL_STATS sGpuUtilStats;
+				PVRSRV_ERROR eError = PVRSRV_OK;
+
+				eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode,
+													   ghGpuUtilUserDebugFS,
+													   &sGpuUtilStats);
+
+				if ((eError == PVRSRV_OK) &&
+					((IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative))
+				{
+					IMG_UINT64 util;
+					IMG_UINT32 rem;
+
+					util = 100 * (sGpuUtilStats.ui64GpuStatActiveHigh +
+								  sGpuUtilStats.ui64GpuStatActiveLow);
+					util = OSDivide64(util, (IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative, &rem);
+
+					seq_printf(psSeqFile, "GPU Utilisation: %u%%\n", (IMG_UINT32)util);
+				}
+				else
+				{
+					seq_printf(psSeqFile, "GPU Utilisation: -\n");
+				}
+			}
+#endif
+		}
+	}
+
+	return 0;
+}
+
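+/* Handle writes to the status DebugFS entry: writing "k" or "K" marks the
+ * services state as bad, e.g. so error-handling paths can be exercised
+ * during testing.
+ */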
+static ssize_t DebugStatusSet(const char __user *pcBuffer,
+							  size_t uiCount,
+							  loff_t *puiPosition,
+							  void *pvData)
+{
+	IMG_CHAR acDataBuffer[6];
+
+	if (puiPosition == NULL || *puiPosition != 0)
+	{
+		return -EIO;
+	}
+
+	if (uiCount == 0 || uiCount > ARRAY_SIZE(acDataBuffer))
+	{
+		return -EINVAL;
+	}
+
+	if (pvr_copy_from_user(acDataBuffer, pcBuffer, uiCount))
+	{
+		return -EINVAL;
+	}
+
+	if (acDataBuffer[uiCount - 1] != '\n')
+	{
+		return -EINVAL;
+	}
+
+	if ((acDataBuffer[0] == 'k' || acDataBuffer[0] == 'K') && uiCount == 2)
+	{
+		PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+		psPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_BAD;
+	}
+	else
+	{
+		return -EINVAL;
+	}
+
+	*puiPosition += uiCount;
+	return uiCount;
+}
+
+static struct seq_operations gsDebugStatusReadOps =
+{
+	.start = _DebugStatusSeqStart,
+	.stop = _DebugStatusSeqStop,
+	.next = _DebugStatusSeqNext,
+	.show = _DebugStatusSeqShow,
+};
+
+/*************************************************************************/ /*!
+ Dump Debug DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugDumpDebugCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugDumpDebugSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	if (*puiPosition == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugDumpDebugCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _DebugDumpDebugSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugDumpDebugSeqNext(struct seq_file *psSeqFile,
+									void *pvData,
+									loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugDumpDebugCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _DumpDebugSeqPrintf(void *pvDumpDebugFile,
+				const IMG_CHAR *pszFormat, ...)
+{
+	struct seq_file *psSeqFile = (struct seq_file *)pvDumpDebugFile;
+	IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+	va_list ArgList;
+
+	va_start(ArgList, pszFormat);
+	vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+	va_end(ArgList);
+	seq_printf(psSeqFile, "%s\n", szBuffer);
+}
+
+static int _DebugDumpDebugSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData != NULL  &&  pvData != SEQ_START_TOKEN)
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+
+		if (psDeviceNode->pvDevice != NULL)
+		{
+			PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
+						_DumpDebugSeqPrintf, psSeqFile);
+		}
+	}
+
+	return 0;
+}
+
+static struct seq_operations gsDumpDebugReadOps =
+{
+	.start = _DebugDumpDebugSeqStart,
+	.stop  = _DebugDumpDebugSeqStop,
+	.next  = _DebugDumpDebugSeqNext,
+	.show  = _DebugDumpDebugSeqShow,
+};
+
+#if defined(SUPPORT_RGX)
+/*************************************************************************/ /*!
+ Firmware Trace DebugFS entry
+*/ /**************************************************************************/
+static void *_DebugFWTraceCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugFWTraceSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	if (*puiPosition == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugFWTraceCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _DebugFWTraceSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugFWTraceSeqNext(struct seq_file *psSeqFile,
+								  void *pvData,
+								  loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugFWTraceCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _FWTraceSeqPrintf(void *pvDumpDebugFile,
+				const IMG_CHAR *pszFormat, ...)
+{
+	struct seq_file *psSeqFile = (struct seq_file *)pvDumpDebugFile;
+	IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+	va_list ArgList;
+
+	va_start(ArgList, pszFormat);
+	vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+	va_end(ArgList);
+	seq_printf(psSeqFile, "%s\n", szBuffer);
+}
+
+static int _DebugFWTraceSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData != NULL  &&  pvData != SEQ_START_TOKEN)
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+
+		if (psDeviceNode->pvDevice != NULL)
+		{
+			PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+			RGXDumpFirmwareTrace(_FWTraceSeqPrintf, psSeqFile, psDevInfo);
+		}
+	}
+
+	return 0;
+}
+
+static struct seq_operations gsFWTraceReadOps =
+{
+	.start = _DebugFWTraceSeqStart,
+	.stop  = _DebugFWTraceSeqStop,
+	.next  = _DebugFWTraceSeqNext,
+	.show  = _DebugFWTraceSeqShow,
+};
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+
+static PVRSRV_RGXDEV_INFO *getPsDevInfo(struct seq_file *psSeqFile)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+
+	if (psPVRSRVData != NULL)
+	{
+		if (psPVRSRVData->psDeviceNodeList != NULL)
+		{
+			PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psPVRSRVData->psDeviceNodeList->pvDevice;
+			return psDevInfo;
+		}
+	}
+	return NULL;
+}
+
+static void *_FirmwareGcovSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = getPsDevInfo(psSeqFile);
+
+	if (psDevInfo != NULL)
+	{
+		if (psDevInfo->psFirmwareGcovBufferMemDesc != NULL)
+		{
+			void *pvCpuVirtAddr;
+			DevmemAcquireCpuVirtAddr(psDevInfo->psFirmwareGcovBufferMemDesc, &pvCpuVirtAddr);
+			return *puiPosition ? NULL : pvCpuVirtAddr;
+		}
+	}
+
+	return NULL;
+}
+
+static void _FirmwareGcovSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = getPsDevInfo(psSeqFile);
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	if (psDevInfo != NULL)
+	{
+		if (psDevInfo->psFirmwareGcovBufferMemDesc != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psFirmwareGcovBufferMemDesc);
+		}
+	}
+}
+
+static void *_FirmwareGcovSeqNext(struct seq_file *psSeqFile,
+								  void *pvData,
+								  loff_t *puiPosition)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+	PVR_UNREFERENCED_PARAMETER(puiPosition);
+	return NULL;
+}
+
+static int _FirmwareGcovSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = getPsDevInfo(psSeqFile);
+
+	if (psDevInfo != NULL)
+	{
+		seq_write(psSeqFile, pvData, psDevInfo->ui32FirmwareGcovSize);
+	}
+	return 0;
+}
+
+static struct seq_operations gsFirmwareGcovReadOps =
+{
+	.start = _FirmwareGcovSeqStart,
+	.stop  = _FirmwareGcovSeqStop,
+	.next  = _FirmwareGcovSeqNext,
+	.show  = _FirmwareGcovSeqShow,
+};
+
+#endif /* defined(SUPPORT_FIRMWARE_GCOV) */
+
+
+#endif /* defined(SUPPORT_RGX) */
+/*************************************************************************/ /*!
+ Debug level DebugFS entry
+*/ /**************************************************************************/
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+static void *DebugLevelSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	if (*puiPosition == 0)
+	{
+		return psSeqFile->private;
+	}
+
+	return NULL;
+}
+
+static void DebugLevelSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *DebugLevelSeqNext(struct seq_file *psSeqFile,
+							   void *pvData,
+							   loff_t *puiPosition)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+	PVR_UNREFERENCED_PARAMETER(puiPosition);
+
+	return NULL;
+}
+
+static int DebugLevelSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData != NULL)
+	{
+		IMG_UINT32 uiDebugLevel = *((IMG_UINT32 *)pvData);
+
+		seq_printf(psSeqFile, "%u\n", uiDebugLevel);
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static struct seq_operations gsDebugLevelReadOps =
+{
+	.start = DebugLevelSeqStart,
+	.stop = DebugLevelSeqStop,
+	.next = DebugLevelSeqNext,
+	.show = DebugLevelSeqShow,
+};
+
+
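+/* Handle writes to the debug_level DebugFS entry: parse a decimal level
+ * from the user buffer and clamp it to the valid DBGPRIV_* mask.
+ */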
+static IMG_INT DebugLevelSet(const char __user *pcBuffer,
+							 size_t uiCount,
+							 loff_t *puiPosition,
+							 void *pvData)
+{
+	IMG_UINT32 *uiDebugLevel = (IMG_UINT32 *)pvData;
+	IMG_CHAR acDataBuffer[6];
+
+	if (puiPosition == NULL || *puiPosition != 0)
+	{
+		return -EIO;
+	}
+
+	if (uiCount == 0 || uiCount > ARRAY_SIZE(acDataBuffer))
+	{
+		return -EINVAL;
+	}
+
+	if (pvr_copy_from_user(acDataBuffer, pcBuffer, uiCount))
+	{
+		return -EINVAL;
+	}
+
+	if (acDataBuffer[uiCount - 1] != '\n')
+	{
+		return -EINVAL;
+	}
+
+	/* NUL-terminate the buffer so it can safely be parsed as a string */
+	acDataBuffer[uiCount - 1] = '\0';
+
+	if (sscanf(acDataBuffer, "%u", uiDebugLevel) != 1)
+	{
+		return -EINVAL;
+	}
+
+	/* As this is Linux the next line uses a GCC builtin function */
+	(*uiDebugLevel) &= (1 << __builtin_ffsl(DBGPRIV_LAST)) - 1;
+
+	*puiPosition += uiCount;
+	return uiCount;
+}
+#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */
+
+static PPVR_DEBUGFS_ENTRY_DATA gpsVersionDebugFSEntry;
+
+static PPVR_DEBUGFS_ENTRY_DATA gpsStatusDebugFSEntry;
+static PPVR_DEBUGFS_ENTRY_DATA gpsDumpDebugDebugFSEntry;
+
+#if defined(SUPPORT_RGX)
+static PPVR_DEBUGFS_ENTRY_DATA gpsFWTraceDebugFSEntry;
+#if defined(SUPPORT_FIRMWARE_GCOV)
+static PPVR_DEBUGFS_ENTRY_DATA gpsFirmwareGcovDebugFSEntry;
+#endif
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+static PPVR_DEBUGFS_ENTRY_DATA gpsPowerDataDebugFSEntry;
+#endif
+#endif
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+static PPVR_DEBUGFS_ENTRY_DATA gpsDebugLevelDebugFSEntry;
+#endif
+
+int PVRDebugCreateDebugFSEntries(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	int iResult;
+
+	PVR_ASSERT(psPVRSRVData != NULL);
+
+	/*
+	 * The DebugFS entries are designed to work in a single device system but
+	 * this function will be called multiple times in a multi-device system.
+	 * Return an error in this case.
+	 */
+	if (gpsVersionDebugFSEntry)
+	{
+		return -EEXIST;
+	}
+
+#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE)
+	if (SORgxGpuUtilStatsRegister(&ghGpuUtilUserDebugFS) != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+#endif
+
+	iResult = PVRDebugFSCreateFile("version",
+									NULL,
+									&gsDebugVersionReadOps,
+									NULL,
+									NULL,
+									psPVRSRVData,
+									&gpsVersionDebugFSEntry);
+	if (iResult != 0)
+	{
+		goto PVRDebugCreateDebugFSEntriesErrorExit;
+	}
+
+	iResult = PVRDebugFSCreateFile("status",
+									NULL,
+									&gsDebugStatusReadOps,
+									(PVRSRV_ENTRY_WRITE_FUNC *)DebugStatusSet,
+									NULL,
+									psPVRSRVData,
+									&gpsStatusDebugFSEntry);
+	if (iResult != 0)
+	{
+		goto PVRDebugCreateDebugFSEntriesErrorExit;
+	}
+
+	iResult = PVRDebugFSCreateFile("debug_dump",
+									NULL,
+									&gsDumpDebugReadOps,
+									NULL,
+									NULL,
+									psPVRSRVData,
+									&gpsDumpDebugDebugFSEntry);
+	if (iResult != 0)
+	{
+		goto PVRDebugCreateDebugFSEntriesErrorExit;
+	}
+
+#if defined(SUPPORT_RGX)
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		iResult = PVRDebugFSCreateFile("firmware_trace",
+										NULL,
+										&gsFWTraceReadOps,
+										NULL,
+										NULL,
+										psPVRSRVData,
+										&gpsFWTraceDebugFSEntry);
+		if (iResult != 0)
+		{
+			goto PVRDebugCreateDebugFSEntriesErrorExit;
+		}
+	}
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+	{
+
+		iResult = PVRDebugFSCreateFile("firmware_gcov",
+										NULL,
+										&gsFirmwareGcovReadOps,
+										NULL,
+										NULL,
+										psPVRSRVData,
+										&gpsFirmwareGcovDebugFSEntry);
+
+		if (iResult != 0)
+		{
+			goto PVRDebugCreateDebugFSEntriesErrorExit;
+		}
+	}
+#endif
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+	iResult = PVRDebugFSCreateFile("power_data",
+									NULL,
+									&gsDebugPowerDataReadOps,
+									(PVRSRV_ENTRY_WRITE_FUNC *)PowerDataSet,
+									NULL,
+									psPVRSRVData,
+									&gpsPowerDataDebugFSEntry);
+	if (iResult != 0)
+	{
+		goto PVRDebugCreateDebugFSEntriesErrorExit;
+	}
+#endif
+#endif
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+	iResult = PVRDebugFSCreateFile("debug_level",
+									NULL,
+									&gsDebugLevelReadOps,
+									(PVRSRV_ENTRY_WRITE_FUNC *)DebugLevelSet,
+									NULL,
+									&gPVRDebugLevel,
+									&gpsDebugLevelDebugFSEntry);
+	if (iResult != 0)
+	{
+		goto PVRDebugCreateDebugFSEntriesErrorExit;
+	}
+#endif
+
+	return 0;
+
+PVRDebugCreateDebugFSEntriesErrorExit:
+
+	PVRDebugRemoveDebugFSEntries();
+
+	return iResult;
+}
+
+void PVRDebugRemoveDebugFSEntries(void)
+{
+#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE)
+	if (ghGpuUtilUserDebugFS != NULL)
+	{
+		SORgxGpuUtilStatsUnregister(ghGpuUtilUserDebugFS);
+		ghGpuUtilUserDebugFS = NULL;
+	}
+#endif
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+	if (gpsDebugLevelDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveFile(&gpsDebugLevelDebugFSEntry);
+	}
+#endif
+
+#if defined(SUPPORT_RGX)
+	if (gpsFWTraceDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveFile(&gpsFWTraceDebugFSEntry);
+	}
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+	if (gpsFirmwareGcovDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveFile(&gpsFirmwareGcovDebugFSEntry);
+	}
+#endif
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+	if (gpsPowerDataDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveFile(&gpsPowerDataDebugFSEntry);
+	}
+#endif
+
+#endif /* defined(SUPPORT_RGX) */
+
+	if (gpsDumpDebugDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveFile(&gpsDumpDebugDebugFSEntry);
+	}
+
+	if (gpsStatusDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveFile(&gpsStatusDebugFSEntry);
+	}
+
+	if (gpsVersionDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveFile(&gpsVersionDebugFSEntry);
+	}
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_debug.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_debug.h
new file mode 100644
index 0000000..255892d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_debug.h
@@ -0,0 +1,661 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Debug Declarations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides debug functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_DEBUG_H
+#define PVR_DEBUG_H
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/*! @cond Doxygen_Suppress */
+#if defined(_MSC_VER)
+#	define MSC_SUPPRESS_4127 __pragma(warning(suppress:4127))
+#else
+#	define MSC_SUPPRESS_4127
+#endif
+/*! @endcond */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define PVR_MAX_DEBUG_MESSAGE_LEN	(512)   /*!< Max length of a Debug Message */
+
+/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */
+#define DBGPRIV_FATAL     0x001UL  /*!< Debug-Fatal. Privately used by pvr_debug. */
+#define DBGPRIV_ERROR     0x002UL  /*!< Debug-Error. Privately used by pvr_debug. */
+#define DBGPRIV_WARNING   0x004UL  /*!< Debug-Warning. Privately used by pvr_debug. */
+#define DBGPRIV_MESSAGE   0x008UL  /*!< Debug-Message. Privately used by pvr_debug. */
+#define DBGPRIV_VERBOSE   0x010UL  /*!< Debug-Verbose. Privately used by pvr_debug. */
+#define DBGPRIV_CALLTRACE 0x020UL  /*!< Debug-CallTrace. Privately used by pvr_debug. */
+#define DBGPRIV_ALLOC     0x040UL  /*!< Debug-Alloc. Privately used by pvr_debug. */
+#define DBGPRIV_BUFFERED  0x080UL  /*!< Debug-Buffered. Privately used by pvr_debug. */
+#define DBGPRIV_DEBUG     0x100UL  /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */
+#define DBGPRIV_LAST      0x200UL  /*!< Always set to highest mask value. Privately used by pvr_debug. */
+
+#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG)
+#define PVRSRV_NEED_PVR_ASSERT
+#endif
+
+#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF)
+#define PVRSRV_NEED_PVR_DPF
+#endif
+
+#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING))
+#define PVRSRV_NEED_PVR_TRACE
+#endif
+
+#if !defined(DOXYGEN)
+/*************************************************************************/ /*
+PVRSRVGetErrorString
+Returns a string describing the provided PVRSRV_ERROR code
+NB No doxygen comments provided as this function does not require porting
+   for other operating systems
+*/ /**************************************************************************/
+	const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError);
+#	define PVRSRVGETERRORSTRING PVRSRVGetErrorString
+#endif
+
+/* PVR_ASSERT() and PVR_DBG_BREAK handling */
+
+#if defined(PVRSRV_NEED_PVR_ASSERT) || defined(DOXYGEN)
+
+/* Unfortunately the Klocwork static analysis checker does not understand
+ * our assert macros and so reports many false positives. Defining our
+ * assert macros in a special way when the code is analysed by Klocwork
+ * avoids this. */
+#if defined(__KLOCWORK__)
+  #define PVR_ASSERT(x) do { if (!(x)) abort(); } while (0)
+#else /* ! __KLOCWORKS__ */
+
+#if defined(_WIN32)
+#define PVR_ASSERT(expr) do										\
+	{															\
+		MSC_SUPPRESS_4127										\
+		if (unlikely(!(expr)))								\
+		{														\
+			PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,\
+					  "*** Debug assertion failed!");			\
+			__debugbreak();										\
+		}														\
+	MSC_SUPPRESS_4127											\
+	} while (0)
+
+#else
+
+#if defined(LINUX) && defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+/* In Linux kernel mode, use WARN_ON() directly. This produces the
+   correct filename and line number in the warning message. */
+#define PVR_ASSERT(EXPR) do											\
+	{																\
+		if (unlikely(!(EXPR)))										\
+		{															\
+			PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,	\
+							  "Debug assertion failed!");			\
+			WARN_ON(1);												\
+		}															\
+	} while (0)
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugAssertFail
+@Description    Indicate to the user that a debug assertion has failed and
+                prevent the program from continuing.
+                Invoked from the macro PVR_ASSERT().
+@Input          pszFile       The name of the source file where the assertion failed
+@Input          ui32Line      The line number of the failed assertion
+@Input          pszAssertion  String describing the assertion
+@Return         NEVER!
+*/ /**************************************************************************/
+IMG_EXPORT void IMG_CALLCONV __noreturn
+PVRSRVDebugAssertFail(const IMG_CHAR *pszFile,
+                      IMG_UINT32 ui32Line,
+                      const IMG_CHAR *pszAssertion);
+
+#define PVR_ASSERT(EXPR) do										\
+	{															\
+		if (unlikely(!(EXPR)))								\
+			PVRSRVDebugAssertFail(__FILE__, __LINE__, #EXPR);	\
+	} while (0)
+
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+#endif /* defined(_WIN32) */
+#endif /* defined(__KLOCWORK__) */
+
+#if defined(__KLOCWORK__)
+	#define PVR_DBG_BREAK do { abort(); } while (0)
+#else
+	#if defined(WIN32)
+		#define PVR_DBG_BREAK __debugbreak()   /*!< Implementation of PVR_DBG_BREAK for (non-WinCE) Win32 */
+	#else
+		#if defined(PVR_DBG_BREAK_ASSERT_FAIL)
+		/*!< Implementation of PVR_DBG_BREAK that maps onto PVRSRVDebugAssertFail */
+			#if defined(_WIN32)
+				#define PVR_DBG_BREAK	DBG_BREAK
+			#else
+				#if defined(LINUX) && defined(__KERNEL__)
+					#define PVR_DBG_BREAK BUG()
+				#else
+					#define PVR_DBG_BREAK	PVRSRVDebugAssertFail(__FILE__, __LINE__, "PVR_DBG_BREAK")
+				#endif
+			#endif
+		#else
+			/*!< Null Implementation of PVR_DBG_BREAK (does nothing) */
+			#define PVR_DBG_BREAK
+		#endif
+	#endif
+#endif
+
+
+#else  /* defined(PVRSRV_NEED_PVR_ASSERT) */
+    /* Unfortunately the Klocwork static analysis checker does not understand
+     * our assert macros and so reports many false positives. Defining our
+     * assert macros in a special way when the code is analysed by Klocwork
+     * avoids this. */
+    #if defined(__KLOCWORK__)
+        #define PVR_ASSERT(EXPR) do { if (!(EXPR)) abort(); } while (0)
+    #else
+        #define PVR_ASSERT(EXPR) (void)(EXPR) /*!< Null Implementation of PVR_ASSERT (does nothing) */
+    #endif
+
+    #define PVR_DBG_BREAK    /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */
+
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
+
+
+/* PVR_DPF() handling */
+
+#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN)
+
+	/* New logging mechanism */
+	#define PVR_DBG_FATAL     DBGPRIV_FATAL     /*!< Debug level passed to PVRSRVDebugPrintf() for fatal errors. */
+	#define PVR_DBG_ERROR     DBGPRIV_ERROR     /*!< Debug level passed to PVRSRVDebugPrintf() for non-fatal errors. */
+	#define PVR_DBG_WARNING   DBGPRIV_WARNING   /*!< Debug level passed to PVRSRVDebugPrintf() for warnings. */
+	#define PVR_DBG_MESSAGE   DBGPRIV_MESSAGE   /*!< Debug level passed to PVRSRVDebugPrintf() for information only. */
+	#define PVR_DBG_VERBOSE   DBGPRIV_VERBOSE   /*!< Debug level passed to PVRSRVDebugPrintf() for very low-priority debug. */
+	#define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE /*!< Debug level passed to PVRSRVDebugPrintf() for function tracing purposes. */
+	#define PVR_DBG_ALLOC     DBGPRIV_ALLOC     /*!< Debug level passed to PVRSRVDebugPrintf() for tracking some of drivers memory operations. */
+	#define PVR_DBG_BUFFERED  DBGPRIV_BUFFERED  /*!< Debug level passed to PVRSRVDebugPrintf() when debug should be written to the debug circular buffer. */
+	#define PVR_DBG_DEBUG     DBGPRIV_DEBUG     /*!< Debug level passed to PVRSRVDebugPrintf() for debug messages. */
+
+	/* These levels are always on with PVRSRV_NEED_PVR_DPF */
+	/*! @cond Doxygen_Suppress */
+	#define __PVR_DPF_0x001UL(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__)
+	#define __PVR_DPF_0x002UL(...) PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__)
+	#define __PVR_DPF_0x080UL(...) PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__)
+
+	/*
+	  The AdHoc-Debug level is only supported when enabled in the local
+	  build environment and may need to be used in both debug and release
+	  builds. An error is generated in the formal build if it is checked in.
+	*/
+#if defined(PVR_DPF_ADHOC_DEBUG_ON)
+	#define __PVR_DPF_0x100UL(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__)
+#else
+    /* Use an undefined token here to stop compilation dead in the offending module */
+	#define __PVR_DPF_0x100UL(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing
+#endif
+
+	/* Some are compiled out completely in release builds */
+#if defined(DEBUG) || defined(DOXYGEN)
+	#define __PVR_DPF_0x004UL(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__)
+	#define __PVR_DPF_0x008UL(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__)
+	#define __PVR_DPF_0x010UL(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__)
+	#define __PVR_DPF_0x020UL(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__)
+	#define __PVR_DPF_0x040UL(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__)
+#else
+	#define __PVR_DPF_0x004UL(...)
+	#define __PVR_DPF_0x008UL(...)
+	#define __PVR_DPF_0x010UL(...)
+	#define __PVR_DPF_0x020UL(...)
+	#define __PVR_DPF_0x040UL(...)
+	#define __PVR_DPF_0x200UL(...)
+#endif
+
+	/* Translate the different log levels to separate macros
+	 * so they can each be compiled out.
+	 */
+#if defined(DEBUG)
+	#define __PVR_DPF(lvl, ...) __PVR_DPF_ ## lvl (__FILE__, __LINE__, __VA_ARGS__)
+#else
+	#define __PVR_DPF(lvl, ...) __PVR_DPF_ ## lvl ("", 0, __VA_ARGS__)
+#endif
+	/*! @endcond */
+
+	/* Get rid of the double bracketing */
+	#define PVR_DPF(x) __PVR_DPF x
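+	/* Example usage (note the double parentheses, which let the whole
+	 * argument list pass through the single-parameter PVR_DPF() macro):
+	 *
+	 *     PVR_DPF((PVR_DBG_ERROR, "operation failed: %d", iErr));
+	 */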
+
+	#define PVR_LOG_ERROR(_rc, _call) \
+		PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__))
+
+	#define PVR_LOG_IF_ERROR(_rc, _call) do \
+		{ if (unlikely(_rc != PVRSRV_OK)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+		  } \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGR_IF_NOMEM(_expr, _call) do \
+		{ if (unlikely(_expr == NULL)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \
+			return PVRSRV_ERROR_OUT_OF_MEMORY; } \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGG_IF_NOMEM(_expr, _call, _err, _go) do \
+		{ if (unlikely(_expr == NULL)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \
+			_err = PVRSRV_ERROR_OUT_OF_MEMORY; \
+			goto _go; } \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGR_IF_ERROR(_rc, _call) do \
+		{ if (unlikely(_rc != PVRSRV_OK)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+			return _rc; } \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGRN_IF_ERROR(_rc, _call) do \
+		{ if (unlikely(_rc != PVRSRV_OK)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+			return; } \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGG_IF_ERROR(_rc, _call, _go) do \
+		{ if (unlikely(_rc != PVRSRV_OK)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+			goto _go; } \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOG_IF_FALSE(_expr, _msg) do \
+		{ if (unlikely(!(_expr))) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+		  } \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGR_IF_FALSE(_expr, _msg, _rc) do \
+		{ if (unlikely(!(_expr))) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+			return _rc; } \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGG_IF_FALSE(_expr, _msg, _go) do \
+		{ if (unlikely(!(_expr))) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+			goto _go; } \
+		MSC_SUPPRESS_4127\
+		} while (0)
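+
+	/* Illustrative (hypothetical names) use of the goto-style helpers,
+	 * which pair a logged error with a jump to common cleanup code:
+	 *
+	 *   eError = AcquireResource(&psRes);
+	 *   PVR_LOGG_IF_ERROR(eError, "AcquireResource", ErrorExit);
+	 *   ...
+	 * ErrorExit:
+	 *   return eError;
+	 */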
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugPrintf
+@Description    Output a debug message to the user, using an OS-specific
+                method, to a log or console which can be read by developers.
+                Invoked from the macro PVR_DPF().
+@Input          ui32DebugLevel   The debug level of the message. This can
+                                 be used to restrict the output of debug
+                                 messages based on their severity.
+                                 If this is PVR_DBG_BUFFERED, the message
+                                 should be written into a debug circular
+                                 buffer instead of being output immediately
+                                 (useful when performance would otherwise
+                                 be adversely affected).
+                                 The debug circular buffer shall only be
+                                 output when PVRSRVDebugPrintfDumpCCB() is
+                                 called.
+@Input          pszFileName      The source file containing the code that is
+                                 generating the message
+@Input          ui32Line         The line number in the source file
+@Input          pszFormat        The formatted message string
+@Input          ...              Zero or more arguments for use by the
+                                 formatted string
+@Return         None
+*/ /**************************************************************************/
+IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+                                               const IMG_CHAR *pszFileName,
+                                               IMG_UINT32 ui32Line,
+                                               const IMG_CHAR *pszFormat,
+                                               ...) __printf(4, 5);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugPrintfDumpCCB
+@Description    When PVRSRVDebugPrintf() is called with the ui32DebugLevel
+                specified as DBGPRIV_BUFFERED, the debug shall be written to
+                the debug circular buffer instead of being output immediately.
+                (This can be used to capture debug output without incurring
+                the performance hit of printing it at that moment.)
+                This function shall dump the contents of that debug circular
+                buffer to be output in an OS-specific method to a log or
+                console which can be read by developers.
+@Return         None
+*/ /**************************************************************************/
+IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void);
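+
+/* Illustrative buffered-logging sequence (a sketch; ui32State is a
+ * hypothetical variable): messages logged at PVR_DBG_BUFFERED are deferred
+ * to the circular buffer and only emitted on an explicit dump:
+ *
+ *   PVR_DPF((PVR_DBG_BUFFERED, "hot path, state=%u", ui32State));
+ *   ...
+ *   PVRSRVDebugPrintfDumpCCB();
+ */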
+
+#else  /* defined(PVRSRV_NEED_PVR_DPF) */
+
+	#define PVR_DPF(X)  /*!< Null Implementation of PowerVR Debug Printf (does nothing) */
+
+	#define PVR_LOG_ERROR(_rc, _call) (void)(_rc)
+	#define PVR_LOG_IF_ERROR(_rc, _call) (void)(_rc)
+
+	#define PVR_LOGR_IF_NOMEM(_expr, _call) do { if (unlikely(_expr == NULL)) { return PVRSRV_ERROR_OUT_OF_MEMORY; } MSC_SUPPRESS_4127 } while (0)
+	#define PVR_LOGG_IF_NOMEM(_expr, _call, _err, _go) do { if (unlikely(_expr == NULL)) { _err = PVRSRV_ERROR_OUT_OF_MEMORY; goto _go; } MSC_SUPPRESS_4127	} while (0)
+	#define PVR_LOGR_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while(0)
+	#define PVR_LOGRN_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return; } MSC_SUPPRESS_4127 } while(0)
+	#define PVR_LOGG_IF_ERROR(_rc, _call, _go) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while(0)
+
+	#define PVR_LOG_IF_FALSE(_expr, _msg) (void)(_expr)
+	#define PVR_LOGR_IF_FALSE(_expr, _msg, _rc) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while(0)
+	#define PVR_LOGG_IF_FALSE(_expr, _msg, _go) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while(0)
+
+	#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+#define PVR_RETURN_IF_ERROR(_rc) do \
+	{ if (unlikely(_rc != PVRSRV_OK)) { \
+		return _rc; } \
+	MSC_SUPPRESS_4127\
+	} while (0)
+
+#if defined(DEBUG)
+	#define PVR_LOG_WARN(_rc, _call) \
+		PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__))
+
+	#define PVR_LOG_WARN_IF_ERROR(_rc, _call) do \
+		{ if (unlikely(_rc != PVRSRV_OK)) \
+			PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+		MSC_SUPPRESS_4127\
+		} while (0)
+#else
+	#define PVR_LOG_WARN(_rc, _call) (void)(_rc)
+	#define PVR_LOG_WARN_IF_ERROR(_rc, _call) (void)(_rc)
+#endif
+
+/*! @cond Doxygen_Suppress */
+#if defined(PVR_DPF_FUNCTION_TRACE_ON)
+
+	#define PVR_DPF_ENTERED \
+		PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered", __func__, __LINE__))
+
+	#define PVR_DPF_ENTERED1(p1) \
+		PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered (0x%lx)", __func__, __LINE__, ((unsigned long)p1)))
+
+	#define PVR_DPF_RETURN_RC(a) \
+		do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d", __func__, __LINE__, (_r))); return (_r); MSC_SUPPRESS_4127 } while (0)
+
+	#define PVR_DPF_RETURN_RC1(a,p1) \
+		do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d (0x%lx)", __func__, __LINE__, (_r), ((unsigned long)p1))); return (_r); MSC_SUPPRESS_4127 } while (0)
+
+	#define PVR_DPF_RETURN_VAL(a) \
+		do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned with value", __func__, __LINE__ )); return (a); MSC_SUPPRESS_4127 } while (0)
+
+	#define PVR_DPF_RETURN_OK \
+		do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned ok", __func__, __LINE__)); return PVRSRV_OK; MSC_SUPPRESS_4127 } while (0)
+
+	#define PVR_DPF_RETURN \
+		do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned", __func__, __LINE__)); return; MSC_SUPPRESS_4127 } while (0)
+
+	#if !defined(DEBUG)
+	#error PVR DPF Function trace enabled in release build, rectify
+	#endif
+
+#else /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
+
+	#define PVR_DPF_ENTERED
+	#define PVR_DPF_ENTERED1(p1)
+	#define PVR_DPF_RETURN_RC(a)     return (a)
+	#define PVR_DPF_RETURN_RC1(a,p1) return (a)
+	#define PVR_DPF_RETURN_VAL(a)    return (a)
+	#define PVR_DPF_RETURN_OK        return PVRSRV_OK
+	#define PVR_DPF_RETURN           return
+
+#endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
+/*! @endcond */
+
+#if defined(__KERNEL__) || defined(DOXYGEN) || defined(__QNXNTO__)
+/* Use PVR_DPF() unless the message is necessary in release builds */
+#ifdef PVR_DISABLE_LOGGING
+#define PVR_LOG(X)
+#else
+#define PVR_LOG(X) PVRSRVReleasePrintf X
+#endif
+
+/*************************************************************************/ /*!
+@Function       PVRSRVReleasePrintf
+@Description    Output an important message, using an OS-specific method,
+                to a log or console which can be read by developers in
+                release builds.
+                Invoked from the macro PVR_LOG().
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+@Return         None
+*/ /**************************************************************************/
+void IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) __printf(1, 2);
+#endif
+
+/* PVR_TRACE() handling */
+
+#if defined(PVRSRV_NEED_PVR_TRACE) || defined(DOXYGEN)
+
+	#define PVR_TRACE(X)	PVRSRVTrace X    /*!< PowerVR Debug Trace Macro */
+	/* Empty string implementation that is -O0 build friendly */
+	#define PVR_TRACE_EMPTY_LINE()	PVR_TRACE(("%s", ""))
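+
+	/* Illustrative usage (iNumDevices is a hypothetical variable); note
+	 * the same double-bracket convention as PVR_DPF():
+	 *   PVR_TRACE(("initialised %d devices", iNumDevices));
+	 */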
+
+/*************************************************************************/ /*!
+@Function       PVRSRVTrace
+@Description    Output a debug message to the user.
+                Invoked from the macro PVR_TRACE().
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+IMG_EXPORT void IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... )
+	__printf(1, 2);
+
+#else /* defined(PVRSRV_NEED_PVR_TRACE) */
+    /*! Null Implementation of PowerVR Debug Trace Macro (does nothing) */
+	#define PVR_TRACE(X)
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+
+#if defined(PVRSRV_NEED_PVR_ASSERT)
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_64BITS_TO_32BITS)
+#endif
+	INLINE static IMG_UINT32 TRUNCATE_64BITS_TO_32BITS(IMG_UINT64 uiInput)
+	{
+		 IMG_UINT32 uiTruncated;
+
+		 uiTruncated = (IMG_UINT32)uiInput;
+		 PVR_ASSERT(uiInput == uiTruncated);
+		 return uiTruncated;
+	}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_64BITS_TO_SIZE_T)
+#endif
+	INLINE static size_t TRUNCATE_64BITS_TO_SIZE_T(IMG_UINT64 uiInput)
+	{
+		 size_t uiTruncated;
+
+		 uiTruncated = (size_t)uiInput;
+		 PVR_ASSERT(uiInput == uiTruncated);
+		 return uiTruncated;
+	}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_SIZE_T_TO_32BITS)
+#endif
+	INLINE static IMG_UINT32 TRUNCATE_SIZE_T_TO_32BITS(size_t uiInput)
+	{
+		 IMG_UINT32 uiTruncated;
+
+		 uiTruncated = (IMG_UINT32)uiInput;
+		 PVR_ASSERT(uiInput == uiTruncated);
+		 return uiTruncated;
+	}
+
+
+#else /* defined(PVRSRV_NEED_PVR_ASSERT) */
+	#define TRUNCATE_64BITS_TO_32BITS(expr) ((IMG_UINT32)(expr))
+	#define TRUNCATE_64BITS_TO_SIZE_T(expr) ((size_t)(expr))
+	#define TRUNCATE_SIZE_T_TO_32BITS(expr) ((IMG_UINT32)(expr))
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
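+
+/* Illustrative usage (ui64Size is a hypothetical variable): with
+ * PVRSRV_NEED_PVR_ASSERT the helper asserts that no bits were lost;
+ * otherwise it reduces to a plain cast:
+ *
+ *   IMG_UINT32 ui32Size = TRUNCATE_64BITS_TO_32BITS(ui64Size);
+ */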
+
+/*! @cond Doxygen_Suppress */
+/* Macros used to trace calls */
+#if defined(DEBUG)
+	#define PVR_DBG_FILELINE , (__FILE__), (__LINE__)
+	#define PVR_DBG_FILELINE_PARAM , const IMG_CHAR *pszaFile, IMG_UINT32 ui32Line
+	#define PVR_DBG_FILELINE_ARG , pszaFile, ui32Line
+	#define PVR_DBG_FILELINE_FMT " %s:%u"
+	#define PVR_DBG_FILELINE_UNREF() do { PVR_UNREFERENCED_PARAMETER(pszaFile); \
+				PVR_UNREFERENCED_PARAMETER(ui32Line); } while(0)
+#else
+	#define PVR_DBG_FILELINE
+	#define PVR_DBG_FILELINE_PARAM
+	#define PVR_DBG_FILELINE_ARG
+	#define PVR_DBG_FILELINE_FMT
+	#define PVR_DBG_FILELINE_UNREF()
+#endif
+/*! @endcond */
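+
+/* Illustrative (hypothetical) pattern for the FILELINE macros: they splice
+ * optional file/line parameters into a function signature and its call
+ * sites, and vanish entirely in non-DEBUG builds:
+ *
+ *   void *TrackedAlloc(size_t uiSize PVR_DBG_FILELINE_PARAM);
+ *   #define TrackedAllocCaller(s) TrackedAlloc((s) PVR_DBG_FILELINE)
+ */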
+
+#if defined(__cplusplus)
+}
+#endif
+
+/*!
+    @def PVR_ASSERT
+    @brief Aborts the program if assertion fails.
+
+    The macro is only defined when the PVRSRV_NEED_PVR_ASSERT macro is
+    enabled; it is ignored otherwise.
+
+    @def PVR_DPF
+    @brief PowerVR Debug Printf logging macro used throughout the driver.
+
+    The macro prints logging messages to the appropriate log. The
+    destination log depends on the component (user space / kernel space) and
+    operating system (Linux, Android, etc.).
+
+    The macro also supports severity levels that allow messages to be
+    turned on/off based on their importance.
+
+    This macro will print messages with a severity level higher than error
+    only if the PVRSRV_NEED_PVR_DPF macro is defined.
+
+    @def PVR_LOG_ERROR
+    @brief Logs error.
+
+    @def PVR_LOG_IF_ERROR
+    @brief Logs error if not PVRSRV_OK.
+
+    @def PVR_LOGR_IF_NOMEM
+    @brief Logs error if expression is NULL and returns PVRSRV_ERROR_OUT_OF_MEMORY.
+
+    @def PVR_LOGG_IF_NOMEM
+    @brief Logs error if expression is NULL and jumps to given label.
+
+    @def PVR_LOGR_IF_ERROR
+    @brief Logs error if not PVRSRV_OK and returns the error.
+
+    @def PVR_LOGRN_IF_ERROR
+    @brief Logs error if not PVRSRV_OK and returns (used in function that return void).
+
+    @def PVR_LOGG_IF_ERROR
+    @brief Logs error if not PVRSRV_OK and jumps to label.
+
+    @def PVR_LOG_IF_FALSE
+    @brief Prints error message if expression is false.
+
+    @def PVR_LOGR_IF_FALSE
+    @brief Prints error message if expression is false and returns given error.
+
+    @def PVR_LOGG_IF_FALSE
+    @brief Prints error message if expression is false and jumps to label.
+
+    @def PVR_RETURN_IF_ERROR
+    @brief Returns the passed error code if it is different from PVRSRV_OK.
+
+    @def PVR_LOG_WARN
+    @brief Logs warning.
+
+    @def PVR_LOG_WARN_IF_ERROR
+    @brief Logs warning if not PVRSRV_OK.
+
+    @def PVR_LOG
+    @brief Prints message to a log unconditionally.
+
+    This macro will print messages only if the PVRSRV_NEED_PVR_LOG macro is defined.
+
+    @def PVR_TRACE_EMPTY_LINE
+    @brief Prints an empty line to a log (PVRSRV_NEED_PVR_TRACE must be defined).
+
+    @def TRUNCATE_64BITS_TO_32BITS
+    @brief Truncates 64 bit value to 32 bit value (with possible precision loss).
+
+    @def TRUNCATE_64BITS_TO_SIZE_T
+    @brief Truncates 64 bit value to size_t value (with possible precision loss).
+
+    @def TRUNCATE_SIZE_T_TO_32BITS
+    @brief Truncates size_t value to 32 bit value (with possible precision loss).
+ */
+
+#endif	/* PVR_DEBUG_H */
+
+/******************************************************************************
+ End of file (pvr_debug.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_debugfs.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_debugfs.c
new file mode 100644
index 0000000..16d674c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_debugfs.c
@@ -0,0 +1,844 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for creating debugfs directories and entries.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <pvr_debugfs.h>
+#include <hash.h>
+#include "allocmem.h"
+#include "pvr_bridge_k.h"
+
+#define PVR_DEBUGFS_PVR_DPF_LEVEL PVR_DBG_ERROR
+
+#define PVR_DEBUGFS_DIR_NAME PVR_DRM_NAME
+
+/* Maximum number of debugfs files present at a time */
+#define NUM_HASH_ENTRIES 250
+
+/* Lock used when:
+ * 1) adjusting refCounts
+ * 2) deleting entries
+ * 3) inserting, retrieving and removing entries from gHashTable */
+static struct mutex gDebugFSHashAndRefLock;
+
+/* Hash table storing pointers to allocated memory.
+   It helps to avoid use-after-free cases. */
+static HASH_TABLE *gHashTable;
+
+typedef struct _PVR_DEBUGFS_DIR_
+{
+	struct dentry*        psDirEntry;
+	PPVR_DEBUGFS_DIR_DATA psParentDir;
+	IMG_UINT32            ui32DirRefCount;
+} PVR_DEBUGFS_DIR;
+
+typedef struct _PVR_DEBUGFS_FILE_
+{
+	struct dentry*               psFileEntry;
+	PVR_DEBUGFS_DIR*             psParentDir;
+	const struct seq_operations* psReadOps;
+	OS_STATS_PRINT_FUNC*         pfnStatsPrint;
+	PVRSRV_ENTRY_WRITE_FUNC*     pfnWrite;
+	IMG_UINT32                   ui32FileRefCount;
+	void*                        pvData;
+} PVR_DEBUGFS_FILE;
+
+static struct dentry* gpsPVRDebugFSEntryDir;
+
+static IMG_BOOL _RefDebugFSDir(PVR_DEBUGFS_DIR *psDebugFSDir);
+static void     _UnrefAndMaybeDestroyDebugFSDir(PVR_DEBUGFS_DIR **ppsDebugFSDir);
+static IMG_BOOL _RefDebugFSFile(PVR_DEBUGFS_FILE *psDebugFSFile);
+static void     _UnrefAndMaybeDestroyDebugFSFile(PVR_DEBUGFS_FILE **ppsDebugFSFile);
+
+
+static void _StatsSeqPrintf(void *pvFile, const IMG_CHAR *pszFormat, ...)
+{
+	IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+	va_list  ArgList;
+
+	va_start(ArgList, pszFormat);
+	vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+	seq_printf((struct seq_file *)pvFile, "%s", szBuffer);
+	va_end(ArgList);
+}
+
+static int _DebugFSStatisticSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_DEBUGFS_FILE *psDebugFSFile = (PVR_DEBUGFS_FILE *)psSeqFile->private;
+
+	if (psDebugFSFile != NULL)
+	{
+		if (psDebugFSFile->pfnStatsPrint == NULL)
+		{
+			PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called for file '%s', "
+				"which does not have pfnStatsPrint defined, returning -EIO(%d)",
+				__func__, psDebugFSFile->psFileEntry->d_iname, -EIO));
+			return -EIO;
+		}
+
+		psDebugFSFile->pfnStatsPrint((void*)psSeqFile, psDebugFSFile->pvData, _StatsSeqPrintf);
+		return 0;
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL,
+			 "%s: Called when psDebugFSFile is NULL, returning -ENODATA(%d)",
+			 __func__, -ENODATA));
+	}
+
+	return -ENODATA;
+}
+
+/*************************************************************************/ /*!
+ Common internal API
+*/ /**************************************************************************/
+
+#define _DRIVER_THREAD_ENTER() \
+	do { \
+		PVRSRV_ERROR eLocalError = PVRSRVDriverThreadEnter(); \
+		if (eLocalError != PVRSRV_OK) \
+		{ \
+			PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s", \
+				__func__, PVRSRVGetErrorString(eLocalError))); \
+			return OSPVRSRVToNativeError(eLocalError); \
+		} \
+	} while (0)
+
+#define _DRIVER_THREAD_EXIT() \
+	PVRSRVDriverThreadExit()
+
+static int _DebugFSFileOpen(struct inode *psINode, struct file *psFile)
+{
+	PVR_DEBUGFS_FILE *psDebugFSFile;
+	int iResult = -EIO;
+
+	_DRIVER_THREAD_ENTER();
+
+	PVR_ASSERT(psINode);
+	psDebugFSFile = (PVR_DEBUGFS_FILE *)psINode->i_private;
+
+	if (psDebugFSFile != NULL)
+	{
+		/* Take ref on stat entry before opening seq file - this ref will
+		 * be dropped if we fail to open the seq file or when we close it
+		 */
+		if (_RefDebugFSFile(psDebugFSFile))
+		{
+			if (psDebugFSFile->psReadOps != NULL)
+			{
+				iResult = seq_open(psFile, psDebugFSFile->psReadOps);
+
+				if (iResult == 0)
+				{
+					struct seq_file *psSeqFile = psFile->private_data;
+
+					psSeqFile->private = psDebugFSFile->pvData;
+				}
+			}
+			else
+			{
+				iResult = single_open(psFile, _DebugFSStatisticSeqShow, psDebugFSFile);
+			}
+
+			if (iResult != 0)
+			{
+				/* Drop ref if we failed to open seq file */
+				_UnrefAndMaybeDestroyDebugFSFile(&psDebugFSFile);
+
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to seq_open psFile, returning %d",
+					 __func__, iResult));
+			}
+		}
+	}
+	else
+	{
+		/* No unlock is needed here: _RefDebugFSFile() acquires and
+		 * releases gDebugFSHashAndRefLock internally, and this path
+		 * never takes the lock. */
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL,
+			 "%s: Called when psDebugFSFile is NULL", __func__));
+	}
+
+	_DRIVER_THREAD_EXIT();
+
+	return iResult;
+}
+
+static int _DebugFSFileClose(struct inode *psINode, struct file *psFile)
+{
+	int iResult = -EIO;
+	PVR_DEBUGFS_FILE *psDebugFSFile = (PVR_DEBUGFS_FILE *)psINode->i_private;
+
+	if (psDebugFSFile != NULL)
+	{
+		_DRIVER_THREAD_ENTER();
+
+		if (psDebugFSFile->psReadOps != NULL)
+		{
+			iResult = seq_release(psINode, psFile);
+		}
+		else
+		{
+			iResult = single_release(psINode, psFile);
+		}
+
+		if (iResult != 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release psFile, returning %d",
+			__func__, iResult));
+		}
+
+		_UnrefAndMaybeDestroyDebugFSFile(&psDebugFSFile);
+
+		_DRIVER_THREAD_EXIT();
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL,
+			 "%s: Called when psDebugFSFile is NULL", __func__));
+	}
+
+	return iResult;
+}
+
+static ssize_t _DebugFSFileRead(struct file *psFile,
+				char __user *pszBuffer,
+				size_t uiCount,
+				loff_t *puiPosition)
+{
+	ssize_t iResult;
+
+	_DRIVER_THREAD_ENTER();
+
+	iResult = seq_read(psFile, pszBuffer, uiCount, puiPosition);
+
+	if (iResult < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to read psFile, returning %zd",
+		__func__, iResult));
+	}
+
+	_DRIVER_THREAD_EXIT();
+
+	return iResult;
+}
+
+static loff_t _DebugFSFileLSeek(struct file *psFile,
+				loff_t iOffset,
+				int iOrigin)
+{
+	loff_t iResult;
+
+	_DRIVER_THREAD_ENTER();
+
+	iResult = seq_lseek(psFile, iOffset, iOrigin);
+
+	if (iResult < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reposition offset of psFile, returning %lld",
+		__func__, iResult));
+	}
+
+	_DRIVER_THREAD_EXIT();
+
+	return iResult;
+}
+
+static ssize_t _DebugFSFileWrite(struct file *psFile,
+				 const char __user *pszBuffer,
+				 size_t uiCount,
+				 loff_t *puiPosition)
+{
+	struct inode *psINode = psFile->f_path.dentry->d_inode;
+	PVR_DEBUGFS_FILE *psDebugFSFile = (PVR_DEBUGFS_FILE *)psINode->i_private;
+	ssize_t iResult = -EIO;
+
+	if (psDebugFSFile != NULL)
+	{
+		if (psDebugFSFile->pfnWrite == NULL)
+		{
+			PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called for file '%s', "
+				"which does not have pfnWrite defined, returning -EIO(%d)",
+				__func__, psFile->f_path.dentry->d_iname, -EIO));
+			goto exit;
+		}
+
+		_DRIVER_THREAD_ENTER();
+
+		iResult = psDebugFSFile->pfnWrite(pszBuffer, uiCount, puiPosition, psDebugFSFile->pvData);
+
+		if (iResult < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to write to psFile, returning %zd",
+			__func__, iResult));
+		}
+
+		_DRIVER_THREAD_EXIT();
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL,
+			 "%s: Called when psDebugFSFile is NULL", __func__));
+	}
+
+exit:
+	return iResult;
+}
+
+static const struct file_operations gsPVRDebugFSFileOps =
+{
+	.owner = THIS_MODULE,
+	.open = _DebugFSFileOpen,
+	.read = _DebugFSFileRead,
+	.write = _DebugFSFileWrite,
+	.llseek = _DebugFSFileLSeek,
+	.release = _DebugFSFileClose,
+};
+
+/*****************************************************************************************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSInit
+@Description    Initialise PVR debugfs support. This should be called before
+                using any PVRDebugFS functions.
+@Return         int      On success, returns 0. Otherwise, returns an
+                         error code.
+*/ /**************************************************************************/
+int PVRDebugFSInit(void)
+{
+	PVR_ASSERT(gpsPVRDebugFSEntryDir == NULL);
+
+	mutex_init(&gDebugFSHashAndRefLock);
+
+	gHashTable = HASH_Create(NUM_HASH_ENTRIES);
+	if (gHashTable == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create Hash Table", __func__));
+		return -ENOMEM;
+	}
+
+	gpsPVRDebugFSEntryDir = debugfs_create_dir(PVR_DEBUGFS_DIR_NAME, NULL);
+	if (gpsPVRDebugFSEntryDir == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create '%s' debugfs root directory",
+			 __func__, PVR_DEBUGFS_DIR_NAME));
+
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSDeInit
+@Description    Deinitialise PVR debugfs support. This should be called only
+                if PVRDebugFSInit() has already been called. All debugfs
+                directories and entries should be removed beforehand,
+                otherwise this function will fail.
+@Return         void
+*/ /**************************************************************************/
+void PVRDebugFSDeInit(void)
+{
+	if (gpsPVRDebugFSEntryDir != NULL)
+	{
+		debugfs_remove(gpsPVRDebugFSEntryDir);
+		gpsPVRDebugFSEntryDir = NULL;
+
+		HASH_Delete(gHashTable);
+		gHashTable = NULL;
+
+		mutex_destroy(&gDebugFSHashAndRefLock);
+	}
+}
+
+/*****************************************************************************************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSCreateEntryDir
+@Description    Create a directory for debugfs entries that will be located
+                under the root directory, as created by
+                PVRDebugFSInit().
+@Input          pszDirName       String containing the name for the directory.
+@Input          psParentDir      The parent directory in which to create the new
+                                 directory. This should either be NULL, meaning it
+                                 should be created in the root directory, or a
+                                 pointer to a directory as returned by this
+                                 function.
+@Output	        ppsNewDir        On success, points to the newly created
+                                 directory.
+@Return         int              On success, returns 0. Otherwise, returns an
+                                 error code.
+*/ /**************************************************************************/
+int PVRDebugFSCreateEntryDir(const IMG_CHAR *pszDirName,
+		    PVR_DEBUGFS_DIR *psParentDir,
+		    PVR_DEBUGFS_DIR **ppsNewDir)
+{
+	PVR_DEBUGFS_DIR *psNewDir;
+	struct dentry *psDirEntry;
+
+	PVR_ASSERT(gpsPVRDebugFSEntryDir != NULL);
+	if (pszDirName == NULL || ppsNewDir == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid parameter", __func__));
+		return -EINVAL;
+	}
+
+	psNewDir = OSAllocMemNoStats(sizeof(*psNewDir));
+	if (psNewDir == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot allocate memory for '%s' pvr_debugfs structure",
+			 __func__, pszDirName));
+		return -ENOMEM;
+	}
+
+	psNewDir->psParentDir = psParentDir;
+	psDirEntry = debugfs_create_dir(pszDirName, (psNewDir->psParentDir) ?
+		     psNewDir->psParentDir->psDirEntry : gpsPVRDebugFSEntryDir);
+
+	if (IS_ERR_OR_NULL(psDirEntry))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create '%s' debugfs directory",
+			 __func__, pszDirName));
+
+		OSFreeMemNoStats(psNewDir);
+		return (NULL == psDirEntry) ? -ENOMEM : -ENODEV;
+	}
+
+	psNewDir->psDirEntry = psDirEntry;
+	*ppsNewDir = psNewDir;
+	psNewDir->ui32DirRefCount = 1;
+
+	/* if parent directory is not gpsPVRDebugFSEntryDir, increment its refCount */
+	if (psNewDir->psParentDir != NULL)
+	{
+		/* if we fail to acquire the reference, that probably means the
+		 * parent dir was already freed - we have to clean up in this situation */
+		if (!_RefDebugFSDir(psNewDir->psParentDir))
+		{
+			_UnrefAndMaybeDestroyDebugFSDir(ppsNewDir);
+			return -EFAULT;
+		}
+	}
+
+	mutex_lock(&gDebugFSHashAndRefLock);
+	if (!HASH_Insert(gHashTable, (uintptr_t)psNewDir, (uintptr_t)psNewDir))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Failed to add Hash entry for '%s' debugfs directory",
+			 __func__, pszDirName));
+
+		mutex_unlock(&gDebugFSHashAndRefLock);
+		_UnrefAndMaybeDestroyDebugFSDir(ppsNewDir);
+		return -ENOMEM;
+	}
+	mutex_unlock(&gDebugFSHashAndRefLock);
+
+	return 0;
+}
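+
+/* Illustrative (hypothetical) caller: create a sub-directory under the
+ * driver's debugfs root, and tear it down with the matching remove call:
+ *
+ *   static PVR_DEBUGFS_DIR *gpsStatsDir;
+ *   if (PVRDebugFSCreateEntryDir("stats", NULL, &gpsStatsDir) != 0)
+ *       goto err;
+ *   ...
+ *   PVRDebugFSRemoveEntryDir(&gpsStatsDir);
+ */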
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSRemoveEntryDir
+@Description    Remove a directory that was created by
+                PVRDebugFSCreateEntryDir(). Any directories or files created
+                under the directory being removed should be removed first.
+@Input          ppsDir       Pointer representing the directory to be removed.
+                             This must be a double pointer to avoid possible
+                             races and use-after-free situations.
+@Return         void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveEntryDir(PVR_DEBUGFS_DIR **ppsDir)
+{
+	_UnrefAndMaybeDestroyDebugFSDir(ppsDir);
+}
+
+static IMG_BOOL _RefDebugFSDir(PVR_DEBUGFS_DIR *psDebugFSDir)
+{
+	IMG_BOOL bStatus = IMG_FALSE;
+	uintptr_t uiHashVal;
+
+	PVR_ASSERT(psDebugFSDir != NULL && psDebugFSDir->psDirEntry != NULL);
+
+	mutex_lock(&gDebugFSHashAndRefLock);
+
+	uiHashVal = HASH_Retrieve(gHashTable, (uintptr_t)psDebugFSDir);
+	if (uiHashVal == 0)
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL,
+			 "%s: Directory (%p) is already deleted, abort read",
+			 __func__, psDebugFSDir));
+		goto exit;
+	}
+	PVR_ASSERT(uiHashVal == (uintptr_t)psDebugFSDir);
+
+	if (psDebugFSDir->ui32DirRefCount > 0)
+	{
+		/* Increment refCount */
+		psDebugFSDir->ui32DirRefCount++;
+		bStatus = IMG_TRUE;
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called ref on psDebugFSDir '%s'"
+			" when ui32RefCount is zero", __func__,
+			psDebugFSDir->psDirEntry->d_iname));
+	}
+
+exit:
+	mutex_unlock(&gDebugFSHashAndRefLock);
+
+	return bStatus;
+}
+
+/* decrements refCount on a directory and removes it if the count reaches
+ * 0, this function also walks recursively over parent directories and
+ * decrements refCount on them too
+ * note: it's safe to call this function with *ppsDebugFSDir pointing to NULL */
+static void _UnrefAndMaybeDestroyDebugFSDir(PVR_DEBUGFS_DIR **ppsDebugFSDir)
+{
+	PVR_DEBUGFS_DIR *psDebugFSDir, *psParentDir = NULL;
+	struct dentry *psDir = NULL;
+
+	PVR_ASSERT(ppsDebugFSDir != NULL);
+
+	psDebugFSDir = *ppsDebugFSDir;
+
+	/* it's ok to call this function with NULL pointer */
+	if (psDebugFSDir == NULL)
+	{
+		return;
+	}
+
+	mutex_lock(&gDebugFSHashAndRefLock);
+
+	PVR_ASSERT(psDebugFSDir->psDirEntry != NULL);
+
+	if (psDebugFSDir->ui32DirRefCount > 0)
+	{
+		/* Decrement refCount and free if now zero */
+		if (--psDebugFSDir->ui32DirRefCount == 0)
+		{
+			uintptr_t uiHashVal;
+
+			uiHashVal = HASH_Remove(gHashTable, (uintptr_t)psDebugFSDir);
+			if (uiHashVal == 0)
+			{
+				PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL,
+					 "%s: Entry for Dir '%s'' not found in Hash table",
+					 __func__, psDebugFSDir->psDirEntry->d_iname));
+			}
+			else
+			{
+				PVR_ASSERT(uiHashVal == (uintptr_t)psDebugFSDir);
+			}
+
+			psDir = psDebugFSDir->psDirEntry;
+			psParentDir = psDebugFSDir->psParentDir;
+
+			psDebugFSDir->psDirEntry = NULL;
+			psDebugFSDir->psParentDir = NULL;
+
+			*ppsDebugFSDir = NULL;
+
+			OSFreeMemNoStats(psDebugFSDir);
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psDebugFSDir '%s'"
+			" when ui32RefCount is zero", __func__,
+			psDebugFSDir->psDirEntry->d_iname));
+	}
+
+	/* unlock here so we do not interact with the locks that might
+	 * be taken in debugfs_remove() */
+	mutex_unlock(&gDebugFSHashAndRefLock);
+
+	if (psDir != NULL)
+	{
+		debugfs_remove(psDir);
+	}
+
+	/* decrement refcount of parent directory */
+	if (psParentDir != NULL)
+	{
+		_UnrefAndMaybeDestroyDebugFSDir(&psParentDir);
+	}
+}
+
+/*****************************************************************************************************************************************************/
+
+/*************************************************************************/ /*!
+@Function               PVRDebugFSCreateFile
+@Description            Create an entry in the specified directory.
+@Input                  pszName        String containing the name for the entry.
+@Input                  psParentDir    Pointer from PVRDebugFSCreateEntryDir()
+                                       representing the directory in which to create
+                                       the entry or NULL for the root directory.
+@Input                  psReadOps      Pointer to structure containing the necessary
+                                       functions to read from the entry.
+@Input                  pfnWrite       Callback function used to write to the entry.
+                                       This function must update the offset pointer
+                                       before it returns.
+@Input                  pfnStatsPrint  A callback function used to print all the
+                                       statistics when reading from the statistic
+                                       entry.
+@Input                  pvData         Private data to be passed to the read
+                                       functions, in the seq_file private member, and
+                                       the write function callback.
+@Output                 ppsNewFile     On success, points to the newly created entry.
+@Return                 int            On success, returns 0. Otherwise, returns an
+                                       error code.
+*/ /**************************************************************************/
+int PVRDebugFSCreateFile(const char *pszName,
+			  PVR_DEBUGFS_DIR *psParentDir,
+			  const struct seq_operations *psReadOps,
+			  PVRSRV_ENTRY_WRITE_FUNC *pfnWrite,
+			  OS_STATS_PRINT_FUNC *pfnStatsPrint,
+			  void *pvData,
+			  PVR_DEBUGFS_FILE **ppsNewFile)
+{
+	PVR_DEBUGFS_FILE *psDebugFSFile;
+	struct dentry *psEntry;
+	umode_t uiMode;
+
+	PVR_ASSERT(gpsPVRDebugFSEntryDir != NULL);
+
+	psDebugFSFile = OSAllocMemNoStats(sizeof(*psDebugFSFile));
+	if (psDebugFSFile == NULL)
+	{
+		return -ENOMEM;
+	}
+
+	psDebugFSFile->psReadOps = psReadOps;
+	psDebugFSFile->pfnWrite = pfnWrite;
+	psDebugFSFile->pvData = pvData;
+	psDebugFSFile->pfnStatsPrint = pfnStatsPrint;
+
+	uiMode = S_IFREG;
+
+	if (psReadOps != NULL || pfnStatsPrint != NULL)
+	{
+		uiMode |= S_IRUGO;
+	}
+
+	if (pfnWrite != NULL)
+	{
+		uiMode |= S_IWUSR;
+	}
+
+	psDebugFSFile->psParentDir = psParentDir;
+	psDebugFSFile->ui32FileRefCount = 1;
+
+	psEntry = debugfs_create_file(pszName,
+				      uiMode,
+				      (psParentDir != NULL) ? psParentDir->psDirEntry : gpsPVRDebugFSEntryDir,
+				      psDebugFSFile,
+				      &gsPVRDebugFSFileOps);
+	if (IS_ERR_OR_NULL(psEntry))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create debugfs '%s' file",
+			 __func__, pszName));
+
+		OSFreeMemNoStats(psDebugFSFile);
+		return (NULL == psEntry) ? -ENOMEM : -ENODEV;
+	}
+
+	psDebugFSFile->psFileEntry = psEntry;
+	if (ppsNewFile != NULL)
+	{
+		*ppsNewFile = (void*)psDebugFSFile;
+	}
+
+	if (psDebugFSFile->psParentDir != NULL)
+	{
+		/* increment refCount of parent directory */
+		if (!_RefDebugFSDir(psDebugFSFile->psParentDir))
+		{
+			_UnrefAndMaybeDestroyDebugFSFile(ppsNewFile);
+			return -EFAULT;
+		}
+	}
+
+	mutex_lock(&gDebugFSHashAndRefLock);
+	if (!HASH_Insert(gHashTable, (uintptr_t)psDebugFSFile, (uintptr_t)psDebugFSFile))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Failed to add Hash entry for '%s' debugfs file",
+			 __func__, pszName));
+
+		mutex_unlock(&gDebugFSHashAndRefLock);
+		_UnrefAndMaybeDestroyDebugFSFile(ppsNewFile);
+		return -ENOMEM;
+	}
+	mutex_unlock(&gDebugFSHashAndRefLock);
+
+	return 0;
+}
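+
+/* Illustrative (hypothetical) caller: a read-only entry backed by either
+ * seq_operations or a stats-print callback (exactly one of the two):
+ *
+ *   static PVR_DEBUGFS_FILE *gpsStatusFile;
+ *   iErr = PVRDebugFSCreateFile("status", gpsStatsDir, &gsStatusReadOps,
+ *                               NULL, NULL, pvPrivate, &gpsStatusFile);
+ */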
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSRemoveFile
+@Description    Removes an entry that was created by PVRDebugFSCreateFile().
+@Input          ppsDebugFSFile   Pointer representing the entry to be removed.
+                This must be a double pointer to avoid possible races
+                and use-after-free situations.
+@Return         void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveFile(PVR_DEBUGFS_FILE **ppsDebugFSFile)
+{
+	_UnrefAndMaybeDestroyDebugFSFile(ppsDebugFSFile);
+}
+
+
+static IMG_BOOL _RefDebugFSFile(PVR_DEBUGFS_FILE *psDebugFSFile)
+{
+	IMG_BOOL bResult = IMG_FALSE;
+	uintptr_t uiHashVal;
+
+	mutex_lock(&gDebugFSHashAndRefLock);
+
+	PVR_ASSERT(psDebugFSFile != NULL);
+
+	uiHashVal = HASH_Retrieve(gHashTable, (uintptr_t)psDebugFSFile);
+	if (uiHashVal == 0)
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: File (%p) is already deleted",
+			 __func__, psDebugFSFile));
+
+		goto exit;
+	}
+	PVR_ASSERT(uiHashVal == (uintptr_t)psDebugFSFile);
+
+	if (psDebugFSFile->ui32FileRefCount > 0)
+	{
+		/* Increment refCount of psDebugFSFile */
+		psDebugFSFile->ui32FileRefCount++;
+		bResult = IMG_TRUE;
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called ref on psDebugFSFile '%s'"
+			" when ui32FileRefCount is zero", __func__,
+			psDebugFSFile->psFileEntry->d_iname));
+	}
+
+exit:
+	mutex_unlock(&gDebugFSHashAndRefLock);
+
+	return bResult;
+}
+
+static void _UnrefAndMaybeDestroyDebugFSFile(PVR_DEBUGFS_FILE **ppsDebugFSFile)
+{
+	PVR_DEBUGFS_FILE *psDebugFSFile;
+	PVR_DEBUGFS_DIR *psParentDir = NULL;
+	struct dentry *psEntry = NULL;
+
+	mutex_lock(&gDebugFSHashAndRefLock);
+
+	/* Decrement refCount of psDebugFSFile, and free if now zero */
+	psDebugFSFile = *ppsDebugFSFile;
+	PVR_ASSERT(psDebugFSFile != NULL);
+
+	if (psDebugFSFile->ui32FileRefCount > 0)
+	{
+		if (--psDebugFSFile->ui32FileRefCount == 0)
+		{
+			uintptr_t uiHashVal;
+
+			uiHashVal = HASH_Remove(gHashTable, (uintptr_t)psDebugFSFile);
+			if (uiHashVal == 0)
+			{
+				PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL,
+					 "%s: Entry for File '%s'' not found in Hash table",
+					 __func__, psDebugFSFile->psFileEntry->d_iname));
+			}
+			else
+			{
+				PVR_ASSERT(uiHashVal == (uintptr_t)psDebugFSFile);
+			}
+
+			psEntry = psDebugFSFile->psFileEntry;
+			psParentDir = psDebugFSFile->psParentDir;
+
+			if (psEntry != NULL)
+			{
+				/* set to NULL so nothing can reference this pointer; we have
+				 * a copy that will be used to free the memory */
+				*ppsDebugFSFile = NULL;
+
+				psEntry->d_inode->i_private = NULL;
+			}
+
+			/* now free the memory allocated for psDebugFSFile */
+			OSFreeMemNoStats(psDebugFSFile);
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psDebugFSFile"
+			" '%s' when ui32RefCount is zero", __func__,
+			psDebugFSFile->psFileEntry->d_iname));
+	}
+
+	/* unlock here so we do not interact with the locks that might
+	 * be taken in debugfs_remove() */
+	mutex_unlock(&gDebugFSHashAndRefLock);
+
+	if (psEntry != NULL)
+	{
+		/* we can safely do this outside the lock now since, even if
+		 * something opens the file, the private data is already
+		 * NULL */
+		debugfs_remove(psEntry);
+	}
+
+	if (psParentDir != NULL)
+	{
+		/* decrement refcount of parent directory */
+		_UnrefAndMaybeDestroyDebugFSDir(&psParentDir);
+	}
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_debugfs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_debugfs.h
new file mode 100644
index 0000000..1b10250
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_debugfs.h
@@ -0,0 +1,84 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for creating debugfs directories and entries.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_DEBUGFS_H
+#define PVR_DEBUGFS_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+
+typedef ssize_t (PVRSRV_ENTRY_WRITE_FUNC)(const char __user *pszBuffer,
+					  size_t uiCount,
+					  loff_t *puiPosition,
+					  void *pvData);
+
+typedef IMG_UINT32 (PVRSRV_INC_PVDATA_REFCNT_FN)(void *pvData);
+typedef IMG_UINT32 (PVRSRV_DEC_PVDATA_REFCNT_FN)(void *pvData);
+
+typedef struct _PVR_DEBUGFS_DIR_ *PPVR_DEBUGFS_DIR_DATA;
+typedef struct _PVR_DEBUGFS_FILE_ *PPVR_DEBUGFS_ENTRY_DATA;
+
+int PVRDebugFSInit(void);
+void PVRDebugFSDeInit(void);
+
+int PVRDebugFSCreateEntryDir(const char *pszName,
+			PPVR_DEBUGFS_DIR_DATA psParentDir,
+			PPVR_DEBUGFS_DIR_DATA *ppsNewDir);
+
+void PVRDebugFSRemoveEntryDir(PPVR_DEBUGFS_DIR_DATA *ppsDir);
+
+int PVRDebugFSCreateFile(const char *pszName,
+			 PPVR_DEBUGFS_DIR_DATA psParentDir,
+			 const struct seq_operations *psReadOps,
+			 PVRSRV_ENTRY_WRITE_FUNC *pfnWrite,
+			 OS_STATS_PRINT_FUNC *pfnStatsPrint,
+			 void *pvData,
+			 PPVR_DEBUGFS_ENTRY_DATA *ppsNewFile);
+
+
+void PVRDebugFSRemoveFile(PPVR_DEBUGFS_ENTRY_DATA *ppsDebugFSEntry);
+
+#endif /* PVR_DEBUGFS_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_drm.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_drm.c
new file mode 100644
index 0000000..1991c33
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_drm.c
@@ -0,0 +1,297 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR DRM driver
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <drm/drm.h>
+#include <drm/drmP.h> /* include before drm_crtc.h for kernels older than 3.9 */
+#include <drm/drm_crtc.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/version.h>
+
+#include "module_common.h"
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+#include "pvrversion.h"
+#include "services_kernel_client.h"
+
+#include "kernel_compatibility.h"
+
+#define PVR_DRM_DRIVER_NAME PVR_DRM_NAME
+#define PVR_DRM_DRIVER_DESC "Imagination Technologies PVR DRM"
+#define	PVR_DRM_DRIVER_DATE "20170530"
+
+
+static int pvr_pm_suspend(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct pvr_drm_private *priv = ddev->dev_private;
+
+	DRM_DEBUG_DRIVER("device %p\n", dev);
+
+	return PVRSRVCommonDeviceSuspend(priv->dev_node);
+}
+
+static int pvr_pm_resume(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct pvr_drm_private *priv = ddev->dev_private;
+
+	DRM_DEBUG_DRIVER("device %p\n", dev);
+
+	return PVRSRVCommonDeviceResume(priv->dev_node);
+}
+
+const struct dev_pm_ops pvr_pm_ops = {
+	.suspend = pvr_pm_suspend,
+	.resume = pvr_pm_resume,
+};
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+static
+#endif
+int pvr_drm_load(struct drm_device *ddev, unsigned long flags)
+{
+	struct pvr_drm_private *priv;
+	enum PVRSRV_ERROR srv_err;
+	int err, deviceId;
+
+	DRM_DEBUG_DRIVER("device %p\n", ddev->dev);
+
+	dev_set_drvdata(ddev->dev, ddev);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+	/*
+	 * Older kernels do not have the render drm_minor member in drm_device,
+	 * so we fall back to the primary node for device identification.
+	 */
+	deviceId = ddev->primary->index;
+#else
+	if (ddev->render)
+		deviceId = ddev->render->index;
+	else /* when the render node is NULL, fall back to the primary node */
+		deviceId = ddev->primary->index;
+#endif
+
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		err = -ENOMEM;
+		goto err_exit;
+	}
+	ddev->dev_private = priv;
+
+#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC)
+	priv->fence_status_wq = create_freezable_workqueue("pvr_fce_status");
+	if (!priv->fence_status_wq) {
+		DRM_ERROR("failed to create fence status workqueue\n");
+		err = -ENOMEM;
+		goto err_free_priv;
+	}
+#endif
+
+	srv_err = PVRSRVDeviceCreate(ddev->dev, deviceId, &priv->dev_node);
+	if (srv_err != PVRSRV_OK) {
+		DRM_ERROR("failed to create device node for device %p (%s)\n",
+			  ddev->dev, PVRSRVGetErrorString(srv_err));
+		if (srv_err == PVRSRV_ERROR_PROBE_DEFER)
+			err = -EPROBE_DEFER;
+		else
+			err = -ENODEV;
+		goto err_workqueue_destroy;
+	}
+
+	err = PVRSRVCommonDeviceInit(priv->dev_node);
+	if (err) {
+		DRM_ERROR("device %p initialisation failed (err=%d)\n",
+			  ddev->dev, err);
+		goto err_device_destroy;
+	}
+
+	drm_mode_config_init(ddev);
+
+	return 0;
+
+err_device_destroy:
+	PVRSRVDeviceDestroy(priv->dev_node);
+err_workqueue_destroy:
+#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC)
+	destroy_workqueue(priv->fence_status_wq);
+err_free_priv:
+#endif
+	kfree(priv);
+err_exit:
+	return err;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+static
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+int pvr_drm_unload(struct drm_device *ddev)
+#else
+void pvr_drm_unload(struct drm_device *ddev)
+#endif
+{
+	struct pvr_drm_private *priv = ddev->dev_private;
+
+	DRM_DEBUG_DRIVER("device %p\n", ddev->dev);
+
+	drm_mode_config_cleanup(ddev);
+
+	PVRSRVCommonDeviceDeinit(priv->dev_node);
+
+	PVRSRVDeviceDestroy(priv->dev_node);
+
+#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC)
+	destroy_workqueue(priv->fence_status_wq);
+#endif
+
+	kfree(priv);
+	ddev->dev_private = NULL;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+	return 0;
+#endif
+}
+
+static int pvr_drm_open(struct drm_device *ddev, struct drm_file *dfile)
+{
+	struct pvr_drm_private *priv = ddev->dev_private;
+	int err;
+
+	if (!try_module_get(THIS_MODULE)) {
+		DRM_ERROR("failed to get module reference\n");
+		return -ENOENT;
+	}
+
+	err = PVRSRVCommonDeviceOpen(priv->dev_node, dfile);
+	if (err)
+		module_put(THIS_MODULE);
+
+	return err;
+}
+
+static void pvr_drm_release(struct drm_device *ddev, struct drm_file *dfile)
+{
+	struct pvr_drm_private *priv = ddev->dev_private;
+
+	PVRSRVCommonDeviceRelease(priv->dev_node, dfile);
+
+	module_put(THIS_MODULE);
+}
+
+/*
+ * The DRM global lock is taken for ioctls unless the DRM_UNLOCKED flag is set.
+ * If you revise one of the driver-specific ioctls, or add a new one, that has
+ * DRM_UNLOCKED set, then consider whether the gPVRSRVLock mutex needs to be
+ * taken.
+ */
+static struct drm_ioctl_desc pvr_drm_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(PVR_SRVKM_CMD, PVRSRV_BridgeDispatchKM, DRM_RENDER_ALLOW | DRM_UNLOCKED)
+};
+
+#if defined(CONFIG_COMPAT)
+static long pvr_compat_ioctl(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(file, cmd, arg);
+
+	return drm_ioctl(file, cmd, arg);
+}
+#endif /* defined(CONFIG_COMPAT) */
+
+static const struct file_operations pvr_drm_fops = {
+	.owner			= THIS_MODULE,
+	.open			= drm_open,
+	.release		= drm_release,
+	/*
+	 * FIXME:
+	 * Wrap this in a function that checks enough data has been
+	 * supplied with the ioctl (e.g. _IOCDIR(nr) != _IOC_NONE &&
+	 * _IOC_SIZE(nr) == size).
+	 */
+	.unlocked_ioctl		= drm_ioctl,
+#if defined(CONFIG_COMPAT)
+	.compat_ioctl		= pvr_compat_ioctl,
+#endif
+	.mmap			= PVRSRV_MMap,
+	.poll			= drm_poll,
+	.read			= drm_read,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+	.fasync			= drm_fasync,
+#endif
+};
+
+const struct drm_driver pvr_drm_generic_driver = {
+	.driver_features	= DRIVER_MODESET | DRIVER_RENDER,
+
+	.dev_priv_size		= 0,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+	.load			= NULL,
+	.unload			= NULL,
+#else
+	.load			= pvr_drm_load,
+	.unload			= pvr_drm_unload,
+#endif
+	.open			= pvr_drm_open,
+	.postclose		= pvr_drm_release,
+
+	.ioctls			= pvr_drm_ioctls,
+	.num_ioctls		= ARRAY_SIZE(pvr_drm_ioctls),
+	.fops			= &pvr_drm_fops,
+
+	.name			= PVR_DRM_DRIVER_NAME,
+	.desc			= PVR_DRM_DRIVER_DESC,
+	.date			= PVR_DRM_DRIVER_DATE,
+	.major			= PVRVERSION_MAJ,
+	.minor			= PVRVERSION_MIN,
+	.patchlevel		= PVRVERSION_BUILD,
+};
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_drm.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_drm.h
new file mode 100644
index 0000000..3383e23
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_drm.h
@@ -0,0 +1,85 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PVR DRM definitions shared between kernel and user space.
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRM_H__)
+#define __PVR_DRM_H__
+
+#include "pvr_drm_core.h"
+
+/*
+ * IMPORTANT:
+ * All structures below are designed to be the same size when compiled for 32
+ * and/or 64 bit architectures, i.e. there should be no compiler-inserted
+ * padding. This is achieved by sticking to the following rules:
+ * 1) only use fixed width types
+ * 2) always naturally align fields by arranging them appropriately and by using
+ *    padding fields when necessary
+ *
+ * These rules should _always_ be followed when modifying or adding new
+ * structures to this file.
+ */
+
+struct drm_pvr_srvkm_cmd {
+	__u32 bridge_id;
+	__u32 bridge_func_id;
+	__u64 in_data_ptr;
+	__u64 out_data_ptr;
+	__u32 in_data_size;
+	__u32 out_data_size;
+};
+
+/*
+ * DRM command numbers, relative to DRM_COMMAND_BASE.
+ * These defines must be prefixed with "DRM_".
+ */
+#define DRM_PVR_SRVKM_CMD		0 /* Used for PVR Services ioctls */
+
+
+/* These defines must be prefixed with "DRM_IOCTL_". */
+#define	DRM_IOCTL_PVR_SRVKM_CMD	\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SRVKM_CMD, \
+		 struct drm_pvr_srvkm_cmd)
+
+#endif /* defined(__PVR_DRM_H__) */
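
As a usage sketch: user space would drive the services ioctl defined above
roughly as follows. This is a minimal sketch, assuming a DRM fd already
opened on the PVR node; pvr_srvkm_call is a hypothetical helper, and the
bridge_id/bridge_func_id values are driver-internal, so nothing here beyond
struct drm_pvr_srvkm_cmd itself is documented uAPI.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include "pvr_drm.h"

	/* Hypothetical helper: marshal one services bridge call. */
	static int pvr_srvkm_call(int drm_fd, uint32_t bridge_id,
				  uint32_t func_id,
				  void *in, uint32_t in_size,
				  void *out, uint32_t out_size)
	{
		struct drm_pvr_srvkm_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.bridge_id = bridge_id;
		cmd.bridge_func_id = func_id;
		cmd.in_data_ptr = (uintptr_t)in;
		cmd.out_data_ptr = (uintptr_t)out;
		cmd.in_data_size = in_size;
		cmd.out_data_size = out_size;

		return ioctl(drm_fd, DRM_IOCTL_PVR_SRVKM_CMD, &cmd);
	}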
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_drm_core.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_drm_core.h
new file mode 100644
index 0000000..b53b4c3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_drm_core.h
@@ -0,0 +1,78 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*
+ * @File
+ * @Title          Linux DRM definitions shared between kernel and user space.
+ * @Codingstyle    LinuxKernel
+ * @Copyright      1999 Precision Insight, Inc., Cedar Park, Texas.
+ *                 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ *                 All rights reserved.
+ * @Description    This header contains a subset of the Linux kernel DRM uapi
+ *                 and is designed to be used in kernel and user mode. When
+ *                 included from kernel mode, it pulls in the full version of
+ *                 drm.h, whereas when included from user mode it defines a
+ *                 minimal version of drm.h (as found in libdrm). As such, the
+ *                 structures and ioctl commands must exactly match those found
+ *                 in the Linux kernel/libdrm.
+ * @License        MIT
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(__PVR_DRM_CORE_H__)
+#define __PVR_DRM_CORE_H__
+
+#if defined(__KERNEL__)
+#include <drm/drm.h>
+#else
+#include <asm/ioctl.h>
+#include <linux/types.h>
+
+#define DRM_IOCTL_BASE			'd'
+#define DRM_COMMAND_BASE                0x40
+
+#define DRM_IOWR(nr, type)		_IOWR(DRM_IOCTL_BASE, nr, type)
+
+struct drm_version {
+	int version_major;
+	int version_minor;
+	int version_patchlevel;
+	__kernel_size_t name_len;
+	char *name;
+	__kernel_size_t date_len;
+	char *date;
+	__kernel_size_t desc_len;
+	char *desc;
+};
+
+struct drm_set_version {
+	int drm_di_major;
+	int drm_di_minor;
+	int drm_dd_major;
+	int drm_dd_minor;
+};
+
+#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
+#endif
+
+#endif
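
For reference, the minimal drm_version above supports the standard two-pass
DRM_IOCTL_VERSION query (the same pattern libdrm's drmGetVersion uses): a
first call with NULL buffers returns the string lengths, a second call fills
caller-allocated buffers. A sketch, with query_driver_name a hypothetical
helper:

	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include "pvr_drm_core.h"

	static int query_driver_name(int drm_fd, char **name_out)
	{
		struct drm_version v;

		memset(&v, 0, sizeof(v));
		if (ioctl(drm_fd, DRM_IOCTL_VERSION, &v))  /* pass 1: lengths */
			return -1;

		v.name = malloc(v.name_len + 1);
		if (!v.name)
			return -1;
		v.date_len = 0;		/* only the name is wanted */
		v.desc_len = 0;
		if (ioctl(drm_fd, DRM_IOCTL_VERSION, &v)) {  /* pass 2: copy */
			free(v.name);
			return -1;
		}
		v.name[v.name_len] = '\0';
		*name_out = v.name;
		return 0;
	}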
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_drv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_drv.h
new file mode 100644
index 0000000..835c7f1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_drv.h
@@ -0,0 +1,83 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR DRM driver
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRV_H__)
+#define __PVR_DRV_H__
+
+#include <linux/version.h>
+#include <drm/drmP.h>
+#include <linux/pm.h>
+
+struct file;
+struct _PVRSRV_DEVICE_NODE_;
+struct workqueue_struct;
+struct vm_area_struct;
+
+/* This structure is used to store Linux specific per-device information. */
+struct pvr_drm_private {
+	struct _PVRSRV_DEVICE_NODE_ *dev_node;
+
+#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC)
+	struct workqueue_struct *fence_status_wq;
+#endif
+};
+
+extern const struct dev_pm_ops pvr_pm_ops;
+extern const struct drm_driver pvr_drm_generic_driver;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+int pvr_drm_load(struct drm_device *ddev, unsigned long flags);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+int pvr_drm_unload(struct drm_device *ddev);
+#else
+void pvr_drm_unload(struct drm_device *ddev);
+#endif
+#endif
+
+int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg,
+			    struct drm_file *file);
+int PVRSRV_MMap(struct file *file, struct vm_area_struct *ps_vma);
+
+#endif /* !defined(__PVR_DRV_H__) */
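
A sketch of how the per-device data above is typically reached, assuming (as
is conventional for DRM drivers of this kernel era) that pvr_drm_load()
stores a struct pvr_drm_private pointer in drm_device::dev_private;
pvr_get_device_node is a hypothetical helper:

	#include <drm/drmP.h>
	#include "pvr_drv.h"

	static struct _PVRSRV_DEVICE_NODE_ *
	pvr_get_device_node(struct drm_device *ddev)
	{
		struct pvr_drm_private *priv = ddev->dev_private;

		return priv ? priv->dev_node : NULL;
	}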
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_fd_sync_kernel.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_fd_sync_kernel.h
new file mode 100644
index 0000000..9a85f19
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_fd_sync_kernel.h
@@ -0,0 +1,77 @@
+/*************************************************************************/ /*!
+@File           pvr_fd_sync_kernel.h
+@Title          Kernel/userspace interface definitions to use the kernel sync
+                driver
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* vi: set ts=8: */
+
+
+#ifndef _PVR_FD_SYNC_KERNEL_H_
+#define _PVR_FD_SYNC_KERNEL_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define PVR_SYNC_MAX_QUERY_FENCE_POINTS 14
+
+#define PVR_SYNC_IOC_MAGIC 'W'
+
+#define PVR_SYNC_IOC_RENAME \
+ _IOW(PVR_SYNC_IOC_MAGIC,  4, struct pvr_sync_rename_ioctl_data)
+
+#define PVR_SYNC_IOC_FORCE_SW_ONLY \
+ _IO(PVR_SYNC_IOC_MAGIC,   5)
+
+struct pvr_sync_pt_info {
+	/* Output */
+	__u32 id;
+	__u32 ui32FWAddr;
+	__u32 ui32CurrOp;
+	__u32 ui32NextOp;
+	__u32 ui32TlTaken;
+} __attribute__((packed, aligned(8)));
+
+struct pvr_sync_rename_ioctl_data {
+	/* Input */
+	char szName[32];
+} __attribute__((packed, aligned(8)));
+
+#endif /* _PVR_FD_SYNC_KERNEL_H_ */
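
A sketch of driving the rename ioctl from user space; pvr_sync_rename is a
hypothetical helper, and timeline_fd is assumed to be a descriptor on which
the PVR sync driver exposes these ioctls:

	#include <string.h>
	#include <sys/ioctl.h>
	#include "pvr_fd_sync_kernel.h"

	static int pvr_sync_rename(int timeline_fd, const char *name)
	{
		struct pvr_sync_rename_ioctl_data data;

		memset(&data, 0, sizeof(data));
		/* keep szName NUL-terminated within its 32 bytes */
		strncpy(data.szName, name, sizeof(data.szName) - 1);

		return ioctl(timeline_fd, PVR_SYNC_IOC_RENAME, &data);
	}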
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_fence.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_fence.c
new file mode 100644
index 0000000..cccd613
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_fence.c
@@ -0,0 +1,1103 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR Linux fence interface
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+#include <linux/hashtable.h>
+#endif
+
+#include "pvr_fence.h"
+#include "services_kernel_client.h"
+#include "sync_checkpoint_external.h"
+
+#define CREATE_TRACE_POINTS
+#include "pvr_fence_trace.h"
+
+/* This header must always be included last */
+#include "kernel_compatibility.h"
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+static DEFINE_HASHTABLE(pvr_fence_ufo_lut, 16);
+static DEFINE_SPINLOCK(pvr_fence_ufo_lut_spinlock);
+#endif
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \
+	do {                                                             \
+		if (pfnDumpDebugPrintf)                                  \
+			pfnDumpDebugPrintf(pvDumpDebugFile, fmt,         \
+					   ## __VA_ARGS__);              \
+		else                                                     \
+			pr_err(fmt "\n", ## __VA_ARGS__);                \
+	} while (0)
+
+static inline void
+pvr_fence_sync_signal(struct pvr_fence *pvr_fence, u32 fence_sync_flags)
+{
+	SyncCheckpointSignal(pvr_fence->sync_checkpoint, fence_sync_flags);
+}
+
+static inline bool
+pvr_fence_sync_is_signaled(struct pvr_fence *pvr_fence, u32 fence_sync_flags)
+{
+	return SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint,
+					 fence_sync_flags);
+}
+
+static inline u32
+pvr_fence_sync_value(struct pvr_fence *pvr_fence)
+{
+	if (SyncCheckpointIsErrored(pvr_fence->sync_checkpoint,
+				    PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+		return PVRSRV_SYNC_CHECKPOINT_ERRORED;
+	else if (SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint,
+					   PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+		return PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+	else
+		return PVRSRV_SYNC_CHECKPOINT_ACTIVE;
+}
+
+static void
+pvr_fence_context_check_status(struct work_struct *data)
+{
+	PVRSRVCheckStatus(NULL);
+}
+
+void
+pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size)
+{
+	snprintf(str, size,
+		 "%u ctx=%llu refs=%u",
+		 atomic_read(&fctx->fence_seqno),
+		 fctx->fence_context,
+		 refcount_read(&fctx->kref.refcount));
+}
+
+static void
+pvr_fence_context_fences_dump(struct pvr_fence_context *fctx,
+			      DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			      void *pvDumpDebugFile)
+{
+	struct pvr_fence *pvr_fence;
+	unsigned long flags;
+	char value[128];
+
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	pvr_context_value_str(fctx, value, sizeof(value));
+	PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+			 "%s: @%s", fctx->name, value);
+	list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) {
+		struct dma_fence *fence = pvr_fence->fence;
+		const char *timeline_value_str = "unknown timeline value";
+		const char *fence_value_str = "unknown fence value";
+
+		pvr_fence->base.ops->fence_value_str(&pvr_fence->base, value,
+						     sizeof(value));
+		PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+				  " @%s", value);
+
+		if (is_pvr_fence(fence))
+			continue;
+
+		if (fence->ops->timeline_value_str) {
+			fence->ops->timeline_value_str(fence, value,
+						       sizeof(value));
+			timeline_value_str = value;
+		}
+
+		PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+				  " | %s: %s (driver: %s)",
+				  fence->ops->get_timeline_name(fence),
+				  timeline_value_str,
+				  fence->ops->get_driver_name(fence));
+
+		if (fence->ops->fence_value_str) {
+			fence->ops->fence_value_str(fence, value,
+						    sizeof(value));
+			fence_value_str = value;
+		}
+
+		PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+				  " |  @%s (foreign)", value);
+	}
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+}
+
+static inline unsigned int
+pvr_fence_context_seqno_next(struct pvr_fence_context *fctx)
+{
+	return atomic_inc_return(&fctx->fence_seqno) - 1;
+}
+
+static inline void
+pvr_fence_context_free_deferred(struct pvr_fence_context *fctx)
+{
+	struct pvr_fence *pvr_fence, *tmp;
+	LIST_HEAD(deferred_free_list);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_for_each_entry_safe(pvr_fence, tmp,
+				 &fctx->deferred_free_list,
+				 fence_head)
+		list_move(&pvr_fence->fence_head, &deferred_free_list);
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+	list_for_each_entry_safe(pvr_fence, tmp,
+				 &deferred_free_list,
+				 fence_head) {
+		list_del(&pvr_fence->fence_head);
+		SyncCheckpointFree(pvr_fence->sync_checkpoint);
+		dma_fence_free(&pvr_fence->base);
+		module_put(THIS_MODULE);
+	}
+}
+
+void
+pvr_fence_context_free_deferred_callback(void *data)
+{
+	struct pvr_fence_context *fctx = (struct pvr_fence_context *)data;
+
+	/*
+	 * Free up any fence objects we have deferred freeing.
+	 */
+	pvr_fence_context_free_deferred(fctx);
+}
+
+static void
+pvr_fence_context_signal_fences(void *data)
+{
+	struct pvr_fence_context *fctx = (struct pvr_fence_context *)data;
+	struct pvr_fence *pvr_fence, *tmp;
+	unsigned long flags1;
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	unsigned long flags2;
+#endif
+	LIST_HEAD(signal_list);
+
+	/*
+	 * We can't call fence_signal while holding the lock as we can end up
+	 * in a situation whereby pvr_fence_foreign_signal_sync, which also
+	 * takes the list lock, ends up being called as a result of the
+	 * fence_signal below, i.e. fence_signal(fence) -> fence->callback()
+	 *  -> fence_signal(foreign_fence) -> foreign_fence->callback() where
+	 * the foreign_fence callback is pvr_fence_foreign_signal_sync.
+	 *
+	 * So extract the items we intend to signal and add them to their own
+	 * queue.
+	 */
+	spin_lock_irqsave(&fctx->list_lock, flags1);
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	spin_lock_irqsave(&pvr_fence_ufo_lut_spinlock, flags2);
+	list_for_each_entry_safe(pvr_fence, tmp, &fctx->signal_list,
+				 signal_head) {
+		if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) {
+			list_move_tail(&pvr_fence->signal_head, &signal_list);
+			hash_del(&pvr_fence->ufo_lookup);
+		}
+	}
+	spin_unlock_irqrestore(&pvr_fence_ufo_lut_spinlock, flags2);
+#else
+	list_for_each_entry_safe(pvr_fence, tmp, &fctx->signal_list,
+				 signal_head) {
+		if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+			list_move_tail(&pvr_fence->signal_head, &signal_list);
+	}
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
+
+	spin_unlock_irqrestore(&fctx->list_lock, flags1);
+
+	list_for_each_entry_safe(pvr_fence, tmp, &signal_list, signal_head) {
+		PVR_FENCE_TRACE(&pvr_fence->base, "signalled fence (%s)\n",
+				pvr_fence->name);
+		trace_pvr_fence_signal_fence(pvr_fence);
+		spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags1);
+		list_del(&pvr_fence->signal_head);
+		spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags1);
+		dma_fence_signal(pvr_fence->fence);
+		dma_fence_put(pvr_fence->fence);
+	}
+
+	/*
+	 * Take this opportunity to free up any fence objects we
+	 * have deferred freeing.
+	 */
+	pvr_fence_context_free_deferred(fctx);
+}
+
+void
+pvr_fence_context_signal_fences_nohw(void *data)
+{
+	pvr_fence_context_signal_fences(data);
+}
+
+static void
+pvr_fence_context_destroy_work(struct work_struct *data)
+{
+	struct pvr_fence_context *fctx =
+		container_of(data, struct pvr_fence_context, destroy_work);
+
+	pvr_fence_context_free_deferred(fctx);
+
+	/*
+	 * Ensure any outstanding calls to SyncCheckpointFree have completed
+	 * on the fence workqueue.
+	 */
+	flush_workqueue(fctx->fence_wq);
+
+	if (WARN_ON(!list_empty_careful(&fctx->fence_list)))
+		pvr_fence_context_fences_dump(fctx, NULL, NULL);
+
+	PVRSRVUnregisterDbgRequestNotify(fctx->dbg_request_handle);
+	PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle);
+	kfree(fctx);
+}
+
+static void
+pvr_fence_context_debug_request(void *data, u32 verbosity,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	struct pvr_fence_context *fctx = (struct pvr_fence_context *)data;
+
+	if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+		pvr_fence_context_fences_dump(fctx, pfnDumpDebugPrintf,
+					      pvDumpDebugFile);
+}
+
+/**
+ * pvr_fence_context_create - creates a PVR fence context
+ * @dev_cookie: services device cookie
+ * @fence_status_wq: work queue on which fence status checks and deferred
+ * frees are run
+ * @name: context name (used for debugging)
+ *
+ * Creates a PVR fence context that can be used to create PVR fences or to
+ * create PVR fences from an existing fence.
+ *
+ * pvr_fence_context_destroy should be called to clean up the fence context.
+ *
+ * Returns NULL if a context cannot be created.
+ */
+struct pvr_fence_context *
+pvr_fence_context_create(void *dev_cookie,
+			 struct workqueue_struct *fence_status_wq,
+			 const char *name)
+{
+	struct pvr_fence_context *fctx;
+	PVRSRV_ERROR srv_err;
+
+	fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return NULL;
+
+	spin_lock_init(&fctx->lock);
+	atomic_set(&fctx->fence_seqno, 0);
+	INIT_WORK(&fctx->check_status_work, pvr_fence_context_check_status);
+	INIT_WORK(&fctx->destroy_work, pvr_fence_context_destroy_work);
+	spin_lock_init(&fctx->list_lock);
+	INIT_LIST_HEAD(&fctx->signal_list);
+	INIT_LIST_HEAD(&fctx->fence_list);
+	INIT_LIST_HEAD(&fctx->deferred_free_list);
+
+	fctx->fence_wq = fence_status_wq;
+
+	fctx->fence_context = dma_fence_context_alloc(1);
+	strlcpy(fctx->name, name, sizeof(fctx->name));
+
+	srv_err = PVRSRVRegisterCmdCompleteNotify(&fctx->cmd_complete_handle,
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+				pvr_fence_context_free_deferred_callback,
+#else
+				pvr_fence_context_signal_fences,
+#endif
+				fctx);
+	if (srv_err != PVRSRV_OK) {
+		pr_err("%s: failed to register command complete callback (%s)\n",
+		       __func__, PVRSRVGetErrorString(srv_err));
+		goto err_free_fctx;
+	}
+	srv_err = PVRSRVRegisterDbgRequestNotify(&fctx->dbg_request_handle,
+				dev_cookie,
+				pvr_fence_context_debug_request,
+				DEBUG_REQUEST_LINUXFENCE,
+				fctx);
+	if (srv_err != PVRSRV_OK) {
+		pr_err("%s: failed to register debug request callback (%s)\n",
+		       __func__, PVRSRVGetErrorString(srv_err));
+		goto err_unregister_cmd_complete_notify;
+	}
+
+	kref_init(&fctx->kref);
+
+	PVR_FENCE_CTX_TRACE(fctx, "created fence context (%s)\n", name);
+	trace_pvr_fence_context_create(fctx);
+
+	return fctx;
+
+err_unregister_cmd_complete_notify:
+	PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle);
+err_free_fctx:
+	kfree(fctx);
+	return NULL;
+}
+
+static void pvr_fence_context_destroy_kref(struct kref *kref)
+{
+	struct pvr_fence_context *fctx =
+		container_of(kref, struct pvr_fence_context, kref);
+
+	PVR_FENCE_CTX_TRACE(fctx, "destroyed fence context (%s)\n", fctx->name);
+
+	trace_pvr_fence_context_destroy_kref(fctx);
+
+	schedule_work(&fctx->destroy_work);
+}
+
+/**
+ * pvr_fence_context_destroy - destroys a context
+ * @fctx: PVR fence context to destroy
+ *
+ * Destroys a PVR fence context with the expectation that all fences have been
+ * destroyed.
+ */
+void
+pvr_fence_context_destroy(struct pvr_fence_context *fctx)
+{
+	trace_pvr_fence_context_destroy(fctx);
+
+	kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+}
+
+static const char *
+pvr_fence_get_driver_name(struct dma_fence *fence)
+{
+	return PVR_LDM_DRIVER_REGISTRATION_NAME;
+}
+
+static const char *
+pvr_fence_get_timeline_name(struct dma_fence *fence)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+	if (pvr_fence)
+		return pvr_fence->fctx->name;
+	return NULL;
+}
+
+static
+void pvr_fence_fence_value_str(struct dma_fence *fence, char *str, int size)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+	if (!pvr_fence)
+		return;
+
+	snprintf(str, size,
+		 "%u: (%s%s) refs=%u fwaddr=%#08x enqueue=%u status=%-9s %s%s",
+		 pvr_fence->fence->seqno,
+		 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+			  &pvr_fence->fence->flags) ? "+" : "-",
+		 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+			  &pvr_fence->fence->flags) ? "+" : "-",
+		 refcount_read(&pvr_fence->fence->refcount.refcount),
+		 SyncCheckpointGetFirmwareAddr(
+			 pvr_fence->sync_checkpoint),
+		 SyncCheckpointGetEnqueuedCount(pvr_fence->sync_checkpoint),
+		 SyncCheckpointGetStateString(pvr_fence->sync_checkpoint),
+		 pvr_fence->name,
+		 (&pvr_fence->base != pvr_fence->fence) ?
+		 "(foreign)" : "");
+}
+
+static
+void pvr_fence_timeline_value_str(struct dma_fence *fence, char *str, int size)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+	if (pvr_fence)
+		pvr_context_value_str(pvr_fence->fctx, str, size);
+}
+
+static bool
+pvr_fence_enable_signaling(struct dma_fence *fence)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+	unsigned long flags;
+
+	if (!pvr_fence)
+		return false;
+
+	WARN_ON_SMP(!spin_is_locked(&pvr_fence->fctx->lock));
+
+	if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+		return false;
+
+	dma_fence_get(&pvr_fence->base);
+
+	spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags);
+	list_add_tail(&pvr_fence->signal_head, &pvr_fence->fctx->signal_list);
+	spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags);
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	spin_lock_irqsave(&pvr_fence_ufo_lut_spinlock, flags);
+	hash_add(pvr_fence_ufo_lut, &pvr_fence->ufo_lookup,
+		 SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint));
+	spin_unlock_irqrestore(&pvr_fence_ufo_lut_spinlock, flags);
+#endif
+
+	PVR_FENCE_TRACE(&pvr_fence->base, "signalling enabled (%s)\n",
+			pvr_fence->name);
+	trace_pvr_fence_enable_signaling(pvr_fence);
+
+	return true;
+}
+
+static bool
+pvr_fence_is_signaled(struct dma_fence *fence)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+	if (pvr_fence)
+		return pvr_fence_sync_is_signaled(pvr_fence,
+		                                  PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT);
+	return false;
+}
+
+static void
+pvr_fence_release(struct dma_fence *fence)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+	unsigned long flags;
+
+	if (pvr_fence) {
+		struct pvr_fence_context *fctx = pvr_fence->fctx;
+
+		PVR_FENCE_TRACE(&pvr_fence->base, "released fence (%s)\n",
+				pvr_fence->name);
+		trace_pvr_fence_release(pvr_fence);
+
+		spin_lock_irqsave(&fctx->list_lock, flags);
+		list_move(&pvr_fence->fence_head,
+			  &fctx->deferred_free_list);
+		spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+		kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+	}
+}
+
+const struct dma_fence_ops pvr_fence_ops = {
+	.get_driver_name = pvr_fence_get_driver_name,
+	.get_timeline_name = pvr_fence_get_timeline_name,
+	.fence_value_str = pvr_fence_fence_value_str,
+	.timeline_value_str = pvr_fence_timeline_value_str,
+	.enable_signaling = pvr_fence_enable_signaling,
+	.signaled = pvr_fence_is_signaled,
+	.wait = dma_fence_default_wait,
+	.release = pvr_fence_release,
+};
+
+/**
+ * pvr_fence_create - creates a PVR fence
+ * @fctx: PVR fence context on which the PVR fence should be created
+ * @sync_checkpoint_ctx: context in which to create sync checkpoints
+ * @timeline_fd: timeline on which the PVR fence should be created
+ * @name: PVR fence name (used for debugging)
+ *
+ * Creates a PVR fence.
+ *
+ * Once the fence is finished with, pvr_fence_destroy should be called.
+ *
+ * Returns NULL if a PVR fence cannot be created.
+ */
+struct pvr_fence *
+pvr_fence_create(struct pvr_fence_context *fctx,
+		struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_ctx,
+		int timeline_fd, const char *name)
+{
+	struct pvr_fence *pvr_fence;
+	unsigned int seqno;
+	unsigned long flags;
+	PVRSRV_ERROR srv_err;
+
+	if (!try_module_get(THIS_MODULE))
+		goto err_exit;
+
+	pvr_fence = kzalloc(sizeof(*pvr_fence), GFP_KERNEL);
+	if (unlikely(!pvr_fence))
+		goto err_module_put;
+
+	srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx,
+				      (PVRSRV_TIMELINE) timeline_fd, PVRSRV_NO_FENCE,
+				      name, &pvr_fence->sync_checkpoint);
+	if (unlikely(srv_err != PVRSRV_OK))
+		goto err_free_fence;
+
+	INIT_LIST_HEAD(&pvr_fence->fence_head);
+	INIT_LIST_HEAD(&pvr_fence->signal_head);
+	pvr_fence->fctx = fctx;
+	seqno = pvr_fence_context_seqno_next(fctx);
+	/* Add the seqno to the fence name for easier debugging */
+	snprintf(pvr_fence->name, sizeof(pvr_fence->name), "%d-%s",
+		 seqno, name);
+	pvr_fence->fence = &pvr_fence->base;
+
+	dma_fence_init(&pvr_fence->base, &pvr_fence_ops, &fctx->lock,
+		       fctx->fence_context, seqno);
+
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_add_tail(&pvr_fence->fence_head, &fctx->fence_list);
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+	kref_get(&fctx->kref);
+
+	PVR_FENCE_TRACE(&pvr_fence->base, "created fence (%s)\n", name);
+	trace_pvr_fence_create(pvr_fence);
+
+	return pvr_fence;
+
+err_free_fence:
+	kfree(pvr_fence);
+err_module_put:
+	module_put(THIS_MODULE);
+err_exit:
+	return NULL;
+}
+
+static const char *
+pvr_fence_foreign_get_driver_name(struct dma_fence *fence)
+{
+	return PVR_LDM_DRIVER_REGISTRATION_NAME;
+}
+
+static const char *
+pvr_fence_foreign_get_timeline_name(struct dma_fence *fence)
+{
+	return "foreign";
+}
+
+static
+void pvr_fence_foreign_fence_value_str(struct dma_fence *fence, char *str,
+				       int size)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+	u32 sync_addr;
+	u32 sync_value_next;
+
+	if (WARN_ON(!pvr_fence))
+		return;
+
+	sync_addr = SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint);
+	sync_value_next = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+
+	/*
+	 * Include the fence flag bits from the foreign fence instead of our
+	 * shadow copy. This is done as the shadow fence flag bits aren't used.
+	 */
+	snprintf(str, size,
+		 "%u: (%s%s) refs=%u fwaddr=%#08x cur=%#08x nxt=%#08x %s",
+		 fence->seqno,
+		 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+			  &pvr_fence->fence->flags) ? "+" : "-",
+		 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+			  &pvr_fence->fence->flags) ? "+" : "-",
+		 refcount_read(&fence->refcount.refcount),
+		 sync_addr,
+		 pvr_fence_sync_value(pvr_fence),
+		 sync_value_next,
+		 pvr_fence->name);
+}
+
+static
+void pvr_fence_foreign_timeline_value_str(struct dma_fence *fence, char *str,
+					  int size)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+	if (pvr_fence)
+		pvr_context_value_str(pvr_fence->fctx, str, size);
+}
+
+static bool
+pvr_fence_foreign_enable_signaling(struct dma_fence *fence)
+{
+	/* WARN(1, ...) rather than WARN_ON(string): a string literal is
+	 * always true as a condition, so its text would never be printed.
+	 */
+	WARN(1, "cannot enable signalling on foreign fence");
+	return false;
+}
+
+static signed long
+pvr_fence_foreign_wait(struct dma_fence *fence, bool intr, signed long timeout)
+{
+	WARN(1, "cannot wait on foreign fence");
+	return 0;
+}
+
+static void
+pvr_fence_foreign_release(struct dma_fence *fence)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+	unsigned long flags;
+
+	if (pvr_fence) {
+		struct pvr_fence_context *fctx = pvr_fence->fctx;
+		struct dma_fence *foreign_fence = pvr_fence->fence;
+
+		PVR_FENCE_TRACE(&pvr_fence->base,
+				"released fence for foreign fence %llu#%d (%s)\n",
+				(u64) pvr_fence->fence->context,
+				pvr_fence->fence->seqno, pvr_fence->name);
+		trace_pvr_fence_foreign_release(pvr_fence);
+
+		spin_lock_irqsave(&fctx->list_lock, flags);
+		list_move(&pvr_fence->fence_head,
+			  &fctx->deferred_free_list);
+		spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+		dma_fence_put(foreign_fence);
+
+		kref_put(&fctx->kref,
+			 pvr_fence_context_destroy_kref);
+	}
+}
+
+const struct dma_fence_ops pvr_fence_foreign_ops = {
+	.get_driver_name = pvr_fence_foreign_get_driver_name,
+	.get_timeline_name = pvr_fence_foreign_get_timeline_name,
+	.fence_value_str = pvr_fence_foreign_fence_value_str,
+	.timeline_value_str = pvr_fence_foreign_timeline_value_str,
+	.enable_signaling = pvr_fence_foreign_enable_signaling,
+	.wait = pvr_fence_foreign_wait,
+	.release = pvr_fence_foreign_release,
+};
+
+static void
+pvr_fence_foreign_signal_sync(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct pvr_fence *pvr_fence = container_of(cb, struct pvr_fence, cb);
+	struct pvr_fence_context *fctx = pvr_fence->fctx;
+
+	WARN_ON_ONCE(is_pvr_fence(fence));
+
+	/* Callback registered by dma_fence_add_callback can be called from an atomic ctx */
+	pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_CTX_ATOMIC);
+
+	trace_pvr_fence_foreign_signal(pvr_fence);
+
+	queue_work(fctx->fence_wq, &fctx->check_status_work);
+
+	PVR_FENCE_TRACE(&pvr_fence->base,
+			"foreign fence %llu#%d signalled (%s)\n",
+			(u64) pvr_fence->fence->context,
+			pvr_fence->fence->seqno, pvr_fence->name);
+
+	/* Drop the reference on the base fence */
+	dma_fence_put(&pvr_fence->base);
+}
+
+/**
+ * pvr_fence_create_from_fence - creates a PVR fence from a fence
+ * @fctx: PVR fence context on which the PVR fence should be created
+ * @sync_checkpoint_ctx: context in which to create sync checkpoints
+ * @fence: fence from which the PVR fence should be created
+ * @fence_fd: fd for the sync file to which the fence belongs. If it doesn't
+ *            belong to a sync file then PVRSRV_NO_FENCE should be given
+ *            instead.
+ * @name: PVR fence name (used for debugging)
+ *
+ * Creates a PVR fence from an existing fence. If the fence is a foreign fence,
+ * i.e. one that doesn't originate from a PVR fence context, then a new PVR
+ * fence will be created using the specified sync_checkpoint_context.
+ * Otherwise, a reference will be taken on the underlying fence and the PVR
+ * fence will be returned.
+ *
+ * Once the fence is finished with, pvr_fence_destroy should be called.
+ *
+ * Returns NULL if a PVR fence cannot be created.
+ */
+struct pvr_fence *
+pvr_fence_create_from_fence(struct pvr_fence_context *fctx,
+			    struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_ctx,
+			    struct dma_fence *fence,
+			    PVRSRV_FENCE fence_fd,
+			    const char *name)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+	unsigned int seqno;
+	unsigned long flags;
+	PVRSRV_ERROR srv_err;
+	int err;
+
+	if (pvr_fence) {
+		if (WARN_ON(fence->ops == &pvr_fence_foreign_ops))
+			return NULL;
+		dma_fence_get(fence);
+
+		PVR_FENCE_TRACE(fence, "created fence from PVR fence (%s)\n",
+				name);
+		return pvr_fence;
+	}
+
+	if (!try_module_get(THIS_MODULE))
+		goto err_exit;
+
+	pvr_fence = kzalloc(sizeof(*pvr_fence), GFP_KERNEL);
+	if (!pvr_fence)
+		goto err_module_put;
+
+	srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx,
+					  SYNC_CHECKPOINT_FOREIGN_CHECKPOINT,
+					  fence_fd,
+					  name, &pvr_fence->sync_checkpoint);
+	if (srv_err != PVRSRV_OK)
+		goto err_free_pvr_fence;
+
+	INIT_LIST_HEAD(&pvr_fence->fence_head);
+	INIT_LIST_HEAD(&pvr_fence->signal_head);
+	pvr_fence->fctx = fctx;
+	pvr_fence->fence = dma_fence_get(fence);
+	seqno = pvr_fence_context_seqno_next(fctx);
+	/* Add the seqno to the fence name for easier debugging */
+	snprintf(pvr_fence->name, sizeof(pvr_fence->name), "%d-%s",
+		 seqno, name);
+	/*
+	 * We use the base fence to refcount the PVR fence and to do the
+	 * necessary clean up once the refcount drops to 0.
+	 */
+	dma_fence_init(&pvr_fence->base, &pvr_fence_foreign_ops, &fctx->lock,
+		       fctx->fence_context, seqno);
+
+	/*
+	 * Take an extra reference on the base fence that gets dropped when the
+	 * foreign fence is signalled.
+	 */
+	dma_fence_get(&pvr_fence->base);
+
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_add_tail(&pvr_fence->fence_head, &fctx->fence_list);
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+	kref_get(&fctx->kref);
+
+	PVR_FENCE_TRACE(&pvr_fence->base,
+			"created fence from foreign fence %llu#%d (%s)\n",
+			(u64) pvr_fence->fence->context,
+			pvr_fence->fence->seqno, name);
+
+	err = dma_fence_add_callback(fence, &pvr_fence->cb,
+				     pvr_fence_foreign_signal_sync);
+	if (err) {
+		if (err != -ENOENT) {
+			pr_err("%s: failed to add fence callback (err=%d)",
+			       __func__, err);
+			goto err_put_ref;
+		}
+
+		/*
+		 * The fence has already signalled, so mark the sync as
+		 * signalled too. The "signalled" hwperf packet should still
+		 * be emitted, because the callback will never run for an
+		 * already-signalled fence; hence the PVRSRV_FENCE_FLAG_NONE
+		 * flag.
+		 */
+		pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE);
+		PVR_FENCE_TRACE(&pvr_fence->base,
+				"foreign fence %llu#%d already signaled (%s)\n",
+				(u64) pvr_fence->fence->context,
+				pvr_fence->fence->seqno,
+				name);
+		dma_fence_put(&pvr_fence->base);
+	}
+
+	trace_pvr_fence_foreign_create(pvr_fence);
+
+	return pvr_fence;
+
+err_put_ref:
+	kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_del(&pvr_fence->fence_head);
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+	SyncCheckpointFree(pvr_fence->sync_checkpoint);
+err_free_pvr_fence:
+	kfree(pvr_fence);
+err_module_put:
+	module_put(THIS_MODULE);
+err_exit:
+	return NULL;
+}
+
+/**
+ * pvr_fence_destroy - destroys a PVR fence
+ * @pvr_fence: PVR fence to destroy
+ *
+ * Destroys a PVR fence. Upon return, the PVR fence may still exist if something
+ * else still references the underlying fence, e.g. a reservation object, or if
+ * software signalling has been enabled and the fence hasn't yet been signalled.
+ */
+void
+pvr_fence_destroy(struct pvr_fence *pvr_fence)
+{
+	PVR_FENCE_TRACE(&pvr_fence->base, "destroyed fence (%s)\n",
+			pvr_fence->name);
+
+	dma_fence_put(&pvr_fence->base);
+}
+
+/**
+ * pvr_fence_sw_signal - signals a PVR fence sync
+ * @pvr_fence: PVR fence to signal
+ *
+ * Sets the PVR fence sync value to signalled.
+ *
+ * Returns -EINVAL if the PVR fence represents a foreign fence.
+ */
+int
+pvr_fence_sw_signal(struct pvr_fence *pvr_fence)
+{
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	unsigned long flags;
+#endif
+
+	if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base))
+		return -EINVAL;
+
+	pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE);
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	dma_fence_put(pvr_fence->fence);
+
+	spin_lock_irqsave(&pvr_fence_ufo_lut_spinlock, flags);
+	hash_del(&pvr_fence->ufo_lookup);
+	spin_unlock_irqrestore(&pvr_fence_ufo_lut_spinlock, flags);
+#endif
+
+	queue_work(pvr_fence->fctx->fence_wq,
+		   &pvr_fence->fctx->check_status_work);
+
+	PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync signalled (%s)\n",
+			pvr_fence->name);
+
+	return 0;
+}
+
+/**
+ * pvr_fence_sw_error - errors the sync checkpoint backing a PVR fence
+ * @pvr_fence: PVR fence to error
+ *
+ * Sets the PVR fence sync checkpoint value to errored.
+ *
+ * Returns -EINVAL if the PVR fence represents a foreign fence.
+ */
+int
+pvr_fence_sw_error(struct pvr_fence *pvr_fence)
+{
+	if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base))
+		return -EINVAL;
+
+	SyncCheckpointError(pvr_fence->sync_checkpoint, PVRSRV_FENCE_FLAG_NONE);
+	PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync errored (%s)\n",
+			pvr_fence->name);
+
+	return 0;
+}
+
+int
+pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences,
+			  struct _SYNC_CHECKPOINT **fence_checkpoints)
+{
+	struct _SYNC_CHECKPOINT **next_fence_checkpoint = fence_checkpoints;
+	struct pvr_fence **next_pvr_fence = pvr_fences;
+	int fence_checkpoint_idx;
+
+	if (nr_fences > 0) {
+		for (fence_checkpoint_idx = 0; fence_checkpoint_idx < nr_fences;
+		     fence_checkpoint_idx++) {
+			struct pvr_fence *next_fence = *next_pvr_fence++;
+
+			*next_fence_checkpoint++ = next_fence->sync_checkpoint;
+			/* Take reference on sync checkpoint (will be dropped
+			 * later by kick code)
+			 */
+			SyncCheckpointTakeRef(next_fence->sync_checkpoint);
+		}
+	}
+
+	return 0;
+}
+
+struct _SYNC_CHECKPOINT *
+pvr_fence_get_checkpoint(struct pvr_fence *update_fence)
+{
+	return update_fence->sync_checkpoint;
+}
+
+/**
+ * pvr_fence_dump_info_on_stalled_ufos - dump debug information on any native
+ * fences associated with the UFOs provided
+ * @fctx: PVR fence context whose active fence list is searched
+ * @nr_ufos: number of UFOs in vaddrs
+ * @vaddrs: array of FW addresses of the UFOs the driver is waiting on
+ *
+ * This function is called from pvr_sync_file.c if the driver determines any
+ * GPU work is stuck waiting for a sync checkpoint representing a foreign
+ * sync to be signalled. It outputs debug information to the kernel log on
+ * the Linux fences which would be responsible for signalling the sync
+ * checkpoints indicated by the UFO addresses.
+ *
+ * Returns the number of UFOs in the array which were found to be associated
+ * with foreign syncs.
+ */
+u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx,
+					u32 nr_ufos, u32 *vaddrs)
+{
+	int our_ufo_ct = 0;
+	struct pvr_fence *pvr_fence;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	/* dump info on any ufos in our active list */
+	list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) {
+		u32 *this_ufo_vaddr = vaddrs;
+		int ufo_num;
+		DUMPDEBUG_PRINTF_FUNC *pfnDummy = NULL;
+
+		for (ufo_num = 0; ufo_num < nr_ufos; ufo_num++) {
+			struct _SYNC_CHECKPOINT *checkpoint =
+				pvr_fence->sync_checkpoint;
+			const u32 fence_ufo_addr =
+				SyncCheckpointGetFirmwareAddr(checkpoint);
+
+			if (fence_ufo_addr != this_ufo_vaddr[ufo_num])
+				continue;
+
+			/* Dump sync info */
+			PVR_DUMPDEBUG_LOG(pfnDummy, NULL,
+					  "\tSyncID = %d, FWAddr = 0x%08x: TLID = %d (Foreign Fence - [%p] %s)",
+					  SyncCheckpointGetId(checkpoint),
+					  fence_ufo_addr,
+					  SyncCheckpointGetTimeline(checkpoint),
+					  pvr_fence->fence,
+					  pvr_fence->name);
+			our_ufo_ct++;
+		}
+	}
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+	return our_ufo_ct;
+}
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+enum tag_img_bool pvr_fence_checkpoint_ufo_has_signalled(u32 fwaddr, u32 value)
+{
+	struct pvr_fence *pvr_fence = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pvr_fence_ufo_lut_spinlock, flags);
+	hash_for_each_possible(pvr_fence_ufo_lut, pvr_fence,
+			ufo_lookup, fwaddr) {
+		struct _SYNC_CHECKPOINT *checkpoint =
+			pvr_fence->sync_checkpoint;
+
+		if (SyncCheckpointGetFirmwareAddr(checkpoint) == fwaddr) {
+			hash_del(&pvr_fence->ufo_lookup);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&pvr_fence_ufo_lut_spinlock, flags);
+
+	if (!pvr_fence)
+		return IMG_FALSE;
+
+	PVR_FENCE_TRACE(&pvr_fence->base, "signalled fence (%s)\n",
+			pvr_fence->name);
+
+	trace_pvr_fence_signal_fence(pvr_fence);
+	spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags);
+	list_del(&pvr_fence->signal_head);
+	spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags);
+	dma_fence_signal(pvr_fence->fence);
+	dma_fence_put(pvr_fence->fence);
+
+	return IMG_TRUE;
+}
+
+void
+pvr_fence_check_state(void)
+{
+	int bkt;
+	unsigned long flags;
+	struct hlist_node *tmp1;
+	struct pvr_fence *pvr_fence, *tmp2;
+	LIST_HEAD(signal_list);
+
+	/*
+	 * Cannot call dma_fence_signal whilst holding spinlock, since
+	 * dma_fence_signal will take fctx->lock and in
+	 * pvr_fence_enable_signalling these are taken the other way around.
+	 */
+	spin_lock_irqsave(&pvr_fence_ufo_lut_spinlock, flags);
+	hash_for_each_safe(pvr_fence_ufo_lut, bkt, tmp1, pvr_fence, ufo_lookup) {
+		if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) {
+			list_move_tail(&pvr_fence->signal_head, &signal_list);
+			hash_del(&pvr_fence->ufo_lookup);
+		}
+	}
+	spin_unlock_irqrestore(&pvr_fence_ufo_lut_spinlock, flags);
+
+	list_for_each_entry_safe(pvr_fence, tmp2, &signal_list, signal_head) {
+		PVR_FENCE_TRACE(&pvr_fence->base, "signalled fence (%s)\n",
+				pvr_fence->name);
+
+		trace_pvr_fence_signal_fence(pvr_fence);
+		spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags);
+		list_del(&pvr_fence->signal_head);
+		spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags);
+		dma_fence_signal(pvr_fence->fence);
+		dma_fence_put(pvr_fence->fence);
+	}
+}
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
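
One pattern recurs throughout pvr_fence.c and is worth distilling: entries
are never signalled or freed while a list spinlock is held, because
dma_fence_signal() can recurse into callbacks that take the same lock.
Instead, entries are moved to a private list under the lock and processed
after it is dropped. A minimal sketch of the pattern (all names
hypothetical):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct item {
		struct list_head head;
	};

	static void drain_and_process(struct list_head *shared,
				      spinlock_t *lock,
				      void (*process)(struct item *))
	{
		struct item *it, *tmp;
		unsigned long flags;
		LIST_HEAD(private);

		/* Move entries to a private list while holding the lock... */
		spin_lock_irqsave(lock, flags);
		list_for_each_entry_safe(it, tmp, shared, head)
			list_move_tail(&it->head, &private);
		spin_unlock_irqrestore(lock, flags);

		/* ...then act on them lock-free; process() may retake it. */
		list_for_each_entry_safe(it, tmp, &private, head) {
			list_del(&it->head);
			process(it);
		}
	}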
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_fence.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_fence.h
new file mode 100644
index 0000000..ed437c5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_fence.h
@@ -0,0 +1,244 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR Linux fence interface
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_FENCE_H__)
+#define __PVR_FENCE_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+static inline void pvr_fence_cleanup(void)
+{
+}
+#else
+#include "services_kernel_client.h"
+#include "pvr_linux_fence.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+struct _SYNC_CHECKPOINT_CONTEXT;
+struct _SYNC_CHECKPOINT;
+
+/**
+ * pvr_fence_context - PVR fence context used to create and manage PVR fences
+ * @lock: protects the context and fences created on the context
+ * @name: fence context name (used for debugging)
+ * @dbg_request_handle: handle for callback used to dump debug data
+ * @fence_context: fence context with which to associate fences
+ * @fence_seqno: sequence number to use for the next fence
+ * @fence_wq: work queue for signalled fence work
+ * @check_status_work: work item used to inform services when a foreign fence
+ * has signalled
+ * @cmd_complete_handle: handle for callback used to signal fences when fence
+ * syncs are met
+ * @list_lock: protects the signal, fence and deferred-free lists
+ * @signal_list: list of fences waiting to be signalled
+ * @fence_list: list of fences (used for debugging)
+ * @deferred_free_list: list of fences to free once no spinlocks are held;
+ * the frees happen when an update fence is signalled or the context is
+ * destroyed
+ * @kref: reference count on the context
+ * @destroy_work: work item used to defer context teardown until the last
+ * reference has been dropped
+ */
+struct pvr_fence_context {
+	spinlock_t lock;
+	char name[32];
+	void *dbg_request_handle;
+	u64 fence_context;
+	atomic_t fence_seqno;
+
+	struct workqueue_struct *fence_wq;
+	struct work_struct check_status_work;
+
+	void *cmd_complete_handle;
+
+	spinlock_t list_lock;
+	struct list_head signal_list;
+	struct list_head fence_list;
+	struct list_head deferred_free_list;
+
+	struct kref kref;
+	struct work_struct destroy_work;
+};
+
+/**
+ * pvr_fence - PVR fence that represents both native and foreign fences
+ * @base: fence structure
+ * @fctx: fence context on which this fence was created
+ * @name: fence name (used for debugging)
+ * @fence: pointer to base fence structure or foreign fence
+ * @sync_checkpoint: services sync checkpoint used by hardware
+ * @ufo_lookup: entry in the global UFO lookup table
+ * (PVRSRV_SYNC_CHECKPOINT_CCB builds only)
+ * @fence_head: entry on the context fence and deferred free list
+ * @signal_head: entry on the context signal list
+ * @cb: foreign fence callback to set the sync to signalled
+ */
+struct pvr_fence {
+	struct dma_fence base;
+	struct pvr_fence_context *fctx;
+	char name[32];
+
+	struct dma_fence *fence;
+	struct _SYNC_CHECKPOINT *sync_checkpoint;
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	struct hlist_node ufo_lookup;
+#endif
+
+	struct list_head fence_head;
+	struct list_head signal_head;
+	struct dma_fence_cb cb;
+};
+
+extern const struct dma_fence_ops pvr_fence_ops;
+extern const struct dma_fence_ops pvr_fence_foreign_ops;
+
+static inline bool is_our_fence(struct pvr_fence_context *fctx,
+				struct dma_fence *fence)
+{
+	return (fence->context == fctx->fence_context);
+}
+
+static inline bool is_pvr_fence(struct dma_fence *fence)
+{
+	return ((fence->ops == &pvr_fence_ops) ||
+		(fence->ops == &pvr_fence_foreign_ops));
+}
+
+static inline struct pvr_fence *to_pvr_fence(struct dma_fence *fence)
+{
+	if (is_pvr_fence(fence))
+		return container_of(fence, struct pvr_fence, base);
+
+	return NULL;
+}
+
+struct pvr_fence_context *
+pvr_fence_context_create(void *dev_cookie,
+			 struct workqueue_struct *fence_status_wq,
+			 const char *name);
+void pvr_fence_context_destroy(struct pvr_fence_context *fctx);
+void pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size);
+
+struct pvr_fence *
+pvr_fence_create(struct pvr_fence_context *fctx,
+		 struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_ctx,
+		 int timeline_fd, const char *name);
+struct pvr_fence *
+pvr_fence_create_from_fence(struct pvr_fence_context *fctx,
+			    struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_ctx,
+			    struct dma_fence *fence,
+			    PVRSRV_FENCE fence_fd,
+			    const char *name);
+void pvr_fence_destroy(struct pvr_fence *pvr_fence);
+int pvr_fence_sw_signal(struct pvr_fence *pvr_fence);
+int pvr_fence_sw_error(struct pvr_fence *pvr_fence);
+
+int pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences,
+			      struct _SYNC_CHECKPOINT **fence_checkpoints);
+struct _SYNC_CHECKPOINT *
+pvr_fence_get_checkpoint(struct pvr_fence *update_fence);
+
+void pvr_fence_context_signal_fences_nohw(void *data);
+
+void pvr_fence_context_free_deferred_callback(void *data);
+
+u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx,
+					u32 nr_ufos,
+					u32 *vaddrs);
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+enum tag_img_bool pvr_fence_checkpoint_ufo_has_signalled(u32 fwaddr, u32 value);
+
+void pvr_fence_check_state(void);
+#endif
+
+static inline void pvr_fence_cleanup(void)
+{
+	/*
+	 * Ensure all PVR fence contexts have been destroyed, by flushing
+	 * the global workqueue.
+	 */
+	flush_scheduled_work();
+}
+
+#if defined(PVR_FENCE_DEBUG)
+#define PVR_FENCE_CTX_TRACE(c, fmt, ...)                                   \
+	do {                                                               \
+		struct pvr_fence_context *__fctx = (c);                    \
+		pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context,  \
+		       ## __VA_ARGS__);                                    \
+	} while (0)
+#else
+#define PVR_FENCE_CTX_TRACE(c, fmt, ...)
+#endif
+
+#define PVR_FENCE_CTX_WARN(c, fmt, ...)                                    \
+	do {                                                               \
+		struct pvr_fence_context *__fctx = (c);                    \
+		pr_warn("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \
+			## __VA_ARGS__);                                   \
+	} while (0)
+
+#define PVR_FENCE_CTX_ERR(c, fmt, ...)                                     \
+	do {                                                               \
+		struct pvr_fence_context *__fctx = (c);                    \
+		pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context,  \
+		       ## __VA_ARGS__);                                    \
+	} while (0)
+
+#if defined(PVR_FENCE_DEBUG)
+#define PVR_FENCE_TRACE(f, fmt, ...)                                       \
+	DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__)
+#else
+#define PVR_FENCE_TRACE(f, fmt, ...)
+#endif
+
+#define PVR_FENCE_WARN(f, fmt, ...)                                        \
+	DMA_FENCE_WARN(f, "(PVR) " fmt, ## __VA_ARGS__)
+
+#define PVR_FENCE_ERR(f, fmt, ...)                                         \
+	DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
+#endif /* !defined(__PVR_FENCE_H__) */
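
A sketch of typical consumption of this interface when importing an incoming
dma_fence as a GPU wait; import_wait_checkpoint and the "wait" debug name
are hypothetical:

	#include "pvr_fence.h"

	static struct _SYNC_CHECKPOINT *
	import_wait_checkpoint(struct pvr_fence_context *fctx,
			       struct _SYNC_CHECKPOINT_CONTEXT *ckpt_ctx,
			       struct dma_fence *in, PVRSRV_FENCE fence_fd)
	{
		struct pvr_fence *pf;

		/* Native fences are reused; foreign fences get a shadow
		 * checkpoint created on ckpt_ctx.
		 */
		pf = pvr_fence_create_from_fence(fctx, ckpt_ctx, in,
						 fence_fd, "wait");
		if (!pf)
			return NULL;

		return pvr_fence_get_checkpoint(pf);
	}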
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_fence_trace.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_fence_trace.h
new file mode 100644
index 0000000..cc95fe3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_fence_trace.h
@@ -0,0 +1,227 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pvr_fence
+
+#if !defined(_TRACE_PVR_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PVR_FENCE_H
+
+#include <linux/tracepoint.h>
+
+struct pvr_fence;
+struct pvr_fence_context;
+
+DECLARE_EVENT_CLASS(pvr_fence_context,
+
+	TP_PROTO(struct pvr_fence_context *fctx),
+	TP_ARGS(fctx),
+
+	TP_STRUCT__entry(
+		__string(name, fctx->name)
+		__array(char, val, 128)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, fctx->name)
+		pvr_context_value_str(fctx, __entry->val,
+			sizeof(__entry->val));
+	),
+
+	TP_printk("name=%s val=%s",
+		  __get_str(name),
+		  __entry->val
+	)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_create,
+	TP_PROTO(struct pvr_fence_context *fctx),
+	TP_ARGS(fctx)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy,
+	TP_PROTO(struct pvr_fence_context *fctx),
+	TP_ARGS(fctx)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy_kref,
+	TP_PROTO(struct pvr_fence_context *fctx),
+	TP_ARGS(fctx)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_signal_fences,
+	TP_PROTO(struct pvr_fence_context *fctx),
+	TP_ARGS(fctx)
+);
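+
+/* Each DEFINE_EVENT above generates a trace_<event>() helper that call sites
+ * invoke directly, e.g. (illustrative):
+ *
+ *	trace_pvr_fence_context_create(fctx);
+ */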
+
+DECLARE_EVENT_CLASS(pvr_fence,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence),
+
+	TP_STRUCT__entry(
+		__string(driver,
+			fence->base.ops->get_driver_name(&fence->base))
+		__string(timeline,
+			fence->base.ops->get_timeline_name(&fence->base))
+		__array(char, val, 128)
+		__field(u64, context)
+	),
+
+	TP_fast_assign(
+		__assign_str(driver,
+			fence->base.ops->get_driver_name(&fence->base))
+		__assign_str(timeline,
+			fence->base.ops->get_timeline_name(&fence->base))
+		fence->base.ops->fence_value_str(&fence->base,
+			__entry->val, sizeof(__entry->val));
+		__entry->context = fence->base.context;
+	),
+
+	TP_printk("driver=%s timeline=%s ctx=%llu val=%s",
+		  __get_str(driver), __get_str(timeline),
+		  __entry->context, __entry->val
+	)
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_create,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_release,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_enable_signaling,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_signal_fence,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence)
+);
+
+DECLARE_EVENT_CLASS(pvr_fence_foreign,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence),
+
+	TP_STRUCT__entry(
+		__string(driver,
+			fence->base.ops->get_driver_name(&fence->base))
+		__string(timeline,
+			fence->base.ops->get_timeline_name(&fence->base))
+		__array(char, val, 128)
+		__field(u64, context)
+		__string(foreign_driver,
+			fence->fence->ops->get_driver_name ?
+			fence->fence->ops->get_driver_name(fence->fence) :
+			"unknown")
+		__string(foreign_timeline,
+			fence->fence->ops->get_timeline_name ?
+			fence->fence->ops->get_timeline_name(fence->fence) :
+			"unknown")
+		__array(char, foreign_val, 128)
+		__field(u64, foreign_context)
+	),
+
+	TP_fast_assign(
+		__assign_str(driver,
+			fence->base.ops->get_driver_name(&fence->base))
+		__assign_str(timeline,
+			fence->base.ops->get_timeline_name(&fence->base))
+		fence->base.ops->fence_value_str(&fence->base, __entry->val,
+			sizeof(__entry->val));
+		__entry->context = fence->base.context;
+		__assign_str(foreign_driver,
+			fence->fence->ops->get_driver_name ?
+			fence->fence->ops->get_driver_name(fence->fence) :
+			"unknown")
+		__assign_str(foreign_timeline,
+			fence->fence->ops->get_timeline_name ?
+			fence->fence->ops->get_timeline_name(fence->fence) :
+			"unknown")
+		fence->fence->ops->fence_value_str ?
+			fence->fence->ops->fence_value_str(
+				fence->fence, __entry->foreign_val,
+				sizeof(__entry->foreign_val)) :
+			(void) strlcpy(__entry->foreign_val,
+				"unknown", sizeof(__entry->foreign_val));
+		__entry->foreign_context = fence->fence->context;
+	),
+
+	TP_printk("driver=%s timeline=%s ctx=%llu val=%s foreign: driver=%s timeline=%s ctx=%llu val=%s",
+		  __get_str(driver), __get_str(timeline), __entry->context,
+		  __entry->val, __get_str(foreign_driver),
+		  __get_str(foreign_timeline), __entry->foreign_context,
+		  __entry->foreign_val
+	)
+);
+
+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_create,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_release,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_signal,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence)
+);
+
+#endif /* _TRACE_PVR_FENCE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */
+#define TRACE_INCLUDE_FILE pvr_fence_trace
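+
+/* Typical instantiation pattern (illustrative): exactly one .c file defines
+ * CREATE_TRACE_POINTS before including this header, so that the tracepoint
+ * bodies are emitted once, e.g.:
+ *
+ *	#define CREATE_TRACE_POINTS
+ *	#include "pvr_fence_trace.h"
+ *
+ * All other translation units include the header directly and call the
+ * generated trace_pvr_fence_*() helpers.
+ */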
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_gputrace.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_gputrace.c
new file mode 100644
index 0000000..e69cda0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_gputrace.c
@@ -0,0 +1,1261 @@
+/*************************************************************************/ /*!
+@File           pvr_gputrace.c
+@Title          PVR GPU Trace module Linux implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+#include <linux/trace_events.h>
+#else
+#include <linux/ftrace_event.h>
+#endif
+
+#include "pvrsrv_error.h"
+#include "pvrsrv_apphint.h"
+#include "pvr_debug.h"
+#include "ospvr_gputrace.h"
+#include "rgxhwperf.h"
+#include "rgxtimecorr.h"
+#include "device.h"
+#include "trace_events.h"
+#include "pvrsrv.h"
+#include "pvrsrv_tlstreams.h"
+#include "tlclient.h"
+#include "pvr_debug.h"
+#define CREATE_TRACE_POINTS
+#include "rogue_trace_events.h"
+
+/******************************************************************************
+ Module internal implementation
+******************************************************************************/
+
+typedef enum {
+	PVR_GPUTRACE_SWITCH_TYPE_UNDEF = 0,
+
+	PVR_GPUTRACE_SWITCH_TYPE_BEGIN = 1,
+	PVR_GPUTRACE_SWITCH_TYPE_END = 2,
+	PVR_GPUTRACE_SWITCH_TYPE_SINGLE = 3
+} PVR_GPUTRACE_SWITCH_TYPE;
+
+typedef struct RGX_HWPERF_FTRACE_DATA {
+	/* This lock ensures the HWPerf TL stream reading resources are not destroyed
+	 * by one thread disabling it while another is reading from it. Keeps the
+	 * state and resource create/destroy atomic and consistent. */
+	POS_LOCK    hFTraceResourceLock;
+
+	IMG_HANDLE  hGPUTraceCmdCompleteHandle;
+	IMG_HANDLE  hGPUTraceTLStream;
+	IMG_UINT64  ui64LastSampledTimeCorrOSTimeStamp;
+	IMG_UINT32  ui32FTraceLastOrdinal;
+} RGX_HWPERF_FTRACE_DATA;
+
+/* This lock ensures state change of GPU_TRACING on/off is done atomically */
+static POS_LOCK ghGPUTraceStateLock;
+static IMG_BOOL gbFTraceGPUEventsEnabled = PVRSRV_APPHINT_ENABLEFTRACEGPU;
+
+/* Saved value of the clock source from before tracing was enabled, kept so
+ * that we know which clock to select again after GPU ftrace is disabled. */
+#if defined(SUPPORT_RGX)
+static RGXTIMECORR_CLOCK_TYPE geLastTimeCorrClock = PVRSRV_APPHINT_TIMECORRCLOCK;
+#endif
+
+/* This lock ensures that the reference counting operation on the FTrace UFO
+ * events and enable/disable operation on firmware event are performed as
+ * one atomic operation. This should ensure that there are no race conditions
+ * between reference counting and firmware event state change.
+ * See below comment for guiUfoEventRef.
+ */
+static POS_LOCK ghLockFTraceEventLock;
+
+/* Multiple FTrace UFO events are reflected in the firmware as a single event.
+ * When an FTrace UFO event is enabled we also want to enable it in the
+ * firmware at the same time. Because of this many-to-one relation we count
+ * how many FTrace UFO events are enabled: if at least one event is enabled we
+ * enable the firmware event, and when all FTrace UFO events are disabled we
+ * disable the firmware event. */
+static IMG_UINT guiUfoEventRef;
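+
+/* Illustrative sketch of the reference-counting pattern described above; the
+ * real logic lives in PVRGpuTraceEnableUfoCallback() and
+ * PVRGpuTraceDisableUfoCallback() later in this file:
+ *
+ *	OSLockAcquire(ghLockFTraceEventLock);
+ *	if (guiUfoEventRef++ == 0)
+ *		enable the firmware UFO event; // first user
+ *	OSLockRelease(ghLockFTraceEventLock);
+ */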
+
+/******************************************************************************
+ Module In-bound API
+******************************************************************************/
+
+static PVRSRV_ERROR _GpuTraceDisable(
+	PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+	IMG_BOOL bDeInit);
+
+static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE);
+
+PVRSRV_ERROR PVRGpuTraceSupportInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	if (ghLockFTraceEventLock != NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FTrace Support is already initialized"));
+		return PVRSRV_OK;
+	}
+
+	/* common module params initialization */
+	eError = OSLockCreate(&ghLockFTraceEventLock);
+	PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+	eError = OSLockCreate(&ghGPUTraceStateLock);
+	PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+	return PVRSRV_OK;
+}
+
+void PVRGpuTraceSupportDeInit(void)
+{
+	if (ghGPUTraceStateLock)
+	{
+		OSLockDestroy(ghGPUTraceStateLock);
+	}
+
+	if (ghLockFTraceEventLock)
+	{
+		OSLockDestroy(ghLockFTraceEventLock);
+		ghLockFTraceEventLock = NULL;
+	}
+}
+
+PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+	RGX_HWPERF_FTRACE_DATA *psData;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	psData = OSAllocZMem(sizeof(RGX_HWPERF_FTRACE_DATA));
+	psDevInfo->pvGpuFtraceData = psData;
+	PVR_LOGG_IF_NOMEM(psData, "OSAllocZMem", eError, e0);
+
+	/* We initialise it only once because we want to track if any
+	 * packets were dropped. */
+	psData->ui32FTraceLastOrdinal = IMG_UINT32_MAX - 1;
+
+	eError = OSLockCreate(&psData->hFTraceResourceLock);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+	return PVRSRV_OK;
+
+e0:
+	PVRGpuTraceDeInitDevice(psDeviceNode);
+	return eError;
+}
+
+void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGX_HWPERF_FTRACE_DATA *psData = psDevInfo->pvGpuFtraceData;
+
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+	if (psData)
+	{
+		/* first disable the tracing, to free up TL resources */
+		if (psData->hFTraceResourceLock)
+		{
+			OSLockAcquire(psData->hFTraceResourceLock);
+			_GpuTraceDisable(psDeviceNode->pvDevice, IMG_TRUE);
+			OSLockRelease(psData->hFTraceResourceLock);
+
+			/* now free all the FTrace resources */
+			OSLockDestroy(psData->hFTraceResourceLock);
+		}
+		OSFreeMem(psData);
+		psDevInfo->pvGpuFtraceData = NULL;
+	}
+}
+
+IMG_BOOL PVRGpuTraceIsEnabled(void)
+{
+	return gbFTraceGPUEventsEnabled;
+}
+
+void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	if (PVRGpuTraceIsEnabled())
+	{
+		PVRSRV_ERROR eError = PVRGpuTraceSetEnabled(psDeviceNode, IMG_TRUE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to initialise GPU event tracing"
+					" (%s)", PVRSRVGetErrorString(eError)));
+		}
+
+		/* The functions below enable FTrace events, which in turn
+		 * execute HWPerf callbacks that set the appropriate filter
+		 * values.
+		 * Note: unfortunately the functions don't allow private data
+		 *       to be passed, so they enable events for all of the
+		 *       devices at once, which means this can happen more
+		 *       than once if there is more than one device. */
+
+		/* single events can be enabled by calling trace_set_clr_event()
+		 * with the event name, e.g.:
+		 * trace_set_clr_event("rogue", "rogue_ufo_update", 1) */
+#if defined(CONFIG_EVENT_TRACING) /* this is a kernel config option */
+#if defined(ANDROID) || defined(CHROMIUMOS_KERNEL)
+		if (trace_set_clr_event("gpu", NULL, 1))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"gpu\" event"
+					" group"));
+		}
+		else
+		{
+			PVR_LOG(("FTrace events from \"gpu\" group enabled"));
+		}
+#endif /* defined(ANDROID) || defined(CHROMIUMOS_KERNEL) */
+		if (trace_set_clr_event("rogue", NULL, 1))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"rogue\" event"
+					" group"));
+		}
+		else
+		{
+			PVR_LOG(("FTrace events from \"rogue\" group enabled"));
+		}
+#endif /* defined (CONFIG_EVENT_TRACING) */
+	}
+}
+
+/* The caller must hold hFTraceResourceLock before calling this function.
+ */
+static PVRSRV_ERROR _GpuTraceEnable(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	RGX_HWPERF_FTRACE_DATA *psFtraceData;
+	PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode;
+	IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5];
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psRgxDevInfo);
+
+	psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+
+	PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock));
+
+	/* return if already enabled */
+	if (psFtraceData->hGPUTraceTLStream)
+	{
+		return PVRSRV_OK;
+	}
+
+#if defined(SUPPORT_RGX)
+	/* Signal FW to enable event generation */
+	if (psRgxDevInfo->bFirmwareInitialised)
+	{
+		IMG_UINT64 ui64UFOFilter = psRgxDevInfo->ui64HWPerfFilter &
+		        (RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO);
+
+		eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode,
+		                               RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
+		                               RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |
+		                               ui64UFOFilter);
+		PVR_LOGG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM", err_out);
+	}
+	else
+#endif
+	{
+		/* only set filter and exit */
+		psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |
+		        ((RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO) &
+		        psRgxDevInfo->ui64HWPerfFilter);
+
+		PVR_DPF((PVR_DBG_WARNING,
+				 "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")",
+				 psRgxDevInfo->ui64HWPerfFilter));
+
+		return PVRSRV_OK;
+	}
+
+	/* Form the HWPerf stream name corresponding to this DevNode so that it
+	 * can be identified in the UM */
+	if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d",
+					PVRSRV_TL_HWPERF_RGX_FW_STREAM, psRgxDevNode->sDevId.i32UMIdentifier) < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf stream name for device %d",
+		                        __func__,
+								psRgxDevNode->sDevId.i32UMIdentifier));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Open the TL Stream for HWPerf data consumption */
+	eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+								pszHWPerfStreamName,
+								PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+								&psFtraceData->hGPUTraceTLStream);
+	PVR_LOGG_IF_ERROR(eError, "TLClientOpenStream", err_out);
+
+#if defined(SUPPORT_RGX)
+	if (RGXTimeCorrGetClockSource() != RGXTIMECORR_CLOCK_SCHED)
+	{
+		/* Set clock source for timer correlation data to sched_clock */
+		geLastTimeCorrClock = RGXTimeCorrGetClockSource();
+		RGXTimeCorrSetClockSource(psRgxDevNode, RGXTIMECORR_CLOCK_SCHED);
+	}
+#endif
+
+	/* Reset the OS timestamp coming from the timer correlation data
+	 * associated with the latest HWPerf event we processed.
+	 */
+	psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = 0;
+
+	/* Register a notifier to collect HWPerf data whenever the HW completes
+	 * an operation.
+	 */
+	eError = PVRSRVRegisterCmdCompleteNotify(
+		&psFtraceData->hGPUTraceCmdCompleteHandle,
+		&_GpuTraceCmdCompleteNotify,
+		psRgxDevInfo);
+	PVR_LOGG_IF_ERROR(eError, "PVRSRVRegisterCmdCompleteNotify", err_close_stream);
+
+err_out:
+	PVR_DPF_RETURN_RC(eError);
+
+err_close_stream:
+	TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+						psFtraceData->hGPUTraceTLStream);
+	psFtraceData->hGPUTraceTLStream = NULL;
+	goto err_out;
+}
+
+/* The caller must hold hFTraceResourceLock before calling this function.
+ */
+static PVRSRV_ERROR _GpuTraceDisable(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL bDeInit)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	RGX_HWPERF_FTRACE_DATA *psFtraceData;
+#if defined(SUPPORT_RGX)
+	PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode;
+#endif
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psRgxDevInfo);
+
+	psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+
+	PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock));
+
+	/* if FW is not yet initialised, just set filter and exit */
+	if (!psRgxDevInfo->bFirmwareInitialised)
+	{
+		psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_NONE;
+		PVR_DPF((PVR_DBG_WARNING,
+				 "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")",
+				 psRgxDevInfo->ui64HWPerfFilter));
+
+		return PVRSRV_OK;
+	}
+
+	if (NULL == psFtraceData->hGPUTraceTLStream)
+	{
+		/* Tracing already disabled, just return */
+		return PVRSRV_OK;
+	}
+
+#if defined(SUPPORT_RGX)
+	if (!bDeInit)
+	{
+		eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode,
+		                               RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
+		                               (RGX_HWPERF_EVENT_MASK_NONE));
+		PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
+	}
+#endif
+
+	if (psFtraceData->hGPUTraceCmdCompleteHandle)
+	{
+		/* Tracing is being turned off. Unregister the notifier. */
+		eError = PVRSRVUnregisterCmdCompleteNotify(
+				psFtraceData->hGPUTraceCmdCompleteHandle);
+		PVR_LOG_IF_ERROR(eError, "PVRSRVUnregisterCmdCompleteNotify");
+		psFtraceData->hGPUTraceCmdCompleteHandle = NULL;
+	}
+
+	if (psFtraceData->hGPUTraceTLStream)
+	{
+		IMG_PBYTE pbTmp = NULL;
+		IMG_UINT32 ui32Tmp = 0;
+
+		/* We have to flush both the L1 (FW) and L2 (Host) buffers in case there
+		 * are some events left unprocessed in this FTrace/systrace "session"
+		 * (note that even if we have just disabled HWPerf on the FW some packets
+		 * could have been generated and already copied to L2 by the MISR handler).
+		 *
+		 * With the following calls we will both copy new data to the Host buffer
+		 * (done by the producer callback in TLClientAcquireData) and advance
+		 * the read offset in the buffer to catch up with the latest events.
+		 */
+		eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+		                             psFtraceData->hGPUTraceTLStream,
+		                             &pbTmp, &ui32Tmp);
+		PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+
+		/* Let close stream perform the release data on the outstanding acquired data */
+		eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+		                             psFtraceData->hGPUTraceTLStream);
+		PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+
+		psFtraceData->hGPUTraceTLStream = NULL;
+	}
+
+#if defined(SUPPORT_RGX)
+	if (geLastTimeCorrClock != RGXTIMECORR_CLOCK_SCHED)
+	{
+		RGXTimeCorrSetClockSource(psRgxDevNode, geLastTimeCorrClock);
+	}
+#endif
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+static PVRSRV_ERROR _GpuTraceSetEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                        IMG_BOOL bNewValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	RGX_HWPERF_FTRACE_DATA *psFtraceData;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psRgxDevInfo);
+	psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+
+	/* About to create/destroy FTrace resources, lock critical section
+	 * to avoid HWPerf MISR thread contention.
+	 */
+	OSLockAcquire(psFtraceData->hFTraceResourceLock);
+
+	eError = (bNewValue ? _GpuTraceEnable(psRgxDevInfo)
+					   : _GpuTraceDisable(psRgxDevInfo, IMG_FALSE));
+
+	OSLockRelease(psFtraceData->hFTraceResourceLock);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+/* TODO: change the name to something more appropriate */
+static PVRSRV_ERROR _GpuTraceSetEnabledForAllDevices(IMG_BOOL bNewValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+
+	/* This entry point from DebugFS must take the global bridge lock at
+	 * this outer level of the stack before calling into the RGX part of
+	 * the driver, which can change RGX device data and communicate with
+	 * the FW, both of which require the bridge lock.
+	 */
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#endif
+	psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	/* enable/disable GPU trace on all devices */
+	while (psDeviceNode)
+	{
+		eError = _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue);
+		if (eError != PVRSRV_OK)
+		{
+			break;
+		}
+		psDeviceNode = psDeviceNode->psNext;
+	}
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR PVRGpuTraceSetEnabled(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   IMG_BOOL bNewValue)
+{
+	return _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue);
+}
+
+/* ----- HWPerf to FTrace packet processing and events injection ------------ */
+
+static const IMG_CHAR *_HWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType)
+{
+	static const IMG_CHAR *aszKickType[RGX_HWPERF_KICK_TYPE_LAST+1] = {
+		"TA3D", "TQ2D", "TQ3D", "CDM", "RS", "VRDM", "TQTDM", "SYNC", "LAST"
+	};
+
+	/* cast in case of negative value */
+	if (((IMG_UINT32) eKickType) >= RGX_HWPERF_KICK_TYPE_LAST)
+	{
+		return "<UNKNOWN>";
+	}
+
+	return aszKickType[eKickType];
+}
+
+void PVRGpuTraceEnqueueEvent(
+		PVRSRV_DEVICE_NODE *psDevNode,
+		IMG_UINT32 ui32FirmwareCtx,
+		IMG_UINT32 ui32ExtJobRef,
+		IMG_UINT32 ui32IntJobRef,
+		RGX_HWPERF_KICK_TYPE eKickType)
+{
+	const IMG_CHAR *pszKickType = _HWPerfKickTypeToStr(eKickType);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "PVRGpuTraceEnqueueEvent(%s): contextId %u, "
+	        "jobId %u", pszKickType, ui32FirmwareCtx, ui32IntJobRef));
+
+	if (PVRGpuTraceIsEnabled())
+	{
+		trace_rogue_job_enqueue(ui32FirmwareCtx, ui32IntJobRef, ui32ExtJobRef,
+					pszKickType);
+	}
+}
+
+static void _GpuTraceWorkSwitch(
+		IMG_UINT64 ui64HWTimestampInOSTime,
+		IMG_UINT32 ui32CtxId,
+		IMG_UINT32 ui32CtxPriority,
+		IMG_UINT32 ui32ExtJobRef,
+		IMG_UINT32 ui32IntJobRef,
+		const IMG_CHAR* pszWorkType,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+	PVR_ASSERT(pszWorkType);
+	trace_rogue_sched_switch(pszWorkType, eSwType, ui64HWTimestampInOSTime,
+			ui32CtxId, 2-ui32CtxPriority, ui32IntJobRef, ui32ExtJobRef);
+}
+
+static void _GpuTraceUfo(
+		IMG_UINT64 ui64OSTimestamp,
+		const RGX_HWPERF_UFO_EV eEvType,
+		const IMG_UINT32 ui32CtxId,
+		const IMG_UINT32 ui32ExtJobRef,
+		const IMG_UINT32 ui32IntJobRef,
+		const IMG_UINT32 ui32UFOCount,
+		const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+	switch (eEvType) {
+		case RGX_HWPERF_UFO_EV_UPDATE:
+			trace_rogue_ufo_updates(ui64OSTimestamp, ui32CtxId,
+					ui32ExtJobRef, ui32IntJobRef, ui32UFOCount, puData);
+			break;
+		case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+			trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId,
+					ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount,
+					puData);
+			break;
+		case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+			trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId,
+					ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount,
+					puData);
+			break;
+		case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+			trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId,
+					ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount,
+					puData);
+			break;
+		case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+			trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId,
+					ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount,
+					puData);
+			break;
+		default:
+			break;
+	}
+}
+
+static void _GpuTraceFirmware(
+		IMG_UINT64 ui64HWTimestampInOSTime,
+		const IMG_CHAR* pszWorkType,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+	trace_rogue_firmware_activity(ui64HWTimestampInOSTime, pszWorkType, eSwType);
+}
+
+static void _GpuTraceEventsLost(
+		const RGX_HWPERF_STREAM_ID eStreamId,
+		const IMG_UINT32 ui32LastOrdinal,
+		const IMG_UINT32 ui32CurrOrdinal)
+{
+	trace_rogue_events_lost(eStreamId, ui32LastOrdinal, ui32CurrOrdinal);
+}
+
+/* Calculate the OS timestamp given an RGX timestamp in the HWPerf event. */
+static uint64_t CalculateEventTimestamp(
+	PVRSRV_RGXDEV_INFO *psDevInfo,
+	uint32_t ui32TimeCorrIndex,
+	uint64_t ui64EventTimestamp)
+{
+	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData;
+	RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32TimeCorrIndex];
+	uint64_t ui64CRTimeStamp = psTimeCorr->ui64CRTimeStamp;
+	uint64_t ui64OSTimeStamp = psTimeCorr->ui64OSTimeStamp;
+	uint64_t ui64CRDeltaToOSDeltaKNs = psTimeCorr->ui64CRDeltaToOSDeltaKNs;
+	uint64_t ui64EventOSTimestamp, deltaRgxTimer, delta_ns;
+
+	if (psFtraceData->ui64LastSampledTimeCorrOSTimeStamp > ui64OSTimeStamp)
+	{
+		/* The previous packet had a time reference (time correlation data) more
+		 * recent than the one in the current packet, it means the timer
+		 * correlation array wrapped too quickly (buffer too small) and in the
+		 * previous call to _GpuTraceUfoEvent we read one of the
+		 * newest timer correlations rather than one of the oldest ones.
+		 */
+		PVR_DPF((PVR_DBG_ERROR, "%s: The timestamps computed so far could be "
+				 "wrong! The time correlation array size should be increased "
+				 "to avoid this.", __func__));
+	}
+
+	psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = ui64OSTimeStamp;
+
+	/* RGX CR timer ticks delta */
+	deltaRgxTimer = ui64EventTimestamp - ui64CRTimeStamp;
+	/* RGX time delta in nanoseconds */
+	delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs);
+	/* Calculate OS time of HWPerf event */
+	ui64EventOSTimestamp = ui64OSTimeStamp + delta_ns;
+
+	PVR_DPF((PVR_DBG_VERBOSE, "%s: psCurrentDvfs RGX %llu, OS %llu, DVFSCLK %u",
+			 __func__, ui64CRTimeStamp, ui64OSTimeStamp,
+			 psTimeCorr->ui32CoreClockSpeed));
+
+	return ui64EventOSTimestamp;
+}
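+
+/* Worked example with made-up numbers: if the correlation snapshot pairs
+ * ui64CRTimeStamp = 1000 CR ticks with ui64OSTimeStamp = 5,000,000 ns, an
+ * event stamped at 1600 CR ticks gives a 600-tick delta; scaling that delta
+ * by ui64CRDeltaToOSDeltaKNs via RGXFWIF_GET_DELTA_OSTIME_NS() yields the
+ * nanosecond offset that is added to 5,000,000 ns to obtain the event's OS
+ * timestamp. */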
+
+static void _GpuTraceSwitchEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+		RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+	IMG_UINT64 ui64Timestamp;
+	RGX_HWPERF_HW_DATA* psHWPerfPktData;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psHWPerfPkt);
+	PVR_ASSERT(pszWorkName);
+
+	psHWPerfPktData = (RGX_HWPERF_HW_DATA*) RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+	ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+											psHWPerfPkt->ui64Timestamp);
+
+	PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceSwitchEvent: %s ui32ExtJobRef=%d, ui32IntJobRef=%d, eSwType=%d",
+			pszWorkName, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32IntJobRef, eSwType));
+
+	_GpuTraceWorkSwitch(ui64Timestamp,
+	                    psHWPerfPktData->ui32DMContext,
+	                    psHWPerfPktData->ui32CtxPriority,
+	                    psHWPerfPktData->ui32ExtJobRef,
+	                    psHWPerfPktData->ui32IntJobRef,
+	                    pszWorkName,
+	                    eSwType);
+
+	PVR_DPF_RETURN;
+}
+
+static void _GpuTraceUfoEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt)
+{
+	IMG_UINT64 ui64Timestamp;
+	RGX_HWPERF_UFO_DATA *psHWPerfPktData;
+	IMG_UINT32 ui32UFOCount;
+	RGX_HWPERF_UFO_DATA_ELEMENT *puData;
+
+	psHWPerfPktData = (RGX_HWPERF_UFO_DATA *)
+	        RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+	ui32UFOCount = RGX_HWPERF_GET_UFO_STREAMSIZE(psHWPerfPktData->ui32StreamInfo);
+	puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) psHWPerfPktData)
+	        + RGX_HWPERF_GET_UFO_STREAMOFFSET(psHWPerfPktData->ui32StreamInfo));
+
+	ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+											psHWPerfPkt->ui64Timestamp);
+
+	PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceUfoEvent: ui32ExtJobRef=%d, "
+	        "ui32IntJobRef=%d", psHWPerfPktData->ui32ExtJobRef,
+	        psHWPerfPktData->ui32IntJobRef));
+
+	_GpuTraceUfo(ui64Timestamp, psHWPerfPktData->eEvType,
+	             psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32ExtJobRef,
+	             psHWPerfPktData->ui32IntJobRef, ui32UFOCount, puData);
+}
+
+static void _GpuTraceFirmwareEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+		RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType)
+
+{
+	uint64_t ui64Timestamp;
+	RGX_HWPERF_FW_DATA *psHWPerfPktData = (RGX_HWPERF_FW_DATA *)
+		RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+	ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+											psHWPerfPkt->ui64Timestamp);
+
+	_GpuTraceFirmware(ui64Timestamp, pszWorkName, eSwType);
+}
+
+static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+		RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt)
+{
+	RGX_HWPERF_EVENT_TYPE eType;
+	RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData;
+	IMG_UINT32 ui32HwEventTypeIndex;
+	static const struct {
+		IMG_CHAR* pszName;
+		PVR_GPUTRACE_SWITCH_TYPE eSwType;
+	} aszHwEventTypeMap[] = {
+#define _T(T) PVR_GPUTRACE_SWITCH_TYPE_##T
+		{ "BG",             _T(BEGIN)  }, /* RGX_HWPERF_FW_BGSTART */
+		{ "BG",             _T(END)    }, /* RGX_HWPERF_FW_BGEND */
+		{ "IRQ",            _T(BEGIN)  }, /* RGX_HWPERF_FW_IRQSTART */
+		{ "IRQ",            _T(END)    }, /* RGX_HWPERF_FW_IRQEND */
+		{ "DBG",            _T(BEGIN)  }, /* RGX_HWPERF_FW_DBGSTART */
+		{ "DBG",            _T(END)    }, /* RGX_HWPERF_FW_DBGEND */
+		{ "PMOOM_TAPAUSE",  _T(END)    }, /* RGX_HWPERF_HW_PMOOM_TAPAUSE */
+		{ "TA",             _T(BEGIN)  }, /* RGX_HWPERF_HW_TAKICK */
+		{ "TA",             _T(END)    }, /* RGX_HWPERF_HW_TAFINISHED */
+		{ "TQ3D",           _T(BEGIN)  }, /* RGX_HWPERF_HW_3DTQKICK */
+		{ "3D",             _T(BEGIN)  }, /* RGX_HWPERF_HW_3DKICK */
+		{ "3D",             _T(END)    }, /* RGX_HWPERF_HW_3DFINISHED */
+		{ "CDM",            _T(BEGIN)  }, /* RGX_HWPERF_HW_CDMKICK */
+		{ "CDM",            _T(END)    }, /* RGX_HWPERF_HW_CDMFINISHED */
+		{ "TQ2D",           _T(BEGIN)  }, /* RGX_HWPERF_HW_TLAKICK */
+		{ "TQ2D",           _T(END)    }, /* RGX_HWPERF_HW_TLAFINISHED */
+		{ "3DSPM",          _T(BEGIN)  }, /* RGX_HWPERF_HW_3DSPMKICK */
+		{ NULL,             0          }, /* RGX_HWPERF_HW_PERIODIC (unsupported) */
+		{ "RTU",            _T(BEGIN)  }, /* RGX_HWPERF_HW_RTUKICK */
+		{ "RTU",            _T(END)    }, /* RGX_HWPERF_HW_RTUFINISHED */
+		{ "SHG",            _T(BEGIN)  }, /* RGX_HWPERF_HW_SHGKICK */
+		{ "SHG",            _T(END)    }, /* RGX_HWPERF_HW_SHGFINISHED */
+		{ "TQ3D",           _T(END)    }, /* RGX_HWPERF_HW_3DTQFINISHED */
+		{ "3DSPM",          _T(END)    }, /* RGX_HWPERF_HW_3DSPMFINISHED */
+		{ "PMOOM_TARESUME", _T(BEGIN)  }, /* RGX_HWPERF_HW_PMOOM_TARESUME */
+		{ "TDM",            _T(BEGIN)  }, /* RGX_HWPERF_HW_TDMKICK */
+		{ "TDM",            _T(END)    }, /* RGX_HWPERF_HW_TDMFINISHED */
+		{ "NULL",           _T(SINGLE) }, /* RGX_HWPERF_HW_NULLKICK */
+#undef _T
+	};
+	static_assert(RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE == RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE + 1,
+				  "FW and HW events are not contiguous in RGX_HWPERF_EVENT_TYPE");
+
+	PVR_ASSERT(psHWPerfPkt);
+	eType = RGX_HWPERF_GET_TYPE(psHWPerfPkt);
+
+	if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1)
+	{
+		RGX_HWPERF_STREAM_ID eStreamId = RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt);
+		_GpuTraceEventsLost(eStreamId,
+		                    psFtraceData->ui32FTraceLastOrdinal,
+		                    psHWPerfPkt->ui32Ordinal);
+		PVR_DPF((PVR_DBG_ERROR, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)",
+		         eStreamId, psFtraceData->ui32FTraceLastOrdinal, psHWPerfPkt->ui32Ordinal));
+	}
+
+	psFtraceData->ui32FTraceLastOrdinal = psHWPerfPkt->ui32Ordinal;
+
+	/* Process UFO packets */
+	if (eType == RGX_HWPERF_UFO)
+	{
+		_GpuTraceUfoEvent(psDevInfo, psHWPerfPkt);
+		return IMG_TRUE;
+	}
+
+	if (eType <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE)
+	{
+		/* this ID belongs to range 0, so index directly in range 0 */
+		ui32HwEventTypeIndex = eType - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+	}
+	else
+	{
+		/* this ID belongs to range 1, so index into range 1 and skip the number of slots used by range 0 */
+		ui32HwEventTypeIndex = (eType - RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE) +
+		                       (RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE + 1);
+	}
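+
+	/* Example (illustrative): with the two ID ranges laid out back to back,
+	 * the first range-1 event type maps to the slot immediately after the
+	 * range-0 entries in aszHwEventTypeMap above. */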
+
+	if (ui32HwEventTypeIndex >= ARRAY_SIZE(aszHwEventTypeMap))
+		goto err_unsupported;
+
+	if (aszHwEventTypeMap[ui32HwEventTypeIndex].pszName == NULL)
+	{
+		/* Not supported map entry, ignore event */
+		goto err_unsupported;
+	}
+
+	if (HWPERF_PACKET_IS_HW_TYPE(eType))
+	{
+		if (aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType == PVR_GPUTRACE_SWITCH_TYPE_SINGLE)
+		{
+			_GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt,
+			                     aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+			                     PVR_GPUTRACE_SWITCH_TYPE_BEGIN);
+			_GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt,
+			                     aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+			                     PVR_GPUTRACE_SWITCH_TYPE_END);
+		}
+		else
+		{
+			_GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt,
+			                     aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+			                     aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType);
+		}
+	}
+	else if (HWPERF_PACKET_IS_FW_TYPE(eType))
+	{
+		_GpuTraceFirmwareEvent(psDevInfo, psHWPerfPkt,
+										aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+										aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType);
+	}
+	else
+	{
+		goto err_unsupported;
+	}
+
+	return IMG_TRUE;
+
+err_unsupported:
+	PVR_DPF((PVR_DBG_VERBOSE, "%s: Unsupported event type %d", __func__, eType));
+	return IMG_FALSE;
+}
+
+
+static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_PBYTE pBuffer, IMG_UINT32 ui32ReadLen)
+{
+	IMG_UINT32			ui32TlPackets = 0;
+	IMG_UINT32			ui32HWPerfPackets = 0;
+	IMG_UINT32			ui32HWPerfPacketsSent = 0;
+	IMG_PBYTE			pBufferEnd;
+	PVRSRVTL_PPACKETHDR psHDRptr;
+	PVRSRVTL_PACKETTYPE ui16TlType;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDevInfo);
+	PVR_ASSERT(pBuffer);
+	PVR_ASSERT(ui32ReadLen);
+
+	/* Process the TL Packets */
+	pBufferEnd = pBuffer+ui32ReadLen;
+	psHDRptr = GET_PACKET_HDR(pBuffer);
+	while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd )
+	{
+		ui16TlType = GET_PACKET_TYPE(psHDRptr);
+		if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
+		{
+			IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
+			if (0 == ui16DataLen)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "_GpuTraceProcessPackets: ZERO Data in TL data packet: %p", psHDRptr));
+			}
+			else
+			{
+				RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt;
+				RGX_HWPERF_V2_PACKET_HDR* psHWPerfEnd;
+
+				/* Check for lost hwperf data packets */
+				psHWPerfEnd = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)+ui16DataLen);
+				psHWPerfPkt = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr));
+				do
+				{
+					if (ValidAndEmitFTraceEvent(psDevInfo, psHWPerfPkt))
+					{
+						ui32HWPerfPacketsSent++;
+					}
+					ui32HWPerfPackets++;
+					psHWPerfPkt = RGX_HWPERF_GET_NEXT_PACKET(psHWPerfPkt);
+				}
+				while (psHWPerfPkt < psHWPerfEnd);
+			}
+		}
+		else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "_GpuTraceProcessPackets: Indication that the transport buffer was full"));
+		}
+		else
+		{
+			/* Ignore padding packets and any other types */
+			PVR_DPF((PVR_DBG_MESSAGE, "_GpuTraceProcessPackets: Ignoring TL packet, type %d", ui16TlType));
+		}
+
+		psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
+		ui32TlPackets++;
+	}
+
+	PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceProcessPackets: TL "
+	 		"Packets processed %03d, HWPerf packets %03d, sent %03d",
+	 		ui32TlPackets, ui32HWPerfPackets, ui32HWPerfPacketsSent));
+
+	PVR_DPF_RETURN;
+}
+
+
+static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+	PVRSRV_RGXDEV_INFO* psDeviceInfo = hCmdCompHandle;
+	RGX_HWPERF_FTRACE_DATA* psFtraceData;
+	PVRSRV_ERROR		eError;
+	IMG_PBYTE			pBuffer;
+	IMG_UINT32			ui32ReadLen;
+	IMG_BOOL 			bFTraceLockAcquired = IMG_FALSE;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDeviceInfo != NULL);
+
+	psFtraceData = psDeviceInfo->pvGpuFtraceData;
+
+	/* Command-complete notifiers can run concurrently. If this is
+	 * happening, just bail out and let the previous call finish.
+	 * This is ok because we can process the queued packets on the next call.
+	 */
+	bFTraceLockAcquired = OSTryLockAcquire(psFtraceData->hFTraceResourceLock);
+	if (IMG_FALSE == bFTraceLockAcquired)
+	{
+		PVR_DPF_RETURN;
+	}
+
+	/* If this notifier is called, the TL resources will be valid at least
+	 * until the end of this call, since the DeInit function waits on the
+	 * hFTraceResourceLock to clean up the TL resources and unregister the
+	 * notifier, so just assert here.
+	 */
+	PVR_ASSERT(psFtraceData->hGPUTraceTLStream);
+
+	/* If we have a valid stream attempt to acquire some data */
+	eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream, &pBuffer, &ui32ReadLen);
+	if (eError == PVRSRV_OK)
+	{
+		/* Process the HWPerf packets and release the data */
+		if (ui32ReadLen > 0)
+		{
+			PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceCmdCompleteNotify: DATA AVAILABLE offset=%p, length=%d", pBuffer, ui32ReadLen));
+
+			/* Process the transport layer data for HWPerf packets... */
+			_GpuTraceProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen);
+
+			eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_LOG_ERROR(eError, "TLClientReleaseData");
+
+				/* Serious error, disable FTrace GPU events */
+
+				/* Release TraceLock so we always have the locking
+				 * order BridgeLock->TraceLock to prevent AB-BA deadlocks */
+				OSLockRelease(psFtraceData->hFTraceResourceLock);
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+				OSAcquireBridgeLock();
+#endif
+				OSLockAcquire(psFtraceData->hFTraceResourceLock);
+				_GpuTraceDisable(psDeviceInfo, IMG_FALSE);
+				OSLockRelease(psFtraceData->hFTraceResourceLock);
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+				OSReleaseBridgeLock();
+#endif
+				goto out;
+
+			}
+		} /* else no data, ignore */
+	}
+	else if (eError != PVRSRV_ERROR_TIMEOUT)
+	{
+		PVR_LOG_ERROR(eError, "TLClientAcquireData");
+	}
+	if (bFTraceLockAcquired)
+	{
+		OSLockRelease(psFtraceData->hFTraceResourceLock);
+	}
+out:
+	PVR_DPF_RETURN;
+}
+
+/* ----- AppHint interface -------------------------------------------------- */
+
+static PVRSRV_ERROR _GpuTraceIsEnabledCallback(
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data,
+	IMG_BOOL *value)
+{
+	PVR_UNREFERENCED_PARAMETER(device);
+	PVR_UNREFERENCED_PARAMETER(private_data);
+
+	*value = gbFTraceGPUEventsEnabled;
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _GpuTraceSetEnabledCallback(
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data,
+	IMG_BOOL value)
+{
+	PVR_UNREFERENCED_PARAMETER(device);
+
+	/* Lock down the state to avoid concurrent writes */
+	OSLockAcquire(ghGPUTraceStateLock);
+
+	if (value != gbFTraceGPUEventsEnabled)
+	{
+		PVRSRV_ERROR eError;
+		if ((eError = _GpuTraceSetEnabledForAllDevices(value)) == PVRSRV_OK)
+		{
+			PVR_TRACE(("%s GPU FTrace", value ? "ENABLED" : "DISABLED"));
+			gbFTraceGPUEventsEnabled = value;
+		}
+		else
+		{
+			PVR_TRACE(("FAILED to %s GPU FTrace", value ? "enable" : "disable"));
+			/* On failure, a partial enable/disable might have resulted.
+			 * Try our best to restore the previous state; ignore any error. */
+			_GpuTraceSetEnabledForAllDevices(gbFTraceGPUEventsEnabled);
+
+			OSLockRelease(ghGPUTraceStateLock);
+			return eError;
+		}
+	}
+	else
+	{
+		PVR_TRACE(("GPU FTrace already %s!", value ? "enabled" : "disabled"));
+	}
+
+	OSLockRelease(ghGPUTraceStateLock);
+
+	return PVRSRV_OK;
+}
+
+void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFTraceGPU,
+	                                  _GpuTraceIsEnabledCallback,
+	                                  _GpuTraceSetEnabledCallback,
+	                                  psDeviceNode, NULL);
+}
+
+/* ----- FTrace event callbacks --------------------------------------------- */
+
+void PVRGpuTraceEnableUfoCallback(void)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList;
+#if defined(SUPPORT_RGX)
+	PVRSRV_RGXDEV_INFO *psRgxDevInfo;
+	PVRSRV_ERROR eError;
+#endif
+
+	/* Lock down events state, for consistent value of guiUfoEventRef */
+	OSLockAcquire(ghLockFTraceEventLock);
+	if (guiUfoEventRef++ == 0)
+	{
+		/* make sure UFO events are enabled on all rogue devices */
+		while (psDeviceNode)
+		{
+#if defined(SUPPORT_RGX)
+			IMG_UINT64 ui64Filter;
+
+			psRgxDevInfo = psDeviceNode->pvDevice;
+			ui64Filter = RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO) |
+							psRgxDevInfo->ui64HWPerfFilter;
+			/* There is a small chance that ui64HWPerfFilter is changed
+			 * concurrently here, in which case the newest filter value is
+			 * overwritten with the old one plus the UFO event. This is not
+			 * a critical problem. */
+			eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+											IMG_FALSE, ui64Filter);
+			if (eError == PVRSRV_ERROR_NOT_INITIALISED)
+			{
+				/* If we land here, the FW is not initialised yet. The filter
+				 * has been stored and will be passed to the firmware during
+				 * its initialisation phase, so this can be ignored. */
+			}
+			else if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf events on device %d", psDeviceNode->sDevId.i32UMIdentifier));
+			}
+#endif
+			psDeviceNode = psDeviceNode->psNext;
+		}
+	}
+	OSLockRelease(ghLockFTraceEventLock);
+}
+
+void PVRGpuTraceDisableUfoCallback(void)
+{
+#if defined(SUPPORT_RGX)
+	PVRSRV_ERROR eError;
+#endif
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+
+	/* We have to check that the lock is valid because, on driver unload,
+	 * PVRGpuTraceSupportDeInit is called before the kernel disables the
+	 * ftrace events, which means the lock will have been destroyed before
+	 * this callback is called.
+	 * We can safely return in that situation because the driver is being
+	 * unloaded, so we no longer care about the HWPerf state. */
+	if (ghLockFTraceEventLock == NULL)
+		return;
+
+	psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList;
+
+	/* Lock down events state, for consistent value of guiUfoEventRef */
+	OSLockAcquire(ghLockFTraceEventLock);
+	if (--guiUfoEventRef == 0)
+	{
+		/* make sure UFO events are disabled on all rogue devices */
+		while (psDeviceNode)
+		{
+#if defined(SUPPORT_RGX)
+			IMG_UINT64 ui64Filter;
+			PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice;
+
+			ui64Filter = ~(RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) &
+					psRgxDevInfo->ui64HWPerfFilter;
+			/* There is a small chance that ui64HWPerfFilter is changed
+			 * concurrently here, in which case the newest filter value is
+			 * overwritten with the old one minus the UFO event. This is
+			 * not a critical problem. */
+			eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+			                               IMG_FALSE, ui64Filter);
+			if (eError == PVRSRV_ERROR_NOT_INITIALISED)
+			{
+				/* If we land here, the FW is not initialised yet. The filter
+				 * has been stored and will be passed to the firmware during
+				 * its initialisation phase, so this can be ignored. */
+			}
+			else if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Could not disable UFO HWPerf events on device %d",
+				        psDeviceNode->sDevId.i32UMIdentifier));
+			}
+#endif
+			psDeviceNode = psDeviceNode->psNext;
+		}
+	}
+	OSLockRelease(ghLockFTraceEventLock);
+}
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList;
+#if defined(SUPPORT_RGX)
+	PVRSRV_RGXDEV_INFO *psRgxDevInfo;
+	uint64_t ui64Filter, ui64FWEventsFilter = 0;
+	int i;
+
+	for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+		 i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++)
+	{
+		ui64FWEventsFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i);
+	}
+#endif
+	OSLockAcquire(ghLockFTraceEventLock);
+	/* Enable all FW events on all the devices */
+	while (psDeviceNode)
+	{
+#if defined(SUPPORT_RGX)
+		PVRSRV_ERROR eError;
+		psRgxDevInfo = psDeviceNode->pvDevice;
+		ui64Filter = psRgxDevInfo->ui64HWPerfFilter | ui64FWEventsFilter;
+
+		eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+		                               IMG_FALSE, ui64Filter);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Could not enable HWPerf event for firmware"
+			        " task timings (%s).", PVRSRVGetErrorString(eError)));
+		}
+#endif
+		psDeviceNode = psDeviceNode->psNext;
+	}
+	OSLockRelease(ghLockFTraceEventLock);
+}
+
+void PVRGpuTraceDisableFirmwareActivityCallback(void)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+#if defined(SUPPORT_RGX)
+	IMG_UINT64 ui64FWEventsFilter = ~0;
+	int i;
+#endif
+
+	/* We have to check that the lock is valid because, on driver unload,
+	 * PVRGpuTraceSupportDeInit is called before the kernel disables the
+	 * ftrace events, which means the lock will have been destroyed before
+	 * this callback is called.
+	 * We can safely return in that situation because the driver is being
+	 * unloaded, so we no longer care about the HWPerf state. */
+	if (ghLockFTraceEventLock == NULL)
+		return;
+
+	psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList;
+
+#if defined(SUPPORT_RGX)
+	for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+		 i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++)
+	{
+		ui64FWEventsFilter &= ~RGX_HWPERF_EVENT_MASK_VALUE(i);
+	}
+#endif
+
+	OSLockAcquire(ghLockFTraceEventLock);
+
+	/* Disable all FW events on all the devices */
+	while (psDeviceNode)
+	{
+#if defined(SUPPORT_RGX)
+		PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice;
+		IMG_UINT64 ui64Filter = psRgxDevInfo->ui64HWPerfFilter & ui64FWEventsFilter;
+
+		if (PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+		                          IMG_FALSE, ui64Filter) != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Could not disable HWPerf event for firmware task timings."));
+		}
+#endif
+		psDeviceNode = psDeviceNode->psNext;
+	}
+
+	OSLockRelease(ghLockFTraceEventLock);
+}
+
+/******************************************************************************
+ End of file (pvr_gputrace.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_intrinsics.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_intrinsics.h
new file mode 100644
index 0000000..ee7de67
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_intrinsics.h
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@File
+@Title          Intrinsics definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_INTRINSICS_H_
+#define _PVR_INTRINSICS_H_
+
+/* PVR_CTZLL:
+ * Count the number of trailing zeroes in a long long integer
+ */
+
+#if defined(__GNUC__)
+#if defined(__x86_64__)
+
+	#define PVR_CTZLL __builtin_ctzll
+#endif
+#endif
+
+/* PVR_CLZLL:
+ * Count the number of leading zeroes in a long long integer
+ */
+
+#if defined(__GNUC__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \
+					defined(__arm__) || defined(__mips)
+
+#define PVR_CLZLL __builtin_clzll
+
+#endif
+#endif
+
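+/* Illustrative usage (assumes one of the builtin mappings above applies to
+ * the target; both builtins are undefined for a zero input, so callers must
+ * check for zero first):
+ *
+ *	#if defined(PVR_CLZLL)
+ *	// index of the most significant set bit of a non-zero 64-bit value
+ *	unsigned int uiMSB = 63 - PVR_CLZLL(ui64Value);
+ *	#endif
+ */
+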
+#endif /* _PVR_INTRINSICS_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_linux_fence.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_linux_fence.h
new file mode 100644
index 0000000..4b491b3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_linux_fence.h
@@ -0,0 +1,107 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR Linux fence compatibility header
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_LINUX_FENCE_H__)
+#define __PVR_LINUX_FENCE_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \
+	(!defined(CHROMIUMOS_KERNEL) || \
+	 (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)))
+#include <linux/fence.h>
+#else
+#include <linux/dma-fence.h>
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \
+	(!defined(CHROMIUMOS_KERNEL) || \
+	 (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)))
+/* Structures */
+#define	dma_fence fence
+#define dma_fence_array fence_array
+#define	dma_fence_cb fence_cb
+#define	dma_fence_ops fence_ops
+
+/* Defines and Enums */
+#define DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT
+#define DMA_FENCE_FLAG_SIGNALED_BIT FENCE_FLAG_SIGNALED_BIT
+#define DMA_FENCE_FLAG_USER_BITS FENCE_FLAG_USER_BITS
+
+#define DMA_FENCE_ERR FENCE_ERR
+#define	DMA_FENCE_TRACE FENCE_TRACE
+#define DMA_FENCE_WARN FENCE_WARN
+
+/* Functions */
+#define dma_fence_add_callback fence_add_callback
+#define dma_fence_context_alloc fence_context_alloc
+#define dma_fence_default_wait fence_default_wait
+#define dma_fence_is_signaled fence_is_signaled
+#define dma_fence_enable_sw_signaling fence_enable_sw_signaling
+#define dma_fence_free fence_free
+#define dma_fence_get fence_get
+#define dma_fence_get_rcu fence_get_rcu
+#define dma_fence_init fence_init
+#define dma_fence_is_array fence_is_array
+#define dma_fence_put fence_put
+#define dma_fence_signal fence_signal
+#define dma_fence_wait fence_wait
+#define to_dma_fence_array to_fence_array
+
+static inline signed long
+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
+{
+	signed long lret;
+
+	lret = fence_wait_timeout(fence, intr, timeout);
+	if (lret || timeout)
+		return lret;
+
+	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ? 1 : 0;
+}
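+
+/* With the aliases above, driver code can be written once against the
+ * dma_fence API regardless of kernel version. Illustrative (hypothetical
+ * call site):
+ *
+ *	dma_fence_get(psFence);
+ *	if (dma_fence_wait_timeout(psFence, true, HZ) <= 0)
+ *		;	// timed out, was interrupted, or failed
+ *	dma_fence_put(psFence);
+ */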
+
+#endif
+
+#endif /* !defined(__PVR_LINUX_FENCE_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_notifier.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_notifier.c
new file mode 100644
index 0000000..9eb0141
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_notifier.c
@@ -0,0 +1,508 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR notifier interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "allocmem.h"
+#include "dllist.h"
+
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrversion.h"
+#include "connection_server.h"
+
+#include "osfunc.h"
+#include "sofunc_pvr.h"
+
+/*************************************************************************/ /*!
+Command Complete Notifier Interface
+*/ /**************************************************************************/
+
+typedef struct PVRSRV_CMDCOMP_NOTIFY_TAG
+{
+	PVRSRV_CMDCOMP_HANDLE	hCmdCompHandle;
+	PFN_CMDCOMP_NOTIFY		pfnCmdCompleteNotify;
+	DLLIST_NODE				sListNode;
+} PVRSRV_CMDCOMP_NOTIFY;
+
+/* Head of the list of callbacks called when command complete happens */
+static DLLIST_NODE g_sCmdCompNotifyHead;
+static POSWR_LOCK g_hCmdCompNotifyLock;
+
+PVRSRV_ERROR
+PVRSRVCmdCompleteInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	eError = OSWRLockCreate(&g_hCmdCompNotifyLock);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	dllist_init(&g_sCmdCompNotifyHead);
+
+	return PVRSRV_OK;
+}
+
+void
+PVRSRVCmdCompleteDeinit(void)
+{
+	/* Check that all notify functions have been unregistered */
+	if (!dllist_is_empty(&g_sCmdCompNotifyHead))
+	{
+		PDLLIST_NODE psNode;
+
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Command complete notify list is not empty!", __func__));
+
+		/* Clean up any stragglers */
+		psNode = dllist_get_next_node(&g_sCmdCompNotifyHead);
+		while (psNode)
+		{
+			PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+			dllist_remove_node(psNode);
+
+			psNotify = IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
+			OSFreeMem(psNotify);
+
+			psNode = dllist_get_next_node(&g_sCmdCompNotifyHead);
+		}
+	}
+
+	if (g_hCmdCompNotifyLock)
+	{
+		OSWRLockDestroy(g_hCmdCompNotifyLock);
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
+								PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
+								PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+	PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+	if (!phNotify || !pfnCmdCompleteNotify || !hCmdCompHandle)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p, %p, %p)",
+				 __func__, phNotify, pfnCmdCompleteNotify, hCmdCompHandle));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psNotify = OSAllocMem(sizeof(*psNotify));
+	if (!psNotify)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Not enough memory to allocate CmdCompleteNotify structure",
+				 __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Set-up the notify data */
+	psNotify->hCmdCompHandle = hCmdCompHandle;
+	psNotify->pfnCmdCompleteNotify = pfnCmdCompleteNotify;
+
+	/* Add it to the list of Notify functions */
+	OSWRLockAcquireWrite(g_hCmdCompNotifyLock);
+	dllist_add_to_tail(&g_sCmdCompNotifyHead, &psNotify->sListNode);
+	OSWRLockReleaseWrite(g_hCmdCompNotifyLock);
+
+	*phNotify = psNotify;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify)
+{
+	PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+	psNotify = (PVRSRV_CMDCOMP_NOTIFY *) hNotify;
+	if (!psNotify)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p)", __func__, hNotify));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	OSWRLockAcquireWrite(g_hCmdCompNotifyLock);
+	dllist_remove_node(&psNotify->sListNode);
+	OSWRLockReleaseWrite(g_hCmdCompNotifyLock);
+
+	OSFreeMem(psNotify);
+
+	return PVRSRV_OK;
+}
+
+void
+PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+#if !defined(NO_HARDWARE)
+	DLLIST_NODE *psNode, *psNext;
+#endif
+
+	/* Call notify callbacks to check if blocked work items can now proceed */
+#if !defined(NO_HARDWARE)
+	OSWRLockAcquireRead(g_hCmdCompNotifyLock);
+	dllist_foreach_node(&g_sCmdCompNotifyHead, psNode, psNext)
+	{
+		PVRSRV_CMDCOMP_NOTIFY *psNotify =
+			IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
+
+		if (hCmdCompCallerHandle != psNotify->hCmdCompHandle)
+		{
+			psNotify->pfnCmdCompleteNotify(psNotify->hCmdCompHandle);
+		}
+	}
+	OSWRLockReleaseRead(g_hCmdCompNotifyLock);
+#endif
+
+	if (psPVRSRVData->hGlobalEventObject)
+	{
+		OSEventObjectSignal(psPVRSRVData->hGlobalEventObject);
+	}
+}
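+
+#if defined(PVRSRV_NOTIFIER_EXAMPLE)
+/*
+ * Illustrative sketch only; PVRSRV_NOTIFIER_EXAMPLE is a hypothetical guard
+ * that no build defines. A typical client registers a callback, is invoked
+ * via PVRSRVCheckStatus() when some other work completes, and unregisters
+ * on teardown.
+ */
+static void ExampleCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+	/* Re-check whether previously blocked work can now proceed */
+	PVR_UNREFERENCED_PARAMETER(hCmdCompHandle);
+}
+
+static PVRSRV_ERROR ExampleUsage(void)
+{
+	IMG_HANDLE hNotify;
+	PVRSRV_ERROR eError;
+
+	eError = PVRSRVRegisterCmdCompleteNotify(&hNotify,
+											 ExampleCmdCompleteNotify,
+											 (PVRSRV_CMDCOMP_HANDLE) &hNotify);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* Later, when a device finishes some work... */
+	PVRSRVCheckStatus(NULL);
+
+	return PVRSRVUnregisterCmdCompleteNotify(hNotify);
+}
+#endif /* PVRSRV_NOTIFIER_EXAMPLE */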
+
+/*************************************************************************/ /*!
+Debug Notifier Interface
+*/ /**************************************************************************/
+
+typedef struct DEBUG_REQUEST_ENTRY_TAG
+{
+	IMG_UINT32		ui32RequesterID;
+	DLLIST_NODE		sListHead;
+} DEBUG_REQUEST_ENTRY;
+
+typedef struct DEBUG_REQUEST_TABLE_TAG
+{
+	POSWR_LOCK				hLock;
+	IMG_UINT32				ui32RequestCount;
+	DEBUG_REQUEST_ENTRY		asEntry[1];
+} DEBUG_REQUEST_TABLE;
+
+typedef struct DEBUG_REQUEST_NOTIFY_TAG
+{
+	PVRSRV_DEVICE_NODE		*psDevNode;
+	PVRSRV_DBGREQ_HANDLE	hDbgRequestHandle;
+	PFN_DBGREQ_NOTIFY		pfnDbgRequestNotify;
+	IMG_UINT32				ui32RequesterID;
+	DLLIST_NODE				sListNode;
+} DEBUG_REQUEST_NOTIFY;
+
+
+PVRSRV_ERROR
+PVRSRVRegisterDbgTable(PVRSRV_DEVICE_NODE *psDevNode,
+                       const IMG_UINT32 *paui32Table, IMG_UINT32 ui32Length)
+{
+	DEBUG_REQUEST_TABLE *psDebugTable;
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError;
+
+	if (psDevNode->hDebugTable)
+	{
+		return PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED;
+	}
+
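+	/* asEntry is a one-element array used as a variable-length tail, so
+	 * size the allocation for ui32Length entries; the struct already
+	 * contains the first entry, hence the (ui32Length-1) term. */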
+	psDebugTable = OSAllocMem(sizeof(DEBUG_REQUEST_TABLE) +
+							  (sizeof(DEBUG_REQUEST_ENTRY) * (ui32Length-1)));
+	if (!psDebugTable)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	eError = OSWRLockCreate(&psDebugTable->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorFreeDebugTable;
+	}
+
+	psDebugTable->ui32RequestCount = ui32Length;
+
+	/* Init the list heads */
+	for (i = 0; i < ui32Length; i++)
+	{
+		psDebugTable->asEntry[i].ui32RequesterID = paui32Table[i];
+		dllist_init(&psDebugTable->asEntry[i].sListHead);
+	}
+
+	psDevNode->hDebugTable = (IMG_HANDLE *) psDebugTable;
+
+	return PVRSRV_OK;
+
+ErrorFreeDebugTable:
+	OSFreeMem(psDebugTable);
+
+	return eError;
+}
+
+void
+PVRSRVUnregisterDbgTable(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	DEBUG_REQUEST_TABLE *psDebugTable;
+	IMG_UINT32 i;
+
+	PVR_ASSERT(psDevNode->hDebugTable);
+	psDebugTable = (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+	psDevNode->hDebugTable = NULL;
+
+	for (i = 0; i < psDebugTable->ui32RequestCount; i++)
+	{
+		if (!dllist_is_empty(&psDebugTable->asEntry[i].sListHead))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Found registered callback(s) on %d",
+					 __func__, i));
+		}
+	}
+
+	OSWRLockDestroy(psDebugTable->hLock);
+	psDebugTable->hLock = NULL;
+
+	OSFreeMem(psDebugTable);
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify,
+							   PVRSRV_DEVICE_NODE *psDevNode,
+							   PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+							   IMG_UINT32 ui32RequesterID,
+							   PVRSRV_DBGREQ_HANDLE hDbgRequestHandle)
+{
+	DEBUG_REQUEST_TABLE *psDebugTable;
+	DEBUG_REQUEST_NOTIFY *psNotify;
+	PDLLIST_NODE psHead = NULL;
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError;
+
+	if (!phNotify || !psDevNode || !pfnDbgRequestNotify)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p, %p, %p)",
+				 __func__, phNotify, psDevNode, pfnDbgRequestNotify));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDebugTable = (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+
+	PVR_ASSERT(psDebugTable);
+
+	/* NoStats is used because this function may be called outside of the
+	 * process register/de-register calls that track memory usage. */
+	psNotify = OSAllocMemNoStats(sizeof(*psNotify));
+	if (!psNotify)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Not enough memory to allocate DbgRequestNotify structure",
+				 __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Set-up the notify data */
+	psNotify->psDevNode = psDevNode;
+	psNotify->hDbgRequestHandle = hDbgRequestHandle;
+	psNotify->pfnDbgRequestNotify = pfnDbgRequestNotify;
+	psNotify->ui32RequesterID = ui32RequesterID;
+
+	/* Lock down all the lists */
+	OSWRLockAcquireWrite(psDebugTable->hLock);
+
+	/* Find which list to add it to */
+	for (i = 0; i < psDebugTable->ui32RequestCount; i++)
+	{
+		if (psDebugTable->asEntry[i].ui32RequesterID == ui32RequesterID)
+		{
+			psHead = &psDebugTable->asEntry[i].sListHead;
+		}
+	}
+
+	if (!psHead)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to find debug requester", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ErrorReleaseLock;
+	}
+
+	/* Add it to the list of Notify functions */
+	dllist_add_to_tail(psHead, &psNotify->sListNode);
+
+	/* Unlock the lists */
+	OSWRLockReleaseWrite(psDebugTable->hLock);
+
+	*phNotify = psNotify;
+
+	return PVRSRV_OK;
+
+ErrorReleaseLock:
+	OSWRLockReleaseWrite(psDebugTable->hLock);
+	OSFreeMem(psNotify);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+SOPvrDbgRequestNotifyRegister(IMG_HANDLE *phNotify,
+							  PVRSRV_DEVICE_NODE *psDevNode,
+							  PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+							  IMG_UINT32 ui32RequesterID,
+							  PVRSRV_DBGREQ_HANDLE hDbgRequestHandle)
+{
+	return PVRSRVRegisterDbgRequestNotify(phNotify,
+			psDevNode,
+			pfnDbgRequestNotify,
+			ui32RequesterID,
+			hDbgRequestHandle);
+}
+
+PVRSRV_ERROR
+PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify)
+{
+	DEBUG_REQUEST_NOTIFY *psNotify = (DEBUG_REQUEST_NOTIFY *) hNotify;
+	DEBUG_REQUEST_TABLE *psDebugTable;
+
+	if (!psNotify)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p)", __func__, hNotify));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDebugTable = (DEBUG_REQUEST_TABLE *) psNotify->psDevNode->hDebugTable;
+
+	OSWRLockAcquireWrite(psDebugTable->hLock);
+	dllist_remove_node(&psNotify->sListNode);
+	OSWRLockReleaseWrite(psDebugTable->hLock);
+
+	OSFreeMemNoStats(psNotify);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+SOPvrDbgRequestNotifyUnregister(IMG_HANDLE hNotify)
+{
+	return PVRSRVUnregisterDbgRequestNotify(hNotify);
+}
+
+void
+PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode,
+				   IMG_UINT32 ui32VerbLevel,
+				   DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				   void *pvDumpDebugFile)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	DEBUG_REQUEST_TABLE *psDebugTable =
+		(DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+	static const IMG_CHAR *apszVerbosityTable[] = { "Low", "Medium", "High" };
+	const IMG_CHAR *szVerbosityLevel;
+	IMG_UINT32 i;
+
+	static_assert(ARRAY_SIZE(apszVerbosityTable) == DEBUG_REQUEST_VERBOSITY_MAX+1,
+	              "Incorrect number of verbosity levels");
+
+	PVR_ASSERT(psDebugTable);
+
+	OSWRLockAcquireRead(psDebugTable->hLock);
+
+	if (ui32VerbLevel < ARRAY_SIZE(apszVerbosityTable))
+	{
+		szVerbosityLevel = apszVerbosityTable[ui32VerbLevel];
+	}
+	else
+	{
+		szVerbosityLevel = "unknown";
+		PVR_ASSERT(!"Invalid verbosity level received");
+	}
+
+	PVR_DUMPDEBUG_LOG("------------[ PVR DBG: START (%s) ]------------",
+			szVerbosityLevel);
+
+	OSDumpVersionInfo(pfnDumpDebugPrintf, pvDumpDebugFile);
+
+	PVR_DUMPDEBUG_LOG("DDK info: %s (%s) %s",
+					   PVRVERSION_STRING, PVR_BUILD_TYPE, PVR_BUILD_DIR);
+
+	PVR_DUMPDEBUG_LOG("Time now: %" IMG_UINT64_FMTSPEC "us", \
+			OSClockus64());
+
+	switch (psPVRSRVData->eServicesState)
+	{
+		case PVRSRV_SERVICES_STATE_OK:
+			PVR_DUMPDEBUG_LOG("Services State: OK");
+			break;
+		case PVRSRV_SERVICES_STATE_BAD:
+			PVR_DUMPDEBUG_LOG("Services State: BAD");
+			break;
+		case PVRSRV_SERVICES_STATE_UNDEFINED:
+			PVR_DUMPDEBUG_LOG("Services State: UNDEFINED");
+			break;
+		default:
+			PVR_DUMPDEBUG_LOG("Services State: UNKNOWN (%d)",
+							   psPVRSRVData->eServicesState);
+			break;
+	}
+
+	PVRSRVConnectionDebugNotify(pfnDumpDebugPrintf, pvDumpDebugFile);
+
+	/* For each requester */
+	for (i = 0; i < psDebugTable->ui32RequestCount; i++)
+	{
+		DLLIST_NODE *psNode;
+		DLLIST_NODE *psNext;
+
+		/* For each notifier on this requester */
+		dllist_foreach_node(&psDebugTable->asEntry[i].sListHead, psNode, psNext)
+		{
+			DEBUG_REQUEST_NOTIFY *psNotify =
+				IMG_CONTAINER_OF(psNode, DEBUG_REQUEST_NOTIFY, sListNode);
+			psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, ui32VerbLevel,
+							pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+	}
+
+	PVR_DUMPDEBUG_LOG("------------[ PVR DBG: END ]------------");
+	OSWRLockReleaseRead(psDebugTable->hLock);
+
+	if (!pfnDumpDebugPrintf)
+	{
+		/* Only notify OS of an issue if the debug dump has gone there */
+		OSWarnOn(IMG_TRUE);
+	}
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_notifier.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_notifier.h
new file mode 100644
index 0000000..249f117
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_notifier.h
@@ -0,0 +1,250 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR notifier interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_NOTIFIER_H__)
+#define __PVR_NOTIFIER_H__
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+
+/*************************************************************************/ /*!
+Command Complete Notifier Interface
+*/ /**************************************************************************/
+
+typedef IMG_HANDLE PVRSRV_CMDCOMP_HANDLE;
+#ifndef _CMDCOMPNOTIFY_PFN_
+typedef void (*PFN_CMDCOMP_NOTIFY)(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+#define _CMDCOMPNOTIFY_PFN_
+#endif
+
+/*************************************************************************/ /*!
+@Function       PVRSRVCmdCompleteInit
+@Description    Performs initialisation of the command complete notifier
+                interface.
+@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVCmdCompleteInit(void);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVCmdCompleteDeinit
+@Description    Performs cleanup for the command complete notifier interface.
+@Return         void
+*/ /**************************************************************************/
+void
+PVRSRVCmdCompleteDeinit(void);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRegisterCmdCompleteNotify
+@Description    Register a callback function that is called when some device
+                finishes some work, which is signalled via a call to
+                PVRSRVCheckStatus.
+@Output         phNotify             On success, points to command complete
+                                     notifier handle
+@Input          pfnCmdCompleteNotify Function callback
+@Input          hPrivData            Data to be passed back to the caller via
+                                     the callback function
+@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
+                                PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
+                                PVRSRV_CMDCOMP_HANDLE hPrivData);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVUnregisterCmdCompleteNotify
+@Description    Unregister a previously registered callback function.
+@Input          hNotify              Command complete notifier handle
+@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVCheckStatus
+@Description    Notify any registered command complete handlers that some work
+                has been finished (unless hCmdCompCallerHandle matches a
+                handler's hPrivData). Also signal the global event object.
+@Input          hCmdCompCallerHandle Used to prevent a handler from being
+                                     notified. A NULL value results in all
+                                     handlers being notified.
+*/ /**************************************************************************/
+void
+PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle);
+
+
+/*************************************************************************/ /*!
+Debug Notifier Interface
+*/ /**************************************************************************/
+
+#define DEBUG_REQUEST_DC                0
+#define DEBUG_REQUEST_SERVERSYNC        1
+#define DEBUG_REQUEST_SYS               2
+#define DEBUG_REQUEST_ANDROIDSYNC       3
+#define DEBUG_REQUEST_LINUXFENCE        4
+#define DEBUG_REQUEST_SYNCCHECKPOINT    5
+#define DEBUG_REQUEST_HTB               6
+#define DEBUG_REQUEST_APPHINT           7
+#define DEBUG_REQUEST_FALLBACKSYNC      8
+
+#define DEBUG_REQUEST_VERBOSITY_LOW     0
+#define DEBUG_REQUEST_VERBOSITY_MEDIUM  1
+#define DEBUG_REQUEST_VERBOSITY_HIGH    2
+#define DEBUG_REQUEST_VERBOSITY_MAX     DEBUG_REQUEST_VERBOSITY_HIGH
+
+#define DD_VERB_LVL_ENABLED(_verbLvl, _verbLvlChk) ((_verbLvl) >= (_verbLvlChk))
+
+/*
+ * Macro used within debug dump functions to send output either to PVR_LOG or
+ * a custom function. The custom function should be stored as a function
+ * pointer in a local variable called 'pfnDumpDebugPrintf'. 'pvDumpDebugFile'
+ * is also required as a local variable to serve as a file identifier for the
+ * printf function if required.
+ */
+#define PVR_DUMPDEBUG_LOG(...)                                \
+	do                                                        \
+	{                                                         \
+		if (pfnDumpDebugPrintf)                               \
+			pfnDumpDebugPrintf(pvDumpDebugFile, __VA_ARGS__); \
+		else                                                  \
+			PVR_LOG((__VA_ARGS__));                           \
+	} while(0)
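+
+/*
+ * Illustrative sketch only (ExampleDebugDump is a hypothetical function):
+ * a debug dump routine provides the two locals named above and then logs
+ * through the macro:
+ *
+ *	static void ExampleDebugDump(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ *				     void *pvDumpDebugFile)
+ *	{
+ *		PVR_DUMPDEBUG_LOG("Example state: %u", 42);
+ *	}
+ */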
+
+struct _PVRSRV_DEVICE_NODE_;
+
+typedef IMG_HANDLE PVRSRV_DBGREQ_HANDLE;
+#ifndef _DBGNOTIFY_PFNS_
+typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile,
+					const IMG_CHAR *pszFormat, ...);
+typedef void (*PFN_DBGREQ_NOTIFY)(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+					IMG_UINT32 ui32VerbLevel,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile);
+#define _DBGNOTIFY_PFNS_
+#endif
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRegisterDbgTable
+@Description    Registers a debug requester table for the given device. The
+                order in which the debug requester IDs appear in the given
+                table determine the order in which a set of notifier callbacks
+                will be called. In other words, the requester ID that appears
+                first will have all of its associated debug notifier callbacks
+                called first. This will then be followed by all the callbacks
+                associated with the next requester ID in the table and so on.
+@Input          psDevNode     Device node to register requester table with
+@Input          paui32Table   Array of requester IDs
+@Input          ui32Length    Number of elements in paui32Table
+@Return         PVRSRV_ERROR  PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                       const IMG_UINT32 *paui32Table, IMG_UINT32 ui32Length);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVUnregisterDbgTable
+@Description    Unregisters a debug requester table.
+@Input          psDevNode     Device node for which the requester table should
+                              be unregistered
+@Return         void
+*/ /**************************************************************************/
+void
+PVRSRVUnregisterDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRegisterDbgRequestNotify
+@Description    Register a callback function that is called when a debug
+                request is made via a call to PVRSRVDebugRequest. There are a
+                number of verbosity levels, ranging from
+                DEBUG_REQUEST_VERBOSITY_LOW up to
+                DEBUG_REQUEST_VERBOSITY_MAX. The callback is passed the
+                verbosity level given to PVRSRVDebugRequest and can use
+                DD_VERB_LVL_ENABLED to decide how much detail to dump.
+@Output         phNotify             Points to debug notifier handle on success
+@Input          psDevNode            Device node for which the debug callback
+                                     should be registered
+@Input          pfnDbgRequestNotify  Function callback
+@Input          ui32RequesterID      Requester ID. This is used to determine
+                                     the order in which callbacks are called
+@Input          hDbgRequestHandle    Data to be passed back to the caller via
+                                     the callback function
+@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify,
+                               struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                               PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+                               IMG_UINT32 ui32RequesterID,
+                               PVRSRV_DBGREQ_HANDLE hDbgRequestHandle);
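+
+/*
+ * Illustrative sketch only (all names below are hypothetical): device setup
+ * typically registers a requester table first, then attaches notifiers to
+ * individual requester IDs. With the order below, DEBUG_REQUEST_SYS
+ * callbacks are dumped before DEBUG_REQUEST_DC ones.
+ *
+ *	static const IMG_UINT32 aui32DebugOrder[] = {
+ *		DEBUG_REQUEST_SYS,
+ *		DEBUG_REQUEST_DC
+ *	};
+ *
+ *	PVRSRVRegisterDbgTable(psDevNode, aui32DebugOrder,
+ *			       ARRAY_SIZE(aui32DebugOrder));
+ *	PVRSRVRegisterDbgRequestNotify(&hNotify, psDevNode, pfnNotify,
+ *				       DEBUG_REQUEST_SYS, hPrivData);
+ */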
+
+/*************************************************************************/ /*!
+@Function       PVRSRVUnregisterDbgRequestNotify
+@Description    Unregister a previously registered callback function.
+@Input          hNotify              Debug notifier handle.
+@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugRequest
+@Description    Notify any registered debug request handlers that a debug
+                request has been made and at what level.
+@Input          psDevNode           Device node for which the debug request
+                                    has been made
+@Input          ui32VerbLevel       The maximum verbosity level to dump
+@Input          pfnDumpDebugPrintf  Used to specify the print function that
+                                    should be used to dump any debug
+                                    information. If this argument is NULL then
+                                    PVR_LOG() will be used as the default
+                                    print function.
+@Input          pvDumpDebugFile     Optional file identifier to be passed to
+                                    the print function if required.
+@Return         void
+*/ /**************************************************************************/
+void
+PVRSRVDebugRequest(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                   IMG_UINT32 ui32VerbLevel,
+                   DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                   void *pvDumpDebugFile);
+
+#endif /* !defined(__PVR_NOTIFIER_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_platform_drv.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_platform_drv.c
new file mode 100644
index 0000000..c9ef6a4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_platform_drv.c
@@ -0,0 +1,316 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR DRM platform driver
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+
+#include "module_common.h"
+#include "pvr_drv.h"
+#include "pvrmodule.h"
+#include "sysinfo.h"
+
+/* This header must always be included last */
+#include "kernel_compatibility.h"
+
+static struct drm_driver pvr_drm_platform_driver;
+
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+/*
+ * This is an arbitrary value. If it's changed then the 'num_devices' module
+ * parameter description should also be updated to match.
+ */
+#define MAX_DEVICES 16
+
+static unsigned int pvr_num_devices = 1;
+static struct platform_device **pvr_devices;
+
+#if defined(NO_HARDWARE)
+static int pvr_num_devices_set(const char *val,
+			       const struct kernel_param *param)
+{
+	int err;
+
+	err = param_set_uint(val, param);
+	if (err)
+		return err;
+
+	if (pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES)
+		return -EINVAL;
+
+	return 0;
+}
+
+static const struct kernel_param_ops pvr_num_devices_ops = {
+	.set = pvr_num_devices_set,
+	.get = param_get_uint,
+};
+
+module_param_cb(num_devices, &pvr_num_devices_ops, &pvr_num_devices, 0444);
+MODULE_PARM_DESC(num_devices,
+		 "Number of platform devices to register (default: 1 - max: 16)");
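+
+/*
+ * For example, a NO_HARDWARE module build might be loaded with (the module
+ * file name is illustrative; it depends on the build configuration):
+ *
+ *	insmod pvrsrvkm.ko num_devices=4
+ */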
+#endif /* defined(NO_HARDWARE) */
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+
+static int pvr_devices_register(void)
+{
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+	struct platform_device_info pvr_dev_info = {
+		.name = SYS_RGX_DEV_NAME,
+		.id = -2,
+#if defined(NO_HARDWARE)
+		/* Not all cores support 40-bit physical addresses, but this
+		 * works unless an address above 32 bits is returned on those
+		 * cores. A more correct fix is planned.
+		 */
+		.dma_mask = DMA_BIT_MASK(40),
+#else
+		.dma_mask = DMA_BIT_MASK(32),
+#endif
+	};
+	unsigned int i;
+
+	BUG_ON(pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES);
+
+	pvr_devices = kmalloc_array(pvr_num_devices, sizeof(*pvr_devices),
+				    GFP_KERNEL);
+	if (!pvr_devices)
+		return -ENOMEM;
+
+	for (i = 0; i < pvr_num_devices; i++) {
+		pvr_devices[i] = platform_device_register_full(&pvr_dev_info);
+		if (IS_ERR(pvr_devices[i])) {
+			DRM_ERROR("unable to register device %u (err=%ld)\n",
+				  i, PTR_ERR(pvr_devices[i]));
+			pvr_devices[i] = NULL;
+			return -ENODEV;
+		}
+	}
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+
+	return 0;
+}
+
+static void pvr_devices_unregister(void)
+{
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+	unsigned int i;
+
+	BUG_ON(!pvr_devices);
+
+	for (i = 0; i < pvr_num_devices && pvr_devices[i]; i++)
+		platform_device_unregister(pvr_devices[i]);
+
+	kfree(pvr_devices);
+	pvr_devices = NULL;
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+}
+
+static int pvr_probe(struct platform_device *pdev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+	struct drm_device *ddev;
+	int ret;
+
+	DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+	ddev = drm_dev_alloc(&pvr_drm_platform_driver, &pdev->dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+	if (IS_ERR(ddev))
+		return PTR_ERR(ddev);
+#else
+	if (!ddev)
+		return -ENOMEM;
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+	/* Needed by drm_platform_set_busid */
+	ddev->platformdev = pdev;
+#endif
+
+	/*
+	 * The load callback, called from drm_dev_register, is deprecated,
+	 * because of potential race conditions. Calling the function here,
+	 * before calling drm_dev_register, avoids those potential races.
+	 */
+	BUG_ON(pvr_drm_platform_driver.load != NULL);
+	ret = pvr_drm_load(ddev, 0);
+	if (ret)
+		goto err_drm_dev_put;
+
+	ret = drm_dev_register(ddev, 0);
+	if (ret)
+		goto err_drm_dev_unload;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+		pvr_drm_platform_driver.name,
+		pvr_drm_platform_driver.major,
+		pvr_drm_platform_driver.minor,
+		pvr_drm_platform_driver.patchlevel,
+		pvr_drm_platform_driver.date,
+		ddev->primary->index);
+#endif
+	return 0;
+
+err_drm_dev_unload:
+	pvr_drm_unload(ddev);
+err_drm_dev_put:
+	drm_dev_put(ddev);
+	return ret;
+#else
+	DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+	return drm_platform_init(&pvr_drm_platform_driver, pdev);
+#endif
+}
+
+static int pvr_remove(struct platform_device *pdev)
+{
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+
+	DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+	drm_dev_unregister(ddev);
+
+	/* The unload callback, called from drm_dev_unregister, is
+	 * deprecated. Call the unload function directly.
+	 */
+	BUG_ON(pvr_drm_platform_driver.unload != NULL);
+	pvr_drm_unload(ddev);
+
+	drm_dev_put(ddev);
+#else
+	drm_put_dev(ddev);
+#endif
+	return 0;
+}
+
+static void pvr_shutdown(struct platform_device *pdev)
+{
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+	struct pvr_drm_private *priv = ddev->dev_private;
+
+	DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+	PVRSRVCommonDeviceShutdown(priv->dev_node);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+static const struct of_device_id pvr_of_ids[] = {
+#if defined(SYS_RGX_OF_COMPATIBLE)
+	{ .compatible = SYS_RGX_OF_COMPATIBLE, },
+#endif
+	{},
+};
+
+#if !defined(CHROMIUMOS_KERNEL) || !defined(MODULE)
+MODULE_DEVICE_TABLE(of, pvr_of_ids);
+#endif
+#endif
+
+static const struct platform_device_id pvr_platform_ids[] = {
+#if defined(SYS_RGX_DEV_NAME)
+	{ SYS_RGX_DEV_NAME, 0 },
+#endif
+	{ }
+};
+
+#if !defined(CHROMIUMOS_KERNEL) || !defined(MODULE)
+MODULE_DEVICE_TABLE(platform, pvr_platform_ids);
+#endif
+
+static struct platform_driver pvr_platform_driver = {
+	.driver = {
+		.name		= DRVNAME,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+		.of_match_table	= of_match_ptr(pvr_of_ids),
+#endif
+		.pm		= &pvr_pm_ops,
+	},
+	.id_table		= pvr_platform_ids,
+	.probe			= pvr_probe,
+	.remove			= pvr_remove,
+	.shutdown		= pvr_shutdown,
+};
+
+static int __init pvr_init(void)
+{
+	int err;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	pvr_drm_platform_driver = pvr_drm_generic_driver;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+	pvr_drm_platform_driver.set_busid = drm_platform_set_busid;
+#endif
+
+	err = PVRSRVCommonDriverInit();
+	if (err)
+		return err;
+
+	err = platform_driver_register(&pvr_platform_driver);
+	if (err)
+		return err;
+
+	return pvr_devices_register();
+}
+
+static void __exit pvr_exit(void)
+{
+	DRM_DEBUG_DRIVER("\n");
+
+	pvr_devices_unregister();
+	platform_driver_unregister(&pvr_platform_driver);
+	PVRSRVCommonDriverDeinit();
+
+	DRM_DEBUG_DRIVER("done\n");
+}
+
+late_initcall(pvr_init);
+module_exit(pvr_exit);
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_ricommon.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_ricommon.h
new file mode 100644
index 0000000..73a7ca0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_ricommon.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Resource Information (RI) common types and definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Resource Information (RI) common types and definitions included
+                in both user mode and kernel mode source.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __PVR_RICOMMON_H__
+#define __PVR_RICOMMON_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+
+/*! Maximum text string length including the null byte */
+#define PRVSRVRI_MAX_TEXT_LENGTH	20U
+
+/* PID used to hold PMR allocations which are driver-wide (i.e. have a
+   lifetime longer than an application process) */
+#define PVR_SYS_ALLOC_PID 1
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_RICOMMON_H__ */
+/******************************************************************************
+ End of file (pvr_ricommon.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_sw_fence.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_sw_fence.c
new file mode 100644
index 0000000..9200af7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_sw_fence.c
@@ -0,0 +1,201 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/spinlock_types.h>
+#include <linux/atomic.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+
+#include "pvr_sw_fence.h"
+
+struct pvr_sw_fence_context {
+	struct kref kref;
+	unsigned int context;
+	char context_name[32];
+	char driver_name[32];
+	atomic_t seqno;
+	atomic_t fence_count;
+};
+
+struct pvr_sw_fence {
+	struct dma_fence base;
+	struct pvr_sw_fence_context *fence_context;
+	spinlock_t lock;
+};
+
+#define to_pvr_sw_fence(fence) container_of(fence, struct pvr_sw_fence, base)
+
+const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx)
+{
+	return fctx->context_name;
+}
+
+void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx,
+				    char *str, int size)
+{
+	snprintf(str, size, "%d", atomic_read(&fctx->seqno));
+}
+
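+/* Return the current sequence number and advance the context, so the first
+ * fence created on a context gets seqno 0.
+ */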
+static inline unsigned
+pvr_sw_fence_context_seqno_next(struct pvr_sw_fence_context *fence_context)
+{
+	return atomic_inc_return(&fence_context->seqno) - 1;
+}
+
+static const char *pvr_sw_fence_get_driver_name(struct dma_fence *fence)
+{
+	struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+	return pvr_sw_fence->fence_context->driver_name;
+}
+
+static const char *pvr_sw_fence_get_timeline_name(struct dma_fence *fence)
+{
+	struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+	return pvr_sw_fence_context_name(pvr_sw_fence->fence_context);
+}
+
+static void pvr_sw_fence_value_str(struct dma_fence *fence, char *str, int size)
+{
+	snprintf(str, size, "%u", fence->seqno);
+}
+
+static void pvr_sw_fence_timeline_value_str(struct dma_fence *fence,
+					    char *str, int size)
+{
+	struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+	pvr_sw_fence_context_value_str(pvr_sw_fence->fence_context, str, size);
+}
+
+static bool pvr_sw_fence_enable_signaling(struct dma_fence *fence)
+{
+	return true;
+}
+
+static void pvr_sw_fence_context_destroy_kref(struct kref *kref)
+{
+	struct pvr_sw_fence_context *fence_context =
+		container_of(kref, struct pvr_sw_fence_context, kref);
+	unsigned int fence_count;
+
+	fence_count = atomic_read(&fence_context->fence_count);
+	if (WARN_ON(fence_count))
+		pr_debug("%s context has %u fence(s) remaining\n",
+			 fence_context->context_name, fence_count);
+
+	kfree(fence_context);
+}
+
+static void pvr_sw_fence_release(struct dma_fence *fence)
+{
+	struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+	atomic_dec(&pvr_sw_fence->fence_context->fence_count);
+	kref_put(&pvr_sw_fence->fence_context->kref,
+		pvr_sw_fence_context_destroy_kref);
+	kfree(pvr_sw_fence);
+}
+
+static const struct dma_fence_ops pvr_sw_fence_ops = {
+	.get_driver_name = pvr_sw_fence_get_driver_name,
+	.get_timeline_name = pvr_sw_fence_get_timeline_name,
+	.fence_value_str = pvr_sw_fence_value_str,
+	.timeline_value_str = pvr_sw_fence_timeline_value_str,
+	.enable_signaling = pvr_sw_fence_enable_signaling,
+	.wait = dma_fence_default_wait,
+	.release = pvr_sw_fence_release,
+};
+
+struct pvr_sw_fence_context *
+pvr_sw_fence_context_create(const char *context_name, const char *driver_name)
+{
+	struct pvr_sw_fence_context *fence_context;
+
+	fence_context = kmalloc(sizeof(*fence_context), GFP_KERNEL);
+	if (!fence_context)
+		return NULL;
+
+	fence_context->context = dma_fence_context_alloc(1);
+	strlcpy(fence_context->context_name, context_name,
+		sizeof(fence_context->context_name));
+	strlcpy(fence_context->driver_name, driver_name,
+		sizeof(fence_context->driver_name));
+	atomic_set(&fence_context->seqno, 0);
+	atomic_set(&fence_context->fence_count, 0);
+	kref_init(&fence_context->kref);
+
+	return fence_context;
+}
+
+void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context)
+{
+	kref_put(&fence_context->kref, pvr_sw_fence_context_destroy_kref);
+}
+
+struct dma_fence *
+pvr_sw_fence_create(struct pvr_sw_fence_context *fence_context)
+{
+	struct pvr_sw_fence *pvr_sw_fence;
+	unsigned int seqno;
+
+	pvr_sw_fence = kmalloc(sizeof(*pvr_sw_fence), GFP_KERNEL);
+	if (!pvr_sw_fence)
+		return NULL;
+
+	spin_lock_init(&pvr_sw_fence->lock);
+	pvr_sw_fence->fence_context = fence_context;
+
+	seqno = pvr_sw_fence_context_seqno_next(fence_context);
+	dma_fence_init(&pvr_sw_fence->base, &pvr_sw_fence_ops,
+		       &pvr_sw_fence->lock, fence_context->context, seqno);
+
+	atomic_inc(&fence_context->fence_count);
+	kref_get(&fence_context->kref);
+
+	return &pvr_sw_fence->base;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_sw_fence.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_sw_fence.h
new file mode 100644
index 0000000..5463b74
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_sw_fence.h
@@ -0,0 +1,62 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_SW_FENCES_H__)
+#define __PVR_SW_FENCES_H__
+
+#include "pvr_linux_fence.h"
+
+struct pvr_sw_fence_context;
+
+struct pvr_sw_fence_context *pvr_sw_fence_context_create(const char *name,
+				const char *driver_name);
+void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context);
+struct dma_fence *pvr_sw_fence_create(struct pvr_sw_fence_context *
+				      fence_context);
+
+const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx);
+void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx,
+				    char *str, int size);
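+
+/*
+ * Illustrative lifecycle sketch: the context holds a reference for every
+ * live fence, so it is only freed once the creator and all fences have
+ * dropped their references.
+ *
+ *	struct pvr_sw_fence_context *fctx;
+ *	struct dma_fence *fence;
+ *
+ *	fctx = pvr_sw_fence_context_create("example-timeline", "example");
+ *	fence = pvr_sw_fence_create(fctx);
+ *	...
+ *	dma_fence_signal(fence);
+ *	dma_fence_put(fence);
+ *	pvr_sw_fence_context_destroy(fctx);
+ */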
+
+#endif /* !defined(__PVR_SW_FENCES_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_sync.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_sync.h
new file mode 100644
index 0000000..fa71fc6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_sync.h
@@ -0,0 +1,102 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File           pvr_sync.h
+@Title          Kernel driver for Android's sync mechanism
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_SYNC_H
+#define _PVR_SYNC_H
+
+#include <linux/device.h>
+
+#include "pvr_fd_sync_kernel.h"
+#include "services_kernel_client.h"
+
+
+/* Services internal interface */
+
+/**
+ * pvr_sync_init() - create an internal sync context
+ * @dev: Linux device
+ *
+ * Return: PVRSRV_OK on success.
+ */
+enum PVRSRV_ERROR pvr_sync_init(struct device *dev);
+
+
+/**
+ * pvr_sync_deinit() - destroy an internal sync context
+ *
+ * Drains any work items with outstanding sync fence updates/dependencies.
+ */
+void pvr_sync_deinit(void);
+
+enum PVRSRV_ERROR pvr_sync_fence_wait(void *fence, u32 timeout_in_ms);
+
+enum PVRSRV_ERROR pvr_sync_fence_release(void *fence);
+
+enum PVRSRV_ERROR pvr_sync_fence_get(int fence_fd, void **fence_out);
+
+enum PVRSRV_ERROR pvr_sync_sw_timeline_fence_create(int timeline_fd,
+						    const char *fence_name,
+						    int *fence_fd_out,
+						    u64 *sync_pt_idx);
+
+enum PVRSRV_ERROR pvr_sync_sw_timeline_advance(void *timeline,
+					       u64 *sync_pt_idx);
+
+enum PVRSRV_ERROR pvr_sync_sw_timeline_release(void *timeline);
+
+enum PVRSRV_ERROR pvr_sync_sw_timeline_get(int timeline_fd,
+					   void **timeline_out);
+
+enum PVRSRV_ERROR
+sync_dump_fence(void *sw_fence_obj,
+		DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+		void *dump_debug_file);
+
+enum PVRSRV_ERROR
+sync_sw_dump_timeline(void *sw_timeline_obj,
+		      DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+		      void *dump_debug_file);
+
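+/*
+ * Illustrative sketch only: a caller outside the sync driver typically
+ * resolves a fence fd, waits on it and then releases it:
+ *
+ *	void *fence;
+ *
+ *	if (pvr_sync_fence_get(fence_fd, &fence) == PVRSRV_OK) {
+ *		pvr_sync_fence_wait(fence, 1000);
+ *		pvr_sync_fence_release(fence);
+ *	}
+ */
+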
+#endif /* _PVR_SYNC_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_sync_file.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_sync_file.c
new file mode 100644
index 0000000..7daf5ca
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_sync_file.c
@@ -0,0 +1,1062 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File           pvr_sync_file.c
+@Title          Kernel driver for Android's sync mechanism
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "services_kernel_client.h"
+#include "pvr_drv.h"
+#include "pvr_sync.h"
+#include "pvr_fence.h"
+#include "pvr_counting_timeline.h"
+
+#include "linux_sw_sync.h"
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/sync_file.h>
+#include <linux/file.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+
+/* This header must always be included last */
+#include "kernel_compatibility.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) && !defined(CHROMIUMOS_KERNEL)
+#define sync_file_user_name(s)	((s)->name)
+#else
+#define sync_file_user_name(s)	((s)->user_name)
+#endif
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \
+	do {                                                             \
+		if (pfnDumpDebugPrintf)                                  \
+			pfnDumpDebugPrintf(pvDumpDebugFile, fmt,         \
+					   ## __VA_ARGS__);              \
+		else                                                     \
+			pr_err(fmt "\n", ## __VA_ARGS__);                \
+	} while (0)
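+
+/* Illustrative only (not part of the original driver): callers pass the
+ * debug-dump callback pair straight through, and output falls back to
+ * pr_err() when no callback is supplied, e.g. (hypothetical arguments):
+ *
+ *	PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+ *			  "timeline %s: %u outstanding fences", name, count);
+ */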
+
+#define	FILE_NAME "pvr_sync_file"
+
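+/* These definitions appear to mirror the upstream Android sw_sync debugfs
+ * ioctl ABI (struct sw_sync_create_fence_data and the 'W' ioctl magic in
+ * drivers/dma-buf/sw_sync.c), duplicated here rather than relying on that
+ * header being available.
+ */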
+struct sw_sync_create_fence_data {
+	__u32 value;
+	char name[32];
+	__s32 fence;
+};
+#define SW_SYNC_IOC_MAGIC 'W'
+#define SW_SYNC_IOC_CREATE_FENCE \
+	(_IOWR(SW_SYNC_IOC_MAGIC, 0, struct sw_sync_create_fence_data))
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+/* Global data for the sync driver */
+static struct {
+	void *dev_cookie;
+	void *dbg_request_handle;
+	struct workqueue_struct *fence_status_wq;
+	struct pvr_fence_context *foreign_fence_context;
+#if defined(NO_HARDWARE)
+	spinlock_t pvr_timeline_active_list_lock;
+	struct list_head pvr_timeline_active_list;
+#endif
+	PFN_SYNC_CHECKPOINT_STRUCT sync_checkpoint_ops;
+} pvr_sync_data;
+
+static const struct file_operations pvr_sync_fops;
+
+/* This is the actual timeline metadata. We might keep this around after the
+ * base sync driver has destroyed the pvr_sync_timeline_wrapper object.
+ */
+struct pvr_sync_timeline {
+	char name[32];
+	struct file *file;
+	bool is_sw;
+	/* Fence context used for hw fences */
+	struct pvr_fence_context *hw_fence_context;
+	/* Timeline and context for sw fences */
+	struct pvr_counting_fence_timeline *sw_fence_timeline;
+#if defined(NO_HARDWARE)
+	/* List of all timelines (used to advance all timelines in nohw builds) */
+	struct list_head list;
+#endif
+};
+
+static
+void pvr_sync_free_checkpoint_list_mem(void *mem_ptr)
+{
+	kfree(mem_ptr);
+}
+
+#if defined(NO_HARDWARE)
+/* Function used to signal pvr fences in nohw builds. */
+static
+void pvr_sync_nohw_signal_fence(void *fence_data_to_signal)
+{
+	struct pvr_sync_timeline *this_timeline;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pvr_sync_data.pvr_timeline_active_list_lock, flags);
+	list_for_each_entry(this_timeline, &pvr_sync_data.pvr_timeline_active_list, list) {
+		pvr_fence_context_signal_fences_nohw(this_timeline->hw_fence_context);
+	}
+	spin_unlock_irqrestore(&pvr_sync_data.pvr_timeline_active_list_lock, flags);
+}
+#endif
+
+static bool is_pvr_timeline(struct file *file)
+{
+	return file->f_op == &pvr_sync_fops;
+}
+
+static struct pvr_sync_timeline *pvr_sync_timeline_fget(int fd)
+{
+	struct file *file = fget(fd);
+
+	if (!file)
+		return NULL;
+
+	if (!is_pvr_timeline(file)) {
+		fput(file);
+		return NULL;
+	}
+
+	return file->private_data;
+}
+
+static void pvr_sync_timeline_fput(struct pvr_sync_timeline *timeline)
+{
+	fput(timeline->file);
+}
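+
+/* A minimal sketch of the borrow pattern the pair above supports
+ * (hypothetical caller; the real users are pvr_sync_create_fence() and
+ * the sw-timeline helpers below). The file reference pins the timeline
+ * for the duration of the access:
+ *
+ *	struct pvr_sync_timeline *tl = pvr_sync_timeline_fget(fd);
+ *	if (!tl)
+ *		return -EINVAL;
+ *	... use tl ...
+ *	pvr_sync_timeline_fput(tl);
+ */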
+
+/* ioctl and fops handling */
+
+static int pvr_sync_open(struct inode *inode, struct file *file)
+{
+	struct pvr_sync_timeline *timeline;
+	char task_comm[TASK_COMM_LEN];
+	int err = -ENOMEM;
+
+	get_task_comm(task_comm, current);
+
+	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
+	if (!timeline)
+		goto err_out;
+
+	strlcpy(timeline->name, task_comm, sizeof(timeline->name));
+	timeline->file = file;
+	timeline->is_sw = false;
+
+	file->private_data = timeline;
+	err = 0;
+err_out:
+	return err;
+}
+
+static int pvr_sync_close(struct inode *inode, struct file *file)
+{
+	struct pvr_sync_timeline *timeline = file->private_data;
+
+	if (timeline->sw_fence_timeline) {
+		/* This makes sure any outstanding SW syncs are marked as
+		 * complete at timeline close time. Otherwise the timeline is
+		 * leaked (outstanding fences hold a reference to it) and the
+		 * system may wedge if something is waiting on one of those
+		 * fences.
+		 */
+		pvr_counting_fence_timeline_force_complete(
+			timeline->sw_fence_timeline);
+		pvr_counting_fence_timeline_put(timeline->sw_fence_timeline);
+	}
+
+	if (timeline->hw_fence_context) {
+#if defined(NO_HARDWARE)
+		list_del(&timeline->list);
+#endif
+		pvr_fence_context_destroy(timeline->hw_fence_context);
+	}
+
+	kfree(timeline);
+
+	return 0;
+}
+
+/*
+ * This is the function that kick code will call in order to 'finalise' a
+ * created output fence just prior to returning from the kick function.
+ * The OS native sync code needs to implement a function meeting this
+ * specification - the implementation may be a nop if the OS does not need
+ * to perform any actions at this point.
+ *
+ * Input: fence_fd            The PVRSRV_FENCE to be 'finalised'. This value
+ *                            will have been returned by an earlier call to
+ *                            pvr_sync_create_fence().
+ * Input: finalise_data       The finalise data returned by an earlier call
+ *                            to pvr_sync_create_fence().
+ */
+static enum PVRSRV_ERROR
+pvr_sync_finalise_fence(PVRSRV_FENCE fence_fd, void *finalise_data)
+{
+	struct sync_file *sync_file = finalise_data;
+	struct pvr_fence *pvr_fence;
+
+	if (!sync_file || (fence_fd < 0)) {
+		pr_err(FILE_NAME ": %s: Invalid input fence\n", __func__);
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	pvr_fence = to_pvr_fence(sync_file->fence);
+
+	/* pvr fences can be signalled any time after creation */
+	dma_fence_enable_sw_signaling(&pvr_fence->base);
+
+	fd_install(fence_fd, sync_file->file);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * This is the function that kick code will call in order to obtain a new
+ * PVRSRV_FENCE from the OS native sync code and the PSYNC_CHECKPOINT used
+ * in that fence. The OS native sync code needs to implement a function
+ * meeting this specification.
+ *
+ * Input: fence_name               A string to annotate the fence with (for
+ *                                 debug).
+ * Input: timeline                 The timeline on which the new fence is to be
+ *                                 created.
+ * Output: new_fence               The new PVRSRV_FENCE to be returned by the
+ *                                 kick call.
+ * Output: fence_uid               Unique ID of the update fence.
+ * Output: fence_finalise_data     Pointer to data needed to finalise the fence.
+ * Output: new_checkpoint_handle   The PSYNC_CHECKPOINT used by the new fence.
+ */
+static enum PVRSRV_ERROR
+pvr_sync_create_fence(const char *fence_name,
+		      PVRSRV_TIMELINE new_fence_timeline,
+		      PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+		      PVRSRV_FENCE *new_fence, u64 *fence_uid,
+		      void **fence_finalise_data,
+		      PSYNC_CHECKPOINT *new_checkpoint_handle,
+		      void **timeline_update_sync,
+		      __u32 *timeline_update_value)
+{
+	PVRSRV_ERROR err = PVRSRV_OK;
+	PVRSRV_FENCE new_fence_fd = -1;
+	struct pvr_sync_timeline *timeline;
+	struct pvr_fence *pvr_fence;
+	PSYNC_CHECKPOINT checkpoint;
+	struct sync_file *sync_file;
+
+	if (new_fence_timeline < 0 || !new_fence || !new_checkpoint_handle
+		|| !fence_finalise_data) {
+		pr_err(FILE_NAME ": %s: Invalid input params\n", __func__);
+		err = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+	/* Reserve the new fence FD before doing anything else, so we do not
+	 * fail part-way through (e.g. by running out of FDs).
+	 */
+	new_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+	if (new_fence_fd < 0) {
+		pr_err(FILE_NAME ": %s: Failed to get fd\n", __func__);
+		err = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+		goto err_out;
+	}
+
+	timeline = pvr_sync_timeline_fget(new_fence_timeline);
+	if (!timeline) {
+		pr_err(FILE_NAME ": %s: Failed to open supplied timeline fd (%d)\n",
+			__func__, new_fence_timeline);
+		err = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_put_fd;
+	}
+
+	if (timeline->is_sw) {
+		/* This should never happen! */
+		pr_err(FILE_NAME ": %s: Request to create a pvr fence on sw timeline (%d)\n",
+			__func__, new_fence_timeline);
+		err = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_put_timeline;
+	}
+
+	if (!timeline->hw_fence_context) {
+#if defined(NO_HARDWARE)
+		unsigned long flags;
+#endif
+		/* First time we use this timeline, so create a context. */
+		timeline->hw_fence_context =
+			pvr_fence_context_create(pvr_sync_data.dev_cookie,
+						 pvr_sync_data.fence_status_wq,
+						 timeline->name);
+		if (!timeline->hw_fence_context) {
+			pr_err(FILE_NAME ": %s: Failed to create fence context (%d)\n",
+			       __func__, new_fence_timeline);
+			err = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto err_put_timeline;
+		}
+#if defined(NO_HARDWARE)
+		/* Add timeline to active list */
+		INIT_LIST_HEAD(&timeline->list);
+		spin_lock_irqsave(&pvr_sync_data.pvr_timeline_active_list_lock, flags);
+		list_add_tail(&timeline->list, &pvr_sync_data.pvr_timeline_active_list);
+		spin_unlock_irqrestore(&pvr_sync_data.pvr_timeline_active_list_lock, flags);
+#endif
+	}
+
+	pvr_fence = pvr_fence_create(timeline->hw_fence_context,
+				     psSyncCheckpointContext,
+				     new_fence_timeline,
+				     fence_name);
+	if (!pvr_fence) {
+		pr_err(FILE_NAME ": %s: Failed to create new pvr_fence\n",
+			__func__);
+		err = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_put_timeline;
+	}
+
+	checkpoint = pvr_fence_get_checkpoint(pvr_fence);
+	if (!checkpoint) {
+		pr_err(FILE_NAME ": %s: Failed to get fence checkpoint\n",
+			__func__);
+		err = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_destroy_fence;
+	}
+
+	sync_file = sync_file_create(&pvr_fence->base);
+	if (!sync_file) {
+		pr_err(FILE_NAME ": %s: Failed to create sync_file\n",
+			__func__);
+		err = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_destroy_fence;
+	}
+	strlcpy(sync_file_user_name(sync_file),
+		pvr_fence->name,
+		sizeof(sync_file_user_name(sync_file)));
+	dma_fence_put(&pvr_fence->base);
+
+	*new_fence = new_fence_fd;
+	*fence_finalise_data = sync_file;
+	*new_checkpoint_handle = checkpoint;
+	*fence_uid = OSGetCurrentClientProcessIDKM();
+	*fence_uid = (*fence_uid << 32) | (new_fence_fd & U32_MAX);
+	/* Not used, but we don't want to return dangling pointers. */
+	*timeline_update_sync = NULL;
+	*timeline_update_value = 0;
+
+	pvr_sync_timeline_fput(timeline);
+err_out:
+	return err;
+
+err_destroy_fence:
+	pvr_fence_destroy(pvr_fence);
+err_put_timeline:
+	pvr_sync_timeline_fput(timeline);
+err_put_fd:
+	put_unused_fd(new_fence_fd);
+	*fence_uid = PVRSRV_NO_FENCE;
+	goto err_out;
+}
+
+/*
+ * This is the function that kick code will call in order to 'rollback' a
+ * created output fence should an error occur when submitting the kick.
+ * The OS native sync code needs to implement a function meeting this
+ * specification.
+ *
+ * Input: fence_to_rollback The PVRSRV_FENCE to be 'rolled back'. The fence
+ *                          should be destroyed and any actions taken due to
+ *                          its creation that need to be undone should be
+ *                          reverted.
+ * Input: fence_data_to_rollback  The finalise data for the fence to be
+ *                          'rolled back'.
+ */
+static enum PVRSRV_ERROR
+pvr_sync_rollback_fence_data(PVRSRV_FENCE fence_to_rollback,
+			     void *fence_data_to_rollback)
+{
+	struct sync_file *sync_file = fence_data_to_rollback;
+	struct pvr_fence *pvr_fence;
+
+	if (!sync_file || fence_to_rollback < 0) {
+		pr_err(FILE_NAME ": %s: Invalid fence (%d)\n", __func__,
+			fence_to_rollback);
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	pvr_fence = to_pvr_fence(sync_file->fence);
+	if (!pvr_fence) {
+		pr_err(FILE_NAME
+			": %s: Non-PVR fence (%p)\n",
+			__func__, sync_file->fence);
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	fput(sync_file->file);
+
+	put_unused_fd(fence_to_rollback);
+
+	pvr_fence_destroy(pvr_fence);
+
+	return PVRSRV_OK;
+}
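+
+/* Putting the three callbacks above together, the expected kick-side life
+ * cycle is (an illustrative pseudo-flow, not a verbatim caller; these are
+ * the ops registered with sync_checkpoint.c in pvr_sync_init() below):
+ *
+ *	err = pfnFenceCreate(name, tl, ctx, &fd, &uid, &data, &cp, ...);
+ *	submit the kick referencing cp;
+ *	if (the kick succeeded)
+ *		pfnFenceFinalise(fd, data);      fd_install()s the sync_file
+ *	else
+ *		pfnFenceDataRollback(fd, data);  undoes the FD and the fence
+ */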
+
+/*
+ * This is the function that kick code will call in order to obtain a list of
+ * the PSYNC_CHECKPOINTs for a given PVRSRV_FENCE passed to a kick function.
+ * The OS native sync code will allocate the memory to hold the returned list
+ * of PSYNC_CHECKPOINT ptrs. The caller will free this memory once it has
+ * finished referencing it.
+ *
+ * Input: fence                     The input (check) fence
+ * Output: nr_checkpoints           The number of PSYNC_CHECKPOINT ptrs
+ *                                  returned in the checkpoint_handles
+ *                                  parameter.
+ * Output: fence_uid                Unique ID of the check fence
+ * Input/Output: checkpoint_handles The returned list of PSYNC_CHECKPOINTs.
+ */
+static enum PVRSRV_ERROR
+pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+		       PVRSRV_FENCE fence_to_resolve, u32 *nr_checkpoints,
+		       PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid)
+{
+	PSYNC_CHECKPOINT *checkpoints = NULL;
+	unsigned int i, num_fences, num_used_fences = 0;
+	struct dma_fence **fences = NULL;
+	struct dma_fence *fence;
+	PVRSRV_ERROR err = PVRSRV_OK;
+
+	if (!nr_checkpoints || !checkpoint_handles || !fence_uid) {
+		pr_err(FILE_NAME ": %s: Invalid input checkpoint pointer\n",
+			__func__);
+		err = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+	*nr_checkpoints = 0;
+	*checkpoint_handles = NULL;
+	*fence_uid = 0;
+
+	if (fence_to_resolve < 0)
+		goto err_out;
+
+	fence = sync_file_get_fence(fence_to_resolve);
+	if (!fence) {
+		pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n",
+			__func__, fence_to_resolve);
+		err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+		goto err_out;
+	}
+
+	if (dma_fence_is_array(fence)) {
+		struct dma_fence_array *array = to_dma_fence_array(fence);
+
+		fences = array->fences;
+		num_fences = array->num_fences;
+	} else {
+		fences = &fence;
+		num_fences = 1;
+	}
+
+	checkpoints = kmalloc_array(num_fences, sizeof(PSYNC_CHECKPOINT),
+			      GFP_KERNEL);
+	if (!checkpoints) {
+		err = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_put_fence;
+	}
+	for (i = 0; i < num_fences; i++) {
+		/* Only return the checkpoint if the fence is still active. */
+		if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+			      &fences[i]->flags)) {
+			struct pvr_fence *pvr_fence =
+				pvr_fence_create_from_fence(
+					pvr_sync_data.foreign_fence_context,
+					psSyncCheckpointContext,
+					fences[i],
+					fence_to_resolve,
+					"foreign");
+			if (!pvr_fence) {
+				pr_err(FILE_NAME ": %s: Failed to create fence\n",
+				       __func__);
+				err = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto err_free_checkpoints;
+			}
+			checkpoints[num_used_fences] =
+				pvr_fence_get_checkpoint(pvr_fence);
+			SyncCheckpointTakeRef(checkpoints[num_used_fences]);
+			++num_used_fences;
+			dma_fence_put(&pvr_fence->base);
+		}
+	}
+	/* If we don't return any checkpoints, free the array here, as the
+	 * caller will not.
+	 */
+	if (num_used_fences == 0) {
+		kfree(checkpoints);
+		checkpoints = NULL;
+	}
+
+	*checkpoint_handles = checkpoints;
+	*nr_checkpoints = num_used_fences;
+	*fence_uid = OSGetCurrentClientProcessIDKM();
+	*fence_uid = (*fence_uid << 32) | (fence_to_resolve & U32_MAX);
+
+err_put_fence:
+	dma_fence_put(fence);
+err_out:
+	return err;
+
+err_free_checkpoints:
+	for (i = 0; i < num_used_fences; i++) {
+		if (checkpoints[i])
+			SyncCheckpointDropRef(checkpoints[i]);
+	}
+	kfree(checkpoints);
+	goto err_put_fence;
+}
+
+/*
+ * This is the function that driver code will call in order to request the
+ * sync implementation to output debug information relating to any sync
+ * checkpoints it may have created which appear in the provided array of
+ * FW addresses of Unified Fence Objects (UFOs).
+ *
+ * Input: nr_ufos             The number of FW addresses provided in the
+ *                            vaddrs parameter.
+ * Input: vaddrs              The array of FW addresses of UFOs. The sync
+ *                            implementation should check each of these to
+ *                            see if any relate to sync checkpoints it has
+ *                            created and, where they do, output debug
+ *                            information pertaining to the native/fallback
+ *                            sync with which each is associated.
+ */
+static u32
+pvr_sync_dump_info_on_stalled_ufos(u32 nr_ufos, u32 *vaddrs)
+{
+	return pvr_fence_dump_info_on_stalled_ufos(pvr_sync_data.foreign_fence_context,
+						   nr_ufos,
+						   vaddrs);
+}
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+static enum tag_img_bool pvr_sync_checkpoint_ufo_has_signalled(u32 fwaddr,
+	u32 value)
+{
+	return pvr_fence_checkpoint_ufo_has_signalled(fwaddr, value);
+}
+
+static void pvr_sync_check_state(void)
+{
+	pvr_fence_check_state();
+}
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
+
+static long pvr_sync_ioctl_rename(struct pvr_sync_timeline *timeline,
+	void __user *user_data)
+{
+	int err = 0;
+	struct pvr_sync_rename_ioctl_data data;
+
+	if (!access_ok(user_data, sizeof(data))) {
+		err = -EFAULT;
+		goto err;
+	}
+
+	if (copy_from_user(&data, user_data, sizeof(data))) {
+		err = -EFAULT;
+		goto err;
+	}
+
+	data.szName[sizeof(data.szName) - 1] = '\0';
+	strlcpy(timeline->name, data.szName, sizeof(timeline->name));
+	if (timeline->hw_fence_context)
+		strlcpy(timeline->hw_fence_context->name, data.szName,
+			sizeof(timeline->hw_fence_context->name));
+
+err:
+	return err;
+}
+
+static long pvr_sync_ioctl_force_sw_only(struct pvr_sync_timeline *timeline,
+	void **private_data)
+{
+	/* Already in SW mode? */
+	if (timeline->sw_fence_timeline)
+		return 0;
+
+	/* Create a sw_sync timeline with the old GPU timeline's name */
+	timeline->sw_fence_timeline = pvr_counting_fence_timeline_create(
+		pvr_sync_data.dev_cookie,
+		timeline->name);
+	if (!timeline->sw_fence_timeline)
+		return -ENOMEM;
+
+	timeline->is_sw = true;
+
+	return 0;
+}
+
+static long pvr_sync_ioctl_sw_create_fence(struct pvr_sync_timeline *timeline,
+	void __user *user_data)
+{
+	struct pvr_sw_sync_create_fence_data data;
+	struct sync_file *sync_file;
+	int fd = get_unused_fd_flags(O_CLOEXEC);
+	struct dma_fence *fence;
+	int err = -EFAULT;
+
+	if (fd < 0) {
+		pr_err(FILE_NAME ": %s: Failed to find unused fd (%d)\n",
+		       __func__, fd);
+		err = -EMFILE;
+		goto err_out;
+	}
+
+	if (copy_from_user(&data, user_data, sizeof(data))) {
+		pr_err(FILE_NAME ": %s: Failed copy from user\n", __func__);
+		goto err_put_fd;
+	}
+
+	fence = pvr_counting_fence_create(timeline->sw_fence_timeline, &data.sync_pt_idx);
+	if (!fence) {
+		pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n",
+		       __func__, fd);
+		err = -ENOMEM;
+		goto err_put_fd;
+	}
+
+	sync_file = sync_file_create(fence);
+	if (!sync_file) {
+		pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n",
+			__func__, fd);
+		err = -ENOMEM;
+		goto err_put_fence;
+	}
+
+	data.fence = fd;
+
+	if (copy_to_user(user_data, &data, sizeof(data))) {
+		pr_err(FILE_NAME ": %s: Failed copy to user\n", __func__);
+		/* Drop the sync_file (and its fence reference); it has not
+		 * been installed into the reserved fd yet, so it would
+		 * otherwise leak.
+		 */
+		fput(sync_file->file);
+		goto err_put_fence;
+	}
+
+	fd_install(fd, sync_file->file);
+	err = 0;
+
+	dma_fence_put(fence);
+err_out:
+	return err;
+
+err_put_fence:
+	dma_fence_put(fence);
+err_put_fd:
+	put_unused_fd(fd);
+	goto err_out;
+}
+
+static long pvr_sync_ioctl_sw_inc(struct pvr_sync_timeline *timeline,
+	void __user *user_data)
+{
+	bool res;
+	struct pvr_sw_timeline_advance_data data;
+
+	res = pvr_counting_fence_timeline_inc(timeline->sw_fence_timeline, &data.sync_pt_idx);
+
+	/* pvr_counting_fence_timeline_inc won't allow sw timeline to be
+	 * advanced beyond the last defined point
+	 */
+	if (!res) {
+		pr_err("pvr_sync_file: attempt to advance SW timeline beyond last defined point\n");
+		return -EPERM;
+	}
+
+	if (copy_to_user(user_data, &data, sizeof(data))) {
+		pr_err(FILE_NAME ": %s: Failed copy to user\n", __func__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static long
+pvr_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *user_data = (void __user *)arg;
+	long err = -ENOTTY;
+	struct pvr_sync_timeline *timeline = file->private_data;
+
+	if (!timeline->is_sw) {
+		switch (cmd) {
+		case PVR_SYNC_IOC_RENAME:
+			err = pvr_sync_ioctl_rename(timeline, user_data);
+			break;
+		case PVR_SYNC_IOC_FORCE_SW_ONLY:
+			err = pvr_sync_ioctl_force_sw_only(timeline,
+				&file->private_data);
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (cmd) {
+		case PVR_SW_SYNC_IOC_CREATE_FENCE:
+			err = pvr_sync_ioctl_sw_create_fence(timeline,
+							     user_data);
+			break;
+		case PVR_SW_SYNC_IOC_INC:
+			err = pvr_sync_ioctl_sw_inc(timeline, user_data);
+			break;
+		default:
+			break;
+		}
+	}
+
+	return err;
+}
+
+static const struct file_operations pvr_sync_fops = {
+	.owner          = THIS_MODULE,
+	.open           = pvr_sync_open,
+	.release        = pvr_sync_close,
+	.unlocked_ioctl = pvr_sync_ioctl,
+	.compat_ioctl   = pvr_sync_ioctl,
+};
+
+static struct miscdevice pvr_sync_device = {
+	.minor          = MISC_DYNAMIC_MINOR,
+	.name           = PVRSYNC_MODNAME,
+	.fops           = &pvr_sync_fops,
+};
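+
+/* Expected userspace flow against this misc device (illustrative only; the
+ * ioctl payload structs come from the pvr_sync UAPI headers included above,
+ * and the variable names here are hypothetical):
+ *
+ *	fd = open("/dev/" PVRSYNC_MODNAME, O_RDWR);   one timeline per open()
+ *	ioctl(fd, PVR_SYNC_IOC_RENAME, &rename_data);
+ *	ioctl(fd, PVR_SYNC_IOC_FORCE_SW_ONLY, NULL);  switch to SW mode
+ *	ioctl(fd, PVR_SW_SYNC_IOC_CREATE_FENCE, &create_data);
+ *	ioctl(fd, PVR_SW_SYNC_IOC_INC, &advance_data);
+ */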
+
+static void
+pvr_sync_debug_request_heading(void *data, u32 verbosity,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+		PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, "------[ Native Fence Sync: timelines ]------");
+}
+
+enum PVRSRV_ERROR pvr_sync_init(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct pvr_drm_private *priv = ddev->dev_private;
+	enum PVRSRV_ERROR error;
+	int err;
+
+	error = PVRSRVRegisterDbgRequestNotify(&pvr_sync_data.dbg_request_handle,
+				priv->dev_node,
+				pvr_sync_debug_request_heading,
+				DEBUG_REQUEST_LINUXFENCE,
+				NULL);
+	if (error != PVRSRV_OK) {
+		pr_err("%s: failed to register debug request callback (%s)\n",
+		       __func__, PVRSRVGetErrorString(error));
+		goto err_out;
+	}
+
+	pvr_sync_data.dev_cookie = priv->dev_node;
+	pvr_sync_data.fence_status_wq = priv->fence_status_wq;
+
+	pvr_sync_data.foreign_fence_context =
+		pvr_fence_context_create(pvr_sync_data.dev_cookie,
+					 pvr_sync_data.fence_status_wq,
+					 "foreign_sync");
+	if (!pvr_sync_data.foreign_fence_context) {
+		pr_err(FILE_NAME ": %s: Failed to create foreign sync context\n",
+			__func__);
+		error = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_out;
+	}
+
+#if defined(NO_HARDWARE)
+	spin_lock_init(&pvr_sync_data.pvr_timeline_active_list_lock);
+	INIT_LIST_HEAD(&pvr_sync_data.pvr_timeline_active_list);
+#endif
+
+	/* Register the resolve fence and create fence functions with
+	 * sync_checkpoint.c
+	 * The pvr_fence context registers its own EventObject callback to
+	 * update sync status
+	 */
+	/* Initialise struct and register with sync_checkpoint.c */
+	pvr_sync_data.sync_checkpoint_ops.pfnFenceResolve = pvr_sync_resolve_fence;
+	pvr_sync_data.sync_checkpoint_ops.pfnFenceCreate = pvr_sync_create_fence;
+	pvr_sync_data.sync_checkpoint_ops.pfnFenceDataRollback = pvr_sync_rollback_fence_data;
+	pvr_sync_data.sync_checkpoint_ops.pfnFenceFinalise = pvr_sync_finalise_fence;
+#if defined(NO_HARDWARE)
+	pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = pvr_sync_nohw_signal_fence;
+#else
+	pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = NULL;
+#endif
+	pvr_sync_data.sync_checkpoint_ops.pfnFreeCheckpointListMem = pvr_sync_free_checkpoint_list_mem;
+	pvr_sync_data.sync_checkpoint_ops.pfnDumpInfoOnStalledUFOs = pvr_sync_dump_info_on_stalled_ufos;
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	pvr_sync_data.sync_checkpoint_ops.pfnCheckpointHasSignalled = pvr_sync_checkpoint_ufo_has_signalled;
+	pvr_sync_data.sync_checkpoint_ops.pfnCheckState = pvr_sync_check_state;
+	pvr_sync_data.sync_checkpoint_ops.pfnSignalWaiters = NULL;
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
+	strlcpy(pvr_sync_data.sync_checkpoint_ops.pszImplName, "pvr_sync_file", SYNC_CHECKPOINT_IMPL_MAX_STRLEN);
+
+	SyncCheckpointRegisterFunctions(&pvr_sync_data.sync_checkpoint_ops);
+
+	err = misc_register(&pvr_sync_device);
+	if (err) {
+		pr_err(FILE_NAME ": %s: Failed to register pvr_sync device (%d)\n",
+		       __func__, err);
+		error = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+		goto err_unregister_checkpoint_funcs;
+	}
+	error = PVRSRV_OK;
+
+err_out:
+	return error;
+
+err_unregister_checkpoint_funcs:
+	SyncCheckpointRegisterFunctions(NULL);
+	pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context);
+	goto err_out;
+}
+
+void pvr_sync_deinit(void)
+{
+	SyncCheckpointRegisterFunctions(NULL);
+	misc_deregister(&pvr_sync_device);
+	pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context);
+	PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.dbg_request_handle);
+}
+
+enum PVRSRV_ERROR pvr_sync_fence_wait(void *fence, u32 timeout_in_ms)
+{
+	long timeout = msecs_to_jiffies(timeout_in_ms);
+	int err;
+
+	err = dma_fence_wait_timeout(fence, true, timeout);
+	/*
+	 * dma_fence_wait_timeout returns:
+	 * - the remaining timeout on success
+	 * - 0 on timeout
+	 * - -ERESTARTSYS if interrupted
+	 */
+	if (err > 0)
+		return PVRSRV_OK;
+	else if (err == 0)
+		return PVRSRV_ERROR_TIMEOUT;
+
+	return PVRSRV_ERROR_FAILED_DEPENDENCIES;
+}
+
+enum PVRSRV_ERROR pvr_sync_fence_release(void *fence)
+{
+	dma_fence_put(fence);
+
+	return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR pvr_sync_fence_get(int fence_fd, void **fence_out)
+{
+	struct dma_fence *fence;
+
+	fence = sync_file_get_fence(fence_fd);
+	if (fence == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	*fence_out = fence;
+
+	return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR pvr_sync_sw_timeline_fence_create(int timeline_fd,
+						    const char *fence_name,
+						    int *fence_fd_out,
+						    u64 *sync_pt_idx)
+{
+	enum PVRSRV_ERROR srv_err;
+	struct pvr_sync_timeline *timeline;
+	struct dma_fence *fence = NULL;
+	struct sync_file *sync_file = NULL;
+	int fd;
+
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0)
+		return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+
+	timeline = pvr_sync_timeline_fget(timeline_fd);
+	if (!timeline) {
+		/* unrecognised timeline */
+		srv_err = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+		goto err_put_fd;
+	}
+
+	fence = pvr_counting_fence_create(timeline->sw_fence_timeline, sync_pt_idx);
+	pvr_sync_timeline_fput(timeline);
+	if (!fence) {
+		srv_err = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_put_fd;
+	}
+
+	sync_file = sync_file_create(fence);
+	if (!sync_file) {
+		srv_err = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_put_fence;
+	}
+
+	fd_install(fd, sync_file->file);
+
+	*fence_fd_out = fd;
+
+	return PVRSRV_OK;
+
+err_put_fence:
+	dma_fence_put(fence);
+err_put_fd:
+	put_unused_fd(fd);
+	return srv_err;
+}
+
+enum PVRSRV_ERROR pvr_sync_sw_timeline_advance(void *timeline, u64 *sync_pt_idx)
+{
+	if (timeline == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	pvr_counting_fence_timeline_inc(timeline, sync_pt_idx);
+
+	return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR pvr_sync_sw_timeline_release(void *timeline)
+{
+	if (timeline == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	pvr_counting_fence_timeline_put(timeline);
+
+	return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR pvr_sync_sw_timeline_get(int timeline_fd,
+					   void **timeline_out)
+{
+	struct pvr_counting_fence_timeline *sw_timeline;
+	struct pvr_sync_timeline *timeline;
+
+	timeline = pvr_sync_timeline_fget(timeline_fd);
+	if (!timeline)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	sw_timeline =
+		pvr_counting_fence_timeline_get(timeline->sw_fence_timeline);
+	pvr_sync_timeline_fput(timeline);
+	if (!sw_timeline)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	*timeline_out = sw_timeline;
+
+	return PVRSRV_OK;
+}
+
+static void _dump_sync_point(struct dma_fence *fence,
+			     DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+			     void *dump_debug_file)
+{
+	const struct dma_fence_ops *fence_ops = fence->ops;
+	bool signaled = dma_fence_is_signaled(fence);
+	char time[16] = { '\0' };
+
+	fence_ops->timeline_value_str(fence, time, sizeof(time));
+
+	PVR_DUMPDEBUG_LOG(dump_debug_printf,
+			  dump_debug_file,
+			  "<%p> Seq#=%u TS=%s State=%s TLN=%s",
+			  fence,
+			  fence->seqno,
+			  time,
+			  (signaled) ? "Signalled" : "Active",
+			  fence_ops->get_timeline_name(fence));
+}
+
+static void _dump_fence(struct dma_fence *fence,
+			DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+			void *dump_debug_file)
+{
+	if (dma_fence_is_array(fence)) {
+		struct dma_fence_array *fence_array = to_dma_fence_array(fence);
+		int i;
+
+		PVR_DUMPDEBUG_LOG(dump_debug_printf,
+				  dump_debug_file,
+				  "Fence: [%p] Sync Points:\n",
+				  fence_array);
+
+		for (i = 0; i < fence_array->num_fences; i++)
+			_dump_sync_point(fence_array->fences[i],
+					 dump_debug_printf,
+					 dump_debug_file);
+
+	} else {
+		_dump_sync_point(fence, dump_debug_printf, dump_debug_file);
+	}
+}
+
+enum PVRSRV_ERROR
+sync_dump_fence(void *sw_fence_obj,
+		DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+		void *dump_debug_file)
+{
+	struct dma_fence *fence = (struct dma_fence *) sw_fence_obj;
+
+	_dump_fence(fence, dump_debug_printf, dump_debug_file);
+
+	return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR
+sync_sw_dump_timeline(void *sw_timeline_obj,
+		      DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+		      void *dump_debug_file)
+{
+	pvr_counting_fence_timeline_dump_timeline(sw_timeline_obj,
+						  dump_debug_printf,
+						  dump_debug_file);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_uaccess.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_uaccess.h
new file mode 100644
index 0000000..05fcd12
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvr_uaccess.h
@@ -0,0 +1,99 @@
+/*************************************************************************/ /*!
+@File
+@Title          Utility functions for user space access
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __PVR_UACCESS_H__
+#define __PVR_UACCESS_H__
+
+#include <linux/uaccess.h>
+#include <linux/version.h>
+
+static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
+	if (access_ok(VERIFY_WRITE, pvTo, ulBytes))
+#else
+	if (access_ok(pvTo, ulBytes))
+#endif
+	{
+		return __copy_to_user(pvTo, pvFrom, ulBytes);
+	}
+
+	return ulBytes;
+}
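+
+/* A minimal usage sketch (hypothetical caller): any non-zero return means
+ * not all bytes were copied, mirroring copy_to_user() semantics:
+ *
+ *	if (pvr_copy_to_user(pvUserBuf, &sInfo, sizeof(sInfo)) != 0)
+ *		return PVRSRV_ERROR_INVALID_PARAMS;
+ */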
+
+
+#if defined(__KLOCWORK__)
+	/* This part only tells Klocwork not to report a false positive: it
+	 * doesn't understand that pvr_copy_from_user initialises the memory
+	 * pointed to by pvTo.
+	 */
+#include <linux/string.h> /* get the memset prototype */
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+	if (pvTo != NULL)
+	{
+		memset(pvTo, 0xAA, ulBytes);
+		return 0;
+	}
+	return 1;
+}
+
+#else /* real implementation */
+
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+	/*
+	 * The compile time correctness checking introduced for copy_from_user in
+	 * Linux 2.6.33 isn't fully compatible with our usage of the function.
+	 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
+	if (access_ok(VERIFY_READ, pvFrom, ulBytes))
+#else
+	if (access_ok(pvFrom, ulBytes))
+#endif
+	{
+		return __copy_from_user(pvTo, pvFrom, ulBytes);
+	}
+
+	return ulBytes;
+}
+#endif /* klocworks */
+
+#endif /* __PVR_UACCESS_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrmodule.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrmodule.h
new file mode 100644
index 0000000..267c7b6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrmodule.h
@@ -0,0 +1,48 @@
+/*************************************************************************/ /*!
+@Title          Module Author and License.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef	_PVRMODULE_H_
+#define	_PVRMODULE_H_
+
+MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+MODULE_LICENSE("Dual MIT/GPL");
+
+#endif	/* _PVRMODULE_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv.c
new file mode 100644
index 0000000..6cc32ab
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv.c
@@ -0,0 +1,4078 @@
+/*************************************************************************/ /*!
+@File
+@Title          core services functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for core services functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgxdebug.h"
+#include "handle.h"
+#include "connection_server.h"
+#include "osconnection_server.h"
+#include "pdump_km.h"
+#include "ra.h"
+#include "allocmem.h"
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "srvcore.h"
+#include "services_km.h"
+#include "pvrsrv_device.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "sync.h"
+#include "sync_server.h"
+#include "sync_checkpoint.h"
+#include "sync_fallback_server.h"
+#include "sync_checkpoint_init.h"
+#include "devicemem.h"
+#include "cache_km.h"
+#include "info_page.h"
+#include "info_page_defs.h"
+#include "pvrsrv_bridge_init.h"
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+#include "devicemem_server.h"
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+#include "log2.h"
+
+#include "lists.h"
+#include "dllist.h"
+#include "syscommon.h"
+#include "sysvalidation.h"
+
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+#include "physmem_hostmem.h"
+
+#include "tlintern.h"
+#include "htbserver.h"
+
+#if defined(SUPPORT_RGX)
+#include "rgxinit.h"
+#include "rgxhwperf.h"
+#include "rgxfwutils.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "ri_server.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	#if !defined(GPUVIRT_SIZEOF_ARENA0)
+		#define GPUVIRT_SIZEOF_ARENA0	(64 * 1024 * 1024) /* 64 MiB of LMA memory for arena 0, for firmware and other allocations */
+	#endif
+#endif
+
+#include "devicemem_history_server.h"
+
+#if defined(PVR_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+
+#include "rgx_options.h"
+#include "srvinit.h"
+#include "rgxutils.h"
+
+#include "oskm_apphint.h"
+#include "pvrsrv_apphint.h"
+
+#include "rgx_bvnc_defs_km.h"
+
+#include "pvrsrv_tlstreams.h"
+#include "tlstream.h"
+
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
+#include "physmem_test.h"
+#endif
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+#define INFINITE_SLEEP_TIMEOUT 0ULL
+#endif
+
+/*! Wait 100ms before retrying deferred clean-up */
+#define CLEANUP_THREAD_WAIT_RETRY_TIMEOUT 100000ULL
+
+/*! Wait 8hrs when no deferred clean-up is required, which still allows a
+ * poll several times a day to check for any missed clean-up. */
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT INFINITE_SLEEP_TIMEOUT
+#else
+#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT 28800000000ULL
+#endif
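+
+/* The timeouts above are in microseconds, consistent with the 100ms value:
+ * 100000ULL us = 100 ms, and 28800000000ULL us = 8 * 60 * 60 * 1000000 us
+ * = 8 hours.
+ */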
+
+#if !defined(EMULATOR) && !defined(VIRTUAL_PLATFORM)
+#if defined(PVRSRV_STALLED_CCB_ACTION) && (DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT > 5000)
+/* Warn if DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT is too large for SLR to be effective. */
+#warning The value defined for DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT is too large for Sync Lockup Recovery(SLR) to be effective. Please refer to the System Porting Guide.
+#endif
+#endif
+
+/*! When unloading try a few times to free everything remaining on the list */
+#define CLEANUP_THREAD_UNLOAD_RETRY 4
+
+#define PVRSRV_PROC_HANDLE_BASE_INIT 10
+
+#define PVRSRV_TL_CTLR_STREAM_SIZE 4096
+
+static PVRSRV_DATA	*gpsPVRSRVData;
+static IMG_UINT32 g_ui32InitFlags;
+
+/* mark which parts of Services were initialised */
+#define		INIT_DATA_ENABLE_PDUMPINIT	0x1U
+
+static IMG_UINT32 g_aui32DebugOrderTable[] = {
+	DEBUG_REQUEST_SYS,
+	DEBUG_REQUEST_APPHINT,
+	DEBUG_REQUEST_HTB,
+	DEBUG_REQUEST_DC,
+	DEBUG_REQUEST_SYNCCHECKPOINT,
+	DEBUG_REQUEST_SERVERSYNC,
+	DEBUG_REQUEST_ANDROIDSYNC,
+	DEBUG_REQUEST_FALLBACKSYNC,
+	DEBUG_REQUEST_LINUXFENCE
+};
+
+static PVRSRV_ERROR _VzDeviceCreate(PVRSRV_DEVICE_NODE *psDeviceNode);
+static void _VzDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode);
+static PVRSRV_ERROR _VzConstructRAforFwHeap(RA_ARENA **ppsArena, IMG_CHAR *szName,
+											IMG_UINT64 uBase, RA_LENGTH_T uSize);
+static void _VzTearDownRAforFwHeap(RA_ARENA **ppsArena, IMG_UINT64 uBase);
+
+/* Callback to dump info of cleanup thread in debug_dump */
+static void CleanupThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+                                  void *pvDumpDebugFile)
+{
+	PVRSRV_DATA *psPVRSRVData;
+	psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	PVR_DUMPDEBUG_LOG("    Number of deferred cleanup items : %u",
+			  OSAtomicRead(&psPVRSRVData->i32NumCleanupItems));
+}
+
+/* Add work to the cleanup thread work list.
+ * The work item will be executed by the cleanup thread
+ */
+void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData)
+{
+	PVRSRV_DATA *psPVRSRVData;
+	PVRSRV_ERROR eError;
+
+	psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	PVR_ASSERT(psData != NULL);
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK || psPVRSRVData->bUnload)
+#else
+	if (psPVRSRVData->bUnload)
+#endif
+	{
+		CLEANUP_THREAD_FN pfnFree = psData->pfnFree;
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Cleanup thread has already quit: doing work immediately"));
+
+		eError = pfnFree(psData->pvData);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to free resource "
+						"(callback " IMG_PFN_FMTSPEC "). "
+						"Immediate free will not be retried.",
+						pfnFree));
+		}
+	}
+	else
+	{
+		/* add this work item to the list */
+		OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+		dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, &psData->sNode);
+		OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+		OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItems);
+
+		/* signal the cleanup thread to ensure this item gets processed */
+		eError = OSEventObjectSignal(psPVRSRVData->hCleanupEventObject);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+	}
+}
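+
+/* A hedged sketch of how a caller queues deferred clean-up work (field
+ * names are taken from the uses above; the callback, payload, and retry
+ * count are hypothetical):
+ *
+ *	psWork->pfnFree = _MyDeferredFree;    CLEANUP_THREAD_FN callback
+ *	psWork->pvData = psMyObject;          passed back to pfnFree()
+ *	psWork->bDependsOnHW = IMG_FALSE;     retry off the global event object?
+ *	psWork->ui32RetryCount = 3;           attempts before giving up
+ *	PVRSRVCleanupThreadAddWork(psWork);
+ */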
+
+/* Pop an item from the head of the cleanup thread work list */
+static INLINE DLLIST_NODE *_CleanupThreadWorkListPop(PVRSRV_DATA *psPVRSRVData)
+{
+	DLLIST_NODE *psNode;
+
+	OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+	psNode = dllist_get_next_node(&psPVRSRVData->sCleanupThreadWorkList);
+	if (psNode != NULL)
+	{
+		dllist_remove_node(psNode);
+	}
+	OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+	return psNode;
+}
+
+/* Process the cleanup thread work list */
+static IMG_BOOL _CleanupThreadProcessWorkList(PVRSRV_DATA *psPVRSRVData,
+                                              IMG_BOOL *pbUseGlobalEO)
+{
+	DLLIST_NODE *psNodeIter, *psNodeLast;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bNeedRetry = IMG_FALSE;
+
+	/* Any callback function that returns an error is moved to the back
+	 * of the list, and additional items can be added to the list at any
+	 * time, so we only iterate from the head of the list to the current
+	 * tail (the tail may keep changing while we run).
+	 */
+
+	OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+	psNodeLast = psPVRSRVData->sCleanupThreadWorkList.psPrevNode;
+	OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+	do
+	{
+		PVRSRV_CLEANUP_THREAD_WORK *psData;
+
+		psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData);
+
+		if (psNodeIter != NULL)
+		{
+			CLEANUP_THREAD_FN pfnFree;
+
+			psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode);
+
+			/* get the function pointer address here so we have access to it
+			 * in order to report the error in case of failure, without having
+			 * to depend on psData not having been freed
+			 */
+			pfnFree = psData->pfnFree;
+
+			*pbUseGlobalEO = psData->bDependsOnHW;
+			eError = pfnFree(psData->pvData);
+
+			if (eError != PVRSRV_OK)
+			{
+				IMG_BOOL bRetryThisItem = IMG_FALSE;
+
+				/* Move this item to the back of the list if
+				 * its retry count/timeout hasn't been reached.
+				 */
+				if (CLEANUP_THREAD_IS_RETRY_TIMEOUT(psData))
+				{
+					if (CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(psData))
+					{
+						bRetryThisItem = IMG_TRUE;
+					}
+				}
+				else
+				{
+					if (psData->ui32RetryCount-- > 0)
+					{
+						bRetryThisItem = IMG_TRUE;
+					}
+				}
+
+				/* Track the retry decision per item, so an
+				 * item that still has retries left cannot
+				 * re-queue items whose retry budget is
+				 * already exhausted.
+				 */
+				if (bRetryThisItem)
+				{
+					bNeedRetry = IMG_TRUE;
+					OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+					dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, psNodeIter);
+					OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "Failed to free resource "
+								"(callback " IMG_PFN_FMTSPEC "). "
+								"Retry limit reached",
+								pfnFree));
+				}
+			}
+			else
+			{
+				OSAtomicDecrement(&psPVRSRVData->i32NumCleanupItems);
+			}
+		}
+	} while ((psNodeIter != NULL) && (psNodeIter != psNodeLast));
+
+	return bNeedRetry;
+}
+
+/* #define CLEANUP_DPFL PVR_DBG_WARNING */
+#define CLEANUP_DPFL    PVR_DBG_MESSAGE
+
+/* Create/initialise data required by the cleanup thread,
+ * before the cleanup thread is started
+ */
+static PVRSRV_ERROR _CleanupThreadPrepare(PVRSRV_DATA *psPVRSRVData)
+{
+	PVRSRV_ERROR eError;
+
+	/* Create the clean up event object */
+
+	eError = OSEventObjectCreate("PVRSRV_CLEANUP_EVENTOBJECT", &psPVRSRVData->hCleanupEventObject);
+	PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", Exit);
+
+	/* initialise the mutex and linked list required for the cleanup thread work list */
+
+	eError = OSLockCreate(&psPVRSRVData->hCleanupThreadWorkListLock);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", Exit);
+
+	dllist_init(&psPVRSRVData->sCleanupThreadWorkList);
+
+Exit:
+	return eError;
+}
+
+static void CleanupThread(void *pvData)
+{
+	PVRSRV_DATA *psPVRSRVData = pvData;
+	IMG_BOOL     bRetryWorkList = IMG_FALSE;
+	IMG_HANDLE	 hGlobalEvent;
+	IMG_HANDLE	 hOSEvent;
+	PVRSRV_ERROR eRc;
+	IMG_BOOL bUseGlobalEO = IMG_FALSE;
+	IMG_UINT32 uiUnloadRetry = 0;
+
+	/* Store the process id (pid) of the clean-up thread */
+	psPVRSRVData->cleanupThreadPid = OSGetCurrentProcessID();
+	OSAtomicWrite(&psPVRSRVData->i32NumCleanupItems, 0);
+
+	PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread starting... "));
+
+	/* Open an event on the clean up event object so we can listen on it,
+	 * abort the clean up thread and driver if this fails.
+	 */
+	eRc = OSEventObjectOpen(psPVRSRVData->hCleanupEventObject, &hOSEvent);
+	PVR_ASSERT(eRc == PVRSRV_OK);
+
+	eRc = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hGlobalEvent);
+	PVR_ASSERT(eRc == PVRSRV_OK);
+
+	/* While the driver is in a good state and is not being unloaded
+	 * try to free any deferred items when signalled
+	 */
+	while (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
+	{
+		IMG_HANDLE hEvent;
+
+		if (psPVRSRVData->bUnload)
+		{
+			if (dllist_is_empty(&psPVRSRVData->sCleanupThreadWorkList) ||
+					uiUnloadRetry > CLEANUP_THREAD_UNLOAD_RETRY)
+			{
+				break;
+			}
+			uiUnloadRetry++;
+		}
+
+		/* Wait until signalled for deferred clean up OR wait for a
+		 * short period if the previous deferred clean up was not able
+		 * to release all the resources before trying again.
+		 * Bridge lock re-acquired on our behalf before the wait call returns.
+		 */
+
+		if (bRetryWorkList && bUseGlobalEO)
+		{
+			hEvent = hGlobalEvent;
+		}
+		else
+		{
+			hEvent = hOSEvent;
+		}
+
+		eRc = OSEventObjectWaitKernel(hEvent,
+				bRetryWorkList ?
+				CLEANUP_THREAD_WAIT_RETRY_TIMEOUT :
+				CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT);
+		if (eRc == PVRSRV_ERROR_TIMEOUT)
+		{
+			PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait timeout"));
+		}
+		else if (eRc == PVRSRV_OK)
+		{
+			PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait OK, signal received"));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "CleanupThread: wait error %d", eRc));
+		}
+
+		bRetryWorkList = _CleanupThreadProcessWorkList(psPVRSRVData, &bUseGlobalEO);
+	}
+
+	OSLockDestroy(psPVRSRVData->hCleanupThreadWorkListLock);
+
+	eRc = OSEventObjectClose(hOSEvent);
+	PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose");
+
+	eRc = OSEventObjectClose(hGlobalEvent);
+	PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose");
+
+	PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread ending... "));
+}
+
+static IMG_BOOL DevicesWatchdogThread_Powered_Any(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_DEV_POWER_STATE ePowerState = PVRSRV_DEV_POWER_STATE_ON;
+	PVRSRV_ERROR eError;
+
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)
+		{
+			/* Power lock cannot be acquired at this time (sys power is off) */
+			return IMG_FALSE;
+		}
+
+		/* Any other error is unexpected so we assume the device is on */
+		PVR_DPF((PVR_DBG_ERROR,
+				 "DevicesWatchdogThread: Failed to acquire power lock for device %p (%s)",
+				 psDeviceNode, PVRSRVGetErrorString(eError)));
+		return IMG_TRUE;
+	}
+
+	(void) PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	return (ePowerState == PVRSRV_DEV_POWER_STATE_ON) ? IMG_TRUE : IMG_FALSE;
+}
+
+static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode,
+											  va_list va)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	PVRSRV_DEVICE_HEALTH_STATUS *pePreviousHealthStatus, eHealthStatus;
+	PVRSRV_ERROR eError;
+
+	pePreviousHealthStatus = va_arg(va, PVRSRV_DEVICE_HEALTH_STATUS *);
+
+	if (psDeviceNode->pfnUpdateHealthStatus != NULL)
+	{
+		eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_TRUE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "DevicesWatchdogThread: "
+					 "Could not check for fatal error (%d)!",
+					 eError));
+		}
+	}
+	eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus);
+
+	if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_OK)
+	{
+		if (eHealthStatus != *pePreviousHealthStatus)
+		{
+			if (!(psDevInfo->ui32DeviceFlags &
+				  RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN))
+			{
+				PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: "
+						 "Device status not OK!!!"));
+				PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
+								   NULL, NULL);
+			}
+		}
+	}
+
+	*pePreviousHealthStatus = eHealthStatus;
+}
+
+#if defined(SUPPORT_RGX)
+static void HWPerfPeriodicHostEventsThread(void *pvData)
+{
+	PVRSRV_DATA *psPVRSRVData = pvData;
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bHostStreamIsOpenForReading;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	eError = OSEventObjectOpen(psPVRSRVData->hHWPerfHostPeriodicEvObj, &hOSEvent);
+	PVR_LOGRN_IF_ERROR(eError, "OSEventObjectOpen");
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) &&
+			!psPVRSRVData->bUnload && !psPVRSRVData->bHWPerfHostThreadStop)
+#else
+	while (!psPVRSRVData->bUnload && !psPVRSRVData->bHWPerfHostThreadStop)
+#endif
+	{
+		eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)psPVRSRVData->ui32HWPerfHostThreadTimeout * 1000);
+		if (eError == PVRSRV_OK && (psPVRSRVData->bUnload || psPVRSRVData->bHWPerfHostThreadStop))
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "HWPerfPeriodicHostEventsThread: Shutdown event received."));
+			break;
+		}
+
+		psDevInfo = (PVRSRV_RGXDEV_INFO*)psPVRSRVData->psDeviceNodeList->pvDevice;
+
+		/* Check if the HWPerf host stream is open for reading before writing a packet,
+		   this covers cases where the event filter is not zeroed before a reader disconnects. */
+		bHostStreamIsOpenForReading = TLStreamIsOpenForReading(psDevInfo->hHWPerfHostStream);
+
+		if (bHostStreamIsOpenForReading)
+		{
+#if defined(SUPPORT_RGX)
+			RGXSRV_HWPERF_HOST_INFO(psPVRSRVData->psDeviceNodeList->pvDevice, RGX_HWPERF_INFO_EV_MEM_USAGE);
+#endif
+		}
+		else
+		{
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+			psPVRSRVData->ui32HWPerfHostThreadTimeout = INFINITE_SLEEP_TIMEOUT;
+#else
+			/* This 'long' timeout is temporary until functionality is added to services to put a thread to sleep indefinitely. */
+			psPVRSRVData->ui32HWPerfHostThreadTimeout = 60 * 60 * 8 * 1000; /* 8 hours */
+#endif
+		}
+	}
+
+	eError = OSEventObjectClose(hOSEvent);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+#endif
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+
+typedef enum
+{
+	DWT_ST_INIT,
+	DWT_ST_SLEEP_POWERON,
+	DWT_ST_SLEEP_POWEROFF,
+	DWT_ST_SLEEP_DEFERRED,
+	DWT_ST_FINAL
+} DWT_STATE;
+
+typedef enum
+{
+	DWT_SIG_POWERON,
+	DWT_SIG_POWEROFF,
+	DWT_SIG_TIMEOUT,
+	DWT_SIG_UNLOAD,
+	DWT_SIG_ERROR
+} DWT_SIGNAL;
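+
+/* Sleep-state machine driven by DevicesWatchdogThread() below (a summary of
+ * the transitions implemented there; the switch statements are authoritative):
+ *   INIT           -> SLEEP_POWERON if any device is powered, else SLEEP_POWEROFF
+ *   SLEEP_POWERON  -> SLEEP_DEFERRED on power-off, FINAL on unload
+ *   SLEEP_DEFERRED -> SLEEP_POWERON on power-on, SLEEP_POWEROFF on timeout,
+ *                     FINAL on unload; the extra state gives the health check
+ *                     one more short-sleep iteration before the long sleep
+ *   SLEEP_POWEROFF -> SLEEP_POWERON on power-on, FINAL on unload */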
+
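+/* IMG_TRUE if any device on the list currently reports power state ON. */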
+static inline IMG_BOOL _DwtIsPowerOn(PVRSRV_DATA *psPVRSRVData)
+{
+	return List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList,
+	                                         DevicesWatchdogThread_Powered_Any);
+}
+
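+/* Run the per-device health-status check across the whole device list. */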
+static inline void _DwtCheckHealthStatus(PVRSRV_DATA *psPVRSRVData,
+                                         PVRSRV_DEVICE_HEALTH_STATUS *peStatus)
+{
+	List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+	                                   DevicesWatchdogThread_ForEachVaCb,
+	                                   peStatus);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+	SysPrintAndResetFaultStatusRegister();
+#endif
+}
+
+static DWT_SIGNAL _DwtWait(PVRSRV_DATA *psPVRSRVData, IMG_HANDLE hOSEvent,
+                           IMG_UINT32 ui32Timeout)
+{
+	PVRSRV_ERROR eError;
+
+	eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64) ui32Timeout * 1000);
+
+#ifdef PVR_TESTING_UTILS
+	psPVRSRVData->ui32DevicesWdWakeupCounter++;
+#endif
+
+	if (eError == PVRSRV_OK)
+	{
+		if (psPVRSRVData->bUnload)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event"
+			        " received."));
+			return DWT_SIG_UNLOAD;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state "
+			        "change event received."));
+
+			if (_DwtIsPowerOn(psPVRSRVData))
+			{
+				return DWT_SIG_POWERON;
+			}
+			else
+			{
+				return DWT_SIG_POWEROFF;
+			}
+		}
+	}
+	else if (eError == PVRSRV_ERROR_TIMEOUT)
+	{
+		return DWT_SIG_TIMEOUT;
+	}
+
+	PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: Error (%d) when"
+	        " waiting for event!", eError));
+	return DWT_SIG_ERROR;
+}
+
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+
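+/* Watchdog thread entry point. While any device is powered on it polls device
+ * health at DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT intervals; once all
+ * devices are off it backs off to a longer (or, with
+ * PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP, indefinite) sleep, waking early on
+ * power-state-change or unload events signalled on hDevicesWatchdogEvObj. */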
+static void DevicesWatchdogThread(void *pvData)
+{
+	PVRSRV_DATA *psPVRSRVData = pvData;
+	PVRSRV_DEVICE_HEALTH_STATUS ePreviousHealthStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR eError;
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+	DWT_STATE eState = DWT_ST_INIT;
+	const IMG_UINT32 ui32OnTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+	const IMG_UINT32 ui32OffTimeout = INFINITE_SLEEP_TIMEOUT;
+#else
+	IMG_UINT32 ui32Timeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+	/* Flag used to defer the sleep timeout change by 1 loop iteration.
+	 * This helps to ensure at least two health checks are performed before a long sleep.
+	 */
+	IMG_BOOL bDoDeferredTimeoutChange = IMG_FALSE;
+#endif
+
+	PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power off sleep time: %d.",
+			DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT));
+
+	/* Open an event on the devices watchdog event object so we can listen on it
+	   and abort the devices watchdog thread. */
+	eError = OSEventObjectOpen(psPVRSRVData->hDevicesWatchdogEvObj, &hOSEvent);
+	PVR_LOGRN_IF_ERROR(eError, "OSEventObjectOpen");
+
+	/* Loop continuously checking the device status every few seconds. */
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) &&
+			!psPVRSRVData->bUnload)
+#else
+	while (!psPVRSRVData->bUnload)
+#endif
+	{
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+
+		switch (eState)
+		{
+			case DWT_ST_INIT:
+			{
+				if (_DwtIsPowerOn(psPVRSRVData))
+				{
+					eState = DWT_ST_SLEEP_POWERON;
+				}
+				else
+				{
+					eState = DWT_ST_SLEEP_POWEROFF;
+				}
+
+				break;
+			}
+			case DWT_ST_SLEEP_POWERON:
+			{
+				DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent,
+				                              ui32OnTimeout);
+
+				switch (eSignal)
+				{
+					case DWT_SIG_POWERON:
+						/* self-transition, nothing to do */
+						break;
+					case DWT_SIG_POWEROFF:
+						eState = DWT_ST_SLEEP_DEFERRED;
+						break;
+					case DWT_SIG_TIMEOUT:
+						_DwtCheckHealthStatus(psPVRSRVData,
+						                      &ePreviousHealthStatus);
+						/* self-transition */
+						break;
+					case DWT_SIG_UNLOAD:
+						eState = DWT_ST_FINAL;
+						break;
+					case DWT_SIG_ERROR:
+						/* deliberately ignored */
+						break;
+				}
+
+				break;
+			}
+			case DWT_ST_SLEEP_POWEROFF:
+			{
+				DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent,
+				                              ui32OffTimeout);
+
+				switch (eSignal)
+				{
+					case DWT_SIG_POWERON:
+						eState = DWT_ST_SLEEP_POWERON;
+						_DwtCheckHealthStatus(psPVRSRVData,
+						                      &ePreviousHealthStatus);
+						break;
+					case DWT_SIG_POWEROFF:
+						/* self-transition, nothing to do */
+						break;
+					case DWT_SIG_TIMEOUT:
+						/* self-transition */
+						_DwtCheckHealthStatus(psPVRSRVData,
+						                      &ePreviousHealthStatus);
+						break;
+					case DWT_SIG_UNLOAD:
+						eState = DWT_ST_FINAL;
+						break;
+					case DWT_SIG_ERROR:
+						/* deliberately ignored */
+						break;
+				}
+
+				break;
+			}
+			case DWT_ST_SLEEP_DEFERRED:
+			{
+				DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent,
+				                              ui32OnTimeout);
+
+				switch (eSignal)
+				{
+					case DWT_SIG_POWERON:
+						eState = DWT_ST_SLEEP_POWERON;
+						_DwtCheckHealthStatus(psPVRSRVData,
+						                      &ePreviousHealthStatus);
+						break;
+					case DWT_SIG_POWEROFF:
+						/* self-transition, nothing to do */
+						break;
+					case DWT_SIG_TIMEOUT:
+						eState = DWT_ST_SLEEP_POWEROFF;
+						_DwtCheckHealthStatus(psPVRSRVData,
+						                      &ePreviousHealthStatus);
+						break;
+					case DWT_SIG_UNLOAD:
+						eState = DWT_ST_FINAL;
+						break;
+					case DWT_SIG_ERROR:
+						/* deliberately ignored */
+						break;
+				}
+
+				break;
+			}
+			case DWT_ST_FINAL:
+				/* The loop terminates on the next iteration once this state
+				 * is reached, so there is nothing to do here. */
+				break;
+		}
+
+#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+		IMG_BOOL bPwrIsOn = IMG_FALSE;
+
+		/* Wait time between polls (done at the start of the loop to allow devices
+		   to initialise) or for the event signal (shutdown or power on). */
+		eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000);
+
+#ifdef PVR_TESTING_UTILS
+		psPVRSRVData->ui32DevicesWdWakeupCounter++;
+#endif
+		if (eError == PVRSRV_OK)
+		{
+			if (psPVRSRVData->bUnload)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event received."));
+				break;
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state change event received."));
+			}
+		}
+		else if (eError != PVRSRV_ERROR_TIMEOUT)
+		{
+			/* If timeout do nothing otherwise print warning message. */
+			PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: "
+					"Error (%d) when waiting for event!", eError));
+		}
+
+		bPwrIsOn = List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList,
+														DevicesWatchdogThread_Powered_Any);
+
+		if (bPwrIsOn || psPVRSRVData->ui32DevicesWatchdogPwrTrans)
+		{
+			psPVRSRVData->ui32DevicesWatchdogPwrTrans = 0;
+			ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+			bDoDeferredTimeoutChange = IMG_FALSE;
+		}
+		else
+		{
+			/* First, check if the previous loop iteration signalled a need to change the timeout period */
+			if (bDoDeferredTimeoutChange)
+			{
+				ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT;
+				bDoDeferredTimeoutChange = IMG_FALSE;
+			}
+			else
+			{
+				/* Signal that we need to change the sleep timeout in the next
+				 * loop iteration. This allows the device health check code a
+				 * further iteration at the current sleep timeout, so it can
+				 * detect bad health (e.g. a stalled cCCB) by comparing past
+				 * and current state snapshots. */
+				bDoDeferredTimeoutChange = IMG_TRUE;
+			}
+		}
+
+		List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+										   DevicesWatchdogThread_ForEachVaCb,
+										   &ePreviousHealthStatus);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+		SysPrintAndResetFaultStatusRegister();
+#endif
+
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+	}
+
+	eError = OSEventObjectClose(hOSEvent);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+
+PVRSRV_DATA *PVRSRVGetPVRSRVData(void)
+{
+	return gpsPVRSRVData;
+}
+
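+/* Create the synthetic "host memory" device node: a node backed by a single
+ * UMA heap and not tied to any real GPU device, used for allocations such as
+ * the TL control stream created later in PVRSRVDriverInit(). */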
+static PVRSRV_ERROR _HostMemDeviceCreate(void)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	PVRSRV_DEVICE_CONFIG *psDevConfig = HostMemGetDeviceConfig();
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* This assert ensures the host memory device isn't already created
+	 * and that the data is initialised */
+	PVR_ASSERT(psPVRSRVData->psHostMemDeviceNode == NULL);
+
+	/* For now, we only support a single (UMA) heap config for the host device */
+	PVR_ASSERT(psDevConfig->ui32PhysHeapCount == 1 &&
+				psDevConfig->pasPhysHeaps[0].eType == PHYS_HEAP_TYPE_UMA);
+
+	/* N.B. In case of any failure in this function, we just return the error
+	   to the caller, as clean-up is taken care of by _HostMemDeviceDestroy() */
+
+	psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode));
+	PVR_LOGR_IF_NOMEM(psDeviceNode, "OSAllocZMem");
+
+	/* Save the returned pointer early to aid clean-up */
+	psPVRSRVData->psHostMemDeviceNode = psDeviceNode;
+
+	psDeviceNode->psDevConfig = psDevConfig;
+	psDeviceNode->papsRegisteredPhysHeaps =
+		OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) *
+					psDevConfig->ui32PhysHeapCount);
+	PVR_LOGR_IF_NOMEM(psDeviceNode->papsRegisteredPhysHeaps, "OSAllocZMem");
+
+	eError = PhysHeapRegister(&psDevConfig->pasPhysHeaps[0],
+								  &psDeviceNode->papsRegisteredPhysHeaps[0]);
+	PVR_LOGR_IF_ERROR(eError, "PhysHeapRegister");
+	psDeviceNode->ui32RegisteredPhysHeaps = 1;
+
+	/* Only CPU local heap is valid on host-mem DevNode, so enable minimal callbacks */
+	eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL],
+							 &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]);
+	PVR_LOGR_IF_ERROR(eError, "PhysHeapAcquire");
+
+	psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+
+	return PVRSRV_OK;
+}
+
+static void _HostMemDeviceDestroy(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psHostMemDeviceNode;
+
+	if (!psDeviceNode)
+	{
+		return;
+	}
+
+	psPVRSRVData->psHostMemDeviceNode = NULL;
+	if (psDeviceNode->papsRegisteredPhysHeaps)
+	{
+		if (psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL])
+		{
+			PhysHeapRelease(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]);
+		}
+
+		if (psDeviceNode->papsRegisteredPhysHeaps[0])
+		{
+			/* The clean-up function is likewise aware of only one heap */
+			PVR_ASSERT(psDeviceNode->ui32RegisteredPhysHeaps == 1);
+			PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[0]);
+		}
+
+		OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+	}
+	OSFreeMem(psDeviceNode);
+}
+
+static PVRSRV_ERROR InitialiseInfoPageTimeouts(PVRSRV_DATA *psPVRSRVData)
+{
+	if (NULL == psPVRSRVData)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
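+	/* Note: MAX_HW_TIME_US is in microseconds, so (MAX_HW_TIME_US / 10000)
+	 * yields one tenth of the full HW timeout expressed in milliseconds;
+	 * the constants added below are fixed margins (see the per-platform
+	 * resolutions listed in the comments). */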
+	psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_VALUE_RETRIES] = WAIT_TRY_COUNT;
+	psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_VALUE_TIMEOUT_MS] =
+		((MAX_HW_TIME_US / 10000) + 1000);
+		/* TIMEOUT_INFO_VALUE_TIMEOUT_MS resolves to...
+			vp       : 2000  + 1000
+			emu      : 2000  + 1000
+			rgx_nohw : 50    + 1000
+			plato    : 30000 + 1000 (VIRTUAL_PLATFORM or EMULATOR)
+			           50    + 1000 (otherwise)
+		*/
+
+	psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_CONDITION_RETRIES] = 5;
+	psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_CONDITION_TIMEOUT_MS] =
+		((MAX_HW_TIME_US / 10000) + 100);
+		/* TIMEOUT_INFO_CONDITION_TIMEOUT_MS resolves to...
+			vp       : 2000  + 100
+			emu      : 2000  + 100
+			rgx_nohw : 50    + 100
+			plato    : 30000 + 100 (VIRTUAL_PLATFORM or EMULATOR)
+			           50    + 100 (otherwise)
+		*/
+
+	psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_EVENT_OBJECT_RETRIES] = 5;
+	psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_EVENT_OBJECT_TIMEOUT_MS] =
+		((MAX_HW_TIME_US / 10000) + 100);
+		/* TIMEOUT_INFO_EVENT_OBJECT_TIMEOUT_MS resolves to...
+			vp       : 2000  + 100
+			emu      : 2000  + 100
+			rgx_nohw : 50    + 100
+			plato    : 30000 + 100 (VIRTUAL_PLATFORM or EMULATOR)
+			           50    + 100 (otherwise)
+		*/
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR IMG_CALLCONV
+PVRSRVDriverInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	PVRSRV_DATA	*psPVRSRVData = NULL;
+
+	IMG_UINT32 ui32AppHintCleanupThreadPriority;
+	IMG_UINT32 ui32AppHintWatchdogThreadPriority;
+	IMG_BOOL bEnablePageFaultDebug;
+	IMG_BOOL bEnableFullSyncTracking;
+
+	void *pvAppHintState = NULL;
+	IMG_UINT32 ui32AppHintDefault;
+
+	/*
+	 * As this function performs one-time driver initialisation, use the
+	 * Services global device-independent data to determine whether or not
+	 * this function has already been called.
+	 */
+	if (gpsPVRSRVData)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Driver already initialised", __func__));
+		return PVRSRV_ERROR_ALREADY_EXISTS;
+	}
+
+	/*
+	 * Initialise the server bridges
+	 */
+	eError = ServerBridgeInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	eError = PhysHeapInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	eError = DevmemIntInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/*
+	 * Allocate the device-independent data
+	 */
+	psPVRSRVData = OSAllocZMem(sizeof(*gpsPVRSRVData));
+	if (psPVRSRVData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto Error;
+	}
+
+	/* Now that it is set up, point gpsPVRSRVData at the actual data */
+	gpsPVRSRVData = psPVRSRVData;
+
+	eError = BridgeDispatcherInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/* Init any OS specifics */
+	eError = OSInitEnvData();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/* Early init of server cache maintenance */
+	eError = CacheOpInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	RIInitKM();
+#endif
+
+	ui32AppHintDefault = PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG;
+
+	OSCreateKMAppHintState(&pvAppHintState);
+	OSGetKMAppHintBOOL(pvAppHintState, EnablePageFaultDebug,
+			&ui32AppHintDefault, &bEnablePageFaultDebug);
+	OSFreeKMAppHintState(pvAppHintState);
+
+	if (bEnablePageFaultDebug)
+	{
+		eError = DevicememHistoryInitKM();
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+						"%s: DevicememHistoryInitKM failed", __func__));
+			goto Error;
+		}
+	}
+
+	eError = PMRInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+	eError = DCInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+#endif
+
+	/* Initialise overall system state */
+	gpsPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_OK;
+
+	/* Create an event object */
+	eError = OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", &gpsPVRSRVData->hGlobalEventObject);
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+	gpsPVRSRVData->ui32GEOConsecutiveTimeouts = 0;
+
+	eError = PVRSRVCmdCompleteInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	eError = PVRSRVHandleInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	OSCreateKMAppHintState(&pvAppHintState);
+	ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADPRIORITY;
+	OSGetKMAppHintUINT32(pvAppHintState, CleanupThreadPriority,
+	                     &ui32AppHintDefault, &ui32AppHintCleanupThreadPriority);
+
+	ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY;
+	OSGetKMAppHintUINT32(pvAppHintState, WatchdogThreadPriority,
+	                     &ui32AppHintDefault, &ui32AppHintWatchdogThreadPriority);
+
+	ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING;
+	OSGetKMAppHintBOOL(pvAppHintState, EnableFullSyncTracking,
+			&ui32AppHintDefault, &bEnableFullSyncTracking);
+	OSFreeKMAppHintState(pvAppHintState);
+	pvAppHintState = NULL;
+
+	eError = _CleanupThreadPrepare(gpsPVRSRVData);
+	PVR_LOGG_IF_ERROR(eError, "_CleanupThreadPrepare", Error);
+
+	/* Create a thread which is used to do the deferred cleanup */
+	eError = OSThreadCreatePriority(&gpsPVRSRVData->hCleanupThread,
+	                                "pvr_defer_free",
+	                                CleanupThread,
+	                                CleanupThreadDumpInfo,
+	                                IMG_TRUE,
+	                                gpsPVRSRVData,
+	                                ui32AppHintCleanupThreadPriority);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create deferred cleanup thread",
+				 __func__));
+		goto Error;
+	}
+
+	/* Create the devices watchdog event object */
+	eError = OSEventObjectCreate("PVRSRV_DEVICESWATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hDevicesWatchdogEvObj);
+	PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", Error);
+
+	/* Create a thread which is used to detect fatal errors */
+	eError = OSThreadCreatePriority(&gpsPVRSRVData->hDevicesWatchdogThread,
+	                                "pvr_device_wdg",
+	                                DevicesWatchdogThread,
+	                                NULL,
+	                                IMG_TRUE,
+	                                gpsPVRSRVData,
+	                                ui32AppHintWatchdogThreadPriority);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create devices watchdog thread",
+				 __func__));
+		goto Error;
+	}
+
+	gpsPVRSRVData->psProcessHandleBase_Table = HASH_Create(PVRSRV_PROC_HANDLE_BASE_INIT);
+
+	if (gpsPVRSRVData->psProcessHandleBase_Table == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to create hash table for process handle base.",
+				__func__));
+		eError = PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE;
+		goto Error;
+	}
+
+	eError = OSLockCreate(&gpsPVRSRVData->hProcessHandleBase_Lock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to create lock for process handle base.",
+				__func__));
+		goto Error;
+	}
+
+#if defined(SUPPORT_RGX)
+	eError = OSLockCreate(&gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to create lock for HWPerf periodic thread.",
+				__func__));
+		goto Error;
+	}
+#endif
+
+	eError = _HostMemDeviceCreate();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/* Initialise the Transport Layer */
+	eError = TLInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/* Initialise pdump */
+	eError = PDUMPINIT();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT;
+
+	/* Initialise TL control stream */
+	eError = TLStreamCreate(&psPVRSRVData->hTLCtrlStream,
+	                        psPVRSRVData->psHostMemDeviceNode,
+	                        PVRSRV_TL_CTLR_STREAM, PVRSRV_TL_CTLR_STREAM_SIZE,
+	                        TL_OPMODE_DROP_OLDEST, NULL, NULL, NULL,
+	                        NULL);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to create TL control plane stream"
+		        " (%d).", eError));
+		psPVRSRVData->hTLCtrlStream = NULL;
+	}
+
+	eError = InfoPageCreate(psPVRSRVData);
+	PVR_LOGG_IF_ERROR(eError, "InfoPageCreate", Error);
+
+	/* Initialise the Timeout Info */
+	eError = InitialiseInfoPageTimeouts(psPVRSRVData);
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	if (bEnableFullSyncTracking)
+	{
+		psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED;
+	}
+	if (bEnablePageFaultDebug)
+	{
+		psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED;
+	}
+
+	/* Initialise the Host Trace Buffer */
+	eError = HTBInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+#if defined(SUPPORT_RGX)
+	RGXHWPerfClientInitAppHintCallbacks();
+#endif
+
+	/* Late init of client cache maintenance via the info page */
+	eError = CacheOpInit2();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed to initialise the CacheOp framework (%d)",
+				__func__, eError));
+		goto Error;
+	}
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	eError = ServerSyncInitOnce(psPVRSRVData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to initialise sync server",
+				__func__));
+		goto Error;
+	}
+#endif
+
+	dllist_init(&psPVRSRVData->sConnections);
+	eError = OSLockCreate(&psPVRSRVData->hConnectionsLock);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", Error);
+
+	return PVRSRV_OK;
+
+Error:
+	PVRSRVDriverDeInit();
+	return eError;
+}
+
+void IMG_CALLCONV
+PVRSRVDriverDeInit(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bEnablePageFaultDebug;
+
+	if (gpsPVRSRVData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: missing device-independent data",
+				 __func__));
+		return;
+	}
+
+	bEnablePageFaultDebug = GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED;
+
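+	/* Signal unload so the server threads (cleanup, watchdog, HWPerf) drop
+	 * out of their wait loops before we destroy them below. */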
+	gpsPVRSRVData->bUnload = IMG_TRUE;
+
+	if (gpsPVRSRVData->hProcessHandleBase_Lock)
+	{
+		OSLockDestroy(gpsPVRSRVData->hProcessHandleBase_Lock);
+		gpsPVRSRVData->hProcessHandleBase_Lock = NULL;
+	}
+
+#if defined(SUPPORT_RGX)
+	PVRSRVDestroyHWPerfHostThread();
+	if (gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock)
+	{
+		OSLockDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock);
+		gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock = NULL;
+	}
+#endif
+
+	if (gpsPVRSRVData->psProcessHandleBase_Table)
+	{
+		HASH_Delete(gpsPVRSRVData->psProcessHandleBase_Table);
+		gpsPVRSRVData->psProcessHandleBase_Table = NULL;
+	}
+
+	if (gpsPVRSRVData->hGlobalEventObject)
+	{
+		OSEventObjectSignal(gpsPVRSRVData->hGlobalEventObject);
+	}
+
+	/* Stop and cleanup the devices watchdog thread */
+	if (gpsPVRSRVData->hDevicesWatchdogThread)
+	{
+		if (gpsPVRSRVData->hDevicesWatchdogEvObj)
+		{
+			eError = OSEventObjectSignal(gpsPVRSRVData->hDevicesWatchdogEvObj);
+			PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+		}
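+		/* Retry OSThreadDestroy until the thread has exited or the overall
+		 * timeout expires. */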
+		LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+		{
+			eError = OSThreadDestroy(gpsPVRSRVData->hDevicesWatchdogThread);
+			if (PVRSRV_OK == eError)
+			{
+				gpsPVRSRVData->hDevicesWatchdogThread = NULL;
+				break;
+			}
+			OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+		PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+	}
+
+	if (gpsPVRSRVData->hDevicesWatchdogEvObj)
+	{
+		eError = OSEventObjectDestroy(gpsPVRSRVData->hDevicesWatchdogEvObj);
+		gpsPVRSRVData->hDevicesWatchdogEvObj = NULL;
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+	}
+
+	/* Stop and cleanup the deferred clean up thread, event object and
+	 * deferred context list.
+	 */
+	if (gpsPVRSRVData->hCleanupThread)
+	{
+		if (gpsPVRSRVData->hCleanupEventObject)
+		{
+			eError = OSEventObjectSignal(gpsPVRSRVData->hCleanupEventObject);
+			PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+		}
+		LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+		{
+			eError = OSThreadDestroy(gpsPVRSRVData->hCleanupThread);
+			if (PVRSRV_OK == eError)
+			{
+				gpsPVRSRVData->hCleanupThread = NULL;
+				break;
+			}
+			OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+		PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+	}
+
+	if (gpsPVRSRVData->hCleanupEventObject)
+	{
+		eError = OSEventObjectDestroy(gpsPVRSRVData->hCleanupEventObject);
+		gpsPVRSRVData->hCleanupEventObject = NULL;
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+	}
+
+	/* Tear down the HTB before PVRSRVHandleDeInit() removes its TL handle.
+	 * Note: HTB de-init currently happens in device de-registration. */
+	eError = HTBDeInit();
+	PVR_LOG_IF_ERROR(eError, "HTBDeInit");
+
+	/* Tear down CacheOp framework information page first */
+	CacheOpDeInit2();
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	ServerSyncDeinitOnce(gpsPVRSRVData);
+#endif
+	/* Clean up information page */
+	InfoPageDestroy(gpsPVRSRVData);
+
+	/* Close the TL control plane stream. */
+	TLStreamClose(gpsPVRSRVData->hTLCtrlStream);
+
+	/* deinitialise pdump */
+	if ((g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0)
+	{
+		PDUMPDEINIT();
+	}
+
+	/* Clean up Transport Layer resources that remain */
+	TLDeInit();
+
+	_HostMemDeviceDestroy();
+
+	eError = PVRSRVHandleDeInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVHandleDeInit failed", __func__));
+	}
+
+	/* destroy event object */
+	if (gpsPVRSRVData->hGlobalEventObject)
+	{
+		OSEventObjectDestroy(gpsPVRSRVData->hGlobalEventObject);
+		gpsPVRSRVData->hGlobalEventObject = NULL;
+	}
+
+	PVRSRVCmdCompleteDeinit();
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+	eError = DCDeInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: DCDeInit failed", __func__));
+	}
+#endif
+
+	eError = PMRDeInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PMRDeInit failed", __func__));
+	}
+
+	BridgeDispatcherDeinit();
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	RIDeInitKM();
+#endif
+
+	if (bEnablePageFaultDebug)
+	{
+		DevicememHistoryDeInitKM();
+	}
+
+	CacheOpDeInit();
+
+	OSDeInitEnvData();
+
+	(void) DevmemIntDeInit();
+
+	eError = ServerBridgeDeInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: ServerBridgeDeinit failed", __func__));
+	}
+
+	eError = PhysHeapDeinit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PhysHeapDeinit failed", __func__));
+	}
+
+	if (OSLockDestroy(gpsPVRSRVData->hConnectionsLock) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: ConnectionLock destruction failed", __func__));
+	}
+
+	OSFreeMem(gpsPVRSRVData);
+	gpsPVRSRVData = NULL;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+static PVRSRV_ERROR CreateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	IMG_UINT	uiCounter = 0;
+
+	for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+	{
+		psDeviceNode->psOSidSubArena[uiCounter] =
+			RA_Create(psDeviceNode->apszRANames[0],
+					  OSGetPageShift(),			/* Use host page size, keeps things simple */
+					  RA_LOCKCLASS_0,			/* This arena doesn't use any other arenas. */
+					  NULL,					/* No Import */
+					  NULL,					/* No free import */
+					  NULL,					/* No import handle */
+					  IMG_FALSE);
+
+		if (psDeviceNode->psOSidSubArena[uiCounter] == NULL)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "(GPU Virtualization Validation): Calling RA_Add with base %u and size %u", 0, GPUVIRT_SIZEOF_ARENA0));
+
+	/* Arena creation takes place earlier than when the client side reads the apphints and transfers them over the bridge.
+	 * Since we don't know how the memory is going to be partitioned, and since we already need some memory for all the
+	 * initial allocations that take place, we populate the first sub-arena (0) with a span of 64 megabytes. This has been
+	 * shown to be enough even for cases where EWS is allocated memory in this sub-arena and a multi-app example is then
+	 * executed. This pre-allocation also means that consistency must be maintained between the apphints and reality.
+	 * That's why, in the apphints, the OSid0 region must start at 0 and end at 0x3FFFFFF. */
+
+	if (!RA_Add(psDeviceNode->psOSidSubArena[0], 0, GPUVIRT_SIZEOF_ARENA0, 0, NULL))
+	{
+		RA_Delete(psDeviceNode->psOSidSubArena[0]);
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psDeviceNode->apsLocalDevMemArenas[0] = psDeviceNode->psOSidSubArena[0];
+
+	return PVRSRV_OK;
+}
+
+void PopulateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode,
+						  IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+						  IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS])
+{
+	IMG_UINT	uiCounter;
+
+	/* Since sub-arena[0] has already been populated, we now populate the rest, starting from 1 */
+
+	for (uiCounter = 1; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "(GPU Virtualization Validation): Calling RA_Add with base %u and size %u", aui32OSidMin[0][uiCounter], aui32OSidMax[0][uiCounter]-aui32OSidMin[0][uiCounter]+1));
+
+		if (!RA_Add(psDeviceNode->psOSidSubArena[uiCounter], aui32OSidMin[0][uiCounter], aui32OSidMax[0][uiCounter]-aui32OSidMin[0][uiCounter]+1, 0, NULL))
+		{
+			goto error;
+		}
+	}
+
+	#if defined(EMULATOR)
+	{
+		SysSetOSidRegisters(aui32OSidMin, aui32OSidMax);
+	}
+	#endif
+
+	return;
+
+error:
+	for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+	{
+		RA_Delete(psDeviceNode->psOSidSubArena[uiCounter]);
+	}
+
+	return;
+}
+
+#endif
+
+static void _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+					IMG_UINT32 ui32VerbLevel,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	/* Only dump info once */
+	PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hDebugRequestHandle;
+
+	PVR_DUMPDEBUG_LOG("------[ System Summary ]------");
+
+	switch (psDeviceNode->eCurrentSysPowerState)
+	{
+		case PVRSRV_SYS_POWER_STATE_OFF:
+			PVR_DUMPDEBUG_LOG("Device System Power State: OFF");
+			break;
+		case PVRSRV_SYS_POWER_STATE_ON:
+			PVR_DUMPDEBUG_LOG("Device System Power State: ON");
+			break;
+		default:
+			PVR_DUMPDEBUG_LOG("Device System Power State: UNKNOWN (%d)",
+							   psDeviceNode->eCurrentSysPowerState);
+			break;
+	}
+
+	PVR_DUMPDEBUG_LOG("MaxHWTOut: %dus, WtTryCt: %d, WDGTOut(on,off): (%dms,%dms)",
+	                  MAX_HW_TIME_US, WAIT_TRY_COUNT, DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT, DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT);
+
+	SysDebugInfo(psDeviceNode->psDevConfig, pfnDumpDebugPrintf, pvDumpDebugFile);
+}
+
+static void _ThreadsDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle,
+                                       IMG_UINT32 ui32VerbLevel,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile)
+{
+	PVR_UNREFERENCED_PARAMETER(hDbgRequestHandle);
+
+	if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
+	{
+		PVR_DUMPDEBUG_LOG("------[ Server Thread Summary ]------");
+		OSThreadDumpInfo(pfnDumpDebugPrintf, pvDumpDebugFile);
+	}
+}
+
+PVRSRV_ERROR PVRSRVPhysMemHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_DEVICE_PHYS_HEAP physHeapIndex;
+	IMG_UINT32 ui32RegionId = 0;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+
+	/* Register the physical memory heaps */
+	psDeviceNode->papsRegisteredPhysHeaps =
+		OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) *
+					psDevConfig->ui32PhysHeapCount);
+	if (!psDeviceNode->papsRegisteredPhysHeaps)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++)
+	{
+		/* No real device should register a heap with the same ID as the host device's heap */
+		PVR_ASSERT(psDevConfig->pasPhysHeaps[i].ui32PhysHeapID != PHYS_HEAP_ID_HOSTMEM);
+
+		eError = PhysHeapRegister(&psDevConfig->pasPhysHeaps[i],
+								  &psDeviceNode->papsRegisteredPhysHeaps[i]);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to register physical heap %d (%s)",
+					 __func__, psDevConfig->pasPhysHeaps[i].ui32PhysHeapID,
+					 PVRSRVGetErrorString(eError)));
+			goto ErrorPhysHeapsUnregister;
+		}
+
+		psDeviceNode->ui32RegisteredPhysHeaps++;
+	}
+
+	/*
+	 * The physical backing storage for the following physical heaps
+	 * [CPU,GPU,FW] may or may not come from the same underlying source
+	 */
+	eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+							 &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL physical memory heap",
+				 __func__));
+		goto ErrorPhysHeapsUnregister;
+	}
+
+	eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL],
+							 &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL physical memory heap",
+				 __func__));
+		goto ErrorPhysHeapsRelease;
+	}
+
+	eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL],
+							 &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL physical memory heap",
+				 __func__));
+		goto ErrorPhysHeapsRelease;
+	}
+
+	eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL],
+							 &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL physical memory heap",
+				 __func__));
+		goto ErrorPhysHeapsRelease;
+	}
+
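+	/* The heap type determines which page-provider callbacks are wired up
+	 * below: LMA (local card) memory is managed through RA arenas and the
+	 * LMA_PhyContigPages* / PhysmemNewLocalRamBackedPMR functions, while UMA
+	 * heaps fall back to the OSPhyContigPages* / PhysmemNewOSRamBackedPMR
+	 * implementations backed by the OS allocator. */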
+	/* Do we have card memory? If so create RAs to manage it */
+	if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+	{
+		RA_BASE_T uBase;
+		RA_LENGTH_T uSize;
+		IMG_UINT64 ui64Size;
+		IMG_CPU_PHYADDR sCpuPAddr;
+		IMG_DEV_PHYADDR sDevPAddr;
+
+		IMG_UINT32 ui32NumOfLMARegions;
+		PHYS_HEAP* psLMAHeap;
+
+		psLMAHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+		ui32NumOfLMARegions = PhysHeapNumberOfRegions(psLMAHeap);
+
+		if (ui32NumOfLMARegions == 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: LMA heap has no memory regions defined.", __func__));
+			eError = PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP;
+			goto ErrorPhysHeapsRelease;
+		}
+
+		/* Allocate memory for RA pointers and name strings */
+		psDeviceNode->apsLocalDevMemArenas = OSAllocMem(sizeof(RA_ARENA*) * ui32NumOfLMARegions);
+		if (!psDeviceNode->apsLocalDevMemArenas)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate LocalDevMemArenas",
+					 __func__));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto ErrorPhysHeapsRelease;
+		}
+		psDeviceNode->ui32NumOfLocalMemArenas = ui32NumOfLMARegions;
+		psDeviceNode->apszRANames = OSAllocMem(ui32NumOfLMARegions * sizeof(IMG_PCHAR));
+		if (!psDeviceNode->apszRANames)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate RANames",
+					 __func__));
+			OSFreeMem(psDeviceNode->apsLocalDevMemArenas);
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto ErrorPhysHeapsRelease;
+		}
+
+		for (; ui32RegionId < ui32NumOfLMARegions; ui32RegionId++)
+		{
+			eError = PhysHeapRegionGetSize(psLMAHeap, ui32RegionId, &ui64Size);
+			if (eError != PVRSRV_OK)
+			{
+				/* We can only get here if there is a bug in this module */
+				PVR_ASSERT(IMG_FALSE);
+				return eError;
+			}
+
+			eError = PhysHeapRegionGetCpuPAddr(psLMAHeap, ui32RegionId, &sCpuPAddr);
+			if (eError != PVRSRV_OK)
+			{
+				/* We can only get here if there is a bug in this module */
+				PVR_ASSERT(IMG_FALSE);
+				return eError;
+			}
+
+			eError = PhysHeapRegionGetDevPAddr(psLMAHeap, ui32RegionId, &sDevPAddr);
+			if (eError != PVRSRV_OK)
+			{
+				/* We can only get here if there is a bug in this module */
+				PVR_ASSERT(IMG_FALSE);
+				return eError;
+			}
+
+			PVR_DPF((PVR_DBG_MESSAGE,
+					"Creating RA for card memory - region %d - 0x%016"
+					IMG_UINT64_FMTSPECx"-0x%016" IMG_UINT64_FMTSPECx,
+					 ui32RegionId, (IMG_UINT64) sCpuPAddr.uiAddr,
+					 sCpuPAddr.uiAddr + ui64Size));
+
+			psDeviceNode->apszRANames[ui32RegionId] =
+				OSAllocMem(PVRSRV_MAX_RA_NAME_LENGTH);
+			if (!psDeviceNode->apszRANames[ui32RegionId])
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc RANames[]",
+						 __func__));
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto ErrorRAsDelete;
+			}
+			OSSNPrintf(psDeviceNode->apszRANames[ui32RegionId],
+					   PVRSRV_MAX_RA_NAME_LENGTH,
+					   "%s card mem",
+					   psDevConfig->pszName);
+
+			uBase = sDevPAddr.uiAddr;
+			uSize = (RA_LENGTH_T) ui64Size;
+			PVR_ASSERT(uSize == ui64Size);
+
+			/* Use host page size, keeps things simple */
+			psDeviceNode->apsLocalDevMemArenas[ui32RegionId] =
+				RA_Create(psDeviceNode->apszRANames[ui32RegionId],
+						  OSGetPageShift(), RA_LOCKCLASS_0, NULL, NULL, NULL,
+						  IMG_FALSE);
+
+			if (psDeviceNode->apsLocalDevMemArenas[ui32RegionId] == NULL)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create LMA memory arena",
+						 __func__));
+				OSFreeMem(psDeviceNode->apszRANames[ui32RegionId]);
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto ErrorRAsDelete;
+			}
+
+			if (!RA_Add(psDeviceNode->apsLocalDevMemArenas[ui32RegionId],
+						uBase, uSize, 0, NULL))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to add memory to LMA memory arena",
+						 __func__));
+				RA_Delete(psDeviceNode->apsLocalDevMemArenas[ui32RegionId]);
+				OSFreeMem(psDeviceNode->apszRANames[ui32RegionId]);
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto ErrorRAsDelete;
+			}
+		}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+		eError = CreateLMASubArenas(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to create LMA memory sub-arenas", __func__));
+			goto ErrorRAsDelete;
+		}
+#endif
+
+		/* If additional psDeviceNode->pfnDevPx* callbacks are added,
+		   update the corresponding virtualization-specific override
+		   in pvrsrv_vz.c:_VzDeviceCreate() */
+		psDeviceNode->pfnDevPxAlloc = LMA_PhyContigPagesAlloc;
+		psDeviceNode->pfnDevPxFree = LMA_PhyContigPagesFree;
+		psDeviceNode->pfnDevPxMap = LMA_PhyContigPagesMap;
+		psDeviceNode->pfnDevPxUnMap = LMA_PhyContigPagesUnmap;
+		psDeviceNode->pfnDevPxClean = LMA_PhyContigPagesClean;
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory only, no local card memory"));
+
+		/* else we only have OS system memory */
+		psDeviceNode->pfnDevPxAlloc = OSPhyContigPagesAlloc;
+		psDeviceNode->pfnDevPxFree = OSPhyContigPagesFree;
+		psDeviceNode->pfnDevPxMap = OSPhyContigPagesMap;
+		psDeviceNode->pfnDevPxUnMap = OSPhyContigPagesUnmap;
+		psDeviceNode->pfnDevPxClean = OSPhyContigPagesClean;
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+	}
+
+	if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== Local card memory only, no OS system memory"));
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory, 2nd phys heap"));
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+	}
+
+	if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== Local card memory only, no OS system memory"));
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewLocalRamBackedPMR;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory, 3rd phys heap"));
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewOSRamBackedPMR;
+	}
+
+	return PVRSRV_OK;
+
+ErrorRAsDelete:
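+	/* Unwind in reverse creation order; ui32RegionId holds the number of
+	 * fully-created regions at this point. */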
+	while (ui32RegionId)
+	{
+		ui32RegionId--;
+		RA_Delete(psDeviceNode->apsLocalDevMemArenas[ui32RegionId]);
+		psDeviceNode->apsLocalDevMemArenas[ui32RegionId] = NULL;
+
+		OSFreeMem(psDeviceNode->apszRANames[ui32RegionId]);
+		psDeviceNode->apszRANames[ui32RegionId] = NULL;
+	}
+
+	OSFreeMem(psDeviceNode->apsLocalDevMemArenas);
+	psDeviceNode->apsLocalDevMemArenas = NULL;
+
+	OSFreeMem(psDeviceNode->apszRANames);
+	psDeviceNode->apszRANames = NULL;
+
+ErrorPhysHeapsRelease:
+	for (physHeapIndex = 0;
+		 physHeapIndex < ARRAY_SIZE(psDeviceNode->apsPhysHeap);
+		 physHeapIndex++)
+	{
+		if (psDeviceNode->apsPhysHeap[physHeapIndex])
+		{
+			PhysHeapRelease(psDeviceNode->apsPhysHeap[physHeapIndex]);
+		}
+	}
+
+ErrorPhysHeapsUnregister:
+	for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++)
+	{
+		PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[i]);
+	}
+	OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+
+	return eError;
+}
+
+void PVRSRVPhysMemHeapsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx;
+	IMG_UINT32 i;
+	IMG_UINT32 ui32RegionIdx;
+
+	/* Remove RAs and RA names for local card memory */
+	for (ui32RegionIdx = 0;
+		 ui32RegionIdx < psDeviceNode->ui32NumOfLocalMemArenas;
+		 ui32RegionIdx++)
+	{
+		if (psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx])
+		{
+			RA_Delete(psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx]);
+			psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx] = NULL;
+		}
+
+		if (psDeviceNode->apszRANames[ui32RegionIdx])
+		{
+			OSFreeMem(psDeviceNode->apszRANames[ui32RegionIdx]);
+			psDeviceNode->apszRANames[ui32RegionIdx] = NULL;
+		}
+	}
+
+	if (psDeviceNode->apsLocalDevMemArenas)
+	{
+		OSFreeMem(psDeviceNode->apsLocalDevMemArenas);
+		psDeviceNode->apsLocalDevMemArenas = NULL;
+	}
+
+	if (psDeviceNode->apszRANames)
+	{
+		OSFreeMem(psDeviceNode->apszRANames);
+		psDeviceNode->apszRANames = NULL;
+	}
+
+	/* Release heaps */
+	for (ePhysHeapIdx = 0;
+		 ePhysHeapIdx < ARRAY_SIZE(psDeviceNode->apsPhysHeap);
+		 ePhysHeapIdx++)
+	{
+		if (psDeviceNode->apsPhysHeap[ePhysHeapIdx])
+		{
+			PhysHeapRelease(psDeviceNode->apsPhysHeap[ePhysHeapIdx]);
+		}
+	}
+
+	/* Unregister heaps */
+	for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++)
+	{
+		PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[i]);
+	}
+
+	OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceCreate(void *pvOSDevice,
+											 IMG_INT32 i32UMIdentifier,
+											 PVRSRV_DEVICE_NODE **ppsDeviceNode)
+{
+	PVRSRV_DATA				*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR			eError;
+	PVRSRV_DEVICE_CONFIG	*psDevConfig;
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+#if defined(SUPPORT_RGX) || defined(SUPPORT_ALT_REGBASE)
+	PVRSRV_RGXDEV_INFO		*psDevInfo = NULL;
+#endif
+	IMG_UINT32				ui32AppHintDefault;
+	IMG_UINT32				ui32AppHintDriverMode;
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
+	IMG_UINT32				ui32AppHintPhysMemTestPasses;
+#endif
+	void *pvAppHintState    = NULL;
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	IMG_HANDLE				hProcessStats;
+#endif
+
+	psDeviceNode = OSAllocZMemNoStats(sizeof(*psDeviceNode));
+	if (!psDeviceNode)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate device node",
+				 __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	/* Allocate process statistics */
+	eError = PVRSRVStatsRegisterProcess(&hProcessStats);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Couldn't register process statistics (%d)",
+			 __func__, eError));
+		goto ErrorFreeDeviceNode;
+	}
+#endif
+
+	psDeviceNode->sDevId.i32UMIdentifier = i32UMIdentifier;
+
+	/* Read driver mode (i.e. native, host or guest) AppHint early */
+	ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE;
+	OSCreateKMAppHintState(&pvAppHintState);
+	OSGetKMAppHintUINT32(pvAppHintState, DriverMode,
+						 &ui32AppHintDefault, &ui32AppHintDriverMode);
+	OSFreeKMAppHintState(pvAppHintState);
+	pvAppHintState = NULL;
+	psPVRSRVData->eDriverMode = PVRSRV_VZ_APPHINT_MODE(ui32AppHintDriverMode);
+
+	eError = SysDevInit(pvOSDevice, &psDevConfig);
+	if (eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get device config (%s)",
+				 __func__, PVRSRVGetErrorString(eError)));
+		goto ErrorDeregisterStats;
+	}
+
+	PVR_ASSERT(psDevConfig);
+	PVR_ASSERT(psDevConfig->pvOSDevice == pvOSDevice);
+	PVR_ASSERT(!psDevConfig->psDevNode);
+
+	psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT;
+	psDeviceNode->psDevConfig = psDevConfig;
+	psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON;
+
+	if (psDeviceNode->psDevConfig->pfnSysDriverMode)
+	{
+		if (! PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(ui32AppHintDriverMode))
+		{
+			/*
+			 * The driver mode AppHint can be an override or a non-override (default)
+			 * value. If the system layer provides a callback in SysDevInit() to
+			 * force the driver into a particular driver mode, then only comply
+			 * if the apphint value provided is a non-override mode value.
+			 */
+			psPVRSRVData->eDriverMode = psDeviceNode->psDevConfig->pfnSysDriverMode();
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_WARNING, "Override driver mode specified, ignoring SysDriverMode"));
+		}
+	}
+
+	/*
+	 * Ensure that the supplied driver execution mode is consistent with the number
+	 * of OSIDs the firmware can support. Any failure here is (should be) fatal, as
+	 * the requested driver mode cannot be supported by the firmware.
+	 */
+	switch (psPVRSRVData->eDriverMode)
+	{
+		case DRIVER_MODE_NATIVE:
+		/* Always supported mode */
+			break;
+
+		case DRIVER_MODE_HOST:
+		case DRIVER_MODE_GUEST:
+#if (RGXFW_NUM_OS == 1)
+			PVR_DPF((PVR_DBG_ERROR, "The number of firmware supported OSID(s) is 1"));
+			PVR_DPF((PVR_DBG_ERROR,	"Halting initialisation, cannot transition to %s mode",
+					psPVRSRVData->eDriverMode == DRIVER_MODE_HOST ? "host" : "guest"));
+			eError = PVRSRV_ERROR_NOT_SUPPORTED;
+			goto ErrorSysDevDeInit;
+#endif
+			break;
+
+		default:
+			if ((IMG_INT32)psPVRSRVData->eDriverMode < (IMG_INT32)DRIVER_MODE_NATIVE ||
+			    (IMG_INT32)psPVRSRVData->eDriverMode >= (IMG_INT32)RGXFW_NUM_OS)
+			{
+				/* Running on a non-VZ-capable BVNC, so the OSID is simulated via
+				   eDriverMode, but the value is outside the permitted range */
+				PVR_DPF((PVR_DBG_ERROR,
+						"Halting initialisation, OSID %d is outside of range [0:%d] supported",
+						(IMG_INT)psPVRSRVData->eDriverMode, RGXFW_NUM_OS-1));
+				eError = PVRSRV_ERROR_NOT_SUPPORTED;
+				goto ErrorSysDevDeInit;
+			}
+			else
+			{
+				/* Invalid driver mode enumeration integer value */
+				PVR_DPF((PVR_DBG_ERROR, "Halting initialisation due to invalid driver mode %d",
+						(IMG_INT32)psPVRSRVData->eDriverMode));
+				eError = PVRSRV_ERROR_NOT_SUPPORTED;
+				goto ErrorSysDevDeInit;
+			}
+			break;
+	}
+
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE))
+	{
+		/* Read AppHint - Configurable memory test pass count */
+		ui32AppHintDefault = 0;
+		OSCreateKMAppHintState(&pvAppHintState);
+		OSGetKMAppHintUINT32(pvAppHintState, PhysMemTestPasses,
+				&ui32AppHintDefault, &ui32AppHintPhysMemTestPasses);
+		OSFreeKMAppHintState(pvAppHintState);
+		pvAppHintState = NULL;
+
+		if (ui32AppHintPhysMemTestPasses > 0)
+		{
+			eError = PhysMemTest(psDevConfig, ui32AppHintPhysMemTestPasses);
+			PVR_LOGG_IF_ERROR(eError, "PhysMemTest", ErrorSysDevDeInit);
+		}
+	}
+#endif
+	/* Store the device node in the device config for the system layer to use */
+	psDevConfig->psDevNode = psDeviceNode;
+
+	/* Perform additional VZ system initialisation */
+	eError = SysVzDevInit(psDevConfig);
+	if (eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed system virtualization initialisation (%s)",
+				 __func__, PVRSRVGetErrorString(eError)));
+		goto ErrorSysDevDeInit;
+	}
+
+	eError = PVRSRVRegisterDbgTable(psDeviceNode,
+									g_aui32DebugOrderTable,
+									ARRAY_SIZE(g_aui32DebugOrderTable));
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorSysVzDevDeInit;
+	}
+
+	eError = OSLockCreate(&psDeviceNode->hPowerLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorUnregisterDbgTable;
+	}
+
+	eError = PVRSRVPhysMemHeapsInit(psDeviceNode, psDevConfig);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorPowerLockDestroy;
+	}
+
+#if defined(SUPPORT_RGX)
+	/* Requires the registered GPU local heap and the debug table;
+	 * initialises psDevInfo */
+	eError = RGXRegisterDevice(psDeviceNode, &psDevInfo);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register device", __func__));
+		eError = PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
+		goto ErrorPhysMemHeapsDeinit;
+	}
+#endif
+
+	psDeviceNode->uiMMUPxLog2AllocGran = OSGetPageShift();
+
+	eError = ServerSyncInit(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorDeInitRgx;
+	}
+
+	eError = SyncCheckpointInit(psDeviceNode);
+	PVR_LOG_IF_ERROR(eError, "SyncCheckpointInit");
+
+#if defined(SUPPORT_RGX) && defined(SUPPORT_DEDICATED_FW_MEMORY) && !defined(NO_HARDWARE)
+	eError = PhysmemInitFWDedicatedMem(psDeviceNode, psDevConfig);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise dedicated FW memory heap",
+				 __func__));
+		goto ErrorOnFWMemInit;
+	}
+#endif
+
+	/* Perform additional vz initialisation */
+	eError = _VzDeviceCreate(psDeviceNode);
+	PVR_LOG_IF_ERROR(eError, "_VzDeviceCreate");
+
+	/*
+	 * This is registered before doing device specific initialisation to ensure
+	 * generic device information is dumped first during a debug request.
+	 */
+	eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hDbgReqNotify,
+											psDeviceNode,
+											_SysDebugRequestNotify,
+											DEBUG_REQUEST_SYS,
+											psDeviceNode);
+	PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+	eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hThreadsDbgReqNotify,
+											psDeviceNode,
+											_ThreadsDebugRequestNotify,
+											DEBUG_REQUEST_SYS,
+											NULL);
+	PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+	eError = HTBDeviceCreate(psDeviceNode);
+	PVR_LOG_IF_ERROR(eError, "HTBDeviceCreate");
+
+	psPVRSRVData->ui32RegisteredDevices++;
+
+#if defined(PVR_DVFS) && !defined(NO_HARDWARE)
+	eError = InitDVFS(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to start DVFS", __func__));
+		goto ErrorDecrementDeviceCount;
+	}
+#endif
+
+	OSAtomicWrite(&psDeviceNode->iNumClockSpeedChanges, 0);
+
+#if defined(PVR_TESTING_UTILS)
+	TUtilsInit(psDeviceNode);
+#endif
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSWRLockCreate(&psDeviceNode->hMemoryContextPageFaultNotifyListLock);
+	if (psDeviceNode->hMemoryContextPageFaultNotifyListLock == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for PF notify list",
+		        __func__));
+		goto ErrorDecrementDeviceCount;
+	}
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+	dllist_init(&psDeviceNode->sMemoryContextPageFaultNotifyListHead);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Registered device %p", psDeviceNode));
+	PVR_DPF((PVR_DBG_MESSAGE, "Register bank address = 0x%08lx",
+			 (unsigned long)psDevConfig->sRegsCpuPBase.uiAddr));
+	PVR_DPF((PVR_DBG_MESSAGE, "IRQ = %d", psDevConfig->ui32IRQ));
+
+#if defined(SUPPORT_RGX) && defined(SUPPORT_ALT_REGBASE)
+	{
+		IMG_DEV_PHYADDR sRegsGpuPBase;
+
+		PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+		                           1,
+		                           &sRegsGpuPBase,
+		                           &(psDeviceNode->psDevConfig->sRegsCpuPBase));
+
+		PVR_LOG(("%s: Using alternate Register bank GPU address: 0x%08lx (orig: 0x%08lx)", __func__,
+		         (unsigned long)psDevConfig->sAltRegsGpuPBase.uiAddr,
+		         (unsigned long)sRegsGpuPBase.uiAddr));
+	}
+#endif
+
+	/* Finally insert the device into the dev-list and set it as active */
+	List_PVRSRV_DEVICE_NODE_InsertTail(&psPVRSRVData->psDeviceNodeList,
+									   psDeviceNode);
+
+	*ppsDeviceNode = psDeviceNode;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	/* Close the process statistics */
+	PVRSRVStatsDeregisterProcess(hProcessStats);
+#endif
+
+#if defined(SUPPORT_VALIDATION) && !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockCreateNoStats(&psDeviceNode->hValidationLock);
+#endif
+
+	return PVRSRV_OK;
+
+#if (defined(PVR_DVFS) && !defined(NO_HARDWARE)) || !defined(PVRSRV_USE_BRIDGE_LOCK)
+ErrorDecrementDeviceCount:
+#endif
+	psPVRSRVData->ui32RegisteredDevices--;
+
+	if (psDeviceNode->hDbgReqNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify);
+	}
+
+	if (psDeviceNode->hThreadsDbgReqNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hThreadsDbgReqNotify);
+	}
+
+	/* Perform vz deinitialisation */
+	_VzDeviceDestroy(psDeviceNode);
+
+#if defined(SUPPORT_RGX) && defined(SUPPORT_DEDICATED_FW_MEMORY) && !defined(NO_HARDWARE)
+ErrorOnFWMemInit:
+	PhysmemDeinitFWDedicatedMem(psDeviceNode);
+#endif
+
+	ServerSyncDeinit(psDeviceNode);
+
+ErrorDeInitRgx:
+#if defined(SUPPORT_RGX)
+	DevDeInitRGX(psDeviceNode);
+ErrorPhysMemHeapsDeinit:
+	PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+#endif
+ErrorPowerLockDestroy:
+	OSLockDestroy(psDeviceNode->hPowerLock);
+ErrorUnregisterDbgTable:
+	PVRSRVUnregisterDbgTable(psDeviceNode);
+ErrorSysVzDevDeInit:
+	psDevConfig->psDevNode = NULL;
+	SysVzDevDeInit(psDevConfig);
+ErrorSysDevDeInit:
+	SysDevDeInit(psDevConfig);
+ErrorDeregisterStats:
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	/* Close the process statistics */
+	PVRSRVStatsDeregisterProcess(hProcessStats);
+ErrorFreeDeviceNode:
+#endif
+	OSFreeMemNoStats(psDeviceNode);
+
+	return eError;
+}
+
+#if defined(SUPPORT_RGX)
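+/* AppHint accessor pairs. The *DeviceFlag variants toggle RGXKM_DEVICE_STATE_*
+ * bits in PVRSRV_RGXDEV_INFO::ui32DeviceFlags, while the *StateFlag variants
+ * toggle RGXFWIF_INICFG_* bits in the FW config. In both cases the flag to
+ * operate on is passed through the psPrivate pointer as an integer. */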
+static PVRSRV_ERROR _SetDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice,
+                                   const void *psPrivate, IMG_BOOL bValue)
+{
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+	IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+
+	if (!ui32Flag)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eResult = RGXSetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+	                            ui32Flag, bValue);
+
+	return eResult;
+}
+
+static PVRSRV_ERROR _ReadDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice,
+                                   const void *psPrivate, IMG_BOOL *pbValue)
+{
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+	IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+	IMG_UINT32 ui32State;
+
+	if (!ui32Flag)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eResult = RGXGetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+	                            &ui32State);
+
+	if (PVRSRV_OK == eResult)
+	{
+		*pbValue = (ui32State & ui32Flag) ? IMG_TRUE : IMG_FALSE;
+	}
+
+	return eResult;
+}
+
+static PVRSRV_ERROR _SetStateFlag(const PVRSRV_DEVICE_NODE *psDevice,
+                                  const void *psPrivate, IMG_BOOL bValue)
+{
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+	IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+
+	if (!ui32Flag)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* EnableHWR is a special case:
+	 * it is only possible to disable it after the FW is running
+	 */
+	if (bValue && RGXFWIF_INICFG_HWR_EN == ui32Flag)
+	{
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+	}
+
+	eResult = RGXStateFlagCtrl((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+	                           ui32Flag, NULL, bValue);
+
+	return eResult;
+}
+
+static PVRSRV_ERROR _ReadStateFlag(const PVRSRV_DEVICE_NODE *psDevice,
+                                   const void *psPrivate, IMG_BOOL *pbValue)
+{
+	IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+	IMG_UINT32 ui32State;
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevice->pvDevice;
+
+	if (!ui32Flag)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	ui32State = psDevInfo->psFWIfOSConfig->ui32ConfigFlags;
+
+	if (pbValue)
+	{
+		*pbValue = (ui32State & ui32Flag) ? IMG_TRUE : IMG_FALSE;
+	}
+
+	return PVRSRV_OK;
+}
+#endif
+
+PVRSRV_ERROR PVRSRVDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	IMG_BOOL bInitSuccessful = IMG_FALSE;
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	IMG_HANDLE hProcessStats;
+#endif
+	PVRSRV_ERROR eError;
+
+	if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_INIT)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Device already initialised", __func__));
+		return PVRSRV_ERROR_INIT_FAILURE;
+	}
+
+	/* Allocate process statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	eError = PVRSRVStatsRegisterProcess(&hProcessStats);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Couldn't register process statistics (%d)",
+			 __func__, eError));
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_RGX)
+	eError = RGXInit(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Initialisation of Rogue device failed (%s)",
+				 __func__, PVRSRVGetErrorString(eError)));
+		goto Exit;
+	}
+#endif
+
+	bInitSuccessful = IMG_TRUE;
+
+#if defined(SUPPORT_RGX)
+Exit:
+#endif
+	eError = PVRSRVDeviceFinalise(psDeviceNode, bInitSuccessful);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Services failed to finalise the device (%s)",
+				 __func__, PVRSRVGetErrorString(eError)));
+	}
+
+#if defined(SUPPORT_RGX)
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableClockGating,
+	                                  _ReadStateFlag, _SetStateFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_CLKGATING_EN));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableDMOverlap,
+	                                  _ReadStateFlag, _SetStateFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_DM_OVERLAP));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOnHWRTrigger,
+	                                  _ReadStateFlag, _SetStateFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOutOfMemory,
+	                                  _ReadStateFlag, _SetStateFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_CheckMList,
+	                                  _ReadStateFlag, _SetStateFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXFWIF_INICFG_CHECK_MLIST_EN));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableHWR,
+	                                  _ReadStateFlag, _SetStateFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXFWIF_INICFG_HWR_EN));
+
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableFEDLogging,
+	                                  _ReadDeviceFlag, _SetDeviceFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_ZeroFreelist,
+	                                  _ReadDeviceFlag, _SetDeviceFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXKM_DEVICE_STATE_ZERO_FREELIST));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DustRequestInject,
+	                                  _ReadDeviceFlag, _SetDeviceFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN));
+
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisablePDumpPanic,
+	                                  RGXQueryPdumpPanicDisable, RGXSetPdumpPanicDisable,
+	                                  psDeviceNode,
+	                                  NULL);
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	/* Close the process statistics */
+	PVRSRVStatsDeregisterProcess(hProcessStats);
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_DATA				*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR			eError;
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	IMG_BOOL				bForceUnload = IMG_FALSE;
+
+	if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		bForceUnload = IMG_TRUE;
+	}
+#endif
+
+	psPVRSRVData->ui32RegisteredDevices--;
+
+	psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_DEINIT;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	if (psDeviceNode->hMemoryContextPageFaultNotifyListLock != NULL)
+	{
+		OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock);
+	}
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+#if defined(SUPPORT_VALIDATION) && !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroyNoStats(psDeviceNode->hValidationLock);
+	psDeviceNode->hValidationLock = NULL;
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+	TUtilsDeinit(psDeviceNode);
+#endif
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	SyncFbDeregisterDevice(psDeviceNode);
+#endif
+	/* Counter part to what gets done in PVRSRVDeviceFinalise */
+	if (psDeviceNode->hSyncCheckpointContext)
+	{
+		SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext);
+		psDeviceNode->hSyncCheckpointContext = NULL;
+	}
+	if (psDeviceNode->hSyncPrimContext)
+	{
+		if (psDeviceNode->psSyncPrim)
+		{
+			/* Free general purpose sync primitive */
+			SyncPrimFree(psDeviceNode->psSyncPrim);
+			psDeviceNode->psSyncPrim = NULL;
+		}
+
+		if (psDeviceNode->psMMUCacheSyncPrim)
+		{
+			PVRSRV_CLIENT_SYNC_PRIM *psSync = psDeviceNode->psMMUCacheSyncPrim;
+
+			/* Ensure there are no pending MMU Cache Ops in progress before freeing this sync. */
+			eError = PVRSRVPollForValueKM(psSync->pui32LinAddr,
+			                              psDeviceNode->ui16NextMMUInvalidateUpdate-1,
+			                              0xFFFFFFFF);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to wait for MMU Cache op", __func__));
+				return eError;
+			}
+
+			/* Important to NULL the device node's sync-prim pointer
+			 * before freeing the sync-prim, so that we don't defer the
+			 * freeing of the sync-prim's own page tables. The sync is
+			 * used to defer MMU page table freeing. */
+			psDeviceNode->psMMUCacheSyncPrim = NULL;
+
+			/* Free general purpose sync primitive */
+			SyncPrimFree(psSync);
+		}
+
+		SyncPrimContextDestroy(psDeviceNode->hSyncPrimContext);
+		psDeviceNode->hSyncPrimContext = NULL;
+	}
+
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire power lock", __func__));
+		return eError;
+	}
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	/*
+	 * Firmware probably not responding if bForceUnload is set, but we still want to unload the
+	 * driver.
+	 */
+	if (!bForceUnload)
+#endif
+	{
+		/* Force idle device */
+		eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle request failure (%s)",
+			                        __func__, PVRSRVGetErrorString(eError)));
+			if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+			{
+				PVRSRVPowerUnlock(psDeviceNode);
+			}
+			return eError;
+		}
+	}
+
+	/* Power down the device if necessary */
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+										 PVRSRV_DEV_POWER_STATE_OFF,
+										 IMG_TRUE);
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed PVRSRVSetDevicePowerStateKM call (%s). Dump debug.",
+				 __func__, PVRSRVGetErrorString(eError)));
+
+		PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+
+		/*
+		 * If the driver is okay then return the error, otherwise we can ignore
+		 * this error.
+		 */
+		if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+		{
+			return eError;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+					 "%s: Will continue to unregister as driver status is not OK",
+					 __func__));
+		}
+	}
+
+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE)
+	DeinitDVFS(psDeviceNode);
+#endif
+
+#if defined(SUPPORT_RGX)
+	DevDeInitRGX(psDeviceNode);
+#endif
+
+	HTBDeviceDestroy(psDeviceNode);
+
+	if (psDeviceNode->hDbgReqNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify);
+	}
+
+	if (psDeviceNode->hThreadsDbgReqNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hThreadsDbgReqNotify);
+	}
+
+	SyncCheckpointDeinit(psDeviceNode);
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	ServerSyncDeinit(psDeviceNode);
+#endif
+#if defined(SUPPORT_RGX) && defined(SUPPORT_DEDICATED_FW_MEMORY) && !defined(NO_HARDWARE)
+	PhysmemDeinitFWDedicatedMem(psDeviceNode);
+#endif
+
+	/* Perform vz deinitialisation */
+	_VzDeviceDestroy(psDeviceNode);
+
+	List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
+
+	PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+	OSLockDestroy(psDeviceNode->hPowerLock);
+
+	PVRSRVUnregisterDbgTable(psDeviceNode);
+
+	psDeviceNode->psDevConfig->psDevNode = NULL;
+	SysVzDevDeInit(psDeviceNode->psDevConfig);
+	SysDevDeInit(psDeviceNode->psDevConfig);
+
+	OSFreeMemNoStats(psDeviceNode);
+
+	return PVRSRV_OK;
+}
+
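+/* Allocate physically contiguous pages (for MMU page tables) from the
+ * device's local memory arena. The requested size is rounded up to a
+ * power-of-two number of OS pages; the resulting card address serves as
+ * both the page handle and the device physical address.
+ */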
+PVRSRV_ERROR LMA_PhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+							PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr)
+{
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	IMG_UINT32 ui32OSid = 0;
+#endif
+	RA_BASE_T uiCardAddr;
+	RA_LENGTH_T uiActualSize;
+	PVRSRV_ERROR eError;
+
+	RA_ARENA *pArena=psDevNode->apsLocalDevMemArenas[0];
+	IMG_UINT32 ui32Log2NumPages = 0;
+
+	PVR_ASSERT(uiSize != 0);
+	ui32Log2NumPages = OSGetOrder(uiSize);
+	uiSize = (1 << ui32Log2NumPages) * OSGetPageSize();
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	IMG_UINT32 ui32OSidReg = 0;
+	IMG_BOOL   bOSidAxiProt;
+
+	IMG_PID    pId = OSGetCurrentClientProcessIDKM();
+
+	RetrieveOSidsfromPidList(pId, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+	pArena = psDevNode->psOSidSubArena[ui32OSid];
+}
+#endif
+
+	eError = RA_Alloc(pArena,
+	                  uiSize,
+	                  RA_NO_IMPORT_MULTIPLIER,
+	                  0,                         /* No flags */
+	                  uiSize,
+	                  "LMA_PhyContigPagesAlloc",
+	                  &uiCardAddr,
+	                  &uiActualSize,
+	                  NULL);                     /* No private handle */
+
+	PVR_ASSERT(uiSize == uiActualSize);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): LMA_PhyContigPagesAlloc: Address:%llu, size:%llu", uiCardAddr,uiActualSize));
+}
+#endif
+
+	psMemHandle->u.ui64Handle = uiCardAddr;
+	psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr;
+
+	if (PVRSRV_OK == eError)
+	{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+		PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+		                                    uiSize,
+		                                    (IMG_UINT64)(uintptr_t) psMemHandle,
+		                                    OSGetCurrentClientProcessIDKM());
+#else
+		IMG_CPU_PHYADDR sCpuPAddr;
+		sCpuPAddr.uiAddr = psDevPAddr->uiAddr;
+
+		PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+		                             NULL,
+		                             sCpuPAddr,
+		                             uiSize,
+		                             NULL,
+		                             OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+		psMemHandle->ui32Order = ui32Log2NumPages;
+	}
+
+	return eError;
+}
+
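+/* Return pages allocated by LMA_PhyContigPagesAlloc to the device's local
+ * memory arena and update the allocation statistics.
+ */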
+void LMA_PhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle)
+{
+	RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+	                                      (IMG_UINT64)(uintptr_t) psMemHandle);
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+									(IMG_UINT64)uiCardAddr,
+									OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+	RA_Free(psDevNode->apsLocalDevMemArenas[0], uiCardAddr);
+	psMemHandle->ui32Order = 0;
+}
+
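+/* Map pages allocated by LMA_PhyContigPagesAlloc into the kernel as
+ * write-combined memory, translating the device physical address to a CPU
+ * physical address via the GPU-local physical heap.
+ */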
+PVRSRV_ERROR LMA_PhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+							size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+							void **pvPtr)
+{
+	IMG_CPU_PHYADDR sCpuPAddr;
+	IMG_UINT32 ui32NumPages = (1 << psMemHandle->ui32Order);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+
+	PhysHeapDevPAddrToCpuPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], 1, &sCpuPAddr, psDevPAddr);
+	*pvPtr = OSMapPhysToLin(sCpuPAddr,
+							ui32NumPages * OSGetPageSize(),
+							PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE);
+	if (*pvPtr == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	else
+	{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+		PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+		                            ui32NumPages * OSGetPageSize(),
+		                            OSGetCurrentClientProcessIDKM());
+#else
+		{
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+										 *pvPtr,
+										 sCpuPAddr,
+										 ui32NumPages * OSGetPageSize(),
+										 NULL,
+										 OSGetCurrentClientProcessIDKM());
+		}
+#endif
+#endif
+		return PVRSRV_OK;
+	}
+}
+
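+/* Undo a mapping created by LMA_PhyContigPagesMap and update the
+ * allocation statistics.
+ */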
+void LMA_PhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+						void *pvPtr)
+{
+	IMG_UINT32 ui32NumPages = (1 << psMemHandle->ui32Order);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+	                            ui32NumPages * OSGetPageSize(),
+	                            OSGetCurrentClientProcessIDKM());
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+	                                (IMG_UINT64)(uintptr_t)pvPtr,
+	                                OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+	OSUnMapPhysToLin(pvPtr, ui32NumPages * OSGetPageSize(),
+					 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+}
+
+PVRSRV_ERROR LMA_PhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+                                     PG_HANDLE *psMemHandle,
+                                     IMG_UINT32 uiOffset,
+                                     IMG_UINT32 uiLength)
+{
+	/* No need to flush because we map as uncached */
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(psMemHandle);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiLength);
+
+	return PVRSRV_OK;
+}
+
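+/* Report whether the given physical heap of the device is backed by the
+ * LMA PMR factory, i.e. whether its allocations come from local memory.
+ */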
+IMG_BOOL IsPhysmemNewRamBackedByLMA(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx)
+{
+	return psDeviceNode->pfnCreateRamBackedPMR[ePhysHeapIdx] == PhysmemNewLocalRamBackedPMR;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceFinalise
+@Description  Performs the final parts of device initialisation.
+@Input        psDeviceNode            Device node of the device to finish
+                                      initialising
+@Input        bInitSuccessful         Whether or not device specific
+                                      initialisation was successful
+@Return       PVRSRV_ERROR     PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode,
+											   IMG_BOOL bInitSuccessful)
+{
+	PVRSRV_ERROR eError;
+
+	if (bInitSuccessful)
+	{
+		eError = SyncCheckpointContextCreate(psDeviceNode,
+											 &psDeviceNode->hSyncCheckpointContext);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to create sync checkpoint context (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+
+			goto ErrorExit;
+		}
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+		eError = SyncFbRegisterDevice(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			goto ErrorExit;
+		}
+#endif
+		eError = SyncPrimContextCreate(psDeviceNode,
+									   &psDeviceNode->hSyncPrimContext);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to create sync prim context (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+			SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext);
+			goto ErrorExit;
+		}
+
+		/* Allocate general purpose sync primitive */
+		eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+							   &psDeviceNode->psSyncPrim,
+							   "pvrsrv dev general");
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to allocate sync primitive with error (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+			goto ErrorExit;
+		}
+
+		/* Allocate MMU cache invalidate sync */
+		eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+							   &psDeviceNode->psMMUCacheSyncPrim,
+							   "pvrsrv dev MMU cache");
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to allocate sync primitive with error (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+			goto ErrorExit;
+		}
+
+		/* Next update value will be 1 since sync prim starts with 0 */
+		psDeviceNode->ui16NextMMUInvalidateUpdate = 1;
+
+		eError = PVRSRVPowerLock(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire power lock (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+			goto ErrorExit;
+		}
+
+		/*
+		 * Always ensure a single power on command appears in the pdump. This
+		 * should be the only power related call outside of PDUMPPOWCMDSTART
+		 * and PDUMPPOWCMDEND.
+		 */
+		eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+											 PVRSRV_DEV_POWER_STATE_ON, IMG_TRUE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to set device %p power state to 'on' (%s)",
+					 __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+			PVRSRVPowerUnlock(psDeviceNode);
+			goto ErrorExit;
+		}
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+		eError = ValidateFWOnLoad(psDeviceNode->pvDevice);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to verify FW code (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+			PVRSRVPowerUnlock(psDeviceNode);
+			return eError;
+		}
+#endif
+
+		/* Verify firmware compatibility for device */
+		if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+		{
+			/* In guest mode, defer the compatibility checks until after the
+			 * first kick has been submitted, as the firmware only fills in
+			 * the compatibility data then. */
+			eError = PVRSRV_OK;
+		}
+		else
+		{
+			eError = PVRSRVDevInitCompatCheck(psDeviceNode);
+		}
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed compatibility check for device %p (%s)",
+					 __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+			PVRSRVPowerUnlock(psDeviceNode);
+			PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+			goto ErrorExit;
+		}
+
+		PDUMPPOWCMDSTART();
+
+		/* Force the device to idle if its default power state is off */
+		eError = PVRSRVDeviceIdleRequestKM(psDeviceNode,
+										   &PVRSRVDeviceIsDefaultStateOFF,
+										   IMG_TRUE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle request failure (%s)",
+			                        __func__, PVRSRVGetErrorString(eError)));
+			if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+			{
+				PVRSRVPowerUnlock(psDeviceNode);
+			}
+			goto ErrorExit;
+		}
+
+		/* Place device into its default power state. */
+		eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+											 PVRSRV_DEV_POWER_STATE_DEFAULT,
+											 IMG_TRUE);
+		PDUMPPOWCMDEND();
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to set device %p into its default power state (%s)",
+					 __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+
+			PVRSRVPowerUnlock(psDeviceNode);
+			goto ErrorExit;
+		}
+
+		PVRSRVPowerUnlock(psDeviceNode);
+
+		/*
+		 * If PDUMP is enabled and RGX device is supported, then initialise the
+		 * performance counters that can be further modified in PDUMP. Then,
+		 * before ending the init phase of the pdump, drain the commands put in
+		 * the kCCB during the init phase.
+		 */
+#if defined(SUPPORT_RGX) && defined(PDUMP)
+		{
+			PVRSRV_RGXDEV_INFO *psDevInfo =
+				(PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice);
+
+			eError = RGXInitHWPerfCounters(psDeviceNode);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to init hwperf counters (%s)",
+						 __func__, PVRSRVGetErrorString(eError)));
+				goto ErrorExit;
+			}
+
+			eError = RGXPdumpDrainKCCB(psDevInfo,
+									   psDevInfo->psKernelCCBCtl->ui32WriteOffset);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Problem draining kCCB (%s)",
+						 __func__, PVRSRVGetErrorString(eError)));
+				goto ErrorExit;
+			}
+		}
+#endif
+
+		/* Now that the device(s) are fully initialised set them as active */
+		psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_ACTIVE;
+		eError = PVRSRV_OK;
+
+#if defined(SUPPORT_RGX)
+		if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+		{
+			/* Kick an initial dummy command to make the firmware initialise all
+			 * its internal guest OS data structures and compatibility information */
+			eError = RGXFWHealthCheckCmd((PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice));
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Cannot kick initial command to the Device (%s)",
+						 __func__, PVRSRVGetErrorString(eError)));
+				goto ErrorExit;
+			}
+
+			eError = PVRSRVDevInitCompatCheck(psDeviceNode);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed compatibility check for device %p (%s)",
+						 __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+				PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+				goto ErrorExit;
+			}
+		}
+#endif
+	}
+	else
+	{
+		/* Initialisation failed so set the device(s) into a bad state */
+		psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD;
+		eError = PVRSRV_ERROR_NOT_INITIALISED;
+	}
+
+	/* Give PDump control a chance to end the init phase, depends on OS */
+	PDumpStopInitPhase();
+
+	return eError;
+
+ErrorExit:
+	/* Initialisation failed so set the device(s) into a bad state */
+	psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD;
+
+	return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	/* Only check devices which specify a compatibility check callback */
+	if (psDeviceNode->pfnInitDeviceCompatCheck)
+		return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode);
+	else
+		return PVRSRV_OK;
+}
+
+/*
+	PollForValueKM
+
+	Poll a memory location until (*pui32LinMemAddr & ui32Mask) == ui32Value,
+	or until ui32Timeoutus expires; sleeps between polls when preemption is
+	allowed, otherwise busy-waits.
+*/
+static
+PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32 __iomem *	pui32LinMemAddr,
+										  IMG_UINT32			ui32Value,
+										  IMG_UINT32			ui32Mask,
+										  IMG_UINT32			ui32Timeoutus,
+										  IMG_UINT32			ui32PollPeriodus,
+										  IMG_BOOL				bAllowPreemption)
+{
+#if defined(NO_HARDWARE)
+	PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(ui32Timeoutus);
+	PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus);
+	PVR_UNREFERENCED_PARAMETER(bAllowPreemption);
+	return PVRSRV_OK;
+#else
+	IMG_UINT32	ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */
+
+	if (bAllowPreemption)
+	{
+		PVR_ASSERT(ui32PollPeriodus >= 1000);
+	}
+
+	LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
+	{
+		ui32ActualValue = OSReadHWReg32(pui32LinMemAddr, 0) & ui32Mask;
+
+		if (ui32ActualValue == ui32Value)
+		{
+			return PVRSRV_OK;
+		}
+
+		if (gpsPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+		{
+			return PVRSRV_ERROR_TIMEOUT;
+		}
+
+		if (bAllowPreemption)
+		{
+			OSSleepms(ui32PollPeriodus / 1000);
+		}
+		else
+		{
+			OSWaitus(ui32PollPeriodus);
+		}
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).",
+			ui32Value, ui32ActualValue, ui32Mask));
+
+	return PVRSRV_ERROR_TIMEOUT;
+#endif /* NO_HARDWARE */
+}
+
+
+/*
+	PVRSRVPollForValueKM
+*/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPollForValueKM (volatile IMG_UINT32	__iomem *pui32LinMemAddr,
+												IMG_UINT32			ui32Value,
+												IMG_UINT32			ui32Mask)
+{
+	return PollForValueKM(pui32LinMemAddr, ui32Value, ui32Mask,
+						  MAX_HW_TIME_US,
+						  MAX_HW_TIME_US/WAIT_TRY_COUNT,
+						  IMG_FALSE);
+}
+
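+/* Wait for a memory location to reach the expected (masked) value by
+ * blocking on the global event object between reads rather than
+ * busy-polling; optionally retains the bridge lock while waiting.
+ */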
+static
+PVRSRV_ERROR IMG_CALLCONV WaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+                                         IMG_UINT32           ui32Value,
+                                         IMG_UINT32           ui32Mask,
+                                         IMG_BOOL             bHoldBridgeLock)
+{
+#if defined(NO_HARDWARE)
+	PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	return PVRSRV_OK;
+#else
+
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eErrorWait;
+	IMG_UINT32 ui32ActualValue;
+
+	eError = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hOSEvent);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVWaitForValueKM: Failed to setup EventObject with error (%d)", eError));
+		goto EventObjectOpenError;
+	}
+
+	eError = PVRSRV_ERROR_TIMEOUT;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		ui32ActualValue = (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask);
+
+		if (ui32ActualValue == ui32Value)
+		{
+			/* Expected value has been found */
+			eError = PVRSRV_OK;
+			break;
+		}
+		else if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+		{
+			/* Services in bad state, don't wait any more */
+			eError = PVRSRV_ERROR_NOT_READY;
+			break;
+		}
+		else
+		{
+			/* wait for event and retry */
+			eErrorWait = bHoldBridgeLock ? OSEventObjectWaitAndHoldBridgeLock(hOSEvent) : OSEventObjectWait(hOSEvent);
+			if (eErrorWait != PVRSRV_OK  &&  eErrorWait != PVRSRV_ERROR_TIMEOUT)
+			{
+				PVR_DPF((PVR_DBG_WARNING,"PVRSRVWaitForValueKM: Waiting for value failed with error %d. Expected 0x%x but found 0x%x (Mask 0x%08x). Retrying",
+							eErrorWait,
+							ui32Value,
+							ui32ActualValue,
+							ui32Mask));
+			}
+		}
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	OSEventObjectClose(hOSEvent);
+
+	/* One last check in case the object wait ended after the loop timeout... */
+	if (eError != PVRSRV_OK  &&  (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask) == ui32Value)
+	{
+		eError = PVRSRV_OK;
+	}
+
+	/* Provide event timeout information to aid the Device Watchdog Thread... */
+	if (eError == PVRSRV_OK)
+	{
+		psPVRSRVData->ui32GEOConsecutiveTimeouts = 0;
+	}
+	else if (eError == PVRSRV_ERROR_TIMEOUT)
+	{
+		psPVRSRVData->ui32GEOConsecutiveTimeouts++;
+	}
+
+EventObjectOpenError:
+
+	return eError;
+
+#endif /* NO_HARDWARE */
+}
+
+/*
+	PVRSRVWaitForValueKM
+*/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKM (volatile IMG_UINT32	__iomem *pui32LinMemAddr,
+												IMG_UINT32			ui32Value,
+												IMG_UINT32			ui32Mask)
+{
+	/* In this case we are NOT retaining the bridge lock while
+	   waiting for the value. */
+	return WaitForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, IMG_FALSE);
+}
+
+/*
+	PVRSRVWaitForValueKMAndHoldBridgeLockKM
+*/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKMAndHoldBridgeLockKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+                                                                  IMG_UINT32          ui32Value,
+                                                                  IMG_UINT32          ui32Mask)
+{
+	return WaitForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, IMG_TRUE);
+}
+
+int PVRSRVGetDriverStatus(void)
+{
+	return PVRSRVGetPVRSRVData()->eServicesState;
+}
+
+/*
+	PVRSRVSystemHasCacheSnooping
+*/
+IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	if ((psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_NONE) &&
+		(psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_EMULATED))
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	if (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_EMULATED)
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CPU_ONLY) ||
+		(psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS))
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_DEVICE_ONLY) ||
+		(psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS))
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	return psDevConfig->bHasNonMappableLocalMemory;
+}
+
+/*
+	PVRSRVSystemWaitCycles
+*/
+void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles)
+{
+	/* Delay in us */
+	IMG_UINT32 ui32Delayus = 1;
+
+	/* obtain the device freq */
+	if (psDevConfig->pfnClockFreqGet != NULL)
+	{
+		IMG_UINT32 ui32DeviceFreq;
+
+		ui32DeviceFreq = psDevConfig->pfnClockFreqGet(psDevConfig->hSysData);
+
+		ui32Delayus = (ui32Cycles * 1000000) / ui32DeviceFreq;
+
+		if (ui32Delayus == 0)
+		{
+			ui32Delayus = 1;
+		}
+	}
+
+	OSWaitus(ui32Delayus);
+}
+
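+/* Match callback used by PVRSRVSystemInstallDeviceLISR to locate the
+ * device node whose device config references the given OS device pointer.
+ */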
+static void *
+PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode,
+											va_list va)
+{
+	void *pvOSDevice = va_arg(va, void *);
+
+	if (psDeviceNode->psDevConfig->pvOSDevice == pvOSDevice)
+	{
+		return psDeviceNode;
+	}
+
+	return NULL;
+}
+
+PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice,
+										   IMG_UINT32 ui32IRQ,
+										   const IMG_CHAR *pszName,
+										   PFN_LISR pfnLISR,
+										   void *pvData,
+										   IMG_HANDLE *phLISRData)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+
+	psDeviceNode =
+		List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+									   &PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb,
+									   pvOSDevice);
+	if (!psDeviceNode)
+	{
+		/* Device can't be found in the list so it isn't in the system */
+		PVR_DPF((PVR_DBG_ERROR, "%s: device %p with irq %d is not present",
+				 __func__, pvOSDevice, ui32IRQ));
+		return PVRSRV_ERROR_INVALID_DEVICE;
+	}
+
+	return SysInstallDeviceLISR(psDeviceNode->psDevConfig->hSysData, ui32IRQ,
+								pszName, pfnLISR, pvData, phLISRData);
+}
+
+PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+	return SysUninstallDeviceLISR(hLISRData);
+}
+
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingHeapGetXStride(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									IMG_UINT32 uiHeapNum,
+									IMG_UINT32 *puiXStride)
+{
+	PVR_ASSERT(puiXStride != NULL);
+
+	if (uiHeapNum < 1 || uiHeapNum > psDevConfig->ui32BIFTilingHeapCount)
+	{
+		*puiXStride = 0;
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*puiXStride = psDevConfig->pui32BIFTilingHeapConfigs[uiHeapNum - 1];
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingGetConfig(PVRSRV_DEVICE_CONFIG  *psDevConfig,
+                               RGXFWIF_BIFTILINGMODE *peBifTilingMode,
+                               IMG_UINT32            *puiNumHeaps)
+{
+	*peBifTilingMode = psDevConfig->eBIFTilingMode;
+	*puiNumHeaps = psDevConfig->ui32BIFTilingHeapCount;
+	return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState)
+{
+	SysSetAxiProtOSid(ui32OSid, bState);
+	return;
+}
+
+void SetTrustedDeviceAceEnabled(void)
+{
+	SysSetTrustedDeviceAceEnabled();
+
+	return;
+}
+#endif
+
+#if defined(SUPPORT_RGX)
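+/* Create, at most once, the thread that periodically emits HWPerf host
+ * stream packets. If the thread already exists, only its timeout is
+ * updated and the thread is woken via its event object.
+ */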
+PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!ui32Timeout)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	OSLockAcquire(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock);
+
+	/* Create only once */
+	if (gpsPVRSRVData->hHWPerfHostPeriodicThread == NULL)
+	{
+		/* Create the HWPerf event object */
+		eError = OSEventObjectCreate("PVRSRV_HWPERFHOSTPERIODIC_EVENTOBJECT", &gpsPVRSRVData->hHWPerfHostPeriodicEvObj);
+
+		if (eError == PVRSRV_OK)
+		{
+			gpsPVRSRVData->bHWPerfHostThreadStop = IMG_FALSE;
+			gpsPVRSRVData->ui32HWPerfHostThreadTimeout = ui32Timeout;
+			/* Create a thread which is used to periodically emit host stream packets */
+			eError = OSThreadCreate(&gpsPVRSRVData->hHWPerfHostPeriodicThread,
+				"pvr_hwperf_host",
+				HWPerfPeriodicHostEventsThread,
+				NULL, IMG_TRUE, gpsPVRSRVData);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create HWPerf host periodic thread", __func__));
+			}
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: OSEventObjectCreate failed", __func__));
+		}
+	}
+	/* If the thread has already been created then just update the timeout and wake up thread */
+	else
+	{
+		gpsPVRSRVData->ui32HWPerfHostThreadTimeout = ui32Timeout;
+		eError = OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+	}
+
+	OSLockRelease(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock);
+	return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyHWPerfHostThread(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	OSLockAcquire(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock);
+
+	/* Stop and cleanup the HWPerf periodic thread */
+	if (gpsPVRSRVData->hHWPerfHostPeriodicThread)
+	{
+		if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj)
+		{
+			gpsPVRSRVData->bHWPerfHostThreadStop = IMG_TRUE;
+			eError = OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj);
+			PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+		}
+		LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+		{
+			eError = OSThreadDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread);
+			if (PVRSRV_OK == eError)
+			{
+				gpsPVRSRVData->hHWPerfHostPeriodicThread = NULL;
+				break;
+			}
+			OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+		PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+
+		if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj)
+		{
+			eError = OSEventObjectDestroy(gpsPVRSRVData->hHWPerfHostPeriodicEvObj);
+			gpsPVRSRVData->hHWPerfHostPeriodicEvObj = NULL;
+			PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+		}
+	}
+
+	OSLockRelease(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock);
+	return eError;
+}
+#endif
+
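+/* Virtualization-specific device setup: when the GPU physheap is a UMA/DMA
+ * carve-out, construct an RA over it and switch the device to the LMA page
+ * callbacks; then construct per-OSID RAs for the firmware config and main
+ * heaps. Does nothing in native mode.
+ */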
+static PVRSRV_ERROR _VzDeviceCreate(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	RA_BASE_T uBase;
+	RA_LENGTH_T uSize;
+	IMG_UINT ui32OSID;
+	IMG_UINT64 ui64Size;
+	PVRSRV_ERROR eError;
+	PHYS_HEAP *psPhysHeap;
+	IMG_CPU_PHYADDR sCpuPAddr;
+	IMG_DEV_PHYADDR sDevPAddr;
+	PHYS_HEAP_TYPE eHeapType;
+	IMG_UINT32 ui32NumOfHeapRegions;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+	/* First, register device GPU physical heap based on physheap config */
+	psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+	ui32NumOfHeapRegions = PhysHeapNumberOfRegions(psPhysHeap);
+	eHeapType = PhysHeapGetType(psPhysHeap);
+
+	/* Normally a GPU UMA physheap is backed by OS services, but we override
+	   this when the physheap is a DMA/UMA carve-out; in that case an RA is
+	   created to manage it */
+	if (eHeapType == PHYS_HEAP_TYPE_UMA || eHeapType == PHYS_HEAP_TYPE_DMA)
+	{
+		if (ui32NumOfHeapRegions)
+		{
+			eError = PhysHeapRegionGetCpuPAddr(psPhysHeap, 0, &sCpuPAddr);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_ASSERT(IMG_FALSE);
+				goto e0;
+			}
+
+			eError = PhysHeapRegionGetSize(psPhysHeap, 0, &ui64Size);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_ASSERT(IMG_FALSE);
+				goto e0;
+			}
+
+			eError = PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sDevPAddr);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_ASSERT(IMG_FALSE);
+				goto e0;
+			}
+		}
+		else
+		{
+			sDevPAddr.uiAddr = (IMG_UINT64)0;
+			sCpuPAddr.uiAddr = (IMG_UINT64)0;
+			ui64Size = (IMG_UINT64)0;
+		}
+
+		if (sCpuPAddr.uiAddr && sDevPAddr.uiAddr && ui64Size)
+		{
+			psDeviceNode->ui32NumOfLocalMemArenas = ui32NumOfHeapRegions;
+			PVR_ASSERT(ui32NumOfHeapRegions == 1);
+
+			PVR_DPF((PVR_DBG_MESSAGE, "===== UMA (carve-out) memory, 1st phys heap (gpu)"));
+
+			PVR_DPF((PVR_DBG_MESSAGE, "Creating RA for gpu memory 0x%016"IMG_UINT64_FMTSPECX"-0x%016"IMG_UINT64_FMTSPECX,
+					(IMG_UINT64) sCpuPAddr.uiAddr, sCpuPAddr.uiAddr + ui64Size - 1));
+
+			uBase = sDevPAddr.uiAddr;
+			uSize = (RA_LENGTH_T) ui64Size;
+			PVR_ASSERT(uSize == ui64Size);
+
+			psDeviceNode->apsLocalDevMemArenas = OSAllocMem(sizeof(RA_ARENA*));
+			PVR_ASSERT(psDeviceNode->apsLocalDevMemArenas);
+			psDeviceNode->apszRANames = OSAllocMem(sizeof(IMG_PCHAR));
+			PVR_ASSERT(psDeviceNode->apszRANames);
+			psDeviceNode->apszRANames[0] = OSAllocMem(PVRSRV_MAX_RA_NAME_LENGTH);
+			PVR_ASSERT(psDeviceNode->apszRANames[0]);
+
+			OSSNPrintf(psDeviceNode->apszRANames[0], PVRSRV_MAX_RA_NAME_LENGTH,
+						"%s gpu mem", psDeviceNode->psDevConfig->pszName);
+
+			psDeviceNode->apsLocalDevMemArenas[0] =
+				RA_Create(psDeviceNode->apszRANames[0],
+							OSGetPageShift(),	/* Use OS page size, keeps things simple */
+							RA_LOCKCLASS_0,		/* This arena doesn't use any other arenas. */
+							NULL,				/* No Import */
+							NULL,				/* No free import */
+							NULL,				/* No import handle */
+							IMG_FALSE);
+			if (psDeviceNode->apsLocalDevMemArenas[0] == NULL)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e0;
+			}
+
+			if (!RA_Add(psDeviceNode->apsLocalDevMemArenas[0], uBase, uSize, 0, NULL))
+			{
+				RA_Delete(psDeviceNode->apsLocalDevMemArenas[0]);
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e0;
+			}
+
+			/* Replace the UMA allocator with LMA allocator */
+			psDeviceNode->pfnDevPxAlloc = LMA_PhyContigPagesAlloc;
+			psDeviceNode->pfnDevPxFree = LMA_PhyContigPagesFree;
+			psDeviceNode->pfnDevPxMap = LMA_PhyContigPagesMap;
+			psDeviceNode->pfnDevPxUnMap = LMA_PhyContigPagesUnmap;
+			psDeviceNode->pfnDevPxClean = LMA_PhyContigPagesClean;
+			psDeviceNode->uiMMUPxLog2AllocGran = OSGetPageShift();
+			psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+		}
+	}
+	else
+	{
+		/* LMA heap sanity check */
+		PVR_ASSERT(ui32NumOfHeapRegions);
+	}
+
+	/* Next, register device firmware physical heap based on heap config */
+	psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	ui32NumOfHeapRegions = PhysHeapNumberOfRegions(psPhysHeap);
+	eHeapType = PhysHeapGetType(psPhysHeap);
+	PVR_ASSERT(eHeapType != PHYS_HEAP_TYPE_UNKNOWN);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "===== LMA/DMA/UMA (carve-out) memory, 2nd phys heap (fw)"));
+
+	if (ui32NumOfHeapRegions)
+	{
+		eError = PhysHeapRegionGetCpuPAddr(psPhysHeap, 0, &sCpuPAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_ASSERT(IMG_FALSE);
+			goto e0;
+		}
+
+		eError = PhysHeapRegionGetSize(psPhysHeap, 0, &ui64Size);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_ASSERT(IMG_FALSE);
+			goto e0;
+		}
+
+		eError = PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sDevPAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_ASSERT(IMG_FALSE);
+			goto e0;
+		}
+	}
+	else
+	{
+		sDevPAddr.uiAddr = (IMG_UINT64)0;
+		sCpuPAddr.uiAddr = (IMG_UINT64)0;
+		ui64Size = (IMG_UINT64)0;
+	}
+
+	if (ui32NumOfHeapRegions)
+	{
+#if defined(SUPPORT_RGX)
+		PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+		RA_LENGTH_T uConfigSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE;
+		RA_LENGTH_T uMainSize = 0;
+
+		uMainSize = (RA_LENGTH_T) RGXGetFwMainHeapSize(psDeviceNode->pvDevice);
+
+		SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig,
+							   PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+							   &eHeapOrigin);
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Creating RA for  fw memory 0x%016"IMG_UINT64_FMTSPECX"-0x%016"IMG_UINT64_FMTSPECX,
+				(IMG_UINT64) sCpuPAddr.uiAddr, sCpuPAddr.uiAddr + ui64Size - 1));
+
+		/* Now we construct RA to manage FW heap */
+		uBase = sDevPAddr.uiAddr;
+		uSize = (RA_LENGTH_T) ui64Size;
+		PVR_ASSERT(sCpuPAddr.uiAddr && uSize == ui64Size);
+		if (eHeapType != PHYS_HEAP_TYPE_LMA)
+		{
+			/* On some LMA configs the fw base legitimately starts at zero,
+			   so only assert a non-zero base for non-LMA heaps */
+			PVR_ASSERT(sDevPAddr.uiAddr);
+		}
+
+		/* All vz drivers go through this loop; it terminates early for guest driver(s) */
+		for (ui32OSID = 0; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+		{
+			RA_BASE_T uOSIDConfigBase,	uOSIDMainBase;
+#if defined(SUPPORT_RGX)
+			if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST) && ui32OSID == 0)
+			{
+				uOSIDMainBase = uBase;
+				uOSIDConfigBase = uOSIDMainBase + RGXGetFwMainHeapSize((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice);
+			}
+			else
+#else
+			/* Assert here if SUPPORT_RGX = 0 as function is broken */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Func not operational with SUPPORT_RGX = 0", __func__));
+			PVR_ASSERT(0);
+#endif
+			{
+				uOSIDConfigBase = uBase + (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE);
+				uOSIDMainBase = uOSIDConfigBase + uConfigSize;
+			}
+
+			OSSNPrintf(psDeviceNode->szKernelFwConfigRAName[ui32OSID], sizeof(psDeviceNode->szKernelFwConfigRAName[ui32OSID]),
+									"%s fw mem", psDeviceNode->psDevConfig->pszName);
+
+			psDeviceNode->psKernelFwConfigMemArena[ui32OSID] =
+				RA_Create(psDeviceNode->szKernelFwConfigRAName[ui32OSID],
+							OSGetPageShift(),		/* Use OS page size, keeps things simple */
+							RA_LOCKCLASS_0,			/* This arena doesn't use any other arenas. */
+							NULL,				/* No Import */
+							NULL,				/* No free import */
+							NULL,				/* No import handle */
+							IMG_FALSE);
+			if (psDeviceNode->psKernelFwConfigMemArena[ui32OSID] == NULL)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e1;
+			}
+
+			if (!RA_Add(psDeviceNode->psKernelFwConfigMemArena[ui32OSID], uOSIDConfigBase, uConfigSize, 0, NULL))
+			{
+				RA_Delete(psDeviceNode->psKernelFwConfigMemArena[ui32OSID]);
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e1;
+			}
+
+			OSSNPrintf(psDeviceNode->szKernelFwMainRAName[ui32OSID], sizeof(psDeviceNode->szKernelFwMainRAName[ui32OSID]),
+						"%s fw mem", psDeviceNode->psDevConfig->pszName);
+
+			psDeviceNode->psKernelFwMainMemArena[ui32OSID] =
+				RA_Create(psDeviceNode->szKernelFwMainRAName[ui32OSID],
+							OSGetPageShift(),		/* Use OS page size, keeps things simple */
+							RA_LOCKCLASS_0,			/* This arena doesn't use any other arenas. */
+							NULL,				/* No Import */
+							NULL,				/* No free import */
+							NULL,				/* No import handle */
+							IMG_FALSE);
+			if (psDeviceNode->psKernelFwMainMemArena[ui32OSID] == NULL)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e1;
+			}
+
+			if (!RA_Add(psDeviceNode->psKernelFwMainMemArena[ui32OSID], uOSIDMainBase, uMainSize, 0, NULL))
+			{
+				RA_Delete(psDeviceNode->psKernelFwMainMemArena[ui32OSID]);
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e1;
+			}
+
+			/* Guest drivers must not initialise the subsequent array entries, as the driver depends on this */
+			if (eHeapOrigin != PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST || PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+			{
+				break;
+			}
+		}
+#else
+		PVR_UNREFERENCED_PARAMETER(ui32OSID);
+		PVR_DPF((PVR_DBG_ERROR,"Support RGX undef"));
+		PVR_ASSERT(0);
+#endif
+		/* Fw physheap is always managed by LMA PMR factory */
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewLocalRamBackedPMR;
+	}
+
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST))
+	{
+		/* The guest FW physheap is a pseudo-heap, always managed by the LMA
+		   PMR factory and used exclusively by the host driver. We do not
+		   create actual heap meta-data for it, since it is only used during
+		   guest driver FW initialisation; this saves providing
+		   pfnCpuPAddrToDevPAddr/pfnDevPAddrToCpuPAddr callbacks, which are
+		   not needed as the host driver will _never_ access this guest
+		   firmware heap - instead the real FW heap meta-data is reused */
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST] = PhysmemNewLocalRamBackedPMR;
+		psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST] =
+											psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	}
+
+	return PVRSRV_OK;
+#if defined(SUPPORT_RGX)
+e1:
+	_VzDeviceDestroy(psDeviceNode);
+#endif
+e0:
+	return eError;
+}
+
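+/* Counterpart to _VzDeviceCreate: tear down the per-OSID firmware RAs and,
+ * for carve-out GPU heaps, the local device memory arena and its name
+ * strings. Does nothing in native mode.
+ */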
+static void _VzDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	IMG_UINT ui32OSID;
+	IMG_UINT64 ui64Size;
+	PHYS_HEAP *psPhysHeap;
+	IMG_CPU_PHYADDR sCpuPAddr;
+	IMG_DEV_PHYADDR sDevPAddr;
+	PHYS_HEAP_TYPE eHeapType;
+	IMG_UINT32 ui32NumOfHeapRegions;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_NATIVE);
+
+	/* First, unregister device firmware physical heap based on heap config */
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST))
+	{
+		/* Remove pseudo-heap pointer, rest of heap deinitialization is unaffected */
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST] = NULL;
+		psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST] = NULL;
+	}
+
+	psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	ui32NumOfHeapRegions = PhysHeapNumberOfRegions(psPhysHeap);
+
+	if (ui32NumOfHeapRegions)
+	{
+		for (ui32OSID = 0; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+		{
+			if (psDeviceNode->psKernelFwMainMemArena[ui32OSID])
+			{
+				RA_Delete(psDeviceNode->psKernelFwMainMemArena[ui32OSID]);
+				psDeviceNode->psKernelFwMainMemArena[ui32OSID] = NULL;
+			}
+
+			if (psDeviceNode->psKernelFwConfigMemArena[ui32OSID])
+			{
+				RA_Delete(psDeviceNode->psKernelFwConfigMemArena[ui32OSID]);
+				psDeviceNode->psKernelFwConfigMemArena[ui32OSID] = NULL;
+			}
+		}
+	}
+
+	/* Next, unregister device GPU physical heap based on heap config */
+	psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+	ui32NumOfHeapRegions = PhysHeapNumberOfRegions(psPhysHeap);
+	eHeapType = PhysHeapGetType(psPhysHeap);
+
+	if (eHeapType == PHYS_HEAP_TYPE_UMA || eHeapType == PHYS_HEAP_TYPE_DMA)
+	{
+		if (ui32NumOfHeapRegions)
+		{
+			if (PhysHeapRegionGetCpuPAddr(psPhysHeap, 0, &sCpuPAddr) != PVRSRV_OK)
+			{
+				PVR_ASSERT(IMG_FALSE);
+				return;
+			}
+
+			if (PhysHeapRegionGetSize(psPhysHeap, 0, &ui64Size) != PVRSRV_OK)
+			{
+				PVR_ASSERT(IMG_FALSE);
+				return;
+			}
+
+			if (PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sDevPAddr) != PVRSRV_OK)
+			{
+				PVR_ASSERT(IMG_FALSE);
+				return;
+			}
+		}
+		else
+		{
+			sDevPAddr.uiAddr = (IMG_UINT64)0;
+			sCpuPAddr.uiAddr = (IMG_UINT64)0;
+			ui64Size = (IMG_UINT64)0;
+		}
+
+		if (sCpuPAddr.uiAddr && sDevPAddr.uiAddr && ui64Size)
+		{
+			if (psDeviceNode->apsLocalDevMemArenas && psDeviceNode->apsLocalDevMemArenas[0])
+			{
+				RA_Delete(psDeviceNode->apsLocalDevMemArenas[0]);
+				psDeviceNode->apsLocalDevMemArenas[0] = NULL;
+				OSFreeMem(psDeviceNode->apsLocalDevMemArenas);
+				psDeviceNode->apsLocalDevMemArenas = NULL;
+			}
+
+			if (psDeviceNode->apszRANames)
+			{
+				OSFreeMem(psDeviceNode->apszRANames[0]);
+				psDeviceNode->apszRANames[0] = NULL;
+				OSFreeMem(psDeviceNode->apszRANames);
+				psDeviceNode->apszRANames = NULL;
+			}
+		}
+	}
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVVzRegisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+															IMG_DEV_PHYADDR sDevPAddr,
+															IMG_UINT64 ui64DevPSize,
+															IMG_UINT32 uiOSID)
+{
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PHYS_HEAP *psPhysHeap;
+	PVRSRV_ERROR eError;
+
+	/*
+	   This is called by the host driver only; it creates an RA to manage the
+	   given guest firmware physheap, so we fail the call if an invalid guest
+	   OSID is supplied.
+	*/
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_ERROR_INTERNAL_ERROR);
+	PVR_DPF((PVR_DBG_MESSAGE, "===== Registering OSID: %d fw physheap memory", uiOSID));
+	PVR_LOGR_IF_FALSE(((uiOSID > 0)&&(uiOSID < RGXFW_NUM_OS)), "Invalid guest OSID", PVRSRV_ERROR_INVALID_PARAMS);
+
+	/* Verify guest size with host size (support only same sized FW heaps) */
+	psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+
+	if (ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				"OSID: %d fw physheap size 0x%"IMG_UINT64_FMTSPECX" differs from host fw phyheap size 0x%X",
+				uiOSID,
+				ui64DevPSize,
+				RGX_FIRMWARE_RAW_HEAP_SIZE));
+
+		PVR_DPF((PVR_DBG_WARNING,
+				"Truncating OSID: %d requested fw physheap to: 0x%X\n",
+				uiOSID,
+				RGX_FIRMWARE_RAW_HEAP_SIZE));
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Creating RA for fw 0x%016"IMG_UINT64_FMTSPECX"-0x%016"IMG_UINT64_FMTSPECX" [DEV/PA]",
+			(IMG_UINT64) sDevPAddr.uiAddr, sDevPAddr.uiAddr + RGX_FIRMWARE_RAW_HEAP_SIZE - 1));
+
+	SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig,
+						   PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+						   &eHeapOrigin);
+	PVR_LOGR_IF_FALSE((eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST),
+					  "PVRSRVVzRegisterFirmwarePhysHeap: Host PVZ config: Invalid PVZ setup\n"
+					  "=>: all driver types (i.e. host/guest) must use same FW heap origin",
+					  PVRSRV_ERROR_INVALID_PARAMS);
+
+	OSSNPrintf(psDeviceNode->szKernelFwRawRAName[uiOSID],
+			   sizeof(psDeviceNode->szKernelFwRawRAName[uiOSID]),
+			   "[OSID: %d]: raw guest fw mem", uiOSID);
+
+	eError = _VzConstructRAforFwHeap(&psDeviceNode->psKernelFwRawMemArena[uiOSID],
+									 psDeviceNode->szKernelFwRawRAName[uiOSID],
+									 sDevPAddr.uiAddr,
+									 RGX_FIRMWARE_RAW_HEAP_SIZE);
+	if (eError == PVRSRV_OK)
+	{
+		psDeviceNode->ui64RABase[uiOSID] = sDevPAddr.uiAddr;
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVVzUnregisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+															 IMG_UINT32 uiOSID)
+{
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_ERROR_INTERNAL_ERROR);
+	PVR_DPF((PVR_DBG_MESSAGE, "===== Unregistering OSID: %d fw physheap memory", uiOSID));
+	PVR_LOGR_IF_FALSE(((uiOSID > 0)&&(uiOSID < RGXFW_NUM_OS)), "Invalid guest OSID", PVRSRV_ERROR_INVALID_PARAMS);
+
+	SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig,
+						   PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+						   &eHeapOrigin);
+	PVR_LOGR_IF_FALSE((eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST),
+					  "PVRSRVVzUnregisterFirmwarePhysHeap: Host PVZ config: Invalid PVZ setup\n"
+					  "=>: all driver types (i.e. host/guest) must use same FW heap origin",
+					  PVRSRV_ERROR_INVALID_PARAMS);
+
+	_VzTearDownRAforFwHeap(&psDeviceNode->psKernelFwRawMemArena[uiOSID], (IMG_UINT64)psDeviceNode->ui64RABase[uiOSID]);
+
+	return PVRSRV_OK;
+}
+
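+/* Create an RA spanning [uBase, uBase + uSize) to manage an FW raw heap. */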
+static PVRSRV_ERROR _VzConstructRAforFwHeap(RA_ARENA **ppsArena, IMG_CHAR *szName,
+											IMG_UINT64 uBase, RA_LENGTH_T uSize)
+{
+	PVRSRV_ERROR eError;
+
+	/* Construct RA to manage FW Raw heap */
+	*ppsArena = RA_Create(szName,
+						OSGetPageShift(),		/* Use host page size, keeps things simple */
+						RA_LOCKCLASS_0,			/* This arena doesn't use any other arenas */
+						NULL,					/* No Import */
+						NULL,					/* No free import */
+						NULL,					/* No import handle */
+						IMG_FALSE);
+	eError = (*ppsArena == NULL) ? (PVRSRV_ERROR_OUT_OF_MEMORY) : (PVRSRV_OK);
+
+	if (eError == PVRSRV_OK && !RA_Add(*ppsArena, uBase, uSize, 0, NULL))
+	{
+		RA_Delete(*ppsArena);
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	return eError;
+}
+
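+/* Release the FW raw heap span and delete the RA created by
+ * _VzConstructRAforFwHeap.
+ */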
+static void _VzTearDownRAforFwHeap(RA_ARENA **ppsArena, IMG_UINT64 uBase)
+{
+	RA_Free(*ppsArena, uBase);
+	RA_Delete(*ppsArena);
+	*ppsArena = NULL;
+}
+
+/*****************************************************************************
+ End of file (pvrsrv.c)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv.h
new file mode 100644
index 0000000..b7bafab
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv.h
@@ -0,0 +1,544 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR services server header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_H
+#define PVRSRV_H
+
+#include "connection_server.h"
+#include "device.h"
+#include "power.h"
+#include "syscommon.h"
+#include "sysinfo.h"
+#include "physheap.h"
+#include "cache_ops.h"
+#include "pvr_notifier.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "pvrsrv_pool.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#include "dma_support.h"
+#include "vz_support.h"
+#include "vz_physheap.h"
+
+/*!
+ * For OSThreadDestroy(), which may require a retry:
+ * try for 100 ms to destroy an OS thread before failing.
+ */
+#define OS_THREAD_DESTROY_TIMEOUT_US 100000ULL
+#define OS_THREAD_DESTROY_RETRY_COUNT 10
+
+typedef enum _VMM_CONF_PARAM_
+{
+	VMM_CONF_PRIO_OSID0 = 0,
+	VMM_CONF_PRIO_OSID1 = 1,
+	VMM_CONF_PRIO_OSID2 = 2,
+	VMM_CONF_PRIO_OSID3 = 3,
+	VMM_CONF_PRIO_OSID4 = 4,
+	VMM_CONF_PRIO_OSID5 = 5,
+	VMM_CONF_PRIO_OSID6 = 6,
+	VMM_CONF_PRIO_OSID7 = 7,
+	VMM_CONF_ISOL_THRES = 8,
+	VMM_CONF_HCS_DEADLINE = 9
+} VMM_CONF_PARAM;
+
+typedef struct _BUILD_INFO_
+{
+	IMG_UINT32	ui32BuildOptions;
+	IMG_UINT32	ui32BuildVersion;
+	IMG_UINT32	ui32BuildRevision;
+	IMG_UINT32	ui32BuildType;
+#define BUILD_TYPE_DEBUG	0
+#define BUILD_TYPE_RELEASE	1
+	/* The above fields are self-explanatory */
+	/* B.V.N.C can be added later if required */
+} BUILD_INFO;
+
+typedef struct _DRIVER_INFO_
+{
+	BUILD_INFO	sUMBuildInfo;
+	BUILD_INFO	sKMBuildInfo;
+	IMG_UINT8	ui8UMSupportedArch;
+	IMG_UINT8	ui8KMBitArch;
+
+#define	BUILD_ARCH_64BIT			(1 << 0)
+#define	BUILD_ARCH_32BIT			(1 << 1)
+#define	BUILD_ARCH_BOTH		(BUILD_ARCH_32BIT | BUILD_ARCH_64BIT)
+	IMG_BOOL	bIsNoMatch;
+} DRIVER_INFO;
+
+typedef struct PVRSRV_DATA_TAG
+{
+	PVRSRV_DRIVER_MODE			eDriverMode;				/*!< Driver mode (i.e. native, host or guest) */
+	DRIVER_INFO					sDriverInfo;
+	IMG_UINT32					ui32RegisteredDevices;
+	PVRSRV_DEVICE_NODE			*psDeviceNodeList;			/*!< List head of device nodes */
+	PVRSRV_DEVICE_NODE			*psHostMemDeviceNode;		/*!< DeviceNode to be used for device independent
+	                                                             host based memory allocations where the DevMem
+	                                                             framework is to be used e.g. TL */
+	PVRSRV_SERVICES_STATE		eServicesState;				/*!< global driver state */
+
+	HASH_TABLE					*psProcessHandleBase_Table; /*!< Hash table with process handle bases */
+	POS_LOCK					hProcessHandleBase_Lock;	/*!< Lock for the process handle base table */
+	PVRSRV_HANDLE_BASE			*psProcessHandleBaseBeingFreed; /*!< Pointer to process handle base currently being freed */
+
+	IMG_HANDLE					hGlobalEventObject;			/*!< OS Global Event Object */
+	IMG_UINT32					ui32GEOConsecutiveTimeouts;	/*!< OS Global Event Object Timeouts */
+
+	IMG_HANDLE					hCleanupThread;				/*!< Cleanup thread */
+	IMG_HANDLE					hCleanupEventObject;		/*!< Event object to drive cleanup thread */
+	POS_LOCK					hCleanupThreadWorkListLock;	/*!< Lock protecting the cleanup thread work list */
+	DLLIST_NODE					sCleanupThreadWorkList;		/*!< List of work for the cleanup thread */
+	IMG_PID						cleanupThreadPid;			/*!< Cleanup thread process id */
+	ATOMIC_T					i32NumCleanupItems;			/*!< Number of items in cleanup thread work list */
+
+	IMG_HANDLE					hDevicesWatchdogThread;		/*!< Devices watchdog thread */
+	IMG_HANDLE					hDevicesWatchdogEvObj;		/*!< Event object to drive devices watchdog thread */
+	volatile IMG_UINT32			ui32DevicesWatchdogPwrTrans;	/*!< Number of off -> on power state transitions */
+#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+	volatile IMG_UINT32			ui32DevicesWatchdogTimeout; /*!< Timeout for the Devices watchdog thread */
+#endif
+#ifdef PVR_TESTING_UTILS
+	volatile IMG_UINT32			ui32DevicesWdWakeupCounter;	/* Need this for the unit tests. */
+#endif
+
+	POS_LOCK					hHWPerfHostPeriodicThread_Lock;	/*!< Lock for the HWPerf Host periodic thread */
+	IMG_HANDLE					hHWPerfHostPeriodicThread;		/*!< HWPerf Host periodic thread */
+	IMG_HANDLE					hHWPerfHostPeriodicEvObj;		/*!< Event object to drive HWPerf thread */
+	volatile IMG_BOOL			bHWPerfHostThreadStop;
+	IMG_UINT32					ui32HWPerfHostThreadTimeout;
+
+	IMG_HANDLE					hPvzConnection;				/*!< PVZ connection used for cross-VM hyper-calls */
+	POS_LOCK					hPvzConnectionLock;			/*!< Lock protecting PVZ connection */
+	IMG_BOOL					abVmOnline[RGXFW_NUM_OS];
+
+	IMG_BOOL					bUnload;					/*!< Driver unload is in progress */
+
+	IMG_HANDLE					hTLCtrlStream;				/*!< Control plane for TL streams */
+
+	IMG_HANDLE					hDriverThreadEventObject;	/*!< Event object relating to multi-threading in the Server */
+	IMG_BOOL					bDriverSuspended;			/*!< If TRUE, the driver is suspended and new threads should not enter */
+	ATOMIC_T					iNumActiveDriverThreads;	/*!< Number of threads active in the Server */
+
+	PMR							*psInfoPagePMR;				/*!< Handle to exportable PMR of the information page. */
+	IMG_UINT32					*pui32InfoPage;				/*!< CPU memory mapping for information page. */
+	DEVMEM_MEMDESC				*psInfoPageMemDesc;			/*!< Memory descriptor of the information page. */
+	POS_LOCK					hInfoPageLock;				/*!< Lock guarding access to information page. */
+
+	POS_LOCK                    hConnectionsLock;           /*!< Lock protecting sConnections */
+	DLLIST_NODE                 sConnections;               /*!< The list of currently active connection objects */
+} PVRSRV_DATA;
+
+
+/*!
+******************************************************************************
+ @Function	PVRSRVGetPVRSRVData
+
+ @Description	Get a pointer to the global data
+
+ @Return   PVRSRV_DATA *
+******************************************************************************/
+PVRSRV_DATA *PVRSRVGetPVRSRVData(void);
+
+/*!
+******************************************************************************
+@Note	Kernel code must always query the driver mode using the
+		PVRSRV_VZ_MODE_IS() macro _only_; PVRSRV_DATA->eDriverMode must not
+		be read directly. On non-VZ capable BVNCs the field is overloaded
+		to also carry the driver OSID (not to be confused with the hardware
+		kick register OSID), because the driver has to simulate OSID
+		propagation to the firmware when no hardware kick register is
+		present to propagate it.
+******************************************************************************/
+#define PVRSRV_VZ_MODE_IS(_expr)              (((((IMG_INT)(_expr))>0)&&((IMG_INT)PVRSRVGetPVRSRVData()->eDriverMode>0)) ? \
+                                                   (IMG_TRUE) : ((_expr) == (PVRSRVGetPVRSRVData()->eDriverMode)))
+#define PVRSRV_VZ_RETN_IF_MODE(_expr)         do { if (  PVRSRV_VZ_MODE_IS(_expr)) { return; } } while(0)
+#define PVRSRV_VZ_RETN_IF_NOT_MODE(_expr)     do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return; } } while(0)
+#define PVRSRV_VZ_RET_IF_MODE(_expr, _rc)     do { if (  PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while(0)
+#define PVRSRV_VZ_RET_IF_NOT_MODE(_expr, _rc) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while(0)
+#define PVRSRV_VZ_DRIVER_OSID                 (((IMG_INT)PVRSRVGetPVRSRVData()->eDriverMode) > (0) ? \
+												   ((IMG_UINT32)(PVRSRVGetPVRSRVData()->eDriverMode)) : (0))
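+
+/*
+ * Illustrative usage sketch (example only; the function name is
+ * hypothetical): a server function that becomes a no-op on guest
+ * driver instances.
+ */
+#if 0 /* example only */
+static PVRSRV_ERROR ExampleHostOnlyCall(void)
+{
+	/* Return PVRSRV_OK immediately when running in guest mode */
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	/* ... work performed only by host/native driver instances ... */
+	return PVRSRV_OK;
+}
+#endif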
+
+/*!
+******************************************************************************
+@Note	The driver execution mode AppHint (i.e. PVRSRV_APPHINT_DRIVERMODE)
+		can be an override or non-override 32-bit value. An override value
+		has the MSB set; a non-override value has it cleared. Excluding the
+		MSB and interpreting the remaining 31 bits as a signed integer, the
+		mode values are:
+		  [-1 native <default> : 0 host : +1 guest ].
+******************************************************************************/
+#define PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(_expr)   ((IMG_UINT32)(_expr)&(IMG_UINT32)(1U<<31))
+#define PVRSRV_VZ_APPHINT_MODE(_expr)				\
+	((((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) == (IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_NATIVE : \
+		!((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_HOST : \
+			((IMG_UINT32)((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF)==(IMG_UINT32)0x1) ? DRIVER_MODE_GUEST : \
+				((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF))
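+
+/*
+ * Worked examples (illustrative): with the MSB used as the override flag,
+ *   PVRSRV_VZ_APPHINT_MODE(0x7FFFFFFF) == DRIVER_MODE_NATIVE
+ *   PVRSRV_VZ_APPHINT_MODE(0x00000000) == DRIVER_MODE_HOST
+ *   PVRSRV_VZ_APPHINT_MODE(0x00000001) == DRIVER_MODE_GUEST
+ * and PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(0x80000001) is non-zero.
+ */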
+
+/*!
+******************************************************************************
+
+ @Function	LMA memory management API
+
+******************************************************************************/
+PVRSRV_ERROR LMA_PhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+							PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+void LMA_PhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle);
+
+PVRSRV_ERROR LMA_PhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+							size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+							void **pvPtr);
+
+void LMA_PhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+					void *pvPtr);
+
+PVRSRV_ERROR LMA_PhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+                                     PG_HANDLE *psMemHandle,
+                                     IMG_UINT32 uiOffset,
+                                     IMG_UINT32 uiLength);
+
+IMG_BOOL IsPhysmemNewRamBackedByLMA(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx);
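+
+/*
+ * Minimal usage sketch for the LMA API above (illustrative only; error
+ * handling is abbreviated and OSGetPageSize() is assumed from osfunc.h).
+ */
+#if 0 /* example only */
+static PVRSRV_ERROR ExampleLMAPageCycle(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PG_HANDLE sMemHandle;
+	IMG_DEV_PHYADDR sDevPAddr;
+	void *pvCpuVAddr;
+	PVRSRV_ERROR eError;
+
+	/* Allocate one page of physically contiguous local memory */
+	eError = LMA_PhyContigPagesAlloc(psDevNode, OSGetPageSize(),
+	                                 &sMemHandle, &sDevPAddr);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* Map into the CPU, use the memory, then reverse each step */
+	eError = LMA_PhyContigPagesMap(psDevNode, &sMemHandle, OSGetPageSize(),
+	                               &sDevPAddr, &pvCpuVAddr);
+	if (eError == PVRSRV_OK)
+	{
+		LMA_PhyContigPagesUnmap(psDevNode, &sMemHandle, pvCpuVAddr);
+	}
+
+	LMA_PhyContigPagesFree(psDevNode, &sMemHandle);
+	return eError;
+}
+#endif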
+
+/*!
+******************************************************************************
+ @Function	PVRSRVPollForValueKM
+
+ @Description
+ Polls for a value to match a masked read
+
+ @Input pui32LinMemAddr : CPU linear address to poll
+ @Input ui32Value : required value
+ @Input ui32Mask : Mask
+
+ @Return   PVRSRV_ERROR :
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPollForValueKM(
+		volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+		IMG_UINT32                   ui32Value,
+		IMG_UINT32                   ui32Mask);
+
+/*!
+******************************************************************************
+ @Function	PVRSRVWaitForValueKM
+
+ @Description
+ Waits (using EventObjects) for a value to match a masked read
+
+ @Input pui32LinMemAddr			: CPU linear address to poll
+ @Input ui32Value				: required value
+ @Input ui32Mask				: Mask
+
+ @Return   PVRSRV_ERROR :
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKM(
+		volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+		IMG_UINT32                   ui32Value,
+		IMG_UINT32                   ui32Mask);
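+
+/*
+ * Usage sketch (illustrative): both calls return once
+ * (*pui32LinMemAddr & ui32Mask) == ui32Value; they differ only in how they
+ * wait between reads. The status pointer below is hypothetical.
+ */
+#if 0 /* example only */
+	/* Busy-poll for bit 0 of a status word to become set */
+	eError = PVRSRVPollForValueKM(pui32StatusWord, 0x1, 0x1);
+
+	/* Or sleep on the global event object between reads */
+	eError = PVRSRVWaitForValueKM(pui32StatusWord, 0x1, 0x1);
+#endif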
+
+/*!
+******************************************************************************
+ @Function	PVRSRVWaitForValueKMAndHoldBridgeLockKM
+
+ @Description
+ Waits without releasing bridge lock (using EventObjects) for a value
+ to match a masked read
+
+ @Input pui32LinMemAddr			: CPU linear address to poll
+ @Input ui32Value				: required value
+ @Input ui32Mask				: Mask
+
+ @Return   PVRSRV_ERROR :
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKMAndHoldBridgeLockKM(
+		volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+		IMG_UINT32                   ui32Value,
+		IMG_UINT32                   ui32Mask);
+
+/*!
+******************************************************************************
+ @Function	: PVRSRVSystemHasCacheSnooping
+
+ @Description	: Returns whether the system has cache snooping
+
+ @Return : IMG_TRUE if the system has cache snooping
+******************************************************************************/
+IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function	: PVRSRVSystemSnoopingIsEmulated
+
+ @Description : Returns whether system cache snooping support is emulated
+
+ @Return : IMG_TRUE if the system cache snooping is emulated in software
+******************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function	: PVRSRVSystemSnoopingOfCPUCache
+
+ @Description	: Returns whether the system supports snooping of the CPU cache
+
+ @Return : IMG_TRUE if the system has CPU cache snooping
+******************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function	: PVRSRVSystemSnoopingOfDeviceCache
+
+ @Description	: Returns whether the system supports snooping of the device cache
+
+ @Return : IMG_TRUE if the system has device cache snooping
+******************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function	: PVRSRVSystemHasNonMappableLocalMemory
+
+ @Description	: Returns whether the device has a non-mappable part of local memory
+
+ @Return : IMG_TRUE if the device has a non-mappable part of local memory
+******************************************************************************/
+IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function	: PVRSRVSystemWaitCycles
+
+ @Description	: Waits for at least ui32Cycles of the Device clk.
+******************************************************************************/
+void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles);
+
+PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice,
+										   IMG_UINT32 ui32IRQ,
+										   const IMG_CHAR *pszName,
+										   PFN_LISR pfnLISR,
+										   void *pvData,
+										   IMG_HANDLE *phLISRData);
+
+PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData);
+
+int PVRSRVGetDriverStatus(void);
+
+/*!
+******************************************************************************
+ @Function	: PVRSRVIsBridgeEnabled
+
+ @Description	: Returns whether the given bridge group is enabled
+
+ @Return : IMG_TRUE if the given bridge group is enabled
+******************************************************************************/
+static inline IMG_BOOL PVRSRVIsBridgeEnabled(IMG_HANDLE hServices, IMG_UINT32 ui32BridgeGroup)
+{
+	IMG_UINT32 ui32Bridges;
+	IMG_UINT32 ui32Offset;
+
+	PVR_UNREFERENCED_PARAMETER(hServices);
+
+#if defined(SUPPORT_RGX)
+	if (ui32BridgeGroup >= PVRSRV_BRIDGE_RGX_FIRST)
+	{
+		ui32Bridges = gui32RGXBridges;
+		ui32Offset = PVRSRV_BRIDGE_RGX_FIRST;
+	}
+	else
+#endif /* SUPPORT_RGX */
+	{
+		ui32Bridges = gui32PVRBridges;
+		ui32Offset = PVRSRV_BRIDGE_FIRST;
+	}
+
+	return ((1U << (ui32BridgeGroup - ui32Offset)) & ui32Bridges) != 0;
+}
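+
+/*
+ * Usage sketch (illustrative): gate dispatch on a bridge group being
+ * enabled; the group ID is one of the PVRSRV_BRIDGE_* values from
+ * pvr_bridge.h and hServices is unused by the current implementation.
+ */
+#if 0 /* example only */
+	if (!PVRSRVIsBridgeEnabled(NULL, PVRSRV_BRIDGE_SYNC))
+	{
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+	}
+#endif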
+
+/*!
+******************************************************************************
+ @Function	: PVRSRVSystemBIFTilingHeapGetXStride
+
+ @Description	: Return the default x-stride configuration for the given
+                  BIF tiling heap number
+
+ @Input psDevConfig: Pointer to a device config
+
+ @Input uiHeapNum: BIF tiling heap number, starting from 1
+
+ @Output puiXStride: pointer to x-stride output of the requested heap
+******************************************************************************/
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingHeapGetXStride(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									IMG_UINT32 uiHeapNum,
+									IMG_UINT32 *puiXStride);
+
+/*!
+******************************************************************************
+ @Function              : PVRSRVSystemBIFTilingGetConfig
+
+ @Description           : Return the BIF tiling mode and the number of BIF
+                          tiling heaps for the given device config
+
+ @Input psDevConfig     : Pointer to a device config
+
+ @Output peBifTilingMode: Pointer to a BIF tiling mode enum
+
+ @Output puiNumHeaps    : pointer to uint to hold number of heaps
+
+******************************************************************************/
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingGetConfig(PVRSRV_DEVICE_CONFIG  *psDevConfig,
+                               RGXFWIF_BIFTILINGMODE *peBifTilingMode,
+                               IMG_UINT32            *puiNumHeaps);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*!
+******************************************************************************
+ @Function				: PopulateLMASubArenas
+
+ @Description			: Uses the Apphints passed by the client at
+						  initialization time to add bases and sizes in the
+						  various arenas in the LMA memory
+
+ @Input psDeviceNode	: Pointer to the device node struct containing all the
+						  arena information
+
+ @Input aui32OSidMin	: Single dimensional array containing the minimum
+						  values for each OSid area
+
+ @Input aui32OSidMax	: Single dimensional array containing the maximum
+						  values for each OSid area
+******************************************************************************/
+
+void PopulateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode,
+                          IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+                          IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
+
+#if defined(EMULATOR)
+	void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState);
+	void SetTrustedDeviceAceEnabled(void);
+#endif
+
+#endif
+
+/*!
+******************************************************************************
+ @Function			PVRSRVVzRegisterFirmwarePhysHeap
+
+ @Description		Request to map a physical heap to kernel FW memory context
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+******************************************************************************/
+PVRSRV_ERROR PVRSRVVzRegisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+											  IMG_DEV_PHYADDR sDevPAddr,
+											  IMG_UINT64 ui64DevPSize,
+											  IMG_UINT32 uiOSID);
+
+/*!
+******************************************************************************
+ @Function			PVRSRVVzUnregisterFirmwarePhysHeap
+
+ @Description		Request to unmap a physical heap from kernel FW memory context
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+******************************************************************************/
+PVRSRV_ERROR PVRSRVVzUnregisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+												IMG_UINT32 uiOSID);
+
+/*!
+******************************************************************************
+ @Function			: PVRSRVCreateHWPerfHostThread
+
+ @Description		: Creates HWPerf event object and thread unless already created
+
+ @Input ui32Timeout	: Initial timeout (ms) between updates on the HWPerf thread
+
+ @Return			: PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+******************************************************************************/
+PVRSRV_ERROR PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout);
+
+/*!
+******************************************************************************
+ @Function			: PVRSRVDestroyHWPerfHostThread
+
+ @Description		: Destroys HWPerf event object and thread if created
+
+ @Return			: PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void);
+
+/*!
+******************************************************************************
+ @Function			: PVRSRVPhysMemHeapsInit
+
+ @Description		: Registers and acquires physical memory heaps
+
+ @Return			: PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPhysMemHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			: PVRSRVPhysMemHeapsDeinit
+
+ @Description		: Releases and unregisters physical memory heaps
+
+ @Return			: PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+******************************************************************************/
+void PVRSRVPhysMemHeapsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif /* PVRSRV_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_apphint.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_apphint.h
new file mode 100644
index 0000000..5d3afc0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_apphint.h
@@ -0,0 +1,72 @@
+/**************************************************************************/ /*!
+@File
+@Title          PowerVR AppHint generic interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__PVRSRV_APPHINT_H__)
+#define __PVRSRV_APPHINT_H__
+
+/* Supplied to the PVRSRVAppHintRegisterHandlers*() functions when the apphint
+ * is a global driver apphint, i.e. one not present in
+ * APPHINT_DEBUGFS_DEVICE_ID and therefore not per-device.
+ */
+#define APPHINT_OF_DRIVER_NO_DEVICE ((void*)-1U)
+
+#if defined(LINUX)
+
+#include "km_apphint.h"
+#define PVRSRVAppHintDumpState() pvr_apphint_dump_state()
+#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) pvr_apphint_register_handlers_uint64(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) pvr_apphint_register_handlers_uint32(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) pvr_apphint_register_handlers_bool(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) pvr_apphint_register_handlers_string(i,q,s,d,p)
+
+#else
+
+#define PVRSRVAppHintDumpState()
+#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p)
+
+#endif
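+
+/*
+ * Usage sketch (illustrative): on Linux these macros forward to the
+ * km_apphint.h helpers, elsewhere they compile away. The apphint ID and
+ * callbacks below are hypothetical; their exact types are dictated by
+ * km_apphint.h.
+ *
+ *   PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_Example,
+ *                                       pfnExampleQuery, pfnExampleSet,
+ *                                       APPHINT_OF_DRIVER_NO_DEVICE,
+ *                                       NULL);
+ */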
+
+#endif /* !defined(__PVRSRV_APPHINT_H__) */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_bridge_init.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_bridge_init.c
new file mode 100644
index 0000000..b3a269e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_bridge_init.c
@@ -0,0 +1,475 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Common Bridge Init/Deinit Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements common PVR Bridge init/deinit code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_bridge_init.h"
+#include "srvcore.h"
+
+/* These forward declarations will go away when full bridge code generation comes in */
+#if defined(PDUMP)
+PVRSRV_ERROR InitPDUMPCTRLBridge(void);
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(void);
+PVRSRV_ERROR InitPDUMPBridge(void);
+PVRSRV_ERROR DeinitPDUMPBridge(void);
+PVRSRV_ERROR InitRGXPDUMPBridge(void);
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void);
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS)
+PVRSRV_ERROR InitDCBridge(void);
+PVRSRV_ERROR DeinitDCBridge(void);
+#endif
+PVRSRV_ERROR InitMMBridge(void);
+PVRSRV_ERROR DeinitMMBridge(void);
+#if !defined(EXCLUDE_CMM_BRIDGE)
+PVRSRV_ERROR InitCMMBridge(void);
+PVRSRV_ERROR DeinitCMMBridge(void);
+#endif
+PVRSRV_ERROR InitPDUMPMMBridge(void);
+PVRSRV_ERROR DeinitPDUMPMMBridge(void);
+PVRSRV_ERROR InitSRVCOREBridge(void);
+PVRSRV_ERROR DeinitSRVCOREBridge(void);
+PVRSRV_ERROR InitSYNCBridge(void);
+PVRSRV_ERROR DeinitSYNCBridge(void);
+
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR InitSYNCEXPORTBridge(void);
+PVRSRV_ERROR DeinitSYNCEXPORTBridge(void);
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR InitSYNCSEXPORTBridge(void);
+PVRSRV_ERROR DeinitSYNCSEXPORTBridge(void);
+#endif
+#endif /* defined(SUPPORT_SERVER_SYNC) */
+
+#if defined(SUPPORT_RGX)
+PVRSRV_ERROR InitRGXTA3DBridge(void);
+PVRSRV_ERROR DeinitRGXTA3DBridge(void);
+PVRSRV_ERROR InitRGXTQBridge(void);
+PVRSRV_ERROR DeinitRGXTQBridge(void);
+PVRSRV_ERROR InitRGXTQ2Bridge(void);
+PVRSRV_ERROR DeinitRGXTQ2Bridge(void);
+PVRSRV_ERROR InitRGXCMPBridge(void);
+PVRSRV_ERROR DeinitRGXCMPBridge(void);
+#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE)
+PVRSRV_ERROR InitRGXBREAKPOINTBridge(void);
+PVRSRV_ERROR DeinitRGXBREAKPOINTBridge(void);
+#endif
+PVRSRV_ERROR InitRGXFWDBGBridge(void);
+PVRSRV_ERROR DeinitRGXFWDBGBridge(void);
+PVRSRV_ERROR InitRGXHWPERFBridge(void);
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void);
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+PVRSRV_ERROR InitRGXREGCONFIGBridge(void);
+PVRSRV_ERROR DeinitRGXREGCONFIGBridge(void);
+#endif
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
+PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void);
+PVRSRV_ERROR InitRGXSIGNALSBridge(void);
+PVRSRV_ERROR DeinitRGXSIGNALSBridge(void);
+#endif /* SUPPORT_RGX */
+PVRSRV_ERROR InitCACHEBridge(void);
+PVRSRV_ERROR DeinitCACHEBridge(void);
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR InitSMMBridge(void);
+PVRSRV_ERROR DeinitSMMBridge(void);
+#endif
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+PVRSRV_ERROR InitHTBUFFERBridge(void);
+PVRSRV_ERROR DeinitHTBUFFERBridge(void);
+#endif
+PVRSRV_ERROR InitPVRTLBridge(void);
+PVRSRV_ERROR DeinitPVRTLBridge(void);
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+PVRSRV_ERROR InitRIBridge(void);
+PVRSRV_ERROR DeinitRIBridge(void);
+#endif
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void);
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void);
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+PVRSRV_ERROR InitVALIDATIONBridge(void);
+PVRSRV_ERROR DeinitVALIDATIONBridge(void);
+#endif
+#if defined(PVR_TESTING_UTILS)
+PVRSRV_ERROR InitTUTILSBridge(void);
+PVRSRV_ERROR DeinitTUTILSBridge(void);
+#endif
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void);
+PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void);
+#if defined(SUPPORT_WRAP_EXTMEM)
+PVRSRV_ERROR InitMMEXTMEMBridge(void);
+PVRSRV_ERROR DeinitMMEXTMEMBridge(void);
+#endif
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+PVRSRV_ERROR InitSYNCFALLBACKBridge(void);
+PVRSRV_ERROR DeinitSYNCFALLBACKBridge(void);
+#endif
+
+
+PVRSRV_ERROR
+ServerBridgeInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	BridgeDispatchTableStartOffsetsInit();
+
+	eError = InitSRVCOREBridge();
+	PVR_LOG_IF_ERROR(eError, "InitSRVCOREBridge");
+
+	eError = InitSYNCBridge();
+	PVR_LOG_IF_ERROR(eError, "InitSYNCBridge");
+
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+	eError = InitSYNCEXPORTBridge();
+	PVR_LOG_IF_ERROR(eError, "InitSYNCEXPORTBridge");
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+	eError = InitSYNCSEXPORTBridge();
+	PVR_LOG_IF_ERROR(eError, "InitSYNCSEXPORTBridge");
+#endif
+#endif /* defined(SUPPORT_SERVER_SYNC) */
+
+#if defined(PDUMP)
+	eError = InitPDUMPCTRLBridge();
+	PVR_LOG_IF_ERROR(eError, "InitPDUMPCTRLBridge");
+#endif
+
+	eError = InitMMBridge();
+	PVR_LOG_IF_ERROR(eError, "InitMMBridge");
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+	eError = InitCMMBridge();
+	PVR_LOG_IF_ERROR(eError, "InitCMMBridge");
+#endif
+
+#if defined(PDUMP)
+	eError = InitPDUMPMMBridge();
+	PVR_LOG_IF_ERROR(eError, "InitPDUMPMMBridge");
+
+	eError = InitPDUMPBridge();
+	PVR_LOG_IF_ERROR(eError, "InitPDUMPBridge");
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+	eError = InitDCBridge();
+	PVR_LOG_IF_ERROR(eError, "InitDCBridge");
+#endif
+
+	eError = InitCACHEBridge();
+	PVR_LOG_IF_ERROR(eError, "InitCACHEBridge");
+
+#if defined(SUPPORT_SECURE_EXPORT)
+	eError = InitSMMBridge();
+	PVR_LOG_IF_ERROR(eError, "InitSMMBridge");
+#endif
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+	eError = InitHTBUFFERBridge();
+	PVR_LOG_IF_ERROR(eError, "InitHTBUFFERBridge");
+#endif
+
+	eError = InitPVRTLBridge();
+	PVR_LOG_IF_ERROR(eError, "InitPVRTLBridge");
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	eError = InitRIBridge();
+	PVR_LOG_IF_ERROR(eError, "InitRIBridge");
+#endif
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+	eError = InitVALIDATIONBridge();
+	PVR_LOG_IF_ERROR(eError, "InitVALIDATIONBridge");
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+	eError = InitTUTILSBridge();
+	PVR_LOG_IF_ERROR(eError, "InitTUTILSBridge");
+#endif
+
+	eError = InitDEVICEMEMHISTORYBridge();
+	PVR_LOG_IF_ERROR(eError, "InitDEVICEMEMHISTORYBridge");
+
+	eError = InitSYNCTRACKINGBridge();
+	PVR_LOG_IF_ERROR(eError, "InitSYNCTRACKINGBridge");
+
+#if defined(SUPPORT_RGX)
+
+	eError = InitRGXTQBridge();
+	PVR_LOG_IF_ERROR(eError, "InitRGXTQBridge");
+
+	eError = InitRGXTA3DBridge();
+	PVR_LOG_IF_ERROR(eError, "InitRGXTA3DBridge");
+
+#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE)
+	eError = InitRGXBREAKPOINTBridge();
+	PVR_LOG_IF_ERROR(eError, "InitRGXBREAKPOINTBridge");
+#endif
+
+	eError = InitRGXFWDBGBridge();
+	PVR_LOG_IF_ERROR(eError, "InitRGXFWDBGBridge");
+
+#if defined(PDUMP)
+	eError = InitRGXPDUMPBridge();
+	PVR_LOG_IF_ERROR(eError, "InitRGXPDUMPBridge");
+#endif
+
+	eError = InitRGXHWPERFBridge();
+	PVR_LOG_IF_ERROR(eError, "InitRGXHWPERFBridge");
+
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+	eError = InitRGXREGCONFIGBridge();
+	PVR_LOG_IF_ERROR(eError, "InitRGXREGCONFIGBridge");
+#endif
+
+	eError = InitRGXKICKSYNCBridge();
+	PVR_LOG_IF_ERROR(eError, "InitRGXKICKSYNCBridge");
+
+#endif /* SUPPORT_RGX */
+
+#if defined(SUPPORT_WRAP_EXTMEM)
+	eError = InitMMEXTMEMBridge();
+	PVR_LOG_IF_ERROR(eError, "InitMMEXTMEMBridge");
+#endif
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	eError = InitSYNCFALLBACKBridge();
+	PVR_LOG_IF_ERROR(eError, "InitSYNCFALLBACKBridge");
+#endif
+
+	eError = OSPlatformBridgeInit();
+	PVR_LOG_IF_ERROR(eError, "OSPlatformBridgeInit");
+
+	return eError;
+}
+
+PVRSRV_ERROR
+ServerBridgeDeInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	eError = OSPlatformBridgeDeInit();
+	PVR_LOG_IF_ERROR(eError, "OSPlatformBridgeDeInit");
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	eError = DeinitSYNCFALLBACKBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitSYNCFALLBACKBridge");
+#endif
+
+#if defined(SUPPORT_WRAP_EXTMEM)
+	eError = DeinitMMEXTMEMBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitMMEXTMEMBridge");
+#endif
+
+	eError = DeinitSRVCOREBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitSRVCOREBridge");
+
+	eError = DeinitSYNCBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitSYNCBridge");
+
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+	eError = DeinitSYNCEXPORTBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitSYNCEXPORTBridge");
+#endif
+
+#if defined(SUPPORT_SECURE_EXPORT)
+	eError = DeinitSYNCSEXPORTBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitSYNCSEXPORTBridge");
+#endif
+#endif /* defined(SUPPORT_SERVER_SYNC) */
+
+#if defined(PDUMP)
+	eError = DeinitPDUMPCTRLBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitPDUMPCTRLBridge");
+#endif
+
+	eError = DeinitMMBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitMMBridge");
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+	eError = DeinitCMMBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitCMMBridge");
+#endif
+
+#if defined(PDUMP)
+	eError = DeinitPDUMPMMBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitPDUMPMMBridge");
+
+	eError = DeinitPDUMPBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitPDUMPBridge");
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+	eError = DeinitTUTILSBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitTUTILSBridge");
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+	eError = DeinitDCBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitDCBridge");
+#endif
+
+	eError = DeinitCACHEBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitCACHEBridge");
+
+#if defined(SUPPORT_SECURE_EXPORT)
+	eError = DeinitSMMBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitSMMBridge");
+#endif
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+	eError = DeinitHTBUFFERBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitHTBUFFERBridge");
+#endif
+
+	eError = DeinitPVRTLBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitPVRTLBridge");
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+	eError = DeinitVALIDATIONBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitVALIDATIONBridge");
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	eError = DeinitRIBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitRIBridge");
+#endif
+
+	eError = DeinitDEVICEMEMHISTORYBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitDEVICEMEMHISTORYBridge");
+
+	eError = DeinitSYNCTRACKINGBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitSYNCTRACKINGBridge");
+
+#if defined(SUPPORT_RGX)
+
+	eError = DeinitRGXTQBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitRGXTQBridge");
+
+	eError = DeinitRGXTA3DBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitRGXTA3DBridge");
+
+#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE)
+	eError = DeinitRGXBREAKPOINTBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitRGXBREAKPOINTBridge");
+#endif
+
+	eError = DeinitRGXFWDBGBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitRGXFWDBGBridge");
+
+#if defined(PDUMP)
+	eError = DeinitRGXPDUMPBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitRGXPDUMPBridge");
+#endif
+
+	eError = DeinitRGXHWPERFBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitRGXHWPERFBridge");
+
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+	eError = DeinitRGXREGCONFIGBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitRGXREGCONFIGBridge");
+#endif
+
+	eError = DeinitRGXKICKSYNCBridge();
+	PVR_LOGR_IF_ERROR(eError, "DeinitRGXKICKSYNCBridge");
+
+#endif /* SUPPORT_RGX */
+
+	return eError;
+}
+
+#if defined(SUPPORT_RGX)
+PVRSRV_ERROR
+DeviceDepBridgeInit(IMG_UINT64 ui64Features)
+{
+	PVRSRV_ERROR eError;
+
+	if (ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+	{
+		eError = InitRGXCMPBridge();
+		PVR_LOGR_IF_ERROR(eError, "InitRGXCMPBridge");
+	}
+
+	if (ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)
+	{
+		eError = InitRGXSIGNALSBridge();
+		PVR_LOGR_IF_ERROR(eError, "InitRGXSIGNALSBridge");
+	}
+
+	if (ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+	{
+		eError = InitRGXTQ2Bridge();
+		PVR_LOGR_IF_ERROR(eError, "InitRGXTQ2Bridge");
+	}
+
+	return PVRSRV_OK;
+}
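+
+/*
+ * Usage sketch (illustrative): DeviceDepBridgeInit() is expected to be
+ * called once the device's feature mask is known, e.g.
+ *
+ *   eError = DeviceDepBridgeInit(ui64DeviceFeatures);
+ *
+ * where ui64DeviceFeatures holds the RGX_FEATURE_* bits discovered for
+ * the device (the variable name is an assumption).
+ */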
+
+PVRSRV_ERROR
+DeviceDepBridgeDeInit(IMG_UINT64 ui64Features)
+{
+	PVRSRV_ERROR eError;
+
+	if (ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+	{
+		eError = DeinitRGXCMPBridge();
+		PVR_LOGR_IF_ERROR(eError, "DeinitRGXCMPBridge");
+	}
+
+	if (ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)
+	{
+		eError = DeinitRGXSIGNALSBridge();
+		PVR_LOGR_IF_ERROR(eError, "DeinitRGXSIGNALSBridge");
+	}
+
+	if (ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+	{
+		eError = DeinitRGXTQ2Bridge();
+		PVR_LOGR_IF_ERROR(eError, "DeinitRGXTQ2Bridge");
+	}
+
+	return PVRSRV_OK;
+}
+#endif /* SUPPORT_RGX */
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_bridge_init.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_bridge_init.h
new file mode 100644
index 0000000..b99d474
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_bridge_init.h
@@ -0,0 +1,57 @@
+/**************************************************************************/ /*!
+@File
+@Title          PVR Common Bridge Init/Deinit Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the common PVR Bridge init/deinit code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _PVRSRV_BRIDGE_INIT_H_
+#define _PVRSRV_BRIDGE_INIT_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+PVRSRV_ERROR ServerBridgeInit(void);
+PVRSRV_ERROR DeviceDepBridgeInit(IMG_UINT64 ui64Features);
+
+PVRSRV_ERROR ServerBridgeDeInit(void);
+PVRSRV_ERROR DeviceDepBridgeDeInit(IMG_UINT64 ui64Features);
+
+
+#endif  /* _PVRSRV_BRIDGE_INIT_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_cleanup.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_cleanup.h
new file mode 100644
index 0000000..99ddcea
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_cleanup.h
@@ -0,0 +1,159 @@
+/**************************************************************************/ /*!
+@File
+@Title          PowerVR SrvKM cleanup thread deferred work interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _PVRSRV_CLEANUP_H
+#define _PVRSRV_CLEANUP_H
+
+#include "dllist.h"
+
+/**************************************************************************/ /*!
+@Brief          CLEANUP_THREAD_FN
+
+@Description    This is the function prototype for the pfnFree member found in
+                the structure PVRSRV_CLEANUP_THREAD_WORK. The function is
+                responsible for carrying out the clean up work and if successful
+                freeing the memory originally supplied to the call
+                PVRSRVCleanupThreadAddWork().
+
+@Input          pvParam  This is private data originally supplied by the caller
+                         to PVRSRVCleanupThreadAddWork() when registering the
+                         clean up work item, psData->pvData. It can be cast
+                         to a relevant type within the using module.
+
+@Return         PVRSRV_OK if the cleanup operation was successful and the
+                callback has freed the PVRSRV_CLEANUP_THREAD_WORK* work item
+                memory originally supplied to PVRSRVCleanupThreadAddWork().
+                Any other error code will lead to the work item being
+                re-queued, in which case the original
+                PVRSRV_CLEANUP_THREAD_WORK* must not be freed.
+*/ /***************************************************************************/
+
+typedef PVRSRV_ERROR (*CLEANUP_THREAD_FN)(void *pvParam);
+
+
+/* Typical number of times a caller should want the work to be retried in case
+ * of the callback function (pfnFree) returning an error.
+ * Callers to PVRSRVCleanupThreadAddWork should provide this value as the retry
+ * count (ui32RetryCount) unless there are special requirements.
+ * A value of 200 corresponds to roughly 20s (200 * 100ms). If the work is
+ * still not successful by then, give up, as an unrecoverable problem has
+ * occurred.
+ */
+#define CLEANUP_THREAD_RETRY_COUNT_DEFAULT 200u
+/* As for CLEANUP_THREAD_RETRY_COUNT_DEFAULT, but the caller will wait for
+ * a specified amount of time rather than a number of retries.
+ */
+#define CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT 2000u /* 2s */
+
+/* Use to set retry count on a cleanup item.
+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
+ * _count - retry count
+ */
+#define CLEANUP_THREAD_SET_RETRY_COUNT(_item,_count) \
+	do { \
+		(_item)->ui32RetryCount = (_count); \
+		(_item)->ui32TimeStart = 0; \
+		(_item)->ui32TimeEnd = 0; \
+	} while (0)
+
+/* Use to set timeout deadline on a cleanup item.
+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
+ * _timeout - timeout in milliseconds, if 0
+ *            CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT is used
+ */
+#define CLEANUP_THREAD_SET_RETRY_TIMEOUT(_item,_timeout) \
+	do { \
+		(_item)->ui32RetryCount = 0; \
+		(_item)->ui32TimeStart = OSClockms(); \
+		(_item)->ui32TimeEnd = (_item)->ui32TimeStart + ((_timeout) > 0 ? \
+				(_timeout) : CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); \
+	} while (0)
+
+/* Indicates if the timeout on a given item has been reached.
+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
+ */
+#define CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(_item) \
+	((_item)->ui32TimeEnd - (_item)->ui32TimeStart <= \
+			OSClockms() - (_item)->ui32TimeStart)
+
+/* Indicates if the current item is waiting on timeout or retry count.
+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
+ */
+#define CLEANUP_THREAD_IS_RETRY_TIMEOUT(_item) \
+	((_item)->ui32TimeStart != ((_item)->ui32TimeEnd))
+
+/* Clean up work item specifics so that the task can be managed by the
+ * pvr_defer_free cleanup thread in the Server.
+ */
+typedef struct _PVRSRV_CLEANUP_THREAD_WORK_
+{
+	DLLIST_NODE sNode;             /*!< List node used internally by the cleanup
+	                                    thread */
+	CLEANUP_THREAD_FN pfnFree;     /*!< Pointer to the function to be called to
+	                                    carry out the deferred cleanup */
+	void *pvData;                  /*!< private data for pfnFree, usually a way back
+	                                    to the original PVRSRV_CLEANUP_THREAD_WORK*
+	                                    pointer supplied in the call to
+	                                    PVRSRVCleanupThreadAddWork(). */
+	IMG_UINT32 ui32TimeStart;      /*!< Timestamp in ms of the moment when
+	                                    cleanup item has been created. */
+	IMG_UINT32 ui32TimeEnd;        /*!< Time in ms after which no further retry
+	                                    attempts will be made; the item is
+	                                    discarded and an error logged when this
+	                                    is reached. */
+	IMG_UINT32 ui32RetryCount;     /*!< Number of times the callback should be
+	                                    re-tried when it returns error. */
+	IMG_BOOL bDependsOnHW;         /*!< Retry again after the RGX interrupt signals
+	                                    the global event object */
+} PVRSRV_CLEANUP_THREAD_WORK;
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCleanupThreadAddWork
+
+@Description    Add a work item to be called from the cleanup thread
+
+@Input          psData : The function pointer and private data for the callback
+
+@Return         None
+*/ /***************************************************************************/
+void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData);
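+
+/*
+ * Registration sketch (illustrative): embed the work item in the object
+ * being cleaned up, point pvData back at the object, and hand it to the
+ * cleanup thread. MY_OBJECT and the callbacks are hypothetical.
+ */
+#if 0 /* example only */
+typedef struct _MY_OBJECT_
+{
+	PVRSRV_CLEANUP_THREAD_WORK sCleanupWork;
+	/* ... object state ... */
+} MY_OBJECT;
+
+static PVRSRV_ERROR _MyObjFreeCB(void *pvParam)
+{
+	MY_OBJECT *psObj = pvParam;
+	/* ... attempt the deferred teardown; on success free everything ... */
+	OSFreeMem(psObj);
+	return PVRSRV_OK;
+}
+
+static void _MyObjScheduleFree(MY_OBJECT *psObj)
+{
+	psObj->sCleanupWork.pfnFree = _MyObjFreeCB;
+	psObj->sCleanupWork.pvData = psObj;
+	psObj->sCleanupWork.bDependsOnHW = IMG_FALSE;
+	CLEANUP_THREAD_SET_RETRY_COUNT(&psObj->sCleanupWork,
+	                               CLEANUP_THREAD_RETRY_COUNT_DEFAULT);
+	PVRSRVCleanupThreadAddWork(&psObj->sCleanupWork);
+}
+#endif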
+
+#endif /* _PVRSRV_CLEANUP_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_device.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_device.h
new file mode 100644
index 0000000..f45276b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_device.h
@@ -0,0 +1,343 @@
+/**************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __PVRSRV_DEVICE_H__
+#define __PVRSRV_DEVICE_H__
+
+#include "img_types.h"
+#include "physheap.h"
+#include "pvrsrv_error.h"
+#include "rgx_fwif_km.h"
+#include "servicesext.h"
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+#include "pvr_dvfs.h"
+#endif
+
+typedef struct _PVRSRV_DEVICE_CONFIG_ PVRSRV_DEVICE_CONFIG;
+typedef enum _DRIVER_MODE_
+{
+/* Do not use these enumerations directly; to query the
+   current driver mode, use the PVRSRV_VZ_MODE_IS()
+   macro. */
+	DRIVER_MODE_NATIVE	= -1,
+	DRIVER_MODE_HOST	=  0,
+	DRIVER_MODE_GUEST
+} PVRSRV_DRIVER_MODE;
+
+/*
+ * All the heaps from which regular device memory allocations can be made in
+ * terms of their locality to the respective device.
+ */
+typedef enum
+{
+	PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL = 0,
+	PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL = 1,
+	PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL  = 2,
+	PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST  = 3,
+	PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL  = 4,
+	PVRSRV_DEVICE_PHYS_HEAP_LAST
+} PVRSRV_DEVICE_PHYS_HEAP;
+
+typedef enum
+{
+	PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_MAPPABLE = 0,
+	PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_NON_MAPPABLE = 1,
+	PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_LAST
+} PVRSRV_DEVICE_LOCAL_MEMORY_ARENA;
+
+typedef enum _PVRSRV_DEVICE_SNOOP_MODE_
+{
+	PVRSRV_DEVICE_SNOOP_NONE = 0,
+	PVRSRV_DEVICE_SNOOP_CPU_ONLY,
+	PVRSRV_DEVICE_SNOOP_DEVICE_ONLY,
+	PVRSRV_DEVICE_SNOOP_CROSS,
+	PVRSRV_DEVICE_SNOOP_EMULATED,
+} PVRSRV_DEVICE_SNOOP_MODE;
+
+#if defined(SUPPORT_SOC_TIMER)
+typedef IMG_UINT64
+(*PFN_SYS_DEV_SOC_TIMER_READ)(IMG_HANDLE hSysData);
+#endif
+
+typedef IMG_UINT32
+(*PFN_SYS_DEV_CLK_FREQ_GET)(IMG_HANDLE hSysData);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_PRE_POWER)(IMG_HANDLE hSysData,
+						 PVRSRV_DEV_POWER_STATE eNewPowerState,
+						 PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+						 IMG_BOOL bForced);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_POST_POWER)(IMG_HANDLE hSysData,
+						  PVRSRV_DEV_POWER_STATE eNewPowerState,
+						  PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+						  IMG_BOOL bForced);
+
+typedef void
+(*PFN_SYS_DEV_INTERRUPT_HANDLED)(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE)(IMG_HANDLE hSysData,
+									IMG_UINT64 ui64MemSize);
+
+typedef void (*PFN_SYS_DEV_FEAT_DEP_INIT)(PVRSRV_DEVICE_CONFIG *, IMG_UINT64);
+
+typedef PVRSRV_DRIVER_MODE (*PFN_SYS_DRIVER_MODE)(void);
+
+typedef enum _PVRSRV_TD_FW_MEM_REGION_
+{
+	PVRSRV_DEVICE_FW_CODE_REGION         = 0,
+	PVRSRV_DEVICE_FW_PRIVATE_DATA_REGION = 1,
+	PVRSRV_DEVICE_FW_COREMEM_CODE_REGION = 2,
+	PVRSRV_DEVICE_FW_COREMEM_DATA_REGION = 3
+} PVRSRV_TD_FW_MEM_REGION;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+
+typedef struct _PVRSRV_TD_FW_PARAMS_
+{
+	const void *pvFirmware;
+	IMG_UINT32 ui32FirmwareSize;
+
+	union
+	{
+		struct
+		{
+			/* META-only parameters */
+			IMG_DEV_VIRTADDR sFWCodeDevVAddr;
+			IMG_DEV_VIRTADDR sFWDataDevVAddr;
+			IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr;
+			RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr;
+			IMG_DEVMEM_SIZE_T uiFWCorememCodeSize;
+			IMG_DEV_VIRTADDR sFWCorememDataDevVAddr;
+			RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr;
+			IMG_UINT32 ui32NumThreads;
+			IMG_UINT32 ui32MainThreadID;
+		} sMeta;
+
+		struct
+		{
+			/* MIPS-only parameters */
+			IMG_DEV_PHYADDR sGPURegAddr;
+			IMG_DEV_PHYADDR sFWPageTableAddr;
+			IMG_DEV_PHYADDR sFWStackAddr;
+		} sMips;
+	} uFWP;
+} PVRSRV_TD_FW_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SEND_FW_IMAGE)(IMG_HANDLE hSysData,
+						PVRSRV_TD_FW_PARAMS *psTDFWParams);
+
+typedef struct _PVRSRV_TD_POWER_PARAMS_
+{
+	IMG_DEV_PHYADDR sPCAddr; /* META only used param */
+
+	/* MIPS only used fields */
+	IMG_DEV_PHYADDR sGPURegAddr;
+	IMG_DEV_PHYADDR sBootRemapAddr;
+	IMG_DEV_PHYADDR sCodeRemapAddr;
+	IMG_DEV_PHYADDR sDataRemapAddr;
+} PVRSRV_TD_POWER_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SET_POWER_PARAMS)(IMG_HANDLE hSysData,
+						   PVRSRV_TD_POWER_PARAMS *psTDPowerParams);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_RGXSTART)(IMG_HANDLE hSysData);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_RGXSTOP)(IMG_HANDLE hSysData);
+
+typedef struct _PVRSRV_TD_SECBUF_PARAMS_
+{
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_ALIGN_T uiAlign;
+	IMG_CPU_PHYADDR *psSecBufAddr;
+	IMG_UINT64 *pui64SecBufHandle;
+} PVRSRV_TD_SECBUF_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SECUREBUF_ALLOC)(IMG_HANDLE hSysData,
+						  PVRSRV_TD_SECBUF_PARAMS *psTDSecBufParams);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SECUREBUF_FREE)(IMG_HANDLE hSysData,
+						 IMG_UINT64 ui64SecBufHandle);
+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
+
+struct _PVRSRV_DEVICE_CONFIG_
+{
+	/*! OS device passed to SysDevInit (linux: 'struct device') */
+	void *pvOSDevice;
+
+	/*!
+	 *! Service representation of pvOSDevice. Should be set to NULL when the
+	 *! config is created in SysDevInit. Set by Services once a device node has
+	 *! been created for this config and unset before SysDevDeInit is called.
+	 */
+	struct _PVRSRV_DEVICE_NODE_ *psDevNode;
+
+	/*! Name of the device */
+	IMG_CHAR *pszName;
+
+	/*! Version of the device (optional) */
+	IMG_CHAR *pszVersion;
+
+	/*! Register bank address */
+	IMG_CPU_PHYADDR sRegsCpuPBase;
+	/*! Register bank size */
+	IMG_UINT32 ui32RegsSize;
+	/*! Device interrupt number */
+	IMG_UINT32 ui32IRQ;
+
+	PVRSRV_DEVICE_SNOOP_MODE eCacheSnoopingMode;
+
+	/*! Device specific data handle */
+	IMG_HANDLE hDevData;
+
+	/*! System specific data that gets passed into system callback functions. */
+	IMG_HANDLE hSysData;
+
+	IMG_BOOL bHasNonMappableLocalMemory;
+
+	/*! Indicates if system supports FBCDC v3.1 */
+	IMG_BOOL bHasFBCDCVersion31;
+
+	PHYS_HEAP_CONFIG *pasPhysHeaps;
+	IMG_UINT32 ui32PhysHeapCount;
+
+	/*!
+	 *! ID of the Physical memory heap to use.
+	 *!
+	 *! The first entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL])
+	 *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL
+	 *! flag is not set. Normally this will be the PhysHeapID of an LMA heap
+	 *! but the configuration could specify a UMA heap here (if desired).
+	 *!
+	 *! The second entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL])
+	 *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL
+	 *! flag is set. Normally this will be the PhysHeapID of a UMA heap but
+	 *! the configuration could specify an LMA heap here (if desired).
+	 *!
+	 *! The third entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL])
+	 *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_FW_LOCAL
+	 *! flag is set.
+	 *!
+	 *! The fourth entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL])
+	 *! will be used for allocations that are imported into the driver and
+	 *! are local to other devices, e.g. a display controller.
+	 *!
+	 *! In the event of there being only one Physical Heap, the configuration
+	 *! should specify the same heap details in all entries.
+	 */
+	IMG_UINT32 aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+	RGXFWIF_BIFTILINGMODE eBIFTilingMode;
+	IMG_UINT32 *pui32BIFTilingHeapConfigs;
+	IMG_UINT32 ui32BIFTilingHeapCount;
+
+	/*!
+	 *! Callbacks to change system device power state at the beginning and end
+	 *! of a power state change (optional).
+	 */
+	PFN_SYS_DEV_PRE_POWER pfnPrePowerState;
+	PFN_SYS_DEV_POST_POWER pfnPostPowerState;
+
+	/*! Callback to obtain the clock frequency from the device (optional). */
+	PFN_SYS_DEV_CLK_FREQ_GET pfnClockFreqGet;
+
+#if defined(SUPPORT_SOC_TIMER)
+	/*! Callback to read SoC timer register value (mandatory). */
+	PFN_SYS_DEV_SOC_TIMER_READ	pfnSoCTimerRead;
+#endif
+
+	/*!
+	 *! Callback to handle memory budgeting. Can be used to reject allocations
+	 *! over a certain size (optional).
+	 */
+	PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	/*!
+	 *! Callback to send FW image and FW boot time parameters to the trusted
+	 *! device.
+	 */
+	PFN_TD_SEND_FW_IMAGE pfnTDSendFWImage;
+
+	/*!
+	 *! Callback to send parameters needed in a power transition to the trusted
+	 *! device.
+	 */
+	PFN_TD_SET_POWER_PARAMS pfnTDSetPowerParams;
+
+	/*! Callbacks to ping the trusted device to securely run RGXStart/Stop() */
+	PFN_TD_RGXSTART pfnTDRGXStart;
+	PFN_TD_RGXSTOP pfnTDRGXStop;
+
+	/*! Callback to request allocation/freeing of secure buffers */
+	PFN_TD_SECUREBUF_ALLOC pfnTDSecureBufAlloc;
+	PFN_TD_SECUREBUF_FREE pfnTDSecureBufFree;
+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
+
+	/*! Function that does device feature specific system layer initialisation */
+	PFN_SYS_DEV_FEAT_DEP_INIT	pfnSysDevFeatureDepInit;
+
+	/*! Function returns system layer execution environment */
+	PFN_SYS_DRIVER_MODE			pfnSysDriverMode;
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+	PVRSRV_DVFS sDVFS;
+#endif
+
+#if defined(SUPPORT_ALT_REGBASE)
+	IMG_DEV_PHYADDR sAltRegsGpuPBase;
+#endif
+
+#if defined(SUPPORT_DEVICE_PA0_AS_VALID)
+	IMG_BOOL bDevicePA0IsValid;
+#endif
+};
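+
+/* Illustrative sketch only: a minimal population of this config from a
+ * system layer's SysDevInit(). The name, register base, IRQ number and
+ * gasPhysHeaps[] are placeholders, not values taken from any real platform:
+ *
+ *   psDevConfig->pvOSDevice           = pvOSDevice;
+ *   psDevConfig->pszName              = "example_gpu";
+ *   psDevConfig->sRegsCpuPBase.uiAddr = 0x13000000;
+ *   psDevConfig->ui32RegsSize         = 0x10000;
+ *   psDevConfig->ui32IRQ              = 102;
+ *   psDevConfig->pasPhysHeaps         = gasPhysHeaps;
+ *   psDevConfig->ui32PhysHeapCount    = ARRAY_SIZE(gasPhysHeaps);
+ */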
+
+#endif /* __PVRSRV_DEVICE_H__*/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_device_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_device_types.h
new file mode 100644
index 0000000..0439c34
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_device_types.h
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR device type definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVRSRV_DEVICE_TYPES_H__)
+#define __PVRSRV_DEVICE_TYPES_H__
+
+#include "img_types.h"
+
+#define PVRSRV_MAX_DEVICES		16	/*!< Largest supported number of devices on the system */
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#endif /* __PVRSRV_DEVICE_TYPES_H__ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_error.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_error.c
new file mode 100644
index 0000000..5cd02a2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_error.c
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services error support
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+IMG_EXPORT
+const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError)
+{
+	switch (eError)
+	{
+		case PVRSRV_OK:
+			return "PVRSRV_OK";
+#define PVRE(x) \
+		case x: \
+			return #x;
+#include "pvrsrv_errors.h"
+#undef PVRE
+		default:
+			return "Unknown PVRSRV error number";
+	}
+}
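+
+/* Illustrative usage only: the string form is typically fed straight into
+ * the driver's debug macros (PVR_DPF from pvr_debug.h), e.g.
+ *
+ *   PVR_DPF((PVR_DBG_ERROR, "Allocation failed: %s",
+ *            PVRSRVGetErrorString(eError)));
+ */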
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_error.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_error.h
new file mode 100644
index 0000000..3cc446e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_error.h
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File           pvrsrv_error.h
+@Title          Services error enumerant
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines error codes used by any/all services modules
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (PVRSRV_ERROR_H)
+#define PVRSRV_ERROR_H
+
+/*!
+ *****************************************************************************
+ * Error values
+ *****************************************************************************/
+typedef enum PVRSRV_ERROR
+{
+	PVRSRV_OK,
+#define PVRE(x) x,
+#include "pvrsrv_errors.h"
+#undef PVRE
+	PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_ERROR;
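+
+/* Illustrative expansion, not part of the build: with PVRE(x) defined as
+ * "x," the #include above unrolls pvrsrv_errors.h into the enumerator list
+ *
+ *   PVRSRV_OK,
+ *   PVRSRV_ERROR_OUT_OF_MEMORY,
+ *   PVRSRV_ERROR_TOO_FEW_BUFFERS,
+ *   ...
+ *
+ * The same X-macro include is reused with a different PVRE() definition in
+ * pvrsrv_error.c to generate the matching error-to-string switch.
+ */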
+
+#endif /* !defined (PVRSRV_ERROR_H) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_errors.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_errors.h
new file mode 100644
index 0000000..997e984
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_errors.h
@@ -0,0 +1,398 @@
+/*************************************************************************/ /*!
+@File           pvrsrv_errors.h
+@Title          Services error codes
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines error codes used by any/all services modules
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Don't add include guards to this file! */
+
+PVRE(PVRSRV_ERROR_OUT_OF_MEMORY)
+PVRE(PVRSRV_ERROR_TOO_FEW_BUFFERS)
+PVRE(PVRSRV_ERROR_INVALID_PARAMS)
+PVRE(PVRSRV_ERROR_INIT_FAILURE)
+PVRE(PVRSRV_ERROR_CANT_REGISTER_CALLBACK)
+PVRE(PVRSRV_ERROR_INVALID_DEVICE)
+PVRE(PVRSRV_ERROR_NOT_OWNER)
+PVRE(PVRSRV_ERROR_BAD_MAPPING)
+PVRE(PVRSRV_ERROR_TIMEOUT)
+PVRE(PVRSRV_ERROR_NOT_IMPLEMENTED)
+PVRE(PVRSRV_ERROR_FLIP_CHAIN_EXISTS)
+PVRE(PVRSRV_ERROR_INVALID_SWAPINTERVAL)
+PVRE(PVRSRV_ERROR_SCENE_INVALID)
+PVRE(PVRSRV_ERROR_STREAM_ERROR)
+PVRE(PVRSRV_ERROR_FAILED_DEPENDENCIES)
+PVRE(PVRSRV_ERROR_CMD_NOT_PROCESSED)
+PVRE(PVRSRV_ERROR_CMD_TOO_BIG)
+PVRE(PVRSRV_ERROR_DEVICE_REGISTER_FAILED)
+PVRE(PVRSRV_ERROR_TOOMANYBUFFERS)
+PVRE(PVRSRV_ERROR_NOT_SUPPORTED)
+PVRE(PVRSRV_ERROR_PROCESSING_BLOCKED)
+PVRE(PVRSRV_ERROR_CANNOT_FLUSH_QUEUE)
+PVRE(PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
+PVRE(PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS)
+PVRE(PVRSRV_ERROR_RETRY)
+PVRE(PVRSRV_ERROR_DDK_VERSION_MISMATCH)
+PVRE(PVRSRV_ERROR_DDK_BUILD_MISMATCH)
+PVRE(PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH)
+PVRE(PVRSRV_ERROR_BVNC_MISMATCH)
+PVRE(PVRSRV_ERROR_FWPROCESSOR_MISMATCH)
+PVRE(PVRSRV_ERROR_UPLOAD_TOO_BIG)
+PVRE(PVRSRV_ERROR_INVALID_FLAGS)
+PVRE(PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS)
+PVRE(PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY)
+PVRE(PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR)
+PVRE(PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED)
+PVRE(PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+PVRE(PVRSRV_ERROR_IOCTL_CALL_FAILED)
+PVRE(PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR)
+PVRE(PVRSRV_ERROR_MMU_CONFIG_IS_WRONG)
+PVRE(PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_CREATE_HEAP)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_UNMAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE)
+PVRE(PVRSRV_ERROR_MMU_LIVE_ALLOCATIONS_IN_HEAP)
+PVRE(PVRSRV_ERROR_MMU_RESERVATION_NOT_INSIDE_HEAP)
+PVRE(PVRSRV_ERROR_PMR_NEW_MEMORY)
+PVRE(PVRSRV_ERROR_PMR_STILL_REFERENCED)
+PVRE(PVRSRV_ERROR_PMR_CLIENT_NOT_TRUSTED)
+PVRE(PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES)
+PVRE(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY)
+PVRE(PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES)
+PVRE(PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE)
+PVRE(PVRSRV_ERROR_PMR_NOT_PERMITTED)
+PVRE(PVRSRV_ERROR_PMR_ALREADY_OCCUPIED)
+PVRE(PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR)
+PVRE(PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR)
+PVRE(PVRSRV_ERROR_PMR_WRONG_PMR_TYPE)
+PVRE(PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS)
+PVRE(PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE)
+PVRE(PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE)
+PVRE(PVRSRV_ERROR_PMR_MAPPINGTABLE_MISMATCH)
+PVRE(PVRSRV_ERROR_PMR_INVALID_CHUNK)
+PVRE(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING)
+PVRE(PVRSRV_ERROR_PMR_EMPTY)
+PVRE(PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND)
+PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_UNMAP_FAILED)
+PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED)
+PVRE(PVRSRV_ERROR_PMR_PAGE_POISONING_FAILED)
+PVRE(PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP)
+PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX)
+PVRE(PVRSRV_ERROR_DEVICEMEM_MAP_FAILED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_NON_ZERO_USAGE_COUNT)
+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA)
+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP)
+PVRE(PVRSRV_ERROR_INVALID_MMU_TYPE)
+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT)
+PVRE(PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_PCI_CALL_FAILED)
+PVRE(PVRSRV_ERROR_PCI_REGION_TOO_SMALL)
+PVRE(PVRSRV_ERROR_PCI_REGION_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH)
+PVRE(PVRSRV_ERROR_REGISTER_BASE_NOT_SET)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL)
+PVRE(PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES)
+PVRE(PVRSRV_ERROR_FAILED_TO_FREE_PAGES)
+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_PAGES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES)
+PVRE(PVRSRV_ERROR_STILL_MAPPED)
+PVRE(PVRSRV_ERROR_MAPPING_NOT_FOUND)
+PVRE(PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_INVALID_SEGMENT_BLOCK)
+PVRE(PVRSRV_ERROR_INVALID_GFXDEVDEVDATA)
+PVRE(PVRSRV_ERROR_INVALID_DEVINFO)
+PVRE(PVRSRV_ERROR_INVALID_MEMINFO)
+PVRE(PVRSRV_ERROR_INVALID_MISCINFO)
+PVRE(PVRSRV_ERROR_UNKNOWN_IOCTL)
+PVRE(PVRSRV_ERROR_INVALID_CONTEXT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT)
+PVRE(PVRSRV_ERROR_INVALID_HEAP)
+PVRE(PVRSRV_ERROR_INVALID_KERNELINFO)
+PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE)
+PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE)
+PVRE(PVRSRV_ERROR_INVALID_WRAP_TYPE)
+PVRE(PVRSRV_ERROR_INVALID_PHYS_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_CPU_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_HEAPINFO)
+PVRE(PVRSRV_ERROR_INVALID_PERPROC)
+PVRE(PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO)
+PVRE(PVRSRV_ERROR_INVALID_MAP_REQUEST)
+PVRE(PVRSRV_ERROR_INVALID_UNMAP_REQUEST)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP)
+PVRE(PVRSRV_ERROR_MAPPING_STILL_IN_USE)
+PVRE(PVRSRV_ERROR_EXCEEDED_HW_LIMITS)
+PVRE(PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD)
+PVRE(PVRSRV_ERROR_THREAD_READ_ERROR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR)
+PVRE(PVRSRV_ERROR_ISR_ALREADY_INSTALLED)
+PVRE(PVRSRV_ERROR_ISR_NOT_INSTALLED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE)
+PVRE(PVRSRV_ERROR_INVALID_CCB_COMMAND)
+PVRE(PVRSRV_ERROR_KERNEL_CCB_FULL)
+PVRE(PVRSRV_ERROR_FLIP_FAILED)
+PVRE(PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED)
+PVRE(PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE)
+PVRE(PVRSRV_ERROR_TIMEOUT_WAITING_FOR_CLIENT_CCB)
+PVRE(PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED)
+PVRE(PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG)
+PVRE(PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG)
+PVRE(PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID)
+PVRE(PVRSRV_ERROR_BLIT_SETUP_FAILED)
+PVRE(PVRSRV_ERROR_SUBMIT_NEEDED)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_AVAILABLE)
+PVRE(PVRSRV_ERROR_PDUMP_BUFFER_FULL)
+PVRE(PVRSRV_ERROR_PDUMP_BUF_OVERFLOW)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_ACTIVE)
+PVRE(PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES)
+PVRE(PVRSRV_ERROR_MUTEX_DESTROY_FAILED)
+PVRE(PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR)
+PVRE(PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND)
+PVRE(PVRSRV_ERROR_PROCESS_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_PROCESS_NOT_FOUND)
+PVRE(PVRSRV_ERROR_SRV_CONNECT_FAILED)
+PVRE(PVRSRV_ERROR_SRV_DISCONNECT_FAILED)
+PVRE(PVRSRV_ERROR_DEINT_PHASE_FAILED)
+PVRE(PVRSRV_ERROR_INIT2_PHASE_FAILED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE)
+PVRE(PVRSRV_ERROR_NO_DC_DEVICES_FOUND)
+PVRE(PVRSRV_ERROR_DC_DEVICE_INACCESSIBLE)
+PVRE(PVRSRV_ERROR_DC_INVALID_MAXDEPTH)
+PVRE(PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNREGISTER_DEVICE)
+PVRE(PVRSRV_ERROR_NO_DEVICEDATA_FOUND)
+PVRE(PVRSRV_ERROR_NO_DEVICENODE_FOUND)
+PVRE(PVRSRV_ERROR_NO_CLIENTNODE_FOUND)
+PVRE(PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INIT_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_KILL_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER)
+PVRE(PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT)
+PVRE(PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_ALLOCATED)
+PVRE(PVRSRV_ERROR_HANDLE_TYPE_MISMATCH)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_SHAREABLE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_INVALID_SUBHANDLE)
+PVRE(PVRSRV_ERROR_HANDLE_BATCH_IN_USE)
+PVRE(PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE)
+PVRE(PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP)
+PVRE(PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE)
+PVRE(PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE)
+PVRE(PVRSRV_ERROR_INVALID_DEVICEID)
+PVRE(PVRSRV_ERROR_DEVICEID_NOT_FOUND)
+PVRE(PVRSRV_ERROR_MEMORY_TEST_FAILED)
+PVRE(PVRSRV_ERROR_CPUPADDR_TEST_FAILED)
+PVRE(PVRSRV_ERROR_COPY_TEST_FAILED)
+PVRE(PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK)
+PVRE(PVRSRV_ERROR_CLOCK_REQUEST_FAILED)
+PVRE(PVRSRV_ERROR_DISABLE_CLOCK_FAILURE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK)
+PVRE(PVRSRV_ERROR_UNKNOWN_SGL_ERROR)
+PVRE(PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE)
+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE)
+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+PVRE(PVRSRV_ERROR_BAD_SYNC_STATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID)
+PVRE(PVRSRV_ERROR_PARAMETER_BUFFER_INVALID_ALIGNMENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ACQUIRE_CONNECTION)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CONNECTION)
+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_IN_USE)
+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_INVALID)
+PVRE(PVRSRV_ERROR_HP_REQUEST_TOO_LONG)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM_OP)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_CONTEXT)
+PVRE(PVRSRV_ERROR_BP_NOT_SET)
+PVRE(PVRSRV_ERROR_BP_ALREADY_SET)
+PVRE(PVRSRV_ERROR_FEATURE_DISABLED)
+PVRE(PVRSRV_ERROR_REG_CONFIG_ENABLED)
+PVRE(PVRSRV_ERROR_REG_CONFIG_FULL)
+PVRE(PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE)
+PVRE(PVRSRV_ERROR_MEMORY_ACCESS)
+PVRE(PVRSRV_ERROR_NO_SYSTEM_BUFFER)
+PVRE(PVRSRV_ERROR_DC_INVALID_CONFIG)
+PVRE(PVRSRV_ERROR_DC_INVALID_CROP_RECT)
+PVRE(PVRSRV_ERROR_DC_INVALID_DISPLAY_RECT)
+PVRE(PVRSRV_ERROR_DC_INVALID_BUFFER_DIMS)
+PVRE(PVRSRV_ERROR_DC_INVALID_TRANSFORM)
+PVRE(PVRSRV_ERROR_DC_INVALID_SCALE)
+PVRE(PVRSRV_ERROR_DC_INVALID_CUSTOM)
+PVRE(PVRSRV_ERROR_DC_TOO_MANY_PIPES)
+PVRE(PVRSRV_ERROR_DC_INVALID_PLANE_ALPHA)
+PVRE(PVRSRV_ERROR_NOT_READY)
+PVRE(PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_PIXEL_FORMAT)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_DIMS)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_TIMER)
+PVRE(PVRSRV_ERROR_NOT_FOUND)
+PVRE(PVRSRV_ERROR_ALREADY_OPEN)
+PVRE(PVRSRV_ERROR_STREAM_MISUSE)
+PVRE(PVRSRV_ERROR_STREAM_FULL)
+PVRE(PVRSRV_ERROR_STREAM_READLIMIT_REACHED)
+PVRE(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE)
+PVRE(PVRSRV_ERROR_PHYSMEM_NOT_ALLOCATED)
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MAX)
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MIN)
+PVRE(PVRSRV_ERROR_INVALID_PB_CONFIG)
+PVRE(PVRSRV_ERROR_META_THREAD0_NOT_ENABLED)
+PVRE(PVRSRV_ERROR_NOT_AUTHENTICATED)
+PVRE(PVRSRV_ERROR_REQUEST_TDFWCODE_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_INIT_TDFWCODE_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_INIT_TDSECUREBUF_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_MUTEX_ALREADY_CREATED)
+PVRE(PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED)
+PVRE(PVRSRV_ERROR_ALREADY_EXISTS)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SEND_PULSE)
+PVRE(PVRSRV_ERROR_TASK_FAILED)
+PVRE(PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+PVRE(PVRSRV_ERROR_INVALID_GPU_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_OFFSET)
+PVRE(PVRSRV_ERROR_CCCB_STALLED)
+PVRE(PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_NOT_ENABLED)
+PVRE(PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL)
+PVRE(PVRSRV_ERROR_FW_IMAGE_MISMATCH)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+PVRE(PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL)
+PVRE(PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX)
+PVRE(PVRSRV_ERROR_NONZERO_REFCOUNT)
+PVRE(PVRSRV_ERROR_SETAFFINITY_FAILED)
+PVRE(PVRSRV_ERROR_INTERNAL_ERROR)
+PVRE(PVRSRV_ERROR_BRIDGE_EFAULT)
+PVRE(PVRSRV_ERROR_BRIDGE_EINVAL)
+PVRE(PVRSRV_ERROR_BRIDGE_ENOMEM)
+PVRE(PVRSRV_ERROR_BRIDGE_ERANGE)
+PVRE(PVRSRV_ERROR_BRIDGE_EPERM)
+PVRE(PVRSRV_ERROR_BRIDGE_ENOTTY)
+PVRE(PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)
+PVRE(PVRSRV_ERROR_PROBE_DEFER)
+PVRE(PVRSRV_ERROR_INVALID_ALIGNMENT)
+PVRE(PVRSRV_ERROR_CLOSE_FAILED)
+PVRE(PVRSRV_ERROR_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_CONVERSION_FAILED)
+PVRE(PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL)
+PVRE(PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED)
+PVRE(PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED)
+PVRE(PVRSRV_ERROR_OBJECT_STILL_REFERENCED)
+PVRE(PVRSRV_ERROR_BVNC_UNSUPPORTED)
+PVRE(PVRSRV_ERROR_INVALID_BVNC_PARAMS)
+PVRE(PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT)
+PVRE(PVRSRV_ERROR_PID_ALREADY_REGISTERED)
+PVRE(PVRSRV_ERROR_PID_NOT_REGISTERED)
+PVRE(PVRSRV_ERROR_SIGNAL_FAILED)
+PVRE(PVRSRV_ERROR_INVALID_NOTIF_STREAM)
+PVRE(PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED)
+PVRE(PVRSRV_ERROR_INVALID_PVZ_CONFIG)
+PVRE(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED)
+PVRE(PVRSRV_ERROR_NOT_SW_TIMELINE)
+PVRE(PVRSRV_ERROR_SW_TIMELINE_AT_LATEST_POINT)
+PVRE(PVRSRV_ERROR_INVALID_PVZ_OSID)
+PVRE(PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE)
+PVRE(PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG)
+PVRE(PVRSRV_ERROR_INTERRUPTED)
+PVRE(PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+PVRE(PVRSRV_ERROR_PDUMP_INVALID_BLOCKLEN)
+PVRE(PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)
+PVRE(PVRSRV_ERROR_BAD_PARAM_SIZE)
+PVRE(PVRSRV_ERROR_INVALID_REQUEST)
+PVRE(PVRSRV_ERROR_FAILED_TO_ACQUIRE_PAGES)
+PVRE(PVRSRV_ERROR_TEST_FAILED)
+PVRE(PVRSRV_ERROR_SYNC_PRIM_OP_NOT_SUPPORTED)
+PVRE(PVRSRV_ERROR_FAILED_TO_GET_VIRT_ADDR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FREE_RESOURCE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_SEMAPHORE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_SEMAPHORE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_SEMAPHORE)
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_memallocflags.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_memallocflags.h
new file mode 100644
index 0000000..bb93c97
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_memallocflags.h
@@ -0,0 +1,844 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file defines flags used on memory allocations and mappings
+                These flags are relevant throughout the memory management
+                software stack and are specified by users of services and
+                understood by all levels of the memory management in both
+                client and server.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_MEMALLOCFLAGS_H
+#define PVRSRV_MEMALLOCFLAGS_H
+
+#include "img_types.h"
+#include "rgx_memallocflags.h"
+
+/*!
+  Type for specifying memory allocation flags.
+ */
+#if defined(SUPPORT_VALIDATION)
+typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T;
+#define PVRSRV_MEMALLOCFLAGS_FMTSPEC IMG_UINT64_FMTSPECx
+#else
+typedef IMG_UINT32 PVRSRV_MEMALLOCFLAGS_T;
+#define PVRSRV_MEMALLOCFLAGS_FMTSPEC "x"
+#endif
+
+
+/*
+ * --- MAPPING FLAGS ---
+ * | 0-3    | 4-7    | 8-10        | 11-13       | 14          |
+ * | GPU-RW | CPU-RW | GPU-Caching | CPU-Caching | KM-Mappable |
+ *
+ * --- MISC FLAGS ---
+ * | 15    | 16        | 17       | 18  | 19                | 20              | 21        | 22        | 23               |
+ * | Defer | CPU-Local | FW-Local | SVM | Sparse-Dummy-Page | CPU-Cache-Clean | FW-Config | FW-Guest  | Sparse-Zero-Page |
+ *
+ * --- DEV CONTROL FLAGS ---
+ * | 24-27        |
+ * | Device-Flags |
+ *
+ * --- MEMSET FLAGS ---
+ * | 29             | 30          | 31            |
+ * | Poison-On-Free | P.-On-Alloc | Zero-On-Alloc |
+ *
+ */
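+
+/* Illustrative composition only: a typical GPU-written, CPU-read buffer
+ * might combine the flags defined below as
+ *
+ *   PVRSRV_MEMALLOCFLAGS_T uiFlags =
+ *         PVRSRV_MEMALLOCFLAG_GPU_READABLE
+ *       | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE
+ *       | PVRSRV_MEMALLOCFLAG_CPU_READABLE
+ *       | PVRSRV_MEMALLOCFLAG_GPU_UNCACHED
+ *       | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
+ *       | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+ */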
+
+/*
+ *  **********************************************************
+ *  *                                                        *
+ *  *                       MAPPING FLAGS                    *
+ *  *                                                        *
+ *  **********************************************************
+ */
+
+/*!
+ * This flag affects the device MMU protection flags, and specifies
+ * that the memory may be read by the GPU.
+ *
+ * Typically all device memory allocations would specify this flag.
+ *
+ * At the moment, memory allocations without this flag are not supported.
+ *
+ * This flag will live with the PMR, thus subsequent mappings would
+ * honour this flag.
+ *
+ * This is a dual purpose flag.  It specifies that memory is permitted
+ * to be read by the GPU, and also requests that the allocation is
+ * mapped into the GPU as a readable mapping
+ *
+ * To be clear:
+ * - When used as an argument on PMR creation; it specifies
+ *       that GPU readable mappings will be _permitted_
+ * - When used as an argument to a "map" function: it specifies
+ *       that a GPU readable mapping is _desired_
+ * - When used as an argument to "AllocDeviceMem": it specifies
+ *       that the PMR will be created with permission to be mapped
+ *       with a GPU readable mapping, _and_ that this PMR will be
+ *       mapped with a GPU readable mapping.
+ * This distinction becomes important when (a) we export allocations;
+ * and (b) when we separate the creation of the PMR from the mapping.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_READABLE		(1U<<0)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READABLE flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_READABLE(uiFlags)		(((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READABLE) != 0)
+
+/*!
+ * This flag affects the device MMU protection flags, and specifies
+ * that the memory may be written by the GPU
+ *
+ * Using this flag on an allocation signifies that the allocation is
+ * intended to be written by the GPU.
+ *
+ * Omitting this flag causes a read-only mapping.
+ *
+ * This flag will live with the PMR, thus subsequent mappings would
+ * honour this flag.
+ *
+ * This is a dual purpose flag.  It specifies that memory is permitted
+ * to be written by the GPU, and also requests that the allocation is
+ * mapped into the GPU as a writable mapping (see note above about
+ * permission vs. mapping mode, and why this flag causes permissions
+ * to be inferred from mapping mode on first allocation)
+ *
+ * N.B.  This flag has no relevance to the CPU's MMU mapping, if any,
+ * and would therefore not enforce read-only mapping on CPU.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE       (1U<<1)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_WRITEABLE(uiFlags)				(((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE) != 0)
+
+/*!
+  The flag indicates whether an allocation can be mapped as GPU readable in another GPU memory context.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED  (1U<<2)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags)		(((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED) != 0)
+
+/*!
+  The flag indicates whether an allocation can be mapped as GPU writable in another GPU memory context.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (1U<<3)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags)		(((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) != 0)
+
+/*!
+  The flag indicates that an allocation is mapped as readable to the CPU.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_READABLE        (1U<<4)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READABLE flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_READABLE(uiFlags)				(((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READABLE) != 0)
+
+/*!
+  The flag indicates that an allocation is mapped as writable to the CPU.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE       (1U<<5)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_WRITEABLE(uiFlags)				(((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) != 0)
+
+/*!
+  The flag indicates whether an allocation can be mapped as CPU readable in another CPU memory context.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED  (1U<<6)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_READ_PERMITTED(uiFlags)		(((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED) != 0)
+
+/*!
+  The flag indicates whether an allocation can be mapped as CPU writable in another CPU memory context.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (1U<<7)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_WRITE_PERMITTED(uiFlags)		(((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) != 0)
+
+
+/*
+ *  **********************************************************
+ *  *                                                        *
+ *  *                    CACHE CONTROL FLAGS                 *
+ *  *                                                        *
+ *  **********************************************************
+ */
+
+/*
+	GPU domain
+	==========
+
+	The following defines are used to control the GPU cache bit field.
+	The defines are mutually exclusive.
+
+	A helper macro, PVRSRV_GPU_CACHE_MODE, is provided to obtain just the GPU
+	cache bit field from the flags. This should be used whenever the GPU cache
+	mode needs to be determined.
+*/
+
+/*!
+  GPU domain. Flag indicating uncached memory. This means that any writes to memory
+  allocated with this flag are written straight to memory and thus are
+  coherent for any device in the system.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED				(0U<<8)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_UNCACHED(uiFlags)				(PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED)
+
+/*!
+   GPU domain. Use write combiner (if supported) to combine sequential writes
+   together to reduce memory access by doing burst writes.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE			(1U<<8)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags)			(PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE)
+
+/*!
+    GPU domain. This flag affects the GPU MMU protection flags.
+    The allocation will be cached.
+    Services will try to set the coherent bit in the GPU MMU tables so the
+    GPU cache is snooping the CPU cache. If coherency is not supported, the
+    caller is responsible for ensuring the caches are kept up to date.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT			(2U<<8)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags)		(PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT)
+
+/*!
+   GPU domain. Request cached memory, but not coherent (i.e. no cache
+   snooping). Services will flush the GPU internal caches after every GPU
+   task so no cache maintenance requests from the users are necessary.
+
+    Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
+    expansion.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT		(3U<<8)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)		(PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT)
+
+/*!
+    GPU domain. This flag is for internal use only and is used to indicate
+    that the underlying allocation should be cached on the GPU after all
+    the snooping and coherency checks have been done.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHED					(7U<<8)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHED mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_CACHED(uiFlags)				(PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED)
+
+/*!
+    GPU domain. GPU cache mode mask.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK			(7U<<8)
+
+/*!
+  @Description    A helper macro to obtain just the GPU	cache bit field from the flags.
+                  This should be used whenever the GPU cache mode needs to be determined.
+  @Input  uiFlags Allocation flags.
+  @Return         Value of the GPU cache bit field.
+ */
+#define PVRSRV_GPU_CACHE_MODE(uiFlags)					((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
+
+
+/*
+	CPU domain
+	==========
+
+	The following defines are used to control the CPU cache bit field.
+	The defines are mutually exclusive.
+
+	A helper macro, PVRSRV_CPU_CACHE_MODE, is provided to obtain just the CPU
+	cache bit field from the flags. This should be used whenever the CPU cache
+	mode needs to be determined.
+*/
+
+/*!
+   CPU domain. Request uncached memory. This means that any writes to memory
+   allocated with this flag are written straight to memory and thus are
+   coherent for any device in the system.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED				(0U<<11)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_UNCACHED(uiFlags)				(PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED)
+
+/*!
+   CPU domain. Use write combiner (if supported) to combine sequential writes
+   together to reduce memory access by doing burst writes.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE			(1U<<11)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags)			(PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE)
+
+/*!
+    CPU domain. This flag affects the CPU MMU protection flags.
+    The allocation will be cached.
+    Services will try to set the coherent bit in the CPU MMU tables so the
+    CPU cache is snooping the GPU cache. If coherency is not supported, the
+    caller is responsible for ensuring the caches are kept up to date.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT			(2U<<11)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags)		(PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+
+/*!
+    CPU domain. Request cached memory, but not coherent (i.e. no cache
+    snooping). This means that if the allocation needs to transition from
+    one device to another, Services has to be informed so that it can
+    flush/invalidate the appropriate caches.
+
+    Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
+    expansion.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT		(3U<<11)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)		(PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT)
+
+/*!
+    CPU domain. This flag is for internal use only and is used to indicate
+    that the underlying allocation should be cached on the CPU
+    after all the snooping and coherency checks have been done.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHED					(7U<<11)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHED mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_CACHED(uiFlags)				(PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED)
+
+/*!
+	CPU domain. CPU cache mode mask
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK			(7U<<11)
+
+/*!
+  @Description    A helper macro to obtain just the CPU	cache bit field from the flags.
+                  This should be used whenever the CPU cache mode needs to be determined.
+  @Input  uiFlags Allocation flags.
+  @Return         Value of the CPU cache bit field.
+ */
+#define PVRSRV_CPU_CACHE_MODE(uiFlags)					((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)
+
+/* Helper flags for usual cases */
+
+/*!
+ * Memory will be uncached on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_UNCACHED					(PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_UNCACHED mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_UNCACHED(uiFlags)					(PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED)
+
+/*!
+ * Memory will be write-combined on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_WRITE_COMBINE				(PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE | PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_WRITE_COMBINE mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_WRITE_COMBINE(uiFlags)				(PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_WRITE_COMBINE)
+
+/*!
+ * Memory will be cache-incoherent on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT			(PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CACHE_INCOHERENT(uiFlags)			(PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT)
+
+/*!
+	Cache mode mask
+*/
+#define PVRSRV_CACHE_MODE(uiFlags)						(PVRSRV_GPU_CACHE_MODE(uiFlags) | PVRSRV_CPU_CACHE_MODE(uiFlags))
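+
+/* Illustrative usage only: the PVRSRV_CHECK_* cache macros above compare
+ * the whole cache-mode bit field, not individual bits, so incoherent
+ * cached memory is detected as
+ *
+ *   if (PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags))
+ *   {
+ *       (CPU caches must be maintained around device access here)
+ *   }
+ *
+ * rather than by AND-ing PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT (3U<<11)
+ * directly, which would also match PVRSRV_MEMALLOCFLAG_CPU_CACHED (7U<<11).
+ */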
+
+
+/*!
+   CPU MMU Flags mask -- intended for use internal to services only
+ */
+#define PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK  (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+												PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+												PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)
+
+/*!
+   MMU Flags mask -- intended for use internal to services only - used for
+   partitioning the flags bits and determining which flags to pass down to
+   mmu_common.c
+ */
+#define PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK  (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                                PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                                PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
+
+/*!
+    Indicates that the PMR created due to this allocation will support
+    in-kernel CPU mappings.  Only privileged processes may use this flag as
+    it may cause wastage of precious kernel virtual memory on some platforms.
+ */
+#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE			(1U<<14)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags)		(((uiFlags) & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) != 0)
+
+
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *            ALLOC MEMORY FLAGS                          *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * (Bit 15)
+ *
+ */
+/*!
+    Indicates that no OS-backed pages should be acquired at allocation time;
+    physical backing is deferred and provided on demand (see the "Defer" bit
+    in the flag layout above).
+ */
+#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC			(1U<<15)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_ON_DEMAND(uiFlags)					(((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0)
+
+/*!
+    Indicates that the allocation will primarily be accessed by the CPU, so
+    a UMA allocation (if available) is preferable. If not set, the allocation
+    will primarily be accessed by the GPU, so an LMA allocation (if available)
+    is preferable.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_LOCAL					(1U<<16)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_LOCAL(uiFlags)					(((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL) != 0)
+
+
+/*!
+    Indicates that the allocation will primarily be accessed by the FW.
+ */
+#define PVRSRV_MEMALLOCFLAG_FW_LOCAL					(1U<<17)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_FW_LOCAL flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_FW_LOCAL(uiFlags)					(((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_LOCAL) != 0)
+
+/*!
+    Indicates that the allocation will be accessed by the CPU and GPU using
+    the same virtual address, i.e. for all SVM allocs,
+    IMG_CPU_VIRTADDR == IMG_DEV_VIRTADDR
+ */
+#define PVRSRV_MEMALLOCFLAG_SVM_ALLOC					(1U<<18)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_SVM_ALLOC flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_SVM_ALLOC(uiFlags)					(((uiFlags) & PVRSRV_MEMALLOCFLAG_SVM_ALLOC) != 0)
+
+/*!
+    Indicates the particular memory that's being allocated is sparse and the
+    sparse regions should not be backed by dummy page
+*/
+#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING		(1U << 19)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiFlags)		(((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) == 0)
+
+/*!
+    Services will clean the cache for the allocated memory.
+    For performance reasons, avoid this flag if the allocation is written
+    to by the CPU anyway before the next GPU kick.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN				(1U<<20)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags)			(((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN) != 0)
+
+/*!
+ * Indicates that the particular allocation will exist in the FW Config heap,
+ * which resides right after the end of the FW Main heap.
+ */
+#define PVRSRV_MEMALLOCFLAG_FW_CONFIG					(1U<<21)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_FW_CONFIG flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_FW_CONFIG(uiFlags)					(((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_CONFIG) != 0)
+
+/*!
+ * Indicates that the particular allocation is being mapped into FW by the
+ * privileged OSID-0 (i.e. host/primary) driver on behalf of an unprivileged
+ * guest OSID-x (i.e. OSID-1 up to OSID-7) driver
+ */
+#define PVRSRV_MEMALLOCFLAG_FW_GUEST					(1U<<22)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_FW_GUEST flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_FW_GUEST(uiFlags)					(((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_GUEST) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING
+
+    Indicates that the particular memory being allocated is sparse and that
+    the sparse regions should be backed by the zero page. This differs from
+    the zero-on-alloc flag in that only physically unbacked pages are backed
+    by the zero page at the time of mapping.
+    The zero-backed page is always mapped read-only, irrespective of the
+    allocation's original attributes.
+*/
+#define PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING			(1U << 23)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiFlags)		(((uiFlags) & \
+			PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING) == PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING)
+
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *            MEMORY ZEROING AND POISONING FLAGS          *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * Zero / Poison, on alloc/free
+ *
+ * We think the following usecases are required:
+ *
+ *  don't poison or zero on alloc or free
+ *     (normal operation, also most efficient)
+ *  poison on alloc
+ *     (for helping to highlight bugs)
+ *  poison on alloc and free
+ *     (for helping to highlight bugs)
+ *  zero on alloc
+ *     (avoid highlighting security issues in other uses of memory)
+ *  zero on alloc and poison on free
+ *     (avoid highlighting security issues in other uses of memory, while
+ *      helping to highlight a subset of bugs e.g. memory freed prematurely)
+ *
+ * Since there are more than 4, we can't encode this in just two bits,
+ * so we might as well have a separate flag for each of the three
+ * actions.
+ */
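+
+/* Illustrative composition only: the "zero on alloc and poison on free"
+ * usecase above corresponds to
+ *
+ *   uiFlags |= PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
+ *            | PVRSRV_MEMALLOCFLAG_POISON_ON_FREE;
+ */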
+
+/*!
+    Ensures that the memory allocated is initialised with zeroes.
+ */
+#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC				(1U<<31)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)				(((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0)
+
+/*!
+    Scribbles over the allocated memory with a poison value
+
+    Not compatible with ZERO_ON_ALLOC
+
+    Poisoning is very deliberately _not_ reflected in PDump as we want
+    a simulation to cry loudly if the initialised data propagates to a
+    result.
+ */
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC				(1U<<30)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)			(((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0)
+
+/*!
+    Causes memory to be trashed when freed, as a lazy man's security measure.
+ */
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (1U<<29)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_FREE flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_POISON_ON_FREE(uiFlags)			(((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) != 0)
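Taken together, these flags cover the use cases listed above, and the CHECK macros make the chosen combination easy to validate at the call site. A minimal sketch under stated assumptions (the helper is hypothetical; PVRSRV_MEMALLOCFLAGS_T and PVR_ASSERT are assumed from the surrounding DDK headers):

/* Hypothetical helper: pick zeroing/poisoning flags per the use cases above. */
static PVRSRV_MEMALLOCFLAGS_T DebugAllocFlags(IMG_BOOL bPoison)
{
	PVRSRV_MEMALLOCFLAGS_T uiFlags = 0;

	if (bPoison)
	{
		/* poison on alloc and free: helps highlight stale-memory bugs */
		uiFlags |= PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC |
		           PVRSRV_MEMALLOCFLAG_POISON_ON_FREE;
	}
	else
	{
		/* zero on alloc, poison on free: avoids leaking prior contents */
		uiFlags |= PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
		           PVRSRV_MEMALLOCFLAG_POISON_ON_FREE;
	}

	/* ZERO_ON_ALLOC and POISON_ON_ALLOC are mutually exclusive */
	PVR_ASSERT(!(PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) &&
	             PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)));

	return uiFlags;
}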
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *                Device specific MMU flags               *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * (Bits 24 to 27)
+ *
+ * Some Services-controlled devices have device-specific control bits in
+ * their page table entries; these flags need to be passed down through the
+ * memory management layers so the user can control those bits.
+ * For example, the RGX device defines its flags in rgx_memallocflags.h.
+ */
+
+/*!
+ * Offset of device specific MMU flags.
+ */
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET		24
+
+/*!
+ * Mask for retrieving device specific MMU flags.
+ */
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK		0x0f000000UL
+
+/*!
+  @Description    Helper macro for setting device specific MMU flags.
+  @Input    n     Device-specific flag value, shifted into the device flag field.
+  @Return         Flag vector with the given device flag bits set.
+ */
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(n)	\
+			(((n) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
+
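Worked values for the helper macro above, following directly from the offset and mask as defined:

/* PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(0x1U) == 0x01000000 (bit 24)
 * PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(0x8U) == 0x08000000 (bit 27)
 * Values wider than the 4-bit field are silently truncated by the mask.
 */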
+
+/*!
+ * Secure buffer mask -- Flags in the mask are allowed for secure buffers
+ * because they are not related to CPU mappings.
+ */
+#define PVRSRV_MEMALLOCFLAGS_SECBUFMASK  ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+                                           PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED)
+
+
+
+/*!
+  PMR flags mask -- for internal Services use only. This is the set of flags
+  that will be passed down and stored with the PMR; it also includes the
+  MMU flags which the PMR has to pass down to mm_common.c at PMRMap time.
+*/
+#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK  (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+                                            PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+                                            PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                            PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
+                                            PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+                                            PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                            PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
+                                            PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
+                                            PVRSRV_MEMALLOCFLAG_FW_LOCAL | \
+                                            PVRSRV_MEMALLOCFLAG_FW_CONFIG | \
+                                            PVRSRV_MEMALLOCFLAG_FW_GUEST | \
+                                            PVRSRV_MEMALLOCFLAG_CPU_LOCAL)
+
+/*!
+  RA differentiation mask -- for internal Services use only.
+
+  This is the set of flag bits that determines whether a pair of allocations
+  may live in the same page table. Allocations whose flags differ in any of
+  these bits are allocated from separate RA imports and therefore never
+  coexist in the same page.
+  Zeroing and poisoning of memory are special cases: the caller is
+  responsible for setting each sub-allocation to the value it requires, and
+  differentiating between zeroed and poisoned RA imports would not make
+  sense because the memory may be reused.
+*/
+#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \
+                                                      & \
+                                                      ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC   | \
+                                                        PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+
+/*!
+  Flags that affect _allocation_
+*/
+#define PVRSRV_MEMALLOCFLAGS_PERALLOCFLAGSMASK (0xFFFFFFFFU)
+
+/*!
+  Flags that affect _mapping_
+*/
+#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK   (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+                                                    PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
+                                                    PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING)
+
+#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0)
+#error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK
+#endif
+
+
+/*!
+  Flags that affect _physical allocations_ in the DevMemX API
+ */
+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_LOCAL | \
+                                                    PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_POISON_ON_FREE)
+
+/*!
+  Flags that affect _virtual allocations_ in the DevMemX API
+ */
+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK  (PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED | \
+                                                    PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED)
+
+#endif /* PVRSRV_MEMALLOCFLAGS_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_pool.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_pool.c
new file mode 100644
index 0000000..0e1fff1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_pool.c
@@ -0,0 +1,274 @@
+/**************************************************************************/ /*!
+@File
+@Title          Services pool implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides a generic pool implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "lock.h"
+#include "dllist.h"
+#include "allocmem.h"
+
+struct _PVRSRV_POOL_
+{
+	POS_LOCK hLock;
+	/* total maximum number of permitted entries in the pool */
+	IMG_UINT uiMaxEntries;
+	/* number of entries currently handed out via PVRSRVPoolGet and not
+	 * yet returned, i.e. in use
+	 */
+	IMG_UINT uiNumBusy;
+	/* number of not-in-use entries currently sitting free in the pool */
+	IMG_UINT uiNumFree;
+
+	DLLIST_NODE sFreeList;
+
+	const IMG_CHAR *pszName;
+
+	PVRSRV_POOL_ALLOC_FUNC *pfnAlloc;
+	PVRSRV_POOL_FREE_FUNC *pfnFree;
+	void *pvPrivData;
+};
+
+typedef struct _PVRSRV_POOL_ENTRY_
+{
+	DLLIST_NODE sNode;
+	void *pvData;
+} PVRSRV_POOL_ENTRY;
+
+PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc,
+					PVRSRV_POOL_FREE_FUNC *pfnFree,
+					IMG_UINT32 ui32MaxEntries,
+					const IMG_CHAR *pszName,
+					void *pvPrivData,
+					PVRSRV_POOL **ppsPool)
+{
+	PVRSRV_POOL *psPool;
+	PVRSRV_ERROR eError;
+
+	psPool = OSAllocMem(sizeof(PVRSRV_POOL));
+
+	if (psPool == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_alloc;
+	}
+
+	eError = OSLockCreate(&psPool->hLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto err_lock_create;
+	}
+
+	psPool->uiMaxEntries = ui32MaxEntries;
+	psPool->uiNumBusy = 0;
+	psPool->uiNumFree = 0;
+	psPool->pfnAlloc = pfnAlloc;
+	psPool->pfnFree = pfnFree;
+	psPool->pvPrivData = pvPrivData;
+	psPool->pszName = pszName;
+
+	dllist_init(&psPool->sFreeList);
+
+	*ppsPool = psPool;
+
+	return PVRSRV_OK;
+
+err_lock_create:
+	OSFreeMem(psPool);
+err_alloc:
+	return eError;
+}
+
+static PVRSRV_ERROR _DestroyPoolEntry(PVRSRV_POOL *psPool,
+					PVRSRV_POOL_ENTRY *psEntry)
+{
+	psPool->pfnFree(psPool->pvPrivData, psEntry->pvData);
+	OSFreeMem(psEntry);
+
+	return PVRSRV_OK;
+}
+
+void PVRSRVPoolDestroy(PVRSRV_POOL *psPool)
+{
+	if (psPool->uiNumBusy != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to destroy pool %s "
+						"with %u entries still in use",
+						__func__,
+						psPool->pszName,
+						psPool->uiNumBusy));
+		return;
+	}
+
+	OSLockDestroy(psPool->hLock);
+
+	while (psPool->uiNumFree)
+	{
+		PVRSRV_POOL_ENTRY *psEntry;
+		DLLIST_NODE *psChosenNode;
+
+		psChosenNode = dllist_get_next_node(&psPool->sFreeList);
+		dllist_remove_node(psChosenNode);
+
+		psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode);
+
+		_DestroyPoolEntry(psPool, psEntry);
+
+		psPool->uiNumFree--;
+	}
+
+	OSFreeMem(psPool);
+}
+
+static PVRSRV_ERROR _CreateNewPoolEntry(PVRSRV_POOL *psPool,
+					PVRSRV_POOL_ENTRY **ppsEntry)
+{
+	PVRSRV_POOL_ENTRY *psNewEntry;
+	PVRSRV_ERROR eError;
+
+	psNewEntry = OSAllocMem(sizeof(PVRSRV_POOL_ENTRY));
+
+	if (psNewEntry == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_allocmem;
+	}
+
+	dllist_init(&psNewEntry->sNode);
+
+	eError = psPool->pfnAlloc(psPool->pvPrivData, &psNewEntry->pvData);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto err_pfn_alloc;
+	}
+
+	*ppsEntry = psNewEntry;
+
+	return PVRSRV_OK;
+
+err_pfn_alloc:
+	OSFreeMem(psNewEntry);
+err_allocmem:
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool,
+					PVRSRV_POOL_TOKEN *hToken,
+					void **ppvDataOut)
+{
+	PVRSRV_POOL_ENTRY *psEntry;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	OSLockAcquire(psPool->hLock);
+
+	/* check if we already have a free element ready */
+	if (psPool->uiNumFree)
+	{
+		DLLIST_NODE *psChosenNode;
+		psChosenNode = dllist_get_next_node(&psPool->sFreeList);
+		dllist_remove_node(psChosenNode);
+
+		psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode);
+
+		psPool->uiNumFree--;
+	}
+	else
+	{
+		/* no available elements in the pool. try to create one */
+
+		eError = _CreateNewPoolEntry(psPool, &psEntry);
+
+		if (eError != PVRSRV_OK)
+		{
+			goto out_unlock;
+		}
+	}
+
+#if defined(DEBUG) || defined(SUPPORT_VALIDATION)
+	/* Don't poison the IN buffer as it is copied from the client and
+	 * poisoning it would be a waste of cycles.
+	 */
+	OSCachedMemSet(((IMG_PBYTE)psEntry->pvData)+PVRSRV_MAX_BRIDGE_IN_SIZE,
+			PVRSRV_POISON_ON_ALLOC_VALUE, PVRSRV_MAX_BRIDGE_OUT_SIZE);
+#endif
+
+	psPool->uiNumBusy++;
+	*hToken = psEntry;
+	*ppvDataOut = psEntry->pvData;
+
+out_unlock:
+	OSLockRelease(psPool->hLock);
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool, PVRSRV_POOL_TOKEN hToken)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_POOL_ENTRY *psEntry = hToken;
+
+	PVR_ASSERT(psPool->uiNumBusy > 0);
+
+	OSLockAcquire(psPool->hLock);
+
+	/* put this entry in the pool if the pool has space,
+	 * otherwise free it
+	 */
+	if (psPool->uiNumFree < psPool->uiMaxEntries)
+	{
+		dllist_add_to_tail(&psPool->sFreeList, &psEntry->sNode);
+		psPool->uiNumFree++;
+	}
+	else
+	{
+		eError = _DestroyPoolEntry(psPool, psEntry);
+	}
+
+	psPool->uiNumBusy--;
+
+	OSLockRelease(psPool->hLock);
+
+	return eError;
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_pool.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_pool.h
new file mode 100644
index 0000000..71a204f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_pool.h
@@ -0,0 +1,135 @@
+/**************************************************************************/ /*!
+@File
+@Title          Services pool implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides a generic pool implementation.
+                Entries can be dynamically retrieved from and returned to
+                the pool using the PVRSRVPoolGet/PVRSRVPoolPut function
+                pair. Entries are created lazily, i.e. not until first use.
+                The pool API accepts an allocation/free function pair that
+                allocates an entry's private data and returns it to the
+                caller on every entry 'Get'.
+                The pool keeps up to ui32MaxEntries entries allocated.
+                Any entry 'Put' back in excess of this number is freed on
+                the spot instead of being returned to the pool.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__PVRSRVPOOL_H__)
+#define __PVRSRVPOOL_H__
+
+/**************************************************************************/ /*!
+ @Description  Callback function called during creation of a new element. This
+               function allocates an object that will be stored in the pool.
+               The object can be retrieved from the pool by calling
+               PVRSRVPoolGet.
+ @Input        pvPrivData      Private data passed to the alloc function.
+ @Output       pvOut           Allocated object.
+ @Return       PVRSRV_ERROR    PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+typedef PVRSRV_ERROR (PVRSRV_POOL_ALLOC_FUNC)(void *pvPrivData, void **pvOut);
+
+/**************************************************************************/ /*!
+ @Description  Callback function called to free the object allocated by
+               the counterpart alloc function.
+ @Input        pvPrivData      Private data passed to the free function.
+ @Input        pvFreeData      Object allocated by PVRSRV_POOL_ALLOC_FUNC.
+*/ /***************************************************************************/
+typedef void (PVRSRV_POOL_FREE_FUNC)(void *pvPrivData, void *pvFreeData);
+
+typedef IMG_HANDLE PVRSRV_POOL_TOKEN;
+
+typedef struct _PVRSRV_POOL_ PVRSRV_POOL;
+
+/**************************************************************************/ /*!
+ @Function     PVRSRVPoolCreate
+ @Description  Creates a new buffer pool.
+ @Input        pfnAlloc        Allocation function pointer. This function is
+                               used to allocate each new pool entry's data.
+ @Input        pfnFree         Free function pointer. This function is used
+                               to free the memory allocated by pfnAlloc.
+ @Input        ui32MaxEntries  Total maximum number of entries in the pool.
+ @Input        pszName         Name of the pool. The string must be NULL
+                               terminated.
+ @Input        pvPrivData      Private data that will be passed to pfnAlloc and
+                               pfnFree functions.
+ @Output       ppsPool         New buffer pool object.
+ @Return       PVRSRV_ERROR    PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc,
+					PVRSRV_POOL_FREE_FUNC *pfnFree,
+					IMG_UINT32 ui32MaxEntries,
+					const IMG_CHAR *pszName,
+					void *pvPrivData,
+					PVRSRV_POOL **ppsPool);
+
+/**************************************************************************/ /*!
+ @Function     PVRSRVPoolDestroy
+ @Description  Destroys pool created by PVRSRVPoolCreate.
+ @Input        psPool          Buffer pool object meant to be destroyed.
+*/ /***************************************************************************/
+void PVRSRVPoolDestroy(PVRSRV_POOL *psPool);
+
+/**************************************************************************/ /*!
+ @Function     PVRSRVPoolGet
+ @Description  Retrieves an entry from the pool. If no free elements are
+               available, a new entry will be allocated.
+ @Input        psPool          Pointer to the pool.
+ @Output       hToken          Pointer to the entry handle.
+ @Output       ppvDataOut      Pointer to data stored in the entry (the data
+                               allocated by the pfnAlloc function).
+ @Return       PVRSRV_ERROR    PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool,
+						PVRSRV_POOL_TOKEN *hToken,
+						void **ppvDataOut);
+
+/**************************************************************************/ /*!
+ @Function     PVRSRVPoolPut
+ @Description  Returns an entry to the pool. If the number of free entries
+               already equals the ui32MaxEntries set at pool creation, the
+               entry will be freed instead.
+ @Input        psPool          Pointer to the pool.
+ @Input        hToken          Entry handle.
+ @Return       PVRSRV_ERROR    PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool,
+						PVRSRV_POOL_TOKEN hToken);
+
+#endif /* __PVRSRVPOOL_H__ */
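A minimal usage sketch of the API above, assuming only the declarations in this header plus OSAllocMem/OSFreeMem and PVR_UNREFERENCED_PARAMETER from the DDK's allocmem/img_defs headers (the Example* callbacks, buffer size and pool name are hypothetical; error handling trimmed):

/* Hypothetical alloc/free callbacks for fixed-size scratch buffers. */
static PVRSRV_ERROR ExamplePoolAlloc(void *pvPrivData, void **ppvOut)
{
	PVR_UNREFERENCED_PARAMETER(pvPrivData);
	*ppvOut = OSAllocMem(1024);
	return (*ppvOut != NULL) ? PVRSRV_OK : PVRSRV_ERROR_OUT_OF_MEMORY;
}

static void ExamplePoolFree(void *pvPrivData, void *pvFreeData)
{
	PVR_UNREFERENCED_PARAMETER(pvPrivData);
	OSFreeMem(pvFreeData);
}

static void ExamplePoolUsage(void)
{
	PVRSRV_POOL *psPool;
	PVRSRV_POOL_TOKEN hToken;
	void *pvBuf;

	if (PVRSRVPoolCreate(ExamplePoolAlloc, ExamplePoolFree, 8,
	                     "example_pool", NULL, &psPool) != PVRSRV_OK)
	{
		return;
	}

	if (PVRSRVPoolGet(psPool, &hToken, &pvBuf) == PVRSRV_OK)
	{
		/* ... use pvBuf ... */
		PVRSRVPoolPut(psPool, hToken); /* kept on the free list (pool not full) */
	}

	PVRSRVPoolDestroy(psPool); /* frees any entries still on the free list */
}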
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_sync_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_sync_km.h
new file mode 100644
index 0000000..04611f9f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_sync_km.h
@@ -0,0 +1,65 @@
+/*************************************************************************/ /*!
+@File
+@Title         PVR synchronisation interface
+@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description   Types for server side code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef PVRSRV_SYNC_KM_H
+#define PVRSRV_SYNC_KM_H
+
+#include <powervr/pvrsrv_sync_ext.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define SYNC_FB_FILE_STRING_MAX			256
+#define SYNC_FB_MODULE_STRING_LEN_MAX	(32)
+#define	SYNC_FB_DESC_STRING_LEN_MAX		(32)
+
+/* By default the fence-sync module emits into HWPerf (if enabled) and
+ * assumes a process (i.e. sleepable) context */
+#define PVRSRV_FENCE_FLAG_NONE             (0U)
+#define PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT (1U << 0)
+#define PVRSRV_FENCE_FLAG_CTX_ATOMIC       (1U << 1)
+
+#if defined(__cplusplus)
+}
+#endif
+#endif	/* PVRSRV_SYNC_KM_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_sync_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_sync_server.h
new file mode 100644
index 0000000..175490c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_sync_server.h
@@ -0,0 +1,279 @@
+/**************************************************************************/ /*!
+@File
+@Title          Fence sync server interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _PVRSRV_SYNC_SERVER_H_
+#define _PVRSRV_SYNC_SERVER_H_
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#include "sync_fallback_server.h"
+#include "pvr_notifier.h"
+#include "img_types.h"
+#include "pvrsrv_sync_km.h"
+#elif defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+#include "rgxhwperf.h"
+
+#define SYNC_SW_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+#define SYNC_SW_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+
+typedef struct _SYNC_TIMELINE_OBJ_
+{
+	void *pvTlObj; /* Implementation specific timeline object */
+
+	PVRSRV_TIMELINE hTimeline; /* Reference to implementation-independent timeline object */
+} SYNC_TIMELINE_OBJ;
+
+typedef struct _SYNC_FENCE_OBJ_
+{
+	void *pvFenceObj; /* Implementation specific fence object */
+
+	PVRSRV_FENCE hFence; /* Reference to implementation-independent fence object */
+} SYNC_FENCE_OBJ;
+
+static inline void SyncClearTimelineObj(SYNC_TIMELINE_OBJ *psSTO)
+{
+	psSTO->pvTlObj = NULL;
+	psSTO->hTimeline = PVRSRV_NO_TIMELINE;
+}
+
+static inline IMG_BOOL SyncIsTimelineObjValid(const SYNC_TIMELINE_OBJ *psSTO)
+{
+	return psSTO->pvTlObj != NULL;
+}
+
+static inline void SyncClearFenceObj(SYNC_FENCE_OBJ *psSFO)
+{
+	psSFO->pvFenceObj = NULL;
+	psSFO->hFence = PVRSRV_NO_FENCE;
+}
+
+static inline IMG_BOOL SyncIsFenceObjValid(const SYNC_FENCE_OBJ *psSFO)
+{
+	return psSFO->pvFenceObj != NULL;
+}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+
+/* Mapping of each required function to its appropriate sync-implementation function */
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	#define SyncFenceWaitKM_                SyncFbFenceWait
+	#define SyncGetFenceObj_                SyncFbGetFenceObj
+	#define SyncFenceReleaseKM_             SyncFbFenceReleaseKM
+	#define SyncSWTimelineFenceCreateKM_    SyncFbSWTimelineFenceCreateKM
+	#define SyncSWTimelineAdvanceKM_        SyncFbSWTimelineAdvanceKM
+	#define SyncSWGetTimelineObj_           SyncFbSWGetTimelineObj
+	#define SyncSWTimelineReleaseKM_        SyncFbTimelineRelease
+	#define SyncDumpFence_                  SyncFbDumpFenceKM
+	#define SyncSWDumpTimeline_             SyncFbSWDumpTimelineKM
+#elif defined(SUPPORT_NATIVE_FENCE_SYNC)
+	#define SyncFenceWaitKM_                pvr_sync_fence_wait
+	#define SyncGetFenceObj_                pvr_sync_fence_get
+	#define SyncFenceReleaseKM_             pvr_sync_fence_release
+	#define SyncSWTimelineFenceCreateKM_    pvr_sync_sw_timeline_fence_create
+	#define SyncSWTimelineAdvanceKM_        pvr_sync_sw_timeline_advance
+	#define SyncSWGetTimelineObj_           pvr_sync_sw_timeline_get
+	#define SyncSWTimelineReleaseKM_        pvr_sync_sw_timeline_release
+	#define SyncDumpFence_                  sync_dump_fence
+	#define SyncSWDumpTimeline_             sync_sw_dump_timeline
+#endif
+
+/*************************************************************************/ /*!
+@Function       SyncFenceWaitKM
+
+@Description    Wait for all the sync points in the fence to be signalled.
+
+@Input          psFenceObj          Fence to wait on
+
+@Input          ui32TimeoutInMs     Maximum time to wait (in milliseconds)
+
+@Return         PVRSRV_OK               once the fence has been passed (all
+                                        containing check points have either
+                                        signalled or errored)
+                PVRSRV_ERROR_TIMEOUT    if the poll has exceeded the timeout
+                PVRSRV_ERROR_FAILED_DEPENDENCIES Other sync-impl specific error
+*/ /**************************************************************************/
+static inline PVRSRV_ERROR
+SyncFenceWaitKM(PVRSRV_DEVICE_NODE *psDevNode,
+                const SYNC_FENCE_OBJ *psFenceObj,
+                IMG_UINT32 ui32TimeoutInMs)
+{
+	PVRSRV_ERROR eError;
+
+	RGXSRV_HWPERF_SYNC_FENCE_WAIT(psDevNode->pvDevice,
+								  BEGIN,
+								  OSGetCurrentProcessID(),
+								  psFenceObj->hFence,
+								  ui32TimeoutInMs);
+
+	eError = SyncFenceWaitKM_(psFenceObj->pvFenceObj, ui32TimeoutInMs);
+
+	RGXSRV_HWPERF_SYNC_FENCE_WAIT(psDevNode->pvDevice,
+								  END,
+								  OSGetCurrentProcessID(),
+								  psFenceObj->hFence,
+								  ((eError == PVRSRV_OK) ?
+									  RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED :
+									  ((eError == PVRSRV_ERROR_TIMEOUT) ?
+										  RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT :
+										  RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR)));
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       SyncGetFenceObj
+
+@Description    Get the implementation-specific server fence object from the
+                opaque, implementation-independent PVRSRV_FENCE type.
+                On success, this function takes a reference on the base
+                fence, which must be dropped using SyncFenceReleaseKM when
+                the fence object is no longer in use.
+
+@Input          iFence        Input opaque fence object
+
+@Output         psFenceObj    Pointer to implementation specific fence object
+
+@Return         PVRSRV_ERROR  PVRSRV_OK, on success
+*/ /**************************************************************************/
+static inline PVRSRV_ERROR
+SyncGetFenceObj(PVRSRV_FENCE iFence,
+                SYNC_FENCE_OBJ *psFenceObj)
+{
+	psFenceObj->hFence = iFence;
+	return SyncGetFenceObj_(iFence, &psFenceObj->pvFenceObj);
+}
+
+/*************************************************************************/ /*!
+@Function       SyncFenceReleaseKM
+
+@Description    Release reference on this fence.
+
+@Input          psFenceObj     Fence to be released
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static inline
+PVRSRV_ERROR SyncFenceReleaseKM(const SYNC_FENCE_OBJ *psFenceObj)
+{
+	return SyncFenceReleaseKM_(psFenceObj->pvFenceObj);
+}
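
A sketch of the get/wait/release pattern the wrappers above imply (hypothetical call site; the 1000 ms timeout is illustrative):

static PVRSRV_ERROR ExampleWaitOnFence(PVRSRV_DEVICE_NODE *psDevNode,
                                       PVRSRV_FENCE iFence)
{
	SYNC_FENCE_OBJ sFenceObj;
	PVRSRV_ERROR eError;

	eError = SyncGetFenceObj(iFence, &sFenceObj); /* takes a reference */
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	eError = SyncFenceWaitKM(psDevNode, &sFenceObj, 1000 /* ms */);

	(void) SyncFenceReleaseKM(&sFenceObj); /* drop the reference taken above */

	return eError;
}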
+
+/*****************************************************************************/
+/*                                                                           */
+/*                      SW TIMELINE SPECIFIC FUNCTIONS                       */
+/*                                                                           */
+/*****************************************************************************/
+
+static inline PVRSRV_ERROR
+SyncSWTimelineFenceCreateKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            PVRSRV_TIMELINE hSWTimeline,
+                            const IMG_CHAR *pszFenceName,
+                            PVRSRV_FENCE *phOutFence)
+{
+	IMG_UINT64 ui64SyncPtIdx;
+	PVRSRV_ERROR eError;
+	eError = SyncSWTimelineFenceCreateKM_(hSWTimeline,
+	                                      pszFenceName,
+	                                      phOutFence,
+	                                      &ui64SyncPtIdx);
+	if (eError == PVRSRV_OK)
+	{
+		RGXSRV_HWPERF_ALLOC_SW_FENCE(psDevNode, OSGetCurrentProcessID(),
+		                             *phOutFence, hSWTimeline, ui64SyncPtIdx,
+		                             pszFenceName, OSStringLength(pszFenceName));
+	}
+	return eError;
+}
+
+static inline PVRSRV_ERROR
+SyncSWTimelineAdvanceKM(PVRSRV_DEVICE_NODE *psDevNode,
+                        const SYNC_TIMELINE_OBJ *psSWTimelineObj)
+{
+	IMG_UINT64 ui64SyncPtIdx;
+	PVRSRV_ERROR eError;
+	eError = SyncSWTimelineAdvanceKM_(psSWTimelineObj->pvTlObj,
+	                                  &ui64SyncPtIdx);
+
+	if (eError == PVRSRV_OK)
+	{
+		RGXSRV_HWPERF_SYNC_SW_TL_ADV(psDevNode->pvDevice,
+		                             OSGetCurrentProcessID(),
+		                             psSWTimelineObj->hTimeline,
+		                             ui64SyncPtIdx);
+	}
+	return eError;
+}
+
+static inline PVRSRV_ERROR
+SyncSWGetTimelineObj(PVRSRV_TIMELINE hSWTimeline,
+                     SYNC_TIMELINE_OBJ *psSWTimelineObj)
+{
+	psSWTimelineObj->hTimeline = hSWTimeline;
+	return SyncSWGetTimelineObj_(hSWTimeline, &psSWTimelineObj->pvTlObj);
+}
+
+static inline PVRSRV_ERROR
+SyncSWTimelineReleaseKM(const SYNC_TIMELINE_OBJ *psSWTimelineObj)
+{
+	return SyncSWTimelineReleaseKM_(psSWTimelineObj->pvTlObj);
+}
+
+static inline PVRSRV_ERROR
+SyncDumpFence(const SYNC_FENCE_OBJ *psFenceObj,
+              DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+              void *pvDumpDebugFile)
+{
+	return SyncDumpFence_(psFenceObj->pvFenceObj, pfnDumpDebugPrintf, pvDumpDebugFile);
+}
+
+static inline PVRSRV_ERROR
+SyncSWDumpTimeline(const SYNC_TIMELINE_OBJ *psSWTimelineObj,
+                   DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                   void *pvDumpDebugFile)
+{
+	return SyncSWDumpTimeline_(psSWTimelineObj->pvTlObj, pfnDumpDebugPrintf, pvDumpDebugFile);
+}
+
+#endif /* PVR_USE_FENCE_SYNC_MODEL */
+
+#endif /* _PVRSRV_SYNC_SERVER_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_tlcommon.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_tlcommon.h
new file mode 100644
index 0000000..487924e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_tlcommon.h
@@ -0,0 +1,257 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Transport Layer common types and definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common types and definitions included into
+                both user mode and kernel mode source.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef PVR_TLCOMMON_H
+#define PVR_TLCOMMON_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+
+/*! Handle type for stream descriptor objects as created by this API */
+typedef IMG_HANDLE PVRSRVTL_SD;
+
+/*! Maximum stream name length including the null byte */
+#define PRVSRVTL_MAX_STREAM_NAME_SIZE	40U
+
+/*! Maximum number of streams expected to exist */
+#define PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER (32*PRVSRVTL_MAX_STREAM_NAME_SIZE)
+
+/*! Packet lengths are always rounded up to a multiple of 8 bytes */
+#define PVRSRVTL_PACKET_ALIGNMENT		8U
+#define PVRSRVTL_ALIGN(x)				((x+PVRSRVTL_PACKET_ALIGNMENT-1) & ~(PVRSRVTL_PACKET_ALIGNMENT-1))
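For concreteness, the rounding behaviour of the macro above works out as follows:

/* PVRSRVTL_ALIGN(13) == (13 + 7) & ~7 == 16
 * PVRSRVTL_ALIGN(16) == 16   (already-aligned sizes are unchanged)
 */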
+
+
+/*! A packet is made up of a header structure followed by the data bytes.
+ * There are 3 types of packet: normal (has data), data lost and padding,
+ * see packet flags. Header kept small to reduce data overhead.
+ *
+ * If the ORDER of the structure members is changed, please UPDATE the
+ * PVRSRVTL_PACKETHDR_*_OFFSET macros accordingly.
+ *
+ * Layout of uiTypeSize member is :
+ *
+ * |<---------------------------32-bits------------------------------>|
+ * |<----8---->|<-----1----->|<----7--->|<------------16------------->|
+ * |    Type   | Drop-Oldest |  UNUSED  |             Size            |
+ *
+ */
+typedef struct
+{
+	IMG_UINT32 uiTypeSize;	/*!< Type, Drop-Oldest flag & number of bytes following header */
+	IMG_UINT32 uiReserved;	/*!< Reserved; packets and data must be 8-byte aligned */
+
+	/* First bytes of TL packet data follow header ... */
+} PVRSRVTL_PACKETHDR, *PVRSRVTL_PPACKETHDR;
+
+/* Structure must always be a size multiple of 8 as stream buffer
+ * still an array of IMG_UINT32s.
+ */
+static_assert((sizeof(PVRSRVTL_PACKETHDR) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+			  "sizeof(PVRSRVTL_PACKETHDR) must be a multiple of 8");
+
+/*! Packet header reserved word fingerprint "TLP1" */
+#define PVRSRVTL_PACKETHDR_RESERVED 0x31504C54U
+
+/*! Packet header mask used to extract the size from the uiTypeSize member.
+ * Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_SIZE_MASK    0x0000FFFFU
+#define PVRSRVTL_MAX_PACKET_SIZE        (PVRSRVTL_PACKETHDR_SIZE_MASK & ~0xFU)
+
+
+/*! Packet header mask used to extract the type from the uiTypeSize member.
+ * Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_TYPE_MASK    0xFF000000U
+#define PVRSRVTL_PACKETHDR_TYPE_OFFSET  24U
+
+/*! Packet header mask used to check if packets before this one were dropped
+ * or not. Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK    0x00800000U
+#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET    23U
+
+/*! Packet type enumeration.
+ */
+typedef enum
+{
+	/*! Undefined packet */
+	PVRSRVTL_PACKETTYPE_UNDEF = 0,
+
+	/*! Normal packet type. Indicates data follows the header.
+	 */
+	PVRSRVTL_PACKETTYPE_DATA = 1,
+
+	/*! When seen, this packet type indicates that at this point in the
+	 * stream one or more packets could not be accepted due to space
+	 * constraints and that recent data may have been lost, depending on
+	 * how the producer handles the error. Such packets carry no data;
+	 * their data length is 0.
+	 */
+	PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED = 2,
+
+	/*! Packets with this type set are padding packets that contain undefined
+	 * data and must be ignored/skipped by the client. They are used when the
+	 * circular stream buffer wraps around and there is not enough space for
+	 * the data at the end of the buffer. Such packets have a length of 0 or
+	 * more.
+	 */
+	PVRSRVTL_PACKETTYPE_PADDING = 3,
+
+	/*! This packet type conveys to the stream consumer that the stream
+	 * producer has reached the end of data for that data sequence. The
+	 * TLDaemon has several options for processing these packets that can
+	 * be selected on a per stream basis.
+	 */
+	PVRSRVTL_PACKETTYPE_MARKER_EOS = 4,
+
+	/*! This is the same as PVRSRVTL_PACKETTYPE_MARKER_EOS but additionally
+	 * removes the old data record output file before opening the new/next
+	 * one.
+	 */
+	PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD = 5,
+
+	/*! Packet emitted on the first stream opened by a writer. The packet
+	 * carries the name of the opened stream in the form of a
+	 * null-terminated string.
+	 */
+	PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE = 6,
+
+	/*! Packet emitted on the last stream closed by a writer. The packet
+	 * carries the name of the closed stream in the form of a
+	 * null-terminated string.
+	 */
+	PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE = 7,
+
+	PVRSRVTL_PACKETTYPE_LAST
+} PVRSRVTL_PACKETTYPE;
+
+/* The SET_PACKET_* macros rely on the layout of the uiTypeSize member of
+ * PVRSRVTL_PACKETHDR: the packet type occupies the top byte and the size
+ * the bottom 16 bits (see the layout diagram above).
+ */
+#define PVRSRVTL_SET_PACKET_DATA(len)       (len) | (PVRSRVTL_PACKETTYPE_DATA                     << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+#define PVRSRVTL_SET_PACKET_PADDING(len)    (len) | (PVRSRVTL_PACKETTYPE_PADDING                  << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+#define PVRSRVTL_SET_PACKET_WRITE_FAILED    (0)   | (PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+#define PVRSRVTL_SET_PACKET_HDR(len, type)  (len) | ((type)                                       << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+
+/*! Returns the number of bytes of data in the packet.
+ * p may be any address type.
+ */
+#define GET_PACKET_DATA_LEN(p)	\
+	((IMG_UINT32) ((PVRSRVTL_PPACKETHDR)(p))->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK)
+
+
+/*! Returns a IMG_BYTE* pointer to the first byte of data in the packet */
+#define GET_PACKET_DATA_PTR(p)	\
+	((IMG_PBYTE) (((size_t)p) + sizeof(PVRSRVTL_PACKETHDR)))
+
+/*! Given a PVRSRVTL_PPACKETHDR address, return the address of the next
+ *  packet. It is up to the caller to determine if the new address is within
+ *  the packet buffer.
+ */
+#define GET_NEXT_PACKET_ADDR(p) \
+	((PVRSRVTL_PPACKETHDR) (((IMG_UINT8 *)p) + sizeof(PVRSRVTL_PACKETHDR) + \
+	(((((PVRSRVTL_PPACKETHDR)p)->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK) + \
+	(PVRSRVTL_PACKET_ALIGNMENT-1)) & (~(PVRSRVTL_PACKET_ALIGNMENT-1)))))
+
+/*! Turns the packet address p into a PVRSRVTL_PPACKETHDR pointer type.
+ */
+#define GET_PACKET_HDR(p)		((PVRSRVTL_PPACKETHDR)(p))
+
+/*! Get the type of the packet. p is of type PVRSRVTL_PPACKETHDR.
+ */
+#define GET_PACKET_TYPE(p)		(((p)->uiTypeSize & PVRSRVTL_PACKETHDR_TYPE_MASK)>>PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+
+/*! Set PACKETS_DROPPED flag in packet header as a part of uiTypeSize.
+ * p is of type PVRSRVTL_PPACKETHDR.
+ */
+#define SET_PACKETS_DROPPED(p)		(((p)->uiTypeSize) | (1<<PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET))
+
+/*! Check if packets were dropped before this packet.
+ * p is of type PVRSRVTL_PPACKETHDR.
+ */
+#define CHECK_PACKETS_DROPPED(p)	(((p)->uiTypeSize & PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK)>>PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET)
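A sketch of how a consumer might walk packets in an acquired buffer using the GET macros above (the function is hypothetical; pbData and uiLen are assumed to describe a buffer returned by a Transport Layer acquire call):

static void ExampleWalkPackets(IMG_PBYTE pbData, IMG_UINT32 uiLen)
{
	PVRSRVTL_PPACKETHDR psHdr = GET_PACKET_HDR(pbData);

	while ((IMG_PBYTE) psHdr < pbData + uiLen)
	{
		if (GET_PACKET_TYPE(psHdr) == PVRSRVTL_PACKETTYPE_DATA)
		{
			IMG_PBYTE  pbPayload = GET_PACKET_DATA_PTR(psHdr);
			IMG_UINT32 uiBytes   = GET_PACKET_DATA_LEN(psHdr);

			/* ... consume uiBytes of payload at pbPayload ... */
			(void) pbPayload;
			(void) uiBytes;
		}

		/* padding and marker packets are stepped over the same way */
		psHdr = GET_NEXT_PACKET_ADDR(psHdr);
	}
}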
+
+/*! Flags for use with PVRSRVTLOpenStream
+ * 0x01 - Do not block in PVRSRVTLAcquireData() when no bytes are available
+ * 0x02 - When the stream does not exist wait for a bit (2s) in
+ *        PVRSRVTLOpenStream() and then exit with a timeout error if it still
+ *        does not exist.
+ * 0x04 - Open the stream for write-only operations.
+ *        If this flag is not used, the stream is opened read-only. This
+ *        flag is required if one wants to call the reserve/commit/write
+ *        functions on the stream descriptor. Reads from a stream descriptor
+ *        opened with this flag will fail.
+ * 0x08 - Disable Producer Callback.
+ *        If this flag is set and the stream becomes empty, do not call any
+ *        associated producer callback to generate more data from the reader
+ *        context.
+ * 0x10 - Reset stream on open.
+ *        When this flag is used the stream will drop all of the stored data.
+ * 0x20 - Limit the read position to the write position at the time the
+ *        stream was opened. This flag therefore freezes the content read
+ *        to that produced before the stream was opened for reading.
+ * 0x40 - Ignore Open Callback.
+ *        When this flag is set ignore any OnReaderOpenCallback setting for
+ *        the stream. This allows access to the stream to be made without
+ *        generating any extra packets into the stream.
+ */
+
+#define PVRSRV_STREAM_FLAG_NONE                        (0U)
+#define PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING         (1U<<0)
+#define PVRSRV_STREAM_FLAG_OPEN_WAIT                   (1U<<1)
+#define PVRSRV_STREAM_FLAG_OPEN_WO                     (1U<<2)
+#define PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK   (1U<<3)
+#define PVRSRV_STREAM_FLAG_RESET_ON_OPEN               (1U<<4)
+#define PVRSRV_STREAM_FLAG_READ_LIMIT                  (1U<<5)
+#define PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK        (1U<<6)
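For example, a reader wanting non-blocking acquires from a freshly reset stream could combine the flags as follows (values follow from the definitions above; the open call itself lives outside this header):

IMG_UINT32 ui32Flags = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING |
                       PVRSRV_STREAM_FLAG_RESET_ON_OPEN; /* == 0x11U */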
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* PVR_TLCOMMON_H */
+/******************************************************************************
+ End of file (pvrsrv_tlcommon.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_tlstreams.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_tlstreams.h
new file mode 100644
index 0000000..a4ead13
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrv_tlstreams.h
@@ -0,0 +1,62 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Transport Layer stream names
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer stream names included into both user mode
+                and kernel mode source.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVRSRV_TLSTREAMS_H_
+#define _PVRSRV_TLSTREAMS_H_
+
+#define PVRSRV_TL_CTLR_STREAM "tlctrl"
+
+#define PVRSRV_TL_HWPERF_RGX_FW_STREAM      "hwperf_fw_"
+#define PVRSRV_TL_HWPERF_HOST_SERVER_STREAM "hwperf_host_"
+
+/* Host HWPerf client stream names are of the form 'hwperf_client_<pid>' */
+#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM         "hwperf_client_"
+#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC "hwperf_client_%u_%u"
+
+#endif /* _PVRSRV_TLSTREAMS_H_ */
+
+/******************************************************************************
+ End of file (pvrsrv_tlstreams.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrvkm.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrvkm.mk
new file mode 100644
index 0000000..3215069
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrsrvkm.mk
@@ -0,0 +1,139 @@
+pvrsrvkm-y += \
+ client_cache_direct_bridge.o \
+ server_cache_bridge.o \
+ server_cmm_bridge.o \
+ client_devicememhistory_direct_bridge.o \
+ server_devicememhistory_bridge.o \
+ server_dmabuf_bridge.o \
+ client_htbuffer_direct_bridge.o \
+ server_htbuffer_bridge.o \
+ client_mm_direct_bridge.o \
+ server_mm_bridge.o \
+ client_pvrtl_direct_bridge.o \
+ server_pvrtl_bridge.o \
+ server_rgxbreakpoint_bridge.o \
+ server_rgxcmp_bridge.o \
+ server_rgxfwdbg_bridge.o \
+ server_rgxhwperf_bridge.o \
+ server_rgxkicksync_bridge.o \
+ server_rgxregconfig_bridge.o \
+ server_rgxsignals_bridge.o \
+ server_rgxta3d_bridge.o \
+ server_rgxtq2_bridge.o \
+ server_rgxtq_bridge.o \
+ server_srvcore_bridge.o \
+ client_sync_direct_bridge.o \
+ server_sync_bridge.o \
+ client_synctracking_direct_bridge.o \
+ server_synctracking_bridge.o \
+ pvr_buffer_sync.o \
+ pvr_counting_timeline.o \
+ pvr_drm.o \
+ pvr_fence.o \
+ pvr_platform_drv.o \
+ pvr_sw_fence.o \
+ pvr_sync_file.o \
+ cache_km.o \
+ connection_server.o \
+ devicemem_heapcfg.o \
+ devicemem_history_server.o \
+ devicemem_server.o \
+ handle.o \
+ htbserver.o \
+ info_page_km.o \
+ lists.o \
+ mmu_common.o \
+ physheap.o \
+ physmem.o \
+ physmem_hostmem.o \
+ physmem_lma.o \
+ physmem_tdsecbuf.o \
+ pmr.o \
+ power.o \
+ process_stats.o \
+ pvr_notifier.o \
+ pvrsrv.o \
+ pvrsrv_bridge_init.o \
+ pvrsrv_pool.o \
+ srvcore.o \
+ sync_checkpoint.o \
+ sync_server.o \
+ tlintern.o \
+ tlserver.o \
+ tlstream.o \
+ rgxfwload.o \
+ rgxbreakpoint.o \
+ rgxbvnc.o \
+ rgxccb.o \
+ rgxcompute.o \
+ rgxdebug.o \
+ rgxfwdbg.o \
+ rgxfwimageutils.o \
+ rgxfwutils.o \
+ rgxhwperf.o \
+ rgxinit.o \
+ rgxkicksync.o \
+ rgxlayer_impl.o \
+ rgxmem.o \
+ rgxmipsmmuinit.o \
+ rgxmmuinit.o \
+ rgxpower.o \
+ rgxregconfig.o \
+ rgxsignals.o \
+ rgxsrvinit.o \
+ rgxstartstop.o \
+ rgxsyncutils.o \
+ rgxta3d.o \
+ rgxtdmtransfer.o \
+ rgxtimecorr.o \
+ rgxtransfer.o \
+ rgxutils.o \
+ allocmem.o \
+ event.o \
+ handle_idr.o \
+ htb_debug.o \
+ km_apphint.o \
+ module_common.o \
+ osconnection_server.o \
+ osfunc.o \
+ osmmap_stub.o \
+ physmem_dmabuf.o \
+ physmem_osmem_linux.o \
+ physmem_test.o \
+ pmr_os.o \
+ pvr_bridge_k.o \
+ pvr_debug.o \
+ pvr_debugfs.o \
+ pvr_gputrace.o \
+ devicemem.o \
+ devicemem_utils.o \
+ hash.o \
+ htbuffer.o \
+ mem_utils.o \
+ pvrsrv_error.o \
+ ra.o \
+ sync.o \
+ tlclient.o \
+ uniq_key_splay_tree.o \
+ rgx_hwperf_table.o \
+ system/dma_support.o \
+ system/vmm_pvz_client.o \
+ system/vmm_pvz_server.o \
+ system/vmm_type_stub.o \
+ system/vz_physheap_common.o \
+ system/vz_physheap_generic.o \
+ system/vz_support.o \
+ system/vz_vmm_pvz.o \
+ system/vz_vmm_vm.o \
+ interrupt_support.o \
+ sysconfig.o \
+ mtk_mfgsys.o
+pvrsrvkm-$(CONFIG_DRM_POWERVR_ROGUE_DEBUG) += \
+ client_ri_direct_bridge.o \
+ server_ri_bridge.o \
+ ri_server.o
+pvrsrvkm-$(CONFIG_ARM)   += osfunc_arm.o
+pvrsrvkm-$(CONFIG_ARM64) += osfunc_arm64.o
+pvrsrvkm-$(CONFIG_EVENT_TRACING) += trace_events.o
+pvrsrvkm-$(CONFIG_MIPS)  += osfunc_mips.o
+pvrsrvkm-$(CONFIG_X86)   += osfunc_x86.o
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrversion.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrversion.h
new file mode 100644
index 0000000..a57b3c6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/pvrversion.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File           pvrversion.h
+@Title          PowerVR version numbers and strings.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Version numbers and strings for PowerVR components.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRVERSION_H
+#define PVRVERSION_H
+
+#define PVRVERSION_MAJ               1U
+#define PVRVERSION_MIN               11U
+
+#define PVRVERSION_FAMILY           "rogueddk"
+#define PVRVERSION_BRANCHNAME       "1.11"
+#define PVRVERSION_BUILD             5516664
+#define PVRVERSION_BSCONTROL        "Rogue_DDK_Linux"
+
+#define PVRVERSION_STRING           "Rogue_DDK_Linux rogueddk 1.11@5516664"
+#define PVRVERSION_STRING_SHORT     "1.11@5516664"
+
+#define COPYRIGHT_TXT               "Copyright (c) Imagination Technologies Ltd. All Rights Reserved."
+
+#define PVRVERSION_BUILD_HI          551
+#define PVRVERSION_BUILD_LO          6664
+#define PVRVERSION_STRING_NUMERIC   "1.11.551.6664"
+
+#define PVRVERSION_PACK(MAJ,MIN) (((IMG_UINT32)((IMG_UINT32)(MAJ) & 0xFFFFU) << 16U) | (((MIN) & 0xFFFFU) << 0U))
+#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU)
+#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0U) & 0xFFFFU)
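+
+/* Worked example (illustrative): PVRVERSION_PACK(1, 11) yields
+ * (1 << 16) | 11 = 0x0001000B, and PVRVERSION_UNPACK_MAJ()/
+ * PVRVERSION_UNPACK_MIN() recover 1 and 11 from that value. */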
+
+#endif /* PVRVERSION_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ra.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ra.c
new file mode 100644
index 0000000..b69a64a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ra.c
@@ -0,0 +1,1389 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+@Description
+ Implements generic resource allocation. The resource
+ allocator was originally intended to manage address spaces. In
+ practice the resource allocator is generic and can manage arbitrary
+ sets of integers.
+
+ Resources are allocated from arenas. Arenas can be created with an
+ initial span of resources. Further resource spans can be added to
+ arenas. A callback mechanism allows an arena to request further
+ resource spans on demand.
+
+ Each arena maintains an ordered list of resource segments, each
+ described by a boundary tag. Each boundary tag describes a segment
+ of resources which is either 'free' (available for allocation) or
+ 'busy' (currently allocated). Adjacent 'free' segments are always
+ coalesced to avoid fragmentation.
+
+ For allocation, all 'free' segments are kept on lists of 'free'
+ segments in a table indexed by pvr_log2(segment size), i.e. each
+ table index n holds 'free' segments in the size range
+ 2^n -> 2^(n+1) - 1 (see the worked example below).
+
+ Allocation policy is based on an *almost* good fit strategy.
+
+ Allocated segments are inserted into a self-scaling hash table which
+ maps the base resource of the span to the relevant boundary
+ tag. This allows the code to get back to the boundary tag without
+ exporting explicit boundary tag references through the API.
+
+ Each arena has an associated quantum size; all allocations from the
+ arena are made in multiples of the basic quantum.
+
+ On resource exhaustion in an arena, a callback, if provided, will be
+ used to request further resources. Resource spans allocated by the
+ callback mechanism will be returned when freed (through one of the
+ two callbacks).
+*/ /**************************************************************************/
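+
+/* Worked example of the free-table indexing above (illustrative, not part
+ * of the original sources): a free segment of size 0x3000 (12KB) has
+ * pvr_log2(0x3000) = 13, so it is kept in bucket 13, which holds all free
+ * segments of sizes 0x2000 (2^13) to 0x3FFF (2^14 - 1). A request for
+ * 0x2800 bytes likewise starts its search at bucket pvr_log2(0x2800) = 13.
+ */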
+
+/* Issues:
+ * - flags: flags are passed into the resource allocator but are not currently used.
+ * - determination of import size is currently braindead.
+ * - debug code should be moved out to its own module and #ifdef'd.
+ */
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "uniq_key_splay_tree.h"
+
+#include "hash.h"
+#include "ra.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "osfunc.h"
+#include "allocmem.h"
+#include "lock.h"
+#include "pvr_intrinsics.h"
+
+/* The initial, and minimum size of the live address -> boundary tag
+   structure hash table. The value 64 is a fairly arbitrary
+   choice. The hash table resizes on demand so the value chosen is
+   not critical. */
+#define MINIMUM_HASH_SIZE (64)
+
+
+/* #define RA_VALIDATE */
+
+#if defined(__KLOCWORK__)
+  /* make sure Klocwork analyses all the code (including the debug code) */
+  #if !defined(RA_VALIDATE)
+    #define RA_VALIDATE
+  #endif
+#endif
+
+#if (!defined(PVRSRV_NEED_PVR_ASSERT)) || (!defined(RA_VALIDATE))
+  /* Disable the asserts unless explicitly told otherwise. They slow the
+     driver down too much for everyone else. */
+
+  #undef PVR_ASSERT
+  /* use a macro that really does not do anything when compiling in release
+     mode! */
+  #define PVR_ASSERT(x)
+#endif
+
+/* boundary tags, used to describe a resource segment */
+struct _BT_
+{
+	enum bt_type
+	{
+		btt_free,				/* free resource segment */
+		btt_live				/* allocated resource segment */
+	} type;
+
+	unsigned int is_leftmost;
+	unsigned int is_rightmost;
+	unsigned int free_import;
+
+	/* The base resource and extent of this segment */
+	RA_BASE_T base;
+	RA_LENGTH_T uSize;
+
+	/* doubly linked ordered list of all segments within the arena */
+	struct _BT_ *pNextSegment;
+	struct _BT_ *pPrevSegment;
+
+	/* doubly linked un-ordered list of free segments with the same flags. */
+	struct _BT_ * next_free;
+	struct _BT_ * prev_free;
+
+	/* a user reference associated with this span, user references are
+	 * currently only provided in the callback mechanism */
+	IMG_HANDLE hPriv;
+
+	/* Flags to match on this span */
+	IMG_UINT32 uFlags;
+
+};
+typedef struct _BT_ BT;
+
+
+/* resource allocation arena */
+struct _RA_ARENA_
+{
+	/* arena name for diagnostics output */
+	IMG_CHAR *name;
+
+	/* allocations within this arena are quantum sized */
+	RA_LENGTH_T uQuantum;
+
+	/* import interface, if provided */
+	PVRSRV_ERROR (*pImportAlloc)(RA_PERARENA_HANDLE h,
+								 RA_LENGTH_T uSize,
+								 IMG_UINT32 uFlags,
+								 const IMG_CHAR *pszAnnotation,
+								 RA_BASE_T *pBase,
+								 RA_LENGTH_T *pActualSize,
+								 RA_PERISPAN_HANDLE *phPriv);
+	void (*pImportFree) (RA_PERARENA_HANDLE,
+						 RA_BASE_T,
+						 RA_PERISPAN_HANDLE hPriv);
+
+	/* arbitrary handle provided by arena owner to be passed into the
+	 * import alloc and free hooks */
+	void *pImportHandle;
+
+	IMG_PSPLAY_TREE per_flags_buckets;
+
+	/* resource segment list */
+	BT *pHeadSegment;
+
+	/* segment address to boundary tag hash table */
+	HASH_TABLE *pSegmentHash;
+
+	/* Lock for this arena */
+	POS_LOCK hLock;
+
+	/* LockClass of this arena. This is used within lockdep to decide if a
+	 * recursive call sequence with the same lock class is allowed or not. */
+	IMG_UINT32 ui32LockClass;
+
+	/* If TRUE, imports will not be split up. Allocations will always get their
+	 * own import
+	 */
+	IMG_BOOL bNoSplit;
+};
+
+/*************************************************************************/ /*!
+@Function       _RequestAllocFail
+@Description    Default callback allocator used if no callback is
+                specified, always fails to allocate further resources to the
+                arena.
+@Input          _h - callback handle
+@Input          _uSize - requested allocation size
+@Input          _uFlags - allocation flags
+@Input          _pszAnnotation - annotation string for the allocation
+@Output         _pBase - receives the allocated base
+@Output         _pActualSize - actual allocation size
+@Output         _phPriv - receives the per-import private data
+@Return         PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL, this function always fails to allocate.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_RequestAllocFail (RA_PERARENA_HANDLE _h,
+				   RA_LENGTH_T _uSize,
+				   IMG_UINT32 _uFlags,
+				   const IMG_CHAR *_pszAnnotation,
+				   RA_BASE_T *_pBase,
+				   RA_LENGTH_T *_pActualSize,
+				   RA_PERISPAN_HANDLE *_phPriv)
+{
+	PVR_UNREFERENCED_PARAMETER (_h);
+	PVR_UNREFERENCED_PARAMETER (_uSize);
+	PVR_UNREFERENCED_PARAMETER (_pActualSize);
+	PVR_UNREFERENCED_PARAMETER (_phPriv);
+	PVR_UNREFERENCED_PARAMETER (_uFlags);
+	PVR_UNREFERENCED_PARAMETER (_pBase);
+	PVR_UNREFERENCED_PARAMETER (_pszAnnotation);
+
+	return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL;
+}
+
+
+#if defined (PVR_CTZLL)
+	/* make sure to trigger an error if someone changes the buckets or the
+	   bHasEltsMapping size. bHasEltsMapping is used to quickly determine the
+	   smallest bucket containing elements, therefore it must have at least
+	   as many bits as the buckets array has buckets. The RA implementation
+	   actually uses one more bit. */
+	static_assert(ARRAY_SIZE(((IMG_PSPLAY_TREE)0)->buckets)
+				  < 8 * sizeof(((IMG_PSPLAY_TREE) 0)->bHasEltsMapping),
+				  "Too many buckets for bHasEltsMapping bitmap");
+#endif
+
+
+/*************************************************************************/ /*!
+@Function       pvr_log2
+@Description    Computes the floor of the log base 2 of an unsigned integer
+@Input          n       Unsigned integer
+@Return         Floor(Log2(n))
+*/ /**************************************************************************/
+#if defined(PVR_CLZLL)
+/* make sure to trigger a problem if someone changes the RA_LENGTH_T type:
+   the __builtin_clzll intrinsic is for unsigned long long variables.
+
+   if someone changes RA_LENGTH_T to unsigned long, then use __builtin_clzl;
+   if it changes to unsigned int, use __builtin_clz;
+
+   if it changes to something bigger than unsigned long long,
+   then revert pvr_log2 to the classic implementation */
+static_assert(sizeof(RA_LENGTH_T) == sizeof(unsigned long long),
+			  "RA log routines not tuned for sizeof(RA_LENGTH_T)");
+
+static inline IMG_UINT32 pvr_log2(RA_LENGTH_T n)
+{
+	PVR_ASSERT( n != 0 ); /* Log2 is not defined on 0 */
+
+	return (8 * sizeof(RA_LENGTH_T)) - 1 - PVR_CLZLL(n);
+}
+#else
+static IMG_UINT32
+pvr_log2 (RA_LENGTH_T n)
+{
+	IMG_UINT32 l = 0;
+
+	PVR_ASSERT( n != 0 ); /* Log2 is not defined on 0 */
+
+	n>>=1;
+	while (n>0)
+	{
+		n>>=1;
+		l++;
+	}
+	return l;
+}
+#endif
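+
+/* Sanity examples for either pvr_log2 implementation above (illustrative):
+ * pvr_log2(1) == 0, pvr_log2(4096) == 12, pvr_log2(4097) == 12 and
+ * pvr_log2(8191) == 12, i.e. the floor of log2 as documented. */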
+
+
+#if defined(RA_VALIDATE)
+/*************************************************************************/ /*!
+@Function       _IsInSegmentList
+@Description    Tests if a BT is in the segment list.
+@Input          pArena           The arena.
+@Input          pBT              The boundary tag to look for.
+@Return         IMG_FALSE  BT was not in the arena's segment list.
+                IMG_TRUE   BT was in the arena's segment list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInSegmentList (RA_ARENA *pArena, BT *pBT)
+{
+	BT*  pBTScan;
+
+	PVR_ASSERT (pArena != NULL);
+	PVR_ASSERT (pBT != NULL);
+
+	/* Walk the segment list until we see the BT pointer... */
+	pBTScan = pArena->pHeadSegment;
+	while (pBTScan != NULL  &&  pBTScan != pBT)
+	{
+		pBTScan = pBTScan->pNextSegment;
+	}
+
+	/* Test if we found it and then return */
+	return (pBTScan == pBT);
+}
+
+/*************************************************************************/ /*!
+@Function       _IsInFreeList
+@Description    Tests if a BT is in the free list.
+@Input          pArena           The arena.
+@Input          pBT              The boundary tag to look for.
+@Return         IMG_FALSE  BT was not in the arena's free list.
+                IMG_TRUE   BT was in the arena's free list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInFreeList (RA_ARENA *pArena, BT *pBT)
+{
+	BT*  pBTScan;
+	IMG_UINT32  uIndex;
+
+	PVR_ASSERT (pArena != NULL);
+	PVR_ASSERT (pBT != NULL);
+
+	/* Look for the free list that holds BTs of this size... */
+	uIndex  = pvr_log2 (pBT->uSize);
+	PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+
+	pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+	if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->ui32Flags != pBT->uFlags))
+	{
+		return 0;
+	}
+	else
+	{
+		pBTScan = pArena->per_flags_buckets->buckets[uIndex];
+		while (pBTScan != NULL  &&  pBTScan != pBT)
+		{
+			pBTScan = pBTScan->next_free;
+		}
+
+		/* Test if we found it and then return */
+		return (pBTScan == pBT);
+	}
+}
+
+/* is_arena_valid should only be used in debug mode.
+   It checks that the properties an arena must have hold. */
+static int is_arena_valid(struct _RA_ARENA_ * arena)
+{
+	struct _BT_ * chunk;
+#if defined(PVR_CTZLL)
+	unsigned int i;
+#endif
+
+	for (chunk = arena->pHeadSegment; chunk != NULL; chunk = chunk->pNextSegment)
+	{
+		/* if next segment is NULL, then it must be a rightmost */
+		PVR_ASSERT((chunk->pNextSegment != NULL) || (chunk->is_rightmost));
+		/* if prev segment is NULL, then it must be a leftmost */
+		PVR_ASSERT((chunk->pPrevSegment != NULL) || (chunk->is_leftmost));
+
+		if (chunk->type == btt_free)
+		{
+			/* checks the correctness of the type field */
+			PVR_ASSERT(_IsInFreeList(arena, chunk));
+
+			/* check that there can't be two consecutive free chunks.
+			   Instead of two consecutive free chunks there should be
+			   a single one that spans the size of the two. */
+			PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->type != btt_free));
+			PVR_ASSERT((chunk->is_rightmost) || (chunk->pNextSegment->type != btt_free));
+		}
+		else
+		{
+			/* checks the correctness of the type field */
+			PVR_ASSERT(!_IsInFreeList(arena, chunk));
+		}
+
+		PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->base + chunk->pPrevSegment->uSize == chunk->base));
+		PVR_ASSERT((chunk->is_rightmost) || (chunk->base + chunk->uSize == chunk->pNextSegment->base));
+
+		/* all segments of the same imports must have the same flags ... */
+		PVR_ASSERT((chunk->is_rightmost) || (chunk->uFlags == chunk->pNextSegment->uFlags));
+		/* ... and the same import handle */
+		PVR_ASSERT((chunk->is_rightmost) || (chunk->hPriv == chunk->pNextSegment->hPriv));
+
+
+		/* if a free chunk spans a whole import, then it must be a 'not to
+		   free' import. Otherwise it should have been freed. */
+		PVR_ASSERT((!chunk->is_leftmost) || (!chunk->is_rightmost) || (chunk->type == btt_live) || (!chunk->free_import));
+	}
+
+#if defined(PVR_CTZLL)
+	if (arena->per_flags_buckets != NULL)
+	{
+		for (i = 0; i < FREE_TABLE_LIMIT; ++i)
+		{
+			/* verify that the bHasEltsMapping is correct for this flags bucket */
+			PVR_ASSERT(
+				((arena->per_flags_buckets->buckets[i] == NULL) &&
+				 (( (arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) == 0)))
+				||
+				((arena->per_flags_buckets->buckets[i] != NULL) &&
+				 ((  (arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) != 0)))
+				);
+		}
+	}
+#endif
+
+	/* if the arena was not valid, one of the asserts above should have triggered */
+	return 1;
+}
+#endif
+/*************************************************************************/ /*!
+@Function       _SegmentListInsertAfter
+@Description    Insert a boundary tag into an arena segment list after a
+                specified boundary tag.
+@Input          pInsertionPoint  The insertion point.
+@Input          pBT              The boundary tag to insert.
+@Return         PVRSRV_OK (doesn't fail)
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR
+_SegmentListInsertAfter (BT *pInsertionPoint,
+						 BT *pBT)
+{
+	PVR_ASSERT (pBT != NULL);
+	PVR_ASSERT (pInsertionPoint != NULL);
+
+	pBT->pNextSegment = pInsertionPoint->pNextSegment;
+	pBT->pPrevSegment = pInsertionPoint;
+	if (pInsertionPoint->pNextSegment != NULL)
+	{
+		pInsertionPoint->pNextSegment->pPrevSegment = pBT;
+	}
+	pInsertionPoint->pNextSegment = pBT;
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       _SegmentListInsert
+@Description    Insert a boundary tag into an arena segment list
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag to insert.
+@Return         PVRSRV_OK (doesn't fail)
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR
+_SegmentListInsert (RA_ARENA *pArena, BT *pBT)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVR_ASSERT (!_IsInSegmentList(pArena, pBT));
+
+	/* insert into the segment chain */
+	pBT->pNextSegment = pArena->pHeadSegment;
+	pArena->pHeadSegment = pBT;
+	if (pBT->pNextSegment != NULL)
+	{
+		pBT->pNextSegment->pPrevSegment = pBT;
+	}
+
+	pBT->pPrevSegment = NULL;
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _SegmentListRemove
+@Description    Remove a boundary tag from an arena segment list.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag to remove.
+*/ /**************************************************************************/
+static void
+_SegmentListRemove (RA_ARENA *pArena, BT *pBT)
+{
+	PVR_ASSERT (_IsInSegmentList(pArena, pBT));
+
+	if (pBT->pPrevSegment == NULL)
+		pArena->pHeadSegment = pBT->pNextSegment;
+	else
+		pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
+
+	if (pBT->pNextSegment != NULL)
+		pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _BuildBT
+@Description    Construct a boundary tag for a free segment.
+@Input          base     The base of the resource segment.
+@Input          uSize    The extent of the resource segment.
+@Input          uFlags   The flags to give to the boundary tag
+@Return         Boundary tag or NULL
+*/ /**************************************************************************/
+static BT *
+_BuildBT (RA_BASE_T base, RA_LENGTH_T uSize, RA_FLAGS_T uFlags)
+{
+	BT *pBT;
+
+	pBT = OSAllocZMem(sizeof(BT));
+	if (pBT == NULL)
+	{
+		return NULL;
+	}
+
+	pBT->is_leftmost = 1;
+	pBT->is_rightmost = 1;
+	/* pBT->free_import = 0; */
+	pBT->type = btt_live;
+	pBT->base = base;
+	pBT->uSize = uSize;
+	pBT->uFlags = uFlags;
+
+	return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _SegmentSplit
+@Description    Split a segment into two, maintaining the arena segment list.
+                The boundary tag should not be in the free table. Neither the
+                original nor the new neighbour boundary tag will be in the
+                free table.
+@Input          pBT       The boundary tag to split.
+@Input          uSize     The required segment size of boundary tag after
+                          splitting.
+@Return         New neighbour boundary tag or NULL.
+*/ /**************************************************************************/
+static BT *
+_SegmentSplit (BT *pBT, RA_LENGTH_T uSize)
+{
+	BT *pNeighbour;
+
+	pNeighbour = _BuildBT(pBT->base + uSize, pBT->uSize - uSize, pBT->uFlags);
+	if (pNeighbour == NULL)
+	{
+		return NULL;
+	}
+
+	_SegmentListInsertAfter(pBT, pNeighbour);
+
+	pNeighbour->is_leftmost = 0;
+	pNeighbour->is_rightmost = pBT->is_rightmost;
+	pNeighbour->free_import = pBT->free_import;
+	pBT->is_rightmost = 0;
+	pNeighbour->hPriv = pBT->hPriv;
+	pBT->uSize = uSize;
+	pNeighbour->uFlags = pBT->uFlags;
+
+	return pNeighbour;
+}
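+
+/* Illustrative example for _SegmentSplit (not part of the original sources):
+ * splitting a boundary tag covering [0x1000, 0x4000) with uSize = 0x1000
+ * shrinks the original to [0x1000, 0x2000) and returns a new neighbour
+ * covering [0x2000, 0x4000); the neighbour inherits is_rightmost,
+ * free_import, hPriv and uFlags, while the original keeps is_leftmost. */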
+
+/*************************************************************************/ /*!
+@Function       _FreeListInsert
+@Description    Insert a boundary tag into an arena free table.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag.
+*/ /**************************************************************************/
+static void
+_FreeListInsert (RA_ARENA *pArena, BT *pBT)
+{
+	IMG_UINT32 uIndex;
+	uIndex = pvr_log2 (pBT->uSize);
+
+	PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+	PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+
+	pBT->type = btt_free;
+
+	pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+	/* the flags item in the splay tree must have been created beforehand by
+	   _InsertResource */
+	PVR_ASSERT(pArena->per_flags_buckets != NULL);
+	PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
+
+	pBT->next_free = pArena->per_flags_buckets->buckets[uIndex];
+	if (pBT->next_free != NULL)
+	{
+		pBT->next_free->prev_free = pBT;
+	}
+	pBT->prev_free = NULL;
+	pArena->per_flags_buckets->buckets[uIndex] = pBT;
+
+#if defined(PVR_CTZLL)
+	/* tells that bucket[index] now contains elements */
+	pArena->per_flags_buckets->bHasEltsMapping |= ((IMG_ELTS_MAPPINGS) 1 << uIndex);
+#endif
+}
+
+/*************************************************************************/ /*!
+@Function       _FreeListRemove
+@Description    Remove a boundary tag from an arena free table.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag.
+*/ /**************************************************************************/
+static void
+_FreeListRemove (RA_ARENA *pArena, BT *pBT)
+{
+	IMG_UINT32 uIndex;
+	uIndex = pvr_log2 (pBT->uSize);
+
+	PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+	PVR_ASSERT (_IsInFreeList(pArena, pBT));
+
+	if (pBT->next_free != NULL)
+	{
+		pBT->next_free->prev_free = pBT->prev_free;
+	}
+
+	if (pBT->prev_free != NULL)
+	{
+		pBT->prev_free->next_free = pBT->next_free;
+	}
+	else
+	{
+		pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+		/* the flags item in the splay tree must have already been created
+		   (otherwise how could there be a segment with these flags?) */
+		PVR_ASSERT(pArena->per_flags_buckets != NULL);
+		PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
+
+		pArena->per_flags_buckets->buckets[uIndex] = pBT->next_free;
+#if defined(PVR_CTZLL)
+		if (pArena->per_flags_buckets->buckets[uIndex] == NULL)
+		{
+			/* there are no more elements in this bucket. Update the mapping. */
+			pArena->per_flags_buckets->bHasEltsMapping &= ~((IMG_ELTS_MAPPINGS) 1 << uIndex);
+		}
+#endif
+	}
+
+	PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+	pBT->type = btt_live;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _InsertResource
+@Description    Add a free resource segment to an arena.
+@Input          pArena    The arena.
+@Input          base      The base of the resource segment.
+@Input          uSize     The extent of the resource segment.
+@Input          uFlags    The flags of the new resources.
+@Return         New bucket pointer
+                NULL on failure
+*/ /**************************************************************************/
+static BT *
+_InsertResource (RA_ARENA *pArena, RA_BASE_T base, RA_LENGTH_T uSize,
+				 RA_FLAGS_T uFlags)
+{
+	BT *pBT;
+	PVR_ASSERT (pArena!=NULL);
+
+	pBT = _BuildBT (base, uSize, uFlags);
+
+	if (pBT != NULL)
+	{
+		IMG_PSPLAY_TREE tmp = PVRSRVInsert(pBT->uFlags, pArena->per_flags_buckets);
+		if (tmp == NULL)
+		{
+			OSFreeMem(pBT);
+			return NULL;
+		}
+
+		pArena->per_flags_buckets = tmp;
+		_SegmentListInsert (pArena, pBT);
+		_FreeListInsert (pArena, pBT);
+	}
+	return pBT;
+}
+
+/*************************************************************************/ /*!
+@Function       _InsertResourceSpan
+@Description    Add a free resource span to an arena, marked for free_import.
+@Input          pArena    The arena.
+@Input          base      The base of the resource segment.
+@Input          uSize     The extent of the resource segment.
+@Return         The boundary tag representing the free resource segment,
+                or NULL on failure.
+*/ /**************************************************************************/
+static INLINE BT *
+_InsertResourceSpan (RA_ARENA *pArena,
+                     RA_BASE_T base,
+                     RA_LENGTH_T uSize,
+                     RA_FLAGS_T uFlags)
+{
+	BT *pBT = _InsertResource(pArena, base, uSize, uFlags);
+	if (pBT != NULL)
+	{
+		pBT->free_import = 1;
+	}
+	return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _RemoveResourceSpan
+@Description    Frees a resource span from an arena, returning the imported
+				span via the callback.
+@Input          pArena     The arena.
+@Input          pBT        The boundary tag to free.
+@Return         IMG_FALSE failure - span was still in use
+                IMG_TRUE  success - span was removed and returned
+*/ /**************************************************************************/
+static INLINE IMG_BOOL
+_RemoveResourceSpan (RA_ARENA *pArena, BT *pBT)
+{
+	PVR_ASSERT (pArena!=NULL);
+	PVR_ASSERT (pBT!=NULL);
+
+	if (pBT->free_import &&
+		pBT->is_leftmost &&
+		pBT->is_rightmost)
+	{
+		_SegmentListRemove (pArena, pBT);
+		pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->hPriv);
+		OSFreeMem(pBT);
+
+		return IMG_TRUE;
+	}
+
+
+	return IMG_FALSE;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _FreeBT
+@Description    Free a boundary tag taking care of the segment list and the
+                boundary tag free table.
+@Input          pArena     The arena.
+@Input          pBT        The boundary tag to free.
+*/ /**************************************************************************/
+static void
+_FreeBT (RA_ARENA *pArena, BT *pBT)
+{
+	BT *pNeighbour;
+
+	PVR_ASSERT (pArena!=NULL);
+	PVR_ASSERT (pBT!=NULL);
+	PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+
+	/* try and coalesce with left neighbour */
+	pNeighbour = pBT->pPrevSegment;
+	if ((!pBT->is_leftmost)	&& (pNeighbour->type == btt_free))
+	{
+		/* Sanity check. */
+		PVR_ASSERT(pNeighbour->base + pNeighbour->uSize == pBT->base);
+
+		_FreeListRemove (pArena, pNeighbour);
+		_SegmentListRemove (pArena, pNeighbour);
+		pBT->base = pNeighbour->base;
+
+		pBT->uSize += pNeighbour->uSize;
+		pBT->is_leftmost = pNeighbour->is_leftmost;
+		OSFreeMem(pNeighbour);
+	}
+
+	/* try to coalesce with right neighbour */
+	pNeighbour = pBT->pNextSegment;
+	if ((!pBT->is_rightmost) && (pNeighbour->type == btt_free))
+	{
+		/* sanity check */
+		PVR_ASSERT(pBT->base + pBT->uSize == pNeighbour->base);
+
+		_FreeListRemove (pArena, pNeighbour);
+		_SegmentListRemove (pArena, pNeighbour);
+		pBT->uSize += pNeighbour->uSize;
+		pBT->is_rightmost = pNeighbour->is_rightmost;
+		OSFreeMem(pNeighbour);
+	}
+
+	if (_RemoveResourceSpan(pArena, pBT) == IMG_FALSE)
+	{
+		_FreeListInsert (pArena, pBT);
+		PVR_ASSERT( (!pBT->is_rightmost) || (!pBT->is_leftmost) || (!pBT->free_import) );
+	}
+
+	PVR_ASSERT(is_arena_valid(pArena));
+}
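+
+/* Illustrative walk-through of _FreeBT (not part of the original sources):
+ * freeing the busy middle segment of [A free][B busy][C free] first merges A
+ * into B (B takes A's base and absorbs its size), then merges C into B,
+ * leaving one free segment spanning all three; only if that segment covers a
+ * whole free_import span is it handed back via _RemoveResourceSpan, otherwise
+ * it is re-inserted into the free table. */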
+
+
+/*
+  This function returns the first element in a bucket that can be split
+  in a way that one of the sub-segments meets the size and alignment
+  criteria.
+
+  first_elt is the bucket to look into. Remember that a bucket is
+  implemented as a pointer to the first element of the linked list.
+
+  nb_max_try is used to limit the number of elements considered:
+  only the first nb_max_try elements in the free list are examined.
+  The special value ~0 means unlimited, i.e. consider all elements in
+  the free list.
+ */
+static INLINE
+struct _BT_ * find_chunk_in_bucket(struct _BT_ * first_elt,
+								   RA_LENGTH_T uSize,
+								   RA_LENGTH_T uAlignment,
+								   unsigned int nb_max_try)
+{
+	struct _BT_ * walker;
+
+	for (walker = first_elt; (walker != NULL) && (nb_max_try != 0); walker = walker->next_free)
+	{
+		const RA_BASE_T aligned_base = (uAlignment > 1) ?
+			(walker->base + uAlignment - 1) & ~(uAlignment - 1)
+			: walker->base;
+
+		if (walker->base + walker->uSize >= aligned_base + uSize)
+		{
+			return walker;
+		}
+
+		/* 0xFFFF...FFFF is used as nb_max_try = infinity. */
+		if (nb_max_try != (unsigned int) ~0)
+		{
+			nb_max_try--;
+		}
+	}
+
+	return NULL;
+}
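+
+/* The aligned_base computation above is the usual power-of-two align-up
+ * idiom: (base + uAlignment - 1) & ~(uAlignment - 1). For example, with
+ * base = 0x1200 and uAlignment = 0x1000, (0x1200 + 0xFFF) & ~0xFFF = 0x2000.
+ * The same idiom rounds sizes up to the arena quantum in RA_Add and
+ * RA_Alloc below. */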
+
+
+/*************************************************************************/ /*!
+@Function       _AttemptAllocAligned
+@Description    Attempt an allocation from an arena.
+@Input          pArena       The arena.
+@Input          uSize        The requested allocation size.
+@Output         phPriv       The user reference associated with
+                             the imported segment (optional).
+@Input          uFlags       Allocation flags
+@Input          uAlignment   Required alignment, or 0.
+                             Must be a power of 2 if not 0.
+@Output         base         Allocated resource base (non-optional, must not be NULL)
+@Return         IMG_FALSE failure
+                IMG_TRUE success
+*/ /**************************************************************************/
+static IMG_BOOL
+_AttemptAllocAligned (RA_ARENA *pArena,
+					  RA_LENGTH_T uSize,
+					  IMG_UINT32 uFlags,
+					  RA_LENGTH_T uAlignment,
+					  RA_BASE_T *base,
+                      RA_PERISPAN_HANDLE *phPriv) /* this is the "per-import" private data */
+{
+
+	IMG_UINT32 index_low;
+	IMG_UINT32 index_high;
+	IMG_UINT32 i;
+	struct _BT_ * pBT = NULL;
+	RA_BASE_T aligned_base;
+
+	PVR_ASSERT (pArena!=NULL);
+	PVR_ASSERT (base != NULL);
+
+	pArena->per_flags_buckets = PVRSRVSplay(uFlags, pArena->per_flags_buckets);
+	if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->ui32Flags != uFlags))
+	{
+		/* no chunks with these flags. */
+		return IMG_FALSE;
+	}
+
+	index_low = pvr_log2(uSize);
+	index_high = pvr_log2(uSize + uAlignment - 1);
+
+	PVR_ASSERT(index_low < FREE_TABLE_LIMIT);
+	PVR_ASSERT(index_high < FREE_TABLE_LIMIT);
+	PVR_ASSERT(index_low <= index_high);
+
+#if defined(PVR_CTZLL)
+	i = PVR_CTZLL((~(((IMG_ELTS_MAPPINGS)1 << (index_high + 1)) - 1)) & pArena->per_flags_buckets->bHasEltsMapping);
+#else
+	for (i = index_high + 1; (i < FREE_TABLE_LIMIT) && (pArena->per_flags_buckets->buckets[i] == NULL); ++i)
+	{
+	}
+#endif
+	PVR_ASSERT(i <= FREE_TABLE_LIMIT);
+
+	if (i != FREE_TABLE_LIMIT)
+	{
+		/* since we start at index_high + 1, we are guaranteed to exit */
+		pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, 1);
+	}
+	else
+	{
+		for (i = index_high; (i != index_low - 1) && (pBT == NULL); --i)
+		{
+			pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0);
+		}
+	}
+
+	if (pBT == NULL)
+	{
+		return IMG_FALSE;
+	}
+
+	aligned_base = (uAlignment > 1) ? (pBT->base + uAlignment - 1) & ~(uAlignment - 1) : pBT->base;
+
+	_FreeListRemove (pArena, pBT);
+
+	if (pArena->bNoSplit)
+	{
+		goto nosplit;
+	}
+
+	/* with uAlignment we might need to discard the front of this segment */
+	if (aligned_base > pBT->base)
+	{
+		BT *pNeighbour;
+		pNeighbour = _SegmentSplit (pBT, (RA_LENGTH_T)(aligned_base - pBT->base));
+		/* partition the buffer, create a new boundary tag */
+		if (pNeighbour == NULL)
+		{
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Front split failed", __func__));
+			/* Put pBT back in the list */
+			_FreeListInsert (pArena, pBT);
+			return IMG_FALSE;
+		}
+
+		_FreeListInsert(pArena, pBT);
+		pBT = pNeighbour;
+	}
+
+	/* the segment might be too big, if so, discard the back of the segment */
+	if (pBT->uSize > uSize)
+	{
+		BT *pNeighbour;
+		pNeighbour = _SegmentSplit(pBT, uSize);
+		/* partition the buffer, create a new boundary tag */
+		if (pNeighbour == NULL)
+		{
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Back split failed", __func__));
+			/* Put pBT back in the list */
+			_FreeListInsert (pArena, pBT);
+			return IMG_FALSE;
+		}
+
+		_FreeListInsert (pArena, pNeighbour);
+	}
+nosplit:
+	pBT->type = btt_live;
+
+	if (!HASH_Insert_Extended (pArena->pSegmentHash, &aligned_base, (uintptr_t)pBT))
+	{
+		_FreeBT (pArena, pBT);
+		return IMG_FALSE;
+	}
+
+	if (phPriv != NULL)
+		*phPriv = pBT->hPriv;
+
+	*base = aligned_base;
+
+	return IMG_TRUE;
+}
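+
+/* Note on the search strategy above (illustrative): any segment found in
+ * bucket index_high + 1 or higher is at least 2^(index_high + 1) bytes long,
+ * which exceeds uSize + uAlignment - 1, the worst case needed to carve out
+ * an aligned sub-segment of uSize bytes. That is why a single
+ * find_chunk_in_bucket(..., 1) probe suffices there, and only the smaller
+ * buckets index_low..index_high need exhaustive scanning. */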
+
+
+
+/*************************************************************************/ /*!
+@Function       RA_Create
+@Description    To create a resource arena.
+@Input          name          The name of the arena for diagnostic purposes.
+@Input          uLog2Quantum  The log2 of the arena allocation quantum.
+@Input          ui32LockClass The lock class level this arena uses.
+@Input          imp_alloc     A resource allocation callback or 0.
+@Input          imp_free      A resource de-allocation callback or 0.
+@Input          arena_handle  Handle passed to alloc and free or 0.
+@Input          bNoSplit      Disable splitting up imports.
+@Return         arena handle, or NULL.
+*/ /**************************************************************************/
+IMG_INTERNAL RA_ARENA *
+RA_Create (IMG_CHAR *name,
+		   RA_LOG2QUANTUM_T uLog2Quantum,
+		   IMG_UINT32 ui32LockClass,
+		   PVRSRV_ERROR (*imp_alloc)(RA_PERARENA_HANDLE h,
+								 RA_LENGTH_T uSize,
+								 RA_FLAGS_T _flags,
+								 const IMG_CHAR *pszAnnotation,
+								 /* returned data */
+								 RA_BASE_T *pBase,
+								 RA_LENGTH_T *pActualSize,
+								 RA_PERISPAN_HANDLE *phPriv),
+		   void (*imp_free) (RA_PERARENA_HANDLE,
+							 RA_BASE_T,
+							 RA_PERISPAN_HANDLE),
+		   RA_PERARENA_HANDLE arena_handle,
+		   IMG_BOOL bNoSplit)
+{
+	RA_ARENA *pArena;
+	PVRSRV_ERROR eError;
+
+	if (name == NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR, "RA_Create: invalid parameter 'name' (NULL not accepted)"));
+		return NULL;
+	}
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "RA_Create: name='%s'", name));
+
+	pArena = OSAllocMem(sizeof (*pArena));
+	if (pArena == NULL)
+	{
+		goto arena_fail;
+	}
+
+	eError = OSLockCreate(&pArena->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto lock_fail;
+	}
+
+	pArena->pSegmentHash = HASH_Create_Extended(MINIMUM_HASH_SIZE, sizeof(RA_BASE_T), HASH_Func_Default, HASH_Key_Comp_Default);
+
+	if (pArena->pSegmentHash==NULL)
+	{
+		goto hash_fail;
+	}
+
+	pArena->name = name;
+	pArena->pImportAlloc = (imp_alloc!=NULL) ? imp_alloc : &_RequestAllocFail;
+	pArena->pImportFree = imp_free;
+	pArena->pImportHandle = arena_handle;
+	pArena->pHeadSegment = NULL;
+	pArena->uQuantum = 1ULL << uLog2Quantum;
+	pArena->per_flags_buckets = NULL;
+	pArena->ui32LockClass = ui32LockClass;
+	pArena->bNoSplit = bNoSplit;
+
+	PVR_ASSERT(is_arena_valid(pArena));
+	return pArena;
+
+hash_fail:
+	OSLockDestroy(pArena->hLock);
+lock_fail:
+	OSFreeMem(pArena);
+	/*not nulling pointer, out of scope*/
+arena_fail:
+	return NULL;
+}
+
+/*************************************************************************/ /*!
+@Function       RA_Delete
+@Description    To delete a resource arena. All resources allocated from
+                the arena must be freed before deleting the arena.
+@Input          pArena        The arena to delete.
+*/ /**************************************************************************/
+IMG_INTERNAL void
+RA_Delete (RA_ARENA *pArena)
+{
+	IMG_UINT32 uIndex;
+	IMG_BOOL bWarn = IMG_TRUE;
+
+	PVR_ASSERT(pArena != NULL);
+
+	if (pArena == NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: invalid parameter - pArena"));
+		return;
+	}
+
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	PVR_DPF ((PVR_DBG_MESSAGE,
+			  "RA_Delete: name='%s'", pArena->name));
+
+	while (pArena->pHeadSegment != NULL)
+	{
+		BT *pBT = pArena->pHeadSegment;
+
+		if (pBT->type != btt_free)
+		{
+			if (bWarn)
+			{
+				PVR_DPF ((PVR_DBG_ERROR, "%s: Allocations still exist in the arena that is being destroyed", __func__));
+				PVR_DPF ((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__));
+				PVR_DPF ((PVR_DBG_ERROR, "%s: base = 0x%llx size=0x%llx", __func__,
+					  (unsigned long long)pBT->base, (unsigned long long)pBT->uSize));
+				PVR_DPF ((PVR_DBG_ERROR, "%s: This warning will be issued only once for the first allocation found!", __func__));
+				bWarn = IMG_FALSE;
+			}
+		}
+		else
+		{
+			_FreeListRemove(pArena, pBT);
+		}
+
+		_SegmentListRemove (pArena, pBT);
+		OSFreeMem(pBT);
+		/*not nulling original pointer, it has changed*/
+	}
+
+	while (pArena->per_flags_buckets != NULL)
+	{
+		for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
+		{
+			PVR_ASSERT(pArena->per_flags_buckets->buckets[uIndex] == NULL);
+		}
+
+		pArena->per_flags_buckets = PVRSRVDelete(pArena->per_flags_buckets->ui32Flags, pArena->per_flags_buckets);
+	}
+
+	HASH_Delete (pArena->pSegmentHash);
+	OSLockDestroy(pArena->hLock);
+	OSFreeMem(pArena);
+	/*not nulling pointer, copy on stack*/
+}
+
+/*************************************************************************/ /*!
+@Function       RA_Add
+@Description    To add a resource span to an arena. The span must not
+                overlap with any span previously added to the arena.
+@Input          pArena     The arena to add a span into.
+@Input          base       The base of the span.
+@Input          uSize      The extent of the span.
+@Input          uFlags     The flags of the new import.
+@Input          hPriv      A private handle associated with the span (reserved for the user).
+@Return         IMG_TRUE - Success
+                IMG_FALSE - failure
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+RA_Add (RA_ARENA *pArena,
+		RA_BASE_T base,
+		RA_LENGTH_T uSize,
+		RA_FLAGS_T uFlags,
+		RA_PERISPAN_HANDLE hPriv)
+{
+	struct _BT_* bt;
+	PVR_ASSERT (pArena != NULL);
+	PVR_ASSERT (uSize != 0);
+
+	if (pArena == NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR,"RA_Add: invalid parameter - pArena"));
+		return IMG_FALSE;
+	}
+
+	if (uSize == 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RA_Add: invalid size 0 added to arena %s", pArena->name));
+		return IMG_FALSE;
+	}
+
+	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+	PVR_ASSERT(is_arena_valid(pArena));
+	PVR_DPF ((PVR_DBG_MESSAGE, "RA_Add: name='%s', "
+			  "base=0x%llx, size=0x%llx", pArena->name,
+			  (unsigned long long)base, (unsigned long long)uSize));
+
+	uSize = (uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+	bt = _InsertResource(pArena, base, uSize, uFlags);
+	if (bt != NULL)
+	{
+		bt->hPriv = hPriv;
+	}
+
+	PVR_ASSERT(is_arena_valid(pArena));
+	OSLockRelease(pArena->hLock);
+
+	return bt != NULL;
+}
+
+/*************************************************************************/ /*!
+@Function       RA_Alloc
+@Description    To allocate resource from an arena.
+@Input          pArena            The arena
+@Input          uRequestSize      The size of resource segment requested.
+@Input          uImportMultiplier Import x-times more for future requests if
+                                  we have to import new memory.
+@Output         pActualSize       The actual size of resource segment
+                                  allocated, typically rounded up by quantum.
+@Output         phPriv            The user reference associated with allocated resource span.
+@Input          uImportFlags            Flags influencing allocation policy.
+@Input          uAlignment        The uAlignment constraint required for the
+                                  allocated segment, use 0 if uAlignment not required, otherwise
+                                  must be a power of 2.
+@Output         base              Allocated base resource
+@Return         PVRSRV_OK - success
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RA_Alloc (RA_ARENA *pArena,
+		  RA_LENGTH_T uRequestSize,
+		  IMG_UINT8 uImportMultiplier,
+		  RA_FLAGS_T uImportFlags,
+		  RA_LENGTH_T uAlignment,
+		  const IMG_CHAR *pszAnnotation,
+		  RA_BASE_T *base,
+		  RA_LENGTH_T *pActualSize,
+		  RA_PERISPAN_HANDLE *phPriv)
+{
+	PVRSRV_ERROR eError;
+	IMG_BOOL bResult;
+	RA_LENGTH_T uSize = uRequestSize;
+	RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK);
+
+	if (pArena == NULL || uImportMultiplier == 0 || uSize == 0)
+	{
+		PVR_DPF ((PVR_DBG_ERROR,
+				  "RA_Alloc: One of the necessary parameters is 0"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	if (pActualSize != NULL)
+	{
+		*pActualSize = uSize;
+	}
+
+	/* Must be a power of 2 or 0 */
+	PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0);
+
+	PVR_DPF ((PVR_DBG_MESSAGE,
+			  "RA_Alloc: arena='%s', size=0x%llx(0x%llx), "
+			  "alignment=0x%llx", pArena->name,
+			  (unsigned long long)uSize, (unsigned long long)uRequestSize,
+			  (unsigned long long)uAlignment));
+
+	/* attempt the allocation; if it fails we might have an import source
+	   which can provide more resource, else we will have to fail the
+	   allocation back to the caller. */
+	bResult = _AttemptAllocAligned (pArena, uSize, uFlags, uAlignment, base, phPriv);
+	if (!bResult)
+	{
+		IMG_HANDLE hPriv;
+		RA_BASE_T import_base;
+		RA_LENGTH_T uImportSize = uSize;
+
+		/*
+			Ensure that we allocate sufficient space to meet the uAlignment
+			constraint
+		 */
+		if (uAlignment > pArena->uQuantum)
+		{
+			uImportSize += (uAlignment - pArena->uQuantum);
+		}
+
+		/* apply over-allocation multiplier after all alignment adjustments */
+		uImportSize *= uImportMultiplier;
+
+		/* ensure that we import according to the quanta of this arena */
+		uImportSize = (uImportSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+
+		eError = pArena->pImportAlloc (pArena->pImportHandle,
+									   uImportSize, uImportFlags,
+									   pszAnnotation,
+									   &import_base, &uImportSize,
+									   &hPriv);
+		if (PVRSRV_OK != eError)
+		{
+			OSLockRelease(pArena->hLock);
+			return eError;
+		}
+		else
+		{
+			BT *pBT;
+			pBT = _InsertResourceSpan (pArena, import_base, uImportSize, uFlags);
+			/* successfully import more resource, create a span to
+			   represent it and retry the allocation attempt */
+			if (pBT == NULL)
+			{
+				/* insufficient resources to insert the newly acquired span,
+				   so free it back again */
+				pArena->pImportFree(pArena->pImportHandle, import_base, hPriv);
+
+				PVR_DPF ((PVR_DBG_MESSAGE, "RA_Alloc: name='%s', "
+						  "size=0x%llx failed!", pArena->name,
+						  (unsigned long long)uSize));
+				/* RA_Dump (arena); */
+
+				OSLockRelease(pArena->hLock);
+				return PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED;
+			}
+
+			pBT->hPriv = hPriv;
+
+			bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv);
+			if (!bResult)
+			{
+				PVR_DPF ((PVR_DBG_ERROR,
+						  "RA_Alloc: name='%s' second alloc failed!",
+						  pArena->name));
+
+				/*
+				  On failure of _AttemptAllocAligned(), depending on the exact
+				  point of failure, the imported segment may have been used and
+				  freed, or left untouched. If the latter, we need to return it.
+				*/
+				_FreeBT(pArena, pBT);
+
+				OSLockRelease(pArena->hLock);
+				return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED;
+			}
+			else
+			{
+				/* Check if the new allocation was in the span we just added... */
+				if (*base < import_base  ||  *base > (import_base + uImportSize))
+				{
+					PVR_DPF ((PVR_DBG_ERROR,
+							  "RA_Alloc: name='%s' alloc did not occur in the imported span!",
+							  pArena->name));
+
+					/*
+					  Remove the imported span which should not be in use (if it is then
+					  that is okay, but essentially no span should exist that is not used).
+					*/
+					_FreeBT(pArena, pBT);
+				}
+			}
+		}
+	}
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "RA_Alloc: name='%s', size=0x%llx, "
+			  "*base=0x%llx = %d",pArena->name, (unsigned long long)uSize,
+			  (unsigned long long)*base, bResult));
+
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	OSLockRelease(pArena->hLock);
+	return PVRSRV_OK;
+}
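+
+/* Worked example for the import sizing above (illustrative figures): with a
+ * quantum of 0x1000, uSize = 0x1800, uAlignment = 0x4000 and
+ * uImportMultiplier = 2, uImportSize becomes
+ * (0x1800 + (0x4000 - 0x1000)) * 2 = 0x9000, already a quantum multiple, so
+ * 0x9000 bytes are requested from pImportAlloc. */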
+
+
+
+
+/*************************************************************************/ /*!
+@Function       RA_Free
+@Description    To free a resource segment.
+@Input          pArena     The arena the segment was originally allocated from.
+@Input          base       The base of the resource span to free.
+*/ /**************************************************************************/
+IMG_INTERNAL void
+RA_Free (RA_ARENA *pArena, RA_BASE_T base)
+{
+	BT *pBT;
+
+	PVR_ASSERT (pArena != NULL);
+
+	if (pArena == NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR,"RA_Free: invalid parameter - pArena"));
+		return;
+	}
+
+	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "RA_Free: name='%s', base=0x%llx", pArena->name,
+			  (unsigned long long)base));
+
+	pBT = (BT *) HASH_Remove_Extended (pArena->pSegmentHash, &base);
+	PVR_ASSERT (pBT != NULL);
+
+	if (pBT)
+	{
+		PVR_ASSERT (pBT->base == base);
+		_FreeBT (pArena, pBT);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RA_Free: no resource span found for given base (0x%llX) in arena %s",
+										(unsigned long long) base,
+											pArena->name));
+	}
+
+	PVR_ASSERT(is_arena_valid(pArena));
+	OSLockRelease(pArena->hLock);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ra.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ra.h
new file mode 100644
index 0000000..fd59cd4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ra.h
@@ -0,0 +1,206 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Allocator API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RA_H_
+#define _RA_H_
+
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/** Resource arena.
+ *  struct _RA_ARENA_ deliberately opaque
+ */
+typedef struct _RA_ARENA_ RA_ARENA;			//PRQA S 3313
+
+/*
+ * Per-Arena handle - this is private data for the caller of the RA.
+ * The RA knows nothing about this data.  It is given it upon
+ * RA_Create, and promises to pass it to calls to the ImportAlloc and
+ * ImportFree callbacks
+ */
+typedef IMG_HANDLE RA_PERARENA_HANDLE;
+/*
+ * Per-Import handle - this is private data for the caller of the RA.
+ * The RA knows nothing about this data.  It is given it on a
+ * per-import basis, either the "initial" import at RA_Create time, or
+ * further imports via the ImportAlloc callback.  It sends it back via
+ * the ImportFree callback, and also provides it in answer to any
+ * RA_Alloc request to signify from which "import" the allocation came
+ */
+typedef IMG_HANDLE RA_PERISPAN_HANDLE;
+
+typedef IMG_UINT64 RA_BASE_T;
+typedef IMG_UINT32 RA_LOG2QUANTUM_T;
+typedef IMG_UINT64 RA_LENGTH_T;
+
+/* Lock classes: describes the level of nesting between different arenas. */
+#define RA_LOCKCLASS_0 0
+#define RA_LOCKCLASS_1 1
+#define RA_LOCKCLASS_2 2
+
+#define RA_NO_IMPORT_MULTIPLIER 1
+
+/*
+ * Flags in an "import" must match the flags for an allocation
+ */
+typedef IMG_UINT32 RA_FLAGS_T;
+
+/**
+ *  @Function   RA_Create
+ *
+ *  @Description
+ *
+ *  To create a resource arena.
+ *
+ *  @Input name - the name of the arena for diagnostic purposes.
 *  @Input uLog2Quantum - the log2 of the arena allocation quantum.
+ *  @Input ui32LockClass - the lock class level this arena uses.
+ *  @Input imp_alloc - a resource allocation callback or 0.
+ *  @Input imp_free - a resource de-allocation callback or 0.
+ *  @Input per_arena_handle - user private handle passed to alloc and free or 0.
+ *  @Input bNoSplit - Disable splitting up imports.
+ *  @Return pointer to arena, or NULL.
+ */
+RA_ARENA *
+RA_Create (IMG_CHAR *name,
+           /* subsequent imports: */
+           RA_LOG2QUANTUM_T uLog2Quantum,
+           IMG_UINT32 ui32LockClass,
+           PVRSRV_ERROR (*imp_alloc)(RA_PERARENA_HANDLE _h,
+                                 RA_LENGTH_T uSize,
+                                 RA_FLAGS_T uFlags,
+                                 const IMG_CHAR *pszAnnotation,
+                                 RA_BASE_T *pBase,
+                                 RA_LENGTH_T *pActualSize,
+                                 RA_PERISPAN_HANDLE *phPriv),
+           void (*imp_free) (RA_PERARENA_HANDLE,
+                                 RA_BASE_T,
+                                 RA_PERISPAN_HANDLE),
+           RA_PERARENA_HANDLE per_arena_handle,
+           IMG_BOOL bNoSplit);
+
+/**
+ *  @Function   RA_Delete
+ *
+ *  @Description
+ *
+ *  To delete a resource arena. All resources allocated from the arena
+ *  must be freed before deleting the arena.
+ *
+ *  @Input  pArena - the arena to delete.
+ *  @Return None
+ */
+void
+RA_Delete (RA_ARENA *pArena);
+
+/**
+ *  @Function   RA_Add
+ *
+ *  @Description
+ *
+ *  To add a resource span to an arena. The span must not overlap with
+ *  any span previously added to the arena.
+ *
+ *  @Input pArena - the arena to add a span into.
+ *  @Input base - the base of the span.
+ *  @Input uSize - the extent of the span.
+ *  @Input hPriv - handle associated with the span (reserved for user use)
+ *  @Return IMG_TRUE - success, IMG_FALSE - failure
+ */
+IMG_BOOL
+RA_Add (RA_ARENA *pArena,
+		RA_BASE_T base,
+		RA_LENGTH_T uSize,
+		RA_FLAGS_T uFlags,
+		RA_PERISPAN_HANDLE hPriv);
+
+/**
+ *  @Function   RA_Alloc
+ *
+ *  @Description
+ *
+ *  To allocate resource from an arena.
+ *
+ *  @Input  pArena - the arena
+ *  @Input  uRequestSize - the size of resource segment requested.
+ *  @Input  uImportMultiplier - import x-times the uRequestSize
+ *          for future RA_Alloc calls.
+ *          Use RA_NO_IMPORT_MULTIPLIER to import the exact size.
+ *  @Output pActualSize - the actual size of the resource segment allocated,
+ *          typically rounded up by quantum.
+ *  @Input  uImportFlags - flags influencing allocation policy.
+ *  @Input  uAlignment - the alignment constraint required for the
+ *          allocated segment, use 0 if alignment not required.
+ *  @Input  pszAnnotation - a string to describe the allocation
+ *  @Output pBase - allocated base resource
+ *  @Output phPriv - the user reference associated with allocated
+ *          resource span.
+ *  @Return PVRSRV_OK - success
+ */
+PVRSRV_ERROR
+RA_Alloc (RA_ARENA *pArena,
+          RA_LENGTH_T uSize,
+          IMG_UINT8 uImportMultiplier,
+          RA_FLAGS_T uFlags,
+          RA_LENGTH_T uAlignment,
+          const IMG_CHAR *pszAnnotation,
+          RA_BASE_T *pBase,
+          RA_LENGTH_T *pActualSize,
+          RA_PERISPAN_HANDLE *phPriv);
+
+/**
+ *  @Function   RA_Free
+ *
+ *  @Description    To free a resource segment.
+ *
+ *  @Input  pArena - the arena the segment was originally allocated from.
+ *  @Input  base - the base of the resource span to free.
+ *
+ *  @Return None
+ */
+void
+RA_Free (RA_ARENA *pArena, RA_BASE_T base);
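+
+/* Minimal usage sketch (illustrative only; error handling elided and the
+ * figures are made up). A client creates an arena over a span it owns,
+ * allocates from it, and tears everything down in reverse order:
+ *
+ *   RA_ARENA *pArena = RA_Create("example", 12, RA_LOCKCLASS_0,
+ *                                NULL, NULL, NULL, IMG_FALSE);
+ *   RA_BASE_T sBase;
+ *
+ *   RA_Add(pArena, 0x80000000, 0x100000, 0, NULL);
+ *   RA_Alloc(pArena, 0x2000, RA_NO_IMPORT_MULTIPLIER, 0, 0x1000,
+ *            "example alloc", &sBase, NULL, NULL);
+ *   ...
+ *   RA_Free(pArena, sBase);
+ *   RA_Delete(pArena);
+ */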
+
+#endif
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_bridge.h
new file mode 100644
index 0000000..8802a00
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_bridge.h
@@ -0,0 +1,213 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the Rogue Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGX_BRIDGE_H
+#define RGX_BRIDGE_H
+
+#include "pvr_bridge.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "rgx_fwif_km.h"
+
+#define RGXFWINITPARAMS_VERSION   1
+#define RGXFWINITPARAMS_EXTENSION 128
+
+#include "common_rgxta3d_bridge.h"
+#include "common_rgxcmp_bridge.h"
+
+#include "common_rgxtq2_bridge.h"
+#include "common_rgxtq_bridge.h"
+#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE)
+#include "common_rgxbreakpoint_bridge.h"
+#endif
+#include "common_rgxfwdbg_bridge.h"
+#if defined(PDUMP)
+#include "common_rgxpdump_bridge.h"
+#endif
+#include "common_rgxhwperf_bridge.h"
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+#include "common_rgxregconfig_bridge.h"
+#endif
+#include "common_rgxkicksync_bridge.h"
+
+#include "common_rgxsignals_bridge.h"
+
+
+/*
+ * Bridge Cmd Ids
+ */
+
+/* *REMEMBER* to update PVRSRV_BRIDGE_RGX_LAST if you add/remove a bridge
+ * group!
+ * Also you need to ensure all PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST offsets
+ * follow on from the previous bridge group's commands!
+ *
+ * If a bridge group is optional, ensure you *ALWAYS* define its index
+ * (e.g. PVRSRV_BRIDGE_RGXCMP is always 129, even if the feature is not
+ * defined). If an optional bridge group is not defined you must still
+ * define PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST for it with an assigned
+ * value of 0.
+ */
+
+/* The RGX bridge groups start at 128 (PVRSRV_BRIDGE_RGX_FIRST) rather than
+ * following on from the other non-device bridge groups, so that they are
+ * not displaced if further non-device bridge groups are added.
+ */
+
+#define PVRSRV_BRIDGE_RGX_FIRST                  128UL
+
+/* 128: RGX TQ interface functions */
+#define PVRSRV_BRIDGE_RGXTQ                      128UL
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST       (PVRSRV_BRIDGE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST        (PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ_CMD_LAST)
+
+/* 129: RGX Compute interface functions */
+#define PVRSRV_BRIDGE_RGXCMP                     129UL
+#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST      (PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST       (PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXCMP_CMD_LAST)
+
+/* 130: RGX TA/3D interface functions */
+#define PVRSRV_BRIDGE_RGXTA3D                    130UL
+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST     (PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST      (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTA3D_CMD_LAST)
+
+/* 131: RGX Breakpoint interface functions */
+#define PVRSRV_BRIDGE_RGXBREAKPOINT                 131UL
+#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE)
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST  (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST   (PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST  0
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST   (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST)
+#endif
+
+/* 132: RGX Debug/Misc interface functions */
+#define PVRSRV_BRIDGE_RGXFWDBG                   132UL
+#define PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST    (PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST     (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST)
+
+/* 133: RGX PDump interface functions */
+#define PVRSRV_BRIDGE_RGXPDUMP                   133UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST    (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST     (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST    0
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST     (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST)
+#endif
+
+/* 134: RGX HWPerf interface functions */
+#define PVRSRV_BRIDGE_RGXHWPERF                  134UL
+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST   (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST    (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST)
+
+/* 135: RGX Register Configuration interface functions */
+#define PVRSRV_BRIDGE_RGXREGCONFIG                  135UL
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST   (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST    (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST   0
+#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST    (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST)
+#endif
+
+/* 136: RGX kicksync interface */
+#define PVRSRV_BRIDGE_RGXKICKSYNC                136UL
+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST  (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST)
+
+/* 137: RGX signals interface */
+#define PVRSRV_BRIDGE_RGXSIGNALS                 137UL
+#define PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST  (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST   (PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXSIGNALS_CMD_LAST)
+
+
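+/* 138: RGX TQ2 interface functions */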
+#define PVRSRV_BRIDGE_RGXTQ2                     138UL
+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST      (PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST       (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ2_CMD_LAST)
+
+#define PVRSRV_BRIDGE_RGX_LAST                   (PVRSRV_BRIDGE_RGXTQ2)
+#define PVRSRV_BRIDGE_RGX_DISPATCH_LAST          (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST)
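+/* Worked example (hypothetical, for illustration only): a new bridge group
+ * "FOO" appended after RGXTQ2 would follow the rules stated above, with its
+ * dispatch range following on from the previous group, and
+ * PVRSRV_BRIDGE_RGX_LAST / PVRSRV_BRIDGE_RGX_DISPATCH_LAST would then be
+ * updated to reference it:
+ *
+ *   #define PVRSRV_BRIDGE_RGXFOO                139UL
+ *   #define PVRSRV_BRIDGE_RGXFOO_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST + 1)
+ *   #define PVRSRV_BRIDGE_RGXFOO_DISPATCH_LAST  (PVRSRV_BRIDGE_RGXFOO_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXFOO_CMD_LAST)
+ */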
+
+/* bit mask representing the enabled RGX bridges */
+
+static const IMG_UINT32 gui32RGXBridges =
+	  (1U << (PVRSRV_BRIDGE_RGXTQ - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(RGX_FEATURE_COMPUTE)
+	| (1U << (PVRSRV_BRIDGE_RGXCMP - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_RGXTA3D - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(SUPPORT_BREAKPOINT)
+	| (1U << (PVRSRV_BRIDGE_RGXBREAKPOINT - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_RGXFWDBG - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(PDUMP)
+	| (1U << (PVRSRV_BRIDGE_RGXPDUMP - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_RGXHWPERF - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(SUPPORT_REGCONFIG)
+	| (1U << (PVRSRV_BRIDGE_RGXREGCONFIG - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_RGXKICKSYNC - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(RGX_FEATURE_SIGNAL_SNOOPING)
+	| (1U << (PVRSRV_BRIDGE_RGXSIGNALS - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_RGXTQ2 - PVRSRV_BRIDGE_RGX_FIRST));
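+/* Example (illustrative only): testing whether a given bridge group is
+ * enabled in the mask above, here for the kicksync group:
+ *
+ *   if (gui32RGXBridges &
+ *       (1U << (PVRSRV_BRIDGE_RGXKICKSYNC - PVRSRV_BRIDGE_RGX_FIRST)))
+ *   {
+ *       // kicksync bridge calls may be dispatched
+ *   }
+ */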
+
+/* bit field representing which RGX bridge groups may optionally not
+ * be present in the server
+ */
+
+#define RGX_BRIDGES_OPTIONAL \
+	( \
+		0 /* no RGX bridges are currently optional */ \
+	)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* RGX_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_common.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_common.h
new file mode 100644
index 0000000..5c3b839
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_common.h
@@ -0,0 +1,193 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Common Types and Defines Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common types and definitions for RGX software
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_COMMON_H
+#define RGX_COMMON_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+/* Included to get the BVNC_KM_N defined and other feature defs */
+#include "km/rgxdefs_km.h"
+
+/*! This macro represents a mask of LSBs that must be zero on data structure
+ * sizes and offsets to ensure they are 8-byte granular on types shared between
+ * the FW and host driver */
+#define RGX_FW_ALIGNMENT_LSB (7U)
+
+/*! Macro to test structure size alignment */
+#define RGX_FW_STRUCT_SIZE_ASSERT(_a)	\
+	static_assert((sizeof(_a) & RGX_FW_ALIGNMENT_LSB) == 0U,	\
+				  "Size of " #_a " is not properly aligned")
+
+/*! Macro to test structure member alignment */
+#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b)	\
+	static_assert((offsetof(_a, _b) & RGX_FW_ALIGNMENT_LSB) == 0U,	\
+				  "Offset of " #_a "." #_b " is not properly aligned")
+
+
+/* The following enum assumes that only one of the RGX_FEATURE_TLA and
+ * RGX_FEATURE_FASTRENDER_DM features is present.
+ * Should this ever cease to be true, the build fails so the code can be fixed. */
+#if defined(RGX_FEATURE_TLA) && defined(RGX_FEATURE_FASTRENDER_DM)
+#error "Both RGX_FEATURE_TLA and RGX_FEATURE_FASTRENDER_DM defined. Fix code to handle this!"
+#endif
+
+/*! The master definition for data masters known to the firmware of RGX.
+ * When a new DM is added to this list, a corresponding entry should be added
+ * to the RGX_HWPERF_DM enum list.
+ * The DM in a V1 HWPerf packet uses this definition. */
+
+typedef IMG_UINT32 RGXFWIF_DM;
+
+#define	RGXFWIF_DM_GP			IMG_UINT32_C(0)
+/* Either TDM or 2D DM is present. The above build time error is present to verify this */
+#define	RGXFWIF_DM_2D			IMG_UINT32_C(1) /* when RGX_FEATURE_TLA defined */
+#define	RGXFWIF_DM_TDM			IMG_UINT32_C(1) /* when RGX_FEATURE_FASTRENDER_DM defined */
+
+#define	RGXFWIF_DM_TA			IMG_UINT32_C(2)
+#define	RGXFWIF_DM_3D			IMG_UINT32_C(3)
+#define	RGXFWIF_DM_CDM			IMG_UINT32_C(4)
+
+#define	RGXFWIF_DM_LAST RGXFWIF_DM_CDM
+
+typedef enum _RGX_KICK_TYPE_DM_
+{
+	RGX_KICK_TYPE_DM_GP			= 0x001,
+	RGX_KICK_TYPE_DM_TDM_2D		= 0x002,
+	RGX_KICK_TYPE_DM_TA			= 0x004,
+	RGX_KICK_TYPE_DM_3D			= 0x008,
+	RGX_KICK_TYPE_DM_CDM		= 0x010,
+	RGX_KICK_TYPE_DM_RTU		= 0x020,
+	RGX_KICK_TYPE_DM_SHG		= 0x040,
+	RGX_KICK_TYPE_DM_TQ2D		= 0x080,
+	RGX_KICK_TYPE_DM_TQ3D		= 0x100,
+	RGX_KICK_TYPE_DM_LAST		= 0x200
+} RGX_KICK_TYPE_DM;
+
+/* Default maximum number of DMs: GP, 2D/TDM, TA, 3D, CDM */
+#define RGXFWIF_DM_DEFAULT_MAX	(RGXFWIF_DM_LAST + 1U)
+
+/* Maximum number of DMs in use: GP, 2D/TDM, TA, 3D, CDM */
+#define RGXFWIF_DM_MAX			(5U)
+#define RGXFWIF_HWDM_MAX		(RGXFWIF_DM_MAX)
+
+/* Min/Max number of HW DMs (all but GP) */
+#if defined(RGX_FEATURE_TLA)
+#define RGXFWIF_HWDM_MIN		(1U)
+#else
+#if defined(RGX_FEATURE_FASTRENDER_DM)
+#define RGXFWIF_HWDM_MIN		(1U)
+#else
+#define RGXFWIF_HWDM_MIN		(2U)
+#endif
+#endif
+
+/*
+ * Data Master Tags to be appended to resources created on behalf of each RGX
+ * Context.
+ */
+#define RGX_RI_DM_TAG_KS   'K'
+#define RGX_RI_DM_TAG_CDM  'C'
+#define RGX_RI_DM_TAG_RC   'R' /* To be removed once TA/3D Timelines are split */
+#define RGX_RI_DM_TAG_TA   'V'
+#define RGX_RI_DM_TAG_3D   'P'
+#define RGX_RI_DM_TAG_TDM  'T'
+#define RGX_RI_DM_TAG_TQ2D '2'
+#define RGX_RI_DM_TAG_TQ3D 'Q'
+
+/*
+ * Client API Tags to be appended to resources created on behalf of each
+ * Client API.
+ */
+#define RGX_RI_CLIENT_API_GLES1    '1'
+#define RGX_RI_CLIENT_API_GLES3    '3'
+#define RGX_RI_CLIENT_API_VULKAN   'V'
+#define RGX_RI_CLIENT_API_EGL      'E'
+#define RGX_RI_CLIENT_API_OPENCL   'C'
+#define RGX_RI_CLIENT_API_OPENGL   'G'
+#define RGX_RI_CLIENT_API_SERVICES 'S'
+#define RGX_RI_CLIENT_API_WSEGL    'W'
+#define RGX_RI_CLIENT_API_ANDROID  'A'
+#define RGX_RI_CLIENT_API_LWS      'L'
+
+/*
+ * Format a RI annotation for a given RGX Data Master context
+ */
+#define RGX_RI_FORMAT_DM_ANNOTATION(annotation, dmTag, clientAPI) do         \
+	{                                                                        \
+		annotation[0] = dmTag;                                               \
+		annotation[1] = clientAPI;                                           \
+		annotation[2] = '\0';                                                \
+	} while (0)
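+/* Example (illustrative only): formatting a two-character annotation for a
+ * compute context created by an OpenCL client. The destination buffer must
+ * hold at least three characters:
+ *
+ *   IMG_CHAR szAnnotation[3];
+ *   RGX_RI_FORMAT_DM_ANNOTATION(szAnnotation, RGX_RI_DM_TAG_CDM,
+ *                               RGX_RI_CLIENT_API_OPENCL);
+ *   // szAnnotation now contains "CC"
+ */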
+
+/*!
+ ******************************************************************************
+ * RGXFW Compiler alignment definitions
+ *****************************************************************************/
+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+#define RGXFW_ALIGN			__attribute__ ((aligned (8)))
+#elif defined(_MSC_VER)
+#define RGXFW_ALIGN			__declspec(align(8))
+#pragma warning (disable : 4324)
+#else
+#error "Align MACROS need to be defined for this compiler"
+#endif
+
+/*!
+ ******************************************************************************
+ * Force 8-byte alignment for structures allocated uncached.
+ *****************************************************************************/
+#define UNCACHED_ALIGN      RGXFW_ALIGN
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* RGX_COMMON_H */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_compat_bvnc.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_compat_bvnc.h
new file mode 100644
index 0000000..f0e2177
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_compat_bvnc.h
@@ -0,0 +1,140 @@
+/*************************************************************************/ /*!
+@File           rgx_compat_bvnc.h
+@Title          BVNC compatibility check utilities
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used for packing the B, V, N and C fields
+                of a BVNC.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (RGX_COMPAT_BVNC_H)
+#define RGX_COMPAT_BVNC_H
+
+#include "img_types.h"
+
+#if defined(RGX_FIRMWARE)               /* Services firmware */
+# include "rgxfw_utils.h"
+# define PVR_COMPAT_ASSERT RGXFW_ASSERT
+#elif !defined(RGX_BUILD_BINARY)        /* Services host driver code */
+# include "pvr_debug.h"
+# define PVR_COMPAT_ASSERT PVR_ASSERT
+#else                                   /* FW user-mode tools */
+# include <assert.h>
+# define PVR_COMPAT_ASSERT assert
+#endif
+
+/* 64bit endian conversion macros */
+#if defined(__BIG_ENDIAN__)
+#define RGX_INT64_TO_BE(N) (N)
+#define RGX_INT64_FROM_BE(N) (N)
+#define RGX_INT32_TO_BE(N) (N)
+#define RGX_INT32_FROM_BE(N) (N)
+#else
+#define RGX_INT64_TO_BE(N)        \
+	((((N) >> 56)   & 0xff)       \
+	 | (((N) >> 40) & 0xff00)     \
+	 | (((N) >> 24) & 0xff0000)   \
+	 | (((N) >> 8)  & 0xff000000U) \
+	 | ((N)                << 56) \
+	 | (((N) & 0xff00)     << 40) \
+	 | (((N) & 0xff0000)   << 24) \
+	 | (((N) & 0xff000000U) << 8))
+#define RGX_INT64_FROM_BE(N) RGX_INT64_TO_BE(N)
+
+#define RGX_INT32_TO_BE(N)    \
+	((((N) >> 24)  & 0xff)    \
+	 | (((N) >> 8) & 0xff00)  \
+	 | ((N)            << 24) \
+	 | (((N) & 0xff00) << 8))
+#define RGX_INT32_FROM_BE(N) RGX_INT32_TO_BE(N)
+#endif
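+/* Worked example (illustrative only): on a little-endian build,
+ * RGX_INT32_TO_BE(0x11223344) evaluates to 0x44332211 and
+ * RGX_INT64_TO_BE(0x1122334455667788) to 0x8877665544332211; on a
+ * big-endian build both macros are identity mappings. */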
+
+/******************************************************************************
+ * RGX Version packed into 64-bit (BVNC) to be used by Compatibility Check
+ *****************************************************************************/
+
+#define RGX_BVNC_PACK_SHIFT_B 48
+#define RGX_BVNC_PACK_SHIFT_V 32
+#define RGX_BVNC_PACK_SHIFT_N 16
+#define RGX_BVNC_PACK_SHIFT_C 0
+
+#define RGX_BVNC_PACK_MASK_B (IMG_UINT64_C(0xFFFF000000000000))
+#define RGX_BVNC_PACK_MASK_V (IMG_UINT64_C(0x0000FFFF00000000))
+#define RGX_BVNC_PACK_MASK_N (IMG_UINT64_C(0x00000000FFFF0000))
+#define RGX_BVNC_PACK_MASK_C (IMG_UINT64_C(0x000000000000FFFF))
+
+#define RGX_BVNC_PACKED_EXTR_B(BVNC) ((IMG_UINT32)(((BVNC).ui64BVNC & RGX_BVNC_PACK_MASK_B) >> RGX_BVNC_PACK_SHIFT_B))
+#define RGX_BVNC_PACKED_EXTR_V(BVNC) ((IMG_UINT32)(((BVNC).ui64BVNC & RGX_BVNC_PACK_MASK_V) >> RGX_BVNC_PACK_SHIFT_V))
+#define RGX_BVNC_PACKED_EXTR_N(BVNC) ((IMG_UINT32)(((BVNC).ui64BVNC & RGX_BVNC_PACK_MASK_N) >> RGX_BVNC_PACK_SHIFT_N))
+#define RGX_BVNC_PACKED_EXTR_C(BVNC) ((IMG_UINT32)(((BVNC).ui64BVNC & RGX_BVNC_PACK_MASK_C) >> RGX_BVNC_PACK_SHIFT_C))
+
+#define RGX_BVNC_EQUAL(L,R,all,version,bvnc) do {															\
+										(bvnc) = IMG_FALSE;													\
+										(version) = ((L).ui32LayoutVersion == (R).ui32LayoutVersion);		\
+										if (version)														\
+										{																	\
+											(bvnc) = ((L).ui64BVNC == (R).ui64BVNC);						\
+										}																	\
+										(all) = (version) && (bvnc);										\
+									} while (0)
+
+
+/**************************************************************************//**
+ * Utility function for packing BVNC
+ *****************************************************************************/
+static inline IMG_UINT64 rgx_bvnc_pack(IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C)
+{
+	/*
+	 * Test for input B, V, N and C exceeding max bit width.
+	 */
+	PVR_COMPAT_ASSERT((ui32B & (~(RGX_BVNC_PACK_MASK_B >> RGX_BVNC_PACK_SHIFT_B))) == 0);
+	PVR_COMPAT_ASSERT((ui32V & (~(RGX_BVNC_PACK_MASK_V >> RGX_BVNC_PACK_SHIFT_V))) == 0);
+	PVR_COMPAT_ASSERT((ui32N & (~(RGX_BVNC_PACK_MASK_N >> RGX_BVNC_PACK_SHIFT_N))) == 0);
+	PVR_COMPAT_ASSERT((ui32C & (~(RGX_BVNC_PACK_MASK_C >> RGX_BVNC_PACK_SHIFT_C))) == 0);
+
+	return (((IMG_UINT64)ui32B << RGX_BVNC_PACK_SHIFT_B) |
+			((IMG_UINT64)ui32V << RGX_BVNC_PACK_SHIFT_V) |
+			((IMG_UINT64)ui32N << RGX_BVNC_PACK_SHIFT_N) |
+			((IMG_UINT64)ui32C << RGX_BVNC_PACK_SHIFT_C));
+}
+
+
+#endif /* RGX_COMPAT_BVNC_H */
+
+/******************************************************************************
+ End of file (rgx_compat_bvnc.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fw_info.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fw_info.h
new file mode 100644
index 0000000..6b0c986
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fw_info.h
@@ -0,0 +1,129 @@
+/*************************************************************************/ /*!
+@File
+@Title          FW image information
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Structures describing the layout information appended to the
+                firmware binary image
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (RGX_FW_INFO_H)
+#define RGX_FW_INFO_H
+
+#include "img_types.h"
+#include "rgx_common.h"
+
+/*
+ * Firmware binary block unit in bytes.
+ * Raw data stored in FW binary will be aligned to this size.
+ */
+#define FW_BLOCK_SIZE 4096L
+
+typedef enum
+{
+	META_CODE = 0,
+	META_PRIVATE_DATA,
+	META_COREMEM_CODE,
+	META_COREMEM_DATA,
+	MIPS_CODE,
+	MIPS_EXCEPTIONS_CODE,
+	MIPS_BOOT_CODE,
+	MIPS_PRIVATE_DATA,
+	MIPS_BOOT_DATA,
+	MIPS_STACK,
+} RGX_FW_SECTION_ID;
+
+typedef enum
+{
+	NONE = 0,
+	FW_CODE,
+	FW_DATA,
+	FW_COREMEM_CODE,
+	FW_COREMEM_DATA
+} RGX_FW_SECTION_TYPE;
+
+
+/*
+ * FW binary format with FW info attached:
+ *
+ *          Contents        Offset
+ *     +-----------------+
+ *     |                 |    0
+ *     |                 |
+ *     | Original binary |
+ *     |      file       |
+ *     |   (.ldr/.elf)   |
+ *     |                 |
+ *     |                 |
+ *     +-----------------+
+ *     | FW info header  |  FILE_SIZE - 4K
+ *     +-----------------+
+ *     |                 |
+ *     | FW layout table |
+ *     |                 |
+ *     +-----------------+
+ *                          FILE_SIZE
+ */
+
+#define FW_INFO_VERSION  (1)
+
+typedef struct
+{
+	IMG_UINT32 ui32InfoVersion;      /* FW info version */
+	IMG_UINT32 ui32HeaderLen;        /* Header length */
+	IMG_UINT32 ui32LayoutEntryNum;   /* Number of entries in the layout table */
+	IMG_UINT32 ui32LayoutEntrySize;  /* Size of an entry in the layout table */
+	IMG_UINT64 RGXFW_ALIGN ui64BVNC; /* BVNC */
+	IMG_UINT32 ui32Flags;            /* Compatibility flags */
+} RGX_FW_INFO_HEADER;
+
+typedef struct
+{
+	RGX_FW_SECTION_ID eId;
+	RGX_FW_SECTION_TYPE eType;
+	IMG_UINT32 ui32BaseAddr;
+	IMG_UINT32 ui32MaxSize;
+	IMG_UINT32 ui32AllocSize;
+	IMG_UINT32 ui32AllocOffset;
+} RGX_FW_LAYOUT_ENTRY;
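+/* Example (illustrative only): given the layout pictured above, a loader
+ * that has read the whole binary into memory can locate the info header at
+ * FILE_SIZE - FW_BLOCK_SIZE, and the layout table that follows it.
+ * pcFwImage and uiFileSize are hypothetical names:
+ *
+ *   const RGX_FW_INFO_HEADER *psHeader =
+ *       (const RGX_FW_INFO_HEADER *)(pcFwImage + uiFileSize - FW_BLOCK_SIZE);
+ *   const RGX_FW_LAYOUT_ENTRY *psTable =
+ *       (const RGX_FW_LAYOUT_ENTRY *)((const IMG_CHAR *)psHeader +
+ *                                     psHeader->ui32HeaderLen);
+ */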
+
+#endif /*  RGX_FW_INFO_H */
+
+/******************************************************************************
+ End of file (rgx_fw_info.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_alignchecks.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_alignchecks.h
new file mode 100644
index 0000000..2dc6b56
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_alignchecks.h
@@ -0,0 +1,173 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX fw interface alignment checks
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Checks to avoid misalignment in RGX FW data structures
+                shared with the host
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (RGX_FWIF_ALIGNCHECKS_H)
+#define RGX_FWIF_ALIGNCHECKS_H
+
+/* for the offsetof macro */
+#if defined(__KERNEL__) && defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+/*!
+ ******************************************************************************
+ * Alignment UM/FW checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_UM_MAX 128U
+
+#define RGXFW_ALIGN_CHECKS_INIT0						\
+		sizeof(RGXFWIF_TRACEBUF),						\
+		offsetof(RGXFWIF_TRACEBUF, ui32LogType),		\
+		offsetof(RGXFWIF_TRACEBUF, sTraceBuf),			\
+		offsetof(RGXFWIF_TRACEBUF, aui32HwrDmLockedUpCount),	\
+		offsetof(RGXFWIF_TRACEBUF, aui32HwrDmOverranCount),	\
+		offsetof(RGXFWIF_TRACEBUF, aui32HwrDmRecoveredCount),	\
+		offsetof(RGXFWIF_TRACEBUF, aui32HwrDmFalseDetectCount),	\
+														\
+		/* RGXFWIF_CMDTA checks */						\
+		sizeof(RGXFWIF_CMDTA),							\
+		offsetof(RGXFWIF_CMDTA, sTARegs),				\
+														\
+		/* RGXFWIF_CMD3D checks */						\
+		sizeof(RGXFWIF_CMD3D),							\
+		offsetof(RGXFWIF_CMD3D, s3DRegs),				\
+														\
+		/* RGXFWIF_CMDTRANSFER checks */                \
+		sizeof(RGXFWIF_CMDTRANSFER),                    \
+		offsetof(RGXFWIF_CMDTRANSFER, sTransRegs),      \
+														\
+														\
+		/* RGXFWIF_CMD_COMPUTE checks */				\
+		sizeof(RGXFWIF_CMD_COMPUTE),					\
+		offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs),		\
+									\
+		sizeof(RGXFWIF_FREELIST), \
+		offsetof(RGXFWIF_FREELIST, psFreeListDevVAddr),\
+		offsetof(RGXFWIF_FREELIST, ui32MaxPages),\
+		offsetof(RGXFWIF_FREELIST, ui32CurrentPages),\
+		offsetof(RGXFWIF_FREELIST, ui32HWRCounter),\
+									\
+		sizeof(RGXFWIF_RENDER_TARGET),\
+		offsetof(RGXFWIF_RENDER_TARGET, psVHeapTableDevVAddr), \
+							\
+		sizeof(RGXFWIF_HWRTDATA), \
+		offsetof(RGXFWIF_HWRTDATA, psPMMListDevVAddr), \
+		offsetof(RGXFWIF_HWRTDATA, apsFreeLists),\
+		offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase), \
+		offsetof(RGXFWIF_HWRTDATA, psParentRenderTarget), \
+		offsetof(RGXFWIF_HWRTDATA, eState), \
+		offsetof(RGXFWIF_HWRTDATA, ui32NumPartialRenders), \
+							\
+		sizeof(RGXFWIF_HWPERF_CTL_BLK), \
+		offsetof(RGXFWIF_HWPERF_CTL_BLK, aui64CounterCfg), \
+\
+		sizeof(RGXFWIF_HWPERF_CTL), \
+		offsetof(RGXFWIF_HWPERF_CTL, SelCntr)
+
+#if defined(RGX_FEATURE_TLA)
+#define RGXFW_ALIGN_CHECKS_INIT1                   \
+		RGXFW_ALIGN_CHECKS_INIT0,                  \
+		/* RGXFWIF_CMD2D checks */                 \
+		sizeof(RGXFWIF_CMD2D),                     \
+		offsetof(RGXFWIF_CMD2D, s2DRegs)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT1		RGXFW_ALIGN_CHECKS_INIT0
+#endif	/* RGX_FEATURE_TLA */
+
+
+#if defined(RGX_FEATURE_FASTRENDER_DM)
+#define RGXFW_ALIGN_CHECKS_INIT                    \
+		RGXFW_ALIGN_CHECKS_INIT1,                  \
+		/* RGXFWIF_CMDTDM checks */                \
+		sizeof(RGXFWIF_CMDTDM),                    \
+		offsetof(RGXFWIF_CMDTDM, sTDMRegs)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT		RGXFW_ALIGN_CHECKS_INIT1
+#endif /* RGX_FEATURE_FASTRENDER_DM */
+
+
+
+/*!
+ ******************************************************************************
+ * Alignment KM checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_INIT_KM                                           \
+		sizeof(RGXFWIF_INIT),                                        \
+		offsetof(RGXFWIF_INIT, sFaultPhysAddr),                      \
+		offsetof(RGXFWIF_INIT, sPDSExecBase),                        \
+		offsetof(RGXFWIF_INIT, sUSCExecBase),                        \
+		offsetof(RGXFWIF_INIT, psKernelCCBCtl),                      \
+		offsetof(RGXFWIF_INIT, psKernelCCB),                         \
+		offsetof(RGXFWIF_INIT, psFirmwareCCBCtl),                    \
+		offsetof(RGXFWIF_INIT, psFirmwareCCB),                       \
+		offsetof(RGXFWIF_INIT, asSigBufCtl),                         \
+		offsetof(RGXFWIF_INIT, sTraceBufCtl),                        \
+		offsetof(RGXFWIF_INIT, sRGXCompChecks),                      \
+		                                                             \
+		/* RGXFWIF_FWRENDERCONTEXT checks */                         \
+		sizeof(RGXFWIF_FWRENDERCONTEXT),                             \
+		offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),               \
+		offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),               \
+		                                                             \
+		sizeof(RGXFWIF_FWCOMMONCONTEXT),                             \
+		offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext),           \
+		offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode),                 \
+		offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB),                    \
+		                                                             \
+		sizeof(RGXFWIF_MMUCACHEDATA),                                \
+		offsetof(RGXFWIF_MMUCACHEDATA, sMemoryContext),              \
+		offsetof(RGXFWIF_MMUCACHEDATA, ui32Flags),                   \
+		offsetof(RGXFWIF_MMUCACHEDATA, sMMUCacheSync),               \
+		offsetof(RGXFWIF_MMUCACHEDATA, ui16MMUCacheSyncUpdateValue)
+
+
+#endif /*  RGX_FWIF_ALIGNCHECKS_H */
+
+/******************************************************************************
+ End of file (rgx_fwif_alignchecks.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_hwperf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_hwperf.h
new file mode 100644
index 0000000..b3e1cd2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_hwperf.h
@@ -0,0 +1,243 @@
+/*************************************************************************/ /*!
+@File           rgx_fwif_hwperf.h
+@Title          RGX HWPerf support
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared header between RGX firmware and Init process
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_FWIF_HWPERF_H
+#define RGX_FWIF_HWPERF_H
+
+#include "rgx_fwif_shared.h"
+#include "rgx_hwperf.h"
+#include "rgxdefs_km.h"
+
+
+/*****************************************************************************/
+
+/* Structure to hold a block's parameters for passing between the BG context
+ * and the IRQ context when applying a configuration request. */
+typedef struct
+{
+	IMG_BOOL                bValid;
+	IMG_BOOL                bEnabled;
+	IMG_UINT32              eBlockID;
+	IMG_UINT32              uiCounterMask;
+	IMG_UINT64  RGXFW_ALIGN aui64CounterCfg[RGX_CNTBLK_COUNTERS_MAX];
+}  RGXFWIF_HWPERF_CTL_BLK;
+
+/* Structure used to hold the configuration of the non-mux counter blocks */
+typedef struct
+{
+	IMG_UINT32            ui32NumSelectedCounters;
+	IMG_UINT32            aui32SelectedCountersIDs[RGX_HWPERF_MAX_CUSTOM_CNTRS];
+} RGXFW_HWPERF_SELECT;
+
+/* Structure to hold the whole configuration request details for all blocks.
+ * The block masks and counts are used to optimise reading of this data. */
+typedef struct
+{
+	IMG_UINT32                         ui32HWPerfCtlFlags;
+
+	IMG_UINT32                         ui32SelectedCountersBlockMask;
+	RGXFW_HWPERF_SELECT RGXFW_ALIGN    SelCntr[RGX_HWPERF_MAX_CUSTOM_BLKS];
+
+	IMG_UINT32                         ui32EnabledBlksCount;
+	RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[RGX_HWPERF_MAX_DEFINED_BLKS];
+} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL;
+
+/* NOTE: The switch statement in this function must be kept in sync with the
+ * RGX_HWPERF_CNTBLK_ID enumeration defined in rgx_hwperf.h; ASSERTs may
+ * result if it is not.
+ * The function performs a hash lookup to obtain a handle on a block's global
+ * configuration store from its block ID.
+ */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(rgxfw_hwperf_get_block_ctl)
+#endif
+static INLINE RGXFWIF_HWPERF_CTL_BLK* rgxfw_hwperf_get_block_ctl(
+		RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData)
+{
+	IMG_UINT32 ui32Idx;
+
+	/* Hash the block ID into a control configuration array index */
+	switch (eBlockID)
+	{
+		case RGX_CNTBLK_ID_TA:
+		case RGX_CNTBLK_ID_RASTER:
+		case RGX_CNTBLK_ID_HUB:
+		case RGX_CNTBLK_ID_TORNADO:
+		case RGX_CNTBLK_ID_JONES:
+		case RGX_CNTBLK_ID_BF:
+		case RGX_CNTBLK_ID_BT:
+		case RGX_CNTBLK_ID_RT:
+		case RGX_CNTBLK_ID_SH:
+		{
+			ui32Idx = eBlockID;
+			break;
+		}
+		case RGX_CNTBLK_ID_TPU_MCU0:
+		case RGX_CNTBLK_ID_TPU_MCU1:
+		case RGX_CNTBLK_ID_TPU_MCU2:
+		case RGX_CNTBLK_ID_TPU_MCU3:
+		case RGX_CNTBLK_ID_TPU_MCU4:
+		case RGX_CNTBLK_ID_TPU_MCU5:
+		case RGX_CNTBLK_ID_TPU_MCU6:
+		case RGX_CNTBLK_ID_TPU_MCU7:
+		{
+			ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		case RGX_CNTBLK_ID_USC0:
+		case RGX_CNTBLK_ID_USC1:
+		case RGX_CNTBLK_ID_USC2:
+		case RGX_CNTBLK_ID_USC3:
+		case RGX_CNTBLK_ID_USC4:
+		case RGX_CNTBLK_ID_USC5:
+		case RGX_CNTBLK_ID_USC6:
+		case RGX_CNTBLK_ID_USC7:
+		case RGX_CNTBLK_ID_USC8:
+		case RGX_CNTBLK_ID_USC9:
+		case RGX_CNTBLK_ID_USC10:
+		case RGX_CNTBLK_ID_USC11:
+		case RGX_CNTBLK_ID_USC12:
+		case RGX_CNTBLK_ID_USC13:
+		case RGX_CNTBLK_ID_USC14:
+		case RGX_CNTBLK_ID_USC15:
+		{
+			ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		case RGX_CNTBLK_ID_TEXAS0:
+		case RGX_CNTBLK_ID_TEXAS1:
+		case RGX_CNTBLK_ID_TEXAS2:
+		case RGX_CNTBLK_ID_TEXAS3:
+		case RGX_CNTBLK_ID_TEXAS4:
+		case RGX_CNTBLK_ID_TEXAS5:
+		case RGX_CNTBLK_ID_TEXAS6:
+		case RGX_CNTBLK_ID_TEXAS7:
+		{
+			ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		case RGX_CNTBLK_ID_RASTER0:
+		case RGX_CNTBLK_ID_RASTER1:
+		case RGX_CNTBLK_ID_RASTER2:
+		case RGX_CNTBLK_ID_RASTER3:
+		{
+			ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+						RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		case RGX_CNTBLK_ID_BLACKPEARL0:
+		case RGX_CNTBLK_ID_BLACKPEARL1:
+		case RGX_CNTBLK_ID_BLACKPEARL2:
+		case RGX_CNTBLK_ID_BLACKPEARL3:
+		{
+			ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+						RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		case RGX_CNTBLK_ID_PBE0:
+		case RGX_CNTBLK_ID_PBE1:
+		case RGX_CNTBLK_ID_PBE2:
+		case RGX_CNTBLK_ID_PBE3:
+		case RGX_CNTBLK_ID_PBE4:
+		case RGX_CNTBLK_ID_PBE5:
+		case RGX_CNTBLK_ID_PBE6:
+		case RGX_CNTBLK_ID_PBE7:
+		case RGX_CNTBLK_ID_PBE8:
+		case RGX_CNTBLK_ID_PBE9:
+		case RGX_CNTBLK_ID_PBE10:
+		case RGX_CNTBLK_ID_PBE11:
+		case RGX_CNTBLK_ID_PBE12:
+		case RGX_CNTBLK_ID_PBE13:
+		case RGX_CNTBLK_ID_PBE14:
+		case RGX_CNTBLK_ID_PBE15:
+		{
+			ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+						RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+						RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		case RGX_CNTBLK_ID_BX_TU0:
+		case RGX_CNTBLK_ID_BX_TU1:
+		case RGX_CNTBLK_ID_BX_TU2:
+		case RGX_CNTBLK_ID_BX_TU3:
+		{
+			ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+						RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+						RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) +
+						RGX_CNTBLK_INDIRECT_COUNT(PBE, 15) +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		default:
+		{
+			ui32Idx = RGX_HWPERF_MAX_DEFINED_BLKS;
+			break;
+		}
+	}
+	if (ui32Idx >= RGX_HWPERF_MAX_DEFINED_BLKS)
+	{
+		return NULL;
+	}
+	return &psHWPerfInitData->sBlkCfg[ui32Idx];
+}
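+/* Example (illustrative only): looking up and validating a block's
+ * configuration store before use; psHWPerfInitData is a pointer to the
+ * shared RGXFWIF_HWPERF_CTL instance:
+ *
+ *   RGXFWIF_HWPERF_CTL_BLK *psBlkCfg =
+ *       rgxfw_hwperf_get_block_ctl(RGX_CNTBLK_ID_USC0, psHWPerfInitData);
+ *   if (psBlkCfg != NULL && psBlkCfg->bValid && psBlkCfg->bEnabled)
+ *   {
+ *       // apply aui64CounterCfg for this block
+ *   }
+ */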
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_km.h
new file mode 100644
index 0000000..bff1bdb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_km.h
@@ -0,0 +1,1952 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware interface structures used by pvrsrvkm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware interface structures used by pvrsrvkm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_FWIF_KM_H)
+#define RGX_FWIF_KM_H
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+#include "rgxdefs_km.h"
+#include "dllist.h"
+#include "rgx_hwperf.h"
+
+
+/*************************************************************************/ /*!
+ Logging type
+*/ /**************************************************************************/
+#define RGXFWIF_LOG_TYPE_NONE			0x00000000U
+#define RGXFWIF_LOG_TYPE_TRACE			0x00000001U
+#define RGXFWIF_LOG_TYPE_GROUP_MAIN		0x00000002U
+#define RGXFWIF_LOG_TYPE_GROUP_MTS		0x00000004U
+#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP	0x00000008U
+#define RGXFWIF_LOG_TYPE_GROUP_CSW		0x00000010U
+#define RGXFWIF_LOG_TYPE_GROUP_BIF		0x00000020U
+#define RGXFWIF_LOG_TYPE_GROUP_PM		0x00000040U
+#define RGXFWIF_LOG_TYPE_GROUP_RTD		0x00000080U
+#define RGXFWIF_LOG_TYPE_GROUP_SPM		0x00000100U
+#define RGXFWIF_LOG_TYPE_GROUP_POW		0x00000200U
+#define RGXFWIF_LOG_TYPE_GROUP_HWR		0x00000400U
+#define RGXFWIF_LOG_TYPE_GROUP_HWP		0x00000800U
+#define RGXFWIF_LOG_TYPE_GROUP_RPM		0x00001000U
+#define RGXFWIF_LOG_TYPE_GROUP_DMA		0x00002000U
+#define RGXFWIF_LOG_TYPE_GROUP_MISC		0x00004000U
+#define RGXFWIF_LOG_TYPE_GROUP_DEBUG	0x80000000U
+#define RGXFWIF_LOG_TYPE_GROUP_MASK		0x80007FFEU
+#define RGXFWIF_LOG_TYPE_MASK			0x80007FFFU
+
+/* String used in pvrdebug -h output */
+#define RGXFWIF_LOG_GROUPS_STRING_LIST   "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug"
+
+/* Table entry to map log group strings to log type value */
+typedef struct {
+	const IMG_CHAR* pszLogGroupName;
+	IMG_UINT32      ui32LogGroupType;
+} RGXFWIF_LOG_GROUP_MAP_ENTRY;
+
+/*
+  Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup
+  table where needed. Keep log group names short, no more than 20 chars.
+*/
+#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none",    RGXFWIF_LOG_TYPE_NONE }, \
+                                         { "main",    RGXFWIF_LOG_TYPE_GROUP_MAIN }, \
+                                         { "mts",     RGXFWIF_LOG_TYPE_GROUP_MTS }, \
+                                         { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \
+                                         { "csw",     RGXFWIF_LOG_TYPE_GROUP_CSW }, \
+                                         { "bif",     RGXFWIF_LOG_TYPE_GROUP_BIF }, \
+                                         { "pm",      RGXFWIF_LOG_TYPE_GROUP_PM }, \
+                                         { "rtd",     RGXFWIF_LOG_TYPE_GROUP_RTD }, \
+                                         { "spm",     RGXFWIF_LOG_TYPE_GROUP_SPM }, \
+                                         { "pow",     RGXFWIF_LOG_TYPE_GROUP_POW }, \
+                                         { "hwr",     RGXFWIF_LOG_TYPE_GROUP_HWR }, \
+                                         { "hwp",     RGXFWIF_LOG_TYPE_GROUP_HWP }, \
+                                         { "rpm",     RGXFWIF_LOG_TYPE_GROUP_RPM }, \
+                                         { "dma",     RGXFWIF_LOG_TYPE_GROUP_DMA }, \
+                                         { "misc",    RGXFWIF_LOG_TYPE_GROUP_MISC }, \
+                                         { "debug",   RGXFWIF_LOG_TYPE_GROUP_DEBUG }
+
+
+/* Used in print statements to display log group state, one %s per group defined */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC  "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+
+/* Used in a print statement to display log group state, one per group */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types)  (((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN)	?("main ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_MTS)		?("mts ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP)	?("cleanup ")	:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_CSW)		?("csw ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_BIF)		?("bif ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_PM)		?("pm ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_RTD)		?("rtd ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_SPM)		?("spm ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_POW)		?("pow ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_HWR)		?("hwr ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_HWP)		?("hwp ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_RPM)		?("rpm ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_DMA)		?("dma ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_MISC)	?("misc ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG)	?("debug ")		:(""))
+
+
+/************************************************************************
+* RGX FW signature checks
+************************************************************************/
+#define RGXFW_SIG_BUFFER_SIZE_MIN       (8192)
+
+/*!
+ ******************************************************************************
+ * Trace Buffer
+ *****************************************************************************/
+
+/*! Default size of RGXFWIF_TRACEBUF_SPACE in DWords */
+#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U
+#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+#define RGXFW_THREAD_NUM 2U
+#else
+#define RGXFW_THREAD_NUM 1U
+#endif
+
+#define RGXFW_POLL_TYPE_SET 0x80000000U
+
+typedef struct
+{
+	IMG_CHAR	szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
+	IMG_CHAR	szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
+	IMG_UINT32	ui32LineNum;
+} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF;
+
+typedef struct
+{
+	IMG_UINT32			ui32TracePointer;
+
+#if defined(RGX_FIRMWARE)
+	IMG_UINT32 *pui32RGXFWIfTraceBuffer;		/* To be used by firmware for writing into trace buffer */
+#else
+	RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer;
+#endif
+	IMG_PUINT32             pui32TraceBuffer;	/* To be used by host when reading from trace buffer */
+
+	RGXFWIF_FILE_INFO_BUF	sAssertBuf;
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE;
+
+
+#define RGXFWIF_FWFAULTINFO_MAX 	(8U)			/* Total number of FW fault logs stored */
+
+typedef struct
+{
+	IMG_UINT64 RGXFW_ALIGN	ui64CRTimer;
+	IMG_UINT64 RGXFW_ALIGN	ui64OSTimer;
+	IMG_UINT32 RGXFW_ALIGN	ui32Data;
+	IMG_UINT32 ui32Reserved;
+	RGXFWIF_FILE_INFO_BUF	sFaultBuf;
+} UNCACHED_ALIGN RGX_FWFAULTINFO;
+
+
+#define RGXFWIF_POW_STATES \
+  X(RGXFWIF_POW_OFF)			/* idle and handshaked with the host (ready for full power down) */ \
+  X(RGXFWIF_POW_ON)				/* running HW commands */ \
+  X(RGXFWIF_POW_FORCED_IDLE)	/* forced idle */ \
+  X(RGXFWIF_POW_IDLE)			/* idle waiting for host handshake */
+
+typedef enum
+{
+#define X(NAME) NAME,
+	RGXFWIF_POW_STATES
+#undef X
+} RGXFWIF_POW_STATE;
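+/* Example (illustrative only): the X-macro list above can also expand to a
+ * matching name table, e.g. for debug prints:
+ *
+ *   #define X(NAME) #NAME,
+ *   static const IMG_CHAR *const apszPowStateNames[] = { RGXFWIF_POW_STATES };
+ *   #undef X
+ */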
+
+/* Firmware HWR states */
+#define RGXFWIF_HWR_HARDWARE_OK			(0x1U << 0U)	/*!< Indicates whether the HW state is ok or locked up */
+#define RGXFWIF_HWR_ANALYSIS_DONE		(0x1U << 2U)	/*!< The analysis of a GPU lockup has been performed */
+#define RGXFWIF_HWR_GENERAL_LOCKUP		(0x1U << 3U)	/*!< A DM unrelated lockup has been detected */
+#define RGXFWIF_HWR_DM_RUNNING_OK		(0x1U << 4U)	/*!< At least one DM is running without being close to a lockup */
+#define RGXFWIF_HWR_DM_STALLING			(0x1U << 5U)	/*!< At least one DM is close to lockup */
+#define RGXFWIF_HWR_FW_FAULT			(0x1U << 6U)	/*!< The FW has faulted and needs to restart */
+#define RGXFWIF_HWR_RESTART_REQUESTED	(0x1U << 7U)	/*!< The FW has requested the host to restart it */
+typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS;
+
+/* Firmware per-DM HWR states */
+#define RGXFWIF_DM_STATE_WORKING 					(0x00U)		/*!< DM is working if all flags are cleared */
+#define RGXFWIF_DM_STATE_READY_FOR_HWR 				(IMG_UINT32_C(0x1) << 0)	/*!< DM is idle and ready for HWR */
+#define RGXFWIF_DM_STATE_NEEDS_SKIP					(IMG_UINT32_C(0x1) << 2)	/*!< DM need to skip to next cmd before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP			(IMG_UINT32_C(0x1) << 3)	/*!< DM need partial render cleanup before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR			(IMG_UINT32_C(0x1) << 4)	/*!< DM need to increment Recovery Count once fully recovered */
+#define RGXFWIF_DM_STATE_GUILTY_LOCKUP				(IMG_UINT32_C(0x1) << 5)	/*!< DM was identified as locking up and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP			(IMG_UINT32_C(0x1) << 6)	/*!< DM was innocently affected by another lockup which caused HWR */
+#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING			(IMG_UINT32_C(0x1) << 7)	/*!< DM was identified as over-running and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING		(IMG_UINT32_C(0x1) << 8)	/*!< DM was innocently affected by another DM over-running which caused HWR */
+#define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH		(IMG_UINT32_C(0x1) << 9)	/*!< DM was forced into HWR as it delayed more important workloads */
+
+/* Per-OS Fw States */
+typedef enum
+{
+	RGXFW_OS_STATE_STOPPED,		/*!< OS in this state is ignored by the FW */
+	RGXFW_OS_STATE_READY,		/*!< OS is allowed to run by the Host/Hypervisor but is uninitialised */
+	RGXFW_OS_STATE_ACTIVE,		/*!< OS is fully up and running */
+	RGXFW_OS_STATE_OFFLOADING	/*!< OS is in a transitory state, finishing its tasks before stopping */
+} RGXFWIF_OS_STATE;
+
+typedef struct
+{
+	IMG_UINT			bfOsState		: 3;
+	IMG_UINT			bfFLOk			: 1;
+	IMG_UINT			bfFLGrowPending	: 1;
+	IMG_UINT			bfIsolatedOS	: 1;
+	IMG_UINT			bfReserved		: 26;
+} RGXFWIF_PER_OS_STATES;
+
+typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS;
+
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+#define RGXFWIF_TRACEBUFCFG_SLR_LOG (0x1U << 0)
+#define PVR_SLR_LOG_ENTRIES 10
+#define PVR_SLR_LOG_STRLEN  30 // MAX_CLIENT_CCB_NAME not visible to this header
+
+typedef struct
+{
+	IMG_UINT64 RGXFW_ALIGN	ui64Timestamp;
+	IMG_UINT32				ui32FWCtxAddr;
+	IMG_UINT32				ui32NumUFOs;
+	IMG_CHAR				aszCCBName[PVR_SLR_LOG_STRLEN];
+} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY;
+#endif
+
+typedef struct
+{
+	IMG_UINT32				ui32LogType;
+	volatile RGXFWIF_POW_STATE		ePowState;
+	RGXFWIF_TRACEBUF_SPACE	sTraceBuf[RGXFW_THREAD_NUM];
+	IMG_UINT32              ui32TraceBufSizeInDWords; /* Member initialised only when sTraceBuf is actually allocated
+	                                                   * (in RGXTraceBufferInitOnDemandResources) */
+
+	IMG_UINT32				aui32HwrDmLockedUpCount[RGXFWIF_DM_DEFAULT_MAX];
+	IMG_UINT32				aui32HwrDmOverranCount[RGXFWIF_DM_DEFAULT_MAX];
+	IMG_UINT32				aui32HwrDmRecoveredCount[RGXFWIF_DM_DEFAULT_MAX];
+	IMG_UINT32				aui32HwrDmFalseDetectCount[RGXFWIF_DM_DEFAULT_MAX];
+	IMG_UINT32				ui32HwrCounter;
+
+	IMG_UINT32				aui32CrPollAddr[RGXFW_THREAD_NUM];
+	IMG_UINT32				aui32CrPollMask[RGXFW_THREAD_NUM];
+
+	RGXFWIF_HWR_STATEFLAGS		ui32HWRStateFlags;
+	RGXFWIF_HWR_RECOVERYFLAGS	aui32HWRRecoveryFlags[RGXFWIF_DM_DEFAULT_MAX];
+
+	volatile IMG_UINT32		ui32HWPerfRIdx;
+	volatile IMG_UINT32		ui32HWPerfWIdx;
+	volatile IMG_UINT32		ui32HWPerfWrapCount;
+	IMG_UINT32				ui32HWPerfSize;       /* Constant after setup, needed in FW */
+	IMG_UINT32				ui32HWPerfDropCount;  /* The number of times the FW drops a packet due to buffer full */
+
+	/* These next three items are only valid at runtime when the FW is built
+	 * with RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined
+	 * in rgxfw_hwperf.c */
+	IMG_UINT32				ui32HWPerfUt;         /* Buffer utilisation, high watermark of bytes in use */
+	IMG_UINT32				ui32FirstDropOrdinal; /* The ordinal of the first packet the FW dropped */
+	IMG_UINT32				ui32LastDropOrdinal;  /* The ordinal of the last packet the FW dropped */
+#if !defined(RGX_FW_IRQ_OS_COUNTERS)
+	volatile IMG_UINT32			aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads */
+#endif
+	IMG_UINT32				ui32KCCBCmdsExecuted;
+	IMG_UINT64 RGXFW_ALIGN			ui64StartIdleTime;
+	IMG_UINT32				ui32PowMonEstimate;	/* Non-volatile power monitoring results:
+													   static power (by default)
+													   energy count (PVR_POWER_MONITOR_DYNAMIC_ENERGY) */
+#define RGXFWIF_MAX_PCX 16U
+	IMG_UINT32				ui32T1PCX[RGXFWIF_MAX_PCX];
+	IMG_UINT32				ui32T1PCXWOff;
+
+	RGXFWIF_PER_OS_STATES	sPerOsStateMirror[RGXFW_NUM_OS];	/*!< State flags for each Operating System mirrored from FW coremem */
+
+	IMG_UINT32				ui32MMUFlushCounter;
+
+	RGX_FWFAULTINFO			sFaultInfo[RGXFWIF_FWFAULTINFO_MAX];
+	IMG_UINT32				ui32FWFaults;
+
+	/* Markers to signal that the host should perform a full sync check. */
+	IMG_UINT32				ui32FWSyncCheckMark;
+	IMG_UINT32				ui32HostSyncCheckMark;
+
+#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK)
+#define RGXFWIF_STATS_FRAMEWORK_LINESIZE	(8)
+#define RGXFWIF_STATS_FRAMEWORK_MAX			(2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE)
+	IMG_UINT32 RGXFW_ALIGN	aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX];
+#endif
+
+	IMG_UINT32              ui32TracebufFlags; /*!< Compatibility and other flags */
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+	IMG_UINT32				ui32ForcedUpdatesRequested;
+	IMG_UINT8				ui8SLRLogWp;
+	RGXFWIF_SLR_ENTRY		sSLRLogFirst;
+	RGXFWIF_SLR_ENTRY		sSLRLog[PVR_SLR_LOG_ENTRIES];
+	IMG_UINT64 RGXFW_ALIGN	ui64LastForcedUpdateTime;
+#endif
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF;
+
+
+/*!
+ ******************************************************************************
+ * HWR Data
+ *****************************************************************************/
+typedef enum
+{
+	RGX_HWRTYPE_UNKNOWNFAILURE = 0,
+	RGX_HWRTYPE_OVERRUN        = 1,
+	RGX_HWRTYPE_POLLFAILURE    = 2,
+	RGX_HWRTYPE_BIF0FAULT      = 3,
+	RGX_HWRTYPE_BIF1FAULT      = 4,
+	RGX_HWRTYPE_TEXASBIF0FAULT = 5,
+	RGX_HWRTYPE_MMUFAULT       = 6,
+	RGX_HWRTYPE_MMUMETAFAULT   = 7,
+} RGX_HWRTYPE;
+
+#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) ((eHWRType == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1)
+
+#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((eHWRType == RGX_HWRTYPE_BIF0FAULT      ||       \
+                                                   eHWRType == RGX_HWRTYPE_BIF1FAULT      ||       \
+                                                   eHWRType == RGX_HWRTYPE_TEXASBIF0FAULT ||       \
+                                                   eHWRType == RGX_HWRTYPE_MMUFAULT       ||       \
+                                                   eHWRType == RGX_HWRTYPE_MMUMETAFAULT) ? 1 : 0)
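+
+/* Usage sketch (hypothetical caller, illustration only): the helpers above
+ * classify an RGX_HWRTYPE value, e.g. when decoding an HWR log entry.
+ */
+#if 0	/* example only */
+static void ExampleClassifyHWRType(RGX_HWRTYPE eHWRType)
+{
+	if (RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType))
+	{
+		/* Fault types carry BIF/MMU info; BIF faults also have a bank */
+		IMG_UINT32 ui32BIFBank = RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType);
+		(void)ui32BIFBank;
+	}
+}
+#endif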
+
+typedef struct
+{
+	IMG_UINT64	RGXFW_ALIGN		ui64BIFReqStatus;
+	IMG_UINT64	RGXFW_ALIGN		ui64BIFMMUStatus;
+	IMG_UINT64	RGXFW_ALIGN		ui64PCAddress; /*!< phys address of the page catalogue */
+	IMG_UINT64	RGXFW_ALIGN		ui64Reserved;
+} RGX_BIFINFO;
+
+typedef struct
+{
+	IMG_UINT64	RGXFW_ALIGN		ui64MMUStatus;
+	IMG_UINT64	RGXFW_ALIGN		ui64PCAddress; /*!< phys address of the page catalogue */
+	IMG_UINT64	RGXFW_ALIGN		ui64Reserved;
+} RGX_MMUINFO;
+
+typedef struct
+{
+	IMG_UINT32	ui32ThreadNum;
+	IMG_UINT32 	ui32CrPollAddr;
+	IMG_UINT32 	ui32CrPollMask;
+	IMG_UINT32 	ui32CrPollLastValue;
+	IMG_UINT64 	RGXFW_ALIGN ui64Reserved;
+} UNCACHED_ALIGN RGX_POLLINFO;
+
+typedef struct
+{
+	union
+	{
+		RGX_BIFINFO  sBIFInfo;
+		RGX_MMUINFO  sMMUInfo;
+		RGX_POLLINFO sPollInfo;
+	} uHWRData;
+
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimer;
+	IMG_UINT64 RGXFW_ALIGN ui64OSTimer;
+	IMG_UINT32             ui32FrameNum;
+	IMG_UINT32             ui32PID;
+	IMG_UINT32             ui32ActiveHWRTData;
+	IMG_UINT32             ui32HWRNumber;
+	IMG_UINT32             ui32EventStatus;
+	IMG_UINT32             ui32HWRRecoveryFlags;
+	RGX_HWRTYPE            eHWRType;
+	RGXFWIF_DM             eDM;
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick;
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart;
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish;
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady;
+	IMG_UINT64 RGXFW_ALIGN ui64Reserved[2];
+} UNCACHED_ALIGN RGX_HWRINFO;
+
+#define RGXFWIF_HWINFO_MAX_FIRST 8U							/* Number of first HWR logs recorded (never overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX_LAST 8U							/* Number of latest HWR logs (older logs are overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST)	/* Total number of HWR logs stored in a buffer */
+#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U)	/* Index of the last log in the HWR log buffer */
+typedef struct
+{
+	RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX];
+
+	IMG_UINT32	ui32FirstCrPollAddr[RGXFW_THREAD_NUM];
+	IMG_UINT32	ui32FirstCrPollMask[RGXFW_THREAD_NUM];
+	IMG_UINT32	ui32FirstCrPollLastValue[RGXFW_THREAD_NUM];
+	IMG_UINT32	ui32WriteIndex;
+	IMG_UINT32	ui32DDReqCount;
+	IMG_UINT32	ui32HWRInfoBufFlags; /* Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF;
+
+typedef enum
+{
+	RGX_ACTIVEPM_FORCE_OFF = 0,
+	RGX_ACTIVEPM_FORCE_ON = 1,
+	RGX_ACTIVEPM_DEFAULT = 2
+} RGX_ACTIVEPM_CONF;
+
+typedef enum
+{
+	RGX_RD_POWER_ISLAND_FORCE_OFF = 0,
+	RGX_RD_POWER_ISLAND_FORCE_ON = 1,
+	RGX_RD_POWER_ISLAND_DEFAULT = 2
+} RGX_RD_POWER_ISLAND_CONF;
+
+typedef enum
+{
+	RGX_META_T1_OFF   = 0x0,           /*!< No thread 1 running (unless 2nd thread is used for HWPerf) */
+	RGX_META_T1_MAIN  = 0x1,           /*!< Run the main thread 0 code on thread 1 (and vice versa if 2nd thread is used for HWPerf) */
+	RGX_META_T1_DUMMY = 0x2            /*!< Run dummy test code on thread 1 */
+} RGX_META_T1_CONF;
+
+/*!
+ ******************************************************************************
+ * Querying DM state
+ *****************************************************************************/
+
+typedef enum
+{
+	RGXFWIF_DM_STATE_NORMAL			= 0,
+	RGXFWIF_DM_STATE_LOCKEDUP		= 1
+} RGXFWIF_DM_STATE;
+
+typedef struct
+{
+	IMG_UINT16 ui16RegNum;				/*!< Register number */
+	IMG_UINT16 ui16IndirectRegNum;		/*!< Indirect register number (or 0 if not used) */
+	IMG_UINT16 ui16IndirectStartVal;	/*!< Start value for indirect register */
+	IMG_UINT16 ui16IndirectEndVal;		/*!< End value for indirect register */
+} RGXFW_REGISTER_LIST;
+
+
+#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN		(1U)
+#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN		(2U)
+#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN		(3U)
+#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN	(4U)
+
+/*!
+ ******************************************************************************
+ * RGX firmware Init Config Data
+ *****************************************************************************/
+#define RGXFWIF_INICFG_CTXSWITCH_TA_EN				(IMG_UINT32_C(0x1) << 0)
+#define RGXFWIF_INICFG_CTXSWITCH_3D_EN				(IMG_UINT32_C(0x1) << 1)
+#define RGXFWIF_INICFG_CTXSWITCH_CDM_EN				(IMG_UINT32_C(0x1) << 2)
+#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND			(IMG_UINT32_C(0x1) << 3)
+#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN			(IMG_UINT32_C(0x1) << 4)
+#define RGXFWIF_INICFG_POW_RASCALDUST				(IMG_UINT32_C(0x1) << 5)
+#define RGXFWIF_INICFG_HWPERF_EN					(IMG_UINT32_C(0x1) << 6)
+#define RGXFWIF_INICFG_HWR_EN						(IMG_UINT32_C(0x1) << 7)
+#define RGXFWIF_INICFG_CHECK_MLIST_EN				(IMG_UINT32_C(0x1) << 8)
+#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN 		(IMG_UINT32_C(0x1) << 9)
+#define RGXFWIF_INICFG_POLL_COUNTERS_EN				(IMG_UINT32_C(0x1) << 10)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT		(11)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX		(RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX << RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE	(RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE << RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST		(RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST << RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_MASK		(RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX |\
+                                                     RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE |\
+                                                     RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST)
+#define RGXFWIF_INICFG_REGCONFIG_EN					(IMG_UINT32_C(0x1) << 13)
+#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY		(IMG_UINT32_C(0x1) << 14)
+#define RGXFWIF_INICFG_HWP_DISABLE_FILTER			(IMG_UINT32_C(0x1) << 15)
+#define RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN			(IMG_UINT32_C(0x1) << 16)
+#define RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN		(IMG_UINT32_C(0x1) << 17)
+#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP			(IMG_UINT32_C(0x1) << 18)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT		(19)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST		(RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM		(RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW		(RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY	(RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK		(IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_METAT1_SHIFT					(22)
+#define RGXFWIF_INICFG_METAT1_MAIN					((IMG_UINT32)RGX_META_T1_MAIN  << RGXFWIF_INICFG_METAT1_SHIFT)
+#define RGXFWIF_INICFG_METAT1_DUMMY					((IMG_UINT32)RGX_META_T1_DUMMY << RGXFWIF_INICFG_METAT1_SHIFT)
+#define RGXFWIF_INICFG_METAT1_ENABLED				(RGXFWIF_INICFG_METAT1_MAIN | RGXFWIF_INICFG_METAT1_DUMMY)
+#define RGXFWIF_INICFG_METAT1_MASK					(RGXFWIF_INICFG_METAT1_ENABLED >> RGXFWIF_INICFG_METAT1_SHIFT)
+#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER		(IMG_UINT32_C(0x1) << 24)
+#define RGXFWIF_INICFG_WORKEST_V1					(IMG_UINT32_C(0x1) << 25)
+#define RGXFWIF_INICFG_WORKEST_V2					(IMG_UINT32_C(0x1) << 26)
+#define RGXFWIF_INICFG_PDVFS_V1						(IMG_UINT32_C(0x1) << 27)
+#define RGXFWIF_INICFG_PDVFS_V2						(IMG_UINT32_C(0x1) << 28)
+#define RGXFWIF_INICFG_DISABLE_PDP_EN				(IMG_UINT32_C(0x1) << 29)
+#define RGXFWIF_INICFG_ALL							(0x3FFFFFFFU)
+
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TDM          (0x1U <<  0)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TA           (0x1U <<  1)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_3D           (0x1U <<  2)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_CDM          (0x1U <<  3)
+#define RGXFWIF_INICFG_EXT_VALIDATE_IRQ             (0x1U <<  4)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_MASK         (RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TDM |\
+                                                     RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TA  |\
+                                                     RGXFWIF_INICFG_EXT_LOW_PRIO_CS_3D  |\
+                                                     RGXFWIF_INICFG_EXT_LOW_PRIO_CS_CDM |\
+                                                     RGXFWIF_INICFG_EXT_VALIDATE_IRQ)
+#define RGXFWIF_INICFG_EXT_FBCDC_V3_1_EN			(0x1U <<  5)
+#define RGXFWIF_INICFG_EXT_PDVFS_HOST_REACTIVE_TIMER (0x1U << 6)
+
+#define RGXFWIF_FILTCFG_TRUNCATE_HALF		(0x1U << 3)
+#define RGXFWIF_FILTCFG_TRUNCATE_INT		(0x1U << 2)
+#define RGXFWIF_FILTCFG_NEW_FILTER_MODE		(0x1U << 1)
+
+#define RGXFWIF_INICFG_CTXSWITCH_DM_ALL		(RGXFWIF_INICFG_CTXSWITCH_TA_EN | \
+											 RGXFWIF_INICFG_CTXSWITCH_3D_EN | \
+											 RGXFWIF_INICFG_CTXSWITCH_CDM_EN)
+
+#define RGXFWIF_INICFG_CTXSWITCH_CLRMSK		~(RGXFWIF_INICFG_CTXSWITCH_DM_ALL | \
+											 RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \
+											 RGXFWIF_INICFG_CTXSWITCH_SRESET_EN)
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+/* Unused registers re-purposed for storing counters of the Firmware's
+ * interrupts for each OS
+ */
+#define IRQ_COUNTER_STORAGE_REGS                        \
+		0x2028U, /* RGX_CR_PM_TA_MMU_FSTACK         */  \
+		0x2050U, /* RGX_CR_PM_3D_MMU_FSTACK         */  \
+		0x2030U, /* RGX_CR_PM_START_OF_MMU_TACONTEXT*/  \
+		0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/  \
+		0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/  \
+		0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/  \
+		0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/  \
+		0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/
+#endif
+
+#if defined(RGX_FIRMWARE)
+typedef DLLIST_NODE							RGXFWIF_DLLIST_NODE;
+#else
+typedef struct {RGXFWIF_DEV_VIRTADDR p;
+                RGXFWIF_DEV_VIRTADDR n;}	RGXFWIF_DLLIST_NODE;
+#endif
+
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_SIGBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_TRACEBUF;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_TBIBUF;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_HWPERFBUF;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_HWRINFOBUF;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_RUNTIME_CFG;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_GPU_UTIL_FWCB;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_REG_CFG;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_HWPERF_CTL;
+typedef RGXFWIF_DEV_VIRTADDR  PRGX_HWPERF_CONFIG_CNTBLK;
+typedef RGXFWIF_DEV_VIRTADDR  PRGX_HWPERF_SELECT_CUSTOM_CNTRS;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CCB;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_FWMEMCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_FWCOMMONCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_ZSBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_COMMONCTX_STATE;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_RF_CMD;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CORE_CLK_RATE;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_OS_CONFIG;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_COUNTERBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_FIRMWAREGCOVBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CCCB;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CCCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_FREELIST;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_HWRTDATA;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_TIMESTAMP_ADDR;
+
+
+/*!
+ * This number is used to represent an invalid page catalogue physical address
+ */
+#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
+
+/*!
+ * This number is used to represent unallocated page catalog base register
+ */
+#define RGXFW_BIF_INVALID_PCREG 0xFFFFFFFFU
+
+/*!
+    Firmware memory context.
+*/
+typedef struct
+{
+	IMG_DEV_PHYADDR			RGXFW_ALIGN sPCDevPAddr;	/*!< device physical address of context's page catalogue */
+	IMG_UINT32				uiPageCatBaseRegID;	/*!< associated page catalog base register (RGXFW_BIF_INVALID_PCREG == unallocated) */
+	IMG_UINT32				uiBreakpointAddr; /*!< breakpoint address */
+	IMG_UINT32				uiBPHandlerAddr; /*!< breakpoint handler address */
+	IMG_UINT32				uiBreakpointCtl; /*!< DM and enable control for BP */
+	IMG_UINT32				ui32FwMemCtxFlags; /*!< Compatibility and other flags */
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	IMG_UINT32              ui32OSid;
+	IMG_BOOL                bOSidAxiProt;
+#endif
+
+} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT;
+
+/*!
+ * 	FW context state flags
+ */
+#define RGXFWIF_CONTEXT_TAFLAGS_NEED_RESUME			(0x00000001U)
+#define RGXFWIF_CONTEXT_RENDERFLAGS_NEED_RESUME		(0x00000002U)
+#define RGXFWIF_CONTEXT_CDMFLAGS_NEED_RESUME		(0x00000004U)
+#define RGXFWIF_CONTEXT_TDMFLAGS_CONTEXT_STORED		(0x00000008U)
+#define RGXFWIF_CONTEXT_ALLFLAGS_NEED_RESUME		(0x0000000FU)
+
+#define RGXFWIF_CONTEXT_TDMFLAGS_HEADER_STALE		(0x00000010U)
+
+/*
+ * Fast scale blit renders can be divided into smaller slices. The maximum
+ * screen size is 8192x8192 pixels or 256x256 tiles. The blit is sliced
+ * into 512x512 pixel blits or 16x16 tiles. Therefore, there are at most
+ * 256 slices of 16x16 tiles, which means 8 bits are needed to count up to
+ * the slice that has been blitted so far.
+ */
+#define RGXFWIF_CONTEXT_SLICE_BLIT_X_MASK			(0x00000F00)
+#define RGXFWIF_CONTEXT_SLICE_BLIT_X_SHIFT			(8)
+#define RGXFWIF_CONTEXT_SLICE_BLIT_Y_MASK			(0x0000F000)
+#define RGXFWIF_CONTEXT_SLICE_BLIT_Y_SHIFT			(12)
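+
+/* Illustration (hypothetical helpers): a slice position is packed into the
+ * context state flags word using the masks above, 4 bits each for X and Y.
+ */
+#if 0	/* example only */
+#define EXAMPLE_SLICE_BLIT_X_GET(ui32Flags) \
+	(((ui32Flags) & RGXFWIF_CONTEXT_SLICE_BLIT_X_MASK) >> RGXFWIF_CONTEXT_SLICE_BLIT_X_SHIFT)
+#define EXAMPLE_SLICE_BLIT_Y_GET(ui32Flags) \
+	(((ui32Flags) & RGXFWIF_CONTEXT_SLICE_BLIT_Y_MASK) >> RGXFWIF_CONTEXT_SLICE_BLIT_Y_SHIFT)
+#endif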
+
+typedef struct
+{
+	/* FW-accessible TA state which must be written out to memory on context store */
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER;		 /* To store in mid-TA */
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER_Init;	 /* Initial value (in case it is 'lost' due to a lock-up) */
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VDM_BATCH;
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VBS_SO_PRIM0;
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VBS_SO_PRIM1;
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VBS_SO_PRIM2;
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VBS_SO_PRIM3;
+	IMG_UINT16	RGXFW_ALIGN ui16TACurrentIdx;
+
+	RGXFWIF_TAREGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs;
+
+	IMG_UINT32 ui32CtxStateFlags; /* Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_TACTX_STATE;
+
+typedef struct
+{
+	/* FW-accessible ISP state which must be written out to memory on context store */
+	IMG_UINT64	RGXFW_ALIGN u3DReg_PM_DEALLOCATED_MASK_STATUS;
+	IMG_UINT64	RGXFW_ALIGN u3DReg_PM_PDS_MTILEFREE_STATUS;
+	IMG_UINT32 ui32CtxStateFlags; /* Compatibility and other flags */
+	/* au3DReg_ISP_STORE should be the last element of the structure
+	 * as this is an array whose size is determined at runtime
+	 * after detecting the RGX core */
+	IMG_UINT32 au3DReg_ISP_STORE[];
+} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE;
+
+typedef struct
+{
+	IMG_BOOL	RGXFW_ALIGN	bBufferB;
+	IMG_UINT32 ui32CtxStateFlags; /* Compatibility and other flags */
+} RGXFWIF_COMPUTECTX_STATE;
+
+typedef struct RGXFWIF_FWCOMMONCONTEXT_
+{
+	/*
+		Used by bg and irq context
+	*/
+	/* CCB details for this firmware context */
+	PRGXFWIF_CCCB_CTL		psCCBCtl;				/*!< CCB control */
+	PRGXFWIF_CCCB			psCCB;					/*!< CCB base */
+	RGXFWIF_DMA_ADDR		sCCBMetaDMAAddr;
+
+	/*
+		Used by the bg context only
+	*/
+	RGXFWIF_DLLIST_NODE		RGXFW_ALIGN sWaitingNode;			/*!< List entry for the waiting list */
+	RGXFWIF_UFO				sLastFailedUFO;						/*!< UFO that last failed (or NULL) */
+
+	/*
+		Used by the irq context only
+	*/
+	RGXFWIF_DLLIST_NODE		sRunNode;				/*!< List entry for the run list */
+
+	PRGXFWIF_FWMEMCONTEXT	psFWMemContext;			/*!< Memory context */
+
+	/* Context suspend state */
+	PRGXFWIF_COMMONCTX_STATE	RGXFW_ALIGN psContextState;		/*!< TA/3D context suspend state, read/written by FW */
+
+	/* Framework state
+	 */
+	PRGXFWIF_RF_CMD		RGXFW_ALIGN psRFCmd;		/*!< Register updates for Framework */
+
+	/*
+	 * 	Flags e.g. for context switching
+	 */
+	IMG_UINT32				ui32FWComCtxFlags;
+	IMG_UINT32				ui32Priority;
+	IMG_UINT32				ui32PrioritySeqNum;
+
+	/* References to the host side originators */
+	IMG_UINT32				ui32ServerCommonContextID;			/*!< the Server Common Context */
+	IMG_UINT32				ui32PID;							/*!< associated process ID */
+
+	/* Statistic updates waiting to be passed back to the host... */
+	IMG_BOOL				bStatsPending;						/*!< True when some stats are pending */
+	IMG_INT32				i32StatsNumStores;					/*!< Number of stores on this context since last update */
+	IMG_INT32				i32StatsNumOutOfMemory;				/*!< Number of OOMs on this context since last update */
+	IMG_INT32				i32StatsNumPartialRenders;			/*!< Number of PRs on this context since last update */
+	RGXFWIF_DM				eDM;								/*!< Data Master type */
+	IMG_UINT64				RGXFW_ALIGN  ui64WaitSignalAddress;	/*!< Device Virtual Address of the signal the context is waiting on */
+	RGXFWIF_DLLIST_NODE		             sWaitSignalNode;		/*!< List entry for the wait-signal list */
+	RGXFWIF_DLLIST_NODE		RGXFW_ALIGN  sBufStalledNode;		/*!< List entry for the buffer stalled list */
+	IMG_UINT64				RGXFW_ALIGN  ui64CBufQueueCtrlAddr;	/*!< Address of the circular buffer queue pointers */
+	IMG_UINT64				RGXFW_ALIGN  ui64ResumeSignalAddr;	/*!< Address of the Services Signal for resuming the buffer */
+	IMG_BOOL				bReadOffsetNeedsReset;				/*!< Following HWR circular buffer read-offset needs resetting */
+} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT;
+
+/*!
+	Firmware render context.
+*/
+typedef struct
+{
+	RGXFWIF_FWCOMMONCONTEXT	sTAContext;				/*!< Firmware context for the TA */
+	RGXFWIF_FWCOMMONCONTEXT	s3DContext;				/*!< Firmware context for the 3D */
+
+	RGXFWIF_STATIC_RENDERCONTEXT_STATE sStaticRendercontextState;
+
+	IMG_UINT32			ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */
+
+	IMG_UINT32			ui32FwRenderCtxFlags; /*!< Compatibility and other flags */
+
+} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT;
+
+/*!
+	Firmware compute context.
+*/
+typedef struct
+{
+	RGXFWIF_FWCOMMONCONTEXT sCDMContext;				/*!< Firmware context for the CDM */
+
+	RGXFWIF_STATIC_COMPUTECONTEXT_STATE sStaticComputecontextState;
+
+	IMG_UINT32 ui32ComputeCtxFlags; /*!< Compatibility and other flags */
+
+} UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT;
+
+
+/*!
+	BIF tiling mode
+*/
+typedef IMG_UINT32 RGXFWIF_BIFTILINGMODE;
+
+#define RGXFWIF_BIFTILINGMODE_NONE    0U
+#define RGXFWIF_BIFTILINGMODE_256x16  0U
+#define RGXFWIF_BIFTILINGMODE_512x8   1U
+#define RGXFWIF_BIFTILINGMODE_MAX     4U
+
+/* Number of BIF tiling configurations / heaps */
+#define RGXFWIF_NUM_BIF_TILING_CONFIGS 4U
+
+
+/*!
+ ******************************************************************************
+ * Defines for CMD_TYPE corruption detection and forward compatibility check
+ *****************************************************************************/
+
+/* CMD_TYPE 32bit contains:
+ * 31:16	Reserved for magic value to detect corruption (16 bits)
+ * 15		Reserved for RGX_CCB_TYPE_TASK (1 bit)
+ * 14:0		Bits available for CMD_TYPEs (15 bits) */
+
+
+/* Magic value to detect corruption */
+#define RGX_CMD_MAGIC_DWORD			IMG_UINT32_C(0x2ABC)
+#define RGX_CMD_MAGIC_DWORD_MASK	(0xFFFF0000U)
+#define RGX_CMD_MAGIC_DWORD_SHIFT	(16U)
+#define RGX_CMD_MAGIC_DWORD_SHIFTED	(RGX_CMD_MAGIC_DWORD << RGX_CMD_MAGIC_DWORD_SHIFT)
+
+/* Maximum number of CMD_TYPEs supported = 32767 (i.e. 15 bits length) */
+#define RGX_CMD_TYPE_LENGTH			(15U)
+#define RGX_CMD_TYPE_MASK			(0x00007FFFU)
+#define RGX_CMD_TYPE_SHIFT			(0U)
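+
+/* Illustration (hypothetical helper): every command value embeds the magic
+ * word in bits 31:16, so a corrupted command can be detected by masking the
+ * magic back out and comparing.
+ */
+#if 0	/* example only */
+static IMG_BOOL ExampleCmdTypeIsValid(IMG_UINT32 ui32CmdType)
+{
+	return (ui32CmdType & RGX_CMD_MAGIC_DWORD_MASK) == RGX_CMD_MAGIC_DWORD_SHIFTED;
+}
+#endif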
+
+
+/*!
+ ******************************************************************************
+ * Kernel CCB control for RGX
+ *****************************************************************************/
+typedef struct
+{
+	volatile IMG_UINT32		ui32WriteOffset;		/*!< write offset into array of commands (MUST be aligned to 16 bytes!) */
+	volatile IMG_UINT32		ui32ReadOffset;			/*!< read offset into array of commands */
+	IMG_UINT32				ui32WrapMask;			/*!< Offset wrapping mask (Total capacity of the CCB - 1) */
+	IMG_UINT32				ui32CmdSize;			/*!< size of each command in bytes */
+} UNCACHED_ALIGN RGXFWIF_CCB_CTL;
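+
+/* Illustration (hypothetical helper): the kernel CCB is a power-of-two ring
+ * of fixed-size commands, so an offset advances by masking with
+ * ui32WrapMask; the ring is empty when the read and write offsets match.
+ */
+#if 0	/* example only */
+static IMG_UINT32 ExampleCCBNextOffset(const RGXFWIF_CCB_CTL *psCtl,
+                                       IMG_UINT32 ui32Offset)
+{
+	return (ui32Offset + 1U) & psCtl->ui32WrapMask;
+}
+#endif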
+
+/*!
+ ******************************************************************************
+ * Kernel CCB command structure for RGX
+ *****************************************************************************/
+
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PT      (0x1U) /* MMU_CTRL_INVAL_PT_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PD      (0x2U) /* MMU_CTRL_INVAL_PD_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PC      (0x4U) /* MMU_CTRL_INVAL_PC_EN */
+
+#if !defined(__KERNEL__)
+
+#if !defined(RGX_FEATURE_SLC_VIVT)
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB   (0x10U) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB     (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8U) /* BIF_CTRL_INVAL_TLB1_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x0U) /* not used */
+
+#else /* RGX_FEATURE_SLC_VIVT */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB   (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB     (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#endif
+
+#else
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB   (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB     (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#endif
+
+#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000) /* indicates FW should interrupt the host */
+
+typedef struct
+{
+	PRGXFWIF_FWMEMCONTEXT sMemoryContext;
+	IMG_UINT32            ui32Flags;
+	RGXFWIF_DEV_VIRTADDR  sMMUCacheSync;
+	IMG_UINT16            ui16MMUCacheSyncUpdateValue;
+} __attribute__ ((packed)) RGXFWIF_MMUCACHEDATA;
+
+typedef struct
+{
+	IMG_BOOL               bSetBypassed;        /*!< Should SLC be/not be bypassed for indicated units? */
+	IMG_UINT32             uiFlags;             /*!< Units to enable/disable */
+} RGXFWIF_SLCBPCTLDATA;
+
+#define RGXFWIF_BPDATA_FLAGS_ENABLE (1U << 0)
+#define RGXFWIF_BPDATA_FLAGS_WRITE  (1U << 1)
+#define RGXFWIF_BPDATA_FLAGS_CTL    (1U << 2)
+#define RGXFWIF_BPDATA_FLAGS_REGS   (1U << 3)
+
+typedef struct
+{
+	PRGXFWIF_FWMEMCONTEXT	psFWMemContext;			/*!< Memory context */
+	IMG_UINT32		ui32BPAddr;			/*!< Breakpoint address */
+	IMG_UINT32		ui32HandlerAddr;		/*!< Breakpoint handler */
+	IMG_UINT32		ui32BPDM;			/*!< Breakpoint control */
+	IMG_UINT32		ui32BPDataFlags;
+	IMG_UINT32		ui32TempRegs;		/*!< Number of temporary registers to overallocate */
+	IMG_UINT32		ui32SharedRegs;		/*!< Number of shared registers to overallocate */
+} RGXFWIF_BPDATA;
+
+#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS 4
+
+typedef struct
+{
+	PRGXFWIF_FWCOMMONCONTEXT	psContext;			/*!< address of the firmware context */
+	IMG_UINT32					ui32CWoffUpdate;	/*!< Client CCB write offset update */
+	IMG_UINT32					ui32NumCleanupCtl;		/*!< number of CleanupCtl pointers attached */
+	PRGXFWIF_CLEANUP_CTL		apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */
+	IMG_UINT32					ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */
+} RGXFWIF_KCCB_CMD_KICK_DATA;
+
+typedef struct
+{
+	RGXFWIF_DEV_VIRTADDR sSyncObjDevVAddr;
+	IMG_UINT32 uiUpdateVal;
+} RGXFWIF_KCCB_CMD_SYNC_DATA;
+
+typedef struct
+{
+	PRGXFWIF_FWCOMMONCONTEXT	psContext;			/*!< address of the firmware context */
+	IMG_UINT32					ui32CCBFenceOffset;	/*!< Client CCB fence offset */
+} RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA;
+
+typedef enum
+{
+	RGXFWIF_CLEANUP_FWCOMMONCONTEXT,		/*!< FW common context cleanup */
+	RGXFWIF_CLEANUP_HWRTDATA,				/*!< FW HW RT data cleanup */
+	RGXFWIF_CLEANUP_FREELIST,				/*!< FW freelist cleanup */
+	RGXFWIF_CLEANUP_ZSBUFFER,				/*!< FW ZS Buffer cleanup */
+} RGXFWIF_CLEANUP_TYPE;
+
+#define RGXFWIF_CLEANUP_RUN		(1U << 0U)	/*!< The requested cleanup command has run on the FW */
+#define RGXFWIF_CLEANUP_BUSY	(1U << 1U)	/*!< The requested resource is busy */
+
+typedef struct
+{
+	RGXFWIF_CLEANUP_TYPE			eCleanupType;			/*!< Cleanup type */
+	union {
+		PRGXFWIF_FWCOMMONCONTEXT 	psContext;				/*!< FW common context to cleanup */
+		PRGXFWIF_HWRTDATA 			psHWRTData;				/*!< HW RT to cleanup */
+		PRGXFWIF_FREELIST 			psFreelist;				/*!< Freelist to cleanup */
+		PRGXFWIF_ZSBUFFER 			psZSBuffer;				/*!< ZS Buffer to cleanup */
+	} uCleanupData;
+	RGXFWIF_DEV_VIRTADDR			sSyncObjDevVAddr;		/*!< sync primitive used to indicate state of the request */
+} RGXFWIF_CLEANUP_REQUEST;
+
+typedef enum
+{
+	RGXFWIF_POW_OFF_REQ = 1,
+	RGXFWIF_POW_FORCED_IDLE_REQ,
+	RGXFWIF_POW_NUMDUST_CHANGE,
+	RGXFWIF_POW_APM_LATENCY_CHANGE
+} RGXFWIF_POWER_TYPE;
+
+typedef enum
+{
+	RGXFWIF_POWER_FORCE_IDLE = 1,
+	RGXFWIF_POWER_CANCEL_FORCED_IDLE,
+	RGXFWIF_POWER_HOST_TIMEOUT,
+} RGXFWIF_POWER_FORCE_IDLE_TYPE;
+
+typedef struct
+{
+	RGXFWIF_POWER_TYPE					ePowType;					/*!< Type of power request */
+	union
+	{
+		IMG_UINT32						ui32NumOfDusts;			/*!< Number of active Dusts */
+		IMG_BOOL						bForced;				/*!< If the operation is mandatory */
+		RGXFWIF_POWER_FORCE_IDLE_TYPE	ePowRequestType;		/*!< Type of Request. Consolidating Force Idle, Cancel Forced Idle, Host Timeout */
+		IMG_UINT32						ui32ActivePMLatencyms;	/*!< Number of milliseconds to set APM latency */
+	} uPoweReqData;
+} RGXFWIF_POWER_REQUEST;
+
+typedef struct
+{
+	PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */
+	IMG_BOOL    bInval;                 /*!< Invalidate the cache as well as flushing */
+	IMG_BOOL    bDMContext;             /*!< The data to flush/invalidate belongs to a specific DM context */
+	RGXFWIF_DM  eDM;                    /*!< DM to flush entries for (only useful when bDMContext == TRUE) */
+} RGXFWIF_SLCFLUSHINVALDATA;
+
+typedef struct
+{
+	IMG_UINT32  ui32HCSDeadlineMS;  /* New number of milliseconds a hard context switch is allowed to last */
+} RGXFWIF_HCS_CTL;
+
+typedef enum{
+	RGXFWIF_HWPERF_CTRL_TOGGLE = 0,
+	RGXFWIF_HWPERF_CTRL_SET    = 1,
+	RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2
+} RGXFWIF_HWPERF_UPDATE_CONFIG;
+
+typedef struct
+{
+	RGXFWIF_HWPERF_UPDATE_CONFIG eOpCode; /*!< Control operation code */
+	IMG_UINT64	RGXFW_ALIGN	ui64Mask;   /*!< Mask of events to toggle */
+} RGXFWIF_HWPERF_CTRL;
+
+typedef struct
+{
+	IMG_UINT32                ui32NumBlocks;    /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */
+	PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs;    /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */
+} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS;
+
+typedef struct
+{
+	IMG_UINT32	ui32NewClockSpeed; 			/*!< New clock speed */
+} RGXFWIF_CORECLKSPEEDCHANGE_DATA;
+
+#define RGXFWIF_HWPERF_CTRL_BLKS_MAX	16
+
+typedef struct
+{
+	IMG_BOOL	bEnable;
+	IMG_UINT32	ui32NumBlocks;                              /*!< Number of block IDs in the array */
+	IMG_UINT16	aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX];   /*!< Array of RGX_HWPERF_CNTBLK_ID values */
+} RGXFWIF_HWPERF_CTRL_BLKS;
+
+
+typedef struct
+{
+	IMG_UINT16                      ui16CustomBlock;
+	IMG_UINT16                      ui16NumCounters;
+	PRGX_HWPERF_SELECT_CUSTOM_CNTRS sCustomCounterIDs;
+} RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS;
+
+typedef struct
+{
+	RGXFWIF_DEV_VIRTADDR	sZSBufferFWDevVAddr; 				/*!< ZS-Buffer FW address */
+	IMG_UINT32				bDone;								/*!< Whether the backing/unbacking action succeeded */
+} RGXFWIF_ZSBUFFER_BACKING_DATA;
+
+typedef struct
+{
+	IMG_UINT32 ui32IsolationPriorityThreshold;
+} RGXFWIF_OSID_ISOLATION_GROUP_DATA;
+
+typedef struct
+{
+	RGXFWIF_DEV_VIRTADDR	sFreeListFWDevVAddr; 				/*!< Freelist FW address */
+	IMG_UINT32				ui32DeltaPages;						/*!< Amount of the Freelist change */
+	IMG_UINT32				ui32NewPages;						/*!< New amount of pages on the freelist (including ready pages) */
+	IMG_UINT32              ui32ReadyPages;                     /*!< Number of ready pages to be held in reserve until OOM */
+} RGXFWIF_FREELIST_GS_DATA;
+
+#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U
+
+typedef struct
+{
+	IMG_UINT32			ui32FreelistsCount;
+	IMG_UINT32			aui32FreelistIDs[MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS];
+} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA;
+
+
+typedef struct
+{
+	IMG_DEV_VIRTADDR RGXFW_ALIGN       sDevSignalAddress; /*!< device virtual address of the updated signal */
+	PRGXFWIF_FWMEMCONTEXT              psFWMemContext; /*!< Memory context */
+} UNCACHED_ALIGN RGXFWIF_SIGNAL_UPDATE_DATA;
+
+
+typedef struct
+{
+	PRGXFWIF_FWCOMMONCONTEXT  psContext; /*!< Context that may need to be resumed following a write offset update */
+} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA;
+
+/*!
+ ******************************************************************************
+ * Proactive DVFS Structures
+ *****************************************************************************/
+#define NUM_OPP_VALUES 16
+
+typedef struct
+{
+	IMG_UINT32			ui32Volt; /* V  */
+	IMG_UINT32			ui32Freq; /* Hz */
+} UNCACHED_ALIGN PDVFS_OPP;
+
+typedef struct
+{
+	PDVFS_OPP		asOPPValues[NUM_OPP_VALUES];
+#if defined(DEBUG)
+	IMG_UINT32		ui32MinOPPPoint;
+#endif
+	IMG_UINT32		ui32MaxOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP;
+
+typedef struct
+{
+	IMG_UINT32 ui32MaxOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA;
+
+typedef struct
+{
+	IMG_UINT32 ui32MinOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_MIN_FREQ_DATA;
+
+/*!
+ ******************************************************************************
+ * Register configuration structures
+ *****************************************************************************/
+
+#define RGXFWIF_REG_CFG_MAX_SIZE 512
+
+typedef enum
+{
+	RGXFWIF_REGCFG_CMD_ADD 				= 101,
+	RGXFWIF_REGCFG_CMD_CLEAR 			= 102,
+	RGXFWIF_REGCFG_CMD_ENABLE 			= 103,
+	RGXFWIF_REGCFG_CMD_DISABLE 			= 104
+} RGXFWIF_REGDATA_CMD_TYPE;
+
+typedef enum
+{
+	RGXFWIF_REG_CFG_TYPE_PWR_ON=0,      /* Sidekick power event */
+	RGXFWIF_REG_CFG_TYPE_DUST_CHANGE,   /* Rascal / dust power event */
+	RGXFWIF_REG_CFG_TYPE_TA,            /* TA kick */
+	RGXFWIF_REG_CFG_TYPE_3D,            /* 3D kick */
+	RGXFWIF_REG_CFG_TYPE_CDM,           /* Compute kick */
+	RGXFWIF_REG_CFG_TYPE_TLA,           /* TLA kick */
+	RGXFWIF_REG_CFG_TYPE_TDM,           /* TDM kick */
+	RGXFWIF_REG_CFG_TYPE_ALL            /* Applies to all types. Keep as last element */
+} RGXFWIF_REG_CFG_TYPE;
+
+typedef struct
+{
+	IMG_UINT64		ui64Addr;
+	IMG_UINT64		ui64Mask;
+	IMG_UINT64		ui64Value;
+} RGXFWIF_REG_CFG_REC;
+
+typedef struct
+{
+	RGXFWIF_REGDATA_CMD_TYPE         eCmdType;
+	RGXFWIF_REG_CFG_TYPE             eRegConfigType;
+	RGXFWIF_REG_CFG_REC RGXFW_ALIGN  sRegConfig;
+
+} RGXFWIF_REGCONFIG_DATA;
+
+typedef struct
+{
+	/**
+	 * PDump WRW command write granularity is 32 bits.
+	 * Add padding to ensure array size is 32 bit granular.
+	 */
+	IMG_UINT8           RGXFW_ALIGN  aui8NumRegsType[PVR_ALIGN((IMG_UINT32)RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))];
+	RGXFWIF_REG_CFG_REC RGXFW_ALIGN  asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE];
+} UNCACHED_ALIGN RGXFWIF_REG_CFG;
+
+/* OSid Scheduling Priority Change */
+typedef struct
+{
+	IMG_UINT32			ui32OSidNum;
+	IMG_UINT32			ui32Priority;
+} RGXFWIF_OSID_PRIORITY_DATA;
+
+typedef enum
+{
+	RGXFWIF_OS_ONLINE = 1,
+	RGXFWIF_OS_OFFLINE
+} RGXFWIF_OS_STATE_CHANGE;
+
+typedef struct
+{
+	IMG_UINT32 ui32OSid;
+	RGXFWIF_OS_STATE_CHANGE eNewOSState;
+} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA;
+
+typedef enum
+{
+	RGXFWIF_PWR_COUNTER_DUMP_START = 1,
+	RGXFWIF_PWR_COUNTER_DUMP_STOP,
+	RGXFWIF_PWR_COUNTER_DUMP_SAMPLE,
+} RGXFWIF_COUNTER_DUMP_REQUEST;
+
+typedef struct
+{
+	RGXFWIF_COUNTER_DUMP_REQUEST eCounterDumpRequest;
+}  RGXFW_ALIGN RGXFWIF_COUNTER_DUMP_DATA;
+
+typedef enum
+{
+	/* Common commands */
+	RGXFWIF_KCCB_CMD_KICK								= 101U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+	RGXFWIF_KCCB_CMD_MMUCACHE							= 102U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+	RGXFWIF_KCCB_CMD_BP									= 103U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+	RGXFWIF_KCCB_CMD_SYNC								= 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Host sync command. Requires sSyncData. */
+	RGXFWIF_KCCB_CMD_SLCFLUSHINVAL						= 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */
+	RGXFWIF_KCCB_CMD_CLEANUP							= 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */
+	RGXFWIF_KCCB_CMD_POW								= 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request */
+	RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE			= 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */
+	RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE			= 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */
+	RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE				= 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */
+	RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE	= 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */
+	RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE				= 113U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has performed a signal update */
+	RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE			= 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */
+	RGXFWIF_KCCB_CMD_HEALTH_CHECK						= 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */
+	RGXFWIF_KCCB_CMD_FORCE_UPDATE						= 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */
+
+
+	/* Commands only permitted to the native or host OS */
+	RGXFWIF_KCCB_CMD_REGCONFIG							= 200U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+	RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS 		= 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure the custom counters for HWPerf */
+	RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT	= 202U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks during the init process */
+	RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE						= 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */
+	RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ				= 205U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a maximum frequency/OPP point */
+	RGXFWIF_KCCB_CMD_PDVFS_REQUEST_REACTIVE_UPDATE		= 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Request a frequency/OPP update after workload completion */
+	RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE				= 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the relative scheduling priority for a particular OSid. It can only be serviced for the Host DDK */
+	RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL					= 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */
+	RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE					= 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set hard context switching deadline */
+	RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE			= 210U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the configuration of (or even disables) the OSid Isolation scheduling group. It can only be serviced for the Host DDK */
+	RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE			= 211U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. It can only be serviced for the Host DDK */
+	RGXFWIF_KCCB_CMD_COUNTER_DUMP						= 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */
+	RGXFWIF_KCCB_CMD_SLCBPCTL							= 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC bypass control. Requires sSLCBPCtlData. For validation */
+	RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG				= 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */
+	RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS			= 215U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */
+	RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS					= 216U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */
+	RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE					= 217U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */
+	RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ				= 218U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */
+} RGXFWIF_KCCB_CMD_TYPE;
+
+#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_REGCONFIG - 1)
+
+/* Kernel CCB command packet */
+typedef struct
+{
+	RGXFWIF_KCCB_CMD_TYPE  eCmdType;      /*!< Command type */
+	RGXFWIF_DM             eDM;           /*!< DM associated with the command */
+	IMG_UINT32             ui32KCCBFlags; /*!< Compatibility and other flags */
+	IMG_UINT32             ui32Unused;    /*!< Unused because of alignment of field below */
+
+	/* NOTE: Make sure that uCmdData is the last member of this struct
+	 * This is to calculate actual command size for device mem copy.
+	 * (Refer RGXGetCmdMemCopySize())
+	 * */
+	union
+	{
+		RGXFWIF_KCCB_CMD_KICK_DATA			sCmdKickData;			/*!< Data for Kick command */
+		RGXFWIF_MMUCACHEDATA				sMMUCacheData;			/*!< Data for MMU cache command */
+		RGXFWIF_BPDATA						sBPData;				/*!< Data for Breakpoint Commands */
+		RGXFWIF_SLCBPCTLDATA       			sSLCBPCtlData;  		/*!< Data for SLC Bypass Control */
+		RGXFWIF_KCCB_CMD_SYNC_DATA 			sSyncData;          	/*!< Data for host sync commands */
+		RGXFWIF_SLCFLUSHINVALDATA			sSLCFlushInvalData;		/*!< Data for SLC Flush/Inval commands */
+		RGXFWIF_CLEANUP_REQUEST				sCleanupData; 			/*!< Data for cleanup commands */
+		RGXFWIF_POWER_REQUEST				sPowData;				/*!< Data for power request commands */
+		RGXFWIF_HWPERF_CTRL					sHWPerfCtrl;			/*!< Data for HWPerf control command */
+		RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS	sHWPerfCfgEnableBlks;	/*!< Data for HWPerf configure, clear and enable performance counter block command */
+		RGXFWIF_HWPERF_CTRL_BLKS			sHWPerfCtrlBlks;		/*!< Data for HWPerf enable or disable performance counter block commands */
+		RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS  sHWPerfSelectCstmCntrs; /*!< Data for HWPerf configure the custom counters to read */
+		RGXFWIF_CORECLKSPEEDCHANGE_DATA		sCoreClkSpeedChangeData;/*!< Data for core clock speed change */
+		RGXFWIF_ZSBUFFER_BACKING_DATA		sZSBufferBackingData;	/*!< Feedback for Z/S Buffer backing/unbacking */
+		RGXFWIF_FREELIST_GS_DATA			sFreeListGSData;		/*!< Feedback for Freelist grow/shrink */
+		RGXFWIF_FREELISTS_RECONSTRUCTION_DATA	sFreeListsReconstructionData;	/*!< Feedback for Freelists reconstruction */
+		RGXFWIF_REGCONFIG_DATA				sRegConfigData;			/*!< Data for custom register configuration */
+		RGXFWIF_SIGNAL_UPDATE_DATA          sSignalUpdateData;      /*!< Data for informing the FW about the signal update */
+		RGXFWIF_WRITE_OFFSET_UPDATE_DATA    sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */
+		RGXFWIF_PDVFS_MAX_FREQ_DATA			sPDVFSMaxFreqData;		/*!< Data for setting the max frequency/OPP */
+		RGXFWIF_PDVFS_MIN_FREQ_DATA			sPDVFSMinFreqData;		/*!< Data for setting the min frequency/OPP */
+		RGXFWIF_OSID_PRIORITY_DATA			sCmdOSidPriorityData;	/*!< Data for updating an OSid priority */
+		RGXFWIF_HCS_CTL						sHCSCtrl;				/*!< Data for Hard Context Switching */
+		RGXFWIF_OSID_ISOLATION_GROUP_DATA   sCmdOSidIsolationData;  /*!< Data for updating the OSid isolation group */
+		RGXFWIF_OS_STATE_CHANGE_DATA        sCmdOSOnlineStateData;  /*!< Data for updating the Guest Online states */
+		RGXFWIF_DEV_VIRTADDR                sTBIBuffer;             /*!< Dev address for TBI buffer allocated on demand */
+		RGXFWIF_COUNTER_DUMP_DATA			sCounterDumpConfigData; /*!< Data for dumping of register ranges */
+		RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA  sForceUpdateData;       /*!< Data for signalling all unmet fences for a given CCB */
+	} UNCACHED_ALIGN uCmdData;
+} UNCACHED_ALIGN RGXFWIF_KCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD);
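+
+/* Illustration of the note inside RGXFWIF_KCCB_CMD (hypothetical helper; the
+ * real logic lives in RGXGetCmdMemCopySize(), which is not defined in this
+ * header): because uCmdData is the last member, the bytes to copy for a
+ * given command are the fields up to uCmdData plus the size of the union
+ * member actually used.
+ */
+#if 0	/* example only */
+static size_t ExampleKCCBCmdCopySize(size_t uiCmdDataSize)
+{
+	return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + uiCmdDataSize;
+}
+#endif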
+
+/*!
+ ******************************************************************************
+ * Firmware CCB command structure for RGX
+ *****************************************************************************/
+
+typedef struct
+{
+	IMG_UINT32				ui32ZSBufferID;
+	IMG_BOOL				bPopulate;
+} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA;
+
+typedef struct
+{
+	IMG_UINT32				ui32FreelistID;
+} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA;
+
+typedef struct
+{
+	IMG_UINT32			ui32FreelistsCount;
+	IMG_UINT32			ui32HwrCounter;
+	IMG_UINT32			aui32FreelistIDs[MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS];
+} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA;
+
+/*!
+	Last reset reason for a context.
+*/
+typedef enum
+{
+	RGXFWIF_CONTEXT_RESET_REASON_NONE					= 0,	/*!< No reset reason recorded */
+	RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP			= 1,	/*!< Caused a reset due to locking up */
+	RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP		= 2,	/*!< Affected by another context locking up */
+	RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING		= 3,	/*!< Overran the global deadline */
+	RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING	= 4,	/*!< Affected by another context overrunning */
+	RGXFWIF_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH	= 5,	/*!< Forced reset to ensure scheduling requirements */
+} RGXFWIF_CONTEXT_RESET_REASON;
+
+typedef struct
+{
+	IMG_UINT32						ui32ServerCommonContextID;	/*!< Context affected by the reset */
+	RGXFWIF_CONTEXT_RESET_REASON	eResetReason;				/*!< Reason for reset */
+	IMG_UINT32						ui32ResetJobRef;			/*!< Job ref running at the time of reset */
+	IMG_BOOL						bPageFault;					/*!< Did a page fault happen */
+	IMG_UINT64 RGXFW_ALIGN			ui64PCAddress;				/*!< Page catalogue address at the time of the fault */
+	IMG_DEV_VIRTADDR RGXFW_ALIGN	sFaultAddress;				/*!< Page fault address (only when applicable) */
+} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA;
+
+typedef enum
+{
+	RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING				= 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, 	/*!< Requests ZSBuffer to be backed with physical pages */
+	RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING			= 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, 	/*!< Requests ZSBuffer to be unbacked */
+	RGXFWIF_FWCCB_CMD_FREELIST_GROW					= 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, 	/*!< Requests an on-demand freelist grow/shrink */
+	RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION		= 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, 	/*!< Requests freelists reconstruction */
+	RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION	= 105U | RGX_CMD_MAGIC_DWORD_SHIFTED,	/*!< Notifies host of a HWR event on a context */
+	RGXFWIF_FWCCB_CMD_DEBUG_DUMP					= 106U | RGX_CMD_MAGIC_DWORD_SHIFTED,	/*!< Requests an on-demand debug dump */
+	RGXFWIF_FWCCB_CMD_UPDATE_STATS					= 107U | RGX_CMD_MAGIC_DWORD_SHIFTED,	/*!< Requests an on-demand update on process stats */
+
+	RGXFWIF_FWCCB_CMD_WORKLOAD_FINISHED				= 108U | RGX_CMD_MAGIC_DWORD_SHIFTED,	/*!< Supplies data for the workload matching algorithm */
+	RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE			= 109U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+	RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM					= 110U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+	RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART			= 111U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+} RGXFWIF_FWCCB_CMD_TYPE;
+
+typedef enum
+{
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1,		/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY,			/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES,				/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES,				/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_SH_STORES,				/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumSHStores stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES				/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE;
+
+typedef struct
+{
+    RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE		eElementToUpdate;			/*!< Element to update */
+    IMG_PID									pidOwner;					/*!< The pid of the process whose stats are being updated */
+    IMG_INT32								i32AdjustmentValue;			/*!< Adjustment to be made to the statistic */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA;
+
+typedef struct
+{
+	IMG_UINT32 ui32CoreClkRate;
+} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA;
+
+typedef struct
+{
+	IMG_UINT64 RGXFW_ALIGN ui64MemDesc;
+} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM_DATA;
+
+typedef struct
+{
+	RGXFWIF_FWCCB_CMD_TYPE  eCmdType;       /*!< Command type */
+	IMG_UINT32              ui32FWCCBFlags; /*!< Compatibility and other flags */
+
+	union
+	{
+		RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA				sCmdZSBufferBacking;			/*!< Data for Z/S-Buffer on-demand (un)backing*/
+		RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA					sCmdFreeListGS;					/*!< Data for on-demand freelist grow/shrink */
+		RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA		sCmdFreeListsReconstruction;	/*!< Data for freelists reconstruction */
+		RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA				sCmdContextResetNotification;	/*!< Data for context reset notification */
+		RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA					sCmdUpdateStatsData;			/*!< Data for updating process stats */
+		RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA			sCmdCoreClkRateChange;
+		RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM_DATA				sCmdPDVFSFreeMem;
+	} RGXFW_ALIGN uCmdData;
+} RGXFW_ALIGN RGXFWIF_FWCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD);
+
+
+/*!
+ ******************************************************************************
+ * Workload estimation Firmware CCB command structure for RGX
+ *****************************************************************************/
+typedef struct
+{
+	IMG_UINT64 RGXFW_ALIGN ui64ReturnDataIndex; /*!< Index for return data array */
+	IMG_UINT64 RGXFW_ALIGN ui64CyclesTaken;     /*!< The cycles the workload took on the hardware */
+} RGXFWIF_WORKEST_FWCCB_CMD;
+
+
+/*!
+ ******************************************************************************
+ * Client CCB commands for RGX
+ *****************************************************************************/
+
+/* Required memory alignment for 64-bit variables accessible by Meta
+  (the gcc compiler for Meta aligns 64-bit variables to 64-bit boundaries;
+   therefore, memory shared between the host and Meta that contains 64-bit
+   variables has to maintain this alignment) */
+#define RGXFWIF_FWALLOC_ALIGN	sizeof(IMG_UINT64)
+
+#define RGX_CCB_TYPE_TASK			(IMG_UINT32_C(1) << 15)
+#define RGX_CCB_FWALLOC_ALIGN(size)	(((size) + (RGXFWIF_FWALLOC_ALIGN-1)) & ~(RGXFWIF_FWALLOC_ALIGN - 1))
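+
+/* Worked example: RGXFWIF_FWALLOC_ALIGN is 8, so RGX_CCB_FWALLOC_ALIGN rounds
+ * a size up to the next multiple of 8, e.g. 13 -> 16 and 8 -> 8. */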
+
+typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE;
+
+#define RGXFWIF_CCB_CMD_TYPE_TA			(201U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK)
+#define RGXFWIF_CCB_CMD_TYPE_3D			(202U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK)
+#define RGXFWIF_CCB_CMD_TYPE_CDM		(203U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK)
+#define RGXFWIF_CCB_CMD_TYPE_TQ_3D		(204U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK)
+#define RGXFWIF_CCB_CMD_TYPE_TQ_2D		(205U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK)
+#define RGXFWIF_CCB_CMD_TYPE_3D_PR		(206U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK)
+#define RGXFWIF_CCB_CMD_TYPE_NULL		(207U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK)
+/* Free slots 208-211 */
+#define RGXFWIF_CCB_CMD_TYPE_TQ_TDM     (212U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK)
+
+/* Leave a gap between CCB specific commands and generic commands */
+#define RGXFWIF_CCB_CMD_TYPE_FENCE          (213U | RGX_CMD_MAGIC_DWORD_SHIFTED)
+#define RGXFWIF_CCB_CMD_TYPE_UPDATE         (214U | RGX_CMD_MAGIC_DWORD_SHIFTED)
+/* Free slot 215 */
+#define RGXFWIF_CCB_CMD_TYPE_FENCE_PR       (216U | RGX_CMD_MAGIC_DWORD_SHIFTED)
+#define RGXFWIF_CCB_CMD_TYPE_PRIORITY       (217U | RGX_CMD_MAGIC_DWORD_SHIFTED)
+/* Free slot 218 */
+#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE (219U | RGX_CMD_MAGIC_DWORD_SHIFTED)
+/* Free slot 220 */
+
+#define RGXFWIF_CCB_CMD_TYPE_PADDING	(221U | RGX_CMD_MAGIC_DWORD_SHIFTED)
+
+
+typedef struct
+{
+	/* Index for the KM Workload estimation return data array */
+	IMG_UINT64 RGXFW_ALIGN                    ui64ReturnDataIndex;
+	/* Deadline for the workload */
+	IMG_UINT64 RGXFW_ALIGN                    ui64Deadline;
+	/* Predicted time taken to do the work in cycles */
+	IMG_UINT64 RGXFW_ALIGN                    ui64CyclesPrediction;
+} RGXFWIF_WORKEST_KICK_DATA;
+
+typedef struct
+{
+	RGXFWIF_CCB_CMD_TYPE				eCmdType;
+	IMG_UINT32							ui32CmdSize;
+	IMG_UINT32							ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */
+	IMG_UINT32							ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */
+	RGXFWIF_WORKEST_KICK_DATA			sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */
+} RGXFWIF_CCB_CMD_HEADER;
+
+
+/*!
+ ******************************************************************************
+ * Client CCB commands which are only required by the kernel
+ *****************************************************************************/
+typedef struct
+{
+	IMG_UINT32             ui32Priority;
+} RGXFWIF_CMD_PRIORITY;
+
+
+/*!
+ ******************************************************************************
+ * Signature and Checksums Buffer
+ *****************************************************************************/
+typedef struct
+{
+	PRGXFWIF_SIGBUFFER		sBuffer;			/*!< Ptr to Signature Buffer memory */
+	IMG_UINT32				ui32LeftSizeInRegs;	/*!< Amount of space left for storing regs in the buffer */
+} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL;
+
+typedef struct
+{
+	PRGXFWIF_COUNTERBUFFER	sBuffer;			/*!< Ptr to counter dump buffer */
+	IMG_UINT32				ui32SizeInDwords; 	/*!< Amount of space in the buffer, in dwords */
+} UNCACHED_ALIGN RGXFWIF_COUNTER_DUMP_CTL;
+
+typedef struct
+{
+	PRGXFWIF_FIRMWAREGCOVBUFFER	sBuffer;			/*!< Ptr to firmware gcov buffer */
+	IMG_UINT32				ui32Size;           	/*!< Amount of space in the buffer */
+} UNCACHED_ALIGN RGXFWIF_FIRMWARE_GCOV_CTL;
+
+/*!
+ *****************************************************************************
+ * RGX Compatibility checks
+ *****************************************************************************/
+
+/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC changes, the
+	following define should be increased by 1 to indicate to the
+	compatibility logic that layout has changed. */
+#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 3
+
+typedef struct
+{
+	IMG_UINT32	ui32LayoutVersion; /* WARNING: This field must be defined as the first one in this structure */
+	IMG_UINT64	RGXFW_ALIGN ui64BVNC;
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC;
+
+typedef struct
+{
+	IMG_UINT8	ui8OsCountSupport;
+} UNCACHED_ALIGN RGXFWIF_INIT_OPTIONS;
+
+#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \
+	RGXFWIF_COMPCHECKS_BVNC name = { \
+		RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \
+		0, \
+	}
+#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \
+	do { \
+		(name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \
+		(name).ui64BVNC = 0; \
+	} while (0)
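+
+/* Usage sketch (illustration only): the two macros above are equivalent;
+ * one declares and initialises a variable, the other initialises an
+ * existing one.
+ */
+#if 0	/* example only */
+static void ExampleCompChecksInit(void)
+{
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sDeclared);
+	RGXFWIF_COMPCHECKS_BVNC sExisting;
+	RGXFWIF_COMPCHECKS_BVNC_INIT(sExisting);
+	(void)sDeclared;
+	(void)sExisting;
+}
+#endif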
+
+typedef struct
+{
+	RGXFWIF_COMPCHECKS_BVNC		sHWBVNC;				/*!< hardware BVNC (from the RGX registers) */
+	RGXFWIF_COMPCHECKS_BVNC		sFWBVNC;				/*!< firmware BVNC */
+	IMG_UINT32					ui32FWProcessorVersion;	/*!< identifier of the MIPS/META version */
+	IMG_UINT32					ui32DDKVersion;			/*!< software DDK version */
+	IMG_UINT32					ui32DDKBuild;			/*!< software DDK build no. */
+	IMG_UINT32					ui32BuildOptions;		/*!< build options bit-field */
+	RGXFWIF_INIT_OPTIONS		sInitOptions;			/*!< initialisation options bit-field */
+	IMG_BOOL					bUpdated;				/*!< Information is valid */
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS;
+
+/*!
+ ******************************************************************************
+ * Updated configuration post FW data init.
+ *****************************************************************************/
+typedef struct
+{
+	IMG_UINT32         ui32ActivePMLatencyms;      /* APM latency in ms before signalling IDLE to the host */
+	IMG_UINT32         ui32RuntimeCfgFlags;        /* Compatibility and other flags */
+	IMG_BOOL           bActivePMLatencyPersistant; /* If set, APM latency does not reset to system default each GPU power transition */
+	IMG_UINT32         ui32CoreClockSpeed;         /* Core clock speed, currently only used to calculate timer ticks */
+	IMG_UINT32         ui32DefaultDustsNumInit;    /* Last number of dusts change requested by the host */
+	PRGXFWIF_HWPERFBUF sHWPerfBuf;                 /* On-demand allocated HWPerf buffer address, to be passed to the FW */
+} RGXFWIF_RUNTIME_CFG;
+
+/*!
+ *****************************************************************************
+ * Control data for RGX
+ *****************************************************************************/
+
+#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999U)
+
+#if defined(PDUMP)
+
+#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32U
+
+typedef enum
+{
+	RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+	RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT
+} RGXFWIF_PID_FILTER_MODE;
+
+typedef struct
+{
+	IMG_PID uiPID;
+	IMG_UINT32 ui32OSID;
+} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM;
+
+typedef struct
+{
+	RGXFWIF_PID_FILTER_MODE eMode;
+	/* Each process in the filter list is specified by a PID and OS ID pair.
+	 * Each PID and OS pair is an item in the items array (asItems).
+	 * If the array contains fewer than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries,
+	 * it must be terminated by an item with a PID of zero.
+	 */
+	RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS];
+} RGXFW_ALIGN RGXFWIF_PID_FILTER;
+#endif
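+
+/* Illustrative usage sketch (editor's addition, not part of the original
+ * header), assuming the caller wants to trace only PIDs 100 and 200 on
+ * OSID 0. With fewer than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries the list
+ * is terminated by a zero PID, as described above:
+ *
+ *     RGXFWIF_PID_FILTER sFilter = { 0 };
+ *     sFilter.eMode = RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT;
+ *     sFilter.asItems[0].uiPID = 100; sFilter.asItems[0].ui32OSID = 0;
+ *     sFilter.asItems[1].uiPID = 200; sFilter.asItems[1].ui32OSID = 0;
+ *     sFilter.asItems[2].uiPID = 0;   // terminator
+ */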
+
+typedef enum
+{
+	RGXFWIF_TPU_DM_PDM = 0,
+	RGXFWIF_TPU_DM_VDM = 1,
+	RGXFWIF_TPU_DM_CDM = 2,
+	RGXFWIF_TPU_DM_TDM = 3,
+	RGXFWIF_TPU_DM_LAST
+} RGXFWIF_TPU_DM;
+
+typedef struct
+{
+	IMG_UINT32              ui32ConfigFlags;        /*!< Configuration flags from host */
+	IMG_UINT32              ui32ConfigFlagsExt;     /*!< Extended configuration flags from host */
+	RGXFWIF_DEV_VIRTADDR    sPowerSync;
+	PRGXFWIF_TRACEBUF       sTraceBufCtl;           /*!< structure containing trace control data and actual trace buffer */
+	PRGXFWIF_HWRINFOBUF     sRGXFWIfHWRInfoBufCtl;
+} RGXFWIF_OS_CONFIG;
+
+typedef enum
+{
+	RGXFWIF_GPIO_VAL_OFF           = 0, /*!< No GPIO validation */
+	RGXFWIF_GPIO_VAL_GENERAL       = 1, /*!< Simple test case that
+	                                         starts by sending data via the
+	                                         GPIO and then sends back any data
+	                                         received over the GPIO */
+	RGXFWIF_GPIO_VAL_AP            = 2, /*!< More complex test case that writes
+	                                         and reads data across the entire
+	                                         GPIO AP address range.*/
+#if defined(SUPPORT_STRIP_RENDERING)
+	RGXFWIF_GPIO_VAL_SR_BASIC      = 3, /*!< Strip Rendering AP based basic test.*/
+	RGXFWIF_GPIO_VAL_SR_COMPLEX    = 4, /*!< Strip Rendering AP based complex test.*/
+#endif
+	RGXFWIF_GPIO_VAL_LAST
+} RGXFWIF_GPIO_VAL_MODE;
+
+typedef enum
+{
+	FW_PERF_CONF_NONE = 0,
+	FW_PERF_CONF_ICACHE = 1,
+	FW_PERF_CONF_DCACHE = 2,
+	FW_PERF_CONF_POLLS = 3,
+	FW_PERF_CONF_CUSTOM_TIMER = 4,
+	FW_PERF_CONF_JTLB_INSTR = 5,
+	FW_PERF_CONF_INSTRUCTIONS = 6
+} FW_PERF_CONF;
+
+typedef enum
+{
+	FW_BOOT_STAGE_NOT_AVAILABLE = -1,
+	FW_BOOT_NOT_STARTED = 0,
+	FW_BOOT_BLDR_STARTED = 1,
+	FW_BOOT_CACHE_DONE,
+	FW_BOOT_TLB_DONE,
+	FW_BOOT_MAIN_STARTED,
+	FW_BOOT_ALIGNCHECKS_DONE,
+	FW_BOOT_INIT_DONE,
+} FW_BOOT_STAGE;
+
+typedef struct
+{
+
+	PRGXFWIF_OS_CONFIG      sOSConfig;              /*!< OS configuration data for the FW initialization */
+
+	IMG_DEV_PHYADDR         RGXFW_ALIGN sFaultPhysAddr;
+
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN sPDSExecBase;
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN sUSCExecBase;
+
+	IMG_UINT32              ui32FilterFlags;
+
+	/* Kernel CCB */
+	PRGXFWIF_CCB_CTL        psKernelCCBCtl;
+	PRGXFWIF_CCB            psKernelCCB;
+
+	/* Firmware CCB */
+	PRGXFWIF_CCB_CTL        psFirmwareCCBCtl;
+	PRGXFWIF_CCB            psFirmwareCCB;
+
+	RGXFWIF_SIGBUF_CTL	asSigBufCtl[RGXFWIF_DM_DEFAULT_MAX];
+
+	IMG_UINT32              ui32HWRDebugDumpLimit;
+
+	RGXFWIF_BIFTILINGMODE   eBifTilingMode;
+	struct
+	{
+		IMG_UINT64 uiBase;
+		IMG_UINT64 uiLen;
+		IMG_UINT64 uiXStride;
+	}                       RGXFW_ALIGN sBifTilingCfg[RGXFWIF_NUM_BIF_TILING_CONFIGS];
+
+	PRGXFWIF_RUNTIME_CFG    sRuntimeCfg;
+
+	PRGXFWIF_TRACEBUF       sTraceBufCtl;
+	PRGXFWIF_TBIBUF         sTBIBuf;
+	IMG_UINT64              RGXFW_ALIGN ui64HWPerfFilter;
+
+	PRGXFWIF_HWRINFOBUF     sRGXFWIfHWRInfoBufCtl;
+	PRGXFWIF_GPU_UTIL_FWCB  sGpuUtilFWCbCtl;
+	PRGXFWIF_REG_CFG        sRegCfg;
+	PRGXFWIF_HWPERF_CTL     sHWPerfCtl;
+
+	RGXFWIF_COUNTER_DUMP_CTL sCounterDumpCtl;
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+	RGXFWIF_FIRMWARE_GCOV_CTL sFirmwareGcovCtl;
+#endif
+
+	RGXFWIF_DEV_VIRTADDR    sAlignChecks;
+
+	/* Core clock speed at FW boot time */
+	IMG_UINT32              ui32InitialCoreClockSpeed;
+
+	/* APM latency in ms before signalling IDLE to the host */
+	IMG_UINT32              ui32ActivePMLatencyms;
+
+	/* Flag to be set by the Firmware after successful start */
+	IMG_BOOL                bFirmwareStarted;
+
+	IMG_UINT32              ui32MarkerVal;
+
+	IMG_UINT32              ui32FirmwareStartedTimeStamp;
+
+	IMG_UINT32              ui32JonesDisableMask;
+
+	/* Compatibility checks to be populated by the Firmware */
+	RGXFWIF_COMPCHECKS      sRGXCompChecks;
+
+	RGXFWIF_DMA_ADDR        sCorememDataStore;
+
+	FW_PERF_CONF            eFirmwarePerf;
+
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN sSLC3FenceDevVAddr;
+
+	RGXFWIF_DEV_VIRTADDR    sT1Stack;
+
+	RGXFWIF_PDVFS_OPP       sPDVFSOPPInfo;
+
+	/**
+	 * FW pointer to memory containing the core clock rate in Hz.
+	 * Firmware (PDVFS) updates this memory when running on the non-primary FW
+	 * thread, to communicate the current rate to the host driver.
+	 */
+	PRGXFWIF_CORE_CLK_RATE  sCoreClockRate;
+
+#if defined(PDUMP)
+	RGXFWIF_PID_FILTER      sPIDFilter;
+#endif
+
+	/* Workload Estimation Firmware CCB */
+	PRGXFWIF_CCB_CTL        psWorkEstFirmwareCCBCtl;
+	PRGXFWIF_CCB            psWorkEstFirmwareCCB;
+
+	RGXFWIF_GPIO_VAL_MODE   eGPIOValidationMode;
+	IMG_UINT32              RGXFW_ALIGN aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST];
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	/* Checkpoint CCB */
+	PRGXFWIF_CCB_CTL        psCheckpointCCBCtl;
+	PRGXFWIF_CCB            psCheckpointCCB;
+#endif
+
+	/*Used in HWPerf for decoding BVNC Features*/
+	RGX_HWPERF_BVNC         sBvncKmFeatureFlags;
+
+} UNCACHED_ALIGN RGXFWIF_INIT;
+
+
+/*!
+ *****************************************************************************
+ * Timer correlation shared data and defines
+ *****************************************************************************/
+
+typedef struct
+{
+	IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp;
+	IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp;
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp;
+
+	/* Utility variable used to convert CR timer deltas to OS timer deltas (nS),
+	 * where the deltas are relative to the timestamps above:
+	 * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */
+	IMG_UINT64 RGXFW_ALIGN ui64CRDeltaToOSDeltaKNs;
+
+	IMG_UINT32             ui32CoreClockSpeed;
+	IMG_UINT32             ui32Reserved;
+} UNCACHED_ALIGN RGXFWIF_TIME_CORR;
+
+
+/* The following macros are used to help converting FW timestamps to the Host
+ * time domain. On the FW the RGX_CR_TIMER counter is used to keep track of
+ * time; it increments by 1 every 256 GPU clock ticks, so the general
+ * formula to perform the conversion is:
+ *
+ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS,
+ *   otherwise if (scale == 10^6) then deltaOS is in uS ]
+ *
+ *             deltaCR * 256                                   256 * scale
+ *  deltaOS = --------------- * scale = deltaCR * K    [ K = --------------- ]
+ *             GPUclockspeed                                  GPUclockspeed
+ *
+ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20)
+ * to get some better accuracy and to avoid returning 0 in the integer
+ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz.
+ * This is the same as keeping K as a decimal number.
+ *
+ * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies
+ * (deltaCR * K is more or less a constant), and it's relative to the base
+ * OS timestamp sampled as a part of the timer correlation data.
+ * This base is refreshed on GPU power-on, DVFS transition and periodic
+ * frequency calibration (executed every few seconds if the FW is doing
+ * some work), so as long as the GPU is doing something and one of these
+ * events is triggered then deltaCR * K will not overflow and deltaOS will be
+ * correct.
+ */
+
+#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT  (20)
+
+#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \
+	(((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
+
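+/* Illustrative sketch (editor's addition, not part of the original header):
+ * a host-side helper could derive K as per the formula above; the helper
+ * name and the 400 MHz clock below are hypothetical:
+ *
+ *     static IMG_UINT64 CalcCRDeltaToOSDeltaKNs(IMG_UINT32 ui32GpuClockHz)
+ *     {
+ *         // K = (256 * 10^9 / GPUclockspeed) << accuracy_shift, computed
+ *         // shift-first to keep precision in the integer division
+ *         return (IMG_UINT64_C(256000000000) << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
+ *                / ui32GpuClockHz;
+ *     }
+ *
+ *     // At 400 MHz, 1000 CR ticks = 256000 GPU cycles = 640000 ns:
+ *     // RGXFWIF_GET_DELTA_OSTIME_NS(1000, CalcCRDeltaToOSDeltaKNs(400000000))
+ */
+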
+
+/*!
+ ******************************************************************************
+ * GPU Utilisation
+ *****************************************************************************/
+
+#define RGXFWIF_GPU_UTIL_STATE_ACTIVE_LOW     (0U)
+#define RGXFWIF_GPU_UTIL_STATE_IDLE           (1U)
+#define RGXFWIF_GPU_UTIL_STATE_ACTIVE_HIGH    (2U)
+#define RGXFWIF_GPU_UTIL_STATE_BLOCKED        (3U)
+#define RGXFWIF_GPU_UTIL_STATE_NUM            (4U)
+
+#define RGXFWIF_GPU_UTIL_TIME_MASK            IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)
+#define RGXFWIF_GPU_UTIL_STATE_MASK           IMG_UINT64_C(0x0000000000000003)
+
+#define RGXFWIF_GPU_UTIL_GET_TIME(word)       ((word) & RGXFWIF_GPU_UTIL_TIME_MASK)
+#define RGXFWIF_GPU_UTIL_GET_STATE(word)      ((word) & RGXFWIF_GPU_UTIL_STATE_MASK)
+
+/* The OS timestamps computed by the FW are approximations of the real time,
+ * which means they could be slightly behind or ahead of the real timer on the
+ * Host. In some cases we perform subtractions between FW-approximated
+ * timestamps and real OS timestamps, so we need a form of protection against
+ * negative results if, for instance, the FW timestamp is a bit ahead of time.
+ */
+#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \
+	((newtime) > (oldtime) ? ((newtime) - (oldtime)) : 0U)
+
+#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \
+	(RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state))
+
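+/* Illustrative sketch (editor's addition, not part of the original header):
+ * the two-bit state is packed into the low bits of the 64-bit OS timestamp,
+ * so a single 64-bit word carries both values:
+ *
+ *     IMG_UINT64 ui64Now  = ui64OSTimeNs;  // hypothetical OS timestamp in ns
+ *     IMG_UINT64 ui64Word = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64Now,
+ *                                              RGXFWIF_GPU_UTIL_STATE_IDLE);
+ *
+ *     // Reader side: split the word back into its components
+ *     IMG_UINT64 ui64Time  = RGXFWIF_GPU_UTIL_GET_TIME(ui64Word);  // ns, low 2 bits dropped
+ *     IMG_UINT64 ui64State = RGXFWIF_GPU_UTIL_GET_STATE(ui64Word); // 0..3
+ */
+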
+
+/* The timer correlation array must be big enough to ensure old entries won't be
+ * overwritten before all the HWPerf events linked to those entries are processed
+ * by the MISR. The update frequency of this array depends on how fast the system
+ * can change state (basically how small the APM latency is) and perform DVFS transitions.
+ *
+ * The minimum size is 2 (not 1) to avoid races where the FW reads an entry
+ * while the Host is updating it. With 2 entries, in the worst case the FW
+ * will read old data, which is still acceptable provided the Host is updating
+ * the timer correlation at that time.
+ */
+#define RGXFWIF_TIME_CORR_ARRAY_SIZE            256U
+#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount)  ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE)
+
+/* Make sure the timer correlation array size is a power of 2 */
+static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U,
+			  "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two");
+
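+/* Illustrative sketch (editor's addition, not part of the original header):
+ * one plausible host-side update of the circular buffer in the structure
+ * below, assuming the Host is the only writer (helper name is hypothetical):
+ *
+ *     void HostWriteTimeCorr(RGXFWIF_GPU_UTIL_FWCB *psFwCb,
+ *                            const RGXFWIF_TIME_CORR *psNew)
+ *     {
+ *         IMG_UINT32 ui32Next = psFwCb->ui32TimeCorrSeqCount + 1U;
+ *         psFwCb->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32Next)] = *psNew;
+ *         psFwCb->ui32TimeCorrSeqCount = ui32Next; // publish only once written
+ *     }
+ */
+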
+typedef struct
+{
+	RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE];
+	IMG_UINT32        ui32TimeCorrSeqCount;
+
+	/* Last GPU state + OS time of the last state update */
+	IMG_UINT64 RGXFW_ALIGN ui64LastWord;
+
+	/* Counters for the amount of time the GPU was active/idle/blocked */
+	IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM];
+
+	IMG_UINT32 ui32GpuUtilFlags; /* Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB;
+
+
+typedef struct
+{
+	IMG_UINT32           ui32RenderTargetIndex;		// Render number
+	IMG_UINT32           ui32CurrentRenderTarget;	// Index in the RTA
+	IMG_UINT32           ui32ActiveRenderTargets;	// Total active RTs
+	IMG_UINT32           ui32CumulActiveRenderTargets;   // Total active RTs since the first TA kick, for OOM
+	RGXFWIF_DEV_VIRTADDR sValidRenderTargets;  // Array of valid RT indices
+	RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders;  // Array of partial-render counts, one per render target
+	IMG_UINT32           ui32MaxRTs;   // Number of render targets in the array
+	IMG_UINT32           ui32RTACtlFlags; /* Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_RTA_CTL;
+
+typedef struct
+{
+	IMG_DEV_VIRTADDR	RGXFW_ALIGN psFreeListDevVAddr;
+	IMG_UINT64			RGXFW_ALIGN ui64CurrentDevVAddr;
+	IMG_UINT32			ui32CurrentStackTop;
+	IMG_UINT32			ui32MaxPages;
+	IMG_UINT32			ui32GrowPages;
+	IMG_UINT32			ui32CurrentPages; /* HW pages */
+	IMG_UINT32			ui32AllocatedPageCount;
+	IMG_UINT32			ui32AllocatedMMUPageCount;
+	IMG_UINT32			ui32HWRCounter;
+	IMG_UINT32			ui32FreeListID;
+	IMG_BOOL			bGrowPending;
+	IMG_UINT32			ui32ReadyPages; /* Pages that should be used only when OOM is reached */
+	IMG_UINT32			ui32FreelistFlags; /* Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_FREELIST;
+
+typedef struct
+{
+	IMG_DEV_VIRTADDR	RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap Data Store */
+	IMG_BOOL			bTACachesNeedZeroing;             /*!< Whether the RTC and TPC caches (in memory) need to be zeroed on the next first TA kick */
+	IMG_UINT32			ui32RenderTargetFlags;            /* Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_RENDER_TARGET;
+
+
+/*!
+ ******************************************************************************
+ * HWRTData
+ *****************************************************************************/
+
+/* HWRTData flags */
+#define HWRTDATA_TA_CLEAN                 (1U << 0)
+#define HWRTDATA_3D_CLEAN                 (1U << 1)
+#define HWRTDATA_HAS_LAST_TA              (1U << 2)
+#define HWRTDATA_PARTIAL_RENDERED         (1U << 3)
+#define HWRTDATA_DISABLE_TILE_REORDERING  (1U << 4)
+#define HWRTDATA_NEED_BRN65101_BLIT       (1U << 5)
+#define HWRTDATA_FIRST_BRN65101_STRIP     (1U << 6)
+#define HWRTDATA_NEED_BRN67182_2ND_RENDER (1U << 7)
+
+typedef enum
+{
+	RGXFWIF_RTDATA_STATE_NONE = 0,
+	RGXFWIF_RTDATA_STATE_KICKTA,
+	RGXFWIF_RTDATA_STATE_KICKTAFIRST,
+	RGXFWIF_RTDATA_STATE_TAFINISHED,
+	RGXFWIF_RTDATA_STATE_KICK3D,
+	RGXFWIF_RTDATA_STATE_3DFINISHED,
+	RGXFWIF_RTDATA_STATE_TAOUTOFMEM,
+	RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED,
+	/* In case of HWR, we can't set the RTDATA state to NONE,
+	 * as this would cause any TA to become a first TA.
+	 * To ensure all related TAs are skipped, we use the HWR state */
+	RGXFWIF_RTDATA_STATE_HWR,
+	RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU
+} RGXFWIF_RTDATA_STATE;
+
+typedef struct
+{
+	IMG_UINT32							ui32HWRTDataFlags;
+	RGXFWIF_RTDATA_STATE				eState;
+
+	IMG_UINT32							ui32NumPartialRenders; /*!< Number of partial renders. Used to setup ZLS bits correctly */
+	IMG_DEV_VIRTADDR					RGXFW_ALIGN psPMMListDevVAddr; /*!< MList Data Store */
+
+	IMG_UINT64							RGXFW_ALIGN ui64VCECatBase[4];
+	IMG_UINT64							RGXFW_ALIGN ui64VCELastCatBase[4];
+	IMG_UINT64							RGXFW_ALIGN ui64TECatBase[4];
+	IMG_UINT64							RGXFW_ALIGN ui64TELastCatBase[4];
+	IMG_UINT64							RGXFW_ALIGN ui64AlistCatBase;
+	IMG_UINT64							RGXFW_ALIGN ui64AlistLastCatBase;
+
+	IMG_UINT64							RGXFW_ALIGN ui64PMAListStackPointer;
+	IMG_UINT32							ui32PMMListStackPointer;
+
+	PRGXFWIF_FREELIST					RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS];
+	IMG_UINT32							aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS];
+
+	PRGXFWIF_RENDER_TARGET				psParentRenderTarget;
+
+	RGXFWIF_CLEANUP_CTL					sTACleanupState;
+	RGXFWIF_CLEANUP_CTL					s3DCleanupState;
+
+	RGXFWIF_RTA_CTL						sRTACtl;
+
+	IMG_UINT32							ui32PPPScreen;
+	IMG_UINT64							RGXFW_ALIGN ui64MultiSampleCtl;
+	IMG_UINT64							ui64FlippedMultiSampleCtl;
+	IMG_UINT32							ui32TPCStride;
+	IMG_DEV_VIRTADDR					RGXFW_ALIGN sTailPtrsDevVAddr;
+	IMG_UINT32							ui32TPCSize;
+	IMG_UINT32							ui32TEScreen;
+	IMG_UINT32							ui32MTileStride;
+	IMG_UINT32							ui32TEAA;
+	IMG_UINT32							ui32TEMTILE1;
+	IMG_UINT32							ui32TEMTILE2;
+	IMG_UINT32							ui32ISPMergeLowerX;
+	IMG_UINT32							ui32ISPMergeLowerY;
+	IMG_UINT32							ui32ISPMergeUpperX;
+	IMG_UINT32							ui32ISPMergeUpperY;
+	IMG_UINT32							ui32ISPMergeScaleX;
+	IMG_UINT32							ui32ISPMergeScaleY;
+	IMG_DEV_VIRTADDR					RGXFW_ALIGN sMacrotileArrayDevVAddr;
+	IMG_DEV_VIRTADDR					RGXFW_ALIGN sRgnHeaderDevVAddr;
+	IMG_DEV_VIRTADDR					RGXFW_ALIGN sRTCDevVAddr;
+	IMG_UINT64							RGXFW_ALIGN uiRgnHeaderSize;
+	IMG_UINT32							ui32ISPMtileSize;
+#if defined(RGX_FIRMWARE)
+	struct RGXFWIF_FWCOMMONCONTEXT_*	RGXFW_ALIGN psOwnerTA;
+#else
+	RGXFWIF_DEV_VIRTADDR				RGXFW_ALIGN pui32OwnerTANotUsedByHost;
+#endif
+} UNCACHED_ALIGN RGXFWIF_HWRTDATA;
+
+#endif /* RGX_FWIF_KM_H */
+
+/******************************************************************************
+ End of file (rgx_fwif_km.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_resetframework.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_resetframework.h
new file mode 100644
index 0000000..82e7fc1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_resetframework.h
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@File			rgx_fwif_resetframework.h
+@Title         	Post-reset work-around framework FW interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_FWIF_RESETFRAMEWORK_H)
+#define RGX_FWIF_RESETFRAMEWORK_H
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+
+typedef struct
+{
+#if RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT == 2
+	IMG_UINT64	uCDMReg_CDM_CB_QUEUE;
+	IMG_UINT64	uCDMReg_CDM_CB_BASE;
+	IMG_UINT64	uCDMReg_CDM_CB;
+#else
+	IMG_UINT64  uCDMReg_CDM_CTRL_STREAM_BASE;
+#endif
+} RGXFWIF_RF_REGISTERS;
+
+#define RGXFWIF_RF_FLAG_ENABLE 0x00000001U /*!< enables the reset framework in the firmware */
+
+typedef struct
+{
+	IMG_UINT32           ui32Flags;
+
+	/* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */
+	RGXFWIF_RF_REGISTERS RGXFW_ALIGN sFWRegisters;
+
+} RGXFWIF_RF_CMD;
+
+/* to opaquely allocate and copy in the kernel */
+#define RGXFWIF_RF_CMD_SIZE  sizeof(RGXFWIF_RF_CMD)
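+
+/* Illustrative sketch (editor's addition, not part of the original header):
+ * because sFWRegisters is the last member, the kernel can treat the command
+ * as an opaque blob of RGXFWIF_RF_CMD_SIZE bytes, e.g. (allocator name is
+ * whatever the caller uses):
+ *
+ *     RGXFWIF_RF_CMD *psRFCmd = AllocMem(RGXFWIF_RF_CMD_SIZE);
+ *     psRFCmd->ui32Flags = RGXFWIF_RF_FLAG_ENABLE;
+ *     // ... fill psRFCmd->sFWRegisters, then copy the whole blob to the FW
+ */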
+
+#endif /* RGX_FWIF_RESETFRAMEWORK_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_sf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_sf.h
new file mode 100644
index 0000000..5feafac
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_sf.h
@@ -0,0 +1,738 @@
+/*************************************************************************/ /*!
+@File			rgx_fwif_sf.h
+@Title          RGX firmware interface string format specifiers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the rgx firmware logging messages. The following
+				list contains the messages the firmware prints. Changing
+				anything but the first column in a line (even fixing spelling
+				mistakes in the strings) will break compatibility with log
+				files created with older/newer firmware versions.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_FWIF_SF_H
+#define RGX_FWIF_SF_H
+
+/*****************************************************************************
+ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you
+ *  		 WILL BREAK fw tracing message compatibility with previous
+ *  		 fw versions. Only add new ones, if so required.
+ ****************************************************************************/
+/* Available log groups */
+#define RGXFW_LOG_SFGROUPLIST       \
+	X(RGXFW_GROUP_NULL,NULL)        \
+	X(RGXFW_GROUP_MAIN,MAIN)        \
+	X(RGXFW_GROUP_CLEANUP,CLEANUP)  \
+	X(RGXFW_GROUP_CSW,CSW)          \
+	X(RGXFW_GROUP_PM, PM)           \
+	X(RGXFW_GROUP_RTD,RTD)          \
+	X(RGXFW_GROUP_SPM,SPM)          \
+	X(RGXFW_GROUP_MTS,MTS)          \
+	X(RGXFW_GROUP_BIF,BIF)          \
+	X(RGXFW_GROUP_MISC,MISC)        \
+	X(RGXFW_GROUP_POW,POW)          \
+	X(RGXFW_GROUP_HWR,HWR)          \
+	X(RGXFW_GROUP_HWP,HWP)          \
+	X(RGXFW_GROUP_RPM,RPM)          \
+	X(RGXFW_GROUP_DMA,DMA)          \
+	X(RGXFW_GROUP_DBG,DBG)
+
+enum RGXFW_LOG_SFGROUPS {
+#define X(A,B) A,
+	RGXFW_LOG_SFGROUPLIST
+#undef X
+};
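+
+/* Illustrative sketch (editor's addition, not part of the original header):
+ * the same X-macro can be expanded again with a different definition of X,
+ * e.g. to build a parallel table of group name strings:
+ *
+ *     static const IMG_CHAR *const apszGroupName[] = {
+ *     #define X(A,B) #B,
+ *         RGXFW_LOG_SFGROUPLIST
+ *     #undef X
+ *     };
+ *     // apszGroupName[RGXFW_GROUP_MAIN] == "MAIN"
+ */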
+
+#define IMG_SF_STRING_MAX_SIZE 256
+
+typedef struct {
+	IMG_UINT32 ui32Id;
+	IMG_CHAR sName[IMG_SF_STRING_MAX_SIZE];
+} RGXFW_STID_FMT; /* pair of string format id and its format string */
+
+typedef struct {
+	IMG_UINT32 ui32Id;
+	IMG_CHAR   *psName;
+} RGXKM_STID_FMT; /* pair of string format id and a pointer to its format string */
+
+/* Table of String Format specifiers, the group they belong to and the number
+ * of arguments each expects. X-macro style macros are used to generate what
+ * is needed without requiring hand editing.
+ *
+ * id		: id within a group
+ * gid		: group id
+ * Sym name	: name of enumerations used to identify message strings
+ * String	: Actual string
+ * #args	: number of arguments the string format requires
+ */
+#define RGXFW_LOG_SFIDLIST \
+/*id, gid,              id name,        string,                           # arguments */ \
+X( 0, RGXFW_GROUP_NULL, RGXFW_SF_FIRST, "You should not use this string\n", 0) \
+\
+X( 1,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8X @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d\n", 6) \
+X( 2,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%d, HWRTData1State=%d\n", 2) \
+X( 3,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8X @ %d, CSW resume:%d, prio: %d\n", 4) \
+X( 4,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished\n", 0) \
+X( 5,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8X @ %d, prio: %d\n", 3) \
+X( 6,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED, "Compute finished\n", 0) \
+X( 7,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8X @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d\n", 7) \
+X( 8,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED, "TA finished\n", 0) \
+X( 9,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render\n", 0) \
+X(10,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render\n", 0) \
+X(11,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OOM, "Out of memory! Context 0x%08x, HWRTData 0x%x\n", 2) \
+X(12,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8X @ %d, prio:%d\n", 3) \
+X(13,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TLA_FINISHED, "TLA finished\n", 0) \
+X(14,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8X\n", 3) \
+X(16,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx %08.8X @ %d\n", 2) \
+X(17,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK, "UFO Check: [%08.8X] is %08.8X requires %08.8X\n", 3) \
+X(18,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded\n", 0) \
+X(19,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [%08.8X] is %08.8X requires >= %08.8X\n", 3) \
+X(20,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx %08.8X\n", 1) \
+X(21,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [%08.8X] is %08.8X requires >= ????????, [%08.8X] is ???????? requires %08.8X\n", 4) \
+X(22,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx %08.8X @ %d\n", 2) \
+X(23,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE, "UFO Update: [%08.8X] = %08.8X\n", 2) \
+X(24,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %d of:\n", 1) \
+X(25,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%d, FWCtx: %08.8X\n", 2) \
+X(26,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%d, FWCtx: %08.8X, MemCtx: %08.8X\n", 3) \
+X(27,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW\n", 0) \
+X(28,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.\n", 0) \
+X(29,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u\n", 1) \
+X(30,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %d failed: host = %X, fw = %X\n", 3) \
+X(31,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered\n", 0) \
+X(32,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler\n", 2) \
+X(33,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8X\n", 1) \
+X(34,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_STORE, "Store breakpoint state\n", 0) \
+X(35,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_UNSET, "Unsetting BP Registers\n", 0) \
+X(36,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NONZERO_RT, "Active RTs expected to be zero, actually %u\n", 1) \
+X(37,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_PRESENT, "RTC present, %u active render targets\n", 1) \
+X(38,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_EST_POWER, "Estimated Power 0x%x\n", 1) \
+X(39,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_TARGET, "RTA render target %u\n", 1) \
+X(40,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u\n", 2) \
+X(41,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SIZES_CHECK, "HWR sizes check %d failed: addresses = %d, sizes = %d\n", 3) \
+X(42,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%X\n", 1) \
+X(43,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %d, Units: 0x%08.8X\n", 2) \
+X(44,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %d to %d\n", 2) \
+X(45,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down\n", 0) \
+X(46,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)\n", 2) \
+X(47,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer used for partial render (store)\n", 0) \
+X(48,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)\n", 0) \
+X(49,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%d FWCtx: %08.8X\n", 2) \
+X(50,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d\n", 7) \
+X(51,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%d checker: MList[%d] = 0x%08x\n", 3) \
+X(52,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%d OK\n", 1) \
+X(53,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%d is empty\n", 1) \
+X(54,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%d checker: CatBase TE=0x%08X%08X, VCE=0x%08x%08X, ALIST=0x%08x%08X, IsTA=%d\n", 8) \
+X(55,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_40480KICK, "3D OQ flush kick\n", 0) \
+X(56,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device\n", 1) \
+X(57,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED2, "Setting breakpoint: Addr 0x%08.8X DM%u\n", 2) \
+X(58,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8X @ %d, prio: %d\n", 3) \
+X(59,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_FINISHED, "RDM finished on context %u\n", 1) \
+X(60,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8X @ %d, prio: %d\n", 3) \
+X(61,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SHG_FINISHED, "SHG finished\n", 0) \
+X(62,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBA_FINISHED, "FBA finished on context %u\n", 1) \
+X(63,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed\n", 0) \
+X(64,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%d start\n", 1) \
+X(65,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%d complete\n", 1) \
+X(66,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FC_CCB_UPDATE, "FC%u cCCB Woff update = %u\n", 2) \
+X(67,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8X @ %d, prio: %d, Frame Context: %d\n", 4) \
+X(68,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIDEKICK_INIT, "Sidekick init\n", 0) \
+X(69,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RD_INIT, "Rascal+Dusts init (# dusts mask: %X)\n", 1) \
+X(70,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d\n", 3) \
+X(71,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x\n", 3) \
+X(72,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %d. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)\n", 1) \
+X(73,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.\n", 0) \
+X(74,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP, "GPU has locked up (see HWR logs for more info)\n", 0) \
+X(75,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)\n", 0) \
+X(76,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll (see HWR logs)\n", 0) \
+X(77,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DOPPLER_OOM, "Doppler out of memory event for FC %u\n", 1) \
+X(78,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [%08.8X] is %08.8X requires >= %08.8X\n", 3) \
+X(79,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [%08.8X] is %08.8X requires %08.8X\n", 3) \
+X(80,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [%08.8X]\n", 1) \
+X(81,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx %08.8X @ %d\n", 2) \
+X(82,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE, "UFO Update: [%08.8X] = %08.8X\n", 2) \
+X(83,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8X @ %d\n", 2) \
+X(84,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM, "RPM Out of memory! Context 0x%08x, SH requestor %d\n", 2) \
+X(85,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD, "Discard RTU due to RPM abort: FWCtx 0x%08.8X @ %d, prio: %d, Frame Context: %d\n", 4) \
+X(86,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)\n", 4) \
+X(87,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN, "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)\n", 4) \
+X(88,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)\n", 4) \
+X(89,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 3) \
+X(90,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 3) \
+X(91,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %d\n", 1) \
+X(92,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %d\n", 1) \
+X(93,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %d Hz\n", 1) \
+X(94,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8X, prio: %d, queue: 0x%08X%08X (Roff = %u, Woff = %u, Size = %u)\n", 7) \
+X(95,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE, "Signal check failed, Required Data: 0x%X, Address: 0x%08x%08x\n", 3) \
+X(96,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x\n", 5) \
+X(97,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNALED, "Signalled the previously waiting FWCtx: 0x%08.8X, OSId: %u, Signal Address: 0x%08x%08x\n", 4) \
+X(98,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED_DEPRECATED, "Compute stalled\n", 0) \
+X(99,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED, "Compute stalled (Roff = %u, Woff = %u, Size = %u)\n", 3) \
+X(100, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED_FROM_STALL, "Compute resumed (Roff = %u, Woff = %u, Size = %u)\n", 3) \
+X(101, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x\n", 4) \
+X(102, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_OSID_DM, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x\n", 4) \
+X(103, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM, "DM: %u signal check failed\n", 1) \
+X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8X @ %d, prio:%d\n", 3) \
+X(105, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED, "TDM finished\n", 0) \
+X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS, "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]:  0x%08X 0x%08X)\n", 4) \
+X(107, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_HIT_DEPRECATED, "BRN 54141 HIT\n", 0) \
+X(108, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA_DEPRECATED, "BRN 54141 Dummy TA kicked\n", 0) \
+X(109, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_RESUME_TA_DEPRECATED, "BRN 54141 resume TA\n", 0) \
+X(110, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DOUBLE_HIT_DEPRECATED, "BRN 54141 double hit after applying WA\n", 0) \
+X(111, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DUMMY_TA_VDM_BASE_DEPRECATED, "BRN 54141 Dummy TA VDM base address: 0x%08x%08x\n", 2) \
+X(112, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%X, Current Data: 0x%X, Address: 0x%08x%08x\n", 4) \
+X(113, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled (Roff = %u, Woff = %u)\n", 2) \
+X(114, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx %08.8X\n", 1) \
+X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing OSid %d's priority from %u to %u\n", 3) \
+X(116, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed\n", 0) \
+X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 7) \
+X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 7) \
+X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8X @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 11) \
+X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8X @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 10) \
+X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8X @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 8) \
+X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, ext:0x%08X, int:0x%08X)\n", 6) \
+X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU, "Kick RTU: FWCtx 0x%08.8X @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 8) \
+X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG, "Kick SHG: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 7) \
+X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %d.\n", 1) \
+X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %d.\n", 1) \
+X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %d.\n", 1) \
+X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%d failed\n", 1) \
+X(130, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 2) \
+X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)\n", 3) \
+X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs).\n", 1) \
+X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET, "HCS changed to %d ms\n", 1) \
+X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)\n", 4) \
+X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, "  Phantom %d: USCTiles=%d\n", 2) \
+X(136, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_OFF, "Isolation grouping is disabled\n", 0) \
+X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF, "Isolation group configured with a priority threshold of %d\n", 1) \
+X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE_DEPRECATED, "OS %d has come online\n", 1) \
+X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE_DEPRECATED, "OS %d has gone offline\n", 1) \
+X(140, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8X, OSId: %u, Signal Address: 0x%08x%08x\n", 4) \
+X(141, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS, "TDM Queue: FWCtx 0x%08.8X, prio: %d, queue: 0x%08X%08X (Roff = %u, Woff = %u, Size = %u)\n", 7) \
+X(142, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8X, queue: 0x%08X%08X (Roff = %u becomes 0, Woff = %u, Size = %u)\n", 6) \
+X(143, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8X, queue: 0x%08X%08X (Roff = %u, StreamStartOffset = %u)\n", 5) \
+X(144, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIDEKICK_DEINIT, "Sidekick deinit\n", 0) \
+X(145, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RD_DEINIT, "Rascal+Dusts deinit\n", 0) \
+X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %d with config flags 0x%08x\n", 2) \
+X(147, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHKPT_LIMIT, "Fence checkpoint UFO limit exceeded %d/%d\n", 2) \
+X(148, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store\n", 0) \
+X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x\n", 3) \
+X(150, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_COMMAND, "Unknown Command (eCmdType=0x%08x)\n", 1) \
+X(151, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx %08.8X @ %d [%08.8X] = %08.8X\n", 4) \
+X(152, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx %08.8X @ %d [%08.8X] = %08.8X, reason %d\n", 5) \
+X(153, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66075_CHECK, "TDM context switch check: Roff %u points to 0x%08x, Match=%u\n", 3) \
+X(154, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS_WOUT_CHKPT, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x\n", 6) \
+X(155, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWIRQ, "FW IRQ # %u @ %u\n", 2) \
+X(156, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8X DM%u usc_breakpoint_ctrl_dm = %u\n", 3) \
+X(157, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB, "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x\n", 3) \
+X(158, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x\n", 3) \
+X(159, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT, "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x\n", 4) \
+X(160, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_INVALID, "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8X DM%u usc_breakpoint_ctrl_dm = %u\n", 4) \
+X(161, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID, "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x\n", 3) \
+X(162, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x\n", 4) \
+X(163, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u\n", 4) \
+X(164, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FULL_CHPTCCB, "Checkpoint CCB for OSid %u is full, signalling host for full check state (Roff = %u, Woff = %u)\n", 3) \
+X(165, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x\n", 8) \
+X(166, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_STATE_CHANGE, "OSid %d state transition request: from %d to %d (0-stopped 1-ready 2-active 3-offloading). Status %d (1-ok 0-fail)\n", 4) \
+X(167, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_STALE_KCCB_CMDS, "OSid %u has %u stale commands in its KCCB\n", 2) \
+X(170, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_KCCB_COMMAND, "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x\n", 7) \
+X(171, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND1, "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u\n", 10) \
+X(172, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND2, "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u\n", 10) \
+\
+X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %d\n", 2) \
+X( 2, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u\n", 1) \
+X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK, "Irq Task DM = %u, Breq = %d, SBIrq = 0x%X\n", 3) \
+X( 4, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u\n", 1) \
+X( 5, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_BG_ALL, "Kick MTS Bg task DM=All\n", 0) \
+X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%d\n", 1) \
+X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %d\n", 2) \
+X( 8, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x\n", 2) \
+X( 9, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = %x, cmd = %x\n", 3) \
+X(10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x\n", 3) \
+X(11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE, "Ready queue debug DM = %u, celltype = %d, OSid = %u\n", 3) \
+X(12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task DM = %u, counted = %d, OSid = %u\n", 3) \
+X(13, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE, "Bg Task complete DM Bitfield: %u\n", 1) \
+X(14, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.\n", 0) \
+X(15, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = %x, due to USC breakpoint hit by OS ID = %d PID = %d.\n", 7) \
+\
+X( 1, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned\n", 1) \
+X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d\n", 3) \
+X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP, "HWRTData [0x%08x] for DM=%d, received cleanup request\n", 2) \
+X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d\n", 3) \
+X( 5, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy\n", 2) \
+X( 6, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned\n", 2) \
+X( 7, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned\n", 1) \
+X( 8, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned\n", 1) \
+X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d\n", 3) \
+X(10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d\n", 4) \
+X(11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP, "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request\n", 2) \
+X(12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d\n", 3) \
+X(13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d\n", 4) \
+X(14, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned\n", 2) \
+X(15, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_INVALID_REQUEST, "Discarding invalid cleanup request of type 0x%x\n", 1) \
+\
+X( 1, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8X needs resume\n", 1) \
+X( 2, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME, "*** CDM FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X\n", 3) \
+X( 3, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SHARED, "CDM FWCtx shared alloc size load 0x%X\n", 1) \
+X( 4, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_COMPLETE, "*** CDM FWCtx store complete\n", 0) \
+X( 5, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_START, "*** CDM FWCtx store start\n", 0) \
+X( 6, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SOFT_RESET, "CDM Soft Reset\n", 0) \
+X( 7, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_NEEDS_RESUME, "3D FWCtx 0x%08.8X needs resume\n", 1) \
+X( 8, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME, "*** 3D FWCtx 0x%08.8X resume\n", 1) \
+X( 9, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_COMPLETE, "*** 3D context store complete\n", 0) \
+X(10, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED, "3D context store pipe state: 0x%08.8X 0x%08.8X 0x%08.8X\n", 3) \
+X(11, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START, "*** 3D context store start\n", 0) \
+X(12, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_TQ_RESUME, "*** 3D TQ FWCtx 0x%08.8X resume\n", 1) \
+X(13, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_NEEDS_RESUME, "TA FWCtx 0x%08.8X needs resume\n", 1) \
+X(14, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_RESUME, "*** TA FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X\n", 3) \
+X(15, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_SHARED, "TA context shared alloc size store 0x%X, load 0x%X\n", 2) \
+X(16, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_COMPLETE, "*** TA context store complete\n", 0) \
+X(17, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_START, "*** TA context store start\n", 0) \
+X(18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED_AGAIN, "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d\n", 3) \
+X(19, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u\n", 2) \
+X(20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%d state: 0x%08.8X\n", 2) \
+X(21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%d state: 0x%08.8X\n", 2) \
+X(22, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_NEEDS_RESUME, "SHG FWCtx 0x%08.8X needs resume\n", 1) \
+X(23, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_RESUME, "*** SHG FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X\n", 3) \
+X(24, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_SHARED, "SHG context shared alloc size store 0x%X, load 0x%X\n", 2) \
+X(25, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_COMPLETE, "*** SHG context store complete\n", 0) \
+X(26, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_START, "*** SHG context store start\n", 0) \
+X(27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %d\n", 1) \
+X(28, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context store hit ctrl stream terminate. Skip resume.\n", 0) \
+X(29, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X, shader state %u\n", 4) \
+X(30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%d->%d)\n", 2) \
+X(31, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_52563_HIT, "TA context store hit BRN 52563: vertex store tasks outstanding\n", 0) \
+X(32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %d)\n", 1) \
+X(33, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_DEFERRED_DEPRECATED, "TA context store deferred due to BRN 54141.", 0) \
+X(34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u\n", 7) \
+X(35, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_START, "*** TDM context store start\n", 0) \
+X(36, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete\n", 0) \
+X(37, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_NEEDS_RESUME, "TDM context needs resume, header [%08.8X, %08.8X]\n", 2) \
+X(38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u\n", 8) \
+X(39, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe %2d (%2d) state: 0x%08.8X\n", 3) \
+X(40, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe %2d (%2d) state: 0x%08.8X\n", 3) \
+\
+X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE, "Activate MemCtx=0x%08x BIFreq=%d secure=%d\n", 3) \
+X( 2, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x\n", 1) \
+X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC, "Alloc PC reg %d\n", 1) \
+X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_GRAB, "Grab reg %d refcount now %d\n", 2) \
+X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_UNGRAB, "Ungrab reg %d refcount now %d\n", 2) \
+X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG, "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x\n", 6) \
+X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST, "Trust enabled:%d, for BIFreq=%d\n", 2) \
+X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG, "BIF Tiling Cfg %d base %08x%08x len %08x%08x enable %d stride %d --> %08x%08x\n", 9) \
+X( 9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now %08x %08x\n", 4) \
+X(10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %d to OSID1, Context  %d, Register's contents are now %04x\n", 3) \
+X(11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32OSid = %u, Catbase = %u, Reg Address = %x, Reg index = %u, Bitshift index = %u, Val = %08x%08x\n", 7) \
+X(12, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u\n", 5) \
+X(13, RGXFW_GROUP_BIF, RGXFW_SF_BIF_UNMAP_GPU_MEMORY, "Unmap GPU memory (event status 0x%x)\n", 1) \
+\
+X( 1, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_WRITE, "GPIO write 0x%02x\n", 1) \
+X( 2, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_READ, "GPIO read 0x%02x\n", 1) \
+X( 3, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ENABLED, "GPIO enabled\n", 0) \
+X( 4, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_DISABLED, "GPIO disabled\n", 0) \
+X( 5, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_STATUS, "GPIO status=%d (0=OK, 1=Disabled)\n", 1) \
+X( 6, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%d byte(s))\n", 2) \
+X( 7, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_WRITE, "GPIO_AP: Write address=0x%02x (%d byte(s))\n", 2) \
+X( 8, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_TIMEOUT, "GPIO_AP timeout!\n", 0) \
+X( 9, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. GPIO status=%d (0=OK, 1=Disabled)\n", 1) \
+X(10, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ALREADY_READ, "GPIO already read 0x%02x\n", 1) \
+X(11, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %d available returned %d\n", 2) \
+X(12, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %d\n", 1) \
+X(13, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %d (after %d ticks)\n", 2) \
+X(14, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)\n", 2) \
+X(15, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %d in frame\n", 1) \
+X(16, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %d is a new frame\n", 1) \
+X(17, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)\n", 1) \
+X(18, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %d\n", 1) \
+X(19, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %d)\n", 1) \
+X(20, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %d)\n", 1) \
+X(21, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_FAULT, "SR: Strip Render fault (buffer %d)\n", 1) \
+\
+X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)\n", 10) \
+X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d, mmu:%d\n", 8) \
+X( 3, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_DEPRECATED, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)\n", 14) \
+X( 4, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_DEPRECATED, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)\n", 14) \
+X( 5, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_COMPLETE, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x\n", 5) \
+X( 6, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_DENIED, "Grow for freelist ID=0x%08x denied by host\n", 1) \
+X( 7, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x\n", 5) \
+X( 8, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed\n", 1) \
+X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d\n", 2) \
+X(10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: %08x, status(1:success, 0:fail): %d\n", 2) \
+X(11, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X(12, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X(13, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X(14, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X(15, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x\n", 1) \
+X(16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d\n", 7) \
+X(17, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)\n", 10) \
+X(18, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)\n", 10) \
+\
+X( 1, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_DYNAMIC_STATUS, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x\n", 3) \
+X( 2, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_STATIC_STATUS, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x\n", 3) \
+X( 3, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_GROW, "RPM request failed. Waiting for freelist grow.\n", 0) \
+X( 4, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_ABORT, "RPM request failed. Aborting the current frame.\n", 0) \
+X( 5, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW, "RPM waiting for pending grow on freelist 0x%08x\n", 1) \
+X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW, "Request freelist grow [0x%08x] current pages %d, grow size %d\n", 3) \
+X( 7, RGXFW_GROUP_RPM, RGXFW_SF_RPM_FREELIST_LOAD, "Freelist load: SHF = 0x%08x, SHG = 0x%08x\n", 2) \
+X( 8, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08X.%08X\n", 2) \
+X( 9, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08X.%08X\n", 2) \
+X(10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST, "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)\n", 5) \
+X(11, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_RESTART, "Restarting SHG\n", 0) \
+X(12, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_ABORTED, "Grow failed, aborting the current frame.\n", 0) \
+X(13, RGXFW_GROUP_RPM, RGXFW_SF_RPM_ABORT_COMPLETE, "RPM abort complete on HWFrameData [0x%08x].\n", 1) \
+X(14, RGXFW_GROUP_RPM, RGXFW_SF_RPM_CLEANUP_NEEDS_ABORT, "RPM freelist cleanup [0x%08x] requires abort to proceed.\n", 1) \
+X(15, RGXFW_GROUP_RPM, RGXFW_SF_RPM_RPM_PT, "RPM page table base register: 0x%08X.%08X\n", 2) \
+X(16, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_ABORT, "Issuing RPM abort.\n", 0) \
+X(17, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_TOGGLE_CHECK_FULL, "RPM OOM received but toggle bits indicate free pages available\n", 0) \
+X(18, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_HW_TIMEOUT, "RPM hardware timeout. Unable to process OOM event.\n", 0) \
+X(19, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_LOAD, "SHF FL (0x%08x) load, FPL: 0x%08X.%08X, roff: 0x%08X, woff: 0x%08X\n", 5) \
+X(20, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_LOAD, "SHG FL (0x%08x) load, FPL: 0x%08X.%08X, roff: 0x%08X, woff: 0x%08X\n", 5) \
+X(21, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_STORE, "SHF FL (0x%08x) store, roff: 0x%08X, woff: 0x%08X\n", 3) \
+X(22, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_STORE, "SHG FL (0x%08x) store, roff: 0x%08X, woff: 0x%08X\n", 3) \
+\
+X( 1, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u\n", 2) \
+X( 2, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u\n", 2) \
+X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to %X, FL different between TA/3D: local: %d, global: %d, mmu: %d\n", 4) \
+X( 4, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_3D, "Loading VFP table 0x%08x%08x for 3D\n", 2) \
+X( 5, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_TA, "Loading VFP table 0x%08x%08x for TA\n", 2) \
+X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 10) \
+X( 7, RGXFW_GROUP_RTD, RGXFW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store\n", 0) \
+X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%d: Load=No, Store=No\n", 2) \
+X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No\n", 2) \
+X(10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes\n", 3) \
+X(11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes\n", 3) \
+X(12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x\n", 5) \
+X(13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 10) \
+X(14, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u\n", 2) \
+X(15, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u\n", 2) \
+X(16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+X(17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load  Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+X(18, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG, "Freelist 0x%X RESET!!!!!!!!\n", 1) \
+X(19, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG2, "Freelist 0x%X stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 5) \
+X(20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%X type: %d (0:local,1:global,2:mmu) on HW context %u\n", 3) \
+X(21, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)\n", 1) \
+X(22, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed\n", 0) \
+X(23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d\n", 3) \
+X(24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)\n", 3) \
+X(25, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)\n", 8) \
+X(26, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_LOADED_DEPRECATED, "3D RTData 0x%08x loaded on HW context %u\n", 2) \
+X(27, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)\n", 4) \
+X(28, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=%d, HWRTData1State=%d\n", 2) \
+X(29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to %X, FL different between TA/3D: local: %d, global: %d\n", 3) \
+X(30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%X type: %d (0:local,1:global) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+X(31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load  Freelist 0x%X type: %d (0:local,1:global) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+X(32, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)\n", 5) \
+\
+X( 1, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_DEPRECATED, "Force Z-Load for partial render\n", 0) \
+X( 2, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_DEPRECATED, "Force Z-Store for partial render\n", 0) \
+X( 3, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_LOCAL, "3D MemFree: Local FL 0x%08x\n", 1) \
+X( 4, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_MMU, "3D MemFree: MMU FL 0x%08x\n", 1) \
+X( 5, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_GLOBAL, "3D MemFree: Global FL 0x%08x\n", 1) \
+X( 6, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [%08.8X] is %08.8X requires %08.8X, HardwareSync Fence [%08.8X] is %08.8X requires %08.8X\n", 6) \
+X( 7, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x\n", 3) \
+X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL, "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x\n", 5) \
+X( 9, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_AVOIDED, "Partial render avoided\n", 0) \
+X(10, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_DISCARDED, "Partial render discarded\n", 0) \
+X(11, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_FINISHED, "Partial Render finished\n", 0) \
+X(12, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DBG, "SPM Owner = 3D-BG\n", 0) \
+X(13, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DIRQ, "SPM Owner = 3D-IRQ\n", 0) \
+X(14, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_NONE, "SPM Owner = NONE\n", 0) \
+X(15, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TABG, "SPM Owner = TA-BG\n", 0) \
+X(16, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TAIRQ, "SPM Owner = TA-IRQ\n", 0) \
+X(17, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_ADDRESS, "ZStore address 0x%08x%08x\n", 2) \
+X(18, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SSTORE_ADDRESS, "SStore address 0x%08x%08x\n", 2) \
+X(19, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_ADDRESS, "ZLoad address 0x%08x%08x\n", 2) \
+X(20, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SLOAD_ADDRESS, "SLoad address 0x%08x%08x\n", 2) \
+X(21, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_ZSBUFFER_DEPRECATED, "No deferred ZS Buffer provided\n", 0) \
+X(22, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POPULATED, "ZS Buffer successfully populated (ID=0x%08x)\n", 1) \
+X(23, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POP_UNNEEDED_DEPRECATED, "No need to populate ZS Buffer (ID=0x%08x)\n", 1) \
+X(24, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOPULATED, "ZS Buffer successfully unpopulated (ID=0x%08x)\n", 1) \
+X(25, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOP_UNNEEDED_DEPRECATED, "No need to unpopulate ZS Buffer (ID=0x%08x)\n", 1) \
+X(26, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_DEPRECATED, "Send ZS-Buffer backing request to host (ID=0x%08x)\n", 1) \
+X(27, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_DEPRECATED, "Send ZS-Buffer unbacking request to host (ID=0x%08x)\n", 1) \
+X(28, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)\n", 1) \
+X(29, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer unbacking request. Previous request still pending (ID=0x%08x)\n", 1) \
+X(30, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)\n", 1) \
+X(31, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for SBuffer to be backed (ID=0x%08x)\n", 1) \
+X(32, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_NONE, "SPM State = none\n", 0) \
+X(33, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_BLOCKED, "SPM State = PR blocked\n", 0) \
+X(34, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_GROW, "SPM State = wait for grow\n", 0) \
+X(35, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_HW, "SPM State = wait for HW\n", 0) \
+X(36, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_RUNNING, "SPM State = PR running\n", 0) \
+X(37, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_AVOIDED, "SPM State = PR avoided\n", 0) \
+X(38, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_EXECUTED, "SPM State = PR executed\n", 0) \
+X(39, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FREELIST_MATCH, "3DMemFree matches freelist 0x%08x (FL type = %u)\n", 2) \
+X(40, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDetected flag\n", 0) \
+X(41, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_PENDING_GROW, "Wait for pending grow on Freelist 0x%08x\n", 1) \
+X(42, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_FAILED, "ZS Buffer failed to be populated (ID=0x%08x)\n", 1) \
+X(43, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FL_GROW_DEBUG, "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u\n", 5) \
+X(44, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u\n", 4) \
+X(45, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE_DEPRECATED, "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u\n", 5) \
+X(46, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%d) Buffer provided\n", 1) \
+X(47, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_POP_UNNEEDED, "No need to populate PR Buffer (ID=0x%08x)\n", 1) \
+X(48, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNPOP_UNNEEDED, "No need to unpopulate PR Buffer (ID=0x%08x)\n", 1) \
+X(49, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST, "Send PR Buffer backing request to host (ID=0x%08x)\n", 1) \
+X(50, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST, "Send PR Buffer unbacking request to host (ID=0x%08x)\n", 1) \
+X(51, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST_PENDING, "Don't send PR Buffer backing request. Previous request still pending (ID=0x%08x)\n", 1) \
+X(52, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST_PENDING, "Don't send PR Buffer unbacking request. Previous request still pending (ID=0x%08x)\n", 1) \
+X(53, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)\n", 2) \
+X(54, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE, "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u\n", 4) \
+\
+X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED_AGAIN, "Check Pow state DM%d int: 0x%X, ext: 0x%X, pow flags: 0x%X\n", 4) \
+X( 2, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_IDLE, "Sidekick idle (might be powered down). Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %d), DM%d, pow flags: 0x%8.8X\n", 3) \
+X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %d %d %d %d\n", 4) \
+X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %d, Any RD-DM Active? %d\n", 2) \
+X( 6, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_OFF, "Sidekick ready to be powered down. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %d, Units: 0x%08.8X\n", 2) \
+X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %d (Power flags=%d)\n", 2) \
+X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %d to %d\n", 2) \
+X(11, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init\n", 0) \
+X(12, RGXFW_GROUP_POW, RGXFW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: %X)\n", 1) \
+X(13, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.\n", 0) \
+X(14, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.\n", 0) \
+X(15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %d, Any RD-DM Active? %d\n", 2) \
+X(16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %d, TLA-DM Active? %d\n", 2) \
+X(17, RGXFW_GROUP_POW, RGXFW_SF_POW_BRN37566, "Request power up due to BRN37566. Pow stat int: 0x%X\n", 1) \
+X(18, RGXFW_GROUP_POW, RGXFW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%X, ext: 0x%X, pow flags: 0x%X\n", 3) \
+X(19, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%X\n", 1) \
+X(20, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%X\n", 1) \
+X(21, RGXFW_GROUP_POW, RGXFW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X(22, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X(23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %dms. Core clock: %d Hz\n", 2) \
+X(24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%X, %d dusts powered.\n", 2) \
+X(25, RGXFW_GROUP_POW, RGXFW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.\n", 0) \
+X(26, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_ENERGY_DEPRECATED, "Power monitor: Estimate of dynamic energy %u\n", 1) \
+X(27, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state: Int: 0x%X, Ext: 0x%X, Pow flags: 0x%X\n", 3) \
+X(28, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_DEADLINE, "Proactive DVFS: New deadline, time = 0x%08x%08x\n", 2) \
+X(29, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_WORKLOAD, "Proactive DVFS: New workload, cycles = 0x%08x%08x\n", 2) \
+X(30, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_CALCULATE, "Proactive DVFS: Proactive frequency calculated = 0x%08x\n", 1) \
+X(31, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UTILISATION, "Proactive DVFS: Reactive utilisation = 0x%08x\n", 1) \
+X(32, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_REACT, "Proactive DVFS: Reactive frequency calculated = 0x%08x%08x\n", 2) \
+X(33, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND_DEPRECATED, "Proactive DVFS: OPP Point Sent = 0x%x\n", 1) \
+X(34, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEADLINE_REMOVED, "Proactive DVFS: Deadline removed = 0x%08x%08x\n", 2) \
+X(35, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_WORKLOAD_REMOVED, "Proactive DVFS: Workload removed = 0x%08x%08x\n", 2) \
+X(36, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_THROTTLE, "Proactive DVFS: Throttle to a maximum = 0x%x\n", 1) \
+X(37, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_FAILURE, "Proactive DVFS: Failed to pass OPP point via GPIO.\n", 0) \
+X(38, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_INVALID_NODE, "Proactive DVFS: Invalid node passed to function.\n", 0) \
+X(39, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GUEST_BAD_ACCESS_DEPRECATED, "Proactive DVFS: Guest OS attempted to do a privileged action. OSid = %u\n", 1) \
+X(40, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: 0x%x\n", 1) \
+X(41, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: 0x%x\n", 1) \
+X(42, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.\n", 0) \
+X(43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %d, Ticks: %d\n", 2) \
+X(44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042, "Allowed number of dusts is %d due to BRN59042.\n", 1) \
+X(45, RGXFW_GROUP_POW, RGXFW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X(46, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK, "Check Pow state: Int: 0x%X, Ext: 0x%X, Pow flags: 0x%X, Fence Counters: Check: %u - Update: %u\n", 5) \
+X(47, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x, Success = %x\n", 2) \
+X(48, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_IDLE, "Proactive DVFS: GPU transitioned to idle\n", 0) \
+X(49, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_ACTIVE, "Proactive DVFS: GPU transitioned to active\n", 0) \
+X(50, RGXFW_GROUP_POW, RGXFW_SF_POW_POWDUMP_BUFFER_SIZE, "Power counter dumping: Data truncated writing register %u. Buffer too small.\n", 1) \
+X(51, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT, "Power controller returned ABORT for last request so retrying.\n", 0) \
+X(52, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST, "Discarding invalid power request: type 0x%x, DM %u\n", 2) \
+X(53, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE_NOT_IDLE, "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)\n", 2) \
+X(54, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_POW_OFF_NOT_IDLE, "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)\n", 2) \
+X(55, RGXFW_GROUP_POW, RGXFW_SF_POW_NUMDUST_CHANGE_NOT_IDLE, "Detected attempt to change dust count while not forced idle (pow state 0x%x)\n", 1) \
+X(56, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESULT, "Power monitor: Type = %d (0 = power, 1 = energy), Estimate result = 0x%08x%08x\n", 3) \
+X(57, RGXFW_GROUP_POW, RGXFW_SF_POW_MINMAX_CONFLICT, "Conflicting clock frequency range: OPP min = %u, max = %u\n", 2) \
+X(58, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_FLOOR, "Proactive DVFS: Set floor to a minimum = 0x%x\n", 1) \
+\
+X(1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%d, FWCtx: %08.8X\n", 2) \
+X(2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%d, FWCtx: %08.8X, MemCtx: %08.8X\n", 3) \
+X(3, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW\n", 0) \
+X(4, RGXFW_GROUP_HWR, RGXFW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.\n", 0) \
+X(5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%d FWCtx: %08.8X\n", 2) \
+X(6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%d->%d), PER-DM(0x%08X->0x%08X)\n", 4) \
+X(7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%d->%d), PER-DM(0x%08X)\n", 3) \
+X(8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due to lockup: GLB(%d), PER-DM(0x%08X->0x%08X)\n", 3) \
+X(9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%d->%d), PER-DM(0x%08X->0x%08X)\n", 4) \
+X(10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%d->%d), PER-DM(0x%08X->0x%08X)\n", 4) \
+X(11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08X)\n", 3) \
+X(12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08X)\n", 4) \
+X(13, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 1-Set) failed (reg:0x%08X val:0x%08X)\n", 3) \
+X(14, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08X\n", 2) \
+X(15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08X (st: %d), FWCtx 0x%08X @ %d\n", 6) \
+X(16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE, "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d\n", 2) \
+X(17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8X - local (0x%08.8X): %d, global (0x%08.8X): %d\n", 5) \
+X(18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8X, discard: %d - local (0x%08.8X): s%d?=c%d, global (0x%08.8X): s%d?=c%d\n", 8) \
+X(19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8X c%d\n", 2) \
+X(20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8X @ %d, RTD 0x%08x.\n", 3) \
+X(21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%d, extmem: %d)\n", 2) \
+X(22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: %08.8X (TPC addr: %08X%08X, size: %d bytes)\n", 4) \
+X(23, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08X\n", 2) \
+X(24, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08X\n", 5) \
+X(25, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered\n", 1) \
+X(26, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP, "DM%u: Hold scheduling due to R-Flag = 0x%08x\n", 2) \
+X(27, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_RECONSTRUCTION, "Analysis: Need freelist reconstruction\n", 0) \
+X(28, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP, "Analysis DM%u: Lockup FWCtx: %08.8X. Need to skip to next command\n", 2) \
+X(29, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP_OOM_TA, "Analysis DM%u: Lockup while TA is OOM FWCtx: %08.8X. Need to skip to next command\n", 2) \
+X(30, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_PR_CLEANUP, "Analysis DM%u: Lockup while partial render FWCtx: %08.8X. Need PR cleanup\n", 2) \
+X(31, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP, "GPU has locked up\n", 0) \
+X(32, RGXFW_GROUP_HWR, RGXFW_SF_HWR_READY, "DM%u ready for HWR\n", 1) \
+X(33, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_UPDATE_RECOVERY, "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08X\n", 2) \
+X(34, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729, "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08X\n", 1) \
+X(35, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT, "DM%u timed out\n", 1) \
+X(36, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EVENT_STATUS_REG, "RGX_CR_EVENT_STATUS=0x%08x\n", 1) \
+X(37, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_FALSE_LOCKUP, "DM%u lockup falsely detected, R-Flags=0x%08X\n", 2) \
+X(38, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline\n", 0) \
+X(39, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll\n", 0) \
+X(40, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x\n", 2) \
+X(41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (loop:%d, poll failures: 0x%08X)\n", 2) \
+X(42, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08X\n", 1) \
+X(43, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08X)\n", 1) \
+X(44, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. Proceeding to full register checking (DM: %u).\n", 1) \
+X(45, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_META_FAULT, "Meta MMU page fault detected (Meta MMU Status: 0x%08X%08X)\n", 2) \
+X(46, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK, "Fast CRC Check result for DM%u is HWRNeeded=%u\n", 2) \
+X(47, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK, "Full Signature Check result for DM%u is HWRNeeded=%u\n", 2) \
+X(48, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u\n", 3) \
+X(49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d\n", 3) \
+X(50, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "Deadline counter for DM%u is HWRDeadline=%u\n", 2) \
+X(51, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction\n", 1) \
+X(52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%X (ID=%d)\n", 2) \
+X(53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%d complete\n", 1) \
+X(54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%X (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u\n", 4) \
+X(55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%d failed\n", 1) \
+X(56, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02X, StallingMask=0x%02X, PDS_CTRL=0x%08X%08X)\n", 4) \
+X(57, RGXFW_GROUP_HWR, RGXFW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02X, StallingMask=0x%02X, PDS_CTRL=0x%08X%08X)\n", 4) \
+X(58, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_USED, "USC slots: %u used by DM%u\n", 2) \
+X(59, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty\n", 1) \
+X(60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%d's Context Switch failed to meet deadline. Current time: %08x%08x, deadline: %08x%08x\n", 5) \
+X(61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%d)\n", 1) \
+X(62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%d)\n", 1) \
+X(63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction\n", 2) \
+X(64, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8X, queue: 0x%08X%08X (Roff = %u becomes StreamStartOffset = %u)\n", 5) \
+X(65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%X (ID=%d) type: %d (0:local,1:global) on HW context %u\n", 4) \
+X(66, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MIPS_FAULT, "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)\n", 3) \
+X(67, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance\n", 1) \
+X(68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%X (ID=%d)\n", 2) \
+X(69, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: %08.8X (RTC addr: %08X%08X, size: %d bytes)\n", 4) \
+\
+X( 1, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGBLK, "Block 0x%x mapped to Config Idx %u\n", 2) \
+X( 2, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_OMTBLK, "Block 0x%x omitted from event - not enabled in HW\n", 1) \
+X( 3, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INCBLK, "Block 0x%x included in event - enabled in HW\n", 1) \
+X( 4, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELREG, "Select register state hi_0x%x lo_0x%x\n", 2) \
+X( 5, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CSBHDR, "Counter stream block header word 0x%x\n", 1) \
+X( 6, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTROFF, "Counter register offset 0x%x\n", 1) \
+X( 7, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGSKP, "Block 0x%x config unset, skipping\n", 1) \
+X( 8, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK, "Accessing Indirect block 0x%x\n", 1) \
+X( 9, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DIRBLK, "Accessing Direct block 0x%x\n", 1) \
+X(10, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CNTPRG, "Programmed counter select register at offset 0x%x\n", 1) \
+X(11, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKPRG, "Block register offset 0x%x and value 0x%x\n", 2) \
+X(12, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKCG, "Reading config block from driver 0x%x\n", 1) \
+X(13, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKRG, "Reading block range 0x%x to 0x%x\n", 2) \
+X(14, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKREC, "Recording block 0x%x config from driver\n", 1) \
+X(15, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKED, "Finished reading config block from driver\n", 0) \
+X(16, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: %x   value: %x\n", 2) \
+X(17, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u  ID:%x\n", 2) \
+X(18, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID %x is not allowed. The package [b:%u, n:%u] will be discarded\n", 3) \
+X(19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Custom Counters filter status %d\n", 1) \
+X(20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded\n", 2) \
+X(21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %d counters IDs while the upper limit is %d\n", 2) \
+X(22, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHECK_FILTER, "Check Filter %x is %x ?\n", 2) \
+X(23, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset\n", 1) \
+X(24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD, "Encountered an invalid command (%d)\n", 1) \
+X(25, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 2) \
+X(26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)\n", 3) \
+X(27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %d\n", 1) \
+\
+X( 1, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST, "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u\n", 5) \
+X( 2, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_COMPLETE, "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u\n", 4) \
+X( 3, RGXFW_GROUP_DMA, RGXFW_SF_DMA_INT_REG, "DMA Interrupt register 0x%08x\n", 1) \
+X( 4, RGXFW_GROUP_DMA, RGXFW_SF_DMA_WAIT, "Waiting for transfer ID %u completion...\n", 1) \
+X( 5, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOADING_FAILED, "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed\n", 3) \
+X( 6, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOAD_INVALID, "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)\n", 3) \
+X( 7, RGXFW_GROUP_DMA, RGXFW_SF_DMA_POLL_FAILED, "Transfer 0x%02x request poll failure\n", 1) \
+X( 8, RGXFW_GROUP_DMA, RGXFW_SF_DMA_BOOT_TRANSFER_FAILED, "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead\n", 2) \
+\
+X( 1, RGXFW_GROUP_DBG, RGXFW_SF_DBG_INTPAIR, "0x%08x 0x%08x\n", 2) \
+X( 2, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1HEX, "0x%08x\n", 1) \
+X( 3, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2HEX, "0x%08x 0x%08x\n", 2) \
+X( 4, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3HEX, "0x%08x 0x%08x 0x%08x\n", 3) \
+X( 5, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4HEX, "0x%08x 0x%08x 0x%08x 0x%08x\n", 4) \
+X( 6, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 5) \
+X( 7, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 6) \
+X( 8, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 7) \
+X( 9, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 8) \
+X(10, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1SIGNED, "%d\n", 1) \
+X(11, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2SIGNED, "%d %d\n", 2) \
+X(12, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3SIGNED, "%d %d %d\n", 3) \
+X(13, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4SIGNED, "%d %d %d %d\n", 4) \
+X(14, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5SIGNED, "%d %d %d %d %d\n", 5) \
+X(15, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6SIGNED, "%d %d %d %d %d %d\n", 6) \
+X(16, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7SIGNED, "%d %d %d %d %d %d %d\n", 7) \
+X(17, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8SIGNED, "%d %d %d %d %d %d %d %d\n", 8) \
+X(18, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1UNSIGNED, "%u\n", 1) \
+X(19, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2UNSIGNED, "%u %u\n", 2) \
+X(20, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3UNSIGNED, "%u %u %u\n", 3) \
+X(21, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4UNSIGNED, "%u %u %u %u\n", 4) \
+X(22, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5UNSIGNED, "%u %u %u %u %u\n", 5) \
+X(23, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6UNSIGNED, "%u %u %u %u %u %u\n", 6) \
+X(24, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7UNSIGNED, "%u %u %u %u %u %u %u\n", 7) \
+X(25, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8UNSIGNED, "%u %u %u %u %u %u %u %u\n", 8) \
+\
+X(65535, RGXFW_GROUP_NULL, RGXFW_SF_LAST, "You should not use this string\n", 15)
+
+
+/*  The symbolic names found in the table above are assigned a ui32 value of
+ *  the following format:
+ *
+ *     bits  0-11: id number
+ *     bits 12-15: group id number
+ *     bits 16-19: number of parameters
+ *     bits 20-27: unused
+ *     bits 28-30: active flag, identifying an SF packet (otherwise a regular int32)
+ *     bit  31   : reserved for signed/unsigned compatibility
+ *
+ *  The following macro assigns those values to the enum-generated SF id list.
+ */
+#define RGXFW_LOG_IDMARKER			(0x70000000U)
+#define RGXFW_LOG_CREATESFID(a,b,e) (((IMG_UINT32)(a) | ((IMG_UINT32)(b)<<12U) | ((IMG_UINT32)(e)<<16U)) | RGXFW_LOG_IDMARKER)
+
+#define RGXFW_LOG_IDMASK			(0xFFF00000U)
+#define RGXFW_LOG_VALIDID(I)		(((I) & RGXFW_LOG_IDMASK) == RGXFW_LOG_IDMARKER)
+
+typedef enum {
+#define X(a, b, c, d, e) c = RGXFW_LOG_CREATESFID(a,b,e),
+	RGXFW_LOG_SFIDLIST
+#undef X
+} RGXFW_LOG_SFids;
+
+/* Returns the group id that the given (enum-generated) id belongs to */
+#define RGXFW_SF_GID(x) (((IMG_UINT32)(x)>>12) & 0xfU)
+/* Returns how many arguments the SF (string format) for the given (enum-generated) id requires */
+#define RGXFW_SF_PARAMNUM(x) (((IMG_UINT32)(x)>>16) & 0xfU)
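+
+/* Worked example (illustrative only, assuming a group id value of 5):
+ * a table entry X(2, <group 5>, <name>, "...", 1) expands to
+ *   RGXFW_LOG_CREATESFID(2, 5, 1)
+ *     = 0x70000000 | 0x2 | (0x5 << 12) | (0x1 << 16)
+ *     = 0x70015002
+ * which decodes back as
+ *   RGXFW_SF_GID(0x70015002)      == 5
+ *   RGXFW_SF_PARAMNUM(0x70015002) == 1
+ * and passes RGXFW_LOG_VALIDID() since its top bits match RGXFW_LOG_IDMARKER.
+ */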
+
+#endif /* RGX_FWIF_SF_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_shared.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_shared.h
new file mode 100644
index 0000000..073a3e2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_fwif_shared.h
@@ -0,0 +1,228 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware interface structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware interface structures shared by both host client
+                and host server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (RGX_FWIF_SHARED_H)
+#define RGX_FWIF_SHARED_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "rgx_common.h"
+#include "powervr/mem_types.h"
+
+/*
+ * This is a generic limit imposed on any DM (TA, 3D, CDM, TDM, 2D, TRANSFER)
+ * command passed through the bridge.
+ * Just across the bridge in the server, any incoming kick command size is
+ * checked against this maximum limit.
+ * If the incoming command size exceeds the specified limit,
+ * the bridge call is rejected with an error.
+ */
+#define RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE	(1024U)
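+
+/* A minimal sketch of the server-side size check described above; the
+ * variable name here is hypothetical and the real bridge code may differ:
+ *
+ *     if (ui32KickCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)
+ *     {
+ *         return PVRSRV_ERROR_INVALID_PARAMS; // retire the bridge call
+ *     }
+ */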
+
+typedef struct RGXFWIF_DEV_VIRTADDR_
+{
+	IMG_UINT32	ui32Addr;
+} RGXFWIF_DEV_VIRTADDR;
+
+typedef struct
+{
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN psDevVirtAddr;
+	RGXFWIF_DEV_VIRTADDR    pbyFWAddr;
+} UNCACHED_ALIGN RGXFWIF_DMA_ADDR;
+
+typedef IMG_UINT8	RGXFWIF_CCCB;
+
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_RENDER_TARGET;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_UFO_ADDR;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CLEANUP_CTL;
+
+
+typedef struct
+{
+	PRGXFWIF_UFO_ADDR	puiAddrUFO;
+	IMG_UINT32			ui32Value;
+} RGXFWIF_UFO;
+
+typedef struct
+{
+	IMG_UINT32				ui32SubmittedCommands;	/*!< Number of commands received by the FW */
+	IMG_UINT32				ui32ExecutedCommands;	/*!< Number of commands executed by the FW */
+} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL;
+
+
+/*!
+ * Client Circular Command Buffer (CCCB) control structure.
+ * This is shared between the Server and the Firmware and holds byte offsets
+ * into the CCCB as well as the wrapping mask to aid wrap around. A given
+ * snapshot of this queue with Cmd 1 running on the GPU might be:
+ *
+ *          Roff                           Doff                 Woff
+ * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........]
+ *            <      runnable commands       ><   !ready to run   >
+ *
+ * Cmd 1    : Currently executing on the GPU data master.
+ * Cmd 2,3,4: Fence dependencies met, commands runnable.
+ * Cmd 5... : Fence dependency not met yet.
+ */
+typedef struct
+{
+	IMG_UINT32  ui32WriteOffset;    /*!< Host write offset into the CCB.
+	                                 *    Must be aligned to 16 bytes. */
+	IMG_UINT32  ui32ReadOffset;     /*!< Firmware read offset into the CCB.
+	                                 *    Points to the command that is
+	                                 *    runnable on the GPU, if R!=W. */
+	IMG_UINT32  ui32DepOffset;      /*!< Firmware fence dependency offset.
+	                                 *    Points to commands not yet ready,
+	                                 *    i.e. with unmet fence dependencies. */
+	IMG_UINT32  ui32WrapMask;       /*!< Offset wrapping mask: the total
+	                                 *    capacity in bytes of the CCB,
+	                                 *    minus 1. */
+} UNCACHED_ALIGN RGXFWIF_CCCB_CTL;
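+
+/* Offset arithmetic sketch (illustrative): the CCB capacity is a power of
+ * two, so advancing an offset wraps with a simple mask. For example, after
+ * the host queues a command of ui32CmdSize bytes (16-byte aligned):
+ *
+ *     psCtl->ui32WriteOffset =
+ *         (psCtl->ui32WriteOffset + ui32CmdSize) & psCtl->ui32WrapMask;
+ */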
+
+
+typedef IMG_UINT32 RGXFW_FREELIST_TYPE;
+
+#define RGXFW_LOCAL_FREELIST     IMG_UINT32_C(0)
+#define RGXFW_GLOBAL_FREELIST    IMG_UINT32_C(1)
+#define RGXFW_FREELIST_TYPE_LAST RGXFW_GLOBAL_FREELIST
+#define RGXFW_MAX_FREELISTS      (RGXFW_FREELIST_TYPE_LAST + 1U)
+
+
+typedef struct
+{
+	IMG_UINT64	uTAReg_VDM_CONTEXT_STATE_BASE_ADDR;
+	IMG_UINT64	uTAReg_VDM_CONTEXT_STATE_RESUME_ADDR;
+	IMG_UINT64	uTAReg_TA_CONTEXT_STATE_BASE_ADDR;
+
+	struct
+	{
+		IMG_UINT64	uTAReg_VDM_CONTEXT_STORE_TASK0;
+		IMG_UINT64	uTAReg_VDM_CONTEXT_STORE_TASK1;
+		IMG_UINT64	uTAReg_VDM_CONTEXT_STORE_TASK2;
+
+		/* VDM resume state update controls */
+		IMG_UINT64	uTAReg_VDM_CONTEXT_RESUME_TASK0;
+		IMG_UINT64	uTAReg_VDM_CONTEXT_RESUME_TASK1;
+		IMG_UINT64	uTAReg_VDM_CONTEXT_RESUME_TASK2;
+
+		IMG_UINT64	uTAReg_VDM_CONTEXT_STORE_TASK3;
+		IMG_UINT64	uTAReg_VDM_CONTEXT_STORE_TASK4;
+
+		IMG_UINT64	uTAReg_VDM_CONTEXT_RESUME_TASK3;
+		IMG_UINT64	uTAReg_VDM_CONTEXT_RESUME_TASK4;
+	} asTAState[2];
+
+} RGXFWIF_TAREGISTERS_CSWITCH;
+
+#define RGXFWIF_TAREGISTERS_CSWITCH_SIZE sizeof(RGXFWIF_TAREGISTERS_CSWITCH)
+
+typedef struct
+{
+	IMG_UINT64	uCDMReg_CDM_CONTEXT_STATE_BASE_ADDR;
+	IMG_UINT64	uCDMReg_CDM_CONTEXT_PDS0;
+	IMG_UINT64	uCDMReg_CDM_CONTEXT_PDS1;
+	IMG_UINT64  uCDMReg_CDM_TERMINATE_PDS;
+	IMG_UINT64  uCDMReg_CDM_TERMINATE_PDS1;
+
+	/* CDM resume controls */
+	IMG_UINT64	uCDMReg_CDM_RESUME_PDS0;
+	IMG_UINT64	uCDMReg_CDM_CONTEXT_PDS0_B;
+	IMG_UINT64	uCDMReg_CDM_RESUME_PDS0_B;
+
+	IMG_UINT64	uCDMReg_CDM_TERMINATE_PDS_PH2;	/*!< register setting for 2 Phantoms (i.e. >4 compute clusters) */
+	IMG_UINT64	uCDMReg_CDM_TERMINATE_PDS1_PH2;
+
+} RGXFWIF_CDM_REGISTERS_CSWITCH;
+
+typedef struct
+{
+	IMG_UINT64			ui64RobustnessAddress;
+
+	IMG_UINT32					ui32MaxTADeadlineMS;	/*!< Max HWR deadline limit in ms */
+	IMG_UINT32					ui32Max3DDeadlineMS;	/*!< Max HWR deadline limit in ms */
+
+} RGXFWIF_STATIC_RENDERCONTEXT_STATE;
+
+#define RGXFWIF_STATIC_RENDERCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RENDERCONTEXT_STATE)
+
+typedef struct
+{
+	IMG_UINT64			ui64RobustnessAddress;
+
+	RGXFWIF_CDM_REGISTERS_CSWITCH sCDMRegistersCSWITCH;
+} RGXFWIF_STATIC_COMPUTECONTEXT_STATE;
+
+#define RGXFWIF_STATIC_COMPUTECONTEXT_SIZE sizeof(RGXFWIF_STATIC_COMPUTECONTEXT_STATE)
+
+typedef IMG_UINT32 RGXFWIF_PRBUFFER_TYPE;
+
+#define	RGXFWIF_PRBUFFER_START        IMG_UINT32_C(0)
+#define	RGXFWIF_PRBUFFER_ZBUFFER      IMG_UINT32_C(0)
+#define	RGXFWIF_PRBUFFER_SBUFFER      IMG_UINT32_C(1)
+#define	RGXFWIF_PRBUFFER_MSAABUFFER   IMG_UINT32_C(2)
+#define	RGXFWIF_PRBUFFER_MAXSUPPORTED IMG_UINT32_C(3)
+
+
+typedef enum
+{
+	RGXFWIF_PRBUFFER_UNBACKED = 0,
+	RGXFWIF_PRBUFFER_BACKED,
+	RGXFWIF_PRBUFFER_BACKING_PENDING,
+	RGXFWIF_PRBUFFER_UNBACKING_PENDING,
+} RGXFWIF_PRBUFFER_STATE;
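+
+/* Typical buffer lifecycle (illustrative, inferred from the SPM
+ * backing/unbacking log messages earlier in this patch):
+ *   UNBACKED          -> BACKING_PENDING   (backing request sent to host)
+ *   BACKING_PENDING   -> BACKED            (host populated the buffer)
+ *   BACKED            -> UNBACKING_PENDING (unbacking request sent to host)
+ *   UNBACKING_PENDING -> UNBACKED          (host unpopulated the buffer)
+ */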
+
+typedef struct
+{
+	IMG_UINT32				ui32BufferID;				/*!< Buffer ID*/
+	IMG_BOOL				bOnDemand;					/*!< Needs On-demand Z/S/MSAA Buffer allocation */
+	RGXFWIF_PRBUFFER_STATE	eState;						/*!< Z/S/MSAA -Buffer state */
+	RGXFWIF_CLEANUP_CTL		sCleanupState;				/*!< Cleanup state */
+	IMG_UINT32				ui32PRBufferFlags;			/*!< Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_PRBUFFER;
+
+
+#endif /*  RGX_FWIF_SHARED_H */
+
+/******************************************************************************
+ End of file (rgx_fwif_shared.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_heap_firmware.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_heap_firmware.h
new file mode 100644
index 0000000..f452466
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_heap_firmware.h
@@ -0,0 +1,95 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX FW heap definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_FW_HEAP_H)
+#define RGX_FW_HEAP_H
+
+/* Start at 903GiB, with a size of 32MiB per OSID (see rgxheapconfig.h).
+ * NOTE:
+ *      The firmware heap bases and sizes are defined here to
+ *      simplify #include dependencies; see rgxheapconfig.h
+ *      for the full RGX virtual address space layout.
+ */
+
+/*
+ * The config heap takes up the last 64KiB of the total firmware heap
+ * space. It is intended to act as storage space for the kernel and
+ * firmware CCB offsets. The main firmware heap size is reduced
+ * accordingly, but most of the map / unmap functions must take the
+ * entire range (i.e. main and config heap) into consideration.
+ */
+#define RGX_FIRMWARE_NUMBER_OF_FW_HEAPS              (2)
+#define RGX_FIRMWARE_HEAP_SHIFT                      RGX_FW_HEAP_SHIFT
+#define RGX_FIRMWARE_RAW_HEAP_BASE                   (0xE1C0000000ULL)
+#define RGX_FIRMWARE_RAW_HEAP_SIZE                   (IMG_UINT32_C(1) << RGX_FIRMWARE_HEAP_SHIFT)
+#define RGX_FIRMWARE_CONFIG_HEAP_SIZE                (IMG_UINT32_C(0x10000)) /* 64KB */
+#define RGX_FIRMWARE_META_MAIN_HEAP_SIZE             (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE)
+/*
+ * MIPS FW needs space in the Main heap to map GPU memory.
+ * This space is taken from the MAIN heap, to avoid creating a new heap.
+ */
+#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE      (IMG_UINT32_C(0x100000)) /* 1MB */
+#define RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE             (RGX_FIRMWARE_RAW_HEAP_SIZE - \
+                                                      RGX_FIRMWARE_CONFIG_HEAP_SIZE - \
+                                                      RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE)
+
+/* Hypervisor sub-heap order: MAIN + CONFIG */
+#define RGX_FIRMWARE_HYPERV_MAIN_HEAP_BASE           RGX_FIRMWARE_RAW_HEAP_BASE
+#define RGX_FIRMWARE_HYPERV_CONFIG_HEAP_BASE         (RGX_FIRMWARE_HYPERV_MAIN_HEAP_BASE + \
+                                                      RGX_FIRMWARE_RAW_HEAP_SIZE - \
+                                                      RGX_FIRMWARE_CONFIG_HEAP_SIZE)
+
+/* Guest sub-heap order: CONFIG + MAIN */
+#define RGX_FIRMWARE_GUEST_CONFIG_HEAP_BASE          RGX_FIRMWARE_RAW_HEAP_BASE
+#define RGX_FIRMWARE_GUEST_MAIN_HEAP_BASE            (RGX_FIRMWARE_GUEST_CONFIG_HEAP_BASE + RGX_FIRMWARE_CONFIG_HEAP_SIZE)
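+
+/* Worked layout example (illustrative), assuming RGX_FW_HEAP_SHIFT = 25,
+ * i.e. a 32MiB raw heap based at 0xE1C0000000:
+ *
+ *   Hypervisor: MAIN   0xE1C0000000..0xE1DFFEFFFF (32MiB - 64KiB)
+ *               CONFIG 0xE1DFFF0000..0xE1DFFFFFFF (64KiB)
+ *
+ *   Guest:      CONFIG 0xE1C0000000..0xE1C000FFFF (64KiB)
+ *               MAIN   0xE1C0010000..0xE1DFFFFFFF (32MiB - 64KiB)
+ *
+ * (On MIPS, the MAIN sub-heap additionally loses the 1MiB
+ * RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE region.)
+ */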
+
+/*
+ * The maximum size configurable via RGX_FW_HEAP_SHIFT is 32MiB (1<<25) and
+ * the minimum is 4MiB (1<<22); the default firmware heap size is the
+ * maximum, 32MiB.
+ */
+#if defined(RGX_FW_HEAP_SHIFT) && (RGX_FW_HEAP_SHIFT < 22 || RGX_FW_HEAP_SHIFT > 25)
+#error "RGX_FW_HEAP_SHIFT is outside valid range [22, 25]"
+#endif
+
+#endif /* RGX_FW_HEAP_H */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_heaps.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_heaps.h
new file mode 100644
index 0000000..2d6409b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_heaps.h
@@ -0,0 +1,188 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX heap definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGX_HEAPS_H__)
+#define __RGX_HEAPS_H__
+
+#include "km/rgxdefs_km.h"
+#include "img_defs.h"
+#include "log2.h"
+#include "pvr_debug.h"
+
+/* RGX Heap IDs, note: not all heaps are available to clients */
+/* N.B. Old heap identifiers are deprecated now that the old memory
+   management has been removed. New heap identifiers should be suitably renamed. */
+#define RGX_UNDEFINED_HEAP_ID					(~0LU)			/*!< RGX Undefined Heap ID */
+#define RGX_GENERAL_SVM_HEAP_ID					0				/*!< RGX General SVM (shared virtual memory) Heap ID */
+#define RGX_GENERAL_HEAP_ID						1				/*!< RGX General Heap ID */
+#define RGX_GENERAL_NON4K_HEAP_ID				2				/*!< RGX General non-4K Heap ID */
+#define RGX_RGNHDR_BRN_63142_HEAP_ID			3				/*!< RGX RgnHdr BRN63142 Heap ID */
+#define RGX_MMU_INIA_BRN_65273_ID				4				/*!< RGX MMU INIA Heap ID */
+#define RGX_MMU_INIB_BRN_65273_ID				5				/*!< RGX MMU INIB Heap ID */
+#define RGX_PDSCODEDATA_HEAP_ID					6				/*!< RGX PDS Code/Data Heap ID */
+#define RGX_USCCODE_HEAP_ID						7				/*!< RGX USC Code Heap ID */
+#define RGX_FIRMWARE_MAIN_HEAP_ID				8				/*!< RGX Main Firmware Heap ID */
+#define RGX_TQ3DPARAMETERS_HEAP_ID				9				/*!< RGX TQ 3D Parameters Heap ID */
+#define RGX_BIF_TILING_HEAP_1_ID				10				/*!< RGX BIF Tiling Heap 1 ID */
+#define RGX_BIF_TILING_HEAP_2_ID				11				/*!< RGX BIF Tiling Heap 2 ID */
+#define RGX_BIF_TILING_HEAP_3_ID				12				/*!< RGX BIF Tiling Heap 3 ID */
+#define RGX_BIF_TILING_HEAP_4_ID				13				/*!< RGX BIF Tiling Heap 4 ID */
+#define RGX_DOPPLER_HEAP_ID						14				/*!< Doppler Heap ID */
+#define RGX_DOPPLER_OVERFLOW_HEAP_ID			15				/*!< Doppler Overflow Heap ID */
+#define RGX_SERVICES_SIGNALS_HEAP_ID			16				/*!< Services Signals Heap ID */
+#define RGX_SIGNALS_HEAP_ID						17				/*!< Signals Heap ID */
+#define RGX_TDM_TPU_YUV_COEFFS_HEAP_ID			18
+#define RGX_FIRMWARE_CONFIG_HEAP_ID				19				/*!< RGX Firmware Config Heap ID */
+#define RGX_GUEST_FIRMWARE_RAW_HEAP_ID			21				/*!< RGX Raw Firmware Heap IDs for additional guest OSIDs */
+#define RGX_MAX_HEAP_ID		(RGX_GUEST_FIRMWARE_RAW_HEAP_ID + RGXFW_NUM_OS)	/*!< Max Valid Heap ID */
+
+
+
+/*
+  Identify heaps by their names
+*/
+#define RGX_GENERAL_SVM_HEAP_IDENT		"General SVM"			/*!< RGX General SVM (shared virtual memory) Heap Identifier */
+#define RGX_GENERAL_HEAP_IDENT 			"General"               /*!< RGX General Heap Identifier */
+#define RGX_GENERAL_NON4K_HEAP_IDENT	"General NON-4K"        /*!< RGX General non-4K Heap Identifier */
+#define RGX_RGNHDR_BRN_63142_HEAP_IDENT "RgnHdr BRN63142"       /*!< RGX RgnHdr BRN63142 Heap Identifier */
+#define RGX_MMU_INIA_BRN_65273_HEAP_IDENT "MMU INIA BRN65273"   /*!< MMU BRN65273 Heap A Identifier */
+#define RGX_MMU_INIB_BRN_65273_HEAP_IDENT "MMU INIB BRN65273"   /*!< MMU BRN65273 Heap B Identifier */
+#define RGX_PDSCODEDATA_HEAP_IDENT 		"PDS Code and Data"     /*!< RGX PDS Code/Data Heap Identifier */
+#define RGX_USCCODE_HEAP_IDENT			"USC Code"              /*!< RGX USC Code Heap Identifier */
+#define RGX_TQ3DPARAMETERS_HEAP_IDENT	"TQ3DParameters"        /*!< RGX TQ 3D Parameters Heap Identifier */
+#define RGX_BIF_TILING_HEAP_1_IDENT	    "BIF Tiling Heap 1"	    /*!< RGX BIF Tiling Heap 1 identifier */
+#define RGX_BIF_TILING_HEAP_2_IDENT	    "BIF Tiling Heap 2"	    /*!< RGX BIF Tiling Heap 2 identifier */
+#define RGX_BIF_TILING_HEAP_3_IDENT	    "BIF Tiling Heap 3"	    /*!< RGX BIF Tiling Heap 3 identifier */
+#define RGX_BIF_TILING_HEAP_4_IDENT	    "BIF Tiling Heap 4"	    /*!< RGX BIF Tiling Heap 4 identifier */
+#define RGX_DOPPLER_HEAP_IDENT			"Doppler"				/*!< Doppler Heap Identifier */
+#define RGX_DOPPLER_OVERFLOW_HEAP_IDENT	"Doppler Overflow"		/*!< Doppler Overflow Heap Identifier */
+#define RGX_SERVICES_SIGNALS_HEAP_IDENT	"Services Signals"		/*!< Services Signals Heap Identifier */
+#define RGX_SIGNALS_HEAP_IDENT	        "Signals"		        /*!< Signals Heap Identifier */
+#define RGX_VISTEST_HEAP_IDENT			"VisTest"				/*!< VisTest heap */
+#define RGX_TDM_TPU_YUV_COEFFS_HEAP_IDENT "TDM TPU YUV Coeffs"
+#define RGX_FIRMWARE_MAIN_HEAP_IDENT		"Firmware Main"
+#define RGX_FIRMWARE_CONFIG_HEAP_IDENT		"Firmware Config"
+#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT	"Firmware Raw Guest %d"
+
+/* BIF tiling heaps have specific buffer requirements based on their XStride
+ * configuration. This is detailed in the BIF tiling documentation and ensures
+ * that the bits swapped by the BIF tiling algorithm do not result in addresses
+ * outside the allocated buffer. The representation here reflects the diagram
+ * in the BIF tiling documentation for tiling mode '0'.
+ *
+ * For tiling mode '1', the overall tile size does not change, width increases
+ * to 2^9 but the height drops to 2^3.
+ * This means the RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE macro can be
+ * used for both modes.
+ *
+ * Previous TILING_HEAP_STRIDE macros are retired in preference to storing an
+ * alignment to stride factor, derived from the tiling mode, with the tiling
+ * heap configuration data.
+ *
+ * XStride is defined for a platform in sysconfig.h, but the resulting
+ * alignment and stride factor can be queried through the
+ * PVRSRVGetHeapLog2ImportAlignmentAndTilingStrideFactor() API.
+ * For reference:
+ *   Log2BufferStride = Log2Alignment - Log2AlignmentToTilingStrideFactor
+ */
+#define RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(X)       (4+X+1+8)
+#define RGX_BIF_TILING_HEAP_LOG2_ALIGN_TO_STRIDE_BASE              (4)
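+/* Worked example (illustrative, assuming the tiling-mode-0 stride factor
+ * equals the base value of 4): for XStride X = 3 the import alignment is
+ * 4+3+1+8 = 16, i.e. 64KiB, and Log2BufferStride = 16 - 4 = 12, i.e. a
+ * 4KiB buffer stride.
+ */
+#if 0
+#define EXAMPLE_LOG2_ALIGN	RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(3)	/* 16 -> 64KiB alignment */
+#define EXAMPLE_LOG2_STRIDE	(EXAMPLE_LOG2_ALIGN - RGX_BIF_TILING_HEAP_LOG2_ALIGN_TO_STRIDE_BASE)	/* 12 -> 4KiB stride */
+#endif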
+
+/*
+ *  Supported log2 page size values for RGX_GENERAL_NON_4K_HEAP_ID
+ */
+#define RGX_HEAP_4KB_PAGE_SHIFT					(12)
+#define RGX_HEAP_16KB_PAGE_SHIFT				(14)
+#define RGX_HEAP_64KB_PAGE_SHIFT				(16)
+#define RGX_HEAP_256KB_PAGE_SHIFT				(18)
+#define RGX_HEAP_1MB_PAGE_SHIFT					(20)
+#define RGX_HEAP_2MB_PAGE_SHIFT					(21)
+
+/* Takes a log2 page size parameter and calculates a suitable page size
+ * for the RGX heaps. Returns 0 if the parameter is out of range. */
+static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize)
+{
+	IMG_BOOL bFound = IMG_FALSE;
+
+	/* The OS page shift must be at least RGX_HEAP_4KB_PAGE_SHIFT and at most
+	 * RGX_HEAP_2MB_PAGE_SHIFT, i.e. a power-of-two page size in [4KB, 2MB] */
+	if ( uiLog2PageSize == 0 ||
+		(uiLog2PageSize < RGX_HEAP_4KB_PAGE_SHIFT) ||
+		(uiLog2PageSize > RGX_HEAP_2MB_PAGE_SHIFT))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Provided incompatible log2 page size %u",
+				__func__,
+				uiLog2PageSize));
+		PVR_ASSERT(0);
+		return 0;
+	}
+
+	do
+	{
+		switch (uiLog2PageSize)
+		{
+			case RGX_HEAP_4KB_PAGE_SHIFT:
+			case RGX_HEAP_16KB_PAGE_SHIFT:
+			case RGX_HEAP_64KB_PAGE_SHIFT:
+			case RGX_HEAP_256KB_PAGE_SHIFT:
+			case RGX_HEAP_1MB_PAGE_SHIFT:
+			case RGX_HEAP_2MB_PAGE_SHIFT:
+				/* All good, RGX page size equals given page size
+				 * => use it as default for heaps */
+				bFound = IMG_TRUE;
+				break;
+			default:
+				/* We have to fall back to a smaller device
+				 * page size than given page size because there
+				 * is no exact match for any supported size. */
+				uiLog2PageSize -= 1;
+				break;
+		}
+	} while (!bFound);
+
+	return uiLog2PageSize;
+}
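+/* Usage sketch (illustrative, not part of the original header): when the OS
+ * page shift has no exact RGX match, the loop above falls back to the next
+ * smaller supported size.
+ */
+#if 0
+static INLINE IMG_UINT32 ExampleDeriveHeapPageShift(void)
+{
+	return RGXHeapDerivePageSize(13);	/* 13 (8KB pages) falls back to RGX_HEAP_4KB_PAGE_SHIFT; returns 12 */
+}
+#endif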
+
+
+#endif /* __RGX_HEAPS_H__ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_hwperf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_hwperf.h
new file mode 100644
index 0000000..c0c8c69
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_hwperf.h
@@ -0,0 +1,1434 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HWPerf and Debug Types and Defines Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common data types definitions for hardware performance API
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_HWPERF_H_
+#define RGX_HWPERF_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* These structures are used on both GPU and CPU and must be a size that is a
+ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities at
+ * 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this.
+ */
+
+/******************************************************************************
+ * Includes and Defines
+ *****************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+
+#include "rgx_common.h"
+#include "pvrsrv_tlcommon.h"
+#include "pvrsrv_sync_km.h"
+
+
+#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER)
+/* HWPerf interface assumption checks */
+static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U, "Cluster count too large for HWPerf protocol definition");
+
+
+#if !defined(__KERNEL__)
+/* User-mode and Firmware definitions only */
+
+/*! The number of indirectly addressable TPU_MSC blocks in the GPU */
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST MAX(((IMG_UINT32)RGX_FEATURE_NUM_CLUSTERS >> 1), 1U)
+
+/*! The number of indirectly addressable USC blocks in the GPU */
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER (RGX_FEATURE_NUM_CLUSTERS)
+
+# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+
+ /*! Defines the number of performance counter blocks that are directly
+  * addressable in the RGX register map for S7 cores. */
+#  define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS      1 /* JONES */
+#  define RGX_HWPERF_INDIRECT_BY_PHANTOM       (RGX_NUM_PHANTOMS)
+#  define RGX_HWPERF_PHANTOM_NONDUST_BLKS      1 /* BLACKPEARL */
+#  define RGX_HWPERF_PHANTOM_DUST_BLKS         2 /* TPU, TEXAS */
+#  define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 2 /* USC, PBE */
+#  define RGX_HWPERF_DOPPLER_BX_TU_BLKS        0
+
+# elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+
+ /*! Defines the number of performance counter blocks that are directly
+  * addressable in the RGX register map for XT cores. */
+#  define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS      2 /* TORNADO, TA */
+#  define RGX_HWPERF_DOPPLER_BX_TU_BLKS        0
+
+#  define RGX_HWPERF_INDIRECT_BY_PHANTOM       (RGX_NUM_PHANTOMS)
+#  define RGX_HWPERF_PHANTOM_NONDUST_BLKS      2 /* RASTER, TEXAS */
+#  define RGX_HWPERF_PHANTOM_DUST_BLKS         1 /* TPU */
+#  define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */
+
+# else /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE), i.e. S6 */
+
+ /*! Defines the number of performance counter blocks that are
+  * addressable in the RGX register map for Series 6. */
+#  define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS      3 /* TA, RASTER, HUB */
+#  define RGX_HWPERF_INDIRECT_BY_PHANTOM       0 /* PHANTOM is not present in Rogue 1; name kept to match later series (Rogue XT and Rogue XT+) */
+#  define RGX_HWPERF_PHANTOM_NONDUST_BLKS      0
+#  define RGX_HWPERF_PHANTOM_DUST_BLKS         1 /* TPU */
+#  define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */
+#  define RGX_HWPERF_DOPPLER_BX_TU_BLKS        0
+
+# endif
+
+/*! The number of performance counters in each layout block defined for UM/FW code */
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+# define RGX_HWPERF_CNTRS_IN_BLK 6
+#else
+# define RGX_HWPERF_CNTRS_IN_BLK 4
+#endif
+
+#else /* defined(__KERNEL__) */
+/* Kernel/server definitions - not used, hence invalid definitions */
+
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST    0xFF
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER 0xFF
+
+# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS      0xFF
+# define RGX_HWPERF_INDIRECT_BY_PHANTOM       0xFF
+# define RGX_HWPERF_PHANTOM_NONDUST_BLKS      0xFF
+# define RGX_HWPERF_PHANTOM_DUST_BLKS         0xFF
+# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 0xFF
+
+# define RGX_HWPERF_DOPPLER_BX_TU_BLKS       0U
+
+#endif
+#endif
+
+/*! The number of custom non-mux counter blocks supported */
+#define RGX_HWPERF_MAX_CUSTOM_BLKS 5U
+
+/*! The number of counters supported in each non-mux counter block */
+#define RGX_HWPERF_MAX_CUSTOM_CNTRS 8U
+
+
+/******************************************************************************
+ * Packet Event Type Enumerations
+ *****************************************************************************/
+
+/*! Type used to encode the event that generated the packet.
+ * NOTE: When this type is updated the corresponding hwperfbin2json tool
+ * source needs to be updated as well. The RGX_HWPERF_EVENT_MASK_* macros will
+ * also need updating when adding new types.
+ */
+typedef IMG_UINT32 RGX_HWPERF_EVENT_TYPE;
+
+#define RGX_HWPERF_INVALID				0x00U
+
+/* FW types 0x01..0x06 */
+#define RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE	0x01U
+
+#define RGX_HWPERF_FW_BGSTART			0x01U
+#define RGX_HWPERF_FW_BGEND				0x02U
+#define RGX_HWPERF_FW_IRQSTART			0x03U
+
+#define RGX_HWPERF_FW_IRQEND			0x04U
+#define RGX_HWPERF_FW_DBGSTART			0x05U
+#define RGX_HWPERF_FW_DBGEND			0x06U
+
+#define RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE		0x06U
+
+/* HW types 0x07..0x19 */
+#define RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE	0x07U
+
+#define RGX_HWPERF_HW_PMOOM_TAPAUSE		0x07U
+
+#define RGX_HWPERF_HW_TAKICK			0x08U
+#define RGX_HWPERF_HW_TAFINISHED		0x09U
+#define RGX_HWPERF_HW_3DTQKICK			0x0AU
+#define RGX_HWPERF_HW_3DKICK			0x0BU
+#define RGX_HWPERF_HW_3DFINISHED		0x0CU
+#define RGX_HWPERF_HW_CDMKICK			0x0DU
+#define RGX_HWPERF_HW_CDMFINISHED		0x0EU
+#define RGX_HWPERF_HW_TLAKICK			0x0FU
+#define RGX_HWPERF_HW_TLAFINISHED		0x10U
+#define RGX_HWPERF_HW_3DSPMKICK			0x11U
+#define RGX_HWPERF_HW_PERIODIC			0x12U
+#define RGX_HWPERF_HW_RTUKICK			0x13U
+#define RGX_HWPERF_HW_RTUFINISHED		0x14U
+#define RGX_HWPERF_HW_SHGKICK			0x15U
+#define RGX_HWPERF_HW_SHGFINISHED		0x16U
+#define RGX_HWPERF_HW_3DTQFINISHED		0x17U
+#define RGX_HWPERF_HW_3DSPMFINISHED		0x18U
+
+#define RGX_HWPERF_HW_PMOOM_TARESUME	0x19U
+
+/* HW_EVENT_RANGE0 used up. Use next empty range below to add new hardware events */
+#define RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE	0x19U
+
+/* other types 0x1A..0x1F */
+#define RGX_HWPERF_CLKS_CHG				0x1AU
+#define RGX_HWPERF_GPU_STATE_CHG		0x1BU
+
+/* power types 0x20..0x27 */
+#define RGX_HWPERF_PWR_EST_RANGE_FIRST_TYPE	0x20U
+#define RGX_HWPERF_PWR_EST_REQUEST		0x20U
+#define RGX_HWPERF_PWR_EST_READY		0x21U
+#define RGX_HWPERF_PWR_EST_RESULT		0x22U
+#define RGX_HWPERF_PWR_EST_RANGE_LAST_TYPE	0x22U
+
+#define RGX_HWPERF_PWR_CHG				0x23U
+
+/* HW_EVENT_RANGE1 0x28..0x2F, for accommodating new hardware events */
+#define RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE	0x28U
+
+#define RGX_HWPERF_HW_TDMKICK			0x28U
+#define RGX_HWPERF_HW_TDMFINISHED		0x29U
+#define RGX_HWPERF_HW_NULLKICK			0x2AU
+
+#define RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE 0x2AU
+
+/* context switch types 0x30..0x31 */
+#define RGX_HWPERF_CSW_START			0x30U
+#define RGX_HWPERF_CSW_FINISHED			0x31U
+
+/* firmware misc 0x38..0x39 */
+#define RGX_HWPERF_UFO					0x38U
+#define RGX_HWPERF_FWACT				0x39U
+
+/* last */
+#define RGX_HWPERF_LAST_TYPE			0x3AU
+
+/* This value must be a power of two as it is used in masks and a
+ * filter bit field (currently 64 bits long).
+ */
+#define RGX_HWPERF_MAX_TYPE				0x40U
+
+
+/* The event type values are incrementing integers for use as a shift ordinal
+ * in the event filtering process at the point events are generated.
+ * This scheme thus implies a limit of 63 event types.
+ */
+static_assert(RGX_HWPERF_LAST_TYPE < RGX_HWPERF_MAX_TYPE, "Too many HWPerf event types");
+
+/* Macro used to check if an event type ID is present in the known set of hardware type events */
+#define HWPERF_PACKET_IS_HW_TYPE(_etype)	(((_etype) >= RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) || \
+											 ((_etype) >= RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE))
+
+#define HWPERF_PACKET_IS_FW_TYPE(_etype)					\
+	((_etype) >= RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE &&	\
+	 (_etype) <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE)
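+/* Illustrative sketch (not part of the original header): because event type
+ * values double as shift ordinals, an event filter is simply a 64-bit mask
+ * indexed by type. The same construction is provided later in this file by
+ * RGX_HWPERF_EVENT_MASK_VALUE().
+ */
+#if 0
+static INLINE IMG_BOOL ExampleEventPassesFilter(IMG_UINT32 ui32EventType)
+{
+	const IMG_UINT64 ui64Filter = (IMG_UINT64_C(1) << RGX_HWPERF_HW_TAKICK) |
+	                              (IMG_UINT64_C(1) << RGX_HWPERF_HW_TAFINISHED);
+	return ((ui64Filter >> ui32EventType) & 1U) ? IMG_TRUE : IMG_FALSE;
+}
+#endif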
+
+
+typedef enum {
+	RGX_HWPERF_HOST_INVALID   = 0x00,
+	RGX_HWPERF_HOST_ENQ       = 0x01,
+	RGX_HWPERF_HOST_UFO       = 0x02,
+	RGX_HWPERF_HOST_ALLOC     = 0x03,
+	RGX_HWPERF_HOST_CLK_SYNC  = 0x04,
+	RGX_HWPERF_HOST_FREE      = 0x05,
+	RGX_HWPERF_HOST_MODIFY    = 0x06,
+	RGX_HWPERF_HOST_DEV_INFO  = 0x07,
+	RGX_HWPERF_HOST_INFO      = 0x08,
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT = 0x09,
+	RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE  = 0x0A,
+
+	/* last */
+	RGX_HWPERF_HOST_LAST_TYPE,
+
+	/* This enumeration must have a value that is a power of two as it is
+	 * used in masks and a filter bit field (currently 32 bits long).
+	 */
+	RGX_HWPERF_HOST_MAX_TYPE  = 0x20
+} RGX_HWPERF_HOST_EVENT_TYPE;
+
+/* The event type values are incrementing integers for use as a shift ordinal
+ * in the event filtering process at the point events are generated.
+ * This scheme thus implies a limit of 31 event types.
+ */
+static_assert(RGX_HWPERF_HOST_LAST_TYPE < RGX_HWPERF_HOST_MAX_TYPE, "Too many HWPerf host event types");
+
+
+/******************************************************************************
+ * Packet Header Format Version 2 Types
+ *****************************************************************************/
+
+/*! Major version number of the protocol in operation
+ */
+#define RGX_HWPERF_V2_FORMAT 2
+
+/*! Signature ASCII pattern 'HWP2' found in the first word of a HWPerfV2 packet
+ */
+#define HWPERF_PACKET_V2_SIG		0x48575032
+
+/*! Signature ASCII pattern 'HWPA' found in the first word of a HWPerfV2a packet
+ */
+#define HWPERF_PACKET_V2A_SIG		0x48575041
+
+/*! Signature ASCII pattern 'HWPB' found in the first word of a HWPerfV2b packet
+ */
+#define HWPERF_PACKET_V2B_SIG		0x48575042
+
+#define HWPERF_PACKET_ISVALID(_sig) (((_sig) == HWPERF_PACKET_V2_SIG) || ((_sig) == HWPERF_PACKET_V2A_SIG) || ((_sig) == HWPERF_PACKET_V2B_SIG))
+
+/*! Type defines the HWPerf packet header common to all events. */
+typedef struct
+{
+	IMG_UINT32  ui32Sig;        /*!< One of the HWPERF_PACKET_V2*_SIG signature values */
+	IMG_UINT32  ui32Size;       /*!< Overall packet size in bytes */
+	IMG_UINT32  eTypeId;        /*!< Event type information field */
+	IMG_UINT32  ui32Ordinal;    /*!< Sequential number of the packet */
+	IMG_UINT64  ui64Timestamp;  /*!< Event timestamp */
+} RGX_HWPERF_V2_PACKET_HDR, *RGX_PHWPERF_V2_PACKET_HDR;
+
+#ifndef __CHECKER__
+RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_V2_PACKET_HDR, ui64Timestamp);
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR);
+#endif
+
+
+/*! Mask for use with the IMG_UINT32 ui32Size header field */
+#define RGX_HWPERF_SIZE_MASK         0xFFFFU
+
+/*! This macro defines the upper limit on the size of the largest
+ * variable-length HWPerf packet, currently 3KB (0xC00 bytes). This constant
+ * may be used to allocate a buffer large enough to hold one packet.
+ * The limit is policed by the packet-producing code.
+ */
+#define RGX_HWPERF_MAX_PACKET_SIZE   0xC00U
+
+/*! Defines an upper limit to the size of a variable length packet payload.
+ */
+#define RGX_HWPERF_MAX_PAYLOAD_SIZE	 ((IMG_UINT32)(RGX_HWPERF_MAX_PACKET_SIZE-\
+	sizeof(RGX_HWPERF_V2_PACKET_HDR)))
+
+
+/*! Macro which takes a structure name and provides the packet size for
+ * a fixed size payload packet, rounded up to 8 bytes to align packets
+ * for 64 bit architectures. */
+#define RGX_HWPERF_MAKE_SIZE_FIXED(_struct)       ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(sizeof(_struct), PVRSRVTL_PACKET_ALIGNMENT))))
+
+/*! Macro which takes the number of bytes written in the data payload of a
+ * packet for a variable size payload packet, rounded up to 8 bytes to
+ * align packets for 64 bit architectures. */
+#define RGX_HWPERF_MAKE_SIZE_VARIABLE(_size)      ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(_size, PVRSRVTL_PACKET_ALIGNMENT))))
+
+/*! Macro to obtain the size of the packet */
+#define RGX_HWPERF_GET_SIZE(_packet_addr)         ((IMG_UINT16)(((_packet_addr)->ui32Size) & RGX_HWPERF_SIZE_MASK))
+
+/*! Macro to obtain the size of the packet data */
+#define RGX_HWPERF_GET_DATA_SIZE(_packet_addr)    (RGX_HWPERF_GET_SIZE(_packet_addr) - sizeof(RGX_HWPERF_V2_PACKET_HDR))
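+/* Illustrative example of the size helpers above, using a hypothetical
+ * 12-byte payload struct: the payload is rounded up to the 8-byte transport
+ * alignment, giving header + 16 bytes (40 bytes in total, assuming the usual
+ * 24-byte header layout).
+ */
+#if 0
+typedef struct { IMG_UINT32 ui32A; IMG_UINT32 ui32B; IMG_UINT32 ui32C; } EXAMPLE_PAYLOAD;
+/* RGX_HWPERF_MAKE_SIZE_FIXED(EXAMPLE_PAYLOAD) == 24 + PVR_ALIGN(12, 8) == 40 bytes */
+#endif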
+
+
+
+/*! Masks for use with the IMG_UINT32 eTypeId header field */
+#define RGX_HWPERF_TYPEID_MASK			0x7FFFFU
+#define RGX_HWPERF_TYPEID_EVENT_MASK	0x07FFFU
+#define RGX_HWPERF_TYPEID_THREAD_MASK	0x08000U
+#define RGX_HWPERF_TYPEID_STREAM_MASK	0x70000U
+#define RGX_HWPERF_TYPEID_OSID_MASK		0xFF000000U
+
+/*! Meta thread macros for encoding the ID into the type field of a packet */
+#define RGX_HWPERF_META_THREAD_SHIFT	15U
+#define RGX_HWPERF_META_THREAD_ID0		0x0U
+#define RGX_HWPERF_META_THREAD_ID1		0x1U
+/*! Obsolete, kept for source compatibility */
+#define RGX_HWPERF_META_THREAD_MASK		0x1U
+/*! Stream ID macros for encoding the ID into the type field of a packet */
+#define RGX_HWPERF_STREAM_SHIFT			16U
+/*! OSID bit-shift macro used for encoding OSID into type field of a packet */
+#define RGX_HWPERF_OSID_SHIFT			24U
+typedef enum {
+	RGX_HWPERF_STREAM_ID0_FW,     /*!< Events from the Firmware/GPU */
+	RGX_HWPERF_STREAM_ID1_HOST,   /*!< Events from the Server host driver component */
+	RGX_HWPERF_STREAM_ID2_CLIENT, /*!< Events from the Client host driver component */
+	RGX_HWPERF_STREAM_ID_LAST,
+} RGX_HWPERF_STREAM_ID;
+
+/* Checks if all stream IDs can fit under RGX_HWPERF_TYPEID_STREAM_MASK. */
+static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_STREAM_MASK >> RGX_HWPERF_STREAM_SHIFT),
+		"Too many HWPerf stream IDs.");
+
+/*! Macros used to set the packet type and encode meta thread ID (0|1), HWPerf stream ID, and OSID within */
+#define RGX_HWPERF_MAKE_TYPEID(_stream, _type, _thread, _osid)\
+		((IMG_UINT32) ((RGX_HWPERF_TYPEID_STREAM_MASK&((IMG_UINT32)(_stream) << RGX_HWPERF_STREAM_SHIFT)) | \
+		(RGX_HWPERF_TYPEID_THREAD_MASK & ((IMG_UINT32)(_thread) << RGX_HWPERF_META_THREAD_SHIFT)) | \
+		(RGX_HWPERF_TYPEID_EVENT_MASK & (IMG_UINT32)(_type)) | \
+		(RGX_HWPERF_TYPEID_OSID_MASK & ((IMG_UINT32)(_osid) << RGX_HWPERF_OSID_SHIFT))))
+
+/*! Obtains the event type that generated the packet */
+#define RGX_HWPERF_GET_TYPE(_packet_addr)            (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK)
+
+/*! Obtains the META Thread number that generated the packet */
+#define RGX_HWPERF_GET_THREAD_ID(_packet_addr)       (((((_packet_addr)->eTypeId)&RGX_HWPERF_TYPEID_THREAD_MASK) >> RGX_HWPERF_META_THREAD_SHIFT))
+
+/*! Obtains the guest OSID which resulted in packet generation */
+#define RGX_HWPERF_GET_OSID(_packet_addr)            (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_OSID_MASK) >> RGX_HWPERF_OSID_SHIFT)
+
+/*! Obtain stream id */
+#define RGX_HWPERF_GET_STREAM_ID(_packet_addr)       (((((_packet_addr)->eTypeId)&RGX_HWPERF_TYPEID_STREAM_MASK) >> RGX_HWPERF_STREAM_SHIFT))
+
+/*! Macros to obtain a typed pointer to a packet or data structure given a packet address */
+#define RGX_HWPERF_GET_PACKET(_buffer_addr)            ((RGX_HWPERF_V2_PACKET_HDR *)  (_buffer_addr))
+#define RGX_HWPERF_GET_PACKET_DATA_BYTES(_packet_addr) ((IMG_BYTE *)(((IMG_BYTE *)(_packet_addr)) + sizeof(RGX_HWPERF_V2_PACKET_HDR)))
+#define RGX_HWPERF_GET_NEXT_PACKET(_packet_addr)       ((RGX_HWPERF_V2_PACKET_HDR *)  ( ((IMG_BYTE *)(_packet_addr))+(RGX_HWPERF_SIZE_MASK&(_packet_addr)->ui32Size)) )
+
+/*! Obtains a typed pointer to a packet header given the packed data address */
+#define RGX_HWPERF_GET_PACKET_HEADER(_packet_addr)     ((RGX_HWPERF_V2_PACKET_HDR *)  ( ((IMG_BYTE *)(_packet_addr)) - sizeof(RGX_HWPERF_V2_PACKET_HDR) ))
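+/* Illustrative sketch (not part of the original header) of consuming a
+ * stream buffer with the accessor macros above: validate the signature word,
+ * decode the eTypeId fields, then advance by the size header to the next
+ * 8-byte-aligned packet.
+ */
+#if 0
+static INLINE void ExampleWalkPackets(void *pvBuf, IMG_UINT32 ui32Bytes)
+{
+	RGX_HWPERF_V2_PACKET_HDR *psPkt = RGX_HWPERF_GET_PACKET(pvBuf);
+	IMG_BYTE *pbEnd = ((IMG_BYTE *)pvBuf) + ui32Bytes;
+
+	while ((((IMG_BYTE *)psPkt) + sizeof(*psPkt) <= pbEnd) &&
+	       HWPERF_PACKET_ISVALID(psPkt->ui32Sig))
+	{
+		IMG_UINT32 ui32Type   = RGX_HWPERF_GET_TYPE(psPkt);
+		IMG_UINT32 ui32Stream = RGX_HWPERF_GET_STREAM_ID(psPkt);
+		/* ... dispatch on ui32Stream/ui32Type here ... */
+		(void)ui32Type; (void)ui32Stream;
+		psPkt = RGX_HWPERF_GET_NEXT_PACKET(psPkt);
+	}
+}
+#endif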
+
+
+/******************************************************************************
+ * Other Common Defines
+ *****************************************************************************/
+
+/* This macro is not a real array size, but indicates the array has a variable
+ * length only known at run-time but always contains at least 1 element. The
+ * final size of the array is deduced from the size field of a packet header.
+ */
+#define RGX_HWPERF_ONE_OR_MORE_ELEMENTS  1U
+
+/* This macro is not a real array size, but indicates the array is optional
+ * and if present has a variable length only known at run-time. The final
+ * size of the array is deduced from the size field of a packet header. */
+#define RGX_HWPERF_ZERO_OR_MORE_ELEMENTS 1U
+
+
+/*! Masks for use with the IMG_UINT32 ui32BlkInfo field */
+#define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK	0xFFFF0000U
+#define RGX_HWPERF_BLKINFO_BLKOFFSET_MASK	0x0000FFFFU
+
+/*! Shift for the NumBlocks and counter block offset field in ui32BlkInfo */
+#define RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT	16U
+#define RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT 0U
+
+/*! Macro used to set the block info word as a combination of two 16-bit integers */
+#define RGX_HWPERF_MAKE_BLKINFO(_numblks, _blkoffset) ((IMG_UINT32) ((RGX_HWPERF_BLKINFO_BLKCOUNT_MASK&((_numblks) << RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)) | (RGX_HWPERF_BLKINFO_BLKOFFSET_MASK&((_blkoffset) << RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT))))
+
+/*! Macro used to obtain the number of counter blocks present in the packet */
+#define RGX_HWPERF_GET_BLKCOUNT(_blkinfo)            ((_blkinfo & RGX_HWPERF_BLKINFO_BLKCOUNT_MASK) >> RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)
+
+/*! Obtains the offset of the counter block stream in the packet */
+#define RGX_HWPERF_GET_BLKOFFSET(_blkinfo)           ((_blkinfo & RGX_HWPERF_BLKINFO_BLKOFFSET_MASK) >> RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT)
+
+/* This macro gets the number of blocks depending on the packet version */
+#define RGX_HWPERF_GET_NUMBLKS(_sig, _packet_data, _numblocks)	\
+{\
+	if (HWPERF_PACKET_V2B_SIG == (_sig))\
+	{\
+		(_numblocks) = RGX_HWPERF_GET_BLKCOUNT((_packet_data)->ui32BlkInfo);\
+	}\
+	else\
+	{\
+		IMG_UINT32 ui32VersionOffset = (((_sig) == HWPERF_PACKET_V2_SIG) ? 1 : 3);\
+		(_numblocks) = *(IMG_UINT16 *)(&((_packet_data)->ui32WorkTarget) + ui32VersionOffset);\
+	}\
+}
+
+/* This macro gets the counter stream pointer depending on the packet version */
+#define RGX_HWPERF_GET_CNTSTRM(_sig, _hw_packet_data, _cntstream_ptr)	\
+{\
+	if (HWPERF_PACKET_V2B_SIG == (_sig))\
+	{\
+		(_cntstream_ptr) = (IMG_UINT32 *)((IMG_BYTE *)(_hw_packet_data) + RGX_HWPERF_GET_BLKOFFSET((_hw_packet_data)->ui32BlkInfo));\
+	}\
+	else\
+	{\
+		IMG_UINT32 ui32BlkStreamOffsetInWords = ((_sig == HWPERF_PACKET_V2_SIG) ? 6 : 8);\
+		(_cntstream_ptr) = ((IMG_UINT32 *)_hw_packet_data) + ui32BlkStreamOffsetInWords;\
+	}\
+}
+
+/* This is the maximum number of frame contexts supported by the driver at present */
+#define RGX_HWPERF_HW_MAX_WORK_CONTEXT               2
+
+/*! Masks for use with the RGX_HWPERF_UFO_EV eEvType field */
+#define RGX_HWPERF_UFO_STREAMSIZE_MASK 0xFFFF0000U
+#define RGX_HWPERF_UFO_STREAMOFFSET_MASK 0x0000FFFFU
+
+/*! Shift for the UFO count and data stream fields */
+#define RGX_HWPERF_UFO_STREAMSIZE_SHIFT 16U
+#define RGX_HWPERF_UFO_STREAMOFFSET_SHIFT 0U
+
+/*! Macro used to set UFO stream info word as a combination of two 16-bit integers */
+#define RGX_HWPERF_MAKE_UFOPKTINFO(_ssize, _soff)\
+        ((IMG_UINT32) ((RGX_HWPERF_UFO_STREAMSIZE_MASK&((_ssize) << RGX_HWPERF_UFO_STREAMSIZE_SHIFT)) |\
+        (RGX_HWPERF_UFO_STREAMOFFSET_MASK&((_soff) << RGX_HWPERF_UFO_STREAMOFFSET_SHIFT))))
+
+/*! Macro used to obtain the UFO count */
+#define RGX_HWPERF_GET_UFO_STREAMSIZE(_streaminfo)\
+        ((_streaminfo & RGX_HWPERF_UFO_STREAMSIZE_MASK) >> RGX_HWPERF_UFO_STREAMSIZE_SHIFT)
+
+/*! Obtains the offset of the UFO stream in the packet */
+#define RGX_HWPERF_GET_UFO_STREAMOFFSET(_streaminfo)\
+        ((_streaminfo & RGX_HWPERF_UFO_STREAMOFFSET_MASK) >> RGX_HWPERF_UFO_STREAMOFFSET_SHIFT)
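+/* Illustrative round trip for the UFO stream info helpers above: pack a
+ * tuple count and byte offset into one word, then recover both fields.
+ */
+#if 0
+static INLINE void ExampleUfoStreamInfo(void)
+{
+	IMG_UINT32 ui32Info = RGX_HWPERF_MAKE_UFOPKTINFO(4U, 40U);
+	IMG_UINT32 ui32Size = RGX_HWPERF_GET_UFO_STREAMSIZE(ui32Info);	/* 4 tuples */
+	IMG_UINT32 ui32Off  = RGX_HWPERF_GET_UFO_STREAMOFFSET(ui32Info);	/* byte offset 40 */
+	(void)ui32Size; (void)ui32Off;
+}
+#endif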
+
+
+
+/******************************************************************************
+ * Data Stream Common Types
+ *****************************************************************************/
+
+/* All the Data Masters HWPerf is aware of. When a new DM is added to this
+ * list, it should be appended at the end to maintain backward compatibility
+ * of HWPerf data */
+typedef enum {
+
+	RGX_HWPERF_DM_GP,
+	RGX_HWPERF_DM_2D,
+	RGX_HWPERF_DM_TA,
+	RGX_HWPERF_DM_3D,
+	RGX_HWPERF_DM_CDM,
+	RGX_HWPERF_DM_RTU,
+	RGX_HWPERF_DM_SHG,
+	RGX_HWPERF_DM_TDM,
+
+	RGX_HWPERF_DM_LAST,
+
+	RGX_HWPERF_DM_INVALID = 0x1FFFFFFF
+} RGX_HWPERF_DM;
+
+/* Enum of bit mask values for the 32-bit feature flags used in HWPerf and the API */
+typedef enum {
+	RGX_HWPERF_FEATURE_PERFBUS_FLAG                = 0x001,
+	RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG  = 0x002,
+	RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG  = 0x004,
+	RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG     = 0x008,
+	RGX_HWPERF_FEATURE_ROGUEXE_FLAG                = 0x010,
+	RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG   = 0x020,
+	RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG             = 0x040,
+	RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION         = 0x080
+} RGX_HWPERF_FEATURE_FLAGS;
+
+/*! This structure holds the data of a firmware packet. */
+typedef struct
+{
+	RGX_HWPERF_DM eDM;				/*!< DataMaster identifier, see RGX_HWPERF_DM */
+	IMG_UINT32 ui32TxtActCyc;		/*!< Meta TXTACTCYC register value */
+	IMG_UINT32 ui32FWPerfCount0;	/*!< Meta/MIPS PERF_COUNT0 register */
+	IMG_UINT32 ui32FWPerfCount1;	/*!< Meta/MIPS PERF_COUNT1 register */
+	IMG_UINT32 ui32TimeCorrIndex;
+	IMG_UINT32 ui32Padding;
+} RGX_HWPERF_FW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA);
+
+/*! This structure holds the data of a hardware packet, including counters. */
+typedef struct
+{
+	IMG_UINT32 ui32DMCyc;         /*!< DataMaster cycle count register, 0 if none */
+	IMG_UINT32 ui32FrameNum;      /*!< Frame number, undefined on some DataMasters */
+	IMG_UINT32 ui32PID;           /*!< Process identifier */
+	IMG_UINT32 ui32DMContext;     /*!< GPU Data Master (FW) Context */
+	IMG_UINT32 ui32WorkTarget;    /*!< Render Target for TA/3D; Frame context for RTU; 0x0 otherwise */
+	IMG_UINT32 ui32ExtJobRef;     /*!< Client driver context job reference used for tracking/debugging */
+	IMG_UINT32 ui32IntJobRef;     /*!< RGX Data master context job reference used for tracking/debugging */
+	IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */
+	IMG_UINT32 ui32BlkInfo;       /*!< <31..16> NumBlocks <15..0> Counter block stream offset */
+	IMG_UINT32 ui32WorkCtx;       /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */
+	IMG_UINT32 ui32CtxPriority;   /*!< Context priority */
+	IMG_UINT32 ui32Padding1;      /* To ensure correct alignment */
+	IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Counter data */
+	IMG_UINT32 ui32Padding2;      /* To ensure correct alignment */
+} RGX_HWPERF_HW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA);
+
+/*! Mask for use with the aui32CountBlksStream field when decoding the
+ * counter block ID and mask word. */
+#define RGX_HWPERF_CNTBLK_ID_MASK	0xFFFF0000U
+#define RGX_HWPERF_CNTBLK_ID_SHIFT	16U
+
+/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address
+ * and stream index. May be used in decoding the counter block stream words of
+ * a RGX_HWPERF_HW_DATA structure. */
+#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) ((IMG_UINT16)(((_data_addr)->aui32CountBlksStream[(_idx)]&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT))
+#define RGX_HWPERF_GET_CNTBLK_IDW(_word)           ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT))
+
+/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address
+ * and stream index. May be used in decoding the counter block stream words
+ * of a RGX_HWPERF_HW_DATA structure. */
+#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) ((IMG_UINT16)((_data_addr)->aui32CountBlksStream[(_idx)]&((1<<RGX_CNTBLK_COUNTERS_MAX)-1)))
+#define RGX_HWPERF_GET_CNT_MASKW(_word)           ((IMG_UINT16)((_word)&((1<<RGX_CNTBLK_COUNTERS_MAX)-1)))
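+/* Illustrative sketch tying the stream helpers together: fetch the block
+ * count and counter stream of a V2B hardware packet, then pull each block's
+ * ID and counter mask from its header word. It assumes one counter value
+ * follows per bit set in the mask; __builtin_popcount() is a GCC builtin
+ * used purely for illustration.
+ */
+#if 0
+static INLINE void ExampleDecodeCounterBlocks(RGX_HWPERF_HW_DATA *psHWData)
+{
+	IMG_UINT32 ui32NumBlks, ui32Idx = 0;
+	IMG_UINT32 *pui32Stream;
+
+	RGX_HWPERF_GET_NUMBLKS(HWPERF_PACKET_V2B_SIG, psHWData, ui32NumBlks);
+	RGX_HWPERF_GET_CNTSTRM(HWPERF_PACKET_V2B_SIG, psHWData, pui32Stream);
+
+	while (ui32NumBlks-- != 0U)
+	{
+		IMG_UINT16 ui16BlkId = RGX_HWPERF_GET_CNTBLK_IDW(pui32Stream[ui32Idx]);
+		IMG_UINT16 ui16Mask  = RGX_HWPERF_GET_CNT_MASKW(pui32Stream[ui32Idx]);
+		/* one counter value follows for each bit set in ui16Mask */
+		ui32Idx += 1U + (IMG_UINT32)__builtin_popcount(ui16Mask);
+		(void)ui16BlkId;
+	}
+}
+#endif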
+
+
+typedef struct
+{
+	RGX_HWPERF_DM	eDM;					/*!< DataMaster identifier, see RGX_HWPERF_DM */
+	IMG_UINT32		ui32DMContext;			/*!< GPU Data Master (FW) Context */
+	IMG_UINT32		ui32FrameNum;			/*!< Frame number */
+	IMG_UINT32		ui32TxtActCyc;			/*!< Meta TXTACTCYC register value */
+	IMG_UINT32		ui32PerfCycle;			/*!< Cycle count. Used to measure HW context store latency */
+	IMG_UINT32		ui32PerfPhase;			/*!< Phase. Used to determine geometry content */
+	IMG_UINT32		ui32Padding[2];			/*!< Padding to 8 DWords */
+} RGX_HWPERF_CSW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA);
+
+/*! Enumeration of clocks supporting this event */
+typedef enum
+{
+	RGX_HWPERF_CLKS_CHG_INVALID = 0,
+
+	RGX_HWPERF_CLKS_CHG_NAME_CORE = 1,
+
+	RGX_HWPERF_CLKS_CHG_LAST,
+} RGX_HWPERF_CLKS_CHG_NAME;
+
+/*! This structure holds the data of a clocks change packet. */
+typedef struct
+{
+	IMG_UINT64                ui64NewClockSpeed;         /*!< New Clock Speed (in Hz) */
+	RGX_HWPERF_CLKS_CHG_NAME  eClockName;                /*!< Clock name */
+	IMG_UINT32                ui32CalibratedClockSpeed;  /*!< Calibrated new GPU clock speed (in Hz) */
+	IMG_UINT64                ui64OSTimeStamp;           /*!< OSTimeStamp sampled by the host */
+	IMG_UINT64                ui64CRTimeStamp;           /*!< CRTimeStamp sampled by the host and
+	                                                          correlated to OSTimeStamp */
+} RGX_HWPERF_CLKS_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA);
+
+/*! Enumeration of GPU utilisation states supported by this event */
+typedef IMG_UINT32 RGX_HWPERF_GPU_STATE;
+
+#define RGX_HWPERF_GPU_STATE_ACTIVE_LOW  0U
+#define RGX_HWPERF_GPU_STATE_IDLE        1U
+#define RGX_HWPERF_GPU_STATE_ACTIVE_HIGH 2U
+#define RGX_HWPERF_GPU_STATE_BLOCKED     3U
+#define RGX_HWPERF_GPU_STATE_LAST        4U
+
+
+/*! This structure holds the data of a GPU utilisation state change packet. */
+typedef struct
+{
+	RGX_HWPERF_GPU_STATE	eState;		/*!< New GPU utilisation state */
+	IMG_UINT32				uiUnused1;	/*!< Padding */
+	IMG_UINT32				uiUnused2;	/*!< Padding */
+	IMG_UINT32				uiUnused3;	/*!< Padding */
+} RGX_HWPERF_GPU_STATE_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA);
+
+
+/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */
+#define HWPERF_PWR_EST_V1_SIG	0x48504531
+
+/*! Macros to obtain a component field from a counter ID word */
+#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31)
+#define RGX_HWPERF_GET_PWR_EST_UNIT(_word)      (((_word)&0x0F000000)>>24)
+#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word)    ((_word)&0x0000FFFF)
+
+/*! This macro constructs a counter ID for a power estimate data stream from
+ * the component parts of: high word flag, unit id, counter number */
+#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _number)           \
+			((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<<31) | ((IMG_UINT32)((IMG_UINT32)(_unit)&0xFU)<<24) | \
+			              ((_number)&0x0000FFFFU)))
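+/* Illustrative round trip for the power-estimate counter ID helpers above. */
+#if 0
+static INLINE void ExamplePwrEstCounterId(void)
+{
+	IMG_UINT32 ui32Id = RGX_HWPERF_MAKE_PWR_EST_COUNTERID(1U, 2U, 7U);	/* 0x82000007 */
+	/* RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(ui32Id) == 1,
+	 * RGX_HWPERF_GET_PWR_EST_UNIT(ui32Id)      == 2,
+	 * RGX_HWPERF_GET_PWR_EST_NUMBER(ui32Id)    == 7 */
+	(void)ui32Id;
+}
+#endif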
+
+/*! This structure holds the data for a power estimate packet. */
+typedef struct
+{
+	IMG_UINT32  ui32StreamVersion;  /*!< HWPERF_PWR_EST_V1_SIG */
+	IMG_UINT32  ui32StreamSize;     /*!< Size of array in bytes of stream data
+	                                     held in the aui32StreamData member */
+	IMG_UINT32  aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Counter data */
+	IMG_UINT32  ui32Padding; /* To ensure correct alignment */
+} RGX_HWPERF_PWR_EST_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_EST_DATA);
+
+/*! Enumeration of the kinds of power change events that can occur */
+typedef enum
+{
+	RGX_HWPERF_PWR_UNDEFINED = 0,
+	RGX_HWPERF_PWR_ON        = 1, /*!< Whole device powered on */
+	RGX_HWPERF_PWR_OFF       = 2, /*!< Whole device powered off */
+	RGX_HWPERF_PWR_UP        = 3, /*!< Power turned on to a HW domain */
+	RGX_HWPERF_PWR_DOWN      = 4, /*!< Power turned off to a HW domain */
+
+	RGX_HWPERF_PWR_LAST,
+} RGX_HWPERF_PWR;
+
+/*! This structure holds the data of a power packet. */
+typedef struct
+{
+	RGX_HWPERF_PWR eChange;                  /*!< Defines the type of power change */
+	IMG_UINT32     ui32Domains;              /*!< HW Domains affected */
+	IMG_UINT64     ui64OSTimeStamp;          /*!< OSTimeStamp sampled by the host */
+	IMG_UINT64     ui64CRTimeStamp;          /*!< CRTimeStamp sampled by the host and
+	                                              correlated to OSTimeStamp */
+	IMG_UINT32     ui32CalibratedClockSpeed; /*!< GPU clock speed (in Hz) at the time
+	                                              the two timers were correlated */
+	IMG_UINT32     ui32Unused1;              /*!< Padding */
+} RGX_HWPERF_PWR_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_CHG_DATA);
+
+
+
+/*! Firmware Activity event. */
+typedef enum
+{
+	RGX_HWPERF_FWACT_EV_INVALID,            /*! Invalid value. */
+	RGX_HWPERF_FWACT_EV_REGS_SET,           /*! Registers set. */
+	RGX_HWPERF_FWACT_EV_HWR_DETECTED,       /*! HWR detected. */
+	RGX_HWPERF_FWACT_EV_HWR_RESET_REQUIRED, /*! Reset required. */
+	RGX_HWPERF_FWACT_EV_HWR_RECOVERED,      /*! HWR recovered. */
+	RGX_HWPERF_FWACT_EV_HWR_FREELIST_READY, /*! Freelist ready. */
+	RGX_HWPERF_FWACT_EV_FEATURES,           /*! Features present. */
+
+	RGX_HWPERF_FWACT_EV_LAST                /*! Number of elements. */
+} RGX_HWPERF_FWACT_EV;
+
+/*! Cause of the HWR event. */
+typedef enum
+{
+	RGX_HWPERF_HWR_REASON_INVALID,              /*! Invalid value. */
+	RGX_HWPERF_HWR_REASON_LOCKUP,               /*! Lockup. */
+	RGX_HWPERF_HWR_REASON_PAGEFAULT,            /*! Page fault. */
+	RGX_HWPERF_HWR_REASON_POLLFAIL,             /*! Poll fail. */
+	RGX_HWPERF_HWR_REASON_DEADLINE_OVERRUN,     /*! Deadline overrun. */
+	RGX_HWPERF_HWR_REASON_CSW_DEADLINE_OVERRUN, /*! Hard Context Switch deadline overrun. */
+
+	RGX_HWPERF_HWR_REASON_LAST                  /*! Number of elements. */
+} RGX_HWPERF_HWR_REASON;
+
+
+/* Fixed size for the BVNC string so it does not alter the packet data format.
+ * Check it is large enough against the official BVNC string length maximum.
+ */
+#define RGX_HWPERF_MAX_BVNC_LEN (24)
+static_assert((RGX_HWPERF_MAX_BVNC_LEN >= RGX_BVNC_STR_SIZE_MAX),
+			  "Space inside HWPerf packet data for BVNC string insufficient");
+
+/*! BVNC Features */
+typedef struct
+{
+	IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*! BVNC string */
+	IMG_UINT32 ui32BvncKmFeatureFlags;               /*! See RGX_HWPERF_FEATURE_FLAGS */
+	IMG_UINT8 ui8RgxUnitsIndirectByPhantom;          /*! Counter block instances */
+	IMG_UINT8 ui8RgxUnitsPhantomIndirectByDust;      /*! Counter block instances */
+	IMG_UINT8 ui8RgxUnitsPhantomIndirectByCluster;   /*! Counter block instances */
+	IMG_UINT8 ui8Padding;
+} RGX_HWPERF_BVNC;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC);
+
+/*! Sub-event's data. */
+typedef union
+{
+	struct
+	{
+		RGX_HWPERF_DM eDM;                 /*!< Data Master ID. */
+		RGX_HWPERF_HWR_REASON eReason;     /*!< Reason of the HWR. */
+		IMG_UINT32 ui32DMContext;          /*!< FW render context */
+	} sHWR;                                /*!< HWR sub-event data. */
+
+	RGX_HWPERF_BVNC sBVNC;    /*!< BVNC Features */
+} RGX_HWPERF_FWACT_DETAIL;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL);
+
+/*! This structure holds the data of a FW activity event packet */
+typedef struct
+{
+	RGX_HWPERF_FWACT_EV eEvType;           /*!< Event type. */
+	RGX_HWPERF_FWACT_DETAIL uFwActDetail;  /*!< Data of the sub-event. */
+	IMG_UINT32 ui32Padding;
+} RGX_HWPERF_FWACT_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA);
+
+
+
+typedef enum {
+	RGX_HWPERF_UFO_EV_UPDATE,
+	RGX_HWPERF_UFO_EV_CHECK_SUCCESS,
+	RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS,
+	RGX_HWPERF_UFO_EV_CHECK_FAIL,
+	RGX_HWPERF_UFO_EV_PRCHECK_FAIL,
+	RGX_HWPERF_UFO_EV_FORCE_UPDATE,
+
+	RGX_HWPERF_UFO_EV_LAST
+} RGX_HWPERF_UFO_EV;
+
+/*! Data stream tuple. */
+typedef union
+{
+	struct
+	{
+		IMG_UINT32 ui32FWAddr;
+		IMG_UINT32 ui32Value;
+	} sCheckSuccess;
+	struct
+	{
+		IMG_UINT32 ui32FWAddr;
+		IMG_UINT32 ui32Value;
+		IMG_UINT32 ui32Required;
+	} sCheckFail;
+	struct
+	{
+		IMG_UINT32 ui32FWAddr;
+		IMG_UINT32 ui32OldValue;
+		IMG_UINT32 ui32NewValue;
+	} sUpdate;
+} RGX_HWPERF_UFO_DATA_ELEMENT;
+
+/*! This structure holds the packet payload data for UFO event. */
+typedef struct
+{
+	RGX_HWPERF_UFO_EV eEvType;
+	IMG_UINT32 ui32TimeCorrIndex;
+	IMG_UINT32 ui32PID;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_UINT32 ui32IntJobRef;
+	IMG_UINT32 ui32DMContext;      /*!< GPU Data Master (FW) Context */
+	IMG_UINT32 ui32StreamInfo;
+	RGX_HWPERF_DM eDM;
+	IMG_UINT32 ui32Padding;
+	IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+} RGX_HWPERF_UFO_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA);
+
+
+
+typedef enum
+{
+	RGX_HWPERF_KICK_TYPE_TA3D, /*!< Replaced by separate TA and 3D types */
+	RGX_HWPERF_KICK_TYPE_TQ2D,
+	RGX_HWPERF_KICK_TYPE_TQ3D,
+	RGX_HWPERF_KICK_TYPE_CDM,
+	RGX_HWPERF_KICK_TYPE_RS,
+	RGX_HWPERF_KICK_TYPE_VRDM,
+	RGX_HWPERF_KICK_TYPE_TQTDM,
+	RGX_HWPERF_KICK_TYPE_SYNC,
+	RGX_HWPERF_KICK_TYPE_TA,
+	RGX_HWPERF_KICK_TYPE_3D,
+	RGX_HWPERF_KICK_TYPE_LAST,
+
+	RGX_HWPERF_KICK_TYPE_FORCE_32BIT = 0x7fffffff
+} RGX_HWPERF_KICK_TYPE;
+
+typedef struct
+{
+	RGX_HWPERF_KICK_TYPE ui32EnqType;
+	IMG_UINT32 ui32PID;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_UINT32 ui32IntJobRef;
+	IMG_UINT32 ui32DMContext;        /*!< GPU Data Master (FW) Context */
+	IMG_UINT32 ui32Padding;
+	IMG_UINT64 ui64CheckFence_UID;
+	IMG_UINT64 ui64UpdateFence_UID;
+	IMG_UINT64 ui64DeadlineInus;     /*!< Workload deadline in system monotonic time */
+	IMG_UINT64 ui64CycleEstimate;    /*!< Estimated cycle time for the workload */
+	PVRSRV_FENCE hCheckFence;        /*!< Fence this enqueue task waits for, before starting */
+	PVRSRV_FENCE hUpdateFence;       /*!< Fence this enqueue task signals, on completion */
+	PVRSRV_TIMELINE hUpdateTimeline; /*!< Timeline on which the above hUpdateFence is created */
+
+	IMG_UINT32 ui32Pad;              /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_ENQ_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+			  "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+	RGX_HWPERF_UFO_EV eEvType;
+	IMG_UINT32 ui32StreamInfo;
+	IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+	IMG_UINT32 ui32Padding;      /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_UFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+			  "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+	RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID,
+	RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /* PRIM */
+	RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED, /* Timeline resource packets are now
+	                                                      emitted in client hwperf buffer */
+	RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /* Fence for use on GPU (SYNC_CP backed) */
+	RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP,
+	RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, /* Fence created on SW timeline */
+
+	RGX_HWPERF_HOST_RESOURCE_TYPE_LAST
+} RGX_HWPERF_HOST_RESOURCE_TYPE;
+
+typedef union
+{
+	struct
+	{
+		IMG_UINT32 uiPid;
+		IMG_UINT64 ui64Timeline_UID1;
+		IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+		IMG_UINT32 ui32Padding;       /* Align structure size to 8 bytes */
+	} sTimelineAlloc;
+
+	struct
+	{
+		IMG_PID uiPID;
+		PVRSRV_FENCE hFence;
+		IMG_UINT32 ui32CheckPt_FWAddr;
+		IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+	} sFenceAlloc;
+
+	struct
+	{
+		IMG_UINT32 ui32CheckPt_FWAddr;
+		PVRSRV_TIMELINE hTimeline;
+		IMG_PID uiPID;
+		PVRSRV_FENCE hFence;
+		IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+	} sSyncCheckPointAlloc;
+
+	struct
+	{
+		IMG_PID uiPID;
+		PVRSRV_FENCE hSWFence;
+		PVRSRV_TIMELINE hSWTimeline;
+		IMG_UINT64 ui64SyncPtIndex;
+		IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+	} sSWFenceAlloc;
+
+	struct
+	{
+		IMG_UINT32 ui32FWAddr;
+		IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+	} sSyncAlloc;
+} RGX_HWPERF_HOST_ALLOC_DETAIL;
+
+typedef struct
+{
+	RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType;
+	RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail;
+} RGX_HWPERF_HOST_ALLOC_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+			  "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef union
+{
+	struct
+	{
+		IMG_UINT32 uiPid;
+		IMG_UINT64 ui64Timeline_UID1;
+		IMG_UINT32 ui32Padding;       /* Align structure size to 8 bytes */
+	} sTimelineDestroy;
+
+	struct
+	{
+		IMG_UINT64 ui64Fence_UID;
+		IMG_UINT32 ui32Padding;       /* Align structure size to 8 bytes */
+	} sFenceDestroy;
+
+	struct
+	{
+		IMG_UINT32 ui32CheckPt_FWAddr;
+	} sSyncCheckPointFree;
+
+	struct
+	{
+		IMG_UINT32 ui32FWAddr;
+	} sSyncFree;
+} RGX_HWPERF_HOST_FREE_DETAIL;
+
+typedef struct
+{
+	RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType;
+	RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail;
+	IMG_UINT32 ui32Padding;       /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_FREE_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+			  "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+	IMG_UINT64 ui64CRTimestamp;
+	IMG_UINT64 ui64OSTimestamp;
+	IMG_UINT32 ui32ClockSpeed;
+	IMG_UINT32 ui32Padding;       /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_CLK_SYNC_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+			  "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef union
+{
+	struct
+	{
+		IMG_UINT64 ui64NewFence_UID;
+		IMG_UINT64 ui64InFence1_UID;
+		IMG_UINT64 ui64InFence2_UID;
+		IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+		IMG_UINT32 ui32Padding;       /* Align structure size to 8 bytes */
+	} sFenceMerge;
+} RGX_HWPERF_HOST_MODIFY_DETAIL;
+
+typedef struct
+{
+	RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType;
+	RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail;
+} RGX_HWPERF_HOST_MODIFY_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+			  "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+	RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0,
+	RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK,
+	RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING,
+	RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD,
+	RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT,
+
+	RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST
+} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS;
+
+typedef enum
+{
+	RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0,
+	RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE,
+	RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED,
+	RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING,
+	RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS,
+	RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT,
+	RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED,
+	RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING,
+	RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING,
+
+	RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST
+} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON;
+
+typedef enum
+{
+	RGX_HWPERF_DEV_INFO_EV_HEALTH,
+
+	RGX_HWPERF_DEV_INFO_EV_LAST
+} RGX_HWPERF_DEV_INFO_EV;
+
+typedef union
+{
+	struct
+	{
+		RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus;
+		RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason;
+	} sDeviceStatus;
+} RGX_HWPERF_HOST_DEV_INFO_DETAIL;
+
+typedef struct
+{
+	IMG_UINT32 ui32Padding;       /* Align structure size to 8 bytes */
+	RGX_HWPERF_DEV_INFO_EV eEvType;
+	RGX_HWPERF_HOST_DEV_INFO_DETAIL	uDevInfoDetail;
+} RGX_HWPERF_HOST_DEV_INFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+			 "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+	RGX_HWPERF_INFO_EV_MEM_USAGE,
+
+	RGX_HWPERF_INFO_EV_LAST
+} RGX_HWPERF_INFO_EV;
+
+typedef union
+{
+	struct
+	{
+		IMG_UINT32 ui32TotalMemoryUsage;
+		struct
+		{
+			IMG_UINT32 ui32Pid;
+			IMG_UINT32 ui32KernelMemUsage;
+			IMG_UINT32 ui32GraphicsMemUsage;
+		} sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS];
+	} sMemUsageStats;
+} RGX_HWPERF_HOST_INFO_DETAIL;
+
+typedef struct
+{
+	IMG_UINT32 ui32Padding;       /* Align structure size to 8 bytes */
+	RGX_HWPERF_INFO_EV eEvType;
+	RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail;
+} RGX_HWPERF_HOST_INFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+			  "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0,
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END,
+
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST,
+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE;
+
+typedef enum
+{
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0,
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT,
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED,
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR,
+
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST,
+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT;
+
+typedef union
+{
+	struct
+	{
+		IMG_UINT32 ui32TimeoutInMs;
+	} sBegin;
+
+	struct
+	{
+		RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult;
+	} sEnd;
+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL;
+
+typedef struct
+{
+	IMG_PID uiPID;
+	PVRSRV_FENCE hFence;
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType;
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail;
+
+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA;
+
+static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+			  "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+	IMG_PID uiPID;
+	PVRSRV_TIMELINE hTimeline;
+	IMG_UINT64 ui64SyncPtIndex;
+
+} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA;
+
+static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+			  "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+/*! This type is a union of the packet payload data structures associated
+ * with the various FW and Host events */
+typedef union
+{
+	RGX_HWPERF_FW_DATA             sFW;           /*!< Firmware event packet data */
+	RGX_HWPERF_HW_DATA             sHW;           /*!< Hardware event packet data */
+	RGX_HWPERF_CLKS_CHG_DATA       sCLKSCHG;      /*!< Clock change event packet data */
+	RGX_HWPERF_GPU_STATE_CHG_DATA  sGPUSTATECHG;  /*!< GPU utilisation state change event packet data */
+	RGX_HWPERF_PWR_EST_DATA        sPWREST;       /*!< Power estimate event packet data */
+	RGX_HWPERF_PWR_CHG_DATA        sPWR;          /*!< Power event packet data */
+	RGX_HWPERF_CSW_DATA			   sCSW;		  /*!< Context switch packet data */
+	RGX_HWPERF_UFO_DATA            sUFO;          /*!< UFO data */
+	RGX_HWPERF_FWACT_DATA          sFWACT;        /*!< Firmware activity event packet data */
+	/* */
+	RGX_HWPERF_HOST_ENQ_DATA       sENQ;          /*!< Host ENQ data */
+	RGX_HWPERF_HOST_UFO_DATA       sHUFO;         /*!< Host UFO data */
+	RGX_HWPERF_HOST_ALLOC_DATA     sHALLOC;       /*!< Host Alloc data */
+	RGX_HWPERF_HOST_CLK_SYNC_DATA  sHCLKSYNC;     /*!< Host CLK_SYNC data */
+	RGX_HWPERF_HOST_FREE_DATA      sHFREE;        /*!< Host Free data */
+	RGX_HWPERF_HOST_MODIFY_DATA    sHMOD;         /*!< Host Modify data */
+	RGX_HWPERF_HOST_DEV_INFO_DATA  sHDEVINFO;	  /*!< Host device info data */
+	RGX_HWPERF_HOST_INFO_DATA      sHINFO;        /*!< Host info data */
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT;   /*!< Host fence-wait data */
+	RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host SW-timeline advance data */
+} RGX_HWPERF_V2_PACKET_DATA_, *RGX_PHWPERF_V2_PACKET_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA_);
+
+#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (((IMG_BYTE *)(_packet_addr)) + sizeof(RGX_HWPERF_V2_PACKET_HDR)))
+
+
+/******************************************************************************
+ * API Types
+ *****************************************************************************/
+
+/*! Counter block IDs for all the hardware blocks with counters.
+ * Directly addressable blocks must have a value between 0..15.
+ * First hex digit represents a group number and the second hex digit
+ * represents the unit within the group. Group 0 is the direct group,
+ * all others are indirect groups.
+ */
+typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID;
+
+/* Directly addressable counter blocks */
+#define	RGX_CNTBLK_ID_TA			 0x0000U
+#define	RGX_CNTBLK_ID_RASTER		 0x0001U /* Non-cluster grouping cores */
+#define	RGX_CNTBLK_ID_HUB			 0x0002U /* Non-cluster grouping cores */
+#define	RGX_CNTBLK_ID_TORNADO		 0x0003U /* XT cores */
+#define	RGX_CNTBLK_ID_JONES			 0x0004U /* S7 cores */
+#define	RGX_CNTBLK_ID_BF			 0x0005U /* Doppler unit */
+#define	RGX_CNTBLK_ID_BT			 0x0006U /* Doppler unit */
+#define	RGX_CNTBLK_ID_RT			 0x0007U /* Doppler unit */
+#define	RGX_CNTBLK_ID_SH			 0x0008U /* Ray tracing unit */
+
+#define	RGX_CNTBLK_ID_DIRECT_LAST	 0x0009U
+
+/* Indirectly addressable counter blocks */
+#define	RGX_CNTBLK_ID_TPU_MCU0		 0x0010U /* Addressable by Dust */
+#define	RGX_CNTBLK_ID_TPU_MCU1		 0x0011U
+#define	RGX_CNTBLK_ID_TPU_MCU2		 0x0012U
+#define	RGX_CNTBLK_ID_TPU_MCU3		 0x0013U
+#define	RGX_CNTBLK_ID_TPU_MCU4		 0x0014U
+#define	RGX_CNTBLK_ID_TPU_MCU5		 0x0015U
+#define	RGX_CNTBLK_ID_TPU_MCU6		 0x0016U
+#define	RGX_CNTBLK_ID_TPU_MCU7		 0x0017U
+#define	RGX_CNTBLK_ID_TPU_MCU_ALL	 0x4010U
+
+#define	RGX_CNTBLK_ID_USC0			 0x0020U /* Addressable by Cluster */
+#define	RGX_CNTBLK_ID_USC1			 0x0021U
+#define	RGX_CNTBLK_ID_USC2			 0x0022U
+#define	RGX_CNTBLK_ID_USC3			 0x0023U
+#define	RGX_CNTBLK_ID_USC4			 0x0024U
+#define	RGX_CNTBLK_ID_USC5			 0x0025U
+#define	RGX_CNTBLK_ID_USC6			 0x0026U
+#define	RGX_CNTBLK_ID_USC7			 0x0027U
+#define	RGX_CNTBLK_ID_USC8			 0x0028U
+#define	RGX_CNTBLK_ID_USC9			 0x0029U
+#define	RGX_CNTBLK_ID_USC10			 0x002AU
+#define	RGX_CNTBLK_ID_USC11			 0x002BU
+#define	RGX_CNTBLK_ID_USC12			 0x002CU
+#define	RGX_CNTBLK_ID_USC13			 0x002DU
+#define	RGX_CNTBLK_ID_USC14			 0x002EU
+#define	RGX_CNTBLK_ID_USC15			 0x002FU
+#define	RGX_CNTBLK_ID_USC_ALL		 0x4020U
+
+#define	RGX_CNTBLK_ID_TEXAS0		 0x0030U /* Addressable by Phantom in XT, Dust in S7 */
+#define	RGX_CNTBLK_ID_TEXAS1		 0x0031U
+#define	RGX_CNTBLK_ID_TEXAS2		 0x0032U
+#define	RGX_CNTBLK_ID_TEXAS3		 0x0033U
+#define	RGX_CNTBLK_ID_TEXAS4		 0x0034U
+#define	RGX_CNTBLK_ID_TEXAS5		 0x0035U
+#define	RGX_CNTBLK_ID_TEXAS6		 0x0036U
+#define	RGX_CNTBLK_ID_TEXAS7		 0x0037U
+#define	RGX_CNTBLK_ID_TEXAS_ALL		 0x4030U
+
+#define	RGX_CNTBLK_ID_RASTER0		 0x0040U /* Addressable by Phantom, XT only */
+#define	RGX_CNTBLK_ID_RASTER1		 0x0041U
+#define	RGX_CNTBLK_ID_RASTER2		 0x0042U
+#define	RGX_CNTBLK_ID_RASTER3		 0x0043U
+#define	RGX_CNTBLK_ID_RASTER_ALL	 0x4040U
+
+#define	RGX_CNTBLK_ID_BLACKPEARL0	 0x0050U /* Addressable by Phantom, S7, only */
+#define	RGX_CNTBLK_ID_BLACKPEARL1	 0x0051U
+#define	RGX_CNTBLK_ID_BLACKPEARL2	 0x0052U
+#define	RGX_CNTBLK_ID_BLACKPEARL3	 0x0053U
+#define	RGX_CNTBLK_ID_BLACKPEARL_ALL 0x4050U
+
+#define	RGX_CNTBLK_ID_PBE0			 0x0060U /* Addressable by Cluster in S7 and PBE2_IN_XE */
+#define	RGX_CNTBLK_ID_PBE1			 0x0061U
+#define	RGX_CNTBLK_ID_PBE2			 0x0062U
+#define	RGX_CNTBLK_ID_PBE3			 0x0063U
+#define	RGX_CNTBLK_ID_PBE4			 0x0064U
+#define	RGX_CNTBLK_ID_PBE5			 0x0065U
+#define	RGX_CNTBLK_ID_PBE6			 0x0066U
+#define	RGX_CNTBLK_ID_PBE7			 0x0067U
+#define	RGX_CNTBLK_ID_PBE8			 0x0068U
+#define	RGX_CNTBLK_ID_PBE9			 0x0069U
+#define	RGX_CNTBLK_ID_PBE10			 0x006AU
+#define	RGX_CNTBLK_ID_PBE11			 0x006BU
+#define	RGX_CNTBLK_ID_PBE12			 0x006CU
+#define	RGX_CNTBLK_ID_PBE13			 0x006DU
+#define	RGX_CNTBLK_ID_PBE14			 0x006EU
+#define	RGX_CNTBLK_ID_PBE15			 0x006FU
+#define	RGX_CNTBLK_ID_PBE_ALL		 0x4060U
+
+#define	RGX_CNTBLK_ID_BX_TU0		 0x0070U /* Doppler unit, XT only */
+#define	RGX_CNTBLK_ID_BX_TU1		 0x0071U
+#define	RGX_CNTBLK_ID_BX_TU2		 0x0072U
+#define	RGX_CNTBLK_ID_BX_TU3		 0x0073U
+#define	RGX_CNTBLK_ID_BX_TU_ALL		 0x4070U
+
+#define	RGX_CNTBLK_ID_LAST			 0x0074U
+
+#define	RGX_CNTBLK_ID_CUSTOM0		 0x7FF0U
+#define	RGX_CNTBLK_ID_CUSTOM1		 0x7FF1U
+#define	RGX_CNTBLK_ID_CUSTOM2		 0x7FF2U
+#define	RGX_CNTBLK_ID_CUSTOM3		 0x7FF3U
+#define	RGX_CNTBLK_ID_CUSTOM4_FW	 0x7FF4U /* Custom block used for getting statistics held in the FW */
+
+
+/* Masks for the counter block ID */
+#define RGX_CNTBLK_ID_GROUP_MASK     (0x00F0U)
+#define RGX_CNTBLK_ID_GROUP_SHIFT    (4)
+#define RGX_CNTBLK_ID_UNIT_ALL_MASK  (0x4000U)
+#define RGX_CNTBLK_ID_UNIT_MASK      (0xFU)
+
+#define RGX_CNTBLK_INDIRECT_COUNT(_class, _n) ((IMG_UINT32)(RGX_CNTBLK_ID_ ## _class ## _n) - (IMG_UINT32)(RGX_CNTBLK_ID_ ## _class ## 0) + 1u)
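+
+/* Illustrative sketch (not part of the original header): decomposing a block
+ * ID into its group and unit fields with the masks above. The helper names
+ * are hypothetical and exist purely to show how the encoding is laid out.
+ */
+#if 0	/* example only */
+static inline IMG_UINT32 ExampleCntBlkGroup(RGX_HWPERF_CNTBLK_ID eBlkId)
+{
+	/* e.g. RGX_CNTBLK_ID_USC3 (0x0023U) yields group 2 */
+	return (eBlkId & RGX_CNTBLK_ID_GROUP_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT;
+}
+
+static inline IMG_UINT32 ExampleCntBlkUnit(RGX_HWPERF_CNTBLK_ID eBlkId)
+{
+	/* e.g. RGX_CNTBLK_ID_USC3 (0x0023U) yields unit 3 */
+	return eBlkId & RGX_CNTBLK_ID_UNIT_MASK;
+}
+#endif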
+
+/*! The number of layout blocks defined with configurable multiplexed
+ * performance counters, hence excludes custom counter blocks.
+ */
+#define RGX_HWPERF_MAX_DEFINED_BLKS  (\
+	(IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST    +\
+	RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU,     7)+\
+	RGX_CNTBLK_INDIRECT_COUNT(USC,        15)+\
+	RGX_CNTBLK_INDIRECT_COUNT(TEXAS,       7)+\
+	RGX_CNTBLK_INDIRECT_COUNT(RASTER,      3)+\
+	RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL,  3)+\
+	RGX_CNTBLK_INDIRECT_COUNT(PBE,        15)+\
+	RGX_CNTBLK_INDIRECT_COUNT(BX_TU,       3) )
+
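+/* With the block ranges above this evaluates to
+ * 9 + 8 + 16 + 8 + 4 + 4 + 16 + 4 = 69 defined blocks.
+ */
+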
+#define RGX_HWPERF_EVENT_MASK_VALUE(e)      (IMG_UINT64_C(1) << (e))
+
+#define RGX_CUSTOM_FW_CNTRS	\
+		X(TA_LOCAL_FL_SIZE,		0x0,	RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED))	\
+		X(TA_GLOBAL_FL_SIZE,	0x1,	RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED))	\
+		X(3D_LOCAL_FL_SIZE,		0x2,	RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED))	\
+		X(3D_GLOBAL_FL_SIZE,	0x3,	RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED))
+
+/*! Counter IDs for the firmware held statistics */
+typedef enum
+{
+#define X(ctr, id, allow_mask)	RGX_CUSTOM_FW_CNTR_##ctr = id,
+	RGX_CUSTOM_FW_CNTRS
+#undef X
+
+	/* always the last entry in the list */
+	RGX_CUSTOM_FW_CNTR_LAST
+} RGX_HWPERF_CUSTOM_FW_CNTR_ID;
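+
+/* Illustrative sketch (not part of the original header): the same X-macro
+ * list can be expanded into other tables, e.g. a per-counter table of the
+ * event masks under which each FW counter may be sampled:
+ */
+#if 0	/* example only */
+static const IMG_UINT64 gaui64ExampleCntrAllowMasks[] = {
+#define X(ctr, id, allow_mask)	allow_mask,
+	RGX_CUSTOM_FW_CNTRS
+#undef X
+};
+#endif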
+
+/*! Identifier for each counter in a performance counting module */
+typedef IMG_UINT32 RGX_HWPERF_CNTBLK_COUNTER_ID;
+
+#define	RGX_CNTBLK_COUNTER0_ID 0U
+#define	RGX_CNTBLK_COUNTER1_ID 1U
+#define	RGX_CNTBLK_COUNTER2_ID 2U
+#define	RGX_CNTBLK_COUNTER3_ID 3U
+#define	RGX_CNTBLK_COUNTER4_ID 4U
+#define	RGX_CNTBLK_COUNTER5_ID 5U
+	/* MAX value used in server handling of counter config arrays */
+#define	RGX_CNTBLK_COUNTERS_MAX 6U
+
+
+/* Sets all the bits from bit b1 to b2 inclusive, in an IMG_UINT64 value */
+#define MASK_RANGE_IMPL(b1, b2)	((IMG_UINT64)((IMG_UINT64_C(1) << ((IMG_UINT32)(b2)-(IMG_UINT32)(b1) + 1U)) - 1U) << (IMG_UINT32)b1)
+#define MASK_RANGE(R)			MASK_RANGE_IMPL(R##_FIRST_TYPE, R##_LAST_TYPE)
+#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) (IMG_UINT32_C(1) << (e))
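+
+/* For illustration, MASK_RANGE_IMPL(4, 7) evaluates to
+ * ((1 << 4) - 1) << 4 = 0x00000000000000F0, i.e. bits 4..7 set.
+ */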
+
+/*! Mask macros for use with RGXCtrlHWPerf() API.
+ */
+#define RGX_HWPERF_EVENT_MASK_NONE          (IMG_UINT64_C(0x0000000000000000))
+#define RGX_HWPERF_EVENT_MASK_ALL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+
+/*! HWPerf Firmware event masks
+ * Next macro covers all FW Start/End/Debug (SED) events.
+ */
+#define RGX_HWPERF_EVENT_MASK_FW_SED    (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE))
+
+#define RGX_HWPERF_EVENT_MASK_FW_UFO    (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO))
+#define RGX_HWPERF_EVENT_MASK_FW_CSW    (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\
+                                          RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED))
+#define RGX_HWPERF_EVENT_MASK_ALL_FW    (RGX_HWPERF_EVENT_MASK_FW_SED |\
+                                          RGX_HWPERF_EVENT_MASK_FW_UFO |\
+                                          RGX_HWPERF_EVENT_MASK_FW_CSW)
+
+#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC   (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC))
+#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\
+                                               MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\
+                                              ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC))
+
+#define RGX_HWPERF_EVENT_MASK_ALL_HW        (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\
+                                              RGX_HWPERF_EVENT_MASK_HW_PERIODIC)
+
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST   (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE))
+
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR       (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\
+                                              RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\
+                                              RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG))
+
+/*! HWPerf Host event masks
+ */
+#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ  (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ))
+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO   (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO))
+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR   (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC))
+
+
+/*! Type used in the RGX API RGXConfigureAndEnableHWPerfCounters() */
+typedef struct
+{
+	/*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
+	IMG_UINT16 ui16BlockID;
+
+	/*! 4 or 6 LSBs used to select counters to configure in this block. */
+	IMG_UINT8  ui8CounterSelect;
+
+	/*! 4 or 6 LSBs used as MODE bits for the counters in the group. */
+	IMG_UINT8  ui8Mode;
+
+	/*! 5 or 6 LSBs used as the GROUP_SELECT value for the counter. */
+	IMG_UINT8  aui8GroupSelect[RGX_CNTBLK_COUNTERS_MAX];
+
+	/*! 16 LSBs used as the BIT_SELECT value for the counter. */
+	IMG_UINT16 aui16BitSelect[RGX_CNTBLK_COUNTERS_MAX];
+
+	/*! 14 LSBs used as the BATCH_MAX value for the counter. */
+	IMG_UINT32 aui32BatchMax[RGX_CNTBLK_COUNTERS_MAX];
+
+	/*! 14 LSBs used as the BATCH_MIN value for the counter. */
+	IMG_UINT32 aui32BatchMin[RGX_CNTBLK_COUNTERS_MAX];
+} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK);
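+
+/* Illustrative sketch (not part of the original header): a caller of
+ * RGXConfigureAndEnableHWPerfCounters() might fill one entry for the direct
+ * TA block as below. The select/mode values are arbitrary and are for
+ * illustration only.
+ */
+#if 0	/* example only */
+static const RGX_HWPERF_CONFIG_CNTBLK sExampleBlkCfg = {
+	.ui16BlockID      = RGX_CNTBLK_ID_TA,
+	.ui8CounterSelect = 0x3U,	/* configure counters 0 and 1 */
+	.ui8Mode          = 0x0U,
+	.aui8GroupSelect  = { 0 },
+	.aui16BitSelect   = { 0 },
+	.aui32BatchMax    = { 0 },
+	.aui32BatchMin    = { 0 },
+};
+#endif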
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* RGX_HWPERF_H_ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_hwperf_table.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_hwperf_table.c
new file mode 100644
index 0000000..16f01175
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_hwperf_table.c
@@ -0,0 +1,614 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance counter table
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX HW Performance counters table
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgx_fwif_hwperf.h"
+#if defined(__KERNEL__)
+#include "rgxdefs_km.h"
+#else
+#include "rgxdefs.h"
+#endif
+#include "rgx_hwperf_table.h"
+
+/* Includes needed for PVRSRVKM (Server) context */
+#	include "rgx_bvnc_defs_km.h"
+#	if defined(__KERNEL__)
+#		include "rgxdevice.h"
+#	endif
+
+/* Shared compile-time context ASSERT macro */
+#if defined(RGX_FIRMWARE)
+#	include "rgxfw_utils.h"
+/*  firmware context */
+#	define DBG_ASSERT(_c) RGXFW_ASSERT((_c))
+#else
+#	include "pvr_debug.h"
+/*  host client/server context */
+#	define DBG_ASSERT(_c) PVR_ASSERT((_c))
+#endif
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered()
+
+ Referenced in gasCntBlkTypeModel[] table below and only called from
+ RGX_FIRMWARE run-time context. Therefore compile-time configuration is used.
+ *****************************************************************************/
+
+#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS)
+#	include "rgxfw_pow.h"
+#	include "rgxfw_utils.h"
+
+static bool rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+	PVR_UNREFERENCED_PARAMETER(eBlkType);
+	PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+	/* S7XT: JONES */
+	return (eBlkType == RGX_CNTBLK_ID_JONES);
+#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+	/* S6XT: TA, TORNADO */
+	return true;
+#else
+	/* S6  : TA, HUB, RASTER (RASCAL) */
+	return (gsPowCtl.ePowState & RGXFW_POW_ST_RD_ON) != 0U;
+#endif
+}
+
+/* Only use conditional compilation when counter blocks appear in different
+ * islands for different Rogue families.
+ */
+static bool rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+	IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_dusts_num();
+
+	if (((gsPowCtl.ePowState & RGXFW_POW_ST_RD_ON) != 0U) &&
+			(ui32NumDustsEnabled > 0U))
+	{
+#if defined(RGX_FEATURE_DYNAMIC_DUST_POWER)
+		IMG_UINT32 ui32NumUscEnabled = ui32NumDustsEnabled*2U;
+
+		switch (eBlkType)
+		{
+		case RGX_CNTBLK_ID_TPU_MCU0:                   /* S6 and S6XT */
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+		case RGX_CNTBLK_ID_TEXAS0:                     /* S7 */
+#endif
+			if (ui8UnitId >= ui32NumDustsEnabled)
+			{
+				return false;
+			}
+			break;
+		case RGX_CNTBLK_ID_USC0:                       /* S6, S6XT, S7 */
+		case RGX_CNTBLK_ID_PBE0:                       /* S7, PBE2_IN_XE */
+			/* Handle single cluster cores */
+			if (ui8UnitId >= ((ui32NumUscEnabled > RGX_FEATURE_NUM_CLUSTERS) ? RGX_FEATURE_NUM_CLUSTERS : ui32NumUscEnabled))
+			{
+				return false;
+			}
+			break;
+		case RGX_CNTBLK_ID_BLACKPEARL0:                /* S7 */
+		case RGX_CNTBLK_ID_RASTER0:                    /* S6XT */
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+		case RGX_CNTBLK_ID_TEXAS0:                     /* S6XT */
+#endif
+			if (ui8UnitId >= (RGX_REQ_NUM_PHANTOMS(ui32NumUscEnabled)))
+			{
+				return false;
+			}
+			break;
+		default:
+			RGXFW_ASSERT(false);  /* should never get here, table error */
+			break;
+		}
+#else
+		/* Always true, no fused DUSTs, all powered so do not check unit */
+		PVR_UNREFERENCED_PARAMETER(eBlkType);
+		PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+#endif
+	}
+	else
+	{
+		return false;
+	}
+	return true;
+}
+
+#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
+
+# define rgxfw_hwperf_pow_st_direct   ((void*)NULL)
+# define rgxfw_hwperf_pow_st_indirect ((void*)NULL)
+
+#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
+
+# define rgxfw_hwperf_pow_st_gandalf  ((void*)NULL)
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end
+ *****************************************************************************/
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start
+
+ Referenced in gasCntBlkTypeModel[] table below and called from all build
+ contexts:
+ RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server).
+
+ Therefore each function has two implementations: one for compile-time and one
+ for run-time configuration, selected by the build context. The functions inform the
+ caller whether this block is valid for this particular RGX device. Other
+ run-time dependent data is returned in psRtInfo for the caller to use.
+ *****************************************************************************/
+
+/* Used for block types: USC */
+static IMG_BOOL rgx_hwperf_blk_present_perfbus(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	DBG_ASSERT(psBlkTypeDesc != NULL);
+	DBG_ASSERT(psRtInfo != NULL);
+	DBG_ASSERT(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_USC0);
+
+#if defined(__KERNEL__) /* Server context */
+	PVR_ASSERT(pvDev_km != NULL);
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+		if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))
+		{
+			psRtInfo->uiNumUnits = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) : 0;
+			return IMG_TRUE;
+		}
+	}
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+#else /* FW context */
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_PERFBUS)
+	psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+	return IMG_TRUE;
+# else
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+	return IMG_FALSE;
+}
+
+/* Used for block types: Direct RASTERISATION, HUB */
+static IMG_BOOL rgx_hwperf_blk_present_not_clustergrouping(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	DBG_ASSERT(psBlkTypeDesc != NULL);
+	DBG_ASSERT(psRtInfo != NULL);
+	DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RASTER) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_HUB));
+
+#if defined(__KERNEL__) /* Server context */
+	PVR_ASSERT(pvDev_km != NULL);
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+		if ((!RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) &&
+				(RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)))
+		{
+			psRtInfo->uiNumUnits = 1;
+			return IMG_TRUE;
+		}
+	}
+#else /* FW context */
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS)
+	psRtInfo->uiNumUnits = 1;
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	return IMG_TRUE;
+# else
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+	return IMG_FALSE;
+}
+
+/* Used for block types: BF, BT, RT, SH, BX_TU */
+static IMG_BOOL rgx_hwperf_blk_present_raytracing(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	DBG_ASSERT(psBlkTypeDesc != NULL);
+	DBG_ASSERT(psRtInfo != NULL);
+	DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BF) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BT) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RT) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_SH) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BX_TU0));
+
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+
+	return IMG_FALSE;
+}
+
+#if defined(__KERNEL__) /* Server context */
+IMG_UINT32 rgx_units_indirect_by_phantom(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+	/* Run-time math for RGX_HWPERF_INDIRECT_BY_PHANTOM */
+	return ((psFeatCfg->ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK) == 0) ? 1
+			: (psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]+3)/4;
+}
+
+IMG_UINT32 rgx_units_phantom_indirect_by_dust(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+	/* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST */
+	return MAX((psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]>>1),1);
+}
+
+IMG_UINT32 rgx_units_phantom_indirect_by_cluster(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+	/* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER */
+	return psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX];
+}
+#endif /* defined(__KERNEL__) */
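+
+/* Worked example (illustrative): for a core with NUM_CLUSTERS = 6 and
+ * CLUSTER_GROUPING present, the helpers above give (6+3)/4 = 2 units indirect
+ * by phantom, MAX(6>>1, 1) = 3 units indirect by dust and 6 units indirect by
+ * cluster.
+ */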
+
+/* Used for block types: TORNADO, TEXAS, Indirect RASTERISATION */
+static IMG_BOOL rgx_hwperf_blk_present_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	DBG_ASSERT(psBlkTypeDesc != NULL);
+	DBG_ASSERT(psRtInfo != NULL);
+	DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TORNADO) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RASTER0));
+
+#if defined(__KERNEL__) /* Server context */
+	PVR_ASSERT(pvDev_km != NULL);
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+		if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))
+		{
+			psRtInfo->uiNumUnits =
+					(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TORNADO) ? 1
+							: rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); // Texas, Ind. Raster
+			return IMG_TRUE;
+		}
+	}
+#else /* FW context */
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS)
+	psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+	return IMG_TRUE;
+# else
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+	return IMG_FALSE;
+}
+
+/* Used for block types: JONES, TPU_MCU, TEXAS, BLACKPEARL, PBE */
+static IMG_BOOL rgx_hwperf_blk_present_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	DBG_ASSERT(psBlkTypeDesc != NULL);
+	DBG_ASSERT(psRtInfo != NULL);
+	DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_JONES) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_PBE0));
+
+#if defined(__KERNEL__) /* Server context */
+	PVR_ASSERT(pvDev_km != NULL);
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+		if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+		{
+			if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0)
+			{
+				psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
+				return IMG_TRUE;
+			}
+			else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TEXAS0)
+			{
+				psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
+				return IMG_TRUE;
+			}
+			else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0)
+			{
+				psRtInfo->uiNumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg);
+				return IMG_TRUE;
+			}
+			else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_PBE0)
+			{
+				psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg);
+				return IMG_TRUE;
+			}
+			else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_JONES)
+			{
+				psRtInfo->uiNumUnits = 1;
+				return IMG_TRUE;
+			}
+		}
+	}
+#else /* FW context */
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+	psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+	return IMG_TRUE;
+# else
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+	return IMG_FALSE;
+}
+
+/* Used for block types: TA, TPU_MCU. Also PBE when PBE2_IN_XE is present */
+static IMG_BOOL rgx_hwperf_blk_present_not_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	DBG_ASSERT(psBlkTypeDesc != NULL);
+	DBG_ASSERT(psRtInfo != NULL);
+	DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TA) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) ||
+			(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_PBE0));
+
+#if defined(__KERNEL__) /* Server context */
+	PVR_ASSERT(pvDev_km != NULL);
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+		if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE) &&
+				RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))
+		{
+			if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE))
+			{
+				switch (psBlkTypeDesc->uiCntBlkIdBase)
+				{
+					case RGX_CNTBLK_ID_TA:	psRtInfo->uiNumUnits = 1; break;
+					case RGX_CNTBLK_ID_PBE0:
+						psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg); // PBE0
+						break;
+					default:
+						psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); // TPU_MCU0
+				}
+			}
+			else
+			{
+				switch (psBlkTypeDesc->uiCntBlkIdBase)
+				{
+					case RGX_CNTBLK_ID_TA:	psRtInfo->uiNumUnits = 1; break;
+					case RGX_CNTBLK_ID_PBE0:
+						/* PBE counters are not present on this config */
+						return IMG_FALSE;
+					default:
+						psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); // TPU_MCU0
+				}
+			}
+			return IMG_TRUE;
+		}
+	}
+#else /* FW context */
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS)
+#  if !defined(RGX_FEATURE_PBE2_IN_XE)
+	if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_PBE0)
+	{
+		/* No support for PBE counters without PBE2_IN_XE */
+		return IMG_FALSE;
+	}
+#  endif
+	psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+	return IMG_TRUE;
+# else
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+	return IMG_FALSE;
+}
+
+#if !defined(__KERNEL__) /* Firmware or User-mode context */
+static IMG_BOOL rgx_hwperf_blk_present_false(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+
+	/* Some functions not used on some BVNCs, silence compiler warnings */
+	PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_perfbus);
+	PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_clustergrouping);
+	PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_raytracing);
+	PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_xttop);
+	PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_s7top);
+	PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_s7top);
+
+	return IMG_FALSE;
+}
+
+/* Used to instantiate a null row in the block type model table below where the
+ * block is not supported for a given build BVNC in firmware/user mode context.
+ * This is needed as the block ID to block type lookup uses the table as well,
+ * and clients may try to access blocks not present in the hardware. */
+#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) {_blkid, 0, 0, 0, 0, 0, 0, 0, 0, #_blkid, NULL, rgx_hwperf_blk_present_false}
+
+#endif
+
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end
+ *****************************************************************************/
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table
+
+ This table holds the entries for the performance counter block type model.
+ Where a block is not present on the RGX device in question,
+ pfnIsBlkPresent() returns false; if the block is valid and present it
+ returns true. Columns in the table marked with ** hold default values; at
+ run time the caller should instead use the value returned in
+ RGX_HWPERF_CNTBLK_RT_INFO by pfnIsBlkPresent(). These columns are only
+ valid for compile-time BVNC-configured contexts.
+
+ Order of table rows must match order of counter block IDs in the enumeration
+ RGX_HWPERF_CNTBLK_ID.
+ *****************************************************************************/
+
+static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] =
+{
+		/*   uiCntBlkIdBase,         uiIndirectReg,                 uiPerfReg,                  uiSelect0BaseReg,                    uiCounter0BaseReg,                    uiNumCounters,  uiNumUnits**,                  uiSelectRegModeShift, uiSelectRegOffsetShift, pszBlockNameComment,           pfnIsBlkPowered,              pfnIsBlkPresent */
+		/*RGX_CNTBLK_ID_TA*/
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_TA,       0, /* direct */                RGX_CR_TA_PERF,             RGX_CR_TA_PERF_SELECT0,              RGX_CR_TA_PERF_COUNTER_0,             4,              1,                              21,                  3,  "RGX_CR_TA_PERF",              rgxfw_hwperf_pow_st_direct,   rgx_hwperf_blk_present_not_s7top },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TA),
+#endif
+
+		/*RGX_CNTBLK_ID_RASTER*/
+#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_RASTER,   0, /* direct */                RGX_CR_RASTERISATION_PERF,  RGX_CR_RASTERISATION_PERF_SELECT0,   RGX_CR_RASTERISATION_PERF_COUNTER_0,  4,              1,                              21,                  3,  "RGX_CR_RASTERISATION_PERF",   rgxfw_hwperf_pow_st_direct,   rgx_hwperf_blk_present_not_clustergrouping },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER),
+#endif
+
+		/*RGX_CNTBLK_ID_HUB*/
+#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_HUB,      0, /* direct */                RGX_CR_HUB_BIFPMCACHE_PERF, RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0,  RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0, 4,              1,                              21,                  3,  "RGX_CR_HUB_BIFPMCACHE_PERF",  rgxfw_hwperf_pow_st_direct,   rgx_hwperf_blk_present_not_clustergrouping },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_HUB),
+#endif
+
+		/*RGX_CNTBLK_ID_TORNADO*/
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_TORNADO,  0, /* direct */                RGX_CR_TORNADO_PERF,        RGX_CR_TORNADO_PERF_SELECT0,         RGX_CR_TORNADO_PERF_COUNTER_0,        4,              1,                              21,                  4,  "RGX_CR_TORNADO_PERF",         rgxfw_hwperf_pow_st_direct,   rgx_hwperf_blk_present_xttop },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TORNADO),
+#endif
+
+		/*RGX_CNTBLK_ID_JONES*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_JONES,   0, /* direct */                 RGX_CR_JONES_PERF,          RGX_CR_JONES_PERF_SELECT0,           RGX_CR_JONES_PERF_COUNTER_0,          4,              1,                              21,                  3,  "RGX_CR_JONES_PERF",           rgxfw_hwperf_pow_st_direct,    rgx_hwperf_blk_present_s7top },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_JONES),
+#endif
+
+		/*RGX_CNTBLK_ID_BF RGX_CNTBLK_ID_BT RGX_CNTBLK_ID_RT RGX_CNTBLK_ID_SH*/
+#if defined(__KERNEL__)
+		{RGX_CNTBLK_ID_BF,      0, /* direct */                 DPX_CR_BF_PERF,             DPX_CR_BF_PERF_SELECT0,              DPX_CR_BF_PERF_COUNTER_0,             4,              1,                              21,                  3,  "RGX_CR_BF_PERF",              rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+		{RGX_CNTBLK_ID_BT,      0, /* direct */                 DPX_CR_BT_PERF,             DPX_CR_BT_PERF_SELECT0,              DPX_CR_BT_PERF_COUNTER_0,             4,              1,                              21,                  3,  "RGX_CR_BT_PERF",              rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+		{RGX_CNTBLK_ID_RT,      0, /* direct */                 DPX_CR_RT_PERF,             DPX_CR_RT_PERF_SELECT0,              DPX_CR_RT_PERF_COUNTER_0,             4,              1,                              21,                  3,  "RGX_CR_RT_PERF",              rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+		{RGX_CNTBLK_ID_SH,      0, /* direct */                 RGX_CR_SH_PERF,             RGX_CR_SH_PERF_SELECT0,              RGX_CR_SH_PERF_COUNTER_0,             4,              1,                              21,                  3,  "RGX_CR_SH_PERF",              rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BF),
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BT),
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RT),
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_SH),
+#endif
+
+		/*RGX_CNTBLK_ID_TPU_MCU0*/
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_TPU_MCU0, RGX_CR_TPU_MCU_L0_PERF_INDIRECT, RGX_CR_TPU_MCU_L0_PERF,   RGX_CR_TPU_MCU_L0_PERF_SELECT0,     RGX_CR_TPU_MCU_L0_PERF_COUNTER_0,     4,              RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST,    21,          3,  "RGX_CR_TPU_MCU_L0_PERF",      rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_not_s7top },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0),
+#endif
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_TPU_MCU0, RGX_CR_TPU_PERF_INDIRECT,      RGX_CR_TPU_MCU_L0_PERF,     RGX_CR_TPU_MCU_L0_PERF_SELECT0,     RGX_CR_TPU_MCU_L0_PERF_COUNTER_0,     4,              RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST,    21,          3,  "RGX_CR_TPU_MCU_L0_PERF",      rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0),
+#endif
+
+		/*RGX_CNTBLK_ID_USC0*/
+#if defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_USC0,    RGX_CR_USC_PERF_INDIRECT,       RGX_CR_USC_PERF,            RGX_CR_USC_PERF_SELECT0,            RGX_CR_USC_PERF_COUNTER_0,            4,              RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21,          3,  "RGX_CR_USC_PERF",             rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_perfbus },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_USC0),
+#endif
+
+		/*RGX_CNTBLK_ID_TEXAS0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_TEXAS0,  RGX_CR_TEXAS3_PERF_INDIRECT,    RGX_CR_TEXAS_PERF,          RGX_CR_TEXAS_PERF_SELECT0,          RGX_CR_TEXAS_PERF_COUNTER_0,          6,              RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST,    31,          3,  "RGX_CR_TEXAS_PERF",           rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0),
+#endif
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_TEXAS0,  RGX_CR_TEXAS_PERF_INDIRECT,     RGX_CR_TEXAS_PERF,          RGX_CR_TEXAS_PERF_SELECT0,          RGX_CR_TEXAS_PERF_COUNTER_0,          6,              RGX_HWPERF_INDIRECT_BY_PHANTOM,         31,          3,  "RGX_CR_TEXAS_PERF",           rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0),
+#endif
+
+		/*RGX_CNTBLK_ID_RASTER0*/
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_RASTER0, RGX_CR_RASTERISATION_PERF_INDIRECT, RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0,  4,            RGX_HWPERF_INDIRECT_BY_PHANTOM,         21,          3,  "RGX_CR_RASTERISATION_PERF",   rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER0),
+#endif
+
+		/*RGX_CNTBLK_ID_BLACKPEARL0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_BLACKPEARL0, RGX_CR_BLACKPEARL_PERF_INDIRECT, RGX_CR_BLACKPEARL_PERF, RGX_CR_BLACKPEARL_PERF_SELECT0,    RGX_CR_BLACKPEARL_PERF_COUNTER_0,     6,              RGX_HWPERF_INDIRECT_BY_PHANTOM,         21,          3,  "RGX_CR_BLACKPEARL_PERF",      rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BLACKPEARL0),
+#endif
+
+		/*RGX_CNTBLK_ID_PBE0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_PBE0,    RGX_CR_PBE_PERF_INDIRECT, RGX_CR_PBE_PERF,                  RGX_CR_PBE_PERF_SELECT0,            RGX_CR_PBE_PERF_COUNTER_0,            4,              RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21,          3,  "RGX_CR_PBE_PERF",             rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_PBE0),
+#endif
+#if defined(RGX_FEATURE_PBE2_IN_XE) || defined(__KERNEL__)
+		{RGX_CNTBLK_ID_PBE0,    RGX_CR_PBE_PERF_INDIRECT, RGX_CR_PBE_PERF,                  RGX_CR_PBE_PERF_SELECT0,            RGX_CR_PBE_PERF_COUNTER_0,            4,              RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21,          3,  "RGX_CR_PBE_PERF",             rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_not_s7top },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_PBE0),
+#endif
+
+		/*RGX_CNTBLK_ID_BX_TU0*/
+#if defined(__KERNEL__)
+		{RGX_CNTBLK_ID_BX_TU0, RGX_CR_BX_TU_PERF_INDIRECT,       DPX_CR_BX_TU_PERF,           DPX_CR_BX_TU_PERF_SELECT0,        DPX_CR_BX_TU_PERF_COUNTER_0,          4,              RGX_HWPERF_DOPPLER_BX_TU_BLKS,          21,          3,  "RGX_CR_BX_TU_PERF",           rgxfw_hwperf_pow_st_gandalf,  rgx_hwperf_blk_present_raytracing },
+#else
+		RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BX_TU0),
+#endif
+};
+
+
+IMG_INTERNAL IMG_UINT32
+RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel)
+{
+	*ppsModel = gasCntBlkTypeModel;
+	return ARRAY_SIZE(gasCntBlkTypeModel);
+}
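+
+/* Illustrative sketch (not part of the original source): a typical caller
+ * walks the returned table and probes each entry with pfnIsBlkPresent(). The
+ * function and variable names here are hypothetical.
+ */
+#if 0	/* example only */
+static void ExampleWalkBlkTypeModel(void *pvDev_km)
+{
+	const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psModel;
+	IMG_UINT32 ui32Count = RGXGetHWPerfBlockConfig(&psModel);
+	IMG_UINT32 i;
+
+	for (i = 0; i < ui32Count; i++)
+	{
+		RGX_HWPERF_CNTBLK_RT_INFO sRtInfo;
+
+		if (psModel[i].pfnIsBlkPresent(&psModel[i], pvDev_km, &sRtInfo))
+		{
+			/* Block type present: sRtInfo.uiNumUnits instances usable */
+		}
+	}
+}
+#endif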
+
+/******************************************************************************
+ End of file (rgx_hwperf_table.c)
+ ******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_hwperf_table.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_hwperf_table.h
new file mode 100644
index 0000000..b7670b0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_hwperf_table.h
@@ -0,0 +1,119 @@
+/*************************************************************************/ /*!
+@File
+@Title          HWPerf counter table header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally for HWPerf data retrieval
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGX_HWPERF_TABLE_H
+#define RGX_HWPERF_TABLE_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "rgx_fwif_hwperf.h"
+#if defined(__KERNEL__)
+#include "rgxdevice.h"
+#endif
+/*****************************************************************************/
+
+/* Forward declaration */
+typedef struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ RGXFW_HWPERF_CNTBLK_TYPE_MODEL;
+
+/* Function pointer type for functions to check the dynamic power state of a
+ * counter block instance. Used only in firmware. */
+typedef bool (*PFN_RGXFW_HWPERF_CNTBLK_POWERED)(
+		RGX_HWPERF_CNTBLK_ID eBlkType,
+		IMG_UINT8 ui8UnitId);
+
+/* Counter block run-time info */
+typedef struct
+{
+	IMG_UINT32 uiNumUnits;             /* Number of instances of this block type in the core */
+} RGX_HWPERF_CNTBLK_RT_INFO;
+
+/* Function pointer type for functions to check block is valid and present
+ * on that RGX Device at runtime. It may have compile logic or run-time
+ * logic depending on where the code executes: server, srvinit or firmware.
+ * Values in the psRtInfo output parameter are only valid if true returned.
+ */
+typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_PRESENT)(
+		const struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_* psBlkTypeDesc,
+		void *pvDev_km,
+		RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo);
+
+/* This structure encodes properties of a type of performance counter block.
+ * The structure is sometimes referred to as a block type descriptor. The
+ * properties contained in this structure represent the columns in the block
+ * type model table (gasCntBlkTypeModel[] in rgx_hwperf_table.c). These values vary depending on the build
+ * BVNC and core type.
+ * Each direct block has a unique type descriptor and each indirect group has
+ * a type descriptor.
+ */
+struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_
+{
+	/* Could use RGXFW_ALIGN_DCACHEL here but then we would waste 40% of the cache line? */
+	IMG_UINT32 uiCntBlkIdBase;         /* The starting block id for this block type */
+	IMG_UINT32 uiIndirectReg;          /* 0 if direct type otherwise the indirect control register to select indirect unit */
+	IMG_UINT32 uiPerfReg;              /* RGX_CR_*_PERF register for this block type */
+	IMG_UINT32 uiSelect0BaseReg;       /* RGX_CR_*_PERF_SELECT0 register for this block type */
+	IMG_UINT32 uiCounter0BaseReg;      /* RGX_CR_*_PERF_COUNTER_0 register for this block type */
+	IMG_UINT8  uiNumCounters;          /* Number of counters in this block type */
+	IMG_UINT8  uiNumUnits;             /* Number of instances of this block type in the core */
+	IMG_UINT8  uiSelectRegModeShift;   /* Mode field shift value of select registers */
+	IMG_UINT8  uiSelectRegOffsetShift; /* Interval between select registers, either 8 bytes or 16, hence << 3 or << 4 */
+	IMG_CHAR   pszBlockNameComment[30];              /* Name of the PERF register. Used while dumping the perf counters to pdumps */
+	PFN_RGXFW_HWPERF_CNTBLK_POWERED pfnIsBlkPowered; /* A function to determine dynamic power state for the block type */
+	PFN_RGXFW_HWPERF_CNTBLK_PRESENT pfnIsBlkPresent; /* A function to determine presence on RGX Device at run-time */
+};
+
+/*****************************************************************************/
+
+#if defined(__KERNEL__) /* Server context */
+IMG_UINT32 rgx_units_indirect_by_phantom(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg);
+IMG_UINT32 rgx_units_phantom_indirect_by_dust(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg);
+IMG_UINT32 rgx_units_phantom_indirect_by_cluster(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg);
+#endif /* defined(__KERNEL__) */
+
+IMG_INTERNAL IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel);
+
+#endif /* RGX_HWPERF_TABLE_H */
+
+/******************************************************************************
+ End of file (rgx_hwperf_table.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_memallocflags.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_memallocflags.h
new file mode 100644
index 0000000..34eb6e4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_memallocflags.h
@@ -0,0 +1,49 @@
+/**************************************************************************/ /*!
+@File
+@Title          RGX memory allocation flags
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGX_MEMALLOCFLAGS_H
+#define RGX_MEMALLOCFLAGS_H
+
+#define PMMETA_PROTECT          (1 << 0)      /* Memory that only the PM and Meta can access */
+#define FIRMWARE_CACHED         (1 << 1)      /* Memory that is cached in META/MIPS */
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_meta.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_meta.h
new file mode 100644
index 0000000..230c298
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_meta.h
@@ -0,0 +1,392 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX META definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX META helper definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (RGX_META_H)
+#define RGX_META_H
+
+
+/***** The META HW register definitions in this file are updated manually *****/
+
+
+#include "img_defs.h"
+#include "km/rgxdefs_km.h"
+
+
+/************************************************************************
+* META registers and MACROS
+************************************************************************/
+#define	META_CR_CTRLREG_BASE(T)					(0x04800000U + 0x1000U*(T))
+
+#define META_CR_TXPRIVEXT						(0x048000E8)
+#define META_CR_TXPRIVEXT_MINIM_EN				(IMG_UINT32_C(0x1) << 7)
+
+#define META_CR_SYSC_JTAG_THREAD				(0x04830030)
+#define META_CR_SYSC_JTAG_THREAD_PRIV_EN		(0x00000004)
+
+#define META_CR_PERF_COUNT0						(0x0480FFE0)
+#define META_CR_PERF_COUNT1						(0x0480FFE8)
+#define META_CR_PERF_COUNT_CTRL_SHIFT			(28)
+#define META_CR_PERF_COUNT_CTRL_MASK			(0xF0000000)
+#define META_CR_PERF_COUNT_CTRL_DCACHEHITS		(IMG_UINT32_C(0x8) << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEHITS		(IMG_UINT32_C(0x9) << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEMISS		(IMG_UINT32_C(0xA) << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICORE			(IMG_UINT32_C(0xD) << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_THR_SHIFT			(24)
+#define META_CR_PERF_COUNT_THR_MASK				(0x0F000000)
+#define META_CR_PERF_COUNT_THR_0				(IMG_UINT32_C(0x1) << META_CR_PERF_COUNT_THR_SHIFT)
+#define META_CR_PERF_COUNT_THR_1				(IMG_UINT32_C(0x2) << META_CR_PERF_COUNT_THR_SHIFT)
+
+#define META_CR_TxVECINT_BHALT					(0x04820500)
+#define META_CR_PERF_ICORE0						(0x0480FFD0)
+#define META_CR_PERF_ICORE1						(0x0480FFD8)
+#define META_CR_PERF_ICORE_DCACHEMISS			(0x8)
+
+#define META_CR_PERF_COUNT(CTRL, THR)			(META_CR_PERF_COUNT_CTRL_##CTRL | \
+												 ((THR) << META_CR_PERF_COUNT_THR_SHIFT))
+
+#define	META_CR_TXUXXRXDT_OFFSET				(META_CR_CTRLREG_BASE(0U) + 0x0000FFF0U)
+#define	META_CR_TXUXXRXRQ_OFFSET				(META_CR_CTRLREG_BASE(0U) + 0x0000FFF8U)
+
+#define META_CR_TXUXXRXRQ_DREADY_BIT			(0x80000000U)	/* Poll for done */
+#define META_CR_TXUXXRXRQ_RDnWR_BIT  			(0x00010000U)	/* Set for read  */
+#define META_CR_TXUXXRXRQ_TX_S       			(12)
+#define META_CR_TXUXXRXRQ_RX_S       			(4)
+#define META_CR_TXUXXRXRQ_UXX_S      			(0)
+
+#define META_CR_TXUIN_ID						(0x0)			/* Internal ctrl regs */
+#define META_CR_TXUD0_ID						(0x1)			/* Data unit regs */
+#define META_CR_TXUD1_ID						(0x2)			/* Data unit regs */
+#define META_CR_TXUA0_ID						(0x3)			/* Address unit regs */
+#define META_CR_TXUA1_ID						(0x4)			/* Address unit regs */
+#define META_CR_TXUPC_ID						(0x5)			/* PC registers */
+
+/* Macros to calculate register access values */
+#define META_CR_CORE_REG(Thr, RegNum, Unit)	(((IMG_UINT32)(Thr)		<< META_CR_TXUXXRXRQ_TX_S ) | \
+											 ((IMG_UINT32)(RegNum)	<< META_CR_TXUXXRXRQ_RX_S ) | \
+											 ((IMG_UINT32)(Unit)	<< META_CR_TXUXXRXRQ_UXX_S))
+
+#define META_CR_THR0_PC		META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID)
+#define META_CR_THR0_PCX	META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID)
+#define META_CR_THR0_SP		META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID)
+
+#define META_CR_THR1_PC		META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID)
+#define META_CR_THR1_PCX	META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID)
+#define META_CR_THR1_SP		META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID)
+
+#define SP_ACCESS(Thread)	META_CR_CORE_REG(Thread, 0, META_CR_TXUA0_ID)
+#define PC_ACCESS(Thread)	META_CR_CORE_REG(Thread, 0, META_CR_TXUPC_ID)
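+
+/* For illustration, META_CR_THR1_PC expands to
+ * (1 << 12) | (0 << 4) | (0x5 << 0) = 0x00001005.
+ */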
+
+#define	META_CR_COREREG_ENABLE			(0x0000000U)
+#define	META_CR_COREREG_STATUS			(0x0000010U)
+#define	META_CR_COREREG_DEFR			(0x00000A0U)
+#define	META_CR_COREREG_PRIVEXT			(0x00000E8U)
+
+#define	META_CR_T0ENABLE_OFFSET			(META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_ENABLE)
+#define	META_CR_T0STATUS_OFFSET			(META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_STATUS)
+#define	META_CR_T0DEFR_OFFSET			(META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_DEFR)
+#define	META_CR_T0PRIVEXT_OFFSET		(META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_PRIVEXT)
+
+#define	META_CR_T1ENABLE_OFFSET			(META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_ENABLE)
+#define	META_CR_T1STATUS_OFFSET			(META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_STATUS)
+#define	META_CR_T1DEFR_OFFSET			(META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_DEFR)
+#define	META_CR_T1PRIVEXT_OFFSET		(META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_TXENABLE_ENABLE_BIT		(0x00000001U)   /* Set if running */
+#define META_CR_TXSTATUS_PRIV			(0x00020000U)
+#define META_CR_TXPRIVEXT_MINIM			(0x00000080U)
+
+#define META_MEM_GLOBAL_RANGE_BIT		(0x80000000U)
+
+
+/************************************************************************
+* META LDR Format
+************************************************************************/
+/* Block header structure */
+typedef struct
+{
+	IMG_UINT32	ui32DevID;
+	IMG_UINT32	ui32SLCode;
+	IMG_UINT32	ui32SLData;
+	IMG_UINT16	ui16PLCtrl;
+	IMG_UINT16	ui16CRC;
+
+} RGX_META_LDR_BLOCK_HDR;
+
+/* First level (L1) data stream block structure */
+typedef struct
+{
+	IMG_UINT16	ui16Cmd;
+	IMG_UINT16	ui16Length;
+	IMG_UINT32	ui32Next;
+	IMG_UINT32	aui32CmdData[4];
+
+} RGX_META_LDR_L1_DATA_BLK;
+
+/* Second level (L2) data stream block structure */
+typedef struct
+{
+	IMG_UINT16	ui16Tag;
+	IMG_UINT16	ui16Length;
+	IMG_UINT32	aui32BlockData[4];
+
+} RGX_META_LDR_L2_DATA_BLK;
+
+/* Config command structure */
+typedef struct
+{
+	IMG_UINT32	ui32Type;
+	IMG_UINT32	aui32BlockData[4];
+
+} RGX_META_LDR_CFG_BLK;
+
+/* Block type definitions */
+#define RGX_META_LDR_COMMENT_TYPE_MASK			(0x0010U)
+#define RGX_META_LDR_BLK_IS_COMMENT(X)			(((X) & RGX_META_LDR_COMMENT_TYPE_MASK) != 0U)
+
+/* Command definitions
+	Value	Name			Description
+	0		LoadMem			Load memory with binary data.
+	1		LoadCore		Load a set of core registers.
+	2		LoadMMReg		Load a set of memory mapped registers.
+	3		StartThreads	Set each thread PC and SP, then enable threads.
+	4		ZeroMem			Zeros a memory region.
+	5		Config			Perform a configuration command. */
+#define RGX_META_LDR_CMD_MASK				(0x000FU)
+
+#define RGX_META_LDR_CMD_LOADMEM			(0x0000U)
+#define RGX_META_LDR_CMD_LOADCORE			(0x0001U)
+#define RGX_META_LDR_CMD_LOADMMREG			(0x0002U)
+#define RGX_META_LDR_CMD_START_THREADS		(0x0003U)
+#define RGX_META_LDR_CMD_ZEROMEM			(0x0004U)
+#define RGX_META_LDR_CMD_CONFIG			(0x0005U)
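+
+/* Illustrative sketch (not part of the original header): classifying an L1
+ * stream block with the masks above. The helper name is hypothetical.
+ */
+#if 0	/* example only */
+static inline IMG_BOOL ExampleLdrBlkIsLoadMem(const RGX_META_LDR_L1_DATA_BLK *psBlk)
+{
+	return (!RGX_META_LDR_BLK_IS_COMMENT(psBlk->ui16Cmd) &&
+	        ((psBlk->ui16Cmd & RGX_META_LDR_CMD_MASK) == RGX_META_LDR_CMD_LOADMEM))
+	       ? IMG_TRUE : IMG_FALSE;
+}
+#endif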
+
+/* Config Command definitions
+	Value	Name		Description
+	0		Pause		Pause for x times 100 instructions
+	1		Read		Read a value from a register - no value return needed.
+						Utilises effects of issuing reads to certain registers
+	2		Write		Write to a memory location
+	3		MemSet		Set memory to a value
+	4		MemCheck	Check memory for a specific value. */
+#define RGX_META_LDR_CFG_PAUSE			(0x0000)
+#define RGX_META_LDR_CFG_READ			(0x0001)
+#define RGX_META_LDR_CFG_WRITE			(0x0002)
+#define RGX_META_LDR_CFG_MEMSET			(0x0003)
+#define RGX_META_LDR_CFG_MEMCHECK		(0x0004)
+
+
+/************************************************************************
+* RGX FW segmented MMU definitions
+************************************************************************/
+/* All threads can access the segment */
+#define RGXFW_SEGMMU_ALLTHRS	(IMG_UINT32_C(0xf) << 8U)
+/* Writable */
+#define RGXFW_SEGMMU_WRITEABLE	(0x1U << 1U)
+/* All threads can access and writable */
+#define RGXFW_SEGMMU_ALLTHRS_WRITEABLE	(RGXFW_SEGMMU_ALLTHRS | RGXFW_SEGMMU_WRITEABLE)
+
+/* Direct map region 11 used for mapping GPU memory */
+#define RGXFW_SEGMMU_DMAP_GPU_ID			(11U)
+#define RGXFW_SEGMMU_DMAP_GPU_ADDR_START	(0x07800000U)
+
+/* Segment IDs */
+#define RGXFW_SEGMMU_DATA_ID			(1U)
+#define RGXFW_SEGMMU_BOOTLDR_ID			(2U)
+#define RGXFW_SEGMMU_TEXT_ID			(RGXFW_SEGMMU_BOOTLDR_ID)
+
+#define RGXFW_SEGMMU_META_DM_ID			(0x7U)
+
+
+/*
+ * In S7 the SLC caching strategy is applied via the segment MMU. All the
+ * segments configured through the RGXFW_SEGMMU_OUTADDR_TOP macro are CACHED
+ * in the SLC. The interface has been kept the same to simplify the code
+ * changes. The bifdm argument is ignored (no longer relevant) in S7.
+ */
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_ERN_45914(pers, coheren, mmu_ctx)  ( (((IMG_UINT64)(pers)    & 0x3U)  << 52U) | \
+                                                                         (((IMG_UINT64)(mmu_ctx) & 0xFFU) << 44U) | \
+                                                                         (((IMG_UINT64)(coheren) & 0x1U)  << 40U) )
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED_ERN_45914(mmu_ctx)      RGXFW_SEGMMU_OUTADDR_TOP_S7_ERN_45914(0x3, 0x0, mmu_ctx)
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED_ERN_45914(mmu_ctx)    RGXFW_SEGMMU_OUTADDR_TOP_S7_ERN_45914(0x0, 0x1, mmu_ctx)
+/* Set FW code/data cached in the SLC as default */
+#define RGXFW_SEGMMU_OUTADDR_TOP_ERN_45914(mmu_ctx, bifdm)             RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED_ERN_45914(mmu_ctx | (bifdm&0x0))
+
+/* To configure the Page Catalog and BIF-DM fed into the BIF for Garten accesses through this segment */
+#define RGXFW_SEGMMU_OUTADDR_TOP_PRE_S7(pc, bifdm)              ( ((IMG_UINT64)((IMG_UINT64)(pc)    & 0xFU) << 44U) | \
+                                                                  ((IMG_UINT64)((IMG_UINT64)(bifdm) & 0xFU) << 40U) )
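+
+/* Worked example (editor's addition): PC index 2 with BIF-DM 3 gives
+ * RGXFW_SEGMMU_OUTADDR_TOP_PRE_S7(2, 3) = (2ULL << 44) | (3ULL << 40)
+ *                                       = 0x0000230000000000.
+ */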
+
+#if !defined(__KERNEL__) && defined(RGX_FEATURE_META)
+#if defined(HW_ERN_45914)
+#define RGXFW_SEGMMU_OUTADDR_TOP                  RGXFW_SEGMMU_OUTADDR_TOP_ERN_45914
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED  RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED_ERN_45914
+#else
+#define RGXFW_SEGMMU_OUTADDR_TOP                  RGXFW_SEGMMU_OUTADDR_TOP_PRE_S7
+#endif
+#endif
+
+
+/* META segments have 4kB minimum size */
+#define RGXFW_SEGMMU_ALIGN			(0x1000U)
+
+/* Segmented MMU registers (n = segment id) */
+#define META_CR_MMCU_SEGMENTn_BASE(n)			(0x04850000U + (n)*0x10U)
+#define META_CR_MMCU_SEGMENTn_LIMIT(n)			(0x04850004U + (n)*0x10U)
+#define META_CR_MMCU_SEGMENTn_OUTA0(n)			(0x04850008U + (n)*0x10U)
+#define META_CR_MMCU_SEGMENTn_OUTA1(n)			(0x0485000CU + (n)*0x10U)
+
+/* The following defines must be recalculated if the Meta MMU segments
+ * used to access Host-FW data are changed
+ * Current combinations are:
+ * - SLC uncached, META cached,   FW base address 0x70000000
+ * - SLC uncached, META uncached, FW base address 0xF0000000
+ * - SLC cached,   META cached,   FW base address 0x10000000
+ * - SLC cached,   META uncached, FW base address 0x90000000
+ */
+#define RGXFW_SEGMMU_DATA_BASE_ADDRESS        (0x10000000U)
+#define RGXFW_SEGMMU_DATA_META_CACHED         (0x0U)
+#define RGXFW_SEGMMU_DATA_META_UNCACHED       (META_MEM_GLOBAL_RANGE_BIT) /* 0x80000000 */
+#define RGXFW_SEGMMU_DATA_META_CACHE_MASK     (META_MEM_GLOBAL_RANGE_BIT)
+/* For non-VIVT SLCs the cacheability of the FW data in the SLC is selected
+ * in the PTEs for the FW data, not in the Meta Segment MMU,
+ * which means these defines have no real effect in those cases */
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED     (0x0U)
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED   (0x60000000U)
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U)
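+
+/* Worked derivation (editor's addition): the four base addresses listed
+ * above are the ORs of RGXFW_SEGMMU_DATA_BASE_ADDRESS with the META and
+ * VIVT SLC cache flags:
+ *   0x10000000 |        0x0 |        0x0 = 0x10000000 (SLC cached,   META cached)
+ *   0x10000000 | 0x80000000 |        0x0 = 0x90000000 (SLC cached,   META uncached)
+ *   0x10000000 |        0x0 | 0x60000000 = 0x70000000 (SLC uncached, META cached)
+ *   0x10000000 | 0x80000000 | 0x60000000 = 0xF0000000 (SLC uncached, META uncached)
+ */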
+
+
+/************************************************************************
+* RGX FW RGX MMU definitions
+************************************************************************/
+#if defined(RGX_FEATURE_SLC_VIVT) && defined(SUPPORT_TRUSTED_DEVICE)
+
+#define META_MMU_CONTEXT_MAPPING        (0x1) /* fw data */
+#define META_MMU_CONTEXT_MAPPING_CODE   (0x0) /* fw code */
+
+#else
+
+#define META_MMU_CONTEXT_MAPPING       (0x0)
+
+#endif
+
+#if defined(SECURE_FW_CODE_OSID) && defined(RGX_FEATURE_META)
+#error "SECURE_FW_CODE_OSID is not supported on META cores"
+#endif
+
+
+/************************************************************************
+* RGX FW Bootloader defaults
+************************************************************************/
+#define RGXFW_BOOTLDR_META_ADDR		(0x40000000)
+#define RGXFW_BOOTLDR_DEVV_ADDR_0	(0xC0000000U)
+#define RGXFW_BOOTLDR_DEVV_ADDR_1	(0x000000E1)
+#define RGXFW_BOOTLDR_DEVV_ADDR		((((IMG_UINT64) RGXFW_BOOTLDR_DEVV_ADDR_1) << 32) | RGXFW_BOOTLDR_DEVV_ADDR_0)
+#define RGXFW_BOOTLDR_LIMIT		(0x1FFFF000)
+#define RGXFW_MAX_BOOTLDR_OFFSET	(0x1000)
+
+/* Bootloader configuration offset, in dwords (0x80 dwords = 512 bytes) */
+#define RGXFW_BOOTLDR_CONF_OFFSET	(0x80)
+
+
+/************************************************************************
+* RGX META Stack
+************************************************************************/
+#define RGX_META_STACK_SIZE  (0x1000U)
+
+/************************************************************************
+ RGX META Core memory
+************************************************************************/
+/* code and data both map to the same physical memory */
+#define RGX_META_COREMEM_CODE_ADDR   (0x80000000U)
+#define RGX_META_COREMEM_DATA_ADDR   (0x82000000U)
+#define RGX_META_COREMEM_OFFSET_MASK (0x01ffffffU)
+
+#if defined(__KERNEL__)
+#define RGX_META_IS_COREMEM_CODE(A, B)  (((A) >= RGX_META_COREMEM_CODE_ADDR) && ((A) < (RGX_META_COREMEM_CODE_ADDR + (B))))
+#define RGX_META_IS_COREMEM_DATA(A, B)  (((A) >= RGX_META_COREMEM_DATA_ADDR) && ((A) < (RGX_META_COREMEM_DATA_ADDR + (B))))
+#endif
+
+/************************************************************************
+* 2nd thread
+************************************************************************/
+#define RGXFW_THR1_PC		(0x18930000)
+#define RGXFW_THR1_SP		(0x78890000)
+
+/************************************************************************
+* META compatibility
+************************************************************************/
+
+#define META_CR_CORE_ID			(0x04831000)
+#define META_CR_CORE_ID_VER_SHIFT	(16U)
+#define META_CR_CORE_ID_VER_CLRMSK	(0XFF00FFFFU)
+
+#if !defined(__KERNEL__) && defined(RGX_FEATURE_META)
+
+	#if (RGX_FEATURE_META == MTP218)
+	#define RGX_CR_META_CORE_ID_VALUE 0x19
+	#elif (RGX_FEATURE_META == MTP219)
+	#define RGX_CR_META_CORE_ID_VALUE 0x1E
+	#elif (RGX_FEATURE_META == LTP218)
+	#define RGX_CR_META_CORE_ID_VALUE 0x1C
+	#elif (RGX_FEATURE_META == LTP217)
+	#define RGX_CR_META_CORE_ID_VALUE 0x1F
+	#else
+	#error "Unknown META ID"
+	#endif
+#else
+
+	#define RGX_CR_META_MTP218_CORE_ID_VALUE 0x19
+	#define RGX_CR_META_MTP219_CORE_ID_VALUE 0x1E
+	#define RGX_CR_META_LTP218_CORE_ID_VALUE 0x1C
+	#define RGX_CR_META_LTP217_CORE_ID_VALUE 0x1F
+
+#endif
+#define RGXFW_PROCESSOR_META        "META"
+
+
+#endif /* RGX_META_H */
+
+/******************************************************************************
+ End of file (rgx_meta.h)
+******************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_mips.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_mips.h
new file mode 100644
index 0000000..6dea4f3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_mips.h
@@ -0,0 +1,416 @@
+/*************************************************************************/ /*!
+@File           rgx_mips.h
+@Title
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform       RGX
+@Description    RGX MIPS definitions, kernel/user space
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (RGX_MIPS_H)
+#define RGX_MIPS_H
+
+/*
+ * Utility defines for memory management
+ */
+#define RGXMIPSFW_LOG2_PAGE_SIZE_4K              (12)
+#define RGXMIPSFW_PAGE_SIZE_4K                   (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4K)
+#define RGXMIPSFW_PAGE_MASK_4K                   (RGXMIPSFW_PAGE_SIZE_4K - 1)
+#define RGXMIPSFW_LOG2_PAGE_SIZE_64K             (16)
+#define RGXMIPSFW_PAGE_SIZE_64K                  (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_64K)
+#define RGXMIPSFW_PAGE_MASK_64K                  (RGXMIPSFW_PAGE_SIZE_64K - 1)
+#define RGXMIPSFW_LOG2_PAGETABLE_PAGE_SIZE       (15)
+#define RGXMIPSFW_LOG2_PTE_ENTRY_SIZE            (2)
+/* Total number of TLB entries */
+#define RGXMIPSFW_NUMBER_OF_TLB_ENTRIES          (16)
+/* "Uncached" caching policy */
+#define RGXMIPSFW_UNCACHED_CACHE_POLICY          (0X00000002)
+/* "Write-back write-allocate" caching policy */
+#define RGXMIPSFW_WRITEBACK_CACHE_POLICY         (0X00000003)
+/* "Write-through no write-allocate" caching policy */
+#define RGXMIPSFW_WRITETHROUGH_CACHE_POLICY      (0X00000001)
+/* Cache policy used by the MIPS when the physical bus is 32-bit */
+#define RGXMIPSFW_CACHED_POLICY                  (RGXMIPSFW_WRITEBACK_CACHE_POLICY)
+/* Cache policy used by the MIPS when the physical bus is wider than 32 bits */
+#define RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT      (RGXMIPSFW_WRITETHROUGH_CACHE_POLICY)
+/* Total number of Remap entries */
+#define RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES        (2 * RGXMIPSFW_NUMBER_OF_TLB_ENTRIES)
+
+
+/*
+ * MIPS EntryLo/PTE format
+ */
+
+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_SHIFT     (31U)
+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK    (0X7FFFFFFF)
+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN        (0X80000000)
+
+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT     (30U)
+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK    (0XBFFFFFFF)
+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN        (0X40000000)
+
+/* Page Frame Number */
+#define RGXMIPSFW_ENTRYLO_PFN_SHIFT              (6)
+#define RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT         (12)
+/* Mask used for the MIPS page table when the physical bus is 32-bit */
+#define RGXMIPSFW_ENTRYLO_PFN_MASK               (0x03FFFFC0)
+#define RGXMIPSFW_ENTRYLO_PFN_SIZE               (20)
+/* Mask used for the MIPS page table when the physical bus is wider than 32 bits */
+#define RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT   (0x3FFFFFC0)
+#define RGXMIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT   (24)
+#define RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT     (RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \
+                                                  RGXMIPSFW_ENTRYLO_PFN_SHIFT)
+
+#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT     (3U)
+#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK    (0XFFFFFFC7)
+
+#define RGXMIPSFW_ENTRYLO_DIRTY_SHIFT            (2U)
+#define RGXMIPSFW_ENTRYLO_DIRTY_CLRMSK           (0XFFFFFFFB)
+#define RGXMIPSFW_ENTRYLO_DIRTY_EN               (0X00000004)
+
+#define RGXMIPSFW_ENTRYLO_VALID_SHIFT            (1U)
+#define RGXMIPSFW_ENTRYLO_VALID_CLRMSK           (0XFFFFFFFD)
+#define RGXMIPSFW_ENTRYLO_VALID_EN               (0X00000002)
+
+#define RGXMIPSFW_ENTRYLO_GLOBAL_SHIFT           (0U)
+#define RGXMIPSFW_ENTRYLO_GLOBAL_CLRMSK          (0XFFFFFFFE)
+#define RGXMIPSFW_ENTRYLO_GLOBAL_EN              (0X00000001)
+
+#define RGXMIPSFW_ENTRYLO_DVG                    (RGXMIPSFW_ENTRYLO_DIRTY_EN | \
+                                                  RGXMIPSFW_ENTRYLO_VALID_EN | \
+                                                  RGXMIPSFW_ENTRYLO_GLOBAL_EN)
+#define RGXMIPSFW_ENTRYLO_UNCACHED               (RGXMIPSFW_UNCACHED_CACHE_POLICY << \
+                                                  RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT)
+#define RGXMIPSFW_ENTRYLO_DVG_UNCACHED           (RGXMIPSFW_ENTRYLO_DVG | RGXMIPSFW_ENTRYLO_UNCACHED)
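+
+/* Illustrative sketch (editor's addition, not part of the original header):
+ * building an EntryLo value for a 4k-aligned physical page on a 32-bit
+ * physical bus. The helper name is hypothetical and the block is compiled
+ * out.
+ */
+#if 0	/* example only */
+static inline IMG_UINT32 ExampleEntryLoUncached(IMG_UINT32 ui32PhysAddr4k)
+{
+	/* PFN field: the physical address is shifted from its 4k alignment
+	 * (bit 12) down to the PFN position (bit 6), then Dirty|Valid|Global
+	 * and the uncached cache policy are ORed in. */
+	return (ui32PhysAddr4k >> RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT)
+	        | RGXMIPSFW_ENTRYLO_DVG_UNCACHED;
+}
+#endif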
+
+
+/* Remap Range Config Addr Out */
+/* These defines refer to the upper half of the Remap Range Config register */
+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_MASK      (0x0FFFFFF0)
+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT     (4)  /* wrt upper half of the register */
+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12)
+#define RGXMIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT     (RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \
+                                                  RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT)
+
+#if defined(SECURE_FW_CODE_OSID) && (SECURE_FW_CODE_OSID + 1 > 2)
+#define MIPS_FW_CODE_OSID                        (SECURE_FW_CODE_OSID)
+#elif defined(SECURE_FW_CODE_OSID)
+#define MIPS_FW_CODE_OSID                        (1U)
+#endif
+
+
+/*
+ * Pages to trampoline problematic physical addresses:
+ *   - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000
+ *   - RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000
+ *   - RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000
+ *   - (benign trampoline)               : 0x1FC0_3000
+ * that would otherwise be erroneously remapped by the MIPS wrapper
+ * (see "Firmware virtual layout and remap configuration" section below)
+ */
+
+#define RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES       (2)
+#define RGXMIPSFW_TRAMPOLINE_NUMPAGES            (1 << RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES)
+#define RGXMIPSFW_TRAMPOLINE_SIZE                (RGXMIPSFW_TRAMPOLINE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE_4K)
+#define RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE   (RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES + RGXMIPSFW_LOG2_PAGE_SIZE_4K)
+
+#define RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR    (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+#define RGXMIPSFW_TRAMPOLINE_OFFSET(a)           ((a) - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+
+#define RGXMIPSFW_SENSITIVE_ADDR(a)              (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN == (~((1<<RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE)-1) & (a)))
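+
+/* Worked example (editor's addition): with a log2 segment size of 14 the
+ * mask is ~0x3FFF, so 0x1FC01234 & ~0x3FFF = 0x1FC00000, which equals
+ * RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN and is therefore "sensitive", while
+ * 0x1FC04000 masks to itself and is not.
+ */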
+
+/*
+ * Firmware virtual layout and remap configuration
+ */
+/*
+ * For each remap region we define:
+ * - the virtual base used by the Firmware to access code/data through that region
+ * - the microAptivAP physical address correspondent to the virtual base address,
+ *   used as input address and remapped to the actual physical address
+ * - log2 of size of the region remapped by the MIPS wrapper, i.e. number of bits from
+ *   the bottom of the base input address that survive onto the output address
+ *   (this defines both the alignment and the maximum size of the remapped region)
+ * - one or more code/data segments within the remapped region
+ */
+
+/* Boot remap setup */
+#define RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE        (0xBFC00000)
+#define RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN        (0x1FC00000)
+#define RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE   (12)
+#define RGXMIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE     (RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE)
+
+/* Data remap setup */
+#define RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE        (0xBFC01000)
+#define RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN        (0x1FC01000)
+#define RGXMIPSFW_DATA_REMAP_LOG2_SEGMENT_SIZE   (12)
+#define RGXMIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE     (RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE)
+
+/* Code remap setup */
+#define RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE        (0x9FC02000)
+#define RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN        (0x1FC02000)
+#define RGXMIPSFW_CODE_REMAP_LOG2_SEGMENT_SIZE   (12)
+#define RGXMIPSFW_EXCEPTIONS_VIRTUAL_BASE        (RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE)
+
+/* Permanent mappings setup */
+#define RGXMIPSFW_PT_VIRTUAL_BASE                (0xCF000000)
+#define RGXMIPSFW_REGISTERS_VIRTUAL_BASE         (0xCF400000)
+#define RGXMIPSFW_STACK_VIRTUAL_BASE             (0xCF600000)
+
+
+/*
+ * Bootloader configuration data
+ */
+/* Bootloader configuration offset within the bootloader/NMI data page */
+#define RGXMIPSFW_BOOTLDR_CONF_OFFSET                         (0x0)
+/* Offsets of bootloader configuration parameters in 64-bit words */
+#define RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET              (0x0)
+#define RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET              (0x1)
+#define RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET                 (0x2)
+#define RGXMIPSFW_RESERVED_FUTURE_OFFSET                      (0x3)
+
+/*
+ * NMI shared data
+ */
+/* Base address of the shared data within the bootloader/NMI data page */
+#define RGXMIPSFW_NMI_SHARED_DATA_BASE                        (0x100)
+/* Size used by Debug dump data */
+#define RGXMIPSFW_NMI_SHARED_SIZE                             (0x2B0)
+/* Offsets in the NMI shared area in 32-bit words */
+#define RGXMIPSFW_NMI_SYNC_FLAG_OFFSET                        (0x0)
+#define RGXMIPSFW_NMI_STATE_OFFSET                            (0x1)
+#define RGXMIPSFW_NMI_ERROR_STATE_SET                         (0x1)
+
+/*
+ * MIPS boot stage
+ */
+#define RGXMIPSFW_BOOT_STAGE_OFFSET                           (0x400)
+
+/*
+ * MIPS private data in the bootloader data page.
+ * Memory below this offset is used by the FW only, no interface data allowed.
+ */
+#define RGXMIPSFW_PRIVATE_DATA_OFFSET                         (0x800)
+
+
+/* The things that follow are excluded when compiling assembly sources */
+#if !defined (RGXMIPSFW_ASSEMBLY_CODE)
+#include "img_types.h"
+#include "km/rgxdefs_km.h"
+
+#define RGXMIPSFW_GET_OFFSET_IN_DWORDS(offset)                (offset / sizeof(IMG_UINT32))
+#define RGXMIPSFW_GET_OFFSET_IN_QWORDS(offset)                (offset / sizeof(IMG_UINT64))
+
+/* Used for compatibility checks */
+#define RGXMIPSFW_ARCHTYPE_VER_CLRMSK                         (0xFFFFE3FFU)
+#define RGXMIPSFW_ARCHTYPE_VER_SHIFT                          (10U)
+#define RGXMIPSFW_CORE_ID_VALUE                               (0x001U)
+#define RGXFW_PROCESSOR_MIPS		                          "MIPS"
+
+/* microAptivAP cache line size */
+#define RGXMIPSFW_MICROAPTIVEAP_CACHELINE_SIZE                (16U)
+
+/* The SOCIF transactions are identified with the top 16 bits of the physical address emitted by the MIPS */
+#define RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN           (16U)
+
+/* Values to put in the MIPS selectors for performance counters */
+#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0          (9U)   /* Icache accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_MISSES_C1            (9U)   /* Icache misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_ACCESSES_C0          (10U)  /* Dcache accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_MISSES_C1            (11U) /* Dcache misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_ITLB_INSTR_ACCESSES_C0      (5U)  /* ITLB instruction accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_INSTR_MISSES_C1        (7U)  /* JTLB instruction access misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_INSTR_COMPLETED_C0          (1U)  /* Instructions completed in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_DATA_MISSES_C1         (8U)  /* JTLB data misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT                 (5U)  /* Shift for the Event field in the MIPS perf ctrl registers */
+/* Additional flags for performance counters. See MIPS manual for further reference */
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_USER_MODE             (8U)
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE           (2U)
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_EXL                   (1U)
+
+
+#define RGXMIPSFW_C0_NBHWIRQ	8
+
+/* Macros to decode C0_Cause register */
+#define RGXMIPSFW_C0_CAUSE_EXCCODE(CAUSE)       (((CAUSE) & 0x7c) >> 2)
+#define RGXMIPSFW_C0_CAUSE_EXCCODE_FWERROR      9
+/* Use only when Coprocessor Unusable exception */
+#define RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(CAUSE) (((CAUSE) >> 28) & 0x3)
+#define RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(CAUSE) (((CAUSE) & 0x3fc00) >> 10)
+#define RGXMIPSFW_C0_CAUSE_FDCIPENDING          (1 << 21)
+#define RGXMIPSFW_C0_CAUSE_IV                   (1 << 23)
+#define RGXMIPSFW_C0_CAUSE_IC                   (1 << 25)
+#define RGXMIPSFW_C0_CAUSE_PCIPENDING           (1 << 26)
+#define RGXMIPSFW_C0_CAUSE_TIPENDING            (1 << 30)
+#define RGXMIPSFW_C0_CAUSE_BRANCH_DELAY         (1U << 31)
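+
+/* Illustrative sketch (editor's addition, not part of the original header):
+ * decoding a raw Cause value with the macros above. The function name is
+ * hypothetical and the block is compiled out.
+ */
+#if 0	/* example only */
+static inline void ExampleDecodeCause(IMG_UINT32 ui32Cause)
+{
+	IMG_UINT32 ui32ExcCode = RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause);
+	IMG_UINT32 ui32PendIRQ = RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(ui32Cause);
+
+	if (ui32ExcCode == RGXMIPSFW_C0_CAUSE_EXCCODE_FWERROR)
+	{
+		/* Firmware-raised error: the pending HW IRQ lines are the
+		 * bits set in ui32PendIRQ */
+	}
+}
+#endif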
+
+/* Macros to decode C0_Debug register */
+#define RGXMIPSFW_C0_DEBUG_EXCCODE(DEBUG) (((DEBUG) >> 10) & 0x1f)
+#define RGXMIPSFW_C0_DEBUG_DSS            (1 << 0)
+#define RGXMIPSFW_C0_DEBUG_DBP            (1 << 1)
+#define RGXMIPSFW_C0_DEBUG_DDBL           (1 << 2)
+#define RGXMIPSFW_C0_DEBUG_DDBS           (1 << 3)
+#define RGXMIPSFW_C0_DEBUG_DIB            (1 << 4)
+#define RGXMIPSFW_C0_DEBUG_DINT           (1 << 5)
+#define RGXMIPSFW_C0_DEBUG_DIBIMPR        (1 << 6)
+#define RGXMIPSFW_C0_DEBUG_DDBLIMPR       (1 << 18)
+#define RGXMIPSFW_C0_DEBUG_DDBSIMPR       (1 << 19)
+#define RGXMIPSFW_C0_DEBUG_IEXI           (1 << 20)
+#define RGXMIPSFW_C0_DEBUG_DBUSEP         (1 << 21)
+#define RGXMIPSFW_C0_DEBUG_CACHEEP        (1 << 22)
+#define RGXMIPSFW_C0_DEBUG_MCHECKP        (1 << 23)
+#define RGXMIPSFW_C0_DEBUG_IBUSEP         (1 << 24)
+#define RGXMIPSFW_C0_DEBUG_DM             (1 << 30)
+#define RGXMIPSFW_C0_DEBUG_DBD            (1U << 31)
+
+/* ELF format defines */
+#define ELF_PT_LOAD     (0x1U)   /* Program header identifier as Load */
+#define ELF_SHT_SYMTAB  (0x2U)   /* Section identifier as Symbol Table */
+#define ELF_SHT_STRTAB  (0x3U)   /* Section identifier as String Table */
+#define MAX_STRTAB_NUM  (0x8U)   /* Maximum number of string tables in the firmware ELF file */
+
+
+/* Local redefinitions of the ELF format structures */
+typedef struct
+{
+	IMG_UINT8    ui32Eident[16];
+	IMG_UINT16   ui32Etype;
+	IMG_UINT16   ui32Emachine;
+	IMG_UINT32   ui32Eversion;
+	IMG_UINT32   ui32Eentry;
+	IMG_UINT32   ui32Ephoff;
+	IMG_UINT32   ui32Eshoff;
+	IMG_UINT32   ui32Eflags;
+	IMG_UINT16   ui32Eehsize;
+	IMG_UINT16   ui32Ephentsize;
+	IMG_UINT16   ui32Ephnum;
+	IMG_UINT16   ui32Eshentsize;
+	IMG_UINT16   ui32Eshnum;
+	IMG_UINT16   ui32Eshtrndx;
+} RGX_MIPS_ELF_HDR;
+
+
+typedef struct
+{
+	IMG_UINT32   ui32Stname;
+	IMG_UINT32   ui32Stvalue;
+	IMG_UINT32   ui32Stsize;
+	IMG_UINT8    ui32Stinfo;
+	IMG_UINT8    ui32Stother;
+	IMG_UINT16   ui32Stshndx;
+} RGX_MIPS_ELF_SYM;
+
+
+typedef struct
+{
+	IMG_UINT32   ui32Shname;
+	IMG_UINT32   ui32Shtype;
+	IMG_UINT32   ui32Shflags;
+	IMG_UINT32   ui32Shaddr;
+	IMG_UINT32   ui32Shoffset;
+	IMG_UINT32   ui32Shsize;
+	IMG_UINT32   ui32Shlink;
+	IMG_UINT32   ui32Shinfo;
+	IMG_UINT32   ui32Shaddralign;
+	IMG_UINT32   ui32Shentsize;
+} RGX_MIPS_ELF_SHDR;
+
+typedef struct
+{
+	IMG_UINT32   ui32Ptype;
+	IMG_UINT32   ui32Poffset;
+	IMG_UINT32   ui32Pvaddr;
+	IMG_UINT32   ui32Ppaddr;
+	IMG_UINT32   ui32Pfilesz;
+	IMG_UINT32   ui32Pmemsz;
+	IMG_UINT32   ui32Pflags;
+	IMG_UINT32   ui32Palign;
+} RGX_MIPS_ELF_PROGRAM_HDR;
+
+#define RGXMIPSFW_TLB_GET_MASK(PAGE_MASK)       (((PAGE_MASK) >> 13) & 0XFFFFU)
+#define RGXMIPSFW_TLB_GET_PAGE_SIZE(PAGE_MASK)  ((((PAGE_MASK) | 0x1FFF) + 1) >> 11)
+#define RGXMIPSFW_TLB_GET_VPN2(ENTRY_HI)        ((ENTRY_HI) >> 13)
+#define RGXMIPSFW_TLB_GET_COHERENCY(ENTRY_LO)   (((ENTRY_LO) >> 3) & 0x7U)
+#define RGXMIPSFW_TLB_GET_PFN(ENTRY_LO)         (((ENTRY_LO) >> 6) & 0XFFFFFU)
+#define RGXMIPSFW_TLB_GET_PA(ENTRY_LO)          (((ENTRY_LO) & 0x03FFFFC0) << 6)
+#define RGXMIPSFW_TLB_GET_INHIBIT(ENTRY_LO)     (((ENTRY_LO) >> 30) & 0x3U)
+#define RGXMIPSFW_TLB_GET_DGV(ENTRY_LO)         ((ENTRY_LO) & 0x7U)
+#define RGXMIPSFW_TLB_GLOBAL                    (1U)
+#define RGXMIPSFW_TLB_VALID                     (1U << 1)
+#define RGXMIPSFW_TLB_DIRTY                     (1U << 2)
+#define RGXMIPSFW_TLB_XI                        (1U << 30)
+#define RGXMIPSFW_TLB_RI                        (1U << 31)
+
+#define RGXMIPSFW_REMAP_GET_REGION_SIZE(REGION_SIZE_ENCODING) (1 << ((REGION_SIZE_ENCODING + 1) << 1))
+
+typedef struct {
+	IMG_UINT32 ui32TLBPageMask;
+	IMG_UINT32 ui32TLBHi;
+	IMG_UINT32 ui32TLBLo0;
+	IMG_UINT32 ui32TLBLo1;
+} RGX_MIPS_TLB_ENTRY;
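+
+/* Illustrative sketch (editor's addition, not part of the original header):
+ * pulling the physical address out of a captured TLB entry with the getter
+ * macros above. The function name is hypothetical and the block is
+ * compiled out.
+ */
+#if 0	/* example only */
+static inline IMG_UINT32 ExampleTLBEntryPA0(const RGX_MIPS_TLB_ENTRY *psEntry)
+{
+	/* Only meaningful if the Valid bit is set in EntryLo0 */
+	if (RGXMIPSFW_TLB_GET_DGV(psEntry->ui32TLBLo0) & RGXMIPSFW_TLB_VALID)
+	{
+		return RGXMIPSFW_TLB_GET_PA(psEntry->ui32TLBLo0);
+	}
+	return 0;
+}
+#endif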
+
+typedef struct {
+	IMG_UINT32 ui32RemapAddrIn;     /* always 4k aligned */
+	IMG_UINT32 ui32RemapAddrOut;    /* always 4k aligned */
+	IMG_UINT32 ui32RemapRegionSize;
+} RGX_MIPS_REMAP_ENTRY;
+
+typedef struct {
+	IMG_UINT32 ui32ErrorState; /* This must come first in the structure */
+	IMG_UINT32 ui32ErrorEPC;
+	IMG_UINT32 ui32StatusRegister;
+	IMG_UINT32 ui32CauseRegister;
+	IMG_UINT32 ui32BadRegister;
+	IMG_UINT32 ui32EPC;
+	IMG_UINT32 ui32SP;
+	IMG_UINT32 ui32Debug;
+	IMG_UINT32 ui32DEPC;
+	IMG_UINT32 ui32BadInstr;
+	IMG_UINT32 ui32UnmappedAddress;
+	RGX_MIPS_TLB_ENTRY asTLB[RGXMIPSFW_NUMBER_OF_TLB_ENTRIES];
+	RGX_MIPS_REMAP_ENTRY asRemap[RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES];
+} RGX_MIPS_STATE;
+
+#endif  /* RGXMIPSFW_ASSEMBLY_CODE */
+
+
+#endif /* RGX_MIPS_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_options.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_options.h
new file mode 100644
index 0000000..65cb8ad7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_options.h
@@ -0,0 +1,250 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX build options
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Each build option listed here is packed into a dword which provides up to
+ *  log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM and
+ *  (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM.
+ * The corresponding bit is set if the build option was enabled at compile
+ * time.
+ *
+ * In order to extract the enabled build flags the INTERNAL_TEST switch should
+ * be enabled in a client program which includes this header. Then the client
+ * can test specific build flags by reading the bit value at
+ *  ##OPTIONNAME##_SET_OFFSET
+ * in RGX_BUILD_OPTIONS_KM or RGX_BUILD_OPTIONS.
+ *
+ * IMPORTANT: add new options to unused bits or define a new dword
+ * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield
+ * remains backwards compatible.
+ */
+
+#ifndef RGX_OPTIONS_H
+#define RGX_OPTIONS_H
+
+#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL
+
+#if defined(NO_HARDWARE) || defined(INTERNAL_TEST)
+	#define NO_HARDWARE_SET_OFFSET	OPTIONS_BIT0
+	#define OPTIONS_BIT0		(0x1ul << 0)
+	#if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT0		0x0UL
+#endif /* NO_HARDWARE */
+
+
+#if defined(PDUMP) || defined(INTERNAL_TEST)
+	#define PDUMP_SET_OFFSET	OPTIONS_BIT1
+	#define OPTIONS_BIT1		(0x1ul << 1)
+	#if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT1		0x0UL
+#endif /* PDUMP */
+
+
+#if defined(INTERNAL_TEST)
+	#define UNUSED_SET_OFFSET	OPTIONS_BIT2
+	#define OPTIONS_BIT2		(0x1ul << 2)
+	#if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT2		0x0UL
+#endif
+
+/* No longer used */
+#if defined(INTERNAL_TEST)
+	#define OPTIONS_BIT3		(0x1ul << 3)
+	#if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT3		0x0UL
+#endif
+
+
+#if defined(SUPPORT_RGX) || defined(INTERNAL_TEST)
+	#define SUPPORT_RGX_SET_OFFSET	OPTIONS_BIT4
+	#define OPTIONS_BIT4		(0x1ul << 4)
+	#if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT4		0x0UL
+#endif /* SUPPORT_RGX */
+
+
+#if defined(SUPPORT_SECURE_EXPORT) || defined(INTERNAL_TEST)
+	#define SUPPORT_SECURE_EXPORT_SET_OFFSET	OPTIONS_BIT5
+	#define OPTIONS_BIT5		(0x1ul << 5)
+	#if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT5		0x0UL
+#endif /* SUPPORT_SECURE_EXPORT */
+
+
+#if defined(SUPPORT_INSECURE_EXPORT) || defined(INTERNAL_TEST)
+	#define SUPPORT_INSECURE_EXPORT_SET_OFFSET	OPTIONS_BIT6
+	#define OPTIONS_BIT6		(0x1ul << 6)
+	#if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT6		0x0UL
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+
+#if defined(SUPPORT_VFP) || defined(INTERNAL_TEST)
+	#define SUPPORT_VFP_SET_OFFSET	OPTIONS_BIT7
+	#define OPTIONS_BIT7		(0x1ul << 7)
+	#if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT7		0x0UL
+#endif /* SUPPORT_VFP */
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined(INTERNAL_TEST)
+	#define SUPPORT_WORKLOAD_ESTIMATION_OFFSET	OPTIONS_BIT8
+	#define OPTIONS_BIT8		(0x1ul << 8)
+	#if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT8		0x0UL
+#endif /* SUPPORT_WORKLOAD_ESTIMATION */
+#define OPTIONS_WORKLOAD_ESTIMATION_MASK	(0x1ul << 8)
+
+#if defined(SUPPORT_PDVFS) || defined(INTERNAL_TEST)
+	#define SUPPORT_PDVFS_OFFSET	OPTIONS_BIT9
+	#define OPTIONS_BIT9		(0x1ul << 9)
+	#if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT9		0x0UL
+#endif /* SUPPORT_PDVFS */
+#define OPTIONS_PDVFS_MASK	(0x1ul << 9)
+
+#if defined(DEBUG) || defined(INTERNAL_TEST)
+	#define DEBUG_SET_OFFSET	OPTIONS_BIT10
+	#define OPTIONS_BIT10		(0x1ul << 10)
+	#if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT10		0x0UL
+#endif /* DEBUG */
+/* The bit position of this mask must match DEBUG_SET_OFFSET when that
+ * option is defined.
+ */
+#define OPTIONS_DEBUG_MASK	(0x1ul << 10)
+
+#if defined(SUPPORT_BUFFER_SYNC) || defined(INTERNAL_TEST)
+	#define SUPPORT_BUFFER_SYNC_SET_OFFSET	OPTIONS_BIT11
+	#define OPTIONS_BIT11		(0x1ul << 11)
+	#if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT11		0x0UL
+#endif /* SUPPORT_BUFFER_SYNC */
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS) || defined(INTERNAL_TEST)
+	#define SUPPORT_FW_IRQ_REG_COUNTERS		OPTIONS_BIT12
+	#define OPTIONS_BIT12		(0x1ul << 12)
+	#if OPTIONS_BIT12 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT12		0x0UL
+#endif /* RGX_FW_IRQ_OS_COUNTERS */
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL) || defined(INTERNAL_TEST)
+	#define SUPPORT_SERVER_SYNC_IMPL_OFFSET OPTIONS_BIT13
+	#define OPTIONS_BIT13     (0x1ul << 13)
+	#if OPTIONS_BIT13 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT13     0x0UL
+#endif
+
+#define RGX_BUILD_OPTIONS_KM	\
+	(OPTIONS_BIT0  |\
+	 OPTIONS_BIT1  |\
+	 OPTIONS_BIT2  |\
+	 OPTIONS_BIT3  |\
+	 OPTIONS_BIT4  |\
+	 OPTIONS_BIT6  |\
+	 OPTIONS_BIT7  |\
+	 OPTIONS_BIT8  |\
+	 OPTIONS_BIT9  |\
+	 OPTIONS_BIT10 |\
+	 OPTIONS_BIT11 |\
+	 OPTIONS_BIT12 |\
+	 OPTIONS_BIT13)
+
+
+#if defined(SUPPORT_PERCONTEXT_FREELIST) || defined(INTERNAL_TEST)
+	#define OPTIONS_BIT31		(0x1ul << 31)
+	#if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit must lie outside the KM reserved range"
+	#endif
+	#define SUPPORT_PERCONTEXT_FREELIST_SET_OFFSET	OPTIONS_BIT31
+#else
+	#define OPTIONS_BIT31		0x0UL
+#endif /* SUPPORT_PERCONTEXT_FREELIST */
+
+#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31)
+
+#define OPTIONS_STRICT (RGX_BUILD_OPTIONS &                  \
+                        ~(OPTIONS_DEBUG_MASK               | \
+                          OPTIONS_WORKLOAD_ESTIMATION_MASK | \
+                          OPTIONS_PDVFS_MASK))
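+
+/* Illustrative sketch (editor's addition, not part of the original header):
+ * a typical compatibility check masks out the non-critical options before
+ * comparing the client's and server's build option words, which is what
+ * the OPTIONS_STRICT mask above is for. The macro name is hypothetical and
+ * the block is compiled out.
+ */
+#if 0	/* example only */
+#define BUILD_OPTIONS_COMPATIBLE(ui32ClientOptions, ui32ServerOptions) \
+	(((ui32ClientOptions) & ~(OPTIONS_DEBUG_MASK               | \
+	                          OPTIONS_WORKLOAD_ESTIMATION_MASK | \
+	                          OPTIONS_PDVFS_MASK)) ==             \
+	 ((ui32ServerOptions) & ~(OPTIONS_DEBUG_MASK               | \
+	                          OPTIONS_WORKLOAD_ESTIMATION_MASK | \
+	                          OPTIONS_PDVFS_MASK)))
+#endif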
+
+#endif /* RGX_OPTIONS_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_pdump_panics.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_pdump_panics.h
new file mode 100644
index 0000000..2cb3a18
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_pdump_panics.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX PDump panic definitions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX PDump panic definitions header
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (RGX_PDUMP_PANICS_H_)
+#define RGX_PDUMP_PANICS_H_
+
+
+/*! Unique device-specific IMG_UINT16 panic IDs to identify the cause of an
+ * RGX PDump panic in a PDump script. */
+typedef enum
+{
+	RGX_PDUMP_PANIC_UNDEFINED = 0,
+
+	/* These panics occur when test parameters and driver configuration
+	 * enable features that require the firmware and host driver to
+	 * communicate. Such features are not supported with off-line playback.
+	 */
+	RGX_PDUMP_PANIC_ZSBUFFER_BACKING         = 101, /*!< Requests ZSBuffer to be backed with physical pages */
+	RGX_PDUMP_PANIC_ZSBUFFER_UNBACKING       = 102, /*!< Requests ZSBuffer to be unbacked */
+	RGX_PDUMP_PANIC_FREELIST_GROW            = 103, /*!< Requests an on-demand freelist grow/shrink */
+	RGX_PDUMP_PANIC_FREELISTS_RECONSTRUCTION = 104, /*!< Requests freelists reconstruction */
+	RGX_PDUMP_PANIC_SPARSEMEM_SWAP           = 105, /*!< Requests sparse remap memory swap feature */
+} RGX_PDUMP_PANIC;
+
+
+#endif /* RGX_PDUMP_PANICS_H_ */
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_tq_shared.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_tq_shared.h
new file mode 100644
index 0000000..bd3460c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgx_tq_shared.h
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX transfer queue shared
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared definitions between client and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGX_TQ_SHARED_H__
+#define __RGX_TQ_SHARED_H__
+
+#define TQ_MAX_PREPARES_PER_SUBMIT		16
+
+#define TQ_PREP_FLAGS_COMMAND_3D		0x0
+#define TQ_PREP_FLAGS_COMMAND_2D		0x1
+#define TQ_PREP_FLAGS_COMMAND_MASK		(0xf)
+#define TQ_PREP_FLAGS_COMMAND_SHIFT		0
+#define TQ_PREP_FLAGS_PDUMPCONTINUOUS	(1 << 4)
+#define TQ_PREP_FLAGS_START				(1 << 5)
+#define TQ_PREP_FLAGS_END				(1 << 6)
+
+#define TQ_PREP_FLAGS_COMMAND_SET(m) \
+	((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK)
+
+#define TQ_PREP_FLAGS_COMMAND_IS(m,n) \
+	((((m) & TQ_PREP_FLAGS_COMMAND_MASK) >> TQ_PREP_FLAGS_COMMAND_SHIFT) == TQ_PREP_FLAGS_COMMAND_##n)
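+
+/* Usage sketch (editor's addition), e.g. when building and inspecting
+ * prepare flags:
+ *
+ *   ui32Flags  = TQ_PREP_FLAGS_COMMAND_SET(2D);    evaluates to 0x1
+ *   ui32Flags |= TQ_PREP_FLAGS_START;
+ *   ...
+ *   if (TQ_PREP_FLAGS_COMMAND_IS(ui32Flags, 2D)) { ... }
+ */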
+
+#endif /* __RGX_TQ_SHARED_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxapi_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxapi_km.h
new file mode 100644
index 0000000..a9fba87
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxapi_km.h
@@ -0,0 +1,313 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX API Header kernel mode
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exported RGX API details
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXAPI_KM_H__
+#define __RGXAPI_KM_H__
+
+#if defined(SUPPORT_SHARED_SLC)
+/*************************************************************************/ /*!
+@Function       RGXInitSLC
+@Description    Initialise the SLC after a power up. Calling this function
+                is required when SUPPORT_SHARED_SLC is defined; otherwise it
+                must not be called.
+@Input          hDevHandle   RGX Device Node
+@Return         PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle);
+#endif
+
+#include "rgx_hwperf.h"
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Control API(s)
+ *****************************************************************************/
+
+typedef struct _RGX_HWPERF_DEVICE_
+{
+	IMG_CHAR pszName[20];	/* Helps identify this device uniquely */
+	IMG_HANDLE hDevData;	/* Handle for the server */
+
+	struct _RGX_HWPERF_DEVICE_ *psNext;
+} RGX_HWPERF_DEVICE;
+
+typedef struct
+{
+	RGX_HWPERF_DEVICE *psHWPerfDevList;
+} RGX_HWPERF_CONNECTION;
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfLazyConnect
+@Description    Obtain a HWPerf connection object to the RGX device(s). The
+                connections to devices are not actually opened until
+                HWPerfOpen() is called.
+@Output         ppsHWPerfConnection Address of a HWPerf connection object
+@Return         PVRSRV_ERROR        System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfOpen
+@Description    Opens connection(s) to the RGX device(s). A valid handle to
+                the connection object must be provided, which means this
+                function must be preceded by a call to RGXHWPerfLazyConnect().
+@Input          psHWPerfConnection HWPerf connection object
+@Return         PVRSRV_ERROR       System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION* psHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfConnect
+@Description    Obtain a connection object to the RGX HWPerf module. Allocated
+                connection object(s) reference opened connection(s). Calling
+                this function is equivalent to calling RGXHWPerfLazyConnect()
+                followed by RGXHWPerfOpen(). This connection should be used
+                when the caller will be retrieving event data.
+@Output         ppsHWPerfConnection Address of HWPerf connection object
+@Return         PVRSRV_ERROR        System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfFreeConnection
+@Description    Frees the HWPerf connection object
+@Input          psHWPerfConnection Pointer to connection object as returned
+                                   from RGXHWPerfLazyConnect()
+@Return         PVRSRV_ERROR       System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** psHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfClose
+@Description    Closes all the opened connection(s) to RGX device(s)
+@Input          psHWPerfConnection Pointer to HWPerf connection object as
+                                   returned from RGXHWPerfConnect() or
+                                   RGXHWPerfOpen()
+@Return         PVRSRV_ERROR       System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfDisconnect
+@Description    Disconnect from the RGX device. Calling this function is
+                equivalent to calling RGXHWPerfClose() followed by
+                RGXHWPerfFreeConnection().
+@Input          ppsHWPerfConnection Pointer to HWPerf connection object as
+                                    returned from RGXHWPerfConnect() or
+                                    RGXHWPerfOpen()
+@Return         PVRSRV_ERROR        System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfControl
+@Description    Enable or disable the generation of RGX HWPerf event packets.
+                See RGXCtrlHWPerf().
+@Input          psHWPerfConnection Pointer to HWPerf connection object
+@Input          eStreamId          ID of the HWPerf stream
+@Input          bToggle            Switch to toggle or apply mask.
+@Input          ui64Mask           Mask of events to control.
+@Return         PVRSRV_ERROR       System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfControl(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		RGX_HWPERF_STREAM_ID eStreamId,
+		IMG_BOOL             bToggle,
+		IMG_UINT64           ui64Mask);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfGetFilter
+@Description    Reads HWPerf stream filter where stream is identified by the
+                given stream ID.
+@Input          hDevData     Handle to connection/device object
+@Input          eStreamId    ID of the HWPerf stream
+@Output         ui64Filter   HWPerf filter value
+@Return         PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfGetFilter(
+		IMG_HANDLE  hDevData,
+		RGX_HWPERF_STREAM_ID eStreamId,
+		IMG_UINT64 *ui64Filter
+);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfConfigureAndEnableCounters
+@Description    Enable and configure the performance counter block for one or
+                more device layout modules.
+                See RGXHWPerfConfigureAndEnableCustomCounters().
+@Input          psHWPerfConnection Pointer to HWPerf connection object
+@Input          ui32NumBlocks      Number of elements in the array
+@Input          asBlockConfigs     Address of the array of configuration blocks
+@Return         PVRSRV_ERROR       System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfConfigureAndEnableCounters(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		IMG_UINT32                 ui32NumBlocks,
+		RGX_HWPERF_CONFIG_CNTBLK*  asBlockConfigs);
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfConfigureAndEnableCustomCounters
+@Description    Enable and configure custom performance counters
+@Input          psHWPerfConnection    Pointer to connection object
+@Input          ui16CustomBlockID     ID of the custom block to configure
+@Input          ui16NumCustomCounters Number of custom counters
+@Input          pui32CustomCounterIDs Pointer to array containing custom
+                                      counter IDs
+@Return         PVRSRV_ERROR          System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfConfigureAndEnableCustomCounters(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		IMG_UINT16             ui16CustomBlockID,
+		IMG_UINT16             ui16NumCustomCounters,
+		IMG_UINT32            *pui32CustomCounterIDs);
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfDisableCounters
+@Description    Disable the performance counter block for one or more device
+                layout modules.
+@Input          psHWPerfConnection Pointer to HWPerf connection object
+@Input          ui32NumBlocks      Number of elements in the array
+@Input          aeBlockIDs         An array of bytes with values taken from
+                                   the RGX_HWPERF_CNTBLK_ID enumeration.
+@Return         PVRSRV_ERROR       System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfDisableCounters(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		IMG_UINT32   ui32NumBlocks,
+		IMG_UINT16*  aeBlockIDs);
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfEnableCounters
+@Description    Enable the performance counter block for one or more device
+                layout modules.
+@Input          hDevData      Handle to connection/device object
+@Input          ui32NumBlocks Number of elements in the array
+@Input          aeBlockIDs    An array of bytes with values taken from the
+                              RGX_HWPERF_CNTBLK_ID enumeration.
+@Return         PVRSRV_ERROR  System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfEnableCounters(
+		IMG_HANDLE   hDevData,
+		IMG_UINT32   ui32NumBlocks,
+		IMG_UINT16*  aeBlockIDs);
+
+/******************************************************************************
+ * RGX HW Performance Profiling Retrieval API(s)
+ *
+ * The client must ensure that its use of this acquire/release API for a
+ * single connection/stream is not shared between multiple execution
+ * contexts, e.g. between a kernel thread and an ISR handler. It is the
+ * client's responsibility to ensure this API is not interrupted by a
+ * higher-priority thread/ISR.
+ *****************************************************************************/
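+
+/* Usage sketch (editor's addition, error handling elided): the expected
+ * call sequence for retrieving HWPerf event data is
+ *
+ *   RGXHWPerfConnect(&psConn);
+ *   RGXHWPerfControl(psConn, eStreamId, IMG_FALSE, ui64Mask);
+ *   do {
+ *       RGXHWPerfAcquireEvents(hDevData, eStreamId, &pBuf, &ui32Len);
+ *       if (ui32Len > 0)
+ *           process ui32Len bytes starting at pBuf;
+ *       RGXHWPerfReleaseEvents(hDevData, eStreamId);
+ *   } while (ui32Len > 0);
+ *   RGXHWPerfDisconnect(&psConn);
+ */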
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfAcquireEvents
+@Description    When there is data available to read this call returns with OK
+                and the address and length of the data buffer the client can
+                safely read. This buffer may contain one or more event packets.
+                When there is no data to read, this call returns with OK and
+                sets *pui32BufLen to 0 on exit.
+                Clients must pair this call with a ReleaseEvents call.
+@Input          hDevData     Handle to connection/device object
+@Input          eStreamId    ID of the HWPerf stream
+@Output         ppBuf        Address of a pointer to a byte buffer. On exit it
+                             contains the address of buffer to read from
+@Output         pui32BufLen  Pointer to an integer. On exit it is the size of
+                             the data to read from the buffer
+@Return         PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfAcquireEvents(
+		IMG_HANDLE  hDevData,
+		RGX_HWPERF_STREAM_ID eStreamId,
+		IMG_PBYTE*  ppBuf,
+		IMG_UINT32* pui32BufLen);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfReleaseEvents
+@Description    Called after client has read the event data out of the buffer
+                retrieved from the Acquire Events call to release resources.
+@Input          hDevData     Handle to connection/device object
+@Input          eStreamId    ID of the HWPerf stream
+@Return         PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXHWPerfReleaseEvents(
+		IMG_HANDLE hDevData,
+		RGX_HWPERF_STREAM_ID eStreamId);
+
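+/* Acquire/Release usage sketch (illustrative only; the stream ID used is an
+ * assumed member of RGX_HWPERF_STREAM_ID). A zero length with PVRSRV_OK
+ * means there was no data to read.
+ *
+ *     IMG_PBYTE  pBuf;
+ *     IMG_UINT32 ui32Len;
+ *     eError = RGXHWPerfAcquireEvents(hDevData, RGX_HWPERF_STREAM_ID0_FW,
+ *                                     &pBuf, &ui32Len);
+ *     if ((eError == PVRSRV_OK) && (ui32Len > 0))
+ *     {
+ *         ProcessPackets(pBuf, ui32Len);   (client-defined consumer)
+ *     }
+ *     RGXHWPerfReleaseEvents(hDevData, RGX_HWPERF_STREAM_ID0_FW);
+ */
+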
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfConvertCRTimeStamp
+@Description    Converts the timestamp given by FW events to the common OS
+                timestamp. The first three inputs are obtained via a CLK_SYNC
+                event, ui64CRTimeStamp is the CR timestamp from the FW event
+                to be converted.
+@Input          ui32ClkSpeed        Clock speed given by sync event
+@Input          ui64CorrCRTimeStamp CR Timestamp given by sync event
+@Input          ui64CorrOSTimeStamp Correlating OS Timestamp given by sync
+                                    event
+@Input          ui64CRTimeStamp     CR Timestamp to convert
+@Return         IMG_UINT64          Calculated OS Timestamp
+*/ /**************************************************************************/
+IMG_UINT64 RGXHWPerfConvertCRTimeStamp(
+		IMG_UINT32 ui32ClkSpeed,
+		IMG_UINT64 ui64CorrCRTimeStamp,
+		IMG_UINT64 ui64CorrOSTimeStamp,
+		IMG_UINT64 ui64CRTimeStamp);
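+
+/* Conceptually (a sketch only; the real conversion must also account for the
+ * core's CR-timer divider) the CR tick delta is scaled by the clock period
+ * and offset from the correlated OS time:
+ *     os_ts ~= ui64CorrOSTimeStamp +
+ *              (ui64CRTimeStamp - ui64CorrCRTimeStamp) * ns_per_tick
+ * where ns_per_tick is derived from ui32ClkSpeed.
+ */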
+
+#endif /* __RGXAPI_KM_H__ */
+
+/******************************************************************************
+ End of file (rgxapi_km.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxbreakpoint.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxbreakpoint.c
new file mode 100644
index 0000000..acc0ce4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxbreakpoint.c
@@ -0,0 +1,338 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Breakpoint routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Breakpoint routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxbreakpoint.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxmem.h"
+#include "device.h"
+#include "sync_internal.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA    * psConnection,
+                                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                                      IMG_HANDLE           hMemCtxPrivData,
+                                      RGXFWIF_DM           eFWDataMaster,
+                                      IMG_UINT32           ui32BPAddr,
+                                      IMG_UINT32           ui32HandlerAddr,
+                                      IMG_UINT32           ui32DataMaster)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC		*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psDevInfo->hBPLock);
+#endif
+
+	if (psDevInfo->bBPSet)
+	{
+		eError = PVRSRV_ERROR_BP_ALREADY_SET;
+		goto unlock;
+	}
+
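+	/* Build the firmware breakpoint command; the clear/enable/disable
+	 * entry points below reuse the same RGXFWIF_KCCB_CMD_BP command type
+	 * with different control flags. */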
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.ui32BPAddr = ui32BPAddr;
+	sBPCmd.uCmdData.sBPData.ui32HandlerAddr = ui32HandlerAddr;
+	sBPCmd.uCmdData.sBPData.ui32BPDM = ui32DataMaster;
+	sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_ENABLE;
+
+	RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+				psFWMemContextMemDesc,
+				0,
+				RFW_FWADDR_NOREF_FLAG);
+
+	eError = RGXScheduleCommand(psDevInfo,
+				eFWDataMaster,
+				&sBPCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+		goto unlock;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDevInfo, eFWDataMaster, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXSetBreakpointKM: Wait for completion aborted with error (%u)", eError));
+		goto unlock;
+	}
+
+	psDevInfo->eBPDM = eFWDataMaster;
+	psDevInfo->bBPSet = IMG_TRUE;
+
+unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psDevInfo->hBPLock);
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA    * psConnection,
+                                        PVRSRV_DEVICE_NODE * psDeviceNode,
+                                        IMG_HANDLE           hMemCtxPrivData)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC		*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.ui32BPAddr = 0;
+	sBPCmd.uCmdData.sBPData.ui32HandlerAddr = 0;
+	sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_CTL;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psDevInfo->hBPLock);
+#endif
+
+	RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+				psFWMemContextMemDesc,
+				0,
+				RFW_FWADDR_NOREF_FLAG);
+
+	eError = RGXScheduleCommand(psDevInfo,
+				psDevInfo->eBPDM,
+				&sBPCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+		goto unlock;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDevInfo, psDevInfo->eBPDM, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXClearBreakpointKM: Wait for completion aborted with error (%u)", eError));
+		goto unlock;
+	}
+
+	psDevInfo->bBPSet = IMG_FALSE;
+
+unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psDevInfo->hBPLock);
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA    * psConnection,
+                                         PVRSRV_DEVICE_NODE * psDeviceNode,
+                                         IMG_HANDLE           hMemCtxPrivData)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC		*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psDevInfo->hBPLock);
+#endif
+
+	if (psDevInfo->bBPSet == IMG_FALSE)
+	{
+		eError = PVRSRV_ERROR_BP_NOT_SET;
+		goto unlock;
+	}
+
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_CTL | RGXFWIF_BPDATA_FLAGS_ENABLE;
+
+	RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+				psFWMemContextMemDesc,
+				0,
+				RFW_FWADDR_NOREF_FLAG);
+
+	eError = RGXScheduleCommand(psDevInfo,
+				psDevInfo->eBPDM,
+				&sBPCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+		goto unlock;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDevInfo, psDevInfo->eBPDM, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXEnableBreakpointKM: Wait for completion aborted with error (%u)", eError));
+		goto unlock;
+	}
+
+unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psDevInfo->hBPLock);
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA    * psConnection,
+                                          PVRSRV_DEVICE_NODE * psDeviceNode,
+                                          IMG_HANDLE           hMemCtxPrivData)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC		*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psDevInfo->hBPLock);
+#endif
+
+	if (psDevInfo->bBPSet == IMG_FALSE)
+	{
+		eError = PVRSRV_ERROR_BP_NOT_SET;
+		goto unlock;
+	}
+
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_CTL;
+
+	RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+				psFWMemContextMemDesc,
+				0,
+				RFW_FWADDR_NOREF_FLAG);
+
+	eError = RGXScheduleCommand(psDevInfo,
+				psDevInfo->eBPDM,
+				&sBPCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+		goto unlock;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDevInfo, psDevInfo->eBPDM, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXDisableBreakpointKM: Wait for completion aborted with error (%u)", eError));
+		goto unlock;
+	}
+
+unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psDevInfo->hBPLock);
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA    * psConnection,
+                                                PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                IMG_UINT32           ui32TempRegs,
+                                                IMG_UINT32           ui32SharedRegs)
+{
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+#endif
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_REGS;
+	sBPCmd.uCmdData.sBPData.ui32TempRegs = ui32TempRegs;
+	sBPCmd.uCmdData.sBPData.ui32SharedRegs = ui32SharedRegs;
+	sBPCmd.uCmdData.sBPData.psFWMemContext.ui32Addr = 0U;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psDevInfo->hBPLock);
+#endif
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sBPCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXOverallocateBPRegistersKM: RGXScheduleCommand failed. Error:%u", eError));
+		goto unlock;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXOverallocateBPRegistersKM: Wait for completion aborted with error (%u)", eError));
+		goto unlock;
+	}
+
+unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psDevInfo->hBPLock);
+#endif
+
+	return eError;
+}
+
+/******************************************************************************
+ End of file (rgxbreakpoint.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxbreakpoint.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxbreakpoint.h
new file mode 100644
index 0000000..fc66568
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxbreakpoint.h
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX breakpoint functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX breakpoint functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXBREAKPOINT_H__)
+#define __RGXBREAKPOINT_H__
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXSetBreakpointKM
+
+ @Description
+	Server-side implementation of RGXSetBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+ @Input eFWDataMaster - Firmware Data Master to schedule the command for
+ @Input ui32BPAddr - Address of breakpoint
+ @Input ui32HandlerAddr - Address of breakpoint handler
+ @Input ui32DataMaster - Data Master the breakpoint applies to
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA    * psConnection,
+                                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                                      IMG_HANDLE           hMemCtxPrivData,
+                                      RGXFWIF_DM           eFWDataMaster,
+                                      IMG_UINT32           ui32BPAddr,
+                                      IMG_UINT32           ui32HandlerAddr,
+                                      IMG_UINT32           ui32DataMaster);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXClearBreakpointKM
+
+ @Description
+	Server-side implementation of RGXClearBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA    * psConnection,
+                                        PVRSRV_DEVICE_NODE * psDeviceNode,
+                                        IMG_HANDLE           hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXEnableBreakpointKM
+
+ @Description
+	Server-side implementation of RGXEnableBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA    * psConnection,
+                                         PVRSRV_DEVICE_NODE * psDeviceNode,
+                                         IMG_HANDLE           hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXDisableBreakpointKM
+
+ @Description
+	Server-side implementation of RGXDisableBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA    * psConnection,
+                                          PVRSRV_DEVICE_NODE * psDeviceNode,
+                                          IMG_HANDLE           hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXOverallocateBPRegistersKM
+
+ @Description
+	Server-side implementation of RGXOverallocateBPRegisters
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui32TempRegs - Number of temporary registers to overallocate
+ @Input ui32SharedRegs - Number of shared registers to overallocate
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA    * psConnection,
+                                                PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                IMG_UINT32           ui32TempRegs,
+                                                IMG_UINT32           ui32SharedRegs);
+#endif /* __RGXBREAKPOINT_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxbvnc.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxbvnc.c
new file mode 100644
index 0000000..d6c42c4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxbvnc.c
@@ -0,0 +1,572 @@
+/*************************************************************************/ /*!
+@File
+@Title          BVNC handling specific routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Functions used for BVNC related work
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgxbvnc.h"
+#define _RGXBVNC_C_
+#include "rgx_bvnc_table_km.h"
+#undef _RGXBVNC_C_
+#include "oskm_apphint.h"
+#include "pvrsrv.h"
+
+#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(RGX_BVNC_STR_SIZE_MAX))+1)
+
+/* Binary-search the given table, sorted in ascending order of its first
+ * column, for the given search value */
+static IMG_UINT64* _RGXSearchBVNCTable( IMG_UINT64 *pui64Array,
+								IMG_UINT uiEnd,
+								IMG_UINT64 ui64SearchValue,
+								IMG_UINT uiRowCount)
+{
+	IMG_UINT uiStart = 0, index;
+	IMG_UINT64 value, *pui64Ptr = NULL;
+
+	while (uiStart < uiEnd)
+	{
+		index = (uiStart + uiEnd)/2;
+		pui64Ptr = pui64Array + (index * uiRowCount);
+		value = *(pui64Ptr);
+
+		if (value == ui64SearchValue)
+		{
+			return pui64Ptr;
+		}
+
+		if (value > ui64SearchValue)
+		{
+			uiEnd = index;
+		}
+		else
+		{
+			uiStart = index + 1;
+		}
+	}
+	return NULL;
+}
+#define RGX_SEARCH_BVNC_TABLE(t, b) (_RGXSearchBVNCTable((IMG_UINT64*)(t), \
+                                ARRAY_SIZE(t), (b), \
+                                sizeof((t)[0])/sizeof(IMG_UINT64)) )
+
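+/* Each row of the BVNC tables starts with a packed BVNC key in ascending
+ * order, which is what makes the binary search above valid. Usage sketch,
+ * matching how this file uses it later:
+ *
+ *     pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, BVNC_PACK(B, 0, N, C));
+ *     (pui64Cfg[1] then holds the feature mask, pui64Cfg[2] the packed
+ *      feature values, or pui64Cfg is NULL when the BVNC is unknown.)
+ */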
+
+#if defined(DEBUG)
+
+#define PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, szShortName, Feature)															\
+	if ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] != RGX_FEATURE_VALUE_DISABLED )			\
+		{ PVR_LOG(("%s %d", szShortName, psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX])); }		\
+	else																\
+		{ PVR_LOG(("%s N/A", szShortName)); }
+
+static void _RGXBvncDumpParsedConfig(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+	IMG_UINT64 ui64Mask = 0, ui32IdOrNameIdx = 1;
+
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NC:       ", NUM_CLUSTERS);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "CSF:      ", CDM_CONTROL_STREAM_FORMAT);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "FBCDCA:   ", FBCDC_ARCHITECTURE);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMB:     ", META_COREMEM_BANKS);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMS:     ", META_COREMEM_SIZE);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MDMACnt:  ", META_DMA_CHANNEL_COUNT);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIIP:     ", NUM_ISP_IPP_PIPES);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PBW:      ", PHYS_BUS_WIDTH);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "STEArch:  ", SCALABLE_TE_ARCH);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SVCEA:    ", SCALABLE_VCE);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCBanks: ", SLC_BANKS);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCCLS:   ", SLC_CACHE_LINE_SIZE_BITS);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCSize:  ", SLC_SIZE_IN_BYTES);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "VASB:     ", VIRTUAL_ADDRESS_SPACE_BITS);
+	PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "META:     ", META);
+
+#if defined(FEATURE_NO_VALUES_NAMES_MAX_IDX)
+	/* Dump the features with no values */
+	ui64Mask = psDevInfo->sDevFeatureCfg.ui64Features;
+	while (ui64Mask)
+	{
+		if (ui64Mask & 0x01)
+		{
+			if (ui32IdOrNameIdx <= FEATURE_NO_VALUES_NAMES_MAX_IDX)
+			{
+				PVR_LOG(("%s", gaszFeaturesNoValuesNames[ui32IdOrNameIdx - 1]));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_WARNING,"Feature with Mask doesn't exist: 0x%016" IMG_UINT64_FMTSPECx, ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1))));
+			}
+		}
+		ui64Mask >>= 1;
+		ui32IdOrNameIdx++;
+	}
+#endif
+
+#if defined(ERNSBRNS_IDS_MAX_IDX)
+	/* Dump the ERN and BRN flags for this core */
+	ui64Mask = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+	ui32IdOrNameIdx = 1;
+
+	while (ui64Mask)
+	{
+		if (ui64Mask & 0x1)
+		{
+			if (ui32IdOrNameIdx <= ERNSBRNS_IDS_MAX_IDX)
+			{
+				PVR_LOG(("ERN/BRN : %" IMG_UINT64_FMTSPEC, gaui64ErnsBrnsIDs[ui32IdOrNameIdx - 1]));
+			}
+			else
+			{
+				PVR_LOG(("Unknown ErnBrn bit: 0x%0" IMG_UINT64_FMTSPECx, ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1))));
+			}
+		}
+		ui64Mask >>= 1;
+		ui32IdOrNameIdx++;
+	}
+#endif
+
+}
+#endif
+
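+/* Sketch of the decode performed below: each feature-with-value occupies one
+ * bitfield of ui64PackedValues; the mask selects the field and the position
+ * shifts it down to an index into that feature's value table:
+ *
+ *     idx   = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[i])
+ *                 >> aui16FeaturesWithValuesBitPositions[i];
+ *     value = gaFeaturesValues[i][idx];
+ */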
+static void _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT64 ui64PackedValues)
+{
+	IMG_UINT32 ui32Index;
+
+	/* Read the feature values for the runtime BVNC */
+	for (ui32Index = 0; ui32Index < RGX_FEATURE_WITH_VALUES_MAX_IDX; ui32Index++)
+	{
+		IMG_UINT16	ui16ValueIndex = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[ui32Index]) >> aui16FeaturesWithValuesBitPositions[ui32Index];
+
+		if (ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index])
+		{
+			if (gaFeaturesValues[ui32Index][ui16ValueIndex] == (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED)
+			{
+				psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_DISABLED;
+			}
+			else
+			{
+				psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = gaFeaturesValues[ui32Index][ui16ValueIndex];
+			}
+		}
+		else
+		{
+			/* This case should never be reached */
+			psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_INVALID;
+			PVR_DPF((PVR_DBG_ERROR, "%s: Feature with index (%d) decoded wrong value index (%d)", __func__, ui32Index, ui16ValueIndex));
+			PVR_ASSERT(ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]);
+		}
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_IDX] = RGX_FEATURE_VALUE_DISABLED;
+	}
+
+	/* Get the max number of dusts in the core */
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS))
+	{
+		psDevInfo->sDevFeatureCfg.ui32MAXDustCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2));
+	}
+	else
+	{
+		/* This case should never be reached as all cores have clusters */
+		psDevInfo->sDevFeatureCfg.ui32MAXDustCount = RGX_FEATURE_VALUE_INVALID;
+		PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__));
+		PVR_ASSERT(0);
+	}
+
+
+	/* Convert the SLC size value from kilobytes to bytes */
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_SIZE_IN_BYTES))
+	{
+		psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_SLC_SIZE_IN_BYTES_IDX] *= 1024;
+	}
+
+	/* Convert the META coremem size value from kilobytes to bytes */
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
+	{
+		psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_COREMEM_SIZE_IDX] *= 1024;
+	}
+}
+
+static void _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNC, const IMG_UINT32 ui32RGXDevCount)
+{
+	IMG_CHAR *pszAppHintDefault = PVRSRV_APPHINT_RGXBVNC;
+	void *pvAppHintState = NULL;
+	IMG_UINT32 ui32BVNCCount = 0;
+	IMG_BOOL bRet;
+	IMG_CHAR szBVNCAppHint[RGXBVNC_BUFFER_SIZE];
+	IMG_CHAR *pszCurrentBVNC = szBVNCAppHint;
+	szBVNCAppHint[0] = '\0';
+
+	OSCreateKMAppHintState(&pvAppHintState);
+
+	bRet = (IMG_BOOL)OSGetKMAppHintSTRING(pvAppHintState,
+						RGXBVNC,
+						&pszAppHintDefault,
+						szBVNCAppHint,
+						sizeof(szBVNCAppHint));
+
+	OSFreeKMAppHintState(pvAppHintState);
+
+	if (!bRet || (szBVNCAppHint[0] == '\0'))
+	{
+		return;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC module param list: %s",__func__, szBVNCAppHint));
+
+	while (*pszCurrentBVNC != '\0')
+	{
+		IMG_CHAR *pszNext = pszCurrentBVNC;
+
+		if (ui32BVNCCount >= PVRSRV_MAX_DEVICES)
+		{
+			break;
+		}
+
+		while (1)
+		{
+			if (*pszNext == ',')
+			{
+				pszNext[0] = '\0';
+				pszNext++;
+				break;
+			} else if (*pszNext == '\0')
+			{
+				break;
+			}
+			pszNext++;
+		}
+
+		if (ui32BVNCCount == ui32RGXDevCount)
+		{
+			strcpy(pszBVNC, pszCurrentBVNC);
+			return;
+		}
+
+		ui32BVNCCount++;
+		pszCurrentBVNC = pszNext;
+	}
+
+	PVR_DPF((PVR_DBG_ERROR, "%s: Given module parameter list is shorter than "
+	"the number of actual devices", __func__));
+
+	/* If only one BVNC parameter is specified, the same is applied for all RGX
+	 * devices detected */
+	if (1 == ui32BVNCCount)
+	{
+		strcpy(pszBVNC, szBVNCAppHint);
+	}
+}
+
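+/* Illustrative module-parameter form (the BVNC values here are made-up
+ * examples, not a supported-core list):
+ *     RGXBVNC=4.31.4.55,22.102.54.38
+ * assigns the first entry to device 0 and the second to device 1; a single
+ * entry is applied to every detected RGX device. */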
+/* Function that parses the BVNC List passed as module parameter */
+static PVRSRV_ERROR _RGXBvncParseList(IMG_UINT32 *pB,
+									  IMG_UINT32 *pV,
+									  IMG_UINT32 *pN,
+									  IMG_UINT32 *pC,
+									  const IMG_UINT32 ui32RGXDevCount)
+{
+	unsigned int ui32ScanCount = 0;
+	IMG_CHAR aszBVNCString[RGX_BVNC_STR_SIZE_MAX];
+
+	aszBVNCString[0] = '\0';
+
+	/* The 4 components of a BVNC string are B, V, N & C */
+#define RGX_BVNC_INFO_PARAMS (4)
+
+	_RGXBvncAcquireAppHint(aszBVNCString, ui32RGXDevCount);
+
+	if ('\0' == aszBVNCString[0])
+	{
+		return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+	}
+
+	/* Parse the given RGX_BVNC string */
+	ui32ScanCount = OSVSScanf(aszBVNCString, RGX_BVNC_STR_FMTSPEC, pB, pV, pN, pC);
+	if (RGX_BVNC_INFO_PARAMS != ui32ScanCount)
+	{
+		ui32ScanCount = OSVSScanf(aszBVNCString,RGX_BVNC_STRP_FMTSPEC, pB, pV, pN, pC);
+	}
+	if (RGX_BVNC_INFO_PARAMS != ui32ScanCount)
+	{
+		return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+	}
+	PVR_LOG(("BVNC module parameter honoured: %s", aszBVNCString));
+
+	return PVRSRV_OK;
+}
+
+/* This function detects the Rogue variant and configures the
+ * essential config info associated with such a device.
+ * The config info includes features, errata, etc */
+PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	static IMG_UINT32 ui32RGXDevCnt = 0;
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	IMG_UINT64 ui64BVNC=0;
+	IMG_UINT32 B=0, V=0, N=0, C=0;
+	IMG_UINT64 *pui64Cfg = NULL;
+
+	/* Check for load time RGX BVNC parameter */
+	eError = _RGXBvncParseList(&B,&V,&N,&C, ui32RGXDevCnt);
+	if (PVRSRV_OK == eError)
+	{
+		PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC \
+				" from driver load parameter", B, V, N, C));
+
+		/* Extract the BVNC config from the Features table */
+		ui64BVNC = BVNC_PACK(B,0,N,C);
+		pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC);
+		PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Driver parameter BVNC configuration not found!");
+	}
+
+	{
+		void *pvAppHintState = NULL;
+		IMG_BOOL bAppHintDefault = PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC;
+
+		OSCreateKMAppHintState(&pvAppHintState);
+		OSGetKMAppHintBOOL(pvAppHintState,
+							IgnoreHWReportedBVNC,
+							&bAppHintDefault,
+							&psDevInfo->bIgnoreHWReportedBVNC);
+		OSFreeKMAppHintState(pvAppHintState);
+	}
+
+#if !defined(NO_HARDWARE) && defined(SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION)
+
+	/* Try to detect the RGX BVNC from the HW device */
+	if ((NULL == pui64Cfg) && !psDevInfo->bIgnoreHWReportedBVNC)
+	{
+		IMG_UINT64 ui64ID;
+		IMG_HANDLE hSysData;
+
+		hSysData = psDeviceNode->psDevConfig->hSysData;
+
+		/* Power-up the device as required to read the registers */
+		if (psDeviceNode->psDevConfig->pfnPrePowerState)
+		{
+			eError = psDeviceNode->psDevConfig->pfnPrePowerState(hSysData, PVRSRV_DEV_POWER_STATE_ON,
+					PVRSRV_DEV_POWER_STATE_OFF, IMG_FALSE);
+			PVR_LOGR_IF_ERROR(eError, "pfnPrePowerState ON");
+		}
+
+		if (psDeviceNode->psDevConfig->pfnPostPowerState)
+		{
+			eError = psDeviceNode->psDevConfig->pfnPostPowerState(hSysData, PVRSRV_DEV_POWER_STATE_ON,
+					PVRSRV_DEV_POWER_STATE_OFF, IMG_FALSE);
+			PVR_LOGR_IF_ERROR(eError, "pfnPostPowerState ON");
+		}
+
+		/* Read the BVNC using the new register layout first; if B is not set, fall back to the old scheme */
+		ui64ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID__PBVNC);
+
+		if (GET_B(ui64ID))
+		{
+			B = (ui64ID & ~RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK) >>
+													RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT;
+			V = (ui64ID & ~RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK) >>
+													RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT;
+			N = (ui64ID & ~RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK) >>
+													RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT;
+			C = (ui64ID & ~RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK) >>
+													RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT;
+
+		}
+		else
+		{
+			IMG_UINT64 ui64CoreID, ui64CoreRev;
+			ui64CoreRev = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_REVISION);
+			ui64CoreID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID);
+			B = (ui64CoreRev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK) >>
+													RGX_CR_CORE_REVISION_MAJOR_SHIFT;
+			V = (ui64CoreRev & ~RGX_CR_CORE_REVISION_MINOR_CLRMSK) >>
+													RGX_CR_CORE_REVISION_MINOR_SHIFT;
+			N = (ui64CoreID & ~RGX_CR_CORE_ID_CONFIG_N_CLRMSK) >>
+													RGX_CR_CORE_ID_CONFIG_N_SHIFT;
+			C = (ui64CoreID & ~RGX_CR_CORE_ID_CONFIG_C_CLRMSK) >>
+													RGX_CR_CORE_ID_CONFIG_C_SHIFT;
+		}
+		PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC \
+				" from HW device registers", B, V, N, C));
+
+		/* Power-down the device */
+		if (psDeviceNode->psDevConfig->pfnPrePowerState)
+		{
+			eError = psDeviceNode->psDevConfig->pfnPrePowerState(hSysData, PVRSRV_DEV_POWER_STATE_OFF,
+					PVRSRV_DEV_POWER_STATE_ON, IMG_FALSE);
+			PVR_LOGR_IF_ERROR(eError, "pfnPrePowerState OFF");
+		}
+
+		if (psDeviceNode->psDevConfig->pfnPostPowerState)
+		{
+			eError = psDeviceNode->psDevConfig->pfnPostPowerState(hSysData, PVRSRV_DEV_POWER_STATE_OFF,
+					PVRSRV_DEV_POWER_STATE_ON, IMG_FALSE);
+			PVR_LOGR_IF_ERROR(eError, "pfnPostPowerState OFF");
+		}
+
+		/* Extract the BVNC config from the Features table */
+		ui64BVNC = BVNC_PACK(B,0,N,C);
+		if (ui64BVNC != 0)
+		{
+			pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC);
+			PVR_LOG_IF_FALSE((pui64Cfg != NULL), "HW device BVNC configuration not found!");
+		}
+		else if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+		{
+			/*
+			 * On host OS we should not get here as CORE_ID should not be zero, so flag an error.
+			 * On older cores, guest OS only has CORE_ID if defined(RGX_FEATURE_COREID_PER_OS)
+			 */
+			PVR_LOG_ERROR(PVRSRV_ERROR_DEVICE_REGISTER_FAILED, "CORE_ID register returns zero. Unknown BVNC");
+		}
+	}
+#endif
+
+	if (NULL == pui64Cfg)
+	{
+		/* We reach here if the HW is not present,
+		 * or we are running in a guest OS with no COREID_PER_OS feature,
+		 * or HW is unstable during register read giving invalid values,
+		 * or runtime detection has been disabled - fall back to compile time BVNC
+		 */
+		B = RGX_BVNC_KM_B;
+		N = RGX_BVNC_KM_N;
+		C = RGX_BVNC_KM_C;
+		{
+			IMG_UINT32	ui32ScanCount = 0;
+			ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%u", &V);
+			if (1 != ui32ScanCount)
+			{
+				ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%up", &V);
+				if (1 != ui32ScanCount)
+				{
+					V = 0;
+				}
+			}
+		}
+		PVR_LOG(("Reverting to compile time BVNC %s", RGX_BVNC_KM));
+
+		/* Extract the BVNC config from the Features table */
+		ui64BVNC = BVNC_PACK(B,0,N,C);
+		pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC);
+		PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Compile time BVNC configuration not found!");
+	}
+
+	/* Have we failed to identify the BVNC to use? */
+	if (NULL == pui64Cfg)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: BVNC Detection and feature lookup failed. "
+		    "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC));
+		return PVRSRV_ERROR_BVNC_UNSUPPORTED;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC Feature found config: 0x%016"
+	    IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx " 0x%016"
+	    IMG_UINT64_FMTSPECx "\n",__func__, pui64Cfg[0], pui64Cfg[1],
+	    pui64Cfg[2]));
+
+	/* Parsing feature config depends on available features on the core
+	 * hence this parsing should always follow the above feature assignment */
+	psDevInfo->sDevFeatureCfg.ui64Features = pui64Cfg[1];
+	_RGXBvncParseFeatureValues(psDevInfo, pui64Cfg[2]);
+
+	/* Add 'V' to the packed BVNC value to get the BVNC ERN and BRN config. */
+	ui64BVNC = BVNC_PACK(B,V,N,C);
+	pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaErnsBrns, ui64BVNC);
+	if (NULL == pui64Cfg)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: BVNC ERN/BRN lookup failed. "
+		    "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx,	__func__, ui64BVNC));
+		psDevInfo->sDevFeatureCfg.ui64ErnsBrns = 0;
+		return PVRSRV_ERROR_BVNC_UNSUPPORTED;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC ERN/BRN Cfg: 0x%016" IMG_UINT64_FMTSPECx
+	    " 0x%016" IMG_UINT64_FMTSPECx, __func__, *pui64Cfg, pui64Cfg[1]));
+	psDevInfo->sDevFeatureCfg.ui64ErnsBrns = pui64Cfg[1];
+
+	psDevInfo->sDevFeatureCfg.ui32B = B;
+	psDevInfo->sDevFeatureCfg.ui32V = V;
+	psDevInfo->sDevFeatureCfg.ui32N = N;
+	psDevInfo->sDevFeatureCfg.ui32C = C;
+
+	/* Message to confirm configuration look up was a success */
+	PVR_LOG(("RGX Device registered with BVNC " RGX_BVNC_STR_FMTSPEC, \
+			B, V, N, C));
+
+	ui32RGXDevCnt++;
+
+#if defined(DEBUG)
+	_RGXBvncDumpParsedConfig(psDeviceNode);
+#endif
+	return PVRSRV_OK;
+}
+
+/*
+ * This function checks if a particular feature is available on the given RGX device
+ */
+IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	/* FIXME: need to implement a bounds check for passed feature mask */
+	if (psDevInfo->sDevFeatureCfg.ui64Features & ui64FeatureMask)
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+/*
+ * This function returns the value of a feature on the given RGX device
+ */
+IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	/* FIXME: need to implement a bounds check for passed feature mask */
+
+	if (eFeatureIndex >= RGX_FEATURE_WITH_VALUES_MAX_IDX)
+	{
+		return -1;
+	}
+
+	if (psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex] == RGX_FEATURE_VALUE_DISABLED)
+	{
+		return -1;
+	}
+
+	return psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex];
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxbvnc.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxbvnc.h
new file mode 100644
index 0000000..8f419c1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxbvnc.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@File
+@Title          BVNC handling specific header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the BVNC related work
+                (see hwdefs/km/rgx_bvnc_table_km.h and
+                hwdefs/km/rgx_bvnc_defs_km.h)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXBVNC_H__)
+#define __RGXBVNC_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "rgxdevice.h"
+
+/*************************************************************************/ /*!
+@brief		This function detects the Rogue variant and configures the
+			essential config info associated with such a device.
+			The config info includes features, errata, etc
+@param		psDeviceNode - Device Node pointer
+@return		PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*************************************************************************/ /*!
+@brief		This function checks if a particular feature is available on
+			the given rgx device
+@param		psDeviceNode - Device Node pointer
+@param		ui64FeatureMask - feature to be checked
+@return		IMG_TRUE if the feature is supported, IMG_FALSE otherwise
+*/ /**************************************************************************/
+IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask);
+
+/*************************************************************************/ /*!
+@brief		This function returns the value of a feature on the given
+			rgx device
+@param		psDeviceNode - Device Node pointer
+@param		ui64FeatureMask - feature for which to return the value
+@return		the value for the specified feature
+*/ /**************************************************************************/
+IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex);
+
+#endif /* __RGXBVNC_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxccb.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxccb.c
new file mode 100644
index 0000000..fb0d898
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxccb.c
@@ -0,0 +1,2580 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX CCB routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX CCB routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "rgxdevice.h"
+#include "pdump_km.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "rgxfwutils.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgx_memallocflags.h"
+#include "devicemem_pdump.h"
+#include "dllist.h"
+#if defined(LINUX)
+#include "trace_events.h"
+#endif
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "rgxutils.h"
+#include "info_page.h"
+
+/*
+ * Defines the number of fence updates to record so that future fences in the
+ * CCB can be checked to see if they are already known to be satisfied.
+ */
+#define RGX_CCCB_FENCE_UPDATE_LIST_SIZE  (32)
+
+#define RGX_UFO_PTR_ADDR(ufoptr) \
+	(((ufoptr)->puiAddrUFO.ui32Addr) & 0xFFFFFFFC)
+
+#define GET_CCB_SPACE(WOff, ROff, CCBSize) \
+	((((ROff) - (WOff)) + ((CCBSize) - 1)) & ((CCBSize) - 1))
+
+#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \
+	(Off) = (((Off) + (PacketSize)) & ((CCBSize) - 1))
+
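+/* These macros assume the CCB size is a power of two so that the
+ * "& (CCBSize - 1)" mask implements wrap-around. Worked example (sketch):
+ * with CCBSize = 0x1000, WOff = 0xF00 and ROff = 0x100, GET_CCB_SPACE gives
+ * ((0x100 - 0xF00) + 0xFFF) & 0xFFF = 0x1FF bytes, i.e. one byte less than
+ * the raw gap, keeping the full and empty states distinguishable. */
+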
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+
+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD 0x1
+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED 0x2
+
+typedef struct _RGX_CLIENT_CCB_UTILISATION_
+{
+	/* The threshold in bytes.
+	 * When the CCB utilisation hits the threshold we print
+	 * a warning message.
+	 */
+	IMG_UINT32 ui32ThresholdBytes;
+	/* Maximum cCCB usage at some point in time */
+	IMG_UINT32 ui32HighWaterMark;
+	/* keep track of the warnings already printed.
+	 * bit mask of PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_xyz
+	 */
+	IMG_UINT32 ui32Warnings;
+	/* Keep track of how many times the CCB was full.
+	 * Counters are reset after every grow.
+	 */
+	IMG_UINT32 ui32CCBFull;
+	IMG_UINT32 ui32CCBAcquired;
+} RGX_CLIENT_CCB_UTILISATION;
+
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+
+struct _RGX_CLIENT_CCB_ {
+	volatile RGXFWIF_CCCB_CTL	*psClientCCBCtrl;				/*!< CPU mapping of the CCB control structure used by the fw */
+	IMG_UINT8					*pui8ClientCCB;					/*!< CPU mapping of the CCB */
+	DEVMEM_MEMDESC 				*psClientCCBMemDesc;			/*!< MemDesc for the CCB */
+	DEVMEM_MEMDESC 				*psClientCCBCtrlMemDesc;		/*!< MemDesc for the CCB control */
+	IMG_UINT32					ui32HostWriteOffset;			/*!< CCB write offset from the driver side */
+	IMG_UINT32					ui32LastPDumpWriteOffset;		/*!< CCB write offset from the last time we submitted a command in capture range */
+	IMG_UINT32					ui32FinishedPDumpWriteOffset;	/*!< Trails LastPDumpWriteOffset for last finished command, used for HW CB driven DMs */
+	IMG_UINT32					ui32LastROff;					/*!< Last CCB Read offset to help detect any CCB wedge */
+	IMG_UINT32					ui32LastWOff;					/*!< Last CCB Write offset to help detect any CCB wedge */
+	IMG_UINT32					ui32ByteCount;					/*!< Count of the number of bytes written to CCCB */
+	IMG_UINT32					ui32LastByteCount;				/*!< Last value of ui32ByteCount to help detect any CCB wedge */
+	IMG_UINT32					ui32Size;						/*!< Size of the CCB */
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+	IMG_UINT32					ui32VirtualAllocSize;			/*!< Virtual size of the CCB */
+	IMG_PUINT32					pui32MappingTable;				/*!< Mapping table for sparse allocation of the CCB */
+#endif
+	DLLIST_NODE					sNode;							/*!< Node used to store this CCB on the per connection list */
+	PDUMP_CONNECTION_DATA		*psPDumpConnectionData;			/*!< Pointer to the per connection data in which we reside */
+	void						*hTransition;					/*!< Handle for Transition callback */
+	IMG_CHAR					szName[MAX_CLIENT_CCB_NAME];	/*!< Name of this client CCB */
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;			/*!< Parent server common context that this CCB belongs to */
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+	RGX_CCB_REQUESTOR_TYPE		eRGXCCBRequestor;
+	RGX_CLIENT_CCB_UTILISATION	sUtilisation;					/*!< CCB utilisation data */
+#endif
+#if defined(DEBUG)
+	IMG_UINT32					ui32UpdateEntries;				/*!< Number of Fence Updates in asFenceUpdateList */
+	RGXFWIF_UFO					asFenceUpdateList[RGX_CCCB_FENCE_UPDATE_LIST_SIZE];  /*!< List of recent updates written in this CCB */
+#endif
+	IMG_UINT32					ui32CCBFlags;					/*!< Bitmask for various flags relating to CCB. Bit defines in rgxccb.h */
+};
+
+/* Forms a table, with array of strings for each requestor type (listed in RGX_CCB_REQUESTORS X macro), to be used for
+   DevMemAllocation comments and PDump comments. Each tuple in the table consists of 3 strings:
+	{ "FwClientCCB:" <requestor_name>, "FwClientCCBControl:" <requestor_name>, <requestor_name> },
+   The first string being used as comment when allocating ClientCCB for the given requestor, the second for CCBControl
+   structure, and the 3rd one for use in PDUMP comments. The number of tuples in the table must adhere to the following
+   build assert. */
+IMG_CHAR *const aszCCBRequestors[][3] =
+{
+#define REQUESTOR_STRING(prefix,req) #prefix ":" #req
+#define FORM_REQUESTOR_TUPLE(req) { REQUESTOR_STRING(FwClientCCB,req), REQUESTOR_STRING(FwClientCCBControl,req), #req },
+	RGX_CCB_REQUESTORS(FORM_REQUESTOR_TUPLE)
+#undef FORM_REQUESTOR_TUPLE
+};
+
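+/* For instance, a requestor entry "TA" in RGX_CCB_REQUESTORS expands through
+ * the X-macro above to the tuple (sketch, assuming "TA" is one of the listed
+ * requestors):
+ *     { "FwClientCCB:TA", "FwClientCCBControl:TA", "TA" },
+ */
+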
+PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB,
+						IMG_UINT32 ui32PDumpFlags)
+{
+
+	IMG_UINT32 ui32PollOffset;
+
+	if (BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN))
+	{
+		/* Draining CCB on a command that hasn't finished, and FW isn't expected
+		 * to have updated Roff up to Woff. Only drain to the first
+		 * finished command prior to this. The Roff for this
+		 * is stored in ui32FinishedPDumpWriteOffset.
+		 */
+		ui32PollOffset = psClientCCB->ui32FinishedPDumpWriteOffset;
+
+		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+							  "cCCB(%s@%p): Draining open CCB rgxfw_roff < woff (%d)",
+							  psClientCCB->szName,
+							  psClientCCB,
+							  ui32PollOffset);
+	}
+	else
+	{
+		/* All commands in this CCB stream have finished, so drain the FW
+		 * of any remaining commands until R==W.
+		 */
+		ui32PollOffset = psClientCCB->ui32LastPDumpWriteOffset;
+
+		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+							  "cCCB(%s@%p): Draining CCB rgxfw_roff == woff (%d)",
+							  psClientCCB->szName,
+							  psClientCCB,
+							  ui32PollOffset);
+	}
+
+	return DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc,
+									offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+									ui32PollOffset,
+									0xffffffff,
+									PDUMP_POLL_OPERATOR_EQUAL,
+									ui32PDumpFlags);
+}
+
+/******************************************************************************
+ FUNCTION	: RGXCCBPDumpSyncCCB
+
+ PURPOSE	: Synchronise Client CCBs from both live and playback contexts.
+			  Waits for the live-FW to empty the live-CCB.
+			  Waits for the sim-FW to empty the sim-CCB by adding a POL.
+
+ PARAMETERS	: psClientCCB		- The client CCB
+			  ui32PDumpFlags    - PDump flags
+
+ RETURNS	: PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXCCBPDumpSyncCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	/* Wait for the live FW to catch up/empty CCB. This is done by returning
+	 * retry which will get pushed back out to Services client where it
+	 * waits on the event object and then resubmits the command.
+	 */
+	if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset)
+	{
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	/* Wait for the sim FW to catch up/empty sim CCB.
+	 * We drain whenever capture range is entered, even if no commands
+	 * have been issued on this CCB when out of capture range. We have to
+	 * wait for commands that might have been issued in the last capture
+	 * range to finish so the connection's sync block snapshot dumped after
+	 * all the PDumpTransition callbacks have been executed doesn't clobber
+	 * syncs which the sim FW is currently working on.
+	 *
+	 * Although this is sub-optimal for play-back - while out of capture
+	 * range for every continuous operation we synchronise the sim
+	 * play-back processing the script and the sim FW, there is no easy
+	 * solution. Not all modules that work with syncs register a
+	 * PDumpTransition callback and thus we have no way of knowing if we
+	 * can skip this sim CCB drain and sync block dump or not.
+	 */
+
+	eError = RGXCCBPDumpDrainCCB(psClientCCB, ui32PDumpFlags);
+	PVR_LOG_IF_ERROR(eError, "RGXCCBPDumpDrainCCB");
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Live CCB and simulation CCB now empty, FW idle on CCB in both
+	 * contexts.
+	 */
+	return PVRSRV_OK;
+}
+
+/******************************************************************************
+ FUNCTION	: RGXCCBPDumpFastForwardCCB
+
+ PURPOSE	: Fast-forward sim-CCB and live-CCB offsets to live app-thread
+        	  values.
+        	   This helps to skip any commands submitted when out of capture
+        	   range and start with first command in capture range in both
+        	   live and playback contexts. In case of Block mode, this helps
+        	   to playback any intermediate PDump block directly after first
+        	   block.
+
+
+ PARAMETERS	: psClientCCB		- The client CCB
+			  ui32PDumpFlags    - PDump flags
+
+ RETURNS	: void
+******************************************************************************/
+static void RGXCCBPDumpFastForwardCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags)
+{
+	volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl;
+
+	/* Make sure that we have synced live-FW and live-App threads */
+	PVR_ASSERT(psCCBCtl->ui32ReadOffset == psClientCCB->ui32HostWriteOffset);
+
+	psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
+	psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
+	psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset;
+
+	PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+			"cCCB(%s@%p): Fast-forward from %d to %d",
+			psClientCCB->szName,
+			psClientCCB,
+			psClientCCB->ui32LastPDumpWriteOffset,
+			psClientCCB->ui32HostWriteOffset);
+
+	DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+			0,
+			sizeof(RGXFWIF_CCCB_CTL),
+			ui32PDumpFlags);
+
+	/* Although we've entered capture range for this process connection,
+	 * we might not do any work on this CCB, so update
+	 * ui32LastPDumpWriteOffset to reflect where we got to, ensuring the
+	 * next drain starts from the same point.
+	 */
+	psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
+
+}
+
+static PVRSRV_ERROR _RGXCCBPDumpTransition(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags)
+{
+	RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData;
+#if defined(PDUMP)
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) pvDevice;
+#endif
+	PVRSRV_ERROR eError;
+
+	/* Block mode:
+	 * Here is the block structure at a transition (ui32BlockLength = N frames):
+	 *
+	 * ...
+	 * ...
+	 * PDUMP_BLOCK_START_0x0000000x{
+	 *   <Fast-forward sim-CCCB>
+	 *   <Re-dump SyncBlocks>
+	 *   ...
+	 *   ...
+	 *   ... (N frames data)
+	 *   ...
+	 *   ...
+	 *   <(1) Drain sim-KCCB>                     ''|
+	 *   <(2) Sync live and sim CCCB>               |
+	 * }PDUMP_BLOCK_END_0x0000000x                  | <- BlockTransition Steps
+	 *   <(3) Split MAIN and BLOCK stream script>   |
+	 * PDUMP_BLOCK_START_0x0000000y{                |
+	 *   <(4) Fast-forward sim-CCCB>                |
+	 *   <(5) Re-dump SyncBlocks>                 ,,|
+	 *   ...
+	 *   ...
+	 *   ... (N frames data)
+	 *   ...
+	 *   ...
+	 *   <Drain sim-KCCB>
+	 *   <Sync live and sim CCCB>
+	 * }PDUMP_BLOCK_END_0x0000000y
+	 * ...
+	 * ...
+	 *
+	 * Steps (3) and (5) are done in pdump_server.c
+	 */
+	switch (eEvent)
+	{
+		case PDUMP_TRANSITION_EVENT_RANGE_ENTERED:
+			{
+				/* We're about to transition into capture range and we've submitted
+				 * new commands since the last time we entered capture range so drain
+				 * the live CCB and simulation (sim) CCB as required, i.e. leave CCB
+				 * idle in both live and sim contexts.
+				 * This requires the host driver to ensure the live FW & the sim FW
+				 * have both emptied out the remaining commands until R==W (CCB empty).
+				 */
+
+				eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags);
+				PVR_RETURN_IF_ERROR(eError);
+
+				if (psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset)
+				{
+					/* If new commands have been written when out of capture range in
+					 * the live CCB then we need to fast forward the sim CCBCtl
+					 * offsets past uncaptured commands. This is done by PDUMPing
+					 * the CCBCtl memory to align sim values with the live CCBCtl
+					 * values. Both live and sim FWs can start with the 1st command
+					 * which is in the new capture range.
+					 */
+					RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags);
+				}
+				break;
+			}
+		case PDUMP_TRANSITION_EVENT_RANGE_EXITED:
+			{
+				/* Nothing to do */
+				break;
+			}
+		case PDUMP_TRANSITION_EVENT_BLOCK_FINISHED:
+			{
+				/* (1) Drain the KCCB from the current block before starting a new one:
+				 *
+				 * At playback, this ensures that the sim FW drains all KCCB commands
+				 * belonging to the current block before 'jumping' to any future commands
+				 * (from the next block). This synchronises the script-thread and sim-FW
+				 * thread KCCBs at the end of each pdump block.
+				 *
+				 * It additionally forces a redump of the KCCBCtl structure at the start
+				 * of the next block.
+				 */
+
+#if defined(PDUMP)
+				eError = RGXPdumpDrainKCCB(psDevInfo, psDevInfo->psKernelCCBCtl->ui32WriteOffset);
+				PVR_LOGR_IF_ERROR(eError, "RGXPdumpDrainKCCB");
+#endif
+
+				/* (2) Synchronise Client CCBs from live and playback contexts before starting a new block:
+				 *
+				 * This operation will:
+				 * a. Force synchronisation between the app thread and the live-FW thread
+				 *    (i.e. wait for the live FW to empty the live Client CCB).
+				 *
+				 * b. Dump a poll command to drain the Client CCB at the end of every
+				 *    pdump block. At playback time this synchronises the sim-FW and
+				 *    script-thread Client CCBs at the end of each block.
+				 *
+				 * This ensures that all CCB commands from the current block are processed
+				 * before moving on to future commands.
+				 */
+
+				eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags);
+				PVR_RETURN_IF_ERROR(eError);
+				break;
+			}
+		case PDUMP_TRANSITION_EVENT_BLOCK_STARTED:
+			{
+				/* (4) Fast-forward CCB write offsets to the current live values:
+				 *
+				 * Having already synchronised the live FW and the app thread at the end
+				 * of each block (Step 2a above), fast-forward the Client CCBCtl offsets
+				 * to the current app-thread values at the start of every block. This
+				 * allows us to skip any intermediate pdump blocks and start with the
+				 * last (or any subsequent) block immediately after the first pdump block.
+				 */
+
+				RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags);
+				break;
+			}
+		case PDUMP_TRANSITION_EVENT_NONE:
+			/* Invalid event for transition */
+		default:
+			{
+				/* Unknown Transition event */
+				return PVRSRV_ERROR_INVALID_PARAMS;
+			}
+	}
+	return PVRSRV_OK;
+}
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+
+static INLINE void _RGXInitCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+	psClientCCB->sUtilisation.ui32HighWaterMark = 0; /* initialise the high watermark to zero */
+	psClientCCB->sUtilisation.ui32ThresholdBytes = (psClientCCB->ui32Size *
+							PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD) / 100;
+	psClientCCB->sUtilisation.ui32Warnings = 0;
+	psClientCCB->sUtilisation.ui32CCBAcquired = 0;
+	psClientCCB->sUtilisation.ui32CCBFull = 0;
+}
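+
+/* Illustrative numbers (assumed, not from this driver): with a 64 KiB CCB and
+ * PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD == 90, the threshold computed
+ * above is (65536 * 90) / 100 = 58982 bytes, so the one-time warning fires
+ * once the high watermark reaches ~90% of the buffer.
+ */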
+
+static INLINE void _RGXPrintCCBUtilisationWarning(RGX_CLIENT_CCB *psClientCCB,
+									IMG_UINT32 ui32WarningType,
+									IMG_UINT32 ui32CmdSize)
+{
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE)
+	if (ui32WarningType == PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED)
+	{
+		PVR_LOG(("Failed to acquire CCB space for %u byte command:", ui32CmdSize));
+	}
+
+	PVR_LOG(("%s: Client CCB (%s) watermark (%u) hit %d%% of its allocation size (%u)",
+								__func__,
+								psClientCCB->szName,
+								psClientCCB->sUtilisation.ui32HighWaterMark,
+								psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size,
+								psClientCCB->ui32Size));
+#else
+	PVR_UNREFERENCED_PARAMETER(ui32WarningType);
+	PVR_UNREFERENCED_PARAMETER(ui32CmdSize);
+
+	PVR_LOG(("GPU %s command buffer usage high (%u). This is not an error but the application may not run optimally.",
+							aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+							psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size));
+#endif
+}
+
+static INLINE void _RGXCCBUtilisationEvent(RGX_CLIENT_CCB *psClientCCB,
+						IMG_UINT32 ui32WarningType,
+						IMG_UINT32 ui32CmdSize)
+{
+	/* In VERBOSE mode we print a message for each different event type
+	 * as it happens, but by default we only issue one message.
+	 */
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE)
+	if (!(psClientCCB->sUtilisation.ui32Warnings & ui32WarningType))
+#else
+	if (!psClientCCB->sUtilisation.ui32Warnings)
+#endif
+	{
+		_RGXPrintCCBUtilisationWarning(psClientCCB,
+						ui32WarningType,
+						ui32CmdSize);
+		/* record that we have issued a warning of this type */
+		psClientCCB->sUtilisation.ui32Warnings |= ui32WarningType;
+	}
+}
+
+/* Check the current CCB utilisation. Print a one-time warning message if it is above the
+ * specified threshold
+ */
+static INLINE void _RGXCheckCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+	/* Print a warning message if the cCCB watermark is above the threshold value */
+	if (psClientCCB->sUtilisation.ui32HighWaterMark >= psClientCCB->sUtilisation.ui32ThresholdBytes)
+	{
+		_RGXCCBUtilisationEvent(psClientCCB,
+					PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD,
+					0);
+	}
+}
+
+/* Update the cCCB high watermark level if necessary */
+static void _RGXUpdateCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+	IMG_UINT32 ui32FreeSpace, ui32MemCurrentUsage;
+
+	ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+									  psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+									  psClientCCB->ui32Size);
+	ui32MemCurrentUsage = psClientCCB->ui32Size - ui32FreeSpace;
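+
+	/* Worked example, assuming the usual power-of-two ring arithmetic
+	 * GET_CCB_SPACE(WOff, ROff, Size) == ((ROff - WOff) + (Size - 1)) & (Size - 1)
+	 * (the macro is defined elsewhere in this file): with Size = 0x1000,
+	 * WOff = 0x300 and ROff = 0x100, free space is 0xDFF bytes; one byte is
+	 * always kept unused so that R == W unambiguously means "empty". Usage
+	 * here is then 0x1000 - 0xDFF = 0x201 bytes.
+	 */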
+
+	if (ui32MemCurrentUsage > psClientCCB->sUtilisation.ui32HighWaterMark)
+	{
+		psClientCCB->sUtilisation.ui32HighWaterMark = ui32MemCurrentUsage;
+
+		/* The high water mark has increased. Check if it is above the
+		 * threshold so we can print a warning if necessary.
+		 */
+		_RGXCheckCCBUtilisation(psClientCCB);
+	}
+}
+
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+
+PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO	*psDevInfo,
+						  IMG_UINT32			ui32CCBSizeLog2,
+						  IMG_UINT32			ui32CCBMaxSizeLog2,
+						  CONNECTION_DATA		*psConnectionData,
+						  RGX_CCB_REQUESTOR_TYPE		eRGXCCBRequestor,
+						  RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+						  RGX_CLIENT_CCB		**ppsClientCCB,
+						  DEVMEM_MEMDESC 		**ppsClientCCBMemDesc,
+						  DEVMEM_MEMDESC 		**ppsClientCCBCtrlMemDesc)
+{
+	PVRSRV_ERROR	eError;
+	DEVMEM_FLAGS_T	uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags;
+	IMG_UINT32		ui32AllocSize = (1U << ui32CCBSizeLog2);
+	RGX_CLIENT_CCB	*psClientCCB;
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+	IMG_UINT32		ui32FWLog2PageSize = DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap);
+	IMG_UINT32		ui32FWPageSize = (1U << ui32FWLog2PageSize);
+	IMG_UINT32		ui32NumPages = ui32AllocSize / ui32FWPageSize;
+	IMG_UINT32		ui32VirtualAllocSize = (1U << ui32CCBMaxSizeLog2);
+	IMG_UINT32		ui32NumVirtPages = ui32VirtualAllocSize / ui32FWPageSize;
+	IMG_UINT32		i;
+#else
+	PVR_UNREFERENCED_PARAMETER(ui32CCBMaxSizeLog2);
+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */
+
+	/* All client CCBs must be at least the "minimum" size and must not exceed the "maximum" */
+	if ((ui32CCBSizeLog2 < MIN_SAFE_CCB_SIZE_LOG2) ||
+		(ui32CCBSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateCCBKM: %s CCB size is invalid (%d). Should be from %d to %d",
+			aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], ui32CCBSizeLog2, MIN_SAFE_CCB_SIZE_LOG2, MAX_SAFE_CCB_SIZE_LOG2));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+	if ((ui32CCBMaxSizeLog2 < ui32CCBSizeLog2) ||
+		(ui32CCBMaxSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateCCBKM: %s CCB maximum size is invalid (%d). Should be from %d to %d",
+			aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], ui32CCBMaxSizeLog2, ui32CCBSizeLog2, MAX_SAFE_CCB_SIZE_LOG2));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#endif
+
+	psClientCCB = OSAllocMem(sizeof(*psClientCCB));
+	if (psClientCCB == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+	psClientCCB->psServerCommonContext = psServerCommonContext;
+
+	uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+								PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+								PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+								PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+								PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+								PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+								PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+	uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+								PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+								PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+								PVRSRV_MEMALLOCFLAG_UNCACHED |
+								PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+								PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+	/* Initialise the CCB flags; the structure is not zeroed on allocation
+	 * and bits are set and tested on this field below.
+	 */
+	psClientCCB->ui32CCBFlags = 0;
+
+	/* If the connection data indicates that Sync Lockup Recovery (SLR)
+	 * should be disabled, record this in ui32CCBFlags.
+	 */
+	if (psConnectionData->ui32ClientFlags & SRV_FLAGS_CLIENT_SLR_DISABLED)
+	{
+		BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED);
+	}
+
+	PDUMPCOMMENT("Allocate RGXFW cCCB");
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+	psClientCCB->ui32VirtualAllocSize = ui32VirtualAllocSize;
+
+	/*
+	 * Growing the CCB doubles its size, so the final grow requires only
+	 * ui32NumVirtPages/2 new pages because the other ui32NumVirtPages/2 are
+	 * already allocated. The initial page count can be higher (when the CCB
+	 * size equals the maximum CCB size), hence the MAX below.
+	 */
+	psClientCCB->pui32MappingTable = OSAllocMem(MAX(ui32NumPages, ui32NumVirtPages/2) * sizeof(IMG_UINT32));
+	if (psClientCCB->pui32MappingTable == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc_mtable;
+	}
+	for (i = 0; i < ui32NumPages; i++)
+	{
+		psClientCCB->pui32MappingTable[i] = i;
+	}
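+
+	/* Illustrative example (sizes assumed): with a 64 KiB initial CCB and
+	 * 4 KiB firmware pages, ui32NumPages == 16 and the loop above produces
+	 * pui32MappingTable[0..15] = 0..15, i.e. an identity mapping of virtual
+	 * CCB pages to backed pages. Each grow (see _RGXCCBMemChangeSparse)
+	 * then backs the next run of virtual pages after the current allocation.
+	 */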
+
+	if (IsPhysmemNewRamBackedByLMA(psDevInfo->psDeviceNode, PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL))
+	{
+		/*
+		 * On LMA systems sparse memory cannot be mapped into the kernel.
+		 * To work around this, the whole CCB memory is allocated up front
+		 * as contiguous memory.
+		 */
+		eError = DevmemFwAllocate(psDevInfo,
+								ui32VirtualAllocSize,
+								uiClientCCBMemAllocFlags,
+								aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING],
+								&psClientCCB->psClientCCBMemDesc);
+	}
+	else
+	{
+		eError = DevmemFwAllocateSparse(psDevInfo,
+										ui32VirtualAllocSize,
+										ui32FWPageSize,
+										ui32NumPages,
+										ui32NumVirtPages,
+										psClientCCB->pui32MappingTable,
+										uiClientCCBMemAllocFlags,
+										aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING],
+										&psClientCCB->psClientCCBMemDesc);
+	}
+
+#else /* defined(PVRSRV_ENABLE_CCCB_GROW) */
+	eError = DevmemFwAllocate(psDevInfo,
+							ui32AllocSize,
+							uiClientCCBMemAllocFlags,
+							aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING],
+							&psClientCCB->psClientCCBMemDesc);
+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB (%s)",
+				PVRSRVGetErrorString(eError)));
+		goto fail_alloc_ccb;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc,
+									  (void **) &psClientCCB->pui8ClientCCB);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB (%s)",
+				PVRSRVGetErrorString(eError)));
+		goto fail_map_ccb;
+	}
+
+	PDUMPCOMMENT("Allocate RGXFW cCCB control");
+	eError = DevmemFwAllocate(psDevInfo,
+										sizeof(RGXFWIF_CCCB_CTL),
+										uiClientCCBCtlMemAllocFlags,
+										aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING],
+										&psClientCCB->psClientCCBCtrlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB control (%s)",
+				PVRSRVGetErrorString(eError)));
+		goto fail_alloc_ccbctrl;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc,
+									  (void **) &psClientCCB->psClientCCBCtrl);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB control (%s)",
+				PVRSRVGetErrorString(eError)));
+		goto fail_map_ccbctrl;
+	}
+
+	psClientCCB->psClientCCBCtrl->ui32WriteOffset = 0;
+	psClientCCB->psClientCCBCtrl->ui32ReadOffset = 0;
+	psClientCCB->psClientCCBCtrl->ui32DepOffset = 0;
+	psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1;
+	OSSNPrintf(psClientCCB->szName, MAX_CLIENT_CCB_NAME, "%s-P%lu-T%lu-%s",
+									aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+									(unsigned long) OSGetCurrentClientProcessIDKM(),
+									(unsigned long) OSGetCurrentClientThreadIDKM(),
+									OSGetCurrentClientProcessNameKM());
+
+	PDUMPCOMMENT("cCCB control");
+	DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+					   0,
+					   sizeof(RGXFWIF_CCCB_CTL),
+					   PDUMP_FLAGS_CONTINUOUS);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	psClientCCB->ui32HostWriteOffset = 0;
+	psClientCCB->ui32LastPDumpWriteOffset = 0;
+	psClientCCB->ui32FinishedPDumpWriteOffset = 0;
+	psClientCCB->ui32Size = ui32AllocSize;
+	psClientCCB->ui32LastROff = ui32AllocSize - 1;
+	psClientCCB->ui32ByteCount = 0;
+	psClientCCB->ui32LastByteCount = 0;
+	BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN);
+
+#if defined(DEBUG)
+	psClientCCB->ui32UpdateEntries = 0;
+#endif
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+	_RGXInitCCBUtilisation(psClientCCB);
+	psClientCCB->eRGXCCBRequestor = eRGXCCBRequestor;
+#endif
+	eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData,
+											  _RGXCCBPDumpTransition,
+											  psClientCCB,
+											  psDevInfo,
+											  &psClientCCB->hTransition);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_pdumpreg;
+	}
+
+	/*
+	 * Note:
+	 * Save the PDump specific structure, which is ref counted unlike
+	 * the connection data, to ensure it's not freed too early
+	 */
+	psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData;
+	PDUMPCOMMENT("New RGXFW cCCB(%s@%p) created",
+				 psClientCCB->szName,
+				 psClientCCB);
+
+	*ppsClientCCB = psClientCCB;
+	*ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc;
+	*ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc;
+	return PVRSRV_OK;
+
+fail_pdumpreg:
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
+fail_map_ccbctrl:
+	DevmemFwFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc);
+fail_alloc_ccbctrl:
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+fail_map_ccb:
+	DevmemFwFree(psDevInfo, psClientCCB->psClientCCBMemDesc);
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+fail_alloc_ccb:
+	OSFreeMem(psClientCCB->pui32MappingTable);
+fail_alloc_mtable:
+#else
+fail_alloc_ccb:
+#endif
+	OSFreeMem(psClientCCB);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB)
+{
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+	if (psClientCCB->sUtilisation.ui32CCBFull)
+	{
+		PVR_LOG(("CCBUtilisationInfo: GPU %s command buffer was full %d times out of %d. "
+				"This is not an error but the application may not run optimally.",
+				aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+				psClientCCB->sUtilisation.ui32CCBFull,
+				psClientCCB->sUtilisation.ui32CCBAcquired));
+	}
+#endif
+	PDumpUnregisterTransitionCallback(psClientCCB->hTransition);
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
+	DevmemFwFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc);
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+	DevmemFwFree(psDevInfo, psClientCCB->psClientCCBMemDesc);
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+	OSFreeMem(psClientCCB->pui32MappingTable);
+#endif
+	OSFreeMem(psClientCCB);
+}
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+static PVRSRV_ERROR _RGXCCBMemChangeSparse(RGX_CLIENT_CCB *psClientCCB,
+										  IMG_UINT32 ui32AllocPageCount)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32	 i;
+
+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+#endif
+
+	for (i = 0; i < ui32AllocPageCount; i++)
+	{
+		psClientCCB->pui32MappingTable[i] = ui32AllocPageCount + i;
+	}
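+
+	/* Sketch of the table produced above (page counts assumed): if the CCB
+	 * currently has 16 backed pages, ui32AllocPageCount == 16 and the table
+	 * becomes pui32MappingTable[0..15] = 16..31, i.e. the new pages back
+	 * virtual pages 16..31, doubling the backed area of the sparse
+	 * allocation.
+	 */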
+
+	/* Double the CCB size (CCB must be POT) by adding ui32AllocPageCount new pages */
+	eError = DeviceMemChangeSparse(psClientCCB->psClientCCBMemDesc,
+									ui32AllocPageCount,
+									psClientCCB->pui32MappingTable,
+									0,
+									NULL,
+									SPARSE_RESIZE_ALLOC | SPARSE_MAP_CPU_ADDR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXAcquireCCB: Failed to grow RGX client CCB (%s)",
+				PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+	eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc,
+									(void **) &psClientCCB->pui8ClientCCB);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXAcquireCCB: Failed to map RGX client CCB (%s)",
+				PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */
+
+/******************************************************************************
+ FUNCTION	: RGXAcquireCCB
+
+ PURPOSE	: Obtains access to write some commands to a CCB
+
+ PARAMETERS	: psClientCCB		- The client CCB
+			  ui32CmdSize		- How much space is required
+			  ppvBufferSpace	- Pointer to space in the buffer
+			  ui32PDumpFlags - Should this be PDump continuous?
+
+ RETURNS	: PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
+										IMG_UINT32		ui32CmdSize,
+										void			**ppvBufferSpace,
+										IMG_UINT32		ui32PDumpFlags)
+{
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+	IMG_UINT32	ui32RetryCount = 2;
+#endif
+
+#if defined(PDUMP)
+	PVRSRV_ERROR eError;
+	IMG_BOOL	bInCaptureRange;
+	IMG_BOOL	bPdumpEnabled;
+	IMG_UINT64	ui64PDumpState = 0;
+
+	PDumpGetStateKM(&ui64PDumpState);
+	PDumpIsCaptureFrameKM(&bInCaptureRange);
+	bPdumpEnabled = (ui64PDumpState & PDUMP_STATE_CONNECTED) != 0
+		&& (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags));
+
+	/*
+		PDumpSetFrame will detect the Transition into capture range for
+		frame-based data, but if we are PDumping continuous data then we
+		need to inform the PDump layer ourselves.
+	*/
+	if ((ui64PDumpState & PDUMP_STATE_CONNECTED) != 0
+		&& PDUMP_IS_CONTINUOUS(ui32PDumpFlags)
+		&& !bInCaptureRange)
+	{
+		eError = PDumpTransition(psClientCCB->psPDumpConnectionData, PDUMP_TRANSITION_EVENT_RANGE_ENTERED, ui32PDumpFlags);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+#endif
+
+	/* Check that the CCB can hold this command + padding */
+	if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)",
+								ui32CmdSize, psClientCCB->ui32Size));
+		return PVRSRV_ERROR_CMD_TOO_BIG;
+	}
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+	while (ui32RetryCount--)
+#endif
+	{
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+		psClientCCB->sUtilisation.ui32CCBAcquired++;
+#endif
+
+		/*
+			Check we don't overflow the end of the buffer and make sure we have
+			enough space for the padding command. If we don't have enough space
+			(including the minimum amount for the padding command) we need to
+			insert a padding command now and wrap before adding the main command.
+		*/
+		if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size)
+		{
+			/* The command can fit without wrapping... */
+			IMG_UINT32 ui32FreeSpace;
+
+#if defined(PDUMP)
+			/* Wait for sufficient CCB space to become available */
+			PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
+									ui32CmdSize, psClientCCB->ui32HostWriteOffset,
+									psClientCCB->szName);
+			DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+						offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+						psClientCCB->ui32HostWriteOffset,
+						ui32CmdSize,
+						psClientCCB->ui32Size);
+#endif
+
+			ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+										psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+										psClientCCB->ui32Size);
+
+			/* Can command fit? */
+			if (ui32FreeSpace > ui32CmdSize)
+			{
+				*ppvBufferSpace = (void *) (psClientCCB->pui8ClientCCB +
+											psClientCCB->ui32HostWriteOffset);
+				return PVRSRV_OK;
+			}
+			/* There is not enough free space in CCB. */
+			goto e_retry;
+		}
+		else
+		{
+			/*
+				We're at the end of the buffer without enough contiguous space.
+				The command cannot fit without wrapping, we need to insert a
+				padding command and wrap. We need to do this in one go otherwise
+				we would be leaving unflushed commands and forcing the client to
+				deal with flushing the padding command but not the command they
+				wanted to write. Therefore we either do all or nothing.
+			*/
+			RGXFWIF_CCB_CMD_HEADER *psHeader;
+			IMG_UINT32 ui32FreeSpace;
+			IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+			{
+				ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+											psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+											psClientCCB->ui32Size);
+				/*
+				 * Check whether the CCB should grow or be wrapped.
+				 * Wrap the CCB if there is no need to grow (the CCB is at least
+				 * half empty) or the CCB cannot grow, and there is free space
+				 * for the command plus padding.
+				 */
+				if (((ui32FreeSpace > psClientCCB->ui32Size/2) || (psClientCCB->ui32Size == psClientCCB->ui32VirtualAllocSize)) &&
+					(ui32FreeSpace > ui32Remain + ui32CmdSize))
+				{
+					/* Wrap CCB */
+					psHeader = (void *) (psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset);
+					psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING;
+					psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER);
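+
+					/* Worked example (offsets and header size assumed): with
+					 * ui32Size = 0x1000, ui32HostWriteOffset = 0xF80 and a
+					 * 16-byte RGXFWIF_CCB_CMD_HEADER, ui32Remain = 0x80 and the
+					 * padding payload is 0x80 - 16 = 0x70 bytes. The FW skips
+					 * the padding command and the caller's command is written
+					 * at offset 0 after the wrap.
+					 */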
+
+#if defined(PDUMP)
+					PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize);
+					if (bPdumpEnabled)
+					{
+						DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+										psClientCCB->ui32HostWriteOffset,
+										ui32Remain,
+										ui32PDumpFlags);
+					}
+#endif
+
+					*ppvBufferSpace = (void *) (psClientCCB->pui8ClientCCB +
+												0 /*ui32HostWriteOffset after wrap */);
+					return PVRSRV_OK;
+				}
+				else if ((psClientCCB->ui32Size < psClientCCB->ui32VirtualAllocSize) &&
+					(psClientCCB->ui32HostWriteOffset >= psClientCCB->psClientCCBCtrl->ui32ReadOffset))
+				{
+					/* Grow CCB */
+					PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext);
+
+					/* Something went wrong if we are here a second time */
+					PVR_ASSERT(ui32RetryCount != 0);
+
+					/*
+					 * On LMA systems sparse memory cannot be mapped into the
+					 * kernel. To work around this, the whole CCB memory was
+					 * allocated up front as contiguous, in which case the sparse
+					 * change below is not needed because the memory is already
+					 * allocated.
+					 */
+					if (!IsPhysmemNewRamBackedByLMA(psDevInfo->psDeviceNode, PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL))
+					{
+						IMG_UINT32 ui32FWPageSize = 1U << DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap);
+						IMG_UINT32 ui32AllocPageCount = psClientCCB->ui32Size / ui32FWPageSize;
+
+						_RGXCCBMemChangeSparse(psClientCCB, ui32AllocPageCount);
+					}
+
+					/* Setup new CCB size */
+					psClientCCB->psClientCCBCtrl->ui32WrapMask += psClientCCB->ui32Size;
+					psClientCCB->ui32Size += psClientCCB->ui32Size;
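+
+					/* Worked example (sizes assumed): growing a 4 KiB CCB
+					 * doubles it to 8 KiB; ui32WrapMask goes from 0x0FFF to
+					 * 0x0FFF + 0x1000 = 0x1FFF, which is again (size - 1) as
+					 * required by the power-of-two offset masking.
+					 */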
+
+#if defined(PDUMP)
+					PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB update for grow");
+					if (bPdumpEnabled)
+					{
+						DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+											offsetof(RGXFWIF_CCCB_CTL, ui32WrapMask),
+											sizeof(psClientCCB->psClientCCBCtrl->ui32WrapMask),
+											ui32PDumpFlags);
+						DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+											offsetof(RGX_CLIENT_CCB, ui32Size),
+											sizeof(psClientCCB->ui32Size),
+											ui32PDumpFlags);
+					}
+#endif
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+					PVR_LOG(("%s: Client CCB (%s) grew to %u", __func__, psClientCCB->szName, psClientCCB->ui32Size));
+					/* Reset counters */
+					_RGXInitCCBUtilisation(psClientCCB);
+#endif
+
+					/* The CCB has doubled in size, so retry now. */
+				}
+				else
+				{
+					/* CCB can't grow anymore and can't be wrapped */
+#if defined(PDUMP)
+					/* Wait for sufficient CCB space to become available */
+					PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
+											ui32Remain, psClientCCB->ui32HostWriteOffset,
+											psClientCCB->szName);
+					DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+								offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+								psClientCCB->ui32HostWriteOffset,
+								ui32Remain,
+								psClientCCB->ui32Size);
+					PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
+											ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */,
+											psClientCCB->szName);
+					DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+								offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+								0 /*ui32HostWriteOffset after wrap */,
+								ui32CmdSize,
+								psClientCCB->ui32Size);
+					/* The CCB now has space for our command, so retry and wrap again. */
+#else /* defined(PDUMP) */
+					goto e_retry;
+#endif /* defined(PDUMP) */
+				}
+			}
+#else /* defined(PVRSRV_ENABLE_CCCB_GROW) */
+#if defined(PDUMP)
+			/* Wait for sufficient CCB space to become available */
+			PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
+									ui32Remain, psClientCCB->ui32HostWriteOffset,
+									psClientCCB->szName);
+			DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+						offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+						psClientCCB->ui32HostWriteOffset,
+						ui32Remain,
+						psClientCCB->ui32Size);
+			PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
+									ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */,
+									psClientCCB->szName);
+			DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+						offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+						0 /*ui32HostWriteOffset after wrap */,
+						ui32CmdSize,
+						psClientCCB->ui32Size);
+#endif
+			ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+										psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+										psClientCCB->ui32Size);
+
+			if (ui32FreeSpace > ui32Remain + ui32CmdSize)
+			{
+				psHeader = (void *) (psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset);
+				psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING;
+				psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER);
+#if defined(PDUMP)
+				PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize);
+				if (bPdumpEnabled)
+				{
+					DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+									psClientCCB->ui32HostWriteOffset,
+									ui32Remain,
+									ui32PDumpFlags);
+				}
+#endif
+
+				*ppvBufferSpace = (void *) (psClientCCB->pui8ClientCCB +
+											0 /*ui32HostWriteOffset after wrap */);
+				return PVRSRV_OK;
+			}
+
+			goto e_retry;
+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */
+		}
+	}
+e_retry:
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+	psClientCCB->sUtilisation.ui32CCBFull++;
+	_RGXCCBUtilisationEvent(psClientCCB,
+				PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED,
+				ui32CmdSize);
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+	return PVRSRV_ERROR_RETRY;
+}
+
+/******************************************************************************
+ FUNCTION	: RGXReleaseCCB
+
+ PURPOSE	: Release a CCB that we have been writing to.
+
+ PARAMETERS	: psDevData			- device data
+			  psCCB				- the CCB
+
+ RETURNS	: None
+******************************************************************************/
+void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
+								IMG_UINT32		ui32CmdSize,
+								IMG_UINT32		ui32PDumpFlags)
+{
+	IMG_BOOL	bInCaptureRange;
+	IMG_BOOL	bPdumpEnabled;
+	IMG_UINT64	ui64PDumpState = 0;
+
+	PDumpGetStateKM(&ui64PDumpState);
+	PDumpIsCaptureFrameKM(&bInCaptureRange);
+	bPdumpEnabled = (ui64PDumpState & PDUMP_STATE_CONNECTED) != 0
+		&& (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags));
+
+	/*
+	 * If a padding command was needed then we should now move ui32HostWriteOffset
+	 * forward. The command has already been dumped (if bPdumpEnabled).
+	 */
+	if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) > psClientCCB->ui32Size)
+	{
+		IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
+
+		UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
+						  ui32Remain,
+						  psClientCCB->ui32Size);
+		psClientCCB->ui32ByteCount += ui32Remain;
+	}
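+
+	/* For illustration, assuming the usual definition
+	 * UPDATE_CCB_OFFSET(Off, Size, CCBSize) == Off = (Off + Size) & (CCBSize - 1)
+	 * (defined elsewhere in this file): with CCBSize = 0x1000, Off = 0xF80 and
+	 * ui32Remain = 0x80, the write offset becomes (0xF80 + 0x80) & 0xFFF = 0,
+	 * i.e. it wraps to the start of the buffer.
+	 */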
+
+	/* Dump the CCB data */
+	if (bPdumpEnabled)
+	{
+		DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+						   psClientCCB->ui32HostWriteOffset,
+						   ui32CmdSize,
+						   ui32PDumpFlags);
+	}
+
+	/*
+	 * Check if there are any fences being written that will already be
+	 * satisfied by the last written update command in this CCB. At the
+	 * same time, ASSERT that all sync addresses are not NULL.
+	 */
+#if defined(DEBUG)
+	{
+		IMG_UINT8 *pui8BufferStart = (void *)((uintptr_t)psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset);
+		IMG_UINT8 *pui8BufferEnd   = (void *)((uintptr_t)psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset + ui32CmdSize);
+		IMG_BOOL  bMessagePrinted  = IMG_FALSE;
+
+		/* Walk through the commands in this section of CCB being released... */
+		while (pui8BufferStart < pui8BufferEnd)
+		{
+			RGXFWIF_CCB_CMD_HEADER *psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8BufferStart;
+
+			if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
+			{
+				/* If an UPDATE then record the values in case an adjacent fence uses them. */
+				IMG_UINT32  ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+				RGXFWIF_UFO *psUFOPtr   = (RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+				psClientCCB->ui32UpdateEntries = 0;
+				while (ui32NumUFOs-- > 0)
+				{
+					PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+					if (psClientCCB->ui32UpdateEntries < RGX_CCCB_FENCE_UPDATE_LIST_SIZE)
+					{
+						psClientCCB->asFenceUpdateList[psClientCCB->ui32UpdateEntries++] = *psUFOPtr++;
+					}
+				}
+			}
+			else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE)
+			{
+				/* If a FENCE then check the values against the last UPDATE issued. */
+				IMG_UINT32  ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+				RGXFWIF_UFO *psUFOPtr   = (RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+				while (ui32NumUFOs-- > 0)
+				{
+					PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+
+					if (bMessagePrinted == IMG_FALSE)
+					{
+						RGXFWIF_UFO *psUpdatePtr = psClientCCB->asFenceUpdateList;
+						IMG_UINT32  ui32UpdateIndex;
+
+						for (ui32UpdateIndex = 0; ui32UpdateIndex < psClientCCB->ui32UpdateEntries; ui32UpdateIndex++)
+						{
+							if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+							{
+								if (RGX_UFO_PTR_ADDR(psUFOPtr) == RGX_UFO_PTR_ADDR(psUpdatePtr))
+								{
+									PVR_DPF((PVR_DBG_MESSAGE, "Redundant sync checkpoint check found in cCCB(%p) - 0x%x -> 0x%x",
+											psClientCCB, RGX_UFO_PTR_ADDR(psUFOPtr), psUFOPtr->ui32Value));
+									bMessagePrinted = IMG_TRUE;
+									break;
+								}
+							}
+							else
+							{
+								if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr  &&
+									psUFOPtr->ui32Value == psUpdatePtr->ui32Value)
+								{
+									PVR_DPF((PVR_DBG_MESSAGE, "Redundant fence check found in cCCB(%p) - 0x%x -> 0x%x",
+											psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+									bMessagePrinted = IMG_TRUE;
+									break;
+								}
+							}
+							psUpdatePtr++;
+						}
+					}
+
+					psUFOPtr++;
+				}
+			}
+			else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR  ||
+					 psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE)
+			{
+				/* For all other UFO ops check the UFO address is not NULL. */
+				IMG_UINT32  ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+				RGXFWIF_UFO *psUFOPtr   = (RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+				while (ui32NumUFOs-- > 0)
+				{
+					PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+					psUFOPtr++;
+				}
+			}
+
+			/* Move to the next command in this section of CCB being released... */
+			pui8BufferStart += sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHeader->ui32CmdSize;
+		}
+	}
+#endif /* defined(DEBUG) */
+
+	/*
+	 * Update the CCB write offset.
+	 */
+	UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
+					  ui32CmdSize,
+					  psClientCCB->ui32Size);
+	psClientCCB->ui32ByteCount += ui32CmdSize;
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+	_RGXUpdateCCBUtilisation(psClientCCB);
+#endif
+	/*
+		PDumpSetFrame will detect the Transition out of capture range for
+		frame-based data, but if we are PDumping continuous data then we
+		need to inform the PDump layer ourselves.
+	*/
+	if ((ui64PDumpState & PDUMP_STATE_CONNECTED) != 0
+		&& PDUMP_IS_CONTINUOUS(ui32PDumpFlags)
+		&& !bInCaptureRange)
+	{
+		PVRSRV_ERROR eError;
+
+		/* Only Transitioning into capture range can cause an error */
+		eError = PDumpTransition(psClientCCB->psPDumpConnectionData, PDUMP_TRANSITION_EVENT_RANGE_EXITED, ui32PDumpFlags);
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+
+	if (bPdumpEnabled)
+	{
+		if (!BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN))
+		{
+			/* Store the offset of the last finished CCB command. This offset
+			 * may be needed when appending commands to an unfinished CCB.
+			 */
+			psClientCCB->ui32FinishedPDumpWriteOffset = psClientCCB->ui32LastPDumpWriteOffset;
+		}
+
+		/* Update the PDump write offset to show we PDumped this command */
+		psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
+	}
+
+#if defined(NO_HARDWARE)
+	/*
+		The firmware is not running and cannot update these; we do it here instead.
+	*/
+	psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
+	psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
+#endif
+}
+
+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB)
+{
+	return psClientCCB->ui32HostWriteOffset;
+}
+
+#define SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL PVR_DBG_ERROR
+#define CHECK_COMMAND(cmd, fenceupdate) \
+				case RGXFWIF_CCB_CMD_TYPE_##cmd: \
+						PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, #cmd " command (%d bytes)", psHeader->ui32CmdSize)); \
+						bFenceUpdate = fenceupdate; \
+						break
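+
+/* For example, CHECK_COMMAND(FENCE, IMG_TRUE) expands (after string
+ * concatenation of the stringised name) to:
+ *
+ *   case RGXFWIF_CCB_CMD_TYPE_FENCE:
+ *       PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL,
+ *                "FENCE command (%d bytes)", psHeader->ui32CmdSize));
+ *       bFenceUpdate = IMG_TRUE;
+ *       break;
+ */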
+
+static void _RGXClientCCBDumpCommands(RGX_CLIENT_CCB *psClientCCB,
+									  IMG_UINT32 ui32Offset,
+									  IMG_UINT32 ui32ByteCount)
+{
+#if defined(SUPPORT_DUMP_CLIENT_CCB_COMMANDS)
+	IMG_UINT8 *pui8Ptr = psClientCCB->pui8ClientCCB + ui32Offset;
+	IMG_UINT32 ui32ConsumeSize = ui32ByteCount;
+
+	while (ui32ConsumeSize)
+	{
+		RGXFWIF_CCB_CMD_HEADER *psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8Ptr;
+		IMG_BOOL bFenceUpdate = IMG_FALSE;
+
+		PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "@offset 0x%08lx", pui8Ptr - psClientCCB->pui8ClientCCB));
+		switch (psHeader->eCmdType)
+		{
+			CHECK_COMMAND(TA, IMG_FALSE);
+			CHECK_COMMAND(3D, IMG_FALSE);
+			CHECK_COMMAND(CDM, IMG_FALSE);
+			CHECK_COMMAND(TQ_3D, IMG_FALSE);
+			CHECK_COMMAND(TQ_2D, IMG_FALSE);
+			CHECK_COMMAND(3D_PR, IMG_FALSE);
+			CHECK_COMMAND(NULL, IMG_FALSE);
+			CHECK_COMMAND(SHG, IMG_FALSE);
+			CHECK_COMMAND(RTU, IMG_FALSE);
+			CHECK_COMMAND(RTU_FC, IMG_FALSE);
+			CHECK_COMMAND(FENCE, IMG_TRUE);
+			CHECK_COMMAND(UPDATE, IMG_TRUE);
+			CHECK_COMMAND(UNFENCED_UPDATE, IMG_FALSE);
+			CHECK_COMMAND(FENCE_PR, IMG_TRUE);
+			CHECK_COMMAND(PADDING, IMG_FALSE);
+			CHECK_COMMAND(TQ_TDM, IMG_FALSE);
+			default:
+				PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Unknown command!"));
+				break;
+		}
+		pui8Ptr += sizeof(*psHeader);
+		if (bFenceUpdate)
+		{
+			IMG_UINT32 j;
+			RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8Ptr;
+			for (j = 0; j < psHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); j++)
+			{
+				PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Addr = 0x%08x, value = 0x%08x",
+							psUFOPtr[j].puiAddrUFO.ui32Addr, psUFOPtr[j].ui32Value));
+			}
+		}
+		else
+		{
+			IMG_UINT32 *pui32Ptr = (IMG_UINT32 *) pui8Ptr;
+			IMG_UINT32 ui32Remain = psHeader->ui32CmdSize/sizeof(IMG_UINT32);
+			while (ui32Remain)
+			{
+				if (ui32Remain >= 4)
+				{
+					PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x 0x%08x",
+							pui32Ptr[0], pui32Ptr[1], pui32Ptr[2], pui32Ptr[3]));
+					pui32Ptr += 4;
+					ui32Remain -= 4;
+				}
+				if (ui32Remain == 3)
+				{
+					PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x",
+							pui32Ptr[0], pui32Ptr[1], pui32Ptr[2]));
+					pui32Ptr += 3;
+					ui32Remain -= 3;
+				}
+				if (ui32Remain == 2)
+				{
+					PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x",
+							pui32Ptr[0], pui32Ptr[1]));
+					pui32Ptr += 2;
+					ui32Remain -= 2;
+				}
+				if (ui32Remain == 1)
+				{
+					PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x",
+							pui32Ptr[0]));
+					pui32Ptr += 1;
+					ui32Remain -= 1;
+				}
+			}
+		}
+		pui8Ptr += psHeader->ui32CmdSize;
+		ui32ConsumeSize -= sizeof(*psHeader) + psHeader->ui32CmdSize;
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(psClientCCB);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32ByteCount);
+#endif
+}
+
+/*
+	Work out how much space this command will require
+*/
+PVRSRV_ERROR RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB            *psClientCCB,
+                                    IMG_UINT32                ui32ClientFenceCount,
+                                    PRGXFWIF_UFO_ADDR         *pauiFenceUFOAddress,
+                                    IMG_UINT32                *paui32FenceValue,
+                                    IMG_UINT32                ui32ClientUpdateCount,
+                                    PRGXFWIF_UFO_ADDR         *pauiUpdateUFOAddress,
+                                    IMG_UINT32                *paui32UpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+                                    IMG_UINT32                ui32ServerSyncCount,
+                                    IMG_UINT32                *paui32ServerSyncFlags,
+                                    IMG_UINT32                ui32ServerSyncFlagMask,
+                                    SERVER_SYNC_PRIMITIVE     **papsServerSyncs,
+#endif
+                                    IMG_UINT32                ui32CmdSize,
+                                    IMG_PBYTE                 pui8DMCmd,
+                                    RGXFWIF_CCB_CMD_TYPE      eType,
+                                    IMG_UINT32                ui32ExtJobRef,
+                                    IMG_UINT32                ui32IntJobRef,
+                                    IMG_UINT32                ui32PDumpFlags,
+                                    RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
+                                    IMG_CHAR                  *pszCommandName,
+                                    IMG_BOOL                  bCCBStateOpen,
+                                    RGX_CCB_CMD_HELPER_DATA   *psCmdHelperData)
+{
+	IMG_UINT32 ui32FenceCount;
+	IMG_UINT32 ui32UpdateCount;
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	IMG_UINT32 i;
+#endif
+
+	/* Job reference values */
+	psCmdHelperData->ui32ExtJobRef = ui32ExtJobRef;
+	psCmdHelperData->ui32IntJobRef = ui32IntJobRef;
+
+	/* Save the data we require in the submit call */
+	psCmdHelperData->psClientCCB = psClientCCB;
+#if defined(PDUMP)
+	psCmdHelperData->ui32PDumpFlags = ui32PDumpFlags;
+#endif
+	psCmdHelperData->pszCommandName = pszCommandName;
+	if (bCCBStateOpen)
+	{
+		BIT_SET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN);
+	}
+	else
+	{
+		BIT_UNSET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN);
+	}
+
+	/* Client sync data */
+	psCmdHelperData->ui32ClientFenceCount = ui32ClientFenceCount;
+	psCmdHelperData->pauiFenceUFOAddress = pauiFenceUFOAddress;
+	psCmdHelperData->paui32FenceValue = paui32FenceValue;
+	psCmdHelperData->ui32ClientUpdateCount = ui32ClientUpdateCount;
+	psCmdHelperData->pauiUpdateUFOAddress = pauiUpdateUFOAddress;
+	psCmdHelperData->paui32UpdateValue = paui32UpdateValue;
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	/* Server sync data */
+	psCmdHelperData->ui32ServerSyncCount = ui32ServerSyncCount;
+	psCmdHelperData->paui32ServerSyncFlags = paui32ServerSyncFlags;
+	psCmdHelperData->ui32ServerSyncFlagMask = ui32ServerSyncFlagMask;
+	psCmdHelperData->papsServerSyncs = papsServerSyncs;
+#endif
+
+	/* Command data */
+	psCmdHelperData->ui32CmdSize = ui32CmdSize;
+	psCmdHelperData->pui8DMCmd = pui8DMCmd;
+	psCmdHelperData->eType = eType;
+
+	PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+			"%s Command Server Init on FWCtx %08x", pszCommandName,
+			FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr);
+
+	/* Init the generated data members */
+	psCmdHelperData->ui32ServerFenceCount = 0;
+	psCmdHelperData->ui32ServerUpdateCount = 0;
+	psCmdHelperData->ui32ServerUnfencedUpdateCount = 0;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	/* Workload Data added */
+	psCmdHelperData->psWorkEstKickData = psWorkEstKickData;
+#endif
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	/* Work out how many fences and updates this command will have */
+	for (i = 0; i < ui32ServerSyncCount; i++)
+	{
+		IMG_UINT32 ui32Flag = paui32ServerSyncFlags[i] & ui32ServerSyncFlagMask;
+
+		if (ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+		{
+			/* Server syncs must fence */
+			psCmdHelperData->ui32ServerFenceCount++;
+		}
+
+		/* If it is an update */
+		if (ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+		{
+			/* Is it a fenced update or a progress update (a.k.a. unfenced update)? */
+			if ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE) == PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE)
+			{
+				/* it is a progress update */
+				psCmdHelperData->ui32ServerUnfencedUpdateCount++;
+			}
+			else
+			{
+				/* it is a fenced update */
+				psCmdHelperData->ui32ServerUpdateCount++;
+			}
+		}
+	}
+#endif
+
+	/* Total fence command size (header plus command data) */
+	ui32FenceCount = ui32ClientFenceCount + psCmdHelperData->ui32ServerFenceCount;
+	if (ui32FenceCount)
+	{
+		psCmdHelperData->ui32FenceCmdSize = RGX_CCB_FWALLOC_ALIGN((ui32FenceCount * sizeof(RGXFWIF_UFO)) +
+																  sizeof(RGXFWIF_CCB_CMD_HEADER));
+	}
+	else
+	{
+		psCmdHelperData->ui32FenceCmdSize = 0;
+	}
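+
+	/* Worked example (structure sizes assumed purely for illustration): with
+	 * three fence UFOs at sizeof(RGXFWIF_UFO) == 8 bytes and a 16-byte command
+	 * header, the raw size is 3*8 + 16 = 40 bytes, which
+	 * RGX_CCB_FWALLOC_ALIGN rounds up to the firmware allocation granularity
+	 * (48 bytes for a 16-byte granule).
+	 */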
+
+	/* Total DM command size (header plus command data) */
+	psCmdHelperData->ui32DMCmdSize = RGX_CCB_FWALLOC_ALIGN(ui32CmdSize +
+														   sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+	/* Total update command size (header plus command data) */
+	ui32UpdateCount = ui32ClientUpdateCount + psCmdHelperData->ui32ServerUpdateCount;
+	if (ui32UpdateCount)
+	{
+		psCmdHelperData->ui32UpdateCmdSize = RGX_CCB_FWALLOC_ALIGN((ui32UpdateCount * sizeof(RGXFWIF_UFO)) +
+																   sizeof(RGXFWIF_CCB_CMD_HEADER));
+	}
+	else
+	{
+		psCmdHelperData->ui32UpdateCmdSize = 0;
+	}
+
+	/* Total unfenced update command size (header plus command data) */
+	if (psCmdHelperData->ui32ServerUnfencedUpdateCount != 0)
+	{
+		psCmdHelperData->ui32UnfencedUpdateCmdSize = RGX_CCB_FWALLOC_ALIGN((psCmdHelperData->ui32ServerUnfencedUpdateCount * sizeof(RGXFWIF_UFO)) +
+																		   sizeof(RGXFWIF_CCB_CMD_HEADER));
+	}
+	else
+	{
+		psCmdHelperData->ui32UnfencedUpdateCmdSize = 0;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*
+	Reserve space in the CCB and fill in the command and client sync data
+*/
+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
+									   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
+{
+	IMG_UINT32 ui32AllocSize = 0;
+	IMG_UINT32 i;
+	IMG_UINT8 *pui8StartPtr;
+	PVRSRV_ERROR eError;
+
+	/*
+		Work out how much space we need for all the command(s)
+	*/
+	ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
+
+#if defined(PDUMP)
+	for (i = 0; i < ui32CmdCount; i++)
+	{
+		if ((asCmdHelperData[0].ui32PDumpFlags ^ asCmdHelperData[i].ui32PDumpFlags) & PDUMP_FLAGS_CONTINUOUS)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: PDump continuous is not consistent (%s != %s) for command %d",
+					 __func__,
+					 PDUMP_IS_CONTINUOUS(asCmdHelperData[0].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE",
+					 PDUMP_IS_CONTINUOUS(asCmdHelperData[i].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE",
+					 i));
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+#endif
+
+	/*
+		Acquire space in the CCB for all the command(s).
+	*/
+	eError = RGXAcquireCCB(asCmdHelperData[0].psClientCCB,
+						   ui32AllocSize,
+						   (void **)&pui8StartPtr,
+						   asCmdHelperData[0].ui32PDumpFlags);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		return eError;
+	}
+
+	/*
+		For each command fill in the fence, DM, and update command.
+
+		Note:
+		We only fill in the client fences here; the server fences (and updates)
+		will be filled in together at the end. This is because we might fail the
+		kernel CCB alloc and would then have to roll back the server syncs if
+		we had taken the operation here.
+	*/
+	for (i = 0; i < ui32CmdCount; i++)
+	{
+		RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = & asCmdHelperData[i];
+		IMG_UINT8 *pui8CmdPtr;
+		IMG_UINT8 *pui8ServerFenceStart = NULL;
+		IMG_UINT8 *pui8ServerUpdateStart = NULL;
+#if defined(PDUMP)
+		IMG_UINT32 ui32CtxAddr = FWCommonContextGetFWAddress(asCmdHelperData->psClientCCB->psServerCommonContext).ui32Addr;
+		IMG_UINT32 ui32CcbWoff = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(asCmdHelperData->psClientCCB->psServerCommonContext));
+#endif
+
+		if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
+		{
+			PDUMPCOMMENT("Start of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+					psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+		}
+
+		/*
+			Create the fence command.
+		*/
+		if (psCmdHelperData->ui32FenceCmdSize)
+		{
+			RGXFWIF_CCB_CMD_HEADER *psHeader;
+			IMG_UINT k, uiNextValueIndex;
+
+			/* Fences are at the start of the command */
+			pui8CmdPtr = pui8StartPtr;
+
+			psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FENCE;
+
+			psHeader->ui32CmdSize = psCmdHelperData->ui32FenceCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+			psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+			psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+			psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+			psHeader->sWorkEstKickData.ui64Deadline = 0;
+			psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+
+			pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+			/* Fill in the client fences */
+			uiNextValueIndex = 0;
+			for (k = 0; k < psCmdHelperData->ui32ClientFenceCount; k++)
+			{
+				RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8CmdPtr;
+
+				psUFOPtr->puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k];
+
+				if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+				{
+					psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+				}
+				else
+				{
+					/* Only increment uiNextValueIndex for non sync checkpoints
+					 * (as paui32FenceValue only contains values for sync prims)
+					 */
+					psUFOPtr->ui32Value = psCmdHelperData->paui32FenceValue[uiNextValueIndex++];
+				}
+				pui8CmdPtr += sizeof(RGXFWIF_UFO);
+
+#if defined SYNC_COMMAND_DEBUG
+				PVR_DPF((PVR_DBG_ERROR, "%s client sync fence - 0x%x -> 0x%x",
+						psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+#endif
+				PDUMPCOMMENT(".. %s client sync fence - 0x%x -> 0x%x",
+						psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
+
+			}
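+
+			/* Example of the value selection above (counts assumed): for three
+			 * fences where only the second is a sync checkpoint, the written
+			 * values are paui32FenceValue[0], PVRSRV_SYNC_CHECKPOINT_SIGNALLED
+			 * and paui32FenceValue[1] respectively, since sync checkpoints do
+			 * not consume an entry from the fence value array.
+			 */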
+			pui8ServerFenceStart = pui8CmdPtr;
+		}
+
+		/* jump over the Server fences */
+		pui8CmdPtr = pui8StartPtr + psCmdHelperData->ui32FenceCmdSize;
+
+		/*
+			Create the DM command
+		*/
+		if (psCmdHelperData->ui32DMCmdSize)
+		{
+			RGXFWIF_CCB_CMD_HEADER *psHeader;
+
+			psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+			psHeader->eCmdType = psCmdHelperData->eType;
+
+			psHeader->ui32CmdSize = psCmdHelperData->ui32DMCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+			psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+			psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+			if (psCmdHelperData->psWorkEstKickData != NULL)
+			{
+				PVR_ASSERT(psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_TA ||
+				           psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_3D);
+				psHeader->sWorkEstKickData = *psCmdHelperData->psWorkEstKickData;
+			}
+			else
+			{
+				psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+				psHeader->sWorkEstKickData.ui64Deadline = 0;
+				psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+			}
+#endif
+
+			pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+			/* The buffer is write-combine, so no special device memory treatment required. */
+			OSCachedMemCopy(pui8CmdPtr, psCmdHelperData->pui8DMCmd, psCmdHelperData->ui32CmdSize);
+			pui8CmdPtr += psCmdHelperData->ui32CmdSize;
+		}
+
+		/*
+			Create the update command.
+
+			Note:
+			We only fill in the client updates here, the server updates (and fences)
+			will be filled in together at the end
+		*/
+		if (psCmdHelperData->ui32UpdateCmdSize)
+		{
+			RGXFWIF_CCB_CMD_HEADER *psHeader;
+			IMG_UINT k, uiNextValueIndex;
+
+			psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UPDATE;
+			psHeader->ui32CmdSize = psCmdHelperData->ui32UpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+			psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+			psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+			psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+			psHeader->sWorkEstKickData.ui64Deadline = 0;
+			psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+			pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+			/* Fill in the client updates */
+			uiNextValueIndex = 0;
+			for (k = 0; k < psCmdHelperData->ui32ClientUpdateCount; k++)
+			{
+				RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8CmdPtr;
+
+				psUFOPtr->puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k];
+				if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+				{
+					psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+				}
+				else
+				{
+					/* Only increment uiNextValueIndex for non sync checkpoints
+					 * (as paui32UpdateValue only contains values for sync prims)
+					 */
+					psUFOPtr->ui32Value = psCmdHelperData->paui32UpdateValue[uiNextValueIndex++];
+				}
+				pui8CmdPtr += sizeof(RGXFWIF_UFO);
+
+#if defined SYNC_COMMAND_DEBUG
+				PVR_DPF((PVR_DBG_ERROR, "%s client sync update - 0x%x -> 0x%x",
+						psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+#endif
+				PDUMPCOMMENT(".. %s client sync update - 0x%x -> 0x%x",
+						psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
+
+			}
+			pui8ServerUpdateStart = pui8CmdPtr;
+		}
+
+		/* Save the server sync fence & update offsets for submit time */
+		psCmdHelperData->pui8ServerFenceStart  = pui8ServerFenceStart;
+		psCmdHelperData->pui8ServerUpdateStart = pui8ServerUpdateStart;
+
+		/* jump over the fenced update */
+		if (psCmdHelperData->ui32UnfencedUpdateCmdSize != 0)
+		{
+			RGXFWIF_CCB_CMD_HEADER * const psHeader = (RGXFWIF_CCB_CMD_HEADER *) psCmdHelperData->pui8ServerUpdateStart + psCmdHelperData->ui32UpdateCmdSize;
+			/* set up the header for unfenced updates */
+			PVR_ASSERT(psHeader); /* Could be NULL if ui32UpdateCmdSize is 0, which is never expected */
+			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE;
+			psHeader->ui32CmdSize = psCmdHelperData->ui32UnfencedUpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+			psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+			psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+			psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+			psHeader->sWorkEstKickData.ui64Deadline = 0;
+			psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+
+			/* jump over the header */
+			psCmdHelperData->pui8ServerUnfencedUpdateStart = ((IMG_UINT8*) psHeader) + sizeof(RGXFWIF_CCB_CMD_HEADER);
+		}
+		else
+		{
+			psCmdHelperData->pui8ServerUnfencedUpdateStart = NULL;
+		}
+
+		/* Save start for sanity checking at submit time */
+		psCmdHelperData->pui8StartPtr = pui8StartPtr;
+
+		/* Set the start pointer for the next iteration around the loop */
+		pui8StartPtr +=
+			psCmdHelperData->ui32FenceCmdSize         +
+			psCmdHelperData->ui32DMCmdSize            +
+			psCmdHelperData->ui32UpdateCmdSize        +
+			psCmdHelperData->ui32UnfencedUpdateCmdSize;
+
+		if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
+		{
+			PDUMPCOMMENT("End of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+					psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+		}
+		else
+		{
+			PDUMPCOMMENT("No %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+					psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+		}
+	}
+
+	return PVRSRV_OK;
+}
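+
+/*
+	Illustrative sketch only (guarded out of the build, not part of the driver):
+	the per-command layout that RGXCmdHelperAcquireCmdCCB establishes above.
+	Each command occupies four contiguous regions in the client CCB, and the
+	pointer math below mirrors the pui8StartPtr advance at the end of the loop.
+	IllustrateCmdLayout is a hypothetical name; psHelper is assumed to be an
+	already-initialised helper structure.
+*/
+#if 0
+static void IllustrateCmdLayout(const RGX_CCB_CMD_HELPER_DATA *psHelper)
+{
+	IMG_UINT8 *pui8Fence    = psHelper->pui8StartPtr;
+	IMG_UINT8 *pui8DMCmd    = pui8Fence    + psHelper->ui32FenceCmdSize;
+	IMG_UINT8 *pui8Update   = pui8DMCmd    + psHelper->ui32DMCmdSize;
+	IMG_UINT8 *pui8Unfenced = pui8Update   + psHelper->ui32UpdateCmdSize;
+	IMG_UINT8 *pui8Next     = pui8Unfenced + psHelper->ui32UnfencedUpdateCmdSize;
+
+	/* pui8Next is where the next command's fence region would start */
+	PVR_UNREFERENCED_PARAMETER(pui8Next);
+}
+#endif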
+
+/*
+	Fill in the server syncs data and release the CCB space
+*/
+void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
+							   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+							   const IMG_CHAR *pcszDMName,
+							   IMG_UINT32 ui32CtxAddr)
+{
+	IMG_UINT32 ui32AllocSize = 0;
+	IMG_UINT32 i;
+#if defined(LINUX)
+	IMG_BOOL bTraceChecks = trace_rogue_are_fence_checks_traced();
+	IMG_BOOL bTraceUpdates = trace_rogue_are_fence_updates_traced();
+#endif
+
+	/*
+		Work out how much space we need for all the command(s)
+	*/
+	ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVLockServerSync();
+#endif
+#endif
+	/*
+		For each command fill in the server sync info
+	*/
+	for (i = 0; i < ui32CmdCount; i++)
+	{
+		RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i];
+		IMG_UINT8 *pui8ServerFenceStart = psCmdHelperData->pui8ServerFenceStart;
+		IMG_UINT8 *pui8ServerUpdateStart = psCmdHelperData->pui8ServerUpdateStart;
+		IMG_UINT8 *pui8ServerUnfencedUpdateStart = psCmdHelperData->pui8ServerUnfencedUpdateStart;
+		IMG_UINT32 j;
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		/* Now fill in the server fence and updates together */
+		for (j = 0; j < psCmdHelperData->ui32ServerSyncCount; j++)
+		{
+			RGXFWIF_UFO *psUFOPtr;
+			IMG_UINT32 ui32UpdateValue;
+			IMG_UINT32 ui32FenceValue;
+			IMG_UINT32 ui32SyncAddr;
+			PVRSRV_ERROR eError;
+			IMG_UINT32 ui32Flag = psCmdHelperData->paui32ServerSyncFlags[j] & psCmdHelperData->ui32ServerSyncFlagMask;
+			IMG_BOOL bFence = ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)!=0)?IMG_TRUE:IMG_FALSE;
+			IMG_BOOL bUpdate = ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)!=0)?IMG_TRUE:IMG_FALSE;
+			const IMG_BOOL bUnfencedUpdate = ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE) == PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE)
+				? IMG_TRUE
+				: IMG_FALSE;
+
+			eError = PVRSRVServerSyncQueueHWOpKM_NoGlobalLock(psCmdHelperData->papsServerSyncs[j],
+												 bUpdate,
+												 &ui32FenceValue,
+												 &ui32UpdateValue);
+			/* This function can't fail */
+			PVR_ASSERT(eError == PVRSRV_OK);
+
+			/*
+				As server syncs always fence (we have a check in RGXCmdHelperInitCmdCCB
+				which ensures the client is playing ball), the filling in of the fence
+				is unconditional.
+			*/
+			eError = ServerSyncGetFWAddr(psCmdHelperData->papsServerSyncs[j], &ui32SyncAddr);
+			if (unlikely(PVRSRV_OK != eError))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to read Server Sync FW address (%d)",
+					__func__, eError));
+				PVR_ASSERT(eError == PVRSRV_OK);
+			}
+			if (bFence)
+			{
+				PVR_ASSERT(pui8ServerFenceStart != NULL);
+
+				psUFOPtr = (RGXFWIF_UFO *) pui8ServerFenceStart;
+				psUFOPtr->puiAddrUFO.ui32Addr = ui32SyncAddr;
+				psUFOPtr->ui32Value = ui32FenceValue;
+				pui8ServerFenceStart += sizeof(RGXFWIF_UFO);
+
+#if defined(LINUX) && defined(SUPPORT_RGX)
+				if (bTraceChecks)
+				{
+					trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
+											 pcszDMName,
+											 ui32CtxAddr,
+											 psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+											 1,
+											 &psUFOPtr->puiAddrUFO,
+											 &psUFOPtr->ui32Value);
+				}
+#endif
+			}
+
+			/* If there is an update then fill that in as well */
+			if (bUpdate)
+			{
+				if (bUnfencedUpdate)
+				{
+					PVR_ASSERT(pui8ServerUnfencedUpdateStart != NULL);
+
+					psUFOPtr = (RGXFWIF_UFO *) pui8ServerUnfencedUpdateStart;
+					psUFOPtr->puiAddrUFO.ui32Addr = ui32SyncAddr;
+					psUFOPtr->ui32Value = ui32UpdateValue;
+					pui8ServerUnfencedUpdateStart += sizeof(RGXFWIF_UFO);
+				}
+				else
+				{
+					/* fenced update */
+					PVR_ASSERT(pui8ServerUpdateStart != NULL);
+
+					psUFOPtr = (RGXFWIF_UFO *) pui8ServerUpdateStart;
+					psUFOPtr->puiAddrUFO.ui32Addr = ui32SyncAddr;
+					psUFOPtr->ui32Value = ui32UpdateValue;
+					pui8ServerUpdateStart += sizeof(RGXFWIF_UFO);
+				}
+#if defined(LINUX) && defined(SUPPORT_RGX)
+				if (bTraceUpdates)
+				{
+					trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
+											  pcszDMName,
+											  ui32CtxAddr,
+											  psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+											  1,
+											  &psUFOPtr->puiAddrUFO,
+											  &psUFOPtr->ui32Value);
+				}
+#endif
+
+#if defined(NO_HARDWARE)
+				/*
+				  There is no FW so the host has to do any Sync updates
+				  (client sync updates are done in the client)
+				*/
+				PVRSRVServerSyncPrimSetKM(psCmdHelperData->papsServerSyncs[j], ui32UpdateValue);
+#endif
+			}
+		}
+#endif
+
+#if defined(LINUX) && defined(SUPPORT_RGX)
+		if (bTraceChecks)
+		{
+			trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
+									 pcszDMName,
+									 ui32CtxAddr,
+									 psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+									 psCmdHelperData->ui32ClientFenceCount,
+									 psCmdHelperData->pauiFenceUFOAddress,
+									 psCmdHelperData->paui32FenceValue);
+		}
+		if (bTraceUpdates)
+		{
+			trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
+									  pcszDMName,
+									  ui32CtxAddr,
+									  psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+									  psCmdHelperData->ui32ClientUpdateCount,
+									  psCmdHelperData->pauiUpdateUFOAddress,
+									  psCmdHelperData->paui32UpdateValue);
+		}
+#endif
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		if (psCmdHelperData->ui32ServerSyncCount)
+		{
+			/*
+				Do some sanity checks to ensure we did the pointer math right
+			*/
+			if (pui8ServerFenceStart != NULL)
+			{
+				PVR_ASSERT(pui8ServerFenceStart ==
+						   (psCmdHelperData->pui8StartPtr +
+						   psCmdHelperData->ui32FenceCmdSize));
+			}
+
+			if (pui8ServerUpdateStart != NULL)
+			{
+				PVR_ASSERT(pui8ServerUpdateStart ==
+				           psCmdHelperData->pui8StartPtr             +
+				           psCmdHelperData->ui32FenceCmdSize         +
+				           psCmdHelperData->ui32DMCmdSize            +
+				           psCmdHelperData->ui32UpdateCmdSize);
+			}
+
+			if (pui8ServerUnfencedUpdateStart != NULL)
+			{
+				PVR_ASSERT(pui8ServerUnfencedUpdateStart ==
+				           psCmdHelperData->pui8StartPtr             +
+				           psCmdHelperData->ui32FenceCmdSize         +
+				           psCmdHelperData->ui32DMCmdSize            +
+				           psCmdHelperData->ui32UpdateCmdSize        +
+				           psCmdHelperData->ui32UnfencedUpdateCmdSize);
+			}
+		}
+#endif
+		/*
+			All the commands have been filled in so release the CCB space.
+			The FW still won't run this command until we kick it
+		*/
+		PDUMPCOMMENTWITHFLAGS(psCmdHelperData->ui32PDumpFlags,
+				"%s Command Server Release on FWCtx %08x",
+				psCmdHelperData->pszCommandName, ui32CtxAddr);
+	}
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVUnlockServerSync();
+#endif
+#endif
+
+	_RGXClientCCBDumpCommands(asCmdHelperData[0].psClientCCB,
+							  asCmdHelperData[0].psClientCCB->ui32HostWriteOffset,
+							  ui32AllocSize);
+
+	RGXReleaseCCB(asCmdHelperData[0].psClientCCB,
+				  ui32AllocSize,
+				  asCmdHelperData[0].ui32PDumpFlags);
+
+	BIT_UNSET(asCmdHelperData[0].psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN);
+}
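+
+/*
+	Illustrative call sequence only (guarded out of the build): how a kick path
+	is expected to drive the helper API above. ExampleKickOneCommand and the
+	"EXAMPLE" DM name are hypothetical; RGXCmdHelperInitCmdCCB is assumed to
+	have populated psHelperData already, and error handling is elided.
+*/
+#if 0
+static PVRSRV_ERROR ExampleKickOneCommand(RGX_CCB_CMD_HELPER_DATA *psHelperData,
+                                          IMG_UINT32 ui32CtxAddr)
+{
+	/* Reserve CCB space and lay out the fence/DM/update regions */
+	PVRSRV_ERROR eError = RGXCmdHelperAcquireCmdCCB(1, psHelperData);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* Fill in the server syncs and release the space; the FW only
+	 * consumes the command once the context is subsequently kicked */
+	RGXCmdHelperReleaseCmdCCB(1, psHelperData, "EXAMPLE", ui32CtxAddr);
+
+	return PVRSRV_OK;
+}
+#endif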
+
+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32              ui32CmdCount,
+                                      RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
+{
+	IMG_UINT32 ui32AllocSize = 0;
+	IMG_UINT32 i;
+
+	/*
+		Work out how much space we need for all the command(s)
+	*/
+	for (i = 0; i < ui32CmdCount; i++)
+	{
+		ui32AllocSize +=
+			asCmdHelperData[i].ui32FenceCmdSize          +
+			asCmdHelperData[i].ui32DMCmdSize             +
+			asCmdHelperData[i].ui32UpdateCmdSize         +
+			asCmdHelperData[i].ui32UnfencedUpdateCmdSize;
+	}
+
+	return ui32AllocSize;
+}
+
+/* Work out how much of an offset there is to a specific command. */
+IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+                                        IMG_UINT32              ui32Cmdindex)
+{
+	IMG_UINT32 ui32Offset = 0;
+	IMG_UINT32 i;
+
+	for (i = 0; i < ui32Cmdindex; i++)
+	{
+		ui32Offset +=
+			asCmdHelperData[i].ui32FenceCmdSize          +
+			asCmdHelperData[i].ui32DMCmdSize             +
+			asCmdHelperData[i].ui32UpdateCmdSize         +
+			asCmdHelperData[i].ui32UnfencedUpdateCmdSize;
+	}
+
+	return ui32Offset;
+}
+
+/* Returns the offset of the data master command from a write offset */
+IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData)
+{
+	return psCmdHelperData->ui32FenceCmdSize;
+}
+
+static const char *_CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType)
+{
+	switch (cmdType)
+	{
+		case RGXFWIF_CCB_CMD_TYPE_TA: return "TA";
+		case RGXFWIF_CCB_CMD_TYPE_3D: return "3D";
+		case RGXFWIF_CCB_CMD_TYPE_CDM: return "CDM";
+		case RGXFWIF_CCB_CMD_TYPE_TQ_3D: return "TQ_3D";
+		case RGXFWIF_CCB_CMD_TYPE_TQ_2D: return "TQ_2D";
+		case RGXFWIF_CCB_CMD_TYPE_3D_PR: return "3D_PR";
+		case RGXFWIF_CCB_CMD_TYPE_NULL: return "NULL";
+		case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: return "TQ_TDM";
+
+		case RGXFWIF_CCB_CMD_TYPE_FENCE: return "FENCE";
+		case RGXFWIF_CCB_CMD_TYPE_UPDATE: return "UPDATE";
+		case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: return "FENCE_PR";
+		case RGXFWIF_CCB_CMD_TYPE_PRIORITY: return "PRIORITY";
+
+		case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: return "UNFENCED_UPDATE";
+
+		case RGXFWIF_CCB_CMD_TYPE_PADDING: return "PADDING";
+
+		default:
+			PVR_ASSERT(IMG_FALSE);
+		break;
+	}
+
+	return "INVALID";
+}
+
+PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM)
+{
+	volatile RGXFWIF_CCCB_CTL	*psClientCCBCtrl;
+	IMG_UINT32 					ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff;
+	PVRSRV_ERROR				eError = PVRSRV_OK;
+
+	if (psCurrentClientCCB == NULL)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB is NULL"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+	ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
+	ui32SampledDpOff = psClientCCBCtrl->ui32DepOffset;
+	ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
+
+	if (ui32SampledRdOff > psClientCCBCtrl->ui32WrapMask ||
+		ui32SampledDpOff > psClientCCBCtrl->ui32WrapMask ||
+		ui32SampledWrOff > psClientCCBCtrl->ui32WrapMask)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has invalid offset (ROFF=%d DOFF=%d WOFF=%d)",
+				ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff));
+		return PVRSRV_ERROR_INVALID_OFFSET;
+	}
+
+	if (ui32SampledRdOff != ui32SampledWrOff &&
+				psCurrentClientCCB->ui32LastROff != psCurrentClientCCB->ui32LastWOff &&
+				ui32SampledRdOff == psCurrentClientCCB->ui32LastROff &&
+				(psCurrentClientCCB->ui32ByteCount - psCurrentClientCCB->ui32LastByteCount) < psCurrentClientCCB->ui32Size)
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice;
+
+		/* Only log a stalled CCB if GPU is idle (any state other than POW_ON is considered idle) */
+		if ((psDevInfo->psRGXFWIfTraceBuf->ePowState != RGXFWIF_POW_ON) &&
+			psDevInfo->ui32SLRHoldoffCounter == 0)
+		{
+			static __maybe_unused const char *pszStalledAction =
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+					"force";
+#else
+					"warn";
+#endif
+			/* Don't log this by default unless debugging, since a higher-level
+			 * function will log the stalled condition; this helps avoid
+			 * duplicate messages in the log.
+			 */
+			PVR_DPF((PVR_DBG_ERROR, "%s (%s): CCCB has not progressed (ROFF=%d DOFF=%d WOFF=%d) for \"%s\"",
+					__func__, pszStalledAction, ui32SampledRdOff,
+					ui32SampledDpOff, ui32SampledWrOff,
+					(IMG_PCHAR)&psCurrentClientCCB->szName));
+			eError = PVRSRV_ERROR_CCCB_STALLED;
+
+			{
+				IMG_UINT8				*pui8ClientCCBBuff = psCurrentClientCCB->pui8ClientCCB;
+				RGXFWIF_CCB_CMD_HEADER	*psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)(pui8ClientCCBBuff + ui32SampledRdOff);
+				PVRSRV_RGXDEV_INFO		*psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext);
+
+				/* Special case - if readOffset is on a PADDING packet, CCB has wrapped.
+				 * In this case, skip over the PADDING packet.
+				 */
+				if (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_PADDING)
+				{
+					psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)(pui8ClientCCBBuff +
+					                                             ((ui32SampledRdOff +
+					                                               psCommandHeader->ui32CmdSize +
+					                                               sizeof(RGXFWIF_CCB_CMD_HEADER))
+					                                              & psCurrentClientCCB->psClientCCBCtrl->ui32WrapMask));
+				}
+
+				/* Only try to recover a 'stalled' context (ie one waiting on a fence), as some work (eg compute) could
+				 * take a long time to complete, during which time the CCB ptrs would not advance.
+				 */
+				if (((psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE) ||
+				     (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) &&
+				    (psCommandHeader != (RGXFWIF_CCB_CMD_HEADER *)(pui8ClientCCBBuff + ui32SampledWrOff)))
+				{
+					/* Acquire the cCCB recovery lock */
+					OSLockAcquire(psDevInfo->hCCBRecoveryLock);
+
+					if (!psDevInfo->pvEarliestStalledClientCCB)
+					{
+						psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB;
+						psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef;
+					}
+					else
+					{
+						/* Check if this fence cmd header has an older submission stamp than the one we are currently considering unblocking
+						 * (account for submission stamp wrap by checking the diff is less than 0x80000000) - if it is older, then this becomes
+						 * our preferred fence to be unblocked.
+						 */
+						if ((psCommandHeader->ui32IntJobRef < psDevInfo->ui32OldestSubmissionOrdinal) &&
+						    ((psDevInfo->ui32OldestSubmissionOrdinal - psCommandHeader->ui32IntJobRef) < 0x80000000))
+						{
+							psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB;
+							psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef;
+						}
+					}
+
+					/* Release the cCCB recovery lock */
+					OSLockRelease(psDevInfo->hCCBRecoveryLock);
+				}
+			}
+		}
+	}
+
+	psCurrentClientCCB->ui32LastROff = ui32SampledRdOff;
+	psCurrentClientCCB->ui32LastWOff = ui32SampledWrOff;
+	psCurrentClientCCB->ui32LastByteCount = psCurrentClientCCB->ui32ByteCount;
+
+	return eError;
+}
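+
+/*
+	Illustrative sketch only (guarded out of the build): client CCB offsets are
+	kept inside the buffer by masking with ui32WrapMask (CCB size minus one, so
+	the CCB size must be a power of two). This is the same arithmetic used when
+	skipping the PADDING packet above and when walking commands in DumpCCB
+	below. ExampleNextCmdOffset is a hypothetical name.
+*/
+#if 0
+static IMG_UINT32 ExampleNextCmdOffset(IMG_UINT32 ui32Offset,
+                                       const RGXFWIF_CCB_CMD_HEADER *psHeader,
+                                       IMG_UINT32 ui32WrapMask)
+{
+	/* Advance past the header and its payload, wrapping at the buffer end */
+	return (IMG_UINT32)((ui32Offset + sizeof(RGXFWIF_CCB_CMD_HEADER) +
+	                     psHeader->ui32CmdSize) & ui32WrapMask);
+}
+#endif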
+
+void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+			PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+			RGX_CLIENT_CCB *psCurrentClientCCB,
+			DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+	volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+	IMG_UINT8 *pui8ClientCCBBuff = psCurrentClientCCB->pui8ClientCCB;
+	IMG_UINT32 ui32Offset = psClientCCBCtrl->ui32ReadOffset;
+	IMG_UINT32 ui32DepOffset = psClientCCBCtrl->ui32DepOffset;
+	IMG_UINT32 ui32EndOffset = psCurrentClientCCB->ui32HostWriteOffset;
+	IMG_UINT32 ui32WrapMask = psClientCCBCtrl->ui32WrapMask;
+	IMG_CHAR * pszState = "Ready";
+
+	PVR_DUMPDEBUG_LOG("FWCtx 0x%08X (%s)", sFWCommonContext.ui32Addr,
+		(IMG_PCHAR)&psCurrentClientCCB->szName);
+	if (ui32Offset == ui32EndOffset)
+	{
+		PVR_DUMPDEBUG_LOG("  `--<Empty>");
+	}
+
+	while (ui32Offset != ui32EndOffset)
+	{
+		RGXFWIF_CCB_CMD_HEADER *psCmdHeader = (RGXFWIF_CCB_CMD_HEADER*)(pui8ClientCCBBuff + ui32Offset);
+		IMG_UINT32 ui32NextOffset = (ui32Offset + psCmdHeader->ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)) & ui32WrapMask;
+		IMG_BOOL bLastCommand = (ui32NextOffset == ui32EndOffset)? IMG_TRUE: IMG_FALSE;
+		IMG_BOOL bLastUFO;
+		#define CCB_SYNC_INFO_LEN 80
+		IMG_CHAR pszSyncInfo[CCB_SYNC_INFO_LEN];
+		IMG_UINT32 ui32NoOfUpdates, i;
+		RGXFWIF_UFO *psUFOPtr;
+
+		ui32NoOfUpdates = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+		psUFOPtr = (RGXFWIF_UFO*)(pui8ClientCCBBuff + ui32Offset + sizeof(RGXFWIF_CCB_CMD_HEADER));
+		pszSyncInfo[0] = '\0';
+
+		if (ui32Offset == ui32DepOffset)
+		{
+			pszState = "Waiting";
+		}
+
+		PVR_DUMPDEBUG_LOG("  %s--%s %s @ %u Int=%u Ext=%u",
+			bLastCommand? "`": "|",
+			pszState, _CCBCmdTypename(psCmdHeader->eCmdType),
+			ui32Offset, psCmdHeader->ui32IntJobRef, psCmdHeader->ui32ExtJobRef
+			);
+
+		/* switch on type and write checks and updates */
+		switch (psCmdHeader->eCmdType)
+		{
+			case RGXFWIF_CCB_CMD_TYPE_UPDATE:
+			case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE:
+			case RGXFWIF_CCB_CMD_TYPE_FENCE:
+			case RGXFWIF_CCB_CMD_TYPE_FENCE_PR:
+			{
+				for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++)
+				{
+					bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE;
+
+					if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+					{
+						if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+						{
+							SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+										pszSyncInfo, CCB_SYNC_INFO_LEN);
+						}
+						else
+						{
+							SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+										pszSyncInfo, CCB_SYNC_INFO_LEN);
+						}
+					}
+
+					PVR_DUMPDEBUG_LOG("  %s  %s--Addr:0x%08x Val=0x%08x %s",
+						bLastCommand? " ": "|",
+						bLastUFO? "`": "|",
+						psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value,
+						pszSyncInfo
+						);
+				}
+				break;
+			}
+
+			default:
+				break;
+		}
+		ui32Offset = ui32NextOffset;
+	}
+}
+
+void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+				RGX_CLIENT_CCB *psCurrentClientCCB,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	volatile RGXFWIF_CCCB_CTL	*psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+	IMG_UINT8					*pui8ClientCCBBuff = psCurrentClientCCB->pui8ClientCCB;
+	IMG_UINT32					ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
+	IMG_UINT32					ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset;
+	IMG_UINT32					ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
+
+	if ((ui32SampledRdOff == ui32SampledDepOff) &&
+		(ui32SampledRdOff != ui32SampledWrOff))
+	{
+		volatile RGXFWIF_CCB_CMD_HEADER *psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)(pui8ClientCCBBuff + ui32SampledRdOff);
+		RGXFWIF_CCB_CMD_TYPE 	eCommandType = psCommandHeader->eCmdType;
+		volatile IMG_UINT8				*pui8Ptr = (IMG_UINT8 *)psCommandHeader;
+
+		/* CCB is stalled on a fence... */
+		if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR))
+		{
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+			PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext);
+			IMG_UINT32 ui32Val;
+#endif
+			RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *)(pui8Ptr + sizeof(*psCommandHeader));
+			IMG_UINT32 jj;
+
+			/* Display details of the fence object on which the context is pending */
+			PVR_DUMPDEBUG_LOG("FWCtx 0x%08X @ %d (%s) pending on %s:",
+							   sFWCommonContext.ui32Addr,
+							   ui32SampledRdOff,
+							   (IMG_PCHAR)&psCurrentClientCCB->szName,
+							   _CCBCmdTypename(eCommandType));
+			for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+			{
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+				PVR_DUMPDEBUG_LOG("  Addr:0x%08x  Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value);
+#else
+				ui32Val = 0;
+				RGXReadWithSP(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val);
+				PVR_DUMPDEBUG_LOG("  Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x",
+				                   psUFOPtr[jj].puiAddrUFO.ui32Addr,
+				                   psUFOPtr[jj].ui32Value, ui32Val);
+#endif
+			}
+
+			/* Advance psCommandHeader past the FENCE to the next command header (this will be the TA/3D command that is fenced) */
+			pui8Ptr = (IMG_UINT8 *)psUFOPtr + psCommandHeader->ui32CmdSize;
+			psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
+			if ((uintptr_t)psCommandHeader != ((uintptr_t)pui8ClientCCBBuff + ui32SampledWrOff))
+			{
+				PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X fenced command is of type %s",sFWCommonContext.ui32Addr, _CCBCmdTypename(psCommandHeader->eCmdType));
+				/* Advance psCommandHeader past the TA/3D to the next command header (this will possibly be an UPDATE) */
+				pui8Ptr += sizeof(*psCommandHeader) + psCommandHeader->ui32CmdSize;
+				psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
+				/* If the next command is an update, display details of that so we can see what would then become unblocked */
+				if ((uintptr_t)psCommandHeader != ((uintptr_t)pui8ClientCCBBuff + ui32SampledWrOff))
+				{
+					eCommandType = psCommandHeader->eCmdType;
+
+					if (eCommandType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
+					{
+						psUFOPtr = (RGXFWIF_UFO *)((IMG_UINT8 *)psCommandHeader + sizeof(*psCommandHeader));
+						PVR_DUMPDEBUG_LOG(" preventing %s:",_CCBCmdTypename(eCommandType));
+						for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+						{
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+							PVR_DUMPDEBUG_LOG("  Addr:0x%08x  Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value);
+#else
+							ui32Val = 0;
+							RGXReadWithSP(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val);
+							PVR_DUMPDEBUG_LOG("  Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x",
+							                   psUFOPtr[jj].puiAddrUFO.ui32Addr,
+							                   psUFOPtr[jj].ui32Value,
+							                   ui32Val);
+#endif
+						}
+					}
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr);
+				}
+			}
+			else
+			{
+				PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr);
+			}
+		}
+	}
+}
+
+void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGX_CLIENT_CCB *psStalledClientCCB;
+
+	PVR_ASSERT(psDevInfo);
+
+	psStalledClientCCB = (RGX_CLIENT_CCB *)psDevInfo->pvEarliestStalledClientCCB;
+
+	if (psStalledClientCCB)
+	{
+		volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psStalledClientCCB->psClientCCBCtrl;
+		IMG_UINT32 ui32SampledDepOffset = psClientCCBCtrl->ui32DepOffset;
+		IMG_UINT8                 *pui8Ptr = (psStalledClientCCB->pui8ClientCCB + ui32SampledDepOffset);
+		RGXFWIF_CCB_CMD_HEADER    *psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)(pui8Ptr);
+		RGXFWIF_CCB_CMD_TYPE      eCommandType = psCommandHeader->eCmdType;
+
+		if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR))
+		{
+			RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *)(pui8Ptr + sizeof(*psCommandHeader));
+			IMG_UINT32 jj;
+			IMG_UINT32 ui32NumUnsignalledUFOs = 0;
+			IMG_UINT32 ui32UnsignalledUFOVaddrs[PVRSRV_MAX_SYNC_PRIMS];
+
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+			/* Check that the flag indicates support for the SLR log (for backwards compatibility) */
+			if (psDevInfo->psRGXFWIfTraceBuf->ui32TracebufFlags & RGXFWIF_TRACEBUFCFG_SLR_LOG)
+			{
+				if (!psDevInfo->psRGXFWIfTraceBuf->sSLRLogFirst.aszCCBName[0])
+				{
+					OSClockMonotonicns64(&psDevInfo->psRGXFWIfTraceBuf->sSLRLogFirst.ui64Timestamp);
+					psDevInfo->psRGXFWIfTraceBuf->sSLRLogFirst.ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO));
+					psDevInfo->psRGXFWIfTraceBuf->sSLRLogFirst.ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr;
+					OSStringLCopy(psDevInfo->psRGXFWIfTraceBuf->sSLRLogFirst.aszCCBName,
+								  psStalledClientCCB->szName,
+								  MAX_CLIENT_CCB_NAME);
+				}
+				else
+				{
+					OSClockMonotonicns64(&psDevInfo->psRGXFWIfTraceBuf->sSLRLog[psDevInfo->psRGXFWIfTraceBuf->ui8SLRLogWp].ui64Timestamp);
+					psDevInfo->psRGXFWIfTraceBuf->sSLRLog[psDevInfo->psRGXFWIfTraceBuf->ui8SLRLogWp].ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO));
+					psDevInfo->psRGXFWIfTraceBuf->sSLRLog[psDevInfo->psRGXFWIfTraceBuf->ui8SLRLogWp].ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr;
+					OSStringLCopy(psDevInfo->psRGXFWIfTraceBuf->sSLRLog[psDevInfo->psRGXFWIfTraceBuf->ui8SLRLogWp].aszCCBName,
+								  psStalledClientCCB->szName,
+								  MAX_CLIENT_CCB_NAME);
+					psDevInfo->psRGXFWIfTraceBuf->ui8SLRLogWp = (psDevInfo->psRGXFWIfTraceBuf->ui8SLRLogWp + 1) % PVR_SLR_LOG_ENTRIES;
+				}
+				psDevInfo->psRGXFWIfTraceBuf->ui32ForcedUpdatesRequested++;
+			}
+#endif
+			PVR_LOG(("Fence found on context 0x%x '%s' @ %d has %d UFOs",
+			         FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr,
+			         psStalledClientCCB->szName, ui32SampledDepOffset,
+			         (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO))));
+
+			for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+			{
+				if (PVRSRV_UFO_IS_SYNC_CHECKPOINT((RGXFWIF_UFO *)&psUFOPtr[jj]))
+				{
+					IMG_UINT32 ui32ReadValue = SyncCheckpointStateFromUFO(psDevInfo->psDeviceNode,
+					                                           psUFOPtr[jj].puiAddrUFO.ui32Addr);
+					PVR_LOG(("  %d/%d FWAddr 0x%x requires 0x%x (currently 0x%x)", jj+1,
+							   (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)),
+							   psUFOPtr[jj].puiAddrUFO.ui32Addr,
+							   psUFOPtr[jj].ui32Value,
+							   ui32ReadValue));
+					/* If fence is unmet, dump debug info on it */
+					if (ui32ReadValue != psUFOPtr[jj].ui32Value)
+					{
+						/* Add to our list to pass to pvr_sync */
+						ui32UnsignalledUFOVaddrs[ui32NumUnsignalledUFOs] = psUFOPtr[jj].puiAddrUFO.ui32Addr;
+						ui32NumUnsignalledUFOs++;
+					}
+				}
+				else
+				{
+					PVR_LOG(("  %d/%d FWAddr 0x%x requires 0x%x", jj+1,
+							   (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)),
+							   psUFOPtr[jj].puiAddrUFO.ui32Addr,
+							   psUFOPtr[jj].ui32Value));
+				}
+			}
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+			if (ui32NumUnsignalledUFOs > 0)
+			{
+				IMG_UINT32 ui32NumSyncsOwned;
+				PVRSRV_ERROR eErr = SyncCheckpointDumpInfoOnStalledUFOs(ui32NumUnsignalledUFOs, &ui32UnsignalledUFOVaddrs[0], &ui32NumSyncsOwned);
+
+				PVR_LOG_IF_ERROR(eErr, "SyncCheckpointDumpInfoOnStalledUFOs() call failed.");
+			}
+#endif
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+			if (BIT_ISSET(psStalledClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED))
+			{
+				PRGXFWIF_FWCOMMONCONTEXT psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext);
+
+				PVR_LOG(("SLR disabled for FWCtx 0x%08X", psContext.ui32Addr));
+			}
+			else
+			{
+				if (ui32NumUnsignalledUFOs > 0)
+				{
+					RGXFWIF_KCCB_CMD sSignalFencesCmd;
+
+					sSignalFencesCmd.eCmdType = RGXFWIF_KCCB_CMD_FORCE_UPDATE;
+					sSignalFencesCmd.eDM = RGXFWIF_DM_GP;
+					sSignalFencesCmd.uCmdData.sForceUpdateData.psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext);
+					sSignalFencesCmd.uCmdData.sForceUpdateData.ui32CCBFenceOffset = ui32SampledDepOffset;
+
+					PVR_LOG(("Forced update command issued for FWCtx 0x%08X", sSignalFencesCmd.uCmdData.sForceUpdateData.psContext.ui32Addr));
+
+					RGXScheduleCommand(FWCommonContextGetRGXDevInfo(psStalledClientCCB->psServerCommonContext),
+									   RGXFWIF_DM_GP,
+									   &sSignalFencesCmd,
+									   0,
+									   PDUMP_FLAGS_CONTINUOUS);
+				}
+			}
+#endif
+		}
+		psDevInfo->pvEarliestStalledClientCCB = NULL;
+	}
+}
+
+/******************************************************************************
+ End of file (rgxccb.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxccb.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxccb.h
new file mode 100644
index 0000000..7d7d813
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxccb.h
@@ -0,0 +1,309 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Circular Command Buffer functionality.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Circular Command Buffer functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXCCB_H__)
+#define __RGXCCB_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgxdebug.h"
+#include "rgxdefs_km.h"
+#include "pvr_notifier.h"
+
+#define MAX_CLIENT_CCB_NAME	30
+#define SYNC_FLAG_MASK_ALL  IMG_UINT32_MAX
+
+/*
+ * This size is used when a client CCB is found to consume negligible
+ * space (e.g. a few hundred bytes to a few KBs - less than a page).
+ * In such a case, instead of allocating a CCB of only a few KBs, we
+ * allocate at least this much to leave headroom for future growth.
+ */
+#define MIN_SAFE_CCB_SIZE_LOG2         13  /* 8K (2 Pages) */
+#define MAX_SAFE_CCB_SIZE_LOG2         18  /* 256K (64 Pages) */
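+
+/* Illustrative check only (added for exposition): the log2 limits above
+ * translate to byte sizes as (1 << log2), matching the size comments.
+ */
+static_assert((1U << MIN_SAFE_CCB_SIZE_LOG2) == 8192,   "MIN_SAFE_CCB_SIZE_LOG2 is not 8K");
+static_assert((1U << MAX_SAFE_CCB_SIZE_LOG2) == 262144, "MAX_SAFE_CCB_SIZE_LOG2 is not 256K");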
+
+#define RGX_TQ3D_CCB_SIZE_LOG2         PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D
+static_assert(RGX_TQ3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+	RGX_TQ3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ3D CCB size is invalid");
+#define RGX_TQ3D_CCB_MAX_SIZE_LOG2		PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D
+static_assert(RGX_TQ3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D
+	&& RGX_TQ3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ3D max CCB size is invalid");
+
+#define RGX_TQ2D_CCB_SIZE_LOG2         PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D
+static_assert(RGX_TQ2D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+	RGX_TQ2D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ2D CCB size is invalid");
+#define RGX_TQ2D_CCB_MAX_SIZE_LOG2		PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D
+static_assert(RGX_TQ2D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D &&
+	RGX_TQ2D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ2D max CCB size is invalid");
+
+#define RGX_CDM_CCB_SIZE_LOG2          PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM
+static_assert(RGX_CDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+	RGX_CDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM CCB size is invalid");
+#define RGX_CDM_CCB_MAX_SIZE_LOG2		PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM
+static_assert(RGX_CDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM &&
+	RGX_CDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM max CCB size is invalid");
+
+#define RGX_TA_CCB_SIZE_LOG2           PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA
+static_assert(RGX_TA_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+	RGX_TA_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA CCB size is invalid");
+#define RGX_TA_CCB_MAX_SIZE_LOG2		PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA
+static_assert(RGX_TA_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA &&
+	RGX_TA_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA max CCB size is invalid");
+
+#define RGX_3D_CCB_SIZE_LOG2           PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D
+static_assert(RGX_3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+	RGX_3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D CCB size is invalid");
+#define RGX_3D_CCB_MAX_SIZE_LOG2		PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D
+static_assert(RGX_3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D &&
+	RGX_3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D max CCB size is invalid");
+
+#define RGX_KICKSYNC_CCB_SIZE_LOG2     PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC
+static_assert(RGX_KICKSYNC_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+	RGX_KICKSYNC_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync CCB size is invalid");
+#define RGX_KICKSYNC_CCB_MAX_SIZE_LOG2	PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC
+static_assert(RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC &&
+	RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync max CCB size is invalid");
+
+
+typedef struct _RGX_CLIENT_CCB_ RGX_CLIENT_CCB;
+
+/*
+	This structure is declared here as it's allocated on the heap by
+	the callers
+*/
+
+typedef struct _RGX_CCB_CMD_HELPER_DATA_ {
+	/* Data setup at command init time */
+	RGX_CLIENT_CCB  			*psClientCCB;
+	IMG_CHAR 					*pszCommandName;
+	IMG_UINT32 					ui32PDumpFlags;
+
+	IMG_UINT32					ui32ClientFenceCount;
+	PRGXFWIF_UFO_ADDR			*pauiFenceUFOAddress;
+	IMG_UINT32					*paui32FenceValue;
+	IMG_UINT32					ui32ClientUpdateCount;
+	PRGXFWIF_UFO_ADDR			*pauiUpdateUFOAddress;
+	IMG_UINT32					*paui32UpdateValue;
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	IMG_UINT32					ui32ServerSyncCount;
+	IMG_UINT32					*paui32ServerSyncFlags;
+	IMG_UINT32					ui32ServerSyncFlagMask;
+	SERVER_SYNC_PRIMITIVE		**papsServerSyncs;
+#endif
+	RGXFWIF_CCB_CMD_TYPE		eType;
+	IMG_UINT32					ui32CmdSize;
+	IMG_UINT8					*pui8DMCmd;
+	IMG_UINT32					ui32FenceCmdSize;
+	IMG_UINT32					ui32DMCmdSize;
+	IMG_UINT32					ui32UpdateCmdSize;
+	IMG_UINT32					ui32UnfencedUpdateCmdSize;
+
+	/* Data setup at command acquire time */
+	IMG_UINT8					*pui8StartPtr;
+	IMG_UINT8					*pui8ServerUpdateStart;
+	IMG_UINT8					*pui8ServerUnfencedUpdateStart;
+	IMG_UINT8					*pui8ServerFenceStart;
+	IMG_UINT32					ui32ServerFenceCount;
+	IMG_UINT32					ui32ServerUpdateCount;
+	IMG_UINT32					ui32ServerUnfencedUpdateCount;
+
+	/* Job reference fields */
+	IMG_UINT32					ui32ExtJobRef;
+	IMG_UINT32					ui32IntJobRef;
+
+	/* Workload kick information */
+	RGXFWIF_WORKEST_KICK_DATA	*psWorkEstKickData;
+
+} RGX_CCB_CMD_HELPER_DATA;
+
+#define PADDING_COMMAND_SIZE	(sizeof(RGXFWIF_CCB_CMD_HEADER))
+
+
+#define RGX_CCB_REQUESTORS(TYPE) \
+	/* for debugging purposes */ TYPE(UNDEF)	\
+	TYPE(TA)	\
+	TYPE(3D)	\
+	TYPE(CDM)	\
+	TYPE(SH)	\
+	TYPE(RS)	\
+	TYPE(TQ_3D)	\
+	TYPE(TQ_2D)	\
+	TYPE(TQ_TDM)    \
+	TYPE(KICKSYNC)
+
+/* Forms an enum constant for each type present in RGX_CCB_REQUESTORS list. The enum is mainly used as
+   an index to the aszCCBRequestors table defined in rgxccb.c. The total number of enums must adhere
+   to the following build assert.
+*/
+typedef enum _RGX_CCB_REQUESTOR_TYPE_
+{
+#define CONSTRUCT_ENUM(req) REQ_TYPE_##req,
+	RGX_CCB_REQUESTORS (CONSTRUCT_ENUM)
+#undef CONSTRUCT_ENUM
+
+	/* should always be at the end */
+	REQ_TYPE_TOTAL_COUNT,
+} RGX_CCB_REQUESTOR_TYPE;
+
+/* Tuple describing the columns of the following table */
+typedef enum _RGX_CCB_REQUESTOR_TUPLE_
+{
+	REQ_RGX_FW_CLIENT_CCB_STRING,          /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCB for this requestor */
+	REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING,  /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCBControl for this requestor */
+	REQ_PDUMP_COMMENT,                     /* Index to comment to be dumped in PDUMPs */
+
+	/* should always be at the end */
+	REQ_TUPLE_CARDINALITY,
+} RGX_CCB_REQUESTOR_TUPLE;
+
+/* Unpack U8 values from U32. */
+#define U32toU8_Unpack1(U32Packed) (U32Packed & 0xFF)
+#define U32toU8_Unpack2(U32Packed) ((U32Packed>>8) & 0xFF)
+#define U32toU8_Unpack3(U32Packed) ((U32Packed>>16) & 0xFF)
+#define U32toU8_Unpack4(U32Packed) ((U32Packed>>24) & 0xFF)
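+
+/* Illustrative counterpart only (guarded out; U8toU32_Pack2 is a hypothetical
+ * name): callers such as PVRSRVRGXCreateComputeContextKM in rgxcompute.c
+ * receive the CCB size and max-size log2 values packed into one IMG_UINT32
+ * and recover them with U32toU8_Unpack1/U32toU8_Unpack2 above.
+ */
+#if 0
+#define U8toU32_Pack2(ui8Byte0, ui8Byte1) \
+	((((IMG_UINT32)(ui8Byte1) & 0xFF) << 8) | ((IMG_UINT32)(ui8Byte0) & 0xFF))
+/* U32toU8_Unpack1(U8toU32_Pack2(a, b)) == a and U32toU8_Unpack2(...) == b */
+#endif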
+
+/* Defines for bit meanings within the ui32CCBFlags member of struct _RGX_CLIENT_CCB_
+ *
+ *   ( X = taken/in use, - = available/unused )
+ *
+ *   31                             10
+ *    |                             ||
+ *    ------------------------------XX
+ *  Bit   Meaning
+ *    0 = If set, CCB is still open and commands will be appended to it
+ *    1 = If set, do not perform Sync Lockup Recovery (SLR) for this CCB
+ */
+#define CCB_FLAGS_CCB_STATE_OPEN (0)  /*!< This bit is set to indicate CCB is in the 'Open' state. */
+#define CCB_FLAGS_SLR_DISABLED   (1)  /*!< This bit is set to disable Sync Lockup Recovery (SLR) for this CCB. */
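+
+/* Illustrative usage only (guarded out): the flag bits above are manipulated
+ * with the BIT_* helpers, as rgxccb.c does, e.g. when deciding whether Sync
+ * Lockup Recovery may be attempted for a CCB. psClientCCB is a hypothetical
+ * RGX_CLIENT_CCB pointer (the struct is opaque outside rgxccb.c).
+ */
+#if 0
+	if (BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED))
+	{
+		/* SLR must be skipped for this CCB */
+	}
+#endif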
+
+
+/*	Table containing an array of strings for each requestor type in the list of RGX_CCB_REQUESTORS. In addition to its use in
+	this module (rgxccb.c), this table is also used to access strings to be dumped in PDUMP comments, hence it is marked
+	extern for use in other modules.
+*/
+extern IMG_CHAR *const aszCCBRequestors[][REQ_TUPLE_CARDINALITY];
+
+PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB,
+					IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO	*psDevInfo,
+						  IMG_UINT32			ui32CCBSizeLog2,
+						  IMG_UINT32			ui32CCBMaxSizeLog2,
+						  CONNECTION_DATA		*psConnectionData,
+						  RGX_CCB_REQUESTOR_TYPE	eCCBRequestor,
+						  RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+						  RGX_CLIENT_CCB		**ppsClientCCB,
+						  DEVMEM_MEMDESC 		**ppsClientCCBMemDesc,
+						  DEVMEM_MEMDESC 		**ppsClientCCBCtlMemDesc);
+
+void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB);
+
+PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
+										IMG_UINT32		ui32CmdSize,
+										void			**ppvBufferSpace,
+										IMG_UINT32		ui32PDumpFlags);
+
+void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
+								IMG_UINT32		ui32CmdSize,
+								IMG_UINT32		ui32PDumpFlags);
+
+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB);
+
+PVRSRV_ERROR RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB            *psClientCCB,
+                                    IMG_UINT32                ui32ClientFenceCount,
+                                    PRGXFWIF_UFO_ADDR         *pauiFenceUFOAddress,
+                                    IMG_UINT32                *paui32FenceValue,
+                                    IMG_UINT32                ui32ClientUpdateCount,
+                                    PRGXFWIF_UFO_ADDR         *pauiUpdateUFOAddress,
+                                    IMG_UINT32                *paui32UpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+                                    IMG_UINT32                ui32ServerSyncCount,
+                                    IMG_UINT32                *paui32ServerSyncFlags,
+                                    IMG_UINT32                ui32ServerSyncFlagMask,
+                                    SERVER_SYNC_PRIMITIVE     **papsServerSyncs,
+#endif
+                                    IMG_UINT32                ui32CmdSize,
+                                    IMG_PBYTE                 pui8DMCmd,
+                                    RGXFWIF_CCB_CMD_TYPE      eType,
+                                    IMG_UINT32                ui32ExtJobRef,
+                                    IMG_UINT32                ui32IntJobRef,
+                                    IMG_UINT32                ui32PDumpFlags,
+                                    RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
+                                    IMG_CHAR                  *pszCommandName,
+                                    IMG_BOOL                  bCCBStateOpen,
+                                    RGX_CCB_CMD_HELPER_DATA   *psCmdHelperData);
+
+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
+									   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData);
+
+void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
+							   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+							   const IMG_CHAR *pcszDMName,
+							   IMG_UINT32 ui32CtxAddr);
+
+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount,
+								   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData);
+
+IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+                                        IMG_UINT32              ui32Cmdindex);
+
+IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData);
+
+void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+				RGX_CLIENT_CCB  *psCurrentClientCCB,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile);
+
+void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+			PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+			RGX_CLIENT_CCB *psCurrentClientCCB,
+			DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile);
+
+PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB  *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM);
+
+void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif /* __RGXCCB_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxcompute.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxcompute.c
new file mode 100644
index 0000000..56635db
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxcompute.c
@@ -0,0 +1,1090 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Compute routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Compute routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "srvkm.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxcompute.h"
+#include "rgx_bvnc_defs_km.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "htbuffer.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_memallocflags.h"
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_CMP_UFO_DUMP	0
+
+//#define CMP_CHECKPOINT_DEBUG 1
+
+#if defined(CMP_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+struct _RGX_SERVER_COMPUTE_CONTEXT_ {
+	PVRSRV_DEVICE_NODE			*psDeviceNode;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	DEVMEM_MEMDESC				*psFWComputeContextMemDesc;
+	DEVMEM_MEMDESC				*psFWFrameworkMemDesc;
+	DEVMEM_MEMDESC				*psFWComputeContextStateMemDesc;
+	PVRSRV_CLIENT_SYNC_PRIM		*psSync;
+	DLLIST_NODE					sListNode;
+	SYNC_ADDR_LIST				sSyncAddrListFence;
+	SYNC_ADDR_LIST				sSyncAddrListUpdate;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POS_LOCK                    		 hLock;
+#endif
+};
+
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA			*psConnection,
+											 PVRSRV_DEVICE_NODE			*psDeviceNode,
+											 IMG_UINT32					ui32Priority,
+											 IMG_UINT32					ui32FrameworkCommandSize,
+											 IMG_PBYTE					pbyFrameworkCommand,
+											 IMG_HANDLE					hMemCtxPrivData,
+											 IMG_DEV_VIRTADDR			sServicesSignalAddr,
+											 IMG_UINT32					ui32StaticComputecontextStateSize,
+											 IMG_PBYTE					pStaticComputecontextState,
+											 IMG_UINT32					ui32PackedCCBSizeU88,
+											 RGX_SERVER_COMPUTE_CONTEXT	**ppsComputeContext)
+{
+	PVRSRV_RGXDEV_INFO 			*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC				*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_SERVER_COMPUTE_CONTEXT	*psComputeContext;
+	RGX_COMMON_CONTEXT_INFO		sInfo;
+	PVRSRV_ERROR				eError = PVRSRV_OK;
+	RGXFWIF_FWCOMPUTECONTEXT	*psFWComputeContext;
+	IMG_UINT32					ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2;
+
+	/* Prepare cleanup struct */
+	*ppsComputeContext = NULL;
+
+	psComputeContext = OSAllocZMem(sizeof(*psComputeContext));
+	if (psComputeContext == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/*
+		Create the FW compute context, this has the CDM common
+		context embedded within it
+	 */
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_FWCOMPUTECONTEXT),
+			RGX_FWCOMCTX_ALLOCFLAGS,
+			"FwComputeContext",
+			&psComputeContext->psFWComputeContextMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_fwcomputecontext;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psComputeContext->hLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to create lock (%s)",
+				 __func__,
+				 PVRSRVGetErrorString(eError)));
+		goto fail_createlock;
+	}
+#endif
+
+	psComputeContext->psDeviceNode = psDeviceNode;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psComputeContext->psSync,
+						   "compute cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to allocate cleanup sync (%s)",
+				 __func__,
+				 PVRSRVGetErrorString(eError)));
+		goto fail_syncalloc;
+	}
+
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	*/
+	PDUMPCOMMENT("Allocate RGX firmware compute context suspend state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							  sizeof(RGXFWIF_COMPUTECTX_STATE),
+							  RGX_FWCOMCTX_ALLOCFLAGS,
+							  "FwComputeContextState",
+							  &psComputeContext->psFWComputeContextStateMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to allocate firmware GPU context suspend state (%d)",
+				 __func__,
+				 eError));
+		goto fail_contextsuspendalloc;
+	}
+
+	/*
+	 * Create the FW framework buffer
+	 */
+	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+										&psComputeContext->psFWFrameworkMemDesc,
+										ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to allocate firmware GPU framework state (%d)",
+				 __func__,
+				 eError));
+		goto fail_frameworkcreate;
+	}
+
+	/* Copy the Framework client data into the framework buffer */
+	eError = PVRSRVRGXFrameworkCopyCommand(psComputeContext->psFWFrameworkMemDesc,
+										   pbyFrameworkCommand,
+										   ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to populate the framework buffer (%s)",
+				 __func__,
+				 PVRSRVGetErrorString(eError)));
+		goto fail_frameworkcopy;
+	}
+
+	sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc;
+
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) &&
+		RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT) == 2 &&
+		RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIGNAL_SNOOPING))
+	{
+		sInfo.psResumeSignalAddr = &sServicesSignalAddr;
+	}
+	else
+	{
+		PVR_UNREFERENCED_PARAMETER(sServicesSignalAddr);
+	}
+
+	ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88);
+	ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88);
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 REQ_TYPE_CDM,
+									 RGXFWIF_DM_CDM,
+									 psComputeContext->psFWComputeContextMemDesc,
+									 offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext),
+									 psFWMemContextMemDesc,
+									 psComputeContext->psFWComputeContextStateMemDesc,
+									 ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_CDM_CCB_SIZE_LOG2,
+									 ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_CDM_CCB_MAX_SIZE_LOG2,
+									 ui32Priority,
+									 &sInfo,
+									 &psComputeContext->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc,
+			(void **)&psFWComputeContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_acquire_cpu_mapping;
+	}
+
+	OSDeviceMemCopy(&psFWComputeContext->sStaticComputecontextState, pStaticComputecontextState, ui32StaticComputecontextStateSize);
+	DevmemPDumpLoadMem(psComputeContext->psFWComputeContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS);
+	DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc);
+
+	SyncAddrListInit(&psComputeContext->sSyncAddrListFence);
+	SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate);
+
+	OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+
+	*ppsComputeContext = psComputeContext;
+	return PVRSRV_OK;
+
+fail_acquire_cpu_mapping:
+	FWCommonContextFree(psComputeContext->psServerCommonContext);
+fail_contextalloc:
+fail_frameworkcopy:
+	DevmemFwFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+	DevmemFwFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc);
+fail_contextsuspendalloc:
+	SyncPrimFree(psComputeContext->psSync);
+fail_syncalloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psComputeContext->hLock);
+fail_createlock:
+#endif
+	DevmemFwFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc);
+fail_fwcomputecontext:
+	OSFreeMem(psComputeContext);
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+	PVRSRV_ERROR				eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode,
+											  psComputeContext->psServerCommonContext,
+											  psComputeContext->psSync,
+											  RGXFWIF_DM_CDM,
+											  PDUMP_FLAGS_NONE);
+
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+	/* ... it has so we can free its resources */
+
+	OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+	dllist_remove_node(&(psComputeContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+
+	FWCommonContextFree(psComputeContext->psServerCommonContext);
+	DevmemFwFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+	DevmemFwFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc);
+	DevmemFwFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc);
+	SyncPrimFree(psComputeContext->psSync);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psComputeContext->hLock);
+#endif
+	OSFreeMem(psComputeContext);
+
+	return PVRSRV_OK;
+}
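+
+/*
+	Illustrative note (guarded out of the build): PVRSRV_ERROR_RETRY returned
+	above is not fatal - it means the FW has not finished with the context yet
+	and the caller is expected to retry the destroy later. A hypothetical
+	caller-side pattern (ExampleDestroyWithRetry is not a real function):
+*/
+#if 0
+static void ExampleDestroyWithRetry(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+	PVRSRV_ERROR eError = PVRSRVRGXDestroyComputeContextKM(psComputeContext);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		/* FW still owns the context; defer cleanup and call again later */
+	}
+}
+#endif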
+
+
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT	*psComputeContext,
+								IMG_UINT32					ui32ClientCacheOpSeqNum,
+								IMG_UINT32					ui32ClientFenceCount,
+								SYNC_PRIMITIVE_BLOCK		**pauiClientFenceUFOSyncPrimBlock,
+								IMG_UINT32					*paui32ClientFenceSyncOffset,
+								IMG_UINT32					*paui32ClientFenceValue,
+								IMG_UINT32					ui32ClientUpdateCount,
+								SYNC_PRIMITIVE_BLOCK		**pauiClientUpdateUFOSyncPrimBlock,
+								IMG_UINT32					*paui32ClientUpdateSyncOffset,
+								IMG_UINT32					*paui32ClientUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+								IMG_UINT32					ui32ServerSyncPrims,
+								IMG_UINT32					*paui32ServerSyncFlags,
+								SERVER_SYNC_PRIMITIVE		**pasServerSyncs,
+#endif
+								PVRSRV_FENCE				iCheckFence,
+								PVRSRV_TIMELINE				iUpdateTimeline,
+								PVRSRV_FENCE				*piUpdateFence,
+								IMG_CHAR					pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+								IMG_UINT32					ui32CmdSize,
+								IMG_PBYTE					pui8DMCmd,
+								IMG_UINT32					ui32PDumpFlags,
+								IMG_UINT32					ui32ExtJobRef)
+{
+	RGXFWIF_KCCB_CMD		sCmpKCCBCmd;
+	RGX_CCB_CMD_HELPER_DATA	asCmdHelperData[1];
+	PVRSRV_ERROR			eError;
+	PVRSRV_ERROR			eError2;
+	IMG_UINT32				i;
+	IMG_UINT32				ui32CDMCmdOffset = 0;
+	PVRSRV_RGXDEV_INFO      *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->psServerCommonContext);
+	IMG_UINT32              ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+	IMG_UINT32				ui32FWCtx;
+	IMG_BOOL				bCCBStateOpen = IMG_FALSE;
+
+	IMG_UINT32 ui32IntClientFenceCount = 0;
+	PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+	IMG_UINT32 *paui32IntFenceValue = NULL;
+	IMG_UINT32 ui32IntClientUpdateCount = 0;
+	PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+	IMG_UINT32 *paui32IntUpdateValue = NULL;
+	PVRSRV_FENCE  iUpdateFence = PVRSRV_NO_FENCE;
+	IMG_UINT64               uiCheckFenceUID = 0;
+	IMG_UINT64               uiUpdateFenceUID = 0;
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+	PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+	IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+	void *pvUpdateFenceFinaliseData = NULL;
+
+	if (iUpdateTimeline >= 0 && !piUpdateFence)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#else /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+	if (iUpdateTimeline >= 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: Providing update timeline (%d) in non-supporting driver",
+			__func__, iUpdateTimeline));
+	}
+	if (iCheckFence >= 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: Providing check fence (%d) in non-supporting driver",
+			__func__, iCheckFence));
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+	/* Ensure the fence name string is null-terminated (required for safety) */
+	pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH - 1] = '\0';
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psComputeContext->hLock);
+#endif
+
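+	/*
+	 * Convert the client fence and update sync prim blocks/offsets into
+	 * firmware virtual addresses that the FW can check and update.
+	 */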
+	ui32IntClientFenceCount = ui32ClientFenceCount;
+	eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence,
+									ui32ClientFenceCount,
+									pauiClientFenceUFOSyncPrimBlock,
+									paui32ClientFenceSyncOffset);
+	if (eError != PVRSRV_OK)
+	{
+		goto err_populate_sync_addr_list;
+	}
+	if (ui32IntClientFenceCount && !pauiIntFenceUFOAddress)
+	{
+		pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+	}
+	paui32IntFenceValue = paui32ClientFenceValue;
+
+	ui32IntClientUpdateCount = ui32ClientUpdateCount;
+
+	eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate,
+									ui32ClientUpdateCount,
+									pauiClientUpdateUFOSyncPrimBlock,
+									paui32ClientUpdateSyncOffset);
+	if (eError != PVRSRV_OK)
+	{
+		goto err_populate_sync_addr_list;
+	}
+	if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress)
+	{
+		pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+	}
+	paui32IntUpdateValue = paui32ClientUpdateValue;
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	/* Sanity check the server fences */
+	for (i=0; i<ui32ServerSyncPrims; i++)
+	{
+		if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on CDM) must fence", __func__));
+			eError = PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+			goto err_populate_sync_addr_list;
+		}
+	}
+#endif
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+	/* Resolve the sync checkpoints that make up the input fence */
+	eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext,
+										iCheckFence,
+										&ui32FenceSyncCheckpointCount,
+										&apsFenceSyncCheckpoints,
+	                                    &uiCheckFenceUID);
+	if (eError != PVRSRV_OK)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError));
+		goto fail_resolve_input_fence;
+	}
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(CMP_CHECKPOINT_DEBUG)
+	if (ui32FenceSyncCheckpointCount > 0)
+	{
+		IMG_UINT32 ii;
+		for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+		{
+			PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+		}
+	}
+#endif
+	/* Create the output fence (if required) */
+	if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d,  psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+		eError = SyncCheckpointCreateFence(psComputeContext->psDeviceNode,
+		                                   pszUpdateFenceName,
+										   iUpdateTimeline,
+										   psComputeContext->psDeviceNode->hSyncCheckpointContext,
+										   &iUpdateFence,
+										   &uiUpdateFenceUID,
+										   &pvUpdateFenceFinaliseData,
+										   &psUpdateSyncCheckpoint,
+										   (void*)&psFenceTimelineUpdateSync,
+										   &ui32FenceTimelineUpdateValue);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __func__, eError));
+			goto fail_create_output_fence;
+		}
+
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync));
+		/* Append the sync prim update for the timeline (if required) */
+		if (psFenceTimelineUpdateSync)
+		{
+			IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+			/* Allocate memory to hold the list of update values (including our timeline update) */
+			pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+			if (!pui32IntAllocatedUpdateValues)
+			{
+				/* Failed to allocate memory */
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto fail_alloc_update_values_mem;
+			}
+			OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+			/* Copy the update values into the new memory, then append our timeline update value */
+			OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+#if defined(CMP_CHECKPOINT_DEBUG)
+			if (ui32IntClientUpdateCount > 0)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+			/* Now set the additional update value */
+			pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+			*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+			ui32IntClientUpdateCount++;
+			/* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
+			paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__,  (void*)psFenceTimelineUpdateSync));
+			/* Now append the timeline sync prim addr to the compute context update list */
+			SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate,
+			                           psFenceTimelineUpdateSync);
+#if defined(CMP_CHECKPOINT_DEBUG)
+			if (ui32IntClientUpdateCount > 0)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+			/* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+			paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+		}
+	}
+
+	/* Append the checks (from input fence) */
+	if (ui32FenceSyncCheckpointCount > 0)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence));
+#if defined(CMP_CHECKPOINT_DEBUG)
+		if (ui32IntClientUpdateCount > 0)
+		{
+			IMG_UINT32 iii;
+			IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+			for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+				pui32Tmp++;
+			}
+		}
+#endif
+		SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence,
+									  ui32FenceSyncCheckpointCount,
+									  apsFenceSyncCheckpoints);
+		if (!pauiIntFenceUFOAddress)
+		{
+			pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+		}
+		ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+	}
+#if defined(CMP_CHECKPOINT_DEBUG)
+	if (ui32IntClientUpdateCount > 0)
+	{
+		IMG_UINT32 iii;
+		IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue;
+
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Dumping %d update values (paui32IntUpdateValue=<%p>)...", __func__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue));
+		for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: paui32IntUpdateValue[%d] = <%p>", __func__, iii, (void*)pui32Tmp));
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __func__, iii, *pui32Tmp));
+			pui32Tmp++;
+		}
+	}
+#endif
+
+	if (psUpdateSyncCheckpoint)
+	{
+		/* Append the update (from output fence) */
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint));
+		SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
+									  1,
+									  &psUpdateSyncCheckpoint);
+		if (!pauiIntUpdateUFOAddress)
+		{
+			pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+		}
+		ui32IntClientUpdateCount++;
+#if defined(CMP_CHECKPOINT_DEBUG)
+		if (ui32IntClientUpdateCount > 0)
+		{
+			IMG_UINT32 iii;
+			IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount));
+			for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+				pui32Tmp++;
+			}
+		}
+#endif
+	}
+	CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if (ENABLE_CMP_UFO_DUMP == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: dumping Compute (CDM) fence/updates syncs...", __func__));
+		{
+			IMG_UINT32 ii;
+			PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+			IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+			PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+			IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+			/* Dump Fence syncs and Update syncs */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) fence syncs (&psComputeContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psComputeContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+			for (ii=0; ii<ui32IntClientFenceCount; ii++)
+			{
+				if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+					pui32TmpIntFenceValue++;
+				}
+				psTmpIntFenceUFOAddress++;
+			}
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) update syncs (&psComputeContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+			for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+			{
+				if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+					pui32TmpIntUpdateValue++;
+				}
+				psTmpIntUpdateUFOAddress++;
+			}
+		}
+#endif
+
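+	/*
+	 * Initialise the CCB command helper with the merged fence/update lists,
+	 * then acquire the space needed in the client CCB; the command is only
+	 * committed (released) further below once nothing else can fail.
+	 */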
+	eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext),
+	                                ui32IntClientFenceCount,
+	                                pauiIntFenceUFOAddress,
+	                                paui32IntFenceValue,
+	                                ui32IntClientUpdateCount,
+	                                pauiIntUpdateUFOAddress,
+	                                paui32IntUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	                                ui32ServerSyncPrims,
+	                                paui32ServerSyncFlags,
+	                                SYNC_FLAG_MASK_ALL,
+	                                pasServerSyncs,
+#endif
+	                                ui32CmdSize,
+	                                pui8DMCmd,
+	                                RGXFWIF_CCB_CMD_TYPE_CDM,
+	                                ui32ExtJobRef,
+	                                ui32IntJobRef,
+	                                ui32PDumpFlags,
+	                                NULL,
+	                                "Compute",
+	                                bCCBStateOpen,
+	                                asCmdHelperData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdinit;
+	}
+
+	eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData),
+	                                   asCmdHelperData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdacquire;
+	}
+
+
+	/*
+		We should reserve space in the kernel CCB here and fill in the command
+		directly.
+		This is so that, if there isn't space in the kernel CCB, we can return
+		with retry back to the services client before we commit to any
+		operations.
+	*/
+
+	/*
+		We might only be kicking to flush out a padding packet, so only submit
+		the command if the create was successful.
+	*/
+	if (eError == PVRSRV_OK)
+	{
+		/*
+			All the required resources are ready at this point; we cannot fail
+			from here, so take the required server sync operations and commit
+			all the resources.
+		*/
+
+		ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
+		RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr);
+	}
+
+	/* Construct the kernel compute CCB command. */
+	sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+	sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
+	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+	ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr;
+
+	HTBLOGK(HTB_SF_MAIN_KICK_CDM,
+			sCmpKCCBCmd.uCmdData.sCmdKickData.psContext,
+			ui32CDMCmdOffset
+			);
+	RGXSRV_HWPERF_ENQ(psComputeContext,
+	                  OSGetCurrentClientProcessIDKM(),
+	                  ui32FWCtx,
+	                  ui32ExtJobRef,
+	                  ui32IntJobRef,
+	                  RGX_HWPERF_KICK_TYPE_CDM,
+	                  iCheckFence,
+	                  iUpdateFence,
+	                  iUpdateTimeline,
+	                  uiCheckFenceUID,
+	                  uiUpdateFenceUID,
+	                  NO_DEADLINE,
+	                  NO_CYCEST);
+
+	/*
+	 * Submit the compute command to the firmware.
+	 */
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+									RGXFWIF_DM_CDM,
+									&sCmpKCCBCmd,
+									ui32ClientCacheOpSeqNum,
+									ui32PDumpFlags);
+		if (eError2 != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError2 != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s failed to schedule kernel CCB command (%s)",
+				 __func__,
+				 PVRSRVGetErrorString(eError2)));
+	}
+	else
+	{
+		PVRGpuTraceEnqueueEvent(psComputeContext->psDeviceNode->pvDevice,
+		                        ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+		                        RGX_HWPERF_KICK_TYPE_CDM);
+	}
+	/*
+	 * Now check eError (which may hold an error returned by our earlier call
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command
+	 * first, so we only check it now...
+	 */
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdacquire;
+	}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(NO_HARDWARE)
+	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+	if (psUpdateSyncCheckpoint)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+	}
+	if (psFenceTimelineUpdateSync)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+	}
+	SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+	*piUpdateFence = iUpdateFence;
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+	{
+		SyncCheckpointFinaliseFence(psComputeContext->psDeviceNode, iUpdateFence,
+		                            pvUpdateFenceFinaliseData,
+									psUpdateSyncCheckpoint, pszUpdateFenceName);
+	}
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psComputeContext->hLock);
+#endif
+
+	return PVRSRV_OK;
+
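+/*
+ * Error paths: unwind in reverse order of construction - roll back any
+ * checkpoints appended to the sync address lists, roll back the output
+ * fence, drop the references taken on the input fence's checkpoints and
+ * free any allocated update-value list before releasing the context lock.
+ */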
+fail_cmdinit:
+fail_cmdacquire:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence);
+	SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+	if (iUpdateFence != PVRSRV_NO_FENCE)
+	{
+		SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+fail_create_output_fence:
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+fail_resolve_input_fence:
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+err_populate_sync_addr_list:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psComputeContext->hLock);
+#endif
+	return eError;
+}
+
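+/*
+ * PVRSRVRGXFlushComputeDataKM
+ *
+ * Submit an SLC flush (without invalidate) for the context's CDM data
+ * master and wait for the firmware to signal completion on the context
+ * sync prim.
+ */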
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+	RGXFWIF_KCCB_CMD sFlushCmd;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush");
+#endif
+	sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+	sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE;
+	sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE;
+	sFlushCmd.uCmdData.sSLCFlushInvalData.eDM = RGXFWIF_DM_CDM;
+	sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psComputeContext->hLock);
+#endif
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+									RGXFWIF_DM_CDM,
+									&sFlushCmd,
+									0,
+									PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to schedule SLC flush command (%s)",
+				 __func__,
+				 PVRSRVGetErrorString(eError)));
+	}
+	else
+	{
+		/* Wait for the SLC flush to complete */
+		eError = RGXWaitForFWOp(psComputeContext->psDeviceNode->pvDevice,
+								RGXFWIF_DM_CDM,
+								psComputeContext->psSync,
+								PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Compute flush aborted (%s)",
+					 __func__,
+					 PVRSRVGetErrorString(eError)));
+		}
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psComputeContext->hLock);
+#endif
+	return eError;
+}
+
+
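+/*
+ * PVRSRVRGXNotifyComputeWriteOffsetUpdateKM
+ *
+ * Only supported on cores with CDM control stream format 2; other cores
+ * return PVRSRV_ERROR_NOT_SUPPORTED.
+ */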
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT  *psComputeContext)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) &&
+		2 == RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT))
+	{
+
+		RGXFWIF_KCCB_CMD  sKCCBCmd;
+		PVRSRV_ERROR      eError;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockAcquire(psComputeContext->hLock);
+#endif
+
+		/* Schedule the firmware command */
+		sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+		sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+										RGXFWIF_DM_CDM,
+										&sKCCBCmd,
+										0,
+										PDUMP_FLAGS_NONE);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to schedule the FW command %d (%s)",
+					__func__,
+					eError,
+					PVRSRVGetErrorString(eError)));
+		}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psComputeContext->hLock);
+#endif
+		return eError;
+	}
+	else
+	{
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+	}
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                  PVRSRV_DEVICE_NODE * psDeviceNode,
+												  RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+												  IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psComputeContext->hLock);
+#endif
+
+	eError = ContextSetPriority(psComputeContext->psServerCommonContext,
+								psConnection,
+								psComputeContext->psDeviceNode->pvDevice,
+								ui32Priority,
+								RGXFWIF_DM_CDM);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __func__, PVRSRVGetErrorString(eError)));
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psComputeContext->hLock);
+#endif
+	return eError;
+}
+
+/*
+ * PVRSRVRGXGetLastComputeContextResetReasonKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastComputeContextResetReasonKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+                                                         IMG_UINT32 *peLastResetReason,
+														 IMG_UINT32 *pui32LastResetJobRef)
+{
+	PVR_ASSERT(psComputeContext != NULL);
+	PVR_ASSERT(peLastResetReason != NULL);
+	PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+	*peLastResetReason = FWCommonContextGetLastResetReason(psComputeContext->psServerCommonContext,
+	                                                       pui32LastResetJobRef);
+
+	return PVRSRV_OK;
+}
+
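+/*
+ * Walk the device's list of compute contexts under the read lock and dump
+ * the debug state of each context's FW common context.
+ */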
+void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                          DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                          void *pvDumpDebugFile,
+                          IMG_UINT32 ui32VerbLevel)
+{
+	DLLIST_NODE *psNode, *psNext;
+	OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+	dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+		DumpFWCommonContextInfo(psCurrentServerComputeCtx->psServerCommonContext,
+		                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+	}
+	OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_UINT32 ui32ContextBitMask = 0;
+	DLLIST_NODE *psNode, *psNext;
+	OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+	dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+
+		if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->psServerCommonContext, RGX_KICK_TYPE_DM_CDM)
+			== PVRSRV_ERROR_CCCB_STALLED)
+		{
+			ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM;
+		}
+	}
+	OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+	return ui32ContextBitMask;
+}
+
+/******************************************************************************
+ End of file (rgxcompute.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxcompute.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxcompute.h
new file mode 100644
index 0000000..82a0598
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxcompute.h
@@ -0,0 +1,170 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX compute functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX compute functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXCOMPUTE_H__)
+#define __RGXCOMPUTE_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+
+
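+/* Opaque handle to a server-side compute context (defined in rgxcompute.c) */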
+typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT;
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXCreateComputeContextKM
+
+ @Description
+	Server-side implementation of RGXCreateComputeContext
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA			*psConnection,
+											 PVRSRV_DEVICE_NODE			*psDeviceNode,
+											 IMG_UINT32					ui32Priority,
+											 IMG_UINT32					ui32FrameworkRegisterSize,
+											 IMG_PBYTE					pbyFrameworkRegisters,
+											 IMG_HANDLE					hMemCtxPrivData,
+											 IMG_DEV_VIRTADDR			sServicesSignalAddr,
+											 IMG_UINT32					ui32StaticComputecontextStateSize,
+											 IMG_PBYTE					pStaticComputecontextState,
+											 IMG_UINT32					ui32PackedCCBSizeU88,
+											 RGX_SERVER_COMPUTE_CONTEXT	**ppsComputeContext);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXDestroyComputeContextKM
+
+ @Description
+	Server-side implementation of RGXDestroyComputeContext
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXKickCDMKM
+
+ @Description
+	Server-side implementation of RGXKickCDM
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT	*psComputeContext,
+								IMG_UINT32					ui32ClientCacheOpSeqNum,
+								IMG_UINT32					ui32ClientFenceCount,
+								SYNC_PRIMITIVE_BLOCK		**pauiClientFenceUFOSyncPrimBlock,
+								IMG_UINT32					*paui32ClientFenceSyncOffset,
+								IMG_UINT32					*paui32ClientFenceValue,
+								IMG_UINT32					ui32ClientUpdateCount,
+								SYNC_PRIMITIVE_BLOCK		**pauiClientUpdateUFOSyncPrimBlock,
+								IMG_UINT32					*paui32ClientUpdateSyncOffset,
+								IMG_UINT32					*paui32ClientUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+								IMG_UINT32					ui32ServerSyncPrims,
+								IMG_UINT32					*paui32ServerSyncFlags,
+								SERVER_SYNC_PRIMITIVE		**pasServerSyncs,
+#endif
+								PVRSRV_FENCE				iCheckFence,
+								PVRSRV_TIMELINE				iUpdateTimeline,
+								PVRSRV_FENCE				*piUpdateFence,
+								IMG_CHAR					pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+								IMG_UINT32					ui32CmdSize,
+								IMG_PBYTE					pui8DMCmd,
+								IMG_UINT32					ui32PDumpFlags,
+								IMG_UINT32					ui32ExtJobRef);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXFlushComputeDataKM
+
+ @Description
+	Server-side implementation of RGXFlushComputeData
+
+ @Input psComputeContext - Compute context to flush
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+/*!
+*******************************************************************************
+
+ @Function	    PVRSRVRGXNotifyComputeWriteOffsetUpdateKM
+ @Description   Server-side implementation of RGXNotifyComputeWriteOffsetUpdate
+
+ @Input         psComputeContext - Compute context to notify
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+												  PVRSRV_DEVICE_NODE *psDeviceNode,
+												  RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+												  IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXGetLastComputeContextResetReasonKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+                                                         IMG_UINT32 *peLastResetReason,
+                                                         IMG_UINT32 *pui32LastResetJobRef);
+
+/* Debug - Dump debug info of compute contexts on this device */
+void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                          DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                          void *pvDumpDebugFile,
+                          IMG_UINT32 ui32VerbLevel);
+
+/* Debug/Watchdog - check if client compute contexts are stalled */
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXCOMPUTE_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxdebug.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxdebug.c
new file mode 100644
index 0000000..620a4c9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxdebug.c
@@ -0,0 +1,5417 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX debug information
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX debugging functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "img_defs.h"
+#include "rgxdefs_km.h"
+#include "rgxdevice.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "cache_km.h"
+#include "osfunc.h"
+
+#include "rgxdebug.h"
+#include "pvrversion.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "rgxutils.h"
+#include "tlstream.h"
+#include "rgxfwutils.h"
+#include "pvrsrv.h"
+#include "services_km.h"
+
+#include "rgxfwimageutils.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_utils.h"
+#include "rgx_fwif_km.h"
+#include "rgx_fwif_sf.h"
+#include "rgxfw_log_helper.h"
+#include "fwtrace_string.h"
+#include "rgxfwimageutils.h"
+#include "rgxfwload.h"
+
+#include "rgxta3d.h"
+#include "rgxkicksync.h"
+#include "rgxcompute.h"
+#include "rgxtransfer.h"
+#include "rgxtdmtransfer.h"
+#include "rgxtimecorr.h"
+#include "rgx_options.h"
+#include "rgxinit.h"
+#include "devicemem_history_server.h"
+#include "info_page.h"
+#include "rgx_bvnc_defs_km.h"
+#define PVR_DUMP_DRIVER_INFO(x, y)														\
+	PVR_DUMPDEBUG_LOG("%s info: %d.%d @ %8d (%s) build options: 0x%08x",				\
+					   (x),																\
+					   PVRVERSION_UNPACK_MAJ((y).ui32BuildVersion),						\
+					   PVRVERSION_UNPACK_MIN((y).ui32BuildVersion),						\
+					   (y).ui32BuildRevision,											\
+					   (BUILD_TYPE_DEBUG == (y).ui32BuildType) ? "debug":"release",		\
+					   (y).ui32BuildOptions);
+
+#define PVR_DUMP_FIRMWARE_INFO(x)														\
+	PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x",				\
+						PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion),						\
+						PVRVERSION_UNPACK_MIN((x).ui32DDKVersion),						\
+						(x).ui32DDKBuild,												\
+						((x).ui32BuildOptions & OPTIONS_DEBUG_MASK) ? "debug":"release",\
+						(x).ui32BuildOptions);
+
+
+#define RGX_DEBUG_STR_SIZE		(150)
+#define MAX_FW_DESCRIPTION_LENGTH	(500u)
+
+#define RGX_CR_BIF_CAT_BASE0                              (0x1200U)
+#define RGX_CR_BIF_CAT_BASE1                              (0x1208U)
+
+#define RGX_CR_BIF_CAT_BASEN(n) \
+	(RGX_CR_BIF_CAT_BASE0 + \
+	((RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0) * (n)))
+
+
+#define RGXDBG_BIF_IDS \
+	X(BIF0)\
+	X(BIF1)\
+	X(TEXAS_BIF)\
+	X(DPX_BIF)
+
+#define RGXDBG_SIDEBAND_TYPES \
+	X(META)\
+	X(TLA)\
+	X(DMA)\
+	X(VDMM)\
+	X(CDM)\
+	X(IPP)\
+	X(PM)\
+	X(TILING)\
+	X(MCU)\
+	X(PDS)\
+	X(PBE)\
+	X(VDMS)\
+	X(IPF)\
+	X(ISP)\
+	X(TPF)\
+	X(USCS)\
+	X(PPP)\
+	X(VCE)\
+	X(TPF_CPF)\
+	X(IPF_CPF)\
+	X(FBCDC)
+
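+/*
+ * The X-macro lists above are expanded twice: once into enum values
+ * (RGXDBG_<NAME>) and once into matching string tables (#NAME), keeping
+ * the identifiers and their printable names in sync from one definition.
+ */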
+typedef enum
+{
+#define X(NAME) RGXDBG_##NAME,
+	RGXDBG_BIF_IDS
+#undef X
+} RGXDBG_BIF_ID;
+
+typedef enum
+{
+#define X(NAME) RGXDBG_##NAME,
+	RGXDBG_SIDEBAND_TYPES
+#undef X
+} RGXDBG_SIDEBAND_TYPE;
+
+static const IMG_CHAR *const pszPowStateName[] =
+{
+#define X(NAME)	#NAME,
+	RGXFWIF_POW_STATES
+#undef X
+};
+
+static const IMG_CHAR *const pszBIFNames[] =
+{
+#define X(NAME)	#NAME,
+	RGXDBG_BIF_IDS
+#undef X
+};
+
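+/* Record of recently captured MMU faults: gui32FaultIndex indexes into
+ * gsMMUFaultData (RGXFWIF_HWINFO_MAX entries), presumably wrapping as new
+ * faults are captured. */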
+static IMG_UINT32     gui32FaultIndex = 0;
+static MMU_FAULT_DATA gsMMUFaultData[RGXFWIF_HWINFO_MAX];
+
+typedef struct _IMG_FLAGS2DESC_
+{
+	IMG_UINT32		uiFlag;
+	const IMG_CHAR	*pszLabel;
+} IMG_FLAGS2DESC;
+
+static const IMG_FLAGS2DESC asCSW2Description[] =
+{
+	{RGXFWIF_INICFG_CTXSWITCH_TA_EN, " TA;"},
+	{RGXFWIF_INICFG_CTXSWITCH_3D_EN, " 3D;"},
+	{RGXFWIF_INICFG_CTXSWITCH_CDM_EN, " CDM;"},
+	{RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, " Random;"},
+	{RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, " SoftReset;"},
+	{RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX, " VDM CS INDEX mode;"},
+	{RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE, " VDM CS INSTANCE mode;"},
+	{RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST, " VDM CS LIST mode;"},
+	{RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, " Fast CSW profile;"},
+	{RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, " Medium CSW profile;"},
+	{RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, " Slow CSW profile;"},
+	{RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, " No Delay CSW profile;"}
+};
+
+static const IMG_FLAGS2DESC asMisc2Description[] =
+{
+	{RGXFWIF_INICFG_POW_RASCALDUST, " Power Rascal/Dust;"},
+	{RGXFWIF_INICFG_HWPERF_EN, " HwPerf EN;"},
+	{RGXFWIF_INICFG_HWR_EN, " HWR EN;"},
+	{RGXFWIF_INICFG_CHECK_MLIST_EN, " Check MList;"},
+	{RGXFWIF_INICFG_DISABLE_CLKGATING_EN, " ClockGating Off;"},
+	{RGXFWIF_INICFG_POLL_COUNTERS_EN, " Poll Counters;"},
+	{RGXFWIF_INICFG_REGCONFIG_EN, " Register Config;"},
+	{RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, " Assert on OOM;"},
+	{RGXFWIF_INICFG_HWP_DISABLE_FILTER, " HWP Filter Off;"},
+	{RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN, " Custom PerfTimer;"},
+	{RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN, " CDM Random kill;"},
+	{RGXFWIF_INICFG_DISABLE_DM_OVERLAP, " DM Overlap Off;"},
+	{RGXFWIF_INICFG_METAT1_MAIN, " Main;"},
+	{RGXFWIF_INICFG_METAT1_DUMMY, " Dummy;"},
+	{RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, " Assert on HWR;"},
+	{RGXFWIF_INICFG_WORKEST_V1, " Workload Estim v1;"},
+	{RGXFWIF_INICFG_WORKEST_V2, " Workload Estim v2;"},
+	{RGXFWIF_INICFG_PDVFS_V1, " PDVFS v1;"},
+	{RGXFWIF_INICFG_PDVFS_V2, " PDVFS v2;"}
+};
+
+static const IMG_FLAGS2DESC asHwrState2Description[] =
+{
+	{RGXFWIF_HWR_HARDWARE_OK, " HWR OK;"},
+	{RGXFWIF_HWR_ANALYSIS_DONE, " Analysis done;"},
+	{RGXFWIF_HWR_GENERAL_LOCKUP, " General lockup;"},
+	{RGXFWIF_HWR_DM_RUNNING_OK, " DM running ok;"},
+	{RGXFWIF_HWR_DM_STALLING, " DM stalling;"},
+	{RGXFWIF_HWR_FW_FAULT, " FW fault;"},
+	{RGXFWIF_HWR_RESTART_REQUESTED, " Restarting;"},
+};
+
+static const IMG_FLAGS2DESC asDmState2Description[] =
+{
+		{RGXFWIF_DM_STATE_WORKING, " working;"},
+		{RGXFWIF_DM_STATE_READY_FOR_HWR, " ready for hwr;"},
+		{RGXFWIF_DM_STATE_NEEDS_SKIP, " needs skip;"},
+		{RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, " needs PR cleanup;"},
+		{RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, " needs trace clear;"},
+		{RGXFWIF_DM_STATE_GUILTY_LOCKUP, " guilty lockup;"},
+		{RGXFWIF_DM_STATE_INNOCENT_LOCKUP, " innocent lockup;"},
+		{RGXFWIF_DM_STATE_GUILTY_OVERRUNING, " guilty overrunning;"},
+		{RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, " innocent overrunning;"},
+		{RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH, " hard context switching;"},
+};
+
+#if !defined(NO_HARDWARE)
+/* Translation of MIPS exception encoding */
+typedef struct _MIPS_EXCEPTION_ENCODING_
+{
+	const IMG_CHAR *const pszStr;	/* Error type */
+	const IMG_BOOL bIsFatal;	/* Error is fatal or non-fatal */
+} MIPS_EXCEPTION_ENCODING;
+
+static const MIPS_EXCEPTION_ENCODING apsMIPSExcCodes[] =
+{
+	{"Interrupt", IMG_FALSE},
+	{"TLB modified exception", IMG_FALSE},
+	{"TLB exception (load/instruction fetch)", IMG_FALSE},
+	{"TLB exception (store)", IMG_FALSE},
+	{"Address error exception (load/instruction fetch)", IMG_TRUE},
+	{"Address error exception (store)", IMG_TRUE},
+	{"Bus error exception (instruction fetch)", IMG_TRUE},
+	{"Bus error exception (load/store)", IMG_TRUE},
+	{"Syscall exception", IMG_FALSE},
+	{"Breakpoint exception (FW assert)", IMG_FALSE},
+	{"Reserved instruction exception", IMG_TRUE},
+	{"Coprocessor Unusable exception", IMG_FALSE},
+	{"Arithmetic Overflow exception", IMG_FALSE},
+	{"Trap exception", IMG_FALSE},
+	{NULL, IMG_FALSE},
+	{NULL, IMG_FALSE},
+	{"Implementation-Specific Exception 1 (COP2)", IMG_FALSE},
+	{"CorExtend Unusable", IMG_FALSE},
+	{"Coprocessor 2 exceptions", IMG_FALSE},
+	{"TLB Read-Inhibit", IMG_TRUE},
+	{"TLB Execute-Inhibit", IMG_TRUE},
+	{NULL, IMG_FALSE},
+	{NULL, IMG_FALSE},
+	{"Reference to WatchHi/WatchLo address", IMG_FALSE},
+	{"Machine check", IMG_FALSE},
+	{NULL, IMG_FALSE},
+	{"DSP Module State Disabled exception", IMG_FALSE},
+	{NULL, IMG_FALSE},
+	{NULL, IMG_FALSE},
+	{NULL, IMG_FALSE},
+	/* Can only happen in MIPS debug mode */
+	{"Parity error", IMG_FALSE},
+	{NULL, IMG_FALSE}
+};
+
+static IMG_CHAR const *_GetMIPSExcString(IMG_UINT32 ui32ExcCode)
+{
+	if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING))
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+		         "Only %lu exceptions available in MIPS, %u is not a valid exception code",
+		         (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode));
+		return NULL;
+	}
+
+	return apsMIPSExcCodes[ui32ExcCode].pszStr;
+}
+#endif
+
+typedef struct _RGXMIPSFW_C0_DEBUG_TBL_ENTRY_
+{
+    IMG_UINT32 ui32Mask;
+    const IMG_CHAR *pszExplanation;
+} RGXMIPSFW_C0_DEBUG_TBL_ENTRY;
+
+#if !defined(NO_HARDWARE)
+static const RGXMIPSFW_C0_DEBUG_TBL_ENTRY sMIPS_C0_DebugTable[] =
+{
+    { RGXMIPSFW_C0_DEBUG_DSS,      "Debug single-step exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DBP,      "Debug software breakpoint exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DDBL,     "Debug data break exception occurred on a load" },
+    { RGXMIPSFW_C0_DEBUG_DDBS,     "Debug data break exception occurred on a store" },
+    { RGXMIPSFW_C0_DEBUG_DIB,      "Debug instruction break exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DINT,     "Debug interrupt exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DIBIMPR,  "Imprecise debug instruction break exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DDBLIMPR, "Imprecise debug data break load exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DDBSIMPR, "Imprecise debug data break store exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_IEXI,     "Imprecise error exception inhibit controls exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DBUSEP,   "Data access Bus Error exception pending" },
+    { RGXMIPSFW_C0_DEBUG_CACHEEP,  "Imprecise Cache Error pending" },
+    { RGXMIPSFW_C0_DEBUG_MCHECKP,  "Imprecise Machine Check exception pending" },
+    { RGXMIPSFW_C0_DEBUG_IBUSEP,   "Instruction fetch Bus Error exception pending" },
+    { RGXMIPSFW_C0_DEBUG_DBD,      "Debug exception occurred in branch delay slot" }
+};
+#endif
+
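+/*
+ * Poll a META register through the slave port until
+ * (value & ui32Mask) == ui32PollValue, giving up after 1000 reads and
+ * returning PVRSRV_ERROR_RETRY on timeout.
+ */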
+static PVRSRV_ERROR
+RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset,
+                        IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask)
+{
+	IMG_UINT32 ui32RegValue, ui32NumPolls = 0;
+	PVRSRV_ERROR eError;
+
+	do
+	{
+		eError = RGXReadWithSP(psDevInfo, ui32RegOffset, &ui32RegValue);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	} while (((ui32RegValue & ui32Mask) != ui32PollValue) && (ui32NumPolls++ < 1000));
+
+	return ((ui32RegValue & ui32Mask) == ui32PollValue) ? PVRSRV_OK : PVRSRV_ERROR_RETRY;
+}
+
+static PVRSRV_ERROR
+RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegAddr, IMG_UINT32 *pui32RegVal)
+{
+	PVRSRV_ERROR eError;
+
+	/* Core Read Ready? */
+	eError = RGXPollMetaRegThroughSP(psDevInfo,
+	                                 META_CR_TXUXXRXRQ_OFFSET,
+	                                 META_CR_TXUXXRXRQ_DREADY_BIT,
+									 META_CR_TXUXXRXRQ_DREADY_BIT);
+	PVR_LOGR_IF_ERROR(eError, "RGXPollMetaRegThroughSP");
+
+	/* Set the reg we are interested in reading */
+	eError = RGXWriteWithSP(psDevInfo, META_CR_TXUXXRXRQ_OFFSET,
+	                        ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT);
+	PVR_LOGR_IF_ERROR(eError, "RGXWriteWithSP");
+
+	/* Core Read Done? */
+	eError = RGXPollMetaRegThroughSP(psDevInfo,
+	                                 META_CR_TXUXXRXRQ_OFFSET,
+	                                 META_CR_TXUXXRXRQ_DREADY_BIT,
+									 META_CR_TXUXXRXRQ_DREADY_BIT);
+	PVR_LOGR_IF_ERROR(eError, "RGXPollMetaRegThroughSP");
+
+	/* Read the value */
+	return RGXReadWithSP(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal);
+}
+
+PVRSRV_ERROR
+RGXReadWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value)
+{
+	PVRSRV_ERROR eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, pui32Value);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXReadWithSP error: %s", PVRSRVGetErrorString(eError)));
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR
+RGXWriteWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXWriteMETAAddr error: %s", PVRSRVGetErrorString(eError)));
+	}
+	return eError;
+}
+
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
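+/*
+ * Read the FW code back through the META slave port one 32-bit word at a
+ * time and compare each word against the host-side copy, logging the
+ * first mismatch found.
+ */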
+static PVRSRV_ERROR _ValidateWithSP(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile,
+						PVRSRV_RGXDEV_INFO *psDevInfo,
+						RGXFWIF_DEV_VIRTADDR *psFWAddr,
+						void *pvHostCodeAddr,
+						IMG_UINT32 ui32MaxLen,
+						const IMG_CHAR *pszDesc,
+						IMG_UINT32 ui32StartOffset)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32Value, i;
+	IMG_UINT32 ui32FWCodeDevVAAddr = psFWAddr->ui32Addr + ui32StartOffset;
+	IMG_UINT32 *pui32FWCode = (IMG_PUINT32) ((IMG_PBYTE)pvHostCodeAddr + ui32StartOffset);
+
+	ui32MaxLen -= ui32StartOffset;
+	ui32MaxLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */
+
+	for (i = 0; i < ui32MaxLen; i++)
+	{
+		eError = RGXReadMETAAddr(psDevInfo, ui32FWCodeDevVAAddr, &ui32Value);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "_ValidateWithSP error: %s", PVRSRVGetErrorString(eError)));
+			return eError;
+		}
+
+		PVR_DPF((PVR_DBG_VERBOSE, "0x%x: CPU 0x%08x, FW 0x%08x", i * 4, pui32FWCode[i], ui32Value));
+
+		if (pui32FWCode[i] != ui32Value)
+		{
+			PVR_DUMPDEBUG_LOG("_ValidateWithSP: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (%p), FW 0x%08x (%x)",
+				 pszDesc,
+				 (i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr);
+			return PVRSRV_ERROR_FW_IMAGE_MISMATCH;
+		}
+
+		ui32FWCodeDevVAAddr += 4;
+	}
+
+	PVR_DUMPDEBUG_LOG("Match between Host and Meta view of the %s", pszDesc);
+	return PVRSRV_OK;
+}
+#endif
+
+#if !defined(NO_HARDWARE)
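+/*
+ * Re-load and parse the FW image from the filesystem, then compare the
+ * resulting code against the MIPS FW code currently in device memory.
+ */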
+static PVRSRV_ERROR _ValidateFWImageForMIPS(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile,
+						PVRSRV_RGXDEV_INFO *psDevInfo,
+						char *pszFormat)
+{
+#if !defined(SUPPORT_TRUSTED_DEVICE)
+	PVRSRV_ERROR eError;
+	IMG_PUINT32 *pui32HostFWCode = NULL;
+	struct RGXFW *psRGXFW = NULL;
+	const IMG_BYTE *pbRGXFirmware = NULL;
+	IMG_UINT32 *pui32CodeMemoryPointer;
+	IMG_UINT32 ui32MaxLenInBytes = psDevInfo->ui32FWCodeSizeInBytes;
+	RGX_LAYER_PARAMS sLayerParams;
+	sLayerParams.psDevInfo = psDevInfo;
+
+	/* Load FW from system for code verification */
+	pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes);
+	if (pui32HostFWCode == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed in allocating memory for FW code. "
+				"So skipping FW code verification",
+				__func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Load FW image */
+	pbRGXFirmware = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW);
+	if (!pbRGXFirmware)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load FW image file.",__func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto cleanup_initfw;
+	}
+
+	eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware, (IMG_PBYTE) pui32HostFWCode, NULL);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed in parsing FW image file.", __func__));
+		goto cleanup_initfw;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32CodeMemoryPointer);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Error in acquiring MIPS FW code memory area (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto cleanup_initfw;
+	}
+
+	ui32MaxLenInBytes /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */
+
+	if (OSMemCmp(pui32HostFWCode, pui32CodeMemoryPointer, ui32MaxLenInBytes) == 0)
+	{
+		PVR_DUMPDEBUG_LOG("%s Match between Host and MIPS views of the FW code", pszFormat);
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG("%s Mismatch between Host and MIPS views of the FW code", pszFormat);
+	}
+
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+
+cleanup_initfw:
+	if (psRGXFW)
+	{
+		RGXUnloadFirmware(psRGXFW);
+	}
+
+	if (pui32HostFWCode)
+	{
+		OSFreeMem(pui32HostFWCode);
+	}
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+	PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+	PVR_UNREFERENCED_PARAMETER(psDevInfo);
+	PVR_UNREFERENCED_PARAMETER(pszFormat);
+	return PVRSRV_OK;
+#endif
+}
+#endif
+
+static PVRSRV_ERROR _ValidateFWImageForMETA(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile,
+						PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
+	IMG_UINT32 *pui32HostFWCode = NULL, *pui32HostFWCoremem = NULL;
+	struct RGXFW *psRGXFW = NULL;
+	const IMG_BYTE *pbRGXFirmware = NULL;
+	RGXFWIF_DEV_VIRTADDR sFWAddr;
+	PVRSRV_ERROR eError;
+	RGX_LAYER_PARAMS sLayerParams;
+	sLayerParams.psDevInfo = psDevInfo;
+
+	if (psDevInfo->pvRegsBaseKM == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: RGX registers not mapped yet!", __func__));
+		return PVRSRV_ERROR_BAD_MAPPING;
+	}
+
+	/* Load FW from system for code verification */
+	pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes);
+	if (pui32HostFWCode == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed in allocating memory for FW code. "
+				"So skipping FW code verification",
+				__func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	if (psDevInfo->ui32FWCorememCodeSizeInBytes)
+	{
+		pui32HostFWCoremem = OSAllocZMem(psDevInfo->ui32FWCorememCodeSizeInBytes);
+		if (pui32HostFWCoremem == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed in allocating memory for FW core code. "
+					"So skipping FW code verification",
+					__func__));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto freeHostFWCode;
+		}
+	}
+
+	/* Load FW image */
+	pbRGXFirmware = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW);
+	if (!pbRGXFirmware)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed in loading FW image file.", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto cleanup_initfw;
+	}
+
+	eError = ProcessLDRCommandStream(&sLayerParams, pbRGXFirmware,
+					(IMG_PBYTE) pui32HostFWCode, NULL,
+					(IMG_PBYTE) pui32HostFWCoremem, NULL, NULL);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed in parsing FW image file.", __func__));
+		goto cleanup_initfw;
+	}
+
+	/* Start checking after the boot loader configuration */
+	sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR;
+	eError = _ValidateWithSP(pfnDumpDebugPrintf, pvDumpDebugFile,
+					psDevInfo, &sFWAddr,
+					pui32HostFWCode, psDevInfo->ui32FWCodeSizeInBytes,
+					"FW code", RGXFW_MAX_BOOTLDR_OFFSET);
+	if (eError != PVRSRV_OK)
+	{
+		goto cleanup_initfw;
+	}
+
+	if (psDevInfo->ui32FWCorememCodeSizeInBytes)
+	{
+		sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE);
+
+		eError = _ValidateWithSP(pfnDumpDebugPrintf, pvDumpDebugFile,
+						psDevInfo, &sFWAddr,
+						pui32HostFWCoremem, psDevInfo->ui32FWCorememCodeSizeInBytes,
+						"FW coremem code", 0);
+	}
+
+cleanup_initfw:
+	if (psRGXFW)
+	{
+		RGXUnloadFirmware(psRGXFW);
+	}
+
+	if (pui32HostFWCoremem)
+	{
+		OSFreeMem(pui32HostFWCoremem);
+	}
+freeHostFWCode:
+	if (pui32HostFWCode)
+	{
+		OSFreeMem(pui32HostFWCode);
+	}
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+	PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+	PVR_UNREFERENCED_PARAMETER(psDevInfo);
+	return PVRSRV_OK;
+#endif
+}
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
+	IMG_PBYTE pbCodeMemoryPointer;
+	PVRSRV_ERROR eError;
+	RGXFWIF_DEV_VIRTADDR sFWAddr;
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pbCodeMemoryPointer);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR;
+	eError = _ValidateWithSP(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, "FW code", 0);
+	if (eError != PVRSRV_OK)
+	{
+		goto releaseFWCodeMapping;
+	}
+
+	if (psDevInfo->ui32FWCorememCodeSizeInBytes)
+	{
+		eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememMemDesc, (void **)&pbCodeMemoryPointer);
+		if (eError != PVRSRV_OK)
+		{
+			/* The coremem mapping was never acquired, so release only the FW code mapping */
+			goto releaseFWCodeMapping;
+		}
+
+		sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE);
+
+		eError = _ValidateWithSP(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer,
+						psDevInfo->ui32FWCorememCodeSizeInBytes, "FW coremem code", 0);
+
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememMemDesc);
+	}
+
+releaseFWCodeMapping:
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(psDevInfo);
+	return PVRSRV_OK;
+#endif
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDecodePMPC
+
+ @Description
+
+ Return the name for the PM-managed Page Catalogues
+
+ @Input ui32PC	 - Page Catalogue number
+
+ @Return   const IMG_CHAR* String describing the Page Catalogue
+
+******************************************************************************/
+static const IMG_CHAR* _RGXDecodePMPC(IMG_UINT32 ui32PC)
+{
+	const IMG_CHAR* pszPMPC = " (-)";
+
+	switch (ui32PC)
+	{
+		case 0x8: pszPMPC = " (PM-VCE0)"; break;
+		case 0x9: pszPMPC = " (PM-TE0)"; break;
+		case 0xA: pszPMPC = " (PM-ZLS0)"; break;
+		case 0xB: pszPMPC = " (PM-ALIST0)"; break;
+		case 0xC: pszPMPC = " (PM-VCE1)"; break;
+		case 0xD: pszPMPC = " (PM-TE1)"; break;
+		case 0xE: pszPMPC = " (PM-ZLS1)"; break;
+		case 0xF: pszPMPC = " (PM-ALIST1)"; break;
+	}
+
+	return pszPMPC;
+}
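+/* Example: _RGXDecodePMPC(0x8) returns " (PM-VCE0)"; values outside the
+ * 0x8..0xF range fall back to " (-)".
+ */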
+
+#include "rgxmhdefs_km.h"
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDecodeBIFReqTags
+
+ @Description
+
+ Decode the BIF Tag ID and sideband data fields from BIF_FAULT_BANK_REQ_STATUS regs
+
+ @Input eBankID             - BIF identifier
+ @Input ui32TagID           - Tag ID value
+ @Input ui32TagSB           - Tag Sideband data
+ @Output ppszTagID          - Decoded string from the Tag ID
+ @Output ppszTagSB          - Decoded string from the Tag SB
+ @Output pszScratchBuf      - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize  - Size of the provided buffer
+
+ @Return   void
+
+******************************************************************************/
+
+static void _RGXDecodeBIFReqTagsXE(PVRSRV_RGXDEV_INFO	*psDevInfo,
+								   IMG_UINT32	ui32TagID,
+								   IMG_UINT32	ui32TagSB,
+								   IMG_CHAR		**ppszTagID,
+								   IMG_CHAR		**ppszTagSB,
+								   IMG_CHAR		*pszScratchBuf,
+								   IMG_UINT32	ui32ScratchBufSize)
+{
+	/* default to unknown */
+	IMG_CHAR *pszTagID = "-";
+	IMG_CHAR *pszTagSB = "-";
+
+	PVR_ASSERT(ppszTagID != NULL);
+	PVR_ASSERT(ppszTagSB != NULL);
+
+	switch (ui32TagID)
+	{
+		/* MMU tags */
+		case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PT:
+		case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PD:
+		case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PC:
+		case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PM:
+		{
+			switch (ui32TagID)
+			{
+				case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PT:	pszTagID = "MMU PT"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PD:	pszTagID = "MMU PD"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PC:	pszTagID = "MMU PC"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PM:	pszTagID = "MMU PM"; break;
+			}
+			switch (ui32TagSB)
+			{
+				case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST:		pszTagSB = "PT"; break;
+				case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST:		pszTagSB = "PD"; break;
+				case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST:		pszTagSB = "PC"; break;
+				case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST:	pszTagSB = "PM PT"; break;
+				case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST:	pszTagSB = "PM PD"; break;
+				case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST:	pszTagSB = "PM PC"; break;
+				case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST:	pszTagSB = "PM PD W"; break;
+				case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST:	pszTagSB = "PM PC W"; break;
+			}
+			break;
+		}
+
+		/* MIPS */
+		case RGX_MH_TAG_ENCODING_MH_TAG_MIPS:
+		{
+			pszTagID = "MIPS";
+			switch (ui32TagSB)
+			{
+				case RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH:	pszTagSB = "Opcode"; break;
+				case RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS:	pszTagSB = "Data"; break;
+			}
+			break;
+		}
+
+		/* CDM tags */
+		case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0:
+		case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1:
+		case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2:
+		case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3:
+		{
+			switch (ui32TagID)
+			{
+				case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0:	pszTagID = "CDM Stage 0"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1:	pszTagID = "CDM Stage 1"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2:	pszTagID = "CDM Stage 2"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3:	pszTagID = "CDM Stage 3"; break;
+			}
+			switch (ui32TagSB)
+			{
+				case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM:	pszTagSB = "Control"; break;
+				case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA:	pszTagSB = "Indirect"; break;
+				case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA:		pszTagSB = "Event"; break;
+				case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE:	pszTagSB = "Context"; break;
+			}
+			break;
+		}
+
+		/* VDM tags */
+		case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0:
+		case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1:
+		case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2:
+		case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3:
+		case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4:
+		{
+			switch (ui32TagID)
+			{
+				case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0:	pszTagID = "VDM Stage 0"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1:	pszTagID = "VDM Stage 1"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2:	pszTagID = "VDM Stage 2"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3:	pszTagID = "VDM Stage 3"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4:	pszTagID = "VDM Stage 4"; break;
+			}
+			switch (ui32TagSB)
+			{
+				case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL:	pszTagSB = "Control"; break;
+				case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE:		pszTagSB = "State"; break;
+				case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX:		pszTagSB = "Index"; break;
+				case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK:		pszTagSB = "Stack"; break;
+				case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT:	pszTagSB = "Context"; break;
+			}
+			break;
+		}
+
+		/* PDS */
+		case RGX_MH_TAG_ENCODING_MH_TAG_PDS_0:
+			pszTagID = "PDS req 0"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_PDS_1:
+			pszTagID = "PDS req 1"; break;
+
+		/* MCU */
+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA:
+			pszTagID = "MCU USCA"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB:
+			pszTagID = "MCU USCB"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC:
+			pszTagID = "MCU USCC"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD:
+			pszTagID = "MCU USCD"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA:
+			pszTagID = "MCU PDS USCA"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB:
+			pszTagID = "MCU PDS USCB"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC:
+			pszTagID = "MCU PDS USCC"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD:
+			pszTagID = "MCU PDS USCD"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW:
+			pszTagID = "PDS PDSRW"; break;
+
+		/* TCU */
+		case RGX_MH_TAG_ENCODING_MH_TAG_TCU_0:
+			pszTagID = "TCU req 0"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_TCU_1:
+			pszTagID = "TCU req 1"; break;
+
+		/* FBCDC */
+		case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0:
+			pszTagID = "FBCDC0"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1:
+			pszTagID = "FBCDC1"; break;
+
+		/* USC Shared */
+		case RGX_MH_TAG_ENCODING_MH_TAG_USC:
+			pszTagID = "USCS"; break;
+
+		/* ISP */
+		case RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS:
+			pszTagID = "ISP0 ZLS"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS:
+			pszTagID = "ISP0 DS"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_ISP1_ZLS:
+			pszTagID = "ISP1 ZLS"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_ISP1_DS:
+			pszTagID = "ISP1 DS"; break;
+
+		/* TPF */
+		case RGX_MH_TAG_ENCODING_MH_TAG_TPF:
+		case RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS:
+		case RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF:
+		case RGX_MH_TAG_ENCODING_MH_TAG_TPF1:
+		case RGX_MH_TAG_ENCODING_MH_TAG_TPF1_PBCDBIAS:
+		case RGX_MH_TAG_ENCODING_MH_TAG_TPF1_SPF:
+		{
+			switch (ui32TagID)
+			{
+				case RGX_MH_TAG_ENCODING_MH_TAG_TPF:           pszTagID = "TPF0"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS:  pszTagID = "TPF0 DBIAS"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF:       pszTagID = "TPF0 SPF"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_TPF1:          pszTagID = "TPF1"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_TPF1_PBCDBIAS: pszTagID = "TPF1 DBIAS"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_TPF1_SPF:      pszTagID = "TPF1 SPF"; break;
+			}
+			switch (ui32TagSB)
+			{
+				case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE:	pszTagSB = "PDS state"; break;
+				case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS:	pszTagSB = "Depth bias"; break;
+				case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA:	pszTagSB = "Floor offset"; break;
+				case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA:	pszTagSB = "Delta"; break;
+			}
+			break;
+		}
+
+		/* IPF */
+		case RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ:
+		case RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS:
+		case RGX_MH_TAG_ENCODING_MH_TAG_IPF1_CREQ:
+		case RGX_MH_TAG_ENCODING_MH_TAG_IPF1_OTHERS:
+		{
+			switch (ui32TagID)
+			{
+				case RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ:      pszTagID = "IPF0"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS:    pszTagID = "IPF0"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_IPF1_CREQ:     pszTagID = "IPF1"; break;
+				case RGX_MH_TAG_ENCODING_MH_TAG_IPF1_OTHERS:   pszTagID = "IPF1"; break;
+			}
+
+			if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_ISP_IPP_PIPES))
+			{
+				if (ui32TagSB < RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES))
+				{
+					OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "CReq%d", ui32TagSB);
+					pszTagSB = pszScratchBuf;
+				}
+				else if (ui32TagSB < 2 * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES))
+				{
+					ui32TagSB -= RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES);
+					OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "PReq%d", ui32TagSB);
+					pszTagSB = pszScratchBuf;
+				}
+				else
+				{
+					switch (ui32TagSB - 2 * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES))
+					{
+						case 0:	pszTagSB = "RReq"; break;
+						case 1:	pszTagSB = "DBSC"; break;
+						case 2:	pszTagSB = "CPF"; break;
+						case 3:	pszTagSB = "Delta"; break;
+					}
+				}
+			}
+			break;
+		}
+
+		/* VDM Stage 5 (temporary) */
+		case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5:
+			pszTagID = "VDM Stage 5"; break;
+
+		/* TA */
+		case RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP:
+			pszTagID = "PPP"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC:
+			pszTagID = "TPW RTC"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC:
+			pszTagID = "TEAC RTC"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC:
+			pszTagID = "PSG RTC"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION:
+			pszTagID = "PSG Region"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM:
+			pszTagID = "PSG Stream"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW:
+			pszTagID = "TPW"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC:
+			pszTagID = "TPC"; break;
+
+		/* PM */
+		case RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC:
+		{
+			pszTagID = "PMA";
+			switch (ui32TagSB)
+			{
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK:	pszTagSB = "TA Fstack"; break;
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST:		pszTagSB = "TA MList"; break;
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK:	pszTagSB = "3D Fstack"; break;
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST:		pszTagSB = "3D MList"; break;
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0:		pszTagSB = "Context0"; break;
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1:		pszTagSB = "Context1"; break;
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP:		pszTagSB = "MAVP"; break;
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK:		pszTagSB = "UFstack"; break;
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK:	pszTagSB = "TA MMUstack"; break;
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK:	pszTagSB = "3D MMUstack"; break;
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK:	pszTagSB = "TA UFstack"; break;
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK:	pszTagSB = "3D UFstack"; break;
+				case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP:		pszTagSB = "TA VFP"; break;
+			}
+			break;
+		}
+		case RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC:
+		{
+			pszTagID = "PMD";
+			switch (ui32TagSB)
+			{
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK:	pszTagSB = "TA Fstack"; break;
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST:		pszTagSB = "TA MList"; break;
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK:	pszTagSB = "3D Fstack"; break;
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST:		pszTagSB = "3D MList"; break;
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0:		pszTagSB = "Context0"; break;
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1:		pszTagSB = "Context1"; break;
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK:		pszTagSB = "UFstack"; break;
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK:	pszTagSB = "TA MMUstack"; break;
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK:	pszTagSB = "3D MMUstack"; break;
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK:	pszTagSB = "TA UFstack"; break;
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK:	pszTagSB = "3D UFstack"; break;
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP:		pszTagSB = "TA VFP"; break;
+				case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP:		pszTagSB = "3D VFP"; break;
+			}
+			break;
+		}
+
+		/* TDM */
+		case RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA:
+		{
+			pszTagID = "TDM DMA";
+			switch (ui32TagSB)
+			{
+				case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM: pszTagSB = "Ctl stream"; break;
+				case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER: pszTagSB = "Ctx buffer"; break;
+				case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL:  pszTagSB = "Queue ctl"; break;
+			}
+			break;
+		}
+		case RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL:
+		{
+			pszTagID = "TDM CTL";
+			switch (ui32TagSB)
+			{
+				case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE:   pszTagSB = "Fence"; break;
+				case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT: pszTagSB = "Context"; break;
+				case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE:   pszTagSB = "Queue"; break;
+			}
+			break;
+		}
+
+		/* PBE */
+		case RGX_MH_TAG_ENCODING_MH_TAG_PBE0:
+			pszTagID = "PBE0"; break;
+		case RGX_MH_TAG_ENCODING_MH_TAG_PBE1:
+			pszTagID = "PBE1"; break;
+
+		/* IPP */
+		case RGX_MH_TAG_ENCODING_MH_TAG_IPP:
+			pszTagID = "IPP"; break;
+	}
+
+	*ppszTagID = pszTagID;
+	*ppszTagSB = pszTagSB;
+}
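+/* Example: a request tagged RGX_MH_TAG_ENCODING_MH_TAG_MIPS with sideband
+ * RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH decodes to "MIPS" /
+ * "Opcode".
+ */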
+
+
+static void _RGXDecodeBIFReqTags(PVRSRV_RGXDEV_INFO	*psDevInfo,
+								 RGXDBG_BIF_ID	eBankID,
+								 IMG_UINT32		ui32TagID,
+								 IMG_UINT32		ui32TagSB,
+								 IMG_CHAR		**ppszTagID,
+								 IMG_CHAR		**ppszTagSB,
+								 IMG_CHAR		*pszScratchBuf,
+								 IMG_UINT32		ui32ScratchBufSize)
+{
+	/* default to unknown */
+	IMG_CHAR *pszTagID = "-";
+	IMG_CHAR *pszTagSB = "-";
+
+	PVR_ASSERT(ppszTagID != NULL);
+	PVR_ASSERT(ppszTagSB != NULL);
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+	{
+		_RGXDecodeBIFReqTagsXE(psDevInfo, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB, pszScratchBuf, ui32ScratchBufSize);
+		return;
+	}
+
+	switch (ui32TagID)
+	{
+		case 0x0:
+		{
+			pszTagID = "MMU";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Table"; break;
+				case 0x1: pszTagSB = "Directory"; break;
+				case 0x2: pszTagSB = "Catalogue"; break;
+			}
+			break;
+		}
+		case 0x1:
+		{
+			pszTagID = "TLA";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Pixel data"; break;
+				case 0x1: pszTagSB = "Command stream data"; break;
+				case 0x2: pszTagSB = "Fence or flush"; break;
+			}
+			break;
+		}
+		case 0x2:
+		{
+			pszTagID = "HOST";
+			break;
+		}
+		case 0x3:
+		{
+			if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+			{
+				pszTagID = "META";
+				switch (ui32TagSB)
+				{
+					case 0x0: pszTagSB = "DCache - Thread 0"; break;
+					case 0x1: pszTagSB = "ICache - Thread 0"; break;
+					case 0x2: pszTagSB = "JTag - Thread 0"; break;
+					case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+					case 0x4: pszTagSB = "DCache - Thread 1"; break;
+					case 0x5: pszTagSB = "ICache - Thread 1"; break;
+					case 0x6: pszTagSB = "JTag - Thread 1"; break;
+					case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+				}
+			}
+			else if (RGX_IS_ERN_SUPPORTED(psDevInfo, 57596))
+			{
+				pszTagID = "TCU";
+			}
+			else
+			{
+				/* Unreachable code */
+				PVR_ASSERT(IMG_FALSE);
+			}
+			break;
+		}
+		case 0x4:
+		{
+			pszTagID = "USC";
+			OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+			           "Cache line %d", (ui32TagSB & 0x3f));
+			pszTagSB = pszScratchBuf;
+			break;
+		}
+		case 0x5:
+		{
+			pszTagID = "PBE";
+			break;
+		}
+		case 0x6:
+		{
+			pszTagID = "ISP";
+			switch (ui32TagSB)
+			{
+				case 0x00: pszTagSB = "ZLS"; break;
+				case 0x20: pszTagSB = "Occlusion Query"; break;
+			}
+			break;
+		}
+		case 0x7:
+		{
+			if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING))
+			{
+				if (eBankID == RGXDBG_TEXAS_BIF)
+				{
+					pszTagID = "IPF";
+					switch (ui32TagSB)
+					{
+						case 0x0: pszTagSB = "CPF"; break;
+						case 0x1: pszTagSB = "DBSC"; break;
+						case 0x2:
+						case 0x4:
+						case 0x6:
+						case 0x8: pszTagSB = "Control Stream"; break;
+						case 0x3:
+						case 0x5:
+						case 0x7:
+						case 0x9: pszTagSB = "Primitive Block"; break;
+					}
+				}
+				else
+				{
+					pszTagID = "IPP";
+					switch (ui32TagSB)
+					{
+						case 0x0: pszTagSB = "Macrotile Header"; break;
+						case 0x1: pszTagSB = "Region Header"; break;
+					}
+				}
+			}
+			else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIMPLE_INTERNAL_PARAMETER_FORMAT))
+			{
+				pszTagID = "IPF";
+				switch (ui32TagSB)
+				{
+					case 0x0: pszTagSB = "Region Header"; break;
+					case 0x1: pszTagSB = "DBSC"; break;
+					case 0x2: pszTagSB = "CPF"; break;
+					case 0x3: pszTagSB = "Control Stream"; break;
+					case 0x4: pszTagSB = "Primitive Block"; break;
+				}
+			}
+			else
+			{
+				pszTagID = "IPF";
+				switch (ui32TagSB)
+				{
+					case 0x0: pszTagSB = "Macrotile Header"; break;
+					case 0x1: pszTagSB = "Region Header"; break;
+					case 0x2: pszTagSB = "DBSC"; break;
+					case 0x3: pszTagSB = "CPF"; break;
+					case 0x4:
+					case 0x6:
+					case 0x8: pszTagSB = "Control Stream"; break;
+					case 0x5:
+					case 0x7:
+					case 0x9: pszTagSB = "Primitive Block"; break;
+				}
+			}
+			break;
+		}
+		case 0x8:
+		{
+			pszTagID = "CDM";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Control Stream"; break;
+				case 0x1: pszTagSB = "Indirect Data"; break;
+				case 0x2: pszTagSB = "Event Write"; break;
+				case 0x3: pszTagSB = "Context State"; break;
+			}
+			break;
+		}
+		case 0x9:
+		{
+			pszTagID = "VDM";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Control Stream"; break;
+				case 0x1: pszTagSB = "PPP State"; break;
+				case 0x2: pszTagSB = "Index Data"; break;
+				case 0x4: pszTagSB = "Call Stack"; break;
+				case 0x8: pszTagSB = "Context State"; break;
+			}
+			break;
+		}
+		case 0xA:
+		{
+			pszTagID = "PM";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "PMA_TAFSTACK"; break;
+				case 0x1: pszTagSB = "PMA_TAMLIST"; break;
+				case 0x2: pszTagSB = "PMA_3DFSTACK"; break;
+				case 0x3: pszTagSB = "PMA_3DMLIST"; break;
+				case 0x4: pszTagSB = "PMA_PMCTX0"; break;
+				case 0x5: pszTagSB = "PMA_PMCTX1"; break;
+				case 0x6: pszTagSB = "PMA_MAVP"; break;
+				case 0x7: pszTagSB = "PMA_UFSTACK"; break;
+				case 0x8: pszTagSB = "PMD_TAFSTACK"; break;
+				case 0x9: pszTagSB = "PMD_TAMLIST"; break;
+				case 0xA: pszTagSB = "PMD_3DFSTACK"; break;
+				case 0xB: pszTagSB = "PMD_3DMLIST"; break;
+				case 0xC: pszTagSB = "PMD_PMCTX0"; break;
+				case 0xD: pszTagSB = "PMD_PMCTX1"; break;
+				case 0xF: pszTagSB = "PMD_UFSTACK"; break;
+				case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break;
+				case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break;
+				case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break;
+				case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break;
+				case 0x14: pszTagSB = "PMA_TAUFSTACK"; break;
+				case 0x15: pszTagSB = "PMA_3DUFSTACK"; break;
+				case 0x16: pszTagSB = "PMD_TAUFSTACK"; break;
+				case 0x17: pszTagSB = "PMD_3DUFSTACK"; break;
+				case 0x18: pszTagSB = "PMA_TAVFP"; break;
+				case 0x19: pszTagSB = "PMD_3DVFP"; break;
+				case 0x1A: pszTagSB = "PMD_TAVFP"; break;
+			}
+			break;
+		}
+		case 0xB:
+		{
+			pszTagID = "TA";
+			switch (ui32TagSB)
+			{
+				case 0x1: pszTagSB = "VCE"; break;
+				case 0x2: pszTagSB = "TPC"; break;
+				case 0x3: pszTagSB = "TE Control Stream"; break;
+				case 0x4: pszTagSB = "TE Region Header"; break;
+				case 0x5: pszTagSB = "TE Render Target Cache"; break;
+				case 0x6: pszTagSB = "TEAC Render Target Cache"; break;
+				case 0x7: pszTagSB = "VCE Render Target Cache"; break;
+				case 0x8: pszTagSB = "PPP Context State"; break;
+			}
+			break;
+		}
+		case 0xC:
+		{
+			pszTagID = "TPF";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "TPF0: Primitive Block"; break;
+				case 0x1: pszTagSB = "TPF0: Depth Bias"; break;
+				case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break;
+				case 0x3: pszTagSB = "CPF - Tables"; break;
+				case 0x4: pszTagSB = "TPF1: Primitive Block"; break;
+				case 0x5: pszTagSB = "TPF1: Depth Bias"; break;
+				case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break;
+				case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break;
+				case 0x8: pszTagSB = "TPF2: Primitive Block"; break;
+				case 0x9: pszTagSB = "TPF2: Depth Bias"; break;
+				case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break;
+				case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break;
+				case 0xC: pszTagSB = "TPF3: Primitive Block"; break;
+				case 0xD: pszTagSB = "TPF3: Depth Bias"; break;
+				case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break;
+				case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break;
+			}
+			break;
+		}
+		case 0xD:
+		{
+			pszTagID = "PDS";
+			break;
+		}
+		case 0xE:
+		{
+			pszTagID = "MCU";
+			{
+				IMG_UINT32 ui32Burst = (ui32TagSB >> 5) & 0x7;
+				IMG_UINT32 ui32GroupEnc = (ui32TagSB >> 2) & 0x7;
+				IMG_UINT32 ui32Group = ui32TagSB & 0x3;
+
+				IMG_CHAR* pszBurst = "";
+				IMG_CHAR* pszGroupEnc = "";
+				IMG_CHAR* pszGroup = "";
+
+				switch (ui32Burst)
+				{
+					case 0x0:
+					case 0x1: pszBurst = "128bit word within the Lower 256bits"; break;
+					case 0x2:
+					case 0x3: pszBurst = "128bit word within the Upper 256bits"; break;
+					case 0x4: pszBurst = "Lower 256bits"; break;
+					case 0x5: pszBurst = "Upper 256bits"; break;
+					case 0x6: pszBurst = "512 bits"; break;
+				}
+				switch (ui32GroupEnc)
+				{
+					case 0x0: pszGroupEnc = "TPUA_USC"; break;
+					case 0x1: pszGroupEnc = "TPUB_USC"; break;
+					case 0x2: pszGroupEnc = "USCA_USC"; break;
+					case 0x3: pszGroupEnc = "USCB_USC"; break;
+					case 0x4: pszGroupEnc = "PDS_USC"; break;
+					case 0x5:
+						if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+							6 > RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS))
+						{
+							pszGroupEnc = "PDSRW";
+						}
+						else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+							6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS))
+						{
+							pszGroupEnc = "UPUC_USC";
+						}
+						break;
+					case 0x6:
+						if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+							6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS))
+						{
+							pszGroupEnc = "TPUC_USC";
+						}
+						break;
+					case 0x7:
+						if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+							6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS))
+						{
+							pszGroupEnc = "PDSRW";
+						}
+						break;
+				}
+				switch (ui32Group)
+				{
+					case 0x0: pszGroup = "Banks 0-3"; break;
+					case 0x1: pszGroup = "Banks 4-7"; break;
+					case 0x2: pszGroup = "Banks 8-11"; break;
+					case 0x3: pszGroup = "Banks 12-15"; break;
+				}
+
+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+								"%s, %s, %s", pszBurst, pszGroupEnc, pszGroup);
+				pszTagSB = pszScratchBuf;
+			}
+			break;
+		}
+		case 0xF:
+		{
+			pszTagID = "FB_CDC";
+
+			if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))
+			{
+				IMG_UINT32 ui32Req   = (ui32TagSB >> 0) & 0xf;
+				IMG_UINT32 ui32MCUSB = (ui32TagSB >> 4) & 0x3;
+				IMG_CHAR* pszReqOrig = "";
+
+				switch (ui32Req)
+				{
+					case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break;
+					case 0x1: pszReqOrig = "FBC Request, originator PBE"; break;
+					case 0x2: pszReqOrig = "FBC Request, originator Host"; break;
+					case 0x3: pszReqOrig = "FBC Request, originator TLA"; break;
+					case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break;
+					case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break;
+					case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+					case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break;
+					case 0x8: pszReqOrig = "FBC Request, originator ZLS Requester Fence"; break;
+					case 0x9: pszReqOrig = "FBC Request, originator PBE Requester Fence"; break;
+					case 0xa: pszReqOrig = "FBC Request, originator Host Requester Fence"; break;
+					case 0xb: pszReqOrig = "FBC Request, originator TLA Requester Fence"; break;
+					case 0xc: pszReqOrig = "Reserved"; break;
+					case 0xd: pszReqOrig = "Reserved"; break;
+					case 0xe: pszReqOrig = "FBDC Request, originator FBCDC(Host) Memory Fence"; break;
+					case 0xf: pszReqOrig = "FBDC Request, originator FBCDC(TLA) Memory Fence"; break;
+				}
+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+				           "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+				pszTagSB = pszScratchBuf;
+			}
+			else
+			{
+				IMG_UINT32 ui32Req   = (ui32TagSB >> 2) & 0x7;
+				IMG_UINT32 ui32MCUSB = (ui32TagSB >> 0) & 0x3;
+				IMG_CHAR* pszReqOrig = "";
+
+				switch (ui32Req)
+				{
+					case 0x0: pszReqOrig = "FBC Request, originator ZLS";   break;
+					case 0x1: pszReqOrig = "FBC Request, originator PBE";   break;
+					case 0x2: pszReqOrig = "FBC Request, originator Host";  break;
+					case 0x3: pszReqOrig = "FBC Request, originator TLA";   break;
+					case 0x4: pszReqOrig = "FBDC Request, originator ZLS";  break;
+					case 0x5: pszReqOrig = "FBDC Request, originator MCU";  break;
+					case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+					case 0x7: pszReqOrig = "FBDC Request, originator TLA";  break;
+				}
+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+				           "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+				pszTagSB = pszScratchBuf;
+			}
+			break;
+		}
+	} /* switch(TagID) */
+
+	*ppszTagID = pszTagID;
+	*ppszTagSB = pszTagSB;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDecodeMMULevel
+
+ @Description
+
+ Return the name for the MMU level that faulted.
+
+ @Input ui32MMULevel	 - MMU level
+
+ @Return   const IMG_CHAR* String describing the MMU level that faulted.
+
+******************************************************************************/
+static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel)
+{
+	const IMG_CHAR* pszMMULevel = "";
+
+	switch (ui32MMULevel)
+	{
+		case 0x0: pszMMULevel = " (Page Table)"; break;
+		case 0x1: pszMMULevel = " (Page Directory)"; break;
+		case 0x2: pszMMULevel = " (Page Catalog)"; break;
+		case 0x3: pszMMULevel = " (Cat Base)"; break;
+	}
+
+	return pszMMULevel;
+}
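+/* Example: _RGXDecodeMMULevel(0x1) returns " (Page Directory)". */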
+
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDecodeMMUReqTags
+
+ @Description
+
+ Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and
+ RGX_CR_MMU_FAULT_STATUS regs.
+
+ @Input ui32TagID           - Tag ID value
+ @Input ui32TagSB           - Tag Sideband data
+ @Input bRead               - Read flag
+ @Output ppszTagID          - Decoded string from the Tag ID
+ @Output ppszTagSB          - Decoded string from the Tag SB
+ @Output pszScratchBuf      - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize  - Size of the provided buffer
+
+ @Return   void
+
+******************************************************************************/
+static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO    *psDevInfo,
+								 IMG_UINT32  ui32TagID,
+								 IMG_UINT32  ui32TagSB,
+								 IMG_BOOL    bRead,
+								 IMG_CHAR    **ppszTagID,
+								 IMG_CHAR    **ppszTagSB,
+								 IMG_CHAR    *pszScratchBuf,
+								 IMG_UINT32  ui32ScratchBufSize)
+{
+	IMG_INT32  i32SideBandType = -1;
+	IMG_CHAR   *pszTagID = "-";
+	IMG_CHAR   *pszTagSB = "-";
+
+	PVR_ASSERT(ppszTagID != NULL);
+	PVR_ASSERT(ppszTagSB != NULL);
+
+	switch (ui32TagID)
+	{
+		case  0: pszTagID = "META (Jones)"; i32SideBandType = RGXDBG_META; break;
+		case  1: pszTagID = "TLA (Jones)"; i32SideBandType = RGXDBG_TLA; break;
+		case  2: pszTagID = "DMA (Jones)"; i32SideBandType = RGXDBG_DMA; break;
+		case  3: pszTagID = "VDMM (Jones)"; i32SideBandType = RGXDBG_VDMM; break;
+		case  4: pszTagID = "CDM (Jones)"; i32SideBandType = RGXDBG_CDM; break;
+		case  5: pszTagID = "IPP (Jones)"; i32SideBandType = RGXDBG_IPP; break;
+		case  6: pszTagID = "PM (Jones)"; i32SideBandType = RGXDBG_PM; break;
+		case  7: pszTagID = "Tiling (Jones)"; i32SideBandType = RGXDBG_TILING; break;
+		case  8: pszTagID = "MCU (Texas 0)"; i32SideBandType = RGXDBG_MCU; break;
+		case 12: pszTagID = "VDMS (Black Pearl 0)"; i32SideBandType = RGXDBG_VDMS; break;
+		case 13: pszTagID = "IPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF; break;
+		case 14: pszTagID = "ISP (Black Pearl 0)"; i32SideBandType = RGXDBG_ISP; break;
+		case 15: pszTagID = "TPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF; break;
+		case 16: pszTagID = "USCS (Black Pearl 0)"; i32SideBandType = RGXDBG_USCS; break;
+		case 17: pszTagID = "PPP (Black Pearl 0)"; i32SideBandType = RGXDBG_PPP; break;
+		case 20: pszTagID = "MCU (Texas 1)"; i32SideBandType = RGXDBG_MCU; break;
+		case 24: pszTagID = "MCU (Texas 2)"; i32SideBandType = RGXDBG_MCU; break;
+		case 28: pszTagID = "VDMS (Black Pearl 1)"; i32SideBandType = RGXDBG_VDMS; break;
+		case 29: pszTagID = "IPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF; break;
+		case 30: pszTagID = "ISP (Black Pearl 1)"; i32SideBandType = RGXDBG_ISP; break;
+		case 31: pszTagID = "TPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF; break;
+		case 32: pszTagID = "USCS (Black Pearl 1)"; i32SideBandType = RGXDBG_USCS; break;
+		case 33: pszTagID = "PPP (Black Pearl 1)"; i32SideBandType = RGXDBG_PPP; break;
+		case 36: pszTagID = "MCU (Texas 3)"; i32SideBandType = RGXDBG_MCU; break;
+		case 40: pszTagID = "MCU (Texas 4)"; i32SideBandType = RGXDBG_MCU; break;
+		case 44: pszTagID = "VDMS (Black Pearl 2)"; i32SideBandType = RGXDBG_VDMS; break;
+		case 45: pszTagID = "IPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF; break;
+		case 46: pszTagID = "ISP (Black Pearl 2)"; i32SideBandType = RGXDBG_ISP; break;
+		case 47: pszTagID = "TPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF; break;
+		case 48: pszTagID = "USCS (Black Pearl 2)"; i32SideBandType = RGXDBG_USCS; break;
+		case 49: pszTagID = "PPP (Black Pearl 2)"; i32SideBandType = RGXDBG_PPP; break;
+		case 52: pszTagID = "MCU (Texas 5)"; i32SideBandType = RGXDBG_MCU; break;
+		case 56: pszTagID = "MCU (Texas 6)"; i32SideBandType = RGXDBG_MCU; break;
+		case 60: pszTagID = "VDMS (Black Pearl 3)"; i32SideBandType = RGXDBG_VDMS; break;
+		case 61: pszTagID = "IPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF; break;
+		case 62: pszTagID = "ISP (Black Pearl 3)"; i32SideBandType = RGXDBG_ISP; break;
+		case 63: pszTagID = "TPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF; break;
+		case 64: pszTagID = "USCS (Black Pearl 3)"; i32SideBandType = RGXDBG_USCS; break;
+		case 65: pszTagID = "PPP (Black Pearl 3)"; i32SideBandType = RGXDBG_PPP; break;
+		case 68: pszTagID = "MCU (Texas 7)"; i32SideBandType = RGXDBG_MCU; break;
+	}
+	if (('-' == pszTagID[0]) && ('\0' == pszTagID[1]))
+	{
+		if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539) ||
+			(RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, FBCDC_ARCHITECTURE) && RGX_GET_FEATURE_VALUE(psDevInfo, FBCDC_ARCHITECTURE) >= 3))
+		{
+			switch (ui32TagID)
+			{
+			case 18: pszTagID = "TPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+			case 19: pszTagID = "IPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+			case 34: pszTagID = "TPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+			case 35: pszTagID = "IPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+			case 50: pszTagID = "TPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+			case 51: pszTagID = "IPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+			case 66: pszTagID = "TPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+			case 67: pszTagID = "IPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+			}
+
+			if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539))
+			{
+				switch (ui32TagID)
+				{
+				case 9:	pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+				case 10: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+				case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 21: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+				case 22: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+				case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 25: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+				case 26: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+				case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 37: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+				case 38: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+				case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 41: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+				case 42: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+				case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 53: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+				case 54: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+				case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 57: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+				case 58: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+				case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 69: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+				case 70: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+				case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
+				}
+			}
+			else
+			{
+				switch (ui32TagID)
+				{
+				case 9:	pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+				case 10: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+				case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+				case 22: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+				case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+				case 26: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+				case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+				case 38: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+				case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+				case 42: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+				case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+				case 54: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+				case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+				case 58: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+				case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+				case 70: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+				case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
+				}
+			}
+		}
+		else
+		{
+			switch (ui32TagID)
+			{
+			case 9:	pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+			case 10: pszTagID = "PBE0 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+			case 11: pszTagID = "PBE1 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+			case 18: pszTagID = "VCE (Black Pearl 0)"; i32SideBandType = RGXDBG_VCE; break;
+			case 19: pszTagID = "FBCDC (Black Pearl 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+			case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+			case 22: pszTagID = "PBE0 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+			case 23: pszTagID = "PBE1 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+			case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+			case 26: pszTagID = "PBE0 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+			case 27: pszTagID = "PBE1 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+			case 34: pszTagID = "VCE (Black Pearl 1)"; i32SideBandType = RGXDBG_VCE; break;
+			case 35: pszTagID = "FBCDC (Black Pearl 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+			case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+			case 38: pszTagID = "PBE0 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+			case 39: pszTagID = "PBE1 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+			case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+			case 42: pszTagID = "PBE0 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+			case 43: pszTagID = "PBE1 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+			case 50: pszTagID = "VCE (Black Pearl 2)"; i32SideBandType = RGXDBG_VCE; break;
+			case 51: pszTagID = "FBCDC (Black Pearl 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+			case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+			case 54: pszTagID = "PBE0 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+			case 55: pszTagID = "PBE1 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+			case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+			case 58: pszTagID = "PBE0 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+			case 59: pszTagID = "PBE1 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+			case 66: pszTagID = "VCE (Black Pearl 3)"; i32SideBandType = RGXDBG_VCE; break;
+			case 67: pszTagID = "FBCDC (Black Pearl 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+			case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+			case 70: pszTagID = "PBE0 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+			case 71: pszTagID = "PBE1 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+			}
+		}
+
+	}
+
+	switch (i32SideBandType)
+	{
+		case RGXDBG_META:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "DCache - Thread 0"; break;
+				case 0x1: pszTagSB = "ICache - Thread 0"; break;
+				case 0x2: pszTagSB = "JTag - Thread 0"; break;
+				case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+				case 0x4: pszTagSB = "DCache - Thread 1"; break;
+				case 0x5: pszTagSB = "ICache - Thread 1"; break;
+				case 0x6: pszTagSB = "JTag - Thread 1"; break;
+				case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_TLA:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Pixel data"; break;
+				case 0x1: pszTagSB = "Command stream data"; break;
+				case 0x2: pszTagSB = "Fence or flush"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_VDMM:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Control Stream - Read Only"; break;
+				case 0x1: pszTagSB = "PPP State - Read Only"; break;
+				case 0x2: pszTagSB = "Indices - Read Only"; break;
+				case 0x4: pszTagSB = "Call Stack - Read/Write"; break;
+				case 0x6: pszTagSB = "DrawIndirect - Read Only"; break;
+				case 0xA: pszTagSB = "Context State - Write Only"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_CDM:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Control Stream"; break;
+				case 0x1: pszTagSB = "Indirect Data"; break;
+				case 0x2: pszTagSB = "Event Write"; break;
+				case 0x3: pszTagSB = "Context State"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_IPP:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Macrotile Header"; break;
+				case 0x1: pszTagSB = "Region Header"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_PM:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "PMA_TAFSTACK"; break;
+				case 0x1: pszTagSB = "PMA_TAMLIST"; break;
+				case 0x2: pszTagSB = "PMA_3DFSTACK"; break;
+				case 0x3: pszTagSB = "PMA_3DMLIST"; break;
+				case 0x4: pszTagSB = "PMA_PMCTX0"; break;
+				case 0x5: pszTagSB = "PMA_PMCTX1"; break;
+				case 0x6: pszTagSB = "PMA_MAVP"; break;
+				case 0x7: pszTagSB = "PMA_UFSTACK"; break;
+				case 0x8: pszTagSB = "PMD_TAFSTACK"; break;
+				case 0x9: pszTagSB = "PMD_TAMLIST"; break;
+				case 0xA: pszTagSB = "PMD_3DFSTACK"; break;
+				case 0xB: pszTagSB = "PMD_3DMLIST"; break;
+				case 0xC: pszTagSB = "PMD_PMCTX0"; break;
+				case 0xD: pszTagSB = "PMD_PMCTX1"; break;
+				case 0xF: pszTagSB = "PMD_UFSTACK"; break;
+				case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break;
+				case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break;
+				case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break;
+				case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break;
+				case 0x14: pszTagSB = "PMA_TAUFSTACK"; break;
+				case 0x15: pszTagSB = "PMA_3DUFSTACK"; break;
+				case 0x16: pszTagSB = "PMD_TAUFSTACK"; break;
+				case 0x17: pszTagSB = "PMD_3DUFSTACK"; break;
+				case 0x18: pszTagSB = "PMA_TAVFP"; break;
+				case 0x19: pszTagSB = "PMD_3DVFP"; break;
+				case 0x1A: pszTagSB = "PMD_TAVFP"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_TILING:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "PSG Control Stream TP0"; break;
+				case 0x1: pszTagSB = "TPC TP0"; break;
+				case 0x2: pszTagSB = "VCE0"; break;
+				case 0x3: pszTagSB = "VCE1"; break;
+				case 0x4: pszTagSB = "PSG Control Stream TP1"; break;
+				case 0x5: pszTagSB = "TPC TP1"; break;
+				case 0x8: pszTagSB = "PSG Region Header TP0"; break;
+				case 0xC: pszTagSB = "PSG Region Header TP1"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_VDMS:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Context State - Write Only"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_IPF:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x00:
+				case 0x20: pszTagSB = "CPF"; break;
+				case 0x01: pszTagSB = "DBSC"; break;
+				case 0x02:
+				case 0x04:
+				case 0x06:
+				case 0x08:
+				case 0x0A:
+				case 0x0C:
+				case 0x0E:
+				case 0x10: pszTagSB = "Control Stream"; break;
+				case 0x03:
+				case 0x05:
+				case 0x07:
+				case 0x09:
+				case 0x0B:
+				case 0x0D:
+				case 0x0F:
+				case 0x11: pszTagSB = "Primitive Block"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_ISP:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x00: pszTagSB = "ZLS read/write"; break;
+				case 0x20: pszTagSB = "Occlusion query read/write"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_TPF:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "TPF0: Primitive Block"; break;
+				case 0x1: pszTagSB = "TPF0: Depth Bias"; break;
+				case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break;
+				case 0x3: pszTagSB = "CPF - Tables"; break;
+				case 0x4: pszTagSB = "TPF1: Primitive Block"; break;
+				case 0x5: pszTagSB = "TPF1: Depth Bias"; break;
+				case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break;
+				case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break;
+				case 0x8: pszTagSB = "TPF2: Primitive Block"; break;
+				case 0x9: pszTagSB = "TPF2: Depth Bias"; break;
+				case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break;
+				case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break;
+				case 0xC: pszTagSB = "TPF3: Primitive Block"; break;
+				case 0xD: pszTagSB = "TPF3: Depth Bias"; break;
+				case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break;
+				case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_FBCDC:
+		{
+			/*
+			 * FBC faults on a 4-cluster phantom do not always set SB
+			 * bit 5, but since FBC is write-only and FBDC is read-only,
+			 * we can set bit 5 if this is a write fault, before decoding.
+			 */
+			if (bRead == IMG_FALSE)
+			{
+				ui32TagSB |= 0x20;
+			}
+
+			switch (ui32TagSB)
+			{
+				case 0x00: pszTagSB = "FBDC Request, originator ZLS"; break;
+				case 0x02: pszTagSB = "FBDC Request, originator MCU Dust 0"; break;
+				case 0x03: pszTagSB = "FBDC Request, originator MCU Dust 1"; break;
+				case 0x20: pszTagSB = "FBC Request, originator ZLS"; break;
+				case 0x22: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0"; break;
+				case 0x23: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1"; break;
+				case 0x24: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0"; break;
+				case 0x25: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1"; break;
+				case 0x28: pszTagSB = "FBC Request, originator ZLS Fence"; break;
+				case 0x2a: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0, Fence"; break;
+				case 0x2b: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1, Fence"; break;
+				case 0x2c: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0, Fence"; break;
+				case 0x2d: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1, Fence"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_MCU:
+		{
+			IMG_UINT32 ui32SetNumber = (ui32TagSB >> 5) & 0x7;
+			IMG_UINT32 ui32WayNumber = (ui32TagSB >> 2) & 0x7;
+			IMG_UINT32 ui32Group     = ui32TagSB & 0x3;
+
+			IMG_CHAR* pszGroup = "";
+
+			switch (ui32Group)
+			{
+				case 0x0: pszGroup = "Banks 0-1"; break;
+				case 0x1: pszGroup = "Banks 2-3"; break;
+				case 0x2: pszGroup = "Banks 4-5"; break;
+				case 0x3: pszGroup = "Banks 6-7"; break;
+			}
+
+			OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+			           "Set=%d, Way=%d, %s", ui32SetNumber, ui32WayNumber, pszGroup);
+			pszTagSB = pszScratchBuf;
+			break;
+		}
+
+		default:
+		{
+			OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "SB=0x%02x", ui32TagSB);
+			pszTagSB = pszScratchBuf;
+			break;
+		}
+	}
+
+	*ppszTagID = pszTagID;
+	*ppszTagSB = pszTagSB;
+}
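+/* Example: ui32TagID 1 decodes to "TLA (Jones)", and with ui32TagSB 0x2 the
+ * sideband decodes to "Fence or flush".
+ */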
+
+static void ConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer,
+							IMG_UINT64 *pui64Seconds,
+							IMG_UINT64 *pui64Nanoseconds)
+{
+	IMG_UINT32 ui32Remainder;
+
+	*pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder);
+	*pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL);
+}
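+/* Example: ui64OSTimer = 1234567890123 (ns) splits into 1234 seconds and
+ * 567890123 nanoseconds.
+ */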
+
+
+typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_
+{
+	DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING,
+	DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED,
+	DEVICEMEM_HISTORY_QUERY_INDEX_NEXT,
+	DEVICEMEM_HISTORY_QUERY_INDEX_COUNT,
+} DEVICEMEM_HISTORY_QUERY_INDEX;
+
+/*!
+*******************************************************************************
+
+ @Function	_PrintDevicememHistoryQueryResult
+
+ @Description
+
+ Print details of a single result from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf       - Debug printf function
+ @Input pvDumpDebugFile          - Optional file identifier to be passed to the
+                                   'printf' function if required
+ @Input psFaultProcessInfo       - The process info derived from the page fault
+ @Input psResult                 - The DevicememHistory result to be printed
+ @Input ui32Index                - The index of the result
+
+ @Return   void
+
+******************************************************************************/
+static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile,
+						RGXMEM_PROCESS_INFO *psFaultProcessInfo,
+						DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult,
+						IMG_UINT32 ui32Index)
+{
+	IMG_UINT32 ui32Remainder;
+	IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+	ConvertOSTimestampToSAndNS(psResult->ui64When,
+							&ui64Seconds,
+							&ui64Nanoseconds);
+
+	if (psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE)
+	{
+		PVR_DUMPDEBUG_LOG("  [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+					" Size: " IMG_DEVMEM_SIZE_FMTSPEC
+					" Operation: %s Modified: %" IMG_UINT64_FMTSPEC
+					" us ago (OS time %" IMG_UINT64_FMTSPEC
+					".%09" IMG_UINT64_FMTSPEC " s)",
+						ui32Index,
+						psResult->szString,
+						psResult->sBaseDevVAddr.uiAddr,
+						psResult->uiSize,
+						psResult->bMap ? "Map": "Unmap",
+						OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+						ui64Seconds,
+						ui64Nanoseconds);
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG("  [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+					" Size: " IMG_DEVMEM_SIZE_FMTSPEC
+					" Operation: %s Modified: %" IMG_UINT64_FMTSPEC
+					" us ago (OS time %" IMG_UINT64_FMTSPEC
+					".%09" IMG_UINT64_FMTSPEC
+					") PID: %u (%s)",
+						ui32Index,
+						psResult->szString,
+						psResult->sBaseDevVAddr.uiAddr,
+						psResult->uiSize,
+						psResult->bMap ? "Map": "Unmap",
+						OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+						ui64Seconds,
+						ui64Nanoseconds,
+						psResult->sProcessInfo.uiPID,
+						psResult->sProcessInfo.szProcessName);
+	}
+
+	if (!psResult->bRange)
+	{
+		PVR_DUMPDEBUG_LOG("      Whole allocation was %s", psResult->bMap ? "mapped": "unmapped");
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG("      Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s",
+										psResult->ui32StartPage,
+										psResult->ui32StartPage + psResult->ui32PageCount - 1,
+										psResult->sMapStartAddr.uiAddr,
+										psResult->sMapEndAddr.uiAddr,
+										psResult->bAll ? "(whole allocation) " : "",
+										psResult->bMap ? "mapped": "unmapped");
+	}
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_PrintDevicememHistoryQueryOut
+
+ @Description
+
+ Print details of all the results from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf       - Debug printf function
+ @Input pvDumpDebugFile          - Optional file identifier to be passed to the
+                                   'printf' function if required
+ @Input psFaultProcessInfo       - The process info derived from the page fault
+ @Input psQueryOut               - Storage for the query results
+
+ @Return   void
+
+******************************************************************************/
+static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile,
+						RGXMEM_PROCESS_INFO *psFaultProcessInfo,
+						DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut)
+{
+	IMG_UINT32 i;
+
+	if (psQueryOut->ui32NumResults == 0)
+	{
+		PVR_DUMPDEBUG_LOG("  No results");
+	}
+	else
+	{
+		for (i = 0; i < psQueryOut->ui32NumResults; i++)
+		{
+			_PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile,
+									psFaultProcessInfo,
+									&psQueryOut->sResults[i],
+									i);
+		}
+	}
+}
+
+/* table of HW page size values and the equivalent page size in bytes */
+static const unsigned int aui32HWPageSizeTable[][2] =
+{
+	{ 0, PVRSRV_4K_PAGE_SIZE },
+	{ 1, PVRSRV_16K_PAGE_SIZE },
+	{ 2, PVRSRV_64K_PAGE_SIZE },
+	{ 3, PVRSRV_256K_PAGE_SIZE },
+	{ 4, PVRSRV_1M_PAGE_SIZE },
+	{ 5, PVRSRV_2M_PAGE_SIZE }
+};
+
+/*!
+*******************************************************************************
+
+ @Function	_PageSizeHWToBytes
+
+ @Description
+
+ Convert a HW page size value to its size in bytes
+
+ @Input ui32PageSizeHW     - The HW page size value
+
+ @Return   IMG_UINT32      The page size in bytes
+
+******************************************************************************/
+static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW)
+{
+	if (ui32PageSizeHW > 5)
+	{
+		/* This is invalid, so return a default value as we cannot ASSERT in this code! */
+		return PVRSRV_4K_PAGE_SIZE;
+	}
+
+	return aui32HWPageSizeTable[ui32PageSizeHW][1];
+}
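+/* Example: _PageSizeHWToBytes(2) returns PVRSRV_64K_PAGE_SIZE; out-of-range
+ * inputs such as 7 fall back to PVRSRV_4K_PAGE_SIZE.
+ */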
+
+/*!
+*******************************************************************************
+
+ @Function	_GetDevicememHistoryData
+
+ @Description
+
+ Get the DevicememHistory results for the given PID and faulting device virtual address.
+ The function will query DevicememHistory for information about the faulting page, as well
+ as the page before and after.
+
+ @Input uiPID              - The process ID to search for allocations belonging to
+ @Input sFaultDevVAddr     - The device address to search for allocations at/before/after
+ @Input asQueryOut         - Storage for the query results
+ @Input ui32PageSizeBytes  - Faulted page size in bytes
+
+ @Return IMG_BOOL          - IMG_TRUE if any results were found for this page fault
+
+******************************************************************************/
+static IMG_BOOL _GetDevicememHistoryData(IMG_PID uiPID, IMG_DEV_VIRTADDR sFaultDevVAddr,
+							DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT],
+							IMG_UINT32 ui32PageSizeBytes)
+{
+	IMG_UINT32 i;
+	DEVICEMEM_HISTORY_QUERY_IN sQueryIn;
+	IMG_BOOL bAnyHits = IMG_FALSE;
+
+	/* if the page fault originated in the firmware then the allocation may
+	 * appear to belong to any PID, because FW allocations are attributed
+	 * to the client process creating the allocation, so instruct the
+	 * devicemem_history query to search all available PIDs
+	 */
+	if (uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+	{
+		sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY;
+	}
+	else
+	{
+		sQueryIn.uiPID = uiPID;
+	}
+
+	/* query the DevicememHistory about the preceding / faulting / next page */
+
+	for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+	{
+		IMG_BOOL bHits;
+
+		switch (i)
+		{
+			case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING:
+				sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - 1;
+				break;
+			case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED:
+				sQueryIn.sDevVAddr = sFaultDevVAddr;
+				break;
+			case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT:
+				sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes;
+				break;
+		}
+
+		/* First try matching any record at the exact address... */
+		bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_FALSE);
+		if (!bHits)
+		{
+			/* If not matched then try matching any record in the same page... */
+			bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_TRUE);
+		}
+
+		if (bHits)
+		{
+			bAnyHits = IMG_TRUE;
+		}
+	}
+
+	return bAnyHits;
+}
+
+/* stored data about one page fault */
+typedef struct _FAULT_INFO_
+{
+	/* the process info of the memory context that page faulted */
+	RGXMEM_PROCESS_INFO sProcessInfo;
+	IMG_DEV_VIRTADDR sFaultDevVAddr;
+	MMU_FAULT_DATA   sMMUFaultData;
+	DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT];
+	/* the CR timer value at the time of the fault, recorded by the FW.
+	 * used to differentiate different page faults
+	 */
+	IMG_UINT64 ui64CRTimer;
+	/* time when this FAULT_INFO entry was added. used for timing
+	 * reference against the map/unmap information
+	 */
+	IMG_UINT64 ui64When;
+} FAULT_INFO;
+
+/* history list of page faults.
+ * Keeps the first `n` page faults and the last `n` page faults, like the FW
+ * HWR log
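+ * (the first RGXFWIF_HWINFO_MAX_FIRST entries are written only once; the
+ * remaining entries are reused as a circular buffer)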
+ */
+typedef struct _FAULT_INFO_LOG_
+{
+	IMG_UINT32 ui32Head;
+	IMG_UINT32 ui32NumWrites;
+	/* the number of faults in this log need not correspond exactly to
+	 * the HWINFO number of the FW, as the FW HWINFO log may contain
+	 * non-page fault HWRs
+	 */
+	FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX];
+} FAULT_INFO_LOG;
+
+static FAULT_INFO_LOG gsFaultInfoLog = { 0 };
+
+/*!
+*******************************************************************************
+
+ @Function	_QueryFaultInfo
+
+ @Description
+
+ Searches the local list of previously analysed page faults to see if the given
+ fault has already been analysed and if so, returns a pointer to the analysis
+ object (FAULT_INFO *), otherwise returns NULL.
+
+ @Input pfnDumpDebugPrintf       - The debug printf function
+ @Input pvDumpDebugFile          - Optional file identifier to be passed to the
+                                   'printf' function if required
+ @Input sFaultDevVAddr           - The faulting device virtual address
+ @Input ui64CRTimer              - The CR timer value recorded by the FW at the time of the fault
+
+ @Return   FAULT_INFO* Pointer to an existing fault analysis structure if found, otherwise NULL
+
+******************************************************************************/
+static FAULT_INFO *_QueryFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					IMG_DEV_VIRTADDR sFaultDevVAddr,
+					IMG_UINT64 ui64CRTimer)
+{
+	IMG_UINT32 i;
+
+	for (i = 0; i < MIN(gsFaultInfoLog.ui32NumWrites, RGXFWIF_HWINFO_MAX); i++)
+	{
+		if ((gsFaultInfoLog.asFaults[i].ui64CRTimer == ui64CRTimer) &&
+			(gsFaultInfoLog.asFaults[i].sFaultDevVAddr.uiAddr == sFaultDevVAddr.uiAddr))
+		{
+			return &gsFaultInfoLog.asFaults[i];
+		}
+	}
+
+	return NULL;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_AcquireNextFaultInfoElement
+
+ @Description
+
+ Gets a pointer to the next element in the fault info log
+ (requires the fault info lock to be held)
+
+ @Return   FAULT_INFO* Pointer to the next record for writing
+
+******************************************************************************/
+static FAULT_INFO *_AcquireNextFaultInfoElement(void)
+{
+	IMG_UINT32 ui32Head = gsFaultInfoLog.ui32Head;
+	FAULT_INFO *psInfo = &gsFaultInfoLog.asFaults[ui32Head];
+
+	return psInfo;
+}
+
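+/* Record the details of a newly analysed page fault in the entry previously
+ * returned by _AcquireNextFaultInfoElement() and advance the log head.
+ * The fault info lock must be held.
+ */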
+static void _CommitFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+							FAULT_INFO *psInfo,
+							RGXMEM_PROCESS_INFO *psProcessInfo,
+							IMG_DEV_VIRTADDR sFaultDevVAddr,
+							IMG_UINT64 ui64CRTimer,
+							MMU_FAULT_DATA *psMMUFaultData)
+{
+	IMG_UINT32 i, j;
+
+	/* commit the page fault details */
+
+	psInfo->sProcessInfo = *psProcessInfo;
+	psInfo->sFaultDevVAddr = sFaultDevVAddr;
+	psInfo->ui64CRTimer = ui64CRTimer;
+	psInfo->ui64When = OSClockns64();
+	if (psMMUFaultData != NULL)
+	{
+		OSDeviceMemCopy(&psInfo->sMMUFaultData, psMMUFaultData, sizeof(MMU_FAULT_DATA));
+	}
+
+	/* if the page fault was caused by the firmware then get information about
+	 * which client application created the related allocations.
+	 *
+	 * Fill in the process info data for each query result.
+	 */
+
+	if (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+	{
+		for (i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+		{
+			for (j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++)
+			{
+				IMG_BOOL bFound;
+
+				RGXMEM_PROCESS_INFO *psProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo;
+				bFound = RGXPCPIDToProcessInfo(psDevInfo,
+									psProcInfo->uiPID,
+									psProcInfo);
+				if (!bFound)
+				{
+					OSStringLCopy(psProcInfo->szProcessName,
+									"(unknown)",
+									sizeof(psProcInfo->szProcessName));
+				}
+			}
+		}
+	}
+
+	/* assert the fault log head hasn't moved since this entry was
+	 * acquired, then advance the head
+	 */
+
+	PVR_ASSERT(psInfo == &gsFaultInfoLog.asFaults[gsFaultInfoLog.ui32Head]);
+
+	if (gsFaultInfoLog.ui32Head < RGXFWIF_HWINFO_MAX - 1)
+	{
+		gsFaultInfoLog.ui32Head++;
+	}
+	else
+	{
+		/* wrap back to the first of the 'LAST' entries */
+		gsFaultInfoLog.ui32Head = RGXFWIF_HWINFO_MAX_FIRST;
+	}
+
+	gsFaultInfoLog.ui32NumWrites++;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_PrintFaultInfo
+
+ @Description
+
+ Print all the details of a page fault from a FAULT_INFO structure
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psInfo               - The page fault occurrence to print
+ @Input pui32Index           - (optional) index value to include in the print output
+
+ @Return   void
+
+******************************************************************************/
+static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					FAULT_INFO *psInfo,
+					const IMG_UINT32 *pui32Index)
+{
+	IMG_UINT32 i;
+	IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+	IMG_PID uiPID;
+
+	uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE || psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_PM) ?
+					0 : psInfo->sProcessInfo.uiPID;
+
+	ConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds);
+
+	if (pui32Index)
+	{
+		PVR_DUMPDEBUG_LOG("(%u) Device memory history for page fault address" IMG_DEV_VIRTADDR_FMTSPEC
+							", CRTimer: 0x%016" IMG_UINT64_FMTSPECx
+							", PID: %u (%s, unregistered: %u) OS time: "
+							"%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC,
+					*pui32Index,
+					psInfo->sFaultDevVAddr.uiAddr,
+					psInfo->ui64CRTimer,
+					uiPID,
+					psInfo->sProcessInfo.szProcessName,
+					psInfo->sProcessInfo.bUnregistered,
+					ui64Seconds,
+					ui64Nanoseconds);
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG("Device memory history for page fault address" IMG_DEV_VIRTADDR_FMTSPEC
+							", PID: %u "
+							"(%s, unregistered: %u) OS time: "
+							"%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC,
+					psInfo->sFaultDevVAddr.uiAddr,
+					uiPID,
+					psInfo->sProcessInfo.szProcessName,
+					psInfo->sProcessInfo.bUnregistered,
+					ui64Seconds,
+					ui64Nanoseconds);
+	}
+
+	if (psInfo->sProcessInfo.uiPID != RGXMEM_SERVER_PID_PM)
+	{
+		for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+		{
+			const IMG_CHAR *pszWhich;
+
+			switch (i)
+			{
+				case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING:
+					pszWhich = "Preceding page";
+					break;
+				case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED:
+					pszWhich = "Faulted page";
+					break;
+				case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT:
+					pszWhich = "Next page";
+					break;
+			}
+
+			PVR_DUMPDEBUG_LOG("%s:", pszWhich);
+			_PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile,
+								&psInfo->sProcessInfo,
+								&psInfo->asQueryOut[i]);
+		}
+	}
+}
+
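+/* Analyse a page fault, reusing any previously recorded analysis of the same
+ * fault, commit it to the fault info log and print the result.
+ */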
+static void _RecordFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					PVRSRV_RGXDEV_INFO *psDevInfo,
+					IMG_DEV_VIRTADDR sFaultDevVAddr,
+					IMG_DEV_PHYADDR sPCDevPAddr,
+					IMG_UINT64 ui64CRTimer,
+					IMG_UINT32 ui32PageSizeBytes,
+					const IMG_CHAR  *pszIndent,
+					MMU_FAULT_DATA *psMMUFaultData)
+{
+	IMG_BOOL bFound = IMG_FALSE, bIsPMFault = IMG_FALSE;
+	RGXMEM_PROCESS_INFO sProcessInfo;
+	FAULT_INFO *psInfo;
+
+	/* look to see if we have already processed this fault.
+	 * if so then use the previously acquired information.
+	 */
+	OSLockAcquire(psDevInfo->hDebugFaultInfoLock);
+	psInfo = _QueryFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, sFaultDevVAddr, ui64CRTimer);
+
+	if (psInfo == NULL)
+	{
+		if (sPCDevPAddr.uiAddr != RGXFWIF_INVALID_PC_PHYADDR)
+		{
+			/* Check if this is PM fault */
+			if (psMMUFaultData != NULL && psMMUFaultData->eType == MMU_FAULT_TYPE_PM)
+			{
+				bIsPMFault = IMG_TRUE;
+				bFound = IMG_TRUE;
+			}
+			else
+			{
+				/* look up the process details for the faulting page catalogue */
+				bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo);
+			}
+
+			if (bFound)
+			{
+				IMG_BOOL bHits;
+
+				psInfo = _AcquireNextFaultInfoElement();
+
+				if (bIsPMFault)
+				{
+					sProcessInfo.uiPID = RGXMEM_SERVER_PID_PM;
+					OSStringNCopy(sProcessInfo.szProcessName, "PM", sizeof(sProcessInfo.szProcessName));
+					sProcessInfo.szProcessName[sizeof(sProcessInfo.szProcessName) - 1] = '\0';
+					sProcessInfo.bUnregistered = IMG_FALSE;
+					bHits = IMG_TRUE;
+				}
+				else
+				{
+					/* get any DevicememHistory data for the faulting address */
+					bHits = _GetDevicememHistoryData(sProcessInfo.uiPID,
+									 sFaultDevVAddr,
+									 psInfo->asQueryOut,
+									 ui32PageSizeBytes);
+				}
+
+				if (bHits)
+				{
+					_CommitFaultInfo(psDevInfo,
+								psInfo,
+								&sProcessInfo,
+								sFaultDevVAddr,
+								ui64CRTimer,
+								psMMUFaultData);
+				}
+				else
+				{
+					/* no hits, so no data to present */
+					PVR_DUMPDEBUG_LOG("%sNo matching Devmem History for fault address", pszIndent);
+					psInfo = NULL;
+				}
+			}
+			else
+			{
+				PVR_DUMPDEBUG_LOG("%sCould not find PID for PC 0x%016" IMG_UINT64_FMTSPECx, pszIndent, sPCDevPAddr.uiAddr);
+			}
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG("%sPage fault not applicable to Devmem History", pszIndent);
+		}
+	}
+
+	if (psInfo != NULL)
+	{
+		_PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, NULL);
+	}
+
+	OSLockRelease(psDevInfo->hDebugFaultInfoLock);
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_DumpFWHWRHostView
+
+ @Description
+
+ Dump FW HWR fault status in human readable form.
+
+ @Input psFaultData          - MMU fault data captured when the fault was analysed
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Output psOutFaultData      - Optional copy of the dumped fault data
+
+ @Return   void
+
+******************************************************************************/
+static void _DumpFWHWRHostView(MMU_FAULT_DATA *psFaultData,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					MMU_FAULT_DATA *psOutFaultData)
+{
+	MMU_LEVEL eTopLevel;
+	const IMG_CHAR szPageLevel[][4] = {"", "PTE", "PDE", "PCE" };
+	const IMG_CHAR szPageError[][3] = {"", "PT",  "PD",  "PC"  };
+
+	eTopLevel = psFaultData->eTopLevel;
+
+	if (psFaultData->eType == MMU_FAULT_TYPE_UNKNOWN)
+	{
+		return;
+	}
+	else if (psFaultData->eType == MMU_FAULT_TYPE_PM)
+	{
+		PVR_DUMPDEBUG_LOG("PM faulted at PC address = 0x%016" IMG_UINT64_FMTSPECx, psFaultData->sLevelData[MMU_LEVEL_0].ui64Address);
+	}
+	else
+	{
+		MMU_LEVEL eCurrLevel;
+		PVR_ASSERT(eTopLevel < MMU_LEVEL_LAST);
+
+		for (eCurrLevel = MMU_LEVEL_0; eCurrLevel <= eTopLevel; eCurrLevel++)
+		{
+			MMU_LEVEL eLevel = eTopLevel - eCurrLevel;
+			MMU_LEVEL_DATA *psMMULevelData = &psFaultData->sLevelData[eLevel];
+			if (psMMULevelData->ui64Address)
+			{
+				if (psMMULevelData->uiBytesPerEntry == 4)
+				{
+					PVR_DUMPDEBUG_LOG("%s for index %d = 0x%08x and is %s",
+								szPageLevel[eLevel],
+								psMMULevelData->ui32Index,
+								(IMG_UINT) psMMULevelData->ui64Address,
+								psMMULevelData->psDebugStr);
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG("%s for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+								szPageLevel[eLevel],
+								psMMULevelData->ui32Index,
+								psMMULevelData->ui64Address,
+								psMMULevelData->psDebugStr);
+				}
+			}
+			else
+			{
+				PVR_DUMPDEBUG_LOG("%s index (%d) out of bounds (%d)",
+							szPageError[eLevel],
+							psMMULevelData->ui32Index,
+							psMMULevelData->ui32NumOfEntries);
+				break;
+			}
+		}
+	}
+
+	if (psOutFaultData)
+	{
+		OSDeviceMemCopy(psOutFaultData, psFaultData, sizeof(MMU_FAULT_DATA));
+	}
+}
+
+static inline void _UpdateFaultInfo(MMU_FAULT_DATA *psDestData, MMU_FAULT_DATA *psSrcData)
+{
+	OSDeviceMemCopy(psDestData, psSrcData, sizeof(MMU_FAULT_DATA));
+
+	/* Advance the global fault index ready for the next entry */
+	if (gui32FaultIndex < RGXFWIF_HWINFO_MAX - 1)
+	{
+		gui32FaultIndex++;
+	}
+	else
+	{
+		gui32FaultIndex = RGXFWIF_HWINFO_MAX_FIRST;
+	}
+}
+
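+/* Host-side analysis of a live page fault: try to locate the faulting page
+ * catalogue address in the FW HWR log and, if found, decode the faulting
+ * device virtual address (or just the PC details for a PM fault).
+ */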
+static void _HostFaultAnalysis(PVRSRV_RGXDEV_INFO *psDevInfo,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile,
+				IMG_UINT64 ui64MMUStatus,
+				IMG_BOOL bPMFault,
+				IMG_DEV_PHYADDR *psPCDevPAddr,
+				IMG_DEV_VIRTADDR *psFaultAddr,
+				IMG_UINT64 *pui64CRTimer,
+				MMU_FAULT_DATA *psFaultData)
+{
+	IMG_UINT32 ui32Index = RGXFWIF_HWINFO_MAX;
+	IMG_UINT32 ui32LatestHWRNumber = 0;
+	IMG_UINT64 ui64LatestMMUStatus = 0;
+	IMG_UINT64 ui64LatestPCAddress = RGXFWIF_INVALID_PC_PHYADDR;
+	const IMG_CHAR  *pszIndent = "    ";
+
+	/*
+	 * A few of the cat bases are memory contexts used by the PM or the
+	 * firmware; the rest are application contexts.
+	 *
+	 * It is not possible for the host to obtain the cat base address
+	 * while the FW is running (since the cat bases are indirectly
+	 * accessed), but in the case of the 'live' PC we can see if the
+	 * FW has already logged it in the HWR log.
+	 */
+
+	for (ui32Index = 0; ui32Index < RGXFWIF_HWINFO_MAX; ui32Index++)
+	{
+		RGX_HWRINFO *psHWRInfo = &psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32Index];
+
+		if (psHWRInfo->ui32HWRNumber > ui32LatestHWRNumber && psHWRInfo->eHWRType == RGX_HWRTYPE_MMUFAULT)
+		{
+			ui32LatestHWRNumber = psHWRInfo->ui32HWRNumber;
+			ui64LatestMMUStatus = psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus;
+			ui64LatestPCAddress = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress;
+			*pui64CRTimer       = psHWRInfo->ui64CRTimer;
+		}
+	}
+
+	if (ui64LatestMMUStatus == ui64MMUStatus && ui64LatestPCAddress != RGXFWIF_INVALID_PC_PHYADDR)
+	{
+		psPCDevPAddr->uiAddr = ui64LatestPCAddress;
+		PVR_DUMPDEBUG_LOG("%sLocated PC address: 0x%016" IMG_UINT64_FMTSPECx, pszIndent, psPCDevPAddr->uiAddr);
+	}
+	else
+	{
+		psPCDevPAddr->uiAddr = RGXFWIF_INVALID_PC_PHYADDR;
+	}
+
+	if (psPCDevPAddr->uiAddr != RGXFWIF_INVALID_PC_PHYADDR)
+	{
+		if (!bPMFault)
+		{
+			PVR_DUMPDEBUG_LOG("%sChecking faulting address " IMG_DEV_VIRTADDR_FMTSPEC, pszIndent, psFaultAddr->uiAddr);
+			RGXCheckFaultAddress(psDevInfo, psFaultAddr, psPCDevPAddr, pfnDumpDebugPrintf, pvDumpDebugFile, psFaultData);
+		}
+		else
+		{
+			/* PM fault and we dump PC details only */
+			psFaultData->eTopLevel = MMU_LEVEL_0;
+			psFaultData->eType     = MMU_FAULT_TYPE_PM;
+			psFaultData->sLevelData[MMU_LEVEL_0].ui64Address = psPCDevPAddr->uiAddr;
+		}
+
+		if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) == 0)
+		{
+			_UpdateFaultInfo(&gsMMUFaultData[gui32FaultIndex], psFaultData);
+		}
+	}
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpRGXBIFBank
+
+ @Description
+
+ Dump BIF Bank state in human readable form.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psDevInfo            - RGX device info
+ @Input eBankID              - BIF identifier
+ @Input ui64MMUStatus        - MMU Status register value
+ @Input ui64ReqStatus        - BIF request Status register value
+ @Input ui32HWRIndex         - Index into the FW HWR info buffer when called
+                                as part of the debug dump summary, otherwise
+                                RGXFWIF_HWINFO_MAX
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpRGXBIFBank(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					PVRSRV_RGXDEV_INFO *psDevInfo,
+					RGXDBG_BIF_ID eBankID,
+					IMG_UINT64 ui64MMUStatus,
+					IMG_UINT64 ui64ReqStatus,
+					IMG_UINT32 ui32HWRIndex)
+{
+	IMG_BOOL bExistingHWR = ui32HWRIndex < RGXFWIF_HWINFO_MAX;
+	IMG_CHAR *pszIndent = (bExistingHWR ? "" : "    ");
+
+	if (ui64MMUStatus == 0x0)
+	{
+		PVR_DUMPDEBUG_LOG("%s - OK", pszBIFNames[eBankID]);
+	}
+	else
+	{
+		IMG_DEV_VIRTADDR sFaultDevVAddr;
+		IMG_DEV_PHYADDR sPCDevPAddr = { 0 };
+		IMG_UINT32 ui32PageSize;
+		IMG_UINT64 ui64CRTimer = 0;
+		IMG_UINT32 ui32PC =
+			(ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+				RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+		MMU_FAULT_DATA sFaultData = { 0 };
+
+		/* Bank 0 & 1 share the same fields */
+		PVR_DUMPDEBUG_LOG("%s%s - FAULT:",
+						  pszIndent,
+						  pszBIFNames[eBankID]);
+
+		/* MMU Status */
+		{
+			IMG_UINT32 ui32MMUDataType =
+				(ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT;
+
+			IMG_BOOL bROFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN) != 0;
+			IMG_BOOL bProtFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN) != 0;
+
+			ui32PageSize = (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >>
+						RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT;
+
+			PVR_DUMPDEBUG_LOG("%s  * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d%s, Page Size = %d, MMU data type = %d%s%s.",
+						pszIndent,
+						ui64MMUStatus,
+						ui32PC,
+						(ui32PC < 0x8)?"":_RGXDecodePMPC(ui32PC),
+						ui32PageSize,
+						ui32MMUDataType,
+						(bROFault)?", Read Only fault":"",
+						(bProtFault)?", PM/META protection fault":"");
+		}
+
+		/* Req Status */
+		{
+			IMG_CHAR *pszTagID;
+			IMG_CHAR *pszTagSB;
+			IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+			IMG_BOOL bRead;
+			IMG_UINT32 ui32TagSB, ui32TagID;
+			IMG_UINT64 ui64Addr;
+
+			if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+			{
+				bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN) != 0;
+				ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT;
+				ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT;
+			}
+			else
+			{
+				bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN) != 0;
+				ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT;
+				ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT;
+			}
+			ui64Addr = ((ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK) >>
+				RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT) <<
+				RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT;
+
+			_RGXDecodeBIFReqTags(psDevInfo, eBankID, ui32TagID, ui32TagSB, &pszTagID, &pszTagSB, &aszScratch[0], RGX_DEBUG_STR_SIZE);
+
+			PVR_DUMPDEBUG_LOG("%s  * Request (0x%016" IMG_UINT64_FMTSPECx
+						"): %s (%s), %s " IMG_DEV_VIRTADDR_FMTSPEC ".",
+							  pszIndent,
+							  ui64ReqStatus,
+			                  pszTagID,
+			                  pszTagSB,
+			                  (bRead)?"Reading from":"Writing to",
+			                  ui64Addr);
+		}
+
+		/* Check if the host thinks this fault is valid */
+
+		sFaultDevVAddr.uiAddr = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK);
+
+		if (bExistingHWR)
+		{
+			/* Called from debug dump summary */
+			sPCDevPAddr.uiAddr = psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex].uHWRData.sBIFInfo.ui64PCAddress;
+			ui64CRTimer = psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex].ui64CRTimer;
+
+			PVR_DUMPDEBUG_LOG("%sFW logged fault using PC Address: 0x%016" IMG_UINT64_FMTSPECx, pszIndent, sPCDevPAddr.uiAddr);
+			if (psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex].ui32HWRNumber < psDevInfo->psRGXFWIfHWRInfoBuf->ui32DDReqCount)
+			{
+				/* check if fault is already analysed from host */
+				if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+				{
+					_DumpFWHWRHostView(&(gsFaultInfoLog.asFaults[ui32HWRIndex].sMMUFaultData),
+								pfnDumpDebugPrintf, pvDumpDebugFile, &sFaultData);
+				}
+				else
+				{
+					_DumpFWHWRHostView(&gsMMUFaultData[ui32HWRIndex],
+								pfnDumpDebugPrintf, pvDumpDebugFile, &sFaultData);
+				}
+			}
+		}
+		else
+		{
+			/* Only the first 8 cat bases are application memory contexts which we can validate... */
+			IMG_BOOL bPMFault = (ui32PC >= 8);
+			_HostFaultAnalysis(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile,
+						ui64MMUStatus, bPMFault, &sPCDevPAddr, &sFaultDevVAddr,
+						&ui64CRTimer, &sFaultData);
+		}
+
+		if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+		{
+			_RecordFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+						sFaultDevVAddr, sPCDevPAddr, ui64CRTimer,
+						_PageSizeHWToBytes(ui32PageSize), pszIndent,
+						&sFaultData);
+		}
+	}
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpRGXMMUFaultStatus
+
+ @Description
+
+ Dump MMU Fault status in human readable form.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psDevInfo            - RGX device info
+ @Input ui64MMUStatus        - MMU Status register value
+ @Input ui32HWRIndex         - Index into the FW HWR info buffer when called
+                                as part of the debug dump summary, otherwise
+                                RGXFWIF_HWINFO_MAX
+ @Input pszMetaOrCore        - String identifying whether the call is for the
+                               META or the MMU core
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					PVRSRV_RGXDEV_INFO *psDevInfo,
+					IMG_UINT64 ui64MMUStatus,
+					IMG_UINT32 ui32HWRIndex,
+					const IMG_PCHAR pszMetaOrCore)
+{
+	IMG_BOOL bExistingHWR = ui32HWRIndex < RGXFWIF_HWINFO_MAX;
+	IMG_CHAR *pszIndent   = (bExistingHWR ? "" : "    ");
+
+	if (ui64MMUStatus == 0x0)
+	{
+		PVR_DUMPDEBUG_LOG("%sMMU (%s) - OK", pszIndent, pszMetaOrCore);
+	}
+	else
+	{
+		IMG_UINT32 ui32PC        = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT;
+		IMG_UINT64 ui64Addr      = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT) <<  4; /* align shift */
+		IMG_UINT32 ui32Requester = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT;
+		IMG_UINT32 ui32SideBand  = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT;
+		IMG_UINT32 ui32MMULevel  = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT;
+		IMG_BOOL bRead           = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_RNW_EN) != 0;
+		IMG_BOOL bFault          = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_FAULT_EN) != 0;
+		IMG_BOOL bROFault        = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
+		                            RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x2;
+		IMG_BOOL bProtFault      = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
+		                            RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x3;
+		IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+		IMG_CHAR *pszTagID;
+		IMG_CHAR *pszTagSB;
+		IMG_UINT64 ui64CRTimer = 0;
+		IMG_DEV_VIRTADDR sFaultDevVAddr;
+		IMG_DEV_PHYADDR sPCDevPAddr = { 0 };
+		MMU_FAULT_DATA sFaultData = { 0 };
+
+		_RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32SideBand, bRead, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE);
+
+		PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore);
+		PVR_DUMPDEBUG_LOG("%s  * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECx ", %s (%s)%s%s%s%s.",
+						  pszIndent,
+						  ui64MMUStatus,
+						  ui32PC,
+						  (bRead)?"Reading from":"Writing to",
+						  ui64Addr,
+						  pszTagID,
+						  pszTagSB,
+						  (bFault)?", Fault":"",
+						  (bROFault)?", Read Only fault":"",
+						  (bProtFault)?", PM/META protection fault":"",
+						  _RGXDecodeMMULevel(ui32MMULevel));
+
+		/* Check if the host thinks this fault is valid */
+		sFaultDevVAddr.uiAddr = ui64Addr;
+
+		if (bExistingHWR)
+		{
+			/* Called from debug dump summary */
+			sPCDevPAddr.uiAddr = psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex].uHWRData.sMMUInfo.ui64PCAddress;
+			ui64CRTimer = psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex].ui64CRTimer;
+
+			PVR_DUMPDEBUG_LOG("%sFW logged fault using PC Address: 0x%016" IMG_UINT64_FMTSPECx, pszIndent, sPCDevPAddr.uiAddr);
+			if (psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex].ui32HWRNumber < psDevInfo->psRGXFWIfHWRInfoBuf->ui32DDReqCount)
+			{
+				/* check if Fault is already analysed from host */
+				if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+				{
+					_DumpFWHWRHostView(&(gsFaultInfoLog.asFaults[ui32HWRIndex].sMMUFaultData),
+								pfnDumpDebugPrintf, pvDumpDebugFile, &sFaultData);
+				}
+				else
+				{
+					_DumpFWHWRHostView(&gsMMUFaultData[ui32HWRIndex],
+								pfnDumpDebugPrintf, pvDumpDebugFile, &sFaultData);
+				}
+			}
+		}
+		else
+		{
+			IMG_BOOL bPMFault;
+#if defined(SUPPORT_TRUSTED_DEVICE)
+			ui32PC = ui32PC - 1;
+#endif
+			bPMFault = (ui32PC <= 8);
+			_HostFaultAnalysis(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile,
+						ui64MMUStatus, bPMFault, &sPCDevPAddr, &sFaultDevVAddr,
+						&ui64CRTimer, &sFaultData);
+		}
+		if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+		{
+			_RecordFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+						sFaultDevVAddr, sPCDevPAddr, ui64CRTimer,
+						_PageSizeHWToBytes(0), pszIndent, &sFaultData);
+		}
+	}
+}
+
+static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+
+#if !defined(NO_HARDWARE)
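+/* Trigger an NMI in the MIPS FW and copy the state it dumps into psMIPSState.
+ * Returns PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE if the MIPS core is already in
+ * an error state or does not respond within the timeout.
+ */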
+static PVRSRV_ERROR _RGXMipsExtraDebug(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_MIPS_STATE *psMIPSState)
+{
+	void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+	IMG_UINT32 ui32RegRead;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 *pui32NMIMemoryPointer;
+	IMG_UINT32 volatile *pui32SyncFlag;
+	IMG_DEVMEM_OFFSET_T uiNMIMemoryBootOffset;
+
+	/* Map the FW data area to the kernel */
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+									 (void **)&pui32NMIMemoryPointer);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to acquire NMI shared memory area (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto map_error_fail;
+	}
+
+	/* Calculate offset to the boot/NMI data page */
+	uiNMIMemoryBootOffset = RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA));
+
+	/* Jump to the NMI shared data area within the page above */
+	pui32NMIMemoryPointer += uiNMIMemoryBootOffset + RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_NMI_SHARED_DATA_BASE);
+
+	/* Acquire the NMI operations lock */
+	OSLockAcquire(psDevInfo->hNMILock);
+
+	/* Make sure the synchronisation flag is set to 0 */
+	pui32SyncFlag = &pui32NMIMemoryPointer[RGXMIPSFW_NMI_SYNC_FLAG_OFFSET];
+	*pui32SyncFlag = 0;
+	OSWriteMemoryBarrier();
+	(void) *pui32SyncFlag;
+
+	/* Enable NMI issuing in the MIPS wrapper */
+	OSWriteHWReg64(pvRegsBaseKM,
+				   RGX_CR_MIPS_WRAPPER_NMI_ENABLE,
+				   RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN);
+
+	/* Check the MIPS is not in error state already (e.g. it is booting or an NMI has already been requested) */
+	ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+				   RGX_CR_MIPS_EXCEPTION_STATUS);
+	if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) || (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN))
+	{
+		eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+		goto fail;
+	}
+	ui32RegRead = 0;
+
+	/* Issue NMI */
+	OSWriteHWReg32(pvRegsBaseKM,
+				   RGX_CR_MIPS_WRAPPER_NMI_EVENT,
+				   RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN);
+
+	/* Wait for NMI Taken to be asserted */
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+									RGX_CR_MIPS_EXCEPTION_STATUS);
+		if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) == 0)
+	{
+		eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+		goto fail;
+	}
+	ui32RegRead = 0;
+
+	/* Allow the firmware to proceed */
+	*pui32SyncFlag = 1;
+	OSWriteMemoryBarrier();
+	(void) *pui32SyncFlag;
+
+	/* Wait for the FW to have finished the NMI routine */
+	ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+								RGX_CR_MIPS_EXCEPTION_STATUS);
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+									RGX_CR_MIPS_EXCEPTION_STATUS);
+		if (!(ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN))
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+	if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN)
+	{
+		eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+		goto fail;
+	}
+	ui32RegRead = 0;
+
+	/* Copy state */
+	OSDeviceMemCopy(psMIPSState, pui32NMIMemoryPointer + RGXMIPSFW_NMI_STATE_OFFSET, sizeof(*psMIPSState));
+
+	--(psMIPSState->ui32ErrorEPC);
+	--(psMIPSState->ui32EPC);
+
+	/* Disable NMI issuing in the MIPS wrapper */
+	OSWriteHWReg32(pvRegsBaseKM,
+				   RGX_CR_MIPS_WRAPPER_NMI_ENABLE,
+				   0);
+
+fail:
+	/* Release the NMI operations lock */
+	OSLockRelease(psDevInfo->hNMILock);
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+map_error_fail:
+	return eError;
+}
+
+/* Print decoded information from cause register */
+static void _RGXMipsDumpCauseDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                    void *pvDumpDebugFile,
+                                    IMG_UINT32 ui32Cause,
+                                    IMG_UINT32 ui32ErrorState)
+{
+#define INDENT "    "
+	const IMG_UINT32 ui32ExcCode = RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause);
+	const IMG_CHAR * const pszException = _GetMIPSExcString(ui32ExcCode);
+
+	if (ui32ErrorState == RGXMIPSFW_NMI_ERROR_STATE_SET &&
+	    pszException != NULL)
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "Cause exception: %s", pszException);
+	}
+
+	if (ui32Cause & RGXMIPSFW_C0_CAUSE_FDCIPENDING)
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "FDC interrupt pending");
+	}
+
+	if (!(ui32Cause & RGXMIPSFW_C0_CAUSE_IV))
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "Interrupt uses general interrupt vector");
+	}
+
+	if (ui32Cause & RGXMIPSFW_C0_CAUSE_PCIPENDING)
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "Performance Counter Interrupt pending");
+	}
+
+	/* Unusable Coproc exception */
+	if (ui32ExcCode == 11)
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "Unusable Coprocessor: %d", RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(ui32Cause));
+	}
+
+#undef INDENT
+}
+
+static IMG_BOOL _IsFWCodeException(IMG_UINT32 ui32ExcCode)
+{
+	if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING))
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+		         "Only %lu exceptions available in MIPS, %u is not a valid exception code",
+		         (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode));
+		return IMG_FALSE;
+	}
+
+	return apsMIPSExcCodes[ui32ExcCode].bIsFatal;
+}
+
+static void _RGXMipsDumpDebugDecode(PVRSRV_RGXDEV_INFO *psDevInfo,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					IMG_UINT32 ui32Debug,
+					IMG_UINT32 ui32DEPC)
+{
+	const IMG_CHAR *pszDException = NULL;
+	IMG_UINT32 i;
+#define INDENT "    "
+
+	if (!(ui32Debug & RGXMIPSFW_C0_DEBUG_DM))
+	{
+		return;
+	}
+
+	PVR_DUMPDEBUG_LOG("DEBUG                        :");
+
+	pszDException = _GetMIPSExcString(RGXMIPSFW_C0_DEBUG_EXCCODE(ui32Debug));
+
+	if (pszDException != NULL)
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "Debug exception: %s", pszDException);
+	}
+
+	/* Check FW code corruption in case of known errors */
+	if (_IsFWCodeException(RGXMIPSFW_C0_DEBUG_EXCCODE(ui32Debug)))
+	{
+		PVRSRV_ERROR eError;
+		eError = _ValidateFWImageForMIPS(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, INDENT);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DUMPDEBUG_LOG(INDENT "Failed to validate any FW code corruption");
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(sMIPS_C0_DebugTable); ++i)
+	{
+		const RGXMIPSFW_C0_DEBUG_TBL_ENTRY * const psDebugEntry = &sMIPS_C0_DebugTable[i];
+
+		if (ui32Debug & psDebugEntry->ui32Mask)
+		{
+			PVR_DUMPDEBUG_LOG(INDENT "%s", psDebugEntry->pszExplanation);
+		}
+	}
+#undef INDENT
+	PVR_DUMPDEBUG_LOG("DEPC                    :0x%08X", ui32DEPC);
+}
+
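+/* Compute the inclusive physical address ranges covered by the two output
+ * pages of a MIPS TLB entry, using the remap entries when supplied.
+ */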
+static inline void _GetMipsTLBPARanges(const RGX_MIPS_TLB_ENTRY *psTLBEntry,
+                                       const RGX_MIPS_REMAP_ENTRY *psRemapEntry0,
+                                       const RGX_MIPS_REMAP_ENTRY *psRemapEntry1,
+                                       IMG_UINT64 *pui64PA0Start,
+                                       IMG_UINT64 *pui64PA0End,
+                                       IMG_UINT64 *pui64PA1Start,
+                                       IMG_UINT64 *pui64PA1End)
+{
+	IMG_BOOL bUseRemapOutput = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? IMG_TRUE : IMG_FALSE;
+	IMG_UINT64 ui64PageSize = RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask);
+
+	if ((psTLBEntry->ui32TLBLo0 & RGXMIPSFW_TLB_VALID) == 0)
+	{
+		/* Dummy values to fail the range checks later */
+		*pui64PA0Start = -1ULL;
+		*pui64PA0End   = -1ULL;
+	}
+	else if (bUseRemapOutput)
+	{
+		*pui64PA0Start = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12;
+		*pui64PA0End   = *pui64PA0Start + ui64PageSize - 1;
+	}
+	else
+	{
+		*pui64PA0Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0);
+		*pui64PA0End   = *pui64PA0Start + ui64PageSize - 1;
+	}
+
+	if ((psTLBEntry->ui32TLBLo1 & RGXMIPSFW_TLB_VALID) == 0)
+	{
+		/* Dummy values to fail the range checks later */
+		*pui64PA1Start = -1ULL;
+		*pui64PA1End   = -1ULL;
+	}
+	else if (bUseRemapOutput)
+	{
+		*pui64PA1Start = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12;
+		*pui64PA1End   = *pui64PA1Start + ui64PageSize - 1;
+	}
+	else
+	{
+		*pui64PA1Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1);
+		*pui64PA1End   = *pui64PA1Start + ui64PageSize - 1;
+	}
+}
+
+static void _CheckMipsTLBDuplicatePAs(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                      void *pvDumpDebugFile,
+                                      const RGX_MIPS_TLB_ENTRY *psTLB,
+                                      const RGX_MIPS_REMAP_ENTRY *psRemap)
+{
+	IMG_UINT64 ui64PA0StartI, ui64PA1StartI, ui64PA0StartJ, ui64PA1StartJ;
+	IMG_UINT64 ui64PA0EndI,   ui64PA1EndI,   ui64PA0EndJ,   ui64PA1EndJ;
+	IMG_UINT32 i, j;
+
+#define RANGES_OVERLAP(start0,end0,start1,end1)  ((start0) < (end1) && (start1) < (end0))
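+/* Note: the strict '<' guarantees the all-ones placeholder ranges set up for
+ * invalid TLB entries can never be reported as overlapping.
+ */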
+
+	for (i = 0; i < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; i++)
+	{
+		_GetMipsTLBPARanges(&psTLB[i],
+		                    psRemap ? &psRemap[i] : NULL,
+		                    psRemap ? &psRemap[i + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL,
+		                    &ui64PA0StartI, &ui64PA0EndI,
+		                    &ui64PA1StartI, &ui64PA1EndI);
+
+		for (j = i + 1; j < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; j++)
+		{
+			_GetMipsTLBPARanges(&psTLB[j],
+			                    psRemap ? &psRemap[j] : NULL,
+			                    psRemap ? &psRemap[j + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL,
+			                    &ui64PA0StartJ, &ui64PA0EndJ,
+			                    &ui64PA1StartJ, &ui64PA1EndJ);
+
+			if (RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA0StartJ, ui64PA0EndJ) ||
+			    RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA1StartJ, ui64PA1EndJ) ||
+			    RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA0StartJ, ui64PA0EndJ) ||
+			    RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA1StartJ, ui64PA1EndJ)  )
+			{
+				PVR_DUMPDEBUG_LOG("Overlap between TLB entry %u and %u", i , j);
+			}
+		}
+	}
+}
+
+static inline void _RGXMipsDumpTLBEntry(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                        void *pvDumpDebugFile,
+                                        const RGX_MIPS_TLB_ENTRY *psTLBEntry,
+                                        const RGX_MIPS_REMAP_ENTRY *psRemapEntry0,
+                                        const RGX_MIPS_REMAP_ENTRY *psRemapEntry1,
+                                        IMG_UINT32 ui32Index)
+{
+	IMG_BOOL bDumpRemapEntries = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? IMG_TRUE : IMG_FALSE;
+	IMG_UINT64 ui64PA0 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0);
+	IMG_UINT64 ui64PA1 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1);
+	IMG_UINT64 ui64Remap0AddrOut = 0, ui64Remap1AddrOut = 0;
+	IMG_UINT32 ui32Remap0AddrIn = 0, ui32Remap1AddrIn = 0;
+
+	static const IMG_CHAR * const apszPermissionInhibit[4] =
+	{
+		"",
+		"XI",
+		"RI",
+		"RIXI"
+	};
+
+	static const IMG_CHAR * const apszCoherencyTLB[8] =
+	{
+		"C",
+		"C",
+		" ",
+		"C",
+		"C",
+		"C",
+		"C",
+		" "
+	};
+
+	static const IMG_CHAR * const apszDirtyGlobalValid[8] =
+	{
+		"   ",
+		"  G",
+		" V ",
+		" VG",
+		"D  ",
+		"D G",
+		"DV ",
+		"DVG"
+	};
+
+	if (bDumpRemapEntries)
+	{
+		/* RemapAddrIn is always 4k aligned and 32 bits wide */
+		ui32Remap0AddrIn = psRemapEntry0->ui32RemapAddrIn << 12;
+		ui32Remap1AddrIn = psRemapEntry1->ui32RemapAddrIn << 12;
+
+		/* RemapAddrOut is always 4k aligned and 32 or 36 bits wide */
+		ui64Remap0AddrOut = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12;
+		ui64Remap1AddrOut = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12;
+
+		/* If the TLB and remap entries match, merge them; otherwise print them separately */
+		if ((IMG_UINT32)ui64PA0 == ui32Remap0AddrIn &&
+		    (IMG_UINT32)ui64PA1 == ui32Remap1AddrIn)
+		{
+			ui64PA0 = ui64Remap0AddrOut;
+			ui64PA1 = ui64Remap1AddrOut;
+			bDumpRemapEntries = IMG_FALSE;
+		}
+	}
+
+	PVR_DUMPDEBUG_LOG("%2u) VA 0x%08X (%3uk) -> PA0 0x%08" IMG_UINT64_FMTSPECx " %s%s%s, "
+	                                           "PA1 0x%08" IMG_UINT64_FMTSPECx " %s%s%s",
+	                  ui32Index,
+	                  psTLBEntry->ui32TLBHi,
+	                  RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask),
+	                  ui64PA0,
+	                  apszPermissionInhibit[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo0)],
+	                  apszDirtyGlobalValid[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo0)],
+	                  apszCoherencyTLB[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo0)],
+	                  ui64PA1,
+	                  apszPermissionInhibit[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo1)],
+	                  apszDirtyGlobalValid[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo1)],
+	                  apszCoherencyTLB[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo1)]);
+
+	if (bDumpRemapEntries)
+	{
+		PVR_DUMPDEBUG_LOG("    Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx,
+		                  ui32Index,
+		                  ui32Remap0AddrIn,
+		                  RGXMIPSFW_REMAP_GET_REGION_SIZE(psRemapEntry0->ui32RemapRegionSize),
+		                  ui64Remap0AddrOut);
+
+		PVR_DUMPDEBUG_LOG("    Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx,
+		                  ui32Index + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES,
+		                  ui32Remap1AddrIn,
+		                  RGXMIPSFW_REMAP_GET_REGION_SIZE(psRemapEntry1->ui32RemapRegionSize),
+		                  ui64Remap1AddrOut);
+	}
+}
+
+#endif /* !defined(NO_HARDWARE) */
+
+/*
+	Appends flag description strings to a null-terminated string buffer;
+	each description string starts with a space.
+*/
+static void _Flags2Description(IMG_CHAR *sDesc, const IMG_FLAGS2DESC *psConvTable, IMG_UINT32 ui32TableSize, IMG_UINT32 ui32Flags)
+{
+	IMG_UINT32 ui32Idx;
+
+	for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++)
+	{
+		if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag)
+		{
+			strcat(sDesc, psConvTable[ui32Idx].pszLabel);
+		}
+	}
+}
+
+/*
+	Writes the FW flags description to an uninitialised buffer.
+*/
+static void _GetFwFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32RawFlags)
+{
+	const IMG_CHAR *psCswLabel = "Ctx switch:";
+	strcpy(psDesc, psCswLabel);
+	_Flags2Description(psDesc, asCSW2Description, ARRAY_SIZE(asCSW2Description), ui32RawFlags);
+	_Flags2Description(psDesc, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags);
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpFWAssert
+
+ @Description
+
+ Dump FW assert strings when a thread asserts.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer
+
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+	IMG_CHAR    *pszTraceAssertPath;
+	IMG_CHAR    *pszTraceAssertInfo;
+	IMG_UINT32  ui32TraceAssertLine;
+	IMG_UINT32  i;
+
+	for (i = 0; i < RGXFW_THREAD_NUM; i++)
+	{
+		pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath;
+		pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo;
+		ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum;
+
+		/* print non-null assert strings */
+		if (*pszTraceAssertInfo)
+		{
+			PVR_DUMPDEBUG_LOG("FW-T%d Assert: %s (%s:%d)",
+			                  i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine);
+		}
+	}
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpFWFaults
+
+ @Description
+
+ Dump the fault information recorded by the FW when it faults.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer
+
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpFWFaults(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                             void *pvDumpDebugFile,
+                             RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+	if (psRGXFWIfTraceBufCtl->ui32FWFaults > 0)
+	{
+		IMG_UINT32	ui32StartFault = psRGXFWIfTraceBufCtl->ui32FWFaults - RGXFWIF_FWFAULTINFO_MAX;
+		IMG_UINT32	ui32EndFault   = psRGXFWIfTraceBufCtl->ui32FWFaults - 1;
+		IMG_UINT32  ui32Index;
+
+		if (psRGXFWIfTraceBufCtl->ui32FWFaults < RGXFWIF_FWFAULTINFO_MAX)
+		{
+			ui32StartFault = 0;
+		}
+
+		for (ui32Index = ui32StartFault; ui32Index <= ui32EndFault; ui32Index++)
+		{
+			RGX_FWFAULTINFO *psFaultInfo = &psRGXFWIfTraceBufCtl->sFaultInfo[ui32Index % RGXFWIF_FWFAULTINFO_MAX];
+			IMG_UINT64      ui64Seconds, ui64Nanoseconds;
+
+			/* Split OS timestamp in seconds and nanoseconds */
+			ConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds);
+
+			PVR_DUMPDEBUG_LOG("FW Fault %d: %s (%s:%d)",
+			                  ui32Index+1, psFaultInfo->sFaultBuf.szInfo,
+			                  psFaultInfo->sFaultBuf.szPath,
+			                  psFaultInfo->sFaultBuf.ui32LineNum);
+			PVR_DUMPDEBUG_LOG("            Data = 0x%08x, CRTimer = 0x%012"IMG_UINT64_FMTSPECx", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC,
+			                  psFaultInfo->ui32Data,
+			                  psFaultInfo->ui64CRTimer,
+			                  ui64Seconds, ui64Nanoseconds);
+		}
+	}
+}
+
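+/* Print any FW threads currently polling on a register value */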
+static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+	IMG_UINT32 i;
+	for (i = 0; i < RGXFW_THREAD_NUM; i++)
+	{
+		if (psRGXFWIfTraceBufCtl->aui32CrPollAddr[i])
+		{
+			PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)",
+			                  i,
+			                  ((psRGXFWIfTraceBufCtl->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+			                  psRGXFWIfTraceBufCtl->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET,
+			                  psRGXFWIfTraceBufCtl->aui32CrPollMask[i]);
+		}
+	}
+}
+
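+/* Dump a summary of the FW hardware recovery (HWR) counts followed by the
+ * details of each recorded HWR event, per data master.
+ */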
+static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile, RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl,
+					RGXFWIF_HWRINFOBUF *psHWInfoBuf, PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_BOOL   bAnyLocked = IMG_FALSE;
+	IMG_UINT32 dm, i;
+	IMG_UINT32 ui32LineSize;
+	IMG_CHAR   *pszLine, *pszTemp;
+	IMG_CHAR   *apszDmNames[] = {"GP", "TDM", "TA", "3D", "CDM",
+	                              "RTU", "SHG", NULL };
+
+	const IMG_CHAR    szMsgHeader[] = "Number of HWR: ";
+	const IMG_CHAR    szMsgFalse[] = "FALSE(";
+	IMG_CHAR          *pszLockupType = "";
+	RGX_HWRINFO       *psHWRInfo;
+	const IMG_UINT32  ui32MsgHeaderCharCount = ARRAY_SIZE(szMsgHeader) - 1; /* size includes the null */
+	const IMG_UINT32  ui32MsgFalseCharCount = ARRAY_SIZE(szMsgFalse) - 1;
+	IMG_UINT32        ui32HWRRecoveryFlags;
+	IMG_UINT32        ui32ReadIndex;
+
+	if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)))
+	{
+		apszDmNames[RGXFWIF_DM_TDM] = "2D";
+	}
+
+	for (dm = 0; dm < RGXFWIF_DM_MAX; dm++)
+	{
+		if (psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[dm] ||
+		    psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[dm])
+		{
+			bAnyLocked = IMG_TRUE;
+			break;
+		}
+	}
+
+	if (!bAnyLocked && (psRGXFWIfTraceBufCtl->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK) && !PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		/* No HWR situation, print nothing */
+		return;
+	}
+
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		IMG_BOOL bAnyHWROccurred = IMG_FALSE;
+
+		for (dm = 0; (dm < RGXFWIF_DM_MAX) && (apszDmNames[dm] != NULL); dm++)
+		{
+			if (psRGXFWIfTraceBufCtl->aui32HwrDmRecoveredCount[dm] != 0 ||
+				psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[dm] != 0 ||
+				psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[dm] != 0)
+			{
+				bAnyHWROccurred = IMG_TRUE;
+				break;
+			}
+		}
+
+		if (!bAnyHWROccurred)
+		{
+			return;
+		}
+	}
+
+	ui32LineSize = sizeof(IMG_CHAR) * (
+			ui32MsgHeaderCharCount +
+			(RGXFWIF_DM_MAX * (	4/*DM name + left parenthesis*/ +
+				10/*UINT32 max num of digits*/ +
+				1/*slash*/ +
+				10/*UINT32 max num of digits*/ +
+				3/*right parenthesis + comma + space*/)) +
+			ui32MsgFalseCharCount + 1 + (RGXFWIF_DM_MAX*6) + 1
+				/* 'FALSE(' + ')' + (UINT16 max num + comma) per DM + \0 */
+			);
+
+	pszLine = OSAllocMem(ui32LineSize);
+	if (pszLine == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			"%s: Out of mem allocating line string (size: %d)",
+			__func__,
+			ui32LineSize));
+		return;
+	}
+
+	OSStringCopy(pszLine, szMsgHeader);
+	pszTemp = pszLine + ui32MsgHeaderCharCount;
+
+	for (dm = 0; (dm < RGXFWIF_DM_MAX) && (apszDmNames[dm] != NULL); dm++)
+	{
+		pszTemp += OSSNPrintf(pszTemp,
+				4 + 10 + 1 + 10 + 1 + 10 + 1 + 1 + 1 + 1
+				/* (name + left parenthesis) + UINT32 + slash + UINT32 + plus + UINT32 + right parenthesis + comma + space + \0 */,
+				"%s(%u/%u+%u), ",
+				apszDmNames[dm],
+				psRGXFWIfTraceBufCtl->aui32HwrDmRecoveredCount[dm],
+				psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[dm],
+				psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[dm]);
+	}
+
+	OSStringCopy(pszTemp, szMsgFalse);
+	pszTemp += ui32MsgFalseCharCount;
+
+	for (dm = 0; (dm < RGXFWIF_DM_MAX) && (apszDmNames[dm] != NULL); dm++)
+	{
+		pszTemp += OSSNPrintf(pszTemp,
+				10 + 1 + 1 /* UINT32 max num + comma + \0 */,
+				(dm < RGXFWIF_DM_MAX-1 ? "%u," : "%u)"),
+				psRGXFWIfTraceBufCtl->aui32HwrDmFalseDetectCount[dm]);
+	}
+
+	PVR_DUMPDEBUG_LOG("%s", pszLine);
+
+	OSFreeMem(pszLine);
+
+	/* Print out per HWR info */
+	for (dm = 0; (dm < RGXFWIF_DM_MAX) && (apszDmNames[dm] != NULL); dm++)
+	{
+		if (dm == RGXFWIF_DM_GP)
+		{
+			PVR_DUMPDEBUG_LOG("DM %d (GP)", dm);
+		}
+		else
+		{
+			if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+			{
+				IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE];
+				sPerDmHwrDescription[0] = '\0';
+
+				_Flags2Description(sPerDmHwrDescription, asDmState2Description, ARRAY_SIZE(asDmState2Description), psRGXFWIfTraceBufCtl->aui32HWRRecoveryFlags[dm]);
+				PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x:%s)", dm, psRGXFWIfTraceBufCtl->aui32HWRRecoveryFlags[dm], sPerDmHwrDescription);
+			}
+			else
+			{
+				PVR_DUMPDEBUG_LOG("DM %d", dm);
+			}
+		}
+
+		ui32ReadIndex = 0;
+		for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+		{
+			psHWRInfo = &psHWInfoBuf->sHWRInfo[ui32ReadIndex];
+
+			if ((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0))
+			{
+				IMG_CHAR aui8RecoveryNum[10+10+1];
+				IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+				/* Split OS timestamp in seconds and nanoseconds */
+				ConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds);
+
+				ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags;
+				if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; }
+				else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; }
+				else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; }
+				else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; }
+				else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH) { pszLockupType = ", Hard Context Switch"; }
+
+				OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber);
+				PVR_DUMPDEBUG_LOG("  %s PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
+				                   aui8RecoveryNum,
+				                   psHWRInfo->ui32PID,
+				                   psHWRInfo->ui32FrameNum,
+				                   psHWRInfo->ui32ActiveHWRTData,
+				                   psHWRInfo->ui32EventStatus,
+				                   pszLockupType);
+				pszTemp = &aui8RecoveryNum[0];
+				while (*pszTemp != '\0')
+				{
+					*pszTemp++ = ' ';
+				}
+
+				/* There's currently no time correlation for the Guest OSes on the Firmware so there's no point printing OS Timestamps on Guests */
+				if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+				{
+					PVR_DUMPDEBUG_LOG("  %s CRTimer = 0x%012"IMG_UINT64_FMTSPECx", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd,
+									   aui8RecoveryNum,
+									   psHWRInfo->ui64CRTimer,
+									   ui64Seconds,
+									   ui64Nanoseconds,
+									   (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256);
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG("  %s CRTimer = 0x%012"IMG_UINT64_FMTSPECx", CyclesElapsed = %" IMG_INT64_FMTSPECd,
+									   aui8RecoveryNum,
+									   psHWRInfo->ui64CRTimer,
+									   (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256);
+				}
+
+				if (psHWRInfo->ui64CRTimeHWResetFinish != 0)
+				{
+					if (psHWRInfo->ui64CRTimeFreelistReady != 0)
+					{
+						PVR_DUMPDEBUG_LOG("  %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd,
+										   aui8RecoveryNum,
+										   (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+										   (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+										   (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256,
+										   (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256);
+					}
+					else
+					{
+						PVR_DUMPDEBUG_LOG("  %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd,
+										   aui8RecoveryNum,
+										   (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+										   (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+										   (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256);
+					}
+				}
+
+				switch (psHWRInfo->eHWRType)
+				{
+					case RGX_HWRTYPE_BIF0FAULT:
+					case RGX_HWRTYPE_BIF1FAULT:
+					{
+						if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)))
+						{
+							_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXFWIF_HWRTYPE_BIF_BANK_GET(psHWRInfo->eHWRType),
+											psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+											psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+											ui32ReadIndex);
+						}
+					}
+					break;
+					case RGX_HWRTYPE_TEXASBIF0FAULT:
+					{
+						if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)))
+						{
+							if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING))
+							{
+								_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF,
+											psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+											psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+											ui32ReadIndex);
+							}
+						}
+					}
+					break;
+					case RGX_HWRTYPE_MMUFAULT:
+					{
+						if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+						{
+							_RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+											psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus,
+											ui32ReadIndex,
+											"Core");
+						}
+					}
+					break;
+					case RGX_HWRTYPE_MMUMETAFAULT:
+					{
+						if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+						{
+							_RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+											psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus,
+											ui32ReadIndex,
+											"Meta");
+						}
+					}
+					break;
+
+
+					case RGX_HWRTYPE_POLLFAILURE:
+					{
+						PVR_DUMPDEBUG_LOG("    T%u polling %s (reg:0x%08X mask:0x%08X last:0x%08X)",
+										  psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum,
+										  ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+										  psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET,
+										  psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask,
+										  psHWRInfo->uHWRData.sPollInfo.ui32CrPollLastValue);
+					}
+					break;
+
+					case RGX_HWRTYPE_OVERRUN:
+					case RGX_HWRTYPE_UNKNOWNFAILURE:
+					{
+						/* Nothing to dump */
+					}
+					break;
+
+					default:
+					{
+						PVR_ASSERT(IMG_FALSE);
+					}
+					break;
+				}
+			}
+
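+			/* Advance the read index. A minimal reading of the layout (an
+			 * assumption from the constants): the log keeps the first
+			 * RGXFWIF_HWINFO_MAX_FIRST resets, followed by a circular
+			 * region of RGXFWIF_HWINFO_MAX_LAST recent entries that the
+			 * index wraps within. */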
+			if (ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1)
+				ui32ReadIndex = psHWInfoBuf->ui32WriteIndex;
+			else
+				ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST;
+		}
+	}
+}
+
+#if !defined(NO_HARDWARE)
+
+/*!
+*******************************************************************************
+
+ @Function	_CheckForPendingPage
+
+ @Description
+
+ Check if the MMU indicates it is blocked on a pending page
+
+ @Input psDevInfo	 - RGX device info
+
+ @Return   IMG_BOOL      - IMG_TRUE if there is a pending page
+
+******************************************************************************/
+static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_UINT32 ui32BIFMMUEntry;
+
+	ui32BIFMMUEntry = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY);
+
+	/* A non-zero pending bit means the MMU is blocked on a pending page */
+	return (ui32BIFMMUEntry & RGX_CR_BIF_MMU_ENTRY_PENDING_EN) ? IMG_TRUE : IMG_FALSE;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_GetPendingPageInfo
+
+ @Description
+
+ Get information about the pending page from the MMU status registers
+
+ @Input psDevInfo	 - RGX device info
+ @Output psDevVAddr      - The device virtual address of the pending MMU address translation
+ @Output pui32CatBase    - The page catalog base
+ @Output pui32DataType   - The MMU entry data type
+
+ @Return   void
+
+******************************************************************************/
+static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr,
+									IMG_UINT32 *pui32CatBase,
+									IMG_UINT32 *pui32DataType)
+{
+	IMG_UINT64 ui64BIFMMUEntryStatus;
+
+	ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY_STATUS);
+
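+	/* In the register headers a *_CLRMSK constant has every bit set except
+	 * the field itself, so ANDing with its complement isolates the field
+	 * before shifting. */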
+	psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK);
+
+	*pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK) >>
+								RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT;
+
+	*pui32DataType = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK) >>
+								RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT;
+}
+
+#endif
+
+void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					PVRSRV_RGXDEV_INFO *psDevInfo,
+					IMG_BOOL bRGXPoweredON)
+{
+	IMG_CHAR *pszState, *pszReason;
+	RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	IMG_UINT32 ui32OSid;
+	RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+	/* space for the current clock speed and 3 previous */
+	RGXFWIF_TIME_CORR asTimeCorrs[4];
+	IMG_UINT32 ui32NumClockSpeedChanges;
+
+#if defined(NO_HARDWARE)
+	PVR_UNREFERENCED_PARAMETER(bRGXPoweredON);
+#else
+	if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+		{
+
+			IMG_UINT64	ui64RegValMMUStatus;
+
+			ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS);
+			_RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, RGXFWIF_HWINFO_MAX, "Core");
+
+			ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META);
+			_RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, RGXFWIF_HWINFO_MAX, "Meta");
+		}
+		else
+		{
+			IMG_UINT64	ui64RegValMMUStatus, ui64RegValREQStatus;
+
+			ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_MMU_STATUS);
+			ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_REQ_STATUS);
+
+			_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF0, ui64RegValMMUStatus, ui64RegValREQStatus, RGXFWIF_HWINFO_MAX);
+
+			if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SINGLE_BIF)))
+			{
+				ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_MMU_STATUS);
+				ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_REQ_STATUS);
+				_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF1, ui64RegValMMUStatus, ui64RegValREQStatus, RGXFWIF_HWINFO_MAX);
+			}
+
+			if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING))
+			{
+				IMG_UINT32  ui32PhantomCnt = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ?  RGX_REQ_NUM_PHANTOMS(RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) : 0;
+
+				if (ui32PhantomCnt > 1)
+				{
+					IMG_UINT32  ui32Phantom;
+					for (ui32Phantom = 0;  ui32Phantom < ui32PhantomCnt;  ui32Phantom++)
+					{
+						/* This can't be done as it may interfere with the FW... */
+						/*OSWriteHWReg64(RGX_CR_TEXAS_INDIRECT, ui32Phantom);*/
+
+						ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS);
+						ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS);
+
+						_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, RGXFWIF_HWINFO_MAX);
+					}
+				}
+				else
+				{
+					ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS);
+					ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS);
+
+					_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, RGXFWIF_HWINFO_MAX);
+				}
+			}
+		}
+
+		if (_CheckForPendingPage(psDevInfo))
+		{
+			IMG_UINT32 ui32CatBase;
+			IMG_UINT32 ui32DataType;
+			IMG_DEV_VIRTADDR sDevVAddr;
+
+			PVR_DUMPDEBUG_LOG("MMU Pending page: Yes");
+
+			_GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase, &ui32DataType);
+
+			if (ui32CatBase >= 8)
+			{
+				PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase);
+			}
+			else
+			{
+				IMG_DEV_PHYADDR sPCDevPAddr;
+
+				sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32CatBase));
+
+				PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC
+							" on cat base %u. PC Addr = 0x%" IMG_UINT64_FMTSPECx,
+								sDevVAddr.uiAddr,
+								ui32CatBase,
+								sPCDevPAddr.uiAddr);
+				RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr,
+							pfnDumpDebugPrintf, pvDumpDebugFile, NULL);
+			}
+		}
+	}
+#endif /* NO_HARDWARE */
+
+	/* Firmware state */
+	switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus))
+	{
+		case PVRSRV_DEVICE_HEALTH_STATUS_OK:  pszState = "OK";  break;
+		case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:  pszState = "NOT RESPONDING";  break;
+		case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:  pszState = "DEAD";  break;
+		case PVRSRV_DEVICE_HEALTH_STATUS_FAULT:  pszState = "FAULT";  break;
+		case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED:  pszState = "UNDEFINED";  break;
+		default:  pszState = "UNKNOWN";  break;
+	}
+
+	switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason))
+	{
+		case PVRSRV_DEVICE_HEALTH_REASON_NONE:  pszReason = "";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED:  pszReason = " - Asserted";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING:  pszReason = " - Poll failure";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS:  pszReason = " - Global Event Object timeouts rising";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT:  pszReason = " - KCCB offset invalid";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED:  pszReason = " - KCCB stalled";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_IDLING:  pszReason = " - Idling";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING:  pszReason = " - Restarting";  break;
+		default:  pszReason = " - Unknown reason";  break;
+	}
+
+	if (psRGXFWIfTraceBuf == NULL)
+	{
+		PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason);
+
+		/* can't dump any more information */
+		return;
+	}
+
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE];
+		sHwrStateDescription[0] = '\0';
+
+		_Flags2Description(sHwrStateDescription, asHwrState2Description, ARRAY_SIZE(asHwrState2Description), psRGXFWIfTraceBuf->ui32HWRStateFlags);
+		PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x:%s)", pszState, pszReason, psRGXFWIfTraceBuf->ui32HWRStateFlags, sHwrStateDescription);
+		PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. Latency: %u ms)",
+	                  pszPowStateName[psRGXFWIfTraceBuf->ePowState],
+	                  (psDevInfo->pvAPMISRData)?"enabled":"disabled",
+	                  psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle,
+	                  psDevInfo->ui32ActivePMReqDenied,
+	                  psDevInfo->ui32ActivePMReqNonIdle,
+	                  psDevInfo->ui32ActivePMReqRetry,
+	                  psDevInfo->ui32ActivePMReqTotal -
+						  psDevInfo->ui32ActivePMReqOk -
+						  psDevInfo->ui32ActivePMReqDenied -
+						  psDevInfo->ui32ActivePMReqRetry -
+						  psDevInfo->ui32ActivePMReqNonIdle,
+	                  psDevInfo->ui32ActivePMReqTotal,
+			  psRuntimeCfg->ui32ActivePMLatencyms);
+
+		ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges);
+		RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs));
+
+		PVR_DUMPDEBUG_LOG("RGX DVFS: %u frequency changes. Current frequency: %u.%03u MHz (sampled at %" IMG_UINT64_FMTSPEC ")",
+											ui32NumClockSpeedChanges,
+											asTimeCorrs[0].ui32CoreClockSpeed / 1000000,
+											(asTimeCorrs[0].ui32CoreClockSpeed / 1000) % 1000,
+											asTimeCorrs[0].ui64OSTimeStamp);
+		if (ui32NumClockSpeedChanges > 0)
+		{
+			PVR_DUMPDEBUG_LOG("          Previous frequencies: %u.%03u, %u.%03u, %u.%03u MHz (Sampled at "
+							"%" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ")",
+												asTimeCorrs[1].ui32CoreClockSpeed / 1000000,
+												(asTimeCorrs[1].ui32CoreClockSpeed / 1000) % 1000,
+												asTimeCorrs[2].ui32CoreClockSpeed / 1000000,
+												(asTimeCorrs[2].ui32CoreClockSpeed / 1000) % 1000,
+												asTimeCorrs[3].ui32CoreClockSpeed / 1000000,
+												(asTimeCorrs[3].ui32CoreClockSpeed / 1000) % 1000,
+												asTimeCorrs[1].ui64OSTimeStamp,
+												asTimeCorrs[2].ui64OSTimeStamp,
+												asTimeCorrs[3].ui64OSTimeStamp);
+		}
+
+		for (ui32OSid = 0; ui32OSid < RGXFW_NUM_OS; ui32OSid++)
+		{
+			RGXFWIF_PER_OS_STATES sFwOsState = psRGXFWIfTraceBuf->sPerOsStateMirror[ui32OSid];
+			static const IMG_CHAR * const apszFwOsStateName[4] =
+			{
+				"stopped",
+				"ready",
+				"active",
+				"offloading"
+			};
+
+			PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s%s", ui32OSid,
+							  apszFwOsStateName[sFwOsState.bfOsState],
+							  (sFwOsState.bfFLOk) ? "Ok" : "Not Ok",
+							  (sFwOsState.bfFLGrowPending) ? "; Grow Request Pending" : "",
+							  (sFwOsState.bfIsolatedOS) ? "; Isolated;" : ""
+							 );
+		}
+
+		_RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf);
+		_RGXDumpFWFaults(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf);
+		_RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf);
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG("RGX FW State: Unavailable under Guest Mode of operation");
+		PVR_DUMPDEBUG_LOG("RGX FW Power State: Unavailable under Guest Mode of operation");
+	}
+
+	_RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf, psDevInfo->psRGXFWIfHWRInfoBuf, psDevInfo);
+
+#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK)
+	/* Dump all non-zero values in lines of 8... */
+	{
+		IMG_CHAR    pszLine[(9*RGXFWIF_STATS_FRAMEWORK_LINESIZE)+1];
+		IMG_UINT32  *pui32FWStatsBuf = psRGXFWIfTraceBuf->aui32FWStatsBuf;
+		IMG_UINT32  ui32Index1, ui32Index2;
+
+		PVR_DUMPDEBUG_LOG("STATS[START]: RGXFWIF_STATS_FRAMEWORK_MAX=%d", RGXFWIF_STATS_FRAMEWORK_MAX);
+		for (ui32Index1 = 0;  ui32Index1 < RGXFWIF_STATS_FRAMEWORK_MAX;  ui32Index1 += RGXFWIF_STATS_FRAMEWORK_LINESIZE)
+		{
+			IMG_UINT32  ui32OrOfValues = 0;
+			IMG_CHAR    *pszBuf = pszLine;
+
+			/* Print all values in this line and skip if all zero... */
+			for (ui32Index2 = 0;  ui32Index2 < RGXFWIF_STATS_FRAMEWORK_LINESIZE;  ui32Index2++)
+			{
+				ui32OrOfValues |= pui32FWStatsBuf[ui32Index1+ui32Index2];
+				OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32FWStatsBuf[ui32Index1+ui32Index2]);
+				pszBuf += 9; /* write over the '\0' */
+			}
+
+			if (ui32OrOfValues != 0)
+			{
+				PVR_DUMPDEBUG_LOG("STATS[%08x]:%s", ui32Index1, pszLine);
+			}
+		}
+		PVR_DUMPDEBUG_LOG("STATS[END]");
+	}
+#endif
+}
+
+static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile,
+						PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+/* List of extra META Slave Port debug registers */
+#define RGX_META_SP_EXTRA_DEBUG \
+			X(RGX_CR_META_SP_MSLVCTRL0) \
+			X(RGX_CR_META_SP_MSLVCTRL1) \
+			X(RGX_CR_META_SP_MSLVDATAX) \
+			X(RGX_CR_META_SP_MSLVIRQSTATUS) \
+			X(RGX_CR_META_SP_MSLVIRQENABLE) \
+			X(RGX_CR_META_SP_MSLVIRQLEVEL)
+
+	IMG_UINT32 ui32Idx, ui32RegIdx;
+	IMG_UINT32 ui32RegVal;
+	IMG_UINT32 ui32RegAddr;
+
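+	/* X-macro expansion: the same RGX_META_SP_EXTRA_DEBUG list is expanded
+	 * twice below, once for the register addresses and once for their
+	 * names, keeping the two tables in sync by construction. */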
+	const IMG_UINT32 aui32DebugRegAddr [] = {
+#define X(A) A,
+		RGX_META_SP_EXTRA_DEBUG
+#undef X
+		};
+
+	const IMG_CHAR* apszDebugRegName [] = {
+#define X(A) #A,
+	RGX_META_SP_EXTRA_DEBUG
+#undef X
+	};
+
+	const IMG_UINT32 aui32Debug2RegAddr [] = {0xA28, 0x0A30, 0x0A38};
+
+	PVR_DUMPDEBUG_LOG("META Slave Port extra debug:");
+
+	/* dump first set of Slave Port debug registers */
+	for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++)
+	{
+		const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx];
+
+		ui32RegAddr = aui32DebugRegAddr[ui32Idx];
+		ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+		PVR_DUMPDEBUG_LOG("  * %s: 0x%8.8X", pszRegName, ui32RegVal);
+	}
+
+	/* dump second set of Slave Port debug registers */
+	for (ui32Idx = 0; ui32Idx < 4; ui32Idx++)
+	{
+		OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx);
+		ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20);
+		PVR_DUMPDEBUG_LOG("  * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal);
+
+	}
+
+	for (ui32RegIdx = 0; ui32RegIdx < sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++)
+	{
+		ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx];
+		for (ui32Idx = 0; ui32Idx < 2; ui32Idx++)
+		{
+			OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx);
+			ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+			PVR_DUMPDEBUG_LOG("  * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal);
+		}
+	}
+
+}
+
+void RGXDumpDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile,
+			PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PVR_DUMPDEBUG_LOG("------[ RGX Device: Start ]------");
+
+	RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile,
+				psDevInfo, DEBUG_REQUEST_VERBOSITY_MAX);
+
+	PVR_DUMPDEBUG_LOG("------[ RGX Device: End ]------");
+}
+
+/*
+ *  Array of all the Firmware Trace log IDs used to convert the trace data.
+ */
+typedef struct _TRACEBUF_LOG_ {
+	RGXFW_LOG_SFids	eSFId;
+	const IMG_CHAR	*pszName;
+	const IMG_CHAR	*pszFmt;
+	IMG_UINT32		ui32ArgNum;
+} TRACEBUF_LOG;
+
+static const TRACEBUF_LOG aLogDefinitions[] =
+{
+#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e},
+	RGXFW_LOG_SFIDLIST
+#undef X
+};
+
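+/* The SF id packs the argument count (the CREATESFID 'e' parameter) into
+ * bits 16..19, hence the 0xF<<16 mask below; stripping those bits lets the
+ * uniqueness check later compare entries on id alone. */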
+#define NARGS_MASK (~(0xF<<16))
+static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile)
+{
+	const TRACEBUF_LOG *psLogDef = &aLogDefinitions[0];
+	IMG_BOOL bIntegrityOk = IMG_TRUE;
+
+	/*
+	 * For every log ID, check that the format string and number of arguments are valid.
+	 */
+	while (psLogDef->eSFId != RGXFW_SF_LAST)
+	{
+		const TRACEBUF_LOG *psLogDef2;
+		const IMG_CHAR *pszString;
+		IMG_UINT32 ui32Count;
+
+		/*
+		 * Check the number of arguments matches the number of '%' in the string and
+		 * check that no string uses %s which is not supported as it requires a
+		 * pointer to memory that is not going to be valid.
+		 */
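+		/* For example, "PID %u frame %d" counts two arguments, while a
+		 * literal "100%% done" counts none because the %% pair is consumed
+		 * as an escaped '%'. */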
+		pszString = psLogDef->pszFmt;
+		ui32Count = 0;
+
+		while (*pszString != '\0')
+		{
+			if (*pszString++ == '%')
+			{
+				ui32Count++;
+				if (*pszString == 's')
+				{
+					bIntegrityOk = IMG_FALSE;
+					PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s uses an unsupported format specifier (fmt: %%%c). Please fix.",
+									  psLogDef->pszName, *pszString);
+				}
+				else if (*pszString == '%')
+				{
+					/* Double %% is a printable '%' and not a format specifier;
+					 * skip the second '%' so it is not counted as a new one. */
+					ui32Count--;
+					pszString++;
+				}
+			}
+		}
+
+		if (ui32Count != psLogDef->ui32ArgNum)
+		{
+			bIntegrityOk = IMG_FALSE;
+			PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s format string takes %d arguments but %d are declared. Please fix.",
+			                  psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum);
+		}
+
+		/* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */
+		if (ui32Count > 20)
+		{
+			bIntegrityOk = IMG_FALSE;
+			PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.",
+			                  psLogDef->pszName, ui32Count);
+		}
+
+		/* Check the id number is unique (don't take into account the number of arguments) */
+		ui32Count = 0;
+		psLogDef2 = &aLogDefinitions[0];
+
+		while (psLogDef2->eSFId != RGXFW_SF_LAST)
+		{
+			if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK))
+			{
+				ui32Count++;
+			}
+			psLogDef2++;
+		}
+
+		if (ui32Count != 1)
+		{
+			bIntegrityOk = IMG_FALSE;
+			PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.",
+			                  psLogDef->pszName, psLogDef->eSFId, ui32Count - 1);
+		}
+
+		/* Move to the next log ID... */
+		psLogDef++;
+	}
+
+	return bIntegrityOk;
+}
+
+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile,
+				PVRSRV_RGXDEV_INFO  *psDevInfo)
+{
+	RGXFWIF_TRACEBUF  *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+	static IMG_BOOL   bIntegrityCheckPassed = IMG_FALSE;
+
+	/* Check that the firmware trace is correctly defined... */
+	if (!bIntegrityCheckPassed)
+	{
+		bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile);
+		if (!bIntegrityCheckPassed)
+		{
+			return;
+		}
+	}
+
+	/* Dump FW trace information... */
+	if (psRGXFWIfTraceBufCtl != NULL)
+	{
+		IMG_UINT32  tid;
+		IMG_UINT32  ui32TraceBufSizeInDWords = psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords;
+
+		/* Print the log type settings... */
+		if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+		{
+			PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+							  ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+							  RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+							  );
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG("Debug log type: none");
+		}
+
+		/* Print the decoded log for each thread... */
+		for (tid = 0;  tid < RGXFW_THREAD_NUM;  tid++)
+		{
+			IMG_UINT32  *pui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
+			IMG_UINT32  ui32TracePtr  = psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer;
+			IMG_UINT32  ui32Count     = 0;
+
+			if (pui32TraceBuf == NULL)
+			{
+				/* trace buffer not yet allocated */
+				continue;
+			}
+
+			while (ui32Count < ui32TraceBufSizeInDWords)
+			{
+				IMG_UINT32  ui32Data, ui32DataToId;
+
+				/* Find the first valid log ID, skipping any padding or invalid data... */
+				do
+				{
+					ui32Data     = pui32TraceBuf[ui32TracePtr];
+					ui32DataToId = idToStringID(ui32Data, SFs);
+
+					/* If an unrecognized id is found it may be inconsistent data or a firmware trace error. */
+					if (ui32DataToId == RGXFW_SF_LAST  &&  RGXFW_LOG_VALIDID(ui32Data))
+					{
+						PVR_DUMPDEBUG_LOG("WARNING: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data);
+					}
+
+					/* Update the trace pointer... */
+					ui32TracePtr = (ui32TracePtr + 1) % ui32TraceBufSizeInDWords;
+					ui32Count++;
+				} while ((RGXFW_SF_LAST == ui32DataToId  ||  ui32DataToId >= RGXFW_SF_FIRST)  &&
+				         ui32Count < ui32TraceBufSizeInDWords);
+
+				if (ui32Count < ui32TraceBufSizeInDWords)
+				{
+					IMG_CHAR   szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%" IMG_UINT64_FMTSPEC ":T%u-%s> ";
+					IMG_UINT64 ui64Timestamp;
+					IMG_UINT   uiLen;
+
+					/* If we hit the ASSERT message then this is the end of the log... */
+					if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED)
+					{
+						PVR_DUMPDEBUG_LOG("ASSERTION %s failed at %s:%u",
+										  psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo,
+										  psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath,
+										  psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum);
+						break;
+					}
+
+					/*
+					 * Print the trace string and provide up to 20 arguments which
+					 * printf function will be able to use. We have already checked
+					 * that no string uses more than this.
+					 */
+					OSStringCopy(&szBuffer[OSStringLength(szBuffer)], SFs[ui32DataToId].psName);
+					uiLen = OSStringLength(szBuffer);
+					szBuffer[uiLen ? uiLen - 1 : 0] = '\0';
+					ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 0) % ui32TraceBufSizeInDWords]) << 32 |
+					                (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32TraceBufSizeInDWords]);
+					PVR_DUMPDEBUG_LOG(szBuffer, ui64Timestamp, tid, groups[RGXFW_SF_GID(ui32Data)],
+									  pui32TraceBuf[(ui32TracePtr +  2) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr +  3) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr +  4) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr +  5) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr +  6) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr +  7) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr +  8) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr +  9) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr + 10) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr + 11) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr + 12) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr + 13) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr + 14) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr + 15) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr + 16) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr + 17) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr + 18) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr + 19) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr + 20) % ui32TraceBufSizeInDWords],
+									  pui32TraceBuf[(ui32TracePtr + 21) % ui32TraceBufSizeInDWords]);
+
+					/* Update the trace pointer: skip the 64-bit timestamp (2 DWords) plus this entry's parameters... */
+					ui32TracePtr = (ui32TracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data)) % ui32TraceBufSizeInDWords;
+					ui32Count    = (ui32Count    + 2 + RGXFW_SF_PARAMNUM(ui32Data));
+				}
+			}
+		}
+	}
+}
+
+static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState)
+{
+	switch (eDevState)
+	{
+		case PVRSRV_DEVICE_STATE_INIT:
+			return "Initialising";
+		case PVRSRV_DEVICE_STATE_ACTIVE:
+			return "Active";
+		case PVRSRV_DEVICE_STATE_DEINIT:
+			return "De-initialising";
+		case PVRSRV_DEVICE_STATE_BAD:
+			return "Bad";
+		case PVRSRV_DEVICE_STATE_UNDEFINED:
+			PVR_ASSERT(!"Device has undefined state");
+			__fallthrough;
+		default:
+			return "Unknown";
+	}
+}
+
+static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState)
+{
+	switch (ePowerState)
+	{
+		case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT";
+		case PVRSRV_DEV_POWER_STATE_OFF: return "OFF";
+		case PVRSRV_DEV_POWER_STATE_ON: return "ON";
+		default: return "UNKNOWN";
+	}
+}
+
+PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+								 void *pvDumpDebugFile,
+								 PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_UINT32   ui32Meta = (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ? RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0;
+	IMG_UINT32   ui32TACycles, ui323DCycles, ui32TAOr3DCycles, ui32TAAnd3DCycles;
+	IMG_UINT32   ui32RegVal;
+	IMG_BOOL     bFirmwarePerf;
+	IMG_BOOL     bS7Infra = RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE);
+	void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+	PVRSRV_ERROR eError;
+	RGXFWIF_INIT *psRGXFWInit = NULL;
+
+	PVR_DUMPDEBUG_LOG("------[ RGX registers ]------");
+	PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear):   0x%p", psDevInfo->pvRegsBaseKM);
+	PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr);
+
+	/* Check if firmware perf was set at Init time */
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc, (void**)&psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to acquire kernel FW IF Init struct"));
+		return eError;
+	}
+	bFirmwarePerf = (psRGXFWInit->eFirmwarePerf != FW_PERF_CONF_NONE);
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+
+/* Helper macros to emit data */
+#define REG32_FMTSPEC   "%-30s: 0x%08X"
+#define REG64_FMTSPEC   "%-30s: 0x%016" IMG_UINT64_FMTSPECx
+#define DDLOG32(R)      PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R));
+#define DDLOG64(R)      PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R));
+#define DDLOG32_DPX(R)  PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R));
+#define DDLOG64_DPX(R)  PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R));
+#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V);
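+/* For example, DDLOG32(EVENT_STATUS) reads RGX_CR_EVENT_STATUS with
+ * OSReadHWReg32 and prints it padded as "EVENT_STATUS ... : 0x%08X". */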
+
+#if defined(NO_HARDWARE)
+	/* OSReadHWReg variants don't use params passed in NoHW builds */
+	PVR_UNREFERENCED_PARAMETER(pvRegsBaseKM);
+#endif
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBVNC_COREID_REG))
+	{
+		DDLOG64(CORE_ID);
+	}
+	else
+	{
+		DDLOG32(CORE_ID);
+	}
+	DDLOG32(CORE_REVISION);
+	DDLOG32(DESIGNER_REV_FIELD1);
+	DDLOG32(DESIGNER_REV_FIELD2);
+	DDLOG64(CHANGESET_NUMBER);
+	if (ui32Meta)
+	{
+		DDLOG32(META_SP_MSLVIRQSTATUS);
+	}
+
+	DDLOG64(CLK_CTRL);
+	DDLOG64(CLK_STATUS);
+	DDLOG64(CLK_CTRL2);
+	DDLOG64(CLK_STATUS2);
+
+	if (bS7Infra)
+	{
+		DDLOG64(CLK_XTPLUS_CTRL);
+		DDLOG64(CLK_XTPLUS_STATUS);
+	}
+	DDLOG32(EVENT_STATUS);
+	DDLOG64(TIMER);
+	if (bS7Infra)
+	{
+		DDLOG64(MMU_FAULT_STATUS);
+		DDLOG64(MMU_FAULT_STATUS_META);
+	}
+	else
+	{
+		DDLOG32(BIF_FAULT_BANK0_MMU_STATUS);
+		DDLOG64(BIF_FAULT_BANK0_REQ_STATUS);
+		DDLOG32(BIF_FAULT_BANK1_MMU_STATUS);
+		DDLOG64(BIF_FAULT_BANK1_REQ_STATUS);
+	}
+	DDLOG32(BIF_MMU_STATUS);
+	DDLOG32(BIF_MMU_ENTRY);
+	DDLOG64(BIF_MMU_ENTRY_STATUS);
+
+	if (bS7Infra)
+	{
+		DDLOG32(BIF_JONES_OUTSTANDING_READ);
+		DDLOG32(BIF_BLACKPEARL_OUTSTANDING_READ);
+		DDLOG32(BIF_DUST_OUTSTANDING_READ);
+	}
+	else
+	{
+		if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE)))
+		{
+			DDLOG32(BIF_STATUS_MMU);
+			DDLOG32(BIF_READS_EXT_STATUS);
+			DDLOG32(BIF_READS_INT_STATUS);
+		}
+		DDLOG32(BIFPM_STATUS_MMU);
+		DDLOG32(BIFPM_READS_EXT_STATUS);
+		DDLOG32(BIFPM_READS_INT_STATUS);
+	}
+
+	if (RGX_IS_BRN_SUPPORTED(psDevInfo, 44871))
+	{
+		PVR_DUMPDEBUG_LOG("Warning: BRN44871 is present");
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))
+	{
+		DDLOG64(CONTEXT_MAPPING0);
+		DDLOG64(CONTEXT_MAPPING1);
+		DDLOG64(CONTEXT_MAPPING2);
+		DDLOG64(CONTEXT_MAPPING3);
+		DDLOG64(CONTEXT_MAPPING4);
+	}
+	else
+	{
+		DDLOG64(BIF_CAT_BASE_INDEX);
+		DDLOG64(BIF_CAT_BASE0);
+		DDLOG64(BIF_CAT_BASE1);
+		DDLOG64(BIF_CAT_BASE2);
+		DDLOG64(BIF_CAT_BASE3);
+		DDLOG64(BIF_CAT_BASE4);
+		DDLOG64(BIF_CAT_BASE5);
+		DDLOG64(BIF_CAT_BASE6);
+		DDLOG64(BIF_CAT_BASE7);
+	}
+
+	DDLOG32(BIF_CTRL_INVAL);
+	DDLOG32(BIF_CTRL);
+
+	DDLOG64(BIF_PM_CAT_BASE_VCE0);
+	DDLOG64(BIF_PM_CAT_BASE_TE0);
+	DDLOG64(BIF_PM_CAT_BASE_ALIST0);
+	DDLOG64(BIF_PM_CAT_BASE_VCE1);
+	DDLOG64(BIF_PM_CAT_BASE_TE1);
+	DDLOG64(BIF_PM_CAT_BASE_ALIST1);
+
+	DDLOG32(PERF_TA_PHASE);
+	DDLOG32(PERF_TA_CYCLE);
+	DDLOG32(PERF_3D_PHASE);
+	DDLOG32(PERF_3D_CYCLE);
+
+	ui32TACycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_CYCLE);
+	ui323DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_3D_CYCLE);
+	ui32TAOr3DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_OR_3D_CYCLE);
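+	/* Inclusion-exclusion: cycles with TA and 3D both active = TA + 3D - (TA or 3D),
+	 * clamped to zero in case the counters were sampled slightly apart. */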
+	ui32TAAnd3DCycles = ((ui32TACycles + ui323DCycles) > ui32TAOr3DCycles) ? (ui32TACycles + ui323DCycles - ui32TAOr3DCycles) : 0;
+	DDLOGVAL32("PERF_TA_OR_3D_CYCLE", ui32TAOr3DCycles);
+	DDLOGVAL32("PERF_TA_AND_3D_CYCLE", ui32TAAnd3DCycles);
+
+	DDLOG32(PERF_COMPUTE_PHASE);
+	DDLOG32(PERF_COMPUTE_CYCLE);
+
+	DDLOG32(PM_PARTIAL_RENDER_ENABLE);
+
+	DDLOG32(ISP_RENDER);
+	DDLOG64(TLA_STATUS);
+	DDLOG64(MCU_FENCE);
+
+	DDLOG32(VDM_CONTEXT_STORE_STATUS);
+	DDLOG64(VDM_CONTEXT_STORE_TASK0);
+	DDLOG64(VDM_CONTEXT_STORE_TASK1);
+	DDLOG64(VDM_CONTEXT_STORE_TASK2);
+	DDLOG64(VDM_CONTEXT_RESUME_TASK0);
+	DDLOG64(VDM_CONTEXT_RESUME_TASK1);
+	DDLOG64(VDM_CONTEXT_RESUME_TASK2);
+
+	DDLOG32(ISP_CTL);
+	DDLOG32(ISP_STATUS);
+	DDLOG32(MTS_INTCTX);
+	DDLOG32(MTS_BGCTX);
+	DDLOG32(MTS_BGCTX_COUNTED_SCHEDULE);
+	DDLOG32(MTS_SCHEDULE);
+	DDLOG32(MTS_GPU_INT_STATUS);
+
+	DDLOG32(CDM_CONTEXT_STORE_STATUS);
+	DDLOG64(CDM_CONTEXT_PDS0);
+	DDLOG64(CDM_CONTEXT_PDS1);
+	DDLOG64(CDM_TERMINATE_PDS);
+	DDLOG64(CDM_TERMINATE_PDS1);
+
+	if (RGX_IS_ERN_SUPPORTED(psDevInfo, 47025))
+	{
+		DDLOG64(CDM_CONTEXT_LOAD_PDS0);
+		DDLOG64(CDM_CONTEXT_LOAD_PDS1);
+	}
+
+	if (bS7Infra)
+	{
+		DDLOG32(JONES_IDLE);
+	}
+
+	DDLOG32(SIDEKICK_IDLE);
+
+	if (!bS7Infra)
+	{
+		DDLOG32(SLC_IDLE);
+		DDLOG32(SLC_STATUS0);
+		DDLOG64(SLC_STATUS1);
+
+		if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS) && RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS))
+		{
+			DDLOG64(SLC_STATUS2);
+		}
+
+		DDLOG32(SLC_CTRL_BYPASS);
+		DDLOG64(SLC_CTRL_MISC);
+	}
+	else
+	{
+		DDLOG32(SLC3_IDLE);
+		DDLOG64(SLC3_STATUS);
+		DDLOG32(SLC3_FAULT_STOP_STATUS);
+	}
+
+	if (ui32Meta)
+	{
+		IMG_BOOL bIsT0Enabled = IMG_FALSE, bIsFWFaulted = IMG_FALSE;
+
+		/* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */
+		OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0);
+
+		eError = RGXReadWithSP(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal);
+		PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+		DDLOGVAL32("T0 TXENABLE", ui32RegVal);
+		if (ui32RegVal & META_CR_TXENABLE_ENABLE_BIT)
+		{
+			bIsT0Enabled = IMG_TRUE;
+		}
+
+		eError = RGXReadWithSP(psDevInfo, META_CR_T0STATUS_OFFSET, &ui32RegVal);
+		PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+		DDLOGVAL32("T0 TXSTATUS", ui32RegVal);
+
+		/* check for FW fault: TXSTATUS bits [21:20] read 0x2 when faulted */
+		if (((ui32RegVal >> 20) & 0x3) == 0x2)
+		{
+			bIsFWFaulted = IMG_TRUE;
+		}
+
+		eError = RGXReadWithSP(psDevInfo, META_CR_T0DEFR_OFFSET, &ui32RegVal);
+		PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+		DDLOGVAL32("T0 TXDEFR", ui32RegVal);
+
+		eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PC, &ui32RegVal);
+		PVR_LOGG_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+		DDLOGVAL32("T0 PC", ui32RegVal);
+
+		eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PCX, &ui32RegVal);
+		PVR_LOGG_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+		DDLOGVAL32("T0 PCX", ui32RegVal);
+
+		eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_SP, &ui32RegVal);
+		PVR_LOGG_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+		DDLOGVAL32("T0 SP", ui32RegVal);
+
+
+		if ((ui32Meta == MTP218) || (ui32Meta == MTP219))
+		{
+			eError = RGXReadWithSP(psDevInfo, META_CR_T1ENABLE_OFFSET, &ui32RegVal);
+			PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+			DDLOGVAL32("T1 TXENABLE", ui32RegVal);
+
+			eError = RGXReadWithSP(psDevInfo, META_CR_T1STATUS_OFFSET, &ui32RegVal);
+			PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+			DDLOGVAL32("T1 TXSTATUS", ui32RegVal);
+
+			eError = RGXReadWithSP(psDevInfo, META_CR_T1DEFR_OFFSET, &ui32RegVal);
+			PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+			DDLOGVAL32("T1 TXDEFR", ui32RegVal);
+
+			eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PC, &ui32RegVal);
+			PVR_LOGG_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+			DDLOGVAL32("T1 PC", ui32RegVal);
+
+			eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PCX, &ui32RegVal);
+			PVR_LOGG_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+			DDLOGVAL32("T1 PCX", ui32RegVal);
+
+			eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_SP, &ui32RegVal);
+			PVR_LOGG_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+			DDLOGVAL32("T1 SP", ui32RegVal);
+		}
+
+		if (bFirmwarePerf)
+		{
+			eError = RGXReadWithSP(psDevInfo, META_CR_PERF_COUNT0, &ui32RegVal);
+			PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+			DDLOGVAL32("PERF_COUNT0", ui32RegVal);
+
+			eError = RGXReadWithSP(psDevInfo, META_CR_PERF_COUNT1, &ui32RegVal);
+			PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+			DDLOGVAL32("PERF_COUNT1", ui32RegVal);
+		}
+
+		if (bIsT0Enabled && bIsFWFaulted)
+		{
+			PVRSRV_ERROR eError;
+			eError = _ValidateFWImageForMETA(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DUMPDEBUG_LOG("Failed to check the FW image for corruption");
+			}
+		}
+		else if (bIsFWFaulted)
+		{
+			PVR_DUMPDEBUG_LOG("Skipping FW code memory corruption checking as META is disabled");
+		}
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		DDLOG32(MIPS_ADDR_REMAP1_CONFIG1);
+		DDLOG64(MIPS_ADDR_REMAP1_CONFIG2);
+		DDLOG32(MIPS_ADDR_REMAP2_CONFIG1);
+		DDLOG64(MIPS_ADDR_REMAP2_CONFIG2);
+		DDLOG32(MIPS_ADDR_REMAP3_CONFIG1);
+		DDLOG64(MIPS_ADDR_REMAP3_CONFIG2);
+		DDLOG32(MIPS_ADDR_REMAP4_CONFIG1);
+		DDLOG64(MIPS_ADDR_REMAP4_CONFIG2);
+		DDLOG32(MIPS_ADDR_REMAP5_CONFIG1);
+		DDLOG64(MIPS_ADDR_REMAP5_CONFIG2);
+		DDLOG64(MIPS_WRAPPER_CONFIG);
+		DDLOG32(MIPS_EXCEPTION_STATUS);
+
+#if !defined(NO_HARDWARE)
+		{
+			RGX_MIPS_STATE sMIPSState = {0};
+
+			eError = _RGXMipsExtraDebug(psDevInfo, &sMIPSState);
+			PVR_DUMPDEBUG_LOG("---- [ MIPS internal state ] ----");
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DUMPDEBUG_LOG("MIPS extra debug not available");
+			}
+			else
+			{
+				DDLOGVAL32("PC", sMIPSState.ui32ErrorEPC);
+				DDLOGVAL32("STATUS_REGISTER", sMIPSState.ui32StatusRegister);
+				DDLOGVAL32("CAUSE_REGISTER", sMIPSState.ui32CauseRegister);
+				_RGXMipsDumpCauseDecode(pfnDumpDebugPrintf, pvDumpDebugFile,
+				                        sMIPSState.ui32CauseRegister, sMIPSState.ui32ErrorState);
+				DDLOGVAL32("BAD_REGISTER", sMIPSState.ui32BadRegister);
+				DDLOGVAL32("EPC", sMIPSState.ui32EPC);
+				DDLOGVAL32("SP", sMIPSState.ui32SP);
+				DDLOGVAL32("BAD_INSTRUCTION", sMIPSState.ui32BadInstr);
+				_RGXMipsDumpDebugDecode(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile,
+				                        sMIPSState.ui32Debug, sMIPSState.ui32DEPC);
+
+				{
+					IMG_UINT32 ui32Idx;
+
+					IMG_BOOL bCheckBRN63553WA =
+						RGX_IS_BRN_SUPPORTED(psDevInfo, 63553) &&
+						(OSReadHWReg32(pvRegsBaseKM, RGX_CR_MIPS_ADDR_REMAP5_CONFIG1) == (0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN));
+
+					IMG_BOOL bUseRemapRanges = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32;
+
+					PVR_DUMPDEBUG_LOG("TLB                           :");
+
+					for (ui32Idx = 0; ui32Idx < ARRAY_SIZE(sMIPSState.asTLB); ui32Idx++)
+					{
+						RGX_MIPS_REMAP_ENTRY *psRemapEntry0 = NULL;
+						RGX_MIPS_REMAP_ENTRY *psRemapEntry1 = NULL;
+
+						if (bUseRemapRanges)
+						{
+							psRemapEntry0 = &sMIPSState.asRemap[ui32Idx];
+							psRemapEntry1 = &sMIPSState.asRemap[ui32Idx+16];
+						}
+
+
+						_RGXMipsDumpTLBEntry(pfnDumpDebugPrintf,
+								     pvDumpDebugFile,
+								     &sMIPSState.asTLB[ui32Idx],
+								     psRemapEntry0,
+								     psRemapEntry1,
+								     ui32Idx);
+
+						if (bCheckBRN63553WA)
+						{
+							const RGX_MIPS_TLB_ENTRY *psTLBEntry = &sMIPSState.asTLB[ui32Idx];
+
+							#define BRN63553_TLB_IS_NUL(X)  (((X) & RGXMIPSFW_TLB_VALID) && (RGXMIPSFW_TLB_GET_PA(X) == 0x0))
+
+							if (BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo0) || BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo1))
+							{
+								PVR_DUMPDEBUG_LOG("BRN63553 WA present with a valid TLB entry mapping address 0x0.");
+							}
+						}
+					}
+
+					/* This implicitly also checks for overlaps between memory and regbank addresses */
+					_CheckMipsTLBDuplicatePAs(pfnDumpDebugPrintf,
+					                          pvDumpDebugFile,
+					                          sMIPSState.asTLB,
+					                          bUseRemapRanges ? sMIPSState.asRemap : NULL);
+
+					if (bUseRemapRanges)
+					{
+						/* Dump unmapped address if it was dumped in FW, otherwise it will be 0 */
+						if (sMIPSState.ui32UnmappedAddress)
+						{
+							PVR_DUMPDEBUG_LOG("Remap unmapped address => 0x%08X",
+									  sMIPSState.ui32UnmappedAddress);
+						}
+					}
+				}
+			}
+			PVR_DUMPDEBUG_LOG("--------------------------------");
+		}
+#endif
+	}
+
+	return PVRSRV_OK;
+
+_METASPError:
+	PVR_DPF((PVR_DBG_ERROR, "META Slave Port read failed; dumping Slave Port debug information"));
+	_RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+
+	return eError;
+}
+
+void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile,
+				PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 ui32VerbLevel)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+	RGXFWIF_INIT *psRGXFWInit = NULL;
+	PVRSRV_DEV_POWER_STATE  ePowerState;
+	IMG_BOOL                bRGXPoweredON;
+	const IMG_CHAR          *Bit32 = "32 Bit", *Bit64 = "64 Bit";
+	IMG_UINT8               ui8FwOsCount;
+	RGXFWIF_TRACEBUF        *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		return;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc, (void**)&psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to acquire kernel FW IF Init struct"));
+		goto ExitUnlock;
+	}
+
+	ui8FwOsCount = psRGXFWInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport;
+
+	eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Error retrieving RGX power state. No debug info dumped.",
+				__func__));
+		goto Exit;
+	}
+
+	bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+	PVR_DUMPDEBUG_LOG("------[ Driver Info ]------");
+	PVR_DUMP_DRIVER_INFO("UM", psPVRSRVData->sDriverInfo.sUMBuildInfo);
+	PVR_DUMP_DRIVER_INFO("KM", psPVRSRVData->sDriverInfo.sKMBuildInfo);
+	if (psRGXFWInit->sRGXCompChecks.bUpdated)
+	{
+		PVR_DUMP_FIRMWARE_INFO(psRGXFWInit->sRGXCompChecks);
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG("FW info: UNINITIALIZED");
+	}
+	PVR_DUMPDEBUG_LOG("Comparison of UM/KM components: %s", (psPVRSRVData->sDriverInfo.bIsNoMatch) ? ("MISMATCH") : ("MATCHING"));
+
+	PVR_DUMPDEBUG_LOG("KM Arch: %s", (psPVRSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT)?
+							Bit64 : Bit32);
+
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE))
+	{
+		PVR_DUMPDEBUG_LOG("Driver Mode: %s", (PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST))?"Host":"Guest");
+	}
+
+	if ((PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) && (ui8FwOsCount > 1)) ||
+		(PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST) && (ui8FwOsCount != RGXFW_NUM_OS)))
+	{
+		PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)",
+						  (PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE)) ? (1) : (RGXFW_NUM_OS), ui8FwOsCount);
+	}
+
+	if (psPVRSRVData->sDriverInfo.ui8UMSupportedArch)
+	{
+		if ((psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_BOTH) ==
+			BUILD_ARCH_BOTH)
+		{
+			PVR_DUMPDEBUG_LOG("UM Connected Clients Arch: %s and %s", Bit64, Bit32);
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG("UM Connected Clients: %s",
+							  (psPVRSRVData->sDriverInfo.ui8UMSupportedArch &
+							   BUILD_ARCH_64BIT) ? Bit64 : Bit32);
+		}
+	}
+
+	PVR_DUMPDEBUG_LOG("------[ RGX Summary ]------");
+	PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d", psDevInfo->sDevFeatureCfg.ui32B, \
+											   psDevInfo->sDevFeatureCfg.ui32V,	\
+											   psDevInfo->sDevFeatureCfg.ui32N, \
+											   psDevInfo->sDevFeatureCfg.ui32C);
+	PVR_DUMPDEBUG_LOG("RGX Device State: %s", _RGXGetDebugDevStateString(psDeviceNode->eDevState));
+	PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState));
+
+	RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON);
+
+	if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+
+		eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: RGXDumpRGXRegisters failed (%s)",
+					__func__,
+					PVRSRVGetErrorString(eError)));
+		}
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG(" (!) %s. No registers dumped", PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest Mode of operation" : "RGX power is down");
+	}
+
+	/* Dump out the kernel CCB. */
+	{
+		RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+		if (psKCCBCtl != NULL)
+		{
+			PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X",
+							  psKCCBCtl->ui32WriteOffset,
+							  psKCCBCtl->ui32ReadOffset);
+		}
+	}
+
+	/* Dump out the firmware CCB. */
+	{
+		RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl;
+
+		if (psFCCBCtl != NULL)
+		{
+			PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X",
+							   psFCCBCtl->ui32WriteOffset,
+							   psFCCBCtl->ui32ReadOffset);
+		}
+	}
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	/* Dump out the Workload estimation CCB. */
+	{
+		RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl;
+
+		if (psWorkEstCCBCtl != NULL)
+		{
+			PVR_DUMPDEBUG_LOG("RGX WorkEst CCB WO:0x%X RO:0x%X",
+							  psWorkEstCCBCtl->ui32WriteOffset,
+							  psWorkEstCCBCtl->ui32ReadOffset);
+		}
+	}
+#endif
+
+
+	if (psRGXFWIfTraceBufCtl != NULL)
+	{
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+		/* Dump out the checkpoint CCB offsets. */
+		{
+			RGXFWIF_CCB_CTL *psCheckpointCCBCtl = psDevInfo->psCheckpointCCBCtl;
+
+			if (psCheckpointCCBCtl != NULL)
+			{
+				PVR_DUMPDEBUG_LOG("RGX Checkpoint CCB WO:0x%X RO:0x%X (Check State: FW=%#X, HOST=%#X)",
+								  psCheckpointCCBCtl->ui32WriteOffset,
+								  psCheckpointCCBCtl->ui32ReadOffset,
+								  psRGXFWIfTraceBufCtl->ui32FWSyncCheckMark,
+								  psRGXFWIfTraceBufCtl->ui32HostSyncCheckMark);
+			}
+		}
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
+
+		/* Dump the KCCB commands executed */
+		PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d",
+						  psRGXFWIfTraceBufCtl->ui32KCCBCmdsExecuted);
+
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+		/* Check flag indicates support for SLR Log (for compatibility) */
+		if (psRGXFWIfTraceBufCtl->ui32TracebufFlags & RGXFWIF_TRACEBUFCFG_SLR_LOG)
+		{
+			/* Dump the number of times we have performed a forced UFO update,
+			 * and (if non-zero) the timestamp of the most recent occurrence.
+			 */
+			PVR_DUMPDEBUG_LOG("RGX SLR: Forced UFO updates requested = %d",
+							  psRGXFWIfTraceBufCtl->ui32ForcedUpdatesRequested);
+			if (psRGXFWIfTraceBufCtl->ui32ForcedUpdatesRequested > 0)
+			{
+				IMG_UINT8 ui8Idx;
+				IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+				if (psRGXFWIfTraceBufCtl->ui64LastForcedUpdateTime > 0ULL)
+				{
+					ConvertOSTimestampToSAndNS(psRGXFWIfTraceBufCtl->ui64LastForcedUpdateTime, &ui64Seconds, &ui64Nanoseconds);
+					PVR_DUMPDEBUG_LOG("RGX SLR: (most recent forced update was around %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ")",
+									  ui64Seconds, ui64Nanoseconds);
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG("RGX SLR: (unable to force update as fence contained no sync checkpoints)");
+				}
+				/* Dump SLR log */
+				if (psRGXFWIfTraceBufCtl->sSLRLogFirst.aszCCBName[0])
+				{
+					ConvertOSTimestampToSAndNS(psRGXFWIfTraceBufCtl->sSLRLogFirst.ui64Timestamp, &ui64Seconds, &ui64Nanoseconds);
+					PVR_DUMPDEBUG_LOG("RGX SLR:{%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC
+									  "} Fence found on context 0x%x '%s' has %d UFOs",
+									  ui64Seconds, ui64Nanoseconds,
+									  psRGXFWIfTraceBufCtl->sSLRLogFirst.ui32FWCtxAddr,
+									  psRGXFWIfTraceBufCtl->sSLRLogFirst.aszCCBName,
+									  psRGXFWIfTraceBufCtl->sSLRLogFirst.ui32NumUFOs);
+				}
+				for (ui8Idx = 0; ui8Idx < PVR_SLR_LOG_ENTRIES; ui8Idx++)
+				{
+					if (psRGXFWIfTraceBufCtl->sSLRLog[ui8Idx].aszCCBName[0])
+					{
+						ConvertOSTimestampToSAndNS(psRGXFWIfTraceBufCtl->sSLRLog[ui8Idx].ui64Timestamp, &ui64Seconds, &ui64Nanoseconds);
+						PVR_DUMPDEBUG_LOG("RGX SLR:[%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC
+										  "] Fence found on context 0x%x '%s' has %d UFOs",
+										  ui64Seconds, ui64Nanoseconds,
+										  psRGXFWIfTraceBufCtl->sSLRLog[ui8Idx].ui32FWCtxAddr,
+										  psRGXFWIfTraceBufCtl->sSLRLog[ui8Idx].aszCCBName,
+										  psRGXFWIfTraceBufCtl->sSLRLog[ui8Idx].ui32NumUFOs);
+					}
+				}
+			}
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG("RGX SLR: Unsupported");
+		}
+#else
+		PVR_DUMPDEBUG_LOG("RGX SLR: Disabled");
+#endif
+
+		/* Dump the IRQ info for threads or OS IDs */
+		if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+		{
+			IMG_UINT32 ui32idx;
+
+			for_each_irq_cnt(ui32idx)
+			{
+				IMG_UINT32 ui32IrqCnt;
+
+				get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo);
+					if (ui32IrqCnt)
+					{
+						PVR_DUMPDEBUG_LOG(MSG_IRQ_CNT_TYPE "%u: FW IRQ count = %u", ui32idx, ui32IrqCnt);
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+						if (ui32idx == RGXFW_HYPERVISOR_OS)
+#endif
+						{
+							PVR_DUMPDEBUG_LOG("Last sampled IRQ count in LISR = %u", psDevInfo->aui32SampleIRQCount[ui32idx]);
+						}
+					}
+			}
+		}
+	}
+
+	/* Dump the FW config flags */
+	{
+		RGXFWIF_OS_CONFIG   *psOSConfig = psDevInfo->psFWIfOSConfig;
+		IMG_CHAR sFwFlagsDescription[MAX_FW_DESCRIPTION_LENGTH];
+
+		if (!psOSConfig)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: OS Config is not mapped into CPU space",
+					__func__));
+			goto Exit;
+		}
+
+		_GetFwFlagsDescription(sFwFlagsDescription, psOSConfig->ui32ConfigFlags);
+		PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%X (%s)", psOSConfig->ui32ConfigFlags, sFwFlagsDescription);
+	}
+
+	if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+	{
+		IMG_INT tid;
+		/* Dump FW trace information */
+		if (psRGXFWIfTraceBufCtl != NULL)
+		{
+			for (tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++)
+			{
+				IMG_UINT32	i;
+				IMG_BOOL	bPrevLineWasZero = IMG_FALSE;
+				IMG_BOOL	bLineIsAllZeros = IMG_FALSE;
+				IMG_UINT32	ui32CountLines = 0;
+				IMG_UINT32	*pui32TraceBuffer;
+				IMG_CHAR	*pszLine;
+
+				if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+				{
+					PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+									  ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+									  RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+									  );
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG("Debug log type: none");
+				}
+
+				pui32TraceBuffer = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
+
+				/* Skip if trace buffer is not allocated */
+				if (pui32TraceBuffer == NULL)
+				{
+					PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated", tid);
+					continue;
+				}
+
+/* Max number of DWords to be printed per line, in debug dump output */
+#define PVR_DD_FW_TRACEBUF_LINESIZE 30U
+				/* each element in the line is 8 characters plus a space.  The '+ 1' is because of the final trailing '\0'. */
+				pszLine = OSAllocMem(9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1);
+				if (pszLine == NULL)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							"%s: Out of mem allocating line string (size: %d)",
+							__func__,
+							9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1));
+					goto Exit;
+				}
+
+				PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid);
+				PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer);
+				PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords);
+
+				for (i = 0; i < psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; i += PVR_DD_FW_TRACEBUF_LINESIZE)
+				{
+					IMG_UINT32 k = 0;
+					IMG_UINT32 ui32Line = 0x0;
+					IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32);
+					IMG_CHAR   *pszBuf = pszLine;
+
+					for (k = 0; k < PVR_DD_FW_TRACEBUF_LINESIZE; k++)
+					{
+						if ((i + k) >= psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords)
+						{
+							/* Stop reading when the index goes beyond trace buffer size. This condition is
+							 * hit during printing the last line in DD when ui32TraceBufSizeInDWords is not
+							 * a multiple of PVR_DD_FW_TRACEBUF_LINESIZE */
+							break;
+						}
+
+						ui32Line |= pui32TraceBuffer[i + k];
+
+						/* prepare the line to print it. The '+1' is because of the trailing '\0' added */
+						OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]);
+						pszBuf += 9; /* write over the '\0' */
+					}
+
+					bLineIsAllZeros = (ui32Line == 0x0);
+
+					if (bLineIsAllZeros)
+					{
+						if (bPrevLineWasZero)
+						{
+							ui32CountLines++;
+						}
+						else
+						{
+							bPrevLineWasZero = IMG_TRUE;
+							ui32CountLines = 1;
+							PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 00000000", ui32LineOffset);
+						}
+					}
+					else
+					{
+						if (bPrevLineWasZero  &&  ui32CountLines > 1)
+						{
+							PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines);
+						}
+						bPrevLineWasZero = IMG_FALSE;
+
+						PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine);
+					}
+
+				}
+				if (bPrevLineWasZero)
+				{
+					PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines);
+				}
+
+				PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid);
+
+				OSFreeMem(pszLine);
+			}
+
+			if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+			{
+				RGXFWIF_OS_CONFIG *psOSConfig = psDevInfo->psFWIfOSConfig;
+
+				if (!psOSConfig)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							"%s: OS Config is not mapped into CPU space",
+							__func__));
+					goto Exit;
+				}
+
+				if ((psOSConfig->ui32ConfigFlags & RGXFWIF_INICFG_METAT1_DUMMY) != 0)
+				{
+					IMG_UINT32 *pui32T1PCX = &psRGXFWIfTraceBufCtl->ui32T1PCX[0];
+					IMG_UINT32 ui32T1PCXWOff = psRGXFWIfTraceBufCtl->ui32T1PCXWOff;
+					IMG_UINT32 i = ui32T1PCXWOff;
+
+					PVR_DUMPDEBUG_LOG("------[ FW Thread 1 PCX list (most recent first) ]------");
+					do
+					{
+						PVR_DUMPDEBUG_LOG("  0x%08x", pui32T1PCX[i]);
+						i = (i == 0) ? (RGXFWIF_MAX_PCX - 1) : (i - 1);
+
+					} while (i != ui32T1PCXWOff);
+
+					PVR_DUMPDEBUG_LOG("------[ FW Thread 1 PCX list [END] ]------");
+				}
+
+			}
+		}
+
+		{
+			if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
+			{
+				PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------");
+			}
+			else
+			{
+				PVR_DUMPDEBUG_LOG("------[ Stalled FWCtxs ]------");
+			}
+
+			DumpTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+
+			DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+
+			DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+
+			if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE))
+			{
+				DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+			}
+			if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
+			{
+				DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+			}
+		}
+	}
+
+Exit:
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+ExitUnlock:
+	PVRSRVPowerUnlock(psDeviceNode);
+}
+
+/******************************************************************************
+ End of file (rgxdebug.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxdebug.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxdebug.h
new file mode 100644
index 0000000..fe09a19
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxdebug.h
@@ -0,0 +1,274 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX debug header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX debugging functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXDEBUG_H__)
+#define __RGXDEBUG_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+
+
+/**
+ * Debug utility for printing the FW IRQ count and the last sampled IRQ
+ * count seen in the LISR for each RGX FW thread.
+ * Takes a pointer to PVRSRV_RGXDEV_INFO as input.
+ */
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+#define for_each_irq_cnt(ui32idx) \
+	for (ui32idx = 0; ui32idx < RGXFW_NUM_OS; ui32idx++)
+
+#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \
+	do { \
+		extern const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OS]; \
+		ui32Dest = OSReadHWReg32((psRgxDevInfo)->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[ui32idx]); \
+	} while (0)
+
+#define MSG_IRQ_CNT_TYPE "OS"
+
+#else
+
+#define for_each_irq_cnt(ui32idx) \
+	for (ui32idx = 0; ui32idx < RGXFW_THREAD_NUM; ui32idx++)
+
+#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \
+	ui32Dest = (psRgxDevInfo)->psRGXFWIfTraceBuf->aui32InterruptCount[ui32idx]
+
+#define MSG_IRQ_CNT_TYPE "Thread"
+#endif /* RGX_FW_IRQ_OS_COUNTERS */
+
+static inline void RGXDEBUG_PRINT_IRQ_COUNT(PVRSRV_RGXDEV_INFO* psRgxDevInfo)
+{
+#if defined(PVRSRV_NEED_PVR_DPF) && defined(DEBUG)
+	IMG_UINT32 ui32idx;
+
+	for_each_irq_cnt(ui32idx)
+	{
+		IMG_UINT32 ui32IrqCnt;
+
+		get_irq_cnt_val(ui32IrqCnt, ui32idx, psRgxDevInfo);
+
+		PVR_DPF((DBGPRIV_VERBOSE, MSG_IRQ_CNT_TYPE
+		         " %u FW IRQ count = %u", ui32idx, ui32IrqCnt));
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+		if (ui32idx == RGXFW_HYPERVISOR_OS)
+#endif
+		{
+			PVR_DPF((DBGPRIV_VERBOSE, "Last sampled IRQ count in LISR = %u",
+			        (psRgxDevInfo)->aui32SampleIRQCount[ui32idx]));
+		}
+	}
+#endif /* PVRSRV_NEED_PVR_DPF && DEBUG */
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDumpDebugInfo
+
+ @Description
+
+ Dump useful debugging info. Dumps less information than PVRSRVDebugRequest.
+ Does not dump debugging information for all requester types (e.g. SysDebug, ServerSync info).
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+
+ @Return   void
+
+******************************************************************************/
+void RGXDumpDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile,
+			PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDebugRequestProcess
+
+ @Description
+
+ This function will print out the debug information for the specified level
+ of verbosity.
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+ @Input ui32VerbLevel       - Verbosity level
+
+ @Return   void
+
+******************************************************************************/
+void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile,
+				PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 ui32VerbLevel);
+/*!
+*******************************************************************************
+
+ @Function	RGXDumpRGXRegisters
+
+ @Description
+
+ Dumps an extensive list of RGX registers required for debugging
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+
+ @Return PVRSRV_ERROR         PVRSRV_OK on success, error code otherwise
+
+******************************************************************************/
+PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+								 void *pvDumpDebugFile,
+								 PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDumpFirmwareTrace
+
+ @Description Dumps the decoded version of the firmware trace buffer.
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+
+ @Return   void
+
+******************************************************************************/
+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile,
+				PVRSRV_RGXDEV_INFO  *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXReadWithSP
+
+ @Description
+
+ Reads data from a memory location (FW memory map) using the META Slave Port
+
+ @Input  psDevInfo  - Pointer to RGX DevInfo to be used while reading
+ @Input  ui32FWAddr - 32 bit FW address
+ @Input  pui32Value - When the read is successful, value at above FW address
+                      is returned at this location
+
+ @Return PVRSRV_ERROR PVRSRV_OK if read success, error code otherwise.
+******************************************************************************/
+PVRSRV_ERROR RGXReadWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXWriteWithSP
+
+ @Description
+
+ Writes data to a memory location (FW memory map) using the META Slave Port
+
+ @Input  psDevInfo  - Pointer to RGX DevInfo to be used while writing
+ @Input  ui32FWAddr - 32 bit FW address
+ @Input  ui32Value  - 32 bit Value to write
+
+ @Return PVRSRV_ERROR PVRSRV_OK if write success, error code otherwise.
+******************************************************************************/
+PVRSRV_ERROR RGXWriteWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value);
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+/*!
+*******************************************************************************
+
+ @Function     ValidateFWOnLoad
+
+ @Description  Compare the Firmware image as seen from the CPU point of view
+               against the same memory area as seen from the META point of view
+               after first power up.
+
+ @Input        psDevInfo - Device Info
+
+ @Return       PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDumpRGXDebugSummary
+
+ @Description
+
+ Dump a summary of the RGX state in human readable form
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psDevInfo            - RGX device info
+ @Input bRGXPoweredON        - IMG_TRUE if RGX device is on
+
+ @Return   void
+
+******************************************************************************/
+void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					PVRSRV_RGXDEV_INFO *psDevInfo,
+					IMG_BOOL bRGXPoweredON);
+
+#endif /* __RGXDEBUG_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxdevice.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxdevice.h
new file mode 100644
index 0000000..df9e3bb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxdevice.h
@@ -0,0 +1,652 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX device node header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX device node
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXDEVICE_H__)
+#define __RGXDEVICE_H__
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_device_types.h"
+#include "mmu_common.h"
+#include "rgx_fwif_km.h"
+#include "cache_ops.h"
+#include "device.h"
+#include "osfunc.h"
+#include "rgxlayer_impl.h"
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "hash.h"
+#endif
+typedef struct _RGX_SERVER_COMMON_CONTEXT_ RGX_SERVER_COMMON_CONTEXT;
+
+typedef struct {
+	DEVMEM_MEMDESC		*psFWFrameworkMemDesc;
+	IMG_DEV_VIRTADDR	*psResumeSignalAddr;
+} RGX_COMMON_CONTEXT_INFO;
+
+
+/*!
+ ******************************************************************************
+ * Device state flags
+ *****************************************************************************/
+#define RGXKM_DEVICE_STATE_ZERO_FREELIST            (0x1) /*!< Zeroing the physical pages of reconstructed free lists */
+#define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN    (0x2) /*!< Used to disable the Devices Watchdog logging */
+#define RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN   (0x4) /*!< Used for validation to inject dust requests every TA/3D kick */
+#define RGXKM_DEVICE_STATE_MASK                     (0x7)
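+
+/* Usage sketch (illustrative, not part of the original header): these flags
+ * are tested against the ui32DeviceFlags field of PVRSRV_RGXDEV_INFO, e.g.:
+ *
+ *   if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+ *   {
+ *       ... zero the physical pages of the reconstructed free lists ...
+ *   }
+ */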
+
+/*!
+ ******************************************************************************
+ * GPU DVFS Table
+ *****************************************************************************/
+
+#define RGX_GPU_DVFS_TABLE_SIZE                      32
+#define RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US       25000     /* Time required to calibrate a clock frequency the first time */
+#define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US  150000    /* Time required for a recalibration after a DVFS transition */
+#define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US    10000000  /* Time before the next periodic calibration and correlation */
+
+typedef struct _GPU_FREQ_TRACKING_DATA_
+{
+	/* Core clock speed estimated by the driver */
+	IMG_UINT32 ui32EstCoreClockSpeed;
+
+	/* Amount of successful calculations of the estimated core clock speed */
+	IMG_UINT32 ui32CalibrationCount;
+} GPU_FREQ_TRACKING_DATA;
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+#define RGX_GPU_FREQ_TRACKING_SIZE 16
+
+typedef struct
+{
+	IMG_UINT64 ui64BeginCRTimestamp;
+	IMG_UINT64 ui64BeginOSTimestamp;
+
+	IMG_UINT64 ui64EndCRTimestamp;
+	IMG_UINT64 ui64EndOSTimestamp;
+
+	IMG_UINT32 ui32EstCoreClockSpeed;
+	IMG_UINT32 ui32CoreClockSpeed;
+} GPU_FREQ_TRACKING_HISTORY;
+#endif
+
+typedef struct _RGX_GPU_DVFS_TABLE_
+{
+	/* Beginning of current calibration period (in us) */
+	IMG_UINT64 ui64CalibrationCRTimestamp;
+	IMG_UINT64 ui64CalibrationOSTimestamp;
+
+	/* Calculated calibration period (in us) */
+	IMG_UINT64 ui64CalibrationCRTimediff;
+	IMG_UINT64 ui64CalibrationOSTimediff;
+
+	/* Current calibration period (in us) */
+	IMG_UINT32 ui32CalibrationPeriod;
+
+	/* System layer frequency table and frequency tracking data */
+	IMG_UINT32 ui32FreqIndex;
+	IMG_UINT32 aui32GPUFrequency[RGX_GPU_DVFS_TABLE_SIZE];
+	GPU_FREQ_TRACKING_DATA asTrackingData[RGX_GPU_DVFS_TABLE_SIZE];
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+	IMG_UINT32 ui32HistoryIndex;
+	GPU_FREQ_TRACKING_HISTORY asTrackingHistory[RGX_GPU_FREQ_TRACKING_SIZE];
+#endif
+} RGX_GPU_DVFS_TABLE;
+
+
+/*!
+ ******************************************************************************
+ * GPU utilisation statistics
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_GPU_UTIL_STATS_
+{
+	IMG_BOOL   bValid;                /* If TRUE, statistics are valid.
+	                                     FALSE if the driver couldn't get reliable stats. */
+	IMG_UINT64 ui64GpuStatActiveHigh; /* GPU active high statistic */
+	IMG_UINT64 ui64GpuStatActiveLow;  /* GPU active low (i.e. TLA active only) statistic */
+	IMG_UINT64 ui64GpuStatBlocked;    /* GPU blocked statistic */
+	IMG_UINT64 ui64GpuStatIdle;       /* GPU idle statistic */
+	IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */
+	IMG_UINT64 ui64TimeStamp;         /* Timestamp of the most recent sample of the GPU stats */
+} RGXFWIF_GPU_UTIL_STATS;
+
+
+typedef struct _RGX_REG_CONFIG_
+{
+	IMG_BOOL               bEnabled;
+	RGXFWIF_REG_CFG_TYPE   eRegCfgTypeToPush;
+	IMG_UINT32             ui32NumRegRecords;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POS_LOCK               hLock;
+#endif
+} RGX_REG_CONFIG;
+
+typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
+
+typedef struct
+{
+	IMG_UINT32			ui32DustCount1;
+	IMG_UINT32			ui32DustCount2;
+	IMG_BOOL			bToggle;
+} RGX_DUST_STATE;
+
+typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_
+{
+	IMG_UINT64 ui64ErnsBrns;
+	IMG_UINT64 ui64Features;
+	IMG_UINT32 ui32B;
+	IMG_UINT32 ui32V;
+	IMG_UINT32 ui32N;
+	IMG_UINT32 ui32C;
+	IMG_UINT32 ui32FeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX];
+	IMG_UINT32 ui32MAXDustCount;
+	IMG_PCHAR  pszBVNCString;
+}PVRSRV_DEVICE_FEATURE_CONFIG;
+
+/* This is used to get the value of a specific feature.
+ * Note that it will assert if the feature is disabled or the value is invalid. */
+#define RGX_GET_FEATURE_VALUE(psDevInfo, Feature) \
+			( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] )
+
+/* This is used to check if a feature with a value (i.e. an integer value) is available for the currently running BVNC */
+#define RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, Feature) \
+			( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] < RGX_FEATURE_VALUE_DISABLED )
+
+/* This is used to check if a Boolean feature (i.e. without an integer value) is available for the currently running BVNC */
+#define RGX_IS_FEATURE_SUPPORTED(psDevInfo, Feature) \
+			BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64Features, RGX_FEATURE_##Feature##_BIT_MASK)
+
+/* This is used to check if the ERN is available for the currently running BVNC or not */
+#define RGX_IS_ERN_SUPPORTED(psDevInfo, ERN) \
+			BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, HW_ERN_##ERN##_BIT_MASK)
+
+/* This is used to check if the BRN is available for the currently running BVNC or not */
+#define RGX_IS_BRN_SUPPORTED(psDevInfo, BRN) \
+			BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, FIX_HW_BRN_##BRN##_BIT_MASK)
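+
+/* Usage sketch (illustrative): a feature value should only be read after
+ * checking that it is supported, since RGX_GET_FEATURE_VALUE asserts on
+ * disabled features. META is assumed here as an example feature-with-value:
+ *
+ *   if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+ *   {
+ *       IMG_UINT32 ui32MetaVersion = RGX_GET_FEATURE_VALUE(psDevInfo, META);
+ *       ...
+ *   }
+ */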
+
+/* There is a corresponding define in rgxapi.h. */
+#define RGX_MAX_TIMER_QUERIES 16
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+/*
+   For the workload estimation return data array, the maximum number of
+   commands the MTS can have is 255, therefore 512 (LOG2 = 9) is large enough
+   to account for all corner cases.
+*/
+#define RETURN_DATA_ARRAY_SIZE_LOG2 (9)
+#define RETURN_DATA_ARRAY_SIZE      ((1UL) << RETURN_DATA_ARRAY_SIZE_LOG2)
+#define RETURN_DATA_ARRAY_WRAP_MASK (RETURN_DATA_ARRAY_SIZE - 1)
+
+#define WORKLOAD_HASH_SIZE_LOG2		6
+#define WORKLOAD_HASH_SIZE 			((1UL) << WORKLOAD_HASH_SIZE_LOG2)
+#define WORKLOAD_HASH_WRAP_MASK		(WORKLOAD_HASH_SIZE - 1)
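+
+/* Illustrative sketch: the sizes above are powers of two so that ring
+ * indices can wrap with a cheap mask instead of a modulo, e.g.:
+ *
+ *   ui32Slot = ui32WriteOffset & RETURN_DATA_ARRAY_WRAP_MASK;
+ *   ui32WriteOffset++;
+ */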
+
+typedef struct _RGX_WORKLOAD_TA3D_
+{
+	IMG_UINT32				ui32RenderTargetSize;
+	IMG_UINT32				ui32NumberOfDrawCalls;
+	IMG_UINT32				ui32NumberOfIndices;
+	IMG_UINT32				ui32NumberOfMRTs;
+} RGX_WORKLOAD_TA3D;
+
+typedef struct _WORKLOAD_MATCHING_DATA_
+{
+	POS_LOCK				psHashLock;
+	HASH_TABLE				*psHashTable;
+	RGX_WORKLOAD_TA3D		asHashKeys[WORKLOAD_HASH_SIZE];
+	IMG_UINT64				aui64HashData[WORKLOAD_HASH_SIZE];
+	IMG_UINT32				ui32HashArrayWO;
+} WORKLOAD_MATCHING_DATA;
+
+typedef struct _WORKEST_HOST_DATA_
+{
+	WORKLOAD_MATCHING_DATA	sWorkloadMatchingDataTA;
+	WORKLOAD_MATCHING_DATA	sWorkloadMatchingData3D;
+	IMG_UINT32				ui32WorkEstCCBReceived;
+} WORKEST_HOST_DATA;
+
+typedef struct _WORKEST_RETURN_DATA_
+{
+	WORKEST_HOST_DATA		*psWorkEstHostData;
+	WORKLOAD_MATCHING_DATA	*psWorkloadMatchingData;
+	RGX_WORKLOAD_TA3D		sWorkloadCharacteristics;
+} WORKEST_RETURN_DATA;
+#endif
+
+
+typedef struct
+{
+#if defined(PDUMP)
+	IMG_HANDLE      hPdumpPages;
+#endif
+	PG_HANDLE       sPages;
+	IMG_DEV_PHYADDR sPhysAddr;
+} RGX_MIPS_ADDRESS_TRAMPOLINE;
+
+
+/*!
+ ******************************************************************************
+ * RGX Device info
+ *****************************************************************************/
+
+typedef struct _PVRSRV_RGXDEV_INFO_
+{
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+
+	PVRSRV_DEVICE_FEATURE_CONFIG	sDevFeatureCfg;
+
+	/* FIXME: This is a workaround due to having 2 inits but only 1 deinit */
+	IMG_BOOL				bDevInit2Done;
+
+	IMG_BOOL                bFirmwareInitialised;
+	IMG_BOOL				bPDPEnabled;
+
+	IMG_HANDLE				hDbgReqNotify;
+
+	/* Kernel mode linear address of device registers */
+	void __iomem			*pvRegsBaseKM;
+
+	/* FIXME: The alloc for this should go through OSAllocMem in future */
+	IMG_HANDLE				hRegMapping;
+
+	/* System physical address of device registers*/
+	IMG_CPU_PHYADDR			sRegsPhysBase;
+	/*  Register region size in bytes */
+	IMG_UINT32				ui32RegSize;
+
+	PVRSRV_STUB_PBDESC		*psStubPBDescListKM;
+
+	/* Firmware memory context info */
+	DEVMEM_CONTEXT			*psKernelDevmemCtx;
+	DEVMEM_HEAP				*psFirmwareMainHeap;
+	DEVMEM_HEAP				*psFirmwareConfigHeap;
+	MMU_CONTEXT				*psKernelMMUCtx;
+
+	void					*pvDeviceMemoryHeap;
+
+	/* Kernel CCB */
+	DEVMEM_MEMDESC			*psKernelCCBCtlMemDesc;    /*!< memdesc for Kernel CCB control */
+	RGXFWIF_CCB_CTL			*psKernelCCBCtl;           /*!< kernel mapping for Kernel CCB control */
+	DEVMEM_MEMDESC			*psKernelCCBMemDesc;       /*!< memdesc for Kernel CCB */
+	IMG_UINT8				*psKernelCCB;              /*!< kernel mapping for Kernel CCB */
+
+	/* Firmware CCB */
+	DEVMEM_MEMDESC			*psFirmwareCCBCtlMemDesc;   /*!< memdesc for Firmware CCB control */
+	RGXFWIF_CCB_CTL			*psFirmwareCCBCtl;          /*!< kernel mapping for Firmware CCB control */
+	DEVMEM_MEMDESC			*psFirmwareCCBMemDesc;      /*!< memdesc for Firmware CCB */
+	IMG_UINT8				*psFirmwareCCB;             /*!< kernel mapping for Firmware CCB */
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	/* Checkpoint CCB */
+	DEVMEM_MEMDESC          *psCheckpointCCBCtlMemDesc;  /*!< memdesc for Checkpoint CCB control */
+	RGXFWIF_CCB_CTL         *psCheckpointCCBCtl;         /*!< kernel mapping for Checkpoint CCB control */
+	DEVMEM_MEMDESC          *psCheckpointCCBMemDesc;     /*!< memdesc for Checkpoint CCB */
+	IMG_UINT8               *psCheckpointCCB;            /*!< kernel mapping for Checkpoint CCB */
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
+
+	/* Workload Estimation Firmware CCB */
+	DEVMEM_MEMDESC			*psWorkEstFirmwareCCBCtlMemDesc;   /*!< memdesc for Workload Estimation Firmware CCB control */
+	RGXFWIF_CCB_CTL			*psWorkEstFirmwareCCBCtl;          /*!< kernel mapping for Workload Estimation Firmware CCB control */
+	DEVMEM_MEMDESC			*psWorkEstFirmwareCCBMemDesc;      /*!< memdesc for Workload Estimation Firmware CCB */
+	IMG_UINT8				*psWorkEstFirmwareCCB;             /*!< kernel mapping for Workload Estimation Firmware CCB */
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+	/* Counter dumping */
+	DEVMEM_MEMDESC 			*psCounterBufferMemDesc;      /*!< mem desc for counter dumping buffer */
+	POS_LOCK				hCounterDumpingLock;          /*!< Lock for guarding access to counter dumping buffer */
+#endif
+
+	IMG_BOOL				bEnableFWPoisonOnFree;             /*!< Enable poisoning of FW allocations when freed */
+	IMG_BYTE				ubFWPoisonOnFreeValue;             /*!< Byte value used when poisoning FW allocations */
+
+	IMG_BOOL				bIgnoreHWReportedBVNC;			/*!< Ignore BVNC reported by HW */
+
+	/*
+		If we don't preallocate the page tables, we must
+		insert newly allocated page tables dynamically.
+	*/
+	void					*pvMMUContextList;
+
+	IMG_UINT32				ui32ClkGateStatusReg;
+	IMG_UINT32				ui32ClkGateStatusMask;
+
+	DEVMEM_MEMDESC			*psRGXFWCodeMemDesc;
+	IMG_DEV_VIRTADDR		sFWCodeDevVAddrBase;
+	IMG_UINT32			ui32FWCodeSizeInBytes;
+	DEVMEM_MEMDESC			*psRGXFWDataMemDesc;
+	IMG_DEV_VIRTADDR		sFWDataDevVAddrBase;
+	RGX_MIPS_ADDRESS_TRAMPOLINE	*psTrampoline;
+
+	DEVMEM_MEMDESC			*psRGXFWCorememCodeMemDesc;
+	IMG_DEV_VIRTADDR		sFWCorememCodeDevVAddrBase;
+	RGXFWIF_DEV_VIRTADDR		sFWCorememCodeFWAddr;
+	IMG_UINT32			ui32FWCorememCodeSizeInBytes;
+
+	DEVMEM_MEMDESC			*psRGXFWIfCorememDataStoreMemDesc;
+	IMG_DEV_VIRTADDR		sFWCorememDataStoreDevVAddrBase;
+	RGXFWIF_DEV_VIRTADDR		sFWCorememDataStoreFWAddr;
+
+#if defined(RGXFW_ALIGNCHECKS)
+	DEVMEM_MEMDESC			*psRGXFWAlignChecksMemDesc;
+#endif
+
+	DEVMEM_MEMDESC			*psRGXFWSigTAChecksMemDesc;
+	IMG_UINT32				ui32SigTAChecksSize;
+
+	DEVMEM_MEMDESC			*psRGXFWSig3DChecksMemDesc;
+	IMG_UINT32				ui32Sig3DChecksSize;
+
+	DEVMEM_MEMDESC			*psRGXFWSigTDM2DChecksMemDesc;
+	IMG_UINT32				ui32SigTDM2DChecksSize;
+
+#if defined(PDUMP)
+	IMG_BOOL				bDumpedKCCBCtlAlready;
+#endif
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POS_LOCK				hRGXFWIfBufInitLock;			/*!< trace buffer lock for initialisation phase */
+#endif
+
+	DEVMEM_MEMDESC			*psRGXFWIfTraceBufCtlMemDesc;	/*!< memdesc of trace buffer control structure */
+	DEVMEM_MEMDESC			*psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM];	/*!< memdesc of actual FW trace (log) buffer(s) */
+	RGXFWIF_TRACEBUF		*psRGXFWIfTraceBuf;		/* structure containing trace control data and actual trace buffer */
+
+	DEVMEM_MEMDESC			*psRGXFWIfTBIBufferMemDesc;	/*!< memdesc of actual FW TBI buffer */
+	RGXFWIF_DEV_VIRTADDR		sRGXFWIfTBIBuffer;		/* TBI buffer data */
+	IMG_UINT32			ui32FWIfTBIBufferSize;
+
+	DEVMEM_MEMDESC			*psRGXFWIfHWRInfoBufCtlMemDesc;
+	RGXFWIF_HWRINFOBUF		*psRGXFWIfHWRInfoBuf;
+
+	DEVMEM_MEMDESC			*psRGXFWIfGpuUtilFWCbCtlMemDesc;
+	RGXFWIF_GPU_UTIL_FWCB	*psRGXFWIfGpuUtilFWCb;
+
+	DEVMEM_MEMDESC			*psRGXFWIfHWPerfBufMemDesc;
+	IMG_BYTE				*psRGXFWIfHWPerfBuf;
+	IMG_UINT32				ui32RGXFWIfHWPerfBufSize; /* in bytes */
+
+	DEVMEM_MEMDESC			*psRGXFWIfRegCfgMemDesc;
+
+	DEVMEM_MEMDESC			*psRGXFWIfHWPerfCountersMemDesc;
+	DEVMEM_MEMDESC			*psRGXFWIfInitMemDesc;
+	DEVMEM_MEMDESC			*psRGXFWIfOSConfigDesc;
+	RGXFWIF_OS_CONFIG		*psFWIfOSConfig;
+
+	DEVMEM_MEMDESC			*psRGXFWIfRuntimeCfgMemDesc;
+	RGXFWIF_RUNTIME_CFG		*psRGXFWIfRuntimeCfg;
+
+	/* Additional guest firmware memory context info */
+	DEVMEM_HEAP				*psGuestFirmwareRawHeap[RGXFW_NUM_OS];
+	DEVMEM_MEMDESC			*psGuestFirmwareRawMemDesc[RGXFW_NUM_OS];
+	DEVMEM_MEMDESC			*psGuestFirmwareMainMemDesc[RGXFW_NUM_OS];
+	DEVMEM_MEMDESC			*psGuestFirmwareConfigMemDesc[RGXFW_NUM_OS];
+
+	DEVMEM_MEMDESC			*psMETAT1StackMemDesc;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	/* Array to store data needed for workload estimation when a workload
+	   has finished and its cycle time is returned to the host. */
+	WORKEST_RETURN_DATA		asReturnData[RETURN_DATA_ARRAY_SIZE];
+	IMG_UINT32				ui32ReturnDataWO;
+#endif
+
+#if defined(SUPPORT_PDVFS)
+	/**
+	 * Host memdesc and pointer to memory containing core clock rate in Hz.
+	 * Firmware updates the memory on changing the core clock rate over GPIO.
+	 * Note: Shared memory needs atomic access from Host driver and firmware,
+	 * hence size should not be greater than memory transaction granularity.
+	 * Currently it is chosen to be 32 bits.
+	 */
+	DEVMEM_MEMDESC			*psRGXFWIFCoreClkRateMemDesc;
+	volatile IMG_UINT32		*pui32RGXFWIFCoreClkRate;
+	/**
+	 * Last sampled core clk rate.
+	 */
+	volatile IMG_UINT32		ui32CoreClkRateSnapshot;
+#endif
+
+	/*
+	   HWPerf data for the RGX device
+	 */
+
+	POS_LOCK    hHWPerfLock;  /*! Critical section lock that protects HWPerf code
+	                           *  from multiple thread duplicate init/deinit
+	                           *  and loss/freeing of FW & Host resources while in
+	                           *  use in another thread e.g. MISR. */
+
+	IMG_UINT64  ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */
+	IMG_HANDLE  hHWPerfStream;    /*! TL Stream buffer (L2) for firmware event stream */
+	IMG_UINT32  ui32MaxPacketSize;/*!< Max allowed packet size */
+
+	IMG_UINT32  ui32HWPerfHostFilter;      /*! Event filter for HWPerfHost stream (settable by AppHint) */
+	POS_LOCK    hLockHWPerfHostStream;     /*! Lock guarding access to HWPerfHost stream from multiple threads */
+	IMG_HANDLE  hHWPerfHostStream;         /*! TL Stream buffer for host only event stream */
+	IMG_UINT32  ui32HWPerfHostBufSize;     /*! Host side buffer size in bytes */
+	IMG_UINT32  ui32HWPerfHostLastOrdinal; /*! Ordinal of the last packet emitted in HWPerfHost TL stream.
+	                                        *  Guarded by hLockHWPerfHostStream */
+	IMG_UINT32  ui32HWPerfHostNextOrdinal; /*! Ordinal number for HWPerfHost events. Guarded by hHWPerfHostSpinLock */
+	IMG_UINT8   *pui8DeferredEvents;       /*! List of HWPerfHost events yet to be emitted in the TL stream.
+	                                        *  Events generated from atomic context have their emission
+	                                        *  deferred, as the emission code can sleep */
+	IMG_UINT16  ui16DEReadIdx;             /*! Read index in the above deferred events buffer */
+	IMG_UINT16  ui16DEWriteIdx;            /*! Write index in the above deferred events buffer */
+	void        *pvHostHWPerfMISR;         /*! MISR to emit pending/deferred events in HWPerfHost TL stream */
+	POS_SPINLOCK hHWPerfHostSpinLock;      /*! Guards data shared between an atomic & sleepable-context */
+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+	IMG_UINT32  ui32DEHighWatermark;       /*! High watermark of deferred events buffer usage. Protected by
+	                                        *  hHWPerfHostSpinLock */
+	/* Max number of times DeferredEmission waited for an atomic-context to "finish" packet write */
+	IMG_UINT32  ui32WaitForAtomicCtxPktHighWatermark; /*! Protected by hLockHWPerfHostStream */
+	/* Whether a warning has been logged about an atomic-context packet loss (due to waiting too long for the "write" to finish) */
+	IMG_BOOL    bWarnedAtomicCtxPktLost;
+	/* Max number of times DeferredEmission scheduled-out to give a chance to the right-ordinal packet to be emitted */
+	IMG_UINT32  ui32WaitForRightOrdPktHighWatermark; /*! Protected by hLockHWPerfHostStream */
+	/* Whether a warning has been logged about a packet loss (due to waiting too long for the right ordinal to be emitted) */
+	IMG_BOOL    bWarnedPktOrdinalBroke;
+#endif
+
+	void        *pvGpuFtraceData;
+
+	/* Poll data for detecting firmware fatal errors */
+	IMG_UINT32				aui32CrLastPollAddr[RGXFW_THREAD_NUM];
+	IMG_UINT32				ui32KCCBCmdsExecutedLastTime;
+	IMG_BOOL				bKCCBCmdsWaitingLastTime;
+	IMG_UINT32				ui32GEOTimeoutsLastTime;
+
+	/* Client stall detection */
+	IMG_UINT32				ui32StalledClientMask;
+
+	IMG_BOOL				bWorkEstEnabled;
+	IMG_BOOL				bPDVFSEnabled;
+
+	void					*pvLISRData;
+	void					*pvMISRData;
+	void					*pvAPMISRData;
+	RGX_ACTIVEPM_CONF		eActivePMConf;
+
+	volatile IMG_UINT32		aui32SampleIRQCount[RGXFW_THREAD_NUM];
+
+	DEVMEM_MEMDESC			*psRGXFaultAddressMemDesc;
+
+	DEVMEM_MEMDESC			*psSLC3FenceMemDesc;
+
+	/* If we do 10 deferred memory allocations per second, then the ID would wrap around after 13 years */
+	IMG_UINT32				ui32ZSBufferCurrID;	/*!< ID assigned to the next deferred devmem allocation */
+	IMG_UINT32				ui32FreelistCurrID;	/*!< ID assigned to the next freelist */
+
+	POS_LOCK 				hLockZSBuffer;		/*!< Lock to protect simultaneous access to ZSBuffers */
+	DLLIST_NODE				sZSBufferHead;		/*!< List of on-demand ZSBuffers */
+	POS_LOCK 				hLockFreeList;		/*!< Lock to protect simultaneous access to Freelists */
+	DLLIST_NODE				sFreeListHead;		/*!< List of growable Freelists */
+	PSYNC_PRIM_CONTEXT		hSyncPrimContext;
+	PVRSRV_CLIENT_SYNC_PRIM *psPowSyncPrim;
+
+	IMG_UINT32				ui32ActivePMReqOk;
+	IMG_UINT32				ui32ActivePMReqDenied;
+	IMG_UINT32				ui32ActivePMReqNonIdle;
+	IMG_UINT32				ui32ActivePMReqRetry;
+	IMG_UINT32				ui32ActivePMReqTotal;
+
+	IMG_HANDLE				hProcessQueuesMISR;
+
+	IMG_UINT32 				ui32DeviceFlags;		/*!< Flags to track general device state */
+
+	/* GPU DVFS Table */
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable;
+
+	/* Pointer to function returning the GPU utilisation statistics since the last
+	 * time the function was called. Supports different users at the same time.
+	 *
+	 * psReturnStats [out]: GPU utilisation statistics (active high/active low/idle/blocked)
+	 *                      in microseconds since the last time the function was called
+	 *                      by a specific user (identified by hGpuUtilUser)
+	 *
+	 * Returns PVRSRV_OK in case the call completed without errors,
+	 * some other value otherwise.
+	 */
+	PVRSRV_ERROR (*pfnGetGpuUtilStats) (PVRSRV_DEVICE_NODE *psDeviceNode,
+	                                    IMG_HANDLE hGpuUtilUser,
+	                                    RGXFWIF_GPU_UTIL_STATS *psReturnStats);
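+
+	/* Illustrative use (hGpuUtilUser is an assumed, previously registered
+	 * user handle): a caller can derive an active-time ratio from the
+	 * returned stats, e.g.
+	 *
+	 *   RGXFWIF_GPU_UTIL_STATS sStats;
+	 *   if ((psDevInfo->pfnGetGpuUtilStats(psDeviceNode, hGpuUtilUser, &sStats) == PVRSRV_OK) &&
+	 *       sStats.bValid && (sStats.ui64GpuStatCumulative != 0))
+	 *   {
+	 *       IMG_UINT64 ui64ActivePct =
+	 *           ((sStats.ui64GpuStatActiveHigh + sStats.ui64GpuStatActiveLow) * 100) /
+	 *           sStats.ui64GpuStatCumulative;
+	 *   }
+	 */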
+
+	POS_LOCK				hGPUUtilLock;
+
+	/* Register configuration */
+	RGX_REG_CONFIG			sRegCongfig;
+
+	IMG_BOOL				bRGXPowered;
+	DLLIST_NODE				sMemoryContextList;
+
+	POSWR_LOCK				hRenderCtxListLock;
+	POSWR_LOCK				hComputeCtxListLock;
+	POSWR_LOCK				hTransferCtxListLock;
+	POSWR_LOCK				hTDMCtxListLock;
+	POSWR_LOCK				hMemoryCtxListLock;
+	POSWR_LOCK				hKickSyncCtxListLock;
+
+	/* Linked list of deferred KCCB commands due to a full KCCB */
+	POS_LOCK 				hLockKCCBDeferredCommandsList;
+	DLLIST_NODE				sKCCBDeferredCommandsListHead;
+	IMG_UINT32				ui32KCCBDeferredCommandsCount;	/*!< No of commands in the deferred list*/
+
+	/* Linked lists of contexts on this device */
+	DLLIST_NODE				sRenderCtxtListHead;
+	DLLIST_NODE				sComputeCtxtListHead;
+	DLLIST_NODE				sTransferCtxtListHead;
+	DLLIST_NODE				sTDMCtxtListHead;
+	DLLIST_NODE				sKickSyncCtxtListHead;
+
+	DLLIST_NODE 			sCommonCtxtListHead;
+	POSWR_LOCK				hCommonCtxtListLock;
+	IMG_UINT32				ui32CommonCtxtCurrentID;	/*!< ID assigned to the next common context */
+
+	POS_LOCK 				hDebugFaultInfoLock;	/*!< Lock to protect the debug fault info list */
+	POS_LOCK 				hMMUCtxUnregLock;		/*!< Lock to protect list of unregistered MMU contexts */
+
+	POS_LOCK				hNMILock; /*!< Lock to protect NMI operations */
+
+	RGX_DUST_STATE			sDustReqState;
+
+	RGX_LAYER_PARAMS		sLayerParams;
+
+	RGXFWIF_DM				eBPDM;					/*!< Current breakpoint data master */
+	IMG_BOOL				bBPSet;					/*!< A Breakpoint has been set */
+	POS_LOCK				hBPLock;				/*!< Lock for break point operations */
+
+	IMG_UINT32				ui32CoherencyTestsDone;
+
+	ATOMIC_T				iCCBSubmissionOrdinal; /* Rolling count used to indicate CCB submission order (all CCBs) */
+	POS_LOCK				hCCBRecoveryLock;      /* Lock to protect pvEarliestStalledClientCCB and ui32OldestSubmissionOrdinal variables*/
+	void					*pvEarliestStalledClientCCB; /* Will point to cCCB command to unblock in the event of a stall */
+	IMG_UINT32				ui32OldestSubmissionOrdinal; /* Earliest submission ordinal of CCB entry found so far */
+	IMG_UINT32				ui32SLRHoldoffCounter;   /* Decremented each time health check is called until zero. SLR only happen when zero. */
+
+	POS_LOCK				hCCBStallCheckLock; /* Lock used to guard against multiple threads simultaneously checking for stalled CCBs */
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+	/* Firmware gcov buffer */
+	DEVMEM_MEMDESC 			*psFirmwareGcovBufferMemDesc;      /*!< mem desc for Firmware gcov dumping buffer */
+	IMG_UINT32				ui32FirmwareGcovSize;
+#endif
+
+} PVRSRV_RGXDEV_INFO;
+
+
+
+typedef struct _RGX_TIMING_INFORMATION_
+{
+	/*! GPU default core clock speed in Hz */
+	IMG_UINT32			ui32CoreClockSpeed;
+
+	/*! Active Power Management: GPU actively requests the host driver to be powered off */
+	IMG_BOOL			bEnableActivePM;
+
+	/*! Enable the GPU to power off internal Power Islands independently from the host driver */
+	IMG_BOOL			bEnableRDPowIsland;
+
+	/*! Active Power Management: Delay between the GPU idle and the request to the host */
+	IMG_UINT32			ui32ActivePMLatencyms;
+
+} RGX_TIMING_INFORMATION;
+
+typedef struct _RGX_DATA_
+{
+	/*! Timing information */
+	RGX_TIMING_INFORMATION	*psRGXTimingInfo;
+	IMG_BOOL bHasTDFWCodePhysHeap;
+	IMG_UINT32 uiTDFWCodePhysHeapID;
+	IMG_BOOL bHasTDSecureBufPhysHeap;
+	IMG_UINT32 uiTDSecureBufPhysHeapID;
+	IMG_BOOL bHasFWMemPhysHeap;
+	IMG_UINT32 uiFWMemPhysHeapID;
+} RGX_DATA;
+
+
+/*
+	RGX PDUMP register bank name (prefix)
+*/
+#define RGX_PDUMPREG_NAME		"RGXREG"
+#define RGX_TB_PDUMPREG_NAME	"EMUREG"
+
+#endif /* __RGXDEVICE_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfw_log_helper.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfw_log_helper.h
new file mode 100644
index 0000000..2ac666d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfw_log_helper.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@File           rgxfw_log_helper.h
+@Title          Firmware TBI logging helper function
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform       Generic
+@Description    This file contains some helper code to make TBI logging possible
+                Specifically, it uses the SFIDLIST xmacro to trace ids back to
+                the original strings.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGXFW_LOG_HELPER_H
+#define RGXFW_LOG_HELPER_H
+
+#include "rgx_fwif_sf.h"
+
+static IMG_CHAR *const groups[] = {
+#define X(A,B) #B,
+	RGXFW_LOG_SFGROUPLIST
+#undef X
+};
+
+/*  idToStringID : Search the SFs tuples {id, string} for a matching id.
+ *   Returns the index into the array if found, or RGXFW_SF_LAST if not.
+ *   bsearch could be used, as the ids are in increasing order. */
+#if defined(RGX_FIRMWARE)
+static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXFW_STID_FMT *const psSFs)
+#else
+static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXKM_STID_FMT *const psSFs)
+#endif
+{
+	IMG_UINT32 i = 0, ui32Id = (IMG_UINT32)RGXFW_SF_LAST;
+
+	for ( i = 0 ; psSFs[i].ui32Id != (IMG_UINT32)RGXFW_SF_LAST ; i++)
+	{
+		if ( ui32CheckData == psSFs[i].ui32Id )
+		{
+			ui32Id = i;
+			break;
+		}
+	}
+	return ui32Id;
+}
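+
+/* Usage sketch (illustrative; 'SFs' is the id/string tuple table from
+ * rgx_fwif_sf.h, and the exact member holding the string is assumed):
+ *
+ *   IMG_UINT32 ui32Idx = idToStringID(ui32EventId, SFs);
+ *   if (ui32Idx != (IMG_UINT32)RGXFW_SF_LAST)
+ *   {
+ *       // SFs[ui32Idx] holds the format string matching ui32EventId
+ *   }
+ */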
+
+#endif /* RGXFW_LOG_HELPER_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwdbg.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwdbg.c
new file mode 100644
index 0000000..6c28949
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwdbg.c
@@ -0,0 +1,296 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debugging and miscellaneous functions server implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel services functions for debugging and other
+                miscellaneous functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "rgxfwdbg.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "pdump_km.h"
+#include "mmu_common.h"
+#include "devicemem_server.h"
+#include "osfunc.h"
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSLCSetBypassStateKM(
+	CONNECTION_DATA * psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  uiFlags,
+	IMG_BOOL bSetBypassed)
+{
+	RGXFWIF_KCCB_CMD  sSLCBPCtlCmd;
+	PVRSRV_ERROR  eError = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	sSLCBPCtlCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCBPCTL;
+	sSLCBPCtlCmd.uCmdData.sSLCBPCtlData.bSetBypassed = bSetBypassed;
+	sSLCBPCtlCmd.uCmdData.sSLCBPCtlData.uiFlags = uiFlags;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+	                            RGXFWIF_DM_GP,
+	                            &sSLCBPCtlCmd,
+	                            0,
+	                            PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXFWDebugSLCSetEnableStateKM: RGXScheduleCommandfailed. Error:%u", eError));
+	}
+	else
+	{
+		/* Wait for the SLC flush to complete */
+		eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFWDebugSLCSetEnableStateKM: Waiting for value aborted with error (%u)", eError));
+		}
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugQueryFWLogKM(
+	const CONNECTION_DATA *psConnection,
+	const PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32 *pui32RGXFWLogType)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	if (!psDeviceNode || !pui32RGXFWLogType)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	if (!psDevInfo || !psDevInfo->psRGXFWIfTraceBuf)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pui32RGXFWLogType = psDevInfo->psRGXFWIfTraceBuf->ui32LogType;
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetFWLogKM(
+	const CONNECTION_DATA * psConnection,
+	const PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32RGXFWLogType)
+{
+	RGXFWIF_KCCB_CMD sLogTypeUpdateCmd;
+	PVRSRV_DEV_POWER_STATE ePowerState;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+	IMG_UINT32 ui32OldRGXFWLogType = psDevInfo->psRGXFWIfTraceBuf->ui32LogType;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* check log type is valid */
+	if (ui32RGXFWLogType & ~RGXFWIF_LOG_TYPE_MASK)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psDevInfo->hRGXFWIfBufInitLock);
+#endif  /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+	/* set the new log type and ensure the new log type is written to memory
+	 * before requesting the FW to read it
+	 */
+	psDevInfo->psRGXFWIfTraceBuf->ui32LogType = ui32RGXFWLogType;
+	OSMemoryBarrier();
+
+	/* Allocate firmware trace buffer resource(s) if not already done */
+	if (RGXTraceBufferIsInitRequired(psDevInfo))
+	{
+		eError = RGXTraceBufferInitOnDemandResources(psDevInfo);
+	}
+	/* If the log type is TBI, allocate the resource on demand and copy
+	 * the SFs to it
+	 */
+	else if (RGXTBIBufferIsInitRequired(psDevInfo))
+	{
+		eError = RGXTBIBufferInitOnDemandResources(psDevInfo);
+		if (eError == PVRSRV_OK)
+		{
+			sLogTypeUpdateCmd.uCmdData.sTBIBuffer = psDevInfo->sRGXFWIfTBIBuffer;
+		}
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate resource on-demand. Reverting to old value", __func__));
+		psDevInfo->psRGXFWIfTraceBuf->ui32LogType = ui32OldRGXFWLogType;
+		OSMemoryBarrier();
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psDevInfo->hRGXFWIfBufInitLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+		return eError;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psDevInfo->hRGXFWIfBufInitLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+	eError = PVRSRVPowerLock((const PPVRSRV_DEVICE_NODE) psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire power lock (%u)", __func__, eError));
+		return eError;
+	}
+
+	eError = PVRSRVGetDevicePowerState((const PPVRSRV_DEVICE_NODE) psDeviceNode, &ePowerState);
+
+	if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+	{
+		/* Ask the FW to update its cached version of logType value */
+		sLogTypeUpdateCmd.eCmdType = RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE;
+
+		eError = RGXSendCommand(psDevInfo,
+		                        RGXFWIF_DM_GP,
+		                        &sLogTypeUpdateCmd,
+		                        PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: RGXSendCommand failed. Error:%u", __func__, eError));
+		}
+		else
+		{
+			/* Give up the power lock as it is acquired in RGXWaitForFWOp */
+			PVRSRVPowerUnlock((const PPVRSRV_DEVICE_NODE) psDeviceNode);
+
+			/* Wait for the LogType value to be updated */
+			eError = RGXWaitForFWOp(psDevInfo, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"%s: Waiting for value aborted with error (%u)", __func__, eError));
+			}
+			return eError;
+		}
+	}
+
+	PVRSRVPowerUnlock((const PPVRSRV_DEVICE_NODE) psDeviceNode);
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetHCSDeadlineKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32HCSDeadlineMS)
+{
+	PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	return RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadlineMS);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetOSidPriorityKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32OSid,
+	IMG_UINT32  ui32OSidPriority)
+{
+	PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	return RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32OSidPriority);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetOSNewOnlineStateKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32OSid,
+	IMG_UINT32  ui32OSNewState)
+{
+	PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_OS_STATE_CHANGE eState;
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	eState = (ui32OSNewState) ? (RGXFWIF_OS_ONLINE) : (RGXFWIF_OS_OFFLINE);
+	return RGXFWSetFwOsState(psDevInfo, ui32OSid, eState);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugDumpFreelistPageListKM(
+	CONNECTION_DATA * psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+	DLLIST_NODE *psNode, *psNext;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (dllist_is_empty(&psDevInfo->sFreeListHead))
+	{
+		return PVRSRV_OK;
+	}
+
+	PVR_LOG(("---------------[ Begin Freelist Page List Dump ]------------------"));
+
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+	{
+		RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+		RGXDumpFreeListPageList(psFreeList);
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	PVR_LOG(("----------------[ End Freelist Page List Dump ]-------------------"));
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwdbg.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwdbg.h
new file mode 100644
index 0000000..e73f22c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwdbg.h
@@ -0,0 +1,108 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debugging and miscellaneous functions server interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel services functions for debugging and other
+                miscellaneous functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXFWDBG_H)
+#define RGXFWDBG_H
+
+#include <img_defs.h>
+#include <pvrsrv_error.h>
+#include <device.h>
+#include <pmr.h>
+
+#include "connection_server.h"
+
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSLCSetBypassStateKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  uiFlags,
+	IMG_BOOL  bSetBypassed);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugInitFWImageKM(
+	PMR *psFWImgDestPMR,
+	PMR *psFWImgSrcPMR,
+	IMG_UINT64 ui64FWImgLen,
+	PMR *psFWImgSigPMR,
+	IMG_UINT64 ui64FWSigLen);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugQueryFWLogKM(
+	const CONNECTION_DATA *psConnection,
+	const PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32 *pui32RGXFWLogType);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetFWLogKM(
+	const CONNECTION_DATA *psConnection,
+	const PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32RGXFWLogType);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetHCSDeadlineKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32HCSDeadlineMS);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetOSidPriorityKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32OSid,
+	IMG_UINT32  ui32OSidPriority);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetOSNewOnlineStateKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32OSid,
+	IMG_UINT32  ui32OSNewState);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugDumpFreelistPageListKM(
+	CONNECTION_DATA * psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwimageutils.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwimageutils.c
new file mode 100644
index 0000000..55cfd9d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwimageutils.c
@@ -0,0 +1,1037 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Firmware image utilities used at init time
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services Firmware image utilities used at init time
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* The routines implemented here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when trusted device is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxfwimageutils.h"
+#include "pvrsrv.h"
+
+
+/************************************************************************
+* FW layout information
+************************************************************************/
+#define MAX_NUM_ENTRIES (8)
+static RGX_FW_LAYOUT_ENTRY asRGXFWLayoutTable[MAX_NUM_ENTRIES];
+static IMG_UINT32 ui32LayoutEntryNum;
+
+
+static RGX_FW_LAYOUT_ENTRY* GetTableEntry(const void *hPrivate, RGX_FW_SECTION_ID eId)
+{
+	IMG_UINT32 i;
+
+	for (i = 0; i < ui32LayoutEntryNum; i++)
+	{
+		if (asRGXFWLayoutTable[i].eId == eId)
+		{
+			return &asRGXFWLayoutTable[i];
+		}
+	}
+
+	RGXErrorLog(hPrivate, "%s: id %u not found, returning entry 0\n",
+	            __func__, eId);
+
+	return &asRGXFWLayoutTable[0];
+}
+
+/*!
+*******************************************************************************
+
+ @Function      FindMMUSegment
+
+ @Description   Given a 32 bit FW address, attempt to find the corresponding
+                pointer to the FW allocation
+
+ @Input         ui32OffsetIn      : 32 bit FW address
+ @Input         pvHostFWCodeAddr  : Pointer to FW code
+ @Input         pvHostFWDataAddr  : Pointer to FW data
+ @Output        uiHostAddrOut     : CPU pointer equivalent to ui32OffsetIn
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR FindMMUSegment(IMG_UINT32 ui32OffsetIn,
+                                   void *pvHostFWCodeAddr,
+                                   void *pvHostFWDataAddr,
+                                   void **uiHostAddrOut)
+{
+	IMG_UINT32 i;
+
+	for (i = 0; i < ui32LayoutEntryNum; i++)
+	{
+		if ((ui32OffsetIn >= asRGXFWLayoutTable[i].ui32BaseAddr) &&
+		    (ui32OffsetIn < (asRGXFWLayoutTable[i].ui32BaseAddr + asRGXFWLayoutTable[i].ui32AllocSize)))
+		{
+			if (asRGXFWLayoutTable[i].eType == FW_CODE)
+			{
+				*uiHostAddrOut = pvHostFWCodeAddr;
+			}
+			else
+			{
+				*uiHostAddrOut = pvHostFWDataAddr;
+			}
+			goto found;
+		}
+	}
+
+	return PVRSRV_ERROR_INIT_FAILURE;
+
+found:
+	/* Add the offset to the pointer to the FW allocation only if
+	 * that allocation is available
+	 */
+	if (*uiHostAddrOut != NULL)
+	{
+		/* Translate the FW address into an offset within the allocation */
+		ui32OffsetIn -= asRGXFWLayoutTable[i].ui32BaseAddr;
+		ui32OffsetIn += asRGXFWLayoutTable[i].ui32AllocOffset;
+
+		/* Direct Mem write to mapped memory */
+		*(IMG_UINT8 **)uiHostAddrOut += ui32OffsetIn;
+	}
+
+	return PVRSRV_OK;
+}
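+
+/* For illustration (hypothetical layout entry): given
+ * { eType = FW_DATA, ui32BaseAddr = 0x028E0000, ui32AllocOffset = 0x1000 },
+ * the FW address 0x028E0020 resolves to pvHostFWDataAddr + 0x1020. */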
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXFWConfigureSegID
+
+ @Description   Configures a single segment of the Segment MMU
+                (base, limit and out_addr)
+
+ @Input         hPrivate        : Implementation specific data
+ @Input         ui64SegOutAddr  : Segment output base address (40 bit devVaddr)
+ @Input         ui32SegBase     : Segment input base address (32 bit FW address)
+ @Input         ui32SegLimit    : Segment size
+ @Input         ui32SegID       : Segment ID
+ @Input         pszName         : Segment name
+ @Input         ppui32BootConf  : Pointer to bootloader data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXFWConfigureSegID(const void *hPrivate,
+                                IMG_UINT64 ui64SegOutAddr,
+                                IMG_UINT32 ui32SegBase,
+                                IMG_UINT32 ui32SegLimit,
+                                IMG_UINT32 ui32SegID,
+                                IMG_UINT32 **ppui32BootConf)
+{
+	IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+	IMG_UINT32 ui32SegOutAddr0 = ui64SegOutAddr & 0x00000000FFFFFFFFUL;
+	IMG_UINT32 ui32SegOutAddr1 = (ui64SegOutAddr >> 32) & 0x00000000FFFFFFFFUL;
+
+	/* META segments have a minimum size */
+	IMG_UINT32 ui32LimitOff = (ui32SegLimit < RGXFW_SEGMMU_ALIGN) ?
+	                          RGXFW_SEGMMU_ALIGN : ui32SegLimit;
+	/* the limit is an offset, therefore off = size - 1 */
+	ui32LimitOff -= 1;
+
+	RGXCommentLog(hPrivate,
+	              "* Seg%d: meta_addr = 0x%08x, devv_addr = 0x%" IMG_UINT64_FMTSPECx ", limit = 0x%x",
+	              ui32SegID,
+	              ui32SegBase,
+	              ui64SegOutAddr,
+	              ui32LimitOff);
+
+	ui32SegBase |= RGXFW_SEGMMU_ALLTHRS_WRITEABLE;
+
+	*pui32BootConf++ = META_CR_MMCU_SEGMENTn_BASE(ui32SegID);
+	*pui32BootConf++ = ui32SegBase;
+
+	*pui32BootConf++ = META_CR_MMCU_SEGMENTn_LIMIT(ui32SegID);
+	*pui32BootConf++ = ui32LimitOff;
+
+	*pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA0(ui32SegID);
+	*pui32BootConf++ = ui32SegOutAddr0;
+
+	*pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA1(ui32SegID);
+	*pui32BootConf++ = ui32SegOutAddr1;
+
+	*ppui32BootConf = pui32BootConf;
+}
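+
+/* For illustration, a single RGXFWConfigureSegID() call appends four
+ * <register, value> pairs to the boot config stream:
+ *   MMCU_SEGMENTn_BASE(id)  <- ui32SegBase | RGXFW_SEGMMU_ALLTHRS_WRITEABLE
+ *   MMCU_SEGMENTn_LIMIT(id) <- max(ui32SegLimit, RGXFW_SEGMMU_ALIGN) - 1
+ *   MMCU_SEGMENTn_OUTA0(id) <- output address bits [31:0]
+ *   MMCU_SEGMENTn_OUTA1(id) <- output address bits [39:32]
+ */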
+
+/*!
+*******************************************************************************
+
+ @Function      RGXFWConfigureSegMMU
+
+ @Description   Configures META's Segment MMU
+
+ @Input         hPrivate             : Implementation specific data
+ @Input         psFWCodeDevVAddrBase : FW code base device virtual address
+ @Input         psFWDataDevVAddrBase : FW data base device virtual address
+ @Input         ppui32BootConf       : Pointer to bootloader data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXFWConfigureSegMMU(const void       *hPrivate,
+                                 IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+                                 IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+                                 IMG_UINT32       **ppui32BootConf)
+{
+	IMG_UINT64 ui64SegOutAddrTop;
+	IMG_UINT32 i;
+
+	PVR_UNREFERENCED_PARAMETER(psFWCodeDevVAddrBase);
+
+	/* Configure Segment MMU */
+	RGXCommentLog(hPrivate, "********** FW configure Segment MMU **********");
+
+	if (RGX_DEVICE_HAS_ERN(hPrivate, 45914))
+	{
+		ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_ERN_45914(META_MMU_CONTEXT_MAPPING, RGXFW_SEGMMU_META_DM_ID);
+	}
+	else
+	{
+		ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_PRE_S7(META_MMU_CONTEXT_MAPPING, RGXFW_SEGMMU_META_DM_ID);
+	}
+
+	for (i = 0; i < ui32LayoutEntryNum; i++)
+	{
+		/*
+		 * FW code is using the bootloader segment which is already configured on boot.
+		 * FW coremem code and data don't use the segment MMU.
+		 * Only the FW data segment needs to be configured.
+		 */
+
+		if (asRGXFWLayoutTable[i].eType == FW_DATA)
+		{
+			IMG_UINT64 ui64SegOutAddr;
+			IMG_UINT32 ui32SegId = RGXFW_SEGMMU_DATA_ID;
+
+			ui64SegOutAddr = (psFWDataDevVAddrBase->uiAddr | ui64SegOutAddrTop) +
+			                  asRGXFWLayoutTable[i].ui32AllocOffset;
+
+			RGXFWConfigureSegID(hPrivate,
+			                    ui64SegOutAddr,
+			                    asRGXFWLayoutTable[i].ui32BaseAddr,
+			                    asRGXFWLayoutTable[i].ui32AllocSize,
+			                    ui32SegId,
+			                    ppui32BootConf); /* write the sequence to the bootloader */
+
+			break;
+		}
+	}
+}
+
+/*!
+*******************************************************************************
+
+ @Function      RGXFWConfigureMetaCaches
+
+ @Description   Configure and enable the Meta instruction and data caches
+
+ @Input         hPrivate          : Implementation specific data
+ @Input         ui32NumThreads    : Number of FW threads in use
+ @Input         ui32MainThreadID  : ID of the FW thread in use
+                                    (only meaningful if ui32NumThreads == 1)
+ @Input         ppui32BootConf    : Pointer to bootloader data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXFWConfigureMetaCaches(const void *hPrivate,
+                                     IMG_UINT32 ui32NumThreads,
+                                     IMG_UINT32 ui32MainThreadID,
+                                     IMG_UINT32 **ppui32BootConf)
+{
+	IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+	IMG_UINT32 ui32DCacheT0, ui32ICacheT0;
+	IMG_UINT32 ui32DCacheT1, ui32ICacheT1;
+	IMG_UINT32 ui32DCacheT2, ui32ICacheT2;
+	IMG_UINT32 ui32DCacheT3, ui32ICacheT3;
+
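+/* Meta control register addresses and field values used by the cache
+ * configuration sequence below (local definitions). */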
+#define META_CR_MMCU_LOCAL_EBCTRL                        (0x04830600)
+#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN                  (0x3 << 14)
+#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN                  (0x3 << 6)
+#define META_CR_SYSC_DCPART(n)                           (0x04830200 + (n)*0x8)
+#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE         (0x1 << 31)
+#define META_CR_SYSC_ICPART(n)                           (0x04830220 + (n)*0x8)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF  (0x8 << 16)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE       (0xF)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE       (0x7)
+#define META_CR_MMCU_DCACHE_CTRL                         (0x04830018)
+#define META_CR_MMCU_ICACHE_CTRL                         (0x04830020)
+#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN           (0x1)
+
+	RGXCommentLog(hPrivate, "********** Meta caches configuration *********");
+
+	/* Initialise I/Dcache settings */
+	ui32DCacheT0 = ui32DCacheT1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+	ui32DCacheT2 = ui32DCacheT3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+	ui32ICacheT0 = ui32ICacheT1 = ui32ICacheT2 = ui32ICacheT3 = 0;
+
+	if (ui32NumThreads == 1)
+	{
+		if (ui32MainThreadID == 0)
+		{
+			ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+			ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+		}
+		else
+		{
+			ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+			ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+		}
+	}
+	else
+	{
+		ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE;
+		ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE;
+
+		ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE |
+		                META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF;
+		ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE |
+		                META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF;
+	}
+
+	/* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */
+	*pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL;
+	*pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL_ICWIN |
+	                   META_CR_MMCU_LOCAL_EBCTRL_DCWIN;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_MMCU_LOCAL_EBCTRL,
+	              META_CR_MMCU_LOCAL_EBCTRL_ICWIN | META_CR_MMCU_LOCAL_EBCTRL_DCWIN);
+
+	/* Data cache partitioning thread 0 to 3 */
+	*pui32BootConf++ = META_CR_SYSC_DCPART(0);
+	*pui32BootConf++ = ui32DCacheT0;
+	*pui32BootConf++ = META_CR_SYSC_DCPART(1);
+	*pui32BootConf++ = ui32DCacheT1;
+	*pui32BootConf++ = META_CR_SYSC_DCPART(2);
+	*pui32BootConf++ = ui32DCacheT2;
+	*pui32BootConf++ = META_CR_SYSC_DCPART(3);
+	*pui32BootConf++ = ui32DCacheT3;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_DCPART(0), ui32DCacheT0);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_DCPART(1), ui32DCacheT1);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_DCPART(2), ui32DCacheT2);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_DCPART(3), ui32DCacheT3);
+
+	/* Enable data cache hits */
+	*pui32BootConf++ = META_CR_MMCU_DCACHE_CTRL;
+	*pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_MMCU_DCACHE_CTRL,
+	              META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+	/* Instruction cache partitioning thread 0 to 3 */
+	*pui32BootConf++ = META_CR_SYSC_ICPART(0);
+	*pui32BootConf++ = ui32ICacheT0;
+	*pui32BootConf++ = META_CR_SYSC_ICPART(1);
+	*pui32BootConf++ = ui32ICacheT1;
+	*pui32BootConf++ = META_CR_SYSC_ICPART(2);
+	*pui32BootConf++ = ui32ICacheT2;
+	*pui32BootConf++ = META_CR_SYSC_ICPART(3);
+	*pui32BootConf++ = ui32ICacheT3;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_ICPART(0), ui32ICacheT0);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_ICPART(1), ui32ICacheT1);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_ICPART(2), ui32ICacheT2);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_ICPART(3), ui32ICacheT3);
+
+	/* Enable instruction cache hits */
+	*pui32BootConf++ = META_CR_MMCU_ICACHE_CTRL;
+	*pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_MMCU_ICACHE_CTRL,
+	              META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+	*pui32BootConf++ = 0x040000C0;
+	*pui32BootConf++ = 0;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", 0x040000C0, 0);
+
+	*ppui32BootConf = pui32BootConf;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      ProcessLDRCommandStream
+
+ @Description   Process the output of the Meta toolchain in the .LDR format
+                copying code and data sections into their final location and
+                passing some information to the Meta bootloader
+
+ @Input         hPrivate                 : Implementation specific data
+ @Input         pbLDR                    : Pointer to FW blob
+ @Input         pvHostFWCodeAddr         : Pointer to FW code
+ @Input         pvHostFWDataAddr         : Pointer to FW data
+ @Input         pvHostFWCorememCodeAddr  : Pointer to FW coremem code
+ @Input         pvHostFWCorememDataAddr  : Pointer to FW coremem data
+ @Input         ppui32BootConf           : Pointer to bootloader data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate,
+                                     const IMG_BYTE* pbLDR,
+                                     void* pvHostFWCodeAddr,
+                                     void* pvHostFWDataAddr,
+                                     void* pvHostFWCorememCodeAddr,
+                                     void* pvHostFWCorememDataAddr,
+                                     IMG_UINT32 **ppui32BootConf)
+{
+	RGX_META_LDR_BLOCK_HDR *psHeader = (RGX_META_LDR_BLOCK_HDR *) pbLDR;
+	RGX_META_LDR_L1_DATA_BLK *psL1Data =
+	    (RGX_META_LDR_L1_DATA_BLK*) ((IMG_UINT8 *) pbLDR + psHeader->ui32SLData);
+
+	IMG_UINT32 *pui32BootConf  = ppui32BootConf ? *ppui32BootConf : NULL;
+	IMG_UINT32 ui32CorememSize = RGXGetFWCorememSize(hPrivate);
+
+	RGXCommentLog(hPrivate, "**********************************************");
+	RGXCommentLog(hPrivate, "************** Begin LDR Parsing *************");
+	RGXCommentLog(hPrivate, "**********************************************");
+
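+	/* The LDR stream is a linked list of L1 blocks: each carries a command
+	 * in ui16Cmd plus command data, and ui32Next chains to the next block
+	 * (0xFFFFFFFF terminates the list). LOADMEM/CONFIG commands reference
+	 * L2 data blocks elsewhere in the blob. */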
+	while (psL1Data != NULL)
+	{
+		if (RGX_META_LDR_BLK_IS_COMMENT(psL1Data->ui16Cmd))
+		{
+			/* Don't process comment blocks */
+			goto NextBlock;
+		}
+
+		switch (psL1Data->ui16Cmd & RGX_META_LDR_CMD_MASK)
+		{
+			case RGX_META_LDR_CMD_LOADMEM:
+			{
+				RGX_META_LDR_L2_DATA_BLK *psL2Block =
+				    (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[1]);
+				IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+				IMG_UINT32 ui32DataSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+				void *pvWriteAddr;
+				PVRSRV_ERROR eError;
+				IMG_BOOL bIsCoremem = IMG_FALSE;
+				IMG_UINT32 ui32CorememStartAddr;
+				void *pvFWCorememAddr = NULL;
+
+				if (RGX_META_IS_COREMEM_CODE(ui32Offset, ui32CorememSize))
+				{
+					RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, META_COREMEM_CODE);
+
+					/* coremem code */
+					bIsCoremem = IMG_TRUE;
+					ui32CorememStartAddr = psEntry->ui32BaseAddr;
+					pvFWCorememAddr = pvHostFWCorememCodeAddr;
+				}
+				else if (RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
+				{
+					RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, META_COREMEM_DATA);
+
+					/* coremem data */
+					bIsCoremem = IMG_TRUE;
+					ui32CorememStartAddr = psEntry->ui32BaseAddr;
+					pvFWCorememAddr = pvHostFWCorememDataAddr;
+				}
+
+				if (bIsCoremem)
+				{
+					/* Check that there is a valid allocation for the coremem code/data */
+					if (pvFWCorememAddr == NULL)
+					{
+						RGXErrorLog(hPrivate,
+						            "%s: Coremem code/data found but no coremem allocation available!",
+						            __func__);
+
+						return PVRSRV_ERROR_INIT_FAILURE;
+					}
+
+					/* Copy coremem data to buffer. The FW copies it to the actual coremem */
+					ui32Offset -= ui32CorememStartAddr;
+
+					RGXMemCopy(hPrivate,
+					           (void*)((IMG_UINT8 *)pvFWCorememAddr + ui32Offset),
+					           psL2Block->aui32BlockData,
+					           ui32DataSize);
+				}
+				else
+				{
+					/* Global range is aliased to local range */
+					ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+					eError = FindMMUSegment(ui32Offset,
+					                        pvHostFWCodeAddr,
+					                        pvHostFWDataAddr,
+					                        &pvWriteAddr);
+
+					if (eError != PVRSRV_OK)
+					{
+						RGXErrorLog(hPrivate,
+						            "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
+						            ui32Offset, ui32DataSize);
+						return eError;
+					}
+
+					/* Write to FW allocation only if available */
+					if (pvWriteAddr)
+					{
+						RGXMemCopy(hPrivate,
+							   pvWriteAddr,
+							   psL2Block->aui32BlockData,
+							   ui32DataSize);
+					}
+				}
+
+				break;
+			}
+			case RGX_META_LDR_CMD_LOADCORE:
+			case RGX_META_LDR_CMD_LOADMMREG:
+			{
+				return PVRSRV_ERROR_INIT_FAILURE;
+			}
+			case RGX_META_LDR_CMD_START_THREADS:
+			{
+				/* Don't process this block */
+				break;
+			}
+			case RGX_META_LDR_CMD_ZEROMEM:
+			{
+				IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+				IMG_UINT32 ui32ByteCount = psL1Data->aui32CmdData[1];
+				void *pvWriteAddr;
+				PVRSRV_ERROR  eError;
+
+				if (RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
+				{
+					/* cannot zero coremem directly */
+					break;
+				}
+
+				/* Global range is aliased to local range */
+				ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+				eError = FindMMUSegment(ui32Offset,
+				                        pvHostFWCodeAddr,
+				                        pvHostFWDataAddr,
+				                        &pvWriteAddr);
+
+				if (eError != PVRSRV_OK)
+				{
+					RGXErrorLog(hPrivate,
+					            "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
+					            ui32Offset, ui32ByteCount);
+					return eError;
+				}
+
+				/* Write to FW allocation only if available */
+				if (pvWriteAddr)
+				{
+					RGXMemSet(hPrivate, pvWriteAddr, 0, ui32ByteCount);
+				}
+
+				break;
+			}
+			case RGX_META_LDR_CMD_CONFIG:
+			{
+				RGX_META_LDR_L2_DATA_BLK *psL2Block =
+				    (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[0]);
+				RGX_META_LDR_CFG_BLK *psConfigCommand = (RGX_META_LDR_CFG_BLK*) psL2Block->aui32BlockData;
+				IMG_UINT32 ui32L2BlockSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+				IMG_UINT32 ui32CurrBlockSize = 0;
+
+				while (ui32L2BlockSize)
+				{
+					switch (psConfigCommand->ui32Type)
+					{
+						case RGX_META_LDR_CFG_PAUSE:
+						case RGX_META_LDR_CFG_READ:
+						{
+							/* Unsupported config command (8-byte block) */
+							return PVRSRV_ERROR_INIT_FAILURE;
+						}
+						case RGX_META_LDR_CFG_WRITE:
+						{
+							IMG_UINT32 ui32RegisterOffset = psConfigCommand->aui32BlockData[0];
+							IMG_UINT32 ui32RegisterValue  = psConfigCommand->aui32BlockData[1];
+
+							/* Only write to bootloader if we got a valid
+							 * pointer to the FW code allocation
+							 */
+							if (pui32BootConf)
+							{
+								/* Do register write */
+								*pui32BootConf++ = ui32RegisterOffset;
+								*pui32BootConf++ = ui32RegisterValue;
+							}
+
+							RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+							              ui32RegisterOffset, ui32RegisterValue);
+
+							ui32CurrBlockSize = 12;
+							break;
+						}
+						case RGX_META_LDR_CFG_MEMSET:
+						case RGX_META_LDR_CFG_MEMCHECK:
+						{
+							/* Unsupported config command (20-byte block) */
+							return PVRSRV_ERROR_INIT_FAILURE;
+						}
+						default:
+						{
+							return PVRSRV_ERROR_INIT_FAILURE;
+						}
+					}
+					ui32L2BlockSize -= ui32CurrBlockSize;
+					psConfigCommand = (RGX_META_LDR_CFG_BLK*) (((IMG_UINT8*) psConfigCommand) + ui32CurrBlockSize);
+				}
+
+				break;
+			}
+			default:
+			{
+				return PVRSRV_ERROR_INIT_FAILURE;
+			}
+		}
+
+NextBlock:
+
+		if (psL1Data->ui32Next == 0xFFFFFFFF)
+		{
+			psL1Data = NULL;
+		}
+		else
+		{
+			psL1Data = (RGX_META_LDR_L1_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->ui32Next);
+		}
+	}
+
+	if (pui32BootConf)
+	{
+		*ppui32BootConf = pui32BootConf;
+	}
+
+	RGXCommentLog(hPrivate, "**********************************************");
+	RGXCommentLog(hPrivate, "************** End Loader Parsing ************");
+	RGXCommentLog(hPrivate, "**********************************************");
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      ProcessELFCommandStream
+
+ @Description   Process the output of the Mips toolchain in the .ELF format
+                copying code and data sections into their final location
+
+ @Input         hPrivate          : Implementation specific data
+ @Input         pbELF             : Pointer to FW blob
+ @Input         pvHostFWCodeAddr  : Pointer to FW code
+ @Input         pvHostFWDataAddr  : Pointer to FW data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate,
+                                     const IMG_BYTE *pbELF,
+                                     void *pvHostFWCodeAddr,
+                                     void *pvHostFWDataAddr)
+{
+	IMG_UINT32 ui32Entry;
+	RGX_MIPS_ELF_HDR *psHeader = (RGX_MIPS_ELF_HDR *)pbELF;
+	RGX_MIPS_ELF_PROGRAM_HDR *psProgramHeader =
+	    (RGX_MIPS_ELF_PROGRAM_HDR *)(pbELF + psHeader->ui32Ephoff);
+	PVRSRV_ERROR eError;
+
+	for (ui32Entry = 0; ui32Entry < psHeader->ui32Ephnum; ui32Entry++, psProgramHeader++)
+	{
+		void *pvWriteAddr;
+
+		/* Only consider loadable entries in the ELF segment table */
+		if (psProgramHeader->ui32Ptype != ELF_PT_LOAD) continue;
+
+		eError = FindMMUSegment(psProgramHeader->ui32Pvaddr,
+		                        pvHostFWCodeAddr,
+		                        pvHostFWDataAddr,
+		                        &pvWriteAddr);
+
+		if (eError != PVRSRV_OK)
+		{
+			RGXErrorLog(hPrivate,
+			            "%s: Addr 0x%x (size: %d) not found in any segment",__func__,
+			            psProgramHeader->ui32Pvaddr,
+			            psProgramHeader->ui32Pfilesz);
+			return eError;
+		}
+
+		/* Write to FW allocation only if available */
+		if (pvWriteAddr)
+		{
+			RGXMemCopy(hPrivate,
+			           pvWriteAddr,
+			           (IMG_PBYTE)(pbELF + psProgramHeader->ui32Poffset),
+			           psProgramHeader->ui32Pfilesz);
+
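+			/* Zero-fill the remainder of the in-memory segment (e.g. .bss),
+			 * since ui32Pmemsz may exceed ui32Pfilesz */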
+			RGXMemSet(hPrivate,
+			          (IMG_PBYTE)pvWriteAddr + psProgramHeader->ui32Pfilesz,
+			          0,
+			          psProgramHeader->ui32Pmemsz - psProgramHeader->ui32Pfilesz);
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate, RGX_FW_SECTION_ID eId)
+{
+	RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId);
+
+	return psEntry->ui32AllocOffset;
+}
+
+IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate, RGX_FW_SECTION_ID eId)
+{
+	RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId);
+
+	return psEntry->ui32MaxSize;
+}
+
+IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate, RGX_FW_SECTION_ID eId)
+{
+	RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId);
+
+	return psEntry->ui32AllocSize;
+}
+
+IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, RGX_FW_SECTION_ID eId)
+{
+	RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId);
+
+	return psEntry->ui32BaseAddr;
+}
+
+PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate,
+                                    const IMG_BYTE    *pbRGXFirmware,
+                                    const IMG_UINT32  ui32RGXFirmwareSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWDataAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize)
+{
+	RGX_FW_INFO_HEADER *psInfoHeader;
+	const IMG_BYTE *pbRGXFirmwareInfo;
+	const IMG_BYTE *pbRGXFirmwareLayout;
+	IMG_UINT32 i;
+
+	if (pbRGXFirmware == NULL || ui32RGXFirmwareSize <= FW_BLOCK_SIZE)
+	{
+		RGXErrorLog(hPrivate, "%s: Invalid FW binary at %p, size %u",
+		            __func__, pbRGXFirmware, ui32RGXFirmwareSize);
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+
+	/*
+	 * Acquire pointer to the FW info header within the FW image.
+	 * The format of the header in the FW image might not be the one expected
+	 * by the driver, but the driver should still be able to correctly read
+	 * the information below, as long as new/incompatible elements are added
+	 * at the end of the header (they will be ignored by the driver).
+	 */
+
+	pbRGXFirmwareInfo = pbRGXFirmware + ui32RGXFirmwareSize - FW_BLOCK_SIZE;
+	psInfoHeader = (RGX_FW_INFO_HEADER*)pbRGXFirmwareInfo;
+
+	/* If any of the following checks fails, the FW will likely not work properly */
+
+	if (psInfoHeader->ui32InfoVersion != FW_INFO_VERSION)
+	{
+		RGXErrorLog(hPrivate, "%s: FW info version mismatch (expected: %u, found: %u)",
+		            __func__,
+		            (IMG_UINT32) FW_INFO_VERSION,
+		            psInfoHeader->ui32InfoVersion);
+	}
+
+	if (psInfoHeader->ui32HeaderLen != sizeof(RGX_FW_INFO_HEADER))
+	{
+		RGXErrorLog(hPrivate, "%s: FW info header sizes mismatch (expected: %u, found: %u)",
+		            __func__,
+		            (IMG_UINT32) sizeof(RGX_FW_INFO_HEADER),
+		            psInfoHeader->ui32HeaderLen);
+	}
+
+	if (psInfoHeader->ui32LayoutEntrySize != sizeof(RGX_FW_LAYOUT_ENTRY))
+	{
+		RGXErrorLog(hPrivate, "%s: FW layout entry sizes mismatch (expected: %u, found: %u)",
+		            __func__,
+		            (IMG_UINT32) sizeof(RGX_FW_LAYOUT_ENTRY),
+		            psInfoHeader->ui32LayoutEntrySize);
+	}
+
+	if (psInfoHeader->ui32LayoutEntryNum > MAX_NUM_ENTRIES)
+	{
+		RGXErrorLog(hPrivate, "%s: Not enough storage for the FW layout table (max: %u entries, found: %u)",
+		            __func__,
+		            MAX_NUM_ENTRIES,
+		            psInfoHeader->ui32LayoutEntryNum);
+
+		/* Proceeding would overflow asRGXFWLayoutTable in the copy below */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	ui32LayoutEntryNum = psInfoHeader->ui32LayoutEntryNum;
+
+
+	/*
+	 * Copy FW layout table from FW image to local array.
+	 * One entry is copied at a time and the copy is limited to what the driver
+	 * expects to find in it. Assuming that new/incompatible elements
+	 * are added at the end of each entry, the loop below adapts the table
+	 * in the FW image into the format expected by the driver.
+	 */
+
+	pbRGXFirmwareLayout = pbRGXFirmwareInfo + psInfoHeader->ui32HeaderLen;
+
+	for (i = 0; i < ui32LayoutEntryNum; i++)
+	{
+		RGX_FW_LAYOUT_ENTRY *psOutEntry = &asRGXFWLayoutTable[i];
+
+		RGX_FW_LAYOUT_ENTRY *psInEntry = (RGX_FW_LAYOUT_ENTRY*)
+			(pbRGXFirmwareLayout + i * psInfoHeader->ui32LayoutEntrySize);
+
+		RGXMemCopy(hPrivate,
+		           (void*)psOutEntry,
+		           (void*)psInEntry,
+		           sizeof(RGX_FW_LAYOUT_ENTRY));
+	}
+
+
+	/* Calculate how much memory the FW needs for its code and data segments */
+
+	*puiFWCodeAllocSize = 0;
+	*puiFWDataAllocSize = 0;
+	*puiFWCorememCodeAllocSize = 0;
+	*puiFWCorememDataAllocSize = 0;
+
+	for (i = 0; i < ui32LayoutEntryNum; i++)
+	{
+		switch (asRGXFWLayoutTable[i].eType)
+		{
+			case FW_CODE:
+				*puiFWCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize;
+				break;
+
+			case FW_DATA:
+				*puiFWDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize;
+				break;
+
+			case FW_COREMEM_CODE:
+				*puiFWCorememCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize;
+				break;
+
+			case FW_COREMEM_DATA:
+				*puiFWCorememDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize;
+				break;
+
+			default:
+				RGXErrorLog(hPrivate, "%s: Unknown FW section type %u\n",
+				            __func__, asRGXFWLayoutTable[i].eType);
+				break;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate,
+                               const IMG_BYTE *pbRGXFirmware,
+                               void *pvFWCode,
+                               void *pvFWData,
+                               void *pvFWCorememCode,
+                               void *pvFWCorememData,
+                               RGX_FW_BOOT_PARAMS *puFWParams)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bMIPS = RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS);
+
+	if (!bMIPS)
+	{
+		IMG_UINT32 *pui32BootConf = NULL;
+		/* Skip bootloader configuration if a pointer to the FW code
+		 * allocation is not available
+		 */
+		if (pvFWCode)
+		{
+			/* This variable points to the bootloader code which is mostly
+			 * a sequence of <register address,register value> pairs
+			 */
+			pui32BootConf = ((IMG_UINT32*) pvFWCode) + RGXFW_BOOTLDR_CONF_OFFSET;
+
+			/* Slave port and JTAG accesses are privileged */
+			*pui32BootConf++ = META_CR_SYSC_JTAG_THREAD;
+			*pui32BootConf++ = META_CR_SYSC_JTAG_THREAD_PRIV_EN;
+
+			RGXFWConfigureSegMMU(hPrivate,
+			                     &puFWParams->sMeta.sFWCodeDevVAddr,
+			                     &puFWParams->sMeta.sFWDataDevVAddr,
+			                     &pui32BootConf);
+		}
+
+		/* Process FW image data stream */
+		eError = ProcessLDRCommandStream(hPrivate,
+		                                 pbRGXFirmware,
+		                                 pvFWCode,
+		                                 pvFWData,
+		                                 pvFWCorememCode,
+		                                 pvFWCorememData,
+		                                 &pui32BootConf);
+		if (eError != PVRSRV_OK)
+		{
+			RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
+			return eError;
+		}
+
+		/* Skip bootloader configuration if a pointer to the FW code
+		 * allocation is not available
+		 */
+		if (pvFWCode)
+		{
+			IMG_UINT32 ui32MainThreadID = puFWParams->sMeta.ui32MainThreadID;
+			IMG_UINT32 ui32NumThreads   = puFWParams->sMeta.ui32NumThreads;
+
+			if ((ui32NumThreads == 0) || (ui32NumThreads > 2) || (ui32MainThreadID >= 2))
+			{
+				RGXErrorLog(hPrivate,
+				            "%s: Wrong Meta thread configuration, using one thread only",
+				            __func__);
+
+				ui32NumThreads = 1;
+				ui32MainThreadID = 0;
+			}
+
+			RGXFWConfigureMetaCaches(hPrivate,
+			                         ui32NumThreads,
+			                         ui32MainThreadID,
+			                         &pui32BootConf);
+
+			/* Signal the end of the conf sequence */
+			*pui32BootConf++ = 0x0;
+			*pui32BootConf++ = 0x0;
+
+			if (puFWParams->sMeta.uiFWCorememCodeSize && (puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr != 0))
+			{
+				*pui32BootConf++ = puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr;
+				*pui32BootConf++ = puFWParams->sMeta.uiFWCorememCodeSize;
+			}
+			else
+			{
+				*pui32BootConf++ = 0;
+				*pui32BootConf++ = 0;
+			}
+
+			if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_DMA))
+			{
+				*pui32BootConf++ = (IMG_UINT32) (puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr >> 32);
+				*pui32BootConf++ = (IMG_UINT32) puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr;
+			}
+			else
+			{
+				*pui32BootConf++ = 0;
+				*pui32BootConf++ = 0;
+			}
+
+		}
+	}
+	else
+	{
+		/* Process FW image data stream */
+		eError = ProcessELFCommandStream(hPrivate,
+		                                 pbRGXFirmware,
+		                                 pvFWCode,
+		                                 pvFWData);
+		if (eError != PVRSRV_OK)
+		{
+			RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
+			return eError;
+		}
+
+		if (pvFWData)
+		{
+			/* To get a pointer to the bootloader configuration data start from a pointer to the FW image... */
+			IMG_UINT64 *pui64BootConfig = (IMG_UINT64 *) pvFWData;
+
+			/* ... jump to the boot/NMI data page... */
+			pui64BootConfig += RGXMIPSFW_GET_OFFSET_IN_QWORDS(RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA));
+
+			/* ... and then jump to the bootloader data offset within the page */
+			pui64BootConfig += RGXMIPSFW_GET_OFFSET_IN_QWORDS(RGXMIPSFW_BOOTLDR_CONF_OFFSET);
+
+			/* Rogue Registers physical address */
+			pui64BootConfig[RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET] = puFWParams->sMips.sGPURegAddr.uiAddr;
+
+			/* MIPS Page Table physical address. There are 16 pages for a firmware heap of 32 MB */
+			pui64BootConfig[RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET] = puFWParams->sMips.sFWPageTableAddr.uiAddr;
+
+			/* MIPS Stack Pointer Physical Address */
+			pui64BootConfig[RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET] = puFWParams->sMips.sFWStackAddr.uiAddr;
+
+			/* Reserved for future use */
+			pui64BootConfig[RGXMIPSFW_RESERVED_FUTURE_OFFSET] = 0;
+		}
+	}
+
+	return eError;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwimageutils.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwimageutils.h
new file mode 100644
index 0000000..77aadda
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwimageutils.h
@@ -0,0 +1,245 @@
+/*************************************************************************/ /*!
+@File
+@Title          Header for Services Firmware image utilities used at init time
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for Services Firmware image utilities used at init time
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXFWIMAGEUTILS_H__
+#define __RGXFWIMAGEUTILS_H__
+
+/* The routines declared here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when DRM security is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxlayer.h"
+
+
+typedef union _RGX_FW_BOOT_PARAMS_
+{
+	struct
+	{
+		/* META-only parameters */
+		IMG_DEV_VIRTADDR sFWCodeDevVAddr;
+		IMG_DEV_VIRTADDR sFWDataDevVAddr;
+		IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr;
+		RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr;
+		IMG_DEVMEM_SIZE_T uiFWCorememCodeSize;
+		IMG_DEV_VIRTADDR sFWCorememDataDevVAddr;
+		RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr;
+		IMG_UINT32 ui32NumThreads;
+		IMG_UINT32 ui32MainThreadID;
+	} sMeta;
+
+	struct
+	{
+		/* MIPS-only parameters */
+		IMG_DEV_PHYADDR sGPURegAddr;
+		IMG_DEV_PHYADDR sFWPageTableAddr;
+		IMG_DEV_PHYADDR sFWStackAddr;
+	} sMips;
+} RGX_FW_BOOT_PARAMS;
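+
+/* A minimal usage sketch (hypothetical names/addresses), MIPS flavour:
+ *
+ *     RGX_FW_BOOT_PARAMS uParams = { 0 };
+ *     uParams.sMips.sGPURegAddr.uiAddr      = sRegBase.uiAddr;
+ *     uParams.sMips.sFWPageTableAddr.uiAddr = sPageTable.uiAddr;
+ *     uParams.sMips.sFWStackAddr.uiAddr     = sStack.uiAddr;
+ *     eError = RGXProcessFWImage(hPrivate, pbFW, pvCode, pvData,
+ *                                NULL, NULL, &uParams);
+ */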
+
+/*!
+*******************************************************************************
+
+ @Function     RGXGetFWImageSectionOffset
+
+ @Input        hPrivate : Implementation specific data
+ @Input        eId      : Section id
+
+ @Description  Return offset of a Firmware section, relative to the beginning
+               of the code or data allocation (depending on the section id)
+
+******************************************************************************/
+IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate,
+                                      RGX_FW_SECTION_ID eId);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXGetFWImageSectionMaxSize
+
+ @Input        hPrivate         : Implementation specific data
+ @Input        eId              : Section id
+
+ @Description  Return maximum size (not allocation size) of a Firmware section
+
+******************************************************************************/
+IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate,
+                                       RGX_FW_SECTION_ID eId);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXGetFWImageSectionAllocSize
+
+ @Input        hPrivate         : Implementation specific data
+ @Input        eId              : Section id
+
+ @Description  Return allocation size of a Firmware section
+
+******************************************************************************/
+IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate,
+                                         RGX_FW_SECTION_ID eId);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXGetFWImageSectionAddress
+
+ @Input        hPrivate : Implementation specific data
+ @Input        eId      : Section id
+
+ @Description  Return base address of a Firmware section
+
+******************************************************************************/
+IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate,
+                                       RGX_FW_SECTION_ID eId);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXGetFWImageAllocSize
+
+ @Description  Return size of Firmware code/data/coremem code allocations
+
+ @Input        hPrivate            : Implementation specific data
+ @Input        pbRGXFirmware       : Pointer to FW binary
+ @Input        ui32RGXFirmwareSize : FW binary size
+ @Output       puiFWCodeAllocSize  : Code size
+ @Output       puiFWDataAllocSize  : Data size
+ @Output       puiFWCorememCodeAllocSize : Coremem code size (0 if N/A)
+ @Output       puiFWCorememDataAllocSize : Coremem data size (0 if N/A)
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate,
+                                    const IMG_BYTE    *pbRGXFirmware,
+                                    const IMG_UINT32  ui32RGXFirmwareSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWDataAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize);
+
+/*!
+*******************************************************************************
+
+ @Function      ProcessLDRCommandStream
+
+ @Description   Process the output of the Meta toolchain in the .LDR format
+                copying code and data sections into their final location and
+                passing some information to the Meta bootloader
+
+ @Input         hPrivate                 : Implementation specific data
+ @Input         pbLDR                    : Pointer to FW blob
+ @Input         pvHostFWCodeAddr         : Pointer to FW code
+ @Input         pvHostFWDataAddr         : Pointer to FW data
+ @Input         pvHostFWCorememCodeAddr  : Pointer to FW coremem code
+ @Input         pvHostFWCorememDataAddr  : Pointer to FW coremem data
+ @Input         ppui32BootConf           : Pointer to bootloader data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate,
+                                     const IMG_BYTE* pbLDR,
+                                     void* pvHostFWCodeAddr,
+                                     void* pvHostFWDataAddr,
+                                     void* pvHostFWCorememCodeAddr,
+                                     void* pvHostFWCorememDataAddr,
+                                     IMG_UINT32 **ppui32BootConf);
+
+/*!
+*******************************************************************************
+
+ @Function      ProcessELFCommandStream
+
+ @Description   Process the output of the Mips toolchain in the .ELF format
+                copying code and data sections into their final location
+
+ @Input         hPrivate          : Implementation specific data
+ @Input         pbELF             : Pointer to FW blob
+ @Input         pvHostFWCodeAddr  : Pointer to FW code
+ @Input         pvHostFWDataAddr  : Pointer to FW data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate,
+                                     const IMG_BYTE *pbELF,
+                                     void *pvHostFWCodeAddr,
+                                     void *pvHostFWDataAddr);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXProcessFWImage
+
+ @Description  Process the Firmware binary blob copying code and data
+               sections into their final location and passing some
+               information to the Firmware bootloader.
+               If a pointer to the final memory location for FW code or data
+               is not valid (NULL) then the relative section will not be
+               processed.
+
+ @Input        hPrivate        : Implementation specific data
+ @Input        pbRGXFirmware   : Pointer to FW blob
+ @Input        pvFWCode        : Pointer to FW code
+ @Input        pvFWData        : Pointer to FW data
+ @Input        pvFWCorememCode : Pointer to FW coremem code
+ @Input        pvFWCorememData : Pointer to FW coremem data
+ @Input        puFWParams      : Parameters used by the FW at boot time
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate,
+                               const IMG_BYTE *pbRGXFirmware,
+                               void *pvFWCode,
+                               void *pvFWData,
+                               void *pvFWCorememCode,
+                               void *pvFWCorememData,
+                               RGX_FW_BOOT_PARAMS *puFWParams);
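+
+/* A minimal call-order sketch (names hypothetical): RGXGetFWImageAllocSize()
+ * must run first, since it also populates the FW layout table used when the
+ * image is processed:
+ *
+ *     eError = RGXGetFWImageAllocSize(hPrivate, pbFW, ui32FWSize,
+ *                                     &uiCodeSize, &uiDataSize,
+ *                                     &uiCorememCodeSize, &uiCorememDataSize);
+ *     // allocate pvCode/pvData/... of the returned sizes, then:
+ *     eError = RGXProcessFWImage(hPrivate, pbFW, pvCode, pvData,
+ *                                pvCorememCode, pvCorememData, &uFWParams);
+ */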
+
+#endif /* __RGXFWIMAGEUTILS_H__ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwload.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwload.c
new file mode 100644
index 0000000..f93f2ed9b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwload.c
@@ -0,0 +1,295 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services firmware load and access routines for Linux
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/firmware.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "device.h"
+#include "module_common.h"
+#include "rgxfwload.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+
+struct RGXFW
+{
+	const struct firmware sFW;
+};
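+
+/* struct RGXFW is a thin wrapper around the kernel's struct firmware: the
+ * opaque handle returned by RGXLoadFirmware() is simply the struct firmware
+ * pointer cast to struct RGXFW *. */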
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) && defined(RGX_FW_SIGNED)
+
+/* The Linux kernel does not support the RSA PSS padding mode. It only
+ * supports the legacy PKCS#1 padding mode.
+ */
+#if defined(RGX_FW_PKCS1_PSS_PADDING)
+#error Linux does not support verification of RSA PSS padded signatures
+#endif
+
+#include <crypto/public_key.h>
+#include <crypto/hash_info.h>
+#include <crypto/hash.h>
+
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+
+#include "signfw.h"
+
+static bool VerifyFirmware(const struct firmware *psFW)
+{
+	struct FirmwareSignatureHeader *psHeader;
+	struct public_key_signature *psPKS;
+	unsigned char *szKeyID, *pcKeyID;
+	size_t uDigestSize, uDescSize;
+	void *pvSignature, *pvSigner;
+	struct crypto_shash *psTFM;
+	struct shash_desc *psDesc;
+	uint32_t ui32SignatureLen;
+	bool bVerified = false;
+	key_ref_t hKey;
+	uint8_t i;
+	int res;
+
+	if (psFW->size < FW_SIGN_BACKWARDS_OFFSET)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Firmware is too small (%zu bytes)",
+								__func__, psFW->size));
+		goto err_release_firmware;
+	}
+
+	psHeader = (struct FirmwareSignatureHeader *)
+					(psFW->data + (psFW->size - FW_SIGN_BACKWARDS_OFFSET));
+
+	/* All derived from u8 so can't be exploited to flow out of this page */
+	pvSigner    = (u8 *)psHeader + sizeof(struct FirmwareSignatureHeader);
+	pcKeyID     = (unsigned char *)((u8 *)pvSigner + psHeader->ui8SignerLen);
+	pvSignature = (u8 *)pcKeyID + psHeader->ui8KeyIDLen;
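+
+	/* Signature trailer layout within the last FW_SIGN_BACKWARDS_OFFSET bytes:
+	 * FirmwareSignatureHeader | signer name | key ID | signature payload */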
+
+	/* We cannot update KERNEL_RO in-place, so we must copy the len */
+	ui32SignatureLen = ntohl(psHeader->ui32SignatureLen);
+
+	if (psHeader->ui8Algo >= PKEY_ALGO__LAST)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Public key algorithm %u is not supported",
+								__func__, psHeader->ui8Algo));
+		goto err_release_firmware;
+	}
+
+	if (psHeader->ui8HashAlgo >= PKEY_HASH__LAST)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Hash algorithm %u is not supported",
+								__func__, psHeader->ui8HashAlgo));
+		goto err_release_firmware;
+	}
+
+	if (psHeader->ui8IDType != PKEY_ID_X509)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Only asymmetric X.509 PKI certificates "
+								"are supported", __func__));
+		goto err_release_firmware;
+	}
+
+	/* Generate a hash of the fw data (including the padding) */
+
+	psTFM = crypto_alloc_shash(hash_algo_name[psHeader->ui8HashAlgo], 0, 0);
+	if (IS_ERR(psTFM))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: crypto_alloc_shash() failed (%ld)",
+								__func__, PTR_ERR(psTFM)));
+		goto err_release_firmware;
+	}
+
+	uDescSize = crypto_shash_descsize(psTFM) + sizeof(*psDesc);
+	uDigestSize = crypto_shash_digestsize(psTFM);
+
+	psPKS = kzalloc(sizeof(*psPKS) + uDescSize + uDigestSize, GFP_KERNEL);
+	if (!psPKS)
+		goto err_free_crypto_shash;
+
+	psDesc = (struct shash_desc *)((u8 *)psPKS + sizeof(*psPKS));
+	psDesc->tfm = psTFM;
+	psDesc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	psPKS->pkey_algo = psHeader->ui8Algo;
+	psPKS->pkey_hash_algo = psHeader->ui8HashAlgo;
+
+	psPKS->digest = (u8 *)psPKS + sizeof(*psPKS) + uDescSize;
+	psPKS->digest_size = uDigestSize;
+
+	res = crypto_shash_init(psDesc);
+	if (res < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: crypto_shash_init() failed (%d)",
+								__func__, res));
+		goto err_free_pks;
+	}
+
+	res = crypto_shash_finup(psDesc, psFW->data, psFW->size - FW_SIGN_BACKWARDS_OFFSET,
+							 psPKS->digest);
+	if (res < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: crypto_shash_finup() failed (%d)",
+								__func__, res));
+		goto err_free_pks;
+	}
+
+	/* Populate the MPI with the signature payload */
+
+	psPKS->nr_mpi = 1;
+	psPKS->rsa.s = mpi_read_raw_data(pvSignature, ui32SignatureLen);
+	if (!psPKS->rsa.s)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: mpi_read_raw_data() failed", __func__));
+		goto err_free_pks;
+	}
+
+	/* Look up the key we'll use to verify this signature */
+
+	szKeyID = kmalloc(psHeader->ui8SignerLen + 2 +
+					  psHeader->ui8KeyIDLen * 2 + 1, GFP_KERNEL);
+	if (!szKeyID)
+		goto err_free_mpi;
+
+	memcpy(szKeyID, pvSigner, psHeader->ui8SignerLen);
+
+	szKeyID[psHeader->ui8SignerLen + 0] = ':';
+	szKeyID[psHeader->ui8SignerLen + 1] = ' ';
+
+	for (i = 0; i < psHeader->ui8KeyIDLen; i++)
+		sprintf(&szKeyID[psHeader->ui8SignerLen + 2 + i * 2],
+				"%02x", pcKeyID[i]);
+
+	szKeyID[psHeader->ui8SignerLen + 2 + psHeader->ui8KeyIDLen * 2] = 0;
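+
+	/* Resulting key description, e.g. (hypothetical): "Vendor: 0a1b2c3d" */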
+
+	hKey = keyring_search(make_key_ref(system_trusted_keyring, 1),
+						  &key_type_asymmetric, szKeyID);
+	if (IS_ERR(hKey))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Request for unknown key '%s' (%ld)",
+								szKeyID, PTR_ERR(hKey)));
+		goto err_free_keyid_string;
+	}
+
+	res = verify_signature(key_ref_to_ptr(hKey), psPKS);
+	if (res)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Firmware digital signature verification "
+								"failed (%d)", __func__, res));
+		goto err_put_key;
+	}
+
+	PVR_LOG(("Digital signature for '%s' verified successfully.",
+			 RGX_FW_FILENAME));
+	bVerified = true;
+err_put_key:
+	key_put(key_ref_to_ptr(hKey));
+err_free_keyid_string:
+	kfree(szKeyID);
+err_free_mpi:
+	mpi_free(psPKS->rsa.s);
+err_free_pks:
+	kfree(psPKS);
+err_free_crypto_shash:
+	crypto_free_shash(psTFM);
+err_release_firmware:
+	return bVerified;
+}
+
+#else /* defined(RGX_FW_SIGNED) */
+
+static inline bool VerifyFirmware(const struct firmware *psFW)
+{
+	return true;
+}
+
+#endif /* defined(RGX_FW_SIGNED) */
+
+struct RGXFW *
+RGXLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR *pszBVNCString)
+{
+	const struct firmware *psFW;
+	IMG_INT32 res;
+
+	res = request_firmware(&psFW, pszBVNCString, psDeviceNode->psDevConfig->pvOSDevice);
+	if (res != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') failed (%d)",
+						__func__, pszBVNCString, res));
+		return NULL;
+	}
+
+	if (!VerifyFirmware(psFW))
+	{
+		release_firmware(psFW);
+		return NULL;
+	}
+
+	return (struct RGXFW *)psFW;
+}
+
+void
+RGXUnloadFirmware(struct RGXFW *psRGXFW)
+{
+	const struct firmware *psFW = &psRGXFW->sFW;
+
+	release_firmware(psFW);
+}
+
+size_t
+RGXFirmwareSize(struct RGXFW *psRGXFW)
+{
+	const struct firmware *psFW = &psRGXFW->sFW;
+	return psFW->size;
+}
+
+const void *
+RGXFirmwareData(struct RGXFW *psRGXFW)
+{
+	const struct firmware *psFW = &psRGXFW->sFW;
+
+	return psFW->data;
+}
+
+/******************************************************************************
+ End of file (rgxfwload.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwload.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwload.h
new file mode 100644
index 0000000..a409082
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwload.h
@@ -0,0 +1,151 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services RGX OS Interface for loading the firmware
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file defines the OS interface through which the RGX
+                device initialisation code in the kernel/server will obtain
+                the RGX firmware binary image. The API is used during the
+                initialisation of an RGX device via the PVRSRVDeviceInitialise()
+                call sequence.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXFWLOAD_H__
+#define __RGXFWLOAD_H__
+
+#include "img_defs.h"
+#include "rgxdefs_km.h"
+#include "device_connection.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+/*! Opaque type handle defined and known to the OS layer implementation of this
+ * rgxfwload.h OS API. This private data is allocated in the implementation
+ * of RGXLoadFirmware() and contains whatever data and information needed to
+ * be able to acquire and return the firmware binary image to the Services
+ * kernel/server during initialisation.
+ * It is no longer required and may be freed when RGXUnloadFirmware()
+ * is called.
+ */
+typedef struct RGXFW RGXFW_t;
+
+
+/*************************************************************************/ /*!
+@Function     RGXLoadFirmware
+@Description  The OS implementation must load or acquire the
+              firmware (FW) image binary needed by the RGX driver stack.
+              A handle to the common layer device node is given
+              to identify which device instance in the system is being
+              initialised. The BVNC string is also supplied so that the
+              implementation knows which FW image to retrieve
+              since each FW image only supports one GPU type/revision.
+              The calling server code supports multiple GPU types and revisions
+              and will detect the specific GPU type and revision before calling
+              this API. It also has runtime configuration of the VZ mode,
+              hence this API must be able to retrieve different FW binary
+              images based on the pszBVNCString given. Knowing the purpose of
+              the end platform/system is key to understanding which FW images
+              must be made available to the kernel server.
+              On exit the implementation must return a pointer to some private
+              data it uses to hold the FW image information and data. It will
+              be passed onto later API calls by the kernel server code.
+              NULL should be returned if the FW image could not be retrieved.
+              The format of the BVNC string is as follows ([x] denotes
+              optional field):
+                "rgx.fw[.signed].B.V[p].N.C[.vz]"
+              The implementation must first try to load the FW identified
+              by the full BVpNC name. If this is not available then it
+              should fall back to retrieving the FW identified by the
+              plain BVNC name. The fields in the string are:
+                B, V, N, C are all unsigned integers identifying type/revision,
+                [.signed] is present when RGX_FW_SIGNED=1 is defined in the
+                  server build,
+                [p] is present for provisional GPU configurations (pre-silicon),
+                [.vz] is present when the kernel server is loaded on the HOST
+                  of a virtualised platform. See the DriverMode server
+                  AppHint for details.
+
+@Input        psDeviceNode    Device instance identifier.
+@Input        pszBVNCString   Identifier string of the FW image to
+                              be loaded/acquired in production driver.
+@Return       RGXFW*          Ptr to private data on success, NULL otherwise.
+*/ /**************************************************************************/
+struct RGXFW* RGXLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+                       const IMG_CHAR *pszBVNCString);
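+
+/* Illustrative sketch only, not part of this patch: on Linux the natural
+ * implementation of RGXLoadFirmware() wraps request_firmware() from
+ * <linux/firmware.h>. The RGXFW layout and the use of pvOSDevice below are
+ * assumptions made for this example, not the definitive implementation.
+ */
+#if 0
+struct RGXFW
+{
+	const struct firmware *psLinuxFW;	/* handle from request_firmware() */
+};
+
+struct RGXFW* RGXLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const IMG_CHAR *pszBVNCString)
+{
+	struct device *psDev = psDeviceNode->psDevConfig->pvOSDevice;
+	struct RGXFW *psRGXFW = kzalloc(sizeof(*psRGXFW), GFP_KERNEL);
+
+	if (psRGXFW == NULL)
+	{
+		return NULL;
+	}
+
+	/* request_firmware() returns 0 on success and takes a reference on
+	 * the image; the reference is released by RGXUnloadFirmware(). */
+	if (request_firmware(&psRGXFW->psLinuxFW, pszBVNCString, psDev) != 0)
+	{
+		kfree(psRGXFW);
+		return NULL;
+	}
+
+	return psRGXFW;
+}
+#endif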
+
+/*************************************************************************/ /*!
+@Function     RGXFirmwareData
+@Description  This function returns a pointer to the start of the FW image
+              binary data held in memory. The pointer must remain valid
+              until RGXUnloadFirmware() is called.
+@Input        psRGXFW  Private data opaque handle
+@Return       void*    Ptr to the FW binary image to be started on the GPU.
+*/ /**************************************************************************/
+const void* RGXFirmwareData(struct RGXFW *psRGXFW);
+
+/*************************************************************************/ /*!
+@Function     RGXFirmwareSize
+@Description  This function returns the size of the FW image binary data.
+@Input        psRGXFW  Private data opaque handle
+@Return       size_t   Size in bytes of the firmware binary image
+*/ /**************************************************************************/
+size_t RGXFirmwareSize(struct RGXFW *psRGXFW);
+
+/*************************************************************************/ /*!
+@Function     RGXUnloadFirmware
+@Description  This is called when the server has completed firmware
+              initialisation and no longer needs the private data
+              allocated by RGXLoadFirmware().
+@Input        psRGXFW  Private data opaque handle
+*/ /**************************************************************************/
+void RGXUnloadFirmware(struct RGXFW *psRGXFW);
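+
+/* Sketch of the expected call sequence from the kernel server during device
+ * initialisation (pszBVNC is a hypothetical name already derived from the
+ * detected GPU type/revision):
+ */
+#if 0
+struct RGXFW *psRGXFW = RGXLoadFirmware(psDeviceNode, pszBVNC);
+if (psRGXFW != NULL)
+{
+	const void *pvFWImage = RGXFirmwareData(psRGXFW);
+	size_t uiFWSize = RGXFirmwareSize(psRGXFW);
+
+	/* ... copy the image into the FW code/data heaps and boot the FW ... */
+
+	/* The image data is no longer referenced once initialisation is done */
+	RGXUnloadFirmware(psRGXFW);
+}
+#endif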
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+
+#endif /* __RGXFWLOAD_H__ */
+
+/******************************************************************************
+ End of file (rgxfwload.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwutils.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwutils.c
new file mode 100644
index 0000000..3f9af70
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwutils.c
@@ -0,0 +1,6289 @@
+/*************************************************************************/ /*!
+@File
+@Title          Rogue firmware utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Rogue firmware utility routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#if defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "img_defs.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "oskm_apphint.h"
+#include "cache_km.h"
+#include "allocmem.h"
+#include "physheap.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_server.h"
+
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "rgxfwutils.h"
+#include "rgx_options.h"
+#include "rgx_fwif_alignchecks.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_pdump_panics.h"
+#include "fwtrace_string.h"
+#include "rgxheapconfig.h"
+#include "pvrsrv.h"
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#include "rgxccb.h"
+#include "rgxcompute.h"
+#include "rgxtransfer.h"
+#include "rgxpower.h"
+#include "rgxtdmtransfer.h"
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+#include "rgxmem.h"
+#include "rgxta3d.h"
+#include "rgxkicksync.h"
+#include "rgxutils.h"
+#include "rgxtimecorr.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_external.h"
+#include "tlstream.h"
+#include "devicemem_server_utils.h"
+#include "htbuffer.h"
+#include "rgx_bvnc_defs_km.h"
+#include "info_page.h"
+
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+
+#ifdef __linux__
+#include <linux/kernel.h>	/* sprintf */
+#include <linux/string.h>	/* strncpy, strlen */
+#include "rogue_trace_events.h"
+#else
+#include <stdio.h>
+#include <string.h>
+#endif
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+#endif
+
+#include "vz_support.h"
+#include "vz_physheap.h"
+#include "rgx_heaps.h"
+
+/*!
+ ******************************************************************************
+ * HWPERF
+ *****************************************************************************/
+/* Size bounds of the Firmware L1 HWPERF buffer, in kilobytes. The default is
+ * taken from the HWPerfFWBufSizeInKB AppHint (2MB). Accessed by the
+ * Firmware and host driver. */
+#define RGXFW_HWPERF_L1_SIZE_MIN        (16U)
+#define RGXFW_HWPERF_L1_SIZE_DEFAULT    PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB
+#define RGXFW_HWPERF_L1_SIZE_MAX        (12288U)
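+
+/* For illustration, the effective L1 buffer size (in KB) is the AppHint
+ * value clamped into the [MIN, MAX] bounds above; a sketch of the clamp:
+ *
+ *   ui32L1SizeKB = MAX(RGXFW_HWPERF_L1_SIZE_MIN,
+ *                      MIN(ui32AppHintKB, RGXFW_HWPERF_L1_SIZE_MAX));
+ */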
+
+/* Kernel CCB length */
+/* The KCCB is made smaller in multi-OS (virtualisation) configurations to
+ * avoid flooding and overflowing the FW kick queue. */
+#define RGXFWIF_KCCB_NUMCMDS_LOG2_GPUVIRT_WITHOUT_FEATURE  (6)
+#define RGXFWIF_KCCB_NUMCMDS_LOG2_DEFAULT                  (7)
+
+
+/* Firmware CCB length */
+#if defined(SUPPORT_PDVFS)
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (8)
+#else
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (5)
+#endif
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OS] = {IRQ_COUNTER_STORAGE_REGS};
+#endif
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+/* Checkpoint CCB length */
+#define RGXFWIF_CHECKPOINTCCB_NUMCMDS_LOG2 (10)
+#endif
+
+/* Workload Estimation Firmware CCB length */
+#define RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2   (7)
+
+/* Size of memory buffer for firmware gcov data
+ * The actual data size is several hundred kilobytes. The buffer is an order of magnitude larger. */
+#define RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE (4*1024*1024)
+
+typedef struct
+{
+	RGXFWIF_KCCB_CMD        sKCCBcmd;
+	DLLIST_NODE             sListNode;
+	PDUMP_FLAGS_T           uiPdumpFlags;
+	PVRSRV_RGXDEV_INFO      *psDevInfo;
+	RGXFWIF_DM              eDM;
+} RGX_DEFERRED_KCCB_CMD;
+
+#if defined(PDUMP)
+/* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the
+ * PID filter example entries
+ */
+static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32),
+		"FW PID filtering assumes the IMG_PID type is 32-bits wide as it "
+		"generates WRW commands for loading the PID values");
+#endif
+
+static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_INIT* psRGXFWInit)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc;
+	IMG_UINT32	ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(
+			RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS));
+
+	PVR_DPF_ENTERED;
+
+	eError = DevmemAllocate(psDevInfo->psFirmwareMainHeap,
+			1,
+			ui32CacheLineSize,
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+			"FwSLC3FenceWA",
+			ppsSLC3FenceMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	/* We need to map it so the heap for this allocation is set */
+	eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc,
+			psDevInfo->psFirmwareMainHeap,
+			&psRGXFWInit->sSLC3FenceDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		DevmemFwFree(psDevInfo, *ppsSLC3FenceMemDesc);
+		*ppsSLC3FenceMemDesc = NULL;
+	}
+
+	PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc);
+}
+
+static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+	DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc;
+
+	if (psSLC3FenceMemDesc)
+	{
+		DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc);
+		DevmemFree(psSLC3FenceMemDesc);
+	}
+}
+
+static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value)
+{
+	/* ensure memory is flushed before kicking MTS */
+	OSWriteMemoryBarrier();
+
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value);
+
+	/* ensure the MTS kick goes through before continuing */
+	OSMemoryBarrier();
+}
+
+
+/*!
+ *******************************************************************************
+ @Function		RGXFWSetupSignatureChecks
+ @Description	Allocates the signature check buffer and publishes its FW
+				address and size in the given signature buffer control
+				structure
+ @Input			psDevInfo
+
+ @Return		PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupSignatureChecks(PVRSRV_RGXDEV_INFO* psDevInfo,
+		DEVMEM_MEMDESC**    ppsSigChecksMemDesc,
+		IMG_UINT32          ui32SigChecksBufSize,
+		RGXFWIF_SIGBUF_CTL* psSigBufCtl,
+		const IMG_CHAR*     pszBufferName)
+{
+	PVRSRV_ERROR	eError;
+	DEVMEM_FLAGS_T	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Allocate memory for the checks */
+	PDUMPCOMMENT("Allocate memory for %s signature checks", pszBufferName);
+	eError = DevmemFwAllocate(psDevInfo,
+			ui32SigChecksBufSize,
+			uiMemAllocFlags,
+			"FwSignatureChecks",
+			ppsSigChecksMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for signature checks (%u)",
+				ui32SigChecksBufSize,
+				eError));
+		return eError;
+	}
+
+	/* Prepare the pointer for the fw to access that memory */
+	RGXSetFirmwareAddress(&psSigBufCtl->sBuffer,
+			*ppsSigChecksMemDesc,
+			0, RFW_FWADDR_NOREF_FLAG);
+
+	DevmemPDumpLoadMem(	*ppsSigChecksMemDesc,
+			0,
+			ui32SigChecksBufSize,
+			PDUMP_FLAGS_CONTINUOUS);
+
+	psSigBufCtl->ui32LeftSizeInRegs = ui32SigChecksBufSize / sizeof(IMG_UINT32);
+
+	return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+/*!
+ *******************************************************************************
+ @Function		RGXFWSetupFirmwareGcovBuffer
+ @Description	Allocates the firmware gcov data buffer and publishes its FW
+				address and size in the given control structure
+ @Input			psDevInfo
+
+ @Return		PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupFirmwareGcovBuffer(PVRSRV_RGXDEV_INFO*			psDevInfo,
+		DEVMEM_MEMDESC**			ppsBufferMemDesc,
+		IMG_UINT32					ui32FirmwareGcovBufferSize,
+		RGXFWIF_FIRMWARE_GCOV_CTL*	psFirmwareGcovCtl,
+		const IMG_CHAR*				pszBufferName)
+{
+	PVRSRV_ERROR	eError;
+	DEVMEM_FLAGS_T	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Allocate memory for gcov */
+	PDUMPCOMMENT("Allocate memory for %s", pszBufferName);
+	eError = DevmemFwAllocate(psDevInfo,
+			ui32FirmwareGcovBufferSize,
+			uiMemAllocFlags,
+			pszBufferName,
+			ppsBufferMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for firmware gcov buffer (%u)",
+				ui32FirmwareGcovBufferSize,
+				eError));
+		return eError;
+	}
+
+	/* Prepare the pointer for the fw to access that memory */
+	RGXSetFirmwareAddress(&psFirmwareGcovCtl->sBuffer,
+			*ppsBufferMemDesc,
+			0,
+			RFW_FWADDR_NOREF_FLAG);
+
+	psFirmwareGcovCtl->ui32Size = ui32FirmwareGcovBufferSize;
+
+	return PVRSRV_OK;
+}
+#endif
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+/*!
+ *******************************************************************************
+ @Function		RGXFWSetupCounterBuffer
+ @Description	Allocates the power counter dump buffer and publishes its FW
+				address and size in the given control structure
+ @Input			psDevInfo
+
+ @Return		PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupCounterBuffer(PVRSRV_RGXDEV_INFO*			psDevInfo,
+		DEVMEM_MEMDESC**			ppsBufferMemDesc,
+		IMG_UINT32					ui32CounterDataBufferSize,
+		RGXFWIF_COUNTER_DUMP_CTL*	psCounterDumpCtl,
+		const IMG_CHAR*				pszBufferName)
+{
+	PVRSRV_ERROR	eError;
+	DEVMEM_FLAGS_T	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Allocate memory for the counter buffer */
+	PDUMPCOMMENT("Allocate memory for %s power counter buffer", pszBufferName);
+	eError = DevmemFwAllocate(psDevInfo,
+			ui32CounterDataBufferSize,
+			uiMemAllocFlags,
+			"FwCounterBuffer",
+			ppsBufferMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for counter buffer (%u)",
+				ui32CounterDataBufferSize,
+				eError));
+		return eError;
+	}
+
+	/* Prepare the pointer for the fw to access that memory */
+	RGXSetFirmwareAddress(&psCounterDumpCtl->sBuffer,
+			*ppsBufferMemDesc,
+			0,
+			RFW_FWADDR_NOREF_FLAG);
+
+	psCounterDumpCtl->ui32SizeInDwords = ui32CounterDataBufferSize >> 2;
+
+	return PVRSRV_OK;
+}
+#endif
+
+#if defined(RGXFW_ALIGNCHECKS)
+/*!
+ *******************************************************************************
+ @Function		RGXFWSetupAlignChecks
+ @Description   This function allocates and fills the memory needed for the
+                alignment checks of the UM and KM structures shared with the
+                firmware. The format of the data in the memory is as follows:
+                    <number of elements in the KM array>
+                    <array of KM structures' sizes and members' offsets>
+                    <number of elements in the UM array>
+                    <array of UM structures' sizes and members' offsets>
+                The UM array is passed from the user side. The firmware is
+                responsible for filling this part of the memory; once it has,
+                the host driver checks the UM structures when a client
+                connects.
+                If the macro is not defined the client driver fills the memory
+                and the firmware checks the alignment of all structures.
+ @Input			psDevInfo
+
+ @Return		PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo,
+		RGXFWIF_DEV_VIRTADDR	*psAlignChecksDevFW,
+		IMG_UINT32				*pui32RGXFWAlignChecks,
+		IMG_UINT32				ui32RGXFWAlignChecksArrLength)
+{
+	IMG_UINT32		aui32RGXFWAlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM };
+	IMG_UINT32		ui32RGXFWAlingChecksTotal;
+	IMG_UINT32*		paui32AlignChecks;
+	PVRSRV_ERROR	eError;
+
+	/* We don't know the number of elements in the UM array at this point,
+	 * so size the buffer for the maximum, RGXFW_ALIGN_CHECKS_UM_MAX.
+	 */
+	PVR_ASSERT(ui32RGXFWAlignChecksArrLength == 0);
+	ui32RGXFWAlingChecksTotal = sizeof(aui32RGXFWAlignChecksKM)
+	                            + RGXFW_ALIGN_CHECKS_UM_MAX * sizeof(IMG_UINT32)
+	                            + 2 * sizeof(IMG_UINT32);
+
+	/* Allocate memory for the checks */
+	PDUMPCOMMENT("Allocate memory for alignment checks");
+	eError = DevmemFwAllocate(psDevInfo,
+			ui32RGXFWAlingChecksTotal,
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | PVRSRV_MEMALLOCFLAG_UNCACHED,
+			"FwAlignmentChecks",
+			&psDevInfo->psRGXFWAlignChecksMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for alignment checks (%u)",
+				ui32RGXFWAlingChecksTotal,
+				eError));
+		goto failAlloc;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc,
+			(void **)&paui32AlignChecks);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel addr for alignment checks (%u)",
+				eError));
+		goto failAqCpuAddr;
+	}
+
+	/* Copy the values */
+	*paui32AlignChecks++ = ARRAY_SIZE(aui32RGXFWAlignChecksKM);
+	OSDeviceMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0], sizeof(aui32RGXFWAlignChecksKM));
+	paui32AlignChecks += ARRAY_SIZE(aui32RGXFWAlignChecksKM);
+
+	*paui32AlignChecks = 0;
+
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWAlignChecksMemDesc,
+			0,
+			ui32RGXFWAlingChecksTotal,
+			PDUMP_FLAGS_CONTINUOUS);
+
+	/* Prepare the pointer for the fw to access that memory */
+	RGXSetFirmwareAddress(psAlignChecksDevFW,
+			psDevInfo->psRGXFWAlignChecksMemDesc,
+			0, RFW_FWADDR_NOREF_FLAG);
+
+	return PVRSRV_OK;
+
+
+	failAqCpuAddr:
+	DevmemFwFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc);
+	psDevInfo->psRGXFWAlignChecksMemDesc = NULL;
+	failAlloc:
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static void RGXFWFreeAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+	if (psDevInfo->psRGXFWAlignChecksMemDesc != NULL)
+	{
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc);
+		psDevInfo->psRGXFWAlignChecksMemDesc = NULL;
+	}
+}
+#endif
+
+static void
+RGXVzDevMemFreeGuestFwHeap(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32OSID)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_VZ_RETN_IF_NOT_MODE(DRIVER_MODE_HOST);
+
+	if (!ui32OSID || ui32OSID >= RGXFW_NUM_OS)
+	{
+		/* Guest OSID(s) range [1 up to (RGXFW_NUM_OS-1)] */
+		PVR_DPF((PVR_DBG_ERROR,
+				"Deallocating guest fw heap with invalid OSID:%u, MAX:%u",
+				ui32OSID, RGXFW_NUM_OS - 1));
+		return;
+	}
+
+	if (psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID])
+	{
+		psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+		DevmemReleaseDevVirtAddr(psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
+		DevmemFree(psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
+		psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID] = NULL;
+	}
+
+	if (psDevInfo->psGuestFirmwareMainMemDesc[ui32OSID])
+	{
+		psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+		DevmemReleaseDevVirtAddr(psDevInfo->psGuestFirmwareMainMemDesc[ui32OSID]);
+		DevmemFree(psDevInfo->psGuestFirmwareMainMemDesc[ui32OSID]);
+		psDevInfo->psGuestFirmwareMainMemDesc[ui32OSID] = NULL;
+	}
+
+	if (psDevInfo->psGuestFirmwareConfigMemDesc[ui32OSID])
+	{
+		psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+		DevmemReleaseDevVirtAddr(psDevInfo->psGuestFirmwareConfigMemDesc[ui32OSID]);
+		DevmemFree(psDevInfo->psGuestFirmwareConfigMemDesc[ui32OSID]);
+		psDevInfo->psGuestFirmwareConfigMemDesc[ui32OSID] = NULL;
+	}
+}
+
+static PVRSRV_ERROR
+RGXVzDevMemAllocateGuestFwHeap(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32OSID)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR szHeapName[32];
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	IMG_UINT32 ui32CacheLineSize =
+		GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS));
+	IMG_UINT32 ui32FwHeapAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+									  PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+									  PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+									  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+									  PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+									  PVRSRV_MEMALLOCFLAG_UNCACHED |
+									  PVRSRV_MEMALLOCFLAG_FW_LOCAL |
+									  PVRSRV_MEMALLOCFLAG_FW_GUEST;
+
+	/*
+	 * This is called by the host driver only; it pre-allocates and maps
+	 * all guest firmware physheaps into the firmware kernel memory
+	 * context. The call therefore fails if an invalid OSID (the host
+	 * OSID, or one outside the valid range) is supplied, as that would
+	 * indicate an internal error.
+	 */
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_ERROR_INTERNAL_ERROR);
+	if (!ui32OSID || ui32OSID >= RGXFW_NUM_OS)
+	{
+		/* Guest OSID(s) range [1 up to (RGXFW_NUM_OS-1)] */
+		PVR_DPF((PVR_DBG_ERROR,
+				"Allocating guest fw heap with invalid OSID:%u, MAX:%u",
+				ui32OSID, RGXFW_NUM_OS - 1));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto fail;
+	}
+
+	PDUMPCOMMENT("Mapping firmware physheaps for OSID: [%d]", ui32OSID);
+
+	SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig,
+						   PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+						   &eHeapOrigin);
+
+	if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+	{
+		/* Target OSID physheap for allocation */
+		psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+
+		OSSNPrintf(szHeapName, sizeof(szHeapName), "GuestFirmwareConfig%d", ui32OSID);
+		/* This allocates the memory for guest Fw Config heap */
+		eError = DevmemAllocate(psDevInfo->psGuestFirmwareRawHeap[ui32OSID],
+								RGX_FIRMWARE_CONFIG_HEAP_SIZE,
+								ui32CacheLineSize,
+								ui32FwHeapAllocFlags | PVRSRV_MEMALLOCFLAG_FW_CONFIG,
+								szHeapName,
+								&psDevInfo->psGuestFirmwareConfigMemDesc[ui32OSID]);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,	"DevmemAllocate() failed for Firmware Config heap (%u)", eError));
+			goto fail;
+		}
+
+		/* If allocation is successful, permanently map this into device */
+		eError = DevmemMapToDevice(psDevInfo->psGuestFirmwareConfigMemDesc[ui32OSID],
+								   psDevInfo->psGuestFirmwareRawHeap[ui32OSID],
+								   &sTmpDevVAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,	"DevmemMapToDevice() failed for Firmware Config heap (%u)", eError));
+			goto fail;
+		}
+
+		/* Target OSID physheap for allocation */
+		psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+
+		OSSNPrintf(szHeapName, sizeof(szHeapName), "GuestFirmwareMain%d", ui32OSID);
+		/* This allocates the memory for guest Fw Main heap */
+		eError = DevmemAllocate(psDevInfo->psGuestFirmwareRawHeap[ui32OSID],
+								RGXGetFwMainHeapSize(psDevInfo),
+								ui32CacheLineSize,
+								ui32FwHeapAllocFlags,
+								szHeapName,
+								&psDevInfo->psGuestFirmwareMainMemDesc[ui32OSID]);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,	"DevmemAllocate() failed for Firmware Main heap (%u)", eError));
+			goto fail;
+		}
+
+		/* If allocation is successful, permanently map this into device */
+		eError = DevmemMapToDevice(psDevInfo->psGuestFirmwareMainMemDesc[ui32OSID],
+								   psDevInfo->psGuestFirmwareRawHeap[ui32OSID],
+								   &sTmpDevVAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,	"DevmemMapToDevice() failed for Firmware Main heap (%u)", eError));
+			goto fail;
+		}
+	}
+	else
+	{
+		/* Target OSID physheap for allocation */
+		psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+
+		OSSNPrintf(szHeapName, sizeof(szHeapName), "GuestFirmwareRaw%d", ui32OSID);
+		/* This allocates the memory for guest Fw Raw heap */
+		eError = DevmemAllocate(psDevInfo->psGuestFirmwareRawHeap[ui32OSID],
+								RGX_FIRMWARE_RAW_HEAP_SIZE,
+								ui32CacheLineSize,
+								ui32FwHeapAllocFlags,
+								szHeapName,
+								&psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,	"DevmemAllocate() failed for Firmware Raw heap (%u)", eError));
+			goto fail;
+		}
+
+		/* If allocation is successful, permanently map this into device */
+		eError = DevmemMapToDevice(psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID],
+					   psDevInfo->psGuestFirmwareRawHeap[ui32OSID],
+					   &sTmpDevVAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,	"DevmemMapToDevice() failed for Firmware Raw heap (%u)", eError));
+			goto fail;
+		}
+	}
+
+	return eError;
+
+fail:
+	RGXVzDevMemFreeGuestFwHeap(psDeviceNode, ui32OSID);
+
+	return eError;
+}
+
+static PVRSRV_ERROR RGXVzSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_OK);
+
+	eError = SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig, eHeapType, &eHeapOrigin);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if (RGXFW_GUEST_OSID_START < RGXFW_NUM_OS)
+	if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+	{
+		IMG_UINT32 ui32OSID;
+
+		/* Guest OSID(s) in range [RGXFW_GUEST_OSID_START up to (RGXFW_NUM_OS-1)] */
+		for (ui32OSID = RGXFW_GUEST_OSID_START; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+		{
+			eError = RGXVzDevMemAllocateGuestFwHeap(psDeviceNode, ui32OSID);
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+	}
+#endif
+
+	return eError;
+}
+
+static void
+RGXVzFreeFirmware(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	PVRSRV_VZ_RETN_IF_NOT_MODE(DRIVER_MODE_HOST);
+
+	eError = SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig, eHeapType, &eHeapOrigin);
+	if (eError != PVRSRV_OK)
+	{
+		return;
+	}
+
+#if (RGXFW_GUEST_OSID_START < RGXFW_NUM_OS)
+	if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+	{
+		IMG_UINT32 ui32OSID;
+
+		/* Guest OSID(s) in range [RGXFW_GUEST_OSID_START up to (RGXFW_NUM_OS-1)] */
+		for (ui32OSID = RGXFW_GUEST_OSID_START; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+		{
+			RGXVzDevMemFreeGuestFwHeap(psDeviceNode, ui32OSID);
+		}
+	}
+#endif
+}
+
+PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR	*ppDest,
+		DEVMEM_MEMDESC		*psSrc,
+		IMG_UINT32			uiExtraOffset,
+		IMG_UINT32			ui32Flags)
+{
+	PVRSRV_ERROR		eError;
+	IMG_DEV_VIRTADDR	psDevVirtAddr;
+	PVRSRV_DEVICE_NODE	*psDeviceNode;
+	PVRSRV_RGXDEV_INFO	*psDevInfo;
+
+	psDeviceNode = (PVRSRV_DEVICE_NODE *) DevmemGetConnection(psSrc);
+	psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+	{
+		IMG_UINT32	    ui32Offset;
+		IMG_BOOL            bCachedInMETA;
+		DEVMEM_FLAGS_T      uiDevFlags;
+		IMG_UINT32          uiGPUCacheMode;
+
+		eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		/* Convert to an address in META memmap */
+		ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE;
+
+		/* Check in the devmem flags whether this memory is cached/uncached */
+		DevmemGetFlags(psSrc, &uiDevFlags);
+
+		/* Honour the META cache flags */
+		bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0;
+
+		/* Honour the SLC cache flags */
+		eError = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags, &uiGPUCacheMode);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		ui32Offset += RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+		if (bCachedInMETA)
+		{
+			ui32Offset |= RGXFW_SEGMMU_DATA_META_CACHED;
+		}
+		else
+		{
+			ui32Offset |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+		}
+
+		if (PVRSRV_CHECK_GPU_CACHED(uiGPUCacheMode))
+		{
+			ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED;
+		}
+		else
+		{
+			ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED;
+		}
+		ppDest->ui32Addr = ui32Offset;
+	}
+	else
+	{
+		eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+		PVR_ASSERT(eError == PVRSRV_OK);
+		ppDest->ui32Addr = (IMG_UINT32)((psDevVirtAddr.uiAddr + uiExtraOffset) & 0xFFFFFFFF);
+	}
+
+	if ((ppDest->ui32Addr & 0x3U) != 0)
+	{
+		IMG_CHAR *pszAnnotation;
+
+		if (PVRSRV_OK == DevmemGetAnnotation(psSrc, &pszAnnotation))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: %s @ 0x%x is not aligned to 32 bit",
+					 __func__, pszAnnotation, ppDest->ui32Addr));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: 0x%x is not aligned to 32 bit",
+					 __func__, ppDest->ui32Addr));
+		}
+
+		return PVRSRV_ERROR_INVALID_ALIGNMENT;
+	}
+
+	if (ui32Flags & RFW_FWADDR_NOREF_FLAG)
+	{
+		DevmemReleaseDevVirtAddr(psSrc);
+	}
+
+	return PVRSRV_OK;
+}
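+
+/* Worked example for the META path above (illustrative numbers): an
+ * allocation at device VA (RGX_FIRMWARE_RAW_HEAP_BASE + 0x1000) with
+ * uiExtraOffset 0x10 yields ui32Offset = 0x1010; RGXFW_SEGMMU_DATA_BASE_ADDRESS
+ * is then added and the META/SLC cache attribute bits are OR-ed in, producing
+ * the 32-bit address the firmware uses within its own memory map.
+ */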
+
+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR		*psDest,
+		DEVMEM_MEMDESC		*psSrcMemDesc,
+		RGXFWIF_DEV_VIRTADDR	*psSrcFWDevVAddr,
+		IMG_UINT32			uiOffset)
+{
+	PVRSRV_ERROR		eError;
+	IMG_DEV_VIRTADDR	sDevVirtAddr;
+
+	eError = DevmemAcquireDevVirtAddr(psSrcMemDesc, &sDevVirtAddr);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	psDest->psDevVirtAddr.uiAddr = sDevVirtAddr.uiAddr;
+	psDest->psDevVirtAddr.uiAddr += uiOffset;
+	psDest->pbyFWAddr.ui32Addr = psSrcFWDevVAddr->ui32Addr;
+
+	DevmemReleaseDevVirtAddr(psSrcMemDesc);
+}
+
+
+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc)
+{
+	DevmemReleaseDevVirtAddr(psSrc);
+}
+
+struct _RGX_SERVER_COMMON_CONTEXT_ {
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	DEVMEM_MEMDESC *psFWCommonContextMemDesc;
+	PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr;
+	DEVMEM_MEMDESC *psFWMemContextMemDesc;
+	DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+	DEVMEM_MEMDESC *psContextStateMemDesc;
+	RGX_CLIENT_CCB *psClientCCB;
+	DEVMEM_MEMDESC *psClientCCBMemDesc;
+	DEVMEM_MEMDESC *psClientCCBCtrlMemDesc;
+	IMG_BOOL bCommonContextMemProvided;
+	IMG_UINT32 ui32ContextID;
+	DLLIST_NODE sListNode;
+	RGXFWIF_CONTEXT_RESET_REASON eLastResetReason;
+	IMG_UINT32 ui32LastResetJobRef;
+};
+
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+		PVRSRV_DEVICE_NODE *psDeviceNode,
+		RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+		RGXFWIF_DM eDM,
+		DEVMEM_MEMDESC *psAllocatedMemDesc,
+		IMG_UINT32 ui32AllocatedOffset,
+		DEVMEM_MEMDESC *psFWMemContextMemDesc,
+		DEVMEM_MEMDESC *psContextStateMemDesc,
+		IMG_UINT32 ui32CCBAllocSizeLog2,
+		IMG_UINT32 ui32CCBMaxAllocSizeLog2,
+		IMG_UINT32 ui32Priority,
+		RGX_COMMON_CONTEXT_INFO *psInfo,
+		RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+	RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext;
+	IMG_UINT32 ui32FWCommonContextOffset;
+	IMG_UINT8 *pui8Ptr;
+	PVRSRV_ERROR eError;
+
+	/*
+	 * Allocate all the resources that are required
+	 */
+	psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext));
+	if (psServerCommonContext == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	psServerCommonContext->psDevInfo = psDevInfo;
+
+	if (psAllocatedMemDesc)
+	{
+		PDUMPCOMMENT("Using existing MemDesc for Rogue firmware %s context (offset = %d)",
+				aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+				ui32AllocatedOffset);
+		ui32FWCommonContextOffset = ui32AllocatedOffset;
+		psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc;
+		psServerCommonContext->bCommonContextMemProvided = IMG_TRUE;
+	}
+	else
+	{
+		/* Allocate device memory for the firmware context */
+		PDUMPCOMMENT("Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
+		eError = DevmemFwAllocate(psDevInfo,
+				sizeof(*psFWCommonContext),
+				RGX_FWCOMCTX_ALLOCFLAGS,
+				"FwContext",
+				&psServerCommonContext->psFWCommonContextMemDesc);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate firmware %s context (%s)",
+					__func__,
+					aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+					PVRSRVGetErrorString(eError)));
+			goto fail_contextalloc;
+		}
+		ui32FWCommonContextOffset = 0;
+		psServerCommonContext->bCommonContextMemProvided = IMG_FALSE;
+	}
+
+	/* Record this context so we can refer to it if the FW needs to tell us it was reset. */
+	psServerCommonContext->eLastResetReason    = RGXFWIF_CONTEXT_RESET_REASON_NONE;
+	psServerCommonContext->ui32LastResetJobRef = 0;
+	psServerCommonContext->ui32ContextID       = psDevInfo->ui32CommonCtxtCurrentID++;
+
+	/*
+	 * Temporarily map the firmware context to the kernel and init it
+	 */
+	eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc,
+			(void **)&pui8Ptr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware %s context (%s)to CPU",
+				__func__,
+				aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+				PVRSRVGetErrorString(eError)));
+		goto fail_cpuvirtacquire;
+	}
+
+	/* Allocate the client CCB */
+	eError = RGXCreateCCB(psDevInfo,
+			ui32CCBAllocSizeLog2,
+			ui32CCBMaxAllocSizeLog2,
+			psConnection,
+			eRGXCCBRequestor,
+			psServerCommonContext,
+			&psServerCommonContext->psClientCCB,
+			&psServerCommonContext->psClientCCBMemDesc,
+			&psServerCommonContext->psClientCCBCtrlMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: failed to create CCB for %s context(%s)",
+				__func__,
+				aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+				PVRSRVGetErrorString(eError)));
+		goto fail_allocateccb;
+	}
+
+	psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset);
+	psFWCommonContext->eDM = eDM;
+
+	/* Set the firmware CCB device addresses in the firmware common context */
+	RGXSetFirmwareAddress(&psFWCommonContext->psCCB,
+			psServerCommonContext->psClientCCBMemDesc,
+			0, RFW_FWADDR_FLAG_NONE);
+	RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl,
+			psServerCommonContext->psClientCCBCtrlMemDesc,
+			0, RFW_FWADDR_FLAG_NONE);
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+	{
+		RGXSetMetaDMAAddress(&psFWCommonContext->sCCBMetaDMAAddr,
+				psServerCommonContext->psClientCCBMemDesc,
+				&psFWCommonContext->psCCB,
+				0);
+	}
+
+	/* Set the memory context device address */
+	psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+	RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext,
+			psFWMemContextMemDesc,
+			0, RFW_FWADDR_FLAG_NONE);
+
+	/* Set the framework register updates address */
+	psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc;
+	if (psInfo->psFWFrameworkMemDesc != NULL)
+	{
+		RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd,
+				psInfo->psFWFrameworkMemDesc,
+				0, RFW_FWADDR_FLAG_NONE);
+	}
+	else
+	{
+		/* This should never be touched in contexts without a framework
+		 * memdesc, but ensure it is zero so we see crashes if it is.
+		 */
+		psFWCommonContext->psRFCmd.ui32Addr = 0;
+	}
+
+	psFWCommonContext->ui32Priority = ui32Priority;
+	psFWCommonContext->ui32PrioritySeqNum = 0;
+
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) &&
+			(RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT) == 2) && \
+			(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIGNAL_SNOOPING)))
+	{
+		if (eDM == RGXFWIF_DM_CDM)
+		{
+			if (psInfo->psResumeSignalAddr != NULL)
+			{
+				psFWCommonContext->ui64ResumeSignalAddr = psInfo->psResumeSignalAddr->uiAddr;
+			}
+		}
+	}
+
+	/* Store references to the Server Common Context and PID for notifications back from the FW. */
+	psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID;
+	psFWCommonContext->ui32PID                   = OSGetCurrentClientProcessIDKM();
+
+	/* Set the firmware GPU context state buffer */
+	psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc;
+	if (psContextStateMemDesc)
+	{
+		RGXSetFirmwareAddress(&psFWCommonContext->psContextState,
+				psContextStateMemDesc,
+				0,
+				RFW_FWADDR_FLAG_NONE);
+	}
+
+	/*
+	 * Dump the created context
+	 */
+	PDUMPCOMMENT("Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
+	DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc,
+			ui32FWCommonContextOffset,
+			sizeof(*psFWCommonContext),
+			PDUMP_FLAGS_CONTINUOUS);
+
+	/* We've finished the setup so release the CPU mapping */
+	DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+
+	/* Map this allocation into the FW */
+	RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr,
+			psServerCommonContext->psFWCommonContextMemDesc,
+			ui32FWCommonContextOffset,
+			RFW_FWADDR_FLAG_NONE);
+
+#if defined(LINUX)
+	{
+		IMG_UINT32 ui32FWAddr;
+		switch (eDM) {
+		case RGXFWIF_DM_TA:
+			ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
+					psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext));
+			break;
+		case RGXFWIF_DM_3D:
+			ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
+					psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext));
+			break;
+		default:
+			ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr;
+			break;
+		}
+
+		trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(),
+				aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+				ui32FWAddr);
+	}
+#endif
+	/* Add the node to the list when finalised */
+	OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock);
+	dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock);
+
+	*ppsServerCommonContext = psServerCommonContext;
+	return PVRSRV_OK;
+
+	fail_allocateccb:
+	DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+	fail_cpuvirtacquire:
+	if (!psServerCommonContext->bCommonContextMemProvided)
+	{
+		DevmemFwFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc);
+		psServerCommonContext->psFWCommonContextMemDesc = NULL;
+	}
+	fail_contextalloc:
+	OSFreeMem(psServerCommonContext);
+	fail_alloc:
+	return eError;
+}
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+
+	OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
+	/* Remove the context from the list of all contexts. */
+	dllist_remove_node(&psServerCommonContext->sListNode);
+	OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
+
+	/*
+		Unmap the context itself and then all its resources
+	 */
+
+	/* Unmap the FW common context */
+	RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
+	/* Unmap the context state buffer (if there was one) */
+	if (psServerCommonContext->psContextStateMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc);
+	}
+	/* Unmap the framework buffer */
+	if (psServerCommonContext->psFWFrameworkMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc);
+	}
+	/* Unmap client CCB and CCB control */
+	RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
+	RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
+	/* Unmap the memory context */
+	RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc);
+
+	/* Destroy the client CCB */
+	RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB);
+
+
+	/* Free the FW common context (if there was one) */
+	if (!psServerCommonContext->bCommonContextMemProvided)
+	{
+		DevmemFwFree(psServerCommonContext->psDevInfo,
+				psServerCommonContext->psFWCommonContextMemDesc);
+		psServerCommonContext->psFWCommonContextMemDesc = NULL;
+	}
+	/* Free the host's representation of the common context */
+	OSFreeMem(psServerCommonContext);
+}
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+	return psServerCommonContext->sFWCommonContextFWAddr;
+}
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+	return psServerCommonContext->psClientCCB;
+}
+
+RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+		IMG_UINT32 *pui32LastResetJobRef)
+{
+	RGXFWIF_CONTEXT_RESET_REASON eLastResetReason;
+
+	PVR_ASSERT(psServerCommonContext != NULL);
+	PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+	/* Take the most recent reason & job ref and reset for next time... */
+	eLastResetReason      = psServerCommonContext->eLastResetReason;
+	*pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef;
+	psServerCommonContext->eLastResetReason = RGXFWIF_CONTEXT_RESET_REASON_NONE;
+	psServerCommonContext->ui32LastResetJobRef = 0;
+
+	if (eLastResetReason == RGXFWIF_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH)
+	{
+		PVR_DPF((PVR_DBG_WARNING,"A Hard Context Switch was triggered on the GPU to ensure Quality of Service."));
+	}
+
+	return eLastResetReason;
+}
+
+PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+	return psServerCommonContext->psDevInfo;
+}
+
+/*!
+ *******************************************************************************
+ @Function		RGXFreeCCB
+ @Description	Free the kernel or firmware CCB
+ @Input			psDevInfo
+ @Input			ppsCCBCtl
+ @Input			ppsCCBCtlMemDesc
+ @Input			ppui8CCB
+ @Input			ppsCCBMemDesc
+ ******************************************************************************/
+static void RGXFreeCCB(PVRSRV_RGXDEV_INFO	*psDevInfo,
+					   RGXFWIF_CCB_CTL		**ppsCCBCtl,
+					   DEVMEM_MEMDESC		**ppsCCBCtlMemDesc,
+					   IMG_UINT8			**ppui8CCB,
+					   DEVMEM_MEMDESC		**ppsCCBMemDesc)
+{
+	if (*ppsCCBMemDesc != NULL)
+	{
+		if (*ppui8CCB != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(*ppsCCBMemDesc);
+			*ppui8CCB = NULL;
+		}
+		DevmemFwFree(psDevInfo, *ppsCCBMemDesc);
+		*ppsCCBMemDesc = NULL;
+	}
+	if (*ppsCCBCtlMemDesc != NULL)
+	{
+		if (*ppsCCBCtl != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(*ppsCCBCtlMemDesc);
+			*ppsCCBCtl = NULL;
+		}
+		DevmemFwFree(psDevInfo, *ppsCCBCtlMemDesc);
+		*ppsCCBCtlMemDesc = NULL;
+	}
+}
+
+/*!
+ *******************************************************************************
+ @Function		RGXSetupCCB
+ @Description	Allocate and initialise the kernel CCB
+ @Input			psDevInfo
+ @Input			ppsCCBCtl
+ @Input			ppsCCBCtlMemDesc
+ @Input			ppui8CCB
+ @Input			ppsCCBMemDesc
+ @Input			psCCBCtlFWAddr
+ @Input			psCCBFWAddr
+ @Input			ui32NumCmdsLog2
+ @Input			ui32CmdSize
+ @Input			uiCCBMemAllocFlags
+ @Input			pszName
+
+ @Return		PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO	*psDevInfo,
+								RGXFWIF_CCB_CTL		**ppsCCBCtl,
+								DEVMEM_MEMDESC		**ppsCCBCtlMemDesc,
+								IMG_UINT8			**ppui8CCB,
+								DEVMEM_MEMDESC		**ppsCCBMemDesc,
+								PRGXFWIF_CCB_CTL	*psCCBCtlFWAddr,
+								PRGXFWIF_CCB		*psCCBFWAddr,
+								IMG_UINT32			ui32NumCmdsLog2,
+								IMG_UINT32			ui32CmdSize,
+								DEVMEM_FLAGS_T		uiCCBMemAllocFlags,
+								const IMG_CHAR		*pszName)
+{
+	const IMG_UINT32	ui32MaxInputStrSize	= 13;
+	const IMG_UINT32	ui32AppendStrSize	= 7;
+	const IMG_UINT32	ui32MaxTotalStrSize	= ui32MaxInputStrSize + ui32AppendStrSize + 1;
+	const IMG_CHAR		sAppend[] = "Control";
+	PVRSRV_ERROR		eError;
+	RGXFWIF_CCB_CTL		*psCCBCtl;
+	DEVMEM_FLAGS_T		uiCCBCtlMemAllocFlags;
+	IMG_UINT32		ui32CCBSize = (1U << ui32NumCmdsLog2);
+	IMG_CHAR		sCCBCtlName[ui32MaxTotalStrSize];
+
+	PVR_ASSERT(strlen(sAppend) == ui32AppendStrSize);
+	PVR_ASSERT(strlen(pszName) <= ui32MaxInputStrSize);
+
+	uiCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Append "Control" to the name for the control struct. */
+	strcpy(sCCBCtlName, pszName);
+	strncat(sCCBCtlName, sAppend, ui32AppendStrSize);
+
+	/* Allocate memory for the CCB control. */
+	PDUMPCOMMENT("Allocate memory for %s", sCCBCtlName);
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_CCB_CTL),
+			uiCCBCtlMemAllocFlags,
+			sCCBCtlName,
+			ppsCCBCtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate %s (%u)", __func__, sCCBCtlName, eError));
+		goto fail;
+	}
+
+	/*
+	 * Allocate memory for the CCB.
+	 * (this will reference further command data in non-shared CCBs)
+	 */
+	PDUMPCOMMENT("Allocate memory for %s", pszName);
+	eError = DevmemFwAllocate(psDevInfo,
+			ui32CCBSize * ui32CmdSize,
+			uiCCBMemAllocFlags,
+			pszName,
+			ppsCCBMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate %s (%u)", __func__,  pszName, eError));
+		goto fail;
+	}
+
+	/*
+		Map the CCB control to the kernel.
+	 */
+	eError = DevmemAcquireCpuVirtAddr(*ppsCCBCtlMemDesc,
+			(void **)ppsCCBCtl);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire cpu %s (%u)", __func__,  sCCBCtlName, eError));
+		goto fail;
+	}
+
+	/*
+	 * Map the CCB to the kernel.
+	 */
+	eError = DevmemAcquireCpuVirtAddr(*ppsCCBMemDesc,
+			(void **)ppui8CCB);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire cpu %s (%u)", __func__,  pszName, eError));
+		goto fail;
+	}
+
+	/*
+	 * Initialise the CCB control.
+	 */
+	psCCBCtl = *ppsCCBCtl;
+	psCCBCtl->ui32WriteOffset = 0;
+	psCCBCtl->ui32ReadOffset = 0;
+	psCCBCtl->ui32WrapMask = ui32CCBSize - 1;
+	psCCBCtl->ui32CmdSize = ui32CmdSize;
+
+	/* Set up the FW-visible addresses used to access this CCB and its control structure */
+	RGXSetFirmwareAddress(psCCBCtlFWAddr,
+			*ppsCCBCtlMemDesc,
+			0, RFW_FWADDR_NOREF_FLAG);
+
+	RGXSetFirmwareAddress(psCCBFWAddr,
+			*ppsCCBMemDesc,
+			0, RFW_FWADDR_NOREF_FLAG);
+
+	/* Pdump the CCB control */
+	PDUMPCOMMENT("Initialise %s", sCCBCtlName);
+	DevmemPDumpLoadMem(*ppsCCBCtlMemDesc,
+			0,
+			sizeof(RGXFWIF_CCB_CTL),
+			0);
+
+	return PVRSRV_OK;
+
+	fail:
+	RGXFreeCCB(psDevInfo,
+			ppsCCBCtl,
+			ppsCCBCtlMemDesc,
+			ppui8CCB,
+			ppsCCBMemDesc);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
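+
+/* Because the CCB size is a power of two, offsets wrap with a cheap AND
+ * against ui32WrapMask instead of a modulo. Sketch of how a producer would
+ * claim and publish the next slot (pvCmd is a hypothetical command buffer;
+ * real callers also need memory barriers and PDump handling):
+ */
+#if 0
+IMG_UINT32 ui32Next = (psCCBCtl->ui32WriteOffset + 1) & psCCBCtl->ui32WrapMask;
+
+if (ui32Next != psCCBCtl->ui32ReadOffset)	/* at least one free slot */
+{
+	OSDeviceMemCopy(pui8CCB + (psCCBCtl->ui32WriteOffset * psCCBCtl->ui32CmdSize),
+	                pvCmd, psCCBCtl->ui32CmdSize);
+	psCCBCtl->ui32WriteOffset = ui32Next;	/* publish after the copy */
+}
+#endif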
+
+static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PMR *psPMR;
+
+	if (psDevInfo->psRGXFaultAddressMemDesc)
+	{
+		if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc,(void **)&psPMR) == PVRSRV_OK)
+		{
+			PMRUnlockSysPhysAddresses(psPMR);
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
+		psDevInfo->psRGXFaultAddressMemDesc = NULL;
+	}
+}
+
+static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE	*psDeviceNode, RGXFWIF_INIT *psRGXFWInit)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	IMG_UINT32			*pui32MemoryVirtAddr;
+	IMG_UINT32			i;
+	size_t				ui32PageSize = OSGetPageSize();
+	DEVMEM_FLAGS_T		uiMemAllocFlags;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	PMR					*psPMR;
+
+	/* Allocate a page of memory to use for page faults on non-blocking memory transactions */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED;
+
+	psDevInfo->psRGXFaultAddressMemDesc = NULL;
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+			ui32PageSize,
+			ui32PageSize,
+			uiMemAllocFlags,
+			"FwExFaultAddress",
+			&psDevInfo->psRGXFaultAddressMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to allocate mem for fault address (%u)",
+				eError));
+		goto failFaultAddressDescAlloc;
+	}
+
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc,
+			(void **)&pui32MemoryVirtAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire mem for fault address (%u)",
+				eError));
+		goto failFaultAddressDescAqCpuVirt;
+	}
+
+	for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++)
+	{
+		*(pui32MemoryVirtAddr + i) = 0xDEADBEE0;
+	}
+
+	eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc,(void **)&psPMR);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Error getting PMR for fault address (%u)",
+				eError));
+
+		goto failFaultAddressDescGetPMR;
+	}
+	else
+	{
+		IMG_BOOL bValid;
+		IMG_UINT32 ui32Log2PageSize = OSGetPageShift();
+
+		eError = PMRLockSysPhysAddresses(psPMR);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Error locking physical address for fault address MemDesc (%u)",
+					eError));
+
+			goto failFaultAddressDescLockPhys;
+		}
+
+		eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize,1,0,&(psRGXFWInit->sFaultPhysAddr),&bValid);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Error getting physical address for fault address MemDesc (%u)",
+					eError));
+
+			goto failFaultAddressDescGetPhys;
+		}
+
+		if (!bValid)
+		{
+			psRGXFWInit->sFaultPhysAddr.uiAddr = 0;
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed getting physical address for fault address MemDesc - invalid page (0x%" IMG_UINT64_FMTSPECX ")",
+					psRGXFWInit->sFaultPhysAddr.uiAddr));
+
+			goto failFaultAddressDescGetPhys;
+		}
+	}
+
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);
+
+	return PVRSRV_OK;
+
+	failFaultAddressDescGetPhys:
+	PMRUnlockSysPhysAddresses(psPMR);
+
+	failFaultAddressDescLockPhys:
+
+	failFaultAddressDescGetPMR:
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);
+
+	failFaultAddressDescAqCpuVirt:
+	DevmemFwFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
+	psDevInfo->psRGXFaultAddressMemDesc = NULL;
+
+	failFaultAddressDescAlloc:
+
+	return eError;
+}
+
+#if defined(PDUMP)
+/* Replace the DevPhys address with the one PDump allocates at PDump player run time */
+static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PVRSRV_ERROR eError;
+	PMR *psFWInitPMR, *psFaultAddrPMR;
+	IMG_UINT32 ui32Dstoffset;
+
+	psFWInitPMR = (PMR *)(psDevInfo->psRGXFWIfInitMemDesc->psImport->hPMR);
+	ui32Dstoffset = psDevInfo->psRGXFWIfInitMemDesc->uiOffset + offsetof(RGXFWIF_INIT, sFaultPhysAddr.uiAddr);
+
+	psFaultAddrPMR = (PMR *)(psDevInfo->psRGXFaultAddressMemDesc->psImport->hPMR);
+
+	eError = PDumpMemLabelToMem64(psFaultAddrPMR,
+			psFWInitPMR,
+			0,
+			ui32Dstoffset,
+			PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXPDumpFaultReadRegister: Dump of Fault Page Phys address failed(%u)", eError));
+	}
+	return eError;
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       RGXTBIBufferIsInitRequired
+
+@Description    Returns true if the firmware TBI buffer is not yet allocated
+		and might soon be required by the firmware. The TBI buffer is
+		allocated on demand to reduce the RAM footprint on systems
+		not needing TBI.
+
+@Input          psDevInfo	 RGX device info
+
+@Return		IMG_BOOL	Whether on-demand allocation(s) is/are needed
+				or not
+ */ /**************************************************************************/
+INLINE IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+	/* The firmware expects a TBI buffer only when:
+	 *	- Logtype is "tbi" AND
+	 *	- at least one LogGroup is configured
+	 */
+	if ((psDevInfo->psRGXFWIfTBIBufferMemDesc == NULL)
+			&& (psTraceBufCtl->ui32LogType & ~RGXFWIF_LOG_TYPE_TRACE)
+			&& (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK))
+	{
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTBIBufferDeinit
+
+@Description    Deinitialises all the allocations and references that are made
+		for the FW tbi buffer
+
+@Input          psDevInfo	 RGX device info
+@Return		void
+ */ /**************************************************************************/
+static void RGXTBIBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfTBIBufferMemDesc);
+	psDevInfo->psRGXFWIfTBIBufferMemDesc = NULL;
+	psDevInfo->ui32FWIfTBIBufferSize = 0;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTBIBufferInitOnDemandResources
+
+@Description    Allocates the firmware TBI buffer required for reading the SF
+		strings and initialises it with the SFs.
+
+@Input          psDevInfo	 RGX device info
+
+@Return		PVRSRV_OK	If all went well, PVRSRV_ERROR otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	DEVMEM_FLAGS_T     uiMemAllocFlags;
+	PVRSRV_ERROR       eError = PVRSRV_OK;
+	IMG_UINT32         i, ui32Len;
+	const IMG_UINT32   ui32NumFWTBIEntries = sizeof(SFs) / sizeof(SFs[0]);
+	const IMG_UINT32   ui32FWTBIBufsize = ui32NumFWTBIEntries * sizeof(RGXFW_STID_FMT);
+	RGXFW_STID_FMT     *psFW_SFs = NULL;
+
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate rgxfw tbi buffer");
+	eError = DevmemFwAllocate(psDevInfo,
+			ui32FWTBIBufsize,
+			uiMemAllocFlags,
+			"FwTBIBuffer",
+			&psDevInfo->psRGXFWIfTBIBufferMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate %u bytes for fw TBI buffer (Error code:%u)",
+				__func__,
+				ui32FWTBIBufsize,
+				eError));
+		goto fail;
+	}
+
+	/* The firmware address should not already be set */
+	if (psDevInfo->sRGXFWIfTBIBuffer.ui32Addr)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: FW address for FWTBI is already set. Overwriting it with the newly allocated one", __func__));
+	}
+
+	/* for the FW to use this address when reading strings from tbi buffer */
+	RGXSetFirmwareAddress(&psDevInfo->sRGXFWIfTBIBuffer,
+			psDevInfo->psRGXFWIfTBIBufferMemDesc,
+			0, RFW_FWADDR_NOREF_FLAG);
+
+	/* Set an address for the host to be able to write SFs strings in buffer */
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfTBIBufferMemDesc,
+			(void **)&psFW_SFs);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel tbibuf ctl (Error code: %u)",
+				__func__, eError));
+		goto fail;
+	}
+
+	/* Copy SFs entries to FW buffer */
+	for (i = 0; i < ui32NumFWTBIEntries; i++)
+	{
+		OSDeviceMemCopy(&psFW_SFs[i].ui32Id, &SFs[i].ui32Id, sizeof(SFs[i].ui32Id));
+		ui32Len = OSStringLength(SFs[i].psName);
+		OSDeviceMemCopy(psFW_SFs[i].sName, SFs[i].psName, MIN(ui32Len, IMG_SF_STRING_MAX_SIZE - 1));
+	}
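+	/* Note (added for clarity): each name is truncated to at most
+	 * IMG_SF_STRING_MAX_SIZE - 1 characters; the terminating NUL is
+	 * guaranteed by PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, which zero-fills
+	 * the buffer before the copies above. */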
+
+	/* Set size of TBI buffer */
+	psDevInfo->ui32FWIfTBIBufferSize = ui32FWTBIBufsize;
+
+	/* release CPU mapping */
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTBIBufferMemDesc);
+
+	return PVRSRV_OK;
+fail:
+	RGXTBIBufferDeinit(psDevInfo);
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferIsInitRequired
+
+@Description    Returns true if the firmware trace buffer is not yet allocated
+		and might be required by the firmware soon. The trace buffer
+		is allocated on-demand to reduce the RAM footprint on systems
+		not needing firmware trace.
+
+@Input          psDevInfo	 RGX device info
+
+@Return		IMG_BOOL	Whether an on-demand allocation is needed
+ */ /**************************************************************************/
+INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+	/* The firmware expects a trace buffer only when:
+	 *	- Logtype is "trace" AND
+	 *	- at least one LogGroup is configured AND
+	 *	- the Driver Mode is not Guest
+	 */
+	if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL)
+			&& (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)
+			&& (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+			&& !PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferDeinit
+
+@Description    Deinitialises all the allocations and references that are made
+		for the FW trace buffer(s)
+
+@Input          psDevInfo	 RGX device info
+@Return		void
+ */ /**************************************************************************/
+static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+	IMG_UINT32 i;
+
+	for (i = 0; i < RGXFW_THREAD_NUM; i++)
+	{
+		if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i])
+		{
+			if (psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer != NULL)
+			{
+				DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+				psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer = NULL;
+			}
+
+			DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+			psDevInfo->psRGXFWIfTraceBufferMemDesc[i] = NULL;
+		}
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferInitOnDemandResources
+
+@Description    Allocates the firmware trace buffer required for dumping trace
+		info from the firmware.
+
+@Input          psDevInfo	 RGX device info
+
+@Return		PVRSRV_OK	On success, a PVRSRV_ERROR code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+	DEVMEM_FLAGS_T     uiMemAllocFlags;
+	PVRSRV_ERROR       eError = PVRSRV_OK;
+	IMG_UINT32         ui32FwThreadNum;
+	IMG_UINT32         ui32DefaultTraceBufSize;
+	IMG_DEVMEM_SIZE_T  uiTraceBufSizeInBytes;
+	void               *pvAppHintState = NULL;
+
+	/* Check AppHint value for module-param FWTraceBufSizeInDWords */
+	OSCreateKMAppHintState(&pvAppHintState);
+	ui32DefaultTraceBufSize = RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
+	OSGetKMAppHintUINT32(pvAppHintState,
+	                     FWTraceBufSizeInDWords,
+	                     &ui32DefaultTraceBufSize,
+	                     &psTraceBufCtl->ui32TraceBufSizeInDWords);
+	OSFreeKMAppHintState(pvAppHintState);
+	pvAppHintState = NULL;
+
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	uiTraceBufSizeInBytes = psTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32);
+
+	for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++)
+	{
+		/* Ensure allocation API is only called when not already allocated */
+		PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum] == NULL);
+
+		PDUMPCOMMENT("Allocate rgxfw trace buffer(%u)", ui32FwThreadNum);
+		eError = DevmemFwAllocate(psDevInfo,
+		                          uiTraceBufSizeInBytes,
+		                          uiMemAllocFlags,
+		                          "FwTraceBuffer",
+		                          &psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum]);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate " IMG_DEVMEM_SIZE_FMTSPEC " bytes for fw trace buffer %u (Error code:%u)",
+					__func__,
+					uiTraceBufSizeInBytes,
+					ui32FwThreadNum,
+					eError));
+			goto fail;
+		}
+
+		/* Firmware address should not be already set */
+		PVR_ASSERT(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer.ui32Addr == 0x0);
+
+		/* for the FW to use this address when dumping in log (trace) buffer */
+		RGXSetFirmwareAddress(&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer,
+				psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum],
+				0, RFW_FWADDR_NOREF_FLAG);
+		/* Set an address for the host to be able to read fw trace buffer */
+		eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum],
+				(void **)&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel tracebuf (%u) ctl (Error code: %u)",
+					__func__, ui32FwThreadNum, eError));
+			goto fail;
+		}
+	}
+
+	return PVRSRV_OK;
+fail:
+	RGXTraceBufferDeinit(psDevInfo);
+	return eError;
+}
+
+static PVRSRV_ERROR RGXSetupOSConfig(PVRSRV_RGXDEV_INFO  *psDevInfo,
+		RGXFWIF_INIT        *psRGXFWInit,
+		IMG_UINT32           ui32ConfigFlags,
+		IMG_UINT32           ui32ConfigFlagsExt,
+		RGXFWIF_DEV_VIRTADDR sTracebufCtl,
+		PRGXFWIF_HWRINFOBUF  sRGXFWIfHWRInfoBufCtl)
+{
+	PVRSRV_ERROR       eError = PVRSRV_OK;
+	DEVMEM_FLAGS_T     uiMemAllocFlags;
+	RGXFWIF_OS_CONFIG *psOSConfig;
+
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+
+	PDUMPCOMMENT("Allocate RGXFW_OS_CONFIG structure");
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_OS_CONFIG),
+			uiMemAllocFlags,
+			"FwOSConfigStructure",
+			&psDevInfo->psRGXFWIfOSConfigDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate %u bytes for OS Config (%u)",
+				__func__,
+				(IMG_UINT32)sizeof(RGXFWIF_OS_CONFIG),
+				eError));
+		goto fail1;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInit->sOSConfig,
+			psDevInfo->psRGXFWIfOSConfigDesc,
+			0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfOSConfigDesc,
+			(void **)&psOSConfig);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire OS Config (%u)",
+				__func__,
+				eError));
+		goto fail2;
+	}
+
+	psOSConfig->ui32ConfigFlags    = ui32ConfigFlags & RGXFWIF_INICFG_ALL;
+	psOSConfig->ui32ConfigFlagsExt = ui32ConfigFlagsExt;
+
+	eError = SyncPrimGetFirmwareAddr(psDevInfo->psPowSyncPrim, &psOSConfig->sPowerSync.ui32Addr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to get Sync Prim FW address with error (%u)",
+				__func__, eError));
+		goto fail2;
+	}
+
+	psDevInfo->psFWIfOSConfig = psOSConfig;
+
+	/* Set the Tracebuf and HWRInfoBufCtl offsets */
+	psOSConfig->sTraceBufCtl               = sTracebufCtl;
+	psOSConfig->sRGXFWIfHWRInfoBufCtl      = sRGXFWIfHWRInfoBufCtl;
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Dump initial state of RGXFW_OS_CONFIG structure");
+	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfOSConfigDesc,
+			0,
+			sizeof(RGXFWIF_OS_CONFIG),
+			PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	return PVRSRV_OK;
+
+fail2:
+	DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfOSConfigDesc);
+fail1:
+	return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXSetupFirmware
+
+ @Description
+
+ Sets up all the firmware-related data
+
+ @Input psDeviceNode
+
+ @Return PVRSRV_ERROR
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+		IMG_BOOL                 bEnableSignatureChecks,
+		IMG_UINT32               ui32SignatureChecksBufSize,
+		IMG_UINT32               ui32HWPerfFWBufSizeKB,
+		IMG_UINT64               ui64HWPerfFilter,
+		IMG_UINT32               ui32RGXFWAlignChecksArrLength,
+		IMG_UINT32               *pui32RGXFWAlignChecks,
+		IMG_UINT32               ui32ConfigFlags,
+		IMG_UINT32               ui32ConfigFlagsExt,
+		IMG_UINT32               ui32LogType,
+		RGXFWIF_BIFTILINGMODE    eBifTilingMode,
+		IMG_UINT32               ui32NumTilingCfgs,
+		IMG_UINT32               *pui32BIFTilingXStrides,
+		IMG_UINT32               ui32FilterFlags,
+		IMG_UINT32               ui32JonesDisableMask,
+		IMG_UINT32               ui32HWRDebugDumpLimit,
+		IMG_UINT32               ui32HWPerfCountersDataSize,
+		IMG_UINT32               *pui32TPUTrilinearFracMask,
+		RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+		FW_PERF_CONF             eFirmwarePerf)
+
+{
+	PVRSRV_ERROR		eError;
+	DEVMEM_FLAGS_T		uiMemAllocFlags;
+	RGXFWIF_INIT		*psRGXFWInitScratch = NULL;
+	RGXFWIF_INIT		*psRGXFWInitActual = NULL;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	IMG_UINT32		dm;
+	IMG_UINT32		ui32kCCBSize = (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) &&
+			!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK)) ?
+			RGXFWIF_KCCB_NUMCMDS_LOG2_GPUVIRT_WITHOUT_FEATURE : RGXFWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
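+	/* Note (added for clarity): ui32kCCBSize is a log2 command count; the
+	 * GPUVIRT_WITHOUT_FEATURE value is chosen when running virtualised
+	 * (not native mode) without the GPU_VIRTUALISATION HW feature, the
+	 * default value otherwise. */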
+#if defined(SUPPORT_PDVFS)
+	RGXFWIF_PDVFS_OPP   *psPDVFSOPPInfo;
+	IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg;
+#endif
+
+	/* Fw init data */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+			PVRSRV_MEMALLOCFLAG_FW_LOCAL |
+			PVRSRV_MEMALLOCFLAG_FW_CONFIG;
+	/* FIXME: Change to Cached */
+
+
+	PDUMPCOMMENT("Allocate RGXFWIF_INIT structure");
+
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_INIT),
+			uiMemAllocFlags,
+			"FwInitStructure",
+			&psDevInfo->psRGXFWIfInitMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate %zu bytes for fw if ctl (%u)",
+				__func__,
+				sizeof(RGXFWIF_INIT),
+				eError));
+		goto fail;
+	}
+
+	psRGXFWInitScratch = OSAllocZMem(sizeof(*psRGXFWInitScratch));
+
+	if (psRGXFWInitScratch == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate RGXFWInit scratch structure",
+				__func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail;
+	}
+
+	/* Setup FW coremem data */
+	if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+	{
+		IMG_BOOL bMetaDMA = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA);
+
+		psRGXFWInitScratch->sCorememDataStore.pbyFWAddr = psDevInfo->sFWCorememDataStoreFWAddr;
+
+		if (bMetaDMA)
+		{
+			RGXSetMetaDMAAddress(&psRGXFWInitScratch->sCorememDataStore,
+					psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+					&psRGXFWInitScratch->sCorememDataStore.pbyFWAddr,
+					0);
+		}
+	}
+
+	/* init HW frame info */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate rgxfw HW info buffer");
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_HWRINFOBUF),
+			uiMemAllocFlags,
+			"FwHWInfoBuffer",
+			&psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate %zu bytes for HW info (%u)",
+				__func__,
+				sizeof(RGXFWIF_HWRINFOBUF),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sRGXFWIfHWRInfoBufCtl,
+			psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+			0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+			(void **)&psDevInfo->psRGXFWIfHWRInfoBuf);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to acquire kernel tracebuf ctl (%u)",
+				__func__,
+				eError));
+		goto fail;
+	}
+
+	/* Might be uncached. Be conservative and use a DeviceMemSet */
+	OSDeviceMemSet(psDevInfo->psRGXFWIfHWRInfoBuf, 0, sizeof(RGXFWIF_HWRINFOBUF));
+
+	/* Allocate a sync for power management */
+	eError = SyncPrimContextCreate(psDevInfo->psDeviceNode,
+			&psDevInfo->hSyncPrimContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate sync primitive context with error (%u)",
+				__func__,
+				eError));
+		goto fail;
+	}
+
+	eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psDevInfo->psPowSyncPrim, "fw power ack");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate sync primitive with error (%u)",
+				__func__,
+				eError));
+
+		goto fail;
+	}
+
+	/* Setup Fault read register */
+	eError = RGXSetupFaultReadRegister(psDeviceNode, psRGXFWInitScratch);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to setup fault read register",
+				__func__));
+		goto fail;
+	}
+
+	/* Allocation flags for the kernel CCB */
+	uiMemAllocFlags  =   PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Set up kernel CCB */
+	eError = RGXSetupCCB(psDevInfo,
+			&psDevInfo->psKernelCCBCtl,
+			&psDevInfo->psKernelCCBCtlMemDesc,
+			&psDevInfo->psKernelCCB,
+			&psDevInfo->psKernelCCBMemDesc,
+			&psRGXFWInitScratch->psKernelCCBCtl,
+			&psRGXFWInitScratch->psKernelCCB,
+			ui32kCCBSize,
+			sizeof(RGXFWIF_KCCB_CMD),
+			uiMemAllocFlags,
+			"FwKernelCCB");
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate Kernel CCB", __func__));
+		goto fail;
+	}
+
+	/* Allocation flags for the firmware and checkpoint CCB */
+	uiMemAllocFlags  =   PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Set up firmware CCB */
+	eError = RGXSetupCCB(psDevInfo,
+			&psDevInfo->psFirmwareCCBCtl,
+			&psDevInfo->psFirmwareCCBCtlMemDesc,
+			&psDevInfo->psFirmwareCCB,
+			&psDevInfo->psFirmwareCCBMemDesc,
+			&psRGXFWInitScratch->psFirmwareCCBCtl,
+			&psRGXFWInitScratch->psFirmwareCCB,
+			RGXFWIF_FWCCB_NUMCMDS_LOG2,
+			sizeof(RGXFWIF_FWCCB_CMD),
+			uiMemAllocFlags,
+			"FwCCB");
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate Firmware CCB", __func__));
+		goto fail;
+	}
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	/* Set up checkpoint CCB */
+	eError = RGXSetupCCB(psDevInfo,
+			&psDevInfo->psCheckpointCCBCtl,
+			&psDevInfo->psCheckpointCCBCtlMemDesc,
+			&psDevInfo->psCheckpointCCB,
+			&psDevInfo->psCheckpointCCBMemDesc,
+			&psRGXFWInitScratch->psCheckpointCCBCtl,
+			&psRGXFWInitScratch->psCheckpointCCB,
+			RGXFWIF_CHECKPOINTCCB_NUMCMDS_LOG2,
+			sizeof(PRGXFWIF_UFO_ADDR),
+			uiMemAllocFlags,
+			"FwChptCCB");
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate Checkpoint CCB", __func__));
+		goto fail;
+	}
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
+
+	/* RD Power Island */
+	{
+		RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+		IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland;
+		IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) ||
+				(eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON);
+
+		ui32ConfigFlags |= bEnableRDPowIsland ? RGXFWIF_INICFG_POW_RASCALDUST : 0;
+	}
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST_V2;
+#if defined(SUPPORT_PDVFS)
+	/* Pro-active DVFS depends on Workload Estimation */
+	psPDVFSOPPInfo = &psRGXFWInitScratch->sPDVFSOPPInfo;
+	psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+	PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFirmware: Missing OPP Table");
+
+	if (psDVFSDeviceCfg->pasOPPTable != NULL)
+	{
+		if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: OPP Table too large: Size = %u, Maximum size = %lu",
+					__func__,
+					psDVFSDeviceCfg->ui32OPPTableSize,
+					(unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail;
+		}
+
+		OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues,
+				psDVFSDeviceCfg->pasOPPTable,
+				sizeof(psPDVFSOPPInfo->asOPPValues));
+
+		psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1;
+
+		ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS_V2;
+
+		/* Tell the FW that the Host might use the reactive timer.
+		 * The FW might clear this flag if the timer can be run on the FW instead. */
+		ui32ConfigFlagsExt |= RGXFWIF_INICFG_EXT_PDVFS_HOST_REACTIVE_TIMER;
+	}
+#endif
+#endif
+
+	/* FW trace control structure */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate rgxfw trace control structure");
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_TRACEBUF),
+			uiMemAllocFlags,
+			"FwTraceCtlStruct",
+			&psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate %zu bytes for fw trace (%u)",
+				__func__,
+				sizeof(RGXFWIF_TRACEBUF),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sTraceBufCtl,
+			psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+			0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+			(void **)&psDevInfo->psRGXFWIfTraceBuf);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to acquire kernel tracebuf ctl (%u)",
+				__func__,
+				eError));
+		goto fail;
+	}
+
+	/* Set initial firmware log type/group(s) */
+	if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR,"%s: Invalid initial log type (0x%X)",
+				__func__, ui32LogType));
+		goto fail;
+	}
+	psDevInfo->psRGXFWIfTraceBuf->ui32LogType = ui32LogType;
+
+#if !defined(PDUMP)
+	/* When PDUMP is enabled, ALWAYS allocate the on-demand trace buffer
+	 * resource (irrespective of the loggroup(s) enabled), given that
+	 * logtype/loggroups can be set during PDump playback in logconfig at
+	 * any point of time. Otherwise, allocate only if required. */
+	if (RGXTraceBufferIsInitRequired(psDevInfo))
+#endif
+	{
+		eError = RGXTraceBufferInitOnDemandResources(psDevInfo);
+	}
+
+	PVR_LOGG_IF_ERROR(eError, "RGXTraceBufferInitOnDemandResources", fail);
+
+	eError = RGXSetupOSConfig(psDevInfo, psRGXFWInitScratch, ui32ConfigFlags, ui32ConfigFlagsExt,
+	                          psRGXFWInitScratch->sTraceBufCtl, psRGXFWInitScratch->sRGXFWIfHWRInfoBufCtl);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to set up the per OS configuration",
+				__func__));
+		goto fail;
+	}
+
+	psRGXFWInitScratch->eGPIOValidationMode = RGXFWIF_GPIO_VAL_OFF;
+#if defined(SUPPORT_VALIDATION)
+	{
+		IMG_UINT32 ui32AppHintDefault;
+		IMG_UINT32 ui32GPIOValidationMode;
+		void      *pvAppHintState = NULL;
+
+		/* Check AppHint for GPIO validation mode */
+		OSCreateKMAppHintState(&pvAppHintState);
+		ui32AppHintDefault = PVRSRV_APPHINT_GPIOVALIDATIONMODE;
+		OSGetKMAppHintUINT32(pvAppHintState,
+				GPIOValidationMode,
+				&ui32AppHintDefault,
+				&ui32GPIOValidationMode);
+		OSFreeKMAppHintState(pvAppHintState);
+		pvAppHintState = NULL;
+
+		if (ui32GPIOValidationMode >= RGXFWIF_GPIO_VAL_LAST)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Invalid GPIO validation mode: %d, only valid if smaller than %d. Disabling GPIO validation.",
+					__func__,
+					ui32GPIOValidationMode,
+					RGXFWIF_GPIO_VAL_LAST));
+		}
+		else
+		{
+			psRGXFWInitScratch->eGPIOValidationMode = (RGXFWIF_GPIO_VAL_MODE) ui32GPIOValidationMode;
+		}
+	}
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Set up Workload Estimation firmware CCB */
+	eError = RGXSetupCCB(psDevInfo,
+			&psDevInfo->psWorkEstFirmwareCCBCtl,
+			&psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+			&psDevInfo->psWorkEstFirmwareCCB,
+			&psDevInfo->psWorkEstFirmwareCCBMemDesc,
+			&psRGXFWInitScratch->psWorkEstFirmwareCCBCtl,
+			&psRGXFWInitScratch->psWorkEstFirmwareCCB,
+			RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2,
+			sizeof(RGXFWIF_WORKEST_FWCCB_CMD),
+			uiMemAllocFlags,
+			"FwWEstCCB");
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate Workload Estimation Firmware CCB",
+				__func__));
+		goto fail;
+	}
+#endif
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+
+	eError = RGXFWSetupCounterBuffer(psDevInfo,
+			&psDevInfo->psCounterBufferMemDesc,
+			PAGE_SIZE,
+			&psRGXFWInitScratch->sCounterDumpCtl,
+			"CounterBuffer");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate counter buffer",
+				__func__));
+		goto fail;
+	}
+
+#endif
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+
+	eError = RGXFWSetupFirmwareGcovBuffer(psDevInfo,
+			&psDevInfo->psFirmwareGcovBufferMemDesc,
+			RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE,
+			&psRGXFWInitScratch->sFirmwareGcovCtl,
+			"FirmwareGcovBuffer");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate firmware gcov buffer",
+				__func__));
+		goto fail;
+	}
+
+	psDevInfo->ui32FirmwareGcovSize = RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE;
+
+#endif
+
+	/* Require a minimum amount of memory for the signature buffers */
+	if (ui32SignatureChecksBufSize < RGXFW_SIG_BUFFER_SIZE_MIN)
+	{
+		ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN;
+	}
+
+	/* Setup Signature and Checksum Buffers for TA and 3D */
+	eError = RGXFWSetupSignatureChecks(psDevInfo,
+			&psDevInfo->psRGXFWSigTAChecksMemDesc,
+			ui32SignatureChecksBufSize,
+			&psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_TA],
+			"TA");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to setup TA signature checks",
+				__func__));
+		goto fail;
+	}
+	psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize;
+
+	eError = RGXFWSetupSignatureChecks(psDevInfo,
+			&psDevInfo->psRGXFWSig3DChecksMemDesc,
+			ui32SignatureChecksBufSize,
+			&psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_3D],
+			"3D");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to setup 3D signature checks",
+				__func__));
+		goto fail;
+	}
+	psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize;
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM))
+	{
+		/* Buffer allocated only when feature present because, all known TDM
+		 * signature registers are dependent on this feature being present */
+		eError = RGXFWSetupSignatureChecks(psDevInfo,
+				&psDevInfo->psRGXFWSigTDM2DChecksMemDesc,
+				ui32SignatureChecksBufSize,
+				&psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_TDM],
+				"TDM");
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to setup TDM signature checks",
+					__func__));
+			goto fail;
+		}
+		psDevInfo->ui32SigTDM2DChecksSize = ui32SignatureChecksBufSize;
+	}
+	else
+	{
+		psDevInfo->psRGXFWSigTDM2DChecksMemDesc = NULL;
+		psDevInfo->ui32SigTDM2DChecksSize = 0;
+	}
+
+#if defined(RGXFW_ALIGNCHECKS)
+	eError = RGXFWSetupAlignChecks(psDevInfo,
+			&psRGXFWInitScratch->sAlignChecks,
+			pui32RGXFWAlignChecks,
+			ui32RGXFWAlignChecksArrLength);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to setup alignment checks",
+				__func__));
+		goto fail;
+	}
+#endif
+
+	psRGXFWInitScratch->ui32FilterFlags = ui32FilterFlags;
+
+
+	if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+	{
+		/* Fill in the remaining bits of the FW init data */
+		psRGXFWInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_BRN_65273_HEAP_BASE;
+		psRGXFWInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_BRN_65273_HEAP_BASE;
+	}
+	else
+	{
+		/* Fill in the remaining bits of the FW init data */
+		psRGXFWInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE;
+		psRGXFWInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE;
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+	{
+		psRGXFWInitScratch->ui32JonesDisableMask = ui32JonesDisableMask;
+	}
+	psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_INICFG_DISABLE_PDP_EN)
+									? IMG_FALSE : IMG_TRUE;
+	psRGXFWInitScratch->ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit;
+
+	psRGXFWInitScratch->eFirmwarePerf = eFirmwarePerf;
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))
+	{
+		eError = _AllocateSLC3Fence(psDevInfo, psRGXFWInitScratch);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to allocate memory for SLC3Fence",
+					__func__));
+			goto fail;
+		}
+	}
+
+
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) &&
+			((ui32ConfigFlags & RGXFWIF_INICFG_METAT1_ENABLED) != 0))
+	{
+		/* Allocate a page for T1 stack */
+		eError = DevmemFwAllocate(psDevInfo,
+				RGX_META_STACK_SIZE,
+				RGX_FWCOMCTX_ALLOCFLAGS,
+				"FwMETAT1Stack",
+				&psDevInfo->psMETAT1StackMemDesc);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to allocate T1 Stack",
+					__func__));
+			goto fail;
+		}
+
+		RGXSetFirmwareAddress(&psRGXFWInitScratch->sT1Stack,
+				psDevInfo->psMETAT1StackMemDesc,
+				0, RFW_FWADDR_NOREF_FLAG);
+
+		PVR_DPF((PVR_DBG_MESSAGE,
+				"%s: T1 Stack Frame allocated at %x",
+				__func__,
+				psRGXFWInitScratch->sT1Stack.ui32Addr));
+	}
+
+#if defined(SUPPORT_PDVFS)
+	/* Core clock rate */
+	uiMemAllocFlags =
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(IMG_UINT32),
+			uiMemAllocFlags,
+			"FwCoreClkRate",
+			&psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate PDVFS core clock rate",
+				__func__));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sCoreClockRate,
+			psDevInfo->psRGXFWIFCoreClkRateMemDesc,
+			0, RFW_FWADDR_NOREF_FLAG);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: PDVFS core clock rate allocated at %x",
+			__func__,
+			psRGXFWInitScratch->sCoreClockRate.ui32Addr));
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc,
+			(void **)&psDevInfo->pui32RGXFWIFCoreClkRate);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to acquire core clk rate (%u)",
+				__func__,
+				eError));
+		goto fail;
+	}
+#endif
+
+
+#if !defined(PDUMP)
+	/* allocate only if required */
+	if (RGXTBIBufferIsInitRequired(psDevInfo))
+#endif
+	{
+		/* When PDUMP is enabled, ALWAYS allocate on-demand TBI buffer resource
+		 * (irrespective of loggroup(s) enabled), given that logtype/loggroups
+		 * can be set during PDump playback in logconfig, at any point of time
+		 */
+		eError = RGXTBIBufferInitOnDemandResources(psDevInfo);
+	}
+
+	PVR_LOGG_IF_ERROR(eError, "RGXTBIBufferInitOnDemandResources", fail);
+	psRGXFWInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer;
+
+	/* Allocate shared buffer for GPU utilisation */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate shared buffer for GPU utilisation");
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_GPU_UTIL_FWCB),
+			uiMemAllocFlags,
+			"FwGPUUtilisationBuffer",
+			&psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate %zu bytes for GPU utilisation buffer ctl (%u)",
+				__func__,
+				sizeof(RGXFWIF_GPU_UTIL_FWCB),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sGpuUtilFWCbCtl,
+			psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc,
+			0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc,
+			(void **)&psDevInfo->psRGXFWIfGpuUtilFWCb);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to acquire kernel GPU utilisation buffer ctl (%u)",
+				__func__,
+				eError));
+		goto fail;
+	}
+
+	/* Initialise GPU utilisation buffer */
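+	/* Note (added for clarity): RGXFWIF_GPU_UTIL_MAKE_WORD packs the 64-bit
+	 * OS timestamp and the power state into a single word so both can be
+	 * published in one atomic write; the exact bit layout is defined by the
+	 * rgx_fwif interface headers, not here. */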
+	psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord =
+			RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(), RGXFWIF_GPU_UTIL_STATE_IDLE);
+
+
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate rgxfw FW runtime configuration (FW)");
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_RUNTIME_CFG),
+			uiMemAllocFlags,
+			"FwRuntimeCfg",
+			&psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate %zu bytes for FW runtime configuration (%u)",
+				__func__,
+				sizeof(RGXFWIF_RUNTIME_CFG),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sRuntimeCfg,
+			psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+			0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+			(void **)&psDevInfo->psRGXFWIfRuntimeCfg);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to acquire kernel FW runtime configuration (%u)",
+				__func__,
+				eError));
+		goto fail;
+	}
+	/* HWPerf: Determine the size of the FW buffer */
+	if (ui32HWPerfFWBufSizeKB == 0 ||
+			ui32HWPerfFWBufSizeKB == RGXFW_HWPERF_L1_SIZE_DEFAULT)
+	{
+		/* Under pvrsrvctl a size of 0 implies the AppHint is not set or is
+		 * set to zero; use the default size from the driver constant, with
+		 * no logging.
+		 */
+		psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_DEFAULT<<10;
+	}
+	else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MAX))
+	{
+		/* Size specified as an AppHint but it is too big */
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: HWPerfFWBufSizeInKB value (%u) too big, using maximum (%u)",
+				__func__,
+				ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MAX));
+		psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_MAX<<10;
+	}
+	else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MIN))
+	{
+		/* Size specified via the AppHint HWPerfFWBufSizeInKB */
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: Using HWPerf FW buffer size of %u KB",
+				__func__,
+				ui32HWPerfFWBufSizeKB));
+		psDevInfo->ui32RGXFWIfHWPerfBufSize = ui32HWPerfFWBufSizeKB<<10;
+	}
+	else
+	{
+		/* Size specified as an AppHint but it is too small */
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: HWPerfFWBufSizeInKB value (%u) too small, using minimum (%u)",
+				__func__,
+				ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN));
+		psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_MIN<<10;
+	}
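+	/* Worked example (illustrative): an AppHint of, say, 2048 KB that falls
+	 * inside [RGXFW_HWPERF_L1_SIZE_MIN, RGXFW_HWPERF_L1_SIZE_MAX] yields
+	 * 2048 << 10 = 2 MiB; "<< 10" is the KB-to-bytes conversion used in
+	 * every branch above. */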
+
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+	/* Set flag to indicate support for SLR Log (for compatibility) */
+	psDevInfo->psRGXFWIfTraceBuf->ui32TracebufFlags |= RGXFWIF_TRACEBUFCFG_SLR_LOG;
+#endif
+
+	/* init HWPERF data */
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfRIdx = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfWIdx = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfWrapCount = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfUt = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfDropCount = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32FirstDropOrdinal = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32LastDropOrdinal = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32PowMonEstimate = 0;
+
+	/* Second stage initialisation of HWPerf; hHWPerfLock created in first
+	 * stage. See RGXRegisterDevice() call to RGXHWPerfInit(). */
+	if (psDevInfo->ui64HWPerfFilter == 0)
+	{
+		psDevInfo->ui64HWPerfFilter = ui64HWPerfFilter;
+		psRGXFWInitScratch->ui64HWPerfFilter = ui64HWPerfFilter;
+	}
+	else
+	{
+		/* The filter has already been modified. This can happen if
+		 * pvr/apphint/EnableFTraceGPU was enabled. */
+		psRGXFWInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFilter;
+	}
+
+	/* Send through the BVNC Feature Flags */
+	eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, &psRGXFWInitScratch->sBvncKmFeatureFlags);
+	PVR_LOGG_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags", fail);
+
+#if !defined(PDUMP)
+	/* Allocate if HWPerf filter has already been set. This is possible either
+	 * by setting a proper AppHint or enabling GPU ftrace events. */
+	if (psDevInfo->ui64HWPerfFilter != 0)
+#endif
+	{
+		/* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources
+		 * (irrespective of HWPerf enabled or not), given that HWPerf can be
+		 * enabled during PDump playback via RTCONF at any point of time. */
+		eError = RGXHWPerfInitOnDemandResources(psDevInfo);
+		PVR_LOGG_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail);
+	}
+
+	RGXHWPerfInitAppHintCallbacks(psDeviceNode);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PDUMPCOMMENT("Allocate rgxfw register configuration structure");
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_REG_CFG),
+			uiMemAllocFlags | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+			"FwRegisterConfigStructure",
+			&psDevInfo->psRGXFWIfRegCfgMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate %zu bytes for fw register configurations (%u)",
+				__func__,
+				sizeof(RGXFWIF_REG_CFG),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sRegCfg,
+			psDevInfo->psRGXFWIfRegCfgMemDesc,
+			0, RFW_FWADDR_NOREF_FLAG);
+#endif
+
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate rgxfw hwperfctl structure");
+	eError = DevmemFwAllocate(psDevInfo,
+			ui32HWPerfCountersDataSize,
+			uiMemAllocFlags,
+			"FwHWPerfControlStructure",
+			&psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate %u bytes for fw hwperf control (%u)",
+				__func__,
+				ui32HWPerfCountersDataSize,
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sHWPerfCtl,
+			psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+			0, 0);
+
+	/* Info required by the FW to calculate the ActivePM idle timer latency */
+	{
+		RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+		RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+
+		psRGXFWInitScratch->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+		psRGXFWInitScratch->ui32ActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms;
+
+		/* Initialise variable runtime configuration to the system defaults */
+		psRuntimeCfg->ui32CoreClockSpeed = psRGXFWInitScratch->ui32InitialCoreClockSpeed;
+		psRuntimeCfg->ui32ActivePMLatencyms = psRGXFWInitScratch->ui32ActivePMLatencyms;
+		psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE;
+
+		/* Initialize the DefaultDustsNumInit Field to Max Dusts */
+		psRuntimeCfg->ui32DefaultDustsNumInit = psDevInfo->sDevFeatureCfg.ui32MAXDustCount;
+	}
+#if defined(PDUMP)
+	PDUMPCOMMENT("Dump initial state of FW runtime configuration");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+			0,
+			sizeof(RGXFWIF_RUNTIME_CFG),
+			PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	/* Initialize FW started flag */
+	psRGXFWInitScratch->bFirmwareStarted = IMG_FALSE;
+	psRGXFWInitScratch->ui32MarkerVal = 1;
+
+	/* Initialise the compatibility check data */
+	RGXFWIF_COMPCHECKS_BVNC_INIT(psRGXFWInitScratch->sRGXCompChecks.sFWBVNC);
+	RGXFWIF_COMPCHECKS_BVNC_INIT(psRGXFWInitScratch->sRGXCompChecks.sHWBVNC);
+
+	PDUMPCOMMENT("Dump RGXFW Init data");
+	if (!bEnableSignatureChecks)
+	{
+#if defined(PDUMP)
+		PDUMPCOMMENT("(to enable rgxfw signatures place the following line after the RTCONF line)");
+		DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfInitMemDesc,
+				offsetof(RGXFWIF_INIT, asSigBufCtl),
+				sizeof(RGXFWIF_SIGBUF_CTL)*(RGXFWIF_DM_MAX),
+				PDUMP_FLAGS_CONTINUOUS);
+#endif
+		psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_TDM].sBuffer.ui32Addr = 0x0;
+		psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_TA].sBuffer.ui32Addr = 0x0;
+		psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0;
+	}
+
+	for (dm = 0; dm < (RGXFWIF_DM_MAX); dm++)
+	{
+		psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmLockedUpCount[dm] = 0;
+		psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmOverranCount[dm] = 0;
+		psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmRecoveredCount[dm] = 0;
+		psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmFalseDetectCount[dm] = 0;
+	}
+
+	/*
+	 * BIF Tiling configuration
+	 */
+
+	psRGXFWInitScratch->eBifTilingMode = eBifTilingMode;
+
+	psRGXFWInitScratch->sBifTilingCfg[0].uiBase = RGX_BIF_TILING_HEAP_1_BASE;
+	psRGXFWInitScratch->sBifTilingCfg[0].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+	psRGXFWInitScratch->sBifTilingCfg[0].uiXStride = pui32BIFTilingXStrides[0];
+	psRGXFWInitScratch->sBifTilingCfg[1].uiBase = RGX_BIF_TILING_HEAP_2_BASE;
+	psRGXFWInitScratch->sBifTilingCfg[1].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+	psRGXFWInitScratch->sBifTilingCfg[1].uiXStride = pui32BIFTilingXStrides[1];
+	psRGXFWInitScratch->sBifTilingCfg[2].uiBase = RGX_BIF_TILING_HEAP_3_BASE;
+	psRGXFWInitScratch->sBifTilingCfg[2].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+	psRGXFWInitScratch->sBifTilingCfg[2].uiXStride = pui32BIFTilingXStrides[2];
+	psRGXFWInitScratch->sBifTilingCfg[3].uiBase = RGX_BIF_TILING_HEAP_4_BASE;
+	psRGXFWInitScratch->sBifTilingCfg[3].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+	psRGXFWInitScratch->sBifTilingCfg[3].uiXStride = pui32BIFTilingXStrides[3];
+
+#if defined(SUPPORT_VALIDATION)
+	/*
+	 * TPU trilinear rounding mask override
+	 */
+	for (dm = 0; dm < RGXFWIF_TPU_DM_LAST; dm++)
+	{
+		psRGXFWInitScratch->aui32TPUTrilinearFracMask[dm] = pui32TPUTrilinearFracMask[dm];
+	}
+#endif
+
+	/* update the FW structure proper */
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+			(void **)&psRGXFWInitActual);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to acquire kernel fw if ctl (%u)",
+				__func__,
+				eError));
+		goto fail;
+	}
+
+	OSDeviceMemCopy(psRGXFWInitActual, psRGXFWInitScratch, sizeof(*psRGXFWInitActual));
+
+	/* We don't need access to the fw init data structure anymore */
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+	psRGXFWInitActual = NULL;
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Dump rgxfw hwperfctl structure");
+	DevmemPDumpLoadZeroMem(psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+			0,
+			ui32HWPerfCountersDataSize,
+			PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Dump rgxfw trace control structure");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+			0,
+			sizeof(RGXFWIF_TRACEBUF),
+			PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Dump rgx TBI buffer");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfTBIBufferMemDesc,
+			0,
+			psDevInfo->ui32FWIfTBIBufferSize,
+			PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PDUMPCOMMENT("Dump rgxfw register configuration buffer");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfRegCfgMemDesc,
+			0,
+			sizeof(RGXFWIF_REG_CFG),
+			PDUMP_FLAGS_CONTINUOUS);
+#endif
+	PDUMPCOMMENT("Dump rgxfw init structure");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfInitMemDesc,
+			0,
+			sizeof(RGXFWIF_INIT),
+			PDUMP_FLAGS_CONTINUOUS);
+
+	/* RGXFW Init structure needs to be loaded before we overwrite FaultPhysAddr, else this address patching won't have any effect */
+	PDUMPCOMMENT("Overwrite FaultPhysAddr of FWInit in pdump with actual physical address");
+	RGXPDumpFaultReadRegister(psDevInfo);
+
+	PDUMPCOMMENT("RTCONF: run-time configuration");
+
+
+	/* Dump the config options so they can be edited.
+	 *
+	 * FIXME: Need new DevmemPDumpWRW API which writes a WRW to load ui32ConfigFlags
+	 */
+	PDUMPCOMMENT("(Set the FW config options here)");
+	PDUMPCOMMENT("( Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_TA_EN);
+	PDUMPCOMMENT("( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_3D_EN);
+	PDUMPCOMMENT("( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_CDM_EN);
+	PDUMPCOMMENT("( Ctx Switch Rand mode: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_MODE_RAND);
+	PDUMPCOMMENT("( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN);
+	PDUMPCOMMENT("( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST);
+	PDUMPCOMMENT("( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN);
+	PDUMPCOMMENT("( Enable HWR: 0x%08x)", RGXFWIF_INICFG_HWR_EN);
+	PDUMPCOMMENT("( Check MList: 0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN);
+	PDUMPCOMMENT("( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN);
+	PDUMPCOMMENT("( Enable HWPerf Polling Perf Counter: 0x%08x)", RGXFWIF_INICFG_POLL_COUNTERS_EN);
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, VDM_OBJECT_LEVEL_LLS))
+	{
+		PDUMPCOMMENT("( Ctx Switch Object mode Index: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX);
+		PDUMPCOMMENT("( Ctx Switch Object mode Instance: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE);
+		PDUMPCOMMENT("( Ctx Switch Object mode List: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST);
+	}
+
+	PDUMPCOMMENT("( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN);
+	PDUMPCOMMENT("( Assert on TA Out-of-Memory: 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY);
+	PDUMPCOMMENT("( Disable HWPerf custom counter filter: 0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER);
+	PDUMPCOMMENT("( Enable HWPerf custom performance timer: 0x%08x)", RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN);
+	PDUMPCOMMENT("( Enable CDM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN);
+	PDUMPCOMMENT("( Enable Ctx Switch profile mode: 0x%08x (none=b'000, fast=b'001, medium=b'010, slow=b'011, nodelay=b'100))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK);
+	PDUMPCOMMENT("( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP);
+	PDUMPCOMMENT("( Enable Meta T1 running main code: 0x%08x)", RGXFWIF_INICFG_METAT1_MAIN);
+	PDUMPCOMMENT("( Enable Meta T1 running dummy code: 0x%08x)", RGXFWIF_INICFG_METAT1_DUMMY);
+	PDUMPCOMMENT("( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER);
+
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfOSConfigDesc,
+			offsetof(RGXFWIF_OS_CONFIG, ui32ConfigFlags),
+			ui32ConfigFlags,
+			PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("( Extended FW config options start here )");
+	PDUMPCOMMENT("( Lower Priority Ctx Switch  2D Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TDM);
+	PDUMPCOMMENT("( Lower Priority Ctx Switch  TA Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TA);
+	PDUMPCOMMENT("( Lower Priority Ctx Switch  3D Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_3D);
+	PDUMPCOMMENT("( Lower Priority Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_CDM);
+
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfOSConfigDesc,
+			offsetof(RGXFWIF_OS_CONFIG, ui32ConfigFlagsExt),
+			ui32ConfigFlagsExt,
+			PDUMP_FLAGS_CONTINUOUS);
+
+	/* default: no filter */
+	psRGXFWInitScratch->sPIDFilter.eMode = RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT;
+	psRGXFWInitScratch->sPIDFilter.asItems[0].uiPID = 0;
+
+	PDUMPCOMMENT("( PID filter type: %X=INCLUDE_ALL_EXCEPT, %X=EXCLUDE_ALL_EXCEPT)",
+			RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+			RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT);
+
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+			offsetof(RGXFWIF_INIT, sPIDFilter.eMode),
+			psRGXFWInitScratch->sPIDFilter.eMode,
+			PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))",
+			RGXFWIF_PID_FILTER_MAX_NUM_PIDS);
+	{
+		IMG_UINT32 i;
+
+		/* generate a few WRWs in the pdump stream as an example */
+		for (i = 0; i < MIN(RGXFWIF_PID_FILTER_MAX_NUM_PIDS, 8); i++)
+		{
+			/*
+			 * Some compilers cannot cope with offsetof() being used here,
+			 * the specific problem being the non-const index in the
+			 * expression, where a constant is required. Typical compiler
+			 * output is "expression must have a constant value".
+			 */
+			const IMG_DEVMEM_OFFSET_T uiPIDOff
+			= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_INIT *)0)->sPIDFilter.asItems[i].uiPID);
+
+			const IMG_DEVMEM_OFFSET_T uiOSIDOff
+			= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_INIT *)0)->sPIDFilter.asItems[i].ui32OSID);
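+			/* (Classic offsetof() emulation: the member address is taken
+			 * relative to a NULL base pointer and cast to an offset;
+			 * unlike offsetof(), this accepts the non-const index i.) */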
+
+			PDUMPCOMMENT("(PID and OSID pair %u)", i);
+
+			PDUMPCOMMENT("(PID)");
+			DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+					uiPIDOff,
+					0,
+					PDUMP_FLAGS_CONTINUOUS);
+
+			PDUMPCOMMENT("(OSID)");
+			DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+					uiOSIDOff,
+					0,
+					PDUMP_FLAGS_CONTINUOUS);
+		}
+	}
+
+	/*
+	 * Dump the log config so it can be edited.
+	 */
+	PDUMPCOMMENT("(Set the log config here)");
+	PDUMPCOMMENT("( Log Type: set bit 0 for TRACE, reset for TBI)");
+	PDUMPCOMMENT("( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN);
+	PDUMPCOMMENT("( MTS Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MTS);
+	PDUMPCOMMENT("( CLEANUP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CLEANUP);
+	PDUMPCOMMENT("( CSW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CSW);
+	PDUMPCOMMENT("( BIF Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_BIF);
+	PDUMPCOMMENT("( PM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_PM);
+	PDUMPCOMMENT("( RTD Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RTD);
+	PDUMPCOMMENT("( SPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SPM);
+	PDUMPCOMMENT("( POW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_POW);
+	PDUMPCOMMENT("( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR);
+	PDUMPCOMMENT("( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP);
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+	{
+		PDUMPCOMMENT("( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA);
+	}
+
+	PDUMPCOMMENT("( MISC Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MISC);
+	PDUMPCOMMENT("( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG);
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+			offsetof(RGXFWIF_TRACEBUF, ui32LogType),
+			psDevInfo->psRGXFWIfTraceBuf->ui32LogType,
+			PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Set the HWPerf Filter config here");
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfInitMemDesc,
+			offsetof(RGXFWIF_INIT, ui64HWPerfFilter),
+			psRGXFWInitScratch->ui64HWPerfFilter,
+			PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PDUMPCOMMENT("(Number of registers configurations for types(byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), tla(%d), TDM(%d))",\
+			RGXFWIF_REG_CFG_TYPE_PWR_ON,\
+			RGXFWIF_REG_CFG_TYPE_DUST_CHANGE,\
+			RGXFWIF_REG_CFG_TYPE_TA,\
+			RGXFWIF_REG_CFG_TYPE_3D,\
+			RGXFWIF_REG_CFG_TYPE_CDM,\
+			RGXFWIF_REG_CFG_TYPE_TLA,\
+			RGXFWIF_REG_CFG_TYPE_TDM);
+
+	{
+		IMG_UINT32 i;
+
+		/**
+		 * Write 32 bits in each iteration as required by PDUMP WRW command.
+		 */
+		for (i = 0; i < RGXFWIF_REG_CFG_TYPE_ALL; i += sizeof(IMG_UINT32))
+		{
+			DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+					offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[i]),
+					0,
+					PDUMP_FLAGS_CONTINUOUS);
+		}
+	}
+
+	PDUMPCOMMENT("(Set registers here: address, mask, value)");
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+			offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Addr),
+			0,
+			PDUMP_FLAGS_CONTINUOUS);
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+			offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Mask),
+			0,
+			PDUMP_FLAGS_CONTINUOUS);
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+			offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Value),
+			0,
+			PDUMP_FLAGS_CONTINUOUS);
+#endif /* SUPPORT_USER_REGISTER_CONFIGURATION */
+#endif /* PDUMP */
+
+	/* Perform additional virtualisation initialisation */
+	eError = RGXVzSetupFirmware(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed RGXVzSetupFirmware", __func__));
+		goto fail;
+	}
+
+	OSFreeMem(psRGXFWInitScratch);
+
+	psDevInfo->bFirmwareInitialised = IMG_TRUE;
+
+	return PVRSRV_OK;
+
+fail:
+	if (psDevInfo->psRGXFWIfInitMemDesc != NULL && psRGXFWInitActual != NULL)
+	{
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+	}
+
+	if (psRGXFWInitScratch)
+	{
+		OSFreeMem(psRGXFWInitScratch);
+	}
+
+	RGXFreeFirmware(psDevInfo);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXFreeFirmware
+
+ @Description
+
+ Frees all the firmware-related allocations
+
+ @Input psDevInfo
+
+ @Return void
+
+ ******************************************************************************/
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO	*psDevInfo)
+{
+	psDevInfo->bFirmwareInitialised = IMG_FALSE;
+
+	RGXVzFreeFirmware(psDevInfo->psDeviceNode);
+
+	RGXFreeCCB(psDevInfo,
+	           &psDevInfo->psKernelCCBCtl,
+	           &psDevInfo->psKernelCCBCtlMemDesc,
+	           &psDevInfo->psKernelCCB,
+	           &psDevInfo->psKernelCCBMemDesc);
+
+	RGXFreeCCB(psDevInfo,
+	           &psDevInfo->psFirmwareCCBCtl,
+	           &psDevInfo->psFirmwareCCBCtlMemDesc,
+	           &psDevInfo->psFirmwareCCB,
+	           &psDevInfo->psFirmwareCCBMemDesc);
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	RGXFreeCCB(psDevInfo,
+	           &psDevInfo->psCheckpointCCBCtl,
+	           &psDevInfo->psCheckpointCCBCtlMemDesc,
+	           &psDevInfo->psCheckpointCCB,
+	           &psDevInfo->psCheckpointCCBMemDesc);
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	RGXFreeCCB(psDevInfo,
+	           &psDevInfo->psWorkEstFirmwareCCBCtl,
+	           &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+	           &psDevInfo->psWorkEstFirmwareCCB,
+	           &psDevInfo->psWorkEstFirmwareCCBMemDesc);
+#endif
+
+#if defined(RGXFW_ALIGNCHECKS)
+	if (psDevInfo->psRGXFWAlignChecksMemDesc)
+	{
+		RGXFWFreeAlignChecks(psDevInfo);
+	}
+#endif
+
+	if (psDevInfo->psRGXFWIfOSConfigDesc)
+	{
+		if (psDevInfo->psFWIfOSConfig)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfOSConfigDesc);
+			psDevInfo->psFWIfOSConfig = NULL;
+		}
+
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfOSConfigDesc);
+		psDevInfo->psRGXFWIfOSConfigDesc = NULL;
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM) &&
+	    psDevInfo->psRGXFWSigTDM2DChecksMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSigTDM2DChecksMemDesc);
+		psDevInfo->psRGXFWSigTDM2DChecksMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWSigTAChecksMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSigTAChecksMemDesc);
+		psDevInfo->psRGXFWSigTAChecksMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWSig3DChecksMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSig3DChecksMemDesc);
+		psDevInfo->psRGXFWSig3DChecksMemDesc = NULL;
+	}
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+	if (psDevInfo->psCounterBufferMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psCounterBufferMemDesc);
+		psDevInfo->psCounterBufferMemDesc = NULL;
+	}
+#endif
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+	if (psDevInfo->psFirmwareGcovBufferMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psFirmwareGcovBufferMemDesc);
+		psDevInfo->psFirmwareGcovBufferMemDesc = NULL;
+	}
+#endif
+
+	RGXSetupFaultReadRegisterRollback(psDevInfo);
+
+	if (psDevInfo->psPowSyncPrim != NULL)
+	{
+		SyncPrimFree(psDevInfo->psPowSyncPrim);
+		psDevInfo->psPowSyncPrim = NULL;
+	}
+
+	if (psDevInfo->hSyncPrimContext != (IMG_HANDLE) NULL)
+	{
+		SyncPrimContextDestroy(psDevInfo->hSyncPrimContext);
+		psDevInfo->hSyncPrimContext = (IMG_HANDLE) NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+			psDevInfo->psRGXFWIfGpuUtilFWCb = NULL;
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+		psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL;
+	}
+
+	RGXHWPerfDeinit(psDevInfo);
+
+	if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfRuntimeCfg != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+			psDevInfo->psRGXFWIfRuntimeCfg = NULL;
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+		psDevInfo->psRGXFWIfRuntimeCfgMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfHWRInfoBuf != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+			psDevInfo->psRGXFWIfHWRInfoBuf = NULL;
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+		psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+	{
+		psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfTraceBuf != NULL)
+		{
+			/* first deinit/free the tracebuffer allocation */
+			RGXTraceBufferDeinit(psDevInfo);
+
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+			psDevInfo->psRGXFWIfTraceBuf = NULL;
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+		psDevInfo->psRGXFWIfTraceBufCtlMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfTBIBufferMemDesc)
+	{
+		RGXTBIBufferDeinit(psDevInfo);
+	}
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	if (psDevInfo->psRGXFWIfRegCfgMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfRegCfgMemDesc);
+		psDevInfo->psRGXFWIfRegCfgMemDesc = NULL;
+	}
+#endif
+	if (psDevInfo->psRGXFWIfHWPerfCountersMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+		psDevInfo->psRGXFWIfHWPerfCountersMemDesc = NULL;
+	}
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))
+	{
+		_FreeSLC3Fence(psDevInfo);
+	}
+
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) && (psDevInfo->psMETAT1StackMemDesc))
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psMETAT1StackMemDesc);
+		psDevInfo->psMETAT1StackMemDesc = NULL;
+	}
+
+#if defined(SUPPORT_PDVFS)
+	if (psDevInfo->psRGXFWIFCoreClkRateMemDesc)
+	{
+		if (psDevInfo->pui32RGXFWIFCoreClkRate != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+			psDevInfo->pui32RGXFWIFCoreClkRate = NULL;
+		}
+
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+		psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL;
+	}
+#endif
+
+	if (psDevInfo->psRGXFWIfInitMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfInitMemDesc);
+		psDevInfo->psRGXFWIfInitMemDesc = NULL;
+	}
+}
+
+
+/******************************************************************************
+ FUNCTION	: RGXAcquireKernelCCBSlot
+
+ PURPOSE	: Attempts to obtain a slot in the Kernel CCB
+
+ PARAMETERS	: psKCCBCtrlMemDesc - mem desc for the kernel CCB control
+			: psKCCBCtl - the kernel CCB control structure
+			: pui32Offset - returns the next write offset if a slot is free
+
+ RETURNS	: PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXAcquireKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc,
+		RGXFWIF_CCB_CTL	*psKCCBCtl,
+		IMG_UINT32			*pui32Offset)
+{
+	IMG_UINT32	ui32OldWriteOffset, ui32NextWriteOffset;
+
+	ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+	ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;
+
+	/*
+	 * Note: The MTS can queue up to 255 kicks (254 pending kicks and 1
+	 * executing kick), hence the kernel CCB should not queue more than
+	 * 254 commands.
+	 */
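+	/*
+	 * Worked example (sketch): with ui32WrapMask = 0x7F the CCB has 128
+	 * slots and a write offset of 127 wraps back to 0, since
+	 * (127 + 1) & 0x7F == 0. With one slot always kept empty, at most 127
+	 * commands can be queued at once, within the 254-command limit.
+	 */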
+	PVR_ASSERT(psKCCBCtl->ui32WrapMask < 255);
+
+#if defined(PDUMP)
+	/* Wait for sufficient CCB space to become available */
+	PDUMPCOMMENTWITHFLAGS(0, "Wait for kCCB woff=%u", ui32NextWriteOffset);
+	DevmemPDumpCBP(psKCCBCtrlMemDesc,
+			offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+			ui32NextWriteOffset,
+			1,
+			(psKCCBCtl->ui32WrapMask + 1));
+#endif
+
+	if (ui32NextWriteOffset == psKCCBCtl->ui32ReadOffset)
+	{
+		return PVRSRV_ERROR_KERNEL_CCB_FULL;
+	}
+	*pui32Offset = ui32NextWriteOffset;
+	return PVRSRV_OK;
+}
+
+/******************************************************************************
+ FUNCTION	: RGXPollKernelCCBSlot
+
+ PURPOSE	: Poll for space in the Kernel CCB
+
+ PARAMETERS	: psKCCBCtrlMemDesc - mem desc for the kernel CCB control
+			: psKCCBCtl - the kernel CCB control structure
+
+ RETURNS	: PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXPollKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc,
+		RGXFWIF_CCB_CTL	*psKCCBCtl)
+{
+	IMG_UINT32	ui32OldWriteOffset, ui32NextWriteOffset;
+
+	ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+	ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;
+
+	/*
+	 * Note: The MTS can queue up to 255 kicks (254 pending kicks and 1
+	 * executing kick), hence the kernel CCB should not queue more than
+	 * 254 commands.
+	 */
+	PVR_ASSERT(psKCCBCtl->ui32WrapMask < 255);
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset)
+		{
+			return PVRSRV_OK;
+		}
+
+		/*
+		 * The following sanity check doesn't impact performance,
+		 * since the CPU has to wait for the GPU anyway (full kernel CCB).
+		 */
+		if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+		{
+			return PVRSRV_ERROR_KERNEL_CCB_FULL;
+		}
+
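+		/* Back off between polls: the wait quantum divides the overall
+		 * MAX_HW_TIME_US budget into WAIT_TRY_COUNT attempts. */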
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	return PVRSRV_ERROR_KERNEL_CCB_FULL;
+}
+
+/******************************************************************************
+ FUNCTION	: RGXGetCmdMemCopySize
+
+ PURPOSE	: Calculates the actual size of the KCCB command in use
+
+ PARAMETERS	: eCmdType     Type of KCCB command
+
+ RETURNS	: Actual size of the KCCB command on success, zero otherwise
+ ******************************************************************************/
+static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType)
+{
+	/* First get the offset of uCmdData inside the RGXFWIF_KCCB_CMD struct.
+	 * This accounts for the alignment requirement of the uCmdData union.
+	 *
+	 * Then add the command-data size for the given command type to calculate
+	 * the actual command size required for the mem copy.
+	 *
+	 * NOTE: Make sure that uCmdData is the last member of the RGXFWIF_KCCB_CMD struct.
+	 */
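+	/*
+	 * Illustrative numbers only (actual values vary by build): if uCmdData
+	 * starts at offset 8 and RGXFWIF_KCCB_CMD_KICK_DATA is 24 bytes, a KICK
+	 * command copies 32 bytes rather than the full sizeof(RGXFWIF_KCCB_CMD).
+	 */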
+	switch (eCmdType)
+	{
+		case RGXFWIF_KCCB_CMD_KICK:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_KICK_DATA);
+		}
+		case RGXFWIF_KCCB_CMD_MMUCACHE:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_MMUCACHEDATA);
+		}
+		case RGXFWIF_KCCB_CMD_BP:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_BPDATA);
+		}
+		case RGXFWIF_KCCB_CMD_SYNC:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_SYNC_DATA);
+		}
+		case RGXFWIF_KCCB_CMD_SLCFLUSHINVAL:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SLCFLUSHINVALDATA);
+		}
+		case RGXFWIF_KCCB_CMD_CLEANUP:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CLEANUP_REQUEST);
+		}
+		case RGXFWIF_KCCB_CMD_POW:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_POWER_REQUEST);
+		}
+		case RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE:
+		case RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_ZSBUFFER_BACKING_DATA);
+		}
+		case RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELIST_GS_DATA);
+		}
+		case RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELISTS_RECONSTRUCTION_DATA);
+		}
+		case RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SIGNAL_UPDATE_DATA);
+		}
+		case RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_WRITE_OFFSET_UPDATE_DATA);
+		}
+		case RGXFWIF_KCCB_CMD_FORCE_UPDATE:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA);
+		}
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+		case RGXFWIF_KCCB_CMD_REGCONFIG:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_REGCONFIG_DATA);
+		}
+#endif
+		case RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS);
+		}
+#if defined(SUPPORT_PDVFS)
+		case RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_PDVFS_MAX_FREQ_DATA);
+		}
+#endif
+		case RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OSID_PRIORITY_DATA);
+		}
+		case RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HCS_CTL);
+		}
+		case RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OSID_ISOLATION_GROUP_DATA);
+		}
+		case RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OS_STATE_CHANGE_DATA);
+		}
+		case RGXFWIF_KCCB_CMD_COUNTER_DUMP:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_COUNTER_DUMP_DATA);
+		}
+		case RGXFWIF_KCCB_CMD_SLCBPCTL:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SLCBPCTLDATA);
+		}
+		case RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL);
+		}
+		case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS);
+		}
+		case RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL_BLKS);
+		}
+		case RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE:
+		{
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CORECLKSPEEDCHANGE_DATA);
+		}
+		case RGXFWIF_KCCB_CMD_HEALTH_CHECK:
+		case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT:
+		case RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE:
+#if defined(SUPPORT_PDVFS)
+		case RGXFWIF_KCCB_CMD_PDVFS_REQUEST_REACTIVE_UPDATE:
+#endif
+		case RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL:
+		{
+			/* No command-specific data */
+			return offsetof(RGXFWIF_KCCB_CMD, uCmdData);
+		}
+		default:
+		{
+			/* Invalid, unused, or newly added command type */
+			return 0; /* Error */
+		}
+	}
+}
+
+static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO	*psDevInfo,
+		RGXFWIF_DM			eKCCBType,
+		RGXFWIF_KCCB_CMD	*psKCCBCmd,
+		IMG_UINT32             uiPdumpFlags)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_DEVICE_NODE	*psDeviceNode = psDevInfo->psDeviceNode;
+	RGXFWIF_CCB_CTL		*psKCCBCtl = psDevInfo->psKernelCCBCtl;
+	IMG_UINT8			*pui8KCCB = psDevInfo->psKernelCCB;
+	IMG_UINT32			ui32NewWriteOffset;
+	IMG_UINT32			ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+	IMG_UINT32			ui32CmdMemCopySize;
+
+#if !defined(PDUMP)
+	PVR_UNREFERENCED_PARAMETER(uiPdumpFlags);
+#else
+	IMG_BOOL bPdumpEnabled = IMG_FALSE;
+	IMG_BOOL bPDumpPowTrans = PDUMPPOWCMDINTRANS();
+	IMG_BOOL bContCaptureOn = PDumpIsContCaptureOn(); /* client connected or in pdump init phase */
+
+	if (bContCaptureOn)
+	{
+		IMG_BOOL bIsInCaptureRange;
+
+		PDumpIsCaptureFrameKM(&bIsInCaptureRange);
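+		/* Dump when inside the capture range or for continuous-flagged
+		 * commands, but never while a power transition is being captured. */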
+		bPdumpEnabled = (bIsInCaptureRange || PDUMP_IS_CONTINUOUS(uiPdumpFlags)) && !bPDumpPowTrans;
+
+		/* in capture range */
+		if (bPdumpEnabled)
+		{
+			if (!psDevInfo->bDumpedKCCBCtlAlready)
+			{
+				/* entering capture range */
+				psDevInfo->bDumpedKCCBCtlAlready = IMG_TRUE;
+
+				/* Wait for the live FW to catch up */
+				PVR_DPF((PVR_DBG_MESSAGE, "%s: waiting on FW to catch up, roff: %d, woff: %d",
+						__func__,
+						psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset));
+				PVRSRVPollForValueKM((IMG_UINT32 __iomem *)&psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset, 0xFFFFFFFF);
+
+				/* Dump Init state of Kernel CCB control (read and write offset) */
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Initial state of kernel CCB Control, roff: %d, woff: %d",
+						psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset);
+
+				DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
+						0,
+						sizeof(RGXFWIF_CCB_CTL),
+						PDUMP_FLAGS_CONTINUOUS);
+			}
+		}
+	}
+#endif
+
+	psKCCBCmd->eDM = eKCCBType;
+
+	PVR_ASSERT(sizeof(RGXFWIF_KCCB_CMD) == psKCCBCtl->ui32CmdSize);
+	if (!OSLockIsLocked(psDeviceNode->hPowerLock))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s called without power lock held!",
+				__func__));
+		PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock));
+	}
+
+	/* Acquire a slot in the CCB */
+	eError = RGXAcquireKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, psKCCBCtl, &ui32NewWriteOffset);
+	if (eError != PVRSRV_OK)
+	{
+		goto _RGXSendCommandRaw_Exit;
+	}
+
+	/* Calculate actual size of command to optimize device mem copy */
+	ui32CmdMemCopySize = RGXGetCmdMemCopySize(psKCCBCmd->eCmdType);
+	PVR_LOGR_IF_FALSE(ui32CmdMemCopySize != 0, "RGXGetCmdMemCopySize failed", PVRSRV_ERROR_INVALID_CCB_COMMAND);
+
+	/* Copy the command into the CCB */
+	OSDeviceMemCopy(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize],
+			psKCCBCmd, ui32CmdMemCopySize);
+
+	/* Ensure the kCCB data is written before the offsets are updated */
+	OSWriteMemoryBarrier();
+
+	/* Move past the current command */
+	psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset;
+	/* Force a read-back to memory to avoid posted writes on certain buses */
+	(void) psKCCBCtl->ui32WriteOffset;
+
+#if defined(PDUMP)
+	if (bContCaptureOn)
+	{
+		/* in capture range */
+		if (bPdumpEnabled)
+		{
+			/* Dump new Kernel CCB content */
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump kCCB cmd for DM %d, woff = %d",
+					eKCCBType,
+					ui32OldWriteOffset);
+			DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc,
+					ui32OldWriteOffset * psKCCBCtl->ui32CmdSize,
+					ui32CmdMemCopySize,
+					PDUMP_FLAGS_CONTINUOUS);
+
+			/* Dump new kernel CCB write offset */
+			PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "Dump kCCBCtl woff (added new cmd for DM %d): %d",
+					eKCCBType,
+					ui32NewWriteOffset);
+			DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
+					offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
+					sizeof(IMG_UINT32),
+					uiPdumpFlags);
+
+			/* mimic the read-back of the write from above */
+			DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc,
+					offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
+					ui32NewWriteOffset,
+					0xFFFFFFFF,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					uiPdumpFlags);
+
+		}
+		/* out of capture range */
+		else
+		{
+			eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset);
+			PVR_LOGG_IF_ERROR(eError, "RGXPdumpDrainKCCB", _RGXSendCommandRaw_Exit);
+		}
+	}
+#endif
+
+	PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "MTS kick for kernel CCB");
+	/*
+	 * Kick the MTS to schedule the firmware.
+	 */
+	{
+		IMG_UINT32 ui32MTSRegVal;
+
+		if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) &&
+				!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)))
+		{
+#if defined(SUPPORT_STRIP_RENDERING)
+			ui32MTSRegVal = ((RGXFWIF_DM_GP + PVRSRV_VZ_DRIVER_OSID) & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK);
+#else
+			ui32MTSRegVal = ((RGXFWIF_DM_GP + PVRSRV_VZ_DRIVER_OSID) & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_COUNTED;
+#endif
+		}
+		else
+		{
+#if defined(SUPPORT_STRIP_RENDERING)
+			ui32MTSRegVal = (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK);
+#else
+			ui32MTSRegVal = (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_COUNTED;
+#endif
+		}
+
+		__MTSScheduleWrite(psDevInfo, ui32MTSRegVal);
+
+		PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_MTS_SCHEDULE, ui32MTSRegVal, uiPdumpFlags);
+	}
+
+#if defined(NO_HARDWARE)
+	/* Keep the read offset updated because the FW isn't there to update it */
+	psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset;
+#endif
+
+	_RGXSendCommandRaw_Exit:
+	return eError;
+}
+
+/******************************************************************************
+ FUNCTION	: _AllocDeferredCommand
+
+ PURPOSE	: Allocate a KCCB command and add it to KCCB deferred list
+
+ PARAMETERS	: psDevInfo	RGX device info
+			: eKCCBType		Firmware Command type
+			: psKCCBCmd		Firmware Command
+			: uiPdumpFlags	Pdump flags
+
+ RETURNS	: PVRSRV_OK if all went well, PVRSRV_ERROR_RETRY otherwise.
+ ******************************************************************************/
+static PVRSRV_ERROR _AllocDeferredCommand(PVRSRV_RGXDEV_INFO	*psDevInfo,
+		RGXFWIF_DM			eKCCBType,
+		RGXFWIF_KCCB_CMD	*psKCCBCmd,
+		IMG_UINT32		uiPdumpFlags)
+{
+	RGX_DEFERRED_KCCB_CMD *psDeferredCommand;
+
+	psDeferredCommand = OSAllocMem(sizeof(*psDeferredCommand));
+
+	if (!psDeferredCommand)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Deferring a KCCB command failed: allocation failure, requesting retry"));
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	psDeferredCommand->sKCCBcmd = *psKCCBCmd;
+	psDeferredCommand->eDM = eKCCBType;
+	psDeferredCommand->uiPdumpFlags = uiPdumpFlags;
+	psDeferredCommand->psDevInfo = psDevInfo;
+
+	OSLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList);
+	dllist_add_to_tail(&(psDevInfo->sKCCBDeferredCommandsListHead), &(psDeferredCommand->sListNode));
+	psDevInfo->ui32KCCBDeferredCommandsCount++;
+	OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+
+	return PVRSRV_OK;
+}
+
+/******************************************************************************
+ FUNCTION	: _FreeDeferredCommand
+
+ PURPOSE	: Remove the sent KCCB command from the deferred list and free it.
+ 		  Must be called while holding hLockKCCBDeferredCommandsList.
+
+ PARAMETERS	: psNode			Node in deferred list
+			: psDeferredKCCBCmd	KCCB Command to free
+
+ RETURNS	: None
+ ******************************************************************************/
+static void _FreeDeferredCommand(DLLIST_NODE *psNode, RGX_DEFERRED_KCCB_CMD *psDeferredKCCBCmd)
+{
+	dllist_remove_node(psNode);
+	psDeferredKCCBCmd->psDevInfo->ui32KCCBDeferredCommandsCount--;
+	OSFreeMem(psDeferredKCCBCmd);
+}
+
+/******************************************************************************
+ FUNCTION	: RGXSendCommandsFromDeferredList
+
+ PURPOSE	: Try to send the KCCB commands from the deferred list to the KCCB.
+ 		  Must be called while holding the PowerLock.
+
+ PARAMETERS	: psDevInfo	RGX device info
+		: bPoll		Poll for space in KCCB
+
+ RETURNS	: PVRSRV_OK	If all commands in deferred list are sent to KCCB,
+			  PVRSRV_ERROR_KERNEL_CCB_FULL otherwise.
+ ******************************************************************************/
+PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	DLLIST_NODE *psNode, *psNext;
+	RGX_DEFERRED_KCCB_CMD *psTempDeferredKCCBCmd;
+
+	OSLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList);
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		if (dllist_is_empty(&psDevInfo->sKCCBDeferredCommandsListHead))
+		{
+			OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+			return PVRSRV_OK;
+		}
+
+		/* For every deferred KCCB command, try to send it */
+		dllist_foreach_node(&psDevInfo->sKCCBDeferredCommandsListHead, psNode, psNext)
+		{
+			psTempDeferredKCCBCmd = IMG_CONTAINER_OF(psNode, RGX_DEFERRED_KCCB_CMD, sListNode);
+			eError = RGXSendCommandRaw(psTempDeferredKCCBCmd->psDevInfo,
+			                           psTempDeferredKCCBCmd->eDM,
+			                           &(psTempDeferredKCCBCmd->sKCCBcmd),
+			                           psTempDeferredKCCBCmd->uiPdumpFlags);
+			if (eError == PVRSRV_OK)
+			{
+				_FreeDeferredCommand(psNode, psTempDeferredKCCBCmd);
+			}
+			else
+			{
+				if (bPoll)
+				{
+					break;
+				}
+				else
+				{
+					OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+					return PVRSRV_ERROR_KERNEL_CCB_FULL;
+				}
+			}
+		}
+
+		if (bPoll)
+		{
+			PVRSRV_ERROR eErrPollForKCCBSlot;
+
+			/* Don't overwrite eError because if RGXPollKernelCCBSlot returns OK and the
+			 * outer loop times out, we'll still want to return KCCB_FULL to the caller
+			 */
+			eErrPollForKCCBSlot = RGXPollKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc,
+			                                           psDevInfo->psKernelCCBCtl);
+			if (eErrPollForKCCBSlot == PVRSRV_ERROR_KERNEL_CCB_FULL)
+			{
+				OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+				return PVRSRV_ERROR_KERNEL_CCB_FULL;
+			}
+		}
+
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+	return eError;
+}
+
+PVRSRV_ERROR RGXSendCommand(PVRSRV_RGXDEV_INFO	*psDevInfo,
+		RGXFWIF_DM			eKCCBType,
+		RGXFWIF_KCCB_CMD	*psKCCBCmd,
+		IMG_UINT32		uiPdumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_BOOL	bPoll = IMG_FALSE;
+
+	if (eKCCBType == RGXFWIF_DM_GP)
+	{
+		/* Do not defer GP cmds as the server will poll for their completion anyway */
+		bPoll = IMG_TRUE;
+	}
+
+	/* First try to Flush all the cmds in deferred list */
+	eError = RGXSendCommandsFromDeferredList(psDevInfo, bPoll);
+	if (eError == PVRSRV_OK)
+	{
+		eError = RGXSendCommandRaw(psDevInfo,
+				eKCCBType,
+				psKCCBCmd,
+				uiPdumpFlags);
+	}
+	/*
+	 * If we fail to enqueue one of the deferred commands, or the command passed
+	 * as argument, because the KCCB is full, insert the latter into the deferred
+	 * commands list. The deferred commands will also be flushed eventually by:
+	 *  - one more KCCB command sent for any DM
+	 *  - RGX_MISRHandler_CheckFWActivePowerState
+	 */
+	if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)
+	{
+		eError = _AllocDeferredCommand(psDevInfo, eKCCBType, psKCCBCmd, uiPdumpFlags);
+	}
+	return eError;
+}
+
+PVRSRV_ERROR RGXSendCommandWithPowLock(PVRSRV_RGXDEV_INFO	*psDevInfo,
+		RGXFWIF_DM			eKCCBType,
+		RGXFWIF_KCCB_CMD	*psKCCBCmd,
+		IMG_UINT32			ui32PDumpFlags)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+
+	/* Ensure Rogue is powered up before kicking MTS */
+	eError = PVRSRVPowerLock(psDeviceNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: failed to acquire powerlock (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+
+		goto _PVRSRVPowerLock_Exit;
+	}
+
+	PDUMPPOWCMDSTART();
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+			PVRSRV_DEV_POWER_STATE_ON,
+			IMG_FALSE);
+	PDUMPPOWCMDEND();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+
+		goto _PVRSRVSetDevicePowerStateKM_Exit;
+	}
+
+	eError = RGXSendCommand(psDevInfo, eKCCBType,  psKCCBCmd, ui32PDumpFlags);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: failed to schedule command (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+#if defined(DEBUG)
+		/* PVRSRVDebugRequest must be called without powerlock */
+		PVRSRVPowerUnlock(psDeviceNode);
+		PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+		goto _PVRSRVPowerLock_Exit;
+#endif
+	}
+
+	_PVRSRVSetDevicePowerStateKM_Exit:
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	_PVRSRVPowerLock_Exit:
+	return eError;
+}
+
+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hCmdCompHandle;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	OSScheduleMISR(psDevInfo->hProcessQueuesMISR);
+}
+
+/*!
+ ******************************************************************************
+
+ @Function	RGX_MISRHandler_ScheduleProcessQueues
+
+ @Description - Sends an uncounted kick to all the DMs (the FW will process
+				the queues of all the DMs)
+ ******************************************************************************/
+static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData)
+{
+	PVRSRV_DEVICE_NODE     *psDeviceNode = pvData;
+	PVRSRV_RGXDEV_INFO     *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR           eError;
+	PVRSRV_DEV_POWER_STATE ePowerState;
+
+	/* We don't need to acquire the BridgeLock as this power transition won't
+	   send a command to the FW */
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXScheduleProcessQueuesKM: failed to acquire powerlock (%s)",
+				PVRSRVGetErrorString(eError)));
+		return;
+	}
+
+	/* Check whether it's worth waking up the GPU */
+	eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) &&
+			(eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF))
+	{
+		/* For now, guest drivers will always wake up the GPU */
+		RGXFWIF_GPU_UTIL_FWCB  *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+		IMG_BOOL               bGPUHasWorkWaiting;
+
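+		/* The FW marks the GPU utilisation state as BLOCKED when work is
+		 * queued but cannot run, i.e. there is something worth powering
+		 * up for. */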
+		bGPUHasWorkWaiting =
+				(RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED);
+
+		if (!bGPUHasWorkWaiting)
+		{
+			/* all queues are empty, don't wake up the GPU */
+			PVRSRVPowerUnlock(psDeviceNode);
+			return;
+		}
+	}
+
+	PDUMPPOWCMDSTART();
+	/* wake up the GPU */
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+			PVRSRV_DEV_POWER_STATE_ON,
+			IMG_FALSE);
+	PDUMPPOWCMDEND();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXScheduleProcessQueuesKM: failed to transition Rogue to ON (%s)",
+				PVRSRVGetErrorString(eError)));
+
+		PVRSRVPowerUnlock(psDeviceNode);
+		return;
+	}
+
+	/* uncounted kick to the FW */
+	{
+		IMG_UINT32 ui32MTSRegVal;
+
+		if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) &&
+				!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)))
+		{
+			ui32MTSRegVal = ((RGXFWIF_DM_GP + PVRSRV_VZ_DRIVER_OSID) & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED;
+		}
+		else
+		{
+			ui32MTSRegVal = (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED;
+		}
+
+		HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED);
+		__MTSScheduleWrite(psDevInfo, ui32MTSRegVal);
+	}
+
+	PVRSRVPowerUnlock(psDeviceNode);
+}
+
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	return OSInstallMISR(phMISR,
+			RGX_MISRHandler_ScheduleProcessQueues,
+			psDeviceNode,
+			"RGX_ScheduleProcessQueues");
+}
+
+/*!
+ ******************************************************************************
+
+ @Function	RGXScheduleCommand
+
+ @Description - Submits a CCB command and kicks the firmware, but first schedules
+                any commands which have to happen beforehand (e.g. cache maintenance)
+
+ @Input psDevInfo		 - pointer to device info
+ @Input eKCCBType		 - see RGXFWIF_CMD_*
+ @Input psKCCBCmd		 - kernel CCB command
+ @Input ui32CacheOpFence - CPU dcache operation fence
+ @Input ui32PDumpFlags - PDUMP_FLAGS_CONTINUOUS bit set if the pdump flags should be continuous
+
+
+ @Return PVRSRV_ERROR
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXScheduleCommand(PVRSRV_RGXDEV_INFO	*psDevInfo,
+		RGXFWIF_DM			eKCCBType,
+		RGXFWIF_KCCB_CMD	*psKCCBCmd,
+		IMG_UINT32			ui32CacheOpFence,
+		IMG_UINT32			ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT16 uiMMUSyncUpdate;
+
+	eError = CacheOpFence(eKCCBType, ui32CacheOpFence);
+	if (unlikely(eError != PVRSRV_OK)) goto RGXScheduleCommand_exit;
+
+#if defined(SUPPORT_VALIDATION)
+	/* For validation, force the core to different dust count states with each kick */
+	if ((eKCCBType == RGXFWIF_DM_TA) || (eKCCBType == RGXFWIF_DM_CDM))
+	{
+		if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN)
+		{
+			IMG_UINT32 ui32NumDusts = RGXGetNextDustCount(&psDevInfo->sDustReqState, psDevInfo->sDevFeatureCfg.ui32MAXDustCount);
+			PVRSRVDeviceDustCountChange(psDevInfo->psDeviceNode, ui32NumDusts);
+		}
+	}
+#endif
+
+	/* PVRSRVPowerLock guarantees atomicity between commands. This is helpful
+	   in a scenario with several applications allocating resources. */
+	eError = PVRSRVPowerLock(psDevInfo->psDeviceNode);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)",
+				__func__, PVRSRVGetErrorString(eError)));
+
+		/* If the system is found powered off, retry scheduling the command */
+		if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF))
+		{
+			eError = PVRSRV_ERROR_RETRY;
+		}
+
+		goto RGXScheduleCommand_exit;
+	}
+
+	/* Ensure device is powered up before sending any commands */
+	PDUMPPOWCMDSTART();
+	eError = PVRSRVSetDevicePowerStateKM(psDevInfo->psDeviceNode,
+			PVRSRV_DEV_POWER_STATE_ON,
+			IMG_FALSE);
+	PDUMPPOWCMDEND();
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)",
+				__func__, PVRSRVGetErrorString(eError)));
+		goto _PVRSRVSetDevicePowerStateKM_Exit;
+	}
+
+	eError = RGXPreKickCacheCommand(psDevInfo, eKCCBType, &uiMMUSyncUpdate, IMG_FALSE);
+	if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit;
+
+	eError = RGXSendCommand(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags);
+	if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit;
+
+	_PVRSRVSetDevicePowerStateKM_Exit:
+	PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
+
+	RGXScheduleCommand_exit:
+	return eError;
+}
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+/*
+ * RGXCheckCheckpointCCB
+ */
+void RGXCheckCheckpointCCB(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	IMG_BOOL bSignal = IMG_FALSE;
+
+	PRGXFWIF_UFO_ADDR *psFwUFOAddr;
+	RGXFWIF_CCB_CTL *psChptCCBCtl = psDevInfo->psCheckpointCCBCtl;
+	IMG_UINT8 *psChptCCB = psDevInfo->psCheckpointCCB;
+	IMG_UINT32 ui32WriteOffset, ui32ReadOffset, ui32WrapMask = psChptCCBCtl->ui32WrapMask;
+	IMG_UINT32 uiFwAddr;
+	PVRSRV_SYNC_CHECKPOINT_STATE uiChptState;
+
+	/*
+	 * Check if the firmware has signalled a full sync state check.
+	 */
+	if (psDevInfo->psRGXFWIfTraceBuf->ui32FWSyncCheckMark != psDevInfo->psRGXFWIfTraceBuf->ui32HostSyncCheckMark)
+	{
+		/*
+		 * Update the offsets first so that if the firmware tries to write
+		 * another checkpoint it is not missed by the check state.
+		 */
+		psDevInfo->psRGXFWIfTraceBuf->ui32HostSyncCheckMark = psDevInfo->psRGXFWIfTraceBuf->ui32FWSyncCheckMark;
+		psChptCCBCtl->ui32ReadOffset = psChptCCBCtl->ui32WriteOffset;
+
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Checkpoint CCB full, performing full sync checkpoint state check", __func__));
+
+		SyncCheckpointCheckState();
+		bSignal = IMG_TRUE;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+		pvr_buffer_sync_check_state();
+#endif
+
+		goto exit_signal;
+	}
+
+	/*
+	 * Take a snapshot of the current CCB ctl pointers at the start of
+	 * processing.
+	 */
+	ui32WriteOffset = psChptCCBCtl->ui32WriteOffset;
+	ui32ReadOffset = psChptCCBCtl->ui32ReadOffset;
+	ui32WrapMask = psChptCCBCtl->ui32WrapMask;
+
+	while (ui32ReadOffset != ui32WriteOffset)
+	{
+		/* Point to the next checkpoint address */
+		psFwUFOAddr = ((PRGXFWIF_UFO_ADDR *)psChptCCB) + ui32ReadOffset;
+
+		/*
+		 * State is encoded in the least significant bit of the UFO address:
+		 * 1 = signalled, 0 = errored
+		 */
+		uiChptState = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+		uiFwAddr = psFwUFOAddr->ui32Addr;
+
+		if (uiFwAddr & 0x1U)
+		{
+			uiChptState = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+		}
+		uiFwAddr |= 0x1U;
+
+		if (SyncCheckpointUFOHasSignalled(psDeviceNode, uiFwAddr, uiChptState))
+		{
+			bSignal = IMG_TRUE;
+		}
+		else
+#if defined(SUPPORT_BUFFER_SYNC)
+		if (pvr_buffer_sync_checkpoint_ufo_has_signalled(uiFwAddr, uiChptState))
+		{
+			/* Buffer sync does not need a signal call. */
+		}
+		else
+#endif
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware signalled checkpoint (%#08X) with no host backing", __func__, uiFwAddr));
+		}
+
+		/* Update read offset */
+		ui32ReadOffset = (ui32ReadOffset + 1) & ui32WrapMask;
+	}
+
+	psChptCCBCtl->ui32ReadOffset = ui32ReadOffset;
+
+exit_signal:
+	if (bSignal)
+	{
+		SyncCheckpointSignalWaiters();
+	}
+}
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
+
+/*
+ * RGXCheckFirmwareCCB
+ */
+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_FWCCB_CMD *psFwCCBCmd;
+
+	RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl;
+	IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB;
+
+	while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset)
+	{
+		/* Point to the next command */
+		psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset;
+
+		HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType);
+		switch (psFwCCBCmd->eCmdType)
+		{
+		case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING:
+		{
+			if (psDevInfo->bPDPEnabled)
+			{
+				PDUMP_PANIC(ZSBUFFER_BACKING, "Request to add backing to ZSBuffer");
+			}
+			RGXProcessRequestZSBufferBacking(psDevInfo,
+					psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+			break;
+		}
+
+		case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING:
+		{
+			if (psDevInfo->bPDPEnabled)
+			{
+				PDUMP_PANIC(ZSBUFFER_UNBACKING, "Request to remove backing from ZSBuffer");
+			}
+			RGXProcessRequestZSBufferUnbacking(psDevInfo,
+					psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+			break;
+		}
+
+		case RGXFWIF_FWCCB_CMD_FREELIST_GROW:
+		{
+			if (psDevInfo->bPDPEnabled)
+			{
+				PDUMP_PANIC(FREELIST_GROW, "Request to grow the free list");
+			}
+			RGXProcessRequestGrow(psDevInfo,
+					psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID);
+			break;
+		}
+
+		case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
+		{
+			if (psDevInfo->bPDPEnabled)
+			{
+				PDUMP_PANIC(FREELISTS_RECONSTRUCTION, "Request to reconstruct free lists");
+			}
+
+			if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d) for %d freelists",
+						__func__,
+						psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+						psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+			}
+			else
+			{
+				PVR_ASSERT(psDevInfo->psRGXFWIfTraceBuf);
+				PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d/%d) for %d freelists",
+						__func__,
+						psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+						psDevInfo->psRGXFWIfTraceBuf->ui32HwrCounter+1,
+						psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+			}
+
+			RGXProcessRequestFreelistsReconstruction(psDevInfo,
+					psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount,
+					psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs);
+			break;
+		}
+
+		case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION:
+		{
+			DLLIST_NODE *psNode, *psNext;
+			RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification =
+					&psFwCCBCmd->uCmdData.sCmdContextResetNotification;
+			IMG_UINT32 ui32ServerCommonContextID =
+					psCmdContextResetNotification->ui32ServerCommonContextID;
+			RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL;
+
+			OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock);
+			dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext)
+			{
+				RGX_SERVER_COMMON_CONTEXT *psThisContext =
+						IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+				if (psThisContext->ui32ContextID == ui32ServerCommonContextID)
+				{
+					psServerCommonContext = psThisContext;
+					break;
+				}
+			}
+
+			PVR_DPF((PVR_DBG_MESSAGE, "%s: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)",
+					__func__,
+					psServerCommonContext,
+					psCmdContextResetNotification->ui32ServerCommonContextID,
+					(IMG_UINT32)(psCmdContextResetNotification->eResetReason),
+					psCmdContextResetNotification->ui32ResetJobRef));
+
+			if (psServerCommonContext != NULL)
+			{
+				psServerCommonContext->eLastResetReason    = psCmdContextResetNotification->eResetReason;
+				psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef;
+			}
+			OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock);
+
+			if (psCmdContextResetNotification->bPageFault)
+			{
+				DevmemIntPFNotify(psDevInfo->psDeviceNode,
+						psCmdContextResetNotification->ui64PCAddress,
+						psCmdContextResetNotification->sFaultAddress);
+			}
+			break;
+		}
+
+		case RGXFWIF_FWCCB_CMD_DEBUG_DUMP:
+		{
+			RGXDumpDebugInfo(NULL,NULL,psDevInfo);
+			/* Notify the OS of an issue that triggered a debug dump */
+			OSWarnOn(IMG_TRUE);
+			break;
+		}
+
+		case RGXFWIF_FWCCB_CMD_UPDATE_STATS:
+		{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+			IMG_PID pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner;
+			IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue;
+
+			switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate)
+			{
+			case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS:
+			{
+				PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue,0,0,0,0,0,pidTmp);
+				break;
+			}
+			case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY:
+			{
+				PVRSRVStatsUpdateRenderContextStats(0,i32AdjustmentValue,0,0,0,0,pidTmp);
+				break;
+			}
+			case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES:
+			{
+				PVRSRVStatsUpdateRenderContextStats(0,0,i32AdjustmentValue,0,0,0,pidTmp);
+				break;
+			}
+			case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES:
+			{
+				PVRSRVStatsUpdateRenderContextStats(0,0,0,i32AdjustmentValue,0,0,pidTmp);
+				break;
+			}
+			case RGXFWIF_FWCCB_CMD_UPDATE_NUM_SH_STORES:
+			{
+				PVRSRVStatsUpdateRenderContextStats(0,0,0,0,i32AdjustmentValue,0,pidTmp);
+				break;
+			}
+			case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES:
+			{
+				PVRSRVStatsUpdateRenderContextStats(0,0,0,0,0,i32AdjustmentValue,pidTmp);
+				break;
+			}
+			}
+#endif
+			break;
+		}
+		case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE:
+		{
+#if defined(SUPPORT_PDVFS)
+			PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo,
+					psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate);
+#endif
+			break;
+		}
+
+		case RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART:
+		{
+			if (psDevInfo->psRGXFWIfTraceBuf != NULL  &&
+					psDevInfo->psRGXFWIfTraceBuf->ePowState != RGXFWIF_POW_OFF)
+			{
+				PVRSRV_ERROR  eError;
+
+				/* Power down... */
+				eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode,
+						PVRSRV_SYS_POWER_STATE_OFF);
+				if (eError == PVRSRV_OK)
+				{
+					/* Clear the FW faulted flags... */
+					psDevInfo->psRGXFWIfTraceBuf->ui32HWRStateFlags &= ~(RGXFWIF_HWR_FW_FAULT|RGXFWIF_HWR_RESTART_REQUESTED);
+
+					/* Power back up again... */
+					eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode,
+							PVRSRV_SYS_POWER_STATE_ON);
+
+					/* Send a dummy KCCB command to ensure the FW wakes up and checks the queues... */
+					if (eError == PVRSRV_OK)
+					{
+						LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+						{
+							eError = RGXFWHealthCheckCmd(psDevInfo);
+							if (eError != PVRSRV_ERROR_RETRY)
+							{
+								break;
+							}
+							OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+						} END_LOOP_UNTIL_TIMEOUT();
+					}
+				}
+
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: Failed firmware restart (%s)",
+							__func__, PVRSRVGetErrorString(eError)));
+				}
+			}
+			break;
+		}
+
+		default:
+		{
+			/* unknown command */
+			PVR_DPF((PVR_DBG_WARNING, "%s: Unknown Command (eCmdType=0x%08x)",
+					__func__, psFwCCBCmd->eCmdType));
+			/* Assert on magic value corruption */
+			PVR_ASSERT((((IMG_UINT32)psFwCCBCmd->eCmdType & RGX_CMD_MAGIC_DWORD_MASK) >> RGX_CMD_MAGIC_DWORD_SHIFT) == RGX_CMD_MAGIC_DWORD);
+		}
+		}
+
+		/* Update read offset */
+		psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask;
+	}
+}
+
+/*
+ * PVRSRVRGXFrameworkCopyCommand
+ */
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC	*psFWFrameworkMemDesc,
+		IMG_PBYTE		pbyGPUFRegisterList,
+		IMG_UINT32		ui32FrameworkRegisterSize)
+{
+	PVRSRV_ERROR	eError;
+	RGXFWIF_RF_REGISTERS	*psRFReg;
+
+	eError = DevmemAcquireCpuVirtAddr(psFWFrameworkMemDesc,
+			(void **)&psRFReg);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFrameworkCopyCommand: Failed to map firmware render context state (%u)",
+				eError));
+		return eError;
+	}
+
+	OSDeviceMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize);
+
+	/* Release the CPU mapping */
+	DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc);
+
+	/*
+	 * Dump the FW framework buffer
+	 */
+#if defined(PDUMP)
+	PDUMPCOMMENT("Dump FWFramework buffer");
+	DevmemPDumpLoadMem(psFWFrameworkMemDesc, 0, ui32FrameworkRegisterSize, PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXFrameworkCreateKM
+ */
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+		DEVMEM_MEMDESC		**ppsFWFrameworkMemDesc,
+		IMG_UINT32			ui32FrameworkCommandSize)
+{
+	PVRSRV_ERROR			eError;
+	PVRSRV_RGXDEV_INFO		*psDevInfo = psDeviceNode->pvDevice;
+
+	/*
+		Allocate device memory for the firmware GPU framework state.
+		Sufficient info to kick one or more DMs should be contained in this buffer
+	 */
+	PDUMPCOMMENT("Allocate Rogue firmware framework state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+			ui32FrameworkCommandSize,
+			RGX_FWCOMCTX_ALLOCFLAGS,
+			"FwGPUFrameworkState",
+			ppsFWFrameworkMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXFrameworkCreateKM: Failed to allocate firmware framework state (%u)",
+				eError));
+		return eError;
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXWaitForFWOp(PVRSRV_RGXDEV_INFO	*psDevInfo,
+		RGXFWIF_DM eDM,
+		PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+		IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+	RGXFWIF_KCCB_CMD	sCmdSyncPrim;
+
+	/* Setup sync primitive */
+	eError = SyncPrimSet(psSyncPrim, 0);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set SyncPrim (%u)",
+				__func__, eError));
+		goto _Error_Exit;
+	}
+
+	/* prepare a sync command */
+	eError = SyncPrimGetFirmwareAddr(psSyncPrim,
+			&sCmdSyncPrim.uCmdData.sSyncData.sSyncObjDevVAddr.ui32Addr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to get SyncPrim FW address(%u)",
+				__func__, eError));
+		goto _Error_Exit;
+	}
+	sCmdSyncPrim.eCmdType = RGXFWIF_KCCB_CMD_SYNC;
+	sCmdSyncPrim.uCmdData.sSyncData.uiUpdateVal = 1;
+
+	PDUMPCOMMENT("RGXWaitForFWOp: Submit Kernel SyncPrim [0x%08x] to DM %d",
+			sCmdSyncPrim.uCmdData.sSyncData.sSyncObjDevVAddr.ui32Addr, eDM);
+
+	eError = RGXScheduleCommand(psDevInfo,
+			eDM,
+			&sCmdSyncPrim,
+			0,
+			ui32PDumpFlags);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to schedule Kernel SyncPrim with error (%u)",
+				__func__,
+				eError));
+		goto _Error_Exit;
+	}
+
+	/* Wait for sync primitive to be updated */
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXWaitForFWOp: Poll for Kernel SyncPrim [0x%08x] on DM %d",
+			sCmdSyncPrim.uCmdData.sSyncData.sSyncObjDevVAddr.ui32Addr, eDM);
+
+	SyncPrimPDumpPol(psSyncPrim,
+			1,
+			0xffffffff,
+			PDUMP_POLL_OPERATOR_EQUAL,
+			ui32PDumpFlags);
+#endif
+
+	{
+		RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+		IMG_UINT32 ui32CurrentQueueLength =
+				(psKCCBCtl->ui32WrapMask+1 +
+						psKCCBCtl->ui32WriteOffset -
+						psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
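+		/*
+		 * Worked example (sketch): with ui32WrapMask = 0x3F (64 slots),
+		 * roff = 60 and woff = 2, the queue holds
+		 * (64 + 2 - 60) & 0x3F = 6 commands.
+		 */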
+		IMG_UINT32 ui32MaxRetries;
+
+		ui32CurrentQueueLength += psDevInfo->ui32KCCBDeferredCommandsCount;
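+		/* Scale the retry budget with the worst-case number of commands
+		 * ahead of ours: up to three timed-out waits per outstanding
+		 * command. */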
+		for (ui32MaxRetries = (ui32CurrentQueueLength + 1) * 3;
+				ui32MaxRetries > 0;
+				ui32MaxRetries--)
+		{
+			eError = PVRSRVWaitForValueKMAndHoldBridgeLockKM(psSyncPrim->pui32LinAddr, 1, 0xffffffff);
+
+			if (eError != PVRSRV_ERROR_TIMEOUT)
+			{
+				break;
+			}
+
+			/*
+			 * In case the KCCB was full we must ensure we flush any deferred
+			 * commands because they may be preventing the RGXFWIF_KCCB_CMD_SYNC
+			 * from being sent. No need to check the error, if the KCCB is
+			 * still full then we wait anyway.
+			 */
+
+			if (PVRSRVPowerLock(psDeviceNode) != PVRSRV_OK)
+			{
+				/* RGXSendCommandsFromDeferredList should be called while holding PowerLock */
+				PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire PowerLock (device: %p)",
+					__func__, psDeviceNode));
+				continue;
+			}
+
+			RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE);
+
+			PVRSRVPowerUnlock(psDeviceNode);
+		}
+
+		if (eError == PVRSRV_ERROR_TIMEOUT)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: PVRSRVWaitForValueKMAndHoldBridgeLock timed out. Dump debug information.",
+					__func__));
+			PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+			PVR_ASSERT(eError != PVRSRV_ERROR_TIMEOUT);
+			goto _Error_Exit;
+		}
+	}
+
+	_Error_Exit:
+	return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE  *psDevNode,
+												volatile IMG_UINT32	__iomem *pui32LinMemAddr,
+												IMG_UINT32			ui32Value,
+												IMG_UINT32			ui32Mask)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	RGXFWIF_CCB_CTL *psKCCBCtl;
+	IMG_UINT32 ui32CurrentQueueLength, ui32MaxRetries;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDevNode->pvDevice;
+
+	psKCCBCtl = psDevInfo->psKernelCCBCtl;
+	ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 +
+					psKCCBCtl->ui32WriteOffset -
+					psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
+	ui32CurrentQueueLength += psDevInfo->ui32KCCBDeferredCommandsCount;
+
+	for (ui32MaxRetries = ui32CurrentQueueLength + 1;
+				ui32MaxRetries > 0;
+				ui32MaxRetries--)
+	{
+		eError = PVRSRVPollForValueKM(pui32LinMemAddr, ui32Value, ui32Mask);
+		if (eError != PVRSRV_ERROR_TIMEOUT)
+		{
+			break;
+		}
+
+		RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE);
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed! Error(%s) CPU linear address(%p) Expected value(%u)",
+		                        __func__, PVRSRVGetErrorString(eError),
+								pui32LinMemAddr, ui32Value));
+		PVRSRVDebugRequest(psDevNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32Config,
+		IMG_UINT32 *pui32ConfigState,
+		IMG_BOOL bSetNotClear)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEV_POWER_STATE ePowerState;
+	RGXFWIF_KCCB_CMD sStateFlagCmd;
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	RGXFWIF_OS_CONFIG *psOSConfig;
+
+	if (!psDevInfo)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psDeviceNode = psDevInfo->psDeviceNode;
+	psOSConfig = psDevInfo->psFWIfOSConfig;
+
+	if (NULL == psOSConfig)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: OS Config is not mapped into CPU space", __func__));
+		return PVRSRV_ERROR_INVALID_CPU_ADDR;
+	}
+
+	/* apply change and ensure the new data is written to memory
+	 * before requesting the FW to read it
+	 */
+	ui32Config = ui32Config & RGXFWIF_INICFG_ALL;
+	if (bSetNotClear)
+	{
+		psOSConfig->ui32ConfigFlags |= ui32Config;
+	}
+	else
+	{
+		psOSConfig->ui32ConfigFlags &= ~ui32Config;
+	}
+
+	/* return current/new value to caller */
+	if (pui32ConfigState)
+	{
+		*pui32ConfigState = psOSConfig->ui32ConfigFlags;
+	}
+
+	OSMemoryBarrier();
+
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire power lock (%u)", __func__, eError));
+		goto error_lock;
+	}
+
+	/* notify FW to update setting */
+	eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+	if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+	{
+		/* Ask the FW to update its cached version of the value */
+		sStateFlagCmd.eCmdType = RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL;
+
+		eError = RGXSendCommand(psDevInfo,
+				RGXFWIF_DM_GP,
+				&sStateFlagCmd,
+				PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: RGXSendCommand failed. Error:%u", __func__, eError));
+			goto error_cmd;
+		}
+		else
+		{
+			/* Give up the power lock as it's acquired in RGXWaitForFWOp */
+			PVRSRVPowerUnlock(psDeviceNode);
+
+			/* Wait for the value to be updated as the FW validates
+			 * the parameters and modifies the ui32ConfigFlags
+			 * accordingly
+			 * (for completeness as registered callbacks should also
+			 *  not permit invalid transitions)
+			 */
+			eError = RGXWaitForFWOp(psDevInfo, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"%s: Waiting for value aborted with error (%u)", __func__, eError));
+			}
+			goto error_lock;
+		}
+	}
+
+	error_cmd:
+	PVRSRVPowerUnlock(psDeviceNode);
+	error_lock:
+	return eError;
+}
+
+static
+PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO	*psDevInfo,
+		RGXFWIF_DM			eDM,
+		RGXFWIF_KCCB_CMD		*psKCCBCmd,
+		RGXFWIF_CLEANUP_TYPE	eCleanupType,
+		PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+		IMG_UINT32				ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	psKCCBCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP;
+
+	psKCCBCmd->uCmdData.sCleanupData.eCleanupType = eCleanupType;
+	eError = SyncPrimGetFirmwareAddr(psSyncPrim, &psKCCBCmd->uCmdData.sCleanupData.sSyncObjDevVAddr.ui32Addr);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_command;
+	}
+
+	eError = SyncPrimSet(psSyncPrim, 0);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_command;
+	}
+
+	/*
+		Send the cleanup request to the firmware. If the resource is still busy
+		the firmware will tell us and we'll drop out with a retry.
+	 */
+	eError = RGXScheduleCommand(psDevInfo,
+			eDM,
+			psKCCBCmd,
+			0,
+			ui32PDumpFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_command;
+	}
+
+	/* Wait for sync primitive to be updated */
+#if defined(PDUMP)
+	PDUMPCOMMENT("Wait for the firmware to reply to the cleanup command");
+	SyncPrimPDumpPol(psSyncPrim,
+			RGXFWIF_CLEANUP_RUN,
+			RGXFWIF_CLEANUP_RUN,
+			PDUMP_POLL_OPERATOR_EQUAL,
+			ui32PDumpFlags);
+
+	/*
+	 * The cleanup request to the firmware will tell us if a given resource is busy or not.
+	 * If the RGXFWIF_CLEANUP_BUSY flag is set, this means that the resource is still in use.
+	 * In this case we return a PVRSRV_ERROR_RETRY error to the client drivers and they will
+	 * re-issue the cleanup request until it succeeds.
+	 *
+	 * Since this retry mechanism doesn't work for pdumps, client drivers should ensure
+	 * that cleanup requests are only submitted if the resource is unused.
+	 * If this is not the case, the following poll will block infinitely, making sure
+	 * the issue doesn't go unnoticed.
+	 */
+	PDUMPCOMMENT("Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps",
+			eDM,
+			psKCCBCmd->uCmdData.sCleanupData.eCleanupType,
+			psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr);
+	SyncPrimPDumpPol(psSyncPrim,
+			0,
+			RGXFWIF_CLEANUP_BUSY,
+			PDUMP_POLL_OPERATOR_EQUAL,
+			ui32PDumpFlags);
+#endif
+
+	{
+		RGXFWIF_CCB_CTL  *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+		IMG_UINT32       ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 +
+				psKCCBCtl->ui32WriteOffset -
+				psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
+		IMG_UINT32       ui32MaxRetries;
+
+		ui32CurrentQueueLength += psDevInfo->ui32KCCBDeferredCommandsCount;
+		for (ui32MaxRetries = ui32CurrentQueueLength + 1;
+				ui32MaxRetries > 0;
+				ui32MaxRetries--)
+		{
+			eError = PVRSRVWaitForValueKMAndHoldBridgeLockKM(psSyncPrim->pui32LinAddr, RGXFWIF_CLEANUP_RUN, RGXFWIF_CLEANUP_RUN);
+
+			if (eError != PVRSRV_ERROR_TIMEOUT)
+			{
+				break;
+			}
+
+			if (PVRSRVPowerLock(psDevInfo->psDeviceNode) != PVRSRV_OK)
+			{
+				/* RGXSendCommandsFromDeferredList should be called while holding PowerLock */
+				PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire PowerLock (device: %p)",
+					__func__, psDevInfo->psDeviceNode));
+				continue;
+			}
+
+			RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE);
+
+			PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
+		}
+
+		/*
+			If the firmware hasn't got back to us in a timely manner
+			then bail and let the caller retry the command.
+		 */
+		if (eError == PVRSRV_ERROR_TIMEOUT)
+		{
+			PVR_DPF((PVR_DBG_WARNING,"RGXScheduleCleanupCommand: PVRSRVWaitForValueKMAndHoldBridgeLock timed out. Dump debug information."));
+
+			eError = PVRSRV_ERROR_RETRY;
+#if defined(DEBUG)
+			PVRSRVDebugRequest(psDevInfo->psDeviceNode,
+					DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+#endif
+			goto fail_poll;
+		}
+		else if (eError != PVRSRV_OK)
+		{
+			goto fail_poll;
+		}
+	}
+
+	/*
+		If the command was run but a resource was busy, then the request
+		will need to be retried.
+	 */
+	if (OSReadDeviceMem32(psSyncPrim->pui32LinAddr) & RGXFWIF_CLEANUP_BUSY)
+	{
+		eError = PVRSRV_ERROR_RETRY;
+		goto fail_requestbusy;
+	}
+
+	return PVRSRV_OK;
+
+	fail_requestbusy:
+	fail_poll:
+	fail_command:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+/*
+	RGXFWRequestCommonContextCleanUp
+ */
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+		RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+		PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+		RGXFWIF_DM eDM,
+		IMG_UINT32 ui32PDumpFlags)
+{
+	RGXFWIF_KCCB_CMD			sRCCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+	PRGXFWIF_FWCOMMONCONTEXT	psFWCommonContextFWAddr;
+	PVRSRV_RGXDEV_INFO			*psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+
+	/* Force retry if this context's CCB is currently being dumped
+	 * as part of the stalled CCB debug */
+	if (psDevInfo->pvEarliestStalledClientCCB == (void*)psServerCommonContext->psClientCCB)
+	{
+		PVR_DPF((PVR_DBG_WARNING,"%s: Forcing retry as psDevInfo->pvEarliestStalledClientCCB = psServerCommonContext->psClientCCB <%p>",
+				 __func__, (void*)psServerCommonContext->psClientCCB));
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	psFWCommonContextFWAddr = FWCommonContextGetFWAddress(psServerCommonContext);
+#if defined(PDUMP)
+	PDUMPCOMMENT("Common ctx cleanup Request DM%d [context = 0x%08x]",
+			eDM, psFWCommonContextFWAddr.ui32Addr);
+	PDUMPCOMMENT("Wait for CCB to be empty before common ctx cleanup");
+
+	RGXCCBPDumpDrainCCB(FWCommonContextGetClientCCB(psServerCommonContext), ui32PDumpFlags);
+#endif
+
+	/* Setup our command data, the cleanup call will fill in the rest */
+	sRCCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psContext = psFWCommonContextFWAddr;
+
+	/* Request cleanup of the firmware resource */
+	eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+			eDM,
+			&sRCCleanUpCmd,
+			RGXFWIF_CLEANUP_FWCOMMONCONTEXT,
+			psSyncPrim,
+			ui32PDumpFlags);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXFWRequestCommonContextCleanUp: Failed to schedule a common context cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+/*
+ * RGXFWRequestHWRTDataCleanUp
+ */
+
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+		PRGXFWIF_HWRTDATA psHWRTData,
+		PVRSRV_CLIENT_SYNC_PRIM *psSync,
+		RGXFWIF_DM eDM)
+{
+	RGXFWIF_KCCB_CMD			sHWRTDataCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+
+	PDUMPCOMMENT("HW RTData cleanup Request DM%d [HWRTData = 0x%08x]", eDM, psHWRTData.ui32Addr);
+
+	sHWRTDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWRTData = psHWRTData;
+
+	eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+			eDM,
+			&sHWRTDataCleanUpCmd,
+			RGXFWIF_CLEANUP_HWRTDATA,
+			psSync,
+			PDUMP_FLAGS_NONE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestHWRTDataCleanUp: Failed to schedule a HWRTData cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+/*
+	RGXFWRequestFreeListCleanUp
+ */
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+		PRGXFWIF_FREELIST psFWFreeList,
+		PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	RGXFWIF_KCCB_CMD			sFLCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+
+	PDUMPCOMMENT("Free list cleanup Request [FreeList = 0x%08x]", psFWFreeList.ui32Addr);
+
+	/* Setup our command data, the cleanup call will fill in the rest */
+	sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList;
+
+	/* Request cleanup of the firmware resource */
+	eError = RGXScheduleCleanupCommand(psDevInfo,
+			RGXFWIF_DM_GP,
+			&sFLCleanUpCmd,
+			RGXFWIF_CLEANUP_FREELIST,
+			psSync,
+			PDUMP_FLAGS_NONE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXFWRequestFreeListCleanUp: Failed to schedule a free list cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+/*
+	RGXFWRequestZSBufferCleanUp
+ */
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+		PRGXFWIF_ZSBUFFER psFWZSBuffer,
+		PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	RGXFWIF_KCCB_CMD			sZSBufferCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+
+	PDUMPCOMMENT("ZS Buffer cleanup Request [ZS Buffer = 0x%08x]", psFWZSBuffer.ui32Addr);
+
+	/* Setup our command data, the cleanup call will fill in the rest */
+	sZSBufferCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psZSBuffer = psFWZSBuffer;
+
+	/* Request cleanup of the firmware resource */
+	eError = RGXScheduleCleanupCommand(psDevInfo,
+			RGXFWIF_DM_3D,
+			&sZSBufferCleanUpCmd,
+			RGXFWIF_CLEANUP_ZSBUFFER,
+			psSync,
+			PDUMP_FLAGS_NONE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXFWRequestZSBufferCleanUp: Failed to schedule a ZS buffer cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32HCSDeadlineMs)
+{
+	PVRSRV_ERROR eError;
+	RGXFWIF_KCCB_CMD	sSetHCSDeadline;
+
+	sSetHCSDeadline.eCmdType                            = RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE;
+	sSetHCSDeadline.eDM                                 = RGXFWIF_DM_GP;
+	sSetHCSDeadline.uCmdData.sHCSCtrl.ui32HCSDeadlineMS = ui32HCSDeadlineMs;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+				RGXFWIF_DM_GP,
+				&sSetHCSDeadline,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	return eError;
+}
+
+PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_KCCB_CMD	sCmpKCCBCmd;
+
+	sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;
+
+	return	RGXScheduleCommand(psDevInfo,
+							   RGXFWIF_DM_GP,
+							   &sCmpKCCBCmd,
+							   0,
+							   PDUMP_FLAGS_CONTINUOUS);
+}
+
+PVRSRV_ERROR RGXFWSetOSIsolationThreshold(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32IsolationPriorityThreshold)
+{
+	PVRSRV_ERROR eError;
+	RGXFWIF_KCCB_CMD	sOSidIsoConfCmd;
+
+	sOSidIsoConfCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE;
+	sOSidIsoConfCmd.uCmdData.sCmdOSidIsolationData.ui32IsolationPriorityThreshold = ui32IsolationPriorityThreshold;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+				RGXFWIF_DM_GP,
+				&sOSidIsoConfCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	return eError;
+}
+
+PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32OSid,
+							   RGXFWIF_OS_STATE_CHANGE eOSOnlineState)
+{
+	PVRSRV_ERROR         eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD     sOSOnlineStateCmd;
+	RGXFWIF_TRACEBUF    *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE;
+	sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid;
+	sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState;
+
+	if (eOSOnlineState == RGXFWIF_OS_ONLINE)
+	{
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psDevInfo,
+					RGXFWIF_DM_GP,
+					&sOSOnlineStateCmd,
+					0,
+					PDUMP_FLAGS_CONTINUOUS);
+			if (eError != PVRSRV_ERROR_RETRY) break;
+
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+	}
+	else if (psRGXFWIfTraceBuf)
+	{
+		volatile RGXFWIF_PER_OS_STATES *psPerOsState;
+
+		psPerOsState = (volatile RGXFWIF_PER_OS_STATES*) &psRGXFWIfTraceBuf->sPerOsStateMirror[ui32OSid];
+		/* Attempt several times until the FW manages to offload the OS */
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			/* Send request */
+			eError = RGXScheduleCommand(psDevInfo,
+					RGXFWIF_DM_GP,
+					&sOSOnlineStateCmd,
+					0,
+					PDUMP_FLAGS_CONTINUOUS);
+			if (unlikely(eError == PVRSRV_ERROR_RETRY)) continue;
+			PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", return_);
+
+			/* Wait for FW to process the cmd */
+			eError = RGXWaitForFWOp(psDevInfo,
+					RGXFWIF_DM_GP,
+					psDevInfo->psDeviceNode->psSyncPrim,
+					PDUMP_FLAGS_CONTINUOUS);
+			PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", return_);
+
+			/* read the OS state */
+			OSMemoryBarrier();
+			/* check if FW finished offloading the OSID and is stopped */
+			if (psPerOsState->bfOsState == RGXFW_OS_STATE_STOPPED)
+			{
+				eError = PVRSRV_OK;
+				break;
+			}
+			else
+			{
+				eError = PVRSRV_ERROR_TIMEOUT;
+			}
+
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+	}
+	else
+	{
+		eError = PVRSRV_ERROR_NOT_INITIALISED;
+	}
+
+	return_:
+	return eError;
+}
+
+PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32OSid,
+		IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+	RGXFWIF_KCCB_CMD	sOSidPriorityCmd;
+
+	sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE;
+	sOSidPriorityCmd.uCmdData.sCmdOSidPriorityData.ui32OSidNum = ui32OSid;
+	sOSidPriorityCmd.uCmdData.sCmdOSidPriorityData.ui32Priority = ui32Priority;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+				RGXFWIF_DM_GP,
+				&sOSidPriorityCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	return eError;
+}
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+		CONNECTION_DATA *psConnection,
+		PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32Priority,
+		RGXFWIF_DM eDM)
+{
+	IMG_UINT32				ui32CmdSize;
+	IMG_UINT8				*pui8CmdPtr;
+	RGXFWIF_KCCB_CMD		sPriorityCmd;
+	RGXFWIF_CCB_CMD_HEADER	*psCmdHeader;
+	RGXFWIF_CMD_PRIORITY	*psCmd;
+	PVRSRV_ERROR			eError;
+
+	/*
+		Get space for command
+	 */
+	ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY));
+
+	eError = RGXAcquireCCB(FWCommonContextGetClientCCB(psContext),
+			ui32CmdSize,
+			(void **) &pui8CmdPtr,
+			PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __func__));
+		}
+		goto fail_ccbacquire;
+	}
+
+	/*
+		Write the command header and command
+	 */
+	psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+	psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY;
+	psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY));
+	pui8CmdPtr += sizeof(*psCmdHeader);
+
+	psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr;
+	psCmd->ui32Priority = ui32Priority;
+	pui8CmdPtr += sizeof(*psCmd);
+
+	/*
+		We should reserve space in the kernel CCB here and fill in the command
+		directly.
+		That way, if there isn't space in the kernel CCB, we can return with
+		retry back to the services client before taking any further action.
+	 */
+
+	/*
+		Submit the command
+	 */
+	RGXReleaseCCB(FWCommonContextGetClientCCB(psContext),
+			ui32CmdSize,
+			PDUMP_FLAGS_CONTINUOUS);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __func__));
+		return eError;
+	}
+
+	/* Construct the priority command. */
+	sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+	sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext);
+	sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psContext));
+	sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+	sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+				eDM,
+				&sPriorityCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to submit set priority command with error (%u)",
+				__func__,
+				eError));
+		return eError;
+	}
+
+	return PVRSRV_OK;
+
+	fail_ccbacquire:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+	RGXReadMETAAddr
+ */
+PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO	*psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value)
+{
+	IMG_UINT8 __iomem  *pui8RegBase = psDevInfo->pvRegsBaseKM;
+	IMG_UINT32 ui32Value;
+
+	/* Wait for Slave Port to be Ready */
+	if (PVRSRVPollForValueKM(
+			(IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+			RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+			RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+	{
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	/* Issue the Read */
+	OSWriteHWReg32(
+			psDevInfo->pvRegsBaseKM,
+			RGX_CR_META_SP_MSLVCTRL0,
+			ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+
+	/* Wait for Slave Port to be Ready: read complete */
+	if (PVRSRVPollForValueKM(
+			(IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+			RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+			RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+	{
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	/* Read the value */
+	ui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAX);
+
+	*pui32Value = ui32Value;
+
+	return PVRSRV_OK;
+}
+
+/*
+	RGXWriteMETAAddr
+ */
+PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value)
+{
+	IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM;
+
+	/* Wait for Slave Port to be Ready */
+	if (PVRSRVPollForValueKM((IMG_UINT32 __iomem *)
+			(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+			RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+			RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+	{
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	/* Issue the Write */
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr);
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value);
+
+	return PVRSRV_OK;
+}
+
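+/* Hedged usage sketch: the two slave-port accessors above combine naturally
+ * into a read-modify-write of a word in META memory space. The helper name
+ * and the mask/value parameters below are hypothetical; the two calls and
+ * their signatures are exactly as defined above.
+ */
+#if 0	/* illustration only */
+static PVRSRV_ERROR RGXModifyMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32METAAddr,
+		IMG_UINT32 ui32Mask,
+		IMG_UINT32 ui32NewBits)
+{
+	IMG_UINT32 ui32Current;
+	PVRSRV_ERROR eError;
+
+	/* Read the current value through the slave port */
+	eError = RGXReadMETAAddr(psDevInfo, ui32METAAddr, &ui32Current);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* Clear the masked field and write back the updated word */
+	return RGXWriteMETAAddr(psDevInfo, ui32METAAddr,
+			(ui32Current & ~ui32Mask) | (ui32NewBits & ui32Mask));
+}
+#endif
+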
+void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious)
+{
+	/* Attempt to detect and deal with any stalled client contexts.
+	 * bIgnorePrevious may be set by the caller if they know a context to be
+	 * stalled, as otherwise this function will only identify stalled
+	 * contexts which have not been previously reported.
+	 */
+
+	IMG_UINT32 ui32StalledClientMask = 0;
+
+	if (!(OSTryLockAcquire(psDevInfo->hCCBStallCheckLock)))
+	{
+		PVR_LOG(("RGXCheckForStalledClientContexts: Failed to acquire hCCBStallCheckLock, returning..."));
+		return;
+	}
+
+	ui32StalledClientMask |= CheckForStalledClientTransferCtxt(psDevInfo);
+
+	ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo);
+
+	ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo);
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+	{
+		ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo);
+	}
+
+	/* If at least one DM stalled bit is different than before */
+	if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask))
+	{
+		if (ui32StalledClientMask > 0)
+		{
+			static __maybe_unused const char *pszStalledAction =
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+					"force";
+#else
+					"warn";
+#endif
+			/* Print all the stalled DMs */
+			PVR_LOG(("Possible stalled client RGX contexts detected: %s%s%s%s%s%s%s%s%s",
+					 RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GP),
+					 RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TDM_2D),
+					 RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TA),
+					 RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_3D),
+					 RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_CDM),
+					 RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_RTU),
+					 RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_SHG),
+					 RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ2D),
+					 RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ3D)));
+
+			PVR_LOG(("Trying to identify stalled context...(%s) [%d]",
+			         pszStalledAction, bIgnorePrevious));
+
+			DumpStalledContextInfo(psDevInfo);
+		}
+		else
+		{
+			if (psDevInfo->ui32StalledClientMask > 0)
+			{
+				/* Indicate there are no stalled DMs */
+				PVR_LOG(("No further stalled client contexts exist"));
+			}
+		}
+		psDevInfo->ui32StalledClientMask = ui32StalledClientMask;
+		psDevInfo->pvEarliestStalledClientCCB = NULL;
+	}
+	OSLockRelease(psDevInfo->hCCBStallCheckLock);
+}
+
+/*
+	RGXUpdateHealthStatus
+ */
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+		IMG_BOOL bCheckAfterTimePassed)
+{
+	PVRSRV_DATA*                 psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_HEALTH_STATUS  eNewStatus   = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+	PVRSRV_DEVICE_HEALTH_REASON  eNewReason   = PVRSRV_DEVICE_HEALTH_REASON_NONE;
+	PVRSRV_RGXDEV_INFO*  psDevInfo;
+	RGXFWIF_TRACEBUF*  psRGXFWIfTraceBufCtl;
+	RGXFWIF_CCB_CTL *psKCCBCtl;
+	IMG_UINT32  ui32ThreadCount;
+	IMG_BOOL  bKCCBCmdsWaiting;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	PVR_ASSERT(psDevNode != NULL);
+	psDevInfo = psDevNode->pvDevice;
+	psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+	/* If the firmware is not initialised, there is not much point continuing! */
+	if (!psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL ||
+			psDevInfo->psDeviceNode == NULL)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* If this is a quick update, then include the last current value... */
+	if (!bCheckAfterTimePassed)
+	{
+		eNewStatus = OSAtomicRead(&psDevNode->eHealthStatus);
+		eNewReason = OSAtomicRead(&psDevNode->eHealthReason);
+	}
+
+	/* Decrement the SLR holdoff counter (if non-zero) */
+	if (psDevInfo->ui32SLRHoldoffCounter > 0)
+	{
+		psDevInfo->ui32SLRHoldoffCounter--;
+	}
+
+	/* If Rogue is not powered on, just skip ahead and check for stalled client CCBs */
+	if (PVRSRVIsDevicePowered(psDevNode))
+	{
+		if (psRGXFWIfTraceBufCtl != NULL)
+		{
+			/*
+			   Firmware thread checks...
+			 */
+			for (ui32ThreadCount = 0;  ui32ThreadCount < RGXFW_THREAD_NUM;  ui32ThreadCount++)
+			{
+				IMG_CHAR* pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo;
+
+				/*
+				Check if the FW has hit an assert...
+				 */
+				if (*pszTraceAssertInfo != '\0')
+				{
+					PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %s (%s:%d)",
+							__func__, ui32ThreadCount, pszTraceAssertInfo,
+							psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath,
+							psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum));
+					eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+					eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED;
+					goto _RGXUpdateHealthStatus_Exit;
+				}
+			}
+
+			/*
+			Check if the FW has faulted...
+			 */
+			if (psRGXFWIfTraceBufCtl->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT)
+			{
+				PVR_DPF((PVR_DBG_WARNING,
+						"%s: Firmware has faulted and needs to restart",
+						__func__));
+				eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT;
+				if (psRGXFWIfTraceBufCtl->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED)
+				{
+					eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING;
+				}
+				else
+				{
+					eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING;
+				}
+				goto _RGXUpdateHealthStatus_Exit;
+			}
+		}
+
+		/*
+		   Event Object Timeouts check...
+		 */
+		if (!bCheckAfterTimePassed)
+		{
+			if (psDevInfo->ui32GEOTimeoutsLastTime > 1 && psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "%s: Global Event Object Timeouts have risen (from %d to %d)",
+						__func__,
+						psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts));
+				eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+				eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS;
+			}
+			psDevInfo->ui32GEOTimeoutsLastTime = psPVRSRVData->ui32GEOConsecutiveTimeouts;
+		}
+
+		/*
+		   Check the Kernel CCB pointer is valid. If any commands were waiting last time, then check
+		   that some have executed since then.
+		 */
+		bKCCBCmdsWaiting = IMG_FALSE;
+		psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+		if (psKCCBCtl != NULL)
+		{
+			if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask  ||
+					psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "%s: KCCB has invalid offset (ROFF=%d WOFF=%d)",
+						__func__, psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset));
+				eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+				eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT;
+			}
+
+			if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset)
+			{
+				bKCCBCmdsWaiting = IMG_TRUE;
+			}
+		}
+
+		if (bCheckAfterTimePassed && psDevInfo->psRGXFWIfTraceBuf != NULL)
+		{
+			IMG_UINT32 ui32KCCBCmdsExecuted = psDevInfo->psRGXFWIfTraceBuf->ui32KCCBCmdsExecuted;
+
+			if (psDevInfo->ui32KCCBCmdsExecutedLastTime == ui32KCCBCmdsExecuted)
+			{
+				/*
+				   If something was waiting last time then the Firmware has stopped processing commands.
+				 */
+				if (psDevInfo->bKCCBCmdsWaitingLastTime)
+				{
+					PVR_DPF((PVR_DBG_WARNING, "%s: No KCCB commands executed since check!",
+							__func__));
+					eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+					eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED;
+				}
+
+				/*
+				   If no commands are currently pending and nothing happened since the last poll, then
+				   schedule a dummy command to ping the firmware so we know it is alive and processing.
+				 */
+				if (!bKCCBCmdsWaiting)
+				{
+					/* Protect the PDumpLoadMem. RGXScheduleCommand() cannot take the
+					 * PMR lock itself, because some bridge functions will take the PMR lock
+					 * before calling RGXScheduleCommand
+					 */
+					PVRSRV_ERROR eError = RGXFWHealthCheckCmd(psDevNode->pvDevice);
+
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_WARNING, "%s: Cannot schedule Health Check command! (0x%x)",
+								__func__, eError));
+					}
+					else
+					{
+						bKCCBCmdsWaiting = IMG_TRUE;
+					}
+				}
+			}
+
+			psDevInfo->bKCCBCmdsWaitingLastTime     = bKCCBCmdsWaiting;
+			psDevInfo->ui32KCCBCmdsExecutedLastTime = ui32KCCBCmdsExecuted;
+		}
+	}
+
+	if (bCheckAfterTimePassed && (PVRSRV_DEVICE_HEALTH_STATUS_OK==eNewStatus))
+	{
+		RGXCheckForStalledClientContexts(psDevInfo, IMG_FALSE);
+	}
+
+	/*
+	   Finished, save the new status...
+	 */
+	_RGXUpdateHealthStatus_Exit:
+	OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus);
+	OSAtomicWrite(&psDevNode->eHealthReason, eNewReason);
+	RGXSRV_HWPERF_DEVICE_INFO(psDevInfo, RGX_HWPERF_DEV_INFO_EV_HEALTH, eNewStatus, eNewReason);
+
+	/*
+	 * Attempt to service the HWPerf buffer to regularly transport idle/periodic
+	 * packets to host buffer.
+	 */
+	if (psDevNode->pfnServiceHWPerf != NULL)
+	{
+		PVRSRV_ERROR eError = psDevNode->pfnServiceHWPerf(psDevNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "%s: "
+					"Error occurred when servicing HWPerf buffer (%d)",
+					__func__, eError));
+		}
+	}
+
+	/* Attempt to refresh timer correlation data */
+	RGXTimeCorrRestartPeriodic(psDevNode);
+
+	return PVRSRV_OK;
+} /* RGXUpdateHealthStatus */
+
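+/* Note on the polling protocol implemented above (summary only): each
+ * periodic call compares the firmware's ui32KCCBCmdsExecuted counter with the
+ * value recorded on the previous pass. If the counter has not advanced while
+ * commands were known to be waiting, the KCCB is flagged as stalled
+ * (NOT_RESPONDING/QUEUE_STALLED); if nothing was waiting, a
+ * RGXFWIF_KCCB_CMD_HEALTH_CHECK ping is queued so the next pass has progress
+ * to observe. FW assert and fault flags in the trace buffer are checked
+ * first and short-circuit straight to the DEAD/FAULT status.
+ */
+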
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM)
+{
+	if (psCurrentServerCommonContext == NULL)
+	{
+		/* the context has already been freed so there is nothing to do here */
+		return PVRSRV_OK;
+	}
+
+	return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode,
+	                          psCurrentServerCommonContext->psClientCCB,
+	                          eKickTypeDM);
+}
+
+void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+                             DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                             void *pvDumpDebugFile,
+                             IMG_UINT32 ui32VerbLevel)
+{
+	if (psCurrentServerCommonContext == NULL)
+	{
+		/* the context has already been freed so there is nothing to do here */
+		return;
+	}
+
+	if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
+	{
+		/* If high verbosity requested, dump whole CCB */
+		DumpCCB(psCurrentServerCommonContext->psDevInfo,
+		        psCurrentServerCommonContext->sFWCommonContextFWAddr,
+		        psCurrentServerCommonContext->psClientCCB,
+		        pfnDumpDebugPrintf,
+		        pvDumpDebugFile);
+	}
+	else
+	{
+		/* Otherwise, only dump first stalled command in the CCB */
+		DumpStalledCCBCommand(psCurrentServerCommonContext->sFWCommonContextFWAddr,
+		                      psCurrentServerCommonContext->psClientCCB,
+		                      pfnDumpDebugPrintf,
+		                      pvDumpDebugFile);
+	}
+}
+
+void AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+		IMG_UINT32 *pui32NumCleanupCtl,
+		RGXFWIF_DM eDM,
+		IMG_BOOL bKick,
+		RGX_RTDATA_CLEANUP_DATA        *psRTDataCleanup,
+		RGX_ZSBUFFER_DATA              *psZBuffer,
+		RGX_ZSBUFFER_DATA              *psSBuffer,
+		RGX_ZSBUFFER_DATA              *psMSAAScratchBuffer)
+{
+	PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl;
+
+	PVR_ASSERT((eDM == RGXFWIF_DM_TA) || (eDM == RGXFWIF_DM_3D));
+
+	if (bKick)
+	{
+		if (eDM == RGXFWIF_DM_TA)
+		{
+			if (psRTDataCleanup)
+			{
+				PRGXFWIF_CLEANUP_CTL psCleanupCtl;
+
+				RGXSetFirmwareAddress(&psCleanupCtl, psRTDataCleanup->psFWHWRTDataMemDesc,
+						offsetof(RGXFWIF_HWRTDATA, sTACleanupState),
+						RFW_FWADDR_NOREF_FLAG);
+
+				*(psCleanupCtlWrite++) = psCleanupCtl;
+			}
+		}
+		else
+		{
+			RGXFWIF_PRBUFFER_TYPE eBufferType;
+			RGX_ZSBUFFER_DATA *psBuffer = NULL;
+
+			if (psRTDataCleanup)
+			{
+				PRGXFWIF_CLEANUP_CTL psCleanupCtl;
+
+				RGXSetFirmwareAddress(&psCleanupCtl, psRTDataCleanup->psFWHWRTDataMemDesc,
+						offsetof(RGXFWIF_HWRTDATA, s3DCleanupState),
+						RFW_FWADDR_NOREF_FLAG);
+
+				*(psCleanupCtlWrite++) = psCleanupCtl;
+			}
+
+			for (eBufferType = RGXFWIF_PRBUFFER_START; eBufferType < RGXFWIF_PRBUFFER_MAXSUPPORTED; eBufferType++)
+			{
+				switch (eBufferType)
+				{
+				case RGXFWIF_PRBUFFER_ZBUFFER:
+					psBuffer = psZBuffer;
+					break;
+				case RGXFWIF_PRBUFFER_SBUFFER:
+					psBuffer = psSBuffer;
+					break;
+				case RGXFWIF_PRBUFFER_MSAABUFFER:
+					psBuffer = psMSAAScratchBuffer;
+					break;
+				case RGXFWIF_PRBUFFER_MAXSUPPORTED:
+					psBuffer = NULL;
+					break;
+				}
+				if (psBuffer)
+				{
+					(psCleanupCtlWrite++)->ui32Addr = psBuffer->sZSBufferFWDevVAddr.ui32Addr +
+							offsetof(RGXFWIF_PRBUFFER, sCleanupState);
+					psBuffer = NULL;
+				}
+			}
+		}
+	}
+
+	*pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl;
+
+	PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS);
+}
+
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo;
+	RGXFWIF_HWRINFOBUF	*psHWRInfoBuf;
+	RGXFWIF_TRACEBUF	*psRGXFWIfTraceBufCtl;
+	IMG_UINT32			i;
+
+	if (psDevNode->pvDevice == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_DEVINFO;
+	}
+	psDevInfo = psDevNode->pvDevice;
+
+	psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBuf;
+	psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+	for (i = 0 ; i < RGXFWIF_DM_MAX ; i++)
+	{
+		/* Reset the HWR numbers */
+		psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[i] = 0;
+		psRGXFWIfTraceBufCtl->aui32HwrDmFalseDetectCount[i] = 0;
+		psRGXFWIfTraceBufCtl->aui32HwrDmRecoveredCount[i] = 0;
+		psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[i] = 0;
+	}
+
+	for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+	{
+		psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0;
+	}
+
+	for (i = 0 ; i < RGXFW_THREAD_NUM ; i++)
+	{
+		psHWRInfoBuf->ui32FirstCrPollAddr[i] = 0;
+		psHWRInfoBuf->ui32FirstCrPollMask[i] = 0;
+		psHWRInfoBuf->ui32FirstCrPollLastValue[i] = 0;
+	}
+
+	psHWRInfoBuf->ui32WriteIndex = 0;
+	psHWRInfoBuf->ui32DDReqCount = 0;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+		IMG_DEV_PHYADDR *psPhyAddr,
+		IMG_UINT32 ui32LogicalOffset,
+		IMG_UINT32 ui32Log2PageSize,
+		IMG_UINT32 ui32NumOfPages,
+		IMG_BOOL *bValid)
+{
+
+	PVRSRV_ERROR eError;
+
+	eError = PMRLockSysPhysAddresses(psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: PMRLockSysPhysAddresses failed (%u)",
+				__func__,
+				eError));
+		return eError;
+	}
+
+	eError = PMR_DevPhysAddr(psPMR,
+			ui32Log2PageSize,
+			ui32NumOfPages,
+			ui32LogicalOffset,
+			psPhyAddr,
+			bValid);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: PMR_DevPhysAddr failed (%u)",
+				__func__,
+				eError));
+		return eError;
+	}
+
+
+	eError = PMRUnlockSysPhysAddresses(psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: PMRUnLockSysPhysAddresses failed (%u)",
+				__func__,
+				eError));
+		return eError;
+	}
+
+	return eError;
+}
+
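+/* Hedged usage sketch for RGXGetPhyAddr(): translating the first page of a
+ * PMR to a device physical address, assuming a 4KB (log2 = 12) page size.
+ * "psSomePMR" is a hypothetical handle obtained elsewhere.
+ */
+#if 0	/* illustration only */
+{
+	IMG_DEV_PHYADDR sPhyAddr;
+	IMG_BOOL bValid;
+	PVRSRV_ERROR eError;
+
+	eError = RGXGetPhyAddr(psSomePMR,
+			&sPhyAddr,
+			0,	/* logical offset: start of the PMR */
+			12,	/* log2 page size: 4KB */
+			1,	/* number of pages */
+			&bValid);
+	if ((eError == PVRSRV_OK) && bValid)
+	{
+		/* sPhyAddr.uiAddr now holds the device physical address */
+	}
+}
+#endif
+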
+#if defined(PDUMP)
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset)
+{
+	RGXFWIF_CCB_CTL	*psKCCBCtl = psDevInfo->psKernelCCBCtl;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (psDevInfo->bDumpedKCCBCtlAlready)
+	{
+		/* exiting capture range or pdump block */
+		psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+
+		/* make sure the previous command is drained in the pdump in case we 'jump' over some future commands */
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER,
+				"kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)",
+				psKCCBCtl,
+				ui32WriteOffset,
+				ui32WriteOffset);
+		eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc,
+				offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+				ui32WriteOffset,
+				0xffffffff,
+				PDUMP_POLL_OPERATOR_EQUAL,
+				PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXPdumpDrainKCCB: problem pdumping POL for kCCBCtl (%d)", eError));
+		}
+	}
+
+	return eError;
+
+}
+#endif
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXClientConnectCompatCheck_ClientAgainstFW
+
+ @Description
+
+ Check compatibility of client and firmware (build options)
+ at connection time.
+
+ @Input psDeviceNode - device node
+ @Input ui32ClientBuildOptions - build options for the client
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions)
+{
+	PVRSRV_ERROR		eError;
+#if !defined(NO_HARDWARE) || defined(PDUMP)
+#if !defined(NO_HARDWARE)
+	RGXFWIF_INIT	*psRGXFWInit = NULL;
+	IMG_UINT32		ui32BuildOptionsMismatch;
+	IMG_UINT32		ui32BuildOptionsFW;
+#endif
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#endif
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+#if !defined(NO_HARDWARE)
+	if (psDevInfo == NULL || psDevInfo->psRGXFWIfInitMemDesc == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Cannot acquire kernel fw compatibility check info, RGXFWIF_INIT structure not allocated.",
+				__func__));
+		return PVRSRV_ERROR_NOT_INITIALISED;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+			(void **)&psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel fw compatibility check info (%u)",
+				__func__, eError));
+		return eError;
+	}
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		if (*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+		{
+			/* No need to wait if the FW has already updated the values */
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+#endif
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Compatibility check: client and FW build options");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+			offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+			offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions),
+			ui32ClientBuildOptions,
+			0xffffffff,
+			PDUMP_POLL_OPERATOR_EQUAL,
+			PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: problem pdumping POL for psRGXFWIfInitMemDesc (%d)",
+				__func__,
+				eError));
+		return eError;
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	ui32BuildOptionsFW = psRGXFWInit->sRGXCompChecks.ui32BuildOptions;
+	ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW;
+
+	if (ui32BuildOptionsMismatch != 0)
+	{
+		if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+					"extra options present in client: (0x%x). Please check rgx_options.h",
+					ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+		}
+
+		if ( (ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+					"extra options present in Firmware: (0x%x). Please check rgx_options.h",
+					ui32BuildOptionsFW & ui32BuildOptionsMismatch ));
+		}
+		eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+		goto chk_exit;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and client build options match. [ OK ]"));
+	}
+#endif
+
+	eError = PVRSRV_OK;
+#if !defined(NO_HARDWARE)
+	chk_exit:
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+#endif
+
+	return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXVzRegisterFirmwarePhysHeap
+
+ @Description Register firmware heap for the specified guest OSID
+
+ @Input psDeviceNode - device node
+ @Input ui32OSID     - Guest OSID
+ @Input sDevPAddr    - Heap address
+ @Input ui64DevPSize - Heap size
+
+ @Return   PVRSRV_ERROR - PVRSRV_OK if heap setup was successful.
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzRegisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+		IMG_UINT32 ui32OSID,
+		IMG_DEV_PHYADDR sDevPAddr,
+		IMG_UINT64 ui64DevPSize)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_OK);
+
+	if (!ui32OSID ||
+		!ui64DevPSize ||
+		!sDevPAddr.uiAddr ||
+		ui32OSID >= RGXFW_NUM_OS ||
+		ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE)
+	{
+		/* Guest OSID(s) range [1 up to (RGXFW_NUM_OS-1)] */
+		PVR_DPF((PVR_DBG_ERROR, "Invalid guest %d fw physheap spec", ui32OSID));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Registration creates an internal RA to manage the guest firmware heap(s) */
+	eError = PVRSRVVzRegisterFirmwarePhysHeap (psDeviceNode, sDevPAddr, ui64DevPSize, ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Registering guest %d fw physheap failed", ui32OSID));
+		return eError;
+	}
+
+	/* Map guest DMA fw physheap into the fw kernel memory context */
+	eError = RGXVzDevMemAllocateGuestFwHeap(psDeviceNode, ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Mapping guest %d fw physheap failed", ui32OSID));
+		return eError;
+	}
+
+	return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXVzUnregisterFirmwarePhysHeap
+
+ @Description Unregister firmware heap for the specified guest OSID
+
+ @Input psDeviceNode - device node
+ @Input ui32OSID     - Guest OSID
+
+ @Return   PVRSRV_ERROR - PVRSRV_OK if heap setup was successful.
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzUnregisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+		IMG_UINT32 ui32OSID)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_OK);
+
+	if (!ui32OSID || ui32OSID >= RGXFW_NUM_OS)
+	{
+		/* Guest OSID(s) range [1 up to (RGXFW_NUM_OS-1)] */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Free guest fw physheap from fw kernel memory context */
+	RGXVzDevMemFreeGuestFwHeap(psDeviceNode, ui32OSID);
+
+	/* Unregistration deletes the state required to maintain the heap */
+	eError = PVRSRVVzUnregisterFirmwarePhysHeap (psDeviceNode, ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Unregistering guest %d fw physheap failed", ui32OSID));
+		return eError;
+	}
+
+	return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXVzCreateFWKernelMemoryContext
+
+ @Description Setup additional firmware state specific to VZ
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR - PVRSRV_OK if successful.
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		eError = SysVzRegisterFwPhysHeap(psDeviceNode->psDevConfig);
+	}
+#if (RGXFW_GUEST_OSID_START < RGXFW_NUM_OS)
+	else
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+		IMG_CHAR szHeapName[32];
+		IMG_UINT32 ui32OSID;
+
+		/* Initialise each guest OSID's firmware physheap; note that the guest
+		   OSID range is [RGXFW_GUEST_OSID_START up to (RGXFW_NUM_OS-1)] */
+		for (ui32OSID = RGXFW_GUEST_OSID_START; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+		{
+			OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID);
+
+			eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName,
+					&psDevInfo->psGuestFirmwareRawHeap[ui32OSID]);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "DevmemFindHeapByName() for guest %d failed", ui32OSID));
+			}
+		}
+	}
+#endif
+
+	return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXVzDestroyFWKernelMemoryContext
+
+ @Description Destroy additional firmware state specific to VZ
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR - PVRSRV_OK if successful.
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		return SysVzUnregisterFwPhysHeap(psDeviceNode->psDevConfig);
+	}
+	return PVRSRV_OK;
+}
+
+
+IMG_UINT32 RGXGetFwMainHeapSize(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	if (psDevInfo == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid device info", __func__));
+		return 0;
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		return RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE;
+	}
+	else
+	{
+		return RGX_FIRMWARE_META_MAIN_HEAP_SIZE;
+	}
+}
+
+/******************************************************************************
+ End of file (rgxfwutils.c)
+ ******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwutils.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwutils.h
new file mode 100644
index 0000000..c3c2f47
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxfwutils.h
@@ -0,0 +1,1323 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware utility routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXFWUTILS_H
+#define RGXFWUTILS_H
+
+#include "log2.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "devicemem.h"
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "rgxta3d.h"
+#include "devicemem_utils.h"
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#include "physmem_tdfwcode.h"
+#include "physmem_tdsecbuf.h"
+#endif
+
+#if defined(SUPPORT_DEDICATED_FW_MEMORY)
+#include "physmem_fwdedicatedmem.h"
+#endif
+
+
+/*
+ * Firmware-only allocations (which are initialised by the host) must be aligned to the SLC cache line size.
+ * This is because firmware-only allocations are GPU_CACHE_INCOHERENT and this causes problems
+ * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't
+ * make it into the SLC cache because the line has already been loaded when accessing the content of the first allocation.
+ */
+static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo,
+											IMG_DEVMEM_SIZE_T uiSize,
+											DEVMEM_FLAGS_T uiFlags,
+											const IMG_CHAR *pszText,
+											DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	PVRSRV_ERROR eError;
+	DEVMEM_HEAP *psFwHeap;
+
+	PVR_DPF_ENTERED;
+
+	/* Enforce the standard prefix naming scheme callers must follow */
+	PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w'));
+
+	psFwHeap = (PVRSRV_CHECK_FW_CONFIG(uiFlags)) ? (psDevInfo->psFirmwareConfigHeap) : (psDevInfo->psFirmwareMainHeap);
+
+	eError = DevmemAllocate(psFwHeap,
+				uiSize,
+				GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)),
+				uiFlags | PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+				pszText,
+				ppsMemDescPtr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	/*
+		We need to map it so the heap for this allocation
+		is set
+	*/
+	eError = DevmemMapToDevice(*ppsMemDescPtr,
+				   psFwHeap,
+				   &sTmpDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		DevmemFree(*ppsMemDescPtr);
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
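+/* Hedged usage sketch: allocating a host-initialised, firmware-only buffer
+ * with the helper above. Note the mandatory "Fw" annotation prefix enforced
+ * by the PVR_ASSERT; the structure type and variable names below are
+ * hypothetical.
+ */
+#if 0	/* illustration only */
+{
+	DEVMEM_MEMDESC *psFwFooMemDesc;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_FOO),		/* hypothetical FW structure */
+			RGX_FWCOMCTX_ALLOCFLAGS,	/* defined later in this header */
+			"FwFooStruct",			/* must begin with "Fw" */
+			&psFwFooMemDesc);
+}
+#endif
+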
+static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode,
+													  IMG_DEVMEM_SIZE_T uiSize,
+													  IMG_DEVMEM_ALIGN_T uiAlign,
+													  DEVMEM_FLAGS_T uiFlags,
+													  const IMG_CHAR *pszText,
+													  DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	/* Enforce the standard prefix naming scheme callers must follow */
+	PVR_ASSERT((pszText != NULL) &&
+			(pszText[0] == 'F') && (pszText[1] == 'w') &&
+			(pszText[2] == 'E') && (pszText[3] == 'x'));
+
+	eError = DevmemAllocateExportable(psDeviceNode,
+									  uiSize,
+									  uiAlign,
+									  RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ?
+										ExactLog2(uiAlign) :
+										DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+									  uiFlags | PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+									  pszText,
+									  ppsMemDescPtr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FW DevmemAllocateExportable failed (%u)", eError));
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	/*
+		We need to map it so the heap for this allocation
+		is set
+	*/
+	eError = DevmemMapToDevice(*ppsMemDescPtr,
+							   psDevInfo->psFirmwareMainHeap,
+							   &sTmpDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		DevmemFree(*ppsMemDescPtr);
+		PVR_DPF((PVR_DBG_ERROR, "FW DevmemMapToDevice failed (%u)", eError));
+	}
+
+	PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr);
+}
+
+static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo,
+												IMG_DEVMEM_SIZE_T uiSize,
+												IMG_DEVMEM_SIZE_T uiChunkSize,
+												IMG_UINT32 ui32NumPhysChunks,
+												IMG_UINT32 ui32NumVirtChunks,
+												IMG_UINT32 *pui32MappingTable,
+												DEVMEM_FLAGS_T uiFlags,
+												const IMG_CHAR *pszText,
+												DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	PVRSRV_ERROR eError;
+	DEVMEM_HEAP *psFwHeap;
+	IMG_UINT32 ui32Align;
+
+	PVR_DPF_ENTERED;
+
+	/* Enforce the standard prefix naming scheme callers must follow */
+	PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w'));
+
+	psFwHeap = (PVRSRV_CHECK_FW_CONFIG(uiFlags)) ? (psDevInfo->psFirmwareConfigHeap) : (psDevInfo->psFirmwareMainHeap);
+	ui32Align = GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS));
+
+	eError = DevmemAllocateSparse(psDevInfo->psDeviceNode,
+								uiSize,
+								uiChunkSize,
+								ui32NumPhysChunks,
+								ui32NumVirtChunks,
+								pui32MappingTable,
+								ui32Align,
+								DevmemGetHeapLog2PageSize(psFwHeap),
+								uiFlags | PVRSRV_MEMALLOCFLAG_FW_LOCAL | PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING,
+								pszText,
+								ppsMemDescPtr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+	/*
+		We need to map it so the heap for this allocation
+		is set
+	*/
+	eError = DevmemMapToDevice(*ppsMemDescPtr,
+				   psFwHeap,
+				   &sTmpDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		DevmemFree(*ppsMemDescPtr);
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+static INLINE void DevmemFWPoison(DEVMEM_MEMDESC *psMemDesc, IMG_BYTE ubPoisonValue)
+{
+	void *pvLinAddr;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvLinAddr);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "%s: Failed to acquire FW allocation mapping to poison: %s",
+		        __func__,
+		        PVRSRVGETERRORSTRING(eError)));
+		return;
+	}
+
+	OSDeviceMemSet(pvLinAddr, ubPoisonValue, psMemDesc->uiAllocSize);
+
+	DevmemReleaseCpuVirtAddr(psMemDesc);
+}
+
+static INLINE void DevmemFwFree(PVRSRV_RGXDEV_INFO *psDevInfo,
+								DEVMEM_MEMDESC *psMemDesc)
+{
+	PVR_DPF_ENTERED1(psMemDesc);
+
+	if (psDevInfo->bEnableFWPoisonOnFree)
+	{
+		DevmemFWPoison(psMemDesc, psDevInfo->ubFWPoisonOnFreeValue);
+	}
+
+	DevmemReleaseDevVirtAddr(psMemDesc);
+	DevmemFree(psMemDesc);
+
+	PVR_DPF_RETURN;
+}
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+static INLINE
+PVRSRV_ERROR DevmemImportTDFWCode(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  IMG_DEVMEM_SIZE_T uiSize,
+                                  PMR_LOG2ALIGN_T uiLog2Align,
+                                  IMG_UINT32 uiMemAllocFlags,
+                                  PVRSRV_TD_FW_MEM_REGION eRegion,
+                                  DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	PMR *psTDFWCodePMR;
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	IMG_DEVMEM_SIZE_T uiMemDescSize;
+	IMG_DEVMEM_ALIGN_T uiAlign = 1 << uiLog2Align;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ppsMemDescPtr);
+
+	eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+	                                             &uiSize,
+	                                             &uiAlign);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DevmemExportalignAdjustSizeAndAlign failed (%u)", eError));
+		goto PMRCreateError;
+	}
+
+	eError = PhysmemNewTDFWCodePMR(psDeviceNode,
+	                               uiSize,
+	                               uiLog2Align,
+	                               uiMemAllocFlags,
+	                               eRegion,
+	                               &psTDFWCodePMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDFWCodePMR failed (%u)", eError));
+		goto PMRCreateError;
+	}
+
+	/* NB: TDFWCodePMR refcount: 1 -> 2 */
+	eError = DevmemLocalImport(psDeviceNode,
+	                           psTDFWCodePMR,
+	                           uiMemAllocFlags,
+	                           ppsMemDescPtr,
+	                           &uiMemDescSize,
+	                           "TDFWCode");
+	if (eError != PVRSRV_OK)
+	{
+		goto ImportError;
+	}
+
+	eError = DevmemMapToDevice(*ppsMemDescPtr,
+	                           psDevInfo->psFirmwareMainHeap,
+	                           &sTmpDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to map TD META code PMR (%u)", eError));
+		goto MapError;
+	}
+
+	/* NB: TDFWCodePMR refcount: 2 -> 1
+	 * The PMR will be unreferenced again (and destroyed) when
+	 * the memdesc tracking it is cleaned up
+	 */
+	PMRUnrefPMR(psTDFWCodePMR);
+
+	return PVRSRV_OK;
+
+MapError:
+	DevmemFree(*ppsMemDescPtr);
+	*ppsMemDescPtr = NULL;
+ImportError:
+	/* Unref and destroy the PMR */
+	PMRUnrefPMR(psTDFWCodePMR);
+PMRCreateError:
+
+	return eError;
+}
+
+static INLINE
+PVRSRV_ERROR DevmemImportTDSecureBuf(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_DEVMEM_SIZE_T uiSize,
+                                     PMR_LOG2ALIGN_T uiLog2Align,
+                                     IMG_UINT32 uiMemAllocFlags,
+                                     DEVMEM_MEMDESC **ppsMemDescPtr,
+                                     IMG_UINT64 *pui64SecBufHandle)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	PMR *psTDSecureBufPMR;
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	IMG_DEVMEM_SIZE_T uiMemDescSize;
+	IMG_DEVMEM_ALIGN_T uiAlign = 1 << uiLog2Align;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ppsMemDescPtr);
+
+	eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+	                                             &uiSize,
+	                                             &uiAlign);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DevmemExportalignAdjustSizeAndAlign failed (%u)", eError));
+		goto PMRCreateError;
+	}
+
+	eError = PhysmemNewTDSecureBufPMR(NULL,
+	                                  psDeviceNode,
+	                                  uiSize,
+	                                  uiLog2Align,
+	                                  uiMemAllocFlags,
+	                                  &psTDSecureBufPMR,
+	                                  pui64SecBufHandle);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR failed (%u)", eError));
+		goto PMRCreateError;
+	}
+
+	/* NB: psTDSecureBufPMR refcount: 1 -> 2 */
+	eError = DevmemLocalImport(psDeviceNode,
+	                           psTDSecureBufPMR,
+	                           uiMemAllocFlags,
+	                           ppsMemDescPtr,
+	                           &uiMemDescSize,
+	                           "TDSecureBuffer");
+	if (eError != PVRSRV_OK)
+	{
+		goto ImportError;
+	}
+
+	eError = DevmemMapToDevice(*ppsMemDescPtr,
+	                           psDevInfo->psFirmwareMainHeap,
+	                           &sTmpDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to map TD secure buffer PMR (%u)", eError));
+		goto MapError;
+	}
+
+	/* NB: psTDSecureBufPMR refcount: 2 -> 1
+	 * The PMR will be unreferenced again (and destroyed) when
+	 * the memdesc tracking it is cleaned up
+	 */
+	PMRUnrefPMR(psTDSecureBufPMR);
+
+	return PVRSRV_OK;
+
+MapError:
+	DevmemFree(*ppsMemDescPtr);
+	*ppsMemDescPtr = NULL;
+ImportError:
+	/* Unref and destroy the PMR */
+	PMRUnrefPMR(psTDSecureBufPMR);
+PMRCreateError:
+
+	return eError;
+}
+#endif
+
+
+#if defined(SUPPORT_DEDICATED_FW_MEMORY)
+static INLINE
+PVRSRV_ERROR DevmemAllocateDedicatedFWMem(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                          IMG_DEVMEM_SIZE_T uiSize,
+                                          PMR_LOG2ALIGN_T uiLog2Align,
+                                          IMG_UINT32 uiMemAllocFlags,
+                                          const IMG_CHAR *pszText,
+                                          DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	PMR *psPMR;
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	IMG_DEVMEM_SIZE_T uiMemDescSize;
+	IMG_DEVMEM_ALIGN_T uiAlign = 1 << uiLog2Align;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ppsMemDescPtr);
+
+	eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+	                                             &uiSize,
+	                                             &uiAlign);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DevmemExportalignAdjustSizeAndAlign failed (%u)", eError));
+		goto PMRCreateError;
+	}
+
+	eError = PhysmemNewFWDedicatedMemPMR(psDeviceNode,
+	                                     uiSize,
+	                                     uiLog2Align,
+	                                     uiMemAllocFlags,
+	                                     &psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PhysmemNewFWDedicatedMemPMR failed (%u)", eError));
+		goto PMRCreateError;
+	}
+
+	/* NB: FWDedicatedMemPMR refcount: 1 -> 2 */
+	eError = DevmemLocalImport(psDeviceNode,
+	                           psPMR,
+	                           uiMemAllocFlags,
+	                           ppsMemDescPtr,
+	                           &uiMemDescSize,
+	                           pszText);
+	if (eError != PVRSRV_OK)
+	{
+		goto ImportError;
+	}
+
+	eError = DevmemMapToDevice(*ppsMemDescPtr,
+	                           psDevInfo->psFirmwareMainHeap,
+	                           &sTmpDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to map dedicated FW memory (%u)", eError));
+		goto MapError;
+	}
+
+	/* NB: FWDedicatedMemPMR refcount: 2 -> 1
+	 * The PMR will be unreferenced again (and destroyed) when
+	 * the memdesc tracking it is cleaned up
+	 */
+	PMRUnrefPMR(psPMR);
+
+	return PVRSRV_OK;
+
+MapError:
+	DevmemFree(*ppsMemDescPtr);
+	*ppsMemDescPtr = NULL;
+ImportError:
+	/* Unref and destroy the PMR */
+	PMRUnrefPMR(psPMR);
+PMRCreateError:
+
+	return eError;
+}
+#endif
+
+
+/*
+ * This function returns the value of the hardware register RGX_CR_TIMER
+ * which is a timer counting in ticks.
+ */
+
+static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+    IMG_UINT64 ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+
+    /*
+     *  In order to avoid having to issue three 32-bit reads to detect the
+     *  lower 32-bits wrapping, the MSB of the low 32-bit word is duplicated
+     *  in the MSB of the high 32-bit word. If the wrap happens, we just read
+     *  the register again (it will not wrap again so soon).
+     */
+    if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK)
+    {
+        ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+    }
+
+    return (ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK) >> RGX_CR_TIMER_VALUE_SHIFT;
+}
+
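+/*
+ * Worked example of the wrap check below: if bit 31 of the low word changes
+ * (e.g. the counter ticks from 0x7FFFFFFF to 0x80000000) between the two bus
+ * reads that make up the 64-bit access, the mirror of bit 31 held in the top
+ * word no longer matches the freshly read bit 31. The expression
+ * (ui64Time ^ (ui64Time << 32)) places that disagreement in the mirror bit,
+ * the mask selects it, and the register is read again. On a consistent
+ * sample the two copies of bit 31 agree and the first read is used.
+ */
+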
+/*
+ * This FW Common Context is only mapped into the kernel for initialisation and cleanup purposes.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency, and write-combine suffices
+ * on the CPU side (the WC buffer will be flushed at the first kick).
+ */
+#define RGX_FWCOMCTX_ALLOCFLAGS	(PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+								 PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)| \
+								 PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+								 PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+								 PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \
+								 PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+								 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+								 PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | \
+								 PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+								 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+
+/******************************************************************************
+ * RGXSetFirmwareAddress Flags
+ *****************************************************************************/
+#define RFW_FWADDR_FLAG_NONE		(0)			/*!< Void flag */
+#define RFW_FWADDR_NOREF_FLAG		(1U << 0)	/*!< It is safe to immediately release the reference to the pointer,
+												  otherwise RGXUnsetFirmwareAddress() must be called when finished. */
+
+IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
+PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                              IMG_BOOL                 bEnableSignatureChecks,
+                              IMG_UINT32               ui32SignatureChecksBufSize,
+                              IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                              IMG_UINT64               ui64HWPerfFilter,
+                              IMG_UINT32               ui32RGXFWAlignChecksArrLength,
+                              IMG_UINT32               *pui32RGXFWAlignChecks,
+                              IMG_UINT32               ui32ConfigFlags,
+                              IMG_UINT32               ui32ConfigFlagsExt,
+                              IMG_UINT32               ui32LogType,
+                              RGXFWIF_BIFTILINGMODE    eBifTilingMode,
+                              IMG_UINT32               ui32NumTilingCfgs,
+                              IMG_UINT32               *pui32BIFTilingXStrides,
+                              IMG_UINT32               ui32FilterFlags,
+                              IMG_UINT32               ui32JonesDisableMask,
+                              IMG_UINT32               ui32HWRDebugDumpLimit,
+                              IMG_UINT32               ui32HWPerfCountersDataSize,
+                              IMG_UINT32               *pui32TPUTrilinearFracMask,
+                              RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+                              FW_PERF_CONF             eFirmwarePerf);
+
+
+
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*************************************************************************/ /*!
+@Function       RGXSetFirmwareAddress
+
+@Description    Sets a pointer in a firmware data structure.
+
+@Input          ppDest		 Address of the pointer to set
+@Input          psSrc		 MemDesc describing the pointer
+@Input          uiOffset	 Offset into the MemDesc
+@Input          ui32Flags	 Any combination of RFW_FWADDR_*_FLAG
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR	*ppDest,
+								   DEVMEM_MEMDESC		*psSrc,
+								   IMG_UINT32			uiOffset,
+								   IMG_UINT32			ui32Flags);
+
+
+/*************************************************************************/ /*!
+@Function       RGXSetMetaDMAAddress
+
+@Description    Fills a Firmware structure used to set up the Meta DMA with two
+                pointers to the same data, one 40-bit and one 32-bit (a
+                pointer in the FW memory space).
+
+@Input          psDest			Address of the structure to set
+@Input          psSrcMemDesc	MemDesc describing the pointer
+@Input          psSrcFWDevVAddr Firmware memory space pointer
+@Input          uiOffset		Offset into the MemDesc
+
+@Return			void
+*/ /**************************************************************************/
+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR		*psDest,
+						  DEVMEM_MEMDESC		*psSrcMemDesc,
+						  RGXFWIF_DEV_VIRTADDR	*psSrcFWDevVAddr,
+						  IMG_UINT32			uiOffset);
+
+
+/*************************************************************************/ /*!
+@Function       RGXUnsetFirmwareAddress
+
+@Description    Unsets a pointer in a firmware data structure
+
+@Input          psSrc		 MemDesc describing the pointer
+
+@Return			void
+*/ /**************************************************************************/
+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc);
+
+/*************************************************************************/ /*!
+@Function       FWCommonContextAllocate
+
+@Description    Allocate a FW common context. This allocates the HW memory
+                for the context, the CCB and wires it all together.
+
+@Input          psConnection            Connection this context is being created on
+@Input          psDeviceNode		    Device node to create the FW context on
+                                        (must be RGX device node)
+@Input          eRGXCCBRequestor        RGX_CCB_REQUESTOR_TYPE enum constant
+                                        which represents the requestor of this FWCC
+@Input          eDM                     Data Master type
+@Input          psAllocatedMemDesc      Pointer to pre-allocated MemDesc to use
+                                        as the FW context or NULL if this function
+                                        should allocate it
+@Input          ui32AllocatedOffset     Offset into pre-allocated MemDesc to use
+                                        as the FW context. If psAllocatedMemDesc
+                                        is NULL then this parameter is ignored
+@Input          psFWMemContextMemDesc   MemDesc of the FW memory context this
+                                        common context resides on
+@Input          psContextStateMemDesc   FW context state (context switch) MemDesc
+@Input          ui32CCBAllocSizeLog2    Size of the CCB for this context
+@Input          ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context
+@Input          ui32Priority            Priority of the context
+@Input          psInfo                  Structure that contains extra info
+                                        required for the creation of the context
+                                        (elements might change from core to core)
+@Return			PVRSRV_OK if the context was successfully created
+*/ /**************************************************************************/
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+									 PVRSRV_DEVICE_NODE *psDeviceNode,
+									 RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+									 RGXFWIF_DM eDM,
+									 DEVMEM_MEMDESC *psAllocatedMemDesc,
+									 IMG_UINT32 ui32AllocatedOffset,
+									 DEVMEM_MEMDESC *psFWMemContextMemDesc,
+									 DEVMEM_MEMDESC *psContextStateMemDesc,
+									 IMG_UINT32 ui32CCBAllocSizeLog2,
+									 IMG_UINT32 ui32CCBMaxAllocSizeLog2,
+									 IMG_UINT32 ui32Priority,
+									 RGX_COMMON_CONTEXT_INFO *psInfo,
+									 RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext);
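+/* Lifecycle sketch (illustrative; error handling omitted): a server common
+ * context is typically created with FWCommonContextAllocate(), its firmware
+ * address is fetched with FWCommonContextGetFWAddress() when building kick
+ * commands, and it is released with FWCommonContextFree() once the firmware
+ * has acknowledged a cleanup request (see RGXFWRequestCommonContextCleanUp
+ * below).
+ */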
+
+
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                                               IMG_UINT32 *pui32LastResetJobRef);
+
+PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+/*!
+******************************************************************************
+
+ @Function	RGXScheduleProcessQueuesKM
+
+ @Description - Software command complete handler
+				(sends uncounted kicks for all the DMs through the MISR)
+
+ @Input hCmdCompHandle - RGX device node
+
+******************************************************************************/
+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+
+/*!
+******************************************************************************
+
+ @Function	RGXInstallProcessQueuesMISR
+
+ @Description - Installs the MISR to handle Process Queues operations
+
+ @Input phMISR - Pointer to the MISR handler
+
+ @Input psDeviceNode - RGX Device node
+
+******************************************************************************/
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*************************************************************************/ /*!
+@Function       RGXSendCommandWithPowLock
+
+@Description    Sends a command to a particular DM without honouring
+                pending cache operations but taking the power lock.
+
+@Input          psDevInfo			Device Info
+@Input          eKCCBType			To which DM the cmd is sent.
+@Input          psKCCBCmd			The cmd to send.
+@Input          ui32PDumpFlags			PDump flags
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommandWithPowLock(PVRSRV_RGXDEV_INFO	*psDevInfo,
+										RGXFWIF_DM			eKCCBType,
+										RGXFWIF_KCCB_CMD	*psKCCBCmd,
+										IMG_UINT32			ui32PDumpFlags);
+
+PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll);
+/*************************************************************************/ /*!
+@Function       RGXSendCommand
+
+@Description    Sends a command to a particular DM without honouring
+                pending cache operations or the power lock.
+                The function flushes any deferred KCCB commands first.
+
+@Input          psDevInfo			Device Info
+@Input          eKCCBType			To which DM the cmd is sent.
+@Input          psKCCBCmd			The cmd to send.
+@Input          uiPdumpFlags			PDump flags.
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommand(PVRSRV_RGXDEV_INFO	*psDevInfo,
+								 RGXFWIF_DM		eKCCBType,
+								 RGXFWIF_KCCB_CMD	*psKCCBCmd,
+								 PDUMP_FLAGS_T		uiPdumpFlags);
+
+
+/*************************************************************************/ /*!
+@Function       RGXScheduleCommand
+
+@Description    Sends a command to a particular DM
+
+@Input          psDevInfo			Device Info
+@Input          eKCCBType			To which DM the cmd is sent.
+@Input          psKCCBCmd			The cmd to send.
+@Input          ui32CacheOpFence		Pending cache op. fence value.
+@Input          ui32PDumpFlags			PDump flags
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXScheduleCommand(PVRSRV_RGXDEV_INFO	*psDevInfo,
+								RGXFWIF_DM		eKCCBType,
+								RGXFWIF_KCCB_CMD	*psKCCBCmd,
+								IMG_UINT32		ui32CacheOpFence,
+								IMG_UINT32		ui32PDumpFlags);
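+/* Summary of the three submission helpers above: RGXSendCommandWithPowLock()
+ * takes the power lock but does not honour pending cache operations;
+ * RGXSendCommand() honours neither, but flushes any deferred KCCB commands
+ * first; RGXScheduleCommand() additionally takes a pending cache-op fence
+ * value to honour.
+ */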
+
+/*************************************************************************/ /*!
+@Function       RGXScheduleCommandAndWait
+
+@Description    Schedules the command with RGXScheduleCommand and then waits
+				for the FW to update a sync. The sync must be piggy-backed on
+				the cmd, either by passing a sync cmd or a cmd that contains the
+				sync which the FW will eventually update. The sync is created in
+				the function, therefore the function provides a FWAddr and
+				UpdateValue for that cmd.
+
+@Input          psDevInfo			Device Info
+@Input          eDM				To which DM the cmd is sent.
+@Input          psKCCBCmd			The cmd to send.
+@Input          ui32CmdSize			The cmd size.
+@Input          puiSyncObjDevVAddr	Pointer to the location with the FWAddr of
+									the sync.
+@Input          puiUpdateValue		Pointer to the location with the update
+									value of the sync.
+@Input          psSyncPrim			Client sync primitive backing the wait.
+@Input          ui32PDumpFlags		PDump flags
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXScheduleCommandAndWait(PVRSRV_RGXDEV_INFO	*psDevInfo,
+									   RGXFWIF_DM			eDM,
+									   RGXFWIF_KCCB_CMD		*psKCCBCmd,
+									   IMG_UINT32			ui32CmdSize,
+									   IMG_UINT32			*puiSyncObjDevVAddr,
+									   IMG_UINT32			*puiUpdateValue,
+									   PVRSRV_CLIENT_SYNC_PRIM	*psSyncPrim,
+									   IMG_UINT32			ui32PDumpFlags);
+
+PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+
+/*! ***********************************************************************//**
+@brief          Copy framework command into FW addressable buffer
+
+@param          psFWFrameworkMemDesc
+@param          pbyGPUFRegisterList
+@param          ui32FrameworkRegisterSize
+
+@returns        PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC	*psFWFrameworkMemDesc,
+										   IMG_PBYTE		pbyGPUFRegisterList,
+										   IMG_UINT32		ui32FrameworkRegisterSize);
+
+
+/*! ***********************************************************************//**
+@brief          Create FW addressable buffer for framework
+
+@param          psDeviceNode
+@param          ppsFWFrameworkMemDesc
+@param          ui32FrameworkRegisterSize
+
+@returns        PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE * psDeviceNode,
+										DEVMEM_MEMDESC     ** ppsFWFrameworkMemDesc,
+										IMG_UINT32         ui32FrameworkRegisterSize);
+
+/*************************************************************************/ /*!
+@Function       RGXWaitForFWOp
+
+@Description    Send a sync command and wait to be signalled.
+
+@Input          psDevInfo			Device Info
+@Input          eDM				To which DM the cmd is sent.
+@Input          psSyncPrim			Sync primitive used to wait for the FW.
+@Input          ui32PDumpFlags			PDump flags
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXWaitForFWOp(PVRSRV_RGXDEV_INFO	*psDevInfo,
+									RGXFWIF_DM	eDM,
+									PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+									IMG_UINT32	ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       RGXPollForGPCommandCompletion
+
+@Description    Polls for completion of a submitted GP command. Poll is done
+                on a value matching a masked read from the address.
+
+@Input          psDevNode			Pointer to device node struct
+@Input          pui32LinMemAddr			CPU linear address to poll
+@Input          ui32Value			Required value
+@Input          ui32Mask			Mask
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode,
+									volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+									IMG_UINT32                   ui32Value,
+									IMG_UINT32                   ui32Mask);
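+/* Completion is detected by a masked compare against the polled address,
+ * i.e. (assuming the usual masked-poll semantics) success is roughly:
+ *
+ *     (*pui32LinMemAddr & ui32Mask) == ui32Value
+ */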
+
+/*************************************************************************/ /*!
+@Function       RGXStateFlagCtrl
+
+@Description    Set and return FW internal state flags.
+
+@Input          psDevInfo       Device Info
+@Input          ui32Config      AppHint config flags
+@Output         pui32State      Current AppHint state flag configuration
+@Input          bSetNotClear    Set or clear the provided config flags
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 ui32Config,
+				IMG_UINT32 *pui32State,
+				IMG_BOOL bSetNotClear);
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestCommonContextCleanUp
+
+ @Description Schedules a FW common context cleanup. The firmware doesn't
+              block waiting for the resource to become idle but rather notifies
+              the host that the resource is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psServerCommonContext - the common context to be cleaned up
+
+ @Input psSyncPrim - sync primitive used by the cleanup command
+
+ @Input eDM - data master to which the cleanup command should be sent
+
+ @Input ui32PDumpFlags - PDump continuous flag
+
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+											  RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+											  PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+											  RGXFWIF_DM eDM,
+											  IMG_UINT32 ui32PDumpFlags);
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestHWRTDataCleanUp
+
+ @Description Schedules a FW HWRTData memory cleanup. The firmware doesn't
+              block waiting for the resource to become idle but rather notifies
+              the host that the resource is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psHWRTData - firmware address of the HWRTData to be cleaned up
+
+ @Input psSync - sync primitive used by the cleanup command
+
+ @Input eDM - data master to which the cleanup command should be sent
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+										 PRGXFWIF_HWRTDATA psHWRTData,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync,
+										 RGXFWIF_DM eDM);
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestFreeListCleanUp
+
+ @Description Schedules a FW FreeList cleanup. The firmware doesn't block
+              waiting for the resource to become idle but rather notifies the
+              host that the resource is busy.
+
+ @Input psDevInfo - pointer to device info
+
+ @Input psFWFreeList - firmware address of the FreeList to be cleaned up
+
+ @Input psSync - sync primitive used by the cleanup command
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+										 PRGXFWIF_FREELIST psFWFreeList,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestZSBufferCleanUp
+
+ @Description Schedules a FW ZS Buffer cleanup. The firmware doesn't block
+              waiting for the resource to become idle but rather notifies the
+              host that the resource is busy.
+
+ @Input psDevInfo - pointer to device info
+
+ @Input psFWZSBuffer - firmware address of the ZS Buffer to be cleaned up
+
+ @Input psSync - sync primitive used by the cleanup command
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+										 PRGXFWIF_ZSBUFFER psFWZSBuffer,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+								CONNECTION_DATA *psConnection,
+								PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32Priority,
+								RGXFWIF_DM eDM);
+
+/*!
+******************************************************************************
+
+ @Function		RGXFWSetHCSDeadline
+
+ @Description	Requests the Firmware to set a new Hard Context
+				Switch timeout deadline. Context switches that
+				surpass that deadline cause the system to kill
+				the currently running workloads.
+
+ @Input psDevInfo	pointer to device info
+
+ @Input ui32HCSDeadlineMs	The deadline in milliseconds.
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32HCSDeadlineMs);
+
+/*!
+******************************************************************************
+
+ @Function		RGXFWChangeOSidPriority
+
+ @Description	Requests the Firmware to change the priority of an
+				operating system. Higher priority number equals
+				higher priority on the scheduling system.
+
+ @Input psDevInfo - pointer to device info
+
+ @Input ui32OSid		The OSid whose priority is to be altered
+
+ @Input ui32Priority	The new priority number for the specified OSid
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+									 IMG_UINT32 ui32OSid,
+									 IMG_UINT32 ui32Priority);
+
+/*!
+****************************************************************************
+
+ @Function		RGXFWSetOSIsolationThreshold
+
+ @Description	Requests the Firmware to change the priority
+				threshold of the OS Isolation group. Any OS with a
+				priority higher or equal than the threshold is
+				considered to be belonging to the isolation group.
+
+ @Input psDevInfo - pointer to device info
+
+ @Input ui32IsolationPriorityThreshold	The new priority threshold
+ ***************************************************************************/
+PVRSRV_ERROR RGXFWSetOSIsolationThreshold(PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32IsolationPriorityThreshold);
+
+/*!
+****************************************************************************
+
+ @Function		RGXFWHealthCheckCmd
+
+ @Description	Ping the firmware to check if it is responsive.
+
+ @Input psDevInfo - pointer to device info
+ ***************************************************************************/
+PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+****************************************************************************
+
+ @Function		RGXFWSetFwOsState
+
+ @Description	Requests the Firmware to change a guest OS's online
+				state. This should be initiated by the VMM when a
+				guest VM comes online or goes offline. If offline,
+				the FW offloads any current resource from that OSID.
+				The request is repeated until the FW has had time to
+				free all the resources or has waited for workloads
+				to finish.
+
+ @Input psDevInfo - pointer to device info
+
+ @Input ui32OSid		The Guest OSid whose state is being altered
+
+ @Input eOSOnlineState	The new state (Online or Offline)
+ ***************************************************************************/
+PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32OSid,
+								RGXFWIF_OS_STATE_CHANGE eOSOnlineState);
+
+/*!
+******************************************************************************
+
+ @Function	RGXReadMETAAddr
+
+ @Description Reads a value at given address in META memory space
+              (it can be either a memory location or a META register)
+
+ @Input psDevInfo - pointer to device info
+
+ @Input ui32METAAddr - address in META memory space
+
+ @Output pui32Value - value
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO	*psDevInfo,
+                             IMG_UINT32 ui32METAAddr,
+                             IMG_UINT32 *pui32Value);
+
+/*!
+******************************************************************************
+
+ @Function	RGXWriteMETAAddr
+
+ @Description Write a value to the given address in META memory space
+              (it can be either a memory location or a META register)
+
+ @Input psDevInfo - pointer to device info
+
+ @Input ui32METAAddr - address in META memory space
+
+ @Input ui32Value    - Value to write to address in META memory space
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              IMG_UINT32 ui32METAAddr,
+                              IMG_UINT32 ui32Value);
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+/*!
+******************************************************************************
+
+ @Function	RGXCheckCheckpointCCB
+
+ @Description Processes all signalled checkpoints which are found in the
+              checkpoint CCB.
+
+ @Input psDevInfo - pointer to device node
+
+ ******************************************************************************/
+void RGXCheckCheckpointCCB(PVRSRV_DEVICE_NODE *psDevInfo);
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
+
+/*!
+******************************************************************************
+
+ @Function	RGXCheckFirmwareCCB
+
+ @Description Processes all commands that are found in the Firmware CCB.
+
+ @Input psDevInfo - pointer to device info
+
+ ******************************************************************************/
+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+******************************************************************************
+
+ @Function	RGXCheckForStalledClientContexts
+
+ @Description Checks all client contexts, for the device with device info
+              provided, to see if any are waiting for a fence to signal and
+              optionally force signalling of the fence for the context which
+              has been waiting the longest.
+              This function is called by RGXUpdateHealthStatus() and also
+              may be invoked from other trigger points.
+
+ @Input psDevInfo - pointer to device info
+ @Input bIgnorePrevious - if IMG_TRUE, any stalled contexts will be indicated
+                          immediately, rather than only checking against any
+                          previous stalled contexts
+
+ ******************************************************************************/
+void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious);
+
+/*!
+******************************************************************************
+
+ @Function      RGXUpdateHealthStatus
+
+ @Description   Tests a number of conditions which might indicate a fatal error
+                has occurred in the firmware. The result is stored in the
+                device node's eHealthStatus.
+
+ @Input         psDevNode              Pointer to device node structure.
+ @Input         bCheckAfterTimePassed  When TRUE, the function will also test for
+                                      firmware queues and polls not changing
+                                      since the previous test.
+
+                                      Note: if not enough time has passed since
+                                      the last call, false positives may occur.
+
+ @returns       PVRSRV_ERROR
+ ******************************************************************************/
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+                                   IMG_BOOL bCheckAfterTimePassed);
+
+
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM);
+
+void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+                             DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                             void *pvDumpDebugFile,
+                             IMG_UINT32 ui32VerbLevel);
+
+/*!
+******************************************************************************
+
+ @Function      AttachKickResourcesCleanupCtls
+
+ @Description   Attaches the cleanup structures to a kick command so that
+                submission reference counting can be performed when the
+                firmware processes the command
+
+ @Output        apsCleanupCtl          Array of CleanupCtl structure pointers to populate.
+ @Output        pui32NumCleanupCtl     Number of CleanupCtl structure pointers written out.
+ @Input         eDM                    Which data master is the subject of the command.
+ @Input         bKick                  TRUE if the client originally wanted to kick this DM.
+ @Input         psRTDataCleanup        Optional RTData cleanup associated with the command.
+ @Input         psZBuffer              Optional ZBuffer associated with the command.
+ @Input         psSBuffer              Optional SBuffer associated with the command.
+ @Input         psMSAAScratchBuffer    Optional MSAA scratch buffer associated with the command.
+ ******************************************************************************/
+void AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+									IMG_UINT32 *pui32NumCleanupCtl,
+									RGXFWIF_DM eDM,
+									IMG_BOOL bKick,
+									RGX_RTDATA_CLEANUP_DATA        *psRTDataCleanup,
+									RGX_ZSBUFFER_DATA              *psZBuffer,
+									RGX_ZSBUFFER_DATA              *psSBuffer,
+									RGX_ZSBUFFER_DATA              *psMSAAScratchBuffer);
+
+/*!
+******************************************************************************
+
+ @Function		RGXResetHWRLogs
+
+ @Description	Resets the HWR Logs buffer (the hardware recovery count is not reset)
+
+ @Input			psDevNode	Pointer to the device node
+
+ @Return		PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+								error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode);
+
+
+/*!
+******************************************************************************
+
+ @Function		RGXGetPhyAddr
+
+ @Description	Get the physical address of a certain PMR at a certain offset within it
+
+ @Input			psPMR	    PMR of the allocation
+
+ @Input			ui32LogicalOffset	Logical offset
+
+ @Input			ui32Log2PageSize	Log2 of the page size
+
+ @Input			ui32NumOfPages		Number of pages to query
+
+ @Output		psPhyAddr		Physical address of the allocation
+
+ @Output		bValid			Whether the returned address is valid
+
+ @Return		PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+								error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+						   IMG_DEV_PHYADDR *psPhyAddr,
+						   IMG_UINT32 ui32LogicalOffset,
+						   IMG_UINT32 ui32Log2PageSize,
+						   IMG_UINT32 ui32NumOfPages,
+						   IMG_BOOL *bValid);
+
+#if defined(PDUMP)
+/*!
+******************************************************************************
+
+ @Function      RGXPdumpDrainKCCB
+
+ @Description   Wait for the firmware to execute all the commands in the kCCB
+
+ @Input         psDevInfo	Pointer to the device
+
+ @Input         ui32WriteOffset	  The write offset the poll waits for the read offset to reach
+
+ @Return        PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+								error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+							   IMG_UINT32 ui32WriteOffset);
+#endif /* PDUMP */
+
+/*!
+******************************************************************************
+
+ @Function		RGXVzCreateFWKernelMemoryContext
+
+ @Description	Performs additional firmware memory context creation
+
+ @Return		PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+								error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function		RGXVzDestroyFWKernelMemoryContext
+
+ @Description	Performs additional firmware memory context destruction
+
+ @Return		PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+								error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function		RGXVzRegisterFirmwarePhysHeap
+
+ @Description	Registers and maps to the device a guest firmware physheap
+
+ @Return		PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+								error code
+ *****************************************************************************/
+PVRSRV_ERROR RGXVzRegisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+										   IMG_UINT32 ui32OSID,
+										   IMG_DEV_PHYADDR sDevPAddr,
+										   IMG_UINT64 ui64DevPSize);
+
+/*!
+******************************************************************************
+
+ @Function		RGXVzUnregisterFirmwarePhysHeap
+
+ @Description	Unregisters and unmaps from the device a guest firmware physheap
+
+ @Return		PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+								error code
+ *****************************************************************************/
+PVRSRV_ERROR RGXVzUnregisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+											 IMG_UINT32 ui32OSID);
+
+
+/*!
+******************************************************************************
+
+ @Function      RGXGetFwMainHeapSize
+
+ @Description   Return size of the main FW heap in bytes
+
+ @Return        IMG_UINT32
+ *****************************************************************************/
+IMG_UINT32 RGXGetFwMainHeapSize(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* RGXFWUTILS_H */
+/******************************************************************************
+ End of file (rgxfwutils.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxheapconfig.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxheapconfig.h
new file mode 100644
index 0000000..c857198
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxheapconfig.h
@@ -0,0 +1,182 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Memory heaps device-specific configuration
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHEAPCONFIG_H
+#define RGXHEAPCONFIG_H
+
+#include "rgxdefs_km.h"
+
+/*
+	RGX Device Virtual Address Space Definitions
+	NOTES:
+		Base addresses have to be a multiple of 4MiB
+
+		RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed,
+		on a global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_*
+		respectively. Therefore if clients use multiple configs they must still
+		be consistent with their definitions for these heaps.
+
+		Shared virtual memory (GENERAL_SVM) support requires half of the address
+		space be reserved for SVM allocations unless BRN fixes are required in
+		which case the SVM heap is disabled. This is reflected in the device
+		connection capability bits returned to userspace.
+
+		Variable page-size heap (GENERAL_NON4K) support reserves 64GiB from the
+		available 4K page-size heap (GENERAL) space. The actual heap page-size
+		defaults to 16K; the AppHint PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE
+		can be used to force it to one of these values: 4K, 64K, 256K, 1M, 2M.
+*/
+
+	/* Start at 4 MiB. Size of 512 GiB less 4 MiB (managed by OS/Services) */
+	#define RGX_GENERAL_SVM_HEAP_BASE			IMG_UINT64_C(0x0000400000)
+	#define RGX_GENERAL_SVM_HEAP_SIZE			IMG_UINT64_C(0x7FFFC00000)
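+	/* Note: 0x0000400000 + 0x7FFFC00000 = 0x8000000000, so the SVM heap
+	   ends exactly where the GENERAL heap below begins (512 GiB). */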
+
+	/* Start at 512GiB. Size of 256 GiB */
+	#define RGX_GENERAL_HEAP_BASE				IMG_UINT64_C(0x8000000000)
+	#define RGX_GENERAL_HEAP_SIZE				IMG_UINT64_C(0x4000000000)
+
+	/* HWBRN65273 workaround requires General Heap to use a unique single 1GB PCE entry. */
+	#define RGX_GENERAL_BRN_65273_HEAP_BASE		IMG_UINT64_C(0x65C0000000)
+	#define RGX_GENERAL_BRN_65273_HEAP_SIZE		IMG_UINT64_C(0x0080000000)
+
+	/* Start at 768GiB. Size of 64 GiB */
+	#define RGX_GENERAL_NON4K_HEAP_BASE			IMG_UINT64_C(0xC000000000)
+	#define RGX_GENERAL_NON4K_HEAP_SIZE			IMG_UINT64_C(0x1000000000)
+
+	/* HWBRN65273 workaround requires Non4K memory to use a unique single 1GB PCE entry. */
+	#define RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE	IMG_UINT64_C(0x73C0000000)
+	#define RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE	IMG_UINT64_C(0x0080000000)
+
+	/* Start at 832 GiB. Size of 32 GiB */
+	#define RGX_BIF_TILING_NUM_HEAPS			4
+	#define RGX_BIF_TILING_HEAP_SIZE			IMG_UINT64_C(0x0200000000)
+	#define RGX_BIF_TILING_HEAP_1_BASE			IMG_UINT64_C(0xD000000000)
+	#define RGX_BIF_TILING_HEAP_2_BASE			(RGX_BIF_TILING_HEAP_1_BASE + RGX_BIF_TILING_HEAP_SIZE)
+	#define RGX_BIF_TILING_HEAP_3_BASE			(RGX_BIF_TILING_HEAP_2_BASE + RGX_BIF_TILING_HEAP_SIZE)
+	#define RGX_BIF_TILING_HEAP_4_BASE			(RGX_BIF_TILING_HEAP_3_BASE + RGX_BIF_TILING_HEAP_SIZE)
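+	/* With the 8 GiB stride above, the derived bases are HEAP_2 =
+	   0xD200000000, HEAP_3 = 0xD400000000 and HEAP_4 = 0xD600000000;
+	   4 heaps x 8 GiB = 32 GiB, ending at 0xD800000000. */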
+
+	/* Start at 872 GiB. Size of 4 GiB */
+	#define RGX_PDSCODEDATA_HEAP_BASE			IMG_UINT64_C(0xDA00000000)
+	#define RGX_PDSCODEDATA_HEAP_SIZE			IMG_UINT64_C(0x0100000000)
+
+	/* HWBRN65273 workaround requires PDS memory to use a unique single 1GB PCE entry. */
+	#define RGX_PDSCODEDATA_BRN_65273_HEAP_BASE	IMG_UINT64_C(0xA800000000)
+	#define RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE	IMG_UINT64_C(0x0040000000)
+
+	/* HWBRN63142 workaround requires Region Header memory to be at the top
+	   of a 16 GiB aligned range. This is so that, when masked with 0x03FFFFFFFF,
+	   the address avoids aliasing PB addresses. Start at 879.75 GiB. Size of 256 MiB. */
+	#define RGX_RGNHDR_BRN_63142_HEAP_BASE		IMG_UINT64_C(0xDBF0000000)
+	#define RGX_RGNHDR_BRN_63142_HEAP_SIZE		IMG_UINT64_C(0x0010000000)
+
+	/* Start at 880 GiB, Size of 1 MiB */
+	#define RGX_VISTEST_HEAP_BASE				IMG_UINT64_C(0xDC00000000)
+	#define RGX_VISTEST_HEAP_SIZE				IMG_UINT64_C(0x0000100000)
+
+	/* HWBRN65273 workaround requires VisTest memory to use a unique single 1GB PCE entry. */
+	#define RGX_VISTEST_BRN_65273_HEAP_BASE		IMG_UINT64_C(0xE400000000)
+	#define RGX_VISTEST_BRN_65273_HEAP_SIZE		IMG_UINT64_C(0x0000100000)
+
+	/* Start at 896 GiB. Size of 4 GiB */
+	#define RGX_USCCODE_HEAP_BASE				IMG_UINT64_C(0xE000000000)
+	#define RGX_USCCODE_HEAP_SIZE				IMG_UINT64_C(0x0100000000)
+
+	/* HWBRN65273 workaround requires USC memory to use a unique single 1GB PCE entry. */
+	#define RGX_USCCODE_BRN_65273_HEAP_BASE		IMG_UINT64_C(0xBA00000000)
+	#define RGX_USCCODE_BRN_65273_HEAP_SIZE		IMG_UINT64_C(0x0040000000)
+
+	/* Start at 903GiB. Firmware heaps defined in rgxdefs_km.h
+	   	RGX_FIRMWARE_RAW_HEAP_BASE
+	   	RGX_FIRMWARE_HYPERV_MAIN_HEAP_BASE
+	   	RGX_FIRMWARE_GUEST_MAIN_HEAP_BASE
+	   	RGX_FIRMWARE_MAIN_HEAP_SIZE
+	   	RGX_FIRMWARE_CONFIG_HEAP_SIZE
+	   	RGX_FIRMWARE_RAW_HEAP_SIZE */
+
+	/* HWBRN65273 workaround requires TQ memory to start at 0GB and use a unique single 1GB PCE entry. */
+	#define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE		IMG_UINT64_C(0x0000000000)
+	#define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE		IMG_UINT64_C(0x0040000000)
+
+	/* Start at 912GiB. Size of 16 GiB. 16GB aligned to match RGX_CR_ISP_PIXEL_BASE */
+	#define RGX_TQ3DPARAMETERS_HEAP_BASE		IMG_UINT64_C(0xE400000000)
+	#define RGX_TQ3DPARAMETERS_HEAP_SIZE		IMG_UINT64_C(0x0400000000)
+
+	/* Start at 928GiB. Size of 4 GiB */
+	#define RGX_DOPPLER_HEAP_BASE				IMG_UINT64_C(0xE800000000)
+	#define RGX_DOPPLER_HEAP_SIZE				IMG_UINT64_C(0x0100000000)
+
+	/* Start at 932GiB. Size of 4 GiB */
+	#define RGX_DOPPLER_OVERFLOW_HEAP_BASE		IMG_UINT64_C(0xE900000000)
+	#define RGX_DOPPLER_OVERFLOW_HEAP_SIZE		IMG_UINT64_C(0x0100000000)
+
+	/* Start at 936GiB. Two groups of 128 KBytes that must follow each other in this order. */
+	#define RGX_SERVICES_SIGNALS_HEAP_BASE		IMG_UINT64_C(0xEA00000000)
+	#define RGX_SERVICES_SIGNALS_HEAP_SIZE		IMG_UINT64_C(0x0000020000)
+
+	#define RGX_SIGNALS_HEAP_BASE				IMG_UINT64_C(0xEA00020000)
+	#define RGX_SIGNALS_HEAP_SIZE				IMG_UINT64_C(0x0000020000)
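+	/* Note: 0xEA00000000 + 0x0000020000 = 0xEA00020000, so the two 128 KiB
+	   heaps above are contiguous, as required. */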
+
+	/* TDM TPU YUV coeffs - can be reduced to a single page */
+	#define RGX_TDM_TPU_YUV_COEFFS_HEAP_BASE	IMG_UINT64_C(0xEA00080000)
+	#define RGX_TDM_TPU_YUV_COEFFS_HEAP_SIZE	IMG_UINT64_C(0x0000040000)
+
+	/* HWBRN65273 workaround requires two Region Header buffers 4GB apart. */
+	#define RGX_MMU_INIA_BRN_65273_HEAP_BASE	IMG_UINT64_C(0xF800000000)
+	#define RGX_MMU_INIA_BRN_65273_HEAP_SIZE	IMG_UINT64_C(0x0040000000)
+	#define RGX_MMU_INIB_BRN_65273_HEAP_BASE	IMG_UINT64_C(0xF900000000)
+	#define RGX_MMU_INIB_BRN_65273_HEAP_SIZE	IMG_UINT64_C(0x0040000000)
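+	/* Note: 0xF900000000 - 0xF800000000 = 0x0100000000 (4 GiB), satisfying
+	   the "4GB apart" requirement above. */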
+
+#endif /* RGXHEAPCONFIG_H */
+
+/*****************************************************************************
+ End of file (rgxheapconfig.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxhwperf.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxhwperf.c
new file mode 100644
index 0000000..c84ad69
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxhwperf.c
@@ -0,0 +1,3823 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX HW Performance implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "rgxdevice.h"
+#include "pvrsrv_error.h"
+#include "pvr_notifier.h"
+#include "osfunc.h"
+#include "allocmem.h"
+
+#include "pvrsrv.h"
+#include "pvrsrv_tlstreams.h"
+#include "pvrsrv_tlcommon.h"
+#include "tlclient.h"
+#include "tlstream.h"
+
+#include "rgxhwperf.h"
+#include "rgxapi_km.h"
+#include "rgxfwutils.h"
+#include "rgxtimecorr.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pdump_km.h"
+#include "pvrsrv_apphint.h"
+#include "process_stats.h"
+#include "rgx_hwperf_table.h"
+#include "rgxinit.h"
+
+/* This is defined by default to enable producer callbacks.
+ * Clients of the TL interface can disable the use of the callback
+ * with PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK. */
+#define SUPPORT_TL_PRODUCER_CALLBACK 1
+
+/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */
+#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT)
+
+/* Defines size of buffers returned from acquire/release calls */
+#define FW_STREAM_BUFFER_SIZE (0x80000)
+#define HOST_STREAM_BUFFER_SIZE (0x20000)
+
+/* Must be at least as large as two tl packets of maximum size */
+static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1),
+              "HOST_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)");
+static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1),
+              "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)");
+
+static inline IMG_UINT32
+RGXHWPerfGetPackets( IMG_UINT32  ui32BytesExp,
+                     IMG_UINT32  ui32AllowedSize,
+                     RGX_PHWPERF_V2_PACKET_HDR psCurPkt )
+{
+	IMG_UINT32 sizeSum = 0;
+
+	/* Traverse the array to find how many packets will fit in the available space. */
+	while ( sizeSum < ui32BytesExp  &&
+			sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize )
+	{
+		sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt);
+		psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt);
+	}
+
+	return sizeSum;
+}
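+/* Worked example (illustrative sizes): with packets of 16, 24 and 40 bytes,
+ * ui32BytesExp = 80 and ui32AllowedSize = 64, the loop sums 16 + 24 = 40 and
+ * stops because 40 + 40 is not strictly less than 64, so 40 is returned.
+ * Note the strict '<': a run of packets that would exactly fill
+ * ui32AllowedSize is not accepted.
+ */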
+
+/*
+	RGXHWPerfCopyDataL1toL2
+ */
+static IMG_UINT32 RGXHWPerfCopyDataL1toL2(PVRSRV_RGXDEV_INFO* psDeviceInfo,
+                                          IMG_BYTE   *pbFwBuffer,
+                                          IMG_UINT32 ui32BytesExp)
+{
+	IMG_HANDLE   hHWPerfStream = psDeviceInfo->hHWPerfStream;
+	IMG_BYTE *   pbL2Buffer;
+	IMG_UINT32   ui32L2BufFree;
+	IMG_UINT32   ui32BytesCopied = 0;
+	IMG_UINT32   ui32BytesExpMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbFwBuffer));
+	PVRSRV_ERROR eError;
+
+	/* HWPERF_MISR_FUNC_DEBUG enables debug code for investigating HWPerf issues */
+#ifdef HWPERF_MISR_FUNC_DEBUG
+	static IMG_UINT32 gui32Ordinal = IMG_UINT32_MAX;
+#endif
+
+	PVR_DPF_ENTERED;
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+	PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d",
+			pbFwBuffer, ui32BytesExp));
+#endif
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+	{
+		/* Check the incoming buffer of data has not lost any packets */
+		IMG_BYTE *pbFwBufferIter = pbFwBuffer;
+		IMG_BYTE *pbFwBufferEnd = pbFwBuffer+ui32BytesExp;
+		do
+		{
+			RGX_HWPERF_V2_PACKET_HDR *asCurPos = RGX_HWPERF_GET_PACKET(pbFwBufferIter);
+			IMG_UINT32 ui32CurOrdinal = asCurPos->ui32Ordinal;
+			if (gui32Ordinal != IMG_UINT32_MAX)
+			{
+				if ((gui32Ordinal+1) != ui32CurOrdinal)
+				{
+					if (gui32Ordinal < ui32CurOrdinal)
+					{
+						PVR_DPF((PVR_DBG_WARNING,
+								"HWPerf [%p] packets lost (%u packets) between ordinal %u...%u",
+								pbFwBufferIter,
+								ui32CurOrdinal - gui32Ordinal - 1,
+								gui32Ordinal,
+								ui32CurOrdinal));
+					}
+					else
+					{
+						PVR_DPF((PVR_DBG_WARNING,
+								"HWPerf [%p] packet ordinal out of sequence last: %u, current: %u",
+								pbFwBufferIter,
+								gui32Ordinal,
+								ui32CurOrdinal));
+					}
+				}
+			}
+			gui32Ordinal = asCurPos->ui32Ordinal;
+			pbFwBufferIter += RGX_HWPERF_GET_SIZE(asCurPos);
+		} while( pbFwBufferIter < pbFwBufferEnd );
+	}
+#endif
+
+	if (ui32BytesExp > psDeviceInfo->ui32MaxPacketSize)
+	{
+		IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp,
+		                                         psDeviceInfo->ui32MaxPacketSize,
+		                                         RGX_HWPERF_GET_PACKET(pbFwBuffer));
+
+		if (0 != sizeSum)
+		{
+			ui32BytesExp = sizeSum;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to write data into host buffer as "
+					"packet is too big and hence it breaches TL "
+					"packet size limit (TLBufferSize / 2.5)"));
+			goto e0;
+		}
+	}
+
+	/* Try submitting all data in one TL packet. */
+	eError = TLStreamReserve2( hHWPerfStream,
+	                           &pbL2Buffer,
+	                           (size_t)ui32BytesExp, ui32BytesExpMin,
+	                           &ui32L2BufFree);
+	if ( eError == PVRSRV_OK )
+	{
+		OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)ui32BytesExp );
+		eError = TLStreamCommit(hHWPerfStream, (size_t)ui32BytesExp);
+		if ( eError != PVRSRV_OK )
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
+					eError, __func__));
+			goto e0;
+		}
+		/* Data were successfully written */
+		ui32BytesCopied = ui32BytesExp;
+	}
+	else if (eError == PVRSRV_ERROR_STREAM_FULL)
+	{
+		/* There was not enough space for all data, copy as much as possible */
+		IMG_UINT32 sizeSum  = RGXHWPerfGetPackets(ui32BytesExp, ui32L2BufFree, RGX_HWPERF_GET_PACKET(pbFwBuffer));
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, remaining free space: %d", ui32BytesExp, ui32L2BufFree));
+
+		if ( 0 != sizeSum )
+		{
+			eError = TLStreamReserve( hHWPerfStream, &pbL2Buffer, (size_t)sizeSum);
+
+			if ( eError == PVRSRV_OK )
+			{
+				OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)sizeSum );
+				eError = TLStreamCommit(hHWPerfStream, (size_t)sizeSum);
+				if ( eError != PVRSRV_OK )
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							"TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
+							eError, __func__));
+					goto e0;
+				}
+				/* sizeSum bytes of hwperf packets have been successfully written */
+				ui32BytesCopied = sizeSum;
+			}
+			else if ( PVRSRV_ERROR_STREAM_FULL == eError )
+			{
+				PVR_DPF((PVR_DBG_WARNING, "Cannot write HWPerf packet into host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
+			}
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "Cannot find space in host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
+		}
+	}
+	if ( PVRSRV_OK != eError && /* Some other error occurred */
+			PVRSRV_ERROR_STREAM_FULL != eError ) /* The stream-full error is handled by the caller; we return the copied byte count */
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"HWPerf enabled: Unexpected Error ( %d ) while copying FW buffer to TL buffer.",
+				eError));
+	}
+
+	e0:
+	/* Return the remaining packets left to be transported. */
+	PVR_DPF_RETURN_VAL(ui32BytesCopied);
+}
+
+
+static INLINE IMG_UINT32 RGXHWPerfAdvanceRIdx(
+		const IMG_UINT32 ui32BufSize,
+		const IMG_UINT32 ui32Pos,
+		const IMG_UINT32 ui32Size)
+{
+	return ( ui32Pos + ui32Size < ui32BufSize ? ui32Pos + ui32Size : 0 );
+}
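+/* Example (illustrative values): with ui32BufSize = 0x1000, ui32Pos = 0xF00
+ * and ui32Size = 0x100, the sum 0x1000 is not < 0x1000, so the index wraps
+ * back to 0; advancing exactly to the end of the buffer therefore also
+ * returns 0, consistent with the wrap handling in RGXHWPerfDataStore() below.
+ */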
+
+
+/*
+	RGXHWPerfDataStore
+ */
+static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO	*psDevInfo)
+{
+	RGXFWIF_TRACEBUF	*psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+	IMG_BYTE*			psHwPerfInfo = psDevInfo->psRGXFWIfHWPerfBuf;
+	IMG_UINT32			ui32SrcRIdx, ui32SrcWIdx, ui32SrcWrapCount;
+	IMG_UINT32			ui32BytesExp = 0, ui32BytesCopied = 0, ui32BytesCopiedSum = 0;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+	IMG_UINT32			ui32BytesExpSum = 0;
+#endif
+
+	PVR_DPF_ENTERED;
+
+	/* Caller should check this member is valid before calling */
+	PVR_ASSERT(psDevInfo->hHWPerfStream);
+
+	/* Get a copy of the current
+	 *   read (first packet to read)
+	 *   write (empty location for the next write to be inserted)
+	 *   WrapCount (size in bytes of the buffer at or past end)
+	 * indexes of the FW buffer */
+	ui32SrcRIdx = psRGXFWIfTraceBufCtl->ui32HWPerfRIdx;
+	ui32SrcWIdx = psRGXFWIfTraceBufCtl->ui32HWPerfWIdx;
+	OSMemoryBarrier();
+	ui32SrcWrapCount = psRGXFWIfTraceBufCtl->ui32HWPerfWrapCount;
+
+	/* Is there any data in the buffer not yet retrieved? */
+	if ( ui32SrcRIdx != ui32SrcWIdx )
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStore EVENTS found srcRIdx:%d srcWIdx: %d", ui32SrcRIdx, ui32SrcWIdx));
+
+		/* Is the write position higher than the read position? */
+		if ( ui32SrcWIdx > ui32SrcRIdx )
+		{
+			/* Yes, buffer has not wrapped */
+			ui32BytesExp = ui32SrcWIdx - ui32SrcRIdx;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+			ui32BytesExpSum += ui32BytesExp;
+#endif
+			ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo,
+			                                          psHwPerfInfo + ui32SrcRIdx,
+			                                          ui32BytesExp);
+
+			/* Advance the read index and the free bytes counter by the number
+			 * of bytes transported. Items will be left in buffer if not all data
+			 * could be transported. Exit to allow buffer to drain. */
+			psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx(
+					psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+					ui32BytesCopied);
+
+			ui32BytesCopiedSum += ui32BytesCopied;
+		}
+		/* No, buffer has wrapped and write position is behind read position */
+		else
+		{
+			/* Byte count equal to
+			 *     number of bytes from read position to the end of the buffer,
+			 *   + data in the extra space in the end of the buffer. */
+			ui32BytesExp = ui32SrcWrapCount - ui32SrcRIdx;
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+			ui32BytesExpSum += ui32BytesExp;
+#endif
+			/* Attempt to transfer the packets to the TL stream buffer */
+			ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo,
+			                                          psHwPerfInfo + ui32SrcRIdx,
+			                                          ui32BytesExp);
+
+			/* Advance the read index as before and update the local copy of
+			 * the read index, as it might be used in the last if branch */
+			ui32SrcRIdx = RGXHWPerfAdvanceRIdx(
+					psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+					ui32BytesCopied);
+
+			/* Update Wrap Count */
+			if ( ui32SrcRIdx == 0)
+			{
+				psRGXFWIfTraceBufCtl->ui32HWPerfWrapCount = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+			}
+			psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = ui32SrcRIdx;
+
+			ui32BytesCopiedSum += ui32BytesCopied;
+
+			/* If all the data in the end of the array was copied, try copying
+			 * wrapped data in the beginning of the array, assuming there is
+			 * any and the RIdx was wrapped. */
+			if (   (ui32BytesCopied == ui32BytesExp)
+					&& (ui32SrcWIdx > 0)
+					&& (ui32SrcRIdx == 0) )
+			{
+				ui32BytesExp = ui32SrcWIdx;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+				ui32BytesExpSum += ui32BytesExp;
+#endif
+				ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo,
+				                                          psHwPerfInfo,
+				                                          ui32BytesExp);
+				/* Advance the FW buffer read position. */
+				psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx(
+						psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+						ui32BytesCopied);
+
+				ui32BytesCopiedSum += ui32BytesCopied;
+			}
+		}
+#ifdef HWPERF_MISR_FUNC_DEBUG
+		if (ui32BytesCopiedSum != ui32BytesExpSum)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psRGXFWIfTraceBufCtl->ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum));
+		}
+#endif
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfDataStore NO EVENTS to transport"));
+	}
+
+	PVR_DPF_RETURN_VAL(ui32BytesCopiedSum);
+}
+
+
+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDevInfo)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+	IMG_UINT32          ui32BytesCopied;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDevInfo);
+	psRgxDevInfo = psDevInfo->pvDevice;
+
+	/* Store FW event data if the destination buffer exists.*/
+	if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL)
+	{
+		OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+		ui32BytesCopied = RGXHWPerfDataStore(psRgxDevInfo);
+		if ( ui32BytesCopied )
+		{
+			/* Signal consumers that packets may be available to read when
+			 * running from a HW kick, not when called by the client APP thread
+			 * via the transport layer CB, as this can lead to stream
+			 * corruption. */
+			eError = TLStreamSync(psRgxDevInfo->hHWPerfStream);
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStoreCB: Zero bytes copied"));
+			RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo);
+		}
+		OSLockRelease(psRgxDevInfo->hHWPerfLock);
+	}
+
+
+	PVR_DPF_RETURN_OK;
+}
+
+
+/* Currently supported by default */
+#if defined(SUPPORT_TL_PRODUCER_CALLBACK)
+static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream,
+                                  IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)pvUser;
+
+	PVR_UNREFERENCED_PARAMETER(hStream);
+	PVR_UNREFERENCED_PARAMETER(ui32Resp);
+
+	PVR_ASSERT(psRgxDevInfo);
+
+	switch (ui32ReqOp)
+	{
+		case TL_SOURCECB_OP_CLIENT_EOS:
+			/* Keep HWPerf resource init check and use of
+			 * resources atomic, they may not be freed during use
+			 */
+
+			/* This avoids a deadlock situation where:
+			 * in DoTLStreamReserve() the writer has acquired HWPerfLock and
+			 * ReadLock and is waiting on ReadPending (which will be reset
+			 * by the reader), while
+			 * the reader, after setting ReadPending in TLStreamAcquireReadPos(),
+			 * is waiting for HWPerfLock in RGXHWPerfTLCB().
+			 * So here in RGXHWPerfTLCB(), if HWPerfLock is already acquired we
+			 * return to the reader without waiting to acquire HWPerfLock.
+			 */
+			if ( !OSTryLockAcquire(psRgxDevInfo->hHWPerfLock))
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "hHWPerfLock is already acquired, a write "
+						"operation might already be in process"));
+				return PVRSRV_OK;
+			}
+
+			if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL)
+			{
+				(void) RGXHWPerfDataStore(psRgxDevInfo);
+			}
+			OSLockRelease(psRgxDevInfo->hHWPerfLock);
+			break;
+
+		default:
+			break;
+	}
+
+	return eError;
+}
+#endif
+
+
+static void RGXHWPerfL1BufferDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc)
+	{
+		if (psRgxDevInfo->psRGXFWIfHWPerfBuf != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+			psRgxDevInfo->psRGXFWIfHWPerfBuf = NULL;
+		}
+		DevmemFwFree(psRgxDevInfo, psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+		psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL;
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfInit
+
+@Description    Called during driver init to initialise the HWPerf module
+				in the Rogue device driver. This function allocates only the
+				minimal resources required for the HWPerf server module to
+				function.
+
+@Input          psRgxDevInfo	RGX Device Info
+
+@Return			PVRSRV_ERROR
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	PVRSRV_ERROR eError;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	PVR_DPF_ENTERED;
+
+	/* expecting a valid device info */
+	PVR_ASSERT(psRgxDevInfo);
+
+	/* Create a lock for HWPerf server module used for serializing, L1 to L2
+	 * copy calls (e.g. in case of TL producer callback) and L1, L2 resource
+	 * allocation */
+	eError = OSLockCreate(&psRgxDevInfo->hHWPerfLock);
+	PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+	/* avoid uninitialised data */
+	psRgxDevInfo->hHWPerfStream = (IMG_HANDLE) NULL;
+	psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL;
+
+	PVR_DPF_RETURN_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfIsInitRequired
+
+@Description    Returns true if the HWPerf firmware buffer (L1 buffer) and host
+                driver TL buffer (L2 buffer) are not already allocated. The
+                caller must hold hHWPerfLock before calling this function so
+                that the state tested is consistent.
+
+@Input          psRgxDevInfo RGX Device Info, on which init requirement is
+                checked.
+
+@Return         IMG_BOOL	Whether initialization (allocation) is required
+ */ /**************************************************************************/
+static INLINE IMG_BOOL RGXHWPerfIsInitRequired(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hHWPerfLock));
+
+#if !defined(NO_HARDWARE)
+	/* Both L1 and L2 buffers are required (for HWPerf functioning) on driver
+	 * built for actual hardware (TC, EMU, etc.)
+	 */
+	if (psRgxDevInfo->hHWPerfStream == (IMG_HANDLE) NULL)
+	{
+		/* The allocation API (RGXHWPerfInitOnDemandResources) allocates
+		 * device memory for both L1 and L2 without any checks. Hence,
+		 * either both should be allocated or both be NULL.
+		 *
+		 * Should this change in future (e.g. a situation where one of the
+		 * two buffers is already allocated and the other is required), add
+		 * the necessary checks before the allocation calls to avoid memory leaks.
+		 */
+		PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL);
+		return IMG_TRUE;
+	}
+	PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc != NULL);
+#else
+	/* On a NO-HW driver the L2 buffer is not allocated, so there is no point
+	 * in checking its allocation */
+	if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL)
+	{
+		return IMG_TRUE;
+	}
+#endif
+	return IMG_FALSE;
+}
+#if !defined(NO_HARDWARE)
+static void _HWPerfFWOnReaderOpenCB(void *pvArg)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*) pvArg;
+	PVRSRV_DEVICE_NODE* psDevNode = (PVRSRV_DEVICE_NODE*) psRgxDevInfo->psDeviceNode;
+	RGXFWIF_KCCB_CMD sKccbCmd;
+
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG;
+	sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV;
+	sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = 0;
+
+	eError = RGXScheduleCommand(psDevNode->pvDevice, RGXFWIF_DM_GP,
+		                        &sKccbCmd, 0, PDUMP_FLAGS_CONTINUOUS);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to generate feature packet in "
+				"firmware (error = %d)", __func__, eError));
+		return;
+	}
+
+	eError = RGXWaitForFWOp(psDevNode->pvDevice, RGXFWIF_DM_GP,
+		                    psDevNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	PVR_LOGRN_IF_ERROR(eError, "RGXWaitForFWOp");
+
+}
+#endif
+/*************************************************************************/ /*!
+@Function       RGXHWPerfInitOnDemandResources
+
+@Description    This function allocates the HWPerf firmware buffer (L1 buffer)
+                and host driver TL buffer (L2 buffer) if HWPerf is enabled at
+                driver load time. Otherwise, these buffers are allocated
+                on demand as and when required. The caller must hold the
+                hHWPerfLock lock before calling this function so that the
+                state tested is consistent when called outside of driver
+                initialisation.
+
+@Input          psRgxDevInfo RGX Device Info, on which init is done
+
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo)
+{
+	IMG_HANDLE hStream = NULL; /* Init required for noHW */
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32L2BufferSize = 0;
+	DEVMEM_FLAGS_T uiMemAllocFlags;
+	IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; /* +5 bytes are enough to hold
+																			  stream names up to "hwperf_9999" */
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	PVR_DPF_ENTERED;
+
+	/* Create the L1 HWPerf buffer on demand */
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT)
+						| PVRSRV_MEMALLOCFLAG_GPU_READABLE
+						| PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE
+						| PVRSRV_MEMALLOCFLAG_CPU_READABLE
+						| PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE
+						| PVRSRV_MEMALLOCFLAG_UNCACHED
+#if defined(PDUMP) /* Helps show where the packet data ends */
+						| PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
+#else /* Helps show corruption issues in driver-live */
+						| PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC
+#endif
+						;
+
+	/* Allocate HWPerf FW L1 buffer */
+	eError = DevmemFwAllocate(psRgxDevInfo,
+							  /* Pad it enough to hold the biggest variable-sized packet. */
+	                          psRgxDevInfo->ui32RGXFWIfHWPerfBufSize+RGX_HWPERF_MAX_PACKET_SIZE,
+	                          uiMemAllocFlags,
+	                          "FwHWPerfBuffer",
+	                          &psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to allocate kernel fw hwperf buffer (%u)",
+				__func__, eError));
+		goto e0;
+	}
+
+	/* Expect the RuntimeCfg structure to be mapped into CPU virtual memory,
+	 * and make sure the FW address is not already set */
+	PVR_ASSERT(psRgxDevInfo->psRGXFWIfRuntimeCfg && psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr == 0x0);
+
+	/* The Meta cached flag was removed from this allocation as FW
+	 * performance was found to be better without it. */
+	RGXSetFirmwareAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf,
+	                      psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc,
+	                      0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc,
+	                                  (void**)&psRgxDevInfo->psRGXFWIfHWPerfBuf);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to acquire kernel hwperf buffer (%u)",
+				__func__, eError));
+		goto e0;
+	}
+
+	/* On a NO-HW driver there is no MISR installed to copy data from L1 to
+	 * L2, hence the L2 buffer is not allocated */
+#if !defined(NO_HARDWARE)
+	/* The host L2 HWPerf buffer size in bytes must be bigger than the L1
+	 * buffer accessed by the FW, because the MISR may try to write one packet
+	 * the size of the L1 buffer in some scenarios. When logging is enabled in
+	 * the MISR, it can be seen whether the L2 buffer hits a full condition;
+	 * the closer in size the L2 and L1 buffers are, the more likely this is.
+	 * The size is chosen to allow the MISR to write an L1-sized packet while
+	 * the client application/daemon drains another L1-sized packet, i.e. ~1.5*L1.
+	 */
+	ui32L2BufferSize = psRgxDevInfo->ui32RGXFWIfHWPerfBufSize +
+			(psRgxDevInfo->ui32RGXFWIfHWPerfBufSize>>1);
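+	/* Worked example (illustrative only; actual sizes are configuration
+	 * dependent): with an L1 buffer of 2048 KB, the integer arithmetic above
+	 * yields L2 = 2048 KB + (2048 KB >> 1) = 3072 KB, i.e. 1.5x L1. */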
+
+	/* Form the HWPerf stream name corresponding to this DevNode, so that it
+	 * is meaningful in the UM */
+	if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d",
+	               PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+	               psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to form HWPerf stream name for device %d",
+				__func__,
+				psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = TLStreamCreate(&hStream,
+	                        psRgxDevInfo->psDeviceNode,
+	                        pszHWPerfStreamName,
+	                        ui32L2BufferSize,
+	                        TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT,
+							_HWPerfFWOnReaderOpenCB, psRgxDevInfo,
+#if !defined(SUPPORT_TL_PRODUCER_CALLBACK)
+	                        NULL, NULL
+#else
+	                        /* Not enabled by default */
+	                        RGXHWPerfTLCB, psRgxDevInfo
+#endif
+	);
+	PVR_LOGG_IF_ERROR(eError, "TLStreamCreate", e1);
+
+	eError = TLStreamSetNotifStream(hStream,
+	                                PVRSRVGetPVRSRVData()->hTLCtrlStream);
+	/* the stream can still be discovered, so leave it as-is and just log the error */
+	PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream");
+
+	/* send the event here because host stream is implicitly opened for write
+	 * in TLStreamCreate and TLStreamOpen is never called (so the event is
+	 * never emitted) */
+	TLStreamMarkStreamOpen(hStream);
+
+	{
+		TL_STREAM_INFO sTLStreamInfo;
+
+		TLStreamInfo(hStream, &sTLStreamInfo);
+		psRgxDevInfo->ui32MaxPacketSize = sTLStreamInfo.maxTLpacketSize;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %u  L2: %u",
+			psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, ui32L2BufferSize));
+
+#else /* defined (NO_HARDWARE) */
+	PVR_UNREFERENCED_PARAMETER(ui32L2BufferSize);
+	PVR_UNREFERENCED_PARAMETER(RGXHWPerfTLCB);
+	PVR_UNREFERENCED_PARAMETER(pszHWPerfStreamName);
+	ui32L2BufferSize = 0;
+#endif
+
+	psRgxDevInfo->hHWPerfStream = hStream;
+	PVR_DPF_RETURN_OK;
+
+#if !defined(NO_HARDWARE)
+	e1: /* L2 buffer initialisation failures */
+	psRgxDevInfo->hHWPerfStream = NULL;
+#endif
+	e0: /* L1 buffer initialisation failures */
+	RGXHWPerfL1BufferDeinit(psRgxDevInfo);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+
+void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	IMG_HANDLE hStream;
+
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	PVR_DPF_ENTERED;
+
+	/* Validate the device info before dereferencing it */
+	PVR_ASSERT(psRgxDevInfo);
+	hStream = psRgxDevInfo->hHWPerfStream;
+	psRgxDevInfo->hHWPerfStream = NULL;
+
+	/* Clean up the L2 buffer stream object if allocated */
+	if (hStream)
+	{
+		/* send the event here because host stream is implicitly opened for
+		 * write in TLStreamCreate and TLStreamClose is never called (so the
+		 * event is never emitted) */
+		TLStreamMarkStreamClose(hStream);
+		TLStreamClose(hStream);
+	}
+
+	/* Cleanup L1 buffer resources */
+	RGXHWPerfL1BufferDeinit(psRgxDevInfo);
+
+	/* Cleanup the HWPerf server module lock resource */
+	if (psRgxDevInfo->hHWPerfLock)
+	{
+		OSLockDestroy(psRgxDevInfo->hHWPerfLock);
+		psRgxDevInfo->hHWPerfLock = NULL;
+	}
+
+	PVR_DPF_RETURN;
+}
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Server API(s)
+ *****************************************************************************/
+
+static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                          IMG_BOOL bToggle,
+                                          IMG_UINT64 ui64Mask)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice;
+	RGXFWIF_KCCB_CMD sKccbCmd;
+
+	/* Whether this method is used to enable or disable, the HWPerf buffers
+	 * (host and FW) are likely to be needed eventually, so create them here;
+	 * this also helps unit testing. Buffers are allocated on demand to
+	 * reduce the RAM footprint on systems not needing HWPerf resources.
+	 * Obtain the lock first, then test and initialise if required. */
+	OSLockAcquire(psDevice->hHWPerfLock);
+
+	if (!psDevice->bFirmwareInitialised)
+	{
+		psDevice->ui64HWPerfFilter = ui64Mask; // at least set filter
+		eError = PVRSRV_ERROR_NOT_INITIALISED;
+
+		PVR_DPF((PVR_DBG_ERROR,
+				 "HWPerf has NOT been initialised yet. Mask has been SET to "
+				 "(%" IMG_UINT64_FMTSPECx ")",
+				 ui64Mask));
+
+		goto unlock_and_return;
+	}
+
+	if (RGXHWPerfIsInitRequired(psDevice))
+	{
+		eError = RGXHWPerfInitOnDemandResources(psDevice);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW "
+					"resources failed", __func__));
+			goto unlock_and_return;
+		}
+	}
+
+	/* Unlock here as no further HWPerf resources are used below that would be
+	 * affected if freed by another thread */
+	OSLockRelease(psDevice->hHWPerfLock);
+
+	/* Return if the filter is the same */
+	if (!bToggle && psDevice->ui64HWPerfFilter == ui64Mask)
+		goto return_;
+
+	/* Prepare command parameters ... */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG;
+	sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = bToggle ? RGXFWIF_HWPERF_CTRL_TOGGLE : RGXFWIF_HWPERF_CTRL_SET;
+	sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64Mask;
+
+	/* Ask the FW to carry out the HWPerf configuration command */
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,	RGXFWIF_DM_GP,
+	                            &sKccbCmd, 0, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set new HWPerfFW filter in "
+				"firmware (error = %d)", __func__, eError));
+		goto return_;
+	}
+
+	psDevice->ui64HWPerfFilter = bToggle ?
+			psDevice->ui64HWPerfFilter ^ ui64Mask : ui64Mask;
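+	/* Toggle semantics (illustrative example, values are arbitrary): with a
+	 * current filter of 0x0A and ui64Mask of 0x06, toggling yields
+	 * 0x0A ^ 0x06 = 0x0C; a non-toggle call simply overwrites the filter
+	 * with ui64Mask. */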
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP,
+	                        psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", return_);
+
+#if defined(DEBUG)
+	if (bToggle)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfFW events (%" IMG_UINT64_FMTSPECx ") have been TOGGLED",
+				ui64Mask));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")",
+				ui64Mask));
+	}
+#endif
+
+	return PVRSRV_OK;
+
+	unlock_and_return:
+	OSLockRelease(psDevice->hHWPerfLock);
+
+	return_:
+	return eError;
+}
+
+#define HWPERF_HOST_MAX_DEFERRED_PACKETS 800
+
+static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                            IMG_BOOL bToggle,
+                                            IMG_UINT32 ui32Mask)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice;
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+	IMG_UINT32 ui32OldFilter = psDevice->ui32HWPerfHostFilter;
+#endif
+
+	OSLockAcquire(psDevice->hLockHWPerfHostStream);
+	if (psDevice->hHWPerfHostStream == NULL)
+	{
+		eError = RGXHWPerfHostInitOnDemandResources(psDevice);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Initialisation of on-demand HWPerfHost resources failed",
+					 __func__));
+			OSLockRelease(psDevice->hLockHWPerfHostStream);
+			return eError;
+		}
+	}
+
+	psDevice->ui32HWPerfHostFilter = bToggle ?
+			psDevice->ui32HWPerfHostFilter ^ ui32Mask : ui32Mask;
+
+	// Deferred creation of host periodic events thread
+	if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO))
+	{
+		eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS);
+		PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread");
+	}
+	else
+	{
+		eError = PVRSRVDestroyHWPerfHostThread();
+		PVR_LOG_IF_ERROR(eError, "PVRSRVDestroyHWPerfHostThread");
+	}
+
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+	// Log deferred events stats if filter changed from non-zero to zero
+	if ((ui32OldFilter != 0) && (psDevice->ui32HWPerfHostFilter == 0))
+	{
+		PVR_LOG(("HWPerfHost deferred events buffer high-watermark / size: (%u / %u)",
+				psDevice->ui32DEHighWatermark, HWPERF_HOST_MAX_DEFERRED_PACKETS));
+
+		PVR_LOG(("HWPerfHost deferred event retries: WaitForAtomicCtxPktHighWatermark(%u) "\
+				"WaitForRightOrdPktHighWatermark(%u)",
+				psDevice->ui32WaitForAtomicCtxPktHighWatermark,
+				psDevice->ui32WaitForRightOrdPktHighWatermark));
+	}
+#endif
+
+	OSLockRelease(psDevice->hLockHWPerfHostStream);
+
+#if defined(DEBUG)
+	if (bToggle)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfHost events (%x) have been TOGGLED",
+				ui32Mask));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfHost mask has been SET to (%x)",
+				ui32Mask));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXHWPerfCtrlClientBuffer(IMG_BOOL bToggle,
+                                              IMG_UINT32 ui32InfoPageIdx,
+                                              IMG_UINT32 ui32Mask)
+{
+	PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+
+	PVR_LOGR_IF_FALSE(ui32InfoPageIdx >= HWPERF_INFO_IDX_START &&
+	                  ui32InfoPageIdx < HWPERF_INFO_IDX_END, "invalid info"
+	                  " page index", PVRSRV_ERROR_INVALID_PARAMS);
+
+	OSLockAcquire(psData->hInfoPageLock);
+	psData->pui32InfoPage[ui32InfoPageIdx] = bToggle ?
+			psData->pui32InfoPage[ui32InfoPageIdx] ^ ui32Mask : ui32Mask;
+	OSLockRelease(psData->hInfoPageLock);
+
+#if defined(DEBUG)
+	if (bToggle)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) events (%x) have been TOGGLED",
+				ui32InfoPageIdx, ui32Mask));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) mask has been SET to (%x)",
+				ui32InfoPageIdx, ui32Mask));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_HWPERF_BVNC *psBVNC)
+{
+	IMG_PCHAR pszBVNC;
+	PVR_LOGR_IF_FALSE((NULL != psDevInfo), "psDevInfo invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+	if ((pszBVNC = RGXDevBVNCString(psDevInfo)))
+	{
+		size_t uiStringLength = OSStringLength(pszBVNC);
+		size_t uiBVNCStringSize = (uiStringLength + 1) * sizeof(IMG_CHAR);
+		PVR_ASSERT(uiStringLength < RGX_HWPERF_MAX_BVNC_LEN);
+		OSStringLCopy(psBVNC->aszBvncString, pszBVNC, uiBVNCStringSize);
+	}
+	else
+	{
+		*psBVNC->aszBvncString = 0;
+	}
+
+	psBVNC->ui32BvncKmFeatureFlags = 0x0;
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))
+	{
+		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERFBUS_FLAG;
+	}
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+	{
+		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG;
+	}
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))
+	{
+		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG;
+	}
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERF_COUNTER_BATCH))
+	{
+		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG;
+	}
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE))
+	{
+		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_ROGUEXE_FLAG;
+	}
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, DUST_POWER_ISLAND_S7))
+	{
+		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG;
+	}
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE))
+	{
+		psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG;
+	}
+
+#ifdef SUPPORT_WORKLOAD_ESTIMATION
+	/* Not a part of BVNC feature line and so doesn't need the feature supported check */
+	psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION;
+#endif
+
+	/* Define the HW counter block counts. */
+	{
+		const IMG_UINT32 ui32rgx_units_indirect_by_phantom = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg);
+		const IMG_UINT32 ui32rgx_units_phantom_indirect_by_dust = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
+		const IMG_UINT32 ui32rgx_units_phantom_indirect_by_cluster = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg);
+
+		PVR_ASSERT(ui32rgx_units_indirect_by_phantom < UINT8_MAX);
+		PVR_ASSERT(ui32rgx_units_phantom_indirect_by_dust < UINT8_MAX);
+		PVR_ASSERT(ui32rgx_units_phantom_indirect_by_cluster < UINT8_MAX);
+
+		psBVNC->ui8RgxUnitsIndirectByPhantom = ui32rgx_units_indirect_by_phantom;
+		psBVNC->ui8RgxUnitsPhantomIndirectByDust = ui32rgx_units_phantom_indirect_by_dust;
+		psBVNC->ui8RgxUnitsPhantomIndirectByCluster = ui32rgx_units_phantom_indirect_by_cluster;
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA    *psConnection,
+                                                  PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                  RGX_HWPERF_BVNC    *psBVNC)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	PVRSRV_ERROR        eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVR_LOGR_IF_FALSE((NULL != psDeviceNode), "psDeviceNode invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+	psDevInfo = psDeviceNode->pvDevice;
+	eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, psBVNC);
+
+	return eError;
+}
+
+/*
+	PVRSRVRGXCtrlHWPerfKM
+ */
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
+		CONNECTION_DATA         *psConnection,
+		PVRSRV_DEVICE_NODE      *psDeviceNode,
+		RGX_HWPERF_STREAM_ID     eStreamId,
+		IMG_BOOL                 bToggle,
+		IMG_UINT64               ui64Mask)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	PVR_DPF_ENTERED;
+	PVR_ASSERT(psDeviceNode);
+
+	if (eStreamId == RGX_HWPERF_STREAM_ID0_FW)
+	{
+		return RGXHWPerfCtrlFwBuffer(psDeviceNode, bToggle, ui64Mask);
+	}
+	else if (eStreamId == RGX_HWPERF_STREAM_ID1_HOST)
+	{
+		return RGXHWPerfCtrlHostBuffer(psDeviceNode, bToggle, (IMG_UINT32) ui64Mask);
+	}
+	else if (eStreamId == RGX_HWPERF_STREAM_ID2_CLIENT)
+	{
+		IMG_UINT32 ui32Index = (IMG_UINT32) (ui64Mask >> 32);
+		IMG_UINT32 ui32Mask = (IMG_UINT32) ui64Mask;
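+		/* The 64-bit mask packs the info-page index in the high 32 bits and
+		 * the client filter mask in the low 32 bits. Illustrative example
+		 * (arbitrary values): ui64Mask = 0x0000000200000010 decodes to
+		 * ui32Index = 2 and ui32Mask = 0x10. */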
+
+		return RGXHWPerfCtrlClientBuffer(bToggle, ui32Index, ui32Mask);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCtrlHWPerfKM: Unknown stream id."));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PVR_DPF_RETURN_OK;
+}
+
+/*
+	AppHint interfaces
+ */
+static
+PVRSRV_ERROR RGXHWPerfSetFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  const void *psPrivate,
+                                  IMG_UINT64 ui64Value)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	psDevNode = psPVRSRVData->psDeviceNodeList;
+	/* Control HWPerf on all the devices */
+	while (psDevNode)
+	{
+		eError = RGXHWPerfCtrlFwBuffer(psDevNode, IMG_FALSE, ui64Value);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to set HWPerf firmware filter for device (%d)", psDevNode->sDevId.i32UMIdentifier));
+			return eError;
+		}
+		psDevNode = psDevNode->psNext;
+	}
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfReadFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   const void *psPrivate,
+                                   IMG_UINT64 *pui64Value)
+{
+	PVRSRV_RGXDEV_INFO *psDevice;
+
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	if (!psDeviceNode || !psDeviceNode->pvDevice)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* The configuration command is applied to all devices, so the filter
+	 * value should be the same for all */
+	psDevice = psDeviceNode->pvDevice;
+	*pui64Value = psDevice->ui64HWPerfFilter;
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfSetHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    const void *psPrivate,
+                                    IMG_UINT32 ui32Value)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	psDevNode = psPVRSRVData->psDeviceNodeList;
+	/* Control HWPerf on all the devices */
+	while (psDevNode)
+	{
+		eError = RGXHWPerfCtrlHostBuffer(psDevNode, IMG_FALSE, ui32Value);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to set HWPerf host filter for device (%d)", psDevNode->sDevId.i32UMIdentifier));
+			return eError;
+		}
+		psDevNode = psDevNode->psNext;
+	}
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfReadHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     const void *psPrivate,
+                                     IMG_UINT32 *pui32Value)
+{
+	PVRSRV_RGXDEV_INFO *psDevice;
+
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	if (!psDeviceNode || !psDeviceNode->pvDevice)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevice = psDeviceNode->pvDevice;
+	*pui32Value = psDevice->ui32HWPerfHostFilter;
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _ReadClientFilter(const PVRSRV_DEVICE_NODE *psDevice,
+                                      const void *psPrivData,
+                                      IMG_UINT32 *pui32Value)
+{
+	PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+	IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData;
+	PVR_UNREFERENCED_PARAMETER(psDevice);
+
+	OSLockAcquire(psData->hInfoPageLock);
+	*pui32Value = psData->pui32InfoPage[ui32Idx];
+	OSLockRelease(psData->hInfoPageLock);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _WriteClientFilter(const PVRSRV_DEVICE_NODE *psDevice,
+                                       const void *psPrivData,
+                                       IMG_UINT32 ui32Value)
+{
+	IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData;
+	PVR_UNREFERENCED_PARAMETER(psDevice);
+
+	return RGXHWPerfCtrlClientBuffer(IMG_FALSE, ui32Idx, ui32Value);
+}
+
+void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRVAppHintRegisterHandlersUINT64(APPHINT_ID_HWPerfFWFilter,
+	                                    RGXHWPerfReadFwFilter,
+	                                    RGXHWPerfSetFwFilter,
+	                                    psDeviceNode,
+	                                    NULL);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfHostFilter,
+	                                    RGXHWPerfReadHostFilter,
+	                                    RGXHWPerfSetHostFilter,
+	                                    psDeviceNode,
+										NULL);
+}
+
+void RGXHWPerfClientInitAppHintCallbacks(void)
+{
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Services,
+	                                    _ReadClientFilter,
+	                                    _WriteClientFilter,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    (void *) HWPERF_FILTER_SERVICES_IDX);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_EGL,
+	                                    _ReadClientFilter,
+	                                    _WriteClientFilter,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    (void *) HWPERF_FILTER_EGL_IDX);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGLES,
+	                                    _ReadClientFilter,
+	                                    _WriteClientFilter,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    (void *) HWPERF_FILTER_OPENGLES_IDX);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenCL,
+	                                    _ReadClientFilter,
+	                                    _WriteClientFilter,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    (void *) HWPERF_FILTER_OPENCL_IDX);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Vulkan,
+	                                    _ReadClientFilter,
+	                                    _WriteClientFilter,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    (void *) HWPERF_FILTER_VULKAN_IDX);
+}
+
+/*
+	PVRSRVRGXConfigEnableHWPerfCountersKM
+ */
+PVRSRV_ERROR PVRSRVRGXConfigEnableHWPerfCountersKM(
+		CONNECTION_DATA          * psConnection,
+		PVRSRV_DEVICE_NODE       * psDeviceNode,
+		IMG_UINT32                 ui32ArrayLen,
+		RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs)
+{
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sKccbCmd;
+	DEVMEM_MEMDESC*		psFwBlkConfigsMemDesc;
+	RGX_HWPERF_CONFIG_CNTBLK* psFwArray;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	PVR_LOGR_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+	PVR_LOGR_IF_FALSE(psBlockConfigs != NULL, "psBlockConfigs is NULL",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDeviceNode);
+
+	/* Fill in the command structure with the parameters needed
+	 */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS;
+	sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = ui32ArrayLen;
+
+	eError = DevmemFwAllocate(psDeviceNode->pvDevice,
+	                          sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
+	                          PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+	                          PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+	                          PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+	                          PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+	                          PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+	                          PVRSRV_MEMALLOCFLAG_UNCACHED |
+	                          PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+	                          "FwHWPerfCountersConfigBlock",
+	                          &psFwBlkConfigsMemDesc);
+	PVR_LOGR_IF_ERROR(eError, "DevmemFwAllocate");
+
+	RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.sBlockConfigs,
+	                      psFwBlkConfigsMemDesc, 0, 0);
+
+	eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail1);
+	}
+
+	OSDeviceMemCopy(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen);
+	DevmemPDumpLoadMem(psFwBlkConfigsMemDesc,
+	                   0,
+	                   sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
+	                   PDUMP_FLAGS_CONTINUOUS);
+
+	/*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM parameters set, calling FW"));*/
+
+	/* Ask the FW to carry out the HWPerf configuration command
+	 */
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+	                            RGXFWIF_DM_GP, &sKccbCmd, 0, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", fail2);
+	}
+
+	/*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM command scheduled for FW"));*/
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", fail2);
+	}
+
+	/* Release temporary memory used for block configuration
+	 */
+	RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+	DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+	DevmemFwFree(psDeviceNode->pvDevice, psFwBlkConfigsMemDesc);
+
+	/*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM firmware completed"));*/
+
+	PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen));
+
+	PVR_DPF_RETURN_OK;
+
+	fail2:
+	DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+	fail1:
+	RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+	DevmemFwFree(psDeviceNode->pvDevice, psFwBlkConfigsMemDesc);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+
+/*
+	PVRSRVRGXConfigCustomCountersKM
+ */
+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
+		CONNECTION_DATA             * psConnection,
+		PVRSRV_DEVICE_NODE          * psDeviceNode,
+		IMG_UINT16                    ui16CustomBlockID,
+		IMG_UINT16                    ui16NumCustomCounters,
+		IMG_UINT32                  * pui32CustomCounterIDs)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD	sKccbCmd;
+	DEVMEM_MEMDESC*		psFwSelectCntrsMemDesc = NULL;
+	IMG_UINT32*			psFwArray;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDeviceNode);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: configure block %u to read %u counters", __func__, ui16CustomBlockID, ui16NumCustomCounters));
+
+	/* Fill in the command structure with the parameters needed */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS;
+	sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16NumCounters = ui16NumCustomCounters;
+	sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16CustomBlock = ui16CustomBlockID;
+
+	if (ui16NumCustomCounters > 0)
+	{
+		PVR_ASSERT(pui32CustomCounterIDs);
+
+		eError = DevmemFwAllocate(psDeviceNode->pvDevice,
+		                          sizeof(IMG_UINT32) * ui16NumCustomCounters,
+		                          PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+		                          PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+		                          PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+		                          PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+		                          PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+		                          PVRSRV_MEMALLOCFLAG_UNCACHED |
+		                          PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+		                          "FwHWPerfConfigCustomCounters",
+		                          &psFwSelectCntrsMemDesc);
+		PVR_LOGR_IF_ERROR(eError, "DevmemFwAllocate");
+
+		RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.sCustomCounterIDs,
+		                      psFwSelectCntrsMemDesc, 0, 0);
+
+		eError = DevmemAcquireCpuVirtAddr(psFwSelectCntrsMemDesc, (void **)&psFwArray);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail1);
+		}
+
+		OSDeviceMemCopy(psFwArray, pui32CustomCounterIDs, sizeof(IMG_UINT32) * ui16NumCustomCounters);
+		DevmemPDumpLoadMem(psFwSelectCntrsMemDesc,
+		                   0,
+		                   sizeof(IMG_UINT32) * ui16NumCustomCounters,
+		                   PDUMP_FLAGS_CONTINUOUS);
+	}
+
+	/* Push the command to configure the custom counters block into the KCCB */
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+	                            RGXFWIF_DM_GP, &sKccbCmd, 0, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", fail2);
+	}
+	PVR_DPF((PVR_DBG_VERBOSE, "%s: Command scheduled", __func__));
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", fail2);
+	}
+	PVR_DPF((PVR_DBG_VERBOSE, "%s: FW operation completed", __func__));
+
+	if (ui16NumCustomCounters > 0)
+	{
+		/* Release temporary memory used for block configuration */
+		RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
+		DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
+		DevmemFwFree(psDeviceNode->pvDevice, psFwSelectCntrsMemDesc);
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Readings of %u HWPerf custom counters will be sent with the next HW events", ui16NumCustomCounters));
+
+	PVR_DPF_RETURN_OK;
+
+	fail2:
+	if (psFwSelectCntrsMemDesc) DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
+
+	fail1:
+	if (psFwSelectCntrsMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
+		DevmemFwFree(psDeviceNode->pvDevice, psFwSelectCntrsMemDesc);
+	}
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+/*
+	PVRSRVRGXCtrlHWPerfCountersKM
+ */
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfCountersKM(
+		CONNECTION_DATA             * psConnection,
+		PVRSRV_DEVICE_NODE          * psDeviceNode,
+		IMG_BOOL                      bEnable,
+		IMG_UINT32                    ui32ArrayLen,
+		IMG_UINT16                  * psBlockIDs)
+{
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sKccbCmd;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDeviceNode);
+	PVR_ASSERT(ui32ArrayLen>0);
+	PVR_ASSERT(ui32ArrayLen<=RGXFWIF_HWPERF_CTRL_BLKS_MAX);
+	PVR_ASSERT(psBlockIDs);
+
+	/* Fill in the command structure with the parameters needed
+	 */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS;
+	sKccbCmd.uCmdData.sHWPerfCtrlBlks.bEnable = bEnable;
+	sKccbCmd.uCmdData.sHWPerfCtrlBlks.ui32NumBlocks = ui32ArrayLen;
+	OSDeviceMemCopy(sKccbCmd.uCmdData.sHWPerfCtrlBlks.aeBlockIDs, psBlockIDs, sizeof(IMG_UINT16)*ui32ArrayLen);
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM parameters set, calling FW")); */
+
+	/* Ask the FW to carry out the HWPerf configuration command
+	 */
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+	                            RGXFWIF_DM_GP, &sKccbCmd, 0, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+		PVR_LOGR_IF_ERROR(eError, "RGXScheduleCommand");
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM command scheduled for FW")); */
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+		PVR_LOGR_IF_ERROR(eError, "RGXWaitForFWOp");
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM firmware completed")); */
+
+#if defined(DEBUG)
+	if (bEnable)
+		PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been ENABLED", ui32ArrayLen));
+	else
+		PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been DISABLED", ui32ArrayLen));
+#endif
+
+	PVR_DPF_RETURN_OK;
+}
+
+static INLINE IMG_UINT32 _RGXHWPerfFixBufferSize(IMG_UINT32 ui32BufSizeKB)
+{
+	if (ui32BufSizeKB > HWPERF_HOST_TL_STREAM_SIZE_MAX)
+	{
+		/* Size specified as an AppHint but it is too big */
+		PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostInit: HWPerf Host buffer size "
+				"value (%u) too big, using maximum (%u)", ui32BufSizeKB,
+				HWPERF_HOST_TL_STREAM_SIZE_MAX));
+		return HWPERF_HOST_TL_STREAM_SIZE_MAX<<10;
+	}
+	else if (ui32BufSizeKB >= HWPERF_HOST_TL_STREAM_SIZE_MIN)
+	{
+		return ui32BufSizeKB<<10;
+	}
+	else if (ui32BufSizeKB > 0)
+	{
+		/* Size specified as an AppHint but it is too small */
+		PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostInit: HWPerf Host buffer size "
+				"value (%u) too small, using minimum (%u)", ui32BufSizeKB,
+				HWPERF_HOST_TL_STREAM_SIZE_MIN));
+		return HWPERF_HOST_TL_STREAM_SIZE_MIN<<10;
+	}
+	else
+	{
+		/* 0 size implies AppHint not set or is set to zero,
+		 * use default size from driver constant. */
+		return HWPERF_HOST_TL_STREAM_SIZE_DEFAULT<<10;
+	}
+}
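+/* Illustrative behaviour of _RGXHWPerfFixBufferSize(), assuming the AppHint
+ * value lies within [HWPERF_HOST_TL_STREAM_SIZE_MIN,
+ * HWPERF_HOST_TL_STREAM_SIZE_MAX] (constants defined elsewhere in the
+ * driver): an input of 128 (KB) returns 128 << 10 = 131072 bytes. Values
+ * outside the range are clamped to the corresponding bound, and 0 selects
+ * HWPERF_HOST_TL_STREAM_SIZE_DEFAULT. */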
+
+/******************************************************************************
+ * RGX HW Performance Host Stream API
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfHostInit
+
+@Description    Called during driver init to initialise the HWPerfHost
+                stream in the Rogue device driver. This function allocates
+                only the minimal resources required for the HWPerf server
+                module to function.
+
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB)
+{
+	PVRSRV_ERROR eError;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	PVR_ASSERT(psRgxDevInfo != NULL);
+
+	eError = OSLockCreate(&psRgxDevInfo->hLockHWPerfHostStream);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", error);
+
+	psRgxDevInfo->hHWPerfHostStream = NULL;
+	psRgxDevInfo->ui32HWPerfHostFilter = 0; /* disable all events */
+	psRgxDevInfo->ui32HWPerfHostNextOrdinal = 1;
+	psRgxDevInfo->ui32HWPerfHostBufSize = _RGXHWPerfFixBufferSize(ui32BufSizeKB);
+	psRgxDevInfo->pvHostHWPerfMISR = NULL;
+	psRgxDevInfo->pui8DeferredEvents = NULL;
+	/* First packet has ordinal=1, so LastOrdinal=0 will ensure ordering logic
+	 * is maintained */
+	psRgxDevInfo->ui32HWPerfHostLastOrdinal = 0;
+	psRgxDevInfo->hHWPerfHostSpinLock = NULL;
+
+	error:
+	return eError;
+}
+
+static void _HWPerfHostOnConnectCB(void *pvArg)
+{
+	PVRSRV_RGXDEV_INFO* psDevice;
+	PVRSRV_ERROR eError;
+
+	RGXSRV_HWPERF_CLK_SYNC(pvArg);
+
+	psDevice = (PVRSRV_RGXDEV_INFO*) pvArg;
+
+	/* Handle the case where the RGX_HWPERF_HOST_INFO bit is set in the event
+	 * filter before the host stream is opened for reading by a HWPerf client,
+	 * which can result in the host periodic thread sleeping for a long
+	 * duration because TLStreamIsOpenForReading may return false. */
+	if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO))
+	{
+		eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS);
+		PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread");
+	}
+}
+
+/* We avoid a holder struct for the fields below, since a struct is subject
+ * to padding, packing, and other compiler dependencies, and we want a
+ * contiguous stream of bytes (header+data) for use in TLStreamWrite. See
+ * _HWPerfHostDeferredEventsEmitter().
+ *
+ * A deferred (UFO) packet is represented in memory as:
+ *     - IMG_BOOL                 --> Indicates whether a packet write is
+ *                                    "complete" by atomic context or not.
+ *     - RGX_HWPERF_V2_PACKET_HDR --.
+ *                                  |--> Fed together to TLStreamWrite for
+ *                                  |    deferred packet to be written to
+ *                                  |    HWPerfHost buffer
+ *     - RGX_HWPERF_HOST_UFO_DATA---`
+ *
+ * PS: Currently only UFO events are supported in deferred list */
+#define HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE (sizeof(IMG_BOOL) +\
+		sizeof(RGX_HWPERF_V2_PACKET_HDR) +\
+		sizeof(RGX_HWPERF_HOST_UFO_DATA))
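+/* For reference (derived from the definitions above): the deferred events
+ * ring allocated in RGXHWPerfHostInitOnDemandResources() occupies
+ * HWPERF_HOST_MAX_DEFERRED_PACKETS (800) slots of
+ * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE bytes each, laid out contiguously so
+ * that slot i starts at pui8DeferredEvents + i * slot size. */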
+
+static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData);
+static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                             IMG_UINT32 ui32MaxOrdinal);
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfHostInitOnDemandResources
+
+@Description    This function allocates the HWPerfHost buffer if HWPerf is
+                enabled at driver load time. Otherwise, these buffers are
+                allocated on-demand as and when required.
+
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; /* +5 bytes are enough to hold stream names up to "hwperf_host_9999" */
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Form the HWPerf host stream name corresponding to this DevNode, so
+	 * that it is meaningful in the UM */
+	if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d",
+	               PVRSRV_TL_HWPERF_HOST_SERVER_STREAM,
+	               psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to form HWPerf host stream name for device %d",
+				__func__,
+				psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = TLStreamCreate(&psRgxDevInfo->hHWPerfHostStream,
+	                        psRgxDevInfo->psDeviceNode,
+	                        pszHWPerfHostStreamName, psRgxDevInfo->ui32HWPerfHostBufSize,
+	                        TL_OPMODE_DROP_NEWER,
+	                        _HWPerfHostOnConnectCB, psRgxDevInfo,
+	                        NULL, NULL);
+	PVR_LOGR_IF_ERROR(eError, "TLStreamCreate");
+
+	eError = TLStreamSetNotifStream(psRgxDevInfo->hHWPerfHostStream,
+	                                PVRSRVGetPVRSRVData()->hTLCtrlStream);
+	/* the stream can still be discovered, so leave it as-is and just log the error */
+	PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream");
+
+	/* send the event here because host stream is implicitly opened for write
+	 * in TLStreamCreate and TLStreamOpen is never called (so the event is
+	 * never emitted) */
+	eError = TLStreamMarkStreamOpen(psRgxDevInfo->hHWPerfHostStream);
+	PVR_LOG_IF_ERROR(eError, "TLStreamMarkStreamOpen");
+
+	/* HWPerfHost deferred events specific initialization */
+	eError = OSInstallMISR(&psRgxDevInfo->pvHostHWPerfMISR,
+	                       RGX_MISRHandler_HWPerfPostDeferredHostEvents,
+	                       psRgxDevInfo,
+	                       "RGX_HWPerfDeferredEventPoster");
+	PVR_LOGG_IF_ERROR(eError, "OSInstallMISR", err_install_misr);
+
+	eError = OSSpinLockCreate(&psRgxDevInfo->hHWPerfHostSpinLock);
+	PVR_LOGG_IF_ERROR(eError, "OSSpinLockCreate", err_spinlock_create);
+
+	psRgxDevInfo->pui8DeferredEvents = OSAllocMem(HWPERF_HOST_MAX_DEFERRED_PACKETS
+	                                              * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE);
+	if (NULL == psRgxDevInfo->pui8DeferredEvents)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OUT OF MEMORY. Could not allocate memory for "\
+				"HWPerfHost deferred events array", __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_alloc_deferred_events;
+	}
+	psRgxDevInfo->ui16DEReadIdx = 0;
+	psRgxDevInfo->ui16DEWriteIdx = 0;
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+	psRgxDevInfo->ui32DEHighWatermark = 0;
+	psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = 0;
+	psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = 0;
+#endif
+
+	PVR_DPF((PVR_DBG_MESSAGE, "HWPerf Host buffer size is %u bytes",
+			psRgxDevInfo->ui32HWPerfHostBufSize));
+
+	return PVRSRV_OK;
+
+	err_alloc_deferred_events:
+	OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock);
+	psRgxDevInfo->hHWPerfHostSpinLock = NULL;
+
+	err_spinlock_create:
+	(void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR);
+	psRgxDevInfo->pvHostHWPerfMISR = NULL;
+
+	err_install_misr:
+	TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream);
+	TLStreamClose(psRgxDevInfo->hHWPerfHostStream);
+	psRgxDevInfo->hHWPerfHostStream = NULL;
+
+	return eError;
+}
+
+void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	PVR_ASSERT(psRgxDevInfo);
+
+	if (psRgxDevInfo->pui8DeferredEvents)
+	{
+		OSFreeMem(psRgxDevInfo->pui8DeferredEvents);
+		psRgxDevInfo->pui8DeferredEvents = NULL;
+	}
+
+	if (psRgxDevInfo->hHWPerfHostSpinLock)
+	{
+		OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock);
+		psRgxDevInfo->hHWPerfHostSpinLock = NULL;
+	}
+
+	if (psRgxDevInfo->pvHostHWPerfMISR)
+	{
+		(void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR);
+		psRgxDevInfo->pvHostHWPerfMISR = NULL;
+	}
+
+	if (psRgxDevInfo->hHWPerfHostStream)
+	{
+		/* send the event here because host stream is implicitly opened for
+		 * write in TLStreamCreate and TLStreamClose is never called (so the
+		 * event is never emitted) */
+		TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream);
+		TLStreamClose(psRgxDevInfo->hHWPerfHostStream);
+		psRgxDevInfo->hHWPerfHostStream = NULL;
+	}
+
+	if (psRgxDevInfo->hLockHWPerfHostStream)
+	{
+		OSLockDestroy(psRgxDevInfo->hLockHWPerfHostStream);
+		psRgxDevInfo->hLockHWPerfHostStream = NULL;
+	}
+}
+
+inline void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Filter)
+{
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+	psRgxDevInfo->ui32HWPerfHostFilter = ui32Filter;
+}
+
+inline IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent)
+{
+	PVR_ASSERT(psRgxDevInfo);
+	return (psRgxDevInfo->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(eEvent)) ? IMG_TRUE : IMG_FALSE;
+}
+
+#define MAX_RETRY_COUNT 80
+static inline void _PostFunctionPrologue(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                         IMG_UINT32 ui32CurrentOrdinal)
+{
+	IMG_UINT32 ui32Retry = MAX_RETRY_COUNT;
+
+	PVR_ASSERT(psRgxDevInfo->hLockHWPerfHostStream != NULL);
+	PVR_ASSERT(psRgxDevInfo->hHWPerfHostStream != NULL);
+
+	OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+
+	/* First, flush pending events (if any) */
+	_HWPerfHostDeferredEventsEmitter(psRgxDevInfo, ui32CurrentOrdinal);
+
+	while ((ui32CurrentOrdinal != psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1)
+		   && (--ui32Retry != 0))
+	{
+		/* Release lock and give a chance to a waiting context to emit the
+		 * expected packet */
+		OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+		OSSleepms(100);
+		OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+	}
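+	/* Worst-case bound (derived from the constants above): with
+	 * MAX_RETRY_COUNT = 80 and a 100 ms sleep per retry, a post function can
+	 * wait up to ~8 seconds for the expected ordinal before giving up. */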
+
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+	if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedPktOrdinalBroke))
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: Will warn only once! Potential packet(s) lost after ordinal"
+				 " %u (Current ordinal = %u)",
+				 __func__,
+				 psRgxDevInfo->ui32HWPerfHostLastOrdinal, ui32CurrentOrdinal));
+		psRgxDevInfo->bWarnedPktOrdinalBroke = IMG_TRUE;
+	}
+
+	if (psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry))
+	{
+		psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = MAX_RETRY_COUNT - ui32Retry;
+	}
+#endif
+}
+
+static inline void _PostFunctionEpilogue(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                         IMG_UINT32 ui32CurrentOrdinal)
+{
+	/* update last ordinal emitted */
+	psRgxDevInfo->ui32HWPerfHostLastOrdinal = ui32CurrentOrdinal;
+
+	PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream));
+	OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+}
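+/* Ordering invariant (derived from the code above): a post function for
+ * ordinal N may emit its packet only once the last emitted ordinal is N-1;
+ * e.g. the prologue for ordinal 7 waits (up to the retry bound) until
+ * ordinal 6 has been written, then the epilogue records 7 as the last
+ * ordinal emitted. */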
+
+static inline IMG_UINT8 *_ReserveHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size)
+{
+	IMG_UINT8 *pui8Dest;
+
+	PVRSRV_ERROR eError = TLStreamReserve(psRgxDevInfo->hHWPerfHostStream,
+	                                      &pui8Dest, ui32Size);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not reserve space in %s buffer"
+				" (%d). Dropping packet.",
+				__func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+		return NULL;
+	}
+	PVR_ASSERT(pui8Dest != NULL);
+
+	return pui8Dest;
+}
+
+static inline void _CommitHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size)
+{
+	PVRSRV_ERROR eError = TLStreamCommit(psRgxDevInfo->hHWPerfHostStream,
+	                                     ui32Size);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not commit data to %s"
+				" (%d)", __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+	}
+}
+
+/* Returns IMG_TRUE if packet write passes, IMG_FALSE otherwise */
+static inline IMG_BOOL _WriteHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                          RGX_HWPERF_V2_PACKET_HDR *psHeader)
+{
+	PVRSRV_ERROR eError = TLStreamWrite(psRgxDevInfo->hHWPerfHostStream,
+	                                    (IMG_UINT8*) psHeader, psHeader->ui32Size);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not write packet in %s buffer"
+				" (%d). Dropping packet.",
+				__func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+	}
+
+	/* Regardless of whether write passed/failed, we consider it "written" */
+	psRgxDevInfo->ui32HWPerfHostLastOrdinal = psHeader->ui32Ordinal;
+
+	return (eError == PVRSRV_OK);
+}
+
+/* Helper macros for deferred events operations */
+#define GET_DE_NEXT_IDX(_curridx) ((_curridx + 1) % HWPERF_HOST_MAX_DEFERRED_PACKETS)
+#define GET_DE_EVENT_BASE(_idx)   (psRgxDevInfo->pui8DeferredEvents +\
+		_idx * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE)
+
+#define GET_DE_EVENT_WRITE_STATUS(_base) ((IMG_BOOL*) _base)
+#define GET_DE_EVENT_DATA(_base)         (_base + sizeof(IMG_BOOL))
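+/* Illustrative use of the ring arithmetic: with
+ * HWPERF_HOST_MAX_DEFERRED_PACKETS = 800, GET_DE_NEXT_IDX(799) wraps to 0,
+ * so the read/write indices traverse the buffer as a circular queue. */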
+
+/* Emits HWPerfHost event packets present in the deferred list stopping when one
+ * of the following cases is hit:
+ * case 1: Packet ordering breaks i.e. a packet found doesn't meet ordering
+ *         criteria (ordinal == last_ordinal + 1)
+ *
+ * case 2: A packet with ordinal > ui32MaxOrdinal is found
+ *
+ * case 3: The deferred list is empty (read == write), i.e. there are no
+ *         more deferred packets.
+ *
+ * NOTE: The caller must hold the hLockHWPerfHostStream lock before calling
+ *       this function. */
+static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                             IMG_UINT32 ui32MaxOrdinal)
+{
+	RGX_HWPERF_V2_PACKET_HDR *psHeader;
+	IMG_UINT32 ui32Retry;
+	IMG_UINT8  *pui8DeferredEvent;
+	IMG_BOOL   *pbPacketWritten;
+	IMG_BOOL   bWritePassed;
+
+	PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream));
+
+	while (psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx)
+	{
+		pui8DeferredEvent = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEReadIdx);
+		pbPacketWritten   = GET_DE_EVENT_WRITE_STATUS(pui8DeferredEvent);
+		psHeader          = (RGX_HWPERF_V2_PACKET_HDR*) GET_DE_EVENT_DATA(pui8DeferredEvent);
+
+		for (ui32Retry = MAX_RETRY_COUNT; !(*pbPacketWritten) && (ui32Retry != 0); ui32Retry--)
+		{
+			/* Packet not yet written, re-check after a while. Wait for a short period as
+			 * atomic contexts are generally expected to finish fast */
+			OSWaitus(10);
+		}
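+		/* Worst-case bound (derived from the constants above): 80 retries of
+		 * 10 us each, i.e. the emitter waits at most ~800 us for an atomic
+		 * context to finish writing a packet before dropping it. */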
+
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+		if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedAtomicCtxPktLost))
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					 "%s: Will warn only once. Dropping a deferred packet as atomic context"
+					 " took too long to write it",
+					 __func__));
+			psRgxDevInfo->bWarnedAtomicCtxPktLost = IMG_TRUE;
+		}
+
+		if (psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry))
+		{
+			psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = MAX_RETRY_COUNT - ui32Retry;
+		}
+#endif
+
+		if (*pbPacketWritten)
+		{
+			if ((psHeader->ui32Ordinal > ui32MaxOrdinal) ||
+					(psHeader->ui32Ordinal != (psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1)))
+			{
+				/* Leave remaining events to be emitted by next call to this function */
+				break;
+			}
+			bWritePassed = _WriteHWPerfStream(psRgxDevInfo, psHeader);
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "%s: Atomic context packet lost!", __func__));
+			bWritePassed = IMG_FALSE;
+		}
+
+		/* Move on to next packet */
+		psRgxDevInfo->ui16DEReadIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEReadIdx);
+
+		if (!bWritePassed // if write failed
+				&& ui32MaxOrdinal == IMG_UINT32_MAX // and we are from MISR
+				&& psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx) // and there are more events
+		{
+			/* Stop emitting here and re-schedule MISR */
+			OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR);
+			break;
+		}
+	}
+}
+
+static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData)
+{
+	PVRSRV_RGXDEV_INFO *psRgxDevInfo = pvData;
+
+	OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+
+	/* Since we're called from the MISR, there is no upper cap on the ordinal
+	 * to be emitted. Pass IMG_UINT32_MAX to signify all possible packets. */
+	_HWPerfHostDeferredEventsEmitter(psRgxDevInfo, IMG_UINT32_MAX);
+
+	OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+}
+
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+static inline void _UpdateDEBufferHighWatermark(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	IMG_UINT32 ui32DEWatermark;
+	IMG_UINT16 ui16LRead = psRgxDevInfo->ui16DEReadIdx;
+	IMG_UINT16 ui16LWrite = psRgxDevInfo->ui16DEWriteIdx;
+
+	if (ui16LWrite >= ui16LRead)
+	{
+		ui32DEWatermark = ui16LWrite - ui16LRead;
+	}
+	else
+	{
+		ui32DEWatermark = (HWPERF_HOST_MAX_DEFERRED_PACKETS - ui16LRead) + (ui16LWrite);
+	}
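+	/* Wrap-around example (illustrative): with 800 slots, read index 798 and
+	 * write index 2, the occupancy is (800 - 798) + 2 = 4 packets. */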
+
+	if (ui32DEWatermark > psRgxDevInfo->ui32DEHighWatermark)
+	{
+		psRgxDevInfo->ui32DEHighWatermark = ui32DEWatermark;
+	}
+}
+#endif
+
+/* @Description Gets the data/members that determine the accuracy of a packet
+                in the HWPerfHost buffer. Since the data returned by this
+                function is required in both atomic and process/sleepable
+                contexts, it is protected by a spinlock.
+
+   @Output      pui32Ordinal   Pointer to the ordinal number assigned to this packet
+   @Output      pui64Timestamp Timestamp value for this packet
+   @Output      ppui8Dest      If the current context cannot sleep, pointer to a
+                               place in the deferred events buffer where the
+                               packet data should be written. Don't care, otherwise.
+ */
+static void _GetHWPerfHostPacketSpecifics(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                          IMG_UINT32 *pui32Ordinal,
+                                          IMG_UINT64 *pui64Timestamp,
+                                          IMG_UINT8 **ppui8Dest,
+                                          IMG_BOOL    bSleepAllowed)
+{
+	IMG_UINT64 ui64SpinLockFlags;
+
+	/* The spin lock is required to avoid being scheduled out by a
+	 * higher-priority context while we obtain the header-specific details
+	 * and, when in atomic context, our place in the deferred events buffer */
+	OSSpinLockAcquire(psRgxDevInfo->hHWPerfHostSpinLock, &ui64SpinLockFlags);
+
+	*pui32Ordinal = psRgxDevInfo->ui32HWPerfHostNextOrdinal++;
+	*pui64Timestamp = RGXTimeCorrGetClockus64();
+
+	if (!bSleepAllowed)
+	{
+		/* We're in an atomic context. So return the next position available in
+		 * deferred events buffer */
+		IMG_UINT16 ui16NewWriteIdx;
+		IMG_BOOL *pbPacketWritten;
+
+		PVR_ASSERT(ppui8Dest != NULL);
+
+		ui16NewWriteIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEWriteIdx);
+		if (ui16NewWriteIdx == psRgxDevInfo->ui16DEReadIdx)
+		{
+			/* This shouldn't happen. HWPERF_HOST_MAX_DEFERRED_PACKETS should be
+			 * big enough to avoid any such scenario */
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+			/* PVR_LOG/printk isn't recommended in atomic context. Perhaps we'll do
+			 * this debug output here when trace_printk support is added to DDK */
+			//			PVR_LOG(("%s: No more space in deferred events buffer (%u/%u) W=%u,R=%u",
+			//                     __func__, psRgxDevInfo->ui32DEHighWatermark,
+			//					 HWPERF_HOST_MAX_DEFERRED_PACKETS, psRgxDevInfo->ui16DEWriteIdx,
+			//					 psRgxDevInfo->ui16DEReadIdx));
+#endif
+			*ppui8Dest = NULL;
+		}
+		else
+		{
+			/* Return the position where deferred event would be written */
+			*ppui8Dest = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEWriteIdx);
+
+			/* Make sure packet write "state" is "write-pending" _before_ moving write
+			 * pointer forward */
+			pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(*ppui8Dest);
+			*pbPacketWritten = IMG_FALSE;
+
+			psRgxDevInfo->ui16DEWriteIdx = ui16NewWriteIdx;
+
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+			_UpdateDEBufferHighWatermark(psRgxDevInfo);
+#endif
+		}
+	}
+
+	OSSpinLockRelease(psRgxDevInfo->hHWPerfHostSpinLock, ui64SpinLockFlags);
+}
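+
+/* Typical caller pattern (descriptive note, not from the original source):
+ * sleepable callers pass bSleepAllowed == IMG_TRUE with ppui8Dest == NULL
+ * and later reserve space in the TL stream via _ReserveHWPerfStream();
+ * atomic callers pass IMG_FALSE and, on success, receive in *ppui8Dest a
+ * slot in the deferred events ring to write the packet into. */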
+
+static inline void _SetupHostPacketHeader(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                          IMG_UINT8 *pui8Dest,
+                                          RGX_HWPERF_HOST_EVENT_TYPE eEvType,
+                                          IMG_UINT32 ui32Size,
+                                          IMG_UINT32 ui32Ordinal,
+                                          IMG_UINT64 ui64Timestamp)
+{
+	RGX_HWPERF_V2_PACKET_HDR *psHeader = (RGX_HWPERF_V2_PACKET_HDR *) pui8Dest;
+
+	PVR_ASSERT(ui32Size <= RGX_HWPERF_MAX_PACKET_SIZE);
+
+	psHeader->ui32Ordinal = ui32Ordinal;
+	psHeader->ui64Timestamp = ui64Timestamp;
+	psHeader->ui32Sig = HWPERF_PACKET_V2B_SIG;
+	psHeader->eTypeId = RGX_HWPERF_MAKE_TYPEID(RGX_HWPERF_STREAM_ID1_HOST,
+	                                           eEvType, 0, 0);
+	psHeader->ui32Size = ui32Size;
+}
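+
+/* Resulting packet layout (sketch): an RGX_HWPERF_V2_PACKET_HDR at pui8Dest,
+ * followed immediately by the event-specific payload, e.g. an
+ * RGX_HWPERF_HOST_ENQ_DATA for RGX_HWPERF_HOST_ENQ packets. */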
+
+static inline void _SetupHostEnqPacketData(IMG_UINT8 *pui8Dest,
+                                           RGX_HWPERF_KICK_TYPE eEnqType,
+                                           IMG_UINT32 ui32Pid,
+                                           IMG_UINT32 ui32FWDMContext,
+                                           IMG_UINT32 ui32ExtJobRef,
+                                           IMG_UINT32 ui32IntJobRef,
+                                           PVRSRV_FENCE hCheckFence,
+                                           PVRSRV_FENCE hUpdateFence,
+                                           PVRSRV_TIMELINE hUpdateTimeline,
+                                           IMG_UINT64 ui64CheckFenceUID,
+                                           IMG_UINT64 ui64UpdateFenceUID,
+                                           IMG_UINT64 ui64DeadlineInus,
+                                           IMG_UINT64 ui64CycleEstimate)
+{
+	RGX_HWPERF_HOST_ENQ_DATA *psData = (RGX_HWPERF_HOST_ENQ_DATA *)
+	        		(pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+	psData->ui32EnqType = eEnqType;
+	psData->ui32PID = ui32Pid;
+	psData->ui32ExtJobRef = ui32ExtJobRef;
+	psData->ui32IntJobRef = ui32IntJobRef;
+	psData->ui32DMContext = ui32FWDMContext;
+	psData->hCheckFence = hCheckFence;
+	psData->hUpdateFence = hUpdateFence;
+	psData->hUpdateTimeline = hUpdateTimeline;
+	psData->ui64CheckFence_UID = ui64CheckFenceUID;
+	psData->ui64UpdateFence_UID = ui64UpdateFenceUID;
+	psData->ui64DeadlineInus = ui64DeadlineInus;
+	psData->ui64CycleEstimate = ui64CycleEstimate;
+}
+
+void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                               RGX_HWPERF_KICK_TYPE eEnqType,
+                               IMG_UINT32 ui32Pid,
+                               IMG_UINT32 ui32FWDMContext,
+                               IMG_UINT32 ui32ExtJobRef,
+                               IMG_UINT32 ui32IntJobRef,
+                               PVRSRV_FENCE hCheckFence,
+                               PVRSRV_FENCE hUpdateFence,
+                               PVRSRV_TIMELINE hUpdateTimeline,
+                               IMG_UINT64 ui64CheckFenceUID,
+                               IMG_UINT64 ui64UpdateFenceUID,
+                               IMG_UINT64 ui64DeadlineInus,
+                               IMG_UINT64 ui64CycleEstimate )
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_ENQ_DATA);
+	IMG_UINT32 ui32Ordinal;
+	IMG_UINT64 ui64Timestamp;
+
+	_GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+	                              NULL, IMG_TRUE);
+
+	_PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ENQ, ui32Size,
+	                       ui32Ordinal, ui64Timestamp);
+	_SetupHostEnqPacketData(pui8Dest,
+	                        eEnqType,
+	                        ui32Pid,
+	                        ui32FWDMContext,
+	                        ui32ExtJobRef,
+	                        ui32IntJobRef,
+	                        hCheckFence,
+	                        hUpdateFence,
+	                        hUpdateTimeline,
+	                        ui64CheckFenceUID,
+	                        ui64UpdateFenceUID,
+	                        ui64DeadlineInus,
+	                        ui64CycleEstimate);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+	cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline IMG_UINT32 _CalculateHostUfoPacketSize(RGX_HWPERF_UFO_EV eUfoType)
+{
+	IMG_UINT32 ui32Size =
+			(IMG_UINT32) offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData);
+	RGX_HWPERF_UFO_DATA_ELEMENT *puData;
+
+	switch (eUfoType)
+	{
+		case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+		case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+			ui32Size += sizeof(puData->sCheckSuccess);
+			break;
+		case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+		case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+			ui32Size += sizeof(puData->sCheckFail);
+			break;
+		case RGX_HWPERF_UFO_EV_UPDATE:
+			ui32Size += sizeof(puData->sUpdate);
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO"
+					" event type"));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+
+	return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
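+
+/* Illustrative size arithmetic: for RGX_HWPERF_UFO_EV_UPDATE the raw payload
+ * size is offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData) plus
+ * sizeof(puData->sUpdate) (FW address, old value, new value), which
+ * RGX_HWPERF_MAKE_SIZE_VARIABLE then encodes as the final packet size. */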
+
+static inline void _SetupHostUfoPacketData(IMG_UINT8 *pui8Dest,
+                                           RGX_HWPERF_UFO_EV eUfoType,
+                                           RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData)
+{
+	RGX_HWPERF_HOST_UFO_DATA *psData = (RGX_HWPERF_HOST_UFO_DATA *)
+	        		(pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+	RGX_HWPERF_UFO_DATA_ELEMENT *puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+	        		 psData->aui32StreamData;
+
+	psData->eEvType = eUfoType;
+	/* HWPerfHost always emits 1 UFO at a time, since each UFO has 1-to-1 mapping
+	 * with an underlying DevNode, and each DevNode has a dedicated HWPerf buffer */
+	psData->ui32StreamInfo = RGX_HWPERF_MAKE_UFOPKTINFO(1,
+	                                                    offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData));
+
+	switch (eUfoType)
+	{
+		case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+		case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+			puData->sCheckSuccess.ui32FWAddr =
+					psUFOData->sCheckSuccess.ui32FWAddr;
+			puData->sCheckSuccess.ui32Value =
+					psUFOData->sCheckSuccess.ui32Value;
+
+			puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+							(((IMG_BYTE *) puData) + sizeof(puData->sCheckSuccess));
+			break;
+		case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+		case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+			puData->sCheckFail.ui32FWAddr =
+					psUFOData->sCheckFail.ui32FWAddr;
+			puData->sCheckFail.ui32Value =
+					psUFOData->sCheckFail.ui32Value;
+			puData->sCheckFail.ui32Required =
+					psUFOData->sCheckFail.ui32Required;
+
+			puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+			        		(((IMG_BYTE *) puData) + sizeof(puData->sCheckFail));
+			break;
+		case RGX_HWPERF_UFO_EV_UPDATE:
+			puData->sUpdate.ui32FWAddr =
+					psUFOData->sUpdate.ui32FWAddr;
+			puData->sUpdate.ui32OldValue =
+					psUFOData->sUpdate.ui32OldValue;
+			puData->sUpdate.ui32NewValue =
+					psUFOData->sUpdate.ui32NewValue;
+
+			puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+			        		(((IMG_BYTE *) puData) + sizeof(puData->sUpdate));
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO"
+					" event type"));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+}
+
+void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                               RGX_HWPERF_UFO_EV eUfoType,
+                               RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData,
+                               const IMG_BOOL bSleepAllowed)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size = _CalculateHostUfoPacketSize(eUfoType);
+	IMG_UINT32 ui32Ordinal;
+	IMG_UINT64 ui64Timestamp;
+	IMG_BOOL   *pbPacketWritten = NULL;
+
+	_GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+	                              &pui8Dest, bSleepAllowed);
+
+	if (bSleepAllowed)
+	{
+		_PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+		if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+		{
+			goto cleanup;
+		}
+	}
+	else
+	{
+		if (pui8Dest == NULL)
+		{
+			// Give up if we couldn't get a slot in the deferred events buffer
+			goto cleanup;
+		}
+		pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8Dest);
+		pui8Dest = GET_DE_EVENT_DATA(pui8Dest);
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_UFO, ui32Size,
+	                       ui32Ordinal, ui64Timestamp);
+	_SetupHostUfoPacketData(pui8Dest, eUfoType, psUFOData);
+
+	if (bSleepAllowed)
+	{
+		_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+	}
+	else
+	{
+		*pbPacketWritten = IMG_TRUE;
+		OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR);
+	}
+
+	cleanup:
+	if (bSleepAllowed)
+	{
+		_PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+	}
+}
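+
+/* Deferred-path summary (descriptive note): a packet posted from atomic
+ * context is written into the deferred events ring, its write-status flag is
+ * set, and the MISR scheduled above later drains the ring into the
+ * HWPerfHost TL stream via the deferred events emitter. */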
+
+#define UNKNOWN_SYNC_NAME "UnknownSync"
+
+static_assert(PVRSRV_SYNC_NAME_LENGTH==SYNC_MAX_CLASS_NAME_LEN, "Sync class name max does not match Fence Sync name max");
+
+static inline IMG_UINT32 _FixNameAndCalculateHostAllocPacketSize(
+		RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+		const IMG_CHAR **ppsName,
+		IMG_UINT32 *ui32NameSize)
+{
+	RGX_HWPERF_HOST_ALLOC_DATA *psData;
+	IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_ALLOC_DATA, uAllocDetail);
+
+	if (*ppsName != NULL && *ui32NameSize > 0)
+	{
+		/* if the string is longer than the maximum, cut it (leave space
+		 * for '\0') */
+		if (*ui32NameSize >= SYNC_MAX_CLASS_NAME_LEN)
+			*ui32NameSize = SYNC_MAX_CLASS_NAME_LEN;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostAllocEvent: Invalid"
+				" resource name given."));
+		*ppsName = UNKNOWN_SYNC_NAME;
+		*ui32NameSize = sizeof(UNKNOWN_SYNC_NAME);
+	}
+
+	switch (eAllocType)
+	{
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+			ui32Size += sizeof(psData->uAllocDetail.sSyncAlloc) -
+			            SYNC_MAX_CLASS_NAME_LEN + *ui32NameSize;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+			ui32Size += sizeof(psData->uAllocDetail.sFenceAlloc) -
+			            PVRSRV_SYNC_NAME_LENGTH + *ui32NameSize;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW:
+			ui32Size += sizeof(psData->uAllocDetail.sSWFenceAlloc) -
+			            PVRSRV_SYNC_NAME_LENGTH + *ui32NameSize;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP:
+			ui32Size += sizeof(psData->uAllocDetail.sSyncCheckPointAlloc) -
+			            PVRSRV_SYNC_NAME_LENGTH + *ui32NameSize;
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR,
+					"RGXHWPerfHostPostAllocEvent: Invalid alloc event type"));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+
+	return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostAllocPacketData(IMG_UINT8 *pui8Dest,
+                                             RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+                                             RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail,
+                                             const IMG_CHAR *psName,
+                                             IMG_UINT32 ui32NameSize)
+{
+	RGX_HWPERF_HOST_ALLOC_DATA *psData = (RGX_HWPERF_HOST_ALLOC_DATA *)
+	        		(pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+	IMG_CHAR *acName = NULL;
+
+	psData->ui32AllocType = eAllocType;
+
+	switch (eAllocType)
+	{
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+			psData->uAllocDetail.sSyncAlloc = puAllocDetail->sSyncAlloc;
+			acName = psData->uAllocDetail.sSyncAlloc.acName;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+			psData->uAllocDetail.sFenceAlloc = puAllocDetail->sFenceAlloc;
+			acName = psData->uAllocDetail.sFenceAlloc.acName;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW:
+			psData->uAllocDetail.sSWFenceAlloc = puAllocDetail->sSWFenceAlloc;
+			acName = psData->uAllocDetail.sSWFenceAlloc.acName;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP:
+			psData->uAllocDetail.sSyncCheckPointAlloc = puAllocDetail->sSyncCheckPointAlloc;
+			acName = psData->uAllocDetail.sSyncCheckPointAlloc.acName;
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR,
+					"RGXHWPerfHostPostAllocEvent: Invalid alloc event type"));
+			PVR_ASSERT(IMG_FALSE);
+	}
+
+	if (acName != NULL)
+	{
+		if (ui32NameSize)
+		{
+			OSStringLCopy(acName, psName, ui32NameSize);
+		}
+		else
+		{
+			/* In case no name was given make sure we don't access random
+			 * memory */
+			acName[0] = '\0';
+		}
+	}
+}
+
+void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO* psRgxDevInfo,
+                                 RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+                                 const IMG_CHAR *psName,
+                                 IMG_UINT32 ui32NameSize,
+                                 RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT64 ui64Timestamp;
+	IMG_UINT32 ui32Ordinal;
+	IMG_UINT32 ui32Size = _FixNameAndCalculateHostAllocPacketSize(eAllocType,
+	                                                              &psName,
+	                                                              &ui32NameSize);
+
+	_GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+	                              NULL, IMG_TRUE);
+
+	_PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ALLOC, ui32Size,
+	                       ui32Ordinal, ui64Timestamp);
+
+	_SetupHostAllocPacketData(pui8Dest,
+	                          eAllocType,
+	                          puAllocDetail,
+	                          psName,
+	                          ui32NameSize);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+	cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline void _SetupHostFreePacketData(IMG_UINT8 *pui8Dest,
+                                            RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+                                            IMG_UINT64 ui64UID,
+                                            IMG_UINT32 ui32PID,
+                                            IMG_UINT32 ui32FWAddr)
+{
+	RGX_HWPERF_HOST_FREE_DATA *psData = (RGX_HWPERF_HOST_FREE_DATA *)
+	        		(pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+	psData->ui32FreeType = eFreeType;
+
+	switch (eFreeType)
+	{
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+			psData->uFreeDetail.sSyncFree.ui32FWAddr = ui32FWAddr;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+			psData->uFreeDetail.sFenceDestroy.ui64Fence_UID = ui64UID;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP:
+			psData->uFreeDetail.sSyncCheckPointFree.ui32CheckPt_FWAddr = ui32FWAddr;
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR,
+					"RGXHWPerfHostPostFreeEvent: Invalid free event type"));
+			PVR_ASSERT(IMG_FALSE);
+	}
+}
+
+void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+                                IMG_UINT64 ui64UID,
+                                IMG_UINT32 ui32PID,
+                                IMG_UINT32 ui32FWAddr)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_FREE_DATA);
+	IMG_UINT32 ui32Ordinal;
+	IMG_UINT64 ui64Timestamp;
+
+	_GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+	                              NULL, IMG_TRUE);
+	_PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_FREE, ui32Size,
+	                       ui32Ordinal, ui64Timestamp);
+	_SetupHostFreePacketData(pui8Dest,
+	                         eFreeType,
+	                         ui64UID,
+	                         ui32PID,
+	                         ui32FWAddr);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+	cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline IMG_UINT32 _FixNameAndCalculateHostModifyPacketSize(
+		RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+		const IMG_CHAR **ppsName,
+		IMG_UINT32 *ui32NameSize)
+{
+	RGX_HWPERF_HOST_MODIFY_DATA *psData;
+	RGX_HWPERF_HOST_MODIFY_DETAIL *puData;
+	IMG_UINT32 ui32Size = sizeof(psData->ui32ModifyType);
+
+	if (*ppsName != NULL && *ui32NameSize > 0)
+	{
+		/* first strip the terminator */
+		if ((*ppsName)[*ui32NameSize - 1] == '\0')
+			*ui32NameSize -= 1;
+		/* if string longer than maximum cut it (leave space for '\0') */
+		if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH)
+			*ui32NameSize = PVRSRV_SYNC_NAME_LENGTH - 1;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostModifyEvent: Invalid"
+				" resource name given."));
+		*ppsName = UNKNOWN_SYNC_NAME;
+		*ui32NameSize = sizeof(UNKNOWN_SYNC_NAME) - 1;
+	}
+
+	switch (eModifyType)
+	{
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+			ui32Size += sizeof(puData->sFenceMerge) - PVRSRV_SYNC_NAME_LENGTH +
+			            *ui32NameSize + 1; /* +1 for '\0' */
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR,
+					"RGXHWPerfHostPostModifyEvent: Invalid modify event type"));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+
+	return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostModifyPacketData(IMG_UINT8 *pui8Dest,
+                                              RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+                                              IMG_UINT64 ui64NewUID,
+                                              IMG_UINT64 ui64UID1,
+                                              IMG_UINT64 ui64UID2,
+                                              const IMG_CHAR *psName,
+                                              IMG_UINT32 ui32NameSize)
+{
+	RGX_HWPERF_HOST_MODIFY_DATA *psData = (RGX_HWPERF_HOST_MODIFY_DATA *)
+	        		(pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+	IMG_CHAR *acName = NULL;
+
+	psData->ui32ModifyType = eModifyType;
+
+	switch (eModifyType)
+	{
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+			psData->uModifyDetail.sFenceMerge.ui64NewFence_UID = ui64NewUID;
+			psData->uModifyDetail.sFenceMerge.ui64InFence1_UID = ui64UID1;
+			psData->uModifyDetail.sFenceMerge.ui64InFence2_UID = ui64UID2;
+			acName = psData->uModifyDetail.sFenceMerge.acName;
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR,
+					"RGXHWPerfHostPostModifyEvent: Invalid modify event type"));
+			PVR_ASSERT(IMG_FALSE);
+	}
+
+	if (acName != NULL)
+	{
+		if (ui32NameSize)
+		{
+			OSStringLCopy(acName, psName, ui32NameSize);
+		}
+		else
+		{
+			/* In case no name was given make sure we don't access random
+			 * memory */
+			acName[0] = '\0';
+		}
+	}
+}
+
+void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                  RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+                                  IMG_UINT64 ui64NewUID,
+                                  IMG_UINT64 ui64UID1,
+                                  IMG_UINT64 ui64UID2,
+                                  const IMG_CHAR *psName,
+                                  IMG_UINT32 ui32NameSize)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT64 ui64Timestamp;
+	IMG_UINT32 ui32Ordinal;
+	IMG_UINT32 ui32Size = _FixNameAndCalculateHostModifyPacketSize(eModifyType,
+	                                                               &psName,
+	                                                               &ui32NameSize);
+
+	_GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+	                              NULL, IMG_TRUE);
+	_PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_MODIFY, ui32Size,
+	                       ui32Ordinal, ui64Timestamp);
+	_SetupHostModifyPacketData(pui8Dest,
+	                           eModifyType,
+	                           ui64NewUID,
+	                           ui64UID1,
+	                           ui64UID2,
+	                           psName,
+	                           ui32NameSize);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+	cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline void _SetupHostClkSyncPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT8 *pui8Dest)
+{
+	RGX_HWPERF_HOST_CLK_SYNC_DATA *psData = (RGX_HWPERF_HOST_CLK_SYNC_DATA *)
+	        		(pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psRgxDevInfo->psRGXFWIfGpuUtilFWCb;
+	IMG_UINT32 ui32CurrIdx =
+			RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount);
+	RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32CurrIdx];
+
+	psData->ui64CRTimestamp = psTimeCorr->ui64CRTimeStamp;
+	psData->ui64OSTimestamp = psTimeCorr->ui64OSTimeStamp;
+	psData->ui32ClockSpeed = psTimeCorr->ui32CoreClockSpeed;
+}
+
+void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size =
+			RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_CLK_SYNC_DATA);
+	IMG_UINT32 ui32Ordinal;
+	IMG_UINT64 ui64Timestamp;
+
+	_GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+	                              NULL, IMG_TRUE);
+	_PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLK_SYNC, ui32Size,
+	                       ui32Ordinal, ui64Timestamp);
+	_SetupHostClkSyncPacketData(psRgxDevInfo, pui8Dest);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+	cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS _ConvDeviceHealthStatus(PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus)
+{
+	switch (eDeviceHealthStatus)
+	{
+		case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: 		return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED;
+		case PVRSRV_DEVICE_HEALTH_STATUS_OK: 				return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK;
+		case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: 	return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING;
+		case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:				return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD;
+		case PVRSRV_DEVICE_HEALTH_STATUS_FAULT:				return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT;
+		default: 											return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED;
+	}
+}
+
+static inline RGX_HWPERF_HOST_DEVICE_HEALTH_REASON _ConvDeviceHealthReason(PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason)
+{
+	switch (eDeviceHealthReason)
+	{
+		case PVRSRV_DEVICE_HEALTH_REASON_NONE: 			return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE;
+		case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: 		return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED;
+		case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: 	return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING;
+		case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: 		return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS;
+		case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT;
+		case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED;
+		case PVRSRV_DEVICE_HEALTH_REASON_IDLING: 		return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING;
+		case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: 	return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING;
+		default: 										return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED;
+	}
+}
+
+static inline void _SetupHostDeviceInfoPacketData(RGX_HWPERF_DEV_INFO_EV eEvType,
+												  PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus,
+												  PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason,
+												  IMG_UINT8 *pui8Dest)
+{
+	RGX_HWPERF_HOST_DEV_INFO_DATA *psData = (RGX_HWPERF_HOST_DEV_INFO_DATA *)(pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+	psData->eEvType = eEvType;
+
+	switch (eEvType)
+	{
+		case RGX_HWPERF_DEV_INFO_EV_HEALTH:
+			psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthStatus = _ConvDeviceHealthStatus(eDeviceHealthStatus);
+			psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthReason = _ConvDeviceHealthReason(eDeviceHealthReason);
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type"));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+}
+
+static inline IMG_UINT32 _CalculateHostDeviceInfoPacketSize(RGX_HWPERF_DEV_INFO_EV eEvType)
+{
+	IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_DEV_INFO_DATA, uDevInfoDetail);
+
+	switch (eEvType)
+	{
+		case RGX_HWPERF_DEV_INFO_EV_HEALTH:
+			ui32Size += sizeof(((RGX_HWPERF_HOST_DEV_INFO_DATA*)0)->uDevInfoDetail.sDeviceStatus);
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type"));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+	return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+								 RGX_HWPERF_DEV_INFO_EV eEvType,
+								 PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus,
+								 PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Ordinal;
+	IMG_UINT64 ui64Timestamp;
+	IMG_UINT32 ui32Size;
+
+	OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+
+	if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL)
+	{
+		_GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE);
+		_PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+		ui32Size = _CalculateHostDeviceInfoPacketSize(eEvType);
+
+		if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL)
+		{
+			_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_DEV_INFO, ui32Size, ui32Ordinal, ui64Timestamp);
+			_SetupHostDeviceInfoPacketData(eEvType, eDeviceHealthStatus, eDeviceHealthReason, pui8Dest);
+			_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+		}
+
+		_PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+	}
+
+	OSLockRelease(psRgxDevInfo->hHWPerfLock);
+}
+
+static inline void _SetupHostInfoPacketData(RGX_HWPERF_INFO_EV eEvType,
+												  IMG_UINT32 ui32TotalMemoryUsage,
+												  IMG_UINT32 ui32LivePids,
+												  PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage,
+												  IMG_UINT8 *pui8Dest)
+{
+	IMG_INT i;
+	RGX_HWPERF_HOST_INFO_DATA *psData = (RGX_HWPERF_HOST_INFO_DATA *)(pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+	psData->eEvType = eEvType;
+
+	switch (eEvType)
+	{
+		case RGX_HWPERF_INFO_EV_MEM_USAGE:
+			psData->uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage = ui32TotalMemoryUsage;
+
+			if (psPerProcessMemUsage)
+			{
+				for (i = 0; i < ui32LivePids; ++i)
+				{
+					psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32Pid = psPerProcessMemUsage[i].ui32Pid;
+					psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32KernelMemUsage = psPerProcessMemUsage[i].ui32KernelMemUsage;
+					psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32GraphicsMemUsage = psPerProcessMemUsage[i].ui32GraphicsMemUsage;
+				}
+			}
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type"));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+}
+
+static inline IMG_UINT32 _CalculateHostInfoPacketSize(RGX_HWPERF_INFO_EV eEvType,
+															IMG_UINT32 *pui32TotalMemoryUsage,
+															IMG_UINT32 *pui32LivePids,
+															PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsage)
+{
+	IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail);
+
+	switch (eEvType)
+	{
+		case RGX_HWPERF_INFO_EV_MEM_USAGE:
+#if !defined(__QNXNTO__)
+			if (PVRSRVGetProcessMemUsage(pui32TotalMemoryUsage, pui32LivePids, ppsPerProcessMemUsage) == PVRSRV_OK)
+			{
+				ui32Size += ((offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage) - ui32Size)
+					+ ((*pui32LivePids) * sizeof(((RGX_HWPERF_HOST_INFO_DATA*)0)->uInfoDetail.sMemUsageStats.sPerProcessUsage)));
+			}
+#else
+			PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform"));
+#endif
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type"));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+	return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+								 RGX_HWPERF_INFO_EV eEvType)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size;
+	IMG_UINT32 ui32Ordinal;
+	IMG_UINT64 ui64Timestamp;
+	IMG_UINT32 ui32TotalMemoryUsage = 0;
+	PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage = NULL;
+	IMG_UINT32 ui32LivePids = 0;
+
+	OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+
+	if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL)
+	{
+		_GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE);
+		_PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+		ui32Size = _CalculateHostInfoPacketSize(eEvType, &ui32TotalMemoryUsage, &ui32LivePids, &psPerProcessMemUsage);
+
+		if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL)
+		{
+			_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_INFO, ui32Size, ui32Ordinal, ui64Timestamp);
+			_SetupHostInfoPacketData(eEvType, ui32TotalMemoryUsage, ui32LivePids, psPerProcessMemUsage, pui8Dest);
+			_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+		}
+
+		_PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+
+		if (psPerProcessMemUsage)
+			OSFreeMemNoStats(psPerProcessMemUsage); // psPerProcessMemUsage was allocated with OSAllocZMemNoStats
+	}
+
+	OSLockRelease(psRgxDevInfo->hHWPerfLock);
+}
+
+static inline IMG_UINT32
+_CalculateHostFenceWaitPacketSize(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType)
+{
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psSizeCalculator;
+	IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA, uDetail);
+
+	switch (eWaitType)
+	{
+		case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN:
+			ui32Size += sizeof(psSizeCalculator->uDetail.sBegin);
+			break;
+		case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END:
+			ui32Size += sizeof(psSizeCalculator->uDetail.sEnd);
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR, "%s: Invalid wait event type (%u)", __func__,
+			         eWaitType));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+	return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void
+_SetupHostFenceWaitPacketData(IMG_UINT8 *pui8Dest,
+                              RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType,
+							  IMG_PID uiPID,
+							  PVRSRV_FENCE hFence,
+							  IMG_UINT32 ui32Data)
+{
+	RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psData = (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *)
+	                (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+	psData->eType = eWaitType;
+	psData->uiPID = uiPID;
+	psData->hFence = hFence;
+
+	switch (eWaitType)
+	{
+		case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN:
+			psData->uDetail.sBegin.ui32TimeoutInMs = ui32Data;
+			break;
+		case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END:
+			psData->uDetail.sEnd.eResult =
+			    (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT) ui32Data;
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Invalid fence-wait event type", __func__));
+			PVR_ASSERT(IMG_FALSE);
+	}
+}
+
+void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+								RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType,
+								IMG_PID uiPID,
+								PVRSRV_FENCE hFence,
+								IMG_UINT32 ui32Data)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size;
+	IMG_UINT32 ui32Ordinal;
+	IMG_UINT64 ui64Timestamp;
+
+	_GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+	                              NULL, IMG_TRUE);
+
+	_PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+	ui32Size = _CalculateHostFenceWaitPacketSize(eType);
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_FENCE_WAIT,
+	                       ui32Size, ui32Ordinal, ui64Timestamp);
+	_SetupHostFenceWaitPacketData(pui8Dest, eType, uiPID, hFence, ui32Data);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+	cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline IMG_UINT32 _CalculateHostSWTimelineAdvPacketSize(void)
+{
+	IMG_UINT32 ui32Size = sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA);
+	return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void
+_SetupHostSWTimelineAdvPacketData(IMG_UINT8 *pui8Dest,
+                                  IMG_PID uiPID,
+								  PVRSRV_TIMELINE hSWTimeline,
+								  IMG_UINT64 ui64SyncPtIndex)
+
+{
+	RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *psData = (RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *)
+	                (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+	psData->uiPID = uiPID;
+	psData->hTimeline = hSWTimeline;
+	psData->ui64SyncPtIndex = ui64SyncPtIndex;
+}
+
+void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                    IMG_PID uiPID,
+									PVRSRV_TIMELINE hSWTimeline,
+									IMG_UINT64 ui64SyncPtIndex)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size;
+	IMG_UINT32 ui32Ordinal;
+	IMG_UINT64 ui64Timestamp;
+
+	_GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+	                              NULL, IMG_TRUE);
+
+	_PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+	ui32Size = _CalculateHostSWTimelineAdvPacketSize();
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE,
+	                       ui32Size, ui32Ordinal, ui64Timestamp);
+	_SetupHostSWTimelineAdvPacketData(pui8Dest, uiPID, hSWTimeline, ui64SyncPtIndex);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+	cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+
+}
+
+/******************************************************************************
+ * Currently only implemented on Linux. This feature can be enabled to provide
+ * an interface to 3rd-party kernel modules that wish to access the
+ * HWPerf data. The API is documented in the rgxapi_km.h header and
+ * the rgx_hwperf* headers.
+ *****************************************************************************/
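+
+/* Illustrative usage sketch for such a module (hypothetical caller; error
+ * handling elided; ui64Mask is a caller-chosen event filter; hDevData comes
+ * from an RGX_HWPERF_DEVICE node on the connection's device list):
+ *
+ *   RGX_HWPERF_CONNECTION *psConn = NULL;
+ *   IMG_PBYTE pBuf; IMG_UINT32 ui32Len;
+ *
+ *   RGXHWPerfConnect(&psConn);
+ *   RGXHWPerfControl(psConn, RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE, ui64Mask);
+ *   RGXHWPerfAcquireEvents(psConn->psHWPerfDevList->hDevData,
+ *                          RGX_HWPERF_STREAM_ID0_FW, &pBuf, &ui32Len);
+ *   ... process ui32Len bytes of HWPerf packets in pBuf ...
+ *   RGXHWPerfReleaseEvents(psConn->psHWPerfDevList->hDevData,
+ *                          RGX_HWPERF_STREAM_ID0_FW);
+ *   RGXHWPerfDisconnect(&psConn);
+ */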
+
+/* Internal HWPerf kernel connection/device data object to track the state
+ * of a client session.
+ */
+typedef struct
+{
+	PVRSRV_DEVICE_NODE* psRgxDevNode;
+	PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+
+	/* TL Open/close state */
+	IMG_HANDLE          hSD[RGX_HWPERF_MAX_STREAM_ID];
+
+	/* TL Acquire/release state */
+	IMG_PBYTE			pHwpBuf[RGX_HWPERF_MAX_STREAM_ID];			/*!< buffer returned to user in acquire call */
+	IMG_PBYTE			pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID];		/*!< pointer to end of HwpBuf */
+	IMG_PBYTE			pTlBuf[RGX_HWPERF_MAX_STREAM_ID];			/*!< buffer obtained via TlAcquireData */
+	IMG_PBYTE			pTlBufPos[RGX_HWPERF_MAX_STREAM_ID];		/*!< initial position in TlBuf to acquire packets */
+	IMG_PBYTE			pTlBufRead[RGX_HWPERF_MAX_STREAM_ID];		/*!< pointer to the last packet read */
+	IMG_UINT32			ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID];	/*!< length of acquired TlBuf */
+	IMG_BOOL			bRelease[RGX_HWPERF_MAX_STREAM_ID];		/*!< used to determine whether or not to release currently held TlBuf */
+
+} RGX_KM_HWPERF_DEVDATA;
+
+PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	RGX_KM_HWPERF_DEVDATA *psDevData;
+	RGX_HWPERF_DEVICE *psNewHWPerfDevice;
+	RGX_HWPERF_CONNECTION* psHWPerfConnection;
+	IMG_BOOL bFWActive = IMG_FALSE;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* avoid uninitialised data */
+	PVR_ASSERT(*ppsHWPerfConnection == NULL);
+	PVR_ASSERT(psPVRSRVData);
+
+	/* Allocate connection object */
+	psHWPerfConnection = OSAllocZMem(sizeof(*psHWPerfConnection));
+	if (!psHWPerfConnection)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	/* early save the return pointer to aid clean-up if failure occurs */
+	*ppsHWPerfConnection = psHWPerfConnection;
+
+	psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	while (psDeviceNode)
+	{
+		if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					 "%s: HWPerf: Device not currently active. ID:%u",
+					 __func__,
+					 psDeviceNode->sDevId.i32UMIdentifier));
+			psDeviceNode = psDeviceNode->psNext;
+			continue;
+		}
+		/* Create a list node to be attached to connection object's list */
+		psNewHWPerfDevice = OSAllocMem(sizeof(*psNewHWPerfDevice));
+		if (!psNewHWPerfDevice)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		/* Insert node at head of the list */
+		psNewHWPerfDevice->psNext = psHWPerfConnection->psHWPerfDevList;
+		psHWPerfConnection->psHWPerfDevList = psNewHWPerfDevice;
+
+		/* create a device data object for kernel server */
+		psDevData = OSAllocZMem(sizeof(*psDevData));
+		psNewHWPerfDevice->hDevData = (IMG_HANDLE)psDevData;
+		if (!psDevData)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		if (OSSNPrintf(psNewHWPerfDevice->pszName, sizeof(psNewHWPerfDevice->pszName),
+		               "hwperf_device_%d", psDeviceNode->sDevId.i32UMIdentifier) < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to form HWPerf device name for device %d",
+					__func__,
+					psDeviceNode->sDevId.i32UMIdentifier));
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		psDevData->psRgxDevNode = psDeviceNode;
+		psDevData->psRgxDevInfo = psDeviceNode->pvDevice;
+
+		psDeviceNode = psDeviceNode->psNext;
+
+		/* At least one device is active */
+		bFWActive = IMG_TRUE;
+	}
+
+	if (!bFWActive)
+	{
+		return PVRSRV_ERROR_NOT_READY;
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection)
+{
+	RGX_KM_HWPERF_DEVDATA *psDevData;
+	RGX_HWPERF_DEVICE *psHWPerfDev;
+	PVRSRV_RGXDEV_INFO *psRgxDevInfo;
+	PVRSRV_ERROR eError;
+	IMG_CHAR pszHWPerfFwStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5];
+	IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5];
+	IMG_UINT32 ui32BufSize;
+
+	/* Disable producer callback by default for the Kernel API. */
+	IMG_UINT32 ui32StreamFlags = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING |
+			PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input argument values supplied by the caller */
+	if (!psHWPerfConnection)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+	while (psHWPerfDev)
+	{
+		psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+		psRgxDevInfo = psDevData->psRgxDevInfo;
+
+		/* In the case where the AppHint has not been set we need to
+		 * initialise the HWPerf resources here. Allocated on-demand
+		 * to reduce RAM foot print on systems not needing HWPerf.
+		 */
+		OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+		if (RGXHWPerfIsInitRequired(psRgxDevInfo))
+		{
+			eError = RGXHWPerfInitOnDemandResources(psRgxDevInfo);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Initialisation of on-demand HWPerfFW resources failed",
+						 __func__));
+				OSLockRelease(psRgxDevInfo->hHWPerfLock);
+				return eError;
+			}
+		}
+		OSLockRelease(psRgxDevInfo->hHWPerfLock);
+
+		OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+		if (psRgxDevInfo->hHWPerfHostStream == NULL)
+		{
+			eError = RGXHWPerfHostInitOnDemandResources(psRgxDevInfo);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Initialisation of on-demand HWPerfHost resources failed",
+						 __func__));
+				OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+				return eError;
+			}
+		}
+		OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+
+		/* Form the HWPerf stream name corresponding to this DevNode, so it is identifiable in the UM */
+		if (OSSNPrintf(pszHWPerfFwStreamName, sizeof(pszHWPerfFwStreamName), "%s%d",
+		               PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+		               psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to form HWPerf stream name for device %d",
+					__func__,
+					psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+		/* Open the RGX TL stream for reading in this session */
+		eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+		                            pszHWPerfFwStreamName,
+		                            ui32StreamFlags,
+		                            &psDevData->hSD[RGX_HWPERF_STREAM_ID0_FW]);
+		PVR_LOGR_IF_ERROR(eError, "TLClientOpenStream(RGX_HWPerf)");
+
+		/* Form the HWPerf host stream name corresponding to this DevNode, so it is identifiable in the UM */
+		if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d",
+		               PVRSRV_TL_HWPERF_HOST_SERVER_STREAM,
+		               psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to form HWPerf host stream name for device %d",
+					__func__,
+					psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		/* Open the host TL stream for reading in this session */
+		eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+		                            pszHWPerfHostStreamName,
+		                            PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+		                            &psDevData->hSD[RGX_HWPERF_STREAM_ID1_HOST]);
+		PVR_LOGR_IF_ERROR(eError, "TLClientOpenStream(Host_HWPerf)");
+
+		/* Allocate a buffer large enough for use during the entire session, to
+		 * avoid the need to resize in the Acquire call, as this might be in an
+		 * ISR. Choose a size that can contain at least one packet.
+		 */
+		/* Allocate buffer for FW Stream */
+		ui32BufSize = FW_STREAM_BUFFER_SIZE;
+		psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] = OSAllocMem(ui32BufSize);
+		if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] == NULL)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID0_FW] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]+ui32BufSize;
+
+		/* Allocate buffer for Host Stream */
+		ui32BufSize = HOST_STREAM_BUFFER_SIZE;
+		psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] = OSAllocMem(ui32BufSize);
+		if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] == NULL)
+		{
+			OSFreeMem(psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]);
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID1_HOST] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST]+ui32BufSize;
+
+		psHWPerfDev = psHWPerfDev->psNext;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+	PVRSRV_ERROR eError;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	eError = RGXHWPerfLazyConnect(ppsHWPerfConnection);
+	PVR_LOGG_IF_ERROR(eError, "RGXHWPerfLazyConnect", e0);
+
+	eError = RGXHWPerfOpen(*ppsHWPerfConnection);
+	PVR_LOGG_IF_ERROR(eError, "RGXHWPerfOpen", e1);
+
+	return PVRSRV_OK;
+
+	e1: /* HWPerfOpen might have opened some streams before failing */
+	RGXHWPerfClose(*ppsHWPerfConnection);
+	e0: /* LazyConnect might have allocated some resources and then failed,
+	 * make sure they are cleaned up */
+	RGXHWPerfFreeConnection(ppsHWPerfConnection);
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfControl(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		RGX_HWPERF_STREAM_ID eStreamId,
+		IMG_BOOL             bToggle,
+		IMG_UINT64           ui64Mask)
+{
+	PVRSRV_ERROR           eError;
+	RGX_KM_HWPERF_DEVDATA* psDevData;
+	RGX_HWPERF_DEVICE* psHWPerfDev;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input argument values supplied by the caller */
+	if (!psHWPerfConnection)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+	while (psHWPerfDev)
+	{
+		psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+		/* Call the internal server API */
+		eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDevData->psRgxDevNode, eStreamId, bToggle, ui64Mask);
+		PVR_LOGR_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
+
+		psHWPerfDev = psHWPerfDev->psNext;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfConfigureAndEnableCounters(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		IMG_UINT32					ui32NumBlocks,
+		RGX_HWPERF_CONFIG_CNTBLK*	asBlockConfigs)
+{
+	PVRSRV_ERROR           eError = PVRSRV_OK;
+	RGX_KM_HWPERF_DEVDATA* psDevData;
+	RGX_HWPERF_DEVICE *psHWPerfDev;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input argument values supplied by the caller */
+	if (!psHWPerfConnection || ui32NumBlocks==0 || !asBlockConfigs)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+	while (psHWPerfDev)
+	{
+		psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+		/* Call the internal server API */
+		eError = PVRSRVRGXConfigEnableHWPerfCountersKM(NULL,
+		                                               psDevData->psRgxDevNode, ui32NumBlocks, asBlockConfigs);
+		PVR_LOGR_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
+
+		psHWPerfDev = psHWPerfDev->psNext;
+	}
+
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfConfigureAndEnableCustomCounters(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		IMG_UINT16              ui16CustomBlockID,
+		IMG_UINT16          ui16NumCustomCounters,
+		IMG_UINT32         *pui32CustomCounterIDs)
+{
+	PVRSRV_ERROR            eError;
+	RGX_HWPERF_DEVICE       *psHWPerfDev;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input arguments supplied by the caller */
+	PVR_LOGR_IF_FALSE((NULL != psHWPerfConnection), "psHWPerfConnection invalid",
+	                   PVRSRV_ERROR_INVALID_PARAMS);
+	PVR_LOGR_IF_FALSE((0 != ui16NumCustomCounters), "ui16NumCustomCounters invalid",
+	                   PVRSRV_ERROR_INVALID_PARAMS);
+	PVR_LOGR_IF_FALSE((NULL != pui32CustomCounterIDs), "pui32CustomCounterIDs invalid",
+	                   PVRSRV_ERROR_INVALID_PARAMS);
+
+	/* Check # of blocks */
+	PVR_LOGR_IF_FALSE((!(ui16CustomBlockID > RGX_HWPERF_MAX_CUSTOM_BLKS)),"ui16CustomBlockID invalid",
+			           PVRSRV_ERROR_INVALID_PARAMS);
+
+	/* Check # of counters */
+	PVR_LOGR_IF_FALSE((!(ui16NumCustomCounters > RGX_HWPERF_MAX_CUSTOM_CNTRS)),"ui16NumCustomCounters invalid",
+			           PVRSRV_ERROR_INVALID_PARAMS);
+
+	psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+	while (psHWPerfDev)
+	{
+		RGX_KM_HWPERF_DEVDATA *psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+		eError = PVRSRVRGXConfigCustomCountersKM(NULL,
+				                                 psDevData->psRgxDevNode,
+												 ui16CustomBlockID, ui16NumCustomCounters, pui32CustomCounterIDs);
+		PVR_LOGR_IF_ERROR(eError, "PVRSRVRGXCtrlCustHWPerfKM");
+
+		psHWPerfDev = psHWPerfDev->psNext;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfDisableCounters(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		IMG_UINT32   ui32NumBlocks,
+		IMG_UINT16*   aeBlockIDs)
+{
+	PVRSRV_ERROR           eError = PVRSRV_OK;
+	RGX_KM_HWPERF_DEVDATA* psDevData;
+	RGX_HWPERF_DEVICE* psHWPerfDev;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input argument values supplied by the caller */
+	if (!psHWPerfConnection || ui32NumBlocks==0 || !aeBlockIDs)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+	while (psHWPerfDev)
+	{
+		psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+		/* Call the internal server API */
+		eError = PVRSRVRGXCtrlHWPerfCountersKM(NULL,
+		                                       psDevData->psRgxDevNode, IMG_FALSE, ui32NumBlocks, aeBlockIDs);
+		PVR_LOGR_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfCountersKM");
+
+		psHWPerfDev = psHWPerfDev->psNext;
+	}
+
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfAcquireEvents(
+		IMG_HANDLE  hDevData,
+		RGX_HWPERF_STREAM_ID eStreamId,
+		IMG_PBYTE*  ppBuf,
+		IMG_UINT32* pui32BufLen)
+{
+	PVRSRV_ERROR			eError;
+	RGX_KM_HWPERF_DEVDATA*	psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+	IMG_PBYTE				pDataDest;
+	IMG_UINT32			ui32TlPackets = 0;
+	IMG_PBYTE			pBufferEnd;
+	PVRSRVTL_PPACKETHDR psHDRptr;
+	PVRSRVTL_PACKETTYPE ui16TlType;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Reset the output arguments in case we discover an error */
+	*ppBuf = NULL;
+	*pui32BufLen = 0;
+
+	/* Validate input argument values supplied by the caller */
+	if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (psDevData->pTlBuf[eStreamId] == NULL)
+	{
+		/* Acquire some data to read from the HWPerf TL stream */
+		eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+		                             psDevData->hSD[eStreamId],
+		                             &psDevData->pTlBuf[eStreamId],
+		                             &psDevData->ui32AcqDataLen[eStreamId]);
+		PVR_LOGR_IF_ERROR(eError, "TLClientAcquireData");
+
+		psDevData->pTlBufPos[eStreamId] = psDevData->pTlBuf[eStreamId];
+	}
+
+	/* TL indicates no data exists so return OK and zero. */
+	if ((psDevData->pTlBufPos[eStreamId] == NULL) || (psDevData->ui32AcqDataLen[eStreamId] == 0))
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Process each TL packet in the data buffer we have acquired */
+	pBufferEnd = psDevData->pTlBuf[eStreamId]+psDevData->ui32AcqDataLen[eStreamId];
+	pDataDest = psDevData->pHwpBuf[eStreamId];
+	psHDRptr = GET_PACKET_HDR(psDevData->pTlBufPos[eStreamId]);
+	psDevData->pTlBufRead[eStreamId] = psDevData->pTlBufPos[eStreamId];
+	while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd )
+	{
+		ui16TlType = GET_PACKET_TYPE(psHDRptr);
+		if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
+		{
+			IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
+			if (0 == ui16DataLen)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfAcquireEvents: ZERO Data in TL data packet: %p", psHDRptr));
+			}
+			else
+			{
+				/* Check next packet does not fill buffer */
+				if (pDataDest + ui16DataLen > psDevData->pHwpBufEnd[eStreamId])
+				{
+					break;
+				}
+
+				/* For valid data copy it into the client buffer and move
+				 * the write position on */
+				OSDeviceMemCopy(pDataDest, GET_PACKET_DATA_PTR(psHDRptr), ui16DataLen);
+				pDataDest += ui16DataLen;
+			}
+		}
+		else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Indication that the transport buffer was full"));
+		}
+		else
+		{
+			/* Ignore padding packets and any other types */
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Ignoring TL packet, type %d", ui16TlType ));
+		}
+
+		/* Update loop variable to the next packet and increment counts */
+		psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
+		/* Update to keep track of the next packet to be read. */
+		psDevData->pTlBufRead[eStreamId] = (IMG_PBYTE) psHDRptr;
+		ui32TlPackets++;
+	}
+
+	PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfAcquireEvents: TL Packets processed %03d", ui32TlPackets));
+
+	psDevData->bRelease[eStreamId] = IMG_FALSE;
+	if (psHDRptr >= (PVRSRVTL_PPACKETHDR) pBufferEnd)
+	{
+		psDevData->bRelease[eStreamId] = IMG_TRUE;
+	}
+
+	/* Update output arguments with client buffer details and true length */
+	*ppBuf = psDevData->pHwpBuf[eStreamId];
+	*pui32BufLen = pDataDest - psDevData->pHwpBuf[eStreamId];
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfReleaseEvents(
+		IMG_HANDLE hDevData,
+		RGX_HWPERF_STREAM_ID eStreamId)
+{
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+	RGX_KM_HWPERF_DEVDATA*	psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input argument values supplied by the caller */
+	if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (psDevData->bRelease[eStreamId])
+	{
+		/* Inform the TL that we are done with reading the data. */
+		eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[eStreamId]);
+		psDevData->ui32AcqDataLen[eStreamId] = 0;
+		psDevData->pTlBuf[eStreamId] = NULL;
+	}
+	else
+	{
+		psDevData->pTlBufPos[eStreamId] = psDevData->pTlBufRead[eStreamId];
+	}
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfGetFilter(
+		IMG_HANDLE  hDevData,
+		RGX_HWPERF_STREAM_ID eStreamId,
+		IMG_UINT64 *ui64Filter)
+{
+	PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input argument values supplied by the caller */
+	psRgxDevInfo = hDevData ? ((RGX_KM_HWPERF_DEVDATA*) hDevData)->psRgxDevInfo : NULL;
+	if (!psRgxDevInfo)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pointer to the RGX device",
+				__func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* No need to take hHWPerfLock here since we are only reading integers
+	 * that always exist, to return to debugfs, and the read itself is
+	 * atomic.
+	 */
+	switch (eStreamId) {
+		case RGX_HWPERF_STREAM_ID0_FW:
+			*ui64Filter = psRgxDevInfo->ui64HWPerfFilter;
+			break;
+		case RGX_HWPERF_STREAM_ID1_HOST:
+			*ui64Filter = psRgxDevInfo->ui32HWPerfHostFilter;
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "%s: Invalid stream ID",
+					__func__));
+			return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+	RGX_HWPERF_DEVICE *psHWPerfDev, *psHWPerfNextDev;
+	RGX_HWPERF_CONNECTION *psHWPerfConnection = *ppsHWPerfConnection;
+
+	/* if connection object itself is NULL, nothing to free */
+	if (psHWPerfConnection == NULL)
+	{
+		return PVRSRV_OK;
+	}
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	psHWPerfNextDev = psHWPerfConnection->psHWPerfDevList;
+	while (psHWPerfNextDev)
+	{
+		psHWPerfDev = psHWPerfNextDev;
+		psHWPerfNextDev = psHWPerfNextDev->psNext;
+
+		/* Free the session memory */
+		if (psHWPerfDev->hDevData)
+			OSFreeMem(psHWPerfDev->hDevData);
+		OSFreeMem(psHWPerfDev);
+	}
+	OSFreeMem(psHWPerfConnection);
+	*ppsHWPerfConnection = NULL;
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection)
+{
+	RGX_HWPERF_DEVICE *psHWPerfDev;
+	RGX_KM_HWPERF_DEVDATA* psDevData;
+	IMG_UINT uiStreamId;
+	PVRSRV_ERROR eError;
+
+	/* Check session connection is not zero */
+	if (!psHWPerfConnection)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+	while (psHWPerfDev)
+	{
+		psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+		for (uiStreamId = 0; uiStreamId < RGX_HWPERF_MAX_STREAM_ID; uiStreamId++)
+		{
+			/* If the TL buffer exists, the client has not called ReleaseData
+			 * before disconnecting, so clean it up */
+			if (psDevData->pTlBuf[uiStreamId])
+			{
+				/* Call TLClientReleaseData and null out the buffer fields
+				 * and length */
+				eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[uiStreamId]);
+				psDevData->ui32AcqDataLen[uiStreamId] = 0;
+				psDevData->pTlBuf[uiStreamId] = NULL;
+				PVR_LOG_IF_ERROR(eError, "TLClientReleaseData");
+				/* Packets may be lost if release was not required */
+				if (!psDevData->bRelease[uiStreamId])
+				{
+					PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfClose: Events in buffer waiting to be read, remaining events may be lost."));
+				}
+			}
+
+			/* Close the TL stream, ignore the error if it occurs as we
+			 * are disconnecting */
+			if (psDevData->hSD[uiStreamId])
+			{
+				eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+				                             psDevData->hSD[uiStreamId]);
+				PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+				psDevData->hSD[uiStreamId] = NULL;
+			}
+
+			/* Free the client buffer used in session */
+			if (psDevData->pHwpBuf[uiStreamId])
+			{
+				OSFreeMem(psDevData->pHwpBuf[uiStreamId]);
+				psDevData->pHwpBuf[uiStreamId] = NULL;
+			}
+		}
+		psHWPerfDev = psHWPerfDev->psNext;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	eError = RGXHWPerfClose(*ppsHWPerfConnection);
+	PVR_LOG_IF_ERROR(eError, "RGXHWPerfClose");
+
+	eError = RGXHWPerfFreeConnection(ppsHWPerfConnection);
+	PVR_LOG_IF_ERROR(eError, "RGXHWPerfFreeConnection");
+
+	return eError;
+}
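+
+/* A minimal teardown sketch (illustrative, not part of the driver): a kernel
+ * client holding an RGX_HWPERF_CONNECTION can release everything in one call.
+ * psConn here is a hypothetical local obtained earlier from the HWPerf
+ * connect path.
+ *
+ *   RGX_HWPERF_CONNECTION *psConn;     // previously connected
+ *   PVRSRV_ERROR eErr = RGXHWPerfDisconnect(&psConn);
+ *   // On success psConn is NULL; streams are closed and buffers freed.
+ */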
+
+IMG_UINT64 RGXHWPerfConvertCRTimeStamp(
+		IMG_UINT32 ui32ClkSpeed,
+		IMG_UINT64 ui64CorrCRTimeStamp,
+		IMG_UINT64 ui64CorrOSTimeStamp,
+		IMG_UINT64 ui64CRTimeStamp)
+{
+	IMG_UINT64 ui64CRDeltaToOSDeltaKNs;
+	IMG_UINT64 ui64EventOSTimestamp, deltaRgxTimer, delta_ns;
+
+	if (!(ui64CRTimeStamp) || !(ui32ClkSpeed) || !(ui64CorrCRTimeStamp) || !(ui64CorrOSTimeStamp))
+	{
+		return 0;
+	}
+
+	ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(ui32ClkSpeed);
+
+	/* RGX CR timer ticks delta */
+	deltaRgxTimer = ui64CRTimeStamp - ui64CorrCRTimeStamp;
+	/* RGX time delta in nanoseconds */
+	delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs);
+	/* Calculate OS time of HWPerf event */
+	ui64EventOSTimestamp = ui64CorrOSTimeStamp + delta_ns;
+
+	return ui64EventOSTimestamp;
+}
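+
+/* Illustrative use (a sketch; sCorr and its fields are placeholder names,
+ * not the real correlation structure): convert a raw CR timestamp from an
+ * HWPerf packet into an OS timestamp using a correlation sample captured
+ * earlier.
+ *
+ *   IMG_UINT64 ui64EventOSns = RGXHWPerfConvertCRTimeStamp(
+ *           sCorr.ui32ClockSpeed,      // GPU clock speed at correlation time
+ *           sCorr.ui64CRTimeStamp,     // CR timer value at correlation time
+ *           sCorr.ui64OSTimeStamp,     // OS time (ns) at correlation time
+ *           ui64PacketCRTimeStamp);    // CR timer value from the event packet
+ */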
+
+/******************************************************************************
+ End of file (rgxhwperf.c)
+ ******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxhwperf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxhwperf.h
new file mode 100644
index 0000000..b35edcf
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxhwperf.h
@@ -0,0 +1,502 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX HWPerf functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHWPERF_H_
+#define RGXHWPERF_H_
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#include "device.h"
+#include "connection_server.h"
+#include "rgxdevice.h"
+#include "rgx_hwperf.h"
+
+/* HWPerf host buffer size constraints in KBs */
+#define HWPERF_HOST_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB
+#define HWPERF_HOST_TL_STREAM_SIZE_MIN     (32U)
+#define HWPERF_HOST_TL_STREAM_SIZE_MAX     (3072U)
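+
+/* A minimal sketch (assuming a caller-supplied ui32Requested size in KB):
+ * clamp a requested host buffer size to the bounds above before handing it
+ * to RGXHWPerfHostInit() declared below.
+ *
+ *   IMG_UINT32 ui32SizeKB = ui32Requested;
+ *   if (ui32SizeKB < HWPERF_HOST_TL_STREAM_SIZE_MIN)
+ *       ui32SizeKB = HWPERF_HOST_TL_STREAM_SIZE_MIN;
+ *   else if (ui32SizeKB > HWPERF_HOST_TL_STREAM_SIZE_MAX)
+ *       ui32SizeKB = HWPERF_HOST_TL_STREAM_SIZE_MAX;
+ */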
+
+/******************************************************************************
+ * RGX HW Performance decode Bvnc Features for HWPerf
+ *****************************************************************************/
+PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                RGX_HWPERF_BVNC    *psBVNC);
+
+PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA    *psConnection,
+                                                  PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                  RGX_HWPERF_BVNC    *psBVNC);
+
+/******************************************************************************
+ * RGX HW Performance Data Transport Routines
+ *****************************************************************************/
+
+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo);
+void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
+void RGXHWPerfClientInitAppHintCallbacks(void);
+
+/******************************************************************************
+ * RGX HW Performance Profiling API(s)
+ *****************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
+	CONNECTION_DATA      * psConnection,
+	PVRSRV_DEVICE_NODE   * psDeviceNode,
+	RGX_HWPERF_STREAM_ID   eStreamId,
+	IMG_BOOL               bToggle,
+	IMG_UINT64             ui64Mask);
+
+
+PVRSRV_ERROR PVRSRVRGXConfigEnableHWPerfCountersKM(
+	CONNECTION_DATA    * psConnection,
+	PVRSRV_DEVICE_NODE * psDeviceNode,
+	IMG_UINT32         ui32ArrayLen,
+	RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs);
+
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfCountersKM(
+	CONNECTION_DATA    * psConnection,
+	PVRSRV_DEVICE_NODE * psDeviceNode,
+	IMG_BOOL           bEnable,
+	IMG_UINT32         ui32ArrayLen,
+	IMG_UINT16         * psBlockIDs);
+
+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
+	CONNECTION_DATA    * psConnection,
+	PVRSRV_DEVICE_NODE * psDeviceNode,
+	IMG_UINT16           ui16CustomBlockID,
+	IMG_UINT16           ui16NumCustomCounters,
+	IMG_UINT32         * pui32CustomCounterIDs);
+
+/******************************************************************************
+ * RGX HW Performance Host Stream API
+ *****************************************************************************/
+
+PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB);
+PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo);
+void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+
+void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                 IMG_UINT32 ui32Filter);
+
+void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                               RGX_HWPERF_KICK_TYPE eEnqType,
+                               IMG_UINT32 ui32Pid,
+                               IMG_UINT32 ui32FWDMContext,
+                               IMG_UINT32 ui32ExtJobRef,
+                               IMG_UINT32 ui32IntJobRef,
+                               PVRSRV_FENCE hCheckFence,
+                               PVRSRV_FENCE hUpdateFence,
+                               PVRSRV_TIMELINE hUpdateTimeline,
+                               IMG_UINT64 ui64CheckFenceUID,
+                               IMG_UINT64 ui64UpdateFenceUID,
+                               IMG_UINT64 ui64DeadlineInus,
+                               IMG_UINT64 ui64CycleEstimate);
+
+void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                 RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+                                 const IMG_CHAR *psName,
+                                 IMG_UINT32 ui32NameSize,
+                                 RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail);
+
+void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+                                IMG_UINT64 ui64UID,
+                                IMG_UINT32 ui32PID,
+                                IMG_UINT32 ui32FWAddr);
+
+void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                  RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+                                  IMG_UINT64 ui64NewUID,
+                                  IMG_UINT64 ui64UID1,
+                                  IMG_UINT64 ui64UID2,
+                                  const IMG_CHAR *psName,
+                                  IMG_UINT32 ui32NameSize);
+
+void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                               RGX_HWPERF_UFO_EV eUfoType,
+                               RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData,
+                               const IMG_BOOL bSleepAllowed);
+
+void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+
+void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                 RGX_HWPERF_DEV_INFO_EV eEvType,
+                                 PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus,
+                                 PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason);
+
+void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                           RGX_HWPERF_INFO_EV eEvType);
+
+void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType,
+                                IMG_PID uiPID,
+                                PVRSRV_FENCE hFence,
+                                IMG_UINT32 ui32Data);
+
+void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                    IMG_PID uiPID,
+                                    PVRSRV_TIMELINE hSWTimeline,
+                                    IMG_UINT64 ui64SyncPtIndex);
+
+IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent);
+
+#define _RGX_HWPERF_HOST_FILTER(CTX, EV) \
+		(((PVRSRV_RGXDEV_INFO *)(CTX)->psDeviceNode->pvDevice)->ui32HWPerfHostFilter \
+		& RGX_HWPERF_EVENT_MASK_VALUE(EV))
+
+#define _RGX_DEVICE_INFO_FROM_CTX(CTX) \
+		((PVRSRV_RGXDEV_INFO *)(CTX)->psDeviceNode->pvDevice)
+
+#define _RGX_DEVICE_INFO_FROM_NODE(DEVNODE) \
+		((PVRSRV_RGXDEV_INFO *)(DEVNODE)->pvDevice)
+
+/* Deadline and cycle estimate is not supported for all ENQ events */
+#define NO_DEADLINE 0
+#define NO_CYCEST   0
+
+
+#if defined(SUPPORT_RGX)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and, if so,
+ * posts an event to the HWPerfHost stream.
+ *
+ * @param C      Kick context
+ * @param P      Pid of kicking process
+ * @param X      Related FW context
+ * @param E      External job reference
+ * @param I      Internal job reference
+ * @param K      Kick type
+ * @param CF     Check fence handle
+ * @param UF     Update fence handle
+ * @param UT     Update timeline (on which above UF was created) handle
+ * @param CHKUID Check fence UID
+ * @param UPDUID Update fence UID
+ * @param D      Deadline
+ * @param CE     Cycle estimate
+ */
+#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE) \
+		do { \
+			if (_RGX_HWPERF_HOST_FILTER(C, RGX_HWPERF_HOST_ENQ)) \
+			{ \
+				RGXHWPerfHostPostEnqEvent(_RGX_DEVICE_INFO_FROM_CTX(C), \
+				                          (K), (P), (X), (E), (I), \
+				                          (CF), (UF), (UT), \
+				                          (CHKUID), (UPDUID), (D), (CE)); \
+			} \
+		} while (0)
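+
+/* Illustrative call (hypothetical values; the kick type, fences and job
+ * references depend on the caller): a kick with no fences and no deadline or
+ * cycle estimate might post an ENQ event like this.
+ *
+ *   RGXSRV_HWPERF_ENQ(psContext, ui32PID, ui32FWCtx,
+ *                     ui32ExtJobRef, ui32IntJobRef,
+ *                     RGX_HWPERF_KICK_TYPE_TA3D,
+ *                     PVRSRV_NO_FENCE, PVRSRV_NO_FENCE, PVRSRV_NO_TIMELINE,
+ *                     0, 0, NO_DEADLINE, NO_CYCEST);
+ */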
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and, if so,
+ * posts an event to the HWPerfHost stream.
+ *
+ * @param I Device Info pointer
+ * @param T Host UFO event type
+ * @param D Pointer to UFO data
+ * @param S Is sleeping allowed?
+ */
+#define RGXSRV_HWPERF_UFO(I, T, D, S) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_UFO)) \
+			{ \
+				RGXHWPerfHostPostUfoEvent((I), (T), (D), (S)); \
+			} \
+		} while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and, if so,
+ * posts an event to the HWPerfHost stream.
+ *
+ * @param D Device node pointer
+ * @param T Host ALLOC event type
+ * @param FWADDR sync firmware address
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \
+			{ \
+				RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \
+				uAllocDetail.sSyncAlloc.ui32FWAddr = (FWADDR); \
+				RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+				                            RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+				                            (N), (Z), &uAllocDetail); \
+			} \
+		} while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and, if so,
+ * posts an event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param PID ID of allocating process
+ * @param FENCE PVRSRV_FENCE object
+ * @param FWADDR sync firmware address
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z)  \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \
+			{ \
+				RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \
+				uAllocDetail.sFenceAlloc.uiPID = (PID); \
+				uAllocDetail.sFenceAlloc.hFence = (FENCE); \
+				uAllocDetail.sFenceAlloc.ui32CheckPt_FWAddr = (FWADDR); \
+				RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+				                            RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, \
+				                            N, Z, &uAllocDetail); \
+			} \
+		} while (0)
+
+/**
+ * @param D Device Node pointer
+ * @param TL PVRSRV_TIMELINE on which CP is allocated
+ * @param PID Allocating process ID of this TL/FENCE
+ * @param FENCE PVRSRV_FENCE as passed to SyncCheckpointResolveFence OR PVRSRV_NO_FENCE
+ * @param FWADDR sync firmware address
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z)  \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \
+			{ \
+				RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \
+				uAllocDetail.sSyncCheckPointAlloc.ui32CheckPt_FWAddr = (FWADDR); \
+				uAllocDetail.sSyncCheckPointAlloc.hTimeline = (TL); \
+				uAllocDetail.sSyncCheckPointAlloc.uiPID = (PID); \
+				uAllocDetail.sSyncCheckPointAlloc.hFence = (FENCE); \
+				RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+				                            RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, \
+				                            N, Z, &uAllocDetail); \
+			} \
+		} while (0)
+
+/**
+ * @param D Device Node pointer
+ * @param PID ID of allocating process
+ * @param SW_FENCE PVRSRV_FENCE object
+ * @param SW_TL PVRSRV_TIMELINE on which SW_FENCE is allocated
+ * @param SPI Sync point index on the SW_TL on which this SW_FENCE is allocated
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z)  \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \
+			{ \
+				RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \
+				uAllocDetail.sSWFenceAlloc.uiPID = (PID); \
+				uAllocDetail.sSWFenceAlloc.hSWFence = (SW_FENCE); \
+				uAllocDetail.sSWFenceAlloc.hSWTimeline = (SW_TL); \
+				uAllocDetail.sSWFenceAlloc.ui64SyncPtIndex = (SPI); \
+				RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+				                            RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, \
+				                            N, Z, &uAllocDetail); \
+			} \
+		} while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and, if so,
+ * posts an event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T Host resource type being freed
+ * @param FWADDR sync firmware address
+ */
+#define RGXSRV_HWPERF_FREE(D, T, FWADDR) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \
+			{ \
+				RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+				                           RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+				                           (0), (0), (FWADDR)); \
+			} \
+		} while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and, if so,
+ * posts an event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T Host resource type being freed
+ * @param UID ID of input object
+ * @param PID ID of allocating process
+ * @param FWADDR sync firmware address
+ */
+#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \
+			{ \
+				RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+				                           RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+				                           (UID), (PID), (FWADDR)); \
+			} \
+		} while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and, if so,
+ * posts an event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T Host resource type being modified
+ * @param NEWUID ID of output object
+ * @param UID1 ID of first input object
+ * @param UID2 ID of second input object
+ * @param N string containing new object's name
+ * @param Z string size including null terminating character
+ */
+#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_MODIFY)) \
+			{ \
+				RGXHWPerfHostPostModifyEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+				                             RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+				                             (NEWUID), (UID1), (UID2), N, Z); \
+			} \
+		} while (0)
+
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and, if so,
+ * posts a device info event to the HWPerfHost stream.
+ *
+ * @param I Device info pointer
+ */
+#define RGXSRV_HWPERF_CLK_SYNC(I) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_CLK_SYNC)) \
+			{ \
+				RGXHWPerfHostPostClkSyncEvent((I)); \
+			} \
+		} while (0)
+
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and, if so,
+ * posts an event to the HWPerfHost stream.
+ *
+ * @param I      Device info pointer
+ * @param T      Event type
+ * @param H      Health status enum
+ * @param R      Health reason enum
+ */
+#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \
+			{ \
+				RGXHWPerfHostPostDeviceInfo((I), (T), (H), (R)); \
+			} \
+		} while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and, if so,
+ * posts an event to the HWPerfHost stream.
+ *
+ * @param I      Device info pointer
+ * @param T      Event type
+ */
+#define RGXSRV_HWPERF_HOST_INFO(I, T) \
+do { \
+	if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_INFO)) \
+	{ \
+		RGXHWPerfHostPostInfo((I), (T)); \
+	} \
+} while (0)
+
+/**
+ * @param I      Device info pointer
+ * @param T      Wait Event type
+ * @param PID    Process ID that the following fence belongs to
+ * @param F      Fence handle
+ * @param D      Data for this wait event type
+ */
+#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D) \
+do { \
+	if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_FENCE_WAIT)) \
+	{ \
+		RGXHWPerfHostPostFenceWait(I, RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_##T, \
+		                           (PID), (F), (D)); \
+	} \
+} while (0)
+
+/**
+ * @param I      Device info pointer
+ * @param PID    Process ID that the following timeline belongs to
+ * @param SW_TL  SW-timeline handle
+ * @param SPI    Sync-pt index where this SW-timeline has reached
+ */
+#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI) \
+do { \
+	if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE)) \
+	{ \
+		RGXHWPerfHostPostSWTimelineAdv((I), (PID), (SW_TL), (SPI)); \
+	} \
+} while (0)
+#else
+
+#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE)
+#define RGXSRV_HWPERF_UFO(I, T, D, S)
+#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z)
+#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z)
+#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z)
+#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z)
+#define RGXSRV_HWPERF_FREE(D, T, FWADDR)
+#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR)
+#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z)
+#define RGXSRV_HWPERF_CLK_SYNC(I)
+#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R)
+#define RGXSRV_HWPERF_HOST_INFO(I, T)
+#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D)
+#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI)
+
+#endif
+
+#endif /* RGXHWPERF_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxinit.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxinit.c
new file mode 100644
index 0000000..fd1f99b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxinit.c
@@ -0,0 +1,4666 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#if defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "img_defs.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrsrv_bridge_init.h"
+#include "syscommon.h"
+#include "rgx_heaps.h"
+#include "rgxheapconfig.h"
+#include "rgxpower.h"
+#include "tlstream.h"
+#include "pvrsrv_tlstreams.h"
+
+#include "rgxinit.h"
+#include "rgxbvnc.h"
+
+#include "pdump_km.h"
+#include "handle.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "rgxmem.h"
+#include "sync_internal.h"
+#include "pvrsrv_apphint.h"
+#include "oskm_apphint.h"
+#include "rgxfwdbg.h"
+#include "info_page.h"
+
+#include "rgxfwimageutils.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+#include "rgxmmuinit.h"
+#include "rgxmipsmmuinit.h"
+#include "physmem.h"
+#include "devicemem_utils.h"
+#include "devicemem_server.h"
+#include "physmem_osmem.h"
+
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#include "htbserver.h"
+
+#include "rgx_options.h"
+#include "pvrversion.h"
+
+#include "rgx_compat_bvnc.h"
+
+#include "rgx_heaps.h"
+
+#include "rgxta3d.h"
+#include "rgxtimecorr.h"
+
+#include "rgx_bvnc_defs_km.h"
+#if defined(PDUMP)
+#include "rgxstartstop.h"
+#endif
+
+#include "rgx_fwif_alignchecks.h"
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString);
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32  pui32RGXClockSpeed);
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64  ui64ResetValue1, IMG_UINT64  ui64ResetValue2);
+static PVRSRV_ERROR RGXVzInitHeaps(DEVICE_MEMORY_INFO *psNewMemoryInfo,
+		DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor);
+static void RGXVzDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo);
+
+#define RGX_MMU_LOG2_PAGE_SIZE_4KB   (12)
+#define RGX_MMU_LOG2_PAGE_SIZE_16KB  (14)
+#define RGX_MMU_LOG2_PAGE_SIZE_64KB  (16)
+#define RGX_MMU_LOG2_PAGE_SIZE_256KB (18)
+#define RGX_MMU_LOG2_PAGE_SIZE_1MB   (20)
+#define RGX_MMU_LOG2_PAGE_SIZE_2MB   (21)
+
+#define RGX_MMU_PAGE_SIZE_4KB   (   4 * 1024)
+#define RGX_MMU_PAGE_SIZE_16KB  (  16 * 1024)
+#define RGX_MMU_PAGE_SIZE_64KB  (  64 * 1024)
+#define RGX_MMU_PAGE_SIZE_256KB ( 256 * 1024)
+#define RGX_MMU_PAGE_SIZE_1MB   (1024 * 1024)
+#define RGX_MMU_PAGE_SIZE_2MB   (2048 * 1024)
+#define RGX_MMU_PAGE_SIZE_MIN RGX_MMU_PAGE_SIZE_4KB
+#define RGX_MMU_PAGE_SIZE_MAX RGX_MMU_PAGE_SIZE_2MB
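+
+/* Note: each RGX_MMU_PAGE_SIZE_* above equals 1 << RGX_MMU_LOG2_PAGE_SIZE_*
+ * for the matching size, e.g.:
+ *
+ *   (1U << RGX_MMU_LOG2_PAGE_SIZE_2MB) == RGX_MMU_PAGE_SIZE_2MB  // 2097152
+ */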
+
+#define VAR(x) #x
+
+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo);
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+
+/* bits used by the LISR to provide a trace of its last execution */
+#define RGX_LISR_DEVICE_NOT_POWERED	(1 << 0)
+#define RGX_LISR_FWIF_POW_OFF		(1 << 1)
+#define RGX_LISR_EVENT_EN		(1 << 2)
+#define RGX_LISR_COUNTS_EQUAL		(1 << 3)
+#define RGX_LISR_PROCESSED		(1 << 4)
+
+typedef struct _LISR_EXECUTION_INFO_
+{
+	/* bit mask showing execution flow of last LISR invocation */
+	IMG_UINT32 ui32State;
+	/* snapshot from the last LISR invocation, regardless of
+	 * whether an interrupt was handled
+	 */
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+	IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_NUM_OS];
+#else
+	IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_THREAD_NUM];
+#endif
+	/* time of the last LISR invocation */
+	IMG_UINT64 ui64Clockns;
+} LISR_EXECUTION_INFO;
+
+/* information about the last execution of the LISR */
+static LISR_EXECUTION_INFO g_sLISRExecutionInfo;
+
+#endif
+
+#if !defined(NO_HARDWARE)
+/*************************************************************************/ /*!
+@Function       SampleIRQCount
+@Description    Utility function taking snapshots of RGX FW interrupt count.
+@Input          psDevInfo    Device Info structure
+
+@Return         IMG_BOOL     Returns IMG_TRUE if the current RGX FW IRQ count
+                             differs from the sampled count for any FW thread.
+ */ /**************************************************************************/
+static INLINE IMG_BOOL SampleIRQCount(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_BOOL bReturnVal = IMG_FALSE;
+	volatile IMG_UINT32 *aui32SampleIrqCount = psDevInfo->aui32SampleIRQCount;
+	IMG_UINT32 ui32IrqCnt;
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+	get_irq_cnt_val(ui32IrqCnt, RGXFW_HYPERVISOR_OS, psDevInfo);
+
+	if (ui32IrqCnt != aui32SampleIrqCount[RGXFW_THREAD_0])
+	{
+		aui32SampleIrqCount[RGXFW_THREAD_0] = ui32IrqCnt;
+		bReturnVal = IMG_TRUE;
+	}
+#else
+	IMG_UINT32 ui32TID;
+
+	for_each_irq_cnt(ui32TID)
+	{
+		get_irq_cnt_val(ui32IrqCnt, ui32TID, psDevInfo);
+		if (aui32SampleIrqCount[ui32TID] != ui32IrqCnt)
+		{
+			/* We are handling any unhandled interrupts here, so align the
+			 * host count with the FW count: sample the current count from
+			 * the FW _after_ the interrupt has been cleared.
+			 */
+			aui32SampleIrqCount[ui32TID] = ui32IrqCnt;
+			bReturnVal = IMG_TRUE;
+		}
+	}
+#endif
+
+	return bReturnVal;
+}
+
+static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+	IMG_UINT32 ui32idx;
+#endif
+
+	RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo);
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+	PVR_DPF((PVR_DBG_ERROR, "Last RGX_LISRHandler State: 0x%08X Clock: %llu",
+			g_sLISRExecutionInfo.ui32State,
+			g_sLISRExecutionInfo.ui64Clockns));
+
+	for_each_irq_cnt(ui32idx)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				MSG_IRQ_CNT_TYPE " %u: InterruptCountSnapshot: 0x%X",
+				ui32idx, g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32idx]));
+	}
+#else
+	PVR_DPF((PVR_DBG_ERROR, "No further information available. Please enable PVRSRV_DEBUG_LISR_EXECUTION"));
+#endif
+
+
+	if (psRGXFWIfTraceBuf->ePowState != RGXFWIF_POW_OFF)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_WaitForInterruptsTimeout: FW pow state is not OFF (is %u)",
+				(unsigned int) psRGXFWIfTraceBuf->ePowState));
+	}
+
+	return SampleIRQCount(psDevInfo);
+}
+
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_BOOL bScheduleMISR;
+
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		bScheduleMISR = IMG_TRUE;
+	}
+	else
+	{
+		bScheduleMISR = _WaitForInterruptsTimeoutCheck(psDevInfo);
+	}
+
+	if (bScheduleMISR)
+	{
+		OSScheduleMISR(psDevInfo->pvMISRData);
+
+		if (psDevInfo->pvAPMISRData != NULL)
+		{
+			OSScheduleMISR(psDevInfo->pvAPMISRData);
+		}
+	}
+}
+
+static IMG_BOOL RGXFWIrqEventRx(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_BOOL bIrqRx = IMG_TRUE;
+
+#if defined(RGX_IRQ_HYPERV_HANDLER)
+	/* The hypervisor reads and clears the FW status register, then injects
+	 * an IRQ only into the recipient OS. The KM driver should only execute
+	 * the handler.
+	 */
+	PVR_UNREFERENCED_PARAMETER(psDevInfo);
+#else
+	IMG_UINT32 ui32IRQStatus, ui32IRQStatusReg, ui32IRQStatusEventMsk, ui32IRQClearReg, ui32IRQClearMask;
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		ui32IRQStatusReg = RGX_CR_MIPS_WRAPPER_IRQ_STATUS;
+		ui32IRQStatusEventMsk = RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN;
+		ui32IRQClearReg = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR;
+		ui32IRQClearMask = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN;
+	}
+	else
+	{
+		ui32IRQStatusReg = RGX_CR_META_SP_MSLVIRQSTATUS;
+		ui32IRQStatusEventMsk = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN;
+		ui32IRQClearReg = RGX_CR_META_SP_MSLVIRQSTATUS;
+		ui32IRQClearMask = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK;
+	}
+
+	ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQStatusReg);
+
+	if (ui32IRQStatus & ui32IRQStatusEventMsk)
+	{
+		OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQClearReg, ui32IRQClearMask);
+	}
+	else
+	{
+		bIrqRx = IMG_FALSE;
+	}
+#endif
+
+	return bIrqRx;
+}
+
+/*
+	RGX LISR Handler
+ */
+static IMG_BOOL RGX_LISRHandler (void *pvData)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	IMG_BOOL bInterruptProcessed;
+	RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf;
+
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		if (!psDevInfo->bRGXPowered)
+		{
+			return IMG_FALSE;
+		}
+
+		OSScheduleMISR(psDevInfo->pvMISRData);
+		return IMG_TRUE;
+	}
+	else
+	{
+		bInterruptProcessed = IMG_FALSE;
+		psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	}
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+	{
+		IMG_UINT32 ui32idx;
+		IMG_UINT32 ui32IrqCnt;
+
+		for_each_irq_cnt(ui32idx)
+		{
+			get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo);
+			g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32idx] = ui32IrqCnt;
+		}
+
+		g_sLISRExecutionInfo.ui32State = 0;
+		g_sLISRExecutionInfo.ui64Clockns = OSClockns64();
+	}
+#endif
+
+	if (psDevInfo->bRGXPowered == IMG_FALSE)
+	{
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+		g_sLISRExecutionInfo.ui32State |= RGX_LISR_DEVICE_NOT_POWERED;
+#endif
+		if (psRGXFWIfTraceBuf->ePowState == RGXFWIF_POW_OFF)
+		{
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+			g_sLISRExecutionInfo.ui32State |= RGX_LISR_FWIF_POW_OFF;
+#endif
+			return bInterruptProcessed;
+		}
+	}
+
+	if (RGXFWIrqEventRx(psDevInfo))
+	{
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+		g_sLISRExecutionInfo.ui32State |= RGX_LISR_EVENT_EN;
+#endif
+
+#if defined(RGX_FEATURE_OCPBUS)
+		OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OCP_IRQSTATUS_2, RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN);
+#endif
+
+		bInterruptProcessed = SampleIRQCount(psDevInfo);
+
+		if (!bInterruptProcessed)
+		{
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+			g_sLISRExecutionInfo.ui32State |= RGX_LISR_COUNTS_EQUAL;
+#endif
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS) && !defined(RGX_IRQ_HYPERV_HANDLER)
+			/* if per-OS GPU IRQ counters are used, but the Host OS is still the
+			 * one that handles and clears the HW CPU IRQ, this IRQ request must be
+			 * marked as processed. Consider an interrupt aimed at a Guest OS that
+			 * doesn't require the MISR to run on the Host, only clearing the IRQ.
+			 *
+			 * This prevents the HW CPU IRQ bit being left set and marking this as
+			 * a spurious interrupt, which in time, could lead the OS to assume
+			 * a hardware failure occurred and disable the interrupt line.
+			 */
+			return IMG_TRUE;
+#else
+			return bInterruptProcessed;
+#endif
+		}
+
+		bInterruptProcessed = IMG_TRUE;
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+		g_sLISRExecutionInfo.ui32State |= RGX_LISR_PROCESSED;
+#endif
+
+		OSScheduleMISR(psDevInfo->pvMISRData);
+
+		if (psDevInfo->pvAPMISRData != NULL)
+		{
+			OSScheduleMISR(psDevInfo->pvAPMISRData);
+		}
+	}
+
+	return bInterruptProcessed;
+}
+
+static void RGX_MISR_ProcessKCCBDeferredList(PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	/* First check whether there are pending commands in Deferred KCCB List */
+	OSLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList);
+	if (dllist_is_empty(&psDevInfo->sKCCBDeferredCommandsListHead))
+	{
+		OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+		return;
+	}
+	OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#endif
+
+	/* Take the power lock to avoid further power transition requests
+	 * while the KCCB deferred list is being processed */
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to acquire PowerLock (device: %p, error: %s)",
+				__func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+		goto _RGX_MISR_ProcessKCCBDeferredList_PowerLock_failed;
+	}
+
+	/* Try to send deferred KCCB commands; do not poll from here. */
+	eError = RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s could not flush Deferred KCCB list, KCCB is full.",
+				 __func__));
+	}
+
+	PVRSRVPowerUnlock(psDeviceNode);
+
+_RGX_MISR_ProcessKCCBDeferredList_PowerLock_failed:
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+	return;
+}
+
+static void RGX_MISRHandler_CheckFWActivePowerState(void *psDevice)
+{
+	PVRSRV_DEVICE_NODE	*psDeviceNode = psDevice;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_ON || psFWTraceBuf->ePowState == RGXFWIF_POW_IDLE)
+	{
+		RGX_MISR_ProcessKCCBDeferredList(psDeviceNode);
+	}
+
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_IDLE)
+	{
+		/* The FW is IDLE and therefore could be shut down */
+		eError = RGXActivePowerRequest(psDeviceNode);
+
+		if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED))
+		{
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_WARNING,
+					"%s: Failed RGXActivePowerRequest call (device: %p) with %s",
+					__func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+				PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+			}
+			else
+			{
+				/* Re-schedule the power down request as it was deferred. */
+				OSScheduleMISR(psDevInfo->pvAPMISRData);
+			}
+		}
+	}
+
+}
+
+/* Shorter names to keep the code below a bit more compact */
+#define GPU_ACTIVE_LOW   RGXFWIF_GPU_UTIL_STATE_ACTIVE_LOW
+#define GPU_IDLE         RGXFWIF_GPU_UTIL_STATE_IDLE
+#define GPU_ACTIVE_HIGH  RGXFWIF_GPU_UTIL_STATE_ACTIVE_HIGH
+#define GPU_BLOCKED      RGXFWIF_GPU_UTIL_STATE_BLOCKED
+#define MAX_ITERATIONS   64
+
+static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+		IMG_HANDLE hGpuUtilUser,
+		RGXFWIF_GPU_UTIL_STATS *psReturnStats)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+	IMG_UINT64 ui64TimeNow;
+	IMG_UINT32 ui32Attempts;
+	IMG_UINT32 ui32Remainder;
+
+
+	/***** (1) Initialise return stats *****/
+
+	psReturnStats->bValid = IMG_FALSE;
+	psReturnStats->ui64GpuStatActiveLow  = 0;
+	psReturnStats->ui64GpuStatIdle       = 0;
+	psReturnStats->ui64GpuStatActiveHigh = 0;
+	psReturnStats->ui64GpuStatBlocked    = 0;
+	psReturnStats->ui64GpuStatCumulative = 0;
+
+	if (hGpuUtilUser == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psAggregateStats = hGpuUtilUser;
+
+
+	/* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */
+	for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++)
+	{
+		IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0};
+		IMG_UINT64 ui64LastPeriod = 0, ui64LastWord = 0, ui64LastState = 0, ui64LastTime = 0;
+		IMG_UINT32 i = 0;
+
+
+		/***** (2) Get latest data from shared area *****/
+
+		OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+		/*
+		 * Detect whether the FW is in the middle of an update; this also
+		 * guards against torn reads while the FW updates a 64-bit variable.
+		 */
+		while (((ui64LastWord != psUtilFWCb->ui64LastWord) ||
+				(aui64TmpCounters[ui64LastState] !=
+				 psUtilFWCb->aui64StatsCounters[ui64LastState])) &&
+			   (i < MAX_ITERATIONS))
+		{
+			ui64LastWord  = psUtilFWCb->ui64LastWord;
+			ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord);
+			aui64TmpCounters[GPU_ACTIVE_LOW]  = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE_LOW];
+			aui64TmpCounters[GPU_IDLE]        = psUtilFWCb->aui64StatsCounters[GPU_IDLE];
+			aui64TmpCounters[GPU_ACTIVE_HIGH] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE_HIGH];
+			aui64TmpCounters[GPU_BLOCKED]     = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED];
+			i++;
+		}
+
+		OSLockRelease(psDevInfo->hGPUUtilLock);
+
+		if (i == MAX_ITERATIONS)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					"RGXGetGpuUtilStats could not get reliable data after trying %u times", i));
+			return PVRSRV_ERROR_TIMEOUT;
+		}
+
+
+		/***** (3) Compute return stats *****/
+
+		/* Update temp counters to account for the time since the last update to the shared ones */
+		OSMemoryBarrier(); /* Ensure the current time is read after the loop above */
+		ui64TimeNow    = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64());
+		ui64LastTime   = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord);
+		ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+		aui64TmpCounters[ui64LastState] += ui64LastPeriod;
+
+		/* Get statistics for a user since its last request */
+		psReturnStats->ui64GpuStatActiveLow = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE_LOW],
+				psAggregateStats->ui64GpuStatActiveLow);
+		psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE],
+				psAggregateStats->ui64GpuStatIdle);
+		psReturnStats->ui64GpuStatActiveHigh = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE_HIGH],
+				psAggregateStats->ui64GpuStatActiveHigh);
+		psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED],
+				psAggregateStats->ui64GpuStatBlocked);
+		psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatActiveLow + psReturnStats->ui64GpuStatIdle +
+				psReturnStats->ui64GpuStatActiveHigh + psReturnStats->ui64GpuStatBlocked;
+
+		if (psAggregateStats->ui64TimeStamp != 0)
+		{
+			IMG_UINT64 ui64TimeSinceLastCall = ui64TimeNow - psAggregateStats->ui64TimeStamp;
+			/* We expect to return at least 75% of the time since the last call in GPU stats */
+			IMG_UINT64 ui64MinReturnedStats = ui64TimeSinceLastCall - (ui64TimeSinceLastCall / 4);
+
+			/*
+			 * If the returned stats are substantially lower than the time since
+			 * the last call, then the Host might have read a partial update from the FW.
+			 * If this happens, try sampling the shared counters again.
+			 */
+			if (psReturnStats->ui64GpuStatCumulative < ui64MinReturnedStats)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE,
+						"%s: Return stats (%" IMG_UINT64_FMTSPEC ") too low "
+						"(call period %" IMG_UINT64_FMTSPEC ")",
+						__func__, psReturnStats->ui64GpuStatCumulative, ui64TimeSinceLastCall));
+				PVR_DPF((PVR_DBG_MESSAGE, "%s: Attempt #%u has failed, trying again",
+						__func__, ui32Attempts));
+				continue;
+			}
+		}
+
+		break;
+	}
+
+
+	/***** (4) Update aggregate stats for the current user *****/
+
+	psAggregateStats->ui64GpuStatActiveLow  += psReturnStats->ui64GpuStatActiveLow;
+	psAggregateStats->ui64GpuStatIdle       += psReturnStats->ui64GpuStatIdle;
+	psAggregateStats->ui64GpuStatActiveHigh += psReturnStats->ui64GpuStatActiveHigh;
+	psAggregateStats->ui64GpuStatBlocked    += psReturnStats->ui64GpuStatBlocked;
+	psAggregateStats->ui64TimeStamp          = ui64TimeNow;
+
+
+	/***** (5) Convert return stats to microseconds *****/
+
+	psReturnStats->ui64GpuStatActiveLow  = OSDivide64(psReturnStats->ui64GpuStatActiveLow, 1000, &ui32Remainder);
+	psReturnStats->ui64GpuStatIdle       = OSDivide64(psReturnStats->ui64GpuStatIdle, 1000, &ui32Remainder);
+	psReturnStats->ui64GpuStatActiveHigh = OSDivide64(psReturnStats->ui64GpuStatActiveHigh, 1000, &ui32Remainder);
+	psReturnStats->ui64GpuStatBlocked    = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder);
+	psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder);
+
+	/* Check that the return stats make sense */
+	if (psReturnStats->ui64GpuStatCumulative == 0)
+	{
+		/* We can enter here only if all of the RGXFWIF_GPU_UTIL_GET_PERIOD
+		 * computations above returned 0. This could happen if the GPU
+		 * frequency value is not well calibrated and the FW is updating
+		 * the GPU state while the Host is reading it.
+		 * When such an event happens frequently, timers or the aggregate
+		 * stats might not be accurate.
+		 */
+		PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data."));
+		return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+	}
+
+	psReturnStats->bValid = IMG_TRUE;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser)
+{
+	RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+	/* NoStats used since this may be called outside of the register/de-register
+	 * process calls which track memory use. */
+	psAggregateStats = OSAllocMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS));
+	if (psAggregateStats == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psAggregateStats->ui64GpuStatActiveLow  = 0;
+	psAggregateStats->ui64GpuStatIdle       = 0;
+	psAggregateStats->ui64GpuStatActiveHigh = 0;
+	psAggregateStats->ui64GpuStatBlocked    = 0;
+	psAggregateStats->ui64TimeStamp         = 0;
+
+	/* Not used */
+	psAggregateStats->bValid = IMG_FALSE;
+	psAggregateStats->ui64GpuStatCumulative = 0;
+
+	*phGpuUtilUser = psAggregateStats;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser)
+{
+	RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+	if (hGpuUtilUser == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psAggregateStats = hGpuUtilUser;
+	OSFreeMemNoStats(psAggregateStats);
+
+	return PVRSRV_OK;
+}
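+
+/* A minimal usage sketch (illustrative; external callers are expected to go
+ * through the device node's GPU-utilisation hooks rather than calling the
+ * static RGXGetGpuUtilStats directly): register a user handle, query the
+ * per-user deltas, then release the handle.
+ *
+ *   IMG_HANDLE hUser;
+ *   RGXFWIF_GPU_UTIL_STATS sStats;
+ *   if (SORgxGpuUtilStatsRegister(&hUser) == PVRSRV_OK)
+ *   {
+ *       if (RGXGetGpuUtilStats(psDeviceNode, hUser, &sStats) == PVRSRV_OK
+ *           && sStats.bValid)
+ *       {
+ *           // sStats.ui64GpuStat* hold microseconds since registration
+ *       }
+ *       SORgxGpuUtilStatsUnregister(hUser);
+ *   }
+ */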
+
+/*
+	RGX MISR Handler
+ */
+static void RGX_MISRHandler_Main (void *pvData)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	/* Give the HWPerf service a chance to transfer some data from the FW
+	 * buffer to the host driver transport layer buffer.
+	 */
+	RGXHWPerfDataStoreCB(psDeviceNode);
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	/* Process the signalled checkpoints in the checkpoint CCB, before
+	 * handling all other notifiers. */
+	RGXCheckCheckpointCCB(psDeviceNode);
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
+
+	/* Inform other services' devices that we have finished an operation */
+	PVRSRVCheckStatus(psDeviceNode);
+
+#if defined(SUPPORT_PDVFS) && defined(RGXFW_META_SUPPORT_2ND_THREAD)
+	/* Normally the firmware CCB only exists for the primary FW thread; when
+	 * PDVFS runs on the secondary FW thread, its CCB is processed here */
+	RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice);
+#endif
+
+	/* Process the Firmware CCB for pending commands */
+	RGXCheckFirmwareCCB(psDeviceNode->pvDevice);
+
+	/* Calibrate the GPU frequency and recorrelate Host and GPU timers (done every few seconds) */
+	RGXTimeCorrRestartPeriodic(psDeviceNode);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	/* Process Workload Estimation Specific commands from the FW */
+	WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice);
+#endif
+
+	if (psDevInfo->pvAPMISRData == NULL)
+	{
+		RGX_MISR_ProcessKCCBDeferredList(psDeviceNode);
+	}
+}
+#endif /* !defined(NO_HARDWARE) */
+
+
+#if defined(PDUMP)
+static PVRSRV_ERROR RGXPDumpBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+		PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PMR *psFWDataPMR;
+	IMG_DEV_PHYADDR sTmpAddr;
+	IMG_UINT32 ui32BootConfOffset, ui32ParamOffset;
+	PVRSRV_ERROR eError;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+	ui32BootConfOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA);
+	ui32BootConfOffset += RGXMIPSFW_BOOTLDR_CONF_OFFSET;
+
+	/* The physical addresses used by a pdump player will be different
+	 * from the ones we have put in the MIPS bootloader configuration data.
+	 * We have to tell the pdump player to replace the original values with the real ones.
+	 */
+	PDUMPCOMMENT("Pass new boot parameters to the FW");
+
+	/* Rogue Registers physical address */
+	ui32ParamOffset = ui32BootConfOffset + (RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET * sizeof(IMG_UINT64));
+
+	eError = PDumpRegLabelToMem64(RGX_PDUMPREG_NAME,
+			0x0,
+			psFWDataPMR,
+			ui32ParamOffset,
+			PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of Rogue registers phy address failed (%u)", eError));
+		return eError;
+	}
+
+	/* Page Table physical Address */
+	ui32ParamOffset = ui32BootConfOffset + (RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET * sizeof(IMG_UINT64));
+
+	eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sTmpAddr);
+	if (eError !=  PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXBootldrDataInit: MMU_AcquireBaseAddr failed (%u)",
+				eError));
+		return eError;
+	}
+
+	eError = PDumpPTBaseObjectToMem64(psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+			psFWDataPMR,
+			0,
+			ui32ParamOffset,
+			PDUMP_FLAGS_CONTINUOUS,
+			MMU_LEVEL_1,
+			sTmpAddr.uiAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of page tables phy address failed (%u)", eError));
+		return eError;
+	}
+
+	/* Stack physical address */
+	ui32ParamOffset = ui32BootConfOffset + (RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET * sizeof(IMG_UINT64));
+
+	eError = PDumpMemLabelToMem64(psFWDataPMR,
+			psFWDataPMR,
+			RGXGetFWImageSectionOffset(NULL, MIPS_STACK),
+			ui32ParamOffset,
+			PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of stack phy address failed (%u)", eError));
+		return eError;
+	}
+
+	return eError;
+}
+#endif /* PDUMP */
+
+
+PVRSRV_ERROR RGXVirtPopulateLMASubArenas(PVRSRV_DEVICE_NODE	*psDeviceNode,
+		IMG_UINT32          aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+		IMG_UINT32          aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+		IMG_BOOL            bEnableTrustedDeviceAceConfig)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	{
+		IMG_UINT32	ui32OS, ui32Region;
+
+		for (ui32OS = 0; ui32OS < GPUVIRT_VALIDATION_NUM_OS; ui32OS++)
+		{
+			for (ui32Region = 0; ui32Region < GPUVIRT_VALIDATION_NUM_REGIONS; ui32Region++)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "OS=%u, Region=%u, Min=%u, Max=%u", ui32OS, ui32Region, aui32OSidMin[ui32Region][ui32OS], aui32OSidMax[ui32Region][ui32OS]));
+			}
+		}
+
+		PopulateLMASubArenas(psDeviceNode, aui32OSidMin, aui32OSidMax);
+
+#if defined(EMULATOR)
+		if ((bEnableTrustedDeviceAceConfig) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)))
+		{
+			SetTrustedDeviceAceEnabled();
+		}
+#else
+		{
+			PVR_UNREFERENCED_PARAMETER(bEnableTrustedDeviceAceConfig);
+		}
+#endif
+	}
+#else
+	{
+		PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+		PVR_UNREFERENCED_PARAMETER(aui32OSidMin);
+		PVR_UNREFERENCED_PARAMETER(aui32OSidMax);
+		PVR_UNREFERENCED_PARAMETER(bEnableTrustedDeviceAceConfig);
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO   *psDevInfo,
+		PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+
+	/* Save information used on power transitions for later
+	 * (when RGXStart and RGXStop are executed)
+	 */
+	psDevInfo->sLayerParams.psDevInfo = psDevInfo;
+	psDevInfo->sLayerParams.psDevConfig = psDevConfig;
+#if defined(PDUMP)
+	psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+	{
+		IMG_DEV_PHYADDR sKernelMMUCtxPCAddr;
+
+		eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx,
+				&sKernelMMUCtxPCAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog"));
+			return eError;
+		}
+
+		psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr;
+	}
+	else
+	{
+		PMR *psFWCodePMR = (PMR *)(psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR);
+		PMR *psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+		IMG_DEV_PHYADDR sPhyAddr;
+		IMG_BOOL bValid;
+
+#if defined(SUPPORT_ALT_REGBASE)
+		psDevInfo->sLayerParams.sGPURegAddr = psDevConfig->sAltRegsGpuPBase;
+#else
+		/* The physical address of the GPU registers needs to be translated
+		 * in case we are in a LMA scenario
+		 */
+		PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+				1,
+				&sPhyAddr,
+				&(psDevConfig->sRegsCpuPBase));
+
+		psDevInfo->sLayerParams.sGPURegAddr = sPhyAddr;
+#endif
+
+		/* Register bank must be aligned to 512KB (as per the core integration) to
+		 * prevent the FW accessing incorrect registers */
+		if ((psDevInfo->sLayerParams.sGPURegAddr.uiAddr & 0x7FFFFU) != 0U)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Register bank must be aligned to 512KB, but current address (0x%016"IMG_UINT64_FMTSPECX") is not",
+						psDevInfo->sLayerParams.sGPURegAddr.uiAddr));
+			return PVRSRV_ERROR_INIT_FAILURE;
+		}
+
+		eError = RGXGetPhyAddr(psFWCodePMR,
+				&sPhyAddr,
+				RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE),
+				OSGetPageShift(), /* FW will be using the same page size as the OS */
+				1,
+				&bValid);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI code address"));
+			return eError;
+		}
+
+		psDevInfo->sLayerParams.sBootRemapAddr = sPhyAddr;
+
+		eError = RGXGetPhyAddr(psFWDataPMR,
+				&sPhyAddr,
+				RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA),
+				OSGetPageShift(),
+				1,
+				&bValid);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI data address"));
+			return eError;
+		}
+
+		psDevInfo->sLayerParams.sDataRemapAddr = sPhyAddr;
+
+		eError = RGXGetPhyAddr(psFWCodePMR,
+				&sPhyAddr,
+				RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE),
+				OSGetPageShift(),
+				1,
+				&bValid);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW exceptions address"));
+			return eError;
+		}
+
+		psDevInfo->sLayerParams.sCodeRemapAddr = sPhyAddr;
+
+		psDevInfo->sLayerParams.sTrampolineRemapAddr.uiAddr = psDevInfo->psTrampoline->sPhysAddr.uiAddr;
+
+#if defined(SUPPORT_DEVICE_PA0_AS_VALID)
+		psDevInfo->sLayerParams.bDevicePA0IsValid = psDevConfig->bDevicePA0IsValid;
+#else
+#if defined(LMA) || defined(TC_MEMORY_CONFIG)
+		/*
+		 * On LMA system, there is a high chance that address 0x0 is used by the GPU, e.g. TC.
+		 * In that case we don't need to protect the spurious MIPS accesses to address 0x0,
+		 * since that's a valid address to access.
+		 * The TC is usually built with HYBRID memory, but even in UMA we do not need
+		 * to apply the WA code on that system, so disable it to simplify.
+		 */
+		psDevInfo->sLayerParams.bDevicePA0IsValid = IMG_TRUE;
+#else
+		psDevInfo->sLayerParams.bDevicePA0IsValid = IMG_FALSE;
+#endif
+#endif
+
+
+	}
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	/* Send information used on power transitions to the trusted device as
+	 * in this setup the driver cannot start/stop the GPU and perform resets
+	 */
+	if (psDevConfig->pfnTDSetPowerParams)
+	{
+		PVRSRV_TD_POWER_PARAMS sTDPowerParams;
+
+		if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+		{
+			sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr;
+		}
+		else
+		{
+			sTDPowerParams.sGPURegAddr    = psDevInfo->sLayerParams.sGPURegAddr;
+			sTDPowerParams.sBootRemapAddr = psDevInfo->sLayerParams.sBootRemapAddr;
+			sTDPowerParams.sCodeRemapAddr = psDevInfo->sLayerParams.sCodeRemapAddr;
+			sTDPowerParams.sDataRemapAddr = psDevInfo->sLayerParams.sDataRemapAddr;
+		}
+		eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData,
+				&sTDPowerParams);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: TDSetPowerParams not implemented!"));
+		eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+#endif
+
+	return eError;
+}
+
+/*
+	RGXSystemHasFBCDCVersion31
+*/
+static IMG_BOOL RGXSystemHasFBCDCVersion31(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#if defined(SUPPORT_VALIDATION)
+	IMG_UINT32 ui32FBCDCVersionOverride = 0;
+	IMG_UINT32 ui32AppHintDefault;
+	void *pvAppHintState = NULL;
+
+	/* Read the override up front so that both branches below observe the
+	 * same value; previously the non-ERN branch tested a value that had
+	 * never been read from the apphint */
+	OSCreateKMAppHintState(&pvAppHintState);
+	ui32AppHintDefault = PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE;
+	OSGetKMAppHintUINT32(pvAppHintState, FBCDCVersionOverride,
+	                     &ui32AppHintDefault, &ui32FBCDCVersionOverride);
+	OSFreeKMAppHintState(pvAppHintState);
+#endif
+
+	if (RGX_IS_ERN_SUPPORTED(psDevInfo, 66622))
+	{
+#if defined(SUPPORT_VALIDATION)
+		if (ui32FBCDCVersionOverride > 0)
+		{
+			if (ui32FBCDCVersionOverride == 2)
+			{
+				return IMG_TRUE;
+			}
+		}
+		else
+#endif
+		{
+			if (psDeviceNode->psDevConfig->bHasFBCDCVersion31)
+			{
+				return IMG_TRUE;
+			}
+		}
+	}
+	else
+	{
+#if defined(SUPPORT_VALIDATION)
+		if (ui32FBCDCVersionOverride == 2)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+			         "%s: FBCDCVersionOverride forces FBC3.1 but this core doesn't support it!",
+			         __func__));
+		}
+#endif
+
+#if !defined(NO_HARDWARE)
+		if (psDeviceNode->psDevConfig->bHasFBCDCVersion31)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: System uses FBCDC3.1 but GPU doesn't support it!",
+			         __func__));
+		}
+#endif
+	}
+
+	return IMG_FALSE;
+}
+
+/*
+ * RGXInitDevPart2
+ */
+PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE	*psDeviceNode,
+		IMG_UINT32			ui32DeviceFlags,
+		IMG_UINT32			ui32HWPerfHostBufSizeKB,
+		IMG_UINT32			ui32HWPerfHostFilter,
+		RGX_ACTIVEPM_CONF		eActivePMConf)
+{
+	PVRSRV_ERROR			eError;
+	PVRSRV_RGXDEV_INFO		*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_DEV_POWER_STATE	eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+	PVRSRV_DEVICE_CONFIG	*psDevConfig = psDeviceNode->psDevConfig;
+
+#if defined(PDUMP)
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		RGXPDumpBootldrData(psDeviceNode, psDevInfo);
+	}
+#endif
+#if defined(TIMING) || defined(DEBUG)
+	OSUserModeAccessToPerfCountersEn();
+#endif
+
+	PDUMPCOMMENT("RGX Initialisation Part 2");
+
+	psDevInfo->ui32RegSize = psDevConfig->ui32RegsSize;
+	psDevInfo->sRegsPhysBase = psDevConfig->sRegsCpuPBase;
+
+	/* Initialise Device Flags */
+	psDevInfo->ui32DeviceFlags = 0;
+	RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE);
+
+	/* Allocate DVFS Table (needs to be allocated before GPU trace events
+	 *  component is initialised because there is a dependency between them) */
+	psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable)));
+	if (psDevInfo->psGpuDVFSTable == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXInitDevPart2: failed to allocate gpu dvfs table storage"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Initialise HWPerfHost buffer. */
+	if (RGXHWPerfHostInit(psDevInfo, ui32HWPerfHostBufSizeKB) == PVRSRV_OK)
+	{
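+		/* Only apply the caller-supplied filter when none has been
+		 * configured yet (e.g. via an apphint) */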
+		if (psDevInfo->ui32HWPerfHostFilter == 0)
+		{
+			RGXHWPerfHostSetEventFilter(psDevInfo, ui32HWPerfHostFilter);
+		}
+
+		/* If HWPerf enabled allocate all resources for the host side buffer. */
+		if (psDevInfo->ui32HWPerfHostFilter != 0)
+		{
+			if (RGXHWPerfHostInitOnDemandResources(psDevInfo) != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer on demand"
+						" initialisation failed."));
+			}
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer initialisation failed."));
+	}
+
+	/* Initialise lists of ZSBuffers */
+	eError = OSLockCreate(&psDevInfo->hLockZSBuffer);
+	PVR_ASSERT(eError == PVRSRV_OK);
+	dllist_init(&psDevInfo->sZSBufferHead);
+	psDevInfo->ui32ZSBufferCurrID = 1;
+
+	/* Initialise lists of growable Freelists */
+	eError = OSLockCreate(&psDevInfo->hLockFreeList);
+	PVR_ASSERT(eError == PVRSRV_OK);
+	dllist_init(&psDevInfo->sFreeListHead);
+	psDevInfo->ui32FreelistCurrID = 1;
+
+	if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+	{
+		eError = OSLockCreate(&psDevInfo->hDebugFaultInfoLock);
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+
+		eError = OSLockCreate(&psDevInfo->hMMUCtxUnregLock);
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		eError = OSLockCreate(&psDevInfo->hNMILock);
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	/* Setup GPU utilisation stats update callback */
+	eError = OSLockCreate(&psDevInfo->hGPUUtilLock);
+	PVR_ASSERT(eError == PVRSRV_OK);
+#if !defined(NO_HARDWARE)
+	psDevInfo->pfnGetGpuUtilStats = RGXGetGpuUtilStats;
+#endif
+
+	eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+	psDevInfo->eActivePMConf = eActivePMConf;
+
+	/* set-up the Active Power Mgmt callback */
+#if !defined(NO_HARDWARE)
+	{
+		RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+		IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM;
+		IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) ||
+				(eActivePMConf == RGX_ACTIVEPM_FORCE_ON);
+		/* Disable APM if in VZ mode */
+		bEnableAPM = bEnableAPM && PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE);
+
+		if (bEnableAPM)
+		{
+			eError = OSInstallMISR(&psDevInfo->pvAPMISRData,
+					RGX_MISRHandler_CheckFWActivePowerState,
+					psDeviceNode,
+					"RGX_CheckFWActivePower");
+			if (eError != PVRSRV_OK)
+			{
+				return eError;
+			}
+
+			/* Prevent the device being woken up before there is something to do. */
+			eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
+		}
+	}
+#endif
+
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM,
+			RGXQueryAPMState,
+			RGXSetAPMState,
+			psDeviceNode,
+			NULL);
+
+	RGXTimeCorrInitAppHintCallbacks(psDeviceNode);
+
+	/*
+		Register the device with the power manager.
+			Native/Host drivers: support power management
+			Guest drivers: do not currently support power management
+	 */
+	eError = PVRSRVRegisterPowerDevice(psDeviceNode,
+			&RGXPrePowerState, &RGXPostPowerState,
+			psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState,
+			&RGXPreClockSpeedChange, &RGXPostClockSpeedChange,
+			&RGXForcedIdleRequest, &RGXCancelForcedIdleRequest,
+			&RGXDustCountChange,
+			(IMG_HANDLE)psDeviceNode,
+			PVRSRV_DEV_POWER_STATE_OFF,
+			eDefaultPowerState);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXInitDevPart2: failed to register device with power manager"));
+		return eError;
+	}
+
+	eError = RGXSetPowerParams(psDevInfo, psDevConfig);
+	if (eError != PVRSRV_OK) return eError;
+
+#if defined(PDUMP)
+	/* Run RGXStop with the correct PDump flags to feed the last-frame deinit buffer */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_DEINIT, "RGX deinitialisation commands");
+
+	psDevInfo->sLayerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW;
+
+	if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		eError = RGXStop(&psDevInfo->sLayerParams);
+		if (eError != PVRSRV_OK) return eError;
+	}
+
+	psDevInfo->sLayerParams.ui32PdumpFlags &= ~(PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW);
+#endif
+
+#if !defined(NO_HARDWARE)
+	eError = RGXInstallProcessQueuesMISR(&psDevInfo->hProcessQueuesMISR, psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		if (psDevInfo->pvAPMISRData != NULL)
+		{
+			(void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+		}
+		return eError;
+	}
+
+	/* Register the interrupt handlers */
+	eError = OSInstallMISR(&psDevInfo->pvMISRData,
+			RGX_MISRHandler_Main,
+			psDeviceNode,
+			"RGX_Main");
+	if (eError != PVRSRV_OK)
+	{
+		if (psDevInfo->pvAPMISRData != NULL)
+		{
+			(void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+		}
+		(void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+		return eError;
+	}
+
+	eError = SysInstallDeviceLISR(psDevConfig->hSysData,
+			psDevConfig->ui32IRQ,
+			PVRSRV_MODNAME,
+			RGX_LISRHandler,
+			psDeviceNode,
+			&psDevInfo->pvLISRData);
+	if (eError != PVRSRV_OK)
+	{
+		if (psDevInfo->pvAPMISRData != NULL)
+		{
+			(void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+		}
+		(void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+		(void) OSUninstallMISR(psDevInfo->pvMISRData);
+		return eError;
+	}
+#endif
+
+#if defined(PDUMP)
+	if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_CACHE_HIERARCHY)))
+	{
+		if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) &&
+				!PVRSRVSystemSnoopingOfDeviceCache(psDevConfig))
+		{
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has NO cache snooping");
+		}
+		else
+		{
+			if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig))
+			{
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has CPU cache snooping");
+			}
+			if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig))
+			{
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has DEVICE cache snooping");
+			}
+		}
+	}
+#endif
+
+	psDevInfo->bDevInit2Done = IMG_TRUE;
+
+	return PVRSRV_OK;
+}
+
+#define VZ_RGX_FW_FILENAME_SUFFIX ".vz"
+#define RGX_FW_FILENAME_MAX_SIZE   ((sizeof(RGX_FW_FILENAME)+ \
+			RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX)))
+
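+/* Build the two candidate firmware image names for this core: one using
+ * RGX_BVNC_STR_FMTSPEC and one using RGX_BVNC_STRP_FMTSPEC, both given a
+ * ".vz" suffix when the driver is not running in native mode */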
+static void _GetFWFileName(PVRSRV_DEVICE_NODE *psDeviceNode,
+		IMG_CHAR *pszFWFilenameStr,
+		IMG_CHAR *pszFWpFilenameStr)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	const IMG_CHAR * const pszFWFilenameSuffix =
+			PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) ? "" : VZ_RGX_FW_FILENAME_SUFFIX;
+
+	OSSNPrintf(pszFWFilenameStr, RGX_FW_FILENAME_MAX_SIZE,
+			"%s." RGX_BVNC_STR_FMTSPEC "%s",
+			RGX_FW_FILENAME,
+			psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+			psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C,
+			pszFWFilenameSuffix);
+
+	OSSNPrintf(pszFWpFilenameStr, RGX_FW_FILENAME_MAX_SIZE,
+			"%s." RGX_BVNC_STRP_FMTSPEC "%s",
+			RGX_FW_FILENAME,
+			psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+			psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C,
+			pszFWFilenameSuffix);
+}
+
+const void * RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode,
+		struct RGXFW **ppsRGXFW)
+{
+	IMG_CHAR aszFWFilenameStr[RGX_FW_FILENAME_MAX_SIZE];
+	IMG_CHAR aszFWpFilenameStr[RGX_FW_FILENAME_MAX_SIZE];
+	IMG_CHAR *pszLoadedFwStr;
+
+	/* Prepare the image filenames to use in the following code */
+	_GetFWFileName(psDeviceNode, aszFWFilenameStr, aszFWpFilenameStr);
+
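+	/* Try the candidate images in decreasing order of specificity: the
+	 * exact BVNC name, then the 'p' variant, then the generic name */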
+	/* Get pointer to Firmware image */
+	pszLoadedFwStr = aszFWFilenameStr;
+	*ppsRGXFW = RGXLoadFirmware(psDeviceNode, pszLoadedFwStr);
+	if (*ppsRGXFW == NULL)
+	{
+		pszLoadedFwStr = aszFWpFilenameStr;
+		*ppsRGXFW = RGXLoadFirmware(psDeviceNode, pszLoadedFwStr);
+		if (*ppsRGXFW == NULL)
+		{
+			pszLoadedFwStr = RGX_FW_FILENAME;
+			*ppsRGXFW = RGXLoadFirmware(psDeviceNode, pszLoadedFwStr);
+			if (*ppsRGXFW == NULL)
+			{
+				PVR_DPF((PVR_DBG_FATAL, "All RGX Firmware image loads failed for '%s'",
+						aszFWFilenameStr));
+				return NULL;
+			}
+		}
+	}
+
+	PVR_LOG(("RGX Firmware image '%s' loaded", pszLoadedFwStr));
+
+	return RGXFirmwareData(*ppsRGXFW);
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+	PVRSRV_ERROR			eError;
+	RGXFWIF_KCCB_CMD		sKccbCmd;
+
+	/* Fill in the command structure with the parameters needed */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT;
+
+	eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+			RGXFWIF_DM_GP,
+			&sKccbCmd,
+			PDUMP_FLAGS_CONTINUOUS);
+
+	return eError;
+}
+#endif
+
+PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	/* set up fw memory contexts */
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR        eError;
+
+	/* Register callbacks for creation of device memory contexts */
+	psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+	psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+	/* Create the memory context for the firmware. */
+	eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_META,
+	                             IMG_FALSE, /* Does not require MCU fence allocation. */
+	                             &psDevInfo->psKernelDevmemCtx);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXInitCreateFWKernelMemoryContext: Failed DevmemCreateContext (%u)", eError));
+		goto failed_to_create_ctx;
+	}
+
+	eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_MAIN_HEAP_IDENT,
+			&psDevInfo->psFirmwareMainHeap);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXInitCreateFWKernelMemoryContext: Failed DevmemFindHeapByName (%u)", eError));
+		goto failed_to_find_heap;
+	}
+
+	eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_CONFIG_HEAP_IDENT,
+			&psDevInfo->psFirmwareConfigHeap);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXInitCreateFWKernelMemoryContext: Failed DevmemFindHeapByName (%u)", eError));
+		goto failed_to_find_heap;
+	}
+
+	/* Perform additional VZ-specific initialisation */
+	eError = RGXVzInitCreateFWKernelMemoryContext(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"RGXInitCreateFWKernelMemoryContext: Failed RGXVzInitCreateFWKernelMemoryContext (%u)",
+				eError));
+		goto failed_to_find_heap;
+	}
+
+	return eError;
+
+	failed_to_find_heap:
+	/*
+	 * Clear the mem context create callbacks before destroying the RGX firmware
+	 * context to avoid a spurious callback.
+	 */
+	psDeviceNode->pfnRegisterMemoryContext = NULL;
+	psDeviceNode->pfnUnregisterMemoryContext = NULL;
+	DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+	psDevInfo->psKernelDevmemCtx = NULL;
+	failed_to_create_ctx:
+	return eError;
+}
+
+void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR        eError;
+
+	RGXVzDeInitDestroyFWKernelMemoryContext(psDeviceNode);
+
+	/*
+	 * Clear the mem context create callbacks before destroying the RGX firmware
+	 * context to avoid a spurious callback.
+	 */
+	psDeviceNode->pfnRegisterMemoryContext = NULL;
+	psDeviceNode->pfnUnregisterMemoryContext = NULL;
+
+	if (psDevInfo->psKernelDevmemCtx)
+	{
+		eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+		/* FIXME - this should return void */
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+}
+
+#if defined(RGXFW_ALIGNCHECKS)
+static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode,
+		IMG_UINT32 ui32AlignChecksSize,
+		IMG_UINT32 aui32AlignChecks[])
+{
+	static IMG_UINT32 aui32AlignChecksKM[] = {RGXFW_ALIGN_CHECKS_INIT_KM};
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+	IMG_UINT32 i, *paui32FWAlignChecks;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Skip the alignment check if the driver is a guest,
+	   since there is no firmware to check against */
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, eError);
+
+	if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAlignmentCheckKM: FW Alignment Check"
+				" Mem Descriptor is NULL"));
+		return PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc,
+			(void **) &paui32FWAlignChecks);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVAlignmentCheckKM: Failed to acquire"
+				" kernel address for alignment checks (%u)", eError));
+		return eError;
+	}
+
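+	/* Skip the KM checks and their count word at the start of the FW
+	 * buffer; the UM section that follows is a count followed by the
+	 * values (layout assumed from the pointer arithmetic below) */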
+	paui32FWAlignChecks += ARRAY_SIZE(aui32AlignChecksKM) + 1;
+	if (*paui32FWAlignChecks++ != ui32AlignChecksSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAlignmentCheckKM: Mismatch"
+				" in number of structures to check."));
+		eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+		goto return_;
+	}
+
+	for (i = 0; i < ui32AlignChecksSize; i++)
+	{
+		if (aui32AlignChecks[i] != paui32FWAlignChecks[i])
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PVRSRVAlignmentCheckKM: Check for"
+					" structured alignment failed."));
+			eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+			goto return_;
+		}
+	}
+
+	return_:
+
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+
+	return eError;
+}
+#endif
+
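+/* Allocate a firmware memory region via one of three compile-time paths:
+ * dedicated FW memory, a normal exportable allocation, or a secure import
+ * on trusted devices */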
+static
+PVRSRV_ERROR RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode,
+		IMG_DEVMEM_SIZE_T ui32Size,
+		IMG_UINT32 uiMemAllocFlags,
+		PVRSRV_TD_FW_MEM_REGION eRegion,
+		const IMG_PCHAR pszText,
+		DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_ERROR eError;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift();
+
+#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY)
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		uiLog2Align = RGXMIPSFW_LOG2_PAGE_SIZE_64K;
+	}
+#endif
+
+#if defined(SUPPORT_DEDICATED_FW_MEMORY)
+	PVR_UNREFERENCED_PARAMETER(eRegion);
+
+	PDUMPCOMMENT("Allocate dedicated FW %s memory",
+			eRegion == PVRSRV_DEVICE_FW_CODE_REGION ? "code" :
+			eRegion == PVRSRV_DEVICE_FW_COREMEM_CODE_REGION ? "coremem code" :
+			eRegion == PVRSRV_DEVICE_FW_COREMEM_DATA_REGION ? "coremem data" :
+			"private data");
+
+	eError = DevmemAllocateDedicatedFWMem(psDeviceNode,
+			ui32Size,
+			uiLog2Align,
+			uiMemAllocFlags,
+			pszText,
+			ppsMemDescPtr);
+	return eError;
+#elif !defined(SUPPORT_TRUSTED_DEVICE)
+	uiMemAllocFlags |= PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PVR_UNREFERENCED_PARAMETER(eRegion);
+
+	PDUMPCOMMENT("Allocate FW %s memory",
+			eRegion == PVRSRV_DEVICE_FW_CODE_REGION ? "code" :
+			eRegion == PVRSRV_DEVICE_FW_COREMEM_CODE_REGION ? "coremem code" :
+			eRegion == PVRSRV_DEVICE_FW_COREMEM_DATA_REGION ? "coremem data" :
+			"private data");
+
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+			ui32Size,
+			1 << uiLog2Align,
+			uiMemAllocFlags,
+			pszText,
+			ppsMemDescPtr);
+	return eError;
+#else
+	PDUMPCOMMENT("Import secure FW %s memory",
+			eRegion == PVRSRV_DEVICE_FW_CODE_REGION ? "code" :
+			eRegion == PVRSRV_DEVICE_FW_COREMEM_CODE_REGION ? "coremem code" :
+			eRegion == PVRSRV_DEVICE_FW_COREMEM_DATA_REGION ? "coremem data" :
+			"private data");
+
+	eError = DevmemImportTDFWCode(psDeviceNode,
+			ui32Size,
+			uiLog2Align,
+			uiMemAllocFlags,
+			eRegion,
+			ppsMemDescPtr);
+	return eError;
+#endif
+}
+
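+/* As RGXAllocateFWMemoryRegion, but FW data has an additional secure-buffer
+ * path on trusted devices with a MIPS core and a physical bus wider than
+ * 32 bits; other configurations fall back to an exportable allocation */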
+static
+PVRSRV_ERROR RGXAllocateFWDataRegion(PVRSRV_DEVICE_NODE *psDeviceNode,
+		IMG_DEVMEM_SIZE_T ui32FWDataAllocSize,
+		IMG_UINT32 uiMemAllocFlags,
+		PVRSRV_TD_FW_MEM_REGION eRegion,
+		const IMG_PCHAR pszText,
+		DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_ERROR eError;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift();
+
+#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY) || defined(SUPPORT_TRUSTED_DEVICE)
+	/* Also needed by the SUPPORT_TRUSTED_DEVICE path below, which would
+	 * otherwise fail to compile without SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY */
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#endif
+
+#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY)
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		uiLog2Align = RGXMIPSFW_LOG2_PAGE_SIZE_64K;
+	}
+#endif
+
+#if defined(SUPPORT_DEDICATED_FW_MEMORY)
+	uiMemAllocFlags |= PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	eError = RGXAllocateFWMemoryRegion(psDeviceNode,
+			ui32FWDataAllocSize,
+			uiMemAllocFlags,
+			PVRSRV_DEVICE_FW_PRIVATE_DATA_REGION,
+			pszText,
+			ppsMemDescPtr);
+
+	return eError;
+
+#elif defined(SUPPORT_TRUSTED_DEVICE)
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) &&
+		RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32)
+	{
+#if defined(NO_HARDWARE)
+		IMG_UINT64 ui64FWDataHandle;
+
+		PDUMPCOMMENT("Import secure FW private data memory");
+
+		eError = DevmemImportTDSecureBuf(psDeviceNode,
+				ui32FWDataAllocSize,
+				uiLog2Align,
+				uiMemAllocFlags,
+				ppsMemDescPtr,
+				&ui64FWDataHandle);
+#else
+		/* Reuse code for secure FW memory */
+		eError = RGXAllocateFWMemoryRegion(psDeviceNode,
+				ui32FWDataAllocSize,
+				uiMemAllocFlags,
+				PVRSRV_DEVICE_FW_PRIVATE_DATA_REGION,
+				pszText,
+				ppsMemDescPtr);
+#endif
+	}
+	else
+#endif
+	{
+		uiMemAllocFlags |= PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+				PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+				PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+		PDUMPCOMMENT("Allocate FW %s memory",
+				eRegion == PVRSRV_DEVICE_FW_COREMEM_DATA_REGION ? "coremem data" :
+				"private data");
+
+		eError = DevmemFwAllocateExportable(psDeviceNode,
+				ui32FWDataAllocSize,
+				1ULL << uiLog2Align,
+				uiMemAllocFlags,
+				pszText,
+				ppsMemDescPtr);
+	}
+
+	return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver
+
+ @Description
+
+ Validate the FW build options against the KM driver build options (KM build options only).
+
+ This check is redundant, because the next check covers the same bits: if the
+ client and server are build-compatible, and the client and firmware are
+ build-compatible, then the server and firmware are build-compatible as well.
+
+ It is kept because it produces clearer error messages when an incompatibility occurs.
+
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_INIT *psRGXFWInit)
+{
+#if !defined(NO_HARDWARE)
+	IMG_UINT32			ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch;
+
+	if (psRGXFWInit == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+
+	ui32BuildOptionsFWKMPart = psRGXFWInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_KM;
+
+	if (ui32BuildOptions != ui32BuildOptionsFWKMPart)
+	{
+		ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart;
+#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
+		/* Mask out the debug flag option, as combinations of debug vs release in UM & KM are supported */
+		ui32BuildOptionsMismatch &= OPTIONS_STRICT;
+#endif
+		if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; "
+					"extra options present in the KM driver: (0x%x). Please check rgx_options.h",
+					ui32BuildOptions & ui32BuildOptionsMismatch ));
+			return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+		}
+
+		if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; "
+					"extra options present in Firmware: (0x%x). Please check rgx_options.h",
+					ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch ));
+			return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+		}
+		PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ."));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]"));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK version against driver DDK version
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+		RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+	IMG_UINT32			ui32DDKVersion;
+	PVRSRV_ERROR		eError;
+
+	ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+#endif
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Compatibility check: KM driver and FW DDK version");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+			offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+			offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion),
+			ui32DDKVersion,
+			0xffffffff,
+			PDUMP_POLL_OPERATOR_EQUAL,
+			PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	if (psRGXFWInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion)
+	{
+		PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware DDK version (%u.%u).",
+				PVRVERSION_MAJ, PVRVERSION_MIN,
+				PVRVERSION_UNPACK_MAJ(psRGXFWInit->sRGXCompChecks.ui32DDKVersion),
+				PVRVERSION_UNPACK_MIN(psRGXFWInit->sRGXCompChecks.ui32DDKVersion)));
+		eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+		PVR_DBG_BREAK;
+		return eError;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK version (%u.%u) match. [ OK ]",
+				PVRVERSION_MAJ, PVRVERSION_MIN,
+				PVRVERSION_MAJ, PVRVERSION_MIN));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK build against driver DDK build
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+		RGXFWIF_INIT *psRGXFWInit)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+	IMG_UINT32			ui32DDKBuild;
+
+	ui32DDKBuild = PVRVERSION_BUILD;
+#endif
+
+#if defined(PDUMP) && defined(PVRSRV_STRICT_COMPAT_CHECK)
+	PDUMPCOMMENT("Compatibility check: KM driver and FW DDK build");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+			offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+			offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild),
+			ui32DDKBuild,
+			0xffffffff,
+			PDUMP_POLL_OPERATOR_EQUAL,
+			PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	if (psRGXFWInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild)
+	{
+		PVR_LOG(("(WARN) RGXDevInitCompatCheck: Different driver DDK build version (%d) / Firmware DDK build version (%d).",
+				ui32DDKBuild, psRGXFWInit->sRGXCompChecks.ui32DDKBuild));
+#if defined(PVRSRV_STRICT_COMPAT_CHECK)
+		eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH;
+		PVR_DBG_BREAK;
+		return eError;
+#endif
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. [ OK ]",
+				ui32DDKBuild, psRGXFWInit->sRGXCompChecks.ui32DDKBuild));
+	}
+#endif
+	return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_BVNC_FWAgainstDriver
+
+ @Description
+
+ Validate FW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+		RGXFWIF_INIT *psRGXFWInit)
+{
+#if !defined(NO_HARDWARE)
+	IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC;
+#endif
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+	PVRSRV_ERROR				eError;
+
+	sBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B,
+					psDevInfo->sDevFeatureCfg.ui32V,
+					psDevInfo->sDevFeatureCfg.ui32N,
+					psDevInfo->sDevFeatureCfg.ui32C);
+#endif
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (struct version)");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+			offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+			offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+			offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+			sBVNC.ui32LayoutVersion,
+			0xffffffff,
+			PDUMP_POLL_OPERATOR_EQUAL,
+			PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+	}
+
+	PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BVNC part - Lower 32 bits)");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+			offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+			offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+			offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC),
+			(IMG_UINT32)sBVNC.ui64BVNC,
+			0xffffffff,
+			PDUMP_POLL_OPERATOR_EQUAL,
+			PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+	}
+
+	PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BVNC part - Higher 32 bits)");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+			offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+			offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+			offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) +
+			sizeof(IMG_UINT32),
+			(IMG_UINT32)(sBVNC.ui64BVNC >> 32),
+			0xffffffff,
+			PDUMP_POLL_OPERATOR_EQUAL,
+			PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	RGX_BVNC_EQUAL(sBVNC, psRGXFWInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC);
+
+	if (!bCompatibleAll)
+	{
+		if (!bCompatibleVersion)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%u) and firmware (%u).",
+					__func__,
+					sBVNC.ui32LayoutVersion,
+					psRGXFWInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+
+		if (!bCompatibleBVNC)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%u.%u.%u.%u) and Firmware BVNC (%u.%u.%u.%u)",
+					RGX_BVNC_PACKED_EXTR_B(sBVNC),
+					RGX_BVNC_PACKED_EXTR_V(sBVNC),
+					RGX_BVNC_PACKED_EXTR_N(sBVNC),
+					RGX_BVNC_PACKED_EXTR_C(sBVNC),
+					RGX_BVNC_PACKED_EXTR_B(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+					RGX_BVNC_PACKED_EXTR_V(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+					RGX_BVNC_PACKED_EXTR_N(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+					RGX_BVNC_PACKED_EXTR_C(psRGXFWInit->sRGXCompChecks.sFWBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware BVNC and KM driver BVNC match. [ OK ]"));
+	}
+#endif
+	return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_BVNC_HWAgainstDriver
+
+ @Description
+
+ Validate HW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+		RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP) || !defined(NO_HARDWARE)
+	IMG_UINT64 ui64MaskBVNC = RGX_BVNC_PACK_MASK_B |
+			RGX_BVNC_PACK_MASK_V |
+			RGX_BVNC_PACK_MASK_N |
+			RGX_BVNC_PACK_MASK_C;
+
+	PVRSRV_ERROR				eError;
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC);
+#endif
+#if defined(PDUMP)
+	PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+
+#if !defined(NO_HARDWARE)
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC);
+	IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC;
+#endif
+
+	if (psDevInfo->bIgnoreHWReportedBVNC)
+	{
+		PVR_LOG(("BVNC compatibility checks between driver and HW are disabled (AppHint override)"));
+		return PVRSRV_OK;
+	}
+
+#if defined(PDUMP) || !defined(NO_HARDWARE)
+#if defined(COMPAT_BVNC_MASK_V)
+	ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_V;
+#endif
+#if defined(COMPAT_BVNC_MASK_N)
+	ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_N;
+#endif
+#if defined(COMPAT_BVNC_MASK_C)
+	ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C;
+#endif
+
+	sSWBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B,
+									psDevInfo->sDevFeatureCfg.ui32V,
+									psDevInfo->sDevFeatureCfg.ui32N,
+									psDevInfo->sDevFeatureCfg.ui32C);
+
+	if (RGX_IS_BRN_SUPPORTED(psDevInfo, 38344) && (psDevInfo->sDevFeatureCfg.ui32C >= 10))
+	{
+		ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C;
+	}
+
+	if (ui64MaskBVNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_V | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C))
+	{
+		PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.",
+				((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_B))?("B"):("")),
+				((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_V))?("V"):("")),
+				((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_N))?("N"):("")),
+				((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_C))?("C"):(""))));
+	}
+#endif
+
+#if defined(PDUMP)
+	PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: Layout version of compchecks struct");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+			offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+			offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+			offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+			sSWBVNC.ui32LayoutVersion,
+			0xffffffff,
+			PDUMP_POLL_OPERATOR_EQUAL,
+			ui32PDumpFlags);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+
+	if (ui64MaskBVNC & (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C))
+	{
+		PDUMPIF("DISABLE_HWBNC_CHECK", ui32PDumpFlags);
+		PDUMPELSE("DISABLE_HWBNC_CHECK", ui32PDumpFlags);
+		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: HW BNC and FW BNC (Lower 32 bits)");
+		eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+				offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+				offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+				offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC),
+				(IMG_UINT32)sSWBVNC.ui64BVNC,
+				(IMG_UINT32)(ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V),
+				PDUMP_POLL_OPERATOR_EQUAL,
+				ui32PDumpFlags);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+			return eError;
+		}
+
+		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: HW BNC and FW BNC (Higher 32 bits)");
+		eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+				offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+				offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+				offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) +
+				sizeof(IMG_UINT32),
+				(IMG_UINT32)(sSWBVNC.ui64BVNC >> 32),
+				(IMG_UINT32)((ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V) >> 32),
+				PDUMP_POLL_OPERATOR_EQUAL,
+				ui32PDumpFlags);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+			return eError;
+		}
+
+		PDUMPFI("DISABLE_HWBNC_CHECK", ui32PDumpFlags);
+	}
+	if (ui64MaskBVNC & RGX_BVNC_PACK_MASK_V)
+	{
+		PDUMPIF("DISABLE_HWV_CHECK", ui32PDumpFlags);
+		PDUMPELSE("DISABLE_HWV_CHECK", ui32PDumpFlags);
+
+		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: HW V and FW V");
+		eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+					offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+					offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+					offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) +
+					((RGX_BVNC_PACK_SHIFT_V >= 32) ? sizeof(IMG_UINT32) : 0),
+					(IMG_UINT32)(sSWBVNC.ui64BVNC >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 32 : 0)),
+					RGX_BVNC_PACK_MASK_V >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 32 : 0),
+					PDUMP_POLL_OPERATOR_EQUAL,
+					ui32PDumpFlags);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+			return eError;
+		}
+		PDUMPFI("DISABLE_HWV_CHECK", ui32PDumpFlags);
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	sHWBVNC = psRGXFWInit->sRGXCompChecks.sHWBVNC;
+
+	sHWBVNC.ui64BVNC &= ui64MaskBVNC;
+	sSWBVNC.ui64BVNC &= ui64MaskBVNC;
+
+	RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC);
+
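+	/* BRN 42480 workaround (as implied by the check below): HW reporting
+	 * BVNC 1.69.4.4 is treated as compatible with a driver built for
+	 * BVNC 1.76.4.6 */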
+	if (RGX_IS_BRN_SUPPORTED(psDevInfo, 42480))
+	{
+		if (!bCompatibleAll && bCompatibleVersion)
+		{
+			if ((RGX_BVNC_PACKED_EXTR_B(sSWBVNC) == 1) &&
+				(RGX_BVNC_PACKED_EXTR_V(sSWBVNC) == 76) &&
+				(RGX_BVNC_PACKED_EXTR_N(sSWBVNC) == 4) &&
+				(RGX_BVNC_PACKED_EXTR_C(sSWBVNC) == 6))
+			{
+				if ((RGX_BVNC_PACKED_EXTR_B(sHWBVNC) == 1) &&
+					(RGX_BVNC_PACKED_EXTR_V(sHWBVNC) == 69) &&
+					(RGX_BVNC_PACKED_EXTR_N(sHWBVNC) == 4) &&
+					(RGX_BVNC_PACKED_EXTR_C(sHWBVNC) == 4))
+				{
+					bCompatibleBVNC = IMG_TRUE;
+					bCompatibleAll = IMG_TRUE;
+				}
+			}
+		}
+	}
+
+	if (!bCompatibleAll)
+	{
+		if (!bCompatibleVersion)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).",
+					__func__,
+					sHWBVNC.ui32LayoutVersion,
+					sSWBVNC.ui32LayoutVersion));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+
+		if (!bCompatibleBVNC)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d).",
+					RGX_BVNC_PACKED_EXTR_B(sHWBVNC),
+					RGX_BVNC_PACKED_EXTR_V(sHWBVNC),
+					RGX_BVNC_PACKED_EXTR_N(sHWBVNC),
+					RGX_BVNC_PACKED_EXTR_C(sHWBVNC),
+					RGX_BVNC_PACKED_EXTR_B(sSWBVNC),
+					RGX_BVNC_PACKED_EXTR_V(sSWBVNC),
+					RGX_BVNC_PACKED_EXTR_N(sSWBVNC),
+					RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d) match. [ OK ]",
+				RGX_BVNC_PACKED_EXTR_B(sHWBVNC),
+				RGX_BVNC_PACKED_EXTR_V(sHWBVNC),
+				RGX_BVNC_PACKED_EXTR_N(sHWBVNC),
+				RGX_BVNC_PACKED_EXTR_C(sHWBVNC),
+				RGX_BVNC_PACKED_EXTR_B(sSWBVNC),
+				RGX_BVNC_PACKED_EXTR_V(sSWBVNC),
+				RGX_BVNC_PACKED_EXTR_N(sSWBVNC),
+				RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver
+
+ @Description
+
+ Validate the HW FW processor version (META or MIPS) against the version
+ expected by the driver
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+		RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+	PVRSRV_ERROR		eError;
+#endif
+#if defined(PDUMP)
+	PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+	IMG_UINT32	ui32FWCoreIDValue = 0;
+	IMG_CHAR *pcRGXFW_PROCESSOR = NULL;
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		ui32FWCoreIDValue = RGXMIPSFW_CORE_ID_VALUE;
+		pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS;
+	}
+	else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+	{
+		switch (RGX_GET_FEATURE_VALUE(psDevInfo, META))
+		{
+		case MTP218: ui32FWCoreIDValue = RGX_CR_META_MTP218_CORE_ID_VALUE; break;
+		case MTP219: ui32FWCoreIDValue = RGX_CR_META_MTP219_CORE_ID_VALUE; break;
+		case LTP218: ui32FWCoreIDValue = RGX_CR_META_LTP218_CORE_ID_VALUE; break;
+		case LTP217: ui32FWCoreIDValue = RGX_CR_META_LTP217_CORE_ID_VALUE; break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,"%s: Undefined FW_CORE_ID_VALUE", __func__));
+			PVR_ASSERT(0);
+		}
+		pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Undefined FW_CORE_ID_VALUE", __func__));
+		PVR_ASSERT(0);
+	}
+
+#if defined(PDUMP)
+	PDUMPIF("DISABLE_HWMETA_CHECK", ui32PDumpFlags);
+	PDUMPELSE("DISABLE_HWMETA_CHECK", ui32PDumpFlags);
+	PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: KM driver and HW FW Processor version");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+			offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+			offsetof(RGXFWIF_COMPCHECKS, ui32FWProcessorVersion),
+			ui32FWCoreIDValue,
+			0xffffffff,
+			PDUMP_POLL_OPERATOR_EQUAL,
+			ui32PDumpFlags);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+	PDUMPFI("DISABLE_HWMETA_CHECK", ui32PDumpFlags);
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	if (psRGXFWInit->sRGXCompChecks.ui32FWProcessorVersion != ui32FWCoreIDValue)
+	{
+		PVR_LOG(("RGXDevInitCompatCheck: Incompatible driver %s version (%d) / HW %s version (%d).",
+				pcRGXFW_PROCESSOR,
+				ui32FWCoreIDValue,
+				pcRGXFW_PROCESSOR,
+				psRGXFWInit->sRGXCompChecks.ui32FWProcessorVersion));
+		eError = PVRSRV_ERROR_FWPROCESSOR_MISMATCH;
+		PVR_DBG_BREAK;
+		return eError;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Compatible driver %s version (%d) / HW %s version (%d) [OK].",
+				pcRGXFW_PROCESSOR,
+				ui32FWCoreIDValue,
+				pcRGXFW_PROCESSOR,
+				psRGXFWInit->sRGXCompChecks.ui32FWProcessorVersion));
+	}
+#endif
+	return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_StoreBVNCInUMSharedMem
+
+ @Description
+
+Store the BVNC of the core being handled in memory shared with UM, for the
+compatibility check performed by the UM part of the driver.
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - PVRSRV_OK on success or appropriate error code
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_StoreBVNCInUMSharedMem(PVRSRV_RGXDEV_INFO *psDevInfo,
+																RGXFWIF_INIT *psRGXFWInit)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 *pui32InfoPage = PVRSRVGetPVRSRVData()->pui32InfoPage;
+	PVR_ASSERT(pui32InfoPage);
+
+#if !defined(NO_HARDWARE)
+	PVR_UNREFERENCED_PARAMETER(psDevInfo);
+	PVR_ASSERT(psRGXFWInit);
+
+	pui32InfoPage[CORE_ID_BRANCH] = RGX_BVNC_PACKED_EXTR_B(psRGXFWInit->sRGXCompChecks.sFWBVNC);
+	pui32InfoPage[CORE_ID_VERSION] = RGX_BVNC_PACKED_EXTR_V(psRGXFWInit->sRGXCompChecks.sFWBVNC);
+	pui32InfoPage[CORE_ID_NUMBER_OF_SCALABLE_UNITS] = RGX_BVNC_PACKED_EXTR_N(psRGXFWInit->sRGXCompChecks.sFWBVNC);
+	pui32InfoPage[CORE_ID_CONFIG] = RGX_BVNC_PACKED_EXTR_C(psRGXFWInit->sRGXCompChecks.sFWBVNC);
+#else
+	PVR_UNREFERENCED_PARAMETER(psRGXFWInit);
+	PVR_ASSERT(psDevInfo);
+
+	pui32InfoPage[CORE_ID_BRANCH] = psDevInfo->sDevFeatureCfg.ui32B;
+	pui32InfoPage[CORE_ID_VERSION] = psDevInfo->sDevFeatureCfg.ui32V;
+	pui32InfoPage[CORE_ID_NUMBER_OF_SCALABLE_UNITS] = psDevInfo->sDevFeatureCfg.ui32N;
+	pui32InfoPage[CORE_ID_CONFIG] = psDevInfo->sDevFeatureCfg.ui32C;
+#endif /* !defined(NO_HARDWARE) */
+
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck
+
+ @Description
+
+ Check compatibility of host driver and firmware (DDK and build options)
+ for RGX devices at services/device initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_INIT		*psRGXFWInit = NULL;
+#if !defined(NO_HARDWARE)
+	IMG_UINT32			ui32RegValue;
+	IMG_UINT8			ui8FwOsCount;
+
+	/* Retrieve the FW information */
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+			(void **)&psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to acquire kernel fw compatibility check info (%u)",
+				__func__, eError));
+		return eError;
+	}
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		if (*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+		{
+			/* No need to wait if the FW has already updated the values */
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	ui32RegValue = 0;
+
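+	/* On META cores (non-guest drivers), check that thread 0 is running
+	 * before trusting the compatibility data below */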
+	if ((!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST)) &&
+			RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+	{
+		eError = RGXReadMETAAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? (%u)",
+					__func__, eError));
+			goto chk_exit;
+		}
+
+		if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT))
+		{
+			eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED;
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: RGX META is not running. Is the GPU correctly powered up? %d (%u)",
+					__func__, psRGXFWInit->sRGXCompChecks.bUpdated, eError));
+			goto chk_exit;
+		}
+	}
+
+	if (!*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+	{
+		eError = PVRSRV_ERROR_TIMEOUT;
+		PVR_DPF((PVR_DBG_ERROR, "%s: GPU Firmware not responding: failed to supply compatibility info (%u)",
+				__func__, eError));
+		goto chk_exit;
+	}
+
+	ui8FwOsCount = psRGXFWInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport;
+	if ((PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) && (ui8FwOsCount > 1)) ||
+		(PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST) && (ui8FwOsCount != RGXFW_NUM_OS)))
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)",
+				__func__, (PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE)) ? (1) : (RGXFW_NUM_OS), ui8FwOsCount));
+	}
+#endif /* !defined(NO_HARDWARE) */
+
+	eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psRGXFWInit);
+		if (eError != PVRSRV_OK)
+		{
+			goto chk_exit;
+		}
+
+		eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psRGXFWInit);
+		if (eError != PVRSRV_OK)
+		{
+			goto chk_exit;
+		}
+	}
+	eError = RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = RGXDevInitCompatCheck_StoreBVNCInUMSharedMem(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to store compatibility info for UM consumption (%u)",
+				__func__, eError));
+		goto chk_exit;
+	}
+
+#if !defined(NO_HARDWARE) && defined(SUPPORT_PDVFS) && !defined(RGXFW_META_SUPPORT_2ND_THREAD)
+	/* If the FW is too old the reactive timer needs to be enabled on the Host. */
+	if (psDevInfo->psFWIfOSConfig->ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_PDVFS_HOST_REACTIVE_TIMER)
+	{
+		psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer =
+				OSAddTimer((PFN_TIMER_FUNC)PDVFSRequestReactiveUpdate,
+						psDevInfo,
+						PDVFS_REACTIVE_INTERVAL_MS);
+
+		OSEnableTimer(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer);
+	}
+#endif
+
+	eError = PVRSRV_OK;
+	chk_exit:
+#if !defined(NO_HARDWARE)
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+#endif
+	return eError;
+}
+
+/**************************************************************************/ /*!
+@Function       RGXSoftReset
+@Description    Resets some modules of the RGX device
+@Input          psDeviceNode		Device node
+@Input          ui64ResetValue1 A mask for which each bit set corresponds
+                                to a module to reset (via the SOFT_RESET
+                                register).
+@Input          ui64ResetValue2 A mask for which each bit set corresponds
+                                to a module to reset (via the SOFT_RESET2
+                                register).
+@Return         PVRSRV_ERROR
+ */ /***************************************************************************/
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode,
+		IMG_UINT64  ui64ResetValue1,
+		IMG_UINT64  ui64ResetValue2)
+{
+	PVRSRV_RGXDEV_INFO        *psDevInfo;
+	IMG_BOOL	bSoftReset = IMG_FALSE;
+	IMG_UINT64	ui64SoftResetMask = 0;
+
+	PVR_ASSERT(psDeviceNode != NULL);
+	PVR_ASSERT(psDeviceNode->pvDevice != NULL);
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	/* the device info */
+	psDevInfo = psDeviceNode->pvDevice;
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE))
+	{
+		ui64SoftResetMask = RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL;
+	}
+	else
+	{
+		ui64SoftResetMask = RGX_CR_SOFT_RESET_MASKFULL;
+	}
+
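+	/* Reject reset requests with bits outside the masks supported by this
+	 * core */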
+	if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) &&
+			((ui64ResetValue2 & RGX_CR_SOFT_RESET2_MASKFULL) != ui64ResetValue2))
+	{
+		bSoftReset = IMG_TRUE;
+	}
+
+	if (((ui64ResetValue1 & ui64SoftResetMask) != ui64ResetValue1) || bSoftReset)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Set in soft-reset */
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue1);
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+	{
+		OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, ui64ResetValue2);
+	}
+
+	/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+	(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+	{
+		(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
+	}
+
+	/* Take the modules out of reset... */
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, 0);
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+	{
+		OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, 0);
+	}
+
+	/* ...and fence again */
+	(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+	{
+		(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+ ******************************************************************************
+
+ @Function	RGXDebugRequestNotify
+
+ @Description Dump the debug data for RGX
+
+ ******************************************************************************/
+static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle,
+		IMG_UINT32 ui32VerbLevel,
+		DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+		void *pvDumpDebugFile)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = hDbgRequestHandle;
+
+	/* Only action the request once we have fully initialised */
+	if (psDevInfo->bDevInit2Done)
+	{
+		RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel);
+	}
+}
+
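+/* Sentinel used when no trampoline pages are allocated; RGXFreeTrampoline()
+ * compares against it to avoid freeing the static instance */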
+static const RGX_MIPS_ADDRESS_TRAMPOLINE sNullTrampoline;
+
+static void RGXFreeTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	DevPhysMemFree(psDeviceNode,
+#if defined(PDUMP)
+			psDevInfo->psTrampoline->hPdumpPages,
+#endif
+			&psDevInfo->psTrampoline->sPages);
+
+	if (psDevInfo->psTrampoline != &sNullTrampoline)
+	{
+		OSFreeMem(psDevInfo->psTrampoline);
+	}
+	psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline;
+}
+
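+/* True when the two 'size'-byte ranges starting at x and y intersect */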
+#define RANGES_OVERLAP(x,y,size) ((x) < ((y) + (size)) && (y) < ((x) + (size)))
+#define TRAMPOLINE_ALLOC_MAX_RETRIES (3)
+
+static PVRSRV_ERROR RGXAllocTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+	IMG_INT32 i, j;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGX_MIPS_ADDRESS_TRAMPOLINE *pasTrampoline[TRAMPOLINE_ALLOC_MAX_RETRIES];
+
+	PDUMPCOMMENT("Allocate pages for trampoline");
+
+	/* Retry the allocation of the trampoline block (16KB), retaining any
+	 * previous allocations that overlap with the target range, until we get
+	 * an allocation that does not overlap with it.
+	 * Such an allocation requires at most 3 tries, since we are allocating
+	 * a physically contiguous block of memory rather than individual pages.
+	 * The unused allocations are freed only at the end, after the desired
+	 * range is obtained, to prevent the alloc function from returning the
+	 * same bad range repeatedly.
+	 */
+	for (i = 0; i < TRAMPOLINE_ALLOC_MAX_RETRIES; i++)
+	{
+		pasTrampoline[i] = OSAllocMem(sizeof(RGX_MIPS_ADDRESS_TRAMPOLINE));
+		if (pasTrampoline[i] == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			PVR_DPF((PVR_DBG_ERROR, "%s failed (%u)",
+					__func__, eError));
+			goto fail;
+		}
+
+		eError = DevPhysMemAlloc(psDeviceNode,
+				RGXMIPSFW_TRAMPOLINE_SIZE,
+				RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE,
+				0,         // (init) u8Value
+				IMG_FALSE, // bInitPage,
+#if defined(PDUMP)
+				psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+				"TrampolineRegion",
+				&pasTrampoline[i]->hPdumpPages,
+#endif
+				&pasTrampoline[i]->sPages,
+				&pasTrampoline[i]->sPhysAddr);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s failed (%u)",
+					__func__, eError));
+			/* The pages for this attempt were not allocated; free the
+			 * struct here, as the cleanup loop only walks entries 0..i-1 */
+			OSFreeMem(pasTrampoline[i]);
+			goto fail;
+		}
+
+		if (!RANGES_OVERLAP(pasTrampoline[i]->sPhysAddr.uiAddr,
+				RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR,
+				RGXMIPSFW_TRAMPOLINE_SIZE))
+		{
+			break;
+		}
+	}
+	if (TRAMPOLINE_ALLOC_MAX_RETRIES == i)
+	{
+		/* Failed to find a physical allocation after 3 attempts */
+		eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s failed to allocate non-overlapping pages (%u)",
+				__func__, eError));
+		/* Fall through, clean up and return error. */
+	}
+	else
+	{
+		/* Remember the last physical block allocated, it will not be freed */
+		psDevInfo->psTrampoline = pasTrampoline[i];
+	}
+
+fail:
+	/* free all unused allocations */
+	for (j = 0; j < i; j++)
+	{
+		DevPhysMemFree(psDeviceNode,
+#if defined(PDUMP)
+				pasTrampoline[j]->hPdumpPages,
+#endif
+				&pasTrampoline[j]->sPages);
+		OSFreeMem(pasTrampoline[j]);
+	}
+
+	return eError;
+}
+
+#undef RANGES_OVERLAP
+
+
+PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE   *psDeviceNode,
+		IMG_DEVMEM_SIZE_T    uiFWCodeLen,
+		IMG_DEVMEM_SIZE_T    uiFWDataLen,
+		IMG_DEVMEM_SIZE_T    uiFWCorememCodeLen,
+		IMG_DEVMEM_SIZE_T    uiFWCorememDataLen)
+{
+	DEVMEM_FLAGS_T		uiMemAllocFlags;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR        eError;
+	IMG_DEVMEM_SIZE_T	uiDummyLen;
+	DEVMEM_MEMDESC		*psDummyMemDesc = NULL;
+
+	/*
+	 * Set up Allocation for FW code section
+	 */
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED);
+
+	eError = RGXAllocateFWMemoryRegion(psDeviceNode,
+			uiFWCodeLen,
+			uiMemAllocFlags,
+			PVRSRV_DEVICE_FW_CODE_REGION,
+			"FwExCodeRegion",
+			&psDevInfo->psRGXFWCodeMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw code mem (%u)",
+				eError));
+		goto failFWCodeMemDescAlloc;
+	}
+
+	eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc,
+			&psDevInfo->sFWCodeDevVAddrBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw code mem (%u)",
+				eError));
+		goto failFWCodeMemDescAqDevVirt;
+	}
+
+	if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) || (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))))
+	{
+		/*
+		 * The FW code must be the first allocation in the firmware heap, otherwise
+		 * the bootloader will not work (META will not be able to find the bootloader).
+		 */
+		PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_RAW_HEAP_BASE);
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		/*
+		 * Allocate Dummy Pages so that Data segment allocation gets the same
+		 * device virtual address as specified in MIPS firmware linker script
+		 */
+		uiDummyLen = RGXGetFWImageSectionMaxSize(NULL, MIPS_CODE) +
+				RGXGetFWImageSectionMaxSize(NULL, MIPS_EXCEPTIONS_CODE) +
+				RGXGetFWImageSectionMaxSize(NULL, MIPS_BOOT_CODE) -
+				uiFWCodeLen; /* code actual size */
+
+		if (uiDummyLen > 0)
+		{
+			eError = DevmemFwAllocateExportable(psDeviceNode,
+					uiDummyLen,
+					OSGetPageSize(),
+					uiMemAllocFlags,
+					"FwExDummyPages",
+					&psDummyMemDesc);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw dummy mem (%u)", eError));
+				goto failDummyMemDescAlloc;
+			}
+		}
+	}
+
+	/*
+	 * Set up Allocation for FW data section
+	 */
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+			PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT;
+
+	eError = RGXAllocateFWDataRegion(psDeviceNode,
+			uiFWDataLen,
+			uiMemAllocFlags,
+			PVRSRV_DEVICE_FW_PRIVATE_DATA_REGION,
+			"FwExDataRegion",
+			&psDevInfo->psRGXFWDataMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw data mem (%u)",
+				eError));
+		goto failFWDataMemDescAlloc;
+	}
+
+	eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+			&psDevInfo->sFWDataDevVAddrBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw data mem (%u)",
+				eError));
+		goto failFWDataMemDescAqDevVirt;
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		eError = RGXAllocTrampoline(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"Failed to allocate trampoline region (%u)",
+					eError));
+			goto failTrampolineMemDescAlloc;
+		}
+	}
+
+	if (uiFWCorememCodeLen != 0)
+	{
+		/*
+		 * Set up Allocation for FW coremem code section
+		 */
+		uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+				PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+				PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+				PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+				PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+				PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT;
+
+		eError = RGXAllocateFWMemoryRegion(psDeviceNode,
+				uiFWCorememCodeLen,
+				uiMemAllocFlags,
+				PVRSRV_DEVICE_FW_COREMEM_CODE_REGION,
+				"FwExCorememCodeRegion",
+				&psDevInfo->psRGXFWCorememCodeMemDesc);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "Failed to allocate fw coremem code mem, size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)",
+			         uiFWCorememCodeLen, uiMemAllocFlags, eError));
+			goto failFWCorememCodeMemDescAlloc;
+		}
+
+		eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc,
+				&psDevInfo->sFWCorememCodeDevVAddrBase);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw coremem code (%u)",
+					eError));
+			goto failFWCorememCodeMemDescAqDevVirt;
+		}
+
+		RGXSetFirmwareAddress(&psDevInfo->sFWCorememCodeFWAddr,
+				psDevInfo->psRGXFWCorememCodeMemDesc,
+				0, RFW_FWADDR_NOREF_FLAG);
+	}
+	else
+	{
+		psDevInfo->sFWCorememCodeDevVAddrBase.uiAddr = 0;
+		psDevInfo->sFWCorememCodeFWAddr.ui32Addr = 0;
+	}
+
+	if (uiFWCorememDataLen != 0)
+	{
+		/*
+		 * Set up Allocation for FW coremem data section
+		 */
+		uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+				PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+				PVRSRV_MEMALLOCFLAG_GPU_READABLE  |
+				PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+				PVRSRV_MEMALLOCFLAG_CPU_READABLE  |
+				PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+				PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+				PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT;
+
+		eError = RGXAllocateFWDataRegion(psDeviceNode,
+				uiFWCorememDataLen,
+				uiMemAllocFlags,
+				PVRSRV_DEVICE_FW_COREMEM_DATA_REGION,
+				"FwExCorememDataRegion",
+				&psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw coremem data mem, "
+					"size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)",
+					uiFWCorememDataLen, uiMemAllocFlags, eError));
+			goto failFWCorememDataMemDescAlloc;
+		}
+
+		eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+				&psDevInfo->sFWCorememDataStoreDevVAddrBase);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw coremem data (%u)",
+					eError));
+			goto failFWCorememDataMemDescAqDevVirt;
+		}
+
+		RGXSetFirmwareAddress(&psDevInfo->sFWCorememDataStoreFWAddr,
+				psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+				0, RFW_FWADDR_NOREF_FLAG);
+	}
+	else
+	{
+		psDevInfo->sFWCorememDataStoreDevVAddrBase.uiAddr = 0;
+		psDevInfo->sFWCorememDataStoreFWAddr.ui32Addr = 0;
+	}
+
+	/* Free Dummy Pages */
+	if (psDummyMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDummyMemDesc);
+	}
+
+	return PVRSRV_OK;
+
+	failFWCorememDataMemDescAqDevVirt:
+	if (uiFWCorememDataLen != 0)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+		psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+	}
+	failFWCorememDataMemDescAlloc:
+	if (uiFWCorememCodeLen != 0)
+	{
+		DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc);
+	}
+	failFWCorememCodeMemDescAqDevVirt:
+	if (uiFWCorememCodeLen != 0)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc);
+		psDevInfo->psRGXFWCorememCodeMemDesc = NULL;
+	}
+	failFWCorememCodeMemDescAlloc:
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		RGXFreeTrampoline(psDeviceNode);
+	}
+	failTrampolineMemDescAlloc:
+	DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+	failFWDataMemDescAqDevVirt:
+	DevmemFwFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
+	psDevInfo->psRGXFWDataMemDesc = NULL;
+	failFWDataMemDescAlloc:
+	if (psDummyMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDummyMemDesc);
+	}
+	failDummyMemDescAlloc:
+	DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+	failFWCodeMemDescAqDevVirt:
+	DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
+	psDevInfo->psRGXFWCodeMemDesc = NULL;
+	failFWCodeMemDescAlloc:
+	return eError;
+}
+
+/*
+	AppHint parameter interface
+ */
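+/*
+ * Query/Set handler pairs implementing the AppHint interface registered at
+ * the end of RGXInitFirmware: the trace handlers translate between AppHint
+ * values and the RGXFWIF_LOG_TYPE_* bits consumed by the firmware, while the
+ * poison-on-free handlers simply mirror fields in PVRSRV_RGXDEV_INFO.
+ */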
+static
+PVRSRV_ERROR RGXFWTraceQueryFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+		const void *psPrivate,
+		IMG_UINT32 *pui32Value)
+{
+	PVRSRV_ERROR eResult;
+
+	eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value);
+	*pui32Value &= RGXFWIF_LOG_TYPE_GROUP_MASK;
+	return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceQueryLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
+		const void *psPrivate,
+		IMG_UINT32 *pui32Value)
+{
+	PVRSRV_ERROR eResult;
+
+	eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value);
+	if (PVRSRV_OK == eResult)
+	{
+		if (*pui32Value & RGXFWIF_LOG_TYPE_TRACE)
+		{
+			*pui32Value = 2; /* Trace */
+		}
+		else if (*pui32Value & RGXFWIF_LOG_TYPE_GROUP_MASK)
+		{
+			*pui32Value = 1; /* TBI */
+		}
+		else
+		{
+			*pui32Value = 0; /* None */
+		}
+	}
+	return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceSetFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+		const void *psPrivate,
+		IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eResult;
+	IMG_UINT32 ui32RGXFWLogType;
+
+	eResult = RGXFWTraceQueryLogType(psDeviceNode, NULL, &ui32RGXFWLogType);
+	if (PVRSRV_OK == eResult)
+	{
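+		/* If any log groups are being enabled and the current log type is
+		 * not TBI-only (1), keep firmware trace output enabled as well */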
+		if (ui32Value && 1 != ui32RGXFWLogType)
+		{
+			ui32Value |= RGXFWIF_LOG_TYPE_TRACE;
+		}
+		eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32Value);
+	}
+	return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceSetLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
+		const void *psPrivate,
+		IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eResult;
+	IMG_UINT32 ui32RGXFWLogType = ui32Value;
+
+	/* 0 - none, 1 - tbi, 2 - trace */
+	if (ui32Value)
+	{
+		eResult = RGXFWTraceQueryFilter(psDeviceNode, NULL, &ui32RGXFWLogType);
+		if (PVRSRV_OK != eResult)
+		{
+			return eResult;
+		}
+		if (!ui32RGXFWLogType)
+		{
+			ui32RGXFWLogType = RGXFWIF_LOG_TYPE_GROUP_MAIN;
+		}
+		if (2 == ui32Value)
+		{
+			ui32RGXFWLogType |= RGXFWIF_LOG_TYPE_TRACE;
+		}
+	}
+
+	eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32RGXFWLogType);
+	return eResult;
+}
+
+static
+PVRSRV_ERROR RGXQueryFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode,
+		const void *psPrivate,
+		IMG_BOOL *pbValue)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+	*pbValue = psDevInfo->bEnableFWPoisonOnFree;
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode,
+		const void *psPrivate,
+		IMG_BOOL bValue)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+	psDevInfo->bEnableFWPoisonOnFree = bValue;
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXQueryFWPoisonOnFreeValue(const PVRSRV_DEVICE_NODE *psDeviceNode,
+		const void *psPrivate,
+		IMG_UINT32 *pui32Value)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	*pui32Value = psDevInfo->ubFWPoisonOnFreeValue;
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXSetFWPoisonOnFreeValue(const PVRSRV_DEVICE_NODE *psDeviceNode,
+		const void *psPrivate,
+		IMG_UINT32 ui32Value)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	psDevInfo->ubFWPoisonOnFreeValue = (IMG_BYTE) ui32Value;
+	return PVRSRV_OK;
+}
+
+/*
+ * RGXInitFirmware
+ */
+PVRSRV_ERROR
+RGXInitFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+		IMG_BOOL                 bEnableSignatureChecks,
+		IMG_UINT32               ui32SignatureChecksBufSize,
+		IMG_UINT32               ui32HWPerfFWBufSizeKB,
+		IMG_UINT64               ui64HWPerfFilter,
+		IMG_UINT32               ui32RGXFWAlignChecksArrLength,
+		IMG_UINT32               *pui32RGXFWAlignChecks,
+		IMG_UINT32               ui32ConfigFlags,
+		IMG_UINT32               ui32LogType,
+		IMG_UINT32               ui32FilterFlags,
+		IMG_UINT32               ui32JonesDisableMask,
+		IMG_UINT32               ui32HWRDebugDumpLimit,
+		IMG_UINT32               ui32HWPerfCountersDataSize,
+		IMG_UINT32               *pui32TPUTrilinearFracMask,
+		RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+		FW_PERF_CONF             eFirmwarePerf,
+		IMG_UINT32               ui32ConfigFlagsExt)
+{
+	PVRSRV_ERROR eError;
+	void *pvAppHintState = NULL;
+	IMG_UINT32 ui32AppHintDefault;
+	IMG_UINT32 ui32NumBIFTilingConfigs, *pui32BIFTilingXStrides, i;
+	RGXFWIF_BIFTILINGMODE eBIFTilingMode;
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+	PVRSRVSystemBIFTilingGetConfig(psDeviceNode->psDevConfig,
+			&eBIFTilingMode,
+			&ui32NumBIFTilingConfigs);
+	pui32BIFTilingXStrides = OSAllocMem(sizeof(IMG_UINT32) * ui32NumBIFTilingConfigs);
+	if (pui32BIFTilingXStrides == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF((PVR_DBG_ERROR,"%s: OSAllocMem failed (%u)", __func__, eError));
+		goto failed_BIF_tiling_alloc;
+	}
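+	/* BIF tiling heap indices are 1-based, hence 'i + 1' below */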
+	for (i = 0; i < ui32NumBIFTilingConfigs; i++)
+	{
+		eError = PVRSRVSystemBIFTilingHeapGetXStride(psDeviceNode->psDevConfig,
+				i+1,
+				&pui32BIFTilingXStrides[i]);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to get BIF tiling X stride for heap %u (%u)",
+					__func__, i + 1, eError));
+			goto failed_BIF_heap_init;
+		}
+	}
+
+	eError = RGXSetupFirmware(psDeviceNode,
+			bEnableSignatureChecks,
+			ui32SignatureChecksBufSize,
+			ui32HWPerfFWBufSizeKB,
+			ui64HWPerfFilter,
+			ui32RGXFWAlignChecksArrLength,
+			pui32RGXFWAlignChecks,
+			ui32ConfigFlags,
+			ui32ConfigFlagsExt,
+			ui32LogType,
+			eBIFTilingMode,
+			ui32NumBIFTilingConfigs,
+			pui32BIFTilingXStrides,
+			ui32FilterFlags,
+			ui32JonesDisableMask,
+			ui32HWRDebugDumpLimit,
+			ui32HWPerfCountersDataSize,
+			pui32TPUTrilinearFracMask,
+			eRGXRDPowerIslandingConf,
+			eFirmwarePerf);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: RGXSetupFirmware failed (%u)", __func__, eError));
+		goto failed_init_firmware;
+	}
+
+	OSFreeMem(pui32BIFTilingXStrides);
+
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup,
+			RGXFWTraceQueryFilter,
+			RGXFWTraceSetFilter,
+			psDeviceNode,
+			NULL);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FirmwareLogType,
+			RGXFWTraceQueryLogType,
+			RGXFWTraceSetLogType,
+			psDeviceNode,
+			NULL);
+
+	/* FW Poison values are not passed through from the init code
+	 * so grab them here */
+	OSCreateKMAppHintState(&pvAppHintState);
+
+	ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFWPOISONONFREE;
+	OSGetKMAppHintBOOL(pvAppHintState,
+			EnableFWPoisonOnFree,
+			&ui32AppHintDefault,
+			&psDevInfo->bEnableFWPoisonOnFree);
+
+	ui32AppHintDefault = PVRSRV_APPHINT_FWPOISONONFREEVALUE;
+	{
+		IMG_UINT32 ui32FWPoisonOnFreeValue;
+
+		/* Read into a full-width local first: ubFWPoisonOnFreeValue is a
+		 * single IMG_BYTE, so writing an IMG_UINT32 through its address
+		 * would clobber the adjacent fields */
+		OSGetKMAppHintUINT32(pvAppHintState,
+				FWPoisonOnFreeValue,
+				&ui32AppHintDefault,
+				&ui32FWPoisonOnFreeValue);
+		psDevInfo->ubFWPoisonOnFreeValue = (IMG_BYTE)ui32FWPoisonOnFreeValue;
+	}
+
+	OSFreeKMAppHintState(pvAppHintState);
+
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree,
+			RGXQueryFWPoisonOnFree,
+			RGXSetFWPoisonOnFree,
+			psDeviceNode,
+			NULL);
+
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FWPoisonOnFreeValue,
+			RGXQueryFWPoisonOnFreeValue,
+			RGXSetFWPoisonOnFreeValue,
+			psDeviceNode,
+			NULL);
+
+	return PVRSRV_OK;
+
+	failed_init_firmware:
+	failed_BIF_heap_init:
+	OSFreeMem(pui32BIFTilingXStrides);
+	failed_BIF_tiling_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/* See device.h for function declaration */
+static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+		DEVMEM_MEMDESC **psMemDesc,
+		IMG_UINT32 *puiSyncPrimVAddr,
+		IMG_UINT32 *puiSyncPrimBlockSize)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	PVRSRV_ERROR eError;
+	RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+	IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32);
+	IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32);
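+	/* A UFO is a single 32-bit sync value, so size and alignment start at
+	 * 4 bytes and are grown below to satisfy export-alignment rules */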
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	/* Size and align are 'expanded' because we request an Exportalign allocation */
+	eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+			&uiUFOBlockSize,
+			&ui32UFOBlockAlign);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+			uiUFOBlockSize,
+			ui32UFOBlockAlign,
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE,
+			"FwExUFOBlock",
+			psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+	*puiSyncPrimVAddr = pFirmwareAddr.ui32Addr;
+	*puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize);
+
+	return PVRSRV_OK;
+
+	e0:
+	return eError;
+}
+
+/* See device.h for function declaration */
+static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+		DEVMEM_MEMDESC *psMemDesc)
+{
+	/*
+		If the system has snooping of the device cache then the UFO block
+		might be in the cache so we need to flush it out before freeing
+		the memory
+
+		When the device is being shutdown/destroyed we don't care anymore.
+		Several necessary data structures to issue a flush were destroyed
+		already.
+	 */
+	if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) &&
+			psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT)
+	{
+		RGXFWIF_KCCB_CMD sFlushInvalCmd;
+		PVRSRV_ERROR eError;
+
+		/* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
+#endif
+		sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
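+		/* The command above requests a full SLC flush/invalidate
+		 * (bDMContext = IMG_FALSE), so no DM or FW context is targeted */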
+
+		eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sFlushInvalCmd,
+				PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXFreeUFOBlock: Failed to schedule SLC flush command with error (%u)", eError));
+		}
+		else
+		{
+			/* Wait for the SLC flush to complete */
+			eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXFreeUFOBlock: SLC flush and invalidate aborted with error (%u)", eError));
+			}
+		}
+	}
+
+	RGXUnsetFirmwareAddress(psMemDesc);
+	DevmemFwFree(psDeviceNode->pvDevice, psMemDesc);
+}
+
+/*
+	DevDeInitRGX
+ */
+PVRSRV_ERROR DevDeInitRGX (PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO			*psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+	PVRSRV_ERROR				eError;
+	DEVICE_MEMORY_INFO		    *psDevMemoryInfo;
+	IMG_UINT32		ui32Temp=0;
+
+	if (!psDevInfo)
+	{
+		/* Can happen if DevInitRGX failed */
+		PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Null DevInfo"));
+		return PVRSRV_OK;
+	}
+
+	eError = DeviceDepBridgeDeInit(psDevInfo->sDevFeatureCfg.ui64Features);
+	PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeDeInit");
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0);
+		PVR_UNREFERENCED_PARAMETER(ui32Temp);
+	}
+	else
+#endif
+	{
+		/* Delete the Dummy page related info */
+		ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter);
+		if (0 != ui32Temp)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: Dummy page reference counter is non zero (%u)",
+					__func__,
+					ui32Temp));
+			PVR_ASSERT(0);
+		}
+	}
+
+	/* Delete the Zero page related info */
+	ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDevZeroPage.atRefCounter);
+	if (0 != ui32Temp)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Zero page reference counter is non zero (%u)",
+				__func__,
+				ui32Temp));
+	}
+
+#if defined(PDUMP)
+	if (NULL != psDeviceNode->sDummyPage.hPdumpPg)
+	{
+		PDUMPCOMMENT("Error: dummy page handle is still active");
+	}
+
+	if (NULL != psDeviceNode->sDevZeroPage.hPdumpPg)
+	{
+		PDUMPCOMMENT("Error: Zero page handle is still active");
+	}
+#endif
+
+#if defined(SUPPORT_PDVFS) && !defined(RGXFW_META_SUPPORT_2ND_THREAD)
+	if (psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer)
+	{
+		OSDisableTimer(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer);
+		OSRemoveTimer(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer);
+	}
+#endif
+
+	/* The lock needs to be a dispatch-type lock here because it can be acquired from the MISR (Z-buffer) path */
+	OSLockDestroy(psDeviceNode->sDummyPage.psPgLock);
+
+	/* Destroy the zero page lock */
+	OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock);
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+	OSLockDestroy(psDevInfo->hCounterDumpingLock);
+#endif
+
+	/* Unregister debug request notifiers first as they could depend on anything. */
+	if (psDevInfo->hDbgReqNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(psDevInfo->hDbgReqNotify);
+	}
+
+	/* Cancel notifications to this device */
+	PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify);
+	psDeviceNode->hCmdCompNotify = NULL;
+
+	/*
+	 *  De-initialise in reverse order, so stage 2 init is undone first.
+	 */
+	if (psDevInfo->bDevInit2Done)
+	{
+		psDevInfo->bDevInit2Done = IMG_FALSE;
+
+#if !defined(NO_HARDWARE)
+		(void) SysUninstallDeviceLISR(psDevInfo->pvLISRData);
+		(void) OSUninstallMISR(psDevInfo->pvMISRData);
+		(void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+		if (psDevInfo->pvAPMISRData != NULL)
+		{
+			(void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+		}
+#endif /* !NO_HARDWARE */
+
+		/* Remove the device from the power manager */
+		eError = PVRSRVRemovePowerDevice(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+
+		psDevInfo->pfnGetGpuUtilStats = NULL;
+		OSLockDestroy(psDevInfo->hGPUUtilLock);
+
+		/* Free DVFS Table */
+		if (psDevInfo->psGpuDVFSTable != NULL)
+		{
+			OSFreeMem(psDevInfo->psGpuDVFSTable);
+			psDevInfo->psGpuDVFSTable = NULL;
+		}
+
+		/* De-init Freelists/ZBuffers... */
+		OSLockDestroy(psDevInfo->hLockFreeList);
+		OSLockDestroy(psDevInfo->hLockZSBuffer);
+
+		/* Unregister MMU-related callbacks */
+		eError = RGXMMUInit_Unregister(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed RGXMMUInit_Unregister (0x%x)", eError));
+			return eError;
+		}
+
+		if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+		{
+			/* Unregister MIPS MMU-related callbacks */
+			eError = RGXMipsMMUInit_Unregister(psDeviceNode);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed RGXMipsMMUInit_Unregister (0x%x)", eError));
+				return eError;
+			}
+		}
+	}
+
+	/* UnMap Regs */
+	if (psDevInfo->pvRegsBaseKM != NULL)
+	{
+#if !defined(NO_HARDWARE)
+		OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM,
+				psDevInfo->ui32RegSize,
+				PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+#endif /* !NO_HARDWARE */
+		psDevInfo->pvRegsBaseKM = NULL;
+	}
+
+#if 0 /* not required at this time */
+	if (psDevInfo->hTimer)
+	{
+		eError = OSRemoveTimer(psDevInfo->hTimer);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed to remove timer"));
+			return 	eError;
+		}
+		psDevInfo->hTimer = NULL;
+	}
+#endif
+
+	psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+
+	RGXDeInitHeaps(psDevMemoryInfo);
+
+	if (psDevInfo->psRGXFWCodeMemDesc)
+	{
+		/* Free fw code */
+		PDUMPCOMMENT("Freeing FW code memory");
+		DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
+		psDevInfo->psRGXFWCodeMemDesc = NULL;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING,"No firmware code memory to free"));
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		if (psDevInfo->psTrampoline->sPages.u.pvHandle)
+		{
+			/* Free trampoline region */
+			PDUMPCOMMENT("Freeing trampoline memory");
+			RGXFreeTrampoline(psDeviceNode);
+		}
+	}
+
+	if (psDevInfo->psRGXFWDataMemDesc)
+	{
+		/* Free fw data */
+		PDUMPCOMMENT("Freeing FW data memory");
+		DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
+		psDevInfo->psRGXFWDataMemDesc = NULL;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING,"No firmware data memory to free"));
+	}
+
+	if (psDevInfo->psRGXFWCorememCodeMemDesc)
+	{
+		/* Free fw core mem code */
+		PDUMPCOMMENT("Freeing FW coremem code memory");
+		DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc);
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc);
+		psDevInfo->psRGXFWCorememCodeMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+	{
+		/* Free fw core mem data */
+		PDUMPCOMMENT("Freeing FW coremem data store memory");
+		DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+		psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+	}
+
+	/*
+	   Free the firmware allocations.
+	 */
+	RGXFreeFirmware(psDevInfo);
+	RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode);
+
+	/* De-initialise non-device specific (TL) users of RGX device memory */
+	RGXHWPerfHostDeInit(psDevInfo);
+	eError = HTBDeInit();
+	PVR_LOG_IF_ERROR(eError, "HTBDeInit");
+
+	/* destroy the stalled CCB locks */
+	OSLockDestroy(psDevInfo->hCCBRecoveryLock);
+	OSLockDestroy(psDevInfo->hCCBStallCheckLock);
+
+	/* destroy the context list locks */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psDevInfo->sRegCongfig.hLock);
+	OSLockDestroy(psDevInfo->hBPLock);
+	OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock);
+#endif
+	OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+	OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+	OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+	OSWRLockDestroy(psDevInfo->hTDMCtxListLock);
+	OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock);
+	OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+	OSLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList);
+	OSWRLockDestroy(psDevInfo->hCommonCtxtListLock);
+
+	if ((psDevInfo->hNMILock != NULL) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)))
+	{
+		OSLockDestroy(psDevInfo->hNMILock);
+	}
+
+	if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+	{
+		if (psDevInfo->hDebugFaultInfoLock != NULL)
+		{
+			OSLockDestroy(psDevInfo->hDebugFaultInfoLock);
+		}
+		if (psDevInfo->hMMUCtxUnregLock != NULL)
+		{
+			OSLockDestroy(psDevInfo->hMMUCtxUnregLock);
+		}
+	}
+
+	/* Free device BVNC string */
+	if (NULL != psDevInfo->sDevFeatureCfg.pszBVNCString)
+	{
+		OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString);
+	}
+
+	/* DeAllocate devinfo */
+	OSFreeMem(psDevInfo);
+
+	psDeviceNode->pvDevice = NULL;
+
+	return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+static
+PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice);
+
+	psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+
+	return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+static INLINE DEVMEM_HEAP_BLUEPRINT _blueprint_init(IMG_CHAR *name,
+		IMG_UINT64 heap_base,
+		IMG_DEVMEM_SIZE_T heap_length,
+		IMG_UINT32 log2_import_alignment,
+		IMG_UINT32 tiling_mode)
+{
+	DEVMEM_HEAP_BLUEPRINT b = {
+			.pszName = name,
+			.sHeapBaseAddr.uiAddr = heap_base,
+			.uiHeapLength = heap_length,
+			.uiLog2DataPageSize = RGXHeapDerivePageSize(OSGetPageShift()),
+			.uiLog2ImportAlignment = log2_import_alignment,
+			.uiLog2TilingStrideFactor = (RGX_BIF_TILING_HEAP_LOG2_ALIGN_TO_STRIDE_BASE - tiling_mode)
+	};
+	void *pvAppHintState = NULL;
+	IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE;
+	IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+
+	if (!OSStringCompare(name, RGX_GENERAL_NON4K_HEAP_IDENT))
+	{
+		OSCreateKMAppHintState(&pvAppHintState);
+		OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize,
+				&ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize);
+		switch (ui32GeneralNon4KHeapPageSize)
+		{
+		case (1 << RGX_HEAP_4KB_PAGE_SHIFT):
+				b.uiLog2DataPageSize = RGX_HEAP_4KB_PAGE_SHIFT;
+				break;
+		case (1 << RGX_HEAP_16KB_PAGE_SHIFT):
+				b.uiLog2DataPageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+				break;
+		case (1 << RGX_HEAP_64KB_PAGE_SHIFT):
+				b.uiLog2DataPageSize = RGX_HEAP_64KB_PAGE_SHIFT;
+				break;
+		case (1 << RGX_HEAP_256KB_PAGE_SHIFT):
+				b.uiLog2DataPageSize = RGX_HEAP_256KB_PAGE_SHIFT;
+				break;
+		case (1 << RGX_HEAP_1MB_PAGE_SHIFT):
+				b.uiLog2DataPageSize = RGX_HEAP_1MB_PAGE_SHIFT;
+				break;
+		case (1 << RGX_HEAP_2MB_PAGE_SHIFT):
+				b.uiLog2DataPageSize = RGX_HEAP_2MB_PAGE_SHIFT;
+				break;
+		default:
+				b.uiLog2DataPageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+
+				PVR_DPF((PVR_DBG_ERROR,"Invalid AppHint GeneralNon4KHeapPageSize [%u] value, using 16KB",
+					ui32GeneralNon4KHeapPageSize));
+				break;
+		}
+		OSFreeKMAppHintState(pvAppHintState);
+	}
+
+	return b;
+}
+
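+/*
+ * Each INIT_* helper below fills in one DEVMEM_HEAP_BLUEPRINT at the current
+ * cursor position and then advances the cursor; heap identifiers, bases and
+ * sizes are derived from the RGX_*_HEAP_* definitions by token pasting.
+ */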
+#define INIT_HEAP(NAME) \
+    do { \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+						     RGX_ ## NAME ## _HEAP_IDENT, \
+						     RGX_ ## NAME ## _HEAP_BASE, \
+						     RGX_ ## NAME ## _HEAP_SIZE, \
+						     0, 0); \
+	psDeviceMemoryHeapCursor++; \
+    } while (0)
+
+#define INIT_FW_MAIN_HEAP(MODE, FWCORE) \
+    do { \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+						     RGX_FIRMWARE_MAIN_HEAP_IDENT, \
+						     RGX_FIRMWARE_ ## MODE ## _MAIN_HEAP_BASE, \
+						     RGX_FIRMWARE_ ## FWCORE ## _MAIN_HEAP_SIZE, \
+						     0, 0); \
+	psDeviceMemoryHeapCursor++; \
+    } while (0)
+
+#define INIT_FW_CONFIG_HEAP(MODE) \
+    do { \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+						     RGX_FIRMWARE_CONFIG_HEAP_IDENT, \
+						     RGX_FIRMWARE_ ## MODE ## _CONFIG_HEAP_BASE, \
+						     RGX_FIRMWARE_CONFIG_HEAP_SIZE, \
+						     0, 0); \
+	psDeviceMemoryHeapCursor++; \
+    } while (0)
+
+#define INIT_HEAP_NAME(STR, NAME) \
+    do { \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+						     STR, \
+						     RGX_ ## NAME ## _HEAP_BASE, \
+						     RGX_ ## NAME ## _HEAP_SIZE, \
+						     0, 0); \
+	psDeviceMemoryHeapCursor++; \
+    } while (0)
+
+#define INIT_TILING_HEAP(D, N, M)		\
+    do { \
+	IMG_UINT32 xstride; \
+	PVRSRVSystemBIFTilingHeapGetXStride((D)->psDeviceNode->psDevConfig, N, &xstride); \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+						     RGX_BIF_TILING_HEAP_ ## N ## _IDENT, \
+						     RGX_BIF_TILING_HEAP_ ## N ## _BASE, \
+						     RGX_BIF_TILING_HEAP_SIZE, \
+						     RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(xstride), \
+						     (IMG_UINT32)M); \
+	psDeviceMemoryHeapCursor++; \
+    } while (0)
+
+static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo,
+		DEVICE_MEMORY_INFO *psNewMemoryInfo,
+		IMG_UINT32 *pui32Log2DummyPgSize)
+{
+	DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor;
+	RGXFWIF_BIFTILINGMODE eBIFTilingMode;
+	IMG_UINT32 uiNumHeaps;
+	void *pvAppHintState = NULL;
+	IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE;
+	IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+
+#if defined(SUPPORT_VALIDATION)
+	IMG_UINT32 ui32BIFTilingMode, ui32AppHintDefaultTilingMode = RGXFWIF_BIFTILINGMODE_MAX;
+
+	OSCreateKMAppHintState(&pvAppHintState);
+	OSGetKMAppHintUINT32(pvAppHintState, BIFTilingMode,
+			&ui32AppHintDefaultTilingMode, &ui32BIFTilingMode);
+	OSFreeKMAppHintState(pvAppHintState);
+	if (ui32BIFTilingMode == RGXFWIF_BIFTILINGMODE_256x16 || ui32BIFTilingMode == RGXFWIF_BIFTILINGMODE_512x8)
+	{
+		psDevInfo->psDeviceNode->psDevConfig->eBIFTilingMode = ui32BIFTilingMode;
+	}
+	else if (ui32BIFTilingMode != RGXFWIF_BIFTILINGMODE_MAX)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXInitHeaps: BIF Tiling mode apphint is invalid"));
+	}
+#endif
+
+	/* FIXME - consider whether this ought not to be on the device node itself */
+	psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * RGX_MAX_HEAP_ID);
+	if (psNewMemoryInfo->psDeviceMemoryHeap == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to alloc memory for DEVMEM_HEAP_BLUEPRINT", __func__));
+		goto e0;
+	}
+
+	PVRSRVSystemBIFTilingGetConfig(psDevInfo->psDeviceNode->psDevConfig, &eBIFTilingMode, &uiNumHeaps);
+
+	/* Get the page size for the dummy page from the NON4K heap apphint */
+	OSCreateKMAppHintState(&pvAppHintState);
+	OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize,
+			&ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize);
+	*pui32Log2DummyPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize);
+	OSFreeKMAppHintState(pvAppHintState);
+
+	/* Initialise the heaps */
+	psDeviceMemoryHeapCursor = psNewMemoryInfo->psDeviceMemoryHeap;
+
+	INIT_HEAP(GENERAL_SVM);
+
+	if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+	{
+		INIT_HEAP_NAME(RGX_GENERAL_HEAP_IDENT, GENERAL_BRN_65273);
+	}
+	else
+	{
+		INIT_HEAP(GENERAL);
+	}
+
+	if (RGX_IS_BRN_SUPPORTED(psDevInfo, 63142))
+	{
+		/* BRN63142 heap must be at the top of an aligned 16GB range. */
+		INIT_HEAP(RGNHDR_BRN_63142);
+		PVR_ASSERT((RGX_RGNHDR_BRN_63142_HEAP_BASE & IMG_UINT64_C(0x3FFFFFFFF)) +
+				RGX_RGNHDR_BRN_63142_HEAP_SIZE == IMG_UINT64_C(0x400000000));
+	}
+
+	if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+	{
+		INIT_HEAP_NAME(RGX_GENERAL_NON4K_HEAP_IDENT, GENERAL_NON4K_BRN_65273);
+		INIT_HEAP_NAME(RGX_VISTEST_HEAP_IDENT, VISTEST_BRN_65273);
+
+		/* HWBRN65273 workaround also requires two Region Header buffers 4GB apart. */
+		INIT_HEAP(MMU_INIA_BRN_65273);
+		INIT_HEAP(MMU_INIB_BRN_65273);
+	}
+	else
+	{
+		INIT_HEAP(GENERAL_NON4K);
+		INIT_HEAP(VISTEST);
+	}
+
+	if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+	{
+		INIT_HEAP_NAME(RGX_PDSCODEDATA_HEAP_IDENT, PDSCODEDATA_BRN_65273);
+		INIT_HEAP_NAME(RGX_USCCODE_HEAP_IDENT, USCCODE_BRN_65273);
+	}
+	else
+	{
+		INIT_HEAP(PDSCODEDATA);
+		INIT_HEAP(USCCODE);
+	}
+
+	if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+	{
+		INIT_HEAP_NAME(RGX_TQ3DPARAMETERS_HEAP_IDENT, TQ3DPARAMETERS_BRN_65273);
+	}
+	else
+	{
+		INIT_HEAP(TQ3DPARAMETERS);
+	}
+
+	INIT_TILING_HEAP(psDevInfo, 1, eBIFTilingMode);
+	INIT_TILING_HEAP(psDevInfo, 2, eBIFTilingMode);
+	INIT_TILING_HEAP(psDevInfo, 3, eBIFTilingMode);
+	INIT_TILING_HEAP(psDevInfo, 4, eBIFTilingMode);
+	INIT_HEAP(DOPPLER);
+	INIT_HEAP(DOPPLER_OVERFLOW);
+	INIT_HEAP(TDM_TPU_YUV_COEFFS);
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIGNAL_SNOOPING))
+	{
+		INIT_HEAP(SERVICES_SIGNALS);
+		INIT_HEAP(SIGNALS);
+	}
+
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		INIT_FW_CONFIG_HEAP(GUEST);
+
+		if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+		{
+			INIT_FW_MAIN_HEAP(GUEST, MIPS);
+		}
+		else
+		{
+			INIT_FW_MAIN_HEAP(GUEST, META);
+		}
+	}
+	else
+	{
+		if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+		{
+			INIT_FW_MAIN_HEAP(HYPERV, MIPS);
+		}
+		else
+		{
+			INIT_FW_MAIN_HEAP(HYPERV, META);
+		}
+
+		INIT_FW_CONFIG_HEAP(HYPERV);
+	}
+
+	/* set the heap count */
+	psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap);
+
+	PVR_ASSERT(psNewMemoryInfo->ui32HeapCount <= RGX_MAX_HEAP_ID);
+
+	/*
+	   In the new heap setup we initialise two configurations:
+		1 - One for the firmware only (index 1 in the array)
+			a. This primarily contains the firmware heaps.
+			b. It also contains additional guest OSID firmware heap(s),
+			   but only if the number of supported firmware OSIDs is > 1.
+		2 - One for clients only (index 0 in the array)
+			a. This contains all the other client heaps.
+	 */
+	psNewMemoryInfo->uiNumHeapConfigs = 2;
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray = OSAllocMem(sizeof(DEVMEM_HEAP_CONFIG) * psNewMemoryInfo->uiNumHeapConfigs);
+	if (psNewMemoryInfo->psDeviceMemoryHeapConfigArray == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to alloc memory for DEVMEM_HEAP_CONFIG", __func__));
+		goto e1;
+	}
+
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].pszName = "Default Heap Configuration";
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].uiNumHeaps = psNewMemoryInfo->ui32HeapCount - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS;
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].psHeapBlueprintArray = psNewMemoryInfo->psDeviceMemoryHeap;
+
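+	/* The firmware main + config heaps are the last blueprints appended
+	 * above, so the firmware configuration's blueprint array starts
+	 * RGX_FIRMWARE_NUMBER_OF_FW_HEAPS entries back from the cursor */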
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].pszName = "Firmware Heap Configuration";
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = RGX_FIRMWARE_NUMBER_OF_FW_HEAPS;
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS;
+
+	/* Perform additional virtualization initialization */
+	if (RGXVzInitHeaps(psNewMemoryInfo, psDeviceMemoryHeapCursor) != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	return PVRSRV_OK;
+	e1:
+	OSFreeMem(psNewMemoryInfo->psDeviceMemoryHeap);
+	e0:
+	return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+#undef INIT_HEAP
+#undef INIT_HEAP_NAME
+#undef INIT_TILING_HEAP
+
+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo)
+{
+	RGXVzDeInitHeaps(psDevMemoryInfo);
+	OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray);
+	OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap);
+}
+
+/*
+	RGXRegisterDevice
+ */
+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode,
+                               PVRSRV_RGXDEV_INFO **ppsDevInfo)
+{
+	PVRSRV_ERROR eError;
+	DEVICE_MEMORY_INFO *psDevMemoryInfo;
+	PVRSRV_RGXDEV_INFO	*psDevInfo;
+
+	PDUMPCOMMENT("Device Name: %s", psDeviceNode->psDevConfig->pszName);
+
+	if (psDeviceNode->psDevConfig->pszVersion)
+	{
+		PDUMPCOMMENT("Device Version: %s", psDeviceNode->psDevConfig->pszVersion);
+	}
+
+#if defined(RGX_FEATURE_SYSTEM_CACHE)
+	PDUMPCOMMENT("RGX System Level Cache is present");
+#endif /* RGX_FEATURE_SYSTEM_CACHE */
+
+	PDUMPCOMMENT("RGX Initialisation (Part 1)");
+
+	/*********************
+	 * Device node setup *
+	 *********************/
+	/* Setup static data and callbacks on the device agnostic device node */
+#if defined(PDUMP)
+	psDeviceNode->sDevId.pszPDumpRegName	= RGX_PDUMPREG_NAME;
+	/*
+		FIXME: This should not be required, as PMRs should give the memspace
+		name. However, due to limitations within PDump we need a memspace name
+		when PDumping with an MMU context using a virtual address, in which
+		case we don't have a PMR to get the name from.
+
+		There is also the issue of obtaining a memspace name for the catbase,
+		which is required when we PDump the write of the physical catbase into
+		the FW structure.
+	 */
+	psDeviceNode->sDevId.pszPDumpDevName	= PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+	psDeviceNode->pfnPDumpInitDevice = &RGXResetPDump;
+#endif /* PDUMP */
+
+	OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK);
+	OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE);
+
+	/* Configure MMU specific stuff */
+	RGXMMUInit_Register(psDeviceNode);
+
+	psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate;
+
+	psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick;
+
+	/* Register RGX to receive notifies when other devices complete some work */
+	PVRSRVRegisterCmdCompleteNotify(&psDeviceNode->hCmdCompNotify, &RGXScheduleProcessQueuesKM, psDeviceNode);
+
+	psDeviceNode->pfnInitDeviceCompatCheck	= &RGXDevInitCompatCheck;
+
+	/* Register callbacks for creation of device memory contexts */
+	psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+	psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+	/* Register callbacks for Unified Fence Objects */
+	psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock;
+	psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock;
+
+	/* Register callback for checking the device's health */
+	psDeviceNode->pfnUpdateHealthStatus = RGXUpdateHealthStatus;
+
+	/* Register method to service the FW HWPerf buffer */
+	psDeviceNode->pfnServiceHWPerf = RGXHWPerfDataStoreCB;
+
+	/* Register callback for getting the device version information string */
+	psDeviceNode->pfnDeviceVersionString = RGXDevVersionString;
+
+	/* Register callback for getting the device clock speed */
+	psDeviceNode->pfnDeviceClockSpeed = RGXDevClockSpeed;
+
+	/* Register callback for soft resetting some device modules */
+	psDeviceNode->pfnSoftReset = RGXSoftReset;
+
+	/* Register callback for resetting the HWR logs */
+	psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs;
+
+#if defined(RGXFW_ALIGNCHECKS)
+	/* Register callback for checking alignment of UM structures */
+	psDeviceNode->pfnAlignmentCheck = RGXAlignmentCheck;
+#endif
+
+	/* Register callbacks for checking the supported features and getting the
+	 * corresponding values */
+	psDeviceNode->pfnGetDeviceFeatureValue = RGXBvncGetSupportedFeatureValue;
+
+	/* Callback for checking if system layer supports FBC 3.1 */
+	psDeviceNode->pfnHasFBCDCVersion31 = RGXSystemHasFBCDCVersion31;
+
+	/* Set up required support for dummy page */
+	OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0);
+	OSAtomicWrite(&(psDeviceNode->sDevZeroPage.atRefCounter), 0);
+
+	/* Set the order to 0 */
+	psDeviceNode->sDummyPage.sPageHandle.ui32Order = 0;
+	psDeviceNode->sDevZeroPage.sPageHandle.ui32Order = 0;
+
+	/* Set the size of the Dummy page to zero */
+	psDeviceNode->sDummyPage.ui32Log2PgSize = 0;
+
+	/* Set the size of the Zero page to zero */
+	psDeviceNode->sDevZeroPage.ui32Log2PgSize = 0;
+
+	/* Set the Dummy page phys addr */
+	psDeviceNode->sDummyPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
+
+	/* Set the Zero page phys addr */
+	psDeviceNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
+
+	/* The lock can be acquired from MISR (Z-buffer) path */
+	eError = OSLockCreate(&psDeviceNode->sDummyPage.psPgLock);
+	if (PVRSRV_OK != eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__));
+		return eError;
+	}
+
+	/* Create the lock for zero page */
+	eError = OSLockCreate(&psDeviceNode->sDevZeroPage.psPgLock);
+	if (PVRSRV_OK != eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create Zero page lock", __func__));
+		goto free_dummy_page;
+	}
+#if defined(PDUMP)
+	psDeviceNode->sDummyPage.hPdumpPg = NULL;
+	psDeviceNode->sDevZeroPage.hPdumpPg = NULL;
+#endif
+
+	/*********************
+	 * Device info setup *
+	 *********************/
+	/* Allocate device control block */
+	psDevInfo = OSAllocZMem(sizeof(*psDevInfo));
+	if (psDevInfo == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to alloc memory for DevInfo", __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	/* Default psTrampoline to point to null struct */
+	psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline;
+
+	/* create locks for the context lists stored in the DevInfo structure.
+	 * these lists are modified on context create/destroy and read by the
+	 * watchdog thread
+	 */
+
+	eError = OSWRLockCreate(&(psDevInfo->hRenderCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create render context list lock", __func__));
+		goto e0;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hComputeCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create compute context list lock", __func__));
+		goto e1;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hTransferCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create transfer context list lock", __func__));
+		goto e2;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hTDMCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create TDM context list lock", __func__));
+		goto e3;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hKickSyncCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create kick sync context list lock", __func__));
+		goto e4;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hMemoryCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create memory context list lock", __func__));
+		goto e5;
+	}
+
+	eError = OSLockCreate(&psDevInfo->hLockKCCBDeferredCommandsList);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create KCCB deferred commands list lock", __func__));
+		goto e6;
+	}
+	dllist_init(&(psDevInfo->sKCCBDeferredCommandsListHead));
+
+	dllist_init(&(psDevInfo->sRenderCtxtListHead));
+	dllist_init(&(psDevInfo->sComputeCtxtListHead));
+	dllist_init(&(psDevInfo->sTransferCtxtListHead));
+	dllist_init(&(psDevInfo->sTDMCtxtListHead));
+	dllist_init(&(psDevInfo->sKickSyncCtxtListHead));
+
+	dllist_init(&(psDevInfo->sCommonCtxtListHead));
+	psDevInfo->ui32CommonCtxtCurrentID = 1;
+
+	eError = OSWRLockCreate(&psDevInfo->hCommonCtxtListLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create common context list lock", __func__));
+		goto e7;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psDevInfo->sRegCongfig.hLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create register configuration lock", __func__));
+		goto e8;
+	}
+
+	eError = OSLockCreate(&psDevInfo->hBPLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for break points", __func__));
+		goto e9;
+	}
+
+	eError = OSLockCreate(&psDevInfo->hRGXFWIfBufInitLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for trace buffers", __func__));
+		goto e10;
+	}
+#endif
+
+	eError = OSLockCreate(&psDevInfo->hCCBStallCheckLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB checking lock", __func__));
+		goto e11;
+	}
+	eError = OSLockCreate(&psDevInfo->hCCBRecoveryLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB recovery lock", __func__));
+		goto e12;
+	}
+
+	dllist_init(&psDevInfo->sMemoryContextList);
+
+	/* Initialise ui32SLRHoldoffCounter */
+	if (RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS > DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT)
+	{
+		psDevInfo->ui32SLRHoldoffCounter = RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS / DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+	}
+	else
+	{
+		psDevInfo->ui32SLRHoldoffCounter = 0;
+	}
+
+	/* Setup static data and callbacks on the device specific device info */
+	psDevInfo->psDeviceNode		= psDeviceNode;
+
+	psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+	psDevInfo->pvDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+	/*
+	 * Map RGX Registers
+	 */
+#if !defined(NO_HARDWARE)
+	psDevInfo->pvRegsBaseKM = (void __iomem *) OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase,
+			psDeviceNode->psDevConfig->ui32RegsSize,
+			PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+
+	if (psDevInfo->pvRegsBaseKM == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to create RGX register mapping", __func__));
+		eError = PVRSRV_ERROR_BAD_MAPPING;
+		goto e13;
+	}
+#endif
+
+	psDeviceNode->pvDevice = psDevInfo;
+
+	eError = RGXBvncInitialiseConfiguration(psDeviceNode);
+	if (PVRSRV_OK != eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Unsupported HW device detected by driver", __func__));
+		goto e14;
+	}
+
+	/* pdump info about the core */
+	PDUMPCOMMENT("RGX Version Information (KM): %d.%d.%d.%d",
+			psDevInfo->sDevFeatureCfg.ui32B,
+			psDevInfo->sDevFeatureCfg.ui32V,
+			psDevInfo->sDevFeatureCfg.ui32N,
+			psDevInfo->sDevFeatureCfg.ui32C);
+
+	eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo,
+			&psDeviceNode->sDummyPage.ui32Log2PgSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e14;
+	}
+
+	/* Set the zero page size to match the heap with the biggest page size */
+	psDeviceNode->sDevZeroPage.ui32Log2PgSize = psDeviceNode->sDummyPage.ui32Log2PgSize;
+
+	eError = RGXHWPerfInit(psDevInfo);
+	PVR_LOGG_IF_ERROR(eError, "RGXHWPerfInit", e14);
+
+	/* Register callback for dumping debug info */
+	eError = PVRSRVRegisterDbgRequestNotify(&psDevInfo->hDbgReqNotify,
+			psDeviceNode,
+			RGXDebugRequestNotify,
+			DEBUG_REQUEST_SYS,
+			psDevInfo);
+	PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+	{
+		RGXMipsMMUInit_Register(psDeviceNode);
+	}
+
+	/* The device shared-virtual-memory heap address-space size is stored here for faster
+	   look-up without having to walk the device heap configuration structures during
+	   client device connection (i.e. this size is relative to a zero-based offset) */
+	if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+	{
+		psDeviceNode->ui64GeneralSVMHeapTopVA = 0;
+	}
+	else
+	{
+		psDeviceNode->ui64GeneralSVMHeapTopVA = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE;
+	}
+
+	if (NULL != psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit)
+	{
+		psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit(psDeviceNode->psDevConfig,
+				psDevInfo->sDevFeatureCfg.ui64Features);
+	}
+
+	/* Initialise the device dependent bridges */
+	eError = DeviceDepBridgeInit(psDevInfo->sDevFeatureCfg.ui64Features);
+	PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeInit");
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+	eError = OSLockCreate(&psDevInfo->hCounterDumpingLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for counter sampling.", __func__));
+		goto e14;
+	}
+#endif
+
+	*ppsDevInfo = psDevInfo;
+
+	return PVRSRV_OK;
+
+	e14:
+#if !defined(NO_HARDWARE)
+	OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM,
+			psDevInfo->ui32RegSize,
+			PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+
+	e13:
+#endif /* !NO_HARDWARE */
+	OSLockDestroy(psDevInfo->hCCBRecoveryLock);
+	e12:
+	OSLockDestroy(psDevInfo->hCCBStallCheckLock);
+	e11:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock);
+	e10:
+	OSLockDestroy(psDevInfo->hBPLock);
+	e9:
+	OSLockDestroy(psDevInfo->sRegCongfig.hLock);
+	e8:
+#endif
+	OSWRLockDestroy(psDevInfo->hCommonCtxtListLock);
+	e7:
+	OSLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList);
+	e6:
+	OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+	e5:
+	OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock);
+	e4:
+	OSWRLockDestroy(psDevInfo->hTDMCtxListLock);
+	e3:
+	OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+	e2:
+	OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+	e1:
+	OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+	e0:
+	OSFreeMem(psDevInfo);
+
+	/* Destroy the zero page lock created above */
+	OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock);
+
+	free_dummy_page:
+	/* Destroy the dummy page lock created above */
+	OSLockDestroy(psDeviceNode->sDummyPage.psPgLock);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
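+/*
+	RGXDevBVNCString
+
+	Builds the device "B.V.N.C" version string on first use, caches it in
+	psDevInfo and returns it; returns NULL if the allocation fails.
+ */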
+IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_PCHAR psz = psDevInfo->sDevFeatureCfg.pszBVNCString;
+	if (NULL == psz)
+	{
+		IMG_CHAR pszBVNCInfo[RGX_HWPERF_MAX_BVNC_LEN];
+		size_t uiBVNCStringSize;
+		size_t uiStringLength;
+
+		uiStringLength = OSSNPrintf(pszBVNCInfo, RGX_HWPERF_MAX_BVNC_LEN, "%d.%d.%d.%d",
+				psDevInfo->sDevFeatureCfg.ui32B,
+				psDevInfo->sDevFeatureCfg.ui32V,
+				psDevInfo->sDevFeatureCfg.ui32N,
+				psDevInfo->sDevFeatureCfg.ui32C);
+		PVR_ASSERT(uiStringLength < RGX_HWPERF_MAX_BVNC_LEN);
+
+		uiBVNCStringSize = (uiStringLength + 1) * sizeof(IMG_CHAR);
+		psz = OSAllocMem(uiBVNCStringSize);
+		if (NULL != psz)
+		{
+			OSCachedMemCopy(psz, pszBVNCInfo, uiBVNCStringSize);
+			psDevInfo->sDevFeatureCfg.pszBVNCString = psz;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+					"%s: Allocating memory for BVNC Info string failed",
+					__func__));
+		}
+	}
+
+	return psz;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDevVersionString
+@Description    Gets the version string for the given device node and returns
+                a pointer to it in ppszVersionString. It is then the
+                responsibility of the caller to free this memory.
+@Input          psDeviceNode            Device node from which to obtain the
+                                        version string
+@Output	        ppszVersionString	Contains the version string upon return
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode,
+		IMG_CHAR **ppszVersionString)
+{
+#if defined(NO_HARDWARE) || defined(EMULATOR)
+	const IMG_CHAR szFormatString[] = "Rogue Version: %s (SW)";
+#else
+	const IMG_CHAR szFormatString[] = "Rogue Version: %s (HW)";
+#endif
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	IMG_PCHAR pszBVNC;
+	size_t uiStringLength;
+
+	if (psDeviceNode == NULL || ppszVersionString == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+	pszBVNC = RGXDevBVNCString(psDevInfo);
+
+	if (NULL == pszBVNC)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	uiStringLength = OSStringLength(pszBVNC);
+	uiStringLength += (sizeof(szFormatString) - 2); /* sizeof includes the null, -2 for "%s" */
+	*ppszVersionString = OSAllocMem(uiStringLength * sizeof(IMG_CHAR));
+	if (*ppszVersionString == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	OSSNPrintf(*ppszVersionString, uiStringLength, szFormatString,
+		pszBVNC);
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function       RGXDevClockSpeed
+@Description    Gets the clock speed for the given device node and returns
+                it in pui32RGXClockSpeed.
+@Input          psDeviceNode		Device node
+@Output         pui32RGXClockSpeed  Variable for storing the clock speed
+@Return         PVRSRV_ERROR
+ */ /***************************************************************************/
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode,
+		IMG_PUINT32  pui32RGXClockSpeed)
+{
+	RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+
+	/* get clock speed */
+	*pui32RGXClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+	return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function		RGXVzInitCreateFWKernelMemoryContext
+
+ @Description	Called to perform additional initialisation during firmware
+ 	 	 	 	kernel context creation.
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXVzInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+	return RGXVzCreateFWKernelMemoryContext(psDeviceNode);
+}
+
+/*!
+ *******************************************************************************
+
+ @Function		RGXVzDeInitDestroyFWKernelMemoryContext
+
+ @Description	Called to perform additional deinitialisation during firmware
+ 	 	 	 	kernel context destruction.
+ ******************************************************************************/
+
+void RGXVzDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_NATIVE);
+	RGXVzDestroyFWKernelMemoryContext(psDeviceNode);
+}
+
+/*!
+ *******************************************************************************
+
+ @Function		RGXVzInitHeaps
+
+ @Description	Called to perform additional initialisation
+ ******************************************************************************/
+static PVRSRV_ERROR RGXVzInitHeaps(DEVICE_MEMORY_INFO *psNewMemoryInfo,
+		DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor)
+{
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+#if defined(RGXFW_NUM_OS) && (1 < RGXFW_NUM_OS)
+	{
+		IMG_UINT32 uiIdx;
+		IMG_UINT32 uiStringLength;
+		IMG_UINT32 uiStringLengthMax = 32;
+
+		uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1);
+
+		/* Create additional guest OSID firmware heaps */
+		for (uiIdx = 1; uiIdx < RGXFW_NUM_OS; uiIdx++)
+		{
+			/* Start by allocating memory for this guest OSID heap identification string */
+			psDeviceMemoryHeapCursor->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR));
+			if (psDeviceMemoryHeapCursor->pszName == NULL)
+			{
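+				/* Unwind: step the cursor back over the guest heaps
+				 * created so far, freeing each identifier string */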
+				for (uiIdx = uiIdx - 1; uiIdx > 0; uiIdx--)
+				{
+					void *pszName;
+
+					psDeviceMemoryHeapCursor--;
+					pszName = (void *)psDeviceMemoryHeapCursor->pszName;
+					OSFreeMem(pszName);
+				}
+
+				return PVRSRV_ERROR_OUT_OF_MEMORY;
+			}
+
+			/* Append the guest OSID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */
+			OSSNPrintf((IMG_CHAR *)psDeviceMemoryHeapCursor->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, uiIdx);
+
+			/* Use the common blueprint template support function to initialise the heap */
+			*psDeviceMemoryHeapCursor = _blueprint_init((IMG_CHAR *)psDeviceMemoryHeapCursor->pszName,
+					RGX_FIRMWARE_RAW_HEAP_BASE + (uiIdx * RGX_FIRMWARE_RAW_HEAP_SIZE),
+					RGX_FIRMWARE_RAW_HEAP_SIZE,
+					0,
+					0);
+
+			/* Append additional guest(s) firmware heap to host driver firmware context heap configuration */
+			psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps += 1;
+
+			/* advance to the next heap */
+			psDeviceMemoryHeapCursor++;
+		}
+	}
+#endif
+	return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function		RGXVzDeInitHeaps
+
+ @Description	Called to perform additional deinitialisation
+ ******************************************************************************/
+static void RGXVzDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo)
+{
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_NATIVE);
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+#if defined(RGXFW_NUM_OS) && (1 < RGXFW_NUM_OS)
+	{
+		IMG_UINT32 uiIdx;
+		IMG_UINT64 uiBase, uiSpan;
+		DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor;
+		psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap;
+		uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE;
+		uiSpan = uiBase + ((RGXFW_NUM_OS - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE);
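+		/* [uiBase, uiSpan) covers the guest OSID firmware heaps, which sit
+		 * immediately after the host firmware raw heap */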
+
+		/* Scan the whole heap list until all guest firmware heaps have
+		 * been counted off; the index only advances on a match */
+		for (uiIdx = 1; uiIdx < RGXFW_NUM_OS; psDeviceMemoryHeapCursor++)
+		{
+			/* Safe to do as the guest firmware heaps are last in the list */
+			if (psDeviceMemoryHeapCursor->sHeapBaseAddr.uiAddr >= uiBase &&
+					psDeviceMemoryHeapCursor->sHeapBaseAddr.uiAddr <  uiSpan)
+			{
+				void *pszName = (void*)psDeviceMemoryHeapCursor->pszName;
+				OSFreeMem(pszName);
+				uiIdx += 1;
+			}
+		}
+	}
+#endif
+}
+
+/******************************************************************************
+ End of file (rgxinit.c)
+ ******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxinit.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxinit.h
new file mode 100644
index 0000000..1427e1f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxinit.h
@@ -0,0 +1,317 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX initialisation header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXINIT_H__)
+#define __RGXINIT_H__
+
+#include "connection_server.h"
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_bridge.h"
+#include "rgxfwload.h"
+
+
+/*!
+*******************************************************************************
+
+ @Function	RGXInitDevPart2
+
+ @Description
+
+ Second part of server-side RGX initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE	*psDeviceNode,
+							  IMG_UINT32			ui32DeviceFlags,
+							  IMG_UINT32			ui32HWPerfHostBufSizeKB,
+							  IMG_UINT32			ui32HWPerfHostFilter,
+							  RGX_ACTIVEPM_CONF		eActivePMConf);
+
+PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE   *psDeviceNode,
+                                  IMG_DEVMEM_SIZE_T    ui32FWCodeLen,
+                                  IMG_DEVMEM_SIZE_T    ui32FWDataLen,
+                                  IMG_DEVMEM_SIZE_T    uiFWCorememCodeLen,
+                                  IMG_DEVMEM_SIZE_T    uiFWCorememDataLen);
+
+
+/*!
+*******************************************************************************
+
+ @Function	RGXInitFirmware
+
+ @Description
+
+ Server-side RGX firmware initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR
+RGXInitFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                IMG_BOOL                 bEnableSignatureChecks,
+                IMG_UINT32               ui32SignatureChecksBufSize,
+                IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                IMG_UINT64               ui64HWPerfFilter,
+                IMG_UINT32               ui32RGXFWAlignChecksArrLength,
+                IMG_UINT32               *pui32RGXFWAlignChecks,
+                IMG_UINT32               ui32ConfigFlags,
+                IMG_UINT32               ui32LogType,
+                IMG_UINT32               ui32FilterFlags,
+                IMG_UINT32               ui32JonesDisableMask,
+                IMG_UINT32               ui32HWRDebugDumpLimit,
+                IMG_UINT32               ui32HWPerfCountersDataSize,
+                IMG_UINT32               *pui32TPUTrilinearFracMask,
+                RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+                FW_PERF_CONF             eFirmwarePerf,
+                IMG_UINT32               ui32ConfigFlagsExt);
+
+
+/*!
+*******************************************************************************
+
+ @Function	RGXLoadAndGetFWData
+
+ @Description
+
+ Load FW and return pointer to FW data.
+
+ @Input psDeviceNode - device node
+
+ @Output ppsRGXFW - returned firmware image handle
+
+ @Return   const void * - pointer to FW data
+
+******************************************************************************/
+const void *RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, struct RGXFW **ppsRGXFW);
+
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+
+ @Function	RGXInitHWPerfCounters
+
+ @Description
+
+ Initialisation of the performance counters
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE	*psDeviceNode);
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function	RGXRegisterDevice
+
+ @Description
+
+ Registers the device with the system
+
+ @Input:   psDeviceNode - device node
+ @Output:  ppsDevInfo   - device info
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode,
+                               PVRSRV_RGXDEV_INFO **ppsDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevBVNCString
+
+ @Description
+
+ Returns the Device BVNC string. It will allocate and fill it first, if necessary.
+
+ @Input:   psDevInfo - device info (must not be null)
+
+ @Return   IMG_PCHAR - pointer to BVNC string
+
+******************************************************************************/
+IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function	DevDeInitRGX
+
+ @Description
+
+ Reset and deinitialise the chip
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#if !defined(NO_HARDWARE)
+
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function     SORgxGpuUtilStatsRegister
+
+ @Description  SO Interface function called from the OS layer implementation.
+               Initialise data used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument). This function must be called only once for each
+               different user/handle.
+
+ @Input        phGpuUtilUser - Pointer to handle used to identify a user of
+                               RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser);
+
+
+/*!
+*******************************************************************************
+
+ @Function     SORgxGpuUtilStatsUnregister
+
+ @Description  SO Interface function called from the OS layer implementation.
+               Free data previously used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument).
+
+ @Input        hGpuUtilUser - Handle used to identify a user of
+                              RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser);
+#endif /* !defined(NO_HARDWARE) */
+
+
+/*!
+*******************************************************************************
+
+ @Function		RGXVirtPopulateLMASubArenas
+
+ @Description	Populates the LMA arenas based on the min/max values passed by
+				the client during initialisation. GPU Virtualisation Validation
+				only.
+
+ @Input			psDeviceNode	: Pointer to a device info structure.
+				ui32NumElements	: Total number of min / max values passed by
+								  the client
+				pui32Elements	: The array containing all the min / max values
+								  passed by the client, all bundled together
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXVirtPopulateLMASubArenas(PVRSRV_DEVICE_NODE	* psDeviceNode,
+                                         IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+                                         IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+                                         IMG_BOOL bEnableTrustedDeviceAceConfig);
+
+/*!
+ *******************************************************************************
+
+ @Function      RGXInitCreateFWKernelMemoryContext
+
+ @Description   Called to perform initialisation during firmware kernel context
+                creation.
+
+ @Input         psDeviceNode  device node
+ ******************************************************************************/
+PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+ *******************************************************************************
+
+ @Function      RGXDeInitDestroyFWKernelMemoryContext
+
+ @Description   Called to perform deinitialisation during firmware kernel
+                context destruction.
+
+ @Input         psDeviceNode  device node
+ ******************************************************************************/
+void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+ *******************************************************************************
+
+ @Function      RGXVzInitCreateFWKernelMemoryContext
+
+ @Description   Called to perform additional initialisation during firmware
+                kernel context creation.
+
+ @Input         psDeviceNode  device node
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+ *******************************************************************************
+
+ @Function      RGXVzDeInitDestroyFWKernelMemoryContext
+
+ @Description   Called to perform additional deinitialisation during firmware
+                kernel context destruction.
+
+ @Input         psDeviceNode  device node
+ ******************************************************************************/
+void RGXVzDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif /* __RGXINIT_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxkicksync.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxkicksync.c
new file mode 100644
index 0000000..0798978
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxkicksync.c
@@ -0,0 +1,829 @@
+/*************************************************************************/ /*!
+@File           rgxkicksync.c
+@Title          Server side of the sync only kick API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgxkicksync.h"
+
+#include "rgxdevice.h"
+#include "rgxmem.h"
+#include "rgxfwutils.h"
+#include "allocmem.h"
+#include "sync.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_KICKSYNC_UFO_DUMP	0
+
+//#define KICKSYNC_CHECKPOINT_DEBUG 1
+
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+struct _RGX_SERVER_KICKSYNC_CONTEXT_
+{
+	PVRSRV_DEVICE_NODE        * psDeviceNode;          /* Owning device node */
+	RGX_SERVER_COMMON_CONTEXT * psServerCommonContext; /* FW common context used for GP DM kicks */
+	PVRSRV_CLIENT_SYNC_PRIM   * psSync;                /* Cleanup sync checked before teardown */
+	DLLIST_NODE                 sListNode;             /* Entry in the per-device kick sync context list */
+	SYNC_ADDR_LIST              sSyncAddrListFence;    /* Scratch list of fence UFO addresses */
+	SYNC_ADDR_LIST              sSyncAddrListUpdate;   /* Scratch list of update UFO addresses */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POS_LOCK                    hLock;                 /* Serialises kicks on this context */
+#endif
+};
+
+
+PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA             * psConnection,
+                                              PVRSRV_DEVICE_NODE          * psDeviceNode,
+                                              IMG_HANDLE					hMemCtxPrivData,
+											  IMG_UINT32					ui32PackedCCBSizeU88,
+                                              RGX_SERVER_KICKSYNC_CONTEXT ** ppsKickSyncContext)
+{
+	PVRSRV_RGXDEV_INFO          * psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC              * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext;
+	RGX_COMMON_CONTEXT_INFO      sInfo;
+	PVRSRV_ERROR                 eError = PVRSRV_OK;
+	IMG_UINT32					 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2;
+
+	/* Prepare cleanup struct */
+	* ppsKickSyncContext = NULL;
+	psKickSyncContext = OSAllocZMem(sizeof(*psKickSyncContext));
+	if (psKickSyncContext == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psKickSyncContext->hLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+									__func__,
+									PVRSRVGetErrorString(eError)));
+		goto err_lockcreate;
+	}
+#endif
+
+	psKickSyncContext->psDeviceNode = psDeviceNode;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+	                       & psKickSyncContext->psSync,
+	                       "kick sync cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "PVRSRVRGXCreateKickSyncContextKM: Failed to allocate cleanup sync (0x%x)",
+		         eError));
+		goto fail_syncalloc;
+	}
+
+	sInfo.psFWFrameworkMemDesc = NULL;
+
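+	/* ui32PackedCCBSizeU88 carries two log2 CCB sizes packed into one word:
+	 * the initial allocation size and the maximum (growable) size. This
+	 * assumes the usual byte-lane unpacking, i.e. U32toU8_Unpack1 reads
+	 * bits 7:0 and U32toU8_Unpack2 bits 15:8; a zero byte selects the
+	 * RGX_KICKSYNC_CCB_*SIZE_LOG2 defaults passed below. */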
+	ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88);
+	ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88);
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 REQ_TYPE_KICKSYNC,
+									 RGXFWIF_DM_GP,
+									 NULL,
+									 0,
+									 psFWMemContextMemDesc,
+									 NULL,
+									 ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_KICKSYNC_CCB_SIZE_LOG2,
+									 ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_KICKSYNC_CCB_MAX_SIZE_LOG2,
+	                                 0, /* priority */
+									 & sInfo,
+									 & psKickSyncContext->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+	OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sKickSyncCtxtListHead), &(psKickSyncContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock);
+
+	SyncAddrListInit(&psKickSyncContext->sSyncAddrListFence);
+	SyncAddrListInit(&psKickSyncContext->sSyncAddrListUpdate);
+
+	* ppsKickSyncContext = psKickSyncContext;
+	return PVRSRV_OK;
+
+fail_contextalloc:
+fail_syncalloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psKickSyncContext->hLock);
+err_lockcreate:
+#endif
+	OSFreeMem(psKickSyncContext);
+	return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext)
+{
+	PVRSRV_ERROR         eError    = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO * psDevInfo = psKickSyncContext->psDeviceNode->pvDevice;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psKickSyncContext->psDeviceNode,
+	                                          psKickSyncContext->psServerCommonContext,
+	                                          psKickSyncContext->psSync,
+	                                          RGXFWIF_DM_GP,
+	                                          PDUMP_FLAGS_NONE);
+
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+	/* ... it has so we can free its resources */
+
+	OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock);
+	dllist_remove_node(&(psKickSyncContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock);
+
+	FWCommonContextFree(psKickSyncContext->psServerCommonContext);
+	SyncPrimFree(psKickSyncContext->psSync);
+
+	SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListFence);
+	SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListUpdate);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psKickSyncContext->hLock);
+#endif
+
+	OSFreeMem(psKickSyncContext);
+
+	return PVRSRV_OK;
+}
+
+void DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                           void *pvDumpDebugFile,
+                           IMG_UINT32 ui32VerbLevel)
+{
+	DLLIST_NODE *psNode, *psNext;
+	OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock);
+	dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx =
+				IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode);
+
+		if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext)
+		{
+			DumpFWCommonContextInfo(psCurrentServerKickSyncCtx->psServerCommonContext,
+			                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+		}
+	}
+	OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_UINT32 ui32ContextBitMask = 0;
+
+	OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode);
+
+		if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext)
+		{
+			if (CheckStalledClientCommonContext(psCurrentServerKickSyncCtx->psServerCommonContext, RGX_KICK_TYPE_DM_GP) == PVRSRV_ERROR_CCCB_STALLED)
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_GP;
+			}
+		}
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock);
+	return ui32ContextBitMask;
+}
+
+PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext,
+
+                                 IMG_UINT32                    ui32ClientCacheOpSeqNum,
+
+                                 IMG_UINT32                    ui32ClientFenceCount,
+                                 SYNC_PRIMITIVE_BLOCK           ** pauiClientFenceUFOSyncPrimBlock,
+                                 IMG_UINT32                  * paui32ClientFenceOffset,
+                                 IMG_UINT32                  * paui32ClientFenceValue,
+
+                                 IMG_UINT32                    ui32ClientUpdateCount,
+                                 SYNC_PRIMITIVE_BLOCK           ** pauiClientUpdateUFOSyncPrimBlock,
+                                 IMG_UINT32                  * paui32ClientUpdateOffset,
+                                 IMG_UINT32                  * paui32ClientUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+                                 IMG_UINT32                    ui32ServerSyncPrims,
+                                 IMG_UINT32                  * paui32ServerSyncFlags,
+                                 SERVER_SYNC_PRIMITIVE      ** pasServerSyncs,
+#endif
+                                 PVRSRV_FENCE                  iCheckFence,
+                                 PVRSRV_TIMELINE               iUpdateTimeline,
+                                 PVRSRV_FENCE                * piUpdateFence,
+                                 IMG_CHAR                      szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+
+                                 IMG_UINT32                    ui32ExtJobRef)
+{
+	RGXFWIF_KCCB_CMD         sKickSyncKCCBCmd;
+	RGX_CCB_CMD_HELPER_DATA  asCmdHelperData[1];
+	PVRSRV_ERROR             eError;
+	PVRSRV_ERROR             eError2;
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	IMG_UINT32               i;
+#endif
+	IMG_BOOL                 bCCBStateOpen = IMG_FALSE;
+	PRGXFWIF_UFO_ADDR        *pauiClientFenceUFOAddress = NULL;
+	PRGXFWIF_UFO_ADDR        *pauiClientUpdateUFOAddress = NULL;
+	PVRSRV_FENCE             iUpdateFence = PVRSRV_NO_FENCE;
+	IMG_UINT32               ui32FWCtx = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr;
+	PVRSRV_RGXDEV_INFO       *psDevInfo = FWCommonContextGetRGXDevInfo(psKickSyncContext->psServerCommonContext);
+	IMG_UINT32               ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+	IMG_UINT64               uiCheckFenceUID = 0;
+	IMG_UINT64               uiUpdateFenceUID = 0;
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+	PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+	IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+	void *pvUpdateFenceFinaliseData = NULL;
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psKickSyncContext->hLock);
+#endif
+
+	eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListFence,
+							ui32ClientFenceCount,
+							pauiClientFenceUFOSyncPrimBlock,
+							paui32ClientFenceOffset);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_syncaddrlist;
+	}
+
+	if (ui32ClientFenceCount > 0)
+	{
+		pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs;
+	}
+
+	eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListUpdate,
+							ui32ClientUpdateCount,
+							pauiClientUpdateUFOSyncPrimBlock,
+							paui32ClientUpdateOffset);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_syncaddrlist;
+	}
+
+	if (ui32ClientUpdateCount > 0)
+	{
+		pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs;
+	}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	/* Sanity check the server fences */
+	for (i = 0; i < ui32ServerSyncPrims; i++)
+	{
+		if (0 == (paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Server syncs passed to a kick sync command must have the check flag set",
+					 __func__));
+			eError = PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+			goto out_unlock;
+		}
+	}
+#endif
+	/* Ensure the string is null-terminated (required for safety) */
+	szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH - 1] = '\0';
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	/* This will never be true if called from the bridge since piUpdateFence will always be valid */
+	if (iUpdateTimeline >= 0 && !piUpdateFence)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto out_unlock;
+	}
+
+	CHKPT_DBG((PVR_DBG_ERROR,
+			   "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), "
+			   "psKickSyncContext->psDeviceNode->hSyncCheckpointContext=<%p>...",
+			   __func__, iCheckFence,
+			   (void*)psKickSyncContext->psDeviceNode->hSyncCheckpointContext));
+	/* Resolve the sync checkpoints that make up the input fence */
+	eError = SyncCheckpointResolveFence(psKickSyncContext->psDeviceNode->hSyncCheckpointContext,
+	                                    iCheckFence,
+	                                    &ui32FenceSyncCheckpointCount,
+	                                    &apsFenceSyncCheckpoints,
+	                                    &uiCheckFenceUID);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_resolve_fence;
+	}
+
+	/* Create the output fence (if required) */
+	if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s: calling SyncCheckpointCreateFence (iUpdateTimeline=%d)...",
+				   __func__, iUpdateTimeline));
+		eError = SyncCheckpointCreateFence(psKickSyncContext->psDeviceNode,
+		                                   szUpdateFenceName,
+		                                   iUpdateTimeline,
+		                                   psKickSyncContext->psDeviceNode->hSyncCheckpointContext,
+										   &iUpdateFence,
+		                                   &uiUpdateFenceUID,
+		                                   &pvUpdateFenceFinaliseData,
+		                                   &psUpdateSyncCheckpoint,
+		                                   (void*)&psFenceTimelineUpdateSync,
+		                                   &ui32FenceTimelineUpdateValue);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)",
+					   __func__, eError));
+			goto fail_create_output_fence;
+		}
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s: ...returned from SyncCheckpointCreateFence "
+				   "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, "
+				   "ui32FenceTimelineUpdateValue=%u)",
+				   __func__, iUpdateFence, psFenceTimelineUpdateSync,
+				   ui32FenceTimelineUpdateValue));
+
+		/* Append the sync prim update for the timeline (if required) */
+		if (psFenceTimelineUpdateSync)
+		{
+			IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+			/* Allocate memory to hold the list of update values (including our timeline update) */
+			pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*paui32ClientUpdateValue) * (ui32ClientUpdateCount+1));
+			if (!pui32IntAllocatedUpdateValues)
+			{
+				/* Failed to allocate memory */
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto fail_alloc_update_values_mem;
+			}
+			OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateCount+1));
+			/* Copy the update values into the new memory, then append our timeline update value */
+			OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32ClientUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32ClientUpdateCount);
+			/* Now set the additional update value */
+			pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32ClientUpdateCount;
+			*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+			ui32ClientUpdateCount++;
+			/* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
+			paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
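+			/* Resulting layout of pui32IntAllocatedUpdateValues (from the
+			 * copy and append above):
+			 *   [0 .. n-1] client-supplied update values
+			 *   [n]        ui32FenceTimelineUpdateValue for the timeline sync
+			 * where n is the original ui32ClientUpdateCount, now bumped to n+1. */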
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+				for (iii=0; iii<ui32ClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR,
+							   "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x",
+							   __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+			/* Now append the timeline sync prim addr to the kicksync context update list */
+			SyncAddrListAppendSyncPrim(&psKickSyncContext->sSyncAddrListUpdate,
+			                           psFenceTimelineUpdateSync);
+		}
+	}
+
+	if (ui32FenceSyncCheckpointCount > 0)
+	{
+		/* Append the checks (from input fence) */
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   Append %d sync checkpoints to KickSync Fence "
+				   "(&psKickSyncContext->sSyncAddrListFence=<%p>)...",
+				   __func__, ui32FenceSyncCheckpointCount,
+				   (void*)&psKickSyncContext->sSyncAddrListFence));
+		SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListFence,
+									  ui32FenceSyncCheckpointCount,
+									  apsFenceSyncCheckpoints);
+		if (!pauiClientFenceUFOAddress)
+		{
+			pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs;
+		}
+		ui32ClientFenceCount += ui32FenceSyncCheckpointCount;
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+		{
+			IMG_UINT32 iii;
+			IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientFenceUFOAddress;
+
+			for (iii=0; iii<ui32ClientFenceCount; iii++)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s: pauiClientFenceUFOAddress[%d](<%p>) = 0x%x",
+						   __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+				pui32Tmp++;
+			}
+		}
+#endif
+	}
+
+	if (psUpdateSyncCheckpoint)
+	{
+		PVRSRV_ERROR eErr;
+
+		/* Append the update (from output fence) */
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   Append 1 sync checkpoint to KickSync Update "
+				   "(&psKickSyncContext->sSyncAddrListUpdate=<%p>)...",
+				   __func__, (void*)&psKickSyncContext->sSyncAddrListUpdate));
+		eErr = SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListUpdate,
+											 1,
+											 &psUpdateSyncCheckpoint);
+		if (eErr != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR,
+					   "%s:  ...done. SyncAddrListAppendCheckpoints() returned error (%d)",
+					   __func__, eErr));
+		}
+		else
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:  ...done.", __func__));
+		}
+		if (!pauiClientUpdateUFOAddress)
+		{
+			pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs;
+		}
+		ui32ClientUpdateCount++;
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+		{
+			IMG_UINT32 iii;
+			IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientUpdateUFOAddress;
+
+			for (iii=0; iii<ui32ClientUpdateCount; iii++)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s: pauiClientUpdateUFOAddress[%d](<%p>) = 0x%x",
+						   __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+				pui32Tmp++;
+			}
+		}
+#endif
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if (ENABLE_KICKSYNC_UFO_DUMP == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: dumping KICKSYNC fence/updates syncs...",
+				 __func__));
+		{
+			IMG_UINT32 ii;
+			PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiClientFenceUFOAddress;
+			IMG_UINT32 *pui32TmpIntFenceValue = paui32ClientFenceValue;
+			PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiClientUpdateUFOAddress;
+			IMG_UINT32 *pui32TmpIntUpdateValue = paui32ClientUpdateValue;
+
+			/* Dump Fence syncs and Update syncs */
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Prepared %d KickSync fence syncs "
+					 "(&psKickSyncContext->sSyncAddrListFence=<%p>, "
+					 "pauiClientFenceUFOAddress=<%p>):",
+					 __func__, ui32ClientFenceCount,
+					 (void*)&psKickSyncContext->sSyncAddrListFence,
+					 (void*)pauiClientFenceUFOAddress));
+			for (ii=0; ii<ui32ClientFenceCount; ii++)
+			{
+				if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s:   %d/%d<%p>. FWAddr=0x%x, "
+							 "CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+							 __func__, ii + 1, ui32ClientFenceCount,
+							 (void*)psTmpIntFenceUFOAddress,
+							 psTmpIntFenceUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
+							 __func__, ii + 1, ui32ClientFenceCount,
+							 (void*)psTmpIntFenceUFOAddress,
+							 psTmpIntFenceUFOAddress->ui32Addr,
+							 *pui32TmpIntFenceValue,
+							 *pui32TmpIntFenceValue));
+					pui32TmpIntFenceValue++;
+				}
+				psTmpIntFenceUFOAddress++;
+			}
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Prepared %d KickSync update syncs "
+					 "(&psKickSyncContext->sSyncAddrListUpdate=<%p>, "
+					 "pauiClientUpdateUFOAddress=<%p>):",
+					 __func__, ui32ClientUpdateCount,
+					 (void*)&psKickSyncContext->sSyncAddrListUpdate,
+					 (void*)pauiClientUpdateUFOAddress));
+			for (ii=0; ii<ui32ClientUpdateCount; ii++)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s:  Line %d, psTmpIntUpdateUFOAddress=<%p>",
+						   __func__, __LINE__,
+						   (void*)psTmpIntUpdateUFOAddress));
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s:  Line %d, pui32TmpIntUpdateValue=<%p>",
+						   __func__, __LINE__,
+						   (void*)pui32TmpIntUpdateValue));
+				if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s:   %d/%d<%p>. FWAddr=0x%x, "
+							 "UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+							 __func__, ii + 1, ui32ClientUpdateCount,
+							 (void*)psTmpIntUpdateUFOAddress,
+							 psTmpIntUpdateUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d",
+							 __func__, ii + 1, ui32ClientUpdateCount,
+							 (void*)psTmpIntUpdateUFOAddress,
+							 psTmpIntUpdateUFOAddress->ui32Addr,
+							 *pui32TmpIntUpdateValue));
+					pui32TmpIntUpdateValue++;
+				}
+				psTmpIntUpdateUFOAddress++;
+			}
+		}
+#endif
+
+	eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext),
+	                                ui32ClientFenceCount,
+	                                pauiClientFenceUFOAddress,
+	                                paui32ClientFenceValue,
+	                                ui32ClientUpdateCount,
+	                                pauiClientUpdateUFOAddress,
+	                                paui32ClientUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	                                ui32ServerSyncPrims,
+	                                paui32ServerSyncFlags,
+	                                SYNC_FLAG_MASK_ALL,
+	                                pasServerSyncs,
+#endif
+	                                0,
+	                                NULL,
+	                                RGXFWIF_CCB_CMD_TYPE_NULL,
+	                                ui32ExtJobRef,
+	                                ui32IntJobRef,
+	                                PDUMP_FLAGS_NONE,
+	                                NULL,
+	                                "KickSync",
+	                                bCCBStateOpen,
+	                                asCmdHelperData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdinit;
+	}
+
+	eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdacquire;
+	}
+
+	/*
+	 *  We should reserve space in the kernel CCB here and fill in the command
+	 *  directly.
+	 *  This is so that, if there isn't space in the kernel CCB, we can return
+	 *  with retry back to the services client before committing any operations.
+	 */
+
+	/*
+	 * We might only be kicking to flush out a padding packet, so only submit
+	 * the command if the command setup above was successful.
+	 */
+	if (eError == PVRSRV_OK)
+	{
+		/*
+		 * All the required resources are ready at this point, we can't fail so
+		 * take the required server sync operations and commit all the resources
+		 */
+		RGXCmdHelperReleaseCmdCCB(1,
+		                          asCmdHelperData,
+		                          "KickSync",
+		                          FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr);
+	}
+
+	/* Construct the kernel kicksync CCB command. */
+	sKickSyncKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+	sKickSyncKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext);
+	sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext));
+	sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+	sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+	/*
+	 * Submit the kicksync command to the firmware.
+	 */
+	RGXSRV_HWPERF_ENQ(psKickSyncContext,
+	                  OSGetCurrentClientProcessIDKM(),
+	                  ui32FWCtx,
+	                  ui32ExtJobRef,
+	                  ui32IntJobRef,
+	                  RGX_HWPERF_KICK_TYPE_SYNC,
+	                  iCheckFence,
+	                  iUpdateFence,
+	                  iUpdateTimeline,
+	                  uiCheckFenceUID,
+	                  uiUpdateFenceUID,
+	                  NO_DEADLINE,
+	                  NO_CYCEST);
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError2 = RGXScheduleCommand(psKickSyncContext->psDeviceNode->pvDevice,
+		                             RGXFWIF_DM_GP,
+		                             & sKickSyncKCCBCmd,
+		                             ui32ClientCacheOpSeqNum,
+		                             PDUMP_FLAGS_NONE);
+		if (eError2 != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	PVRGpuTraceEnqueueEvent(psKickSyncContext->psDeviceNode->pvDevice,
+	                        ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+	                        RGX_HWPERF_KICK_TYPE_SYNC);
+
+	if (eError2 != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "PVRSRVRGXKickSync failed to schedule kernel CCB command. (0x%x)",
+		         eError2));
+	}
+
+	/*
+	 * Now check eError (which may have returned an error from our earlier call
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+	 * so we check it now...
+	 */
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdacquire;
+	}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(NO_HARDWARE)
+	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+	if (psUpdateSyncCheckpoint)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x",
+				   __func__, (void*)psUpdateSyncCheckpoint,
+				   SyncCheckpointGetId(psUpdateSyncCheckpoint),
+				   SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+	}
+	if (psFenceTimelineUpdateSync)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   Updating NOHW sync prim<%p> to %d",
+				   __func__, (void*)psFenceTimelineUpdateSync,
+				   ui32FenceTimelineUpdateValue));
+		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+	}
+	SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+	*piUpdateFence = iUpdateFence;
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+	{
+		SyncCheckpointFinaliseFence(psKickSyncContext->psDeviceNode, iUpdateFence,
+									pvUpdateFenceFinaliseData,
+									psUpdateSyncCheckpoint, szUpdateFenceName);
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psKickSyncContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+fail_cmdacquire:
+fail_cmdinit:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListFence);
+	SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListUpdate);
+	if (iUpdateFence != PVRSRV_NO_FENCE)
+	{
+		SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+
+	/* Free memory allocated to hold update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+	}
+fail_alloc_update_values_mem:
+fail_create_output_fence:
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+	/* Free memory allocated to hold the resolved fence's checkpoints */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+fail_resolve_fence:
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+fail_syncaddrlist:
+out_unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psKickSyncContext->hLock);
+#endif
+	return eError;
+}
+
+
+/**************************************************************************//**
+ End of file (rgxkicksync.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxkicksync.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxkicksync.h
new file mode 100644
index 0000000..d318788
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxkicksync.h
@@ -0,0 +1,127 @@
+/*************************************************************************/ /*!
+@File           rgxkicksync.h
+@Title          Server side of the sync only kick API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXKICKSYNC_H__)
+#define __RGXKICKSYNC_H__
+
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+#include "sync_server.h"
+#include "rgxdevice.h"
+
+
+typedef struct _RGX_SERVER_KICKSYNC_CONTEXT_ RGX_SERVER_KICKSYNC_CONTEXT;
+
+/**************************************************************************/ /*!
+@Function       DumpKickSyncCtxtsInfo
+@Description    Function that dumps debug info for the kick sync contexts on this device
+@Return         none
+ */ /**************************************************************************/
+void DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                           void *pvDumpDebugFile,
+                           IMG_UINT32 ui32VerbLevel);
+
+/**************************************************************************/ /*!
+@Function       CheckForStalledClientKickSyncCtxt
+@Description    Function that checks if a kick sync client is stalled
+@Return         RGX_KICK_TYPE_DM_GP on stalled context. Otherwise, 0
+ */ /**************************************************************************/
+IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXCreateKickSyncContextKM
+@Description    Server-side implementation of RGXCreateKicksyncContext
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA             * psConnection,
+                                              PVRSRV_DEVICE_NODE          * psDeviceNode,
+                                              IMG_HANDLE					hMemCtxPrivData,
+											  IMG_UINT32					ui32PackedCCBSizeU88,
+                                              RGX_SERVER_KICKSYNC_CONTEXT ** ppsKicksyncContext);
+
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXDestroyKickSyncContextKM
+@Description    Server-side implementation of RGXDestroyKicksyncContext
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXKickSyncKM
+@Description    Kicks a sync only command
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext,
+
+                                 IMG_UINT32                    ui32ClientCacheOpSeqNum,
+
+                                 IMG_UINT32                    ui32ClientFenceCount,
+                                 SYNC_PRIMITIVE_BLOCK           ** pauiClientFenceUFOSyncPrimBlock,
+                                 IMG_UINT32                  * paui32ClientFenceSyncOffset,
+                                 IMG_UINT32                  * paui32ClientFenceValue,
+
+                                 IMG_UINT32                    ui32ClientUpdateCount,
+                                 SYNC_PRIMITIVE_BLOCK           ** pauiClientUpdateUFOSyncPrimBlock,
+                                 IMG_UINT32                  * paui32ClientUpdateSyncOffset,
+                                 IMG_UINT32                  * paui32ClientUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+                                 IMG_UINT32                    ui32ServerSyncPrims,
+                                 IMG_UINT32                  * paui32ServerSyncFlags,
+                                 SERVER_SYNC_PRIMITIVE      ** pasServerSyncs,
+#endif
+                                 PVRSRV_FENCE                  iCheckFence,
+                                 PVRSRV_TIMELINE               iUpdateTimeline,
+                                 PVRSRV_FENCE                * piUpdateFence,
+                                 IMG_CHAR                      szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+
+                                 IMG_UINT32                    ui32ExtJobRef);
+
+#endif /* __RGXKICKSYNC_H__ */
+
+/**************************************************************************//**
+ End of file (rgxkicksync.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxlayer.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxlayer.h
new file mode 100644
index 0000000..1be2a01
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxlayer.h
@@ -0,0 +1,739 @@
+/*************************************************************************/ /*!
+@File
+@Title          Header for Services abstraction layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declaration of an interface layer used to abstract code that
+                can be compiled outside of the DDK, potentially in a
+                completely different OS.
+                All the headers included by this file must also be copied to
+                the alternative source tree.
+                All the functions declared here must have a DDK implementation
+                inside the DDK source tree (e.g. rgxlayer_impl.h/.c) and
+                another different implementation in case they are used outside
+                of the DDK.
+                All of the functions accept as a first parameter a
+                "const void *hPrivate" argument. It should be used to pass
+                around any implementation specific data required.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGXLAYER_H__)
+#define __RGXLAYER_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */
+#include "rgx_bvnc_defs_km.h"
+#include "rgx_fw_info.h"
+#include "rgx_fwif_shared.h" /* includes rgx_common.h and mem_types.h */
+#include "rgx_meta.h"
+#include "rgx_mips.h"
+
+#include "rgxdefs_km.h"
+/* includes:
+ * rgx_cr_defs_km.h,
+ * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h),
+ * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h)
+ */
+
+
+/*!
+*******************************************************************************
+
+ @Function       RGXMemCopy
+
+ @Description    MemCopy implementation
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pvDst      : Pointer to the destination
+ @Input          pvSrc      : Pointer to the source location
+ @Input          uiSize     : The amount of memory to copy in bytes
+
+ @Return         void
+
+******************************************************************************/
+void RGXMemCopy(const void *hPrivate,
+                void *pvDst,
+                void *pvSrc,
+                size_t uiSize);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXMemSet
+
+ @Description    MemSet implementation
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pvDst      : Pointer to the start of the memory region
+ @Input          ui8Value   : The value to be written
+ @Input          uiSize     : The number of bytes to be set to ui8Value
+
+ @Return         void
+
+******************************************************************************/
+void RGXMemSet(const void *hPrivate,
+               void *pvDst,
+               IMG_UINT8 ui8Value,
+               size_t uiSize);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXCommentLog
+
+ @Description    Generic log function used for debugging or other purposes
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pszString  : Message to be printed
+ @Input          ...        : Variadic arguments
+
+ @Return         void
+
+******************************************************************************/
+__printf(2, 3)
+void RGXCommentLog(const void *hPrivate,
+                   const IMG_CHAR *pszString,
+                   ...);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXErrorLog
+
+ @Description    Generic error log function used for debugging or other purposes
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pszString  : Message to be printed
+ @Input          ...        : Variadic arguments
+
+ @Return         void
+
+******************************************************************************/
+__printf(2, 3)
+void RGXErrorLog(const void *hPrivate,
+                 const IMG_CHAR *pszString,
+                 ...);
+
+/* This macro is used to check whether a specific feature is present, given
+ * hPrivate. It should be used instead of calling RGXDeviceHasFeature with an
+ * explicit bit mask. */
+#define RGX_DEVICE_HAS_FEATURE(hPrivate, Feature) \
+			RGXDeviceHasFeature(hPrivate, RGX_FEATURE_##Feature##_BIT_MASK)
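+
+/* Illustrative use (the feature name is an example only): checking for the
+ * S7 top infrastructure from layer code that only holds the opaque handle:
+ *
+ *     if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+ *     {
+ *         ... feature-specific setup ...
+ *     }
+ *
+ * which expands to
+ * RGXDeviceHasFeature(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK).
+ */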
+
+/*!
+*******************************************************************************
+
+ @Function       RGXDeviceHasFeature
+
+ @Description    Checks if a device has a particular feature
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui64Feature  : Feature to check
+
+ @Return         IMG_TRUE if the given feature is available, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetFWCorememSize
+
+ @Description    Get the FW coremem size
+
+ @Input          hPrivate   : Implementation specific data
+
+ @Return         FW coremem size
+
+******************************************************************************/
+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function      RGXWriteReg32/64
+
+ @Description   Write a value to a 32/64 bit RGX register
+
+ @Input         hPrivate         : Implementation specific data
+ @Input         ui32RegAddr      : Register offset inside the register bank
+ @Input         ui32/64RegValue  : New register value
+
+ @Return        void
+
+******************************************************************************/
+void RGXWriteReg32(const void *hPrivate,
+                   IMG_UINT32 ui32RegAddr,
+                   IMG_UINT32 ui32RegValue);
+
+void RGXWriteReg64(const void *hPrivate,
+                   IMG_UINT32 ui32RegAddr,
+                   IMG_UINT64 ui64RegValue);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXReadReg32/64
+
+ @Description    Read a 32/64 bit RGX register
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui32RegAddr  : Register offset inside the register bank
+
+ @Return         Register value
+
+******************************************************************************/
+IMG_UINT32 RGXReadReg32(const void *hPrivate,
+                        IMG_UINT32 ui32RegAddr);
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate,
+                        IMG_UINT32 ui32RegAddr);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXReadModifyWriteReg64
+
+ @Description    Read-modify-write a 64 bit RGX register
+
+ @Input          hPrivate        : Implementation specific data.
+ @Input          ui32RegAddr     : Register offset inside the register bank.
+ @Input          ui64RegValue    : New register value.
+ @Input          ui64RegKeepMask : Keep the bits set in the mask.
+
+ @Return         Always returns PVRSRV_OK
+
+******************************************************************************/
+IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate,
+                                   IMG_UINT32 ui32RegAddr,
+                                   IMG_UINT64 ui64RegValue,
+                                   IMG_UINT64 ui64RegKeepMask);
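+
+/* Semantics sketch (a paraphrase of the DDK implementation added later in
+ * this series): the bits selected by ui64RegKeepMask are preserved, all
+ * other bits are taken from ui64RegValue:
+ *
+ *     old = RGXReadReg64(hPrivate, ui32RegAddr);
+ *     new = (old & ui64RegKeepMask) | (ui64RegValue & ~ui64RegKeepMask);
+ *     RGXWriteReg64(hPrivate, ui32RegAddr, new);
+ */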
+
+/*!
+*******************************************************************************
+
+ @Function       RGXPollReg32/64
+
+ @Description    Poll on a 32/64 bit RGX register until some bits are set/unset
+
+ @Input          hPrivate         : Implementation specific data
+ @Input          ui32RegAddr      : Register offset inside the register bank
+ @Input          ui32/64RegValue  : Value expected from the register
+ @Input          ui32/64RegMask   : Only the bits set in this mask will be
+                                    checked against ui32/64RegValue
+
+ @Return         PVRSRV_OK if the poll succeeds,
+                 PVRSRV_ERROR_TIMEOUT if the poll takes too long
+
+******************************************************************************/
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT32 ui32RegValue,
+                          IMG_UINT32 ui32RegMask);
+
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT64 ui64RegValue,
+                          IMG_UINT64 ui64RegMask);
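+
+/* Illustrative usage (a simplified form of the slave-port ready wait performed
+ * later in this series):
+ *
+ *     eError = RGXPollReg32(hPrivate,
+ *                           RGX_CR_META_SP_MSLVCTRL1,
+ *                           RGX_CR_META_SP_MSLVCTRL1_READY_EN,
+ *                           RGX_CR_META_SP_MSLVCTRL1_READY_EN);
+ *     if (eError != PVRSRV_OK) return eError;
+ */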
+
+/*!
+*******************************************************************************
+
+ @Function       RGXWaitCycles
+
+ @Description    Wait for a number of GPU cycles and/or microseconds
+
+ @Input          hPrivate    : Implementation specific data
+ @Input          ui32Cycles  : Number of GPU cycles to wait for in PDumps;
+                               it can also be used when running driver-live
+                               if desired (the next parameter is then ignored)
+ @Input          ui32WaitUs  : Number of microseconds to wait for when running
+                               driver-live
+
+ @Return         void
+
+******************************************************************************/
+void RGXWaitCycles(const void *hPrivate,
+                   IMG_UINT32 ui32Cycles,
+                   IMG_UINT32 ui32WaitUs);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireKernelMMUPC
+
+ @Description     Acquire the Kernel MMU Page Catalogue device physical address
+
+ @Input           hPrivate  : Implementation specific data
+ @Output          psPCAddr  : Returned page catalog address
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXWriteKernelMMUPC32/64
+
+ @Description     Write the Kernel MMU Page Catalogue to the 32/64 bit
+                  RGX register passed as argument.
+                  In a driver-live scenario without PDump these functions
+                  are the same as RGXWriteReg32/64 and they don't need
+                  to be reimplemented.
+
+ @Input           hPrivate        : Implementation specific data
+ @Input           ui32PCReg       : Register offset inside the register bank
+ @Input           ui32AlignShift  : PC register alignshift
+ @Input           ui32Shift       : PC register shift
+ @Input           ui32/64PCVal    : Page catalog value (aligned and shifted)
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXWriteKernelMMUPC64(const void *hPrivate,
+                           IMG_UINT32 ui32PCReg,
+                           IMG_UINT32 ui32PCRegAlignShift,
+                           IMG_UINT32 ui32PCRegShift,
+                           IMG_UINT64 ui64PCVal);
+
+void RGXWriteKernelMMUPC32(const void *hPrivate,
+                           IMG_UINT32 ui32PCReg,
+                           IMG_UINT32 ui32PCRegAlignShift,
+                           IMG_UINT32 ui32PCRegShift,
+                           IMG_UINT32 ui32PCVal);
+#else  /* defined(PDUMP) */
+
+#define RGXWriteKernelMMUPC64(priv, pcreg, alignshift, shift, pcval) \
+	RGXWriteReg64(priv, pcreg, pcval)
+
+#define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \
+	RGXWriteReg32(priv, pcreg, pcval)
+
+#endif /* defined(PDUMP) */
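+
+/* Note: in the non-PDUMP variants above, the alignshift and shift arguments
+ * are intentionally unused; they only drive the PDump cat-base capture in
+ * PDUMP builds. */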
+
+
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireGPURegsAddr
+
+ @Description     Acquire the GPU registers base device physical address
+
+ @Input           hPrivate       : Implementation specific data
+ @Output          psGPURegsAddr  : Returned GPU registers base address
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXMIPSWrapperConfig
+
+ @Description     Write GPU register bank transaction ID and MIPS boot mode
+                  to the MIPS wrapper config register (passed as argument).
+                  In a driver-live scenario without PDump this is the same as
+                  RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input           hPrivate          : Implementation specific data
+ @Input           ui32RegAddr       : Register offset inside the register bank
+ @Input           ui64GPURegsAddr   : GPU registers base address
+ @Input           ui32GPURegsAlign  : Register bank transactions alignment
+ @Input           ui32BootMode      : MIPS boot ISA mode
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXMIPSWrapperConfig(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT64 ui64GPURegsAddr,
+                          IMG_UINT32 ui32GPURegsAlign,
+                          IMG_UINT32 ui32BootMode);
+#else
+#define RGXMIPSWrapperConfig(priv, regaddr, gpuregsaddr, gpuregsalign, bootmode) \
+	RGXWriteReg64(priv, regaddr, ((gpuregsaddr) >> (gpuregsalign)) | (bootmode))
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireBootRemapAddr
+
+ @Description     Acquire the device physical address of the MIPS bootloader
+                  accessed through remap region
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psBootRemapAddr  : Base address of the remapped bootloader
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXBootRemapConfig
+
+ @Description     Configure the bootloader remap registers passed as arguments.
+                  In a driver-live scenario without PDump this is the same as
+                  two RGXWriteReg64 calls and it doesn't need to be reimplemented.
+
+ @Input           hPrivate             : Implementation specific data
+ @Input           ui32Config1RegAddr   : Remap config1 register offset
+ @Input           ui64Config1RegValue  : Remap config1 register value
+ @Input           ui32Config2RegAddr   : Remap config2 register offset
+ @Input           ui64Config2PhyAddr   : Output remapped aligned physical address
+ @Input           ui64Config2PhyMask   : Mask for the output physical address
+ @Input           ui64Config2Settings  : Extra settings for this remap region
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXBootRemapConfig(const void *hPrivate,
+                        IMG_UINT32 ui32Config1RegAddr,
+                        IMG_UINT64 ui64Config1RegValue,
+                        IMG_UINT32 ui32Config2RegAddr,
+                        IMG_UINT64 ui64Config2PhyAddr,
+                        IMG_UINT64 ui64Config2PhyMask,
+                        IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXBootRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+		RGXWriteReg64(priv, c1reg, (c1val)); \
+		RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+	} while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireCodeRemapAddr
+
+ @Description     Acquire the device physical address of the MIPS code
+                  accessed through remap region
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psCodeRemapAddr  : Base address of the remapped code
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXCodeRemapConfig
+
+ @Description     Configure the code remap registers passed as arguments.
+                  In a driver-live scenario without PDump this is the same as
+                  two RGXWriteReg64 calls and it doesn't need to be reimplemented.
+
+ @Input           hPrivate             : Implementation specific data
+ @Input           ui32Config1RegAddr   : Remap config1 register offset
+ @Input           ui64Config1RegValue  : Remap config1 register value
+ @Input           ui32Config2RegAddr   : Remap config2 register offset
+ @Input           ui64Config2PhyAddr   : Output remapped aligned physical address
+ @Input           ui64Config2PhyMask   : Mask for the output physical address
+ @Input           ui64Config2Settings  : Extra settings for this remap region
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXCodeRemapConfig(const void *hPrivate,
+                        IMG_UINT32 ui32Config1RegAddr,
+                        IMG_UINT64 ui64Config1RegValue,
+                        IMG_UINT32 ui32Config2RegAddr,
+                        IMG_UINT64 ui64Config2PhyAddr,
+                        IMG_UINT64 ui64Config2PhyMask,
+                        IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXCodeRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+		RGXWriteReg64(priv, c1reg, (c1val)); \
+		RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+	} while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireDataRemapAddr
+
+ @Description     Acquire the device physical address of the MIPS data
+                  accessed through remap region
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psDataRemapAddr  : Base address of the remapped data
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXDataRemapConfig
+
+ @Description     Configure the data remap registers passed as arguments.
+                  In a driver-live scenario without PDump this is the same as
+                  two RGXWriteReg64 calls and it doesn't need to be reimplemented.
+
+ @Input           hPrivate             : Implementation specific data
+ @Input           ui32Config1RegAddr   : Remap config1 register offset
+ @Input           ui64Config1RegValue  : Remap config1 register value
+ @Input           ui32Config2RegAddr   : Remap config2 register offset
+ @Input           ui64Config2PhyAddr   : Output remapped aligned physical address
+ @Input           ui64Config2PhyMask   : Mask for the output physical address
+ @Input           ui64Config2Settings  : Extra settings for this remap region
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXDataRemapConfig(const void *hPrivate,
+                        IMG_UINT32 ui32Config1RegAddr,
+                        IMG_UINT64 ui64Config1RegValue,
+                        IMG_UINT32 ui32Config2RegAddr,
+                        IMG_UINT64 ui64Config2PhyAddr,
+                        IMG_UINT64 ui64Config2PhyMask,
+                        IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXDataRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+		RGXWriteReg64(priv, c1reg, (c1val)); \
+		RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+	} while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireTrampolineRemapAddr
+
+ @Description     Acquire the device physical address of the MIPS trampoline
+                  accessed through remap region
+
+ @Input           hPrivate             : Implementation specific data
+ @Output          psTrampolineRemapAddr: Base address of the remapped trampoline
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXTrampolineRemapConfig
+
+ @Description     Configure the trampoline remap registers passed as arguments.
+                  In a driver-live scenario without PDump this is the same as
+                  two RGXWriteReg64 calls and it doesn't need to be reimplemented.
+
+ @Input           hPrivate             : Implementation specific data
+ @Input           ui32Config1RegAddr   : Remap config1 register offset
+ @Input           ui64Config1RegValue  : Remap config1 register value
+ @Input           ui32Config2RegAddr   : Remap config2 register offset
+ @Input           ui64Config2PhyAddr   : Output remapped aligned physical address
+ @Input           ui64Config2PhyMask   : Mask for the output physical address
+ @Input           ui64Config2Settings  : Extra settings for this remap region
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXTrampolineRemapConfig(const void *hPrivate,
+                              IMG_UINT32 ui32Config1RegAddr,
+                              IMG_UINT64 ui64Config1RegValue,
+                              IMG_UINT32 ui32Config2RegAddr,
+                              IMG_UINT64 ui64Config2PhyAddr,
+                              IMG_UINT64 ui64Config2PhyMask,
+                              IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXTrampolineRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+		RGXWriteReg64(priv, c1reg, (c1val)); \
+		RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+	} while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXDoFWSlaveBoot
+
+ @Description     Returns whether or not a FW Slave Boot is required
+                  while powering on
+
+ @Input           hPrivate       : Implementation specific data
+
+ @Return          IMG_TRUE if a FW Slave Boot is required, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXFabricCoherencyTest
+
+ @Description    Performs a coherency test
+
+ @Input          hPrivate         : Implementation specific data
+
+ @Return         PVRSRV_OK if the test succeeds,
+                 PVRSRV_ERROR_INIT_FAILURE if the test fails at some point
+
+******************************************************************************/
+PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate);
+
+/* These are used to check whether the device has a specific ERN/BRN, based on
+ * hPrivate. Use them instead of calling RGXDeviceHasErnBrn with a raw bit mask. */
+#define RGX_DEVICE_HAS_ERN(hPrivate, ERN) \
+			RGXDeviceHasErnBrn(hPrivate, HW_ERN_##ERN##_BIT_MASK)
+
+#define RGX_DEVICE_HAS_BRN(hPrivate, BRN) \
+			RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_##BRN##_BIT_MASK)
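+
+/* Illustrative usage (a minimal sketch; ERN 45914 is queried exactly this way
+ * later in this series):
+ *
+ *     if (RGX_DEVICE_HAS_ERN(hPrivate, 45914))
+ *     {
+ *         ... apply the ERN_45914 segment output addresses ...
+ *     }
+ */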
+
+/*!
+*******************************************************************************
+
+ @Function       RGXDeviceHasErnBrn
+
+ @Description    Checks if a device has a particular erratum/enhancement (BRN/ERN)
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui64ErnsBrns : Flags to check
+
+ @Return         IMG_TRUE if any of the given ERN/BRN flags are set, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDeviceSLCBanks
+
+ @Description    Returns the number of SLC banks used by the device
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         Number of SLC banks
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDeviceSLCSize
+
+ @Description    Returns the device SLC size
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         SLC size
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceSLCSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDeviceCacheLineSize
+
+ @Description    Returns the device cache line size
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         Cache line size
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDevicePhysBusWidth
+
+ @Description    Returns the device physical bus width
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         Physical bus width
+
+******************************************************************************/
+IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXDevicePA0IsValid
+
+ @Description    Returns true if the device physical address 0x0 is a valid
+                 address and can be accessed by the GPU.
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         IMG_TRUE if device physical address 0x0 is a valid address,
+                 IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* !defined (__RGXLAYER_H__) */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxlayer_impl.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxlayer_impl.c
new file mode 100644
index 0000000..9878800
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxlayer_impl.c
@@ -0,0 +1,1220 @@
+/*************************************************************************/ /*!
+@File
+@Title          DDK implementation of the Services abstraction layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    DDK implementation of the Services abstraction layer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "rgxlayer_impl.h"
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "rgxfwutils.h"
+#include "rgxfwimageutils.h"
+#include "devicemem.h"
+#include "cache_km.h"
+#include "pmr.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#endif
+
+void RGXMemCopy(const void *hPrivate,
+		void *pvDst,
+		void *pvSrc,
+		size_t uiSize)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+	OSDeviceMemCopy(pvDst, pvSrc, uiSize);
+}
+
+void RGXMemSet(const void *hPrivate,
+		void *pvDst,
+		IMG_UINT8 ui8Value,
+		size_t uiSize)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+	OSDeviceMemSet(pvDst, ui8Value, uiSize);
+}
+
+void RGXCommentLog(const void *hPrivate,
+		const IMG_CHAR *pszString,
+		...)
+{
+#if defined(PDUMP)
+	va_list argList;
+	va_start(argList, pszString);
+	PDumpCommentWithFlagsVA(PDUMP_FLAGS_CONTINUOUS, pszString, argList);
+	va_end(argList);
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+#else
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+	PVR_UNREFERENCED_PARAMETER(pszString);
+#endif
+}
+
+void RGXErrorLog(const void *hPrivate,
+		const IMG_CHAR *pszString,
+		...)
+{
+	IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+	va_list argList;
+
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+
+	va_start(argList, pszString);
+	vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList);
+	va_end(argList);
+
+	PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer));
+}
+
+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	IMG_UINT32 ui32CorememSize = 0;
+
+	PVR_ASSERT(hPrivate != NULL);
+
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
+	{
+		ui32CorememSize = RGX_GET_FEATURE_VALUE(psDevInfo, META_COREMEM_SIZE);
+	}
+
+	return ui32CorememSize;
+}
+
+void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void __iomem *pvRegsBase;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+	if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+	{
+		OSWriteHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue);
+	}
+
+	PDUMPREG32(RGX_PDUMPREG_NAME, ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags);
+}
+
+void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void __iomem *pvRegsBase;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+	if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+	{
+		OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue);
+	}
+
+	PDUMPREG64(RGX_PDUMPREG_NAME, ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags);
+}
+
+IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void __iomem *pvRegsBase;
+	IMG_UINT32 ui32RegValue;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+	if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
+	{
+		ui32RegValue = IMG_UINT32_MAX;
+	}
+	else
+#endif
+	{
+		ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr);
+	}
+
+	PDUMPREGREAD32(RGX_PDUMPREG_NAME, ui32RegAddr, psParams->ui32PdumpFlags);
+
+	return ui32RegValue;
+}
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void __iomem *pvRegsBase;
+	IMG_UINT64 ui64RegValue;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+	if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
+	{
+		ui64RegValue = IMG_UINT64_MAX;
+	}
+	else
+#endif
+	{
+		ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr);
+	}
+
+	PDUMPREGREAD64(RGX_PDUMPREG_NAME, ui32RegAddr, psParams->ui32PdumpFlags);
+
+	return ui64RegValue;
+}
+
+IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate,
+		IMG_UINT32 ui32RegAddr,
+		IMG_UINT64 uiRegValueNew,
+		IMG_UINT64 uiRegKeepMask)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void __iomem *pvRegsBase;
+#if defined(PDUMP)
+	PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+	/* only use the new values for bits we update according to the keep mask */
+	uiRegValueNew &= ~uiRegKeepMask;
+
+#if defined(PDUMP)
+
+	PDUMP_BLKSTART(ui32PDumpFlags);
+
+	/* Store register offset to temp PDump variable */
+	PDumpRegRead64ToInternalVar(RGX_PDUMPREG_NAME, ":SYSMEM:$1", ui32RegAddr, ui32PDumpFlags);
+
+	/* Keep the bits set in the mask */
+	PDumpWriteVarANDValueOp(":SYSMEM:$1", uiRegKeepMask, ui32PDumpFlags);
+
+	/* OR the new values */
+	PDumpWriteVarORValueOp(":SYSMEM:$1", uiRegValueNew, ui32PDumpFlags);
+
+	/* Do the actual register write */
+	PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags);
+
+	PDUMP_BLKEND(ui32PDumpFlags);
+
+	if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+
+	{
+		IMG_UINT64 uiRegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr);
+		uiRegValue &= uiRegKeepMask;
+		OSWriteHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew);
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+		IMG_UINT32 ui32RegAddr,
+		IMG_UINT32 ui32RegValue,
+		IMG_UINT32 ui32RegMask)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void __iomem *pvRegsBase;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+	if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+	{
+		if (PVRSRVPollForValueKM((IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr),
+				ui32RegValue,
+				ui32RegMask) != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) failed", ui32RegAddr));
+			return PVRSRV_ERROR_TIMEOUT;
+		}
+	}
+
+	PDUMPREGPOL(RGX_PDUMPREG_NAME,
+			ui32RegAddr,
+			ui32RegValue,
+			ui32RegMask,
+			psParams->ui32PdumpFlags,
+			PDUMP_POLL_OPERATOR_EQUAL);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+		IMG_UINT32 ui32RegAddr,
+		IMG_UINT64 ui64RegValue,
+		IMG_UINT64 ui64RegMask)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void __iomem *pvRegsBase;
+
+	/* Split lower and upper words */
+	IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32);
+	IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue);
+	IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32);
+	IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask);
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+	if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+	{
+		if (PVRSRVPollForValueKM((IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr + 4),
+				ui32UpperValue,
+				ui32UpperMask) != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr));
+			return PVRSRV_ERROR_TIMEOUT;
+		}
+
+		if (PVRSRVPollForValueKM((IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr),
+				ui32LowerValue,
+				ui32LowerMask) != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr));
+			return PVRSRV_ERROR_TIMEOUT;
+		}
+	}
+
+	PDUMPREGPOL(RGX_PDUMPREG_NAME,
+			ui32RegAddr + 4,
+			ui32UpperValue,
+			ui32UpperMask,
+			psParams->ui32PdumpFlags,
+			PDUMP_POLL_OPERATOR_EQUAL);
+
+
+	PDUMPREGPOL(RGX_PDUMPREG_NAME,
+			ui32RegAddr,
+			ui32LowerValue,
+			ui32LowerMask,
+			psParams->ui32PdumpFlags,
+			PDUMP_POLL_OPERATOR_EQUAL);
+
+	return PVRSRV_OK;
+}
+
+void RGXWaitCycles(const void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32TimeUs)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+	OSWaitus(ui32TimeUs);
+	PDUMPIDLWITHFLAGS(ui32Cycles, PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr)
+{
+	PVR_ASSERT(hPrivate != NULL);
+	*psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr;
+}
+
+#if defined(PDUMP)
+void RGXWriteKernelMMUPC64(const void *hPrivate,
+		IMG_UINT32 ui32PCReg,
+		IMG_UINT32 ui32PCRegAlignShift,
+		IMG_UINT32 ui32PCRegShift,
+		IMG_UINT64 ui64PCVal)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	/* Write the cat-base address */
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ui32PCReg, ui64PCVal);
+
+	/* Pdump catbase address */
+	MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
+			RGX_PDUMPREG_NAME,
+			ui32PCReg,
+			8,
+			ui32PCRegAlignShift,
+			ui32PCRegShift,
+			PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXWriteKernelMMUPC32(const void *hPrivate,
+		IMG_UINT32 ui32PCReg,
+		IMG_UINT32 ui32PCRegAlignShift,
+		IMG_UINT32 ui32PCRegShift,
+		IMG_UINT32 ui32PCVal)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	/* Write the cat-base address */
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal);
+
+	/* Pdump catbase address */
+	MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
+			RGX_PDUMPREG_NAME,
+			ui32PCReg,
+			4,
+			ui32PCRegAlignShift,
+			ui32PCRegShift,
+			PDUMP_FLAGS_CONTINUOUS);
+}
+#endif /* defined(PDUMP) */
+
+void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr)
+{
+	PVR_ASSERT(hPrivate != NULL);
+	*psGPURegsAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sGPURegAddr;
+}
+
+#if defined(PDUMP)
+void RGXMIPSWrapperConfig(const void *hPrivate,
+		IMG_UINT32 ui32RegAddr,
+		IMG_UINT64 ui64GPURegsAddr,
+		IMG_UINT32 ui32GPURegsAlign,
+		IMG_UINT32 ui32BootMode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM,
+			ui32RegAddr,
+			(ui64GPURegsAddr >> ui32GPURegsAlign) | ui32BootMode);
+
+	PDUMP_BLKSTART(ui32PDumpFlags);
+
+	/* Store register offset to temp PDump variable */
+	PDumpRegLabelToInternalVar(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags);
+
+	/* Align register transactions identifier */
+	PDumpWriteVarSHRValueOp(":SYSMEM:$1", ui32GPURegsAlign, ui32PDumpFlags);
+
+	/* Enable micromips instruction encoding */
+	PDumpWriteVarORValueOp(":SYSMEM:$1", ui32BootMode, ui32PDumpFlags);
+
+	/* Do the actual register write */
+	PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags);
+
+	PDUMP_BLKEND(ui32PDumpFlags);
+}
+#endif
+
+void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr)
+{
+	PVR_ASSERT(hPrivate != NULL);
+	*psBootRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sBootRemapAddr;
+}
+
+void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr)
+{
+	PVR_ASSERT(hPrivate != NULL);
+	*psCodeRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sCodeRemapAddr;
+}
+
+void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr)
+{
+	PVR_ASSERT(hPrivate != NULL);
+	*psDataRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sDataRemapAddr;
+}
+
+void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr)
+{
+	PVR_ASSERT(hPrivate != NULL);
+	*psTrampolineRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sTrampolineRemapAddr;
+}
+
+#if defined(PDUMP)
+static inline
+void RGXWriteRemapConfig2Reg(void __iomem *pvRegs,
+		PMR *psPMR,
+		IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+		IMG_UINT32 ui32RegAddr,
+		IMG_UINT64 ui64PhyAddr,
+		IMG_UINT64 ui64PhyMask,
+		IMG_UINT64 ui64Settings)
+{
+	PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+
+	OSWriteHWReg64(pvRegs, ui32RegAddr, (ui64PhyAddr & ui64PhyMask) | ui64Settings);
+
+	PDUMP_BLKSTART(ui32PDumpFlags);
+
+	/* Store memory offset to temp PDump variable */
+	PDumpMemLabelToInternalVar64(":SYSMEM:$1", psPMR, uiLogicalOffset, ui32PDumpFlags);
+
+	/* Keep only the relevant bits of the output physical address */
+	PDumpWriteVarANDValueOp(":SYSMEM:$1", ui64PhyMask, ui32PDumpFlags);
+
+	/* Extra settings for this remapped region */
+	PDumpWriteVarORValueOp(":SYSMEM:$1", ui64Settings, ui32PDumpFlags);
+
+	/* Do the actual register write */
+	PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags);
+
+	PDUMP_BLKEND(ui32PDumpFlags);
+}
+
+void RGXBootRemapConfig(const void *hPrivate,
+		IMG_UINT32 ui32Config1RegAddr,
+		IMG_UINT64 ui64Config1RegValue,
+		IMG_UINT32 ui32Config2RegAddr,
+		IMG_UINT64 ui64Config2PhyAddr,
+		IMG_UINT64 ui64Config2PhyMask,
+		IMG_UINT64 ui64Config2Settings)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	IMG_UINT32 ui32BootRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE);
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	/* Write remap config1 register */
+	RGXWriteReg64(hPrivate,
+			ui32Config1RegAddr,
+			ui64Config1RegValue);
+
+	/* Write remap config2 register */
+	RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+			psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR,
+			psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32BootRemapMemOffset,
+			ui32Config2RegAddr,
+			ui64Config2PhyAddr,
+			ui64Config2PhyMask,
+			ui64Config2Settings);
+}
+
+void RGXCodeRemapConfig(const void *hPrivate,
+		IMG_UINT32 ui32Config1RegAddr,
+		IMG_UINT64 ui64Config1RegValue,
+		IMG_UINT32 ui32Config2RegAddr,
+		IMG_UINT64 ui64Config2PhyAddr,
+		IMG_UINT64 ui64Config2PhyMask,
+		IMG_UINT64 ui64Config2Settings)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	IMG_UINT32 ui32CodeRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE);
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	/* Write remap config1 register */
+	RGXWriteReg64(hPrivate,
+			ui32Config1RegAddr,
+			ui64Config1RegValue);
+
+	/* Write remap config2 register */
+	RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+			psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR,
+			psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32CodeRemapMemOffset,
+			ui32Config2RegAddr,
+			ui64Config2PhyAddr,
+			ui64Config2PhyMask,
+			ui64Config2Settings);
+}
+
+void RGXDataRemapConfig(const void *hPrivate,
+		IMG_UINT32 ui32Config1RegAddr,
+		IMG_UINT64 ui64Config1RegValue,
+		IMG_UINT32 ui32Config2RegAddr,
+		IMG_UINT64 ui64Config2PhyAddr,
+		IMG_UINT64 ui64Config2PhyMask,
+		IMG_UINT64 ui64Config2Settings)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	IMG_UINT32 ui32DataRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA);
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	/* Write remap config1 register */
+	RGXWriteReg64(hPrivate,
+			ui32Config1RegAddr,
+			ui64Config1RegValue);
+
+	/* Write remap config2 register */
+	RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+			psDevInfo->psRGXFWDataMemDesc->psImport->hPMR,
+			psDevInfo->psRGXFWDataMemDesc->uiOffset + ui32DataRemapMemOffset,
+			ui32Config2RegAddr,
+			ui64Config2PhyAddr,
+			ui64Config2PhyMask,
+			ui64Config2Settings);
+}
+
+void RGXTrampolineRemapConfig(const void *hPrivate,
+		IMG_UINT32 ui32Config1RegAddr,
+		IMG_UINT64 ui64Config1RegValue,
+		IMG_UINT32 ui32Config2RegAddr,
+		IMG_UINT64 ui64Config2PhyAddr,
+		IMG_UINT64 ui64Config2PhyMask,
+		IMG_UINT64 ui64Config2Settings)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	/* write the register for real, without PDump */
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM,
+			ui32Config1RegAddr,
+			ui64Config1RegValue);
+
+	PDUMP_BLKSTART(ui32PDumpFlags);
+
+	/* Store the memory address in a PDump variable */
+	PDumpPhysHandleToInternalVar64(":SYSMEM:$1",
+			psDevInfo->psTrampoline->hPdumpPages,
+			ui32PDumpFlags);
+
+	/* Keep only the relevant bits of the input physical address */
+	PDumpWriteVarANDValueOp(":SYSMEM:$1",
+			~RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK,
+			ui32PDumpFlags);
+
+	/* Enable bit */
+	PDumpWriteVarORValueOp(":SYSMEM:$1",
+			RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN,
+			ui32PDumpFlags);
+
+	/* Do the PDump register write */
+	PDumpInternalVarToReg64(RGX_PDUMPREG_NAME,
+			ui32Config1RegAddr,
+			":SYSMEM:$1",
+			ui32PDumpFlags);
+
+	PDUMP_BLKEND(ui32PDumpFlags);
+
+	/* this can be written directly */
+	RGXWriteReg64(hPrivate,
+			ui32Config2RegAddr,
+			(ui64Config2PhyAddr & ui64Config2PhyMask) | ui64Config2Settings);
+}
+#endif
+
+#define MAX_NUM_COHERENCY_TESTS  (10)
+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	if (psDevInfo->ui32CoherencyTestsDone >= MAX_NUM_COHERENCY_TESTS)
+	{
+		return IMG_FALSE;
+	}
+
+	psDevConfig = ((RGX_LAYER_PARAMS*)hPrivate)->psDevConfig;
+
+	return PVRSRVSystemSnoopingOfCPUCache(psDevConfig);
+}
+
+static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Wait for Slave Port to be Ready */
+	eError = RGXPollReg32(hPrivate,
+			RGX_CR_META_SP_MSLVCTRL1,
+			RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+			RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+	if (eError != PVRSRV_OK) return eError;
+
+	/* Issue a Write */
+	RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr);
+	RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue);
+
+	return eError;
+}
+
+/*
+ * The fabric coherency test is performed when the platform supports fabric
+ * coherency, either in the form of ACE-Lite or full ACE. This test runs early,
+ * with the firmware processor quiescent and makes exclusive use of the slave
+ * port interface for reading/writing through the device memory hierarchy. The
+ * rationale for the test is to ensure that what the CPU writes to its dcache
+ * is visible to the GPU via coherency snoop miss/hit and vice-versa without
+ * any intervening cache maintenance by the writing agent.
+ */
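+/* Summary of the sub-tests exercised below (two runs each, with different
+ * seed values):
+ *   type 0: CPU write / GPU read, snoop miss (CPU dcache flushed first)
+ *   type 1: GPU write / CPU read, snoop miss (CPU dcache cleaned first)
+ *   type 2: CPU write / GPU read, snoop hit  (line left dirty in the dcache)
+ *   type 3: GPU write / CPU read, snoop hit  (CPU dcache cleaned first)
+ */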
+PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	IMG_UINT32 *pui32FabricCohTestBufferCpuVA;
+	DEVMEM_MEMDESC *psFabricCohTestBufferMemDesc;
+	RGXFWIF_DEV_VIRTADDR sFabricCohTestBufferDevVA;
+	IMG_DEVMEM_SIZE_T uiFabricCohTestBlockSize = sizeof(IMG_UINT64);
+	IMG_DEVMEM_ALIGN_T uiFabricCohTestBlockAlign = sizeof(IMG_UINT64);
+	IMG_UINT64 ui64SegOutAddrTopCached = 0;
+	IMG_UINT64 ui64SegOutAddrTopUncached = 0;
+	IMG_UINT32 ui32SLCCTRL = 0;
+	IMG_UINT32 ui32OddEven;
+	IMG_BOOL   bFeatureS7;
+	IMG_UINT32 ui32TestType;
+	IMG_UINT32 ui32OddEvenSeed = 1;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bFullTestPassed = IMG_TRUE;
+	IMG_BOOL bSubTestPassed = IMG_FALSE;
+	IMG_BOOL bExit = IMG_FALSE;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	PVR_LOG(("Starting fabric coherency test ....."));
+
+	bFeatureS7 = RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE);
+
+	if (bFeatureS7)
+	{
+		if (RGX_DEVICE_HAS_ERN(hPrivate, 45914))
+		{
+			ui64SegOutAddrTopCached   = RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED_ERN_45914(META_MMU_CONTEXT_MAPPING);
+			ui64SegOutAddrTopUncached = RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED_ERN_45914(META_MMU_CONTEXT_MAPPING);
+		}
+
+		/* Configure META to use SLC force-linefill for the bootloader segment */
+		RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6),
+				(ui64SegOutAddrTopUncached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32);
+	}
+	else
+	{
+		/* Bypass the SLC when IO coherency is enabled */
+		ui32SLCCTRL = RGXReadReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS);
+		RGXWriteReg32(hPrivate,
+				RGX_CR_SLC_CTRL_BYPASS,
+				ui32SLCCTRL | RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN);
+	}
+
+	/* Size and align are 'expanded' because we request an export align allocation */
+	eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+			&uiFabricCohTestBlockSize,
+			&uiFabricCohTestBlockAlign);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"DevmemExportalignAdjustSizeAndAlign() error: %s, exiting",
+				PVRSRVGetErrorString(eError)));
+		goto e0;
+	}
+
+	/* Allocate, acquire cpu address and set firmware address */
+	eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode,
+			uiFabricCohTestBlockSize,
+			uiFabricCohTestBlockAlign,
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+			PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT |
+			PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+			"FwExFabricCoherencyTestBuffer",
+			&psFabricCohTestBufferMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"DevmemFwAllocateExportable() error: %s, exiting",
+				PVRSRVGetErrorString(eError)));
+		goto e0;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psFabricCohTestBufferMemDesc, (void **) &pui32FabricCohTestBufferCpuVA);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"DevmemAcquireCpuVirtAddr() error: %s, exiting",
+				PVRSRVGetErrorString(eError)));
+		goto e0;
+	}
+
+	/* Create a FW address which is uncached in the Meta DCache and in the SLC
+	 * using the Meta bootloader segment.
+	 * This segment is the only one configured correctly out of reset
+	 * (when this test is meant to be executed).
+	 */
+	RGXSetFirmwareAddress(&sFabricCohTestBufferDevVA,
+			psFabricCohTestBufferMemDesc,
+			0,
+			RFW_FWADDR_FLAG_NONE);
+
+	/* Undo most of the FW mappings done by RGXSetFirmwareAddress */
+	sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK;
+	sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK;
+	sFabricCohTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+	/* Map the buffer in the bootloader segment as uncached */
+	sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR;
+	sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+
+	for (ui32TestType = 0; ui32TestType < 4 && bExit == IMG_FALSE; ui32TestType++)
+	{
+		IMG_CPU_PHYADDR sCpuPhyAddr;
+		IMG_BOOL bValid;
+		PMR *psPMR;
+
+		/* Acquire underlying PMR CpuPA in preparation for cache maintenance */
+		(void) DevmemLocalGetImportHandle(psFabricCohTestBufferMemDesc, (void**)&psPMR);
+		eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid);
+		if (eError != PVRSRV_OK || bValid == IMG_FALSE)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"PMR_CpuPhysAddr error: %s, exiting",
+					PVRSRVGetErrorString(eError)));
+			bExit = IMG_TRUE;
+			continue;
+		}
+
+		/* Two passes [runs] are done here, mostly to account for the effects of
+		   using a different seed value (ui32OddEvenSeed) for the write and read */
+		for (ui32OddEven = 1; ui32OddEven < 3 && bExit == IMG_FALSE; ui32OddEven++)
+		{
+			IMG_UINT32 i;
+
+#if defined(DEBUG)
+			switch (ui32TestType)
+			{
+			case 0:
+				PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven));
+				break;
+			case 1:
+				PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven));
+				break;
+			case 2:
+				PVR_LOG(("CPU:Write/GPU:Read Snoop Hit  Test: starting [run #%u]", ui32OddEven));
+				break;
+			case 3:
+				PVR_LOG(("GPU:Write/CPU:Read Snoop Hit  Test: starting [run #%u]", ui32OddEven));
+				break;
+			default:
+				PVR_LOG(("Internal error, exiting test"));
+				eError = PVRSRV_ERROR_INIT_FAILURE;
+				bExit = IMG_TRUE;
+				continue;
+			}
+#endif
+
+			for (i = 0; i < 2 && bExit == IMG_FALSE; i++)
+			{
+				IMG_UINT32 ui32FWAddr;
+				IMG_UINT32 ui32FWValue;
+				IMG_UINT32 ui32FWValue2;
+				IMG_CPU_PHYADDR sCpuPhyAddrStart;
+				IMG_CPU_PHYADDR sCpuPhyAddrEnd;
+				IMG_UINT32 ui32LastFWValue = ~0;
+				IMG_UINT32 ui32Offset = i * sizeof(IMG_UINT32);
+
+				/* Calculate next address and seed value to write/read from slave-port */
+				ui32FWAddr = sFabricCohTestBufferDevVA.ui32Addr + ui32Offset;
+				sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + ui32Offset;
+				sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr;
+				ui32OddEvenSeed += 1;
+
+				if (ui32TestType & 0x1)
+				{
+					ui32FWValue = i + ui32OddEvenSeed;
+
+					switch (ui32TestType)
+					{
+					case 1:
+					case 3:
+						/* Clean the dcache so it holds no stale data that could
+						   drain later and overwrite what we are about to write via
+						   the slave-port, corrupting the value we then read back
+						   via the CPU */
+						sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+						CacheOpExec(psDevInfo->psDeviceNode,
+								(IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+								(IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+								sCpuPhyAddrStart,
+								sCpuPhyAddrEnd,
+								PVRSRV_CACHE_OP_CLEAN);
+						break;
+					}
+
+					/* Write the value using the RGX slave-port interface */
+					eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32FWValue);
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR,
+								"RGXWriteMETAAddr error: %s, exiting",
+								PVRSRVGetErrorString(eError)));
+						bExit = IMG_TRUE;
+						continue;
+					}
+
+					/* Read back value using RGX slave-port interface, this is used
+					   as a sort of memory barrier for the above write */
+					eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue2);
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR,
+								"RGXReadMETAAddr error: %s, exiting",
+								PVRSRVGetErrorString(eError)));
+						bExit = IMG_TRUE;
+						continue;
+					}
+					else if (ui32FWValue != ui32FWValue2)
+					{
+						/* Fatal error, we should abort */
+						PVR_DPF((PVR_DBG_ERROR,
+								"At Offset: %d, RAW via SlavePort failed: expected: %x, got: %x",
+								i,
+								ui32FWValue,
+								ui32FWValue2));
+						eError = PVRSRV_ERROR_INIT_FAILURE;
+						bExit = IMG_TRUE;
+						continue;
+					}
+
+					if (! PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig))
+					{
+						/* Invalidate dcache to ensure that any prefetched data by the CPU from this memory
+						   region is discarded before we read (i.e. next read must trigger a cache miss).
+						   If there is snooping of device cache, then any prefetching done by the CPU
+						   will reflect the most up to date datum writing by GPU into said location,
+						   that is to say prefetching must be coherent so CPU d-flush is not needed */
+						sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+						CacheOpExec(psDevInfo->psDeviceNode,
+								(IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+								(IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+								sCpuPhyAddrStart,
+								sCpuPhyAddrEnd,
+								PVRSRV_CACHE_OP_INVALIDATE);
+					}
+				}
+				else
+				{
+					IMG_UINT32 ui32RAWCpuValue;
+
+					/* Ensures line is in dcache */
+					ui32FWValue = IMG_UINT32_MAX;
+
+					/* Dirty allocation in dcache */
+					ui32RAWCpuValue = i + ui32OddEvenSeed;
+					pui32FabricCohTestBufferCpuVA[i] = i + ui32OddEvenSeed;
+
+					/* Flush possible cpu store-buffer(ing) on LMA */
+					OSWriteMemoryBarrier();
+
+					switch (ui32TestType)
+					{
+					case 0:
+						/* Flush dcache to force subsequent incoming CPU-bound snoop to miss so
+						   memory is coherent before the SlavePort reads */
+						sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+						CacheOpExec(psDevInfo->psDeviceNode,
+								(IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+								(IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+								sCpuPhyAddrStart,
+								sCpuPhyAddrEnd,
+								PVRSRV_CACHE_OP_FLUSH);
+						break;
+					}
+
+					/* Read back value using RGX slave-port interface */
+					eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue);
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR,
+								"RGXReadMETAAddr error: %s, exiting",
+								PVRSRVGetErrorString(eError)));
+						bExit = IMG_TRUE;
+						continue;
+					}
+
+					/* We are being mostly paranoid here, just to account for CPU RAW operations */
+					sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+					CacheOpExec(psDevInfo->psDeviceNode,
+							(IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+							(IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+							sCpuPhyAddrStart,
+							sCpuPhyAddrEnd,
+							PVRSRV_CACHE_OP_FLUSH);
+					if (pui32FabricCohTestBufferCpuVA[i] != ui32RAWCpuValue)
+					{
+						/* Fatal error, we should abort */
+						PVR_DPF((PVR_DBG_ERROR,
+								"At Offset: %d, RAW by CPU failed: expected: %x, got: %x",
+								i,
+								ui32RAWCpuValue,
+								pui32FabricCohTestBufferCpuVA[i]));
+						eError = PVRSRV_ERROR_INIT_FAILURE;
+						bExit = IMG_TRUE;
+						continue;
+					}
+				}
+
+				/* Compare to see if sub-test passed */
+				if (pui32FabricCohTestBufferCpuVA[i] == ui32FWValue)
+				{
+					bSubTestPassed = IMG_TRUE;
+				}
+				else
+				{
+					bSubTestPassed = IMG_FALSE;
+					bFullTestPassed = IMG_FALSE;
+					eError = PVRSRV_ERROR_INIT_FAILURE;
+					if (ui32LastFWValue != ui32FWValue)
+					{
+#if defined(DEBUG)
+						PVR_LOG(("At Offset: %d, Expected: %x, Got: %x",
+								i,
+								(ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i],
+								(ui32TestType & 0x1) ? pui32FabricCohTestBufferCpuVA[i] : ui32FWValue));
+#endif
+					}
+					else
+					{
+						PVR_DPF((PVR_DBG_ERROR,
+								"test encountered unexpected error, exiting"));
+						eError = PVRSRV_ERROR_INIT_FAILURE;
+						bExit = IMG_TRUE;
+						continue;
+					}
+				}
+
+				ui32LastFWValue = (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i];
+			}
+
+#if defined(DEBUG)
+			if (bExit)
+			{
+				continue;
+			}
+
+			switch (ui32TestType)
+			{
+			case 0:
+				PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+				break;
+			case 1:
+				PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+				break;
+			case 2:
+				PVR_LOG(("CPU:Write/GPU:Read Snoop Hit  Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+				break;
+			case 3:
+				PVR_LOG(("GPU:Write/CPU:Read Snoop Hit  Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+				break;
+			default:
+				PVR_LOG(("Internal error, exiting test"));
+				bExit = IMG_TRUE;
+				continue;
+			}
+#endif
+		}
+	}
+
+	RGXUnsetFirmwareAddress(psFabricCohTestBufferMemDesc);
+	DevmemReleaseCpuVirtAddr(psFabricCohTestBufferMemDesc);
+	DevmemFwFree(psDevInfo, psFabricCohTestBufferMemDesc);
+
+e0:
+	if (bFeatureS7)
+	{
+		/* Restore bootloader segment settings */
+		RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6),
+				(ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32);
+	}
+	else
+	{
+		/* Restore SLC bypass settings */
+		RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS, ui32SLCCTRL);
+	}
+
+	bFullTestPassed = bExit ? IMG_FALSE : bFullTestPassed;
+	if (bFullTestPassed)
+	{
+		PVR_LOG(("fabric coherency test: PASSED"));
+		psDevInfo->ui32CoherencyTestsDone = MAX_NUM_COHERENCY_TESTS + 1;
+	}
+	else
+	{
+		PVR_LOG(("fabric coherency test: FAILED"));
+		psDevInfo->ui32CoherencyTestsDone++;
+	}
+
+	return eError;
+}
+
+IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0;
+}
+
+IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0;
+}
+
+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS))
+	{
+		return 0;
+	}
+	return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS);
+}
+
+IMG_UINT32 RGXGetDeviceSLCSize(const void *hPrivate)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_SIZE_IN_BYTES))
+	{
+		return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_SIZE_IN_BYTES);
+	}
+
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_SIZE_IN_KILOBYTES))
+	{
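+		/* Feature value is in kilobytes; convert to bytes */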
+		return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_SIZE_IN_KILOBYTES) * 1024;
+	}
+
+	return 0;
+}
+
+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_CACHE_LINE_SIZE_BITS))
+	{
+		return 0;
+	}
+	return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS);
+}
+
+IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH))
+	{
+		return 0;
+	}
+	return RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH);
+}
+
+IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	return psDevInfo->sLayerParams.bDevicePA0IsValid;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxlayer_impl.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxlayer_impl.h
new file mode 100644
index 0000000..a14e1ab
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxlayer_impl.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File
+@Title          Header for DDK implementation of the Services abstraction layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for DDK implementation of the Services abstraction layer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGXLAYER_IMPL_H__)
+#define __RGXLAYER_IMPL_H__
+
+#include "rgxlayer.h"
+#include "device_connection.h"
+
+typedef struct _RGX_LAYER_PARAMS_
+{
+	void *psDevInfo;
+	void *psDevConfig;
+#if defined(PDUMP)
+	IMG_UINT32 ui32PdumpFlags;
+#endif
+
+	IMG_DEV_PHYADDR sPCAddr;
+	IMG_DEV_PHYADDR sGPURegAddr;
+	IMG_DEV_PHYADDR sBootRemapAddr;
+	IMG_DEV_PHYADDR sCodeRemapAddr;
+	IMG_DEV_PHYADDR sDataRemapAddr;
+	IMG_DEV_PHYADDR sTrampolineRemapAddr;
+	IMG_BOOL bDevicePA0IsValid;
+} RGX_LAYER_PARAMS;
+
+#endif /* !defined (__RGXLAYER_IMPL_H__) */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmem.c
new file mode 100644
index 0000000..b1ca1f5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmem.c
@@ -0,0 +1,764 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX memory context management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX memory context management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_server_utils.h"
+#include "devicemem_pdump.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_km.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "sync_internal.h"
+#include "rgx_memallocflags.h"
+#include "rgx_bvnc_defs_km.h"
+#include "info_page.h"
+/*
+	FIXME:
+	For now just get global state, but what we really want is to do
+	this per memory context
+*/
+/*
+ * TestAndReset of gui32CacheOpps is protected by the device power-lock,
+ * in the following way:
+ *
+ *   LOCK(Power-Lock);
+ *     ui32CacheOps = _GetCacheOpsPending(); // Gets gui32CacheOpps
+ *     if(ui32CacheOps)
+ *     {
+ *         _PrepareAndSubmitCacheCommand(ui32CacheOps);
+ *         _CacheOpsCompleted(ui32CacheOps); // Resets gui32CacheOpps
+ *     }
+ *   UNLOCK(Power-lock);
+ */
+static IMG_UINT32 gui32CacheOpps;
+/* FIXME: End */
+
+typedef struct _SERVER_MMU_CONTEXT_ {
+	DEVMEM_MEMDESC *psFWMemContextMemDesc;
+	MMU_CONTEXT *psMMUContext;
+	IMG_PID uiPID;
+	IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+	DLLIST_NODE sNode;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+} SERVER_MMU_CONTEXT;
+
+
+
+void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode,
+						   IMG_HANDLE hDeviceData,
+						   MMU_LEVEL eMMULevel,
+						   IMG_BOOL bUnmap)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+	PVR_UNREFERENCED_PARAMETER(bUnmap);
+
+	switch (eMMULevel)
+	{
+		case MMU_LEVEL_3:	gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PC;
+							break;
+		case MMU_LEVEL_2:	gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PD;
+							break;
+		case MMU_LEVEL_1:	gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PT;
+							if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)))
+							{
+								gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_TLB;
+							}
+							break;
+		default:
+							PVR_ASSERT(0);
+							break;
+	}
+}
+
+static inline IMG_UINT32 _GetCacheOpsPending(void)
+{
+	return gui32CacheOpps;
+}
+
+static inline void _CacheOpsCompleted(IMG_UINT32 ui32CacheOpsServiced)
+{
+	/* Mark in the global cache-ops that ui32CacheOpsServiced were submitted */
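+	/* The XOR clears exactly the serviced bits; any flags raised since the
+	 * pending set was sampled remain set and so stay pending */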
+	gui32CacheOpps ^= ui32CacheOpsServiced;
+}
+
+static
+PVRSRV_ERROR _PrepareAndSubmitCacheCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           RGXFWIF_DM eDM, IMG_UINT32 ui32CacheOps,
+										   IMG_BOOL bInterrupt,
+										   IMG_UINT16 *pui16MMUInvalidateUpdate)
+{
+	PVRSRV_ERROR eError;
+	RGXFWIF_KCCB_CMD sFlushCmd;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	*pui16MMUInvalidateUpdate = psDeviceNode->ui16NextMMUInvalidateUpdate++;
+
+	/* Setup cmd and add the device node's sync object */
+	sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_MMUCACHE;
+	sFlushCmd.uCmdData.sMMUCacheData.ui16MMUCacheSyncUpdateValue = *pui16MMUInvalidateUpdate;
+	SyncPrimGetFirmwareAddr(psDeviceNode->psMMUCacheSyncPrim,
+	                        &sFlushCmd.uCmdData.sMMUCacheData.sMMUCacheSync.ui32Addr);
+	sFlushCmd.uCmdData.sMMUCacheData.ui32Flags =
+		ui32CacheOps |
+		/* Set which memory context this command is for (all ctxs for now) */
+		(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT) ? RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL : 0) |
+		(bInterrupt ? RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT : 0);
+
+#if defined(PDUMP)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+	                      "Submit MMU flush and invalidate (flags = 0x%08x)",
+	                      sFlushCmd.uCmdData.sMMUCacheData.ui32Flags);
+#endif
+
+	/* Schedule MMU cache command */
+	eError = RGXSendCommand(psDevInfo, eDM, &sFlushCmd, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to schedule MMU cache command to "
+		                        "DM=%d with error (%u)", __func__, eDM, eError));
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_UINT16 *pui16MMUInvalidateUpdate,
+                                       IMG_BOOL bInterrupt)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32CacheOps;
+
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)",
+					__func__, PVRSRVGetErrorString(eError)));
+		goto RGXMMUCacheInvalidateKick_exit;
+	}
+
+	ui32CacheOps = _GetCacheOpsPending();
+	if (ui32CacheOps == 0)
+	{
+		eError = PVRSRV_OK;
+		goto _PowerUnlockAndReturnErr;
+	}
+
+	/* Ensure device is powered up before sending cache command */
+	PDUMPPOWCMDSTART();
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+										 PVRSRV_DEV_POWER_STATE_ON,
+										 IMG_FALSE);
+	PDUMPPOWCMDEND();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)",
+					__func__, PVRSRVGetErrorString(eError)));
+		goto _PowerUnlockAndReturnErr;
+	}
+
+	eError = _PrepareAndSubmitCacheCommand(psDeviceNode, RGXFWIF_DM_GP,
+	                                       ui32CacheOps, bInterrupt,
+										   pui16MMUInvalidateUpdate);
+	if (eError != PVRSRV_OK)
+	{
+		/* failed to submit cache operations, return failure */
+		goto _PowerUnlockAndReturnErr;
+	}
+
+	/* Mark the cache ops we serviced */
+	_CacheOpsCompleted(ui32CacheOps);
+
+_PowerUnlockAndReturnErr:
+	PVRSRVPowerUnlock(psDeviceNode);
+
+RGXMMUCacheInvalidateKick_exit:
+	return eError;
+}
+
+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                    RGXFWIF_DM eDM,
+                                    IMG_UINT16 *pui16MMUInvalidateUpdate,
+                                    IMG_BOOL bInterrupt)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32CacheOps;
+
+	/* Caller should ensure that power lock is held before calling this function */
+	PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock));
+
+	ui32CacheOps = _GetCacheOpsPending();
+	if (ui32CacheOps == 0)
+	{
+		return PVRSRV_OK;
+	}
+
+	eError = _PrepareAndSubmitCacheCommand(psDeviceNode, eDM, ui32CacheOps,
+	                                       bInterrupt, pui16MMUInvalidateUpdate);
+	if (eError != PVRSRV_OK)
+	{
+		/* failed to submit cache operations, return failure */
+		return eError;
+	}
+
+	_CacheOpsCompleted(ui32CacheOps);
+
+	return eError;
+}
+
+/* Page-fault debug is currently the only use case that needs to find process
+ * info after that process's device memory context has been destroyed.
+ */
+
+typedef struct _UNREGISTERED_MEMORY_CONTEXT_
+{
+	IMG_PID uiPID;
+	IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+	IMG_DEV_PHYADDR sPCDevPAddr;
+} UNREGISTERED_MEMORY_CONTEXT;
+
+/* must be a power of two */
+#define UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE (1 << 3)
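+/* With a power-of-two size the head index wraps with a mask rather than a
+ * modulo: head = (head + 1) & (SIZE - 1) cycles 0..7 when SIZE == 8 */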
+
+static UNREGISTERED_MEMORY_CONTEXT gasUnregisteredMemCtxs[UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE];
+static IMG_UINT32 gui32UnregisteredMemCtxsHead;
+
+/* record a device memory context being unregistered.
+ * the list of unregistered contexts can be used to find the PID and process name
+ * belonging to a memory context which has been destroyed
+ */
+static void _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERVER_MMU_CONTEXT *psServerMMUContext)
+{
+	UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
+	OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+	psRecord = &gasUnregisteredMemCtxs[gui32UnregisteredMemCtxsHead];
+
+	gui32UnregisteredMemCtxsHead = (gui32UnregisteredMemCtxsHead + 1)
+					& (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1);
+
+	psRecord->uiPID = psServerMMUContext->uiPID;
+	if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &psRecord->sPCDevPAddr) != PVRSRV_OK)
+	{
+		PVR_LOG(("_RecordUnregisteredMemoryContext: Failed to get PC address for memory context"));
+	}
+	OSStringLCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName));
+
+	/* Release only once the record is fully populated, so readers holding
+	 * the lock never observe a partially written entry */
+	OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+}
+
+
+void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData)
+{
+	SERVER_MMU_CONTEXT *psServerMMUContext = hPrivData;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psServerMMUContext->psDevInfo;
+
+	OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
+	dllist_remove_node(&psServerMMUContext->sNode);
+	OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);
+
+	if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+	{
+		_RecordUnregisteredMemoryContext(psDevInfo, psServerMMUContext);
+	}
+
+	/*
+	 * Release the page catalogue address acquired in RGXRegisterMemoryContext().
+	 */
+	MMU_ReleaseBaseAddr(NULL /* FIXME */);
+
+	/*
+	 * Free the firmware memory context.
+	 */
+	DevmemFwFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc);
+
+	OSFreeMem(psServerMMUContext);
+}
+
+
+/*
+ * RGXRegisterMemoryContext
+ */
+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE	*psDeviceNode,
+									  MMU_CONTEXT			*psMMUContext,
+									  IMG_HANDLE			*hPrivData)
+{
+	PVRSRV_ERROR			eError;
+	PVRSRV_RGXDEV_INFO 		*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_FLAGS_T			uiFWMemContextMemAllocFlags;
+	RGXFWIF_FWMEMCONTEXT	*psFWMemContext;
+	DEVMEM_MEMDESC			*psFWMemContextMemDesc;
+	SERVER_MMU_CONTEXT *psServerMMUContext;
+
+	if (psDevInfo->psKernelMMUCtx == NULL)
+	{
+		/*
+		 * This must be the creation of the Kernel memory context. Take a copy
+		 * of the MMU context for use when programming the BIF.
+		 */
+		psDevInfo->psKernelMMUCtx = psMMUContext;
+	}
+	else
+	{
+		psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext));
+		if (psServerMMUContext == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto fail_alloc_server_ctx;
+		}
+
+		psServerMMUContext->psDevInfo = psDevInfo;
+
+		/*
+		 * This FW MemContext is only mapped into the kernel for initialisation purposes.
+		 * Otherwise this allocation is only used by the FW.
+		 * Therefore the GPU cache doesn't need coherency, and write-combine
+		 * suffices on the CPU side (the WC buffer is flushed at every kick).
+		 */
+		uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+										PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+										PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+										PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+										PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+										PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+										PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+		/*
+			Allocate device memory for the firmware memory context for the new
+			application.
+		*/
+		PDUMPCOMMENT("Allocate RGX firmware memory context");
+		/* FIXME: why cache-consistent? */
+		eError = DevmemFwAllocate(psDevInfo,
+								sizeof(*psFWMemContext),
+								uiFWMemContextMemAllocFlags,
+								"FwMemoryContext",
+								&psFWMemContextMemDesc);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to allocate firmware memory context (%u)",
+					eError));
+			goto fail_alloc_fw_ctx;
+		}
+
+		/*
+			Temporarily map the firmware memory context to the kernel.
+		*/
+		eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc,
+										  (void **)&psFWMemContext);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to map firmware memory context (%u)",
+					eError));
+			goto fail_acquire_cpu_addr;
+		}
+
+		/*
+		 * Write the new memory context's page catalogue into the firmware memory
+		 * context for the client.
+		 */
+		eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
+					eError));
+			DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+			goto fail_acquire_base_addr;
+		}
+
+		/*
+		 * Set default values for the rest of the structure.
+		 */
+		psFWMemContext->uiPageCatBaseRegID = RGXFW_BIF_INVALID_PCREG;
+		psFWMemContext->uiBreakpointAddr = 0;
+		psFWMemContext->uiBPHandlerAddr = 0;
+		psFWMemContext->uiBreakpointCtl = 0;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+		{
+			IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
+			IMG_BOOL   bOSidAxiProt;
+
+			MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+			psFWMemContext->ui32OSid     = ui32OSidReg;
+			psFWMemContext->bOSidAxiProt = bOSidAxiProt;
+		}
+#endif
+
+#if defined(PDUMP)
+		{
+			IMG_CHAR			aszName[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+			IMG_DEVMEM_OFFSET_T uiOffset = 0;
+
+			/*
+			 * Dump the Mem context allocation
+			 */
+			DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS);
+
+
+			/*
+			 * Obtain a symbolic addr of the mem context structure
+			 */
+			eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc,
+												   &uiOffset,
+												   aszName,
+												   PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to generate a Dump Page Catalogue address (%u)",
+						eError));
+				DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+				goto fail_pdump_cat_base_addr;
+			}
+
+			/*
+			 * Dump the Page Cat tag in the mem context (symbolic address)
+			 */
+			eError = MMU_PDumpWritePageCatBase(psMMUContext,
+												aszName,
+												uiOffset,
+												8, /* 64-bit register write */
+												0,
+												0,
+												0);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
+						eError));
+				DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+				goto fail_pdump_cat_base;
+			}
+		}
+#endif
+
+		/*
+		 * Release kernel address acquired above.
+		 */
+		DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+
+		/*
+		 * Store the process information for this device memory context
+		 * for use with the host page-fault analysis.
+		 */
+		psServerMMUContext->uiPID = OSGetCurrentClientProcessIDKM();
+		psServerMMUContext->psMMUContext = psMMUContext;
+		psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+		OSStringLCopy(psServerMMUContext->szProcessName,
+		              OSGetCurrentClientProcessNameKM(),
+		              sizeof(psServerMMUContext->szProcessName));
+
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "New memory context: Process Name: %s PID: %u (0x%08X)",
+										psServerMMUContext->szProcessName,
+										psServerMMUContext->uiPID,
+										psServerMMUContext->uiPID);
+
+		OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
+		dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode);
+		OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);
+
+		MMU_SetDeviceData(psMMUContext, psFWMemContextMemDesc);
+		*hPrivData = psServerMMUContext;
+	}
+
+	return PVRSRV_OK;
+
+#if defined(PDUMP)
+fail_pdump_cat_base:
+fail_pdump_cat_base_addr:
+	MMU_ReleaseBaseAddr(NULL);
+#endif
+fail_acquire_base_addr:
+	/* The CPU mapping has already been released before each jump to this point */
+fail_acquire_cpu_addr:
+	DevmemFwFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc);
+fail_alloc_fw_ctx:
+	OSFreeMem(psServerMMUContext);
+fail_alloc_server_ctx:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv)
+{
+	SERVER_MMU_CONTEXT *psMMUContext = (SERVER_MMU_CONTEXT *) hPriv;
+
+	return psMMUContext->psFWMemContextMemDesc;
+}
+
+void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_DEV_VIRTADDR *psDevVAddr,
+				IMG_DEV_PHYADDR *psDevPAddr,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile,
+				MMU_FAULT_DATA *psOutFaultData)
+{
+	IMG_DEV_PHYADDR sPCDevPAddr;
+	DLLIST_NODE *psNode, *psNext;
+
+	OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+	{
+		SERVER_MMU_CONTEXT *psServerMMUContext =
+			IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+
+		if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
+		{
+			PVR_LOG(("Failed to get PC address for memory context"));
+			continue;
+		}
+
+		if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
+		{
+			PVR_DUMPDEBUG_LOG("Found memory context (PID = %d, %s)",
+							   psServerMMUContext->uiPID,
+							   psServerMMUContext->szProcessName);
+
+			MMU_CheckFaultAddress(psServerMMUContext->psMMUContext, psDevVAddr,
+						pfnDumpDebugPrintf, pvDumpDebugFile, psOutFaultData);
+			goto out_unlock;
+		}
+	}
+
+	/* Lastly check for fault in the kernel allocated memory */
+	if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK)
+	{
+		PVR_LOG(("Failed to get PC address for kernel memory context"));
+	}
+
+	if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
+	{
+		MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr,
+					pfnDumpDebugPrintf, pvDumpDebugFile, psOutFaultData);
+	}
+
+out_unlock:
+	OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock);
+}
+
+/* given the physical address of a page catalogue, searches for a corresponding
+ * MMU context and if found, provides the caller details of the process.
+ * Returns IMG_TRUE if a process is found.
+ */
+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
+								RGXMEM_PROCESS_INFO *psInfo)
+{
+	IMG_BOOL bRet = IMG_FALSE;
+	DLLIST_NODE *psNode, *psNext;
+	SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
+
+	/* check if the input PC addr corresponds to an active memory context */
+	dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+	{
+		SERVER_MMU_CONTEXT *psThisMMUContext =
+			IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+		IMG_DEV_PHYADDR sPCDevPAddr;
+
+		if (MMU_AcquireBaseAddr(psThisMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
+		{
+			PVR_LOG(("Failed to get PC address for memory context"));
+			continue;
+		}
+
+		if (sPCAddress.uiAddr == sPCDevPAddr.uiAddr)
+		{
+			psServerMMUContext = psThisMMUContext;
+			break;
+		}
+	}
+
+	if (psServerMMUContext != NULL)
+	{
+		psInfo->uiPID = psServerMMUContext->uiPID;
+		OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
+		psInfo->bUnregistered = IMG_FALSE;
+		bRet = IMG_TRUE;
+	}
+	/* else check if the input PC addr corresponds to the firmware */
+	else
+	{
+		IMG_DEV_PHYADDR sKernelPCDevPAddr;
+		PVRSRV_ERROR eError;
+
+		eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sKernelPCDevPAddr);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_LOG(("Failed to get PC address for kernel memory context"));
+		}
+		else
+		{
+			if (sPCAddress.uiAddr == sKernelPCDevPAddr.uiAddr)
+			{
+				psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE;
+				OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName));
+				psInfo->bUnregistered = IMG_FALSE;
+				bRet = IMG_TRUE;
+			}
+		}
+	}
+
+	if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) &&
+		(bRet == IMG_FALSE))
+	{
+		/* no active memory context found with the given PC address.
+		 * Check the list of most recently freed memory contexts.
+		 */
+		IMG_UINT32 i;
+
+		OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+		/* iterate through the list of unregistered memory contexts
+		 * from newest (one before the head) to the oldest (the current head)
+		 */
+		i = gui32UnregisteredMemCtxsHead;
+
+		do
+		{
+			UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
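+			/* Step i backwards one slot, wrapping from 0 round to the last slot */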
+			i ? i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1));
+
+			psRecord = &gasUnregisteredMemCtxs[i];
+
+			if (psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr)
+			{
+				psInfo->uiPID = psRecord->uiPID;
+				OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName));
+				psInfo->bUnregistered = IMG_TRUE;
+				bRet = IMG_TRUE;
+				break;
+			}
+		} while(i != gui32UnregisteredMemCtxsHead);
+
+		OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+
+	}
+
+	return bRet;
+}
+
+IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID,
+								RGXMEM_PROCESS_INFO *psInfo)
+{
+	IMG_BOOL bRet = IMG_FALSE;
+	DLLIST_NODE *psNode, *psNext;
+	SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
+
+	/* check if the input PID corresponds to an active memory context */
+	dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+	{
+		SERVER_MMU_CONTEXT *psThisMMUContext =
+			IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+
+		if (psThisMMUContext->uiPID == uiPID)
+		{
+			psServerMMUContext = psThisMMUContext;
+			break;
+		}
+	}
+
+	if (psServerMMUContext != NULL)
+	{
+		psInfo->uiPID = psServerMMUContext->uiPID;
+		OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
+		psInfo->bUnregistered = IMG_FALSE;
+		bRet = IMG_TRUE;
+	}
+	/* else check if the input PID corresponds to the firmware */
+	else if (uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+	{
+		psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE;
+		OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName));
+		psInfo->bUnregistered = IMG_FALSE;
+		bRet = IMG_TRUE;
+	}
+
+	if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) &&
+		(bRet == IMG_FALSE))
+	{
+		/* if the PID didn't correspond to an active context or the
+		 * FW address then see if it matches a recently unregistered context
+		 */
+		const IMG_UINT32 ui32Mask = UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1;
+		IMG_UINT32 i, j;
+
+		OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+		for (i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j = 0;
+		     j < UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE;
+		     i = (i - 1) & ui32Mask, j++)
+		{
+			UNREGISTERED_MEMORY_CONTEXT *psRecord = &gasUnregisteredMemCtxs[i];
+
+			if (psRecord->uiPID == uiPID)
+			{
+				psInfo->uiPID = psRecord->uiPID;
+				OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName));
+				psInfo->bUnregistered = IMG_TRUE;
+				bRet = IMG_TRUE;
+				break;
+			}
+		}
+
+		OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+	}
+
+	return bRet;
+}
+
+/******************************************************************************
+ End of file (rgxmem.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmem.h
new file mode 100644
index 0000000..9c1b55e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmem.h
@@ -0,0 +1,135 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX memory context management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for RGX memory context management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXMEM_H__)
+#define __RGXMEM_H__
+
+#include "pvrsrv_error.h"
+#include "device.h"
+#include "mmu_common.h"
+#include "rgxdevice.h"
+
+#define RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME 16
+
+/* this PID denotes the firmware */
+#define RGXMEM_SERVER_PID_FIRMWARE 0xFFFFFFFF
+
+/* this PID denotes the PM */
+#define RGXMEM_SERVER_PID_PM 0xEFFFFFFF
+
+typedef struct _RGXMEM_PROCESS_INFO_
+{
+	IMG_PID uiPID;
+	IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+	IMG_BOOL bUnregistered;
+} RGXMEM_PROCESS_INFO;
+
+IMG_DEV_PHYADDR GetPC(MMU_CONTEXT * psContext);
+
+/* FIXME: SyncPrim should be stored on the memory context */
+void RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDeviceNode);
+void RGXMMUSyncPrimFree(void);
+
+void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode,
+						   IMG_HANDLE hDeviceData,
+						   MMU_LEVEL eMMULevel,
+						   IMG_BOOL bUnmap);
+
+/*************************************************************************/ /*!
+@Function       RGXMMUCacheInvalidateKick
+
+@Description    Sends a flush command to a particular DM but first takes
+                the power lock.
+
+@Input          psDeviceNode   Device node
+@Input          pui16NextMMUInvalidateUpdate   Receives the sync update value
+                used for this invalidate
+@Input          bInterrupt  Should the firmware signal command completion to
+                the host
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_UINT16 *pui16NextMMUInvalidateUpdate,
+                                       IMG_BOOL bInterrupt);
+
+/*************************************************************************/ /*!
+@Function       RGXPreKickCacheCommand
+
+@Description    Sends a cache flush command to a particular DM without
+                taking the power lock. It is the caller's responsibility
+                to ensure the power lock is held before calling this function.
+
+@Input          psDevInfo   Device Info
+@Input          eDM         The DM to which the command is sent.
+@Input          pui16MMUInvalidateUpdate
+@Input          bInterrupt  Should the firmware signal command completion to
+                the host
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                    RGXFWIF_DM eDM,
+                                    IMG_UINT16 *pui16MMUInvalidateUpdate,
+                                    IMG_BOOL bInterrupt);
+
+void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData);
+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE	*psDeviceNode,
+									  MMU_CONTEXT			*psMMUContext,
+									  IMG_HANDLE			*hPrivData);
+
+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv);
+
+void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_DEV_VIRTADDR *psDevVAddr,
+				IMG_DEV_PHYADDR *psDevPAddr,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile,
+				MMU_FAULT_DATA *psOutFaultData);
+
+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
+								RGXMEM_PROCESS_INFO *psInfo);
+
+IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID,
+								RGXMEM_PROCESS_INFO *psInfo);
+
+#endif /* __RGXMEM_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmipsmmuinit.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmipsmmuinit.c
new file mode 100644
index 0000000..4df5732
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmipsmmuinit.c
@@ -0,0 +1,955 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxmipsmmuinit.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "mmu_common.h"
+#include "pdump_mmu.h"
+#include "rgxheapconfig.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "rgx_memallocflags.h"
+#include "pdump_km.h"
+#include "rgxdevice.h"
+
+/*
+ * Bits of PT, PD and PC not involving addresses
+ */
+
+/* Currently there is no page directory for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PDE_PROTMASK        0
+/* Currently there is no page catalog for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PCE_PROTMASK	     0
+
+
+static MMU_PxE_CONFIG sRGXMMUPCEConfig;
+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;
+
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 1MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB;
+
+
+/*
+ *
+ *  Configuration for heaps with 2MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig2MB;
+
+
+/* Forward declaration of protection bits derivation functions, for
+   the following structure */
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
+
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+										   const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+										   const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+										   const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+										   IMG_HANDLE *phPriv);
+
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
+
+/* Cached policy */
+static IMG_UINT32 gui32CachedPolicy;
+
+PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	IMG_BOOL bPhysBusAbove32Bit = IMG_FALSE;
+
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH))
+	{
+		bPhysBusAbove32Bit = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32;
+	}
+
+	sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName =
+		PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]);
+
+	/*
+	 * Setup sRGXMMUPCEConfig, no PC in MIPS MMU currently
+	 */
+	sRGXMMUPCEConfig.uiBytesPerEntry = 0; /* No PC entries for the MIPS MMU */
+	sRGXMMUPCEConfig.uiAddrMask = 0; /* Mask to get significant address bits of PC entry */
+
+	sRGXMMUPCEConfig.uiAddrShift = 0; /* Shift this many bits to get PD address in PC entry */
+	sRGXMMUPCEConfig.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; /* Alignment of PD AND PC */
+
+	sRGXMMUPCEConfig.uiProtMask = RGX_MIPS_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits of the PC */
+	sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to have status bits starting with bit 0 */
+
+	sRGXMMUPCEConfig.uiValidEnMask = RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */
+	sRGXMMUPCEConfig.uiValidEnShift = RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to have entry valid bit starting with bit 0 */
+
+	/*
+	 *  Setup sRGXMMUTopLevelDevVAddrConfig
+	 */
+	sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = 0; /* Get the PC address bits from a 40 bit virt. address (in a 64bit UINT) */
+	sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = 0;
+	sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = 0;
+
+	sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = 0; /* Get the PD address bits from a 40 bit virt. address (in a 64bit UINT) */
+	sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = 0;
+	sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = 0;
+
+	sRGXMMUTopLevelDevVAddrConfig.uiPTIndexMask = IMG_UINT64_C(0xfffffff000); /* Get the PT address bits from a 40 bit virt. address (in a 64bit UINT) */
+	sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K;
+	sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = RGX_FIRMWARE_RAW_HEAP_SIZE >> sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift;
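+	/* i.e. one page-table entry per 4 kB page across the whole firmware heap */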
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_4KBDP. No PD in MIPS MMU currently
+	 */
+	sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 0;
+
+	/* No PD used for MIPS */
+	sRGXMMUPDEConfig_4KBDP.uiAddrMask = 0;
+	sRGXMMUPDEConfig_4KBDP.uiAddrShift = 0;
+	sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K;
+
+	sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x0);
+	sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 0;
+
+	sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MIPS_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_4KBDP.
+	 */
+	sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+
+
+	if (bPhysBusAbove32Bit)
+	{
+		sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT;
+		gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT;
+	}
+	else
+	{
+		sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK;
+		gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY;
+	}
+
+	sRGXMMUPTEConfig_4KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT;
+	sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K;
+
+	sRGXMMUPTEConfig_4KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK;
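+	/* ~CLRMSK turns the register's clear-mask into the field mask, selecting
+	 * the cache-policy field together with the D/V/G (dirty/valid/global) bits */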
+	sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN;
+	sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_4KBDP
+	 */
+	sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = 0;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = 0;
+	sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = 0;
+
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = 0;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = 0;
+	sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = 0;
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = RGX_FIRMWARE_RAW_HEAP_SIZE >> sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift;
+
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
+	sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff);
+
+	/*
+	 * Setup gsPageSizeConfig4KB
+	 */
+	gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
+	gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
+	gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
+	gsPageSizeConfig4KB.uiRefCount = 0;
+	gsPageSizeConfig4KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_16KBDP
+	 */
+	sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPDEConfig_16KBDP.uiAddrMask = 0;
+	sRGXMMUPDEConfig_16KBDP.uiAddrShift = 0; /* These are for a page directory ENTRY, meaning the address of a PT cropped to suit the PD */
+	sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the page tables NOT directories */
+
+	sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = 0;
+	sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 0;
+
+	sRGXMMUPDEConfig_16KBDP.uiProtMask = 0;
+	sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_16KBDP.uiValidEnMask = 0;
+	sRGXMMUPDEConfig_16KBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_16KBDP. Not supported yet
+	 */
+	sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPTEConfig_16KBDP.uiAddrMask = 0;
+	sRGXMMUPTEConfig_16KBDP.uiAddrShift = 0; /* These are for a page table ENTRY, meaning the address of a PAGE cropped to suit the PD */
+	sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the pages NOT tables */
+
+	sRGXMMUPTEConfig_16KBDP.uiProtMask = 0;
+	sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_16KBDP.uiValidEnMask = 0;
+	sRGXMMUPTEConfig_16KBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_16KBDP
+	 */
+	sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = 0;
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = 0;
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = 0;
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig16KB
+	 */
+	gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
+	gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
+	gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
+	gsPageSizeConfig16KB.uiRefCount = 0;
+	gsPageSizeConfig16KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size. Not supported yet
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_64KBDP
+	 */
+	sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPDEConfig_64KBDP.uiAddrMask = 0;
+	sRGXMMUPDEConfig_64KBDP.uiAddrShift = 0;
+	sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = 0;
+	sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 0;
+
+	sRGXMMUPDEConfig_64KBDP.uiProtMask = 0;
+	sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_64KBDP.uiValidEnMask = 0;
+	sRGXMMUPDEConfig_64KBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_64KBDP.
+	 *
+	 */
+	sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+
+	if (bPhysBusAbove32Bit)
+	{
+		sRGXMMUPTEConfig_64KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT;
+		gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT;
+	}
+	else
+	{
+		sRGXMMUPTEConfig_64KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK;
+		gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY;
+	}
+
+	/* Even while using 64K pages, MIPS still aligns addresses to 4K */
+	sRGXMMUPTEConfig_64KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT;
+	sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K;
+
+	sRGXMMUPTEConfig_64KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK;
+	sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN;
+	sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_64KBDP.
+	 */
+	sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = 0;
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = 0;
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00ffff0000);
+	sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_64K;
+	sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = RGX_FIRMWARE_RAW_HEAP_SIZE >> sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift;
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff);
+	sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff);
+
+	/*
+	 * Setup gsPageSizeConfig64KB.
+	 */
+	gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
+	gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
+	gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
+	gsPageSizeConfig64KB.uiRefCount = 0;
+	gsPageSizeConfig64KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size. Not supported yet
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_256KBDP
+	 */
+	sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPDEConfig_256KBDP.uiAddrMask = 0;
+	sRGXMMUPDEConfig_256KBDP.uiAddrShift = 0;
+	sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = 0;
+	sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 0;
+
+	sRGXMMUPDEConfig_256KBDP.uiProtMask = 0;
+	sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_256KBDP.uiValidEnMask = 0;
+	sRGXMMUPDEConfig_256KBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP
+	 */
+	sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPTEConfig_256KBDP.uiAddrMask = 0;
+	sRGXMMUPTEConfig_256KBDP.uiAddrShift = 0;
+	sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPTEConfig_256KBDP.uiProtMask = 0;
+	sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_256KBDP.uiValidEnMask = 0;
+	sRGXMMUPTEConfig_256KBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_256KBDP
+	 */
+	sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = 0;
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = 0;
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = 0;
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig256KB
+	 */
+	gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
+	gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
+	gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
+	gsPageSizeConfig256KB.uiRefCount = 0;
+	gsPageSizeConfig256KB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUPDEConfig_1MBDP.  Not supported yet
+	 */
+	sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPDEConfig_1MBDP.uiAddrMask = 0;
+	sRGXMMUPDEConfig_1MBDP.uiAddrShift = 0;
+	sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = 0;
+	sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 0;
+
+	sRGXMMUPDEConfig_1MBDP.uiProtMask = 0;
+	sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_1MBDP.uiValidEnMask = 0;
+	sRGXMMUPDEConfig_1MBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_1MBDP
+	 */
+	sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_1MBDP.uiAddrMask = 0;
+	sRGXMMUPTEConfig_1MBDP.uiAddrShift = 0;
+	sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPTEConfig_1MBDP.uiProtMask = 0;
+	sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_1MBDP.uiValidEnMask = 0;
+	sRGXMMUPTEConfig_1MBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_1MBDP
+	 */
+	sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = 0;
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = 0;
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = 0;
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig1MB
+	 */
+	gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
+	gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
+	gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
+	gsPageSizeConfig1MB.uiRefCount = 0;
+	gsPageSizeConfig1MB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUPDEConfig_2MBDP. Not supported yet
+	 */
+	sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPDEConfig_2MBDP.uiAddrMask = 0;
+	sRGXMMUPDEConfig_2MBDP.uiAddrShift = 0;
+	sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = 0;
+	sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 0;
+
+	sRGXMMUPDEConfig_2MBDP.uiProtMask = 0;
+	sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_2MBDP.uiValidEnMask = 0;
+	sRGXMMUPDEConfig_2MBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_2MBDP
+	 */
+	sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPTEConfig_2MBDP.uiAddrMask = 0;
+	sRGXMMUPTEConfig_2MBDP.uiAddrShift = 0;
+	sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPTEConfig_2MBDP.uiProtMask = 0;
+	sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_2MBDP.uiValidEnMask = 0;
+	sRGXMMUPTEConfig_2MBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_2MBDP
+	 */
+	sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = 0;
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = 0;
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = 0;
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig2MB
+	 */
+	gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
+	gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
+	gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
+	gsPageSizeConfig2MB.uiRefCount = 0;
+	gsPageSizeConfig2MB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUDeviceAttributes
+	 */
+	sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_MIPS_MICROAPTIV;
+	sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_1;
+	/* The whole page table is held in a single physical page whose size matches that of the table itself */
+	sRGXMMUDeviceAttributes.ui32BaseAlign = RGXMIPSFW_LOG2_PAGETABLE_PAGE_SIZE;
+	/* The base configuration is set to 4kB pages */
+	sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPTEConfig_4KBDP;
+	sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+	/* Functions for deriving page table/dir/cat protection bits */
+	sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+	sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+	sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+	/* Functions for establishing configurations for PDE/PTE/DEVVADDR
+	   on per-heap basis */
+	sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+	sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+	sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
+	sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
+
+	psDeviceNode->psFirmwareMMUDevAttrs = &sRGXMMUDeviceAttributes;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+
+	eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+	psDeviceNode->pfnMMUGetContextID = NULL;
+#endif
+
+	psDeviceNode->psFirmwareMMUDevAttrs = NULL;
+
+#if defined(DEBUG)
+	PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
+			 gsPageSizeConfig4KB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
+			 gsPageSizeConfig4KB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
+			 gsPageSizeConfig16KB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
+			 gsPageSizeConfig16KB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
+			 gsPageSizeConfig64KB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
+			 gsPageSizeConfig64KB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
+			 gsPageSizeConfig256KB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
+			 gsPageSizeConfig256KB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
+			 gsPageSizeConfig1MB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
+			 gsPageSizeConfig1MB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
+			 gsPageSizeConfig2MB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
+			 gsPageSizeConfig2MB.uiRefCount));
+#endif
+	if (gsPageSizeConfig4KB.uiRefCount > 0 ||
+		gsPageSizeConfig16KB.uiRefCount > 0 ||
+		gsPageSizeConfig64KB.uiRefCount > 0 ||
+		gsPageSizeConfig256KB.uiRefCount > 0 ||
+		gsPageSizeConfig1MB.uiRefCount > 0 ||
+		gsPageSizeConfig2MB.uiRefCount > 0
+		)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
+	}
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt4
+@Description    Calculate the PCE protection flags based on a 4 byte entry
+@Return         IMG_UINT32 The derived protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
+{
+	PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt8
+@Description    Calculate the PCE protection flags based on an 8 byte entry
+@Return         IMG_UINT64 The derived protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+	PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt4
+@Description    Derive the PDE protection flags based on a 4 byte entry
+@Return         IMG_UINT32 The derived protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt8
+@Description    Derive the PDE protection flags based on an 8 byte entry
+
+@Input          uiLog2DataPageSize The log2 of the required page size.
+				E.g., for 4KiB pages, this parameter must be 12.
+				For 2MiB pages, it must be set to 21.
+
+@Return         IMG_UINT64 The derived protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+	PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt4
+@Description    Calculate the PTE protection flags based on a 4 byte entry
+@Return         IMG_UINT32 The derived protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
+{
+	IMG_UINT32 ui32MMUFlags = 0;
+
+	if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
+	{
+		/* read/write */
+		ui32MMUFlags |= RGXMIPSFW_ENTRYLO_DIRTY_EN;
+	}
+	else if (MMU_PROTFLAGS_READABLE & uiProtFlags)
+	{
+		/* read only */
+	}
+	else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
+	{
+		/* write only */
+		ui32MMUFlags |= RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN;
+	}
+	else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: neither read nor write specified..."));
+	}
+
+	/* cache coherency */
+	if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: cache coherency not supported for MIPS caches"));
+	}
+
+	/* cache setup */
+	if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
+	{
+		ui32MMUFlags |= RGXMIPSFW_ENTRYLO_UNCACHED;
+	}
+	else
+	{
+		ui32MMUFlags |= gui32CachedPolicy <<
+		                RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT;
+	}
+
+	if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
+	{
+		ui32MMUFlags |= RGXMIPSFW_ENTRYLO_VALID_EN;
+		ui32MMUFlags |= RGXMIPSFW_ENTRYLO_GLOBAL_EN;
+	}
+
+	if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
+	{
+		/* PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt4: PMMETA Protect not existent for MIPS, option discarded")); */
+	}
+
+	return ui32MMUFlags;
+}
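+
+/* Illustrative sketch (not called anywhere in the driver): for a typical
+ * read/write, cached and valid mapping, the function above ORs together
+ *   RGXMIPSFW_ENTRYLO_DIRTY_EN
+ *   | (gui32CachedPolicy << RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT)
+ *   | RGXMIPSFW_ENTRYLO_VALID_EN
+ *   | RGXMIPSFW_ENTRYLO_GLOBAL_EN
+ * when called as RGXDerivePTEProt4(MMU_PROTFLAGS_READABLE |
+ * MMU_PROTFLAGS_WRITEABLE | MMU_PROTFLAGS_CACHED).
+ */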
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt8
+@Description    Calculate the PTE protection flags based on an 8 byte entry
+@Return         IMG_UINT64 The derived protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+	PVR_DPF((PVR_DBG_ERROR, "8-byte PTE not supported on this device"));
+
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXGetPageSizeConfigCB
+@Description    Set up configuration for variable sized data pages.
+				RGXPutPageSizeConfigCB has to be called to ensure correct
+				refcounting.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+										   const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+										   const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+										   const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+										   IMG_HANDLE *phPriv)
+{
+	MMU_PAGESIZECONFIG *psPageSizeConfig;
+
+	switch (uiLog2DataPageSize)
+	{
+	case RGXMIPSFW_LOG2_PAGE_SIZE_64K:
+		psPageSizeConfig = &gsPageSizeConfig64KB;
+		break;
+	case RGXMIPSFW_LOG2_PAGE_SIZE_4K:
+		psPageSizeConfig = &gsPageSizeConfig4KB;
+		break;
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+				 "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+				 uiLog2DataPageSize));
+		*phPriv = NULL;
+		return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+	}
+
+	/* Refer caller's pointers to the data */
+	*ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
+	*ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
+	*ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
+
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+	/* Increment ref-count - not that we're allocating anything here
+	   (I'm using static structs), but one day we might, so we want
+	   the Get/Put code to be balanced properly */
+	psPageSizeConfig->uiRefCount ++;
+
+	/* This is purely for debug statistics */
+	psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
+										  psPageSizeConfig->uiRefCount);
+#endif
+
+	*phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
+	PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPutPageSizeConfigCB
+@Description    Tells this code that the MMU module is done with the
+				configurations set in RGXGetPageSizeConfigCB. This can
+				be a no-op.
+				Called after RGXGetPageSizeConfigCB.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
+{
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+	MMU_PAGESIZECONFIG *psPageSizeConfig;
+	IMG_UINT32 uiLog2DataPageSize;
+
+	uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;
+
+	switch (uiLog2DataPageSize)
+	{
+	case RGXMIPSFW_LOG2_PAGE_SIZE_64K:
+		psPageSizeConfig = &gsPageSizeConfig64KB;
+		break;
+	case RGXMIPSFW_LOG2_PAGE_SIZE_4K:
+		psPageSizeConfig = &gsPageSizeConfig4KB;
+		break;
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+				 "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+				 uiLog2DataPageSize));
+		return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+	}
+
+	/* Ref-count here is not especially useful, but it's an extra
+	   check that the API is being used correctly */
+	psPageSizeConfig->uiRefCount --;
+#else
+	PVR_UNREFERENCED_PARAMETER(hPriv);
+#endif
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32PDE);
+	PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+	PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS"));
+	return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(ui64PDE);
+	PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+	PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS"));
+	return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmipsmmuinit.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmipsmmuinit.h
new file mode 100644
index 0000000..8d59c18
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmipsmmuinit.h
@@ -0,0 +1,94 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation for the MIPS firmware
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* NB: this file is not to be included arbitrarily. It exists solely
+   for the linkage between rgxinit.c and rgxmipsmmuinit.c, the former
+   being otherwise cluttered by the contents of the latter */
+
+#ifndef _SRVKM_RGXMIPSMMUINIT_H_
+#define _SRVKM_RGXMIPSMMUINIT_H_
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "img_defs.h"
+#include "rgx_mips.h"
+
+/*
+ * Labelling of fields within the virtual address. No PD and PC are
+ * currently used for the MIPS MMU.
+ */
+
+/* Page Table entry # */
+#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT        (12U)
+#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK       (IMG_UINT64_C(0xFFFFFFFF00000FFF))
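+
+/* For illustration only (not part of the driver API): the PT index of a
+ * device virtual address is recovered with
+ *   (ui64DevVAddr & ~RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK)
+ *       >> RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT
+ * where ui64DevVAddr is a hypothetical 64-bit virtual address value.
+ */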
+
+
+/* PC entries related definitions */
+/* No PC is currently used for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN            (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT         (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_CLRMSK        (0U)
+
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_SHIFT     (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_CLRMSK    (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_EN        (0U)
+
+/* PD entries related definitions */
+/* No PD is currently used for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN            (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT         (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_CLRMSK        (0U)
+
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_SHIFT     (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_CLRMSK    (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_EN        (0U)
+
+
+PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* #ifndef _SRVKM_RGXMIPSMMUINIT_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmmuinit.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmmuinit.c
new file mode 100644
index 0000000..efcbd66
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmmuinit.c
@@ -0,0 +1,1078 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+#include "rgxmmuinit.h"
+#include "rgxmmudefs_km.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "mmu_common.h"
+#include "pdump_mmu.h"
+
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "rgx_memallocflags.h"
+#include "rgx_heaps.h"
+#include "pdump_km.h"
+
+
+/* useful macros */
+/* units represented in a bitfield */
+#define UNITS_IN_BITFIELD(Mask, Shift)	(((Mask) >> (Shift)) + 1)
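+
+/* Worked example: for the 64kB data-page heap configured below,
+ * uiPTIndexMask = 0x00001f0000 and uiPTIndexShift = 16, so
+ * UNITS_IN_BITFIELD(0x00001f0000, 16) = (0x1f + 1) = 32 PT entries. */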
+
+
+/*
+ * Bits of PT, PD and PC not involving addresses
+ */
+
+#define RGX_MMUCTRL_PTE_PROTMASK	(RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \
+		RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \
+		RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \
+		RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN | \
+		RGX_MMUCTRL_PT_DATA_CC_EN | \
+		RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \
+		RGX_MMUCTRL_PT_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PDE_PROTMASK	(RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \
+		~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \
+		RGX_MMUCTRL_PD_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PCE_PROTMASK	(RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \
+		RGX_MMUCTRL_PC_DATA_VALID_EN)
+
+
+
+static MMU_PxE_CONFIG sRGXMMUPCEConfig;
+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;
+
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 1MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB;
+
+
+/*
+ *
+ *  Configuration for heaps with 2MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig2MB;
+
+
+/* Forward declaration of protection bits derivation functions, for
+   the following structure */
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
+
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+		const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+		const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+		const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+		IMG_HANDLE *phPriv);
+
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
+
+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	/* Setup of Px Entries:
+	 *
+	 *
+	 * PAGE TABLE (8 Byte):
+	 *
+	 * | 62              | 61...40         | 39...12 (varies) | 11...6          | 5             | 4      | 3               | 2               | 1         | 0     |
+	 * | PM/Meta protect | VP Page (39:18) | Physical Page    | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid |
+	 *
+	 *
+	 * PAGE DIRECTORY (8 Byte):
+	 *
+	 *  | 40            | 39...5  (varies)        | 4          | 3...1     | 0     |
+	 *  | Entry Pending | Page Table base address | (reserved) | Page Size | Valid |
+	 *
+	 *
+	 * PAGE CATALOGUE (4 Byte):
+	 *
+	 *  | 31...4                      | 3...2      | 1             | 0     |
+	 *  | Page Directory base address | (reserved) | Entry Pending | Valid |
+	 *
+	 */
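+
+	/* Illustrative example (the physical address is a made-up value):
+	 * with the 8-byte PTE layout above, an entry mapping physical page
+	 * 0x12345000 at a 4kB data-page size is composed as
+	 *   (IMG_UINT64_C(0x12345000) & sRGXMMUPTEConfig_4KBDP.uiAddrMask)
+	 *       | RGX_MMUCTRL_PT_DATA_VALID_EN
+	 * i.e. the physical address fills bits 39..12 and bit 0 marks the
+	 * entry valid. */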
+
+
+	/* Example of how to get the PD address from a PC entry.
+	 * The same procedure applies to PD and PT entries to retrieve PT and page addresses:
+	 *
+	 * 1) sRGXMMUPCEConfig.uiAddrMask applied to PC entry with '&':
+	 *  | 31...4   | 3...2      | 1             | 0     |
+	 *  | PD Addr  | 0          | 0             | 0     |
+	 *
+	 * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>':
+	 *  | 27...0   |
+	 *  | PD Addr  |
+	 *
+	 * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<':
+	 *  | 39...0   |
+	 *  | PD Addr  |
+	 *
+	 */
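+
+	/* Worked example (0x00a1b2c5 is an illustrative value): for a PC entry
+	 * of 0x00a1b2c5, step 1 gives 0x00a1b2c0, step 2 gives 0x000a1b2c, and
+	 * step 3 yields the 4kB-aligned PD physical address 0xa1b2c000. */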
+
+
+	sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName =
+			PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+
+	/*
+	 * Setup sRGXMMUPCEConfig
+	 */
+	sRGXMMUPCEConfig.uiBytesPerEntry = 4;     /* 32 bit entries */
+	sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */
+
+	sRGXMMUPCEConfig.uiAddrShift = 4;         /* Shift this many bits to get PD address */
+	sRGXMMUPCEConfig.uiAddrLog2Align = 12;    /* Alignment of PD physical addresses. */
+
+	sRGXMMUPCEConfig.uiProtMask = RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits (pending | valid)*/
+	sRGXMMUPCEConfig.uiProtShift = 0;                       /* Shift this many bits to get the status bits */
+
+	sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN;     /* Mask to get entry valid bit of the PC */
+	sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */
+
+	/*
+	 *  Setup sRGXMMUTopLevelDevVAddrConfig
+	 */
+	sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. device address */
+	sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;  /* Shift a 40 bit virt. device address by this amount to get the PC index */
+	sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask,
+			sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift));
+
+	sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */
+	sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;  /* Shift a 40 bit virt. device address by this amount to get the PD index */
+	sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask,
+			sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift));
+
+	/*
+	 *
+	 *  Configuration for heaps with 4kB Data-Page size
+	 *
+	 */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_4KBDP
+	 */
+	sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12;
+	sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12;
+
+	sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_4KBDP
+	 */
+	sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000);
+	sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12;
+	sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the pages NOT PTs */
+
+	sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_4KBDP
+	 */
+	sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask,
+			sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift));
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask,
+			sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift));
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask,
+			sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift));
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
+	sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig4KB
+	 */
+	gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
+	gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
+	gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
+	gsPageSizeConfig4KB.uiRefCount = 0;
+	gsPageSizeConfig4KB.uiMaxRefCount = 0;
+
+
+	/*
+	 *
+	 *  Configuration for heaps with 16kB Data-Page size
+	 *
+	 */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_16KBDP
+	 */
+	sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10;
+	sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10;
+
+	sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_16KBDP
+	 */
+	sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000);
+	sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14;
+	sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14;
+
+	sRGXMMUPTEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_16KBDP
+	 */
+	sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask,
+			sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask,
+			sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000);
+	sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14;
+	sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask,
+			sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift));
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff);
+	sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig16KB
+	 */
+	gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
+	gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
+	gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
+	gsPageSizeConfig16KB.uiRefCount = 0;
+	gsPageSizeConfig16KB.uiMaxRefCount = 0;
+
+
+	/*
+	 *
+	 *  Configuration for heaps with 64kB Data-Page size
+	 *
+	 */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_64KBDP
+	 */
+	sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8;
+	sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8;
+
+	sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_64KBDP
+	 */
+	sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000);
+	sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16;
+	sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16;
+
+	sRGXMMUPTEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_64KBDP
+	 */
+	sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask,
+			sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask,
+			sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000);
+	sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16;
+	sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask,
+			sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff);
+	sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig64KB
+	 */
+	gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
+	gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
+	gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
+	gsPageSizeConfig64KB.uiRefCount = 0;
+	gsPageSizeConfig64KB.uiMaxRefCount = 0;
+
+
+	/*
+	 *
+	 *  Configuration for heaps with 256kB Data-Page size
+	 *
+	 */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_256KBDP
+	 */
+	sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6;
+	sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6;
+
+	sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP
+	 */
+	sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000);
+	sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18;
+	sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18;
+
+	sRGXMMUPTEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_256KBDP
+	 */
+	sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask,
+			sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask,
+			sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001c0000);
+	sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18;
+	sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask,
+			sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff);
+	sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig256KB
+	 */
+	gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
+	gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
+	gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
+	gsPageSizeConfig256KB.uiRefCount = 0;
+	gsPageSizeConfig256KB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUPDEConfig_1MBDP
+	 */
+	sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	/*
+	 * The hardware requires page tables to be 1<<6 = 64-byte aligned, even
+	 * when they contain fewer entries.
+	 */
+	sRGXMMUPDEConfig_1MBDP.uiAddrShift = 6;
+	sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 6;
+
+	sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_1MBDP
+	 */
+	sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000);
+	sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20;
+	sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20;
+
+	sRGXMMUPTEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_1MBDP
+	 */
+	sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask,
+			sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask,
+			sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000);
+	sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20;
+	sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask,
+			sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff);
+	sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig1MB
+	 */
+	gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
+	gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
+	gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
+	gsPageSizeConfig1MB.uiRefCount = 0;
+	gsPageSizeConfig1MB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUPDEConfig_2MBDP
+	 */
+	sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	/*
+	 * The hardware requires page tables to be 1<<6 = 64-byte aligned, even
+	 * when they contain fewer entries.
+	 */
+	sRGXMMUPDEConfig_2MBDP.uiAddrShift = 6;
+	sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 6;
+
+	sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_2MBDP
+	 */
+	sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000);
+	sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21;
+	sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21;
+
+	sRGXMMUPTEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_2MBDP
+	 */
+	sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask,
+			sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask,
+			sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000);
+	sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 21;
+	sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask,
+			sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff);
+	sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig2MB
+	 */
+	gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
+	gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
+	gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
+	gsPageSizeConfig2MB.uiRefCount = 0;
+	gsPageSizeConfig2MB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUDeviceAttributes
+	 */
+	sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT;
+	sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3;
+	sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+	sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig;
+	sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+	/* Functions for deriving page table/dir/cat protection bits */
+	sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+	sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+	sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+	/* Functions for establishing configurations for PDE/PTE/DEVVADDR
+	   on per-heap basis */
+	sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+	sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+	sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
+	sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
+
+	psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+
+	eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+	psDeviceNode->pfnMMUGetContextID = NULL;
+#endif
+
+	psDeviceNode->psMMUDevAttrs = NULL;
+
+#if defined(DEBUG)
+	PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
+			gsPageSizeConfig4KB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
+			gsPageSizeConfig4KB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
+			gsPageSizeConfig16KB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
+			gsPageSizeConfig16KB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
+			gsPageSizeConfig64KB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
+			gsPageSizeConfig64KB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
+			gsPageSizeConfig256KB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
+			gsPageSizeConfig256KB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
+			gsPageSizeConfig1MB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
+			gsPageSizeConfig1MB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
+			gsPageSizeConfig2MB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
+			gsPageSizeConfig2MB.uiRefCount));
+#endif
+	if (gsPageSizeConfig4KB.uiRefCount > 0 ||
+			gsPageSizeConfig16KB.uiRefCount > 0 ||
+			gsPageSizeConfig64KB.uiRefCount > 0 ||
+			gsPageSizeConfig256KB.uiRefCount > 0 ||
+			gsPageSizeConfig1MB.uiRefCount > 0 ||
+			gsPageSizeConfig2MB.uiRefCount > 0
+	)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
+	}
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt4
+@Description    Calculate the PCE protection flags based on a 4 byte entry
+@Return         IMG_UINT32 The derived protection flags
+ */ /**************************************************************************/
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
+{
+	return (uiProtFlags & MMU_PROTFLAGS_INVALID)?0:RGX_MMUCTRL_PC_DATA_VALID_EN;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt8
+@Description    Calculate the PCE protection flags based on an 8 byte entry
+@Return         IMG_UINT64 The derived protection flags
+ */ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+	PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device"));
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt4
+@Description    Derive the PDE protection flags based on a 4 byte entry
+@Return         IMG_UINT32 The derived protection flags
+ */ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt8
+@Description    Derive the PDE protection flags based on an 8 byte entry
+
+@Input          uiLog2DataPageSize The log2 of the required page size.
+                E.g., for 4KiB pages, this parameter must be 12.
+                For 2MiB pages, it must be set to 21.
+
+@Return         IMG_UINT64 The derived protection flags
+ */ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_UINT64 ret_value = 0; /* 0 means invalid */
+
+	if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */
+	{
+		switch (uiLog2DataPageSize)
+		{
+		case RGX_HEAP_4KB_PAGE_SHIFT:
+			ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB;
+			break;
+		case RGX_HEAP_16KB_PAGE_SHIFT:
+			ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB;
+			break;
+		case RGX_HEAP_64KB_PAGE_SHIFT:
+			ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB;
+			break;
+		case RGX_HEAP_256KB_PAGE_SHIFT:
+			ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB;
+			break;
+		case RGX_HEAP_1MB_PAGE_SHIFT:
+			ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB;
+			break;
+		case RGX_HEAP_2MB_PAGE_SHIFT:
+			ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB;
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]",
+					__FILE__, __LINE__, __func__, uiLog2DataPageSize));
+		}
+	}
+	return ret_value;
+}
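+
+/* Illustrative example: for a valid 64kB data-page heap
+ * (uiLog2DataPageSize == RGX_HEAP_64KB_PAGE_SHIFT == 16, and uiProtFlags
+ * without MMU_PROTFLAGS_INVALID), the function above returns
+ *   RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB
+ */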
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt4
+@Description    Calculate the PTE protection flags based on a 4 byte entry
+@Return         IMG_UINT32 The derived protection flags
+ */ /**************************************************************************/
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device"));
+
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt8
+@Description    Calculate the PTE protection flags based on an 8 byte entry
+@Return         IMG_UINT64 The derived protection flags
+ */ /**************************************************************************/
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_UINT64 ui64MMUFlags = 0;
+
+	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+	if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
+	{
+		/* read/write */
+	}
+	else if (MMU_PROTFLAGS_READABLE & uiProtFlags)
+	{
+		/* read only */
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN;
+	}
+	else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
+	{
+		/* write only */
+		PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: write-only is not possible on this device"));
+	}
+	else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified..."));
+	}
+
+	/* cache coherency */
+	if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
+	{
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN;
+	}
+
+	/* cache setup */
+	if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
+	{
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN;
+	}
+
+	if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
+	{
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN;
+	}
+
+	if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
+	{
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN;
+	}
+
+	return ui64MMUFlags;
+}
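+
+/* Illustrative example: a read-only, cached, cache-coherent, valid mapping
+ * (uiProtFlags = MMU_PROTFLAGS_READABLE | MMU_PROTFLAGS_CACHE_COHERENT |
+ * MMU_PROTFLAGS_CACHED) makes the function above return
+ *   RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | RGX_MMUCTRL_PT_DATA_CC_EN
+ *   | RGX_MMUCTRL_PT_DATA_VALID_EN
+ */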
+
+
+/*************************************************************************/ /*!
+@Function       RGXGetPageSizeConfigCB
+@Description    Set up configuration for variable sized data pages.
+                RGXPutPageSizeConfigCB has to be called to ensure correct
+                refcounting.
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+		const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+		const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+		const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+		IMG_HANDLE *phPriv)
+{
+	MMU_PAGESIZECONFIG *psPageSizeConfig;
+
+	switch (uiLog2DataPageSize)
+	{
+	case RGX_HEAP_4KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig4KB;
+		break;
+	case RGX_HEAP_16KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig16KB;
+		break;
+	case RGX_HEAP_64KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig64KB;
+		break;
+	case RGX_HEAP_256KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig256KB;
+		break;
+	case RGX_HEAP_1MB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig1MB;
+		break;
+	case RGX_HEAP_2MB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig2MB;
+		break;
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+				"RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+				uiLog2DataPageSize));
+		*phPriv = NULL;
+		return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+	}
+
+	/* Refer caller's pointers to the data */
+	*ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
+	*ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
+	*ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
+
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+	/* Increment ref-count - not that we're allocating anything here
+       (I'm using static structs), but one day we might, so we want
+       the Get/Put code to be balanced properly */
+	psPageSizeConfig->uiRefCount++;
+
+	/* This is purely for debug statistics */
+	psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
+			psPageSizeConfig->uiRefCount);
+#endif
+
+	*phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
+	PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPutPageSizeConfigCB
+@Description    Tells this code that the MMU module is done with the
+                configurations set up by RGXGetPageSizeConfigCB. This can
+                be a no-op.
+                Called after RGXGetPageSizeConfigCB.
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
+{
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+	MMU_PAGESIZECONFIG *psPageSizeConfig;
+	IMG_UINT32 uiLog2DataPageSize;
+
+	uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;
+
+	switch (uiLog2DataPageSize)
+	{
+	case RGX_HEAP_4KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig4KB;
+		break;
+	case RGX_HEAP_16KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig16KB;
+		break;
+	case RGX_HEAP_64KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig64KB;
+		break;
+	case RGX_HEAP_256KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig256KB;
+		break;
+	case RGX_HEAP_1MB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig1MB;
+		break;
+	case RGX_HEAP_2MB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig2MB;
+		break;
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+				"RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+				uiLog2DataPageSize));
+		return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+	}
+
+	/* Ref-count here is not especially useful, but it's an extra
+       check that the API is being used correctly */
+	psPageSizeConfig->uiRefCount--;
+#else
+	PVR_UNREFERENCED_PARAMETER(hPriv);
+#endif
+	return PVRSRV_OK;
+}
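+
+/* Usage sketch for the Get/Put pair above (illustrative only): every
+ * successful Get must be balanced by a Put with the returned handle so the
+ * optional ref-counting stays consistent:
+ *
+ *     const MMU_PxE_CONFIG *psPDE, *psPTE;
+ *     const MMU_DEVVADDR_CONFIG *psVAddr;
+ *     IMG_HANDLE hPriv;
+ *
+ *     if (RGXGetPageSizeConfigCB(RGX_HEAP_4KB_PAGE_SHIFT, &psPDE, &psPTE,
+ *                                &psVAddr, &hPriv) == PVRSRV_OK)
+ *     {
+ *         // ... use the configs ...
+ *         RGXPutPageSizeConfigCB(hPriv);
+ *     }
+ */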
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32PDE);
+	PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+	PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+	return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+	switch (ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK))
+	{
+	case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB:
+		*pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT;
+		break;
+	case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB:
+		*pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+		break;
+	case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB:
+		*pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT;
+		break;
+	case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB:
+		*pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT;
+		break;
+	case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB:
+		*pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT;
+		break;
+	case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB:
+		*pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT;
+		break;
+	default:
+		return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+	}
+	return PVRSRV_OK;
+}
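+
+/* Example (illustrative): decoding the data-page size from a valid 8-byte
+ * PDE, assuming the RGX_MMUCTRL_PD_DATA_PAGE_SIZE_* field encoding above:
+ *
+ *     IMG_UINT32 ui32Log2Size;
+ *     if (RGXGetPageSizeFromPDE8(ui64PDE, &ui32Log2Size) == PVRSRV_OK)
+ *     {
+ *         // a PDE carrying the 16KB encoding yields
+ *         // ui32Log2Size == RGX_HEAP_16KB_PAGE_SHIFT (i.e. 14)
+ *     }
+ */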
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmmuinit.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmmuinit.h
new file mode 100644
index 0000000..48fd722
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxmmuinit.h
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* NB: this file is not to be included arbitrarily. It exists solely
+   for the linkage between rgxinit.c and rgxmmuinit.c, so that the former
+   is not cluttered by the contents of the latter */
+
+#ifndef _SRVKM_RGXMMUINIT_H_
+#define _SRVKM_RGXMMUINIT_H_
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "img_defs.h"
+
+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* #ifndef _SRVKM_RGXMMUINIT_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxpower.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxpower.c
new file mode 100644
index 0000000..4b7ff31
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxpower.c
@@ -0,0 +1,1168 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific power routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#if defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "rgxpower.h"
+#include "rgxinit.h"
+#include "rgx_fwif_km.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "rgxdebug.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "rgxtimecorr.h"
+#include "devicemem_utils.h"
+#include "htbserver.h"
+#include "rgxstartstop.h"
+#include "rgxfwimageutils.h"
+#include "sync.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#if defined(PVR_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+
+static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_KCCB_CMD sCmd;
+	PVRSRV_ERROR eError;
+
+	/* Send the Timeout notification to the FW */
+	sCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+	sCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+	sCmd.uCmdData.sPowData.uPoweReqData.ePowRequestType = RGXFWIF_POWER_HOST_TIMEOUT;
+
+	eError = RGXSendCommand(psDevInfo,
+	                        RGXFWIF_DM_GP,
+	                        &sCmd,
+	                        PDUMP_FLAGS_NONE);
+
+	return eError;
+}
+
+static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb;
+	IMG_UINT64 *paui64StatsCounters;
+	IMG_UINT64 ui64LastPeriod;
+	IMG_UINT64 ui64LastState;
+	IMG_UINT64 ui64LastTime;
+	IMG_UINT64 ui64TimeNow;
+
+	psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0];
+
+	OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+	ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64());
+
+	/* Update counters to account for the time since the last update */
+	ui64LastState  = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord);
+	ui64LastTime   = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord);
+	ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+	paui64StatsCounters[ui64LastState] += ui64LastPeriod;
+
+	/* Update state and time of the latest update */
+	psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+
+	OSLockRelease(psDevInfo->hGPUUtilLock);
+}
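+
+/* The FW and host share ui64LastWord, a single 64-bit value that packs the
+ * current GPU state into its low bits and a timestamp into the rest. A
+ * sketch of the invariant maintained above (illustrative pseudo-steps):
+ *
+ *     state = GET_STATE(last_word);          // e.g. active / idle / blocked
+ *     counters[state] += now - GET_TIME(last_word);
+ *     last_word = MAKE_WORD(now, state);     // state unchanged, time advanced
+ */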
+
+static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	if (psDevConfig->pfnTDRGXStop == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!"));
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData);
+#else
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	eError = RGXStop(&psDevInfo->sLayerParams);
+#endif
+
+	return eError;
+}
+
+/*
+	RGXPrePowerState
+ */
+PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE				hDevHandle,
+                              PVRSRV_DEV_POWER_STATE	eNewPowerState,
+                              PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+                              IMG_BOOL					bForced)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+	if ((eNewPowerState != eCurrentPowerState) &&
+	    (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON))
+	{
+		PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+		PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+		RGXFWIF_KCCB_CMD	sPowCmd;
+		RGXFWIF_TRACEBUF	*psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+		/* Send the Power off request to the FW */
+		sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+		sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ;
+		sPowCmd.uCmdData.sPowData.uPoweReqData.bForced = bForced;
+
+		eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
+					__func__));
+			return eError;
+		}
+
+		eError = RGXSendCommand(psDevInfo,
+		                        RGXFWIF_DM_GP,
+		                        &sPowCmd,
+		                        PDUMP_FLAGS_NONE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send Power off request",
+					__func__));
+			return eError;
+		}
+
+		/* Wait for the firmware to complete processing. We cannot use
+		   PVRSRVWaitForValueKM here because it relies on the EventObject,
+		   which is signalled by this MISR */
+		eError = RGXPollForGPCommandCompletion(psDeviceNode,
+								  psDevInfo->psPowSyncPrim->pui32LinAddr,
+								  0x1, 0xFFFFFFFF);
+
+		/* Check the Power state after the answer */
+		if (eError == PVRSRV_OK)
+		{
+			/* Finally, de-initialise some registers. */
+			if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+			{
+#if !defined(NO_HARDWARE)
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+				IMG_UINT32 ui32idx = RGXFW_HYPERVISOR_OS;
+#else
+				IMG_UINT32 ui32idx;
+				for_each_irq_cnt(ui32idx)
+#endif /* RGX_FW_IRQ_OS_COUNTERS */
+				{
+					IMG_UINT32 ui32IrqCnt;
+
+					get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo);
+
+					/* Wait for the pending META/MIPS to host interrupts to come back. */
+					eError = PVRSRVPollForValueKM((IMG_UINT32 __iomem *)&psDevInfo->aui32SampleIRQCount[ui32idx],
+					                              ui32IrqCnt,
+					                              0xffffffff);
+
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR,
+								"%s: Wait for pending interrupts failed." MSG_IRQ_CNT_TYPE " %u Host: %u, FW: %u",
+								__func__,
+								ui32idx,
+								psDevInfo->aui32SampleIRQCount[ui32idx],
+								ui32IrqCnt));
+
+						RGX_WaitForInterruptsTimeout(psDevInfo);
+					}
+				}
+#endif /* NO_HARDWARE */
+
+				/* Update GPU frequency and timer correlation related data */
+				RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_POWER);
+
+				/* Update GPU state counters */
+				_RGXUpdateGPUUtilStats(psDevInfo);
+
+#if defined(PVR_DVFS)
+				eError = SuspendDVFS();
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR,"%s: Failed to suspend DVFS", __func__));
+					return eError;
+				}
+#endif
+
+				psDevInfo->bRGXPowered = IMG_FALSE;
+
+				eError = RGXDoStop(psDeviceNode);
+				if (eError != PVRSRV_OK)
+				{
+					/* Power-down failures are logged but treated as successful,
+					   since the power was removed anyway. */
+					PVR_DPF((PVR_DBG_WARNING, "%s: RGXDoStop failed (%s)",
+							__func__, PVRSRVGetErrorString(eError)));
+					psDevInfo->ui32ActivePMReqNonIdle++;
+					eError = PVRSRV_OK;
+				}
+			}
+			else
+			{
+				/* The sync was updated but the power state is not off:
+				   the FW denied the transition */
+				eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+
+				if (bForced)
+				{	/* It is an error for a forced request to be denied */
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s: Failure to power off during a forced power off. FW: %d",
+							 __func__, psFWTraceBuf->ePowState));
+				}
+			}
+		}
+		else if (eError == PVRSRV_ERROR_TIMEOUT)
+		{
+			/* timeout waiting for the FW to ack the request: return timeout */
+			PVR_DPF((PVR_DBG_WARNING,
+					 "%s: Timeout waiting for powoff ack from the FW",
+					 __func__));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Error waiting for powoff ack from the FW (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+			eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+		}
+	}
+
+	return eError;
+}
+
+#if defined(TRACK_FW_BOOT)
+static INLINE void RGXGetFWBootStage(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                     FW_BOOT_STAGE *peStage,
+                                     FW_BOOT_STAGE *peStageMax)
+{
+	*peStageMax = FW_BOOT_INIT_DONE;
+
+	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+	{
+		/* Boot stage temporarily stored to the register below */
+		*peStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM,
+		                         RGX_CR_POWER_ESTIMATE_RESULT);
+	}
+	else
+	{
+		IMG_UINT32 *pui32BootStage;
+
+		if (PVRSRV_OK != DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+		                                          (void**)&pui32BootStage))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Could not acquire pointer to FW boot stage", __func__));
+			*peStage = FW_BOOT_STAGE_NOT_AVAILABLE;
+			return;
+		}
+
+		pui32BootStage += RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA));
+		pui32BootStage += RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_BOOT_STAGE_OFFSET);
+
+		*peStage = *(FW_BOOT_STAGE*)pui32BootStage;
+
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+	}
+}
+#endif
+
+static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+
+	if (psDevConfig->pfnTDRGXStart == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: TDRGXStart not implemented!"));
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData);
+#else
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	eError = RGXStart(&psDevInfo->sLayerParams);
+#endif
+
+	return eError;
+}
+
+
+#if defined(NO_HARDWARE) && defined(PDUMP)
+
+#if 0
+#include "rgxtbdefs.h"
+#else
+
+/*
+    Register RGX_TB_SYSTEM_STATUS
+*/
+#define RGX_TB_SYSTEM_STATUS                              (0x00E0U)
+#define RGX_TB_SYSTEM_STATUS_MASKFULL                     (IMG_UINT64_C(0x00000000030100FF))
+/*
+directly indicates the status of power_abort flag from the power management controller (RGX_PRCM)
+*/
+#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_SHIFT (25U)
+#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_EN    (IMG_UINT64_C(0X0000000002000000))
+/*
+directly indicates the status of power_complete flag from the power management controller (RGX_PRCM)
+*/
+#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_SHIFT (24U)
+#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_EN (IMG_UINT64_C(0X0000000001000000))
+/*
+directly indicates the status of GPU's hmmu_irq
+*/
+#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_SHIFT               (16U)
+#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_EN                  (IMG_UINT64_C(0X0000000000010000))
+/*
+directly indicates the status of GPU's irq per OS_ID
+*/
+#define RGX_TB_SYSTEM_STATUS_IRQ_SHIFT                    (1U)
+#define RGX_TB_SYSTEM_STATUS_IRQ_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFFFE01))
+/*
+old deprecated single irq
+*/
+#define RGX_TB_SYSTEM_STATUS_OLD_IRQ_SHIFT                    (0U)
+#define RGX_TB_SYSTEM_STATUS_OLD_IRQ_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#endif
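+
+/* The *_CLRMSK constants clear their field, so a field is tested or
+ * extracted by inverting the mask. An illustrative sketch, assuming a raw
+ * 64-bit read of RGX_TB_SYSTEM_STATUS in ui64Status:
+ *
+ *     IMG_BOOL bAborted =
+ *         (ui64Status & RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_EN) != 0;
+ *     IMG_UINT32 ui32PerOsIrqs = (IMG_UINT32)
+ *         ((ui64Status & ~RGX_TB_SYSTEM_STATUS_IRQ_CLRMSK)
+ *              >> RGX_TB_SYSTEM_STATUS_IRQ_SHIFT);
+ */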
+
+#define RGX_FEATURE_NUM_OSIDS 8
+
+static PVRSRV_ERROR
+_ValidateIrqs(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_UINT32 ui32OSid;
+	IMG_UINT32 ui32ConfigFlagsExt;
+	PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+
+	{
+		PVRSRV_ERROR eError;
+		RGXFWIF_OS_CONFIG *psOSConfig;
+
+		eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfOSConfigDesc,
+							  (void **)&psOSConfig);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to acquire OS Config (%u)",
+					__func__,
+					eError));
+			return eError;
+		}
+
+		ui32ConfigFlagsExt = psOSConfig->ui32ConfigFlagsExt;
+
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfOSConfigDesc);
+	}
+
+	/* Check if the Validation IRQ flag is set */
+	if ((ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_VALIDATE_IRQ) == 0)
+	{
+		return PVRSRV_OK;
+	}
+
+	PDUMPIF("IMG_PVR_TESTBENCH", ui32PDumpFlags);
+	PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for TB irq status to be set (irqs signalled)...");
+	PDUMPREGPOL(RGX_TB_PDUMPREG_NAME,
+	            RGX_TB_SYSTEM_STATUS,
+				~RGX_TB_SYSTEM_STATUS_IRQ_CLRMSK,
+				~RGX_TB_SYSTEM_STATUS_IRQ_CLRMSK,
+				ui32PDumpFlags,
+				PDUMP_POLL_OPERATOR_EQUAL);
+
+	PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "... and then clear them");
+	for (ui32OSid = 0; ui32OSid < RGX_FEATURE_NUM_OSIDS; ui32OSid++)
+	{
+		PDUMPREG32(RGX_PDUMPREG_NAME,
+		           RGX_CR_IRQ_OS0_EVENT_CLEAR + ui32OSid * 0x10000,
+		           RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL,
+		           ui32PDumpFlags);
+	}
+
+	PDUMPFI("IMG_PVR_TESTBENCH", ui32PDumpFlags);
+
+	/* Poll on all the interrupt status registers for all OSes */
+	PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Validate Interrupt lines.");
+
+	for (ui32OSid = 0; ui32OSid < RGX_FEATURE_NUM_OSIDS; ui32OSid++)
+	{
+		PDUMPREGPOL(RGX_PDUMPREG_NAME,
+		            RGX_CR_IRQ_OS0_EVENT_STATUS + ui32OSid * 0x10000,
+		            0x0,
+		            0xFFFFFFFF,
+		            ui32PDumpFlags,
+		            PDUMP_POLL_OPERATOR_EQUAL);
+	}
+
+	return PVRSRV_OK;
+}
+#endif
+
+/*
+	RGXPostPowerState
+ */
+PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE				hDevHandle,
+                               PVRSRV_DEV_POWER_STATE	eNewPowerState,
+                               PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+                               IMG_BOOL					bForced)
+{
+	if ((eNewPowerState != eCurrentPowerState) &&
+	    (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON))
+	{
+		PVRSRV_ERROR		 eError;
+		PVRSRV_DEVICE_NODE	 *psDeviceNode = hDevHandle;
+		PVRSRV_RGXDEV_INFO	 *psDevInfo = psDeviceNode->pvDevice;
+		RGXFWIF_INIT *psRGXFWInit;
+
+		if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+		{
+			psDevInfo->bRGXPowered = IMG_TRUE;
+			return PVRSRV_OK;
+		}
+
+		if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+		{
+			/* Update timer correlation related data */
+			RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_POWER);
+
+			/* Update GPU state counters */
+			_RGXUpdateGPUUtilStats(psDevInfo);
+
+			eError = RGXDoStart(psDeviceNode);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXPostPowerState: RGXDoStart failed"));
+				return eError;
+			}
+
+			OSMemoryBarrier();
+
+			eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+			                                  (void **)&psRGXFWInit);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"RGXPostPowerState: Failed to acquire kernel fw if ctl (%u)",
+						eError));
+				return eError;
+			}
+
+			/*
+			 * Check whether the FW has started by polling on bFirmwareStarted flag
+			 */
+			if (PVRSRVPollForValueKM((IMG_UINT32 __iomem *)&psRGXFWInit->bFirmwareStarted,
+			                         IMG_TRUE,
+			                         0xFFFFFFFF) != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed."));
+				eError = PVRSRV_ERROR_TIMEOUT;
+
+#if defined(TRACK_FW_BOOT)
+				{
+					FW_BOOT_STAGE eStage, eStageMax;
+
+					RGXGetFWBootStage(psDevInfo, &eStage, &eStageMax);
+
+					PVR_LOG(("%s: FW reached boot stage %i/%i.",
+					         __func__, eStage, eStageMax));
+				}
+#endif
+
+				/*
+				 * When the bFirmwareStarted poll fails, the debug dump below may
+				 * yield some useful information, but it is potentially dangerous
+				 * if the reason the FW did not boot is that the GPU power is not
+				 * on. However, since the System Layer returned without errors to
+				 * reach this point, we assume the GPU power is indeed on.
+				 */
+				RGXDumpRGXDebugSummary(NULL, NULL, psDeviceNode->pvDevice, IMG_TRUE);
+				RGXDumpRGXRegisters(NULL, NULL, psDeviceNode->pvDevice);
+
+				DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+				return eError;
+			}
+
+#if defined(PDUMP)
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start.");
+			eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+			                                offsetof(RGXFWIF_INIT, bFirmwareStarted),
+			                                IMG_TRUE,
+			                                0xFFFFFFFFU,
+			                                PDUMP_POLL_OPERATOR_EQUAL,
+			                                PDUMP_FLAGS_CONTINUOUS);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"RGXPostPowerState: problem pdumping POL for psRGXFWIfInitMemDesc (%d)",
+						eError));
+				DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+				return eError;
+			}
+
+#if defined(NO_HARDWARE) && defined(PDUMP)
+			eError = _ValidateIrqs(psDevInfo);
+			if (eError != PVRSRV_OK)
+			{
+				return eError;
+			}
+#endif
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+			SetFirmwareStartTime(psRGXFWInit->ui32FirmwareStartedTimeStamp);
+#endif
+
+			HTBSyncPartitionMarker(psRGXFWInit->ui32MarkerVal);
+
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+
+			psDevInfo->bRGXPowered = IMG_TRUE;
+
+#if defined(PVR_DVFS)
+			eError = ResumeDVFS();
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXPostPowerState: Failed to resume DVFS"));
+				return eError;
+			}
+#endif
+		}
+	}
+
+	PDUMPCOMMENT("RGXPostPowerState: Current state: %d, New state: %d", eCurrentPowerState, eNewPowerState);
+
+	return PVRSRV_OK;
+}
+
+/*
+	RGXPreClockSpeedChange
+ */
+PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE				hDevHandle,
+                                    PVRSRV_DEV_POWER_STATE	eCurrentPowerState)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_DATA			*psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+	RGXFWIF_TRACEBUF	*psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+	PVR_UNREFERENCED_PARAMETER(psRGXData);
+
+	PVR_DPF((PVR_DBG_MESSAGE,"RGXPreClockSpeedChange: RGX clock speed was %uHz",
+			psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+
+	if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) &&
+	    (psFWTraceBuf->ePowState != RGXFWIF_POW_OFF))
+	{
+		/* Update GPU frequency and timer correlation related data */
+		RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+	}
+
+	return eError;
+}
+
+/*
+	RGXPostClockSpeedChange
+ */
+PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE				hDevHandle,
+                                     PVRSRV_DEV_POWER_STATE	eCurrentPowerState)
+{
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_DATA			*psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	RGXFWIF_TRACEBUF	*psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	IMG_UINT32 		ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	/* Update runtime configuration with the new value */
+	psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed = ui32NewClockSpeed;
+
+	if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) &&
+	    (psFWTraceBuf->ePowState != RGXFWIF_POW_OFF))
+	{
+		RGXFWIF_KCCB_CMD sCOREClkSpeedChangeCmd;
+
+		RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+
+		sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE;
+		sCOREClkSpeedChangeCmd.uCmdData.sCoreClkSpeedChangeData.ui32NewClockSpeed = ui32NewClockSpeed;
+
+		/* Ensure the new clock speed is written to memory before requesting the FW to read it */
+		OSMemoryBarrier();
+
+		PDUMPCOMMENT("Scheduling CORE clock speed change command");
+
+		PDUMPPOWCMDSTART();
+		eError = RGXSendCommand(psDeviceNode->pvDevice,
+		                        RGXFWIF_DM_GP,
+		                        &sCOREClkSpeedChangeCmd,
+		                        PDUMP_FLAGS_NONE);
+		PDUMPPOWCMDEND();
+
+		if (eError != PVRSRV_OK)
+		{
+			PDUMPCOMMENT("Scheduling CORE clock speed change command failed");
+			PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError));
+			return eError;
+		}
+
+		PVR_DPF((PVR_DBG_MESSAGE,"RGXPostClockSpeedChange: RGX clock speed changed to %uHz",
+				psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+	}
+
+	return eError;
+}
+
+/*!
+ ******************************************************************************
+
+ @Function	RGXDustCountChange
+
+ @Description
+
+	Changes the number of DUSTs
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   ui32NumberOfDusts : Number of DUSTs to transition to
+
+ @Return   PVRSRV_ERROR :
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE		hDevHandle,
+                                IMG_UINT32		ui32NumberOfDusts)
+{
+
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR		eError;
+	RGXFWIF_KCCB_CMD 	sDustCountChange;
+	IMG_UINT32			ui32MaxAvailableDusts = psDevInfo->sDevFeatureCfg.ui32MAXDustCount;
+	RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	if (ui32NumberOfDusts > ui32MaxAvailableDusts)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Invalid number of DUSTs (%u) while expecting value within <0,%u>. Error:%u",
+				__func__,
+				ui32NumberOfDusts,
+				ui32MaxAvailableDusts,
+				eError));
+		return eError;
+	}
+
+#if defined(FIX_HW_BRN_59042)
+	if (ui32NumberOfDusts < ui32MaxAvailableDusts && (ui32NumberOfDusts & 0x1))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Invalid number of DUSTs (%u) due to HW restriction. Allowed values are :-",
+				__func__,
+				ui32NumberOfDusts));
+		switch (ui32MaxAvailableDusts)
+		{
+			case 2:	PVR_DPF((PVR_DBG_ERROR, "0, 2")); break;
+			case 3:	PVR_DPF((PVR_DBG_ERROR, "0, 2, 3")); break;
+			case 4:	PVR_DPF((PVR_DBG_ERROR, "0, 2, 4")); break;
+			case 5:	PVR_DPF((PVR_DBG_ERROR, "0, 2, 4, 5")); break;
+			case 6:	PVR_DPF((PVR_DBG_ERROR, "0, 2, 4, 6")); break;
+			default: break;
+		}
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#endif
+
+	psRuntimeCfg->ui32DefaultDustsNumInit = ui32NumberOfDusts;
+
+#if !defined(NO_HARDWARE)
+	{
+		RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+		if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+		{
+			return PVRSRV_OK;
+		}
+
+		if (psFWTraceBuf->ePowState != RGXFWIF_POW_FORCED_IDLE)
+		{
+			eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Attempt to change dust count when not IDLE",
+					 __func__));
+			return eError;
+		}
+	}
+#endif
+
+	eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
+				__func__));
+		return eError;
+	}
+
+	sDustCountChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+	sDustCountChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUMDUST_CHANGE;
+	sDustCountChange.uCmdData.sPowData.uPoweReqData.ui32NumOfDusts = ui32NumberOfDusts;
+
+	PDUMPCOMMENT("Scheduling command to change Dust Count to %u", ui32NumberOfDusts);
+	eError = RGXSendCommand(psDeviceNode->pvDevice,
+	                        RGXFWIF_DM_GP,
+	                        &sDustCountChange,
+	                        PDUMP_FLAGS_NONE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PDUMPCOMMENT("Scheduling command to change Dust Count failed. Error:%u", eError);
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Scheduling KCCB to change Dust Count failed. Error:%u",
+				 __func__, eError));
+		return eError;
+	}
+
+	/* Wait for the firmware to answer. */
+	eError = RGXPollForGPCommandCompletion(psDeviceNode,
+	                              psDevInfo->psPowSyncPrim->pui32LinAddr,
+								  0x1, 0xFFFFFFFF);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Timeout waiting for idle request", __func__));
+		return eError;
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXDustCountChange: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+	SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+	                 1,
+	                 0xffffffff,
+	                 PDUMP_POLL_OPERATOR_EQUAL,
+	                 0);
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*
+ @Function	RGXAPMLatencyChange
+ */
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE		hDevHandle,
+                                 IMG_UINT32		ui32ActivePMLatencyms,
+                                 IMG_BOOL		bActivePMLatencyPersistant)
+{
+
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR		eError;
+	RGXFWIF_RUNTIME_CFG	*psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+	PVRSRV_DEV_POWER_STATE	ePowerState;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXAPMLatencyChange: Failed to acquire power lock"));
+		return eError;
+	}
+
+	/* Update runtime configuration with the new values and ensure the
+	 * new APM latency is written to memory before requesting the FW to
+	 * read it
+	 */
+	psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms;
+	psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant;
+	OSMemoryBarrier();
+
+	eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+	if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+	{
+		RGXFWIF_KCCB_CMD	sActivePMLatencyChange;
+		sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+		sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE;
+		sActivePMLatencyChange.uCmdData.sPowData.uPoweReqData.ui32ActivePMLatencyms = ui32ActivePMLatencyms;
+
+		PDUMPCOMMENT("Scheduling command to change APM latency to %u", ui32ActivePMLatencyms);
+		eError = RGXSendCommand(psDeviceNode->pvDevice,
+		                        RGXFWIF_DM_GP,
+		                        &sActivePMLatencyChange,
+		                        PDUMP_FLAGS_NONE);
+
+		if (eError != PVRSRV_OK)
+		{
+			PDUMPCOMMENT("Scheduling command to change APM latency failed. Error:%u", eError);
+			PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError));
+			goto ErrorExit;
+		}
+	}
+
+	ErrorExit:
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	return eError;
+}
+
+/*
+	RGXActivePowerRequest
+ */
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+	/* NOTE: If this function were ever to wait on the event object, care
+	   should be taken not to release the bridge lock during the sleep;
+	   the bridge lock should be held for the duration of the sleep. */
+#endif
+
+	psDevInfo->ui32ActivePMReqTotal++;
+
+	/* Take the power lock to stop further requests racing with the FW
+	 * hand-shake from now on (kicks issued before this point are detected
+	 * by the FW).
+	 * PVRSRVPowerLock is replaced with PVRSRVPowerTryLock to avoid a
+	 * potential deadlock between PDumpWriteLock and PowerLock
+	 * during 'DriverLive + PDUMP=1 + EnableAPM=1'.
+	 */
+	eError = PVRSRVPowerTryLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			PVR_LOG_ERROR(eError, "PVRSRVPowerTryLock");
+		}
+		else
+		{
+			psDevInfo->ui32ActivePMReqRetry++;
+		}
+		goto _RGXActivePowerRequest_PowerLock_failed;
+	}
+
+	/* Check again for IDLE once we have the power lock */
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_IDLE)
+	{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+		SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo)-psFWTraceBuf->ui64StartIdleTime);
+#endif
+
+		PDUMPPOWCMDSTART();
+		eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+		                                     PVRSRV_DEV_POWER_STATE_OFF,
+		                                     IMG_FALSE); /* forced */
+		PDUMPPOWCMDEND();
+
+		if (eError == PVRSRV_OK)
+		{
+			psDevInfo->ui32ActivePMReqOk++;
+		}
+		else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+		{
+			psDevInfo->ui32ActivePMReqDenied++;
+		}
+	}
+	else
+	{
+		psDevInfo->ui32ActivePMReqNonIdle++;
+	}
+
+	PVRSRVPowerUnlock(psDeviceNode);
+
+_RGXActivePowerRequest_PowerLock_failed:
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+
+	return eError;
+
+}
+/*
+	RGXForcedIdleRequest
+ */
+
+#define RGX_FORCED_IDLE_RETRY_COUNT 10
+
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted)
+{
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_KCCB_CMD	sPowCmd;
+	PVRSRV_ERROR		eError;
+	IMG_UINT32			ui32RetryCount = 0;
+#if !defined(NO_HARDWARE)
+	RGXFWIF_TRACEBUF	*psFWTraceBuf;
+#endif
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+#if !defined(NO_HARDWARE)
+	psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+	/* Firmware already forced idle */
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_FORCED_IDLE)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* The firmware is not powered. Sometimes this is permitted, for instance
+	   when we were forcing idle in order to power down. */
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+	{
+		return (bDeviceOffPermitted) ? PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+	}
+#endif
+
+	eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
+				__func__));
+		return eError;
+	}
+	sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+	sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+	sPowCmd.uCmdData.sPowData.uPoweReqData.ePowRequestType = RGXFWIF_POWER_FORCE_IDLE;
+
+	PDUMPCOMMENT("RGXForcedIdleRequest: Sending forced idle command");
+
+	/* Send one forced IDLE command to GP */
+	eError = RGXSendCommand(psDevInfo,
+	                        RGXFWIF_DM_GP,
+	                        &sPowCmd,
+	                        PDUMP_FLAGS_NONE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to send idle request", __func__));
+		return eError;
+	}
+
+	/* Wait for GPU to finish current workload */
+	do {
+		eError = RGXPollForGPCommandCompletion(psDeviceNode,
+		                              psDevInfo->psPowSyncPrim->pui32LinAddr,
+									  0x1, 0xFFFFFFFF);
+		if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT))
+		{
+			break;
+		}
+		ui32RetryCount++;
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: Request timeout. Retry %d of %d",
+				 __func__, ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT));
+	} while (IMG_TRUE);
+
+	if (eError != PVRSRV_OK)
+	{
+		RGXFWNotifyHostTimeout(psDevInfo);
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Idle request failed. Firmware potentially left in forced idle state",
+				 __func__));
+		return eError;
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+	SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+	                 1,
+	                 0xffffffff,
+	                 PDUMP_POLL_OPERATOR_EQUAL,
+	                 0);
+#endif
+
+#if !defined(NO_HARDWARE)
+	/* Check the firmware state for idleness */
+	if (psFWTraceBuf->ePowState != RGXFWIF_POW_FORCED_IDLE)
+	{
+		return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+	}
+#endif
+
+	return PVRSRV_OK;
+}
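+
+/* Usage sketch (illustrative): a forced-idle window is normally bracketed
+ * by a matching cancel around work that needs a quiescent, but still
+ * powered, GPU:
+ *
+ *     eError = RGXForcedIdleRequest(hDevHandle, IMG_FALSE);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         // ... operation requiring an idle GPU ...
+ *         eError = RGXCancelForcedIdleRequest(hDevHandle);
+ *     }
+ */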
+
+/*
+	RGXCancelForcedIdleRequest
+ */
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_KCCB_CMD	sPowCmd;
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
+				__func__));
+		goto ErrorExit;
+	}
+
+	/* Send the IDLE request to the FW */
+	sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+	sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+	sPowCmd.uCmdData.sPowData.uPoweReqData.ePowRequestType = RGXFWIF_POWER_CANCEL_FORCED_IDLE;
+
+	PDUMPCOMMENT("RGXForcedIdleRequest: Sending cancel forced idle command");
+
+	/* Send cancel forced IDLE command to GP */
+	eError = RGXSendCommand(psDevInfo,
+	                        RGXFWIF_DM_GP,
+	                        &sPowCmd,
+	                        PDUMP_FLAGS_NONE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PDUMPCOMMENT("RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d", RGXFWIF_DM_GP);
+		goto ErrorExit;
+	}
+
+	/* Wait for the firmware to answer. */
+	eError = RGXPollForGPCommandCompletion(psDeviceNode,
+	                              psDevInfo->psPowSyncPrim->pui32LinAddr,
+								  1, 0xFFFFFFFF);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Timeout waiting for cancel idle request", __func__));
+		goto ErrorExit;
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+	SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+	                 1,
+	                 0xffffffff,
+	                 PDUMP_POLL_OPERATOR_EQUAL,
+	                 0);
+#endif
+
+	return eError;
+
+ErrorExit:
+	PVR_DPF((PVR_DBG_ERROR,"%s: Firmware potentially left in forced idle state", __func__));
+	return eError;
+}
+
+/*!
+ ******************************************************************************
+
+ @Function	RGXGetNextDustCount
+
+ @Description
+
+	Calculate a sequence of dust counts to achieve full transition coverage.
+	We increment two dust counts and switch up and down between them.
+	The sequence contains a few redundant transitions. If two dusts exist,
+	the output transitions are as follows.
+
+	0->1, 0<-1, 0->2, 0<-2, (0->1)
+	1->1, 1->2, 1<-2, (1->2)
+	2->2, (2->0),
+	0->0. Repeat.
+
+	Redundant transitions in brackets.
+
+ @Input		psDustReqState : Counter state used to calculate next dust count
+ @Input		ui32DustCount : Number of dusts in the core
+
+ @Return	IMG_UINT32 : Next dust count in the sequence
+
+ ******************************************************************************/
+IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustReqState, IMG_UINT32 ui32DustCount)
+{
+	if (psDustReqState->bToggle)
+	{
+		psDustReqState->ui32DustCount2++;
+	}
+
+	if (psDustReqState->ui32DustCount2 > ui32DustCount)
+	{
+		psDustReqState->ui32DustCount1++;
+		psDustReqState->ui32DustCount2 = psDustReqState->ui32DustCount1;
+	}
+
+	if (psDustReqState->ui32DustCount1 > ui32DustCount)
+	{
+		psDustReqState->ui32DustCount1 = 0;
+		psDustReqState->ui32DustCount2 = 0;
+	}
+
+	psDustReqState->bToggle = !psDustReqState->bToggle;
+
+	return (psDustReqState->bToggle) ? psDustReqState->ui32DustCount1 : psDustReqState->ui32DustCount2;
+}
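+
+/* Worked example (illustrative): starting from a zeroed RGX_DUST_STATE with
+ * ui32DustCount == 2, successive calls return
+ *
+ *     0, 1, 0, 2, 0, 1, 1, 2, 1, 2, 2, 0, 0, ...
+ *
+ * which walks the transition list in the description above, including the
+ * redundant transitions shown in brackets there.
+ */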
+
+/******************************************************************************
+ End of file (rgxpower.c)
+ ******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxpower.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxpower.h
new file mode 100644
index 0000000..6b522bd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxpower.h
@@ -0,0 +1,245 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX power header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX power
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXPOWER_H__)
+#define __RGXPOWER_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "servicesext.h"
+#include "rgxdevice.h"
+
+
+/*!
+******************************************************************************
+
+ @Function	RGXPrePowerState
+
+ @Description
+
+ Does the necessary preparation before a power state transition
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   eNewPowerState : New power state
+ @Input	   eCurrentPowerState : Current power state
+ @Input	   bForced : Whether the power transition is forced
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE				hDevHandle,
+							  PVRSRV_DEV_POWER_STATE	eNewPowerState,
+							  PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+							  IMG_BOOL					bForced);
+
+/*!
+******************************************************************************
+
+ @Function	RGXPostPowerState
+
+ @Description
+
+ Does the necessary processing after a power state transition
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   eNewPowerState : New power state
+ @Input	   eCurrentPowerState : Current power state
+ @Input	   bForced : Whether the power transition is forced
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE				hDevHandle,
+							   PVRSRV_DEV_POWER_STATE	eNewPowerState,
+							   PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+							   IMG_BOOL					bForced);
+
+
+/*!
+******************************************************************************
+
+ @Function	RGXPreClockSpeedChange
+
+ @Description
+
+	Does processing required before an RGX clock speed change.
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   eCurrentPowerState : Power state of the device
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE				hDevHandle,
+									PVRSRV_DEV_POWER_STATE	eCurrentPowerState);
+
+/*!
+******************************************************************************
+
+ @Function	RGXPostClockSpeedChange
+
+ @Description
+
+	Does processing required after an RGX clock speed change.
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   eCurrentPowerState : Power state of the device
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE				hDevHandle,
+									 PVRSRV_DEV_POWER_STATE	eCurrentPowerState);
+
+
+/*!
+******************************************************************************
+
+ @Function	RGXDustCountChange
+
+ @Description Changes the number of DUSTs
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   ui32NumberOfDusts : Number of DUSTs to transition to
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE				hDevHandle,
+								IMG_UINT32				ui32NumberOfDusts);
+
+/*!
+******************************************************************************
+
+ @Function	RGXAPMLatencyChange
+
+ @Description
+
+	Changes the wait duration used before the firmware indicates IDLE.
+	Reducing this value will cause the firmware to shut off faster and
+	more often, but may increase bubbles in GPU scheduling due to the added
+	power management activity. If bActivePMLatencyPersistant is NOT set,
+	the APM latency returns to the system default on power up.
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   ui32ActivePMLatencyms : Number of milliseconds to wait
+ @Input	   bActivePMLatencyPersistant : Set to ensure the new value is not reset
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE				hDevHandle,
+				IMG_UINT32				ui32ActivePMLatencyms,
+				IMG_BOOL				bActivePMLatencyPersistant);
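+
+/* Usage sketch (illustrative): lower the APM latency to 20 ms for the
+ * current boot only (non-persistent), making the FW idle the GPU sooner:
+ *
+ *     eError = RGXAPMLatencyChange(hDevHandle, 20, IMG_FALSE);
+ */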
+
+/*!
+******************************************************************************
+
+ @Function	RGXActivePowerRequest
+
+ @Description Initiate a handshake with the FW to power off the GPU
+
+ @Input	   hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function	RGXForcedIdleRequest
+
+ @Description Initiate a handshake with the FW to idle the GPU
+
+ @Input	   hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted);
+
+/*!
+******************************************************************************
+
+ @Function	RGXCancelForcedIdleRequest
+
+ @Description Send a request to cancel idle to the firmware.
+
+ @Input	   hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function	RGXGetNextDustCount
+
+ @Description
+
+	Calculate a sequence of dust counts to achieve full transition coverage.
+	We increment two dust counts and switch up and down between them.
+	The sequence contains a few redundant transitions. If two dusts exist,
+	the output transitions are as follows.
+
+	0->1, 0<-1, 0->2, 0<-2, (0->1)
+	1->1, 1->2, 1<-2, (1->2)
+	2->2, (2->0),
+	0->0. Repeat.
+
+	Redundant transitions in brackets.
+
+ @Input		psDustState : Counter state used to calculate next dust count
+ @Input		ui32DustCount : Number of dusts in the core
+
+ @Return	IMG_UINT32 : Next dust count in the sequence
+
+******************************************************************************/
+IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustState, IMG_UINT32 ui32DustCount);
+
+
+#endif /* __RGXPOWER_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxregconfig.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxregconfig.c
new file mode 100644
index 0000000..680d9c0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxregconfig.c
@@ -0,0 +1,323 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Register configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Regconfig routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxregconfig.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "device.h"
+#include "sync_internal.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+
+PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection,
+                                         PVRSRV_DEVICE_NODE	 *psDeviceNode,
+                                         IMG_UINT8           ui8RegCfgType)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR          eError       = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO 	  *psDevInfo   = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG        *psRegCfg    = &psDevInfo->sRegCongfig;
+	RGXFWIF_REG_CFG_TYPE  eRegCfgType  = (RGXFWIF_REG_CFG_TYPE) ui8RegCfgType;
+
+	PVR_UNREFERENCED_PARAMETER(psDevConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psRegCfg->hLock);
+#endif
+
+	if (eRegCfgType < psRegCfg->eRegCfgTypeToPush)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "PVRSRVRGXSetRegConfigTypeKM: Register configuration requested (%d) is not valid since it has to be at least %d."
+				 " Configurations of different types need to go in order",
+				 eRegCfgType,
+				 psRegCfg->eRegCfgTypeToPush));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psRegCfg->hLock);
+#endif
+		return PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE;
+	}
+
+	psRegCfg->eRegCfgTypeToPush = eRegCfgType;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRegCfg->hLock);
+#endif
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(psDevConnection);
+
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigTypeKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection,
+                                     PVRSRV_DEVICE_NODE	*psDeviceNode,
+                                     IMG_UINT32		ui32RegAddr,
+                                     IMG_UINT64		ui64RegValue,
+                                     IMG_UINT64		ui64RegMask)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sRegCfgCmd;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psRegCfg->hLock);
+#endif
+
+	if (psRegCfg->bEnabled)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Cannot add record whilst register configuration active."));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psRegCfg->hLock);
+#endif
+		return PVRSRV_ERROR_REG_CONFIG_ENABLED;
+	}
+	if (psRegCfg->ui32NumRegRecords == RGXFWIF_REG_CFG_MAX_SIZE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Register configuration full."));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psRegCfg->hLock);
+#endif
+		return PVRSRV_ERROR_REG_CONFIG_FULL;
+	}
+
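+	/* Register addresses are 32-bit on the host side but carried as 64-bit
+	 * values in the firmware interface, hence the widening cast below. */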
+	sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+	sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Addr = (IMG_UINT64) ui32RegAddr;
+	sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Value = ui64RegValue;
+	sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Mask = ui64RegMask;
+	sRegCfgCmd.uCmdData.sRegConfigData.eRegConfigType = psRegCfg->eRegCfgTypeToPush;
+	sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ADD;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sRegCfgCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psRegCfg->hLock);
+#endif
+		return eError;
+	}
+
+	psRegCfg->ui32NumRegRecords++;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRegCfg->hLock);
+#endif
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigPIKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection,
+                                       PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sRegCfgCmd;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psRegCfg->hLock);
+#endif
+
+	if (psRegCfg->bEnabled)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Attempt to clear register configuration whilst active."));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psRegCfg->hLock);
+#endif
+		return PVRSRV_ERROR_REG_CONFIG_ENABLED;
+	}
+
+	sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+	sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_CLEAR;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sRegCfgCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psRegCfg->hLock);
+#endif
+		return eError;
+	}
+
+	psRegCfg->ui32NumRegRecords = 0;
+	psRegCfg->eRegCfgTypeToPush = RGXFWIF_REG_CFG_TYPE_PWR_ON;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRegCfg->hLock);
+#endif
+
+	return eError;
+#else
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection,
+                                        PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sRegCfgCmd;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psRegCfg->hLock);
+#endif
+
+	sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+	sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ENABLE;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sRegCfgCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psRegCfg->hLock);
+#endif
+		return eError;
+	}
+
+	psRegCfg->bEnabled = IMG_TRUE;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRegCfg->hLock);
+#endif
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection,
+                                         PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sRegCfgCmd;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psRegCfg->hLock);
+#endif
+
+	sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+	sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_DISABLE;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sRegCfgCmd,
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psRegCfg->hLock);
+#endif
+		return eError;
+	}
+
+	psRegCfg->bEnabled = IMG_FALSE;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRegCfg->hLock);
+#endif
+
+	return eError;
+#else
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+
+/******************************************************************************
+ End of file (rgxregconfig.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxregconfig.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxregconfig.h
new file mode 100644
index 0000000..5edb2b9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxregconfig.h
@@ -0,0 +1,130 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX register configuration functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX register configuration functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXREGCONFIG_H__)
+#define __RGXREGCONFIG_H__
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
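+/*
+ * Illustrative usage (a sketch inferred from the implementation in
+ * rgxregconfig.c, not a prescriptive sequence): a client first selects the
+ * configuration type via PVRSRVRGXSetRegConfigTypeKM(), adds register
+ * records with PVRSRVRGXAddRegConfigKM(), then activates them with
+ * PVRSRVRGXEnableRegConfigKM(). Records cannot be added or cleared while
+ * the configuration is enabled; call PVRSRVRGXDisableRegConfigKM() first.
+ */
+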
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXSetRegConfigTypeKM
+
+ @Description
+	Server-side implementation of RGXSetRegConfig
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui8RegCfgType - Register configuration type
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection,
+                                         PVRSRV_DEVICE_NODE	*psDeviceNode,
+                                         IMG_UINT8 ui8RegCfgType);
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXAddRegConfigKM
+
+ @Description
+	Server-side implementation of RGXAddRegConfig
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui32RegAddr - Register address
+ @Input ui64RegValue - Reg value
+ @Input ui64RegMask - Reg mask
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection,
+                                     PVRSRV_DEVICE_NODE	*psDeviceNode,
+                                     IMG_UINT32	ui32RegAddr,
+                                     IMG_UINT64	ui64RegValue,
+                                     IMG_UINT64	ui64RegMask);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXClearRegConfigKM
+
+ @Description
+	Server-side implementation of RGXClearRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection,
+                                       PVRSRV_DEVICE_NODE	*psDeviceNode);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXEnableRegConfigKM
+
+ @Description
+	Server-side implementation of RGXEnableRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection,
+                                        PVRSRV_DEVICE_NODE	*psDeviceNode);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXDisableRegConfigKM
+
+ @Description
+	Server-side implementation of RGXDisableRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection,
+                                         PVRSRV_DEVICE_NODE	*psDeviceNode);
+
+#endif /* __RGXREGCONFIG_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsignals.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsignals.c
new file mode 100644
index 0000000..15764da
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsignals.c
@@ -0,0 +1,94 @@
+/*************************************************************************/ /*!
+@File           rgxsignals.c
+@Title          RGX Signals routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Signals routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxsignals.h"
+
+#include "rgxmem.h"
+#include "rgx_fwif_km.h"
+#include "mmu_common.h"
+#include "devicemem.h"
+#include "rgxfwutils.h"
+
+
+PVRSRV_ERROR PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection,
+	                                   PVRSRV_DEVICE_NODE	*psDeviceNode,
+	                                   IMG_HANDLE hMemCtxPrivData,
+	                                   IMG_DEV_VIRTADDR sDevSignalAddress)
+{
+	DEVMEM_MEMDESC *psFWMemContextMemDesc;
+	RGXFWIF_KCCB_CMD sKCCBCmd;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+
+	/* Schedule the firmware command */
+	sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE;
+	sKCCBCmd.uCmdData.sSignalUpdateData.sDevSignalAddress = sDevSignalAddress;
+	RGXSetFirmwareAddress(&sKCCBCmd.uCmdData.sSignalUpdateData.psFWMemContext,
+	                      psFWMemContextMemDesc,
+	                      0, RFW_FWADDR_NOREF_FLAG);
+
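+	/* Submit to the general-purpose DM, retrying for as long as
+	 * RGXScheduleCommand reports PVRSRV_ERROR_RETRY (e.g. no space in the
+	 * kernel CCB yet), backing off between attempts until the overall
+	 * hardware timeout budget is exhausted. */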
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand((PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice,
+		                            RGXFWIF_DM_GP,
+		                            &sKCCBCmd,
+		                            0,
+		                            PDUMP_FLAGS_NONE);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXNotifySignalUpdateKM: Failed to schedule the FW command %d (%s)",
+				eError, PVRSRVGETERRORSTRING(eError)));
+	}
+
+	return eError;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsignals.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsignals.h
new file mode 100644
index 0000000..509d960
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsignals.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@File           rgxsignals.h
+@Title          RGX Signals routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Signals routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_RGX_SIGNALS_H)
+#define _RGX_SIGNALS_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+#include "device.h"
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXNotifySignalUpdateKM
+
+ @Description   Server-side implementation of RGXNotifySignalUpdate
+
+ @Input hMemCtxPrivData - memory context private data
+ @Input sDevSignalAddress - device virtual address of the updated signal
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection,
+                                           PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           IMG_HANDLE hMemCtxPrivData,
+                                           IMG_DEV_VIRTADDR sDevSignalAddress);
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsrvinit.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsrvinit.c
new file mode 100644
index 0000000..d6958fa
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsrvinit.c
@@ -0,0 +1,1382 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "srvinit.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "km_apphint_defs.h"
+#include "htbuffer_types.h"
+#include "htbuffer_init.h"
+
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+
+#include "rgxinit.h"
+
+#include "rgx_compat_bvnc.h"
+
+#include "osfunc.h"
+
+#include "rgxdefs_km.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#include "rgx_fwif_hwperf.h"
+#include "rgx_hwperf_table.h"
+
+#include "rgxfwload.h"
+#include "rgxlayer_impl.h"
+#include "rgxfwimageutils.h"
+#include "rgxfwutils.h"
+
+#include "rgx_hwperf.h"
+#include "rgx_bvnc_defs_km.h"
+
+#include "rgxdevice.h"
+
+#include "pvrsrv.h"
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#include "rgxdevice.h"
+#include "pvrsrv_device.h"
+#endif
+
+#define DRIVER_MODE_HOST               0          /* AppHint value for host driver mode */
+
+#define	HW_PERF_FILTER_DEFAULT         0x00000000 /* Default to no HWPerf */
+#define HW_PERF_FILTER_DEFAULT_ALL_ON  0xFFFFFFFF /* All events */
+
+#if defined(SUPPORT_VALIDATION)
+#include "pvrsrv_apphint.h"
+#endif
+
+#include "os_srvinit_param.h"
+#if !defined(LINUX)
+/*!
+*******************************************************************************
+ * AppHint mnemonic data type helper tables
+******************************************************************************/
+/* apphint map of name vs. enable flag */
+static SRV_INIT_PARAM_UINT32_LOOKUP htb_loggroup_tbl[] = {
+#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) },
+	HTB_LOG_SFGROUPLIST
+#undef X
+};
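+/* Each X(a, b) entry above expands to { "b", HTB_LOG_GROUP_FLAG(a) }, giving
+ * a lookup from a group's name string to its enable-flag bit. */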
+/* apphint map of arg vs. OpMode */
+static SRV_INIT_PARAM_UINT32_LOOKUP htb_opmode_tbl[] = {
+	{ "droplatest", HTB_OPMODE_DROPLATEST},
+	{ "dropoldest", HTB_OPMODE_DROPOLDEST},
+	/* HTB should never be started in HTB_OPMODE_BLOCK
+	 * as this can lead to deadlocks
+	 */
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP fwt_logtype_tbl[] = {
+	{ "trace", 2},
+	{ "tbi", 1},
+	{ "none", 0}
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP timecorr_clk_tbl[] = {
+	{ "mono", 0 },
+	{ "mono_raw", 1 },
+	{ "sched", 2 }
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP fwt_loggroup_tbl[] = { RGXFWIF_LOG_GROUP_NAME_VALUE_MAP };
+
+/*
+ * Services AppHints initialisation
+ */
+#define X(a, b, c, d, e) SrvInitParamInit ## b(a, d, e)
+APPHINT_LIST_ALL
+#undef X
+#endif /* !defined(LINUX) */
+
+/*
+ * Container for all the apphints used by this module
+ */
+typedef struct _RGX_SRVINIT_APPHINTS_
+{
+	IMG_UINT32 ui32DriverMode;
+	IMG_BOOL   bDustRequestInject;
+	IMG_BOOL   bEnableSignatureChecks;
+	IMG_UINT32 ui32SignatureChecksBufSize;
+
+#if defined(DEBUG)
+	IMG_BOOL   bAssertOnOutOfMem;
+#endif
+#if defined(SUPPORT_VALIDATION)
+	IMG_BOOL   bValidateIrq;
+#endif
+	IMG_BOOL   bAssertOnHWRTrigger;
+#if defined(SUPPORT_VALIDATION)
+	IMG_UINT32 aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST];
+	IMG_UINT32 ui32FBCDCVersionOverride;
+#endif
+	IMG_BOOL   bCheckMlist;
+	IMG_BOOL   bDisableClockGating;
+	IMG_BOOL   bDisableDMOverlap;
+	IMG_BOOL   bDisableFEDLogging;
+	IMG_BOOL   bDisablePDP;
+	IMG_BOOL   bEnableCDMKillRand;
+	IMG_BOOL   bEnableHWR;
+	IMG_BOOL   bFilteringMode;
+	IMG_BOOL   bHWPerfDisableCustomCounterFilter;
+	IMG_BOOL   bZeroFreelist;
+	IMG_UINT32 ui32EnableFWContextSwitch;
+	IMG_UINT32 ui32FWContextSwitchProfile;
+	IMG_UINT32 ui32VDMContextSwitchMode;
+	IMG_UINT32 ui32HWPerfFWBufSize;
+	IMG_UINT32 ui32HWPerfHostBufSize;
+	IMG_UINT32 ui32HWPerfFilter0;
+	IMG_UINT32 ui32HWPerfFilter1;
+	IMG_UINT32 ui32HWPerfHostFilter;
+	IMG_UINT32 ui32TimeCorrClock;
+	IMG_UINT32 ui32HWRDebugDumpLimit;
+	IMG_UINT32 ui32JonesDisableMask;
+	IMG_UINT32 ui32LogType;
+	IMG_UINT32 ui32TruncateMode;
+	FW_PERF_CONF eFirmwarePerf;
+	RGX_ACTIVEPM_CONF eRGXActivePMConf;
+	RGX_META_T1_CONF eUseMETAT1;
+	RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS];
+	IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS];
+#endif
+	IMG_BOOL   bEnableTrustedDeviceAceConfig;
+	IMG_UINT32 ui32FWContextSwitchCrossDM;
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
+	IMG_UINT32 ui32PhysMemTestPasses;
+#endif
+} RGX_SRVINIT_APPHINTS;
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+ * Parses the dot ('.') separated OSid region values in a string and stores the
+ * integer results in an array. Numbers can be decimal or hex (starting with 0x)
+ * and there must be a '.' between each pair (example: 1.2.3.4.5.6.7.8).
+ */
+static void _ParseOSidRegionString(IMG_CHAR *apszBuffer, IMG_UINT32 *pui32ApphintArray)
+{
+	IMG_UINT32 ui32OSid;
+	IMG_CHAR *pui8StringParsingBase=apszBuffer;
+	IMG_UINT32 ui32StringLength = OSStringLength(apszBuffer);
+
+	/* Initialize all apphints to 0 */
+	for (ui32OSid = 0; ui32OSid < GPUVIRT_VALIDATION_NUM_OS; ui32OSid++)
+	{
+		pui32ApphintArray[ui32OSid] = 0;
+	}
+
+	/* Parse the string. Even if it fails, apphints will have been initialized */
+	for (ui32OSid = 0; ui32OSid < GPUVIRT_VALIDATION_NUM_OS; ui32OSid++)
+	{
+		IMG_UINT32 ui32Base=10;
+		IMG_CHAR *pui8StringParsingNextDelimiter;
+
+		/* Find the next character in the string that's not a ',' '.' or ' ' */
+		while ((*pui8StringParsingBase == '.' ||
+			    *pui8StringParsingBase == ',' ||
+			    *pui8StringParsingBase == ' ') &&
+			   pui8StringParsingBase - apszBuffer <= ui32StringLength)
+		{
+			pui8StringParsingBase++;
+		}
+
+		if (pui8StringParsingBase - apszBuffer > ui32StringLength)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Reached the end of the apphint string while trying to parse it.\nBuffer: %s, OSid: %d", pui8StringParsingBase, ui32OSid));
+			return;
+		}
+
+		/* If the substring begins with "0x" move the pointer 2 bytes forward and set the base to 16 */
+		if (*pui8StringParsingBase == '0' && *(pui8StringParsingBase+1) =='x')
+		{
+			ui32Base=16;
+			pui8StringParsingBase+=2;
+		}
+
+		/* Find the next delimiter in the string or the end of the string itself if we're parsing the final number */
+		pui8StringParsingNextDelimiter = pui8StringParsingBase;
+
+		while (*pui8StringParsingNextDelimiter!='.' &&
+			   *pui8StringParsingNextDelimiter!=',' &&
+			   *pui8StringParsingNextDelimiter!=' ' &&
+			   *pui8StringParsingNextDelimiter!='\0' &&
+			   (pui8StringParsingNextDelimiter - apszBuffer <= ui32StringLength))
+		{
+			pui8StringParsingNextDelimiter++;
+		}
+
+		/*
+		 * Each number is followed by a '.' except for the last one. If a string
+		 * terminator is found where one is not expected, the function returns early.
+		 */
+
+		if (*pui8StringParsingNextDelimiter=='\0' && ui32OSid < GPUVIRT_VALIDATION_NUM_OS - 1)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "There was an error parsing the OSid Region Apphint Strings"));
+			return;
+		}
+
+		/* Replace the '.' with a string terminator so the number can be parsed as an integer */
+		*pui8StringParsingNextDelimiter = '\0';
+
+		/* Parse the number. The fact that it is followed by '\0' means that the string parsing utility
+		 * will finish there and not try to parse the rest */
+
+		OSStringToUINT32(pui8StringParsingBase, ui32Base, &pui32ApphintArray[ui32OSid]);
+
+		pui8StringParsingBase = pui8StringParsingNextDelimiter + 1;
+	}
+}
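+/*
+ * Illustrative example (hypothetical values): with GPUVIRT_VALIDATION_NUM_OS
+ * equal to 8, the string "0x0.0x100.0x200.0x300.0x400.0x500.0x600.0x700"
+ * fills pui32ApphintArray with { 0x0, 0x100, ..., 0x700 }. Decimal values
+ * and ',' or ' ' separators are accepted as well.
+ */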
+
+#endif
+/*!
+*******************************************************************************
+
+ @Function      GetApphints
+
+ @Description   Read init time apphints and initialise internal variables
+
+ @Input         psHints : Pointer to apphints container
+
+ @Return        void
+
+******************************************************************************/
+static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHINTS *psHints)
+{
+	void *pvParamState = SrvInitParamOpen();
+	IMG_UINT32 ui32ParamTemp;
+	IMG_BOOL bS7TopInfra = IMG_FALSE, bE42290 = IMG_FALSE, bTPUFiltermodeCtrl = IMG_FALSE;
+	IMG_BOOL bE42606 = IMG_FALSE, bAXIACELite = IMG_FALSE;
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+	{
+		bS7TopInfra = IMG_TRUE;
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TPU_FILTERING_MODE_CONTROL))
+	{
+		bTPUFiltermodeCtrl = IMG_TRUE;
+	}
+
+	if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42290))
+	{
+		bE42290 = IMG_TRUE;
+	}
+
+	if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42606))
+	{
+		bE42606 = IMG_TRUE;
+	}
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE))
+	{
+		bAXIACELite = IMG_TRUE;
+	}
+
+	/*
+	 * NB AppHints initialised to a default value via SrvInitParamInit* macros above
+	 */
+	SrvInitParamGetUINT32(pvParamState,   DriverMode, psHints->ui32DriverMode);
+	SrvInitParamGetBOOL(pvParamState,     DustRequestInject, psHints->bDustRequestInject);
+	SrvInitParamGetBOOL(pvParamState,     EnableSignatureChecks, psHints->bEnableSignatureChecks);
+	SrvInitParamGetUINT32(pvParamState,   SignatureChecksBufSize, psHints->ui32SignatureChecksBufSize);
+
+#if defined(DEBUG)
+	SrvInitParamGetBOOL(pvParamState,    AssertOutOfMemory, psHints->bAssertOnOutOfMem);
+#endif
+	SrvInitParamGetBOOL(pvParamState,    AssertOnHWRTrigger, psHints->bAssertOnHWRTrigger);
+	SrvInitParamGetBOOL(pvParamState,    CheckMList, psHints->bCheckMlist);
+	SrvInitParamGetBOOL(pvParamState,    DisableClockGating, psHints->bDisableClockGating);
+	SrvInitParamGetBOOL(pvParamState,    DisableDMOverlap, psHints->bDisableDMOverlap);
+	SrvInitParamGetBOOL(pvParamState,    DisableFEDLogging, psHints->bDisableFEDLogging);
+	SrvInitParamGetUINT32(pvParamState,  EnableAPM, ui32ParamTemp);
+	psHints->eRGXActivePMConf = ui32ParamTemp;
+	SrvInitParamGetBOOL(pvParamState,    EnableCDMKillingRandMode, psHints->bEnableCDMKillRand);
+	SrvInitParamGetUINT32(pvParamState,  EnableFWContextSwitch, psHints->ui32EnableFWContextSwitch);
+	SrvInitParamGetUINT32(pvParamState,  VDMContextSwitchMode, psHints->ui32VDMContextSwitchMode);
+	SrvInitParamGetBOOL(pvParamState,    EnableHWR, psHints->bEnableHWR);
+	SrvInitParamGetUINT32(pvParamState,  EnableRDPowerIsland, ui32ParamTemp);
+	psHints->eRGXRDPowerIslandConf = ui32ParamTemp;
+	SrvInitParamGetUINT32(pvParamState,  FirmwarePerf, ui32ParamTemp);
+	psHints->eFirmwarePerf = ui32ParamTemp;
+	SrvInitParamGetUINT32(pvParamState,  FWContextSwitchProfile, psHints->ui32FWContextSwitchProfile);
+	SrvInitParamGetBOOL(pvParamState,    HWPerfDisableCustomCounterFilter, psHints->bHWPerfDisableCustomCounterFilter);
+	SrvInitParamGetUINT32(pvParamState,  HWPerfHostBufSizeInKB, psHints->ui32HWPerfHostBufSize);
+	SrvInitParamGetUINT32(pvParamState,  HWPerfFWBufSizeInKB, psHints->ui32HWPerfFWBufSize);
+#if defined(LINUX)
+	/* name changes */
+	{
+		IMG_UINT64 ui64Tmp;
+		SrvInitParamGetBOOL(pvParamState,    DisablePDumpPanic, psHints->bDisablePDP);
+		SrvInitParamGetUINT64(pvParamState,  HWPerfFWFilter, ui64Tmp);
+		psHints->ui32HWPerfFilter0 = (IMG_UINT32)(ui64Tmp & 0xffffffffllu);
+		psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu);
+	}
+#else
+	SrvInitParamUnreferenced(DisablePDumpPanic);
+	SrvInitParamUnreferenced(HWPerfFWFilter);
+	SrvInitParamUnreferenced(RGXBVNC);
+#endif
+	SrvInitParamGetUINT32(pvParamState, HWPerfHostFilter, psHints->ui32HWPerfHostFilter);
+	SrvInitParamGetUINT32List(pvParamState, TimeCorrClock, psHints->ui32TimeCorrClock);
+	SrvInitParamGetUINT32(pvParamState, HWRDebugDumpLimit, ui32ParamTemp);
+	psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL);
+
+	if (bS7TopInfra)
+	{
+	#define RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK	(0XFFFFFFCFU)
+	#define RGX_CR_JONES_FIX_MT_ORDER_ISP_EN	(0X00000020U)
+	#define RGX_CR_JONES_FIX_MT_ORDER_TE_EN		(0X00000010U)
+
+		SrvInitParamGetUINT32(pvParamState,  JonesDisableMask, ui32ParamTemp);
+		if (((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_ISP_EN) ||
+			((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_TE_EN))
+		{
+			ui32ParamTemp |= (RGX_CR_JONES_FIX_MT_ORDER_TE_EN |
+							  RGX_CR_JONES_FIX_MT_ORDER_ISP_EN);
+			PVR_DPF((PVR_DBG_WARNING, "Tile reordering mode requires both TE and ISP enabled. Forcing JonesDisableMask = %d",
+					ui32ParamTemp));
+		}
+		psHints->ui32JonesDisableMask = ui32ParamTemp;
+	}
+
+	if ((bE42290) && (bTPUFiltermodeCtrl))
+	{
+		SrvInitParamGetBOOL(pvParamState, NewFilteringMode, psHints->bFilteringMode);
+	}
+
+	if (bE42606)
+	{
+		SrvInitParamGetUINT32(pvParamState, TruncateMode, psHints->ui32TruncateMode);
+	}
+#if defined(EMULATOR)
+	if (bAXIACELite)
+	{
+		SrvInitParamGetBOOL(pvParamState, EnableTrustedDeviceAceConfig, psHints->bEnableTrustedDeviceAceConfig);
+	}
+#endif
+
+	SrvInitParamGetUINT32(pvParamState, UseMETAT1, ui32ParamTemp);
+	psHints->eUseMETAT1 = ui32ParamTemp & RGXFWIF_INICFG_METAT1_MASK;
+
+	SrvInitParamGetBOOL(pvParamState, ZeroFreelist, psHints->bZeroFreelist);
+
+#if defined(LINUX)
+	SrvInitParamGetUINT32(pvParamState, FWContextSwitchCrossDM, psHints->ui32FWContextSwitchCrossDM);
+#else
+	SrvInitParamUnreferenced(FWContextSwitchCrossDM);
+#endif
+
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
+	SrvInitParamGetUINT32(pvParamState, PhysMemTestPasses, psHints->ui32PhysMemTestPasses);
+#endif
+
+#if defined(SUPPORT_VALIDATION)
+	/* Apphints for TPU trilinear frac masking */
+	SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskPDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_PDM]);
+	SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskVDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_VDM]);
+	SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskCDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_CDM]);
+	SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskTDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_TDM]);
+	SrvInitParamGetBOOL(pvParamState,   ValidateIrq, psHints->bValidateIrq);
+	SrvInitParamGetUINT32(pvParamState, FBCDCVersionOverride, psHints->ui32FBCDCVersionOverride);
+#endif
+
+	/*
+	 * FW logs apphints
+	 */
+	{
+		IMG_UINT32 ui32LogType;
+		IMG_BOOL bAnyLogGroupConfigured;
+
+		SrvInitParamGetUINT32BitField(pvParamState, EnableLogGroup, ui32LogType);
+		bAnyLogGroupConfigured = ui32LogType ? IMG_TRUE : IMG_FALSE;
+		SrvInitParamGetUINT32List(pvParamState, FirmwareLogType, ui32ParamTemp);
+
+		/* Defaulting to TRACE */
+		ui32LogType |= RGXFWIF_LOG_TYPE_TRACE;
+
+		if (ui32ParamTemp == 2 /* TRACE */)
+		{
+			if (!bAnyLogGroupConfigured)
+			{
+				/* No groups configured - defaulting to MAIN group */
+				ui32LogType |= RGXFWIF_LOG_TYPE_GROUP_MAIN;
+			}
+		}
+		else if (ui32ParamTemp == 1 /* TBI */)
+		{
+			if (!bAnyLogGroupConfigured)
+			{
+				/* No groups configured - defaulting to MAIN group */
+				ui32LogType |= RGXFWIF_LOG_TYPE_GROUP_MAIN;
+			}
+			ui32LogType &= ~RGXFWIF_LOG_TYPE_TRACE;
+		}
+		else if (ui32ParamTemp == 0 /* NONE */)
+		{
+			/* "NONE" means "TRACE without any log groups enabled */
+			ui32LogType = RGXFWIF_LOG_TYPE_TRACE;
+		}
+
+		psHints->ui32LogType = ui32LogType;
+	}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	/*
+	 * GPU virtualisation validation apphints
+	 */
+	{
+		IMG_CHAR pszOSidRegionBuffer[GPUVIRT_VALIDATION_MAX_STRING_LENGTH];
+
+		SrvInitParamGetSTRING(pvParamState, OSidRegion0Min, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH);
+		_ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMin[0]);
+
+		SrvInitParamGetSTRING(pvParamState, OSidRegion0Max, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH);
+		_ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMax[0]);
+
+		SrvInitParamGetSTRING(pvParamState, OSidRegion1Min, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH);
+		_ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMin[1]);
+
+		SrvInitParamGetSTRING(pvParamState, OSidRegion1Max, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH);
+		_ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMax[1]);
+	}
+#else
+#if !defined(LINUX)
+	SrvInitParamUnreferenced(OSidRegion0Min);
+	SrvInitParamUnreferenced(OSidRegion0Max);
+	SrvInitParamUnreferenced(OSidRegion1Min);
+	SrvInitParamUnreferenced(OSidRegion1Max);
+#endif /* !defined(LINUX) */
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+
+	SrvInitParamClose(pvParamState);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      GetFWConfigFlags
+
+ @Description   Initialise and return FW config flags
+
+ @Input         psHints            : Apphints container
+ @Input         pui32FWConfigFlags : Pointer to config flags
+
+ @Return        void
+
+******************************************************************************/
+static INLINE void GetFWConfigFlags(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    RGX_SRVINIT_APPHINTS *psHints,
+                                    IMG_UINT32 *pui32FWConfigFlags,
+                                    IMG_UINT32 *pui32FWConfigFlagsExt)
+{
+	IMG_UINT32 ui32FWConfigFlags = 0;
+
+#if defined(DEBUG)
+	ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0;
+#endif
+	ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0;
+	ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0;
+	ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0;
+	ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0;
+	ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_INICFG_DISABLE_PDP_EN : 0;
+	ui32FWConfigFlags |= psHints->bEnableCDMKillRand ? RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN : 0;
+	ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? RGXFWIF_INICFG_HWPERF_EN : 0;
+#if !defined(NO_HARDWARE)
+	ui32FWConfigFlags |= psHints->bEnableHWR ? RGXFWIF_INICFG_HWR_EN : 0;
+#endif
+	ui32FWConfigFlags |= psHints->bHWPerfDisableCustomCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0;
+	ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_CUSTOM_TIMER) ? RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN : 0;
+	ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_POLLS) ? RGXFWIF_INICFG_POLL_COUNTERS_EN : 0;
+	ui32FWConfigFlags |= psHints->eUseMETAT1 << RGXFWIF_INICFG_METAT1_SHIFT;
+	ui32FWConfigFlags |= psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_CTXSWITCH_CLRMSK;
+	ui32FWConfigFlags |= (psHints->ui32VDMContextSwitchMode << RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT) & RGXFWIF_INICFG_VDM_CTX_STORE_MODE_MASK;
+
+	ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK;
+
+	*pui32FWConfigFlags = ui32FWConfigFlags;
+
+	*pui32FWConfigFlagsExt = psHints->ui32FWContextSwitchCrossDM;
+#if defined(SUPPORT_VALIDATION)
+#if defined(NO_HARDWARE) && defined(PDUMP)
+	*pui32FWConfigFlagsExt |= psHints->bValidateIrq ? RGXFWIF_INICFG_EXT_VALIDATE_IRQ : 0;
+#endif
+
+	if (psHints->ui32FBCDCVersionOverride > 0)
+	{
+		*pui32FWConfigFlagsExt |= (psHints->ui32FBCDCVersionOverride == 2) ? RGXFWIF_INICFG_EXT_FBCDC_V3_1_EN : 0;
+	}
+	else
+#endif
+	{
+		*pui32FWConfigFlagsExt |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? RGXFWIF_INICFG_EXT_FBCDC_V3_1_EN : 0;
+	}
+
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      GetFilterFlags
+
+ @Description   Initialise and return filter flags
+
+ @Input         psHints : Apphints container
+
+ @Return        IMG_UINT32 : Filter flags
+
+******************************************************************************/
+static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints)
+{
+	IMG_UINT32 ui32FilterFlags = 0;
+
+	ui32FilterFlags |= psHints->bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0;
+	if (psHints->ui32TruncateMode == 2)
+	{
+		ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT;
+	}
+	else if (psHints->ui32TruncateMode == 3)
+	{
+		ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF;
+	}
+
+	return ui32FilterFlags;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      GetDeviceFlags
+
+ @Description   Initialise and return device flags
+
+ @Input         psHints          : Apphints container
+ @Input         pui32DeviceFlags : Pointer to device flags
+
+ @Return        void
+
+******************************************************************************/
+static INLINE void GetDeviceFlags(RGX_SRVINIT_APPHINTS *psHints,
+                                  IMG_UINT32 *pui32DeviceFlags)
+{
+	IMG_UINT32 ui32DeviceFlags = 0;
+
+	ui32DeviceFlags |= psHints->bDustRequestInject? RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN : 0;
+	ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKM_DEVICE_STATE_ZERO_FREELIST : 0;
+	ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0;
+
+	*pui32DeviceFlags = ui32DeviceFlags;
+}
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+/*!
+*******************************************************************************
+
+ @Function      RGXTDProcessFWImage
+
+ @Description   Fetch and send data used by the trusted device to complete
+                the FW image setup
+
+ @Input         psDeviceNode : Device node
+ @Input         psRGXFW      : Firmware blob
+ @Input         puFWParams   : Parameters used by the FW at boot time
+
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                        struct RGXFW *psRGXFW,
+                                        RGX_FW_BOOT_PARAMS *puFWParams)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_TD_FW_PARAMS sTDFWParams;
+	RGX_LAYER_PARAMS sLayerParams;
+	PVRSRV_ERROR eError;
+
+	if (psDevConfig->pfnTDSendFWImage == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXTDProcessFWImage: TDSendFWImage not implemented!"));
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	sLayerParams.psDevInfo = psDevInfo;
+
+	sTDFWParams.pvFirmware       = RGXFirmwareData(psRGXFW);
+	sTDFWParams.ui32FirmwareSize = RGXFirmwareSize(psRGXFW);
+
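+	/* The boot parameters are passed in the form the firmware processor
+	 * uses: META-style device virtual addresses for non-MIPS cores,
+	 * physical addresses for MIPS cores (see the two branches below). */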
+	if (!RGX_DEVICE_HAS_FEATURE(&sLayerParams, MIPS))
+	{
+		sTDFWParams.uFWP.sMeta.sFWCodeDevVAddr        = puFWParams->sMeta.sFWCodeDevVAddr;
+		sTDFWParams.uFWP.sMeta.sFWDataDevVAddr        = puFWParams->sMeta.sFWDataDevVAddr;
+		sTDFWParams.uFWP.sMeta.sFWCorememCodeDevVAddr = puFWParams->sMeta.sFWCorememCodeDevVAddr;
+		sTDFWParams.uFWP.sMeta.sFWCorememCodeFWAddr   = puFWParams->sMeta.sFWCorememCodeFWAddr;
+		sTDFWParams.uFWP.sMeta.uiFWCorememCodeSize    = puFWParams->sMeta.uiFWCorememCodeSize;
+		sTDFWParams.uFWP.sMeta.sFWCorememDataDevVAddr = puFWParams->sMeta.sFWCorememDataDevVAddr;
+		sTDFWParams.uFWP.sMeta.sFWCorememDataFWAddr   = puFWParams->sMeta.sFWCorememDataFWAddr;
+		sTDFWParams.uFWP.sMeta.ui32NumThreads         = puFWParams->sMeta.ui32NumThreads;
+		sTDFWParams.uFWP.sMeta.ui32MainThreadID       = puFWParams->sMeta.ui32MainThreadID;
+	}
+	else
+	{
+		sTDFWParams.uFWP.sMips.sGPURegAddr            = puFWParams->sMips.sGPURegAddr;
+		sTDFWParams.uFWP.sMips.sFWPageTableAddr       = puFWParams->sMips.sFWPageTableAddr;
+		sTDFWParams.uFWP.sMips.sFWStackAddr           = puFWParams->sMips.sFWStackAddr;
+	}
+
+	eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams);
+
+	return eError;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function      RGXAcquireMipsBootldrData
+
+ @Description   Acquire MIPS bootloader data parameters
+
+ @Input         psDeviceNode : Device node
+ @Input         puFWParams   : FW boot parameters
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR RGXAcquireMipsBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                              RGX_FW_BOOT_PARAMS *puFWParams)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bValid;
+
+	/* Rogue Registers physical address */
+#if defined(SUPPORT_ALT_REGBASE)
+	puFWParams->sMips.sGPURegAddr = psDeviceNode->psDevConfig->sAltRegsGpuPBase;
+#else
+	PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+	                           1,
+	                           &puFWParams->sMips.sGPURegAddr,
+	                           &(psDeviceNode->psDevConfig->sRegsCpuPBase));
+#endif
+
+	/* MIPS Page Table physical address */
+	MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &puFWParams->sMips.sFWPageTableAddr);
+
+	/* MIPS Stack Pointer Physical Address */
+	eError = RGXGetPhyAddr(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR,
+	                       &puFWParams->sMips.sFWStackAddr,
+	                       RGXGetFWImageSectionOffset(NULL, MIPS_STACK),
+	                       OSGetPageShift(),
+	                       1,
+	                       &bValid);
+
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      InitFirmware
+
+ @Description   Allocate, initialise and pdump Firmware code and data memory
+
+ @Input         psDeviceNode : Device Node
+ @Input         psHints      : Apphints
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 RGX_SRVINIT_APPHINTS *psHints)
+{
+	struct RGXFW      *psRGXFW = NULL;
+	const IMG_BYTE    *pbRGXFirmware = NULL;
+
+	/* FW code memory */
+	IMG_DEVMEM_SIZE_T uiFWCodeAllocSize;
+	void              *pvFWCodeHostAddr;
+
+	/* FW data memory */
+	IMG_DEVMEM_SIZE_T uiFWDataAllocSize;
+	void              *pvFWDataHostAddr;
+
+	/* FW coremem code memory */
+	IMG_DEVMEM_SIZE_T uiFWCorememCodeAllocSize;
+	void              *pvFWCorememCodeHostAddr = NULL;
+
+	/* FW coremem data memory */
+	IMG_DEVMEM_SIZE_T uiFWCorememDataAllocSize;
+	void              *pvFWCorememDataHostAddr = NULL;
+
+	RGX_FW_BOOT_PARAMS uFWParams;
+	RGX_LAYER_PARAMS sLayerParams;
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	IMG_BOOL bUseSecureFWData = RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) &&
+	                            RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32;
+#endif
+
+	/*
+	 * Get pointer to Firmware image
+	 */
+	pbRGXFirmware = RGXLoadAndGetFWData(psDeviceNode, &psRGXFW);
+	if (!pbRGXFirmware)
+	{
+		/* Error or confirmation message generated in RGXLoadAndGetFWData */
+		eError = PVRSRV_ERROR_INIT_FAILURE;
+		goto cleanup_initfw;
+	}
+
+	sLayerParams.psDevInfo = psDevInfo;
+
+	/*
+	 * Allocate Firmware memory
+	 */
+
+	eError = RGXGetFWImageAllocSize(&sLayerParams,
+	                                pbRGXFirmware,
+	                                RGXFirmwareSize(psRGXFW),
+	                                &uiFWCodeAllocSize,
+	                                &uiFWDataAllocSize,
+	                                &uiFWCorememCodeAllocSize,
+	                                &uiFWCorememDataAllocSize);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: RGXGetFWImageAllocSize failed", __func__));
+		goto cleanup_initfw;
+	}
+
+	psDevInfo->ui32FWCorememCodeSizeInBytes = uiFWCorememCodeAllocSize;
+	psDevInfo->ui32FWCodeSizeInBytes = uiFWCodeAllocSize;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	/* Disable META core memory allocation unless the META DMA is available */
+	if (!RGX_DEVICE_HAS_FEATURE(&sLayerParams, META_DMA))
+	{
+		uiFWCorememCodeAllocSize = 0;
+		uiFWCorememDataAllocSize = 0;
+	}
+#endif
+	eError = RGXInitAllocFWImgMem(psDeviceNode,
+	                              uiFWCodeAllocSize,
+	                              uiFWDataAllocSize,
+	                              uiFWCorememCodeAllocSize,
+	                              uiFWCorememDataAllocSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: PVRSRVRGXInitAllocFWImgMemKM failed (%d)",
+				 __func__, eError));
+		goto cleanup_initfw;
+	}
+
+	/*
+	 * Acquire pointers to Firmware allocations
+	 */
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, &pvFWCodeHostAddr);
+	PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", cleanup_initfw);
+
+#else
+	/* We can't get a pointer to a secure FW allocation from within the DDK */
+	pvFWCodeHostAddr = NULL;
+#endif
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	if (bUseSecureFWData)
+	{
+		/* We can't get a pointer to a secure FW allocation from within the DDK */
+		pvFWDataHostAddr = NULL;
+	}
+	else
+#endif
+	{
+		eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWDataHostAddr);
+		PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_code);
+	}
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+	if (uiFWCorememCodeAllocSize)
+	{
+		eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, &pvFWCorememCodeHostAddr);
+		PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_data);
+	}
+#else
+	/* We can't get a pointer to a secure FW allocation from within the DDK */
+	pvFWCorememCodeHostAddr = NULL;
+#endif
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	if (bUseSecureFWData)
+	{
+		pvFWCorememDataHostAddr = NULL;
+	}
+	else
+#endif
+	if (uiFWCorememDataAllocSize)
+	{
+		eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, &pvFWCorememDataHostAddr);
+		PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_corememcode);
+	}
+
+	/*
+	 * Prepare FW boot parameters
+	 */
+
+	if (RGX_DEVICE_HAS_FEATURE(&sLayerParams, MIPS))
+	{
+		eError = RGXAcquireMipsBootldrData(psDeviceNode, &uFWParams);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: RGXAcquireMipsBootldrData failed (%d)",
+					 __func__, eError));
+			goto release_fw_allocations;
+		}
+	}
+	else
+	{
+		uFWParams.sMeta.sFWCodeDevVAddr = psDevInfo->sFWCodeDevVAddrBase;
+		uFWParams.sMeta.sFWDataDevVAddr = psDevInfo->sFWDataDevVAddrBase;
+		uFWParams.sMeta.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase;
+		uFWParams.sMeta.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr;
+		uFWParams.sMeta.uiFWCorememCodeSize = uiFWCorememCodeAllocSize;
+		uFWParams.sMeta.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase;
+		uFWParams.sMeta.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr;
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+		uFWParams.sMeta.ui32NumThreads = 2;
+#else
+		uFWParams.sMeta.ui32NumThreads = psHints->eUseMETAT1 == RGX_META_T1_OFF ? 1 : 2;
+#endif
+		uFWParams.sMeta.ui32MainThreadID = psHints->eUseMETAT1 == RGX_META_T1_MAIN ? 1 : 0;
+	}
+
+
+	/*
+	 * Process the Firmware image and setup code and data segments.
+	 *
+	 * When the trusted device is enabled and the FW code lives
+	 * in secure memory we will only setup the data segments here,
+	 * while the code segments will be loaded to secure memory
+	 * by the trusted device.
+	 */
+	eError = RGXProcessFWImage(&sLayerParams,
+							   pbRGXFirmware,
+							   pvFWCodeHostAddr,
+							   pvFWDataHostAddr,
+							   pvFWCorememCodeHostAddr,
+							   pvFWCorememDataHostAddr,
+							   &uFWParams);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: RGXProcessFWImage failed (%d)",
+				 __func__, eError));
+		goto release_fw_allocations;
+	}
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	RGXTDProcessFWImage(psDeviceNode, psRGXFW, &uFWParams);
+#endif
+
+
+	/*
+	 * PDump Firmware allocations
+	 */
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware code image");
+	DevmemPDumpLoadMem(psDevInfo->psRGXFWCodeMemDesc,
+	                   0,
+	                   uiFWCodeAllocSize,
+	                   PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	if (!bUseSecureFWData)
+#endif
+	{
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware data image");
+		DevmemPDumpLoadMem(psDevInfo->psRGXFWDataMemDesc,
+		                   0,
+		                   uiFWDataAllocSize,
+		                   PDUMP_FLAGS_CONTINUOUS);
+	}
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+	if (uiFWCorememCodeAllocSize)
+	{
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware coremem code image");
+		DevmemPDumpLoadMem(psDevInfo->psRGXFWCorememCodeMemDesc,
+						   0,
+						   uiFWCorememCodeAllocSize,
+						   PDUMP_FLAGS_CONTINUOUS);
+	}
+#endif
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	if (!bUseSecureFWData && uiFWCorememDataAllocSize)
+#else
+	if (uiFWCorememDataAllocSize)
+#endif
+	{
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware coremem data store image");
+		DevmemPDumpLoadMem(psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+						   0,
+						   uiFWCorememDataAllocSize,
+						   PDUMP_FLAGS_CONTINUOUS);
+	}
+
+	/*
+	 * Release Firmware allocations and clean up
+	 */
+
+release_fw_allocations:
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	if (!bUseSecureFWData && uiFWCorememDataAllocSize)
+#else
+	if (uiFWCorememDataAllocSize)
+#endif
+	{
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+	}
+release_corememcode:
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+	if (uiFWCorememCodeAllocSize)
+	{
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc);
+	}
+#endif
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+release_data:
+#endif
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	if (!bUseSecureFWData)
+#endif
+	{
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+	}
+
+release_code:
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+#endif
+cleanup_initfw:
+	if (psRGXFW != NULL)
+	{
+		RGXUnloadFirmware(psRGXFW);
+	}
+
+	return eError;
+}
+
+
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+
+ @Function      InitialiseHWPerfCounters
+
+ @Description   Initialisation of hardware performance counters and dumping
+                them out to pdump, so that they can be modified at a later
+                point.
+
+ @Input         pvDevice
+ @Input         psHWPerfDataMemDesc
+ @Input         psHWPerfInitDataInt
+
+ @Return        void
+
+******************************************************************************/
+
+static void InitialiseHWPerfCounters(void *pvDevice, DEVMEM_MEMDESC *psHWPerfDataMemDesc, RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt)
+{
+	RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData;
+	IMG_UINT32 ui32CntBlkModelLen;
+	const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel;
+	const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc;
+	IMG_UINT32 ui32BlockID, ui32BlkCfgIdx, ui32CounterIdx;
+	RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo;
+
+	ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel);
+	for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++)
+	{
+		/* Skip this counter block if the core does not implement it,
+		 * due to its core type/BVNC features. */
+		psBlkTypeDesc = &asCntBlkTypeModel[ui32BlkCfgIdx];
+		if (psBlkTypeDesc->pfnIsBlkPresent(psBlkTypeDesc, pvDevice, &sCntBlkRtInfo) == IMG_FALSE)
+		{
+			continue;
+		}
+
+		/* Program all counters in one block so those already on may
+		 * be configured off and vice versa. */
+		for (ui32BlockID = psBlkTypeDesc->uiCntBlkIdBase;
+					 ui32BlockID < psBlkTypeDesc->uiCntBlkIdBase+sCntBlkRtInfo.uiNumUnits;
+					 ui32BlockID++)
+		{
+
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Unit %d Block : %s", ui32BlockID-psBlkTypeDesc->uiCntBlkIdBase, psBlkTypeDesc->pszBlockNameComment);
+			/* Get the block's configuration store to update from the global
+			 * store of block configurations. This is used to remember the
+			 * configuration between reconfigurations and core power-on in APM */
+			psHWPerfInitBlkData = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfInitDataInt);
+			/* Assert to check for HWPerf block mis-configuration */
+			PVR_ASSERT(psHWPerfInitBlkData);
+
+			psHWPerfInitBlkData->bValid = IMG_TRUE;
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "bValid: This specifies if the layout block is valid for the given BVNC.");
+			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+							(size_t)&(psHWPerfInitBlkData->bValid) - (size_t)(psHWPerfInitDataInt),
+							psHWPerfInitBlkData->bValid,
+							PDUMP_FLAGS_CONTINUOUS);
+
+			psHWPerfInitBlkData->bEnabled = IMG_FALSE;
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "bEnabled: Set to 0x1 if the block needs to be enabled during playback.");
+			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+							(size_t)&(psHWPerfInitBlkData->bEnabled) - (size_t)(psHWPerfInitDataInt),
+							psHWPerfInitBlkData->bEnabled,
+							PDUMP_FLAGS_CONTINUOUS);
+
+			psHWPerfInitBlkData->eBlockID = ui32BlockID;
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "eBlockID: The Block ID for the layout block. See RGX_HWPERF_CNTBLK_ID for further information.");
+			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+							(size_t)&(psHWPerfInitBlkData->eBlockID) - (size_t)(psHWPerfInitDataInt),
+							psHWPerfInitBlkData->eBlockID,
+							PDUMP_FLAGS_CONTINUOUS);
+
+			psHWPerfInitBlkData->uiCounterMask = 0x00;
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "uiCounterMask: Bitmask for selecting the counters that need to be configured. (Bit 0 - counter0, bit 1 - counter1 and so on.");
+			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+							(size_t)&(psHWPerfInitBlkData->uiCounterMask) - (size_t)(psHWPerfInitDataInt),
+							psHWPerfInitBlkData->uiCounterMask,
+							PDUMP_FLAGS_CONTINUOUS);
+
+			for (ui32CounterIdx = RGX_CNTBLK_COUNTER0_ID; ui32CounterIdx < psBlkTypeDesc->uiNumCounters; ui32CounterIdx++)
+			{
+				psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx] = IMG_UINT64_C(0x0000000000000000);
+
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "%s_COUNTER_%d", psBlkTypeDesc->pszBlockNameComment,ui32CounterIdx);
+				DevmemPDumpLoadMemValue64(psHWPerfDataMemDesc,
+							(size_t)&(psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt),
+							psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx],
+							PDUMP_FLAGS_CONTINUOUS);
+
+			}
+		}
+	}
+}
+/*!
+*******************************************************************************
+
+ @Function      InitialiseCustomCounters
+
+ @Description   Initialisation of custom counters and dumping them out to
+                pdump, so that they can be modified at a later point.
+
+ @Input         psHWPerfDataMemDesc
+
+ @Return        void
+
+******************************************************************************/
+
+static void InitialiseCustomCounters(DEVMEM_MEMDESC *psHWPerfDataMemDesc)
+{
+	IMG_UINT32 ui32CustomBlock, ui32CounterID;
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "ui32SelectedCountersBlockMask - The Bitmask of the custom counters that are to be selected");
+	DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+						offsetof(RGXFWIF_HWPERF_CTL, ui32SelectedCountersBlockMask),
+						0,
+						PDUMP_FLAGS_CONTINUOUS);
+
+	for (ui32CustomBlock = 0; ui32CustomBlock < RGX_HWPERF_MAX_CUSTOM_BLKS; ui32CustomBlock++)
+	{
+		/*
+		 * Some compilers cannot cope with the use of offsetof() below - the specific problem being
+		 * the use of a non-const variable in the expression, which they require to be constant.
+		 * The typical compiler error produced is "expression must have a constant value".
+		 */
+		const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounters
+		= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].ui32NumSelectedCounters);
+
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "ui32NumSelectedCounters - The Number of counters selected for this Custom Block: %d",ui32CustomBlock );
+		DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+					uiOffsetOfCustomBlockSelectedCounters,
+					0,
+					PDUMP_FLAGS_CONTINUOUS);
+
+		for (ui32CounterID = 0; ui32CounterID < RGX_HWPERF_MAX_CUSTOM_CNTRS; ui32CounterID++ )
+		{
+			const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounterIDs
+			= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].aui32SelectedCountersIDs[ui32CounterID]);
+
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "CUSTOMBLK_%d_COUNTERID_%d",ui32CustomBlock, ui32CounterID);
+			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+					uiOffsetOfCustomBlockSelectedCounterIDs,
+					0,
+					PDUMP_FLAGS_CONTINUOUS);
+		}
+	}
+}
+
+/*!
+*******************************************************************************
+
+ @Function      InitialiseAllCounters
+
+ @Description   Initialise HWPerf and custom counters
+
+ @Input         psDeviceNode : Device Node
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+	RGXFWIF_HWPERF_CTL *psHWPerfInitData;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, (void **)&psHWPerfInitData);
+	PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", failHWPerfCountersMemDescAqCpuVirt);
+
+	InitialiseHWPerfCounters(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc, psHWPerfInitData);
+	InitialiseCustomCounters(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+
+	/* Release the mapping on the success path only: a failed acquire above
+	 * jumps past this call, so an unacquired address is never released. */
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+
+failHWPerfCountersMemDescAqCpuVirt:
+
+	return eError;
+}
+#endif /* PDUMP */
+
+/*
+ * _ParseHTBAppHints:
+ *
+ * Generate the necessary references to the globally visible AppHints which
+ * are declared in the #include "km_apphint_defs.h" above.
+ * Without these local references some compiler tool-chains will treat
+ * unreferenced declarations as fatal errors. This function duplicates the
+ * HTB-specific apphint references made in htbserver.c:HTBInit().
+ * However, it makes absolutely *NO* use of these hints.
+ */
+static void
+_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	void *pvParamState = NULL;
+	IMG_UINT32 ui32LogType;
+	IMG_BOOL bAnyLogGroupConfigured;
+	IMG_UINT32 ui32BufferSize;
+	IMG_UINT32 ui32OpMode;
+
+	/* Services initialisation parameters */
+	pvParamState = SrvInitParamOpen();
+	if (pvParamState == NULL)
+		return;
+
+	SrvInitParamGetUINT32BitField(pvParamState, EnableHTBLogGroup, ui32LogType);
+	bAnyLogGroupConfigured = ui32LogType ? IMG_TRUE : IMG_FALSE;
+	SrvInitParamGetUINT32List(pvParamState, HTBOperationMode, ui32OpMode);
+	SrvInitParamGetUINT32(pvParamState, HTBufferSizeInKB, ui32BufferSize);
+
+	SrvInitParamClose(pvParamState);
+}
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInit
+
+ @Description   RGX Initialisation
+
+ @Input         psDeviceNode
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+
+	/* Services initialisation parameters */
+	RGX_SRVINIT_APPHINTS sApphints = {0};
+	IMG_UINT32 ui32FWConfigFlags, ui32FWConfigFlagsExt;
+	IMG_UINT32 ui32DeviceFlags;
+
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+	/* Services initialisation parameters */
+	_ParseHTBAppHints(psDeviceNode);
+	GetApphints(psDevInfo, &sApphints);
+	GetDeviceFlags(&sApphints, &ui32DeviceFlags);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	RGXVirtPopulateLMASubArenas(psDeviceNode, sApphints.aui32OSidMin, sApphints.aui32OSidMax, sApphints.bEnableTrustedDeviceAceConfig);
+#endif
+
+	eError = RGXInitCreateFWKernelMemoryContext(psDeviceNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create FW kernel memory context (%u)",
+		         __func__, eError));
+		goto cleanup;
+	}
+
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		eError = InitFirmware(psDeviceNode, &sApphints);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: InitFirmware failed (%d)",
+					 __func__, eError));
+			goto cleanup;
+		}
+	}
+
+
+	/*
+	 * Setup Firmware initialisation data
+	 */
+
+	GetFWConfigFlags(psDeviceNode, &sApphints, &ui32FWConfigFlags, &ui32FWConfigFlagsExt);
+
+	eError = RGXInitFirmware(psDeviceNode,
+	                         sApphints.bEnableSignatureChecks,
+	                         sApphints.ui32SignatureChecksBufSize,
+	                         sApphints.ui32HWPerfFWBufSize,
+	                         (IMG_UINT64)sApphints.ui32HWPerfFilter0 |
+	                         ((IMG_UINT64)sApphints.ui32HWPerfFilter1 << 32),
+	                         0,
+	                         NULL,
+	                         ui32FWConfigFlags,
+	                         sApphints.ui32LogType,
+	                         GetFilterFlags(&sApphints),
+	                         sApphints.ui32JonesDisableMask,
+	                         sApphints.ui32HWRDebugDumpLimit,
+	                         sizeof(RGXFWIF_HWPERF_CTL),
+#if defined(SUPPORT_VALIDATION)
+	                         &sApphints.aui32TPUTrilinearFracMask[0],
+#else
+	                         NULL,
+#endif
+	                         sApphints.eRGXRDPowerIslandConf,
+	                         sApphints.eFirmwarePerf,
+	                         ui32FWConfigFlagsExt);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: PVRSRVRGXInitFirmware failed (%d)",
+				 __func__, eError));
+		goto cleanup;
+	}
+
+#if defined(PDUMP)
+	eError = InitialiseAllCounters(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: InitialiseAllCounters failed (%d)",
+				 __func__, eError));
+		goto cleanup;
+	}
+#endif
+
+	/*
+	 * Perform second stage of RGX initialisation
+	 */
+	eError = RGXInitDevPart2(psDeviceNode,
+	                         ui32DeviceFlags,
+	                         sApphints.ui32HWPerfHostBufSize,
+	                         sApphints.ui32HWPerfHostFilter,
+	                         sApphints.eRGXActivePMConf);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: PVRSRVRGXInitDevPart2KM failed (%d)",
+				 __func__, eError));
+		goto cleanup;
+	}
+
+#if defined(SUPPORT_VALIDATION)
+	PVRSRVAppHintDumpState();
+#endif
+
+	eError = PVRSRV_OK;
+
+cleanup:
+	return eError;
+}
+
+/******************************************************************************
+ End of file (rgxsrvinit.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxstartstop.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxstartstop.c
new file mode 100644
index 0000000..15c44d7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxstartstop.c
@@ -0,0 +1,1167 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific start/stop routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific start/stop routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* The routines implemented here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when trusted device is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxstartstop.h"
+
+#if defined(SUPPORT_SHARED_SLC)
+#include "rgxapi_km.h"
+#include "rgxdevice.h"
+#endif
+
+#include "km/tpu_cacheability_km.h"
+
+#define SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING
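+/* Defining this indicates that the SoC guarantees strict ordering of writes
+ * to the same address; __RGXInitSLC then enables the BRN 60084 PSG hazard
+ * check only where ERN 61389 applies (see __RGXInitSLC below). */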
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXEnableClocks
+
+ @Description   Enable RGX Clocks
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXEnableClocks(const void *hPrivate)
+{
+	RGXCommentLog(hPrivate, "RGX clock: use default (automatic clock gating)");
+}
+
+static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Wait for Slave Port to be Ready */
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_META_SP_MSLVCTRL1,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+	if (eError != PVRSRV_OK) return eError;
+
+	/* Issue a Write */
+	RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr);
+	RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue);
+
+	return eError;
+}
+
+static PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate,
+                                            IMG_UINT32 ui32RegAddr,
+                                            IMG_UINT32* ui32RegValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
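+	/* A slave-port read is a two-step handshake: poll for READY, issue the
+	 * register address with RD_EN set, then poll for READY again before
+	 * sampling MSLVDATAX. */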
+	/* Wait for Slave Port to be Ready */
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_META_SP_MSLVCTRL1,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+	if (eError != PVRSRV_OK) return eError;
+
+	/* Issue a Read */
+	RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+
+	/* Wait for Slave Port to be Ready */
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_META_SP_MSLVCTRL1,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+	if (eError != PVRSRV_OK) return eError;
+
+#if !defined(NO_HARDWARE)
+	*ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX);
+#else
+	*ui32RegValue = 0xFFFFFFFF;
+#endif
+
+	return eError;
+}
+
+static PVRSRV_ERROR RGXWriteMetaCoreRegThroughSP(const void *hPrivate,
+                                                IMG_UINT32 ui32CoreReg,
+                                                IMG_UINT32 ui32Value)
+{
+	IMG_UINT32 i = 0;
+
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXDT_OFFSET, ui32Value);
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, ui32CoreReg & ~META_CR_TXUXXRXRQ_RDnWR_BIT);
+
+	do
+	{
+		RGXReadMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, &ui32Value);
+	} while (((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT) && (i++ < 1000));
+
+	/* On timeout, the post-increment in the loop condition leaves i above 1000 */
+	if (i > 1000)
+	{
+		RGXCommentLog(hPrivate, "RGXWriteMetaCoreRegThroughSP: Timeout");
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXStartFirmware(const void *hPrivate)
+{
+	PVRSRV_ERROR eError;
+
+	/* Give privilege to debug and slave port */
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN);
+
+	/* Point Meta to the bootloader address, global (uncached) range */
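+	/* (PC_ACCESS(0) is assumed to select thread 0's program counter; the
+	 *  global-range bit routes the fetch through the uncached global region
+	 *  noted above.) */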
+	eError = RGXWriteMetaCoreRegThroughSP(hPrivate,
+	                                     PC_ACCESS(0),
+	                                     RGXFW_BOOTLDR_META_ADDR | META_MEM_GLOBAL_RANGE_BIT);
+
+	if (eError != PVRSRV_OK)
+	{
+		RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start failed!");
+		return eError;
+	}
+
+	/* Enable minim encoding */
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXPRIVEXT, META_CR_TXPRIVEXT_MINIM_EN);
+
+	/* Enable Meta thread */
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_T0ENABLE_OFFSET, META_CR_TXENABLE_ENABLE_BIT);
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInitMetaProcWrapper
+
+ @Description   Configures the hardware wrapper of the META processor
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitMetaProcWrapper(const void *hPrivate)
+{
+	IMG_UINT64 ui64GartenConfig;
+
+	/* Set Garten IDLE to META idle and set the Garten Wrapper BIF fence address */
+
+	/* Garten IDLE bit controlled by META */
+	ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META;
+
+	/* The fence address is set during the FW init sequence */
+
+	if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+	{
+		/* Set PC = 0 for fences */
+		ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK;
+		ui64GartenConfig |= (IMG_UINT64)META_MMU_CONTEXT_MAPPING
+		                    << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT;
+
+	}
+	else
+	{
+		/* Set PC = 0 for fences */
+		ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK;
+		ui64GartenConfig |= (IMG_UINT64)META_MMU_CONTEXT_MAPPING
+		                    << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT;
+
+		/* Set SLC DM=META */
+		ui64GartenConfig |= ((IMG_UINT64) RGXFW_SEGMMU_META_DM_ID) << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT;
+	}
+
+	RGXCommentLog(hPrivate, "RGXStart: Configure META wrapper");
+	RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInitMipsProcWrapper
+
+ @Description   Configures the hardware wrapper of the MIPS processor
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitMipsProcWrapper(const void *hPrivate)
+{
+	IMG_DEV_PHYADDR sPhyAddr;
+	IMG_UINT64 ui64RemapSettings = RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE; /* Same for all remap registers */
+
+	RGXCommentLog(hPrivate, "RGXStart: Configure MIPS wrapper");
+
+	/*
+	 * MIPS wrapper (registers transaction ID and ISA mode) setup
+	 */
+
+	RGXCommentLog(hPrivate, "RGXStart: Write wrapper config register");
+
+	if (RGXGetDevicePhysBusWidth(hPrivate) > 32)
+	{
+		RGXWriteReg32(hPrivate,
+		              RGX_CR_MIPS_WRAPPER_CONFIG,
+		              (RGXMIPSFW_REGISTERS_VIRTUAL_BASE >>
+		              RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN) |
+		              RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS);
+	}
+	else
+	{
+		RGXAcquireGPURegsAddr(hPrivate, &sPhyAddr);
+
+		RGXMIPSWrapperConfig(hPrivate,
+		                     RGX_CR_MIPS_WRAPPER_CONFIG,
+		                     sPhyAddr.uiAddr,
+		                     RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN,
+		                     RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS);
+	}
+
+	/*
+	 * Boot remap setup
+	 */
+
+	RGXAcquireBootRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	/* Do not mark accesses to a FW code remap region as DRM accesses */
+	ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+#if defined(MIPS_FW_CODE_OSID)
+	ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK;
+	ui64RemapSettings |= MIPS_FW_CODE_OSID << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT;
+#endif
+
+	RGXCommentLog(hPrivate, "RGXStart: Write boot remap registers");
+	RGXBootRemapConfig(hPrivate,
+	                   RGX_CR_MIPS_ADDR_REMAP1_CONFIG1,
+	                   RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN,
+	                   RGX_CR_MIPS_ADDR_REMAP1_CONFIG2,
+	                   sPhyAddr.uiAddr,
+	                   ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK,
+	                   ui64RemapSettings);
+
+	if (RGX_DEVICE_HAS_BRN(hPrivate, 63553))
+	{
+		IMG_BOOL bPhysBusAbove32Bit = RGXGetDevicePhysBusWidth(hPrivate) > 32;
+		IMG_BOOL bDevicePA0IsValid  = RGXDevicePA0IsValid(hPrivate);
+
+		/* The WA is always required on cores with a physical bus wider than 32 bits,
+		 * to avoid continuous unmapped memory accesses to address 0x0 */
+		if (bPhysBusAbove32Bit || !bDevicePA0IsValid)
+		{
+			RGXCodeRemapConfig(hPrivate,
+					RGX_CR_MIPS_ADDR_REMAP5_CONFIG1,
+					0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN,
+					RGX_CR_MIPS_ADDR_REMAP5_CONFIG2,
+					sPhyAddr.uiAddr,
+					~RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK,
+					ui64RemapSettings);
+		}
+	}
+
+	/*
+	 * Data remap setup
+	 */
+
+	RGXAcquireDataRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	if (RGXGetDevicePhysBusWidth(hPrivate) > 32)
+	{
+		/* Remapped private data in secure memory */
+		ui64RemapSettings |= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN;
+	}
+	else
+	{
+		/* Remapped data in non-secure memory */
+		ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+	}
+#endif
+
+#if defined(MIPS_FW_CODE_OSID)
+	ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK;
+#endif
+
+	RGXCommentLog(hPrivate, "RGXStart: Write data remap registers");
+	RGXDataRemapConfig(hPrivate,
+	                   RGX_CR_MIPS_ADDR_REMAP2_CONFIG1,
+	                   RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN,
+	                   RGX_CR_MIPS_ADDR_REMAP2_CONFIG2,
+	                   sPhyAddr.uiAddr,
+	                   ~RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK,
+	                   ui64RemapSettings);
+
+	/*
+	 * Code remap setup
+	 */
+
+	RGXAcquireCodeRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	/* Do not mark accesses to a FW code remap region as DRM accesses */
+	ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+#if defined(MIPS_FW_CODE_OSID)
+	ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK;
+	ui64RemapSettings |= MIPS_FW_CODE_OSID << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT;
+#endif
+
+	RGXCommentLog(hPrivate, "RGXStart: Write exceptions remap registers");
+	RGXCodeRemapConfig(hPrivate,
+	                   RGX_CR_MIPS_ADDR_REMAP3_CONFIG1,
+	                   RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN,
+	                   RGX_CR_MIPS_ADDR_REMAP3_CONFIG2,
+	                   sPhyAddr.uiAddr,
+	                   ~RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK,
+	                   ui64RemapSettings);
+
+	if (RGXGetDevicePhysBusWidth(hPrivate) == 32)
+	{
+		/*
+		 * Trampoline remap setup
+		 */
+
+		RGXAcquireTrampolineRemapAddr(hPrivate, &sPhyAddr);
+		ui64RemapSettings = RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+		/* Remapped data in non-secure memory */
+		ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+#if defined(MIPS_FW_CODE_OSID)
+		ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK;
+#endif
+
+		RGXCommentLog(hPrivate, "RGXStart: Write trampoline remap registers");
+		RGXTrampolineRemapConfig(hPrivate,
+		                         RGX_CR_MIPS_ADDR_REMAP4_CONFIG1,
+		                         sPhyAddr.uiAddr | RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN,
+		                         RGX_CR_MIPS_ADDR_REMAP4_CONFIG2,
+		                         RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR,
+		                         ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK,
+		                         ui64RemapSettings);
+	}
+
+	/* Garten IDLE bit controlled by MIPS */
+	RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to MIPS");
+	RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
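+	/* (The META-named IDLE_CTRL encoding is reused here; it is assumed to
+	 *  mean "idle controlled by the FW processor" on MIPS cores as well.) */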
+
+	/* Turn on the EJTAG probe (only useful when the driver is live) */
+	RGXWriteReg32(hPrivate, RGX_CR_MIPS_DEBUG_CONFIG, 0);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      __RGXInitSLC
+
+ @Description   Initialise RGX SLC
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void __RGXInitSLC(const void *hPrivate)
+{
+	if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_CACHE_HIERARCHY))
+	{
+		IMG_UINT32 ui32Reg;
+		IMG_UINT32 ui32RegVal;
+
+		if (RGX_DEVICE_HAS_ERN(hPrivate, 51468))
+		{
+			/*
+			 * SLC control
+			 */
+			ui32Reg = RGX_CR_SLC3_CTRL_MISC;
+			ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH |
+			             RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN;
+			RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+		}
+		else
+		{
+			/*
+			 * SLC control
+			 */
+			ui32Reg = RGX_CR_SLC3_CTRL_MISC;
+			ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH |
+			             RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN;
+			RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+
+			/*
+			 * SLC scramble bits
+			 */
+			{
+				IMG_UINT32 i;
+				IMG_UINT32 ui32Count=0;
+				IMG_UINT32 ui32SLCBanks = RGXGetDeviceSLCBanks(hPrivate);
+				IMG_UINT64 aui64ScrambleValues[4];
+				IMG_UINT32 aui32ScrambleRegs[] = {
+					RGX_CR_SLC3_SCRAMBLE,
+					RGX_CR_SLC3_SCRAMBLE2,
+					RGX_CR_SLC3_SCRAMBLE3,
+					RGX_CR_SLC3_SCRAMBLE4
+				};
+
+				if (2 == ui32SLCBanks)
+				{
+					aui64ScrambleValues[0] = IMG_UINT64_C(0x6965a99a55696a6a);
+					aui64ScrambleValues[1] = IMG_UINT64_C(0x6aa9aa66959aaa9a);
+					aui64ScrambleValues[2] = IMG_UINT64_C(0x9a5665965a99a566);
+					aui64ScrambleValues[3] = IMG_UINT64_C(0x5aa69596aa66669a);
+					ui32Count = 4;
+				}
+				else if (4 == ui32SLCBanks)
+				{
+					aui64ScrambleValues[0] = IMG_UINT64_C(0xc6788d722dd29ce4);
+					aui64ScrambleValues[1] = IMG_UINT64_C(0x7272e4e11b279372);
+					aui64ScrambleValues[2] = IMG_UINT64_C(0x87d872d26c6c4be1);
+					aui64ScrambleValues[3] = IMG_UINT64_C(0xe1b4878d4b36e478);
+					ui32Count = 4;
+
+				}
+				else if (8 == ui32SLCBanks)
+				{
+					aui64ScrambleValues[0] = IMG_UINT64_C(0x859d6569e8fac688);
+					aui64ScrambleValues[1] = IMG_UINT64_C(0xf285e1eae4299d33);
+					aui64ScrambleValues[2] = IMG_UINT64_C(0x1e1af2be3c0aa447);
+					ui32Count = 3;
+				}
+
+				for (i = 0; i < ui32Count; i++)
+				{
+					IMG_UINT32 ui32Reg = aui32ScrambleRegs[i];
+					IMG_UINT64 ui64Value = aui64ScrambleValues[i];
+					RGXWriteReg64(hPrivate, ui32Reg, ui64Value);
+				}
+			}
+		}
+
+		if (RGX_DEVICE_HAS_ERN(hPrivate, 45914))
+		{
+			/* Disable the forced SLC coherency which the hardware enables for compatibility with older pdumps */
+			RGXCommentLog(hPrivate, "Disable forced SLC coherency");
+			RGXWriteReg64(hPrivate, RGX_CR_GARTEN_SLC, 0);
+		}
+	}
+	else
+	{
+		IMG_UINT32 ui32Reg;
+		IMG_UINT32 ui32RegVal;
+		IMG_UINT64 ui64RegVal;
+
+#if defined(FIX_HW_BRN_36492)
+		/* Because the WA for this BRN forbids using SLC reset, need to inval it instead */
+		RGXCommentLog(hPrivate, "Invalidate the SLC");
+		RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_FLUSH_INVAL, RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN);
+
+		/* Poll for completion */
+		RGXPollReg32(hPrivate, RGX_CR_SLC_STATUS0, 0x0, RGX_CR_SLC_STATUS0_MASKFULL);
+#endif
+
+		/*
+		 * SLC Bypass control
+		 */
+		ui32Reg = RGX_CR_SLC_CTRL_BYPASS;
+		ui64RegVal = 0;
+
+		if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLCSIZE8)  ||
+		    RGX_DEVICE_HAS_BRN(hPrivate, 61450))
+		{
+			RGXCommentLog(hPrivate, "Bypass SLC for IPF_OBJ and IPF_CPF");
+			ui64RegVal |= (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN |
+						(IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN;
+		}
+
+		if (RGXGetDeviceSLCSize(hPrivate) < RGX_TPU_CACHED_SLC_SIZE_THRESHOLD)
+		{
+			/* Bypass SLC for textures if the SLC size is less than the threshold. */
+			RGXCommentLog(hPrivate, "Bypass SLC for TPU");
+			ui64RegVal |= (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN;
+		}
+
+		if (ui64RegVal != 0)
+		{
+			RGXReadModifyWriteReg64(hPrivate, ui32Reg, ui64RegVal, ~ui64RegVal);
+		}
+
+
+		/*
+		 * SLC Misc control.
+		 *
+		 * Note: This is a 64bit register and we set only the lower 32bits leaving the top
+		 *       32bits (RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS) unchanged from the HW default.
+		 */
+		ui32Reg = RGX_CR_SLC_CTRL_MISC;
+		ui32RegVal = (RGXReadReg32(hPrivate, ui32Reg) & RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN) |
+		             RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1;
+
+		if (RGX_DEVICE_HAS_BRN(hPrivate, 60084))
+		{
+#if !defined(SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING)
+			ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN;
+#else
+			if (RGX_DEVICE_HAS_ERN(hPrivate, 61389))
+			{
+				ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN;
+			}
+#endif
+		}
+		/* Bypass burst combiner if SLC line size is smaller than 1024 bits */
+		if (RGXGetDeviceCacheLineSize(hPrivate) < 1024)
+		{
+			ui32RegVal |= RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN;
+		}
+
+		RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+	}
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInitBIF
+
+ @Description   Initialise RGX BIF
+
+ @Input         hPrivate : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitBIF(const void *hPrivate)
+{
+	if (!RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS))
+	{
+		IMG_DEV_PHYADDR sPCAddr;
+
+		/*
+		 * Acquire the address of the Kernel Page Catalogue.
+		 */
+		RGXAcquireKernelMMUPC(hPrivate, &sPCAddr);
+
+		/*
+		 * Write the kernel catalogue base.
+		 */
+		RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue");
+
+		if (!RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT))
+		{
+			/* Write the cat-base address */
+			RGXWriteKernelMMUPC64(hPrivate,
+			                      RGX_CR_BIF_CAT_BASE0,
+			                      RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT,
+			                      RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT,
+			                      ((sPCAddr.uiAddr
+			                      >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
+			                      << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT)
+			                      & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK);
+			/*
+			 * Trusted Firmware boot
+			 */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+			RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled");
+			RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN);
+#endif
+		}
+		else
+		{
+			IMG_UINT32 uiPCAddr;
+			uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT)
+			             << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT)
+			            & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK);
+			/* Set the mapping context */
+			RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, 0);
+
+			/* Write the cat-base address */
+			RGXWriteKernelMMUPC32(hPrivate,
+			                      RGX_CR_MMU_CBASE_MAPPING,
+			                      RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+			                      RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+			                      uiPCAddr);
+#if defined(SUPPORT_TRUSTED_DEVICE)
+			/* Set-up MMU ID 1 mapping to the same PC used by MMU ID 0 */
+			RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, 1);
+			RGXWriteKernelMMUPC32(hPrivate,
+			                      RGX_CR_MMU_CBASE_MAPPING,
+			                      RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+			                      RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+			                      uiPCAddr);
+#endif /* SUPPORT_TRUSTED_DEVICE */
+		}
+	}
+	else
+	{
+		/*
+		 * Trusted Firmware boot
+		 */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+		RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled");
+		RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN);
+#endif
+	}
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXAXIACELiteInit
+
+ @Description   Initialise AXI-ACE Lite interface
+
+ @Input         hPrivate : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXAXIACELiteInit(const void *hPrivate)
+{
+	IMG_UINT32 ui32RegAddr;
+	IMG_UINT64 ui64RegVal;
+
+	ui32RegAddr = RGX_CR_AXI_ACE_LITE_CONFIGURATION;
+
+	/* Setup AXI-ACE config. Set everything to outer cache */
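+	/* The domain values below follow the AXI ACE encoding (0 = non-shareable,
+	 * 1 = inner-shareable, 2 = outer-shareable, 3 = system): non-snooping
+	 * traffic is placed in the system domain, coherent traffic in the outer
+	 * domain. */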
+	ui64RegVal = (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) |
+	             (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) |
+	             (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT)  |
+	             (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) |
+	             (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) |
+	             (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) |
+	             (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) |
+	             (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT);
+
+	if (RGX_DEVICE_HAS_BRN(hPrivate, 42321))
+	{
+		ui64RegVal |= (((IMG_UINT64) 1) << RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT);
+	}
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT))
+	{
+		RGXCommentLog(hPrivate, "OSID 0 and 1 are trusted");
+		ui64RegVal |= IMG_UINT64_C(0xFC)
+	              << RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT;
+	}
+#endif
+
+	RGXCommentLog(hPrivate, "Init AXI-ACE interface");
+	RGXWriteReg64(hPrivate, ui32RegAddr, ui64RegVal);
+}
+
+
+PVRSRV_ERROR RGXStart(const void *hPrivate)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bDoFWSlaveBoot;
+	const IMG_CHAR *pcRGXFW_PROCESSOR;
+	IMG_BOOL bMetaFW;
+
+	if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS))
+	{
+		pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS;
+		bMetaFW = IMG_FALSE;
+		bDoFWSlaveBoot = IMG_FALSE;
+	}
+	else
+	{
+		pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
+		bMetaFW = IMG_TRUE;
+		bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate);
+	}
+
+	if (RGX_DEVICE_HAS_FEATURE(hPrivate, SYS_BUS_SECURE_RESET))
+	{
+		/* Disable the default sys_bus_secure protection to perform minimal setup */
+		RGXCommentLog(hPrivate, "RGXStart: Disable sys_bus_secure");
+		RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0);
+		(void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */
+	}
+
+#if defined(SUPPORT_SHARED_SLC) && !defined(FIX_HW_BRN_36492)
+	/* When the SLC is shared, the SLC reset is performed by the System layer when calling
+	 * RGXInitSLC (before any device uses it), therefore mask out the SLC bit to avoid
+	 * soft-resetting it here. If HW_BRN_36492, the bit is already masked out.
+	 */
+#define RGX_CR_SOFT_RESET_ALL  (RGX_CR_SOFT_RESET_MASKFULL ^ RGX_CR_SOFT_RESET_SLC_EN)
+	RGXCommentLog(hPrivate, "RGXStart: Shared SLC (don't reset SLC as part of RGX reset)");
+#else
+#define RGX_CR_SOFT_RESET_ALL  (RGX_CR_SOFT_RESET_MASKFULL)
+#endif
+
+	if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+	{
+		/* Set RGX in soft-reset */
+		RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1");
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS);
+
+		RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 2");
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_JONES_ALL | RGX_S7_SOFT_RESET_DUSTS);
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, RGX_S7_SOFT_RESET2);
+
+		/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+		(void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+		/* Take everything out of reset but META/MIPS */
+		RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 1 excluding %s", pcRGXFW_PROCESSOR);
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS | RGX_CR_SOFT_RESET_GARTEN_EN);
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, 0x0);
+
+		(void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+		RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 2 excluding %s", pcRGXFW_PROCESSOR);
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+
+		(void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+	}
+	else
+	{
+		/* Set RGX in soft-reset */
+		RGXCommentLog(hPrivate, "RGXStart: soft reset everything");
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL);
+
+		/* Take Rascal and Dust out of reset */
+		RGXCommentLog(hPrivate, "RGXStart: Rascal and Dust out of reset");
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL ^ RGX_CR_SOFT_RESET_RASCALDUSTS_EN);
+
+		/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+		(void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+		/* Take everything out of reset but META/MIPS */
+		RGXCommentLog(hPrivate, "RGXStart: Take everything out of reset but %s", pcRGXFW_PROCESSOR);
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+	}
+
+	/* Enable clocks */
+	RGXEnableClocks(hPrivate);
+
+	/*
+	 * Initialise SLC.
+	 */
+#if !defined(SUPPORT_SHARED_SLC)
+	__RGXInitSLC(hPrivate);
+#endif
+
+	if (bMetaFW)
+	{
+		if (bDoFWSlaveBoot)
+		{
+			/* Configure META to Slave boot */
+			RGXCommentLog(hPrivate, "RGXStart: META Slave boot");
+			RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0);
+
+		}
+		else
+		{
+			/* Configure META to Master boot */
+			RGXCommentLog(hPrivate, "RGXStart: META Master boot");
+			RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN);
+		}
+	}
+
+	/*
+	 * Initialise Firmware wrapper
+	 */
+	if (bMetaFW)
+	{
+		RGXInitMetaProcWrapper(hPrivate);
+	}
+	else
+	{
+		RGXInitMipsProcWrapper(hPrivate);
+	}
+
+	if (RGX_DEVICE_HAS_FEATURE(hPrivate, AXI_ACELITE))
+	{
+		/* We must init the AXI-ACE interface before the first BIF transaction */
+		RGXAXIACELiteInit(hPrivate);
+	}
+
+	/*
+	 * Initialise BIF.
+	 */
+	RGXInitBIF(hPrivate);
+
+	RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR);
+
+	/* Need to wait for at least 16 cycles before taking META/MIPS out of reset ... */
+	RGXWaitCycles(hPrivate, 32, 3);
+
+	RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0);
+	(void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+	/* ... and afterwards */
+	RGXWaitCycles(hPrivate, 32, 3);
+
+	if (bMetaFW && bDoFWSlaveBoot)
+	{
+		eError = RGXFabricCoherencyTest(hPrivate);
+		if (eError != PVRSRV_OK) return eError;
+
+		RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start");
+		eError = RGXStartFirmware(hPrivate);
+		if (eError != PVRSRV_OK) return eError;
+	}
+	else
+	{
+		RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start");
+	}
+
+	/* Enable Sys Bus security */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	RGXCommentLog(hPrivate, "RGXStart: Enable sys_bus_secure");
+	RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN);
+	(void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */
+#endif
+
+	return eError;
+}
+
+static INLINE void ClearIRQStatusRegister(const void *hPrivate, IMG_BOOL bMetaFW)
+{
+	IMG_UINT32 ui32IRQClearReg;
+	IMG_UINT32 ui32IRQClearMask;
+
+	if (bMetaFW)
+	{
+		ui32IRQClearReg = RGX_CR_META_SP_MSLVIRQSTATUS;
+		ui32IRQClearMask = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK;
+	}
+	else
+	{
+		ui32IRQClearReg = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR;
+		ui32IRQClearMask = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN;
+	}
+
+	RGXWriteReg32(hPrivate, ui32IRQClearReg, ui32IRQClearMask);
+
+#if defined(RGX_FEATURE_OCPBUS)
+	RGXWriteReg32(hPrivate, RGX_CR_OCP_IRQSTATUS_2, RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN);
+#endif
+}
+
+PVRSRV_ERROR RGXStop(const void *hPrivate)
+{
+	IMG_BOOL bMetaFW = !RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS);
+	PVRSRV_ERROR eError;
+
+	ClearIRQStatusRegister(hPrivate, bMetaFW);
+
+	/* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */
+	if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SIDEKICK_IDLE,
+		                      RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN),
+		                      RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN));
+	}
+	else
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_JONES_IDLE,
+		                      RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN),
+		                      RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN));
+	}
+
+	if (eError != PVRSRV_OK) return eError;
+
+
+#if !defined(SUPPORT_SHARED_SLC)
+	/* Wait for SLC to signal IDLE */
+	if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SLC_IDLE,
+		                      RGX_CR_SLC_IDLE_MASKFULL,
+		                      RGX_CR_SLC_IDLE_MASKFULL);
+	}
+	else
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SLC3_IDLE,
+		                      RGX_CR_SLC3_IDLE_MASKFULL,
+		                      RGX_CR_SLC3_IDLE_MASKFULL);
+	}
+#endif /* SUPPORT_SHARED_SLC */
+	if (eError != PVRSRV_OK) return eError;
+
+
+	/* Unset MTS DM association with threads */
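+	/* (Each write below clears the DM_ASSOC field while keeping the value
+	 *  inside the register's valid-bit mask.) */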
+	RGXWriteReg32(hPrivate,
+	              RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC,
+	              RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
+	              & RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL);
+	RGXWriteReg32(hPrivate,
+	              RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC,
+	              RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
+	              & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL);
+	RGXWriteReg32(hPrivate,
+	              RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC,
+	              RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
+	              & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL);
+	RGXWriteReg32(hPrivate,
+	              RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC,
+	              RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
+	              & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL);
+
+
+#if defined(PDUMP)
+	if (bMetaFW)
+	{
+		/* Disabling threads is only required for pdumps to stop the fw gracefully */
+
+		/* Disable thread 0 */
+		eError = RGXWriteMetaRegThroughSP(hPrivate,
+		                                  META_CR_T0ENABLE_OFFSET,
+		                                  ~META_CR_TXENABLE_ENABLE_BIT);
+		if (eError != PVRSRV_OK) return eError;
+
+		/* Disable thread 1 */
+		eError = RGXWriteMetaRegThroughSP(hPrivate,
+		                                  META_CR_T1ENABLE_OFFSET,
+		                                  ~META_CR_TXENABLE_ENABLE_BIT);
+		if (eError != PVRSRV_OK) return eError;
+
+		/* Clear down any irq raised by META (done after disabling the FW
+		 * threads to avoid a race condition).
+		 * This is only really needed for PDumps, but we do it driver-live anyway.
+		 */
+		RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0);
+
+		/* Wait for the Slave Port to finish all the transactions */
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_META_SP_MSLVCTRL1,
+		                      RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+		                      RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+		if (eError != PVRSRV_OK) return eError;
+	}
+#endif
+
+
+	/* Extra Idle checks */
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_BIF_STATUS_MMU,
+	                      0,
+	                      RGX_CR_BIF_STATUS_MMU_MASKFULL);
+	if (eError != PVRSRV_OK) return eError;
+
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_BIFPM_STATUS_MMU,
+	                      0,
+	                      RGX_CR_BIFPM_STATUS_MMU_MASKFULL);
+	if (eError != PVRSRV_OK) return eError;
+
+	if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE) &&
+	    !RGX_DEVICE_HAS_FEATURE(hPrivate, XT_TOP_INFRASTRUCTURE))
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_BIF_READS_EXT_STATUS,
+		                      0,
+		                      RGX_CR_BIF_READS_EXT_STATUS_MASKFULL);
+		if (eError != PVRSRV_OK) return eError;
+	}
+
+
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_BIFPM_READS_EXT_STATUS,
+	                      0,
+	                      RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL);
+	if (eError != PVRSRV_OK) return eError;
+
+	{
+		IMG_UINT64 ui64SLCMask = RGX_CR_SLC_STATUS1_MASKFULL;
+		eError = RGXPollReg64(hPrivate,
+		                      RGX_CR_SLC_STATUS1,
+		                      0,
+		                      ui64SLCMask);
+		if (eError != PVRSRV_OK) return eError;
+	}
+
+	if (4 == RGXGetDeviceSLCBanks(hPrivate))
+	{
+		eError = RGXPollReg64(hPrivate,
+		                      RGX_CR_SLC_STATUS2,
+		                      0,
+		                      RGX_CR_SLC_STATUS2_MASKFULL);
+		if (eError != PVRSRV_OK) return eError;
+	}
+
+#if !defined(SUPPORT_SHARED_SLC)
+	/* Wait for SLC to signal IDLE */
+	if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SLC_IDLE,
+		                      RGX_CR_SLC_IDLE_MASKFULL,
+		                      RGX_CR_SLC_IDLE_MASKFULL);
+	}
+	else
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SLC3_IDLE,
+		                      RGX_CR_SLC3_IDLE_MASKFULL,
+		                      RGX_CR_SLC3_IDLE_MASKFULL);
+	}
+#endif /* SUPPORT_SHARED_SLC */
+	if (eError != PVRSRV_OK) return eError;
+
+
+	/* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */
+	if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SIDEKICK_IDLE,
+		                      RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN),
+		                      RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN));
+	}
+	else
+	{
+		if (!RGX_DEVICE_HAS_FEATURE(hPrivate, FASTRENDER_DM))
+		{
+			eError = RGXPollReg32(hPrivate,
+			                      RGX_CR_JONES_IDLE,
+			                      RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN),
+			                      RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN));
+		}
+	}
+
+	if (eError != PVRSRV_OK) return eError;
+
+
+	if (bMetaFW)
+	{
+		IMG_UINT32 ui32RegValue;
+
+		eError = RGXReadMetaRegThroughSP(hPrivate,
+		                                 META_CR_TxVECINT_BHALT,
+		                                 &ui32RegValue);
+		if (eError != PVRSRV_OK) return eError;
+
+		if ((ui32RegValue & 0xFFFFFFFFU) == 0x0)
+		{
+			/* Wait for Sidekick/Jones to signal IDLE including
+			 * the Garten Wrapper if there is no debugger attached
+			 * (TxVECINT_BHALT = 0x0) */
+			if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+			{
+				eError = RGXPollReg32(hPrivate,
+				                      RGX_CR_SIDEKICK_IDLE,
+				                      RGX_CR_SIDEKICK_IDLE_GARTEN_EN,
+				                      RGX_CR_SIDEKICK_IDLE_GARTEN_EN);
+				if (eError != PVRSRV_OK) return eError;
+			}
+			else
+			{
+				eError = RGXPollReg32(hPrivate,
+				                      RGX_CR_JONES_IDLE,
+				                      RGX_CR_JONES_IDLE_GARTEN_EN,
+				                      RGX_CR_JONES_IDLE_GARTEN_EN);
+				if (eError != PVRSRV_OK) return eError;
+			}
+		}
+	}
+	else
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SIDEKICK_IDLE,
+		                      RGX_CR_SIDEKICK_IDLE_GARTEN_EN,
+		                      RGX_CR_SIDEKICK_IDLE_GARTEN_EN);
+		if (eError != PVRSRV_OK) return eError;
+	}
+
+	return eError;
+}
+
+
+/*
+ * RGXInitSLC
+ */
+#if defined(SUPPORT_SHARED_SLC)
+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void *pvPowerParams;
+
+	if (psDeviceNode == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psDevInfo = psDeviceNode->pvDevice;
+	pvPowerParams = &psDevInfo->sLayerParams;
+
+#if !defined(FIX_HW_BRN_36492)
+	/* reset the SLC */
+	RGXCommentLog(pvPowerParams, "RGXInitSLC: soft reset SLC");
+	RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_SLC_EN);
+
+	/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+	(void) RGXReadReg64(pvPowerParams, RGX_CR_SOFT_RESET);
+
+	/* Take everything out of reset */
+	RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, 0x0);
+#endif
+
+	__RGXInitSLC(pvPowerParams);
+
+	return PVRSRV_OK;
+}
+#endif
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxstartstop.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxstartstop.h
new file mode 100644
index 0000000..ac14118
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxstartstop.h
@@ -0,0 +1,84 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX start/stop header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX start/stop functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXSTARTSTOP_H__)
+#define __RGXSTARTSTOP_H__
+
+/* The routines declared here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when DRM security is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxlayer.h"
+
+/*!
+*******************************************************************************
+
+ @Function      RGXStart
+
+ @Description   Perform GPU reset and initialisation
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXStart(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function      RGXStop
+
+ @Description   Stop Rogue in preparation for power down
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXStop(const void *hPrivate);
+
+#endif /* __RGXSTARTSTOP_H__ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsyncutils.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsyncutils.c
new file mode 100644
index 0000000..a2f066a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsyncutils.c
@@ -0,0 +1,175 @@
+/*************************************************************************/ /*!
+@File           rgxsyncutils.c
+@Title          RGX Sync Utilities
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Sync helper functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxsyncutils.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "allocmem.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+//#define TA3D_CHECKPOINT_DEBUG
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+static
+void _DebugSyncValues(IMG_UINT32 *pui32UpdateValues,
+					  IMG_UINT32 ui32Count)
+{
+	IMG_UINT32 iii;
+	IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues;
+
+	for (iii = 0; iii < ui32Count; iii++)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+		pui32Tmp++;
+	}
+}
+#else
+#define CHKPT_DBG(X)
+#endif
+
+
+PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue,
+										 SYNC_ADDR_LIST	*psSyncList,
+										 SYNC_ADDR_LIST	*psPRSyncList,
+										 PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync,
+										 RGX_SYNC_DATA *psSyncData,
+										 IMG_BOOL bKick3D)
+{
+	IMG_UINT32 *pui32TimelineUpdateWOff = NULL;
+	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+
+	IMG_UINT32 ui32ClientUpdateValueCount = psSyncData->ui32ClientUpdateValueCount;
+
+	/* Space for original client updates, and the one new update */
+	size_t uiUpdateSize = sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateValueCount + 1);
+
+	if (!bKick3D)
+	{
+		/* Additional space for one PR update, only the newest one */
+		uiUpdateSize += sizeof(*pui32IntAllocatedUpdateValues) * 1;
+	}
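+
+	/*
+	 * Resulting layout of the update-value buffer allocated below
+	 * (illustrative; N is ui32ClientUpdateValueCount on entry):
+	 *
+	 *   [0 .. N-1] : original client update values (copied in)
+	 *   [N]        : ui32FenceTimelineUpdateValue (TA/3D timeline update)
+	 *   [N+1]      : ui32FenceTimelineUpdateValue again, used as the PR
+	 *                update (only present when bKick3D is IMG_FALSE)
+	 */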
+
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: About to allocate memory to hold updates in pui32IntAllocatedUpdateValues(<%p>)", __func__, \
+		(void*)pui32IntAllocatedUpdateValues));
+
+	/* Allocate memory to hold the list of update values (including our timeline update) */
+	pui32IntAllocatedUpdateValues = OSAllocMem(uiUpdateSize);
+	if (!pui32IntAllocatedUpdateValues)
+	{
+		/* Failed to allocate memory */
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xcc, uiUpdateSize);
+	pui32TimelineUpdateWOff = pui32IntAllocatedUpdateValues;
+
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: Copying %d %s update values into pui32IntAllocatedUpdateValues(<%p>)", __func__, \
+			ui32ClientUpdateValueCount, bKick3D ? "TA/3D" : "TA/PR", (void*)pui32IntAllocatedUpdateValues));
+		/* Copy the update values into the new memory, then append our timeline update value */
+		OSCachedMemCopy(pui32TimelineUpdateWOff, psSyncData->paui32ClientUpdateValue, ui32ClientUpdateValueCount * sizeof(*psSyncData->paui32ClientUpdateValue));
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+		_DebugSyncValues(pui32TimelineUpdateWOff, ui32ClientUpdateValueCount);
+#endif
+
+		pui32TimelineUpdateWOff += ui32ClientUpdateValueCount;
+	}
+
+	/* Now set the additional update value and append the timeline sync prim
+	 * address to either the render context's 3D or TA update list.
+	 */
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value (0x%x) to psRenderContext->sSyncAddrList%sUpdate...", __func__, \
+		ui32FenceTimelineUpdateValue, bKick3D ? "TA/3D" : "TA/PR"));
+
+	/* Append the TA/3D update */
+	{
+		*pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue;
+		psSyncData->ui32ClientUpdateValueCount++;
+		psSyncData->ui32ClientUpdateCount++;
+		SyncAddrListAppendSyncPrim(psSyncList, psFenceTimelineUpdateSync);
+
+		if (!psSyncData->pauiClientUpdateUFOAddress)
+		{
+			psSyncData->pauiClientUpdateUFOAddress = psSyncList->pasFWAddrs;
+		}
+		/* Update paui32ClientUpdateValue to point to our new list of update values */
+		psSyncData->paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+		_DebugSyncValues(pui32IntAllocatedUpdateValues, psSyncData->ui32ClientUpdateValueCount);
+#endif
+	}
+
+	if (!bKick3D)
+	{
+		/* Use the sSyncAddrList3DUpdate for PR (as it doesn't have one of its own) */
+		*pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue;
+		psSyncData->ui32ClientPRUpdateValueCount = 1;
+		psSyncData->ui32ClientPRUpdateCount = 1;
+		SyncAddrListAppendSyncPrim(psPRSyncList, psFenceTimelineUpdateSync);
+
+		if (!psSyncData->pauiClientPRUpdateUFOAddress)
+		{
+			psSyncData->pauiClientPRUpdateUFOAddress = psPRSyncList->pasFWAddrs;
+		}
+		/* Update paui32ClientPRUpdateValue to point to our new list of update values */
+		psSyncData->paui32ClientPRUpdateValue = &pui32IntAllocatedUpdateValues[psSyncData->ui32ClientUpdateValueCount];
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+		_DebugSyncValues(psSyncData->paui32ClientPRUpdateValue, psSyncData->ui32ClientPRUpdateValueCount);
+#endif
+	}
+
+	/* Do not free the old psSyncData->paui32ClientUpdateValue array,
+	 * as it was constant data passed through the bridge down to PVRSRVRGXKickTA3DKM() */
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsyncutils.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsyncutils.h
new file mode 100644
index 0000000..7ce3a89
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxsyncutils.h
@@ -0,0 +1,86 @@
+/*************************************************************************/ /*!
+@File           rgxsyncutils.h
+@Title          RGX Sync Utilities
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Sync helper functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXSYNCUTILS_H
+#define RGXSYNCUTILS_H
+
+#include "rgxdevice.h"
+#include "sync_server.h"
+#include "rgxdebug.h"
+#include "rgx_fwif_km.h"
+
+typedef struct _RGX_SYNC_DATA_
+{
+	PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress;
+	IMG_UINT32 *paui32ClientUpdateValue;
+	IMG_UINT32 ui32ClientUpdateValueCount;
+	IMG_UINT32 ui32ClientUpdateCount;
+
+	PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress;
+	IMG_UINT32 *paui32ClientPRUpdateValue;
+	IMG_UINT32 ui32ClientPRUpdateValueCount;
+	IMG_UINT32 ui32ClientPRUpdateCount;
+} RGX_SYNC_DATA;
+
+//#define TA3D_CHECKPOINT_DEBUG
+
+#if 0 //defined(TA3D_CHECKPOINT_DEBUG)
+void _DebugSyncValues(IMG_UINT32 *pui32UpdateValues,
+					  IMG_UINT32 ui32Count);
+
+void _DebugSyncCheckpoints(PSYNC_CHECKPOINT *apsSyncCheckpoints,
+						   IMG_UINT32 ui32Count);
+#endif
+
+PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue,
+										 SYNC_ADDR_LIST	*psSyncList,
+										 SYNC_ADDR_LIST	*psPRSyncList,	/* FIXME -- is this required? */
+										 PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync,
+										 RGX_SYNC_DATA *psSyncData,
+										 IMG_BOOL bKick3D);
+
+#endif /* RGXSYNCUTILS_H */
+
+/******************************************************************************
+ End of file (rgxsyncutils.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxta3d.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxta3d.c
new file mode 100644
index 0000000..d9054cf
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxta3d.c
@@ -0,0 +1,5102 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX TA/3D routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX TA/3D routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+/* for the offsetof macro */
+#if defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "ri_server.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "rgx_memallocflags.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "rgxsyncutils.h"
+#include "htbuffer.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "physmem.h"
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "process_stats.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "hash.h"
+#include "rgxworkest.h"
+
+#define HASH_CLEAN_LIMIT 6
+#endif
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TA3D_UFO_DUMP	0
+
+//#define TA3D_CHECKPOINT_DEBUG
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+static INLINE
+void _DebugSyncValues(const IMG_CHAR *pszFunction,
+		const IMG_UINT32 *pui32UpdateValues,
+		const IMG_UINT32 ui32Count)
+{
+	IMG_UINT32 i;
+	IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues;
+
+	for (i = 0; i < ui32Count; i++)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", pszFunction, i, (void*)pui32Tmp, *pui32Tmp));
+		pui32Tmp++;
+	}
+}
+
+static INLINE
+void _DebugSyncCheckpoints(const IMG_CHAR *pszFunction,
+		const IMG_CHAR *pszDMName,
+		const PSYNC_CHECKPOINT *apsSyncCheckpoints,
+		const IMG_UINT32 ui32Count)
+{
+	IMG_UINT32 i;
+
+	for (i = 0; i < ui32Count; i++)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFence%sSyncCheckpoints[%d]=<%p>", pszFunction, pszDMName, i, *(apsSyncCheckpoints + i)));
+	}
+}
+
+#else
+#define CHKPT_DBG(X)
+#endif
+
+/* define the number of commands required to be set up by the CCB helper */
+/* 1 command for the TA */
+#define CCB_CMD_HELPER_NUM_TA_COMMANDS 1
+/* Up to 3 commands for the 3D (partial render fence, partial render, and render) */
+#define CCB_CMD_HELPER_NUM_3D_COMMANDS 3
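+/* Illustrative slot usage for the 3D helper array (a sketch inferred from
+ * the comment above; the kick code later in this file determines the actual
+ * assignment per submission):
+ *   slot 0 - partial render fence
+ *   slot 1 - partial render
+ *   slot 2 - render
+ */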
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#define WORKEST_CYCLES_PREDICTION_GET(x) ((x).ui64CyclesPrediction)
+#else
+#define WORKEST_CYCLES_PREDICTION_GET(x) (NO_CYCEST)
+#endif
+
+typedef struct {
+	DEVMEM_MEMDESC				*psContextStateMemDesc;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+} RGX_SERVER_RC_TA_DATA;
+
+typedef struct {
+	DEVMEM_MEMDESC				*psContextStateMemDesc;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+} RGX_SERVER_RC_3D_DATA;
+
+struct _RGX_SERVER_RENDER_CONTEXT_ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	/* This lock protects usage of the render context.
+	 * It ensures only one kick is being prepared and/or submitted on
+	 * this render context at any time.
+	 */
+	POS_LOCK				hLock;
+	RGX_CCB_CMD_HELPER_DATA asTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS];
+	RGX_CCB_CMD_HELPER_DATA as3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS];
+#endif
+	PVRSRV_DEVICE_NODE			*psDeviceNode;
+	DEVMEM_MEMDESC				*psFWRenderContextMemDesc;
+	DEVMEM_MEMDESC				*psFWFrameworkMemDesc;
+	RGX_SERVER_RC_TA_DATA		sTAData;
+	RGX_SERVER_RC_3D_DATA		s3DData;
+	IMG_UINT32					ui32CleanupStatus;
+#define RC_CLEANUP_TA_COMPLETE		(1 << 0)
+#define RC_CLEANUP_3D_COMPLETE		(1 << 1)
+	PVRSRV_CLIENT_SYNC_PRIM		*psCleanupSync;
+	DLLIST_NODE					sListNode;
+	SYNC_ADDR_LIST				sSyncAddrListTAFence;
+	SYNC_ADDR_LIST				sSyncAddrListTAUpdate;
+	SYNC_ADDR_LIST				sSyncAddrList3DFence;
+	SYNC_ADDR_LIST				sSyncAddrList3DUpdate;
+	ATOMIC_T					hIntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	WORKEST_HOST_DATA			sWorkEstData;
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+	struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+};
+
+
+/*
+	Static functions used by render context code
+ */
+
+static
+PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData,
+		PVRSRV_DEVICE_NODE *psDeviceNode,
+		PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+			psTAData->psServerCommonContext,
+			psCleanupSync,
+			RGXFWIF_DM_TA,
+			PDUMP_FLAGS_NONE);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+	/* ... it has, so we can free its resources */
+#if defined(DEBUG)
+	/* Log the number of TA context stores which occurred */
+	{
+		RGXFWIF_TACTX_STATE	*psFWTAState;
+
+		eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc,
+				(void**)&psFWTAState);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to map firmware render context state (%s)",
+					__func__, PVRSRVGetErrorString(eError)));
+		}
+		else
+		{
+			/* Release the CPU virt addr */
+			DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc);
+		}
+	}
+#endif
+	FWCommonContextFree(psTAData->psServerCommonContext);
+	DevmemFwFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc);
+	psTAData->psServerCommonContext = NULL;
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData,
+		PVRSRV_DEVICE_NODE *psDeviceNode,
+		PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+			ps3DData->psServerCommonContext,
+			psCleanupSync,
+			RGXFWIF_DM_3D,
+			PDUMP_FLAGS_NONE);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+	/* ... it has, so we can free its resources */
+#if defined(DEBUG)
+	/* Log the number of 3D context stores which occurred */
+	{
+		RGXFWIF_3DCTX_STATE	*psFW3DState;
+
+		eError = DevmemAcquireCpuVirtAddr(ps3DData->psContextStateMemDesc,
+				(void**)&psFW3DState);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to map firmware render context state (%s)",
+					__func__, PVRSRVGetErrorString(eError)));
+		}
+		else
+		{
+			/* Release the CPU virt addr */
+			DevmemReleaseCpuVirtAddr(ps3DData->psContextStateMemDesc);
+		}
+	}
+#endif
+
+	FWCommonContextFree(ps3DData->psServerCommonContext);
+	DevmemFwFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc);
+	ps3DData->psServerCommonContext = NULL;
+	return PVRSRV_OK;
+}
+
+static void _RGXDumpPMRPageList(DLLIST_NODE *psNode)
+{
+	RGX_PMR_NODE *psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+	PVRSRV_ERROR			eError;
+
+	eError = PMRDumpPageList(psPMRNode->psPMR,
+			RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"Error (%s) printing pmr %p",
+				PVRSRVGetErrorString(eError),
+				psPMRNode->psPMR));
+	}
+}
+
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList)
+{
+	DLLIST_NODE *psNode, *psNext;
+
+	PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx,
+			psFreeList->sFreeListFWDevVAddr.ui32Addr,
+			psFreeList->ui32FreelistID,
+			psFreeList->ui64FreelistChecksum));
+
+	/* Dump Init FreeList page list */
+	PVR_LOG(("  Initial Memory block"));
+	dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext)
+	{
+		_RGXDumpPMRPageList(psNode);
+	}
+
+	/* Dump Grow FreeList page list */
+	PVR_LOG(("  Grow Memory blocks"));
+	dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
+	{
+		_RGXDumpPMRPageList(psNode);
+	}
+
+	return IMG_TRUE;
+}
+
+static void _CheckFreelist(RGX_FREELIST *psFreeList,
+		IMG_UINT32 ui32NumOfPagesToCheck,
+		IMG_UINT64 ui64ExpectedCheckSum,
+		IMG_UINT64 *pui64CalculatedCheckSum)
+{
+#if defined(NO_HARDWARE)
+	/* No checksum needed as we have all information in the pdumps */
+	PVR_UNREFERENCED_PARAMETER(psFreeList);
+	PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck);
+	PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum);
+	*pui64CalculatedCheckSum = 0;
+#else
+	PVRSRV_ERROR eError;
+	size_t uiNumBytes;
+	IMG_UINT8* pui8Buffer;
+	IMG_UINT32* pui32Buffer;
+	IMG_UINT32 ui32CheckSumAdd = 0;
+	IMG_UINT32 ui32CheckSumXor = 0;
+	IMG_UINT32 ui32Entry;
+	IMG_UINT32 ui32Entry2;
+	IMG_BOOL bFreelistBad = IMG_FALSE;
+
+	*pui64CalculatedCheckSum = 0;
+
+	PVR_ASSERT(ui32NumOfPagesToCheck <= (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages));
+
+	/* Allocate Buffer of the size of the freelist */
+	pui8Buffer = OSAllocMem(ui32NumOfPagesToCheck * sizeof(IMG_UINT32));
+	if (pui8Buffer == NULL)
+	{
+		PVR_LOG(("%s: Failed to allocate buffer to check freelist %p!",
+				__func__, psFreeList));
+		PVR_ASSERT(0);
+		return;
+	}
+
+	/* Copy freelist content into Buffer */
+	eError = PMR_ReadBytes(psFreeList->psFreeListPMR,
+			psFreeList->uiFreeListPMROffset +
+			(((psFreeList->ui32MaxFLPages -
+					psFreeList->ui32CurrentFLPages -
+					psFreeList->ui32ReadyFLPages) * sizeof(IMG_UINT32)) &
+					~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)),
+					pui8Buffer,
+					ui32NumOfPagesToCheck * sizeof(IMG_UINT32),
+					&uiNumBytes);
+	if (eError != PVRSRV_OK)
+	{
+		OSFreeMem(pui8Buffer);
+		PVR_LOG(("%s: Failed to get freelist data for freelist %p!",
+				__func__, psFreeList));
+		PVR_ASSERT(0);
+		return;
+	}
+
+	PVR_ASSERT(uiNumBytes == ui32NumOfPagesToCheck * sizeof(IMG_UINT32));
+
+	/* Generate checksum (skipping the first page if not allocated) */
+	pui32Buffer = (IMG_UINT32 *)pui8Buffer;
+	ui32Entry = ((psFreeList->ui32GrowFLPages == 0  &&  psFreeList->ui32CurrentFLPages > 1) ? 1 : 0);
+	for (/*ui32Entry*/ ; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++)
+	{
+		ui32CheckSumAdd += pui32Buffer[ui32Entry];
+		ui32CheckSumXor ^= pui32Buffer[ui32Entry];
+
+		/* Check for double entries */
+		for (ui32Entry2 = ui32Entry+1; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++)
+		{
+			if (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2])
+			{
+				PVR_LOG(("%s: Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d",
+						__func__,
+						psFreeList->sFreeListFWDevVAddr.ui32Addr,
+						pui32Buffer[ui32Entry2],
+						ui32Entry,
+						ui32Entry2,
+						psFreeList->ui32CurrentFLPages));
+				bFreelistBad = IMG_TRUE;
+				break;
+			}
+		}
+	}
+
+	OSFreeMem(pui8Buffer);
+
+	/* Check the calculated checksum against the expected checksum... */
+	*pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;
+
+	if (ui64ExpectedCheckSum != 0  &&  ui64ExpectedCheckSum != *pui64CalculatedCheckSum)
+	{
+		PVR_LOG(("%s: Checksum mismatch for freelist %p! Expected 0x%016" IMG_UINT64_FMTSPECx " calculated 0x%016" IMG_UINT64_FMTSPECx,
+				__func__, psFreeList,
+				ui64ExpectedCheckSum, *pui64CalculatedCheckSum));
+		bFreelistBad = IMG_TRUE;
+	}
+
+	if (bFreelistBad)
+	{
+		PVR_LOG(("%s: Sleeping for ever!", __func__));
+		PVR_ASSERT(!bFreelistBad);
+	}
+#endif
+}
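+
+/*
+ * Worked example of the checksum composition above (illustrative values):
+ * for page entries { 0x1000, 0x2000, 0x3000 } the additive part is
+ * 0x1000 + 0x2000 + 0x3000 = 0x6000 and the XOR part is
+ * 0x1000 ^ 0x2000 ^ 0x3000 = 0x0000, giving a 64-bit checksum of
+ * ((IMG_UINT64)0x0000 << 32) | 0x6000 = 0x0000000000006000.
+ */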
+
+
+/*
+ *  Function to work out the number of freelist pages to reserve for growing
+ *  within the FW without having to wait for the host to progress a grow
+ *  request.
+ *
+ *  The number of pages must be a multiple of 4 to align the PM addresses
+ *  for the initial freelist allocation and also be less than the grow size.
+ *
+ *  If the threshold or grow size means less than 4 pages, then the feature
+ *  is not used.
+ */
+static IMG_UINT32 _CalculateFreelistReadyPages(RGX_FREELIST *psFreeList,
+		IMG_UINT32  ui32FLPages)
+{
+	IMG_UINT32  ui32ReadyFLPages = ((ui32FLPages * psFreeList->ui32GrowThreshold) / 100) &
+			~((RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE/sizeof(IMG_UINT32))-1);
+
+	if (ui32ReadyFLPages > psFreeList->ui32GrowFLPages)
+	{
+		ui32ReadyFLPages = psFreeList->ui32GrowFLPages;
+	}
+
+	return ui32ReadyFLPages;
+}
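+
+/*
+ * Worked example (illustrative, and assuming
+ * RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE is 16 bytes, i.e. 4 freelist
+ * entries): with ui32FLPages = 1024 and ui32GrowThreshold = 10 (percent),
+ * the raw value is 102 pages, which the alignment mask rounds down to 100;
+ * if ui32GrowFLPages were 64, the result would then be clamped to 64.
+ */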
+
+
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+		IMG_UINT32 ui32NumPages,
+		PDLLIST_NODE pListHeader)
+{
+	RGX_PMR_NODE	*psPMRNode;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32  ui32MappingTable = 0;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiLength;
+	IMG_DEVMEM_SIZE_T uistartPage;
+	PVRSRV_ERROR eError;
+	const IMG_CHAR * pszAllocName = "Free List";
+
+	/* Are we allowed to grow ? */
+	if (psFreeList->ui32MaxFLPages - (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) < ui32NumPages)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				"Freelist [0x%p]: grow by %u pages denied. "
+				"Max PB size reached (current pages %u+%u/%u)",
+				psFreeList,
+				ui32NumPages,
+				psFreeList->ui32CurrentFLPages,
+				psFreeList->ui32ReadyFLPages,
+				psFreeList->ui32MaxFLPages));
+		return PVRSRV_ERROR_PBSIZE_ALREADY_MAX;
+	}
+
+	/* Allocate kernel memory block structure */
+	psPMRNode = OSAllocMem(sizeof(*psPMRNode));
+	if (psPMRNode == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed to allocate host data structure",
+				__func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocHost;
+	}
+
+	/*
+	 * Lock protects simultaneous manipulation of:
+	 * - the memory block list
+	 * - the freelist's ui32CurrentFLPages
+	 */
+	OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+
+	/*
+	 *  The PM never takes the last page in a freelist, so if this block
+	 *  of pages is the first one and there is no ability to grow, then
+	 *  we can skip allocating one 4K page for the lowest entry.
+	 */
+	if (OSGetPageSize() > RGX_BIF_PM_PHYSICAL_PAGE_SIZE)
+	{
+		/*
+		 * The allocation size will be rounded up to the OS page size,
+		 * so any attempt to trim it here would be undone later.
+		 */
+		psPMRNode->bFirstPageMissing = IMG_FALSE;
+	}
+	else
+	{
+		psPMRNode->bFirstPageMissing = (psFreeList->ui32GrowFLPages == 0  &&  ui32NumPages > 1);
+	}
+
+	psPMRNode->ui32NumPages = ui32NumPages;
+	psPMRNode->psFreeList = psFreeList;
+
+	/* Allocate Memory Block */
+	PDUMPCOMMENT("Allocate PB Block (Pages %08X)", ui32NumPages);
+	uiSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE;
+	if (psPMRNode->bFirstPageMissing)
+	{
+		uiSize -= RGX_BIF_PM_PHYSICAL_PAGE_SIZE;
+	}
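+	/*
+	 * Illustrative sketch: for a 16-page block with bFirstPageMissing set,
+	 * only 15 physical pages are backed here. The PMRWritePMPageList() call
+	 * below then skips the lowest freelist entry (it offsets the target by
+	 * sizeof(IMG_UINT32) and shortens the length accordingly), matching the
+	 * observation above that the PM never takes the last page.
+	 */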
+	eError = PhysmemNewRamBackedPMR(NULL,
+			psFreeList->psDevInfo->psDeviceNode,
+			uiSize,
+			uiSize,
+			1,
+			1,
+			&ui32MappingTable,
+			RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE,
+			OSStringLength(pszAllocName) + 1,
+			pszAllocName,
+			psFreeList->ownerPid,
+			&psPMRNode->psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate PB block of size: 0x%016" IMG_UINT64_FMTSPECX,
+				__func__,
+				(IMG_UINT64)uiSize));
+		goto ErrorBlockAlloc;
+	}
+
+	/* Zeroing physical pages pointed by the PMR */
+	if (psFreeList->psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+	{
+		eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to zero PMR %p of freelist %p (%s)",
+					__func__,
+					psPMRNode->psPMR,
+					psFreeList,
+					PVRSRVGetErrorString(eError)));
+			PVR_ASSERT(0);
+		}
+	}
+
+	uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+	uistartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+	uiOffset = psFreeList->uiFreeListPMROffset + ((uistartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1));
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+
+	eError = RIWritePMREntryWithOwnerKM(psPMRNode->psPMR,
+			psFreeList->ownerPid);
+	if ( eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: call to RIWritePMREntryWithOwnerKM failed (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+	}
+
+	/* Attach RI information */
+	eError = RIWriteMEMDESCEntryKM(psPMRNode->psPMR,
+			OSStringNLength(pszAllocName, DEVMEM_ANNOTATION_MAX_LEN),
+			pszAllocName,
+			0,
+			uiSize,
+			IMG_FALSE,
+			IMG_FALSE,
+			&psPMRNode->hRIHandle);
+	PVR_LOG_IF_ERROR(eError, "RIWriteMEMDESCEntryKM");
+
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+	/* write Freelist with Memory Block physical addresses */
+	eError = PMRWritePMPageList(
+			/* Target PMR, offset, and length */
+			psFreeList->psFreeListPMR,
+			(psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset),
+			(psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength),
+			/* Referenced PMR, and "page" granularity */
+			psPMRNode->psPMR,
+			RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+			&psPMRNode->psPageList);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to write pages of Node %p",
+				__func__,
+				psPMRNode));
+		goto ErrorPopulateFreelist;
+	}
+
+#if defined(SUPPORT_SHADOW_FREELISTS)
+	/* Copy freelist memory to shadow freelist */
+	{
+		const IMG_UINT32 ui32FLMaxSize = psFreeList->ui32MaxFLPages * sizeof (IMG_UINT32);
+		const IMG_UINT32 ui32MapSize = ui32FLMaxSize * 2;
+		const IMG_UINT32 ui32CopyOffset = uiOffset - psFreeList->uiFreeListPMROffset;
+		IMG_BYTE *pFLMapAddr;
+		size_t uiNumBytes;
+		PVRSRV_ERROR res;
+		IMG_HANDLE hMapHandle;
+
+		/* Map both the FL and the shadow FL */
+		res = PMRAcquireKernelMappingData(psFreeList->psFreeListPMR, psFreeList->uiFreeListPMROffset, ui32MapSize,
+				(void**) &pFLMapAddr, &uiNumBytes, &hMapHandle);
+		if (res != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to map freelist (ID=%d)",
+					__func__,
+					psFreeList->ui32FreelistID));
+			goto ErrorPopulateFreelist;
+		}
+
+		/* Copy only the newly added memory */
+		memcpy(pFLMapAddr + ui32FLMaxSize + ui32CopyOffset, pFLMapAddr + ui32CopyOffset , uiLength);
+
+#if defined(PDUMP)
+		PDUMPCOMMENT("Initialize shadow freelist");
+
+		/* Translate memcpy to pdump */
+		{
+			IMG_DEVMEM_OFFSET_T uiCurrOffset;
+
+			for (uiCurrOffset = uiOffset; (uiCurrOffset - uiOffset) < uiLength; uiCurrOffset += sizeof (IMG_UINT32))
+			{
+				PMRPDumpCopyMem32(psFreeList->psFreeListPMR,
+						uiCurrOffset + ui32FLMaxSize,
+						psFreeList->psFreeListPMR,
+						uiCurrOffset,
+						":SYSMEM:$1",
+						PDUMP_FLAGS_CONTINUOUS);
+			}
+		}
+#endif
+
+
+		res = PMRReleaseKernelMappingData(psFreeList->psFreeListPMR, hMapHandle);
+
+		if (res != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to release freelist mapping (ID=%d)",
+					__func__,
+					psFreeList->ui32FreelistID));
+			goto ErrorPopulateFreelist;
+		}
+	}
+#endif
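+
+	/*
+	 * Layout assumed by the shadow-freelist copy above (illustrative, only
+	 * relevant when SUPPORT_SHADOW_FREELISTS is defined):
+	 *
+	 *   pFLMapAddr + 0              : live freelist (ui32FLMaxSize bytes)
+	 *   pFLMapAddr + ui32FLMaxSize  : shadow freelist (same size)
+	 *
+	 * Only the newly grown region [ui32CopyOffset, ui32CopyOffset + uiLength)
+	 * is mirrored into the shadow at the same relative offset.
+	 */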
+
+	/* The node must be added to the tail, otherwise the freelist population won't work */
+	dllist_add_to_head(pListHeader, &psPMRNode->sMemoryBlock);
+
+	/* Update number of available pages */
+	psFreeList->ui32CurrentFLPages += ui32NumPages;
+
+	/* Update statistics (needs to happen before the ReadyFL calculation to also count those pages) */
+	if (psFreeList->ui32NumHighPages < psFreeList->ui32CurrentFLPages)
+	{
+		psFreeList->ui32NumHighPages = psFreeList->ui32CurrentFLPages;
+	}
+
+	/* Reserve a number of ready pages so the FW can handle OOM quickly and request a grow asynchronously. */
+	psFreeList->ui32ReadyFLPages    = _CalculateFreelistReadyPages(psFreeList, psFreeList->ui32CurrentFLPages);
+	psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages;
+
+	if (psFreeList->bCheckFreelist)
+	{
+		/*
+		 *  We can only calculate the freelist checksum when the list is full
+		 *  (e.g. at initial creation time). At other times the checksum cannot
+		 *  be calculated and has to be disabled for this freelist.
+		 */
+		if ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages)
+		{
+			_CheckFreelist(psFreeList, ui32NumPages, 0, &psFreeList->ui64FreelistChecksum);
+		}
+		else
+		{
+			psFreeList->ui64FreelistChecksum = 0;
+		}
+	}
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+			"Freelist [%p]: %s %u pages (pages=%u+%u/%u checksum=0x%016" IMG_UINT64_FMTSPECx "%s)",
+			psFreeList,
+			((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages ? "Create initial" : "Grow by"),
+			ui32NumPages,
+			psFreeList->ui32CurrentFLPages,
+			psFreeList->ui32ReadyFLPages,
+			psFreeList->ui32MaxFLPages,
+			psFreeList->ui64FreelistChecksum,
+			(psPMRNode->bFirstPageMissing ? " - lowest page not allocated" : "")));
+
+	return PVRSRV_OK;
+
+	/* Error handling */
+	ErrorPopulateFreelist:
+	PMRUnrefPMR(psPMRNode->psPMR);
+
+	ErrorBlockAlloc:
+	OSFreeMem(psPMRNode);
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+	ErrorAllocHost:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+
+}
+
+static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader,
+		RGX_FREELIST *psFreeList)
+{
+	DLLIST_NODE *psNode;
+	RGX_PMR_NODE *psPMRNode;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32OldValue;
+
+	/*
+	 * Lock protects simultaneous manipulation of:
+	 * - the memory block list
+	 * - the freelist's ui32CurrentFLPages value
+	 */
+	PVR_ASSERT(pListHeader);
+	PVR_ASSERT(psFreeList);
+	PVR_ASSERT(psFreeList->psDevInfo);
+	PVR_ASSERT(psFreeList->psDevInfo->hLockFreeList);
+
+	OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+	/* Get node from head of list and remove it */
+	psNode = dllist_get_next_node(pListHeader);
+	if (psNode)
+	{
+		dllist_remove_node(psNode);
+
+		psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+		PVR_ASSERT(psPMRNode);
+		PVR_ASSERT(psPMRNode->psPMR);
+		PVR_ASSERT(psPMRNode->psFreeList);
+
+		/* remove block from freelist list */
+
+		/* Unwrite Freelist with Memory Block physical addresses */
+		eError = PMRUnwritePMPageList(psPMRNode->psPageList);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to unwrite pages of Node %p",
+					__func__,
+					psPMRNode));
+			PVR_ASSERT(IMG_FALSE);
+		}
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+
+		if (psPMRNode->hRIHandle)
+		{
+			PVRSRV_ERROR eError;
+
+			eError = RIDeleteMEMDESCEntryKM(psPMRNode->hRIHandle);
+			PVR_LOG_IF_ERROR(eError, "RIDeleteMEMDESCEntryKM");
+		}
+
+#endif  /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+		/* Free PMR (We should be the only one that holds a ref on the PMR) */
+		eError = PMRUnrefPMR(psPMRNode->psPMR);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to free PB block %p (%s)",
+					__func__,
+					psPMRNode->psPMR,
+					PVRSRVGetErrorString(eError)));
+			PVR_ASSERT(IMG_FALSE);
+		}
+
+		/* update available pages in freelist */
+		ui32OldValue = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages;
+
+		/*
+		 * Deallocated pages are first deducted from the ReadyPages bank;
+		 * once none are left, the remainder is deducted from the
+		 * CurrentPages bank.
+		 */
+		if (psPMRNode->ui32NumPages > psFreeList->ui32ReadyFLPages)
+		{
+			psFreeList->ui32CurrentFLPages -= psPMRNode->ui32NumPages - psFreeList->ui32ReadyFLPages;
+			psFreeList->ui32ReadyFLPages = 0;
+		}
+		else
+		{
+			psFreeList->ui32ReadyFLPages -= psPMRNode->ui32NumPages;
+		}
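+
+		/*
+		 * Worked example (illustrative): with ui32NumPages = 8,
+		 * ui32ReadyFLPages = 3 and ui32CurrentFLPages = 20, the ready bank
+		 * is drained first (3 pages) and the remaining 5 come out of the
+		 * current bank, leaving Ready = 0 and Current = 15.
+		 */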
+
+		/* check underflow */
+		PVR_ASSERT(ui32OldValue > (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages));
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)",
+				psFreeList,
+				psPMRNode->ui32NumPages,
+				psFreeList->ui32CurrentFLPages,
+				psFreeList->ui32MaxFLPages));
+
+		OSFreeMem(psPMRNode);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				"Freelist [0x%p]: shrink denied. PB already at initial PB size (%u pages)",
+				psFreeList,
+				psFreeList->ui32InitFLPages));
+		eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN;
+	}
+
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+	return eError;
+}
+
+static RGX_FREELIST *FindFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID)
+{
+	DLLIST_NODE *psNode, *psNext;
+	RGX_FREELIST *psFreeList = NULL;
+
+	OSLockAcquire(psDevInfo->hLockFreeList);
+
+	dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+	{
+		RGX_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+
+		if (psThisFreeList->ui32FreelistID == ui32FreelistID)
+		{
+			psFreeList = psThisFreeList;
+			break;
+		}
+	}
+
+	OSLockRelease(psDevInfo->hLockFreeList);
+	return psFreeList;
+}
+
+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           IMG_UINT32 ui32FreelistID)
+{
+	RGX_FREELIST *psFreeList = NULL;
+	RGXFWIF_KCCB_CMD s3DCCBCmd;
+	IMG_UINT32 ui32GrowValue;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psDevInfo);
+
+	psFreeList = FindFreeList(psDevInfo, ui32FreelistID);
+
+	if (psFreeList)
+	{
+		/* Since the FW made the request, it has already consumed the ready pages; update the host struct accordingly */
+		psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages;
+		psFreeList->ui32ReadyFLPages = 0;
+
+		/* Try to grow the freelist */
+		eError = RGXGrowFreeList(psFreeList,
+				psFreeList->ui32GrowFLPages,
+				&psFreeList->sMemoryBlockHead);
+
+		if (eError == PVRSRV_OK)
+		{
+			/* Grow successful, so report the grow size back to the FW */
+			ui32GrowValue = psFreeList->ui32GrowFLPages;
+
+			psFreeList->ui32NumGrowReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+			/* Update Stats */
+			PVRSRVStatsUpdateFreelistStats(0,
+					1, /* Add 1 to the appropriate counter (Requests by FW) */
+					psFreeList->ui32InitFLPages,
+					psFreeList->ui32NumHighPages,
+					psFreeList->ownerPid);
+
+#endif
+
+		}
+		else
+		{
+			/* Grow failed */
+			ui32GrowValue = 0;
+			PVR_DPF((PVR_DBG_ERROR,
+					"Grow for FreeList %p failed (%s)",
+					psFreeList,
+					PVRSRVGetErrorString(eError)));
+		}
+
+		/* send feedback */
+		s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE;
+		s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+		s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue;
+		s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages;
+		s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages;
+
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psDevInfo,
+					RGXFWIF_DM_3D,
+					&s3DCCBCmd,
+					0,
+					PDUMP_FLAGS_NONE);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+		/* The kernel CCB should never fill up, as the FW processes commands right away */
+
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+	else
+	{
+		/* Should never happen */
+		PVR_DPF((PVR_DBG_ERROR,
+				"FreeList Lookup for FreeList ID 0x%08x failed (Populate)",
+				ui32FreelistID));
+		PVR_ASSERT(IMG_FALSE);
+	}
+}
+
+static void _RGXFreeListReconstruction(PDLLIST_NODE psNode)
+{
+
+	PVRSRV_RGXDEV_INFO 		*psDevInfo;
+	RGX_FREELIST			*psFreeList;
+	RGX_PMR_NODE			*psPMRNode;
+	PVRSRV_ERROR			eError;
+	IMG_DEVMEM_OFFSET_T		uiOffset;
+	IMG_DEVMEM_SIZE_T		uiLength;
+	IMG_UINT32				ui32StartPage;
+
+	psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+	psFreeList = psPMRNode->psFreeList;
+	PVR_ASSERT(psFreeList);
+	psDevInfo = psFreeList->psDevInfo;
+	PVR_ASSERT(psDevInfo);
+
+	uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+	ui32StartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+	uiOffset = psFreeList->uiFreeListPMROffset + ((ui32StartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1));
+
+	PMRUnwritePMPageList(psPMRNode->psPageList);
+	psPMRNode->psPageList = NULL;
+	eError = PMRWritePMPageList(
+			/* Target PMR, offset, and length */
+			psFreeList->psFreeListPMR,
+			(psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset),
+			(psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength),
+			/* Referenced PMR, and "page" granularity */
+			psPMRNode->psPMR,
+			RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+			&psPMRNode->psPageList);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Error (%s) writing FL 0x%08x",
+				__func__,
+				PVRSRVGetErrorString(eError),
+				(IMG_UINT32)psFreeList->ui32FreelistID));
+	}
+
+	/* Zeroing physical pages pointed by the reconstructed freelist */
+	if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+	{
+		eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to zero PMR %p of freelist %p (%s)",
+					__func__,
+					psPMRNode->psPMR,
+					psFreeList,
+					PVRSRVGetErrorString(eError)));
+			PVR_ASSERT(0);
+		}
+	}
+
+
+	psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages;
+}
+
+
+static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList)
+{
+	IMG_UINT32        ui32OriginalFLPages;
+	DLLIST_NODE       *psNode, *psNext;
+	RGXFWIF_FREELIST  *psFWFreeList;
+	PVRSRV_ERROR      eError;
+
+	//PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: Reconstructing freelist %p (ID=%u)", psFreeList, psFreeList->ui32FreelistID));
+
+	/* Do the FreeList Reconstruction */
+	ui32OriginalFLPages            = psFreeList->ui32CurrentFLPages;
+	psFreeList->ui32CurrentFLPages = 0;
+
+	/* Reconstructing Init FreeList pages */
+	dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext)
+	{
+		_RGXFreeListReconstruction(psNode);
+	}
+
+	/* Reconstructing Grow FreeList pages */
+	dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
+	{
+		_RGXFreeListReconstruction(psNode);
+	}
+
+	/* Ready pages are allocated but kept hidden until OOM occurs. */
+	psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages;
+	if (psFreeList->ui32CurrentFLPages != ui32OriginalFLPages)
+	{
+		PVR_ASSERT(psFreeList->ui32CurrentFLPages == ui32OriginalFLPages);
+		return PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED;
+	}
+
+	/* Reset the firmware freelist structure */
+	eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	psFWFreeList->ui32CurrentStackTop       = psFWFreeList->ui32CurrentPages - 1;
+	psFWFreeList->ui32AllocatedPageCount    = 0;
+	psFWFreeList->ui32AllocatedMMUPageCount = 0;
+	psFWFreeList->ui32HWRCounter++;
+
+	DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+	/* Check the Freelist checksum if required (as the list is fully populated) */
+	if (psFreeList->bCheckFreelist)
+	{
+		IMG_UINT64  ui64CheckSum;
+
+		_CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
+	}
+
+	return eError;
+}
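+
+/*
+ * Note on the reset above (illustrative reading of the code): the firmware
+ * treats the freelist as a stack of page addresses, so a fully repopulated
+ * list has its stack top at index ui32CurrentPages - 1 with no pages handed
+ * out; the allocated-page counters are therefore zeroed and the HWR counter
+ * is bumped to record that a reconstruction took place.
+ */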
+
+
+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32FreelistsCount,
+		IMG_UINT32 *paui32Freelists)
+{
+	PVRSRV_ERROR      eError = PVRSRV_OK;
+	DLLIST_NODE       *psNode, *psNext;
+	IMG_UINT32        ui32Loop;
+	RGXFWIF_KCCB_CMD  sTACCBCmd;
+
+	PVR_ASSERT(psDevInfo != NULL);
+	PVR_ASSERT(ui32FreelistsCount <= (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS));
+
+	//PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: %u freelist(s) requested for reconstruction", ui32FreelistsCount));
+
+	/*
+	 *  Initialise the response command (in case we don't find a freelist ID)...
+	 */
+	sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE;
+	sTACCBCmd.uCmdData.sFreeListsReconstructionData.ui32FreelistsCount = ui32FreelistsCount;
+
+	for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+	{
+		sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] |
+				RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+	}
+
+	/*
+	 *  The list of freelists we have been given for reconstruction will
+	 *  consist of local and global freelists (maybe MMU as well). Any
+	 *  local freelists will have their global list specified as well.
+	 *  However, there may be other local freelists that are not listed but
+	 *  whose global freelist is going to be reconstructed. We have to find
+	 *  those freelists too, which means iterating the entire list of
+	 *  freelists to determine which must be reconstructed.
+	 */
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+	{
+		RGX_FREELIST  *psFreeList  = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+		IMG_BOOL      bReconstruct = IMG_FALSE;
+
+		/*
+		 *  Check if this freelist needs to be reconstructed (was it requested
+		 *  or was its global freelist requested)...
+		 */
+		for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+		{
+			if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID  ||
+			    paui32Freelists[ui32Loop] == psFreeList->ui32FreelistGlobalID)
+			{
+				bReconstruct = IMG_TRUE;
+				break;
+			}
+		}
+
+		if (bReconstruct)
+		{
+			eError = RGXReconstructFreeList(psFreeList);
+			if (eError == PVRSRV_OK)
+			{
+				for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+				{
+					if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID)
+					{
+						/* Reconstruction of this requested freelist was successful... */
+						sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &= ~RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+						break;
+					}
+				}
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"Reconstruction of FreeList %p failed (%s)",
+						psFreeList,
+						PVRSRVGetErrorString(eError)));
+			}
+		}
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	/* Check that all freelists were found and reconstructed... */
+	for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+	{
+		PVR_ASSERT((sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &
+				RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG) == 0);
+	}
+
+	/* send feedback */
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+				RGXFWIF_DM_TA,
+				&sTACCBCmd,
+				0,
+				PDUMP_FLAGS_NONE);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	/* The kernel CCB should never fill up, as the FW processes commands right away */
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+/* Create HWRTDataSet */
+PVRSRV_ERROR RGXCreateHWRTData(CONNECTION_DATA      *psConnection,
+		PVRSRV_DEVICE_NODE	*psDeviceNode,
+		IMG_UINT32			psRenderTarget, /* FIXME this should not be IMG_UINT32 */
+		IMG_DEV_VIRTADDR		psPMMListDevVAddr,
+		RGX_FREELIST			*apsFreeLists[RGXFW_MAX_FREELISTS],
+		RGX_RTDATA_CLEANUP_DATA	**ppsCleanupData,
+		IMG_UINT32           ui32PPPScreen,
+		IMG_UINT64           ui64MultiSampleCtl,
+		IMG_UINT64           ui64FlippedMultiSampleCtl,
+		IMG_UINT32           ui32TPCStride,
+		IMG_DEV_VIRTADDR		sTailPtrsDevVAddr,
+		IMG_UINT32           ui32TPCSize,
+		IMG_UINT32           ui32TEScreen,
+		IMG_UINT32           ui32TEAA,
+		IMG_UINT32           ui32TEMTILE1,
+		IMG_UINT32           ui32TEMTILE2,
+		IMG_UINT32           ui32MTileStride,
+		IMG_UINT32                 ui32ISPMergeLowerX,
+		IMG_UINT32                 ui32ISPMergeLowerY,
+		IMG_UINT32                 ui32ISPMergeUpperX,
+		IMG_UINT32                 ui32ISPMergeUpperY,
+		IMG_UINT32                 ui32ISPMergeScaleX,
+		IMG_UINT32                 ui32ISPMergeScaleY,
+		IMG_DEV_VIRTADDR	sMacrotileArrayDevVAddr,
+		IMG_DEV_VIRTADDR	sRgnHeaderDevVAddr,
+		IMG_DEV_VIRTADDR	sRTCDevVAddr,
+		IMG_UINT64			uiRgnHeaderSize,
+		IMG_UINT32			ui32ISPMtileSize,
+		IMG_UINT16			ui16MaxRTs,
+		DEVMEM_MEMDESC		**ppsMemDesc,
+		IMG_UINT32			*puiHWRTData)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+	RGXFWIF_HWRTDATA *psHWRTData;
+	IMG_UINT32 ui32Loop;
+	RGX_RTDATA_CLEANUP_DATA *psTmpCleanup;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* Prepare cleanup struct */
+	psTmpCleanup = OSAllocZMem(sizeof(*psTmpCleanup));
+	if (psTmpCleanup == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto AllocError;
+	}
+
+	*ppsCleanupData = psTmpCleanup;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+			&psTmpCleanup->psCleanupSync,
+			"HWRTData cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate cleanup sync (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto SyncAlloc;
+	}
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	/*
+	 * This FW RT-Data is only mapped into the kernel for initialisation.
+	 * Otherwise this allocation is only used by the FW.
+	 * Therefore the GPU cache doesn't need coherency, and write-combine
+	 * suffices on the CPU side (the WC buffer will be flushed at the first TA kick).
+	 */
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_HWRTDATA),
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+			"FwHWRTData",
+			ppsMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: DevmemAllocate for RGX_FWIF_HWRTDATA failed",
+				__func__));
+		goto FWRTDataAllocateError;
+	}
+
+	psTmpCleanup->psDeviceNode = psDeviceNode;
+	psTmpCleanup->psFWHWRTDataMemDesc = *ppsMemDesc;
+
+	RGXSetFirmwareAddress(&pFirmwareAddr, *ppsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+
+	*puiHWRTData = pFirmwareAddr.ui32Addr;
+
+	eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, (void **)&psHWRTData);
+	PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError);
+
+	/* FIXME: MList is something that the PM writes physical addresses to,
+	 * so ideally it is best allocated in the kernel */
+	psHWRTData->psPMMListDevVAddr = psPMMListDevVAddr;
+	psHWRTData->psParentRenderTarget.ui32Addr = psRenderTarget;
+
+	psHWRTData->ui32PPPScreen         = ui32PPPScreen;
+	psHWRTData->ui64MultiSampleCtl = ui64MultiSampleCtl;
+	psHWRTData->ui64FlippedMultiSampleCtl = ui64FlippedMultiSampleCtl;
+	psHWRTData->ui32TPCStride         = ui32TPCStride;
+	psHWRTData->sTailPtrsDevVAddr     = sTailPtrsDevVAddr;
+	psHWRTData->ui32TPCSize           = ui32TPCSize;
+	psHWRTData->ui32TEScreen          = ui32TEScreen;
+	psHWRTData->ui32TEAA              = ui32TEAA;
+	psHWRTData->ui32TEMTILE1          = ui32TEMTILE1;
+	psHWRTData->ui32TEMTILE2          = ui32TEMTILE2;
+	psHWRTData->ui32MTileStride       = ui32MTileStride;
+	psHWRTData->ui32ISPMergeLowerX = ui32ISPMergeLowerX;
+	psHWRTData->ui32ISPMergeLowerY = ui32ISPMergeLowerY;
+	psHWRTData->ui32ISPMergeUpperX = ui32ISPMergeUpperX;
+	psHWRTData->ui32ISPMergeUpperY = ui32ISPMergeUpperY;
+	psHWRTData->ui32ISPMergeScaleX = ui32ISPMergeScaleX;
+	psHWRTData->ui32ISPMergeScaleY = ui32ISPMergeScaleY;
+	psHWRTData->sMacrotileArrayDevVAddr = sMacrotileArrayDevVAddr;
+	psHWRTData->sRgnHeaderDevVAddr		= sRgnHeaderDevVAddr;
+	psHWRTData->sRTCDevVAddr			= sRTCDevVAddr;
+	psHWRTData->uiRgnHeaderSize			= uiRgnHeaderSize;
+	psHWRTData->ui32ISPMtileSize		= ui32ISPMtileSize;
+
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+	{
+		psTmpCleanup->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop];
+		psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount++;
+		psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psTmpCleanup->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr;
+		/* Invalid initial snapshot value; the snapshot is always taken during the
+		 * first kick and hence the value gets replaced then anyway, so it is safe
+		 * to set it to 0.
+		 */
+		psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0;
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	{
+		RGXFWIF_RTA_CTL *psRTACtl = &psHWRTData->sRTACtl;
+
+		psRTACtl->ui32RenderTargetIndex = 0;
+		psRTACtl->ui32ActiveRenderTargets = 0;
+		psRTACtl->sValidRenderTargets.ui32Addr = 0;
+		psRTACtl->sRTANumPartialRenders.ui32Addr = 0;
+		psRTACtl->ui32MaxRTs = (IMG_UINT32) ui16MaxRTs;
+
+		if (ui16MaxRTs > 1)
+		{
+			PDUMPCOMMENT("Allocate memory for shadow render target cache");
+			eError = DevmemFwAllocate(psDevInfo,
+					ui16MaxRTs * sizeof(IMG_UINT32),
+					PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+					PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+					PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+					PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+					PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+					PVRSRV_MEMALLOCFLAG_UNCACHED|
+					PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+					"FwShadowRTCache",
+					&psTmpCleanup->psRTArrayMemDesc);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Failed to allocate %u-entry render target array (%s)",
+						__func__,
+						ui16MaxRTs, PVRSRVGetErrorString(eError)));
+				goto FWAllocateRTArrayError;
+			}
+
+			RGXSetFirmwareAddress(&psRTACtl->sValidRenderTargets,
+					psTmpCleanup->psRTArrayMemDesc,
+					0, RFW_FWADDR_FLAG_NONE);
+
+			PDUMPCOMMENT("Allocate memory for tracking renders accumulation");
+			eError = DevmemFwAllocate(psDevInfo,
+					ui16MaxRTs * sizeof(IMG_UINT32),
+					PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+					PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+					PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+					PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+					PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+					PVRSRV_MEMALLOCFLAG_UNCACHED|
+					PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+					"FwRendersAccumulation",
+					&psTmpCleanup->psRendersAccArrayMemDesc);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Failed to allocate %u-entry render target array (%s) (renders accumulation)",
+						__func__,
+						ui16MaxRTs, PVRSRVGetErrorString(eError)));
+				goto FWAllocateRTAccArrayError;
+			}
+
+			RGXSetFirmwareAddress(&psRTACtl->sRTANumPartialRenders,
+					psTmpCleanup->psRendersAccArrayMemDesc,
+					0, RFW_FWADDR_FLAG_NONE);
+		}
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Dump HWRTData 0x%08X", *puiHWRTData);
+	DevmemPDumpLoadMem(*ppsMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+	return PVRSRV_OK;
+
+	FWAllocateRTAccArrayError:
+	DevmemFwFree(psDevInfo, psTmpCleanup->psRTArrayMemDesc);
+	FWAllocateRTArrayError:
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+	{
+		PVR_ASSERT(psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+		psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount--;
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+	DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+	FWRTDataCpuMapError:
+	RGXUnsetFirmwareAddress(*ppsMemDesc);
+	DevmemFwFree(psDevInfo, *ppsMemDesc);
+	FWRTDataAllocateError:
+	SyncPrimFree(psTmpCleanup->psCleanupSync);
+	SyncAlloc:
+	*ppsCleanupData = NULL;
+	OSFreeMem(psTmpCleanup);
+
+	AllocError:
+	return eError;
+}
+
+/* Destroy HWRTDataSet */
+PVRSRV_ERROR RGXDestroyHWRTData(RGX_RTDATA_CLEANUP_DATA *psCleanupData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	PVRSRV_ERROR eError;
+	PRGXFWIF_HWRTDATA psHWRTData;
+	IMG_UINT32 ui32Loop;
+
+	PVR_ASSERT(psCleanupData);
+
+	RGXSetFirmwareAddress(&psHWRTData, psCleanupData->psFWHWRTDataMemDesc, 0, RFW_FWADDR_NOREF_FLAG);
+
+	/* Cleanup HWRTData in TA */
+	eError = RGXFWRequestHWRTDataCleanUp(psCleanupData->psDeviceNode,
+			psHWRTData,
+			psCleanupData->psCleanupSync,
+			RGXFWIF_DM_TA);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+
+	psDevInfo = psCleanupData->psDeviceNode->pvDevice;
+
+	/* Cleanup HWRTData in 3D */
+	eError = RGXFWRequestHWRTDataCleanUp(psCleanupData->psDeviceNode,
+			psHWRTData,
+			psCleanupData->psCleanupSync,
+			RGXFWIF_DM_3D);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+
+	RGXUnsetFirmwareAddress(psCleanupData->psFWHWRTDataMemDesc);
+	DevmemFwFree(psDevInfo, psCleanupData->psFWHWRTDataMemDesc);
+
+	if (psCleanupData->psRTArrayMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psCleanupData->psRTArrayMemDesc);
+		DevmemFwFree(psDevInfo, psCleanupData->psRTArrayMemDesc);
+	}
+
+	if (psCleanupData->psRendersAccArrayMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psCleanupData->psRendersAccArrayMemDesc);
+		DevmemFwFree(psDevInfo, psCleanupData->psRendersAccArrayMemDesc);
+	}
+
+	SyncPrimFree(psCleanupData->psCleanupSync);
+
+	/* decrease freelist refcount */
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+	{
+		PVR_ASSERT(psCleanupData->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+		psCleanupData->apsFreeLists[ui32Loop]->ui32RefCount--;
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	OSFreeMem(psCleanupData);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA      *psConnection,
+		PVRSRV_DEVICE_NODE	*psDeviceNode,
+		IMG_UINT32			ui32MaxFLPages,
+		IMG_UINT32			ui32InitFLPages,
+		IMG_UINT32			ui32GrowFLPages,
+		IMG_UINT32           ui32GrowParamThreshold,
+		RGX_FREELIST			*psGlobalFreeList,
+		IMG_BOOL				bCheckFreelist,
+		IMG_DEV_VIRTADDR		sFreeListDevVAddr,
+		PMR					*psFreeListPMR,
+		IMG_DEVMEM_OFFSET_T	uiFreeListPMROffset,
+		RGX_FREELIST			**ppsFreeList)
+{
+	PVRSRV_ERROR				eError;
+	RGXFWIF_FREELIST			*psFWFreeList;
+	DEVMEM_MEMDESC				*psFWFreelistMemDesc;
+	RGX_FREELIST				*psFreeList;
+	PVRSRV_RGXDEV_INFO			*psDevInfo = psDeviceNode->pvDevice;
+
+	if (OSGetPageShift() > RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+	{
+		IMG_UINT32 ui32Size, ui32NewInitFLPages, ui32NewMaxFLPages, ui32NewGrowFLPages;
+
+		/* Round up number of FL pages to the next multiple of the OS page size */
+
+		ui32Size = ui32InitFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+		ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+		ui32NewInitFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+
+		ui32Size = ui32GrowFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+		ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+		ui32NewGrowFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+
+		ui32Size = ui32MaxFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+		ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+		ui32NewMaxFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+
+		PVR_DPF((PVR_DBG_WARNING, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u",
+				 __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, ui32NewMaxFLPages));
+
+		ui32InitFLPages = ui32NewInitFLPages;
+		ui32GrowFLPages = ui32NewGrowFLPages;
+		ui32MaxFLPages = ui32NewMaxFLPages;
+	}
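+	/* Worked example for the rounding above (illustrative values): with 16KB
+	 * OS pages and 4KB PM physical pages (ALIGNSHIFT = 12), an Init value of
+	 * 1001 PM pages is 1001 << 12 = 4100096 bytes, which PVR_ALIGN rounds up
+	 * to 4112384 bytes (251 * 16KB), i.e. 1004 PM pages. */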
+
+	/* Allocate kernel freelist struct */
+	psFreeList = OSAllocZMem(sizeof(*psFreeList));
+	if (psFreeList == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed to allocate host data structure",
+				__func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocHost;
+	}
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+			&psFreeList->psCleanupSync,
+			"ta3d free list cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate cleanup sync (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto SyncAlloc;
+	}
+
+	/*
+	 * This FW FreeList context is only mapped into the kernel for initialisation
+	 * and reconstruction (at other times it is not mapped and is only used by
+	 * the FW). Therefore the GPU cache doesn't need coherency, and write-combine
+	 * suffices on the CPU side (the WC buffer will be flushed at the first TA kick).
+	 */
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(*psFWFreeList),
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+			"FwFreeList",
+			&psFWFreelistMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: DevmemAllocate for RGXFWIF_FREELIST failed",
+				__func__));
+		goto FWFreeListAlloc;
+	}
+
+	/* Initialise host data structures */
+	psFreeList->psDevInfo = psDevInfo;
+	psFreeList->psFreeListPMR = psFreeListPMR;
+	psFreeList->uiFreeListPMROffset = uiFreeListPMROffset;
+	psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc;
+	RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+	/* psFreeList->ui32FreelistID set below with lock... */
+	psFreeList->ui32FreelistGlobalID = (psGlobalFreeList ? psGlobalFreeList->ui32FreelistID : 0);
+	psFreeList->ui32MaxFLPages = ui32MaxFLPages;
+	psFreeList->ui32InitFLPages = ui32InitFLPages;
+	psFreeList->ui32GrowFLPages = ui32GrowFLPages;
+	psFreeList->ui32CurrentFLPages = 0;
+	psFreeList->ui32ReadyFLPages = 0;
+	psFreeList->ui32GrowThreshold = ui32GrowParamThreshold;
+	psFreeList->ui64FreelistChecksum = 0;
+	psFreeList->ui32RefCount = 0;
+	psFreeList->bCheckFreelist = bCheckFreelist;
+	dllist_init(&psFreeList->sMemoryBlockHead);
+	dllist_init(&psFreeList->sMemoryBlockInitHead);
+	psFreeList->ownerPid = OSGetCurrentClientProcessIDKM();
+
+
+	/* Add to list of freelists */
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++;
+	dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode);
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+
+	/* Initialise FW data structure */
+	eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+	PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWFreeListCpuMap);
+
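+	/* The FW consumes the freelist as a stack of 32-bit page addresses laid
+	 * out for the maximum page count: the initial entries occupy the top of
+	 * the area, so the current base address computed below is
+	 * FL base + (Max - Current) * sizeof(IMG_UINT32), aligned down to the PM
+	 * freelist base-address alignment. */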
+	{
+		const IMG_UINT32 ui32ReadyPages = _CalculateFreelistReadyPages(psFreeList, ui32InitFLPages);
+
+		psFWFreeList->ui32MaxPages = ui32MaxFLPages;
+		psFWFreeList->ui32CurrentPages = ui32InitFLPages - ui32ReadyPages;
+		psFWFreeList->ui32GrowPages = ui32GrowFLPages;
+		psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1;
+		psFWFreeList->psFreeListDevVAddr = sFreeListDevVAddr;
+		psFWFreeList->ui64CurrentDevVAddr = (sFreeListDevVAddr.uiAddr +
+				((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) &
+						~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1);
+		psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID;
+		psFWFreeList->bGrowPending = IMG_FALSE;
+		psFWFreeList->ui32ReadyPages = ui32ReadyPages;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+			"Freelist %p created: Max pages 0x%08x, Init pages 0x%08x, "
+			"Max FL base address 0x%016" IMG_UINT64_FMTSPECx ", "
+			"Init FL base address 0x%016" IMG_UINT64_FMTSPECx,
+			psFreeList,
+			ui32MaxFLPages,
+			ui32InitFLPages,
+			sFreeListDevVAddr.uiAddr,
+			psFWFreeList->ui64CurrentDevVAddr));
+#if defined(PDUMP)
+	PDUMPCOMMENT("Dump FW FreeList");
+	DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS);
+
+	/*
+	 * Separate dump of the Freelist's number of pages and stack pointer.
+	 * This makes it easy to modify the PB size in the out2.txt files.
+	 */
+	PDUMPCOMMENT("FreeList TotalPages");
+	DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+			offsetof(RGXFWIF_FREELIST, ui32CurrentPages),
+			psFWFreeList->ui32CurrentPages,
+			PDUMP_FLAGS_CONTINUOUS);
+	PDUMPCOMMENT("FreeList StackPointer");
+	DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+			offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop),
+			psFWFreeList->ui32CurrentStackTop,
+			PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+
+	/* Add initial PB block */
+	eError = RGXGrowFreeList(psFreeList,
+			ui32InitFLPages,
+			&psFreeList->sMemoryBlockInitHead);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (%d)",
+				__func__,
+				sFreeListDevVAddr.uiAddr,
+				eError));
+		goto FWFreeListCpuMap;
+	}
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	/* Update Stats */
+	PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/
+			0,
+			psFreeList->ui32InitFLPages,
+			psFreeList->ui32NumHighPages,
+			psFreeList->ownerPid);
+
+#endif
+
+	/* return values */
+	*ppsFreeList = psFreeList;
+
+	return PVRSRV_OK;
+
+	/* Error handling */
+
+	FWFreeListCpuMap:
+	/* Remove freelists from list  */
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	dllist_remove_node(&psFreeList->sNode);
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	RGXUnsetFirmwareAddress(psFWFreelistMemDesc);
+	DevmemFwFree(psDevInfo, psFWFreelistMemDesc);
+
+	FWFreeListAlloc:
+	SyncPrimFree(psFreeList->psCleanupSync);
+
+	SyncAlloc:
+	OSFreeMem(psFreeList);
+
+	ErrorAllocHost:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+	RGXDestroyFreeList
+ */
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32RefCount;
+
+	PVR_ASSERT(psFreeList);
+
+	OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+	ui32RefCount = psFreeList->ui32RefCount;
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+	if (ui32RefCount != 0)
+	{
+		/* Freelist still busy */
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	/* Freelist is not in use => start firmware cleanup */
+	eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo,
+			psFreeList->sFreeListFWDevVAddr,
+			psFreeList->psCleanupSync);
+	if (eError != PVRSRV_OK)
+	{
+		/* This can happen if the firmware took too long to handle the cleanup
+		 * request, or if SLC flushes didn't go through (due to some GPU lockup) */
+		return eError;
+	}
+
+	/* Remove FreeList from linked list before we destroy it... */
+	OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+	dllist_remove_node(&psFreeList->sNode);
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+	if (psFreeList->bCheckFreelist)
+	{
+		RGXFWIF_FREELIST  *psFWFreeList;
+		IMG_UINT32        ui32CurrentStackTop;
+		IMG_UINT64        ui64CheckSum;
+
+		/* Get the current stack pointer for this free list */
+		DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+		ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop;
+		DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+		if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1)
+		{
+			/* Do consistency tests (as the list is fully populated) */
+			_CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
+		}
+		else
+		{
+			/* Check for duplicate pages, but don't check the checksum as the list is not fully populated */
+			_CheckFreelist(psFreeList, ui32CurrentStackTop+1, 0, &ui64CheckSum);
+		}
+	}
+
+	/* Destroy FW structures */
+	RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc);
+	DevmemFwFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc);
+
+	/* Remove grow shrink blocks */
+	while (!dllist_is_empty(&psFreeList->sMemoryBlockHead))
+	{
+		eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead, psFreeList);
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+
+	/* Remove initial PB block */
+	eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockInitHead, psFreeList);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* consistency checks */
+	PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead));
+	PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0);
+
+	SyncPrimFree(psFreeList->psCleanupSync);
+
+	/* free Freelist */
+	OSFreeMem(psFreeList);
+
+	return eError;
+}
+
+
+/*
+	RGXCreateRenderTarget
+ */
+PVRSRV_ERROR RGXCreateRenderTarget(CONNECTION_DATA      *psConnection,
+		PVRSRV_DEVICE_NODE	*psDeviceNode,
+		IMG_DEV_VIRTADDR		psVHeapTableDevVAddr,
+		RGX_RT_CLEANUP_DATA 	**ppsCleanupData,
+		IMG_UINT32			*sRenderTargetFWDevVAddr)
+{
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+	RGXFWIF_RENDER_TARGET	*psRenderTarget;
+	RGXFWIF_DEV_VIRTADDR	pFirmwareAddr;
+	PVRSRV_RGXDEV_INFO 		*psDevInfo = psDeviceNode->pvDevice;
+	RGX_RT_CLEANUP_DATA		*psCleanupData;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psCleanupData = OSAllocZMem(sizeof(*psCleanupData));
+	if (psCleanupData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_out;
+	}
+
+	psCleanupData->psDeviceNode = psDeviceNode;
+	/*
+	 * This FW render target context is only mapped into the kernel for initialisation.
+	 * Otherwise this allocation is only used by the FW.
+	 * Therefore the GPU cache doesn't need coherency, and write-combine
+	 * suffices on the CPU side (the WC buffer will be flushed at the first TA kick).
+	 */
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(*psRenderTarget),
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+			"FwRenderTarget",
+			&psCleanupData->psRenderTargetMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRenderTarget: DevmemAllocate for Render Target failed"));
+		goto err_free;
+	}
+	RGXSetFirmwareAddress(&pFirmwareAddr, psCleanupData->psRenderTargetMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+	*sRenderTargetFWDevVAddr = pFirmwareAddr.ui32Addr;
+
+	eError = DevmemAcquireCpuVirtAddr(psCleanupData->psRenderTargetMemDesc, (void **)&psRenderTarget);
+	PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", err_fwalloc);
+
+	psRenderTarget->psVHeapTableDevVAddr = psVHeapTableDevVAddr;
+	psRenderTarget->bTACachesNeedZeroing = IMG_FALSE;
+#if defined(PDUMP)
+	PDUMPCOMMENT("Dump RenderTarget");
+	DevmemPDumpLoadMem(psCleanupData->psRenderTargetMemDesc, 0, sizeof(*psRenderTarget), PDUMP_FLAGS_CONTINUOUS);
+#endif
+	DevmemReleaseCpuVirtAddr(psCleanupData->psRenderTargetMemDesc);
+
+	*ppsCleanupData = psCleanupData;
+
+	err_out:
+	return eError;
+
+	err_free:
+	OSFreeMem(psCleanupData);
+	goto err_out;
+
+	err_fwalloc:
+	DevmemFwFree(psDevInfo, psCleanupData->psRenderTargetMemDesc);
+	goto err_free;
+
+}
+
+
+/*
+	RGXDestroyRenderTarget
+ */
+PVRSRV_ERROR RGXDestroyRenderTarget(RGX_RT_CLEANUP_DATA *psCleanupData)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = psCleanupData->psDeviceNode;
+
+	RGXUnsetFirmwareAddress(psCleanupData->psRenderTargetMemDesc);
+
+	/*
+	 * Note: once RT cleanup is implemented in the FW, call that instead.
+	 */
+	/* Flush the SLC before freeing */
+	{
+		RGXFWIF_KCCB_CMD sFlushInvalCmd;
+		PVRSRV_ERROR eError;
+
+		/* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
+#endif
+		sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
+
+		eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sFlushInvalCmd,
+				PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to schedule SLC flush command (%s)",
+					__func__,
+					PVRSRVGetErrorString(eError)));
+		}
+		else
+		{
+			/* Wait for the SLC flush to complete */
+			eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				"%s: SLC flush and invalidate aborted (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+			}
+		}
+	}
+
+	DevmemFwFree(psDeviceNode->pvDevice, psCleanupData->psRenderTargetMemDesc);
+	OSFreeMem(psCleanupData);
+	return PVRSRV_OK;
+}
+
+/*
+	RGXCreateZSBuffer
+ */
+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
+		PVRSRV_DEVICE_NODE	*psDeviceNode,
+		DEVMEMINT_RESERVATION 	*psReservation,
+		PMR 					*psPMR,
+		PVRSRV_MEMALLOCFLAGS_T 	uiMapFlags,
+		RGX_ZSBUFFER_DATA **ppsZSBuffer,
+		IMG_UINT32 *pui32ZSBufferFWDevVAddr)
+{
+	PVRSRV_ERROR				eError;
+	PVRSRV_RGXDEV_INFO 			*psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_PRBUFFER			*psFWZSBuffer;
+	RGX_ZSBUFFER_DATA			*psZSBuffer;
+	DEVMEM_MEMDESC				*psFWZSBufferMemDesc;
+	IMG_BOOL					bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? IMG_TRUE : IMG_FALSE;
+
+	/* Allocate host data structure */
+	psZSBuffer = OSAllocZMem(sizeof(*psZSBuffer));
+	if (psZSBuffer == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate cleanup data structure for ZS-Buffer",
+				__func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocCleanup;
+	}
+
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+			&psZSBuffer->psCleanupSync,
+			"ta3d zs buffer cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate cleanup sync (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto ErrorSyncAlloc;
+	}
+
+	/* Populate Host data */
+	psZSBuffer->psDevInfo = psDevInfo;
+	psZSBuffer->psReservation = psReservation;
+	psZSBuffer->psPMR = psPMR;
+	psZSBuffer->uiMapFlags = uiMapFlags;
+	psZSBuffer->ui32RefCount = 0;
+	psZSBuffer->bOnDemand = bOnDemand;
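+	/* On-demand ZS-Buffers get physical backing only when the FW requests it;
+	 * register them on the device's deferred-allocation list so the backing
+	 * handler can find them by ID later. */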
+	if (bOnDemand)
+	{
+		/* psZSBuffer->ui32ZSBufferID set below with lock... */
+		psZSBuffer->psMapping = NULL;
+
+		OSLockAcquire(psDevInfo->hLockZSBuffer);
+		psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++;
+		dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode);
+		OSLockRelease(psDevInfo->hLockZSBuffer);
+	}
+
+	/* Allocate firmware memory for ZS-Buffer. */
+	PDUMPCOMMENT("Allocate firmware ZS-Buffer data structure");
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(*psFWZSBuffer),
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+			"FwZSBuffer",
+			&psFWZSBufferMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate firmware ZS-Buffer (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto ErrorAllocFWZSBuffer;
+	}
+	psZSBuffer->psFWZSBufferMemDesc = psFWZSBufferMemDesc;
+
+	/* Temporarily map the firmware render context to the kernel. */
+	eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc,
+			(void **)&psFWZSBuffer);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to map firmware ZS-Buffer (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto ErrorAcquireFWZSBuffer;
+	}
+
+	/* Populate FW ZS-Buffer data structure */
+	psFWZSBuffer->bOnDemand = bOnDemand;
+	psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED;
+	psFWZSBuffer->ui32BufferID = psZSBuffer->ui32ZSBufferID;
+
+	/* Get firmware address of ZS-Buffer. */
+	RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+
+	/* Dump the ZS-Buffer and the memory content */
+#if defined(PDUMP)
+	PDUMPCOMMENT("Dump firmware ZS-Buffer");
+	DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	/* Release address acquired above. */
+	DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);
+
+
+	/* define return value */
+	*ppsZSBuffer = psZSBuffer;
+	*pui32ZSBufferFWDevVAddr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)",
+			psZSBuffer,
+			(bOnDemand) ? "On-Demand": "Up-front"));
+
+	psZSBuffer->owner=OSGetCurrentClientProcessIDKM();
+
+	return PVRSRV_OK;
+
+	/* error handling */
+
+	ErrorAcquireFWZSBuffer:
+	DevmemFwFree(psDevInfo, psFWZSBufferMemDesc);
+
+	ErrorAllocFWZSBuffer:
+	SyncPrimFree(psZSBuffer->psCleanupSync);
+
+	ErrorSyncAlloc:
+	OSFreeMem(psZSBuffer);
+
+	ErrorAllocCleanup:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+	RGXDestroyZSBuffer
+ */
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+	POS_LOCK hLockZSBuffer;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psZSBuffer);
+	hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+	/* Request ZS Buffer cleanup */
+	eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo,
+			psZSBuffer->sZSBufferFWDevVAddr,
+			psZSBuffer->psCleanupSync);
+	if (eError != PVRSRV_ERROR_RETRY)
+	{
+		/* Free the firmware render context. */
+		RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc);
+		DevmemFwFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc);
+
+		/* Remove Deferred Allocation from list */
+		if (psZSBuffer->bOnDemand)
+		{
+			OSLockAcquire(hLockZSBuffer);
+			PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode));
+			dllist_remove_node(&psZSBuffer->sNode);
+			OSLockRelease(hLockZSBuffer);
+		}
+
+		SyncPrimFree(psZSBuffer->psCleanupSync);
+
+		PVR_ASSERT(psZSBuffer->ui32RefCount == 0);
+
+		PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] destroyed", psZSBuffer));
+
+		/* Free ZS-Buffer host data structure */
+		OSFreeMem(psZSBuffer);
+
+	}
+
+	return eError;
+}
+
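+/* Acquire (or reference) physical backing for an on-demand ZS-Buffer: the
+ * first caller maps the PMR into the reservation; subsequent callers only
+ * take a reference. */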
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+	POS_LOCK hLockZSBuffer;
+	PVRSRV_ERROR eError;
+
+	if (!psZSBuffer)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (!psZSBuffer->bOnDemand)
+	{
+		/* Only deferred allocations can be populated */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+			"ZS Buffer [%p, ID=0x%08x]: Physical backing requested",
+			psZSBuffer,
+			psZSBuffer->ui32ZSBufferID));
+	hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+	OSLockAcquire(hLockZSBuffer);
+
+	if (psZSBuffer->ui32RefCount == 0)
+	{
+		if (psZSBuffer->bOnDemand)
+		{
+			IMG_HANDLE hDevmemHeap;
+
+			PVR_ASSERT(psZSBuffer->psMapping == NULL);
+
+			/* Get Heap */
+			eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"Unable to get heap handle for ZS Buffer [%p, ID=0x%08x] (%s)",
+						psZSBuffer,
+						psZSBuffer->ui32ZSBufferID,
+						PVRSRVGetErrorString(eError)));
+				OSLockRelease(hLockZSBuffer);
+				return eError;
+			}
+
+			eError = DevmemIntMapPMR(hDevmemHeap,
+					psZSBuffer->psReservation,
+					psZSBuffer->psPMR,
+					psZSBuffer->uiMapFlags,
+					&psZSBuffer->psMapping);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"Unable populate ZS Buffer [%p, ID=0x%08x] (%s)",
+						psZSBuffer,
+						psZSBuffer->ui32ZSBufferID,
+						PVRSRVGetErrorString(eError)));
+				OSLockRelease(hLockZSBuffer);
+				return eError;
+			}
+			PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired",
+					psZSBuffer,
+					psZSBuffer->ui32ZSBufferID));
+		}
+	}
+
+	/* Increase refcount */
+	psZSBuffer->ui32RefCount++;
+
+	OSLockRelease(hLockZSBuffer);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+		RGX_POPULATION **ppsPopulation)
+{
+	RGX_POPULATION *psPopulation;
+	PVRSRV_ERROR eError;
+
+	psZSBuffer->ui32NumReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	PVRSRVStatsUpdateZSBufferStats(1, 0, psZSBuffer->owner);
+#endif
+
+	/* Do the backing */
+	eError = RGXBackingZSBuffer(psZSBuffer);
+	if (eError != PVRSRV_OK)
+	{
+		goto OnErrorBacking;
+	}
+
+	/* Create the handle to the backing */
+	psPopulation = OSAllocMem(sizeof(*psPopulation));
+	if (psPopulation == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto OnErrorAlloc;
+	}
+
+	psPopulation->psZSBuffer = psZSBuffer;
+
+	/* return value */
+	*ppsPopulation = psPopulation;
+
+	return PVRSRV_OK;
+
+	OnErrorAlloc:
+	RGXUnbackingZSBuffer(psZSBuffer);
+
+	OnErrorBacking:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+	POS_LOCK hLockZSBuffer;
+	PVRSRV_ERROR eError;
+
+	if (!psZSBuffer)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PVR_ASSERT(psZSBuffer->ui32RefCount);
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+			"ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested",
+			psZSBuffer,
+			psZSBuffer->ui32ZSBufferID));
+
+	hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+	OSLockAcquire(hLockZSBuffer);
+
+	if (psZSBuffer->bOnDemand)
+	{
+		if (psZSBuffer->ui32RefCount == 1)
+		{
+			PVR_ASSERT(psZSBuffer->psMapping);
+
+			eError = DevmemIntUnmapPMR(psZSBuffer->psMapping);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"Unable to unpopulate ZS Buffer [%p, ID=0x%08x] (%s)",
+						psZSBuffer,
+						psZSBuffer->ui32ZSBufferID,
+						PVRSRVGetErrorString(eError)));
+				OSLockRelease(hLockZSBuffer);
+				return eError;
+			}
+
+			PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed",
+					psZSBuffer,
+					psZSBuffer->ui32ZSBufferID));
+		}
+	}
+
+	/* Decrease refcount */
+	psZSBuffer->ui32RefCount--;
+
+	OSLockRelease(hLockZSBuffer);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation)
+{
+	PVRSRV_ERROR eError;
+
+	if (!psPopulation)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	OSFreeMem(psPopulation);
+
+	return PVRSRV_OK;
+}
+
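+/* Look up a ZS-Buffer by its buffer ID on the device's deferred-allocation
+ * list. Returns NULL if no ZS-Buffer with that ID is registered. */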
+static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID)
+{
+	DLLIST_NODE *psNode, *psNext;
+	RGX_ZSBUFFER_DATA *psZSBuffer = NULL;
+
+	OSLockAcquire(psDevInfo->hLockZSBuffer);
+
+	dllist_foreach_node(&psDevInfo->sZSBufferHead, psNode, psNext)
+	{
+		RGX_ZSBUFFER_DATA *psThisZSBuffer = IMG_CONTAINER_OF(psNode, RGX_ZSBUFFER_DATA, sNode);
+
+		if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID)
+		{
+			psZSBuffer = psThisZSBuffer;
+			break;
+		}
+	}
+
+	OSLockRelease(psDevInfo->hLockZSBuffer);
+	return psZSBuffer;
+}
+
+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32ZSBufferID)
+{
+	RGX_ZSBUFFER_DATA *psZSBuffer;
+	RGXFWIF_KCCB_CMD sTACCBCmd;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psDevInfo);
+
+	/* scan all deferred allocations */
+	psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);
+
+	if (psZSBuffer)
+	{
+		IMG_BOOL bBackingDone = IMG_TRUE;
+
+		/* Populate ZLS */
+		eError = RGXBackingZSBuffer(psZSBuffer);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"Populating ZS-Buffer (ID = 0x%08x) failed (%s)",
+					ui32ZSBufferID,
+					PVRSRVGetErrorString(eError)));
+			bBackingDone = IMG_FALSE;
+		}
+
+		/* send confirmation */
+		sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE;
+		sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+		sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone;
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psDevInfo,
+					RGXFWIF_DM_TA,
+					&sTACCBCmd,
+					0,
+					PDUMP_FLAGS_NONE);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		/* The kernel CCB should never fill up, as the FW processes commands right away */
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		psZSBuffer->ui32NumReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+		PVRSRVStatsUpdateZSBufferStats(0, 1, psZSBuffer->owner);
+#endif
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)",
+				ui32ZSBufferID));
+	}
+}
+
+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32ZSBufferID)
+{
+	RGX_ZSBUFFER_DATA *psZSBuffer;
+	RGXFWIF_KCCB_CMD sTACCBCmd;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psDevInfo);
+
+	/* scan all deferred allocations */
+	psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);
+
+	if (psZSBuffer)
+	{
+		/* Unpopulate ZLS */
+		eError = RGXUnbackingZSBuffer(psZSBuffer);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"UnPopulating ZS-Buffer (ID = 0x%08x) failed (%s)",
+					ui32ZSBufferID,
+					PVRSRVGetErrorString(eError)));
+			PVR_ASSERT(IMG_FALSE);
+		}
+
+		/* send confirmation */
+		sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE;
+		sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+		sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE;
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psDevInfo,
+					RGXFWIF_DM_TA,
+					&sTACCBCmd,
+					0,
+					PDUMP_FLAGS_NONE);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		/* The kernel CCB should never fill up, as the FW processes commands right away */
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)",
+				ui32ZSBufferID));
+	}
+}
+
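+/* Create the TA firmware common context embedded in the render context:
+ * allocates the TA context suspend-state buffer, seeds the VDM call stack
+ * pointer and context-switch registers, then allocates the common context. */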
+static
+PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection,
+		PVRSRV_DEVICE_NODE *psDeviceNode,
+		DEVMEM_MEMDESC *psAllocatedMemDesc,
+		IMG_UINT32 ui32AllocatedOffset,
+		DEVMEM_MEMDESC *psFWMemContextMemDesc,
+		IMG_DEV_VIRTADDR sVDMCallStackAddr,
+		IMG_UINT32 ui32CtxSwitchSize,
+		IMG_PBYTE pCtxSwitch_Regs,
+		IMG_UINT32 ui32Priority,
+		RGX_COMMON_CONTEXT_INFO *psInfo,
+		RGX_SERVER_RC_TA_DATA *psTAData,
+		IMG_UINT32 ui32CCBAllocSizeLog2,
+		IMG_UINT32 ui32CCBMaxAllocSizeLog2)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_TACTX_STATE *psContextState;
+	PVRSRV_ERROR eError;
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	 */
+	PDUMPCOMMENT("Allocate RGX firmware TA context suspend state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_TACTX_STATE),
+			RGX_FWCOMCTX_ALLOCFLAGS,
+			"FwTAContextState",
+			&psTAData->psContextStateMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate firmware GPU context suspend state (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_tacontextsuspendalloc;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc,
+			(void **)&psContextState);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to map firmware render context state (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_suspendcpuvirtacquire;
+	}
+	psContextState->uTAReg_VDM_CALL_STACK_POINTER_Init = sVDMCallStackAddr.uiAddr;
+	OSDeviceMemCopy(&psContextState->sCtxSwitch_Regs, pCtxSwitch_Regs, ui32CtxSwitchSize);
+	DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc);
+
+	eError = FWCommonContextAllocate(psConnection,
+			psDeviceNode,
+			REQ_TYPE_TA,
+			RGXFWIF_DM_TA,
+			psAllocatedMemDesc,
+			ui32AllocatedOffset,
+			psFWMemContextMemDesc,
+			psTAData->psContextStateMemDesc,
+			ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2,
+			ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2,
+			ui32Priority,
+			psInfo,
+			&psTAData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to init TA fw common context (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_tacommoncontext;
+	}
+
+	/*
+	 * Dump the FW TA context suspend state buffer
+	 */
+#if defined(PDUMP)
+	PDUMPCOMMENT("Dump the TA context suspend state buffer");
+	DevmemPDumpLoadMem(psTAData->psContextStateMemDesc,
+			0,
+			sizeof(RGXFWIF_TACTX_STATE),
+			PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	psTAData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+	fail_tacommoncontext:
+	fail_suspendcpuvirtacquire:
+	DevmemFwFree(psDevInfo, psTAData->psContextStateMemDesc);
+	fail_tacontextsuspendalloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
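+/* Create the 3D firmware common context embedded in the render context:
+ * sizes the suspend-state buffer for the number of ISP store registers on
+ * this core, then allocates the common context. */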
+static
+PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection,
+		PVRSRV_DEVICE_NODE *psDeviceNode,
+		DEVMEM_MEMDESC *psAllocatedMemDesc,
+		IMG_UINT32 ui32AllocatedOffset,
+		DEVMEM_MEMDESC *psFWMemContextMemDesc,
+		IMG_UINT32 ui32Priority,
+		RGX_COMMON_CONTEXT_INFO *psInfo,
+		RGX_SERVER_RC_3D_DATA *ps3DData,
+		IMG_UINT32 ui32CCBAllocSizeLog2,
+		IMG_UINT32 ui32CCBMaxAllocSizeLog2)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR eError;
+	IMG_UINT	uiNumISPStoreRegs;
+	IMG_UINT	ui3DRegISPStateStoreSize = 0;
+
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	 */
+	PDUMPCOMMENT("Allocate RGX firmware 3D context suspend state");
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+	{
+		uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode,
+				RGX_FEATURE_NUM_RASTER_PIPES_IDX);
+	}
+	else
+	{
+		uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode,
+				RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX);
+	}
+
+	/* Size of the CS buffer */
+	/* Calculate the size of the 3DCTX ISP state */
+	ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) +
+			uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]);
+
+	eError = DevmemFwAllocate(psDevInfo,
+			ui3DRegISPStateStoreSize,
+			RGX_FWCOMCTX_ALLOCFLAGS,
+			"Fw3DContextState",
+			&ps3DData->psContextStateMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate firmware GPU context suspend state (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_3dcontextsuspendalloc;
+	}
+
+	eError = FWCommonContextAllocate(psConnection,
+			psDeviceNode,
+			REQ_TYPE_3D,
+			RGXFWIF_DM_3D,
+			psAllocatedMemDesc,
+			ui32AllocatedOffset,
+			psFWMemContextMemDesc,
+			ps3DData->psContextStateMemDesc,
+			ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2,
+			ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2,
+			ui32Priority,
+			psInfo,
+			&ps3DData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to init 3D fw common context (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_3dcommoncontext;
+	}
+
+	/*
+	 * Dump the FW 3D context suspend state buffer
+	 */
+	PDUMPCOMMENT("Dump the 3D context suspend state buffer");
+	DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc,
+			0,
+			sizeof(RGXFWIF_3DCTX_STATE),
+			PDUMP_FLAGS_CONTINUOUS);
+
+	ps3DData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+	fail_3dcommoncontext:
+	DevmemFwFree(psDevInfo, ps3DData->psContextStateMemDesc);
+	fail_3dcontextsuspendalloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+
+/*
+ * PVRSRVRGXCreateRenderContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA				*psConnection,
+		PVRSRV_DEVICE_NODE			*psDeviceNode,
+		IMG_UINT32					ui32Priority,
+		IMG_DEV_VIRTADDR			sVDMCallStackAddr,
+		IMG_UINT32					ui32FrameworkRegisterSize,
+		IMG_PBYTE					pabyFrameworkRegisters,
+		IMG_HANDLE					hMemCtxPrivData,
+		IMG_UINT32					ui32CtxSwitchSize,
+		IMG_PBYTE					pCtxSwitch_Regs,
+		IMG_UINT32					ui32StaticRendercontextStateSize,
+		IMG_PBYTE					pStaticRendercontextState,
+		IMG_UINT32					ui32PackedCCBSizeU8888,
+		RGX_SERVER_RENDER_CONTEXT	**ppsRenderContext)
+{
+	PVRSRV_ERROR				eError;
+	PVRSRV_RGXDEV_INFO 			*psDevInfo = psDeviceNode->pvDevice;
+	RGX_SERVER_RENDER_CONTEXT	*psRenderContext;
+	DEVMEM_MEMDESC				*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_COMMON_CONTEXT_INFO		sInfo;
+	RGXFWIF_FWRENDERCONTEXT		*psFWRenderContext;
+
+	*ppsRenderContext = NULL;
+
+	if (ui32CtxSwitchSize > RGXFWIF_TAREGISTERS_CSWITCH_SIZE)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psRenderContext = OSAllocZMem(sizeof(*psRenderContext));
+	if (psRenderContext == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psRenderContext->hLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock;
+	}
+#endif
+
+	psRenderContext->psDeviceNode = psDeviceNode;
+
+	/*
+		Create the FW render context; this has the TA and 3D FW common
+		contexts embedded within it.
+	 */
+	eError = DevmemFwAllocate(psDevInfo,
+			sizeof(RGXFWIF_FWRENDERCONTEXT),
+			RGX_FWCOMCTX_ALLOCFLAGS,
+			"FwRenderContext",
+			&psRenderContext->psFWRenderContextMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_fwrendercontext;
+	}
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	WorkEstInit(psDevInfo, &psRenderContext->sWorkEstData);
+#endif
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+			&psRenderContext->psCleanupSync,
+			"ta3d render context cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate cleanup sync (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_syncalloc;
+	}
+
+	/*
+	 * Create the FW framework buffer
+	 */
+	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+			&psRenderContext->psFWFrameworkMemDesc,
+			ui32FrameworkRegisterSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate firmware GPU framework state (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_frameworkcreate;
+	}
+
+	/* Copy the Framework client data into the framework buffer */
+	eError = PVRSRVRGXFrameworkCopyCommand(psRenderContext->psFWFrameworkMemDesc,
+			pabyFrameworkRegisters,
+			ui32FrameworkRegisterSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to populate the framework buffer (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_frameworkcopy;
+	}
+
+	sInfo.psFWFrameworkMemDesc = psRenderContext->psFWFrameworkMemDesc;
+
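+	/* ui32PackedCCBSizeU8888 carries four log2 CCB sizes packed one per byte
+	 * (assuming U32toU8_Unpack1..4 extract successive 8-bit fields): TA CCB
+	 * size, TA CCB max size, 3D CCB size, 3D CCB max size. A field value of
+	 * 0 selects the corresponding RGX_*_CCB_*SIZE_LOG2 default. */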
+	eError = _CreateTAContext(psConnection,
+			psDeviceNode,
+			psRenderContext->psFWRenderContextMemDesc,
+			offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),
+			psFWMemContextMemDesc,
+			sVDMCallStackAddr,
+			ui32CtxSwitchSize,
+			pCtxSwitch_Regs,
+			ui32Priority,
+			&sInfo,
+			&psRenderContext->sTAData,
+			U32toU8_Unpack1(ui32PackedCCBSizeU8888),
+			U32toU8_Unpack2(ui32PackedCCBSizeU8888));
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_tacontext;
+	}
+
+	eError = _Create3DContext(psConnection,
+			psDeviceNode,
+			psRenderContext->psFWRenderContextMemDesc,
+			offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),
+			psFWMemContextMemDesc,
+			ui32Priority,
+			&sInfo,
+			&psRenderContext->s3DData,
+			U32toU8_Unpack3(ui32PackedCCBSizeU8888),
+			U32toU8_Unpack4(ui32PackedCCBSizeU8888));
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_3dcontext;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+			(void **)&psFWRenderContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_acquire_cpu_mapping;
+	}
+
+	OSDeviceMemCopy(&psFWRenderContext->sStaticRendercontextState, pStaticRendercontextState, ui32StaticRendercontextStateSize);
+	DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc, 0, sizeof(RGXFWIF_FWRENDERCONTEXT), PDUMP_FLAGS_CONTINUOUS);
+	DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	psRenderContext->psBufferSyncContext =
+			pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+					"rogue-ta3d");
+	if (IS_ERR(psRenderContext->psBufferSyncContext))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed to create buffer_sync context (err=%ld)",
+				__func__, PTR_ERR(psRenderContext->psBufferSyncContext)));
+
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto fail_buffer_sync_context_create;
+	}
+#endif
+
+	SyncAddrListInit(&psRenderContext->sSyncAddrListTAFence);
+	SyncAddrListInit(&psRenderContext->sSyncAddrListTAUpdate);
+	SyncAddrListInit(&psRenderContext->sSyncAddrList3DFence);
+	SyncAddrListInit(&psRenderContext->sSyncAddrList3DUpdate);
+
+	{
+		PVRSRV_RGXDEV_INFO			*psDevInfo = psDeviceNode->pvDevice;
+
+		OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+		dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+		OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+	}
+
+	*ppsRenderContext = psRenderContext;
+	return PVRSRV_OK;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	fail_buffer_sync_context_create:
+#endif
+	fail_acquire_cpu_mapping:
+	_Destroy3DContext(&psRenderContext->s3DData,
+			psRenderContext->psDeviceNode,
+			psRenderContext->psCleanupSync);
+	fail_3dcontext:
+	_DestroyTAContext(&psRenderContext->sTAData,
+			psDeviceNode,
+			psRenderContext->psCleanupSync);
+	fail_tacontext:
+	fail_frameworkcopy:
+	DevmemFwFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
+	fail_frameworkcreate:
+	SyncPrimFree(psRenderContext->psCleanupSync);
+	fail_syncalloc:
+	DevmemFwFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
+	fail_fwrendercontext:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psRenderContext->hLock);
+	fail_lock:
+#endif
+	OSFreeMem(psRenderContext);
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+/*
+ * PVRSRVRGXDestroyRenderContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
+{
+	PVRSRV_ERROR				eError;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psRenderContext->psDeviceNode->pvDevice;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	RGXFWIF_FWRENDERCONTEXT	*psFWRenderContext;
+	IMG_UINT32 ui32WorkEstCCBSubmitted;
+#endif
+
+	/* Remove the node from the list before calling destroy: destroy, if
+	 * successful, will invalidate the node. It must be re-added if destroy
+	 * fails.
+	 */
+	OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+	dllist_remove_node(&(psRenderContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	pvr_buffer_sync_context_destroy(psRenderContext->psBufferSyncContext);
+#endif
+
+	/* Cleanup the TA if we haven't already */
+	if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0)
+	{
+		eError = _DestroyTAContext(&psRenderContext->sTAData,
+				psRenderContext->psDeviceNode,
+				psRenderContext->psCleanupSync);
+		if (eError == PVRSRV_OK)
+		{
+			psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE;
+		}
+		else
+		{
+			goto e0;
+		}
+	}
+
+	/* Cleanup the 3D if we haven't already */
+	if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0)
+	{
+		eError = _Destroy3DContext(&psRenderContext->s3DData,
+				psRenderContext->psDeviceNode,
+				psRenderContext->psCleanupSync);
+		if (eError == PVRSRV_OK)
+		{
+			psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE;
+		}
+		else
+		{
+			goto e0;
+		}
+	}
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+			(void **)&psFWRenderContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to map firmware render context (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto e0;
+	}
+
+	ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;
+
+	DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+
+	/* Check if all of the workload estimation CCB commands for this workload are read */
+	if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
+				__func__,
+				ui32WorkEstCCBSubmitted,
+				psRenderContext->sWorkEstData.ui32WorkEstCCBReceived));
+
+		eError = PVRSRV_ERROR_RETRY;
+		goto e0;
+	}
+#endif
+
+	/*
+		Only if both TA and 3D contexts have been cleaned up can we
+		free the shared resources
+	 */
+	if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE))
+	{
+		/* Free the framework buffer */
+		DevmemFwFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
+
+		/* Free the firmware render context */
+		DevmemFwFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
+
+		/* Free the cleanup sync */
+		SyncPrimFree(psRenderContext->psCleanupSync);
+
+		SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAFence);
+		SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAUpdate);
+		SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DFence);
+		SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		WorkEstDeInit(psDevInfo, &psRenderContext->sWorkEstData);
+#endif
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockDestroy(psRenderContext->hLock);
+#endif
+
+		OSFreeMem(psRenderContext);
+	}
+
+	return PVRSRV_OK;
+
+	e0:
+	OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+	return eError;
+}
+
+
+/* TODO: these used to be local on the stack, and we managed to blow the kernel
+ * stack. This 46-argument function needs to be reworked.
+ */
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+static RGX_CCB_CMD_HELPER_DATA gasTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS];
+static RGX_CCB_CMD_HELPER_DATA gas3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS];
+#endif
+
+/*
+ * PVRSRVRGXKickTA3DKM
+ */
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT	*psRenderContext,
+		IMG_UINT32					ui32ClientCacheOpSeqNum,
+		IMG_UINT32					ui32ClientTAFenceCount,
+		SYNC_PRIMITIVE_BLOCK		**apsClientTAFenceSyncPrimBlock,
+		IMG_UINT32					*paui32ClientTAFenceSyncOffset,
+		IMG_UINT32					*paui32ClientTAFenceValue,
+		IMG_UINT32					ui32ClientTAUpdateCount,
+		SYNC_PRIMITIVE_BLOCK		**apsClientTAUpdateSyncPrimBlock,
+		IMG_UINT32					*paui32ClientTAUpdateSyncOffset,
+		IMG_UINT32					*paui32ClientTAUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		IMG_UINT32					ui32ServerTASyncPrims,
+		IMG_UINT32					*paui32ServerTASyncFlags,
+		SERVER_SYNC_PRIMITIVE 		**pasServerTASyncs,
+#endif
+		IMG_UINT32					ui32Client3DFenceCount,
+		SYNC_PRIMITIVE_BLOCK		**apsClient3DFenceSyncPrimBlock,
+		IMG_UINT32					*paui32Client3DFenceSyncOffset,
+		IMG_UINT32					*paui32Client3DFenceValue,
+		IMG_UINT32					ui32Client3DUpdateCount,
+		SYNC_PRIMITIVE_BLOCK		**apsClient3DUpdateSyncPrimBlock,
+		IMG_UINT32					*paui32Client3DUpdateSyncOffset,
+		IMG_UINT32					*paui32Client3DUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		IMG_UINT32					ui32Server3DSyncPrims,
+		IMG_UINT32					*paui32Server3DSyncFlags,
+		SERVER_SYNC_PRIMITIVE 		**pasServer3DSyncs,
+#endif
+		SYNC_PRIMITIVE_BLOCK		*psPRFenceSyncPrimBlock,
+		IMG_UINT32					ui32PRFenceSyncOffset,
+		IMG_UINT32					ui32PRFenceValue,
+		PVRSRV_FENCE				iCheckTAFence,
+		PVRSRV_TIMELINE			iUpdateTATimeline,
+		PVRSRV_FENCE				*piUpdateTAFence,
+		IMG_CHAR					szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH],
+		PVRSRV_FENCE				iCheck3DFence,
+		PVRSRV_TIMELINE			iUpdate3DTimeline,
+		PVRSRV_FENCE				*piUpdate3DFence,
+		IMG_CHAR					szFenceName3D[PVRSRV_SYNC_NAME_LENGTH],
+		IMG_UINT32					ui32TACmdSize,
+		IMG_PBYTE					pui8TADMCmd,
+		IMG_UINT32					ui323DPRCmdSize,
+		IMG_PBYTE					pui83DPRDMCmd,
+		IMG_UINT32					ui323DCmdSize,
+		IMG_PBYTE					pui83DDMCmd,
+		IMG_UINT32					ui32ExtJobRef,
+		IMG_BOOL					bLastTAInScene,
+		IMG_BOOL					bKickTA,
+		IMG_BOOL					bKickPR,
+		IMG_BOOL					bKick3D,
+		IMG_BOOL					bAbort,
+		IMG_UINT32					ui32PDumpFlags,
+		RGX_RTDATA_CLEANUP_DATA	*psRTDataCleanup,
+		RGX_ZSBUFFER_DATA		*psZBuffer,
+		RGX_ZSBUFFER_DATA		*psSBuffer,
+		RGX_ZSBUFFER_DATA		*psMSAAScratchBuffer,
+		IMG_UINT32			ui32SyncPMRCount,
+		IMG_UINT32			*paui32SyncPMRFlags,
+		PMR				**ppsSyncPMRs,
+		IMG_UINT32			ui32RenderTargetSize,
+		IMG_UINT32			ui32NumberOfDrawCalls,
+		IMG_UINT32			ui32NumberOfIndices,
+		IMG_UINT32			ui32NumberOfMRTs,
+		IMG_UINT64			ui64DeadlineInus)
+{
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	/* if the bridge lock is present then we use the singular/global helper structures */
+	RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = gasTACmdHelperData;
+	RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = gas3DCmdHelperData;
+#else
+	/* if there is no bridge lock then we use the per-context helper structures */
+	RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = psRenderContext->asTACmdHelperData;
+	RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = psRenderContext->as3DCmdHelperData;
+#endif
+
+	IMG_UINT32				ui32TACmdCount = 0;
+	IMG_UINT32				ui323DCmdCount = 0;
+	IMG_UINT32				ui32TACmdOffset = 0;
+	IMG_UINT32				ui323DCmdOffset = 0;
+	RGXFWIF_UFO				sPRUFO;
+	IMG_UINT32				i;
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+	PVRSRV_ERROR			eError2;
+
+	PVRSRV_RGXDEV_INFO      *psDevInfo = FWCommonContextGetRGXDevInfo(psRenderContext->s3DData.psServerCommonContext);
+	IMG_UINT32              ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+	IMG_BOOL                bCCBStateOpen = IMG_FALSE;
+
+	IMG_UINT32				ui32ClientPRUpdateCount = 0;
+	PRGXFWIF_UFO_ADDR		*pauiClientPRUpdateUFOAddress = NULL;
+	IMG_UINT32				*paui32ClientPRUpdateValue = NULL;
+
+	PRGXFWIF_UFO_ADDR			*pauiClientTAFenceUFOAddress = NULL;
+	PRGXFWIF_UFO_ADDR			*pauiClientTAUpdateUFOAddress = NULL;
+	PRGXFWIF_UFO_ADDR			*pauiClient3DFenceUFOAddress = NULL;
+	PRGXFWIF_UFO_ADDR			*pauiClient3DUpdateUFOAddress = NULL;
+	PRGXFWIF_UFO_ADDR			uiPRFenceUFOAddress;
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	IMG_UINT64               uiCheckTAFenceUID = 0;
+	IMG_UINT64               uiCheck3DFenceUID = 0;
+	IMG_UINT64               uiUpdateTAFenceUID = 0;
+	IMG_UINT64               uiUpdate3DFenceUID = 0;
+#endif
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL) || defined(SUPPORT_BUFFER_SYNC)
+	IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE;
+#endif
+
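+	/* If both a PR and a 3D kick are requested but no separate PR command is
+	 * supplied, the partial render is folded into the 3D command. */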
+	IMG_BOOL bUseCombined3DAnd3DPR = bKickPR && bKick3D && !pui83DPRDMCmd;
+
+	PVRSRV_FENCE	iUpdateTAFence = PVRSRV_NO_FENCE;
+	PVRSRV_FENCE	iUpdate3DFence = PVRSRV_NO_FENCE;
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE;
+	IMG_UINT32 ui32TAFenceTimelineUpdateValue = 0;
+	IMG_UINT32 ui323DFenceTimelineUpdateValue = 0;
+
+	/*
+	 * Count of the number of TA and 3D update values (may differ from number of
+	 * TA and 3D updates later, as sync checkpoints do not need to specify a value)
+	 */
+	IMG_UINT32 ui32ClientPRUpdateValueCount = 0;
+	IMG_UINT32 ui32ClientTAUpdateValueCount = ui32ClientTAUpdateCount;
+	IMG_UINT32 ui32Client3DUpdateValueCount = ui32Client3DUpdateCount;
+	PSYNC_CHECKPOINT *apsFenceTASyncCheckpoints = NULL;				/*!< TA fence checkpoints */
+	PSYNC_CHECKPOINT *apsFence3DSyncCheckpoints = NULL;				/*!< 3D fence checkpoints */
+	IMG_UINT32 ui32FenceTASyncCheckpointCount = 0;
+	IMG_UINT32 ui32Fence3DSyncCheckpointCount = 0;
+	PSYNC_CHECKPOINT psUpdateTASyncCheckpoint = NULL;				/*!< TA update checkpoint (output) */
+	PSYNC_CHECKPOINT psUpdate3DSyncCheckpoint = NULL;				/*!< 3D update checkpoint (output) */
+	PVRSRV_CLIENT_SYNC_PRIM *psTAFenceTimelineUpdateSync = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL;
+	void *pvTAUpdateFenceFinaliseData = NULL;
+	void *pv3DUpdateFenceFinaliseData = NULL;
+
+	RGX_SYNC_DATA sTASyncData = {NULL};		/*!< Contains internal update syncs for TA */
+	RGX_SYNC_DATA s3DSyncData = {NULL};		/*!< Contains internal update syncs for 3D */
+
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+#if defined(SUPPORT_BUFFER_SYNC)
+	PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+	PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTA;
+	RGXFWIF_WORKEST_KICK_DATA sWorkloadKickData3D;
+	IMG_UINT32 ui32TACommandOffset = 0;
+	IMG_UINT32 ui323DCommandOffset = 0;
+	IMG_UINT32 ui32TACmdHeaderOffset = 0;
+	IMG_UINT32 ui323DCmdHeaderOffset = 0;
+	IMG_UINT32 ui323DFullRenderCommandOffset = 0;
+	IMG_UINT32 ui32TACmdOffsetWrapCheck = 0;
+	IMG_UINT32 ui323DCmdOffsetWrapCheck = 0;
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	if (unlikely(iUpdateTATimeline >= 0 && !piUpdateTAFence))
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	if (unlikely(iUpdate3DTimeline >= 0 && !piUpdate3DFence))
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#else /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+	if (unlikely(iUpdateTATimeline >= 0 || iUpdate3DTimeline >= 0))
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: Providing update timeline (TA=%d, 3D=%d) in non-supporting driver",
+				__func__, iUpdateTATimeline, iUpdate3DTimeline));
+	}
+	if (unlikely(iCheckTAFence >= 0 || iCheck3DFence >= 0))
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: Providing check fence (TA=%d, 3D=%d) in non-supporting driver",
+				__func__, iCheckTAFence, iCheck3DFence));
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+	/* Ensure the string is null-terminated (Required for safety) */
+	szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH-1] = '\0';
+	szFenceName3D[PVRSRV_SYNC_NAME_LENGTH-1] = '\0';
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	sWorkloadKickDataTA.ui64ReturnDataIndex = 0;
+	sWorkloadKickDataTA.ui64CyclesPrediction = 0;
+	sWorkloadKickData3D.ui64ReturnDataIndex = 0;
+	sWorkloadKickData3D.ui64CyclesPrediction = 0;
+#endif
+
+	CHKPT_DBG((PVR_DBG_ERROR,
+			   "%s: ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, "
+			   "ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d",
+			   __func__,
+			   ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+			   ui32Client3DFenceCount, ui32Client3DUpdateCount));
+	CHKPT_DBG((PVR_DBG_ERROR,
+			   "%s: ui32ServerTASyncPrims=%d, ui32Server3DSyncPrims=%d",
+			   __func__, ui32ServerTASyncPrims, ui32Server3DSyncPrims));
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psRenderContext->hLock);
+#endif
+
+	CHKPT_DBG((PVR_DBG_ERROR,
+			   "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAFence, %d fences)...",
+			   __func__, ui32ClientTAFenceCount));
+	eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAFence,
+			ui32ClientTAFenceCount,
+			apsClientTAFenceSyncPrimBlock,
+			paui32ClientTAFenceSyncOffset);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		goto err_populate_sync_addr_list_ta_fence;
+	}
+
+	if (ui32ClientTAFenceCount)
+	{
+		pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+	}
+
+	CHKPT_DBG((PVR_DBG_ERROR,
+			   "%s: pauiClientTAFenceUFOAddress=<%p> ",
+			   __func__, (void*)pauiClientTAFenceUFOAddress));
+
+	CHKPT_DBG((PVR_DBG_ERROR,
+			   "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAUpdate, %d updates)...",
+			   __func__, ui32ClientTAUpdateCount));
+	eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAUpdate,
+			ui32ClientTAUpdateCount,
+			apsClientTAUpdateSyncPrimBlock,
+			paui32ClientTAUpdateSyncOffset);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		goto err_populate_sync_addr_list_ta_update;
+	}
+
+	if (ui32ClientTAUpdateCount)
+	{
+		pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs;
+	}
+	CHKPT_DBG((PVR_DBG_ERROR,
+			   "%s: pauiClientTAUpdateUFOAddress=<%p> ",
+			   __func__, (void*)pauiClientTAUpdateUFOAddress));
+
+	CHKPT_DBG((PVR_DBG_ERROR,
+			   "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DFence, %d fences)...",
+			   __func__, ui32Client3DFenceCount));
+	eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DFence,
+			ui32Client3DFenceCount,
+			apsClient3DFenceSyncPrimBlock,
+			paui32Client3DFenceSyncOffset);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		goto err_populate_sync_addr_list_3d_fence;
+	}
+
+	if (ui32Client3DFenceCount)
+	{
+		pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+	}
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DFenceUFOAddress=<%p> ",
+			   __func__, (void*)pauiClient3DFenceUFOAddress));
+
+	CHKPT_DBG((PVR_DBG_ERROR,
+			   "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DUpdate, %d updates)...",
+			   __func__, ui32Client3DUpdateCount));
+	eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DUpdate,
+			ui32Client3DUpdateCount,
+			apsClient3DUpdateSyncPrimBlock,
+			paui32Client3DUpdateSyncOffset);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		goto err_populate_sync_addr_list_3d_update;
+	}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	if (ui32Client3DUpdateCount || (iUpdate3DTimeline != PVRSRV_NO_TIMELINE && piUpdate3DFence && bKick3D))
+#else
+	if (ui32Client3DUpdateCount)
+#endif
+	{
+		pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+	}
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DUpdateUFOAddress=<%p> ",
+			   __func__, (void*)pauiClient3DUpdateUFOAddress));
+
+	eError = SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock, ui32PRFenceSyncOffset, &uiPRFenceUFOAddress);
+
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		goto err_pr_fence_address;
+	}
+
+#if (ENABLE_TA3D_UFO_DUMP == 1)
+	{
+		IMG_UINT32 ii;
+		PRGXFWIF_UFO_ADDR *psTmpClientTAFenceUFOAddress = pauiClientTAFenceUFOAddress;
+		IMG_UINT32 *pui32TmpClientTAFenceValue = paui32ClientTAFenceValue;
+		PRGXFWIF_UFO_ADDR *psTmpClientTAUpdateUFOAddress = pauiClientTAUpdateUFOAddress;
+		IMG_UINT32 *pui32TmpClientTAUpdateValue = paui32ClientTAUpdateValue;
+		PRGXFWIF_UFO_ADDR *psTmpClient3DFenceUFOAddress = pauiClient3DFenceUFOAddress;
+		IMG_UINT32 *pui32TmpClient3DFenceValue = paui32Client3DFenceValue;
+		PRGXFWIF_UFO_ADDR *psTmpClient3DUpdateUFOAddress = pauiClient3DUpdateUFOAddress;
+		IMG_UINT32 *pui32TmpClient3DUpdateValue = paui32Client3DUpdateValue;
+
+		PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After populating sync prims ~~~",
+				 __func__));
+
+		/* Dump Fence syncs, Update syncs and PR Update syncs */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:",
+				 __func__, ui32ClientTAFenceCount));
+		for (ii=0; ii<ui32ClientTAFenceCount; ii++)
+		{
+			if (psTmpClientTAFenceUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+						 __func__, ii + 1, ui32ClientTAFenceCount,
+						 (void*)psTmpClientTAFenceUFOAddress,
+						 psTmpClientTAFenceUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
+						 __func__, ii+1, ui32ClientTAFenceCount,
+						 (void*)psTmpClientTAFenceUFOAddress,
+						 psTmpClientTAFenceUFOAddress->ui32Addr,
+						 *pui32TmpClientTAFenceValue,
+						 *pui32TmpClientTAFenceValue));
+				pui32TmpClientTAFenceValue++;
+			}
+			psTmpClientTAFenceUFOAddress++;
+		}
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:",
+				 __func__, ui32ClientTAUpdateCount));
+		for (ii=0; ii<ui32ClientTAUpdateCount; ii++)
+		{
+			if (psTmpClientTAUpdateUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+						 __func__, ii + 1, ui32ClientTAUpdateCount,
+						 (void*)psTmpClientTAUpdateUFOAddress,
+						 psTmpClientTAUpdateUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d",
+						 __func__, ii + 1, ui32ClientTAUpdateCount,
+						 (void*)psTmpClientTAUpdateUFOAddress,
+						 psTmpClientTAUpdateUFOAddress->ui32Addr,
+						 *pui32TmpClientTAUpdateValue));
+				pui32TmpClientTAUpdateValue++;
+			}
+			psTmpClientTAUpdateUFOAddress++;
+		}
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:",
+				 __func__, ui32Client3DFenceCount));
+		for (ii=0; ii<ui32Client3DFenceCount; ii++)
+		{
+			if (psTmpClient3DFenceUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+						 __func__, ii + 1, ui32Client3DFenceCount,
+						 (void*)psTmpClient3DFenceUFOAddress,
+						 psTmpClient3DFenceUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d",
+						 __func__, ii + 1, ui32Client3DFenceCount,
+						 (void*)psTmpClient3DFenceUFOAddress,
+						 psTmpClient3DFenceUFOAddress->ui32Addr,
+						 *pui32TmpClient3DFenceValue));
+				pui32TmpClient3DFenceValue++;
+			}
+			psTmpClient3DFenceUFOAddress++;
+		}
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:",
+				 __func__, ui32Client3DUpdateCount));
+		for (ii=0; ii<ui32Client3DUpdateCount; ii++)
+		{
+			if (psTmpClient3DUpdateUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+						 __func__, ii + 1, ui32Client3DUpdateCount,
+						 (void*)psTmpClient3DUpdateUFOAddress,
+						 psTmpClient3DUpdateUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d",
+						 __func__, ii+1, ui32Client3DUpdateCount,
+						 (void*)psTmpClient3DUpdateUFOAddress,
+						 psTmpClient3DUpdateUFOAddress->ui32Addr,
+						 *pui32TmpClient3DUpdateValue));
+				pui32TmpClient3DUpdateValue++;
+			}
+			psTmpClient3DUpdateUFOAddress++;
+		}
+	}
+#endif
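+	/*
+	 * Illustrative sketch (not part of the driver): the dump above branches on
+	 * bit 0 of each UFO firmware address. When set, the entry is a sync
+	 * checkpoint (checked against PVRSRV_SYNC_CHECKPOINT_SIGNALLED, with no
+	 * entry in the value array); when clear, it is a sync prim whose
+	 * check/update value comes from the value array. A sketch of that decode,
+	 * assuming only the LSB-tagging convention visible above; the helper name
+	 * is hypothetical.
+	 */
+#if 0	/* illustrative sketch, not driver code */
+	static IMG_BOOL UfoAddrIsSyncCheckpoint(IMG_UINT32 ui32FWAddr)
+	{
+		/* Bit 0 is a tag, not part of the address proper */
+		return (ui32FWAddr & 0x1U) ? IMG_TRUE : IMG_FALSE;
+	}
+#endif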
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	/* Sanity check the server fences */
+	for (i=0;i<ui32ServerTASyncPrims;i++)
+	{
+		if (unlikely(!(paui32ServerTASyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on TA) must fence",
+					 __func__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psRenderContext->hLock);
+#endif
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+
+	for (i=0;i<ui32Server3DSyncPrims;i++)
+	{
+		if (unlikely(!(paui32Server3DSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on 3D) must fence",
+					 __func__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psRenderContext->hLock);
+#endif
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+#endif
+
+	/*
+		Sanity check we have a PR kick if there are client or server fences
+	 */
+	if (unlikely(!bKickPR && ((ui32Client3DFenceCount != 0) || (ui32Server3DSyncPrims != 0))))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: 3D fence (client or server) passed without a PR kick",
+				 __func__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psRenderContext->hLock);
+#endif
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32SyncPMRCount)
+	{
+#if defined(SUPPORT_BUFFER_SYNC)
+		int err;
+
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   Calling pvr_buffer_sync_resolve_and_create_fences",
+				   __func__));
+		err = pvr_buffer_sync_resolve_and_create_fences(psRenderContext->psBufferSyncContext,
+				psRenderContext->psDeviceNode->hSyncCheckpointContext,
+				ui32SyncPMRCount,
+				ppsSyncPMRs,
+				paui32SyncPMRFlags,
+				&ui32BufferFenceSyncCheckpointCount,
+				&apsBufferFenceSyncCheckpoints,
+				&psBufferUpdateSyncCheckpoint,
+				&psBufferSyncData);
+		if (unlikely(err))
+		{
+			switch (err)
+			{
+				case -EINTR:
+					eError = PVRSRV_ERROR_RETRY;
+					break;
+				case -ENOMEM:
+					eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					break;
+				default:
+					eError = PVRSRV_ERROR_INVALID_PARAMS;
+					break;
+			}
+
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "%s:   pvr_buffer_sync_resolve_and_create_fences failed (%d)",
+					 __func__, eError));
+			}
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psRenderContext->hLock);
+#endif
+			return eError;
+		}
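+		/*
+		 * Illustrative sketch (not part of the driver): the switch above folds
+		 * the Linux errno values returned by
+		 * pvr_buffer_sync_resolve_and_create_fences() into PVRSRV error codes:
+		 * interrupted waits become retriable, allocation failures keep their
+		 * meaning, and anything else is reported as a bad parameter. The same
+		 * mapping as a standalone helper; the helper name is hypothetical.
+		 */
+#if 0	/* illustrative sketch, not driver code */
+		static PVRSRV_ERROR BufferSyncErrToPVRSRV(int err)
+		{
+			switch (err)
+			{
+				case -EINTR:  return PVRSRV_ERROR_RETRY;          /* interrupted: caller may retry */
+				case -ENOMEM: return PVRSRV_ERROR_OUT_OF_MEMORY;  /* allocation failure */
+				default:      return PVRSRV_ERROR_INVALID_PARAMS; /* anything else */
+			}
+		}
+#endif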
+
+#if !defined(SUPPORT_STRIP_RENDERING)
+		/* Append buffer sync fences to TA fences */
+		if (ui32BufferFenceSyncCheckpointCount > 0)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR,
+					   "%s:   Append %d buffer sync checkpoints to TA Fence "
+					   "(&psRenderContext->sSyncAddrListTAFence=<%p>, "
+					   "pauiClientTAFenceUFOAddress=<%p>)...",
+					   __func__,
+					   ui32BufferFenceSyncCheckpointCount,
+					   (void*)&psRenderContext->sSyncAddrListTAFence ,
+					   (void*)pauiClientTAFenceUFOAddress));
+			SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+					ui32BufferFenceSyncCheckpointCount,
+					apsBufferFenceSyncCheckpoints);
+			if (!pauiClientTAFenceUFOAddress)
+			{
+				pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+			}
+			if (ui32ClientTAFenceCount == 0)
+			{
+				bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+			}
+			ui32ClientTAFenceCount += ui32BufferFenceSyncCheckpointCount;
+		}
+#else
+		/* Append buffer sync fences to 3D fences */
+		if (ui32BufferFenceSyncCheckpointCount > 0)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR,
+					   "%s:   Append %d buffer sync checkpoints to 3D Fence "
+					   "(&psRenderContext->sSyncAddrList3DFence=<%p>, "
+					   "pauiClient3DFenceUFOAddress=<%p>)...",
+					   __func__,
+					   ui32BufferFenceSyncCheckpointCount,
+					   (void*)&psRenderContext->sSyncAddrList3DFence,
+					   (void*)pauiClient3DFenceUFOAddress));
+			SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrList3DFence,
+					ui32BufferFenceSyncCheckpointCount,
+					apsBufferFenceSyncCheckpoints);
+			if (!pauiClient3DFenceUFOAddress)
+			{
+				pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+			}
+			ui32Client3DFenceCount += ui32BufferFenceSyncCheckpointCount;
+		}
+		PVR_UNREFERENCED_PARAMETER(bTAFenceOnSyncCheckpointsOnly);
+#endif
+
+		if (psBufferUpdateSyncCheckpoint)
+		{
+			/* If we have a 3D kick append update to the 3D updates else append to the PR update */
+			if (bKick3D)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s:   Append 1 buffer sync checkpoint<%p> to 3D Update"
+						   " (&psRenderContext->sSyncAddrList3DUpdate=<%p>,"
+						   " pauiClient3DUpdateUFOAddress=<%p>)...",
+						   __func__,
+						   (void*)psBufferUpdateSyncCheckpoint,
+						   (void*)&psRenderContext->sSyncAddrList3DUpdate,
+						   (void*)pauiClient3DUpdateUFOAddress));
+				/* Append buffer sync update to 3D updates */
+				SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+						1,
+						&psBufferUpdateSyncCheckpoint);
+				if (!pauiClient3DUpdateUFOAddress)
+				{
+					pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+				}
+				ui32Client3DUpdateCount++;
+			}
+			else
+			{
+				CHKPT_DBG((PVR_DBG_ERROR,
+				           "%s:   Append 1 buffer sync checkpoint<%p> to PR Update"
+						   " (&psRenderContext->sSyncAddrList3DUpdate=<%p>,"
+						   " pauiClientPRUpdateUFOAddress=<%p>)...",
+				           __func__,
+						   (void*)psBufferUpdateSyncCheckpoint,
+						   (void*)&psRenderContext->sSyncAddrList3DUpdate,
+						   (void*)pauiClientPRUpdateUFOAddress));
+				/* Attach update to the 3D (used for PR) Updates */
+				SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+						1,
+						&psBufferUpdateSyncCheckpoint);
+				if (!pauiClientPRUpdateUFOAddress)
+				{
+					pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+				}
+				ui32ClientPRUpdateCount++;
+			}
+		}
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   (after buffer_sync) ui32ClientTAFenceCount=%d, "
+				   "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, "
+				   "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,",
+				   __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+				   ui32Client3DFenceCount, ui32Client3DUpdateCount,
+				   ui32ClientPRUpdateCount));
+
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Buffer sync not supported but got %u buffers",
+				 __func__, ui32SyncPMRCount));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psRenderContext->hLock);
+#endif
+		return PVRSRV_ERROR_INVALID_PARAMS;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+	}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	/*
+	 * The hardware requires a PR to be submitted if there is a TA (otherwise
+	 * it can wedge if we run out of PB space with no PR to run)
+	 *
+	 * If we only have a TA, attach native checks to the TA and updates to the PR
+	 * If we have a TA and 3D, attach checks to TA, updates to 3D
+	 * If we only have a 3D, attach checks and updates to the 3D
+	 *
+	 * Note that 'updates' includes the cleanup syncs for 'check' fence FDs, in
+	 * addition to the update fence FD (if supplied)
+	 *
+	 * Currently, the client driver never kicks only the 3D, so we don't
+	 * support that for the time being.
+	 */
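+	/*
+	 * Illustrative sketch (not part of the driver): the attachment policy
+	 * described above, condensed into two decision helpers, under the stated
+	 * assumption that 3D-only kicks are not supported. The enum and helper
+	 * names are hypothetical.
+	 */
+#if 0	/* illustrative sketch, not driver code */
+	typedef enum { ATTACH_TA, ATTACH_3D, ATTACH_PR } ATTACH_POINT;
+
+	/* Checks gate the first stage that runs */
+	static ATTACH_POINT AttachChecks(IMG_BOOL bKickTA)
+	{
+		return bKickTA ? ATTACH_TA : ATTACH_3D;
+	}
+
+	/* Updates go to the 3D when it runs, otherwise to the PR
+	 * (which executes on the 3D CCB after a TA-only kick) */
+	static ATTACH_POINT AttachUpdates(IMG_BOOL bKick3D)
+	{
+		return bKick3D ? ATTACH_3D : ATTACH_PR;
+	}
+#endif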
+	if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 ||
+			iCheck3DFence >= 0 || iUpdate3DTimeline >= 0)
+	{
+		PRGXFWIF_UFO_ADDR	*pauiClientTAIntUpdateUFOAddress = NULL;
+		PRGXFWIF_UFO_ADDR	*pauiClient3DIntUpdateUFOAddress = NULL;
+
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s: [TA] iCheckFence = %d, iUpdateTimeline = %d",
+				   __func__, iCheckTAFence, iUpdateTATimeline));
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s: [3D] iCheckFence = %d, iUpdateTimeline = %d",
+				   __func__, iCheck3DFence, iUpdate3DTimeline));
+
+		if (iCheckTAFence != PVRSRV_NO_FENCE)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR,
+					   "%s: calling SyncCheckpointResolveFence[TA] (iCheckFence=%d), "
+					   "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...",
+					   __func__,
+					   iCheckTAFence,
+					   (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+			/* Resolve the sync checkpoints that make up the input fence */
+			eError = SyncCheckpointResolveFence(psRenderContext->psDeviceNode->hSyncCheckpointContext,
+					iCheckTAFence,
+					&ui32FenceTASyncCheckpointCount,
+					&apsFenceTASyncCheckpoints,
+					&uiCheckTAFenceUID);
+			if (unlikely(eError != PVRSRV_OK))
+			{
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s: ...done, returned ERROR (eError=%d)",
+						   __func__, eError));
+				goto fail_resolve_input_fence;
+			}
+
+			CHKPT_DBG((PVR_DBG_ERROR,
+					   "%s: ...done, fence %d contained %d "
+					   "checkpoints (apsFenceSyncCheckpoints=<%p>)",
+					   __func__,
+					   iCheckTAFence, ui32FenceTASyncCheckpointCount,
+					   (void*)apsFenceTASyncCheckpoints));
+#if defined(TA3D_CHECKPOINT_DEBUG)
+			if (apsFenceTASyncCheckpoints)
+			{
+				_DebugSyncCheckpoints(__func__, "TA", apsFenceTASyncCheckpoints, ui32FenceTASyncCheckpointCount);
+			}
+#endif
+		}
+
+		if (iCheck3DFence != PVRSRV_NO_FENCE)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR,
+					   "%s: calling SyncCheckpointResolveFence[3D] (iCheckFence=%d), "
+					   "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...",
+					   __func__,
+					   iCheck3DFence,
+					   (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+			/* Resolve the sync checkpoints that make up the input fence */
+			eError = SyncCheckpointResolveFence(psRenderContext->psDeviceNode->hSyncCheckpointContext,
+					iCheck3DFence,
+					&ui32Fence3DSyncCheckpointCount,
+					&apsFence3DSyncCheckpoints,
+					&uiCheck3DFenceUID);
+			if (unlikely(eError != PVRSRV_OK))
+			{
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s: ...done, returned ERROR (eError=%d)",
+						   __func__, eError));
+				goto fail_resolve_input_fence;
+			}
+
+			CHKPT_DBG((PVR_DBG_ERROR,
+					   "%s: ...done, fence %d contained %d "
+					   "checkpoints (apsFenceSyncCheckpoints=<%p>)",
+					   __func__, iCheck3DFence, ui32Fence3DSyncCheckpointCount,
+					   (void*)apsFence3DSyncCheckpoints));
+#if defined(TA3D_CHECKPOINT_DEBUG)
+			if (apsFence3DSyncCheckpoints)
+			{
+				_DebugSyncCheckpoints(__func__, "3D", apsFence3DSyncCheckpoints, ui32Fence3DSyncCheckpointCount);
+			}
+#endif
+		}
+
+		{
+			/* Create the output fence for TA (if required) */
+			if (iUpdateTATimeline != PVRSRV_NO_TIMELINE)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s: calling SyncCheckpointCreateFence[TA] "
+						   "(iUpdateFence=%d, iUpdateTimeline=%d, "
+						   "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)",
+						   __func__, iUpdateTAFence, iUpdateTATimeline,
+						   (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+				eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode,
+						szFenceNameTA,
+						iUpdateTATimeline,
+						psRenderContext->psDeviceNode->hSyncCheckpointContext,
+						&iUpdateTAFence,
+						&uiUpdateTAFenceUID,
+						&pvTAUpdateFenceFinaliseData,
+						&psUpdateTASyncCheckpoint,
+						(void*)&psTAFenceTimelineUpdateSync,
+						&ui32TAFenceTimelineUpdateValue);
+				if (unlikely(eError != PVRSRV_OK))
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							"%s:   SyncCheckpointCreateFence[TA] failed (%s)",
+							__func__,
+							PVRSRVGetErrorString(eError)));
+					goto fail_create_output_fence;
+				}
+
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s: returned from SyncCheckpointCreateFence[TA] "
+						   "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, "
+						   "ui32FenceTimelineUpdateValue=0x%x)",
+						   __func__, iUpdateTAFence,
+						   (void*)psTAFenceTimelineUpdateSync,
+						   ui32TAFenceTimelineUpdateValue));
+
+				/* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */
+				pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint);
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x",
+						   __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr));
+			}
+
+			/* Append the sync prim update for the TA timeline (if required) */
+			if (psTAFenceTimelineUpdateSync)
+			{
+				sTASyncData.ui32ClientUpdateCount = ui32ClientTAUpdateCount;
+				sTASyncData.ui32ClientUpdateValueCount = ui32ClientTAUpdateValueCount;
+				sTASyncData.ui32ClientPRUpdateValueCount = (bKick3D) ? 0 : ui32ClientPRUpdateValueCount;
+				sTASyncData.paui32ClientUpdateValue = paui32ClientTAUpdateValue;
+
+				eError = RGXSyncAppendTimelineUpdate(ui32TAFenceTimelineUpdateValue,
+						&psRenderContext->sSyncAddrListTAUpdate,
+						(bKick3D) ? NULL : &psRenderContext->sSyncAddrList3DUpdate,
+								psTAFenceTimelineUpdateSync,
+								&sTASyncData,
+								bKick3D);
+				if (unlikely(eError != PVRSRV_OK))
+				{
+					goto fail_alloc_update_values_mem_TA;
+				}
+
+				paui32ClientTAUpdateValue = sTASyncData.paui32ClientUpdateValue;
+				ui32ClientTAUpdateValueCount = sTASyncData.ui32ClientUpdateValueCount;
+				pauiClientTAUpdateUFOAddress = sTASyncData.pauiClientUpdateUFOAddress;
+				ui32ClientTAUpdateCount = sTASyncData.ui32ClientUpdateCount;
+			}
+
+			/* Create the output fence for 3D (if required) */
+			if (iUpdate3DTimeline != PVRSRV_NO_TIMELINE)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s: calling SyncCheckpointCreateFence[3D] "
+						   "(iUpdateFence=%d, iUpdateTimeline=%d, "
+						   "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)",
+						   __func__, iUpdate3DFence, iUpdate3DTimeline,
+						   (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+				eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode,
+						szFenceName3D,
+						iUpdate3DTimeline,
+						psRenderContext->psDeviceNode->hSyncCheckpointContext,
+						&iUpdate3DFence,
+						&uiUpdate3DFenceUID,
+						&pv3DUpdateFenceFinaliseData,
+						&psUpdate3DSyncCheckpoint,
+						(void*)&ps3DFenceTimelineUpdateSync,
+						&ui323DFenceTimelineUpdateValue);
+				if (unlikely(eError != PVRSRV_OK))
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							"%s:   SyncCheckpointCreateFence[3D] failed (%s)",
+							__func__,
+							PVRSRVGetErrorString(eError)));
+					goto fail_create_output_fence;
+				}
+
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s: returned from SyncCheckpointCreateFence[3D] "
+						   "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, "
+						   "ui32FenceTimelineUpdateValue=0x%x)",
+						   __func__, iUpdate3DFence,
+						   (void*)ps3DFenceTimelineUpdateSync,
+						   ui323DFenceTimelineUpdateValue));
+
+				/* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */
+				pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint);
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x",
+						   __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr));
+			}
+
+			/* Append the sync prim update for the 3D timeline (if required) */
+			if (ps3DFenceTimelineUpdateSync)
+			{
+				s3DSyncData.ui32ClientUpdateCount = ui32Client3DUpdateCount;
+				s3DSyncData.ui32ClientUpdateValueCount = ui32Client3DUpdateValueCount;
+				s3DSyncData.ui32ClientPRUpdateValueCount = ui32ClientPRUpdateValueCount;
+				s3DSyncData.paui32ClientUpdateValue = paui32Client3DUpdateValue;
+
+				eError = RGXSyncAppendTimelineUpdate(ui323DFenceTimelineUpdateValue,
+						&psRenderContext->sSyncAddrList3DUpdate,
+						&psRenderContext->sSyncAddrList3DUpdate,	/*!< PR update: is this required? */
+						ps3DFenceTimelineUpdateSync,
+						&s3DSyncData,
+						bKick3D);
+				if (unlikely(eError != PVRSRV_OK))
+				{
+					goto fail_alloc_update_values_mem_3D;
+				}
+
+				/* FIXME: can this be optimised? */
+				paui32Client3DUpdateValue = s3DSyncData.paui32ClientUpdateValue;
+				ui32Client3DUpdateValueCount = s3DSyncData.ui32ClientUpdateValueCount;
+				pauiClient3DUpdateUFOAddress = s3DSyncData.pauiClientUpdateUFOAddress;
+				ui32Client3DUpdateCount = s3DSyncData.ui32ClientUpdateCount;
+
+				if (!bKick3D)
+				{
+					paui32ClientPRUpdateValue = s3DSyncData.paui32ClientPRUpdateValue;
+					ui32ClientPRUpdateValueCount = s3DSyncData.ui32ClientPRUpdateValueCount;
+					pauiClientPRUpdateUFOAddress = s3DSyncData.pauiClientPRUpdateUFOAddress;
+					ui32ClientPRUpdateCount = s3DSyncData.ui32ClientPRUpdateCount;
+				}
+			}
+
+			/*
+			 * The hardware requires a PR to be submitted if there is a TA OOM.
+			 * If we only have a TA, attach native checks and updates to the TA
+			 * and 3D updates to the PR.
+			 * If we have a TA and 3D, attach the native TA checks and updates
+			 * to the TA and similarly for the 3D.
+			 * Note that 'updates' includes the cleanup syncs for 'check' fence
+			 * FDs, in addition to the update fence FD (if supplied).
+			 * Currently, the client driver never kicks only the 3D, so we don't
+			 * support that for the time being.
+			 */
+
+			{
+				if (bKickTA)
+				{
+					/* Attach checks and updates to TA */
+
+					/* Checks (from input fence) */
+					if (ui32FenceTASyncCheckpointCount > 0)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR,
+								   "%s:   Append %d sync checkpoints to TA Fence (apsFenceSyncCheckpoints=<%p>)...",
+								   __func__,
+								   ui32FenceTASyncCheckpointCount,
+								   (void*)apsFenceTASyncCheckpoints));
+						SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+								ui32FenceTASyncCheckpointCount,
+								apsFenceTASyncCheckpoints);
+						if (!pauiClientTAFenceUFOAddress)
+						{
+							pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+						}
+						CHKPT_DBG((PVR_DBG_ERROR,
+								   "%s:   {ui32ClientTAFenceCount was %d, now %d}",
+								   __func__, ui32ClientTAFenceCount,
+								   ui32ClientTAFenceCount + ui32FenceTASyncCheckpointCount));
+						if (ui32ClientTAFenceCount == 0)
+						{
+							bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+						}
+						ui32ClientTAFenceCount += ui32FenceTASyncCheckpointCount;
+					}
+					CHKPT_DBG((PVR_DBG_ERROR,
+							   "%s:   {ui32ClientTAFenceCount now %d}",
+							   __func__, ui32ClientTAFenceCount));
+
+					if (psUpdateTASyncCheckpoint)
+					{
+						/* Update (from output fence) */
+						CHKPT_DBG((PVR_DBG_ERROR,
+								   "%s:   Append 1 sync checkpoint<%p> (ID=%d) to TA Update...",
+								   __func__, (void*)psUpdateTASyncCheckpoint,
+								   SyncCheckpointGetId(psUpdateTASyncCheckpoint)));
+						SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAUpdate,
+								1,
+								&psUpdateTASyncCheckpoint);
+						if (!pauiClientTAUpdateUFOAddress)
+						{
+							pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs;
+						}
+						ui32ClientTAUpdateCount++;
+					}
+
+					if (!bKick3D && psUpdate3DSyncCheckpoint)
+					{
+						/* Attach update to the 3D (used for PR) Updates */
+						CHKPT_DBG((PVR_DBG_ERROR,
+								   "%s:   Append 1 sync checkpoint<%p> (ID=%d) to 3D(PR) Update...",
+								   __func__, (void*)psUpdate3DSyncCheckpoint,
+								   SyncCheckpointGetId(psUpdate3DSyncCheckpoint)));
+						SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+								1,
+								&psUpdate3DSyncCheckpoint);
+						if (!pauiClientPRUpdateUFOAddress)
+						{
+							pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+						}
+						ui32ClientPRUpdateCount++;
+					}
+				}
+
+				if (bKick3D)
+				{
+					/* Attach checks and updates to the 3D */
+
+					/* Checks (from input fence) */
+					if (ui32Fence3DSyncCheckpointCount > 0)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR,
+								   "%s:   Append %d sync checkpoints to 3D Fence...",
+								   __func__, ui32Fence3DSyncCheckpointCount));
+						SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence,
+								ui32Fence3DSyncCheckpointCount,
+								apsFence3DSyncCheckpoints);
+						if (!pauiClient3DFenceUFOAddress)
+						{
+							pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+						}
+						CHKPT_DBG((PVR_DBG_ERROR,
+								   "%s:   {ui32Client3DFenceCount was %d, now %d}",
+								   __func__, ui32Client3DFenceCount,
+								   ui32Client3DFenceCount + ui32Fence3DSyncCheckpointCount));
+						if (ui32Client3DFenceCount == 0)
+						{
+							b3DFenceOnSyncCheckpointsOnly = IMG_TRUE;
+						}
+						ui32Client3DFenceCount += ui32Fence3DSyncCheckpointCount;
+					}
+					CHKPT_DBG((PVR_DBG_ERROR,
+							   "%s:   {ui32Client3DFenceCount now %d}",
+							   __func__, ui32Client3DFenceCount));
+
+					if (psUpdate3DSyncCheckpoint)
+					{
+						/* Update (from output fence) */
+						CHKPT_DBG((PVR_DBG_ERROR,
+								   "%s:   Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...",
+								   __func__, (void*)psUpdate3DSyncCheckpoint,
+								   SyncCheckpointGetId(psUpdate3DSyncCheckpoint)));
+						SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+								1,
+								&psUpdate3DSyncCheckpoint);
+						if (!pauiClient3DUpdateUFOAddress)
+						{
+							pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+						}
+						ui32Client3DUpdateCount++;
+					}
+				}
+
+				/*
+				 * Relocate sync check points from the 3D fence that are
+				 * external to the current process, to the TA fence.
+				 * This avoids a sync lockup when dependent renders are
+				 * submitted out-of-order and a PR must be scheduled.
+				 */
+				{
+					IMG_PID uiCurrentProcess = OSGetCurrentClientProcessIDKM();
+
+					/* Search for external timeline dependencies */
+					CHKPT_DBG((PVR_DBG_ERROR,
+							   "%s: Checking 3D fence for external sync points (%d)...",
+							   __func__, ui32Fence3DSyncCheckpointCount));
+
+					for (i=0; i<ui32Fence3DSyncCheckpointCount; i++)
+					{
+						/* Check to see if the checkpoint is on a TL owned by
+						 * another process.
+						 */
+						if (SyncCheckpointGetCreator(apsFence3DSyncCheckpoints[i]) != uiCurrentProcess)
+						{
+							/* The 3D sync point represents a cross-process
+							 * dependency, so copy it to the TA command fence. */
+							CHKPT_DBG((PVR_DBG_ERROR,
+									   "%s:   Append 1 sync checkpoint<%p> (ID=%d) to TA Fence...",
+									   __func__, (void*)apsFence3DSyncCheckpoints[i],
+									   SyncCheckpointGetId(apsFence3DSyncCheckpoints[i])));
+
+							SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+														  1,
+														  &apsFence3DSyncCheckpoints[i]);
+
+							if (!pauiClientTAFenceUFOAddress)
+							{
+								pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+							}
+
+							CHKPT_DBG((PVR_DBG_ERROR,
+									   "%s:   {ui32ClientTAFenceCount was %d, now %d}",
+									   __func__,
+									   ui32ClientTAFenceCount,
+									   ui32ClientTAFenceCount + 1));
+
+							if (ui32ClientTAFenceCount == 0)
+							{
+								bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+							}
+
+							ui32ClientTAFenceCount++;
+						}
+					}
+				}
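+				/*
+				 * Illustrative sketch (not part of the driver): the loop above
+				 * relocates any 3D-fence checkpoint created by another process
+				 * onto the TA fence as well, so a PR scheduled for an
+				 * out-of-order dependent render cannot run ahead of the
+				 * cross-process work it depends on. The relocation predicate
+				 * as a sketch; the helper name is hypothetical,
+				 * SyncCheckpointGetCreator() is the call used above.
+				 */
+#if 0	/* illustrative sketch, not driver code */
+				static IMG_BOOL IsCrossProcessCheckpoint(PSYNC_CHECKPOINT psCheckpoint,
+				                                         IMG_PID uiCurrentProcess)
+				{
+					/* A checkpoint whose creator is another process marks a
+					 * cross-process dependency and must also gate the TA */
+					return (SyncCheckpointGetCreator(psCheckpoint) != uiCurrentProcess)
+					        ? IMG_TRUE : IMG_FALSE;
+				}
+#endif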
+
+				CHKPT_DBG((PVR_DBG_ERROR,
+						   "%s:   (after pvr_sync) ui32ClientTAFenceCount=%d, "
+						   "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, "
+						   "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,",
+						   __func__,
+						   ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+						   ui32Client3DFenceCount, ui32Client3DUpdateCount,
+						   ui32ClientPRUpdateCount));
+			}
+		}
+
+		if (ui32ClientTAFenceCount)
+		{
+			PVR_ASSERT(pauiClientTAFenceUFOAddress);
+			if (!bTAFenceOnSyncCheckpointsOnly)
+			{
+				PVR_ASSERT(paui32ClientTAFenceValue);
+			}
+		}
+		if (ui32ClientTAUpdateCount)
+		{
+			PVR_ASSERT(pauiClientTAUpdateUFOAddress);
+			if (ui32ClientTAUpdateValueCount > 0)
+			{
+				PVR_ASSERT(paui32ClientTAUpdateValue);
+			}
+		}
+		if (ui32Client3DFenceCount)
+		{
+			PVR_ASSERT(pauiClient3DFenceUFOAddress);
+			if (!b3DFenceOnSyncCheckpointsOnly)
+			{
+				PVR_ASSERT(paui32Client3DFenceValue);
+			}
+		}
+		if (ui32Client3DUpdateCount)
+		{
+			PVR_ASSERT(pauiClient3DUpdateUFOAddress);
+			if (ui32Client3DUpdateValueCount > 0)
+			{
+				PVR_ASSERT(paui32Client3DUpdateValue);
+			}
+		}
+		if (ui32ClientPRUpdateCount)
+		{
+			PVR_ASSERT(pauiClientPRUpdateUFOAddress);
+			if (ui32ClientPRUpdateValueCount > 0)
+			{
+				PVR_ASSERT(paui32ClientPRUpdateValue);
+			}
+		}
+
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+	CHKPT_DBG((PVR_DBG_ERROR,
+			   "%s: ui32ClientTAFenceCount=%d, pauiClientTAFenceUFOAddress=<%p>",
+			   __func__,
+			   ui32ClientTAFenceCount,
+			   (void*)pauiClientTAFenceUFOAddress));
+	CHKPT_DBG((PVR_DBG_ERROR,
+			   "%s: ui32ClientTAUpdateCount=%d, pauiClientTAUpdateUFOAddress=<%p>",
+			   __func__,
+			   ui32ClientTAUpdateCount,
+			   (void*)pauiClientTAUpdateUFOAddress));
+#if (ENABLE_TA3D_UFO_DUMP == 1)
+	{
+		IMG_UINT32 ii;
+		PRGXFWIF_UFO_ADDR *psTmpClientTAFenceUFOAddress = pauiClientTAFenceUFOAddress;
+		IMG_UINT32 *pui32TmpClientTAFenceValue = paui32ClientTAFenceValue;
+		PRGXFWIF_UFO_ADDR *psTmpClientTAUpdateUFOAddress = pauiClientTAUpdateUFOAddress;
+		IMG_UINT32 *pui32TmpClientTAUpdateValue = paui32ClientTAUpdateValue;
+		PRGXFWIF_UFO_ADDR *psTmpClient3DFenceUFOAddress = pauiClient3DFenceUFOAddress;
+		IMG_UINT32 *pui32TmpClient3DFenceValue = paui32Client3DFenceValue;
+		PRGXFWIF_UFO_ADDR *psTmpClient3DUpdateUFOAddress = pauiClient3DUpdateUFOAddress;
+		IMG_UINT32 *pui32TmpClient3DUpdateValue = paui32Client3DUpdateValue;
+
+		PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After appending sync checkpoints ",
+				 __func__));
+
+		/* Dump Fence syncs, Update syncs and PR Update syncs */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:", __func__, ui32ClientTAFenceCount));
+		for (ii=0; ii<ui32ClientTAFenceCount; ii++)
+		{
+			if (psTmpClientTAFenceUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+						 __func__, ii + 1, ui32ClientTAFenceCount,
+						 (void*)psTmpClientTAFenceUFOAddress,
+						 psTmpClientTAFenceUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
+						 __func__, ii + 1, ui32ClientTAFenceCount,
+						 (void*)psTmpClientTAFenceUFOAddress,
+						 psTmpClientTAFenceUFOAddress->ui32Addr,
+						 *pui32TmpClientTAFenceValue,
+						 *pui32TmpClientTAFenceValue));
+				pui32TmpClientTAFenceValue++;
+			}
+			psTmpClientTAFenceUFOAddress++;
+		}
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:",
+				 __func__, ui32ClientTAUpdateCount));
+		for (ii=0; ii<ui32ClientTAUpdateCount; ii++)
+		{
+			if (psTmpClientTAUpdateUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+						 __func__, ii + 1, ui32ClientTAUpdateCount,
+						 (void*)psTmpClientTAUpdateUFOAddress,
+						 psTmpClientTAUpdateUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)",
+						 __func__, ii + 1, ui32ClientTAUpdateCount,
+						 (void*)psTmpClientTAUpdateUFOAddress,
+						 psTmpClientTAUpdateUFOAddress->ui32Addr,
+						 *pui32TmpClientTAUpdateValue,
+						 *pui32TmpClientTAUpdateValue));
+				pui32TmpClientTAUpdateValue++;
+			}
+			psTmpClientTAUpdateUFOAddress++;
+		}
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:",
+				 __func__, ui32Client3DFenceCount));
+		for (ii=0; ii<ui32Client3DFenceCount; ii++)
+		{
+			if (psTmpClient3DFenceUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+						 __func__, ii + 1, ui32Client3DFenceCount,
+						 (void*)psTmpClient3DFenceUFOAddress,
+						 psTmpClient3DFenceUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
+						 __func__, ii + 1, ui32Client3DFenceCount,
+						 (void*)psTmpClient3DFenceUFOAddress,
+						 psTmpClient3DFenceUFOAddress->ui32Addr,
+						 *pui32TmpClient3DFenceValue,
+						 *pui32TmpClient3DFenceValue));
+				pui32TmpClient3DFenceValue++;
+			}
+			psTmpClient3DFenceUFOAddress++;
+		}
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:",
+				 __func__, ui32Client3DUpdateCount));
+		for (ii=0; ii<ui32Client3DUpdateCount; ii++)
+		{
+			if (psTmpClient3DUpdateUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+						 __func__, ii + 1, ui32Client3DUpdateCount,
+						 (void*)psTmpClient3DUpdateUFOAddress,
+						 psTmpClient3DUpdateUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)",
+						 __func__, ii + 1, ui32Client3DUpdateCount,
+						 (void*)psTmpClient3DUpdateUFOAddress,
+						 psTmpClient3DUpdateUFOAddress->ui32Addr,
+						 *pui32TmpClient3DUpdateValue,
+						 *pui32TmpClient3DUpdateValue));
+				pui32TmpClient3DUpdateValue++;
+			}
+			psTmpClient3DUpdateUFOAddress++;
+		}
+	}
+#endif
+
+	/* Init and acquire the TA command if required */
+	if (bKickTA)
+	{
+		RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* Prepare workload estimation */
+		WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+				&psRenderContext->sWorkEstData,
+				&psRenderContext->sWorkEstData.sWorkloadMatchingDataTA,
+				ui32RenderTargetSize,
+				ui32NumberOfDrawCalls,
+				ui32NumberOfIndices,
+				ui32NumberOfMRTs,
+				ui64DeadlineInus,
+				&sWorkloadKickDataTA);
+#endif
+
+		/* Init the TA command helper */
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   calling RGXCmdHelperInitCmdCCB(), ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d",
+				   __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount));
+		eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psTAData->psServerCommonContext),
+				ui32ClientTAFenceCount,
+				pauiClientTAFenceUFOAddress,
+				paui32ClientTAFenceValue,
+				ui32ClientTAUpdateCount,
+				pauiClientTAUpdateUFOAddress,
+				paui32ClientTAUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+				ui32ServerTASyncPrims,
+				paui32ServerTASyncFlags,
+				SYNC_FLAG_MASK_ALL,
+				pasServerTASyncs,
+#endif
+				ui32TACmdSize,
+				pui8TADMCmd,
+				RGXFWIF_CCB_CMD_TYPE_TA,
+				ui32ExtJobRef,
+				ui32IntJobRef,
+				ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+				&sWorkloadKickDataTA,
+#else
+				NULL,
+#endif
+				"TA",
+				bCCBStateOpen,
+				pasTACmdHelperData);
+		if (unlikely(eError != PVRSRV_OK))
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line",
+					   __func__, eError));
+			goto fail_tacmdinit;
+		}
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* The following is used to determine the offset of the command header containing
+		   the workload estimation data, so that it can be accessed when the KCCB is read */
+		ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData);
+#endif
+
+		eError = RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS, pasTACmdHelperData);
+		if (unlikely(eError != PVRSRV_OK))
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line",
+					   __func__, eError));
+			goto fail_taacquirecmd;
+		}
+		else
+		{
+			ui32TACmdCount++;
+		}
+	}
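+	/*
+	 * Illustrative sketch (not part of the driver): the TA path above follows
+	 * the same client-CCB submission protocol the 3D path uses below - init
+	 * the helper data, acquire CCB space, release (write) the commands, then
+	 * kick the firmware through the kernel CCB. The phases as an enum for
+	 * reference; the enum itself is hypothetical, the functions named in the
+	 * comments are the ones called in this file.
+	 */
+#if 0	/* illustrative sketch, not driver code */
+	typedef enum
+	{
+		CCB_PHASE_INIT,		/* RGXCmdHelperInitCmdCCB(): build helper data */
+		CCB_PHASE_ACQUIRE,	/* RGXCmdHelperAcquireCmdCCB(): reserve CCB space */
+		CCB_PHASE_RELEASE,	/* RGXCmdHelperReleaseCmdCCB(): write the commands */
+		CCB_PHASE_KICK		/* RGXScheduleCommand(): notify FW via the KCCB */
+	} CCB_SUBMIT_PHASE;
+#endif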
+
+	/* Only kick the 3D if required */
+	if (bKickPR)
+	{
+		RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+
+		/*
+			The command helper doesn't know about the PR fence, so create
+			the command with all the fences against it, and later create
+			the PR command itself, which _must_ come after the PR fence.
+		 */
+		sPRUFO.puiAddrUFO = uiPRFenceUFOAddress;
+		sPRUFO.ui32Value = ui32PRFenceValue;
+
+		/* Init the PR fence command helper */
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   calling RGXCmdHelperInitCmdCCB(), ui32Client3DFenceCount=%d",
+				   __func__, ui32Client3DFenceCount));
+		eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+				ui32Client3DFenceCount,
+				pauiClient3DFenceUFOAddress,
+				paui32Client3DFenceValue,
+				0,
+				NULL,
+				NULL,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+				(bKick3D ? ui32Server3DSyncPrims : 0),
+				paui32Server3DSyncFlags,
+				PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK,
+				pasServer3DSyncs,
+#endif
+				sizeof(sPRUFO),
+				(IMG_UINT8*) &sPRUFO,
+				RGXFWIF_CCB_CMD_TYPE_FENCE_PR,
+				ui32ExtJobRef,
+				ui32IntJobRef,
+				ui32PDumpFlags,
+				NULL,
+				"3D-PR-Fence",
+				bCCBStateOpen,
+				&pas3DCmdHelperData[ui323DCmdCount++]);
+		if (unlikely(eError != PVRSRV_OK))
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line",
+					   __func__, eError));
+			goto fail_prfencecmdinit;
+		}
+
+		/* Init the 3D PR command helper */
+		/*
+			Updates for Android (fence sync and Timeline sync prim) are provided in the PR-update
+			if no 3D is present. This is so the timeline update cannot happen out of order with any
+			other 3D already in flight for the same timeline (PR-updates are done in the 3D cCCB).
+			This out of order timeline sync prim update could happen if we attach it to the TA update.
+		 */
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+		if (ui32ClientPRUpdateCount)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR,
+					   "%s: Line %d, ui32ClientPRUpdateCount=%d, "
+					   "pauiClientPRUpdateUFOAddress=0x%x, "
+					   "ui32ClientPRUpdateValueCount=%d, "
+					   "paui32ClientPRUpdateValue=0x%x",
+					   __func__, __LINE__, ui32ClientPRUpdateCount,
+					   pauiClientPRUpdateUFOAddress->ui32Addr,
+					   ui32ClientPRUpdateValueCount,
+					   (ui32ClientPRUpdateValueCount == 0) ? PVRSRV_SYNC_CHECKPOINT_SIGNALLED : *paui32ClientPRUpdateValue));
+		}
+#endif
+
+		if (!bUseCombined3DAnd3DPR)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR,
+					   "%s:   calling RGXCmdHelperInitCmdCCB(), ui32ClientPRUpdateCount=%d",
+					   __func__, ui32ClientPRUpdateCount));
+			eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+					0,
+					NULL,
+					NULL,
+					ui32ClientPRUpdateCount,
+					pauiClientPRUpdateUFOAddress,
+					paui32ClientPRUpdateValue,
+					0,
+					NULL,
+					SYNC_FLAG_MASK_ALL,
+					NULL,
+					pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, /* If the client has not provided a 3DPR command, the regular 3D command should be used instead */
+					pui83DPRDMCmd ? pui83DPRDMCmd : pui83DDMCmd,
+					RGXFWIF_CCB_CMD_TYPE_3D_PR,
+					ui32ExtJobRef,
+					ui32IntJobRef,
+					ui32PDumpFlags,
+					NULL,
+					"3D-PR",
+					bCCBStateOpen,
+					&pas3DCmdHelperData[ui323DCmdCount++]);
+			if (unlikely(eError != PVRSRV_OK))
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line",
+						   __func__, eError));
+				goto fail_prcmdinit;
+			}
+		}
+	}
+
+	if (bKick3D || bAbort)
+	{
+		RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* Prepare workload estimation */
+		WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+				&psRenderContext->sWorkEstData,
+				&psRenderContext->sWorkEstData.sWorkloadMatchingData3D,
+				ui32RenderTargetSize,
+				ui32NumberOfDrawCalls,
+				ui32NumberOfIndices,
+				ui32NumberOfMRTs,
+				ui64DeadlineInus,
+				&sWorkloadKickData3D);
+#endif
+
+		/* Init the 3D command helper */
+		eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+				0,
+				NULL,
+				NULL,
+				ui32Client3DUpdateCount,
+				pauiClient3DUpdateUFOAddress,
+				paui32Client3DUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+				ui32Server3DSyncPrims,
+				paui32Server3DSyncFlags,
+				PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE,
+				pasServer3DSyncs,
+#endif
+				ui323DCmdSize,
+				pui83DDMCmd,
+				RGXFWIF_CCB_CMD_TYPE_3D,
+				ui32ExtJobRef,
+				ui32IntJobRef,
+				ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+				&sWorkloadKickData3D,
+#else
+				NULL,
+#endif
+				"3D",
+				bCCBStateOpen,
+				&pas3DCmdHelperData[ui323DCmdCount++]);
+		if (unlikely(eError != PVRSRV_OK))
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line",
+					   __func__, eError));
+			goto fail_3dcmdinit;
+		}
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* The following are used to determine the offset of the command header containing the workload estimation
+		   data, so that it can be accessed when the KCCB is read */
+		ui323DCmdHeaderOffset =	RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]);
+		ui323DFullRenderCommandOffset =	RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1);
+#endif
+	}
+
+	/* Protect against array overflow in RGXCmdHelperAcquireCmdCCB() */
+	if (unlikely(ui323DCmdCount > CCB_CMD_HELPER_NUM_3D_COMMANDS))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d", __func__, eError));
+		goto fail_3dcmdinit;
+	}
+
+	if (ui323DCmdCount)
+	{
+		PVR_ASSERT(bKickPR || bKick3D);
+
+		/* Acquire space for all the 3D command(s) */
+		eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, pas3DCmdHelperData);
+		if (unlikely(eError != PVRSRV_OK))
+		{
+			/* If RGXCmdHelperAcquireCmdCCB fails we skip the scheduling
+			 * of a new TA command with the same Write offset in Kernel CCB.
+			 */
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError));
+			goto fail_3dacquirecmd;
+		}
+	}
+
+	/*
+		We should acquire the space in the kernel CCB here, because past this
+		point we release the commands, which performs operations on server
+		syncs that cannot be undone
+	 */
+
+	/*
+		Everything is ready to go now, release the commands
+	 */
+	if (ui32TACmdCount)
+	{
+		ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+		RGXCmdHelperReleaseCmdCCB(ui32TACmdCount,
+				pasTACmdHelperData,
+				"TA",
+				FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+		/* This checks if the command would wrap around at the end of the CCB and therefore would start at an
+		   offset of 0 rather than the current command offset */
+		if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck)
+		{
+			ui32TACommandOffset = ui32TACmdOffset;
+		}
+		else
+		{
+			ui32TACommandOffset = 0;
+		}
+#endif
+	}
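+	/*
+	 * Illustrative sketch (not part of the driver): the wrap check above
+	 * compares the host write offset sampled before and after the release. If
+	 * it moved backwards, the command wrapped to the start of the circular
+	 * CCB, so the workload-estimation header lives at offset 0 rather than at
+	 * the pre-release offset. The same logic as a helper; the helper name is
+	 * hypothetical.
+	 */
+#if 0	/* illustrative sketch, not driver code */
+	static IMG_UINT32 ResolveWorkEstCmdOffset(IMG_UINT32 ui32OffBeforeRelease,
+	                                          IMG_UINT32 ui32OffAfterRelease)
+	{
+		/* Offset decreased across the release => the command wrapped */
+		return (ui32OffBeforeRelease < ui32OffAfterRelease) ? ui32OffBeforeRelease : 0;
+	}
+#endif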
+
+	if (ui323DCmdCount)
+	{
+		ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+		RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+				pas3DCmdHelperData,
+				"3D",
+				FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
+		if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck)
+		{
+			ui323DCommandOffset = ui323DCmdOffset;
+		}
+		else
+		{
+			ui323DCommandOffset = 0;
+		}
+#endif
+	}
+
+	if (ui32TACmdCount)
+	{
+		RGXFWIF_KCCB_CMD sTAKCCBCmd;
+		IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr;
+
+		/* Construct the kernel TA CCB command. */
+		sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		sTAKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext);
+		sTAKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+		/* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* Store the offset to the client CCB command header so that it can be referenced when the KCCB command reaches the FW */
+		sTAKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset;
+#else
+		sTAKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
+
+		AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTAKCCBCmd.uCmdData.sCmdKickData.apsCleanupCtl,
+				&sTAKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl,
+				RGXFWIF_DM_TA,
+				bKickTA,
+				psRTDataCleanup,
+				psZBuffer,
+				psSBuffer,
+				psMSAAScratchBuffer);
+
+		HTBLOGK(HTB_SF_MAIN_KICK_TA,
+				sTAKCCBCmd.uCmdData.sCmdKickData.psContext,
+				ui32TACmdOffset
+		);
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+		RGXSRV_HWPERF_ENQ(psRenderContext,
+		                  OSGetCurrentClientProcessIDKM(),
+		                  ui32FWCtx,
+		                  ui32ExtJobRef,
+		                  ui32IntJobRef,
+		                  RGX_HWPERF_KICK_TYPE_TA,
+		                  iCheckTAFence,
+		                  iUpdateTAFence,
+		                  iUpdateTATimeline,
+		                  uiCheckTAFenceUID,
+		                  uiUpdateTAFenceUID,
+		                  ui64DeadlineInus,
+		                  WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickDataTA));
+#endif
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+					RGXFWIF_DM_TA,
+					&sTAKCCBCmd,
+					ui32ClientCacheOpSeqNum,
+					ui32PDumpFlags);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
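+		/*
+		 * Illustrative sketch (not part of the driver): RGXScheduleCommand()
+		 * returns PVRSRV_ERROR_RETRY while the kernel CCB has no free space;
+		 * the loop above retries with short sleeps until the MAX_HW_TIME_US
+		 * budget is spent. The general shape of that pattern, assuming a
+		 * caller-supplied kick callback; the helper and callback names are
+		 * hypothetical.
+		 */
+#if 0	/* illustrative sketch, not driver code */
+		static PVRSRV_ERROR KickWithRetry(PVRSRV_ERROR (*pfnKick)(void *pvCtx), void *pvCtx)
+		{
+			const IMG_UINT32 ui32Slice = MAX_HW_TIME_US / WAIT_TRY_COUNT;
+			PVRSRV_ERROR eErr = PVRSRV_ERROR_RETRY;
+			IMG_UINT32 ui32Spent;
+
+			for (ui32Spent = 0; ui32Spent < MAX_HW_TIME_US; ui32Spent += ui32Slice)
+			{
+				eErr = pfnKick(pvCtx);
+				if (eErr != PVRSRV_ERROR_RETRY)
+				{
+					break;	/* success, or a hard failure worth reporting */
+				}
+				OSWaitus(ui32Slice);	/* back off before the next attempt */
+			}
+			return eErr;
+		}
+#endif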
+
+		PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode->pvDevice,
+		                        ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+		                        RGX_HWPERF_KICK_TYPE_TA3D);
+	}
+
+	if (ui323DCmdCount)
+	{
+		RGXFWIF_KCCB_CMD s3DKCCBCmd;
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+		IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr;
+#endif
+
+		/* Construct the kernel 3D CCB command. */
+		s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext);
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
+		/* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* Store the offset to the client CCB command header so that it can be referenced when the KCCB command reaches the FW */
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset;
+#else
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
+
+		AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DKCCBCmd.uCmdData.sCmdKickData.apsCleanupCtl,
+				&s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl,
+				RGXFWIF_DM_3D,
+				bKick3D,
+				psRTDataCleanup,
+				psZBuffer,
+				psSBuffer,
+				psMSAAScratchBuffer);
+
+		HTBLOGK(HTB_SF_MAIN_KICK_3D,
+				s3DKCCBCmd.uCmdData.sCmdKickData.psContext,
+				ui323DCmdOffset);
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+		RGXSRV_HWPERF_ENQ(psRenderContext,
+		                  OSGetCurrentClientProcessIDKM(),
+		                  ui32FWCtx,
+		                  ui32ExtJobRef,
+		                  ui32IntJobRef,
+		                  RGX_HWPERF_KICK_TYPE_3D,
+		                  iCheck3DFence,
+		                  iUpdate3DFence,
+		                  iUpdate3DTimeline,
+		                  uiCheck3DFenceUID,
+		                  uiUpdate3DFenceUID,
+		                  ui64DeadlineInus,
+		                  WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickData3D));
+#endif
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+					RGXFWIF_DM_3D,
+					&s3DKCCBCmd,
+					ui32ClientCacheOpSeqNum,
+					ui32PDumpFlags);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+	}
+
+	/*
+	 * Now check eError (which may hold an error returned by our earlier calls
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command
+	 * first, so we check it now...
+	 */
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line",
+				   __func__, eError));
+		goto fail_3dacquirecmd;
+	}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(NO_HARDWARE)
+	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+	if (psUpdateTASyncCheckpoint)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   Signalling NOHW sync checkpoint [TA] <%p>, ID:%d, FwAddr=0x%x",
+				   __func__, (void*)psUpdateTASyncCheckpoint,
+				   SyncCheckpointGetId(psUpdateTASyncCheckpoint),
+				   SyncCheckpointGetFirmwareAddr(psUpdateTASyncCheckpoint)));
+		SyncCheckpointSignalNoHW(psUpdateTASyncCheckpoint);
+	}
+	if (psTAFenceTimelineUpdateSync)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   Updating NOHW sync prim [TA] <%p> to %d",
+				   __func__, (void*)psTAFenceTimelineUpdateSync,
+				   ui32TAFenceTimelineUpdateValue));
+		SyncPrimNoHwUpdate(psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue);
+	}
+
+	if (psUpdate3DSyncCheckpoint)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   Signalling NOHW sync checkpoint [3D] <%p>, ID:%d, FwAddr=0x%x",
+				   __func__, (void*)psUpdate3DSyncCheckpoint,
+				   SyncCheckpointGetId(psUpdate3DSyncCheckpoint),
+				   SyncCheckpointGetFirmwareAddr(psUpdate3DSyncCheckpoint)));
+		SyncCheckpointSignalNoHW(psUpdate3DSyncCheckpoint);
+	}
+	if (ps3DFenceTimelineUpdateSync)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   Updating NOHW sync prim [3D] <%p> to %d",
+				   __func__, (void*)ps3DFenceTimelineUpdateSync,
+				   ui323DFenceTimelineUpdateValue));
+		SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue);
+	}
+	SyncCheckpointNoHWUpdateTimelines(NULL);
+
+#endif /* defined(NO_HARDWARE) */
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
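+	/*
+	 * Illustrative sketch (not part of the driver): in NO_HARDWARE builds
+	 * there is no GPU to signal completion, so the block above emulates it at
+	 * submit time - each output checkpoint is signalled and each timeline
+	 * sync prim is advanced to its update value, so fence waiters never
+	 * block. A condensed form of that per-DM step; the helper name is
+	 * hypothetical, the two callees are the ones used above.
+	 */
+#if 0	/* illustrative sketch, not driver code */
+	static void NoHwCompleteOutputs(PSYNC_CHECKPOINT psUpdateCheckpoint,
+	                                PVRSRV_CLIENT_SYNC_PRIM *psTimelineSync,
+	                                IMG_UINT32 ui32TimelineUpdateValue)
+	{
+		if (psUpdateCheckpoint)
+		{
+			SyncCheckpointSignalNoHW(psUpdateCheckpoint);	/* mark work done */
+		}
+		if (psTimelineSync)
+		{
+			SyncPrimNoHwUpdate(psTimelineSync, ui32TimelineUpdateValue);
+		}
+	}
+#endif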
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	if (psBufferSyncData)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR,
+				   "%s:   calling pvr_buffer_sync_kick_succeeded(psBufferSyncData=<%p>)...",
+				   __func__, (void*)psBufferSyncData));
+		pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+	}
+	if (apsBufferFenceSyncCheckpoints)
+	{
+		kfree(apsBufferFenceSyncCheckpoints);
+	}
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+	if (piUpdateTAFence)
+	{
+		*piUpdateTAFence = iUpdateTAFence;
+	}
+	if (piUpdate3DFence)
+	{
+		*piUpdate3DFence = iUpdate3DFence;
+	}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence.
+	 * NOTE: 3D fence is always submitted, either via 3D or TA(PR).
+	 */
+	if (bKickTA)
+	{
+		SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints);
+	}
+	SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints);
+
+	if (pvTAUpdateFenceFinaliseData && (iUpdateTAFence != PVRSRV_NO_FENCE))
+	{
+		SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdateTAFence,
+									pvTAUpdateFenceFinaliseData,
+									psUpdateTASyncCheckpoint, szFenceNameTA);
+	}
+	if (pv3DUpdateFenceFinaliseData && (iUpdate3DFence != PVRSRV_NO_FENCE))
+	{
+		SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdate3DFence,
+									pv3DUpdateFenceFinaliseData,
+									psUpdate3DSyncCheckpoint, szFenceName3D);
+	}
+
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceTASyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints);
+	}
+	if (apsFence3DSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints);
+	}
+
+	if (sTASyncData.paui32ClientUpdateValue)
+	{
+		OSFreeMem(sTASyncData.paui32ClientUpdateValue);
+	}
+	if (s3DSyncData.paui32ClientUpdateValue)
+	{
+		OSFreeMem(s3DSyncData.paui32ClientUpdateValue);
+	}
+
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRenderContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+	fail_3dacquirecmd:
+	fail_3dcmdinit:
+	fail_prcmdinit:
+	fail_prfencecmdinit:
+	fail_taacquirecmd:
+	fail_tacmdinit:
+	SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAFence);
+	SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAUpdate);
+	SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DFence);
+	SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DUpdate);
+	/* Where a TA-only kick (i.e. no 3D) is submitted, the PR update will make use of the unused 3DUpdate list.
+	 * If this has happened, performing a rollback on pauiClientPRUpdateUFOAddress will simply repeat what
+	 * has already been done for the sSyncAddrList3DUpdate above and result in a double decrement of the
+	 * sync checkpoint's hEnqueuedCCBCount, so we need to check before rolling back the PRUpdate.
+	 */
+	if (pauiClientPRUpdateUFOAddress && (pauiClientPRUpdateUFOAddress != psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs))
+	{
+		SyncCheckpointRollbackFromUFO(psRenderContext->psDeviceNode, pauiClientPRUpdateUFOAddress->ui32Addr);
+	}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	fail_alloc_update_values_mem_3D:
+	/* FIXME: sTASyncData.paui32ClientPRUpdateValue points to the same buffer, needs a review */
+	fail_alloc_update_values_mem_TA:
+	if (iUpdateTAFence != PVRSRV_NO_FENCE)
+	{
+		SyncCheckpointRollbackFenceData(iUpdateTAFence, pvTAUpdateFenceFinaliseData);
+	}
+	if (iUpdate3DFence != PVRSRV_NO_FENCE)
+	{
+		SyncCheckpointRollbackFenceData(iUpdate3DFence, pv3DUpdateFenceFinaliseData);
+	}
+	fail_create_output_fence:
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence.
+	 * NOTE: 3D fence is always submitted, either via 3D or TA(PR).
+	 */
+	if (bKickTA)
+	{
+		SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints);
+	}
+	SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints);
+	fail_resolve_input_fence:
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	if (psBufferSyncData)
+	{
+		pvr_buffer_sync_kick_failed(psBufferSyncData);
+	}
+	if (apsBufferFenceSyncCheckpoints)
+	{
+		kfree(apsBufferFenceSyncCheckpoints);
+	}
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+	err_pr_fence_address:
+	err_populate_sync_addr_list_3d_update:
+	err_populate_sync_addr_list_3d_fence:
+	err_populate_sync_addr_list_ta_update:
+	err_populate_sync_addr_list_ta_fence:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceTASyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints);
+	}
+	if (apsFence3DSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints);
+	}
+	if (sTASyncData.paui32ClientUpdateValue)
+	{
+		OSFreeMem(sTASyncData.paui32ClientUpdateValue);
+	}
+	if (s3DSyncData.paui32ClientUpdateValue)
+	{
+		OSFreeMem(s3DSyncData.paui32ClientUpdateValue);
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+	PVR_ASSERT(eError != PVRSRV_OK);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRenderContext->hLock);
+#endif
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+		PVRSRV_DEVICE_NODE * psDeviceNode,
+		RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+		IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psRenderContext->hLock);
+#endif
+
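+	/* A render context comprises separate TA and 3D common contexts; only
+	 * re-prioritise the parts whose priority actually changes.
+	 */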
+	if (psRenderContext->sTAData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext,
+				psConnection,
+				psRenderContext->psDeviceNode->pvDevice,
+				ui32Priority,
+				RGXFWIF_DM_TA);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to set the priority of the TA part of the rendercontext (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+			goto fail_tacontext;
+		}
+		psRenderContext->sTAData.ui32Priority = ui32Priority;
+	}
+
+	if (psRenderContext->s3DData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext,
+				psConnection,
+				psRenderContext->psDeviceNode->pvDevice,
+				ui32Priority,
+				RGXFWIF_DM_3D);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to set the priority of the 3D part of the rendercontext (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+			goto fail_3dcontext;
+		}
+		psRenderContext->s3DData.ui32Priority = ui32Priority;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRenderContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+	fail_3dcontext:
+	fail_tacontext:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRenderContext->hLock);
+#endif
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+ * PVRSRVRGXGetLastRenderContextResetReasonKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+		IMG_UINT32 *peLastResetReason,
+		IMG_UINT32 *pui32LastResetJobRef)
+{
+	RGX_SERVER_RC_TA_DATA         *psRenderCtxTAData;
+	RGX_SERVER_RC_3D_DATA         *psRenderCtx3DData;
+	RGX_SERVER_COMMON_CONTEXT     *psCurrentServerTACommonCtx, *psCurrentServer3DCommonCtx;
+	RGXFWIF_CONTEXT_RESET_REASON  eLastTAResetReason, eLast3DResetReason;
+	IMG_UINT32                    ui32LastTAResetJobRef, ui32Last3DResetJobRef;
+
+	PVR_ASSERT(psRenderContext != NULL);
+	PVR_ASSERT(peLastResetReason != NULL);
+	PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+	psRenderCtxTAData          = &(psRenderContext->sTAData);
+	psCurrentServerTACommonCtx = psRenderCtxTAData->psServerCommonContext;
+	psRenderCtx3DData          = &(psRenderContext->s3DData);
+	psCurrentServer3DCommonCtx = psRenderCtx3DData->psServerCommonContext;
+
+	/* Get the last reset reasons from both the TA and 3D so they are reset... */
+	eLastTAResetReason = FWCommonContextGetLastResetReason(psCurrentServerTACommonCtx, &ui32LastTAResetJobRef);
+	eLast3DResetReason = FWCommonContextGetLastResetReason(psCurrentServer3DCommonCtx, &ui32Last3DResetJobRef);
+
+	/* Combine the reset reason from TA and 3D into one... */
+	*peLastResetReason    = (IMG_UINT32) eLast3DResetReason;
+	*pui32LastResetJobRef = ui32Last3DResetJobRef;
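+	/* The 3D reason is reported by default; the TA reason takes precedence
+	 * when the 3D context saw no reset, or was merely an innocent victim
+	 * while the TA was the guilty party (or was hard-context-switched).
+	 */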
+	if (eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_NONE  ||
+	    ((eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP  ||
+	      eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING)  &&
+	     (eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP  ||
+	      eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING)) ||
+	    ((eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP  ||
+	      eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING)  &&
+	     (eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH)))
+	{
+		*peLastResetReason    = eLastTAResetReason;
+		*pui32LastResetJobRef = ui32LastTAResetJobRef;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVRGXGetPartialRenderCountKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetPartialRenderCountKM(DEVMEM_MEMDESC *psHWRTDataMemDesc,
+		IMG_UINT32 *pui32NumPartialRenders)
+{
+	RGXFWIF_HWRTDATA *psHWRTData;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemAcquireCpuVirtAddr(psHWRTDataMemDesc, (void **)&psHWRTData);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to map Firmware Render Target Data (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+	*pui32NumPartialRenders = psHWRTData->ui32NumPartialRenders;
+
+	DevmemReleaseCpuVirtAddr(psHWRTDataMemDesc);
+
+	return PVRSRV_OK;
+}
+
+void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                         DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                         void *pvDumpDebugFile,
+                         IMG_UINT32 ui32VerbLevel)
+{
+	DLLIST_NODE *psNode, *psNext;
+	OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+	dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+				IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+
+		DumpFWCommonContextInfo(psCurrentServerRenderCtx->sTAData.psServerCommonContext,
+		                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+		DumpFWCommonContextInfo(psCurrentServerRenderCtx->s3DData.psServerCommonContext,
+		                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+	}
+	OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_UINT32 ui32ContextBitMask = 0;
+
+	OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+				IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+		if (NULL != psCurrentServerRenderCtx->sTAData.psServerCommonContext)
+		{
+			if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext, RGX_KICK_TYPE_DM_TA) == PVRSRV_ERROR_CCCB_STALLED)
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_TA;
+			}
+		}
+
+		if (NULL != psCurrentServerRenderCtx->s3DData.psServerCommonContext)
+		{
+			if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_3D) == PVRSRV_ERROR_CCCB_STALLED)
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_3D;
+			}
+		}
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+	return ui32ContextBitMask;
+}
+
+/*
+ * RGXRenderContextStalledKM
+ */
+PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
+{
+	RGXCheckForStalledClientContexts((PVRSRV_RGXDEV_INFO *) psRenderContext->psDeviceNode->pvDevice, IMG_TRUE);
+	return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxta3d.c)
+ ******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxta3d.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxta3d.h
new file mode 100644
index 0000000..e960543
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxta3d.h
@@ -0,0 +1,461 @@
+/*************************************************************************/ /*!
+@File           rgxta3d.h
+@Title          RGX TA and 3D Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX TA and 3D Functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXTA3D_H
+#define RGXTA3D_H
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_shared.h"
+#include "rgx_fwif_resetframework.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT;
+typedef struct _RGX_FREELIST_ RGX_FREELIST;
+typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE;
+
+typedef struct {
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+	DEVMEM_MEMDESC			*psFWHWRTDataMemDesc;
+	DEVMEM_MEMDESC			*psRTArrayMemDesc;
+	DEVMEM_MEMDESC          *psRendersAccArrayMemDesc;
+	RGX_FREELIST 			*apsFreeLists[RGXFW_MAX_FREELISTS];
+	PVRSRV_CLIENT_SYNC_PRIM	*psCleanupSync;
+} RGX_RTDATA_CLEANUP_DATA;
+
+struct _RGX_FREELIST_ {
+	PVRSRV_RGXDEV_INFO 		*psDevInfo;
+
+	/* Free list PMR */
+	PMR						*psFreeListPMR;
+	IMG_DEVMEM_OFFSET_T		uiFreeListPMROffset;
+
+	/* Freelist config */
+	IMG_UINT32				ui32MaxFLPages;
+	IMG_UINT32				ui32InitFLPages;
+	IMG_UINT32				ui32CurrentFLPages;
+	IMG_UINT32				ui32GrowFLPages;
+	IMG_UINT32              ui32ReadyFLPages;
+	IMG_UINT32              ui32GrowThreshold;      /* Percentage of FL memory used that should trigger a new grow request */
+	IMG_UINT32				ui32FreelistID;
+	IMG_UINT32				ui32FreelistGlobalID;	/* related global freelist for this freelist */
+	IMG_UINT64				ui64FreelistChecksum;	/* checksum over freelist content */
+	IMG_BOOL				bCheckFreelist;			/* freelist check enabled */
+	IMG_UINT32				ui32RefCount;			/* freelist reference counting */
+
+	IMG_UINT32				ui32NumGrowReqByApp;	/* Total number of grow requests by Application*/
+	IMG_UINT32				ui32NumGrowReqByFW;		/* Total Number of grow requests by Firmware */
+	IMG_UINT32				ui32NumHighPages;		/* High Mark of pages in the freelist */
+
+	IMG_PID					ownerPid;			/* Pid of the owner of the list */
+
+	/* Memory Blocks */
+	DLLIST_NODE				sMemoryBlockHead;
+	DLLIST_NODE				sMemoryBlockInitHead;
+	DLLIST_NODE				sNode;
+
+	/* FW data structures */
+	DEVMEM_MEMDESC			*psFWFreelistMemDesc;
+	RGXFWIF_DEV_VIRTADDR	sFreeListFWDevVAddr;
+
+	PVRSRV_CLIENT_SYNC_PRIM	*psCleanupSync;
+};
+
+struct _RGX_PMR_NODE_ {
+	RGX_FREELIST			*psFreeList;
+	PMR						*psPMR;
+	PMR_PAGELIST 			*psPageList;
+	DLLIST_NODE				sMemoryBlock;
+	IMG_UINT32				ui32NumPages;
+	IMG_BOOL				bFirstPageMissing;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+	RI_HANDLE				hRIHandle;
+#endif
+};
+
+typedef struct {
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+	DEVMEM_MEMDESC			*psRenderTargetMemDesc;
+} RGX_RT_CLEANUP_DATA;
+
+typedef struct {
+	PVRSRV_RGXDEV_INFO		*psDevInfo;
+	DEVMEM_MEMDESC			*psFWZSBufferMemDesc;
+	RGXFWIF_DEV_VIRTADDR	sZSBufferFWDevVAddr;
+
+	DEVMEMINT_RESERVATION 	*psReservation;
+	PMR 					*psPMR;
+	DEVMEMINT_MAPPING 		*psMapping;
+	PVRSRV_MEMALLOCFLAGS_T 	uiMapFlags;
+	IMG_UINT32 				ui32ZSBufferID;
+	IMG_UINT32 				ui32RefCount;
+	IMG_BOOL				bOnDemand;
+
+	IMG_UINT32				ui32NumReqByApp;		/* Number of Backing Requests from Application */
+	IMG_UINT32				ui32NumReqByFW;			/* Number of Backing Requests from Firmware */
+
+	IMG_PID					owner;
+
+	DLLIST_NODE	sNode;
+
+	PVRSRV_CLIENT_SYNC_PRIM	*psCleanupSync;
+} RGX_ZSBUFFER_DATA;
+
+typedef struct {
+	RGX_ZSBUFFER_DATA		*psZSBuffer;
+} RGX_POPULATION;
+
+/* Dump the physical pages of a freelist */
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList);
+
+
+/* Create HWRTDataSet */
+PVRSRV_ERROR RGXCreateHWRTData(CONNECTION_DATA      *psConnection,
+                               PVRSRV_DEVICE_NODE	*psDeviceNode,
+							   IMG_UINT32			psRenderTarget,
+							   IMG_DEV_VIRTADDR		psPMMListDevVAddr,
+							   RGX_FREELIST			*apsFreeLists[RGXFW_MAX_FREELISTS],
+							   RGX_RTDATA_CLEANUP_DATA	**ppsCleanupData,
+							   IMG_UINT32           ui32PPPScreen,
+							   IMG_UINT64           ui64MultiSampleCtl,
+							   IMG_UINT64           ui64FlippedMultiSampleCtl,
+							   IMG_UINT32           ui32TPCStride,
+							   IMG_DEV_VIRTADDR		sTailPtrsDevVAddr,
+							   IMG_UINT32           ui32TPCSize,
+							   IMG_UINT32           ui32TEScreen,
+							   IMG_UINT32           ui32TEAA,
+							   IMG_UINT32           ui32TEMTILE1,
+							   IMG_UINT32           ui32TEMTILE2,
+							   IMG_UINT32           ui32MTileStride,
+							   IMG_UINT32                 ui32ISPMergeLowerX,
+							   IMG_UINT32                 ui32ISPMergeLowerY,
+							   IMG_UINT32                 ui32ISPMergeUpperX,
+							   IMG_UINT32                 ui32ISPMergeUpperY,
+							   IMG_UINT32                 ui32ISPMergeScaleX,
+							   IMG_UINT32                 ui32ISPMergeScaleY,
+							   IMG_DEV_VIRTADDR	sMacrotileArrayDevVAddr,
+							   IMG_DEV_VIRTADDR	sRgnHeaderDevVAddr,
+							   IMG_DEV_VIRTADDR	sRTCDevVAddr,
+							   IMG_UINT64			uiRgnHeaderSize,
+							   IMG_UINT32			ui32ISPMtileSize,
+							   IMG_UINT16			ui16MaxRTs,
+							   DEVMEM_MEMDESC		**psMemDesc,
+							   IMG_UINT32			*puiHWRTData);
+
+/* Destroy HWRTData */
+PVRSRV_ERROR RGXDestroyHWRTData(RGX_RTDATA_CLEANUP_DATA *psCleanupData);
+
+/* Create Render Target */
+PVRSRV_ERROR RGXCreateRenderTarget(CONNECTION_DATA      *psConnection,
+                                   PVRSRV_DEVICE_NODE	*psDeviceNode,
+								   IMG_DEV_VIRTADDR		psVHeapTableDevVAddr,
+								   RGX_RT_CLEANUP_DATA	**ppsCleanupData,
+								   IMG_UINT32			*sRenderTargetFWDevVAddr);
+
+/* Destroy render target */
+PVRSRV_ERROR RGXDestroyRenderTarget(RGX_RT_CLEANUP_DATA *psCleanupData);
+
+
+/*
+	RGXCreateZSBuffer
+*/
+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
+                                 PVRSRV_DEVICE_NODE	* psDeviceNode,
+                                 DEVMEMINT_RESERVATION 	*psReservation,
+                                 PMR 					*psPMR,
+                                 PVRSRV_MEMALLOCFLAGS_T 	uiMapFlags,
+                                 RGX_ZSBUFFER_DATA		 	**ppsZSBuffer,
+                                 IMG_UINT32					*sRenderTargetFWDevVAddr);
+
+/*
+	RGXDestroyZSBuffer
+*/
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+
+/*
+ * RGXBackingZSBuffer()
+ *
+ * Backs ZS-Buffer with physical pages
+ */
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXPopulateZSBufferKM()
+ *
+ * Backs ZS-Buffer with physical pages (called by Bridge calls)
+ */
+PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+									RGX_POPULATION **ppsPopulation);
+
+/*
+ * RGXUnbackingZSBuffer()
+ *
+ * Frees ZS-Buffer's physical pages
+ */
+PVRSRV_ERROR RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXUnpopulateZSBufferKM()
+ *
+ * Frees ZS-Buffer's physical pages (called by Bridge calls )
+ */
+PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation);
+
+/*
+	RGXProcessRequestZSBufferBacking
+*/
+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+									  IMG_UINT32 ui32ZSBufferID);
+
+/*
+	RGXProcessRequestZSBufferUnbacking
+*/
+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+										IMG_UINT32 ui32ZSBufferID);
+
+/*
+	RGXGrowFreeList
+*/
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+                             IMG_UINT32 ui32NumPages,
+                             PDLLIST_NODE pListHeader);
+
+/* Create free list */
+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA      *psConnection,
+                               PVRSRV_DEVICE_NODE	*psDeviceNode,
+							   IMG_UINT32			ui32MaxFLPages,
+							   IMG_UINT32			ui32InitFLPages,
+							   IMG_UINT32			ui32GrowFLPages,
+                               IMG_UINT32           ui32GrowParamThreshold,
+							   RGX_FREELIST			*psGlobalFreeList,
+							   IMG_BOOL				bCheckFreelist,
+							   IMG_DEV_VIRTADDR		sFreeListDevVAddr,
+							   PMR					*psFreeListPMR,
+							   IMG_DEVMEM_OFFSET_T	uiFreeListPMROffset,
+							   RGX_FREELIST			**ppsFreeList);
+
+/* Destroy free list */
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList);
+
+/*
+	RGXProcessRequestGrow
+*/
+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+						   IMG_UINT32 ui32FreelistID);
+
+
+/* Reconstruct free list after Hardware Recovery */
+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+											  IMG_UINT32 ui32FreelistsCount,
+											  IMG_UINT32 *paui32Freelists);
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXCreateRenderContextKM
+
+ @Description
+	Server-side implementation of RGXCreateRenderContext
+
+ @Input psConnection - connection data
+ @Input psDeviceNode - device node
+ @Input ui32Priority - context priority
+ @Input sVDMCallStackAddr - VDM call stack device virtual address
+ @Input ui32FrameworkCommandSize - framework command size
+ @Input pabyFrameworkCommand - ptr to framework command
+ @Input hMemCtxPrivData - memory context private data
+ @Input ui32CtxSwitchSize - size of the context switch registers
+ @Input pCtxSwitch_Regs - ptr to the context switch registers
+ @Input ui32StaticRendercontextStateSize - size of the static render context state
+ @Input pStaticRendercontextState - ptr to the static render context state
+ @Input ui32PackedCCBSizeU8888 - packed CCB sizes:
+ 		ui8TACCBAllocSizeLog2 - TA CCB size
+ 		ui8TACCBMaxAllocSizeLog2 - maximum size to which TA CCB can grow
+ 		ui83DCCBAllocSizeLog2 - 3D CCB size
+ 		ui83DCCBMaxAllocSizeLog2 - maximum size to which 3D CCB can grow
+ @Output ppsRenderContext - created render context
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA				*psConnection,
+											PVRSRV_DEVICE_NODE			*psDeviceNode,
+											IMG_UINT32					ui32Priority,
+											IMG_DEV_VIRTADDR			sVDMCallStackAddr,
+											IMG_UINT32					ui32FrameworkCommandSize,
+											IMG_PBYTE					pabyFrameworkCommand,
+											IMG_HANDLE					hMemCtxPrivData,
+											IMG_UINT32					ui32CtxSwitchSize,
+											IMG_PBYTE					pCtxSwitch_Regs,
+											IMG_UINT32					ui32StaticRendercontextStateSize,
+											IMG_PBYTE					pStaticRendercontextState,
+											IMG_UINT32					ui32PackedCCBSizeU8888,
+											RGX_SERVER_RENDER_CONTEXT	**ppsRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXDestroyRenderContextKM
+
+ @Description
+	Server-side implementation of RGXDestroyRenderContext
+
+ @Input psCleanupData - clean up data
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXKickTA3DKM
+
+ @Description
+	Server-side implementation of RGXKickTA3D
+
+ @Input psRTDataCleanup - RT data associated with the kick (or NULL)
+ @Input psZBuffer - Z-buffer associated with the kick (or NULL)
+ @Input psSBuffer - S-buffer associated with the kick (or NULL)
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT	*psRenderContext,
+								 IMG_UINT32					ui32ClientCacheOpSeqNum,
+								 IMG_UINT32					ui32ClientTAFenceCount,
+								 SYNC_PRIMITIVE_BLOCK				**apsClientTAFenceSyncPrimBlock,
+								 IMG_UINT32					*paui32ClientTAFenceSyncOffset,
+								 IMG_UINT32					*paui32ClientTAFenceValue,
+								 IMG_UINT32					ui32ClientTAUpdateCount,
+								 SYNC_PRIMITIVE_BLOCK				**apsClientUpdateSyncPrimBlock,
+								 IMG_UINT32					*paui32ClientUpdateSyncOffset,
+								 IMG_UINT32					*paui32ClientTAUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+								 IMG_UINT32					ui32ServerTASyncPrims,
+								 IMG_UINT32					*paui32ServerTASyncFlags,
+								 SERVER_SYNC_PRIMITIVE 		**pasServerTASyncs,
+#endif
+								 IMG_UINT32					ui32Client3DFenceCount,
+								 SYNC_PRIMITIVE_BLOCK				**apsClient3DFenceSyncPrimBlock,
+								 IMG_UINT32					*pauiClient3DFenceSyncOffset,
+								 IMG_UINT32					*paui32Client3DFenceValue,
+								 IMG_UINT32					ui32Client3DUpdateCount,
+								 SYNC_PRIMITIVE_BLOCK				**apsClient3DUpdateSyncPrimBlock,
+								 IMG_UINT32					*paui32Client3DUpdateSyncOffset,
+								 IMG_UINT32					*paui32Client3DUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+								 IMG_UINT32					ui32Server3DSyncPrims,
+								 IMG_UINT32					*paui32Server3DSyncFlags,
+								 SERVER_SYNC_PRIMITIVE 		**pasServer3DSyncs,
+#endif
+								 SYNC_PRIMITIVE_BLOCK				*psPRSyncPrimBlock,
+								 IMG_UINT32					ui32PRSyncOffset,
+								 IMG_UINT32					ui32PRFenceValue,
+								 PVRSRV_FENCE				iCheckFence,
+								 PVRSRV_TIMELINE			iUpdateTimeline,
+								 PVRSRV_FENCE				*piUpdateFence,
+								 IMG_CHAR					szFenceName[PVRSRV_SYNC_NAME_LENGTH],
+								 PVRSRV_FENCE				iCheckFence3D,
+								 PVRSRV_TIMELINE			iUpdateTimeline3D,
+								 PVRSRV_FENCE				*piUpdateFence3D,
+								 IMG_CHAR					szFenceName3D[PVRSRV_SYNC_NAME_LENGTH],
+								 IMG_UINT32					ui32TACmdSize,
+								 IMG_PBYTE					pui8TADMCmd,
+								 IMG_UINT32					ui323DPRCmdSize,
+								 IMG_PBYTE					pui83DPRDMCmd,
+								 IMG_UINT32					ui323DCmdSize,
+								 IMG_PBYTE					pui83DDMCmd,
+								 IMG_UINT32					ui32ExtJobRef,
+								 IMG_BOOL					bLastTAInScene,
+								 IMG_BOOL					bKickTA,
+								 IMG_BOOL					bKickPR,
+								 IMG_BOOL					bKick3D,
+								 IMG_BOOL					bAbort,
+								 IMG_UINT32					ui32PDumpFlags,
+								 RGX_RTDATA_CLEANUP_DATA	*psRTDataCleanup,
+								 RGX_ZSBUFFER_DATA			*psZBuffer,
+								 RGX_ZSBUFFER_DATA			*psSBuffer,
+								 RGX_ZSBUFFER_DATA			*psMSAAScratchBuffer,
+								 IMG_UINT32					ui32SyncPMRCount,
+								 IMG_UINT32					*paui32SyncPMRFlags,
+								 PMR						**ppsSyncPMRs,
+								 IMG_UINT32					ui32RenderTargetSize,
+								 IMG_UINT32					ui32NumberOfDrawCalls,
+								 IMG_UINT32					ui32NumberOfIndices,
+								 IMG_UINT32					ui32NumberOfMRTs,
+								 IMG_UINT64					ui64DeadlineInus);
+
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                 PVRSRV_DEVICE_NODE * psDevNode,
+												 RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+												 IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+                                                        IMG_UINT32 *peLastResetReason,
+                                                        IMG_UINT32 *pui32LastResetJobRef);
+
+PVRSRV_ERROR PVRSRVRGXGetPartialRenderCountKM(DEVMEM_MEMDESC *psHWRTDataMemDesc,
+											  IMG_UINT32 *pui32NumPartialRenders);
+
+/* Debug - Dump debug info of render contexts on this device */
+void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                         DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                         void *pvDumpDebugFile,
+                         IMG_UINT32 ui32VerbLevel);
+
+/* Debug/Watchdog - check if client contexts are stalled */
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext);
+
+#endif /* RGXTA3D_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtdmtransfer.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtdmtransfer.c
new file mode 100644
index 0000000..d5b338c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtdmtransfer.c
@@ -0,0 +1,1187 @@
+/*************************************************************************/ /*!
+@File           rgxtdmtransfer.c
+@Title          Device specific TDM transfer queue routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtdmtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_memallocflags.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "htbuffer.h"
+
+#include "pdump_km.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TDM_UFO_DUMP	0
+
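+/* Enable this to trace sync checkpoint handling via CHKPT_DBG */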
+//#define TDM_CHECKPOINT_DEBUG 1
+
+#if defined(TDM_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+typedef struct {
+	RGX_SERVER_COMMON_CONTEXT * psServerCommonContext;
+	IMG_UINT32                  ui32Priority;
+#if defined(SUPPORT_BUFFER_SYNC)
+	struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+} RGX_SERVER_TQ_TDM_DATA;
+
+
+struct _RGX_SERVER_TQ_TDM_CONTEXT_ {
+	PVRSRV_DEVICE_NODE      *psDeviceNode;
+	DEVMEM_MEMDESC          *psFWFrameworkMemDesc;
+	IMG_UINT32              ui32Flags;
+	RGX_SERVER_TQ_TDM_DATA  sTDMData;
+	PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+	DLLIST_NODE             sListNode;
+	SYNC_ADDR_LIST          sSyncAddrListFence;
+	SYNC_ADDR_LIST          sSyncAddrListUpdate;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POS_LOCK		hLock;
+#endif
+};
+
+static PVRSRV_ERROR _CreateTDMTransferContext(
+		CONNECTION_DATA         * psConnection,
+		PVRSRV_DEVICE_NODE      * psDeviceNode,
+		DEVMEM_MEMDESC          * psFWMemContextMemDesc,
+		IMG_UINT32                ui32Priority,
+		RGX_COMMON_CONTEXT_INFO * psInfo,
+		RGX_SERVER_TQ_TDM_DATA  * psTDMData,
+		IMG_UINT32				  ui32CCBAllocSizeLog2,
+		IMG_UINT32				  ui32CCBMaxAllocSizeLog2)
+{
+	PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	psTDMData->psBufferSyncContext =
+			pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+			                               "rogue-tdm");
+	if (IS_ERR(psTDMData->psBufferSyncContext))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed to create buffer_sync context (err=%ld)",
+				__func__, PTR_ERR(psTDMData->psBufferSyncContext)));
+
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto fail_buffer_sync_context_create;
+	}
+#endif
+
+	eError = FWCommonContextAllocate(
+			psConnection,
+			psDeviceNode,
+			REQ_TYPE_TQ_TDM,
+			RGXFWIF_DM_TDM,
+			NULL,
+			0,
+			psFWMemContextMemDesc,
+			NULL,
+			ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ2D_CCB_SIZE_LOG2,
+			ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TQ2D_CCB_MAX_SIZE_LOG2,
+			ui32Priority,
+			psInfo,
+			&psTDMData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+	psTDMData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+	fail_contextalloc:
+#if defined(SUPPORT_BUFFER_SYNC)
+	pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
+	psTDMData->psBufferSyncContext = NULL;
+	fail_buffer_sync_context_create:
+#endif
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+static PVRSRV_ERROR _DestroyTDMTransferContext(
+		RGX_SERVER_TQ_TDM_DATA  * psTDMData,
+		PVRSRV_DEVICE_NODE      * psDeviceNode,
+		PVRSRV_CLIENT_SYNC_PRIM * psCleanupSync)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(
+			psDeviceNode,
+			psTDMData->psServerCommonContext,
+			psCleanupSync,
+			RGXFWIF_DM_TDM,
+			PDUMP_FLAGS_NONE);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+	/* ... it has, so we can free its resources */
+	FWCommonContextFree(psTDMData->psServerCommonContext);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
+	psTDMData->psBufferSyncContext = NULL;
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXTDMCreateTransferContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
+		CONNECTION_DATA            * psConnection,
+		PVRSRV_DEVICE_NODE         * psDeviceNode,
+		IMG_UINT32                   ui32Priority,
+		IMG_UINT32                   ui32FrameworkCommandSize,
+		IMG_PBYTE                    pabyFrameworkCommand,
+		IMG_HANDLE                   hMemCtxPrivData,
+		IMG_UINT32					 ui32PackedCCBSizeU88,
+		RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext)
+{
+	RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext;
+
+	DEVMEM_MEMDESC          * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_COMMON_CONTEXT_INFO   sInfo;
+	PVRSRV_ERROR              eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO        *psDevInfo = psDeviceNode->pvDevice;
+
+	/* Allocate the server side structure */
+	*ppsTransferContext = NULL;
+	psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
+	if (psTransferContext == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psTransferContext->hLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_lockcreate;
+	}
+#endif
+
+	psTransferContext->psDeviceNode = psDeviceNode;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+	                       &psTransferContext->psCleanupSync,
+	                       "transfer context cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate cleanup sync (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_syncalloc;
+	}
+
+	/*
+	 * Create the FW framework buffer
+	 */
+	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+	                                    &psTransferContext->psFWFrameworkMemDesc,
+	                                    ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate firmware GPU framework state (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_frameworkcreate;
+	}
+
+	/* Copy the Framework client data into the framework buffer */
+	eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc,
+	                                       pabyFrameworkCommand,
+	                                       ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to populate the framework buffer (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_frameworkcopy;
+	}
+
+	sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+
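+	/* ui32PackedCCBSizeU88 carries two log2 CCB sizes in a single word
+	 * (hence the U88 naming): U32toU8_Unpack1/2 below extract the initial
+	 * allocation size and the maximum size the CCB may grow to.
+	 */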
+	eError = _CreateTDMTransferContext(psConnection,
+	                                   psDeviceNode,
+	                                   psFWMemContextMemDesc,
+	                                   ui32Priority,
+	                                   &sInfo,
+	                                   &psTransferContext->sTDMData,
+									   U32toU8_Unpack1(ui32PackedCCBSizeU88),
+									   U32toU8_Unpack2(ui32PackedCCBSizeU88));
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_tdmtransfercontext;
+	}
+
+	SyncAddrListInit(&psTransferContext->sSyncAddrListFence);
+	SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate);
+
+	OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+
+	*ppsTransferContext = psTransferContext;
+
+	return PVRSRV_OK;
+
+	fail_tdmtransfercontext:
+	fail_frameworkcopy:
+	DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+	fail_frameworkcreate:
+	SyncPrimFree(psTransferContext->psCleanupSync);
+	fail_syncalloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psTransferContext->hLock);
+	fail_lockcreate:
+#endif
+	OSFreeMem(psTransferContext);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	*ppsTransferContext = NULL;
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+
+	/* Remove the node from the list before calling destroy, as a successful
+	 * destroy will invalidate the node; it must be re-added if destroy fails.
+	 */
+	OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+	dllist_remove_node(&(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+
+
+	eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData,
+	                                    psTransferContext->psDeviceNode,
+	                                    psTransferContext->psCleanupSync);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_destroyTDM;
+	}
+
+	DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+	SyncPrimFree(psTransferContext->psCleanupSync);
+
+	SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence);
+	SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psTransferContext->hLock);
+#endif
+
+	OSFreeMem(psTransferContext);
+
+	return PVRSRV_OK;
+
+	fail_destroyTDM:
+
+	OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+ * PVRSRVRGXTDMSubmitTransferKM
+ */
+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
+		RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
+		IMG_UINT32                  ui32PDumpFlags,
+		IMG_UINT32                  ui32ClientCacheOpSeqNum,
+		IMG_UINT32                  ui32ClientFenceCount,
+		SYNC_PRIMITIVE_BLOCK     ** pauiClientFenceUFOSyncPrimBlock,
+		IMG_UINT32                * paui32ClientFenceSyncOffset,
+		IMG_UINT32                * paui32ClientFenceValue,
+		IMG_UINT32                  ui32ClientUpdateCount,
+		SYNC_PRIMITIVE_BLOCK     ** pauiClientUpdateUFOSyncPrimBlock,
+		IMG_UINT32                * paui32ClientUpdateSyncOffset,
+		IMG_UINT32                * paui32ClientUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		IMG_UINT32                  ui32ServerSyncCount,
+		IMG_UINT32                * paui32ServerSyncFlags,
+		SERVER_SYNC_PRIMITIVE    ** papsServerSyncs,
+#endif
+		PVRSRV_FENCE                iCheckFence,
+		PVRSRV_TIMELINE             iUpdateTimeline,
+		PVRSRV_FENCE              * piUpdateFence,
+		IMG_CHAR                    szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+		IMG_UINT32                  ui32FWCommandSize,
+		IMG_UINT8                 * pui8FWCommand,
+		IMG_UINT32                  ui32ExtJobRef,
+		IMG_UINT32                  ui32SyncPMRCount,
+		IMG_UINT32                * paui32SyncPMRFlags,
+		PMR                      ** ppsSyncPMRs)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+	RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
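+	/* Internal ('Int') copies of the client fence/update lists; these may
+	 * grow below as buffer sync and fence sync checkpoints are appended.
+	 */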
+	PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress   = NULL;
+	PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress  = NULL;
+	IMG_UINT32        * paui32IntFenceValue      = paui32ClientFenceValue;
+	IMG_UINT32          ui32IntClientFenceCount  = ui32ClientFenceCount;
+	IMG_UINT32        * paui32IntUpdateValue     = paui32ClientUpdateValue;
+	IMG_UINT32          ui32IntClientUpdateCount = ui32ClientUpdateCount;
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eError2;
+	PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
+	PVRSRV_RGXDEV_INFO  *psDevInfo = FWCommonContextGetRGXDevInfo(psTransferContext->sTDMData.psServerCommonContext);
+	IMG_UINT32          ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+
+	IMG_UINT32 ui32CmdOffset = 0;
+	IMG_BOOL bCCBStateOpen;
+
+	IMG_UINT64               uiCheckFenceUID = 0;
+	IMG_UINT64               uiUpdateFenceUID = 0;
+#if defined(SUPPORT_BUFFER_SYNC)
+	struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+	PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+	PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+#endif
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+	PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+	IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+	void *pvUpdateFenceFinaliseData = NULL;
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	if (iUpdateTimeline >= 0 && !piUpdateFence)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#else /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+	if (iUpdateTimeline >= 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Providing update timeline (%d) in non-supporting driver",
+				__func__, iUpdateTimeline));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	if (iCheckFence >= 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Providing check fence (%d) in non-supporting driver",
+				__func__, iCheckFence));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+	/* Ensure the string is null-terminated (Required for safety) */
+	szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH - 1] = '\0';
+
+	if (ui32SyncPMRCount != 0)
+	{
+		if (!ppsSyncPMRs)
+		{
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psTransferContext->hLock);
+#endif
+
+	/* We can't allocate the required amount of stack space on all consumer architectures */
+	psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA));
+	if (psCmdHelper == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_allochelper;
+	}
+
+
+	/*
+		Init the command helper commands for all the prepares
+	 */
+	{
+		RGX_CLIENT_CCB *psClientCCB;
+		RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx;
+		IMG_CHAR *pszCommandName;
+		RGXFWIF_CCB_CMD_TYPE eType;
+#if defined(SUPPORT_BUFFER_SYNC)
+		struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+
+		psServerCommonCtx = psTransferContext->sTDMData.psServerCommonContext;
+		psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+		pszCommandName = "TQ-TDM";
+
+		if (ui32FWCommandSize == 0)
+		{
+			/* A NULL CMD for TDM is used to append updates to an unfinished
+			 * FW command. bCCBStateOpen is set in case capture range is
+			 * entered on this command, so that the CCB is not drained up to
+			 * the Roff for this command, but only up to the previously
+			 * finished command.
+			 */
+			bCCBStateOpen = IMG_TRUE;
+			eType = RGXFWIF_CCB_CMD_TYPE_NULL;
+		}
+		else
+		{
+			bCCBStateOpen = IMG_FALSE;
+			eType = RGXFWIF_CCB_CMD_TYPE_TQ_TDM;
+		}
+#if defined(SUPPORT_BUFFER_SYNC)
+		psBufferSyncContext = psTransferContext->sTDMData.psBufferSyncContext;
+#endif
+
+		eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence,
+		                              ui32ClientFenceCount,
+		                              pauiClientFenceUFOSyncPrimBlock,
+		                              paui32ClientFenceSyncOffset);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_populate_sync_addr_list;
+		}
+		paui32IntFenceValue      = paui32ClientFenceValue;
+		pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+
+		eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate,
+		                              ui32ClientUpdateCount,
+		                              pauiClientUpdateUFOSyncPrimBlock,
+		                              paui32ClientUpdateSyncOffset);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_populate_sync_addr_list;
+		}
+		paui32IntUpdateValue     = paui32ClientUpdateValue;
+		pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+
+
+		if (ui32SyncPMRCount)
+		{
+#if defined(SUPPORT_BUFFER_SYNC)
+			int err;
+
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Calling pvr_buffer_sync_resolve_and_create_fences", __func__));
+			err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext,
+			                                                psTransferContext->psDeviceNode->hSyncCheckpointContext,
+			                                                ui32SyncPMRCount,
+			                                                ppsSyncPMRs,
+			                                                paui32SyncPMRFlags,
+			                                                &ui32BufferFenceSyncCheckpointCount,
+			                                                &apsBufferFenceSyncCheckpoints,
+			                                                &psBufferUpdateSyncCheckpoint,
+			                                                &psBufferSyncData);
+			if (err)
+			{
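+				/* Map the Linux errno from buffer_sync onto a driver error code */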
+				switch (err)
+				{
+					case -EINTR:
+						eError = PVRSRV_ERROR_RETRY;
+						break;
+					case -ENOMEM:
+						eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+						break;
+					default:
+						eError = PVRSRV_ERROR_INVALID_PARAMS;
+						break;
+				}
+
+				if (eError != PVRSRV_ERROR_RETRY)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError)));
+				}
+				goto fail_resolve_input_fence;
+			}
+
+			/* Append buffer sync fences */
+			if (ui32BufferFenceSyncCheckpointCount > 0)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d buffer sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence , (void*)pauiIntFenceUFOAddress));
+				SyncAddrListAppendAndDeRefCheckpoints(&psTransferContext->sSyncAddrListFence,
+				                                      ui32BufferFenceSyncCheckpointCount,
+				                                      apsBufferFenceSyncCheckpoints);
+				if (!pauiIntFenceUFOAddress)
+				{
+					pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+				}
+				ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+			}
+
+			if (psBufferUpdateSyncCheckpoint)
+			{
+				/* Append the update (from output fence) */
+				SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+				                              1,
+				                              &psBufferUpdateSyncCheckpoint);
+				if (!pauiIntUpdateUFOAddress)
+				{
+					pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+				}
+				ui32IntClientUpdateCount++;
+			}
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_populate_sync_addr_list;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+		}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+		/* Resolve the sync checkpoints that make up the input fence */
+		eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext,
+		                                    iCheckFence,
+		                                    &ui32FenceSyncCheckpointCount,
+		                                    &apsFenceSyncCheckpoints,
+		                                    &uiCheckFenceUID);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_resolve_input_fence;
+		}
+#if defined(TDM_CHECKPOINT_DEBUG)
+		{
+			IMG_UINT32 ii;
+			for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+			{
+				PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+			}
+		}
+#endif
+		/* Create the output fence (if required) */
+		if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+		{
+			eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode,
+			                                   szUpdateFenceName,
+			                                   iUpdateTimeline,
+			                                   psTransferContext->psDeviceNode->hSyncCheckpointContext,
+			                                   &iUpdateFence,
+			                                   &uiUpdateFenceUID,
+			                                   &pvUpdateFenceFinaliseData,
+			                                   &psUpdateSyncCheckpoint,
+			                                   (void*)&psFenceTimelineUpdateSync,
+			                                   &ui32FenceTimelineUpdateValue);
+			if (eError != PVRSRV_OK)
+			{
+				goto fail_create_output_fence;
+			}
+
+			/* Append the sync prim update for the timeline (if required) */
+			if (psFenceTimelineUpdateSync)
+			{
+				IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+				/* Allocate memory to hold the list of update values (including our timeline update) */
+				pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+				if (!pui32IntAllocatedUpdateValues)
+				{
+					/* Failed to allocate memory */
+					eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto fail_alloc_update_values_mem;
+				}
+				OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+				/* Copy the update values into the new memory, then append our timeline update value */
+				OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+				/* Now set the additional update value */
+				pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+				*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+				ui32IntClientUpdateCount++;
+#if defined(TDM_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Now append the timeline sync prim addr to the transfer context update list */
+				SyncAddrListAppendSyncPrim(&psTransferContext->sSyncAddrListUpdate,
+				                           psFenceTimelineUpdateSync);
+#if defined(TDM_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+				paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+			}
+		}
+
+		if (ui32FenceSyncCheckpointCount)
+		{
+			/* Append the checks (from input fence) */
+			if (ui32FenceSyncCheckpointCount > 0)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence));
+#if defined(TDM_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+					for (iii=0; iii<ui32IntClientFenceCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntFenceUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListFence,
+				                              ui32FenceSyncCheckpointCount,
+				                              apsFenceSyncCheckpoints);
+				if (!pauiIntFenceUFOAddress)
+				{
+					pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+				}
+				ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+			}
+#if defined(TDM_CHECKPOINT_DEBUG)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+		if (psUpdateSyncCheckpoint)
+		{
+			/* Append the update (from output fence) */
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+			SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+			                              1,
+			                              &psUpdateSyncCheckpoint);
+			if (!pauiIntUpdateUFOAddress)
+			{
+				pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+			}
+			ui32IntClientUpdateCount++;
+#if defined(TDM_CHECKPOINT_DEBUG)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if (ENABLE_TDM_UFO_DUMP == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__));
+		{
+			IMG_UINT32 ii;
+			PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+			IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+			PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+			IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+			/* Dump Fence syncs and Update syncs */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM fence syncs (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+			for (ii=0; ii<ui32IntClientFenceCount; ii++)
+			{
+				if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+					pui32TmpIntFenceValue++;
+				}
+				psTmpIntFenceUFOAddress++;
+			}
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM update syncs (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+			for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+			{
+				if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+					pui32TmpIntUpdateValue++;
+				}
+				psTmpIntUpdateUFOAddress++;
+			}
+		}
+#endif
+
+		/*
+			Create the command helper data for this command
+		 */
+		eError = RGXCmdHelperInitCmdCCB(psClientCCB,
+		                                ui32IntClientFenceCount,
+		                                pauiIntFenceUFOAddress,
+		                                paui32IntFenceValue,
+		                                ui32IntClientUpdateCount,
+		                                pauiIntUpdateUFOAddress,
+		                                paui32IntUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		                                ui32ServerSyncCount,
+		                                paui32ServerSyncFlags,
+		                                SYNC_FLAG_MASK_ALL,
+		                                papsServerSyncs,
+#endif
+		                                ui32FWCommandSize,
+		                                pui8FWCommand,
+		                                eType,
+		                                ui32ExtJobRef,
+		                                ui32IntJobRef,
+		                                ui32PDumpFlags,
+		                                NULL,
+		                                pszCommandName,
+		                                bCCBStateOpen,
+		                                psCmdHelper);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_initcmd;
+		}
+	}
+
+	/*
+		Acquire space for all the commands in one go
+	 */
+
+	eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_3dcmdacquire;
+	}
+
+
+	/*
+		We should acquire the kernel CCB(s) space here as the schedule could fail
+		and we would have to roll back all the syncs
+	 */
+
+	/*
+		Only do the command helper release (which takes the server sync
+		operations) if the acquire succeeded.
+	 */
+	ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+	RGXCmdHelperReleaseCmdCCB(1,
+	                          psCmdHelper,
+	                          "TQ_TDM",
+	                          FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr);
+
+
+	/*
+		Even if we failed to acquire the client CCB space we might still need
+		to kick the HW to process a padding packet to release space for us next
+		time round
+	 */
+	{
+		RGXFWIF_KCCB_CMD sTDMKCCBCmd;
+		IMG_UINT32 ui32FWAddr = FWCommonContextGetFWAddress(
+				psTransferContext->sTDMData.psServerCommonContext).ui32Addr;
+
+		/* Construct the kernel 3D CCB command. */
+		sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+		sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+		sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+		/* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */
+		/* 		s3DKCCBCmd.uCmdData.sCmdKickData.psContext, */
+		/* 		ui323DCmdOffset); */
+		RGXSRV_HWPERF_ENQ(psTransferContext,
+		                  OSGetCurrentClientProcessIDKM(),
+		                  FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr,
+		                  ui32ExtJobRef,
+		                  ui32IntJobRef,
+		                  RGX_HWPERF_KICK_TYPE_TQTDM,
+		                  iCheckFence,
+		                  iUpdateFence,
+		                  iUpdateTimeline,
+		                  uiCheckFenceUID,
+		                  uiUpdateFenceUID,
+		                  NO_DEADLINE,
+		                  NO_CYCEST);
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+			                             RGXFWIF_DM_TDM,
+			                             &sTDMKCCBCmd,
+			                             ui32ClientCacheOpSeqNum,
+			                             ui32PDumpFlags);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		PVRGpuTraceEnqueueEvent(psDeviceNode->pvDevice, ui32FWAddr, ui32ExtJobRef,
+		                        ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQTDM);
+	}
+
+	/*
+	 * Now check eError (which may hold an error from our earlier call to
+	 * RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command
+	 * first, so we check it now...
+	 */
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_2dcmdacquire;
+	}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(NO_HARDWARE)
+	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+	if (psUpdateSyncCheckpoint)
+	{
+		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+	}
+	if (psFenceTimelineUpdateSync)
+	{
+		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+	}
+	SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	if (psBufferSyncData)
+	{
+		pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+	}
+	if (apsBufferFenceSyncCheckpoints)
+	{
+		kfree(apsBufferFenceSyncCheckpoints);
+	}
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+	*piUpdateFence = iUpdateFence;
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+	{
+		SyncCheckpointFinaliseFence(psDeviceNode, iUpdateFence, pvUpdateFenceFinaliseData,
+		                            psUpdateSyncCheckpoint, szUpdateFenceName);
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+	OSFreeMem(psCmdHelper);
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+	                             apsFenceSyncCheckpoints);
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+	/*
+	No resources are created in this function, so there is nothing to free
+	unless we had to merge syncs.
+	If we fail after the client CCB acquire there is still nothing to do,
+	as only the client CCB release will modify the client CCB.
+	 */
+	fail_2dcmdacquire:
+	fail_3dcmdacquire:
+
+	fail_initcmd:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListFence);
+	SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListUpdate);
+	fail_alloc_update_values_mem:
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+	/* fail_pdumpcheck: */
+	/* fail_cmdtype: */
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	if (iUpdateFence != PVRSRV_NO_FENCE)
+	{
+		SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+	fail_create_output_fence:
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+	                             apsFenceSyncCheckpoints);
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL) || defined(SUPPORT_BUFFER_SYNC)
+	fail_resolve_input_fence:
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	if (psBufferSyncData)
+	{
+		pvr_buffer_sync_kick_failed(psBufferSyncData);
+	}
+	if (apsBufferFenceSyncCheckpoints)
+	{
+		kfree(apsBufferFenceSyncCheckpoints);
+	}
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+	fail_populate_sync_addr_list:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	OSFreeMem(psCmdHelper);
+	fail_allochelper:
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+#endif
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+		RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+		IMG_UINT32                 ui32PDumpFlags)
+{
+	RGXFWIF_KCCB_CMD  sKCCBCmd;
+	PVRSRV_ERROR      eError;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psTransferContext->hLock);
+#endif
+
+	/* Schedule the firmware command */
+	sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+	sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice,
+		                            RGXFWIF_DM_TDM,
+		                            &sKCCBCmd,
+		                            0,
+		                            ui32PDumpFlags);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to schedule the FW command %d (%s)",
+				__func__, eError, PVRSRVGETERRORSTRING(eError)));
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                      RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+                                                      IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psTransferContext->hLock);
+#endif
+
+	if (psTransferContext->sTDMData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext,
+		                            psConnection,
+		                            psTransferContext->psDeviceNode->pvDevice,
+		                            ui32Priority,
+		                            RGXFWIF_DM_TDM);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __func__, PVRSRVGetErrorString(eError)));
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psTransferContext->hLock);
+#endif
+			return eError;
+		}
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return PVRSRV_OK;
+}
+
+void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                              void *pvDumpDebugFile,
+                              IMG_UINT32 ui32VerbLevel)
+{
+	DLLIST_NODE *psNode, *psNext;
+
+	OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+				IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+		DumpFWCommonContextInfo(psCurrentServerTransferCtx->sTDMData.psServerCommonContext,
+		                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+}
+
+
+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_UINT32 ui32ContextBitMask = 0;
+
+	OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+				IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+		if (CheckStalledClientCommonContext(
+				psCurrentServerTransferCtx->sTDMData.psServerCommonContext, RGX_KICK_TYPE_DM_TDM_2D)
+				== PVRSRV_ERROR_CCCB_STALLED)
+		{
+			ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D;
+		}
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+	return ui32ContextBitMask;
+}
+
+/**************************************************************************//**
+ End of file (rgxtdmtransfer.c)
+ ******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtdmtransfer.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtdmtransfer.h
new file mode 100644
index 0000000..3e30af5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtdmtransfer.h
@@ -0,0 +1,122 @@
+/*************************************************************************/ /*!
+@File           rgxtdmtransfer.h
+@Title          RGX Transfer queue 2 Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Transfer queue Functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTDMTRANSFER_H__)
+#define __RGXTDMTRANSFER_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "connection_server.h"
+
+typedef struct _RGX_SERVER_TQ_TDM_CONTEXT_ RGX_SERVER_TQ_TDM_CONTEXT;
+
+
+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
+	CONNECTION_DATA           * psConnection,
+	PVRSRV_DEVICE_NODE        * psDeviceNode,
+	IMG_UINT32                  ui32Priority,
+	IMG_UINT32                  ui32FrameworkCommandSize,
+	IMG_PBYTE                   pabyFrameworkCommand,
+	IMG_HANDLE                  hMemCtxPrivData,
+	IMG_UINT32                  ui32PackedCCBSizeU88,
+	RGX_SERVER_TQ_TDM_CONTEXT **ppsTransferContext);
+
+
+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext);
+
+
+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
+	RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
+	IMG_UINT32                  ui32PDumpFlags,
+	IMG_UINT32                  ui32ClientCacheOpSeqNum,
+	IMG_UINT32                  ui32ClientFenceCount,
+	SYNC_PRIMITIVE_BLOCK     ** pauiClientFenceUFOSyncPrimBlock,
+	IMG_UINT32                * paui32ClientFenceSyncOffset,
+	IMG_UINT32                * paui32ClientFenceValue,
+	IMG_UINT32                  ui32ClientUpdateCount,
+	SYNC_PRIMITIVE_BLOCK     ** pauiClientUpdateUFOSyncPrimBlock,
+	IMG_UINT32                * paui32ClientUpdateSyncOffset,
+	IMG_UINT32                * paui32ClientUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	IMG_UINT32                  ui32ServerSyncCount,
+	IMG_UINT32                * paui32ServerSyncFlags,
+	SERVER_SYNC_PRIMITIVE    ** papsServerSyncs,
+#endif
+	PVRSRV_FENCE                iCheckFence,
+	PVRSRV_TIMELINE             iUpdateTimeline,
+	PVRSRV_FENCE              * piUpdateFence,
+	IMG_CHAR                    szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+	IMG_UINT32                  ui32FWCommandSize,
+	IMG_UINT8                 * pui8FWCommand,
+	IMG_UINT32                  ui32ExtJobRef,
+	IMG_UINT32                  ui32SyncPMRCount,
+	IMG_UINT32                * pui32SyncPMRFlags,
+	PMR                      ** ppsSyncPMRs);
+
+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+	IMG_UINT32                 ui32PDumpFlags);
+
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                      RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+                                                      IMG_UINT32 ui32Priority);
+
+/* Debug - Dump debug info of TDM transfer contexts on this device */
+void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                              void *pvDumpDebugFile,
+                              IMG_UINT32 ui32VerbLevel);
+
+/* Debug/Watchdog - check if client transfer contexts are stalled */
+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+
+#endif /* __RGXTDMTRANSFER_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtimecorr.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtimecorr.c
new file mode 100644
index 0000000..e3e58ae
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtimecorr.c
@@ -0,0 +1,644 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific time correlation and calibration routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific time correlation and calibration routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxtimecorr.h"
+#include "rgxfwutils.h"
+#include "htbserver.h"
+#include "pvrsrv_apphint.h"
+
+/******************************************************************************
+ *
+ * - A calibration period is started on power-on and after a DVFS transition,
+ *   and it's closed before a power-off and before a DVFS transition
+ *   (so power-on -> dvfs -> dvfs -> power-off, power-on -> dvfs -> dvfs...,
+ *   where each arrow is a calibration period).
+ *
+ * - The timers on the Host and on the FW are correlated at the beginning of
+ *   each period together with the current GPU frequency.
+ *
+ * - Correlation and calibration are also done at regular intervals using
+ *   a best effort approach.
+ *
+ *****************************************************************************/
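+
+/*
+ * Illustrative timeline (hypothetical events, sketching the scheme above):
+ * with power-on at t0, one DVFS transition at t1 and power-off at t2, the
+ * calibration periods are [t0,t1] and [t1,t2]. RGXTimeCorrBegin() opens a
+ * period by sampling the CR timer, the selected OS clock and the current
+ * GPU frequency; RGXTimeCorrEnd() closes it and, if the period lasted long
+ * enough, recalibrates the estimated GPU frequency from the two timer
+ * deltas.
+ */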
+
+static IMG_UINT32 g_ui32ClockSource = PVRSRV_APPHINT_TIMECORRCLOCK;
+
+/*
+	AppHint interfaces
+*/
+
+static PVRSRV_ERROR _SetClock(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const void *psPrivate,
+                              IMG_UINT32 ui32Value)
+{
+	static const IMG_CHAR *apszClocks[] = {
+		"mono", "mono_raw", "sched"
+	};
+
+	if (ui32Value >= RGXTIMECORR_CLOCK_LAST)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Invalid clock source type (%u)", ui32Value));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	RGXTimeCorrEnd((PVRSRV_DEVICE_NODE *) psDeviceNode,
+	               RGXTIMECORR_EVENT_CLOCK_CHANGE);
+
+	PVR_DPF((PVR_DBG_WARNING, "Setting time correlation clock from \"%s\" to \"%s\"",
+			apszClocks[g_ui32ClockSource],
+			apszClocks[ui32Value]));
+
+	g_ui32ClockSource = ui32Value;
+
+	RGXTimeCorrBegin((PVRSRV_DEVICE_NODE *) psDeviceNode,
+	                 RGXTIMECORR_EVENT_CLOCK_CHANGE);
+
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+	PVR_UNREFERENCED_PARAMETER(apszClocks);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _GetClock(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const void *psPrivate,
+                              IMG_UINT32 *pui32Value)
+{
+	*pui32Value = g_ui32ClockSource;
+
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	return PVRSRV_OK;
+}
+
+void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_TimeCorrClock, _GetClock,
+	                                    _SetClock, psDeviceNode, NULL);
+}
+
+/*
+	End of AppHint interface
+*/
+
+IMG_UINT64 RGXTimeCorrGetClockns64(void)
+{
+	IMG_UINT64 ui64Clock;
+
+	switch (g_ui32ClockSource) {
+		case RGXTIMECORR_CLOCK_MONO:
+			return ((void) OSClockMonotonicns64(&ui64Clock), ui64Clock);
+		case RGXTIMECORR_CLOCK_MONO_RAW:
+			return OSClockMonotonicRawns64();
+		case RGXTIMECORR_CLOCK_SCHED:
+			return OSClockns64();
+		default:
+			PVR_ASSERT(IMG_FALSE);
+			return 0;
+	}
+}
+
+IMG_UINT64 RGXTimeCorrGetClockus64(void)
+{
+	IMG_UINT32 rem;
+	return OSDivide64r64(RGXTimeCorrGetClockns64(), 1000, &rem);
+}
+
+void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                        RGXFWIF_TIME_CORR *psTimeCorrs,
+                        IMG_UINT32 ui32NumOut)
+{
+	PVRSRV_RGXDEV_INFO    *psDevInfo     = psDeviceNode->pvDevice;
+	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	IMG_UINT32 ui32CurrentIndex = psGpuUtilFWCB->ui32TimeCorrSeqCount;
+
+	while (ui32NumOut--)
+	{
+		*(psTimeCorrs++) = psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32CurrentIndex)];
+		ui32CurrentIndex--;
+	}
+}
+
+static __maybe_unused const IMG_CHAR* _EventToString(RGXTIMECORR_EVENT eEvent)
+{
+	switch (eEvent)
+	{
+		case RGXTIMECORR_EVENT_POWER:
+			return "power";
+		case RGXTIMECORR_EVENT_DVFS:
+			return "dvfs";
+		case RGXTIMECORR_EVENT_PERIODIC:
+			return "periodic";
+		case RGXTIMECORR_EVENT_CLOCK_CHANGE:
+			return "clock source";
+		default:
+			return "n/a";
+	}
+}
+
+static inline IMG_UINT32 _RGXGetSystemLayerGPUClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+
+	return psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+}
+
+static inline IMG_UINT32 _RGXGetEstimatedGPUClockSpeed(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+	GPU_FREQ_TRACKING_DATA *psTrackingData;
+
+	psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex];
+
+	return psTrackingData->ui32EstCoreClockSpeed;
+}
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+static inline void _DumpTimerCorrelationHistory(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+	IMG_UINT32 i = psGpuDVFSTable->ui32HistoryIndex;
+
+	PVR_DPF((PVR_DBG_ERROR, "Dumping history of timer correlation data (latest first):"));
+
+	do
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "  Begin times: OS %" IMG_UINT64_FMTSPEC ", CR %" IMG_UINT64_FMTSPEC ", "
+				 "End times: OS %" IMG_UINT64_FMTSPEC ", CR %" IMG_UINT64_FMTSPEC ", "
+				 "Core clk %u, Estimated clk %u",
+				 psGpuDVFSTable->asTrackingHistory[i].ui64BeginOSTimestamp,
+				 psGpuDVFSTable->asTrackingHistory[i].ui64BeginCRTimestamp,
+				 psGpuDVFSTable->asTrackingHistory[i].ui64EndOSTimestamp,
+				 psGpuDVFSTable->asTrackingHistory[i].ui64EndCRTimestamp,
+				 psGpuDVFSTable->asTrackingHistory[i].ui32CoreClockSpeed,
+				 psGpuDVFSTable->asTrackingHistory[i].ui32EstCoreClockSpeed));
+
+		i = (i - 1) % RGX_GPU_FREQ_TRACKING_SIZE;
+
+	} while (i != psGpuDVFSTable->ui32HistoryIndex);
+}
+#endif
+
+static void _RGXMakeTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, RGXTIMECORR_EVENT eEvent)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	IMG_UINT32 ui32NewSeqCount = psGpuUtilFWCB->ui32TimeCorrSeqCount + 1;
+	RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)];
+
+	/*
+	 * The following reads must be done as close together as possible, because
+	 * they represent the same current time sampled from different clock sources.
+	 */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	if (OSClockMonotonicns64(&psTimeCorr->ui64OSMonoTimeStamp) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"_RGXMakeTimeCorrData: System Monotonic Clock not available."));
+		PVR_ASSERT(0);
+	}
+#endif
+	psTimeCorr->ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo);
+	psTimeCorr->ui64OSTimeStamp = RGXTimeCorrGetClockns64();
+	psTimeCorr->ui32CoreClockSpeed = _RGXGetEstimatedGPUClockSpeed(psDevInfo);
+	psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(psTimeCorr->ui32CoreClockSpeed);
+
+	if (psTimeCorr->ui64CRDeltaToOSDeltaKNs == 0)
+	{
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+		_DumpTimerCorrelationHistory(psDevInfo);
+#endif
+
+		/* Revert to original clock speed (error already printed) */
+		psTimeCorr->ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode);
+		psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(psTimeCorr->ui32CoreClockSpeed);
+	}
+
+	/* Make sure the values are written to memory before updating the index of the current entry */
+	OSWriteMemoryBarrier();
+
+	/* Update the index of the current entry in the timer correlation array */
+	psGpuUtilFWCB->ui32TimeCorrSeqCount = ui32NewSeqCount;
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+	         "Timer correlation data (post %s event): OS %" IMG_UINT64_FMTSPEC " ns, "
+	         "CR %" IMG_UINT64_FMTSPEC ", GPU freq. %u Hz (given as %u Hz)",
+	         _EventToString(eEvent),
+	         psTimeCorr->ui64OSTimeStamp,
+	         psTimeCorr->ui64CRTimeStamp,
+	         RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed),
+	         _RGXGetSystemLayerGPUClockSpeed(psDeviceNode)));
+
+	/*
+	 * Don't log timing data to the HTB log after a power(-on) event.
+	 * Otherwise this will be logged before the HTB partition marker, breaking
+	 * the log sync grammar. This data will be automatically repeated when the
+	 * partition marker is written.
+	 */
+	HTBSyncScale(eEvent != RGXTIMECORR_EVENT_POWER,
+	             psTimeCorr->ui64OSTimeStamp,
+	             psTimeCorr->ui64CRTimeStamp,
+	             psTimeCorr->ui32CoreClockSpeed);
+}
+
+static void _RGXCheckTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+#if !defined(NO_HARDWARE) && defined(DEBUG)
+#define SCALING_FACTOR (10)
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	IMG_UINT32 ui32Index = RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount);
+	RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32Index];
+	IMG_UINT64 ui64EstimatedTime, ui64CRTimeStamp, ui64OSTimeStamp;
+	IMG_UINT64 ui64CRTimeDiff, ui64OSTimeDiff;
+	IMG_INT64 i64Diff;
+	IMG_UINT32 ui32Ratio, ui32Remainder;
+
+	/*
+	 * The following reads must be done as close together as possible, because
+	 * they represent the same current time sampled from different clock sources.
+	 */
+	ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo);
+	ui64OSTimeStamp = RGXTimeCorrGetClockns64();
+
+	if ((ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) < (1 << SCALING_FACTOR))
+	{
+		/*
+		 * Less than ~1us has passed since the timer correlation data was generated.
+		 * A time frame this short is probably not enough to get an estimate
+		 * of how good the timer correlation data was.
+		 * Skip calculations for the above reason and to avoid a division by 0 below.
+		 */
+		return;
+	}
+
+
+	/* Calculate an estimated timestamp based on the latest timer correlation data */
+	ui64CRTimeDiff = ui64CRTimeStamp - psTimeCorr->ui64CRTimeStamp;
+	ui64OSTimeDiff = RGXFWIF_GET_DELTA_OSTIME_NS(ui64CRTimeDiff,
+	                                             psTimeCorr->ui64CRDeltaToOSDeltaKNs);
+	ui64EstimatedTime = psTimeCorr->ui64OSTimeStamp + ui64OSTimeDiff;
+
+	/* Get difference between estimated timestamp and current timestamp, in ns */
+	i64Diff = ui64EstimatedTime - ui64OSTimeStamp;
+
+	/*
+	 * Calculate ratio between estimated time diff and real time diff:
+	 * ratio% : 100% = (OSestimate - OStimecorr) : (OSreal - OStimecorr)
+	 *
+	 * The operands are scaled down (approximately from ns to us) so that at
+	 * least the divisor fits in 32 bits.
+	 */
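+	/*
+	 * Illustrative example (hypothetical numbers): if 1000000 ns of real
+	 * time have elapsed since the correlation point but the correlation
+	 * data only predicts 990000 ns, the division below yields
+	 * ui32Ratio = 99, i.e. the estimated timestamps are running 1% slower
+	 * than real time.
+	 */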
+	ui32Ratio = OSDivide64(((ui64EstimatedTime - psTimeCorr->ui64OSTimeStamp) * 100ULL) >> SCALING_FACTOR,
+	                       (ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) >> SCALING_FACTOR,
+	                       &ui32Remainder);
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+	         "Estimated timestamp check: diff %" IMG_INT64_FMTSPECd " ns over "
+	         "period %" IMG_UINT64_FMTSPEC " ns, estimated timer speed %u%%",
+	         i64Diff,
+	         ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp,
+	         ui32Ratio));
+
+	/* Warn if the estimated timestamp is not within +/- 1% of the current time */
+	if (ui32Ratio < 99 || ui32Ratio > 101)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+		         "Estimated timestamps generated in the last %" IMG_UINT64_FMTSPEC " ns "
+		         "were %s the real time (increasing at %u%% speed)",
+		         ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp,
+		         i64Diff > 0 ? "ahead of" : "behind",
+		         ui32Ratio));
+
+		/* Higher ratio == higher delta OS == higher delta CR == frequency higher than expected (and vice versa) */
+		PVR_DPF((PVR_DBG_WARNING,
+		         "Current GPU frequency %u Hz (given as %u Hz) is probably %s than expected",
+		         RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed),
+		         _RGXGetSystemLayerGPUClockSpeed(psDeviceNode),
+		         i64Diff > 0 ? "lower" : "higher"));
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable);
+#endif
+}
+
+static inline IMG_UINT32 _RGXGPUFreqGetIndex(RGX_GPU_DVFS_TABLE *psGpuDVFSTable, IMG_UINT32 ui32CoreClockSpeed)
+{
+	IMG_UINT32 *paui32GPUFrequencies = psGpuDVFSTable->aui32GPUFrequency;
+	IMG_UINT32 i;
+
+	for (i = 0; i < RGX_GPU_DVFS_TABLE_SIZE; i++)
+	{
+		if (paui32GPUFrequencies[i] == ui32CoreClockSpeed)
+		{
+			return i;
+		}
+
+		if (paui32GPUFrequencies[i] == 0)
+		{
+			paui32GPUFrequencies[i] = ui32CoreClockSpeed;
+			return i;
+		}
+	}
+
+	i--;
+
+	PVR_DPF((PVR_DBG_ERROR, "GPU frequency table in the driver is full! "
+	         "Table size should be increased! Overriding last entry (%u) with %u",
+	         paui32GPUFrequencies[i], ui32CoreClockSpeed));
+
+	paui32GPUFrequencies[i] = ui32CoreClockSpeed;
+
+	return i;
+}
+
+static void _RGXGPUFreqCalibrationPeriodStart(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	GPU_FREQ_TRACKING_DATA *psTrackingData;
+	IMG_UINT32 ui32CoreClockSpeed, ui32Index;
+
+	IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo);
+	IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64();
+
+	psGpuDVFSTable->ui64CalibrationCRTimestamp = ui64CRTimestamp;
+	psGpuDVFSTable->ui64CalibrationOSTimestamp = ui64OSTimestamp;
+
+	ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode);
+	ui32Index          = _RGXGPUFreqGetIndex(psGpuDVFSTable, ui32CoreClockSpeed);
+	psTrackingData     = &psGpuDVFSTable->asTrackingData[ui32Index];
+
+	/* Set the time needed to (re)calibrate the GPU frequency */
+	if (psTrackingData->ui32CalibrationCount == 0) /* We never met this frequency */
+	{
+		psTrackingData->ui32EstCoreClockSpeed = ui32CoreClockSpeed;
+		psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US;
+	}
+	else if (psTrackingData->ui32CalibrationCount == 1) /* We calibrated this frequency only once */
+	{
+		psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US;
+	}
+	else
+	{
+		psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US;
+	}
+
+	/* Update the index to the DVFS table */
+	psGpuDVFSTable->ui32FreqIndex = ui32Index;
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+	/* Update tracking history */
+	{
+		GPU_FREQ_TRACKING_HISTORY *psTrackingHistory;
+
+		psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex];
+		psTrackingHistory->ui32CoreClockSpeed    = ui32CoreClockSpeed;
+		psTrackingHistory->ui32EstCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed;
+		psTrackingHistory->ui64BeginCRTimestamp  = ui64CRTimestamp;
+		psTrackingHistory->ui64BeginOSTimestamp  = ui64OSTimestamp;
+		psTrackingHistory->ui64EndCRTimestamp    = 0ULL;
+		psTrackingHistory->ui64EndOSTimestamp    = 0ULL;
+	}
+#endif
+}
+
+static void _RGXGPUFreqCalibrationPeriodStop(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                             RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo);
+	IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64();
+
+	psGpuDVFSTable->ui64CalibrationCRTimediff =
+	    ui64CRTimestamp - psGpuDVFSTable->ui64CalibrationCRTimestamp;
+	psGpuDVFSTable->ui64CalibrationOSTimediff =
+	    ui64OSTimestamp - psGpuDVFSTable->ui64CalibrationOSTimestamp;
+
+	/* Check if the current timer correlation data is good enough */
+	_RGXCheckTimeCorrData(psDeviceNode, psGpuDVFSTable);
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+	/* Update tracking history */
+	{
+		GPU_FREQ_TRACKING_HISTORY *psTrackingHistory;
+
+		psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex];
+		psTrackingHistory->ui64EndCRTimestamp = ui64CRTimestamp;
+		psTrackingHistory->ui64EndOSTimestamp = ui64OSTimestamp;
+	}
+#endif
+}
+
+static void _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                            RGX_GPU_DVFS_TABLE *psGpuDVFSTable,
+                                            RGXTIMECORR_EVENT   eEvent)
+{
+#if !defined(DISABLE_GPU_FREQUENCY_CALIBRATION)
+	GPU_FREQ_TRACKING_DATA *psTrackingData;
+	IMG_UINT32 ui32EstCoreClockSpeed, ui32PrevCoreClockSpeed;
+	IMG_INT32  i32Diff;
+	IMG_UINT32 ui32Remainder;
+
+	/*
+	 * Find out what the GPU frequency was in the last period.
+	 * This should return a value very close to the frequency passed by the system layer.
+	 */
+	ui32EstCoreClockSpeed =
+	    RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(psGpuDVFSTable->ui64CalibrationCRTimediff,
+	                                       psGpuDVFSTable->ui64CalibrationOSTimediff,
+	                                       ui32Remainder);
+
+	/* Update GPU frequency used by the driver for a given system layer frequency */
+	psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex];
+
+	ui32PrevCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed;
+	psTrackingData->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed;
+	psTrackingData->ui32CalibrationCount++;
+
+	i32Diff = (IMG_INT32) (ui32EstCoreClockSpeed - ui32PrevCoreClockSpeed);
+
+	if ((i32Diff < -1000000) || (i32Diff > 1000000))
+	{
+		/* Warn if the frequency changed by more than 1 MHz between recalculations */
+		PVR_DPF((PVR_DBG_WARNING,
+		         "GPU frequency calibration of system layer frequency %u Hz (pre %s event): "
+		         "more than 1 MHz difference between old and new value "
+		         "(%u Hz -> %u Hz over %"  IMG_UINT64_FMTSPEC " us)",
+		         _RGXGetSystemLayerGPUClockSpeed(psDeviceNode),
+		         _EventToString(eEvent),
+		         RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed),
+		         RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed),
+		         psGpuDVFSTable->ui64CalibrationOSTimediff));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,
+		         "GPU frequency calibration of system layer frequency %u Hz (pre %s event): "
+		         "%u Hz -> %u Hz done over %" IMG_UINT64_FMTSPEC " us",
+		         _RGXGetSystemLayerGPUClockSpeed(psDeviceNode),
+		         _EventToString(eEvent),
+		         RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed),
+		         RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed),
+		         psGpuDVFSTable->ui64CalibrationOSTimediff));
+	}
+
+	/* Reset time deltas to avoid recalibrating the same frequency over and over again */
+	psGpuDVFSTable->ui64CalibrationCRTimediff = 0;
+	psGpuDVFSTable->ui64CalibrationOSTimediff = 0;
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+	/* Update tracking history */
+	{
+		GPU_FREQ_TRACKING_HISTORY *psTrackingHistory;
+
+		psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex];
+		psTrackingHistory->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed;
+		psGpuDVFSTable->ui32HistoryIndex =
+			(psGpuDVFSTable->ui32HistoryIndex + 1) % RGX_GPU_FREQ_TRACKING_SIZE;
+	}
+#endif
+
+#else
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable);
+	PVR_UNREFERENCED_PARAMETER(eEvent);
+#endif
+}
+
+void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent)
+{
+	PVRSRV_DEVICE_NODE  *psDeviceNode   = hDevHandle;
+	PVRSRV_RGXDEV_INFO  *psDevInfo      = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	_RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+	_RGXMakeTimeCorrData(psDeviceNode, eEvent);
+}
+
+void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent)
+{
+	PVRSRV_DEVICE_NODE  *psDeviceNode   = hDevHandle;
+	PVRSRV_RGXDEV_INFO  *psDevInfo      = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	_RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+
+	if (psGpuDVFSTable->ui64CalibrationOSTimediff >= psGpuDVFSTable->ui32CalibrationPeriod)
+	{
+		_RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable, eEvent);
+	}
+}
+
+void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE     *psDeviceNode   = hDevHandle;
+	PVRSRV_RGXDEV_INFO     *psDevInfo      = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE     *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+	IMG_UINT64             ui64TimeNow     = RGXTimeCorrGetClockus64();
+	PVRSRV_DEV_POWER_STATE ePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	if (psDevInfo->psGpuDVFSTable == NULL)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Required data not initialised yet", __func__));
+		return;
+	}
+
+	/* Check if it's the right time to recalibrate the GPU clock frequency */
+	if ((ui64TimeNow - psGpuDVFSTable->ui64CalibrationOSTimestamp) < psGpuDVFSTable->ui32CalibrationPeriod) return;
+
+	/* Try to acquire the powerlock, if not possible then don't wait */
+	if (!OSTryLockAcquire(psDeviceNode->hPowerLock)) return;
+
+	/* If the GPU is off then we can't do anything */
+	PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+	if (ePowerState != PVRSRV_DEV_POWER_STATE_ON)
+	{
+		PVRSRVPowerUnlock(psDeviceNode);
+		return;
+	}
+
+	/* All checks passed, we can calibrate and correlate */
+	RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC);
+	RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC);
+
+	PVRSRVPowerUnlock(psDeviceNode);
+}
+
+/*
+	RGXTimeCorrGetClockSource
+*/
+RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(void)
+{
+	return g_ui32ClockSource;
+}
+
+/*
+	RGXTimeCorrSetClockSource
+*/
+PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       RGXTIMECORR_CLOCK_TYPE eClockType)
+{
+	return _SetClock(psDeviceNode, NULL, eClockType);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXCurrentTime(CONNECTION_DATA    * psConnection,
+                     PVRSRV_DEVICE_NODE * psDeviceNode,
+                     IMG_UINT64         * pui64Time)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+	*pui64Time = RGXTimeCorrGetClockns64();
+
+	return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxtimecorr.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtimecorr.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtimecorr.h
new file mode 100644
index 0000000..8714b4c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtimecorr.h
@@ -0,0 +1,266 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX time correlation and calibration header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX time correlation and calibration routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTIMECORR_H__)
+#define __RGXTIMECORR_H__
+
+#include "img_types.h"
+#include "device.h"
+#include "osfunc.h"
+#include "connection_server.h"
+
+typedef enum
+{
+	RGXTIMECORR_CLOCK_MONO,
+	RGXTIMECORR_CLOCK_MONO_RAW,
+	RGXTIMECORR_CLOCK_SCHED,
+
+	RGXTIMECORR_CLOCK_LAST
+} RGXTIMECORR_CLOCK_TYPE;
+
+typedef enum
+{
+	RGXTIMECORR_EVENT_POWER,
+	RGXTIMECORR_EVENT_DVFS,
+	RGXTIMECORR_EVENT_PERIODIC,
+	RGXTIMECORR_EVENT_CLOCK_CHANGE
+} RGXTIMECORR_EVENT;
+
+/*
+ * Calibrated GPU frequencies are rounded to the nearest multiple of 1 kHz
+ * before use, to reduce the noise introduced by calculations done with
+ * imperfect operands (correlated timers not sampled at exactly the same
+ * time, GPU CR timer incrementing only once every 256 GPU cycles).
+ * This also helps reduce the variation between consecutive calculations.
+ */
+#define RGXFWIF_CONVERT_TO_KHZ(freq)   (((freq) + 500) / 1000)
+#define RGXFWIF_ROUND_TO_KHZ(freq)    ((((freq) + 500) / 1000) * 1000)
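+
+/*
+ * Worked example (illustrative numbers): for freq = 123456789 Hz,
+ * RGXFWIF_CONVERT_TO_KHZ(freq) = (123456789 + 500) / 1000 = 123457 kHz, and
+ * RGXFWIF_ROUND_TO_KHZ(freq) = 123457 * 1000 = 123457000 Hz, i.e. the
+ * original value rounded to the nearest multiple of 1 kHz.
+ */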
+
+
+/*
+ * Use this macro to get a more realistic GPU core clock speed than the one
+ * given by the upper layers (used when doing GPU frequency calibration)
+ */
+#define RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(deltacr_us, deltaos_us, remainder) \
+    OSDivide64((deltacr_us) * 256000000, (deltaos_us), &(remainder))
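+
+/*
+ * Worked example (illustrative numbers): the CR timer ticks once every
+ * 256 GPU cycles, so deltacr_us CR ticks counted over deltaos_us
+ * microseconds imply a core clock of
+ * (deltacr_us * 256 * 1000000) / deltaos_us Hz. For instance, 1000000 CR
+ * ticks measured over 640000 us give
+ * (1000000 * 256000000) / 640000 = 400000000 Hz (400 MHz).
+ */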
+
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrGetConversionFactor
+
+ @Description Generate constant used to convert a GPU time difference into
+              an OS time difference (for more info see rgx_fwif_km.h).
+
+ @Input       ui32ClockSpeed : GPU clock speed
+
+ @Return      0 on failure, conversion factor otherwise
+
+******************************************************************************/
+static inline IMG_UINT64 RGXTimeCorrGetConversionFactor(IMG_UINT32 ui32ClockSpeed)
+{
+	IMG_UINT32 ui32Remainder;
+
+	if (RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed) == 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: GPU clock frequency %u is too low",
+				 __func__, ui32ClockSpeed));
+
+		return 0;
+	}
+
+	return OSDivide64r64(256000000ULL << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT,
+	                     RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed), &ui32Remainder);
+}
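+
+/*
+ * Usage sketch (illustrative local names; see rgx_fwif_km.h for the
+ * canonical macros): the returned factor converts a CR timer delta into an
+ * OS time delta in ns, in the way RGXFWIF_GET_DELTA_OSTIME_NS() does:
+ *
+ *   ui64KNs = RGXTimeCorrGetConversionFactor(ui32ClockSpeed);
+ *   ui64OSDeltaNs = (ui64CRDelta * ui64KNs) >>
+ *                       RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT;
+ */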
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrBegin
+
+ @Description Generate new timer correlation data, and start tracking
+              the current GPU frequency.
+
+ @Input       hDevHandle : RGX Device Node
+ @Input       eEvent     : Event associated with the beginning of a timer
+                           correlation period
+
+ @Return      void
+
+******************************************************************************/
+void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrEnd
+
+ @Description Stop tracking the CPU and GPU timers, and if possible
+              recalculate the GPU frequency to a value which makes the timer
+              correlation data more accurate.
+
+ @Input       hDevHandle : RGX Device Node
+ @Input       eEvent     : Event associated with the end of a timer
+                           correlation period
+
+ @Return      void
+
+******************************************************************************/
+void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrRestartPeriodic
+
+ @Description Perform actions from RGXTimeCorrEnd and RGXTimeCorrBegin,
+              but only if enough time has passed since the last timer
+              correlation data was generated.
+
+ @Input       hDevHandle : RGX Device Node
+
+ @Return      void
+
+******************************************************************************/
+void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrGetClockns64
+
+ @Description Returns value of currently selected clock (in ns).
+
+ @Return      clock value from currently selected clock source
+
+******************************************************************************/
+IMG_UINT64 RGXTimeCorrGetClockns64(void);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrGetClockus64
+
+ @Description Returns value of currently selected clock (in us).
+
+ @Return      clock value from currently selected clock source
+
+******************************************************************************/
+IMG_UINT64 RGXTimeCorrGetClockus64(void);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrGetClockSource
+
+ @Description Returns currently selected clock source
+
+ @Return      clock source type
+
+******************************************************************************/
+RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(void);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrSetClockSource
+
+ @Description Sets clock source for correlation data.
+
+ @Input       psDeviceNode : RGX Device Node
+ @Input       eClockType : clock source type
+
+ @Return      error code
+
+******************************************************************************/
+PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       RGXTIMECORR_CLOCK_TYPE eClockType);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrInitAppHintCallbacks
+
+ @Description Initialise apphint callbacks for timer correlation
+              related apphints.
+
+ @Input       psDeviceNode : RGX Device Node
+
+ @Return      void
+
+******************************************************************************/
+void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGetTimeCorrData
+
+ @Description Get a number of the most recent time correlation data points
+
+ @Input       psDeviceNode : RGX Device Node
+ @Output      psTimeCorrs  : Output array of RGXFWIF_TIME_CORR elements
+                             for data to be written to
+ @Input       ui32NumOut   : Number of elements to be written out
+
+ @Return      void
+
+******************************************************************************/
+void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                        RGXFWIF_TIME_CORR *psTimeCorrs,
+                        IMG_UINT32 ui32NumOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXCurrentTime
+@Description    Returns the current state of the device timer
+@Input          psDeviceNode  Device node.
+@Output         pui64Time     Current value (ns) of the selected clock source.
+@Return         PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXCurrentTime(CONNECTION_DATA    * psConnection,
+                     PVRSRV_DEVICE_NODE * psDeviceNode,
+                     IMG_UINT64         * pui64Time);
+
+#endif /* __RGXTIMECORR_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtransfer.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtransfer.c
new file mode 100644
index 0000000..e60e7ed
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtransfer.c
@@ -0,0 +1,1709 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific transfer queue routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_memallocflags.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "htbuffer.h"
+
+#include "pdump_km.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_bvnc_defs_km.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TQ_UFO_DUMP	0
+
+//#define TRANSFER_CHECKPOINT_DEBUG 1
+
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+typedef struct {
+	DEVMEM_MEMDESC				*psFWContextStateMemDesc;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+#if defined(SUPPORT_BUFFER_SYNC)
+	struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+} RGX_SERVER_TQ_3D_DATA;
+
+
+typedef struct {
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+#if defined(SUPPORT_BUFFER_SYNC)
+	struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+} RGX_SERVER_TQ_2D_DATA;
+
+struct _RGX_SERVER_TQ_CONTEXT_ {
+	PVRSRV_DEVICE_NODE			*psDeviceNode;
+	DEVMEM_MEMDESC				*psFWFrameworkMemDesc;
+	IMG_UINT32					ui32Flags;
+#define RGX_SERVER_TQ_CONTEXT_FLAGS_2D		(1<<0)
+#define RGX_SERVER_TQ_CONTEXT_FLAGS_3D		(1<<1)
+	RGX_SERVER_TQ_3D_DATA		s3DData;
+	RGX_SERVER_TQ_2D_DATA		s2DData;
+	PVRSRV_CLIENT_SYNC_PRIM		*psCleanupSync;
+	DLLIST_NODE					sListNode;
+	ATOMIC_T			hIntJobRef;
+	IMG_UINT32			ui32PDumpFlags;
+	/* per-prepare sync address lists */
+	SYNC_ADDR_LIST			asSyncAddrListFence[TQ_MAX_PREPARES_PER_SUBMIT];
+	SYNC_ADDR_LIST			asSyncAddrListUpdate[TQ_MAX_PREPARES_PER_SUBMIT];
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
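+	/* Serialises submission and priority changes on this context when the
+	 * global bridge lock is not in use */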
+	POS_LOCK				hLock;
+#endif
+};
+
+/*
+	Static functions used by transfer context code
+*/
+static PVRSRV_ERROR _Create3DTransferContext(CONNECTION_DATA *psConnection,
+											 PVRSRV_DEVICE_NODE *psDeviceNode,
+											 DEVMEM_MEMDESC *psFWMemContextMemDesc,
+											 IMG_UINT32 ui32Priority,
+											 RGX_COMMON_CONTEXT_INFO *psInfo,
+											 RGX_SERVER_TQ_3D_DATA *ps3DData,
+											 IMG_UINT32 ui32CCBAllocSizeLog2,
+											 IMG_UINT32 ui32CCBMaxAllocSizeLog2)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR eError;
+	IMG_UINT	ui3DRegISPStateStoreSize = 0;
+	IMG_UINT	uiNumISPStoreRegs = 1; /* a single entry is expected by default */
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	*/
+	PDUMPCOMMENT("Allocate RGX firmware TQ/3D context suspend state");
+
+	if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+	{
+		uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode,
+													RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX);
+	}
+
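+	/* The suspend state structure ends in a variable-length array of ISP
+	 * store words, one per ISP pipe; cores with XE_MEMORY_HIERARCHY only
+	 * need the single default entry. */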
+	/* Calculate the size of the 3DCTX ISP state */
+	ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) +
+			uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	ps3DData->psBufferSyncContext =
+		pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+									   "rogue-tq3d");
+	if (IS_ERR(ps3DData->psBufferSyncContext))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: failed to create buffer_sync context (err=%ld)",
+				 __func__, PTR_ERR(ps3DData->psBufferSyncContext)));
+
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto fail_buffer_sync_context_create;
+	}
+#endif
+
+	eError = DevmemFwAllocate(psDevInfo,
+							ui3DRegISPStateStoreSize,
+							RGX_FWCOMCTX_ALLOCFLAGS,
+							"FwTQ3DContext",
+							&ps3DData->psFWContextStateMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextswitchstate;
+	}
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 REQ_TYPE_TQ_3D,
+									 RGXFWIF_DM_3D,
+									 NULL,
+									 0,
+									 psFWMemContextMemDesc,
+									 ps3DData->psFWContextStateMemDesc,
+									 ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ3D_CCB_SIZE_LOG2,
+									 ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TQ3D_CCB_MAX_SIZE_LOG2,
+									 ui32Priority,
+									 psInfo,
+									 &ps3DData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+
+	PDUMPCOMMENT("Dump 3D context suspend state buffer");
+	DevmemPDumpLoadMem(ps3DData->psFWContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS);
+
+	ps3DData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_contextalloc:
+	DevmemFwFree(psDevInfo, ps3DData->psFWContextStateMemDesc);
+fail_contextswitchstate:
+#if defined(SUPPORT_BUFFER_SYNC)
+	pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext);
+	ps3DData->psBufferSyncContext = NULL;
+fail_buffer_sync_context_create:
+#endif
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+static PVRSRV_ERROR _Create2DTransferContext(CONNECTION_DATA *psConnection,
+											 PVRSRV_DEVICE_NODE *psDeviceNode,
+											 DEVMEM_MEMDESC *psFWMemContextMemDesc,
+											 IMG_UINT32 ui32Priority,
+											 RGX_COMMON_CONTEXT_INFO *psInfo,
+											 RGX_SERVER_TQ_2D_DATA *ps2DData,
+											 IMG_UINT32 ui32CCBAllocSizeLog2,
+											 IMG_UINT32 ui32CCBMaxAllocSizeLog2)
+{
+	PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	ps2DData->psBufferSyncContext =
+		pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+									   "rogue-tqtla");
+	if (IS_ERR(ps2DData->psBufferSyncContext))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: failed to create buffer_sync context (err=%ld)",
+				 __func__, PTR_ERR(ps2DData->psBufferSyncContext)));
+
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto fail_buffer_sync_context_create;
+	}
+#endif
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 REQ_TYPE_TQ_2D,
+									 RGXFWIF_DM_2D,
+									 NULL,
+									 0,
+									 psFWMemContextMemDesc,
+									 NULL,
+									 ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ2D_CCB_SIZE_LOG2,
+									 ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TQ2D_CCB_MAX_SIZE_LOG2,
+									 ui32Priority,
+									 psInfo,
+									 &ps2DData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+	ps2DData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_contextalloc:
+#if defined(SUPPORT_BUFFER_SYNC)
+	pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext);
+	ps2DData->psBufferSyncContext = NULL;
+fail_buffer_sync_context_create:
+#endif
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+static PVRSRV_ERROR _Destroy2DTransferContext(RGX_SERVER_TQ_2D_DATA *ps2DData,
+											  PVRSRV_DEVICE_NODE *psDeviceNode,
+											  PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync,
+											  IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+											  ps2DData->psServerCommonContext,
+											  psCleanupSync,
+											  RGXFWIF_DM_2D,
+											  ui32PDumpFlags);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				 __func__,
+				 PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+	/* ... it has, so we can free its resources */
+	FWCommonContextFree(ps2DData->psServerCommonContext);
+	ps2DData->psServerCommonContext = NULL;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext);
+	ps2DData->psBufferSyncContext = NULL;
+#endif
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData,
+											  PVRSRV_DEVICE_NODE *psDeviceNode,
+											  PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync,
+											  IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+											  ps3DData->psServerCommonContext,
+											  psCleanupSync,
+											  RGXFWIF_DM_3D,
+											  ui32PDumpFlags);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				 __func__,
+				 PVRSRVGetErrorString(eError)));
+		return eError;
+	}
+
+	/* ... it has, so we can free its resources */
+	DevmemFwFree(psDeviceNode->pvDevice, ps3DData->psFWContextStateMemDesc);
+	FWCommonContextFree(ps3DData->psServerCommonContext);
+	ps3DData->psServerCommonContext = NULL;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext);
+	ps3DData->psBufferSyncContext = NULL;
+#endif
+
+	return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVRGXCreateTransferContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA		*psConnection,
+										   PVRSRV_DEVICE_NODE		*psDeviceNode,
+										   IMG_UINT32				ui32Priority,
+										   IMG_UINT32				ui32FrameworkCommandSize,
+										   IMG_PBYTE				pabyFrameworkCommand,
+										   IMG_HANDLE				hMemCtxPrivData,
+										   IMG_UINT32				ui32PackedCCBSizeU8888,
+										   RGX_SERVER_TQ_CONTEXT	**ppsTransferContext)
+{
+	RGX_SERVER_TQ_CONTEXT	*psTransferContext;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC			*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_COMMON_CONTEXT_INFO	sInfo;
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+
+	/* Allocate the server side structure */
+	*ppsTransferContext = NULL;
+	psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
+	if (psTransferContext == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psTransferContext->hLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+									__func__,
+									PVRSRVGetErrorString(eError)));
+		goto fail_createlock;
+	}
+#endif
+
+	psTransferContext->psDeviceNode = psDeviceNode;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psTransferContext->psCleanupSync,
+						   "transfer context cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate cleanup sync (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_syncalloc;
+	}
+
+	/*
+	 * Create the FW framework buffer
+	 */
+	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+										&psTransferContext->psFWFrameworkMemDesc,
+										ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate firmware GPU framework state (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_frameworkcreate;
+	}
+
+	/* Copy the Framework client data into the framework buffer */
+	eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc,
+										   pabyFrameworkCommand,
+										   ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to populate the framework buffer (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+		goto fail_frameworkcopy;
+	}
+
+	sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+
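+	/* ui32PackedCCBSizeU8888 carries four log2 CCB sizes, one per byte:
+	 * two for the 2D context (size and max size) and two for the 3D
+	 * context; a zero byte selects the matching RGX_TQ*_CCB_*_LOG2 default. */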
+	eError = _Create3DTransferContext(psConnection,
+									  psDeviceNode,
+									  psFWMemContextMemDesc,
+									  ui32Priority,
+									  &sInfo,
+									  &psTransferContext->s3DData,
+									  U32toU8_Unpack3(ui32PackedCCBSizeU8888),
+									  U32toU8_Unpack4(ui32PackedCCBSizeU8888));
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_3dtransfercontext;
+	}
+	psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_3D;
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))
+	{
+		eError = _Create2DTransferContext(psConnection,
+										  psDeviceNode,
+										  psFWMemContextMemDesc,
+										  ui32Priority,
+										  &sInfo,
+										  &psTransferContext->s2DData,
+										  U32toU8_Unpack1(ui32PackedCCBSizeU8888),
+										  U32toU8_Unpack2(ui32PackedCCBSizeU8888));
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_2dtransfercontext;
+		}
+		psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_2D;
+	}
+
+	OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+
+	*ppsTransferContext = psTransferContext;
+
+	return PVRSRV_OK;
+
+
+fail_2dtransfercontext:
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))
+	{
+		_Destroy3DTransferContext(&psTransferContext->s3DData,
+								  psTransferContext->psDeviceNode,
+								  psTransferContext->psCleanupSync,
+								  psTransferContext->ui32PDumpFlags);
+	}
+
+fail_3dtransfercontext:
+fail_frameworkcopy:
+	DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+	SyncPrimFree(psTransferContext->psCleanupSync);
+fail_syncalloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psTransferContext->hLock);
+fail_createlock:
+#endif
+	OSFreeMem(psTransferContext);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	*ppsTransferContext = NULL;
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+	IMG_UINT32 i;
+
+	/* Remove the node from the list before calling destroy: a successful
+	 * destroy will invalidate the node, so it must be re-added if destroy
+	 * fails.
+	 */
+	OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+	dllist_remove_node(&(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+
+	if ((psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) &&
+			(RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+	{
+		eError = _Destroy2DTransferContext(&psTransferContext->s2DData,
+										   psTransferContext->psDeviceNode,
+										   psTransferContext->psCleanupSync,
+										   PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_destroy2d;
+		}
+		/* We've freed the 2D context, don't try to free it again */
+		psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_2D;
+	}
+
+	if (psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D)
+	{
+		eError = _Destroy3DTransferContext(&psTransferContext->s3DData,
+										   psTransferContext->psDeviceNode,
+										   psTransferContext->psCleanupSync,
+										   PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_destroy3d;
+		}
+		/* We've freed the 3D context, don't try to free it again */
+		psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_3D;
+	}
+
+	/* free any resources within the per-prepare UFO address stores */
+	for (i = 0; i < TQ_MAX_PREPARES_PER_SUBMIT; i++)
+	{
+		SyncAddrListDeinit(&psTransferContext->asSyncAddrListFence[i]);
+		SyncAddrListDeinit(&psTransferContext->asSyncAddrListUpdate[i]);
+	}
+
+	DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+	SyncPrimFree(psTransferContext->psCleanupSync);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psTransferContext->hLock);
+#endif
+
+	OSFreeMem(psTransferContext);
+
+	return PVRSRV_OK;
+
+fail_destroy3d:
+
+fail_destroy2d:
+	OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+ * PVRSRVRGXSubmitTransferKM
+ */
+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT	*psTransferContext,
+									   IMG_UINT32				ui32ClientCacheOpSeqNum,
+									   IMG_UINT32				ui32PrepareCount,
+									   IMG_UINT32				*paui32ClientFenceCount,
+									   SYNC_PRIMITIVE_BLOCK		***papauiClientFenceUFOSyncPrimBlock,
+									   IMG_UINT32				**papaui32ClientFenceSyncOffset,
+									   IMG_UINT32				**papaui32ClientFenceValue,
+									   IMG_UINT32				*paui32ClientUpdateCount,
+									   SYNC_PRIMITIVE_BLOCK		***papauiClientUpdateUFOSyncPrimBlock,
+									   IMG_UINT32				**papaui32ClientUpdateSyncOffset,
+									   IMG_UINT32				**papaui32ClientUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+									   IMG_UINT32				*paui32ServerSyncCount,
+									   IMG_UINT32				**papaui32ServerSyncFlags,
+									   SERVER_SYNC_PRIMITIVE	***papapsServerSyncs,
+#endif
+									   PVRSRV_FENCE				iCheckFence,
+									   PVRSRV_TIMELINE			i2DUpdateTimeline,
+									   PVRSRV_FENCE				*pi2DUpdateFence,
+									   PVRSRV_TIMELINE			i3DUpdateTimeline,
+									   PVRSRV_FENCE				*pi3DUpdateFence,
+									   IMG_CHAR					szFenceName[32],
+									   IMG_UINT32				*paui32FWCommandSize,
+									   IMG_UINT8				**papaui8FWCommand,
+									   IMG_UINT32				*pui32TQPrepareFlags,
+									   IMG_UINT32				ui32ExtJobRef,
+									   IMG_UINT32				ui32SyncPMRCount,
+									   IMG_UINT32				*paui32SyncPMRFlags,
+									   PMR						**ppsSyncPMRs)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelper;
+	RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper;
+	IMG_UINT32 ui323DCmdCount = 0;
+	IMG_UINT32 ui322DCmdCount = 0;
+	IMG_UINT32 ui323DCmdLast = 0;
+	IMG_UINT32 ui322DCmdLast = 0;
+	IMG_UINT32 ui323DCmdOffset = 0;
+	IMG_UINT32 ui322DCmdOffset = 0;
+	IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_NONE;
+	IMG_UINT32 i;
+	IMG_UINT64               uiCheckFenceUID = 0;
+	IMG_UINT64               ui2DUpdateFenceUID = 0;
+	IMG_UINT64               ui3DUpdateFenceUID = 0;
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	PSYNC_CHECKPOINT ps2DUpdateSyncCheckpoint = NULL;
+	PSYNC_CHECKPOINT ps3DUpdateSyncCheckpoint = NULL;
+	PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+	IMG_UINT32 *pui322DIntAllocatedUpdateValues = NULL;
+	IMG_UINT32 *pui323DIntAllocatedUpdateValues = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *ps2DFenceTimelineUpdateSync = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL;
+	IMG_UINT32 ui322DFenceTimelineUpdateValue = 0;
+	IMG_UINT32 ui323DFenceTimelineUpdateValue = 0;
+	void *pv2DUpdateFenceFinaliseData = NULL;
+	void *pv3DUpdateFenceFinaliseData = NULL;
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+#if defined(SUPPORT_BUFFER_SYNC)
+	PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+	struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+	PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_ERROR eError2;
+	PVRSRV_FENCE i2DUpdateFence = PVRSRV_NO_FENCE;
+	PVRSRV_FENCE i3DUpdateFence = PVRSRV_NO_FENCE;
+	IMG_UINT32   ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+	IMG_UINT32   ui32PreparesDone = 0;
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	if (i2DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi2DUpdateFence)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	if (i3DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi3DUpdateFence)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#else /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+	if (i2DUpdateTimeline != PVRSRV_NO_TIMELINE || i3DUpdateTimeline != PVRSRV_NO_TIMELINE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Providing update timeline (%d, %d) in non-supporting driver",
+				 __func__, i2DUpdateTimeline, i3DUpdateTimeline));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	if (iCheckFence != PVRSRV_NO_FENCE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Providing check fence (%d) in non-supporting driver",
+			__func__, iCheckFence));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+	/* Ensure the string is null-terminated (Required for safety) */
+	szFenceName[31] = '\0';
+
+	if ((ui32PrepareCount == 0) || (ui32PrepareCount > TQ_MAX_PREPARES_PER_SUBMIT))
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32SyncPMRCount != 0)
+	{
+		if (!ppsSyncPMRs)
+		{
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
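+		/* Note: when SUPPORT_BUFFER_SYNC is not defined the block below is
+		 * entered unconditionally, so a non-zero ui32SyncPMRCount is always
+		 * rejected. */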
+#if defined(SUPPORT_BUFFER_SYNC)
+		/* PMR sync is valid only when there is no batching */
+		if ((ui32PrepareCount != 1))
+#endif
+		{
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psTransferContext->hLock);
+#endif
+
+	/* We can't allocate the required amount of stack space on all consumer architectures */
+	pas3DCmdHelper = OSAllocMem(sizeof(*pas3DCmdHelper) * ui32PrepareCount);
+	if (pas3DCmdHelper == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc3dhelper;
+	}
+	pas2DCmdHelper = OSAllocMem(sizeof(*pas2DCmdHelper) * ui32PrepareCount);
+	if (pas2DCmdHelper == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc2dhelper;
+	}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	if (iCheckFence != PVRSRV_NO_FENCE)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psDeviceNode->hSyncCheckpointContext));
+		/* Resolve the sync checkpoints that make up the input fence */
+		eError = SyncCheckpointResolveFence(psDeviceNode->hSyncCheckpointContext,
+											iCheckFence,
+											&ui32FenceSyncCheckpointCount,
+											&apsFenceSyncCheckpoints,
+											&uiCheckFenceUID);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError));
+			goto fail_resolve_fencesync_input_fence;
+		}
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+		if (ui32FenceSyncCheckpointCount > 0)
+		{
+			IMG_UINT32 ii;
+			for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+			{
+				PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints +  ii);
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+			}
+		}
+#endif
+	}
+#endif
+	/*
+		Ensure we do the right thing for server syncs which cross call boundaries
+	*/
+	for (i=0;i<ui32PrepareCount;i++)
+	{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		IMG_BOOL bHaveStartPrepare = pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_START;
+		IMG_BOOL bHaveEndPrepare = IMG_FALSE;
+
+		if (bHaveStartPrepare)
+		{
+			IMG_UINT32 k;
+			/*
+				We're at the start of a transfer operation (which might be made
+				up of multiple HW operations) so check whether the batch also
+				contains the end of the transfer operation
+			*/
+			for (k=i;k<ui32PrepareCount;k++)
+			{
+				if (pui32TQPrepareFlags[k] & TQ_PREP_FLAGS_END)
+				{
+					bHaveEndPrepare = IMG_TRUE;
+					break;
+				}
+			}
+
+			if (!bHaveEndPrepare)
+			{
+				/*
+					We don't have the complete command passed in this call
+					so drop the update request. When we get called again with
+					the last HW command in this transfer operation we'll do
+					the update at that point.
+				*/
+				for (k=0;k<paui32ServerSyncCount[i];k++)
+				{
+					papaui32ServerSyncFlags[i][k] &= ~PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE;
+				}
+			}
+		}
+#endif
+
+		if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D))
+		{
+			ui323DCmdLast++;
+		} else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D) &&
+				(RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+		{
+			ui322DCmdLast++;
+		}
+	}
+
+	/*
+		Init the command helper commands for all the prepares
+	*/
+	for (i=0;i<ui32PrepareCount;i++)
+	{
+		RGX_CLIENT_CCB *psClientCCB;
+		RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx;
+		IMG_CHAR *pszCommandName;
+		RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
+		RGXFWIF_CCB_CMD_TYPE eType;
+		PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+		PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+		SYNC_ADDR_LIST *psSyncAddrListFence = &psTransferContext->asSyncAddrListFence[i];
+		SYNC_ADDR_LIST *psSyncAddrListUpdate = &psTransferContext->asSyncAddrListUpdate[i];
+		IMG_UINT32 ui32IntClientFenceCount = paui32ClientFenceCount[i];
+		IMG_UINT32 *paui32IntFenceValue = papaui32ClientFenceValue[i];
+		IMG_UINT32 ui32IntClientUpdateCount = paui32ClientUpdateCount[i];
+		IMG_UINT32 *paui32IntUpdateValue = papaui32ClientUpdateValue[i];
+#if defined(SUPPORT_BUFFER_SYNC)
+		struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+		PVRSRV_FENCE *piUpdateFence = NULL;
+		PVRSRV_TIMELINE	iUpdateTimeline = PVRSRV_NO_TIMELINE;
+		void **ppvUpdateFenceFinaliseData = NULL;
+		PSYNC_CHECKPOINT * ppsUpdateSyncCheckpoint = NULL;
+		PVRSRV_CLIENT_SYNC_PRIM **ppsFenceTimelineUpdateSync = NULL;
+		IMG_UINT32 *pui32FenceTimelineUpdateValue = NULL;
+		IMG_UINT32 **ppui32IntAllocatedUpdateValues = NULL;
+		IMG_BOOL bCheckFence = IMG_FALSE;
+		IMG_BOOL bUpdateFence = IMG_FALSE;
+		IMG_UINT64 *puiUpdateFenceUID = NULL;
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+		IMG_BOOL                bCCBStateOpen = IMG_FALSE;
+
+		if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D))
+		{
+			psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext;
+			psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+			pszCommandName = "TQ-3D";
+			psCmdHelper = &pas3DCmdHelper[ui323DCmdCount++];
+			eType = RGXFWIF_CCB_CMD_TYPE_TQ_3D;
+#if defined(SUPPORT_BUFFER_SYNC)
+			psBufferSyncContext = psTransferContext->s3DData.psBufferSyncContext;
+#endif
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
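+			/* Attach the input fence checks to the first 3D command in the
+			 * batch only, and the output fence update to the last, so each
+			 * fence is carried exactly once. */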
+			bCheckFence = ui323DCmdCount == 1;
+			bUpdateFence = ui323DCmdCount == ui323DCmdLast
+				&& i3DUpdateTimeline != PVRSRV_NO_TIMELINE;
+
+			if (bUpdateFence)
+			{
+				piUpdateFence = &i3DUpdateFence;
+				iUpdateTimeline = i3DUpdateTimeline;
+				ppvUpdateFenceFinaliseData = &pv3DUpdateFenceFinaliseData;
+				ppsUpdateSyncCheckpoint = &ps3DUpdateSyncCheckpoint;
+				ppsFenceTimelineUpdateSync = &ps3DFenceTimelineUpdateSync;
+				pui32FenceTimelineUpdateValue = &ui323DFenceTimelineUpdateValue;
+				ppui32IntAllocatedUpdateValues = &pui323DIntAllocatedUpdateValues;
+				puiUpdateFenceUID = &ui3DUpdateFenceUID;
+			}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+		}
+		else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D) &&
+				(RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+		{
+			psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext;
+			psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+			pszCommandName = "TQ-2D";
+			psCmdHelper = &pas2DCmdHelper[ui322DCmdCount++];
+			eType = RGXFWIF_CCB_CMD_TYPE_TQ_2D;
+#if defined(SUPPORT_BUFFER_SYNC)
+			psBufferSyncContext = psTransferContext->s2DData.psBufferSyncContext;
+#endif
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
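+			/* As for 3D above: check on the first 2D command in the batch,
+			 * update on the last. */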
+			bCheckFence = ui322DCmdCount == 1;
+			bUpdateFence = ui322DCmdCount == ui322DCmdLast
+				&& i2DUpdateTimeline != PVRSRV_NO_TIMELINE;
+
+			if (bUpdateFence)
+			{
+				piUpdateFence = &i2DUpdateFence;
+				iUpdateTimeline = i2DUpdateTimeline;
+				ppvUpdateFenceFinaliseData = &pv2DUpdateFenceFinaliseData;
+				ppsUpdateSyncCheckpoint = &ps2DUpdateSyncCheckpoint;
+				ppsFenceTimelineUpdateSync = &ps2DFenceTimelineUpdateSync;
+				pui32FenceTimelineUpdateValue = &ui322DFenceTimelineUpdateValue;
+				ppui32IntAllocatedUpdateValues = &pui322DIntAllocatedUpdateValues;
+				puiUpdateFenceUID = &ui2DUpdateFenceUID;
+			}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+		}
+		else
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_prepare_loop;
+		}
+
+		if (i == 0)
+		{
+			ui32PDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE;
+			PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+					"%s Command Server Submit on FWCtx %08x", pszCommandName, FWCommonContextGetFWAddress(psServerCommonCtx).ui32Addr);
+			psTransferContext->ui32PDumpFlags |= ui32PDumpFlags;
+		}
+		else
+		{
+			IMG_UINT32 ui32NewPDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE;
+			if (ui32NewPDumpFlags != ui32PDumpFlags)
+			{
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				PVR_DPF((PVR_DBG_ERROR, "%s: Mixing continuous and non-continuous commands in a batch is not permitted", __func__));
+				goto fail_prepare_loop;
+			}
+		}
+
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->sSyncAddrListFence, %d fences)", __func__, ui32IntClientFenceCount));
+		eError = SyncAddrListPopulate(psSyncAddrListFence,
+										ui32IntClientFenceCount,
+										papauiClientFenceUFOSyncPrimBlock[i],
+										papaui32ClientFenceSyncOffset[i]);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_prepare_loop;
+		}
+		if (!pauiIntFenceUFOAddress)
+		{
+			pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
+		}
+
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->asSyncAddrListUpdate[], %d updates)", __func__, ui32IntClientUpdateCount));
+		eError = SyncAddrListPopulate(psSyncAddrListUpdate,
+										ui32IntClientUpdateCount,
+										papauiClientUpdateUFOSyncPrimBlock[i],
+										papaui32ClientUpdateSyncOffset[i]);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_prepare_loop;
+		}
+		if (!pauiIntUpdateUFOAddress)
+		{
+			pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+		}
+
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after sync prims) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+		if (ui32SyncPMRCount)
+		{
+#if defined(SUPPORT_BUFFER_SYNC)
+			int err;
+
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Calling pvr_buffer_sync_resolve_and_create_fences", __func__));
+			err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext,
+			                                                psTransferContext->psDeviceNode->hSyncCheckpointContext,
+			                                                ui32SyncPMRCount,
+			                                                ppsSyncPMRs,
+			                                                paui32SyncPMRFlags,
+			                                                &ui32BufferFenceSyncCheckpointCount,
+			                                                &apsBufferFenceSyncCheckpoints,
+			                                                &psBufferUpdateSyncCheckpoint,
+			                                                &psBufferSyncData);
+			if (err)
+			{
+				switch (err)
+				{
+					case -EINTR:
+						eError = PVRSRV_ERROR_RETRY;
+						break;
+					case -ENOMEM:
+						eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+						break;
+					default:
+						eError = PVRSRV_ERROR_INVALID_PARAMS;
+						break;
+				}
+
+				if (eError != PVRSRV_ERROR_RETRY)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError)));
+				}
+				goto fail_resolve_buffersync_input_fence;
+			}
+
+			/* Append buffer sync fences */
+			if (ui32BufferFenceSyncCheckpointCount > 0)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d buffer sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)psSyncAddrListFence , (void*)pauiIntFenceUFOAddress));
+				SyncAddrListAppendAndDeRefCheckpoints(psSyncAddrListFence,
+													  ui32BufferFenceSyncCheckpointCount,
+													  apsBufferFenceSyncCheckpoints);
+				if (!pauiIntFenceUFOAddress)
+				{
+					pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
+				}
+				ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+			}
+
+			if (psBufferUpdateSyncCheckpoint)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 buffer sync checkpoint<%p> to TQ Update (&psTransferContext->asSyncAddrListUpdate[i]=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)psBufferUpdateSyncCheckpoint, (void*)psSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+				/* Append the update (from output fence) */
+				SyncAddrListAppendCheckpoints(psSyncAddrListUpdate,
+											  1,
+											  &psBufferUpdateSyncCheckpoint);
+				if (!pauiIntUpdateUFOAddress)
+				{
+					pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+				}
+				ui32IntClientUpdateCount++;
+			}
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after buffer_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount));
+			PVR_DPF((PVR_DBG_ERROR, "%s:   <--EXIT(%d)", __func__, PVRSRV_ERROR_INVALID_PARAMS));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psTransferContext->hLock);
+#endif
+			return PVRSRV_ERROR_INVALID_PARAMS;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+		}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+		/* Create the output fence (if required) */
+		if (bUpdateFence)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (piUpdateFence=%p, iUpdateTimeline=%d, psTransferContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __func__, piUpdateFence, iUpdateTimeline, (void*)psDeviceNode->hSyncCheckpointContext));
+			eError = SyncCheckpointCreateFence(psDeviceNode,
+			                                   szFenceName,
+			                                   iUpdateTimeline,
+			                                   psDeviceNode->hSyncCheckpointContext,
+			                                   piUpdateFence,
+			                                   puiUpdateFenceUID,
+			                                   ppvUpdateFenceFinaliseData,
+			                                   ppsUpdateSyncCheckpoint,
+			                                   (void*)ppsFenceTimelineUpdateSync,
+			                                   pui32FenceTimelineUpdateValue);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s:   SyncCheckpointCreateFence failed (%s)",
+						__func__,
+						PVRSRVGetErrorString(eError)));
+				goto fail_prepare_loop;
+			}
+
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (piUpdateFence=%p)", __func__, piUpdateFence));
+
+			/* Append the sync prim update for the timeline (if required) */
+			if (*ppsFenceTimelineUpdateSync)
+			{
+				IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+				/* Allocate memory to hold the list of update values (including our timeline update) */
+				*ppui32IntAllocatedUpdateValues = OSAllocMem(sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+				if (!*ppui32IntAllocatedUpdateValues)
+				{
+					/* Failed to allocate memory */
+					eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto fail_prepare_loop;
+				}
+				OSCachedMemSet(*ppui32IntAllocatedUpdateValues, 0xbb, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+#if defined(SUPPORT_BUFFER_SYNC)
+				if (psBufferUpdateSyncCheckpoint)
+				{
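+					/* The buffer sync checkpoint appended earlier bumped
+					 * ui32IntClientUpdateCount but has no entry in the update
+					 * value array, hence only count-1 values are copied. */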
+					/* Copy the update values into the new memory, then append our timeline update value */
+					OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount-1));
+					pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + (ui32IntClientUpdateCount-1);
+				}
+				else
+#endif
+				{
+					/* Copy the update values into the new memory, then append our timeline update value */
+					OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+					pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+				}
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value 0x%x)", __func__, *pui32FenceTimelineUpdateValue));
+				/* Now set the additional update value */
+				*pui32TimelineUpdateWp = *pui32FenceTimelineUpdateValue;
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+				if (ui32IntClientUpdateCount > 0)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)*ppui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: *ppui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Now append the timeline sync prim addr to the transfer context update list */
+				SyncAddrListAppendSyncPrim(psSyncAddrListUpdate,
+				                           *ppsFenceTimelineUpdateSync);
+				ui32IntClientUpdateCount++;
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+				if (ui32IntClientUpdateCount > 0)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)*ppui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: *ppui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: set paui32IntUpdateValue<%p> to point to *ppui32IntAllocatedUpdateValues<%p>", __func__, (void*)paui32IntUpdateValue, (void*)*ppui32IntAllocatedUpdateValues));
+				paui32IntUpdateValue = *ppui32IntAllocatedUpdateValues;
+			}
+		}
+
+		if (bCheckFence && ui32FenceSyncCheckpointCount)
+		{
+			/* Append the checks (from input fence) */
+			if (ui32FenceSyncCheckpointCount > 0)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)psSyncAddrListFence));
+				SyncAddrListAppendCheckpoints(psSyncAddrListFence,
+											  ui32FenceSyncCheckpointCount,
+											  apsFenceSyncCheckpoints);
+				if (!pauiIntFenceUFOAddress)
+				{
+					pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
+				}
+				ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+			}
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+			if (ui32IntClientFenceCount > 0)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+				for (iii=0; iii<ui32IntClientFenceCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: psSyncAddrListFence->pasFWAddrs[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+		if (bUpdateFence && *ppsUpdateSyncCheckpoint)
+		{
+			/* Append the update (from output fence) */
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to TQ Update (psSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->asSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+			SyncAddrListAppendCheckpoints(psSyncAddrListUpdate,
+										  1,
+										  ppsUpdateSyncCheckpoint);
+			if (!pauiIntUpdateUFOAddress)
+			{
+				pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+			}
+			ui32IntClientUpdateCount++;
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if (ENABLE_TQ_UFO_DUMP == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: dumping TQ fence/updates syncs...", __func__));
+		{
+			IMG_UINT32 ii;
+			PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+			IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+			PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+			IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+			/* Dump Fence syncs and Update syncs */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ fence syncs (&psTransferContext->asSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->asSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+			for (ii=0; ii<ui32IntClientFenceCount; ii++)
+			{
+				if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+					pui32TmpIntFenceValue++;
+				}
+				psTmpIntFenceUFOAddress++;
+			}
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ update syncs (&psTransferContext->asSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->asSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+			for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+			{
+				if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+					pui32TmpIntUpdateValue++;
+				}
+				psTmpIntUpdateUFOAddress++;
+			}
+		}
+#endif
+
+		ui32PreparesDone++;
+
+		/*
+			Create the command helper data for this command
+		*/
+		eError = RGXCmdHelperInitCmdCCB(psClientCCB,
+		                                ui32IntClientFenceCount,
+		                                pauiIntFenceUFOAddress,
+		                                paui32IntFenceValue,
+		                                ui32IntClientUpdateCount,
+		                                pauiIntUpdateUFOAddress,
+		                                paui32IntUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		                                paui32ServerSyncCount[i],
+		                                papaui32ServerSyncFlags[i],
+		                                SYNC_FLAG_MASK_ALL,
+		                                papapsServerSyncs[i],
+#endif
+		                                paui32FWCommandSize[i],
+		                                papaui8FWCommand[i],
+		                                eType,
+		                                ui32ExtJobRef,
+		                                ui32IntJobRef,
+		                                ui32PDumpFlags,
+		                                NULL,
+		                                pszCommandName,
+		                                bCCBStateOpen,
+		                                psCmdHelper);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_prepare_loop;
+		}
+	}
+
+	/*
+		Acquire space for all the commands in one go
+	*/
+	if (ui323DCmdCount)
+	{
+		eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount,
+										   &pas3DCmdHelper[0]);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_cmdacquire;
+		}
+	}
+
+	if (ui322DCmdCount)
+	{
+		eError = RGXCmdHelperAcquireCmdCCB(ui322DCmdCount,
+										   &pas2DCmdHelper[0]);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_cmdacquire;
+		}
+	}
+
+	/*
+		We should acquire the kernel CCB(s) space here as the schedule could fail
+		and we would have to roll back all the syncs
+	*/
+
+	if (ui323DCmdCount)
+	{
+		ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext));
+		RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+								  &pas3DCmdHelper[0],
+								  "TQ_3D",
+								  FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr);
+	}
+
+	if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+	{
+		ui322DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext));
+		RGXCmdHelperReleaseCmdCCB(ui322DCmdCount,
+								  &pas2DCmdHelper[0],
+								  "TQ_2D",
+								  FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr);
+	}
+
+	if (ui323DCmdCount)
+	{
+		RGXFWIF_KCCB_CMD s3DKCCBCmd;
+		IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr;
+
+		/* Construct the kernel 3D CCB command. */
+		s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext);
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext));
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+		HTBLOGK(HTB_SF_MAIN_KICK_3D,
+				s3DKCCBCmd.uCmdData.sCmdKickData.psContext,
+				ui323DCmdOffset);
+		RGXSRV_HWPERF_ENQ(psTransferContext,
+		                  OSGetCurrentClientProcessIDKM(),
+		                  ui32FWCtx,
+		                  ui32ExtJobRef,
+		                  ui32IntJobRef,
+		                  RGX_HWPERF_KICK_TYPE_TQ3D,
+		                  iCheckFence,
+		                  i3DUpdateFence,
+		                  i3DUpdateTimeline,
+		                  uiCheckFenceUID,
+		                  ui3DUpdateFenceUID,
+		                  NO_DEADLINE,
+		                  NO_CYCEST);
+
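+		/* RGXScheduleCommand returns PVRSRV_ERROR_RETRY while the command
+		 * cannot yet be queued (typically because the kernel CCB is full),
+		 * so keep retrying until the HW timeout expires. */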
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psDevInfo,
+										RGXFWIF_DM_3D,
+										&s3DKCCBCmd,
+										ui32ClientCacheOpSeqNum,
+										ui32PDumpFlags);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef,
+		                        ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ3D);
+	}
+
+	if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+	{
+		RGXFWIF_KCCB_CMD s2DKCCBCmd;
+		IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr;
+
+		/* Construct the kernel 2D CCB command. */
+		s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		s2DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext);
+		s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext));
+		s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+		HTBLOGK(HTB_SF_MAIN_KICK_2D,
+				s2DKCCBCmd.uCmdData.sCmdKickData.psContext,
+				ui322DCmdOffset);
+		RGXSRV_HWPERF_ENQ(psTransferContext,
+		                  OSGetCurrentClientProcessIDKM(),
+		                  ui32FWCtx,
+		                  ui32ExtJobRef,
+		                  ui32IntJobRef,
+		                  RGX_HWPERF_KICK_TYPE_TQ2D,
+		                  iCheckFence,
+		                  i2DUpdateFence,
+		                  i2DUpdateTimeline,
+		                  uiCheckFenceUID,
+		                  ui2DUpdateFenceUID,
+		                  NO_DEADLINE,
+		                  NO_CYCEST);
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psDevInfo,
+										RGXFWIF_DM_2D,
+										&s2DKCCBCmd,
+										ui32ClientCacheOpSeqNum,
+										ui32PDumpFlags);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef,
+		                        ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ2D);
+	}
+
+	/*
+	 * Now check eError (which may hold an error from our earlier calls
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+	 * so we check it now...
+	 */
+	if (eError != PVRSRV_OK )
+	{
+		goto fail_cmdacquire;
+	}
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(NO_HARDWARE)
+	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+	if (ps2DUpdateSyncCheckpoint)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s:   Signalling TLA NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps2DUpdateSyncCheckpoint, SyncCheckpointGetId(ps2DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps2DUpdateSyncCheckpoint)));
+		SyncCheckpointSignalNoHW(ps2DUpdateSyncCheckpoint);
+	}
+	if (ps2DFenceTimelineUpdateSync)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s:   Updating TLA NOHW sync prim<%p> to %d", __func__, (void*)ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue));
+		SyncPrimNoHwUpdate(ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue);
+	}
+	if (ps3DUpdateSyncCheckpoint)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s:   Signalling TQ3D NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps3DUpdateSyncCheckpoint, SyncCheckpointGetId(ps3DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps3DUpdateSyncCheckpoint)));
+		SyncCheckpointSignalNoHW(ps3DUpdateSyncCheckpoint);
+	}
+	if (ps3DFenceTimelineUpdateSync)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s:   Updating TQ3D NOHW sync prim<%p> to %d", __func__, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue));
+		SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue);
+	}
+	SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	if (psBufferSyncData)
+	{
+		pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+	}
+	if (apsBufferFenceSyncCheckpoints)
+	{
+		kfree(apsBufferFenceSyncCheckpoints);
+	}
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+	if (pi2DUpdateFence)
+	{
+		*pi2DUpdateFence = i2DUpdateFence;
+	}
+	if (pi3DUpdateFence)
+	{
+		*pi3DUpdateFence = i3DUpdateFence;
+	}
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	if (pv2DUpdateFenceFinaliseData && (i2DUpdateFence != PVRSRV_NO_FENCE))
+	{
+		SyncCheckpointFinaliseFence(psDeviceNode, i2DUpdateFence, pv2DUpdateFenceFinaliseData,
+		                            ps2DUpdateSyncCheckpoint, szFenceName);
+	}
+	if (pv3DUpdateFenceFinaliseData && (i3DUpdateFence != PVRSRV_NO_FENCE))
+	{
+		SyncCheckpointFinaliseFence(psDeviceNode, i3DUpdateFence, pv3DUpdateFenceFinaliseData,
+		                            ps3DUpdateSyncCheckpoint, szFenceName);
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+	OSFreeMem(pas2DCmdHelper);
+	OSFreeMem(pas3DCmdHelper);
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui322DIntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui322DIntAllocatedUpdateValues);
+		pui322DIntAllocatedUpdateValues = NULL;
+	}
+	if (pui323DIntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui323DIntAllocatedUpdateValues);
+		pui323DIntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+/*
+	No resources are created in this function, so there is nothing to free
+	unless we had to merge syncs.
+	If we fail after the client CCB acquire there is still nothing to do,
+	as only the client CCB release will modify the client CCB.
+*/
+fail_cmdacquire:
+fail_prepare_loop:
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
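+	/* Unwind any sync checkpoints already appended to the per-prepare UFO
+	 * address lists by the prepare iterations that completed. */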
+	for (i=0;i<ui32PreparesDone;i++)
+	{
+		SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListFence[i]);
+		SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListUpdate[i]);
+	}
+#if defined(SUPPORT_BUFFER_SYNC)
+	if (ui32PreparesDone > 0)
+	{
+		/* Prevent duplicate rollback in case of buffer sync. */
+		psBufferUpdateSyncCheckpoint = NULL;
+	}
+#endif
+
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui322DIntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui322DIntAllocatedUpdateValues);
+		pui322DIntAllocatedUpdateValues = NULL;
+	}
+	if (pui323DIntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui323DIntAllocatedUpdateValues);
+		pui323DIntAllocatedUpdateValues = NULL;
+	}
+
+	if (i2DUpdateFence != PVRSRV_NO_FENCE)
+	{
+		SyncCheckpointRollbackFenceData(i2DUpdateFence, pv2DUpdateFenceFinaliseData);
+	}
+	if (i3DUpdateFence != PVRSRV_NO_FENCE)
+	{
+		SyncCheckpointRollbackFenceData(i3DUpdateFence, pv3DUpdateFenceFinaliseData);
+	}
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+	if (psBufferUpdateSyncCheckpoint)
+	{
+		SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListUpdate[0]);
+	}
+	if (psBufferSyncData)
+	{
+		pvr_buffer_sync_kick_failed(psBufferSyncData);
+	}
+	if (apsBufferFenceSyncCheckpoints)
+	{
+		kfree(apsBufferFenceSyncCheckpoints);
+	}
+fail_resolve_buffersync_input_fence:
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+fail_resolve_fencesync_input_fence:
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+	OSFreeMem(pas2DCmdHelper);
+fail_alloc2dhelper:
+	OSFreeMem(pas3DCmdHelper);
+fail_alloc3dhelper:
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                   PVRSRV_DEVICE_NODE * psDevNode,
+												   RGX_SERVER_TQ_CONTEXT *psTransferContext,
+												   IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psTransferContext->hLock);
+#endif
+
+	if ((psTransferContext->s2DData.ui32Priority != ui32Priority) &&
+			(RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+	{
+		eError = ContextSetPriority(psTransferContext->s2DData.psServerCommonContext,
+									psConnection,
+									psTransferContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_2D);
+		if (eError != PVRSRV_OK)
+		{
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 2D part of the transfercontext (%s)", __func__, PVRSRVGetErrorString(eError)));
+			}
+			goto fail_2dcontext;
+		}
+		psTransferContext->s2DData.ui32Priority = ui32Priority;
+	}
+
+	if (psTransferContext->s3DData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psTransferContext->s3DData.psServerCommonContext,
+									psConnection,
+									psTransferContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_3D);
+		if (eError != PVRSRV_OK)
+		{
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the transfer context (%s)", __func__, PVRSRVGetErrorString(eError)));
+			}
+			goto fail_3dcontext;
+		}
+		psTransferContext->s3DData.ui32Priority = ui32Priority;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+fail_3dcontext:
+
+fail_2dcontext:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                           void *pvDumpDebugFile,
+                           IMG_UINT32 ui32VerbLevel)
+{
+	DLLIST_NODE *psNode, *psNext;
+
+	OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode);
+
+		if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) &&
+				(RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+		{
+			DumpFWCommonContextInfo(psCurrentServerTransferCtx->s2DData.psServerCommonContext,
+			                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+		}
+
+		if (psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D)
+		{
+			DumpFWCommonContextInfo(psCurrentServerTransferCtx->s3DData.psServerCommonContext,
+			                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+		}
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_UINT32 ui32ContextBitMask = 0;
+
+	OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode);
+
+		if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) &&
+				(NULL != psCurrentServerTransferCtx->s2DData.psServerCommonContext) &&
+				(RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+		{
+			if (CheckStalledClientCommonContext(psCurrentServerTransferCtx->s2DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ2D) == PVRSRV_ERROR_CCCB_STALLED)
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ2D;
+			}
+		}
+
+		if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) && (NULL != psCurrentServerTransferCtx->s3DData.psServerCommonContext))
+		{
+			if ((CheckStalledClientCommonContext(psCurrentServerTransferCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ3D) == PVRSRV_ERROR_CCCB_STALLED))
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ3D;
+			}
+		}
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock);
+	return ui32ContextBitMask;
+}
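+
+/* Illustrative sketch (not code from this driver): a watchdog-style caller
+ * might pair the two debug helpers above as follows, where pfnPrintf, pvFile
+ * and ui32Verbosity come from the debug request framework:
+ *
+ *     IMG_UINT32 ui32Stalled = CheckForStalledClientTransferCtxt(psDevInfo);
+ *     if (ui32Stalled != 0)
+ *     {
+ *         DumpTransferCtxtsInfo(psDevInfo, pfnPrintf, pvFile, ui32Verbosity);
+ *     }
+ */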
+
+/**************************************************************************//**
+ End of file (rgxtransfer.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtransfer.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtransfer.h
new file mode 100644
index 0000000..a0814d3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxtransfer.h
@@ -0,0 +1,156 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Transfer queue Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Transfer queue Functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTRANSFER_H__)
+#define __RGXTRANSFER_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "connection_server.h"
+
+typedef struct _RGX_SERVER_TQ_CONTEXT_ RGX_SERVER_TQ_CONTEXT;
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXCreateTransferContextKM
+
+ @Description
+	Server-side implementation of RGXCreateTransferContext
+
+ @Input psConnection - connection data
+ @Input psDeviceNode - device node
+ @Input ui32Priority - priority for the new transfer context
+ @Input ui32FrameworkCommandSize - size of the framework command, in bytes
+ @Input pabyFrameworkCommand - framework command data
+ @Input hMemCtxPrivData - private data handle of the memory context
+ @Input ui32PackedCCBSizeU8888 - packed client CCB size parameters
+ @Output ppsTransferContext - the created transfer context
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA		*psConnection,
+										   PVRSRV_DEVICE_NODE		*psDeviceNode,
+										   IMG_UINT32				ui32Priority,
+										   IMG_UINT32				ui32FrameworkCommandSize,
+										   IMG_PBYTE				pabyFrameworkCommand,
+										   IMG_HANDLE				hMemCtxPrivData,
+										   IMG_UINT32				ui32PackedCCBSizeU8888,
+										   RGX_SERVER_TQ_CONTEXT	**ppsTransferContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXDestroyTransferContextKM
+
+ @Description
+	Server-side implementation of RGXDestroyTransferContext
+
+ @Input psTransferContext - Transfer context
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext);
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXSubmitTransferKM
+
+ @Description
+	Schedules one or more 2D or 3D HW commands for execution by the firmware
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT	*psTransferContext,
+									IMG_UINT32				ui32ClientCacheOpSeqNum,
+									IMG_UINT32				ui32PrepareCount,
+									IMG_UINT32				*paui32ClientFenceCount,
+									SYNC_PRIMITIVE_BLOCK		***papauiClientFenceUFOSyncPrimBlock,
+									IMG_UINT32				**papaui32ClientFenceSyncOffset,
+									IMG_UINT32				**papaui32ClientFenceValue,
+									IMG_UINT32				*paui32ClientUpdateCount,
+									SYNC_PRIMITIVE_BLOCK		***papauiClientUpdateUFOSyncPrimBlock,
+									IMG_UINT32				**papaui32ClientUpdateSyncOffset,
+									IMG_UINT32				**papaui32ClientUpdateValue,
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+									IMG_UINT32				*paui32ServerSyncCount,
+									IMG_UINT32				**papaui32ServerSyncFlags,
+									SERVER_SYNC_PRIMITIVE	***papapsServerSyncs,
+#endif
+									PVRSRV_FENCE			iCheckFence,
+									PVRSRV_TIMELINE			i2DUpdateTimeline,
+									PVRSRV_FENCE			*pi2DUpdateFence,
+									PVRSRV_TIMELINE			i3DUpdateTimeline,
+									PVRSRV_FENCE			*pi3DUpdateFence,
+									IMG_CHAR				szFenceName[32],
+									IMG_UINT32				*paui32FWCommandSize,
+									IMG_UINT8				**papaui8FWCommand,
+									IMG_UINT32				*pui32TQPrepareFlags,
+									IMG_UINT32				ui32ExtJobRef,
+									IMG_UINT32				ui32SyncPMRCount,
+									IMG_UINT32				*paui32SyncPMRFlags,
+									PMR						**ppsSyncPMRs);
+
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                   PVRSRV_DEVICE_NODE * psDevNode,
+												   RGX_SERVER_TQ_CONTEXT *psTransferContext,
+												   IMG_UINT32 ui32Priority);
+
+/* Debug - Dump debug info of transfer contexts on this device */
+void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                           void *pvDumpDebugFile,
+                           IMG_UINT32 ui32VerbLevel);
+
+/* Debug/Watchdog - check if client transfer contexts are stalled */
+IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXTRANSFER_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxutils.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxutils.c
new file mode 100644
index 0000000..f27babc
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxutils.c
@@ -0,0 +1,221 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "power.h"
+#include "pvrsrv.h"
+#include "sync_internal.h"
+#include "rgxfwutils.h"
+
+
+PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_UINT32 *pui32State)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+	if (!psDeviceNode)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	psDevInfo = psDeviceNode->pvDevice;
+	*pui32State = psDevInfo->eActivePMConf;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_UINT32 ui32State)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+	if (!psDeviceNode || !psDeviceNode->pvDevice)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	if (RGX_ACTIVEPM_FORCE_OFF != ui32State
+		|| !psDevInfo->pvAPMISRData)
+	{
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+	}
+
+#if !defined(NO_HARDWARE)
+	eError = OSUninstallMISR(psDevInfo->pvAPMISRData);
+	if (PVRSRV_OK == eError)
+	{
+		psDevInfo->eActivePMConf = RGX_ACTIVEPM_FORCE_OFF;
+		psDevInfo->pvAPMISRData = NULL;
+		eError = PVRSRVSetDeviceDefaultPowerState((const PPVRSRV_DEVICE_NODE)psDeviceNode,
+		                                          PVRSRV_DEV_POWER_STATE_ON);
+	}
+#endif
+
+	return eError;
+}
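+
+/* Illustrative sketch (an assumption, not code from this file): the
+ * query/set pair above is intended to be registered as an AppHint handler
+ * pair at device initialisation, along the lines of:
+ *
+ *     pfnRegisterAppHint(RGXQueryAPMState, RGXSetAPMState, psDeviceNode, NULL);
+ *
+ * where pfnRegisterAppHint stands in for the actual AppHint registration
+ * helper.
+ */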
+
+PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_BOOL *pbDisabled)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+	if (!psDeviceNode || !psDeviceNode->pvDevice)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	*pbDisabled = !psDevInfo->bPDPEnabled;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_BOOL bDisable)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+	if (!psDeviceNode || !psDeviceNode->pvDevice)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	psDevInfo->bPDPEnabled = !bDisable;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 *pui32DeviceFlags)
+{
+	if (!pui32DeviceFlags || !psDevInfo)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pui32DeviceFlags = psDevInfo->ui32DeviceFlags;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 ui32Config,
+				IMG_BOOL bSetNotClear)
+{
+	if (!psDevInfo)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if ((ui32Config & ~RGXKM_DEVICE_STATE_MASK) != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Bits outside of device state mask set (input: 0x%x, mask: 0x%x)",
+				 __func__, ui32Config, RGXKM_DEVICE_STATE_MASK));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (bSetNotClear)
+	{
+		psDevInfo->ui32DeviceFlags |= ui32Config;
+	}
+	else
+	{
+		psDevInfo->ui32DeviceFlags &= ~ui32Config;
+	}
+
+	return PVRSRV_OK;
+}
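+
+/* Illustrative usage (a sketch, not code from this driver): setting and then
+ * clearing a hypothetical flag bit ui32StateBit that lies within
+ * RGXKM_DEVICE_STATE_MASK:
+ *
+ *     eError = RGXSetDeviceFlags(psDevInfo, ui32StateBit, IMG_TRUE);
+ *     eError = RGXSetDeviceFlags(psDevInfo, ui32StateBit, IMG_FALSE);
+ */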
+
+inline const char * RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM)
+{
+	PVR_ASSERT(eKickTypeDM < RGX_KICK_TYPE_DM_LAST);
+
+	switch (eKickTypeDM) {
+		case RGX_KICK_TYPE_DM_GP:
+			return "GP ";
+		case RGX_KICK_TYPE_DM_TDM_2D:
+			return "TDM/2D ";
+		case RGX_KICK_TYPE_DM_TA:
+			return "TA ";
+		case RGX_KICK_TYPE_DM_3D:
+			return "3D ";
+		case RGX_KICK_TYPE_DM_CDM:
+			return "CDM ";
+		case RGX_KICK_TYPE_DM_RTU:
+			return "RTU ";
+		case RGX_KICK_TYPE_DM_SHG:
+			return "SHG ";
+		case RGX_KICK_TYPE_DM_TQ2D:
+			return "TQ2D ";
+		case RGX_KICK_TYPE_DM_TQ3D:
+			return "TQ3D ";
+		default:
+			return "Invalid DM ";
+	}
+}
+
+/******************************************************************************
+ End of file (rgxutils.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxutils.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxutils.h
new file mode 100644
index 0000000..6709863
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rgxutils.h
@@ -0,0 +1,185 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific utility routines declarations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Inline functions/structures specific to RGX
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+
+/*!
+******************************************************************************
+
+ @Function      RGXQueryAPMState
+
+ @Description   Query the state of the APM configuration
+
+ @Input         psDeviceNode : The device node
+
+ @Input         pvPrivateData: Unused (required for AppHint callback)
+
+ @Output        pui32State   : The APM configuration state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_UINT32 *pui32State);
+
+/*!
+******************************************************************************
+
+ @Function      RGXSetAPMState
+
+ @Description   Set the APM configuration state. Currently only 'OFF' is
+                supported
+
+ @Input         psDeviceNode : The device node
+
+ @Input         pvPrivateData: Unused (required for AppHint callback)
+
+ @Input         ui32State    : The requested APM configuration state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_UINT32 ui32State);
+
+/*!
+******************************************************************************
+
+ @Function      RGXQueryPdumpPanicDisable
+
+ @Description   Get the PDump Panic Enable configuration state.
+
+ @Input         psDeviceNode : The device node
+
+ @Input         pvPrivateData: Unused (required for AppHint callback)
+
+ @Output        pbDisabled   : Set to IMG_TRUE if PDump Panic is disabled
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_BOOL *pbDisabled);
+
+/*!
+******************************************************************************
+
+ @Function      RGXSetPdumpPanicDisable
+
+ @Description   Set the PDump Panic Enable flag
+
+ @Input         psDeviceNode : The device node
+
+ @Input         pvPrivateData: Unused (required for AppHint callback)
+
+ @Input         bDisable      : The requested configuration state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_BOOL bDisable);
+
+/*!
+******************************************************************************
+
+ @Function      RGXGetDeviceFlags
+
+ @Description   Get the device flags for a given device
+
+ @Input         psDevInfo        : The device descriptor to query
+
+ @Output        pui32DeviceFlags : The current state of the device flags
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 *pui32DeviceFlags);
+
+/*!
+******************************************************************************
+
+ @Function      RGXSetDeviceFlags
+
+ @Description   Set the device flags for a given device
+
+ @Input         psDevInfo : The device descriptor to modify
+
+ @Input         ui32Config : The device flags to modify
+
+ @Input         bSetNotClear : IMG_TRUE to set the specified flags,
+                               IMG_FALSE to clear them
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 ui32Config,
+				IMG_BOOL bSetNotClear);
+
+/*!
+******************************************************************************
+
+ @Function    RGXStringifyKickTypeDM
+
+ @Description Returns the given kick type DM name as a string
+
+ @Input       eKickTypeDM : Kick type DM
+
+ @Return      String containing the kick type DM name
+
+******************************************************************************/
+const char* RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM);
+
+#define RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(bitmask, eKickTypeDM) (((bitmask) & (eKickTypeDM)) ? RGXStringifyKickTypeDM(eKickTypeDM) : "")
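+
+/* Illustrative usage (a sketch, not code from this driver): expanding the TQ
+ * bits of a stalled-context bitmask ui32Mask into a single log line:
+ *
+ *     PVR_LOG(("Stalled DMs: %s%s",
+ *              RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32Mask, RGX_KICK_TYPE_DM_TQ2D),
+ *              RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32Mask, RGX_KICK_TYPE_DM_TQ3D)));
+ */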
+
+#endif /* __RGXUTILS_H__ */
+/******************************************************************************
+ End of file (rgxutils.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ri_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ri_server.c
new file mode 100644
index 0000000..76df4b9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ri_server.c
@@ -0,0 +1,2147 @@
+/*************************************************************************/ /*!
+@File           ri_server.c
+@Title          Resource Information (RI) server implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Resource Information (RI) server functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stdarg.h>
+#include "img_defs.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "srvkm.h"
+#include "lock.h"
+
+/* services/include */
+#include "pvr_ricommon.h"
+
+/* services/server/include/ */
+#include "ri_server.h"
+
+/* services/include/shared/ */
+#include "hash.h"
+/* services/shared/include/ */
+#include "dllist.h"
+
+#include "pmr.h"
+
+/* include/device.h */
+#include "device.h"
+
+#if !defined(RI_UNIT_TEST)
+#include "pvrsrv.h"
+#endif
+
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+
+#define USE_RI_LOCK		1
+
+/*
+ * Initial size use for Hash table. (Used to index the RI list entries).
+ */
+#define _RI_INITIAL_HASH_TABLE_SIZE	64
+
+/*
+ * Values written to the 'valid' field of RI structures when created and
+ * cleared prior to being destroyed. The code can then check this value
+ * before accessing the provided pointer contents as a valid RI structure.
+ */
+#define _VALID_RI_LIST_ENTRY	0x66bccb66
+#define _VALID_RI_SUBLIST_ENTRY	0x77cddc77
+#define _INVALID				0x00000000
+
+/*
+ * If this define is set to 1, details of the linked lists (addresses,
+ * prev/next ptrs, etc) are also output when function RIDumpListKM() is called.
+ */
+#define _DUMP_LINKEDLIST_INFO		0
+
+
+typedef IMG_UINT64 _RI_BASE_T;
+
+
+/* No +1 in SIZE macros since sizeof includes \0 byte in size */
+
+#define RI_PROC_BUF_SIZE    16
+
+#define RI_MEMDESC_SUM_FRMT     "PID %d %s MEMDESCs Alloc'd:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) + "\
+                                                  "Imported:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) = "\
+                                                     "Total:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K)\n"
+#define RI_MEMDESC_SUM_BUF_SIZE (sizeof(RI_MEMDESC_SUM_FRMT)+5+RI_PROC_BUF_SIZE+60)
+
+
+#define RI_PMR_SUM_FRMT     "PID %d %s PMRs Alloc'd:0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K  "\
+                                        "[Physical: 0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K]\n"
+#define RI_PMR_SUM_BUF_SIZE (sizeof(RI_PMR_SUM_FRMT)+(40))
+
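+/* Note: the entry formats below are expanded in two passes: the %% escapes
+ * survive a first OSSNPrintf() that substitutes the annotation field width
+ * (%%-%ds), producing the final format string used to print each entry.
+ */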
+#define RI_PMR_ENTRY_FRMT      "%%sPID:%%-5d <%%p>\t%%-%ds\t0x%%010" IMG_UINT64_FMTSPECx "\t[0x%%010" IMG_UINT64_FMTSPECx "]\t%%c"
+#define RI_PMR_ENTRY_BUF_SIZE  (sizeof(RI_PMR_ENTRY_FRMT)+(3+5+16+PVR_ANNOTATION_MAX_LEN+10+10))
+#define RI_PMR_ENTRY_FRMT_SIZE (sizeof(RI_PMR_ENTRY_FRMT))
+
+/* Use %5d rather than %d so the output aligns in server/kernel.log, debugFS sees extra spaces */
+#define RI_MEMDESC_ENTRY_PROC_FRMT        "[%5d:%s]"
+#define RI_MEMDESC_ENTRY_PROC_BUF_SIZE    (sizeof(RI_MEMDESC_ENTRY_PROC_FRMT)+5+16)
+
+#define RI_SYS_ALLOC_IMPORT_FRMT      "{Import from PID %d}"
+#define RI_SYS_ALLOC_IMPORT_FRMT_SIZE (sizeof(RI_SYS_ALLOC_IMPORT_FRMT)+5)
+static IMG_CHAR g_szSysAllocImport[RI_SYS_ALLOC_IMPORT_FRMT_SIZE];
+
+#define RI_MEMDESC_ENTRY_IMPORT_FRMT     "{Import from PID %d}"
+#define RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_IMPORT_FRMT)+5)
+
+#define RI_MEMDESC_ENTRY_UNPINNED_FRMT     "{Unpinned}"
+#define RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_UNPINNED_FRMT))
+
+#define RI_MEMDESC_ENTRY_FRMT      "%%sPID:%%-5d 0x%%010" IMG_UINT64_FMTSPECx "\t%%-%ds %%s\t0x%%010" IMG_UINT64_FMTSPECx "\t<%%p> %%s%%s%%s%%c"
+#define RI_MEMDESC_ENTRY_BUF_SIZE  (sizeof(RI_MEMDESC_ENTRY_FRMT)+(3+5+10+PVR_ANNOTATION_MAX_LEN+RI_MEMDESC_ENTRY_PROC_BUF_SIZE+16+\
+                                               RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE+RI_SYS_ALLOC_IMPORT_FRMT_SIZE+RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE))
+#define RI_MEMDESC_ENTRY_FRMT_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT))
+
+
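+/* Worst-case entry string length across the formats above; a buffer of this
+ * size can hold any generated entry or summary string.
+ */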
+#define RI_FRMT_SIZE_MAX (MAX(RI_MEMDESC_ENTRY_BUF_SIZE,\
+                              MAX(RI_PMR_ENTRY_BUF_SIZE,\
+                                  MAX(RI_MEMDESC_SUM_BUF_SIZE,\
+                                      RI_PMR_SUM_BUF_SIZE))))
+
+
+
+
+/* Structure used to make linked sublist of memory allocations (MEMDESC) */
+struct _RI_SUBLIST_ENTRY_
+{
+	DLLIST_NODE				sListNode;
+	struct _RI_LIST_ENTRY_	*psRI;
+	IMG_UINT32 				valid;
+	IMG_BOOL				bIsImport;
+	IMG_BOOL				bIsSuballoc;
+	IMG_PID					pid;
+	IMG_CHAR				ai8ProcName[RI_PROC_BUF_SIZE];
+	IMG_DEV_VIRTADDR 		sVAddr;
+	IMG_UINT64				ui64Offset;
+	IMG_UINT64				ui64Size;
+	IMG_CHAR				ai8TextB[DEVMEM_ANNOTATION_MAX_LEN+1];
+	DLLIST_NODE				sProcListNode;
+};
+
+/*
+ * Structure used to make linked list of PMRs. Sublists of allocations
+ * (MEMDESCs) made from these PMRs are chained off these entries.
+ */
+struct _RI_LIST_ENTRY_
+{
+	DLLIST_NODE				sListNode;
+	DLLIST_NODE				sSysAllocListNode;
+	DLLIST_NODE				sSubListFirst;
+	IMG_UINT32				valid;
+	PMR						*psPMR;
+	IMG_PID					pid;
+	IMG_CHAR				ai8ProcName[RI_PROC_BUF_SIZE];
+	IMG_UINT16				ui16SubListCount;
+	IMG_UINT16				ui16MaxSubListCount;
+	IMG_UINT32				ui32RIPMRFlags; /* Flags used to indicate the type of allocation */
+	IMG_UINT32				ui32Flags; /* Flags used to indicate if PMR appears in ri debugfs output */
+};
+
+typedef struct _RI_LIST_ENTRY_ RI_LIST_ENTRY;
+typedef struct _RI_SUBLIST_ENTRY_ RI_SUBLIST_ENTRY;
+
+static IMG_UINT16	g_ui16RICount;
+static HASH_TABLE	*g_pRIHashTable;
+static IMG_UINT16	g_ui16ProcCount;
+static HASH_TABLE	*g_pProcHashTable;
+
+static POS_LOCK		g_hRILock;
+
+/* Linked list of PMR allocations made against the PVR_SYS_ALLOC_PID and lock
+ * to prevent concurrent access to it.
+ */
+static POS_LOCK		g_hSysAllocPidListLock;
+static DLLIST_NODE	g_sSysAllocPidListHead;
+
+/*
+ * Flag used to indicate if RILock should be destroyed when final PMR entry is
+ * deleted, i.e. if RIDeInitKM() has already been called before that point but
+ * the handle manager has deferred deletion of RI entries.
+ */
+static IMG_BOOL 	bRIDeInitDeferred = IMG_FALSE;
+
+/*
+ * Used as head of linked-list of PMR RI entries - this is useful when we wish
+ * to iterate all PMR list entries (when we don't have a PMR ref)
+ */
+static DLLIST_NODE	sListFirst;
+
+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */
+static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString);
+/* Function used to produce string containing info for PMR RI entries (used for both debugfs and kernel log output) */
+static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString);
+
+static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v);
+static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v);
+static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v);
+static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid);
+#define _RIOutput(x) PVR_LOG(x)
+
+#define RI_FLAG_PARSED_BY_DEBUGFS			0x1
+#define RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS	0x2
+#define RI_FLAG_SYSALLOC_PMR				0x4
+
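+/* Hash function for the per-process hash table: it folds the key into
+ * IMG_UINT32 words and applies a Jenkins-style add/shift/xor mix to each
+ * word, accumulating the mixed words into the hash key. uHashTabLen is
+ * unused here, on the assumption that the hash module reduces the returned
+ * value to the table size itself.
+ */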
+static IMG_UINT32
+_ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+
+static IMG_UINT32
+_ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+	IMG_UINT32 *p = (IMG_UINT32 *)pKey;
+	IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+	IMG_UINT32 ui;
+	IMG_UINT32 uHashKey = 0;
+
+	PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+	for (ui = 0; ui < uKeyLen; ui++)
+	{
+		IMG_UINT32 uHashPart = *p++;
+
+		uHashPart += (uHashPart << 12);
+		uHashPart ^= (uHashPart >> 22);
+		uHashPart += (uHashPart << 4);
+		uHashPart ^= (uHashPart >> 9);
+		uHashPart += (uHashPart << 10);
+		uHashPart ^= (uHashPart >> 2);
+		uHashPart += (uHashPart << 7);
+		uHashPart ^= (uHashPart >> 12);
+
+		uHashKey += uHashPart;
+	}
+
+	return uHashKey;
+}
+
+static IMG_BOOL
+_ProcHashComp(size_t uKeySize, void *pKey1, void *pKey2);
+
+static IMG_BOOL
+_ProcHashComp(size_t uKeySize, void *pKey1, void *pKey2)
+{
+	IMG_UINT32 *p1 = (IMG_UINT32 *)pKey1;
+	IMG_UINT32 *p2 = (IMG_UINT32 *)pKey2;
+	IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+	IMG_UINT32 ui;
+
+	for (ui = 0; ui < uKeyLen; ui++)
+	{
+		if (*p1++ != *p2++)
+			return IMG_FALSE;
+	}
+
+	return IMG_TRUE;
+}
+
+static void _RILock(void)
+{
+#if (USE_RI_LOCK == 1)
+	OSLockAcquire(g_hRILock);
+#endif
+}
+
+static void _RIUnlock(void)
+{
+#if (USE_RI_LOCK == 1)
+	OSLockRelease(g_hRILock);
+#endif
+}
+
+/* This value maintains a count of the number of PMRs attributed to the
+ * PVR_SYS_ALLOC_PID. Access to this value is protected by g_hRILock, so it
+ * does not need to be an ATOMIC_T.
+ */
+static IMG_UINT32 g_ui32SysAllocPMRCount;
+
+
+PVRSRV_ERROR RIInitKM(void)
+{
+	IMG_INT iCharsWritten;
+	PVRSRV_ERROR eError;
+
+	bRIDeInitDeferred = IMG_FALSE;
+
+	iCharsWritten = OSSNPrintf(g_szSysAllocImport,
+	            RI_SYS_ALLOC_IMPORT_FRMT_SIZE,
+	            RI_SYS_ALLOC_IMPORT_FRMT,
+	            PVR_SYS_ALLOC_PID);
+	PVR_LOG_IF_FALSE((iCharsWritten>0 && iCharsWritten<(IMG_INT32)RI_SYS_ALLOC_IMPORT_FRMT_SIZE), \
+			"OSSNPrintf failed to initialise g_szSysAllocImport");
+
+	eError = OSLockCreate(&g_hSysAllocPidListLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: OSLockCreate (g_hSysAllocPidListLock) failed (returned %d)",
+		         __func__,
+		         eError));
+	}
+	dllist_init(&(g_sSysAllocPidListHead));
+#if (USE_RI_LOCK == 1)
+	eError = OSLockCreate(&g_hRILock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: OSLockCreate (g_hRILock) failed (returned %d)",
+		         __func__,
+		         eError));
+	}
+#endif
+	return eError;
+}
+
+void RIDeInitKM(void)
+{
+#if (USE_RI_LOCK == 1)
+	if (g_ui16RICount > 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+		         "%s: called with %d entries remaining - deferring OSLockDestroy()",
+		         __func__,
+		         g_ui16RICount));
+		bRIDeInitDeferred = IMG_TRUE;
+	}
+	else
+	{
+		OSLockDestroy(g_hRILock);
+		OSLockDestroy(g_hSysAllocPidListLock);
+	}
+#endif
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RILockAcquireKM
+
+ @Description
+            Acquires the RI Lock (which protects the integrity of the RI
+            linked lists). Caller will be suspended until lock is acquired.
+
+ @Return	None
+
+******************************************************************************/
+void RILockAcquireKM(void)
+{
+	_RILock();
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RILockReleaseKM
+
+ @Description
+            Releases the RI Lock (which protects the integrity of the RI
+            linked lists).
+
+ @Return	None
+
+******************************************************************************/
+void RILockReleaseKM(void)
+{
+	_RIUnlock();
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RIWritePMREntryWithOwnerKM
+
+ @Description
+            Writes a new Resource Information list entry.
+            The new entry will be inserted at the head of the list of
+            PMR RI entries and assigned the values provided.
+
+ @input     psPMR - Reference (handle) to the PMR to which this reference relates
+
+ @input     ui32Owner - PID of the process which owns the allocation. This
+                        may not be the current process (e.g. a request to
+                        grow a buffer may happen in the context of a kernel
+                        thread, or we may import further resource for a
+                        suballocation made from the FW heap which can then
+                        also be utilized by other processes)
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR,
+                                        IMG_PID ui32Owner)
+{
+	PMR *pPMRHashKey = psPMR;
+	RI_LIST_ENTRY *psRIEntry;
+	uintptr_t hashData;
+
+	/* if Hash table has not been created, create it now */
+	if (!g_pRIHashTable)
+	{
+		g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default);
+		g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp);
+	}
+	if (!g_pRIHashTable || !g_pProcHashTable)
+	{
+		/* Error - no memory to allocate for Hash table(s) */
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	if (!psPMR)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Acquire RI Lock */
+	_RILock();
+
+	/* Look-up psPMR in Hash Table */
+	hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+	psRIEntry = (RI_LIST_ENTRY *)hashData;
+	if (!psRIEntry)
+	{
+		/*
+		 * If failed to find a matching existing entry, create a new one
+		 */
+		psRIEntry = (RI_LIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_LIST_ENTRY));
+		if (!psRIEntry)
+		{
+			/* Release RI Lock */
+			_RIUnlock();
+			/* Error - no memory to allocate for new RI entry */
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		else
+		{
+			IMG_UINT32 ui32PMRFlags = PMR_Flags(psPMR);
+			PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psPMR);
+
+			/*
+			 * Add new RI Entry
+			 */
+			if (g_ui16RICount == 0)
+			{
+				/* Initialise PMR entry linked-list head */
+				dllist_init(&sListFirst);
+			}
+			g_ui16RICount++;
+
+			dllist_init (&(psRIEntry->sSysAllocListNode));
+			dllist_init (&(psRIEntry->sSubListFirst));
+			psRIEntry->ui16SubListCount = 0;
+			psRIEntry->ui16MaxSubListCount = 0;
+			psRIEntry->valid = _VALID_RI_LIST_ENTRY;
+
+			/* Check if this PMR should be accounted for under the
+			 * PVR_SYS_ALLOC_PID debugFS entry. This should happen if
+			 * we are in the driver init phase, the flags indicate
+			 * this is a FW local allocation (made from FW heap)
+			 * or the owner PID is PVR_SYS_ALLOC_PID.
+			 * Also record host dev node allocs on the system PID.
+			 */
+			if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT ||
+				PVRSRV_CHECK_FW_LOCAL(ui32PMRFlags) ||
+				ui32Owner == PVR_SYS_ALLOC_PID ||
+				psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode)
+			{
+				psRIEntry->ui32RIPMRFlags = RI_FLAG_SYSALLOC_PMR;
+				OSSNPrintf(psRIEntry->ai8ProcName,
+						RI_PROC_BUF_SIZE,
+						"SysProc");
+				psRIEntry->pid = PVR_SYS_ALLOC_PID;
+				OSLockAcquire(g_hSysAllocPidListLock);
+				/* Add this psRIEntry to the list of entries for PVR_SYS_ALLOC_PID */
+				dllist_add_to_tail(&g_sSysAllocPidListHead,(PDLLIST_NODE)&(psRIEntry->sSysAllocListNode));
+				OSLockRelease(g_hSysAllocPidListLock);
+				g_ui32SysAllocPMRCount++;
+			}
+			else
+			{
+				psRIEntry->ui32RIPMRFlags = 0;
+				psRIEntry->pid = ui32Owner;
+				/* Only record the current process name for non-system
+				 * allocations, so that the "SysProc" name set above is
+				 * not overwritten.
+				 */
+				OSSNPrintf(psRIEntry->ai8ProcName,
+						RI_PROC_BUF_SIZE,
+						"%s",
+						OSGetCurrentClientProcessNameKM());
+			}
+
+			/* Add PMR entry to linked-list of all PMR entries */
+			dllist_init (&(psRIEntry->sListNode));
+			dllist_add_to_tail(&sListFirst,(PDLLIST_NODE)&(psRIEntry->sListNode));
+		}
+
+		psRIEntry->psPMR = psPMR;
+		psRIEntry->ui32Flags = 0;
+
+		/* Create index entry in Hash Table */
+		HASH_Insert_Extended (g_pRIHashTable, (void *)&pPMRHashKey, (uintptr_t)psRIEntry);
+
+		/* Store phRIHandle in PMR structure, so it can delete the associated RI entry when it destroys the PMR */
+		PMRStoreRIHandle(psPMR, psRIEntry);
+	}
+	/* Release RI Lock */
+	_RIUnlock();
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RIWritePMREntryKM
+
+ @Description
+            Writes a new Resource Information list entry.
+            The new entry will be inserted at the head of the list of
+            PMR RI entries and assigned the values provided.
+
+ @input     psPMR - Reference (handle) to the PMR to which this reference relates
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR)
+{
+	return RIWritePMREntryWithOwnerKM(psPMR,
+	                                  OSGetCurrentClientProcessIDKM());
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RIWriteMEMDESCEntryKM
+
+ @Description
+            Writes a new Resource Information sublist entry.
+            The new entry will be inserted at the head of the sublist of
+            the indicated PMR list entry, and assigned the values provided.
+
+ @input     psPMR - Reference (handle) to the PMR to which this MEMDESC RI entry relates
+ @input     ui32TextBSize - Length of string provided in psz8TextB parameter
+ @input     psz8TextB - String describing this secondary reference (may be null)
+ @input     ui64Offset - Offset from the start of the PMR at which this allocation begins
+ @input     ui64Size - Size of this allocation
+ @input     bIsImport - Flag indicating if this is an allocation or an import
+ @input     bIsSuballoc - Flag indicating if this is a sub-allocation
+ @output    phRIHandle - Handle to the created RI entry
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR,
+					   	   	   	   IMG_UINT32 ui32TextBSize,
+					   	   	   	   const IMG_CHAR *psz8TextB,
+					   	   	   	   IMG_UINT64 ui64Offset,
+					   	   	   	   IMG_UINT64 ui64Size,
+					   	   	   	   IMG_BOOL bIsImport,
+					               IMG_BOOL bIsSuballoc,
+					   	   	   	   RI_HANDLE *phRIHandle)
+{
+	RI_SUBLIST_ENTRY *psRISubEntry;
+	RI_LIST_ENTRY *psRIEntry;
+	PMR *pPMRHashKey = psPMR;
+	uintptr_t hashData;
+	IMG_PID	pid;
+
+	/* Check Hash tables have been created (meaning at least one PMR has been defined) */
+	if (!g_pRIHashTable || !g_pProcHashTable)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (!psPMR || !phRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Acquire RI Lock */
+	_RILock();
+
+	*phRIHandle = NULL;
+
+	/* Look-up psPMR in Hash Table */
+	hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+	psRIEntry = (RI_LIST_ENTRY *)hashData;
+	if (!psRIEntry)
+	{
+		/* Release RI Lock */
+		_RIUnlock();
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY));
+	if (!psRISubEntry)
+	{
+		/* Release RI Lock */
+		_RIUnlock();
+		/* Error - no memory to allocate for new RI sublist entry */
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	else
+	{
+		/*
+		 * Insert new entry in sublist
+		 */
+		PDLLIST_NODE currentNode = dllist_get_next_node(&(psRIEntry->sSubListFirst));
+
+		/*
+		 * Insert new entry before currentNode
+		 */
+		if (!currentNode)
+		{
+			currentNode = &(psRIEntry->sSubListFirst);
+		}
+		dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sListNode));
+
+		psRISubEntry->psRI = psRIEntry;
+
+		/* Increment number of entries in sublist */
+		psRIEntry->ui16SubListCount++;
+		if (psRIEntry->ui16SubListCount > psRIEntry->ui16MaxSubListCount)
+		{
+			psRIEntry->ui16MaxSubListCount = psRIEntry->ui16SubListCount;
+		}
+		psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY;
+	}
+
+	/* If allocation is made during device or driver initialisation,
+	 * track the MEMDESC entry under PVR_SYS_ALLOC_PID, otherwise use
+	 * the current PID.
+	 * Record host dev node allocations on the system PID.
+	 */
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psRISubEntry->psRI->psPMR);
+
+		if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT ||
+			psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode)
+		{
+			psRISubEntry->pid = psRISubEntry->psRI->pid;
+		}
+		else
+		{
+			psRISubEntry->pid = OSGetCurrentClientProcessIDKM();
+		}
+	}
+
+	if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: TextBSize too long (%u). Text will be truncated "
+				 "to %zu characters", __func__,
+				 ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1));
+	}
+
+	/* copy ai8TextB field data */
+	OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB);
+
+	psRISubEntry->ui64Offset = ui64Offset;
+	psRISubEntry->ui64Size = ui64Size;
+	psRISubEntry->bIsImport = bIsImport;
+	psRISubEntry->bIsSuballoc = bIsSuballoc;
+	OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM());
+	dllist_init (&(psRISubEntry->sProcListNode));
+
+	/*
+	 *	Now insert this MEMDESC into the proc list
+	 */
+	/* look-up pid in Hash Table */
+	pid = psRISubEntry->pid;
+	hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+	if (!hashData)
+	{
+		/*
+		 * No allocations for this pid yet
+		 */
+		HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode));
+		/* Increment number of entries in proc hash table */
+		g_ui16ProcCount++;
+	}
+	else
+	{
+		/*
+		 * Insert allocation into pid allocations linked list
+		 */
+		PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData;
+
+		/*
+		 * Insert new entry
+		 */
+		dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode));
+	}
+	*phRIHandle = (RI_HANDLE)psRISubEntry;
+	/* Release RI Lock */
+	_RIUnlock();
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RIWriteProcListEntryKM
+
+ @Description
+            Write a new entry in the process list directly. We have to do this
+            because there might be no, multiple or changing PMR handles.
+
+            In the common case we have a PMR that will be added to the PMR list
+            and one or several MemDescs that are associated to it in a sub-list.
+            Additionally these MemDescs will be inserted in the per-process list.
+
+            There might be special descriptors from e.g. new user APIs that
+            are associated with no or multiple PMRs and not just one.
+            These can be now added to the per-process list (as RI_SUBLIST_ENTRY)
+            directly with this function and won't be listed in the PMR list (RIEntry)
+            because there might be no PMR.
+
+            To remove entries from the per-process list, just use
+            RIDeleteMEMDESCEntryKM().
+
+ @input     ui32TextBSize - Length of string provided in psz8TextB parameter
+ @input     psz8TextB - String describing this secondary reference (may be null)
+ @input     ui64Size - Size of this allocation
+ @input     ui64DevVAddr - Virtual address of this entry
+ @output    phRIHandle - Handle to the created RI entry
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize,
+                                    const IMG_CHAR *psz8TextB,
+                                    IMG_UINT64 ui64Size,
+                                    IMG_UINT64 ui64DevVAddr,
+                                    RI_HANDLE *phRIHandle)
+{
+	uintptr_t hashData = 0;
+	IMG_PID		pid;
+	RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+	if (!g_pRIHashTable)
+	{
+		g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default);
+		g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp);
+
+		if (!g_pRIHashTable || !g_pProcHashTable)
+		{
+			/* Error - no memory to allocate for Hash table(s) */
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+	}
+
+	/* Acquire RI Lock */
+	_RILock();
+
+	*phRIHandle = NULL;
+
+	psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY));
+	if (!psRISubEntry)
+	{
+		/* Release RI Lock */
+		_RIUnlock();
+		/* Error - no memory to allocate for new RI sublist entry */
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY;
+
+	psRISubEntry->pid = OSGetCurrentClientProcessIDKM();
+
+	if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+		         "%s: TextBSize too long (%u). Text will be truncated "
+		         "to %zu characters", __func__,
+		         ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1));
+	}
+
+	/* copy ai8TextB field data */
+	OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB);
+
+	psRISubEntry->ui64Offset = 0;
+	psRISubEntry->ui64Size = ui64Size;
+	psRISubEntry->sVAddr.uiAddr = ui64DevVAddr;
+	psRISubEntry->bIsImport = IMG_FALSE;
+	psRISubEntry->bIsSuballoc = IMG_FALSE;
+	OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM());
+	dllist_init (&(psRISubEntry->sProcListNode));
+
+	/*
+	 *	Now insert this MEMDESC into the proc list
+	 */
+	/* look-up pid in Hash Table */
+	pid = psRISubEntry->pid;
+	hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+	if (!hashData)
+	{
+		/*
+		 * No allocations for this pid yet
+		 */
+		HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode));
+		/* Increment number of entries in proc hash table */
+		g_ui16ProcCount++;
+	}
+	else
+	{
+		/*
+		 * Insert allocation into pid allocations linked list
+		 */
+		PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData;
+
+		/*
+		 * Insert new entry
+		 */
+		dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode));
+	}
+	*phRIHandle = (RI_HANDLE)psRISubEntry;
+	/* Release RI Lock */
+	_RIUnlock();
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RIUpdateMEMDESCAddrKM
+
+ @Description
+            Update a Resource Information entry.
+
+ @input     hRIHandle - Handle of object whose reference info is to be updated
+ @input     sVAddr - New address for the RI entry
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle,
+								   IMG_DEV_VIRTADDR sVAddr)
+{
+	RI_SUBLIST_ENTRY *psRISubEntry;
+
+	if (!hRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+	if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+	{
+		/* Pointer does not point to valid structure */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Acquire RI lock*/
+	_RILock();
+
+	psRISubEntry->sVAddr.uiAddr = sVAddr.uiAddr;
+
+	/* Release RI lock */
+	_RIUnlock();
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RIDeletePMREntryKM
+
+ @Description
+            Delete a Resource Information entry.
+
+ @input     hRIHandle - Handle of object whose reference info is to be deleted
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle)
+{
+	RI_LIST_ENTRY *psRIEntry;
+	PMR			*pPMRHashKey;
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+
+	if (!hRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psRIEntry = (RI_LIST_ENTRY *)hRIHandle;
+
+	if (psRIEntry->valid != _VALID_RI_LIST_ENTRY)
+	{
+		/* Pointer does not point to valid structure */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (psRIEntry->ui16SubListCount == 0)
+	{
+		/* Acquire RI lock*/
+		_RILock();
+
+		/* Remove the HASH table index entry */
+		pPMRHashKey = psRIEntry->psPMR;
+		HASH_Remove_Extended(g_pRIHashTable, (void *)&pPMRHashKey);
+
+		psRIEntry->valid = _INVALID;
+
+		/* Remove PMR entry from linked-list of PMR entries */
+		dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sListNode));
+
+		if (psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR)
+		{
+			dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sSysAllocListNode));
+			g_ui32SysAllocPMRCount--;
+		}
+
+		/* Now, free the memory used to store the RI entry */
+		OSFreeMemNoStats(psRIEntry);
+		psRIEntry = NULL;
+
+		/*
+		 * Decrement number of RI entries - if this is now zero,
+		 * we can delete the RI hash table
+		 */
+		if (--g_ui16RICount == 0)
+		{
+			HASH_Delete(g_pRIHashTable);
+			g_pRIHashTable = NULL;
+
+			_RIUnlock();
+
+			/* If deInit has been deferred, we can now destroy the RI Lock */
+			if (bRIDeInitDeferred)
+			{
+				OSLockDestroy(g_hRILock);
+			}
+		}
+		else
+		{
+			/* Release RI lock*/
+			_RIUnlock();
+		}
+		/*
+		 * Make the handle NULL once PMR RI entry is deleted
+		 */
+		hRIHandle = NULL;
+	}
+	else
+	{
+		eResult = PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+	}
+
+	return eResult;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RIDeleteMEMDESCEntryKM
+
+ @Description
+            Delete a Resource Information entry.
+            Entry can be from RIEntry list or ProcList.
+
+ @input     hRIHandle - Handle of object whose reference info is to be deleted
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle)
+{
+	RI_LIST_ENTRY *psRIEntry = NULL;
+	RI_SUBLIST_ENTRY *psRISubEntry;
+	uintptr_t hashData;
+	IMG_PID pid;
+
+	if (!hRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+	if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+	{
+		/* Pointer does not point to valid structure */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Acquire RI lock*/
+	_RILock();
+
+	/* For entries which do have a parent PMR remove the node from the sublist */
+	if (psRISubEntry->psRI)
+	{
+		psRIEntry = (RI_LIST_ENTRY *)psRISubEntry->psRI;
+
+		/* Now, remove entry from the sublist */
+		dllist_remove_node(&(psRISubEntry->sListNode));
+	}
+
+	psRISubEntry->valid = _INVALID;
+
+	/* Remove the entry from the proc allocations linked list */
+	pid = psRISubEntry->pid;
+	/* If this is the only allocation for this pid, just remove it from the hash table */
+	if (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL)
+	{
+		HASH_Remove_Extended(g_pProcHashTable, (void *)&pid);
+		/* Decrement number of entries in proc hash table, and delete the hash table if there are now none */
+		if (--g_ui16ProcCount == 0)
+		{
+			HASH_Delete(g_pProcHashTable);
+			g_pProcHashTable = NULL;
+		}
+	}
+	else
+	{
+		hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+		if ((PDLLIST_NODE)hashData == &(psRISubEntry->sProcListNode))
+		{
+			HASH_Remove_Extended(g_pProcHashTable, (void *)&pid);
+			HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)dllist_get_next_node(&(psRISubEntry->sProcListNode)));
+		}
+	}
+	dllist_remove_node(&(psRISubEntry->sProcListNode));
+
+	/* Now, free the memory used to store the sublist entry */
+	OSFreeMemNoStats(psRISubEntry);
+	psRISubEntry = NULL;
+
+	/*
+	 * Decrement number of entries in sublist if this MemDesc had a parent entry.
+	 */
+	if (psRIEntry)
+	{
+		psRIEntry->ui16SubListCount--;
+	}
+
+	/* Release RI lock*/
+	_RIUnlock();
+
+	/*
+	 * NULL the local copy of the handle once the MEMDESC RI entry is
+	 * deleted (note this cannot clear the caller's handle, which is
+	 * passed by value)
+	 */
+	hRIHandle = NULL;
+
+	return PVRSRV_OK;
+}
+
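+/*
+ * Illustrative usage sketch (not part of the driver): every handle returned
+ * by RIWriteMEMDESCEntryKM() or RIWriteProcListEntryKM() is expected to be
+ * released through this function on the teardown path, e.g.:
+ *
+ *     if (hRIHandle != NULL)
+ *     {
+ *         (void) RIDeleteMEMDESCEntryKM(hRIHandle);
+ *         hRIHandle = NULL;
+ *     }
+ */
+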
+/*!
+*******************************************************************************
+
+ @Function	RIDeleteListKM
+
+ @Description
+            Delete all Resource Information entries and free associated
+            memory.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeleteListKM(void)
+{
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+
+	_RILock();
+
+	if (g_pRIHashTable)
+	{
+		eResult = HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DeleteAllEntries);
+		if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+		{
+			/*
+			 * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when
+			 * the hash table gets deleted as a result of deleting the final PMR entry,
+			 * so this is not a real error condition...
+			 */
+			eResult = PVRSRV_OK;
+		}
+	}
+
+	/* After the run through the RIHashTable that holds the PMR entries, there
+	 * might still be entries left in the per-process hash table, because they
+	 * were added with RIWriteProcListEntryKM() and have no PMR parent
+	 * associated.
+	 */
+	if (g_pProcHashTable)
+	{
+		eResult = HASH_Iterate(g_pProcHashTable, (HASH_pfnCallback) _DeleteAllProcEntries);
+		if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+		{
+			/*
+			 * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when
+			 * the hash table gets deleted as a result of deleting the final proc entry,
+			 * so this is not a real error condition...
+			 */
+			eResult = PVRSRV_OK;
+		}
+	}
+
+	_RIUnlock();
+
+	return eResult;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RIDumpListKM
+
+ @Description
+            Dumps out the contents of the RI List entry for the
+            specified PMR, and all MEMDESC allocation entries
+            in the associated sub linked list.
+            At present, output is directed to Kernel log
+            via PVR_DPF.
+
+ @input     psPMR - PMR for which RI entry details are to be output
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpListKM(PMR *psPMR)
+{
+	PVRSRV_ERROR eError;
+
+	/* Acquire RI lock*/
+	_RILock();
+
+	eError = _DumpList(psPMR, 0);
+
+	/* Release RI lock*/
+	_RIUnlock();
+
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RIGetListEntryKM
+
+ @Description
+            Returns pointer to a formatted string with details of the specified
+            list entry. If no entry exists (e.g. it may have been deleted
+            since the previous call), NULL is returned.
+
+ @input     pid - pid for which RI entry details are to be output
+ @input     ppHandle - handle to the current entry; if it points to NULL,
+                     the first entry for the pid will be returned
+ @output    ppszEntryString - string to be output for the entry
+ @output    ppHandle - on return, points to the next entry
+                     (or NULL if there is no next entry)
+
+ @Return	IMG_BOOL - IMG_TRUE while entries remain to be output,
+                     IMG_FALSE once the end of the list is reached
+
+******************************************************************************/
+IMG_BOOL RIGetListEntryKM(IMG_PID pid,
+						  IMG_HANDLE **ppHandle,
+						  IMG_CHAR **ppszEntryString)
+{
+	RI_SUBLIST_ENTRY  *psRISubEntry = NULL;
+	RI_LIST_ENTRY  *psRIEntry = NULL;
+	uintptr_t     hashData = 0;
+	IMG_PID       hashKey  = pid;
+
+	/* Note: the iterator state below is function-static, so this interface
+	 * supports only a single concurrent consumer */
+	static IMG_CHAR acStringBuffer[RI_FRMT_SIZE_MAX];
+
+	static IMG_UINT64 ui64TotalMemdescAlloc;
+	static IMG_UINT64 ui64TotalImport;
+	static IMG_UINT64 ui64TotalPMRAlloc;
+	static IMG_UINT64 ui64TotalPMRBacked;
+	static enum {
+		RI_GET_STATE_MEMDESCS_LIST_START,
+		RI_GET_STATE_MEMDESCS_SUMMARY,
+		RI_GET_STATE_PMR_LIST,
+		RI_GET_STATE_PMR_SUMMARY,
+		RI_GET_STATE_END,
+		RI_GET_STATE_LAST
+	} g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START;
+
+	static DLLIST_NODE *psNode;
+	static DLLIST_NODE *psSysAllocNode;
+	static IMG_CHAR szProcName[RI_PROC_BUF_SIZE];
+	static IMG_UINT32 ui32ProcessedSysAllocPMRCount;
+
+	acStringBuffer[0] = '\0';
+
+	switch (g_bNextGetState)
+	{
+	case RI_GET_STATE_MEMDESCS_LIST_START:
+		/* look-up pid in Hash Table, to obtain first entry for pid */
+		hashData = HASH_Retrieve_Extended(g_pProcHashTable, (void *)&hashKey);
+		if (hashData)
+		{
+			if (*ppHandle)
+			{
+				psRISubEntry = (RI_SUBLIST_ENTRY *)*ppHandle;
+				if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+				{
+					psRISubEntry = NULL;
+				}
+			}
+			else
+			{
+				psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+				if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+				{
+					psRISubEntry = NULL;
+				}
+			}
+		}
+
+		if (psRISubEntry)
+		{
+			PDLLIST_NODE psNextProcListNode = dllist_get_next_node(&psRISubEntry->sProcListNode);
+
+			if (psRISubEntry->bIsImport)
+			{
+				ui64TotalImport += psRISubEntry->ui64Size;
+			}
+			else
+			{
+				ui64TotalMemdescAlloc += psRISubEntry->ui64Size;
+			}
+
+			_GenerateMEMDESCEntryString(psRISubEntry,
+										IMG_TRUE,
+										RI_MEMDESC_ENTRY_BUF_SIZE,
+										acStringBuffer);
+
+			/* If this MEMDESC has a parent PMR and is not an imported PMR, flag the 'parent' PMR as having been listed in MEMDESCs */
+			if (psRISubEntry->psRI && !psRISubEntry->bIsImport && !(psRISubEntry->psRI->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR))
+			{
+				psRISubEntry->psRI->ui32RIPMRFlags |= RI_FLAG_PARSED_BY_DEBUGFS;
+			}
+
+			if (szProcName[0] == '\0')
+			{
+				OSStringCopy(szProcName, (pid == PVR_SYS_ALLOC_PID) ?
+						PVRSRV_MODNAME : psRISubEntry->ai8ProcName);
+			}
+
+			*ppszEntryString = acStringBuffer;
+			*ppHandle        = (IMG_HANDLE)IMG_CONTAINER_OF(psNextProcListNode, RI_SUBLIST_ENTRY, sProcListNode);
+
+			if (psNextProcListNode == NULL ||
+				psNextProcListNode == (PDLLIST_NODE)hashData)
+			{
+				g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY;
+			}
+			/* else continue to list MEMDESCs */
+		}
+		else
+		{
+			if (ui64TotalMemdescAlloc == 0)
+			{
+				acStringBuffer[0] = '\0';
+				*ppszEntryString = acStringBuffer;
+				g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY;
+			}
+			/* else continue to list MEMDESCs */
+		}
+		break;
+
+	case RI_GET_STATE_MEMDESCS_SUMMARY:
+		OSSNPrintf(acStringBuffer,
+		           RI_MEMDESC_SUM_BUF_SIZE,
+		           RI_MEMDESC_SUM_FRMT,
+		           pid,
+		           szProcName,
+		           ui64TotalMemdescAlloc,
+		           ui64TotalMemdescAlloc >> 10,
+		           ui64TotalImport,
+		           ui64TotalImport >> 10,
+		           (ui64TotalMemdescAlloc + ui64TotalImport),
+		           (ui64TotalMemdescAlloc + ui64TotalImport) >> 10);
+
+		*ppszEntryString = acStringBuffer;
+		ui64TotalMemdescAlloc = 0;
+		ui64TotalImport = 0;
+		szProcName[0] = '\0';
+
+		g_bNextGetState = RI_GET_STATE_PMR_LIST;
+		break;
+
+	case RI_GET_STATE_PMR_LIST:
+		if (pid == PVR_SYS_ALLOC_PID)
+		{
+			OSLockAcquire(g_hSysAllocPidListLock);
+			acStringBuffer[0] = '\0';
+			if (!psSysAllocNode)
+			{
+				psSysAllocNode = &g_sSysAllocPidListHead;
+				ui32ProcessedSysAllocPMRCount = 0;
+			}
+			psSysAllocNode = dllist_get_next_node(psSysAllocNode);
+
+			if (szProcName[0] == '\0')
+			{
+				OSStringCopy(szProcName, PVRSRV_MODNAME);
+			}
+			if (psSysAllocNode != NULL && psSysAllocNode != &g_sSysAllocPidListHead)
+			{
+				IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0;
+
+				psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode);
+				_GeneratePMREntryString(psRIEntry,
+										IMG_TRUE,
+										RI_PMR_ENTRY_BUF_SIZE,
+										acStringBuffer);
+				PMR_LogicalSize(psRIEntry->psPMR,
+								&uiPMRLogicalSize);
+				ui64TotalPMRAlloc += uiPMRLogicalSize;
+				PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking);
+				ui64TotalPMRBacked += uiPMRPhysicalBacking;
+
+				ui32ProcessedSysAllocPMRCount++;
+				if (ui32ProcessedSysAllocPMRCount > g_ui32SysAllocPMRCount+1)
+				{
+					g_bNextGetState = RI_GET_STATE_PMR_SUMMARY;
+				}
+				/* else continue to list PMRs */
+			}
+			else
+			{
+				g_bNextGetState = RI_GET_STATE_PMR_SUMMARY;
+			}
+			*ppszEntryString = (IMG_CHAR *)acStringBuffer;
+			OSLockRelease(g_hSysAllocPidListLock);
+		}
+		else
+		{
+			IMG_BOOL bPMRToDisplay = IMG_FALSE;
+
+			/* Iterate through the 'touched' PMRs and display details */
+			if (!psNode)
+			{
+				psNode = dllist_get_next_node(&sListFirst);
+			}
+			else
+			{
+				psNode = dllist_get_next_node(psNode);
+			}
+
+			while ((psNode != NULL && psNode != &sListFirst) &&
+					!bPMRToDisplay)
+			{
+				psRIEntry = IMG_CONTAINER_OF(psNode, RI_LIST_ENTRY, sListNode);
+				if (psRIEntry->ui32RIPMRFlags & RI_FLAG_PARSED_BY_DEBUGFS)
+				{
+					IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0;
+
+					/* This PMR was 'touched', so display details and unflag it*/
+					_GeneratePMREntryString(psRIEntry,
+											IMG_TRUE,
+											RI_PMR_ENTRY_BUF_SIZE,
+											acStringBuffer);
+					psRIEntry->ui32RIPMRFlags &= ~RI_FLAG_PARSED_BY_DEBUGFS;
+					PMR_LogicalSize(psRIEntry->psPMR, &uiPMRLogicalSize);
+					ui64TotalPMRAlloc += uiPMRLogicalSize;
+					PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking);
+					ui64TotalPMRBacked += uiPMRPhysicalBacking;
+
+					/* Remember the process name from one PMR, for the summary line */
+					if (szProcName[0] == '\0')
+					{
+						OSStringCopy(szProcName, psRIEntry->ai8ProcName);
+					}
+					bPMRToDisplay = IMG_TRUE;
+				}
+				else
+				{
+					psNode = dllist_get_next_node(psNode);
+				}
+			}
+
+			if (psNode == NULL || (psNode == &sListFirst))
+			{
+				g_bNextGetState = RI_GET_STATE_PMR_SUMMARY;
+			}
+			/* else continue listing PMRs */
+		}
+		break;
+
+	case RI_GET_STATE_PMR_SUMMARY:
+		OSSNPrintf(acStringBuffer,
+		           RI_PMR_SUM_BUF_SIZE,
+		           RI_PMR_SUM_FRMT,
+		           pid,
+		           szProcName,
+		           ui64TotalPMRAlloc,
+		           ui64TotalPMRAlloc >> 10,
+		           ui64TotalPMRBacked,
+		           ui64TotalPMRBacked >> 10);
+
+		*ppszEntryString = acStringBuffer;
+		ui64TotalPMRAlloc = 0;
+		ui64TotalPMRBacked = 0;
+		szProcName[0] = '\0';
+		psSysAllocNode = NULL;
+
+		g_bNextGetState = RI_GET_STATE_END;
+		break;
+
+	default:
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bad state (%d)", __func__, g_bNextGetState));
+
+		__fallthrough;
+	case RI_GET_STATE_END:
+		/* Reset state ready for the next gpu_mem_area file to display */
+		*ppszEntryString = NULL;
+		*ppHandle        = NULL;
+		psNode = NULL;
+		szProcName[0] = '\0';
+
+		g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START;
+		return IMG_FALSE;
+	}
+
+	return IMG_TRUE;
+}
+
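+/*
+ * Illustrative usage sketch (not part of the driver; argument names are
+ * invented): RIGetListEntryKM() is a stateful iterator, typically driven
+ * from a debugfs read handler until it returns IMG_FALSE:
+ *
+ *     IMG_HANDLE *pHandle = NULL;
+ *     IMG_CHAR *pszEntry;
+ *
+ *     while (RIGetListEntryKM(pid, &pHandle, &pszEntry))
+ *     {
+ *         // emit pszEntry (MEMDESC entries, summaries, then PMR entries)
+ *     }
+ */
+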
+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */
+static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry,
+                                        IMG_BOOL bDebugFs,
+                                        IMG_UINT16 ui16MaxStrLen,
+                                        IMG_CHAR *pszEntryString)
+{
+	IMG_CHAR szProc[RI_MEMDESC_ENTRY_PROC_BUF_SIZE];
+	IMG_CHAR szImport[RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE];
+	IMG_CHAR szEntryFormat[RI_MEMDESC_ENTRY_FRMT_SIZE];
+	const IMG_CHAR *pszAnnotationText;
+	IMG_PID uiRIPid = 0;
+	PMR* psRIPMR = NULL;
+	IMG_UINT32 ui32RIPMRFlags = 0;
+
+	if (psRISubEntry->psRI != NULL)
+	{
+		uiRIPid = psRISubEntry->psRI->pid;
+		psRIPMR = psRISubEntry->psRI->psPMR;
+		ui32RIPMRFlags = psRISubEntry->psRI->ui32RIPMRFlags;
+	}
+
+	OSSNPrintf(szEntryFormat,
+			RI_MEMDESC_ENTRY_FRMT_SIZE,
+			RI_MEMDESC_ENTRY_FRMT,
+			DEVMEM_ANNOTATION_MAX_LEN);
+
+	if (!bDebugFs)
+	{
+		/* process ID info is included only in non-debugfs output */
+		OSSNPrintf(szProc,
+				RI_MEMDESC_ENTRY_PROC_BUF_SIZE,
+				RI_MEMDESC_ENTRY_PROC_FRMT,
+				psRISubEntry->pid,
+				psRISubEntry->ai8ProcName);
+	}
+
+	if (psRISubEntry->bIsImport && psRIPMR)
+	{
+		OSSNPrintf((IMG_CHAR *)&szImport,
+		           RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE,
+		           RI_MEMDESC_ENTRY_IMPORT_FRMT,
+		           uiRIPid);
+		/* Set pszAnnotationText to that of the 'parent' PMR RI entry */
+		pszAnnotationText = PMR_GetAnnotation(psRIPMR);
+	}
+	else if (!psRISubEntry->bIsSuballoc && psRIPMR)
+	{
+		/* Set pszAnnotationText to that of the 'parent' PMR RI entry */
+		pszAnnotationText = PMR_GetAnnotation(psRIPMR);
+	}
+	else
+	{
+		/* Set pszAnnotationText to that of the MEMDESC RI entry */
+		pszAnnotationText = psRISubEntry->ai8TextB;
+	}
+
+	/* For debugfs output, don't print MEMDESCs which have no device mapping
+	 * and are local imports (i.e. imported PMRs allocated by this process
+	 * or by the driver itself)
+	 */
+	if (bDebugFs &&
+		((psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset) == 0) &&
+		(psRISubEntry->bIsImport && ((psRISubEntry->pid == uiRIPid) || (uiRIPid == PVR_SYS_ALLOC_PID))))
+	{
+		/* Don't print this entry */
+		pszEntryString[0] = '\0';
+	}
+	else
+	{
+		OSSNPrintf(pszEntryString,
+				   ui16MaxStrLen,
+				   szEntryFormat,
+				   (bDebugFs ? "" : "   "),
+				   psRISubEntry->pid,
+				   (psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset),
+				   pszAnnotationText,
+				   (bDebugFs ? "" : (char *)szProc),
+				   psRISubEntry->ui64Size,
+				   psRIPMR,
+				   (psRISubEntry->bIsImport ? (char *)&szImport : ""),
+				   (!psRISubEntry->bIsImport && (ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) && (psRISubEntry->pid != PVR_SYS_ALLOC_PID)) ? g_szSysAllocImport : "",
+				   (psRIPMR && PMR_IsUnpinned(psRIPMR)) ? RI_MEMDESC_ENTRY_UNPINNED_FRMT : "",
+				   (bDebugFs ? '\n' : ' '));
+	}
+}
+
+/* Function used to produce string containing info for PMR RI entries (used for debugfs and kernel log output) */
+static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry,
+                                    IMG_BOOL bDebugFs,
+                                    IMG_UINT16 ui16MaxStrLen,
+                                    IMG_CHAR *pszEntryString)
+{
+	const IMG_CHAR*   pszAnnotationText;
+	IMG_DEVMEM_SIZE_T uiLogicalSize = 0;
+	IMG_DEVMEM_SIZE_T uiPhysicalSize = 0;
+	IMG_CHAR          szEntryFormat[RI_PMR_ENTRY_FRMT_SIZE];
+
+	PMR_LogicalSize(psRIEntry->psPMR, &uiLogicalSize);
+
+	PMR_PhysicalSize(psRIEntry->psPMR, &uiPhysicalSize);
+
+	OSSNPrintf(szEntryFormat,
+			RI_PMR_ENTRY_FRMT_SIZE,
+			RI_PMR_ENTRY_FRMT,
+			DEVMEM_ANNOTATION_MAX_LEN);
+
+	/* Set pszAnnotationText to that of the PMR RI entry */
+	pszAnnotationText = (IMG_PCHAR) PMR_GetAnnotation(psRIEntry->psPMR);
+
+	OSSNPrintf(pszEntryString,
+	           ui16MaxStrLen,
+	           szEntryFormat,
+	           (bDebugFs ? "" : "   "),
+	           psRIEntry->pid,
+	           (void*)psRIEntry->psPMR,
+	           pszAnnotationText,
+	           uiLogicalSize,
+	           uiPhysicalSize,
+	           (bDebugFs ? '\n' : ' '));
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_DumpList
+
+ @Description
+            Dumps out RI List entries according to parameters passed.
+
+ @input     psPMR - If not NULL, function will output the RI entries for
+                   the specified PMR only
+ @input     pid - If non-zero, the function will only output MEMDESC RI
+                  entries made by the process with ID pid.
+                  If zero, all MEMDESC RI entries will be output.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid)
+{
+	RI_LIST_ENTRY *psRIEntry = NULL;
+	RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+	IMG_UINT16 ui16SubEntriesParsed = 0;
+	uintptr_t hashData = 0;
+	IMG_PID hashKey;
+	PMR *pPMRHashKey = psPMR;
+	IMG_BOOL bDisplayedThisPMR = IMG_FALSE;
+	IMG_UINT64 ui64LogicalSize = 0;
+
+	if (!psPMR)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (g_pRIHashTable && g_pProcHashTable)
+	{
+		if (pid != 0)
+		{
+			/* look-up pid in Hash Table */
+			hashKey = pid;
+			hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey);
+			if (hashData)
+			{
+				psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+				if (psRISubEntry)
+				{
+					psRIEntry = psRISubEntry->psRI;
+				}
+			}
+		}
+		else
+		{
+			/* Look-up psPMR in Hash Table */
+			hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+			psRIEntry = (RI_LIST_ENTRY *)hashData;
+		}
+		if (!psRIEntry)
+		{
+			/* No entry found in hash table */
+			return PVRSRV_ERROR_NOT_FOUND;
+		}
+		while (psRIEntry)
+		{
+			bDisplayedThisPMR = IMG_FALSE;
+			/* Output details for RI entry */
+			if (!pid)
+			{
+				PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize);
+
+				_RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx,
+				            PMR_GetAnnotation(psRIEntry->psPMR),
+				            psRIEntry->psPMR,
+				            (IMG_UINT)psRIEntry->ui16SubListCount,
+				            ui64LogicalSize));
+				bDisplayedThisPMR = IMG_TRUE;
+			}
+			ui16SubEntriesParsed = 0;
+			if (psRIEntry->ui16SubListCount)
+			{
+#if _DUMP_LINKEDLIST_INFO
+				_RIOutput (("RI LIST: {sSubListFirst.psNextNode:0x%p}\n",
+				            psRIEntry->sSubListFirst.psNextNode));
+#endif /* _DUMP_LINKEDLIST_INFO */
+				if (!pid)
+				{
+					psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)),
+					                                RI_SUBLIST_ENTRY, sListNode);
+				}
+				/* Traverse RI sublist and output details for each entry */
+				while (psRISubEntry)
+				{
+					if (psRIEntry)
+					{
+						if (ui16SubEntriesParsed >= psRIEntry->ui16SubListCount)
+						{
+							break;
+						}
+						if (!bDisplayedThisPMR)
+						{
+							PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize);
+
+							_RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx,
+								    PMR_GetAnnotation(psRIEntry->psPMR),
+								    psRIEntry->psPMR,
+								    (IMG_UINT)psRIEntry->ui16SubListCount,
+								    ui64LogicalSize));
+							bDisplayedThisPMR = IMG_TRUE;
+						}
+					}
+#if _DUMP_LINKEDLIST_INFO
+					_RIOutput (("RI LIST:    [this subentry:0x%p]\n",psRISubEntry));
+					_RIOutput (("RI LIST:     psRI:0x%p\n",psRISubEntry->psRI));
+#endif /* _DUMP_LINKEDLIST_INFO */
+
+					{
+						IMG_CHAR szEntryString[RI_MEMDESC_ENTRY_BUF_SIZE];
+
+						_GenerateMEMDESCEntryString(psRISubEntry,
+						                            IMG_FALSE,
+						                            RI_MEMDESC_ENTRY_BUF_SIZE,
+						                            szEntryString);
+						_RIOutput (("%s", szEntryString));
+					}
+
+					if (pid)
+					{
+						if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) ||
+							(dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData))
+						{
+							psRISubEntry = NULL;
+						}
+						else
+						{
+							psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)),
+							                                RI_SUBLIST_ENTRY, sProcListNode);
+							if (psRISubEntry)
+							{
+								if (psRIEntry != psRISubEntry->psRI)
+								{
+									/*
+									 * The next MEMDESC in the process linked list is in a different PMR
+									 */
+									psRIEntry = psRISubEntry->psRI;
+									bDisplayedThisPMR = IMG_FALSE;
+								}
+							}
+						}
+					}
+					else
+					{
+						ui16SubEntriesParsed++;
+						psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
+						                                RI_SUBLIST_ENTRY, sListNode);
+					}
+				}
+			}
+			if (!pid && psRIEntry)
+			{
+				if (ui16SubEntriesParsed != psRIEntry->ui16SubListCount)
+				{
+					/*
+					 * Output error message as sublist does not contain the
+					 * number of entries indicated by sublist count
+					 */
+					_RIOutput (("RI ERROR: RI sublist contains %d entries, not %d entries\n",
+					            ui16SubEntriesParsed, psRIEntry->ui16SubListCount));
+				}
+				else if (psRIEntry->ui16SubListCount && !dllist_get_next_node(&(psRIEntry->sSubListFirst)))
+				{
+					/*
+					 * Output error message as sublist is empty but sublist count
+					 * is not zero
+					 */
+					_RIOutput (("RI ERROR: ui16SubListCount=%d for empty RI sublist\n",
+					            psRIEntry->ui16SubListCount));
+				}
+			}
+			psRIEntry = NULL;
+		}
+	}
+	return PVRSRV_OK;
+}
+
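+/*
+ * Illustrative output sketch (not part of the driver): for a PMR carrying
+ * two sub-allocations, _DumpList() emits output of the following shape
+ * (annotation and addresses are invented for the example):
+ *
+ *     MyAnnotation <ffffff8009123456> suballocs:2 size:0x0000010000
+ *        <MEMDESC entry line>
+ *        <MEMDESC entry line>
+ */
+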
+/*!
+*******************************************************************************
+
+ @Function	RIDumpAllKM
+
+ @Description
+            Dumps out the contents of all RI List entries (i.e. for all
+            MEMDESC allocations for each PMR).
+            At present, output is directed to Kernel log
+            via PVR_DPF.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpAllKM(void)
+{
+	if (g_pRIHashTable)
+	{
+		return HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DumpAllEntries);
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RIDumpProcessKM
+
+ @Description
+            Dumps out the contents of all MEMDESC RI List entries (for every
+            PMR) which have been allocated by the specified process only.
+            At present, output is directed to Kernel log
+            via PVR_DPF.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 dummyPMR;
+
+	if (!g_pProcHashTable)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Acquire RI lock*/
+	_RILock();
+
+	/* Pass a dummy non-NULL PMR pointer purely to satisfy the NULL check in
+	 * _DumpList(); the pid path never dereferences it */
+	eError = _DumpList((PMR *)&dummyPMR, pid);
+
+	/* Release RI lock*/
+	_RIUnlock();
+
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_TotalAllocsForProcess
+
+ @Description
+            Totals all PMR physical backing for the given process.
+
+ @input     pid - ID of process.
+
+ @input     ePhysHeapType - type of Physical Heap for which to total allocs
+
+ @Return	Size of all physical backing for PID's PMRs allocated from the
+            specified heap type (in bytes).
+
+******************************************************************************/
+static IMG_INT32 _TotalAllocsForProcess(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType)
+{
+	RI_LIST_ENTRY *psRIEntry = NULL;
+	RI_SUBLIST_ENTRY *psInitialRISubEntry = NULL;
+	RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+	uintptr_t hashData = 0;
+	IMG_PID hashKey;
+	IMG_INT32 i32TotalPhysical = 0;
+
+	if (g_pRIHashTable && g_pProcHashTable)
+	{
+		if (pid == PVR_SYS_ALLOC_PID)
+		{
+			IMG_UINT32 ui32ProcessedSysAllocPMRCount = 0;
+			DLLIST_NODE *psSysAllocNode = NULL;
+
+			OSLockAcquire(g_hSysAllocPidListLock);
+			psSysAllocNode = dllist_get_next_node(&g_sSysAllocPidListHead);
+			while (psSysAllocNode && psSysAllocNode != &g_sSysAllocPidListHead)
+			{
+				psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode);
+				ui32ProcessedSysAllocPMRCount++;
+				if (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType)
+				{
+					IMG_UINT64 ui64PhysicalSize;
+
+					PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize);
+					if ((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)
+					{
+						PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeds IMG_INT32 range", __func__));
+					}
+					i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff);
+				}
+				psSysAllocNode = dllist_get_next_node(psSysAllocNode);
+			}
+			OSLockRelease(g_hSysAllocPidListLock);
+		}
+		else
+		{
+			if (pid != 0)
+			{
+				/* look-up pid in Hash Table */
+				hashKey = pid;
+				hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey);
+				if (hashData)
+				{
+					psInitialRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+					psRISubEntry = psInitialRISubEntry;
+					if (psRISubEntry)
+					{
+						psRIEntry = psRISubEntry->psRI;
+					}
+				}
+			}
+
+			/* First pass: total each qualifying PMR once, marking it as counted */
+			while (psRISubEntry && psRIEntry)
+			{
+				if (!psRISubEntry->bIsImport && !(psRIEntry->ui32RIPMRFlags & RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS) &&
+					(pid == PVR_SYS_ALLOC_PID || !(psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR)) &&
+					(PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType))
+				{
+					IMG_UINT64 ui64PhysicalSize;
+
+					PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize);
+					if ((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)
+					{
+						PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeds IMG_INT32 range", __func__));
+					}
+					i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff);
+					psRIEntry->ui32RIPMRFlags |= RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS;
+				}
+				if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) ||
+					(dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData))
+				{
+					psRISubEntry = NULL;
+					psRIEntry = NULL;
+				}
+				else
+				{
+					psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)),
+					                                RI_SUBLIST_ENTRY, sProcListNode);
+					if (psRISubEntry)
+					{
+						psRIEntry = psRISubEntry->psRI;
+					}
+				}
+			}
+			/* Second pass: clear the 'counted' flag on every PMR visited above */
+			psRISubEntry = psInitialRISubEntry;
+			if (psRISubEntry)
+			{
+				psRIEntry = psRISubEntry->psRI;
+			}
+			while (psRISubEntry && psRIEntry)
+			{
+				psRIEntry->ui32RIPMRFlags &= ~RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS;
+				if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) ||
+					(dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData))
+				{
+					psRISubEntry = NULL;
+					psRIEntry = NULL;
+				}
+				else
+				{
+					psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)),
+					                                RI_SUBLIST_ENTRY, sProcListNode);
+					if (psRISubEntry)
+					{
+						psRIEntry = psRISubEntry->psRI;
+					}
+				}
+			}
+		}
+	}
+	return i32TotalPhysical;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RITotalAllocProcessKM
+
+ @Description
+            Returns the total GPU memory (physical backing for PMRs)
+            allocated from the specified physical heap type by the given
+            process only.
+
+ @Return	Amount of physical backing allocated (in bytes)
+
+******************************************************************************/
+IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType)
+{
+	IMG_INT32 i32BackingTotal = 0;
+
+	if (g_pProcHashTable)
+	{
+		/* Acquire RI lock*/
+		_RILock();
+
+		i32BackingTotal = _TotalAllocsForProcess(pid, ePhysHeapType);
+
+		/* Release RI lock*/
+		_RIUnlock();
+	}
+	return i32BackingTotal;
+}
+
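+/*
+ * Illustrative usage sketch (not part of the driver), assuming the
+ * PHYS_HEAP_TYPE_LMA/PHYS_HEAP_TYPE_UMA enumerators from physheap.h:
+ *
+ *     IMG_INT32 i32Lma = RITotalAllocProcessKM(pid, PHYS_HEAP_TYPE_LMA);
+ *     IMG_INT32 i32Uma = RITotalAllocProcessKM(pid, PHYS_HEAP_TYPE_UMA);
+ */
+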
+#if defined(DEBUG)
+/*!
+*******************************************************************************
+
+ @Function	_DumpProcessList
+
+ @Description
+            Resolves the device virtual address mapped at a given offset
+            within a PMR, considering only MEMDESC RI entries created by
+            the specified process.
+
+ @input     psPMR - PMR in which to look up the offset
+ @input     pid - ID of the process whose MEMDESC RI entries are searched
+ @input     ui64Offset - offset within the PMR to resolve
+ @output    psDevVAddr - set to the device virtual address of the matching
+                  MEMDESC entry (0 if no match is found)
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR _DumpProcessList(PMR *psPMR,
+									 IMG_PID pid,
+									 IMG_UINT64 ui64Offset,
+									 IMG_DEV_VIRTADDR *psDevVAddr)
+{
+	RI_LIST_ENTRY *psRIEntry = NULL;
+	RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+	IMG_UINT16 ui16SubEntriesParsed = 0;
+	uintptr_t hashData = 0;
+	PMR *pPMRHashKey = psPMR;
+
+	psDevVAddr->uiAddr = 0;
+
+	if (!psPMR)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (g_pRIHashTable && g_pProcHashTable)
+	{
+		PVR_ASSERT(psPMR && pid);
+
+		/* Look-up psPMR in Hash Table */
+		hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+		psRIEntry = (RI_LIST_ENTRY *)hashData;
+
+		if (!psRIEntry)
+		{
+			/* No entry found in hash table */
+			return PVRSRV_ERROR_NOT_FOUND;
+		}
+
+		if (psRIEntry->ui16SubListCount)
+		{
+			psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)),
+											RI_SUBLIST_ENTRY, sListNode);
+
+			/* Traverse RI sublist and output details for each entry */
+			while (psRISubEntry && (ui16SubEntriesParsed < psRIEntry->ui16SubListCount))
+			{
+				if (pid == psRISubEntry->pid)
+				{
+					IMG_UINT64 ui64StartOffset = psRISubEntry->ui64Offset;
+					IMG_UINT64 ui64EndOffset = psRISubEntry->ui64Offset + psRISubEntry->ui64Size;
+
+					if (ui64Offset >= ui64StartOffset && ui64Offset < ui64EndOffset)
+					{
+						psDevVAddr->uiAddr = psRISubEntry->sVAddr.uiAddr;
+						return PVRSRV_OK;
+					}
+				}
+
+				ui16SubEntriesParsed++;
+				psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
+												RI_SUBLIST_ENTRY, sListNode);
+			}
+		}
+	}
+
+	return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RIDumpProcessListKM
+
+ @Description
+            Looks up the device virtual address for the given offset within
+            a PMR, considering only MEMDESC RI entries which have been
+            allocated by the specified process.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR,
+								 IMG_PID pid,
+								 IMG_UINT64 ui64Offset,
+								 IMG_DEV_VIRTADDR *psDevVAddr)
+{
+	PVRSRV_ERROR eError;
+
+	if (!g_pProcHashTable)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Acquire RI lock*/
+	_RILock();
+
+	eError = _DumpProcessList(psPMR,
+							  pid,
+							  ui64Offset,
+							  psDevVAddr);
+
+	/* Release RI lock*/
+	_RIUnlock();
+
+	return eError;
+}
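+
+/*
+ * Illustrative usage sketch (not part of the driver): debug code can map a
+ * faulting PMR offset back to the device virtual address at which the
+ * process mapped it:
+ *
+ *     IMG_DEV_VIRTADDR sDevVAddr;
+ *
+ *     if (RIDumpProcessListKM(psPMR, pid, ui64Offset, &sDevVAddr) == PVRSRV_OK)
+ *     {
+ *         // sDevVAddr.uiAddr holds the base VA of the matching MEMDESC
+ *     }
+ */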
+#endif
+
+static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v)
+{
+	RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v;
+
+	PVR_UNREFERENCED_PARAMETER (k);
+
+	return RIDumpListKM(psRIEntry->psPMR);
+}
+
+static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v)
+{
+	RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v;
+	RI_SUBLIST_ENTRY *psRISubEntry;
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER (k);
+
+	while ((eResult == PVRSRV_OK) && (psRIEntry->ui16SubListCount > 0))
+	{
+		psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), RI_SUBLIST_ENTRY, sListNode);
+		eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE)psRISubEntry);
+	}
+	if (eResult == PVRSRV_OK)
+	{
+		eResult = RIDeletePMREntryKM((RI_HANDLE)psRIEntry);
+		/*
+		 * If we've deleted the Hash table, return
+		 * an error to stop the iterator...
+		 */
+		if (!g_pRIHashTable)
+		{
+			eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+		}
+	}
+	return eResult;
+}
+
+static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v)
+{
+	RI_SUBLIST_ENTRY *psRISubEntry = (RI_SUBLIST_ENTRY *)v;
+	PVRSRV_ERROR eResult;
+
+	PVR_UNREFERENCED_PARAMETER (k);
+
+	eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE) psRISubEntry);
+	if (eResult == PVRSRV_OK && !g_pProcHashTable)
+	{
+		/*
+		 * If we've deleted the Hash table, return
+		 * an error to stop the iterator...
+		 */
+		eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+	}
+
+	return eResult;
+}
+
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ri_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ri_server.h
new file mode 100644
index 0000000..a0c1bdc
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ri_server.h
@@ -0,0 +1,106 @@
+/*************************************************************************/ /*!
+@File			ri_server.h
+@Title          Resource Information abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Resource Information (RI) functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RI_SERVER_H_
+#define _RI_SERVER_H_
+
+#include <img_defs.h>
+#include <ri_typedefs.h>
+#include <pmr.h>
+#include <pvrsrv_error.h>
+#include <physheap.h>
+
+PVRSRV_ERROR RIInitKM(void);
+void RIDeInitKM(void);
+
+void RILockAcquireKM(void);
+void RILockReleaseKM(void);
+
+PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR);
+
+PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR,
+                                        IMG_PID ui32Owner);
+
+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR,
+                                   IMG_UINT32 ui32TextBSize,
+                                   const IMG_CHAR ai8TextB[DEVMEM_ANNOTATION_MAX_LEN],
+                                   IMG_UINT64 uiOffset,
+                                   IMG_UINT64 uiSize,
+                                   IMG_BOOL bIsImport,
+                                   IMG_BOOL bIsSuballoc,
+                                   RI_HANDLE *phRIHandle);
+
+PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize,
+                                    const IMG_CHAR *psz8TextB,
+                                    IMG_UINT64 ui64Size,
+                                    IMG_UINT64 ui64DevVAddr,
+                                    RI_HANDLE *phRIHandle);
+
+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle,
+								   IMG_DEV_VIRTADDR sVAddr);
+
+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle);
+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle);
+
+PVRSRV_ERROR RIDeleteListKM(void);
+
+PVRSRV_ERROR RIDumpListKM(PMR *psPMR);
+
+PVRSRV_ERROR RIDumpAllKM(void);
+
+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid);
+
+#if defined(DEBUG)
+PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR,
+								 IMG_PID pid,
+								 IMG_UINT64 ui64Offset,
+								 IMG_DEV_VIRTADDR *psDevVAddr);
+#endif
+
+IMG_BOOL RIGetListEntryKM(IMG_PID pid,
+						  IMG_HANDLE **ppHandle,
+						  IMG_CHAR **ppszEntryString);
+
+IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType);
+
+#endif /* #ifndef _RI_SERVER_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ri_typedefs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ri_typedefs.h
new file mode 100644
index 0000000..2580b20
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/ri_typedefs.h
@@ -0,0 +1,53 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Information (RI) Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Client side part of RI management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RI_TYPEDEFS_H
+#define RI_TYPEDEFS_H
+
+#include "img_types.h"
+
+/* An RI handle is an opaque pointer to an RI sublist entry; the structure
+ * is deliberately incomplete here so clients cannot dereference it */
+typedef struct RI_SUBLIST_ENTRY RI_ENTRY;
+typedef RI_ENTRY* RI_HANDLE;
+
+#endif /* #ifndef RI_TYPEDEFS_H */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rogue_trace_events.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rogue_trace_events.h
new file mode 100644
index 0000000..a0d9b32
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/rogue_trace_events.h
@@ -0,0 +1,558 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rogue
+
+#if !defined(ROGUE_TRACE_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define ROGUE_TRACE_EVENTS_H
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/time.h>
+
+#define show_secs_from_ns(ns) \
+	({ \
+		u64 t = ns + (NSEC_PER_USEC / 2); \
+		do_div(t, NSEC_PER_SEC); \
+		t; \
+	})
+
+#define show_usecs_from_ns(ns) \
+	({ \
+		u64 t = ns + (NSEC_PER_USEC / 2); \
+		u32 rem; \
+		do_div(t, NSEC_PER_USEC); \
+		rem = do_div(t, USEC_PER_SEC); \
+	})
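+
+/*
+ * Worked example (illustrative, not part of the driver): for a timestamp of
+ * 1500000499 ns, show_secs_from_ns() yields 1 and show_usecs_from_ns()
+ * yields 500000, so a "ts=%llu.%06lu" format prints "ts=1.500000".
+ */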
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_update_enabled_callback(void);
+#else
+void trace_fence_update_enabled_callback(void);
+#endif
+void trace_fence_update_disabled_callback(void);
+
+TRACE_EVENT_FN(rogue_fence_update,
+
+	TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset,
+		u32 sync_fwaddr, u32 sync_value),
+
+	TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value),
+
+	TP_STRUCT__entry(
+		__string(       comm,           comm            )
+		__string(       cmd,            cmd             )
+		__string(       dm,             dm              )
+		__field(        u32,            ctx_id          )
+		__field(        u32,            offset          )
+		__field(        u32,            sync_fwaddr     )
+		__field(        u32,            sync_value      )
+	),
+
+	TP_fast_assign(
+		__assign_str(comm, comm);
+		__assign_str(cmd, cmd);
+		__assign_str(dm, dm);
+		__entry->ctx_id = ctx_id;
+		__entry->offset = offset;
+		__entry->sync_fwaddr = sync_fwaddr;
+		__entry->sync_value = sync_value;
+	),
+
+	TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
+		__get_str(comm),
+		__get_str(cmd),
+		__get_str(dm),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->offset,
+		(unsigned long)__entry->sync_fwaddr,
+		(unsigned long)__entry->sync_value),
+
+	trace_fence_update_enabled_callback,
+	trace_fence_update_disabled_callback
+);
+
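+/*
+ * Illustrative sketch (not part of the driver; argument names are invented):
+ * TRACE_EVENT_FN() generates a trace_rogue_fence_update() inline which the
+ * server code calls when a fence sync is about to be updated, e.g.:
+ *
+ *     trace_rogue_fence_update(current->comm, pszCmd, pszDM, ui32CtxId,
+ *                              ui32Offset, ui32FwAddr, ui32Value);
+ */
+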
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_check_enabled_callback(void);
+#else
+void trace_fence_check_enabled_callback(void);
+#endif
+void trace_fence_check_disabled_callback(void);
+
+TRACE_EVENT_FN(rogue_fence_check,
+
+	TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset,
+		u32 sync_fwaddr, u32 sync_value),
+
+	TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value),
+
+	TP_STRUCT__entry(
+		__string(       comm,           comm            )
+		__string(       cmd,            cmd             )
+		__string(       dm,             dm              )
+		__field(        u32,            ctx_id          )
+		__field(        u32,            offset          )
+		__field(        u32,            sync_fwaddr     )
+		__field(        u32,            sync_value      )
+	),
+
+	TP_fast_assign(
+		__assign_str(comm, comm);
+		__assign_str(cmd, cmd);
+		__assign_str(dm, dm);
+		__entry->ctx_id = ctx_id;
+		__entry->offset = offset;
+		__entry->sync_fwaddr = sync_fwaddr;
+		__entry->sync_value = sync_value;
+	),
+
+	TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
+		__get_str(comm),
+		__get_str(cmd),
+		__get_str(dm),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->offset,
+		(unsigned long)__entry->sync_fwaddr,
+		(unsigned long)__entry->sync_value),
+
+	trace_fence_check_enabled_callback,
+	trace_fence_check_disabled_callback
+);
+
+TRACE_EVENT(rogue_job_enqueue,
+
+	TP_PROTO(u32 ctx_id, u32 int_id, u32 ext_id,
+	         const char *kick_type),
+
+	TP_ARGS(ctx_id, int_id, ext_id, kick_type),
+
+	TP_STRUCT__entry(
+		__field(u32, ctx_id)
+		__field(u32, int_id)
+		__field(u32, ext_id)
+		__string(kick_type, kick_type)
+	),
+
+	TP_fast_assign(
+		__entry->ctx_id = ctx_id;
+		__entry->int_id = int_id;
+		__entry->ext_id = ext_id;
+		__assign_str(kick_type, kick_type);
+	),
+
+	TP_printk("ctx_id=%lu int_id=%lu ext_id=%lu kick_type=%s",
+		(unsigned long) __entry->ctx_id,
+		(unsigned long) __entry->int_id,
+		(unsigned long) __entry->ext_id,
+		__get_str(kick_type)
+	)
+);
+
+TRACE_EVENT(rogue_sched_switch,
+
+	TP_PROTO(const char *work_type, u32 switch_type, u64 timestamp, u32 next_ctx_id,
+	         u32 next_prio, u32 next_int_id, u32 next_ext_id),
+
+	TP_ARGS(work_type, switch_type, timestamp, next_ctx_id, next_prio, next_int_id, next_ext_id),
+
+	TP_STRUCT__entry(
+		__string(work_type, work_type)
+		__field(u32, switch_type)
+		__field(u64, timestamp)
+		__field(u32, next_ctx_id)
+		__field(u32, next_prio)
+		__field(u32, next_int_id)
+		__field(u32, next_ext_id)
+	),
+
+	TP_fast_assign(
+		__assign_str(work_type, work_type);
+		__entry->switch_type = switch_type;
+		__entry->timestamp = timestamp;
+		__entry->next_ctx_id = next_ctx_id;
+		__entry->next_prio = next_prio;
+		__entry->next_int_id = next_int_id;
+		__entry->next_ext_id = next_ext_id;
+	),
+
+	TP_printk("ts=%llu.%06lu next_ctx_id=%lu next_int_id=%lu next_ext_id=%lu"
+		" next_prio=%lu work_type=%s switch_type=%s",
+		(unsigned long long) show_secs_from_ns(__entry->timestamp),
+		(unsigned long) show_usecs_from_ns(__entry->timestamp),
+		(unsigned long) __entry->next_ctx_id,
+		(unsigned long) __entry->next_int_id,
+		(unsigned long) __entry->next_ext_id,
+		(unsigned long) __entry->next_prio,
+		__get_str(work_type),
+		__print_symbolic(__entry->switch_type,
+			/* These values are from ospvr_gputrace.h. */
+			{ 1, "begin" },
+			{ 2, "end" })
+	)
+);
+
+TRACE_EVENT(rogue_create_fw_context,
+
+	TP_PROTO(const char *comm, const char *dm, u32 ctx_id),
+
+	TP_ARGS(comm, dm, ctx_id),
+
+	TP_STRUCT__entry(
+		__string(       comm,           comm            )
+		__string(       dm,             dm              )
+		__field(        u32,            ctx_id          )
+	),
+
+	TP_fast_assign(
+		__assign_str(comm, comm);
+		__assign_str(dm, dm);
+		__entry->ctx_id = ctx_id;
+	),
+
+	TP_printk("comm=%s dm=%s ctx_id=%lu",
+		__get_str(comm),
+		__get_str(dm),
+		(unsigned long)__entry->ctx_id)
+);
+
+void PVRGpuTraceEnableUfoCallback(void);
+void PVRGpuTraceDisableUfoCallback(void);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int PVRGpuTraceEnableUfoCallbackWrapper(void);
+#else
+#define PVRGpuTraceEnableUfoCallbackWrapper \
+		PVRGpuTraceEnableUfoCallback
+#endif
+
+TRACE_EVENT_FN(rogue_ufo_update,
+
+	TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 ext_id, u32 int_id,
+	         u32 fwaddr, u32 old_value, u32 new_value),
+
+	TP_ARGS(timestamp, ctx_id, job_id, ext_id, int_id, fwaddr, old_value,
+	        new_value),
+
+	TP_STRUCT__entry(
+		__field(        u64,            timestamp   )
+		__field(        u32,            ctx_id      )
+		__field(        u32,            job_id      )
+		__field(        u32,            ext_id      )
+		__field(        u32,            int_id      )
+		__field(        u32,            fwaddr      )
+		__field(        u32,            old_value   )
+		__field(        u32,            new_value   )
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__entry->ctx_id = ctx_id;
+		__entry->job_id = job_id;
+		__entry->ext_id = ext_id;
+		__entry->int_id = int_id;
+		__entry->fwaddr = fwaddr;
+		__entry->old_value = old_value;
+		__entry->new_value = new_value;
+	),
+
+	TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu ext_id=%lu int_id=%lu"
+		" fwaddr=%#lx old_value=%#lx new_value=%#lx",
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->job_id,
+		(unsigned long)__entry->ext_id,
+		(unsigned long)__entry->int_id,
+		(unsigned long)__entry->fwaddr,
+		(unsigned long)__entry->old_value,
+		(unsigned long)__entry->new_value),
+	PVRGpuTraceEnableUfoCallbackWrapper,
+	PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_check_fail,
+
+	TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 ext_id, u32 int_id,
+	         u32 fwaddr, u32 value, u32 required),
+
+	TP_ARGS(timestamp, ctx_id, job_id, ext_id, int_id, fwaddr, value, required),
+
+	TP_STRUCT__entry(
+		__field(        u64,            timestamp   )
+		__field(        u32,            ctx_id      )
+		__field(        u32,            job_id      )
+		__field(        u32,            ext_id      )
+		__field(        u32,            int_id      )
+		__field(        u32,            fwaddr      )
+		__field(        u32,            value       )
+		__field(        u32,            required    )
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__entry->ctx_id = ctx_id;
+		__entry->job_id = job_id;
+		__entry->ext_id = ext_id;
+		__entry->int_id = int_id;
+		__entry->fwaddr = fwaddr;
+		__entry->value = value;
+		__entry->required = required;
+	),
+
+	TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu ext_id=%lu int_id=%lu"
+		" fwaddr=%#lx value=%#lx required=%#lx",
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->job_id,
+		(unsigned long)__entry->ext_id,
+		(unsigned long)__entry->int_id,
+		(unsigned long)__entry->fwaddr,
+		(unsigned long)__entry->value,
+		(unsigned long)__entry->required),
+	PVRGpuTraceEnableUfoCallbackWrapper,
+	PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_pr_check_fail,
+
+	TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 ext_id, u32 int_id,
+	         u32 fwaddr, u32 value, u32 required),
+
+	TP_ARGS(timestamp, ctx_id, job_id, ext_id, int_id, fwaddr, value, required),
+
+	TP_STRUCT__entry(
+		__field(        u64,            timestamp   )
+		__field(        u32,            ctx_id      )
+		__field(        u32,            job_id      )
+		__field(        u32,            ext_id      )
+		__field(        u32,            int_id      )
+		__field(        u32,            fwaddr      )
+		__field(        u32,            value       )
+		__field(        u32,            required    )
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__entry->ctx_id = ctx_id;
+		__entry->job_id = job_id;
+		__entry->ext_id = ext_id;
+		__entry->int_id = int_id;
+		__entry->fwaddr = fwaddr;
+		__entry->value = value;
+		__entry->required = required;
+	),
+
+	TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu ext_id=%lu int_id=%lu"
+		" fwaddr=%#lx value=%#lx required=%#lx",
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->job_id,
+		(unsigned long)__entry->ext_id,
+		(unsigned long)__entry->int_id,
+		(unsigned long)__entry->fwaddr,
+		(unsigned long)__entry->value,
+		(unsigned long)__entry->required),
+	PVRGpuTraceEnableUfoCallbackWrapper,
+	PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_check_success,
+
+	TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 ext_id, u32 int_id,
+	         u32 fwaddr, u32 value),
+
+	TP_ARGS(timestamp, ctx_id, job_id, ext_id, int_id, fwaddr, value),
+
+	TP_STRUCT__entry(
+		__field(        u64,            timestamp   )
+		__field(        u32,            ctx_id      )
+		__field(        u32,            job_id      )
+		__field(        u32,            ext_id      )
+		__field(        u32,            int_id      )
+		__field(        u32,            fwaddr      )
+		__field(        u32,            value       )
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__entry->ctx_id = ctx_id;
+		__entry->job_id = job_id;
+		__entry->ext_id = ext_id;
+		__entry->int_id = int_id;
+		__entry->fwaddr = fwaddr;
+		__entry->value = value;
+	),
+
+	TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu ext_id=%lu int_id=%lu"
+		" fwaddr=%#lx value=%#lx",
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->job_id,
+		(unsigned long)__entry->ext_id,
+		(unsigned long)__entry->int_id,
+		(unsigned long)__entry->fwaddr,
+		(unsigned long)__entry->value),
+	PVRGpuTraceEnableUfoCallbackWrapper,
+	PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_pr_check_success,
+
+	TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 ext_id, u32 int_id,
+	         u32 fwaddr, u32 value),
+
+	TP_ARGS(timestamp, ctx_id, job_id, ext_id, int_id, fwaddr, value),
+
+	TP_STRUCT__entry(
+		__field(        u64,            timestamp   )
+		__field(        u32,            ctx_id      )
+		__field(        u32,            job_id      )
+		__field(        u32,            ext_id      )
+		__field(        u32,            int_id      )
+		__field(        u32,            fwaddr      )
+		__field(        u32,            value       )
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__entry->ctx_id = ctx_id;
+		__entry->job_id = job_id;
+		__entry->ext_id = ext_id;
+		__entry->int_id = int_id;
+		__entry->fwaddr = fwaddr;
+		__entry->value = value;
+	),
+
+	TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu ext_id=%lu int_id=%lu"
+		" fwaddr=%#lx value=%#lx",
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->job_id,
+		(unsigned long)__entry->ext_id,
+		(unsigned long)__entry->int_id,
+		(unsigned long)__entry->fwaddr,
+		(unsigned long)__entry->value),
+	PVRGpuTraceEnableUfoCallbackWrapper,
+	PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT(rogue_events_lost,
+
+	TP_PROTO(u32 event_source, u32 last_ordinal, u32 curr_ordinal),
+
+	TP_ARGS(event_source, last_ordinal, curr_ordinal),
+
+	TP_STRUCT__entry(
+		__field(        u32,            event_source     )
+		__field(        u32,            last_ordinal     )
+		__field(        u32,            curr_ordinal     )
+	),
+
+	TP_fast_assign(
+		__entry->event_source = event_source;
+		__entry->last_ordinal = last_ordinal;
+		__entry->curr_ordinal = curr_ordinal;
+	),
+
+	TP_printk("event_source=%s last_ordinal=%u curr_ordinal=%u",
+		__print_symbolic(__entry->event_source, {0, "GPU"}, {1, "Host"}),
+		__entry->last_ordinal,
+		__entry->curr_ordinal)
+);
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void);
+void PVRGpuTraceDisableFirmwareActivityCallback(void);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void);
+#else
+#define PVRGpuTraceEnableFirmwareActivityCallbackWrapper \
+		PVRGpuTraceEnableFirmwareActivityCallback
+#endif
+
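From kernel 4.10 onward, TRACE_EVENT_FN expects its registration callback to return int, which is why the void callback above needs a wrapper on newer kernels. A minimal sketch of such a wrapper, assuming the enable callback itself cannot fail:

	int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void)
	{
		PVRGpuTraceEnableFirmwareActivityCallback();
		return 0;
	}
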
+TRACE_EVENT_FN(rogue_firmware_activity,
+
+	TP_PROTO(u64 timestamp, const char *task, u32 fw_event),
+
+	TP_ARGS(timestamp, task, fw_event),
+
+	TP_STRUCT__entry(
+		__field(        u64,            timestamp       )
+		__string(       task,           task            )
+		__field(        u32,            fw_event        )
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__assign_str(task, task);
+		__entry->fw_event = fw_event;
+	),
+
+	TP_printk("ts=%llu.%06lu task=%s event=%s",
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		__get_str(task),
+		__print_symbolic(__entry->fw_event,
+			/* These values are from ospvr_gputrace.h. */
+			{ 1, "begin" },
+			{ 2, "end" })),
+
+	PVRGpuTraceEnableFirmwareActivityCallbackWrapper,
+	PVRGpuTraceDisableFirmwareActivityCallback
+);
+
+#undef show_secs_from_ns
+#undef show_usecs_from_ns
+
+#endif /* ROGUE_TRACE_EVENTS_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */
+#define TRACE_INCLUDE_FILE rogue_trace_events
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
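For orientation, exactly one compilation unit must instantiate these tracepoints by defining CREATE_TRACE_POINTS before including the header; every other user includes it plainly. A hypothetical consumer would look like:

	/* illustrative consumer, file name assumed */
	#define CREATE_TRACE_POINTS
	#include "rogue_trace_events.h"

	static void report_lost_events(u32 last, u32 curr)
	{
		/* emits the rogue_events_lost event defined above */
		trace_rogue_events_lost(1 /* Host */, last, curr);
	}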
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_cache_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_cache_bridge.c
new file mode 100644
index 0000000..7e6bbc8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_cache_bridge.c
@@ -0,0 +1,486 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for cache
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for cache
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "cache_km.h"
+
+#include "common_cache_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry,
+			 PVRSRV_BRIDGE_IN_CACHEOPQUEUE * psCacheOpQueueIN,
+			 PVRSRV_BRIDGE_OUT_CACHEOPQUEUE * psCacheOpQueueOUT,
+			 CONNECTION_DATA * psConnection)
+{
+	PMR **psPMRInt = NULL;
+	IMG_HANDLE *hPMRInt2 = NULL;
+	IMG_UINT64 *ui64AddressInt = NULL;
+	IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL;
+	IMG_DEVMEM_SIZE_T *uiSizeInt = NULL;
+	PVRSRV_CACHE_OP *iuCacheOpInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)) +
+	    (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) +
+	    (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) +
+	    (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) +
+	    (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) +
+	    (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) + 0;
+
+	if (unlikely(psCacheOpQueueIN->ui32NumCacheOps > CACHE_BATCH_MAX))
+	{
+		psCacheOpQueueOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto CacheOpQueue_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) psCacheOpQueueIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psCacheOpQueueOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto CacheOpQueue_exit;
+			}
+		}
+	}
+
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		psPMRInt =
+		    (PMR **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+			      ui32NextOffset);
+		ui32NextOffset +=
+		    psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *);
+		hPMRInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hPMRInt2,
+		     (const void __user *)psCacheOpQueueIN->phPMR,
+		     psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) !=
+		    PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto CacheOpQueue_exit;
+		}
+	}
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		ui64AddressInt =
+		    (IMG_UINT64 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui64AddressInt,
+		     (const void __user *)psCacheOpQueueIN->pui64Address,
+		     psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) !=
+		    PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto CacheOpQueue_exit;
+		}
+	}
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		uiOffsetInt =
+		    (IMG_DEVMEM_OFFSET_T *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+					     ui32NextOffset);
+		ui32NextOffset +=
+		    psCacheOpQueueIN->ui32NumCacheOps *
+		    sizeof(IMG_DEVMEM_OFFSET_T);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiOffsetInt,
+		     (const void __user *)psCacheOpQueueIN->puiOffset,
+		     psCacheOpQueueIN->ui32NumCacheOps *
+		     sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto CacheOpQueue_exit;
+		}
+	}
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		uiSizeInt =
+		    (IMG_DEVMEM_SIZE_T *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+					   ui32NextOffset);
+		ui32NextOffset +=
+		    psCacheOpQueueIN->ui32NumCacheOps *
+		    sizeof(IMG_DEVMEM_SIZE_T);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiSizeInt,
+		     (const void __user *)psCacheOpQueueIN->puiSize,
+		     psCacheOpQueueIN->ui32NumCacheOps *
+		     sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto CacheOpQueue_exit;
+		}
+	}
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		iuCacheOpInt =
+		    (PVRSRV_CACHE_OP *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+					 ui32NextOffset);
+		ui32NextOffset +=
+		    psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, iuCacheOpInt,
+		     (const void __user *)psCacheOpQueueIN->piuCacheOp,
+		     psCacheOpQueueIN->ui32NumCacheOps *
+		     sizeof(PVRSRV_CACHE_OP)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto CacheOpQueue_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++)
+		{
+			/* Look up the address from the handle */
+			psCacheOpQueueOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)&psPMRInt[i],
+						       hPMRInt2[i],
+						       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+						       IMG_TRUE);
+			if (unlikely(psCacheOpQueueOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto CacheOpQueue_exit;
+			}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psCacheOpQueueOUT->eError =
+	    CacheOpQueue(psConnection, OSGetDevData(psConnection),
+			 psCacheOpQueueIN->ui32NumCacheOps,
+			 psPMRInt,
+			 ui64AddressInt,
+			 uiOffsetInt,
+			 uiSizeInt,
+			 iuCacheOpInt,
+			 psCacheOpQueueIN->ui32OpTimeline,
+			 psCacheOpQueueIN->ui32OpInfoPgGFSeqNum,
+			 psCacheOpQueueIN->ui32CurrentFenceSeqNum,
+			 &psCacheOpQueueOUT->ui32NextFenceSeqNum);
+
+ CacheOpQueue_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	if (hPMRInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hPMRInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hPMRInt2[i],
+							    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+			}
+		}
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
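The function above carves all six per-op arrays out of one allocation, advancing ui32NextOffset for each slice and asserting on exit that the total carved matches the precomputed ui32BufferSize. Reduced to two arrays, with hypothetical names and error handling elided, the pattern is:

	IMG_UINT32 uiSize = n * sizeof(PMR *) + n * sizeof(IMG_HANDLE);
	IMG_BYTE *pBuf = OSAllocMemNoStats(uiSize);
	IMG_UINT32 uiOff = 0;
	PMR **ppsPMRs;
	IMG_HANDLE *phHandles;

	ppsPMRs = (PMR **)(pBuf + uiOff);
	uiOff += n * sizeof(PMR *);
	phHandles = (IMG_HANDLE *)(pBuf + uiOff);
	uiOff += n * sizeof(IMG_HANDLE);

	PVR_ASSERT(uiOff == uiSize);	/* layout and size must agree */
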
+static IMG_INT
+PVRSRVBridgeCacheOpExec(IMG_UINT32 ui32DispatchTableEntry,
+			PVRSRV_BRIDGE_IN_CACHEOPEXEC * psCacheOpExecIN,
+			PVRSRV_BRIDGE_OUT_CACHEOPEXEC * psCacheOpExecOUT,
+			CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMR = psCacheOpExecIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psCacheOpExecOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psCacheOpExecOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto CacheOpExec_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psCacheOpExecOUT->eError =
+	    CacheOpValExec(psPMRInt,
+			   psCacheOpExecIN->ui64Address,
+			   psCacheOpExecIN->uiOffset,
+			   psCacheOpExecIN->uiSize, psCacheOpExecIN->iuCacheOp);
+
+ CacheOpExec_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeCacheOpLog(IMG_UINT32 ui32DispatchTableEntry,
+		       PVRSRV_BRIDGE_IN_CACHEOPLOG * psCacheOpLogIN,
+		       PVRSRV_BRIDGE_OUT_CACHEOPLOG * psCacheOpLogOUT,
+		       CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMR = psCacheOpLogIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psCacheOpLogOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psCacheOpLogOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto CacheOpLog_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psCacheOpLogOUT->eError =
+	    CacheOpLog(psPMRInt,
+		       psCacheOpLogIN->ui64Address,
+		       psCacheOpLogIN->uiOffset,
+		       psCacheOpLogIN->uiSize,
+		       psCacheOpLogIN->i64QueuedTimeUs,
+		       psCacheOpLogIN->i64ExecuteTimeUs,
+		       psCacheOpLogIN->i32NumRBF,
+		       psCacheOpLogIN->bIsDiscard, psCacheOpLogIN->iuCacheOp);
+
+ CacheOpLog_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
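All three entry points in this file share one locking shape: hold the handle-base lock only around lookup, drop it for the actual cache operation, then retake it to drop the reference. The skeleton, with error handling elided:

	LockHandle(psConnection->psHandleBase);
	eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
					    (void **)&psPMR, hPMR,
					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
					    IMG_TRUE);
	UnlockHandle(psConnection->psHandleBase);

	/* ... operate on psPMR without holding the lock ... */

	LockHandle(psConnection->psHandleBase);
	PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, hPMR,
				    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
	UnlockHandle(psConnection->psHandleBase);
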
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitCACHEBridge(void);
+PVRSRV_ERROR DeinitCACHEBridge(void);
+
+/*
+ * Register all CACHE functions with services
+ */
+PVRSRV_ERROR InitCACHEBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE,
+			      PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE,
+			      PVRSRVBridgeCacheOpQueue, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE,
+			      PVRSRV_BRIDGE_CACHE_CACHEOPEXEC,
+			      PVRSRVBridgeCacheOpExec, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE,
+			      PVRSRV_BRIDGE_CACHE_CACHEOPLOG,
+			      PVRSRVBridgeCacheOpLog, NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all CACHE functions from services
+ */
+PVRSRV_ERROR DeinitCACHEBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE,
+				PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE,
+				PVRSRV_BRIDGE_CACHE_CACHEOPEXEC);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE,
+				PVRSRV_BRIDGE_CACHE_CACHEOPLOG);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_cmm_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_cmm_bridge.c
new file mode 100644
index 0000000..bd69633
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_cmm_bridge.c
@@ -0,0 +1,412 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for cmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for cmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pmr.h"
+#include "devicemem_server.h"
+
+#include "common_cmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeDevmemIntExportCtx(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *
+			       psDevmemIntExportCtxIN,
+			       PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *
+			       psDevmemIntExportCtxOUT,
+			       CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hContext = psDevmemIntExportCtxIN->hContext;
+	DEVMEMINT_CTX *psContextInt = NULL;
+	IMG_HANDLE hPMR = psDevmemIntExportCtxIN->hPMR;
+	PMR *psPMRInt = NULL;
+	DEVMEMINT_CTX_EXPORT *psContextExportInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntExportCtxOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psContextInt,
+				       hContext,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntExportCtx_exit;
+	}
+
+	/* Look up the address from the handle */
+	psDevmemIntExportCtxOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntExportCtx_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntExportCtxOUT->eError =
+	    DevmemIntExportCtx(psContextInt, psPMRInt, &psContextExportInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
+	{
+		goto DevmemIntExportCtx_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntExportCtxOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psDevmemIntExportCtxOUT->hContextExport,
+				      (void *)psContextExportInt,
+				      PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+				      (PFN_HANDLE_RELEASE) &
+				      DevmemIntUnexportCtx);
+	if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntExportCtx_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ DevmemIntExportCtx_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hContext,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	}
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+	{
+		if (psContextExportInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			DevmemIntUnexportCtx(psContextExportInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	return 0;
+}
+
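Once PVRSRVAllocHandleUnlocked() succeeds with DevmemIntUnexportCtx registered as the release function, the handle owns the export: destroying the handle, explicitly or at connection teardown, unexports the context, and only the failure path above calls DevmemIntUnexportCtx by hand. An equivalent way to express that transfer (psOUT is a hypothetical shorthand):

	psOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
						  &psOUT->hContextExport,
						  (void *)psContextExportInt,
						  PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
						  PVRSRV_HANDLE_ALLOC_FLAG_NONE,
						  (PFN_HANDLE_RELEASE) &DevmemIntUnexportCtx);
	if (psOUT->eError == PVRSRV_OK)
		psContextExportInt = NULL;	/* handle now owns the export */
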
+static IMG_INT
+PVRSRVBridgeDevmemIntUnexportCtx(IMG_UINT32 ui32DispatchTableEntry,
+				 PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *
+				 psDevmemIntUnexportCtxIN,
+				 PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *
+				 psDevmemIntUnexportCtxOUT,
+				 CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntUnexportCtxOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntUnexportCtxIN->
+					hContextExport,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+	if (unlikely
+	    ((psDevmemIntUnexportCtxOUT->eError != PVRSRV_OK)
+	     && (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeDevmemIntUnexportCtx: %s",
+			 PVRSRVGetErrorString(psDevmemIntUnexportCtxOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntUnexportCtx_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ DevmemIntUnexportCtx_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry,
+				      PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX
+				      * psDevmemIntAcquireRemoteCtxIN,
+				      PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX
+				      * psDevmemIntAcquireRemoteCtxOUT,
+				      CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMR = psDevmemIntAcquireRemoteCtxIN->hPMR;
+	PMR *psPMRInt = NULL;
+	DEVMEMINT_CTX *psContextInt = NULL;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	psDevmemIntAcquireRemoteCtxOUT->hContext = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntAcquireRemoteCtxOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntAcquireRemoteCtx_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntAcquireRemoteCtxOUT->eError =
+	    DevmemIntAcquireRemoteCtx(psPMRInt, &psContextInt, &hPrivDataInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK))
+	{
+		goto DevmemIntAcquireRemoteCtx_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntAcquireRemoteCtxOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psDevmemIntAcquireRemoteCtxOUT->hContext,
+				      (void *)psContextInt,
+				      PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+				      PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+				      (PFN_HANDLE_RELEASE) &
+				      DevmemIntCtxDestroy);
+	if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntAcquireRemoteCtx_exit;
+	}
+
+	psDevmemIntAcquireRemoteCtxOUT->eError =
+	    PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+					 &psDevmemIntAcquireRemoteCtxOUT->
+					 hPrivData, (void *)hPrivDataInt,
+					 PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+					 PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+					 psDevmemIntAcquireRemoteCtxOUT->
+					 hContext);
+	if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntAcquireRemoteCtx_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ DevmemIntAcquireRemoteCtx_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+	{
+		if (psDevmemIntAcquireRemoteCtxOUT->hContext)
+		{
+			PVRSRV_ERROR eError;
+
+			/* Lock over handle creation cleanup. */
+			LockHandle(psConnection->psHandleBase);
+
+			eError =
+			    PVRSRVReleaseHandleUnlocked(psConnection->
+							psHandleBase,
+							(IMG_HANDLE)
+							psDevmemIntAcquireRemoteCtxOUT->
+							hContext,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+			if (unlikely
+			    ((eError != PVRSRV_OK)
+			     && (eError != PVRSRV_ERROR_RETRY)))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVBridgeDevmemIntAcquireRemoteCtx: %s",
+					 PVRSRVGetErrorString(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK)
+				   || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psContextInt = NULL;
+			/* Release now we have cleaned up creation handles. */
+			UnlockHandle(psConnection->psHandleBase);
+
+		}
+
+		if (psContextInt)
+		{
+			DevmemIntCtxDestroy(psContextInt);
+		}
+	}
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_CMM_BRIDGE */
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+PVRSRV_ERROR InitCMMBridge(void);
+PVRSRV_ERROR DeinitCMMBridge(void);
+
+/*
+ * Register all CMM functions with services
+ */
+PVRSRV_ERROR InitCMMBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CMM,
+			      PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX,
+			      PVRSRVBridgeDevmemIntExportCtx, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CMM,
+			      PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX,
+			      PVRSRVBridgeDevmemIntUnexportCtx, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CMM,
+			      PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX,
+			      PVRSRVBridgeDevmemIntAcquireRemoteCtx, NULL,
+			      bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all CMM functions from services
+ */
+PVRSRV_ERROR DeinitCMMBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM,
+				PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM,
+				PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM,
+				PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX);
+
+	return PVRSRV_OK;
+}
+#else /* EXCLUDE_CMM_BRIDGE */
+/* This bridge is conditional on EXCLUDE_CMM_BRIDGE: when that macro is
+ * defined, the dispatch table is not populated with this bridge's
+ * functions and the init/deinit entry points reduce to no-ops.
+ */
+#define InitCMMBridge() \
+	PVRSRV_OK
+
+#define DeinitCMMBridge() \
+	PVRSRV_OK
+
+#endif /* EXCLUDE_CMM_BRIDGE */
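Because the excluded build turns InitCMMBridge() and DeinitCMMBridge() into macros expanding to PVRSRV_OK, common registration code can call them unconditionally; an illustrative caller needs no config test:

	PVRSRV_ERROR eError = InitCMMBridge();

	if (eError != PVRSRV_OK)
		return eError;	/* never taken when the bridge is excluded */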
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_devicememhistory_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_devicememhistory_bridge.c
new file mode 100644
index 0000000..790a93b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_devicememhistory_bridge.c
@@ -0,0 +1,847 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for devicememhistory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_history_server.h"
+
+#include "common_devicememhistory_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *
+				psDevicememHistoryMapIN,
+				PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *
+				psDevicememHistoryMapOUT,
+				CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMR = psDevicememHistoryMapIN->hPMR;
+	PMR *psPMRInt = NULL;
+	IMG_CHAR *uiTextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psDevicememHistoryMapIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psDevicememHistoryMapIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psDevicememHistoryMapOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto DevicememHistoryMap_exit;
+			}
+		}
+	}
+
+	{
+		uiTextInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiTextInt,
+		     (const void __user *)psDevicememHistoryMapIN->puiText,
+		     DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psDevicememHistoryMapOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto DevicememHistoryMap_exit;
+		}
+		((IMG_CHAR *)
+		 uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) -
+			    1] = '\0';
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevicememHistoryMapOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psDevicememHistoryMapOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevicememHistoryMap_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevicememHistoryMapOUT->eError =
+	    DevicememHistoryMapKM(psPMRInt,
+				  psDevicememHistoryMapIN->uiOffset,
+				  psDevicememHistoryMapIN->sDevVAddr,
+				  psDevicememHistoryMapIN->uiSize,
+				  uiTextInt,
+				  psDevicememHistoryMapIN->ui32Log2PageSize,
+				  psDevicememHistoryMapIN->ui32AllocationIndex,
+				  &psDevicememHistoryMapOUT->
+				  ui32AllocationIndexOut);
+
+ DevicememHistoryMap_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
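Note that the annotation is always copied at its full fixed length and then forcibly NUL-terminated, so a user-space string lacking a terminator cannot overrun later readers. The guard, reduced to a sketch with a hypothetical local buffer:

	IMG_CHAR acText[DEVMEM_ANNOTATION_MAX_LEN];

	if (OSCopyFromUser(NULL, acText,
			   (const void __user *)puiText,
			   sizeof(acText)) != PVRSRV_OK)
		return PVRSRV_ERROR_INVALID_PARAMS;

	acText[sizeof(acText) - 1] = '\0';	/* force termination */
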
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry,
+				  PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *
+				  psDevicememHistoryUnmapIN,
+				  PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *
+				  psDevicememHistoryUnmapOUT,
+				  CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMR = psDevicememHistoryUnmapIN->hPMR;
+	PMR *psPMRInt = NULL;
+	IMG_CHAR *uiTextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psDevicememHistoryUnmapIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psDevicememHistoryUnmapOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto DevicememHistoryUnmap_exit;
+			}
+		}
+	}
+
+	{
+		uiTextInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiTextInt,
+		     (const void __user *)psDevicememHistoryUnmapIN->puiText,
+		     DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psDevicememHistoryUnmapOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto DevicememHistoryUnmap_exit;
+		}
+		((IMG_CHAR *)
+		 uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) -
+			    1] = '\0';
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevicememHistoryUnmapOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psDevicememHistoryUnmapOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevicememHistoryUnmap_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevicememHistoryUnmapOUT->eError =
+	    DevicememHistoryUnmapKM(psPMRInt,
+				    psDevicememHistoryUnmapIN->uiOffset,
+				    psDevicememHistoryUnmapIN->sDevVAddr,
+				    psDevicememHistoryUnmapIN->uiSize,
+				    uiTextInt,
+				    psDevicememHistoryUnmapIN->ui32Log2PageSize,
+				    psDevicememHistoryUnmapIN->
+				    ui32AllocationIndex,
+				    &psDevicememHistoryUnmapOUT->
+				    ui32AllocationIndexOut);
+
+ DevicememHistoryUnmap_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry,
+				      PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE
+				      * psDevicememHistoryMapVRangeIN,
+				      PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE
+				      * psDevicememHistoryMapVRangeOUT,
+				      CONNECTION_DATA * psConnection)
+{
+	IMG_CHAR *uiTextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psDevicememHistoryMapVRangeIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psDevicememHistoryMapVRangeOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto DevicememHistoryMapVRange_exit;
+			}
+		}
+	}
+
+	{
+		uiTextInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiTextInt,
+		     (const void __user *)psDevicememHistoryMapVRangeIN->
+		     puiText,
+		     DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psDevicememHistoryMapVRangeOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto DevicememHistoryMapVRange_exit;
+		}
+		((IMG_CHAR *)
+		 uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) -
+			    1] = '\0';
+	}
+
+	psDevicememHistoryMapVRangeOUT->eError =
+	    DevicememHistoryMapVRangeKM(psDevicememHistoryMapVRangeIN->
+					sBaseDevVAddr,
+					psDevicememHistoryMapVRangeIN->
+					ui32ui32StartPage,
+					psDevicememHistoryMapVRangeIN->
+					ui32NumPages,
+					psDevicememHistoryMapVRangeIN->
+					uiAllocSize, uiTextInt,
+					psDevicememHistoryMapVRangeIN->
+					ui32Log2PageSize,
+					psDevicememHistoryMapVRangeIN->
+					ui32AllocationIndex,
+					&psDevicememHistoryMapVRangeOUT->
+					ui32AllocationIndexOut);
+
+ DevicememHistoryMapVRange_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry,
+					PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE
+					* psDevicememHistoryUnmapVRangeIN,
+					PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE
+					* psDevicememHistoryUnmapVRangeOUT,
+					CONNECTION_DATA * psConnection)
+{
+	IMG_CHAR *uiTextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psDevicememHistoryUnmapVRangeIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psDevicememHistoryUnmapVRangeOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto DevicememHistoryUnmapVRange_exit;
+			}
+		}
+	}
+
+	{
+		uiTextInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiTextInt,
+		     (const void __user *)psDevicememHistoryUnmapVRangeIN->
+		     puiText,
+		     DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psDevicememHistoryUnmapVRangeOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto DevicememHistoryUnmapVRange_exit;
+		}
+		((IMG_CHAR *)
+		 uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) -
+			    1] = '\0';
+	}
+
+	psDevicememHistoryUnmapVRangeOUT->eError =
+	    DevicememHistoryUnmapVRangeKM(psDevicememHistoryUnmapVRangeIN->
+					  sBaseDevVAddr,
+					  psDevicememHistoryUnmapVRangeIN->
+					  ui32ui32StartPage,
+					  psDevicememHistoryUnmapVRangeIN->
+					  ui32NumPages,
+					  psDevicememHistoryUnmapVRangeIN->
+					  uiAllocSize, uiTextInt,
+					  psDevicememHistoryUnmapVRangeIN->
+					  ui32Log2PageSize,
+					  psDevicememHistoryUnmapVRangeIN->
+					  ui32AllocationIndex,
+					  &psDevicememHistoryUnmapVRangeOUT->
+					  ui32AllocationIndexOut);
+
+ DevicememHistoryUnmapVRange_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry,
+					 PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE
+					 * psDevicememHistorySparseChangeIN,
+					 PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE
+					 * psDevicememHistorySparseChangeOUT,
+					 CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMR = psDevicememHistorySparseChangeIN->hPMR;
+	PMR *psPMRInt = NULL;
+	IMG_CHAR *uiTextInt = NULL;
+	IMG_UINT32 *ui32AllocPageIndicesInt = NULL;
+	IMG_UINT32 *ui32FreePageIndicesInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) +
+	    (psDevicememHistorySparseChangeIN->ui32AllocPageCount *
+	     sizeof(IMG_UINT32)) +
+	    (psDevicememHistorySparseChangeIN->ui32FreePageCount *
+	     sizeof(IMG_UINT32)) + 0;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psDevicememHistorySparseChangeIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psDevicememHistorySparseChangeOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto DevicememHistorySparseChange_exit;
+			}
+		}
+	}
+
+	{
+		uiTextInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiTextInt,
+		     (const void __user *)psDevicememHistorySparseChangeIN->
+		     puiText,
+		     DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psDevicememHistorySparseChangeOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto DevicememHistorySparseChange_exit;
+		}
+		((IMG_CHAR *)
+		 uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) -
+			    1] = '\0';
+	}
+	if (psDevicememHistorySparseChangeIN->ui32AllocPageCount != 0)
+	{
+		ui32AllocPageIndicesInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psDevicememHistorySparseChangeIN->ui32AllocPageCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psDevicememHistorySparseChangeIN->ui32AllocPageCount *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32AllocPageIndicesInt,
+		     (const void __user *)psDevicememHistorySparseChangeIN->
+		     pui32AllocPageIndices,
+		     psDevicememHistorySparseChangeIN->ui32AllocPageCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psDevicememHistorySparseChangeOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto DevicememHistorySparseChange_exit;
+		}
+	}
+	if (psDevicememHistorySparseChangeIN->ui32FreePageCount != 0)
+	{
+		ui32FreePageIndicesInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psDevicememHistorySparseChangeIN->ui32FreePageCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psDevicememHistorySparseChangeIN->ui32FreePageCount *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32FreePageIndicesInt,
+		     (const void __user *)psDevicememHistorySparseChangeIN->
+		     pui32FreePageIndices,
+		     psDevicememHistorySparseChangeIN->ui32FreePageCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psDevicememHistorySparseChangeOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto DevicememHistorySparseChange_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevicememHistorySparseChangeOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psDevicememHistorySparseChangeOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevicememHistorySparseChange_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevicememHistorySparseChangeOUT->eError =
+	    DevicememHistorySparseChangeKM(psPMRInt,
+					   psDevicememHistorySparseChangeIN->
+					   uiOffset,
+					   psDevicememHistorySparseChangeIN->
+					   sDevVAddr,
+					   psDevicememHistorySparseChangeIN->
+					   uiSize, uiTextInt,
+					   psDevicememHistorySparseChangeIN->
+					   ui32Log2PageSize,
+					   psDevicememHistorySparseChangeIN->
+					   ui32AllocPageCount,
+					   ui32AllocPageIndicesInt,
+					   psDevicememHistorySparseChangeIN->
+					   ui32FreePageCount,
+					   ui32FreePageIndicesInt,
+					   psDevicememHistorySparseChangeIN->
+					   ui32AllocationIndex,
+					   &psDevicememHistorySparseChangeOUT->
+					   ui32AllocationIndexOut);
+
+ DevicememHistorySparseChange_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static POS_LOCK pDEVICEMEMHISTORYBridgeLock;
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void);
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void);
+
+/*
+ * Register all DEVICEMEMHISTORY functions with services
+ */
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void)
+{
+	PVR_LOGR_IF_ERROR(OSLockCreate(&pDEVICEMEMHISTORYBridgeLock),
+			  "OSLockCreate");
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+			      PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP,
+			      PVRSRVBridgeDevicememHistoryMap,
+			      pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+			      PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP,
+			      PVRSRVBridgeDevicememHistoryUnmap,
+			      pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+			      PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE,
+			      PVRSRVBridgeDevicememHistoryMapVRange,
+			      pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+			      PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE,
+			      PVRSRVBridgeDevicememHistoryUnmapVRange,
+			      pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+			      PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE,
+			      PVRSRVBridgeDevicememHistorySparseChange,
+			      pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all DEVICEMEMHISTORY functions from services
+ */
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void)
+{
+	PVR_LOGR_IF_ERROR(OSLockDestroy(pDEVICEMEMHISTORYBridgeLock),
+			  "OSLockDestroy");
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+				PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+				PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+				PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+				PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+				PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE);
+
+	return PVRSRV_OK;
+}
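Unlike the cache and cmm bridges, which register their entries with a NULL lock, every devicememhistory entry shares pDEVICEMEMHISTORYBridgeLock, so the five calls are serialized against each other. Assuming the dispatcher honours the registered lock roughly as in this sketch (entry field names hypothetical):

	if (bUseLock && psEntry->hBridgeLock != NULL)
		OSLockAcquire(psEntry->hBridgeLock);

	eError = psEntry->pfnDispatch(ui32DispatchTableEntry,
				      psBridgeIn, psBridgeOut, psConnection);

	if (bUseLock && psEntry->hBridgeLock != NULL)
		OSLockRelease(psEntry->hBridgeLock);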
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_dmabuf_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_dmabuf_bridge.c
new file mode 100644
index 0000000..5b40a4f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_dmabuf_bridge.c
@@ -0,0 +1,513 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for dmabuf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for dmabuf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "physmem_dmabuf.h"
+#include "pmr.h"
+
+#include "common_dmabuf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *
+				psPhysmemImportDmaBufIN,
+				PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *
+				psPhysmemImportDmaBufOUT,
+				CONNECTION_DATA * psConnection)
+{
+	IMG_CHAR *uiNameInt = NULL;
+	PMR *psPMRPtrInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0;
+
+	if (unlikely
+	    (psPhysmemImportDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN))
+	{
+		psPhysmemImportDmaBufOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto PhysmemImportDmaBuf_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psPhysmemImportDmaBufIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
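+		/* Illustrative arithmetic (sizes are assumptions, not the
+		 * real build configuration): with a 4096-byte
+		 * PVRSRV_MAX_BRIDGE_IN_SIZE and an 8-byte unsigned long, a
+		 * 44-byte IN structure aligns up to offset 48, leaving 4048
+		 * bytes of in-place scratch space before OSAllocMemNoStats()
+		 * has to be used.
+		 */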
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psPhysmemImportDmaBufIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psPhysmemImportDmaBufOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto PhysmemImportDmaBuf_exit;
+			}
+		}
+	}
+
+	if (psPhysmemImportDmaBufIN->ui32NameSize != 0)
+	{
+		uiNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiNameInt,
+		     (const void __user *)psPhysmemImportDmaBufIN->puiName,
+		     psPhysmemImportDmaBufIN->ui32NameSize *
+		     sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psPhysmemImportDmaBufOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PhysmemImportDmaBuf_exit;
+		}
+		((IMG_CHAR *)
+		 uiNameInt)[(psPhysmemImportDmaBufIN->ui32NameSize *
+			     sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+
+	psPhysmemImportDmaBufOUT->eError =
+	    PhysmemImportDmaBuf(psConnection, OSGetDevData(psConnection),
+				psPhysmemImportDmaBufIN->ifd,
+				psPhysmemImportDmaBufIN->uiFlags,
+				psPhysmemImportDmaBufIN->ui32NameSize,
+				uiNameInt,
+				&psPMRPtrInt,
+				&psPhysmemImportDmaBufOUT->uiSize,
+				&psPhysmemImportDmaBufOUT->sAlign);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK))
+	{
+		goto PhysmemImportDmaBuf_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPhysmemImportDmaBufOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psPhysmemImportDmaBufOUT->hPMRPtr,
+				      (void *)psPMRPtrInt,
+				      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) & PMRUnrefPMR);
+	if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PhysmemImportDmaBuf_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ PhysmemImportDmaBuf_exit:
+
+	if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnrefPMR(psPMRPtrInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *
+				psPhysmemExportDmaBufIN,
+				PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *
+				psPhysmemExportDmaBufOUT,
+				CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMR = psPhysmemExportDmaBufIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psPhysmemExportDmaBufOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psPhysmemExportDmaBufOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PhysmemExportDmaBuf_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psPhysmemExportDmaBufOUT->eError =
+	    PhysmemExportDmaBuf(psConnection, OSGetDevData(psConnection),
+				psPMRInt, &psPhysmemExportDmaBufOUT->iFd);
+
+ PhysmemExportDmaBuf_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+				      PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF
+				      * psPhysmemImportSparseDmaBufIN,
+				      PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF
+				      * psPhysmemImportSparseDmaBufOUT,
+				      CONNECTION_DATA * psConnection)
+{
+	IMG_UINT32 *ui32MappingTableInt = NULL;
+	IMG_CHAR *uiNameInt = NULL;
+	PMR *psPMRPtrInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks *
+	     sizeof(IMG_UINT32)) +
+	    (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) +
+	    0;
+
+	if (unlikely
+	    (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks >
+	     PMR_MAX_SUPPORTED_PAGE_COUNT))
+	{
+		psPhysmemImportSparseDmaBufOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto PhysmemImportSparseDmaBuf_exit;
+	}
+
+	if (unlikely
+	    (psPhysmemImportSparseDmaBufIN->ui32NameSize >
+	     DEVMEM_ANNOTATION_MAX_LEN))
+	{
+		psPhysmemImportSparseDmaBufOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto PhysmemImportSparseDmaBuf_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psPhysmemImportSparseDmaBufIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psPhysmemImportSparseDmaBufOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto PhysmemImportSparseDmaBuf_exit;
+			}
+		}
+	}
+
+	if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks != 0)
+	{
+		ui32MappingTableInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32MappingTableInt,
+		     (const void __user *)psPhysmemImportSparseDmaBufIN->
+		     pui32MappingTable,
+		     psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psPhysmemImportSparseDmaBufOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PhysmemImportSparseDmaBuf_exit;
+		}
+	}
+	if (psPhysmemImportSparseDmaBufIN->ui32NameSize != 0)
+	{
+		uiNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psPhysmemImportSparseDmaBufIN->ui32NameSize *
+		    sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiNameInt,
+		     (const void __user *)psPhysmemImportSparseDmaBufIN->
+		     puiName,
+		     psPhysmemImportSparseDmaBufIN->ui32NameSize *
+		     sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psPhysmemImportSparseDmaBufOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PhysmemImportSparseDmaBuf_exit;
+		}
+		((IMG_CHAR *)
+		 uiNameInt)[(psPhysmemImportSparseDmaBufIN->ui32NameSize *
+			     sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+
+	psPhysmemImportSparseDmaBufOUT->eError =
+	    PhysmemImportSparseDmaBuf(psConnection, OSGetDevData(psConnection),
+				      psPhysmemImportSparseDmaBufIN->ifd,
+				      psPhysmemImportSparseDmaBufIN->uiFlags,
+				      psPhysmemImportSparseDmaBufIN->
+				      uiChunkSize,
+				      psPhysmemImportSparseDmaBufIN->
+				      ui32NumPhysChunks,
+				      psPhysmemImportSparseDmaBufIN->
+				      ui32NumVirtChunks, ui32MappingTableInt,
+				      psPhysmemImportSparseDmaBufIN->
+				      ui32NameSize, uiNameInt, &psPMRPtrInt,
+				      &psPhysmemImportSparseDmaBufOUT->uiSize,
+				      &psPhysmemImportSparseDmaBufOUT->sAlign);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK))
+	{
+		goto PhysmemImportSparseDmaBuf_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPhysmemImportSparseDmaBufOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psPhysmemImportSparseDmaBufOUT->hPMRPtr,
+				      (void *)psPMRPtrInt,
+				      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) & PMRUnrefPMR);
+	if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PhysmemImportSparseDmaBuf_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ PhysmemImportSparseDmaBuf_exit:
+
+	if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnrefPMR(psPMRPtrInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
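+/* Note: unlike the DEVICEMEMHISTORY bridge above, no per-bridge POS_LOCK is
+ * created for DMABUF; each entry below is registered with a NULL lock, so
+ * serialisation presumably falls back to the driver's global bridge lock
+ * (an assumption about SetDispatchTableEntry's NULL-lock behaviour, not
+ * something stated in this file).
+ */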
+
+PVRSRV_ERROR InitDMABUFBridge(void);
+PVRSRV_ERROR DeinitDMABUFBridge(void);
+
+/*
+ * Register all DMABUF functions with services
+ */
+PVRSRV_ERROR InitDMABUFBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF,
+			      PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF,
+			      PVRSRVBridgePhysmemImportDmaBuf, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF,
+			      PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF,
+			      PVRSRVBridgePhysmemExportDmaBuf, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF,
+			      PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF,
+			      PVRSRVBridgePhysmemImportSparseDmaBuf, NULL,
+			      bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all DMABUF functions from services
+ */
+PVRSRV_ERROR DeinitDMABUFBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF,
+				PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF,
+				PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF,
+				PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_htbuffer_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_htbuffer_bridge.c
new file mode 100644
index 0000000..fdbb51e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_htbuffer_bridge.c
@@ -0,0 +1,332 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for htbuffer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for htbuffer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "htbserver.h"
+
+#include "common_htbuffer_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry,
+		       PVRSRV_BRIDGE_IN_HTBCONTROL * psHTBControlIN,
+		       PVRSRV_BRIDGE_OUT_HTBCONTROL * psHTBControlOUT,
+		       CONNECTION_DATA * psConnection)
+{
+	IMG_UINT32 *ui32GroupEnableInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) + 0;
+
+	if (unlikely(psHTBControlIN->ui32NumGroups > HTB_FLAG_NUM_EL))
+	{
+		psHTBControlOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto HTBControl_exit;
+	}
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) psHTBControlIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psHTBControlOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto HTBControl_exit;
+			}
+		}
+	}
+
+	if (psHTBControlIN->ui32NumGroups != 0)
+	{
+		ui32GroupEnableInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32GroupEnableInt,
+		     (const void __user *)psHTBControlIN->pui32GroupEnable,
+		     psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) !=
+		    PVRSRV_OK)
+		{
+			psHTBControlOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto HTBControl_exit;
+		}
+	}
+
+	psHTBControlOUT->eError =
+	    HTBControlKM(psHTBControlIN->ui32NumGroups,
+			 ui32GroupEnableInt,
+			 psHTBControlIN->ui32LogLevel,
+			 psHTBControlIN->ui32EnablePID,
+			 psHTBControlIN->ui32LogMode,
+			 psHTBControlIN->ui32OpMode);
+
+ HTBControl_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHTBLog(IMG_UINT32 ui32DispatchTableEntry,
+		   PVRSRV_BRIDGE_IN_HTBLOG * psHTBLogIN,
+		   PVRSRV_BRIDGE_OUT_HTBLOG * psHTBLogOUT,
+		   CONNECTION_DATA * psConnection)
+{
+	IMG_UINT32 *ui32ArgsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) + 0;
+
+	if (unlikely(psHTBLogIN->ui32NumArgs > HTB_LOG_MAX_PARAMS))
+	{
+		psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto HTBLog_exit;
+	}
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psHTBLogIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) psHTBLogIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psHTBLogOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto HTBLog_exit;
+			}
+		}
+	}
+
+	if (psHTBLogIN->ui32NumArgs != 0)
+	{
+		ui32ArgsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset += psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ArgsInt,
+		     (const void __user *)psHTBLogIN->pui32Args,
+		     psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psHTBLogOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto HTBLog_exit;
+		}
+	}
+
+	psHTBLogOUT->eError =
+	    HTBLogKM(psHTBLogIN->ui32PID,
+		     psHTBLogIN->ui64TimeStamp,
+		     psHTBLogIN->ui32SF, psHTBLogIN->ui32NumArgs, ui32ArgsInt);
+
+ HTBLog_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static POS_LOCK pHTBUFFERBridgeLock;
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_HTBUFFER_BRIDGE */
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+PVRSRV_ERROR InitHTBUFFERBridge(void);
+PVRSRV_ERROR DeinitHTBUFFERBridge(void);
+
+/*
+ * Register all HTBUFFER functions with services
+ */
+PVRSRV_ERROR InitHTBUFFERBridge(void)
+{
+	PVR_LOGR_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock), "OSLockCreate");
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER,
+			      PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL,
+			      PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER,
+			      PVRSRV_BRIDGE_HTBUFFER_HTBLOG, PVRSRVBridgeHTBLog,
+			      pHTBUFFERBridgeLock, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all HTBUFFER functions from services
+ */
+PVRSRV_ERROR DeinitHTBUFFERBridge(void)
+{
+	PVR_LOGR_IF_ERROR(OSLockDestroy(pHTBUFFERBridgeLock), "OSLockDestroy");
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER,
+				PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER,
+				PVRSRV_BRIDGE_HTBUFFER_HTBLOG);
+
+	return PVRSRV_OK;
+}
+#else /* EXCLUDE_HTBUFFER_BRIDGE */
+/* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE: when it is defined,
+ * the dispatch table is not populated with this bridge's functions.
+ */
+#define InitHTBUFFERBridge() \
+	PVRSRV_OK
+
+#define DeinitHTBUFFERBridge() \
+	PVRSRV_OK
+
+#endif /* EXCLUDE_HTBUFFER_BRIDGE */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_mm_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_mm_bridge.c
new file mode 100644
index 0000000..497d999
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_mm_bridge.c
@@ -0,0 +1,3099 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for mm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+#include "physmem_tdsecbuf.h"
+#include "devicemem_utils.h"
+
+#include "common_mm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
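+/* Intentionally a no-op: the cross-process PMR-export handle allocated in
+ * PVRSRVBridgePMRExportPMR() below uses this as its release function, while
+ * the matching per-connection handle owns the real cleanup via
+ * PMRUnexportPMR().
+ */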
+static PVRSRV_ERROR ReleasePMRExport(void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	return PVRSRV_OK;
+}
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry,
+			 PVRSRV_BRIDGE_IN_PMREXPORTPMR * psPMRExportPMRIN,
+			 PVRSRV_BRIDGE_OUT_PMREXPORTPMR * psPMRExportPMROUT,
+			 CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMR = psPMRExportPMRIN->hPMR;
+	PMR *psPMRInt = NULL;
+	PMR_EXPORT *psPMRExportInt = NULL;
+	IMG_HANDLE hPMRExportInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psPMRExportPMROUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRExportPMR_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psPMRExportPMROUT->eError =
+	    PMRExportPMR(psPMRInt,
+			 &psPMRExportInt,
+			 &psPMRExportPMROUT->ui64Size,
+			 &psPMRExportPMROUT->ui32Log2Contig,
+			 &psPMRExportPMROUT->ui64Password);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
+	{
+		goto PMRExportPMR_exit;
+	}
+
+	/*
+	 * For cases where we need a cross-process handle we actually allocate two.
+	 *
+	 * The first one is a connection-specific handle and it is given the real
+	 * release function. This handle does *NOT* get returned to the caller. Its
+	 * purpose is to release any leaked resources when a client is bad or
+	 * terminates abnormally. If we didn't do this then the resource
+	 * wouldn't be freed until driver unload. If the resource is freed normally,
+	 * this handle can be looked up via the cross-process handle and then
+	 * released accordingly.
+	 *
+	 * The second one is a cross-process handle and it is given a no-op release
+	 * function. This handle does get returned to the caller.
+	 */
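+	/*
+	 * Concretely, in the code below:
+	 *   hPMRExportInt (per-connection base)  -> released via PMRUnexportPMR
+	 *   hPMRExport    (KERNEL_HANDLE_BASE)   -> released via the no-op
+	 *                                           ReleasePMRExport
+	 * Only hPMRExport is copied into psPMRExportPMROUT for the caller.
+	 */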
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psPMRExportPMROUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->
+				      psHandleBase, &hPMRExportInt,
+				      (void *)psPMRExportInt,
+				      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) & PMRUnexportPMR);
+	if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto PMRExportPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Lock over handle creation. */
+	LockHandle(KERNEL_HANDLE_BASE);
+	psPMRExportPMROUT->eError =
+	    PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE,
+				      &psPMRExportPMROUT->hPMRExport,
+				      (void *)psPMRExportInt,
+				      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) & ReleasePMRExport);
+	if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(KERNEL_HANDLE_BASE);
+		goto PMRExportPMR_exit;
+	}
+	/* Release now we have created handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+ PMRExportPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psPMRExportPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRExportPMROUT->hPMRExport)
+		{
+			PVRSRV_ERROR eError;
+
+			/* Lock over handle creation cleanup. */
+			LockHandle(KERNEL_HANDLE_BASE);
+
+			eError = PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+							     (IMG_HANDLE)
+							     psPMRExportPMROUT->
+							     hPMRExport,
+							     PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+			if (unlikely
+			    ((eError != PVRSRV_OK)
+			     && (eError != PVRSRV_ERROR_RETRY)))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVBridgePMRExportPMR: %s",
+					 PVRSRVGetErrorString(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK)
+				   || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Release now we have cleaned up creation handles. */
+			UnlockHandle(KERNEL_HANDLE_BASE);
+
+		}
+
+		if (hPMRExportInt)
+		{
+			PVRSRV_ERROR eError;
+			/* Lock over handle creation cleanup. */
+			LockHandle(psConnection->psProcessHandleBase->
+				   psHandleBase);
+
+			eError =
+			    PVRSRVReleaseHandleUnlocked(psConnection->
+							psProcessHandleBase->
+							psHandleBase,
+							hPMRExportInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+			if ((eError != PVRSRV_OK)
+			    && (eError != PVRSRV_ERROR_RETRY))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVBridgePMRExportPMR: %s",
+					 PVRSRVGetErrorString(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK)
+				   || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psPMRExportInt = NULL;
+			/* Release now we have cleaned up creation handles. */
+			UnlockHandle(psConnection->psProcessHandleBase->
+				     psHandleBase);
+		}
+
+		if (psPMRExportInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnexportPMR(psPMRExportInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry,
+			   PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR * psPMRUnexportPMRIN,
+			   PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *
+			   psPMRUnexportPMROUT, CONNECTION_DATA * psConnection)
+{
+	PMR_EXPORT *psPMRExportInt = NULL;
+	IMG_HANDLE hPMRExportInt = NULL;
+
+	/* Lock over handle destruction. */
+	LockHandle(KERNEL_HANDLE_BASE);
+	psPMRUnexportPMROUT->eError =
+	    PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+				       (void **)&psPMRExportInt,
+				       (IMG_HANDLE) psPMRUnexportPMRIN->
+				       hPMRExport,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+				       IMG_FALSE);
+	if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgePMRUnexportPMR: %s",
+			 PVRSRVGetErrorString(psPMRUnexportPMROUT->eError)));
+	}
+	PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+	/*
+	 * Find the connection-specific handle that represents the same data
+	 * as the cross-process handle, since releasing it will actually call
+	 * the data's real release function (see the function where the
+	 * cross-process handle is allocated for more details).
+	 */
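+	/* Teardown order in this function: (1) the export data was looked up
+	 * above via the cross-process handle, (2) the matching per-connection
+	 * handle is found and released here, which runs the real
+	 * PMRUnexportPMR cleanup, and (3) the cross-process handle itself is
+	 * released last.
+	 */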
+	psPMRUnexportPMROUT->eError =
+	    PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase->
+				     psHandleBase, &hPMRExportInt,
+				     psPMRExportInt,
+				     PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgePMRUnexportPMR: %s",
+			 PVRSRVGetErrorString(psPMRUnexportPMROUT->eError)));
+	}
+	PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+
+	psPMRUnexportPMROUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->
+					psHandleBase, hPMRExportInt,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	if (unlikely
+	    ((psPMRUnexportPMROUT->eError != PVRSRV_OK)
+	     && (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgePMRUnexportPMR: %s",
+			 PVRSRVGetErrorString(psPMRUnexportPMROUT->eError)));
+	}
+	PVR_ASSERT((psPMRUnexportPMROUT->eError == PVRSRV_OK) ||
+		   (psPMRUnexportPMROUT->eError == PVRSRV_ERROR_RETRY));
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Lock over handle destruction. */
+	LockHandle(KERNEL_HANDLE_BASE);
+
+	psPMRUnexportPMROUT->eError =
+	    PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+					(IMG_HANDLE) psPMRUnexportPMRIN->
+					hPMRExport,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	if (unlikely
+	    ((psPMRUnexportPMROUT->eError != PVRSRV_OK)
+	     && (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgePMRUnexportPMR: %s",
+			 PVRSRVGetErrorString(psPMRUnexportPMROUT->eError)));
+		UnlockHandle(KERNEL_HANDLE_BASE);
+		goto PMRUnexportPMR_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+ PMRUnexportPMR_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry,
+		      PVRSRV_BRIDGE_IN_PMRGETUID * psPMRGetUIDIN,
+		      PVRSRV_BRIDGE_OUT_PMRGETUID * psPMRGetUIDOUT,
+		      CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMR = psPMRGetUIDIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psPMRGetUIDOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psPMRGetUIDOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRGetUID_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psPMRGetUIDOUT->eError = PMRGetUID(psPMRInt, &psPMRGetUIDOUT->ui64UID);
+
+ PMRGetUID_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRMakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry,
+				     PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *
+				     psPMRMakeLocalImportHandleIN,
+				     PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE
+				     * psPMRMakeLocalImportHandleOUT,
+				     CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hBuffer = psPMRMakeLocalImportHandleIN->hBuffer;
+	PMR *psBufferInt = NULL;
+	PMR *psExtMemInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psPMRMakeLocalImportHandleOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psBufferInt,
+				       hBuffer,
+				       PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+				       IMG_TRUE);
+	if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRMakeLocalImportHandle_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psPMRMakeLocalImportHandleOUT->eError =
+	    PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK))
+	{
+		goto PMRMakeLocalImportHandle_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psPMRMakeLocalImportHandleOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->
+				      psHandleBase,
+				      &psPMRMakeLocalImportHandleOUT->hExtMem,
+				      (void *)psExtMemInt,
+				      PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      PMRUnmakeLocalImportHandle);
+	if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto PMRMakeLocalImportHandle_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ PMRMakeLocalImportHandle_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psBufferInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hBuffer,
+					    PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+	{
+		if (psExtMemInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnmakeLocalImportHandle(psExtMemInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry,
+				       PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE
+				       * psPMRUnmakeLocalImportHandleIN,
+				       PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE
+				       * psPMRUnmakeLocalImportHandleOUT,
+				       CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psPMRUnmakeLocalImportHandleOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->
+					psHandleBase,
+					(IMG_HANDLE)
+					psPMRUnmakeLocalImportHandleIN->hExtMem,
+					PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+	if (unlikely
+	    ((psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_OK)
+	     && (psPMRUnmakeLocalImportHandleOUT->eError !=
+		 PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgePMRUnmakeLocalImportHandle: %s",
+			 PVRSRVGetErrorString(psPMRUnmakeLocalImportHandleOUT->
+					      eError)));
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto PMRUnmakeLocalImportHandle_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ PMRUnmakeLocalImportHandle_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+			 PVRSRV_BRIDGE_IN_PMRIMPORTPMR * psPMRImportPMRIN,
+			 PVRSRV_BRIDGE_OUT_PMRIMPORTPMR * psPMRImportPMROUT,
+			 CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMRExport = psPMRImportPMRIN->hPMRExport;
+	PMR_EXPORT *psPMRExportInt = NULL;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(KERNEL_HANDLE_BASE);
+
+	/* Look up the address from the handle */
+	psPMRImportPMROUT->eError =
+	    PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+				       (void **)&psPMRExportInt,
+				       hPMRExport,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+				       IMG_TRUE);
+	if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(KERNEL_HANDLE_BASE);
+		goto PMRImportPMR_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+	psPMRImportPMROUT->eError =
+	    PhysmemImportPMR(psConnection, OSGetDevData(psConnection),
+			     psPMRExportInt,
+			     psPMRImportPMRIN->ui64uiPassword,
+			     psPMRImportPMRIN->ui64uiSize,
+			     psPMRImportPMRIN->ui32uiLog2Contig, &psPMRInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK))
+	{
+		goto PMRImportPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPMRImportPMROUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psPMRImportPMROUT->hPMR,
+				      (void *)psPMRInt,
+				      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) & PMRUnrefPMR);
+	if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRImportPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ PMRImportPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(KERNEL_HANDLE_BASE);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRExportInt)
+	{
+		PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+					    hPMRExport,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+	if (psPMRImportPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnrefPMR(psPMRInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRLocalImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+			      PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *
+			      psPMRLocalImportPMRIN,
+			      PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *
+			      psPMRLocalImportPMROUT,
+			      CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hExtHandle = psPMRLocalImportPMRIN->hExtHandle;
+	PMR *psExtHandleInt = NULL;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Look up the address from the handle */
+	psPMRLocalImportPMROUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->
+				       psHandleBase, (void **)&psExtHandleInt,
+				       hExtHandle,
+				       PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+				       IMG_TRUE);
+	if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto PMRLocalImportPMR_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psPMRLocalImportPMROUT->eError =
+	    PMRLocalImportPMR(psExtHandleInt,
+			      &psPMRInt,
+			      &psPMRLocalImportPMROUT->uiSize,
+			      &psPMRLocalImportPMROUT->sAlign);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK))
+	{
+		goto PMRLocalImportPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPMRLocalImportPMROUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psPMRLocalImportPMROUT->hPMR,
+				      (void *)psPMRInt,
+				      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) & PMRUnrefPMR);
+	if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRLocalImportPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ PMRLocalImportPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psExtHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->
+					    psHandleBase, hExtHandle,
+					    PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	if (psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnrefPMR(psPMRInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry,
+			PVRSRV_BRIDGE_IN_PMRUNREFPMR * psPMRUnrefPMRIN,
+			PVRSRV_BRIDGE_OUT_PMRUNREFPMR * psPMRUnrefPMROUT,
+			CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPMRUnrefPMROUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psPMRUnrefPMRIN->hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	if (unlikely((psPMRUnrefPMROUT->eError != PVRSRV_OK) &&
+		     (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgePMRUnrefPMR: %s",
+			 PVRSRVGetErrorString(psPMRUnrefPMROUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRUnrefPMR_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ PMRUnrefPMR_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry,
+			      PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *
+			      psPMRUnrefUnlockPMRIN,
+			      PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *
+			      psPMRUnrefUnlockPMROUT,
+			      CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPMRUnrefUnlockPMROUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psPMRUnrefUnlockPMRIN->
+					hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	if (unlikely
+	    ((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK)
+	     && (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgePMRUnrefUnlockPMR: %s",
+			 PVRSRVGetErrorString(psPMRUnrefUnlockPMROUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRUnrefUnlockPMR_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ PMRUnrefUnlockPMR_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry,
+				   PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *
+				   psPhysmemNewRamBackedPMRIN,
+				   PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *
+				   psPhysmemNewRamBackedPMROUT,
+				   CONNECTION_DATA * psConnection)
+{
+	IMG_UINT32 *ui32MappingTableInt = NULL;
+	IMG_CHAR *uiAnnotationInt = NULL;
+	PMR *psPMRPtrInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks *
+	     sizeof(IMG_UINT32)) +
+	    (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength *
+	     sizeof(IMG_CHAR)) + 0;
+
+	if (unlikely
+	    (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks >
+	     PMR_MAX_SUPPORTED_PAGE_COUNT))
+	{
+		psPhysmemNewRamBackedPMROUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+
+	if (unlikely
+	    (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength >
+	     DEVMEM_ANNOTATION_MAX_LEN))
+	{
+		psPhysmemNewRamBackedPMROUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psPhysmemNewRamBackedPMRIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psPhysmemNewRamBackedPMROUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto PhysmemNewRamBackedPMR_exit;
+			}
+		}
+	}
+
+	if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks != 0)
+	{
+		ui32MappingTableInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32) >
+	    0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32MappingTableInt,
+		     (const void __user *)psPhysmemNewRamBackedPMRIN->
+		     pui32MappingTable,
+		     psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psPhysmemNewRamBackedPMROUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PhysmemNewRamBackedPMR_exit;
+		}
+	}
+	if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength != 0)
+	{
+		uiAnnotationInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psPhysmemNewRamBackedPMRIN->ui32AnnotationLength *
+		    sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength *
+	    sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiAnnotationInt,
+		     (const void __user *)psPhysmemNewRamBackedPMRIN->
+		     puiAnnotation,
+		     psPhysmemNewRamBackedPMRIN->ui32AnnotationLength *
+		     sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psPhysmemNewRamBackedPMROUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PhysmemNewRamBackedPMR_exit;
+		}
+		((IMG_CHAR *)
+		 uiAnnotationInt)[(psPhysmemNewRamBackedPMRIN->
+				   ui32AnnotationLength * sizeof(IMG_CHAR)) -
+				  1] = '\0';
+	}
+
+	psPhysmemNewRamBackedPMROUT->eError =
+	    PhysmemNewRamBackedPMR(psConnection, OSGetDevData(psConnection),
+				   psPhysmemNewRamBackedPMRIN->uiSize,
+				   psPhysmemNewRamBackedPMRIN->uiChunkSize,
+				   psPhysmemNewRamBackedPMRIN->
+				   ui32NumPhysChunks,
+				   psPhysmemNewRamBackedPMRIN->
+				   ui32NumVirtChunks, ui32MappingTableInt,
+				   psPhysmemNewRamBackedPMRIN->ui32Log2PageSize,
+				   psPhysmemNewRamBackedPMRIN->uiFlags,
+				   psPhysmemNewRamBackedPMRIN->
+				   ui32AnnotationLength, uiAnnotationInt,
+				   psPhysmemNewRamBackedPMRIN->ui32PID,
+				   &psPMRPtrInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK))
+	{
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPhysmemNewRamBackedPMROUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psPhysmemNewRamBackedPMROUT->hPMRPtr,
+				      (void *)psPMRPtrInt,
+				      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) & PMRUnrefPMR);
+	if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ PhysmemNewRamBackedPMR_exit:
+
+	if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnrefPMR(psPMRPtrInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePhysmemNewRamBackedLockedPMR(IMG_UINT32 ui32DispatchTableEntry,
+					 PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR
+					 * psPhysmemNewRamBackedLockedPMRIN,
+					 PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR
+					 * psPhysmemNewRamBackedLockedPMROUT,
+					 CONNECTION_DATA * psConnection)
+{
+	IMG_UINT32 *ui32MappingTableInt = NULL;
+	IMG_CHAR *uiAnnotationInt = NULL;
+	PMR *psPMRPtrInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks *
+	     sizeof(IMG_UINT32)) +
+	    (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength *
+	     sizeof(IMG_CHAR)) + 0;
+
+	if (unlikely
+	    (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks >
+	     PMR_MAX_SUPPORTED_PAGE_COUNT))
+	{
+		psPhysmemNewRamBackedLockedPMROUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto PhysmemNewRamBackedLockedPMR_exit;
+	}
+
+	if (unlikely
+	    (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength >
+	     DEVMEM_ANNOTATION_MAX_LEN))
+	{
+		psPhysmemNewRamBackedLockedPMROUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto PhysmemNewRamBackedLockedPMR_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psPhysmemNewRamBackedLockedPMRIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psPhysmemNewRamBackedLockedPMRIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psPhysmemNewRamBackedLockedPMROUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto PhysmemNewRamBackedLockedPMR_exit;
+			}
+		}
+	}
+
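+	/* Carve per-array regions out of the scratch buffer; ui32NextOffset
+	 * tracks how much has been handed out and is asserted against
+	 * ui32BufferSize at exit.
+	 */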
+	if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks != 0)
+	{
+		ui32MappingTableInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the mapping table over from user space */
+	if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32MappingTableInt,
+		     (const void __user *)psPhysmemNewRamBackedLockedPMRIN->
+		     pui32MappingTable,
+		     psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psPhysmemNewRamBackedLockedPMROUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PhysmemNewRamBackedLockedPMR_exit;
+		}
+	}
+	if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength != 0)
+	{
+		uiAnnotationInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength *
+		    sizeof(IMG_CHAR);
+	}
+
+	/* Copy the annotation over from user space */
+	if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength *
+	    sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiAnnotationInt,
+		     (const void __user *)psPhysmemNewRamBackedLockedPMRIN->
+		     puiAnnotation,
+		     psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength *
+		     sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psPhysmemNewRamBackedLockedPMROUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PhysmemNewRamBackedLockedPMR_exit;
+		}
+		((IMG_CHAR *)
+		 uiAnnotationInt)[(psPhysmemNewRamBackedLockedPMRIN->
+				   ui32AnnotationLength * sizeof(IMG_CHAR)) -
+				  1] = '\0';
+	}
+
+	psPhysmemNewRamBackedLockedPMROUT->eError =
+	    PhysmemNewRamBackedLockedPMR(psConnection,
+					 OSGetDevData(psConnection),
+					 psPhysmemNewRamBackedLockedPMRIN->
+					 uiSize,
+					 psPhysmemNewRamBackedLockedPMRIN->
+					 uiChunkSize,
+					 psPhysmemNewRamBackedLockedPMRIN->
+					 ui32NumPhysChunks,
+					 psPhysmemNewRamBackedLockedPMRIN->
+					 ui32NumVirtChunks, ui32MappingTableInt,
+					 psPhysmemNewRamBackedLockedPMRIN->
+					 ui32Log2PageSize,
+					 psPhysmemNewRamBackedLockedPMRIN->
+					 uiFlags,
+					 psPhysmemNewRamBackedLockedPMRIN->
+					 ui32AnnotationLength, uiAnnotationInt,
+					 psPhysmemNewRamBackedLockedPMRIN->
+					 ui32PID, &psPMRPtrInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK))
+	{
+		goto PhysmemNewRamBackedLockedPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPhysmemNewRamBackedLockedPMROUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psPhysmemNewRamBackedLockedPMROUT->
+				      hPMRPtr, (void *)psPMRPtrInt,
+				      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE)&PMRUnrefUnlockPMR);
+	if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PhysmemNewRamBackedLockedPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ PhysmemNewRamBackedLockedPMR_exit:
+
+	if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnrefUnlockPMR(psPMRPtrInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePhysmemImportSecBuf(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF *
+				psPhysmemImportSecBufIN,
+				PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF *
+				psPhysmemImportSecBufOUT,
+				CONNECTION_DATA * psConnection)
+{
+	PMR *psPMRPtrInt = NULL;
+
+	psPhysmemImportSecBufOUT->eError =
+	    PhysmemImportSecBuf(psConnection, OSGetDevData(psConnection),
+				psPhysmemImportSecBufIN->uiSize,
+				psPhysmemImportSecBufIN->ui32Log2Align,
+				psPhysmemImportSecBufIN->uiFlags,
+				&psPMRPtrInt,
+				&psPhysmemImportSecBufOUT->ui64SecBufHandle);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPhysmemImportSecBufOUT->eError != PVRSRV_OK))
+	{
+		goto PhysmemImportSecBuf_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPhysmemImportSecBufOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psPhysmemImportSecBufOUT->hPMRPtr,
+				      (void *)psPMRPtrInt,
+				      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (unlikely(psPhysmemImportSecBufOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PhysmemImportSecBuf_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ PhysmemImportSecBuf_exit:
+
+	if (psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnrefPMR(psPMRPtrInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	return 0;
+}
+
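+/* Pin the physical backing of a PMR: look the PMR up from its handle,
+ * call DevmemIntPin(), then drop the lookup reference.
+ */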
+static IMG_INT
+PVRSRVBridgeDevmemIntPin(IMG_UINT32 ui32DispatchTableEntry,
+			 PVRSRV_BRIDGE_IN_DEVMEMINTPIN * psDevmemIntPinIN,
+			 PVRSRV_BRIDGE_OUT_DEVMEMINTPIN * psDevmemIntPinOUT,
+			 CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMR = psDevmemIntPinIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntPinOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntPinOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntPin_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntPinOUT->eError = DevmemIntPin(psPMRInt);
+
+ DevmemIntPin_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnpin(IMG_UINT32 ui32DispatchTableEntry,
+			   PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN * psDevmemIntUnpinIN,
+			   PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *
+			   psDevmemIntUnpinOUT, CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMR = psDevmemIntUnpinIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntUnpinOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntUnpinOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntUnpin_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntUnpinOUT->eError = DevmemIntUnpin(psPMRInt);
+
+ DevmemIntUnpin_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPinValidate(IMG_UINT32 ui32DispatchTableEntry,
+				 PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *
+				 psDevmemIntPinValidateIN,
+				 PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *
+				 psDevmemIntPinValidateOUT,
+				 CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hMapping = psDevmemIntPinValidateIN->hMapping;
+	DEVMEMINT_MAPPING *psMappingInt = NULL;
+	IMG_HANDLE hPMR = psDevmemIntPinValidateIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntPinValidateOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psMappingInt,
+				       hMapping,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntPinValidate_exit;
+	}
+
+	/* Look up the address from the handle */
+	psDevmemIntPinValidateOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntPinValidate_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntPinValidateOUT->eError =
+	    DevmemIntPinValidate(psMappingInt, psPMRInt);
+
+ DevmemIntPinValidate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psMappingInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hMapping,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+	}
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnpinInvalidate(IMG_UINT32 ui32DispatchTableEntry,
+				     PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *
+				     psDevmemIntUnpinInvalidateIN,
+				     PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE
+				     * psDevmemIntUnpinInvalidateOUT,
+				     CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hMapping = psDevmemIntUnpinInvalidateIN->hMapping;
+	DEVMEMINT_MAPPING *psMappingInt = NULL;
+	IMG_HANDLE hPMR = psDevmemIntUnpinInvalidateIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntUnpinInvalidateOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psMappingInt,
+				       hMapping,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntUnpinInvalidate_exit;
+	}
+
+	/* Look up the address from the handle */
+	psDevmemIntUnpinInvalidateOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntUnpinInvalidate_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntUnpinInvalidateOUT->eError =
+	    DevmemIntUnpinInvalidate(psMappingInt, psPMRInt);
+
+ DevmemIntUnpinInvalidate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psMappingInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hMapping,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+	}
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *
+			       psDevmemIntCtxCreateIN,
+			       PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *
+			       psDevmemIntCtxCreateOUT,
+			       CONNECTION_DATA * psConnection)
+{
+	DEVMEMINT_CTX *psDevMemServerContextInt = NULL;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL;
+
+	psDevmemIntCtxCreateOUT->eError =
+	    DevmemIntCtxCreate(psConnection, OSGetDevData(psConnection),
+			       psDevmemIntCtxCreateIN->bbKernelMemoryCtx,
+			       &psDevMemServerContextInt,
+			       &hPrivDataInt,
+			       &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize);
+	/* Exit early if bridged call fails */
+	if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK))
+	{
+		goto DevmemIntCtxCreate_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntCtxCreateOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psDevmemIntCtxCreateOUT->
+				      hDevMemServerContext,
+				      (void *)psDevMemServerContextInt,
+				      PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE)&DevmemIntCtxDestroy);
+	if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntCtxCreate_exit;
+	}
+
+	psDevmemIntCtxCreateOUT->eError =
+	    PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+					 &psDevmemIntCtxCreateOUT->hPrivData,
+					 (void *)hPrivDataInt,
+					 PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+					 PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					 psDevmemIntCtxCreateOUT->
+					 hDevMemServerContext);
+	if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntCtxCreate_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ DevmemIntCtxCreate_exit:
+
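+	/* On failure, undo in reverse order: if the context handle was
+	 * created, releasing it destroys the context (and the private-data
+	 * sub-handle with it); otherwise destroy the raw context directly.
+	 */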
+	if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+	{
+		if (psDevmemIntCtxCreateOUT->hDevMemServerContext)
+		{
+			PVRSRV_ERROR eError;
+
+			/* Lock over handle creation cleanup. */
+			LockHandle(psConnection->psHandleBase);
+
+			eError =
+			    PVRSRVReleaseHandleUnlocked(psConnection->
+							psHandleBase,
+							(IMG_HANDLE)
+							psDevmemIntCtxCreateOUT->
+							hDevMemServerContext,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+			if (unlikely
+			    ((eError != PVRSRV_OK)
+			     && (eError != PVRSRV_ERROR_RETRY)))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVBridgeDevmemIntCtxCreate: %s",
+					 PVRSRVGetErrorString(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK)
+				   || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psDevMemServerContextInt = NULL;
+			/* Release now we have cleaned up creation handles. */
+			UnlockHandle(psConnection->psHandleBase);
+
+		}
+
+		if (psDevMemServerContextInt)
+		{
+			DevmemIntCtxDestroy(psDevMemServerContextInt);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *
+				psDevmemIntCtxDestroyIN,
+				PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *
+				psDevmemIntCtxDestroyOUT,
+				CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntCtxDestroyOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntCtxDestroyIN->
+					hDevmemServerContext,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	if (unlikely
+	    ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK)
+	     && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeDevmemIntCtxDestroy: %s",
+			 PVRSRVGetErrorString(psDevmemIntCtxDestroyOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntCtxDestroy_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ DevmemIntCtxDestroy_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *
+				psDevmemIntHeapCreateIN,
+				PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *
+				psDevmemIntHeapCreateOUT,
+				CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx;
+	DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+	DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntHeapCreateOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psDevmemCtxInt,
+				       hDevmemCtx,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntHeapCreate_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntHeapCreateOUT->eError =
+	    DevmemIntHeapCreate(psDevmemCtxInt,
+				psDevmemIntHeapCreateIN->sHeapBaseAddr,
+				psDevmemIntHeapCreateIN->uiHeapLength,
+				psDevmemIntHeapCreateIN->ui32Log2DataPageSize,
+				&psDevmemHeapPtrInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK))
+	{
+		goto DevmemIntHeapCreate_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntHeapCreateOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psDevmemIntHeapCreateOUT->hDevmemHeapPtr,
+				      (void *)psDevmemHeapPtrInt,
+				      PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE)&DevmemIntHeapDestroy);
+	if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntHeapCreate_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ DevmemIntHeapCreate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psDevmemCtxInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hDevmemCtx,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+	{
+		if (psDevmemHeapPtrInt)
+		{
+			DevmemIntHeapDestroy(psDevmemHeapPtrInt);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry,
+				 PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *
+				 psDevmemIntHeapDestroyIN,
+				 PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *
+				 psDevmemIntHeapDestroyOUT,
+				 CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntHeapDestroyOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntHeapDestroyIN->
+					hDevmemHeap,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+	if (unlikely
+	    ((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK)
+	     && (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeDevmemIntHeapDestroy: %s",
+			 PVRSRVGetErrorString(psDevmemIntHeapDestroyOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntHeapDestroy_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ DevmemIntHeapDestroy_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry,
+			    PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *
+			    psDevmemIntMapPMRIN,
+			    PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *
+			    psDevmemIntMapPMROUT,
+			    CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap;
+	DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL;
+	IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation;
+	DEVMEMINT_RESERVATION *psReservationInt = NULL;
+	IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR;
+	PMR *psPMRInt = NULL;
+	DEVMEMINT_MAPPING *psMappingInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntMapPMROUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psDevmemServerHeapInt,
+				       hDevmemServerHeap,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntMapPMR_exit;
+	}
+
+	/* Look up the address from the handle */
+	psDevmemIntMapPMROUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psReservationInt,
+				       hReservation,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntMapPMR_exit;
+	}
+
+	/* Look up the address from the handle */
+	psDevmemIntMapPMROUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntMapPMR_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntMapPMROUT->eError =
+	    DevmemIntMapPMR(psDevmemServerHeapInt,
+			    psReservationInt,
+			    psPMRInt,
+			    psDevmemIntMapPMRIN->uiMapFlags, &psMappingInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+	{
+		goto DevmemIntMapPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntMapPMROUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psDevmemIntMapPMROUT->hMapping,
+				      (void *)psMappingInt,
+				      PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE)&DevmemIntUnmapPMR);
+	if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntMapPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ DevmemIntMapPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psDevmemServerHeapInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hDevmemServerHeap,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+	}
+
+	/* Unreference the previously looked up handle */
+	if (psReservationInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hReservation,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+	}
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+	{
+		if (psMappingInt)
+		{
+			DevmemIntUnmapPMR(psMappingInt);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry,
+			      PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *
+			      psDevmemIntUnmapPMRIN,
+			      PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *
+			      psDevmemIntUnmapPMROUT,
+			      CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntUnmapPMROUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntUnmapPMRIN->
+					hMapping,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+	if (unlikely
+	    ((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK)
+	     && (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeDevmemIntUnmapPMR: %s",
+			 PVRSRVGetErrorString(psDevmemIntUnmapPMROUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntUnmapPMR_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ DevmemIntUnmapPMR_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry,
+				  PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *
+				  psDevmemIntReserveRangeIN,
+				  PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *
+				  psDevmemIntReserveRangeOUT,
+				  CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hDevmemServerHeap =
+	    psDevmemIntReserveRangeIN->hDevmemServerHeap;
+	DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL;
+	DEVMEMINT_RESERVATION *psReservationInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntReserveRangeOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psDevmemServerHeapInt,
+				       hDevmemServerHeap,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntReserveRange_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntReserveRangeOUT->eError =
+	    DevmemIntReserveRange(psDevmemServerHeapInt,
+				  psDevmemIntReserveRangeIN->sAddress,
+				  psDevmemIntReserveRangeIN->uiLength,
+				  &psReservationInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK))
+	{
+		goto DevmemIntReserveRange_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntReserveRangeOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psDevmemIntReserveRangeOUT->hReservation,
+				      (void *)psReservationInt,
+				      PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE)&DevmemIntUnreserveRange);
+	if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntReserveRange_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ DevmemIntReserveRange_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psDevmemServerHeapInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hDevmemServerHeap,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+	{
+		if (psReservationInt)
+		{
+			DevmemIntUnreserveRange(psReservationInt);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry,
+				    PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *
+				    psDevmemIntUnreserveRangeIN,
+				    PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *
+				    psDevmemIntUnreserveRangeOUT,
+				    CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntUnreserveRangeOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE)
+					psDevmemIntUnreserveRangeIN->
+					hReservation,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+	if (unlikely
+	    ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK)
+	     && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeDevmemIntUnreserveRange: %s",
+			 PVRSRVGetErrorString(psDevmemIntUnreserveRangeOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntUnreserveRange_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ DevmemIntUnreserveRange_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry,
+			    PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *
+			    psChangeSparseMemIN,
+			    PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *
+			    psChangeSparseMemOUT,
+			    CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap;
+	DEVMEMINT_HEAP *psSrvDevMemHeapInt = NULL;
+	IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR;
+	PMR *psPMRInt = NULL;
+	IMG_UINT32 *ui32AllocPageIndicesInt = NULL;
+	IMG_UINT32 *ui32FreePageIndicesInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) +
+	    (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0;
+
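+	/* Validate both page counts against PMR_MAX_SUPPORTED_PAGE_COUNT
+	 * before the buffer size computed above is used for any allocation.
+	 */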
+	if (unlikely
+	    (psChangeSparseMemIN->ui32AllocPageCount >
+	     PMR_MAX_SUPPORTED_PAGE_COUNT))
+	{
+		psChangeSparseMemOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto ChangeSparseMem_exit;
+	}
+
+	if (unlikely
+	    (psChangeSparseMemIN->ui32FreePageCount >
+	     PMR_MAX_SUPPORTED_PAGE_COUNT))
+	{
+		psChangeSparseMemOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto ChangeSparseMem_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psChangeSparseMemIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psChangeSparseMemIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psChangeSparseMemOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto ChangeSparseMem_exit;
+			}
+		}
+	}
+
+	if (psChangeSparseMemIN->ui32AllocPageCount != 0)
+	{
+		ui32AllocPageIndicesInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psChangeSparseMemIN->ui32AllocPageCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the allocation page indices over from user space */
+	if (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32AllocPageIndicesInt,
+		     (const void __user *)psChangeSparseMemIN->
+		     pui32AllocPageIndices,
+		     psChangeSparseMemIN->ui32AllocPageCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psChangeSparseMemOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto ChangeSparseMem_exit;
+		}
+	}
+	if (psChangeSparseMemIN->ui32FreePageCount != 0)
+	{
+		ui32FreePageIndicesInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the free page indices over from user space */
+	if (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32FreePageIndicesInt,
+		     (const void __user *)psChangeSparseMemIN->
+		     pui32FreePageIndices,
+		     psChangeSparseMemIN->ui32FreePageCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psChangeSparseMemOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto ChangeSparseMem_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psChangeSparseMemOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSrvDevMemHeapInt,
+				       hSrvDevMemHeap,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+				       IMG_TRUE);
+	if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto ChangeSparseMem_exit;
+	}
+
+	/* Look up the address from the handle */
+	psChangeSparseMemOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto ChangeSparseMem_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psChangeSparseMemOUT->eError =
+	    DevmemIntChangeSparse(psSrvDevMemHeapInt,
+				  psPMRInt,
+				  psChangeSparseMemIN->ui32AllocPageCount,
+				  ui32AllocPageIndicesInt,
+				  psChangeSparseMemIN->ui32FreePageCount,
+				  ui32FreePageIndicesInt,
+				  psChangeSparseMemIN->ui32SparseFlags,
+				  psChangeSparseMemIN->uiFlags,
+				  psChangeSparseMemIN->sDevVAddr,
+				  psChangeSparseMemIN->ui64CPUVAddr);
+
+ ChangeSparseMem_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSrvDevMemHeapInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSrvDevMemHeap,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+	}
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPages(IMG_UINT32 ui32DispatchTableEntry,
+			      PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *
+			      psDevmemIntMapPagesIN,
+			      PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *
+			      psDevmemIntMapPagesOUT,
+			      CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hReservation = psDevmemIntMapPagesIN->hReservation;
+	DEVMEMINT_RESERVATION *psReservationInt = NULL;
+	IMG_HANDLE hPMR = psDevmemIntMapPagesIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntMapPagesOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psReservationInt,
+				       hReservation,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntMapPages_exit;
+	}
+
+	/* Look up the address from the handle */
+	psDevmemIntMapPagesOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntMapPages_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntMapPagesOUT->eError =
+	    DevmemIntMapPages(psReservationInt,
+			      psPMRInt,
+			      psDevmemIntMapPagesIN->ui32PageCount,
+			      psDevmemIntMapPagesIN->ui32PhysicalPgOffset,
+			      psDevmemIntMapPagesIN->uiFlags,
+			      psDevmemIntMapPagesIN->sDevVAddr);
+
+ DevmemIntMapPages_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psReservationInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hReservation,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+	}
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *
+				psDevmemIntUnmapPagesIN,
+				PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *
+				psDevmemIntUnmapPagesOUT,
+				CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hReservation = psDevmemIntUnmapPagesIN->hReservation;
+	DEVMEMINT_RESERVATION *psReservationInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntUnmapPagesOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psReservationInt,
+				       hReservation,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntUnmapPagesOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntUnmapPages_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntUnmapPagesOUT->eError =
+	    DevmemIntUnmapPages(psReservationInt,
+				psDevmemIntUnmapPagesIN->sDevVAddr,
+				psDevmemIntUnmapPagesIN->ui32PageCount);
+
+ DevmemIntUnmapPages_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psReservationInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hReservation,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry,
+				  PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *
+				  psDevmemIsVDevAddrValidIN,
+				  PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *
+				  psDevmemIsVDevAddrValidOUT,
+				  CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hDevmemCtx = psDevmemIsVDevAddrValidIN->hDevmemCtx;
+	DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIsVDevAddrValidOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psDevmemCtxInt,
+				       hDevmemCtx,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIsVDevAddrValidOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIsVDevAddrValid_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIsVDevAddrValidOUT->eError =
+	    DevmemIntIsVDevAddrValid(psConnection, OSGetDevData(psConnection),
+				     psDevmemCtxInt,
+				     psDevmemIsVDevAddrValidIN->sAddress);
+
+ DevmemIsVDevAddrValid_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psDevmemCtxInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hDevmemCtx,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigCount(IMG_UINT32 ui32DispatchTableEntry,
+				   PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *
+				   psHeapCfgHeapConfigCountIN,
+				   PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *
+				   psHeapCfgHeapConfigCountOUT,
+				   CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psHeapCfgHeapConfigCountIN);
+
+	psHeapCfgHeapConfigCountOUT->eError =
+	    HeapCfgHeapConfigCount(psConnection, OSGetDevData(psConnection),
+				   &psHeapCfgHeapConfigCountOUT->
+				   ui32NumHeapConfigs);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapCount(IMG_UINT32 ui32DispatchTableEntry,
+			     PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *
+			     psHeapCfgHeapCountIN,
+			     PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *
+			     psHeapCfgHeapCountOUT,
+			     CONNECTION_DATA * psConnection)
+{
+
+	psHeapCfgHeapCountOUT->eError =
+	    HeapCfgHeapCount(psConnection, OSGetDevData(psConnection),
+			     psHeapCfgHeapCountIN->ui32HeapConfigIndex,
+			     &psHeapCfgHeapCountOUT->ui32NumHeaps);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry,
+				  PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *
+				  psHeapCfgHeapConfigNameIN,
+				  PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *
+				  psHeapCfgHeapConfigNameOUT,
+				  CONNECTION_DATA * psConnection)
+{
+	IMG_CHAR *puiHeapConfigNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz *
+	     sizeof(IMG_CHAR)) + 0;
+
+	if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz >
+	    DEVMEM_HEAPNAME_MAXLENGTH)
+	{
+		psHeapCfgHeapConfigNameOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto HeapCfgHeapConfigName_exit;
+	}
+
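+	/* The heap config name is written straight back into the
+	 * caller-supplied buffer, so echo the user pointer through to the
+	 * OUT structure for the copy-to-user below.
+	 */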
+	psHeapCfgHeapConfigNameOUT->puiHeapConfigName =
+	    psHeapCfgHeapConfigNameIN->puiHeapConfigName;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psHeapCfgHeapConfigNameIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psHeapCfgHeapConfigNameOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto HeapCfgHeapConfigName_exit;
+			}
+		}
+	}
+
+	if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz != 0)
+	{
+		puiHeapConfigNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz *
+		    sizeof(IMG_CHAR);
+	}
+
+	psHeapCfgHeapConfigNameOUT->eError =
+	    HeapCfgHeapConfigName(psConnection, OSGetDevData(psConnection),
+				  psHeapCfgHeapConfigNameIN->
+				  ui32HeapConfigIndex,
+				  psHeapCfgHeapConfigNameIN->
+				  ui32HeapConfigNameBufSz,
+				  puiHeapConfigNameInt);
+
+	if ((psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz *
+	     sizeof(IMG_CHAR)) > 0)
+	{
+		if (unlikely
+		    (OSCopyToUser
+		     (NULL,
+		      (void __user *)psHeapCfgHeapConfigNameOUT->
+		      puiHeapConfigName, puiHeapConfigNameInt,
+		      (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz *
+		       sizeof(IMG_CHAR))) != PVRSRV_OK))
+		{
+			psHeapCfgHeapConfigNameOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto HeapCfgHeapConfigName_exit;
+		}
+	}
+
+ HeapCfgHeapConfigName_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *
+			       psHeapCfgHeapDetailsIN,
+			       PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *
+			       psHeapCfgHeapDetailsOUT,
+			       CONNECTION_DATA * psConnection)
+{
+	IMG_CHAR *puiHeapNameOutInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) + 0;
+
+	if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz >
+	    DEVMEM_HEAPNAME_MAXLENGTH)
+	{
+		psHeapCfgHeapDetailsOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto HeapCfgHeapDetails_exit;
+	}
+
+	psHeapCfgHeapDetailsOUT->puiHeapNameOut =
+	    psHeapCfgHeapDetailsIN->puiHeapNameOut;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psHeapCfgHeapDetailsIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psHeapCfgHeapDetailsOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto HeapCfgHeapDetails_exit;
+			}
+		}
+	}
+
+	if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0)
+	{
+		puiHeapNameOutInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psHeapCfgHeapDetailsIN->ui32HeapNameBufSz *
+		    sizeof(IMG_CHAR);
+	}
+
+	psHeapCfgHeapDetailsOUT->eError =
+	    HeapCfgHeapDetails(psConnection, OSGetDevData(psConnection),
+			       psHeapCfgHeapDetailsIN->ui32HeapConfigIndex,
+			       psHeapCfgHeapDetailsIN->ui32HeapIndex,
+			       psHeapCfgHeapDetailsIN->ui32HeapNameBufSz,
+			       puiHeapNameOutInt,
+			       &psHeapCfgHeapDetailsOUT->sDevVAddrBase,
+			       &psHeapCfgHeapDetailsOUT->uiHeapLength,
+			       &psHeapCfgHeapDetailsOUT->
+			       ui32Log2DataPageSizeOut,
+			       &psHeapCfgHeapDetailsOUT->
+			       ui32Log2ImportAlignmentOut,
+			       &psHeapCfgHeapDetailsOUT->
+			       ui32Log2TilingStrideFactorOut);
+
+	if ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0)
+	{
+		if (unlikely
+		    (OSCopyToUser
+		     (NULL,
+		      (void __user *)psHeapCfgHeapDetailsOUT->puiHeapNameOut,
+		      puiHeapNameOutInt,
+		      (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz *
+		       sizeof(IMG_CHAR))) != PVRSRV_OK))
+		{
+			psHeapCfgHeapDetailsOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto HeapCfgHeapDetails_exit;
+		}
+	}
+
+ HeapCfgHeapDetails_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry,
+					PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM
+					* psDevmemIntRegisterPFNotifyKMIN,
+					PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM
+					* psDevmemIntRegisterPFNotifyKMOUT,
+					CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hDevmemCtx = psDevmemIntRegisterPFNotifyKMIN->hDevmemCtx;
+	DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntRegisterPFNotifyKMOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psDevmemCtxInt,
+				       hDevmemCtx,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+				       IMG_TRUE);
+	if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntRegisterPFNotifyKM_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntRegisterPFNotifyKMOUT->eError =
+	    DevmemIntRegisterPFNotifyKM(psDevmemCtxInt,
+					psDevmemIntRegisterPFNotifyKMIN->
+					ui32PID,
+					psDevmemIntRegisterPFNotifyKMIN->
+					bRegister);
+
+ DevmemIntRegisterPFNotifyKM_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psDevmemCtxInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hDevmemCtx,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeGetMaxDevMemSize(IMG_UINT32 ui32DispatchTableEntry,
+			     PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE *
+			     psGetMaxDevMemSizeIN,
+			     PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE *
+			     psGetMaxDevMemSizeOUT,
+			     CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psGetMaxDevMemSizeIN);
+
+	psGetMaxDevMemSizeOUT->eError =
+	    PVRSRVGetMaxDevMemSizeKM(psConnection, OSGetDevData(psConnection),
+				     &psGetMaxDevMemSizeOUT->uiLMASize,
+				     &psGetMaxDevMemSizeOUT->uiUMASize);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry,
+				  PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *
+				  psDevmemGetFaultAddressIN,
+				  PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *
+				  psDevmemGetFaultAddressOUT,
+				  CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx;
+	DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemGetFaultAddressOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psDevmemCtxInt,
+				       hDevmemCtx,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+				       IMG_TRUE);
+	if (unlikely(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemGetFaultAddress_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemGetFaultAddressOUT->eError =
+	    DevmemIntGetFaultAddress(psConnection, OSGetDevData(psConnection),
+				     psDevmemCtxInt,
+				     &psDevmemGetFaultAddressOUT->
+				     sFaultAddress);
+
+ DevmemGetFaultAddress_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psDevmemCtxInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hDevmemCtx,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
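+/* When IMG_TRUE, every entry below is registered so that the global
+ * bridge lock is taken around its handler.
+ */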
+static IMG_BOOL bUseLock = IMG_TRUE;
+
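+/* Prototypes for the bridge init/deinit entry points defined below. */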
+PVRSRV_ERROR InitMMBridge(void);
+PVRSRV_ERROR DeinitMMBridge(void);
+
+/*
+ * Register all MM functions with services
+ */
+PVRSRV_ERROR InitMMBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR,
+			      PVRSRVBridgePMRExportPMR, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR,
+			      PVRSRVBridgePMRUnexportPMR, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID,
+			      PVRSRVBridgePMRGetUID, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE,
+			      PVRSRVBridgePMRMakeLocalImportHandle, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE,
+			      PVRSRVBridgePMRUnmakeLocalImportHandle, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR,
+			      PVRSRVBridgePMRImportPMR, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR,
+			      PVRSRVBridgePMRLocalImportPMR, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR,
+			      PVRSRVBridgePMRUnrefPMR, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR,
+			      PVRSRVBridgePMRUnrefUnlockPMR, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR,
+			      PVRSRVBridgePhysmemNewRamBackedPMR, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR,
+			      PVRSRVBridgePhysmemNewRamBackedLockedPMR, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_PHYSMEMIMPORTSECBUF,
+			      PVRSRVBridgePhysmemImportSecBuf, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN,
+			      PVRSRVBridgeDevmemIntPin, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN,
+			      PVRSRVBridgeDevmemIntUnpin, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE,
+			      PVRSRVBridgeDevmemIntPinValidate, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE,
+			      PVRSRVBridgeDevmemIntUnpinInvalidate, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE,
+			      PVRSRVBridgeDevmemIntCtxCreate, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY,
+			      PVRSRVBridgeDevmemIntCtxDestroy, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE,
+			      PVRSRVBridgeDevmemIntHeapCreate, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY,
+			      PVRSRVBridgeDevmemIntHeapDestroy, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR,
+			      PVRSRVBridgeDevmemIntMapPMR, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR,
+			      PVRSRVBridgeDevmemIntUnmapPMR, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE,
+			      PVRSRVBridgeDevmemIntReserveRange, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE,
+			      PVRSRVBridgeDevmemIntUnreserveRange, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_CHANGESPARSEMEM,
+			      PVRSRVBridgeChangeSparseMem, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES,
+			      PVRSRVBridgeDevmemIntMapPages, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES,
+			      PVRSRVBridgeDevmemIntUnmapPages, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID,
+			      PVRSRVBridgeDevmemIsVDevAddrValid, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT,
+			      PVRSRVBridgeHeapCfgHeapConfigCount, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT,
+			      PVRSRVBridgeHeapCfgHeapCount, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME,
+			      PVRSRVBridgeHeapCfgHeapConfigName, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS,
+			      PVRSRVBridgeHeapCfgHeapDetails, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM,
+			      PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE,
+			      PVRSRVBridgeGetMaxDevMemSize, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+			      PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS,
+			      PVRSRVBridgeDevmemGetFaultAddress, NULL,
+			      bUseLock);
+
+	return PVRSRV_OK;
+}
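+
+/* DeinitMMBridge below unregisters exactly the set of entries registered
+ * above; the two lists are kept in one-to-one correspondence.
+ */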
+
+/*
+ * Unregister all MM functions from services
+ */
+PVRSRV_ERROR DeinitMMBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_PMREXPORTPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_PMRIMPORTPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_PHYSMEMIMPORTSECBUF);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTPIN);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_CHANGESPARSEMEM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM,
+				PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_pvrtl_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_pvrtl_bridge.c
new file mode 100644
index 0000000..9ff3f7b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_pvrtl_bridge.c
@@ -0,0 +1,818 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pvrtl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "tlserver.h"
+
+#include "common_pvrtl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry,
+			 PVRSRV_BRIDGE_IN_TLOPENSTREAM * psTLOpenStreamIN,
+			 PVRSRV_BRIDGE_OUT_TLOPENSTREAM * psTLOpenStreamOUT,
+			 CONNECTION_DATA * psConnection)
+{
+	IMG_CHAR *uiNameInt = NULL;
+	TL_STREAM_DESC *psSDInt = NULL;
+	PMR *psTLPMRInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + 0;
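+	/* The size expression above sums one term per marshalled array; the
+	 * trailing "+ 0" keeps the generated expression well-formed for calls
+	 * that marshal no arrays.
+	 */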
+
+	psTLOpenStreamOUT->hSD = NULL;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) psTLOpenStreamIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psTLOpenStreamOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto TLOpenStream_exit;
+			}
+		}
+	}
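+	/* A worked example of the reuse test above, assuming PVR_ALIGN() is
+	 * the usual round-up-to-multiple macro: with a 40-byte IN structure
+	 * and 8-byte longs, ui32InBufferOffset = PVR_ALIGN(40, 8) = 40, so up
+	 * to PVRSRV_MAX_BRIDGE_IN_SIZE - 40 bytes of the bridge input buffer
+	 * can be reused for the array copies with no separate allocation.
+	 */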
+
+	{
+		uiNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiNameInt,
+		     (const void __user *)psTLOpenStreamIN->puiName,
+		     PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) !=
+		    PVRSRV_OK)
+		{
+			psTLOpenStreamOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto TLOpenStream_exit;
+		}
+		((IMG_CHAR *)
+		 uiNameInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) -
+			    1] = '\0';
+	}
+
+	psTLOpenStreamOUT->eError =
+	    TLServerOpenStreamKM(uiNameInt,
+				 psTLOpenStreamIN->ui32Mode,
+				 &psSDInt, &psTLPMRInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK))
+	{
+		goto TLOpenStream_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psTLOpenStreamOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psTLOpenStreamOUT->hSD, (void *)psSDInt,
+				      PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      TLServerCloseStreamKM);
+	if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLOpenStream_exit;
+	}
+
+	psTLOpenStreamOUT->eError =
+	    PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+					 &psTLOpenStreamOUT->hTLPMR,
+					 (void *)psTLPMRInt,
+					 PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+					 PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					 psTLOpenStreamOUT->hSD);
+	if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLOpenStream_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ TLOpenStream_exit:
+
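+	/* On error: if the stream-descriptor handle was created, releasing it
+	 * also closes the stream via its PFN_HANDLE_RELEASE callback, so
+	 * psSDInt is NULLed to skip the direct TLServerCloseStreamKM() call;
+	 * otherwise the raw descriptor is closed directly.
+	 */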
+	if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+	{
+		if (psTLOpenStreamOUT->hSD)
+		{
+			PVRSRV_ERROR eError;
+
+			/* Lock over handle creation cleanup. */
+			LockHandle(psConnection->psHandleBase);
+
+			eError =
+			    PVRSRVReleaseHandleUnlocked(psConnection->
+							psHandleBase,
+							(IMG_HANDLE)
+							psTLOpenStreamOUT->hSD,
+							PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+			if (unlikely
+			    ((eError != PVRSRV_OK)
+			     && (eError != PVRSRV_ERROR_RETRY)))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVBridgeTLOpenStream: %s",
+					 PVRSRVGetErrorString(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK)
+				   || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psSDInt = NULL;
+			/* Release now we have cleaned up creation handles. */
+			UnlockHandle(psConnection->psHandleBase);
+
+		}
+
+		if (psSDInt)
+		{
+			TLServerCloseStreamKM(psSDInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLCloseStream(IMG_UINT32 ui32DispatchTableEntry,
+			  PVRSRV_BRIDGE_IN_TLCLOSESTREAM * psTLCloseStreamIN,
+			  PVRSRV_BRIDGE_OUT_TLCLOSESTREAM * psTLCloseStreamOUT,
+			  CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psTLCloseStreamOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psTLCloseStreamIN->hSD,
+					PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	if (unlikely((psTLCloseStreamOUT->eError != PVRSRV_OK) &&
+		     (psTLCloseStreamOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeTLCloseStream: %s",
+			 PVRSRVGetErrorString(psTLCloseStreamOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLCloseStream_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ TLCloseStream_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLAcquireData(IMG_UINT32 ui32DispatchTableEntry,
+			  PVRSRV_BRIDGE_IN_TLACQUIREDATA * psTLAcquireDataIN,
+			  PVRSRV_BRIDGE_OUT_TLACQUIREDATA * psTLAcquireDataOUT,
+			  CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSD = psTLAcquireDataIN->hSD;
+	TL_STREAM_DESC *psSDInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psTLAcquireDataOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSDInt,
+				       hSD,
+				       PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+	if (unlikely(psTLAcquireDataOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLAcquireData_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psTLAcquireDataOUT->eError =
+	    TLServerAcquireDataKM(psSDInt,
+				  &psTLAcquireDataOUT->ui32ReadOffset,
+				  &psTLAcquireDataOUT->ui32ReadLen);
+
+ TLAcquireData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSDInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLReleaseData(IMG_UINT32 ui32DispatchTableEntry,
+			  PVRSRV_BRIDGE_IN_TLRELEASEDATA * psTLReleaseDataIN,
+			  PVRSRV_BRIDGE_OUT_TLRELEASEDATA * psTLReleaseDataOUT,
+			  CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSD = psTLReleaseDataIN->hSD;
+	TL_STREAM_DESC *psSDInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psTLReleaseDataOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSDInt,
+				       hSD,
+				       PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+	if (unlikely(psTLReleaseDataOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLReleaseData_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psTLReleaseDataOUT->eError =
+	    TLServerReleaseDataKM(psSDInt,
+				  psTLReleaseDataIN->ui32ReadOffset,
+				  psTLReleaseDataIN->ui32ReadLen);
+
+ TLReleaseData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSDInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry,
+			      PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *
+			      psTLDiscoverStreamsIN,
+			      PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *
+			      psTLDiscoverStreamsOUT,
+			      CONNECTION_DATA * psConnection)
+{
+	IMG_CHAR *uiNamePatternInt = NULL;
+	IMG_CHAR *puiStreamsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) +
+	    (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) + 0;
+
+	if (psTLDiscoverStreamsIN->ui32Size >
+	    PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER)
+	{
+		psTLDiscoverStreamsOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto TLDiscoverStreams_exit;
+	}
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psTLDiscoverStreamsOUT->puiStreams = psTLDiscoverStreamsIN->puiStreams;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psTLDiscoverStreamsIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psTLDiscoverStreamsOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto TLDiscoverStreams_exit;
+			}
+		}
+	}
+
+	{
+		uiNamePatternInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiNamePatternInt,
+		     (const void __user *)psTLDiscoverStreamsIN->puiNamePattern,
+		     PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) !=
+		    PVRSRV_OK)
+		{
+			psTLDiscoverStreamsOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto TLDiscoverStreams_exit;
+		}
+		((IMG_CHAR *)
+		 uiNamePatternInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE *
+				    sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+	if (psTLDiscoverStreamsIN->ui32Size != 0)
+	{
+		puiStreamsInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR);
+	}
+
+	psTLDiscoverStreamsOUT->eError =
+	    TLServerDiscoverStreamsKM(uiNamePatternInt,
+				      psTLDiscoverStreamsIN->ui32Size,
+				      puiStreamsInt,
+				      &psTLDiscoverStreamsOUT->ui32NumFound);
+
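+	/* Copy the discovered stream names back to the caller's buffer; the
+	 * user-space pointer was echoed into the OUT structure above.
+	 */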
+	if ((psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) > 0)
+	{
+		if (unlikely
+		    (OSCopyToUser
+		     (NULL, (void __user *)psTLDiscoverStreamsOUT->puiStreams,
+		      puiStreamsInt,
+		      (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR))) !=
+		     PVRSRV_OK))
+		{
+			psTLDiscoverStreamsOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto TLDiscoverStreams_exit;
+		}
+	}
+
+ TLDiscoverStreams_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLReserveStream(IMG_UINT32 ui32DispatchTableEntry,
+			    PVRSRV_BRIDGE_IN_TLRESERVESTREAM *
+			    psTLReserveStreamIN,
+			    PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *
+			    psTLReserveStreamOUT,
+			    CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSD = psTLReserveStreamIN->hSD;
+	TL_STREAM_DESC *psSDInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psTLReserveStreamOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSDInt,
+				       hSD,
+				       PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+	if (unlikely(psTLReserveStreamOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLReserveStream_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psTLReserveStreamOUT->eError =
+	    TLServerReserveStreamKM(psSDInt,
+				    &psTLReserveStreamOUT->ui32BufferOffset,
+				    psTLReserveStreamIN->ui32Size,
+				    psTLReserveStreamIN->ui32SizeMin,
+				    &psTLReserveStreamOUT->ui32Available);
+
+ TLReserveStream_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSDInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLCommitStream(IMG_UINT32 ui32DispatchTableEntry,
+			   PVRSRV_BRIDGE_IN_TLCOMMITSTREAM * psTLCommitStreamIN,
+			   PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *
+			   psTLCommitStreamOUT, CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSD = psTLCommitStreamIN->hSD;
+	TL_STREAM_DESC *psSDInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psTLCommitStreamOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSDInt,
+				       hSD,
+				       PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+	if (unlikely(psTLCommitStreamOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLCommitStream_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psTLCommitStreamOUT->eError =
+	    TLServerCommitStreamKM(psSDInt, psTLCommitStreamIN->ui32ReqSize);
+
+ TLCommitStream_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSDInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry,
+			PVRSRV_BRIDGE_IN_TLWRITEDATA * psTLWriteDataIN,
+			PVRSRV_BRIDGE_OUT_TLWRITEDATA * psTLWriteDataOUT,
+			CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSD = psTLWriteDataIN->hSD;
+	TL_STREAM_DESC *psSDInt = NULL;
+	IMG_BYTE *psDataInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) + 0;
+
+	if (unlikely(psTLWriteDataIN->ui32Size > PVRSRVTL_MAX_PACKET_SIZE))
+	{
+		psTLWriteDataOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto TLWriteData_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) psTLWriteDataIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psTLWriteDataOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto TLWriteData_exit;
+			}
+		}
+	}
+
+	if (psTLWriteDataIN->ui32Size != 0)
+	{
+		psDataInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psDataInt,
+		     (const void __user *)psTLWriteDataIN->psData,
+		     psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK)
+		{
+			psTLWriteDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto TLWriteData_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psTLWriteDataOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSDInt,
+				       hSD,
+				       PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+	if (unlikely(psTLWriteDataOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLWriteData_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psTLWriteDataOUT->eError =
+	    TLServerWriteDataKM(psSDInt, psTLWriteDataIN->ui32Size, psDataInt);
+
+ TLWriteData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSDInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_FALSE;
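+/* Unlike the MM bridge, the PVRTL entries are registered with bUseLock set
+ * to IMG_FALSE, i.e. presumably without the global bridge lock; the handlers
+ * above rely on the per-connection handle-base lock instead.
+ */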
+
+PVRSRV_ERROR InitPVRTLBridge(void);
+PVRSRV_ERROR DeinitPVRTLBridge(void);
+
+/*
+ * Register all PVRTL functions with services
+ */
+PVRSRV_ERROR InitPVRTLBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+			      PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM,
+			      PVRSRVBridgeTLOpenStream, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+			      PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM,
+			      PVRSRVBridgeTLCloseStream, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+			      PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA,
+			      PVRSRVBridgeTLAcquireData, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+			      PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA,
+			      PVRSRVBridgeTLReleaseData, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+			      PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS,
+			      PVRSRVBridgeTLDiscoverStreams, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+			      PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM,
+			      PVRSRVBridgeTLReserveStream, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+			      PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM,
+			      PVRSRVBridgeTLCommitStream, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+			      PVRSRV_BRIDGE_PVRTL_TLWRITEDATA,
+			      PVRSRVBridgeTLWriteData, NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PVRTL functions from services
+ */
+PVRSRV_ERROR DeinitPVRTLBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+				PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+				PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+				PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+				PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+				PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+				PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+				PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL,
+				PVRSRV_BRIDGE_PVRTL_TLWRITEDATA);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxbreakpoint_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxbreakpoint_bridge.c
new file mode 100644
index 0000000..eb4c1a5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxbreakpoint_bridge.c
@@ -0,0 +1,374 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxbreakpoint
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxbreakpoint
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxbreakpoint.h"
+
+#include "common_rgxbreakpoint_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXSetBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+			     PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *
+			     psRGXSetBreakpointIN,
+			     PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *
+			     psRGXSetBreakpointOUT,
+			     CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPrivData = psRGXSetBreakpointIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXSetBreakpointOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hPrivDataInt,
+				       hPrivData,
+				       PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+				       IMG_TRUE);
+	if (unlikely(psRGXSetBreakpointOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXSetBreakpoint_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXSetBreakpointOUT->eError =
+	    PVRSRVRGXSetBreakpointKM(psConnection, OSGetDevData(psConnection),
+				     hPrivDataInt,
+				     psRGXSetBreakpointIN->eFWDataMaster,
+				     psRGXSetBreakpointIN->ui32BreakpointAddr,
+				     psRGXSetBreakpointIN->ui32HandlerAddr,
+				     psRGXSetBreakpointIN->ui32DM);
+
+ RGXSetBreakpoint_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData,
+					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXClearBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *
+			       psRGXClearBreakpointIN,
+			       PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *
+			       psRGXClearBreakpointOUT,
+			       CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPrivData = psRGXClearBreakpointIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXClearBreakpointOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hPrivDataInt,
+				       hPrivData,
+				       PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+				       IMG_TRUE);
+	if (unlikely(psRGXClearBreakpointOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXClearBreakpoint_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXClearBreakpointOUT->eError =
+	    PVRSRVRGXClearBreakpointKM(psConnection, OSGetDevData(psConnection),
+				       hPrivDataInt);
+
+ RGXClearBreakpoint_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData,
+					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXEnableBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *
+				psRGXEnableBreakpointIN,
+				PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *
+				psRGXEnableBreakpointOUT,
+				CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPrivData = psRGXEnableBreakpointIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXEnableBreakpointOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hPrivDataInt,
+				       hPrivData,
+				       PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+				       IMG_TRUE);
+	if (unlikely(psRGXEnableBreakpointOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXEnableBreakpoint_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXEnableBreakpointOUT->eError =
+	    PVRSRVRGXEnableBreakpointKM(psConnection,
+					OSGetDevData(psConnection),
+					hPrivDataInt);
+
+ RGXEnableBreakpoint_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData,
+					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDisableBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+				 PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *
+				 psRGXDisableBreakpointIN,
+				 PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *
+				 psRGXDisableBreakpointOUT,
+				 CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPrivData = psRGXDisableBreakpointIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXDisableBreakpointOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hPrivDataInt,
+				       hPrivData,
+				       PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+				       IMG_TRUE);
+	if (unlikely(psRGXDisableBreakpointOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXDisableBreakpoint_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXDisableBreakpointOUT->eError =
+	    PVRSRVRGXDisableBreakpointKM(psConnection,
+					 OSGetDevData(psConnection),
+					 hPrivDataInt);
+
+ RGXDisableBreakpoint_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData,
+					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXOverallocateBPRegisters(IMG_UINT32 ui32DispatchTableEntry,
+				       PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS
+				       * psRGXOverallocateBPRegistersIN,
+				       PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS
+				       * psRGXOverallocateBPRegistersOUT,
+				       CONNECTION_DATA * psConnection)
+{
+
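+	/* No client handles to look up here, so no handle-base locking is
+	 * needed; the two register counts are forwarded straight to the KM
+	 * function.
+	 */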
+	psRGXOverallocateBPRegistersOUT->eError =
+	    PVRSRVRGXOverallocateBPRegistersKM(psConnection,
+					       OSGetDevData(psConnection),
+					       psRGXOverallocateBPRegistersIN->
+					       ui32TempRegs,
+					       psRGXOverallocateBPRegistersIN->
+					       ui32SharedRegs);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_RGXBREAKPOINT_BRIDGE */
+
+#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE)
+PVRSRV_ERROR InitRGXBREAKPOINTBridge(void);
+PVRSRV_ERROR DeinitRGXBREAKPOINTBridge(void);
+
+/*
+ * Register all RGXBREAKPOINT functions with services
+ */
+PVRSRV_ERROR InitRGXBREAKPOINTBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+			      PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT,
+			      PVRSRVBridgeRGXSetBreakpoint, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+			      PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT,
+			      PVRSRVBridgeRGXClearBreakpoint, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+			      PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT,
+			      PVRSRVBridgeRGXEnableBreakpoint, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+			      PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT,
+			      PVRSRVBridgeRGXDisableBreakpoint, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+			      PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS,
+			      PVRSRVBridgeRGXOverallocateBPRegisters, NULL,
+			      bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXBREAKPOINT functions from services
+ */
+PVRSRV_ERROR DeinitRGXBREAKPOINTBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+				PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+				PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+				PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+				PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+				PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS);
+
+	return PVRSRV_OK;
+}
+#else /* EXCLUDE_RGXBREAKPOINT_BRIDGE */
+/* This bridge is conditional on EXCLUDE_RGXBREAKPOINT_BRIDGE: when that macro
+ * is defined, the dispatch table is not populated with this bridge's
+ * functions, and the Init/Deinit entry points reduce to no-ops that return
+ * PVRSRV_OK.
+ */
+#define InitRGXBREAKPOINTBridge() \
+	PVRSRV_OK
+
+#define DeinitRGXBREAKPOINTBridge() \
+	PVRSRV_OK
+
+#endif /* EXCLUDE_RGXBREAKPOINT_BRIDGE */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxcmp_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxcmp_bridge.c
new file mode 100644
index 0000000..36ae2e5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxcmp_bridge.c
@@ -0,0 +1,1784 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxcmp
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxcmp
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxcompute.h"
+
+#include "common_rgxcmp_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "rgx_bvnc_defs_km.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+				    PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *
+				    psRGXCreateComputeContextIN,
+				    PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *
+				    psRGXCreateComputeContextOUT,
+				    CONNECTION_DATA * psConnection)
+{
+	IMG_BYTE *psFrameworkCmdInt = NULL;
+	IMG_HANDLE hPrivData = psRGXCreateComputeContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	IMG_BYTE *psStaticComputecontextStateInt = NULL;
+	RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXCreateComputeContextIN->ui32FrameworkCmdize *
+	     sizeof(IMG_BYTE)) +
+	    (psRGXCreateComputeContextIN->ui32StaticComputecontextStateSize *
+	     sizeof(IMG_BYTE)) + 0;
+
+	if (unlikely
+	    (psRGXCreateComputeContextIN->ui32FrameworkCmdize >
+	     RGXFWIF_RF_CMD_SIZE))
+	{
+		psRGXCreateComputeContextOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXCreateComputeContext_exit;
+	}
+
+	if (unlikely
+	    (psRGXCreateComputeContextIN->ui32StaticComputecontextStateSize >
+	     RGXFWIF_STATIC_COMPUTECONTEXT_SIZE))
+	{
+		psRGXCreateComputeContextOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXCreateComputeContext_exit;
+	}
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXCreateComputeContextOUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXCreateComputeContext_exit;
+		}
+	}
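+	/* Compute-specific bridge calls are rejected up front with
+	 * PVRSRV_ERROR_NOT_SUPPORTED on cores that do not report the
+	 * RGX_FEATURE_COMPUTE_BIT_MASK feature.
+	 */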
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRGXCreateComputeContextIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXCreateComputeContextOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXCreateComputeContext_exit;
+			}
+		}
+	}
+
+	if (psRGXCreateComputeContextIN->ui32FrameworkCmdize != 0)
+	{
+		psFrameworkCmdInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXCreateComputeContextIN->ui32FrameworkCmdize *
+		    sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXCreateComputeContextIN->ui32FrameworkCmdize *
+	    sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psFrameworkCmdInt,
+		     (const void __user *)psRGXCreateComputeContextIN->
+		     psFrameworkCmd,
+		     psRGXCreateComputeContextIN->ui32FrameworkCmdize *
+		     sizeof(IMG_BYTE)) != PVRSRV_OK)
+		{
+			psRGXCreateComputeContextOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXCreateComputeContext_exit;
+		}
+	}
+	if (psRGXCreateComputeContextIN->ui32StaticComputecontextStateSize != 0)
+	{
+		psStaticComputecontextStateInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXCreateComputeContextIN->
+		    ui32StaticComputecontextStateSize * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXCreateComputeContextIN->ui32StaticComputecontextStateSize *
+	    sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psStaticComputecontextStateInt,
+		     (const void __user *)psRGXCreateComputeContextIN->
+		     psStaticComputecontextState,
+		     psRGXCreateComputeContextIN->
+		     ui32StaticComputecontextStateSize * sizeof(IMG_BYTE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXCreateComputeContextOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXCreateComputeContext_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXCreateComputeContextOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hPrivDataInt,
+				       hPrivData,
+				       PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+				       IMG_TRUE);
+	if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateComputeContext_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXCreateComputeContextOUT->eError =
+	    PVRSRVRGXCreateComputeContextKM(psConnection,
+					    OSGetDevData(psConnection),
+					    psRGXCreateComputeContextIN->
+					    ui32Priority,
+					    psRGXCreateComputeContextIN->
+					    ui32FrameworkCmdize,
+					    psFrameworkCmdInt, hPrivDataInt,
+					    psRGXCreateComputeContextIN->
+					    sResumeSignalAddr,
+					    psRGXCreateComputeContextIN->
+					    ui32StaticComputecontextStateSize,
+					    psStaticComputecontextStateInt,
+					    psRGXCreateComputeContextIN->
+					    ui32PackedCCBSizeU88,
+					    &psComputeContextInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK))
+	{
+		goto RGXCreateComputeContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXCreateComputeContextOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRGXCreateComputeContextOUT->
+				      hComputeContext,
+				      (void *)psComputeContextInt,
+				      PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      PVRSRVRGXDestroyComputeContextKM);
+	if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateComputeContext_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXCreateComputeContext_exit:
+
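+	/* Cleanup: drop the looked-up private-data handle first; then, if the
+	 * call failed after the compute context was created but before its
+	 * handle was allocated, destroy the raw context directly (once the
+	 * handle exists, its release callback owns destruction).
+	 */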
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData,
+					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+	{
+		if (psComputeContextInt)
+		{
+			PVRSRVRGXDestroyComputeContextKM(psComputeContextInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+				     PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *
+				     psRGXDestroyComputeContextIN,
+				     PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT
+				     * psRGXDestroyComputeContextOUT,
+				     CONNECTION_DATA * psConnection)
+{
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXDestroyComputeContextOUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXDestroyComputeContext_exit;
+		}
+	}
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXDestroyComputeContextOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE)
+					psRGXDestroyComputeContextIN->
+					hComputeContext,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+	if (unlikely
+	    ((psRGXDestroyComputeContextOUT->eError != PVRSRV_OK)
+	     && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeRGXDestroyComputeContext: %s",
+			 PVRSRVGetErrorString(psRGXDestroyComputeContextOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXDestroyComputeContext_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXDestroyComputeContext_exit:
+
+	return 0;
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeRGXKickCDM(IMG_UINT32 ui32DispatchTableEntry,
+		       PVRSRV_BRIDGE_IN_RGXKICKCDM * psRGXKickCDMIN,
+		       PVRSRV_BRIDGE_OUT_RGXKICKCDM * psRGXKickCDMOUT,
+		       CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hComputeContext = psRGXKickCDMIN->hComputeContext;
+	RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClientFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientFenceOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientFenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE **psServerSyncsInt = NULL;
+	IMG_HANDLE *hServerSyncsInt2 = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_BYTE *psDMCmdInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
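+	/*
+	 * Total scratch space needed to stage every user-supplied array. The
+	 * terms below must match, in order, the offsets consumed through
+	 * ui32NextOffset when the arrays are unpacked; the two are checked
+	 * for equality with PVR_ASSERT at the end of the function.
+	 */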
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXKickCDMIN->ui32ClientFenceCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickCDMIN->ui32ClientUpdateCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickCDMIN->ui32ServerSyncCount *
+	     sizeof(SERVER_SYNC_PRIMITIVE *)) +
+	    (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+	    (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+	    (psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) + 0;
+
+	if (unlikely
+	    (psRGXKickCDMIN->ui32ClientFenceCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickCDMOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickCDM_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickCDMIN->ui32ClientUpdateCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickCDMOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickCDM_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickCDMIN->ui32ServerSyncCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickCDMOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickCDM_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickCDMIN->ui32CmdSize >
+	     RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+	{
+		psRGXKickCDMOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickCDM_exit;
+	}
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXKickCDM_exit;
+		}
+	}
+
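+	/*
+	 * Stage the array arguments either in the unused tail of the bridge
+	 * input buffer (when it is large enough) or in a temporary heap
+	 * allocation that is freed on exit.
+	 */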
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXKickCDMIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) psRGXKickCDMIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXKickCDMOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXKickCDM_exit;
+			}
+		}
+	}
+
+	if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+	{
+		psClientFenceUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDMIN->ui32ClientFenceCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientFenceUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hClientFenceUFOSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickCDMIN->
+		     phClientFenceUFOSyncPrimBlock,
+		     psRGXKickCDMIN->ui32ClientFenceCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM_exit;
+		}
+	}
+	if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientFenceOffsetInt,
+		     (const void __user *)psRGXKickCDMIN->
+		     pui32ClientFenceOffset,
+		     psRGXKickCDMIN->ui32ClientFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM_exit;
+		}
+	}
+	if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientFenceValueInt,
+		     (const void __user *)psRGXKickCDMIN->pui32ClientFenceValue,
+		     psRGXKickCDMIN->ui32ClientFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM_exit;
+		}
+	}
+	if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+	{
+		psClientUpdateUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDMIN->ui32ClientUpdateCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientUpdateUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hClientUpdateUFOSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickCDMIN->
+		     phClientUpdateUFOSyncPrimBlock,
+		     psRGXKickCDMIN->ui32ClientUpdateCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM_exit;
+		}
+	}
+	if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientUpdateOffsetInt,
+		     (const void __user *)psRGXKickCDMIN->
+		     pui32ClientUpdateOffset,
+		     psRGXKickCDMIN->ui32ClientUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM_exit;
+		}
+	}
+	if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientUpdateValueInt,
+		     (const void __user *)psRGXKickCDMIN->
+		     pui32ClientUpdateValue,
+		     psRGXKickCDMIN->ui32ClientUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM_exit;
+		}
+	}
+	if (psRGXKickCDMIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerSyncFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ServerSyncFlagsInt,
+		     (const void __user *)psRGXKickCDMIN->pui32ServerSyncFlags,
+		     psRGXKickCDMIN->ui32ServerSyncCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM_exit;
+		}
+	}
+	if (psRGXKickCDMIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncsInt =
+		    (SERVER_SYNC_PRIMITIVE **) (((IMG_UINT8 *) pArrayArgsBuffer)
+						+ ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDMIN->ui32ServerSyncCount *
+		    sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServerSyncsInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hServerSyncsInt2,
+		     (const void __user *)psRGXKickCDMIN->phServerSyncs,
+		     psRGXKickCDMIN->ui32ServerSyncCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM_exit;
+		}
+	}
+
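+	/*
+	 * The update fence name has a fixed size of PVRSRV_SYNC_NAME_LENGTH
+	 * bytes, so space for it is always reserved and the copy below is
+	 * always performed and forcibly NUL-terminated.
+	 */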
+	{
+		uiUpdateFenceNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceNameInt,
+		     (const void __user *)psRGXKickCDMIN->puiUpdateFenceName,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM_exit;
+		}
+		((IMG_CHAR *)
+		 uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH *
+					sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+	if (psRGXKickCDMIN->ui32CmdSize != 0)
+	{
+		psDMCmdInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psDMCmdInt,
+		     (const void __user *)psRGXKickCDMIN->psDMCmd,
+		     psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
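+	/*
+	 * Each lookup below takes a reference on the object behind the handle
+	 * (IMG_TRUE); the matching releases happen in the RGXKickCDM_exit
+	 * cleanup path.
+	 */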
+	/* Look up the address from the handle */
+	psRGXKickCDMOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psComputeContextInt,
+				       hComputeContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXKickCDMOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXKickCDM_exit;
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickCDMIN->ui32ClientFenceCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickCDMOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psClientFenceUFOSyncPrimBlockInt
+						       [i],
+						       hClientFenceUFOSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickCDMOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickCDM_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickCDMIN->ui32ClientUpdateCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickCDMOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psClientUpdateUFOSyncPrimBlockInt
+						       [i],
+						       hClientUpdateUFOSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickCDMOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickCDM_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickCDMIN->ui32ServerSyncCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickCDMOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psServerSyncsInt[i],
+						       hServerSyncsInt2[i],
+						       PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickCDMOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickCDM_exit;
+			}
+		}
+	}
+	/* Release the lock now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXKickCDMOUT->eError =
+	    PVRSRVRGXKickCDMKM(psComputeContextInt,
+			       psRGXKickCDMIN->ui32ClientCacheOpSeqNum,
+			       psRGXKickCDMIN->ui32ClientFenceCount,
+			       psClientFenceUFOSyncPrimBlockInt,
+			       ui32ClientFenceOffsetInt,
+			       ui32ClientFenceValueInt,
+			       psRGXKickCDMIN->ui32ClientUpdateCount,
+			       psClientUpdateUFOSyncPrimBlockInt,
+			       ui32ClientUpdateOffsetInt,
+			       ui32ClientUpdateValueInt,
+			       psRGXKickCDMIN->ui32ServerSyncCount,
+			       ui32ServerSyncFlagsInt,
+			       psServerSyncsInt,
+			       psRGXKickCDMIN->hCheckFenceFd,
+			       psRGXKickCDMIN->hUpdateTimeline,
+			       &psRGXKickCDMOUT->hUpdateFence,
+			       uiUpdateFenceNameInt,
+			       psRGXKickCDMIN->ui32CmdSize,
+			       psDMCmdInt,
+			       psRGXKickCDMIN->ui32PDumpFlags,
+			       psRGXKickCDMIN->ui32ExtJobRef);
+
+ RGXKickCDM_exit:
+
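+	/*
+	 * Common exit path, reached on success and on every error: drop each
+	 * handle reference taken above, then free the staging buffer if it
+	 * was heap-allocated.
+	 */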
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psComputeContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hComputeContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+	}
+
+	if (hClientFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickCDMIN->ui32ClientFenceCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hClientFenceUFOSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hClientFenceUFOSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hClientUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickCDMIN->ui32ClientUpdateCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hClientUpdateUFOSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hClientUpdateUFOSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hServerSyncsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickCDMIN->ui32ServerSyncCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hServerSyncsInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hServerSyncsInt2[i],
+							    PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+			}
+		}
+	}
+	/* Release the lock now that we have cleaned up the lookup handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeRGXKickCDM NULL
+#endif
+
+static IMG_INT
+PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *
+				psRGXFlushComputeDataIN,
+				PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *
+				psRGXFlushComputeDataOUT,
+				CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hComputeContext = psRGXFlushComputeDataIN->hComputeContext;
+	RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXFlushComputeDataOUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXFlushComputeData_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXFlushComputeDataOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psComputeContextInt,
+				       hComputeContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXFlushComputeDataOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXFlushComputeData_exit;
+	}
+	/* Release the lock now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXFlushComputeDataOUT->eError =
+	    PVRSRVRGXFlushComputeDataKM(psComputeContextInt);
+
+ RGXFlushComputeData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psComputeContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hComputeContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+	}
+	/* Release the lock now that we have cleaned up the lookup handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					 PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY
+					 * psRGXSetComputeContextPriorityIN,
+					 PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY
+					 * psRGXSetComputeContextPriorityOUT,
+					 CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hComputeContext =
+	    psRGXSetComputeContextPriorityIN->hComputeContext;
+	RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXSetComputeContextPriorityOUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXSetComputeContextPriority_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXSetComputeContextPriorityOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psComputeContextInt,
+				       hComputeContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXSetComputeContextPriorityOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXSetComputeContextPriority_exit;
+	}
+	/* Release the lock now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXSetComputeContextPriorityOUT->eError =
+	    PVRSRVRGXSetComputeContextPriorityKM(psConnection,
+						 OSGetDevData(psConnection),
+						 psComputeContextInt,
+						 psRGXSetComputeContextPriorityIN->
+						 ui32Priority);
+
+ RGXSetComputeContextPriority_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psComputeContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hComputeContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+	}
+	/* Release the lock now that we have cleaned up the lookup handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXGetLastComputeContextResetReason(IMG_UINT32
+						ui32DispatchTableEntry,
+						PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON
+						*
+						psRGXGetLastComputeContextResetReasonIN,
+						PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON
+						*
+						psRGXGetLastComputeContextResetReasonOUT,
+						CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hComputeContext =
+	    psRGXGetLastComputeContextResetReasonIN->hComputeContext;
+	RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXGetLastComputeContextResetReasonOUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXGetLastComputeContextResetReason_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXGetLastComputeContextResetReasonOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psComputeContextInt,
+				       hComputeContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely
+	    (psRGXGetLastComputeContextResetReasonOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXGetLastComputeContextResetReason_exit;
+	}
+	/* Release the lock now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXGetLastComputeContextResetReasonOUT->eError =
+	    PVRSRVRGXGetLastComputeContextResetReasonKM(psComputeContextInt,
+							&psRGXGetLastComputeContextResetReasonOUT->
+							ui32LastResetReason,
+							&psRGXGetLastComputeContextResetReasonOUT->
+							ui32LastResetJobRef);
+
+ RGXGetLastComputeContextResetReason_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psComputeContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hComputeContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+	}
+	/* Release the lock now that we have cleaned up the lookup handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry,
+					      PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE
+					      *
+					      psRGXNotifyComputeWriteOffsetUpdateIN,
+					      PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE
+					      *
+					      psRGXNotifyComputeWriteOffsetUpdateOUT,
+					      CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hComputeContext =
+	    psRGXNotifyComputeWriteOffsetUpdateIN->hComputeContext;
+	RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXNotifyComputeWriteOffsetUpdate_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psComputeContextInt,
+				       hComputeContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely
+	    (psRGXNotifyComputeWriteOffsetUpdateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXNotifyComputeWriteOffsetUpdate_exit;
+	}
+	/* Release the lock now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+	    PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(psComputeContextInt);
+
+ RGXNotifyComputeWriteOffsetUpdate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psComputeContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hComputeContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+	}
+	/* Release the lock now that we have cleaned up the lookup handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
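+/*
+ * PVRSRVBridgeRGXKickCDM2 mirrors PVRSRVBridgeRGXKickCDM but drops the
+ * explicit server sync arrays. It is only compiled in when
+ * SUPPORT_SERVER_SYNC_IMPL is not defined; otherwise its dispatch table entry
+ * is stubbed out to NULL below.
+ */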
+#if !defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry,
+			PVRSRV_BRIDGE_IN_RGXKICKCDM2 * psRGXKickCDM2IN,
+			PVRSRV_BRIDGE_OUT_RGXKICKCDM2 * psRGXKickCDM2OUT,
+			CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hComputeContext = psRGXKickCDM2IN->hComputeContext;
+	RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClientFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientFenceOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientFenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_BYTE *psDMCmdInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXKickCDM2IN->ui32ClientFenceCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickCDM2IN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickCDM2IN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickCDM2IN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickCDM2IN->ui32ClientUpdateCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+	    (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+	    (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) + 0;
+
+	if (unlikely
+	    (psRGXKickCDM2IN->ui32ClientFenceCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickCDM2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickCDM2_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickCDM2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickCDM2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickCDM2_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickCDM2IN->ui32CmdSize >
+	     RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+	{
+		psRGXKickCDM2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickCDM2_exit;
+	}
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXKickCDM2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXKickCDM2_exit;
+		}
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXKickCDM2IN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) psRGXKickCDM2IN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXKickCDM2OUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXKickCDM2_exit;
+			}
+		}
+	}
+
+	if (psRGXKickCDM2IN->ui32ClientFenceCount != 0)
+	{
+		psClientFenceUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDM2IN->ui32ClientFenceCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientFenceUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDM2IN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDM2IN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hClientFenceUFOSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickCDM2IN->
+		     phClientFenceUFOSyncPrimBlock,
+		     psRGXKickCDM2IN->ui32ClientFenceCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM2_exit;
+		}
+	}
+	if (psRGXKickCDM2IN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDM2IN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDM2IN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientFenceOffsetInt,
+		     (const void __user *)psRGXKickCDM2IN->
+		     pui32ClientFenceOffset,
+		     psRGXKickCDM2IN->ui32ClientFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM2_exit;
+		}
+	}
+	if (psRGXKickCDM2IN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDM2IN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDM2IN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientFenceValueInt,
+		     (const void __user *)psRGXKickCDM2IN->
+		     pui32ClientFenceValue,
+		     psRGXKickCDM2IN->ui32ClientFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM2_exit;
+		}
+	}
+	if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0)
+	{
+		psClientUpdateUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDM2IN->ui32ClientUpdateCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientUpdateUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hClientUpdateUFOSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickCDM2IN->
+		     phClientUpdateUFOSyncPrimBlock,
+		     psRGXKickCDM2IN->ui32ClientUpdateCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM2_exit;
+		}
+	}
+	if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientUpdateOffsetInt,
+		     (const void __user *)psRGXKickCDM2IN->
+		     pui32ClientUpdateOffset,
+		     psRGXKickCDM2IN->ui32ClientUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM2_exit;
+		}
+	}
+	if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientUpdateValueInt,
+		     (const void __user *)psRGXKickCDM2IN->
+		     pui32ClientUpdateValue,
+		     psRGXKickCDM2IN->ui32ClientUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM2_exit;
+		}
+	}
+
+	{
+		uiUpdateFenceNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceNameInt,
+		     (const void __user *)psRGXKickCDM2IN->puiUpdateFenceName,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM2_exit;
+		}
+		((IMG_CHAR *)
+		 uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH *
+					sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+	if (psRGXKickCDM2IN->ui32CmdSize != 0)
+	{
+		psDMCmdInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psDMCmdInt,
+		     (const void __user *)psRGXKickCDM2IN->psDMCmd,
+		     psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickCDM2_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXKickCDM2OUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psComputeContextInt,
+				       hComputeContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXKickCDM2_exit;
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickCDM2IN->ui32ClientFenceCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickCDM2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psClientFenceUFOSyncPrimBlockInt
+						       [i],
+						       hClientFenceUFOSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickCDM2_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickCDM2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psClientUpdateUFOSyncPrimBlockInt
+						       [i],
+						       hClientUpdateUFOSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickCDM2_exit;
+			}
+		}
+	}
+	/* Release the lock now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXKickCDM2OUT->eError =
+	    PVRSRVRGXKickCDM2KM(psComputeContextInt,
+				psRGXKickCDM2IN->ui32ClientCacheOpSeqNum,
+				psRGXKickCDM2IN->ui32ClientFenceCount,
+				psClientFenceUFOSyncPrimBlockInt,
+				ui32ClientFenceOffsetInt,
+				ui32ClientFenceValueInt,
+				psRGXKickCDM2IN->ui32ClientUpdateCount,
+				psClientUpdateUFOSyncPrimBlockInt,
+				ui32ClientUpdateOffsetInt,
+				ui32ClientUpdateValueInt,
+				psRGXKickCDM2IN->hCheckFenceFd,
+				psRGXKickCDM2IN->hUpdateTimeline,
+				&psRGXKickCDM2OUT->hUpdateFence,
+				uiUpdateFenceNameInt,
+				psRGXKickCDM2IN->ui32CmdSize,
+				psDMCmdInt,
+				psRGXKickCDM2IN->ui32PDumpFlags,
+				psRGXKickCDM2IN->ui32ExtJobRef);
+
+ RGXKickCDM2_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psComputeContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hComputeContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+	}
+
+	if (hClientFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickCDM2IN->ui32ClientFenceCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hClientFenceUFOSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hClientFenceUFOSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hClientUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hClientUpdateUFOSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hClientUpdateUFOSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+	/* Release the lock now that we have cleaned up the lookup handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeRGXKickCDM2 NULL
+#endif
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
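+/*
+ * bUseLock is forwarded to every SetDispatchTableEntry() call below so that
+ * these entry points are dispatched with the bridge lock held.
+ */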
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXCMPBridge(void);
+PVRSRV_ERROR DeinitRGXCMPBridge(void);
+
+/*
+ * Register all RGXCMP functions with services
+ */
+PVRSRV_ERROR InitRGXCMPBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+			      PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT,
+			      PVRSRVBridgeRGXCreateComputeContext, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+			      PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT,
+			      PVRSRVBridgeRGXDestroyComputeContext, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+			      PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM,
+			      PVRSRVBridgeRGXKickCDM, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+			      PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA,
+			      PVRSRVBridgeRGXFlushComputeData, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+			      PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY,
+			      PVRSRVBridgeRGXSetComputeContextPriority, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+			      PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON,
+			      PVRSRVBridgeRGXGetLastComputeContextResetReason,
+			      NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+			      PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE,
+			      PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate,
+			      NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+			      PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2,
+			      PVRSRVBridgeRGXKickCDM2, NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXCMP functions from services
+ */
+PVRSRV_ERROR DeinitRGXCMPBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+				PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+				PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+				PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+				PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+				PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+				PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+				PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+				PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxfwdbg_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxfwdbg_bridge.c
new file mode 100644
index 0000000..4a61573
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxfwdbg_bridge.c
@@ -0,0 +1,285 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxfwdbg
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxfwdbg
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "rgxfwdbg.h"
+#include "pmr.h"
+#include "rgxtimecorr.h"
+
+#include "common_rgxfwdbg_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
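+/*
+ * These debug entry points take no handle-typed or array arguments, so they
+ * need no handle lookups or user-buffer staging; each one simply forwards to
+ * the corresponding KM implementation.
+ */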
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSLCSetBypassState(IMG_UINT32 ui32DispatchTableEntry,
+					PVRSRV_BRIDGE_IN_RGXFWDEBUGSLCSETBYPASSSTATE
+					* psRGXFWDebugSLCSetBypassStateIN,
+					PVRSRV_BRIDGE_OUT_RGXFWDEBUGSLCSETBYPASSSTATE
+					* psRGXFWDebugSLCSetBypassStateOUT,
+					CONNECTION_DATA * psConnection)
+{
+
+	psRGXFWDebugSLCSetBypassStateOUT->eError =
+	    PVRSRVRGXFWDebugSLCSetBypassStateKM(psConnection,
+						OSGetDevData(psConnection),
+						psRGXFWDebugSLCSetBypassStateIN->
+						ui32Flags,
+						psRGXFWDebugSLCSetBypassStateIN->
+						bIsBypassed);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetFWLog(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *
+			       psRGXFWDebugSetFWLogIN,
+			       PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *
+			       psRGXFWDebugSetFWLogOUT,
+			       CONNECTION_DATA * psConnection)
+{
+
+	psRGXFWDebugSetFWLogOUT->eError =
+	    PVRSRVRGXFWDebugSetFWLogKM(psConnection, OSGetDevData(psConnection),
+				       psRGXFWDebugSetFWLogIN->
+				       ui32RGXFWLogType);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXFWDebugDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry,
+					   PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST
+					   * psRGXFWDebugDumpFreelistPageListIN,
+					   PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST
+					   *
+					   psRGXFWDebugDumpFreelistPageListOUT,
+					   CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psRGXFWDebugDumpFreelistPageListIN);
+
+	psRGXFWDebugDumpFreelistPageListOUT->eError =
+	    PVRSRVRGXFWDebugDumpFreelistPageListKM(psConnection,
+						   OSGetDevData(psConnection));
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry,
+				     PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *
+				     psRGXFWDebugSetHCSDeadlineIN,
+				     PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE
+				     * psRGXFWDebugSetHCSDeadlineOUT,
+				     CONNECTION_DATA * psConnection)
+{
+
+	psRGXFWDebugSetHCSDeadlineOUT->eError =
+	    PVRSRVRGXFWDebugSetHCSDeadlineKM(psConnection,
+					     OSGetDevData(psConnection),
+					     psRGXFWDebugSetHCSDeadlineIN->
+					     ui32RGXHCSDeadline);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetOSidPriority(IMG_UINT32 ui32DispatchTableEntry,
+				      PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY
+				      * psRGXFWDebugSetOSidPriorityIN,
+				      PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY
+				      * psRGXFWDebugSetOSidPriorityOUT,
+				      CONNECTION_DATA * psConnection)
+{
+
+	psRGXFWDebugSetOSidPriorityOUT->eError =
+	    PVRSRVRGXFWDebugSetOSidPriorityKM(psConnection,
+					      OSGetDevData(psConnection),
+					      psRGXFWDebugSetOSidPriorityIN->
+					      ui32OSid,
+					      psRGXFWDebugSetOSidPriorityIN->
+					      ui32Priority);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE
+					  * psRGXFWDebugSetOSNewOnlineStateIN,
+					  PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE
+					  * psRGXFWDebugSetOSNewOnlineStateOUT,
+					  CONNECTION_DATA * psConnection)
+{
+
+	psRGXFWDebugSetOSNewOnlineStateOUT->eError =
+	    PVRSRVRGXFWDebugSetOSNewOnlineStateKM(psConnection,
+						  OSGetDevData(psConnection),
+						  psRGXFWDebugSetOSNewOnlineStateIN->
+						  ui32OSid,
+						  psRGXFWDebugSetOSNewOnlineStateIN->
+						  ui32OSNewState);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry,
+			   PVRSRV_BRIDGE_IN_RGXCURRENTTIME * psRGXCurrentTimeIN,
+			   PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *
+			   psRGXCurrentTimeOUT, CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN);
+
+	psRGXCurrentTimeOUT->eError =
+	    PVRSRVRGXCurrentTime(psConnection, OSGetDevData(psConnection),
+				 &psRGXCurrentTimeOUT->ui64Time);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXFWDBGBridge(void);
+PVRSRV_ERROR DeinitRGXFWDBGBridge(void);
+
+/*
+ * Register all RGXFWDBG functions with services
+ */
+PVRSRV_ERROR InitRGXFWDBGBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+			      PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSLCSETBYPASSSTATE,
+			      PVRSRVBridgeRGXFWDebugSLCSetBypassState, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+			      PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG,
+			      PVRSRVBridgeRGXFWDebugSetFWLog, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+			      PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST,
+			      PVRSRVBridgeRGXFWDebugDumpFreelistPageList, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+			      PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE,
+			      PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+			      PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY,
+			      PVRSRVBridgeRGXFWDebugSetOSidPriority, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+			      PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE,
+			      PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+			      PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME,
+			      PVRSRVBridgeRGXCurrentTime, NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXFWDBG functions from services
+ */
+PVRSRV_ERROR DeinitRGXFWDBGBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+				PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSLCSETBYPASSSTATE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+				PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+				PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+				PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+				PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+				PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+				PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxhwperf_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxhwperf_bridge.c
new file mode 100644
index 0000000..330d57c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxhwperf_bridge.c
@@ -0,0 +1,511 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxhwperf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxhwperf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxhwperf.h"
+
+#include "common_rgxhwperf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry,
+			  PVRSRV_BRIDGE_IN_RGXCTRLHWPERF * psRGXCtrlHWPerfIN,
+			  PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF * psRGXCtrlHWPerfOUT,
+			  CONNECTION_DATA * psConnection)
+{
+
+	psRGXCtrlHWPerfOUT->eError =
+	    PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevData(psConnection),
+				  psRGXCtrlHWPerfIN->ui32StreamId,
+				  psRGXCtrlHWPerfIN->bToggle,
+				  psRGXCtrlHWPerfIN->ui64Mask);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXConfigEnableHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS
+					  * psRGXConfigEnableHWPerfCountersIN,
+					  PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS
+					  * psRGXConfigEnableHWPerfCountersOUT,
+					  CONNECTION_DATA * psConnection)
+{
+	RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
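+	/*
+	 * Scratch space for the caller-supplied counter-block configuration
+	 * array; ui32ArrayLen is bounded by RGX_HWPERF_MAX_DEFINED_BLKS below.
+	 */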
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen *
+	     sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0;
+
+	if (unlikely
+	    (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen >
+	     RGX_HWPERF_MAX_DEFINED_BLKS))
+	{
+		psRGXConfigEnableHWPerfCountersOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXConfigEnableHWPerfCounters_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXConfigEnableHWPerfCountersIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRGXConfigEnableHWPerfCountersIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXConfigEnableHWPerfCountersOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXConfigEnableHWPerfCounters_exit;
+			}
+		}
+	}
+
+	if (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen != 0)
+	{
+		psBlockConfigsInt =
+		    (RGX_HWPERF_CONFIG_CNTBLK
+		     *) (((IMG_UINT8 *) pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen *
+		    sizeof(RGX_HWPERF_CONFIG_CNTBLK);
+	}
+
+	/* Copy the data over */
+	if (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen *
+	    sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psBlockConfigsInt,
+		     (const void __user *)psRGXConfigEnableHWPerfCountersIN->
+		     psBlockConfigs,
+		     psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen *
+		     sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK)
+		{
+			psRGXConfigEnableHWPerfCountersOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXConfigEnableHWPerfCounters_exit;
+		}
+	}
+
+	psRGXConfigEnableHWPerfCountersOUT->eError =
+	    PVRSRVRGXConfigEnableHWPerfCountersKM(psConnection,
+						  OSGetDevData(psConnection),
+						  psRGXConfigEnableHWPerfCountersIN->
+						  ui32ArrayLen,
+						  psBlockConfigsInt);
+
+ RGXConfigEnableHWPerfCounters_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
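+	/* Free only when the argument data was heap-allocated rather than
+	 * packed into the spare tail of the bridge input buffer (on
+	 * INTEGRITY_OS the in-buffer path is compiled out, so any buffer is
+	 * always a separate allocation). */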
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCtrlHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+				  PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS *
+				  psRGXCtrlHWPerfCountersIN,
+				  PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS *
+				  psRGXCtrlHWPerfCountersOUT,
+				  CONNECTION_DATA * psConnection)
+{
+	IMG_UINT16 *ui16BlockIDsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16)) + 0;
+
+	if (unlikely
+	    (psRGXCtrlHWPerfCountersIN->ui32ArrayLen >
+	     RGX_HWPERF_MAX_DEFINED_BLKS))
+	{
+		psRGXCtrlHWPerfCountersOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXCtrlHWPerfCounters_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXCtrlHWPerfCountersIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRGXCtrlHWPerfCountersIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXCtrlHWPerfCountersOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXCtrlHWPerfCounters_exit;
+			}
+		}
+	}
+
+	if (psRGXCtrlHWPerfCountersIN->ui32ArrayLen != 0)
+	{
+		ui16BlockIDsInt =
+		    (IMG_UINT16 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXCtrlHWPerfCountersIN->ui32ArrayLen *
+		    sizeof(IMG_UINT16);
+	}
+
+	/* Copy the data over */
+	if (psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui16BlockIDsInt,
+		     (const void __user *)psRGXCtrlHWPerfCountersIN->
+		     pui16BlockIDs,
+		     psRGXCtrlHWPerfCountersIN->ui32ArrayLen *
+		     sizeof(IMG_UINT16)) != PVRSRV_OK)
+		{
+			psRGXCtrlHWPerfCountersOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXCtrlHWPerfCounters_exit;
+		}
+	}
+
+	psRGXCtrlHWPerfCountersOUT->eError =
+	    PVRSRVRGXCtrlHWPerfCountersKM(psConnection,
+					  OSGetDevData(psConnection),
+					  psRGXCtrlHWPerfCountersIN->bEnable,
+					  psRGXCtrlHWPerfCountersIN->
+					  ui32ArrayLen, ui16BlockIDsInt);
+
+ RGXCtrlHWPerfCounters_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry,
+				    PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS *
+				    psRGXConfigCustomCountersIN,
+				    PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *
+				    psRGXConfigCustomCountersOUT,
+				    CONNECTION_DATA * psConnection)
+{
+	IMG_UINT32 *ui32CustomCounterIDsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXConfigCustomCountersIN->ui16NumCustomCounters *
+	     sizeof(IMG_UINT32)) + 0;
+
+	if (unlikely
+	    (psRGXConfigCustomCountersIN->ui16NumCustomCounters >
+	     RGX_HWPERF_MAX_CUSTOM_CNTRS))
+	{
+		psRGXConfigCustomCountersOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXConfigCustomCounters_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXConfigCustomCountersIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRGXConfigCustomCountersIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXConfigCustomCountersOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXConfigCustomCounters_exit;
+			}
+		}
+	}
+
+	if (psRGXConfigCustomCountersIN->ui16NumCustomCounters != 0)
+	{
+		ui32CustomCounterIDsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXConfigCustomCountersIN->ui16NumCustomCounters *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXConfigCustomCountersIN->ui16NumCustomCounters *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32CustomCounterIDsInt,
+		     (const void __user *)psRGXConfigCustomCountersIN->
+		     pui32CustomCounterIDs,
+		     psRGXConfigCustomCountersIN->ui16NumCustomCounters *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXConfigCustomCountersOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXConfigCustomCounters_exit;
+		}
+	}
+
+	psRGXConfigCustomCountersOUT->eError =
+	    PVRSRVRGXConfigCustomCountersKM(psConnection,
+					    OSGetDevData(psConnection),
+					    psRGXConfigCustomCountersIN->
+					    ui16CustomBlockID,
+					    psRGXConfigCustomCountersIN->
+					    ui16NumCustomCounters,
+					    ui32CustomCounterIDsInt);
+
+ RGXConfigCustomCounters_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags(IMG_UINT32 ui32DispatchTableEntry,
+					 PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS
+					 * psRGXGetHWPerfBvncFeatureFlagsIN,
+					 PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS
+					 * psRGXGetHWPerfBvncFeatureFlagsOUT,
+					 CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psRGXGetHWPerfBvncFeatureFlagsIN);
+
+	psRGXGetHWPerfBvncFeatureFlagsOUT->eError =
+	    PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(psConnection,
+						 OSGetDevData(psConnection),
+						 &psRGXGetHWPerfBvncFeatureFlagsOUT->
+						 sBVNC);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXHWPERFBridge(void);
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void);
+
+/*
+ * Register all RGXHWPERF functions with services
+ */
+PVRSRV_ERROR InitRGXHWPERFBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+			      PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF,
+			      PVRSRVBridgeRGXCtrlHWPerf, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+			      PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS,
+			      PVRSRVBridgeRGXConfigEnableHWPerfCounters, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+			      PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS,
+			      PVRSRVBridgeRGXCtrlHWPerfCounters, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+			      PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS,
+			      PVRSRVBridgeRGXConfigCustomCounters, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+			      PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS,
+			      PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags, NULL,
+			      bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXHWPERF functions from services
+ */
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+				PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+				PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+				PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+				PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+				PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxkicksync_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxkicksync_bridge.c
new file mode 100644
index 0000000..f8eafc7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxkicksync_bridge.c
@@ -0,0 +1,1199 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxkicksync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxkicksync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxkicksync.h"
+
+#include "common_rgxkicksync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
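+
+/*
+ * Handle lookups and releases in the handlers below are performed under
+ * the connection's handle-base lock: LockHandle() is taken around each
+ * batch of PVRSRVLookupHandleUnlocked() / PVRSRVReleaseHandleUnlocked()
+ * calls and dropped again before calling into the server-side KM
+ * functions.
+ */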
+
+static IMG_INT
+PVRSRVBridgeRGXCreateKickSyncContext(IMG_UINT32 ui32DispatchTableEntry,
+				     PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *
+				     psRGXCreateKickSyncContextIN,
+				     PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT
+				     * psRGXCreateKickSyncContextOUT,
+				     CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPrivData = psRGXCreateKickSyncContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXCreateKickSyncContextOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hPrivDataInt,
+				       hPrivData,
+				       PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+				       IMG_TRUE);
+	if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateKickSyncContext_exit;
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXCreateKickSyncContextOUT->eError =
+	    PVRSRVRGXCreateKickSyncContextKM(psConnection,
+					     OSGetDevData(psConnection),
+					     hPrivDataInt,
+					     psRGXCreateKickSyncContextIN->
+					     ui32PackedCCBSizeU88,
+					     &psKickSyncContextInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK))
+	{
+		goto RGXCreateKickSyncContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXCreateKickSyncContextOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRGXCreateKickSyncContextOUT->
+				      hKickSyncContext,
+				      (void *)psKickSyncContextInt,
+				      PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      PVRSRVRGXDestroyKickSyncContextKM);
+	if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateKickSyncContext_exit;
+	}
+
+	/* Release now that we have created the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXCreateKickSyncContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData,
+					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+	{
+		if (psKickSyncContextInt)
+		{
+			PVRSRVRGXDestroyKickSyncContextKM(psKickSyncContextInt);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyKickSyncContext(IMG_UINT32 ui32DispatchTableEntry,
+				      PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT
+				      * psRGXDestroyKickSyncContextIN,
+				      PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT
+				      * psRGXDestroyKickSyncContextOUT,
+				      CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXDestroyKickSyncContextOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE)
+					psRGXDestroyKickSyncContextIN->
+					hKickSyncContext,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+	if (unlikely
+	    ((psRGXDestroyKickSyncContextOUT->eError != PVRSRV_OK)
+	     && (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeRGXDestroyKickSyncContext: %s",
+			 PVRSRVGetErrorString(psRGXDestroyKickSyncContextOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXDestroyKickSyncContext_exit;
+	}
+
+	/* Release now that we have destroyed the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXDestroyKickSyncContext_exit:
+
+	return 0;
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeRGXKickSync(IMG_UINT32 ui32DispatchTableEntry,
+			PVRSRV_BRIDGE_IN_RGXKICKSYNC * psRGXKickSyncIN,
+			PVRSRV_BRIDGE_OUT_RGXKICKSYNC * psRGXKickSyncOUT,
+			CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hKickSyncContext = psRGXKickSyncIN->hKickSyncContext;
+	RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32FenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32FenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32UpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE **psServerSyncInt = NULL;
+	IMG_HANDLE *hServerSyncInt2 = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXKickSyncIN->ui32ClientFenceCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickSyncIN->ui32ClientUpdateCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickSyncIN->ui32ServerSyncCount *
+	     sizeof(SERVER_SYNC_PRIMITIVE *)) +
+	    (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+	    (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + 0;
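+	/* The three caller-supplied counts are each validated against
+	 * PVRSRV_MAX_SYNC_PRIMS below, which bounds this size computation. */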
+
+	if (unlikely
+	    (psRGXKickSyncIN->ui32ClientFenceCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickSyncOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickSync_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickSyncIN->ui32ClientUpdateCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickSyncOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickSync_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickSyncIN->ui32ServerSyncCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickSyncOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickSync_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXKickSyncIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) psRGXKickSyncIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXKickSyncOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXKickSync_exit;
+			}
+		}
+	}
+
+	if (psRGXKickSyncIN->ui32ClientFenceCount != 0)
+	{
+		psFenceUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSyncIN->ui32ClientFenceCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hFenceUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hFenceUFOSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickSyncIN->
+		     phFenceUFOSyncPrimBlock,
+		     psRGXKickSyncIN->ui32ClientFenceCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ClientFenceCount != 0)
+	{
+		ui32FenceSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32FenceSyncOffsetInt,
+		     (const void __user *)psRGXKickSyncIN->pui32FenceSyncOffset,
+		     psRGXKickSyncIN->ui32ClientFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ClientFenceCount != 0)
+	{
+		ui32FenceValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32FenceValueInt,
+		     (const void __user *)psRGXKickSyncIN->pui32FenceValue,
+		     psRGXKickSyncIN->ui32ClientFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ClientUpdateCount != 0)
+	{
+		psUpdateUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSyncIN->ui32ClientUpdateCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hUpdateUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hUpdateUFOSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickSyncIN->
+		     phUpdateUFOSyncPrimBlock,
+		     psRGXKickSyncIN->ui32ClientUpdateCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32UpdateSyncOffsetInt,
+		     (const void __user *)psRGXKickSyncIN->
+		     pui32UpdateSyncOffset,
+		     psRGXKickSyncIN->ui32ClientUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32UpdateValueInt,
+		     (const void __user *)psRGXKickSyncIN->pui32UpdateValue,
+		     psRGXKickSyncIN->ui32ClientUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerSyncFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ServerSyncFlagsInt,
+		     (const void __user *)psRGXKickSyncIN->pui32ServerSyncFlags,
+		     psRGXKickSyncIN->ui32ServerSyncCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncInt =
+		    (SERVER_SYNC_PRIMITIVE **) (((IMG_UINT8 *) pArrayArgsBuffer)
+						+ ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSyncIN->ui32ServerSyncCount *
+		    sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServerSyncInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hServerSyncInt2,
+		     (const void __user *)psRGXKickSyncIN->phServerSync,
+		     psRGXKickSyncIN->ui32ServerSyncCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+
+	{
+		uiUpdateFenceNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceNameInt,
+		     (const void __user *)psRGXKickSyncIN->puiUpdateFenceName,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+		((IMG_CHAR *)
+		 uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH *
+					sizeof(IMG_CHAR)) - 1] = '\0';
+	}
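+	/* The update fence name is force-terminated above, so the KM call
+	 * below always receives a bounded, NUL-terminated string. */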
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXKickSyncOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psKickSyncContextInt,
+				       hKickSyncContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXKickSyncOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXKickSync_exit;
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickSyncIN->ui32ClientFenceCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickSyncOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psFenceUFOSyncPrimBlockInt
+						       [i],
+						       hFenceUFOSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickSyncOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickSync_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickSyncIN->ui32ClientUpdateCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickSyncOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psUpdateUFOSyncPrimBlockInt
+						       [i],
+						       hUpdateUFOSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickSyncOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickSync_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickSyncIN->ui32ServerSyncCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickSyncOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psServerSyncInt[i],
+						       hServerSyncInt2[i],
+						       PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickSyncOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickSync_exit;
+			}
+		}
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXKickSyncOUT->eError =
+	    PVRSRVRGXKickSyncKM(psKickSyncContextInt,
+				psRGXKickSyncIN->ui32ClientCacheOpSeqNum,
+				psRGXKickSyncIN->ui32ClientFenceCount,
+				psFenceUFOSyncPrimBlockInt,
+				ui32FenceSyncOffsetInt,
+				ui32FenceValueInt,
+				psRGXKickSyncIN->ui32ClientUpdateCount,
+				psUpdateUFOSyncPrimBlockInt,
+				ui32UpdateSyncOffsetInt,
+				ui32UpdateValueInt,
+				psRGXKickSyncIN->ui32ServerSyncCount,
+				ui32ServerSyncFlagsInt,
+				psServerSyncInt,
+				psRGXKickSyncIN->hCheckFenceFD,
+				psRGXKickSyncIN->hTimelineFenceFD,
+				&psRGXKickSyncOUT->hUpdateFenceFD,
+				uiUpdateFenceNameInt,
+				psRGXKickSyncIN->ui32ExtJobRef);
+
+ RGXKickSync_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psKickSyncContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hKickSyncContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+	}
+
+	if (hFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickSyncIN->ui32ClientFenceCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hFenceUFOSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hFenceUFOSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickSyncIN->ui32ClientUpdateCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hUpdateUFOSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hUpdateUFOSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hServerSyncInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickSyncIN->ui32ServerSyncCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hServerSyncInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hServerSyncInt2[i],
+							    PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+			}
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+#else
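+/* Without SUPPORT_SERVER_SYNC_IMPL this entry point is compiled out and
+ * SetDispatchTableEntry() below registers a NULL handler in its place. */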
+#define PVRSRVBridgeRGXKickSync NULL
+#endif
+
+#if !defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry,
+			 PVRSRV_BRIDGE_IN_RGXKICKSYNC2 * psRGXKickSync2IN,
+			 PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 * psRGXKickSync2OUT,
+			 CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hKickSyncContext = psRGXKickSync2IN->hKickSyncContext;
+	RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32FenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32FenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32UpdateValueInt = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXKickSync2IN->ui32ClientFenceCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickSync2IN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickSync2IN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickSync2IN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickSync2IN->ui32ClientUpdateCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+	    (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + 0;
+
+	if (unlikely
+	    (psRGXKickSync2IN->ui32ClientFenceCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickSync2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickSync2_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickSync2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickSync2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickSync2_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXKickSync2IN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) psRGXKickSync2IN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXKickSync2OUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXKickSync2_exit;
+			}
+		}
+	}
+
+	if (psRGXKickSync2IN->ui32ClientFenceCount != 0)
+	{
+		psFenceUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSync2IN->ui32ClientFenceCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hFenceUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSync2IN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSync2IN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hFenceUFOSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickSync2IN->
+		     phFenceUFOSyncPrimBlock,
+		     psRGXKickSync2IN->ui32ClientFenceCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync2_exit;
+		}
+	}
+	if (psRGXKickSync2IN->ui32ClientFenceCount != 0)
+	{
+		ui32FenceSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSync2IN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSync2IN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32FenceSyncOffsetInt,
+		     (const void __user *)psRGXKickSync2IN->
+		     pui32FenceSyncOffset,
+		     psRGXKickSync2IN->ui32ClientFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync2_exit;
+		}
+	}
+	if (psRGXKickSync2IN->ui32ClientFenceCount != 0)
+	{
+		ui32FenceValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSync2IN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSync2IN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32FenceValueInt,
+		     (const void __user *)psRGXKickSync2IN->pui32FenceValue,
+		     psRGXKickSync2IN->ui32ClientFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync2_exit;
+		}
+	}
+	if (psRGXKickSync2IN->ui32ClientUpdateCount != 0)
+	{
+		psUpdateUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSync2IN->ui32ClientUpdateCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hUpdateUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSync2IN->ui32ClientUpdateCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hUpdateUFOSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickSync2IN->
+		     phUpdateUFOSyncPrimBlock,
+		     psRGXKickSync2IN->ui32ClientUpdateCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync2_exit;
+		}
+	}
+	if (psRGXKickSync2IN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSync2IN->ui32ClientUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32UpdateSyncOffsetInt,
+		     (const void __user *)psRGXKickSync2IN->
+		     pui32UpdateSyncOffset,
+		     psRGXKickSync2IN->ui32ClientUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync2_exit;
+		}
+	}
+	if (psRGXKickSync2IN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickSync2IN->ui32ClientUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32UpdateValueInt,
+		     (const void __user *)psRGXKickSync2IN->pui32UpdateValue,
+		     psRGXKickSync2IN->ui32ClientUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync2_exit;
+		}
+	}
+
+	{
+		uiUpdateFenceNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceNameInt,
+		     (const void __user *)psRGXKickSync2IN->puiUpdateFenceName,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync2_exit;
+		}
+		((IMG_CHAR *)
+		 uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH *
+					sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXKickSync2OUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psKickSyncContextInt,
+				       hKickSyncContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXKickSync2_exit;
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickSync2IN->ui32ClientFenceCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickSync2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psFenceUFOSyncPrimBlockInt
+						       [i],
+						       hFenceUFOSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickSync2_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickSync2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psUpdateUFOSyncPrimBlockInt
+						       [i],
+						       hUpdateUFOSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickSync2_exit;
+			}
+		}
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXKickSync2OUT->eError =
+	    PVRSRVRGXKickSync2KM(psKickSyncContextInt,
+				 psRGXKickSync2IN->ui32ClientCacheOpSeqNum,
+				 psRGXKickSync2IN->ui32ClientFenceCount,
+				 psFenceUFOSyncPrimBlockInt,
+				 ui32FenceSyncOffsetInt,
+				 ui32FenceValueInt,
+				 psRGXKickSync2IN->ui32ClientUpdateCount,
+				 psUpdateUFOSyncPrimBlockInt,
+				 ui32UpdateSyncOffsetInt,
+				 ui32UpdateValueInt,
+				 psRGXKickSync2IN->hCheckFenceFD,
+				 psRGXKickSync2IN->hTimelineFenceFD,
+				 &psRGXKickSync2OUT->hUpdateFenceFD,
+				 uiUpdateFenceNameInt,
+				 psRGXKickSync2IN->ui32ExtJobRef);
+
+ RGXKickSync2_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psKickSyncContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hKickSyncContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+	}
+
+	if (hFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickSync2IN->ui32ClientFenceCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hFenceUFOSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hFenceUFOSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hUpdateUFOSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hUpdateUFOSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+#else
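+/* The guards on RGXKickSync and RGXKickSync2 are complementary, so exactly
+ * one of the two kick entry points is compiled in for any given build; the
+ * other is registered as NULL. */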
+#define PVRSRVBridgeRGXKickSync2 NULL
+#endif
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
+PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void);
+
+/*
+ * Register all RGXKICKSYNC functions with services
+ */
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+			      PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT,
+			      PVRSRVBridgeRGXCreateKickSyncContext, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+			      PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT,
+			      PVRSRVBridgeRGXDestroyKickSyncContext, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+			      PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC,
+			      PVRSRVBridgeRGXKickSync, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+			      PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2,
+			      PVRSRVBridgeRGXKickSync2, NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXKICKSYNC functions from services
+ */
+PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+				PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+				PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+				PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+				PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxregconfig_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxregconfig_bridge.c
new file mode 100644
index 0000000..d3c890d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxregconfig_bridge.c
@@ -0,0 +1,232 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxregconfig
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxregconfig
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxregconfig.h"
+
+#include "common_rgxregconfig_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
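+
+/*
+ * The register-configuration handlers below take only scalar inputs, so
+ * no array-marshalling buffer is needed; each simply forwards its
+ * arguments to the corresponding *KM function and returns its status.
+ */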
+
+static IMG_INT
+PVRSRVBridgeRGXSetRegConfigType(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *
+				psRGXSetRegConfigTypeIN,
+				PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *
+				psRGXSetRegConfigTypeOUT,
+				CONNECTION_DATA * psConnection)
+{
+
+	psRGXSetRegConfigTypeOUT->eError =
+	    PVRSRVRGXSetRegConfigTypeKM(psConnection,
+					OSGetDevData(psConnection),
+					psRGXSetRegConfigTypeIN->
+					ui8RegPowerIsland);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXAddRegconfig(IMG_UINT32 ui32DispatchTableEntry,
+			    PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *
+			    psRGXAddRegconfigIN,
+			    PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *
+			    psRGXAddRegconfigOUT,
+			    CONNECTION_DATA * psConnection)
+{
+
+	psRGXAddRegconfigOUT->eError =
+	    PVRSRVRGXAddRegConfigKM(psConnection, OSGetDevData(psConnection),
+				    psRGXAddRegconfigIN->ui32RegAddr,
+				    psRGXAddRegconfigIN->ui64RegValue,
+				    psRGXAddRegconfigIN->ui64RegMask);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXClearRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+			      PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *
+			      psRGXClearRegConfigIN,
+			      PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *
+			      psRGXClearRegConfigOUT,
+			      CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psRGXClearRegConfigIN);
+
+	psRGXClearRegConfigOUT->eError =
+	    PVRSRVRGXClearRegConfigKM(psConnection, OSGetDevData(psConnection));
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXEnableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *
+			       psRGXEnableRegConfigIN,
+			       PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *
+			       psRGXEnableRegConfigOUT,
+			       CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psRGXEnableRegConfigIN);
+
+	psRGXEnableRegConfigOUT->eError =
+	    PVRSRVRGXEnableRegConfigKM(psConnection,
+				       OSGetDevData(psConnection));
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDisableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *
+				psRGXDisableRegConfigIN,
+				PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *
+				psRGXDisableRegConfigOUT,
+				CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psRGXDisableRegConfigIN);
+
+	psRGXDisableRegConfigOUT->eError =
+	    PVRSRVRGXDisableRegConfigKM(psConnection,
+					OSGetDevData(psConnection));
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */
+
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+PVRSRV_ERROR InitRGXREGCONFIGBridge(void);
+PVRSRV_ERROR DeinitRGXREGCONFIGBridge(void);
+
+/*
+ * Register all RGXREGCONFIG functions with services
+ */
+PVRSRV_ERROR InitRGXREGCONFIGBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+			      PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE,
+			      PVRSRVBridgeRGXSetRegConfigType, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+			      PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG,
+			      PVRSRVBridgeRGXAddRegconfig, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+			      PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG,
+			      PVRSRVBridgeRGXClearRegConfig, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+			      PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG,
+			      PVRSRVBridgeRGXEnableRegConfig, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+			      PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG,
+			      PVRSRVBridgeRGXDisableRegConfig, NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXREGCONFIG functions from services
+ */
+PVRSRV_ERROR DeinitRGXREGCONFIGBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+				PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+				PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+				PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+				PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+				PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG);
+
+	return PVRSRV_OK;
+}
+#else /* EXCLUDE_RGXREGCONFIG_BRIDGE */
+/* This bridge is conditional on EXCLUDE_RGXREGCONFIG_BRIDGE: when it is
+ * defined, the dispatch table is not populated with these functions.
+ */
+#define InitRGXREGCONFIGBridge() \
+	PVRSRV_OK
+
+#define DeinitRGXREGCONFIGBridge() \
+	PVRSRV_OK
+
+#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxsignals_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxsignals_bridge.c
new file mode 100644
index 0000000..3b5606e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxsignals_bridge.c
@@ -0,0 +1,172 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxsignals
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxsignals
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxsignals.h"
+
+#include "common_rgxsignals_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "rgx_bvnc_defs_km.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXNotifySignalUpdate(IMG_UINT32 ui32DispatchTableEntry,
+				  PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE *
+				  psRGXNotifySignalUpdateIN,
+				  PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE *
+				  psRGXNotifySignalUpdateOUT,
+				  CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPrivData = psRGXNotifySignalUpdateIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK))
+		{
+			psRGXNotifySignalUpdateOUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXNotifySignalUpdate_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXNotifySignalUpdateOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hPrivDataInt,
+				       hPrivData,
+				       PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+				       IMG_TRUE);
+	if (unlikely(psRGXNotifySignalUpdateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXNotifySignalUpdate_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXNotifySignalUpdateOUT->eError =
+	    PVRSRVRGXNotifySignalUpdateKM(psConnection,
+					  OSGetDevData(psConnection),
+					  hPrivDataInt,
+					  psRGXNotifySignalUpdateIN->
+					  sDevSignalAddress);
+
+ RGXNotifySignalUpdate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData,
+					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXSIGNALSBridge(void);
+PVRSRV_ERROR DeinitRGXSIGNALSBridge(void);
+
+/*
+ * Register all RGXSIGNALS functions with services
+ */
+PVRSRV_ERROR InitRGXSIGNALSBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXSIGNALS,
+			      PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE,
+			      PVRSRVBridgeRGXNotifySignalUpdate, NULL,
+			      bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXSIGNALS functions with services
+ */
+PVRSRV_ERROR DeinitRGXSIGNALSBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXSIGNALS,
+				PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE);
+
+	return PVRSRV_OK;
+}
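Every entry point in these bridge files follows the same handle lifecycle:
look up raw handles under the per-connection handle lock, drop the lock
before calling into the server implementation, then retake it on the exit
path to release the references taken by the lookup. A condensed sketch of
that shape, using the same handle API as the generated code (the
BridgeCallSkeleton and DoServerCall names are illustrative stand-ins):

extern PVRSRV_ERROR DoServerCall(void *pvData);	/* stand-in for the real KM call */

static PVRSRV_ERROR BridgeCallSkeleton(CONNECTION_DATA *psConnection,
				       IMG_HANDLE hUserHandle)
{
	void *pvLookedUp = NULL;
	PVRSRV_ERROR eError;

	LockHandle(psConnection->psHandleBase);
	eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
					    &pvLookedUp, hUserHandle,
					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
					    IMG_TRUE);
	UnlockHandle(psConnection->psHandleBase);
	if (eError != PVRSRV_OK)
		return eError;

	/* The server call runs without the handle lock held. */
	eError = DoServerCall(pvLookedUp);

	/* Exit path: drop the reference taken by the lookup. */
	LockHandle(psConnection->psHandleBase);
	(void) PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
					   hUserHandle,
					   PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
	UnlockHandle(psConnection->psHandleBase);

	return eError;
}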
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxta3d_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxta3d_bridge.c
new file mode 100644
index 0000000..10bdd48
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxta3d_bridge.c
@@ -0,0 +1,4055 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxta3d
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxta3d
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxta3d.h"
+
+#include "common_rgxta3d_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCreateHWRTData(IMG_UINT32 ui32DispatchTableEntry,
+			      PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA *
+			      psRGXCreateHWRTDataIN,
+			      PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA *
+			      psRGXCreateHWRTDataOUT,
+			      CONNECTION_DATA * psConnection)
+{
+	RGX_FREELIST **psapsFreeListsInt = NULL;
+	IMG_HANDLE *hapsFreeListsInt2 = NULL;
+	RGX_RTDATA_CLEANUP_DATA *psCleanupCookieInt = NULL;
+	DEVMEM_MEMDESC *pssHWRTDataMemDescInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *)) +
+	    (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) + 0;
+
+	psRGXCreateHWRTDataOUT->hCleanupCookie = NULL;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXCreateHWRTDataIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRGXCreateHWRTDataIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXCreateHWRTDataOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXCreateHWRTData_exit;
+			}
+		}
+	}
+
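+	/* Carve the scratch buffer into the arrays used below: each pointer
+	 * is placed at ui32NextOffset, which then advances past the array
+	 * just assigned and must equal ui32BufferSize on exit (see the
+	 * PVR_ASSERT at the end of this function).
+	 */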
+	{
+		psapsFreeListsInt =
+		    (RGX_FREELIST **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				       ui32NextOffset);
+		ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *);
+		hapsFreeListsInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hapsFreeListsInt2,
+		     (const void __user *)psRGXCreateHWRTDataIN->phapsFreeLists,
+		     RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXCreateHWRTDataOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXCreateHWRTData_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < RGXFW_MAX_FREELISTS; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXCreateHWRTDataOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psapsFreeListsInt[i],
+						       hapsFreeListsInt2[i],
+						       PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+						       IMG_TRUE);
+			if (unlikely
+			    (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXCreateHWRTData_exit;
+			}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXCreateHWRTDataOUT->eError =
+	    RGXCreateHWRTData(psConnection, OSGetDevData(psConnection),
+			      psRGXCreateHWRTDataIN->ui32RenderTarget,
+			      psRGXCreateHWRTDataIN->sPMMlistDevVAddr,
+			      psapsFreeListsInt,
+			      &psCleanupCookieInt,
+			      psRGXCreateHWRTDataIN->ui32PPPScreen,
+			      psRGXCreateHWRTDataIN->ui64MultiSampleCtl,
+			      psRGXCreateHWRTDataIN->ui64FlippedMultiSampleCtl,
+			      psRGXCreateHWRTDataIN->ui32TPCStride,
+			      psRGXCreateHWRTDataIN->sTailPtrsDevVAddr,
+			      psRGXCreateHWRTDataIN->ui32TPCSize,
+			      psRGXCreateHWRTDataIN->ui32TEScreen,
+			      psRGXCreateHWRTDataIN->ui32TEAA,
+			      psRGXCreateHWRTDataIN->ui32TEMTILE1,
+			      psRGXCreateHWRTDataIN->ui32TEMTILE2,
+			      psRGXCreateHWRTDataIN->ui32MTileStride,
+			      psRGXCreateHWRTDataIN->ui32ui32ISPMergeLowerX,
+			      psRGXCreateHWRTDataIN->ui32ui32ISPMergeLowerY,
+			      psRGXCreateHWRTDataIN->ui32ui32ISPMergeUpperX,
+			      psRGXCreateHWRTDataIN->ui32ui32ISPMergeUpperY,
+			      psRGXCreateHWRTDataIN->ui32ui32ISPMergeScaleX,
+			      psRGXCreateHWRTDataIN->ui32ui32ISPMergeScaleY,
+			      psRGXCreateHWRTDataIN->ssMacrotileArrayDevVAddr,
+			      psRGXCreateHWRTDataIN->ssRgnHeaderDevVAddr,
+			      psRGXCreateHWRTDataIN->ssRTCDevVAddr,
+			      psRGXCreateHWRTDataIN->ui64uiRgnHeaderSize,
+			      psRGXCreateHWRTDataIN->ui32ui32ISPMtileSize,
+			      psRGXCreateHWRTDataIN->ui16MaxRTs,
+			      &pssHWRTDataMemDescInt,
+			      &psRGXCreateHWRTDataOUT->ui32FWHWRTData);
+	/* Exit early if bridged call fails */
+	if (unlikely(psRGXCreateHWRTDataOUT->eError != PVRSRV_OK))
+	{
+		goto RGXCreateHWRTData_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
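+	/* The release function registered with this handle means destroying
+	 * the handle also destroys the RTData; the error path below relies
+	 * on this when it NULLs psCleanupCookieInt after a successful
+	 * release.
+	 */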
+	psRGXCreateHWRTDataOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRGXCreateHWRTDataOUT->hCleanupCookie,
+				      (void *)psCleanupCookieInt,
+				      PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      RGXDestroyHWRTData);
+	if (unlikely(psRGXCreateHWRTDataOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateHWRTData_exit;
+	}
+
+	psRGXCreateHWRTDataOUT->eError =
+	    PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+					 &psRGXCreateHWRTDataOUT->
+					 hsHWRTDataMemDesc,
+					 (void *)pssHWRTDataMemDescInt,
+					 PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+					 PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+					 psRGXCreateHWRTDataOUT->
+					 hCleanupCookie);
+	if (unlikely(psRGXCreateHWRTDataOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateHWRTData_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXCreateHWRTData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	if (hapsFreeListsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < RGXFW_MAX_FREELISTS; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hapsFreeListsInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hapsFreeListsInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+			}
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+	{
+		if (psRGXCreateHWRTDataOUT->hCleanupCookie)
+		{
+			PVRSRV_ERROR eError;
+
+			/* Lock over handle creation cleanup. */
+			LockHandle(psConnection->psHandleBase);
+
+			eError =
+			    PVRSRVReleaseHandleUnlocked(psConnection->
+							psHandleBase,
+							(IMG_HANDLE)
+							psRGXCreateHWRTDataOUT->
+							hCleanupCookie,
+							PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+			if (unlikely
+			    ((eError != PVRSRV_OK)
+			     && (eError != PVRSRV_ERROR_RETRY)))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVBridgeRGXCreateHWRTData: %s",
+					 PVRSRVGetErrorString(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK)
+				   || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psCleanupCookieInt = NULL;
+			/* Release now we have cleaned up creation handles. */
+			UnlockHandle(psConnection->psHandleBase);
+
+		}
+
+		if (psCleanupCookieInt)
+		{
+			RGXDestroyHWRTData(psCleanupCookieInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyHWRTData(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA *
+			       psRGXDestroyHWRTDataIN,
+			       PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA *
+			       psRGXDestroyHWRTDataOUT,
+			       CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXDestroyHWRTDataOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyHWRTDataIN->
+					hCleanupCookie,
+					PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+	if (unlikely
+	    ((psRGXDestroyHWRTDataOUT->eError != PVRSRV_OK)
+	     && (psRGXDestroyHWRTDataOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeRGXDestroyHWRTData: %s",
+			 PVRSRVGetErrorString(psRGXDestroyHWRTDataOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXDestroyHWRTData_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXDestroyHWRTData_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRenderTarget(IMG_UINT32 ui32DispatchTableEntry,
+				  PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET *
+				  psRGXCreateRenderTargetIN,
+				  PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET *
+				  psRGXCreateRenderTargetOUT,
+				  CONNECTION_DATA * psConnection)
+{
+	RGX_RT_CLEANUP_DATA *pssRenderTargetMemDescInt = NULL;
+
+	psRGXCreateRenderTargetOUT->eError =
+	    RGXCreateRenderTarget(psConnection, OSGetDevData(psConnection),
+				  psRGXCreateRenderTargetIN->
+				  spsVHeapTableDevVAddr,
+				  &pssRenderTargetMemDescInt,
+				  &psRGXCreateRenderTargetOUT->
+				  ui32sRenderTargetFWDevVAddr);
+	/* Exit early if bridged call fails */
+	if (unlikely(psRGXCreateRenderTargetOUT->eError != PVRSRV_OK))
+	{
+		goto RGXCreateRenderTarget_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXCreateRenderTargetOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRGXCreateRenderTargetOUT->
+				      hsRenderTargetMemDesc,
+				      (void *)pssRenderTargetMemDescInt,
+				      PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      RGXDestroyRenderTarget);
+	if (unlikely(psRGXCreateRenderTargetOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateRenderTarget_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXCreateRenderTarget_exit:
+
+	if (psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+	{
+		if (pssRenderTargetMemDescInt)
+		{
+			RGXDestroyRenderTarget(pssRenderTargetMemDescInt);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRenderTarget(IMG_UINT32 ui32DispatchTableEntry,
+				   PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET *
+				   psRGXDestroyRenderTargetIN,
+				   PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET *
+				   psRGXDestroyRenderTargetOUT,
+				   CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXDestroyRenderTargetOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE)
+					psRGXDestroyRenderTargetIN->
+					hsRenderTargetMemDesc,
+					PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET);
+	if (unlikely
+	    ((psRGXDestroyRenderTargetOUT->eError != PVRSRV_OK)
+	     && (psRGXDestroyRenderTargetOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeRGXDestroyRenderTarget: %s",
+			 PVRSRVGetErrorString(psRGXDestroyRenderTargetOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXDestroyRenderTarget_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXDestroyRenderTarget_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+			      PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *
+			      psRGXCreateZSBufferIN,
+			      PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *
+			      psRGXCreateZSBufferOUT,
+			      CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation;
+	DEVMEMINT_RESERVATION *psReservationInt = NULL;
+	IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR;
+	PMR *psPMRInt = NULL;
+	RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXCreateZSBufferOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psReservationInt,
+				       hReservation,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+				       IMG_TRUE);
+	if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateZSBuffer_exit;
+	}
+
+	/* Look up the address from the handle */
+	psRGXCreateZSBufferOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateZSBuffer_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXCreateZSBufferOUT->eError =
+	    RGXCreateZSBufferKM(psConnection, OSGetDevData(psConnection),
+				psReservationInt,
+				psPMRInt,
+				psRGXCreateZSBufferIN->uiMapFlags,
+				&pssZSBufferKMInt,
+				&psRGXCreateZSBufferOUT->
+				ui32sZSBufferFWDevVAddr);
+	/* Exit early if bridged call fails */
+	if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK))
+	{
+		goto RGXCreateZSBuffer_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXCreateZSBufferOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRGXCreateZSBufferOUT->hsZSBufferKM,
+				      (void *)pssZSBufferKMInt,
+				      PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      RGXDestroyZSBufferKM);
+	if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateZSBuffer_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXCreateZSBuffer_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psReservationInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hReservation,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+	}
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		if (pssZSBufferKMInt)
+		{
+			RGXDestroyZSBufferKM(pssZSBufferKMInt);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *
+			       psRGXDestroyZSBufferIN,
+			       PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *
+			       psRGXDestroyZSBufferOUT,
+			       CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXDestroyZSBufferOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyZSBufferIN->
+					hsZSBufferMemDesc,
+					PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+	if (unlikely
+	    ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK)
+	     && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeRGXDestroyZSBuffer: %s",
+			 PVRSRVGetErrorString(psRGXDestroyZSBufferOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXDestroyZSBuffer_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXDestroyZSBuffer_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *
+				psRGXPopulateZSBufferIN,
+				PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *
+				psRGXPopulateZSBufferOUT,
+				CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM;
+	RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL;
+	RGX_POPULATION *pssPopulationInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXPopulateZSBufferOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&pssZSBufferKMInt,
+				       hsZSBufferKM,
+				       PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+				       IMG_TRUE);
+	if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXPopulateZSBuffer_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXPopulateZSBufferOUT->eError =
+	    RGXPopulateZSBufferKM(pssZSBufferKMInt, &pssPopulationInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK))
+	{
+		goto RGXPopulateZSBuffer_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXPopulateZSBufferOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRGXPopulateZSBufferOUT->hsPopulation,
+				      (void *)pssPopulationInt,
+				      PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      RGXUnpopulateZSBufferKM);
+	if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXPopulateZSBuffer_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXPopulateZSBuffer_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (pssZSBufferKMInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hsZSBufferKM,
+					    PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		if (pssPopulationInt)
+		{
+			RGXUnpopulateZSBufferKM(pssPopulationInt);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+				  PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *
+				  psRGXUnpopulateZSBufferIN,
+				  PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *
+				  psRGXUnpopulateZSBufferOUT,
+				  CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXUnpopulateZSBufferOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXUnpopulateZSBufferIN->
+					hsPopulation,
+					PVRSRV_HANDLE_TYPE_RGX_POPULATION);
+	if (unlikely
+	    ((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK)
+	     && (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeRGXUnpopulateZSBuffer: %s",
+			 PVRSRVGetErrorString(psRGXUnpopulateZSBufferOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXUnpopulateZSBuffer_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXUnpopulateZSBuffer_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry,
+			      PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *
+			      psRGXCreateFreeListIN,
+			      PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *
+			      psRGXCreateFreeListOUT,
+			      CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList;
+	RGX_FREELIST *pssGlobalFreeListInt = NULL;
+	IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR;
+	PMR *pssFreeListPMRInt = NULL;
+	RGX_FREELIST *psCleanupCookieInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	if (psRGXCreateFreeListIN->hsGlobalFreeList)
+	{
+		/* Look up the address from the handle */
+		psRGXCreateFreeListOUT->eError =
+		    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					       (void **)&pssGlobalFreeListInt,
+					       hsGlobalFreeList,
+					       PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+					       IMG_TRUE);
+		if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+		{
+			UnlockHandle(psConnection->psHandleBase);
+			goto RGXCreateFreeList_exit;
+		}
+	}
+
+	/* Look up the address from the handle */
+	psRGXCreateFreeListOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&pssFreeListPMRInt,
+				       hsFreeListPMR,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateFreeList_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXCreateFreeListOUT->eError =
+	    RGXCreateFreeList(psConnection, OSGetDevData(psConnection),
+			      psRGXCreateFreeListIN->ui32ui32MaxFLPages,
+			      psRGXCreateFreeListIN->ui32ui32InitFLPages,
+			      psRGXCreateFreeListIN->ui32ui32GrowFLPages,
+			      psRGXCreateFreeListIN->ui32ui32GrowParamThreshold,
+			      pssGlobalFreeListInt,
+			      psRGXCreateFreeListIN->bbFreeListCheck,
+			      psRGXCreateFreeListIN->spsFreeListDevVAddr,
+			      pssFreeListPMRInt,
+			      psRGXCreateFreeListIN->uiPMROffset,
+			      &psCleanupCookieInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+	{
+		goto RGXCreateFreeList_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXCreateFreeListOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRGXCreateFreeListOUT->hCleanupCookie,
+				      (void *)psCleanupCookieInt,
+				      PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      RGXDestroyFreeList);
+	if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateFreeList_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXCreateFreeList_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	if (psRGXCreateFreeListIN->hsGlobalFreeList)
+	{
+
+		/* Unreference the previously looked up handle */
+		if (pssGlobalFreeListInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hsGlobalFreeList,
+						    PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+		}
+	}
+
+	/* Unreference the previously looked up handle */
+	if (pssFreeListPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hsFreeListPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+	{
+		if (psCleanupCookieInt)
+		{
+			RGXDestroyFreeList(psCleanupCookieInt);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *
+			       psRGXDestroyFreeListIN,
+			       PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *
+			       psRGXDestroyFreeListOUT,
+			       CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXDestroyFreeListOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyFreeListIN->
+					hCleanupCookie,
+					PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+	if (unlikely
+	    ((psRGXDestroyFreeListOUT->eError != PVRSRV_OK)
+	     && (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeRGXDestroyFreeList: %s",
+			 PVRSRVGetErrorString(psRGXDestroyFreeListOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXDestroyFreeList_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXDestroyFreeList_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+				   PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *
+				   psRGXCreateRenderContextIN,
+				   PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *
+				   psRGXCreateRenderContextOUT,
+				   CONNECTION_DATA * psConnection)
+{
+	IMG_BYTE *psFrameworkCmdInt = NULL;
+	IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	IMG_BYTE *psRegsInt = NULL;
+	IMG_BYTE *psStaticRendercontextStateInt = NULL;
+	RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXCreateRenderContextIN->ui32FrameworkCmdize *
+	     sizeof(IMG_BYTE)) +
+	    (psRGXCreateRenderContextIN->ui32CtxSwitchSize * sizeof(IMG_BYTE)) +
+	    (psRGXCreateRenderContextIN->ui32StaticRendercontextStateSize *
+	     sizeof(IMG_BYTE)) + 0;
+
+	if (unlikely
+	    (psRGXCreateRenderContextIN->ui32FrameworkCmdize >
+	     RGXFWIF_RF_CMD_SIZE))
+	{
+		psRGXCreateRenderContextOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXCreateRenderContext_exit;
+	}
+
+	if (unlikely
+	    (psRGXCreateRenderContextIN->ui32CtxSwitchSize >
+	     RGXFWIF_TAREGISTERS_CSWITCH_SIZE))
+	{
+		psRGXCreateRenderContextOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXCreateRenderContext_exit;
+	}
+
+	if (unlikely
+	    (psRGXCreateRenderContextIN->ui32StaticRendercontextStateSize >
+	     RGXFWIF_STATIC_RENDERCONTEXT_SIZE))
+	{
+		psRGXCreateRenderContextOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXCreateRenderContext_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRGXCreateRenderContextIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXCreateRenderContextOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXCreateRenderContext_exit;
+			}
+		}
+	}
+
+	if (psRGXCreateRenderContextIN->ui32FrameworkCmdize != 0)
+	{
+		psFrameworkCmdInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXCreateRenderContextIN->ui32FrameworkCmdize *
+		    sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) >
+	    0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psFrameworkCmdInt,
+		     (const void __user *)psRGXCreateRenderContextIN->
+		     psFrameworkCmd,
+		     psRGXCreateRenderContextIN->ui32FrameworkCmdize *
+		     sizeof(IMG_BYTE)) != PVRSRV_OK)
+		{
+			psRGXCreateRenderContextOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXCreateRenderContext_exit;
+		}
+	}
+	if (psRGXCreateRenderContextIN->ui32CtxSwitchSize != 0)
+	{
+		psRegsInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXCreateRenderContextIN->ui32CtxSwitchSize *
+		    sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXCreateRenderContextIN->ui32CtxSwitchSize * sizeof(IMG_BYTE) >
+	    0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psRegsInt,
+		     (const void __user *)psRGXCreateRenderContextIN->psRegs,
+		     psRGXCreateRenderContextIN->ui32CtxSwitchSize *
+		     sizeof(IMG_BYTE)) != PVRSRV_OK)
+		{
+			psRGXCreateRenderContextOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXCreateRenderContext_exit;
+		}
+	}
+	if (psRGXCreateRenderContextIN->ui32StaticRendercontextStateSize != 0)
+	{
+		psStaticRendercontextStateInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXCreateRenderContextIN->
+		    ui32StaticRendercontextStateSize * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXCreateRenderContextIN->ui32StaticRendercontextStateSize *
+	    sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psStaticRendercontextStateInt,
+		     (const void __user *)psRGXCreateRenderContextIN->
+		     psStaticRendercontextState,
+		     psRGXCreateRenderContextIN->
+		     ui32StaticRendercontextStateSize * sizeof(IMG_BYTE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXCreateRenderContextOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXCreateRenderContext_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXCreateRenderContextOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hPrivDataInt,
+				       hPrivData,
+				       PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+				       IMG_TRUE);
+	if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateRenderContext_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXCreateRenderContextOUT->eError =
+	    PVRSRVRGXCreateRenderContextKM(psConnection,
+					   OSGetDevData(psConnection),
+					   psRGXCreateRenderContextIN->
+					   ui32Priority,
+					   psRGXCreateRenderContextIN->
+					   sVDMCallStackAddr,
+					   psRGXCreateRenderContextIN->
+					   ui32FrameworkCmdize,
+					   psFrameworkCmdInt, hPrivDataInt,
+					   psRGXCreateRenderContextIN->
+					   ui32CtxSwitchSize, psRegsInt,
+					   psRGXCreateRenderContextIN->
+					   ui32StaticRendercontextStateSize,
+					   psStaticRendercontextStateInt,
+					   psRGXCreateRenderContextIN->
+					   ui32PackedCCBSizeU8888,
+					   &psRenderContextInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK))
+	{
+		goto RGXCreateRenderContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXCreateRenderContextOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRGXCreateRenderContextOUT->
+				      hRenderContext,
+				      (void *)psRenderContextInt,
+				      PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      PVRSRVRGXDestroyRenderContextKM);
+	if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateRenderContext_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXCreateRenderContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData,
+					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+	{
+		if (psRenderContextInt)
+		{
+			PVRSRVRGXDestroyRenderContextKM(psRenderContextInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+				    PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *
+				    psRGXDestroyRenderContextIN,
+				    PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *
+				    psRGXDestroyRenderContextOUT,
+				    CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXDestroyRenderContextOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE)
+					psRGXDestroyRenderContextIN->
+					hCleanupCookie,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+	if (unlikely
+	    ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK)
+	     && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeRGXDestroyRenderContext: %s",
+			 PVRSRVGetErrorString(psRGXDestroyRenderContextOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXDestroyRenderContext_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXDestroyRenderContext_exit:
+
+	return 0;
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeRGXKickTA3D(IMG_UINT32 ui32DispatchTableEntry,
+			PVRSRV_BRIDGE_IN_RGXKICKTA3D * psRGXKickTA3DIN,
+			PVRSRV_BRIDGE_OUT_RGXKICKTA3D * psRGXKickTA3DOUT,
+			CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hRenderContext = psRGXKickTA3DIN->hRenderContext;
+	RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientTAFenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerTASyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE **psServerTASyncsInt = NULL;
+	IMG_HANDLE *hServerTASyncsInt2 = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClient3DFenceSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClient3DFenceSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32Client3DFenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32Client3DFenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32Client3DUpdateValueInt = NULL;
+	IMG_UINT32 *ui32Server3DSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE **psServer3DSyncsInt = NULL;
+	IMG_HANDLE *hServer3DSyncsInt2 = NULL;
+	IMG_HANDLE hPRFenceUFOSyncPrimBlock =
+	    psRGXKickTA3DIN->hPRFenceUFOSyncPrimBlock;
+	SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_CHAR *uiUpdateFenceName3DInt = NULL;
+	IMG_BYTE *psTACmdInt = NULL;
+	IMG_BYTE *ps3DPRCmdInt = NULL;
+	IMG_BYTE *ps3DCmdInt = NULL;
+	IMG_HANDLE hRTDataCleanup = psRGXKickTA3DIN->hRTDataCleanup;
+	RGX_RTDATA_CLEANUP_DATA *psRTDataCleanupInt = NULL;
+	IMG_HANDLE hZBuffer = psRGXKickTA3DIN->hZBuffer;
+	RGX_ZSBUFFER_DATA *psZBufferInt = NULL;
+	IMG_HANDLE hSBuffer = psRGXKickTA3DIN->hSBuffer;
+	RGX_ZSBUFFER_DATA *psSBufferInt = NULL;
+	IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3DIN->hMSAAScratchBuffer;
+	RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL;
+	IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+	PMR **psSyncPMRsInt = NULL;
+	IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXKickTA3DIN->ui32ClientTAFenceCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3DIN->ui32ClientTAUpdateCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3DIN->ui32ServerTASyncPrims *
+	     sizeof(SERVER_SYNC_PRIMITIVE *)) +
+	    (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE)) +
+	    (psRGXKickTA3DIN->ui32Client3DFenceCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3DIN->ui32Client3DUpdateCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3DIN->ui32Server3DSyncPrims *
+	     sizeof(SERVER_SYNC_PRIMITIVE *)) +
+	    (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE)) +
+	    (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+	    (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+	    (psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE)) +
+	    (psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE)) +
+	    (psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE)) +
+	    (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(PMR *)) +
+	    (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0;
+
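+	/* Every user-supplied count above feeds the ui32BufferSize sum, so
+	 * each one is clamped against a fixed maximum before any allocation
+	 * or copy takes place; oversized requests fail early with
+	 * PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG.
+	 */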
+	if (unlikely
+	    (psRGXKickTA3DIN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickTA3DOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3DIN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickTA3DOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3DIN->ui32ServerTASyncPrims > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickTA3DOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3DIN->ui32Client3DFenceCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickTA3DOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3DIN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickTA3DOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3DIN->ui32Server3DSyncPrims > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickTA3DOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3DIN->ui32TACmdSize >
+	     RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+	{
+		psRGXKickTA3DOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3DIN->ui323DPRCmdSize >
+	     RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+	{
+		psRGXKickTA3DOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3DIN->ui323DCmdSize >
+	     RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+	{
+		psRGXKickTA3DOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D_exit;
+	}
+
+	if (unlikely(psRGXKickTA3DIN->ui32SyncPMRCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickTA3DOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXKickTA3DIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) psRGXKickTA3DIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXKickTA3DOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXKickTA3D_exit;
+			}
+		}
+	}
+
+	if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+	{
+		psClientTAFenceSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32ClientTAFenceCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientTAFenceSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32ClientTAFenceCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hClientTAFenceSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickTA3DIN->
+		     phClientTAFenceSyncPrimBlock,
+		     psRGXKickTA3DIN->ui32ClientTAFenceCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+	{
+		ui32ClientTAFenceSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32ClientTAFenceCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientTAFenceSyncOffsetInt,
+		     (const void __user *)psRGXKickTA3DIN->
+		     pui32ClientTAFenceSyncOffset,
+		     psRGXKickTA3DIN->ui32ClientTAFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+	{
+		ui32ClientTAFenceValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32ClientTAFenceCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientTAFenceValueInt,
+		     (const void __user *)psRGXKickTA3DIN->
+		     pui32ClientTAFenceValue,
+		     psRGXKickTA3DIN->ui32ClientTAFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+	{
+		psClientTAUpdateSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32ClientTAUpdateCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientTAUpdateSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32ClientTAUpdateCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hClientTAUpdateSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickTA3DIN->
+		     phClientTAUpdateSyncPrimBlock,
+		     psRGXKickTA3DIN->ui32ClientTAUpdateCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+	{
+		ui32ClientTAUpdateSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32ClientTAUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientTAUpdateSyncOffsetInt,
+		     (const void __user *)psRGXKickTA3DIN->
+		     pui32ClientTAUpdateSyncOffset,
+		     psRGXKickTA3DIN->ui32ClientTAUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+	{
+		ui32ClientTAUpdateValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32ClientTAUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientTAUpdateValueInt,
+		     (const void __user *)psRGXKickTA3DIN->
+		     pui32ClientTAUpdateValue,
+		     psRGXKickTA3DIN->ui32ClientTAUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32ServerTASyncPrims != 0)
+	{
+		ui32ServerTASyncFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ServerTASyncFlagsInt,
+		     (const void __user *)psRGXKickTA3DIN->
+		     pui32ServerTASyncFlags,
+		     psRGXKickTA3DIN->ui32ServerTASyncPrims *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32ServerTASyncPrims != 0)
+	{
+		psServerTASyncsInt =
+		    (SERVER_SYNC_PRIMITIVE **) (((IMG_UINT8 *) pArrayArgsBuffer)
+						+ ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32ServerTASyncPrims *
+		    sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServerTASyncsInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hServerTASyncsInt2,
+		     (const void __user *)psRGXKickTA3DIN->phServerTASyncs,
+		     psRGXKickTA3DIN->ui32ServerTASyncPrims *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+	{
+		psClient3DFenceSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32Client3DFenceCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClient3DFenceSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32Client3DFenceCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hClient3DFenceSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickTA3DIN->
+		     phClient3DFenceSyncPrimBlock,
+		     psRGXKickTA3DIN->ui32Client3DFenceCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+	{
+		ui32Client3DFenceSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32Client3DFenceCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32Client3DFenceSyncOffsetInt,
+		     (const void __user *)psRGXKickTA3DIN->
+		     pui32Client3DFenceSyncOffset,
+		     psRGXKickTA3DIN->ui32Client3DFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+	{
+		ui32Client3DFenceValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32Client3DFenceCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32Client3DFenceValueInt,
+		     (const void __user *)psRGXKickTA3DIN->
+		     pui32Client3DFenceValue,
+		     psRGXKickTA3DIN->ui32Client3DFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+	{
+		psClient3DUpdateSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32Client3DUpdateCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClient3DUpdateSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32Client3DUpdateCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hClient3DUpdateSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickTA3DIN->
+		     phClient3DUpdateSyncPrimBlock,
+		     psRGXKickTA3DIN->ui32Client3DUpdateCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+	{
+		ui32Client3DUpdateSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32Client3DUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32Client3DUpdateSyncOffsetInt,
+		     (const void __user *)psRGXKickTA3DIN->
+		     pui32Client3DUpdateSyncOffset,
+		     psRGXKickTA3DIN->ui32Client3DUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+	{
+		ui32Client3DUpdateValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32Client3DUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32Client3DUpdateValueInt,
+		     (const void __user *)psRGXKickTA3DIN->
+		     pui32Client3DUpdateValue,
+		     psRGXKickTA3DIN->ui32Client3DUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32Server3DSyncPrims != 0)
+	{
+		ui32Server3DSyncFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32Server3DSyncFlagsInt,
+		     (const void __user *)psRGXKickTA3DIN->
+		     pui32Server3DSyncFlags,
+		     psRGXKickTA3DIN->ui32Server3DSyncPrims *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32Server3DSyncPrims != 0)
+	{
+		psServer3DSyncsInt =
+		    (SERVER_SYNC_PRIMITIVE **) (((IMG_UINT8 *) pArrayArgsBuffer)
+						+ ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32Server3DSyncPrims *
+		    sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServer3DSyncsInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hServer3DSyncsInt2,
+		     (const void __user *)psRGXKickTA3DIN->phServer3DSyncs,
+		     psRGXKickTA3DIN->ui32Server3DSyncPrims *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+
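+	/* The two update-fence name buffers below are fixed-size
+	 * (PVRSRV_SYNC_NAME_LENGTH bytes): they are always copied from user
+	 * space and forcibly NUL-terminated. */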
+	{
+		uiUpdateFenceNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceNameInt,
+		     (const void __user *)psRGXKickTA3DIN->puiUpdateFenceName,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+		((IMG_CHAR *)
+		 uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH *
+					sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+
+	{
+		uiUpdateFenceName3DInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceName3DInt,
+		     (const void __user *)psRGXKickTA3DIN->puiUpdateFenceName3D,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+		((IMG_CHAR *)
+		 uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH *
+					  sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+	if (psRGXKickTA3DIN->ui32TACmdSize != 0)
+	{
+		psTACmdInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psTACmdInt,
+		     (const void __user *)psRGXKickTA3DIN->psTACmd,
+		     psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui323DPRCmdSize != 0)
+	{
+		ps3DPRCmdInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ps3DPRCmdInt,
+		     (const void __user *)psRGXKickTA3DIN->ps3DPRCmd,
+		     psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui323DCmdSize != 0)
+	{
+		ps3DCmdInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ps3DCmdInt,
+		     (const void __user *)psRGXKickTA3DIN->ps3DCmd,
+		     psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32SyncPMRCount != 0)
+	{
+		ui32SyncPMRFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32SyncPMRFlagsInt,
+		     (const void __user *)psRGXKickTA3DIN->pui32SyncPMRFlags,
+		     psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) !=
+		    PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+	if (psRGXKickTA3DIN->ui32SyncPMRCount != 0)
+	{
+		psSyncPMRsInt =
+		    (PMR **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+			      ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(PMR *);
+		hSyncPMRsInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hSyncPMRsInt2,
+		     (const void __user *)psRGXKickTA3DIN->phSyncPMRs,
+		     psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D_exit;
+		}
+	}
+
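+	/* All user-supplied arrays are copied in at this point; resolve every
+	 * raw handle to its kernel object under a single handle-base lock.
+	 * Each successful lookup takes a reference that the RGXKickTA3D_exit
+	 * path releases. */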
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXKickTA3DOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psRenderContextInt,
+				       hRenderContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXKickTA3D_exit;
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32ClientTAFenceCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3DOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psClientTAFenceSyncPrimBlockInt
+						       [i],
+						       hClientTAFenceSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32ClientTAUpdateCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3DOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psClientTAUpdateSyncPrimBlockInt
+						       [i],
+						       hClientTAUpdateSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32ServerTASyncPrims; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3DOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psServerTASyncsInt[i],
+						       hServerTASyncsInt2[i],
+						       PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32Client3DFenceCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3DOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psClient3DFenceSyncPrimBlockInt
+						       [i],
+						       hClient3DFenceSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32Client3DUpdateCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3DOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psClient3DUpdateSyncPrimBlockInt
+						       [i],
+						       hClient3DUpdateSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32Server3DSyncPrims; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3DOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psServer3DSyncsInt[i],
+						       hServer3DSyncsInt2[i],
+						       PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D_exit;
+			}
+		}
+	}
+
+	/* Look up the address from the handle */
+	psRGXKickTA3DOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPRFenceUFOSyncPrimBlockInt,
+				       hPRFenceUFOSyncPrimBlock,
+				       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+				       IMG_TRUE);
+	if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXKickTA3D_exit;
+	}
+
+	if (psRGXKickTA3DIN->hRTDataCleanup)
+	{
+		/* Look up the address from the handle */
+		psRGXKickTA3DOUT->eError =
+		    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					       (void **)&psRTDataCleanupInt,
+					       hRTDataCleanup,
+					       PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+					       IMG_TRUE);
+		if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+		{
+			UnlockHandle(psConnection->psHandleBase);
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+	if (psRGXKickTA3DIN->hZBuffer)
+	{
+		/* Look up the address from the handle */
+		psRGXKickTA3DOUT->eError =
+		    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					       (void **)&psZBufferInt,
+					       hZBuffer,
+					       PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+					       IMG_TRUE);
+		if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+		{
+			UnlockHandle(psConnection->psHandleBase);
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+	if (psRGXKickTA3DIN->hSBuffer)
+	{
+		/* Look up the address from the handle */
+		psRGXKickTA3DOUT->eError =
+		    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					       (void **)&psSBufferInt,
+					       hSBuffer,
+					       PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+					       IMG_TRUE);
+		if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+		{
+			UnlockHandle(psConnection->psHandleBase);
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+	if (psRGXKickTA3DIN->hMSAAScratchBuffer)
+	{
+		/* Look up the address from the handle */
+		psRGXKickTA3DOUT->eError =
+		    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					       (void **)&psMSAAScratchBufferInt,
+					       hMSAAScratchBuffer,
+					       PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+					       IMG_TRUE);
+		if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+		{
+			UnlockHandle(psConnection->psHandleBase);
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32SyncPMRCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3DOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psSyncPMRsInt[i],
+						       hSyncPMRsInt2[i],
+						       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3DOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D_exit;
+			}
+		}
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXKickTA3DOUT->eError =
+	    PVRSRVRGXKickTA3DKM(psRenderContextInt,
+				psRGXKickTA3DIN->ui32ClientCacheOpSeqNum,
+				psRGXKickTA3DIN->ui32ClientTAFenceCount,
+				psClientTAFenceSyncPrimBlockInt,
+				ui32ClientTAFenceSyncOffsetInt,
+				ui32ClientTAFenceValueInt,
+				psRGXKickTA3DIN->ui32ClientTAUpdateCount,
+				psClientTAUpdateSyncPrimBlockInt,
+				ui32ClientTAUpdateSyncOffsetInt,
+				ui32ClientTAUpdateValueInt,
+				psRGXKickTA3DIN->ui32ServerTASyncPrims,
+				ui32ServerTASyncFlagsInt,
+				psServerTASyncsInt,
+				psRGXKickTA3DIN->ui32Client3DFenceCount,
+				psClient3DFenceSyncPrimBlockInt,
+				ui32Client3DFenceSyncOffsetInt,
+				ui32Client3DFenceValueInt,
+				psRGXKickTA3DIN->ui32Client3DUpdateCount,
+				psClient3DUpdateSyncPrimBlockInt,
+				ui32Client3DUpdateSyncOffsetInt,
+				ui32Client3DUpdateValueInt,
+				psRGXKickTA3DIN->ui32Server3DSyncPrims,
+				ui32Server3DSyncFlagsInt,
+				psServer3DSyncsInt,
+				psPRFenceUFOSyncPrimBlockInt,
+				psRGXKickTA3DIN->ui32FRFenceUFOSyncOffset,
+				psRGXKickTA3DIN->ui32FRFenceValue,
+				psRGXKickTA3DIN->hCheckFence,
+				psRGXKickTA3DIN->hUpdateTimeline,
+				&psRGXKickTA3DOUT->hUpdateFence,
+				uiUpdateFenceNameInt,
+				psRGXKickTA3DIN->hCheckFence3D,
+				psRGXKickTA3DIN->hUpdateTimeline3D,
+				&psRGXKickTA3DOUT->hUpdateFence3D,
+				uiUpdateFenceName3DInt,
+				psRGXKickTA3DIN->ui32TACmdSize,
+				psTACmdInt,
+				psRGXKickTA3DIN->ui323DPRCmdSize,
+				ps3DPRCmdInt,
+				psRGXKickTA3DIN->ui323DCmdSize,
+				ps3DCmdInt,
+				psRGXKickTA3DIN->ui32ExtJobRef,
+				psRGXKickTA3DIN->bbLastTAInScene,
+				psRGXKickTA3DIN->bbKickTA,
+				psRGXKickTA3DIN->bbKickPR,
+				psRGXKickTA3DIN->bbKick3D,
+				psRGXKickTA3DIN->bbAbort,
+				psRGXKickTA3DIN->ui32PDumpFlags,
+				psRTDataCleanupInt,
+				psZBufferInt,
+				psSBufferInt,
+				psMSAAScratchBufferInt,
+				psRGXKickTA3DIN->ui32SyncPMRCount,
+				ui32SyncPMRFlagsInt,
+				psSyncPMRsInt,
+				psRGXKickTA3DIN->ui32RenderTargetSize,
+				psRGXKickTA3DIN->ui32NumberOfDrawCalls,
+				psRGXKickTA3DIN->ui32NumberOfIndices,
+				psRGXKickTA3DIN->ui32NumberOfMRTs,
+				psRGXKickTA3DIN->ui64Deadline);
+
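+	/* Common exit path: reached on success and on every early error exit.
+	 * It drops the handle references taken above and frees the argument
+	 * buffer if one was heap-allocated. */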
+ RGXKickTA3D_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psRenderContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hRenderContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+	}
+
+	if (hClientTAFenceSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32ClientTAFenceCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hClientTAFenceSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hClientTAFenceSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hClientTAUpdateSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32ClientTAUpdateCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hClientTAUpdateSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hClientTAUpdateSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hServerTASyncsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32ServerTASyncPrims; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hServerTASyncsInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hServerTASyncsInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+			}
+		}
+	}
+
+	if (hClient3DFenceSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32Client3DFenceCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hClient3DFenceSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hClient3DFenceSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hClient3DUpdateSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32Client3DUpdateCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hClient3DUpdateSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hClient3DUpdateSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hServer3DSyncsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32Server3DSyncPrims; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hServer3DSyncsInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hServer3DSyncsInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+			}
+		}
+	}
+
+	/* Unreference the previously looked up handle */
+	if (psPRFenceUFOSyncPrimBlockInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPRFenceUFOSyncPrimBlock,
+					    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+
+	if (psRGXKickTA3DIN->hRTDataCleanup)
+	{
+
+		/* Unreference the previously looked up handle */
+		if (psRTDataCleanupInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hRTDataCleanup,
+						    PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+		}
+	}
+
+	if (psRGXKickTA3DIN->hZBuffer)
+	{
+
+		/* Unreference the previously looked up handle */
+		if (psZBufferInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hZBuffer,
+						    PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+		}
+	}
+
+	if (psRGXKickTA3DIN->hSBuffer)
+	{
+
+		/* Unreference the previously looked up handle */
+		if (psSBufferInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hSBuffer,
+						    PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+		}
+	}
+
+	if (psRGXKickTA3DIN->hMSAAScratchBuffer)
+	{
+
+		/* Unreference the previously looked up handle */
+		if (psMSAAScratchBufferInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hMSAAScratchBuffer,
+						    PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+		}
+	}
+
+	if (hSyncPMRsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3DIN->ui32SyncPMRCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hSyncPMRsInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hSyncPMRsInt2[i],
+							    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+			}
+		}
+	}
+	/* Release now that the looked-up handles have been cleaned up. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
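+	/* Free the argument buffer only when it was heap-allocated: on the
+	 * !INTEGRITY_OS fast path the spare space of the bridge input buffer
+	 * was used instead, and there is nothing to free. */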
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeRGXKickTA3D NULL
+#endif
+
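+/* Bridge handler: looks up the render-context handle and forwards the
+ * requested priority to PVRSRVRGXSetRenderContextPriorityKM, dropping the
+ * handle reference on exit. */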
+static IMG_INT
+PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY
+					* psRGXSetRenderContextPriorityIN,
+					PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY
+					* psRGXSetRenderContextPriorityOUT,
+					CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hRenderContext =
+	    psRGXSetRenderContextPriorityIN->hRenderContext;
+	RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXSetRenderContextPriorityOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psRenderContextInt,
+				       hRenderContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXSetRenderContextPriority_exit;
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXSetRenderContextPriorityOUT->eError =
+	    PVRSRVRGXSetRenderContextPriorityKM(psConnection,
+						OSGetDevData(psConnection),
+						psRenderContextInt,
+						psRGXSetRenderContextPriorityIN->
+						ui32Priority);
+
+ RGXSetRenderContextPriority_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psRenderContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hRenderContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+	}
+	/* Release now that the looked-up handles have been cleaned up. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
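+/* Bridge handler: looks up the render-context handle and returns the last
+ * recorded reset reason and job reference for that context. */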
+static IMG_INT
+PVRSRVBridgeRGXGetLastRenderContextResetReason(IMG_UINT32
+					       ui32DispatchTableEntry,
+					       PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON
+					       *
+					       psRGXGetLastRenderContextResetReasonIN,
+					       PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON
+					       *
+					       psRGXGetLastRenderContextResetReasonOUT,
+					       CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hRenderContext =
+	    psRGXGetLastRenderContextResetReasonIN->hRenderContext;
+	RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXGetLastRenderContextResetReasonOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psRenderContextInt,
+				       hRenderContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely
+	    (psRGXGetLastRenderContextResetReasonOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXGetLastRenderContextResetReason_exit;
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXGetLastRenderContextResetReasonOUT->eError =
+	    PVRSRVRGXGetLastRenderContextResetReasonKM(psRenderContextInt,
+						       &psRGXGetLastRenderContextResetReasonOUT->
+						       ui32LastResetReason,
+						       &psRGXGetLastRenderContextResetReasonOUT->
+						       ui32LastResetJobRef);
+
+ RGXGetLastRenderContextResetReason_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psRenderContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hRenderContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+	}
+	/* Release now that the looked-up handles have been cleaned up. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
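+/* Bridge handler: looks up the HWRTData firmware memory descriptor and
+ * returns the number of partial renders counted against it. */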
+static IMG_INT
+PVRSRVBridgeRGXGetPartialRenderCount(IMG_UINT32 ui32DispatchTableEntry,
+				     PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT *
+				     psRGXGetPartialRenderCountIN,
+				     PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT
+				     * psRGXGetPartialRenderCountOUT,
+				     CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hHWRTDataMemDesc =
+	    psRGXGetPartialRenderCountIN->hHWRTDataMemDesc;
+	DEVMEM_MEMDESC *psHWRTDataMemDescInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXGetPartialRenderCountOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psHWRTDataMemDescInt,
+				       hHWRTDataMemDesc,
+				       PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+				       IMG_TRUE);
+	if (unlikely(psRGXGetPartialRenderCountOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXGetPartialRenderCount_exit;
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXGetPartialRenderCountOUT->eError =
+	    PVRSRVRGXGetPartialRenderCountKM(psHWRTDataMemDescInt,
+					     &psRGXGetPartialRenderCountOUT->
+					     ui32NumPartialRenders);
+
+ RGXGetPartialRenderCount_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psHWRTDataMemDescInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hHWRTDataMemDesc,
+					    PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC);
+	}
+	/* Release now that the looked-up handles have been cleaned up. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
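+/* Bridge handler: looks up the render-context handle and reports, via
+ * eError, whether that render context has stalled. */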
+static IMG_INT
+PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry,
+				    PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *
+				    psRGXRenderContextStalledIN,
+				    PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *
+				    psRGXRenderContextStalledOUT,
+				    CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext;
+	RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXRenderContextStalledOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psRenderContextInt,
+				       hRenderContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXRenderContextStalledOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXRenderContextStalled_exit;
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXRenderContextStalledOUT->eError =
+	    RGXRenderContextStalledKM(psRenderContextInt);
+
+ RGXRenderContextStalled_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psRenderContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hRenderContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+	}
+	/* Release now that the looked-up handles have been cleaned up. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#if !defined(SUPPORT_SERVER_SYNC_IMPL)
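+/* RGXKickTA3D2: the kick variant built when SUPPORT_SERVER_SYNC_IMPL is not
+ * defined. It follows the same unpack/lookup/dispatch pattern as RGXKickTA3D
+ * above, but carries no server TA/3D sync-primitive arrays. */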
+static IMG_INT
+PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry,
+			 PVRSRV_BRIDGE_IN_RGXKICKTA3D2 * psRGXKickTA3D2IN,
+			 PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 * psRGXKickTA3D2OUT,
+			 CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hRenderContext = psRGXKickTA3D2IN->hRenderContext;
+	RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientTAFenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClient3DFenceSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClient3DFenceSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32Client3DFenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32Client3DFenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32Client3DUpdateValueInt = NULL;
+	IMG_HANDLE hPRFenceUFOSyncPrimBlock =
+	    psRGXKickTA3D2IN->hPRFenceUFOSyncPrimBlock;
+	SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_CHAR *uiUpdateFenceName3DInt = NULL;
+	IMG_BYTE *psTACmdInt = NULL;
+	IMG_BYTE *ps3DPRCmdInt = NULL;
+	IMG_BYTE *ps3DCmdInt = NULL;
+	IMG_HANDLE hRTDataCleanup = psRGXKickTA3D2IN->hRTDataCleanup;
+	RGX_RTDATA_CLEANUP_DATA *psRTDataCleanupInt = NULL;
+	IMG_HANDLE hZBuffer = psRGXKickTA3D2IN->hZBuffer;
+	RGX_ZSBUFFER_DATA *psZBufferInt = NULL;
+	IMG_HANDLE hSBuffer = psRGXKickTA3D2IN->hSBuffer;
+	RGX_ZSBUFFER_DATA *psSBufferInt = NULL;
+	IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3D2IN->hMSAAScratchBuffer;
+	RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL;
+	IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+	PMR **psSyncPMRsInt = NULL;
+	IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
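+	/* Size a single flat buffer large enough for every variable-length
+	 * input array; the unpack code below carves the individual arrays out
+	 * of it, advancing ui32NextOffset in exactly this order. */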
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXKickTA3D2IN->ui32ClientTAFenceCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3D2IN->ui32ClientTAUpdateCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3D2IN->ui32Client3DFenceCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickTA3D2IN->ui32Client3DFenceCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickTA3D2IN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3D2IN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3D2IN->ui32Client3DUpdateCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) +
+	    (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+	    (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+	    (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+	    (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) +
+	    (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) +
+	    (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) +
+	    (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+	    (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)) +
+	    (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0;
+
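+	/* Reject over-large array counts and command sizes up front, before
+	 * anything is allocated or copied from user space. */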
+	if (unlikely
+	    (psRGXKickTA3D2IN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D2_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3D2IN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D2_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3D2IN->ui32Client3DFenceCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D2_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3D2IN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D2_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3D2IN->ui32TACmdSize >
+	     RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+	{
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D2_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3D2IN->ui323DPRCmdSize >
+	     RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+	{
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D2_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3D2IN->ui323DCmdSize >
+	     RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+	{
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D2_exit;
+	}
+
+	if (unlikely
+	    (psRGXKickTA3D2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickTA3D2_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) psRGXKickTA3D2IN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXKickTA3D2OUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXKickTA3D2_exit;
+			}
+		}
+	}
+
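+	/* Carve each input array out of pArrayArgsBuffer and copy its contents
+	 * from user space. Handle arrays get two slots: one for the raw
+	 * IMG_HANDLE values copied here, and one for the kernel pointers they
+	 * resolve to in the lookup phase below. */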
+	if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0)
+	{
+		psClientTAFenceSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32ClientTAFenceCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientTAFenceSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32ClientTAFenceCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hClientTAFenceSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     phClientTAFenceSyncPrimBlock,
+		     psRGXKickTA3D2IN->ui32ClientTAFenceCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0)
+	{
+		ui32ClientTAFenceSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32ClientTAFenceCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientTAFenceSyncOffsetInt,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     pui32ClientTAFenceSyncOffset,
+		     psRGXKickTA3D2IN->ui32ClientTAFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0)
+	{
+		ui32ClientTAFenceValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32ClientTAFenceCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientTAFenceValueInt,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     pui32ClientTAFenceValue,
+		     psRGXKickTA3D2IN->ui32ClientTAFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0)
+	{
+		psClientTAUpdateSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32ClientTAUpdateCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientTAUpdateSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32ClientTAUpdateCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hClientTAUpdateSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     phClientTAUpdateSyncPrimBlock,
+		     psRGXKickTA3D2IN->ui32ClientTAUpdateCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0)
+	{
+		ui32ClientTAUpdateSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32ClientTAUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientTAUpdateSyncOffsetInt,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     pui32ClientTAUpdateSyncOffset,
+		     psRGXKickTA3D2IN->ui32ClientTAUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0)
+	{
+		ui32ClientTAUpdateValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32ClientTAUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientTAUpdateValueInt,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     pui32ClientTAUpdateValue,
+		     psRGXKickTA3D2IN->ui32ClientTAUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32Client3DFenceCount != 0)
+	{
+		psClient3DFenceSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32Client3DFenceCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClient3DFenceSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32Client3DFenceCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32Client3DFenceCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hClient3DFenceSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     phClient3DFenceSyncPrimBlock,
+		     psRGXKickTA3D2IN->ui32Client3DFenceCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32Client3DFenceCount != 0)
+	{
+		ui32Client3DFenceSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32Client3DFenceCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32Client3DFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32Client3DFenceSyncOffsetInt,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     pui32Client3DFenceSyncOffset,
+		     psRGXKickTA3D2IN->ui32Client3DFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32Client3DFenceCount != 0)
+	{
+		ui32Client3DFenceValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32Client3DFenceCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32Client3DFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32Client3DFenceValueInt,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     pui32Client3DFenceValue,
+		     psRGXKickTA3D2IN->ui32Client3DFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0)
+	{
+		psClient3DUpdateSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32Client3DUpdateCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClient3DUpdateSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32Client3DUpdateCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hClient3DUpdateSyncPrimBlockInt2,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     phClient3DUpdateSyncPrimBlock,
+		     psRGXKickTA3D2IN->ui32Client3DUpdateCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0)
+	{
+		ui32Client3DUpdateSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32Client3DUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32Client3DUpdateSyncOffsetInt,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     pui32Client3DUpdateSyncOffset,
+		     psRGXKickTA3D2IN->ui32Client3DUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0)
+	{
+		ui32Client3DUpdateValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32Client3DUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32Client3DUpdateValueInt,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     pui32Client3DUpdateValue,
+		     psRGXKickTA3D2IN->ui32Client3DUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+
+	{
+		uiUpdateFenceNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceNameInt,
+		     (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+		((IMG_CHAR *)
+		 uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH *
+					sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+
+	{
+		uiUpdateFenceName3DInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceName3DInt,
+		     (const void __user *)psRGXKickTA3D2IN->
+		     puiUpdateFenceName3D,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+		((IMG_CHAR *)
+		 uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH *
+					  sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+	if (psRGXKickTA3D2IN->ui32TACmdSize != 0)
+	{
+		psTACmdInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psTACmdInt,
+		     (const void __user *)psRGXKickTA3D2IN->psTACmd,
+		     psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui323DPRCmdSize != 0)
+	{
+		ps3DPRCmdInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ps3DPRCmdInt,
+		     (const void __user *)psRGXKickTA3D2IN->ps3DPRCmd,
+		     psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui323DCmdSize != 0)
+	{
+		ps3DCmdInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ps3DCmdInt,
+		     (const void __user *)psRGXKickTA3D2IN->ps3DCmd,
+		     psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0)
+	{
+		ui32SyncPMRFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32SyncPMRFlagsInt,
+		     (const void __user *)psRGXKickTA3D2IN->pui32SyncPMRFlags,
+		     psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) !=
+		    PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+	if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0)
+	{
+		psSyncPMRsInt =
+		    (PMR **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+			      ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *);
+		hSyncPMRsInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hSyncPMRsInt2,
+		     (const void __user *)psRGXKickTA3D2IN->phSyncPMRs,
+		     psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickTA3D2_exit;
+		}
+	}
+
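+	/* Resolve every raw handle copied in above to its kernel object under
+	 * a single handle-base lock; each successful lookup takes a reference
+	 * that the RGXKickTA3D2_exit path releases. */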
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXKickTA3D2OUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psRenderContextInt,
+				       hRenderContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXKickTA3D2_exit;
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3D2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psClientTAFenceSyncPrimBlockInt
+						       [i],
+						       hClientTAFenceSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D2_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3D2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psClientTAUpdateSyncPrimBlockInt
+						       [i],
+						       hClientTAUpdateSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D2_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DFenceCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3D2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psClient3DFenceSyncPrimBlockInt
+						       [i],
+						       hClient3DFenceSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D2_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3D2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psClient3DUpdateSyncPrimBlockInt
+						       [i],
+						       hClient3DUpdateSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D2_exit;
+			}
+		}
+	}
+
+	/* Look up the address from the handle */
+	psRGXKickTA3D2OUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPRFenceUFOSyncPrimBlockInt,
+				       hPRFenceUFOSyncPrimBlock,
+				       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+				       IMG_TRUE);
+	if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXKickTA3D2_exit;
+	}
+
+	if (psRGXKickTA3D2IN->hRTDataCleanup)
+	{
+		/* Look up the address from the handle */
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					       (void **)&psRTDataCleanupInt,
+					       hRTDataCleanup,
+					       PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+					       IMG_TRUE);
+		if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+		{
+			UnlockHandle(psConnection->psHandleBase);
+			goto RGXKickTA3D2_exit;
+		}
+	}
+
+	if (psRGXKickTA3D2IN->hZBuffer)
+	{
+		/* Look up the address from the handle */
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					       (void **)&psZBufferInt,
+					       hZBuffer,
+					       PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+					       IMG_TRUE);
+		if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+		{
+			UnlockHandle(psConnection->psHandleBase);
+			goto RGXKickTA3D2_exit;
+		}
+	}
+
+	if (psRGXKickTA3D2IN->hSBuffer)
+	{
+		/* Look up the address from the handle */
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					       (void **)&psSBufferInt,
+					       hSBuffer,
+					       PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+					       IMG_TRUE);
+		if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+		{
+			UnlockHandle(psConnection->psHandleBase);
+			goto RGXKickTA3D2_exit;
+		}
+	}
+
+	if (psRGXKickTA3D2IN->hMSAAScratchBuffer)
+	{
+		/* Look up the address from the handle */
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					       (void **)&psMSAAScratchBufferInt,
+					       hMSAAScratchBuffer,
+					       PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+					       IMG_TRUE);
+		if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+		{
+			UnlockHandle(psConnection->psHandleBase);
+			goto RGXKickTA3D2_exit;
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3D2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psSyncPMRsInt[i],
+						       hSyncPMRsInt2[i],
+						       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D2_exit;
+			}
+		}
+	}
+	/* Release the lock now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
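+	/*
+	 * Each successful PVRSRVLookupHandleUnlocked() above passed IMG_TRUE
+	 * as its final argument, which (judging by the matching
+	 * PVRSRVReleaseHandleUnlocked() calls in the exit path) takes a
+	 * reference on the looked-up object. That is what makes it safe to
+	 * drop the handle lock across the KM call below.
+	 */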
+
+	psRGXKickTA3D2OUT->eError =
+	    PVRSRVRGXKickTA3D2KM(psRenderContextInt,
+				 psRGXKickTA3D2IN->ui32ClientCacheOpSeqNum,
+				 psRGXKickTA3D2IN->ui32ClientTAFenceCount,
+				 psClientTAFenceSyncPrimBlockInt,
+				 ui32ClientTAFenceSyncOffsetInt,
+				 ui32ClientTAFenceValueInt,
+				 psRGXKickTA3D2IN->ui32ClientTAUpdateCount,
+				 psClientTAUpdateSyncPrimBlockInt,
+				 ui32ClientTAUpdateSyncOffsetInt,
+				 ui32ClientTAUpdateValueInt,
+				 psRGXKickTA3D2IN->ui32Client3DFenceCount,
+				 psClient3DFenceSyncPrimBlockInt,
+				 ui32Client3DFenceSyncOffsetInt,
+				 ui32Client3DFenceValueInt,
+				 psRGXKickTA3D2IN->ui32Client3DUpdateCount,
+				 psClient3DUpdateSyncPrimBlockInt,
+				 ui32Client3DUpdateSyncOffsetInt,
+				 ui32Client3DUpdateValueInt,
+				 psPRFenceUFOSyncPrimBlockInt,
+				 psRGXKickTA3D2IN->ui32FRFenceUFOSyncOffset,
+				 psRGXKickTA3D2IN->ui32FRFenceValue,
+				 psRGXKickTA3D2IN->hCheckFence,
+				 psRGXKickTA3D2IN->hUpdateTimeline,
+				 &psRGXKickTA3D2OUT->hUpdateFence,
+				 uiUpdateFenceNameInt,
+				 psRGXKickTA3D2IN->hCheckFence3D,
+				 psRGXKickTA3D2IN->hUpdateTimeline3D,
+				 &psRGXKickTA3D2OUT->hUpdateFence3D,
+				 uiUpdateFenceName3DInt,
+				 psRGXKickTA3D2IN->ui32TACmdSize,
+				 psTACmdInt,
+				 psRGXKickTA3D2IN->ui323DPRCmdSize,
+				 ps3DPRCmdInt,
+				 psRGXKickTA3D2IN->ui323DCmdSize,
+				 ps3DCmdInt,
+				 psRGXKickTA3D2IN->ui32ExtJobRef,
+				 psRGXKickTA3D2IN->bbLastTAInScene,
+				 psRGXKickTA3D2IN->bbKickTA,
+				 psRGXKickTA3D2IN->bbKickPR,
+				 psRGXKickTA3D2IN->bbKick3D,
+				 psRGXKickTA3D2IN->bbAbort,
+				 psRGXKickTA3D2IN->ui32PDumpFlags,
+				 psRTDataCleanupInt,
+				 psZBufferInt,
+				 psSBufferInt,
+				 psMSAAScratchBufferInt,
+				 psRGXKickTA3D2IN->ui32SyncPMRCount,
+				 ui32SyncPMRFlagsInt,
+				 psSyncPMRsInt,
+				 psRGXKickTA3D2IN->ui32RenderTargetSize,
+				 psRGXKickTA3D2IN->ui32NumberOfDrawCalls,
+				 psRGXKickTA3D2IN->ui32NumberOfIndices,
+				 psRGXKickTA3D2IN->ui32NumberOfMRTs,
+				 psRGXKickTA3D2IN->ui64Deadline);
+
+ RGXKickTA3D2_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psRenderContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hRenderContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+	}
+
+	if (hClientTAFenceSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hClientTAFenceSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hClientTAFenceSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hClientTAUpdateSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hClientTAUpdateSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hClientTAUpdateSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hClient3DFenceSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DFenceCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hClient3DFenceSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hClient3DFenceSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hClient3DUpdateSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hClient3DUpdateSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hClient3DUpdateSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	/* Unreference the previously looked up handle */
+	if (psPRFenceUFOSyncPrimBlockInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPRFenceUFOSyncPrimBlock,
+					    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+
+	if (psRGXKickTA3D2IN->hRTDataCleanup)
+	{
+
+		/* Unreference the previously looked up handle */
+		if (psRTDataCleanupInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hRTDataCleanup,
+						    PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+		}
+	}
+
+	if (psRGXKickTA3D2IN->hZBuffer)
+	{
+
+		/* Unreference the previously looked up handle */
+		if (psZBufferInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hZBuffer,
+						    PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+		}
+	}
+
+	if (psRGXKickTA3D2IN->hSBuffer)
+	{
+
+		/* Unreference the previously looked up handle */
+		if (psSBufferInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hSBuffer,
+						    PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+		}
+	}
+
+	if (psRGXKickTA3D2IN->hMSAAScratchBuffer)
+	{
+
+		/* Unreference the previously looked up handle */
+		if (psMSAAScratchBufferInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hMSAAScratchBuffer,
+						    PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+		}
+	}
+
+	if (hSyncPMRsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hSyncPMRsInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hSyncPMRsInt2[i],
+							    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+			}
+		}
+	}
+	/* Release the lock now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
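+	/*
+	 * Every array argument was carved out of the single pArrayArgsBuffer
+	 * allocation by advancing ui32NextOffset, so the running offset must
+	 * now equal the total size computed up front; a mismatch means the
+	 * size calculation and the sub-allocations have drifted apart.
+	 */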
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
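+	/*
+	 * On INTEGRITY_OS pArrayArgsBuffer is always a separate heap
+	 * allocation, so it is freed whenever it exists. On other platforms
+	 * it may alias the tail of the bridge input buffer (the
+	 * bHaveEnoughSpace case), in which case there is nothing to free.
+	 */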
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeRGXKickTA3D2 NULL
+#endif
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
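+/*
+ * All RGXTA3D entries below are registered with bUseLock = IMG_TRUE, which
+ * presumably asks the dispatcher to serialise these calls under the bridge
+ * lock.
+ */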
+
+PVRSRV_ERROR InitRGXTA3DBridge(void);
+PVRSRV_ERROR DeinitRGXTA3DBridge(void);
+
+/*
+ * Register all RGXTA3D functions with services
+ */
+PVRSRV_ERROR InitRGXTA3DBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATA,
+			      PVRSRVBridgeRGXCreateHWRTData, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATA,
+			      PVRSRVBridgeRGXDestroyHWRTData, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERTARGET,
+			      PVRSRVBridgeRGXCreateRenderTarget, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERTARGET,
+			      PVRSRVBridgeRGXDestroyRenderTarget, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER,
+			      PVRSRVBridgeRGXCreateZSBuffer, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER,
+			      PVRSRVBridgeRGXDestroyZSBuffer, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER,
+			      PVRSRVBridgeRGXPopulateZSBuffer, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER,
+			      PVRSRVBridgeRGXUnpopulateZSBuffer, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST,
+			      PVRSRVBridgeRGXCreateFreeList, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST,
+			      PVRSRVBridgeRGXDestroyFreeList, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT,
+			      PVRSRVBridgeRGXCreateRenderContext, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT,
+			      PVRSRVBridgeRGXDestroyRenderContext, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D,
+			      PVRSRVBridgeRGXKickTA3D, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY,
+			      PVRSRVBridgeRGXSetRenderContextPriority, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON,
+			      PVRSRVBridgeRGXGetLastRenderContextResetReason,
+			      NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXGETPARTIALRENDERCOUNT,
+			      PVRSRVBridgeRGXGetPartialRenderCount, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED,
+			      PVRSRVBridgeRGXRenderContextStalled, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2,
+			      PVRSRVBridgeRGXKickTA3D2, NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTA3D functions from services
+ */
+PVRSRV_ERROR DeinitRGXTA3DBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATA);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATA);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERTARGET);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERTARGET);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXGETPARTIALRENDERCOUNT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxtq2_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxtq2_bridge.c
new file mode 100644
index 0000000..b1f21fb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxtq2_bridge.c
@@ -0,0 +1,1942 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxtq2
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxtq2
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtdmtransfer.h"
+
+#include "common_rgxtq2_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "rgx_bvnc_defs_km.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+					PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT
+					* psRGXTDMCreateTransferContextIN,
+					PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT
+					* psRGXTDMCreateTransferContextOUT,
+					CONNECTION_DATA * psConnection)
+{
+	IMG_BYTE *psFrameworkCmdInt = NULL;
+	IMG_HANDLE hPrivData = psRGXTDMCreateTransferContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize *
+	     sizeof(IMG_BYTE)) + 0;
+
+	if (unlikely
+	    (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize >
+	     RGXFWIF_RF_CMD_SIZE))
+	{
+		psRGXTDMCreateTransferContextOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXTDMCreateTransferContext_exit;
+	}
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+		{
+			psRGXTDMCreateTransferContextOUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXTDMCreateTransferContext_exit;
+		}
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use the remainder of the input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRGXTDMCreateTransferContextIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXTDMCreateTransferContextOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXTDMCreateTransferContext_exit;
+			}
+		}
+	}
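+	/*
+	 * Sketch of the reuse arithmetic above, assuming the IN structure
+	 * sits at the start of a fixed PVRSRV_MAX_BRIDGE_IN_SIZE buffer:
+	 *
+	 *   offset = PVR_ALIGN(sizeof(*IN), sizeof(unsigned long));
+	 *   spare  = (offset >= PVRSRV_MAX_BRIDGE_IN_SIZE)
+	 *                ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - offset;
+	 *
+	 * The array payload lands in that spare space when it fits; otherwise
+	 * a separate buffer is allocated with OSAllocMemNoStats().
+	 */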
+
+	if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize != 0)
+	{
+		psFrameworkCmdInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize *
+		    sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize *
+	    sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psFrameworkCmdInt,
+		     (const void __user *)psRGXTDMCreateTransferContextIN->
+		     psFrameworkCmd,
+		     psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize *
+		     sizeof(IMG_BYTE)) != PVRSRV_OK)
+		{
+			psRGXTDMCreateTransferContextOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMCreateTransferContext_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXTDMCreateTransferContextOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hPrivDataInt,
+				       hPrivData,
+				       PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+				       IMG_TRUE);
+	if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXTDMCreateTransferContext_exit;
+	}
+	/* Release the lock now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXTDMCreateTransferContextOUT->eError =
+	    PVRSRVRGXTDMCreateTransferContextKM(psConnection,
+						OSGetDevData(psConnection),
+						psRGXTDMCreateTransferContextIN->
+						ui32Priority,
+						psRGXTDMCreateTransferContextIN->
+						ui32FrameworkCmdize,
+						psFrameworkCmdInt, hPrivDataInt,
+						psRGXTDMCreateTransferContextIN->
+						ui32PackedCCBSizeU88,
+						&psTransferContextInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK))
+	{
+		goto RGXTDMCreateTransferContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXTDMCreateTransferContextOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRGXTDMCreateTransferContextOUT->
+				      hTransferContext,
+				      (void *)psTransferContextInt,
+				      PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      PVRSRVRGXTDMDestroyTransferContextKM);
+	if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXTDMCreateTransferContext_exit;
+	}
+
+	/* Release the lock now that we have created the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXTDMCreateTransferContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData,
+					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release the lock now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+	{
+		if (psTransferContextInt)
+		{
+			PVRSRVRGXTDMDestroyTransferContextKM
+			    (psTransferContextInt);
+		}
+	}
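+	/*
+	 * The rollback above covers the case where the KM create succeeded
+	 * but a later step (for example the handle allocation) failed: no
+	 * handle refers to the new context yet, so it is destroyed directly
+	 * to avoid leaking it.
+	 */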
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXTDMDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+					 PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT
+					 * psRGXTDMDestroyTransferContextIN,
+					 PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT
+					 * psRGXTDMDestroyTransferContextOUT,
+					 CONNECTION_DATA * psConnection)
+{
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+		{
+			psRGXTDMDestroyTransferContextOUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXTDMDestroyTransferContext_exit;
+		}
+	}
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXTDMDestroyTransferContextOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE)
+					psRGXTDMDestroyTransferContextIN->
+					hTransferContext,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
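+	/*
+	 * PVRSRV_ERROR_RETRY is deliberately not treated as a failure below:
+	 * destruction can apparently be deferred while the object is still in
+	 * use and retried by the caller, so only other errors are logged.
+	 */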
+	if (unlikely
+	    ((psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_OK)
+	     && (psRGXTDMDestroyTransferContextOUT->eError !=
+		 PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeRGXTDMDestroyTransferContext: %s",
+			 PVRSRVGetErrorString
+			 (psRGXTDMDestroyTransferContextOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXTDMDestroyTransferContext_exit;
+	}
+
+	/* Release the lock now that we have destroyed the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXTDMDestroyTransferContext_exit:
+
+	return 0;
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
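+/*
+ * Legacy submit path, compiled only while the driver still implements
+ * server-side syncs: it carries explicit SERVER_SYNC_PRIMITIVE arrays in
+ * addition to the client fence/update arrays.
+ */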
+static IMG_INT
+PVRSRVBridgeRGXTDMSubmitTransfer(IMG_UINT32 ui32DispatchTableEntry,
+				 PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER *
+				 psRGXTDMSubmitTransferIN,
+				 PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER *
+				 psRGXTDMSubmitTransferOUT,
+				 CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hTransferContext =
+	    psRGXTDMSubmitTransferIN->hTransferContext;
+	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32FenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32FenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32UpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE **psServerSyncInt = NULL;
+	IMG_HANDLE *hServerSyncInt2 = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_UINT8 *ui8FWCommandInt = NULL;
+	IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+	PMR **psSyncPMRsInt = NULL;
+	IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+	     sizeof(IMG_HANDLE)) +
+	    (psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+	     sizeof(IMG_UINT32)) +
+	    (psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+	     sizeof(IMG_UINT32)) +
+	    (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+	     sizeof(IMG_HANDLE)) +
+	    (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+	     sizeof(IMG_UINT32)) +
+	    (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+	     sizeof(IMG_UINT32)) +
+	    (psRGXTDMSubmitTransferIN->ui32ServerSyncCount *
+	     sizeof(IMG_UINT32)) +
+	    (psRGXTDMSubmitTransferIN->ui32ServerSyncCount *
+	     sizeof(SERVER_SYNC_PRIMITIVE *)) +
+	    (psRGXTDMSubmitTransferIN->ui32ServerSyncCount *
+	     sizeof(IMG_HANDLE)) +
+	    (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+	    (psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8)) +
+	    (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+	    (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *)) +
+	    (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) +
+	    0;
+
+	if (unlikely
+	    (psRGXTDMSubmitTransferIN->ui32ClientFenceCount >
+	     PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXTDMSubmitTransferOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXTDMSubmitTransfer_exit;
+	}
+
+	if (unlikely
+	    (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount >
+	     PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXTDMSubmitTransferOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXTDMSubmitTransfer_exit;
+	}
+
+	if (unlikely
+	    (psRGXTDMSubmitTransferIN->ui32ServerSyncCount >
+	     PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXTDMSubmitTransferOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXTDMSubmitTransfer_exit;
+	}
+
+	if (unlikely
+	    (psRGXTDMSubmitTransferIN->ui32CommandSize >
+	     RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+	{
+		psRGXTDMSubmitTransferOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXTDMSubmitTransfer_exit;
+	}
+
+	if (unlikely
+	    (psRGXTDMSubmitTransferIN->ui32SyncPMRCount >
+	     PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXTDMSubmitTransferOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXTDMSubmitTransfer_exit;
+	}
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use the remainder of the input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXTDMSubmitTransferIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRGXTDMSubmitTransferIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXTDMSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXTDMSubmitTransfer_exit;
+			}
+		}
+	}
+
+	if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount != 0)
+	{
+		psFenceUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hFenceUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+	    sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hFenceUFOSyncPrimBlockInt2,
+		     (const void __user *)psRGXTDMSubmitTransferIN->
+		     phFenceUFOSyncPrimBlock,
+		     psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount != 0)
+	{
+		ui32FenceSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32FenceSyncOffsetInt,
+		     (const void __user *)psRGXTDMSubmitTransferIN->
+		     pui32FenceSyncOffset,
+		     psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount != 0)
+	{
+		ui32FenceValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32FenceValueInt,
+		     (const void __user *)psRGXTDMSubmitTransferIN->
+		     pui32FenceValue,
+		     psRGXTDMSubmitTransferIN->ui32ClientFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount != 0)
+	{
+		psUpdateUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hUpdateUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+	    sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hUpdateUFOSyncPrimBlockInt2,
+		     (const void __user *)psRGXTDMSubmitTransferIN->
+		     phUpdateUFOSyncPrimBlock,
+		     psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32UpdateSyncOffsetInt,
+		     (const void __user *)psRGXTDMSubmitTransferIN->
+		     pui32UpdateSyncOffset,
+		     psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32UpdateValueInt,
+		     (const void __user *)psRGXTDMSubmitTransferIN->
+		     pui32UpdateValue,
+		     psRGXTDMSubmitTransferIN->ui32ClientUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerSyncFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32ServerSyncCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32) >
+	    0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ServerSyncFlagsInt,
+		     (const void __user *)psRGXTDMSubmitTransferIN->
+		     pui32ServerSyncFlags,
+		     psRGXTDMSubmitTransferIN->ui32ServerSyncCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncInt =
+		    (SERVER_SYNC_PRIMITIVE **) (((IMG_UINT8 *) pArrayArgsBuffer)
+						+ ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32ServerSyncCount *
+		    sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServerSyncInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32ServerSyncCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) >
+	    0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hServerSyncInt2,
+		     (const void __user *)psRGXTDMSubmitTransferIN->
+		     phServerSync,
+		     psRGXTDMSubmitTransferIN->ui32ServerSyncCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+
+	{
+		uiUpdateFenceNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceNameInt,
+		     (const void __user *)psRGXTDMSubmitTransferIN->
+		     puiUpdateFenceName,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+		((IMG_CHAR *)
+		 uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH *
+					sizeof(IMG_CHAR)) - 1] = '\0';
+	}
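+	/*
+	 * The fence name is a fixed PVRSRV_SYNC_NAME_LENGTH buffer copied
+	 * verbatim from user space, so the last byte is forced to NUL above
+	 * in case the caller supplied an unterminated string.
+	 */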
+	if (psRGXTDMSubmitTransferIN->ui32CommandSize != 0)
+	{
+		ui8FWCommandInt =
+		    (IMG_UINT8 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				   ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32CommandSize *
+		    sizeof(IMG_UINT8);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui8FWCommandInt,
+		     (const void __user *)psRGXTDMSubmitTransferIN->
+		     pui8FWCommand,
+		     psRGXTDMSubmitTransferIN->ui32CommandSize *
+		     sizeof(IMG_UINT8)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount != 0)
+	{
+		ui32SyncPMRFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32SyncPMRCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32SyncPMRFlagsInt,
+		     (const void __user *)psRGXTDMSubmitTransferIN->
+		     pui32SyncPMRFlags,
+		     psRGXTDMSubmitTransferIN->ui32SyncPMRCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount != 0)
+	{
+		psSyncPMRsInt =
+		    (PMR **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+			      ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *);
+		hSyncPMRsInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransferIN->ui32SyncPMRCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hSyncPMRsInt2,
+		     (const void __user *)psRGXTDMSubmitTransferIN->phSyncPMRs,
+		     psRGXTDMSubmitTransferIN->ui32SyncPMRCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXTDMSubmitTransferOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psTransferContextInt,
+				       hTransferContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXTDMSubmitTransfer_exit;
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXTDMSubmitTransferIN->ui32ClientFenceCount;
+		     i++)
+		{
+			/* Look up the address from the handle */
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psFenceUFOSyncPrimBlockInt
+						       [i],
+						       hFenceUFOSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely
+			    (psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXTDMSubmitTransfer_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXTDMSubmitTransferIN->ui32ClientUpdateCount;
+		     i++)
+		{
+			/* Look up the address from the handle */
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psUpdateUFOSyncPrimBlockInt
+						       [i],
+						       hUpdateUFOSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely
+			    (psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXTDMSubmitTransfer_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXTDMSubmitTransferIN->ui32ServerSyncCount;
+		     i++)
+		{
+			/* Look up the address from the handle */
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psServerSyncInt[i],
+						       hServerSyncInt2[i],
+						       PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+						       IMG_TRUE);
+			if (unlikely
+			    (psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXTDMSubmitTransfer_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXTDMSubmitTransferIN->ui32SyncPMRCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXTDMSubmitTransferOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psSyncPMRsInt[i],
+						       hSyncPMRsInt2[i],
+						       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+						       IMG_TRUE);
+			if (unlikely
+			    (psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXTDMSubmitTransfer_exit;
+			}
+		}
+	}
+	/* Release the lock now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXTDMSubmitTransferOUT->eError =
+	    PVRSRVRGXTDMSubmitTransferKM(psTransferContextInt,
+					 psRGXTDMSubmitTransferIN->
+					 ui32PDumpFlags,
+					 psRGXTDMSubmitTransferIN->
+					 ui32ClientCacheOpSeqNum,
+					 psRGXTDMSubmitTransferIN->
+					 ui32ClientFenceCount,
+					 psFenceUFOSyncPrimBlockInt,
+					 ui32FenceSyncOffsetInt,
+					 ui32FenceValueInt,
+					 psRGXTDMSubmitTransferIN->
+					 ui32ClientUpdateCount,
+					 psUpdateUFOSyncPrimBlockInt,
+					 ui32UpdateSyncOffsetInt,
+					 ui32UpdateValueInt,
+					 psRGXTDMSubmitTransferIN->
+					 ui32ServerSyncCount,
+					 ui32ServerSyncFlagsInt,
+					 psServerSyncInt,
+					 psRGXTDMSubmitTransferIN->
+					 hCheckFenceFD,
+					 psRGXTDMSubmitTransferIN->
+					 hUpdateTimeline,
+					 &psRGXTDMSubmitTransferOUT->
+					 hUpdateFence, uiUpdateFenceNameInt,
+					 psRGXTDMSubmitTransferIN->
+					 ui32CommandSize, ui8FWCommandInt,
+					 psRGXTDMSubmitTransferIN->
+					 ui32ExternalJobReference,
+					 psRGXTDMSubmitTransferIN->
+					 ui32SyncPMRCount, ui32SyncPMRFlagsInt,
+					 psSyncPMRsInt);
+
+ RGXTDMSubmitTransfer_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psTransferContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hTransferContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+	}
+
+	if (hFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXTDMSubmitTransferIN->ui32ClientFenceCount;
+		     i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hFenceUFOSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hFenceUFOSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXTDMSubmitTransferIN->ui32ClientUpdateCount;
+		     i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hUpdateUFOSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hUpdateUFOSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hServerSyncInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXTDMSubmitTransferIN->ui32ServerSyncCount;
+		     i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hServerSyncInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hServerSyncInt2[i],
+							    PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+			}
+		}
+	}
+
+	if (hSyncPMRsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXTDMSubmitTransferIN->ui32SyncPMRCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hSyncPMRsInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hSyncPMRsInt2[i],
+							    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+			}
+		}
+	}
+	/* Release the lock now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeRGXTDMSubmitTransfer NULL
+#endif
+
+static IMG_INT
+PVRSRVBridgeRGXTDMSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					     PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY
+					     *
+					     psRGXTDMSetTransferContextPriorityIN,
+					     PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY
+					     *
+					     psRGXTDMSetTransferContextPriorityOUT,
+					     CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hTransferContext =
+	    psRGXTDMSetTransferContextPriorityIN->hTransferContext;
+	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+		{
+			psRGXTDMSetTransferContextPriorityOUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXTDMSetTransferContextPriority_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXTDMSetTransferContextPriorityOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psTransferContextInt,
+				       hTransferContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely
+	    (psRGXTDMSetTransferContextPriorityOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXTDMSetTransferContextPriority_exit;
+	}
+	/* Release the lock now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXTDMSetTransferContextPriorityOUT->eError =
+	    PVRSRVRGXTDMSetTransferContextPriorityKM(psConnection,
+						     OSGetDevData(psConnection),
+						     psTransferContextInt,
+						     psRGXTDMSetTransferContextPriorityIN->
+						     ui32Priority);
+
+ RGXTDMSetTransferContextPriority_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psTransferContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hTransferContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+	}
+	/* Release the lock now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE
+					  * psRGXTDMNotifyWriteOffsetUpdateIN,
+					  PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE
+					  * psRGXTDMNotifyWriteOffsetUpdateOUT,
+					  CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hTransferContext =
+	    psRGXTDMNotifyWriteOffsetUpdateIN->hTransferContext;
+	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+		{
+			psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXTDMNotifyWriteOffsetUpdate_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psTransferContextInt,
+				       hTransferContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXTDMNotifyWriteOffsetUpdateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXTDMNotifyWriteOffsetUpdate_exit;
+	}
+	/* Release the lock now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+	    PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(psTransferContextInt,
+						  psRGXTDMNotifyWriteOffsetUpdateIN->
+						  ui32PDumpFlags);
+
+ RGXTDMNotifyWriteOffsetUpdate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psTransferContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hTransferContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+	}
+	/* Release the lock now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#if !defined(SUPPORT_SERVER_SYNC_IMPL)
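+/*
+ * Replacement submit path used when server syncs are compiled out: the
+ * SERVER_SYNC_PRIMITIVE arrays of the first variant are gone, leaving only
+ * the client fence/update arrays and the sync PMR list.
+ */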
+static IMG_INT
+PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry,
+				  PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *
+				  psRGXTDMSubmitTransfer2IN,
+				  PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *
+				  psRGXTDMSubmitTransfer2OUT,
+				  CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hTransferContext =
+	    psRGXTDMSubmitTransfer2IN->hTransferContext;
+	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32FenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32FenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32UpdateValueInt = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_UINT8 *ui8FWCommandInt = NULL;
+	IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+	PMR **psSyncPMRsInt = NULL;
+	IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+	     sizeof(IMG_HANDLE)) +
+	    (psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+	     sizeof(IMG_UINT32)) +
+	    (psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+	     sizeof(IMG_UINT32)) +
+	    (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+	     sizeof(IMG_HANDLE)) +
+	    (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+	     sizeof(IMG_UINT32)) +
+	    (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+	     sizeof(IMG_UINT32)) +
+	    (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+	    (psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8)) +
+	    (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+	    (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) +
+	    (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) +
+	    0;
+
+	if (unlikely
+	    (psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount >
+	     PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXTDMSubmitTransfer2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXTDMSubmitTransfer2_exit;
+	}
+
+	if (unlikely
+	    (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount >
+	     PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXTDMSubmitTransfer2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXTDMSubmitTransfer2_exit;
+	}
+
+	if (unlikely
+	    (psRGXTDMSubmitTransfer2IN->ui32CommandSize >
+	     RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+	{
+		psRGXTDMSubmitTransfer2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXTDMSubmitTransfer2_exit;
+	}
+
+	if (unlikely
+	    (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount >
+	     PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXTDMSubmitTransfer2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXTDMSubmitTransfer2_exit;
+	}
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+							 RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+		{
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXTDMSubmitTransfer2_exit;
+		}
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use the remainder of the input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer2IN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRGXTDMSubmitTransfer2IN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXTDMSubmitTransfer2OUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXTDMSubmitTransfer2_exit;
+			}
+		}
+	}
+
+	if (psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount != 0)
+	{
+		psFenceUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hFenceUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+	    sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hFenceUFOSyncPrimBlockInt2,
+		     (const void __user *)psRGXTDMSubmitTransfer2IN->
+		     phFenceUFOSyncPrimBlock,
+		     psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer2_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount != 0)
+	{
+		ui32FenceSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32FenceSyncOffsetInt,
+		     (const void __user *)psRGXTDMSubmitTransfer2IN->
+		     pui32FenceSyncOffset,
+		     psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer2_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount != 0)
+	{
+		ui32FenceValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32FenceValueInt,
+		     (const void __user *)psRGXTDMSubmitTransfer2IN->
+		     pui32FenceValue,
+		     psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer2_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0)
+	{
+		psUpdateUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hUpdateUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+	    sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hUpdateUFOSyncPrimBlockInt2,
+		     (const void __user *)psRGXTDMSubmitTransfer2IN->
+		     phUpdateUFOSyncPrimBlock,
+		     psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer2_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateSyncOffsetInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32UpdateSyncOffsetInt,
+		     (const void __user *)psRGXTDMSubmitTransfer2IN->
+		     pui32UpdateSyncOffset,
+		     psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer2_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+	    sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32UpdateValueInt,
+		     (const void __user *)psRGXTDMSubmitTransfer2IN->
+		     pui32UpdateValue,
+		     psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer2_exit;
+		}
+	}
+
+	{
+		uiUpdateFenceNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceNameInt,
+		     (const void __user *)psRGXTDMSubmitTransfer2IN->
+		     puiUpdateFenceName,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer2_exit;
+		}
+		((IMG_CHAR *)
+		 uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH *
+					sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+	if (psRGXTDMSubmitTransfer2IN->ui32CommandSize != 0)
+	{
+		ui8FWCommandInt =
+		    (IMG_UINT8 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				   ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransfer2IN->ui32CommandSize *
+		    sizeof(IMG_UINT8);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui8FWCommandInt,
+		     (const void __user *)psRGXTDMSubmitTransfer2IN->
+		     pui8FWCommand,
+		     psRGXTDMSubmitTransfer2IN->ui32CommandSize *
+		     sizeof(IMG_UINT8)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer2_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0)
+	{
+		ui32SyncPMRFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) >
+	    0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32SyncPMRFlagsInt,
+		     (const void __user *)psRGXTDMSubmitTransfer2IN->
+		     pui32SyncPMRFlags,
+		     psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer2_exit;
+		}
+	}
+	if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0)
+	{
+		psSyncPMRsInt =
+		    (PMR **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+			      ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *);
+		hSyncPMRsInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) >
+	    0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hSyncPMRsInt2,
+		     (const void __user *)psRGXTDMSubmitTransfer2IN->phSyncPMRs,
+		     psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXTDMSubmitTransfer2_exit;
+		}
+	}
+
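+	/* The IMG_TRUE argument asks each lookup to take a reference on the handle; the matching releases are issued in the exit path below. */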
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXTDMSubmitTransfer2OUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psTransferContextInt,
+				       hTransferContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXTDMSubmitTransfer2_exit;
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount;
+		     i++)
+		{
+			/* Look up the address from the handle */
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psFenceUFOSyncPrimBlockInt
+						       [i],
+						       hFenceUFOSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely
+			    (psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXTDMSubmitTransfer2_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0;
+		     i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psUpdateUFOSyncPrimBlockInt
+						       [i],
+						       hUpdateUFOSyncPrimBlockInt2
+						       [i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely
+			    (psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXTDMSubmitTransfer2_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount;
+		     i++)
+		{
+			/* Look up the address from the handle */
+			psRGXTDMSubmitTransfer2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psSyncPMRsInt[i],
+						       hSyncPMRsInt2[i],
+						       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+						       IMG_TRUE);
+			if (unlikely
+			    (psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXTDMSubmitTransfer2_exit;
+			}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
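+	/* All arguments are now in kernel memory; hand off to the KM implementation of the TDM transfer submit. */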
+	psRGXTDMSubmitTransfer2OUT->eError =
+	    PVRSRVRGXTDMSubmitTransfer2KM(psTransferContextInt,
+					  psRGXTDMSubmitTransfer2IN->
+					  ui32PDumpFlags,
+					  psRGXTDMSubmitTransfer2IN->
+					  ui32ClientCacheOpSeqNum,
+					  psRGXTDMSubmitTransfer2IN->
+					  ui32ClientFenceCount,
+					  psFenceUFOSyncPrimBlockInt,
+					  ui32FenceSyncOffsetInt,
+					  ui32FenceValueInt,
+					  psRGXTDMSubmitTransfer2IN->
+					  ui32ClientUpdateCount,
+					  psUpdateUFOSyncPrimBlockInt,
+					  ui32UpdateSyncOffsetInt,
+					  ui32UpdateValueInt,
+					  psRGXTDMSubmitTransfer2IN->
+					  hCheckFenceFD,
+					  psRGXTDMSubmitTransfer2IN->
+					  hUpdateTimeline,
+					  &psRGXTDMSubmitTransfer2OUT->
+					  hUpdateFence, uiUpdateFenceNameInt,
+					  psRGXTDMSubmitTransfer2IN->
+					  ui32CommandSize, ui8FWCommandInt,
+					  psRGXTDMSubmitTransfer2IN->
+					  ui32ExternalJobReference,
+					  psRGXTDMSubmitTransfer2IN->
+					  ui32SyncPMRCount, ui32SyncPMRFlagsInt,
+					  psSyncPMRsInt);
+
+ RGXTDMSubmitTransfer2_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psTransferContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hTransferContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+	}
+
+	if (hFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32ClientFenceCount;
+		     i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hFenceUFOSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hFenceUFOSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0;
+		     i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hUpdateUFOSyncPrimBlockInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hUpdateUFOSyncPrimBlockInt2
+							    [i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hSyncPMRsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount;
+		     i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hSyncPMRsInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hSyncPMRsInt2[i],
+							    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+			}
+		}
+	}
+	/* Release now we have cleaned up lookup handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeRGXTDMSubmitTransfer2 NULL
+#endif
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
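+/*
+ * Forwarded as the bUseLock argument of SetDispatchTableEntry() for every
+ * entry below, requesting dispatch under the global bridge lock.
+ */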
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXTQ2Bridge(void);
+PVRSRV_ERROR DeinitRGXTQ2Bridge(void);
+
+/*
+ * Register all RGXTQ2 functions with services
+ */
+PVRSRV_ERROR InitRGXTQ2Bridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+			      PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT,
+			      PVRSRVBridgeRGXTDMCreateTransferContext, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+			      PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT,
+			      PVRSRVBridgeRGXTDMDestroyTransferContext, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+			      PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER,
+			      PVRSRVBridgeRGXTDMSubmitTransfer, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+			      PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY,
+			      PVRSRVBridgeRGXTDMSetTransferContextPriority,
+			      NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+			      PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE,
+			      PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+			      PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2,
+			      PVRSRVBridgeRGXTDMSubmitTransfer2, NULL,
+			      bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTQ2 functions from services
+ */
+PVRSRV_ERROR DeinitRGXTQ2Bridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+				PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+				PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+				PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+				PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+				PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+				PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxtq_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxtq_bridge.c
new file mode 100644
index 0000000..69fda2a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_rgxtq_bridge.c
@@ -0,0 +1,2816 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxtq
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxtq
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtransfer.h"
+#include "rgx_tq_shared.h"
+
+#include "common_rgxtq_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+				     PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *
+				     psRGXCreateTransferContextIN,
+				     PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT
+				     * psRGXCreateTransferContextOUT,
+				     CONNECTION_DATA * psConnection)
+{
+	IMG_BYTE *psFrameworkCmdInt = NULL;
+	IMG_HANDLE hPrivData = psRGXCreateTransferContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXCreateTransferContextIN->ui32FrameworkCmdize *
+	     sizeof(IMG_BYTE)) + 0;
+
+	if (unlikely
+	    (psRGXCreateTransferContextIN->ui32FrameworkCmdize >
+	     RGXFWIF_RF_CMD_SIZE))
+	{
+		psRGXCreateTransferContextOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXCreateTransferContext_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXCreateTransferContextIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRGXCreateTransferContextIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXCreateTransferContextOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXCreateTransferContext_exit;
+			}
+		}
+	}
+
+	if (psRGXCreateTransferContextIN->ui32FrameworkCmdize != 0)
+	{
+		psFrameworkCmdInt =
+		    (IMG_BYTE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXCreateTransferContextIN->ui32FrameworkCmdize *
+		    sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXCreateTransferContextIN->ui32FrameworkCmdize *
+	    sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, psFrameworkCmdInt,
+		     (const void __user *)psRGXCreateTransferContextIN->
+		     psFrameworkCmd,
+		     psRGXCreateTransferContextIN->ui32FrameworkCmdize *
+		     sizeof(IMG_BYTE)) != PVRSRV_OK)
+		{
+			psRGXCreateTransferContextOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXCreateTransferContext_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXCreateTransferContextOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hPrivDataInt,
+				       hPrivData,
+				       PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+				       IMG_TRUE);
+	if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateTransferContext_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXCreateTransferContextOUT->eError =
+	    PVRSRVRGXCreateTransferContextKM(psConnection,
+					     OSGetDevData(psConnection),
+					     psRGXCreateTransferContextIN->
+					     ui32Priority,
+					     psRGXCreateTransferContextIN->
+					     ui32FrameworkCmdize,
+					     psFrameworkCmdInt, hPrivDataInt,
+					     psRGXCreateTransferContextIN->
+					     ui32PackedCCBSizeU8888,
+					     &psTransferContextInt);
+	/* Exit early if the bridged call fails */
+	if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK))
+	{
+		goto RGXCreateTransferContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
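+	/* Wrap the new transfer context in a client handle, registering the KM destroy function as the release callback so the context is torn down when the handle is freed. */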
+	psRGXCreateTransferContextOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRGXCreateTransferContextOUT->
+				      hTransferContext,
+				      (void *)psTransferContextInt,
+				      PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      PVRSRVRGXDestroyTransferContextKM);
+	if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateTransferContext_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXCreateTransferContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData,
+					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now we have cleaned up lookup handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
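+	/* On failure no client handle owns the new context yet, so destroy it here to avoid a leak. */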
+	if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+	{
+		if (psTransferContextInt)
+		{
+			PVRSRVRGXDestroyTransferContextKM(psTransferContextInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+				      PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT
+				      * psRGXDestroyTransferContextIN,
+				      PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT
+				      * psRGXDestroyTransferContextOUT,
+				      CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXDestroyTransferContextOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE)
+					psRGXDestroyTransferContextIN->
+					hTransferContext,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+	if (unlikely
+	    ((psRGXDestroyTransferContextOUT->eError != PVRSRV_OK)
+	     && (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeRGXDestroyTransferContext: %s",
+			 PVRSRVGetErrorString(psRGXDestroyTransferContextOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXDestroyTransferContext_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RGXDestroyTransferContext_exit:
+
+	return 0;
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeRGXSubmitTransfer(IMG_UINT32 ui32DispatchTableEntry,
+			      PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER *
+			      psRGXSubmitTransferIN,
+			      PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER *
+			      psRGXSubmitTransferOUT,
+			      CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hTransferContext = psRGXSubmitTransferIN->hTransferContext;
+	RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL;
+	IMG_UINT32 *ui32ClientFenceCountInt = NULL;
+	SYNC_PRIMITIVE_BLOCK ***psFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE **hFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 **ui32FenceSyncOffsetInt = NULL;
+	IMG_UINT32 **ui32FenceValueInt = NULL;
+	IMG_UINT32 *ui32ClientUpdateCountInt = NULL;
+	SYNC_PRIMITIVE_BLOCK ***psUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE **hUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 **ui32UpdateSyncOffsetInt = NULL;
+	IMG_UINT32 **ui32UpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerSyncCountInt = NULL;
+	IMG_UINT32 **ui32ServerSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE ***psServerSyncInt = NULL;
+	IMG_HANDLE **hServerSyncInt2 = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_UINT32 *ui32CommandSizeInt = NULL;
+	IMG_UINT8 **ui8FWCommandInt = NULL;
+	IMG_UINT32 *ui32TQPrepareFlagsInt = NULL;
+	IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+	PMR **psSyncPMRsInt = NULL;
+	IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+	IMG_BYTE *pArrayArgsBuffer2 = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+	    (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+	    (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+	    (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+	    (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+	    (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+	    (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+	    (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *)) +
+	    (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0;
+	IMG_UINT32 ui32BufferSize2 = 0;
+	IMG_UINT32 ui32NextOffset2 = 0;
+
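+	/* This call marshals two-dimensional arrays: the first-level buffer sized here holds the per-prepare counts and pointer tables; a second buffer for the inner arrays is sized once those counts have been copied in from user space. */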
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+
+		ui32BufferSize +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK **);
+		ui32BufferSize +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_HANDLE **);
+		ui32BufferSize +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+		ui32BufferSize +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+		ui32BufferSize +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK **);
+		ui32BufferSize +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_HANDLE **);
+		ui32BufferSize +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+		ui32BufferSize +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+		ui32BufferSize +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+		ui32BufferSize +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(SERVER_SYNC_PRIMITIVE **);
+		ui32BufferSize +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_HANDLE **);
+		ui32BufferSize +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT8 *);
+	}
+
+	if (unlikely
+	    (psRGXSubmitTransferIN->ui32PrepareCount >
+	     TQ_MAX_PREPARES_PER_SUBMIT))
+	{
+		psRGXSubmitTransferOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXSubmitTransfer_exit;
+	}
+
+	if (unlikely
+	    (psRGXSubmitTransferIN->ui32SyncPMRCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXSubmitTransferOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXSubmitTransfer_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXSubmitTransferIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRGXSubmitTransferIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXSubmitTransfer_exit;
+			}
+		}
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32ClientFenceCountInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientFenceCountInt,
+		     (const void __user *)psRGXSubmitTransferIN->
+		     pui32ClientFenceCount,
+		     psRGXSubmitTransferIN->ui32PrepareCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning psFenceUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */
+		psFenceUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK ***) (((IMG_UINT8 *) pArrayArgsBuffer)
+						+ ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK **);
+		/* Assigning hFenceUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */
+		hFenceUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_HANDLE);
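+		/* Note: the size accounting above reserved sizeof(IMG_HANDLE **) per element here; the two agree because IMG_HANDLE is itself a pointer type. */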
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32FenceSyncOffsetInt to the right offset in the pool buffer for first dimension */
+		ui32FenceSyncOffsetInt =
+		    (IMG_UINT32 **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32FenceValueInt to the right offset in the pool buffer for first dimension */
+		ui32FenceValueInt =
+		    (IMG_UINT32 **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32ClientUpdateCountInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientUpdateCountInt,
+		     (const void __user *)psRGXSubmitTransferIN->
+		     pui32ClientUpdateCount,
+		     psRGXSubmitTransferIN->ui32PrepareCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */
+		psUpdateUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK ***) (((IMG_UINT8 *) pArrayArgsBuffer)
+						+ ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK **);
+		/* Assigning hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */
+		hUpdateUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32UpdateSyncOffsetInt to the right offset in the pool buffer for first dimension */
+		ui32UpdateSyncOffsetInt =
+		    (IMG_UINT32 **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32UpdateValueInt to the right offset in the pool buffer for first dimension */
+		ui32UpdateValueInt =
+		    (IMG_UINT32 **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32ServerSyncCountInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ServerSyncCountInt,
+		     (const void __user *)psRGXSubmitTransferIN->
+		     pui32ServerSyncCount,
+		     psRGXSubmitTransferIN->ui32PrepareCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32ServerSyncFlagsInt to the right offset in the pool buffer for first dimension */
+		ui32ServerSyncFlagsInt =
+		    (IMG_UINT32 **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning psServerSyncInt to the right offset in the pool buffer for first dimension */
+		psServerSyncInt =
+		    (SERVER_SYNC_PRIMITIVE
+		     ***) (((IMG_UINT8 *) pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(SERVER_SYNC_PRIMITIVE **);
+		/* Assigning hServerSyncInt2 to the right offset in the pool buffer for first dimension */
+		hServerSyncInt2 =
+		    (IMG_HANDLE **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	{
+		uiUpdateFenceNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceNameInt,
+		     (const void __user *)psRGXSubmitTransferIN->
+		     puiUpdateFenceName,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer_exit;
+		}
+		((IMG_CHAR *)
+		 uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH *
+					sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32CommandSizeInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32CommandSizeInt,
+		     (const void __user *)psRGXSubmitTransferIN->
+		     pui32CommandSize,
+		     psRGXSubmitTransferIN->ui32PrepareCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui8FWCommandInt to the right offset in the pool buffer for first dimension */
+		ui8FWCommandInt =
+		    (IMG_UINT8 **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT8 *);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32TQPrepareFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32PrepareCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32TQPrepareFlagsInt,
+		     (const void __user *)psRGXSubmitTransferIN->
+		     pui32TQPrepareFlags,
+		     psRGXSubmitTransferIN->ui32PrepareCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32SyncPMRCount != 0)
+	{
+		ui32SyncPMRFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32SyncPMRCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32SyncPMRFlagsInt,
+		     (const void __user *)psRGXSubmitTransferIN->
+		     pui32SyncPMRFlags,
+		     psRGXSubmitTransferIN->ui32SyncPMRCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32SyncPMRCount != 0)
+	{
+		psSyncPMRsInt =
+		    (PMR **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+			      ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *);
+		hSyncPMRsInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransferIN->ui32SyncPMRCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hSyncPMRsInt2,
+		     (const void __user *)psRGXSubmitTransferIN->phSyncPMRs,
+		     psRGXSubmitTransferIN->ui32SyncPMRCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+
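+	/* The per-prepare element counts are now in kernel memory, so the second-level buffer backing the inner arrays can be sized; the counts themselves are bounds-checked in the carving loops below. */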
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			ui32BufferSize2 +=
+			    ui32ClientFenceCountInt[i] *
+			    sizeof(SYNC_PRIMITIVE_BLOCK *);
+			ui32BufferSize2 +=
+			    ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE *);
+			ui32BufferSize2 +=
+			    ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 +=
+			    ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 +=
+			    ui32ClientUpdateCountInt[i] *
+			    sizeof(SYNC_PRIMITIVE_BLOCK *);
+			ui32BufferSize2 +=
+			    ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *);
+			ui32BufferSize2 +=
+			    ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 +=
+			    ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 +=
+			    ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 +=
+			    ui32ServerSyncCountInt[i] *
+			    sizeof(SERVER_SYNC_PRIMITIVE *);
+			ui32BufferSize2 +=
+			    ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE *);
+			ui32BufferSize2 +=
+			    ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+		}
+	}
+
+	if (ui32BufferSize2 != 0)
+	{
+		pArrayArgsBuffer2 = OSAllocMemNoStats(ui32BufferSize2);
+
+		if (!pArrayArgsBuffer2)
+		{
+			psRGXSubmitTransferOUT->eError =
+			    PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+
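+	/* Carve each inner array out of the second buffer, validating the user-supplied counts against the bridge maxima as we go. */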
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			if (ui32ClientFenceCountInt[i] > PVRSRV_MAX_SYNC_PRIMS)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Assigning each psFenceUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */
+			psFenceUFOSyncPrimBlockInt[i] =
+			    (SYNC_PRIMITIVE_BLOCK
+			     **) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+				  ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientFenceCountInt[i] *
+			    sizeof(SYNC_PRIMITIVE_BLOCK *);
+			/* Assigning each hFenceUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */
+			hFenceUFOSyncPrimBlockInt2[i] =
+			    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Assigning each ui32FenceSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32FenceSyncOffsetInt[i] =
+			    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Assigning each ui32FenceValueInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32FenceValueInt[i] =
+			    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			if (ui32ClientUpdateCountInt[i] > PVRSRV_MAX_SYNC_PRIMS)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Assigning each psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */
+			psUpdateUFOSyncPrimBlockInt[i] =
+			    (SYNC_PRIMITIVE_BLOCK
+			     **) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+				  ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientUpdateCountInt[i] *
+			    sizeof(SYNC_PRIMITIVE_BLOCK *);
+			/* Assigning each hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */
+			hUpdateUFOSyncPrimBlockInt2[i] =
+			    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Assigning each ui32UpdateSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32UpdateSyncOffsetInt[i] =
+			    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Assigning each ui32UpdateValueInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32UpdateValueInt[i] =
+			    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			if (ui32ServerSyncCountInt[i] > PVRSRV_MAX_SYNC_PRIMS)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Assigning each ui32ServerSyncFlagsInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32ServerSyncFlagsInt[i] =
+			    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Assigning each psServerSyncInt to the right offset in the pool buffer (this is the second dimension) */
+			psServerSyncInt[i] =
+			    (SERVER_SYNC_PRIMITIVE
+			     **) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+				  ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ServerSyncCountInt[i] *
+			    sizeof(SERVER_SYNC_PRIMITIVE *);
+			/* Assigning each hServerSyncInt2 to the right offset in the pool buffer (this is the second dimension) */
+			hServerSyncInt2[i] =
+			    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			if (ui32CommandSizeInt[i] >
+			    RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Assigning each ui8FWCommandInt to the right offset in the pool buffer (this is the second dimension) */
+			ui8FWCommandInt[i] =
+			    (IMG_UINT8 *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					   ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+		}
+	}
+
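+	/* For each prepare, fetch the user-space pointer to the inner array and then copy the array it addresses: two OSCopyFromUser() calls per element of the outer array. */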
+	{
+		IMG_UINT32 i;
+		IMG_HANDLE **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransferIN->
+			     phFenceUFOSyncPrimBlock[i],
+			     sizeof(IMG_HANDLE **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (hFenceUFOSyncPrimBlockInt2[i]),
+				     (const void __user *)psPtr,
+				     (ui32ClientFenceCountInt[i] *
+				      sizeof(IMG_HANDLE))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransferOUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransferIN->
+			     pui32FenceSyncOffset[i],
+			     sizeof(IMG_UINT32 **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (ui32FenceSyncOffsetInt[i]),
+				     (const void __user *)psPtr,
+				     (ui32ClientFenceCountInt[i] *
+				      sizeof(IMG_UINT32))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransferOUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransferIN->
+			     pui32FenceValue[i],
+			     sizeof(IMG_UINT32 **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (ui32FenceValueInt[i]),
+				     (const void __user *)psPtr,
+				     (ui32ClientFenceCountInt[i] *
+				      sizeof(IMG_UINT32))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransferOUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_HANDLE **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransferIN->
+			     phUpdateUFOSyncPrimBlock[i],
+			     sizeof(IMG_HANDLE **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (hUpdateUFOSyncPrimBlockInt2[i]),
+				     (const void __user *)psPtr,
+				     (ui32ClientUpdateCountInt[i] *
+				      sizeof(IMG_HANDLE))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransferOUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransferIN->
+			     pui32UpdateSyncOffset[i],
+			     sizeof(IMG_UINT32 **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (ui32UpdateSyncOffsetInt[i]),
+				     (const void __user *)psPtr,
+				     (ui32ClientUpdateCountInt[i] *
+				      sizeof(IMG_UINT32))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransferOUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransferIN->
+			     pui32UpdateValue[i],
+			     sizeof(IMG_UINT32 **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (ui32UpdateValueInt[i]),
+				     (const void __user *)psPtr,
+				     (ui32ClientUpdateCountInt[i] *
+				      sizeof(IMG_UINT32))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransferOUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransferIN->
+			     pui32ServerSyncFlags[i],
+			     sizeof(IMG_UINT32 **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (ui32ServerSyncFlagsInt[i]),
+				     (const void __user *)psPtr,
+				     (ui32ServerSyncCountInt[i] *
+				      sizeof(IMG_UINT32))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransferOUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_HANDLE **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransferIN->
+			     phServerSync[i],
+			     sizeof(IMG_HANDLE **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (hServerSyncInt2[i]),
+				     (const void __user *)psPtr,
+				     (ui32ServerSyncCountInt[i] *
+				      sizeof(IMG_HANDLE))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransferOUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT8 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransferIN->
+			     pui8FWCommand[i],
+			     sizeof(IMG_UINT8 **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32CommandSizeInt[i] * sizeof(IMG_UINT8)) > 0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (ui8FWCommandInt[i]),
+				     (const void __user *)psPtr,
+				     (ui32CommandSizeInt[i] *
+				      sizeof(IMG_UINT8))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransferOUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXSubmitTransferOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psTransferContextInt,
+				       hTransferContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXSubmitTransferOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXSubmitTransfer_exit;
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			IMG_UINT32 j;
+			for (j = 0; j < ui32ClientFenceCountInt[i]; j++)
+			{
+				/* Look up the address from the handle */
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRVLookupHandleUnlocked(psConnection->
+							       psHandleBase,
+							       (void **)
+							       &psFenceUFOSyncPrimBlockInt
+							       [i][j],
+							       hFenceUFOSyncPrimBlockInt2
+							       [i][j],
+							       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+							       IMG_TRUE);
+				if (unlikely
+				    (psRGXSubmitTransferOUT->eError !=
+				     PVRSRV_OK))
+				{
+					UnlockHandle(psConnection->
+						     psHandleBase);
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			IMG_UINT32 j;
+			for (j = 0; j < ui32ClientUpdateCountInt[i]; j++)
+			{
+				/* Look up the address from the handle */
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRVLookupHandleUnlocked(psConnection->
+							       psHandleBase,
+							       (void **)
+							       &psUpdateUFOSyncPrimBlockInt
+							       [i][j],
+							       hUpdateUFOSyncPrimBlockInt2
+							       [i][j],
+							       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+							       IMG_TRUE);
+				if (unlikely
+				    (psRGXSubmitTransferOUT->eError !=
+				     PVRSRV_OK))
+				{
+					UnlockHandle(psConnection->
+						     psHandleBase);
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			IMG_UINT32 j;
+			for (j = 0; j < ui32ServerSyncCountInt[i]; j++)
+			{
+				/* Look up the address from the handle */
+				psRGXSubmitTransferOUT->eError =
+				    PVRSRVLookupHandleUnlocked(psConnection->
+							       psHandleBase,
+							       (void **)
+							       &psServerSyncInt
+							       [i][j],
+							       hServerSyncInt2
+							       [i][j],
+							       PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+							       IMG_TRUE);
+				if (unlikely
+				    (psRGXSubmitTransferOUT->eError !=
+				     PVRSRV_OK))
+				{
+					UnlockHandle(psConnection->
+						     psHandleBase);
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransferIN->ui32SyncPMRCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXSubmitTransferOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psSyncPMRsInt[i],
+						       hSyncPMRsInt2[i],
+						       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+						       IMG_TRUE);
+			if (unlikely
+			    (psRGXSubmitTransferOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXSubmitTransfer_exit;
+			}
+		}
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXSubmitTransferOUT->eError =
+	    PVRSRVRGXSubmitTransferKM(psTransferContextInt,
+				      psRGXSubmitTransferIN->
+				      ui32ClientCacheOpSeqNum,
+				      psRGXSubmitTransferIN->ui32PrepareCount,
+				      ui32ClientFenceCountInt,
+				      psFenceUFOSyncPrimBlockInt,
+				      ui32FenceSyncOffsetInt, ui32FenceValueInt,
+				      ui32ClientUpdateCountInt,
+				      psUpdateUFOSyncPrimBlockInt,
+				      ui32UpdateSyncOffsetInt,
+				      ui32UpdateValueInt,
+				      ui32ServerSyncCountInt,
+				      ui32ServerSyncFlagsInt, psServerSyncInt,
+				      psRGXSubmitTransferIN->hCheckFenceFD,
+				      psRGXSubmitTransferIN->h2DUpdateTimeline,
+				      &psRGXSubmitTransferOUT->h2DUpdateFence,
+				      psRGXSubmitTransferIN->h3DUpdateTimeline,
+				      &psRGXSubmitTransferOUT->h3DUpdateFence,
+				      uiUpdateFenceNameInt, ui32CommandSizeInt,
+				      ui8FWCommandInt, ui32TQPrepareFlagsInt,
+				      psRGXSubmitTransferIN->ui32ExtJobRef,
+				      psRGXSubmitTransferIN->ui32SyncPMRCount,
+				      ui32SyncPMRFlagsInt, psSyncPMRsInt);
+
+ RGXSubmitTransfer_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psTransferContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hTransferContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+	}
+
+	if (hFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			IMG_UINT32 j;
+			for (j = 0; j < ui32ClientFenceCountInt[i]; j++)
+			{
+
+				/* Unreference the previously looked up handle */
+				if (hFenceUFOSyncPrimBlockInt2[i][j])
+				{
+					PVRSRVReleaseHandleUnlocked
+					    (psConnection->psHandleBase,
+					     hFenceUFOSyncPrimBlockInt2[i][j],
+					     PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+				}
+			}
+		}
+	}
+
+	if (hUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			IMG_UINT32 j;
+			for (j = 0; j < ui32ClientUpdateCountInt[i]; j++)
+			{
+
+				/* Unreference the previously looked up handle */
+				if (hUpdateUFOSyncPrimBlockInt2[i][j])
+				{
+					PVRSRVReleaseHandleUnlocked
+					    (psConnection->psHandleBase,
+					     hUpdateUFOSyncPrimBlockInt2[i][j],
+					     PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+				}
+			}
+		}
+	}
+
+	if (hServerSyncInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			IMG_UINT32 j;
+			for (j = 0; j < ui32ServerSyncCountInt[i]; j++)
+			{
+
+				/* Unreference the previously looked up handle */
+				if (hServerSyncInt2[i][j])
+				{
+					PVRSRVReleaseHandleUnlocked
+					    (psConnection->psHandleBase,
+					     hServerSyncInt2[i][j],
+					     PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+				}
+			}
+		}
+	}
+
+	if (hSyncPMRsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransferIN->ui32SyncPMRCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hSyncPMRsInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hSyncPMRsInt2[i],
+							    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+			}
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize2 == ui32NextOffset2);
+
+	if (pArrayArgsBuffer2)
+		OSFreeMemNoStats(pArrayArgsBuffer2);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeRGXSubmitTransfer NULL
+#endif
+
+static IMG_INT
+PVRSRVBridgeRGXSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY
+					  * psRGXSetTransferContextPriorityIN,
+					  PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY
+					  * psRGXSetTransferContextPriorityOUT,
+					  CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hTransferContext =
+	    psRGXSetTransferContextPriorityIN->hTransferContext;
+	RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXSetTransferContextPriorityOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psTransferContextInt,
+				       hTransferContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXSetTransferContextPriorityOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXSetTransferContextPriority_exit;
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXSetTransferContextPriorityOUT->eError =
+	    PVRSRVRGXSetTransferContextPriorityKM(psConnection,
+						  OSGetDevData(psConnection),
+						  psTransferContextInt,
+						  psRGXSetTransferContextPriorityIN->
+						  ui32Priority);
+
+ RGXSetTransferContextPriority_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psTransferContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hTransferContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
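+/*
+ * PVRSRVBridgeRGXSetTransferContextPriority above shows the minimal form of
+ * the handle discipline every entry point in this file follows: take the
+ * handle lock, look up (and reference) the object with
+ * PVRSRVLookupHandleUnlocked(..., IMG_TRUE), drop the lock across the KM
+ * call, then re-take it at the exit label to release the reference. In
+ * outline:
+ *
+ *	LockHandle(psConnection->psHandleBase);
+ *	eError = PVRSRVLookupHandleUnlocked(...);	// takes a reference
+ *	UnlockHandle(psConnection->psHandleBase);
+ *	eError = <the KM call>;
+ * exit:
+ *	LockHandle(psConnection->psHandleBase);
+ *	PVRSRVReleaseHandleUnlocked(...);		// drops the reference
+ *	UnlockHandle(psConnection->psHandleBase);
+ */
+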
+#if !defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeRGXSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2 *
+			       psRGXSubmitTransfer2IN,
+			       PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2 *
+			       psRGXSubmitTransfer2OUT,
+			       CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hTransferContext = psRGXSubmitTransfer2IN->hTransferContext;
+	RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL;
+	IMG_UINT32 *ui32ClientFenceCountInt = NULL;
+	SYNC_PRIMITIVE_BLOCK ***psFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE **hFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 **ui32FenceSyncOffsetInt = NULL;
+	IMG_UINT32 **ui32FenceValueInt = NULL;
+	IMG_UINT32 *ui32ClientUpdateCountInt = NULL;
+	SYNC_PRIMITIVE_BLOCK ***psUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE **hUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 **ui32UpdateSyncOffsetInt = NULL;
+	IMG_UINT32 **ui32UpdateValueInt = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_UINT32 *ui32CommandSizeInt = NULL;
+	IMG_UINT8 **ui8FWCommandInt = NULL;
+	IMG_UINT32 *ui32TQPrepareFlagsInt = NULL;
+	IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+	PMR **psSyncPMRsInt = NULL;
+	IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+	IMG_BYTE *pArrayArgsBuffer2 = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+	    (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+	    (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+	    (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+	    (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+	    (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+	    (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) +
+	    (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0;
+	IMG_UINT32 ui32BufferSize2 = 0;
+	IMG_UINT32 ui32NextOffset2 = 0;
+
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+
+		ui32BufferSize +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK **);
+		ui32BufferSize +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_HANDLE **);
+		ui32BufferSize +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+		ui32BufferSize +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+		ui32BufferSize +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK **);
+		ui32BufferSize +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_HANDLE **);
+		ui32BufferSize +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+		ui32BufferSize +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+		ui32BufferSize +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT8 *);
+	}
+
+	if (unlikely
+	    (psRGXSubmitTransfer2IN->ui32PrepareCount >
+	     TQ_MAX_PREPARES_PER_SUBMIT))
+	{
+		psRGXSubmitTransfer2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXSubmitTransfer2_exit;
+	}
+
+	if (unlikely
+	    (psRGXSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psRGXSubmitTransfer2OUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXSubmitTransfer2_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXSubmitTransfer2IN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRGXSubmitTransfer2IN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXSubmitTransfer2_exit;
+			}
+		}
+	}
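+
+	/*
+	 * Fast-path note: the bridge input buffer is
+	 * PVRSRV_MAX_BRIDGE_IN_SIZE bytes, so any space left after the
+	 * word-aligned input struct can hold the unpacked arrays:
+	 *
+	 *	offset = PVR_ALIGN(sizeof(*psIN), sizeof(unsigned long));
+	 *	excess = (offset >= PVRSRV_MAX_BRIDGE_IN_SIZE) ?
+	 *		 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - offset;
+	 *
+	 * Only when ui32BufferSize exceeds that excess is pArrayArgsBuffer
+	 * heap-allocated, and bHaveEnoughSpace records which case applies so
+	 * the exit path knows whether OSFreeMemNoStats is needed.
+	 */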
+
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		ui32ClientFenceCountInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientFenceCountInt,
+		     (const void __user *)psRGXSubmitTransfer2IN->
+		     pui32ClientFenceCount,
+		     psRGXSubmitTransfer2IN->ui32PrepareCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer2_exit;
+		}
+	}
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		/* Assigning psFenceUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */
+		psFenceUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK ***) (((IMG_UINT8 *) pArrayArgsBuffer)
+						+ ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK **);
+		/* Assigning hFenceUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */
+		hFenceUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32FenceSyncOffsetInt to the right offset in the pool buffer for first dimension */
+		ui32FenceSyncOffsetInt =
+		    (IMG_UINT32 **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+	}
+
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32FenceValueInt to the right offset in the pool buffer for first dimension */
+		ui32FenceValueInt =
+		    (IMG_UINT32 **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+	}
+
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		ui32ClientUpdateCountInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ClientUpdateCountInt,
+		     (const void __user *)psRGXSubmitTransfer2IN->
+		     pui32ClientUpdateCount,
+		     psRGXSubmitTransfer2IN->ui32PrepareCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer2_exit;
+		}
+	}
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		/* Assigning psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */
+		psUpdateUFOSyncPrimBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK ***) (((IMG_UINT8 *) pArrayArgsBuffer)
+						+ ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK **);
+		/* Assigning hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */
+		hUpdateUFOSyncPrimBlockInt2 =
+		    (IMG_HANDLE **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32UpdateSyncOffsetInt to the right offset in the pool buffer for first dimension */
+		ui32UpdateSyncOffsetInt =
+		    (IMG_UINT32 **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+	}
+
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32UpdateValueInt to the right offset in the pool buffer for first dimension */
+		ui32UpdateValueInt =
+		    (IMG_UINT32 **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				     ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT32 *);
+	}
+
+	{
+		uiUpdateFenceNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceNameInt,
+		     (const void __user *)psRGXSubmitTransfer2IN->
+		     puiUpdateFenceName,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer2_exit;
+		}
+		((IMG_CHAR *)
+		 uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH *
+					sizeof(IMG_CHAR)) - 1] = '\0';
+	}
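+
+	/*
+	 * The update fence name travels as a fixed PVRSRV_SYNC_NAME_LENGTH
+	 * buffer; the copy above forces a terminating '\0' so the kernel
+	 * never relies on user space having terminated the string.
+	 */
+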
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		ui32CommandSizeInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32CommandSizeInt,
+		     (const void __user *)psRGXSubmitTransfer2IN->
+		     pui32CommandSize,
+		     psRGXSubmitTransfer2IN->ui32PrepareCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer2_exit;
+		}
+	}
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui8FWCommandInt to the right offset in the pool buffer for first dimension */
+		ui8FWCommandInt =
+		    (IMG_UINT8 **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT8 *);
+	}
+
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		ui32TQPrepareFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32PrepareCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32TQPrepareFlagsInt,
+		     (const void __user *)psRGXSubmitTransfer2IN->
+		     pui32TQPrepareFlags,
+		     psRGXSubmitTransfer2IN->ui32PrepareCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer2_exit;
+		}
+	}
+	if (psRGXSubmitTransfer2IN->ui32SyncPMRCount != 0)
+	{
+		ui32SyncPMRFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32SyncPMRCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32SyncPMRFlagsInt,
+		     (const void __user *)psRGXSubmitTransfer2IN->
+		     pui32SyncPMRFlags,
+		     psRGXSubmitTransfer2IN->ui32SyncPMRCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer2_exit;
+		}
+	}
+	if (psRGXSubmitTransfer2IN->ui32SyncPMRCount != 0)
+	{
+		psSyncPMRsInt =
+		    (PMR **) (((IMG_UINT8 *) pArrayArgsBuffer) +
+			      ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *);
+		hSyncPMRsInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXSubmitTransfer2IN->ui32SyncPMRCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hSyncPMRsInt2,
+		     (const void __user *)psRGXSubmitTransfer2IN->phSyncPMRs,
+		     psRGXSubmitTransfer2IN->ui32SyncPMRCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXSubmitTransfer2_exit;
+		}
+	}
+
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			ui32BufferSize2 +=
+			    ui32ClientFenceCountInt[i] *
+			    sizeof(SYNC_PRIMITIVE_BLOCK *);
+			ui32BufferSize2 +=
+			    ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE *);
+			ui32BufferSize2 +=
+			    ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 +=
+			    ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 +=
+			    ui32ClientUpdateCountInt[i] *
+			    sizeof(SYNC_PRIMITIVE_BLOCK *);
+			ui32BufferSize2 +=
+			    ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *);
+			ui32BufferSize2 +=
+			    ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 +=
+			    ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 +=
+			    ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+		}
+	}
+
+	if (ui32BufferSize2 != 0)
+	{
+		pArrayArgsBuffer2 = OSAllocMemNoStats(ui32BufferSize2);
+
+		if (!pArrayArgsBuffer2)
+		{
+			psRGXSubmitTransfer2OUT->eError =
+			    PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto RGXSubmitTransfer2_exit;
+		}
+	}
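+
+	/*
+	 * Second sizing pass: the second-dimension array lengths are the
+	 * per-prepare counts that were themselves just copied from user
+	 * space, so ui32BufferSize2 can only be computed, and
+	 * pArrayArgsBuffer2 allocated, once the first-level copies are done.
+	 * Each count is then bounds-checked (against PVRSRV_MAX_SYNC_PRIMS or
+	 * RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE) as the sub-arrays are carved
+	 * out of the pool below.
+	 */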
+
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			if (ui32ClientFenceCountInt[i] > PVRSRV_MAX_SYNC_PRIMS)
+			{
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+				goto RGXSubmitTransfer2_exit;
+			}
+
+			/* Assigning each psFenceUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */
+			psFenceUFOSyncPrimBlockInt[i] =
+			    (SYNC_PRIMITIVE_BLOCK
+			     **) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+				  ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientFenceCountInt[i] *
+			    sizeof(SYNC_PRIMITIVE_BLOCK *);
+			/* Assigning each hFenceUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */
+			hFenceUFOSyncPrimBlockInt2[i] =
+			    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE);
+		}
+	}
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			/* Assigning each ui32FenceSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32FenceSyncOffsetInt[i] =
+			    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			/* Assigning each ui32FenceValueInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32FenceValueInt[i] =
+			    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			if (ui32ClientUpdateCountInt[i] > PVRSRV_MAX_SYNC_PRIMS)
+			{
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+				goto RGXSubmitTransfer2_exit;
+			}
+
+			/* Assigning each psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */
+			psUpdateUFOSyncPrimBlockInt[i] =
+			    (SYNC_PRIMITIVE_BLOCK
+			     **) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+				  ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientUpdateCountInt[i] *
+			    sizeof(SYNC_PRIMITIVE_BLOCK *);
+			/* Assigning each hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */
+			hUpdateUFOSyncPrimBlockInt2[i] =
+			    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE);
+		}
+	}
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			/* Assigning each ui32UpdateSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32UpdateSyncOffsetInt[i] =
+			    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			/* Assigning each ui32UpdateValueInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32UpdateValueInt[i] =
+			    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					    ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			if (ui32CommandSizeInt[i] >
+			    RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)
+			{
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+				goto RGXSubmitTransfer2_exit;
+			}
+
+			/* Assigning each ui8FWCommandInt to the right offset in the pool buffer (this is the second dimension) */
+			ui8FWCommandInt[i] =
+			    (IMG_UINT8 *) (((IMG_UINT8 *) pArrayArgsBuffer2) +
+					   ui32NextOffset2);
+			ui32NextOffset2 +=
+			    ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_HANDLE **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransfer2IN->
+			     phFenceUFOSyncPrimBlock[i],
+			     sizeof(IMG_HANDLE **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer2_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (hFenceUFOSyncPrimBlockInt2[i]),
+				     (const void __user *)psPtr,
+				     (ui32ClientFenceCountInt[i] *
+				      sizeof(IMG_HANDLE))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransfer2OUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer2_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransfer2IN->
+			     pui32FenceSyncOffset[i],
+			     sizeof(IMG_UINT32 **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer2_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (ui32FenceSyncOffsetInt[i]),
+				     (const void __user *)psPtr,
+				     (ui32ClientFenceCountInt[i] *
+				      sizeof(IMG_UINT32))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransfer2OUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer2_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransfer2IN->
+			     pui32FenceValue[i],
+			     sizeof(IMG_UINT32 **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer2_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (ui32FenceValueInt[i]),
+				     (const void __user *)psPtr,
+				     (ui32ClientFenceCountInt[i] *
+				      sizeof(IMG_UINT32))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransfer2OUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer2_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_HANDLE **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransfer2IN->
+			     phUpdateUFOSyncPrimBlock[i],
+			     sizeof(IMG_HANDLE **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer2_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (hUpdateUFOSyncPrimBlockInt2[i]),
+				     (const void __user *)psPtr,
+				     (ui32ClientUpdateCountInt[i] *
+				      sizeof(IMG_HANDLE))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransfer2OUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer2_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransfer2IN->
+			     pui32UpdateSyncOffset[i],
+			     sizeof(IMG_UINT32 **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer2_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (ui32UpdateSyncOffsetInt[i]),
+				     (const void __user *)psPtr,
+				     (ui32ClientUpdateCountInt[i] *
+				      sizeof(IMG_UINT32))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransfer2OUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer2_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransfer2IN->
+			     pui32UpdateValue[i],
+			     sizeof(IMG_UINT32 **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer2_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) >
+			    0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (ui32UpdateValueInt[i]),
+				     (const void __user *)psPtr,
+				     (ui32ClientUpdateCountInt[i] *
+				      sizeof(IMG_UINT32))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransfer2OUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer2_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT8 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			/* Copy the pointer over from the client side */
+			if (OSCopyFromUser
+			    (NULL, &psPtr,
+			     (const void __user *)&psRGXSubmitTransfer2IN->
+			     pui8FWCommand[i],
+			     sizeof(IMG_UINT8 **)) != PVRSRV_OK)
+			{
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer2_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32CommandSizeInt[i] * sizeof(IMG_UINT8)) > 0)
+			{
+				if (OSCopyFromUser
+				    (NULL, (ui8FWCommandInt[i]),
+				     (const void __user *)psPtr,
+				     (ui32CommandSizeInt[i] *
+				      sizeof(IMG_UINT8))) != PVRSRV_OK)
+				{
+					psRGXSubmitTransfer2OUT->eError =
+					    PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer2_exit;
+				}
+			}
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXSubmitTransfer2OUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psTransferContextInt,
+				       hTransferContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+				       IMG_TRUE);
+	if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXSubmitTransfer2_exit;
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			IMG_UINT32 j;
+			for (j = 0; j < ui32ClientFenceCountInt[i]; j++)
+			{
+				/* Look up the address from the handle */
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRVLookupHandleUnlocked(psConnection->
+							       psHandleBase,
+							       (void **)
+							       &psFenceUFOSyncPrimBlockInt
+							       [i][j],
+							       hFenceUFOSyncPrimBlockInt2
+							       [i][j],
+							       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+							       IMG_TRUE);
+				if (unlikely
+				    (psRGXSubmitTransfer2OUT->eError !=
+				     PVRSRV_OK))
+				{
+					UnlockHandle(psConnection->
+						     psHandleBase);
+					goto RGXSubmitTransfer2_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			IMG_UINT32 j;
+			for (j = 0; j < ui32ClientUpdateCountInt[i]; j++)
+			{
+				/* Look up the address from the handle */
+				psRGXSubmitTransfer2OUT->eError =
+				    PVRSRVLookupHandleUnlocked(psConnection->
+							       psHandleBase,
+							       (void **)
+							       &psUpdateUFOSyncPrimBlockInt
+							       [i][j],
+							       hUpdateUFOSyncPrimBlockInt2
+							       [i][j],
+							       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+							       IMG_TRUE);
+				if (unlikely
+				    (psRGXSubmitTransfer2OUT->eError !=
+				     PVRSRV_OK))
+				{
+					UnlockHandle(psConnection->
+						     psHandleBase);
+					goto RGXSubmitTransfer2_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32SyncPMRCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXSubmitTransfer2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psSyncPMRsInt[i],
+						       hSyncPMRsInt2[i],
+						       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+						       IMG_TRUE);
+			if (unlikely
+			    (psRGXSubmitTransfer2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXSubmitTransfer2_exit;
+			}
+		}
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXSubmitTransfer2OUT->eError =
+	    PVRSRVRGXSubmitTransfer2KM(psTransferContextInt,
+				       psRGXSubmitTransfer2IN->
+				       ui32ClientCacheOpSeqNum,
+				       psRGXSubmitTransfer2IN->ui32PrepareCount,
+				       ui32ClientFenceCountInt,
+				       psFenceUFOSyncPrimBlockInt,
+				       ui32FenceSyncOffsetInt,
+				       ui32FenceValueInt,
+				       ui32ClientUpdateCountInt,
+				       psUpdateUFOSyncPrimBlockInt,
+				       ui32UpdateSyncOffsetInt,
+				       ui32UpdateValueInt,
+				       psRGXSubmitTransfer2IN->hCheckFenceFD,
+				       psRGXSubmitTransfer2IN->
+				       h2DUpdateTimeline,
+				       &psRGXSubmitTransfer2OUT->h2DUpdateFence,
+				       psRGXSubmitTransfer2IN->
+				       h3DUpdateTimeline,
+				       &psRGXSubmitTransfer2OUT->h3DUpdateFence,
+				       uiUpdateFenceNameInt, ui32CommandSizeInt,
+				       ui8FWCommandInt, ui32TQPrepareFlagsInt,
+				       psRGXSubmitTransfer2IN->ui32ExtJobRef,
+				       psRGXSubmitTransfer2IN->ui32SyncPMRCount,
+				       ui32SyncPMRFlagsInt, psSyncPMRsInt);
+
+ RGXSubmitTransfer2_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psTransferContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hTransferContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+	}
+
+	if (hFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			IMG_UINT32 j;
+			for (j = 0; j < ui32ClientFenceCountInt[i]; j++)
+			{
+
+				/* Unreference the previously looked up handle */
+				if (hFenceUFOSyncPrimBlockInt2[i][j])
+				{
+					PVRSRVReleaseHandleUnlocked
+					    (psConnection->psHandleBase,
+					     hFenceUFOSyncPrimBlockInt2[i][j],
+					     PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+				}
+			}
+		}
+	}
+
+	if (hUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+		{
+			IMG_UINT32 j;
+			for (j = 0; j < ui32ClientUpdateCountInt[i]; j++)
+			{
+
+				/* Unreference the previously looked up handle */
+				if (hUpdateUFOSyncPrimBlockInt2[i][j])
+				{
+					PVRSRVReleaseHandleUnlocked
+					    (psConnection->psHandleBase,
+					     hUpdateUFOSyncPrimBlockInt2[i][j],
+					     PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+				}
+			}
+		}
+	}
+
+	if (hSyncPMRsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXSubmitTransfer2IN->ui32SyncPMRCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hSyncPMRsInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hSyncPMRsInt2[i],
+							    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+			}
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize2 == ui32NextOffset2);
+
+	if (pArrayArgsBuffer2)
+		OSFreeMemNoStats(pArrayArgsBuffer2);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeRGXSubmitTransfer2 NULL
+#endif
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXTQBridge(void);
+PVRSRV_ERROR DeinitRGXTQBridge(void);
+
+/*
+ * Register all RGXTQ functions with services
+ */
+PVRSRV_ERROR InitRGXTQBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+			      PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT,
+			      PVRSRVBridgeRGXCreateTransferContext, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+			      PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT,
+			      PVRSRVBridgeRGXDestroyTransferContext, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+			      PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER,
+			      PVRSRVBridgeRGXSubmitTransfer, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+			      PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY,
+			      PVRSRVBridgeRGXSetTransferContextPriority, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+			      PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2,
+			      PVRSRVBridgeRGXSubmitTransfer2, NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTQ functions from services
+ */
+PVRSRV_ERROR DeinitRGXTQBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+				PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+				PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+				PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+				PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+				PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2);
+
+	return PVRSRV_OK;
+}
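+
+/*
+ * PVRSRVBridgeRGXSubmitTransfer and PVRSRVBridgeRGXSubmitTransfer2 are each
+ * defined to NULL when the SUPPORT_SERVER_SYNC_IMPL setting compiles their
+ * variant out, so the dispatch table above always carries all five RGXTQ
+ * entries; a NULL handler simply leaves that opcode unimplemented for the
+ * build.
+ */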
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_ri_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_ri_bridge.c
new file mode 100644
index 0000000..dbfad0e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_ri_bridge.c
@@ -0,0 +1,744 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for ri
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "ri_server.h"
+
+#include "common_ri_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRIWritePMREntry(IMG_UINT32 ui32DispatchTableEntry,
+			    PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *
+			    psRIWritePMREntryIN,
+			    PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *
+			    psRIWritePMREntryOUT,
+			    CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMRHandle = psRIWritePMREntryIN->hPMRHandle;
+	PMR *psPMRHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRIWritePMREntryOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRHandleInt,
+				       hPMRHandle,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psRIWritePMREntryOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RIWritePMREntry_exit;
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRIWritePMREntryOUT->eError = RIWritePMREntryKM(psPMRHandleInt);
+
+ RIWritePMREntry_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMRHandle,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *
+				psRIWriteMEMDESCEntryIN,
+				PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *
+				psRIWriteMEMDESCEntryOUT,
+				CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMRHandle = psRIWriteMEMDESCEntryIN->hPMRHandle;
+	PMR *psPMRHandleInt = NULL;
+	IMG_CHAR *uiTextBInt = NULL;
+	RI_HANDLE psRIHandleInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0;
+
+	if (unlikely
+	    (psRIWriteMEMDESCEntryIN->ui32TextBSize >
+	     DEVMEM_ANNOTATION_MAX_LEN))
+	{
+		psRIWriteMEMDESCEntryOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RIWriteMEMDESCEntry_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRIWriteMEMDESCEntryIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRIWriteMEMDESCEntryOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RIWriteMEMDESCEntry_exit;
+			}
+		}
+	}
+
+	if (psRIWriteMEMDESCEntryIN->ui32TextBSize != 0)
+	{
+		uiTextBInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiTextBInt,
+		     (const void __user *)psRIWriteMEMDESCEntryIN->puiTextB,
+		     psRIWriteMEMDESCEntryIN->ui32TextBSize *
+		     sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRIWriteMEMDESCEntryOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RIWriteMEMDESCEntry_exit;
+		}
+		((IMG_CHAR *)
+		 uiTextBInt)[(psRIWriteMEMDESCEntryIN->ui32TextBSize *
+			      sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRIWriteMEMDESCEntryOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRHandleInt,
+				       hPMRHandle,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RIWriteMEMDESCEntry_exit;
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRIWriteMEMDESCEntryOUT->eError =
+	    RIWriteMEMDESCEntryKM(psPMRHandleInt,
+				  psRIWriteMEMDESCEntryIN->ui32TextBSize,
+				  uiTextBInt,
+				  psRIWriteMEMDESCEntryIN->ui64Offset,
+				  psRIWriteMEMDESCEntryIN->ui64Size,
+				  psRIWriteMEMDESCEntryIN->bIsImport,
+				  psRIWriteMEMDESCEntryIN->bIsSuballoc,
+				  &psRIHandleInt);
+	/* Exit early if the bridged call fails */
+	if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK))
+	{
+		goto RIWriteMEMDESCEntry_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRIWriteMEMDESCEntryOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRIWriteMEMDESCEntryOUT->hRIHandle,
+				      (void *)psRIHandleInt,
+				      PVRSRV_HANDLE_TYPE_RI_HANDLE,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      RIDeleteMEMDESCEntryKM);
+	if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RIWriteMEMDESCEntry_exit;
+	}
+
+	/* Release now that we have created the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RIWriteMEMDESCEntry_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMRHandle,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+	{
+		if (psRIHandleInt)
+		{
+			RIDeleteMEMDESCEntryKM(psRIHandleInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
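+/*
+ * Ownership note: once PVRSRVAllocHandleUnlocked succeeds above,
+ * RIDeleteMEMDESCEntryKM is installed as the handle's PFN_HANDLE_RELEASE,
+ * so the RI entry is freed when its handle is released. If anything fails
+ * before that point no handle owns the entry yet, which is why the exit
+ * path calls RIDeleteMEMDESCEntryKM(psRIHandleInt) directly on error.
+ */
+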
+static IMG_INT
+PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry,
+				 PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *
+				 psRIWriteProcListEntryIN,
+				 PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *
+				 psRIWriteProcListEntryOUT,
+				 CONNECTION_DATA * psConnection)
+{
+	IMG_CHAR *uiTextBInt = NULL;
+	RI_HANDLE psRIHandleInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0;
+
+	if (unlikely
+	    (psRIWriteProcListEntryIN->ui32TextBSize >
+	     DEVMEM_ANNOTATION_MAX_LEN))
+	{
+		psRIWriteProcListEntryOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RIWriteProcListEntry_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psRIWriteProcListEntryIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRIWriteProcListEntryOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RIWriteProcListEntry_exit;
+			}
+		}
+	}
+
+	if (psRIWriteProcListEntryIN->ui32TextBSize != 0)
+	{
+		uiTextBInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiTextBInt,
+		     (const void __user *)psRIWriteProcListEntryIN->puiTextB,
+		     psRIWriteProcListEntryIN->ui32TextBSize *
+		     sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRIWriteProcListEntryOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RIWriteProcListEntry_exit;
+		}
+		((IMG_CHAR *)
+		 uiTextBInt)[(psRIWriteProcListEntryIN->ui32TextBSize *
+			      sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+
+	psRIWriteProcListEntryOUT->eError =
+	    RIWriteProcListEntryKM(psRIWriteProcListEntryIN->ui32TextBSize,
+				   uiTextBInt,
+				   psRIWriteProcListEntryIN->ui64Size,
+				   psRIWriteProcListEntryIN->ui64DevVAddr,
+				   &psRIHandleInt);
+	/* Exit early if the bridged call fails */
+	if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK))
+	{
+		goto RIWriteProcListEntry_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRIWriteProcListEntryOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRIWriteProcListEntryOUT->hRIHandle,
+				      (void *)psRIHandleInt,
+				      PVRSRV_HANDLE_TYPE_RI_HANDLE,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      RIDeleteMEMDESCEntryKM);
+	if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RIWriteProcListEntry_exit;
+	}
+
+	/* Release now that we have created the handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RIWriteProcListEntry_exit:
+
+	if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK)
+	{
+		if (psRIHandleInt)
+		{
+			RIDeleteMEMDESCEntryKM(psRIHandleInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIUpdateMEMDESCAddr(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *
+				psRIUpdateMEMDESCAddrIN,
+				PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *
+				psRIUpdateMEMDESCAddrOUT,
+				CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hRIHandle = psRIUpdateMEMDESCAddrIN->hRIHandle;
+	RI_HANDLE psRIHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRIUpdateMEMDESCAddrOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psRIHandleInt,
+				       hRIHandle,
+				       PVRSRV_HANDLE_TYPE_RI_HANDLE, IMG_TRUE);
+	if (unlikely(psRIUpdateMEMDESCAddrOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RIUpdateMEMDESCAddr_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRIUpdateMEMDESCAddrOUT->eError =
+	    RIUpdateMEMDESCAddrKM(psRIHandleInt,
+				  psRIUpdateMEMDESCAddrIN->sAddr);
+
+ RIUpdateMEMDESCAddr_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psRIHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hRIHandle,
+					    PVRSRV_HANDLE_TYPE_RI_HANDLE);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
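+
+/*
+ * Locking pattern note: the handle is looked up and referenced under the
+ * handle-base lock, the lock is dropped across the RIUpdateMEMDESCAddrKM()
+ * call, and is then re-taken on the exit path purely to unreference the
+ * looked-up handle. The KM call itself runs without the handle lock held.
+ */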
+
+static IMG_INT
+PVRSRVBridgeRIDeleteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+				 PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *
+				 psRIDeleteMEMDESCEntryIN,
+				 PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *
+				 psRIDeleteMEMDESCEntryOUT,
+				 CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRIDeleteMEMDESCEntryOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRIDeleteMEMDESCEntryIN->
+					hRIHandle,
+					PVRSRV_HANDLE_TYPE_RI_HANDLE);
+	if (unlikely
+	    ((psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_OK)
+	     && (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeRIDeleteMEMDESCEntry: %s",
+			 PVRSRVGetErrorString(psRIDeleteMEMDESCEntryOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RIDeleteMEMDESCEntry_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ RIDeleteMEMDESCEntry_exit:
+
+	return 0;
+}
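+
+/*
+ * PVRSRV_ERROR_RETRY is deliberately not treated as a failure when
+ * releasing the handle above; presumably the destroy can be re-issued
+ * later, so only other errors are logged via PVR_DPF. The error code
+ * itself is still propagated in psRIDeleteMEMDESCEntryOUT->eError.
+ */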
+
+static IMG_INT
+PVRSRVBridgeRIDumpList(IMG_UINT32 ui32DispatchTableEntry,
+		       PVRSRV_BRIDGE_IN_RIDUMPLIST * psRIDumpListIN,
+		       PVRSRV_BRIDGE_OUT_RIDUMPLIST * psRIDumpListOUT,
+		       CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMRHandle = psRIDumpListIN->hPMRHandle;
+	PMR *psPMRHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRIDumpListOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRHandleInt,
+				       hPMRHandle,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psRIDumpListOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RIDumpList_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRIDumpListOUT->eError = RIDumpListKM(psPMRHandleInt);
+
+ RIDumpList_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMRHandle,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIDumpAll(IMG_UINT32 ui32DispatchTableEntry,
+		      PVRSRV_BRIDGE_IN_RIDUMPALL * psRIDumpAllIN,
+		      PVRSRV_BRIDGE_OUT_RIDUMPALL * psRIDumpAllOUT,
+		      CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psRIDumpAllIN);
+
+	psRIDumpAllOUT->eError = RIDumpAllKM();
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIDumpProcess(IMG_UINT32 ui32DispatchTableEntry,
+			  PVRSRV_BRIDGE_IN_RIDUMPPROCESS * psRIDumpProcessIN,
+			  PVRSRV_BRIDGE_OUT_RIDUMPPROCESS * psRIDumpProcessOUT,
+			  CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psRIDumpProcessOUT->eError =
+	    RIDumpProcessKM(psRIDumpProcessIN->ui32Pid);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIWritePMREntryWithOwner(IMG_UINT32 ui32DispatchTableEntry,
+				     PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *
+				     psRIWritePMREntryWithOwnerIN,
+				     PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER
+				     * psRIWritePMREntryWithOwnerOUT,
+				     CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hPMRHandle = psRIWritePMREntryWithOwnerIN->hPMRHandle;
+	PMR *psPMRHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRIWritePMREntryWithOwnerOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRHandleInt,
+				       hPMRHandle,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				       IMG_TRUE);
+	if (unlikely(psRIWritePMREntryWithOwnerOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RIWritePMREntryWithOwner_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRIWritePMREntryWithOwnerOUT->eError =
+	    RIWritePMREntryWithOwnerKM(psPMRHandleInt,
+				       psRIWritePMREntryWithOwnerIN->ui32Owner);
+
+ RIWritePMREntryWithOwner_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMRHandle,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRIBridge(void);
+PVRSRV_ERROR DeinitRIBridge(void);
+
+/*
+ * Register all RI functions with services
+ */
+PVRSRV_ERROR InitRIBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+			      PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY,
+			      PVRSRVBridgeRIWritePMREntry, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+			      PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY,
+			      PVRSRVBridgeRIWriteMEMDESCEntry, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+			      PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY,
+			      PVRSRVBridgeRIWriteProcListEntry, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+			      PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR,
+			      PVRSRVBridgeRIUpdateMEMDESCAddr, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+			      PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY,
+			      PVRSRVBridgeRIDeleteMEMDESCEntry, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST,
+			      PVRSRVBridgeRIDumpList, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL,
+			      PVRSRVBridgeRIDumpAll, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS,
+			      PVRSRVBridgeRIDumpProcess, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+			      PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER,
+			      PVRSRVBridgeRIWritePMREntryWithOwner, NULL,
+			      bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RI functions with services
+ */
+PVRSRV_ERROR DeinitRIBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+				PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+				PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+				PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+				PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+				PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+				PVRSRV_BRIDGE_RI_RIDUMPPROCESS);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI,
+				PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER);
+
+	return PVRSRV_OK;
+}
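+
+/*
+ * InitRIBridge()/DeinitRIBridge() are symmetric: every PVRSRV_BRIDGE_RI_*
+ * entry registered above is unregistered here. The bUseLock flag passed to
+ * SetDispatchTableEntry() appears to select whether the global bridge lock
+ * is taken around each handler.
+ */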
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_srvcore_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_srvcore_bridge.c
new file mode 100644
index 0000000..0934850
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_srvcore_bridge.c
@@ -0,0 +1,934 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for srvcore
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for srvcore
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "srvcore.h"
+#include "info_page.h"
+#include "proc_stats.h"
+#include "rgx_fwif_alignchecks.h"
+
+#include "common_srvcore_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeConnect(IMG_UINT32 ui32DispatchTableEntry,
+		    PVRSRV_BRIDGE_IN_CONNECT * psConnectIN,
+		    PVRSRV_BRIDGE_OUT_CONNECT * psConnectOUT,
+		    CONNECTION_DATA * psConnection)
+{
+
+	psConnectOUT->eError =
+	    PVRSRVConnectKM(psConnection, OSGetDevData(psConnection),
+			    psConnectIN->ui32Flags,
+			    psConnectIN->ui32ClientBuildOptions,
+			    psConnectIN->ui32ClientDDKVersion,
+			    psConnectIN->ui32ClientDDKBuild,
+			    &psConnectOUT->ui8KernelArch,
+			    &psConnectOUT->ui32CapabilityFlags,
+			    &psConnectOUT->ui32PVRBridges,
+			    &psConnectOUT->ui32RGXBridges);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDisconnect(IMG_UINT32 ui32DispatchTableEntry,
+		       PVRSRV_BRIDGE_IN_DISCONNECT * psDisconnectIN,
+		       PVRSRV_BRIDGE_OUT_DISCONNECT * psDisconnectOUT,
+		       CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDisconnectIN);
+
+	psDisconnectOUT->eError = PVRSRVDisconnectKM();
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeAcquireGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+				     PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *
+				     psAcquireGlobalEventObjectIN,
+				     PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT
+				     * psAcquireGlobalEventObjectOUT,
+				     CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hGlobalEventObjectInt = NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psAcquireGlobalEventObjectIN);
+
+	psAcquireGlobalEventObjectOUT->eError =
+	    PVRSRVAcquireGlobalEventObjectKM(&hGlobalEventObjectInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK))
+	{
+		goto AcquireGlobalEventObject_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psAcquireGlobalEventObjectOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psAcquireGlobalEventObjectOUT->
+				      hGlobalEventObject,
+				      (void *)hGlobalEventObjectInt,
+				      PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      PVRSRVReleaseGlobalEventObjectKM);
+	if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto AcquireGlobalEventObject_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ AcquireGlobalEventObject_exit:
+
+	if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+	{
+		if (hGlobalEventObjectInt)
+		{
+			PVRSRVReleaseGlobalEventObjectKM(hGlobalEventObjectInt);
+		}
+	}
+
+	return 0;
+}
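+
+/*
+ * Ownership note: if the handle allocation fails after the KM acquire has
+ * succeeded, the exit path releases the event object directly, since no
+ * handle owns it yet. Once PVRSRVAllocHandleUnlocked() succeeds, the
+ * registered PFN_HANDLE_RELEASE (PVRSRVReleaseGlobalEventObjectKM) takes
+ * over and releases the object when the handle is eventually destroyed.
+ */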
+
+static IMG_INT
+PVRSRVBridgeReleaseGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+				     PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *
+				     psReleaseGlobalEventObjectIN,
+				     PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT
+				     * psReleaseGlobalEventObjectOUT,
+				     CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psReleaseGlobalEventObjectOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE)
+					psReleaseGlobalEventObjectIN->
+					hGlobalEventObject,
+					PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+	if (unlikely
+	    ((psReleaseGlobalEventObjectOUT->eError != PVRSRV_OK)
+	     && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeReleaseGlobalEventObject: %s",
+			 PVRSRVGetErrorString(psReleaseGlobalEventObjectOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto ReleaseGlobalEventObject_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ ReleaseGlobalEventObject_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeEventObjectOpen(IMG_UINT32 ui32DispatchTableEntry,
+			    PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *
+			    psEventObjectOpenIN,
+			    PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *
+			    psEventObjectOpenOUT,
+			    CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hEventObject = psEventObjectOpenIN->hEventObject;
+	IMG_HANDLE hEventObjectInt = NULL;
+	IMG_HANDLE hOSEventInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psEventObjectOpenOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hEventObjectInt,
+				       hEventObject,
+				       PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+				       IMG_TRUE);
+	if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto EventObjectOpen_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psEventObjectOpenOUT->eError =
+	    OSEventObjectOpen(hEventObjectInt, &hOSEventInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
+	{
+		goto EventObjectOpen_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psEventObjectOpenOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psEventObjectOpenOUT->hOSEvent,
+				      (void *)hOSEventInt,
+				      PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      OSEventObjectClose);
+	if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto EventObjectOpen_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ EventObjectOpen_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hEventObjectInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hEventObject,
+					    PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psEventObjectOpenOUT->eError != PVRSRV_OK)
+	{
+		if (hOSEventInt)
+		{
+			OSEventObjectClose(hOSEventInt);
+		}
+	}
+
+	return 0;
+}
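+
+/*
+ * Cleanup ordering above: the looked-up shared event object is always
+ * unreferenced first; then, only on failure, any OS event created by
+ * OSEventObjectOpen() but not yet owned by a handle is closed.
+ */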
+
+static IMG_INT
+PVRSRVBridgeEventObjectWait(IMG_UINT32 ui32DispatchTableEntry,
+			    PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *
+			    psEventObjectWaitIN,
+			    PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *
+			    psEventObjectWaitOUT,
+			    CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hOSEventKM = psEventObjectWaitIN->hOSEventKM;
+	IMG_HANDLE hOSEventKMInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psEventObjectWaitOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hOSEventKMInt,
+				       hOSEventKM,
+				       PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+				       IMG_TRUE);
+	if (unlikely(psEventObjectWaitOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto EventObjectWait_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psEventObjectWaitOUT->eError = OSEventObjectWait(hOSEventKMInt);
+
+ EventObjectWait_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hOSEventKMInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hOSEventKM,
+					    PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeEventObjectClose(IMG_UINT32 ui32DispatchTableEntry,
+			     PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *
+			     psEventObjectCloseIN,
+			     PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *
+			     psEventObjectCloseOUT,
+			     CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psEventObjectCloseOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psEventObjectCloseIN->
+					hOSEventKM,
+					PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+	if (unlikely
+	    ((psEventObjectCloseOUT->eError != PVRSRV_OK)
+	     && (psEventObjectCloseOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeEventObjectClose: %s",
+			 PVRSRVGetErrorString(psEventObjectCloseOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto EventObjectClose_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ EventObjectClose_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDumpDebugInfo(IMG_UINT32 ui32DispatchTableEntry,
+			  PVRSRV_BRIDGE_IN_DUMPDEBUGINFO * psDumpDebugInfoIN,
+			  PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO * psDumpDebugInfoOUT,
+			  CONNECTION_DATA * psConnection)
+{
+
+	psDumpDebugInfoOUT->eError =
+	    PVRSRVDumpDebugInfoKM(psConnection, OSGetDevData(psConnection),
+				  psDumpDebugInfoIN->ui32ui32VerbLevel);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeGetDevClockSpeed(IMG_UINT32 ui32DispatchTableEntry,
+			     PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *
+			     psGetDevClockSpeedIN,
+			     PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *
+			     psGetDevClockSpeedOUT,
+			     CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psGetDevClockSpeedIN);
+
+	psGetDevClockSpeedOUT->eError =
+	    PVRSRVGetDevClockSpeedKM(psConnection, OSGetDevData(psConnection),
+				     &psGetDevClockSpeedOUT->
+				     ui32ui32ClockSpeed);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHWOpTimeout(IMG_UINT32 ui32DispatchTableEntry,
+			PVRSRV_BRIDGE_IN_HWOPTIMEOUT * psHWOpTimeoutIN,
+			PVRSRV_BRIDGE_OUT_HWOPTIMEOUT * psHWOpTimeoutOUT,
+			CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psHWOpTimeoutIN);
+
+	psHWOpTimeoutOUT->eError =
+	    PVRSRVHWOpTimeoutKM(psConnection, OSGetDevData(psConnection));
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry,
+			   PVRSRV_BRIDGE_IN_ALIGNMENTCHECK * psAlignmentCheckIN,
+			   PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *
+			   psAlignmentCheckOUT, CONNECTION_DATA * psConnection)
+{
+	IMG_UINT32 *ui32AlignChecksInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) + 0;
+
+	if (unlikely
+	    (psAlignmentCheckIN->ui32AlignChecksSize >
+	     RGXFW_ALIGN_CHECKS_UM_MAX))
+	{
+		psAlignmentCheckOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto AlignmentCheck_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psAlignmentCheckIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psAlignmentCheckIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psAlignmentCheckOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto AlignmentCheck_exit;
+			}
+		}
+	}
+
+	if (psAlignmentCheckIN->ui32AlignChecksSize != 0)
+	{
+		ui32AlignChecksInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psAlignmentCheckIN->ui32AlignChecksSize *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32AlignChecksInt,
+		     (const void __user *)psAlignmentCheckIN->pui32AlignChecks,
+		     psAlignmentCheckIN->ui32AlignChecksSize *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psAlignmentCheckOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto AlignmentCheck_exit;
+		}
+	}
+
+	psAlignmentCheckOUT->eError =
+	    PVRSRVAlignmentCheckKM(psConnection, OSGetDevData(psConnection),
+				   psAlignmentCheckIN->ui32AlignChecksSize,
+				   ui32AlignChecksInt);
+
+ AlignmentCheck_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
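+
+/*
+ * Validation note: the array length is capped (RGXFW_ALIGN_CHECKS_UM_MAX)
+ * before any buffer is sized, and a failed OSCopyFromUser() is reported as
+ * PVRSRV_ERROR_INVALID_PARAMS rather than a copy-specific error.
+ */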
+
+static IMG_INT
+PVRSRVBridgeGetDeviceStatus(IMG_UINT32 ui32DispatchTableEntry,
+			    PVRSRV_BRIDGE_IN_GETDEVICESTATUS *
+			    psGetDeviceStatusIN,
+			    PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *
+			    psGetDeviceStatusOUT,
+			    CONNECTION_DATA * psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psGetDeviceStatusIN);
+
+	psGetDeviceStatusOUT->eError =
+	    PVRSRVGetDeviceStatusKM(psConnection, OSGetDevData(psConnection),
+				    &psGetDeviceStatusOUT->ui32DeviceSatus);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeEventObjectWaitTimeout(IMG_UINT32 ui32DispatchTableEntry,
+				   PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *
+				   psEventObjectWaitTimeoutIN,
+				   PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *
+				   psEventObjectWaitTimeoutOUT,
+				   CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hOSEventKM = psEventObjectWaitTimeoutIN->hOSEventKM;
+	IMG_HANDLE hOSEventKMInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psEventObjectWaitTimeoutOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hOSEventKMInt,
+				       hOSEventKM,
+				       PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+				       IMG_TRUE);
+	if (unlikely(psEventObjectWaitTimeoutOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto EventObjectWaitTimeout_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psEventObjectWaitTimeoutOUT->eError =
+	    OSEventObjectWaitTimeout(hOSEventKMInt,
+				     psEventObjectWaitTimeoutIN->
+				     ui64uiTimeoutus);
+
+ EventObjectWaitTimeout_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hOSEventKMInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hOSEventKM,
+					    PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *
+				psFindProcessMemStatsIN,
+				PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *
+				psFindProcessMemStatsOUT,
+				CONNECTION_DATA * psConnection)
+{
+	IMG_UINT32 *pui32MemStatsArrayInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) + 0;
+
+	if (psFindProcessMemStatsIN->ui32ArrSize >
+	    PVRSRV_PROCESS_STAT_TYPE_COUNT)
+	{
+		psFindProcessMemStatsOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto FindProcessMemStats_exit;
+	}
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psFindProcessMemStatsOUT->pui32MemStatsArray =
+	    psFindProcessMemStatsIN->pui32MemStatsArray;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psFindProcessMemStatsIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psFindProcessMemStatsIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psFindProcessMemStatsOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto FindProcessMemStats_exit;
+			}
+		}
+	}
+
+	if (psFindProcessMemStatsIN->ui32ArrSize != 0)
+	{
+		pui32MemStatsArrayInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32);
+	}
+
+	psFindProcessMemStatsOUT->eError =
+	    PVRSRVFindProcessMemStatsKM(psFindProcessMemStatsIN->ui32PID,
+					psFindProcessMemStatsIN->ui32ArrSize,
+					psFindProcessMemStatsIN->
+					bbAllProcessStats,
+					pui32MemStatsArrayInt);
+
+	if ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) > 0)
+	{
+		if (unlikely
+		    (OSCopyToUser
+		     (NULL,
+		      (void __user *)psFindProcessMemStatsOUT->
+		      pui32MemStatsArray, pui32MemStatsArrayInt,
+		      (psFindProcessMemStatsIN->ui32ArrSize *
+		       sizeof(IMG_UINT32))) != PVRSRV_OK))
+		{
+			psFindProcessMemStatsOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto FindProcessMemStats_exit;
+		}
+	}
+
+ FindProcessMemStats_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
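+
+/*
+ * Output path note: psFindProcessMemStatsOUT->pui32MemStatsArray merely
+ * echoes the user pointer supplied in the IN message; the results staged
+ * in pui32MemStatsArrayInt are copied back to it with OSCopyToUser() after
+ * the KM call, whether or not that call reported success.
+ */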
+
+static IMG_INT
+PVRSRVBridgeAcquireInfoPage(IMG_UINT32 ui32DispatchTableEntry,
+			    PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *
+			    psAcquireInfoPageIN,
+			    PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *
+			    psAcquireInfoPageOUT,
+			    CONNECTION_DATA * psConnection)
+{
+	PMR *psPMRInt = NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psAcquireInfoPageIN);
+
+	psAcquireInfoPageOUT->eError = PVRSRVAcquireInfoPageKM(&psPMRInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK))
+	{
+		goto AcquireInfoPage_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psAcquireInfoPageOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->
+				      psHandleBase, &psAcquireInfoPageOUT->hPMR,
+				      (void *)psPMRInt,
+				      PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      PVRSRVReleaseInfoPageKM);
+	if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto AcquireInfoPage_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ AcquireInfoPage_exit:
+
+	if (psAcquireInfoPageOUT->eError != PVRSRV_OK)
+	{
+		if (psPMRInt)
+		{
+			PVRSRVReleaseInfoPageKM(psPMRInt);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeReleaseInfoPage(IMG_UINT32 ui32DispatchTableEntry,
+			    PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *
+			    psReleaseInfoPageIN,
+			    PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *
+			    psReleaseInfoPageOUT,
+			    CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psReleaseInfoPageOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->
+					psHandleBase,
+					(IMG_HANDLE) psReleaseInfoPageIN->hPMR,
+					PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+	if (unlikely
+	    ((psReleaseInfoPageOUT->eError != PVRSRV_OK)
+	     && (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeReleaseInfoPage: %s",
+			 PVRSRVGetErrorString(psReleaseInfoPageOUT->eError)));
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto ReleaseInfoPage_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ ReleaseInfoPage_exit:
+
+	return 0;
+}
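+
+/*
+ * Unlike the other entry points in this file, the info page handle lives
+ * in the per-process handle base
+ * (psConnection->psProcessHandleBase->psHandleBase) rather than the
+ * per-connection base, so both Acquire and Release lock that base instead.
+ */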
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitSRVCOREBridge(void);
+PVRSRV_ERROR DeinitSRVCOREBridge(void);
+
+/*
+ * Register all SRVCORE functions with services
+ */
+PVRSRV_ERROR InitSRVCOREBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_CONNECT,
+			      PVRSRVBridgeConnect, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_DISCONNECT,
+			      PVRSRVBridgeDisconnect, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT,
+			      PVRSRVBridgeAcquireGlobalEventObject, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT,
+			      PVRSRVBridgeReleaseGlobalEventObject, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN,
+			      PVRSRVBridgeEventObjectOpen, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT,
+			      PVRSRVBridgeEventObjectWait, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE,
+			      PVRSRVBridgeEventObjectClose, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO,
+			      PVRSRVBridgeDumpDebugInfo, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED,
+			      PVRSRVBridgeGetDevClockSpeed, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT,
+			      PVRSRVBridgeHWOpTimeout, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK,
+			      PVRSRVBridgeAlignmentCheck, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS,
+			      PVRSRVBridgeGetDeviceStatus, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT,
+			      PVRSRVBridgeEventObjectWaitTimeout, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS,
+			      PVRSRVBridgeFindProcessMemStats, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE,
+			      PVRSRVBridgeAcquireInfoPage, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+			      PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE,
+			      PVRSRVBridgeReleaseInfoPage, NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all SRVCORE functions with services
+ */
+PVRSRV_ERROR DeinitSRVCOREBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_CONNECT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_DISCONNECT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+				PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_sync_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_sync_bridge.c
new file mode 100644
index 0000000..8fbf9611
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_sync_bridge.c
@@ -0,0 +1,2190 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for sync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+#include "pdump.h"
+#include "pvrsrv_sync_km.h"
+#include "sync_fallback_server.h"
+
+#include "common_sync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+				    PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *
+				    psAllocSyncPrimitiveBlockIN,
+				    PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *
+				    psAllocSyncPrimitiveBlockOUT,
+				    CONNECTION_DATA * psConnection)
+{
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+	PMR *pshSyncPMRInt = NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psAllocSyncPrimitiveBlockIN);
+
+	psAllocSyncPrimitiveBlockOUT->hSyncHandle = NULL;
+
+	psAllocSyncPrimitiveBlockOUT->eError =
+	    PVRSRVAllocSyncPrimitiveBlockKM(psConnection,
+					    OSGetDevData(psConnection),
+					    &psSyncHandleInt,
+					    &psAllocSyncPrimitiveBlockOUT->
+					    ui32SyncPrimVAddr,
+					    &psAllocSyncPrimitiveBlockOUT->
+					    ui32SyncPrimBlockSize,
+					    &pshSyncPMRInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK))
+	{
+		goto AllocSyncPrimitiveBlock_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psAllocSyncPrimitiveBlockOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psAllocSyncPrimitiveBlockOUT->
+				      hSyncHandle, (void *)psSyncHandleInt,
+				      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      PVRSRVFreeSyncPrimitiveBlockKM);
+	if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto AllocSyncPrimitiveBlock_exit;
+	}
+
+	psAllocSyncPrimitiveBlockOUT->eError =
+	    PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+					 &psAllocSyncPrimitiveBlockOUT->
+					 hhSyncPMR, (void *)pshSyncPMRInt,
+					 PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+					 PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+					 psAllocSyncPrimitiveBlockOUT->
+					 hSyncHandle);
+	if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto AllocSyncPrimitiveBlock_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ AllocSyncPrimitiveBlock_exit:
+
+	if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+	{
+		if (psAllocSyncPrimitiveBlockOUT->hSyncHandle)
+		{
+			PVRSRV_ERROR eError;
+
+			/* Lock over handle creation cleanup. */
+			LockHandle(psConnection->psHandleBase);
+
+			eError =
+			    PVRSRVReleaseHandleUnlocked(psConnection->
+							psHandleBase,
+							(IMG_HANDLE)
+							psAllocSyncPrimitiveBlockOUT->
+							hSyncHandle,
+							PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			if (unlikely
+			    ((eError != PVRSRV_OK)
+			     && (eError != PVRSRV_ERROR_RETRY)))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVBridgeAllocSyncPrimitiveBlock: %s",
+					 PVRSRVGetErrorString(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK)
+				   || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psSyncHandleInt = NULL;
+			/* Release now we have cleaned up creation handles. */
+			UnlockHandle(psConnection->psHandleBase);
+
+		}
+
+		if (psSyncHandleInt)
+		{
+			PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt);
+		}
+	}
+
+	return 0;
+}
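+
+/*
+ * Double-free avoidance: if the sub-handle allocation fails after the main
+ * sync handle was created, releasing the main handle also frees the sync
+ * block through its PFN_HANDLE_RELEASE, so psSyncHandleInt is set to NULL
+ * beforehand to stop the fallback PVRSRVFreeSyncPrimitiveBlockKM() call
+ * from running a second time.
+ */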
+
+static IMG_INT
+PVRSRVBridgeFreeSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+				   PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *
+				   psFreeSyncPrimitiveBlockIN,
+				   PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *
+				   psFreeSyncPrimitiveBlockOUT,
+				   CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psFreeSyncPrimitiveBlockOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE)
+					psFreeSyncPrimitiveBlockIN->hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	if (unlikely
+	    ((psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+	     && (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeFreeSyncPrimitiveBlock: %s",
+			 PVRSRVGetErrorString(psFreeSyncPrimitiveBlockOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto FreeSyncPrimitiveBlock_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ FreeSyncPrimitiveBlock_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry,
+			PVRSRV_BRIDGE_IN_SYNCPRIMSET * psSyncPrimSetIN,
+			PVRSRV_BRIDGE_OUT_SYNCPRIMSET * psSyncPrimSetOUT,
+			CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSyncHandle = psSyncPrimSetIN->hSyncHandle;
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncPrimSetOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSyncHandleInt,
+				       hSyncHandle,
+				       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+				       IMG_TRUE);
+	if (unlikely(psSyncPrimSetOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncPrimSet_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psSyncPrimSetOUT->eError =
+	    PVRSRVSyncPrimSetKM(psSyncHandleInt,
+				psSyncPrimSetIN->ui32Index,
+				psSyncPrimSetIN->ui32Value);
+
+ SyncPrimSet_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSyncHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSyncHandle,
+					    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeServerSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry,
+			      PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET *
+			      psServerSyncPrimSetIN,
+			      PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET *
+			      psServerSyncPrimSetOUT,
+			      CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSyncHandle = psServerSyncPrimSetIN->hSyncHandle;
+	SERVER_SYNC_PRIMITIVE *psSyncHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psServerSyncPrimSetOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSyncHandleInt,
+				       hSyncHandle,
+				       PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+				       IMG_TRUE);
+	if (unlikely(psServerSyncPrimSetOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto ServerSyncPrimSet_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psServerSyncPrimSetOUT->eError =
+	    PVRSRVServerSyncPrimSetKM(psSyncHandleInt,
+				      psServerSyncPrimSetIN->ui32Value);
+
+ ServerSyncPrimSet_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSyncHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSyncHandle,
+					    PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeServerSyncPrimSet NULL
+#endif
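+
+/*
+ * When SUPPORT_SERVER_SYNC_IMPL is not defined, the server-sync handlers
+ * in this file are compiled out and their names are #defined to NULL, so
+ * the dispatch-table registrations presumably install no handler for the
+ * corresponding bridge calls.
+ */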
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeServerSyncAlloc(IMG_UINT32 ui32DispatchTableEntry,
+			    PVRSRV_BRIDGE_IN_SERVERSYNCALLOC *
+			    psServerSyncAllocIN,
+			    PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC *
+			    psServerSyncAllocOUT,
+			    CONNECTION_DATA * psConnection)
+{
+	SERVER_SYNC_PRIMITIVE *psSyncHandleInt = NULL;
+	IMG_CHAR *uiClassNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0;
+
+	if (unlikely
+	    (psServerSyncAllocIN->ui32ClassNameSize > SYNC_MAX_CLASS_NAME_LEN))
+	{
+		psServerSyncAllocOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto ServerSyncAlloc_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psServerSyncAllocIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psServerSyncAllocIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psServerSyncAllocOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto ServerSyncAlloc_exit;
+			}
+		}
+	}
+
+	if (psServerSyncAllocIN->ui32ClassNameSize != 0)
+	{
+		uiClassNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiClassNameInt,
+		     (const void __user *)psServerSyncAllocIN->puiClassName,
+		     psServerSyncAllocIN->ui32ClassNameSize *
+		     sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psServerSyncAllocOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto ServerSyncAlloc_exit;
+		}
+		((IMG_CHAR *)
+		 uiClassNameInt)[(psServerSyncAllocIN->ui32ClassNameSize *
+				  sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+
+	psServerSyncAllocOUT->eError =
+	    PVRSRVServerSyncAllocKM(psConnection, OSGetDevData(psConnection),
+				    &psSyncHandleInt,
+				    &psServerSyncAllocOUT->ui32SyncPrimVAddr,
+				    psServerSyncAllocIN->ui32ClassNameSize,
+				    uiClassNameInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psServerSyncAllocOUT->eError != PVRSRV_OK))
+	{
+		goto ServerSyncAlloc_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psServerSyncAllocOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psServerSyncAllocOUT->hSyncHandle,
+				      (void *)psSyncHandleInt,
+				      PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      PVRSRVServerSyncFreeKM);
+	if (unlikely(psServerSyncAllocOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto ServerSyncAlloc_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ ServerSyncAlloc_exit:
+
+	if (psServerSyncAllocOUT->eError != PVRSRV_OK)
+	{
+		if (psSyncHandleInt)
+		{
+			PVRSRVServerSyncFreeKM(psSyncHandleInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeServerSyncAlloc NULL
+#endif
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeServerSyncFree(IMG_UINT32 ui32DispatchTableEntry,
+			   PVRSRV_BRIDGE_IN_SERVERSYNCFREE * psServerSyncFreeIN,
+			   PVRSRV_BRIDGE_OUT_SERVERSYNCFREE *
+			   psServerSyncFreeOUT, CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psServerSyncFreeOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psServerSyncFreeIN->
+					hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+	if (unlikely
+	    ((psServerSyncFreeOUT->eError != PVRSRV_OK)
+	     && (psServerSyncFreeOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeServerSyncFree: %s",
+			 PVRSRVGetErrorString(psServerSyncFreeOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto ServerSyncFree_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ ServerSyncFree_exit:
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeServerSyncFree NULL
+#endif
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeServerSyncQueueHWOp(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP *
+				psServerSyncQueueHWOpIN,
+				PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP *
+				psServerSyncQueueHWOpOUT,
+				CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSyncHandle = psServerSyncQueueHWOpIN->hSyncHandle;
+	SERVER_SYNC_PRIMITIVE *psSyncHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psServerSyncQueueHWOpOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSyncHandleInt,
+				       hSyncHandle,
+				       PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+				       IMG_TRUE);
+	if (unlikely(psServerSyncQueueHWOpOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto ServerSyncQueueHWOp_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psServerSyncQueueHWOpOUT->eError =
+	    PVRSRVServerSyncQueueHWOpKM(psSyncHandleInt,
+					psServerSyncQueueHWOpIN->bbUpdate,
+					&psServerSyncQueueHWOpOUT->
+					ui32FenceValue,
+					&psServerSyncQueueHWOpOUT->
+					ui32UpdateValue);
+
+ ServerSyncQueueHWOp_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSyncHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSyncHandle,
+					    PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeServerSyncQueueHWOp NULL
+#endif
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeServerSyncGetStatus(IMG_UINT32 ui32DispatchTableEntry,
+				PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS *
+				psServerSyncGetStatusIN,
+				PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS *
+				psServerSyncGetStatusOUT,
+				CONNECTION_DATA * psConnection)
+{
+	SERVER_SYNC_PRIMITIVE **psSyncHandleInt = NULL;
+	IMG_HANDLE *hSyncHandleInt2 = NULL;
+	IMG_UINT32 *pui32UIDInt = NULL;
+	IMG_UINT32 *pui32FWAddrInt = NULL;
+	IMG_UINT32 *pui32CurrentOpInt = NULL;
+	IMG_UINT32 *pui32NextOpInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psServerSyncGetStatusIN->ui32SyncCount *
+	     sizeof(SERVER_SYNC_PRIMITIVE *)) +
+	    (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE)) +
+	    (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+	    (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+	    (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+	    (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) + 0;
+
+	if (unlikely
+	    (psServerSyncGetStatusIN->ui32SyncCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psServerSyncGetStatusOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto ServerSyncGetStatus_exit;
+	}
+
+	psServerSyncGetStatusOUT->pui32UID = psServerSyncGetStatusIN->pui32UID;
+	psServerSyncGetStatusOUT->pui32FWAddr =
+	    psServerSyncGetStatusIN->pui32FWAddr;
+	psServerSyncGetStatusOUT->pui32CurrentOp =
+	    psServerSyncGetStatusIN->pui32CurrentOp;
+	psServerSyncGetStatusOUT->pui32NextOp =
+	    psServerSyncGetStatusIN->pui32NextOp;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psServerSyncGetStatusIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psServerSyncGetStatusIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psServerSyncGetStatusOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto ServerSyncGetStatus_exit;
+			}
+		}
+	}
+
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		psSyncHandleInt =
+		    (SERVER_SYNC_PRIMITIVE **) (((IMG_UINT8 *) pArrayArgsBuffer)
+						+ ui32NextOffset);
+		ui32NextOffset +=
+		    psServerSyncGetStatusIN->ui32SyncCount *
+		    sizeof(SERVER_SYNC_PRIMITIVE *);
+		hSyncHandleInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hSyncHandleInt2,
+		     (const void __user *)psServerSyncGetStatusIN->phSyncHandle,
+		     psServerSyncGetStatusIN->ui32SyncCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psServerSyncGetStatusOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		pui32UIDInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+	}
+
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		pui32FWAddrInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+	}
+
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		pui32CurrentOpInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+	}
+
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		pui32NextOpInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psServerSyncGetStatusIN->ui32SyncCount; i++)
+		{
+			/* Look up the address from the handle */
+			psServerSyncGetStatusOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psSyncHandleInt[i],
+						       hSyncHandleInt2[i],
+						       PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+						       IMG_TRUE);
+			if (unlikely
+			    (psServerSyncGetStatusOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto ServerSyncGetStatus_exit;
+			}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psServerSyncGetStatusOUT->eError =
+	    PVRSRVServerSyncGetStatusKM(psServerSyncGetStatusIN->ui32SyncCount,
+					psSyncHandleInt,
+					pui32UIDInt,
+					pui32FWAddrInt,
+					pui32CurrentOpInt, pui32NextOpInt);
+
+	if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+	{
+		if (unlikely
+		    (OSCopyToUser
+		     (NULL, (void __user *)psServerSyncGetStatusOUT->pui32UID,
+		      pui32UIDInt,
+		      (psServerSyncGetStatusIN->ui32SyncCount *
+		       sizeof(IMG_UINT32))) != PVRSRV_OK))
+		{
+			psServerSyncGetStatusOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+	if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+	{
+		if (unlikely
+		    (OSCopyToUser
+		     (NULL,
+		      (void __user *)psServerSyncGetStatusOUT->pui32FWAddr,
+		      pui32FWAddrInt,
+		      (psServerSyncGetStatusIN->ui32SyncCount *
+		       sizeof(IMG_UINT32))) != PVRSRV_OK))
+		{
+			psServerSyncGetStatusOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+	if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+	{
+		if (unlikely
+		    (OSCopyToUser
+		     (NULL,
+		      (void __user *)psServerSyncGetStatusOUT->pui32CurrentOp,
+		      pui32CurrentOpInt,
+		      (psServerSyncGetStatusIN->ui32SyncCount *
+		       sizeof(IMG_UINT32))) != PVRSRV_OK))
+		{
+			psServerSyncGetStatusOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+	if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+	{
+		if (unlikely
+		    (OSCopyToUser
+		     (NULL,
+		      (void __user *)psServerSyncGetStatusOUT->pui32NextOp,
+		      pui32NextOpInt,
+		      (psServerSyncGetStatusIN->ui32SyncCount *
+		       sizeof(IMG_UINT32))) != PVRSRV_OK))
+		{
+			psServerSyncGetStatusOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+ ServerSyncGetStatus_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	if (hSyncHandleInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psServerSyncGetStatusIN->ui32SyncCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hSyncHandleInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hSyncHandleInt2[i],
+							    PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+			}
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeServerSyncGetStatus NULL
+#endif
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeSyncPrimOpCreate(IMG_UINT32 ui32DispatchTableEntry,
+			     PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE *
+			     psSyncPrimOpCreateIN,
+			     PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE *
+			     psSyncPrimOpCreateOUT,
+			     CONNECTION_DATA * psConnection)
+{
+	SYNC_PRIMITIVE_BLOCK **psBlockListInt = NULL;
+	IMG_HANDLE *hBlockListInt2 = NULL;
+	IMG_UINT32 *ui32SyncBlockIndexInt = NULL;
+	IMG_UINT32 *ui32IndexInt = NULL;
+	SERVER_SYNC_PRIMITIVE **psServerSyncInt = NULL;
+	IMG_HANDLE *hServerSyncInt2 = NULL;
+	SERVER_OP_COOKIE *psServerCookieInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psSyncPrimOpCreateIN->ui32SyncBlockCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    (psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE)) +
+	    (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+	    (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+	    (psSyncPrimOpCreateIN->ui32ServerSyncCount *
+	     sizeof(SERVER_SYNC_PRIMITIVE *)) +
+	    (psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+	    0;
+
+	if (unlikely
+	    (psSyncPrimOpCreateIN->ui32SyncBlockCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psSyncPrimOpCreateOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto SyncPrimOpCreate_exit;
+	}
+
+	if (unlikely
+	    (psSyncPrimOpCreateIN->ui32ClientSyncCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psSyncPrimOpCreateOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto SyncPrimOpCreate_exit;
+	}
+
+	if (unlikely
+	    (psSyncPrimOpCreateIN->ui32ServerSyncCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psSyncPrimOpCreateOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto SyncPrimOpCreate_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psSyncPrimOpCreateIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psSyncPrimOpCreateIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psSyncPrimOpCreateOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto SyncPrimOpCreate_exit;
+			}
+		}
+	}
+
+	if (psSyncPrimOpCreateIN->ui32SyncBlockCount != 0)
+	{
+		psBlockListInt =
+		    (SYNC_PRIMITIVE_BLOCK **) (((IMG_UINT8 *) pArrayArgsBuffer)
+					       + ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncPrimOpCreateIN->ui32SyncBlockCount *
+		    sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hBlockListInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncPrimOpCreateIN->ui32SyncBlockCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hBlockListInt2,
+		     (const void __user *)psSyncPrimOpCreateIN->phBlockList,
+		     psSyncPrimOpCreateIN->ui32SyncBlockCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psSyncPrimOpCreateOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpCreate_exit;
+		}
+	}
+	if (psSyncPrimOpCreateIN->ui32ClientSyncCount != 0)
+	{
+		ui32SyncBlockIndexInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncPrimOpCreateIN->ui32ClientSyncCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32SyncBlockIndexInt,
+		     (const void __user *)psSyncPrimOpCreateIN->
+		     pui32SyncBlockIndex,
+		     psSyncPrimOpCreateIN->ui32ClientSyncCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psSyncPrimOpCreateOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpCreate_exit;
+		}
+	}
+	if (psSyncPrimOpCreateIN->ui32ClientSyncCount != 0)
+	{
+		ui32IndexInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncPrimOpCreateIN->ui32ClientSyncCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32IndexInt,
+		     (const void __user *)psSyncPrimOpCreateIN->pui32Index,
+		     psSyncPrimOpCreateIN->ui32ClientSyncCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psSyncPrimOpCreateOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpCreate_exit;
+		}
+	}
+	if (psSyncPrimOpCreateIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncInt =
+		    (SERVER_SYNC_PRIMITIVE **) (((IMG_UINT8 *) pArrayArgsBuffer)
+						+ ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncPrimOpCreateIN->ui32ServerSyncCount *
+		    sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServerSyncInt2 =
+		    (IMG_HANDLE *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncPrimOpCreateIN->ui32ServerSyncCount *
+		    sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hServerSyncInt2,
+		     (const void __user *)psSyncPrimOpCreateIN->phServerSync,
+		     psSyncPrimOpCreateIN->ui32ServerSyncCount *
+		     sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psSyncPrimOpCreateOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpCreate_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psSyncPrimOpCreateIN->ui32SyncBlockCount; i++)
+		{
+			/* Look up the address from the handle */
+			psSyncPrimOpCreateOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psBlockListInt[i],
+						       hBlockListInt2[i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely
+			    (psSyncPrimOpCreateOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto SyncPrimOpCreate_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psSyncPrimOpCreateIN->ui32ServerSyncCount; i++)
+		{
+			/* Look up the address from the handle */
+			psSyncPrimOpCreateOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->
+						       psHandleBase,
+						       (void **)
+						       &psServerSyncInt[i],
+						       hServerSyncInt2[i],
+						       PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+						       IMG_TRUE);
+			if (unlikely
+			    (psSyncPrimOpCreateOUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto SyncPrimOpCreate_exit;
+			}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psSyncPrimOpCreateOUT->eError =
+	    PVRSRVSyncPrimOpCreateKM(psSyncPrimOpCreateIN->ui32SyncBlockCount,
+				     psBlockListInt,
+				     psSyncPrimOpCreateIN->ui32ClientSyncCount,
+				     ui32SyncBlockIndexInt,
+				     ui32IndexInt,
+				     psSyncPrimOpCreateIN->ui32ServerSyncCount,
+				     psServerSyncInt, &psServerCookieInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psSyncPrimOpCreateOUT->eError != PVRSRV_OK))
+	{
+		goto SyncPrimOpCreate_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
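+	/* PVRSRVSyncPrimOpDestroyKM is registered as the release callback so later handle destruction tears down the cookie. */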
+	psSyncPrimOpCreateOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psSyncPrimOpCreateOUT->hServerCookie,
+				      (void *)psServerCookieInt,
+				      PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      PVRSRVSyncPrimOpDestroyKM);
+	if (unlikely(psSyncPrimOpCreateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncPrimOpCreate_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ SyncPrimOpCreate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	if (hBlockListInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psSyncPrimOpCreateIN->ui32SyncBlockCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hBlockListInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hBlockListInt2[i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+	if (hServerSyncInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psSyncPrimOpCreateIN->ui32ServerSyncCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (hServerSyncInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->
+							    psHandleBase,
+							    hServerSyncInt2[i],
+							    PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+			}
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
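+	/* On failure, roll back: the cookie was created but its handle never reached the caller, so destroy it here. */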
+	if (psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+	{
+		if (psServerCookieInt)
+		{
+			PVRSRVSyncPrimOpDestroyKM(psServerCookieInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimOpCreate NULL
+#endif
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeSyncPrimOpTake(IMG_UINT32 ui32DispatchTableEntry,
+			   PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE * psSyncPrimOpTakeIN,
+			   PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE *
+			   psSyncPrimOpTakeOUT, CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hServerCookie = psSyncPrimOpTakeIN->hServerCookie;
+	SERVER_OP_COOKIE *psServerCookieInt = NULL;
+	IMG_UINT32 *ui32FlagsInt = NULL;
+	IMG_UINT32 *ui32FenceValueInt = NULL;
+	IMG_UINT32 *ui32UpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerFlagsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+	    (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+	    (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+	    (psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) + 0;
+
+	if (unlikely
+	    (psSyncPrimOpTakeIN->ui32ClientSyncCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psSyncPrimOpTakeOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto SyncPrimOpTake_exit;
+	}
+
+	if (unlikely
+	    (psSyncPrimOpTakeIN->ui32ServerSyncCount > PVRSRV_MAX_SYNC_PRIMS))
+	{
+		psSyncPrimOpTakeOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto SyncPrimOpTake_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psSyncPrimOpTakeIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psSyncPrimOpTakeIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psSyncPrimOpTakeOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto SyncPrimOpTake_exit;
+			}
+		}
+	}
+
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+	{
+		ui32FlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncPrimOpTakeIN->ui32ClientSyncCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32FlagsInt,
+		     (const void __user *)psSyncPrimOpTakeIN->pui32Flags,
+		     psSyncPrimOpTakeIN->ui32ClientSyncCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psSyncPrimOpTakeOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpTake_exit;
+		}
+	}
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+	{
+		ui32FenceValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncPrimOpTakeIN->ui32ClientSyncCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32FenceValueInt,
+		     (const void __user *)psSyncPrimOpTakeIN->pui32FenceValue,
+		     psSyncPrimOpTakeIN->ui32ClientSyncCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psSyncPrimOpTakeOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpTake_exit;
+		}
+	}
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+	{
+		ui32UpdateValueInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncPrimOpTakeIN->ui32ClientSyncCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32UpdateValueInt,
+		     (const void __user *)psSyncPrimOpTakeIN->pui32UpdateValue,
+		     psSyncPrimOpTakeIN->ui32ClientSyncCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psSyncPrimOpTakeOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpTake_exit;
+		}
+	}
+	if (psSyncPrimOpTakeIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerFlagsInt =
+		    (IMG_UINT32 *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				    ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncPrimOpTakeIN->ui32ServerSyncCount *
+		    sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32ServerFlagsInt,
+		     (const void __user *)psSyncPrimOpTakeIN->pui32ServerFlags,
+		     psSyncPrimOpTakeIN->ui32ServerSyncCount *
+		     sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psSyncPrimOpTakeOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpTake_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncPrimOpTakeOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psServerCookieInt,
+				       hServerCookie,
+				       PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+				       IMG_TRUE);
+	if (unlikely(psSyncPrimOpTakeOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncPrimOpTake_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psSyncPrimOpTakeOUT->eError =
+	    PVRSRVSyncPrimOpTakeKM(psServerCookieInt,
+				   psSyncPrimOpTakeIN->ui32ClientSyncCount,
+				   ui32FlagsInt,
+				   ui32FenceValueInt,
+				   ui32UpdateValueInt,
+				   psSyncPrimOpTakeIN->ui32ServerSyncCount,
+				   ui32ServerFlagsInt);
+
+ SyncPrimOpTake_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psServerCookieInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hServerCookie,
+					    PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimOpTake NULL
+#endif
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeSyncPrimOpReady(IMG_UINT32 ui32DispatchTableEntry,
+			    PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY *
+			    psSyncPrimOpReadyIN,
+			    PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY *
+			    psSyncPrimOpReadyOUT,
+			    CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hServerCookie = psSyncPrimOpReadyIN->hServerCookie;
+	SERVER_OP_COOKIE *psServerCookieInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncPrimOpReadyOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psServerCookieInt,
+				       hServerCookie,
+				       PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+				       IMG_TRUE);
+	if (unlikely(psSyncPrimOpReadyOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncPrimOpReady_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psSyncPrimOpReadyOUT->eError =
+	    PVRSRVSyncPrimOpReadyKM(psServerCookieInt,
+				    &psSyncPrimOpReadyOUT->bReady);
+
+ SyncPrimOpReady_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psServerCookieInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hServerCookie,
+					    PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimOpReady NULL
+#endif
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeSyncPrimOpComplete(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE *
+			       psSyncPrimOpCompleteIN,
+			       PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE *
+			       psSyncPrimOpCompleteOUT,
+			       CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hServerCookie = psSyncPrimOpCompleteIN->hServerCookie;
+	SERVER_OP_COOKIE *psServerCookieInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncPrimOpCompleteOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psServerCookieInt,
+				       hServerCookie,
+				       PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+				       IMG_TRUE);
+	if (unlikely(psSyncPrimOpCompleteOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncPrimOpComplete_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psSyncPrimOpCompleteOUT->eError =
+	    PVRSRVSyncPrimOpCompleteKM(psServerCookieInt);
+
+ SyncPrimOpComplete_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psServerCookieInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hServerCookie,
+					    PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimOpComplete NULL
+#endif
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_INT
+PVRSRVBridgeSyncPrimOpDestroy(IMG_UINT32 ui32DispatchTableEntry,
+			      PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY *
+			      psSyncPrimOpDestroyIN,
+			      PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY *
+			      psSyncPrimOpDestroyOUT,
+			      CONNECTION_DATA * psConnection)
+{
+
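+	/* Destroy is implemented as a handle release; the release callback registered at create time frees the cookie. */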
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psSyncPrimOpDestroyOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psSyncPrimOpDestroyIN->
+					hServerCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+	if (unlikely
+	    ((psSyncPrimOpDestroyOUT->eError != PVRSRV_OK)
+	     && (psSyncPrimOpDestroyOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeSyncPrimOpDestroy: %s",
+			 PVRSRVGetErrorString(psSyncPrimOpDestroyOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncPrimOpDestroy_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ SyncPrimOpDestroy_exit:
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimOpDestroy NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDump(IMG_UINT32 ui32DispatchTableEntry,
+			  PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP * psSyncPrimPDumpIN,
+			  PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP * psSyncPrimPDumpOUT,
+			  CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSyncHandle = psSyncPrimPDumpIN->hSyncHandle;
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncPrimPDumpOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSyncHandleInt,
+				       hSyncHandle,
+				       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+				       IMG_TRUE);
+	if (unlikely(psSyncPrimPDumpOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncPrimPDump_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psSyncPrimPDumpOUT->eError =
+	    PVRSRVSyncPrimPDumpKM(psSyncHandleInt,
+				  psSyncPrimPDumpIN->ui32Offset);
+
+ SyncPrimPDump_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSyncHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSyncHandle,
+					    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDump NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpValue(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *
+			       psSyncPrimPDumpValueIN,
+			       PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *
+			       psSyncPrimPDumpValueOUT,
+			       CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSyncHandle = psSyncPrimPDumpValueIN->hSyncHandle;
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncPrimPDumpValueOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSyncHandleInt,
+				       hSyncHandle,
+				       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+				       IMG_TRUE);
+	if (unlikely(psSyncPrimPDumpValueOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncPrimPDumpValue_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psSyncPrimPDumpValueOUT->eError =
+	    PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt,
+				       psSyncPrimPDumpValueIN->ui32Offset,
+				       psSyncPrimPDumpValueIN->ui32Value);
+
+ SyncPrimPDumpValue_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSyncHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSyncHandle,
+					    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpValue NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+			     PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *
+			     psSyncPrimPDumpPolIN,
+			     PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *
+			     psSyncPrimPDumpPolOUT,
+			     CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSyncHandle = psSyncPrimPDumpPolIN->hSyncHandle;
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncPrimPDumpPolOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSyncHandleInt,
+				       hSyncHandle,
+				       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+				       IMG_TRUE);
+	if (unlikely(psSyncPrimPDumpPolOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncPrimPDumpPol_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psSyncPrimPDumpPolOUT->eError =
+	    PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt,
+				     psSyncPrimPDumpPolIN->ui32Offset,
+				     psSyncPrimPDumpPolIN->ui32Value,
+				     psSyncPrimPDumpPolIN->ui32Mask,
+				     psSyncPrimPDumpPolIN->eOperator,
+				     psSyncPrimPDumpPolIN->uiPDumpFlags);
+
+ SyncPrimPDumpPol_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSyncHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSyncHandle,
+					    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpPol NULL
+#endif
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL_PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimOpPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+			       PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL *
+			       psSyncPrimOpPDumpPolIN,
+			       PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL *
+			       psSyncPrimOpPDumpPolOUT,
+			       CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hServerCookie = psSyncPrimOpPDumpPolIN->hServerCookie;
+	SERVER_OP_COOKIE *psServerCookieInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncPrimOpPDumpPolOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psServerCookieInt,
+				       hServerCookie,
+				       PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+				       IMG_TRUE);
+	if (unlikely(psSyncPrimOpPDumpPolOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncPrimOpPDumpPol_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psSyncPrimOpPDumpPolOUT->eError =
+	    PVRSRVSyncPrimOpPDumpPolKM(psServerCookieInt,
+				       psSyncPrimOpPDumpPolIN->eOperator,
+				       psSyncPrimOpPDumpPolIN->uiPDumpFlags);
+
+ SyncPrimOpPDumpPol_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psServerCookieInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hServerCookie,
+					    PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimOpPDumpPol NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+			     PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *
+			     psSyncPrimPDumpCBPIN,
+			     PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *
+			     psSyncPrimPDumpCBPOUT,
+			     CONNECTION_DATA * psConnection)
+{
+	IMG_HANDLE hSyncHandle = psSyncPrimPDumpCBPIN->hSyncHandle;
+	SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncPrimPDumpCBPOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSyncHandleInt,
+				       hSyncHandle,
+				       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+				       IMG_TRUE);
+	if (unlikely(psSyncPrimPDumpCBPOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncPrimPDumpCBP_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psSyncPrimPDumpCBPOUT->eError =
+	    PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt,
+				     psSyncPrimPDumpCBPIN->ui32Offset,
+				     psSyncPrimPDumpCBPIN->uiWriteOffset,
+				     psSyncPrimPDumpCBPIN->uiPacketSize,
+				     psSyncPrimPDumpCBPIN->uiBufferSize);
+
+ SyncPrimPDumpCBP_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSyncHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSyncHandle,
+					    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpCBP NULL
+#endif
+
+static IMG_INT
+PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry,
+			   PVRSRV_BRIDGE_IN_SYNCALLOCEVENT * psSyncAllocEventIN,
+			   PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *
+			   psSyncAllocEventOUT, CONNECTION_DATA * psConnection)
+{
+	IMG_CHAR *uiClassNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0;
+
+	if (unlikely
+	    (psSyncAllocEventIN->ui32ClassNameSize > SYNC_MAX_CLASS_NAME_LEN))
+	{
+		psSyncAllocEventOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto SyncAllocEvent_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psSyncAllocEventIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer =
+			    (IMG_BYTE *) psSyncAllocEventIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psSyncAllocEventOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto SyncAllocEvent_exit;
+			}
+		}
+	}
+
+	if (psSyncAllocEventIN->ui32ClassNameSize != 0)
+	{
+		uiClassNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiClassNameInt,
+		     (const void __user *)psSyncAllocEventIN->puiClassName,
+		     psSyncAllocEventIN->ui32ClassNameSize *
+		     sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psSyncAllocEventOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncAllocEvent_exit;
+		}
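+		/* Force NUL-termination of the user-supplied class name. */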
+		((IMG_CHAR *)
+		 uiClassNameInt)[(psSyncAllocEventIN->ui32ClassNameSize *
+				  sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+
+	psSyncAllocEventOUT->eError =
+	    PVRSRVSyncAllocEventKM(psConnection, OSGetDevData(psConnection),
+				   psSyncAllocEventIN->bServerSync,
+				   psSyncAllocEventIN->ui32FWAddr,
+				   psSyncAllocEventIN->ui32ClassNameSize,
+				   uiClassNameInt);
+
+ SyncAllocEvent_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncFreeEvent(IMG_UINT32 ui32DispatchTableEntry,
+			  PVRSRV_BRIDGE_IN_SYNCFREEEVENT * psSyncFreeEventIN,
+			  PVRSRV_BRIDGE_OUT_SYNCFREEEVENT * psSyncFreeEventOUT,
+			  CONNECTION_DATA * psConnection)
+{
+
+	psSyncFreeEventOUT->eError =
+	    PVRSRVSyncFreeEventKM(psConnection, OSGetDevData(psConnection),
+				  psSyncFreeEventIN->ui32FWAddr);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
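+/* Passed as the final argument to SetDispatchTableEntry below; IMG_TRUE requests that the dispatcher serialise these calls with the bridge lock. */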
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitSYNCBridge(void);
+PVRSRV_ERROR DeinitSYNCBridge(void);
+
+/*
+ * Register all SYNC functions with services
+ */
+PVRSRV_ERROR InitSYNCBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK,
+			      PVRSRVBridgeAllocSyncPrimitiveBlock, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK,
+			      PVRSRVBridgeFreeSyncPrimitiveBlock, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCPRIMSET,
+			      PVRSRVBridgeSyncPrimSet, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SERVERSYNCPRIMSET,
+			      PVRSRVBridgeServerSyncPrimSet, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SERVERSYNCALLOC,
+			      PVRSRVBridgeServerSyncAlloc, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SERVERSYNCFREE,
+			      PVRSRVBridgeServerSyncFree, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SERVERSYNCQUEUEHWOP,
+			      PVRSRVBridgeServerSyncQueueHWOp, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SERVERSYNCGETSTATUS,
+			      PVRSRVBridgeServerSyncGetStatus, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCREATE,
+			      PVRSRVBridgeSyncPrimOpCreate, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCPRIMOPTAKE,
+			      PVRSRVBridgeSyncPrimOpTake, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCPRIMOPREADY,
+			      PVRSRVBridgeSyncPrimOpReady, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCOMPLETE,
+			      PVRSRVBridgeSyncPrimOpComplete, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCPRIMOPDESTROY,
+			      PVRSRVBridgeSyncPrimOpDestroy, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP,
+			      PVRSRVBridgeSyncPrimPDump, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE,
+			      PVRSRVBridgeSyncPrimPDumpValue, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL,
+			      PVRSRVBridgeSyncPrimPDumpPol, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCPRIMOPPDUMPPOL,
+			      PVRSRVBridgeSyncPrimOpPDumpPol, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP,
+			      PVRSRVBridgeSyncPrimPDumpCBP, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT,
+			      PVRSRVBridgeSyncAllocEvent, NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+			      PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT,
+			      PVRSRVBridgeSyncFreeEvent, NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all SYNC functions with services
+ */
+PVRSRV_ERROR DeinitSYNCBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCPRIMSET);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SERVERSYNCPRIMSET);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SERVERSYNCALLOC);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SERVERSYNCFREE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SERVERSYNCQUEUEHWOP);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SERVERSYNCGETSTATUS);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCREATE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCPRIMOPTAKE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCPRIMOPREADY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCOMPLETE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCPRIMOPDESTROY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCPRIMOPPDUMPPOL);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+				PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_synctracking_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_synctracking_bridge.c
new file mode 100644
index 0000000..0544bc4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/server_synctracking_bridge.c
@@ -0,0 +1,325 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for synctracking
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for synctracking
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+
+#include "common_synctracking_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeSyncRecordRemoveByHandle(IMG_UINT32 ui32DispatchTableEntry,
+				     PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *
+				     psSyncRecordRemoveByHandleIN,
+				     PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE
+				     * psSyncRecordRemoveByHandleOUT,
+				     CONNECTION_DATA * psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psSyncRecordRemoveByHandleOUT->eError =
+	    PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE)
+					psSyncRecordRemoveByHandleIN->hhRecord,
+					PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE);
+	if (unlikely
+	    ((psSyncRecordRemoveByHandleOUT->eError != PVRSRV_OK)
+	     && (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVBridgeSyncRecordRemoveByHandle: %s",
+			 PVRSRVGetErrorString(psSyncRecordRemoveByHandleOUT->
+					      eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncRecordRemoveByHandle_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ SyncRecordRemoveByHandle_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry,
+			  PVRSRV_BRIDGE_IN_SYNCRECORDADD * psSyncRecordAddIN,
+			  PVRSRV_BRIDGE_OUT_SYNCRECORDADD * psSyncRecordAddOUT,
+			  CONNECTION_DATA * psConnection)
+{
+	SYNC_RECORD_HANDLE pshRecordInt = NULL;
+	IMG_HANDLE hhServerSyncPrimBlock =
+	    psSyncRecordAddIN->hhServerSyncPrimBlock;
+	SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt = NULL;
+	IMG_CHAR *uiClassNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+	    (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0;
+
+	if (unlikely
+	    (psSyncRecordAddIN->ui32ClassNameSize > SYNC_MAX_CLASS_NAME_LEN))
+	{
+		psSyncRecordAddOUT->eError =
+		    PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto SyncRecordAdd_exit;
+	}
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psSyncRecordAddIN),
+			      sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE -
+		    ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) psSyncRecordAddIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psSyncRecordAddOUT->eError =
+				    PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto SyncRecordAdd_exit;
+			}
+		}
+	}
+
+	if (psSyncRecordAddIN->ui32ClassNameSize != 0)
+	{
+		uiClassNameInt =
+		    (IMG_CHAR *) (((IMG_UINT8 *) pArrayArgsBuffer) +
+				  ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiClassNameInt,
+		     (const void __user *)psSyncRecordAddIN->puiClassName,
+		     psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) !=
+		    PVRSRV_OK)
+		{
+			psSyncRecordAddOUT->eError =
+			    PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncRecordAdd_exit;
+		}
+		((IMG_CHAR *)
+		 uiClassNameInt)[(psSyncRecordAddIN->ui32ClassNameSize *
+				  sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncRecordAddOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&pshServerSyncPrimBlockInt,
+				       hhServerSyncPrimBlock,
+				       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+				       IMG_TRUE);
+	if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncRecordAdd_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psSyncRecordAddOUT->eError =
+	    PVRSRVSyncRecordAddKM(psConnection, OSGetDevData(psConnection),
+				  &pshRecordInt,
+				  pshServerSyncPrimBlockInt,
+				  psSyncRecordAddIN->ui32ui32FwBlockAddr,
+				  psSyncRecordAddIN->ui32ui32SyncOffset,
+				  psSyncRecordAddIN->bbServerSync,
+				  psSyncRecordAddIN->ui32ClassNameSize,
+				  uiClassNameInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK))
+	{
+		goto SyncRecordAdd_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psSyncRecordAddOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psSyncRecordAddOUT->hhRecord,
+				      (void *)pshRecordInt,
+				      PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+				      PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+				      (PFN_HANDLE_RELEASE) &
+				      PVRSRVSyncRecordRemoveByHandleKM);
+	if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncRecordAdd_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+ SyncRecordAdd_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (pshServerSyncPrimBlockInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hhServerSyncPrimBlock,
+					    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psSyncRecordAddOUT->eError != PVRSRV_OK)
+	{
+		if (pshRecordInt)
+		{
+			PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void);
+PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void);
+
+/*
+ * Register all SYNCTRACKING functions with services
+ */
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING,
+			      PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE,
+			      PVRSRVBridgeSyncRecordRemoveByHandle, NULL,
+			      bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING,
+			      PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD,
+			      PVRSRVBridgeSyncRecordAdd, NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all SYNCTRACKING functions with services
+ */
+PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING,
+				PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING,
+				PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/services_kernel_client.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/services_kernel_client.h
new file mode 100644
index 0000000..138c65c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/services_kernel_client.h
@@ -0,0 +1,267 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File           services_kernel_client.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* This file contains a partial redefinition of the PowerVR Services 5
+ * interface for use by components which are checkpatch clean. This
+ * header is included by the unrefined, non-checkpatch clean headers
+ * to ensure that prototype/typedef/macro changes break the build.
+ */
+
+#ifndef __SERVICES_KERNEL_CLIENT__
+#define __SERVICES_KERNEL_CLIENT__
+
+#include "pvrsrv_error.h"
+
+#include <linux/types.h>
+
+#include "pvrsrv_sync_km.h"
+#include "sync_checkpoint_external.h"
+
+#ifndef __pvrsrv_defined_struct_enum__
+
+/* sync_external.h */
+
+struct PVRSRV_CLIENT_SYNC_PRIM {
+	volatile __u32 *pui32LinAddr;
+};
+
+struct PVRSRV_CLIENT_SYNC_PRIM_OP {
+	__u32 ui32Flags;
+	struct pvrsrv_sync_prim *psSync;
+	__u32 ui32FenceValue;
+	__u32 ui32UpdateValue;
+};
+
+#else /* __pvrsrv_defined_struct_enum__ */
+
+struct PVRSRV_CLIENT_SYNC_PRIM;
+struct PVRSRV_CLIENT_SYNC_PRIM_OP;
+
+enum tag_img_bool;
+
+#endif /* __pvrsrv_defined_struct_enum__ */
+
+struct _PMR_;
+struct _PVRSRV_DEVICE_NODE_;
+struct dma_buf;
+struct SYNC_PRIM_CONTEXT;
+
+/* pvr_notifier.h */
+
+#ifndef _CMDCOMPNOTIFY_PFN_
+typedef void (*PFN_CMDCOMP_NOTIFY)(void *hCmdCompHandle);
+#define _CMDCOMPNOTIFY_PFN_
+#endif
+enum PVRSRV_ERROR PVRSRVRegisterCmdCompleteNotify(void **phNotify,
+	PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, void *hPrivData);
+enum PVRSRV_ERROR PVRSRVUnregisterCmdCompleteNotify(void *hNotify);
+void PVRSRVCheckStatus(void *hCmdCompCallerHandle);
+
+#define DEBUG_REQUEST_DC               0
+#define DEBUG_REQUEST_SERVERSYNC       1
+#define DEBUG_REQUEST_SYS              2
+#define DEBUG_REQUEST_ANDROIDSYNC      3
+#define DEBUG_REQUEST_LINUXFENCE       4
+#define DEBUG_REQUEST_SYNCCHECKPOINT   5
+#define DEBUG_REQUEST_HTB              6
+#define DEBUG_REQUEST_APPHINT          7
+#define DEBUG_REQUEST_FALLBACKSYNC     8
+
+#define DEBUG_REQUEST_VERBOSITY_LOW    0
+#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1
+#define DEBUG_REQUEST_VERBOSITY_HIGH   2
+#define DEBUG_REQUEST_VERBOSITY_MAX    DEBUG_REQUEST_VERBOSITY_HIGH
+
+#define DD_VERB_LVL_ENABLED(_verbLvl, _verbLvlChk) ((_verbLvl) >= (_verbLvlChk))
+
+#ifndef _DBGNOTIFY_PFNS_
+typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile,
+	const char *fmt, ...) __printf(2, 3);
+typedef void (*PFN_DBGREQ_NOTIFY) (void *hDebugRequestHandle,
+	__u32 ui32VerbLevel,
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+	void *pvDumpDebugFile);
+#define _DBGNOTIFY_PFNS_
+#endif
+enum PVRSRV_ERROR PVRSRVRegisterDbgRequestNotify(void **phNotify,
+	struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+	PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+	__u32 ui32RequesterID,
+	void *hDbgRequestHandle);
+enum PVRSRV_ERROR PVRSRVUnregisterDbgRequestNotify(void *hNotify);
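+/* Illustrative sketch (not part of the original interface): how a caller
+ * might register a debug notifier and honour the verbosity levels above.
+ * ExampleDbgNotify, hNotify, psDevNode and hMyState are hypothetical names;
+ * the callback only dumps detailed state at medium verbosity or above.
+ *
+ *	static void ExampleDbgNotify(void *hDebugRequestHandle,
+ *		__u32 ui32VerbLevel,
+ *		DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ *		void *pvDumpDebugFile)
+ *	{
+ *		if (DD_VERB_LVL_ENABLED(ui32VerbLevel,
+ *					DEBUG_REQUEST_VERBOSITY_MEDIUM))
+ *			pfnDumpDebugPrintf(pvDumpDebugFile,
+ *				"example state: %p", hDebugRequestHandle);
+ *	}
+ *
+ *	eError = PVRSRVRegisterDbgRequestNotify(&hNotify, psDevNode,
+ *		ExampleDbgNotify, DEBUG_REQUEST_SYS, hMyState);
+ */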
+
+/* physmem_dmabuf.h */
+
+struct dma_buf *PhysmemGetDmaBuf(struct _PMR_ *psPMR);
+
+/* pvrsrv.h */
+
+enum PVRSRV_ERROR PVRSRVAcquireGlobalEventObjectKM(void **phGlobalEventObject);
+enum PVRSRV_ERROR PVRSRVReleaseGlobalEventObjectKM(void *hGlobalEventObject);
+
+/* sync.h */
+
+enum PVRSRV_ERROR SyncPrimContextCreate(
+	struct _PVRSRV_DEVICE_NODE_ *psDevConnection,
+	struct SYNC_PRIM_CONTEXT **phSyncPrimContext);
+void SyncPrimContextDestroy(struct SYNC_PRIM_CONTEXT *hSyncPrimContext);
+
+enum PVRSRV_ERROR SyncPrimAlloc(struct SYNC_PRIM_CONTEXT *hSyncPrimContext,
+	struct PVRSRV_CLIENT_SYNC_PRIM **ppsSync, const char *pszClassName);
+enum PVRSRV_ERROR SyncPrimFree(struct PVRSRV_CLIENT_SYNC_PRIM *psSync);
+enum PVRSRV_ERROR SyncPrimGetFirmwareAddr(
+	struct PVRSRV_CLIENT_SYNC_PRIM *psSync,
+	__u32 *sync_addr);
+enum PVRSRV_ERROR SyncPrimSet(struct PVRSRV_CLIENT_SYNC_PRIM *psSync,
+	__u32 ui32Value);
+
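+/* Illustrative lifecycle sketch (assumed usage, not a definitive sequence):
+ * create a sync prim context on a device node, allocate and initialise one
+ * sync prim, then tear everything down. psDevNode is assumed to come from
+ * the caller and error checking is omitted for brevity.
+ *
+ *	struct SYNC_PRIM_CONTEXT *psCtx;
+ *	struct PVRSRV_CLIENT_SYNC_PRIM *psSync;
+ *
+ *	SyncPrimContextCreate(psDevNode, &psCtx);
+ *	SyncPrimAlloc(psCtx, &psSync, "example");
+ *	SyncPrimSet(psSync, 0);
+ *	SyncPrimFree(psSync);
+ *	SyncPrimContextDestroy(psCtx);
+ */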
+/* osfunc.h */
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+void OSAcquireBridgeLock(void);
+void OSReleaseBridgeLock(void);
+#endif
+enum PVRSRV_ERROR OSEventObjectWait(void *hOSEventKM);
+enum PVRSRV_ERROR OSEventObjectOpen(void *hEventObject, void **phOSEventKM);
+enum PVRSRV_ERROR OSEventObjectClose(void *hOSEventKM);
+__u32 OSGetCurrentClientProcessIDKM(void);
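+/* Illustrative sketch (assumed usage): block on the global event object
+ * using the prototypes above. Error handling is omitted for brevity.
+ *
+ *	void *hGlobalEvent, *hOSEvent;
+ *
+ *	PVRSRVAcquireGlobalEventObjectKM(&hGlobalEvent);
+ *	OSEventObjectOpen(hGlobalEvent, &hOSEvent);
+ *	OSEventObjectWait(hOSEvent);
+ *	OSEventObjectClose(hOSEvent);
+ *	PVRSRVReleaseGlobalEventObjectKM(hGlobalEvent);
+ */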
+
+/* srvkm.h */
+
+enum PVRSRV_ERROR PVRSRVDeviceCreate(void *pvOSDevice,
+	int i32UMIdentifier,
+	struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode);
+enum PVRSRV_ERROR PVRSRVDeviceDestroy(
+	struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+const char *PVRSRVGetErrorString(enum PVRSRV_ERROR eError);
+
+#ifndef _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, PVRSRV_FENCE fence, u32 *nr_checkpoints, PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid);
+
+#ifndef _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(
+		const char *fence_name,
+		PVRSRV_TIMELINE timeline,
+		PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+		PVRSRV_FENCE *new_fence,
+		u64 *fence_uid,
+		void **fence_finalise_data,
+		PSYNC_CHECKPOINT *new_checkpoint_handle,
+		void **timeline_update_sync,
+		__u32 *timeline_update_value);
+#endif
+
+#ifndef _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data);
+#endif
+
+#ifndef _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data);
+#endif
+
+#ifndef _CHECKPOINT_PFNS_
+typedef __u32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(__u32 num_ufos, __u32 *vaddrs);
+#endif
+
+#ifndef _CHECKPOINT_PFNS_
+typedef enum tag_img_bool (*PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN)(
+	__u32 ui32FwAddr, __u32 ui32Value);
+typedef enum PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN)(void);
+typedef void(*PFN_SYNC_CHECKPOINT_CHECK_STATE_FN)(void);
+#endif
+
+/* This is the function that the kick code will call, in a NO_HARDWARE build
+ * only, after sync checkpoints have been manually signalled, to allow the
+ * OS native sync implementation to update its timelines (as the usual
+ * callback notification of signalled checkpoints is not supported for
+ * NO_HARDWARE).
+ */
+#ifndef _CHECKPOINT_PFNS_
+typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data);
+typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr);
+
+#define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20
+
+typedef struct {
+	PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve;
+	PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate;
+	PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback;
+	PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise;
+	PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines;
+	PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem;
+	PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs;
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN pfnCheckpointHasSignalled;
+	PFN_SYNC_CHECKPOINT_CHECK_STATE_FN pfnCheckState;
+	PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN pfnSignalWaiters;
+#endif
+	char pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN];
+} PFN_SYNC_CHECKPOINT_STRUCT;
+
+enum PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns);
+
+#define _CHECKPOINT_PFNS_
+#endif
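+/* Illustrative sketch (not the definitive registration): a native sync
+ * implementation fills in the mandatory callbacks and registers the table.
+ * The My* function names are hypothetical implementation functions.
+ *
+ *	static PFN_SYNC_CHECKPOINT_STRUCT sImplPfns = {
+ *		.pfnFenceResolve          = MyFenceResolve,
+ *		.pfnFenceCreate           = MyFenceCreate,
+ *		.pfnFenceDataRollback     = MyFenceDataRollback,
+ *		.pfnFenceFinalise         = MyFenceFinalise,
+ *		.pfnNoHWUpdateTimelines   = MyNoHWUpdateTimelines,
+ *		.pfnFreeCheckpointListMem = MyFreeCheckpointListMem,
+ *		.pfnDumpInfoOnStalledUFOs = MyDumpInfoOnStalledUFOs,
+ *		.pszImplName              = "example",
+ *	};
+ *
+ *	eError = SyncCheckpointRegisterFunctions(&sImplPfns);
+ */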
+
+/* sync_checkpoint.h */
+enum PVRSRV_ERROR SyncCheckpointContextCreate(struct _PVRSRV_DEVICE_NODE_ *psDevConnection, PSYNC_CHECKPOINT_CONTEXT *phSyncCheckpointContext);
+enum PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext);
+void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext);
+void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext);
+enum PVRSRV_ERROR SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, PVRSRV_TIMELINE timeline, PVRSRV_FENCE fence, const char *pszCheckpointName, PSYNC_CHECKPOINT *ppsSyncCheckpoint);
+void SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+void SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+enum tag_img_bool SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+enum tag_img_bool SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+enum PVRSRV_ERROR SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+enum PVRSRV_ERROR SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+void SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+PVRSRV_TIMELINE SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint);
+const char *SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint);
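+/* Illustrative lifecycle sketch (assumed usage): allocate a checkpoint on
+ * a context, signal it, then free it once signalled. The context, timeline
+ * and fence handles are assumed to come from the caller; error checking is
+ * omitted.
+ *
+ *	PSYNC_CHECKPOINT psCheckpoint;
+ *
+ *	SyncCheckpointAlloc(psSyncContext, timeline, fence,
+ *			    "example", &psCheckpoint);
+ *	SyncCheckpointSignal(psCheckpoint, 0);
+ *	if (SyncCheckpointIsSignalled(psCheckpoint, 0))
+ *		SyncCheckpointFree(psCheckpoint);
+ */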
+
+#endif
+
+#endif /* __SERVICES_KERNEL_CLIENT__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/services_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/services_km.h
new file mode 100644
index 0000000..45a6089
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/services_km.h
@@ -0,0 +1,167 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services API Kernel mode Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exported services API details
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SERVICES_KM_H
+#define SERVICES_KM_H
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+/*! 4k page size definition */
+#define PVRSRV_4K_PAGE_SIZE					4096UL      /*!< Size of a 4K Page */
+#define PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT		12          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 16k page size definition */
+#define PVRSRV_16K_PAGE_SIZE					16384UL      /*!< Size of a 16K Page */
+#define PVRSRV_16K_PAGE_SIZE_ALIGNSHIFT		14          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 64k page size definition */
+#define PVRSRV_64K_PAGE_SIZE					65536UL      /*!< Size of a 64K Page */
+#define PVRSRV_64K_PAGE_SIZE_ALIGNSHIFT		16          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 256k page size definition */
+#define PVRSRV_256K_PAGE_SIZE					262144UL      /*!< Size of a 256K Page */
+#define PVRSRV_256K_PAGE_SIZE_ALIGNSHIFT		18          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 1MB page size definition */
+#define PVRSRV_1M_PAGE_SIZE					1048576UL      /*!< Size of a 1M Page */
+#define PVRSRV_1M_PAGE_SIZE_ALIGNSHIFT		20          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 2MB page size definition */
+#define PVRSRV_2M_PAGE_SIZE					2097152UL      /*!< Size of a 2M Page */
+#define PVRSRV_2M_PAGE_SIZE_ALIGNSHIFT		21          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+
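+/* Illustrative example: the ALIGNSHIFT values above round an address down
+ * to the corresponding page boundary. uiAddr is a hypothetical byte address.
+ *
+ *	uiAligned = (uiAddr >> PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT)
+ *			<< PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT;
+ *
+ * which is equivalent to uiAddr & ~(PVRSRV_4K_PAGE_SIZE - 1).
+ */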
+/*!
+ * Forward declaration (look on connection.h)
+ */
+typedef struct _PVRSRV_DEV_CONNECTION_ PVRSRV_DEV_CONNECTION;
+
+/*!
+	Flags for Services connection.
+	Allows a per-client policy to be defined for Services
+*/
+/*
+ *   Use of the 32-bit connection flags mask
+ *   ( X = taken/in use, - = available/unused )
+ *
+ *   31  27     20             6   2 0
+ *    |   |      |             |   | |
+ *    X---XXXXXXXX-------------XXXXX--
+ */
+
+#define SRV_WORKEST_ENABLED             (1U << 2)  /*!< If Workload Estimation is enabled */
+#define SRV_PDVFS_ENABLED               (1U << 3)  /*!< If PDVFS is enabled */
+#define SRV_NO_HWPERF_CLIENT_STREAM     (1U << 4)  /*!< Don't create HWPerf for this connection */
+#define SRV_FLAGS_CLIENT_64BIT_COMPAT   (1U << 5)  /*!< This flag gets set if the client is 64 Bit compatible. */
+#define SRV_FLAGS_CLIENT_SLR_DISABLED   (1U << 6)  /*!< This flag is set if the client does not want Sync Lockup Recovery (SLR) enabled. */
+#define SRV_FLAGS_PDUMPCTRL             (1U << 31) /*!< PDump Ctrl client flag */
+
+/*
+ * Bits 20 - 27 are used to pass information needed for validation
+ * of the GPU Virtualisation Validation mechanism. In particular:
+ *
+ * Bits:
+ * [20 - 22]: OSid of the memory region that will be used for allocations
+ * [23 - 25]: OSid that will be emitted by the Firmware for all memory accesses
+ *            regarding that memory context.
+ *      [26]: Whether the AXI Protection register will be set to secure for
+ *            that OSid
+ *      [27]: Whether the Emulator Wrapper Register checking for protection
+ *            violation will be set to secure for that OSid
+ */
+
+#define VIRTVAL_FLAG_OSID_SHIFT        (20)
+#define SRV_VIRTVAL_FLAG_OSID_MASK     (7U << VIRTVAL_FLAG_OSID_SHIFT)
+
+#define VIRTVAL_FLAG_OSIDREG_SHIFT     (23)
+#define SRV_VIRTVAL_FLAG_OSIDREG_MASK  (7U << VIRTVAL_FLAG_OSIDREG_SHIFT)
+
+#define VIRTVAL_FLAG_AXIPREG_SHIFT     (26)
+#define SRV_VIRTVAL_FLAG_AXIPREG_MASK  (1U << VIRTVAL_FLAG_AXIPREG_SHIFT)
+
+#define VIRTVAL_FLAG_AXIPTD_SHIFT      (27)
+#define SRV_VIRTVAL_FLAG_AXIPTD_MASK   (1U << VIRTVAL_FLAG_AXIPTD_SHIFT)
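+/* Illustrative example: decoding the validation fields from a connection
+ * flags word, mirroring how PVRSRVConnectKM consumes these masks.
+ *
+ *	ui32OSid    = (ui32Flags & SRV_VIRTVAL_FLAG_OSID_MASK)
+ *				>> VIRTVAL_FLAG_OSID_SHIFT;
+ *	ui32OSidReg = (ui32Flags & SRV_VIRTVAL_FLAG_OSIDREG_MASK)
+ *				>> VIRTVAL_FLAG_OSIDREG_SHIFT;
+ */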
+
+
+/* Size of pointer on a 64 bit machine */
+#define	POINTER_SIZE_64BIT	(8)
+
+
+/*
+    Pdump flags which are accessible to Services clients
+*/
+#define PDUMP_NONE          0x00000000UL /*!< No flags */
+
+#define PDUMP_BLKDATA       0x10000000UL /*!< This flag indicates block-mode PDump data to be recorded in
+                                                          Block script stream in addition to Main script stream,
+                                                          if capture mode is set to BLOCKED */
+
+#define PDUMP_CONT          0x40000000UL /*!< Output this entry always regardless of framed capture range,
+                                                          used by client applications being dumped. */
+#define PDUMP_PERSIST       0x80000000UL /*!< Output this entry always regardless of app and range,
+                                                          used by persistent resources created after
+                                                          driver initialisation that must appear in
+                                                          all PDump captures in that session. */
+
+/* Valid range of values for pdump block length in Block mode of PDump */
+#define PDUMP_BLOCKLEN_MIN          10
+#define PDUMP_BLOCKLEN_MAX          1000
+
+#define PDUMP_FRAME_MIN             0
+#define PDUMP_FRAME_MAX             (IMG_UINT32_MAX - 1)
+#define PDUMP_FRAME_UNSET           IMG_UINT32_MAX
+
+/* Status of the device. */
+typedef enum
+{
+	PVRSRV_DEVICE_STATUS_UNKNOWN,        /* status of the device is unknown */
+	PVRSRV_DEVICE_STATUS_OK,             /* the device is operational */
+	PVRSRV_DEVICE_STATUS_NOT_RESPONDING, /* the device is not responding */
+	PVRSRV_DEVICE_STATUS_DEVICE_ERROR    /* the device is not operational */
+} PVRSRV_DEVICE_STATUS;
+
+#endif /* SERVICES_KM_H */
+/**************************************************************************//**
+End of file (services_km.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/servicesext.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/servicesext.h
new file mode 100644
index 0000000..9b90108
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/servicesext.h
@@ -0,0 +1,172 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services definitions required by external drivers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides services data structures, defines and prototypes
+                required by external drivers
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__SERVICESEXT_H__)
+#define __SERVICESEXT_H__
+
+/* include/ */
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_3dtypes.h"
+#include "pvrsrv_device_types.h"
+
+
+/*
+ * Lock buffer read/write flags
+ */
+#define PVRSRV_LOCKFLG_READONLY     	(1)		/*!< The locking process will only read the locked surface */
+
+/*!
+ *****************************************************************************
+ *	Services State
+ *****************************************************************************/
+typedef enum _PVRSRV_SERVICES_STATE_
+{
+	PVRSRV_SERVICES_STATE_UNDEFINED = 0,
+	PVRSRV_SERVICES_STATE_OK,
+	PVRSRV_SERVICES_STATE_BAD,
+} PVRSRV_SERVICES_STATE;
+
+
+/*!
+ *****************************************************************************
+ *	States for power management
+ *****************************************************************************/
+/*!
+  System Power State Enum
+ */
+typedef enum _PVRSRV_SYS_POWER_STATE_
+{
+	PVRSRV_SYS_POWER_STATE_Unspecified		= -1,	/*!< Unspecified : Uninitialised */
+	PVRSRV_SYS_POWER_STATE_OFF				= 0,	/*!< Off */
+	PVRSRV_SYS_POWER_STATE_ON				= 1,	/*!< On */
+
+	PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff   /*!< Force enum to be at least 32-bits wide */
+
+} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE; /*!< Typedef for ptr to PVRSRV_SYS_POWER_STATE */
+
+/*!
+  Device Power State Enum
+ */
+typedef enum _PVRSRV_DEV_POWER_STATE_
+{
+	PVRSRV_DEV_POWER_STATE_DEFAULT	= -1,	/*!< Default state for the device */
+	PVRSRV_DEV_POWER_STATE_OFF		= 0,	/*!< Unpowered */
+	PVRSRV_DEV_POWER_STATE_ON		= 1,	/*!< Running */
+
+	PVRSRV_DEV_POWER_STATE_FORCE_I32 = 0x7fffffff   /*!< Force enum to be at least 32-bits wide */
+
+} PVRSRV_DEV_POWER_STATE, *PPVRSRV_DEV_POWER_STATE;	/*!< Typedef for ptr to PVRSRV_DEV_POWER_STATE */ /* PRQA S 3205 */
+
+
+/* Power transition handler prototypes */
+
+/*!
+  Typedef for a pointer to a Function that will be called before a transition
+  from one power state to another. See also PFN_POST_POWER.
+ */
+typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE				hDevHandle,
+									   PVRSRV_DEV_POWER_STATE	eNewPowerState,
+									   PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+									   IMG_BOOL					bForced);
+/*!
+  Typedef for a pointer to a Function that will be called after a transition
+  from one power state to another. See also PFN_PRE_POWER.
+ */
+typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE				hDevHandle,
+										PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+										IMG_BOOL				bForced);
+
+/* Clock speed handler prototypes */
+
+/*!
+  Typedef for a pointer to a Function that will be called before a transition
+  from one clockspeed to another. See also PFN_POST_CLOCKSPEED_CHANGE.
+ */
+typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE				hDevHandle,
+												   PVRSRV_DEV_POWER_STATE	eCurrentPowerState);
+
+/*!
+  Typedef for a pointer to a Function that will be called after a transition
+  from one clockspeed to another. See also PFN_PRE_CLOCKSPEED_CHANGE.
+ */
+typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE				hDevHandle,
+													PVRSRV_DEV_POWER_STATE	eCurrentPowerState);
+
+/*!
+  Typedef for a pointer to a function that will be called to transition the device
+  to a forced idle state. Used in conjunction with (forced) power requests, DVFS and cluster count changes.
+ */
+typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_REQUEST) (IMG_HANDLE				hDevHandle,
+							IMG_BOOL			bDeviceOffPermitted);
+
+/*!
+  Typedef for a pointer to a function that will be called to cancel a forced idle state
+  and return the firmware back to a state where the hardware can be scheduled.
+ */
+typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_CANCEL_REQUEST) (IMG_HANDLE			hDevHandle);
+
+typedef PVRSRV_ERROR (*PFN_DUST_COUNT_REQUEST) (IMG_HANDLE			hDevHandle,
+						IMG_UINT32			ui32DustCount);
+
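+/* Illustrative sketch (assumed shape, not a definitive implementation): a
+ * system-layer pre-power handler matching PFN_PRE_POWER above. The name
+ * ExamplePrePower is hypothetical; a real handler would gate clocks or
+ * regulators before reporting PVRSRV_OK for a transition to OFF.
+ *
+ *	static PVRSRV_ERROR ExamplePrePower(IMG_HANDLE hDevHandle,
+ *		PVRSRV_DEV_POWER_STATE eNewPowerState,
+ *		PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ *		IMG_BOOL bForced)
+ *	{
+ *		PVR_UNREFERENCED_PARAMETER(hDevHandle);
+ *		PVR_UNREFERENCED_PARAMETER(bForced);
+ *		if (eNewPowerState == eCurrentPowerState)
+ *			return PVRSRV_OK;
+ *		return PVRSRV_OK;
+ *	}
+ */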
+/*!
+ *****************************************************************************
+ * This structure is used for OS independent registry (profile) access
+ *****************************************************************************/
+
+typedef struct _PVRSRV_REGISTRY_INFO
+{
+	IMG_UINT32			ui32DevCookie;
+	IMG_PCHAR			pszKey;
+	IMG_PCHAR			pszValue;
+	IMG_PCHAR			pszBuf;
+	IMG_UINT32			ui32BufSize;
+} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
+
+#endif /* __SERVICESEXT_H__ */
+/*****************************************************************************
+ End of file (servicesext.h)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sofunc_pvr.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sofunc_pvr.h
new file mode 100644
index 0000000..48bc94d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sofunc_pvr.h
@@ -0,0 +1,94 @@
+/*************************************************************************/ /*!
+@File
+@Title          SO Interface header file for common PVR functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Contains SO interface functions. These functions are defined in
+                the common layer and are called from the env layer OS specific
+                implementation.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SOFUNC_PVR_H_)
+#define SOFUNC_PVR_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "device.h"
+#include "pvr_notifier.h"
+
+
+/**************************************************************************/ /*!
+ @Function     SOPvrDbgRequestNotifyRegister
+ @Description  SO Interface function called from the OS layer implementation.
+               Register a callback function that is called when a debug request
+               is made via a call to PVRSRVDebugRequest. There are a number of
+               verbosity levels ranging from DEBUG_REQUEST_VERBOSITY_LOW up to
+               DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once
+               for each level up to the highest level specified to
+               PVRSRVDebugRequest.
+@Output        phNotify             On success, points to debug notifier handle
+@Input         psDevNode            Device node for which the debug callback
+                                    should be registered
+@Input         pfnDbgRequestNotify  Function callback
+@Input         ui32RequesterID      Requester ID. This is used to determine
+                                    the order in which callbacks are called,
+                                    see DEBUG_REQUEST_*
+@Input         hDbgRequestHandle    Data to be passed back to the caller via
+                                    the callback function
+@Return        PVRSRV_ERROR         PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR SOPvrDbgRequestNotifyRegister(IMG_HANDLE *phNotify,
+							  PVRSRV_DEVICE_NODE *psDevNode,
+							  PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+							  IMG_UINT32 ui32RequesterID,
+							  PVRSRV_DBGREQ_HANDLE hDbgRequestHandle);
+
+/**************************************************************************/ /*!
+ @Function     SOPvrDbgRequestNotifyUnregister
+ @Description  SO Interface function called from the OS layer implementation.
+               Remove and clean up the specified notifier registration so that
+               it does not receive any further callbacks.
+ @Input        hNotify     Handle returned to caller from
+                           SOPvrDbgRequestNotifyRegister().
+ @Return       PVRSRV_ERROR
+*/ /***************************************************************************/
+PVRSRV_ERROR SOPvrDbgRequestNotifyUnregister(IMG_HANDLE hNotify);
+
+
+#endif /* SOFUNC_PVR_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sofunc_rgx.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sofunc_rgx.h
new file mode 100644
index 0000000..be9594d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sofunc_rgx.h
@@ -0,0 +1,95 @@
+/*************************************************************************/ /*!
+@File
+@Title          SO Interface header file for devices/RGX functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Contains SO interface functions. These functions are defined in
+                the common devices layer and are called from the env layer OS
+                specific implementation.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SOFUNC_RGX_H_)
+#define SOFUNC_RGX_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+#if !defined(NO_HARDWARE)
+/*!
+*******************************************************************************
+
+ @Function     SORgxGpuUtilStatsRegister
+
+ @Description  SO Interface function called from the OS layer implementation.
+               Initialise data used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument). This function must be called only once for each
+               different user/handle.
+
+ @Input        phGpuUtilUser - Pointer to handle used to identify a user of
+                               RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser);
+
+
+/*!
+*******************************************************************************
+
+ @Function     SORgxGpuUtilStatsUnregister
+
+ @Description  SO Interface function called from the OS layer implementation.
+               Free data previously used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument).
+
+ @Input        hGpuUtilUser - Handle used to identify a user of
+                              RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser);
+#endif /* !defined(NO_HARDWARE) */
+
+
+
+#endif /* SOFUNC_RGX_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/srvcore.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/srvcore.c
new file mode 100644
index 0000000..8799d45
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/srvcore.c
@@ -0,0 +1,1421 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Common Bridge Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements core PVRSRV API, server side
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "ra.h"
+#include "pvr_bridge.h"
+#include "connection_server.h"
+#include "device.h"
+#include "htbuffer.h"
+
+#include "pdump_km.h"
+
+#include "srvkm.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "log2.h"
+
+#include "srvcore.h"
+#include "pvrsrv.h"
+#include "power.h"
+
+#if defined(SUPPORT_RGX)
+#include "rgxdevice.h"
+#include "rgxinit.h"
+#endif
+
+#include "rgx_options.h"
+#include "pvrversion.h"
+#include "lock.h"
+#include "osfunc.h"
+#include "device_connection.h"
+#include "process_stats.h"
+#include "pvrsrv_pool.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "physmem_lma.h"
+#include "services_km.h"
+#endif
+
+#include "pvrsrv_tlstreams.h"
+#include "tlstream.h"
+
+#if defined(PVRSRV_MISSING_NO_SPEC_IMPL)
+#pragma message ("There is no implementation of OSConfineArrayIndexNoSpeculation() - see osfunc.h")
+#endif
+
+/* For the purpose of maintainability, it is intended that this file should not
+ * contain any OS specific #ifdefs. Please find a way to add e.g.
+ * an osfunc.c abstraction or override the entire function in question within
+ * env,*,pvr_bridge_k.c
+ */
+
+PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT] = { {.pfFunction = DummyBW,} ,};
+
+#define		PVR_DISPATCH_OFFSET_FIRST_FUNC 			0
+#define 	PVR_DISPATCH_OFFSET_LAST_FUNC 			1
+#define		PVR_DISPATCH_OFFSET_ARRAY_MAX 			2
+
+#define PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE
+
+static IMG_UINT16 g_BridgeDispatchTableStartOffsets[BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT][PVR_DISPATCH_OFFSET_ARRAY_MAX];
+
+
+#define PVRSRV_MAX_POOLED_BRIDGE_BUFFERS 8	/*!< Initial number of pooled bridge buffers */
+
+static PVRSRV_POOL *g_psBridgeBufferPool;	/*!< Pool of bridge buffers */
+
+
+#if defined(DEBUG_BRIDGE_KM)
+/* a lock used for protecting bridge call timing calculations
+ * for calls which do not acquire a lock
+ */
+static POS_LOCK g_hStatsLock;
+PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+
+void BridgeGlobalStatsLock(void)
+{
+	OSLockAcquire(g_hStatsLock);
+}
+
+void BridgeGlobalStatsUnlock(void)
+{
+	OSLockRelease(g_hStatsLock);
+}
+#endif
+
+void BridgeDispatchTableStartOffsetsInit(void)
+{
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCEXPORT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCEXPORT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCSEXPORT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCSEXPORT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCFALLBACK][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCFALLBACK][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST;
+#if defined(SUPPORT_RGX)
+	/* Need a gap here to start next entry at element 128 */
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXBREAKPOINT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXBREAKPOINT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXFWDBG][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXFWDBG][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXREGCONFIG][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXREGCONFIG][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXSIGNALS][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXSIGNALS][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST;
+#endif
+}
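+/* Illustrative sketch (hypothetical helper, not part of this file's API):
+ * how the FIRST/LAST offsets recorded above can be consumed to map a
+ * (bridge group, function id) pair onto a dispatch table index.
+ *
+ *	static IMG_UINT32 ExampleDispatchIndex(IMG_UINT32 ui32BridgeGroup,
+ *					       IMG_UINT32 ui32FunctionID)
+ *	{
+ *		return g_BridgeDispatchTableStartOffsets[ui32BridgeGroup]
+ *				[PVR_DISPATCH_OFFSET_FIRST_FUNC] + ui32FunctionID;
+ *	}
+ */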
+
+#if defined(DEBUG_BRIDGE_KM)
+
+#if defined(INTEGRITY_OS)
+PVRSRV_ERROR PVRSRVPrintBridgeStats(void)
+{
+	IMG_UINT32 ui32Index;
+	IMG_UINT32 ui32Remainder;
+
+	printf("Total Bridge call count = %u\n"
+		   "Total number of bytes copied via copy_from_user = %u\n"
+		   "Total number of bytes copied via copy_to_user = %u\n"
+		   "Total number of bytes copied via copy_*_user = %u\n\n"
+		   "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s \n",
+		   g_BridgeGlobalStats.ui32IOCTLCount,
+		   g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
+		   g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+		   g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+		   "#",
+		   "Bridge Name",
+		   "Wrapper Function",
+		   "Call Count",
+		   "copy_from_user (B)",
+		   "copy_to_user (B)",
+		   "Total Time (us)",
+		   "Max Time (us)");
+
+	/* Print a row for every entry in the dispatch table. */
+	for ( ui32Index=0; ui32Index < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT; ui32Index++ )
+	{
+		PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = &g_BridgeDispatchTable[ui32Index];
+		printf("%3d: %-60s   %-48s   %-10u   %-20u   %-20u   %-20llu   %-20llu\n",
+			   (IMG_UINT32)(((size_t)psEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)),
+			   psEntry->pszIOCName,
+			   (psEntry->pfFunction != NULL) ? psEntry->pszFunctionName : "(null)",
+			   psEntry->ui32CallCount,
+			   psEntry->ui32CopyFromUserTotalBytes,
+			   psEntry->ui32CopyToUserTotalBytes,
+			   (unsigned long long) OSDivide64r64(psEntry->ui64TotalTimeNS, 1000, &ui32Remainder),
+			   (unsigned long long) OSDivide64r64(psEntry->ui64MaxTimeNS, 1000, &ui32Remainder));
+	}
+
+	return PVRSRV_OK;
+}
+#endif
+
+PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+					IMG_UINT32 ui32DispatchTableEntry,
+					void *pvDest,
+					void __user *pvSrc,
+					IMG_UINT32 ui32Size)
+{
+	g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyFromUserTotalBytes+=ui32Size;
+	g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
+	return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+				  IMG_UINT32 ui32DispatchTableEntry,
+				  void __user *pvDest,
+				  void *pvSrc,
+				  IMG_UINT32 ui32Size)
+{
+	g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyToUserTotalBytes+=ui32Size;
+	g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
+	return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+#else
+INLINE PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+					IMG_UINT32 ui32DispatchTableEntry,
+					void *pvDest,
+					void __user *pvSrc,
+					IMG_UINT32 ui32Size)
+{
+	PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry);
+	return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+INLINE PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+				  IMG_UINT32 ui32DispatchTableEntry,
+				  void __user *pvDest,
+				  void *pvSrc,
+				  IMG_UINT32 ui32Size)
+{
+	PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry);
+	return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+#endif
+
+PVRSRV_ERROR
+PVRSRVConnectKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE * psDeviceNode,
+				IMG_UINT32 ui32Flags,
+				IMG_UINT32 ui32ClientBuildOptions,
+				IMG_UINT32 ui32ClientDDKVersion,
+				IMG_UINT32 ui32ClientDDKBuild,
+				IMG_UINT8  *pui8KernelArch,
+				IMG_UINT32 *pui32CapabilityFlags,
+				IMG_UINT32 *ui32PVRBridges,
+				IMG_UINT32 *ui32RGXBridges)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	IMG_UINT32			ui32BuildOptions, ui32BuildOptionsMismatch;
+	IMG_UINT32			ui32DDKVersion, ui32DDKBuild;
+	PVRSRV_DATA			*psSRVData = NULL;
+	IMG_UINT64			ui64ProcessVASpaceSize = OSGetCurrentProcessVASpaceSize();
+	static IMG_BOOL		bIsFirstConnection=IMG_FALSE;
+
+	/* Clear the flags */
+	*pui32CapabilityFlags = 0;
+
+	psSRVData = PVRSRVGetPVRSRVData();
+
+	psConnection->ui32ClientFlags = ui32Flags;
+
+	/* output the available bridges */
+	*ui32PVRBridges = gui32PVRBridges;
+#if defined(SUPPORT_RGX)
+	*ui32RGXBridges = gui32RGXBridges;
+#else
+	*ui32RGXBridges = 0;
+#endif
+
+	/* Is the system snooping of caches emulated in software? */
+	if (PVRSRVSystemSnoopingIsEmulated(psDeviceNode->psDevConfig))
+	{
+		*pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_EMULATE_FLAG;
+	}
+	else
+	{
+		/* Set flags to pass back to the client showing which cache coherency is available. */
+		/* Is the system CPU cache coherent? */
+		if (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig))
+		{
+			*pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_DEVICE_FLAG;
+		}
+		/* Is the system device cache coherent? */
+		if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig))
+		{
+			*pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_CPU_FLAG;
+		}
+	}
+
+	/* Does the system have non-mappable device-local memory? */
+	if (PVRSRVSystemHasNonMappableLocalMemory(psDeviceNode->psDevConfig))
+	{
+		*pui32CapabilityFlags |= PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG;
+	}
+
+	if (psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode))
+	{
+		*pui32CapabilityFlags |= PVRSRV_FBCDC_V3_1_USED;
+	}
+
+	/* Set flags to indicate shared-virtual-memory (SVM) allocation availability */
+	if (! psDeviceNode->ui64GeneralSVMHeapTopVA || ! ui64ProcessVASpaceSize)
+	{
+		*pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED;
+	}
+	else
+	{
+		if (ui64ProcessVASpaceSize <= psDeviceNode->ui64GeneralSVMHeapTopVA)
+		{
+			*pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED;
+		}
+		else
+		{
+			/* This can happen when the processor has more virtual address bits
+			   than the device (i.e. the alloc is not always guaranteed to succeed) */
+			*pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL;
+		}
+	}
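+
+	/* Illustrative outcomes of the checks above (numbers hypothetical): a
+	 * 48-bit process VA space against a device SVM heap topping out at 2^40
+	 * reports SVM_ALLOC_CANFAIL; a 39-bit process VA space against the same
+	 * heap reports SVM_ALLOC_SUPPORTED; a zero heap top or zero process VA
+	 * space size reports SVM_ALLOC_UNSUPPORTED.
+	 */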
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
+	IMG_BOOL   bOSidAxiProtReg = IMG_FALSE;
+
+	IMG_PID pIDCurrent = OSGetCurrentClientProcessIDKM();
+
+	ui32OSid    = (ui32Flags & SRV_VIRTVAL_FLAG_OSID_MASK)    >> (VIRTVAL_FLAG_OSID_SHIFT);
+	ui32OSidReg = (ui32Flags & SRV_VIRTVAL_FLAG_OSIDREG_MASK) >> (VIRTVAL_FLAG_OSIDREG_SHIFT);
+
+#if defined(EMULATOR)
+
+	if (((PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice)->sDevFeatureCfg.ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK)
+	{
+		IMG_UINT32 ui32OSidAxiProtReg = 0, ui32OSidAxiProtTD = 0;
+
+		ui32OSidAxiProtReg = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPREG_MASK) >> (VIRTVAL_FLAG_AXIPREG_SHIFT);
+		ui32OSidAxiProtTD  = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPTD_MASK)  >> (VIRTVAL_FLAG_AXIPTD_SHIFT);
+
+		PVR_DPF((PVR_DBG_MESSAGE,
+				"[AxiProt & Virt]: Setting bOSidAxiProt of Emulator's Trusted Device for Catbase %d to %s",
+				ui32OSidReg,
+				(ui32OSidAxiProtTD == 1) ? "TRUE" : "FALSE"));
+
+		bOSidAxiProtReg = (ui32OSidAxiProtReg == 1);
+		PVR_DPF((PVR_DBG_MESSAGE,
+				"[AxiProt & Virt]: Setting bOSidAxiProt of FW's Register for Catbase %d to %s",
+				ui32OSidReg,
+				bOSidAxiProtReg ? "TRUE" : "FALSE"));
+
+		SetAxiProtOSid(ui32OSidReg, ui32OSidAxiProtTD);
+	}
+
+#endif
+
+	InsertPidOSidsCoupling(pIDCurrent, ui32OSid, ui32OSidReg, bOSidAxiProtReg);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "[GPU Virtualization Validation]: OSIDs: %d, %d", ui32OSid, ui32OSidReg));
+}
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	/* Only enabled if enabled in the UM */
+	if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_WORKLOAD_ESTIMATION_MASK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Workload Estimation disabled. Not enabled in UM",
+				__func__));
+	}
+#endif
+
+#if defined(SUPPORT_PDVFS)
+	/* Only enabled if enabled in the UM */
+	if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_PDVFS_MASK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Proactive DVFS disabled. Not enabled in UM",
+				__func__));
+	}
+#endif
+
+	ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+	ui32DDKBuild = PVRVERSION_BUILD;
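+
+	/* Sketch of the packed-version comparison used below (bit layout assumed
+	 * from PVRVERSION_PACK/PVRVERSION_UNPACK_* in pvrversion.h):
+	 *   packed = (MAJ << 16) | MIN
+	 * so a client built against a different MAJ.MIN produces a different
+	 * packed value and fails the DDK version check.
+	 */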
+
+	if (ui32Flags & SRV_FLAGS_CLIENT_64BIT_COMPAT)
+	{
+		psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_64BIT;
+	}
+	else
+	{
+		psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_32BIT;
+	}
+
+	if (IMG_FALSE == bIsFirstConnection)
+	{
+		psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+		psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions = ui32ClientBuildOptions;
+
+		psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildVersion = ui32DDKVersion;
+		psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildVersion = ui32ClientDDKVersion;
+
+		psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildRevision = ui32DDKBuild;
+		psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision = ui32ClientDDKBuild;
+
+		psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType =
+				((RGX_BUILD_OPTIONS_KM) & OPTIONS_DEBUG_MASK) ? BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE;
+
+		psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType =
+				(ui32ClientBuildOptions & OPTIONS_DEBUG_MASK) ? BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE;
+
+		if (sizeof(void *) == POINTER_SIZE_64BIT)
+		{
+			psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_64BIT;
+		}
+		else
+		{
+			psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_32BIT;
+		}
+	}
+
+	/* Mask out every option that is not kernel-specific */
+	ui32ClientBuildOptions &= RGX_BUILD_OPTIONS_MASK_KM;
+
+	/*
+	 * Validate the build options
+	 */
+	ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+	if (ui32BuildOptions != ui32ClientBuildOptions)
+	{
+		ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32ClientBuildOptions;
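+		/* The XOR leaves only the option bits that differ; e.g. with
+		 * hypothetical values KM = 0x0005 and UM = 0x0004, the mismatch
+		 * mask is 0x0001, and the checks below attribute each differing
+		 * bit to the side that has it set. */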
+#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
+		/* Mask out the debug option, as combinations of debug vs release in UM & KM are supported */
+		ui32BuildOptionsMismatch &= OPTIONS_STRICT;
+#endif
+		if ((ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
+				"extra options present in client-side driver: (0x%x). Please check rgx_options.h",
+				__func__,
+				ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+			eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+			goto chk_exit;
+		}
+
+		if ((ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
+				"extra options present in KM driver: (0x%x). Please check rgx_options.h",
+				__func__,
+				ui32BuildOptions & ui32BuildOptionsMismatch ));
+			eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+			goto chk_exit;
+		}
+		if (IMG_FALSE == bIsFirstConnection)
+		{
+			PVR_LOG(("%s: COMPAT_TEST: Client-side (0x%04x) (%s) and KM driver (0x%04x) (%s) build options differ.",
+					__func__,
+					ui32ClientBuildOptions,
+					(psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType) ? "release" : "debug",
+					ui32BuildOptions,
+					(psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType) ? "release" : "debug"));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_WARNING, "%s: COMPAT_TEST: Client-side (0x%04x) and KM driver (0x%04x) build options differ.",
+					__func__,
+					ui32ClientBuildOptions,
+					ui32BuildOptions));
+		}
+		if (!psSRVData->sDriverInfo.bIsNoMatch)
+			psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: Client-side and KM driver build options match. [ OK ]", __func__));
+	}
+
+	/*
+	 * Validate DDK version
+	 */
+	if (ui32ClientDDKVersion != ui32DDKVersion)
+	{
+		if (!psSRVData->sDriverInfo.bIsNoMatch)
+			psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+		PVR_LOG(("(FAIL) %s: Incompatible driver DDK version (%u.%u) / client DDK version (%u.%u).",
+				__func__,
+				PVRVERSION_MAJ, PVRVERSION_MIN,
+				PVRVERSION_UNPACK_MAJ(ui32ClientDDKVersion),
+				PVRVERSION_UNPACK_MIN(ui32ClientDDKVersion)));
+		eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+		PVR_DBG_BREAK;
+		goto chk_exit;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK version (%u.%u) and client DDK version (%u.%u) match. [ OK ]",
+				__func__,
+				PVRVERSION_MAJ, PVRVERSION_MIN, PVRVERSION_MAJ, PVRVERSION_MIN));
+	}
+
+	/* Create a stream for every connection except for the special clients
+	 * that don't need one, e.g. recipients of HWPerf data. */
+	if (!(psConnection->ui32ClientFlags & SRV_NO_HWPERF_CLIENT_STREAM))
+	{
+		IMG_CHAR acStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+		OSSNPrintf(acStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE,
+		           PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC,
+		           psDeviceNode->sDevId.i32UMIdentifier,
+		           psConnection->pid);
+
+		eError = TLStreamCreate(&psConnection->hClientTLStream, psDeviceNode,
+		                        acStreamName,
+		                        PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT,
+		                        TL_OPMODE_DROP_NEWER |
+		                        TL_FLAG_ALLOCATE_ON_FIRST_OPEN,
+		                        NULL, NULL, NULL, NULL);
+		if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_ALREADY_EXISTS)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Could not create private TL stream (%s)",
+					PVRSRVGetErrorString(eError)));
+			psConnection->hClientTLStream = NULL;
+		}
+		else if (eError == PVRSRV_OK)
+		{
+			/* Set "tlctrl" stream as a notification channel. This channel is
+			 * is used to notify recipients about stream open/close (by writer)
+			 * actions (and possibly other actions in the future). */
+			eError = TLStreamSetNotifStream(psConnection->hClientTLStream,
+			                                psSRVData->hTLCtrlStream);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Failed to set notification stream"));
+				TLStreamClose(psConnection->hClientTLStream);
+				psConnection->hClientTLStream = NULL;
+			}
+		}
+
+		/* Reset error status. Don't want to propagate any errors from here */
+		eError = PVRSRV_OK;
+		PVR_DPF((PVR_DBG_MESSAGE, "Created stream \"%s\".", acStreamName));
+	}
+
+	/*
+	 * Validate DDK build
+	 */
+	if (ui32ClientDDKBuild != ui32DDKBuild)
+	{
+		if (!psSRVData->sDriverInfo.bIsNoMatch)
+			psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+		PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch in driver DDK revision (%d) / client DDK revision (%d).",
+				__func__, ui32DDKBuild, ui32ClientDDKBuild));
+#if defined(PVRSRV_STRICT_COMPAT_CHECK)
+		eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH;
+		PVR_DBG_BREAK;
+		goto chk_exit;
+#endif
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK revision (%d) and client DDK revision (%d) match. [ OK ]",
+				__func__, ui32DDKBuild, ui32ClientDDKBuild));
+	}
+
+	/* Success so far; is it the PDump client that is connecting? */
+	if (ui32Flags & SRV_FLAGS_PDUMPCTRL)
+	{
+		PDumpConnectionNotify();
+	}
+
+	PVR_ASSERT(pui8KernelArch != NULL);
+
+	if (psSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT)
+	{
+		*pui8KernelArch = 64;
+	}
+	else
+	{
+		*pui8KernelArch = 32;
+	}
+
+	bIsFirstConnection = IMG_TRUE;
+
+#if defined(DEBUG_BRIDGE_KM)
+	{
+		int ii;
+
+		/* dump dispatch table offset lookup table */
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: g_BridgeDispatchTableStartOffsets[0-%lu] entries:", __func__, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT - 1));
+		for (ii=0; ii < BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT; ii++)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "g_BridgeDispatchTableStartOffsets[%d]: %u", ii, g_BridgeDispatchTableStartOffsets[ii][PVR_DISPATCH_OFFSET_FIRST_FUNC]));
+		}
+	}
+#endif
+
+#if defined(PDUMP)
+	if (!(ui32Flags & SRV_FLAGS_PDUMPCTRL))
+	{
+		IMG_UINT64 ui64PDumpState = 0;
+
+		PDumpGetStateKM(&ui64PDumpState);
+		if (ui64PDumpState & PDUMP_STATE_CONNECTED)
+		{
+			*pui32CapabilityFlags |= PVRSRV_PDUMP_IS_RECORDING;
+		}
+	}
+#endif
+
+chk_exit:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVDisconnectKM(void)
+{
+#if defined(INTEGRITY_OS) && defined(DEBUG_BRIDGE_KM)
+	PVRSRVPrintBridgeStats();
+#endif
+	/* just return OK, per-process data is cleaned up by resmgr */
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAcquireGlobalEventObjectKM
+@Description    Acquire the global event object.
+@Output         phGlobalEventObject    On success, points to the global event
+                                       object handle
+@Return         PVRSRV_ERROR           PVRSRV_OK on success or an error
+                                       otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	*phGlobalEventObject = psPVRSRVData->hGlobalEventObject;
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function       PVRSRVReleaseGlobalEventObjectKM
+@Description    Release the global event object.
+@Output         hGlobalEventObject    Global event object handle
+@Return         PVRSRV_ERROR          PVRSRV_OK on success or an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	PVR_ASSERT(psPVRSRVData->hGlobalEventObject == hGlobalEventObject);
+
+	return PVRSRV_OK;
+}
+
+/*
+	PVRSRVDumpDebugInfoKM
+*/
+PVRSRV_ERROR
+PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection,
+					  PVRSRV_DEVICE_NODE *psDeviceNode,
+					  IMG_UINT32 ui32VerbLevel)
+{
+	if (ui32VerbLevel > DEBUG_REQUEST_VERBOSITY_MAX)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	PVR_LOG(("User requested PVR debug info"));
+
+	PVRSRVDebugRequest(psDeviceNode, ui32VerbLevel, NULL, NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+	PVRSRVGetDevClockSpeedKM
+*/
+PVRSRV_ERROR
+PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE *psDeviceNode,
+						 IMG_PUINT32  pui32RGXClockSpeed)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVR_ASSERT(psDeviceNode->pfnDeviceClockSpeed != NULL);
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	eError = psDeviceNode->pfnDeviceClockSpeed(psDeviceNode, pui32RGXClockSpeed);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDevClockSpeedKM: "
+				"Could not get device clock speed (%d)!",
+				eError));
+	}
+
+	return eError;
+}
+
+
+/*
+	PVRSRVHWOpTimeoutKM
+*/
+PVRSRV_ERROR
+PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection,
+					PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
+	PVR_LOG(("User requested OS reset"));
+	OSPanic();
+#endif
+	PVR_LOG(("HW operation timeout, dump server info"));
+	PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+	return PVRSRV_OK;
+}
+
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32DispatchTableEntry,
+		void *psBridgeIn,
+		void *psBridgeOut,
+		CONNECTION_DATA *psConnection)
+{
+	PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+	PVR_UNREFERENCED_PARAMETER(psBridgeOut);
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if defined(DEBUG_BRIDGE_KM)
+	PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u (%s) mapped to "
+			 "Dummy Wrapper (probably not what you want!)",
+			 __func__, ui32DispatchTableEntry, g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName));
+#else
+	PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u mapped to "
+			 "Dummy Wrapper (probably not what you want!)",
+			 __func__, ui32DispatchTableEntry));
+#endif
+	return PVRSRV_ERROR_BRIDGE_ENOTTY;
+}
+
+PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection,
+                                    PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    IMG_UINT32 ui32AlignChecksSize,
+                                    IMG_UINT32 aui32AlignChecks[])
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(NO_HARDWARE) && defined(RGXFW_ALIGNCHECKS)
+
+	PVR_ASSERT(psDeviceNode->pfnAlignmentCheck != NULL);
+	return psDeviceNode->pfnAlignmentCheck(psDeviceNode, ui32AlignChecksSize,
+	                                       aui32AlignChecks);
+
+#else
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(ui32AlignChecksSize);
+	PVR_UNREFERENCED_PARAMETER(aui32AlignChecks);
+
+	return PVRSRV_OK;
+
+#endif /* !defined(NO_HARDWARE) && defined(RGXFW_ALIGNCHECKS) */
+
+}
+
+PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_UINT32 *pui32DeviceStatus)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* First try to update the status. */
+	if (psDeviceNode->pfnUpdateHealthStatus != NULL)
+	{
+		PVRSRV_ERROR eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode,
+		                                                          IMG_FALSE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDeviceStatusKM: Failed to "
+					 "check for device status (%d)", eError));
+
+			/* Return unknown status and error because we don't know what
+			 * happened and if the status is valid. */
+			*pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN;
+			return eError;
+		}
+	}
+
+	switch (OSAtomicRead(&psDeviceNode->eHealthStatus))
+	{
+		case PVRSRV_DEVICE_HEALTH_STATUS_OK:
+			*pui32DeviceStatus = PVRSRV_DEVICE_STATUS_OK;
+			return PVRSRV_OK;
+		case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:
+			*pui32DeviceStatus = PVRSRV_DEVICE_STATUS_NOT_RESPONDING;
+			return PVRSRV_OK;
+		case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:
+		case PVRSRV_DEVICE_HEALTH_STATUS_FAULT:
+		case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED:
+			*pui32DeviceStatus = PVRSRV_DEVICE_STATUS_DEVICE_ERROR;
+			return PVRSRV_OK;
+		default:
+			*pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN;
+			return PVRSRV_ERROR_INTERNAL_ERROR;
+	}
+}
+
+/*!
+ * *****************************************************************************
+ * @brief A wrapper for removing entries from the g_BridgeDispatchTable array.
+ *        All this does is zero the entry, allowing the table to be fully
+ *        re-populated later.
+ *
+ * @param ui32BridgeGroup  Bridge group that the entry belongs to
+ * @param ui32Index        Index of the entry within its group
+ *
+ * @return None
+ ********************************************************************************/
+void
+UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+                        IMG_UINT32 ui32Index)
+{
+	ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+
+	g_BridgeDispatchTable[ui32Index].pfFunction = NULL;
+	g_BridgeDispatchTable[ui32Index].hBridgeLock = NULL;
+	g_BridgeDispatchTable[ui32Index].bUseLock = 0;
+#if defined(DEBUG_BRIDGE_KM)
+	g_BridgeDispatchTable[ui32Index].pszIOCName = NULL;
+	g_BridgeDispatchTable[ui32Index].pszFunctionName = NULL;
+	g_BridgeDispatchTable[ui32Index].pszBridgeLockName = NULL;
+	g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
+	g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
+	g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0;
+	g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0;
+#endif
+}
+
+/*!
+ * *****************************************************************************
+ * @brief A wrapper for filling in the g_BridgeDispatchTable array that does
+ *        error checking.
+ *
+ * @param ui32BridgeGroup    Bridge group that the entry belongs to
+ * @param ui32Index          Index of the entry within its group
+ * @param pszIOCName         Name of the ioctl
+ * @param pfFunction         Wrapper function to dispatch to
+ * @param pszFunctionName    Name of the wrapper function
+ * @param hBridgeLock        Optional lock to take around the call
+ * @param pszBridgeLockName  Name of that lock
+ * @param bUseLock           Whether any lock should be taken at all
+ *
+ * @return None
+ ********************************************************************************/
+void
+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+					   IMG_UINT32 ui32Index,
+					   const IMG_CHAR *pszIOCName,
+					   BridgeWrapperFunction pfFunction,
+					   const IMG_CHAR *pszFunctionName,
+					   POS_LOCK hBridgeLock,
+					   const IMG_CHAR *pszBridgeLockName,
+					   IMG_BOOL bUseLock)
+{
+	static IMG_UINT32 ui32PrevIndex = IMG_UINT32_MAX;		/* -1 */
+
+#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
+	PVR_UNREFERENCED_PARAMETER(pszFunctionName);
+	PVR_UNREFERENCED_PARAMETER(pszBridgeLockName);
+#endif
+
+	ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+	/* Enable this to dump out the dispatch table entries */
+	PVR_DPF((PVR_DBG_WARNING, "%s: g_BridgeDispatchTableStartOffsets[%d]=%d", __func__, ui32BridgeGroup, g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]));
+	PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s %s", __func__, ui32Index, pszIOCName, pszFunctionName, pszBridgeLockName));
+#endif
+
+	/* Any gaps are sub-optimal in terms of memory usage, but we are mainly
+	 * interested in spotting any large gap of wasted memory that could be
+	 * accidentally introduced.
+	 *
+	 * This currently flags any jump of DISPATCH_TABLE_GAP_THRESHOLD
+	 * (5) entries or more.
+	 *
+	 * NOTE: This shouldn't be debug only, since switching from debug to
+	 * release etc. is likely to modify the available ioctls and thus be a
+	 * point where mistakes are exposed. This isn't run at a
+	 * performance-critical time.
+	 */
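+	/* Example (values hypothetical): with the threshold at 5,
+	 * ui32PrevIndex == 10 and ui32Index == 15 triggers the warning below,
+	 * ui32Index == 14 does not, and ui32Index <= ui32PrevIndex catches
+	 * out-of-order registration. */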
+	if ((ui32PrevIndex != IMG_UINT32_MAX) &&
+		((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
+		 (ui32Index <= ui32PrevIndex)))
+	{
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)",
+				 __func__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
+				 ui32Index, pszIOCName));
+#else
+		PVR_DPF((PVR_DBG_MESSAGE,
+				 "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
+				 __func__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName));
+#endif
+	}
+
+	if (ui32Index >= BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Index %u (%s) out of range",
+				 __func__, (IMG_UINT)ui32Index, pszIOCName));
+
+#if defined(DEBUG_BRIDGE_KM)
+		PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE_DISPATCH_TABLE_ENTRY_COUNT = %lu",
+				 __func__, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT));
+#if defined(SUPPORT_RGX)
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST));
+
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_DISPATCH_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_RGX_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_RGX_LAST));
+#endif
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST = %lu",
+				 __func__, PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST));
+#endif
+
+		OSPanic();
+	}
+
+	/* Panic if the previous entry has been overwritten as this is not allowed!
+	 * NOTE: This shouldn't be debug only since switching from debug->release
+	 * etc is likely to modify the available ioctls and thus be a point where
+	 * mistakes are exposed. This isn't run at a performance critical time.
+	 */
+	if (g_BridgeDispatchTable[ui32Index].pfFunction)
+	{
+		if (g_BridgeDispatchTable[ui32Index].pfFunction != pfFunction)
+		{
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+			PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Adding dispatch table entry for %s clobbers an existing entry for %s (current pfn=<%p>, new pfn=<%p>)",
+				 __func__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName,
+				 (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction));
+#else
+			PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Adding dispatch table entry for %s clobbers an existing entry (index=%u). (current pfn=<%p>, new pfn=<%p>)",
+				 __func__, pszIOCName, ui32Index,
+				 (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction));
+			PVR_DPF((PVR_DBG_WARNING, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue."));
+#endif
+			OSPanic();
+		}
+	}
+	else
+	{
+		g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
+		g_BridgeDispatchTable[ui32Index].hBridgeLock = hBridgeLock;
+		g_BridgeDispatchTable[ui32Index].bUseLock = bUseLock;
+#if defined(DEBUG_BRIDGE_KM)
+		g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
+		g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
+		g_BridgeDispatchTable[ui32Index].pszBridgeLockName = pszBridgeLockName;
+		g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
+		g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
+		g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0;
+		g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0;
+#endif
+	}
+
+	ui32PrevIndex = ui32Index;
+}
+
+static PVRSRV_ERROR _BridgeBufferAlloc(void *pvPrivData, void **pvOut)
+{
+	PVR_UNREFERENCED_PARAMETER(pvPrivData);
+
+	*pvOut = OSAllocZMem(PVRSRV_MAX_BRIDGE_IN_SIZE +
+	                     PVRSRV_MAX_BRIDGE_OUT_SIZE);
+
+	if (*pvOut == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	return PVRSRV_OK;
+}
+
+static void _BridgeBufferFree(void *pvPrivData, void *pvFreeData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvPrivData);
+
+	OSFreeMem(pvFreeData);
+}
+
+PVRSRV_ERROR BridgeDispatcherInit(void)
+{
+	PVRSRV_ERROR eError;
+
+#if defined(DEBUG_BRIDGE_KM)
+	eError = OSLockCreate(&g_hStatsLock);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", errorLockCreateFailed);
+#endif
+
+	eError = PVRSRVPoolCreate(_BridgeBufferAlloc,
+	                          _BridgeBufferFree,
+	                          PVRSRV_MAX_POOLED_BRIDGE_BUFFERS,
+	                          "Bridge buffer pool",
+	                          NULL,
+	                          &g_psBridgeBufferPool);
+	PVR_LOGG_IF_ERROR(eError, "PVRSRVPoolCreate", erroPoolCreateFailed);
+
+	return PVRSRV_OK;
+
+errorPoolCreateFailed:
+#if defined(DEBUG_BRIDGE_KM)
+	OSLockDestroy(g_hStatsLock);
+	g_hStatsLock = NULL;
+errorLockCreateFailed:
+#endif
+	return eError;
+}
+
+void BridgeDispatcherDeinit(void)
+{
+	if (g_psBridgeBufferPool)
+	{
+		PVRSRVPoolDestroy(g_psBridgeBufferPool);
+		g_psBridgeBufferPool = NULL;
+	}
+
+#if defined(DEBUG_BRIDGE_KM)
+	if (g_hStatsLock)
+	{
+		OSLockDestroy(g_hStatsLock);
+		g_hStatsLock = NULL;
+	}
+#endif
+}
+
+
+PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA *psConnection,
+                               PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM)
+{
+	void *psBridgeIn = NULL;
+	void *psBridgeOut = NULL;
+	BridgeWrapperFunction pfBridgeHandler;
+	IMG_UINT32   ui32DispatchTableEntry, ui32GroupBoundary;
+	PVRSRV_ERROR err = PVRSRV_OK;
+	PVRSRV_POOL_TOKEN hBridgeBufferPoolToken = NULL;
+	IMG_UINT32 ui32Timestamp = OSClockus();
+#if defined(DEBUG_BRIDGE_KM)
+	IMG_UINT64	ui64TimeStart;
+	IMG_UINT64	ui64TimeEnd;
+	IMG_UINT64	ui64TimeDiff;
+#endif
+	IMG_UINT32	ui32DispatchTableIndex, ui32DispatchTableEntryIndex;
+
+#if defined(DEBUG_BRIDGE_KM_STOP_AT_DISPATCH)
+	PVR_DBG_BREAK;
+#endif
+
+	if (psBridgePackageKM->ui32BridgeID >= BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Out of range dispatch table group ID: %d",
+		        __func__, psBridgePackageKM->ui32BridgeID));
+		err = PVRSRV_ERROR_BRIDGE_EINVAL;
+		goto return_error;
+	}
+
+	ui32DispatchTableIndex = OSConfineArrayIndexNoSpeculation(psBridgePackageKM->ui32BridgeID, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT);
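+	/* OSConfineArrayIndexNoSpeculation() clamps the already range-checked
+	 * index so it cannot be used speculatively out of bounds (a Spectre
+	 * variant-1 style mitigation for this user-controlled value). */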
+
+	ui32DispatchTableEntry = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+	ui32GroupBoundary = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_LAST_FUNC];
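+
+	/* Two-level lookup sketch (numbers hypothetical): if this bridge group
+	 * starts at flat offset 40 with its last function at offset 50, then
+	 * function ID 7 resolves to flat entry 40 + 7 = 47, which the checks
+	 * below verify against the group boundary and the overall table size. */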
+
+	/* The bridge function is not implemented in this build */
+	if (0 == ui32DispatchTableEntry)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)",
+					__func__,
+					ui32DispatchTableEntry,ui32GroupBoundary, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+		/* This points to DummyBW(), which returns PVRSRV_ERROR_BRIDGE_ENOTTY */
+		err = g_BridgeDispatchTable[ui32DispatchTableEntry].pfFunction(ui32DispatchTableEntry,
+				  psBridgeIn,
+				  psBridgeOut,
+				  psConnection);
+		goto return_error;
+	}
+	if ((ui32DispatchTableEntry + psBridgePackageKM->ui32FunctionID) > ui32GroupBoundary)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)",
+					__func__,
+					ui32DispatchTableEntry,ui32GroupBoundary, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+		err = PVRSRV_ERROR_BRIDGE_EINVAL;
+		goto return_error;
+	}
+	ui32DispatchTableEntry += psBridgePackageKM->ui32FunctionID;
+	ui32DispatchTableEntryIndex = OSConfineArrayIndexNoSpeculation(ui32DispatchTableEntry, ui32GroupBoundary+1);
+	if (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT <= ui32DispatchTableEntry)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, entry count = %lu,"
+		        " (bridge module %d, function %d)", __func__,
+		        ui32DispatchTableEntry, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT,
+		        psBridgePackageKM->ui32BridgeID,
+		        psBridgePackageKM->ui32FunctionID));
+		err = PVRSRV_ERROR_BRIDGE_EINVAL;
+		goto return_error;
+	}
+#if defined(DEBUG_BRIDGE_KM)
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch table entry index=%d, (bridge module %d, function %d)",
+			__func__,
+			ui32DispatchTableEntryIndex, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: %s",
+			 __func__,
+			 g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pszIOCName));
+	g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32CallCount++;
+	g_BridgeGlobalStats.ui32IOCTLCount++;
+#endif
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL &&
+	    g_BridgeDispatchTable[ui32DispatchTableEntryIndex].bUseLock)
+	{
+		/* Acquire default global bridge lock if calling module has no independent lock */
+		OSAcquireBridgeLock();
+
+		/* Request for global bridge buffers */
+		OSGetGlobalBridgeBuffers(&psBridgeIn,
+		                         &psBridgeOut);
+	}
+	else
+#endif /* PVRSRV_USE_BRIDGE_LOCK */
+	{
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL &&
+		    g_BridgeDispatchTable[ui32DispatchTableEntryIndex].bUseLock)
+		{
+			OSLockAcquire(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock);
+		}
+#endif
+#if !defined(INTEGRITY_OS)
+		/* try to acquire a bridge buffer from the pool */
+
+		err = PVRSRVPoolGet(g_psBridgeBufferPool,
+		                    &hBridgeBufferPoolToken,
+		                    &psBridgeIn);
+
+		if (err != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to get bridge buffer from global pool"));
+			goto unlock_and_return_error;
+		}
+
+		psBridgeOut = ((IMG_BYTE *) psBridgeIn) + PVRSRV_MAX_BRIDGE_IN_SIZE;
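+		/* The pooled buffer is a single allocation of
+		 * PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE bytes
+		 * (see _BridgeBufferAlloc), so the out-buffer simply starts at the
+		 * end of the in-buffer region. */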
+#endif
+	}
+
+#if defined(DEBUG_BRIDGE_KM)
+	ui64TimeStart = OSClockns64();
+#endif
+
+	if (psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bridge input buffer too small "
+		        "(data size %u, buffer size %u)!", __func__,
+		        psBridgePackageKM->ui32InBufferSize, PVRSRV_MAX_BRIDGE_IN_SIZE));
+		err = PVRSRV_ERROR_BRIDGE_ERANGE;
+		goto unlock_and_return_error;
+	}
+
+#if !defined(INTEGRITY_OS)
+	if (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bridge output buffer too small "
+		        "(data size %u, buffer size %u)!", __func__,
+		        psBridgePackageKM->ui32OutBufferSize, PVRSRV_MAX_BRIDGE_OUT_SIZE));
+		err = PVRSRV_ERROR_BRIDGE_ERANGE;
+		goto unlock_and_return_error;
+	}
+
+	if ((CopyFromUserWrapper (psConnection,
+							  ui32DispatchTableEntryIndex,
+							  psBridgeIn,
+							  psBridgePackageKM->pvParamIn,
+							  psBridgePackageKM->ui32InBufferSize) != PVRSRV_OK)
+#if defined(__QNXNTO__)
+/* For Neutrino, the output bridge buffer acts as an input as well */
+					|| (CopyFromUserWrapper(psConnection,
+											ui32DispatchTableEntryIndex,
+											psBridgeOut,
+											(void *)((uintptr_t)psBridgePackageKM->pvParamIn + psBridgePackageKM->ui32InBufferSize),
+											psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK)
+#endif
+		) /* end of if-condition */
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: CopyFromUserWrapper returned an error!", __func__));
+		err = PVRSRV_ERROR_BRIDGE_EFAULT;
+		goto unlock_and_return_error;
+	}
+#else
+	psBridgeIn = psBridgePackageKM->pvParamIn;
+	psBridgeOut = psBridgePackageKM->pvParamOut;
+#endif
+
+	pfBridgeHandler =
+		(BridgeWrapperFunction)g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pfFunction;
+
+	if (pfBridgeHandler == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: ui32DispatchTableEntry = %d is not a registered function!",
+				 __func__, ui32DispatchTableEntry));
+		err = PVRSRV_ERROR_BRIDGE_EFAULT;
+		goto unlock_and_return_error;
+	}
+
+	/* pfBridgeHandler functions return an IMG_INT and are not expected to
+	 * fail; the value returned is always 0, i.e. PVRSRV_OK. In the event
+	 * this changes, an error may be +ve or -ve, so try to return something
+	 * consistent here.
+	 */
+	if (0 != pfBridgeHandler(ui32DispatchTableEntryIndex,
+						  psBridgeIn,
+						  psBridgeOut,
+						  psConnection)
+		)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: pfBridgeHandler returned an error", __func__));
+		err = PVRSRV_ERROR_BRIDGE_EPERM;
+		goto unlock_and_return_error;
+	}
+
+	/*
+	   This should always be true as, at the moment, all bridge calls have
+	   to return an error code, but this could change, so we do this check
+	   to be safe.
+	*/
+	if (psBridgePackageKM->ui32OutBufferSize > 0)
+	{
+#if !defined(INTEGRITY_OS)
+		if (CopyToUserWrapper (psConnection,
+						ui32DispatchTableEntryIndex,
+						psBridgePackageKM->pvParamOut,
+						psBridgeOut,
+						psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK)
+		{
+			err = PVRSRV_ERROR_BRIDGE_EFAULT;
+			goto unlock_and_return_error;
+		}
+#endif
+	}
+
+#if defined(DEBUG_BRIDGE_KM)
+	ui64TimeEnd = OSClockns64();
+
+	ui64TimeDiff = ui64TimeEnd - ui64TimeStart;
+
+	/* if there is no lock held then acquire the stats lock to
+	 * ensure the calculations are done safely
+	 */
+	if (!g_BridgeDispatchTable[ui32DispatchTableEntryIndex].bUseLock)
+	{
+		BridgeGlobalStatsLock();
+	}
+
+	g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64TotalTimeNS += ui64TimeDiff;
+
+	if (ui64TimeDiff > g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS)
+	{
+		g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS = ui64TimeDiff;
+	}
+
+	if (!g_BridgeDispatchTable[ui32DispatchTableEntryIndex].bUseLock)
+	{
+		BridgeGlobalStatsUnlock();
+	}
+#endif
+
+unlock_and_return_error:
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL &&
+	    g_BridgeDispatchTable[ui32DispatchTableEntryIndex].bUseLock)
+	{
+		OSReleaseBridgeLock();
+	}
+	else
+#endif
+	{
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL &&
+		    g_BridgeDispatchTable[ui32DispatchTableEntryIndex].bUseLock)
+		{
+			OSLockRelease(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock);
+		}
+#endif
+
+#if !defined(INTEGRITY_OS)
+		if (hBridgeBufferPoolToken != NULL)
+		{
+			err = PVRSRVPoolPut(g_psBridgeBufferPool,
+			                    hBridgeBufferPoolToken);
+
+			if (err != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Failed to return bridge buffer to global pool"));
+			}
+		}
+#endif
+	}
+
+return_error:
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: returning (err = %d)", __func__, err));
+	}
+	/* ignore transport layer bridge to avoid HTB flooding */
+	if (psBridgePackageKM->ui32BridgeID != PVRSRV_BRIDGE_PVRTL)
+	{
+		if (err)
+		{
+			HTBLOGK(HTB_SF_BRG_BRIDGE_CALL_ERR, ui32Timestamp,
+			        psBridgePackageKM->ui32BridgeID,
+			        psBridgePackageKM->ui32FunctionID, err);
+		}
+		else
+		{
+			HTBLOGK(HTB_SF_BRG_BRIDGE_CALL, ui32Timestamp,
+			        psBridgePackageKM->ui32BridgeID,
+			        psBridgePackageKM->ui32FunctionID);
+		}
+	}
+
+	return err;
+}
+
+PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemStatArray)
+{
+#if !defined(__QNXNTO__)
+	return PVRSRVFindProcessMemStats(pid,
+					ui32ArrSize,
+					bAllProcessStats,
+					pui32MemStatArray);
+#else
+	PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform"));
+
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/srvcore.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/srvcore.h
new file mode 100644
index 0000000..85a8cb1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/srvcore.h
@@ -0,0 +1,213 @@
+/**************************************************************************/ /*!
+@File
+@Title          PVR Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the PVR Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __BRIDGED_PVR_BRIDGE_H__
+#define __BRIDGED_PVR_BRIDGE_H__
+
+#include "lock_types.h"
+#include "connection_server.h"
+#include "pvr_debug.h"
+
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+
+PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+					IMG_UINT32 ui32DispatchTableEntry,
+					void *pvDest,
+					void __user *pvSrc,
+					IMG_UINT32 ui32Size);
+PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+				  IMG_UINT32 ui32DispatchTableEntry,
+				  void __user *pvDest,
+				  void *pvSrc,
+				  IMG_UINT32 ui32Size);
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32DispatchTableEntry,
+		void *psBridgeIn,
+		void *psBridgeOut,
+		CONNECTION_DATA *psConnection);
+
+typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32DispatchTableEntry,
+									 void *psBridgeIn,
+									 void *psBridgeOut,
+									 CONNECTION_DATA *psConnection);
+
+typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
+{
+	BridgeWrapperFunction pfFunction; /*!< The wrapper function that validates the ioctl
+										arguments before calling into srvkm proper */
+	POS_LOCK	hBridgeLock;	/*!< The bridge lock which needs to be acquired
+						before calling the above wrapper */
+	IMG_BOOL    bUseLock;                 /*!< Specify whether to use a bridge lock at all */
+#if defined(DEBUG_BRIDGE_KM)
+	const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */
+	const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. "PVRSRVConnectBW" */
+	const IMG_CHAR *pszBridgeLockName;	/*!< Name of bridge lock which will be acquired */
+	IMG_UINT32 ui32CallCount; /*!< The total number of times the ioctl has been called */
+	IMG_UINT32 ui32CopyFromUserTotalBytes; /*!< The total number of bytes copied from
+											 userspace within this ioctl */
+	IMG_UINT32 ui32CopyToUserTotalBytes; /*!< The total number of bytes copied to
+										   userspace within this ioctl */
+	IMG_UINT64 ui64TotalTimeNS; /*!< The total amount of time spent in this bridge function */
+	IMG_UINT64 ui64MaxTimeNS; /*!< The maximum amount of time for a single call to this bridge function */
+#endif
+} PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
+
+#if defined(SUPPORT_RGX)
+	#define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT  (PVRSRV_BRIDGE_RGX_DISPATCH_LAST+1)
+	#define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT  (PVRSRV_BRIDGE_RGX_LAST+1)
+#else
+	#define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT  (PVRSRV_BRIDGE_DISPATCH_LAST+1)
+	#define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT  (PVRSRV_BRIDGE_LAST+1)
+#endif
+
+extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
+
+void BridgeDispatchTableStartOffsetsInit(void);
+
+void
+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+					   IMG_UINT32 ui32Index,
+					   const IMG_CHAR *pszIOCName,
+					   BridgeWrapperFunction pfFunction,
+					   const IMG_CHAR *pszFunctionName,
+					   POS_LOCK hBridgeLock,
+					   const IMG_CHAR* pszBridgeLockName,
+					   IMG_BOOL bUseLock);
+void
+UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+					   	IMG_UINT32 ui32Index);
+
+
+/* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */
+#define SetDispatchTableEntry(ui32BridgeGroup, ui32Index, pfFunction,\
+					hBridgeLock, bUseLock) \
+	_SetDispatchTableEntry(ui32BridgeGroup, ui32Index, #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction,\
+							(POS_LOCK)hBridgeLock, #hBridgeLock, bUseLock )
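+
+/* Usage sketch (ioctl/function names hypothetical): a bridge module would
+ * register each of its entry points as, e.g.,
+ *   SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT,
+ *                         PVRSRVConnectBW, NULL, IMG_TRUE);
+ * The stringification in the macro is what supplies pszIOCName and
+ * pszFunctionName for DEBUG_BRIDGE_KM builds.
+ */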
+
+#define DISPATCH_TABLE_GAP_THRESHOLD 5
+
+
+#if defined(DEBUG_BRIDGE_KM)
+typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
+{
+	IMG_UINT32 ui32IOCTLCount;
+	IMG_UINT32 ui32TotalCopyFromUserBytes;
+	IMG_UINT32 ui32TotalCopyToUserBytes;
+} PVRSRV_BRIDGE_GLOBAL_STATS;
+
+void BridgeGlobalStatsLock(void);
+void BridgeGlobalStatsUnlock(void);
+
+/* OS specific code may want to report the stats held here and within the
+ * BRIDGE_DISPATCH_TABLE_ENTRYs (E.g. on Linux we report these via a
+ * debugfs entry /sys/kernel/debug/pvr/bridge_stats) */
+extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+#endif
+
+PVRSRV_ERROR BridgeDispatcherInit(void);
+void BridgeDispatcherDeinit(void);
+
+PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection,
+					  PVRSRV_BRIDGE_PACKAGE   * psBridgePackageKM);
+
+
+PVRSRV_ERROR
+PVRSRVConnectKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE * psDeviceNode,
+				IMG_UINT32 ui32Flags,
+				IMG_UINT32 ui32ClientBuildOptions,
+				IMG_UINT32 ui32ClientDDKVersion,
+				IMG_UINT32 ui32ClientDDKBuild,
+				IMG_UINT8  *pui8KernelArch,
+				IMG_UINT32 *pui32CapabilityFlags,
+				IMG_UINT32 *pui32PVRBridges,
+				IMG_UINT32 *pui32RGXBridges);
+
+PVRSRV_ERROR
+PVRSRVDisconnectKM(void);
+
+PVRSRV_ERROR
+PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject);
+
+PVRSRV_ERROR
+PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject);
+
+PVRSRV_ERROR
+PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection,
+					  PVRSRV_DEVICE_NODE *psDeviceNode,
+					  IMG_UINT32 ui32VerbLevel);
+
+PVRSRV_ERROR
+PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE *psDeviceNode,
+                         IMG_PUINT32  pui32RGXClockSpeed);
+
+PVRSRV_ERROR
+PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection,
+					PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection,
+                                    PVRSRV_DEVICE_NODE * psDeviceNode,
+                                    IMG_UINT32 ui32FWAlignChecksSize,
+                                    IMG_UINT32 aui32FWAlignChecks[]);
+
+PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_UINT32 *pui32DeviceStatus);
+
+PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid,
+                                         IMG_UINT32 ui32ArrSize,
+                                         IMG_BOOL bAllProcessStats,
+                                         IMG_UINT32 *pui32MemStatArray);
+
+#endif /* __BRIDGED_PVR_BRIDGE_H__ */
+
+/******************************************************************************
+ End of file (srvcore.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/srvinit.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/srvinit.h
new file mode 100644
index 0000000..5fe3ee2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/srvinit.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File
+@Title          Initialisation server internal header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the connections between the various parts of the
+		initialisation server.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __SRVINIT_H__
+#define __SRVINIT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "device_connection.h"
+#include "device.h"
+
+#if defined(SUPPORT_RGX)
+PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+#endif /* __SRVINIT_H__ */
+
+/******************************************************************************
+ End of file (srvinit.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/srvkm.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/srvkm.h
new file mode 100644
index 0000000..5f17eb3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/srvkm.h
@@ -0,0 +1,141 @@
+/**************************************************************************/ /*!
+@File
+@Title          Services kernel module internal header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_H
+#define SRVKM_H
+
+#include "servicesext.h"
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+struct _PVRSRV_DEVICE_NODE_;
+
+/*************************************************************************/ /*!
+@Function     PVRSRVDriverInit
+@Description  Performs one time initialisation of Services.
+@Return       PVRSRV_ERROR   PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDriverInit(void);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVDriverDeInit
+@Description  Performs one time de-initialisation of Services.
+@Return       void
+*/ /**************************************************************************/
+void IMG_CALLCONV PVRSRVDriverDeInit(void);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVDeviceCreate
+@Description  Creates a PVR Services device node for an OS native device.
+@Input        pvOSDevice      OS native device
+@Input        i32UMIdentifier A unique identifier which helps recognise this
+                              device in the UM space.
+@Output       ppsDeviceNode   Points to the new device node on success
+@Return       PVRSRV_ERROR    PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV
+PVRSRVDeviceCreate(void *pvOSDevice, IMG_INT32 i32UMIdentifier,
+				   struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVDeviceInitialise
+@Description  Initialises the given device, created by PVRSRVDeviceCreate, so
+              that it is in a functional state ready to be used.
+@Input        psDeviceNode  Device node of the device to be initialised
+@Return       PVRSRV_ERROR  PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceInitialise(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVDeviceDestroy
+@Description  Destroys a PVR Services device node.
+@Input        psDeviceNode  Device node to destroy
+@Return       PVRSRV_ERROR  PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV
+PVRSRVDeviceDestroy(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
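+
+/* Typical driver/device lifecycle sketch using the functions above (error
+ * handling omitted; pvOSDevice and the identifier are placeholders):
+ *
+ *   struct _PVRSRV_DEVICE_NODE_ *psDeviceNode;
+ *   PVRSRVDriverInit();
+ *   PVRSRVDeviceCreate(pvOSDevice, 0, &psDeviceNode);
+ *   PVRSRVDeviceInitialise(psDeviceNode);
+ *   ...device in use...
+ *   PVRSRVDeviceDestroy(psDeviceNode);
+ *   PVRSRVDriverDeInit();
+ */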
+
+/******************
+HIGHER LEVEL MACROS
+*******************/
+
+/*----------------------------------------------------------------------------
+Repeats the body of the loop for a certain minimum time, or until the body
+exits by its own means (break, return, goto, etc.)
+
+Example of usage:
+
+LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+{
+	if (psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
+	{
+		bTimeout = IMG_FALSE;
+		break;
+	}
+
+	OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+} END_LOOP_UNTIL_TIMEOUT();
+
+-----------------------------------------------------------------------------*/
+
+/*	iNotLastLoop will remain at 1 until the timeout has expired, at which time
+ *	it will be decremented and the loop executed one final time. This is
+ *	necessary when preemption is enabled.
+ */
+/* PRQA S 3411,3431 12 */ /* critical format, leave alone */
+#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
+{\
+	IMG_UINT32 uiOffset, uiStart, uiCurrent; \
+	IMG_INT32 iNotLastLoop;					 \
+	for (uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\
+		((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--;				\
+		uiCurrent = OSClockus(),													\
+		uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset,		\
+		uiStart = uiCurrent < uiStart ? 0 : uiStart)
+
+#define END_LOOP_UNTIL_TIMEOUT() \
+}
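+
+/* Note on the loop header above: uiOffset compensates for a single wrap of
+ * the 32-bit microsecond clock (detected via uiCurrent < uiStart), and
+ * iNotLastLoop forces exactly one extra iteration once the deadline has
+ * passed, so a body preempted across the timeout still gets a final chance
+ * to observe success. */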
+
+#endif /* SRVKM_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync.c
new file mode 100644
index 0000000..c4944c2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync.c
@@ -0,0 +1,1951 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services synchronisation interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements client side code for services synchronisation
+                interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "client_sync_bridge.h"
+#include "client_synctracking_bridge.h"
+#include "info_page_client.h"
+#include "pvr_bridge.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+#include "sync.h"
+#include "sync_internal.h"
+#include "lock.h"
+#include "log2.h"
+/* FIXME */
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+
+#define SYNC_BLOCK_LIST_CHUNCK_SIZE	10
+
+/*
+	This defines the maximum amount of synchronisation memory
+	that can be allocated per SyncPrim context.
+	In reality this number is meaningless as we would run out
+	of synchronisation memory before we reach this limit, but
+	we need to provide a size to the span RA.
+ */
+#define MAX_SYNC_MEM				(4 * 1024 * 1024)
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+typedef struct _SYNC_BLOCK_LIST_
+{
+	IMG_UINT32			ui32BlockCount;			/*!< Number of blocks in the list */
+	IMG_UINT32			ui32BlockListSize;		/*!< Size of the block array */
+	SYNC_PRIM_BLOCK		**papsSyncPrimBlock;	/*!< Array of syncprim blocks */
+} SYNC_BLOCK_LIST;
+
+typedef struct _SYNC_OP_COOKIE_
+{
+	IMG_UINT32				ui32SyncCount;
+	IMG_UINT32				ui32ClientSyncCount;
+	IMG_UINT32				ui32ServerSyncCount;
+	IMG_BOOL				bHaveServerSync;
+	IMG_HANDLE				hBridge;
+	IMG_HANDLE				hServerCookie;
+
+	SYNC_BLOCK_LIST			*psSyncBlockList;
+	PVRSRV_CLIENT_SYNC_PRIM	**papsSyncPrim;
+	/*
+		Client sync(s) info.
+		If this changes update the calculation of ui32ClientAllocSize
+	 */
+	IMG_UINT32				*paui32SyncBlockIndex;
+	IMG_UINT32				*paui32Index;
+	IMG_UINT32				*paui32Flags;
+	IMG_UINT32				*paui32FenceValue;
+	IMG_UINT32				*paui32UpdateValue;
+
+	/*
+		Server sync(s) info
+		If this changes update the calculation of ui32ServerAllocSize
+	 */
+	IMG_HANDLE				*pahServerSync;
+	IMG_UINT32              *paui32ServerFlags;
+} SYNC_OP_COOKIE;
+#endif
+
+/* forward declaration */
+static PVRSRV_ERROR
+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value);
+
+/*
+	Internal interfaces for management of SYNC_PRIM_CONTEXT
+ */
+static void
+_SyncPrimContextUnref(SYNC_PRIM_CONTEXT *psContext)
+{
+	if (!OSAtomicRead(&psContext->hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_SyncPrimContextUnref context already freed"));
+	}
+	else if (0 == OSAtomicDecrement(&psContext->hRefCount))
+	{
+		/* SyncPrimContextDestroy only when no longer referenced */
+		RA_Delete(psContext->psSpanRA);
+		RA_Delete(psContext->psSubAllocRA);
+		OSFreeMem(psContext);
+	}
+}
+
+static void
+_SyncPrimContextRef(SYNC_PRIM_CONTEXT *psContext)
+{
+	if (!OSAtomicRead(&psContext->hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_SyncPrimContextRef context use after free"));
+	}
+	else
+	{
+		OSAtomicIncrement(&psContext->hRefCount);
+	}
+}
+
+/*
+	Internal interfaces for management of synchronisation block memory
+ */
+static PVRSRV_ERROR
+AllocSyncPrimitiveBlock(SYNC_PRIM_CONTEXT *psContext,
+                        SYNC_PRIM_BLOCK **ppsSyncBlock)
+{
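+	/* Sequence: the server allocates the block and returns a PMR handle;
+	 * the PMR is made importable, mapped into the client, and the temporary
+	 * import handle is released again regardless of whether the mapping
+	 * succeeded.
+	 */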
+	SYNC_PRIM_BLOCK *psSyncBlk;
+	IMG_HANDLE hSyncPMR;
+	IMG_HANDLE hSyncImportHandle;
+	IMG_DEVMEM_SIZE_T uiImportSize;
+	PVRSRV_ERROR eError;
+
+	psSyncBlk = OSAllocMem(sizeof(SYNC_PRIM_BLOCK));
+	if (psSyncBlk == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+	psSyncBlk->psContext = psContext;
+
+	/* Allocate sync prim block */
+	eError = BridgeAllocSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection),
+	                                       &psSyncBlk->hServerSyncPrimBlock,
+	                                       &psSyncBlk->ui32FirmwareAddr,
+	                                       &psSyncBlk->ui32SyncBlockSize,
+	                                       &hSyncPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_blockalloc;
+	}
+
+	/* Make it mappable by the client */
+	eError = DevmemMakeLocalImportHandle(psContext->hDevConnection,
+	                                     hSyncPMR,
+	                                     &hSyncImportHandle);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_export;
+	}
+
+	/* Get CPU mapping of the memory block */
+	eError = DevmemLocalImport(psContext->hDevConnection,
+	                           hSyncImportHandle,
+	                           PVRSRV_MEMALLOCFLAG_CPU_READABLE,
+	                           &psSyncBlk->hMemDesc,
+	                           &uiImportSize,
+	                           "SyncPrimitiveBlock");
+
+	/*
+		Regardless of success or failure we "undo" the export
+	 */
+	DevmemUnmakeLocalImportHandle(psContext->hDevConnection,
+	                              hSyncImportHandle);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_import;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc,
+	                                  (void **) &psSyncBlk->pui32LinAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cpuvaddr;
+	}
+
+	*ppsSyncBlock = psSyncBlk;
+	return PVRSRV_OK;
+
+	fail_cpuvaddr:
+	DevmemFree(psSyncBlk->hMemDesc);
+	fail_import:
+	fail_export:
+	BridgeFreeSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection),
+	                             psSyncBlk->hServerSyncPrimBlock);
+	fail_blockalloc:
+	OSFreeMem(psSyncBlk);
+	fail_alloc:
+	return eError;
+}
+
+static void
+FreeSyncPrimitiveBlock(SYNC_PRIM_BLOCK *psSyncBlk)
+{
+	SYNC_PRIM_CONTEXT *psContext = psSyncBlk->psContext;
+
+	DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc);
+	DevmemFree(psSyncBlk->hMemDesc);
+	BridgeFreeSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection),
+	                             psSyncBlk->hServerSyncPrimBlock);
+	OSFreeMem(psSyncBlk);
+}
+
+static PVRSRV_ERROR
+SyncPrimBlockImport(RA_PERARENA_HANDLE hArena,
+                    RA_LENGTH_T uSize,
+                    RA_FLAGS_T uFlags,
+                    const IMG_CHAR *pszAnnotation,
+                    RA_BASE_T *puiBase,
+                    RA_LENGTH_T *puiActualSize,
+                    RA_PERISPAN_HANDLE *phImport)
+{
+	SYNC_PRIM_CONTEXT *psContext = hArena;
+	SYNC_PRIM_BLOCK *psSyncBlock = NULL;
+	RA_LENGTH_T uiSpanSize;
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(uFlags);
+
+	/* Check we've not been called with an unexpected size */
+	if (!hArena || sizeof(IMG_UINT32) != uSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input params", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	/*
+		Ensure the syncprim context doesn't go away while we have sync blocks
+		attached to it
+	 */
+	_SyncPrimContextRef(psContext);
+
+	/* Allocate the block of memory */
+	eError = AllocSyncPrimitiveBlock(psContext, &psSyncBlock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Failed to allocate syncprim block (%d)",
+		         __func__, eError));
+		goto fail_syncblockalloc;
+	}
+
+	/* Allocate a span for it */
+	eError = RA_Alloc(psContext->psSpanRA,
+	                  psSyncBlock->ui32SyncBlockSize,
+	                  RA_NO_IMPORT_MULTIPLIER,
+	                  0,
+	                  psSyncBlock->ui32SyncBlockSize,
+	                  pszAnnotation,
+	                  &psSyncBlock->uiSpanBase,
+	                  &uiSpanSize,
+	                  NULL);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_spanalloc;
+	}
+
+	/*
+		There is no reason the span RA should return an allocation larger
+		than we requested
+	 */
+	PVR_ASSERT(uiSpanSize == psSyncBlock->ui32SyncBlockSize);
+
+	*puiBase = psSyncBlock->uiSpanBase;
+	*puiActualSize = psSyncBlock->ui32SyncBlockSize;
+	*phImport = psSyncBlock;
+	return PVRSRV_OK;
+
+	fail_spanalloc:
+	FreeSyncPrimitiveBlock(psSyncBlock);
+	fail_syncblockalloc:
+	_SyncPrimContextUnref(psContext);
+	e0:
+	return eError;
+}
+
+static void
+SyncPrimBlockUnimport(RA_PERARENA_HANDLE hArena,
+                      RA_BASE_T uiBase,
+                      RA_PERISPAN_HANDLE hImport)
+{
+	SYNC_PRIM_CONTEXT *psContext = hArena;
+	SYNC_PRIM_BLOCK *psSyncBlock = hImport;
+
+	if (!psContext || !psSyncBlock || uiBase != psSyncBlock->uiSpanBase)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input params", __func__));
+		return;
+	}
+
+	/* Free the span this import is using */
+	RA_Free(psContext->psSpanRA, uiBase);
+
+	/* Free the syncprim block */
+	FreeSyncPrimitiveBlock(psSyncBlock);
+
+	/*	Drop our reference to the syncprim context */
+	_SyncPrimContextUnref(psContext);
+}
+
+static INLINE IMG_UINT32 SyncPrimGetOffset(SYNC_PRIM *psSyncInt)
+{
+	IMG_UINT64 ui64Temp;
+
+	PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL);
+
+	/* FIXME: Subtracting one 64-bit address from another and then
+	 * implicitly casting the result to a 32-bit number. All call sequences
+	 * that use this function need reviewing; explicit casting added for now.
+	 */
+	ui64Temp = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
+	PVR_ASSERT(ui64Temp<IMG_UINT32_MAX);
+	return (IMG_UINT32)ui64Temp;
+}
+
+static void SyncPrimGetCPULinAddr(SYNC_PRIM *psSyncInt)
+{
+	SYNC_PRIM_BLOCK *psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+
+	psSyncInt->sCommon.pui32LinAddr = psSyncBlock->pui32LinAddr +
+			(SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32));
+}
+
+static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt)
+{
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	{
+		PVRSRV_ERROR eError;
+		IMG_HANDLE hBridge =
+				GetBridgeHandle(psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection);
+
+		if (GetInfoPageDebugFlags(psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+		{
+			if (psSyncInt->u.sLocal.hRecord)
+			{
+				/* remove this sync record */
+				eError = BridgeSyncRecordRemoveByHandle(hBridge,
+				                                        psSyncInt->u.sLocal.hRecord);
+				if (PVRSRV_OK != eError)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: failed to remove SyncRecord", __func__));
+				}
+			}
+		}
+		else
+		{
+			IMG_UINT32 ui32FWAddr = psSyncBlock->ui32FirmwareAddr +
+					SyncPrimGetOffset(psSyncInt);
+
+			eError = BridgeSyncFreeEvent(hBridge, ui32FWAddr);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_WARNING,
+				         "%s: BridgeSyncFreeEvent failed with error: %d",
+				         __func__, eError));
+			}
+		}
+	}
+#if defined(PVRSRV_ENABLE_SYNC_POISONING)
+	(void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_POISON_VALUE);
+#else
+	/* reset the sync prim value as it is freed.
+	 * this guarantees the client sync allocated to the client will
+	 * have a value of zero and the client does not need to
+	 * explicitly initialise the sync value to zero.
+	 * the allocation of the backing memory for the sync prim block
+	 * is done with ZERO_ON_ALLOC so the memory is initially all zero.
+	 */
+	(void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_RESET_VALUE);
+#endif
+
+	RA_Free(psContext->psSubAllocRA, psSyncInt->u.sLocal.uiSpanAddr);
+	OSFreeMem(psSyncInt);
+	_SyncPrimContextUnref(psContext);
+}
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static void SyncPrimServerFree(SYNC_PRIM *psSyncInt)
+{
+	PVRSRV_ERROR eError;
+
+	eError = BridgeServerSyncFree(psSyncInt->u.sServer.hBridge,
+	                              psSyncInt->u.sServer.hServerSync);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimServerFree failed"));
+	}
+	OSFreeMem(psSyncInt);
+}
+#endif
+static void SyncPrimLocalUnref(SYNC_PRIM *psSyncInt)
+{
+	if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalUnref sync already freed"));
+	}
+	else if (0 == OSAtomicDecrement(&psSyncInt->u.sLocal.hRefCount))
+	{
+		SyncPrimLocalFree(psSyncInt);
+	}
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static void SyncPrimLocalRef(SYNC_PRIM *psSyncInt)
+{
+	if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalRef sync use after free"));
+	}
+	else
+	{
+		OSAtomicIncrement(&psSyncInt->u.sLocal.hRefCount);
+	}
+}
+#endif
+
+static IMG_UINT32 SyncPrimGetFirmwareAddrLocal(SYNC_PRIM *psSyncInt)
+{
+	SYNC_PRIM_BLOCK *psSyncBlock;
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	return psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psSyncInt);
+}
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+static IMG_UINT32 SyncPrimGetFirmwareAddrServer(SYNC_PRIM *psSyncInt)
+{
+	return psSyncInt->u.sServer.ui32FirmwareAddr;
+}
+
+#if !defined(__KERNEL__)
+
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandleLocal(SYNC_PRIM *psSyncInt)
+{
+	return GetBridgeHandle(psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection);
+}
+
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandleServer(SYNC_PRIM *psSyncInt)
+{
+	return psSyncInt->u.sServer.hBridge;
+}
+
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	SYNC_PRIM *psSyncInt;
+
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		return _SyncPrimGetBridgeHandleLocal(psSyncInt);
+	}
+	else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+		return _SyncPrimGetBridgeHandleServer(psSyncInt);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_SyncPrimGetBridgeHandle: Invalid sync type"));
+		/*
+			Either the client has given us a bad pointer or there is an
+			error in this module
+		 */
+		return 0;
+	}
+}
+#endif
+
+/*
+	Internal interfaces for management of syncprim block lists
+ */
+static SYNC_BLOCK_LIST *_SyncPrimBlockListCreate(void)
+{
+	SYNC_BLOCK_LIST *psBlockList;
+
+	psBlockList = OSAllocMem(sizeof(SYNC_BLOCK_LIST));
+	if (!psBlockList)
+	{
+		return NULL;
+	}
+
+	psBlockList->ui32BlockCount = 0;
+	psBlockList->ui32BlockListSize = SYNC_BLOCK_LIST_CHUNCK_SIZE;
+
+	psBlockList->papsSyncPrimBlock = OSAllocZMem(sizeof(SYNC_PRIM_BLOCK *)
+	                                             * SYNC_BLOCK_LIST_CHUNCK_SIZE);
+	if (!psBlockList->papsSyncPrimBlock)
+	{
+		OSFreeMem(psBlockList);
+		return NULL;
+	}
+
+	return psBlockList;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListAdd(SYNC_BLOCK_LIST *psBlockList,
+                                          SYNC_PRIM_BLOCK *psSyncPrimBlock)
+{
+	IMG_UINT32 i;
+
+	/* Check the context isn't already on the list */
+	for (i=0;i<psBlockList->ui32BlockCount;i++)
+	{
+		if (psBlockList->papsSyncPrimBlock[i] == psSyncPrimBlock)
+		{
+			return PVRSRV_OK;
+		}
+	}
+
+	/* Check we have space for a new item */
+	if (psBlockList->ui32BlockCount == psBlockList->ui32BlockListSize)
+	{
+		SYNC_PRIM_BLOCK	**papsNewSyncPrimBlock;
+
+		papsNewSyncPrimBlock = OSAllocMem(sizeof(SYNC_PRIM_BLOCK *) *
+		                                  (psBlockList->ui32BlockListSize +
+		                                		  SYNC_BLOCK_LIST_CHUNCK_SIZE));
+		if (!papsNewSyncPrimBlock)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+		OSCachedMemCopy(papsNewSyncPrimBlock,
+		                psBlockList->papsSyncPrimBlock,
+		                sizeof(SYNC_PRIM_BLOCK *) *
+		                psBlockList->ui32BlockListSize);
+
+		OSFreeMem(psBlockList->papsSyncPrimBlock);
+
+		psBlockList->papsSyncPrimBlock = papsNewSyncPrimBlock;
+		psBlockList->ui32BlockListSize += SYNC_BLOCK_LIST_CHUNCK_SIZE;
+	}
+
+	/* Add the context to the list */
+	psBlockList->papsSyncPrimBlock[psBlockList->ui32BlockCount++] = psSyncPrimBlock;
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListBlockToIndex(SYNC_BLOCK_LIST *psBlockList,
+                                                   SYNC_PRIM_BLOCK *psSyncPrimBlock,
+                                                   IMG_UINT32 *pui32Index)
+{
+	IMG_UINT32 i;
+
+	for (i=0;i<psBlockList->ui32BlockCount;i++)
+	{
+		if (psBlockList->papsSyncPrimBlock[i] == psSyncPrimBlock)
+		{
+			*pui32Index = i;
+			return PVRSRV_OK;
+		}
+	}
+
+	return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListHandleArrayCreate(SYNC_BLOCK_LIST *psBlockList,
+                                                        IMG_UINT32 *pui32BlockHandleCount,
+                                                        IMG_HANDLE **ppahHandleList)
+{
+	IMG_HANDLE *pahHandleList;
+	IMG_UINT32 i;
+
+	pahHandleList = OSAllocMem(sizeof(IMG_HANDLE) *
+	                           psBlockList->ui32BlockCount);
+	if (!pahHandleList)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	for (i=0;i<psBlockList->ui32BlockCount;i++)
+	{
+		pahHandleList[i] = psBlockList->papsSyncPrimBlock[i]->hServerSyncPrimBlock;
+	}
+
+	*ppahHandleList = pahHandleList;
+	*pui32BlockHandleCount = psBlockList->ui32BlockCount;
+
+	return PVRSRV_OK;
+}
+
+static void _SyncPrimBlockListHandleArrayDestroy(IMG_HANDLE *pahHandleList)
+{
+	OSFreeMem(pahHandleList);
+}
+
+static IMG_UINT32 _SyncPrimBlockListGetClientValue(SYNC_BLOCK_LIST *psBlockList,
+                                                   IMG_UINT32 ui32BlockIndex,
+                                                   IMG_UINT32 ui32Index)
+{
+	return *((IMG_UINT32 __force *)(psBlockList->papsSyncPrimBlock[ui32BlockIndex]->pui32LinAddr)+ui32Index);
+}
+
+static void _SyncPrimBlockListDestroy(SYNC_BLOCK_LIST *psBlockList)
+{
+	OSFreeMem(psBlockList->papsSyncPrimBlock);
+	OSFreeMem(psBlockList);
+}
+#endif
+
+static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align)
+{
+	PVR_ASSERT(IsPower2(ui32Align));
+	return ExactLog2(ui32Align);
+}
+
+/*
+	External interfaces
+ */
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection,
+                      PSYNC_PRIM_CONTEXT *phSyncPrimContext)
+{
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	psContext = OSAllocMem(sizeof(SYNC_PRIM_CONTEXT));
+	if (psContext == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	psContext->hDevConnection = hDevConnection;
+
+	OSSNPrintf(psContext->azName, SYNC_PRIM_NAME_SIZE, "Sync Prim RA-%p", psContext);
+	OSSNPrintf(psContext->azSpanName, SYNC_PRIM_NAME_SIZE, "Sync Prim span RA-%p", psContext);
+
+	/*
+		Create the RA for sub-allocations of the SyncPrims
+
+		Note:
+		The import size doesn't matter here as the server will pass
+		back the block size when it does the import, which overrides
+		what we specify here.
+	 */
+
+	psContext->psSubAllocRA = RA_Create(psContext->azName,
+	                                    /* Params for imports */
+	                                    _Log2(sizeof(IMG_UINT32)),
+	                                    RA_LOCKCLASS_2,
+	                                    SyncPrimBlockImport,
+	                                    SyncPrimBlockUnimport,
+	                                    psContext,
+	                                    IMG_FALSE);
+	if (psContext->psSubAllocRA == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_suballoc;
+	}
+
+	/*
+		Create the span-management RA
+
+		The RA requires that we work with linear spans. For our use
+		here we don't require this behaviour as we're always working
+		within offsets of blocks (imports). However, we need to keep
+		the RA happy so we create the "span" management RA which
+		ensures that all our imports are added to the RA in a linear
+		fashion.
+	 */
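+	/* In effect, psSpanRA hands out block-sized spans from a flat
+	 * [0, MAX_SYNC_MEM) space, while psSubAllocRA carves those spans into
+	 * IMG_UINT32-sized sync primitives via SyncPrimBlockImport/Unimport.
+	 */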
+	psContext->psSpanRA = RA_Create(psContext->azSpanName,
+	                                /* Params for imports */
+	                                0,
+	                                RA_LOCKCLASS_1,
+	                                NULL,
+	                                NULL,
+	                                NULL,
+	                                IMG_FALSE);
+	if (psContext->psSpanRA == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_span;
+	}
+
+	if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_MEM, 0, NULL))
+	{
+		RA_Delete(psContext->psSpanRA);
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_span;
+	}
+
+	OSAtomicWrite(&psContext->hRefCount, 1);
+
+	*phSyncPrimContext = psContext;
+	return PVRSRV_OK;
+	fail_span:
+	RA_Delete(psContext->psSubAllocRA);
+	fail_suballoc:
+	OSFreeMem(psContext);
+	fail_alloc:
+	return eError;
+}
+
+IMG_INTERNAL void SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext)
+{
+	SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext;
+	if (1 != OSAtomicRead(&psContext->hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s attempted with active references; this may be the result of a race", __func__));
+	}
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+#if defined(__KERNEL__)
+	if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Forcing context destruction due to bad driver state", __func__));
+		OSAtomicWrite(&psContext->hRefCount, 1);
+	}
+#endif
+#endif
+	_SyncPrimContextUnref(psContext);
+}
+
+static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+                                   PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+                                   const IMG_CHAR *pszClassName,
+                                   IMG_BOOL bServerSync)
+{
+	SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM *psNewSync;
+	PVRSRV_ERROR eError;
+	RA_BASE_T uiSpanAddr;
+
+	if (!hSyncPrimContext)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid context", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psNewSync = OSAllocMem(sizeof(SYNC_PRIM));
+	if (psNewSync == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	eError = RA_Alloc(psContext->psSubAllocRA,
+	                  sizeof(IMG_UINT32),
+	                  RA_NO_IMPORT_MULTIPLIER,
+	                  0,
+	                  sizeof(IMG_UINT32),
+	                  "Sync_Prim",
+	                  &uiSpanAddr,
+	                  NULL,
+	                  (RA_PERISPAN_HANDLE *) &psSyncBlock);
+	if (PVRSRV_OK != eError)
+	{
+		goto fail_raalloc;
+	}
+	psNewSync->eType = SYNC_PRIM_TYPE_LOCAL;
+	OSAtomicWrite(&psNewSync->u.sLocal.hRefCount, 1);
+	psNewSync->u.sLocal.uiSpanAddr = uiSpanAddr;
+	psNewSync->u.sLocal.psSyncBlock = psSyncBlock;
+	SyncPrimGetCPULinAddr(psNewSync);
+	*ppsSync = &psNewSync->sCommon;
+	_SyncPrimContextRef(psContext);
+#if defined(PVRSRV_ENABLE_SYNC_POISONING)
+	(void) _SyncPrimSetValue(psNewSync, LOCAL_SYNC_PRIM_RESET_VALUE);
+#endif
+
+	if (GetInfoPageDebugFlags(psSyncBlock->psContext->hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+	{
+		IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+		size_t uiSize;
+
+		if (pszClassName)
+		{
+			uiSize = OSStringNLength(pszClassName, SYNC_MAX_CLASS_NAME_LEN);
+			/* Copy the class name annotation into a fixed-size array */
+			OSCachedMemCopy(szClassName, pszClassName, uiSize);
+			if (uiSize == SYNC_MAX_CLASS_NAME_LEN)
+				szClassName[SYNC_MAX_CLASS_NAME_LEN-1] = '\0';
+			else
+				szClassName[uiSize++] = '\0';
+		}
+		else
+		{
+			/* No class name annotation */
+			uiSize = 0;
+			szClassName[0] = '\0';
+		}
+
+		/* record this sync */
+		eError = BridgeSyncRecordAdd(
+				GetBridgeHandle(psSyncBlock->psContext->hDevConnection),
+				&psNewSync->u.sLocal.hRecord,
+				psSyncBlock->hServerSyncPrimBlock,
+				psSyncBlock->ui32FirmwareAddr,
+				SyncPrimGetOffset(psNewSync),
+				bServerSync,
+				uiSize,
+				szClassName);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: failed to add SyncRecord \"%s\" (%s)",
+					__func__,
+					szClassName,
+					PVRSRVGETERRORSTRING(eError)));
+			psNewSync->u.sLocal.hRecord = NULL;
+		}
+	}
+	else
+	{
+		size_t	uiSize;
+
+		uiSize = OSStringNLength(pszClassName, SYNC_MAX_CLASS_NAME_LEN);
+
+		if (uiSize < SYNC_MAX_CLASS_NAME_LEN)
+			uiSize++;
+		/* uiSize now reflects size used for pszClassName + NUL byte */
+
+		eError = BridgeSyncAllocEvent(GetBridgeHandle(hSyncPrimContext->hDevConnection),
+		                              bServerSync,
+		                              psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psNewSync),
+		                              uiSize,
+		                              pszClassName);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+			         "%s: BridgeSyncAllocEvent failed with error: %d",
+			         __func__,
+			         eError));
+		}
+	}
+
+	return PVRSRV_OK;
+
+	fail_raalloc:
+	OSFreeMem(psNewSync);
+	fail_alloc:
+	return eError;
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+#if defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR SyncPrimAllocForServerSync(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+                                                     PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+                                                     const IMG_CHAR *pszClassName)
+{
+	return _SyncPrimAlloc(hSyncPrimContext,
+	                      ppsSync,
+	                      pszClassName,
+	                      IMG_TRUE);
+}
+#endif
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+                                        PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+                                        const IMG_CHAR *pszClassName)
+{
+	return _SyncPrimAlloc(hSyncPrimContext,
+	                      ppsSync,
+	                      pszClassName,
+	                      IMG_FALSE);
+}
+
+static PVRSRV_ERROR
+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError;
+
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		SYNC_PRIM_BLOCK *psSyncBlock;
+		SYNC_PRIM_CONTEXT *psContext;
+
+		psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+		psContext = psSyncBlock->psContext;
+
+		eError = BridgeSyncPrimSet(GetBridgeHandle(psContext->hDevConnection),
+		                           psSyncBlock->hServerSyncPrimBlock,
+		                           SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32),
+		                           ui32Value);
+	}
+	else
+	{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		eError = BridgeServerSyncPrimSet(psSyncInt->u.sServer.hBridge,
+		                                 psSyncInt->u.sServer.hServerSync,
+		                                 ui32Value);
+#else
+		PVR_DPF((PVR_DBG_ERROR, "%s: Server sync not supported, attempted use of server sync", __func__));
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+	}
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYNC_PRIM *psSyncInt;
+
+	if (!psSync)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: null sync pointer", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		SyncPrimLocalUnref(psSyncInt);
+	}
+	else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		SyncPrimServerFree(psSyncInt);
+#else
+		PVR_DPF((PVR_DBG_ERROR, "%s: Server sync not supported, attempted use of server sync", __func__));
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+	}
+	else
+	{
+		/*
+			Either the client has given us a bad pointer or there is an
+			error in this module
+		 */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __func__));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto err_out;
+	}
+
+	err_out:
+	return eError;
+}
+
+#if defined(NO_HARDWARE)
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYNC_PRIM *psSyncInt;
+
+	if (!psSync)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: null sync pointer", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	/* There is no check for the psSyncInt to be LOCAL as this call
+	   substitutes the Firmware updating a sync and that sync could
+	   be a server one */
+
+	eError = _SyncPrimSetValue(psSyncInt, ui32Value);
+
+	err_out:
+	return eError;
+}
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYNC_PRIM *psSyncInt;
+
+	if (!psSync)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: null sync pointer", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __func__));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto err_out;
+	}
+
+	eError = _SyncPrimSetValue(psSyncInt, ui32Value);
+
+#if defined(PDUMP)
+	SyncPrimPDump(psSync);
+#endif
+	err_out:
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+                                                          IMG_HANDLE *phBlock,
+                                                          IMG_UINT32 *pui32Offset)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYNC_PRIM *psSyncInt;
+
+	if (unlikely(!psSync || !phBlock || !pui32Offset))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer",
+		         __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (likely(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL))
+	{
+		*phBlock = psSyncInt->u.sLocal.psSyncBlock->hServerSyncPrimBlock;
+		*pui32Offset = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: psSync not a Local sync prim (%d)",
+				__func__, psSyncInt->eType));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+	err_out:
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYNC_PRIM *psSyncInt;
+
+	*pui32FwAddr = 0;
+	if (unlikely(!psSync))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		*pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt);
+	}
+	else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		*pui32FwAddr = SyncPrimGetFirmwareAddrServer(psSyncInt);
+#else
+		PVR_DPF((PVR_DBG_ERROR, "%s: Server sync not supported, attempted use of server sync", __func__));
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+	}
+	else
+	{
+		/* Either the client has given us a bad pointer or there is an
+		 * error in this module
+		 */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __func__));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto err_out;
+	}
+
+	err_out:
+	return eError;
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpCreate(IMG_UINT32 ui32SyncCount,
+                              PVRSRV_CLIENT_SYNC_PRIM **papsSyncPrim,
+                              PSYNC_OP_COOKIE *ppsCookie)
+{
+	SYNC_OP_COOKIE *psNewCookie;
+	SYNC_BLOCK_LIST *psSyncBlockList;
+	IMG_UINT32 ui32ServerSyncCount = 0;
+	IMG_UINT32 ui32ClientSyncCount = 0;
+	IMG_UINT32 ui32ServerAllocSize;
+	IMG_UINT32 ui32ClientAllocSize;
+	IMG_UINT32 ui32TotalAllocSize;
+	IMG_UINT32 ui32ServerIndex = 0;
+	IMG_UINT32 ui32ClientIndex = 0;
+	IMG_UINT32 i;
+	IMG_UINT32 ui32SyncBlockCount;
+	IMG_HANDLE hBridge;
+	IMG_HANDLE *pahHandleList;
+	IMG_CHAR *pcPtr;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bServerSync;
+
+	psSyncBlockList = _SyncPrimBlockListCreate();
+
+	if (!psSyncBlockList)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		eError = SyncPrimIsServerSync(papsSyncPrim[i], &bServerSync);
+		if (PVRSRV_OK != eError) goto e1;
+		if (bServerSync)
+		{
+			ui32ServerSyncCount++;
+		}
+		else
+		{
+			SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[i];
+
+			ui32ClientSyncCount++;
+			eError = _SyncPrimBlockListAdd(psSyncBlockList, psSync->u.sLocal.psSyncBlock);
+			if (eError != PVRSRV_OK)
+			{
+				goto e1;
+			}
+		}
+	}
+
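+	/* One IMG_HANDLE plus one flags word per server sync; five IMG_UINT32
+	 * entries per client sync (block index, index, flags, fence value,
+	 * update value), matching the arrays declared in SYNC_OP_COOKIE.
+	 */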
+	ui32ServerAllocSize = ui32ServerSyncCount * (sizeof(IMG_HANDLE) + sizeof(IMG_UINT32));
+	ui32ClientAllocSize = ui32ClientSyncCount * (5 * sizeof(IMG_UINT32));
+	ui32TotalAllocSize = sizeof(SYNC_OP_COOKIE) +
+			(sizeof(PVRSRV_CLIENT_SYNC_PRIM *) * ui32SyncCount) +
+			ui32ServerAllocSize +
+			ui32ClientAllocSize;
+
+	psNewCookie = OSAllocMem(ui32TotalAllocSize);
+	pcPtr = (IMG_CHAR *) psNewCookie;
+
+	if (!psNewCookie)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+
+	/* Setup the pointers */
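+	/* The cookie and all of its arrays live in a single allocation; pcPtr
+	 * walks the buffer carving out each sub-array in the order the members
+	 * are declared, and the final position is validated against
+	 * ui32TotalAllocSize below.
+	 */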
+	pcPtr += sizeof(SYNC_OP_COOKIE);
+	psNewCookie->papsSyncPrim = (PVRSRV_CLIENT_SYNC_PRIM **) pcPtr;
+
+	pcPtr += sizeof(PVRSRV_CLIENT_SYNC_PRIM *) * ui32SyncCount;
+	psNewCookie->paui32SyncBlockIndex = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32Index = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32Flags = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32FenceValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32UpdateValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->pahServerSync = (IMG_HANDLE *) pcPtr;
+	pcPtr += sizeof(IMG_HANDLE) * ui32ServerSyncCount;
+
+	psNewCookie->paui32ServerFlags = (IMG_UINT32 *) pcPtr;
+	pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+
+	/* Check the pointer setup went ok */
+	if (pcPtr != ((IMG_CHAR *) psNewCookie) + ui32TotalAllocSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: cookie setup failed", __func__));
+		eError = PVRSRV_ERROR_INTERNAL_ERROR;
+		goto e2;
+	}
+
+	psNewCookie->ui32SyncCount = ui32SyncCount;
+	psNewCookie->ui32ServerSyncCount = ui32ServerSyncCount;
+	psNewCookie->ui32ClientSyncCount = ui32ClientSyncCount;
+	psNewCookie->psSyncBlockList = psSyncBlockList;
+
+	/*
+		Get the bridge handle from the 1st sync.
+
+		Note: We assume all syncs have been created with the same
+			  services connection.
+	 */
+	eError = SyncPrimIsServerSync(papsSyncPrim[0], &bServerSync);
+	if (PVRSRV_OK != eError) goto e2;
+	if (bServerSync)
+	{
+		SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[0];
+
+		hBridge = psSync->u.sServer.hBridge;
+	}
+	else
+	{
+		SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[0];
+
+		hBridge = GetBridgeHandle(psSync->u.sLocal.psSyncBlock->psContext->hDevConnection);
+	}
+
+	psNewCookie->hBridge = hBridge;
+
+	if (ui32ServerSyncCount)
+	{
+		psNewCookie->bHaveServerSync = IMG_TRUE;
+	}
+	else
+	{
+		psNewCookie->bHaveServerSync = IMG_FALSE;
+	}
+
+	/* Fill in the server and client sync data */
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[i];
+
+		eError = SyncPrimIsServerSync(papsSyncPrim[i], &bServerSync);
+		if (PVRSRV_OK != eError) goto e2;
+		if (bServerSync)
+		{
+			psNewCookie->pahServerSync[ui32ServerIndex] = psSync->u.sServer.hServerSync;
+
+			ui32ServerIndex++;
+		}
+		else
+		{
+			/* Location of sync */
+			eError = _SyncPrimBlockListBlockToIndex(psSyncBlockList,
+			                                        psSync->u.sLocal.psSyncBlock,
+			                                        &psNewCookie->paui32SyncBlockIndex[ui32ClientIndex]);
+			if (eError != PVRSRV_OK)
+			{
+				goto e2;
+			}
+
+			/* Work out the index of the sync */
+			psNewCookie->paui32Index[ui32ClientIndex] =
+					SyncPrimGetOffset(psSync)/sizeof(IMG_UINT32);
+
+			ui32ClientIndex++;
+		}
+
+		psNewCookie->papsSyncPrim[i] = papsSyncPrim[i];
+	}
+
+	eError = _SyncPrimBlockListHandleArrayCreate(psSyncBlockList,
+	                                             &ui32SyncBlockCount,
+	                                             &pahHandleList);
+	if (eError !=PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	/*
+		Create the server side cookie. Here we pass in all the unchanging
+		data so we only need to pass in the minimum at takeop time
+	 */
+	eError = BridgeSyncPrimOpCreate(hBridge,
+	                                ui32SyncBlockCount,
+	                                pahHandleList,
+	                                psNewCookie->ui32ClientSyncCount,
+	                                psNewCookie->paui32SyncBlockIndex,
+	                                psNewCookie->paui32Index,
+	                                psNewCookie->ui32ServerSyncCount,
+	                                psNewCookie->pahServerSync,
+	                                &psNewCookie->hServerCookie);
+
+	/* Free the handle list regardless of error */
+	_SyncPrimBlockListHandleArrayDestroy(pahHandleList);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	/* Increase the reference count on all referenced local sync prims
+	 * so that they cannot be freed until this Op is finished with
+	 */
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		SYNC_PRIM *psSyncInt;
+		psSyncInt = IMG_CONTAINER_OF(papsSyncPrim[i], SYNC_PRIM, sCommon);
+		if (SYNC_PRIM_TYPE_LOCAL == psSyncInt->eType)
+		{
+			SyncPrimLocalRef(psSyncInt);
+		}
+	}
+
+	*ppsCookie = psNewCookie;
+	return PVRSRV_OK;
+
+	e2:
+	OSFreeMem(psNewCookie);
+	e1:
+	_SyncPrimBlockListDestroy(psSyncBlockList);
+	e0:
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpTake(PSYNC_OP_COOKIE psCookie,
+                            IMG_UINT32 ui32SyncCount,
+                            PVRSRV_CLIENT_SYNC_PRIM_OP *pasSyncOp)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32ServerIndex = 0;
+	IMG_UINT32 ui32ClientIndex = 0;
+	IMG_UINT32 i;
+	IMG_BOOL bServerSync;
+
+	/* Copy client sync operations */
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		/*
+			Sanity check the client passes in the same syncs as the
+			ones we got at create time
+		 */
+		if (psCookie->papsSyncPrim[i] != pasSyncOp[i].psSync)
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto e0;
+		}
+
+		eError = SyncPrimIsServerSync(pasSyncOp[i].psSync, &bServerSync);
+		if (PVRSRV_OK != eError) goto e0;
+		if (bServerSync)
+		{
+			psCookie->paui32ServerFlags[ui32ServerIndex] =
+					pasSyncOp[i].ui32Flags;
+
+			ui32ServerIndex++;
+		}
+		else
+		{
+			/* Client operation information */
+			psCookie->paui32Flags[ui32ClientIndex] =
+					pasSyncOp[i].ui32Flags;
+			psCookie->paui32FenceValue[ui32ClientIndex] =
+					pasSyncOp[i].ui32FenceValue;
+			psCookie->paui32UpdateValue[ui32ClientIndex] =
+					pasSyncOp[i].ui32UpdateValue;
+
+			ui32ClientIndex++;
+		}
+	}
+
+	eError = BridgeSyncPrimOpTake(psCookie->hBridge,
+	                              psCookie->hServerCookie,
+	                              psCookie->ui32ClientSyncCount,
+	                              psCookie->paui32Flags,
+	                              psCookie->paui32FenceValue,
+	                              psCookie->paui32UpdateValue,
+	                              psCookie->ui32ServerSyncCount,
+	                              psCookie->paui32ServerFlags);
+
+	e0:
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpReady(PSYNC_OP_COOKIE psCookie,
+                             IMG_BOOL *pbReady)
+{
+	PVRSRV_ERROR eError;
+	if (!psCookie)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	/*
+		If we have a server sync we have no choice
+		but to do the check in the server
+	 */
+	if (psCookie->bHaveServerSync)
+	{
+		eError = BridgeSyncPrimOpReady(psCookie->hBridge,
+		                               psCookie->hServerCookie,
+		                               pbReady);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to do sync check in server (Error = %d)",
+					__func__, eError));
+			goto e0;
+		}
+	}
+	else
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 ui32SnapShot;
+		IMG_BOOL bReady = IMG_TRUE;
+
+		for (i=0;i<psCookie->ui32ClientSyncCount;i++)
+		{
+			if ((psCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK) == 0)
+			{
+				continue;
+			}
+
+			ui32SnapShot = _SyncPrimBlockListGetClientValue(psCookie->psSyncBlockList,
+			                                                psCookie->paui32SyncBlockIndex[i],
+			                                                psCookie->paui32Index[i]);
+			if (ui32SnapShot != psCookie->paui32FenceValue[i])
+			{
+				bReady = IMG_FALSE;
+				break;
+			}
+		}
+
+		*pbReady = bReady;
+	}
+
+	return PVRSRV_OK;
+	e0:
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpComplete(PSYNC_OP_COOKIE psCookie)
+{
+	PVRSRV_ERROR eError;
+
+	eError = BridgeSyncPrimOpComplete(psCookie->hBridge,
+	                                  psCookie->hServerCookie);
+
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpDestroy(PSYNC_OP_COOKIE psCookie)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 i;
+
+	eError = BridgeSyncPrimOpDestroy(psCookie->hBridge, psCookie->hServerCookie);
+	if (PVRSRV_OK != eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to destroy SyncPrimOp (Error = %d)",
+				__func__, eError));
+		goto err_out;
+	}
+
+	/* Decrease the reference count on all referenced local sync prims
+	 * so that they can be freed now this Op is finished with
+	 */
+	for (i=0;i<psCookie->ui32SyncCount;i++)
+	{
+		SYNC_PRIM *psSyncInt;
+		psSyncInt = IMG_CONTAINER_OF(psCookie->papsSyncPrim[i], SYNC_PRIM, sCommon);
+		if (SYNC_PRIM_TYPE_LOCAL == psSyncInt->eType)
+		{
+			SyncPrimLocalUnref(psSyncInt);
+		}
+	}
+
+	_SyncPrimBlockListDestroy(psCookie->psSyncBlockList);
+	OSFreeMem(psCookie);
+
+	err_out:
+	return eError;
+}
+
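+/*
+	Typical SyncPrimOp lifecycle (a sketch; error handling omitted, and the
+	sync and op arrays are assumed to be populated by the caller):
+
+		PSYNC_OP_COOKIE psCookie;
+		IMG_BOOL bReady = IMG_FALSE;
+
+		SyncPrimOpCreate(ui32SyncCount, papsSyncPrim, &psCookie);
+		SyncPrimOpTake(psCookie, ui32SyncCount, pasSyncOp);
+		while (!bReady)
+		{
+			SyncPrimOpReady(psCookie, &bReady);
+		}
+		SyncPrimOpComplete(psCookie);
+		SyncPrimOpDestroy(psCookie);
+*/
+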
+#if !defined(__KERNEL__)
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerAlloc(SYNC_BRIDGE_HANDLE hBridge,
+                                 PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+                                 const IMG_CHAR *pszClassName
+                                 PVR_DBG_FILELINE_PARAM)
+{
+	IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+	SYNC_PRIM *psNewSync;
+	PVRSRV_ERROR eError;
+	size_t uiSize;
+
+#if !defined(PVR_SYNC_PRIM_ALLOC_TRACE)
+	PVR_DBG_FILELINE_UNREF();
+#endif
+	psNewSync = OSAllocZMem(sizeof(SYNC_PRIM));
+	if (psNewSync == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	if (pszClassName)
+	{
+		uiSize = OSStringNLength(pszClassName, SYNC_MAX_CLASS_NAME_LEN);
+		/* Copy the class name annotation into a fixed-size array */
+		OSCachedMemCopy(szClassName, pszClassName, uiSize);
+		/* NUL-terminate the ClassName if it wasn't already */
+		if (uiSize == SYNC_MAX_CLASS_NAME_LEN)
+			szClassName[SYNC_MAX_CLASS_NAME_LEN-1] = '\0';
+		else
+			szClassName[uiSize++] = '\0';
+	}
+	else
+	{
+		/* No class name annotation */
+		uiSize = 0;
+		szClassName[0] = '\0';
+	}
+
+	eError = BridgeServerSyncAlloc(hBridge,
+	                               &psNewSync->u.sServer.hServerSync,
+	                               &psNewSync->u.sServer.ui32FirmwareAddr,
+	                               uiSize,
+	                               szClassName);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+#if defined(PVR_SYNC_PRIM_ALLOC_TRACE)
+	PVR_DPF((PVR_DBG_WARNING, "Allocated sync=server fw=0x%x [%p]" PVR_DBG_FILELINE_FMT,
+			psNewSync->u.sServer.ui32FirmwareAddr, &psNewSync->sCommon PVR_DBG_FILELINE_ARG));
+#endif
+
+	psNewSync->eType = SYNC_PRIM_TYPE_SERVER;
+	psNewSync->u.sServer.hBridge = hBridge;
+	*ppsSync = &psNewSync->sCommon;
+
+	return PVRSRV_OK;
+	e1:
+	OSFreeMem(psNewSync);
+	e0:
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerGetStatus(IMG_UINT32 ui32SyncCount,
+                                     PVRSRV_CLIENT_SYNC_PRIM **papsSync,
+                                     IMG_UINT32 *pui32UID,
+                                     IMG_UINT32 *pui32FWAddr,
+                                     IMG_UINT32 *pui32CurrentOp,
+                                     IMG_UINT32 *pui32NextOp)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+	SYNC_BRIDGE_HANDLE hBridge = NULL;
+	IMG_HANDLE *pahServerHandle;
+	IMG_BOOL bServerSync;
+
+	if (papsSync[0])
+	{
+		hBridge = _SyncPrimGetBridgeHandle(papsSync[0]);
+	}
+	if (!hBridge)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid Sync connection", __func__));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto e0;
+	}
+
+	pahServerHandle = OSAllocMem(sizeof(IMG_HANDLE) * ui32SyncCount);
+	if (pahServerHandle == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	/*
+		Check that all the syncs we've been passed are server syncs
+		and that they are all on the same connection.
+	 */
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		SYNC_PRIM *psIntSync = IMG_CONTAINER_OF(papsSync[i], SYNC_PRIM, sCommon);
+
+		eError = SyncPrimIsServerSync(papsSync[i], &bServerSync);
+		if (PVRSRV_OK != eError) goto e1;
+		if (!bServerSync)
+		{
+			eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+			goto e1;
+		}
+
+		if (!papsSync[i] || hBridge != _SyncPrimGetBridgeHandle(papsSync[i]))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Sync connection is different", __func__));
+			eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+			goto e1;
+		}
+
+		pahServerHandle[i] = psIntSync->u.sServer.hServerSync;
+	}
+
+	eError = BridgeServerSyncGetStatus(hBridge,
+	                                   ui32SyncCount,
+	                                   pahServerHandle,
+	                                   pui32UID,
+	                                   pui32FWAddr,
+	                                   pui32CurrentOp,
+	                                   pui32NextOp);
+	OSFreeMem(pahServerHandle);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+	return PVRSRV_OK;
+
+	e1:
+	OSFreeMem(pahServerHandle);
+	e0:
+	return eError;
+}
+
+#endif /* defined(__KERNEL__) */
+#endif /* defined(SUPPORT_SERVER_SYNC_IMPL) */
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimIsServerSync(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_BOOL *pbServerSync)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYNC_PRIM *psSyncInt;
+
+	if (unlikely(!psSync))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		*pbServerSync = IMG_FALSE;
+	}
+	else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		*pbServerSync = IMG_TRUE;
+#else
+		PVR_DPF((PVR_DBG_ERROR, "%s: Server sync not supported, attempted use of server sync", __func__));
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+	}
+	else
+	{
+		/* Either the client has given us a bad pointer or there is an
+		 * error in this module
+		 */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __func__));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto e0;
+	}
+
+	e0:
+	return eError;
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+IMG_INTERNAL
+IMG_HANDLE SyncPrimGetServerHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	SYNC_PRIM *psSyncInt;
+
+	if (unlikely(!psSync))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __func__));
+		goto e0;
+	}
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (likely(psSyncInt->eType == SYNC_PRIM_TYPE_SERVER))
+	{
+		return psSyncInt->u.sServer.hServerSync;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid sync type (%d)",
+				__func__, psSyncInt->eType));
+		goto e0;
+	}
+	e0:
+	return (IMG_HANDLE) NULL;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerQueueOp(PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOp)
+{
+	SYNC_PRIM *psSyncInt;
+	IMG_BOOL bUpdate;
+	PVRSRV_ERROR eError;
+
+	if (!psSyncOp)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	psSyncInt = IMG_CONTAINER_OF(psSyncOp->psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_SERVER)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid sync type (%d)",
+				__func__, psSyncInt->eType));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto e0;
+	}
+	if (0 == psSyncOp->ui32Flags)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: no sync flags", __func__));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto e0;
+	}
+
+	if (psSyncOp->ui32Flags & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+	{
+		bUpdate = IMG_TRUE;
+	}
+	else
+	{
+		bUpdate = IMG_FALSE;
+	}
+
+	eError = BridgeServerSyncQueueHWOp(psSyncInt->u.sServer.hBridge,
+	                                   psSyncInt->u.sServer.hServerSync,
+	                                   bUpdate,
+	                                   &psSyncOp->ui32FenceValue,
+	                                   &psSyncOp->ui32UpdateValue);
+	e0:
+	return eError;
+}
+#endif
+
+#if defined(PDUMP)
+IMG_INTERNAL void SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	SYNC_PRIM *psSyncInt;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSync != NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __func__));
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	eError = BridgeSyncPrimPDump(GetBridgeHandle(psContext->hDevConnection),
+	                             psSyncBlock->hServerSyncPrimBlock,
+	                             SyncPrimGetOffset(psSyncInt));
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__func__, eError));
+	}
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+	SYNC_PRIM *psSyncInt;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSync != NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __func__));
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	eError = BridgeSyncPrimPDumpValue(GetBridgeHandle(psContext->hDevConnection),
+	                                  psSyncBlock->hServerSyncPrimBlock,
+	                                  SyncPrimGetOffset(psSyncInt),
+	                                  ui32Value);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__func__, eError));
+	}
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+                                   IMG_UINT32 ui32Value,
+                                   IMG_UINT32 ui32Mask,
+                                   PDUMP_POLL_OPERATOR eOperator,
+                                   IMG_UINT32 ui32PDumpFlags)
+{
+	SYNC_PRIM *psSyncInt;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSync != NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Invalid sync type (expected SYNC_PRIM_TYPE_LOCAL)",
+		         __func__));
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	eError = BridgeSyncPrimPDumpPol(GetBridgeHandle(psContext->hDevConnection),
+	                                psSyncBlock->hServerSyncPrimBlock,
+	                                SyncPrimGetOffset(psSyncInt),
+	                                ui32Value,
+	                                ui32Mask,
+	                                eOperator,
+	                                ui32PDumpFlags);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__func__, eError));
+	}
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+IMG_INTERNAL void SyncPrimOpPDumpPol(PSYNC_OP_COOKIE psCookie,
+                                     PDUMP_POLL_OPERATOR eOperator,
+                                     IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psCookie != NULL);
+
+	eError = BridgeSyncPrimOpPDumpPol(psCookie->hBridge,
+	                                  psCookie->hServerCookie,
+	                                  eOperator,
+	                                  ui32PDumpFlags);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__func__, eError));
+	}
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+#endif
+
+IMG_INTERNAL void SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+                                   IMG_UINT64 uiWriteOffset,
+                                   IMG_UINT64 uiPacketSize,
+                                   IMG_UINT64 uiBufferSize)
+{
+	SYNC_PRIM *psSyncInt;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSync != NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __func__));
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	/* FIXME: uiWriteOffset, uiPacketSize, uiBufferSize were changed to
+	 * 64-bit quantities to resolve Windows compiler warnings.
+	 * However the bridge arguments are only 32-bit, hence compiler
+	 * warnings about implicit casts and loss of data.
+	 * Added explicit casts and asserts to remove the warnings.
+	 */
+#if defined(LINUX) && defined(__i386__)
+	PVR_ASSERT(uiWriteOffset<IMG_UINT32_MAX);
+	PVR_ASSERT(uiPacketSize<IMG_UINT32_MAX);
+	PVR_ASSERT(uiBufferSize<IMG_UINT32_MAX);
+#endif
+	eError = BridgeSyncPrimPDumpCBP(GetBridgeHandle(psContext->hDevConnection),
+	                                psSyncBlock->hServerSyncPrimBlock,
+	                                SyncPrimGetOffset(psSyncInt),
+	                                (IMG_UINT32)uiWriteOffset,
+	                                (IMG_UINT32)uiPacketSize,
+	                                (IMG_UINT32)uiBufferSize);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__func__, eError));
+	}
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+#endif
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync.h
new file mode 100644
index 0000000..0a321dd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync.h
@@ -0,0 +1,386 @@
+/*************************************************************************/ /*!
+@File
+@Title          Synchronisation interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the client side interface for synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_
+#define _SYNC_
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include <powervr/sync_external.h>
+#include "pdumpdefs.h"
+#include "dllist.h"
+#include "pvr_debug.h"
+
+#include "device_connection.h"
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+/*************************************************************************/ /*!
+@Function       SyncPrimContextCreate
+
+@Description    Create a new synchronisation context
+
+@Input          hDevConnection          Device connection handle
+
+@Output         hSyncPrimContext        Handle to the created synchronisation
+                                        primitive context
+
+@Return         PVRSRV_OK if the synchronisation primitive context was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection,
+					  PSYNC_PRIM_CONTEXT	*hSyncPrimContext);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimContextDestroy
+
+@Description    Destroy a synchronisation context
+
+@Input          hSyncPrimContext        Handle to the synchronisation
+                                        primitive context to destroy
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimAlloc
+
+@Description    Allocate a new synchronisation primitive on the specified
+                synchronisation context
+
+@Input          hSyncPrimContext        Handle to the synchronisation
+                                        primitive context
+
+@Output         ppsSync                 Created synchronisation primitive
+
+@Input          pszClassName            Sync source annotation
+
+@Return         PVRSRV_OK if the synchronisation primitive was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimAlloc(PSYNC_PRIM_CONTEXT		hSyncPrimContext,
+			  PVRSRV_CLIENT_SYNC_PRIM	**ppsSync,
+			  const IMG_CHAR 			*pszClassName);
+
+#if defined(__KERNEL__)
+/*************************************************************************/ /*!
+@Function       SyncPrimAllocForServerSync
+
+@Description    Allocate a new synchronisation primitive on the specified
+                synchronisation context for a server sync
+
+@Input          hSyncPrimContext        Handle to the synchronisation
+                                        primitive context
+
+@Output         ppsSync                 Created synchronisation primitive
+
+@Input          pszClassName            Sync source annotation
+
+@Return         PVRSRV_OK if the synchronisation primitive was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimAllocForServerSync(PSYNC_PRIM_CONTEXT   hSyncPrimContext,
+						PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+						const IMG_CHAR          *pszClassName);
+#endif
+
+/*************************************************************************/ /*!
+@Function       SyncPrimFree
+
+@Description    Free a synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to free
+
+@Return         PVRSRV_OK if the synchronisation primitive was
+                successfully freed
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimSet
+
+@Description    Set the synchronisation primitive to a value
+
+@Input          psSync                  The synchronisation primitive to set
+
+@Input          ui32Value               Value to set it to
+
+@Return         PVRSRV_OK on success
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
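+
+/* Illustrative usage sketch (comment only, not driver code): a typical
+ * client lifecycle built from the interfaces declared above. Error
+ * handling is elided, and hDevConnection is assumed to be a valid
+ * SHARED_DEV_CONNECTION obtained elsewhere; the class name string is
+ * hypothetical.
+ *
+ *     PSYNC_PRIM_CONTEXT hSyncPrimContext;
+ *     PVRSRV_CLIENT_SYNC_PRIM *psSync;
+ *
+ *     SyncPrimContextCreate(hDevConnection, &hSyncPrimContext);
+ *     SyncPrimAlloc(hSyncPrimContext, &psSync, "example sync");
+ *     SyncPrimSet(psSync, 0);                  initialise to a known value
+ *     ...  submit work referencing psSync  ...
+ *     SyncPrimFree(psSync);
+ *     SyncPrimContextDestroy(hSyncPrimContext);
+ */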
+
+#if defined(NO_HARDWARE)
+
+/*************************************************************************/ /*!
+@Function       SyncPrimNoHwUpdate
+
+@Description    Updates the synchronisation primitive value (in NoHardware drivers)
+
+@Input          psSync                  The synchronisation primitive to update
+
+@Input          ui32Value               Value to update it to
+
+@Return         PVRSRV_OK on success
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+#endif
+
+PVRSRV_ERROR
+SyncPrimServerAlloc(SYNC_BRIDGE_HANDLE hBridge,
+					PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+					const IMG_CHAR		*pszClassName
+					PVR_DBG_FILELINE_PARAM);
+
+PVRSRV_ERROR
+SyncPrimServerGetStatus(IMG_UINT32 ui32SyncCount,
+						PVRSRV_CLIENT_SYNC_PRIM **papsSync,
+						IMG_UINT32 *pui32UID,
+						IMG_UINT32 *pui32FWAddr,
+						IMG_UINT32 *pui32CurrentOp,
+						IMG_UINT32 *pui32NextOp);
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+PVRSRV_ERROR
+SyncPrimServerQueueOp(PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOp);
+#endif
+
+PVRSRV_ERROR
+SyncPrimIsServerSync(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_BOOL *pbServerSync);
+
+IMG_HANDLE
+SyncPrimGetServerHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+PVRSRV_ERROR
+SyncPrimOpCreate(IMG_UINT32 ui32SyncCount,
+				 PVRSRV_CLIENT_SYNC_PRIM **papsSyncPrim,
+				 PSYNC_OP_COOKIE *ppsCookie);
+
+PVRSRV_ERROR
+SyncPrimOpTake(PSYNC_OP_COOKIE psCookie,
+			   IMG_UINT32 ui32SyncCount,
+			   PVRSRV_CLIENT_SYNC_PRIM_OP *pasSyncOp);
+
+PVRSRV_ERROR
+SyncPrimOpReady(PSYNC_OP_COOKIE psCookie,
+				IMG_BOOL *pbReady);
+
+PVRSRV_ERROR
+SyncPrimOpComplete(PSYNC_OP_COOKIE psCookie);
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpDestroy(PSYNC_OP_COOKIE psCookie);
+
+#endif
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function       SyncPrimPDump
+
+@Description    PDump the current value of the synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimPDumpValue
+
+@Description    PDump the given ui32Value as the value of the
+                synchronisation primitive (regardless of its current value).
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Input          ui32Value               Value to give the sync prim in the PDump
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimPDumpPol
+
+@Description    Do a PDump poll of the synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Input          ui32Value               Value to poll for
+
+@Input          ui32Mask                PDump mask operator
+
+@Input          ui32PDumpFlags          PDump flags
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 IMG_UINT32 ui32Value,
+				 IMG_UINT32 ui32Mask,
+				 PDUMP_POLL_OPERATOR eOperator,
+				 IMG_UINT32 ui32PDumpFlags);
+
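+/* Illustrative sketch (comment only): a PDump poll waiting for the sync
+ * primitive to reach the value 1, comparing all 32 bits. The operator and
+ * flag names are assumed from pdumpdefs.h and may differ between driver
+ * versions.
+ *
+ *     SyncPrimPDumpPol(psSync,
+ *                      1,           value to poll for
+ *                      0xFFFFFFFF,  mask: compare all bits
+ *                      PDUMP_POLL_OPERATOR_EQUAL,
+ *                      PDUMP_FLAGS_CONTINUOUS);
+ */
+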
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+/*************************************************************************/ /*!
+@Function       SyncPrimOpPDumpPol
+
+@Description    Do a PDump poll of all the synchronisation primitives on
+                this operation cookie.
+
+@Input          psCookie                Operation cookie
+
+@Input          ui32PDumpFlags          PDump flags
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimOpPDumpPol(PSYNC_OP_COOKIE psCookie,
+				 PDUMP_POLL_OPERATOR eOperator,
+				 IMG_UINT32 ui32PDumpFlags);
+#endif
+
+/*************************************************************************/ /*!
+@Function       SyncPrimPDumpCBP
+
+@Description    Do a PDump CB poll using the synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Input          uiWriteOffset           Current write offset of buffer
+
+@Input          uiPacketSize            Size of the packet to write into CB
+
+@Input          uiBufferSize            Size of the CB
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 IMG_UINT64 uiWriteOffset,
+				 IMG_UINT64 uiPacketSize,
+				 IMG_UINT64 uiBufferSize);
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpValue)
+#endif
+static INLINE void
+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDump)
+#endif
+static INLINE void
+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpPol)
+#endif
+static INLINE void
+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 IMG_UINT32 ui32Value,
+				 IMG_UINT32 ui32Mask,
+				 PDUMP_POLL_OPERATOR eOperator,
+				 IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpCBP)
+#endif
+static INLINE void
+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 IMG_UINT64 uiWriteOffset,
+				 IMG_UINT64 uiPacketSize,
+				 IMG_UINT64 uiBufferSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+}
+#endif	/* PDUMP */
+#endif	/* _SYNC_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint.c
new file mode 100644
index 0000000..8390b9f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint.c
@@ -0,0 +1,2683 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services synchronisation checkpoint interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements server side code for the services
+                synchronisation checkpoint interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "osfunc.h"
+#include "dllist.h"
+#include "sync.h"
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+#include "sync_checkpoint_internal_fw.h"
+#include "sync_checkpoint_init.h"
+#include "lock.h"
+#include "log2.h"
+#include "pvrsrv.h"
+#include "pdump_km.h"
+#include "info_page.h"
+
+#include "pvrsrv_sync_km.h"
+#include "rgxhwperf.h"
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/* Enable this to turn on debug relating to the creation and
+   resolution of contexts */
+#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0
+
+/* Enable this to turn on debug relating to the creation and
+   resolution of fences */
+#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0
+
+/* Enable this to turn on debug relating to the sync checkpoint
+   allocation and freeing */
+#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0
+
+/* Enable this to turn on debug relating to the sync checkpoint
+   enqueuing and signalling */
+#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0
+
+/* Enable this to turn on debug relating to the sync checkpoint pool */
+#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0
+
+/* Enable this to turn on debug relating to sync checkpoint UFO
+   lookup */
+#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0
+
+/* Enable this to turn on sync checkpoint deferred cleanup debug
+ * (for syncs we have been told to free but which still have
+ * outstanding FW operations remaining (enqueued in CCBs)).
+ */
+#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0
+
+#else
+
+#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0
+
+#endif
+
+/* Set the size of the sync checkpoint pool (not used if 0).
+ * A pool will be maintained for each sync checkpoint context.
+ */
+#define SYNC_CHECKPOINT_POOL_SIZE	128
+/* The 'sediment' value represents the minimum number of
+ * sync checkpoints which must be in the pool before one
+ * will be allocated from the pool rather than from memory.
+ * This effectively helps avoid re-use of a sync checkpoint
+ * just after it has been returned to the pool, making
+ * debugging somewhat easier.
+ */
+#define SYNC_CHECKPOINT_POOL_SEDIMENT 20
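+/* For example, with a sediment of 20 a freshly-returned checkpoint is
+ * only recycled once the pool holds at least 20 entries; below that
+ * threshold allocations fall back to memory, so a just-freed checkpoint
+ * is not handed straight back out.
+ */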
+
+#define SYNC_CHECKPOINT_BLOCK_LIST_CHUNK_SIZE  10
+
+/*
+	This defines the maximum amount of synchronisation memory
+	that can be allocated per sync checkpoint context.
+	In reality this number is meaningless as we would run out
+	of synchronisation memory before we reach this limit, but
+	we need to provide a size to the span RA.
+ */
+#define MAX_SYNC_CHECKPOINT_MEM  (4 * 1024 * 1024)
+
+
+typedef struct _SYNC_CHECKPOINT_BLOCK_LIST_
+{
+	IMG_UINT32            ui32BlockCount;            /*!< Number of blocks in the list */
+	IMG_UINT32            ui32BlockListSize;         /*!< Size of the block array */
+	SYNC_CHECKPOINT_BLOCK **papsSyncCheckpointBlock; /*!< Array of sync checkpoint blocks */
+} SYNC_CHECKPOINT_BLOCK_LIST;
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_
+{
+	SHARED_DEV_CONNECTION					psDeviceNode;
+	PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN	pfnFenceResolve;
+	PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN		pfnFenceCreate;
+	/*
+	 *  Used as the head of a linked list of sync checkpoints for which
+	 *  SyncCheckpointFree() has been called, but which have outstanding
+	 *  FW operations (enqueued in CCBs).
+	 *  This list will be checked whenever SyncCheckpointFree() is
+	 *  called, and when SyncCheckpointContextDestroy() is called.
+	 */
+	DLLIST_NODE								sDeferredCleanupListHead;
+	/* Lock to protect the deferred cleanup list */
+	POS_LOCK								hDeferredCleanupListLock;
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+	_SYNC_CHECKPOINT						*psSyncCheckpointPool[SYNC_CHECKPOINT_POOL_SIZE];
+	IMG_BOOL								bSyncCheckpointPoolFull;
+	IMG_BOOL								bSyncCheckpointPoolValid;
+	IMG_UINT32								ui32SyncCheckpointPoolCount;
+	IMG_UINT32								ui32SyncCheckpointPoolWp;
+	IMG_UINT32								ui32SyncCheckpointPoolRp;
+	POS_LOCK								hSyncCheckpointPoolLock;
+#endif
+} _SYNC_CHECKPOINT_CONTEXT_CTL;
+
+/* This is the max number of sync checkpoint records we will search or
+ * dump at any one time.
+ */
+#define SYNC_CHECKPOINT_RECORD_LIMIT 20000
+
+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1))
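+/* For example, DECREMENT_WITH_WRAP(3, 8) == 2 and
+ * DECREMENT_WITH_WRAP(0, 8) == 7: stepping backwards through a ring
+ * buffer of size sz wraps from slot 0 to slot sz-1.
+ */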
+
+struct SYNC_CHECKPOINT_RECORD
+{
+	PVRSRV_DEVICE_NODE		*psDevNode;
+	SYNC_CHECKPOINT_BLOCK	*psSyncCheckpointBlock;	/*!< handle to SYNC_CHECKPOINT_BLOCK */
+	IMG_UINT32				ui32SyncOffset; 		/*!< offset to sync in block */
+	IMG_UINT32				ui32FwBlockAddr;
+	IMG_PID					uiPID;
+	IMG_UINT32				ui32UID;
+	IMG_UINT64				ui64OSTime;
+	DLLIST_NODE				sNode;
+	IMG_CHAR				szClassName[PVRSRV_SYNC_NAME_LENGTH];
+	PSYNC_CHECKPOINT		pSyncCheckpt;
+};
+
+static IMG_BOOL gbSyncCheckpointInit = IMG_FALSE;
+static PFN_SYNC_CHECKPOINT_STRUCT *g_psSyncCheckpointPfnStruct;
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+static _SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext);
+static IMG_BOOL _PutCheckpointInPool(_SYNC_CHECKPOINT *psSyncCheckpoint);
+static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext);
+#endif
+
+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
+static IMG_UINT32 gui32NumSyncCheckpointContexts = 0;
+#endif
+
+/* Defined values indicating the status of a sync checkpoint, stored in
+ * the checkpoint structure's memory */
+#define SYNC_CHECKPOINT_PATTERN_IN_USE 0x1a1aa
+#define SYNC_CHECKPOINT_PATTERN_IN_POOL 0x2b2bb
+#define SYNC_CHECKPOINT_PATTERN_FREED 0x3c3cc
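+/* A checkpoint's ui32ValidationCheck field is expected to hold IN_USE
+ * while allocated, IN_POOL while parked in the checkpoint pool, and
+ * FREED once released, so stale or reused checkpoints can be spotted
+ * in dumps (see _FreeSyncCheckpoint and the pool helpers below).
+ */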
+
+#if defined(SUPPORT_RGX)
+static inline void RGXSRVHWPerfSyncCheckpointUFOIsSignalled(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               _SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags)
+{
+	if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO)
+	    && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+	{
+		RGX_HWPERF_UFO_EV eEv;
+		RGX_HWPERF_UFO_DATA_ELEMENT sSyncData;
+
+		if (psSyncCheckpointInt)
+		{
+			if ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ||
+				(psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED))
+			{
+				sSyncData.sCheckSuccess.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt);
+				sSyncData.sCheckSuccess.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+				eEv = RGX_HWPERF_UFO_EV_CHECK_SUCCESS;
+			}
+			else
+			{
+				sSyncData.sCheckFail.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt);
+				sSyncData.sCheckFail.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+				sSyncData.sCheckFail.ui32Required = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+				eEv = RGX_HWPERF_UFO_EV_CHECK_FAIL;
+			}
+			RGXHWPerfHostPostUfoEvent(psDevInfo, eEv, &sSyncData,
+			    (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE);
+		}
+	}
+}
+
+static inline void RGXSRVHWPerfSyncCheckpointUFOUpdate(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               _SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags)
+{
+	if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO)
+	    && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+	{
+		RGX_HWPERF_UFO_DATA_ELEMENT sSyncData;
+
+		if (psSyncCheckpointInt)
+		{
+			sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt);
+			sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+			sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+			RGXHWPerfHostPostUfoEvent(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData,
+			    (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE);
+		}
+	}
+}
+#endif
+
+static PVRSRV_ERROR
+_SyncCheckpointRecordAdd(PSYNC_CHECKPOINT_RECORD_HANDLE *phRecord,
+	                    SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock,
+	                    IMG_UINT32 ui32FwBlockAddr,
+	                    IMG_UINT32 ui32SyncOffset,
+	                    IMG_UINT32 ui32UID,
+	                    IMG_UINT32 ui32ClassNameSize,
+	                    const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt);
+static PVRSRV_ERROR
+_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord);
+static void _SyncCheckpointState(PDLLIST_NODE psNode,
+                                 DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                 void *pvDumpDebugFile);
+static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+                                        IMG_UINT32 ui32VerbLevel,
+                                        DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                        void *pvDumpDebugFile);
+static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode);
+static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode);
+
+#if defined(PDUMP)
+static PVRSRV_ERROR _SyncCheckpointSignalPDump(_SYNC_CHECKPOINT *psSyncCheckpoint);
+static PVRSRV_ERROR _SyncCheckpointErrorPDump(_SYNC_CHECKPOINT *psSyncCheckpoint);
+#endif
+
+/* Unique incremental ID assigned to sync checkpoints when allocated */
+static IMG_UINT32 g_SyncCheckpointUID;
+
+static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext);
+
+void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext)
+{
+	_SYNC_CHECKPOINT_CONTEXT *psContextInt = (_SYNC_CHECKPOINT_CONTEXT *)psContext;
+	IMG_UINT32 ui32RefCt = OSAtomicRead(&psContextInt->hRefCount);
+
+	if (ui32RefCt == 0)
+	{
+		PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT,
+		              "SyncCheckpointContextUnref context already freed");
+	}
+	else if (0 == OSAtomicDecrement(&psContextInt->hRefCount))
+	{
+		/* SyncCheckpointContextDestroy only when no longer referenced */
+		OSLockDestroy(psContextInt->psContextCtl->hDeferredCleanupListLock);
+		psContextInt->psContextCtl->hDeferredCleanupListLock = NULL;
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+		if (psContextInt->psContextCtl->ui32SyncCheckpointPoolCount)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s called for context<%p> with %d sync checkpoints still in the pool",
+					__func__,
+					(void*)psContext,
+					psContextInt->psContextCtl->ui32SyncCheckpointPoolCount));
+		}
+		psContextInt->psContextCtl->bSyncCheckpointPoolValid = IMG_FALSE;
+		OSLockDestroy(psContextInt->psContextCtl->hSyncCheckpointPoolLock);
+		psContextInt->psContextCtl->hSyncCheckpointPoolLock = NULL;
+#endif
+		OSFreeMem(psContextInt->psContextCtl);
+		RA_Delete(psContextInt->psSpanRA);
+		RA_Delete(psContextInt->psSubAllocRA);
+		OSLockDestroy(psContextInt->hLock);
+		psContextInt->hLock = NULL;
+		OSFreeMem(psContext);
+	}
+}
+
+void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext)
+{
+	_SYNC_CHECKPOINT_CONTEXT *psContextInt = (_SYNC_CHECKPOINT_CONTEXT *)psContext;
+	IMG_UINT32 ui32RefCt = OSAtomicRead(&psContextInt->hRefCount);
+
+	if (ui32RefCt == 0)
+	{
+		PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT,
+		              "SyncCheckpointContextRef context use after free");
+	}
+	else
+	{
+		OSAtomicIncrement(&psContextInt->hRefCount);
+	}
+}
+
+/*
+	Internal interfaces for management of synchronisation block memory
+ */
+static PVRSRV_ERROR
+_AllocSyncCheckpointBlock(_SYNC_CHECKPOINT_CONTEXT *psContext,
+                          SYNC_CHECKPOINT_BLOCK    **ppsSyncBlock)
+{
+	PVRSRV_DEVICE_NODE *psDevNode;
+	SYNC_CHECKPOINT_BLOCK *psSyncBlk;
+	PVRSRV_ERROR eError;
+
+	psSyncBlk = OSAllocMem(sizeof(*psSyncBlk));
+	PVR_LOGG_IF_NOMEM(psSyncBlk, "OSAllocMem", eError, fail_alloc);
+
+	psSyncBlk->psContext = psContext;
+
+	/* Allocate sync checkpoint block */
+	psDevNode = psContext->psDevNode;
+	if (!psDevNode)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_LOG_ERROR(eError, "context device node invalid");
+		goto fail_alloc_ufo_block;
+	}
+	psSyncBlk->psDevNode = psDevNode;
+
+	eError = psDevNode->pfnAllocUFOBlock(psDevNode,
+	                                     &psSyncBlk->hMemDesc,
+	                                     &psSyncBlk->ui32FirmwareAddr,
+	                                     &psSyncBlk->ui32SyncBlockSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_ERROR(eError, "failed to allocate ufo block");
+		goto fail_alloc_ufo_block;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc,
+	                                  (void **) &psSyncBlk->pui32LinAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_ERROR(eError, "DevmemAcquireCpuVirtAddr");
+		goto fail_devmem_acquire;
+	}
+
+	OSAtomicWrite(&psSyncBlk->hRefCount, 1);
+
+	OSLockCreate(&psSyncBlk->hLock);
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+	                      "Allocated Sync Checkpoint UFO block (FirmwareVAddr = 0x%08x)",
+	                      psSyncBlk->ui32FirmwareAddr);
+
+	*ppsSyncBlock = psSyncBlk;
+	return PVRSRV_OK;
+
+	fail_devmem_acquire:
+	psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc);
+	fail_alloc_ufo_block:
+	OSFreeMem(psSyncBlk);
+	fail_alloc:
+	return eError;
+}
+
+static void
+_FreeSyncCheckpointBlock(SYNC_CHECKPOINT_BLOCK *psSyncBlk)
+{
+	OSLockAcquire(psSyncBlk->hLock);
+	if (0 == OSAtomicDecrement(&psSyncBlk->hRefCount))
+	{
+		PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+
+		DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc);
+		psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc);
+		OSLockRelease(psSyncBlk->hLock);
+		OSLockDestroy(psSyncBlk->hLock);
+		psSyncBlk->hLock = NULL;
+		OSFreeMem(psSyncBlk);
+	}
+	else
+	{
+		OSLockRelease(psSyncBlk->hLock);
+	}
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointBlockImport(RA_PERARENA_HANDLE hArena,
+                           RA_LENGTH_T uSize,
+                           RA_FLAGS_T uFlags,
+                           const IMG_CHAR *pszAnnotation,
+                           RA_BASE_T *puiBase,
+                           RA_LENGTH_T *puiActualSize,
+                           RA_PERISPAN_HANDLE *phImport)
+{
+	_SYNC_CHECKPOINT_CONTEXT *psContext = hArena;
+	SYNC_CHECKPOINT_BLOCK *psSyncBlock = NULL;
+	RA_LENGTH_T uiSpanSize;
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(uFlags);
+
+	PVR_LOG_IF_FALSE((hArena != NULL), "hArena is NULL");
+
+	/* Check we've not been called with an unexpected size */
+	PVR_LOG_IF_FALSE((uSize == sizeof(SYNC_CHECKPOINT_FW_OBJ)),
+	                 "uSize is not the size of SYNC_CHECKPOINT_FW_OBJ");
+
+	/*
+		Ensure the sync checkpoint context doesn't go away while we have sync blocks
+		attached to it
+	 */
+	SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext);
+
+	/* Allocate the block of memory */
+	eError = _AllocSyncCheckpointBlock(psContext, &psSyncBlock);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_syncblockalloc;
+	}
+
+	/* Allocate a span for it */
+	eError = RA_Alloc(psContext->psSpanRA,
+	                  psSyncBlock->ui32SyncBlockSize,
+	                  RA_NO_IMPORT_MULTIPLIER,
+	                  0,
+	                  psSyncBlock->ui32SyncBlockSize,
+	                  pszAnnotation,
+	                  &psSyncBlock->uiSpanBase,
+	                  &uiSpanSize,
+	                  NULL);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_spanalloc;
+	}
+
+	/*
+		There is no reason the span RA should return an allocation larger
+		than we request
+	 */
+	PVR_LOG_IF_FALSE((uiSpanSize == psSyncBlock->ui32SyncBlockSize),
+	                 "uiSpanSize invalid");
+
+	*puiBase = psSyncBlock->uiSpanBase;
+	*puiActualSize = psSyncBlock->ui32SyncBlockSize;
+	*phImport = psSyncBlock;
+	return PVRSRV_OK;
+
+	fail_spanalloc:
+	_FreeSyncCheckpointBlock(psSyncBlock);
+	fail_syncblockalloc:
+	SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext);
+
+	return eError;
+}
+
+static void
+_SyncCheckpointBlockUnimport(RA_PERARENA_HANDLE hArena,
+                             RA_BASE_T uiBase,
+                             RA_PERISPAN_HANDLE hImport)
+{
+	_SYNC_CHECKPOINT_CONTEXT *psContext = hArena;
+	SYNC_CHECKPOINT_BLOCK   *psSyncBlock = hImport;
+
+	PVR_LOG_IF_FALSE((psContext != NULL), "hArena invalid");
+	PVR_LOG_IF_FALSE((psSyncBlock != NULL), "hImport invalid");
+	PVR_LOG_IF_FALSE((uiBase == psSyncBlock->uiSpanBase), "uiBase invalid");
+
+	/* Free the span this import is using */
+	RA_Free(psContext->psSpanRA, uiBase);
+
+	/* Free the sync checkpoint block */
+	_FreeSyncCheckpointBlock(psSyncBlock);
+
+	/* Drop our reference to the sync checkpoint context */
+	SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext);
+}
+
+static INLINE IMG_UINT32 _SyncCheckpointGetOffset(_SYNC_CHECKPOINT *psSyncInt)
+{
+	IMG_UINT64 ui64Temp;
+
+	ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncCheckpointBlock->uiSpanBase;
+	PVR_ASSERT(ui64Temp < IMG_UINT32_MAX);
+	return (IMG_UINT32)ui64Temp;
+}
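+/* For example, a checkpoint whose span address sits 64 bytes above its
+ * block's uiSpanBase yields an offset of 64, i.e. the byte offset of
+ * its FW object within the UFO block.
+ */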
+
+/* Used by SyncCheckpointContextCreate() below */
+static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align)
+{
+	PVR_ASSERT(IsPower2(ui32Align));
+	return ExactLog2(ui32Align);
+}
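+/* For example, _Log2(4) == 2; callers must pass a power of two, which
+ * the PVR_ASSERT above enforces on debug builds.
+ */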
+
+/*
+	External interfaces
+ */
+
+PVRSRV_ERROR
+SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	g_psSyncCheckpointPfnStruct = psSyncCheckpointPfns;
+
+	return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                           PVRSRV_FENCE hFence, IMG_UINT32 *pui32NumSyncCheckpoints,
+                           PSYNC_CHECKPOINT **papsSyncCheckpoints,
+                           IMG_UINT64 *pui64FenceUID)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceResolve)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+				__func__));
+		eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+		PVR_LOG_ERROR(eError, "g_pfnFenceResolve is NULL");
+		return eError;
+	}
+
+	if (papsSyncCheckpoints)
+	{
+		eError = g_psSyncCheckpointPfnStruct->pfnFenceResolve(
+		                           psSyncCheckpointContext,
+		                           hFence,
+		                           pui32NumSyncCheckpoints,
+		                           papsSyncCheckpoints,
+		                           pui64FenceUID);
+	}
+	else
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PVR_LOGR_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceResolve");
+
+	if (*pui32NumSyncCheckpoints > MAX_SYNC_CHECKPOINTS_PER_FENCE)
+	{
+		IMG_UINT32 i;
+		PVR_DPF((PVR_DBG_ERROR, "%s: g_psSyncCheckpointPfnStruct->pfnFenceResolve() returned too many checkpoints (%u > MAX_SYNC_CHECKPOINTS_PER_FENCE=%u)",
+				__func__, *pui32NumSyncCheckpoints, MAX_SYNC_CHECKPOINTS_PER_FENCE));
+
+		/* Free resources after error */
+		if (*papsSyncCheckpoints)
+		{
+			for (i = 0; i < *pui32NumSyncCheckpoints; i++)
+			{
+				SyncCheckpointDropRef((*papsSyncCheckpoints)[i]);
+			}
+
+			SyncCheckpointFreeCheckpointListMem(*papsSyncCheckpoints);
+		}
+
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+	{
+		IMG_UINT32 ii;
+
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: g_psSyncCheckpointPfnStruct->pfnFenceResolve() for fence %d returned the following %d checkpoints:",
+				__func__,
+				hFence,
+				*pui32NumSyncCheckpoints));
+
+		for (ii=0; ii<*pui32NumSyncCheckpoints; ii++)
+		{
+			PSYNC_CHECKPOINT psNextCheckpoint = *(*papsSyncCheckpoints +  ii);
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s:   *papsSyncCheckpoints[%d]:<%p>",
+					__func__,
+					ii,
+					(void*)psNextCheckpoint));
+		}
+	}
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointCreateFence(PVRSRV_DEVICE_NODE *psDevNode,
+                          const IMG_CHAR *pszFenceName,
+                          PVRSRV_TIMELINE hTimeline,
+                          PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                          PVRSRV_FENCE *phNewFence,
+                          IMG_UINT64 *puiUpdateFenceUID,
+                          void **ppvFenceFinaliseData,
+                          PSYNC_CHECKPOINT *psNewSyncCheckpoint,
+                          void **ppvTimelineUpdateSyncPrim,
+                          IMG_UINT32 *pui32TimelineUpdateValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+	if (unlikely(!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceCreate))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+				__func__));
+		eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+		PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceCreate is NULL");
+	}
+	else
+	{
+		eError = g_psSyncCheckpointPfnStruct->pfnFenceCreate(
+		                          pszFenceName,
+		                          hTimeline,
+		                          psSyncCheckpointContext,
+		                          phNewFence,
+		                          puiUpdateFenceUID,
+		                          ppvFenceFinaliseData,
+		                          psNewSyncCheckpoint,
+		                          ppvTimelineUpdateSyncPrim,
+		                          pui32TimelineUpdateValue);
+		if (unlikely(eError != PVRSRV_OK))
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s failed to create new fence<%p> for timeline<%d> using "
+					"sync checkpoint context<%p>, psNewSyncCheckpoint=<%p>, eError=%s",
+					__func__,
+					(void*)phNewFence,
+					hTimeline,
+					(void*)psSyncCheckpointContext,
+					(void*)psNewSyncCheckpoint,
+					PVRSRVGetErrorString(eError)));
+		}
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+		else
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s created new fence<%d> for timeline<%d> using "
+					"sync checkpoint context<%p>, new sync_checkpoint=<%p>",
+					__func__,
+					*phNewFence,
+					hTimeline,
+					(void*)psSyncCheckpointContext,
+					(void*)*psNewSyncCheckpoint));
+		}
+#endif
+	}
+	return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceDataRollback)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+				__func__));
+		eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+		PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceDataRollback is NULL");
+	}
+	else
+	{
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: called to rollback fence data <%p>",
+				__func__,
+				pvFinaliseData));
+#endif
+		eError = g_psSyncCheckpointPfnStruct->pfnFenceDataRollback(
+		            hFence, pvFinaliseData);
+		PVR_LOG_IF_ERROR(eError,
+		                 "g_psSyncCheckpointPfnStruct->pfnFenceDataRollback returned error");
+	}
+	return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointFinaliseFence(PPVRSRV_DEVICE_NODE psDevNode,
+                            PVRSRV_FENCE hFence,
+                            void *pvFinaliseData,
+                            PSYNC_CHECKPOINT psSyncCheckpoint,
+                            const IMG_CHAR *pszName)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceFinalise)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: Warning (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) (this is permitted)",
+				__func__));
+		eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+	}
+	else
+	{
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: called to finalise fence <%d>",
+				__func__,
+				hFence));
+#endif
+		eError = g_psSyncCheckpointPfnStruct->pfnFenceFinalise(hFence, pvFinaliseData);
+		PVR_LOG_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceFinalise returned error");
+
+		RGXSRV_HWPERF_ALLOC_FENCE(psDevNode, OSGetCurrentClientProcessIDKM(), hFence,
+		                          SyncCheckpointGetFirmwareAddr(psSyncCheckpoint),
+		                          pszName, OSStringLength(pszName));
+	}
+	return eError;
+}
+
+void
+SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem)
+{
+	if (g_psSyncCheckpointPfnStruct->pfnFreeCheckpointListMem)
+	{
+		g_psSyncCheckpointPfnStruct->pfnFreeCheckpointListMem(pvCheckpointListMem);
+	}
+}
+
+PVRSRV_ERROR
+SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+				__func__));
+		eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+		PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines is NULL");
+	}
+	else
+	{
+		g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines(pvPrivateData);
+	}
+	return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs, IMG_UINT32 *pui32Vaddrs, IMG_UINT32 *pui32NumSyncOwnedUFOs)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_LOGR_IF_FALSE((pui32NumSyncOwnedUFOs != NULL), "pui32NumSyncOwnedUFOs invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+	if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs)
+	{
+		*pui32NumSyncOwnedUFOs = 0;
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+				__func__));
+		eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+		PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs is NULL");
+	}
+	else
+	{
+		*pui32NumSyncOwnedUFOs = g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs(ui32NumUFOs, pui32Vaddrs);
+		PVR_LOG(("%d sync checkpoint%s owned by %s in stalled context",
+		         *pui32NumSyncOwnedUFOs, *pui32NumSyncOwnedUFOs==1 ? "" : "s",
+		         g_psSyncCheckpointPfnStruct->pszImplName));
+	}
+	return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode,
+                            PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext)
+{
+	_SYNC_CHECKPOINT_CONTEXT *psContext = NULL;
+	_SYNC_CHECKPOINT_CONTEXT_CTL *psContextCtl = NULL;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_LOGR_IF_FALSE((ppsSyncCheckpointContext != NULL),
+	                  "ppsSyncCheckpointContext invalid",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+
+	psContext = OSAllocMem(sizeof(*psContext));
+	PVR_LOGG_IF_NOMEM(psContext, "OSAllocMem", eError, fail_alloc); /* Sets OOM error code */
+
+	psContextCtl = OSAllocMem(sizeof(*psContextCtl));
+	PVR_LOGG_IF_NOMEM(psContextCtl, "OSAllocMem", eError, fail_alloc2); /* Sets OOM error code */
+
+	eError = OSLockCreate(&psContext->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call "
+		              "to OSLockCreate(context lock) failed");
+		goto fail_create_context_lock;
+	}
+
+	eError = OSLockCreate(&psContextCtl->hDeferredCleanupListLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call "
+		              "to OSLockCreate(deferred cleanup list lock) failed");
+		goto fail_create_deferred_cleanup_lock;
+	}
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+	eError = OSLockCreate(&psContextCtl->hSyncCheckpointPoolLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call "
+		              "to OSLockCreate(sync checkpoint pool lock) failed");
+		goto fail_create_pool_lock;
+	}
+#endif
+
+	dllist_init(&psContextCtl->sDeferredCleanupListHead);
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+	psContextCtl->ui32SyncCheckpointPoolCount = 0;
+	psContextCtl->ui32SyncCheckpointPoolWp = 0;
+	psContextCtl->ui32SyncCheckpointPoolRp = 0;
+	psContextCtl->bSyncCheckpointPoolFull = IMG_FALSE;
+	psContextCtl->bSyncCheckpointPoolValid = IMG_TRUE;
+#endif
+	psContext->psDevNode = psDevNode;
+
+	OSSNPrintf(psContext->azName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim RA-%p", psContext);
+	OSSNPrintf(psContext->azSpanName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim span RA-%p", psContext);
+
+	/*
+		Create the RA for sub-allocations of the sync checkpoints
+
+		Note:
+		The import size doesn't matter here, as the server will pass
+		back the block size when it does the import, which overrides
+		what we specify here.
+	 */
+	psContext->psSubAllocRA = RA_Create(psContext->azName,
+	                                    /* Params for imports */
+	                                    _Log2(sizeof(IMG_UINT32)),
+	                                    RA_LOCKCLASS_2,
+	                                    _SyncCheckpointBlockImport,
+	                                    _SyncCheckpointBlockUnimport,
+	                                    psContext,
+	                                    IMG_FALSE);
+	if (psContext->psSubAllocRA == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Create(subAlloc) failed");
+		goto fail_suballoc;
+	}
+
+	/*
+		Create the span-management RA
+
+		The RA requires that we work with linear spans. For our use
+		here we don't require this behaviour as we're always working
+		within offsets of blocks (imports). However, we need to keep
+		the RA happy, so we create the "span" management RA, which
+		ensures that all our imports are added to the RA in a linear
+		fashion
+	 */
+	psContext->psSpanRA = RA_Create(psContext->azSpanName,
+	                                /* Params for imports */
+	                                0,
+	                                RA_LOCKCLASS_1,
+	                                NULL,
+	                                NULL,
+	                                NULL,
+	                                IMG_FALSE);
+	if (psContext->psSpanRA == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Create(span) failed");
+		goto fail_span;
+	}
+
+	if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_CHECKPOINT_MEM, 0, NULL))
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Add(span) failed");
+		goto fail_span_add;
+	}
+
+	OSAtomicWrite(&psContext->hRefCount, 1);
+	OSAtomicWrite(&psContext->hCheckpointCount, 0);
+
+	psContext->psContextCtl = psContextCtl;
+
+	*ppsSyncCheckpointContext = (PSYNC_CHECKPOINT_CONTEXT)psContext;
+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
+	PVR_DPF((PVR_DBG_WARNING,
+			"%s: created psSyncCheckpointContext=<%p> (%d contexts exist)",
+			__func__,
+			(void*)*ppsSyncCheckpointContext,
+			++gui32NumSyncCheckpointContexts));
+#endif
+	return PVRSRV_OK;
+
+	fail_span_add:
+	RA_Delete(psContext->psSpanRA);
+	fail_span:
+	RA_Delete(psContext->psSubAllocRA);
+	fail_suballoc:
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+	OSLockDestroy(psContextCtl->hSyncCheckpointPoolLock);
+	psContextCtl->hSyncCheckpointPoolLock = NULL;
+	fail_create_pool_lock:
+#endif
+	OSLockDestroy(psContextCtl->hDeferredCleanupListLock);
+	psContextCtl->hDeferredCleanupListLock = NULL;
+	fail_create_deferred_cleanup_lock:
+	OSLockDestroy(psContext->hLock);
+	psContext->hLock = NULL;
+	fail_create_context_lock:
+	OSFreeMem(psContextCtl);
+	fail_alloc2:
+	OSFreeMem(psContext);
+	fail_alloc:
+	return eError;
+}
+
+/* Poisons and frees the checkpoint and lock.
+ * Decrements context refcount. */
+static void _FreeSyncCheckpoint(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext;
+
+	psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = 0;
+	psSyncCheckpoint->psSyncCheckpointFwObj = NULL;
+	psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED;
+
+	RA_Free(psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA,
+	        psSyncCheckpoint->uiSpanAddr);
+	psSyncCheckpoint->psSyncCheckpointBlock = NULL;
+
+	OSLockDestroy(psSyncCheckpoint->hLock);
+	OSFreeMem(psSyncCheckpoint);
+
+	OSAtomicDecrement(&psContext->hCheckpointCount);
+}
+
+PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	_SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointContext;
+	PVRSRV_DEVICE_NODE *psDevNode;
+	IMG_INT iRf = 0;
+
+	PVR_LOGR_IF_FALSE((psSyncCheckpointContext != NULL),
+	                  "psSyncCheckpointContext invalid",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+
+	psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode;
+
+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
+	PVR_DPF((PVR_DBG_WARNING,
+			"%s: destroying psSyncCheckpointContext=<%p> (now have %d contexts)",
+			__func__,
+			(void*)psSyncCheckpointContext,
+			--gui32NumSyncCheckpointContexts));
+#endif
+
+	_CheckDeferredCleanupList(psContext);
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+	if (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0)
+	{
+		IMG_UINT32 ui32NumFreedFromPool = _CleanCheckpointPool(psContext);
+
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s freed %d sync checkpoints that were still in the pool for context<%p>",
+				__func__,
+				ui32NumFreedFromPool,
+				(void*)psContext));
+#else
+		PVR_UNREFERENCED_PARAMETER(ui32NumFreedFromPool);
+#endif
+	}
+#endif
+
+	iRf = OSAtomicRead(&psContext->hCheckpointCount);
+
+	if (iRf != 0)
+	{
+		/* Note, this is not a permanent error as the caller may retry later */
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s <%p> attempted with active references (iRf=%d), "
+				"may be the result of a race",
+				__func__,
+				(void*)psContext,
+				iRf));
+
+		OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+		{
+			DLLIST_NODE *psNode, *psNext;
+
+			dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+			{
+				_SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+				IMG_BOOL bDeferredFree = dllist_node_is_in_list(&psSyncCheckpoint->sDeferredFreeListNode);
+
+				/* Line below avoids build error in release builds (where PVR_DPF is not defined) */
+				PVR_UNREFERENCED_PARAMETER(bDeferredFree);
+				PVR_DPF((PVR_DBG_WARNING,
+						"%s syncCheckpoint<%p> ID=%d, %s, refs=%d, state=%s, fwaddr=%#08x, enqCount:%d, FWCount:%d %s",
+						__func__,
+						(void*)psSyncCheckpoint,
+						psSyncCheckpoint->ui32UID,
+						psSyncCheckpoint->azName,
+						OSAtomicRead(&psSyncCheckpoint->hRefCount),
+						psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED ?
+								"PVRSRV_SYNC_CHECKPOINT_SIGNALLED" :
+								psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE ?
+										"PVRSRV_SYNC_CHECKPOINT_ACTIVE" : "PVRSRV_SYNC_CHECKPOINT_ERRORED",
+						psSyncCheckpoint->ui32FWAddr,
+						OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount),
+						psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount,
+						bDeferredFree ? "(deferred free)" : ""));
+
+				eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT;
+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
+				gui32NumSyncCheckpointContexts++;
+#endif
+			}
+		}
+		OSLockRelease(psDevNode->hSyncCheckpointListLock);
+	}
+	else
+	{
+		IMG_INT iRf2 = 0;
+
+		iRf2 = OSAtomicRead(&psContext->hRefCount);
+		SyncCheckpointContextUnref(psSyncCheckpointContext);
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
+                    PVRSRV_TIMELINE hTimeline,
+                    PVRSRV_FENCE hFence,
+                    const IMG_CHAR *pszCheckpointName,
+                    PSYNC_CHECKPOINT *ppsSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL;
+	_SYNC_CHECKPOINT_CONTEXT *psSyncContextInt = (_SYNC_CHECKPOINT_CONTEXT*)psSyncContext;
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PVRSRV_ERROR eError;
+
+	PVR_LOGR_IF_FALSE((psSyncContext != NULL), "psSyncContext invalid", PVRSRV_ERROR_INVALID_PARAMS);
+	PVR_LOGR_IF_FALSE((ppsSyncCheckpoint != NULL), "ppsSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+	psDevNode = (PVRSRV_DEVICE_NODE *)psSyncContextInt->psDevNode;
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+	PVR_DPF((PVR_DBG_WARNING, "%s Entry, Getting checkpoint from pool",
+			 __func__));
+#endif
+	psNewSyncCheckpoint = _GetCheckpointFromPool(psSyncContextInt);
+	if (!psNewSyncCheckpoint)
+	{
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s     checkpoint pool empty - will have to allocate",
+				 __func__));
+#endif
+	}
+#endif
+	/* If pool is empty (or not defined) alloc the new sync checkpoint */
+	if (!psNewSyncCheckpoint)
+	{
+		psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint));
+		PVR_LOGG_IF_NOMEM(psNewSyncCheckpoint, "OSAllocMem", eError, fail_alloc); /* Sets OOM error code */
+
+		eError = OSLockCreate(&psNewSyncCheckpoint->hLock);
+
+		PVR_LOGG_IF_ERROR(eError, "OSLockCreate", fail_create_checkpoint_lock);
+
+		eError = RA_Alloc(psSyncContextInt->psSubAllocRA,
+		                  sizeof(*psNewSyncCheckpoint->psSyncCheckpointFwObj),
+		                  RA_NO_IMPORT_MULTIPLIER,
+		                  0,
+		                  sizeof(IMG_UINT32),
+		                  (IMG_CHAR*)pszCheckpointName,
+		                  &psNewSyncCheckpoint->uiSpanAddr,
+		                  NULL,
+		                  (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock);
+		PVR_LOGG_IF_ERROR(eError, "RA_Alloc", fail_raalloc);
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s CALLED RA_Alloc(), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+				__func__,
+				(void*)psSyncContextInt->psSubAllocRA,
+				psNewSyncCheckpoint->uiSpanAddr));
+#endif
+		psNewSyncCheckpoint->psSyncCheckpointFwObj =
+				(volatile SYNC_CHECKPOINT_FW_OBJ*)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr +
+						(_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32)));
+		psNewSyncCheckpoint->ui32FWAddr = psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr +
+		                                  _SyncCheckpointGetOffset(psNewSyncCheckpoint) + 1;
+		OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount);
+		psNewSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE;
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s called to allocate new sync checkpoint<%p> for context<%p>",
+				 __func__, (void*)psNewSyncCheckpoint, (void*)psSyncContext));
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s                    psSyncCheckpointFwObj<%p>",
+				 __func__, (void*)psNewSyncCheckpoint->psSyncCheckpointFwObj));
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s                    psSyncCheckpoint FwAddr=0x%x",
+				 __func__, SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint)));
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s                    pszCheckpointName = %s",
+				 __func__, pszCheckpointName));
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s                    psSyncCheckpoint Timeline=%d",
+				 __func__, hTimeline));
+#endif
+	}
+
+	psNewSyncCheckpoint->hTimeline = hTimeline;
+	OSAtomicWrite(&psNewSyncCheckpoint->hRefCount, 1);
+	OSAtomicWrite(&psNewSyncCheckpoint->hEnqueuedCCBCount, 0);
+	psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount = 0;
+	psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ACTIVE;
+	psNewSyncCheckpoint->uiProcess = OSGetCurrentClientProcessIDKM();
+	OSCachedMemSet(&psNewSyncCheckpoint->sDeferredFreeListNode, 0, sizeof(psNewSyncCheckpoint->sDeferredFreeListNode));
+
+	if (pszCheckpointName)
+	{
+		/* Copy over the checkpoint name annotation */
+		OSStringLCopy(psNewSyncCheckpoint->azName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH);
+	}
+	else
+	{
+		/* No sync checkpoint name annotation */
+		psNewSyncCheckpoint->azName[0] = '\0';
+	}
+
+	/* Store sync checkpoint FW address in PRGXFWIF_UFO_ADDR struct */
+	psNewSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint);
+
+	/* Assign unique ID to this sync checkpoint */
+	psNewSyncCheckpoint->ui32UID = g_SyncCheckpointUID++;
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+	                      "Allocated Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
+	                      psNewSyncCheckpoint->azName,
+	                      psNewSyncCheckpoint->ui32UID, psNewSyncCheckpoint->hTimeline,
+	                      psNewSyncCheckpoint->sCheckpointUFOAddr.ui32Addr);
+
+	RGXSRV_HWPERF_ALLOC_SYNC_CP(psDevNode, psNewSyncCheckpoint->hTimeline,
+	                            OSGetCurrentClientProcessIDKM(),
+	                            hFence,
+	                            psNewSyncCheckpoint->ui32FWAddr,
+	                            psNewSyncCheckpoint->azName,
+	                            sizeof(psNewSyncCheckpoint->azName));
+
+	if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+	{
+		IMG_CHAR szChkptName[PVRSRV_SYNC_NAME_LENGTH];
+
+		if (pszCheckpointName)
+		{
+			/* Copy the checkpoint name annotation into a fixed-size array */
+			OSStringLCopy(szChkptName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH);
+		}
+		else
+		{
+			/* No checkpoint name annotation */
+			szChkptName[0] = 0;
+		}
+		/* record this sync */
+		eError = _SyncCheckpointRecordAdd(&psNewSyncCheckpoint->hRecord,
+		                                 psNewSyncCheckpoint->psSyncCheckpointBlock,
+		                                 psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr,
+		                                 _SyncCheckpointGetOffset(psNewSyncCheckpoint),
+		                                 psNewSyncCheckpoint->ui32UID,
+		                                 OSStringNLength(szChkptName, PVRSRV_SYNC_NAME_LENGTH),
+		                                 szChkptName, (PSYNC_CHECKPOINT)psNewSyncCheckpoint);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\" (%s)",
+					__func__,
+					szChkptName,
+					PVRSRVGetErrorString(eError)));
+			psNewSyncCheckpoint->hRecord = NULL;
+			/* note the error but continue without affecting driver operation */
+		}
+	}
+
+	/* Add the sync checkpoint to the device list */
+	OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+	dllist_add_to_head(&psDevNode->sSyncCheckpointSyncsList,
+	                   &psNewSyncCheckpoint->sListNode);
+	OSLockRelease(psDevNode->hSyncCheckpointListLock);
+
+	*ppsSyncCheckpoint = (PSYNC_CHECKPOINT)psNewSyncCheckpoint;
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+	PVR_DPF((PVR_DBG_WARNING,
+			"%s Exit(Ok), psNewSyncCheckpoint->ui32UID=%d <%p>",
+			__func__,
+			psNewSyncCheckpoint->ui32UID,
+			(void*)psNewSyncCheckpoint));
+#endif
+	return PVRSRV_OK;
+
+	fail_raalloc:
+	OSLockDestroy(psNewSyncCheckpoint->hLock);
+	psNewSyncCheckpoint->hLock = NULL;
+	fail_create_checkpoint_lock:
+	OSFreeMem(psNewSyncCheckpoint);
+	fail_alloc:
+	return eError;
+}
+
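+/* Lifetime rules: a sync checkpoint is only freed once its host refcount
+ * has dropped to zero AND the firmware has serviced every reference
+ * enqueued against it (ui32FwRefCount == hEnqueuedCCBCount). If the FW is
+ * still behind when the last host reference is dropped, the checkpoint is
+ * parked on the context's deferred-cleanup list and freed later by
+ * _CheckDeferredCleanupList(). */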
+static void SyncCheckpointUnref(_SYNC_CHECKPOINT *psSyncCheckpointInt)
+{
+	_SYNC_CHECKPOINT_CONTEXT *psContext;
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	psContext = psSyncCheckpointInt->psSyncCheckpointBlock->psContext;
+	psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode;
+
+	/*
+	 * Without this reference, the context may be destroyed as soon
+	 * as _FreeSyncCheckpoint is called, but the context is still
+	 * needed when _CheckDeferredCleanupList is called at the end
+	 * of this function.
+	 */
+	SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext);
+
+	PVR_ASSERT(psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE);
+	if (!OSAtomicRead(&psSyncCheckpointInt->hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncCheckpointUnref sync checkpoint already freed"));
+	}
+	else if (0 == OSAtomicDecrement(&psSyncCheckpointInt->hRefCount))
+	{
+		/* If the firmware has serviced all enqueued references to the sync checkpoint, free it */
+		if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount ==
+				(IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)))
+		{
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s No outstanding FW ops and hRef is zero, deleting SyncCheckpoint..",
+					__func__));
+#endif
+			if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+				&& psSyncCheckpointInt->hRecord)
+			{
+				PVRSRV_ERROR eError;
+				/* remove this sync record */
+				eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord);
+				PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove");
+			}
+
+			/* Remove the sync checkpoint from the global list */
+			OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+			dllist_remove_node(&psSyncCheckpointInt->sListNode);
+			OSLockRelease(psDevNode->hSyncCheckpointListLock);
+
+			RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr);
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s attempting to return sync checkpoint to the pool",
+					__func__));
+#endif
+			if (!_PutCheckpointInPool(psSyncCheckpointInt))
+#endif
+			{
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+				PVR_DPF((PVR_DBG_WARNING,
+						"%s pool is full, so just free it",
+						__func__));
+#endif
+#endif
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+				PVR_DPF((PVR_DBG_WARNING,
+						"%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+						__func__,
+						psSyncCheckpointInt->ui32UID,
+						(void*)psSyncCheckpointInt,
+						(void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA,
+						psSyncCheckpointInt->uiSpanAddr));
+#endif
+				_FreeSyncCheckpoint(psSyncCheckpointInt);
+			}
+		}
+		else
+		{
+#if ((ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s Outstanding FW ops hEnqueuedCCBCount=%d != FwObj->ui32FwRefCount=%d "
+					"- DEFERRING CLEANUP psSyncCheckpoint(ID:%d)<%p>",
+					__func__,
+					OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+					psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount,
+					psSyncCheckpointInt->ui32UID,
+					(void*)psSyncCheckpointInt));
+#endif
+			/* Add the sync checkpoint to the deferred free list */
+			OSLockAcquire(psContext->psContextCtl->hDeferredCleanupListLock);
+			dllist_add_to_tail(&psContext->psContextCtl->sDeferredCleanupListHead,
+			                   &psSyncCheckpointInt->sDeferredFreeListNode);
+			OSLockRelease(psContext->psContextCtl->hDeferredCleanupListLock);
+		}
+	}
+	else
+	{
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s psSyncCheckpoint(ID:%d)<%p>, hRefCount decremented to %d",
+				__func__,
+				psSyncCheckpointInt->ui32UID,
+				(void*)psSyncCheckpointInt,
+				(IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount))));
+#endif
+	}
+
+	/* See if any sync checkpoints in the deferred cleanup list can be freed */
+	_CheckDeferredCleanupList(psContext);
+
+	SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext);
+}
+
+void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+	PVR_DPF((PVR_DBG_WARNING,
+			"%s Entry,  psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d, psSyncCheckpoint->ui32ValidationCheck=0x%x",
+			__func__,
+			psSyncCheckpointInt->ui32UID,
+			(void*)psSyncCheckpoint,
+			(IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)),
+			psSyncCheckpointInt->ui32ValidationCheck));
+#endif
+	SyncCheckpointUnref(psSyncCheckpointInt);
+}
+
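+/* State transitions are one-way: a checkpoint starts ACTIVE and moves to
+ * SIGNALLED (or ERRORED) exactly once; an attempt to signal a checkpoint
+ * that is no longer ACTIVE is logged and otherwise ignored. */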
+void
+SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+	if (psSyncCheckpointInt)
+	{
+		PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE),
+		                 "psSyncCheckpoint already signalled");
+
+		if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE)
+		{
+#if defined(SUPPORT_RGX)
+			PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+			RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags);
+#endif
+			psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+
+#if defined(PDUMP)
+			/* We may need to temporarily disable the posting of PDump events here, as the caller can be
+			 * in interrupt context and PDUMPCOMMENTWITHFLAGS takes the PDUMP_LOCK mutex
+			 */
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+			                      "Signalled Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
+			                      psSyncCheckpointInt->azName,
+			                      psSyncCheckpointInt->ui32UID, psSyncCheckpointInt->hTimeline,
+			                      (psSyncCheckpointInt->psSyncCheckpointBlock->ui32FirmwareAddr +
+			                       _SyncCheckpointGetOffset(psSyncCheckpointInt)));
+			_SyncCheckpointSignalPDump(psSyncCheckpointInt);
+#endif
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					 "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), "
+					 "when value is already %d",
+					 __func__,
+					 PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+					 psSyncCheckpointInt->ui32UID,
+					 psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State));
+		}
+	}
+}
+
+void
+SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+	if (psSyncCheckpointInt)
+	{
+		PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE),
+		                 "psSyncCheckpoint already signalled");
+
+		if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE)
+		{
+#if defined(SUPPORT_RGX)
+			PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+			RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, PVRSRV_FENCE_FLAG_NONE);
+#endif
+			psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+		}
+		else
+		{
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), "
+					"when value is already %d",
+					__func__,
+					PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+					psSyncCheckpointInt->ui32UID,
+					psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State));
+#endif
+		}
+	}
+}
+
+void
+SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+	if (psSyncCheckpointInt)
+	{
+		PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE),
+		                 "psSyncCheckpoint already signalled");
+
+		if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE)
+		{
+#if defined(SUPPORT_RGX)
+			if (!(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+			{
+				RGX_HWPERF_UFO_DATA_ELEMENT sSyncData;
+				PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+				sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint);
+				sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+				sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+
+				RGXSRV_HWPERF_UFO(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData,
+				                  (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE);
+			}
+#endif
+
+			psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+
+#if defined(PDUMP)
+			/* We may need to temporarily disable the posting of PDump events here, as the caller can be
+			 * in interrupt context and PDUMPCOMMENTWITHFLAGS takes the PDUMP_LOCK mutex
+			 */
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+			                      "Errored Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
+			                      psSyncCheckpointInt->azName,
+			                      psSyncCheckpointInt->ui32UID, psSyncCheckpointInt->hTimeline,
+			                      (psSyncCheckpointInt->psSyncCheckpointBlock->ui32FirmwareAddr +
+			                       _SyncCheckpointGetOffset(psSyncCheckpointInt)));
+			_SyncCheckpointErrorPDump(psSyncCheckpointInt);
+#endif
+		}
+	}
+}
+
+IMG_BOOL SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags)
+{
+	IMG_BOOL bRet = IMG_FALSE;
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+	if (psSyncCheckpointInt)
+	{
+#if defined(SUPPORT_RGX)
+		PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+		RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags);
+#endif
+		bRet = ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ||
+				(psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED));
+
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s called for psSyncCheckpoint<%p>, returning %d",
+				__func__,
+				(void*)psSyncCheckpoint,
+				bRet));
+#endif
+	}
+	return bRet;
+}
+
+IMG_BOOL
+SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags)
+{
+	IMG_BOOL bRet = IMG_FALSE;
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+	if (psSyncCheckpointInt)
+	{
+#if defined(SUPPORT_RGX)
+		PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+		RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags);
+#endif
+		bRet = (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED);
+
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s called for psSyncCheckpoint<%p>, returning %d",
+				__func__,
+				(void*)psSyncCheckpoint,
+				bRet));
+#endif
+	}
+	return bRet;
+}
+
+const IMG_CHAR *
+SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOGR_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", "Null");
+
+	switch (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State)
+	{
+		case PVRSRV_SYNC_CHECKPOINT_SIGNALLED:
+			return "Signalled";
+		case PVRSRV_SYNC_CHECKPOINT_ACTIVE:
+			return "Active";
+		case PVRSRV_SYNC_CHECKPOINT_ERRORED:
+			return "Errored";
+		case PVRSRV_SYNC_CHECKPOINT_UNDEF:
+			return "Undefined";
+		default:
+			return "Unknown";
+	}
+}
+
+PVRSRV_ERROR
+SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	PVRSRV_ERROR eRet = PVRSRV_OK;
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOGR_IF_FALSE((psSyncCheckpoint != NULL),
+	                  "psSyncCheckpoint invalid",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+	PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+			__func__,
+			psSyncCheckpointInt,
+			OSAtomicRead(&psSyncCheckpointInt->hRefCount),
+			OSAtomicRead(&psSyncCheckpointInt->hRefCount)+1,
+			psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+#endif
+	OSAtomicIncrement(&psSyncCheckpointInt->hRefCount);
+
+	return eRet;
+}
+
+PVRSRV_ERROR
+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	PVRSRV_ERROR eRet = PVRSRV_OK;
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOGR_IF_FALSE((psSyncCheckpoint != NULL),
+	                  "psSyncCheckpoint invalid",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+	PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+			__func__,
+			psSyncCheckpointInt,
+			OSAtomicRead(&psSyncCheckpointInt->hRefCount),
+			OSAtomicRead(&psSyncCheckpointInt->hRefCount)-1,
+			psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+#endif
+	SyncCheckpointUnref(psSyncCheckpointInt);
+
+	return eRet;
+}
+
+void
+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+	if (psSyncCheckpointInt)
+	{
+#if !defined(NO_HARDWARE)
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+				__func__,
+				(void*)psSyncCheckpoint,
+				OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+				OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)+1,
+				psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+#endif
+		OSAtomicIncrement(&psSyncCheckpointInt->hEnqueuedCCBCount);
+#endif
+	}
+}
+
+PRGXFWIF_UFO_ADDR*
+SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+	if (psSyncCheckpointInt)
+	{
+		if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
+		{
+			return &psSyncCheckpointInt->sCheckpointUFOAddr;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x",
+					__func__,
+					(void*)psSyncCheckpoint,
+					psSyncCheckpointInt->ui32ValidationCheck));
+		}
+	}
+
+	invalid_chkpt:
+	return NULL;
+}
+
+IMG_UINT32
+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+	IMG_UINT32 ui32Ret = 0;
+
+	PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+	if (psSyncCheckpointInt)
+	{
+		if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
+		{
+			ui32Ret = psSyncCheckpointInt->ui32FWAddr;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x",
+					__func__,
+					(void*)psSyncCheckpoint,
+					psSyncCheckpointInt->ui32ValidationCheck));
+		}
+	}
+
+invalid_chkpt:
+	return ui32Ret;
+}
+
+IMG_UINT32
+SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+	IMG_UINT32 ui32Ret = 0;
+
+	PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+	if (psSyncCheckpointInt)
+	{
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s returning ID for sync checkpoint<%p>",
+				__func__,
+				(void*)psSyncCheckpointInt));
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s (validationCheck=0x%x)",
+				__func__,
+				psSyncCheckpointInt->ui32ValidationCheck));
+#endif
+		ui32Ret = psSyncCheckpointInt->ui32UID;
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s (ui32UID=0x%x)",
+				__func__,
+				psSyncCheckpointInt->ui32UID));
+#endif
+	}
+	return ui32Ret;
+
+	invalid_chkpt:
+	return 0;
+}
+
+PVRSRV_TIMELINE
+SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+	PVRSRV_TIMELINE i32Ret = PVRSRV_NO_TIMELINE;
+
+	PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL),
+	                  "psSyncCheckpoint invalid",
+	                  invalid_chkpt);
+
+	if (psSyncCheckpointInt)
+	{
+		i32Ret = psSyncCheckpointInt->hTimeline;
+	}
+	return i32Ret;
+
+	invalid_chkpt:
+	return PVRSRV_NO_TIMELINE;
+}
+
+
+IMG_UINT32
+SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+	PVR_LOGR_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0);
+
+	return OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount);
+}
+
+IMG_UINT32
+SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+	PVR_LOGR_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0);
+
+	return OSAtomicRead(&psSyncCheckpointInt->hRefCount);
+}
+
+IMG_PID
+SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+	PVR_LOGR_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0);
+
+	return psSyncCheckpointInt->uiProcess;
+}
+
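+/* Find the state of the checkpoint with firmware address ui32FwAddr by a
+ * linear search of the device's checkpoint list (under the list lock);
+ * returns 0 if no checkpoint with that address is found. */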
+IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode,
+                                IMG_UINT32 ui32FwAddr)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt;
+	PDLLIST_NODE psNode, psNext;
+	IMG_UINT32 ui32State = 0;
+
+	OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+	dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+	{
+		psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+		if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt))
+		{
+			ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+			break;
+		}
+	}
+	OSLockRelease(psDevNode->hSyncCheckpointListLock);
+	return ui32State;
+}
+
+void SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode,
+                                IMG_UINT32 ui32FwAddr)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt;
+	PDLLIST_NODE psNode, psNext;
+
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+	PVR_DPF((PVR_DBG_WARNING,
+			"%s called to error UFO with ui32FWAddr=0x%x",
+			__func__,
+			ui32FwAddr));
+#endif
+
+	OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+	dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+	{
+		psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+		if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt))
+		{
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s calling SyncCheckpointError for sync checkpoint <%p>",
+					__func__,
+					(void*)psSyncCheckpointInt));
+#endif
+			/* Mark as errored. Note: the second argument of
+			 * SyncCheckpointError() is a ui32FenceSyncFlags bitmask, and
+			 * this call passes IMG_TRUE (the literal value 1). */
+			SyncCheckpointError((PSYNC_CHECKPOINT)psSyncCheckpointInt, IMG_TRUE);
+			break;
+		}
+	}
+	OSLockRelease(psDevNode->hSyncCheckpointListLock);
+}
+
+void SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = NULL;
+	PDLLIST_NODE psNode = NULL, psNext = NULL;
+
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+	PVR_DPF((PVR_DBG_WARNING,
+			"%s called to rollback UFO with ui32FWAddr=0x%x",
+			__func__,
+			ui32FwAddr));
+#endif
+#if !defined(NO_HARDWARE)
+	OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+	dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+	{
+		psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+		if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt))
+		{
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s called for psSyncCheckpointInt<%p> %d->%d",
+					__func__,
+					(void*)psSyncCheckpointInt,
+					OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+					OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)-1));
+#endif
+			OSAtomicDecrement(&psSyncCheckpointInt->hEnqueuedCCBCount);
+			break;
+		}
+	}
+	OSLockRelease(psDevNode->hSyncCheckpointListLock);
+#else
+	PVR_UNREFERENCED_PARAMETER(psNode);
+	PVR_UNREFERENCED_PARAMETER(psNext);
+	PVR_UNREFERENCED_PARAMETER(psSyncCheckpointInt);
+#endif
+}
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+IMG_BOOL SyncCheckpointUFOHasSignalled(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr, IMG_UINT32 ui32Value)
+{
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+	PVR_LOG(("%s called because UFO with ui32FWAddr=%#08x has been set to %#04x",
+			 __func__,
+			 ui32FwAddr,
+			 ui32Value));
+#endif
+
+	if (g_psSyncCheckpointPfnStruct->pfnCheckpointHasSignalled)
+	{
+		return g_psSyncCheckpointPfnStruct->pfnCheckpointHasSignalled(ui32FwAddr, ui32Value);
+	}
+	else
+	{
+		return IMG_FALSE;
+	}
+}
+
+void
+SyncCheckpointCheckState(void)
+{
+	if (g_psSyncCheckpointPfnStruct->pfnCheckState)
+	{
+		g_psSyncCheckpointPfnStruct->pfnCheckState();
+	}
+}
+
+void
+SyncCheckpointSignalWaiters(void)
+{
+	if (g_psSyncCheckpointPfnStruct->pfnSignalWaiters)
+	{
+		PVRSRV_ERROR eError = g_psSyncCheckpointPfnStruct->pfnSignalWaiters();
+		PVR_LOG_IF_ERROR(eError, __func__);
+	}
+}
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
+
+static void _SyncCheckpointState(PDLLIST_NODE psNode,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+
+	if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE)
+	{
+		PVR_DUMPDEBUG_LOG("\t- ID = %d, FWAddr = 0x%08x, r%d:e%d:f%d: %s",
+		                   psSyncCheckpoint->ui32UID,
+		                   psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr +
+		                   _SyncCheckpointGetOffset(psSyncCheckpoint),
+		                   OSAtomicRead(&psSyncCheckpoint->hRefCount),
+		                   OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount),
+		                   psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount,
+		                   psSyncCheckpoint->azName);
+	}
+}
+
+static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+					IMG_UINT32 ui32VerbLevel,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+	DLLIST_NODE *psNode, *psNext;
+
+	if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+	{
+		PVR_DUMPDEBUG_LOG("------[ Active Sync Checkpoints ]------");
+		OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+		dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+		{
+			_SyncCheckpointState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+		OSLockRelease(psDevNode->hSyncCheckpointListLock);
+	}
+}
+
+PVRSRV_ERROR
+SyncCheckpointInit(PPVRSRV_DEVICE_NODE psDevNode)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
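+	/* NB: gbSyncCheckpointInit is a single driver-global flag, so this
+	 * initialisation is only carried out for the first device node. */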
+	if (!gbSyncCheckpointInit)
+	{
+		eError = OSLockCreate(&psDevNode->hSyncCheckpointListLock);
+		if (eError == PVRSRV_OK)
+		{
+			dllist_init(&psDevNode->sSyncCheckpointSyncsList);
+
+			eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncCheckpointNotify,
+													psDevNode,
+													_SyncCheckpointDebugRequest,
+													DEBUG_REQUEST_SYNCCHECKPOINT,
+													(PVRSRV_DBGREQ_HANDLE)psDevNode);
+			if (eError == PVRSRV_OK)
+			{
+				if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+				{
+					_SyncCheckpointRecordListInit(psDevNode);
+				}
+				gbSyncCheckpointInit = IMG_TRUE;
+			}
+			else
+			{
+				/* free the created lock */
+				OSLockDestroy(psDevNode->hSyncCheckpointListLock);
+				psDevNode->hSyncCheckpointListLock = NULL;
+			}
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s FAILED to create psDevNode->hSyncCheckpointListLock",
+					__func__));
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s called but already initialised", __func__));
+	}
+	return eError;
+}
+
+void SyncCheckpointDeinit(PPVRSRV_DEVICE_NODE psDevNode)
+{
+	PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncCheckpointNotify);
+	psDevNode->hSyncCheckpointNotify = NULL;
+	OSLockDestroy(psDevNode->hSyncCheckpointListLock);
+	psDevNode->hSyncCheckpointListLock = NULL;
+	if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+	{
+		_SyncCheckpointRecordListDeinit(psDevNode);
+	}
+	gbSyncCheckpointInit = IMG_FALSE;
+}
+
+void SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr,
+                                IMG_CHAR * pszSyncInfo, size_t len)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_BOOL bFound = IMG_FALSE;
+
+	if (!pszSyncInfo)
+	{
+		return;
+	}
+
+	pszSyncInfo[0] = '\0';
+
+	OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+	dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext)
+	{
+		struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec =
+				IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
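+		/* The +1 below allows for the checkpoint's firmware address
+		 * carrying bit 0 set when compared against block base + offset. */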
+		if ((psSyncCheckpointRec->ui32FwBlockAddr + psSyncCheckpointRec->ui32SyncOffset + 1) == ui32FwAddr)
+		{
+			SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock;
+			if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr)
+			{
+				void *pSyncCheckpointAddr = (void*)(((IMG_BYTE*)psSyncCheckpointBlock->pui32LinAddr) +
+				                                    psSyncCheckpointRec->ui32SyncOffset);
+				OSSNPrintf(pszSyncInfo, len, "%s Checkpoint:%05u (%s)",
+				           (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ?
+				               "SIGNALLED" :
+				               ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ?
+				                   "ERRORED" : "ACTIVE"),
+				           psSyncCheckpointRec->uiPID,
+				           psSyncCheckpointRec->szClassName);
+			}
+			else
+			{
+				OSSNPrintf(pszSyncInfo, len, "Checkpoint:%05u (%s)",
+				           psSyncCheckpointRec->uiPID,
+				           psSyncCheckpointRec->szClassName);
+			}
+
+			bFound = IMG_TRUE;
+			break;
+		}
+	}
+	OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+	if (!bFound && (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT))
+	{
+		OSSNPrintf(pszSyncInfo, len, "(Record may be lost)");
+	}
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointRecordAdd(
+			PSYNC_CHECKPOINT_RECORD_HANDLE * phRecord,
+			SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock,
+			IMG_UINT32 ui32FwBlockAddr,
+			IMG_UINT32 ui32SyncOffset,
+			IMG_UINT32 ui32UID,
+			IMG_UINT32 ui32ClassNameSize,
+			const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt)
+{
+	struct SYNC_CHECKPOINT_RECORD * psSyncRec;
+	_SYNC_CHECKPOINT_CONTEXT *psContext = hSyncCheckpointBlock->psContext;
+	PVRSRV_DEVICE_NODE *psDevNode = psContext->psDevNode;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!phRecord)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*phRecord = NULL;
+
+	psSyncRec = OSAllocMem(sizeof(*psSyncRec));
+	PVR_LOGG_IF_NOMEM(psSyncRec, "OSAllocMem", eError, fail_alloc); /* Sets OOM error code */
+
+	psSyncRec->psDevNode = psDevNode;
+	psSyncRec->psSyncCheckpointBlock = hSyncCheckpointBlock;
+	psSyncRec->ui32SyncOffset = ui32SyncOffset;
+	psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
+	psSyncRec->ui64OSTime = OSClockns64();
+	psSyncRec->uiPID = OSGetCurrentProcessID();
+	psSyncRec->ui32UID = ui32UID;
+	psSyncRec->pSyncCheckpt = pSyncCheckpt;
+	if (pszClassName)
+	{
+		if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH)
+			ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH;
+		/* Copy over the class name annotation */
+		OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize);
+	}
+	else
+	{
+		/* No class name annotation */
+		psSyncRec->szClassName[0] = 0;
+	}
+
+	OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+	if (psDevNode->ui32SyncCheckpointRecordCount < SYNC_CHECKPOINT_RECORD_LIMIT)
+	{
+		dllist_add_to_head(&psDevNode->sSyncCheckpointRecordList, &psSyncRec->sNode);
+		psDevNode->ui32SyncCheckpointRecordCount++;
+
+		if (psDevNode->ui32SyncCheckpointRecordCount > psDevNode->ui32SyncCheckpointRecordCountHighWatermark)
+		{
+			psDevNode->ui32SyncCheckpointRecordCountHighWatermark = psDevNode->ui32SyncCheckpointRecordCount;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\". %u records already exist.",
+				__func__,
+				pszClassName,
+				psDevNode->ui32SyncCheckpointRecordCount));
+		OSFreeMem(psSyncRec);
+		psSyncRec = NULL;
+		eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+	}
+	OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+	*phRecord = (PSYNC_CHECKPOINT_RECORD_HANDLE)psSyncRec;
+
+	fail_alloc:
+	return eError;
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord)
+{
+	struct SYNC_CHECKPOINT_RECORD **ppFreedSync;
+	struct SYNC_CHECKPOINT_RECORD *pSync = (struct SYNC_CHECKPOINT_RECORD*)hRecord;
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	if (!hRecord)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevNode = pSync->psDevNode;
+
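+	/* Removed records are not freed immediately: they are retained in a
+	 * fixed-length ring of recently-freed records
+	 * (apsSyncCheckpointRecordsFreed) so they can still appear in debug
+	 * dumps; the oldest entry is freed when its slot is reused. */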
+	OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+
+	dllist_remove_node(&pSync->sNode);
+
+	if (psDevNode->uiSyncCheckpointRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: psDevNode->uiSyncCheckpointRecordFreeIdx out of range",
+				__func__));
+		psDevNode->uiSyncCheckpointRecordFreeIdx = 0;
+	}
+	ppFreedSync = &psDevNode->apsSyncCheckpointRecordsFreed[psDevNode->uiSyncCheckpointRecordFreeIdx];
+	psDevNode->uiSyncCheckpointRecordFreeIdx =
+			(psDevNode->uiSyncCheckpointRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN;
+
+	if (*ppFreedSync)
+	{
+		OSFreeMem(*ppFreedSync);
+	}
+	pSync->psSyncCheckpointBlock = NULL;
+	pSync->ui64OSTime = OSClockns64();
+	*ppFreedSync = pSync;
+
+	psDevNode->ui32SyncCheckpointRecordCount--;
+
+	OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+	return PVRSRV_OK;
+}
+
+#define NS_IN_S (1000000000UL)
+static void _SyncCheckpointRecordPrint(struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec,
+                                       IMG_UINT64 ui64TimeNow,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpoint = (_SYNC_CHECKPOINT *)psSyncCheckpointRec->pSyncCheckpt;
+	SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock;
+	IMG_UINT64 ui64DeltaS;
+	IMG_UINT32 ui32DeltaF;
+	IMG_UINT64 ui64Delta = ui64TimeNow - psSyncCheckpointRec->ui64OSTime;
+	ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF);
+
+	if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr)
+	{
+		void *pSyncCheckpointAddr;
+		pSyncCheckpointAddr = (void*)( ((IMG_BYTE*) psSyncCheckpointBlock->pui32LinAddr) + psSyncCheckpointRec->ui32SyncOffset);
+
+		PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x (r%d:e%d:f%d) State=%s (%s)",
+		                  psSyncCheckpointRec->uiPID,
+		                  ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID,
+		                  (psSyncCheckpointRec->ui32FwBlockAddr + psSyncCheckpointRec->ui32SyncOffset),
+		                  OSAtomicRead(&psSyncCheckpoint->hRefCount),
+		                  OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount),
+		                  psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount,
+		                  (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ?
+		                      "SIGNALLED" :
+		                      ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ?
+		                          "ERRORED" : "ACTIVE"),
+		                  psSyncCheckpointRec->szClassName);
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x State=<null_ptr> (%s)",
+		                  psSyncCheckpointRec->uiPID,
+		                  ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID,
+		                  (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset),
+		                  psSyncCheckpointRec->szClassName
+		);
+	}
+}
+
+static void _SyncCheckpointRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+                                         IMG_UINT32 ui32VerbLevel,
+                                         DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                         void *pvDumpDebugFile)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+	IMG_UINT64 ui64TimeNowS;
+	IMG_UINT32 ui32TimeNowF;
+	IMG_UINT64 ui64TimeNow = OSClockns64();
+	DLLIST_NODE *psNode, *psNext;
+
+	ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF);
+
+	if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+	{
+		IMG_UINT32 i;
+
+		OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+
+		PVR_DUMPDEBUG_LOG("Dumping allocated sync checkpoints. Allocated: %u High watermark: %u (time ref %05" IMG_UINT64_FMTSPEC ".%09u)",
+		                  psDevNode->ui32SyncCheckpointRecordCount,
+		                  psDevNode->ui32SyncCheckpointRecordCountHighWatermark,
+		                  ui64TimeNowS,
+		                  ui32TimeNowF);
+		if (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT)
+		{
+			PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.",
+			                  SYNC_CHECKPOINT_RECORD_LIMIT);
+		}
+		PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)",
+		                  "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation");
+
+		dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext)
+		{
+			struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec =
+					IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
+			_SyncCheckpointRecordPrint(psSyncCheckpointRec, ui64TimeNow,
+			                           pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+
+		PVR_DUMPDEBUG_LOG("Dumping all recently freed sync checkpoints @ %05" IMG_UINT64_FMTSPEC ".%09u",
+		                  ui64TimeNowS,
+		                  ui32TimeNowF);
+		PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)",
+		                  "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation");
+		for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncCheckpointRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN);
+				i != psDevNode->uiSyncCheckpointRecordFreeIdx;
+				i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN))
+		{
+			if (psDevNode->apsSyncCheckpointRecordsFreed[i])
+			{
+				_SyncCheckpointRecordPrint(psDevNode->apsSyncCheckpointRecordsFreed[i],
+				                           ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+			}
+			else
+			{
+				break;
+			}
+		}
+		OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+	}
+}
+#undef NS_IN_S
+
+static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRV_ERROR eError;
+
+	eError = OSLockCreate(&psDevNode->hSyncCheckpointRecordLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock_create;
+	}
+	dllist_init(&psDevNode->sSyncCheckpointRecordList);
+
+	psDevNode->ui32SyncCheckpointRecordCount = 0;
+	psDevNode->ui32SyncCheckpointRecordCountHighWatermark = 0;
+
+	eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncCheckpointRecordNotify,
+	                                        psDevNode,
+	                                        _SyncCheckpointRecordRequest,
+	                                        DEBUG_REQUEST_SYNCCHECKPOINT,
+	                                        (PVRSRV_DBGREQ_HANDLE)psDevNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_dbg_register;
+	}
+
+	return PVRSRV_OK;
+
+	fail_dbg_register:
+	OSLockDestroy(psDevNode->hSyncCheckpointRecordLock);
+	fail_lock_create:
+	return eError;
+}
+
+static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	DLLIST_NODE *psNode, *psNext;
+	int i;
+
+	OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+	dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext)
+	{
+		struct SYNC_CHECKPOINT_RECORD *pSyncCheckpointRec =
+				IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
+
+		dllist_remove_node(psNode);
+		OSFreeMem(pSyncCheckpointRec);
+	}
+
+	for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++)
+	{
+		if (psDevNode->apsSyncCheckpointRecordsFreed[i])
+		{
+			OSFreeMem(psDevNode->apsSyncCheckpointRecordsFreed[i]);
+			psDevNode->apsSyncCheckpointRecordsFreed[i] = NULL;
+		}
+	}
+	OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+	if (psDevNode->hSyncCheckpointRecordNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncCheckpointRecordNotify);
+	}
+	OSLockDestroy(psDevNode->hSyncCheckpointRecordLock);
+}
+
+PVRSRV_ERROR
+SyncCheckpointPDumpPol(PSYNC_CHECKPOINT psSyncCheckpoint, PDUMP_FLAGS_T ui32PDumpFlags)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	DevmemPDumpDevmemPol32(psSyncCheckpointInt->psSyncCheckpointBlock->hMemDesc,
+	                       _SyncCheckpointGetOffset(psSyncCheckpointInt),
+	                       PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+	                       0xFFFFFFFF,
+	                       PDUMP_POLL_OPERATOR_EQUAL,
+	                       ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+static PVRSRV_ERROR
+_SyncCheckpointSignalPDump(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+	/*
+		We might be asked to PDump sync state outside of the capture range
+		(e.g. texture uploads), so make this continuous.
+	 */
+	DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc,
+	                          _SyncCheckpointGetOffset(psSyncCheckpoint),
+	                          PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+	                          PDUMP_FLAGS_CONTINUOUS);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointErrorPDump(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+	/*
+		We might be asked to PDump sync state outside of the capture range
+		(e.g. texture uploads), so make this continuous.
+	 */
+	DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc,
+	                          _SyncCheckpointGetOffset(psSyncCheckpoint),
+	                          PVRSRV_SYNC_CHECKPOINT_ERRORED,
+	                          PDUMP_FLAGS_CONTINUOUS);
+
+	return PVRSRV_OK;
+}
+#endif
+
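+/* Walk the context's deferred-cleanup list and free (or return to the pool)
+ * any checkpoint whose firmware reference count has now caught up with the
+ * number of references enqueued against it. */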
+static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+	DLLIST_NODE *psNode, *psNext;
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)psContext->psDevNode;
+
+	/* Check the deferred cleanup list and free any sync checkpoints we can */
+	OSLockAcquire(psContext->psContextCtl->hDeferredCleanupListLock);
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+	PVR_DPF((PVR_DBG_WARNING, "%s called", __func__));
+#endif
+
+	if (dllist_is_empty(&psContext->psContextCtl->sDeferredCleanupListHead))
+	{
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING, "%s: Defer free list is empty", __func__));
+#endif
+	}
+
+	dllist_foreach_node(&psContext->psContextCtl->sDeferredCleanupListHead, psNode, psNext)
+	{
+		_SYNC_CHECKPOINT *psSyncCheckpointInt =
+				IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sDeferredFreeListNode);
+
+		if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount ==
+				(IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)))
+		{
+			if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+				&& psSyncCheckpointInt->hRecord)
+			{
+				PVRSRV_ERROR eError;
+				/* remove this sync record */
+				eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord);
+				PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove");
+			}
+
+			/* Remove the sync checkpoint from the deferred free list */
+			dllist_remove_node(&psSyncCheckpointInt->sDeferredFreeListNode);
+
+			/* Remove the sync checkpoint from the global list */
+			OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+			dllist_remove_node(&psSyncCheckpointInt->sListNode);
+			OSLockRelease(psDevNode->hSyncCheckpointListLock);
+
+			RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr);
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s attempting to return sync checkpoint (ID:%d)<%p> to pool",
+					__func__,
+					psSyncCheckpointInt->ui32UID,
+					(void*)psSyncCheckpointInt));
+#endif
+			if (!_PutCheckpointInPool(psSyncCheckpointInt))
+#endif
+			{
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+				PVR_DPF((PVR_DBG_WARNING, "%s pool is full, so just free it", __func__));
+#endif
+#endif
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+				PVR_DPF((PVR_DBG_WARNING,
+						"%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), "
+						"psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+						__func__,
+						psSyncCheckpointInt->ui32UID,
+						(void*)psSyncCheckpointInt,
+						(void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA,
+						psSyncCheckpointInt->uiSpanAddr));
+#endif
+				_FreeSyncCheckpoint(psSyncCheckpointInt);
+			}
+		}
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+		else
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s psSyncCheckpoint '%s' (ID:%d)<%p>, still pending (enq=%d,FWRef=%d)",
+					__func__,
+					psSyncCheckpointInt->azName,
+					psSyncCheckpointInt->ui32UID,
+					(void*)psSyncCheckpointInt,
+					(IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)),
+					psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+		}
+#endif
+	}
+	OSLockRelease(psContext->psContextCtl->hDeferredCleanupListLock);
+}
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
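+/* The checkpoint pool is a fixed-size ring buffer of recycled checkpoints,
+ * indexed by read (Rp) and write (Wp) pointers. Allocation from the pool is
+ * only permitted once it holds more than SYNC_CHECKPOINT_POOL_SEDIMENT
+ * entries, so a small reserve always remains resident. */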
+static _SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpoint = NULL;
+
+	/* Acquire sync checkpoint pool lock */
+	OSLockAcquire(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+	/* Check if we can allocate from the pool */
+	if (psContext->psContextCtl->bSyncCheckpointPoolValid &&
+	    (psContext->psContextCtl->ui32SyncCheckpointPoolCount > SYNC_CHECKPOINT_POOL_SEDIMENT) &&
+	    (psContext->psContextCtl->ui32SyncCheckpointPoolWp != psContext->psContextCtl->ui32SyncCheckpointPoolRp))
+	{
+		/* Get the next sync checkpoint from the pool */
+		psSyncCheckpoint = psContext->psContextCtl->psSyncCheckpointPool[psContext->psContextCtl->ui32SyncCheckpointPoolRp++];
+		if (psContext->psContextCtl->ui32SyncCheckpointPoolRp == SYNC_CHECKPOINT_POOL_SIZE)
+		{
+			psContext->psContextCtl->ui32SyncCheckpointPoolRp = 0;
+		}
+		psContext->psContextCtl->ui32SyncCheckpointPoolCount--;
+		psContext->psContextCtl->bSyncCheckpointPoolFull = IMG_FALSE;
+		psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE;
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s checkpoint(old ID:%d)<-POOL(%d/%d), psContext=<%p>, poolRp=%d, poolWp=%d",
+				__func__,
+				psSyncCheckpoint->ui32UID,
+				psContext->psContextCtl->ui32SyncCheckpointPoolCount,
+				SYNC_CHECKPOINT_POOL_SIZE,
+				(void*)psContext, psContext->psContextCtl->ui32SyncCheckpointPoolRp, psContext->psContextCtl->ui32SyncCheckpointPoolWp));
+#endif
+	}
+	/* Release sync checkpoint pool lock */
+	OSLockRelease(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+	return psSyncCheckpoint;
+}
+
+static IMG_BOOL _PutCheckpointInPool(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+	IMG_BOOL bReturnedToPool = IMG_FALSE;
+	_SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext;
+
+	/* Acquire sync checkpoint pool lock */
+	OSLockAcquire(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+	/* Check if pool has space */
+	if (psContext->psContextCtl->bSyncCheckpointPoolValid &&
+			!psContext->psContextCtl->bSyncCheckpointPoolFull)
+	{
+		/* Put the sync checkpoint into the next write slot in the pool */
+		psContext->psContextCtl->psSyncCheckpointPool[psContext->psContextCtl->ui32SyncCheckpointPoolWp++] = psSyncCheckpoint;
+		if (psContext->psContextCtl->ui32SyncCheckpointPoolWp == SYNC_CHECKPOINT_POOL_SIZE)
+		{
+			psContext->psContextCtl->ui32SyncCheckpointPoolWp = 0;
+		}
+		psContext->psContextCtl->ui32SyncCheckpointPoolCount++;
+		psContext->psContextCtl->bSyncCheckpointPoolFull =
+				((psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) &&
+						(psContext->psContextCtl->ui32SyncCheckpointPoolWp == psContext->psContextCtl->ui32SyncCheckpointPoolRp));
+		bReturnedToPool = IMG_TRUE;
+		psSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_UNDEF;
+		psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_POOL;
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s checkpoint(ID:%d)->POOL(%d/%d), poolRp=%d, poolWp=%d",
+				__func__,
+				psSyncCheckpoint->ui32UID,
+				psContext->psContextCtl->ui32SyncCheckpointPoolCount,
+				SYNC_CHECKPOINT_POOL_SIZE, psContext->psContextCtl->ui32SyncCheckpointPoolRp, psContext->psContextCtl->ui32SyncCheckpointPoolWp));
+#endif
+	}
+	/* Release sync checkpoint pool lock */
+	OSLockRelease(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+	return bReturnedToPool;
+}
+
+static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = NULL;
+	IMG_UINT32 ui32ItemsFreed = 0;
+
+	/* Acquire sync checkpoint pool lock */
+	OSLockAcquire(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+	PVR_DPF((PVR_DBG_WARNING,
+			"%s psContext=<%p>, bSyncCheckpointPoolValid=%d, ui32SyncCheckpointPoolCount=%d",
+			__func__,
+			(void*)psContext,
+			psContext->psContextCtl->bSyncCheckpointPoolValid,
+			psContext->psContextCtl->ui32SyncCheckpointPoolCount));
+#endif
+	/* While the pool still contains sync checkpoints, free them */
+	while (psContext->psContextCtl->bSyncCheckpointPoolValid &&
+			(psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0))
+	{
+		/* Get the sync checkpoint from the next read slot in the pool */
+		psSyncCheckpointInt = psContext->psContextCtl->psSyncCheckpointPool[psContext->psContextCtl->ui32SyncCheckpointPoolRp++];
+		if (psContext->psContextCtl->ui32SyncCheckpointPoolRp == SYNC_CHECKPOINT_POOL_SIZE)
+		{
+			psContext->psContextCtl->ui32SyncCheckpointPoolRp = 0;
+		}
+		psContext->psContextCtl->ui32SyncCheckpointPoolCount--;
+		psContext->psContextCtl->bSyncCheckpointPoolFull =
+				((psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) &&
+						(psContext->psContextCtl->ui32SyncCheckpointPoolWp == psContext->psContextCtl->ui32SyncCheckpointPoolRp));
+
+		if (psSyncCheckpointInt)
+		{
+			if (psSyncCheckpointInt->ui32ValidationCheck != SYNC_CHECKPOINT_PATTERN_IN_POOL)
+			{
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+				PVR_DPF((PVR_DBG_WARNING,
+						"%s pool contains invalid entry (ui32ValidationCheck=0x%x)",
+						__func__,
+						psSyncCheckpointInt->ui32ValidationCheck));
+#endif
+			}
+
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+			PVR_DPF((PVR_DBG_WARNING,
+					 "%s psSyncCheckpoint(ID:%d)",
+					 __func__, psSyncCheckpointInt->ui32UID));
+			PVR_DPF((PVR_DBG_WARNING,
+					 "%s psSyncCheckpoint->ui32ValidationCheck=0x%x",
+					 __func__, psSyncCheckpointInt->ui32ValidationCheck));
+			PVR_DPF((PVR_DBG_WARNING,
+					 "%s psSyncCheckpoint->uiSpanAddr=0x%llx",
+					 __func__, psSyncCheckpointInt->uiSpanAddr));
+			PVR_DPF((PVR_DBG_WARNING,
+					 "%s psSyncCheckpoint->psSyncCheckpointBlock=<%p>",
+					 __func__, (void*)psSyncCheckpointInt->psSyncCheckpointBlock));
+			PVR_DPF((PVR_DBG_WARNING,
+					 "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext=<%p>",
+					 __func__, (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext));
+			PVR_DPF((PVR_DBG_WARNING,
+					 "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA=<%p>",
+					 __func__, (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA));
+#endif
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), "
+					"psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+					__func__,
+					psSyncCheckpointInt->ui32UID,
+					(void*)psSyncCheckpointInt,
+					(void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA,
+					psSyncCheckpointInt->uiSpanAddr));
+#endif
+			_FreeSyncCheckpoint(psSyncCheckpointInt);
+			ui32ItemsFreed++;
+		}
+		else
+		{
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+			PVR_DPF((PVR_DBG_WARNING, "%s pool contains NULL entry", __func__));
+#endif
+		}
+	}
+	/* Release sync checkpoint pool lock */
+	OSLockRelease(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+	return ui32ItemsFreed;
+}
+#endif /* (SYNC_CHECKPOINT_POOL_SIZE > 0) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint.h
new file mode 100644
index 0000000..9d963e9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint.h
@@ -0,0 +1,706 @@
+/*************************************************************************/ /*!
+@File
+@Title          Synchronisation checkpoint interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the client side interface for synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_
+#define _SYNC_CHECKPOINT_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_sync_km.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+#include "dllist.h"
+#include "pvr_debug.h"
+#include "device_connection.h"
+#include "opaque_types.h"
+
+#ifndef CHECKPOINT_TYPES
+#define CHECKPOINT_TYPES
+typedef struct _SYNC_CHECKPOINT_CONTEXT *PSYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT *PSYNC_CHECKPOINT;
+#endif
+
+/* Definitions for functions to be implemented by the OS-specific sync code.
+   The OS-specific sync code calls SyncCheckpointRegisterFunctions() when it
+   is initialised, to register the functions we can then call back into. */
+#ifndef _CHECKPOINT_PFNS_
+#define _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                                                             PVRSRV_FENCE fence,
+                                                             IMG_UINT32 *nr_checkpoints,
+                                                             PSYNC_CHECKPOINT **checkpoint_handles,
+                                                             IMG_UINT64 *pui64FenceUID);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(const IMG_CHAR *fence_name,
+                                                            PVRSRV_TIMELINE timeline,
+                                                            PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                                                            PVRSRV_FENCE *new_fence,
+                                                            IMG_UINT64 *pui64FenceUID,
+                                                            void **ppvFenceFinaliseData,
+                                                            PSYNC_CHECKPOINT *new_checkpoint_handle,
+                                                            IMG_HANDLE *timeline_update_sync,
+                                                            IMG_UINT32 *timeline_update_value);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data);
+typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data);
+typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr);
+typedef IMG_UINT32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(IMG_UINT32 num_ufos, IMG_UINT32 *vaddrs);
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+typedef IMG_BOOL (*PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN)(IMG_UINT32 ui32FwAddr, IMG_UINT32 ui32Value);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN)(void);
+typedef void (*PFN_SYNC_CHECKPOINT_CHECK_STATE_FN)(void);
+#endif
+
+#define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20
+
+typedef struct
+{
+	PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve;
+	PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate;
+	PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback;
+	PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise;
+	PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines;
+	PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem;
+	PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs;
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+	PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN pfnCheckpointHasSignalled;
+	PFN_SYNC_CHECKPOINT_CHECK_STATE_FN pfnCheckState;
+	PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN pfnSignalWaiters;
+#endif
+	IMG_CHAR pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN];
+} PFN_SYNC_CHECKPOINT_STRUCT;
+
+PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns);
+
+#endif /* ifndef _CHECKPOINT_PFNS_ */
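+
+/* Illustrative sketch (not part of this header): an OS-native sync layer
+ * would populate a PFN_SYNC_CHECKPOINT_STRUCT with its callbacks and
+ * register them at init time. The OSNative* callback names below are
+ * hypothetical placeholders for the implementation's own functions.
+ *
+ *   static PFN_SYNC_CHECKPOINT_STRUCT gsOSSyncFns = {
+ *       .pfnFenceResolve          = OSNativeFenceResolve,
+ *       .pfnFenceCreate           = OSNativeFenceCreate,
+ *       .pfnFenceDataRollback     = OSNativeFenceRollback,
+ *       .pfnFenceFinalise         = OSNativeFenceFinalise,
+ *       .pfnNoHWUpdateTimelines   = OSNativeNoHWUpdateTimelines,
+ *       .pfnFreeCheckpointListMem = OSNativeFreeListMem,
+ *       .pfnDumpInfoOnStalledUFOs = OSNativeDumpStalledUFOs,
+ *       .pszImplName              = "os_native",
+ *   };
+ *
+ *   PVRSRV_ERROR eError = SyncCheckpointRegisterFunctions(&gsOSSyncFns);
+ */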
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointContextCreate
+
+@Description    Create a new synchronisation checkpoint context
+
+@Input          psDevNode                 Device node
+
+@Output         ppsSyncCheckpointContext  Handle to the created synchronisation
+                                          checkpoint context
+
+@Return         PVRSRV_OK if the synchronisation checkpoint context was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode,
+                            PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointContextDestroy
+
+@Description    Destroy a synchronisation checkpoint context
+
+@Input          psSyncCheckpointContext  Handle to the synchronisation
+                                         checkpoint context to destroy
+
+@Return         PVRSRV_OK if the synchronisation checkpoint context was
+                successfully destroyed.
+                PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT if the context still
+                has sync checkpoints defined
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointContextRef
+
+@Description    Takes a reference on a synchronisation checkpoint context
+
+@Input          psContext  Handle to the synchronisation checkpoint context
+                           on which a ref is to be taken
+
+@Return         None
+*/
+/*****************************************************************************/
+void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointContextUnref
+
+@Description    Drops a reference taken on a synchronisation checkpoint
+                context
+
+@Input          psContext  Handle to the synchronisation checkpoint context
+                           on which the ref is to be dropped
+
+@Return         None
+*/
+/*****************************************************************************/
+void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointAlloc
+
+@Description    Allocate a new synchronisation checkpoint on the specified
+                synchronisation checkpoint context
+
+@Input          psSyncContext           Handle to the synchronisation
+                                        checkpoint context
+
+@Input          hTimeline               Timeline on which this sync
+                                        checkpoint is being created
+
+@Input          hFence                  Fence as passed into the
+                                        pfnFenceResolve API, when that API
+                                        encounters a non-PVR fence as part
+                                        of its input fence. From all other
+                                        callers this argument must be
+                                        PVRSRV_NO_FENCE.
+
+@Input          pszCheckpointName       Sync checkpoint source annotation
+                                        (will be truncated to at most
+                                         PVRSRV_SYNC_NAME_LENGTH chars)
+
+@Output         ppsSyncCheckpoint       Created synchronisation checkpoint
+
+@Return         PVRSRV_OK if the synchronisation checkpoint was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
+                    PVRSRV_TIMELINE hTimeline,
+                    PVRSRV_FENCE hFence,
+                    const IMG_CHAR *pszCheckpointName,
+                    PSYNC_CHECKPOINT *ppsSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointFree
+
+@Description    Free a synchronisation checkpoint
+                The reference count held for the synchronisation checkpoint
+                is decremented; if it becomes zero, the checkpoint is freed.
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to free
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointSignal
+
+@Description    Signal the synchronisation checkpoint
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to signal
+
+@Input          ui32FenceSyncFlags      Flags used for controlling HWPerf behavior
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags);
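+
+/* Illustrative lifecycle sketch (not part of this header): allocate a
+ * checkpoint on a context, signal it, then drop the allocation reference.
+ * hOSTimeline is a hypothetical timeline handle, and the flags value 0 is
+ * a placeholder for the caller's HWPerf fence-sync flags.
+ *
+ *   PSYNC_CHECKPOINT psCheckpoint;
+ *   PVRSRV_ERROR eError;
+ *
+ *   eError = SyncCheckpointAlloc(psSyncContext, hOSTimeline,
+ *                                PVRSRV_NO_FENCE, "example-chkpt",
+ *                                &psCheckpoint);
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       SyncCheckpointSignal(psCheckpoint, 0);
+ *       SyncCheckpointFree(psCheckpoint);
+ *   }
+ */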
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointSignalNoHW
+
+@Description    Signal the synchronisation checkpoint in a NO_HARDWARE build
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to signal
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointError
+
+@Description    Error the synchronisation checkpoint
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to error
+
+@Input          ui32FenceSyncFlags      Flags used for controlling HWPerf behavior
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointStateFromUFO
+
+@Description    Returns the current state of the synchronisation checkpoint
+                which has the given UFO firmware address
+
+@Input          psDevNode               The device owning the sync
+                                        checkpoint
+
+@Input          ui32FwAddr              The firmware address of the sync
+                                        checkpoint
+
+@Return         The current state (32-bit value) of the sync checkpoint
+*/
+/*****************************************************************************/
+IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode,
+                                      IMG_UINT32 ui32FwAddr);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointErrorFromUFO
+
+@Description    Error the synchronisation checkpoint which has the
+                given UFO firmware address
+
+@Input          psDevNode               The device owning the sync
+                                        checkpoint to be errored
+
+@Input          ui32FwAddr              The firmware address of the sync
+                                        checkpoint to be errored
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointRollbackFromUFO
+
+@Description    Drop the enqueued count reference taken on the synchronisation
+                checkpoint on behalf of the firmware.
+                Called in the event of a DM Kick failing.
+
+@Input          psDevNode               The device owning the sync
+                                        checkpoint to be rolled back
+
+@Input          ui32FwAddr              The firmware address of the sync
+                                        checkpoint to be rolled back
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr);
+
+#if defined(PVRSRV_SYNC_CHECKPOINT_CCB)
+/*************************************************************************/ /*!
+@Function       SyncCheckpointUFOHasSignalled
+
+@Description    Inform the sync backend that a specific checkpoint UFO has been
+                signalled by the firmware so that the host view of the object
+                can be updated.
+
+@Input          psDevNode               The device owning the sync
+                                        checkpoint that has been signalled.
+
+@Input          ui32FwAddr              The firmware address of the sync
+                                        checkpoint that has been signalled.
+
+@Input          ui32Value               The new value of the sync checkpoint.
+
+@Return         IMG_TRUE if a backing sync point has been found and updated,
+                IMG_FALSE otherwise.
+*/
+/*****************************************************************************/
+IMG_BOOL
+SyncCheckpointUFOHasSignalled(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr, IMG_UINT32 ui32Value);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointCheckState
+
+@Description    Perform a full state check of all sync points currently
+                alive.
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointCheckState(void);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointSignalWaiters
+
+@Description    Signal any clients waiting on syncs which have been updated.
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointSignalWaiters(void);
+#endif /* defined(PVRSRV_SYNC_CHECKPOINT_CCB) */
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointIsSignalled
+
+@Description    Returns IMG_TRUE if the synchronisation checkpoint is
+                signalled or errored
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to test
+
+@Input          ui32FenceSyncFlags      Flags used for controlling HWPerf behavior
+
+@Return         IMG_TRUE if the synchronisation checkpoint is signalled
+                or errored, IMG_FALSE otherwise
+*/
+/*****************************************************************************/
+IMG_BOOL
+SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointIsErrored
+
+@Description    Returns IMG_TRUE if the synchronisation checkpoint is
+                errored
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to test
+
+@Input          ui32FenceSyncFlags      Flags used for controlling HWPerf behavior
+
+@Return         IMG_TRUE if the synchronisation checkpoint is errored,
+                IMG_FALSE otherwise
+*/
+/*****************************************************************************/
+IMG_BOOL
+SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointTakeRef
+
+@Description    Take a reference on a synchronisation checkpoint
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to take a
+                                        reference on
+
+@Return         PVRSRV_OK if a reference was taken on the synchronisation
+                checkpoint
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointDropRef
+
+@Description    Drop a reference on a synchronisation checkpoint
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to drop a
+                                        reference on
+
+@Return         PVRSRV_OK if a reference was dropped on the synchronisation
+                checkpoint
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointResolveFence
+
+@Description    Resolve a fence, returning a list of the sync checkpoints
+                that fence contains.
+                This function in turn calls a function provided by the
+                OS native sync implementation.
+
+@Input          psSyncCheckpointContext The sync checkpoint context
+                                        on which checkpoints should be
+                                        created (in the event of the fence
+                                        having a native sync pt with no
+                                        associated sync checkpoint)
+
+@Input          hFence                  The fence to be resolved
+
+@Output         pui32NumSyncCheckpoints The number of sync checkpoints the
+                                        fence contains. Can return 0 if
+                                        passed a null (-1) fence.
+
+@Output         papsSyncCheckpoints     List of sync checkpoints the fence
+                                        contains
+
+@Output         puiFenceUID             Unique ID of the resolved fence
+
+@Return         PVRSRV_OK if a valid fence was provided.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                           PVRSRV_FENCE hFence,
+                           IMG_UINT32 *pui32NumSyncCheckpoints,
+                           PSYNC_CHECKPOINT **papsSyncCheckpoints,
+                           IMG_UINT64 *puiFenceUID);
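+
+/* Illustrative sketch (not part of this header): resolve an input fence,
+ * walk the returned checkpoints, then release the list memory via
+ * SyncCheckpointFreeCheckpointListMem() (declared later in this header).
+ * hInFence is a hypothetical fence handle.
+ *
+ *   IMG_UINT32 ui32NumCheckpoints, i;
+ *   PSYNC_CHECKPOINT *apsCheckpoints;
+ *   IMG_UINT64 ui64FenceUID;
+ *
+ *   if (SyncCheckpointResolveFence(psSyncCheckpointContext, hInFence,
+ *                                  &ui32NumCheckpoints, &apsCheckpoints,
+ *                                  &ui64FenceUID) == PVRSRV_OK)
+ *   {
+ *       for (i = 0; i < ui32NumCheckpoints; i++)
+ *       {
+ *           e.g. enqueue a check on apsCheckpoints[i]
+ *       }
+ *       SyncCheckpointFreeCheckpointListMem(apsCheckpoints);
+ *   }
+ */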
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointCreateFence
+
+@Description    Create a fence containing a single sync checkpoint.
+                Return the fence and a ptr to sync checkpoint it contains.
+                This function in turn calls a function provided by the
+                OS native sync implementation.
+
+@Input          psDeviceNode            Device node on which the new fence
+                                        is to be created
+
+@Input          pszFenceName            String to assign to the new fence
+                                        (for debugging purposes)
+
+@Input          hTimeline               Timeline on which the new fence is
+                                        to be created
+
+@Input          psSyncCheckpointContext Sync checkpoint context to be used
+                                        when creating the new fence
+
+@Output         phNewFence              The newly created fence
+
+@Output         pui64FenceUID           Unique ID of the created fence
+
+@Output         ppvFenceFinaliseData    Any data needed to finalise the fence
+                                        in a later call to the function
+                                        SyncCheckpointFinaliseFence()
+
+@Output         psNewSyncCheckpoint     The sync checkpoint contained in
+                                        the new fence
+
+@Output         ppvTimelineUpdateSyncPrim  Timeline update sync prim
+                                           returned by the implementation
+                                           (if any)
+
+@Output         pui32TimelineUpdateValue   Timeline update value associated
+                                           with the new fence
+
+@Return         PVRSRV_OK if a valid fence was provided.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointCreateFence(PPVRSRV_DEVICE_NODE psDeviceNode,
+                          const IMG_CHAR *pszFenceName,
+                          PVRSRV_TIMELINE hTimeline,
+                          PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                          PVRSRV_FENCE *phNewFence,
+                          IMG_UINT64 *pui64FenceUID,
+                          void **ppvFenceFinaliseData,
+                          PSYNC_CHECKPOINT *psNewSyncCheckpoint,
+                          void **ppvTimelineUpdateSyncPrim,
+                          IMG_UINT32 *pui32TimelineUpdateValue);
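+
+/* Illustrative sketch (not part of this header): create a single-checkpoint
+ * fence, then finalise it before returning it to the client (see
+ * SyncCheckpointFinaliseFence(), declared later in this header).
+ * hOSTimeline is a hypothetical timeline handle.
+ *
+ *   PVRSRV_FENCE hNewFence;
+ *   IMG_UINT64 ui64UID;
+ *   void *pvFinaliseData, *pvTlUpdateSync;
+ *   PSYNC_CHECKPOINT psNewCheckpoint;
+ *   IMG_UINT32 ui32TlUpdateValue;
+ *
+ *   eError = SyncCheckpointCreateFence(psDeviceNode, "example-fence",
+ *                                      hOSTimeline, psSyncCheckpointContext,
+ *                                      &hNewFence, &ui64UID, &pvFinaliseData,
+ *                                      &psNewCheckpoint, &pvTlUpdateSync,
+ *                                      &ui32TlUpdateValue);
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       eError = SyncCheckpointFinaliseFence(psDeviceNode, hNewFence,
+ *                                            pvFinaliseData, psNewCheckpoint,
+ *                                            "example-fence");
+ *   }
+ */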
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointRollbackFenceData
+
+@Description    'Rolls back' the specified fence: destroys the fence and
+                takes any other actions required to undo the fence
+                creation (e.g. if the implementation wishes to revert the
+                incrementing of the fence's timeline).
+                This function in turn calls a function provided by the
+                OS native sync implementation.
+
+@Input          hFence                  Fence to be 'rolled back'
+
+@Input          pvFinaliseData          Data needed to finalise the
+                                        fence
+
+@Return         PVRSRV_OK if a valid fence was provided.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointFinaliseFence
+
+@Description    'Finalise' the specified fence (performs any actions the
+                underlying implementation may need to perform just prior
+                to the fence being returned to the client).
+                This function in turn calls a function provided by the
+                OS native sync implementation - if the native sync
+                implementation does not need to perform any actions at
+                this time, this function does not need to be registered.
+
+@Input          psDevNode               Device node
+
+@Input          hFence                  Fence to be 'finalised'
+
+@Input          pvFinaliseData          Data needed to finalise the fence
+
+@Input          psSyncCheckpoint        Base sync checkpoint that this fence
+                                        is formed of
+
+@Input          pszName                 Fence annotation
+
+@Return         PVRSRV_OK if a valid fence and finalise data were provided.
+                PVRSRV_ERROR_INVALID_PARAMS if an invalid fence or finalise
+                data were provided.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function (permitted).
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointFinaliseFence(PPVRSRV_DEVICE_NODE psDevNode,
+                            PVRSRV_FENCE hFence,
+                            void *pvFinaliseData,
+                            PSYNC_CHECKPOINT psSyncCheckpoint,
+                            const IMG_CHAR *pszName);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointFreeCheckpointListMem
+
+@Description    Free the memory which was allocated by the sync
+                implementation and used to return the list of sync
+                checkpoints when resolving a fence.
+                This function in turn calls a free function registered by
+                the sync implementation (if a function has been registered).
+
+@Input          pvCheckpointListMem     Pointer to the memory to be freed
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointNoHWUpdateTimelines
+
+@Description    Called by the DDK in a NO_HARDWARE build only.
+                After syncs have been manually signalled by the DDK, this
+                function is called to allow the OS native sync implementation
+                to update its timelines (as the usual callback notification
+                of signalled checkpoints is not supported for NO_HARDWARE).
+                This function in turn calls a function provided by the
+                OS native sync implementation.
+
+@Input          pvPrivateData            Any data the OS native sync
+                                         implementation might require.
+
+@Return         PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function, otherwise
+                PVRSRV_OK.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointDumpInfoOnStalledUFOs
+
+@Description    Called by the DDK in the event of the health check watchdog
+                examining the CCBs and determining that one has failed to
+                progress after 10 seconds while the GPU is idle due to waiting
+                on one or more UFO fences.
+                The DDK will pass a list of UFOs on which the CCB is waiting
+                and the sync implementation will check them to see if any
+                relate to sync points it has created. If so, the
+                implementation should dump debug information on those sync
+                points to the kernel log or other suitable output (which will
+                allow the unsignalled syncs to be identified).
+                The function shall return the number of syncs in the provided
+                array that were syncs which it had created.
+
+@Input          ui32NumUFOs           The number of UFOs in the array passed
+                                      in the pui32Vaddrs parameter.
+
+@Input          pui32Vaddrs           The array of UFOs the CCB is waiting on.
+
+@Output         pui32NumSyncOwnedUFOs The number of UFOs in pui32Vaddr which
+                                      relate to syncs created by the sync
+                                      implementation.
+
+@Return         PVRSRV_OK if a valid pointer is provided in pui32NumSyncOwnedUFOs.
+                PVRSRV_ERROR_INVALID_PARAMS if a NULL value is provided in
+                pui32NumSyncOwnedUFOs.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function.
+
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs,
+                                    IMG_UINT32 *pui32Vaddrs,
+                                    IMG_UINT32 *pui32NumSyncOwnedUFOs);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetStateString
+
+@Description    Called to get a string representing the current state of a
+                sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get the
+                                        state for.
+
+@Return         The string representing the current state of this checkpoint
+*/
+/*****************************************************************************/
+const IMG_CHAR *
+SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointPDumpPol
+
+@Description    Called to insert a poll into the PDump script on a given
+                sync checkpoint being signalled or errored.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint for
+                                        PDump to poll on
+
+@Input          ui32PDumpFlags          PDump flags
+
+@Return         PVRSRV_OK if a valid sync checkpoint was provided.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointPDumpPol(PSYNC_CHECKPOINT psSyncCheckpoint, PDUMP_FLAGS_T ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointRecordLookup
+
+@Description    Returns a debug string with information about the
+                sync checkpoint.
+
+@Input          psDevNode               The device owning the sync
+                                        checkpoint to lookup
+
+@Input          ui32FwAddr              The firmware address of the sync
+                                        checkpoint to lookup
+
+@Input          pszSyncInfo             Character array to write to
+
+@Input          len                     Length of the character array
+
+@Return         None
+*/
+/*****************************************************************************/
+void SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr,
+                                IMG_CHAR *pszSyncInfo, size_t len);
+
+#endif	/* _SYNC_CHECKPOINT_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint_external.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint_external.h
new file mode 100644
index 0000000..399e380
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint_external.h
@@ -0,0 +1,83 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services external synchronisation checkpoint interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines synchronisation checkpoint structures that are visible
+                internally and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SYNC_CHECKPOINT_EXTERNAL_H
+#define SYNC_CHECKPOINT_EXTERNAL_H
+
+#include "img_types.h"
+
+#ifndef CHECKPOINT_TYPES
+#define CHECKPOINT_TYPES
+typedef struct _SYNC_CHECKPOINT_CONTEXT *PSYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT *PSYNC_CHECKPOINT;
+#endif
+
+/* PVRSRV_SYNC_CHECKPOINT states.
+ * The OS native sync implementation should call pfnIsSignalled() (which
+ * returns an IMG_BOOL) to determine whether a PVRSRV_SYNC_CHECKPOINT has
+ * signalled, but it may set the state of a PVRSRV_SYNC_CHECKPOINT which is
+ * currently in the ACTIVE (not yet signalled) state where that
+ * PVRSRV_SYNC_CHECKPOINT represents a foreign sync.
+ */
+typedef IMG_UINT32 PVRSRV_SYNC_CHECKPOINT_STATE;
+
+#define PVRSRV_SYNC_CHECKPOINT_UNDEF         0x000U
+#define PVRSRV_SYNC_CHECKPOINT_ACTIVE        0xac1U  /*!< checkpoint has not signalled */
+#define PVRSRV_SYNC_CHECKPOINT_SIGNALLED     0x519U  /*!< checkpoint has signalled */
+#define PVRSRV_SYNC_CHECKPOINT_ERRORED       0xeffU  /*!< checkpoint has been errored */
+
+
+#define PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR(fwaddr)	((fwaddr) & 0x1U)
+#define PVRSRV_UFO_IS_SYNC_CHECKPOINT(ufoptr)			(PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR((ufoptr)->puiAddrUFO.ui32Addr))
+
+/* Maximum number of sync checkpoints the firmware supports in one fence */
+#define MAX_SYNC_CHECKPOINTS_PER_FENCE 32U
+
+/*!
+ * Define to be used with SyncCheckpointAlloc() to indicate a checkpoint which
+ * represents a foreign sync point or collection of foreign sync points.
+ */
+#define SYNC_CHECKPOINT_FOREIGN_CHECKPOINT ((PVRSRV_TIMELINE) - 2U)
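+
+/* Illustrative sketch (not part of this header): map a checkpoint state
+ * value to a human-readable string, e.g. for debug output.
+ *
+ *   static const IMG_CHAR *StateToString(PVRSRV_SYNC_CHECKPOINT_STATE eState)
+ *   {
+ *       switch (eState)
+ *       {
+ *           case PVRSRV_SYNC_CHECKPOINT_ACTIVE:    return "ACTIVE";
+ *           case PVRSRV_SYNC_CHECKPOINT_SIGNALLED: return "SIGNALLED";
+ *           case PVRSRV_SYNC_CHECKPOINT_ERRORED:   return "ERRORED";
+ *           default:                               return "UNDEFINED";
+ *       }
+ *   }
+ */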
+
+#endif /* SYNC_CHECKPOINT_EXTERNAL_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint_init.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint_init.h
new file mode 100644
index 0000000..f5aa139
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint_init.h
@@ -0,0 +1,82 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services synchronisation checkpoint initialisation interface
+                header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines synchronisation checkpoint structures that are visible
+                internally and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_INIT_
+#define _SYNC_CHECKPOINT_INIT_
+
+#include "device.h"
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointInit
+
+@Description    Initialise the sync checkpoint driver by giving it the
+                device node (needed to determine the pfnUFOAlloc function
+                to call in order to allocate sync block memory).
+
+@Input          psDevNode               Device for which sync checkpoints
+                                        are being initialised
+
+@Return         PVRSRV_OK               initialised successfully,
+                PVRSRV_ERROR_<error>    otherwise
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointInit(PVRSRV_DEVICE_NODE *psDevNode);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointDeinit
+
+@Description    Deinitialise the sync checkpoint driver.
+                Frees resources allocated during initialisation.
+
+@Input          psDevNode               Device for which sync checkpoints
+                                        are being de-initialised
+
+@Return         None
+*/
+/*****************************************************************************/
+void SyncCheckpointDeinit(PVRSRV_DEVICE_NODE *psDevNode);
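+
+/* Illustrative sketch (not part of this header): a device-creation path
+ * would pair these calls around the lifetime of the device node.
+ *
+ *   eError = SyncCheckpointInit(psDevNode);
+ *   if (eError != PVRSRV_OK) { handle the failure }
+ *   ...
+ *   SyncCheckpointDeinit(psDevNode);
+ */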
+
+#endif /* _SYNC_CHECKPOINT_INIT_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint_internal.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint_internal.h
new file mode 100644
index 0000000..9d109c6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint_internal.h
@@ -0,0 +1,250 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services internal synchronisation checkpoint interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the internal server interface for services
+                synchronisation checkpoints.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __SYNC_CHECKPOINT__
+#define __SYNC_CHECKPOINT__
+
+#include "img_types.h"
+#include "opaque_types.h"
+#include "sync_checkpoint_internal_fw.h"
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "ra.h"
+#include "dllist.h"
+#include "lock.h"
+#include "devicemem.h"
+#include "rgx_fwif_shared.h"
+
+struct SYNC_CHECKPOINT_RECORD;
+
+/*
+	Private structures
+*/
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_ *_PSYNC_CHECKPOINT_CONTEXT_CTL;
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT_
+{
+	PPVRSRV_DEVICE_NODE				psDevNode;
+	IMG_CHAR						azName[PVRSRV_SYNC_NAME_LENGTH];       /*!< Name of the RA */
+	RA_ARENA						*psSubAllocRA;                         /*!< RA context */
+	IMG_CHAR						azSpanName[PVRSRV_SYNC_NAME_LENGTH];   /*!< Name of the span RA */
+	RA_ARENA						*psSpanRA;                             /*!< RA used for span management of SubAllocRA */
+	ATOMIC_T						hRefCount;                             /*!< Ref count for this context */
+	ATOMIC_T						hCheckpointCount;                      /*!< Checkpoint count for this context */
+	POS_LOCK						hLock;
+	_PSYNC_CHECKPOINT_CONTEXT_CTL	psContextCtl;
+} _SYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT_BLOCK_
+{
+	ATOMIC_T                  hRefCount;                  /*!< Ref count for this sync block */
+	POS_LOCK                  hLock;
+	_SYNC_CHECKPOINT_CONTEXT  *psContext;                 /*!< Parent sync checkpoint context */
+	PPVRSRV_DEVICE_NODE       psDevNode;
+	IMG_UINT32                ui32SyncBlockSize;          /*!< Size of the sync checkpoint block */
+	IMG_UINT32                ui32FirmwareAddr;           /*!< Firmware address */
+	DEVMEM_MEMDESC            *hMemDesc;                  /*!< DevMem allocation for block */
+	volatile IMG_UINT32       *pui32LinAddr;              /*!< Server-code CPU mapping */
+	IMG_UINT64                uiSpanBase;                 /*!< Base of this import (FW DevMem) in the span RA */
+} SYNC_CHECKPOINT_BLOCK;
+
+typedef struct SYNC_CHECKPOINT_RECORD* PSYNC_CHECKPOINT_RECORD_HANDLE;
+
+typedef struct _SYNC_CHECKPOINT_
+{
+	//_SYNC_CHECKPOINT_CONTEXT      *psContext;             /*!< pointer to the parent context of this checkpoint */
+	/* A sync checkpoint is assigned a unique ID, to avoid any confusion should
+	 * the same memory be re-used later for a different checkpoint
+	 */
+	IMG_UINT32                      ui32UID;                /*!< Unique ID assigned to sync checkpoint (to distinguish checkpoints if memory is re-used)*/
+	POS_LOCK                        hLock;
+	ATOMIC_T                        hRefCount;              /*!< Ref count for this sync */
+	ATOMIC_T                        hEnqueuedCCBCount;      /*!< Num times sync has been put in CCBs */
+	SYNC_CHECKPOINT_BLOCK           *psSyncCheckpointBlock; /*!< Synchronisation block this checkpoint is allocated on */
+	IMG_UINT64                      uiSpanAddr;             /*!< Span address of the sync */
+	volatile SYNC_CHECKPOINT_FW_OBJ *psSyncCheckpointFwObj; /*!< CPU view of the data held in the sync block */
+	PRGXFWIF_UFO_ADDR               sCheckpointUFOAddr;     /*!< PRGXFWIF_UFO_ADDR struct used to pass update address to FW */
+	IMG_CHAR                        azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the checkpoint */
+	PVRSRV_TIMELINE                 hTimeline;              /*!< Timeline on which this sync checkpoint was created */
+	IMG_UINT32                      ui32ValidationCheck;
+	IMG_PID                         uiProcess;              /*!< The Process ID of the process which created this sync checkpoint */
+	PSYNC_CHECKPOINT_RECORD_HANDLE  hRecord;                /*!< Sync record handle */
+	DLLIST_NODE                     sListNode;              /*!< List node for the global sync chkpt list */
+	DLLIST_NODE                     sDeferredFreeListNode;  /*!< List node for the deferred free sync chkpt list */
+	IMG_UINT32                      ui32FWAddr;             /*!< FWAddr stored at sync checkpoint alloc time */
+} _SYNC_CHECKPOINT;
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetFirmwareAddr
+
+@Description    Returns the firmware address of the sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the firmware address of
+
+@Return         The firmware address of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointCCBEnqueued
+
+@Description    Increment the CCB enqueued reference count for a
+                synchronisation checkpoint. This indicates how many FW
+                operations (checks/updates) have been placed into CCBs for
+                the sync checkpoint.
+                When the FW services these operations, it increments its own
+                reference count. When these two values are equal, we know
+                there are no outstanding FW operations for the checkpoint
+                in any CCB.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint for which
+                                        to increment the enqueued reference
+                                        count
+
+@Return         None
+
+*/
+/*****************************************************************************/
+void
+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetEnqueuedCount
+
+@Description    Returns the CCB enqueued count of the sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the enqueued count of
+
+@Return         The enqueued count of the sync checkpoint
+                (i.e. the number of FW operations (checks or updates)
+                 currently enqueued in CCBs for the sync checkpoint)
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint);
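+
+/* Illustrative sketch (not part of this header): the enqueued count can be
+ * compared with the FW object's ui32FwRefCount to decide whether any FW
+ * operations are still outstanding for a checkpoint (see the
+ * SyncCheckpointCCBEnqueued description above). OSAtomicRead is assumed to
+ * be the services atomic accessor; the caller is assumed to hold a
+ * reference keeping the checkpoint alive.
+ *
+ *   static IMG_BOOL ExampleHasOutstandingFWOps(_SYNC_CHECKPOINT *psSync)
+ *   {
+ *       return (IMG_UINT32)OSAtomicRead(&psSync->hEnqueuedCCBCount) !=
+ *              psSync->psSyncCheckpointFwObj->ui32FwRefCount;
+ *   }
+ */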
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetReferenceCount
+
+@Description    Returns the host reference count of the sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the reference count of
+
+@Return         The host reference count of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetCreator
+
+@Description    Returns the ID of the process which created the sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the creating process of
+
+@Return         The process id of the process which created this sync checkpoint.
+
+*/
+/*****************************************************************************/
+IMG_PID
+SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetId
+
+@Description    Returns the unique Id of the sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the unique Id of
+
+@Return         The unique Id of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetTimeline
+
+@Description    Returns the parent timeline of the sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the parent timeline of
+
+@Return         The parent timeline of the sync checkpoint
+
+*/
+/*****************************************************************************/
+PVRSRV_TIMELINE
+SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetRGXFWIFUFOAddr
+
+@Description    Returns the PRGXFWIF_UFO_ADDR of the sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the PRGXFWIF_UFO_ADDR of
+
+@Return         The PRGXFWIF_UFO_ADDR of the sync checkpoint, used when
+                providing the update in server kick code.
+
+*/
+/*****************************************************************************/
+PRGXFWIF_UFO_ADDR*
+SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
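+
+/* Illustrative sketch (not part of this header): the accessors above can be
+ * combined to emit a one-line debug summary for a checkpoint. PVR_DPF is
+ * the services debug-print macro from pvr_debug.h.
+ *
+ *   PVR_DPF((PVR_DBG_MESSAGE,
+ *            "chkpt id=%u fwaddr=0x%x enq=%u refs=%u pid=%u",
+ *            SyncCheckpointGetId(psSyncCheckpoint),
+ *            SyncCheckpointGetFirmwareAddr(psSyncCheckpoint),
+ *            SyncCheckpointGetEnqueuedCount(psSyncCheckpoint),
+ *            SyncCheckpointGetReferenceCount(psSyncCheckpoint),
+ *            SyncCheckpointGetCreator(psSyncCheckpoint)));
+ */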
+
+#endif /* __SYNC_CHECKPOINT__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint_internal_fw.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint_internal_fw.h
new file mode 100644
index 0000000..49df0ca
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_checkpoint_internal_fw.h
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services internal synchronisation checkpoint FW obj header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the internal FW object structure for services
+                synchronisation checkpoints.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SYNC_CHECKPOINT_INTERNAL_FW_H
+#define SYNC_CHECKPOINT_INTERNAL_FW_H
+
+#include "img_types.h"
+
+/* Sync_checkpoint firmware object.
+ * This is the FW-addressable structure used to hold the sync checkpoint's
+ * state and other information which needs to be accessed by the firmware.
+ */
+typedef struct
+{
+	IMG_UINT32	ui32State;          /*!< Holds the current state of the sync checkpoint */
+	IMG_UINT32	ui32FwRefCount;     /*!< Holds the FW reference count (num of fences/updates processed) */
+} SYNC_CHECKPOINT_FW_OBJ;
+
+/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */
+#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0)
+
+#endif	/* SYNC_CHECKPOINT_INTERNAL_FW_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_fallback_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_fallback_server.h
new file mode 100644
index 0000000..67ae990
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_fallback_server.h
@@ -0,0 +1,198 @@
+/**************************************************************************/ /*!
+@File
+@Title          Fallback sync interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SYNC_FALLBACK_SERVER_H_
+#define _SYNC_FALLBACK_SERVER_H_
+
+#include "img_types.h"
+#include "sync_checkpoint.h"
+#include "device.h"
+#include "connection_server.h"
+
+
+typedef struct _PVRSRV_TIMELINE_SERVER_ PVRSRV_TIMELINE_SERVER;
+typedef struct _PVRSRV_FENCE_SERVER_ PVRSRV_FENCE_SERVER;
+typedef struct _PVRSRV_FENCE_EXPORT_ PVRSRV_FENCE_EXPORT;
+
+typedef struct _PVRSRV_SYNC_PT_ PVRSRV_SYNC_PT;
+
+#define SYNC_FB_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+#define SYNC_FB_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+
+/*****************************************************************************/
+/*                                                                           */
+/*                         SW SPECIFIC FUNCTIONS                             */
+/*                                                                           */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncFbTimelineCreateSW(IMG_UINT32 uiTimelineNameSize,
+                                    const IMG_CHAR *pszTimelineName,
+                                    PVRSRV_TIMELINE_SERVER **ppsTimeline);
+
+PVRSRV_ERROR SyncFbFenceCreateSW(PVRSRV_TIMELINE_SERVER *psTimeline,
+                                 IMG_UINT32 uiFenceNameSize,
+                                 const IMG_CHAR *pszFenceName,
+                                 PVRSRV_FENCE_SERVER **ppsOutputFence,
+                                 IMG_UINT64 *pui64SyncPtIdx);
+PVRSRV_ERROR SyncFbSWTimelineFenceCreateKM(PVRSRV_TIMELINE iSWTimeline,
+                                           const IMG_CHAR *pszFenceName,
+                                           PVRSRV_FENCE *piOutputFence,
+                                           IMG_UINT64* pui64SyncPtIdx);
+
+PVRSRV_ERROR SyncFbTimelineAdvanceSW(PVRSRV_TIMELINE_SERVER *psTimeline,
+                                     IMG_UINT64 *pui64SyncPtIdx);
+PVRSRV_ERROR SyncFbSWTimelineAdvanceKM(void *pvSWTimelineObj,
+                                       IMG_UINT64* pui64SyncPtIdx);
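+
+/* Illustrative sketch (not part of this header): create a SW timeline,
+ * attach a fence to it, then advance the timeline to signal the fence's
+ * sync point. Error handling omitted for brevity.
+ *
+ *   PVRSRV_TIMELINE_SERVER *psTl;
+ *   PVRSRV_FENCE_SERVER *psFence;
+ *   IMG_UINT64 ui64Idx;
+ *
+ *   SyncFbTimelineCreateSW(sizeof("sw-tl"), "sw-tl", &psTl);
+ *   SyncFbFenceCreateSW(psTl, sizeof("sw-fence"), "sw-fence",
+ *                       &psFence, &ui64Idx);
+ *   SyncFbTimelineAdvanceSW(psTl, &ui64Idx);
+ */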
+
+/*****************************************************************************/
+/*                                                                           */
+/*                         PVR SPECIFIC FUNCTIONS                            */
+/*                                                                           */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncFbTimelineCreatePVR(IMG_UINT32 uiTimelineNameSize,
+                                     const IMG_CHAR *pszTimelineName,
+                                     PVRSRV_TIMELINE_SERVER **ppsTimeline);
+
+PVRSRV_ERROR SyncFbFenceCreatePVR(const IMG_CHAR *pszName,
+                                  PVRSRV_TIMELINE iTl,
+                                  PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext,
+                                  PVRSRV_FENCE *piOutFence,
+                                  IMG_UINT64 *puiFenceUID,
+                                  void **ppvFenceFinaliseData,
+                                  PSYNC_CHECKPOINT *ppsOutCheckpoint,
+                                  void **ppvTimelineUpdateSync,
+                                  IMG_UINT32 *puiTimelineUpdateValue);
+
+PVRSRV_ERROR SyncFbFenceResolvePVR(PSYNC_CHECKPOINT_CONTEXT psContext,
+                                   PVRSRV_FENCE iFence,
+                                   IMG_UINT32 *puiNumCheckpoints,
+                                   PSYNC_CHECKPOINT **papsCheckpoints,
+                                   IMG_UINT64 *puiFenceUID);
+
+/*****************************************************************************/
+/*                                                                           */
+/*                         GENERIC FUNCTIONS                                 */
+/*                                                                           */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncFbGetFenceObj(PVRSRV_FENCE iFence,
+                               void **ppvFenceObj);
+
+PVRSRV_ERROR SyncFbSWGetTimelineObj(PVRSRV_TIMELINE iSWTimeline,
+                                    void **ppvSWTimelineObj);
+
+PVRSRV_ERROR SyncFbTimelineRelease(PVRSRV_TIMELINE_SERVER *psTl);
+
+PVRSRV_ERROR SyncFbFenceRelease(PVRSRV_FENCE_SERVER *psFence);
+PVRSRV_ERROR SyncFbFenceReleaseKM(void *pvFenceObj);
+
+PVRSRV_ERROR SyncFbFenceDup(PVRSRV_FENCE_SERVER *psInFence,
+                            PVRSRV_FENCE_SERVER **ppsOutFence);
+
+PVRSRV_ERROR SyncFbFenceMerge(PVRSRV_FENCE_SERVER *psInFence1,
+                              PVRSRV_FENCE_SERVER *psInFence2,
+                              IMG_UINT32 uiFenceNameSize,
+                              const IMG_CHAR *pszFenceName,
+                              PVRSRV_FENCE_SERVER **ppsOutFence);
+
+PVRSRV_ERROR SyncFbFenceWait(PVRSRV_FENCE_SERVER *psFence,
+                             IMG_UINT32 uiTimeout);
+
+PVRSRV_ERROR SyncFbFenceDump(PVRSRV_FENCE_SERVER *psFence,
+                             IMG_UINT32 uiLine,
+                             IMG_UINT32 uiFileNameLength,
+                             const IMG_CHAR *pszFile,
+                             IMG_UINT32 uiModuleLength,
+                             const IMG_CHAR *pszModule,
+                             IMG_UINT32 uiDescLength,
+                             const IMG_CHAR *pszDesc);
+
+PVRSRV_ERROR SyncFbDumpFenceKM(void *pvSWFenceObj,
+                               DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile);
+
+PVRSRV_ERROR SyncFbSWDumpTimelineKM(void *pvSWTimelineObj,
+                                    DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                    void *pvDumpDebugFile);
+
+PVRSRV_ERROR SyncFbRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR SyncFbDeregisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_UINT32 SyncFbDumpInfoOnStalledUFOs(IMG_UINT32 nr_ufos, IMG_UINT32 *vaddrs);
+
+IMG_BOOL SyncFbCheckpointHasSignalled(IMG_UINT32 ui32FwAddr, IMG_UINT32 ui32Value);
+
+/*****************************************************************************/
+/*                                                                           */
+/*                       IMPORT/EXPORT FUNCTIONS                             */
+/*                                                                           */
+/*****************************************************************************/
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR SyncFbFenceExportInsecure(PVRSRV_FENCE_SERVER *psFence,
+                                       PVRSRV_FENCE_EXPORT **ppExport);
+
+PVRSRV_ERROR SyncFbFenceExportDestroyInsecure(PVRSRV_FENCE_EXPORT *psExport);
+
+PVRSRV_ERROR SyncFbFenceImportInsecure(CONNECTION_DATA *psConnection,
+                                       PVRSRV_DEVICE_NODE *psDevice,
+                                       PVRSRV_FENCE_EXPORT *psImport,
+                                       PVRSRV_FENCE_SERVER **psFence);
+#endif /* defined(SUPPORT_INSECURE_EXPORT) */
+
+PVRSRV_ERROR SyncFbFenceExportSecure(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE * psDevNode,
+                                     PVRSRV_FENCE_SERVER *psFence,
+                                     IMG_SECURE_TYPE *phSecure,
+                                     PVRSRV_FENCE_EXPORT **ppsExport,
+                                     CONNECTION_DATA **ppsSecureConnection);
+
+PVRSRV_ERROR SyncFbFenceExportDestroySecure(PVRSRV_FENCE_EXPORT *psExport);
+
+PVRSRV_ERROR SyncFbFenceImportSecure(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDevice,
+                                     IMG_SECURE_TYPE hSecure,
+                                     PVRSRV_FENCE_SERVER **psFence);
+
+#endif /* _SYNC_FALLBACK_SERVER_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_internal.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_internal.h
new file mode 100644
index 0000000..060ef50
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_internal.h
@@ -0,0 +1,132 @@
+/*************************************************************************/ /*!
+@File           sync_internal.h
+@Title          Services internal synchronisation interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the internal client side interface for services
+                synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_INTERNAL_
+#define _SYNC_INTERNAL_
+
+#include "img_types.h"
+#include "img_defs.h"
+#include <powervr/sync_external.h>
+#include "ra.h"
+#include "dllist.h"
+#include "lock.h"
+#include "devicemem.h"
+
+
+#define LOCAL_SYNC_PRIM_RESET_VALUE 0
+#define LOCAL_SYNC_PRIM_POISON_VALUE 0xa5a5a5a5u
+
+/*
+	Private structures
+*/
+#define SYNC_PRIM_NAME_SIZE		50
+typedef struct SYNC_PRIM_CONTEXT
+{
+	SHARED_DEV_CONNECTION       hDevConnection;
+	IMG_CHAR					azName[SYNC_PRIM_NAME_SIZE];	/*!< Name of the RA */
+	RA_ARENA					*psSubAllocRA;					/*!< RA context */
+	IMG_CHAR					azSpanName[SYNC_PRIM_NAME_SIZE];/*!< Name of the span RA */
+	RA_ARENA					*psSpanRA;						/*!< RA used for span management of SubAllocRA */
+	ATOMIC_T				hRefCount;	/*!< Ref count for this context */
+} SYNC_PRIM_CONTEXT;
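+
+/* Note: judging by the fields above, a context sub-allocates sync prims out of
+ * larger blocks: psSubAllocRA hands out individual sync prim slots, while
+ * psSpanRA manages the address span of the blocks imported into the
+ * sub-allocation arena.
+ */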
+
+typedef struct _SYNC_PRIM_BLOCK_
+{
+	SYNC_PRIM_CONTEXT	*psContext;				/*!< Our copy of the services connection */
+	IMG_HANDLE			hServerSyncPrimBlock;	/*!< Server handle for this block */
+	IMG_UINT32			ui32SyncBlockSize;		/*!< Size of the sync prim block */
+	IMG_UINT32			ui32FirmwareAddr;		/*!< Firmware address */
+	DEVMEM_MEMDESC		*hMemDesc;				/*!< Host mapping handle */
+	IMG_UINT32 __iomem	*pui32LinAddr;			/*!< User CPU mapping */
+	IMG_UINT64			uiSpanBase;				/*!< Base of this import in the span RA */
+	DLLIST_NODE			sListNode;				/*!< List node for the sync block list */
+} SYNC_PRIM_BLOCK;
+
+typedef enum _SYNC_PRIM_TYPE_
+{
+	SYNC_PRIM_TYPE_UNKNOWN = 0,
+	SYNC_PRIM_TYPE_LOCAL,
+	SYNC_PRIM_TYPE_SERVER,
+} SYNC_PRIM_TYPE;
+
+typedef struct _SYNC_PRIM_LOCAL_
+{
+	ATOMIC_T				hRefCount;	/*!< Ref count for this sync */
+	SYNC_PRIM_BLOCK			*psSyncBlock;	/*!< Synchronisation block this primitive is allocated on */
+	IMG_UINT64				uiSpanAddr;		/*!< Span address of the sync */
+	IMG_HANDLE				hRecord;		/*!< Sync record handle */
+} SYNC_PRIM_LOCAL;
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+typedef struct _SYNC_PRIM_SERVER_
+{
+	SYNC_BRIDGE_HANDLE		hBridge;			/*!< Bridge handle */
+	IMG_HANDLE				hServerSync;		/*!< Handle to the server sync */
+	IMG_UINT32				ui32FirmwareAddr;	/*!< Firmware address of the sync */
+} SYNC_PRIM_SERVER;
+#endif
+
+typedef struct _SYNC_PRIM_
+{
+	PVRSRV_CLIENT_SYNC_PRIM	sCommon;		/*!< Client visible part of the sync prim */
+	SYNC_PRIM_TYPE			eType;			/*!< Sync primitive type */
+	union {
+		SYNC_PRIM_LOCAL		sLocal;			/*!< Local sync primitive data */
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+		SYNC_PRIM_SERVER	sServer;		/*!< Server sync primitive data */
+#endif
+	} u;
+} SYNC_PRIM;
+
+
+/* FIXME this must return a correctly typed pointer */
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr);
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+							IMG_HANDLE *phBlock,
+							IMG_UINT32 *pui32Offset);
+
+
+#endif	/* _SYNC_INTERNAL_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_server.c
new file mode 100644
index 0000000..4084dc3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_server.c
@@ -0,0 +1,2625 @@
+/*************************************************************************/ /*!
+@File           sync_server.c
+@Title          Server side synchronisation functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side functions for synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_types.h"
+#include "img_defs.h"
+#include "sync_server.h"
+#include "allocmem.h"
+#include "device.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pdump.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "pdump_km.h"
+#include "sync.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+#include "htbuffer.h"
+#include "rgxhwperf.h"
+#include "info_page.h"
+
+#include "sync_checkpoint_internal.h"
+#include "sync_checkpoint.h"
+
+/* Include this to obtain MAX_SYNC_CHECKPOINTS_PER_FENCE */
+#include "sync_checkpoint_external.h"
+
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "ossecure_export.h"
+#endif
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+#include "rgxdebug.h"
+#endif
+
+/* Set this to enable debug relating to the construction and maintenance of the sync address list */
+#define SYNC_ADDR_LIST_DEBUG 0
+
+/* Set maximum number of FWAddrs that can be accommodated in a SYNC_ADDR_LIST.
+ * This should allow for PVRSRV_MAX_SYNC_PRIMS sync prims plus
+ * MAX_SYNC_CHECKPOINTS_PER_FENCE sync checkpoints plus one further sync prim
+ * to accommodate the additional sync prim update returned by the Native
+ * sync implementation (used for timeline debug).
+ */
+#define PVRSRV_MAX_SYNC_ADDR_LIST_SIZE (PVRSRV_MAX_SYNC_PRIMS+MAX_SYNC_CHECKPOINTS_PER_FENCE+1)
+
+/* Max number of syncs allowed in a sync prim op */
+#define SYNC_PRIM_OP_MAX_SYNCS 1024
+
+struct _SYNC_PRIMITIVE_BLOCK_
+{
+	PVRSRV_DEVICE_NODE	*psDevNode;
+	DEVMEM_MEMDESC		*psMemDesc;
+	IMG_UINT32			*pui32LinAddr;
+	IMG_UINT32			ui32BlockSize;		/*!< Size of the Sync Primitive Block */
+	ATOMIC_T			sRefCount;
+	DLLIST_NODE			sConnectionNode;
+	SYNC_CONNECTION_DATA *psSyncConnectionData;	/*!< Link back to the sync connection data if there is one */
+	PRGXFWIF_UFO_ADDR		uiFWAddr;	/*!< The firmware address of the sync prim block */
+};
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+struct _SERVER_SYNC_PRIMITIVE_
+{
+	PVRSRV_DEVICE_NODE                      *psDevNode;
+	PVRSRV_CLIENT_SYNC_PRIM                 *psSync;
+	IMG_UINT32                              ui32NextOp;
+	ATOMIC_T                                sRefCount;
+	IMG_UINT32                              ui32UID;
+	IMG_UINT32                              ui32LastSyncRequesterID;
+	DLLIST_NODE                             sSyncServerListNode;
+	/* PDump only data */
+	IMG_BOOL                                bSWOperation;
+	IMG_BOOL                                bSWOpStartedInCaptRange;
+	IMG_UINT32                              ui32LastHWUpdate;
+	IMG_UINT32                              ui32LastPdumpedBlock;       /* The pdump-block number in which this sync primitive was last pdumped */
+	IMG_BOOL                                bFirstOperationInBlock;     /* Is the current operation the first taken on this sync in the current pdump-block? */
+	IMG_BOOL                                bPDumped;
+	POS_LOCK                                hLock;                      /*!< used to make ServerSyncQueue*Op calls atomic */
+	IMG_CHAR                                szClassName[SYNC_MAX_CLASS_NAME_LEN];
+};
+
+struct _SERVER_SYNC_EXPORT_
+{
+	SERVER_SYNC_PRIMITIVE *psSync;
+};
+
+struct _SERVER_OP_COOKIE_
+{
+	IMG_BOOL				bActive;
+	/*
+		Client syncblock(s) info.
+		If this changes update the calculation of ui32BlockAllocSize
+	*/
+	IMG_UINT32				ui32SyncBlockCount;
+	SYNC_PRIMITIVE_BLOCK	**papsSyncPrimBlock;
+
+	/*
+		Client sync(s) info.
+		If this changes update the calculation of ui32ClientAllocSize
+	*/
+	IMG_UINT32				ui32ClientSyncCount;
+	IMG_UINT32				*paui32SyncBlockIndex;
+	IMG_UINT32				*paui32Index;
+	IMG_UINT32				*paui32Flags;
+	IMG_UINT32				*paui32FenceValue;
+	IMG_UINT32				*paui32UpdateValue;
+
+	/*
+		Server sync(s) info
+		If this changes update the calculation of ui32ServerAllocSize
+	*/
+	IMG_UINT32				ui32ServerSyncCount;
+	SERVER_SYNC_PRIMITIVE	**papsServerSync;
+	IMG_UINT32				*paui32ServerFenceValue;
+	IMG_UINT32				*paui32ServerUpdateValue;
+
+};
+#endif
+
+struct _SYNC_CONNECTION_DATA_
+{
+	DLLIST_NODE	sListHead;  /*!< list of sync blocks associated with / created against this connection */
+	ATOMIC_T	sRefCount;  /*!< number of references to this object */
+	POS_LOCK	hLock;      /*!< lock protecting the list of sync blocks */
+};
+
+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1))
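+/* e.g. with sz == 4, successive applications starting from 3 yield
+ * 2, 1, 0, 3, 2, ... letting a ring-buffer index step backwards through
+ * the most recent entries.
+ */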
+
+/* this is the max number of syncs we will search or dump
+ * at any time.
+ */
+#define SYNC_RECORD_LIMIT 20000
+
+enum SYNC_RECORD_TYPE
+{
+	SYNC_RECORD_TYPE_UNKNOWN = 0,
+	SYNC_RECORD_TYPE_CLIENT,
+	SYNC_RECORD_TYPE_SERVER,
+};
+
+struct SYNC_RECORD
+{
+	PVRSRV_DEVICE_NODE		*psDevNode;
+	SYNC_PRIMITIVE_BLOCK	*psServerSyncPrimBlock;	/*!< handle to _SYNC_PRIMITIVE_BLOCK_ */
+	IMG_UINT32				ui32SyncOffset; 		/*!< offset to sync in block */
+	IMG_UINT32				ui32FwBlockAddr;
+	IMG_PID					uiPID;
+	IMG_UINT64				ui64OSTime;
+	enum SYNC_RECORD_TYPE	eRecordType;
+	DLLIST_NODE				sNode;
+	IMG_CHAR				szClassName[SYNC_MAX_CLASS_NAME_LEN];
+};
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+
+#define SYNC_REQUESTOR_UNKNOWN 0
+static IMG_UINT32 g_ServerSyncUID;
+
+static IMG_UINT32 g_ui32NextSyncRequestorID = 1;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+static POS_LOCK ghServerSyncLock;
+#endif
+#endif
+
+#if defined(SYNC_DEBUG) || defined(REFCOUNT_DEBUG)
+#define SYNC_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define SYNC_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+#if defined(SYNC_DEBUG)
+#define SYNC_UPDATES_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define SYNC_UPDATES_PRINT(fmt, ...)
+#endif
+
+/*!
+*****************************************************************************
+ @Function      : SyncPrimitiveBlockToFWAddr
+
+ @Description   : Given a pointer to a sync primitive block and an offset,
+                  returns the firmware address of the sync.
+
+ @Input           psSyncPrimBlock : Sync primitive block which contains the sync
+ @Input           ui32Offset      : Offset of sync within the sync primitive block
+ @Output          psAddrOut       : Absolute FW address of the sync is written out through
+                                    this pointer
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock,
+							IMG_UINT32 ui32Offset,
+						PRGXFWIF_UFO_ADDR *psAddrOut)
+{
+	/* check offset is legal */
+	if (unlikely((ui32Offset >= psSyncPrimBlock->ui32BlockSize) ||
+		(ui32Offset % sizeof(IMG_UINT32))))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: parameters check failed", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psAddrOut->ui32Addr = psSyncPrimBlock->uiFWAddr.ui32Addr + ui32Offset;
+	return PVRSRV_OK;
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListGrow
+
+ @Description   : Grow the SYNC_ADDR_LIST so it can accommodate the given
+                  number of syncs, up to a maximum of PVRSRV_MAX_SYNC_PRIMS.
+
+ @Input           psList       : The SYNC_ADDR_LIST to grow
+ @Input           ui32NumSyncs : The number of sync addresses to be able to hold
+ @Return :        PVRSRV_OK on success
+*****************************************************************************/
+
+static PVRSRV_ERROR SyncAddrListGrow(SYNC_ADDR_LIST *psList, IMG_UINT32 ui32NumSyncs)
+{
+	if (unlikely(ui32NumSyncs > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: ui32NumSyncs=%u > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE=%u", __func__, ui32NumSyncs, PVRSRV_MAX_SYNC_ADDR_LIST_SIZE));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s:     Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+	if (ui32NumSyncs > psList->ui32NumSyncs)
+	{
+		if (psList->pasFWAddrs == NULL)
+		{
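+			/* Allocate the maximum list size up front; subsequent grows then
+			 * only bump ui32NumSyncs and never need to reallocate or move entries.
+			 */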
+			psList->pasFWAddrs = OSAllocMem(sizeof(PRGXFWIF_UFO_ADDR) * PVRSRV_MAX_SYNC_ADDR_LIST_SIZE);
+			if (unlikely(psList->pasFWAddrs == NULL))
+			{
+				return PVRSRV_ERROR_OUT_OF_MEMORY;
+			}
+		}
+
+		psList->ui32NumSyncs = ui32NumSyncs;
+	}
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s:     Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+	return PVRSRV_OK;
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListInit
+
+ @Description   : Initialise a SYNC_ADDR_LIST structure ready for use
+
+ @Input           psList        : The SYNC_ADDR_LIST structure to initialise
+ @Return        : None
+*****************************************************************************/
+
+void
+SyncAddrListInit(SYNC_ADDR_LIST *psList)
+{
+	psList->ui32NumSyncs = 0;
+	psList->pasFWAddrs   = NULL;
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListDeinit
+
+ @Description   : Frees any resources associated with the given SYNC_ADDR_LIST
+
+ @Input           psList        : The SYNC_ADDR_LIST structure to deinitialise
+ @Return        : None
+*****************************************************************************/
+
+void
+SyncAddrListDeinit(SYNC_ADDR_LIST *psList)
+{
+	if (psList->pasFWAddrs != NULL)
+	{
+		OSFreeMem(psList->pasFWAddrs);
+	}
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListPopulate
+
+ @Description   : Populate the given SYNC_ADDR_LIST with the FW addresses
+                  of the syncs given by the SYNC_PRIMITIVE_BLOCKs and sync offsets
+
+ @Input           ui32NumSyncs    : The number of syncs being passed in
+ @Input           apsSyncPrimBlock: Array of pointers to SYNC_PRIMITIVE_BLOCK structures
+                                    in which the syncs are based
+ @Input           paui32SyncOffset: Array of offsets within each of the sync primitive blocks
+                                    where the syncs are located
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncAddrListPopulate(SYNC_ADDR_LIST *psList,
+						IMG_UINT32 ui32NumSyncs,
+						SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock,
+						IMG_UINT32 *paui32SyncOffset)
+{
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+	if (ui32NumSyncs > psList->ui32NumSyncs)
+	{
+		eError = SyncAddrListGrow(psList, ui32NumSyncs);
+
+		if (unlikely(eError != PVRSRV_OK))
+		{
+			return eError;
+		}
+	}
+
+	psList->ui32NumSyncs = ui32NumSyncs;
+
+	for (i = 0; i < ui32NumSyncs; i++)
+	{
+		eError = SyncPrimitiveBlockToFWAddr(apsSyncPrimBlock[i],
+								paui32SyncOffset[i],
+								&psList->pasFWAddrs[i]);
+
+		if (unlikely(eError != PVRSRV_OK))
+		{
+			return eError;
+		}
+	}
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST          *psList,
+						   PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32FwAddr = 0;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs));
+#endif
+	/* Ensure there's room in psList for the additional sync prim update */
+	eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + 1);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		goto e0;
+	}
+
+	eError = SyncPrimGetFirmwareAddr(psSyncPrim, &ui32FwAddr);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		goto e0;
+	}
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Appending sync prim <%p> UFO addr (0x%x) to psList->pasFWAddrs[%d]", __func__, (void*)psSyncPrim, ui32FwAddr, psList->ui32NumSyncs-1));
+#endif
+	psList->pasFWAddrs[psList->ui32NumSyncs-1].ui32Addr = ui32FwAddr;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	{
+		IMG_UINT32 iii;
+
+		PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __func__, psList->ui32NumSyncs));
+		for (iii=0; iii<psList->ui32NumSyncs; iii++)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: psList->pasFWAddrs[%d].ui32Addr=0x%x", __func__, iii, psList->pasFWAddrs[iii].ui32Addr));
+		}
+	}
+#endif
+e0:
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d", __func__, (void*)psList, psList->ui32NumSyncs));
+#endif
+	return eError;
+}
+
+
+static PVRSRV_ERROR
+_AppendCheckpoints(SYNC_ADDR_LIST *psList,
+				   IMG_UINT32 ui32NumCheckpoints,
+				   PSYNC_CHECKPOINT *apsSyncCheckpoint,
+				   IMG_BOOL bDeRefCheckpoints)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32SyncCheckpointIndex;
+	IMG_UINT32 ui32RollbackSize = psList->ui32NumSyncs;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
+#endif
+	/* Ensure there's room in psList for the sync checkpoints */
+	eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + ui32NumCheckpoints);
+	if (unlikely(eError != PVRSRV_OK))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: * * * * ERROR * * * * Trying to SyncAddrListGrow(psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
+		goto e0;
+	}
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: (ui32NumCheckpoints=%d) (psList->ui32NumSyncs is now %d) array already contains %d FWAddrs:", __func__, ui32NumCheckpoints, psList->ui32NumSyncs, ui32RollbackSize));
+	if (ui32RollbackSize > 0)
+	{
+		{
+			IMG_UINT32 kk;
+			for (kk=0; kk<ui32RollbackSize; kk++)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:    <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __func__,
+						 (void*)&psList->pasFWAddrs[kk], kk,
+						 psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr));
+			}
+		}
+	}
+	PVR_DPF((PVR_DBG_ERROR, "%s: apsSyncCheckpoint=<%p>, apsSyncCheckpoint[0] = <%p>", __func__, (void*)apsSyncCheckpoint, (void*)apsSyncCheckpoint[0]));
+#endif
+	for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
+	{
+		psList->pasFWAddrs[ui32RollbackSize + ui32SyncCheckpointIndex].ui32Addr = SyncCheckpointGetFirmwareAddr(apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s:  SyncCheckpointCCBEnqueued(<%p>)", __func__, (void*)apsSyncCheckpoint[ui32SyncCheckpointIndex]));
+		PVR_DPF((PVR_DBG_ERROR, "%s:                           ID:%d", __func__, SyncCheckpointGetId((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex])));
+#endif
+		SyncCheckpointCCBEnqueued((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+		if (bDeRefCheckpoints)
+		{
+			/* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
+			SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+		}
+	}
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	if (psList->ui32NumSyncs > 0)
+	{
+		IMG_UINT32 kk;
+		for (kk=0; kk<psList->ui32NumSyncs; kk++)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s:    <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __func__,
+			         (void*)&psList->pasFWAddrs[kk], kk,
+			         psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr));
+		}
+	}
+#endif
+	return eError;
+
+e0:
+	for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
+	{
+		if (bDeRefCheckpoints)
+		{
+			/* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
+			SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+		}
+	}
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
+#endif
+	return eError;
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListAppendCheckpoints
+
+ @Description   : Append the FW addresses of the sync checkpoints given in
+                  the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST
+
+ @Input           ui32NumSyncCheckpoints : The number of sync checkpoints
+                                           being passed in
+ @Input           apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details
+                                      are to be appended to the SYNC_ADDR_LIST
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+PVRSRV_ERROR
+SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList,
+								IMG_UINT32 ui32NumCheckpoints,
+								PSYNC_CHECKPOINT *apsSyncCheckpoint)
+{
+	return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_FALSE);
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListAppendAndDeRefCheckpoints
+
+ @Description   : Append the FW addresses of the sync checkpoints given in
+                  the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST.
+                  A reference is dropped for each of the checkpoints.
+
+ @Input           ui32NumSyncCheckpoints : The number of sync checkpoints
+                                           being passed in
+ @Input           apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details
+                                      are to be appended to the SYNC_ADDR_LIST
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+PVRSRV_ERROR
+SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList,
+									  IMG_UINT32 ui32NumCheckpoints,
+									  PSYNC_CHECKPOINT *apsSyncCheckpoint)
+{
+	return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_TRUE);
+}
+
+void
+SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints,
+							 PSYNC_CHECKPOINT *apsSyncCheckpoint)
+{
+	IMG_UINT32 ui32SyncCheckpointIndex;
+
+	for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
+	{
+		/* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
+		SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+	}
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListRollbackCheckpoints
+
+ @Description   : Rollback the enqueued count of each sync checkpoint in
+                  the given SYNC_ADDR_LIST. This needs to be done in the
+                  event of the kick call failing, so that the reference
+                  taken on each sync checkpoint on the firmware's behalf
+                  is dropped.
+
+ @Input           psList        : The SYNC_ADDR_LIST structure containing
+                                  sync checkpoints to be rolled back
+
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32SyncIndex;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: called (psList=<%p>)", __func__, (void*)psList));
+#endif
+	if (psList)
+	{
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __func__, psList->ui32NumSyncs));
+#endif
+		for (ui32SyncIndex=0; ui32SyncIndex<psList->ui32NumSyncs; ui32SyncIndex++)
+		{
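+			/* Sync checkpoint FW addresses are tagged with bit 0 set (sync prim
+			 * addresses are 4-byte aligned), so only checkpoint entries are
+			 * rolled back here.
+			 */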
+			if (psList->pasFWAddrs[ui32SyncIndex].ui32Addr & 0x1)
+			{
+				SyncCheckpointRollbackFromUFO(psDevNode, psList->pasFWAddrs[ui32SyncIndex].ui32Addr);
+			}
+		}
+	}
+	return eError;
+}
+
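+/* Typical SYNC_ADDR_LIST lifecycle (a sketch based on the functions above):
+ *
+ *     SYNC_ADDR_LIST sList;
+ *     SyncAddrListInit(&sList);
+ *     SyncAddrListPopulate(&sList, ui32NumSyncs, apsBlocks, paui32Offsets);
+ *     SyncAddrListAppendCheckpoints(&sList, ui32NumCheckpoints, apsCheckpoints);
+ *     ... submit the kick using sList.pasFWAddrs ...
+ *     On kick failure: SyncAddrListRollbackCheckpoints(psDevNode, &sList);
+ *     SyncAddrListDeinit(&sList);
+ */
+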
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+					  PVRSRV_DEVICE_NODE *psDevNode,
+					  SYNC_RECORD_HANDLE *phRecord,
+					  SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+					  IMG_UINT32 ui32FwBlockAddr,
+					  IMG_UINT32 ui32SyncOffset,
+					  IMG_BOOL bServerSync,
+					  IMG_UINT32 ui32ClassNameSize,
+					  const IMG_CHAR *pszClassName)
+{
+	struct SYNC_RECORD * psSyncRec;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	RGXSRV_HWPERF_ALLOC(psDevNode, SYNC,
+	                    ui32FwBlockAddr + ui32SyncOffset,
+	                    pszClassName,
+	                    ui32ClassNameSize);
+
+	if (!phRecord)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*phRecord = NULL;
+
+	psSyncRec = OSAllocMem(sizeof(*psSyncRec));
+	if (!psSyncRec)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	psSyncRec->psDevNode = psDevNode;
+	psSyncRec->psServerSyncPrimBlock = hServerSyncPrimBlock;
+	psSyncRec->ui32SyncOffset = ui32SyncOffset;
+	psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
+	psSyncRec->ui64OSTime = OSClockns64();
+	psSyncRec->uiPID = OSGetCurrentProcessID();
+	psSyncRec->eRecordType = bServerSync ? SYNC_RECORD_TYPE_SERVER : SYNC_RECORD_TYPE_CLIENT;
+
+	if (pszClassName)
+	{
+		if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+			ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN;
+		/* Copy over the class name annotation */
+		OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize);
+	}
+	else
+	{
+		/* No class name annotation */
+		psSyncRec->szClassName[0] = 0;
+	}
+
+	OSLockAcquire(psDevNode->hSyncServerRecordLock);
+	if (psDevNode->ui32SyncServerRecordCount < SYNC_RECORD_LIMIT)
+	{
+		dllist_add_to_head(&psDevNode->sSyncServerRecordList, &psSyncRec->sNode);
+		psDevNode->ui32SyncServerRecordCount++;
+
+		if (psDevNode->ui32SyncServerRecordCount > psDevNode->ui32SyncServerRecordCountHighWatermark)
+		{
+			psDevNode->ui32SyncServerRecordCountHighWatermark = psDevNode->ui32SyncServerRecordCount;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync record \"%s\". %u records already exist.",
+											__func__,
+											pszClassName,
+											psDevNode->ui32SyncServerRecordCount));
+		OSFreeMem(psSyncRec);
+		psSyncRec = NULL;
+		eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+	}
+	OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+	*phRecord = (SYNC_RECORD_HANDLE)psSyncRec;
+
+fail_alloc:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+			SYNC_RECORD_HANDLE hRecord)
+{
+	struct SYNC_RECORD **ppFreedSync;
+	struct SYNC_RECORD *pSync = (struct SYNC_RECORD*)hRecord;
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	if (!hRecord)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevNode = pSync->psDevNode;
+
+	OSLockAcquire(psDevNode->hSyncServerRecordLock);
+
+	RGXSRV_HWPERF_FREE(psDevNode, SYNC, pSync->ui32FwBlockAddr + pSync->ui32SyncOffset);
+
+	dllist_remove_node(&pSync->sNode);
+
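+	/* Freed records are parked in a ring buffer rather than freed immediately,
+	 * so debug dumps can still report recently freed syncs; the entry displaced
+	 * below is the one actually released.
+	 */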
+	if (psDevNode->uiSyncServerRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: freed sync record index out of range",
+				 __func__));
+		psDevNode->uiSyncServerRecordFreeIdx = 0;
+	}
+	ppFreedSync = &psDevNode->apsSyncServerRecordsFreed[psDevNode->uiSyncServerRecordFreeIdx];
+	psDevNode->uiSyncServerRecordFreeIdx =
+		(psDevNode->uiSyncServerRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN;
+
+	if (*ppFreedSync)
+	{
+		OSFreeMem(*ppFreedSync);
+	}
+	pSync->psServerSyncPrimBlock = NULL;
+	pSync->ui64OSTime = OSClockns64();
+	*ppFreedSync = pSync;
+
+	psDevNode->ui32SyncServerRecordCount--;
+
+	OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection,
+			PVRSRV_DEVICE_NODE *psDevNode,
+			IMG_BOOL bServerSync,
+			IMG_UINT32 ui32FWAddr,
+			IMG_UINT32 ui32ClassNameSize,
+			const IMG_CHAR *pszClassName)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	RGXSRV_HWPERF_ALLOC(psDevNode, SYNC, ui32FWAddr, pszClassName, ui32ClassNameSize);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection,
+			PVRSRV_DEVICE_NODE *psDevNode,
+			IMG_UINT32 ui32FWAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	RGXSRV_HWPERF_FREE(psDevNode, SYNC, ui32FWAddr);
+
+	return PVRSRV_OK;
+}
+
+static
+void _SyncConnectionRef(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+	IMG_INT iRefCount = OSAtomicIncrement(&psSyncConnectionData->sRefCount);
+
+	SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+						__func__, psSyncConnectionData, iRefCount);
+	PVR_UNREFERENCED_PARAMETER(iRefCount);
+}
+
+static
+void _SyncConnectionUnref(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+	IMG_INT iRefCount = OSAtomicDecrement(&psSyncConnectionData->sRefCount);
+	if (iRefCount == 0)
+	{
+		SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+		                    __func__, psSyncConnectionData, iRefCount);
+
+		PVR_ASSERT(dllist_is_empty(&psSyncConnectionData->sListHead));
+		OSLockDestroy(psSyncConnectionData->hLock);
+		OSFreeMem(psSyncConnectionData);
+	}
+	else
+	{
+		SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+		                    __func__, psSyncConnectionData, iRefCount);
+		PVR_ASSERT(iRefCount > 0);
+	}
+}
+
+static
+void _SyncConnectionAddBlock(CONNECTION_DATA *psConnection, SYNC_PRIMITIVE_BLOCK *psBlock)
+{
+	if (psConnection)
+	{
+		SYNC_CONNECTION_DATA *psSyncConnectionData = psConnection->psSyncConnectionData;
+
+		/*
+			Make sure the connection doesn't go away. It doesn't matter that we will release
+			the lock in between, as the refcount and the list don't have to be atomic w.r.t. each other
+		*/
+		_SyncConnectionRef(psSyncConnectionData);
+
+		OSLockAcquire(psSyncConnectionData->hLock);
+		dllist_add_to_head(&psSyncConnectionData->sListHead, &psBlock->sConnectionNode);
+		OSLockRelease(psSyncConnectionData->hLock);
+		psBlock->psSyncConnectionData = psSyncConnectionData;
+	}
+	else
+	{
+		psBlock->psSyncConnectionData = NULL;
+	}
+}
+
+static
+void _SyncConnectionRemoveBlock(SYNC_PRIMITIVE_BLOCK *psBlock)
+{
+	SYNC_CONNECTION_DATA *psSyncConnectionData = psBlock->psSyncConnectionData;
+
+	if (psBlock->psSyncConnectionData)
+	{
+		OSLockAcquire(psSyncConnectionData->hLock);
+		dllist_remove_node(&psBlock->sConnectionNode);
+		OSLockRelease(psSyncConnectionData->hLock);
+
+		_SyncConnectionUnref(psBlock->psSyncConnectionData);
+	}
+}
+
+static inline
+void _DoPrimBlockFree(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+
+	SYNC_REFCOUNT_PRINT("%s: Sync block %p (remove)",
+	                    __func__, psSyncBlk);
+
+	_SyncConnectionRemoveBlock(psSyncBlk);
+	DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc);
+	psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc);
+	OSFreeMem(psSyncBlk);
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+
+static
+void _SyncPrimitiveBlockRef(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+	IMG_INT iRefCount = OSAtomicIncrement(&psSyncBlk->sRefCount);
+
+	SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
+	                    __func__, psSyncBlk, iRefCount);
+	PVR_UNREFERENCED_PARAMETER(iRefCount);
+}
+
+static
+void _SyncPrimitiveBlockUnref(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+	IMG_INT iRefCount = OSAtomicDecrement(&psSyncBlk->sRefCount);
+	if (iRefCount == 0)
+	{
+		PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+
+		SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (remove)",
+		                    __func__, psSyncBlk, iRefCount);
+
+		_SyncConnectionRemoveBlock(psSyncBlk);
+		DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc);
+		psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc);
+		OSFreeMem(psSyncBlk);
+	}
+	else
+	{
+		SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
+		                    __func__, psSyncBlk, iRefCount);
+		PVR_ASSERT(iRefCount > 0);
+	}
+}
+#endif
+
+PVRSRV_ERROR
+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
+                                PVRSRV_DEVICE_NODE * psDevNode,
+								SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
+								IMG_UINT32 *puiSyncPrimVAddr,
+								IMG_UINT32 *puiSyncPrimBlockSize,
+								PMR        **ppsSyncPMR)
+{
+	SYNC_PRIMITIVE_BLOCK *psNewSyncBlk;
+	PVRSRV_ERROR eError;
+
+	psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK));
+	if (psNewSyncBlk == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+	psNewSyncBlk->psDevNode = psDevNode;
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block");
+
+	eError = psDevNode->pfnAllocUFOBlock(psDevNode,
+										 &psNewSyncBlk->psMemDesc,
+										 &psNewSyncBlk->uiFWAddr.ui32Addr,
+										 &psNewSyncBlk->ui32BlockSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	*puiSyncPrimVAddr = psNewSyncBlk->uiFWAddr.ui32Addr;
+
+	eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc,
+									  (void **) &psNewSyncBlk->pui32LinAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	eError = DevmemLocalGetImportHandle(psNewSyncBlk->psMemDesc, (void **) ppsSyncPMR);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e3;
+	}
+
+	OSAtomicWrite(&psNewSyncBlk->sRefCount, 1);
+
+	/* If there is a connection pointer then add the new block onto it's list */
+	_SyncConnectionAddBlock(psConnection, psNewSyncBlk);
+
+	*ppsSyncBlk = psNewSyncBlk;
+	*puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize;
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+						  "Allocated UFO block (FirmwareVAddr = 0x%08x)",
+						  *puiSyncPrimVAddr);
+
+	return PVRSRV_OK;
+
+e3:
+	DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc);
+e2:
+	psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc);
+e1:
+	OSFreeMem(psNewSyncBlk);
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
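+	/* With server syncs enabled the block is refcounted and freed on the last
+	 * unref; otherwise no other holders exist and it is freed directly.
+	 */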
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	_SyncPrimitiveBlockUnref(psSyncBlk);
+#else
+	_DoPrimBlockFree(psSyncBlk);
+#endif
+	return PVRSRV_OK;
+}
+
+static INLINE IMG_BOOL _CheckSyncIndex(SYNC_PRIMITIVE_BLOCK *psSyncBlk,
+							IMG_UINT32 ui32Index)
+{
+	return ((ui32Index * sizeof(IMG_UINT32)) < psSyncBlk->ui32BlockSize);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
+					IMG_UINT32 ui32Value)
+{
+	if (_CheckSyncIndex(psSyncBlk, ui32Index))
+	{
+		psSyncBlk->pui32LinAddr[ui32Index] = ui32Value;
+		return PVRSRV_OK;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncPrimSetKM: Index %u out of range for "
+							"0x%08X byte sync block (value 0x%08X)",
+							ui32Index,
+							psSyncBlk->ui32BlockSize,
+							ui32Value));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+PVRSRV_ERROR
+PVRSRVServerSyncPrimSetKM(SERVER_SYNC_PRIMITIVE *psServerSync, IMG_UINT32 ui32Value)
+{
+	OSWriteDeviceMem32(psServerSync->psSync->pui32LinAddr,ui32Value);
+
+	return PVRSRV_OK;
+}
+
+static void
+_ServerSyncRef(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	IMG_INT iRefCount = OSAtomicIncrement(&psSync->sRefCount);
+
+	SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+	                    __func__, psSync, iRefCount);
+	PVR_UNREFERENCED_PARAMETER(iRefCount);
+}
+
+static void
+_ServerSyncUnref(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = psSync->psDevNode;
+
+	IMG_INT iRefCount = OSAtomicDecrement(&psSync->sRefCount);
+	if (iRefCount == 0)
+	{
+		IMG_UINT32 ui32SyncAddr;
+
+		(void)SyncPrimGetFirmwareAddr(psSync->psSync, &ui32SyncAddr);
+		SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+		                    __func__, psSync, iRefCount);
+		HTBLOGK(HTB_SF_SYNC_SERVER_UNREF, ui32SyncAddr);
+
+		/* Remove the sync from the global list */
+		OSLockAcquire(psDevNode->hSyncServerListLock);
+		dllist_remove_node(&psSync->sSyncServerListNode);
+		OSLockRelease(psDevNode->hSyncServerListLock);
+
+		OSLockDestroy(psSync->hLock);
+		/* safe to ignore return value as an error indicates
+		 * the sync is either already freed or not a sync
+		 */
+		(void)SyncPrimFree(psSync->psSync);
+		OSFreeMem(psSync);
+	}
+	else
+	{
+		SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+		                    __func__, psSync, iRefCount);
+		PVR_ASSERT(iRefCount > 0);
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncAllocKM(CONNECTION_DATA * psConnection,
+			PVRSRV_DEVICE_NODE *psDevNode,
+			SERVER_SYNC_PRIMITIVE **ppsSync,
+			IMG_UINT32 *pui32SyncPrimVAddr,
+			IMG_UINT32 ui32ClassNameSize,
+			const IMG_CHAR *pszClassName)
+{
+	SERVER_SYNC_PRIMITIVE *psNewSync;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psNewSync = OSAllocMem(sizeof(SERVER_SYNC_PRIMITIVE));
+	if (psNewSync == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* szClassName must be set up now and used for the SyncPrimAlloc call because
+	 * pszClassName is allocated in the bridge code and is not NULL terminated
+	 */
+	if (pszClassName)
+	{
+		if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+			ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN;
+		/* Copy over the class name annotation */
+		OSStringLCopy(psNewSync->szClassName, pszClassName, ui32ClassNameSize);
+	}
+	else
+	{
+		/* No class name annotation */
+		psNewSync->szClassName[0] = 0;
+	}
+
+	eError = SyncPrimAllocForServerSync(psDevNode->hSyncPrimContext,
+						   &psNewSync->psSync,
+						   psNewSync->szClassName);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_sync_alloc;
+	}
+
+	eError = OSLockCreate(&psNewSync->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock_create;
+	}
+
+	eError = SyncPrimSet(psNewSync->psSync, 0);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_sync_op;
+	}
+
+	psNewSync->psDevNode = psDevNode;
+	psNewSync->ui32NextOp = 0;
+	psNewSync->ui32UID = g_ServerSyncUID++;
+	psNewSync->ui32LastSyncRequesterID = SYNC_REQUESTOR_UNKNOWN;
+	psNewSync->bSWOperation = IMG_FALSE;
+	psNewSync->ui32LastHWUpdate = 0x0bad592c;
+	psNewSync->ui32LastPdumpedBlock = PDUMP_BLOCKNUM_INVALID;
+	psNewSync->bFirstOperationInBlock = IMG_FALSE;
+	psNewSync->bPDumped = IMG_FALSE;
+	OSAtomicWrite(&psNewSync->sRefCount, 1);
+
+	eError = SyncPrimGetFirmwareAddr(psNewSync->psSync, pui32SyncPrimVAddr);
+	if (PVRSRV_OK != eError)
+	{
+		goto fail_sync_op;
+	}
+
+	/* Add the sync to the global list */
+	OSLockAcquire(psDevNode->hSyncServerListLock);
+	dllist_add_to_head(&psDevNode->sSyncServerSyncsList, &psNewSync->sSyncServerListNode);
+	OSLockRelease(psDevNode->hSyncServerListLock);
+
+	HTBLOGK(HTB_SF_SYNC_SERVER_ALLOC, *pui32SyncPrimVAddr);
+	SYNC_UPDATES_PRINT("%s: sync: %p, fwaddr: %8.8X", __func__, psNewSync, *pui32SyncPrimVAddr);
+	*ppsSync = psNewSync;
+	return PVRSRV_OK;
+
+fail_sync_op:
+	OSLockDestroy(psNewSync->hLock);
+
+fail_lock_create:
+	SyncPrimFree(psNewSync->psSync);
+
+fail_sync_alloc:
+	OSFreeMem(psNewSync);
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncFreeKM(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	_ServerSyncUnref(psSync);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncGetStatusKM(IMG_UINT32 ui32SyncCount,
+			SERVER_SYNC_PRIMITIVE **papsSyncs,
+			IMG_UINT32 *pui32UID,
+			IMG_UINT32 *pui32FWAddr,
+			IMG_UINT32 *pui32CurrentOp,
+			IMG_UINT32 *pui32NextOp)
+{
+	IMG_UINT32 i, ui32SyncAddr;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_ERROR eReturn = PVRSRV_OK;
+
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		PVRSRV_CLIENT_SYNC_PRIM *psClientSync = papsSyncs[i]->psSync;
+
+		eError = SyncPrimGetFirmwareAddr(psClientSync, &ui32SyncAddr);
+		if (PVRSRV_OK != eError)
+		{
+			pui32FWAddr[i] = 0;
+			pui32CurrentOp[i] = 0;
+			eReturn = eError;
+		}
+		else
+		{
+			pui32FWAddr[i] = ui32SyncAddr;
+			pui32CurrentOp[i] = OSReadDeviceMem32(psClientSync->pui32LinAddr);
+		}
+		pui32NextOp[i] = papsSyncs[i]->ui32NextOp;
+		pui32UID[i] = papsSyncs[i]->ui32UID;
+	}
+	return eReturn;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT) || defined(SUPPORT_SECURE_EXPORT)
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+							  SERVER_SYNC_EXPORT **ppsExport)
+{
+	SERVER_SYNC_EXPORT *psNewExport;
+	PVRSRV_ERROR eError;
+
+	psNewExport = OSAllocMem(sizeof(SERVER_SYNC_EXPORT));
+	if (!psNewExport)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	_ServerSyncRef(psSync);
+
+	psNewExport->psSync = psSync;
+	*ppsExport = psNewExport;
+
+	return PVRSRV_OK;
+e0:
+	return eError;
+}
+
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+	_ServerSyncUnref(psExport->psSync);
+
+	OSFreeMem(psExport);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerImportKM(PVRSRV_DEVICE_NODE *psDevNode,
+							  SERVER_SYNC_EXPORT *psExport,
+							  SERVER_SYNC_PRIMITIVE **ppsSync,
+							  IMG_UINT32 *pui32SyncPrimVAddr)
+{
+	SERVER_SYNC_PRIMITIVE *psSync = psExport->psSync;
+	PVRSRV_ERROR eError;
+
+	if (psSync->psDevNode != psDevNode)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: server sync invalid for this device",
+				 __func__));
+		return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+	}
+
+	_ServerSyncRef(psSync);
+
+	*ppsSync = psSync;
+	eError = SyncPrimGetFirmwareAddr(psSync->psSync,
+			pui32SyncPrimVAddr);
+	return eError;
+}
+#endif /* defined(SUPPORT_INSECURE_EXPORT) || defined(SUPPORT_SECURE_EXPORT) */
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+				SERVER_SYNC_EXPORT **ppsExport)
+{
+	return _PVRSRVSyncPrimServerExportKM(psSync, ppsExport);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+	return _PVRSRVSyncPrimServerUnexportKM(psExport);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerImportKM(CONNECTION_DATA *psConnection,
+							 PVRSRV_DEVICE_NODE *psDevNode,
+							 SERVER_SYNC_EXPORT *psExport,
+							 SERVER_SYNC_PRIMITIVE **ppsSync,
+							 IMG_UINT32 *pui32SyncPrimVAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	return _PVRSRVSyncPrimServerImportKM(psDevNode, psExport, ppsSync,
+										 pui32SyncPrimVAddr);
+}
+#endif /* defined(SUPPORT_INSECURE_EXPORT) */
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+	_PVRSRVSyncPrimServerUnexportKM(psExport);
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _ReleaseSecureSync(void *psExport)
+{
+	return PVRSRVSyncPrimServerSecureUnexportKM(psExport);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureExportKM(CONNECTION_DATA *psConnection,
+                                   PVRSRV_DEVICE_NODE * psDevNode,
+                                   SERVER_SYNC_PRIMITIVE *psSync,
+                                   IMG_SECURE_TYPE *phSecure,
+                                   SERVER_SYNC_EXPORT **ppsExport,
+                                   CONNECTION_DATA **ppsSecureConnection)
+{
+	SERVER_SYNC_EXPORT *psNewExport;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(ppsSecureConnection);
+
+	/* Create an export server sync */
+	eError = _PVRSRVSyncPrimServerExportKM(psSync,
+										   &psNewExport);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	/* Transform it into a secure export */
+	eError = OSSecureExport("secure_sync",
+				_ReleaseSecureSync,
+				(void *) psNewExport,
+				phSecure);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	*ppsExport = psNewExport;
+	return PVRSRV_OK;
+e1:
+	_PVRSRVSyncPrimServerUnexportKM(psNewExport);
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureImportKM(CONNECTION_DATA *psConnection,
+								   PVRSRV_DEVICE_NODE *psDevNode,
+								   IMG_SECURE_TYPE hSecure,
+								   SERVER_SYNC_PRIMITIVE **ppsSync,
+								   IMG_UINT32 *pui32SyncPrimVAddr)
+{
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_EXPORT *psImport;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* Retrieve the data from the secure import */
+	eError = OSSecureImport(hSecure, (void **) &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	eError = _PVRSRVSyncPrimServerImportKM(psDevNode, psImport, ppsSync,
+										   pui32SyncPrimVAddr);
+e0:
+	return eError;
+}
+#endif /* defined(SUPPORT_SECURE_EXPORT) */
+
+IMG_UINT32 PVRSRVServerSyncRequesterRegisterKM(IMG_UINT32 *pui32SyncRequesterID)
+{
+	*pui32SyncRequesterID = g_ui32NextSyncRequestorID++;
+
+	return PVRSRV_OK;
+}
+
+void PVRSRVServerSyncRequesterUnregisterKM(IMG_UINT32 ui32SyncRequesterID)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32SyncRequesterID);
+}
+
+static void
+_ServerSyncTakeOperation(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_BOOL bUpdate,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue)
+{
+	IMG_BOOL bInCaptureRange;
+#if defined(PDUMP)
+	IMG_UINT32 ui32CurrentBlock;
+#endif
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVR_ASSERT(OSLockIsLocked(ghServerSyncLock));
+#endif
+
+	/* Only advance the pending if an update is required */
+	if (bUpdate)
+	{
+		*pui32FenceValue = psSync->ui32NextOp++;
+	}
+	else
+	{
+		*pui32FenceValue = psSync->ui32NextOp;
+	}
+
+	*pui32UpdateValue = psSync->ui32NextOp;
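+	/* e.g. with ui32NextOp == 5: bUpdate gives fence = 5, update = 6
+	 * (ui32NextOp becomes 6); !bUpdate gives fence = update = 5.
+	 */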
+
+	PDumpIsCaptureFrameKM(&bInCaptureRange);
+
+#if defined(PDUMP)
+	PDumpGetCurrentBlockKM(&ui32CurrentBlock);
+
+	/* Is this first operation taken on _this_ sync in a new pdump-block? */
+	psSync->bFirstOperationInBlock = (psSync->ui32LastPdumpedBlock != ui32CurrentBlock) && (ui32CurrentBlock != PDUMP_BLOCKNUM_INVALID);
+#endif
+	/*
+		If this is the 1st operation (in this capture range) then PDump
+		this sync.
+
+		In the block-mode of PDump, if this is the first operation taken on _this_
+		particular sync in the new pdump-block then PDump this sync too.
+
+		This means it is the first operation taken on _this_ particular sync after the
+		live-FW thread and the driver thread were synchronised at the start of the new
+		pdump-block. We therefore need to re-dump this sync so that its latest value can
+		be loaded _after_ the sim-FW thread and the script thread are synchronised at the
+		start of playback of the new/next pdump-block.
+	*/
+	if ((!psSync->bPDumped && bInCaptureRange) || psSync->bFirstOperationInBlock)
+	{
+#if defined(PDUMP)
+		{
+			IMG_UINT32 ui32SyncAddr;
+			(void)SyncPrimGetFirmwareAddr(psSync->psSync, &ui32SyncAddr);
+			PDumpCommentWithFlags(0,
+				"Dump initial sync state (0x%p, FW VAddr = 0x%08x) = 0x%08x\n",
+				psSync,
+				ui32SyncAddr,
+				OSReadDeviceMem32(psSync->psSync->pui32LinAddr));
+		}
+		psSync->ui32LastPdumpedBlock = ui32CurrentBlock; /* Update last pdumped block number */
+#endif
+
+		SyncPrimPDump(psSync->psSync);
+		psSync->bPDumped = IMG_TRUE;
+	}
+
+	/*
+		When exiting capture range clear down bPDumped as we might re-enter
+		capture range and thus need to PDump this sync again
+	*/
+	if (!bInCaptureRange)
+	{
+		psSync->bPDumped = IMG_FALSE;
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue,
+						  IMG_UINT32 ui32SyncRequesterID,
+						  IMG_BOOL bUpdate,
+						  IMG_BOOL *pbFenceRequired)
+{
+	PVRSRV_ERROR eError;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVLockServerSync();
+#endif
+
+	eError = PVRSRVServerSyncQueueSWOpKM_NoGlobalLock(psSync,
+								pui32FenceValue,
+								pui32UpdateValue,
+								ui32SyncRequesterID,
+								bUpdate,
+								pbFenceRequired);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVUnlockServerSync();
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM_NoGlobalLock(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue,
+						  IMG_UINT32 ui32SyncRequesterID,
+						  IMG_BOOL bUpdate,
+						  IMG_BOOL *pbFenceRequired)
+{
+
+	_ServerSyncRef(psSync);
+
+	/*
+		We need to acquire the lock here to ensure that the state we're
+		modifying below stays consistent with itself. It doesn't matter if
+		another thread acquires the lock between us taking the reference
+		above and acquiring the lock here, as the reference guarantees the
+		sync can't go away.
+	*/
+	OSLockAcquire(psSync->hLock);
+	_ServerSyncTakeOperation(psSync,
+							 bUpdate,
+							 pui32FenceValue,
+							 pui32UpdateValue);
+
+	/*
+		The caller wants to know whether a fence command is required,
+		i.e. whether the last operation on this sync was issued by a
+		different sync requester.
+	*/
+	if (pbFenceRequired)
+	{
+		if (ui32SyncRequesterID == psSync->ui32LastSyncRequesterID)
+		{
+			*pbFenceRequired = IMG_FALSE;
+		}
+		else
+		{
+			*pbFenceRequired = IMG_TRUE;
+		}
+	}
+	/*
+		If we're transitioning from a HW operation to a SW operation we
+		need to save the last update the HW will do so that when we PDump
+		we can issue a POL for it before the next HW operation and then
+		LDB in the last SW fence update
+	*/
+	if (psSync->bSWOperation == IMG_FALSE)
+	{
+		psSync->bSWOperation = IMG_TRUE;
+		psSync->ui32LastHWUpdate = *pui32FenceValue;
+		PDumpIsCaptureFrameKM(&psSync->bSWOpStartedInCaptRange);
+	}
+
+	if (pbFenceRequired)
+	{
+		if (*pbFenceRequired)
+		{
+			SYNC_UPDATES_PRINT("%s: sync: %p, fence: %d, value: %d", __func__, psSync, *pui32FenceValue, *pui32UpdateValue);
+		}
+	}
+
+	/* Only update the last requester ID if we are making changes to this
+	 * sync object. */
+	if (bUpdate)
+		psSync->ui32LastSyncRequesterID = ui32SyncRequesterID;
+
+	OSLockRelease(psSync->hLock);
+
+	return PVRSRV_OK;
+}
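+
+/*
+	Illustrative usage sketch (annotation only; ui32MyRequesterID is a
+	placeholder for an ID obtained from PVRSRVServerSyncRequesterRegisterKM()):
+
+		IMG_UINT32 ui32Fence, ui32Update;
+		IMG_BOOL bFenceRequired;
+
+		PVRSRVServerSyncQueueSWOpKM(psSync, &ui32Fence, &ui32Update,
+		                            ui32MyRequesterID, IMG_TRUE,
+		                            &bFenceRequired);
+		... do the software operation the sync guards ...
+		ServerSyncCompleteOp(psSync, IMG_TRUE, ui32Update);
+*/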
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+						       IMG_BOOL bUpdate,
+						       IMG_UINT32 *pui32FenceValue,
+						       IMG_UINT32 *pui32UpdateValue)
+{
+	PVRSRV_ERROR eError;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVLockServerSync();
+#endif
+
+	eError = PVRSRVServerSyncQueueHWOpKM_NoGlobalLock(psSync,
+							bUpdate,
+							pui32FenceValue,
+							pui32UpdateValue);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVUnlockServerSync();
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM_NoGlobalLock(SERVER_SYNC_PRIMITIVE *psSync,
+						       IMG_BOOL bUpdate,
+						       IMG_UINT32 *pui32FenceValue,
+						       IMG_UINT32 *pui32UpdateValue)
+{
+	/*
+		For HW operations the client is required to ensure the
+		operation has completed before freeing the sync, as we have
+		no way of dropping the refcount if we were to acquire it
+		here.
+
+		Take the lock to ensure the state that we're modifying below
+		will be consistent with itself.
+	*/
+	OSLockAcquire(psSync->hLock);
+	_ServerSyncTakeOperation(psSync,
+							 bUpdate,
+							 pui32FenceValue,
+							 pui32UpdateValue);
+
+	/*
+		Note:
+
+		We might want to consider optimising the fences that we write for
+		HW operations but for now just clear it back to unknown
+	*/
+	psSync->ui32LastSyncRequesterID = SYNC_REQUESTOR_UNKNOWN;
+
+	if (psSync->bSWOperation)
+	{
+#if defined(PDUMP)
+		{
+			IMG_UINT32 ui32SyncAddr;
+			(void)SyncPrimGetFirmwareAddr(psSync->psSync, &ui32SyncAddr);
+			PDumpCommentWithFlags(0,
+				"Wait for HW ops and dummy update for SW ops (0x%p, FW VAddr = 0x%08x, value = 0x%08x)\n",
+				psSync,
+				ui32SyncAddr,
+				*pui32FenceValue);
+		}
+#endif
+
+		/* In block-mode PDump, if this is NOT the first operation on _this_
+		 * sync in the current pdump-block and the SW operation started in
+		 * capture range (which is always true in block-mode), dump a POL for
+		 * the previous HW operation.
+		 *
+		 * In other words, if this is not the first operation on _this_ sync
+		 * in the current pdump-block, we need to synchronise the script
+		 * thread and the sim-FW thread on _this_ sync before processing
+		 * further commands from the current pdump-block.
+		 */
+		if (psSync->bSWOpStartedInCaptRange && !psSync->bFirstOperationInBlock)
+		{
+			/* Dump a POL for the previous HW operation */
+			SyncPrimPDumpPol(psSync->psSync,
+								psSync->ui32LastHWUpdate,
+								0xffffffff,
+								PDUMP_POLL_OPERATOR_EQUAL,
+								0);
+		}
+
+		/* Dump the expected value (i.e. the value after all the SW operations) */
+		SyncPrimPDumpValue(psSync->psSync, *pui32FenceValue);
+
+		/* Reset the state as we've just done a HW operation */
+		psSync->bSWOperation = IMG_FALSE;
+	}
+	OSLockRelease(psSync->hLock);
+
+	SYNC_UPDATES_PRINT("%s: sync: %p, fence: %d, value: %d", __func__, psSync, *pui32FenceValue, *pui32UpdateValue);
+
+	return PVRSRV_OK;
+}
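+
+/*
+	Annotation (not from the original source): on a SW->HW transition the
+	PDump script thus contains "POL for the last HW update, then LDB of the
+	final SW value", so playback waits for the preceding HW work before
+	loading the value the SW operations would have produced.
+*/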
+
+IMG_BOOL ServerSyncFenceIsMet(SERVER_SYNC_PRIMITIVE *psSync,
+							   IMG_UINT32 ui32FenceValue)
+{
+	SYNC_UPDATES_PRINT("%s: sync: %p, value(%d) == fence(%d)?", __func__, psSync, *psSync->psSync->pui32LinAddr, ui32FenceValue);
+	return (OSReadDeviceMem32(psSync->psSync->pui32LinAddr) == ui32FenceValue);
+}
+
+void
+ServerSyncCompleteOp(SERVER_SYNC_PRIMITIVE *psSync,
+					 IMG_BOOL bDoUpdate,
+					 IMG_UINT32 ui32UpdateValue)
+{
+	if (bDoUpdate)
+	{
+		SYNC_UPDATES_PRINT("%s: sync: %p (%d) = %d", __func__, psSync, *psSync->psSync->pui32LinAddr, ui32UpdateValue);
+
+		OSWriteDeviceMem32(psSync->psSync->pui32LinAddr, ui32UpdateValue);
+	}
+
+	_ServerSyncUnref(psSync);
+}
+
+IMG_UINT32 ServerSyncGetId(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	return psSync->ui32UID;
+}
+
+PVRSRV_ERROR
+ServerSyncGetFWAddr(SERVER_SYNC_PRIMITIVE *psSync, IMG_UINT32 *pui32SyncAddr)
+{
+	return SyncPrimGetFirmwareAddr(psSync->psSync, pui32SyncAddr);
+}
+
+IMG_UINT32 ServerSyncGetValue(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	return OSReadDeviceMem32(psSync->psSync->pui32LinAddr);
+}
+
+IMG_UINT32 ServerSyncGetNextValue(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	return psSync->ui32NextOp;
+}
+
+PVRSRV_DEVICE_NODE* ServerSyncGetDeviceNode(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	return psSync->psDevNode;
+}
+
+static void _ServerSyncState(PDLLIST_NODE psNode,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	SERVER_SYNC_PRIMITIVE *psSync = IMG_CONTAINER_OF(psNode, SERVER_SYNC_PRIMITIVE, sSyncServerListNode);
+
+	if (OSReadDeviceMem32(psSync->psSync->pui32LinAddr) != psSync->ui32NextOp)
+	{
+		IMG_UINT32 ui32SyncAddr, ui32Val = 0;
+
+		(void)ServerSyncGetFWAddr(psSync, &ui32SyncAddr);
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+		PVR_UNREFERENCED_PARAMETER(ui32Val);
+		PVR_DUMPDEBUG_LOG("\tPending server sync (ID = %d, FWAddr = 0x%08x): Current = 0x%08x, NextOp = 0x%08x (%s)",
+				psSync->ui32UID,
+				ui32SyncAddr,
+				ServerSyncGetValue(psSync),
+				psSync->ui32NextOp,
+				psSync->szClassName);
+#else
+		RGXReadWithSP(psSync->psDevNode->pvDevice, ui32SyncAddr, &ui32Val);
+		PVR_DUMPDEBUG_LOG("\tPending server sync (ID = %d, FWAddr = 0x%08x): Value (Host) = 0x%08x, Value (FW) = 0x%08x, NextOp = 0x%08x (%s)",
+				psSync->ui32UID,
+				ui32SyncAddr,
+				ServerSyncGetValue(psSync),
+				ui32Val,
+				psSync->ui32NextOp,
+				psSync->szClassName);
+#endif
+	}
+}
+
+static void _ServerSyncDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+					IMG_UINT32 ui32VerbLevel,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+	DLLIST_NODE *psNode, *psNext;
+
+	if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM) &&
+			!dllist_is_empty(&psDevNode->sSyncServerSyncsList))
+	{
+		PVR_DUMPDEBUG_LOG("------[ Pending Server Syncs ]------");
+		OSLockAcquire(psDevNode->hSyncServerListLock);
+		dllist_foreach_node(&psDevNode->sSyncServerSyncsList, psNode, psNext)
+		{
+			_ServerSyncState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+		OSLockRelease(psDevNode->hSyncServerListLock);
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCreateKM(IMG_UINT32 ui32SyncBlockCount,
+						 SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock,
+						 IMG_UINT32 ui32ClientSyncCount,
+						 IMG_UINT32 *paui32SyncBlockIndex,
+						 IMG_UINT32 *paui32Index,
+						 IMG_UINT32 ui32ServerSyncCount,
+						 SERVER_SYNC_PRIMITIVE **papsServerSync,
+						 SERVER_OP_COOKIE **ppsServerCookie)
+{
+	SERVER_OP_COOKIE *psNewCookie;
+	IMG_UINT32 ui32BlockAllocSize;
+	IMG_UINT32 ui32ServerAllocSize;
+	IMG_UINT32 ui32ClientAllocSize;
+	IMG_UINT32 ui32TotalAllocSize;
+	IMG_UINT32 i;
+	IMG_CHAR *pcPtr;
+	PVRSRV_ERROR eError;
+
+	if ((ui32ClientSyncCount + ui32ServerSyncCount) > SYNC_PRIM_OP_MAX_SYNCS)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Too many syncs specified", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Allocate space for the sync block pointer list */
+	ui32BlockAllocSize = ui32SyncBlockCount * (sizeof(SYNC_PRIMITIVE_BLOCK *));
+
+	/* Allocate space for the per-client-sync elements (block index, sync
+	 * index, flags, fence value and update value) */
+	ui32ClientAllocSize = ui32ClientSyncCount * (5 * sizeof(IMG_UINT32));
+
+	/* Allocate space for the per-server-sync elements (sync pointer, fence
+	 * value and update value) */
+	ui32ServerAllocSize = ui32ServerSyncCount * (sizeof(SERVER_SYNC_PRIMITIVE *)
+							+ (2 * sizeof(IMG_UINT32)));
+
+	ui32TotalAllocSize = sizeof(SERVER_OP_COOKIE) +
+							 ui32BlockAllocSize +
+							 ui32ServerAllocSize +
+							 ui32ClientAllocSize;
+
+	psNewCookie = OSAllocZMem(ui32TotalAllocSize);
+	if (!psNewCookie)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+	pcPtr = (IMG_CHAR *) psNewCookie;
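+
+	/*
+		Illustrative layout of the single allocation (annotation only, not
+		from the original source; it mirrors the pointer setup below):
+
+			SERVER_OP_COOKIE
+			SYNC_PRIMITIVE_BLOCK *        x ui32SyncBlockCount
+			IMG_UINT32 SyncBlockIndex     x ui32ClientSyncCount
+			IMG_UINT32 Index              x ui32ClientSyncCount
+			IMG_UINT32 Flags              x ui32ClientSyncCount
+			IMG_UINT32 FenceValue         x ui32ClientSyncCount
+			IMG_UINT32 UpdateValue        x ui32ClientSyncCount
+			SERVER_SYNC_PRIMITIVE *       x ui32ServerSyncCount
+			IMG_UINT32 ServerFenceValue   x ui32ServerSyncCount
+			IMG_UINT32 ServerUpdateValue  x ui32ServerSyncCount
+	*/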
+
+	/* Setup the pointers */
+	pcPtr += sizeof(SERVER_OP_COOKIE);
+	psNewCookie->papsSyncPrimBlock = (SYNC_PRIMITIVE_BLOCK **) pcPtr;
+
+	pcPtr += sizeof(SYNC_PRIMITIVE_BLOCK *) * ui32SyncBlockCount;
+	psNewCookie->paui32SyncBlockIndex = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32Index = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32Flags = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32FenceValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32UpdateValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->papsServerSync = (SERVER_SYNC_PRIMITIVE **) pcPtr;
+
+	pcPtr += sizeof(SERVER_SYNC_PRIMITIVE *) * ui32ServerSyncCount;
+	psNewCookie->paui32ServerFenceValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+	psNewCookie->paui32ServerUpdateValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+
+	/* Check the pointer setup went ok */
+	PVR_ASSERT(pcPtr == (((IMG_CHAR *) psNewCookie) + ui32TotalAllocSize));
+
+	psNewCookie->ui32SyncBlockCount = ui32SyncBlockCount;
+	psNewCookie->ui32ServerSyncCount = ui32ServerSyncCount;
+	psNewCookie->ui32ClientSyncCount = ui32ClientSyncCount;
+	psNewCookie->bActive = IMG_FALSE;
+	HTBLOGK(HTB_SF_SYNC_PRIM_OP_CREATE, psNewCookie, ui32SyncBlockCount,
+			ui32ServerSyncCount, ui32ClientSyncCount);
+
+	/* Copy all the data into our server cookie */
+	OSCachedMemCopy(psNewCookie->papsSyncPrimBlock,
+			  papsSyncPrimBlock,
+			  sizeof(SYNC_PRIMITIVE_BLOCK *) * ui32SyncBlockCount);
+
+	/* Copy the sync block and sync indices.
+	 *
+	 * Each index must be verified:
+	 * Each Sync Block index must be within the range of the number of sync block
+	 * pointers received. All those pointers are valid, as verified by the bridge.
+	 * And each Sync index must be valid for the Sync Block it relates to.
+	 */
+	for (i = 0; i < ui32ClientSyncCount; i++)
+	{
+		SYNC_PRIMITIVE_BLOCK *psSyncBlock;
+
+		/* first copy the sync block index and ensure it is in range */
+
+		if (paui32SyncBlockIndex[i] >= ui32SyncBlockCount)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Sync block index %u is out of range",
+										__func__,
+										paui32SyncBlockIndex[i]));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto err_range;
+		}
+
+		psNewCookie->paui32SyncBlockIndex[i] = paui32SyncBlockIndex[i];
+
+		/* now copy the sync index and ensure it is a valid index within
+		 * the corresponding sync block (note: the sync block index was
+		 * verified above)
+		 */
+
+		psSyncBlock = psNewCookie->papsSyncPrimBlock[paui32SyncBlockIndex[i]];
+
+		if (_CheckSyncIndex(psSyncBlock, paui32Index[i]) == IMG_FALSE)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Sync index %u is out of range",
+										__func__,
+										paui32Index[i]));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto err_range;
+		}
+
+		psNewCookie->paui32Index[i] = paui32Index[i];
+	}
+
+	OSCachedMemCopy(psNewCookie->papsServerSync,
+			  papsServerSync,
+			  sizeof(SERVER_SYNC_PRIMITIVE *) * ui32ServerSyncCount);
+
+	/*
+		Take a reference on all the sync blocks and server syncs so they can't
+		be freed while we're using them
+	*/
+	for (i = 0; i < ui32SyncBlockCount; i++)
+	{
+		_SyncPrimitiveBlockRef(psNewCookie->papsSyncPrimBlock[i]);
+	}
+
+	for (i = 0; i < ui32ServerSyncCount; i++)
+	{
+		_ServerSyncRef(psNewCookie->papsServerSync[i]);
+	}
+
+	*ppsServerCookie = psNewCookie;
+	return PVRSRV_OK;
+
+err_range:
+	OSFreeMem(psNewCookie);
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpTakeKM(SERVER_OP_COOKIE *psServerCookie,
+					       IMG_UINT32 ui32ClientSyncCount,
+					       IMG_UINT32 *paui32Flags,
+					       IMG_UINT32 *paui32FenceValue,
+					       IMG_UINT32 *paui32UpdateValue,
+					       IMG_UINT32 ui32ServerSyncCount,
+						   IMG_UINT32 *paui32ServerFlags)
+{
+	IMG_UINT32 i;
+
+	if ((ui32ClientSyncCount != psServerCookie->ui32ClientSyncCount) ||
+		(ui32ServerSyncCount != psServerCookie->ui32ServerSyncCount))
+	{
+		/* The bridge layer should have stopped us getting here, but check just in case */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync counts", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	for (i = 0; i < ui32ServerSyncCount; i++)
+	{
+		/* Server syncs must fence */
+		if ((paui32ServerFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK) == 0)
+		{
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+
+	/*
+		For client syncs all we need to do is save the values
+		that we've been passed
+	*/
+	OSCachedMemCopy(psServerCookie->paui32Flags,
+			  paui32Flags,
+			  sizeof(IMG_UINT32) * ui32ClientSyncCount);
+	OSCachedMemCopy(psServerCookie->paui32FenceValue,
+			  paui32FenceValue,
+			  sizeof(IMG_UINT32) * ui32ClientSyncCount);
+	OSCachedMemCopy(psServerCookie->paui32UpdateValue,
+			  paui32UpdateValue,
+			  sizeof(IMG_UINT32) * ui32ClientSyncCount);
+
+	/*
+		For server syncs we just take an operation
+	*/
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVLockServerSync();
+#endif
+	for (i = 0; i < ui32ServerSyncCount; i++)
+	{
+		/*
+			A take operation can only take one operation at a time, so we
+			can't optimise away fences; just report the requester as unknown.
+		*/
+		PVRSRVServerSyncQueueSWOpKM_NoGlobalLock(psServerCookie->papsServerSync[i],
+								  &psServerCookie->paui32ServerFenceValue[i],
+								  &psServerCookie->paui32ServerUpdateValue[i],
+								  SYNC_REQUESTOR_UNKNOWN,
+								  (paui32ServerFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE) ? IMG_TRUE:IMG_FALSE,
+								  NULL);
+	}
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVUnlockServerSync();
+#endif
+
+	HTBLOGK(HTB_SF_SYNC_PRIM_OP_TAKE, psServerCookie,
+			ui32ServerSyncCount, ui32ClientSyncCount);
+	psServerCookie->bActive = IMG_TRUE;
+	return PVRSRV_OK;
+}
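+
+/*
+	Illustrative lifecycle of a server op cookie (annotation only, derived
+	from the functions in this file):
+
+		PVRSRVSyncPrimOpCreateKM()   - allocate the cookie, ref blocks/syncs
+		PVRSRVSyncPrimOpTakeKM()     - snapshot fence/update values
+		PVRSRVSyncPrimOpReadyKM()    - poll until every fence is met
+		PVRSRVSyncPrimOpCompleteKM() - apply the update values
+		PVRSRVSyncPrimOpDestroyKM()  - drop the refs and free the cookie
+*/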
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpReadyKM(SERVER_OP_COOKIE *psServerCookie,
+						IMG_BOOL *pbReady)
+{
+	IMG_UINT32 i;
+	IMG_BOOL bReady = IMG_TRUE;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!psServerCookie->bActive)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Operation cookie not active (no take operation performed)", __func__));
+
+		bReady = IMG_FALSE;
+		eError = PVRSRV_ERROR_BAD_SYNC_STATE;
+		goto e0;
+	}
+
+	/* Check the client syncs */
+	for (i = 0; i < psServerCookie->ui32ClientSyncCount; i++)
+	{
+		if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+		{
+			IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+			IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+			SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+			if (psSyncBlock->pui32LinAddr[ui32Index] !=
+					psServerCookie->paui32FenceValue[i])
+			{
+				bReady = IMG_FALSE;
+				goto e0;
+			}
+		}
+	}
+
+	for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+	{
+		bReady = ServerSyncFenceIsMet(psServerCookie->papsServerSync[i],
+									  psServerCookie->paui32ServerFenceValue[i]);
+		if (!bReady)
+		{
+			break;
+		}
+	}
+
+e0:
+	*pbReady = bReady;
+	return eError;
+}
+
+static IMG_BOOL
+_SyncPrimOpComplete(SERVER_OP_COOKIE *psServerCookie)
+{
+	RGX_HWPERF_UFO_DATA_ELEMENT sUFOData;
+	IMG_UINT32 i;
+	IMG_BOOL bDidUpdates = IMG_FALSE;
+
+	for (i = 0; i < psServerCookie->ui32ClientSyncCount; i++)
+	{
+		if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+		{
+			IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+			IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+			SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+			sUFOData.sUpdate.ui32FWAddr = psSyncBlock->uiFWAddr.ui32Addr + ui32Index * sizeof(IMG_UINT32);
+			sUFOData.sUpdate.ui32OldValue = psSyncBlock->pui32LinAddr[ui32Index];
+			sUFOData.sUpdate.ui32NewValue = psServerCookie->paui32UpdateValue[i];
+
+			psSyncBlock->pui32LinAddr[ui32Index] = psServerCookie->paui32UpdateValue[i];
+			RGXSRV_HWPERF_UFO(psSyncBlock->psDevNode->pvDevice,
+			                  RGX_HWPERF_UFO_EV_UPDATE, &sUFOData, IMG_TRUE);
+
+			bDidUpdates = IMG_TRUE;
+		}
+	}
+
+	for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+	{
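+		/*
+			Annotation (not in the original source): a server sync taken with
+			the UPDATE flag gets paui32ServerUpdateValue[i] ==
+			paui32ServerFenceValue[i] + 1 from _ServerSyncTakeOperation(),
+			while a check-only take leaves the two equal, so the comparison
+			below recovers whether an update was requested at take time.
+		*/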
+		IMG_BOOL bUpdate = psServerCookie->paui32ServerFenceValue[i] != psServerCookie->paui32ServerUpdateValue[i];
+
+		if (bUpdate)
+		{
+			IMG_UINT32 ui32SyncAddr;
+
+			(void)ServerSyncGetFWAddr(psServerCookie->papsServerSync[i], &ui32SyncAddr);
+			sUFOData.sUpdate.ui32FWAddr = ui32SyncAddr;
+			sUFOData.sUpdate.ui32OldValue = ServerSyncGetValue(psServerCookie->papsServerSync[i]);
+			sUFOData.sUpdate.ui32NewValue = psServerCookie->paui32ServerUpdateValue[i];
+			RGXSRV_HWPERF_UFO(psServerCookie->papsServerSync[i]->psDevNode->pvDevice,
+			                  RGX_HWPERF_UFO_EV_UPDATE, &sUFOData, IMG_TRUE);
+
+			bDidUpdates = IMG_TRUE;
+		}
+
+		ServerSyncCompleteOp(psServerCookie->papsServerSync[i],
+							 bUpdate,
+							 psServerCookie->paui32ServerUpdateValue[i]);
+	}
+
+	psServerCookie->bActive = IMG_FALSE;
+
+	return bDidUpdates;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCompleteKM(SERVER_OP_COOKIE *psServerCookie)
+{
+	IMG_BOOL bReady;
+
+	(void) PVRSRVSyncPrimOpReadyKM(psServerCookie, &bReady);
+
+	/* Check the client is playing ball */
+	if (!bReady)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: sync op still not ready", __func__));
+
+		return PVRSRV_ERROR_BAD_SYNC_STATE;
+	}
+
+	HTBLOGK(HTB_SF_SYNC_PRIM_OP_COMPLETE, psServerCookie);
+
+	if (_SyncPrimOpComplete(psServerCookie))
+	{
+		PVRSRVCheckStatus(NULL);
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpDestroyKM(SERVER_OP_COOKIE *psServerCookie)
+{
+	IMG_UINT32 i;
+
+	/* If the operation is still active then check if it's finished yet */
+	if (psServerCookie->bActive)
+	{
+		if (PVRSRVSyncPrimOpCompleteKM(psServerCookie) == PVRSRV_ERROR_BAD_SYNC_STATE)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Not ready, ask for retry", __func__));
+			return PVRSRV_ERROR_RETRY;
+		}
+	}
+
+	/* Drop our references on the sync blocks and server syncs*/
+	for (i = 0; i < psServerCookie->ui32SyncBlockCount; i++)
+	{
+		_SyncPrimitiveBlockUnref(psServerCookie->papsSyncPrimBlock[i]);
+	}
+
+	for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+	{
+		_ServerSyncUnref(psServerCookie->papsServerSync[i]);
+	}
+
+	HTBLOGK(HTB_SF_SYNC_PRIM_OP_DESTROY, psServerCookie);
+	OSFreeMem(psServerCookie);
+	return PVRSRV_OK;
+}
+#endif  /* defined(SUPPORT_SERVER_SYNC_IMPL) */
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
+{
+	/*
+		We might be asked to PDump sync state outside of the capture range
+		(e.g. texture uploads) so make this continuous.
+	*/
+	DevmemPDumpLoadMemValue32(psSyncBlk->psMemDesc,
+					   ui32Offset,
+					   ui32Value,
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
+{
+	/*
+		We might be asked to PDump sync state outside of the capture range
+		(e.g. texture uploads) so make this continuous.
+	*/
+	DevmemPDumpLoadMem(psSyncBlk->psMemDesc,
+					   ui32Offset,
+					   sizeof(IMG_UINT32),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+						 IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T ui32PDumpFlags)
+{
+	DevmemPDumpDevmemPol32(psSyncBlk->psMemDesc,
+						   ui32Offset,
+						   ui32Value,
+						   ui32Mask,
+						   eOperator,
+						   ui32PDumpFlags);
+
+	return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T ui32PDumpFlags)
+{
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!psServerCookie->bActive)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Operation cookie not active (no take operation performed)", __func__));
+
+		eError = PVRSRV_ERROR_BAD_SYNC_STATE;
+		goto e0;
+	}
+
+	/* PDump POL on the client syncs */
+	for (i = 0; i < psServerCookie->ui32ClientSyncCount; i++)
+	{
+		if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+		{
+			IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+			IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+			SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+			PVRSRVSyncPrimPDumpPolKM(psSyncBlock,
+									ui32Index*sizeof(IMG_UINT32),
+									psServerCookie->paui32FenceValue[i],
+									0xFFFFFFFFU,
+									eOperator,
+									ui32PDumpFlags);
+		}
+	}
+
+	/* PDump POL on the server syncs */
+	for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+	{
+		SERVER_SYNC_PRIMITIVE *psServerSync = psServerCookie->papsServerSync[i];
+		IMG_UINT32 ui32FenceValue = psServerCookie->paui32ServerFenceValue[i];
+
+		SyncPrimPDumpPol(psServerSync->psSync,
+						ui32FenceValue,
+						0xFFFFFFFFU,
+						PDUMP_POLL_OPERATOR_EQUAL,
+						ui32PDumpFlags);
+	}
+
+e0:
+	return eError;
+}
+#endif
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+						 IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+						 IMG_UINT64 uiBufferSize)
+{
+	DevmemPDumpCBP(psSyncBlk->psMemDesc,
+				   ui32Offset,
+				   uiWriteOffset,
+				   uiPacketSize,
+				   uiBufferSize);
+	return PVRSRV_OK;
+}
+#endif
+
+/* SyncRegisterConnection */
+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData)
+{
+	SYNC_CONNECTION_DATA *psSyncConnectionData;
+	PVRSRV_ERROR eError;
+
+	psSyncConnectionData = OSAllocMem(sizeof(SYNC_CONNECTION_DATA));
+	if (psSyncConnectionData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	eError = OSLockCreate(&psSyncConnectionData->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lockcreate;
+	}
+	dllist_init(&psSyncConnectionData->sListHead);
+	OSAtomicWrite(&psSyncConnectionData->sRefCount, 1);
+
+	*ppsSyncConnectionData = psSyncConnectionData;
+	return PVRSRV_OK;
+
+fail_lockcreate:
+	OSFreeMem(psSyncConnectionData);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/* SyncUnregisterConnection */
+void SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+	_SyncConnectionUnref(psSyncConnectionData);
+}
+
+void SyncConnectionPDumpSyncBlocks(void *hSyncPrivData, PDUMP_TRANSITION_EVENT eEvent)
+{
+	if ((eEvent == PDUMP_TRANSITION_EVENT_RANGE_ENTERED) || (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_STARTED))
+	{
+		SYNC_CONNECTION_DATA *psSyncConnectionData = hSyncPrivData;
+		DLLIST_NODE *psNode, *psNext;
+
+		OSLockAcquire(psSyncConnectionData->hLock);
+
+		PDUMPCOMMENT("Dump client Sync Prim state");
+		dllist_foreach_node(&psSyncConnectionData->sListHead, psNode, psNext)
+		{
+			SYNC_PRIMITIVE_BLOCK *psSyncBlock =
+				IMG_CONTAINER_OF(psNode, SYNC_PRIMITIVE_BLOCK, sConnectionNode);
+
+			DevmemPDumpLoadMem(psSyncBlock->psMemDesc,
+					0,
+					psSyncBlock->ui32BlockSize,
+					PDUMP_FLAGS_CONTINUOUS);
+		}
+
+		OSLockRelease(psSyncConnectionData->hLock);
+	}
+}
+
+void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr,
+					  IMG_CHAR * pszSyncInfo, size_t len)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_INT iEnd;
+	IMG_BOOL bFound = IMG_FALSE;
+
+	if (!pszSyncInfo)
+	{
+		return;
+	}
+
+	OSLockAcquire(psDevNode->hSyncServerRecordLock);
+	pszSyncInfo[0] = '\0';
+
+	dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+	{
+		struct SYNC_RECORD *psSyncRec =
+			IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+		if ((psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset) == ui32FwAddr
+			&& SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType
+			&& psSyncRec->psServerSyncPrimBlock
+			&& psSyncRec->psServerSyncPrimBlock->pui32LinAddr
+			)
+		{
+			IMG_UINT32 *pui32SyncAddr;
+			pui32SyncAddr = psSyncRec->psServerSyncPrimBlock->pui32LinAddr
+				+ (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32));
+			iEnd = OSSNPrintf(pszSyncInfo, len, "Cur=0x%08x %s:%05u (%s)",
+				*pui32SyncAddr,
+				((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+				psSyncRec->uiPID,
+				psSyncRec->szClassName
+				);
+			if (iEnd >= 0 && iEnd < len)
+			{
+				pszSyncInfo[iEnd] = '\0';
+			}
+			bFound = IMG_TRUE;
+			break;
+		}
+	}
+
+	OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+	if (!bFound && (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT))
+	{
+		OSSNPrintf(pszSyncInfo, len, "(Record may be lost)");
+	}
+}
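+
+/*
+	Annotation (not from the original source): for a matching record the
+	lookup above produces a string of the form
+	"Cur=0x00000005 Server:01234 (SomeClassName)", where SomeClassName is
+	the annotation the sync was allocated with.
+*/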
+
+#define NS_IN_S (1000000000UL)
+static void _SyncRecordPrint(struct SYNC_RECORD *psSyncRec,
+					IMG_UINT64 ui64TimeNow,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	SYNC_PRIMITIVE_BLOCK *psSyncBlock = psSyncRec->psServerSyncPrimBlock;
+
+	if (SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType)
+	{
+		IMG_UINT64 ui64DeltaS;
+		IMG_UINT32 ui32DeltaF;
+		IMG_UINT64 ui64Delta = ui64TimeNow - psSyncRec->ui64OSTime;
+		ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF);
+
+		if (psSyncBlock && psSyncBlock->pui32LinAddr)
+		{
+			IMG_UINT32 *pui32SyncAddr;
+			pui32SyncAddr = psSyncBlock->pui32LinAddr
+				+ (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32));
+
+			PVR_DUMPDEBUG_LOG("\t%s %05u %05" IMG_UINT64_FMTSPEC ".%09u FWAddr=0x%08x Val=0x%08x (%s)",
+				((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+				psSyncRec->uiPID,
+				ui64DeltaS, ui32DeltaF,
+				(psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset),
+				*pui32SyncAddr,
+				psSyncRec->szClassName
+				);
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG("\t%s %05u %05" IMG_UINT64_FMTSPEC ".%09u FWAddr=0x%08x Val=<null_ptr> (%s)",
+				((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+				psSyncRec->uiPID,
+				ui64DeltaS, ui32DeltaF,
+				(psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset),
+				psSyncRec->szClassName
+				);
+		}
+	}
+}
+
+static void _SyncRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+					IMG_UINT32 ui32VerbLevel,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+	IMG_UINT64 ui64TimeNowS;
+	IMG_UINT32 ui32TimeNowF;
+	IMG_UINT64 ui64TimeNow = OSClockns64();
+	DLLIST_NODE *psNode, *psNext;
+
+	ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF);
+
+	if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+	{
+		IMG_UINT32 i;
+		OSLockAcquire(psDevNode->hSyncServerRecordLock);
+
+		PVR_DUMPDEBUG_LOG("Dumping all allocated syncs. Allocated: %u High watermark: %u @ %05" IMG_UINT64_FMTSPEC ".%09u",
+										psDevNode->ui32SyncServerRecordCount,
+										psDevNode->ui32SyncServerRecordCountHighWatermark,
+										ui64TimeNowS,
+										ui32TimeNowF);
+		if (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT)
+		{
+			PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.",
+                                                                                                                SYNC_RECORD_LIMIT);
+		}
+
+		PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)",
+					"Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation");
+
+		dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+		{
+			struct SYNC_RECORD *psSyncRec =
+				IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+			_SyncRecordPrint(psSyncRec, ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+
+		PVR_DUMPDEBUG_LOG("Dumping all recently freed syncs @ %05" IMG_UINT64_FMTSPEC ".%09u",
+						  ui64TimeNowS, ui32TimeNowF);
+		PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)",
+					"Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation");
+		for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncServerRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN);
+			 i != psDevNode->uiSyncServerRecordFreeIdx;
+			 i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN))
+		{
+			if (psDevNode->apsSyncServerRecordsFreed[i])
+			{
+				_SyncRecordPrint(psDevNode->apsSyncServerRecordsFreed[i],
+								 ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+			}
+			else
+			{
+				break;
+			}
+		}
+
+		OSLockRelease(psDevNode->hSyncServerRecordLock);
+	}
+}
+#undef NS_IN_S
+
+static PVRSRV_ERROR SyncRecordListInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRV_ERROR eError;
+
+	psDevNode->ui32SyncServerRecordCount = 0;
+	psDevNode->ui32SyncServerRecordCountHighWatermark = 0;
+
+	eError = OSLockCreate(&psDevNode->hSyncServerRecordLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock_create;
+	}
+	dllist_init(&psDevNode->sSyncServerRecordList);
+
+	eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncServerRecordNotify,
+											psDevNode,
+											_SyncRecordRequest,
+											DEBUG_REQUEST_SERVERSYNC,
+											psDevNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_dbg_register;
+	}
+
+	return PVRSRV_OK;
+
+fail_dbg_register:
+	OSLockDestroy(psDevNode->hSyncServerRecordLock);
+fail_lock_create:
+	return eError;
+}
+
+static void SyncRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	DLLIST_NODE *psNode, *psNext;
+	int i;
+
+	OSLockAcquire(psDevNode->hSyncServerRecordLock);
+	dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+	{
+		struct SYNC_RECORD *pSyncRec =
+			IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+
+		dllist_remove_node(psNode);
+		OSFreeMem(pSyncRec);
+	}
+
+	for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++)
+	{
+		if (psDevNode->apsSyncServerRecordsFreed[i])
+		{
+			OSFreeMem(psDevNode->apsSyncServerRecordsFreed[i]);
+			psDevNode->apsSyncServerRecordsFreed[i] = NULL;
+		}
+	}
+	OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+	if (psDevNode->hSyncServerRecordNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerRecordNotify);
+	}
+	OSLockDestroy(psDevNode->hSyncServerRecordLock);
+}
+
+PVRSRV_ERROR ServerSyncInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRV_ERROR eError;
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	eError = OSLockCreate(&psDevNode->hSyncServerListLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock_create;
+	}
+	dllist_init(&psDevNode->sSyncServerSyncsList);
+#endif
+	eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncServerNotify,
+											psDevNode,
+											_ServerSyncDebugRequest,
+											DEBUG_REQUEST_SERVERSYNC,
+											psDevNode);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_dbg_register;
+	}
+
+	if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+	{
+		eError = SyncRecordListInit(psDevNode);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_record_list;
+		}
+	}
+
+	return PVRSRV_OK;
+
+fail_record_list:
+	PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerNotify);
+fail_dbg_register:
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	OSLockDestroy(psDevNode->hSyncServerListLock);
+fail_lock_create:
+#endif
+	return eError;
+}
+
+void ServerSyncDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerNotify);
+	psDevNode->hSyncServerNotify = NULL;
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+	OSLockDestroy(psDevNode->hSyncServerListLock);
+	psDevNode->hSyncServerListLock = NULL;
+#endif
+
+	if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+	{
+		SyncRecordListDeinit(psDevNode);
+	}
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+PVRSRV_ERROR ServerSyncInitOnce(PVRSRV_DATA *psPVRSRVData)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&ghServerSyncLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create server sync lock", __func__));
+		goto err;
+	}
+err:
+#endif
+	return eError;
+}
+
+void ServerSyncDeinitOnce(PVRSRV_DATA *psPVRSRVData)
+{
+	PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(ghServerSyncLock);
+#endif
+}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+void PVRSRVLockServerSync(void)
+{
+	OSLockAcquire(ghServerSyncLock);
+}
+
+void PVRSRVUnlockServerSync(void)
+{
+	OSLockRelease(ghServerSyncLock);
+}
+#endif
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_server.h
new file mode 100644
index 0000000..6704544
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sync_server.h
@@ -0,0 +1,448 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side synchronisation interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Describes the server side synchronisation functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv.h"
+#include "device.h"
+#include "devicemem.h"
+#include "pdump.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+#include "pdump_km.h"
+
+#ifndef _SYNC_SERVER_H_
+#define _SYNC_SERVER_H_
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+typedef struct _SERVER_OP_COOKIE_ SERVER_OP_COOKIE;
+typedef struct _SERVER_SYNC_EXPORT_ SERVER_SYNC_EXPORT;
+typedef struct _SERVER_SYNC_PRIMITIVE_ SERVER_SYNC_PRIMITIVE;
+#endif
+typedef struct _SYNC_PRIMITIVE_BLOCK_ SYNC_PRIMITIVE_BLOCK;
+typedef struct _SYNC_CONNECTION_DATA_ SYNC_CONNECTION_DATA;
+typedef struct SYNC_RECORD* SYNC_RECORD_HANDLE;
+
+typedef struct _SYNC_ADDR_LIST_
+{
+	IMG_UINT32 ui32NumSyncs;
+	PRGXFWIF_UFO_ADDR *pasFWAddrs;
+} SYNC_ADDR_LIST;
+
+PVRSRV_ERROR
+SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock,
+						IMG_UINT32 ui32Offset,
+						PRGXFWIF_UFO_ADDR *psAddrOut);
+
+void
+SyncAddrListInit(SYNC_ADDR_LIST *psList);
+
+void
+SyncAddrListDeinit(SYNC_ADDR_LIST *psList);
+
+PVRSRV_ERROR
+SyncAddrListPopulate(SYNC_ADDR_LIST *psList,
+						IMG_UINT32 ui32NumSyncs,
+						SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock,
+						IMG_UINT32 *paui32SyncOffset);
+
+PVRSRV_ERROR
+SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST          *psList,
+						   PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim);
+PVRSRV_ERROR
+SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList,
+								IMG_UINT32 ui32NumCheckpoints,
+								PSYNC_CHECKPOINT *apsSyncCheckpoint);
+
+PVRSRV_ERROR
+SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList,
+									  IMG_UINT32 ui32NumCheckpoints,
+									  PSYNC_CHECKPOINT *apsSyncCheckpoint);
+
+void
+SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints,
+							 PSYNC_CHECKPOINT *apsSyncCheckpoint);
+
+PVRSRV_ERROR
+SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList);
+
+PVRSRV_ERROR
+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
+                                PVRSRV_DEVICE_NODE * psDevNode,
+								SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
+								IMG_UINT32 *puiSyncPrimVAddr,
+								IMG_UINT32 *puiSyncPrimBlockSize,
+								PMR        **ppsSyncPMR);
+
+PVRSRV_ERROR
+PVRSRVExportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk,
+								 DEVMEM_EXPORTCOOKIE **psExportCookie);
+
+PVRSRV_ERROR
+PVRSRVUnexportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk);
+
+PVRSRV_ERROR
+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *ppsSyncBlk);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
+					IMG_UINT32 ui32Value);
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+PVRSRV_ERROR
+PVRSRVServerSyncPrimSetKM(SERVER_SYNC_PRIMITIVE *psServerSync, IMG_UINT32 ui32Value);
+
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+							SERVER_SYNC_EXPORT **ppsExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerImportKM(CONNECTION_DATA *psConnection,
+							 PVRSRV_DEVICE_NODE *psDevNode,
+							 SERVER_SYNC_EXPORT *psExport,
+							 SERVER_SYNC_PRIMITIVE **ppsSync,
+							 IMG_UINT32 *pui32SyncPrimVAddr);
+#endif
+
+#if defined(SUPPORT_SECURE_EXPORT)
+
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureExportKM(CONNECTION_DATA *psConnection,
+                                   PVRSRV_DEVICE_NODE * psDevNode,
+								   SERVER_SYNC_PRIMITIVE *psSync,
+								   IMG_SECURE_TYPE *phSecure,
+								   SERVER_SYNC_EXPORT **ppsExport,
+								   CONNECTION_DATA **ppsSecureConnection);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureUnexportKM(SERVER_SYNC_EXPORT *psExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureImportKM(CONNECTION_DATA *psConnection,
+								   PVRSRV_DEVICE_NODE *psDevNode,
+								   IMG_SECURE_TYPE hSecure,
+								   SERVER_SYNC_PRIMITIVE **ppsSync,
+								   IMG_UINT32 *pui32SyncPrimVAddr);
+#endif
+
+IMG_UINT32 PVRSRVServerSyncRequesterRegisterKM(IMG_UINT32 *pui32SyncRequesterID);
+void PVRSRVServerSyncRequesterUnregisterKM(IMG_UINT32 ui32SyncRequesterID);
+#endif
+
+PVRSRV_ERROR
+PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection,
+					   PVRSRV_DEVICE_NODE *psDevNode,
+					   IMG_BOOL bServerSync,
+                       IMG_UINT32 ui32FWAddr,
+                       IMG_UINT32 ui32ClassNameSize,
+                       const IMG_CHAR *pszClassName);
+
+PVRSRV_ERROR
+PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection,
+					   PVRSRV_DEVICE_NODE *psDevNode,
+					   IMG_UINT32 ui32FWAddr);
+
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+					  PVRSRV_DEVICE_NODE *psDevNode,
+					  SYNC_RECORD_HANDLE *phRecord,
+					  SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+					  IMG_UINT32 ui32FwBlockAddr,
+					  IMG_UINT32 ui32SyncOffset,
+					  IMG_BOOL bServerSync,
+					  IMG_UINT32 ui32ClassNameSize,
+					  const IMG_CHAR *pszClassName);
+
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+			SYNC_RECORD_HANDLE hRecord);
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+PVRSRV_ERROR
+PVRSRVServerSyncAllocKM(CONNECTION_DATA * psConnection,
+                        PVRSRV_DEVICE_NODE *psDevNode,
+						SERVER_SYNC_PRIMITIVE **ppsSync,
+						IMG_UINT32 *pui32SyncPrimVAddr,
+						IMG_UINT32 ui32ClassNameSize,
+						const IMG_CHAR *szClassName);
+PVRSRV_ERROR
+PVRSRVServerSyncFreeKM(SERVER_SYNC_PRIMITIVE *psSync);
+
+PVRSRV_ERROR
+PVRSRVServerSyncGetStatusKM(IMG_UINT32 ui32SyncCount,
+							SERVER_SYNC_PRIMITIVE **papsSyncs,
+							IMG_UINT32 *pui32UID,
+							IMG_UINT32 *pui32FWAddr,
+							IMG_UINT32 *pui32CurrentOp,
+							IMG_UINT32 *pui32NextOp);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue,
+						  IMG_UINT32 ui32SyncRequesterID,
+						  IMG_BOOL bUpdate,
+						  IMG_BOOL *pbFenceRequired);
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM_NoGlobalLock(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue,
+						  IMG_UINT32 ui32SyncRequesterID,
+						  IMG_BOOL bUpdate,
+						  IMG_BOOL *pbFenceRequired);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+							   IMG_BOOL bUpdate,
+						       IMG_UINT32 *pui32FenceValue,
+						       IMG_UINT32 *pui32UpdateValue);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM_NoGlobalLock(SERVER_SYNC_PRIMITIVE *psSync,
+							   IMG_BOOL bUpdate,
+						       IMG_UINT32 *pui32FenceValue,
+						       IMG_UINT32 *pui32UpdateValue);
+
+IMG_BOOL
+ServerSyncFenceIsMet(SERVER_SYNC_PRIMITIVE *psSync,
+					 IMG_UINT32 ui32FenceValue);
+
+void
+ServerSyncCompleteOp(SERVER_SYNC_PRIMITIVE *psSync,
+					 IMG_BOOL bDoUpdate,
+					 IMG_UINT32 ui32UpdateValue);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCreateKM(IMG_UINT32 ui32SyncBlockCount,
+						 SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock,
+						 IMG_UINT32 ui32ClientSyncCount,
+						 IMG_UINT32 *paui32SyncBlockIndex,
+						 IMG_UINT32 *paui32Index,
+						 IMG_UINT32 ui32ServerSyncCount,
+						 SERVER_SYNC_PRIMITIVE **papsServerSync,
+						 SERVER_OP_COOKIE **ppsServerCookie);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpTakeKM(SERVER_OP_COOKIE *psServerCookie,
+					       IMG_UINT32 ui32ClientSyncCount,
+					       IMG_UINT32 *paui32Flags,
+					       IMG_UINT32 *paui32FenceValue,
+					       IMG_UINT32 *paui32UpdateValue,
+					       IMG_UINT32 ui32ServerSyncCount,
+						   IMG_UINT32 *paui32ServerFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpReadyKM(SERVER_OP_COOKIE *psServerCookie,
+						IMG_BOOL *pbReady);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCompleteKM(SERVER_OP_COOKIE *psServerCookie);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpDestroyKM(SERVER_OP_COOKIE *psServerCookie);
+
+IMG_UINT32 ServerSyncGetId(SERVER_SYNC_PRIMITIVE *psSync);
+
+PVRSRV_ERROR
+ServerSyncGetFWAddr(SERVER_SYNC_PRIMITIVE *psSync, IMG_UINT32 *pui32SyncAddr);
+
+IMG_UINT32 ServerSyncGetValue(SERVER_SYNC_PRIMITIVE *psSync);
+
+IMG_UINT32 ServerSyncGetNextValue(SERVER_SYNC_PRIMITIVE *psSync);
+
+PVRSRV_DEVICE_NODE* ServerSyncGetDeviceNode(SERVER_SYNC_PRIMITIVE *psSync);
+#endif
+void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr,
+					  IMG_CHAR * pszSyncInfo, size_t len);
+
+void ServerSyncDumpPending(void);
+
+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData);
+void SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData);
+void SyncConnectionPDumpSyncBlocks(void *hSyncPrivData, PDUMP_TRANSITION_EVENT eEvent);
+
+/*!
+******************************************************************************
+@Function      ServerSyncInit
+
+@Description   Per-device initialisation for the ServerSync module
+******************************************************************************/
+PVRSRV_ERROR ServerSyncInit(PVRSRV_DEVICE_NODE *psDevNode);
+void ServerSyncDeinit(PVRSRV_DEVICE_NODE *psDevNode);
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+/*!
+******************************************************************************
+@Function      ServerSyncInitOnce
+
+@Description   One-time initialisation for the ServerSync module
+******************************************************************************/
+PVRSRV_ERROR ServerSyncInitOnce(PVRSRV_DATA *psPVRSRVData);
+void ServerSyncDeinitOnce(PVRSRV_DATA *psPVRSRVData);
+#endif
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+/*!
+******************************************************************************
+@Function      PVRSRVLockServerSync
+
+@Description   Acquire a global lock to maintain server sync consistency
+******************************************************************************/
+void PVRSRVLockServerSync(void);
+/*!
+******************************************************************************
+@Function      PVRSRVUnlockServerSync
+
+@Description   Release the global server sync lock
+******************************************************************************/
+void PVRSRVUnlockServerSync(void);
+#endif
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+							IMG_UINT32 ui32Value);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+						 IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T uiDumpFlags);
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T ui32PDumpFlags);
+#endif
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+						 IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+						 IMG_UINT64 uiBufferSize);
+
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpValueKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+							IMG_UINT32 ui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+						 IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T uiDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiDumpFlags);
+	return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_SERVER_SYNC_IMPL)
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimOpPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T uiDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psServerCookie);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiDumpFlags);
+	return PVRSRV_OK;
+}
+#endif
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpCBPKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+						 IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+						 IMG_UINT64 uiBufferSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+	return PVRSRV_OK;
+}
+#endif	/* PDUMP */
+#endif	/*_SYNC_SERVER_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sysconfig.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sysconfig.c
new file mode 100644
index 0000000..79a32b5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sysconfig.c
@@ -0,0 +1,311 @@
+/*************************************************************************
+* @File
+* @Title          System Configuration
+* @Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+* @Description    System Configuration functions
+* @License        Dual MIT/GPLv2
+*
+* The contents of this file are subject to the MIT license as set out below.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to deal
+* in the Software without restriction, including without limitation the rights
+* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+* copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* Alternatively, the contents of this file may be used under the terms of
+* the GNU General Public License Version 2 ("GPL") in which case the provisions
+* of GPL are applicable instead of those above.
+*
+* If you wish to allow use of your version of this file only under the terms of
+* GPL, and not to allow others to use your version of this file under the terms
+* of the MIT license, indicate your decision by deleting the provisions above
+* and replace them with the notice and other provisions required by GPL as set
+* out in the file called "GPL-COPYING" included in this distribution. If you do
+* not delete the provisions above, a recipient may use your version of this file
+* under the terms of either the MIT license or GPL.
+*
+* This License is also included in this distribution in the file called
+* "MIT-COPYING".
+*
+* EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+* PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+* PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+**************************************************************************/
+
+#include "interrupt_support.h"
+#include "pvrsrv_device.h"
+#include "syscommon.h"
+#include "sysconfig.h"
+#include "physheap.h"
+#if defined(SUPPORT_ION)
+#include "ion_support.h"
+#endif
+#include "mtk_mfgsys.h"
+
+#if defined(MTK_CONFIG_OF) && defined(CONFIG_OF)
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+struct platform_device *gpsPVRCfgDev;
+#endif
+
+#define RGX_CR_ISP_GRIDOFFSET   (0x0FA0U)
+
+static RGX_TIMING_INFORMATION   gsRGXTimingInfo;
+static RGX_DATA                 gsRGXData;
+static PVRSRV_DEVICE_CONFIG     gsDevices[1];
+
+static PHYS_HEAP_FUNCTIONS      gsPhysHeapFuncs;
+static PHYS_HEAP_CONFIG         gsPhysHeapConfig;
+
+#if defined(SUPPORT_PDVFS)
+/* Dummy DVFS configuration used purely for testing purposes */
+static const IMG_OPP asOPPTable[] = {
+	{ 100000, 253500000},
+	{ 100000, 338000000},
+	{ 100000, 390000000},
+	{ 112500, 546000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+};
+
+#define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP))
+#endif
+
+
+/* CPU to Device physical address translation */
+static void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+				   IMG_UINT32 ui32NumOfAddr,
+				   IMG_DEV_PHYADDR *psDevPAddr,
+				   IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+	/* Optimise common case */
+	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+	if (ui32NumOfAddr > 1) {
+		IMG_UINT32 ui32Idx;
+
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+	}
+}
+
+/* Device to CPU physical address translation */
+static void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+				   IMG_UINT32 ui32NumOfAddr,
+				   IMG_CPU_PHYADDR *psCpuPAddr,
+				   IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+	/* Optimise common case */
+	psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr;
+	if (ui32NumOfAddr > 1) {
+		IMG_UINT32 ui32Idx;
+
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+			psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr;
+	}
+}
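+
+/*
+	Annotation (not from the original source): on this UMA system the GPU
+	and the CPU share a single physical address space, so both translations
+	above are a straight 1:1 copy of uiAddr in each direction.
+*/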
+
+#if defined(MTK_CONFIG_OF) && defined(CONFIG_OF)
+static int g32SysIrq = -1;
+int MTKSysGetIRQ(void)
+{
+	return g32SysIrq;
+}
+#endif
+
+/* SysCreateConfigData */
+static PHYS_HEAP_REGION gsHeapRegionsLocal[] = {
+	/* sStartAddr, sCardBase, uiSize */
+	{ { 0 }, { 0 }, 0, },
+};
+
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+	PVRSRV_ERROR err = PVRSRV_OK;
+
+	gsPhysHeapFuncs.pfnCpuPAddrToDevPAddr = UMAPhysHeapCpuPAddrToDevPAddr;
+	gsPhysHeapFuncs.pfnDevPAddrToCpuPAddr = UMAPhysHeapDevPAddrToCpuPAddr;
+
+	gsPhysHeapConfig.ui32PhysHeapID = 0;
+	gsPhysHeapConfig.pszPDumpMemspaceName = "SYSMEM";
+	gsPhysHeapConfig.eType = PHYS_HEAP_TYPE_UMA;
+	gsPhysHeapConfig.psMemFuncs = &gsPhysHeapFuncs;
+	gsPhysHeapConfig.hPrivData = (IMG_HANDLE)&gsDevices[0];
+
+	gsPhysHeapConfig.pasRegions = &gsHeapRegionsLocal[0];
+
+	gsDevices[0].pvOSDevice = pvOSDevice;
+	gsDevices[0].pasPhysHeaps = &gsPhysHeapConfig;
+	gsDevices[0].ui32PhysHeapCount = sizeof(gsPhysHeapConfig) / sizeof(PHYS_HEAP_CONFIG);
+
+	gsDevices[0].eBIFTilingMode = RGXFWIF_BIFTILINGMODE_256x16;
+	gsDevices[0].pui32BIFTilingHeapConfigs = gauiBIFTilingHeapXStrides;
+	gsDevices[0].ui32BIFTilingHeapCount = ARRAY_SIZE(gauiBIFTilingHeapXStrides);
+
+	/* Setup RGX specific timing data */
+	gsRGXTimingInfo.ui32CoreClockSpeed = RGX_HW_CORE_CLOCK_SPEED;
+
+#if MTK_PM_SUPPORT
+	gsRGXTimingInfo.bEnableActivePM = true;
+	gsRGXTimingInfo.ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+#else
+	gsRGXTimingInfo.bEnableActivePM = false;
+#endif
+
+	/* define HW APM */
+#if defined(MTK_USE_HW_APM)
+	gsRGXTimingInfo.bEnableRDPowIsland = true;
+#else
+	gsRGXTimingInfo.bEnableRDPowIsland = false;
+#endif
+
+	/* Setup RGX specific data */
+	gsRGXData.psRGXTimingInfo = &gsRGXTimingInfo;
+
+	/* Setup RGX device */
+	gsDevices[0].pszName = "RGX";
+	gsDevices[0].pszVersion = NULL;
+
+	/* Device setup information */
+#if defined(MTK_CONFIG_OF) && defined(CONFIG_OF)
+	/* MTK: using device tree */
+	{
+		struct resource *irq_res;
+		struct resource *reg_res;
+
+		gpsPVRCfgDev = to_platform_device((struct device *)pvOSDevice);
+		irq_res = platform_get_resource(gpsPVRCfgDev, IORESOURCE_IRQ, 0);
+
+		if (irq_res) {
+			gsDevices[0].ui32IRQ = irq_res->start;
+			g32SysIrq = irq_res->start;
+
+			PVR_LOG(("irq_res = 0x%x", (int)irq_res->start));
+		} else {
+			PVR_DPF((PVR_DBG_ERROR, "irq_res = NULL!"));
+			return PVRSRV_ERROR_INIT_FAILURE;
+		}
+
+		reg_res = platform_get_resource(gpsPVRCfgDev, IORESOURCE_MEM, 0);
+
+		if (reg_res) {
+			gsDevices[0].sRegsCpuPBase.uiAddr = reg_res->start;
+			gsDevices[0].ui32RegsSize = resource_size(reg_res);
+
+			PVR_LOG(("reg_res = 0x%x, 0x%x", (int)reg_res->start,
+									(int)resource_size(reg_res)));
+		} else {
+			PVR_DPF((PVR_DBG_ERROR, "reg_res = NULL!"));
+			return PVRSRV_ERROR_INIT_FAILURE;
+		}
+	}
+#else
+	gsDevices[0].sRegsCpuPBase.uiAddr = SYS_MTK_RGX_REGS_SYS_PHYS_BASE;
+	gsDevices[0].ui32RegsSize = SYS_MTK_RGX_REGS_SIZE;
+	gsDevices[0].ui32IRQ = SYS_MTK_RGX_IRQ;
+#endif
+
+	/* Power management on HW system */
+	gsDevices[0].pfnPrePowerState = MTKDevPrePowerState;
+	gsDevices[0].pfnPostPowerState = MTKDevPostPowerState;
+
+	/* Clock frequency */
+	gsDevices[0].pfnClockFreqGet = NULL;
+
+	gsDevices[0].hDevData = &gsRGXData;
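+	/* No hardware cache snooping between CPU and GPU on these SoCs */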
+	gsDevices[0].eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
+
+#if defined(CONFIG_MACH_MT6739)
+	gsDevices[0].pfnSysDevFeatureDepInit = NULL;
+#endif
+
+#if defined(SUPPORT_PDVFS)
+	/* Dummy DVFS configuration used purely for testing purposes */
+	gsDevices[0].sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable;
+	gsDevices[0].sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT;
+#endif
+
+	/* Setup other system specific stuff */
+#if defined(SUPPORT_ION)
+	IonInit(NULL);
+#endif
+
+	gsDevices[0].pvOSDevice = pvOSDevice;
+	*ppsDevConfig = &gsDevices[0];
+
+	MTKRGXDeviceInit(gsDevices);
+	return err;
+}
+
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+#if defined(SUPPORT_ION)
+	IonDeinit();
+#endif
+
+	MTKRGXDeviceDeInit(gsDevices);
+	psDevConfig->pvOSDevice = NULL;
+}
+
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+				  IMG_UINT32 ui32IRQ,
+				  const IMG_CHAR *pszName,
+				  PFN_LISR pfnLISR,
+				  void *pvData,
+				  IMG_HANDLE *phLISRData)
+{
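+	/* The RGX interrupt line is level-triggered (active low) on these SoCs */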
+	IMG_UINT32 ui32IRQFlags = SYS_IRQ_FLAG_TRIGGER_LOW;
+
+	PVR_UNREFERENCED_PARAMETER(hSysData);
+
+#if defined(PVRSRV_GPUVIRT_MULTIDRV_MODEL)
+	ui32IRQFlags |= SYS_IRQ_FLAG_SHARED;
+#endif
+
+	return OSInstallSystemLISR(phLISRData, ui32IRQ, pszName, pfnLISR, pvData,
+							   ui32IRQFlags);
+}
+
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+	return OSUninstallSystemLISR(hLISRData);
+}
+
+
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+	PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+	PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+	return PVRSRV_OK;
+}
+
+/******************************************************************************
+* End of file (sysconfig.c)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sysconfig.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sysconfig.h
new file mode 100644
index 0000000..b18a774
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sysconfig.h
@@ -0,0 +1,102 @@
+/*************************************************************************
+* @File
+* @Title          System Description Header
+* @Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+* @Description    This header provides system-specific declarations and macros
+* @License        Dual MIT/GPLv2
+*
+* The contents of this file are subject to the MIT license as set out below.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to deal
+* in the Software without restriction, including without limitation the rights
+* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+* copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* Alternatively, the contents of this file may be used under the terms of
+* the GNU General Public License Version 2 ("GPL") in which case the provisions
+* of GPL are applicable instead of those above.
+*
+* If you wish to allow use of your version of this file only under the terms of
+* GPL, and not to allow others to use your version of this file under the terms
+* of the MIT license, indicate your decision by deleting the provisions above
+* and replace them with the notice and other provisions required by GPL as set
+* out in the file called "GPL-COPYING" included in this distribution. If you do
+* not delete the provisions above, a recipient may use your version of this file
+* under the terms of either the MIT license or GPL.
+*
+* This License is also included in this distribution in the file called
+* "MIT-COPYING".
+*
+* EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+* PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+* PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+***************************************************************************/
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+
+#if !defined(__SYSCONFIG_H__)
+#define __SYSCONFIG_H__
+
+/* Platform overrides: force the MT8167 configuration and device-tree probing */
+#define CONFIG_MACH_MT8167  1
+#define MTK_CONFIG_OF
+
+#define RGX_HW_SYSTEM_NAME "RGX HW"
+
+#if defined(CONFIG_MACH_MT8173)
+#define RGX_HW_CORE_CLOCK_SPEED			(455000000)
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (30)
+#elif defined(CONFIG_MACH_MT8167)
+/* was: #define RGX_HW_CORE_CLOCK_SPEED			(500000000) */
+#define RGX_HW_CORE_CLOCK_SPEED			(598000000)
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (50)
+#elif defined(CONFIG_MACH_MT6739)
+#define RGX_HW_CORE_CLOCK_SPEED			(481000000)
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (3)
+#endif
+
+static IMG_UINT32 gauiBIFTilingHeapXStrides[RGXFWIF_NUM_BIF_TILING_CONFIGS] = {
+	0, /* BIF tiling heap 1 x-stride */
+	1, /* BIF tiling heap 2 x-stride */
+	2, /* BIF tiling heap 3 x-stride */
+	3  /* BIF tiling heap 4 x-stride */
+};
+
+#if defined(MTK_CONFIG_OF) && defined(CONFIG_OF)
+int MTKSysGetIRQ(void);
+#else
+
+/* If CONFIG_OF is not set, make sure the following register base and IRQ number are correct */
+/* #error RGX_GPU_please_fill_the_following_defines */
+#define SYS_MTK_RGX_REGS_SYS_PHYS_BASE      0x13000000
+#define SYS_MTK_RGX_REGS_SIZE               0x80000
+
+#if defined(CONFIG_MACH_MT8173)
+#define SYS_MTK_RGX_IRQ                     0x102
+#elif defined(CONFIG_MACH_MT8167)
+#define SYS_MTK_RGX_IRQ                     0xDB
+#elif defined(CONFIG_MACH_MT6739)
+#define SYS_MTK_RGX_IRQ                     0x150
+#endif
+
+#endif
+
+
+
+/*****************************************************************************
+ * system specific data structures
+ *****************************************************************************/
+
+#endif	/* __SYSCONFIG_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sysinfo.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sysinfo.h
new file mode 100644
index 0000000..dea63c0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/sysinfo.h
@@ -0,0 +1,87 @@
+/*************************************************************************
+* @File
+* @Title          System Description Header
+* @Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+* @Description    This header provides system-specific declarations and macros
+* @License        Dual MIT/GPLv2
+*
+* The contents of this file are subject to the MIT license as set out below.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to deal
+* in the Software without restriction, including without limitation the rights
+* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+* copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* Alternatively, the contents of this file may be used under the terms of
+* the GNU General Public License Version 2 ("GPL") in which case the provisions
+* of GPL are applicable instead of those above.
+*
+* If you wish to allow use of your version of this file only under the terms of
+* GPL, and not to allow others to use your version of this file under the terms
+* of the MIT license, indicate your decision by deleting the provisions above
+* and replace them with the notice and other provisions required by GPL as set
+* out in the file called "GPL-COPYING" included in this distribution. If you do
+* not delete the provisions above, a recipient may use your version of this file
+* under the terms of either the MIT license or GPL.
+*
+* This License is also included in this distribution in the file called
+* "MIT-COPYING".
+*
+* EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+* PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+* PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*! System specific poll/timeout details */
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+#define MAX_HW_TIME_US								(1000000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT		(10000)
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT	(3600000)
+#define WAIT_TRY_COUNT								(20000)
+#else
+#define MAX_HW_TIME_US								(5000000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT		(10000)
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT	(3600000)
+#define WAIT_TRY_COUNT								(100000)
+#endif
+
+#define SYS_DEVICE_COUNT		3 /* RGX, DISPLAY (external), BUFFER (external) */
+
+#define SYS_PHYS_HEAP_COUNT		1
+
+#define CONFIG_MACH_MT8167
+#define MTK_CONFIG_OF
+
+#if defined(CONFIG_MACH_MT8173)
+#define SYS_RGX_OF_COMPATIBLE	"mediatek,mt8173-han"
+#elif defined(CONFIG_MACH_MT8167)
+#define SYS_RGX_OF_COMPATIBLE	"mediatek,mt8167-clark"
+#elif defined(CONFIG_MACH_MT6739)
+#define SYS_RGX_OF_COMPATIBLE	"mediatek,AUCKLAND"
+#endif
+
+#if defined(__linux__)
+/*
+ * Use the static bus ID for the platform DRM device.
+ */
+#if defined(PVR_DRM_DEV_BUS_ID)
+#define	SYS_RGX_DEV_DRM_BUS_ID	PVR_DRM_DEV_BUS_ID
+#else
+#define SYS_RGX_DEV_DRM_BUS_ID	"platform:pvrsrvkm"
+#endif	/* defined(PVR_DRM_DEV_BUS_ID) */
+#endif
+
+#endif	/* !defined(__SYSINFO_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/dma_support.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/dma_support.c
new file mode 100644
index 0000000..beba85b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/dma_support.c
@@ -0,0 +1,543 @@
+/*************************************************************************/ /*!
+@File           dma_support.c
+@Title          System DMA support
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides a contiguous memory allocator (i.e. DMA allocator);
+                APIs are used for allocation/ioremapping (DMA/PA <-> CPU/VA)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <asm-generic/getorder.h>
+
+#include "allocmem.h"
+#include "dma_support.h"
+#include "kernel_compatibility.h"
+
+#define DMA_MAX_IOREMAP_ENTRIES 2
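+/* Small fixed table of DMA allocations registered for PA <-> VA lookups */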
+static IMG_BOOL gbEnableDmaIoRemapping = IMG_FALSE;
+static DMA_ALLOC gsDmaIoRemapArray[DMA_MAX_IOREMAP_ENTRIES] = {{0}};
+
+static void*
+SysDmaAcquireKernelAddress(struct page *psPage, IMG_UINT64 ui64Size, void *pvOSDevice)
+{
+	IMG_UINT32 uiIdx;
+	PVRSRV_ERROR eError;
+	void *pvVirtAddr = NULL;
+	IMG_UINT32 ui32PgCount = (IMG_UINT32)(ui64Size >> OSGetPageShift());
+	PVRSRV_DEVICE_NODE *psDevNode = OSAllocZMemNoStats(sizeof(*psDevNode));
+	PVRSRV_DEVICE_CONFIG *psDevConfig = OSAllocZMemNoStats(sizeof(*psDevConfig));
+	struct page **pagearray = OSAllocZMemNoStats(ui32PgCount * sizeof(struct page *));
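+	/* Map device-visible memory uncached (write-combined on ARM64) so CPU
+	 * writes reach memory without per-access cache maintenance.
+	 */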
+#if defined(CONFIG_ARM64)
+	pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
+#else
+	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
+#endif
+
+	/* Validate all required dynamic tmp buffer allocations */
+	if (psDevNode == NULL || psDevConfig == NULL || pagearray == NULL)
+	{
+		if (psDevNode)
+		{
+			OSFreeMem(psDevNode);
+		}
+
+		if (psDevConfig)
+		{
+			OSFreeMem(psDevConfig);
+		}
+
+		if (pagearray)
+		{
+			OSFreeMem(pagearray);
+		}
+
+		goto e0;
+	}
+
+	/* Fake psDevNode->psDevConfig->pvOSDevice */
+	psDevConfig->pvOSDevice = pvOSDevice;
+	psDevNode->psDevConfig = psDevConfig;
+
+	/* Evict any page data contents from d-cache */
+#if defined(__arm__) || defined(__arm64__) || defined(__aarch64__)
+	/* ARM platforms do not support a global flush. */
+	eError = PVRSRV_ERROR_RETRY;
+#else
+	eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+#endif
+	for (uiIdx = 0; uiIdx < ui32PgCount; uiIdx++)
+	{
+		/* Prepare array required for vmap */
+		pagearray[uiIdx] = &psPage[uiIdx];
+
+		if (eError != PVRSRV_OK)
+		{
+#if defined(CONFIG_64BIT)
+			void *pvVirtStart = kmap(&psPage[uiIdx]);
+			void *pvVirtEnd = pvVirtStart + ui64Size;
+			IMG_CPU_PHYADDR sCPUPhysStart = {page_to_phys(&psPage[uiIdx])};
+			IMG_CPU_PHYADDR sCPUPhysEnd = {sCPUPhysStart.uiAddr + ui64Size};
+			/* all pages have a kernel linear address, flush entire range */
+#else
+			void *pvVirtStart = kmap(&psPage[uiIdx]);
+			void *pvVirtEnd = pvVirtStart + PAGE_SIZE;
+			IMG_CPU_PHYADDR sCPUPhysStart = {page_to_phys(&psPage[uiIdx])};
+			IMG_CPU_PHYADDR sCPUPhysEnd = {sCPUPhysStart.uiAddr + PAGE_SIZE};
+			/* pages might be from HIGHMEM, need to kmap/flush per page */
+#endif
+
+			/* Fallback to range-based d-cache flush */
+			OSCPUCacheInvalidateRangeKM(psDevNode,
+										pvVirtStart, pvVirtEnd,
+										sCPUPhysStart, sCPUPhysEnd);
+
+#if defined(CONFIG_64BIT)
+			eError = PVRSRV_OK;
+#else
+			kunmap(&psPage[uiIdx]);
+#endif
+		}
+	}
+
+	/* Remap pages into VMALLOC space */
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+	pvVirtAddr = vmap(pagearray, ui32PgCount, VM_READ | VM_WRITE, prot);
+#else
+	pvVirtAddr = vm_map_ram(pagearray, ui32PgCount, -1, prot);
+#endif
+
+	/* Clean-up tmp buffers */
+	OSFreeMem(psDevConfig);
+	OSFreeMem(psDevNode);
+	OSFreeMem(pagearray);
+
+e0:
+	return pvVirtAddr;
+}
+
+static void SysDmaReleaseKernelAddress(void *pvVirtAddr, IMG_UINT64 ui64Size)
+{
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+	vunmap(pvVirtAddr);
+#else
+	vm_unmap_ram(pvVirtAddr, ui64Size >> OSGetPageShift());
+#endif
+}
+
+/*!
+******************************************************************************
+ @Function			SysDmaAllocMem
+
+ @Description 		Allocates physically contiguous memory
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	struct device *psDev;
+	struct page *psPage;
+	size_t uiSize;
+
+	if (psDmaAlloc == NULL ||
+		psDmaAlloc->hHandle ||
+		psDmaAlloc->pvVirtAddr ||
+		psDmaAlloc->ui64Size == 0 ||
+		psDmaAlloc->sBusAddr.uiAddr ||
+		psDmaAlloc->pvOSDevice == NULL)
+	{
+		PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter");
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE);
+	psDev = (struct device *)psDmaAlloc->pvOSDevice;
+
+#if !(defined(CONFIG_L4) || defined(PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC))
+	psDmaAlloc->hHandle = dma_alloc_coherent(psDev, uiSize, (dma_addr_t *)&psDmaAlloc->sBusAddr.uiAddr, GFP_KERNEL);
+#endif
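+	/* Prefer the coherent DMA allocator; if it is compiled out or fails,
+	 * fall back to alloc_pages() plus a streaming DMA mapping below.
+	 */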
+	if (psDmaAlloc->hHandle)
+	{
+		psDmaAlloc->pvVirtAddr = psDmaAlloc->hHandle;
+
+		PVR_DPF((PVR_DBG_MESSAGE,
+				"Allocated DMA buffer V:0x%p P:0x%llx S:0x"IMG_SIZE_FMTSPECX,
+				psDmaAlloc->pvVirtAddr,
+				psDmaAlloc->sBusAddr.uiAddr,
+				uiSize));
+	}
+	else if ((psPage = alloc_pages(GFP_KERNEL, get_order(uiSize))))
+	{
+#if defined(CONFIG_L4)
+		/* L4 is a para-virtualized environment, the PFN space is a virtual space and not physical space */
+		psDmaAlloc->sBusAddr.uiAddr = l4x_virt_to_phys((void*)((unsigned long)page_to_pfn(psPage) << PAGE_SHIFT));
+#else
+		psDmaAlloc->sBusAddr.uiAddr = dma_map_page(psDev, psPage, 0, uiSize, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(psDev, psDmaAlloc->sBusAddr.uiAddr))
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"dma_map_page() failed, page 0x%p order %d",
+					psPage,
+					get_order(uiSize)));
+			__free_pages(psPage, get_order(uiSize));
+			goto e0;
+		}
+		psDmaAlloc->psPage = psPage;
+#endif
+
+		psDmaAlloc->pvVirtAddr = SysDmaAcquireKernelAddress(psPage, uiSize, psDmaAlloc->pvOSDevice);
+		if (! psDmaAlloc->pvVirtAddr)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"SysDmaAcquireKernelAddress() failed, page 0x%p order %d",
+					psPage,
+					get_order(uiSize)));
+#if !defined(CONFIG_L4)
+			dma_unmap_page(psDev, psDmaAlloc->sBusAddr.uiAddr, uiSize, DMA_BIDIRECTIONAL);
+#endif
+			__free_pages(psPage, get_order(uiSize));
+			goto e0;
+		}
+
+		PVR_DPF((PVR_DBG_MESSAGE,
+				"Allocated contiguous buffer V:0x%p P:0x%llx S:0x"IMG_SIZE_FMTSPECX,
+				psDmaAlloc->pvVirtAddr,
+				psDmaAlloc->sBusAddr.uiAddr,
+				uiSize));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,	"Unable to allocate contiguous buffer, size: 0x"IMG_SIZE_FMTSPECX, uiSize));
+		eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+	}
+
+e0:
+	PVR_LOGR_IF_FALSE((psDmaAlloc->pvVirtAddr), "DMA/CMA allocation failed", PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES);
+	return eError;
+}
+
+/*!
+******************************************************************************
+ @Function			SysDmaFreeMem
+
+ @Description 		Frees physically contiguous memory
+
+ @Return			void
+ ******************************************************************************/
+void SysDmaFreeMem(DMA_ALLOC *psDmaAlloc)
+{
+	size_t uiSize;
+	struct device *psDev;
+
+	if (psDmaAlloc == NULL ||
+		psDmaAlloc->ui64Size == 0 ||
+		psDmaAlloc->pvOSDevice == NULL ||
+		psDmaAlloc->pvVirtAddr == NULL ||
+		psDmaAlloc->sBusAddr.uiAddr == 0)
+	{
+		PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter");
+		return;
+	}
+
+	uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE);
+	psDev = (struct device *)psDmaAlloc->pvOSDevice;
+
+	if (psDmaAlloc->pvVirtAddr != psDmaAlloc->hHandle)
+	{
+		SysDmaReleaseKernelAddress(psDmaAlloc->pvVirtAddr, uiSize);
+	}
+
+	if (! psDmaAlloc->hHandle)
+	{
+		struct page *psPage;
+#if defined(CONFIG_L4)
+		psPage = pfn_to_page((unsigned long)l4x_phys_to_virt(psDmaAlloc->sBusAddr.uiAddr) >> PAGE_SHIFT);
+#else
+		dma_unmap_page(psDev, psDmaAlloc->sBusAddr.uiAddr, uiSize, DMA_BIDIRECTIONAL);
+		psPage = psDmaAlloc->psPage;
+#endif
+		__free_pages(psPage, get_order(uiSize));
+		return;
+	}
+
+	dma_free_coherent(psDev, uiSize, psDmaAlloc->hHandle, (dma_addr_t)psDmaAlloc->sBusAddr.uiAddr);
+}
+
+/*!
+******************************************************************************
+ @Function			SysDmaRegisterForIoRemapping
+
+ @Description 		Registers DMA_ALLOC for manual I/O remapping
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psDmaAlloc)
+{
+	size_t uiSize;
+	IMG_UINT32 ui32Idx;
+	IMG_BOOL bTabEntryFound = IMG_TRUE;
+	PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS;
+
+	if (psDmaAlloc == NULL ||
+		psDmaAlloc->ui64Size == 0 ||
+		psDmaAlloc->pvOSDevice == NULL ||
+		psDmaAlloc->pvVirtAddr == NULL ||
+		psDmaAlloc->sBusAddr.uiAddr == 0)
+	{
+		PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter");
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE);
+
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		/* Find a free I/O remap table entry */
+		if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == NULL)
+		{
+			PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == 0);
+			PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].ui64Size == 0);
+			break;
+		}
+	}
+
+	if (ui32Idx >= DMA_MAX_IOREMAP_ENTRIES)
+	{
+		bTabEntryFound = IMG_FALSE;
+	}
+
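+	/* Either the allocation is already registered (re-registering an
+	 * identical tuple succeeds) or the free slot found above is claimed.
+	 */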
+	if (bTabEntryFound)
+	{
+		IMG_BOOL bSameVAddr, bSamePAddr, bSameSize;
+
+		bSamePAddr = gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == psDmaAlloc->sBusAddr.uiAddr;
+		bSameVAddr = gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr;
+		bSameSize = gsDmaIoRemapArray[ui32Idx].ui64Size == uiSize;
+
+		if (bSameVAddr)
+		{
+			if (bSamePAddr && bSameSize)
+			{
+				eError = PVRSRV_OK;
+			}
+			else
+			{
+				eError = PVRSRV_ERROR_ALREADY_EXISTS;
+			}
+		}
+		else
+		{
+			PVR_ASSERT(bSamePAddr == IMG_FALSE);
+
+			gsDmaIoRemapArray[ui32Idx].ui64Size = uiSize;
+			gsDmaIoRemapArray[ui32Idx].sBusAddr = psDmaAlloc->sBusAddr;
+			gsDmaIoRemapArray[ui32Idx].pvVirtAddr = psDmaAlloc->pvVirtAddr;
+
+			PVR_DPF((PVR_DBG_MESSAGE,
+					"DMA: register I/O remap: "
+					"VA: 0x%p, PA: 0x%llx, Size: 0x"IMG_SIZE_FMTSPECX,
+					psDmaAlloc->pvVirtAddr,
+					psDmaAlloc->sBusAddr.uiAddr,
+					uiSize));
+
+			gbEnableDmaIoRemapping = IMG_TRUE;
+			eError = PVRSRV_OK;
+		}
+	}
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+ @Function			SysDmaDeregisterForIoRemapping
+
+ @Description 		Deregisters DMA_ALLOC from manual I/O remapping
+
+ @Return			void
+ ******************************************************************************/
+void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psDmaAlloc)
+{
+	size_t uiSize;
+	IMG_UINT32 ui32Idx;
+
+	if (psDmaAlloc == NULL ||
+		psDmaAlloc->ui64Size == 0 ||
+		psDmaAlloc->pvOSDevice == NULL ||
+		psDmaAlloc->pvVirtAddr == NULL ||
+		psDmaAlloc->sBusAddr.uiAddr == 0)
+	{
+		PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter");
+		return;
+	}
+
+	uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE);
+
+	/* Remove the matching entry from the I/O remap table */
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr)
+		{
+			gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr = 0;
+			gsDmaIoRemapArray[ui32Idx].pvVirtAddr = NULL;
+			gsDmaIoRemapArray[ui32Idx].ui64Size = 0;
+
+			PVR_DPF((PVR_DBG_MESSAGE,
+					"DMA: deregister I/O remap: "
+					"VA: 0x%p, PA: 0x%llx, Size: 0x"IMG_SIZE_FMTSPECX,
+					psDmaAlloc->pvVirtAddr,
+					psDmaAlloc->sBusAddr.uiAddr,
+					uiSize));
+
+			break;
+		}
+	}
+
+	/* Check whether any other I/O remap entries remain */
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr != NULL)
+		{
+			break;
+		}
+	}
+
+	if (ui32Idx == DMA_MAX_IOREMAP_ENTRIES)
+	{
+		/* No entries found so disable remapping */
+		gbEnableDmaIoRemapping = IMG_FALSE;
+	}
+}
+
+/*!
+******************************************************************************
+ @Function			SysDmaDevPAddrToCpuVAddr
+
+ @Description 		Maps a DMA_ALLOC physical address to CPU virtual address
+
+ @Return			IMG_CPU_VIRTADDR on success. Otherwise, NULL
+ ******************************************************************************/
+IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size)
+{
+	IMG_CPU_VIRTADDR pvDMAVirtAddr = NULL;
+	DMA_ALLOC *psHeapDmaAlloc;
+	IMG_UINT32 ui32Idx;
+
+	if (gbEnableDmaIoRemapping == IMG_FALSE)
+	{
+		return pvDMAVirtAddr;
+	}
+
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx];
+		if (psHeapDmaAlloc->sBusAddr.uiAddr && uiAddr >= psHeapDmaAlloc->sBusAddr.uiAddr)
+		{
+			IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size;
+			IMG_UINT64 uiOffset = uiAddr - psHeapDmaAlloc->sBusAddr.uiAddr;
+
+			if (uiOffset < uiSpan)
+			{
+				PVR_ASSERT((uiOffset+ui64Size-1) < uiSpan);
+				pvDMAVirtAddr = psHeapDmaAlloc->pvVirtAddr + uiOffset;
+
+				PVR_DPF((PVR_DBG_MESSAGE,
+					"DMA: remap: PA: 0x%llx => VA: 0x%p",
+					uiAddr, pvDMAVirtAddr));
+
+				break;
+			}
+		}
+	}
+
+	return pvDMAVirtAddr;
+}
+
+/*!
+******************************************************************************
+ @Function			SysDmaCpuVAddrToDevPAddr
+
+ @Description 		Maps a DMA_ALLOC CPU virtual address to physical address
+
+ @Return			Non-zero value on success. Otherwise, 0
+ ******************************************************************************/
+IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr)
+{
+	IMG_UINT64 uiAddr = 0;
+	DMA_ALLOC *psHeapDmaAlloc;
+	IMG_UINT32 ui32Idx;
+
+	if (gbEnableDmaIoRemapping == IMG_FALSE)
+	{
+		return uiAddr;
+	}
+
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx];
+		if (psHeapDmaAlloc->pvVirtAddr && pvDMAVirtAddr >= psHeapDmaAlloc->pvVirtAddr)
+		{
+			IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size;
+			IMG_UINT64 uiOffset = pvDMAVirtAddr - psHeapDmaAlloc->pvVirtAddr;
+
+			if (uiOffset < uiSpan)
+			{
+				uiAddr = psHeapDmaAlloc->sBusAddr.uiAddr + uiOffset;
+
+				PVR_DPF((PVR_DBG_MESSAGE,
+					"DMA: remap: VA: 0x%p => PA: 0x%llx",
+					pvDMAVirtAddr, uiAddr));
+
+				break;
+			}
+		}
+	}
+
+	return uiAddr;
+}
+
+/******************************************************************************
+ End of file (dma_support.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/dma_support.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/dma_support.h
new file mode 100644
index 0000000..851e68a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/dma_support.h
@@ -0,0 +1,129 @@
+/*************************************************************************/ /*!
+@File           dma_support.h
+@Title          Device contiguous memory allocator and I/O re-mapper
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides a contiguous memory allocator API; mainly
+                used for allocating / ioremapping (DMA/PA <-> CPU/VA)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DMA_SUPPORT_H_
+#define _DMA_SUPPORT_H_
+
+#include "osfunc.h"
+#include "pvrsrv.h"
+
+typedef struct _DMA_ALLOC_
+{
+	IMG_UINT64	ui64Size;
+	IMG_CPU_VIRTADDR pvVirtAddr;
+	IMG_DEV_PHYADDR	 sBusAddr;
+	IMG_HANDLE hHandle;
+#if defined(LINUX)
+	struct page *psPage;
+#endif
+	void *pvOSDevice;
+} DMA_ALLOC;
+
+/*!
+******************************************************************************
+ @Function			SysDmaAllocMem
+
+ @Description 		Allocates physically contiguous memory
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc);
+
+/*!
+******************************************************************************
+ @Function			SysDmaFreeMem
+
+ @Description 		Frees physically contiguous memory
+
+ @Return			void
+ ******************************************************************************/
+void SysDmaFreeMem(DMA_ALLOC *psDmaAlloc);
+
+/*!
+******************************************************************************
+ @Function			SysDmaRegisterForIoRemapping
+
+ @Description 		Registers DMA_ALLOC for manual I/O remapping
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc);
+
+/*!
+******************************************************************************
+ @Function			SysDmaDeregisterForIoRemapping
+
+ @Description 		Deregisters DMA_ALLOC from manual I/O remapping
+
+ @Return			void
+ ******************************************************************************/
+void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc);
+
+/*!
+******************************************************************************
+ @Function			SysDmaDevPAddrToCpuVAddr
+
+ @Description 		Maps a DMA_ALLOC physical address to CPU virtual address
+
+ @Return			IMG_CPU_VIRTADDR on success. Otherwise, NULL
+ ******************************************************************************/
+IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size);
+
+/*!
+******************************************************************************
+ @Function			SysDmaCpuVAddrToDevPAddr
+
+ @Description 		Maps a DMA_ALLOC CPU virtual address to physical address
+
+ @Return			Non-zero value on success. Otherwise, 0
+ ******************************************************************************/
+IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr);
+
+#endif /* _DMA_SUPPORT_H_ */
+
+/*****************************************************************************
+ End of file (dma_support.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/pci_support.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/pci_support.c
new file mode 100644
index 0000000..c3bbcc4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/pci_support.c
@@ -0,0 +1,726 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <linux/pci.h>
+
+#if defined(CONFIG_MTRR)
+#include <asm/mtrr.h>
+#endif
+
+#include "pci_support.h"
+#include "allocmem.h"
+
+typedef	struct _PVR_PCI_DEV_TAG
+{
+	struct pci_dev		*psPCIDev;
+	HOST_PCI_INIT_FLAGS	ePCIFlags;
+	IMG_BOOL		abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	int			iMTRR[DEVICE_COUNT_RESOURCE];
+#endif
+} PVR_PCI_DEV;
+
+/*************************************************************************/ /*!
+@Function       OSPCISetDev
+@Description    Set a PCI device for subsequent use.
+@Input          pvPCICookie             Pointer to OS specific PCI structure
+@Input          eFlags                  Flags
+@Return		PVRSRV_PCI_DEV_HANDLE   Pointer to PCI device handle
+*/ /**************************************************************************/
+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
+{
+	int err;
+	IMG_UINT32 i;
+	PVR_PCI_DEV *psPVRPCI;
+
+	psPVRPCI = OSAllocMem(sizeof(*psPVRPCI));
+	if (psPVRPCI == NULL)
+	{
+		printk(KERN_ERR "OSPCISetDev: Couldn't allocate PVR PCI structure\n");
+		return NULL;
+	}
+
+	psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
+	psPVRPCI->ePCIFlags = eFlags;
+
+	err = pci_enable_device(psPVRPCI->psPCIDev);
+	if (err != 0)
+	{
+		printk(KERN_ERR "OSPCISetDev: Couldn't enable device (%d)\n", err);
+		OSFreeMem(psPVRPCI);
+		return NULL;
+	}
+
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)	/* PRQA S 3358 */ /* misuse of enums */
+	{
+		pci_set_master(psPVRPCI->psPCIDev);
+	}
+
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)		/* PRQA S 3358 */ /* misuse of enums */
+	{
+#if defined(CONFIG_PCI_MSI)
+		err = pci_enable_msi(psPVRPCI->psPCIDev);
+		if (err != 0)
+		{
+			printk(KERN_ERR "OSPCISetDev: Couldn't enable MSI (%d)", err);
+			psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;	/* PRQA S 1474,3358,4130 */ /* misuse of enums */
+		}
+#else
+		printk(KERN_ERR "OSPCISetDev: MSI support not enabled in the kernel");
+#endif
+	}
+
+	/* Initialise the PCI resource and MTRR tracking array */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	{
+		psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+		psPVRPCI->iMTRR[i] = -1;
+#endif
+	}
+
+	return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAcquireDev
+@Description    Acquire a PCI device for subsequent use.
+@Input          ui16VendorID            Vendor PCI ID
+@Input          ui16DeviceID            Device PCI ID
+@Input          eFlags                  Flags
+@Return		PVRSRV_PCI_DEV_HANDLE   Pointer to PCI device handle
+*/ /**************************************************************************/
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID,
+				      IMG_UINT16 ui16DeviceID,
+				      HOST_PCI_INIT_FLAGS eFlags)
+{
+	struct pci_dev *psPCIDev;
+
+	psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
+	if (psPCIDev == NULL)
+	{
+		return NULL;
+	}
+
+	return OSPCISetDev((void *)psPCIDev, eFlags);
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIIRQ
+@Description    Get the interrupt number for the device.
+@Input          hPVRPCI                 PCI device handle
+@Output         pui32IRQ                Pointer to where the interrupt number
+                                        should be returned
+@Return		PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+	if (pui32IRQ == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pui32IRQ = psPVRPCI->psPCIDev->irq;
+
+	return PVRSRV_OK;
+}
+
+/* Functions supported by OSPCIAddrRangeFunc */
+enum HOST_PCI_ADDR_RANGE_FUNC
+{
+	HOST_PCI_ADDR_RANGE_FUNC_LEN,
+	HOST_PCI_ADDR_RANGE_FUNC_START,
+	HOST_PCI_ADDR_RANGE_FUNC_END,
+	HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
+	HOST_PCI_ADDR_RANGE_FUNC_RELEASE
+};
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeFunc
+@Description    Internal support function for various address range related
+                functions
+@Input          eFunc                   Function to perform
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return		IMG_UINT64              Function dependent value
+*/ /**************************************************************************/
+static IMG_UINT64 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
+										 PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+										 IMG_UINT32 ui32Index)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+	if (ui32Index >= DEVICE_COUNT_RESOURCE)
+	{
+		printk(KERN_ERR "OSPCIAddrRangeFunc: Index out of range");
+		return 0;
+	}
+
+	switch (eFunc)
+	{
+		case HOST_PCI_ADDR_RANGE_FUNC_LEN:
+		{
+			return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
+		}
+		case HOST_PCI_ADDR_RANGE_FUNC_START:
+		{
+			return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+		}
+		case HOST_PCI_ADDR_RANGE_FUNC_END:
+		{
+			return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+		}
+		case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
+		{
+			int err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME);
+			if (err != 0)
+			{
+				printk(KERN_ERR "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err);
+				return 0;
+			}
+			psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
+			return 1;
+		}
+		case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
+		{
+			if (psPVRPCI->abPCIResourceInUse[ui32Index])
+			{
+				pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
+				psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
+			}
+			return 1;
+		}
+		default:
+		{
+			printk(KERN_ERR "OSPCIAddrRangeFunc: Unknown function");
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeLen
+@Description    Returns length of a given address range
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return		IMG_UINT64              Length of address range or 0 if no
+                                        such range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeStart
+@Description    Returns the start of a given address range
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return		IMG_UINT64              Start of address range or 0 if no
+                                        such range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index);
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeEnd
+@Description    Returns the end of a given address range
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return		IMG_UINT64              End of address range or 0 if no such
+                                        range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index);
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIRequestAddrRange
+@Description    Request a given address range index for subsequent use
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+								   IMG_UINT32 ui32Index)
+{
+	if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0)
+	{
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+	else
+	{
+		return PVRSRV_OK;
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseAddrRange
+@Description    Release a given address range that is no longer being used
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0)
+	{
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+	else
+	{
+		return PVRSRV_OK;
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIRequestAddrRegion
+@Description    Request a given region from an address range for subsequent use
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Input          uiOffset              Offset into the address range that forms
+                                        the start of the region
+@Input          uiLength              Length of the region
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+									IMG_UINT32 ui32Index,
+									IMG_UINT64 uiOffset,
+									IMG_UINT64 uiLength)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	resource_size_t start;
+	resource_size_t end;
+
+	start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+	end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+
+	/* Check that the requested region is valid */
+	if ((start + uiOffset + uiLength - 1) > end)
+	{
+		return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;
+	}
+
+	if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO)
+	{
+		if (request_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL)
+		{
+			return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+		}
+	}
+	else
+	{
+		if (request_mem_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL)
+		{
+			return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseAddrRegion
+@Description    Release a given region, from an address range, that is no
+                longer in use
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Input          uiOffset                Offset into the address range that forms
+                                        the start of the region
+@Input          uiLength                Length of the region
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+									IMG_UINT32 ui32Index,
+									IMG_UINT64 uiOffset,
+									IMG_UINT64 uiLength)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	resource_size_t start;
+	resource_size_t end;
+
+	start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+	end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+
+	/* Check that the region is valid */
+	if ((start + uiOffset + uiLength - 1) > end)
+	{
+		return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;
+	}
+
+	if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO)
+	{
+		release_region(start + uiOffset, uiLength);
+	}
+	else
+	{
+		release_mem_region(start + uiOffset, uiLength);
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseDev
+@Description    Release a PCI device that is no longer being used
+@Input          hPVRPCI                 PCI device handle
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	int i;
+
+	/* Release all PCI regions that are currently in use */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	{
+		if (psPVRPCI->abPCIResourceInUse[i])
+		{
+			pci_release_region(psPVRPCI->psPCIDev, i);
+			psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+		}
+	}
+
+#if defined(CONFIG_PCI_MSI)
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)		/* PRQA S 3358 */ /* misuse of enums */
+	{
+		pci_disable_msi(psPVRPCI->psPCIDev);
+	}
+#endif
+
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)	/* PRQA S 3358 */ /* misuse of enums */
+	{
+		pci_clear_master(psPVRPCI->psPCIDev);
+	}
+
+	pci_disable_device(psPVRPCI->psPCIDev);
+
+	OSFreeMem(psPVRPCI);
+	/* No need to NULL the pointer: hPVRPCI is the caller's copy on the stack */
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCISuspendDev
+@Description    Prepare PCI device to be turned off by power management
+@Input          hPVRPCI                 PCI device handle
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	int i;
+	int err;
+
+	/* Release all PCI regions that are currently in use */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	{
+		if (psPVRPCI->abPCIResourceInUse[i])
+		{
+			pci_release_region(psPVRPCI->psPCIDev, i);
+		}
+	}
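+	/* abPCIResourceInUse[] is deliberately left set so that OSPCIResumeDev()
+	 * can re-request the same regions on resume.
+	 */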
+
+	err = pci_save_state(psPVRPCI->psPCIDev);
+	if (err != 0)
+	{
+		printk(KERN_ERR "OSPCISuspendDev: pci_save_state_failed (%d)", err);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+
+	pci_disable_device(psPVRPCI->psPCIDev);
+
+	err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND));
+	switch (err)
+	{
+		case 0:
+			break;
+		case -EIO:
+			printk(KERN_ERR "OSPCISuspendDev: device doesn't support PCI PM");
+			break;
+		case -EINVAL:
+			printk(KERN_ERR "OSPCISuspendDev: can't enter requested power state");
+			break;
+		default:
+			printk(KERN_ERR "OSPCISuspendDev: pci_set_power_state failed (%d)", err);
+			break;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIResumeDev
+@Description    Prepare a PCI device to be resumed by power management
+@Input          hPVRPCI                 PCI device handle
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	int err;
+	int i;
+
+	err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
+	switch (err)
+	{
+		case 0:
+			break;
+		case -EIO:
+			printk(KERN_ERR "OSPCIResumeDev: device doesn't support PCI PM");
+			break;
+		case -EINVAL:
+			printk(KERN_ERR "OSPCIResumeDev: can't enter requested power state");
+			return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+		default:
+			printk(KERN_ERR "OSPCIResumeDev: pci_set_power_state failed (%d)", err);
+			return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+	}
+
+	pci_restore_state(psPVRPCI->psPCIDev);
+
+	err = pci_enable_device(psPVRPCI->psPCIDev);
+	if (err != 0)
+	{
+		printk(KERN_ERR "OSPCIResumeDev: Couldn't enable device (%d)", err);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)	/* PRQA S 3358 */ /* misuse of enums */
+		pci_set_master(psPVRPCI->psPCIDev);
+
+	/* Restore the PCI resource tracking array */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	{
+		if (psPVRPCI->abPCIResourceInUse[i])
+		{
+			err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME);
+			if (err != 0)
+			{
+				printk(KERN_ERR "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err);
+			}
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIGetVendorDeviceIDs
+@Description    Retrieve PCI vendor ID and device ID.
+@Input          hPVRPCI                 PCI device handle
+@Output         pui16VendorID           Vendor ID
+@Output         pui16DeviceID           Device ID
+@Return         PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+                                     IMG_UINT16 *pui16VendorID,
+                                     IMG_UINT16 *pui16DeviceID)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	struct pci_dev *psPCIDev;
+
+	if (psPVRPCI == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psPCIDev = psPVRPCI->psPCIDev;
+	if (psPCIDev == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pui16VendorID = psPCIDev->vendor;
+	*pui16DeviceID = psPCIDev->device;
+
+	return PVRSRV_OK;
+}
+
+#if defined(CONFIG_MTRR)
+
+/*************************************************************************/ /*!
+@Function       OSPCIClearResourceMTRRs
+@Description    Clear any BIOS-configured MTRRs for a PCI memory region
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	resource_size_t start, end;
+	int res;
+
+	start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+	end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1;
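+	/* 'end' is exclusive: pci_resource_end() returns the BAR's last byte */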
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+	res = arch_io_reserve_memtype_wc(start, end - start);
+	if (res)
+	{
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+#endif
+	res = arch_phys_wc_add(start, end - start);
+	if (res < 0)
+	{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+		arch_io_free_memtype_wc(start, end - start);
+#endif
+
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+	psPVRPCI->iMTRR[ui32Index] = res;
+#else
+
+	res = mtrr_add(start, end - start, MTRR_TYPE_UNCACHABLE, 0);
+	if (res < 0)
+	{
+		printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+
+	res = mtrr_del(res, start, end - start);
+	if (res < 0)
+	{
+		printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+
+	/* Workaround for overlapping MTRRs. */
+	{
+		IMG_BOOL bGotMTRR0 = IMG_FALSE;
+
+		/* Current mobo BIOSes will normally set up a WRBACK MTRR spanning
+		 * 0->4GB, and then another 4GB->6GB. If the PCI card's automatic &
+		 * overlapping UNCACHABLE MTRR is deleted, we see WRBACK behaviour.
+		 *
+		 * WRBACK is incompatible with some PCI devices, so try to split
+		 * the UNCACHABLE regions up and insert a WRCOMB region instead.
+		 */
+		res = mtrr_add(start, end - start, MTRR_TYPE_WRBACK, 0);
+		if (res < 0)
+		{
+			/* If this fails, services has probably run before and created
+			 * a write-combined MTRR for the test chip. Assume it has, and
+			 * don't return an error here.
+			 */
+			return PVRSRV_OK;
+		}
+
+		if (res == 0)
+			bGotMTRR0 = IMG_TRUE;
+
+		res = mtrr_del(res, start, end - start);
+		if (res < 0)
+		{
+			printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res);
+			return PVRSRV_ERROR_PCI_CALL_FAILED;
+		}
+
+		if (bGotMTRR0)
+		{
+			/* Replace 0 with a non-overlapping WRBACK MTRR */
+			res = mtrr_add(0, start, MTRR_TYPE_WRBACK, 0);
+			if (res < 0)
+			{
+				printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res);
+				return PVRSRV_ERROR_PCI_CALL_FAILED;
+			}
+
+			/* Add a WRCOMB MTRR for the PCI device memory bar */
+			res = mtrr_add(start, end - start, MTRR_TYPE_WRCOMB, 0);
+			if (res < 0)
+			{
+				printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res);
+				return PVRSRV_ERROR_PCI_CALL_FAILED;
+			}
+		}
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseResourceMTRRs
+@Description    Release resources allocated by OSPCIClearResourceMTRRs
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+*/ /**************************************************************************/
+void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+	if (psPVRPCI->iMTRR[ui32Index] >= 0)
+	{
+		arch_phys_wc_del(psPVRPCI->iMTRR[ui32Index]);
+		psPVRPCI->iMTRR[ui32Index] = -1;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+		{
+			resource_size_t start, end;
+
+			start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+			end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1;
+
+			arch_io_free_memtype_wc(start, end - start);
+		}
+#endif
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(hPVRPCI);
+	PVR_UNREFERENCED_PARAMETER(ui32Index);
+#endif
+}
+#endif /* defined(CONFIG_MTRR) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/pci_support.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/pci_support.h
new file mode 100644
index 0000000..fe6d51a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/pci_support.h
@@ -0,0 +1,99 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PCI_SUPPORT_H__
+#define __PCI_SUPPORT_H__
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(LINUX)
+#include <linux/pci.h>
+#define TO_PCI_COOKIE(dev) to_pci_dev((struct device *)(dev))
+#else
+#define TO_PCI_COOKIE(dev) (dev)
+#endif
+
+typedef enum _HOST_PCI_INIT_FLAGS_
+{
+	HOST_PCI_INIT_FLAG_BUS_MASTER	= 0x00000001,
+	HOST_PCI_INIT_FLAG_MSI		= 0x00000002,
+	HOST_PCI_INIT_FLAG_FORCE_I32 	= 0x7fffffff
+} HOST_PCI_INIT_FLAGS;
+
+struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_;
+typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE;
+
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ);
+IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength);
+PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength);
+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16VendorID, IMG_UINT16 *pui16DeviceID);
+
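+/* Illustrative usage sketch (not compiled): a minimal acquire/use/release
+ * sequence for the OSPCI API above. The vendor/device IDs and BAR index are
+ * hypothetical placeholders, and a NULL return from OSPCIAcquireDev() on
+ * failure is an assumption, not something this header guarantees.
+ *
+ *	PVRSRV_PCI_DEV_HANDLE hPVRPCI;
+ *	IMG_UINT32 ui32IRQ;
+ *
+ *	hPVRPCI = OSPCIAcquireDev(0x1010, 0x0001, HOST_PCI_INIT_FLAG_BUS_MASTER);
+ *	if (hPVRPCI != NULL)
+ *	{
+ *		if (OSPCIRequestAddrRange(hPVRPCI, 0) == PVRSRV_OK)
+ *		{
+ *			IMG_UINT64 ui64RegBase = OSPCIAddrRangeStart(hPVRPCI, 0);
+ *
+ *			... map and access registers at ui64RegBase ...
+ *
+ *			(void) OSPCIReleaseAddrRange(hPVRPCI, 0);
+ *		}
+ *		(void) OSPCIIRQ(hPVRPCI, &ui32IRQ);
+ *		(void) OSPCIReleaseDev(hPVRPCI);
+ *	}
+ */
+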
+#if defined(CONFIG_MTRR)
+PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+#else
+static inline PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	PVR_UNREFERENCED_PARAMETER(hPVRPCI);
+	PVR_UNREFERENCED_PARAMETER(ui32Index);
+	return PVRSRV_OK;
+}
+
+static inline void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	PVR_UNREFERENCED_PARAMETER(hPVRPCI);
+	PVR_UNREFERENCED_PARAMETER(ui32Index);
+}
+#endif
+
+#endif /* __PCI_SUPPORT_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/syscommon.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/syscommon.h
new file mode 100644
index 0000000..a2589d0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/syscommon.h
@@ -0,0 +1,129 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common System APIs and structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides common system-specific declarations and
+                macros that are supported by all systems
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__SYSCOMMON_H__)
+#define __SYSCOMMON_H__
+
+#include "img_types.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_device.h"
+#include "pvrsrv_error.h"
+
+/*************************************************************************/ /*!
+@Description    Pointer to a Low-level Interrupt Service Routine (LISR).
+@Input  pvData  Private data provided to the LISR.
+@Return         True if interrupt handled, false otherwise.
+*/ /**************************************************************************/
+typedef IMG_BOOL (*PFN_LISR)(void *pvData);
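+
+/* Illustrative sketch (not compiled): the typical shape of a LISR. It does
+ * the minimum in interrupt context and defers further work to an MISR via
+ * OSScheduleMISR(); EXAMPLE_DEV_DATA, DeviceIRQPending() and DeviceClearIRQ()
+ * are hypothetical device-specific placeholders.
+ *
+ *	static IMG_BOOL ExampleLISR(void *pvData)
+ *	{
+ *		EXAMPLE_DEV_DATA *psDevData = pvData;
+ *
+ *		if (!DeviceIRQPending(psDevData))
+ *		{
+ *			return IMG_FALSE;	(not our interrupt)
+ *		}
+ *
+ *		DeviceClearIRQ(psDevData);
+ *		OSScheduleMISR(psDevData->hMISRData);
+ *
+ *		return IMG_TRUE;	(interrupt handled)
+ *	}
+ */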
+
+/**************************************************************************/ /*!
+@Function       SysDevInit
+@Description    System specific device initialisation function.
+@Input          pvOSDevice          pointer to the OS device reference
+@Input          ppsDevConfig        returned device configuration info
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig);
+
+/**************************************************************************/ /*!
+@Function       SysDevDeInit
+@Description    System specific device deinitialisation function.
+@Input          psDevConfig        device configuration info of the device to be
+                                   deinitialised
+@Return         None.
+*/ /***************************************************************************/
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/**************************************************************************/ /*!
+@Function       SysDebugInfo
+@Description    Dump system specific device debug information.
+@Input          psDevConfig         pointer to device configuration info
+@Input          pfnDumpDebugPrintf  the 'printf' function to be called to
+                                    display the debug info
+@Input          pvDumpDebugFile     optional file identifier to be passed to
+                                    the 'printf' function if required
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile);
+
+/**************************************************************************/ /*!
+@Function       SysInstallDeviceLISR
+@Description    Installs the system Low-level Interrupt Service Routine (LISR)
+                which handles low-level processing of interrupts from the device
+                (GPU).
+                The LISR will be invoked when the device raises an interrupt. An
+                LISR may not be descheduled, so code which needs to do so should
+                be placed in an MISR.
+                The installed LISR will schedule any MISRs once it has completed
+                its interrupt processing, by calling OSScheduleMISR().
+@Input          hSysData      pointer to the system data of the device
+@Input          ui32IRQ       the IRQ on which the LISR is to be installed
+@Input          pszName       name of the module installing the LISR
+@Input          pfnLISR       pointer to the function to be installed as the
+                              LISR
+@Input          pvData        private data provided to the LISR
+@Output         phLISRData    handle to the installed LISR (to be used for a
+                              subsequent uninstall)
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+								  IMG_UINT32 ui32IRQ,
+								  const IMG_CHAR *pszName,
+								  PFN_LISR pfnLISR,
+								  void *pvData,
+								  IMG_HANDLE *phLISRData);
+
+/**************************************************************************/ /*!
+@Function       SysUninstallDeviceLISR
+@Description    Uninstalls the system Low-level Interrupt Service Routine (LISR)
+                which handles low-level processing of interrupts from the device
+                (GPU).
+@Input          hLISRData     handle of the LISR to be uninstalled
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData);
+
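+/* Illustrative usage sketch (not compiled): pairing the install/uninstall
+ * calls above. ExampleLISR is the hypothetical handler sketched earlier in
+ * this header; error handling is abbreviated.
+ *
+ *	IMG_HANDLE hLISRData;
+ *	PVRSRV_ERROR eError;
+ *
+ *	eError = SysInstallDeviceLISR(hSysData, ui32IRQ, "example_module",
+ *	                              ExampleLISR, psDevData, &hLISRData);
+ *	if (eError != PVRSRV_OK)
+ *		return eError;
+ *
+ *	... device operates, the LISR runs on each interrupt ...
+ *
+ *	eError = SysUninstallDeviceLISR(hLISRData);
+ */
+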
+#endif /* !defined(__SYSCOMMON_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/sysvalidation.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/sysvalidation.h
new file mode 100644
index 0000000..ae46ee5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/sysvalidation.h
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title          Validation System APIs and structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+                needed for hardware validation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSVALIDATION_H__)
+#define __SYSVALIDATION_H__
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "img_types.h"
+#include "rgxdefs_km.h"
+#include "virt_validation_defs.h"
+
+void SysSetOSidRegisters(IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+                         IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
+void SysPrintAndResetFaultStatusRegister(void);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+void SysSetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState);
+void SysSetTrustedDeviceAceEnabled(void);
+#endif
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+#endif /* !defined(__SYSVALIDATION_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_impl.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_impl.h
new file mode 100644
index 0000000..b02b449
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_impl.h
@@ -0,0 +1,283 @@
+/*************************************************************************/ /*!
+@File           vmm_impl.h
+@Title          Common VM manager API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides common VM manager definitions that need to
+                be shared by system virtualization layer itself and modules that
+                implement the actual VM manager types.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VMM_IMPL_H_
+#define _VMM_IMPL_H_
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/*
+ 	 Virtual machine manager para-virtualization (PVZ) connection:
+		- Type is implemented by host and guest drivers
+			- Assumes synchronous function call semantics
+			- Unidirectional semantics
+				- For Host  (vmm -> host)
+				- For Guest (guest -> vmm)
+			- Parameters can be IN/OUT/INOUT
+
+		- Host pvz entries are pre-implemented by IMG
+			- For host implementation, see vmm_pvz_server.c
+			- Called by host side hypercall handler or VMM
+
+		- Guest pvz entries are supplied by 3rd-party
+			- These are specific to hypervisor (VMM) type
+			- These implement the actual hypercalls mechanism
+
+	 Para-virtualization call runtime sequence:
+		1 - Guest driver in guest VM calls PVZ function
+		1.1 - Guest PVZ connection calls
+		1.2 - Guest VM Manager type which
+		1.2.1 - Performs any pre-processing like parameter packing, etc.
+		1.2.2 - Issues hypercall (blocking synchronous call)
+
+		2 - VM Manager (hypervisor) receives hypercall
+		2.1 - Hypercall handler:
+		2.1.1 - Performs any pre-processing
+		2.1.2 - If call terminates in VM Manager: perform action and return from hypercall
+		2.1.3 - Otherwise forward to host driver (implementation specific call)
+
+		3 - Host driver receives call from VM Manager
+		3.1 - Host VM manager type:
+		3.1.1 - Performs any pre-processing like parameter unpacking, etc.
+		3.1.2 - Acquires host driver PVZ handler and calls the appropriate entry
+		3.2 - Host PVZ connection calls corresponding host system virtualisation layer
+		3.3 - Host driver system virtualisation layer:
+		3.3.1 - Perform action requested by guest driver
+		3.3.2 - Return to host VM Manager type
+		3.4 - Host VM Manager type:
+		3.4.1 - Prepare to return from hypercall
+		3.4.2 - Perform any post-processing like result packing, etc.
+		3.4.3 - Issue return from hypercall
+
+		4 - VM Manager (hypervisor)
+		4.1 - Perform any post-processing
+		4.2 - Return control to guest driver
+
+		5 - Guest driver in guest VM
+		5.1 - Perform any post-processing like parameter unpacking, etc.
+		5.2 - Continue execution in guest VM
+ */
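+
+/* Illustrative sketch (not compiled): steps 1.2.1/1.2.2 above expressed as a
+ * guest-side sHostFuncTab hook. VmmIssueHypercall() is a hypothetical
+ * stand-in for whatever hypercall primitive the 3rd-party VM manager
+ * integration actually provides; the argument packing is likewise an
+ * assumption.
+ *
+ *	static PVRSRV_ERROR ExampleMapDevPhysHeap(IMG_UINT32 ui32FuncID,
+ *	                                          IMG_UINT32 ui32DevID,
+ *	                                          IMG_UINT64 ui64Size,
+ *	                                          IMG_UINT64 ui64PAddr)
+ *	{
+ *		IMG_UINT64 aui64Args[3];
+ *
+ *		aui64Args[0] = ui32DevID;	(parameter packing: step 1.2.1)
+ *		aui64Args[1] = ui64Size;
+ *		aui64Args[2] = ui64PAddr;
+ *
+ *		(blocking synchronous hypercall into the VM manager: step 1.2.2)
+ *		return VmmIssueHypercall(ui32FuncID, aui64Args, 3);
+ *	}
+ */
+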
+typedef struct _VMM_PVZ_CONNECTION_
+{
+	struct {
+		/*
+		   This pair must be implemented if the device configuration is
+		   not provided during guest build or if the device interrupt
+		   is dynamically mapped into the VM virtual interrupt line.
+		   If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED.
+		 */
+		PVRSRV_ERROR (*pfnCreateDevConfig)(IMG_UINT32 ui32FuncID,
+										   IMG_UINT32 ui32DevID,
+										   IMG_UINT32 *pui32IRQ,
+										   IMG_UINT32 *pui32RegsSize,
+										   IMG_UINT64 *pui64RegsPBase);
+
+		PVRSRV_ERROR (*pfnDestroyDevConfig)(IMG_UINT32 ui32FuncID,
+											IMG_UINT32 ui32DevID);
+
+		/*
+		   This pair must be implemented if the host is responsible for
+		   allocating the physical heaps on behalf of the guest; these
+		   physical heaps Addr/Size are allocated in the host domain
+		   and are communicated to the guest so must be re-expressed
+		   relative to the guest VM IPA space. The guest assumes said
+		   memory is not managed by the underlying GuestOS kernel.
+   		   If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED.
+		 */
+		PVRSRV_ERROR (*pfnCreateDevPhysHeaps)(IMG_UINT32 ui32FuncID,
+											  IMG_UINT32 ui32DevID,
+											  IMG_UINT32 *peType,
+											  IMG_UINT64 *pui64FwSize,
+											  IMG_UINT64 *pui64FwPAddr,
+											  IMG_UINT64 *pui64GpuSize,
+											  IMG_UINT64 *pui64GpuPAddr);
+
+		PVRSRV_ERROR (*pfnDestroyDevPhysHeaps)(IMG_UINT32 ui32FuncID,
+											   IMG_UINT32 ui32DevID);
+
+		/*
+		   This pair must be implemented if the guest is responsible
+		   for allocating the physical heap that backs its firmware
+		   allocations, this is the default configuration. The physical
+		   heap is allocated within the guest VM IPA space and this
+		   IPA Addr/Size must be re-expressed as PA space Addr/Size
+		   by the VM manager before forwarding request to host.
+   		   If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED.
+		 */
+		PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32FuncID,
+										  IMG_UINT32 ui32DevID,
+										  IMG_UINT64 ui64Size,
+										  IMG_UINT64 ui64PAddr);
+
+		PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32FuncID,
+											IMG_UINT32 ui32DevID);
+	} sHostFuncTab;
+
+	struct {
+		/*
+			Corresponding server side entries to handle guest PVZ calls
+			NOTE:
+				 - Pvz function ui32OSID parameter
+				 	 - OSID determination is responsibility of VM manager
+				 	 - Actual OSID value must be supplied by VM manager
+					 	- This can be done either in client/VMM/host side
+					 - Must be done before host pvz function(s) are called
+				 	 - Host pvz function assumes valid OSID
+		 */
+		PVRSRV_ERROR (*pfnCreateDevConfig)(IMG_UINT32 ui32OSID,
+										   IMG_UINT32 ui32FuncID,
+										   IMG_UINT32 ui32DevID,
+										   IMG_UINT32 *pui32IRQ,
+										   IMG_UINT32 *pui32RegsSize,
+										   IMG_UINT64 *pui64RegsPBase);
+
+		PVRSRV_ERROR (*pfnDestroyDevConfig)(IMG_UINT32 ui32OSID,
+											IMG_UINT32 ui32FuncID,
+											IMG_UINT32 ui32DevID);
+
+		PVRSRV_ERROR (*pfnCreateDevPhysHeaps)(IMG_UINT32 ui32OSID,
+											  IMG_UINT32 ui32FuncID,
+											  IMG_UINT32 ui32DevID,
+											  IMG_UINT32 *peType,
+											  IMG_UINT64 *pui64FwSize,
+											  IMG_UINT64 *pui64FwPAddr,
+											  IMG_UINT64 *pui64GpuSize,
+											  IMG_UINT64 *pui64GpuPAddr);
+
+		PVRSRV_ERROR (*pfnDestroyDevPhysHeaps)(IMG_UINT32 ui32OSID,
+											   IMG_UINT32 ui32FuncID,
+											   IMG_UINT32 ui32DevID);
+
+		PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32OSID,
+										  IMG_UINT32 ui32FuncID,
+										  IMG_UINT32 ui32DevID,
+										  IMG_UINT64 ui64Size,
+										  IMG_UINT64 ui64PAddr);
+
+		PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32OSID,
+											IMG_UINT32 ui32FuncID,
+											IMG_UINT32 ui32DevID);
+	} sGuestFuncTab;
+
+	struct {
+		/*
+		   This configuration interface specifies which driver host/guest is
+		   responsible for allocating the physical memory backing the guest
+		   driver(s) physical heap. Both the host and guest(s) must agree to
+		   use the same policy. It must be implemented and should return
+		   PVRSRV_OK.
+		 */
+		PVRSRV_ERROR (*pfnGetDevPhysHeapOrigin)(PVRSRV_DEVICE_CONFIG *psDevConfig,
+												PVRSRV_DEVICE_PHYS_HEAP eHeap,
+												PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin);
+
+		/*
+			If the host is responsible for allocating the backing memory for
+			the physical heap, the function should return heap Addr/Size value
+			pairs obtained in sHostFuncTab->pfnCreateDevPhysHeaps().
+
+			If the guest is responsible for allocating the backing memory for
+			the physical heap, the function should return the proper values to
+			direct the guest driver on which allocation method to use. This is
+			communicated by using the returned pui64Addr/pui64Size value pairs
+			as shown below (an illustrative sketch follows the
+			VMM_PVZ_CONNECTION definition):
+
+				For UMA platforms:
+					- For GPU physical heap
+						- 0/0							=> UMA
+						- 0/0x[hex-value]				=> DMA
+						- 0x[hex-value]/0x[hex-value]	=> UMA/carve-out
+
+					- For FW physical heap
+						- 0/0x[hex-value]				=> DMA
+						- 0x[hex-value]/0x[hex-value]	=> UMA/carve-out
+
+				For LMA platforms:
+					- For GPU physical heap
+						- 0x[hex-value]/0x[hex-value]	=> LMA
+
+					- For FW physical heap
+						- 0x[hex-value]/0x[hex-value]	=> LMA
+		*/
+		PVRSRV_ERROR (*pfnGetDevPhysHeapAddrSize)(PVRSRV_DEVICE_CONFIG *psDevConfig,
+												  PVRSRV_DEVICE_PHYS_HEAP eHeap,
+												  IMG_UINT64 *pui64Size,
+												  IMG_UINT64 *pui64Addr);
+	} sConfigFuncTab;
+
+	struct {
+		/*
+		   This is used by the VM manager to report pertinent runtime guest VM
+		   information to the host; these events may in turn be forwarded to
+		   the firmware
+		 */
+		PVRSRV_ERROR (*pfnOnVmOnline)(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority);
+
+		PVRSRV_ERROR (*pfnOnVmOffline)(IMG_UINT32 ui32OSID);
+
+		PVRSRV_ERROR (*pfnVMMConfigure)(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue);
+
+	} sVmmFuncTab;
+} VMM_PVZ_CONNECTION;
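+
+/* Illustrative sketch (not compiled): a pfnGetDevPhysHeapAddrSize
+ * implementation for a UMA platform where the guest allocates its own
+ * firmware physical heap via DMA, following the Addr/Size encoding described
+ * in sConfigFuncTab above. The 16MB heap size is a hypothetical example
+ * value.
+ *
+ *	static PVRSRV_ERROR ExampleGetDevPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ *	                                                  PVRSRV_DEVICE_PHYS_HEAP eHeap,
+ *	                                                  IMG_UINT64 *pui64Size,
+ *	                                                  IMG_UINT64 *pui64Addr)
+ *	{
+ *		if (eHeap == PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL)
+ *		{
+ *			*pui64Addr = 0;			(0/size => DMA)
+ *			*pui64Size = 0x1000000;		(hypothetical 16MB FW heap)
+ *		}
+ *		else
+ *		{
+ *			*pui64Addr = 0;			(0/0 => UMA)
+ *			*pui64Size = 0;
+ *		}
+ *
+ *		return PVRSRV_OK;
+ *	}
+ */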
+
+/*!
+******************************************************************************
+ @Function			VMMCreatePvzConnection() and VMMDestroyPvzConnection()
+
+ @Description 		Both the guest and the VM manager call this to obtain a
+ 					PVZ connection: the guest calls it to connect to the VM
+					manager, and the VM manager calls it to connect to the
+					host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection);
+void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection);
+
+#endif /* _VMM_IMPL_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_client.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_client.c
new file mode 100644
index 0000000..502a67d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_client.c
@@ -0,0 +1,336 @@
+/*************************************************************************/ /*!
+@File			vmm_pvz_client.c
+@Title          VM manager client para-virtualization
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file provides the VMM client para-virtualization APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#include "vmm_impl.h"
+#include "vz_vmm_pvz.h"
+#include "vz_physheap.h"
+#include "vmm_pvz_client.h"
+
+
+static inline void
+PvzClientLockAcquire(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	OSLockAcquire(psPVRSRVData->hPvzConnectionLock);
+}
+
+static inline void
+PvzClientLockRelease(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	OSLockRelease(psPVRSRVData->hPvzConnectionLock);
+}
+
+/*
+ * ===========================================================
+ *  The following client para-virtualization (pvz) functions
+ *  are exclusively called by guests to initiate a pvz call
+ *  to the host via hypervisor (guest -> vm manager -> host)
+ * ===========================================================
+ */
+
+PVRSRV_ERROR
+PvzClientCreateDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						 IMG_UINT32 ui32DevID)
+{
+	IMG_UINT32 ui32IRQ;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32RegsSize;
+	IMG_UINT64 ui64RegsCpuPBase;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	IMG_UINT32 uiFuncID = PVZ_BRIDGE_CREATEDEVICECONFIG;
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PvzClientLockAcquire();
+
+	PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnCreateDevConfig);
+
+	eError = psVmmPvz->sHostFuncTab.pfnCreateDevConfig(uiFuncID,
+													   ui32DevID,
+													   &ui32IRQ,
+													   &ui32RegsSize,
+													   &ui64RegsCpuPBase);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	/* Device VM system configuration MMIO/IRQ values */
+	if (ui64RegsCpuPBase)
+	{
+		psDevConfig->sRegsCpuPBase.uiAddr = ui64RegsCpuPBase;
+	}
+
+	if (ui32RegsSize)
+	{
+		psDevConfig->ui32RegsSize = ui32RegsSize;
+	}
+
+	if (ui32IRQ)
+	{
+		psDevConfig->ui32IRQ = ui32IRQ;
+	}
+
+	PVR_ASSERT(psDevConfig->sRegsCpuPBase.uiAddr);
+	PVR_ASSERT(psDevConfig->ui32RegsSize);
+	PVR_ASSERT(psDevConfig->ui32IRQ);
+
+e0:
+	PvzClientLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzClientDestroyDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						  IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	IMG_UINT32 uiFuncID = PVZ_BRIDGE_DESTROYDEVICECONFIG;
+
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PvzClientLockAcquire();
+
+	PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnDestroyDevConfig);
+
+	eError = psVmmPvz->sHostFuncTab.pfnDestroyDevConfig(uiFuncID,
+														ui32DevID);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+e0:
+	PvzClientLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzClientCreateDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32HeapType;
+	PHYS_HEAP_TYPE eHeapType;
+	IMG_UINT64 ui64FwPhysHeapSize;
+	IMG_UINT64 ui64FwPhysHeapAddr;
+	IMG_UINT64 ui64GpuPhysHeapSize;
+	IMG_UINT64 ui64GpuPhysHeapAddr;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	PVRSRV_DEVICE_PHYS_HEAP ePhysHeap;
+	IMG_UINT32 uiFuncID = PVZ_BRIDGE_CREATEDEVICEPHYSHEAPS;
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PvzClientLockAcquire();
+
+	PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnCreateDevPhysHeaps);
+
+	eError = psVmmPvz->sHostFuncTab.pfnCreateDevPhysHeaps(uiFuncID,
+														  ui32DevID,
+														  &ui32HeapType,
+														  &ui64FwPhysHeapSize,
+														  &ui64FwPhysHeapAddr,
+														  &ui64GpuPhysHeapSize,
+														  &ui64GpuPhysHeapAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	eHeapType = (PHYS_HEAP_TYPE) ui32HeapType;
+	for (ePhysHeap = 0; ePhysHeap < PVRSRV_DEVICE_PHYS_HEAP_LAST; ePhysHeap++)
+	{
+		IMG_UINT64 ui64PhysHeapSize;
+		IMG_DEV_PHYADDR sPhysHeapAddr;
+
+		switch (ePhysHeap)
+		{
+			case PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL:
+				sPhysHeapAddr.uiAddr = ui64GpuPhysHeapAddr;
+				ui64PhysHeapSize = ui64GpuPhysHeapSize;
+				break;
+
+			case PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL:
+				sPhysHeapAddr.uiAddr = ui64FwPhysHeapAddr;
+				ui64PhysHeapSize = ui64FwPhysHeapSize;
+				break;
+
+			default:
+				ui64PhysHeapSize = (IMG_UINT64)0;
+				break;
+		}
+
+		if (ui64PhysHeapSize)
+		{
+			eError = SysVzSetPhysHeapAddrSize(psDevConfig,
+											  ePhysHeap,
+											  eHeapType,
+											  sPhysHeapAddr,
+											  ui64PhysHeapSize);
+			PVR_ASSERT(eError == PVRSRV_OK);
+
+			eError = SysVzRegisterPhysHeap(psDevConfig, ePhysHeap);
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+	}
+
+e0:
+	PvzClientLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzClientDestroyDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							 IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	IMG_UINT32 uiFuncID = PVZ_BRIDGE_DESTROYDEVICEPHYSHEAPS;
+
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PvzClientLockAcquire();
+
+	PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnDestroyDevPhysHeaps);
+
+	eError = psVmmPvz->sHostFuncTab.pfnDestroyDevPhysHeaps(uiFuncID,
+														   ui32DevID);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+e0:
+	PvzClientLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						IMG_UINT32 ui32DevID,
+						IMG_DEV_PHYADDR sDevPAddr,
+						IMG_UINT64 ui64DevPSize)
+{
+	PVRSRV_ERROR eError;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	IMG_UINT32 uiFuncID = PVZ_BRIDGE_MAPDEVICEPHYSHEAP;
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PvzClientLockAcquire();
+
+	PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnMapDevPhysHeap);
+
+	eError = psVmmPvz->sHostFuncTab.pfnMapDevPhysHeap(uiFuncID,
+													  ui32DevID,
+													  ui64DevPSize,
+													  sDevPAddr.uiAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+e0:
+	PvzClientLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						  IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	IMG_UINT32 uiFuncID = PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP;
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PvzClientLockAcquire();
+
+	PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnUnmapDevPhysHeap);
+
+	eError = psVmmPvz->sHostFuncTab.pfnUnmapDevPhysHeap(uiFuncID,
+														ui32DevID);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+e0:
+	PvzClientLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	return eError;
+}
+
+/******************************************************************************
+ End of file (vmm_pvz_client.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_client.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_client.h
new file mode 100644
index 0000000..c91d28f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_client.h
@@ -0,0 +1,143 @@
+/*************************************************************************/ /*!
+@File           vmm_pvz_client.h
+@Title          Guest VM manager client para-virtualization routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header provides guest VMM client para-virtualization APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VMM_PVZ_CLIENT_H_
+#define _VMM_PVZ_CLIENT_H_
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "vmm_pvz_common.h"
+
+
+/*!
+******************************************************************************
+ @Function			PvzClientCreateDevConfig
+
+ @Description 		The guest front-end to initiate a pfnCreateDevConfig PVZ
+					call to the host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientCreateDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						 IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzClientDestroyDevConfig
+
+ @Description 		The guest front-end to initiate a pfnDestroyDevConfig PVZ
+					call to the host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientDestroyDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						  IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzClientCreateDevPhysHeaps
+
+ @Description 		The guest front-end to initiate a pfnCreateDevPhysHeaps PVZ
+					call to the host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientCreateDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzClientDestroyDevPhysHeaps
+
+ @Description 		The guest front-end to initiate a pfnDestroyDevPhysHeaps PVZ
+					call to the host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientDestroyDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							 IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzClientMapDevPhysHeap
+
+ @Description 		The guest front-end to initiate a pfnMapDevPhysHeap PVZ
+					call to the host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						IMG_UINT32 ui32DevID,
+						IMG_DEV_PHYADDR sDevPAddr,
+						IMG_UINT64 ui64DevPSize);
+
+/*!
+******************************************************************************
+ @Function			PvzClientUnmapDevPhysHeap
+
+ @Description 		The guest front-end to initiate a pfnUnmapDevPhysHeap PVZ
+					call to the host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						  IMG_UINT32 ui32DevID);
+
+#endif /* _VMM_PVZ_CLIENT_H_ */
+
+/*****************************************************************************
+ End of file (vmm_pvz_client.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_common.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_common.h
new file mode 100644
index 0000000..088d9be
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_common.h
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@File           vmm_pvz_common.h
+@Title          Common VM manager function IDs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header provides VM manager para-virtualization function IDs and
+                definitions of their payload structures, if appropriate.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef VMM_PVZ_COMMON_H
+#define VMM_PVZ_COMMON_H
+
+#include "img_types.h"
+
+#define PVZ_BRIDGE_DEFAULT					0UL
+#define PVZ_BRIDGE_CREATEDEVICECONFIG		(PVZ_BRIDGE_DEFAULT + 1)
+#define PVZ_BRIDGE_DESTROYDEVICECONFIG		(PVZ_BRIDGE_CREATEDEVICECONFIG  + 1)
+#define PVZ_BRIDGE_CREATEDEVICEPHYSHEAPS	(PVZ_BRIDGE_DESTROYDEVICECONFIG + 1)
+#define PVZ_BRIDGE_DESTROYDEVICEPHYSHEAPS	(PVZ_BRIDGE_CREATEDEVICEPHYSHEAPS  + 1)
+#define PVZ_BRIDGE_MAPDEVICEPHYSHEAP		(PVZ_BRIDGE_DESTROYDEVICEPHYSHEAPS + 1)
+#define PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP		(PVZ_BRIDGE_MAPDEVICEPHYSHEAP   + 1)
+#define PVZ_BRIDGE_LAST						(PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP + 1)
+
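+/* Illustrative sketch (not compiled): how a VM manager hypercall handler
+ * might dispatch these function IDs to the host-side PvzServer* entry points
+ * (see vmm_pvz_server.c). OSID determination and argument decoding are
+ * VM-manager specific and shown here as assumptions.
+ *
+ *	switch (ui32FuncID)
+ *	{
+ *		case PVZ_BRIDGE_MAPDEVICEPHYSHEAP:
+ *			eError = PvzServerMapDevPhysHeap(ui32OSID, ui32FuncID,
+ *			                                 ui32DevID, ui64Size, ui64PAddr);
+ *			break;
+ *
+ *		case PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP:
+ *			eError = PvzServerUnmapDevPhysHeap(ui32OSID, ui32FuncID, ui32DevID);
+ *			break;
+ *
+ *		default:
+ *			eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+ *			break;
+ *	}
+ */
+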
+typedef struct _PVZ_BRIDGEPARA_MAPDEVICEPHYSHEAP
+{
+	IMG_UINT64	ui64MemBase;
+	IMG_UINT32	ui32OSID;
+} PVZ_BRIDGEPARA_MAPDEVICEPHYSHEAP;
+
+#endif /* VMM_PVZ_COMMON_H */
+
+/*****************************************************************************
+ End of file (vmm_pvz_common.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_server.c
new file mode 100644
index 0000000..3f10681
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_server.c
@@ -0,0 +1,325 @@
+/*************************************************************************/ /*!
+@File			vmm_pvz_server.c
+@Title          VM manager server para-virtualization handlers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file provides the VMM server para-virtualization handler APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#include "vz_vm.h"
+#include "vmm_impl.h"
+#include "vz_vmm_pvz.h"
+#include "vz_support.h"
+#include "vmm_pvz_server.h"
+#include "vz_physheap.h"
+
+
+static inline void
+PvzServerLockAcquire(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	OSLockAcquire(psPVRSRVData->hPvzConnectionLock);
+}
+
+static inline void
+PvzServerLockRelease(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	OSLockRelease(psPVRSRVData->hPvzConnectionLock);
+}
+
+
+/*
+ * ===========================================================
+ *  The following server para-virtualization (pvz) functions
+ *  are exclusively called by the VM manager (hypervisor) on
+ *  behalf of guests to complete guest pvz calls
+ *  (guest -> vm manager -> host)
+ * ===========================================================
+ */
+
+PVRSRV_ERROR
+PvzServerCreateDevConfig(IMG_UINT32 ui32OSID,
+						 IMG_UINT32 ui32FuncID,
+						 IMG_UINT32 ui32DevID,
+						 IMG_UINT32 *pui32IRQ,
+						 IMG_UINT32 *pui32RegsSize,
+						 IMG_UINT64 *pui64RegsCpuPBase)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_CREATEDEVICECONFIG);
+
+	eError = SysVzIsVmOnline(ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzCreateDevConfig(ui32OSID,
+									 ui32DevID,
+									 pui32IRQ,
+									 pui32RegsSize,
+									 pui64RegsCpuPBase);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerDestroyDevConfig(IMG_UINT32 ui32OSID,
+						  IMG_UINT32 ui32FuncID,
+						  IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_DESTROYDEVICECONFIG);
+
+	eError = SysVzIsVmOnline(ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzDestroyDevConfig(ui32OSID, ui32DevID);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerCreateDevPhysHeaps(IMG_UINT32 ui32OSID,
+							IMG_UINT32 ui32FuncID,
+							IMG_UINT32  ui32DevID,
+							IMG_UINT32 *peHeapType,
+							IMG_UINT64 *pui64FwSize,
+							IMG_UINT64 *pui64FwAddr,
+							IMG_UINT64 *pui64GpuSize,
+							IMG_UINT64 *pui64GpuAddr)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_CREATEDEVICEPHYSHEAPS);
+
+	eError = SysVzIsVmOnline(ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzCreateDevPhysHeaps(ui32OSID,
+										ui32DevID,
+										peHeapType,
+										pui64FwSize,
+										pui64FwAddr,
+										pui64GpuSize,
+										pui64GpuAddr);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerDestroyDevPhysHeaps(IMG_UINT32 ui32OSID,
+							 IMG_UINT32 ui32FuncID,
+							 IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_DESTROYDEVICEPHYSHEAPS);
+
+	eError = SysVzIsVmOnline(ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzDestroyDevPhysHeaps(ui32OSID, ui32DevID);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID,
+						IMG_UINT32 ui32FuncID,
+						IMG_UINT32 ui32DevID,
+						IMG_UINT64 ui64Size,
+						IMG_UINT64 ui64PAddr)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	VMM_PVZ_CONNECTION *psVmmPvz = SysVzPvzConnectionAcquire();
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eOrigin = PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_LAST;
+
+	PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_MAPDEVICEPHYSHEAP);
+
+	eError = SysVzIsVmOnline(ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		/* Release the PVZ connection acquired above before bailing out */
+		SysVzPvzConnectionRelease(psVmmPvz);
+		return eError;
+	}
+
+	PvzServerLockAcquire();
+
+	eError = psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin(psPVRSRVData->psDeviceNodeList->psDevConfig,
+															  PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+															  &eOrigin);
+
+	if (eOrigin != PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+	{
+		/* Reject the hypercall if the host has an incompatible PVZ physheap
+		   origin configuration; here the guest has been configured with
+		   guest-origin but the host has not, and both must use the same origin */
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Host PVZ config does not match guest PVZ config\n"
+				"=>: pfnGetDevPhysHeapOrigin() differs between host and guest\n"
+				"=>: host and guest(s) must use the same FW physheap origin",
+				__func__));
+		eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+		goto e0;
+	}
+
+	eError = SysVzPvzRegisterFwPhysHeap(ui32OSID,
+										ui32DevID,
+										ui64Size,
+										ui64PAddr);
+
+e0:
+	PvzServerLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID,
+						  IMG_UINT32 ui32FuncID,
+						  IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP);
+
+	eError = SysVzIsVmOnline(ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzUnregisterFwPhysHeap(ui32OSID, ui32DevID);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+
+/*
+ * ============================================================
+ *  The following server para-virtualization (pvz) functions
+ *  are exclusively called by the VM manager (hypervisor) to
+ *  pass side band information to the host (vm manager -> host)
+ * ============================================================
+ */
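+
+/*
+ * Illustrative sketch (not part of this driver): a hypothetical VM manager
+ * might hook its guest lifecycle events into these entry points roughly as
+ * below (ExampleOnGuestBoot/ExampleOnGuestShutdown and ui32Prio are assumed
+ * names):
+ *
+ *   void ExampleOnGuestBoot(IMG_UINT32 ui32GuestOSID, IMG_UINT32 ui32Prio)
+ *   {
+ *       (void) PvzServerOnVmOnline(ui32GuestOSID, ui32Prio);
+ *   }
+ *
+ *   void ExampleOnGuestShutdown(IMG_UINT32 ui32GuestOSID)
+ *   {
+ *       (void) PvzServerOnVmOffline(ui32GuestOSID);
+ *   }
+ */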
+
+PVRSRV_ERROR
+PvzServerOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzOnVmOnline(ui32OSID, ui32Priority);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerOnVmOffline(IMG_UINT32 ui32OSID)
+{
+	PVRSRV_ERROR eError;
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzOnVmOffline(ui32OSID);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue)
+{
+	PVRSRV_ERROR eError;
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzVMMConfigure(eVMMParamType, ui32ParamValue);
+
+	PvzServerLockRelease();
+
+	return eError;
+
+}
+
+/******************************************************************************
+ End of file (vmm_pvz_server.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_server.h
new file mode 100644
index 0000000..7b4fc06
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_pvz_server.h
@@ -0,0 +1,205 @@
+/*************************************************************************/ /*!
+@File           vmm_pvz_server.h
+@Title          VM manager para-virtualization interface helper routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header provides API(s) available to VM manager, this must be
+                called to close the loop during guest para-virtualization calls.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VMM_PVZ_SERVER_H_
+#define _VMM_PVZ_SERVER_H_
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "vmm_pvz_common.h"
+
+
+/*!
+******************************************************************************
+ @Function			PvzServerCreateDevConfig
+
+ @Description 		The VM manager calls this in response to guest PVZ interface
+					call pfnCreateDevConfig.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerCreateDevConfig(IMG_UINT32 ui32OSID,
+						 IMG_UINT32 ui32FuncID,
+						 IMG_UINT32 ui32DevID,
+						 IMG_UINT32 *pui32IRQ,
+						 IMG_UINT32 *pui32RegsSize,
+						 IMG_UINT64 *pui64RegsCpuPBase);
+
+/*!
+******************************************************************************
+ @Function			PvzServerDestroyDevConfig
+
+ @Description 		The VM manager calls this in response to guest PVZ interface
+					call pfnDestroyDevConfig.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerDestroyDevConfig(IMG_UINT32 ui32OSID,
+						  IMG_UINT32 ui32FuncID,
+						  IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzServerCreateDevPhysHeaps
+
+ @Description 		The VM manager calls this in response to guest PVZ interface
+					call pfnCreateDevPhysHeaps.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerCreateDevPhysHeaps(IMG_UINT32 ui32OSID,
+							IMG_UINT32 ui32FuncID,
+							IMG_UINT32 ui32DevID,
+							IMG_UINT32 *peHeapType,
+							IMG_UINT64 *pui64FwSize,
+							IMG_UINT64 *pui64FwAddr,
+							IMG_UINT64 *pui64GpuSize,
+							IMG_UINT64 *pui64GpuAddr);
+
+/*!
+******************************************************************************
+ @Function			PvzServerDestroyDevPhysHeaps
+
+ @Description 		The VM manager calls this in response to guest PVZ interface
+					call pfnDestroyDevPhysHeaps.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerDestroyDevPhysHeaps(IMG_UINT32 ui32OSID,
+							 IMG_UINT32 ui32FuncID,
+							 IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzServerMapDevPhysHeap
+
+ @Description 		The VM manager calls this in response to guest PVZ interface
+					call pfnMapDevPhysHeap.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID,
+						IMG_UINT32 ui32FuncID,
+						IMG_UINT32 ui32DevID,
+						IMG_UINT64 ui64Size,
+						IMG_UINT64 ui64PAddr);
+
+/*!
+******************************************************************************
+ @Function			PvzServerUnmapDevPhysHeap
+
+ @Description 		The VM manager calls this in response to guest PVZ interface
+					call pfnUnmapDevPhysHeap.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID,
+						  IMG_UINT32 ui32FuncID,
+						  IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzServerOnVmOnline
+
+ @Description 		The VM manager calls this when a guest VM comes online.
+                    The host driver might initialize the FW if it has not
+					done so already.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority);
+
+/*!
+******************************************************************************
+ @Function			PvzServerOnVmOffline
+
+ @Description 		The VM manager calls this when a guest VM is about to go
+                    offline. The VM manager may have already unmapped the GPU
+					kick register for that VM, but must not remove its GPU
+					memory until this call returns. Once the call returns, the
+					FW holds no references to the VM and none of its workloads
+					are running on the GPU, so it is safe to remove its memory.
+
+ @Return			PVRSRV_OK on success. PVRSRV_ERROR_TIMEOUT if the FW
+                    takes too long to clean up the resources of the OSID.
+					Otherwise, a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerOnVmOffline(IMG_UINT32 ui32OSID);
+
+/*!
+******************************************************************************
+ @Function			PvzServerVMMConfigure
+
+ @Description 		The VM manager calls this to configure parameters such as
+                    HCS or isolation.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType,
+                      IMG_UINT32 ui32ParamValue);
+
+#endif /* _VMM_PVZ_SERVER_H_ */
+
+/*****************************************************************************
+ End of file (vmm_pvz_server.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_type_stub.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_type_stub.c
new file mode 100644
index 0000000..b8e5304
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vmm_type_stub.c
@@ -0,0 +1,227 @@
+/*************************************************************************/ /*!
+@File			vmm_type_stub.c
+@Title          Stub VM manager type
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Sample stub (no-operation) VM manager implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "rgxheapconfig.h"
+
+#include "vmm_impl.h"
+#include "vmm_pvz_server.h"
+
+static PVRSRV_ERROR
+StubVMMCreateDevConfig(IMG_UINT32 ui32FuncID,
+					   IMG_UINT32 ui32DevID,
+					   IMG_UINT32 *pui32IRQ,
+					   IMG_UINT32 *pui32RegsSize,
+					   IMG_UINT64 *pui64RegsCpuPBase)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	PVR_UNREFERENCED_PARAMETER(pui32IRQ);
+	PVR_UNREFERENCED_PARAMETER(pui32RegsSize);
+	PVR_UNREFERENCED_PARAMETER(pui64RegsCpuPBase);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMDestroyDevConfig(IMG_UINT32 ui32FuncID,
+						IMG_UINT32 ui32DevID)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMCreateDevPhysHeaps(IMG_UINT32 ui32FuncID,
+						  IMG_UINT32 ui32DevID,
+						  IMG_UINT32 *peType,
+						  IMG_UINT64 *pui64FwPhysHeapSize,
+						  IMG_UINT64 *pui64FwPhysHeapAddr,
+						  IMG_UINT64 *pui64GpuPhysHeapSize,
+						  IMG_UINT64 *pui64GpuPhysHeapAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	PVR_UNREFERENCED_PARAMETER(peType);
+	PVR_UNREFERENCED_PARAMETER(pui64FwPhysHeapSize);
+	PVR_UNREFERENCED_PARAMETER(pui64FwPhysHeapAddr);
+	PVR_UNREFERENCED_PARAMETER(pui64GpuPhysHeapSize);
+	PVR_UNREFERENCED_PARAMETER(pui64GpuPhysHeapAddr);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMDestroyDevPhysHeaps(IMG_UINT32 ui32FuncID,
+						   IMG_UINT32 ui32DevID)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMMapDevPhysHeap(IMG_UINT32 ui32FuncID,
+					  IMG_UINT32 ui32DevID,
+					  IMG_UINT64 ui64Size,
+					  IMG_UINT64 ui64Addr)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	PVR_UNREFERENCED_PARAMETER(ui64Size);
+	PVR_UNREFERENCED_PARAMETER(ui64Addr);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMUnmapDevPhysHeap(IMG_UINT32 ui32FuncID,
+						IMG_UINT32 ui32DevID)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMGetDevPhysHeapOrigin(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							PVRSRV_DEVICE_PHYS_HEAP eHeapType,
+							PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin)
+{
+	*peOrigin = PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST;
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+	PVR_UNREFERENCED_PARAMETER(eHeapType);
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+StubVMMGetDevPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							  PVRSRV_DEVICE_PHYS_HEAP eHeapType,
+							  IMG_UINT64 *pui64Size,
+							  IMG_UINT64 *pui64Addr)
+{
+	*pui64Size = 0;
+	*pui64Addr = 0;
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+	PVR_UNREFERENCED_PARAMETER(eHeapType);
+	return PVRSRV_OK;
+}
+
+static VMM_PVZ_CONNECTION gsStubVmmPvz =
+{
+	.sHostFuncTab = {
+		/* pfnCreateDevConfig */
+		&StubVMMCreateDevConfig,
+
+		/* pfnDestroyDevConfig */
+		&StubVMMDestroyDevConfig,
+
+		/* pfnCreateDevPhysHeaps */
+		&StubVMMCreateDevPhysHeaps,
+
+		/* pfnDestroyDevPhysHeaps */
+		&StubVMMDestroyDevPhysHeaps,
+
+		/* pfnMapDevPhysHeap */
+		&StubVMMMapDevPhysHeap,
+
+		/* pfnUnmapDevPhysHeap */
+		&StubVMMUnmapDevPhysHeap
+	},
+
+	.sGuestFuncTab = {
+		/* pfnCreateDevConfig */
+		&PvzServerCreateDevConfig,
+
+		/* pfnDestroyDevConfig */
+		&PvzServerDestroyDevConfig,
+
+		/* pfnCreateDevPhysHeaps */
+		&PvzServerCreateDevPhysHeaps,
+
+		/* pfnDestroyDevPhysHeaps */
+		&PvzServerDestroyDevPhysHeaps,
+
+		/* pfnMapDevPhysHeap */
+		&PvzServerMapDevPhysHeap,
+
+		/* pfnUnmapDevPhysHeap */
+		&PvzServerUnmapDevPhysHeap
+	},
+
+	.sConfigFuncTab = {
+		/* pfnGetDevPhysHeapOrigin */
+		&StubVMMGetDevPhysHeapOrigin,
+
+		/* pfnGetDevPhysHeapAddrSize */
+		&StubVMMGetDevPhysHeapAddrSize
+	},
+
+	.sVmmFuncTab = {
+		/* pfnOnVmOnline */
+		&PvzServerOnVmOnline,
+
+		/* pfnOnVmOffline */
+		&PvzServerOnVmOffline,
+
+		/* pfnVMMConfigure */
+		&PvzServerVMMConfigure
+	}
+};
+
+PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection)
+{
+	PVR_LOGR_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS);
+	*psPvzConnection = &gsStubVmmPvz;
+	PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type, no runtime VZ support"));
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
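+
+/* Note (assumption inferred from the code above): handing out the stub
+   connection while returning PVRSRV_ERROR_NOT_IMPLEMENTED presumably lets
+   platform code distinguish "no hypervisor transport available" from a hard
+   failure; every stub host entry likewise reports
+   PVRSRV_ERROR_NOT_IMPLEMENTED when called. */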
+
+void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection)
+{
+	PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection");
+}
+
+/******************************************************************************
+ End of file (vmm_type_stub.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_physheap.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_physheap.h
new file mode 100644
index 0000000..eb7758e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_physheap.h
@@ -0,0 +1,267 @@
+/*************************************************************************/ /*!
+@File           vz_physheap.h
+@Title          System virtualization physheap support APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides physheaps virtualization-specific APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VZ_PHYSHEAP_H_
+#define _VZ_PHYSHEAP_H_
+
+#include "pvrsrv.h"
+
+typedef enum _PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_
+{
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST  = 0,
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST = 1,
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_LAST
+} PVRSRV_DEVICE_PHYS_HEAP_ORIGIN;
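+
+/* HOST origin: the host driver provides the backing memory for the heap on
+   behalf of all OSIDs; GUEST origin: each guest provides its own backing
+   memory and registers it with the host over the PVZ interface (see
+   SysVzPvzRegisterFwPhysHeap below). Host and guest(s) must use the same
+   FW physheap origin. */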
+
+/*!
+******************************************************************************
+ @Function			SysVzGetPhysHeapAddrSize
+
+ @Description 		Get the address and size value of the specified device heap
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzGetPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									  PVRSRV_DEVICE_PHYS_HEAP eHeap,
+									  PHYS_HEAP_TYPE eType,
+									  IMG_DEV_PHYADDR *psAddr,
+									  IMG_UINT64 *pui64Size);
+
+/*!
+******************************************************************************
+ @Function			SysVzSetPhysHeapAddrSize
+
+ @Description 		Set physical heap configuration attributes
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR  SysVzSetPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									   PVRSRV_DEVICE_PHYS_HEAP eHeap,
+									   PHYS_HEAP_TYPE eType,
+									   IMG_DEV_PHYADDR sAddr,
+									   IMG_UINT64 ui64Size);
+
+/*!
+******************************************************************************
+ @Function			SysVzRegisterPhysHeap
+
+ @Description 		Registers heap with virtualization services
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzRegisterPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+								   PVRSRV_DEVICE_PHYS_HEAP eHeap);
+
+/*!
+******************************************************************************
+ @Function			SysVzDeregisterPhysHeap
+
+ @Description 		Deregister heap from virtualization services
+
+ @Return			void
+ ******************************************************************************/
+void SysVzDeregisterPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							 PVRSRV_DEVICE_PHYS_HEAP eHeap);
+
+
+/*!
+******************************************************************************
+ @Function			SysVzGetPhysHeapConfig
+
+ @Description 		Looks up the device physical heap configuration
+
+ @Return			PHYS_HEAP_CONFIG * on success. Otherwise, NULL
+ ******************************************************************************/
+PHYS_HEAP_CONFIG *SysVzGetPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+										 PVRSRV_DEVICE_PHYS_HEAP eHeap);
+
+/*!
+******************************************************************************
+ @Function			SysVzGetPhysHeapOrigin
+
+ @Description 		Identify which driver is responsible for allocating the
+					device physical heap backing memory
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzGetPhysHeapOrigin(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									PVRSRV_DEVICE_PHYS_HEAP eHeap,
+									PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin);
+
+/*!
+******************************************************************************
+ @Function			SysVzGetMemoryConfigPhysHeapType
+
+ @Description 		Get the platform memory configuration physical heap type
+
+ @Return			PHYS_HEAP_TYPE
+ ******************************************************************************/
+PHYS_HEAP_TYPE SysVzGetMemoryConfigPhysHeapType(void);
+
+/*!
+******************************************************************************
+ @Function			SysVzInitDevPhysHeaps
+
+ @Description 		Initialize device physical heaps
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzInitDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzDeInitDevPhysHeaps
+
+ @Description 		DeInitialize device physical heaps
+
+ @Return			void
+ ******************************************************************************/
+void SysVzDeInitDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzCreateDevPhysHeaps
+
+ @Description 		Create device physical heaps
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzCreateDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzDestroyDevPhysHeaps
+
+ @Description 		Destroy device physical heaps
+
+ @Return			void
+ ******************************************************************************/
+void SysVzDestroyDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzRegisterFwPhysHeap
+
+ @Description 		Maps VM relative physically contiguous memory into the
+ 	 	 	 	 	firmware kernel memory context
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzRegisterFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzUnregisterFwPhysHeap
+
+ @Description 		Unmaps VM relative physically contiguous memory from the
+ 	 	 	 	 	firmware kernel memory context
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzUnregisterFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig);
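+
+/* Assumed usage (sketch, inferred from vz_physheap_common.c): a system layer
+   would typically create the device physheaps at configuration time and
+   register the FW physheap once it is ready, with the matching unregister/
+   destroy calls on teardown, e.g.:
+
+     eError = SysVzCreateDevPhysHeaps(psDevConfig);
+     if (eError == PVRSRV_OK)
+     {
+         eError = SysVzRegisterFwPhysHeap(psDevConfig);
+     }
+     ...
+     (void) SysVzUnregisterFwPhysHeap(psDevConfig);
+     SysVzDestroyDevPhysHeaps(psDevConfig);
+*/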
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzCreateDevPhysHeaps
+
+ @Description 		Create guest device physical heaps
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzCreateDevPhysHeaps(IMG_UINT32 ui32OSID,
+										IMG_UINT32 ui32DevID,
+										IMG_UINT32 *peType,
+										IMG_UINT64 *pui64FwSize,
+										IMG_UINT64 *pui64FwAddr,
+										IMG_UINT64 *pui64GpuSize,
+										IMG_UINT64 *pui64GpuAddr);
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzDestroyDevPhysHeaps
+
+ @Description 		Destroy guest device physical heaps
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzDestroyDevPhysHeaps(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzRegisterFwPhysHeap
+
+ @Description 		Maps guest VM relative physically contiguous memory into
+					the firmware kernel memory context
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzRegisterFwPhysHeap(IMG_UINT32 ui32OSID,
+										IMG_UINT32 ui32DevID,
+										IMG_UINT64 ui64Size,
+										IMG_UINT64 ui64Addr);
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzUnregisterFwPhysHeap
+
+ @Description 		Unmaps guest VM relative physically contiguous memory from
+					the firmware kernel memory context
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzUnregisterFwPhysHeap(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID);
+
+#endif /* _VZ_PHYSHEAP_H_ */
+
+/*****************************************************************************
+ End of file (vz_physheap.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_physheap_common.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_physheap_common.c
new file mode 100644
index 0000000..c299eb2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_physheap_common.c
@@ -0,0 +1,590 @@
+/*************************************************************************/ /*!
+@File           vz_physheap_common.c
+@Title          System virtualization common physheap configuration API(s)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System virtualization common physical heap configuration API(s)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "allocmem.h"
+#include "img_defs.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+#include "pvrsrv_device.h"
+#include "rgxfwutils.h"
+
+#include "dma_support.h"
+#include "vz_support.h"
+#include "vz_vmm_pvz.h"
+#include "vz_physheap.h"
+#include "vmm_pvz_client.h"
+#include "vmm_impl.h"
+
+PVRSRV_ERROR SysVzCreateDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+
+	eError = SysVzGetPhysHeapOrigin(psDevConfig, eHeap, &eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+	if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+	{
+		eError = PvzClientCreateDevPhysHeaps(psDevConfig, 0);
+		eError = (eError == PVRSRV_ERROR_NOT_IMPLEMENTED) ? PVRSRV_OK : eError;
+		PVR_LOGG_IF_ERROR(eError, "PvzClientCreateDevPhysHeaps", e0);
+	}
+
+e0:
+	return eError;
+}
+
+void SysVzDestroyDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+
+	eError = SysVzGetPhysHeapOrigin(psDevConfig, eHeapType, &eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "PvzClientMapDevPhysHeap", e0);
+
+	if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+	{
+		eError = PvzClientDestroyDevPhysHeaps(psDevConfig, 0);
+		eError = (eError == PVRSRV_ERROR_NOT_IMPLEMENTED) ? PVRSRV_OK : eError;
+		PVR_LOGG_IF_ERROR(eError, "PvzClientDestroyDevPhysHeaps", e0);
+	}
+
+e0:
+	return;
+}
+
+PVRSRV_ERROR SysVzRegisterFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+
+	eError = SysVzGetPhysHeapOrigin(psDevConfig, eHeap, &eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+	if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+	{
+		PHYS_HEAP_CONFIG *psPhysHeapConfig;
+		IMG_DEV_PHYADDR sDevPAddr;
+		IMG_UINT64 ui64DevPSize;
+
+		psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, eHeap);
+		PVR_LOGR_IF_FALSE((NULL != psPhysHeapConfig), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INVALID_PARAMS);
+		PVR_LOGR_IF_FALSE((NULL != psPhysHeapConfig->pasRegions), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INTERNAL_ERROR);
+
+		if (psPhysHeapConfig->psMemFuncs->pfnCpuPAddrToDevPAddr != NULL)
+		{
+			psPhysHeapConfig->psMemFuncs->pfnCpuPAddrToDevPAddr(psPhysHeapConfig->hPrivData,
+																1,
+																&sDevPAddr,
+																&psPhysHeapConfig->pasRegions[0].sStartAddr);
+		}
+		else
+		{
+			sDevPAddr.uiAddr = psPhysHeapConfig->pasRegions[0].sStartAddr.uiAddr;
+		}
+
+		PVR_LOGR_IF_FALSE((0 != sDevPAddr.uiAddr), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INVALID_PARAMS);
+		ui64DevPSize = psPhysHeapConfig->pasRegions[0].uiSize;
+		PVR_LOGR_IF_FALSE((0 != ui64DevPSize), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INVALID_PARAMS);
+
+		eError = PvzClientMapDevPhysHeap(psDevConfig, 0, sDevPAddr, ui64DevPSize);
+		PVR_LOGG_IF_ERROR(eError, "PvzClientMapDevPhysHeap", e0);
+	}
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR SysVzUnregisterFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+
+	eError = SysVzGetPhysHeapOrigin(psDevConfig, eHeapType, &eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "PvzClientMapDevPhysHeap", e0);
+
+	if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+	{
+		eError = PvzClientUnmapDevPhysHeap(psDevConfig, 0);
+		PVR_LOGG_IF_ERROR(eError, "PvzClientMapDevPhysHeap", e0);
+	}
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR SysVzRegisterPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+								   PVRSRV_DEVICE_PHYS_HEAP eHeap)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PHYS_HEAP_CONFIG *psPhysHeapConfig;
+	PVR_LOGR_IF_FALSE((eHeap < PVRSRV_DEVICE_PHYS_HEAP_LAST), "Invalid Heap", PVRSRV_ERROR_INVALID_PARAMS);
+	PVR_LOGR_IF_FALSE((eHeap != PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL), "Skipping CPU local heap registration", PVRSRV_OK);
+
+	/* Currently we only support GPU/FW DMA physheap registration */
+	psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, eHeap);
+	PVR_LOGR_IF_FALSE((NULL != psPhysHeapConfig), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INVALID_PARAMS);
+
+	if (psPhysHeapConfig &&
+		psPhysHeapConfig->pasRegions &&
+		psPhysHeapConfig->pasRegions[0].hPrivData)
+	{
+		DMA_ALLOC *psDmaAlloc;
+
+		if (psPhysHeapConfig->eType == PHYS_HEAP_TYPE_DMA)
+		{
+			/* DMA physheaps have quirks on some OS environments */
+			psDmaAlloc = psPhysHeapConfig->pasRegions[0].hPrivData;
+			eError = SysDmaRegisterForIoRemapping(psDmaAlloc);
+			PVR_LOG_IF_ERROR(eError, "SysDmaRegisterForIoRemapping");
+		}
+	}
+
+	return eError;
+}
+
+void SysVzDeregisterPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							 PVRSRV_DEVICE_PHYS_HEAP eHeapType)
+{
+	PHYS_HEAP_CONFIG *psPhysHeapConfig;
+
+	if (eHeapType == PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL ||
+		eHeapType >= PVRSRV_DEVICE_PHYS_HEAP_LAST)
+	{
+		return;
+	}
+
+	/* Currently we only support GPU/FW physheap deregistration */
+	psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, eHeapType);
+	PVR_LOG_IF_FALSE((psPhysHeapConfig!=NULL), "SysVzGetPhysHeapConfig");
+
+	if (psPhysHeapConfig &&
+		psPhysHeapConfig->pasRegions &&
+		psPhysHeapConfig->pasRegions[0].hPrivData)
+	{
+		DMA_ALLOC *psDmaAlloc;
+
+		if (psPhysHeapConfig->eType == PHYS_HEAP_TYPE_DMA)
+		{
+			psDmaAlloc = psPhysHeapConfig->pasRegions[0].hPrivData;
+			SysDmaDeregisterForIoRemapping(psDmaAlloc);
+		}
+	}
+
+}
+
+PHYS_HEAP_CONFIG *SysVzGetPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+										 PVRSRV_DEVICE_PHYS_HEAP eHeapType)
+{
+	IMG_UINT uiIdx;
+	IMG_UINT32 ui32PhysHeapID;
+	IMG_UINT32 ui32PhysHeapCount;
+	PHYS_HEAP_CONFIG *psPhysHeap;
+	PHYS_HEAP_CONFIG *ps1stPhysHeap = &psDevConfig->pasPhysHeaps[0];
+
+	if (eHeapType == PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL)
+	{
+		return ps1stPhysHeap;
+	}
+
+	/* Initialise here to catch lookup failures */
+	ui32PhysHeapCount = psDevConfig->ui32PhysHeapCount;
+	psPhysHeap = NULL;
+
+	if (eHeapType < PVRSRV_DEVICE_PHYS_HEAP_LAST)
+	{
+		/* Lookup ID of the physheap and get a pointer structure */
+		ui32PhysHeapID = psDevConfig->aui32PhysHeapID[eHeapType];
+		for (uiIdx = 1; uiIdx < ui32PhysHeapCount; uiIdx++)
+		{
+			if (ps1stPhysHeap[uiIdx].ui32PhysHeapID == ui32PhysHeapID)
+			{
+				psPhysHeap = &ps1stPhysHeap[uiIdx];
+				break;
+			}
+		}
+	}
+	PVR_LOG_IF_FALSE((psPhysHeap != NULL), "eHeapType >= PVRSRV_DEVICE_PHYS_HEAP_LAST");
+
+	return psPhysHeap;
+}
+
+PVRSRV_ERROR  SysVzSetPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									   PVRSRV_DEVICE_PHYS_HEAP ePhysHeap,
+									   PHYS_HEAP_TYPE eHeapType,
+									   IMG_DEV_PHYADDR sPhysHeapAddr,
+									   IMG_UINT64 ui64PhysHeapSize)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+	PHYS_HEAP_CONFIG *psPhysHeapConfig;
+
+	psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, ePhysHeap);
+	PVR_LOGR_IF_FALSE((psPhysHeapConfig != NULL), "Invalid PhysHeapConfig", eError);
+	PVR_LOGR_IF_FALSE((ui64PhysHeapSize != 0), "Invalid PhysHeapSize", eError);
+
+	if (eHeapType == PHYS_HEAP_TYPE_UMA || eHeapType == PHYS_HEAP_TYPE_LMA)
+	{
+		/* At this junction, we _may_ initialise new state */
+		PVR_ASSERT(sPhysHeapAddr.uiAddr  && ui64PhysHeapSize);
+
+		if (psPhysHeapConfig->pasRegions == NULL)
+		{
+			psPhysHeapConfig->pasRegions = OSAllocZMem(sizeof(PHYS_HEAP_REGION));
+			if (psPhysHeapConfig->pasRegions == NULL)
+			{
+				return PVRSRV_ERROR_OUT_OF_MEMORY;
+			}
+
+			PVR_ASSERT(! psPhysHeapConfig->bDynAlloc);
+			psPhysHeapConfig->bDynAlloc = IMG_TRUE;
+			psPhysHeapConfig->ui32NumOfRegions++;
+		}
+
+		if (eHeapType == PHYS_HEAP_TYPE_UMA)
+		{
+			psPhysHeapConfig->pasRegions[0].sCardBase = sPhysHeapAddr;
+		}
+
+		psPhysHeapConfig->pasRegions[0].sStartAddr.uiAddr = sPhysHeapAddr.uiAddr;
+		psPhysHeapConfig->pasRegions[0].uiSize = ui64PhysHeapSize;
+		psPhysHeapConfig->eType = eHeapType;
+
+		eError = PVRSRV_OK;
+	}
+
+	PVR_LOG_IF_ERROR(eError, "SysVzSetPhysHeapAddrSize");
+	return eError;
+}
+
+PVRSRV_ERROR SysVzGetPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									  PVRSRV_DEVICE_PHYS_HEAP ePhysHeap,
+									  PHYS_HEAP_TYPE eHeapType,
+									  IMG_DEV_PHYADDR *psAddr,
+									  IMG_UINT64 *pui64Size)
+{
+	IMG_UINT64 uiAddr;
+	PVRSRV_ERROR eError;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+
+	PVR_UNREFERENCED_PARAMETER(eHeapType);
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PVR_ASSERT(psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapAddrSize);
+
+	eError = psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapAddrSize(psDevConfig,
+																ePhysHeap,
+																pui64Size,
+																&uiAddr);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: VMM/PVZ pfnGetDevPhysHeapAddrSize() must be implemented (%s)",
+					 __func__,
+					 PVRSRVGetErrorString(eError)));
+		}
+
+		goto e0;
+	}
+
+	psAddr->uiAddr = uiAddr;
+e0:
+	SysVzPvzConnectionRelease(psVmmPvz);
+	return eError;
+}
+
+PVRSRV_ERROR SysVzGetPhysHeapOrigin(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									PVRSRV_DEVICE_PHYS_HEAP eHeap,
+									PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin)
+{
+	PVRSRV_ERROR eError;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+
+	*peOrigin = PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_LAST;
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PVR_ASSERT(psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin);
+
+	eError = psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin(psDevConfig,
+															  eHeap,
+															  peOrigin);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: VMM/PVZ pfnGetDevPhysHeapOrigin() must be implemented (%s)",
+					__func__,
+					PVRSRVGetErrorString(eError)));
+		}
+
+		goto e0;
+	}
+
+e0:
+	SysVzPvzConnectionRelease(psVmmPvz);
+	return eError;
+}
+
+PVRSRV_ERROR SysVzPvzCreateDevPhysHeaps(IMG_UINT32 ui32OSID,
+										IMG_UINT32 ui32DevID,
+										IMG_UINT32 *pePhysHeapType,
+										IMG_UINT64 *pui64FwPhysHeapSize,
+										IMG_UINT64 *pui64FwPhysHeapAddr,
+										IMG_UINT64 *pui64GpuPhysHeapSize,
+										IMG_UINT64 *pui64GpuPhysHeapAddr)
+{
+	IMG_UINT64 uiHeapSize;
+	IMG_DEV_PHYADDR sCardBase;
+	IMG_CPU_PHYADDR sStartAddr;
+	PHYS_HEAP_CONFIG *psPhysHeap;
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	PVRSRV_DEVICE_PHYS_HEAP ePhysHeap;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+	PVR_LOGR_IF_FALSE((ui32DevID == 0), "Invalid Device ID", eError);
+	PVR_LOGR_IF_FALSE((psPVRSRVData != NULL), "Invalid PVRSRVData", eError);
+	PVR_LOGR_IF_FALSE((ui32OSID > 0 && ui32OSID < RGXFW_NUM_OS), "Invalid OSID", eError);
+
+	/* For now, limit support to single device setups */
+	psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	psDevConfig = psDeviceNode->psDevConfig;
+
+	/* Default is a kernel managed UMA
+	   physheap memory configuration */
+	*pui64FwPhysHeapSize = (IMG_UINT64)0;
+	*pui64FwPhysHeapAddr = (IMG_UINT64)0;
+	*pui64GpuPhysHeapSize = (IMG_UINT64)0;
+	*pui64GpuPhysHeapAddr = (IMG_UINT64)0;
+
+	*pePhysHeapType = (IMG_UINT32) SysVzGetMemoryConfigPhysHeapType();
+	for (ePhysHeap = 0; ePhysHeap < PVRSRV_DEVICE_PHYS_HEAP_LAST; ePhysHeap++)
+	{
+		switch (ePhysHeap)
+		{
+			/* Only interested in these physheaps */
+			case PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL:
+			case PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL:
+				{
+					eError = SysVzGetPhysHeapOrigin(psDevConfig,
+													ePhysHeap,
+													&eHeapOrigin);
+					PVR_LOGR_IF_ERROR(eError, "SysVzGetPhysHeapOrigin");
+
+					if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+					{
+						continue;
+					}
+				}
+				break;
+
+			default:
+				continue;
+		}
+
+		/* Determine what type of physheap backs this phyconfig */
+		psPhysHeap = SysVzGetPhysHeapConfig(psDevConfig, ePhysHeap);
+		if (psPhysHeap && psPhysHeap->pasRegions)
+		{
+			/* Services managed physheap (LMA/UMA-carve-out/DMA) */
+			sStartAddr = psPhysHeap->pasRegions[0].sStartAddr;
+			sCardBase = psPhysHeap->pasRegions[0].sCardBase;
+			uiHeapSize = psPhysHeap->pasRegions[0].uiSize;
+
+			if (! uiHeapSize)
+			{
+				/* UMA (i.e. non carve-out), don't re-base so skip */
+				PVR_ASSERT(!sStartAddr.uiAddr && !sCardBase.uiAddr);
+				continue;
+			}
+
+			/* Rebase this guest OSID physical heap */
+			sStartAddr.uiAddr += ui32OSID * uiHeapSize;
+			sCardBase.uiAddr += ui32OSID * uiHeapSize;
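+
+			/* Illustrative numbers: with a 64 MB per-OSID heap based at
+			   0x80000000, OSID 1 is rebased to 0x84000000 and OSID 2 to
+			   0x88000000, i.e. base + OSID * size. */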
+
+			switch (ePhysHeap)
+			{
+				case PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL:
+					*pui64GpuPhysHeapSize = uiHeapSize;
+					*pui64GpuPhysHeapAddr = sStartAddr.uiAddr;
+					break;
+
+				case PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL:
+					*pui64FwPhysHeapSize = uiHeapSize;
+					*pui64FwPhysHeapAddr = sStartAddr.uiAddr;
+					break;
+
+				default:
+					PVR_ASSERT(0);
+					break;
+			}
+		}
+		else
+		{
+#if defined(DEBUG)
+			eError = SysVzGetPhysHeapOrigin(psDevConfig,
+											ePhysHeap,
+											&eHeapOrigin);
+			PVR_LOGR_IF_ERROR(eError, "SysVzGetPhysHeapOrigin");
+
+			if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+			{
+				PVR_ASSERT(ePhysHeap != PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL);
+			}
+#endif
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysVzPvzDestroyDevPhysHeaps(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32OSID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysVzPvzRegisterFwPhysHeap(IMG_UINT32 ui32OSID,
+										IMG_UINT32 ui32DevID,
+										IMG_UINT64 ui64Size,
+										IMG_UINT64 ui64PAddr)
+{
+	PVRSRV_DEVICE_NODE* psDeviceNode;
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+	PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	PVR_LOGR_IF_FALSE((ui32DevID == 0), "Invalid Device ID", eError);
+	PVR_LOGR_IF_FALSE((psPVRSRVData != NULL), "Invalid PVRSRVData", eError);
+
+	psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	psDevConfig = psDeviceNode->psDevConfig;
+
+	eError = SysVzGetPhysHeapOrigin(psDevConfig,
+									eHeapType,
+									&eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+#if defined(SUPPORT_RGX)
+	if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+	{
+		IMG_DEV_PHYADDR sDevPAddr = {ui64PAddr};
+		eError = RGXVzRegisterFirmwarePhysHeap(psDeviceNode,
+											   ui32OSID,
+											   sDevPAddr,
+											   ui64Size);
+		PVR_LOGG_IF_ERROR(eError, "RGXVzRegisterFirmwarePhysHeap", e0);
+
+		if (eError == PVRSRV_OK)
+		{
+			/* Invalidate MMU cache in preparation for a kick from this Guest */
+			IMG_UINT16 ui16Sync;
+			eError = psDeviceNode->pfnMMUCacheInvalidateKick(psDeviceNode, &ui16Sync, IMG_TRUE);
+			PVR_LOGG_IF_ERROR(eError, "pfnMMUCacheInvalidateKick", e0);
+
+			if (eError == PVRSRV_OK)
+			{
+				/* Everything is ready for the firmware to start interacting with this OS */
+				PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+				eError = RGXFWSetFwOsState(psDevInfo, ui32OSID, RGXFWIF_OS_ONLINE);
+			}
+		}
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(ui32OSID);
+	PVR_UNREFERENCED_PARAMETER(ui64Size);
+	PVR_UNREFERENCED_PARAMETER(ui64PAddr);
+#endif
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR SysVzPvzUnregisterFwPhysHeap(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	PVR_LOGR_IF_FALSE((ui32DevID == 0), "Invalid Device ID", eError);
+	PVR_LOGR_IF_FALSE((psPVRSRVData != NULL), "Invalid PVRSRVData", eError);
+
+	psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	psDevConfig = psDeviceNode->psDevConfig;
+
+	eError = SysVzGetPhysHeapOrigin(psDevConfig,
+									eHeap,
+									&eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+#if defined(SUPPORT_RGX)
+	if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+	{
+		psDeviceNode = psPVRSRVData->psDeviceNodeList;
+		eError = RGXVzUnregisterFirmwarePhysHeap(psDeviceNode, ui32OSID);
+		PVR_LOG_IF_ERROR(eError, "RGXVzUnregisterFirmwarePhysHeap");
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(ui32OSID);
+#endif
+
+e0:
+	return eError;
+}
+
+/******************************************************************************
+ End of file (vz_physheap_common.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_physheap_generic.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_physheap_generic.c
new file mode 100644
index 0000000..3b85514
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_physheap_generic.c
@@ -0,0 +1,404 @@
+/*************************************************************************/ /*!
+@File           vz_physheap_generic.c
+@Title          System virtualization physheap configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System virtualization physical heap configuration
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "allocmem.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+#include "pvrsrv_device.h"
+#include "rgxfwutils.h"
+
+#include "dma_support.h"
+#include "vz_support.h"
+#include "vz_vmm_pvz.h"
+#include "vz_physheap.h"
+
+#if defined(CONFIG_L4)
+static IMG_HANDLE gahPhysHeapIoRemap[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+#endif
+
+static PVRSRV_ERROR
+SysVzCreateDmaPhysHeap(PHYS_HEAP_CONFIG *psPhysHeapConfig)
+{
+	PVRSRV_ERROR eError;
+	DMA_ALLOC *psDmaAlloc;
+	PHYS_HEAP_REGION *psPhysHeapRegion;
+
+	psPhysHeapRegion = &psPhysHeapConfig->pasRegions[0];
+	PVR_LOGR_IF_FALSE((NULL != psPhysHeapRegion->hPrivData), "DMA physheap already created", PVRSRV_ERROR_INVALID_PARAMS);
+
+	psDmaAlloc = (DMA_ALLOC*)psPhysHeapRegion->hPrivData;
+	psDmaAlloc->ui64Size = psPhysHeapRegion->uiSize;
+
+	eError = SysDmaAllocMem(psDmaAlloc);
+	if (eError != PVRSRV_OK)
+	{
+		psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+	}
+	else
+	{
+		psPhysHeapRegion->sStartAddr.uiAddr = psDmaAlloc->sBusAddr.uiAddr;
+		psPhysHeapRegion->sCardBase.uiAddr = psDmaAlloc->sBusAddr.uiAddr;
+		psPhysHeapConfig->eType = PHYS_HEAP_TYPE_DMA;
+	}
+
+	return eError;
+}
+
+static void
+SysVzDestroyDmaPhysHeap(PHYS_HEAP_CONFIG *psPhysHeapConfig)
+{
+	DMA_ALLOC *psDmaAlloc;
+	PHYS_HEAP_REGION *psPhysHeapRegion;
+
+	psPhysHeapRegion = &psPhysHeapConfig->pasRegions[0];
+	psDmaAlloc = (DMA_ALLOC*)psPhysHeapRegion->hPrivData;
+
+	if (psDmaAlloc != NULL)
+	{
+		PVR_LOG_IF_FALSE((0 != psPhysHeapRegion->sStartAddr.uiAddr), "Invalid DMA physheap start address");
+		PVR_LOG_IF_FALSE((0 != psPhysHeapRegion->sCardBase.uiAddr), "Invalid DMA physheap card address");
+		PVR_LOG_IF_FALSE((0 != psPhysHeapRegion->uiSize), "Invalid DMA physheap size");
+
+		SysDmaFreeMem(psDmaAlloc);
+
+		psPhysHeapRegion->sCardBase.uiAddr = 0;
+		psPhysHeapRegion->sStartAddr.uiAddr = 0;
+		psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+	}
+}
+
+static PVRSRV_ERROR
+SysVzCreatePhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+					PVRSRV_DEVICE_PHYS_HEAP ePhysHeap)
+{
+	IMG_DEV_PHYADDR sHeapAddr;
+	IMG_UINT64 ui64HeapSize = 0;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PHYS_HEAP_REGION *psPhysHeapRegion;
+	PHYS_HEAP_CONFIG *psPhysHeapConfig;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+
+	/* Lookup GPU/FW physical heap config, allocate primary region */
+	psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, ePhysHeap);
+	PVR_LOGR_IF_FALSE((NULL != psPhysHeapConfig), "Invalid physheap config", PVRSRV_ERROR_INVALID_PARAMS);
+
+	if (psPhysHeapConfig->pasRegions == NULL)
+	{
+		psPhysHeapConfig->pasRegions = OSAllocZMem(sizeof(PHYS_HEAP_REGION));
+		PVR_LOGG_IF_NOMEM(psPhysHeapConfig->pasRegions, "OSAllocZMem", eError, e0);
+
+		PVR_ASSERT(! psPhysHeapConfig->bDynAlloc);
+		psPhysHeapConfig->bDynAlloc = IMG_TRUE;
+		psPhysHeapConfig->ui32NumOfRegions++;
+	}
+
+	if (psPhysHeapConfig->pasRegions[0].hPrivData == NULL)
+	{
+		DMA_ALLOC *psDmaAlloc = OSAllocZMem(sizeof(DMA_ALLOC));
+		PVR_LOGG_IF_NOMEM(psDmaAlloc, "OSAllocZMem", eError, e0);
+
+		psDmaAlloc->pvOSDevice = psDevConfig->pvOSDevice;
+		psPhysHeapConfig->pasRegions[0].hPrivData = psDmaAlloc;
+	}
+
+	/* Lookup physheap addr/size from VM manager type */
+	eError = SysVzGetPhysHeapAddrSize(psDevConfig,
+									  ePhysHeap,
+									  PHYS_HEAP_TYPE_UMA,
+									  &sHeapAddr,
+									  &ui64HeapSize);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapAddrSize", e0);
+
+	/* Initialise physical heap and region state */
+	psPhysHeapRegion = &psPhysHeapConfig->pasRegions[0];
+	psPhysHeapRegion->sStartAddr.uiAddr = sHeapAddr.uiAddr;
+	psPhysHeapRegion->sCardBase.uiAddr = sHeapAddr.uiAddr;
+	psPhysHeapRegion->uiSize = ui64HeapSize;
+
+	if (ePhysHeap == PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL)
+	{
+		/* Firmware physheaps require additional init */
+		psPhysHeapConfig->pszPDumpMemspaceName = "SYSMEM";
+		psPhysHeapConfig->psMemFuncs =
+				psDevConfig->pasPhysHeaps[0].psMemFuncs;
+	}
+
+	/* Which driver is responsible for allocating the
+	   physical memory backing the device physheap */
+	eError = SysVzGetPhysHeapOrigin(psDevConfig,
+									ePhysHeap,
+									&eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+	if (psPhysHeapRegion->sStartAddr.uiAddr == 0)
+	{
+		if (psPhysHeapRegion->uiSize)
+		{
+			if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+			{
+				/* Scale DMA size by the number of OSIDs */
+				psPhysHeapRegion->uiSize *= RGXFW_NUM_OS;
+			}
+
+			eError = SysVzCreateDmaPhysHeap(psPhysHeapConfig);
+			PVR_LOGG_IF_ERROR(eError, "SysVzCreateDmaPhysHeap", e0);
+
+			/* Verify the validity of DMA physheap region */
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->sStartAddr.uiAddr), "Invalid DMA physheap start address", e0);
+			PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->sCardBase.uiAddr), "Invalid DMA physheap card address", e0);
+			PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->uiSize), "Invalid DMA physheap size", e0);
+			eError = PVRSRV_OK;
+
+			/* Services managed DMA physheap setup complete */
+			psPhysHeapConfig->eType = PHYS_HEAP_TYPE_DMA;
+
+			/* Only the PHYS_HEAP_TYPE_DMA should be registered */
+			eError = SysVzRegisterPhysHeap(psDevConfig, ePhysHeap);
+			PVR_LOGG_IF_ERROR(eError, "SysVzRegisterPhysHeap", e0);
+
+			if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+			{
+				/* Restore original physheap size */
+				psPhysHeapRegion->uiSize /= RGXFW_NUM_OS;
+			}
+		}
+		else
+		{
+			if (psPhysHeapConfig->pasRegions[0].hPrivData)
+			{
+				OSFreeMem(psPhysHeapConfig->pasRegions[0].hPrivData);
+				psPhysHeapConfig->pasRegions[0].hPrivData = NULL;
+			}
+
+			if (psPhysHeapConfig->bDynAlloc)
+			{
+				OSFreeMem(psPhysHeapConfig->pasRegions);
+				psPhysHeapConfig->pasRegions = NULL;
+				psPhysHeapConfig->ui32NumOfRegions--;
+				psPhysHeapConfig->bDynAlloc = IMG_FALSE;
+				PVR_LOGG_IF_FALSE((psPhysHeapConfig->ui32NumOfRegions == 0), "Invalid refcount", e0);
+			}
+
+			if (ePhysHeap == PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL)
+			{
+				/* Using UMA physheaps for FW has pre-conditions, verify */
+				if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							"%s: %s PVZ config: Invalid firmware physheap config\n"
+							"=>: HOST origin (i.e. static) VZ setups require a non-UMA FW physheap specification.",
+							__func__,
+							PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST) ? "Host" : "Guest"));
+					eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+				}
+			}
+
+			/* Kernel managed UMA physheap setup complete */
+			psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+		}
+	}
+	else
+	{
+		/* Verify the validity of the UMA carve-out physheap region */
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->sStartAddr.uiAddr), "Invalid UMA carve-out physheap start address", e0);
+		PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->sCardBase.uiAddr), "Invalid UMA carve-out physheap card address", e0);
+		PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->uiSize), "Invalid UMA carve-out physheap size", e0);
+		eError = PVRSRV_OK;
+
+		if (psPhysHeapConfig->pasRegions[0].hPrivData)
+		{
+			/* Need regions but don't require the DMA priv. data */
+			OSFreeMem(psPhysHeapConfig->pasRegions[0].hPrivData);
+			psPhysHeapConfig->pasRegions[0].hPrivData = NULL;
+		}
+
+#if defined(CONFIG_L4)
+		/* On Fiasco.OC/l4linux, ioremap physheap now */
+		gahPhysHeapIoRemap[ePhysHeap] =
+						OSMapPhysToLin(psPhysHeapRegion->sStartAddr,
+									   psPhysHeapRegion->uiSize,
+									   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+		PVR_LOGG_IF_FALSE((NULL != gahPhysHeapIoRemap[ePhysHeap]), "OSMapPhysToLin", e0);
+#endif
+
+		/* Services managed UMA carve-out physheap setup complete */
+		psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+	}
+
+	return eError;
+
+e0:
+	if (psPhysHeapConfig->pasRegions)
+	{
+		SysVzDeregisterPhysHeap(psDevConfig, ePhysHeap);
+
+		if (psPhysHeapConfig->pasRegions[0].hPrivData)
+		{
+			OSFreeMem(psPhysHeapConfig->pasRegions[0].hPrivData);
+			psPhysHeapConfig->pasRegions[0].hPrivData = NULL;
+		}
+
+		if (psPhysHeapConfig->bDynAlloc)
+		{
+			OSFreeMem(psPhysHeapConfig->pasRegions);
+			psPhysHeapConfig->pasRegions = NULL;
+			psPhysHeapConfig->ui32NumOfRegions--;
+			psPhysHeapConfig->bDynAlloc = IMG_FALSE;
+			PVR_LOG_IF_FALSE((psPhysHeapConfig->ui32NumOfRegions == 0), "Invalid refcount");
+		}
+	}
+
+	return eError;
+}
+
+static void
+SysVzDestroyPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+					 PVRSRV_DEVICE_PHYS_HEAP ePhysHeap)
+{
+	PHYS_HEAP_CONFIG *psPhysHeapConfig;
+
+	SysVzDeregisterPhysHeap(psDevConfig, ePhysHeap);
+
+	psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, ePhysHeap);
+	if (psPhysHeapConfig == NULL ||
+		psPhysHeapConfig->pasRegions == NULL)
+	{
+		return;
+	}
+
+#if defined(CONFIG_L4)
+	if (gahPhysHeapIoRemap[ePhysHeap] != NULL)
+	{
+		OSUnMapPhysToLin(gahPhysHeapIoRemap[ePhysHeap],
+						psPhysHeapConfig->pasRegions[0].uiSize,
+						PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+	}
+
+	gahPhysHeapIoRemap[ePhysHeap] = NULL;
+#endif
+
+	if (psPhysHeapConfig->pasRegions[0].hPrivData)
+	{
+		SysVzDestroyDmaPhysHeap(psPhysHeapConfig);
+		OSFreeMem(psPhysHeapConfig->pasRegions[0].hPrivData);
+		psPhysHeapConfig->pasRegions[0].hPrivData = NULL;
+	}
+
+	if (psPhysHeapConfig->bDynAlloc)
+	{
+		OSFreeMem(psPhysHeapConfig->pasRegions);
+		psPhysHeapConfig->pasRegions = NULL;
+		psPhysHeapConfig->ui32NumOfRegions--;
+		psPhysHeapConfig->bDynAlloc = IMG_FALSE;
+		PVR_LOG_IF_FALSE((psPhysHeapConfig->ui32NumOfRegions == 0), "Invalid refcount");
+	}
+}
+
+static PVRSRV_ERROR
+SysVzCreateGpuPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL;
+	return SysVzCreatePhysHeap(psDevConfig, eHeap);
+}
+
+static void
+SysVzDestroyGpuPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL;
+	SysVzDestroyPhysHeap(psDevConfig, eHeap);
+}
+
+static PVRSRV_ERROR
+SysVzCreateFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	return SysVzCreatePhysHeap(psDevConfig, eHeap);
+}
+
+static void
+SysVzDestroyFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	SysVzDestroyPhysHeap(psDevConfig, eHeap);
+}
+
+PHYS_HEAP_TYPE SysVzGetMemoryConfigPhysHeapType(void)
+{
+	return PHYS_HEAP_TYPE_UMA;
+}
+
+PVRSRV_ERROR SysVzInitDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+
+	eError = SysVzCreateFwPhysHeap(psDevConfig);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = SysVzCreateGpuPhysHeap(psDevConfig);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	return eError;
+}
+
+void SysVzDeInitDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	SysVzDestroyGpuPhysHeap(psDevConfig);
+	SysVzDestroyFwPhysHeap(psDevConfig);
+}
+
+/******************************************************************************
+ End of file (vz_physheap_generic.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_support.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_support.c
new file mode 100644
index 0000000..39f223b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_support.c
@@ -0,0 +1,342 @@
+/*************************************************************************/ /*!
+@File           vz_support.c
+@Title          System virtualization configuration setup
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System virtualization configuration support API(s)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "allocmem.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+
+#include "dma_support.h"
+#include "vz_support.h"
+#include "vz_vmm_pvz.h"
+#include "vz_physheap.h"
+#include "vmm_pvz_client.h"
+#include "vmm_pvz_server.h"
+
+static PVRSRV_ERROR
+SysVzPvzConnectionValidate(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eOrigin = PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_LAST;
+	IMG_UINT64 ui64Size = 0, ui64Addr = 0;
+
+	/*
+	 * Acquire the underlying VM manager PVZ connection & validate it.
+	 */
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	if (psVmmPvz == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: %s PVZ config: Unable to acquire PVZ connection",
+				__func__,
+				PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+		eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+		goto e0;
+	}
+	else if (psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: %s PVZ config: pfnGetDevPhysHeapOrigin cannot be NULL",
+				__func__,
+				PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+		eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+		goto e1;
+	}
+	else if (psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapAddrSize == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: %s PVZ config: pfnGetDevPhysHeapAddrSize cannot be NULL",
+				__func__,
+				PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+		eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+		goto e1;
+	}
+	else if (psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapAddrSize(psDevConfig,
+																PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL,
+																&ui64Size,
+																&ui64Addr) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: %s PVZ config: pfnGetDevPhysHeapAddrSize(GPU) must return PVRSRV_OK",
+				__func__,
+				PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+		eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+		goto e1;
+	}
+	else if (psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapAddrSize(psDevConfig,
+																PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+																&ui64Size,
+																&ui64Addr) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: %s PVZ config: pfnGetDevPhysHeapAddrSize(FW) must return PVRSRV_OK",
+				__func__,
+				PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+		eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+		goto e1;
+	}
+	else if (PVRSRV_OK !=
+			 psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin(psDevConfig,
+															  PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+															  &eOrigin))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: %s PVZ config: Invalid config. function table setup\n"
+				"=>: pfnGetDevPhysHeapOrigin() must return PVRSRV_OK",
+				__func__,
+				PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+		eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+		goto e1;
+	}
+	else if (eOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_LAST)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: %s PVZ config: Invalid config. function table setup\n"
+				"=>: pfnGetDevPhysHeapOrigin() returned an invalid physheap origin",
+				__func__,
+				PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+		eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+		goto e1;
+	}
+	else if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST)            &&
+			 eOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST &&
+			 psVmmPvz->sHostFuncTab.pfnMapDevPhysHeap == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Guest PVZ config: Invalid config. function table setup\n"
+				"=>: implement pfnMapDevPhysHeap() when using GUEST physheap origin",
+				__func__));
+		eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+		goto e1;
+	}
+	else if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST)           &&
+			 (psVmmPvz->sGuestFuncTab.pfnCreateDevConfig     == NULL ||
+			  psVmmPvz->sGuestFuncTab.pfnDestroyDevConfig    == NULL ||
+			  psVmmPvz->sGuestFuncTab.pfnCreateDevPhysHeaps  == NULL ||
+			  psVmmPvz->sGuestFuncTab.pfnDestroyDevPhysHeaps == NULL ||
+			  psVmmPvz->sGuestFuncTab.pfnMapDevPhysHeap      == NULL ||
+			  psVmmPvz->sGuestFuncTab.pfnUnmapDevPhysHeap    == NULL))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Host PVZ config: Invalid guest function table setup",
+				__func__));
+		eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+		goto e1;
+	}
+	else if (eOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST &&
+			 ui64Size == 0 &&
+			 ui64Addr == 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: %s PVZ config: Invalid pfnGetDevPhysHeapAddrSize(FW) physheap config.\n"
+				"=>: HEAP_ORIGIN_HOST is not compatible with FW UMA allocator",
+				__func__,
+				PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+		eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+		goto e1;
+	}
+
+	/* Log which PVZ setup type is being used by driver */
+	if (eOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+	{
+		/*
+		 *  Static PVZ bootstrap setup
+		 *
+		 *  This setup uses host-origin physheaps, has no hypercall mechanism and
+		 *  does not support out-of-order initialisation of host/guest VMs/drivers.
+		 *  The host driver has all the information needed to initialise the
+		 *  firmware state of every OSID when it is loaded, and its PVZ layer must
+		 *  mark all guest OSIDs as online during its PVZ initialisation. With no
+		 *  out-of-order initialisation support, a guest driver may only submit
+		 *  workloads to the device after the host driver has completely
+		 *  initialised the firmware; the VZ hypervisor/VM setup must guarantee
+		 *  this ordering.
+		 */
+		PVR_LOG(("Using static PVZ bootstrap setup"));
+	}
+	else
+	{
+		/*
+		 *  Dynamic PVZ bootstrap setup
+		 *
+		 *  This setup uses guest-origin physheaps, provides a PVZ hypercall
+		 *  mechanism and supports out-of-order initialisation of host/guest
+		 *  VMs/drivers. The host driver initialises only its own OSID-0 firmware
+		 *  state when it is loaded. Each guest driver uses its PVZ interface to
+		 *  hypercall into the host driver, both to synchronise with it (so no
+		 *  workload is submitted to the firmware before the host driver has
+		 *  initialised it) and to initialise the guest's own OSID-x firmware
+		 *  state.
+		 */
+		PVR_LOG(("Using dynamic PVZ bootstrap setup"));
+	}
+
+e1:
+	SysVzPvzConnectionRelease(psVmmPvz);
+e0:
+	return eError;
+}
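+
+/* Illustrative sketch (not part of this driver): a minimal VM manager
+ * implementation for the static bootstrap setup described above would
+ * populate the config function table with host-origin callbacks, e.g.:
+ *
+ *   static PVRSRV_ERROR MyGetDevPhysHeapOrigin(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ *                                              PVRSRV_DEVICE_PHYS_HEAP eHeap,
+ *                                              PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin)
+ *   {
+ *       *peOrigin = PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST;
+ *       return PVRSRV_OK;
+ *   }
+ *
+ * together with a pfnGetDevPhysHeapAddrSize() that reports the non-zero
+ * carve-out base/size reserved for the GPU/FW physheaps. The function name
+ * above is hypothetical; the types and table fields are those validated by
+ * SysVzPvzConnectionValidate().
+ */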
+
+PVRSRV_ERROR SysVzDevInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+	RGX_DATA* psDevData = psDevConfig->hDevData;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+	/* Initialise pvz connection */
+	eError = SysVzPvzConnectionInit();
+	PVR_LOGR_IF_ERROR(eError, "SysVzPvzConnectionInit");
+
+	/* Ensure pvz connection is configured correctly */
+	eError = SysVzPvzConnectionValidate(psDevConfig);
+	PVR_LOGR_IF_ERROR(eError, "SysVzPvzConnectionValidate");
+
+	psPVRSRVData->abVmOnline[0] = IMG_TRUE;
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		/* Undo any functionality not supported in guest drivers */
+		psDevData->psRGXTimingInfo->bEnableRDPowIsland  = IMG_FALSE;
+		psDevData->psRGXTimingInfo->bEnableActivePM = IMG_FALSE;
+		psDevConfig->pfnPrePowerState  = NULL;
+		psDevConfig->pfnPostPowerState = NULL;
+
+		/* Perform additional guest-specific device
+		   configuration initialisation */
+		eError = SysVzCreateDevConfig(psDevConfig);
+		PVR_LOGR_IF_ERROR(eError, "SysVzCreateDevConfig");
+
+		eError = SysVzCreateDevPhysHeaps(psDevConfig);
+		PVR_LOGR_IF_ERROR(eError, "SysVzCreateDevPhysHeaps");
+	}
+
+	/* Perform general device physheap initialisation */
+	eError = SysVzInitDevPhysHeaps(psDevConfig);
+	PVR_LOGR_IF_ERROR(eError, "SysVzInitDevPhysHeaps");
+
+	return eError;
+}
+
+PVRSRV_ERROR SysVzDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+	SysVzDeInitDevPhysHeaps(psDevConfig);
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		SysVzDestroyDevPhysHeaps(psDevConfig);
+		SysVzDestroyDevConfig(psDevConfig);
+	}
+
+	SysVzPvzConnectionDeInit();
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysVzCreateDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+
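+	/* A VM manager that does not implement this hypercall is treated as a
+	 * benign no-op rather than a failure (e.g. static bootstrap setups) */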
+	eError = PvzClientCreateDevConfig(psDevConfig, 0);
+	eError = (eError == PVRSRV_ERROR_NOT_IMPLEMENTED) ? PVRSRV_OK : eError;
+	PVR_LOG_IF_ERROR(eError, "PvzClientCreateDevConfig");
+
+	return eError;
+}
+
+PVRSRV_ERROR SysVzDestroyDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+
+	eError = PvzClientDestroyDevConfig(psDevConfig, 0);
+	eError = (eError == PVRSRV_ERROR_NOT_IMPLEMENTED) ? PVRSRV_OK : eError;
+	PVR_LOG_IF_ERROR(eError, "PvzClientDestroyDevConfig");
+
+	return eError;
+}
+
+PVRSRV_ERROR
+SysVzPvzCreateDevConfig(IMG_UINT32 ui32OSID,
+						IMG_UINT32 ui32DevID,
+						IMG_UINT32 *pui32IRQ,
+						IMG_UINT32 *pui32RegsSize,
+						IMG_UINT64 *pui64RegsCpuPBase)
+{
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if (ui32OSID == 0        ||
+		ui32DevID != 0       ||
+		psPVRSRVData == NULL ||
+		ui32OSID >= RGXFW_NUM_OS)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* For now, limit support to single device setups */
+	psDevNode = psPVRSRVData->psDeviceNodeList;
+	psDevConfig = psDevNode->psDevConfig;
+
+	/* Copy across guest VM device config information, here
+	   we assume this is the same across VMs and host */
+	*pui64RegsCpuPBase = psDevConfig->sRegsCpuPBase.uiAddr;
+	*pui32RegsSize = psDevConfig->ui32RegsSize;
+	*pui32IRQ = psDevConfig->ui32IRQ;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+SysVzPvzDestroyDevConfig(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID)
+{
+	if (ui32OSID == 0        ||
+		ui32DevID != 0       ||
+		ui32OSID >= RGXFW_NUM_OS)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (vz_support.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_support.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_support.h
new file mode 100644
index 0000000..d0d526b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_support.h
@@ -0,0 +1,126 @@
+/*************************************************************************/ /*!
+@File           vz_support.h
+@Title          System virtualization support API(s)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides the system virtualization API(s)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VZ_SUPPORT_H_
+#define _VZ_SUPPORT_H_
+
+#include "osfunc.h"
+#include "pvrsrv.h"
+
+/*!
+******************************************************************************
+ @Function			SysVzDevInit
+
+ @Description 		Entry into system virtualization per device configuration
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzDevInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzDevDeInit
+
+ @Description 		Exit from system virtualization per device configuration
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzCreateDevConfig
+
+ @Description 		Guest para-virtualization initialization per device
+					configuration.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzCreateDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzDestroyDevConfig
+
+ @Description 		Guest para-virtualization deinitialization per device
+					configuration.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzDestroyDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzCreateDevConfig
+
+ @Description 		Server para-virtualization handler for client SysVzCreateDevConfig
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzCreateDevConfig(IMG_UINT32 ui32OSID,
+									 IMG_UINT32 ui32DevID,
+									 IMG_UINT32 *pui32IRQ,
+									 IMG_UINT32 *pui32RegsSize,
+									 IMG_UINT64 *pui64RegsCpuPBase);
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzDestroyDevConfig
+
+ @Description 		Server para-virtualization handler for client SysVzDestroyDevConfig
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzDestroyDevConfig(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID);
+
+#endif /* _VZ_SUPPORT_H_ */
+
+/*****************************************************************************
+ End of file (vz_support.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_vm.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_vm.h
new file mode 100644
index 0000000..f74eb6a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_vm.h
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File			vz_vm.h
+@Title          System virtualization VM support APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides VM management support APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VZ_SUPPORT_VM_H_
+#define _VZ_SUPPORT_VM_H_
+
+#include "pvrsrv.h"
+#include "vmm_impl.h"
+
+PVRSRV_ERROR SysVzIsVmOnline(IMG_UINT32 ui32OSID);
+
+PVRSRV_ERROR SysVzPvzOnVmOnline(IMG_UINT32 ui32OSid, IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR SysVzPvzOnVmOffline(IMG_UINT32 ui32OSid);
+
+PVRSRV_ERROR SysVzPvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue);
+
+#endif /* _VZ_SUPPORT_VM_H_ */
+
+/*****************************************************************************
+ End of file (vz_vm.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_vmm_pvz.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_vmm_pvz.c
new file mode 100644
index 0000000..9dd9eb9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_vmm_pvz.c
@@ -0,0 +1,115 @@
+/*************************************************************************/ /*!
+@File			vz_vmm_pvz.c
+@Title          VM manager para-virtualization APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    VM manager para-virtualization management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "vz_vmm_pvz.h"
+
+PVRSRV_ERROR SysVzPvzConnectionInit(void)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* Create para-virtualization connection lock */
+	eError = OSLockCreate(&psPVRSRVData->hPvzConnectionLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: OSLockCreate failed (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+
+		goto e0;
+	}
+
+	/* Create VM manager para-virtualization connection */
+	eError = VMMCreatePvzConnection((VMM_PVZ_CONNECTION **)&psPVRSRVData->hPvzConnection);
+	if (eError != PVRSRV_OK)
+	{
+		OSLockDestroy(psPVRSRVData->hPvzConnectionLock);
+		psPVRSRVData->hPvzConnectionLock = NULL;
+
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Unable to create PVZ connection (%s)",
+				__func__,
+				PVRSRVGetErrorString(eError)));
+
+		goto e0;
+	}
+
+e0:
+	return eError;
+}
+
+void SysVzPvzConnectionDeInit(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	VMMDestroyPvzConnection(psPVRSRVData->hPvzConnection);
+	psPVRSRVData->hPvzConnection = NULL;
+
+	OSLockDestroy(psPVRSRVData->hPvzConnectionLock);
+	psPVRSRVData->hPvzConnectionLock = NULL;
+}
+
+VMM_PVZ_CONNECTION* SysVzPvzConnectionAcquire(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVR_ASSERT(psPVRSRVData->hPvzConnection != NULL);
+	return psPVRSRVData->hPvzConnection;
+}
+
+void SysVzPvzConnectionRelease(VMM_PVZ_CONNECTION *psParaVz)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	/* Nothing to do, sanity check the pointer passed back */
+	PVR_ASSERT(psParaVz == psPVRSRVData->hPvzConnection);
+}
+
+/******************************************************************************
+ End of file (vz_vmm_pvz.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_vmm_pvz.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_vmm_pvz.h
new file mode 100644
index 0000000..99d2a14
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_vmm_pvz.h
@@ -0,0 +1,85 @@
+/*************************************************************************/ /*!
+@File           vz_vmm_pvz.h
+@Title          System virtualization VM manager management APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides VM manager para-virtualization management APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VZ_VMM_PVZ_H_
+#define _VZ_VMM_PVZ_H_
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "vmm_impl.h"
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzConnectionInit() and SysVzPvzConnectionDeInit()
+
+ @Description 		SysVzPvzConnectionInit initializes the VM manager para-virtualization
+					connection, which is used subsequently for communication between guest
+					and host; depending on the underlying VM setup, this could be either
+					a hyper-call or a cross-VM call
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzConnectionInit(void);
+void SysVzPvzConnectionDeInit(void);
+
+/*!
+******************************************************************************
+ @Function			 SysVzPvzConnectionAcquire() and SysVzPvzConnectionRelease()
+
+ @Description 		 These acquire/release a handle to the VM manager para-virtualization
+					 connection for making a pvz call; on the client, use it to make the
+					 actual pvz call and, on the server handler / VM manager, use it
+					 to complete the processing for the pvz call or to make a VM manager
+					 to host pvz bridge call
+
+ @Return			VMM_PVZ_CONNECTION*	on success. Otherwise NULL
+ ******************************************************************************/
+VMM_PVZ_CONNECTION* SysVzPvzConnectionAcquire(void);
+void SysVzPvzConnectionRelease(VMM_PVZ_CONNECTION *psPvzConnection);
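+
+/* Illustrative call pattern (sketch only; error handling elided):
+ *
+ *   VMM_PVZ_CONNECTION *psVmmPvz = SysVzPvzConnectionAcquire();
+ *   if (psVmmPvz != NULL)
+ *   {
+ *       eError = psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin(psDevConfig,
+ *                                                                 eHeap, &eOrigin);
+ *       SysVzPvzConnectionRelease(psVmmPvz);
+ *   }
+ */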
+
+#endif /* _VZ_VMM_PVZ_H_ */
+
+/*****************************************************************************
+ End of file (vz_vmm_pvz.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_vmm_vm.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_vmm_vm.c
new file mode 100644
index 0000000..6552ed8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/system/vz_vmm_vm.c
@@ -0,0 +1,242 @@
+/*************************************************************************/ /*!
+@File			vz_vmm_vm.c
+@Title          System virtualization VM support APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System virtualization VM support functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "vz_vm.h"
+#include "rgxfwutils.h"
+
+PVRSRV_ERROR
+SysVzIsVmOnline(IMG_UINT32 ui32OSID)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if (ui32OSID == 0 || ui32OSID >= RGXFW_NUM_OS)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: invalid OSID (%d)",
+				 __func__, ui32OSID));
+
+		return PVRSRV_ERROR_INVALID_PVZ_OSID;
+	}
+
+	if (!psPVRSRVData->abVmOnline[ui32OSID])
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: OSID %d is offline.",
+				 __func__, ui32OSID));
+
+		return PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE;
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+SysVzPvzOnVmOnline(IMG_UINT32 ui32OSid, IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR       eError          = PVRSRV_OK;
+	PVRSRV_DATA        *psPVRSRVData   = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	if (ui32OSid == 0 || ui32OSid >= RGXFW_NUM_OS)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: invalid OSID (%d)",
+				 __func__, ui32OSid));
+
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (psPVRSRVData->abVmOnline[ui32OSid])
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: OSID %d is already enabled.",
+				 __func__, ui32OSid));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* For now, limit support to single device setups */
+	psDevNode = psPVRSRVData->psDeviceNodeList;
+	psDevInfo = psDevNode->pvDevice;
+
+	if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
+	{
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSAcquireBridgeLock();
+#endif
+
+		/* Firmware not initialized yet, do it here */
+		eError = PVRSRVDeviceInitialise(psDevNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: failed to initialize firmware (%s)",
+					 __func__, PVRSRVGetErrorString(eError)));
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+			/* Don't leak the bridge lock on the error path */
+			OSReleaseBridgeLock();
+#endif
+			goto e0;
+		}
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSReleaseBridgeLock();
+#endif
+	}
+
+	/* request new priority */
+	eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32Priority);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	psPVRSRVData->abVmOnline[ui32OSid] = IMG_TRUE;
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR
+SysVzPvzOnVmOffline(IMG_UINT32 ui32OSid)
+{
+	PVRSRV_ERROR      eError          = PVRSRV_OK;
+	PVRSRV_DATA       *psPVRSRVData   = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	if (ui32OSid == 0 || ui32OSid >= RGXFW_NUM_OS)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: invalid OSID (%d)",
+				 __func__, ui32OSid));
+
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (!psPVRSRVData->abVmOnline[ui32OSid])
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: OSID %d is already disabled.",
+				 __func__, ui32OSid));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* For now, limit support to single device setups */
+	psDevNode = psPVRSRVData->psDeviceNodeList;
+	psDevInfo = psDevNode->pvDevice;
+
+	eError = RGXFWSetFwOsState(psDevInfo, ui32OSid, RGXFWIF_OS_OFFLINE);
+	if (eError == PVRSRV_OK)
+	{
+		psPVRSRVData->abVmOnline[ui32OSid] = IMG_FALSE;
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR
+SysVzPvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	psDevInfo = psDeviceNode->pvDevice;
+
+	switch (eVMMParamType)
+	{
+#if defined(SUPPORT_RGX)
+		case VMM_CONF_PRIO_OSID0:
+		case VMM_CONF_PRIO_OSID1:
+		case VMM_CONF_PRIO_OSID2:
+		case VMM_CONF_PRIO_OSID3:
+		case VMM_CONF_PRIO_OSID4:
+		case VMM_CONF_PRIO_OSID5:
+		case VMM_CONF_PRIO_OSID6:
+		case VMM_CONF_PRIO_OSID7:
+		{
+			IMG_UINT32 ui32OSid = eVMMParamType;
+			IMG_UINT32 ui32Prio = ui32ParamValue;
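+			/* VMM_CONF_PRIO_OSID0..7 are assumed to be contiguous enum values
+			 * starting at 0, so eVMMParamType doubles as the target OSID */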
+
+			if (ui32OSid < RGXFW_NUM_OS)
+			{
+				eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32Prio);
+			}
+			else
+			{
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+			}
+			break;
+		}
+		case VMM_CONF_ISOL_THRES:
+		{
+			IMG_UINT32 ui32Threshold = ui32ParamValue;
+			eError = RGXFWSetOSIsolationThreshold(psDevInfo, ui32Threshold);
+			break;
+		}
+		case VMM_CONF_HCS_DEADLINE:
+		{
+			IMG_UINT32 ui32HCSDeadline = ui32ParamValue;
+			eError = RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadline);
+			break;
+		}
+#else
+		PVR_UNREFERENCED_PARAMETER(ui32ParamValue);
+#endif
+		default:
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+
+	return eError;
+}
+
+/******************************************************************************
+ End of file (vz_vmm_vm.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlclient.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlclient.c
new file mode 100644
index 0000000..b5d911d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlclient.c
@@ -0,0 +1,507 @@
+/*************************************************************************/ /*!
+@File			tlclient.c
+@Title          Services Transport Layer shared API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common API used in both clients and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* DESIGN NOTE
+ * This transport layer consumer-role API was created as a shared API when a
+ * client wanted to read the data of a TL stream from within the KM server
+ * driver. This was in addition to the existing clients supported externally
+ * by the UM client library component via PVR API layer.
+ * This shared API is thus used by the PVR TL API in the client library and
+ * by clients internal to the server driver module. It depends on
+ * client entry points of the TL and DEVMEM bridge modules. These entry points
+ * encapsulate from the TL shared API whether a direct bridge or an indirect
+ * (ioctl) bridge is used.
+ * One reason for needing this layer centres around the fact that some of the
+ * API functions make multiple bridge calls and the logic that glues these
+ * together is common regardless of client location. Further this layer has
+ * allowed the defensive coding that checks parameters to move into the PVR
+ * API layer where untrusted clients enter giving a more efficient KM code path.
+ */
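+
+/* Typical consumer flow (illustrative sketch; error handling elided and
+ * ui32Mode standing in for whichever stream-open mode the client needs):
+ *
+ *   IMG_HANDLE hSD;
+ *   IMG_PBYTE  pbData;
+ *   IMG_UINT32 ui32Len;
+ *
+ *   TLClientOpenStream(hDevConnection, "stream_name", ui32Mode, &hSD);
+ *   TLClientAcquireData(hDevConnection, hSD, &pbData, &ui32Len);
+ *   (process ui32Len bytes at pbData)
+ *   TLClientReleaseData(hDevConnection, hSD);
+ *   TLClientCloseStream(hDevConnection, hSD);
+ */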
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "tlclient.h"
+#include "pvrsrv_tlcommon.h"
+#include "client_pvrtl_bridge.h"
+
+/* Defines/Constants
+ */
+
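+/* Sentinel stored in uiReadOffset/uiReadLen while no acquire is
+ * outstanding on a stream descriptor */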
+#define NO_ACQUIRE             0xffffffffU
+
+/* User-side stream descriptor structure.
+ */
+typedef struct _TL_STREAM_DESC_
+{
+	/* Handle on kernel-side stream descriptor */
+	IMG_HANDLE		hServerSD;
+
+	/* Stream data buffer variables */
+	DEVMEM_MEMDESC*			psUMmemDesc;
+	IMG_PBYTE				pBaseAddr;
+
+	/* Offset in bytes into the circular buffer and valid only after
+	 * an Acquire call and undefined after a release. */
+	IMG_UINT32 	uiReadOffset;
+
+	/* Always a positive integer when the Acquire call returns and a release
+	 * is outstanding. Undefined at all other times. */
+	IMG_UINT32	uiReadLen;
+
+	/* Flag indicating if the RESERVE_TOO_BIG error was already printed.
+	 * It's used to reduce number of errors in kernel log. */
+	IMG_BOOL bPrinted;
+} TL_STREAM_DESC, *PTL_STREAM_DESC;
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection,
+		const IMG_CHAR* pszName,
+		IMG_UINT32   ui32Mode,
+		IMG_HANDLE*  phSD)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	TL_STREAM_DESC *psSD = NULL;
+	IMG_HANDLE hTLPMR;
+	IMG_HANDLE hTLImportHandle;
+	IMG_DEVMEM_SIZE_T uiImportSize;
+	IMG_UINT32 ui32MemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE;
+
+	PVR_ASSERT(hDevConnection);
+	PVR_ASSERT(pszName);
+	PVR_ASSERT(phSD);
+	*phSD = NULL;
+
+	/* Allocate memory for the stream descriptor object, initialise with
+	 * "no data read" yet. */
+	psSD = OSAllocZMem(sizeof(TL_STREAM_DESC));
+	if (psSD == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF((PVR_DBG_ERROR, "TLClientOpenStream: failed to allocate stream descriptor (%d)", eError));
+		goto e0;
+	}
+	psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+
+	/* Send open stream request to kernel server to get stream handle and
+	 * buffer cookie so we can get access to the buffer in this process. */
+	eError = BridgeTLOpenStream(GetBridgeHandle(hDevConnection), pszName, ui32Mode,
+										&psSD->hServerSD, &hTLPMR);
+	if (eError != PVRSRV_OK)
+	{
+		if ((ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT) &&
+			(eError == PVRSRV_ERROR_TIMEOUT))
+		{
+			goto e1;
+		}
+		PVR_LOGG_IF_ERROR(eError, "BridgeTLOpenStream", e1);
+	}
+
+	/* Convert server export cookie into a cookie for use by this client */
+	eError = DevmemMakeLocalImportHandle(hDevConnection,
+										hTLPMR, &hTLImportHandle);
+	PVR_LOGG_IF_ERROR(eError, "DevmemMakeLocalImportHandle", e2);
+
+	ui32MemFlags |= (ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO) ?
+	        PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0;
+	/* Now convert client cookie into a client handle on the buffer's
+	 * physical memory region; write access is only mapped when the
+	 * stream was opened for writing */
+	eError = DevmemLocalImport(hDevConnection,
+	                           hTLImportHandle,
+	                           ui32MemFlags,
+	                           &psSD->psUMmemDesc,
+	                           &uiImportSize,
+	                           "TLBuffer");
+	PVR_LOGG_IF_ERROR(eError, "DevmemLocalImport", e3);
+
+	/* Now map the memory into the virtual address space of this process. */
+	eError = DevmemAcquireCpuVirtAddr(psSD->psUMmemDesc, (void **)
+															&psSD->pBaseAddr);
+	PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e4);
+
+	/* Ignore error, not much that can be done */
+	(void) DevmemUnmakeLocalImportHandle(hDevConnection,
+			hTLImportHandle);
+
+	/* Return client descriptor handle to caller */
+	*phSD = psSD;
+	return PVRSRV_OK;
+
+/* Clean up post buffer setup */
+e4:
+	DevmemFree(psSD->psUMmemDesc);
+e3:
+	(void) DevmemUnmakeLocalImportHandle(hDevConnection,
+				hTLImportHandle);
+/* Clean up post stream open */
+e2:
+	BridgeTLCloseStream(GetBridgeHandle(hDevConnection), psSD->hServerSD);
+
+/* Cleanup post allocation of the descriptor object */
+e1:
+	OSFreeMem(psSD);
+
+e0:
+	return eError;
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCloseStream(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD)
+{
+	PVRSRV_ERROR          eError = PVRSRV_OK;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hDevConnection);
+	PVR_ASSERT(hSD);
+
+	/* Check the caller provided connection is valid */
+	if (!psSD->hServerSD)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLClientCloseStream: descriptor already closed/not open"));
+		return PVRSRV_ERROR_HANDLE_NOT_FOUND;
+	}
+
+	/* Check if acquire is outstanding, perform release if it is, ignore result
+	 * as there is not much we can do if it is an error other than close */
+	if (psSD->uiReadLen != NO_ACQUIRE)
+	{
+		(void) BridgeTLReleaseData(GetBridgeHandle(hDevConnection), psSD->hServerSD,
+									psSD->uiReadOffset, psSD->uiReadLen);
+		psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+	}
+
+	/* Clean up DevMem resources used for this stream in this client */
+	DevmemReleaseCpuVirtAddr(psSD->psUMmemDesc);
+
+	DevmemFree(psSD->psUMmemDesc);
+
+	/* Send close to server to clean up kernel mode resources for this
+	 * handle and release the memory. */
+	eError = BridgeTLCloseStream(GetBridgeHandle(hDevConnection), psSD->hServerSD);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "BridgeTLCloseStream: KM returned %d", eError));
+		/* Not much we can do with error, fall through to clean up
+		 * return eError; */
+	}
+
+	OSCachedMemSet(psSD, 0x00, sizeof(TL_STREAM_DESC));
+	OSFreeMem(psSD);
+
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientDiscoverStreams(SHARED_DEV_CONNECTION hDevConnection,
+		const IMG_CHAR *pszNamePattern,
+		IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+		IMG_UINT32 *pui32NumFound)
+{
+	PVR_ASSERT(hDevConnection);
+	PVR_ASSERT(pszNamePattern);
+	PVR_ASSERT(pui32NumFound);
+
+	return BridgeTLDiscoverStreams(GetBridgeHandle(hDevConnection),
+	                               pszNamePattern,
+	                               /* the 2D name array is passed to the
+	                                * bridge as a flat one-dimensional buffer */
+	                               *pui32NumFound * PRVSRVTL_MAX_STREAM_NAME_SIZE,
+	                               (IMG_CHAR *) aszStreams,
+	                               pui32NumFound);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD,
+		IMG_UINT8 **ppui8Data,
+		IMG_UINT32 ui32Size)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+	IMG_UINT32 ui32BufferOffset, ui32Dummy;
+
+	PVR_ASSERT(hDevConnection);
+	PVR_ASSERT(hSD);
+	PVR_ASSERT(ppui8Data);
+	PVR_ASSERT(ui32Size);
+
+	eError = BridgeTLReserveStream(GetBridgeHandle(hDevConnection), psSD->hServerSD,
+	                               &ui32BufferOffset, ui32Size, ui32Size,
+	                               &ui32Dummy);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	*ppui8Data = psSD->pBaseAddr + ui32BufferOffset;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream2(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD,
+		IMG_UINT8 **ppui8Data,
+		IMG_UINT32 ui32Size,
+		IMG_UINT32 ui32SizeMin,
+		IMG_UINT32 *pui32Available)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+	IMG_UINT32 ui32BufferOffset;
+
+	PVR_ASSERT(hDevConnection);
+	PVR_ASSERT(hSD);
+	PVR_ASSERT(ppui8Data);
+	PVR_ASSERT(ui32Size);
+
+	eError = BridgeTLReserveStream(GetBridgeHandle(hDevConnection), psSD->hServerSD,
+	                               &ui32BufferOffset, ui32Size, ui32SizeMin,
+	                               pui32Available);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	*ppui8Data = psSD->pBaseAddr + ui32BufferOffset;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCommitStream(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD,
+		IMG_UINT32 ui32Size)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hDevConnection);
+	PVR_ASSERT(hSD);
+	PVR_ASSERT(ui32Size);
+
+	eError = BridgeTLCommitStream(GetBridgeHandle(hDevConnection), psSD->hServerSD, ui32Size);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientAcquireData(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE  hSD,
+		IMG_PBYTE*  ppPacketBuf,
+		IMG_UINT32* pui32BufLen)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hDevConnection);
+	PVR_ASSERT(hSD);
+	PVR_ASSERT(ppPacketBuf);
+	PVR_ASSERT(pui32BufLen);
+
+	/* In case of non-blocking acquires, which can return no data, and
+	 * error paths ensure we clear the output parameters first. */
+	*ppPacketBuf = NULL;
+	*pui32BufLen = 0;
+
+	/* Check Acquire has not been called twice in a row without a release */
+	if (psSD->uiReadOffset != NO_ACQUIRE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLClientAcquireData: acquire already outstanding, ReadOffset(%d), ReadLength(%d)", psSD->uiReadOffset, psSD->uiReadLen));
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	/* Ask the kernel server for the next chunk of data to read */
+	eError = BridgeTLAcquireData(GetBridgeHandle(hDevConnection), psSD->hServerSD,
+									&psSD->uiReadOffset, &psSD->uiReadLen);
+	if (eError != PVRSRV_OK)
+	{
+		/* Mask reporting of the errors seen under normal operation */
+		if ((eError != PVRSRV_ERROR_RESOURCE_UNAVAILABLE) &&
+			(eError != PVRSRV_ERROR_TIMEOUT) &&
+			(eError != PVRSRV_ERROR_STREAM_READLIMIT_REACHED))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "BridgeTLAcquireData: KM returned %d", eError));
+		}
+		psSD->uiReadOffset = psSD->uiReadLen = NO_ACQUIRE;
+		return eError;
+	}
+	/* else PVRSRV_OK */
+
+	/* Return the data offset and length to the caller if bytes are available
+	 * to be read. Could be zero for non-blocking mode so pass back cleared
+	 * values above */
+	if (psSD->uiReadLen)
+	{
+		*ppPacketBuf = psSD->pBaseAddr + psSD->uiReadOffset;
+		*pui32BufLen = psSD->uiReadLen;
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _TLClientReleaseDataLen(
+		SHARED_DEV_CONNECTION hDevConnection,
+		TL_STREAM_DESC* psSD,
+		IMG_UINT32 uiReadLen)
+{
+	PVRSRV_ERROR eError;
+
+	/* the previous acquire did not return any data, this is a no-operation */
+	if (psSD->uiReadLen == 0)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Check release has not been called twice in a row without an acquire */
+	if (psSD->uiReadOffset == NO_ACQUIRE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLClientReleaseData_: no acquire to release"));
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	/* Inform the kernel to release the data from the buffer */
+	eError = BridgeTLReleaseData(GetBridgeHandle(hDevConnection), psSD->hServerSD,
+										psSD->uiReadOffset, uiReadLen);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "BridgeTLReleaseData: KM returned %d", eError));
+		/* Need to continue to keep client data consistent, fall through
+		 * return eError */
+	}
+
+	/* Reset state to indicate no outstanding acquire */
+	psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseData(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD)
+{
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hDevConnection);
+	PVR_ASSERT(hSD);
+
+	return _TLClientReleaseDataLen(hDevConnection, psSD, psSD->uiReadLen);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseDataLess(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen)
+{
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hDevConnection);
+	PVR_ASSERT(hSD);
+
+	/* Check the specified size is within the size returned by Acquire */
+	if (uiActualReadLen > psSD->uiReadLen)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLClientReleaseData_: no acquire to release"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return _TLClientReleaseDataLen(hDevConnection, psSD, uiActualReadLen);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientWriteData(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD,
+		IMG_UINT32 ui32Size,
+		IMG_BYTE *pui8Data)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hDevConnection);
+	PVR_ASSERT(hSD);
+	PVR_ASSERT(ui32Size);
+	PVR_ASSERT(pui8Data);
+
+	eError = BridgeTLWriteData(GetBridgeHandle(hDevConnection), psSD->hServerSD, ui32Size, pui8Data);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_STREAM_FULL)
+		{
+			if (!psSD->bPrinted)
+			{
+				psSD->bPrinted = IMG_TRUE;
+				PVR_DPF((PVR_DBG_ERROR, "Not enough space. Failed to write"
+				        " data to the stream (%d).", eError));
+			}
+		}
+		else if (eError == PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "TL packet size limit exceeded. "
+				"Failed to write data to the stream (%d).", eError));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "TLClientWriteData: KM returned %d",
+			        eError));
+		}
+	}
+
+	return eError;
+}
+
+/******************************************************************************
+ End of file (tlclient.c)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlclient.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlclient.h
new file mode 100644
index 0000000..1f59104
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlclient.h
@@ -0,0 +1,256 @@
+/*************************************************************************/ /*!
+@File           tlclient.h
+@Title          Services Transport Layer shared API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common API used in both clients and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef TLCLIENT_H_
+#define TLCLIENT_H_
+
+
+#include "img_defs.h"
+#include "pvrsrv_tlcommon.h"
+#include "pvrsrv_error.h"
+
+
+/* This value is used for the connection handle argument in the client API
+ * when it is called directly from the kernel, which leads to direct bridge
+ * access.
+ */
+#define DIRECT_BRIDGE_HANDLE	((IMG_HANDLE)0xDEADBEEFU)
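+
+/* For illustration only, a sketch of a kernel-mode caller opening a stream
+ * over the direct bridge (stream name and flag are assumptions, error
+ * handling omitted):
+ *
+ *   IMG_HANDLE hSD;
+ *   eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, "example_stream",
+ *                               PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING, &hSD);
+ */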
+
+
+/**************************************************************************/ /*!
+ @Function		TLClientOpenStream
+ @Description	Open a descriptor onto an existing kernel transport stream.
+ @Input			hDevConnection  Address of a pointer to a connection object
+ @Input			pszName			Address of the stream name string, no longer
+ 	 	 	 	 	 	 	 	than PRVSRVTL_MAX_STREAM_NAME_SIZE.
+ @Input			ui32Mode		Stream open mode flags (PVRSRV_STREAM_FLAG_*)
+ @Output		phSD			Address of a pointer to a stream object
+ @Return 		PVRSRV_ERROR_NOT_FOUND:        when named stream not found
+ @Return		PVRSRV_ERROR_ALREADY_OPEN:     stream already opened by another client
+ @Return		PVRSRV_ERROR_STREAM_ERROR:     internal driver state error
+ @Return        PVRSRV_ERROR_TIMEOUT:          block timed out, stream not found
+ @Return		PVRSRV_ERROR:			       for other system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection,
+		const IMG_CHAR* pszName,
+		IMG_UINT32   ui32Mode,
+		IMG_HANDLE*  phSD);
+
+
+/**************************************************************************/ /*!
+ @Function		TLClientCloseStream
+ @Description	Close and release the stream connection to Services kernel
+				server transport layer. Any outstanding Acquire will be
+				released.
+ @Input			hDevConnection  Address of a pointer to a connection object
+ @Input			hSD				Handle of the stream object to close
+ @Return		PVRSRV_ERROR_HANDLE_NOT_FOUND: when SD handle is not known
+ @Return		PVRSRV_ERROR_STREAM_ERROR: 	  internal driver state error
+ @Return		PVRSRV_ERROR:				  for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCloseStream(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD);
+
+/**************************************************************************/ /*!
+ @Function      TLClientDiscoverStreams
+ @Description   Finds all streams whose names start with pszNamePattern.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         pszNamePattern  Name pattern, matched against the beginning
+                                of each stream name.
+ @Output        aszStreams      Array receiving the names of the discovered
+                                streams.
+ @InOut         pui32NumFound   On input the maximum number of names that
+                                fit into aszStreams, on output the number of
+                                streams discovered.
+ @Return		PVRSRV_ERROR    for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientDiscoverStreams(SHARED_DEV_CONNECTION hDevConnection,
+		const IMG_CHAR *pszNamePattern,
+		IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+		IMG_UINT32 *pui32NumFound);
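+
+/* A minimal discovery sketch (illustrative; array capacity and pattern are
+ * assumptions). The caller sets *pui32NumFound to the array capacity before
+ * the call and reads the number of discovered streams from it afterwards:
+ *
+ *   IMG_CHAR aszStreams[8][PRVSRVTL_MAX_STREAM_NAME_SIZE];
+ *   IMG_UINT32 ui32Found = 8;
+ *   eError = TLClientDiscoverStreams(hDevConnection, "hwperf",
+ *                                    aszStreams, &ui32Found);
+ */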
+
+/**************************************************************************/ /*!
+ @Function      TLClientReserveStream
+ @Description   Reserves a region with given size in the stream. If the stream
+                is already reserved the function will return an error.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream to reserve space in
+ @Output        ppui8Data       Pointer to the reserved region within the
+                                stream buffer
+ @Input         ui32Size        Size of the requested reservation, in bytes
+ @Return        PVRSRV_OK on success or a PVRSRV_ERROR code otherwise
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD,
+		IMG_UINT8 **ppui8Data,
+		IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+ @Function      TLClientReserveStream2
+ @Description   Reserves a region with given size in the stream. If the stream
+                is already reserved the function will return an error.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream to reserve space in
+ @Output        ppui8Data       Pointer to the reserved region within the
+                                stream buffer
+ @Input         ui32Size        Size of the requested reservation, in bytes
+ @Input         ui32SizeMin     Minimum acceptable reservation size, in bytes
+ @Output        pui32Available  On return, space available in the buffer
+ @Return        PVRSRV_OK on success or a PVRSRV_ERROR code otherwise
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream2(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD,
+		IMG_UINT8 **ppui8Data,
+		IMG_UINT32 ui32Size,
+		IMG_UINT32 ui32SizeMin,
+		IMG_UINT32 *pui32Available);
+
+/**************************************************************************/ /*!
+ @Function      TLClientCommitStream
+ @Description   Commits a previously reserved region in the stream and
+                therefore allows subsequent reserves.
+                This call must be preceded by a call to
+                TLClientReserveStream or TLClientReserveStream2.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream to commit to
+ @Input         ui32Size        Size of the data, in bytes
+ @Return        PVRSRV_OK on success or a PVRSRV_ERROR code otherwise
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCommitStream(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD,
+		IMG_UINT32 ui32Size);
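+
+/* A minimal write-path sketch using the reserve/commit pair above
+ * (illustrative; hDevConnection, hSD, pui8Payload and ui32Len are
+ * assumptions, error handling abbreviated):
+ *
+ *   IMG_UINT8 *pui8Dest;
+ *   if (TLClientReserveStream(hDevConnection, hSD, &pui8Dest, ui32Len) == PVRSRV_OK)
+ *   {
+ *       OSDeviceMemCopy(pui8Dest, pui8Payload, ui32Len); // assumed copy helper
+ *       TLClientCommitStream(hDevConnection, hSD, ui32Len);
+ *   }
+ */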
+
+/**************************************************************************/ /*!
+ @Function		TLClientAcquireData
+ @Description	When there is data available in the stream buffer this call
+ 	 	 	 	returns with the address and length of the data buffer the
+ 	 	 	 	client can safely read. This buffer may contain one or more
+ 	 	 	 	packets of data.
+ 	 	 	 	If no data is available then this call blocks until it becomes
+ 	 	 	 	available. However if the stream has been destroyed while
+ 	 	 	 	waiting then a resource unavailable error will be returned
+ 	 	 	 	to the caller. Clients must pair this call with a
+ 	 	 	 	ReleaseData call.
+ @Input			hDevConnection  Address of a pointer to a connection object
+ @Input			hSD				Handle of the stream object to read
+ @Output		ppPacketBuf		Address of a pointer to a byte buffer. On exit
+								pointer contains address of buffer to read from
+ @Output		puiBufLen		Pointer to an integer. On exit it is the size
+								of the data to read from the packet buffer
+ @Return		PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return		PVRSRV_ERROR_HANDLE_NOT_FOUND:     when SD handle not known
+ @Return		PVRSRV_ERROR_STREAM_ERROR: 	       internal driver state error
+ @Return		PVRSRV_ERROR_RETRY:				   release not called beforehand
+ @Return        PVRSRV_ERROR_TIMEOUT:              block timed out, no data
+ @Return		PVRSRV_ERROR:					   for other system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientAcquireData(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE  hSD,
+		IMG_PBYTE*  ppPacketBuf,
+		IMG_UINT32* puiBufLen);
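+
+/* A minimal read-loop sketch pairing acquire with release (illustrative;
+ * hDevConnection/hSD are assumptions, a blocking-mode descriptor is assumed
+ * and processing is elided):
+ *
+ *   IMG_PBYTE pbBuf;
+ *   IMG_UINT32 ui32Len;
+ *   while (TLClientAcquireData(hDevConnection, hSD, &pbBuf, &ui32Len) == PVRSRV_OK)
+ *   {
+ *       if (ui32Len > 0)
+ *       {
+ *           ...process ui32Len bytes starting at pbBuf...
+ *       }
+ *       TLClientReleaseData(hDevConnection, hSD);
+ *   }
+ */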
+
+
+/**************************************************************************/ /*!
+ @Function      TLClientReleaseData
+ @Description   Called after the client has read the stream data out of the
+                buffer. The data is subsequently flushed from the stream
+                buffer to make room for more data packets from the stream
+                source.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to read
+ @Return        PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return        PVRSRV_ERROR_HANDLE_NOT_FOUND:   when SD handle not known to TL
+ @Return        PVRSRV_ERROR_STREAM_ERROR:       internal driver state error
+ @Return        PVRSRV_ERROR_RETRY:              acquire not called beforehand
+ @Return        PVRSRV_ERROR:                    for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseData(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD);
+
+/**************************************************************************/ /*!
+ @Function      TLClientReleaseDataLess
+ @Description   Called after the client has read only some of the data out of
+                the buffer and wishes to complete the read early, i.e. it does
+                not want to read the full data that the acquire call returned,
+                e.g. it reads just one packet from the stream.
+                The data is subsequently flushed from the stream buffer to
+                make room for more data packets from the stream source.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to read
+ @Input         uiActualReadLen Size of data read, in bytes. Must be on a TL
+                                packet boundary.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS:     when read length too big
+ @Return        PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return        PVRSRV_ERROR_HANDLE_NOT_FOUND:   when SD handle not known to TL
+ @Return        PVRSRV_ERROR_STREAM_ERROR:       internal driver state error
+ @Return        PVRSRV_ERROR_RETRY:              acquire not called beforehand
+ @Return        PVRSRV_ERROR:                    for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseDataLess(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen);
+
+/**************************************************************************/ /*!
+ @Function      TLClientWriteData
+ @Description   Writes data to the stream.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream to write to
+ @Input         ui32Size        Size of the data, in bytes
+ @Input         pui8Data        Pointer to the data to write
+ @Return        PVRSRV_OK on success or a PVRSRV_ERROR code otherwise
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientWriteData(SHARED_DEV_CONNECTION hDevConnection,
+		IMG_HANDLE hSD,
+		IMG_UINT32 ui32Size,
+		IMG_BYTE *pui8Data);
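+
+/* Illustrative one-shot write (names are assumptions):
+ *
+ *   eError = TLClientWriteData(hDevConnection, hSD, ui32Len, pui8Payload);
+ */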
+
+
+#endif /* TLCLIENT_H_ */
+
+/******************************************************************************
+ End of file (tlclient.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlintern.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlintern.c
new file mode 100644
index 0000000..9decf4a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlintern.c
@@ -0,0 +1,441 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer kernel side API implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport Layer functions available to driver components in
+                the driver.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "allocmem.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "devicemem.h"
+
+#include "pvrsrv_tlcommon.h"
+#include "tlintern.h"
+
+/*
+ * Make functions
+ */
+PTL_STREAM_DESC
+TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3)
+{
+	PTL_STREAM_DESC ps = OSAllocZMem(sizeof(TL_STREAM_DESC));
+	if (ps == NULL)
+	{
+		return NULL;
+	}
+	ps->psNode = f1;
+	ps->ui32Flags = f2;
+	ps->hReadEvent = f3;
+	ps->uiRefCount = 1;
+
+	if (f2 & PVRSRV_STREAM_FLAG_READ_LIMIT)
+	{
+		ps->ui32ReadLimit = f1->psStream->ui32Write;
+	}
+	return ps;
+}
+
+PTL_SNODE
+TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4)
+{
+	PTL_SNODE ps = OSAllocZMem(sizeof(TL_SNODE));
+	if (ps == NULL)
+	{
+		return NULL;
+	}
+	ps->hReadEventObj = f2;
+	ps->psStream = f3;
+	ps->psRDesc = f4;
+	f3->psNode = ps;
+	return ps;
+}
+
+/*
+ * Transport Layer Global top variables and functions
+ */
+static TL_GLOBAL_DATA sTLGlobalData;
+
+TL_GLOBAL_DATA *TLGGD(void)	// TLGetGlobalData()
+{
+	return &sTLGlobalData;
+}
+
+/* TLInit must only be called once at driver initialisation.
+ * An assert is provided to check this condition on debug builds.
+ */
+PVRSRV_ERROR
+TLInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT (sTLGlobalData.hTLGDLock == NULL && sTLGlobalData.hTLEventObj == NULL);
+
+	/* Allocate a lock for the TL global data, to be used while updating it.
+	 * This makes the TL global data multi-thread safe */
+	eError = OSLockCreate (&sTLGlobalData.hTLGDLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	/* Allocate the event object used to signal global TL events such as
+	 * new stream created */
+	eError = OSEventObjectCreate("TLGlobalEventObj", &sTLGlobalData.hTLEventObj);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	PVR_DPF_RETURN_OK;
+
+/* Don't allow the driver to start up on error */
+e1:
+	OSLockDestroy (sTLGlobalData.hTLGDLock);
+	sTLGlobalData.hTLGDLock = NULL;
+e0:
+	PVR_DPF_RETURN_RC (eError);
+}
+
+static void RemoveAndFreeStreamNode(PTL_SNODE psRemove)
+{
+	TL_GLOBAL_DATA*  psGD = TLGGD();
+	PTL_SNODE* 		 last;
+	PTL_SNODE 		 psn;
+	PVRSRV_ERROR     eError;
+
+	PVR_DPF_ENTERED;
+
+	/* Unlink the stream node from the master list */
+	PVR_ASSERT(psGD->psHead);
+	last = &psGD->psHead;
+	for (psn = psGD->psHead; psn; psn=psn->psNext)
+	{
+		if (psn == psRemove)
+		{
+			/* Other calling code may have freed and zeroed the pointers */
+			if (psn->psRDesc)
+			{
+				OSFreeMem(psn->psRDesc);
+				psn->psRDesc = NULL;
+			}
+			if (psn->psStream)
+			{
+				OSFreeMem(psn->psStream);
+				psn->psStream = NULL;
+			}
+			*last = psn->psNext;
+			break;
+		}
+		last = &psn->psNext;
+	}
+
+	/* Release the event list object owned by the stream node */
+	if (psRemove->hReadEventObj)
+	{
+		eError = OSEventObjectDestroy(psRemove->hReadEventObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+
+		psRemove->hReadEventObj = NULL;
+	}
+
+	/* Release the memory of the stream node */
+	OSFreeMem(psRemove);
+
+	PVR_DPF_RETURN;
+}
+
+void
+TLDeInit(void)
+{
+	PVR_DPF_ENTERED;
+
+	if (sTLGlobalData.uiClientCnt)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLDeInit transport layer but %d client streams are still connected", sTLGlobalData.uiClientCnt));
+		sTLGlobalData.uiClientCnt = 0;
+	}
+
+	/* Clean up the SNODE list; RemoveAndFreeStreamNode() unlinks the head
+	 * node each time, so psHead is NULL when the loop exits */
+	while (sTLGlobalData.psHead)
+	{
+		RemoveAndFreeStreamNode(sTLGlobalData.psHead);
+	}
+
+	/* Clean up the TL global event object */
+	if (sTLGlobalData.hTLEventObj)
+	{
+		OSEventObjectDestroy(sTLGlobalData.hTLEventObj);
+		sTLGlobalData.hTLEventObj = NULL;
+	}
+
+	/* Destroy the TL global data lock */
+	if (sTLGlobalData.hTLGDLock)
+	{
+		OSLockDestroy (sTLGlobalData.hTLGDLock);
+		sTLGlobalData.hTLGDLock = NULL;
+	}
+
+	PVR_DPF_RETURN;
+}
+
+void TLAddStreamNode(PTL_SNODE psAdd)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psAdd);
+	psAdd->psNext = TLGGD()->psHead;
+	TLGGD()->psHead = psAdd;
+
+	PVR_DPF_RETURN;
+}
+
+PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName)
+{
+	TL_GLOBAL_DATA*  psGD = TLGGD();
+	PTL_SNODE 		 psn;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(pszName);
+
+	for (psn = psGD->psHead; psn; psn=psn->psNext)
+	{
+		if (psn->psStream && OSStringCompare(psn->psStream->szName, pszName)==0)
+		{
+			PVR_DPF_RETURN_VAL(psn);
+		}
+	}
+
+	PVR_DPF_RETURN_VAL(NULL);
+}
+
+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc)
+{
+	TL_GLOBAL_DATA*  psGD = TLGGD();
+	PTL_SNODE 		 psn;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDesc);
+
+	for (psn = psGD->psHead; psn; psn=psn->psNext)
+	{
+		if (psn->psRDesc == psDesc || psn->psWDesc == psDesc)
+		{
+			PVR_DPF_RETURN_VAL(psn);
+		}
+	}
+	PVR_DPF_RETURN_VAL(NULL);
+}
+
+static inline IMG_BOOL IsDigit(IMG_CHAR c)
+{
+	return c >= '0' && c <= '9';
+}
+
+static inline IMG_BOOL ReadNumber(const IMG_CHAR *pszBuffer,
+                                  IMG_UINT32 *pui32Number)
+{
+	IMG_CHAR acTmp[11] = {0}; // max 10 digits
+	IMG_UINT32 ui32Result;
+	IMG_UINT i;
+
+	for (i = 0; i < sizeof(acTmp) - 1; i++)
+	{
+		if (!IsDigit(*pszBuffer))
+			break;
+		acTmp[i] = *pszBuffer++;
+	}
+
+	/* if there are no digits or there is something after the number */
+	if (i == 0 || *pszBuffer != '\0')
+		return IMG_FALSE;
+
+	if (OSStringToUINT32(acTmp, 10, &ui32Result) != PVRSRV_OK)
+		return IMG_FALSE;
+
+	*pui32Number = ui32Result;
+
+	return IMG_TRUE;
+}
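+
+/* Illustrative behaviour of ReadNumber (not part of the driver):
+ *   ReadNumber("42",  &n) -> IMG_TRUE,  n == 42
+ *   ReadNumber("42x", &n) -> IMG_FALSE  (trailing non-digit)
+ *   ReadNumber("",    &n) -> IMG_FALSE  (no digits)
+ */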
+
+IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern,
+                          IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+                          IMG_UINT32 ui32Max)
+{
+	TL_GLOBAL_DATA *psGD = TLGGD();
+	PTL_SNODE psn;
+	IMG_UINT32 ui32Count = 0;
+	size_t uiLen;
+
+	PVR_ASSERT(pszNamePattern);
+
+	if ((uiLen = OSStringLength(pszNamePattern)) == 0)
+		return 0;
+
+	for (psn = psGD->psHead; psn; psn = psn->psNext)
+	{
+		if (OSStringNCompare(pszNamePattern, psn->psStream->szName, uiLen) != 0)
+			continue;
+
+		/* If aaszStreams is NULL we only count how many streams match
+		 * the given pattern. If it is a valid pointer we also return
+		 * the names. */
+		if (aaszStreams != NULL)
+		{
+			if (ui32Count >= ui32Max)
+				break;
+
+			/* all names are shorter than PRVSRVTL_MAX_STREAM_NAME_SIZE and NUL terminated */
+			OSStringNCopy(aaszStreams[ui32Count], psn->psStream->szName,
+			              PRVSRVTL_MAX_STREAM_NAME_SIZE);
+		}
+
+		ui32Count++;
+	}
+
+	return ui32Count;
+}
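+
+/* Illustrative two-pass usage of TLDiscoverStreamNodes (hypothetical caller;
+ * pattern and array name are assumptions): first count the matches by
+ * passing NULL, then fetch the names:
+ *
+ *   IMG_UINT32 ui32Count = TLDiscoverStreamNodes("hwperf", NULL, 0);
+ *   // ...allocate ui32Count name slots in aaszNames, then...
+ *   TLDiscoverStreamNodes("hwperf", aaszNames, ui32Count);
+ */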
+
+PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc)
+{
+	PTL_SNODE psn;
+
+	PVR_DPF_ENTERED;
+
+	psn = TLFindStreamNodeByDesc(psDesc);
+	if (psn == NULL)
+		PVR_DPF_RETURN_VAL(NULL);
+
+	PVR_ASSERT(psDesc == psn->psWDesc);
+
+	psn->uiWRefCount++;
+	psDesc->uiRefCount++;
+
+	PVR_DPF_RETURN_VAL(psn);
+}
+
+void TLReturnStreamNode(PTL_SNODE psNode)
+{
+	psNode->uiWRefCount--;
+	psNode->psWDesc->uiRefCount--;
+
+	PVR_ASSERT(psNode->uiWRefCount > 0);
+	PVR_ASSERT(psNode->psWDesc->uiRefCount > 0);
+}
+
+IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psRemove);
+
+	/* If there is a client connected to this stream, defer stream's deletion */
+	if (psRemove->psRDesc != NULL || psRemove->psWDesc != NULL)
+	{
+		PVR_DPF_RETURN_VAL (IMG_FALSE);
+	}
+
+	/* Remove stream from TL_GLOBAL_DATA's list and free stream node */
+	psRemove->psStream = NULL;
+	RemoveAndFreeStreamNode(psRemove);
+
+	PVR_DPF_RETURN_VAL (IMG_TRUE);
+}
+
+IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psNodeToRemove,
+                                          PTL_STREAM_DESC psSD)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psNodeToRemove);
+	PVR_ASSERT(psSD);
+
+	/* Decrement the reference count. For a descriptor obtained by a reader it
+	 * must reach 0 (only a single reader is allowed) and for descriptors
+	 * obtained by writers it must reach a value greater than or equal to 0
+	 * (multiple-writer model). */
+	psSD->uiRefCount--;
+
+	if (psSD == psNodeToRemove->psRDesc)
+	{
+		PVR_ASSERT(0 == psSD->uiRefCount);
+		/* Remove stream descriptor (i.e. stream reader context) */
+		psNodeToRemove->psRDesc = NULL;
+	}
+	else if (psSD == psNodeToRemove->psWDesc)
+	{
+		PVR_ASSERT(0 <= psSD->uiRefCount);
+
+		psNodeToRemove->uiWRefCount--;
+
+		/* Remove stream descriptor if reference == 0 */
+		if (0 == psSD->uiRefCount)
+		{
+			psNodeToRemove->psWDesc = NULL;
+		}
+	}
+
+	/* Do not free the stream node if there is a write reference (a producer
+	 * context) to the stream */
+	if (NULL != psNodeToRemove->psRDesc || NULL != psNodeToRemove->psWDesc ||
+	    0 != psNodeToRemove->uiWRefCount)
+	{
+		PVR_DPF_RETURN_VAL (IMG_FALSE);
+	}
+
+	/* Make the stream pointer NULL to prevent it from being destroyed in
+	 * RemoveAndFreeStreamNode. Cleanup of the stream should be done by the
+	 * calling context */
+	psNodeToRemove->psStream = NULL;
+	RemoveAndFreeStreamNode(psNodeToRemove);
+
+	PVR_DPF_RETURN_VAL (IMG_TRUE);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlintern.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlintern.h
new file mode 100644
index 0000000..28ff275
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlintern.h
@@ -0,0 +1,347 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer internals
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport Layer header used by TL internally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef TLINTERN_H
+#define TLINTERN_H
+
+
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+#include "lock.h"
+#include "tlstream.h"
+
+/* Forward declarations */
+typedef struct _TL_SNODE_* PTL_SNODE;
+
+/* To debug buffer utilisation enable this macro here and define
+ * PVRSRV_NEED_PVR_TRACE in the server pvr_debug.c and in tutils.c
+ * before the inclusion of pvr_debug.h.
+ * Issue pvrtutils 6 on target to see stream buffer utilisation. */
+//#define TL_BUFFER_STATS 1
+
+/*! TL stream structure container.
+ *    pbyBuffer   holds the circular buffer.
+ *    ui32Read    points to the beginning of available data, i.e. to where the
+ *                  next data to be read begins.
+ *    ui32Write   points to the end of the data that have been committed, i.e.
+ *                  this is where new data will be written.
+ *    ui32Pending number of bytes reserved in the last reserve call which have
+ *                  not yet been submitted. These data are therefore not ready
+ *                  to be transported.
+ *    hStreamWLock - provides atomic protection for the ui32Pending & ui32Write
+ *                   members of the structure for when they are checked and/or
+ *                   updated in the context of a stream writer (producer)
+ *                   calling DoTLStreamReserve() & TLStreamCommit().
+ *                 - Reader context is not multi-threaded, only one client per
+ *                   stream is allowed. Also note the read context may be in an
+ *                   ISR which prevents a design where locks can be held in the
+ *                   AcquireData/ReleaseData() calls. Thus this lock only
+ *                   protects the stream members from simultaneous writers.
+ *
+ *      ui32Read < ui32Write <= ui32Pending
+ *        where < and <= operators are overloaded to make sense in a circular way.
+ */
+typedef struct _TL_STREAM_
+{
+	IMG_CHAR                szName[PRVSRVTL_MAX_STREAM_NAME_SIZE];  /*!< String name identifier */
+	PVRSRV_DEVICE_NODE      *psDevNode;                             /*!< Underlying device on which the stream's buffer is allocated */
+	TL_OPMODE               eOpMode;                                /*!< Mode of Operation of TL Buffer */
+
+	IMG_BOOL                bWaitForEmptyOnDestroy;                 /*!< Flag: On destroying a non-empty stream block until
+                                                                         *         stream is drained. */
+	IMG_BOOL                bNoSignalOnCommit;                      /*!< Flag: Used to avoid the TL signalling waiting consumers
+                                                                         *         that new data is available on every commit. Producers
+                                                                         *         using this flag will need to manually signal when
+                                                                         *         appropriate using the TLStreamSync() API */
+
+	void                    (*pfOnReaderOpenCallback)(void *);      /*!< Optional on reader connect callback */
+	void                    *pvOnReaderOpenUserData;                /*!< On reader connect user data */
+	void                    (*pfProducerCallback)(void);            /*!< Optional producer callback of type TL_STREAM_SOURCECB */
+	void                    *pvProducerUserData;                    /*!< Producer callback user data */
+
+	struct _TL_STREAM_      *psNotifStream;                         /*!< Pointer to the stream to which notification will be sent */
+
+	volatile IMG_UINT32     ui32Read;                               /*!< Pointer to the beginning of available data */
+	volatile IMG_UINT32     ui32Write;                              /*!< Pointer to already committed data which are ready to be
+                                                                         *   copied to user space */
+	IMG_UINT32              ui32Pending;                            /*!< Count pending bytes reserved in buffer */
+	IMG_UINT32              ui32Size;                               /*!< Buffer size */
+	IMG_UINT32              ui32ThresholdUsageForSignal;            /*!< Buffer usage threshold at which a TL writer signals a blocked/
+	                                                                     *    waiting reader when transitioning from empty->non-empty */
+	IMG_UINT32              ui32MaxPacketSize;                      /*!< Max TL packet size */
+	IMG_BYTE                *pbyBuffer;                             /*!< Actual data buffer */
+
+	PTL_SNODE               psNode;                                 /*!< Ptr to parent stream node */
+	DEVMEM_MEMDESC          *psStreamMemDesc;                       /*!< MemDescriptor used to allocate buffer space through PMR */
+
+	IMG_HANDLE              hProducerEvent;	                        /*!< Handle to wait on if there is not enough space */
+	IMG_HANDLE              hProducerEventObj;                      /*!< Handle to signal blocked reserve calls */
+	IMG_BOOL                bSignalPending;                         /*!< Tracks if a "signal" is pending to be sent to a blocked/
+	                                                                     *    waiting reader */
+
+	POS_LOCK                hStreamWLock;                           /*!< Writers Lock for ui32Pending & ui32Write*/
+	POS_LOCK                hReadLock;                              /*!< Readers Lock for bReadPending & ui32Read*/
+	IMG_BOOL                bReadPending;                           /*!< Tracks if a read operation is pending or not*/
+	IMG_BOOL                bNoWrapPermanent;                       /*!< Flag: Prevents buffer wrap and subsequent data loss
+	                                                                     *    as well as resetting the read position on close. */
+
+#if defined(TL_BUFFER_STATS)
+	IMG_UINT32              ui32CntReadFails;                       /*!< Tracks how many times reader failed to acquire read lock */
+	IMG_UINT32              ui32CntReadSuccesses;                   /*!< Tracks how many times reader acquires read lock successfully */
+	IMG_UINT32              ui32CntWriteSuccesses;                  /*!< Tracks how many times writer acquires read lock successfully */
+	IMG_UINT32              ui32CntWriteWaits;                      /*!< Tracks how many times writer had to wait to acquire read lock */
+	IMG_UINT32              ui32CntNumWriteSuccess;	                /*!< Tracks how many write operations were successful*/
+	IMG_UINT32              ui32BufferUt;                           /*!< Buffer utilisation high watermark, see TL_BUFFER_STATS above */
+	IMG_UINT32              ui32MaxReserveWatermark;                /*!< Max stream reserve size that was ever requested by a writer */
+	IMG_UINT32              ui32SignalsSent;                        /*!< Number of signals that were actually sent by the write API */
+	ATOMIC_T                bNoReaderSinceFirstReserve;             /*!< Tracks if a read has been done since the buffer was last found empty */
+	IMG_UINT32              ui32TimeStart;                          /*!< Time at which a write (Reserve call) was done into an empty buffer.
+	                                                                     *    Guarded by hStreamWLock. */
+	IMG_UINT32              ui32MinTimeToFullInUs;                  /*!< Minimum time taken to (nearly) fully fill an empty buffer. Guarded
+	                                                                     *    by hStreamWLock. */
+	/* Behaviour counters, protected by hStreamWLock in case of
+	 * multi-threaded access */
+	IMG_UINT32              ui32NumCommits;     /*!< Counters used for analysing stream performance, see ++ loc */
+	IMG_UINT32              ui32SignalNotSent;  /*!< Counters used for analysing stream performance, see ++ loc */
+	IMG_UINT32              ui32ManSyncs;       /*!< Counters used for analysing stream performance, see ++ loc */
+	IMG_UINT32              ui32ProducerByteCount; /*!< Counters used for analysing stream performance, see ++ loc */
+
+	/* Not protected by the lock, incremented in the reader thread which is currently singular */
+	IMG_UINT32              ui32AcquireRead1;   /*!< Counters used for analysing stream performance, see ++ loc */
+	IMG_UINT32              ui32AcquireRead2;   /*!< Counters used for analysing stream performance, see ++ loc */
+#endif
+
+} TL_STREAM, *PTL_STREAM;
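+
+/* For illustration only, a sketch of how the circular indices above relate
+ * (assumes ui32Read/ui32Write are byte offsets into pbyBuffer of size
+ * ui32Size):
+ *
+ *   // bytes currently available to read, accounting for wrap-around
+ *   IMG_UINT32 uiAvail = (psStream->ui32Write >= psStream->ui32Read) ?
+ *       (psStream->ui32Write - psStream->ui32Read) :
+ *       (psStream->ui32Size - psStream->ui32Read + psStream->ui32Write);
+ */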
+
+/* There needs to be enough space reserved in the buffer for 2 minimal packets,
+ * and it needs to be aligned the same way the buffer is or there will be a
+ * compile error. */
+#define BUFFER_RESERVED_SPACE (2 * PVRSRVTL_PACKET_ALIGNMENT)
+
+/* ensure the space reserved follows the buffer's alignment */
+static_assert(!(BUFFER_RESERVED_SPACE&(PVRSRVTL_PACKET_ALIGNMENT-1)),
+			  "BUFFER_RESERVED_SPACE must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/* Define the largest value that a 32-bit unsigned integer, the type used for
+ * PVRSRVTL_PACKET_ALIGNMENT-sized arithmetic, can hold */
+#define MAX_UINT 0xffffFFFF
+
+/*! Defines the value used for TL_STREAM.ui32Pending when no reserve is
+ * outstanding on the stream. */
+#define NOTHING_PENDING IMG_UINT32_MAX
+
+
+/*
+ * Transport Layer Stream Descriptor types/defs
+ */
+typedef struct _TL_STREAM_DESC_
+{
+	PTL_SNODE   psNode;         /*!< Ptr to parent stream node */
+	IMG_UINT32  ui32Flags;      /*!< Flags supplied by client on stream open */
+	IMG_HANDLE  hReadEvent;     /*!< For wait call (only used/set in reader descriptors) */
+	IMG_INT     uiRefCount;     /*!< Reference count to the SD */
+
+#if defined(TL_BUFFER_STATS)
+	/* Behaviour counters, no multi-threading protection needed as they are
+	 * incremented in a single thread due to only supporting one reader
+	 * at present */
+	IMG_UINT32  ui32AcquireCount;  /*!< Counters used for analysing stream performance, see ++ loc */
+	IMG_UINT32  ui32NoData;        /*!< Counters used for analysing stream performance, see ++ loc */
+	IMG_UINT32  ui32NoDataSleep;   /*!< Counters used for analysing stream performance, see ++ loc */
+	IMG_UINT32  ui32Signalled;     /*!< Counters used for analysing stream performance, see ++ loc */
+	IMG_UINT32  ui32TimeoutEmpty;  /*!< Counters used for analysing stream performance, see ++ loc */
+	IMG_UINT32  ui32TimeoutData;   /*!< Counters used for analysing stream performance, see ++ loc */
+#endif
+	IMG_UINT32  ui32ReadLimit;     /*!< Limit buffer reads to data present in the
+                                        buffer at the time of stream open. */
+	IMG_UINT32  ui32ReadLen;       /*!< Size of data returned by initial Acquire */
+} TL_STREAM_DESC, *PTL_STREAM_DESC;
+
+PTL_STREAM_DESC TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3);
+
+#define TL_STREAM_KM_FLAG_MASK	0xFFFF0000
+#define TL_STREAM_FLAG_TEST		0x10000000
+#define TL_STREAM_FLAG_WRAPREAD	0x00010000
+
+#define TL_STREAM_UM_FLAG_MASK	0x0000FFFF
+
+#if defined(TL_BUFFER_STATS)
+#	define TL_COUNTER_INC(a)    ((a)++)
+#	define TL_COUNTER_ADD(a,b)  ((a) += (b))
+#else
+#	define TL_COUNTER_INC(a)    (void)(0)
+#	define TL_COUNTER_ADD(a,b)  (void)(0)
+#endif
+/*
+ * Transport Layer stream list node
+ */
+typedef struct _TL_SNODE_
+{
+	struct _TL_SNODE_*	psNext;				/*!< Linked list next element */
+	IMG_HANDLE			hReadEventObj;		/*!< Readers 'wait for data' event */
+	PTL_STREAM			psStream;			/*!< TL Stream object */
+	IMG_INT				uiWRefCount;		/*!< Stream writer reference count */
+	PTL_STREAM_DESC		psRDesc;			/*!< Stream reader 0 or ptr only */
+	PTL_STREAM_DESC		psWDesc;			/*!< Stream writer 0 or ptr only */
+} TL_SNODE;
+
+PTL_SNODE TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4);
+
+/*
+ * Transport Layer global top types and variables
+ * Use access function to obtain pointer.
+ *
+ * hTLGDLock - provides atomicity over read/check/write operations and
+ *             sequence of operations on uiClientCnt, psHead list of SNODEs and
+ *             the immediate members in a list element SNODE structure.
+ *           - This larger scope of responsibility for this lock helps avoid
+ *             the need for a lock in the SNODE structure.
+ *           - Lock held in the client (reader) context when streams are
+ *             opened/closed and in the server (writer) context when streams
+ *             are created/open/closed.
+ */
+typedef struct _TL_GDATA_
+{
+	IMG_HANDLE hTLEventObj;        /* Global TL signal object, new streams, etc */
+
+	IMG_UINT   uiClientCnt;        /* Counter to track the number of client stream connections. */
+	PTL_SNODE  psHead;             /* List of TL streams and associated client handle */
+
+	POS_LOCK   hTLGDLock;          /* Lock for structure AND psHead SNODE list */
+} TL_GLOBAL_DATA, *PTL_GLOBAL_DATA;
+
+/*
+ * Transport Layer Internal Kernel-Mode Server API
+ */
+TL_GLOBAL_DATA* TLGGD(void);		/* TLGetGlobalData() */
+
+PVRSRV_ERROR TLInit(void);
+void TLDeInit(void);
+
+void TLAddStreamNode(PTL_SNODE psAdd);
+PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName);
+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc);
+IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern,
+                          IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+                          IMG_UINT32 ui32Max);
+PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc);
+void TLReturnStreamNode(PTL_SNODE psNode);
+
+/****************************************************************************************
+ Function Name	: TLTryRemoveStreamAndFreeStreamNode
+
+ Inputs		: PTL_SNODE	Pointer to the TL_SNODE whose stream is requested
+			to be removed from TL_GLOBAL_DATA's list
+
+ Return Value	: IMG_TRUE	-	If the stream was made NULL and this
+					TL_SNODE was removed from the
+					TL_GLOBAL_DATA's list
+
+		  IMG_FALSE	-	If the stream wasn't made NULL as there
+					is a client connected to this stream
+
+ Description	: If there is no client currently connected to this stream then,
+			This function removes this TL_SNODE from the
+			TL_GLOBAL_DATA's list. The caller is responsible for the
+			cleanup of the TL_STREAM whose TL_SNODE may be removed
+
+		  Otherwise, this function does nothing
+*****************************************************************************************/
+IMG_BOOL  TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove);
+
+/*****************************************************************************************
+ Function Name	: TLUnrefDescAndTryFreeStreamNode
+
+ Inputs		: PTL_SNODE	Pointer to the TL_SNODE whose descriptor is
+			requested to be removed
+			: PTL_STREAM_DESC	Pointer to the STREAM_DESC
+
+ Return Value	: IMG_TRUE	-	If this	TL_SNODE was removed from the
+					TL_GLOBAL_DATA's list
+
+		  IMG_FALSE	-	Otherwise
+
+ Description	: This function removes the stream descriptor from this TL_SNODE
+			and, if there is no writer (producer context) currently bound to this
+			stream, this function removes this TL_SNODE from the TL_GLOBAL_DATA's
+			list. The caller is responsible for the cleanup of the TL_STREAM
+			whose TL_SNODE may be removed
+******************************************************************************************/
+IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psRemove, PTL_STREAM_DESC psSD);
+
+/*
+ * Transport Layer stream interface to server part declared here to avoid
+ * circular dependency.
+ */
+IMG_UINT32 TLStreamAcquireReadPos(PTL_STREAM psStream,
+                                  IMG_BOOL bDisableCallback,
+                                  IMG_UINT32* puiReadOffset);
+PVRSRV_ERROR TLStreamAdvanceReadPos(PTL_STREAM psStream,
+                                    IMG_UINT32 uiReadLen,
+                                    IMG_UINT32 uiOrigReadLen);
+void TLStreamResetReadPos(PTL_STREAM psStream);
+
+DEVMEM_MEMDESC* TLStreamGetBufferPointer(PTL_STREAM psStream);
+IMG_BOOL TLStreamOutOfData(IMG_HANDLE psStream);
+
+/****************************************************************************************
+ Function Name	: TLStreamDestroy
+
+ Inputs		: PTL_STREAM	Pointer to the TL_STREAM to be destroyed
+
+ Description	: This function performs all the clean-up operations required for
+			destruction of this stream
+*****************************************************************************************/
+void TLStreamDestroy(PTL_STREAM psStream);
+
+/*
+ * Test related functions
+ */
+PVRSRV_ERROR TUtilsInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR TUtilsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* TLINTERN_H */
+/******************************************************************************
+ End of file (tlintern.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlserver.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlserver.c
new file mode 100644
index 0000000..70d2ef6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlserver.c
@@ -0,0 +1,756 @@
+/*************************************************************************/ /*!
+@File
+@Title          KM server Transport Layer implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main bridge APIs for Transport Layer client functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+
+/*#define PVR_DPF_FUNCTION_TRACE_ON 1*/
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "connection_server.h"
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "tlintern.h"
+#include "tlstream.h"
+#include "tlserver.h"
+
+#include "pvrsrv_tlstreams.h"
+#define NO_STREAM_WAIT_PERIOD_US 2000000ULL
+#define NO_DATA_WAIT_PERIOD_US    500000ULL
+#define NO_ACQUIRE               0xffffffffU
+
+
+/*
+ * Transport Layer Client API Kernel-Mode bridge implementation
+ */
+PVRSRV_ERROR
+TLServerOpenStreamKM(const IMG_CHAR*  	   pszName,
+				     IMG_UINT32 		   ui32Mode,
+			   	     PTL_STREAM_DESC* 	   ppsSD,
+				     PMR** 				   ppsTLPMR)
+{
+	PVRSRV_ERROR	eError = PVRSRV_OK;
+	PVRSRV_ERROR	eErrorEO = PVRSRV_OK;
+	PTL_SNODE		psNode;
+	PTL_STREAM		psStream;
+	TL_STREAM_DESC *psNewSD = NULL;
+	IMG_HANDLE		hEvent;
+	IMG_BOOL		bIsWriteOnly = ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ?
+	                               IMG_TRUE : IMG_FALSE;
+	IMG_BOOL		bResetOnOpen = ui32Mode & PVRSRV_STREAM_FLAG_RESET_ON_OPEN ?
+	                               IMG_TRUE : IMG_FALSE;
+	IMG_BOOL		bNoOpenCB    = ui32Mode & PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK ?
+	                               IMG_TRUE : IMG_FALSE;
+	PTL_GLOBAL_DATA psGD = TLGGD();
+
+#if defined(PVR_DPF_FUNCTION_TRACE_ON)
+    PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered (%s, %x)", __func__, __LINE__, pszName, ui32Mode));
+#endif
+
+	PVR_ASSERT(pszName);
+
+	/* Acquire the TL_GLOBAL_DATA lock here because, if the following
+	 * TLFindStreamNodeByName returns a non-NULL PTL_SNODE, we update the
+	 * global data client count and the PTL_SNODE's psRDesc, and we want to
+	 * make sure the TL_SNODE is valid (e.g. has not been deleted) while we
+	 * are updating it
+	 */
+	OSLockAcquire (psGD->hTLGDLock);
+
+	psNode = TLFindStreamNodeByName(pszName);
+	if ((psNode == NULL) && (ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT))
+	{	/* Blocking code to wait for stream to be created if it does not exist */
+		eError = OSEventObjectOpen(psGD->hTLEventObj, &hEvent);
+		PVR_LOGG_IF_ERROR (eError, "OSEventObjectOpen", e0);
+
+		do
+		{
+			if ((psNode = TLFindStreamNodeByName(pszName)) == NULL)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "Stream %s does not exist, waiting...", pszName));
+
+				/* Release TL_GLOBAL_DATA lock before sleeping */
+				OSLockRelease (psGD->hTLGDLock);
+
+				/* Will exit OK or with timeout, both cases safe to ignore */
+				eErrorEO = OSEventObjectWaitTimeout(hEvent, NO_STREAM_WAIT_PERIOD_US);
+
+				/* Acquire lock after waking up */
+				OSLockAcquire (psGD->hTLGDLock);
+			}
+		}
+		while ((psNode == NULL) && (eErrorEO == PVRSRV_OK));
+
+		eError = OSEventObjectClose(hEvent);
+		PVR_LOGG_IF_ERROR (eError, "OSEventObjectClose", e0);
+	}
+
+	/* Make sure we have found a stream node after wait/search */
+	if (psNode == NULL)
+	{
+		/* If we exited the wait with a timeout, inform the caller */
+		if (eErrorEO == PVRSRV_ERROR_TIMEOUT)
+		{
+			eError = eErrorEO;
+		}
+		else
+		{
+			eError = PVRSRV_ERROR_NOT_FOUND;
+			PVR_DPF((PVR_DBG_ERROR, "Stream \"%s\" does not exist", pszName));
+		}
+		goto e0;
+	}
+
+	psStream = psNode->psStream;
+
+	/* Allocate memory for the stream buffer if it has not been allocated
+	 * already; the allocation happens on the first open. */
+	eError = TLAllocSharedMemIfNull(psStream);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to allocate memory for stream"
+				" \"%s\"", pszName));
+		goto e0;
+	}
+
+	if (bIsWriteOnly)
+	{
+
+		/* If psWDesc is NULL this is the first attempt to open the stream
+		 * for writing, so create the descriptor; otherwise increment its
+		 * reference count. */
+		if (psNode->psWDesc == NULL)
+		{
+			psNewSD = TLMakeStreamDesc(psNode, ui32Mode, NULL);
+			psNode->psWDesc = psNewSD;
+		}
+		else
+		{
+			psNewSD = psNode->psWDesc;
+			psNode->psWDesc->uiRefCount++;
+		}
+
+		if (!psNewSD)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream"
+			        " writer descriptor"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		psNode->uiWRefCount++;
+	}
+	else
+	{
+		/* Only one reader per stream supported */
+		if (psNode->psRDesc != NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Cannot open \"%s\" stream, stream already"
+			        " opened", pszName));
+			eError = PVRSRV_ERROR_ALREADY_OPEN;
+			goto e0;
+		}
+
+		/* Create an event handle for this client to wait on when no data in
+		 * stream buffer. */
+		eError = OSEventObjectOpen(psNode->hReadEventObj, &hEvent);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Not possible to open node's event object"));
+			eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT;
+			goto e0;
+		}
+
+		psNewSD = TLMakeStreamDesc(psNode, ui32Mode, hEvent);
+		psNode->psRDesc = psNewSD;
+
+		if (!psNewSD)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream descriptor"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e1;
+		}
+
+		PVR_DPF((PVR_DBG_VERBOSE,
+		        "TLServerOpenStreamKM evList=%p, evObj=%p",
+		        psNode->hReadEventObj,
+		        psNode->psRDesc->hReadEvent));
+	}
+
+	/* Copy the import handle back to the user mode API to enable access to
+	 * the stream buffer from user-mode process. */
+	eError = DevmemLocalGetImportHandle(TLStreamGetBufferPointer(psStream),
+	                                    (void**) ppsTLPMR);
+	PVR_LOGG_IF_ERROR(eError, "DevmemLocalGetImportHandle", e2);
+
+	psGD->uiClientCnt++;
+
+	/* Global data updated. Now release global lock */
+	OSLockRelease (psGD->hTLGDLock);
+
+	*ppsSD = psNewSD;
+
+	if (bResetOnOpen)
+	{
+		TLStreamReset(psStream);
+	}
+
+	/* This callback is executed only on reader open. Some actions performed
+	 * on reader open don't make much sense for writers, e.g. injection of a
+	 * time synchronisation packet into the stream. */
+	if (!bIsWriteOnly && psStream->pfOnReaderOpenCallback != NULL && !bNoOpenCB)
+	{
+		psStream->pfOnReaderOpenCallback(psStream->pvOnReaderOpenUserData);
+	}
+
+	/* psNode->uiWRefCount is set to '1' on stream create so the first open
+	 * is '2'. */
+	if (bIsWriteOnly && psStream->psNotifStream != NULL &&
+	    psNode->uiWRefCount == 2)
+	{
+		TLStreamMarkStreamOpen(psStream);
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Stream %s opened for %s", __func__, pszName,
+	        ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? "write" : "read"));
+
+	PVR_DPF_RETURN_OK;
+
+e2:
+	OSFreeMem(psNewSD);
+e1:
+	if (!bIsWriteOnly)
+		OSEventObjectClose(hEvent);
+e0:
+	OSLockRelease (psGD->hTLGDLock);
+	PVR_DPF_RETURN_RC (eError);
+}
+
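+/* Note on the wait path above (an illustrative summary, not new behaviour):
+ * a client that races with stream creation can pass
+ * PVRSRV_STREAM_FLAG_OPEN_WAIT in ui32Mode; the open then re-polls
+ * TLFindStreamNodeByName() on the global TL event object in
+ * NO_STREAM_WAIT_PERIOD_US slices instead of failing immediately with
+ * PVRSRV_ERROR_NOT_FOUND. */
+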
+PVRSRV_ERROR
+TLServerCloseStreamKM(PTL_STREAM_DESC psSD)
+{
+	PVRSRV_ERROR    eError = PVRSRV_OK;
+	PTL_GLOBAL_DATA psGD = TLGGD();
+	PTL_SNODE		psNode;
+	PTL_STREAM	psStream;
+	IMG_BOOL	bDestroyStream;
+	IMG_BOOL	bIsWriteOnly = psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO ?
+	                           IMG_TRUE : IMG_FALSE;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psSD);
+
+	/* Sanity check, quick exit if there are no streams */
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Check stream still valid */
+	psNode = TLFindStreamNodeByDesc(psSD);
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Since the descriptor is valid, the stream should not have been made NULL */
+	PVR_ASSERT (psNode->psStream);
+
+	/* Save the stream's reference in case its destruction is required after
+	 * this client is removed */
+	psStream = psNode->psStream;
+
+	/* Acquire TL_GLOBAL_DATA lock as the following TLRemoveDescAndTryFreeStreamNode
+	 * call will update the TL_SNODE's descriptor value */
+	OSLockAcquire (psGD->hTLGDLock);
+
+	/* Close event handle because event object list might be destroyed in
+	 * TLUnrefDescAndTryFreeStreamNode(). */
+	if (!bIsWriteOnly)
+	{
+		/* Reset the read position on close if the stream requires it. */
+		TLStreamResetReadPos(psStream);
+
+		/* Close and free the event handle resource used by this descriptor */
+		eError = OSEventObjectClose(psSD->hReadEvent);
+		if (eError != PVRSRV_OK)
+		{
+			/* Log error but continue as it seems best */
+			PVR_DPF((PVR_DBG_ERROR, "OSEventObjectClose() failed error %d",
+			        eError));
+			eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+		}
+	}
+	else if (psNode->uiWRefCount == 2 && psStream->psNotifStream != NULL)
+	{
+		/* psNode->uiWRefCount is set to '1' on stream create so the last close
+		 * before destruction is '2'. */
+		TLStreamMarkStreamClose(psStream);
+	}
+
+	/* Remove descriptor from stream object/list */
+	bDestroyStream = TLUnrefDescAndTryFreeStreamNode (psNode, psSD);
+
+	/* Assert the counter is sane after input data validated. */
+	PVR_ASSERT(psGD->uiClientCnt > 0);
+	psGD->uiClientCnt--;
+
+	OSLockRelease (psGD->hTLGDLock);
+
+	/* Destroy the stream if its TL_SNODE was removed from TL_GLOBAL_DATA */
+	if (bDestroyStream)
+	{
+		TLStreamDestroy (psStream);
+		psStream = NULL;
+	}
+
+	PVR_DPF((PVR_DBG_VERBOSE, "%s: Stream closed", __func__));
+
+	/* Free the descriptor if ref count reaches 0. */
+	if (psSD->uiRefCount == 0)
+	{
+		/* Free the stream descriptor object */
+		OSFreeMem(psSD);
+	}
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerReserveStreamKM(PTL_STREAM_DESC psSD,
+                        IMG_UINT32* ui32BufferOffset,
+                        IMG_UINT32 ui32Size,
+                        IMG_UINT32 ui32SizeMin,
+                        IMG_UINT32* pui32Available)
+{
+	TL_GLOBAL_DATA* psGD = TLGGD();
+	PTL_SNODE psNode;
+	IMG_UINT8* pui8Buffer = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psSD);
+
+	if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Sanity check, quick exit if there are no streams */
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	/* Acquire the global lock. We have to be sure that no one modifies
+	 * the list while we are looking for our stream. */
+	OSLockAcquire(psGD->hTLGDLock);
+	/* Check stream still valid */
+	psNode = TLFindAndGetStreamNodeByDesc(psSD);
+	OSLockRelease(psGD->hTLGDLock);
+
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Since we have a valid stream descriptor, the stream should not have been
+	 * made NULL by any producer context. */
+	PVR_ASSERT (psNode->psStream);
+
+	eError = TLStreamReserve2(psNode->psStream, &pui8Buffer, ui32Size,
+	                          ui32SizeMin, pui32Available);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "Failed to reserve %u (%u, %u) bytes in the stream, error %s.",
+				ui32Size, ui32SizeMin, *pui32Available, PVRSRVGETERRORSTRING(eError)));
+	}
+	else if (pui8Buffer == NULL)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "Not enough space in the stream."));
+		eError = PVRSRV_ERROR_STREAM_FULL;
+	}
+	else
+	{
+		*ui32BufferOffset = pui8Buffer - psNode->psStream->pbyBuffer;
+		PVR_ASSERT(*ui32BufferOffset < psNode->psStream->ui32Size);
+	}
+
+	OSLockAcquire(psGD->hTLGDLock);
+	TLReturnStreamNode(psNode);
+	OSLockRelease(psGD->hTLGDLock);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerCommitStreamKM(PTL_STREAM_DESC psSD,
+                       IMG_UINT32 ui32Size)
+{
+	TL_GLOBAL_DATA*	psGD = TLGGD();
+	PTL_SNODE psNode;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psSD);
+
+	if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Sanity check, quick exit if there are no streams */
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	/* Acquire the global lock. We have to be sure that no one modifies
+	 * the list while we are looking for our stream. */
+	OSLockAcquire(psGD->hTLGDLock);
+	/* Check stream still valid */
+	psNode = TLFindAndGetStreamNodeByDesc(psSD);
+	OSLockRelease(psGD->hTLGDLock);
+
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Since we have a valid stream descriptor, the stream should not have been
+	 * made NULL by any producer context. */
+	PVR_ASSERT (psNode->psStream);
+
+	eError = TLStreamCommit(psNode->psStream, ui32Size);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to commit data into stream."));
+	}
+
+	OSLockAcquire(psGD->hTLGDLock);
+	TLReturnStreamNode(psNode);
+	OSLockRelease(psGD->hTLGDLock);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern,
+                          IMG_UINT32 ui32Size,
+                          IMG_CHAR *pszStreams,
+                          IMG_UINT32 *pui32NumFound)
+{
+	PTL_SNODE psNode = NULL;
+	IMG_CHAR (*paszStreams)[PRVSRVTL_MAX_STREAM_NAME_SIZE] =
+			(IMG_CHAR (*)[PRVSRVTL_MAX_STREAM_NAME_SIZE]) pszStreams;
+
+	if (*pszNamePattern == '\0')
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	if (ui32Size % PRVSRVTL_MAX_STREAM_NAME_SIZE != 0)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	/* Sanity check, quick exit if there are no streams */
+	if (TLGGD()->psHead == NULL)
+	{
+		*pui32NumFound = 0;
+		return PVRSRV_OK;
+	}
+
+	OSLockAcquire(TLGGD()->hTLGDLock);
+
+	*pui32NumFound = TLDiscoverStreamNodes(pszNamePattern, paszStreams,
+	                                  ui32Size / PRVSRVTL_MAX_STREAM_NAME_SIZE);
+
+	/* Find "tlctrl" stream and reset it */
+	psNode = TLFindStreamNodeByName(PVRSRV_TL_CTLR_STREAM);
+	if (psNode != NULL)
+		TLStreamReset(psNode->psStream);
+
+	OSLockRelease(TLGGD()->hTLGDLock);
+
+	return PVRSRV_OK;
+}
+
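+/* A minimal caller-side sketch (illustrative only; the pattern string and
+ * MAX_STREAMS are hypothetical): the output buffer is treated as an array of
+ * fixed-size name slots, so ui32Size must be a multiple of
+ * PRVSRVTL_MAX_STREAM_NAME_SIZE:
+ *
+ *   #define MAX_STREAMS 8
+ *   IMG_CHAR aszNames[MAX_STREAMS][PRVSRVTL_MAX_STREAM_NAME_SIZE];
+ *   IMG_UINT32 ui32Found;
+ *   eError = TLServerDiscoverStreamsKM("hwperf", sizeof(aszNames),
+ *                                      (IMG_CHAR *)aszNames, &ui32Found);
+ */
+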
+PVRSRV_ERROR
+TLServerAcquireDataKM(PTL_STREAM_DESC psSD,
+                      IMG_UINT32*     puiReadOffset,
+                      IMG_UINT32*     puiReadLen)
+{
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	TL_GLOBAL_DATA*		psGD = TLGGD();
+	IMG_UINT32		    uiTmpOffset;
+	IMG_UINT32  		uiTmpLen = 0;
+	PTL_SNODE			psNode;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psSD);
+
+	TL_COUNTER_INC(psSD->ui32AcquireCount);
+
+	/* Sanity check, quick exit if there are no streams */
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	/* Check stream still valid */
+	psNode = TLFindStreamNodeByDesc(psSD);
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* If we are here, the stream will never be made NULL until this context itself
+	 * calls TLRemoveDescAndTryFreeStreamNode(). This is because the producer will
+	 * fail to make the stream NULL (by calling TLTryRemoveStreamAndFreeStreamNode)
+	 * when a valid stream descriptor is present (i.e. a client is connected).
+	 * Hence, no checks for stream being NON NULL are required after this. */
+	PVR_ASSERT (psNode->psStream);
+
+	psSD->ui32ReadLen = 0;	/* Handle NULL read returns */
+
+	do
+	{
+		uiTmpLen = TLStreamAcquireReadPos(psNode->psStream, psSD->ui32Flags & PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK, &uiTmpOffset);
+
+		/* Check whether the offset alone has already exceeded the read limit,
+		 * regardless of the data length, to ensure the client sees the RC */
+		if (psSD->ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT)
+		{
+			/* Check to see if we are reading beyond the read limit */
+			if (uiTmpOffset >= psSD->ui32ReadLimit)
+			{
+				PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_READLIMIT_REACHED);
+			}
+		}
+
+		if (uiTmpLen > 0)
+		{ /* Data found */
+
+			/* Check we have not already exceeded read limit offset+len */
+			if (psSD->ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT)
+			{
+				/* Adjust the read length if it goes beyond the read limit;
+				 * the limit is always guaranteed to fall on a packet boundary */
+				if ((uiTmpOffset + uiTmpLen) >= psSD->ui32ReadLimit)
+				{
+					uiTmpLen = psSD->ui32ReadLimit - uiTmpOffset;
+				}
+			}
+
+			*puiReadOffset = uiTmpOffset;
+			*puiReadLen = uiTmpLen;
+			psSD->ui32ReadLen = uiTmpLen;	/* Save the original data length in the stream desc */
+			PVR_DPF_RETURN_OK;
+		}
+		else if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING))
+		{ /* No data found blocking */
+
+			/* Instead of sleeping for the whole NO_DATA_WAIT_PERIOD_US, we sleep
+			 * in chunks of 168 ms. In a "deferred" signal scenario from the writer,
+			 * this gives us a chance to wake up (time out) early and continue
+			 * reading in case some data is available */
+			IMG_UINT64 ui64WaitInChunksUs = MIN(NO_DATA_WAIT_PERIOD_US, 168000ULL);
+			IMG_BOOL bDataFound = IMG_FALSE;
+
+			TL_COUNTER_INC(psSD->ui32NoDataSleep);
+
+			LOOP_UNTIL_TIMEOUT(NO_DATA_WAIT_PERIOD_US)
+			{
+				eError = OSEventObjectWaitTimeout(psSD->hReadEvent, ui64WaitInChunksUs);
+				if (eError == PVRSRV_OK)
+				{
+					bDataFound = IMG_TRUE;
+					TL_COUNTER_INC(psSD->ui32Signalled);
+					break;
+				}
+				else if (eError == PVRSRV_ERROR_TIMEOUT)
+				{
+					if (TLStreamOutOfData(psNode->psStream))
+					{
+						/* Stream still empty: keep waiting; if the overall
+						 * timeout expires we return PVRSRV_ERROR_TIMEOUT below */
+						continue;
+					}
+					else
+					{
+						bDataFound = IMG_TRUE;
+						TL_COUNTER_INC(psSD->ui32TimeoutData);
+						PVR_DPF((PVR_DBG_MESSAGE, "%s: Data found at timeout. Current BuffUt = %u",
+												 __func__, TLStreamGetUT(psNode->psStream)));
+						break;
+					}
+				}
+				else
+				{ /* Some other system error with event objects */
+					PVR_DPF_RETURN_RC(eError);
+				}
+			} END_LOOP_UNTIL_TIMEOUT();
+
+			if (bDataFound)
+			{
+				continue;
+			}
+			else
+			{
+				TL_COUNTER_INC(psSD->ui32TimeoutEmpty);
+				return PVRSRV_ERROR_TIMEOUT;
+			}
+		}
+		else
+		{ /* No data non-blocking */
+			TL_COUNTER_INC(psSD->ui32NoData);
+
+			/* With no data in non-blocking mode, uiReadOffset is set to
+			 * NO_ACQUIRE, signifying that no Release call is needed */
+			*puiReadOffset = NO_ACQUIRE;
+			*puiReadLen = 0;
+			PVR_DPF_RETURN_OK;
+		}
+	}
+	while (1);
+}
+
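+/* The chunked wait above in numbers (illustrative; the actual value of
+ * NO_DATA_WAIT_PERIOD_US is defined elsewhere): with a total wait budget of
+ * e.g. 500000 us, the reader does not sleep once for 500 ms but in up to
+ * ceil(500/168) = 3 shorter sleeps, re-testing TLStreamOutOfData() after each
+ * timeout, so data committed without a signal is picked up at most ~168 ms
+ * late. */
+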
+PVRSRV_ERROR
+TLServerReleaseDataKM(PTL_STREAM_DESC psSD,
+                      IMG_UINT32      uiReadOffset,
+                      IMG_UINT32      uiReadLen)
+{
+	TL_GLOBAL_DATA*		psGD = TLGGD();
+	PTL_SNODE			psNode;
+
+	PVR_DPF_ENTERED;
+
+	/* Unreferenced in release builds */
+	PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+
+	PVR_ASSERT(psSD);
+
+	/* Sanity check, quick exit if there are no streams */
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	if ((uiReadLen % PVRSRVTL_PACKET_ALIGNMENT != 0))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Check stream still valid */
+	psNode = TLFindStreamNodeByDesc(psSD);
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Since we have a valid stream descriptor, the stream should not have been
+	 * made NULL by any producer context. */
+	PVR_ASSERT (psNode->psStream);
+
+	PVR_DPF((PVR_DBG_VERBOSE, "TLReleaseDataKM uiReadOffset=%d, uiReadLen=%d", uiReadOffset, uiReadLen));
+
+	/* Move read position on to free up space in stream buffer */
+	PVR_DPF_RETURN_RC(TLStreamAdvanceReadPos(psNode->psStream, uiReadLen, psSD->ui32ReadLen));
+}
+
+PVRSRV_ERROR
+TLServerWriteDataKM(PTL_STREAM_DESC psSD,
+                    IMG_UINT32 ui32Size,
+                    IMG_BYTE* pui8Data)
+{
+	TL_GLOBAL_DATA* psGD = TLGGD();
+	PTL_SNODE psNode;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psSD);
+
+	if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Sanity check, quick exit if there are no streams */
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	OSLockAcquire(psGD->hTLGDLock);
+	/* Check stream still valid */
+	psNode = TLFindAndGetStreamNodeByDesc(psSD);
+	OSLockRelease(psGD->hTLGDLock);
+
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Since we have a valid stream descriptor, the stream should not have been
+	 * made NULL by any producer context. */
+	PVR_ASSERT (psNode->psStream);
+
+	eError = TLStreamWrite(psNode->psStream, pui8Data, ui32Size);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to write data to the stream (%d).",
+		        eError));
+	}
+
+	OSLockAcquire(psGD->hTLGDLock);
+	TLReturnStreamNode(psNode);
+	OSLockRelease(psGD->hTLGDLock);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+/*****************************************************************************
+ End of file (tlserver.c)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlserver.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlserver.h
new file mode 100644
index 0000000..8d2bd3b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlserver.h
@@ -0,0 +1,98 @@
+/*************************************************************************/ /*!
+@File
+@Title          KM server Transport Layer implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main bridge APIs for Transport Layer client functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __TLSERVER_H_
+#define __TLSERVER_H_
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+
+#include "tlintern.h"
+
+/*
+ * Transport Layer Client API Kernel-Mode bridge implementation
+ */
+
+PVRSRV_ERROR TLServerConnectKM(CONNECTION_DATA *psConnection);
+PVRSRV_ERROR TLServerDisconnectKM(CONNECTION_DATA *psConnection);
+
+PVRSRV_ERROR TLServerOpenStreamKM(const IMG_CHAR* pszName,
+                                  IMG_UINT32 ui32Mode,
+                                  PTL_STREAM_DESC* ppsSD,
+                                  PMR** ppsTLPMR);
+
+PVRSRV_ERROR TLServerCloseStreamKM(PTL_STREAM_DESC psSD);
+
+PVRSRV_ERROR TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern,
+                                       IMG_UINT32 ui32Size,
+                                       IMG_CHAR *pszStreams,
+                                       IMG_UINT32 *pui32NumFound);
+
+PVRSRV_ERROR TLServerReserveStreamKM(PTL_STREAM_DESC psSD,
+                                     IMG_UINT32* ui32BufferOffset,
+                                     IMG_UINT32 ui32Size,
+                                     IMG_UINT32 ui32SizeMin,
+                                     IMG_UINT32* pui32Available);
+
+PVRSRV_ERROR TLServerCommitStreamKM(PTL_STREAM_DESC psSD,
+                                    IMG_UINT32 ui32Size);
+
+PVRSRV_ERROR TLServerAcquireDataKM(PTL_STREAM_DESC psSD,
+                                   IMG_UINT32* puiReadOffset,
+                                   IMG_UINT32* puiReadLen);
+
+PVRSRV_ERROR TLServerReleaseDataKM(PTL_STREAM_DESC psSD,
+                                   IMG_UINT32 uiReadOffset,
+                                   IMG_UINT32 uiReadLen);
+
+PVRSRV_ERROR TLServerWriteDataKM(PTL_STREAM_DESC psSD,
+                                 IMG_UINT32 ui32Size,
+                                 IMG_BYTE *pui8Data);
+
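+/*
+ * Typical reader-side bridge call sequence (an illustrative sketch; error
+ * handling is omitted and the local variable names are hypothetical):
+ *
+ *   PTL_STREAM_DESC psSD;
+ *   PMR *psPMR;
+ *   IMG_UINT32 ui32Off, ui32Len;
+ *
+ *   TLServerOpenStreamKM("myStream", 0, &psSD, &psPMR);
+ *   TLServerAcquireDataKM(psSD, &ui32Off, &ui32Len);
+ *   ... consume ui32Len bytes at offset ui32Off in the stream buffer ...
+ *   TLServerReleaseDataKM(psSD, ui32Off, ui32Len);
+ *   TLServerCloseStreamKM(psSD);
+ */
+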
+#endif /* __TLSERVER_H_ */
+
+/*****************************************************************************
+ End of file (tlserver.h)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlstream.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlstream.c
new file mode 100644
index 0000000..ce81168
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlstream.c
@@ -0,0 +1,1634 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer kernel side API implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport Layer API implementation.
+                These functions are provided to driver components.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "allocmem.h"
+#include "devicemem.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "log2.h"
+
+#include "tlintern.h"
+#include "tlstream.h"
+
+#include "pvrsrv.h"
+
+#define EVENT_OBJECT_TIMEOUT_US 1000000ULL
+#define READ_PENDING_TIMEOUT_US 100000ULL
+
+/*! Compute the maximum TL packet size for this stream. The max packet size is
+ * the minimum of PVRSRVTL_MAX_PACKET_SIZE and (BufferSize / 2.5). This
+ * computation avoids a corner case, observed when the TL buffer size is
+ * smaller than twice the TL max packet size, where the read and write indices
+ * can be positioned such that the TL packet (write packet + padding packet)
+ * may be bigger than the buffer size itself.
+ */
+#define GET_TL_MAX_PACKET_SIZE( bufSize ) PVRSRVTL_ALIGN( MIN( PVRSRVTL_MAX_PACKET_SIZE, ( 2 * bufSize ) / 5 ) )
+
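+/* Worked example for the macro above (illustrative): for a 16 KB buffer,
+ * (2 * 16384) / 5 = 6553 bytes, so the limit is
+ * PVRSRVTL_ALIGN(MIN(PVRSRVTL_MAX_PACKET_SIZE, 6553)); the cap only drops
+ * below PVRSRVTL_MAX_PACKET_SIZE once the buffer holds fewer than 2.5
+ * maximum-size packets, which is exactly the corner case described above. */
+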
+/* Given the state of the buffer it returns a number of bytes that the client
+ * can use for a successful allocation. */
+static INLINE IMG_UINT32 suggestAllocSize(IMG_UINT32 ui32LRead,
+                                          IMG_UINT32 ui32LWrite,
+                                          IMG_UINT32 ui32CBSize,
+                                          IMG_UINT32 ui32ReqSizeMin,
+                                          IMG_UINT32 ui32MaxPacketSize)
+{
+	IMG_UINT32 ui32AvSpace = 0;
+
+	/* This could be written in fewer lines using the ? operator but it
+		would not be kind to potential readers of this source at all. */
+	if (ui32LRead > ui32LWrite)                          /* Buffer WRAPPED */
+	{
+		if ((ui32LRead - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE))
+		{
+			ui32AvSpace =  ui32LRead - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+		}
+	}
+	else                                                  /* Normal, no wrap */
+	{
+		if ((ui32CBSize - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE))
+		{
+			ui32AvSpace =  ui32CBSize - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+		}
+		else if ((ui32LRead - 0) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE))
+		{
+			ui32AvSpace =  ui32LRead - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+		}
+	}
+	/* The max size of a TL packet is currently a UINT16; adjust accordingly */
+	return MIN(ui32AvSpace, ui32MaxPacketSize);
+}
+
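+/* Illustrative example for suggestAllocSize (assuming an 8-byte
+ * PVRSRVTL_PACKETHDR, a BUFFER_RESERVED_SPACE of 16 and a small
+ * ui32ReqSizeMin): with ui32LRead == 0 and ui32LWrite == 768 in a 1024-byte
+ * buffer, the contiguous tail is 1024 - 768 = 256 bytes, so the suggestion is
+ * 256 - 8 - 16 = 232 bytes, capped at ui32MaxPacketSize. */
+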
+/* Returns the number of bytes left in the buffer; negative if there are none.
+ * Two 8-byte aligned values are reserved: one for the "write failed" buffer
+ * flag and one to distinguish the buffer-full state from the buffer-empty
+ * state.
+ * Always returns the free space minus 8, even when a "write failed" packet
+ * may already be in the stream before this write. */
+static INLINE IMG_INT
+circbufSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size)
+{
+	/* We need to reserve 8 bytes (one packet) in the buffer to be able to tell
+	 * empty buffers from full buffers, and one more for the "write failed"
+	 * packet */
+	if (ui32Read > ui32Write)
+	{
+		return (IMG_INT)ui32Read - (IMG_INT)ui32Write - (IMG_INT)BUFFER_RESERVED_SPACE;
+	}
+	else
+	{
+		return (IMG_INT)ui32size - ((IMG_INT)ui32Write - (IMG_INT)ui32Read) - (IMG_INT)BUFFER_RESERVED_SPACE;
+	}
+}
+
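+/* Spot checks of the arithmetic above (illustrative, assuming
+ * BUFFER_RESERVED_SPACE is 16, i.e. the two reserved 8-byte slots):
+ *
+ *   circbufSpaceLeft(0,   0,   1024) == 1008   empty buffer
+ *   circbufSpaceLeft(512, 256, 1024) ==  240   wrapped, read ahead of write
+ *   circbufSpaceLeft(8,   0,   1024) ==   -8   full, no space left
+ */
+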
+IMG_UINT32 TLStreamGetUT(IMG_HANDLE hStream)
+{
+	PTL_STREAM psStream = (PTL_STREAM) hStream;
+	IMG_UINT32 ui32LRead = psStream->ui32Read, ui32LWrite = psStream->ui32Write;
+
+	if (ui32LWrite >= ui32LRead)
+	{
+		return (ui32LWrite-ui32LRead);
+	}
+	else
+	{
+		return (psStream->ui32Size-ui32LRead+ui32LWrite);
+	}
+}
+
+PVRSRV_ERROR TLAllocSharedMemIfNull(IMG_HANDLE hStream)
+{
+	PTL_STREAM psStream = (PTL_STREAM) hStream;
+	PVRSRV_ERROR eError;
+
+	/* CPU Local memory used as these buffers are not accessed by the device.
+	 * CPU Uncached write combine memory used to improve write performance,
+	 * memory barrier added in TLStreamCommit to ensure data written to memory
+	 * before CB write point is updated before consumption by the reader.
+	 */
+	IMG_CHAR pszBufferLabel[PRVSRVTL_MAX_STREAM_NAME_SIZE + 20];
+	DEVMEM_FLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+	                            PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+	                            PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+	                            PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+	                            PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+	                            PVRSRV_MEMALLOCFLAG_CPU_LOCAL;  // TL for now is only used by host driver, so cpulocal mem suffices
+
+	/* Exit if memory has already been allocated. */
+	if (psStream->pbyBuffer != NULL)
+		return PVRSRV_OK;
+
+	OSSNPrintf(pszBufferLabel, sizeof(pszBufferLabel), "TLStreamBuf-%s",
+	           psStream->szName);
+
+	/* Use HostMemDeviceNode instead of psStream->psDevNode to benefit from faster
+	 * accesses to CPU local memory. When the framework to access CPU_LOCAL device
+	 * memory from GPU is fixed, we'll switch back to use psStream->psDevNode for
+	 * TL buffers */
+	eError = DevmemAllocateExportable((IMG_HANDLE)PVRSRVGetPVRSRVData()->psHostMemDeviceNode,
+	                                  (IMG_DEVMEM_SIZE_T) psStream->ui32Size,
+	                                  (IMG_DEVMEM_ALIGN_T) OSGetPageSize(),
+	                                  ExactLog2(OSGetPageSize()),
+	                                  uiMemFlags,
+	                                  pszBufferLabel,
+	                                  &psStream->psStreamMemDesc);
+	PVR_LOGG_IF_ERROR(eError, "DevmemAllocateExportable", e0);
+
+	eError = DevmemAcquireCpuVirtAddr(psStream->psStreamMemDesc,
+	                                  (void**) &psStream->pbyBuffer);
+	PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e1);
+
+	return PVRSRV_OK;
+
+e1:
+	DevmemFree(psStream->psStreamMemDesc);
+e0:
+	return eError;
+}
+
+void TLFreeSharedMem(IMG_HANDLE hStream)
+{
+	PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+	if (psStream->pbyBuffer != NULL)
+	{
+		DevmemReleaseCpuVirtAddr(psStream->psStreamMemDesc);
+		psStream->pbyBuffer = NULL;
+	}
+	if (psStream->psStreamMemDesc != NULL)
+	{
+		DevmemFree(psStream->psStreamMemDesc);
+		psStream->psStreamMemDesc = NULL;
+	}
+}
+
+/* Special space left routine for TL_FLAG_PERMANENT_NO_WRAP streams */
+static INLINE IMG_UINT
+bufSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size)
+{
+	/* The stream never wraps, so only the space between the write pointer and
+	 * the end of the buffer is available */
+	PVR_ASSERT(ui32Read<=ui32Write);
+	return ui32size - ui32Write;
+}
+
+/*******************************************************************************
+ * TL Server public API implementation.
+ ******************************************************************************/
+PVRSRV_ERROR
+TLStreamCreate(IMG_HANDLE *phStream,
+			   PVRSRV_DEVICE_NODE *psDevNode,
+			   const IMG_CHAR *szStreamName,
+			   IMG_UINT32 ui32Size,
+			   IMG_UINT32 ui32StreamFlags,
+               TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB,
+               void *pvOnReaderOpenUD,
+               TL_STREAM_SOURCECB pfProducerCB,
+               void *pvProducerUD)
+{
+	PTL_STREAM     psTmp;
+	PVRSRV_ERROR   eError;
+	IMG_HANDLE     hEventList;
+	PTL_SNODE      psn;
+	TL_OPMODE      eOpMode;
+
+	PVR_DPF_ENTERED;
+	/* Sanity checks: */
+	/* non-NULL handle required */
+	if (NULL == phStream)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	if (szStreamName == NULL || *szStreamName == '\0' ||
+	    OSStringLength(szStreamName) >= PRVSRVTL_MAX_STREAM_NAME_SIZE)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	if (NULL == psDevNode)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	eOpMode = ui32StreamFlags & TL_OPMODE_MASK;
+	if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST ))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid"));
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Acquire TL_GLOBAL_DATA lock here because, if the following TLFindStreamNodeByName()
+	 * returns NULL, a new TL_SNODE will be added to TL_GLOBAL_DATA's TL_SNODE list */
+	OSLockAcquire (TLGGD()->hTLGDLock);
+
+	/* Check if there already exists a stream with this name. */
+	psn = TLFindStreamNodeByName( szStreamName );
+	if (NULL != psn)
+	{
+		eError = PVRSRV_ERROR_ALREADY_EXISTS;
+		goto e0;
+	}
+
+	/* Allocate stream structure container (stream struct) for the new stream */
+	psTmp = OSAllocZMem(sizeof(TL_STREAM));
+	if (NULL == psTmp)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	OSStringCopy(psTmp->szName, szStreamName);
+
+	if (ui32StreamFlags & TL_FLAG_FORCE_FLUSH)
+	{
+		psTmp->bWaitForEmptyOnDestroy = IMG_TRUE;
+	}
+
+	psTmp->bNoSignalOnCommit = (ui32StreamFlags&TL_FLAG_NO_SIGNAL_ON_COMMIT) ?  IMG_TRUE : IMG_FALSE;
+	psTmp->bNoWrapPermanent = (ui32StreamFlags&TL_FLAG_PERMANENT_NO_WRAP) ?  IMG_TRUE : IMG_FALSE;
+
+	psTmp->eOpMode = eOpMode;
+	if (psTmp->eOpMode == TL_OPMODE_BLOCK)
+	{
+		/* Blocking cannot be combined with no-wrap type streams: space never
+		 * becomes available when reads take place, so there is no point in
+		 * blocking.
+		 */
+		if (psTmp->bNoWrapPermanent)
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto e1;
+		}
+	}
+
+	/* Additional synchronisation object required for some streams e.g. blocking */
+	eError = OSEventObjectCreate(NULL, &psTmp->hProducerEventObj);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+	/* Create an event handle for this kind of stream */
+	eError = OSEventObjectOpen(psTmp->hProducerEventObj, &psTmp->hProducerEvent);
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	psTmp->pfOnReaderOpenCallback = pfOnReaderOpenCB;
+	psTmp->pvOnReaderOpenUserData = pvOnReaderOpenUD;
+	/* Remember producer supplied CB and data for later */
+	psTmp->pfProducerCallback = (void(*)(void))pfProducerCB;
+	psTmp->pvProducerUserData = pvProducerUD;
+
+	psTmp->psNotifStream = NULL;
+
+	/* Round the requested bytes to a multiple of array elements' size, eg round 3 to 4 */
+	psTmp->ui32Size = PVRSRVTL_ALIGN(ui32Size);
+
+	/* Signalling from TLStreamCommit is deferred until the buffer is at least
+	 * one eighth (~12.5%) full */
+	psTmp->ui32ThresholdUsageForSignal = psTmp->ui32Size >> 3;
+	psTmp->ui32MaxPacketSize = GET_TL_MAX_PACKET_SIZE(psTmp->ui32Size);
+	psTmp->ui32Read = 0;
+	psTmp->ui32Write = 0;
+	psTmp->ui32Pending = NOTHING_PENDING;
+	psTmp->psDevNode = psDevNode;
+	psTmp->bReadPending = IMG_FALSE;
+	psTmp->bSignalPending = IMG_FALSE;
+
+#if defined (TL_BUFFER_STATS)
+	OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 0);
+	/* Setting MAX possible value for "minimum" time to full,
+	 * helps in the logic which calculates this time */
+	psTmp->ui32MinTimeToFullInUs = IMG_UINT32_MAX;
+#endif
+
+	/* Memory will be allocated on first connect to the stream */
+	if (!(ui32StreamFlags & TL_FLAG_ALLOCATE_ON_FIRST_OPEN))
+	{
+		/* Allocate memory for the circular buffer and export it to user space. */
+		eError = TLAllocSharedMemIfNull(psTmp);
+		PVR_LOGG_IF_ERROR(eError, "TLAllocSharedMem", e3);
+	}
+
+	/* Synchronisation object to synchronise with user side data transfers. */
+	eError = OSEventObjectCreate(psTmp->szName, &hEventList);
+	if (eError != PVRSRV_OK)
+	{
+		goto e4;
+	}
+
+	eError = OSLockCreate (&psTmp->hStreamWLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto e5;
+	}
+
+	eError = OSLockCreate (&psTmp->hReadLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto e6;
+	}
+
+	/* Now remember the stream in the global TL structures */
+	psn = TLMakeSNode(hEventList, (TL_STREAM *)psTmp, NULL);
+	if (psn == NULL)
+	{
+		eError=PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e7;
+	}
+
+	/* Stream node created, now reset the write reference count to 1
+	 * (i.e. this context's reference) */
+	psn->uiWRefCount = 1;
+
+	TLAddStreamNode(psn);
+
+	/* Release TL_GLOBAL_DATA lock as the new TL_SNODE is now added to the list */
+	OSLockRelease (TLGGD()->hTLGDLock);
+
+	/* Best effort signal, client wait timeout will ultimately let it find the
+	 * new stream if this fails, acceptable to avoid clean-up as it is tricky
+	 * at this point */
+	(void) OSEventObjectSignal(TLGGD()->hTLEventObj);
+
+	/* Pass the newly created stream handle back to caller */
+	*phStream = (IMG_HANDLE)psTmp;
+	PVR_DPF_RETURN_OK;
+
+e7:
+	OSLockDestroy(psTmp->hReadLock);
+e6:
+	OSLockDestroy(psTmp->hStreamWLock);
+e5:
+	OSEventObjectDestroy(hEventList);
+e4:
+	TLFreeSharedMem(psTmp);
+e3:
+	OSEventObjectClose(psTmp->hProducerEvent);
+e2:
+	OSEventObjectDestroy(psTmp->hProducerEventObj);
+e1:
+	OSFreeMem(psTmp);
+e0:
+	OSLockRelease (TLGGD()->hTLGDLock);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
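+/* A minimal producer-side sketch (illustrative; the stream name, size and
+ * flags below are arbitrary examples, not values mandated by this API):
+ *
+ *   IMG_HANDLE hStream;
+ *   eError = TLStreamCreate(&hStream, psDevNode, "example", 4096,
+ *                           TL_OPMODE_DROP_NEWER, NULL, NULL, NULL, NULL);
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       eError = TLStreamWrite(hStream, pui8Data, ui32DataSize);
+ *       TLStreamClose(hStream);
+ *   }
+ */
+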
+void TLStreamReset(IMG_HANDLE hStream)
+{
+	PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+	PVR_ASSERT(psStream != NULL);
+
+	OSLockAcquire(psStream->hStreamWLock);
+
+	while (psStream->ui32Pending != NOTHING_PENDING)
+	{
+		PVRSRV_ERROR eError;
+
+		/* We're in the middle of a write, so we cannot reset the stream.
+		 * Wait until the data is committed, releasing the lock while we
+		 * wait. */
+
+		/* Even when psStream->bNoSignalOnCommit is set we can still use
+		 * the timeout capability of the event object API (time in us). */
+		eError = OSEventObjectWaitTimeout(psStream->psNode->hReadEventObj, 100);
+		if (eError != PVRSRV_ERROR_TIMEOUT && eError != PVRSRV_OK)
+		{
+			PVR_LOGRN_IF_ERROR(eError, "OSEventObjectWaitTimeout");
+		}
+
+		OSLockAcquire(psStream->hStreamWLock);
+
+		/* Either a timeout occurred or the stream has been signalled.
+		 * In the former case check whether the data was committed; in the
+		 * latter, whether the stream has not been re-reserved. Either way we
+		 * have to go back to the loop condition.
+		 * If the stream has been released we exit with the lock held so
+		 * we can finally go and reset the stream. */
+	}
+
+	psStream->ui32Read = 0;
+	psStream->ui32Write = 0;
+	/* we know that ui32Pending already has correct value (no need to set) */
+
+	OSLockRelease(psStream->hStreamWLock);
+}
+
+PVRSRV_ERROR
+TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream)
+{
+	PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+	if (hStream == NULL || hNotifStream == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	psStream->psNotifStream = (PTL_STREAM) hNotifStream;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+TLStreamReconfigure(
+		IMG_HANDLE hStream,
+		IMG_UINT32 ui32StreamFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PTL_STREAM psTmp;
+	TL_OPMODE eOpMode;
+
+	PVR_DPF_ENTERED;
+
+	if (NULL == hStream)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	eOpMode = ui32StreamFlags & TL_OPMODE_MASK;
+	if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST ))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid"));
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	psTmp = (PTL_STREAM)hStream;
+
+	/* Prevent the TL Stream buffer from being written to
+	 * while its mode is being reconfigured
+	 */
+	OSLockAcquire (psTmp->hStreamWLock);
+	if (NOTHING_PENDING != psTmp->ui32Pending)
+	{
+		OSLockRelease (psTmp->hStreamWLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY);
+	}
+	psTmp->ui32Pending = 0;
+	OSLockRelease (psTmp->hStreamWLock);
+
+	psTmp->eOpMode = eOpMode;
+	if (psTmp->eOpMode == TL_OPMODE_BLOCK)
+	{
+		/* Blocking cannot be combined with no-wrap type streams: space never
+		 * becomes available when reads take place, so there is no point in
+		 * blocking.
+		 */
+		if (psTmp->bNoWrapPermanent)
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto e1;
+		}
+	}
+
+	OSLockAcquire (psTmp->hStreamWLock);
+	psTmp->ui32Pending = NOTHING_PENDING;
+	OSLockRelease (psTmp->hStreamWLock);
+e1:
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLStreamOpen(IMG_HANDLE     *phStream,
+             const IMG_CHAR *szStreamName)
+{
+	PTL_SNODE  psTmpSNode;
+
+	PVR_DPF_ENTERED;
+
+	if (NULL == phStream || NULL == szStreamName)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Acquire the TL_GLOBAL_DATA lock first to ensure that the TL_STREAM,
+	 * while returned and being modified, is not deleted by some other
+	 * context */
+	OSLockAcquire (TLGGD()->hTLGDLock);
+
+	/* Search for a stream node with a matching stream name */
+	psTmpSNode = TLFindStreamNodeByName(szStreamName);
+
+	if (NULL == psTmpSNode)
+	{
+		OSLockRelease (TLGGD()->hTLGDLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_FOUND);
+	}
+
+	if (psTmpSNode->psStream->psNotifStream != NULL &&
+	    psTmpSNode->uiWRefCount == 1)
+	{
+		TLStreamMarkStreamOpen(psTmpSNode->psStream);
+	}
+
+	/* The TL_SNODE->uiWRefCount governs the presence of this node in the
+	 * TL_GLOBAL_DATA list, i.e. when uiWRefCount falls to zero we try removing
+	 * this node from the TL_GLOBAL_DATA list. Hence, it is protected by the
+	 * TL_GLOBAL_DATA lock and not the TL_STREAM lock */
+	psTmpSNode->uiWRefCount++;
+
+	OSLockRelease (TLGGD()->hTLGDLock);
+
+	/* Return the stream handle to the caller */
+	*phStream = (IMG_HANDLE)psTmpSNode->psStream;
+
+	PVR_DPF_RETURN_VAL(PVRSRV_OK);
+}
+
+void
+TLStreamClose(IMG_HANDLE hStream)
+{
+	PTL_STREAM	psTmp;
+	IMG_BOOL	bDestroyStream;
+
+	PVR_DPF_ENTERED;
+
+	if (NULL == hStream)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				 "TLStreamClose failed as NULL stream handler passed, nothing done."));
+		PVR_DPF_RETURN;
+	}
+
+	psTmp = (PTL_STREAM)hStream;
+
+	/* Acquire TL_GLOBAL_DATA lock for updating the reference count as this will be required
+	 * in-case this TL_STREAM node is to be deleted */
+	OSLockAcquire (TLGGD()->hTLGDLock);
+
+	/* Decrement write reference counter of the stream */
+	psTmp->psNode->uiWRefCount--;
+
+	if (0 != psTmp->psNode->uiWRefCount)
+	{
+		/* The stream is still being used in other context(s) do not destroy
+		 * anything */
+
+		/* uiWRefCount == 1 means that the stream has been closed by its last
+		 * writer; the next close pairs with TLStreamCreate(). Send a
+		 * notification to indicate that no writers are connected to the
+		 * stream any more. */
+		if (psTmp->psNotifStream != NULL && psTmp->psNode->uiWRefCount == 1)
+		{
+			TLStreamMarkStreamClose(psTmp);
+		}
+
+		OSLockRelease (TLGGD()->hTLGDLock);
+		PVR_DPF_RETURN;
+	}
+	else
+	{
+		/* Now we try removing this TL_STREAM from TL_GLOBAL_DATA */
+
+		if (psTmp->bWaitForEmptyOnDestroy)
+		{
+			/* We won't require the TL_STREAM lock to be acquired here for accessing its read
+			 * and write offsets. REASON: We are here because there is no producer context
+			 * referencing this TL_STREAM, hence its ui32Write offset won't be changed now.
+			 * Also, the update of ui32Read offset is not protected by locks */
+			while (psTmp->ui32Read != psTmp->ui32Write)
+			{
+				/* Release lock before sleeping */
+				OSLockRelease (TLGGD()->hTLGDLock);
+
+				OSEventObjectWaitTimeout(psTmp->hProducerEvent, EVENT_OBJECT_TIMEOUT_US);
+
+				OSLockAcquire (TLGGD()->hTLGDLock);
+
+				/* Ensure destruction of stream is still required */
+				if (0 != psTmp->psNode->uiWRefCount)
+				{
+					OSLockRelease (TLGGD()->hTLGDLock);
+					PVR_DPF_RETURN;
+				}
+			}
+		}
+
+		/* Try removing the stream from TL_GLOBAL_DATA */
+		bDestroyStream = TLTryRemoveStreamAndFreeStreamNode (psTmp->psNode);
+
+		OSLockRelease (TLGGD()->hTLGDLock);
+
+		if (bDestroyStream)
+		{
+			/* Destroy the stream if it was removed from TL_GLOBAL_DATA */
+			TLStreamDestroy (psTmp);
+			psTmp = NULL;
+		}
+		PVR_DPF_RETURN;
+	}
+}
+
+/*
+ * DoTLSetPacketHeader
+ *
+ * Ensure that whenever we update a Header we always add the RESERVED field
+ */
+static inline void DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR, IMG_UINT32);
+static inline void
+DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR pHdr,
+				IMG_UINT32 ui32Val)
+{
+	PVR_ASSERT(((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) == 0);
+
+	/* Check that this is a correctly aligned packet header. */
+	if (((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) != 0)
+	{
+		/* Should return an error because the header is misaligned */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Misaligned header @ %p", __func__, pHdr));
+		pHdr->uiTypeSize = ui32Val;
+	}
+	else
+	{
+		pHdr->uiTypeSize = ui32Val;
+		pHdr->uiReserved = PVRSRVTL_PACKETHDR_RESERVED;
+	}
+}
+
+static PVRSRV_ERROR
+DoTLStreamReserve(IMG_HANDLE hStream,
+				IMG_UINT8 **ppui8Data,
+				IMG_UINT32 ui32ReqSize,
+				IMG_UINT32 ui32ReqSizeMin,
+				PVRSRVTL_PACKETTYPE ePacketType,
+				IMG_UINT32* pui32AvSpace,
+				IMG_UINT32* pui32Flags)
+{
+	PTL_STREAM psTmp;
+	IMG_UINT32 *pui32Buf, ui32LRead, ui32LWrite, ui32LPending, lReqSizeAligned, lReqSizeActual, ui32CreateFreeSpace;
+	IMG_INT pad, iFreeSpace;
+	IMG_UINT8 *pui8IncrRead = NULL;
+	PVRSRVTL_PPACKETHDR pHdr;
+
+	PVR_DPF_ENTERED;
+	if (pui32AvSpace) *pui32AvSpace = 0;
+	if (pui32Flags) *pui32Flags = 0;
+
+	if (NULL == hStream)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	psTmp = (PTL_STREAM)hStream;
+
+	/* Assert used as the packet type parameter is currently only provided
+	 * by the TL APIs, not the calling client */
+	PVR_ASSERT((PVRSRVTL_PACKETTYPE_UNDEF < ePacketType) && (PVRSRVTL_PACKETTYPE_LAST >= ePacketType));
+
+	/* The buffer is only used in "rounded" (aligned) chunks */
+	lReqSizeAligned = PVRSRVTL_ALIGN(ui32ReqSize);
+
+	/* Lock the stream before reading its pending value, because if pending is
+	 * set to NOTHING_PENDING, we update the pending value such that subsequent
+	 * calls to this function from other context(s) fail with PVRSRV_ERROR_NOT_READY */
+	OSLockAcquire (psTmp->hStreamWLock);
+
+#if defined (TL_BUFFER_STATS)
+	/* If writing into an empty buffer, start recording time-to-full */
+	if (psTmp->ui32Read == psTmp->ui32Write)
+	{
+		OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 1);
+		psTmp->ui32TimeStart = OSClockus();
+	}
+
+	if (ui32ReqSize > psTmp->ui32MaxReserveWatermark)
+	{
+		psTmp->ui32MaxReserveWatermark = ui32ReqSize;
+	}
+#endif
+
+	/* Get a local copy of the stream buffer parameters */
+	ui32LRead  = psTmp->ui32Read;
+	ui32LWrite = psTmp->ui32Write;
+	ui32LPending = psTmp->ui32Pending;
+
+	/*  Multiple pending reserves are not supported. */
+	if (NOTHING_PENDING != ui32LPending)
+	{
+		OSLockRelease (psTmp->hStreamWLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY);
+	}
+
+	if (psTmp->ui32MaxPacketSize < lReqSizeAligned)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Requested Size: %u > TL Max Packet size: %u", lReqSizeAligned, psTmp->ui32MaxPacketSize));
+		psTmp->ui32Pending = NOTHING_PENDING;
+		if (pui32AvSpace)
+		{
+			*pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize);
+			if (*pui32AvSpace == 0 && psTmp->eOpMode == TL_OPMODE_DROP_OLDEST)
+			{
+				*pui32AvSpace = psTmp->ui32MaxPacketSize;
+				PVR_DPF((PVR_DBG_MESSAGE, "Opmode is Drop_Oldest, so Available Space changed to: %u", *pui32AvSpace));
+			}
+		}
+		OSLockRelease (psTmp->hStreamWLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED);
+	}
+
+	/* Prevent other threads from entering this region before we are done
+	 * updating the pending value and write offset (in case of padding). This
+	 * is not exactly a lock but a signal for other contexts that there is a
+	 * TLStreamCommit operation pending on this stream */
+	psTmp->ui32Pending = 0;
+
+	OSLockRelease (psTmp->hStreamWLock);
+
+	/* If there is not enough contiguous space following the current write
+	 * position to hold the header plus the requested size, pad out to the end
+	 * of the buffer */
+	if (psTmp->ui32Size < ui32LWrite + lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR))
+	{
+		pad = psTmp->ui32Size - ui32LWrite;
+	}
+	else
+	{
+		pad = 0;
+	}
+
+	lReqSizeActual = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) + pad;
+	if (psTmp->bNoWrapPermanent)
+	{
+		iFreeSpace = bufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size);
+	}
+	else
+	{
+		iFreeSpace = circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size);
+	}
+
+	if (iFreeSpace < (IMG_INT) lReqSizeActual)
+	{
+		/* If this is a blocking reserve and there is not enough space then wait. */
+		if (psTmp->eOpMode == TL_OPMODE_BLOCK)
+		{
+			/* Stream create should stop us entering here when
+			 * psTmp->bNoWrapPermanent is true as it does not make sense to
+			 * block on permanent data streams. */
+			PVR_ASSERT(psTmp->bNoWrapPermanent == IMG_FALSE);
+			while (circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)
+			       < (IMG_INT) lReqSizeActual)
+			{
+				/* The TL bridge is lockless now, so changing to OSEventObjectWait() */
+				OSEventObjectWait(psTmp->hProducerEvent);
+				// update local copies.
+				ui32LRead  = psTmp->ui32Read;
+				ui32LWrite = psTmp->ui32Write;
+			}
+		}
+		/* Data overwriting, also insert PACKETS_DROPPED flag into existing packet */
+		else if (psTmp->eOpMode == TL_OPMODE_DROP_OLDEST)
+		{
+			OSLockAcquire(psTmp->hReadLock);
+
+			while (psTmp->bReadPending)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "Waiting for the pending read operation to complete."));
+				OSLockRelease(psTmp->hReadLock);
+#if defined(TL_BUFFER_STATS)
+				TL_COUNTER_INC(psTmp->ui32CntWriteWaits);
+#endif
+				(void) OSEventObjectWaitTimeout(psTmp->hProducerEvent, READ_PENDING_TIMEOUT_US);
+				OSLockAcquire(psTmp->hReadLock);
+			}
+
+#if defined(TL_BUFFER_STATS)
+			TL_COUNTER_INC(psTmp->ui32CntWriteSuccesses);
+#endif
+			ui32LRead = psTmp->ui32Read;
+
+			if ( circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)
+			     < (IMG_INT) lReqSizeActual )
+			{
+				ui32CreateFreeSpace = 5 * (psTmp->ui32Size / 100);
+				if (ui32CreateFreeSpace < lReqSizeActual)
+				{
+					ui32CreateFreeSpace = lReqSizeActual;
+				}
+
+				while (ui32CreateFreeSpace > (IMG_UINT32)circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size))
+				{
+					pui8IncrRead = &psTmp->pbyBuffer[ui32LRead];
+					ui32LRead += (sizeof(PVRSRVTL_PACKETHDR) + PVRSRVTL_ALIGN( GET_PACKET_DATA_LEN(pui8IncrRead) ));
+
+					/* Check if buffer needs to wrap */
+					if (ui32LRead >= psTmp->ui32Size)
+					{
+						ui32LRead = 0;
+					}
+				}
+				psTmp->ui32Read = ui32LRead;
+				pui8IncrRead = &psTmp->pbyBuffer[psTmp->ui32Read];
+
+				pHdr = GET_PACKET_HDR(pui8IncrRead);
+				DoTLSetPacketHeader(pHdr, SET_PACKETS_DROPPED(pHdr));
+			}
+			/* else fall through as there is enough space now to write the data */
+
+			OSLockRelease(psTmp->hReadLock);
+			/* If the caller supplied a flags variable, set the OVERWRITE bit */
+			if (pui32Flags) *pui32Flags |= TL_FLAG_OVERWRITE_DETECTED;
+		}
+		/* No data overwriting, insert write_failed flag and return */
+		else if (psTmp->eOpMode == TL_OPMODE_DROP_NEWER)
+		{
+			/* Caller should not try to use ppui8Data,
+			 * NULLify to give user a chance of avoiding memory corruption */
+			*ppui8Data = NULL;
+
+			/* This flag should not be inserted two consecutive times, so
+			 * check the last ui32 in case it was a packet drop packet. */
+			pui32Buf = ui32LWrite ?
+				(IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite - sizeof(PVRSRVTL_PACKETHDR)] :
+				/* Previous four bytes are not guaranteed to be a packet header... */
+				(IMG_UINT32*)&psTmp->pbyBuffer[psTmp->ui32Size - PVRSRVTL_PACKET_ALIGNMENT];
+
+			pHdr = GET_PACKET_HDR(pui32Buf);
+			if (PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED != GET_PACKET_TYPE(pHdr))
+			{
+				/* Insert size-stamped packet header */
+				pui32Buf = (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite];
+				pHdr = GET_PACKET_HDR(pui32Buf);
+				DoTLSetPacketHeader(pHdr, PVRSRVTL_SET_PACKET_WRITE_FAILED);
+				ui32LWrite += sizeof(PVRSRVTL_PACKETHDR);
+				ui32LWrite %= psTmp->ui32Size;
+				iFreeSpace -= sizeof(PVRSRVTL_PACKETHDR);
+			}
+
+			OSLockAcquire (psTmp->hStreamWLock);
+			psTmp->ui32Write = ui32LWrite;
+			psTmp->ui32Pending = NOTHING_PENDING;
+			OSLockRelease (psTmp->hStreamWLock);
+
+			if (pui32AvSpace)
+			{
+				*pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize);
+			}
+
+			/* Inform the caller of permanent stream misuse: no space is left
+			 * and the size of the stream will need to be increased. */
+			if (psTmp->bNoWrapPermanent)
+			{
+				PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE);
+			}
+
+			PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_FULL);
+		}
+	}
+
+	/* The easy case: buffer has enough space to hold the requested packet (data + header) */
+
+	/* Should we treat the buffer as a non-circular buffer? */
+	if (psTmp->bNoWrapPermanent)
+	{
+		iFreeSpace = bufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size);
+	}
+	else
+	{
+		iFreeSpace = circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size);
+	}
+
+	if (iFreeSpace >= (IMG_INT) lReqSizeActual)
+	{
+		if (pad)
+		{
+			/* Inserting padding packet. */
+			pui32Buf = (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite];
+			pHdr = GET_PACKET_HDR(pui32Buf);
+			DoTLSetPacketHeader(pHdr,
+				PVRSRVTL_SET_PACKET_PADDING(pad-sizeof(PVRSRVTL_PACKETHDR)));
+
+			/* CAUTION: the used pad value should always result in a properly
+			 *          aligned ui32LWrite pointer, which in this case is 0 */
+			ui32LWrite = (ui32LWrite + pad) % psTmp->ui32Size;
+			/* Detect unaligned pad value */
+			PVR_ASSERT(ui32LWrite == 0);
+		}
+		/* Insert size-stamped packet header */
+		pui32Buf = (IMG_UINT32*) &psTmp->pbyBuffer[ui32LWrite];
+
+		pHdr = GET_PACKET_HDR(pui32Buf);
+		DoTLSetPacketHeader(pHdr,
+			PVRSRVTL_SET_PACKET_HDR(ui32ReqSize, ePacketType));
+
+		/* return the next position in the buffer to the user */
+		*ppui8Data =  &psTmp->pbyBuffer[ ui32LWrite+sizeof(PVRSRVTL_PACKETHDR) ];
+
+		/* update pending offset: size stamp + data  */
+		ui32LPending = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR);
+	}
+	else
+	{
+		OSLockAcquire (psTmp->hStreamWLock);
+		psTmp->ui32Pending = NOTHING_PENDING;
+		OSLockRelease (psTmp->hStreamWLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	/* Acquire stream lock for updating stream parameters */
+	OSLockAcquire (psTmp->hStreamWLock);
+	psTmp->ui32Write = ui32LWrite;
+	psTmp->ui32Pending = ui32LPending;
+	OSLockRelease (psTmp->hStreamWLock);
+
+#if defined(TL_BUFFER_STATS)
+	TL_COUNTER_INC(psTmp->ui32CntNumWriteSuccess);
+#endif
+
+	PVR_DPF_RETURN_OK;
+}
+
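+/* Worked example of the padding logic above (illustrative, assuming an
+ * 8-byte PVRSRVTL_PACKETHDR): with ui32Size == 1024, ui32LWrite == 1000 and
+ * an aligned request of 64 bytes, 1024 < 1000 + 64 + 8, so pad == 24. A
+ * padding packet with a 16-byte payload is written at offset 1000, the write
+ * pointer wraps to 0 and the reservation actually consumes
+ * 64 + 8 + 24 == 96 bytes of buffer space. */
+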
+PVRSRV_ERROR
+TLStreamReserve(IMG_HANDLE hStream,
+				IMG_UINT8 **ppui8Data,
+				IMG_UINT32 ui32Size)
+{
+	return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL, NULL);
+}
+
+PVRSRV_ERROR
+TLStreamReserve2(IMG_HANDLE hStream,
+                IMG_UINT8  **ppui8Data,
+                IMG_UINT32 ui32Size,
+                IMG_UINT32 ui32SizeMin,
+                IMG_UINT32* pui32Available)
+{
+	return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32SizeMin, PVRSRVTL_PACKETTYPE_DATA, pui32Available, NULL);
+}
+
+PVRSRV_ERROR
+TLStreamReserveReturnFlags(IMG_HANDLE hStream,
+						   IMG_UINT8  **ppui8Data,
+						   IMG_UINT32 ui32Size,
+						   IMG_UINT32* pui32Flags)
+{
+	return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL, pui32Flags);
+}
+
+PVRSRV_ERROR
+TLStreamCommit(IMG_HANDLE hStream, IMG_UINT32 ui32ReqSize)
+{
+	PTL_STREAM psTmp;
+	IMG_UINT32 ui32LRead, ui32OldWrite, ui32LWrite, ui32LPending;
+	PVRSRV_ERROR eError;
+
+#if defined(TL_BUFFER_STATS)
+	IMG_UINT32 ui32UnreadBytes;
+#endif
+
+	PVR_DPF_ENTERED;
+
+	if (NULL == hStream)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	psTmp = (PTL_STREAM)hStream;
+
+	/* Get a local copy of the stream buffer parameters */
+	ui32LRead = psTmp->ui32Read;
+	ui32LWrite = psTmp->ui32Write;
+	ui32LPending = psTmp->ui32Pending;
+
+	ui32OldWrite = ui32LWrite;
+
+	// Space in buffer is aligned
+	ui32ReqSize = PVRSRVTL_ALIGN(ui32ReqSize) + sizeof(PVRSRVTL_PACKETHDR);
+
+	/* Check that a reserve is pending and that ReqSize + packet header size
+	 * does not exceed it. */
+	if ((ui32LPending == NOTHING_PENDING) || (ui32ReqSize > ui32LPending))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+	}
+
+	/* Update pointer to written data. */
+	ui32LWrite = (ui32LWrite + ui32ReqSize) % psTmp->ui32Size;
+
+	/* and reset LPending to NOTHING_PENDING since the data is now submitted */
+	ui32LPending = NOTHING_PENDING;
+
+#if defined(TL_BUFFER_STATS)
+	/* Calculate new number of bytes unread */
+	if (ui32LWrite > ui32LRead)
+	{
+		ui32UnreadBytes = (ui32LWrite-ui32LRead);
+	}
+	else if (ui32LWrite < ui32LRead)
+	{
+		ui32UnreadBytes = (psTmp->ui32Size-ui32LRead+ui32LWrite);
+	}
+	else
+	{ /* else equal, ignore */
+		ui32UnreadBytes = 0;
+	}
+
+	/* Calculate high water mark for debug purposes */
+	if (ui32UnreadBytes > psTmp->ui32BufferUt)
+	{
+		psTmp->ui32BufferUt = ui32UnreadBytes;
+	}
+#endif
+
+	/* Memory barrier required to ensure prior data written by writer is
+	 * flushed from WC buffer to main memory. */
+	OSWriteMemoryBarrier();
+
+	/* Acquire stream lock to ensure other context(s) (if any)
+	 * wait on the lock (in DoTLStreamReserve) for consistent values
+	 * of write offset and pending value */
+	OSLockAcquire (psTmp->hStreamWLock);
+
+	/* Update stream buffer parameters to match local copies */
+	psTmp->ui32Write = ui32LWrite;
+	psTmp->ui32Pending = ui32LPending;
+
+	TL_COUNTER_ADD(psTmp->ui32ProducerByteCount, ui32ReqSize);
+	TL_COUNTER_INC(psTmp->ui32NumCommits);
+
+#if defined(TL_BUFFER_STATS)
+	/* IF there has been no-reader since first reserve on an empty-buffer,
+	 * AND current utilisation is considerably high (90%), calculate the
+	 * time taken to fill up the buffer */
+	if ((OSAtomicRead(&psTmp->bNoReaderSinceFirstReserve) == 1) &&
+	    (TLStreamGetUT(psTmp) >= 90 * psTmp->ui32Size/100))
+	{
+		IMG_UINT32 ui32TimeToFullInUs = OSClockus() - psTmp->ui32TimeStart;
+		if (psTmp->ui32MinTimeToFullInUs > ui32TimeToFullInUs)
+		{
+			psTmp->ui32MinTimeToFullInUs = ui32TimeToFullInUs;
+		}
+		/* The following write ensures ui32MinTimeToFullInUs doesn't lose its
+		 * real (expected) value if there is no reader until the next Commit call */
+		OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 0);
+	}
+#endif
+
+	if (!psTmp->bNoSignalOnCommit)
+	{
+		/* If we have transitioned from an empty buffer to a non-empty buffer,
+		 * we must signal a possibly waiting consumer. BUT let the signal be
+		 * "deferred" until the buffer is at least 'ui32ThresholdUsageForSignal'
+		 * bytes full. This avoids a race between OSEventObjectSignal and
+		 * OSEventObjectWaitTimeout (in TLServerAcquireDataKM), where a "signal"
+		 * might happen before the "wait", resulting in the signal being lost
+		 * and the stream reader waiting even though the buffer is no longer
+		 * empty */
+		if (ui32OldWrite == ui32LRead)
+		{
+			psTmp->bSignalPending = IMG_TRUE;
+		}
+
+		if (psTmp->bSignalPending && (TLStreamGetUT(psTmp) >= psTmp->ui32ThresholdUsageForSignal))
+		{
+			TL_COUNTER_INC(psTmp->ui32SignalsSent);
+			psTmp->bSignalPending = IMG_FALSE;
+
+			/* Signal consumers that may be waiting */
+			eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj);
+			if (eError != PVRSRV_OK)
+			{
+				OSLockRelease (psTmp->hStreamWLock);
+				PVR_DPF_RETURN_RC(eError);
+			}
+		}
+		else
+		{
+			TL_COUNTER_INC(psTmp->ui32SignalNotSent);
+		}
+	}
+	OSLockRelease (psTmp->hStreamWLock);
+
+	PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLStreamWrite(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size)
+{
+	IMG_BYTE *pbyDest = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	if (NULL == hStream)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	eError = TLStreamReserve(hStream, &pbyDest, ui32Size);
+	if (PVRSRV_OK != eError)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+	else
+	{
+		OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size);
+		eError = TLStreamCommit(hStream, ui32Size);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF_RETURN_RC(eError);
+		}
+	}
+
+	PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLStreamWriteRetFlags(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size, IMG_UINT32 *pui32Flags)
+{
+	IMG_BYTE *pbyDest = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	if (NULL == hStream)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	eError = TLStreamReserveReturnFlags(hStream, &pbyDest, ui32Size, pui32Flags);
+	if (PVRSRV_OK != eError)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+	else
+	{
+		OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size);
+		eError = TLStreamCommit(hStream, ui32Size);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF_RETURN_RC(eError);
+		}
+	}
+
+	PVR_DPF_RETURN_OK;
+}
+
+void TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo)
+{
+	IMG_DEVMEM_SIZE_T actual_req_size;
+	IMG_DEVMEM_ALIGN_T align = 4; /* Low dummy value so the real value can be obtained */
+
+	actual_req_size = 2;
+	/* ignore error as OSGetPageShift() should always return correct value */
+	(void) DevmemExportalignAdjustSizeAndAlign(OSGetPageShift(), &actual_req_size, &align);
+
+	psInfo->headerSize = sizeof(PVRSRVTL_PACKETHDR);
+	psInfo->minReservationSize = sizeof(IMG_UINT32);
+	psInfo->pageSize = (IMG_UINT32)(actual_req_size);
+	psInfo->pageAlign = (IMG_UINT32)(align);
+	psInfo->maxTLpacketSize = ((PTL_STREAM)hStream)->ui32MaxPacketSize;
+}
+
+PVRSRV_ERROR
+TLStreamMarkEOS(IMG_HANDLE psStream, IMG_BOOL bRemoveOld)
+{
+	PTL_STREAM   psTmp;
+	PVRSRV_ERROR eError;
+	IMG_UINT8*   pData;
+
+	PVR_DPF_ENTERED;
+
+	if (NULL == psStream)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	psTmp = (PTL_STREAM)psStream;
+
+	/* EOS packets are not supported on permanent stream buffers at present;
+	 * EOS is best used with streams where data is consumed. */
+	if (psTmp->bNoWrapPermanent)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+	}
+
+	if (bRemoveOld)
+	{
+		eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD, NULL, NULL);
+	}
+	else
+	{
+		eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS, NULL, NULL);
+	}
+
+	if (PVRSRV_OK != eError)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	PVR_DPF_RETURN_RC(TLStreamCommit(psStream, 0));
+}
+
+
+static PVRSRV_ERROR
+_TLStreamMarkOC(IMG_HANDLE hStream, PVRSRVTL_PACKETTYPE ePacketType)
+{
+	PVRSRV_ERROR eError;
+	PTL_STREAM psStream = hStream;
+	IMG_UINT32 ui32Size;
+	IMG_UINT8 *pData;
+
+	PVR_DPF_ENTERED;
+
+	if (NULL == psStream)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	if (NULL == psStream->psNotifStream)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_NOTIF_STREAM);
+	}
+
+	ui32Size = OSStringLength(psStream->szName) + 1;
+
+	eError = DoTLStreamReserve(psStream->psNotifStream, &pData, ui32Size,
+	                           ui32Size, ePacketType, NULL, NULL);
+	if (PVRSRV_OK != eError)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	OSDeviceMemCopy(pData, psStream->szName, ui32Size);
+
+	PVR_DPF_RETURN_RC(TLStreamCommit(psStream->psNotifStream, ui32Size));
+}
+
+PVRSRV_ERROR
+TLStreamMarkStreamOpen(IMG_HANDLE psStream)
+{
+	return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE);
+}
+
+PVRSRV_ERROR
+TLStreamMarkStreamClose(IMG_HANDLE psStream)
+{
+	return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE);
+}
+
+PVRSRV_ERROR
+TLStreamSync(IMG_HANDLE psStream)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PTL_STREAM   psTmp;
+
+	PVR_DPF_ENTERED;
+
+	if (NULL == psStream)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	psTmp = (PTL_STREAM)psStream;
+
+	/* If read client exists and has opened stream in blocking mode,
+	 * signal when data is available to read. */
+	if (psTmp->psNode->psRDesc &&
+		 (!(psTmp->psNode->psRDesc->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) &&
+			psTmp->ui32Read != psTmp->ui32Write)
+	{
+		TL_COUNTER_INC(psTmp->ui32ManSyncs);
+		eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj);
+	}
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+IMG_BOOL
+TLStreamIsOpenForReading(IMG_HANDLE hStream)
+{
+	PTL_STREAM   psTmp;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(hStream);
+	psTmp = (PTL_STREAM)hStream;
+
+	PVR_DPF_RETURN_VAL(psTmp->psNode->psRDesc != NULL);
+}
+
+IMG_BOOL
+TLStreamOutOfData(IMG_HANDLE hStream)
+{
+	PTL_STREAM   psTmp;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(hStream);
+	psTmp = (PTL_STREAM)hStream;
+
+	/* If both pointers are equal then the buffer is empty */
+	PVR_DPF_RETURN_VAL(psTmp->ui32Read == psTmp->ui32Write);
+}
+
+
+PVRSRV_ERROR
+TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value)
+{
+	PTL_STREAM   psTmp;
+	IMG_UINT32   ui32LRead, ui32LWrite;
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(hStream);
+	psTmp = (PTL_STREAM)hStream;
+	ui32LRead = psTmp->ui32Read;
+	ui32LWrite = psTmp->ui32Write;
+
+	if (ui32LRead != ui32LWrite)
+	{
+		eErr = PVRSRV_ERROR_STREAM_MISUSE;
+	}
+#if defined(TL_BUFFER_STATS)
+	psTmp->ui32ProducerByteCount = ui32Value;
+#else
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+#endif
+	PVR_DPF_RETURN_RC(eErr);
+}
+/*
+ * Internal stream APIs to server part of Transport Layer, declared in
+ * header tlintern.h. Direct pointers to stream objects are used here as
+ * these functions are internal.
+ */
+IMG_UINT32
+TLStreamAcquireReadPos(PTL_STREAM psStream,
+                       IMG_BOOL bDisableCallback,
+                       IMG_UINT32* puiReadOffset)
+{
+	IMG_UINT32 uiReadLen = 0;
+	IMG_UINT32 ui32LRead, ui32LWrite;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psStream);
+	PVR_ASSERT(puiReadOffset);
+
+	if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+	{
+		if (!OSTryLockAcquire(psStream->hReadLock))
+		{
+			/*
+			 * This is a normal event when the system is under load.
+			 * An example of how to produce this is to run testrunner /
+			 * regression/ddk_test_seq2_host_fw_mem.conf with HTB / pvrhtbd
+			 * configured as
+			 *
+			 * # pvrdebug -log trace -loggroups main,pow,debug \
+			 * -hostloggroups main,ctrl,sync,brg -hostlogtype dropoldest
+			 *
+			 * # pvrhtbd -hostloggroups main,ctrl,sync,brg
+			 *
+			 * We will see a small number of these collisions but as this is
+			 * an expected calling path, and an expected return code, we drop
+			 * the severity to just be a debug MESSAGE instead of WARNING
+			 */
+			PVR_DPF((PVR_DBG_MESSAGE,
+			         "%s: Read lock on stream '%s' is held by a writer, "
+			         "so the reader failed to acquire it.", __func__,
+			         psStream->szName));
+#if defined(TL_BUFFER_STATS)
+			TL_COUNTER_INC(psStream->ui32CntReadFails);
+#endif
+			PVR_DPF_RETURN_VAL(0);
+		}
+	}
+
+#if defined(TL_BUFFER_STATS)
+	TL_COUNTER_INC(psStream->ui32CntReadSuccesses);
+#endif
+
+	/* Grab a local copy */
+	ui32LRead = psStream->ui32Read;
+	ui32LWrite = psStream->ui32Write;
+
+	if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+	{
+		psStream->bReadPending = IMG_TRUE;
+		OSLockRelease(psStream->hReadLock);
+	}
+
+	/* No data available and CB defined - try and get data */
+	if ((ui32LRead == ui32LWrite) && psStream->pfProducerCallback && !bDisableCallback)
+	{
+		PVRSRV_ERROR eRc;
+		IMG_UINT32   ui32Resp = 0;
+
+		eRc = ((TL_STREAM_SOURCECB)psStream->pfProducerCallback)(psStream, TL_SOURCECB_OP_CLIENT_EOS,
+				&ui32Resp, psStream->pvProducerUserData);
+		PVR_LOG_IF_ERROR(eRc, "TLStream->pfProducerCallback");
+
+		ui32LWrite = psStream->ui32Write;
+	}
+
+	/* No data available... */
+	if (ui32LRead == ui32LWrite)
+	{
+		if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+		{
+			psStream->bReadPending = IMG_FALSE;
+		}
+		PVR_DPF_RETURN_VAL(0);
+	}
+
+#if defined(TL_BUFFER_STATS)
+	/* As soon as the reader knows it will see data, it marks its presence for the writer */
+	OSAtomicWrite(&psStream->bNoReaderSinceFirstReserve, 0);
+#endif
+
+	/* Data is available to read... */
+	*puiReadOffset = ui32LRead;
+
+	/*PVR_DPF((PVR_DBG_VERBOSE,
+	 *		"TLStreamAcquireReadPos Start before: Write:%d, Read:%d, size:%d",
+	 *		ui32LWrite, ui32LRead, psStream->ui32Size));
+	 */
+
+	if (ui32LRead > ui32LWrite)
+	{	/* CB has wrapped around. */
+		PVR_ASSERT(!psStream->bNoWrapPermanent);
+		/* Return the first contiguous piece of memory, i.e. [Read, EndOfBuffer],
+		 * and let a subsequent AcquireReadPos read the rest of the buffer */
+		/*PVR_DPF((PVR_DBG_VERBOSE, "TLStreamAcquireReadPos buffer has wrapped"));*/
+		uiReadLen = psStream->ui32Size - ui32LRead;
+		TL_COUNTER_INC(psStream->ui32AcquireRead2);
+	}
+	else
+	{	/* CB has not wrapped */
+		uiReadLen = ui32LWrite - ui32LRead;
+		TL_COUNTER_INC(psStream->ui32AcquireRead1);
+	}
+
+	PVR_DPF_RETURN_VAL(uiReadLen);
+}
+
+PVRSRV_ERROR
+TLStreamAdvanceReadPos(PTL_STREAM psStream,
+                       IMG_UINT32 uiReadLen,
+                       IMG_UINT32 uiOrigReadLen)
+{
+	IMG_UINT32 uiNewReadPos;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psStream);
+
+	/*
+	 * This API does not use Read lock as 'bReadPending' is sufficient
+	 * to keep Read index safe by preventing a write from updating the
+	 * index and 'bReadPending' itself is safe as it can only be modified
+	 * by readers and there can be only one reader in action at a time.
+	 */
+
+	/* Update the read offset by the given length in a circular manner.
+	 * The update is assumed to be atomic, hence no locks are needed.
+	 */
+	uiNewReadPos = (psStream->ui32Read + uiReadLen) % psStream->ui32Size;
+
+	/* Must validate length is on a packet boundary, for
+	 * TLReleaseDataLess calls.
+	 */
+	if (uiReadLen != uiOrigReadLen) /* partial release (TLReleaseDataLess) */
+	{
+		PVRSRVTL_PPACKETHDR psHdr = GET_PACKET_HDR(psStream->pbyBuffer+uiNewReadPos);
+		PVRSRVTL_PACKETTYPE eType = GET_PACKET_TYPE(psHdr);
+
+		if ((psHdr->uiReserved != PVRSRVTL_PACKETHDR_RESERVED) ||
+			(eType == PVRSRVTL_PACKETTYPE_UNDEF) ||
+			(eType >= PVRSRVTL_PACKETTYPE_LAST))
+		{
+			PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_ALIGNMENT);
+		}
+		/* else OK, on a packet boundary */
+	}
+	/* else no check needed */
+
+	psStream->ui32Read = uiNewReadPos;
+
+	if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+	{
+		psStream->bReadPending = IMG_FALSE;
+	}
+
+	/* notify reserves that may be pending */
+	/* The producer event object is used to signal the StreamReserve if the TL
+	 * Buffer is in blocking mode and is full.
+	 * Previously this event was only signalled if the buffer was created in
+	 * blocking mode. Since the buffer mode can now change dynamically the event
+	 * is signalled every time to avoid any potential race where the signal is
+	 * required, but not produced.
+	 */
+	{
+		PVRSRV_ERROR eError;
+		eError = OSEventObjectSignal(psStream->hProducerEventObj);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					 "Error in TLStreamAdvanceReadPos: OSEventObjectSignal returned:%u",
+					 eError));
+			/* We've failed to notify the producer event. This means there may
+			 * be a delay in generating more data to be consumed until the next
+			 * Write() generating action occurs.
+			 */
+		}
+	}
+
+	PVR_DPF((PVR_DBG_VERBOSE,
+			 "TLStreamAdvanceReadPos Read now at: %d",
+			psStream->ui32Read));
+	PVR_DPF_RETURN_OK;
+}
+
+void
+TLStreamResetReadPos(PTL_STREAM psStream)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psStream);
+
+	if (psStream->bNoWrapPermanent)
+	{
+		/* Reset the read offset to the start of the buffer.
+		 * The update is assumed to be atomic, hence no locks are needed. */
+		psStream->ui32Read = 0;
+
+		PVR_DPF((PVR_DBG_VERBOSE,
+				 "TLStreamResetReadPos Read now at: %d",
+					psStream->ui32Read));
+	}
+	else
+	{
+		/* For other stream types this is a no-op */
+		PVR_DPF((PVR_DBG_VERBOSE,
+				"No need to reset read position of circular tlstream"));
+	}
+
+	PVR_DPF_RETURN;
+}
+
+void
+TLStreamDestroy (PTL_STREAM psStream)
+{
+	PVR_ASSERT (psStream);
+
+	OSLockDestroy (psStream->hStreamWLock);
+	OSLockDestroy (psStream->hReadLock);
+
+	OSEventObjectClose(psStream->hProducerEvent);
+	OSEventObjectDestroy(psStream->hProducerEventObj);
+
+	TLFreeSharedMem(psStream);
+	OSFreeMem(psStream);
+}
+
+DEVMEM_MEMDESC*
+TLStreamGetBufferPointer(PTL_STREAM psStream)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psStream);
+
+	PVR_DPF_RETURN_VAL(psStream->psStreamMemDesc);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlstream.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlstream.h
new file mode 100644
index 0000000..1f1d094
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/tlstream.h
@@ -0,0 +1,598 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer kernel side API.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    TL provides driver components with a way to copy data from kernel
+                space to user space (e.g. screen/file).
+
+                Data can be passed to the Transport Layer through the
+                TL Stream (kernel space) API interface.
+
+                The buffer provided to every stream is a modified version of a
+                circular buffer. Which CB variant is created is specified by
+                the relevant flags when creating a stream. Currently three
+                types of buffer are available:
+                - TL_OPMODE_DROP_NEWER:
+                  When the buffer is full, incoming data are dropped
+                  (instead of overwriting older data) and a marker is set
+                  to let the user know that data have been lost.
+                - TL_OPMODE_BLOCK:
+                  When the circular buffer is full, reserve/write calls block
+                  until enough space is freed.
+                - TL_OPMODE_DROP_OLDEST:
+                  When the circular buffer is full, the oldest packets in the
+                  buffer are dropped and a flag is set in header of next packet
+                  to let the user know that data have been lost.
+
+                All size/space requests are in bytes. However, the actual
+                implementation uses native word sizes (i.e. 4 byte aligned).
+
+                The user does not need to provide space for the stream buffer
+                as the TL handles memory allocations and usage.
+
+                Inserting data to a stream's buffer can be done either:
+                - by using TLReserve/TLCommit: User is provided with a buffer
+                                                 to write data to.
+                - or by using TLWrite:         User provides a buffer with
+                                                 data to be committed. The TL
+                                                 copies the data from the
+                                                 buffer into the stream buffer
+                                                 and returns.
+                Users should be aware that there are implementation overheads
+                associated with every stream buffer. If you find that less
+                data are captured than expected then try increasing the
+                stream buffer size or use TLStreamInfo to obtain buffer parameters
+                and calculate optimum required values at run time.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef TLSTREAM_H
+#define TLSTREAM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_tlcommon.h"
+#include "device.h"
+
+/*! Extract the TL stream opmode from the given stream create flags.
+ * The last 3 bits of the stream flags are used to store the opmode,
+ * hence the opmode mask is defined as follows. */
+#define TL_OPMODE_MASK 0x7
+
+/*
+ * NOTE: This enum is used to directly access the HTB_OPMODE_xxx values
+ * within htbserver.c.
+ * As such we *MUST* keep the values matching in order of declaration.
+ */
+/*! Opmode specifying circular buffer behaviour */
+typedef enum
+{
+	/*! Undefined operation mode */
+	TL_OPMODE_UNDEF = 0,
+
+	/*! Reject new data if the buffer is full, producer may then decide to
+	 *    drop the data or retry after some time. */
+	TL_OPMODE_DROP_NEWER,
+
+	/*! When the buffer is full, advance the tail/read position to accept the
+	 * new reserve call (size permitting), effectively overwriting the oldest
+	 * data in the circular buffer. */
+	TL_OPMODE_DROP_OLDEST,
+
+	/*! Block Reserve (subsequently Write) calls if there is not enough space
+	 *    until some space is freed via a client read operation. */
+	TL_OPMODE_BLOCK,
+
+	/*! For error checking */
+	TL_OPMODE_LAST
+
+} TL_OPMODE;
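+
+/* Illustrative sketch (not a fixed recipe): the opmode can be recovered from
+ * the stream create flags with TL_OPMODE_MASK; ui32StreamFlags stands for a
+ * hypothetical flags word as passed to TLStreamCreate():
+ *
+ *     TL_OPMODE eOpMode = (TL_OPMODE)(ui32StreamFlags & TL_OPMODE_MASK);
+ *
+ *     if ((eOpMode == TL_OPMODE_UNDEF) || (eOpMode >= TL_OPMODE_LAST))
+ *     {
+ *         return PVRSRV_ERROR_INVALID_PARAMS;
+ *     }
+ */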
+
+typedef enum {
+	/* Enum to be used in conjunction with new Flags feature */
+
+	/* Flag set when Drop Oldest is set and packets have been dropped */
+	TL_FLAG_OVERWRITE_DETECTED = (1 << 0),
+
+} TL_Flags;
+
+static_assert(TL_OPMODE_LAST <= TL_OPMODE_MASK,
+	      "TL_OPMODE_LAST must not exceed TL_OPMODE_MASK");
+
+/*! Flags specifying stream behaviour */
+/*! Do not destroy the stream if there are still data that have not been
+ *     copied to user space. Block until the stream is emptied. */
+#define TL_FLAG_FORCE_FLUSH            (1U<<8)
+/*! Do not signal consumers on commit automatically when the stream buffer
+ * transitions from empty to non-empty. Producer responsible for signal when
+ * it chooses. */
+#define TL_FLAG_NO_SIGNAL_ON_COMMIT    (1U<<9)
+
+/*! When a stream has this property it never wraps around and
+ * overwrites existing data, hence it is a fixed size persistent
+ * buffer, data written is permanent. Producers need to ensure
+ * the buffer is big enough for their needs.
+ * When a stream is opened for reading the client will always
+ * find the read position at the start of the buffer/data. */
+#define TL_FLAG_PERMANENT_NO_WRAP      (1U<<10)
+
+/*! Defer allocation of stream's shared memory until first open. */
+#define TL_FLAG_ALLOCATE_ON_FIRST_OPEN (1U<<11)
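+
+/* Illustrative sketch: behaviour flags are OR'ed together with the opmode
+ * carried in the low bits, e.g. a drop-newer stream whose producer signals
+ * readers manually via TLStreamSync():
+ *
+ *     IMG_UINT32 ui32StreamFlags = TL_OPMODE_DROP_NEWER |
+ *                                  TL_FLAG_NO_SIGNAL_ON_COMMIT;
+ */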
+
+/*! Structure used to pass internal TL stream sizes information to users.*/
+typedef struct _TL_STREAM_INFO_
+{
+    IMG_UINT32 headerSize;          /*!< Packet header size in bytes */
+    IMG_UINT32 minReservationSize;  /*!< Minimum data size reserved in bytes */
+    IMG_UINT32 pageSize;            /*!< Page size in bytes */
+    IMG_UINT32 pageAlign;           /*!< Page alignment in bytes */
+    IMG_UINT32 maxTLpacketSize;     /*!< Max allowed TL packet size */
+} TL_STREAM_INFO, *PTL_STREAM_INFO;
+
+/*! Callback operations or notifications that a stream producer may handle
+ * when requested by the Transport Layer.
+ */
+#define TL_SOURCECB_OP_CLIENT_EOS 0x01  /*!< Client has reached end of stream;
+                                         * can any more data be supplied?
+                                         * ui32Resp ignored in this operation */
+
+/*! Function pointer type for the callback handler into the "producer" code
+ * that writes data to the TL stream. The producer should handle the
+ * notification or operation supplied in ui32ReqOp on stream hStream. The
+ * operations and notifications are defined above in TL_SOURCECB_OP. */
+typedef PVRSRV_ERROR (*TL_STREAM_SOURCECB)(IMG_HANDLE hStream,
+		IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser);
+
+typedef void (*TL_STREAM_ONREADEROPENCB)(void *pvArg);
+
+/*************************************************************************/ /*!
+ @Function      TLAllocSharedMemIfNull
+ @Description   Allocates shared memory for the stream.
+ @Input         hStream     Stream handle.
+ @Return        eError      Internal services call returned eError error
+                            number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLAllocSharedMemIfNull(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLFreeSharedMem
+ @Description   Frees stream's shared memory.
+ @Input         hStream     Stream handle.
+*/ /**************************************************************************/
+void
+TLFreeSharedMem(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamCreate
+ @Description   Request the creation of a new stream and open a handle.
+                If creating a stream which should continue to exist after the
+                current context is finished, then TLStreamCreate must be
+                followed by a TLStreamOpen call. In any case, the number of
+                create/open calls must balance with the number of close calls
+                used. This ensures the resources of a stream are released when
+                it is no longer required.
+ @Output        phStream        Pointer to handle to store the new stream.
+ @Input         psDevNode       Pointer to the Device Node to be used for
+                                stream allocation.
+ @Input         szStreamName    Name of stream, maximum length:
+                                PVRSRVTL_MAX_STREAM_NAME_SIZE.
+                                If a longer string is provided, creation fails.
+ @Input         ui32Size        Desired buffer size in bytes.
+ @Input         ui32StreamFlags Used to configure buffer behaviour. See above.
+ @Input         pfOnReaderOpenCB    Optional callback called when a client
+                                    opens this stream, may be null.
+ @Input         pvOnReaderOpenUD    Optional user data for pfOnReaderOpenCB,
+                                    may be null.
+ @Input         pfProducerCB    Optional callback, may be null.
+ @Input         pvProducerUD    Optional user data for callback, may be null.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle or string name
+                                             exceeded MAX_STREAM_NAME_SIZE
+ @Return        PVRSRV_ERROR_OUT_OF_MEMORY   Failed to allocate space for
+                                             stream handle.
+ @Return        PVRSRV_ERROR_DUPLICATE_VALUE There already exists a stream with
+                                             the same stream name string.
+ @Return        eError                       Internal services call returned
+                                             eError error number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamCreate(IMG_HANDLE *phStream,
+               PVRSRV_DEVICE_NODE *psDevNode,
+               const IMG_CHAR *szStreamName,
+               IMG_UINT32 ui32Size,
+               IMG_UINT32 ui32StreamFlags,
+               TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB,
+               void *pvOnReaderOpenUD,
+               TL_STREAM_SOURCECB pfProducerCB,
+               void *pvProducerUD);
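+
+/* Illustrative sketch of a minimal create call; psDevNode and the 4 KiB size
+ * are hypothetical, and no callbacks are registered:
+ *
+ *     IMG_HANDLE hStream;
+ *     PVRSRV_ERROR eError = TLStreamCreate(&hStream, psDevNode, "example",
+ *                                          4096, TL_OPMODE_DROP_NEWER,
+ *                                          NULL, NULL, NULL, NULL);
+ *     PVR_LOG_IF_ERROR(eError, "TLStreamCreate");
+ */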
+
+/*************************************************************************/ /*!
+ @Function      TLStreamOpen
+ @Description   Attach to existing stream that has already been created by a
+                  TLStreamCreate call. A handle is returned to the stream.
+ @Output        phStream        Pointer to handle to store the stream.
+ @Input         szStreamName    Name of stream, should match an already
+                                  existing stream name
+ @Return        PVRSRV_ERROR_NOT_FOUND       None of the streams matched the
+                                             requested stream name.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  Non-NULL pointer to stream
+                                             handle is required.
+ @Return        PVRSRV_OK                    Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamOpen(IMG_HANDLE     *phStream,
+             const IMG_CHAR *szStreamName);
+
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReset
+ @Description   Resets read and write pointers and pending flag.
+ @Input         hStream  Handle of the stream to reset.
+*/ /**************************************************************************/
+void TLStreamReset(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamSetNotifStream
+ @Description   Registers a "notification stream" which will be used to
+                publish information about state change of the "hStream"
+                stream. Notification can inform about events such as stream
+                open/close, etc.
+ @Input         hStream         Handle to stream to update.
+ @Input         hNotifStream    Handle to the stream which will be used for
+                                publishing notifications.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  If either of the parameters is
+                                             NULL
+ @Return        PVRSRV_OK                    Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReconfigure
+ @Description   Request the stream flags controlling buffer behaviour to
+                be updated.
+                In the case where TL_OPMODE_BLOCK is to be used,
+                TLStreamCreate should be called without that flag and this
+                function used to change the stream mode once a consumer process
+                has been started. This avoids a deadlock scenario where the
+                TLStreamWrite/TLStreamReserve call will hold the Bridge Lock
+                while blocking if the TL buffer is full.
+                The TL_OPMODE_BLOCK should never drop the Bridge Lock
+                as this leads to another deadlock scenario where the caller to
+                TLStreamWrite/TLStreamReserve has already acquired another lock
+                (e.g. gHandleLock) which is not dropped. This then leads to that
+                thread acquiring locks out of order.
+ @Input         hStream         Handle to stream to update.
+ @Input         ui32StreamFlags Flags that configure buffer behaviour. See above.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle or inconsistent
+                                             stream flags.
+ @Return        PVRSRV_ERROR_NOT_READY       Stream is currently being written
+                                             to; try again later.
+ @Return        eError                       Internal services call returned
+                                             eError error number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReconfigure(IMG_HANDLE hStream,
+                    IMG_UINT32 ui32StreamFlags);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamClose
+ @Description   Detach from the stream associated with the given handle. If
+                  the current handle is the last one accessing the stream
+                  (i.e. the number of TLStreamCreate+TLStreamOpen calls matches
+                  the number of TLStreamClose calls) then the stream is also
+                  deleted.
+                On return the handle is no longer valid.
+ @Input         hStream     Handle to stream that will be closed.
+ @Return        None.
+*/ /**************************************************************************/
+void
+TLStreamClose(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReserve
+ @Description   Reserve space in stream buffer. When successful every
+                  TLStreamReserve call must be followed by a matching
+                  TLStreamCommit call. While a TLStreamCommit call is pending
+                  for a stream, subsequent TLStreamReserve calls for this
+                  stream will fail.
+ @Input         hStream         Stream handle.
+ @Output        ppui8Data       Pointer to a pointer to a location in the
+                                  buffer. The caller can then use this address
+                                  in writing data into the stream.
+ @Input         ui32Size        Number of bytes to reserve in buffer.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        PVRSRV_ERROR_NOT_READY      There are data previously reserved
+                                              that are pending to be committed.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE  Misusing the stream by trying to
+                                              reserve more space than the
+                                              buffer size.
+ @Return        PVRSRV_ERROR_STREAM_FULL    The reserve size requested
+                                            is larger than the free
+                                            space.
+ @Return         PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED  The reserve size
+                                                            requested is larger
+                                                            than max TL packet size
+ @Return        PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE Permanent stream buffer
+                                                     does not have enough space
+                                                     for the reserve.
+ @Return        PVRSRV_OK                   Success, output arguments valid.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReserve(IMG_HANDLE hStream,
+                IMG_UINT8  **ppui8Data,
+                IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReserve2
+ @Description   Reserve space in stream buffer. When successful every
+                  TLStreamReserve call must be followed by a matching
+                  TLStreamCommit call. While a TLStreamCommit call is pending
+                  for a stream, subsequent TLStreamReserve calls for this
+                  stream will fail.
+ @Input         hStream         Stream handle.
+ @Output        ppui8Data       Pointer to a pointer to a location in the
+                                  buffer. The caller can then use this address
+                                  in writing data into the stream.
+ @Input         ui32Size        Ideal number of bytes to reserve in buffer.
+ @Input         ui32SizeMin     Minimum number of bytes to reserve in buffer.
+ @Input         pui32Available  Optional, but when present and
+                                  PVRSRV_ERROR_STREAM_FULL is returned, a size
+                                  suggestion is returned in this argument which
+                                  the caller can attempt to reserve again for a
+                                  successful allocation.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        PVRSRV_ERROR_NOT_READY       There are data previously reserved
+                                             that are pending to be committed.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE   Misusing the stream by trying to
+                                             reserve more space than the
+                                             buffer size.
+ @Return        PVRSRV_ERROR_STREAM_FULL     The reserve size requested
+                                             is larger than the free
+                                             space.
+                                             Check the pui32Available
+                                             value for the correct
+                                             reserve size to use.
+ @Return         PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED   The reserve size
+                                                             requested is larger
+                                                             than max TL packet size
+ @Return        PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE Permanent stream buffer
+                                                     does not have enough space
+                                                     for the reserve.
+ @Return        PVRSRV_OK                   Success, output arguments valid.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReserve2(IMG_HANDLE hStream,
+                IMG_UINT8  **ppui8Data,
+                IMG_UINT32 ui32Size,
+                IMG_UINT32 ui32SizeMin,
+                IMG_UINT32* pui32Available);
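+
+/* Illustrative sketch: on PVRSRV_ERROR_STREAM_FULL, the size suggested in
+ * pui32Available can be used for a smaller retry (ui32Want and ui32WantMin
+ * are hypothetical sizes):
+ *
+ *     IMG_UINT8  *pui8Dest;
+ *     IMG_UINT32 ui32Avail = 0;
+ *     PVRSRV_ERROR eError = TLStreamReserve2(hStream, &pui8Dest, ui32Want,
+ *                                            ui32WantMin, &ui32Avail);
+ *     if ((eError == PVRSRV_ERROR_STREAM_FULL) && (ui32Avail != 0))
+ *     {
+ *         eError = TLStreamReserve2(hStream, &pui8Dest, ui32Avail,
+ *                                   ui32Avail, NULL);
+ *     }
+ */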
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReserveReturnFlags
+ @Description   Reserve space in stream buffer. When successful every
+                  TLStreamReserve call must be followed by a matching
+                  TLStreamCommit call. While a TLStreamCommit call is pending
+                  for a stream, subsequent TLStreamReserve calls for this
+                  stream will fail.
+ @Input         hStream         Stream handle.
+ @Output        ppui8Data       Pointer to a pointer to a location in the
+                                  buffer. The caller can then use this address
+                                  in writing data into the stream.
+ @Input         ui32Size        Ideal number of bytes to reserve in buffer.
+ @Output        pui32Flags      Output parameter to return flags generated within
+                                the reserve function.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReserveReturnFlags(IMG_HANDLE hStream,
+        IMG_UINT8  **ppui8Data,
+        IMG_UINT32 ui32Size,
+		IMG_UINT32* pui32Flags);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamGetUT
+ @Description   Returns the current stream utilisation in bytes
+ @Input         hStream     Stream handle.
+ @Return        IMG_UINT32  Stream utilisation
+*/ /**************************************************************************/
+IMG_UINT32 TLStreamGetUT(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamCommit
+ @Description   Notify TL that data have been written in the stream buffer.
+                  Should always follow and match TLStreamReserve call.
+ @Input         hStream         Stream handle.
+ @Input         ui32Size        Number of bytes that have been added to the
+                                  stream.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE   Commit results in more data
+                                             committed than the buffer size,
+                                             the stream is misused.
+ @Return        eError                       Commit was successful but
+                                             internal services call returned
+                                             eError error number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamCommit(IMG_HANDLE hStream,
+               IMG_UINT32 ui32Size);
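+
+/* Illustrative sketch of the reserve/commit pairing described above;
+ * pui8Payload and ui32PayloadSize are hypothetical:
+ *
+ *     IMG_UINT8 *pui8Dest;
+ *     PVRSRV_ERROR eError = TLStreamReserve(hStream, &pui8Dest,
+ *                                           ui32PayloadSize);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         OSDeviceMemCopy(pui8Dest, pui8Payload, ui32PayloadSize);
+ *         eError = TLStreamCommit(hStream, ui32PayloadSize);
+ *     }
+ */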
+
+/*************************************************************************/ /*!
+ @Function      TLStreamWrite
+ @Description   Combined Reserve/Commit call. This function Reserves space in
+                  the specified stream buffer, copies ui32Size bytes of data
+                  from the array pui8Src points to and Commits in an "atomic"
+                  style operation.
+ @Input         hStream         Stream handle.
+ @Input         pui8Src         Source to read data from.
+ @Input         ui32Size        Number of bytes to copy and commit.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        eError                       Error codes returned by either
+                                               Reserve or Commit.
+ @Return        PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamWrite(IMG_HANDLE hStream,
+              IMG_UINT8  *pui8Src,
+              IMG_UINT32 ui32Size);
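+
+/* Illustrative sketch: the one-call equivalent of the reserve/commit pair
+ * above, with the same hypothetical pui8Payload and ui32PayloadSize:
+ *
+ *     PVRSRV_ERROR eError = TLStreamWrite(hStream, pui8Payload,
+ *                                         ui32PayloadSize);
+ */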
+
+/*************************************************************************/ /*!
+ @Function      TLStreamWriteRetFlags
+ @Description   Combined Reserve/Commit call. This function Reserves space in
+                  the specified stream buffer, copies ui32Size bytes of data
+                  from the array pui8Src points to and Commits in an "atomic"
+                  style operation. Also accepts a pointer to a bit flag value
+                  for returning write status flags.
+ @Input         hStream         Stream handle.
+ @Input         pui8Src         Source to read data from.
+ @Input         ui32Size        Number of bytes to copy and commit.
+ @Output        pui32Flags      Output parameter for write status info
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        eError                       Error codes returned by either
+                                               Reserve or Commit.
+ @Return        PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamWriteRetFlags(IMG_HANDLE hStream,
+                      IMG_UINT8 *pui8Src,
+					  IMG_UINT32 ui32Size,
+					  IMG_UINT32 *pui32Flags);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamSync
+ @Description   Signal the consumer to start acquiring data from the stream
+                buffer. Called by producers that use the flag
+                TL_FLAG_NO_SIGNAL_ON_COMMIT to manually control when
+                consumers start reading the stream.
+                Used when multiple small writes need to be batched.
+ @Input         hStream         Stream handle.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        eError                       Error codes returned by either
+                                             Reserve or Commit.
+ @Return        PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamSync(IMG_HANDLE hStream);
+
+
+/*************************************************************************/ /*!
+ @Function      TLStreamMarkEOS
+ @Description   Insert a EOS marker packet in the given stream.
+ @Input         hStream         Stream handle.
+ @Input         bRemoveOld      If TRUE, remove the old stream record file
+                                before splitting to a new file.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return        eError                       Error codes returned by either
+                                             Reserve or Commit.
+ @Return        PVRSRV_OK                    Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamMarkEOS(IMG_HANDLE hStream, IMG_BOOL bRemoveOld);
+
+/*************************************************************************/ /*!
+@Function       TLStreamMarkStreamOpen
+@Description    Puts *open* stream packet into hStream's notification stream,
+                if set, error otherwise."
+@Input          hStream Stream handle.
+@Return         PVRSRV_OK on success and error code on failure
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamMarkStreamOpen(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+@Function       TLStreamMarkStreamClose
+@Description    Puts *close* stream packet into hStream's notification stream,
+                if set, error otherwise."
+@Input          hStream Stream handle.
+@Return         PVRSRV_OK on success and error code on failure
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamMarkStreamClose(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamInfo
+ @Description   Run time information about buffer elemental sizes.
+                It sets psInfo members accordingly. Users can use those values
+                to calculate the parameters they use in TLStreamCreate and
+                TLStreamReserve.
+ @Output        psInfo          pointer to stream info structure.
+ @Return        None.
+*/ /**************************************************************************/
+void
+TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo);
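+
+/* Illustrative sketch: the reported element sizes can be used to estimate the
+ * real cost of each packet when sizing a buffer (ui32Payload and
+ * ui32NumPackets are hypothetical):
+ *
+ *     TL_STREAM_INFO sInfo;
+ *     IMG_UINT32 ui32PacketCost, ui32BufferSize;
+ *     TLStreamInfo(hStream, &sInfo);
+ *     ui32PacketCost = sInfo.headerSize + ui32Payload;
+ *     ui32BufferSize = ui32PacketCost * ui32NumPackets;
+ */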
+
+/*************************************************************************/ /*!
+ @Function      TLStreamIsOpenForReading
+ @Description   Query if a stream has any readers connected.
+ @Input         hStream         Stream handle.
+ @Return        IMG_BOOL        True if at least one reader is connected,
+                                false otherwise
+*/ /**************************************************************************/
+IMG_BOOL
+TLStreamIsOpenForReading(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamOutOfData
+ @Description   Query if the stream is empty (no data waiting to be read).
+ @Input         hStream         Stream handle.
+ @Return        IMG_BOOL        True if read==write, no data waiting,
+                                false otherwise
+*/ /**************************************************************************/
+IMG_BOOL TLStreamOutOfData(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamResetProducerByteCount
+ @Description   Reset the producer byte counter on the specified stream.
+ @Input         hStream         Stream handle.
+ @Input         ui32Value       Value to reset the counter to, often 0.
+ @Return        PVRSRV_OK                   Success.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE  The read and write positions did
+                                            not match; the stream was not
+                                            empty.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value);
+
+#endif /* TLSTREAM_H */
+/*****************************************************************************
+ End of file (tlstream.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/trace_events.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/trace_events.c
new file mode 100644
index 0000000..1f9cca9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/trace_events.c
@@ -0,0 +1,242 @@
+/*************************************************************************/ /*!
+@Title          Linux trace event helper functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#include "img_types.h"
+#include "trace_events.h"
+#include "rogue_trace_events.h"
+#include "sync_checkpoint_external.h"
+
+static bool fence_update_event_enabled, fence_check_event_enabled;
+
+bool trace_rogue_are_fence_updates_traced(void)
+{
+	return fence_update_event_enabled;
+}
+
+bool trace_rogue_are_fence_checks_traced(void)
+{
+	return fence_check_event_enabled;
+}
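+
+/* Illustrative sketch of how callers can use these predicates to skip
+ * building trace arguments when the events are disabled (the arguments here
+ * are hypothetical):
+ *
+ *     if (trace_rogue_are_fence_checks_traced())
+ *     {
+ *         trace_rogue_fence_checks(pszCmd, pszDM, ui32FWContext, ui32Offset,
+ *                                  uCount, pauiAddresses, paui32Values);
+ *     }
+ */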
+
+/*
+ * Callbacks referenced from rogue_trace_events.h. Note that these are not
+ * thread-safe; however, since running trace code when tracing is not enabled
+ * is simply a no-op, there is no harm in it.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_update_enabled_callback(void)
+#else
+void trace_fence_update_enabled_callback(void)
+#endif
+{
+	fence_update_event_enabled = true;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	return 0;
+#endif
+}
+
+void trace_fence_update_disabled_callback(void)
+{
+	fence_update_event_enabled = false;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_check_enabled_callback(void)
+#else
+void trace_fence_check_enabled_callback(void)
+#endif
+{
+	fence_check_event_enabled = true;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	return 0;
+#endif
+}
+
+void trace_fence_check_disabled_callback(void)
+{
+	fence_check_event_enabled = false;
+}
+
+#if defined(SUPPORT_RGX)
+/* This is a helper that calls trace_rogue_fence_update for each fence in an
+ * array.
+ */
+void trace_rogue_fence_updates(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+							   IMG_UINT32 ui32Offset,
+							   IMG_UINT uCount,
+							   PRGXFWIF_UFO_ADDR *pauiAddresses,
+							   IMG_UINT32 *paui32Values)
+{
+	IMG_UINT i;
+	for (i = 0; i < uCount; i++)
+	{
+		trace_rogue_fence_update(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+								 pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED);
+	}
+}
+
+void trace_rogue_fence_checks(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+							  IMG_UINT32 ui32Offset,
+							  IMG_UINT uCount,
+							  PRGXFWIF_UFO_ADDR *pauiAddresses,
+							  IMG_UINT32 *paui32Values)
+{
+	IMG_UINT i;
+	for (i = 0; i < uCount; i++)
+	{
+		trace_rogue_fence_check(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+							  pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED);
+	}
+}
+
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+							 IMG_UINT32 ui32FWCtx,
+							 IMG_UINT32 ui32ExtJobRef,
+							 IMG_UINT32 ui32IntJobRef,
+							 IMG_UINT32 ui32UFOCount,
+							 const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+	IMG_UINT i;
+	for (i = 0; i < ui32UFOCount; i++)
+	{
+		trace_rogue_ufo_update(ui64OSTimestamp, ui32FWCtx,
+				ui32IntJobRef,
+				ui32ExtJobRef,
+				ui32IntJobRef,
+				puData->sUpdate.ui32FWAddr,
+				puData->sUpdate.ui32OldValue,
+				puData->sUpdate.ui32NewValue);
+		puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) puData)
+				+ sizeof(puData->sUpdate));
+	}
+}
+
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+									IMG_UINT32 ui32FWCtx,
+									IMG_UINT32 ui32ExtJobRef,
+									IMG_UINT32 ui32IntJobRef,
+									IMG_BOOL bPrEvent,
+									IMG_UINT32 ui32UFOCount,
+									const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+	IMG_UINT i;
+	for (i = 0; i < ui32UFOCount; i++)
+	{
+		if (bPrEvent)
+		{
+			trace_rogue_ufo_pr_check_success(ui64OSTimestamp, ui32FWCtx,
+					ui32IntJobRef, ui32ExtJobRef, ui32IntJobRef,
+					puData->sCheckSuccess.ui32FWAddr,
+					puData->sCheckSuccess.ui32Value);
+		}
+		else
+		{
+			trace_rogue_ufo_check_success(ui64OSTimestamp, ui32FWCtx,
+					ui32IntJobRef, ui32ExtJobRef, ui32IntJobRef,
+					puData->sCheckSuccess.ui32FWAddr,
+					puData->sCheckSuccess.ui32Value);
+		}
+		puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) puData)
+				+ sizeof(puData->sCheckSuccess));
+	}
+}
+
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+								 IMG_UINT32 ui32FWCtx,
+								 IMG_UINT32 ui32ExtJobRef,
+								 IMG_UINT32 ui32IntJobRef,
+								 IMG_BOOL bPrEvent,
+								 IMG_UINT32 ui32UFOCount,
+								 const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+	IMG_UINT i;
+	for (i = 0; i < ui32UFOCount; i++)
+	{
+		if (bPrEvent)
+		{
+			trace_rogue_ufo_pr_check_fail(ui64OSTimestamp, ui32FWCtx,
+					ui32IntJobRef, ui32ExtJobRef, ui32IntJobRef,
+					puData->sCheckFail.ui32FWAddr,
+					puData->sCheckFail.ui32Value,
+					puData->sCheckFail.ui32Required);
+		}
+		else
+		{
+			trace_rogue_ufo_check_fail(ui64OSTimestamp, ui32FWCtx,
+					ui32IntJobRef, ui32ExtJobRef, ui32IntJobRef,
+					puData->sCheckFail.ui32FWAddr,
+					puData->sCheckFail.ui32Value,
+					puData->sCheckFail.ui32Required);
+		}
+		puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) puData)
+				+ sizeof(puData->sCheckFail));
+	}
+}
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+
+int PVRGpuTraceEnableUfoCallbackWrapper(void)
+{
+#if defined(SUPPORT_RGX)
+	PVRGpuTraceEnableUfoCallback();
+#endif
+
+	return 0;
+}
+
+int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void)
+{
+#if defined(SUPPORT_RGX)
+	PVRGpuTraceEnableFirmwareActivityCallback();
+#endif
+
+	return 0;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/trace_events.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/trace_events.h
new file mode 100644
index 0000000..4082b45
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/trace_events.h
@@ -0,0 +1,177 @@
+/*************************************************************************/ /*!
+@Title          Linux trace events and event helper functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(TRACE_EVENTS_H)
+#define TRACE_EVENTS_H
+
+#include "rgx_fwif_km.h"
+#include "rgx_hwperf.h"
+
+/* We need to make these functions do nothing if CONFIG_EVENT_TRACING isn't
+ * enabled, just like the actual trace event functions that the kernel
+ * defines for us.
+ */
+#ifdef CONFIG_EVENT_TRACING
+bool trace_rogue_are_fence_checks_traced(void);
+
+bool trace_rogue_are_fence_updates_traced(void);
+
+void trace_job_enqueue(IMG_UINT32 ui32FWContext,
+                       IMG_UINT32 ui32ExtJobRef,
+                       IMG_UINT32 ui32IntJobRef,
+                       const char *pszKickType);
+
+#if defined(SUPPORT_RGX)
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+							   IMG_UINT32 ui32FWContext,
+							   IMG_UINT32 ui32Offset,
+							   IMG_UINT uCount,
+							   PRGXFWIF_UFO_ADDR *pauiAddresses,
+							   IMG_UINT32 *paui32Values);
+
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+							  IMG_UINT32 ui32FWContext,
+							  IMG_UINT32 ui32Offset,
+							  IMG_UINT uCount,
+							  PRGXFWIF_UFO_ADDR *pauiAddresses,
+							  IMG_UINT32 *paui32Values);
+
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+							 IMG_UINT32 ui32FWCtx,
+							 IMG_UINT32 ui32ExtJobRef,
+							 IMG_UINT32 ui32IntJobRef,
+							 IMG_UINT32 ui32UFOCount,
+							 const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+#endif
+
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+									IMG_UINT32 ui32FWCtx,
+									IMG_UINT32 ui32ExtJobRef,
+									IMG_UINT32 ui32IntJobRef,
+									IMG_BOOL bPrEvent,
+									IMG_UINT32 ui32UFOCount,
+									const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+								 IMG_UINT32 ui32FWCtx,
+								 IMG_UINT32 ui32ExtJobRef,
+								 IMG_UINT32 ui32IntJobRef,
+								 IMG_BOOL bPrEvent,
+								 IMG_UINT32 ui32UFOCount,
+								 const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+#else  /* !CONFIG_EVENT_TRACING */
+static inline
+bool trace_rogue_are_fence_checks_traced(void)
+{
+	return false;
+}
+
+static inline
+bool trace_rogue_are_fence_updates_traced(void)
+{
+	return false;
+}
+
+static inline
+void trace_job_enqueue(IMG_UINT32 ui32FWContext,
+                       IMG_UINT32 ui32ExtJobRef,
+                       IMG_UINT32 ui32IntJobRef,
+                       const char *pszKickType)
+{
+}
+
+#if defined(SUPPORT_RGX)
+static inline
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+							   IMG_UINT32 ui32FWContext,
+							   IMG_UINT32 ui32Offset,
+							   IMG_UINT uCount,
+							   PRGXFWIF_UFO_ADDR *pauiAddresses,
+							   IMG_UINT32 *paui32Values)
+{
+}
+
+static inline
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+							  IMG_UINT32 ui32FWContext,
+							  IMG_UINT32 ui32Offset,
+							  IMG_UINT uCount,
+							  PRGXFWIF_UFO_ADDR *pauiAddresses,
+							  IMG_UINT32 *paui32Values)
+{
+}
+
+static inline
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+							 IMG_UINT32 ui32FWCtx,
+							 IMG_UINT32 ui32ExtJobRef,
+							 IMG_UINT32 ui32IntJobRef,
+							 IMG_UINT32 ui32UFOCount,
+							 const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+#endif
+
+static inline
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+									IMG_UINT32 ui32FWCtx,
+									IMG_UINT32 ui32ExtJobRef,
+									IMG_UINT32 ui32IntJobRef,
+									IMG_BOOL bPrEvent,
+									IMG_UINT32 ui32UFOCount,
+									const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+
+static inline
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+								 IMG_UINT32 ui32FWCtx,
+								 IMG_UINT32 ui32ExtJobRef,
+								 IMG_UINT32 ui32IntJobRef,
+								 IMG_BOOL bPrEvent,
+								 IMG_UINT32 ui32UFOCount,
+								 const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+#endif /* CONFIG_EVENT_TRACING */
+
+#endif /* TRACE_EVENTS_H */
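The interface above mirrors the kernel's own tracepoint convention: call
sites invoke the helpers unconditionally, and when CONFIG_EVENT_TRACING is
disabled each helper collapses to an empty static inline. A minimal sketch
of the pattern, using a hypothetical event:

	/* Sketch only: trace_my_event() is hypothetical. Callers never need
	 * an #ifdef of their own; the stub compiles away when tracing is
	 * disabled. */
	#ifdef CONFIG_EVENT_TRACING
	void trace_my_event(unsigned int value); /* real implementation */
	#else
	static inline void trace_my_event(unsigned int value)
	{
	}
	#endif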
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/uniq_key_splay_tree.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/uniq_key_splay_tree.c
new file mode 100644
index 0000000..cfb10e1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/uniq_key_splay_tree.c
@@ -0,0 +1,244 @@
+/*************************************************************************/ /*!
+@File
+@Title          Provides splay-trees.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implementation of splay-trees.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "allocmem.h" /* for OSMemAlloc / OSMemFree */
+#include "osfunc.h" /* for OSMemFree */
+#include "pvr_debug.h"
+#include "uniq_key_splay_tree.h"
+
+/**
+ * This function performs a simple top-down splay.
+ *
+ * @param ui32Flags the key that must be splayed to the root (if possible).
+ * @param psTree The tree to splay.
+ * @return the resulting tree after the splay operation.
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVSplay (IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree)
+{
+	IMG_SPLAY_TREE sTmp1;
+	IMG_PSPLAY_TREE psLeft;
+	IMG_PSPLAY_TREE psRight;
+	IMG_PSPLAY_TREE psTmp2;
+
+	if (psTree == NULL)
+	{
+		return NULL;
+	}
+
+	sTmp1.psLeft = NULL;
+	sTmp1.psRight = NULL;
+
+	psLeft = &sTmp1;
+	psRight = &sTmp1;
+
+	for (;;)
+	{
+		if (ui32Flags < psTree->ui32Flags)
+		{
+			if (psTree->psLeft == NULL)
+			{
+				break;
+			}
+
+			if (ui32Flags < psTree->psLeft->ui32Flags)
+			{
+				/* if we get to this point, we need to rotate right the tree */
+				psTmp2 = psTree->psLeft;
+				psTree->psLeft = psTmp2->psRight;
+				psTmp2->psRight = psTree;
+				psTree = psTmp2;
+				if (psTree->psLeft == NULL)
+				{
+					break;
+				}
+			}
+
+			/* if we get to this point, we need to link right */
+			psRight->psLeft = psTree;
+			psRight = psTree;
+			psTree = psTree->psLeft;
+		}
+		else
+		{
+			if (ui32Flags > psTree->ui32Flags)
+			{
+				if (psTree->psRight == NULL)
+				{
+					break;
+				}
+
+				if (ui32Flags > psTree->psRight->ui32Flags)
+				{
+					/* if we get to this point, we need to rotate left the tree */
+					psTmp2 = psTree->psRight;
+					psTree->psRight = psTmp2->psLeft;
+					psTmp2->psLeft = psTree;
+					psTree = psTmp2;
+					if (psTree->psRight == NULL)
+					{
+						break;
+					}
+				}
+
+				/* if we get to this point, we need to link left */
+				psLeft->psRight = psTree;
+				psLeft = psTree;
+				psTree = psTree->psRight;
+			}
+			else
+			{
+				break;
+			}
+		}
+	}
+
+	/* at this point re-assemble the tree */
+	psLeft->psRight = psTree->psLeft;
+	psRight->psLeft = psTree->psRight;
+	psTree->psLeft = sTmp1.psRight;
+	psTree->psRight = sTmp1.psLeft;
+	return psTree;
+}
+
+
+/**
+ * This function inserts a node into the tree (unless a node with that key is
+ * already present, in which case this is equivalent to performing only a
+ * splay operation).
+ *
+ * @param ui32Flags the key of the new node
+ * @param psTree The tree into which one wants to add a new node
+ * @return The resulting tree with the node in it
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVInsert(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree)
+{
+	IMG_PSPLAY_TREE psNew;
+
+	if (psTree != NULL)
+	{
+		psTree = PVRSRVSplay(ui32Flags, psTree);
+		if (psTree->ui32Flags == ui32Flags)
+		{
+			return psTree;
+		}
+	}
+
+	psNew = (IMG_PSPLAY_TREE) OSAllocMem(sizeof(IMG_SPLAY_TREE));
+	if (psNew == NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR, "Error: failed to allocate memory to add a node to the splay tree."));
+		return NULL;
+	}
+
+	psNew->ui32Flags = ui32Flags;
+	OSCachedMemSet(&(psNew->buckets[0]), 0, sizeof(psNew->buckets));
+
+#if defined(PVR_CTZLL)
+	psNew->bHasEltsMapping = ~(((IMG_ELTS_MAPPINGS) 1 << (sizeof(psNew->buckets) / (sizeof(psNew->buckets[0])))) - 1);
+#endif
+
+	if (psTree == NULL)
+	{
+		psNew->psLeft  = NULL;
+		psNew->psRight = NULL;
+		return psNew;
+	}
+
+	if (ui32Flags < psTree->ui32Flags)
+	{
+		psNew->psLeft  = psTree->psLeft;
+		psNew->psRight = psTree;
+		psTree->psLeft = NULL;
+	}
+	else
+	{
+		psNew->psRight  = psTree->psRight;
+		psNew->psLeft   = psTree;
+		psTree->psRight = NULL;
+	}
+
+	return psNew;
+}
+
+
+/**
+ * Deletes a node from the tree (if the key is not present, this is
+ * equivalent to performing only a splay operation).
+ *
+ * @param ui32Flags the key of the node to remove
+ * @param psTree the tree from which the node must be removed
+ * @return the resulting tree
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVDelete(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree)
+{
+	IMG_PSPLAY_TREE psTmp;
+	if (psTree == NULL)
+	{
+		return NULL;
+	}
+
+	psTree = PVRSRVSplay(ui32Flags, psTree);
+	if (ui32Flags == psTree->ui32Flags)
+	{
+		/* The value was present in the tree */
+		if (psTree->psLeft == NULL)
+		{
+			psTmp = psTree->psRight;
+		}
+		else
+		{
+			psTmp = PVRSRVSplay(ui32Flags, psTree->psLeft);
+			psTmp->psRight = psTree->psRight;
+		}
+		OSFreeMem(psTree);
+		return psTmp;
+	}
+
+	/* the value was not present in the tree, so just return the tree as is
+	 * (after the splay) */
+	return psTree;
+}
+
+
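These three functions form the whole interface of the tree: every insert,
delete and lookup splays the requested key towards the root, so recently
used keys stay cheap to reach. A minimal usage sketch, assuming the rest of
the services environment is in place (allocation-failure handling elided):

	/* Sketch only: keys are the ui32Flags values; one node per key. */
	IMG_PSPLAY_TREE psTree = NULL;

	psTree = PVRSRVInsert(0x1, psTree); /* tree = {0x1} */
	psTree = PVRSRVInsert(0x4, psTree); /* tree = {0x1, 0x4}, 0x4 at root */
	psTree = PVRSRVSplay(0x1, psTree);  /* splays 0x1 back to the root */
	psTree = PVRSRVDelete(0x4, psTree); /* tree = {0x1} */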
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/uniq_key_splay_tree.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/uniq_key_splay_tree.h
new file mode 100644
index 0000000..c1113fd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.11_5516664/uniq_key_splay_tree.h
@@ -0,0 +1,86 @@
+/*************************************************************************/ /*!
+@File
+@Title          Splay trees interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides debug functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef UNIQ_KEY_SPLAY_TREE_H_
+#define UNIQ_KEY_SPLAY_TREE_H_
+
+#include "img_types.h"
+#include "pvr_intrinsics.h"
+
+#if defined(PVR_CTZLL)
+  /* map the per-bucket "is free" booleans onto the bits of a single
+   * integer. This way, the driver can find the first non-empty bucket
+   * without a loop.
+   */
+  typedef IMG_UINT64 IMG_ELTS_MAPPINGS;
+#endif
+
+/* heads of the lists of free boundary tags, indexed by pvr_log2 of the
+   boundary tag size */
+#define FREE_TABLE_LIMIT 40
+
+struct _BT_;
+
+typedef struct img_splay_tree
+{
+	/* left child/subtree */
+    struct img_splay_tree * psLeft;
+
+	/* right child/subtree */
+    struct img_splay_tree * psRight;
+
+    /* Flags to match on this span, used as the key. */
+    IMG_UINT32 ui32Flags;
+#if defined(PVR_CTZLL)
+	/* each bit of this integer is a boolean telling whether the
+	   corresponding bucket is empty or not */
+    IMG_ELTS_MAPPINGS bHasEltsMapping;
+#endif
+	struct _BT_ * buckets[FREE_TABLE_LIMIT];
+} IMG_SPLAY_TREE, *IMG_PSPLAY_TREE;
+
+IMG_PSPLAY_TREE PVRSRVSplay (IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVInsert(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVDelete(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+
+
+#endif /* !UNIQ_KEY_SPLAY_TREE_H_ */
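When PVR_CTZLL is available, the per-node bucket bitmap lets the allocator
locate a flagged bucket in constant time instead of scanning all
FREE_TABLE_LIMIT entries. A sketch of the lookup, assuming PVR_CTZLL
resolves to a count-trailing-zeros intrinsic such as __builtin_ctzll:

	/* Sketch only: psNode is a hypothetical IMG_PSPLAY_TREE. Yields the
	 * index of the lowest set bit in the bucket map. */
	#if defined(PVR_CTZLL)
	IMG_UINT32 uiBucket = PVR_CTZLL(psNode->bHasEltsMapping);
	#endif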
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/GPL-COPYING b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/GPL-COPYING
new file mode 100644
index 0000000..83d1261
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/GPL-COPYING
@@ -0,0 +1,344 @@
+-------------------------------------------------------------------------
+
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+    59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	Appendix: How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) 19yy  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) 19yy name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
+
+-------------------------------------------------------------------------
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/INSTALL b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/INSTALL
new file mode 100644
index 0000000..035a58b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/INSTALL
@@ -0,0 +1,58 @@
+Rogue Embedded Systems DDK for the Linux kernel.
+Copyright (C) Imagination Technologies Ltd. All rights reserved.
+======================================================================
+
+This file covers how to build and install the Imagination Technologies
+Rogue DDK for the Linux kernel.  For full details, see the relevant platform
+guide.
+
+
+Build System Environment Variables
+-------------------------------------------
+
+The Rogue DDK Build scripts depend on a number of environment variables
+being set up before compilation or installation of DDK software can
+commence:
+
+$DISCIMAGE
+The DDK Build scripts install files to the location specified by the
+DISCIMAGE environment variable.  To do so, they need to know where the 
+target system image resides: 
+$ export DISCIMAGE=/path/to/filesystem
+If you are building on the target system, you can set this to '/'.
+
+$KERNELDIR
+When building the Rogue DDK kernel module, the build needs access
+to the headers of the Linux kernel.
+If you are building on the target machine, you can set this as follows:
+$ export KERNELDIR=/usr/src/linux-headers-`uname -r`
+
+$CROSS_COMPILE
+If you intend to target a platform that is different from your build
+machine (e.g., if you are compiling on an x86 machine but targeting ARM),
+you need to set the CROSS_COMPILE variable so that the build system uses
+the correct compiler. E.g.:
+$ export CROSS_COMPILE=arm-linux-gnueabihf-
+
+
+Build and Install Instructions
+-------------------------------------------
+
+The Rogue DDK configures different target builds within directories under
+build/linux/.
+
+The most interesting build targets are:
+
+	build   Makes everything
+	clobber Removes all binaries and intermediate files for all builds.
+	install Runs the install script generated by the build.
+
+The following variables may be set on the command line to influence a build.
+
+	BUILD   The type of build being performed.
+                Alternatives are release, timing or debug.
+
+To build for a given platform, change to the appropriate target directory, e.g.:
+$ cd eurasiacon/build/linux/<platform>
+$ make BUILD=debug
+$ sudo make install
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/MIT-COPYING b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/MIT-COPYING
new file mode 100644
index 0000000..0cbd14e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/MIT-COPYING
@@ -0,0 +1,41 @@
+
+This software is Copyright (C) Imagination Technologies Ltd.
+
+You may use, distribute and copy this software under the terms of the MIT
+license displayed below.
+
+-----------------------------------------------------------------------------
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, this Software may be used under the terms of the GNU General
+Public License Version 2 ("GPL") in which case the provisions of GPL are
+applicable instead of those above.
+
+If you wish to allow use of your version of this Software only under the terms
+of GPL, and not to allow others to use your version of this file under the
+terms of the MIT license, indicate your decision by deleting from each file
+the provisions above and replace them with the notice and other provisions
+required by GPL as set out in the file called "GPL-COPYING" included in this
+distribution. If you do not delete the provisions above, a recipient may use
+your version of this file under the terms of either the MIT license or GPL.
+
+-----------------------------------------------------------------------------
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+-----------------------------------------------------------------------------
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/Makefile b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/Makefile
new file mode 100644
index 0000000..0d7fce0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/Makefile
@@ -0,0 +1,228 @@
+OPTIM = -Os
+
+ccflags-$(CONFIG_MTK_TINYSYS_SSPM_SUPPORT) += -DSSPM_DEBUG
+
+ECO := ""
+
+RGX_TOP := $(dir $(lastword $(MAKEFILE_LIST)))
+VER := $(if $(filter eng,$(TARGET_BUILD_VARIANT)),eng,user)
+
+#MTK_DDK_DEBUG := y
+MTK_PLATFORM := mt8167
+
+ifeq ($(MTK_DDK_DEBUG),y)
+	ccflags-y += -include $(RGX_TOP)/include/config_kernel_debug_$(MTK_PLATFORM)$(ECO).h
+else
+	ccflags-y += -include $(RGX_TOP)/include/config_kernel_$(VER)_$(MTK_PLATFORM)$(ECO).h
+endif
+
+ccflags-y += \
+	-D__linux__ $(OPTIM) -g \
+	-fno-strict-aliasing \
+	-W -Wall \
+	-Wno-missing-field-initializers \
+	-Wdeclaration-after-statement \
+	-Wno-format-zero-length \
+	-Wmissing-prototypes \
+	-Wstrict-prototypes \
+	-Wno-unused-parameter \
+	-Wno-sign-compare \
+	-Wno-type-limits \
+	-Wno-error
+
+pvrsrvkm-y += \
+	services/server/env/linux/event.o \
+	services/server/env/linux/km_apphint.o \
+	services/server/env/linux/module_common.o \
+	services/server/env/linux/osmmap_stub.o \
+	services/server/env/linux/osfunc.o \
+	services/server/env/linux/allocmem.o \
+	services/server/env/linux/osconnection_server.o \
+	services/server/env/linux/pdump.o \
+	services/server/env/linux/physmem_osmem_linux.o \
+	services/server/env/linux/pmr_os.o \
+	services/server/env/linux/pvr_debugfs.o \
+	services/server/env/linux/pvr_bridge_k.o \
+	services/server/env/linux/pvr_debug.o \
+	services/server/env/linux/physmem_dmabuf.o \
+	services/server/common/devicemem_heapcfg.o \
+	services/shared/common/devicemem.o \
+	services/shared/common/devicemem_utils.o \
+	services/shared/common/hash.o \
+	services/shared/common/ra.o \
+	services/shared/common/sync.o \
+	services/shared/common/mem_utils.o \
+	services/server/common/devicemem_server.o \
+	services/server/common/handle.o \
+	services/server/common/lists.o \
+	services/server/common/mmu_common.o \
+	services/server/common/connection_server.o \
+	services/server/common/physheap.o \
+	services/server/common/physmem.o \
+	services/server/common/physmem_lma.o \
+	services/server/common/physmem_hostmem.o \
+	services/server/common/physmem_tdsecbuf.o \
+	services/server/common/pmr.o \
+	services/server/common/power.o \
+	services/server/common/process_stats.o \
+	services/server/common/pvr_notifier.o \
+	services/server/common/pvrsrv.o \
+	services/server/common/srvcore.o \
+	services/server/common/sync_checkpoint.o \
+	services/server/common/sync_server.o \
+	services/shared/common/htbuffer.o \
+	services/server/common/htbserver.o \
+	services/server/common/tlintern.o \
+	services/shared/common/tlclient.o \
+	services/server/common/tlserver.o \
+	services/server/common/tlstream.o \
+	services/server/common/cache_km.o \
+	services/shared/common/uniq_key_splay_tree.o \
+	services/server/common/pvrsrv_pool.o \
+	services/server/common/info_page_km.o \
+	services/server/common/devicemem_history_server.o \
+	services/server/env/linux/handle_idr.o \
+	services/server/env/linux/pvr_gputrace.o \
+	services/server/devices/rgx/env/linux/km/rgxfwload.o \
+	services/server/devices/rgx/rgxsignals.o \
+	services/server/devices/rgx/debugmisc_server.o \
+	services/server/devices/rgx/rgxccb.o \
+	services/server/devices/rgx/rgxdebug.o \
+	services/server/devices/rgx/rgxfwimageutils.o \
+	services/server/devices/rgx/rgxfwutils.o \
+	services/server/devices/rgx/rgxinit.o \
+	services/server/devices/rgx/rgxkicksync.o \
+	services/server/devices/rgx/rgxlayer_impl.o \
+	services/server/devices/rgx/rgxbreakpoint.o \
+	services/server/devices/rgx/rgxmem.o \
+	services/server/devices/rgx/rgxmipsmmuinit.o \
+	services/server/devices/rgx/rgxmmuinit.o \
+	services/server/devices/rgx/rgxray.o \
+	services/server/devices/rgx/rgxsrvinit.o \
+	services/server/devices/rgx/rgxsrvinit_script.o \
+	services/server/devices/rgx/rgxregconfig.o \
+	services/server/devices/rgx/rgxtransfer.o \
+	services/server/devices/rgx/rgxutils.o \
+	services/server/devices/rgx/rgxta3d.o \
+	services/server/devices/rgx/rgxtimerquery.o \
+	services/server/devices/rgx/rgxtdmtransfer.o \
+	services/server/devices/rgx/rgxpower.o \
+	services/server/devices/rgx/rgxhwperf.o \
+	services/server/devices/rgx/rgxstartstop.o \
+	services/server/devices/rgx/rgxtimecorr.o \
+	services/server/devices/rgx/rgxcompute.o \
+	services/shared/devices/rgx/rgx_compat_bvnc.o \
+	services/shared/devices/rgx/rgx_hwperf_table.o \
+	services/system/common/vmm_type_stub.o \
+	services/system/common/vmm_pvz_client.o \
+	services/system/common/vmm_pvz_server.o \
+	services/system/common/vz_physheap_common.o \
+	services/system/common/vz_physheap_generic.o \
+	services/system/common/vz_support.o \
+	services/system/common/vz_vmm_pvz.o \
+	services/system/common/vz_vmm_vm.o \
+	services/system/common/env/linux/dma_support.o \
+	services/system/common/env/linux/interrupt_support.o \
+	kernel/drivers/staging/imgtec/pvr_fence.o \
+	kernel/drivers/staging/imgtec/pvr_drm.o \
+	kernel/drivers/staging/imgtec/pvr_buffer_sync.o \
+	generated/mm_bridge/client_mm_direct_bridge.o \
+	generated/mm_bridge/server_mm_bridge.o \
+	generated/cmm_bridge/server_cmm_bridge.o \
+	generated/rgxtq_bridge/server_rgxtq_bridge.o \
+	generated/rgxtq2_bridge/server_rgxtq2_bridge.o \
+	generated/rgxta3d_bridge/server_rgxta3d_bridge.o \
+	generated/srvcore_bridge/server_srvcore_bridge.o \
+	generated/sync_bridge/client_sync_direct_bridge.o \
+	generated/sync_bridge/server_sync_bridge.o \
+	generated/breakpoint_bridge/server_breakpoint_bridge.o \
+	generated/debugmisc_bridge/server_debugmisc_bridge.o \
+	generated/htbuffer_bridge/server_htbuffer_bridge.o \
+	generated/htbuffer_bridge/client_htbuffer_direct_bridge.o \
+	generated/pvrtl_bridge/client_pvrtl_direct_bridge.o \
+	generated/pvrtl_bridge/server_pvrtl_bridge.o \
+	generated/rgxhwperf_bridge/server_rgxhwperf_bridge.o \
+	generated/regconfig_bridge/server_regconfig_bridge.o \
+	generated/timerquery_bridge/server_timerquery_bridge.o \
+	generated/rgxkicksync_bridge/server_rgxkicksync_bridge.o \
+	generated/rgxray_bridge/server_rgxray_bridge.o \
+	generated/cache_bridge/client_cache_direct_bridge.o \
+	generated/cache_bridge/server_cache_bridge.o \
+	generated/dmabuf_bridge/server_dmabuf_bridge.o \
+	generated/rgxcmp_bridge/server_rgxcmp_bridge.o \
+	generated/devicememhistory_bridge/server_devicememhistory_bridge.o \
+	generated/devicememhistory_bridge/client_devicememhistory_direct_bridge.o \
+	generated/rgxsignals_bridge/server_rgxsignals_bridge.o \
+	services/system/rgx_mtk/sysconfig.o \
+	services/system/rgx_mtk/$(MTK_PLATFORM)/mtk_mfgsys.o \
+	kernel/drivers/staging/imgtec/pvr_platform_drv.o
+
+pvrsrvkm-$(CONFIG_ARM) += services/server/env/linux/osfunc_arm.o
+pvrsrvkm-$(CONFIG_ARM64) += services/server/env/linux/osfunc_arm64.o
+pvrsrvkm-$(CONFIG_EVENT_TRACING) += services/server/env/linux/trace_events.o
+
+ifeq ($(MTK_DDK_DEBUG),y)
+	pvrsrvkm-y += \
+		services/server/common/devicemem_history_server.o \
+		services/server/common/ri_server.o \
+		generated/ri_bridge/client_ri_direct_bridge.o \
+		generated/ri_bridge/server_ri_bridge.o \
+		generated/devicememhistory_bridge/server_devicememhistory_bridge.o \
+		generated/devicememhistory_bridge/client_devicememhistory_direct_bridge.o
+endif
+
+ccflags-y += \
+	-I$(RGX_TOP)/hwdefs \
+	-I$(RGX_TOP)/hwdefs/km \
+	-I$(RGX_TOP)/hwdefs/km/configs \
+	-I$(RGX_TOP)/hwdefs/km/cores \
+	-I$(RGX_TOP)/include \
+	-I$(RGX_TOP)/include/drm \
+	-I$(RGX_TOP)/include/public \
+	-I$(RGX_TOP)/kernel/drivers/staging/imgtec \
+	-I$(RGX_TOP)/generated/breakpoint_bridge \
+	-I$(RGX_TOP)/generated/cache_bridge \
+	-I$(RGX_TOP)/generated/cmm_bridge \
+	-I$(RGX_TOP)/generated/debugmisc_bridge \
+	-I$(RGX_TOP)/generated/dmabuf_bridge \
+	-I$(RGX_TOP)/generated/htbuffer_bridge \
+	-I$(RGX_TOP)/generated/mm_bridge \
+	-I$(RGX_TOP)/generated/pvrtl_bridge \
+	-I$(RGX_TOP)/generated/regconfig_bridge \
+	-I$(RGX_TOP)/generated/rgxcmp_bridge \
+	-I$(RGX_TOP)/generated/rgxhwperf_bridge \
+	-I$(RGX_TOP)/generated/rgxinit_bridge \
+	-I$(RGX_TOP)/generated/rgxkicksync_bridge \
+	-I$(RGX_TOP)/generated/rgxray_bridge \
+	-I$(RGX_TOP)/generated/rgxta3d_bridge \
+	-I$(RGX_TOP)/generated/rgxtq_bridge \
+	-I$(RGX_TOP)/generated/rgxtq2_bridge \
+	-I$(RGX_TOP)/generated/srvcore_bridge \
+	-I$(RGX_TOP)/generated/sync_bridge \
+	-I$(RGX_TOP)/generated/timerquery_bridge \
+	-I$(RGX_TOP)/generated/devicememhistory_bridge \
+	-I$(RGX_TOP)/generated/rgxsignals_bridge \
+	-I$(RGX_TOP)/generated/ri_bridge \
+	-I$(RGX_TOP)/services/include \
+	-I$(RGX_TOP)/services/include/env/linux \
+	-I$(RGX_TOP)/services/include/shared \
+	-I$(RGX_TOP)/services/server/devices/rgx \
+	-I$(RGX_TOP)/services/server/env/linux \
+	-I$(RGX_TOP)/services/server/include \
+	-I$(RGX_TOP)/services/shared/common \
+	-I$(RGX_TOP)/services/shared/devices/rgx \
+	-I$(RGX_TOP)/services/shared/include \
+	-I$(RGX_TOP)/services/srvinit/devices/rgx \
+	-I$(RGX_TOP)/services/srvinit/include \
+	-I$(RGX_TOP)/services/system/include \
+	-I$(RGX_TOP)/services/system/rgx_mtk \
+	-I$(RGX_TOP)/services/system/rgx_mtk/$(MTK_PLATFORM)
+
+ccflags-y += \
+	-I$(srctree)/drivers/misc/mediatek/gpu/ged/include \
+	-I$(srctree)/drivers/misc/mediatek/include/mt-plat
+
+obj-$(CONFIG_RGX_M1_9ED4917962) += pvrsrvkm.o
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/README b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/README
new file mode 100644
index 0000000..8579ae1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/README
@@ -0,0 +1,32 @@
+Rogue Embedded Systems DDK for the Linux kernel.
+Copyright (C) Imagination Technologies Ltd. All rights reserved.
+======================================================================
+
+
+About 
+-------------------------------------------
+
+This is the Imagination Technologies Rogue DDK for the Linux kernel.
+
+
+License
+-------------------------------------------
+
+You may use, distribute and copy this software under the terms of the MIT
+license.  Details of this license can be found in the file "MIT-COPYING".
+
+Alternatively, you may use, distribute and copy this software under the terms
+of the GNU General Public License version 2.  The full GNU General Public
+License version 2 can be found in the file "GPL-COPYING".
+
+
+Build and Install Instructions
+-------------------------------------------
+
+For details see the "INSTALL" file and the platform guide.
+
+Contact information:
+-------------------------------------------
+
+Imagination Technologies Ltd. <gpl-support@imgtec.com>
+Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/apollo.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/apollo.mk
new file mode 100644
index 0000000..6d34673
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/apollo.mk
@@ -0,0 +1,4 @@
+apollo-y += \
+ tc_apollo.o \
+ tc_drv.o \
+ tc_odin.o
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/config_kernel.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/config_kernel.h
new file mode 100644
index 0000000..ddecaaa
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/config_kernel.h
@@ -0,0 +1,132 @@
+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES 8
+#define DISPLAY_CONTROLLER drm_pdp
+#define GPUVIRT_VALIDATION_NUM_OS 8
+#define GPUVIRT_VALIDATION_NUM_REGIONS 2
+#define HWR_DEFAULT_ENABLED
+#define LINUX
+#define LMA
+#define PDUMP_STREAMBUF_MAX_SIZE_MB 16
+#define PVRSRV_APPHINT_ASSERTONHWRTRIGGER IMG_FALSE
+#define PVRSRV_APPHINT_ASSERTOUTOFMEMORY IMG_FALSE
+#define PVRSRV_APPHINT_CACHEOPCONFIG 0
+#define PVRSRV_APPHINT_CHECKMLIST APPHNT_BLDVAR_DEBUG
+#define PVRSRV_APPHINT_CLEANUPTHREADPRIORITY 0
+#define PVRSRV_APPHINT_CLEANUPTHREADWEIGHT 0
+#define PVRSRV_APPHINT_DISABLECLOCKGATING 0
+#define PVRSRV_APPHINT_DISABLEDMOVERLAP 0
+#define PVRSRV_APPHINT_DISABLEFEDLOGGING IMG_FALSE
+#define PVRSRV_APPHINT_DISABLEPDUMPPANIC IMG_FALSE
+#define PVRSRV_APPHINT_DRIVERMODE 0x7FFFFFFF
+#define PVRSRV_APPHINT_DUSTREQUESTINJECT IMG_FALSE
+#define PVRSRV_APPHINT_EMUMAXFREQ 0
+#define PVRSRV_APPHINT_ENABLEAPM RGX_ACTIVEPM_DEFAULT
+#define PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE 0
+#define PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH RGXFWIF_INICFG_CTXSWITCH_DM_ALL
+#define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_FALSE
+#define PVRSRV_APPHINT_ENABLEHTBLOGGROUP 0
+#define PVRSRV_APPHINT_ENABLELOGGROUP 0
+#define PVRSRV_APPHINT_ENABLERDPOWERISLAND RGX_RD_POWER_ISLAND_DEFAULT
+#define PVRSRV_APPHINT_ENABLESIGNATURECHECKS APPHNT_BLDVAR_ENABLESIGNATURECHECKS
+#define PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG IMG_FALSE
+#define PVRSRV_APPHINT_FIRMWARELOGTYPE 0
+#define PVRSRV_APPHINT_FIRMWAREPERF FW_PERF_CONF_NONE
+#define PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN
+#define PVRSRV_APPHINT_FWPOISONONFREEVALUE 0xBD
+#define PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE 0x4000
+#define PVRSRV_APPHINT_GPIOVALIDATIONMODE 0
+#define PVRSRV_APPHINT_HTBOPERATIONMODE HTB_OPMODE_DROPLATEST
+#define PVRSRV_APPHINT_HTBUFFERSIZE 0x1000
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENRL 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES 0
+#define PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER 0
+#define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB 2048
+#define PVRSRV_APPHINT_HWPERFFWFILTER 0
+#define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB 128
+#define PVRSRV_APPHINT_HWPERFHOSTFILTER 0
+#define PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT APPHNT_BLDVAR_DBGDUMPLIMIT
+#define PVRSRV_APPHINT_JONESDISABLEMASK 0
+#define PVRSRV_APPHINT_NEWFILTERINGMODE 1
+#define PVRSRV_APPHINT_OSIDREGION0MAX  "0x3FFFFFFF 0x0FFFFFFF 0x17FFFFFF 0x1FFFFFFF 0x27FFFFFF 0x2FFFFFFF 0x37FFFFFF 0x3FFFFFFF"
+#define PVRSRV_APPHINT_OSIDREGION0MIN  "0x00000000 0x04000000 0x10000000 0x18000000 0x20000000 0x28000000 0x30000000 0x38000000"
+#define PVRSRV_APPHINT_OSIDREGION1MAX  "0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF"
+#define PVRSRV_APPHINT_OSIDREGION1MIN  "0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000"
+#define PVRSRV_APPHINT_RGXBVNC ""
+#define PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE RGXFW_SIG_BUFFER_SIZE_MIN
+#define PVRSRV_APPHINT_TIMECORRCLOCK 0
+#define PVRSRV_APPHINT_TRUNCATEMODE 0
+#define PVRSRV_APPHINT_USEMETAT1 RGX_META_T1_OFF
+#define PVRSRV_APPHINT_VDMCONTEXTSWITCHMODE RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX
+#define PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY 0
+#define PVRSRV_APPHINT_WATCHDOGTHREADWEIGHT 0
+#define PVRSRV_APPHINT_ZEROFREELIST IMG_FALSE
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD 90
+#define PVRSRV_ENABLE_PROCESS_STATS
+#define PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN 256
+#define PVRSRV_MODNAME "pvrsrvkm"
+#define PVRSRV_NEED_PVR_STACKTRACE_NATIVE
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D 16
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RTU 15
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA 15
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D 14
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D 14
+#define PVRSRV_VZ_NUM_OSID
+#define PVR_BUILD_DIR "tc_linux"
+#define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 524288
+#define PVR_DRM_NAME "pvr"
+#define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm"
+#define PVR_LDM_PLATFORM_PRE_REGISTERED
+#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256
+#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD  16384
+#define PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE
+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM  2
+#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 20480
+#define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240
+#define PVR_LINUX_TIMERS_USING_WORKQUEUES
+#define PVR_LINUX_USING_WORKQUEUES
+#define RGX_BNC_CONFIG_KM_HEADER "configs/rgxconfig_km_1.V.4.5.h"
+#define RGX_BVNC_CORE_KM_HEADER "cores/rgxcore_km_1.82.4.5.h"
+#define RGX_FW_FILENAME "rgx.fw"
+#define RGX_FW_HEAP_GUEST_OFFSET_FWCCB 0x53080U
+#define RGX_FW_HEAP_GUEST_OFFSET_FWCCBCTL 0x53040U
+#define RGX_FW_HEAP_GUEST_OFFSET_KCCB 0x54000U
+#define RGX_FW_HEAP_GUEST_OFFSET_KCCBCTL 0x53000U
+#define RGX_FW_HEAP_SHIFT 25
+#define SUPPORT_BUFFER_SYNC 1
+#define SUPPORT_DBGDRV_EVENT_OBJECTS
+#define SUPPORT_GPUTRACE_EVENTS
+#define SUPPORT_LINUX_REFCNT_PMR_ON_IMPORT
+#define SUPPORT_LINUX_X86_PAT
+#define SUPPORT_LINUX_X86_WRITECOMBINE
+#define SUPPORT_MMU_PENDING_FAULT_PROTECTION
+#define SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION
+#define SUPPORT_PERCONTEXT_FREELIST
+#define SUPPORT_RGX 1
+#define SUPPORT_SERVER_SYNC
+#define SUPPORT_VDM_CONTEXT_STORE_BUFFER_AB
+#define TC_APOLLO_ES2
+#define TC_DISPLAY_MEM_SIZE 383
+#define TC_MEMORY_CONFIG TC_MEMORY_LOCAL
+#define TC_SECURE_MEM_SIZE 128
+#ifdef CONFIG_DRM_POWERVR_ROGUE_DEBUG
+#define DEBUG
+#define DEBUG_BRIDGE_KM
+#define DEBUG_HANDLEALLOC_KM
+#define DEBUG_LINUX_MEMORY_ALLOCATIONS
+#define DEBUG_LINUX_MEM_AREAS
+#define DEBUG_LINUX_MMAP_AREAS
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE
+#define PVR_BUILD_TYPE "debug"
+#define PVR_RI_DEBUG
+#define RGXFW_ALIGNCHECKS
+#define SUPPORT_DEVICEMEMHISTORY_BRIDGE
+#define SUPPORT_PAGE_FAULT_DEBUG
+#else
+#define PVR_BUILD_TYPE "release"
+#define RELEASE
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/config_kernel.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/config_kernel.mk
new file mode 100644
index 0000000..867cefc
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/config_kernel.mk
@@ -0,0 +1,24 @@
+override DISPLAY_CONTROLLER := drm_pdp
+override LMA := 1
+override METAG_VERSION_NEEDED := 2.8.1.0.3
+override MIPS_VERSION_NEEDED := 2014.07-1
+override PVRSRV_MODNAME := pvrsrvkm
+override PVR_BUILD_DIR := tc_linux
+override PVR_HANDLE_BACKEND := idr
+override PVR_SYSTEM := rgx_linux_tc
+override RGX_TIMECORR_CLOCK := mono
+override SUPPORT_BUFFER_SYNC := 1
+override SUPPORT_COMPUTE := 1
+override SUPPORT_GPUTRACE_EVENTS := 1
+override SUPPORT_SERVER_SYNC := 1
+override SUPPORT_TLA := 1
+ifeq ($(CONFIG_DRM_POWERVR_ROGUE_DEBUG),y)
+override BUILD := debug
+override PVR_BUILD_TYPE := debug
+override PVR_RI_DEBUG := 1
+override SUPPORT_DEVICEMEMHISTORY_BRIDGE := 1
+override SUPPORT_PAGE_FAULT_DEBUG := 1
+else
+override BUILD := release
+override PVR_BUILD_TYPE := release
+endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/copy_items.sh b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/copy_items.sh
new file mode 100644
index 0000000..97bd303
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/copy_items.sh
@@ -0,0 +1,405 @@
+copyfile generated/breakpoint_bridge/server_breakpoint_bridge.c drivers/gpu/drm/img-rogue/server_breakpoint_bridge.c
+copyfile generated/cache_bridge/client_cache_direct_bridge.c drivers/gpu/drm/img-rogue/client_cache_direct_bridge.c
+copyfile generated/cache_bridge/server_cache_bridge.c drivers/gpu/drm/img-rogue/server_cache_bridge.c
+copyfile generated/cmm_bridge/server_cmm_bridge.c drivers/gpu/drm/img-rogue/server_cmm_bridge.c
+copyfile generated/debugmisc_bridge/server_debugmisc_bridge.c drivers/gpu/drm/img-rogue/server_debugmisc_bridge.c
+copyfile generated/devicememhistory_bridge/client_devicememhistory_direct_bridge.c drivers/gpu/drm/img-rogue/client_devicememhistory_direct_bridge.c
+copyfile generated/devicememhistory_bridge/server_devicememhistory_bridge.c drivers/gpu/drm/img-rogue/server_devicememhistory_bridge.c
+copyfile generated/dmabuf_bridge/server_dmabuf_bridge.c drivers/gpu/drm/img-rogue/server_dmabuf_bridge.c
+copyfile generated/htbuffer_bridge/client_htbuffer_direct_bridge.c drivers/gpu/drm/img-rogue/client_htbuffer_direct_bridge.c
+copyfile generated/htbuffer_bridge/server_htbuffer_bridge.c drivers/gpu/drm/img-rogue/server_htbuffer_bridge.c
+copyfile generated/mm_bridge/client_mm_direct_bridge.c drivers/gpu/drm/img-rogue/client_mm_direct_bridge.c
+copyfile generated/mm_bridge/server_mm_bridge.c drivers/gpu/drm/img-rogue/server_mm_bridge.c
+copyfile generated/pvrtl_bridge/client_pvrtl_direct_bridge.c drivers/gpu/drm/img-rogue/client_pvrtl_direct_bridge.c
+copyfile generated/pvrtl_bridge/server_pvrtl_bridge.c drivers/gpu/drm/img-rogue/server_pvrtl_bridge.c
+copyfile generated/regconfig_bridge/server_regconfig_bridge.c drivers/gpu/drm/img-rogue/server_regconfig_bridge.c
+copyfile generated/rgxcmp_bridge/server_rgxcmp_bridge.c drivers/gpu/drm/img-rogue/server_rgxcmp_bridge.c
+copyfile generated/rgxhwperf_bridge/server_rgxhwperf_bridge.c drivers/gpu/drm/img-rogue/server_rgxhwperf_bridge.c
+copyfile generated/rgxkicksync_bridge/server_rgxkicksync_bridge.c drivers/gpu/drm/img-rogue/server_rgxkicksync_bridge.c
+copyfile generated/rgxray_bridge/server_rgxray_bridge.c drivers/gpu/drm/img-rogue/server_rgxray_bridge.c
+copyfile generated/rgxsignals_bridge/server_rgxsignals_bridge.c drivers/gpu/drm/img-rogue/server_rgxsignals_bridge.c
+copyfile generated/rgxta3d_bridge/server_rgxta3d_bridge.c drivers/gpu/drm/img-rogue/server_rgxta3d_bridge.c
+copyfile generated/rgxtq2_bridge/server_rgxtq2_bridge.c drivers/gpu/drm/img-rogue/server_rgxtq2_bridge.c
+copyfile generated/rgxtq_bridge/server_rgxtq_bridge.c drivers/gpu/drm/img-rogue/server_rgxtq_bridge.c
+copyfile generated/ri_bridge/client_ri_direct_bridge.c drivers/gpu/drm/img-rogue/client_ri_direct_bridge.c
+copyfile generated/ri_bridge/server_ri_bridge.c drivers/gpu/drm/img-rogue/server_ri_bridge.c
+copyfile generated/srvcore_bridge/server_srvcore_bridge.c drivers/gpu/drm/img-rogue/server_srvcore_bridge.c
+copyfile generated/sync_bridge/client_sync_direct_bridge.c drivers/gpu/drm/img-rogue/client_sync_direct_bridge.c
+copyfile generated/sync_bridge/server_sync_bridge.c drivers/gpu/drm/img-rogue/server_sync_bridge.c
+copyfile generated/timerquery_bridge/server_timerquery_bridge.c drivers/gpu/drm/img-rogue/server_timerquery_bridge.c
+copyfile kernel/drivers/staging/imgtec/pvr_buffer_sync.c drivers/gpu/drm/img-rogue/pvr_buffer_sync.c
+copyfile kernel/drivers/staging/imgtec/pvr_drm.c drivers/gpu/drm/img-rogue/pvr_drm.c
+copyfile kernel/drivers/staging/imgtec/pvr_fence.c drivers/gpu/drm/img-rogue/pvr_fence.c
+copyfile kernel/drivers/staging/imgtec/pvr_platform_drv.c drivers/gpu/drm/img-rogue/pvr_platform_drv.c
+copyfile kernel/drivers/staging/imgtec/pvr_sw_fence.c drivers/gpu/drm/img-rogue/pvr_sw_fence.c
+copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_crtc.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_crtc.c
+copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_debugfs.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_debugfs.c
+copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_drv.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.c
+copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_dvi.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_dvi.c
+copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_gem.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.c
+copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_modeset.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_modeset.c
+copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_tmds.c drivers/gpu/drm/img-rogue/apollo/drm_pdp_tmds.c
+copyfile kernel/drivers/staging/imgtec/tc/tc_apollo.c drivers/gpu/drm/img-rogue/apollo/tc_apollo.c
+copyfile kernel/drivers/staging/imgtec/tc/tc_drv.c drivers/gpu/drm/img-rogue/apollo/tc_drv.c
+copyfile kernel/drivers/staging/imgtec/tc/tc_odin.c drivers/gpu/drm/img-rogue/apollo/tc_odin.c
+copyfile services/server/common/cache_km.c drivers/gpu/drm/img-rogue/cache_km.c
+copyfile services/server/common/connection_server.c drivers/gpu/drm/img-rogue/connection_server.c
+copyfile services/server/common/devicemem_heapcfg.c drivers/gpu/drm/img-rogue/devicemem_heapcfg.c
+copyfile services/server/common/devicemem_history_server.c drivers/gpu/drm/img-rogue/devicemem_history_server.c
+copyfile services/server/common/devicemem_server.c drivers/gpu/drm/img-rogue/devicemem_server.c
+copyfile services/server/common/handle.c drivers/gpu/drm/img-rogue/handle.c
+copyfile services/server/common/htbserver.c drivers/gpu/drm/img-rogue/htbserver.c
+copyfile services/server/common/info_page_km.c drivers/gpu/drm/img-rogue/info_page_km.c
+copyfile services/server/common/lists.c drivers/gpu/drm/img-rogue/lists.c
+copyfile services/server/common/mmu_common.c drivers/gpu/drm/img-rogue/mmu_common.c
+copyfile services/server/common/physheap.c drivers/gpu/drm/img-rogue/physheap.c
+copyfile services/server/common/physmem.c drivers/gpu/drm/img-rogue/physmem.c
+copyfile services/server/common/physmem_hostmem.c drivers/gpu/drm/img-rogue/physmem_hostmem.c
+copyfile services/server/common/physmem_lma.c drivers/gpu/drm/img-rogue/physmem_lma.c
+copyfile services/server/common/physmem_tdsecbuf.c drivers/gpu/drm/img-rogue/physmem_tdsecbuf.c
+copyfile services/server/common/pmr.c drivers/gpu/drm/img-rogue/pmr.c
+copyfile services/server/common/power.c drivers/gpu/drm/img-rogue/power.c
+copyfile services/server/common/process_stats.c drivers/gpu/drm/img-rogue/process_stats.c
+copyfile services/server/common/pvr_notifier.c drivers/gpu/drm/img-rogue/pvr_notifier.c
+copyfile services/server/common/pvrsrv.c drivers/gpu/drm/img-rogue/pvrsrv.c
+copyfile services/server/common/pvrsrv_pool.c drivers/gpu/drm/img-rogue/pvrsrv_pool.c
+copyfile services/server/common/ri_server.c drivers/gpu/drm/img-rogue/ri_server.c
+copyfile services/server/common/srvcore.c drivers/gpu/drm/img-rogue/srvcore.c
+copyfile services/server/common/sync_checkpoint.c drivers/gpu/drm/img-rogue/sync_checkpoint.c
+copyfile services/server/common/sync_server.c drivers/gpu/drm/img-rogue/sync_server.c
+copyfile services/server/common/tlintern.c drivers/gpu/drm/img-rogue/tlintern.c
+copyfile services/server/common/tlserver.c drivers/gpu/drm/img-rogue/tlserver.c
+copyfile services/server/common/tlstream.c drivers/gpu/drm/img-rogue/tlstream.c
+copyfile services/server/devices/rgx/debugmisc_server.c drivers/gpu/drm/img-rogue/debugmisc_server.c
+copyfile services/server/devices/rgx/env/linux/km/rgxfwload.c drivers/gpu/drm/img-rogue/rgxfwload.c
+copyfile services/server/devices/rgx/rgxbreakpoint.c drivers/gpu/drm/img-rogue/rgxbreakpoint.c
+copyfile services/server/devices/rgx/rgxccb.c drivers/gpu/drm/img-rogue/rgxccb.c
+copyfile services/server/devices/rgx/rgxcompute.c drivers/gpu/drm/img-rogue/rgxcompute.c
+copyfile services/server/devices/rgx/rgxdebug.c drivers/gpu/drm/img-rogue/rgxdebug.c
+copyfile services/server/devices/rgx/rgxfwimageutils.c drivers/gpu/drm/img-rogue/rgxfwimageutils.c
+copyfile services/server/devices/rgx/rgxfwutils.c drivers/gpu/drm/img-rogue/rgxfwutils.c
+copyfile services/server/devices/rgx/rgxhwperf.c drivers/gpu/drm/img-rogue/rgxhwperf.c
+copyfile services/server/devices/rgx/rgxinit.c drivers/gpu/drm/img-rogue/rgxinit.c
+copyfile services/server/devices/rgx/rgxkicksync.c drivers/gpu/drm/img-rogue/rgxkicksync.c
+copyfile services/server/devices/rgx/rgxlayer_impl.c drivers/gpu/drm/img-rogue/rgxlayer_impl.c
+copyfile services/server/devices/rgx/rgxmem.c drivers/gpu/drm/img-rogue/rgxmem.c
+copyfile services/server/devices/rgx/rgxmipsmmuinit.c drivers/gpu/drm/img-rogue/rgxmipsmmuinit.c
+copyfile services/server/devices/rgx/rgxmmuinit.c drivers/gpu/drm/img-rogue/rgxmmuinit.c
+copyfile services/server/devices/rgx/rgxpower.c drivers/gpu/drm/img-rogue/rgxpower.c
+copyfile services/server/devices/rgx/rgxray.c drivers/gpu/drm/img-rogue/rgxray.c
+copyfile services/server/devices/rgx/rgxregconfig.c drivers/gpu/drm/img-rogue/rgxregconfig.c
+copyfile services/server/devices/rgx/rgxsignals.c drivers/gpu/drm/img-rogue/rgxsignals.c
+copyfile services/server/devices/rgx/rgxsrvinit.c drivers/gpu/drm/img-rogue/rgxsrvinit.c
+copyfile services/server/devices/rgx/rgxsrvinit_script.c drivers/gpu/drm/img-rogue/rgxsrvinit_script.c
+copyfile services/server/devices/rgx/rgxstartstop.c drivers/gpu/drm/img-rogue/rgxstartstop.c
+copyfile services/server/devices/rgx/rgxta3d.c drivers/gpu/drm/img-rogue/rgxta3d.c
+copyfile services/server/devices/rgx/rgxtdmtransfer.c drivers/gpu/drm/img-rogue/rgxtdmtransfer.c
+copyfile services/server/devices/rgx/rgxtimecorr.c drivers/gpu/drm/img-rogue/rgxtimecorr.c
+copyfile services/server/devices/rgx/rgxtimerquery.c drivers/gpu/drm/img-rogue/rgxtimerquery.c
+copyfile services/server/devices/rgx/rgxtransfer.c drivers/gpu/drm/img-rogue/rgxtransfer.c
+copyfile services/server/devices/rgx/rgxutils.c drivers/gpu/drm/img-rogue/rgxutils.c
+copyfile services/server/env/linux/allocmem.c drivers/gpu/drm/img-rogue/allocmem.c
+copyfile services/server/env/linux/event.c drivers/gpu/drm/img-rogue/event.c
+copyfile services/server/env/linux/handle_idr.c drivers/gpu/drm/img-rogue/handle_idr.c
+copyfile services/server/env/linux/km_apphint.c drivers/gpu/drm/img-rogue/km_apphint.c
+copyfile services/server/env/linux/module_common.c drivers/gpu/drm/img-rogue/module_common.c
+copyfile services/server/env/linux/osconnection_server.c drivers/gpu/drm/img-rogue/osconnection_server.c
+copyfile services/server/env/linux/osfunc.c drivers/gpu/drm/img-rogue/osfunc.c
+copyfile services/server/env/linux/osmmap_stub.c drivers/gpu/drm/img-rogue/osmmap_stub.c
+copyfile services/server/env/linux/pdump.c drivers/gpu/drm/img-rogue/pdump.c
+copyfile services/server/env/linux/physmem_dmabuf.c drivers/gpu/drm/img-rogue/physmem_dmabuf.c
+copyfile services/server/env/linux/physmem_osmem_linux.c drivers/gpu/drm/img-rogue/physmem_osmem_linux.c
+copyfile services/server/env/linux/pmr_os.c drivers/gpu/drm/img-rogue/pmr_os.c
+copyfile services/server/env/linux/pvr_bridge_k.c drivers/gpu/drm/img-rogue/pvr_bridge_k.c
+copyfile services/server/env/linux/pvr_debug.c drivers/gpu/drm/img-rogue/pvr_debug.c
+copyfile services/server/env/linux/pvr_debugfs.c drivers/gpu/drm/img-rogue/pvr_debugfs.c
+copyfile services/server/env/linux/pvr_gputrace.c drivers/gpu/drm/img-rogue/pvr_gputrace.c
+copyfile services/shared/common/devicemem.c drivers/gpu/drm/img-rogue/devicemem.c
+copyfile services/shared/common/devicemem_utils.c drivers/gpu/drm/img-rogue/devicemem_utils.c
+copyfile services/shared/common/hash.c drivers/gpu/drm/img-rogue/hash.c
+copyfile services/shared/common/htbuffer.c drivers/gpu/drm/img-rogue/htbuffer.c
+copyfile services/shared/common/mem_utils.c drivers/gpu/drm/img-rogue/mem_utils.c
+copyfile services/shared/common/ra.c drivers/gpu/drm/img-rogue/ra.c
+copyfile services/shared/common/sync.c drivers/gpu/drm/img-rogue/sync.c
+copyfile services/shared/common/tlclient.c drivers/gpu/drm/img-rogue/tlclient.c
+copyfile services/shared/common/uniq_key_splay_tree.c drivers/gpu/drm/img-rogue/uniq_key_splay_tree.c
+copyfile services/shared/devices/rgx/rgx_compat_bvnc.c drivers/gpu/drm/img-rogue/rgx_compat_bvnc.c
+copyfile services/shared/devices/rgx/rgx_hwperf_table.c drivers/gpu/drm/img-rogue/rgx_hwperf_table.c
+copyfile services/system/common/env/linux/dma_support.c drivers/gpu/drm/img-rogue/system/dma_support.c
+copyfile services/system/common/env/linux/pci_support.c drivers/gpu/drm/img-rogue/system/pci_support.c
+copyfile services/system/common/vmm_pvz_client.c drivers/gpu/drm/img-rogue/system/vmm_pvz_client.c
+copyfile services/system/common/vmm_pvz_server.c drivers/gpu/drm/img-rogue/system/vmm_pvz_server.c
+copyfile services/system/common/vmm_type_stub.c drivers/gpu/drm/img-rogue/system/vmm_type_stub.c
+copyfile services/system/common/vz_physheap_common.c drivers/gpu/drm/img-rogue/system/vz_physheap_common.c
+copyfile services/system/common/vz_physheap_generic.c drivers/gpu/drm/img-rogue/system/vz_physheap_generic.c
+copyfile services/system/common/vz_support.c drivers/gpu/drm/img-rogue/system/vz_support.c
+copyfile services/system/common/vz_vmm_pvz.c drivers/gpu/drm/img-rogue/system/vz_vmm_pvz.c
+copyfile services/system/common/vz_vmm_vm.c drivers/gpu/drm/img-rogue/system/vz_vmm_vm.c
+copyfile services/system/rgx_linux_tc/sysconfig.c drivers/gpu/drm/img-rogue/apollo/sysconfig.c
+copyfile services/server/env/linux/osfunc_arm.c drivers/gpu/drm/img-rogue/osfunc_arm.c
+copyfile services/server/env/linux/osfunc_arm64.c drivers/gpu/drm/img-rogue/osfunc_arm64.c
+copyfile services/server/env/linux/trace_events.c drivers/gpu/drm/img-rogue/trace_events.c
+copyfile services/server/env/linux/osfunc_x86.c drivers/gpu/drm/img-rogue/osfunc_x86.c
+copyfile services/server/env/linux/osfunc_mips.c drivers/gpu/drm/img-rogue/osfunc_mips.c
+copyfile generated/breakpoint_bridge/common_breakpoint_bridge.h drivers/gpu/drm/img-rogue/common_breakpoint_bridge.h
+copyfile generated/cache_bridge/client_cache_bridge.h drivers/gpu/drm/img-rogue/client_cache_bridge.h
+copyfile generated/cache_bridge/common_cache_bridge.h drivers/gpu/drm/img-rogue/common_cache_bridge.h
+copyfile generated/cmm_bridge/common_cmm_bridge.h drivers/gpu/drm/img-rogue/common_cmm_bridge.h
+copyfile generated/debugmisc_bridge/common_debugmisc_bridge.h drivers/gpu/drm/img-rogue/common_debugmisc_bridge.h
+copyfile generated/devicememhistory_bridge/client_devicememhistory_bridge.h drivers/gpu/drm/img-rogue/client_devicememhistory_bridge.h
+copyfile generated/devicememhistory_bridge/common_devicememhistory_bridge.h drivers/gpu/drm/img-rogue/common_devicememhistory_bridge.h
+copyfile generated/dmabuf_bridge/common_dmabuf_bridge.h drivers/gpu/drm/img-rogue/common_dmabuf_bridge.h
+copyfile generated/htbuffer_bridge/client_htbuffer_bridge.h drivers/gpu/drm/img-rogue/client_htbuffer_bridge.h
+copyfile generated/htbuffer_bridge/common_htbuffer_bridge.h drivers/gpu/drm/img-rogue/common_htbuffer_bridge.h
+copyfile generated/mm_bridge/client_mm_bridge.h drivers/gpu/drm/img-rogue/client_mm_bridge.h
+copyfile generated/mm_bridge/common_mm_bridge.h drivers/gpu/drm/img-rogue/common_mm_bridge.h
+copyfile generated/pvrtl_bridge/client_pvrtl_bridge.h drivers/gpu/drm/img-rogue/client_pvrtl_bridge.h
+copyfile generated/pvrtl_bridge/common_pvrtl_bridge.h drivers/gpu/drm/img-rogue/common_pvrtl_bridge.h
+copyfile generated/regconfig_bridge/common_regconfig_bridge.h drivers/gpu/drm/img-rogue/common_regconfig_bridge.h
+copyfile generated/rgxcmp_bridge/common_rgxcmp_bridge.h drivers/gpu/drm/img-rogue/common_rgxcmp_bridge.h
+copyfile generated/rgxhwperf_bridge/common_rgxhwperf_bridge.h drivers/gpu/drm/img-rogue/common_rgxhwperf_bridge.h
+copyfile generated/rgxkicksync_bridge/common_rgxkicksync_bridge.h drivers/gpu/drm/img-rogue/common_rgxkicksync_bridge.h
+copyfile generated/rgxray_bridge/common_rgxray_bridge.h drivers/gpu/drm/img-rogue/common_rgxray_bridge.h
+copyfile generated/rgxsignals_bridge/common_rgxsignals_bridge.h drivers/gpu/drm/img-rogue/common_rgxsignals_bridge.h
+copyfile generated/rgxta3d_bridge/common_rgxta3d_bridge.h drivers/gpu/drm/img-rogue/common_rgxta3d_bridge.h
+copyfile generated/rgxtq2_bridge/common_rgxtq2_bridge.h drivers/gpu/drm/img-rogue/common_rgxtq2_bridge.h
+copyfile generated/rgxtq_bridge/common_rgxtq_bridge.h drivers/gpu/drm/img-rogue/common_rgxtq_bridge.h
+copyfile generated/ri_bridge/client_ri_bridge.h drivers/gpu/drm/img-rogue/client_ri_bridge.h
+copyfile generated/ri_bridge/common_ri_bridge.h drivers/gpu/drm/img-rogue/common_ri_bridge.h
+copyfile generated/srvcore_bridge/common_srvcore_bridge.h drivers/gpu/drm/img-rogue/common_srvcore_bridge.h
+copyfile generated/sync_bridge/client_sync_bridge.h drivers/gpu/drm/img-rogue/client_sync_bridge.h
+copyfile generated/sync_bridge/common_sync_bridge.h drivers/gpu/drm/img-rogue/common_sync_bridge.h
+copyfile generated/timerquery_bridge/common_timerquery_bridge.h drivers/gpu/drm/img-rogue/common_timerquery_bridge.h
+copyfile hwdefs/km/configs/rgxconfig_km_1.V.4.5.h drivers/gpu/drm/img-rogue/configs/rgxconfig_km_1.V.4.5.h
+copyfile hwdefs/km/cores/rgxcore_km_1.82.4.5.h drivers/gpu/drm/img-rogue/cores/rgxcore_km_1.82.4.5.h
+copyfile hwdefs/km/rgx_bvnc_defs_km.h drivers/gpu/drm/img-rogue/km/rgx_bvnc_defs_km.h
+copyfile hwdefs/km/rgx_bvnc_table_km.h drivers/gpu/drm/img-rogue/km/rgx_bvnc_table_km.h
+copyfile hwdefs/km/rgx_cr_defs_km.h drivers/gpu/drm/img-rogue/km/rgx_cr_defs_km.h
+copyfile hwdefs/km/rgxdefs_km.h drivers/gpu/drm/img-rogue/km/rgxdefs_km.h
+copyfile hwdefs/km/rgxmmudefs_km.h drivers/gpu/drm/img-rogue/km/rgxmmudefs_km.h
+copyfile include/cache_ops.h drivers/gpu/drm/img-rogue/cache_ops.h
+copyfile include/devicemem_typedefs.h drivers/gpu/drm/img-rogue/devicemem_typedefs.h
+copyfile include/dllist.h drivers/gpu/drm/img-rogue/dllist.h
+copyfile include/drm/pdp_drm.h drivers/gpu/drm/img-rogue/pdp_drm.h
+copyfile include/drm/pvr_drm.h drivers/gpu/drm/img-rogue/pvr_drm.h
+copyfile include/img_3dtypes.h drivers/gpu/drm/img-rogue/img_3dtypes.h
+copyfile include/img_defs.h drivers/gpu/drm/img-rogue/img_defs.h
+copyfile include/img_types.h drivers/gpu/drm/img-rogue/img_types.h
+copyfile include/kernel_types.h drivers/gpu/drm/img-rogue/kernel_types.h
+copyfile include/lock_types.h drivers/gpu/drm/img-rogue/lock_types.h
+copyfile include/log2.h drivers/gpu/drm/img-rogue/log2.h
+copyfile include/pdumpdefs.h drivers/gpu/drm/img-rogue/pdumpdefs.h
+copyfile include/public/powervr/buffer_attribs.h drivers/gpu/drm/img-rogue/powervr/buffer_attribs.h
+copyfile include/public/powervr/mem_types.h drivers/gpu/drm/img-rogue/powervr/mem_types.h
+copyfile include/public/powervr/sync_external.h drivers/gpu/drm/img-rogue/powervr/sync_external.h
+copyfile include/pvr_buffer_sync_shared.h drivers/gpu/drm/img-rogue/pvr_buffer_sync_shared.h
+copyfile include/pvr_debug.h drivers/gpu/drm/img-rogue/pvr_debug.h
+copyfile include/pvr_intrinsics.h drivers/gpu/drm/img-rogue/pvr_intrinsics.h
+copyfile include/pvrmodule.h drivers/gpu/drm/img-rogue/pvrmodule.h
+copyfile include/pvrsrv_device_types.h drivers/gpu/drm/img-rogue/pvrsrv_device_types.h
+copyfile include/pvrsrv_devmem.h drivers/gpu/drm/img-rogue/pvrsrv_devmem.h
+copyfile include/pvrsrv_error.h drivers/gpu/drm/img-rogue/pvrsrv_error.h
+copyfile include/pvrsrv_errors.h drivers/gpu/drm/img-rogue/pvrsrv_errors.h
+copyfile include/pvrsrv_memallocflags.h drivers/gpu/drm/img-rogue/pvrsrv_memallocflags.h
+copyfile include/pvrsrv_sync_km.h drivers/gpu/drm/img-rogue/pvrsrv_sync_km.h
+copyfile include/pvrsrv_sync_um.h drivers/gpu/drm/img-rogue/pvrsrv_sync_um.h
+copyfile include/pvrsrv_tlcommon.h drivers/gpu/drm/img-rogue/pvrsrv_tlcommon.h
+copyfile include/pvrsrv_tlstreams.h drivers/gpu/drm/img-rogue/pvrsrv_tlstreams.h
+copyfile include/pvrversion.h drivers/gpu/drm/img-rogue/pvrversion.h
+copyfile include/rgx_common.h drivers/gpu/drm/img-rogue/rgx_common.h
+copyfile include/rgx_firmware_processor.h drivers/gpu/drm/img-rogue/rgx_firmware_processor.h
+copyfile include/rgx_heaps.h drivers/gpu/drm/img-rogue/rgx_heaps.h
+copyfile include/rgx_hwperf.h drivers/gpu/drm/img-rogue/rgx_hwperf.h
+copyfile include/rgx_memallocflags.h drivers/gpu/drm/img-rogue/rgx_memallocflags.h
+copyfile include/rgx_meta.h drivers/gpu/drm/img-rogue/rgx_meta.h
+copyfile include/rgx_mips.h drivers/gpu/drm/img-rogue/rgx_mips.h
+copyfile include/rgx_options.h drivers/gpu/drm/img-rogue/rgx_options.h
+copyfile include/rgxscript.h drivers/gpu/drm/img-rogue/rgxscript.h
+copyfile include/ri_typedefs.h drivers/gpu/drm/img-rogue/ri_typedefs.h
+copyfile include/services_km.h drivers/gpu/drm/img-rogue/services_km.h
+copyfile include/servicesext.h drivers/gpu/drm/img-rogue/servicesext.h
+copyfile include/sync_checkpoint_external.h drivers/gpu/drm/img-rogue/sync_checkpoint_external.h
+copyfile include/system/rgx_tc/apollo_regs.h drivers/gpu/drm/img-rogue/apollo/apollo_regs.h
+copyfile include/system/rgx_tc/bonnie_tcf.h drivers/gpu/drm/img-rogue/apollo/bonnie_tcf.h
+copyfile include/system/rgx_tc/odin_defs.h drivers/gpu/drm/img-rogue/apollo/odin_defs.h
+copyfile include/system/rgx_tc/odin_pdp_regs.h drivers/gpu/drm/img-rogue/apollo/odin_pdp_regs.h
+copyfile include/system/rgx_tc/odin_regs.h drivers/gpu/drm/img-rogue/apollo/odin_regs.h
+copyfile include/system/rgx_tc/pdp_regs.h drivers/gpu/drm/img-rogue/apollo/pdp_regs.h
+copyfile include/system/rgx_tc/tcf_clk_ctrl.h drivers/gpu/drm/img-rogue/apollo/tcf_clk_ctrl.h
+copyfile include/system/rgx_tc/tcf_pll.h drivers/gpu/drm/img-rogue/apollo/tcf_pll.h
+copyfile include/system/rgx_tc/tcf_rgbpdp_regs.h drivers/gpu/drm/img-rogue/apollo/tcf_rgbpdp_regs.h
+copyfile kernel/drivers/staging/imgtec/plato/pdp2_mmu_regs.h drivers/gpu/drm/img-rogue/pdp2_mmu_regs.h
+copyfile kernel/drivers/staging/imgtec/plato/pdp2_regs.h drivers/gpu/drm/img-rogue/pdp2_regs.h
+copyfile kernel/drivers/staging/imgtec/plato/plato_drv.h drivers/gpu/drm/img-rogue/plato_drv.h
+copyfile kernel/drivers/staging/imgtec/pvr_buffer_sync.h drivers/gpu/drm/img-rogue/pvr_buffer_sync.h
+copyfile kernel/drivers/staging/imgtec/pvr_drv.h drivers/gpu/drm/img-rogue/pvr_drv.h
+copyfile kernel/drivers/staging/imgtec/pvr_fence.h drivers/gpu/drm/img-rogue/pvr_fence.h
+copyfile kernel/drivers/staging/imgtec/pvr_linux_fence.h drivers/gpu/drm/img-rogue/pvr_linux_fence.h
+copyfile kernel/drivers/staging/imgtec/pvr_sw_fence.h drivers/gpu/drm/img-rogue/pvr_sw_fence.h
+copyfile kernel/drivers/staging/imgtec/services_kernel_client.h drivers/gpu/drm/img-rogue/services_kernel_client.h
+copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_drv.h drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.h
+copyfile kernel/drivers/staging/imgtec/tc/drm_pdp_gem.h drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.h
+copyfile kernel/drivers/staging/imgtec/tc/pdp_apollo.h drivers/gpu/drm/img-rogue/apollo/pdp_apollo.h
+copyfile kernel/drivers/staging/imgtec/tc/pdp_common.h drivers/gpu/drm/img-rogue/apollo/pdp_common.h
+copyfile kernel/drivers/staging/imgtec/tc/pdp_odin.h drivers/gpu/drm/img-rogue/apollo/pdp_odin.h
+copyfile kernel/drivers/staging/imgtec/tc/tc_apollo.h drivers/gpu/drm/img-rogue/apollo/tc_apollo.h
+copyfile kernel/drivers/staging/imgtec/tc/tc_drv.h drivers/gpu/drm/img-rogue/apollo/tc_drv.h
+copyfile kernel/drivers/staging/imgtec/tc/tc_drv_internal.h drivers/gpu/drm/img-rogue/apollo/tc_drv_internal.h
+copyfile kernel/drivers/staging/imgtec/tc/tc_ion.h drivers/gpu/drm/img-rogue/apollo/tc_ion.h
+copyfile kernel/drivers/staging/imgtec/tc/tc_odin.h drivers/gpu/drm/img-rogue/apollo/tc_odin.h
+copyfile services/include/htbuffer_sf.h drivers/gpu/drm/img-rogue/htbuffer_sf.h
+copyfile services/include/htbuffer_types.h drivers/gpu/drm/img-rogue/htbuffer_types.h
+copyfile services/include/info_page_defs.h drivers/gpu/drm/img-rogue/info_page_defs.h
+copyfile services/include/km_apphint_defs.h drivers/gpu/drm/img-rogue/km_apphint_defs.h
+copyfile services/include/mm_common.h drivers/gpu/drm/img-rogue/mm_common.h
+copyfile services/include/os_cpu_cache.h drivers/gpu/drm/img-rogue/os_cpu_cache.h
+copyfile services/include/os_srvinit_param.h drivers/gpu/drm/img-rogue/os_srvinit_param.h
+copyfile services/include/pdump.h drivers/gpu/drm/img-rogue/pdump.h
+copyfile services/include/physheap.h drivers/gpu/drm/img-rogue/physheap.h
+copyfile services/include/pvr_bridge.h drivers/gpu/drm/img-rogue/pvr_bridge.h
+copyfile services/include/rgx_bridge.h drivers/gpu/drm/img-rogue/rgx_bridge.h
+copyfile services/include/rgx_fwif.h drivers/gpu/drm/img-rogue/rgx_fwif.h
+copyfile services/include/rgx_fwif_alignchecks.h drivers/gpu/drm/img-rogue/rgx_fwif_alignchecks.h
+copyfile services/include/rgx_fwif_hwperf.h drivers/gpu/drm/img-rogue/rgx_fwif_hwperf.h
+copyfile services/include/rgx_fwif_km.h drivers/gpu/drm/img-rogue/rgx_fwif_km.h
+copyfile services/include/rgx_fwif_resetframework.h drivers/gpu/drm/img-rogue/rgx_fwif_resetframework.h
+copyfile services/include/rgx_fwif_sf.h drivers/gpu/drm/img-rogue/rgx_fwif_sf.h
+copyfile services/include/rgx_fwif_shared.h drivers/gpu/drm/img-rogue/rgx_fwif_shared.h
+copyfile services/include/rgx_fwif_sig.h drivers/gpu/drm/img-rogue/rgx_fwif_sig.h
+copyfile services/include/rgx_pdump_panics.h drivers/gpu/drm/img-rogue/rgx_pdump_panics.h
+copyfile services/include/rgx_tq_shared.h drivers/gpu/drm/img-rogue/rgx_tq_shared.h
+copyfile services/include/rgxapi_km.h drivers/gpu/drm/img-rogue/rgxapi_km.h
+copyfile services/include/rgxfw_log_helper.h drivers/gpu/drm/img-rogue/rgxfw_log_helper.h
+copyfile services/include/shared/allocmem.h drivers/gpu/drm/img-rogue/allocmem.h
+copyfile services/include/shared/hash.h drivers/gpu/drm/img-rogue/hash.h
+copyfile services/include/shared/lock.h drivers/gpu/drm/img-rogue/lock.h
+copyfile services/include/shared/ra.h drivers/gpu/drm/img-rogue/ra.h
+copyfile services/include/sync_checkpoint_internal.h drivers/gpu/drm/img-rogue/sync_checkpoint_internal.h
+copyfile services/include/sync_checkpoint_internal_fw.h drivers/gpu/drm/img-rogue/sync_checkpoint_internal_fw.h
+copyfile services/server/devices/rgx/debugmisc_server.h drivers/gpu/drm/img-rogue/debugmisc_server.h
+copyfile services/server/devices/rgx/rgxbreakpoint.h drivers/gpu/drm/img-rogue/rgxbreakpoint.h
+copyfile services/server/devices/rgx/rgxccb.h drivers/gpu/drm/img-rogue/rgxccb.h
+copyfile services/server/devices/rgx/rgxcompute.h drivers/gpu/drm/img-rogue/rgxcompute.h
+copyfile services/server/devices/rgx/rgxdebug.h drivers/gpu/drm/img-rogue/rgxdebug.h
+copyfile services/server/devices/rgx/rgxdevice.h drivers/gpu/drm/img-rogue/rgxdevice.h
+copyfile services/server/devices/rgx/rgxfwimageutils.h drivers/gpu/drm/img-rogue/rgxfwimageutils.h
+copyfile services/server/devices/rgx/rgxfwload.h drivers/gpu/drm/img-rogue/rgxfwload.h
+copyfile services/server/devices/rgx/rgxfwutils.h drivers/gpu/drm/img-rogue/rgxfwutils.h
+copyfile services/server/devices/rgx/rgxheapconfig.h drivers/gpu/drm/img-rogue/rgxheapconfig.h
+copyfile services/server/devices/rgx/rgxhwperf.h drivers/gpu/drm/img-rogue/rgxhwperf.h
+copyfile services/server/devices/rgx/rgxinit.h drivers/gpu/drm/img-rogue/rgxinit.h
+copyfile services/server/devices/rgx/rgxkicksync.h drivers/gpu/drm/img-rogue/rgxkicksync.h
+copyfile services/server/devices/rgx/rgxlayer.h drivers/gpu/drm/img-rogue/rgxlayer.h
+copyfile services/server/devices/rgx/rgxlayer_impl.h drivers/gpu/drm/img-rogue/rgxlayer_impl.h
+copyfile services/server/devices/rgx/rgxmem.h drivers/gpu/drm/img-rogue/rgxmem.h
+copyfile services/server/devices/rgx/rgxmipsmmuinit.h drivers/gpu/drm/img-rogue/rgxmipsmmuinit.h
+copyfile services/server/devices/rgx/rgxmmuinit.h drivers/gpu/drm/img-rogue/rgxmmuinit.h
+copyfile services/server/devices/rgx/rgxpower.h drivers/gpu/drm/img-rogue/rgxpower.h
+copyfile services/server/devices/rgx/rgxray.h drivers/gpu/drm/img-rogue/rgxray.h
+copyfile services/server/devices/rgx/rgxregconfig.h drivers/gpu/drm/img-rogue/rgxregconfig.h
+copyfile services/server/devices/rgx/rgxsignals.h drivers/gpu/drm/img-rogue/rgxsignals.h
+copyfile services/server/devices/rgx/rgxsrvinit_script.h drivers/gpu/drm/img-rogue/rgxsrvinit_script.h
+copyfile services/server/devices/rgx/rgxstartstop.h drivers/gpu/drm/img-rogue/rgxstartstop.h
+copyfile services/server/devices/rgx/rgxta3d.h drivers/gpu/drm/img-rogue/rgxta3d.h
+copyfile services/server/devices/rgx/rgxtdmtransfer.h drivers/gpu/drm/img-rogue/rgxtdmtransfer.h
+copyfile services/server/devices/rgx/rgxtimecorr.h drivers/gpu/drm/img-rogue/rgxtimecorr.h
+copyfile services/server/devices/rgx/rgxtimerquery.h drivers/gpu/drm/img-rogue/rgxtimerquery.h
+copyfile services/server/devices/rgx/rgxtransfer.h drivers/gpu/drm/img-rogue/rgxtransfer.h
+copyfile services/server/devices/rgx/rgxutils.h drivers/gpu/drm/img-rogue/rgxutils.h
+copyfile services/server/env/linux/env_connection.h drivers/gpu/drm/img-rogue/env_connection.h
+copyfile services/server/env/linux/event.h drivers/gpu/drm/img-rogue/event.h
+copyfile services/server/env/linux/kernel_compatibility.h drivers/gpu/drm/img-rogue/kernel_compatibility.h
+copyfile services/server/env/linux/km_apphint.h drivers/gpu/drm/img-rogue/km_apphint.h
+copyfile services/server/env/linux/linkage.h drivers/gpu/drm/img-rogue/linkage.h
+copyfile services/server/env/linux/module_common.h drivers/gpu/drm/img-rogue/module_common.h
+copyfile services/server/env/linux/physmem_osmem_linux.h drivers/gpu/drm/img-rogue/physmem_osmem_linux.h
+copyfile services/server/env/linux/private_data.h drivers/gpu/drm/img-rogue/private_data.h
+copyfile services/server/env/linux/pvr_debugfs.h drivers/gpu/drm/img-rogue/pvr_debugfs.h
+copyfile services/server/env/linux/pvr_gputrace.h drivers/gpu/drm/img-rogue/pvr_gputrace.h
+copyfile services/server/env/linux/pvr_uaccess.h drivers/gpu/drm/img-rogue/pvr_uaccess.h
+copyfile services/server/env/linux/rogue_trace_events.h drivers/gpu/drm/img-rogue/rogue_trace_events.h
+copyfile services/server/env/linux/trace_events.h drivers/gpu/drm/img-rogue/trace_events.h
+copyfile services/server/include/cache_km.h drivers/gpu/drm/img-rogue/cache_km.h
+copyfile services/server/include/connection_server.h drivers/gpu/drm/img-rogue/connection_server.h
+copyfile services/server/include/device.h drivers/gpu/drm/img-rogue/device.h
+copyfile services/server/include/devicemem_heapcfg.h drivers/gpu/drm/img-rogue/devicemem_heapcfg.h
+copyfile services/server/include/devicemem_history_server.h drivers/gpu/drm/img-rogue/devicemem_history_server.h
+copyfile services/server/include/devicemem_server.h drivers/gpu/drm/img-rogue/devicemem_server.h
+copyfile services/server/include/devicemem_server_utils.h drivers/gpu/drm/img-rogue/devicemem_server_utils.h
+copyfile services/server/include/handle.h drivers/gpu/drm/img-rogue/handle.h
+copyfile services/server/include/handle_impl.h drivers/gpu/drm/img-rogue/handle_impl.h
+copyfile services/server/include/handle_types.h drivers/gpu/drm/img-rogue/handle_types.h
+copyfile services/server/include/htbserver.h drivers/gpu/drm/img-rogue/htbserver.h
+copyfile services/server/include/info_page.h drivers/gpu/drm/img-rogue/info_page.h
+copyfile services/server/include/lists.h drivers/gpu/drm/img-rogue/lists.h
+copyfile services/server/include/mmu_common.h drivers/gpu/drm/img-rogue/mmu_common.h
+copyfile services/server/include/opaque_types.h drivers/gpu/drm/img-rogue/opaque_types.h
+copyfile services/server/include/osconnection_server.h drivers/gpu/drm/img-rogue/osconnection_server.h
+copyfile services/server/include/osfunc.h drivers/gpu/drm/img-rogue/osfunc.h
+copyfile services/server/include/oskm_apphint.h drivers/gpu/drm/img-rogue/oskm_apphint.h
+copyfile services/server/include/pdump_km.h drivers/gpu/drm/img-rogue/pdump_km.h
+copyfile services/server/include/pdump_mmu.h drivers/gpu/drm/img-rogue/pdump_mmu.h
+copyfile services/server/include/pdump_physmem.h drivers/gpu/drm/img-rogue/pdump_physmem.h
+copyfile services/server/include/pdump_symbolicaddr.h drivers/gpu/drm/img-rogue/pdump_symbolicaddr.h
+copyfile services/server/include/physmem.h drivers/gpu/drm/img-rogue/physmem.h
+copyfile services/server/include/physmem_dmabuf.h drivers/gpu/drm/img-rogue/physmem_dmabuf.h
+copyfile services/server/include/physmem_hostmem.h drivers/gpu/drm/img-rogue/physmem_hostmem.h
+copyfile services/server/include/physmem_lma.h drivers/gpu/drm/img-rogue/physmem_lma.h
+copyfile services/server/include/physmem_osmem.h drivers/gpu/drm/img-rogue/physmem_osmem.h
+copyfile services/server/include/physmem_tdsecbuf.h drivers/gpu/drm/img-rogue/physmem_tdsecbuf.h
+copyfile services/server/include/pmr.h drivers/gpu/drm/img-rogue/pmr.h
+copyfile services/server/include/pmr_impl.h drivers/gpu/drm/img-rogue/pmr_impl.h
+copyfile services/server/include/pmr_os.h drivers/gpu/drm/img-rogue/pmr_os.h
+copyfile services/server/include/power.h drivers/gpu/drm/img-rogue/power.h
+copyfile services/server/include/process_stats.h drivers/gpu/drm/img-rogue/process_stats.h
+copyfile services/server/include/pvr_notifier.h drivers/gpu/drm/img-rogue/pvr_notifier.h
+copyfile services/server/include/pvrsrv.h drivers/gpu/drm/img-rogue/pvrsrv.h
+copyfile services/server/include/pvrsrv_apphint.h drivers/gpu/drm/img-rogue/pvrsrv_apphint.h
+copyfile services/server/include/pvrsrv_cleanup.h drivers/gpu/drm/img-rogue/pvrsrv_cleanup.h
+copyfile services/server/include/pvrsrv_device.h drivers/gpu/drm/img-rogue/pvrsrv_device.h
+copyfile services/server/include/pvrsrv_pool.h drivers/gpu/drm/img-rogue/pvrsrv_pool.h
+copyfile services/server/include/pvrsrv_sync_server.h drivers/gpu/drm/img-rogue/pvrsrv_sync_server.h
+copyfile services/server/include/ri_server.h drivers/gpu/drm/img-rogue/ri_server.h
+copyfile services/server/include/srvcore.h drivers/gpu/drm/img-rogue/srvcore.h
+copyfile services/server/include/srvinit.h drivers/gpu/drm/img-rogue/srvinit.h
+copyfile services/server/include/srvkm.h drivers/gpu/drm/img-rogue/srvkm.h
+copyfile services/server/include/sync_checkpoint.h drivers/gpu/drm/img-rogue/sync_checkpoint.h
+copyfile services/server/include/sync_checkpoint_init.h drivers/gpu/drm/img-rogue/sync_checkpoint_init.h
+copyfile services/server/include/sync_fallback_server.h drivers/gpu/drm/img-rogue/sync_fallback_server.h
+copyfile services/server/include/sync_server.h drivers/gpu/drm/img-rogue/sync_server.h
+copyfile services/server/include/tlintern.h drivers/gpu/drm/img-rogue/tlintern.h
+copyfile services/server/include/tlserver.h drivers/gpu/drm/img-rogue/tlserver.h
+copyfile services/server/include/tlstream.h drivers/gpu/drm/img-rogue/tlstream.h
+copyfile services/shared/common/uniq_key_splay_tree.h drivers/gpu/drm/img-rogue/uniq_key_splay_tree.h
+copyfile services/shared/devices/rgx/rgx_compat_bvnc.h drivers/gpu/drm/img-rogue/rgx_compat_bvnc.h
+copyfile services/shared/devices/rgx/rgx_hwperf_table.h drivers/gpu/drm/img-rogue/rgx_hwperf_table.h
+copyfile services/shared/include/device_connection.h drivers/gpu/drm/img-rogue/device_connection.h
+copyfile services/shared/include/devicemem.h drivers/gpu/drm/img-rogue/devicemem.h
+copyfile services/shared/include/devicemem_history_shared.h drivers/gpu/drm/img-rogue/devicemem_history_shared.h
+copyfile services/shared/include/devicemem_pdump.h drivers/gpu/drm/img-rogue/devicemem_pdump.h
+copyfile services/shared/include/devicemem_utils.h drivers/gpu/drm/img-rogue/devicemem_utils.h
+copyfile services/shared/include/htbuffer.h drivers/gpu/drm/img-rogue/htbuffer.h
+copyfile services/shared/include/htbuffer_init.h drivers/gpu/drm/img-rogue/htbuffer_init.h
+copyfile services/shared/include/osmmap.h drivers/gpu/drm/img-rogue/osmmap.h
+copyfile services/shared/include/proc_stats.h drivers/gpu/drm/img-rogue/proc_stats.h
+copyfile services/shared/include/sync.h drivers/gpu/drm/img-rogue/sync.h
+copyfile services/shared/include/sync_internal.h drivers/gpu/drm/img-rogue/sync_internal.h
+copyfile services/shared/include/tlclient.h drivers/gpu/drm/img-rogue/tlclient.h
+copyfile services/system/include/dma_support.h drivers/gpu/drm/img-rogue/system/dma_support.h
+copyfile services/system/include/pci_support.h drivers/gpu/drm/img-rogue/system/pci_support.h
+copyfile services/system/include/syscommon.h drivers/gpu/drm/img-rogue/system/syscommon.h
+copyfile services/system/include/sysvalidation.h drivers/gpu/drm/img-rogue/system/sysvalidation.h
+copyfile services/system/include/vmm_impl.h drivers/gpu/drm/img-rogue/system/vmm_impl.h
+copyfile services/system/include/vmm_pvz_client.h drivers/gpu/drm/img-rogue/system/vmm_pvz_client.h
+copyfile services/system/include/vmm_pvz_common.h drivers/gpu/drm/img-rogue/system/vmm_pvz_common.h
+copyfile services/system/include/vmm_pvz_server.h drivers/gpu/drm/img-rogue/system/vmm_pvz_server.h
+copyfile services/system/include/vz_physheap.h drivers/gpu/drm/img-rogue/system/vz_physheap.h
+copyfile services/system/include/vz_support.h drivers/gpu/drm/img-rogue/system/vz_support.h
+copyfile services/system/include/vz_vm.h drivers/gpu/drm/img-rogue/system/vz_vm.h
+copyfile services/system/include/vz_vmm_pvz.h drivers/gpu/drm/img-rogue/system/vz_vmm_pvz.h
+copyfile services/system/rgx_linux_tc/sysinfo.h drivers/gpu/drm/img-rogue/apollo/sysinfo.h
+copyfile copy-to-kernel-tc/drm_pdp.mk drivers/gpu/drm/img-rogue/pdp/drm_pdp.mk
+copyfile copy-to-kernel-tc/apollo.mk drivers/gpu/drm/img-rogue/apollo/apollo.mk
+copyfile copy-to-kernel-tc/pvrsrvkm.mk drivers/gpu/drm/img-rogue/pvrsrvkm.mk
+copyfile copy-to-kernel-tc/config_kernel.mk drivers/gpu/drm/img-rogue/config_kernel.mk
+copyfile copy-to-kernel-tc/config_kernel.h drivers/gpu/drm/img-rogue/config_kernel.h
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/drm_pdp.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/drm_pdp.mk
new file mode 100644
index 0000000..a9acafe
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/drm_pdp.mk
@@ -0,0 +1,9 @@
+drm_pdp-y += \
+ ../pvr_sw_fence.o \
+ ../apollo/drm_pdp_crtc.o \
+ ../apollo/drm_pdp_debugfs.o \
+ ../apollo/drm_pdp_drv.o \
+ ../apollo/drm_pdp_dvi.o \
+ ../apollo/drm_pdp_gem.o \
+ ../apollo/drm_pdp_modeset.o \
+ ../apollo/drm_pdp_tmds.o
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/pvrsrvkm.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/pvrsrvkm.mk
new file mode 100644
index 0000000..1e7dff4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel-tc/pvrsrvkm.mk
@@ -0,0 +1,134 @@
+pvrsrvkm-y += \
+ server_breakpoint_bridge.o \
+ client_cache_direct_bridge.o \
+ server_cache_bridge.o \
+ server_cmm_bridge.o \
+ server_debugmisc_bridge.o \
+ server_dmabuf_bridge.o \
+ client_htbuffer_direct_bridge.o \
+ server_htbuffer_bridge.o \
+ client_mm_direct_bridge.o \
+ server_mm_bridge.o \
+ client_pvrtl_direct_bridge.o \
+ server_pvrtl_bridge.o \
+ server_regconfig_bridge.o \
+ server_rgxcmp_bridge.o \
+ server_rgxhwperf_bridge.o \
+ server_rgxkicksync_bridge.o \
+ server_rgxray_bridge.o \
+ server_rgxsignals_bridge.o \
+ server_rgxta3d_bridge.o \
+ server_rgxtq2_bridge.o \
+ server_rgxtq_bridge.o \
+ server_srvcore_bridge.o \
+ client_sync_direct_bridge.o \
+ server_sync_bridge.o \
+ server_timerquery_bridge.o \
+ pvr_buffer_sync.o \
+ pvr_drm.o \
+ pvr_fence.o \
+ pvr_platform_drv.o \
+ cache_km.o \
+ connection_server.o \
+ devicemem_heapcfg.o \
+ devicemem_server.o \
+ handle.o \
+ htbserver.o \
+ info_page_km.o \
+ lists.o \
+ mmu_common.o \
+ physheap.o \
+ physmem.o \
+ physmem_hostmem.o \
+ physmem_lma.o \
+ physmem_tdsecbuf.o \
+ pmr.o \
+ power.o \
+ process_stats.o \
+ pvr_notifier.o \
+ pvrsrv.o \
+ pvrsrv_pool.o \
+ srvcore.o \
+ sync_checkpoint.o \
+ sync_server.o \
+ tlintern.o \
+ tlserver.o \
+ tlstream.o \
+ debugmisc_server.o \
+ rgxfwload.o \
+ rgxbreakpoint.o \
+ rgxccb.o \
+ rgxcompute.o \
+ rgxdebug.o \
+ rgxfwimageutils.o \
+ rgxfwutils.o \
+ rgxhwperf.o \
+ rgxinit.o \
+ rgxkicksync.o \
+ rgxlayer_impl.o \
+ rgxmem.o \
+ rgxmipsmmuinit.o \
+ rgxmmuinit.o \
+ rgxpower.o \
+ rgxray.o \
+ rgxregconfig.o \
+ rgxsignals.o \
+ rgxsrvinit.o \
+ rgxsrvinit_script.o \
+ rgxstartstop.o \
+ rgxta3d.o \
+ rgxtdmtransfer.o \
+ rgxtimecorr.o \
+ rgxtimerquery.o \
+ rgxtransfer.o \
+ rgxutils.o \
+ allocmem.o \
+ event.o \
+ handle_idr.o \
+ km_apphint.o \
+ module_common.o \
+ osconnection_server.o \
+ osfunc.o \
+ osmmap_stub.o \
+ pdump.o \
+ physmem_dmabuf.o \
+ physmem_osmem_linux.o \
+ pmr_os.o \
+ pvr_bridge_k.o \
+ pvr_debug.o \
+ pvr_debugfs.o \
+ pvr_gputrace.o \
+ devicemem.o \
+ devicemem_utils.o \
+ hash.o \
+ htbuffer.o \
+ mem_utils.o \
+ ra.o \
+ sync.o \
+ tlclient.o \
+ uniq_key_splay_tree.o \
+ rgx_compat_bvnc.o \
+ rgx_hwperf_table.o \
+ system/dma_support.o \
+ system/pci_support.o \
+ system/vmm_pvz_client.o \
+ system/vmm_pvz_server.o \
+ system/vmm_type_stub.o \
+ system/vz_physheap_common.o \
+ system/vz_physheap_generic.o \
+ system/vz_support.o \
+ system/vz_vmm_pvz.o \
+ system/vz_vmm_vm.o \
+ apollo/sysconfig.o
+pvrsrvkm-$(CONFIG_DRM_POWERVR_ROGUE_DEBUG) += \
+ client_devicememhistory_direct_bridge.o \
+ server_devicememhistory_bridge.o \
+ client_ri_direct_bridge.o \
+ server_ri_bridge.o \
+ devicemem_history_server.o \
+ ri_server.o
+pvrsrvkm-$(CONFIG_ARM)   += osfunc_arm.o
+pvrsrvkm-$(CONFIG_ARM64) += osfunc_arm64.o
+pvrsrvkm-$(CONFIG_EVENT_TRACING) += trace_events.o
+pvrsrvkm-$(CONFIG_MIPS)  += osfunc_mips.o
+pvrsrvkm-$(CONFIG_X86)   += osfunc_x86.o
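
pvrsrvkm.mk above is a standard kbuild composite-object fragment: every entry
appended to pvrsrvkm-y is linked into the single pvrsrvkm.o module object, and
the pvrsrvkm-$(CONFIG_*) lines add the per-architecture and tracing objects
only when the corresponding kernel config symbol is enabled. A Kbuild file
consuming the fragment would look roughly like this (hypothetical sketch; the
CONFIG_DRM_POWERVR_ROGUE symbol name is assumed, not taken from this patch):

	# Hypothetical Kbuild consumer of the fragment above
	obj-$(CONFIG_DRM_POWERVR_ROGUE) += pvrsrvkm.o
	include $(src)/pvrsrvkm.mk
	ccflags-y += -include $(src)/config_kernel.h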
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel.sh b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel.sh
new file mode 100644
index 0000000..f5348ef
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/copy-to-kernel.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+function usage()
+{
+  echo "$0 <config_dir> <kernel_dir>"
+  echo "  Copy source files and configuration into a kernel tree."
+  echo "  The configuration and list of files to copy is found in <config_dir>."
+  echo "  The target kernel tree is <kernel_dir>."
+  echo "  Before running this script, we recommend that you clean out the old"
+  echo "  destination directories in <kernel_dir>."
+}
+
+if [ "$#" -lt 2 ]; then
+  echo "Not enough arguments"
+  usage
+  exit 1
+fi
+
+CONFIG=$1
+DEST=$2
+
+if [ ! -f "$CONFIG/copy_items.sh" ]; then
+  echo "$CONFIG does not look like a config directory.  copy_items.sh is missing."
+  usage
+  exit 1
+fi
+
+if [ ! -f "$DEST/Kconfig" ] ; then
+  echo "$DEST does not look like a kernel directory."
+  usage
+  exit 1
+fi
+
+function copyfile()
+{
+  src="$1"
+  dest="$DEST/$2"
+
+  mkdir -p "$(dirname "$dest")"
+  echo "copy $src to $dest"
+  cp "$src" "$dest"
+  chmod u+w "$dest"
+}
+
+source "$CONFIG/copy_items.sh"
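
copy-to-kernel.sh defines copyfile() before sourcing copy_items.sh, so every
copyfile line in the config directory runs with CONFIG and DEST already
validated. A typical invocation from the DDK source root (paths illustrative)
is:

	./copy-to-kernel.sh copy-to-kernel-tc ~/src/linux-4.19

which populates drivers/gpu/drm/img-rogue/ in the target kernel tree with the
files listed in copy_items.sh above.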
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/breakpoint_bridge/common_breakpoint_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/breakpoint_bridge/common_breakpoint_bridge.h
new file mode 100644
index 0000000..28d3fd2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/breakpoint_bridge/common_breakpoint_bridge.h
@@ -0,0 +1,155 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for breakpoint
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for breakpoint
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_BREAKPOINT_BRIDGE_H
+#define COMMON_BREAKPOINT_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXSETBREAKPOINT			PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+0
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXCLEARBREAKPOINT			PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+1
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXENABLEBREAKPOINT			PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+2
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXDISABLEBREAKPOINT			PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+3
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXOVERALLOCATEBPREGISTERS			PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+4
+#define PVRSRV_BRIDGE_BREAKPOINT_CMD_LAST			(PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+4)
+
+/*******************************************
+            RGXSetBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXSetBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT_TAG
+{
+	IMG_HANDLE hPrivData;
+	IMG_UINT32 eFWDataMaster;
+	IMG_UINT32 ui32BreakpointAddr;
+	IMG_UINT32 ui32HandlerAddr;
+	IMG_UINT32 ui32DM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT;
+
+/* Bridge out structure for RGXSetBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT;
+
+/*******************************************
+            RGXClearBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXClearBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT_TAG
+{
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT;
+
+/* Bridge out structure for RGXClearBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT;
+
+/*******************************************
+            RGXEnableBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXEnableBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT_TAG
+{
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT;
+
+/* Bridge out structure for RGXEnableBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT;
+
+/*******************************************
+            RGXDisableBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXDisableBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT_TAG
+{
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT;
+
+/* Bridge out structure for RGXDisableBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT;
+
+/*******************************************
+            RGXOverallocateBPRegisters
+ *******************************************/
+
+/* Bridge in structure for RGXOverallocateBPRegisters */
+typedef struct PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS_TAG
+{
+	IMG_UINT32 ui32TempRegs;
+	IMG_UINT32 ui32SharedRegs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS;
+
+/* Bridge out structure for RGXOverallocateBPRegisters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS;
+
+#endif /* COMMON_BREAKPOINT_BRIDGE_H */
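
Each bridge call in the header above is a packed in/out structure pair plus a
command index relative to PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST; client and
server must agree on both, which is why the header is shared. A client-side
sketch for RGXSetBreakpoint (BridgeCall and the surrounding variables are
stand-ins, not the real dispatch API):

	PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT sIn = { 0 };
	PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT sOut = { 0 };

	sIn.hPrivData = hPrivData;      /* device private-data handle */
	sIn.eFWDataMaster = eFWDataMaster;
	sIn.ui32BreakpointAddr = ui32BreakpointAddr;
	sIn.ui32HandlerAddr = ui32HandlerAddr;
	sIn.ui32DM = ui32DM;
	/* BridgeCall() is hypothetical; the generated client bridges wrap it */
	eError = BridgeCall(PVRSRV_BRIDGE_BREAKPOINT_RGXSETBREAKPOINT,
			    &sIn, sizeof(sIn), &sOut, sizeof(sOut));
	/* sOut.eError carries the server-side PVRSRV_ERROR result */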
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/breakpoint_bridge/server_breakpoint_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/breakpoint_bridge/server_breakpoint_bridge.c
new file mode 100644
index 0000000..855d8c9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/breakpoint_bridge/server_breakpoint_bridge.c
@@ -0,0 +1,455 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for breakpoint
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for breakpoint
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxbreakpoint.h"
+
+#include "common_breakpoint_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXSetBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *psRGXSetBreakpointIN,
+					  PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *psRGXSetBreakpointOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPrivData = psRGXSetBreakpointIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+	/* Look up the address from the handle */
+	psRGXSetBreakpointOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					   (void **) &hPrivDataInt,
+					   hPrivData,
+					   PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+					   IMG_TRUE);
+	if (psRGXSetBreakpointOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXSetBreakpoint_exit;
+	}
+
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXSetBreakpointOUT->eError =
+		PVRSRVRGXSetBreakpointKM(psConnection, OSGetDevData(psConnection),
+					hPrivDataInt,
+					psRGXSetBreakpointIN->eFWDataMaster,
+					psRGXSetBreakpointIN->ui32BreakpointAddr,
+					psRGXSetBreakpointIN->ui32HandlerAddr,
+					psRGXSetBreakpointIN->ui32DM);
+
+RGXSetBreakpoint_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						hPrivData,
+						PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXClearBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *psRGXClearBreakpointIN,
+					  PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *psRGXClearBreakpointOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPrivData = psRGXClearBreakpointIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+	/* Look up the address from the handle */
+	psRGXClearBreakpointOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					   (void **) &hPrivDataInt,
+					   hPrivData,
+					   PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+					   IMG_TRUE);
+	if (psRGXClearBreakpointOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXClearBreakpoint_exit;
+	}
+
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXClearBreakpointOUT->eError =
+		PVRSRVRGXClearBreakpointKM(psConnection, OSGetDevData(psConnection),
+					hPrivDataInt);
+
+RGXClearBreakpoint_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						hPrivData,
+						PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXEnableBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *psRGXEnableBreakpointIN,
+					  PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *psRGXEnableBreakpointOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPrivData = psRGXEnableBreakpointIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+	/* Look up the address from the handle */
+	psRGXEnableBreakpointOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					   (void **) &hPrivDataInt,
+					   hPrivData,
+					   PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+					   IMG_TRUE);
+	if (psRGXEnableBreakpointOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXEnableBreakpoint_exit;
+	}
+	/* Release now that we have looked up the handle. */
+	UnlockHandle();
+
+	psRGXEnableBreakpointOUT->eError =
+		PVRSRVRGXEnableBreakpointKM(psConnection, OSGetDevData(psConnection),
+					hPrivDataInt);
+
+RGXEnableBreakpoint_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData,
+					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now that the looked-up handle has been cleaned up. */
+	UnlockHandle();
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDisableBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointIN,
+					  PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPrivData = psRGXDisableBreakpointIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+	/* Look up the address from the handle */
+	psRGXDisableBreakpointOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					   (void **) &hPrivDataInt,
+					   hPrivData,
+					   PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+					   IMG_TRUE);
+	if (psRGXDisableBreakpointOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXDisableBreakpoint_exit;
+	}
+	/* Release now that we have looked up the handle. */
+	UnlockHandle();
+
+	psRGXDisableBreakpointOUT->eError =
+		PVRSRVRGXDisableBreakpointKM(psConnection, OSGetDevData(psConnection),
+					hPrivDataInt);
+
+RGXDisableBreakpoint_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData,
+					    PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now that the looked-up handle has been cleaned up. */
+	UnlockHandle();
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXOverallocateBPRegisters(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersIN,
+					  PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	psRGXOverallocateBPRegistersOUT->eError =
+		PVRSRVRGXOverallocateBPRegistersKM(psConnection, OSGetDevData(psConnection),
+					psRGXOverallocateBPRegistersIN->ui32TempRegs,
+					psRGXOverallocateBPRegistersIN->ui32SharedRegs);
+
+	return 0;
+}
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_BREAKPOINT_BRIDGE */
+
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+PVRSRV_ERROR InitBREAKPOINTBridge(void);
+PVRSRV_ERROR DeinitBREAKPOINTBridge(void);
+
+/*
+ * Register all BREAKPOINT functions with services
+ */
+PVRSRV_ERROR InitBREAKPOINTBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXSETBREAKPOINT, PVRSRVBridgeRGXSetBreakpoint,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXCLEARBREAKPOINT, PVRSRVBridgeRGXClearBreakpoint,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXENABLEBREAKPOINT, PVRSRVBridgeRGXEnableBreakpoint,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXDISABLEBREAKPOINT, PVRSRVBridgeRGXDisableBreakpoint,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXOVERALLOCATEBPREGISTERS, PVRSRVBridgeRGXOverallocateBPRegisters,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all BREAKPOINT functions from services
+ */
+PVRSRV_ERROR DeinitBREAKPOINTBridge(void)
+{
+	return PVRSRV_OK;
+}
+#else /* EXCLUDE_BREAKPOINT_BRIDGE */
+/* This bridge is conditional on EXCLUDE_BREAKPOINT_BRIDGE - when defined,
+ * do not populate the dispatch table with its functions
+ */
+#define InitBREAKPOINTBridge() \
+	PVRSRV_OK
+
+#define DeinitBREAKPOINTBridge() \
+	PVRSRV_OK
+
+#endif /* EXCLUDE_BREAKPOINT_BRIDGE */
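The EXCLUDE_BREAKPOINT_BRIDGE stubs above are what let common initialisation code call the Init/Deinit pair unconditionally: when the bridge is excluded, each "call" simply evaluates to PVRSRV_OK. A minimal sketch of such a caller, assuming this convention holds for every bridge module (the ServerBridgeInit name is illustrative, not the driver's actual entry point):

	/* Illustrative caller: no #ifdef is needed at the call site, because an
	 * excluded bridge's Init macro expands to the constant PVRSRV_OK. */
	static PVRSRV_ERROR ServerBridgeInit(void)
	{
		PVRSRV_ERROR eError = InitBREAKPOINTBridge();

		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "InitBREAKPOINTBridge failed"));
			return eError;
		}
		return PVRSRV_OK;
	}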
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cache_bridge/client_cache_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cache_bridge/client_cache_bridge.h
new file mode 100644
index 0000000..0c65f73
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cache_bridge/client_cache_bridge.h
@@ -0,0 +1,94 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for cache
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for cache
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_CACHE_BRIDGE_H
+#define CLIENT_CACHE_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_cache_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpQueue(IMG_HANDLE hBridge,
+							  IMG_UINT32 ui32NumCacheOps,
+							  IMG_HANDLE *phPMR,
+							  IMG_UINT64 *pui64Address,
+							  IMG_DEVMEM_OFFSET_T *puiOffset,
+							  IMG_DEVMEM_SIZE_T *puiSize,
+							  PVRSRV_CACHE_OP *piuCacheOp,
+							  IMG_UINT32 ui32OpTimeline,
+							  IMG_UINT32 ui32OpInfoPgGFSeqNum,
+							  IMG_UINT32 ui32CurrentFenceSeqNum,
+							  IMG_UINT32 *pui32NextFenceSeqNum);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpExec(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR,
+							 IMG_UINT64 ui64Address,
+							 IMG_DEVMEM_OFFSET_T uiOffset,
+							 IMG_DEVMEM_SIZE_T uiSize,
+							 PVRSRV_CACHE_OP iuCacheOp);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpLog(IMG_HANDLE hBridge,
+							IMG_HANDLE hPMR,
+							IMG_UINT64 ui64Address,
+							IMG_DEVMEM_OFFSET_T uiOffset,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_INT64 i64QueuedTimeUs,
+							IMG_INT64 i64ExecuteTimeUs,
+							IMG_INT32 i32NumRBF,
+							IMG_BOOL bIsDiscard,
+							PVRSRV_CACHE_OP iuCacheOp);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpAcquireInfoPage(IMG_HANDLE hBridge,
+								    IMG_HANDLE *phPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpReleaseInfoPage(IMG_HANDLE hBridge,
+								    IMG_HANDLE hPMR);
+
+
+#endif /* CLIENT_CACHE_BRIDGE_H */
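Each prototype above pairs with a command ID in common_cache_bridge.h, and every call takes the opaque services bridge handle first. As a hedged usage sketch, flushing part of a PMR through the exec entry point might look like the following (hSrvConnection and hPmr are placeholder handles assumed to have been obtained through the usual connection and device-memory APIs, and PVRSRV_CACHE_OP_FLUSH is assumed to be among the ops declared in cache_ops.h):

	/* Sketch only: request a flush of 4 KiB at offset 0 of a PMR. */
	PVRSRV_ERROR eError = BridgeCacheOpExec(hSrvConnection,
						hPmr,
						0,                     /* ui64Address */
						0,                     /* uiOffset */
						4096,                  /* uiSize */
						PVRSRV_CACHE_OP_FLUSH);
	if (eError != PVRSRV_OK)
	{
		/* Propagate or log the failure. */
	}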
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cache_bridge/client_cache_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cache_bridge/client_cache_direct_bridge.c
new file mode 100644
index 0000000..ff088ae
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cache_bridge/client_cache_direct_bridge.c
@@ -0,0 +1,173 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for cache
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_cache_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "cache_ops.h"
+
+#include "cache_km.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpQueue(IMG_HANDLE hBridge,
+							  IMG_UINT32 ui32NumCacheOps,
+							  IMG_HANDLE *phPMR,
+							  IMG_UINT64 *pui64Address,
+							  IMG_DEVMEM_OFFSET_T *puiOffset,
+							  IMG_DEVMEM_SIZE_T *puiSize,
+							  PVRSRV_CACHE_OP *piuCacheOp,
+							  IMG_UINT32 ui32OpTimeline,
+							  IMG_UINT32 ui32OpInfoPgGFSeqNum,
+							  IMG_UINT32 ui32CurrentFenceSeqNum,
+							  IMG_UINT32 *pui32NextFenceSeqNum)
+{
+	PVRSRV_ERROR eError;
+	PMR **psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR **) phPMR;
+
+	eError =
+		CacheOpQueue(
+					ui32NumCacheOps,
+					psPMRInt,
+					pui64Address,
+					puiOffset,
+					puiSize,
+					piuCacheOp,
+					ui32OpTimeline,
+					ui32OpInfoPgGFSeqNum,
+					ui32CurrentFenceSeqNum,
+					pui32NextFenceSeqNum);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpExec(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR,
+							 IMG_UINT64 ui64Address,
+							 IMG_DEVMEM_OFFSET_T uiOffset,
+							 IMG_DEVMEM_SIZE_T uiSize,
+							 PVRSRV_CACHE_OP iuCacheOp)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		CacheOpExec(
+					psPMRInt,
+					ui64Address,
+					uiOffset,
+					uiSize,
+					iuCacheOp);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpLog(IMG_HANDLE hBridge,
+							IMG_HANDLE hPMR,
+							IMG_UINT64 ui64Address,
+							IMG_DEVMEM_OFFSET_T uiOffset,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_INT64 i64QueuedTimeUs,
+							IMG_INT64 i64ExecuteTimeUs,
+							IMG_INT32 i32NumRBF,
+							IMG_BOOL bIsDiscard,
+							PVRSRV_CACHE_OP iuCacheOp)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		CacheOpLog(
+					psPMRInt,
+					ui64Address,
+					uiOffset,
+					uiSize,
+					i64QueuedTimeUs,
+					i64ExecuteTimeUs,
+					i32NumRBF,
+					bIsDiscard,
+					iuCacheOp);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpAcquireInfoPage(IMG_HANDLE hBridge,
+								    IMG_HANDLE *phPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		CacheOpAcquireInfoPage(
+					&psPMRInt);
+
+	*phPMR = psPMRInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpReleaseInfoPage(IMG_HANDLE hBridge,
+								    IMG_HANDLE hPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		CacheOpReleaseInfoPage(
+					psPMRInt);
+
+	return eError;
+}
+
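In this direct (same-address-space) build the client-side handle is simply the kernel pointer, so every wrapper above reduces to a cast plus a tail call into the server implementation, with hBridge ignored. A condensed sketch of the pattern, with hypothetical Foo names standing in for any of the wrapped entry points:

	/* Hypothetical condensation of the direct-bridge wrapper shape used
	 * throughout this file: no marshalling, no locking, just a cast. */
	IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFooOp(IMG_HANDLE hBridge,
							   IMG_HANDLE hObj)
	{
		PVR_UNREFERENCED_PARAMETER(hBridge);
		return FooOp((FOO *) hObj);
	}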
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cache_bridge/common_cache_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cache_bridge/common_cache_bridge.h
new file mode 100644
index 0000000..0ce2e26
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cache_bridge/common_cache_bridge.h
@@ -0,0 +1,172 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for cache
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for cache
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_CACHE_BRIDGE_H
+#define COMMON_CACHE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "cache_ops.h"
+
+
+#define PVRSRV_BRIDGE_CACHE_CMD_FIRST			0
+#define PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE			PVRSRV_BRIDGE_CACHE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CACHE_CACHEOPEXEC			PVRSRV_BRIDGE_CACHE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CACHE_CACHEOPLOG			PVRSRV_BRIDGE_CACHE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CACHE_CACHEOPACQUIREINFOPAGE			PVRSRV_BRIDGE_CACHE_CMD_FIRST+3
+#define PVRSRV_BRIDGE_CACHE_CACHEOPRELEASEINFOPAGE			PVRSRV_BRIDGE_CACHE_CMD_FIRST+4
+#define PVRSRV_BRIDGE_CACHE_CMD_LAST			(PVRSRV_BRIDGE_CACHE_CMD_FIRST+4)
+
+
+/*******************************************
+            CacheOpQueue          
+ *******************************************/
+
+/* Bridge in structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPQUEUE_TAG
+{
+	IMG_UINT32 ui32NumCacheOps;
+	IMG_HANDLE * phPMR;
+	IMG_UINT64 * pui64Address;
+	IMG_DEVMEM_OFFSET_T * puiOffset;
+	IMG_DEVMEM_SIZE_T * puiSize;
+	PVRSRV_CACHE_OP * piuCacheOp;
+	IMG_UINT32 ui32OpTimeline;
+	IMG_UINT32 ui32OpInfoPgGFSeqNum;
+	IMG_UINT32 ui32CurrentFenceSeqNum;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPQUEUE;
+
+/* Bridge out structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPQUEUE_TAG
+{
+	IMG_UINT32 ui32NextFenceSeqNum;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPQUEUE;
+
+
+/*******************************************
+            CacheOpExec          
+ *******************************************/
+
+/* Bridge in structure for CacheOpExec */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPEXEC_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_UINT64 ui64Address;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiSize;
+	PVRSRV_CACHE_OP iuCacheOp;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPEXEC;
+
+/* Bridge out structure for CacheOpExec */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPEXEC_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPEXEC;
+
+
+/*******************************************
+            CacheOpLog          
+ *******************************************/
+
+/* Bridge in structure for CacheOpLog */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPLOG_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_UINT64 ui64Address;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_INT64 i64QueuedTimeUs;
+	IMG_INT64 i64ExecuteTimeUs;
+	IMG_INT32 i32NumRBF;
+	IMG_BOOL bIsDiscard;
+	PVRSRV_CACHE_OP iuCacheOp;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPLOG;
+
+/* Bridge out structure for CacheOpLog */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPLOG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPLOG;
+
+
+/*******************************************
+            CacheOpAcquireInfoPage          
+ *******************************************/
+
+/* Bridge in structure for CacheOpAcquireInfoPage */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPACQUIREINFOPAGE_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPACQUIREINFOPAGE;
+
+/* Bridge out structure for CacheOpAcquireInfoPage */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPACQUIREINFOPAGE_TAG
+{
+	IMG_HANDLE hPMR;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPACQUIREINFOPAGE;
+
+
+/*******************************************
+            CacheOpReleaseInfoPage          
+ *******************************************/
+
+/* Bridge in structure for CacheOpReleaseInfoPage */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPRELEASEINFOPAGE_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPRELEASEINFOPAGE;
+
+/* Bridge out structure for CacheOpReleaseInfoPage */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPRELEASEINFOPAGE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPRELEASEINFOPAGE;
+
+
+#endif /* COMMON_CACHE_BRIDGE_H */
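The *_CMD_FIRST/*_CMD_LAST defines bound the command range the dispatcher uses when sizing and validating this module's dispatch table, so CMD_LAST must track the highest ID. A hedged compile-time check one could add to catch drift (written with the negative-array-size idiom so it does not depend on C11 _Static_assert; the count of 5 reflects the five commands defined above):

	/* Fails to compile if a command is added without updating CMD_LAST. */
	typedef char CACHE_BRIDGE_CMD_COUNT_CHECK[
		(PVRSRV_BRIDGE_CACHE_CMD_LAST - PVRSRV_BRIDGE_CACHE_CMD_FIRST + 1) == 5 ? 1 : -1];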
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cache_bridge/server_cache_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cache_bridge/server_cache_bridge.c
new file mode 100644
index 0000000..3f4eb55
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cache_bridge/server_cache_bridge.c
@@ -0,0 +1,626 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for cache
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for cache
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "cache_km.h"
+
+
+#include "common_cache_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_CACHEOPQUEUE *psCacheOpQueueIN,
+					  PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *psCacheOpQueueOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR **psPMRInt = NULL;
+	IMG_HANDLE *hPMRInt2 = NULL;
+	IMG_UINT64 *ui64AddressInt = NULL;
+	IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL;
+	IMG_DEVMEM_SIZE_T *uiSizeInt = NULL;
+	PVRSRV_CACHE_OP *iuCacheOpInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+			(psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)) +
+			(psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) +
+			(psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) +
+			(psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) +
+			(psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) +
+			(psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP));
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psCacheOpQueueIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psCacheOpQueueOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto CacheOpQueue_exit;
+			}
+		}
+	}
+
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		psPMRInt = (PMR **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *);
+		hPMRInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser(NULL, hPMRInt2, psCacheOpQueueIN->phPMR,
+				   psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto CacheOpQueue_exit;
+		}
+	}
+
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		ui64AddressInt = (IMG_UINT64 *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui64AddressInt, psCacheOpQueueIN->pui64Address,
+				   psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto CacheOpQueue_exit;
+		}
+	}
+
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		uiOffsetInt = (IMG_DEVMEM_OFFSET_T *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T) > 0)
+	{
+		if (OSCopyFromUser(NULL, uiOffsetInt, psCacheOpQueueIN->puiOffset,
+				   psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto CacheOpQueue_exit;
+		}
+	}
+
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		uiSizeInt = (IMG_DEVMEM_SIZE_T *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T) > 0)
+	{
+		if (OSCopyFromUser(NULL, uiSizeInt, psCacheOpQueueIN->puiSize,
+				   psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto CacheOpQueue_exit;
+		}
+	}
+
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		iuCacheOpInt = (PVRSRV_CACHE_OP *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP) > 0)
+	{
+		if (OSCopyFromUser(NULL, iuCacheOpInt, psCacheOpQueueIN->piuCacheOp,
+				   psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto CacheOpQueue_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++)
+		{
+			/* Look up the address from the handle */
+			psCacheOpQueueOUT->eError =
+				PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+							   (void **) &psPMRInt[i],
+							   hPMRInt2[i],
+							   PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							   IMG_TRUE);
+			if (psCacheOpQueueOUT->eError != PVRSRV_OK)
+			{
+				UnlockHandle();
+				goto CacheOpQueue_exit;
+			}
+		}
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle();
+
+	psCacheOpQueueOUT->eError =
+		CacheOpQueue(
+					psCacheOpQueueIN->ui32NumCacheOps,
+					psPMRInt,
+					ui64AddressInt,
+					uiOffsetInt,
+					uiSizeInt,
+					iuCacheOpInt,
+					psCacheOpQueueIN->ui32OpTimeline,
+					psCacheOpQueueIN->ui32OpInfoPgGFSeqNum,
+					psCacheOpQueueIN->ui32CurrentFenceSeqNum,
+					&psCacheOpQueueOUT->ui32NextFenceSeqNum);
+
+CacheOpQueue_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	if (hPMRInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++)
+		{
+			/* Unreference the previously looked up handle */
+			if (hPMRInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+							    hPMRInt2[i],
+							    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+			}
+		}
+	}
+	/* Release now that the looked-up handles have been cleaned up. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeCacheOpExec(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_CACHEOPEXEC *psCacheOpExecIN,
+					  PVRSRV_BRIDGE_OUT_CACHEOPEXEC *psCacheOpExecOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psCacheOpExecIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+	/* Look up the address from the handle */
+	psCacheOpExecOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					   (void **) &psPMRInt,
+					   hPMR,
+					   PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+					   IMG_TRUE);
+	if (psCacheOpExecOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto CacheOpExec_exit;
+	}
+	/* Release now that we have looked up the handle. */
+	UnlockHandle();
+
+	psCacheOpExecOUT->eError =
+		CacheOpExec(psPMRInt,
+			    psCacheOpExecIN->ui64Address,
+			    psCacheOpExecIN->uiOffset,
+			    psCacheOpExecIN->uiSize,
+			    psCacheOpExecIN->iuCacheOp);
+
+CacheOpExec_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now that the looked-up handle has been cleaned up. */
+	UnlockHandle();
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeCacheOpLog(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_CACHEOPLOG *psCacheOpLogIN,
+					  PVRSRV_BRIDGE_OUT_CACHEOPLOG *psCacheOpLogOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psCacheOpLogIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+	/* Look up the address from the handle */
+	psCacheOpLogOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					   (void **) &psPMRInt,
+					   hPMR,
+					   PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+					   IMG_TRUE);
+	if (psCacheOpLogOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto CacheOpLog_exit;
+	}
+	/* Release now that we have looked up the handle. */
+	UnlockHandle();
+
+	psCacheOpLogOUT->eError =
+		CacheOpLog(psPMRInt,
+			   psCacheOpLogIN->ui64Address,
+			   psCacheOpLogIN->uiOffset,
+			   psCacheOpLogIN->uiSize,
+			   psCacheOpLogIN->i64QueuedTimeUs,
+			   psCacheOpLogIN->i64ExecuteTimeUs,
+			   psCacheOpLogIN->i32NumRBF,
+			   psCacheOpLogIN->bIsDiscard,
+			   psCacheOpLogIN->iuCacheOp);
+
+CacheOpLog_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now that the looked-up handle has been cleaned up. */
+	UnlockHandle();
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeCacheOpAcquireInfoPage(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_CACHEOPACQUIREINFOPAGE *psCacheOpAcquireInfoPageIN,
+					  PVRSRV_BRIDGE_OUT_CACHEOPACQUIREINFOPAGE *psCacheOpAcquireInfoPageOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR *psPMRInt = NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psCacheOpAcquireInfoPageIN);
+
+	psCacheOpAcquireInfoPageOUT->eError =
+		CacheOpAcquireInfoPage(&psPMRInt);
+	/* Exit early if bridged call fails */
+	if (psCacheOpAcquireInfoPageOUT->eError != PVRSRV_OK)
+	{
+		goto CacheOpAcquireInfoPage_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+	psCacheOpAcquireInfoPageOUT->eError =
+		PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					  &psCacheOpAcquireInfoPageOUT->hPMR,
+					  (void *) psPMRInt,
+					  PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+					  PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					  (PFN_HANDLE_RELEASE)&CacheOpReleaseInfoPage);
+	if (psCacheOpAcquireInfoPageOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto CacheOpAcquireInfoPage_exit;
+	}
+
+	/* Release now that we have created the handle. */
+	UnlockHandle();
+
+CacheOpAcquireInfoPage_exit:
+
+	if (psCacheOpAcquireInfoPageOUT->eError != PVRSRV_OK)
+	{
+		if (psPMRInt)
+		{
+			CacheOpReleaseInfoPage(psPMRInt);
+		}
+	}
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeCacheOpReleaseInfoPage(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_CACHEOPRELEASEINFOPAGE *psCacheOpReleaseInfoPageIN,
+					  PVRSRV_BRIDGE_OUT_CACHEOPRELEASEINFOPAGE *psCacheOpReleaseInfoPageOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+	psCacheOpReleaseInfoPageOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					(IMG_HANDLE) psCacheOpReleaseInfoPageIN->hPMR,
+					PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+	if ((psCacheOpReleaseInfoPageOUT->eError != PVRSRV_OK) &&
+	    (psCacheOpReleaseInfoPageOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeCacheOpReleaseInfoPage: %s",
+		        PVRSRVGetErrorStringKM(psCacheOpReleaseInfoPageOUT->eError)));
+		UnlockHandle();
+		goto CacheOpReleaseInfoPage_exit;
+	}
+
+	/* Release now that we have destroyed the handle. */
+	UnlockHandle();
+
+CacheOpReleaseInfoPage_exit:
+
+	return 0;
+}
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitCACHEBridge(void);
+PVRSRV_ERROR DeinitCACHEBridge(void);
+
+/*
+ * Register all CACHE functions with services
+ */
+PVRSRV_ERROR InitCACHEBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE, PVRSRVBridgeCacheOpQueue,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC, PVRSRVBridgeCacheOpExec,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG, PVRSRVBridgeCacheOpLog,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPACQUIREINFOPAGE, PVRSRVBridgeCacheOpAcquireInfoPage,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPRELEASEINFOPAGE, PVRSRVBridgeCacheOpReleaseInfoPage,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all CACHE functions from services
+ */
+PVRSRV_ERROR DeinitCACHEBridge(void)
+{
+	return PVRSRV_OK;
+}
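PVRSRVBridgeCacheOpQueue above shows the generated strategy for variable-length arguments: the unpacked arrays are laid out back to back in one scratch buffer, carved out of the unused tail of the fixed-size bridge input buffer when it fits (avoiding a heap allocation on the common path) and heap-allocated otherwise. A stripped-down, self-contained sketch of that decision, with illustrative names and a stand-in buffer size rather than the driver's PVRSRV_MAX_BRIDGE_IN_SIZE:

	#include <stdlib.h>

	#define IN_BUF_SIZE 0x1000	/* stand-in for PVRSRV_MAX_BRIDGE_IN_SIZE */

	/* Return scratch space for uSize bytes of unpacked arrays: reuse the
	 * word-aligned slack after the fixed header when it fits, otherwise
	 * fall back to the heap; *pbHeap tells the caller whether to free. */
	static void *get_args_buffer(void *pvInBuf, size_t uHdrSize,
				     size_t uSize, int *pbHeap)
	{
		size_t uAligned = (uHdrSize + sizeof(long) - 1) & ~(sizeof(long) - 1);

		if (uAligned < IN_BUF_SIZE && uSize <= IN_BUF_SIZE - uAligned)
		{
			*pbHeap = 0;
			return (char *)pvInBuf + uAligned;
		}
		*pbHeap = 1;
		return malloc(uSize);
	}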
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cmm_bridge/common_cmm_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cmm_bridge/common_cmm_bridge.h
new file mode 100644
index 0000000..4ca9bc5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cmm_bridge/common_cmm_bridge.h
@@ -0,0 +1,118 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for cmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for cmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_CMM_BRIDGE_H
+#define COMMON_CMM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_CMM_CMD_FIRST			0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX			PVRSRV_BRIDGE_CMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX			PVRSRV_BRIDGE_CMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX			PVRSRV_BRIDGE_CMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CMM_CMD_LAST			(PVRSRV_BRIDGE_CMM_CMD_FIRST+2)
+
+
+/*******************************************
+            DevmemIntExportCtx          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntExportCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX_TAG
+{
+	IMG_HANDLE hContext;
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX;
+
+/* Bridge out structure for DevmemIntExportCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX_TAG
+{
+	IMG_HANDLE hContextExport;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX;
+
+
+/*******************************************
+            DevmemIntUnexportCtx          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnexportCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX_TAG
+{
+	IMG_HANDLE hContextExport;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX;
+
+/* Bridge out structure for DevmemIntUnexportCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX;
+
+
+/*******************************************
+            DevmemIntAcquireRemoteCtx          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntAcquireRemoteCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX;
+
+/* Bridge out structure for DevmemIntAcquireRemoteCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX_TAG
+{
+	IMG_HANDLE hContext;
+	IMG_HANDLE hPrivData;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX;
+
+
+#endif /* COMMON_CMM_BRIDGE_H */
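As in the other bridge headers, every IN/OUT structure is __attribute__((packed)) so user and kernel sides agree on layout regardless of natural alignment. If the exact ABI mattered to a consumer, a hedged size pin could be added; the arithmetic below assumes an 8-byte IMG_HANDLE and a 4-byte PVRSRV_ERROR, which this header does not itself guarantee:

	/* Assumption: sizeof(IMG_HANDLE) == 8 and sizeof(PVRSRV_ERROR) == 4, so
	 * the packed export-ctx out structure is 12 bytes; compilation fails
	 * if the assumed layout ever changes. */
	typedef char CMM_EXPORTCTX_OUT_SIZE_CHECK[
		sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX) == 12 ? 1 : -1];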
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cmm_bridge/server_cmm_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cmm_bridge/server_cmm_bridge.c
new file mode 100644
index 0000000..41a408b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/cmm_bridge/server_cmm_bridge.c
@@ -0,0 +1,477 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for cmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for cmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pmr.h"
+#include "devicemem_server.h"
+
+
+#include "common_cmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeDevmemIntExportCtx(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hContext = psDevmemIntExportCtxIN->hContext;
+	DEVMEMINT_CTX *psContextInt = NULL;
+	IMG_HANDLE hPMR = psDevmemIntExportCtxIN->hPMR;
+	PMR *psPMRInt = NULL;
+	DEVMEMINT_CTX_EXPORT *psContextExportInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+	/* Look up the address from the handle */
+	psDevmemIntExportCtxOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					   (void **) &psContextInt,
+					   hContext,
+					   PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+					   IMG_TRUE);
+	if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto DevmemIntExportCtx_exit;
+	}
+
+	/* Look up the address from the handle */
+	psDevmemIntExportCtxOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					   (void **) &psPMRInt,
+					   hPMR,
+					   PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+					   IMG_TRUE);
+	if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto DevmemIntExportCtx_exit;
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle();
+
+	psDevmemIntExportCtxOUT->eError =
+		DevmemIntExportCtx(psContextInt,
+				   psPMRInt,
+				   &psContextExportInt);
+	/* Exit early if bridged call fails */
+	if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntExportCtx_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+	psDevmemIntExportCtxOUT->eError =
+		PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+					  &psDevmemIntExportCtxOUT->hContextExport,
+					  (void *) psContextExportInt,
+					  PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+					  PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+					  (PFN_HANDLE_RELEASE)&DevmemIntUnexportCtx);
+	if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto DevmemIntExportCtx_exit;
+	}
+
+	/* Release now that we have created the handle. */
+	UnlockHandle();
+
+DevmemIntExportCtx_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked up handles */
+	if (psContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hContext,
+					    PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	}
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now that the looked-up handles have been cleaned up. */
+	UnlockHandle();
+
+	if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+	{
+		if (psContextExportInt)
+		{
+			DevmemIntUnexportCtx(psContextExportInt);
+		}
+	}
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnexportCtx(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+	psDevmemIntUnexportCtxOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntUnexportCtxIN->hContextExport,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+	if ((psDevmemIntUnexportCtxOUT->eError != PVRSRV_OK) &&
+	    (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeDevmemIntUnexportCtx: %s",
+		        PVRSRVGetErrorStringKM(psDevmemIntUnexportCtxOUT->eError)));
+		UnlockHandle();
+		goto DevmemIntUnexportCtx_exit;
+	}
+
+	/* Release now that we have destroyed the handle. */
+	UnlockHandle();
+
+DevmemIntUnexportCtx_exit:
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psDevmemIntAcquireRemoteCtxIN->hPMR;
+	PMR *psPMRInt = NULL;
+	DEVMEMINT_CTX *psContextInt = NULL;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	psDevmemIntAcquireRemoteCtxOUT->hContext = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+	/* Look up the address from the handle */
+	psDevmemIntAcquireRemoteCtxOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					   (void **) &psPMRInt,
+					   hPMR,
+					   PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+					   IMG_TRUE);
+	if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto DevmemIntAcquireRemoteCtx_exit;
+	}
+	/* Release now that we have looked up the handle. */
+	UnlockHandle();
+
+	psDevmemIntAcquireRemoteCtxOUT->eError =
+		DevmemIntAcquireRemoteCtx(psPMRInt,
+					  &psContextInt,
+					  &hPrivDataInt);
+	/* Exit early if bridged call fails */
+	if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntAcquireRemoteCtx_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+	psDevmemIntAcquireRemoteCtxOUT->eError =
+		PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+					  &psDevmemIntAcquireRemoteCtxOUT->hContext,
+					  (void *) psContextInt,
+					  PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+					  PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+					  (PFN_HANDLE_RELEASE)&DevmemIntCtxDestroy);
+	if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto DevmemIntAcquireRemoteCtx_exit;
+	}
+
+	psDevmemIntAcquireRemoteCtxOUT->eError =
+		PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+					     &psDevmemIntAcquireRemoteCtxOUT->hPrivData,
+					     (void *) hPrivDataInt,
+					     PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+					     PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+					     psDevmemIntAcquireRemoteCtxOUT->hContext);
+	if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto DevmemIntAcquireRemoteCtx_exit;
+	}
+
+	/* Release now that we have created the handles. */
+	UnlockHandle();
+
+DevmemIntAcquireRemoteCtx_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now that the looked-up handle has been cleaned up. */
+	UnlockHandle();
+
+	if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+	{
+		/* Lock over handle creation cleanup. */
+		LockHandle();
+		if (psDevmemIntAcquireRemoteCtxOUT->hContext)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						(IMG_HANDLE) psDevmemIntAcquireRemoteCtxOUT->hContext,
+						PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+			if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				        "PVRSRVBridgeDevmemIntAcquireRemoteCtx: %s",
+				        PVRSRVGetErrorStringKM(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psContextInt = NULL;
+		}
+
+		/* Release now that we have cleaned up the creation handles. */
+		UnlockHandle();
+		if (psContextInt)
+		{
+			DevmemIntCtxDestroy(psContextInt);
+		}
+	}
+
+	return 0;
+}
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_CMM_BRIDGE */
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+PVRSRV_ERROR InitCMMBridge(void);
+PVRSRV_ERROR DeinitCMMBridge(void);
+
+/*
+ * Register all CMM functions with services
+ */
+PVRSRV_ERROR InitCMMBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX, PVRSRVBridgeDevmemIntExportCtx,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX, PVRSRVBridgeDevmemIntUnexportCtx,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX, PVRSRVBridgeDevmemIntAcquireRemoteCtx,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all CMM functions from services
+ */
+PVRSRV_ERROR DeinitCMMBridge(void)
+{
+	return PVRSRV_OK;
+}
+#else /* EXCLUDE_CMM_BRIDGE */
+/* This bridge is conditional on EXCLUDE_CMM_BRIDGE - when defined,
+ * do not populate the dispatch table with its functions
+ */
+#define InitCMMBridge() \
+	PVRSRV_OK
+
+#define DeinitCMMBridge() \
+	PVRSRV_OK
+
+#endif /* EXCLUDE_CMM_BRIDGE */
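PVRSRVBridgeDevmemIntAcquireRemoteCtx above encodes the generated rollback rule: once a handle has been allocated over a freshly created resource, failure paths release the handle (whose release callback destroys the resource) rather than destroying the resource directly, and the raw pointer is NULLed so the resource cannot be destroyed twice. A self-contained sketch of that ownership hand-off, with all names hypothetical:

	#include <stdlib.h>

	struct res { int payload; };
	static void destroy_res(void *pv) { free(pv); }

	/* A "handle" slot that owns its object via a release callback. */
	struct handle { void *pvObj; void (*pfnRelease)(void *); };

	static void release_handle(struct handle *psH)
	{
		psH->pfnRelease(psH->pvObj);	/* releasing destroys the resource */
		psH->pvObj = NULL;
	}

	static int acquire_example(struct handle *psH, int bLaterStepFails)
	{
		struct res *psR = malloc(sizeof(*psR));

		if (!psR)
			return -1;

		psH->pvObj = psR;
		psH->pfnRelease = destroy_res;
		psR = NULL;			/* handle owns it now: never free directly */

		if (bLaterStepFails)
		{
			release_handle(psH);	/* roll back through the handle... */
			if (psR)
				destroy_res(psR); /* ...direct destroy only if no handle was made */
			return -1;
		}
		return 0;
	}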
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/debugmisc_bridge/common_debugmisc_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/debugmisc_bridge/common_debugmisc_bridge.h
new file mode 100644
index 0000000..23656ae
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/debugmisc_bridge/common_debugmisc_bridge.h
@@ -0,0 +1,195 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for debugmisc
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for debugmisc
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_DEBUGMISC_BRIDGE_H
+#define COMMON_DEBUGMISC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "rgx_bridge.h"
+#include "pvrsrv_memallocflags.h"
+
+
+#define PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST			0
+#define PVRSRV_BRIDGE_DEBUGMISC_DEBUGMISCSLCSETBYPASSSTATE			PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETFWLOG			PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCDUMPFREELISTPAGELIST			PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DEBUGMISC_PHYSMEMIMPORTSECBUF			PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETHCSDEADLINE			PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+4
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSIDPRIORITY			PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+5
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSNEWONLINESTATE			PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+6
+#define PVRSRV_BRIDGE_DEBUGMISC_CMD_LAST			(PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+6)
+
+
+/*******************************************
+            DebugMiscSLCSetBypassState
+ *******************************************/
+
+/* Bridge in structure for DebugMiscSLCSetBypassState */
+typedef struct PVRSRV_BRIDGE_IN_DEBUGMISCSLCSETBYPASSSTATE_TAG
+{
+	IMG_UINT32 ui32Flags;
+	IMG_BOOL bIsBypassed;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEBUGMISCSLCSETBYPASSSTATE;
+
+/* Bridge out structure for DebugMiscSLCSetBypassState */
+typedef struct PVRSRV_BRIDGE_OUT_DEBUGMISCSLCSETBYPASSSTATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEBUGMISCSLCSETBYPASSSTATE;
+
+
+/*******************************************
+            RGXDebugMiscSetFWLog
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetFWLog */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETFWLOG_TAG
+{
+	IMG_UINT32 ui32RGXFWLogType;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETFWLOG;
+
+/* Bridge out structure for RGXDebugMiscSetFWLog */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETFWLOG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETFWLOG;
+
+
+/*******************************************
+            RGXDebugMiscDumpFreelistPageList
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST;
+
+/* Bridge out structure for RGXDebugMiscDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCDUMPFREELISTPAGELIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCDUMPFREELISTPAGELIST;
+
+
+/*******************************************
+            PhysmemImportSecBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportSecBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF_TAG
+{
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32 ui32Log2Align;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF;
+
+/* Bridge out structure for PhysmemImportSecBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	IMG_UINT64 ui64SecBufHandle;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF;
+
+
+/*******************************************
+            RGXDebugMiscSetHCSDeadline
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetHCSDeadline */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETHCSDEADLINE_TAG
+{
+	IMG_UINT32 ui32RGXHCSDeadline;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETHCSDEADLINE;
+
+/* Bridge out structure for RGXDebugMiscSetHCSDeadline */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETHCSDEADLINE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETHCSDEADLINE;
+
+
+/*******************************************
+            RGXDebugMiscSetOSidPriority
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetOSidPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSIDPRIORITY_TAG
+{
+	IMG_UINT32 ui32OSid;
+	IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSIDPRIORITY;
+
+/* Bridge out structure for RGXDebugMiscSetOSidPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSIDPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSIDPRIORITY;
+
+
+/*******************************************
+            RGXDebugMiscSetOSNewOnlineState
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetOSNewOnlineState */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSNEWONLINESTATE_TAG
+{
+	IMG_UINT32 ui32OSid;
+	IMG_UINT32 ui32OSNewState;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSNEWONLINESTATE;
+
+/* Bridge out structure for RGXDebugMiscSetOSNewOnlineState */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSNEWONLINESTATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSNEWONLINESTATE;
+
+
+#endif /* COMMON_DEBUGMISC_BRIDGE_H */
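
All of the IN/OUT structures above are __attribute__((packed)) because the identical byte layout must be seen on both sides of the user/kernel bridge; compiler padding would silently break the ABI. A self-contained sketch, with stand-in typedefs instead of the driver's headers, of how the no-padding property can be pinned down at compile time (such a check is not part of the generated header):

#include <assert.h>

typedef unsigned int IMG_UINT32;   /* stand-in for the driver typedef */

typedef struct EXAMPLE_IN_TAG
{
	IMG_UINT32 ui32OSid;
	IMG_UINT32 ui32Priority;
} __attribute__((packed)) EXAMPLE_IN;

/* Hypothetical compile-time check, not present in the generated header: */
static_assert(sizeof(EXAMPLE_IN) == 2 * sizeof(IMG_UINT32),
              "packed bridge struct must carry no padding");
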
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/debugmisc_bridge/server_debugmisc_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/debugmisc_bridge/server_debugmisc_bridge.c
new file mode 100644
index 0000000..c3f5677
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/debugmisc_bridge/server_debugmisc_bridge.c
@@ -0,0 +1,372 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for debugmisc
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for debugmisc
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "debugmisc_server.h"
+#include "pmr.h"
+#include "physmem_tdsecbuf.h"
+
+
+#include "common_debugmisc_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeDebugMiscSLCSetBypassState(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEBUGMISCSLCSETBYPASSSTATE *psDebugMiscSLCSetBypassStateIN,
+					  PVRSRV_BRIDGE_OUT_DEBUGMISCSLCSETBYPASSSTATE *psDebugMiscSLCSetBypassStateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+	psDebugMiscSLCSetBypassStateOUT->eError =
+		PVRSRVDebugMiscSLCSetBypassStateKM(psConnection, OSGetDevData(psConnection),
+					psDebugMiscSLCSetBypassStateIN->ui32Flags,
+					psDebugMiscSLCSetBypassStateIN->bIsBypassed);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetFWLog(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETFWLOG *psRGXDebugMiscSetFWLogIN,
+					  PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETFWLOG *psRGXDebugMiscSetFWLogOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+	psRGXDebugMiscSetFWLogOUT->eError =
+		PVRSRVRGXDebugMiscSetFWLogKM(psConnection, OSGetDevData(psConnection),
+					psRGXDebugMiscSetFWLogIN->ui32RGXFWLogType);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST *psRGXDebugMiscDumpFreelistPageListIN,
+					  PVRSRV_BRIDGE_OUT_RGXDEBUGMISCDUMPFREELISTPAGELIST *psRGXDebugMiscDumpFreelistPageListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psRGXDebugMiscDumpFreelistPageListIN);
+
+
+
+
+
+	psRGXDebugMiscDumpFreelistPageListOUT->eError =
+		PVRSRVRGXDebugMiscDumpFreelistPageListKM(psConnection, OSGetDevData(psConnection)
+					);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePhysmemImportSecBuf(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF *psPhysmemImportSecBufIN,
+					  PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF *psPhysmemImportSecBufOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRPtrInt = NULL;
+
+
+
+
+
+
+
+
+	psPhysmemImportSecBufOUT->eError =
+		PhysmemImportSecBuf(psConnection, OSGetDevData(psConnection),
+					psPhysmemImportSecBufIN->uiSize,
+					psPhysmemImportSecBufIN->ui32Log2Align,
+					psPhysmemImportSecBufIN->uiFlags,
+					&psPMRPtrInt,
+					&psPhysmemImportSecBufOUT->ui64SecBufHandle);
+	/* Exit early if bridged call fails */
+	if(psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+	{
+		goto PhysmemImportSecBuf_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psPhysmemImportSecBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psPhysmemImportSecBufOUT->hPMRPtr,
+							(void *) psPMRPtrInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto PhysmemImportSecBuf_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+PhysmemImportSecBuf_exit:
+
+
+
+	if (psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			PMRUnrefPMR(psPMRPtrInt);
+		}
+	}
+
+
+	return 0;
+}
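
On the success path above, the handle allocated with PVRSRVAllocHandleUnlocked() takes ownership of the PMR reference by registering PMRUnrefPMR as its release callback, which is why only the failure path calls PMRUnrefPMR() directly. The ownership model in miniature, with hypothetical types:

typedef void (*PFN_RELEASE)(void *pvRes);

struct example_handle
{
	void       *pvRes;       /* e.g. the PMR */
	PFN_RELEASE pfnRelease;  /* e.g. PMRUnrefPMR */
};

static void example_handle_free(struct example_handle *psHandle)
{
	if (psHandle->pfnRelease)
		psHandle->pfnRelease(psHandle->pvRes);   /* the handle drops the ref */
}
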
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETHCSDEADLINE *psRGXDebugMiscSetHCSDeadlineIN,
+					  PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETHCSDEADLINE *psRGXDebugMiscSetHCSDeadlineOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+	psRGXDebugMiscSetHCSDeadlineOUT->eError =
+		PVRSRVRGXDebugMiscSetHCSDeadlineKM(psConnection, OSGetDevData(psConnection),
+					psRGXDebugMiscSetHCSDeadlineIN->ui32RGXHCSDeadline);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetOSidPriority(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSIDPRIORITY *psRGXDebugMiscSetOSidPriorityIN,
+					  PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSIDPRIORITY *psRGXDebugMiscSetOSidPriorityOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+	psRGXDebugMiscSetOSidPriorityOUT->eError =
+		PVRSRVRGXDebugMiscSetOSidPriorityKM(psConnection, OSGetDevData(psConnection),
+					psRGXDebugMiscSetOSidPriorityIN->ui32OSid,
+					psRGXDebugMiscSetOSidPriorityIN->ui32Priority);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSNEWONLINESTATE *psRGXDebugMiscSetOSNewOnlineStateIN,
+					  PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSNEWONLINESTATE *psRGXDebugMiscSetOSNewOnlineStateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+	psRGXDebugMiscSetOSNewOnlineStateOUT->eError =
+		PVRSRVRGXDebugMiscSetOSNewOnlineStateKM(psConnection, OSGetDevData(psConnection),
+					psRGXDebugMiscSetOSNewOnlineStateIN->ui32OSid,
+					psRGXDebugMiscSetOSNewOnlineStateIN->ui32OSNewState);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitDEBUGMISCBridge(void);
+PVRSRV_ERROR DeinitDEBUGMISCBridge(void);
+
+/*
+ * Register all DEBUGMISC functions with services
+ */
+PVRSRV_ERROR InitDEBUGMISCBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_DEBUGMISCSLCSETBYPASSSTATE, PVRSRVBridgeDebugMiscSLCSetBypassState,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETFWLOG, PVRSRVBridgeRGXDebugMiscSetFWLog,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCDUMPFREELISTPAGELIST, PVRSRVBridgeRGXDebugMiscDumpFreelistPageList,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_PHYSMEMIMPORTSECBUF, PVRSRVBridgePhysmemImportSecBuf,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETHCSDEADLINE, PVRSRVBridgeRGXDebugMiscSetHCSDeadline,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSIDPRIORITY, PVRSRVBridgeRGXDebugMiscSetOSidPriority,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSNEWONLINESTATE, PVRSRVBridgeRGXDebugMiscSetOSNewOnlineState,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all DEBUGMISC functions from services
+ */
+PVRSRV_ERROR DeinitDEBUGMISCBridge(void)
+{
+	return PVRSRV_OK;
+}
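
Each SetDispatchTableEntry() call above keys a handler by its (bridge group, command) pair; at ioctl time the dispatcher indexes that table and invokes the handler with the unpacked IN/OUT structures. A conceptual model only; the real table also carries names, stats, and the optional per-entry lock selected by bUseLock above:

typedef int (*BRIDGE_FN)(unsigned int ui32Entry,
                         void *pvIn, void *pvOut, void *pvConn);

#define EXAMPLE_GROUP_COUNT 8    /* hypothetical sizes */
#define EXAMPLE_CMD_COUNT   16

static BRIDGE_FN g_aDispatch[EXAMPLE_GROUP_COUNT][EXAMPLE_CMD_COUNT];

static void ExampleSetEntry(unsigned int uiGroup, unsigned int uiCmd,
                            BRIDGE_FN pfn)
{
	g_aDispatch[uiGroup][uiCmd] = pfn;
}

static int ExampleDispatch(unsigned int uiGroup, unsigned int uiCmd,
                           void *pvIn, void *pvOut, void *pvConn)
{
	BRIDGE_FN pfn = g_aDispatch[uiGroup][uiCmd];

	return pfn ? pfn(uiCmd, pvIn, pvOut, pvConn) : -1;
}
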
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/devicememhistory_bridge/client_devicememhistory_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/devicememhistory_bridge/client_devicememhistory_bridge.h
new file mode 100644
index 0000000..0f6ae5f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/devicememhistory_bridge/client_devicememhistory_bridge.h
@@ -0,0 +1,112 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for devicememhistory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+#define CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_devicememhistory_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
+								 IMG_HANDLE hPMR,
+								 IMG_DEVMEM_SIZE_T uiOffset,
+								 IMG_DEV_VIRTADDR sDevVAddr,
+								 IMG_DEVMEM_SIZE_T uiSize,
+								 const IMG_CHAR *puiText,
+								 IMG_UINT32 ui32Log2PageSize,
+								 IMG_UINT32 ui32AllocationIndex,
+								 IMG_UINT32 *pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge,
+								   IMG_HANDLE hPMR,
+								   IMG_DEVMEM_SIZE_T uiOffset,
+								   IMG_DEV_VIRTADDR sDevVAddr,
+								   IMG_DEVMEM_SIZE_T uiSize,
+								   const IMG_CHAR *puiText,
+								   IMG_UINT32 ui32Log2PageSize,
+								   IMG_UINT32 ui32AllocationIndex,
+								   IMG_UINT32 *pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge,
+								       IMG_DEV_VIRTADDR sBaseDevVAddr,
+								       IMG_UINT32 ui32ui32StartPage,
+								       IMG_UINT32 ui32NumPages,
+								       IMG_DEVMEM_SIZE_T uiAllocSize,
+								       const IMG_CHAR *puiText,
+								       IMG_UINT32 ui32Log2PageSize,
+								       IMG_UINT32 ui32AllocationIndex,
+								       IMG_UINT32 *pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge,
+									 IMG_DEV_VIRTADDR sBaseDevVAddr,
+									 IMG_UINT32 ui32ui32StartPage,
+									 IMG_UINT32 ui32NumPages,
+									 IMG_DEVMEM_SIZE_T uiAllocSize,
+									 const IMG_CHAR *puiText,
+									 IMG_UINT32 ui32Log2PageSize,
+									 IMG_UINT32 ui32AllocationIndex,
+									 IMG_UINT32 *pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge,
+									  IMG_HANDLE hPMR,
+									  IMG_DEVMEM_SIZE_T uiOffset,
+									  IMG_DEV_VIRTADDR sDevVAddr,
+									  IMG_DEVMEM_SIZE_T uiSize,
+									  const IMG_CHAR *puiText,
+									  IMG_UINT32 ui32Log2PageSize,
+									  IMG_UINT32 ui32AllocPageCount,
+									  IMG_UINT32 *pui32AllocPageIndices,
+									  IMG_UINT32 ui32FreePageCount,
+									  IMG_UINT32 *pui32FreePageIndices,
+									  IMG_UINT32 ui32AllocationIndex,
+									  IMG_UINT32 *pui32AllocationIndexOut);
+
+
+#endif /* CLIENT_DEVICEMEMHISTORY_BRIDGE_H */
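
A hedged usage sketch of the first entry point declared above. It assumes the driver's usual typedefs are in scope, and every value below is a placeholder; in real code they come from an open services connection and an existing PMR:

IMG_HANDLE        hBridge       = NULL;   /* placeholder connection handle */
IMG_HANDLE        hPMR          = NULL;   /* placeholder PMR handle */
IMG_DEVMEM_SIZE_T uiOffset      = 0;
IMG_DEV_VIRTADDR  sDevVAddr     = { 0 };
IMG_DEVMEM_SIZE_T uiSize        = 4096;
IMG_UINT32        ui32PrevIndex = 0;
IMG_UINT32        ui32NewIndex  = 0;
PVRSRV_ERROR      eError;

eError = BridgeDevicememHistoryMap(hBridge, hPMR, uiOffset, sDevVAddr,
                                   uiSize, "example-alloc",
                                   12,    /* log2 of a 4 KiB page */
                                   ui32PrevIndex, &ui32NewIndex);
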
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/devicememhistory_bridge/client_devicememhistory_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/devicememhistory_bridge/client_devicememhistory_direct_bridge.c
new file mode 100644
index 0000000..0440968
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/devicememhistory_bridge/client_devicememhistory_direct_bridge.c
@@ -0,0 +1,206 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_devicememhistory_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "img_types.h"
+#include "mm_common.h"
+
+#include "devicemem_history_server.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
+								 IMG_HANDLE hPMR,
+								 IMG_DEVMEM_SIZE_T uiOffset,
+								 IMG_DEV_VIRTADDR sDevVAddr,
+								 IMG_DEVMEM_SIZE_T uiSize,
+								 const IMG_CHAR *puiText,
+								 IMG_UINT32 ui32Log2PageSize,
+								 IMG_UINT32 ui32AllocationIndex,
+								 IMG_UINT32 *pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		DevicememHistoryMapKM(
+					psPMRInt,
+					uiOffset,
+					sDevVAddr,
+					uiSize,
+					puiText,
+					ui32Log2PageSize,
+					ui32AllocationIndex,
+					pui32AllocationIndexOut);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge,
+								   IMG_HANDLE hPMR,
+								   IMG_DEVMEM_SIZE_T uiOffset,
+								   IMG_DEV_VIRTADDR sDevVAddr,
+								   IMG_DEVMEM_SIZE_T uiSize,
+								   const IMG_CHAR *puiText,
+								   IMG_UINT32 ui32Log2PageSize,
+								   IMG_UINT32 ui32AllocationIndex,
+								   IMG_UINT32 *pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		DevicememHistoryUnmapKM(
+					psPMRInt,
+					uiOffset,
+					sDevVAddr,
+					uiSize,
+					puiText,
+					ui32Log2PageSize,
+					ui32AllocationIndex,
+					pui32AllocationIndexOut);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge,
+								       IMG_DEV_VIRTADDR sBaseDevVAddr,
+								       IMG_UINT32 ui32ui32StartPage,
+								       IMG_UINT32 ui32NumPages,
+								       IMG_DEVMEM_SIZE_T uiAllocSize,
+								       const IMG_CHAR *puiText,
+								       IMG_UINT32 ui32Log2PageSize,
+								       IMG_UINT32 ui32AllocationIndex,
+								       IMG_UINT32 *pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		DevicememHistoryMapVRangeKM(
+					sBaseDevVAddr,
+					ui32ui32StartPage,
+					ui32NumPages,
+					uiAllocSize,
+					puiText,
+					ui32Log2PageSize,
+					ui32AllocationIndex,
+					pui32AllocationIndexOut);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge,
+									 IMG_DEV_VIRTADDR sBaseDevVAddr,
+									 IMG_UINT32 ui32ui32StartPage,
+									 IMG_UINT32 ui32NumPages,
+									 IMG_DEVMEM_SIZE_T uiAllocSize,
+									 const IMG_CHAR *puiText,
+									 IMG_UINT32 ui32Log2PageSize,
+									 IMG_UINT32 ui32AllocationIndex,
+									 IMG_UINT32 *pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		DevicememHistoryUnmapVRangeKM(
+					sBaseDevVAddr,
+					ui32ui32StartPage,
+					ui32NumPages,
+					uiAllocSize,
+					puiText,
+					ui32Log2PageSize,
+					ui32AllocationIndex,
+					pui32AllocationIndexOut);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge,
+									  IMG_HANDLE hPMR,
+									  IMG_DEVMEM_SIZE_T uiOffset,
+									  IMG_DEV_VIRTADDR sDevVAddr,
+									  IMG_DEVMEM_SIZE_T uiSize,
+									  const IMG_CHAR *puiText,
+									  IMG_UINT32 ui32Log2PageSize,
+									  IMG_UINT32 ui32AllocPageCount,
+									  IMG_UINT32 *pui32AllocPageIndices,
+									  IMG_UINT32 ui32FreePageCount,
+									  IMG_UINT32 *pui32FreePageIndices,
+									  IMG_UINT32 ui32AllocationIndex,
+									  IMG_UINT32 *pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		DevicememHistorySparseChangeKM(
+					psPMRInt,
+					uiOffset,
+					sDevVAddr,
+					uiSize,
+					puiText,
+					ui32Log2PageSize,
+					ui32AllocPageCount,
+					pui32AllocPageIndices,
+					ui32FreePageCount,
+					pui32FreePageIndices,
+					ui32AllocationIndex,
+					pui32AllocationIndexOut);
+
+	return eError;
+}
+
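Each direct-bridge function above is essentially a cast plus a call: with client and server in one address space, hPMR already is the PMR pointer, so no handle-table lookup or locking is required, unlike the server bridge that follows. The pattern in miniature, with hypothetical example names:

typedef struct example_pmr EXAMPLE_PMR;   /* opaque, like the real PMR */
extern int ExampleHistoryKM(EXAMPLE_PMR *psPMR);

static int BridgeExampleDirect(void *hPMR)
{
	/* The "handle" is already the kernel pointer; just cast and call. */
	return ExampleHistoryKM((EXAMPLE_PMR *)hPMR);
}
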
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/devicememhistory_bridge/common_devicememhistory_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/devicememhistory_bridge/common_devicememhistory_bridge.h
new file mode 100644
index 0000000..2d6a2ce
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/devicememhistory_bridge/common_devicememhistory_bridge.h
@@ -0,0 +1,190 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for devicememhistory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_DEVICEMEMHISTORY_BRIDGE_H
+#define COMMON_DEVICEMEMHISTORY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "img_types.h"
+#include "mm_common.h"
+
+
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST			0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP			PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP			PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE			PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE			PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE			PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST			(PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4)
+
+
+/*******************************************
+            DevicememHistoryMap
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_SIZE_T uiOffset;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	const IMG_CHAR * puiText;
+	IMG_UINT32 ui32Log2PageSize;
+	IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP;
+
+/* Bridge out structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP_TAG
+{
+	IMG_UINT32 ui32AllocationIndexOut;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP;
+
+
+/*******************************************
+            DevicememHistoryUnmap
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_SIZE_T uiOffset;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	const IMG_CHAR * puiText;
+	IMG_UINT32 ui32Log2PageSize;
+	IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP;
+
+/* Bridge out structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP_TAG
+{
+	IMG_UINT32 ui32AllocationIndexOut;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP;
+
+
+/*******************************************
+            DevicememHistoryMapVRange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMapVRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE_TAG
+{
+	IMG_DEV_VIRTADDR sBaseDevVAddr;
+	IMG_UINT32 ui32ui32StartPage;
+	IMG_UINT32 ui32NumPages;
+	IMG_DEVMEM_SIZE_T uiAllocSize;
+	const IMG_CHAR * puiText;
+	IMG_UINT32 ui32Log2PageSize;
+	IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE;
+
+/* Bridge out structure for DevicememHistoryMapVRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE_TAG
+{
+	IMG_UINT32 ui32AllocationIndexOut;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE;
+
+
+/*******************************************
+            DevicememHistoryUnmapVRange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmapVRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE_TAG
+{
+	IMG_DEV_VIRTADDR sBaseDevVAddr;
+	IMG_UINT32 ui32ui32StartPage;
+	IMG_UINT32 ui32NumPages;
+	IMG_DEVMEM_SIZE_T uiAllocSize;
+	const IMG_CHAR * puiText;
+	IMG_UINT32 ui32Log2PageSize;
+	IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE;
+
+/* Bridge out structure for DevicememHistoryUnmapVRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE_TAG
+{
+	IMG_UINT32 ui32AllocationIndexOut;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE;
+
+
+/*******************************************
+            DevicememHistorySparseChange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistorySparseChange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_SIZE_T uiOffset;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	const IMG_CHAR * puiText;
+	IMG_UINT32 ui32Log2PageSize;
+	IMG_UINT32 ui32AllocPageCount;
+	IMG_UINT32 * pui32AllocPageIndices;
+	IMG_UINT32 ui32FreePageCount;
+	IMG_UINT32 * pui32FreePageIndices;
+	IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE;
+
+/* Bridge out structure for DevicememHistorySparseChange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE_TAG
+{
+	IMG_UINT32 ui32AllocationIndexOut;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE;
+
+
+#endif /* COMMON_DEVICEMEMHISTORY_BRIDGE_H */
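
Because the command IDs above are contiguous from CMD_FIRST to CMD_LAST, a dispatcher can validate an incoming command with a single range check before indexing its table. A sketch using the macros from this header (the real validation lives in the common bridge code):

/* Sketch only; assumes IMG_UINT32 and the macros above are in scope. */
static inline int ExampleIsDevicememHistoryCmd(IMG_UINT32 ui32Cmd)
{
	/* CMD_FIRST is 0, so for an unsigned value only the upper bound matters. */
	return ui32Cmd <= PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST;
}
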
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/devicememhistory_bridge/server_devicememhistory_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/devicememhistory_bridge/server_devicememhistory_bridge.c
new file mode 100644
index 0000000..e2bba62
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/devicememhistory_bridge/server_devicememhistory_bridge.c
@@ -0,0 +1,809 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for devicememhistory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_history_server.h"
+
+
+#include "common_devicememhistory_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psDevicememHistoryMapIN,
+					  PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *psDevicememHistoryMapOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psDevicememHistoryMapIN->hPMR;
+	PMR * psPMRInt = NULL;
+	IMG_CHAR *uiTextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryMapIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto DevicememHistoryMap_exit;
+			}
+		}
+	}
+
+	
+	{
+		uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiTextInt, psDevicememHistoryMapIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto DevicememHistoryMap_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevicememHistoryMapOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psDevicememHistoryMapOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevicememHistoryMap_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevicememHistoryMapOUT->eError =
+		DevicememHistoryMapKM(
+					psPMRInt,
+					psDevicememHistoryMapIN->uiOffset,
+					psDevicememHistoryMapIN->sDevVAddr,
+					psDevicememHistoryMapIN->uiSize,
+					uiTextInt,
+					psDevicememHistoryMapIN->ui32Log2PageSize,
+					psDevicememHistoryMapIN->ui32AllocationIndex,
+					&psDevicememHistoryMapOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistoryMap_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
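
The buffer-handling preamble above avoids a heap allocation when it can: if the fixed-size bridge input message leaves enough word-aligned slack after the IN structure, array arguments such as puiText are staged in that slack; otherwise OSAllocMemNoStats() is used and freed at exit. A simplified model of the same decision, with plain C types standing in for the IMG_* typedefs:

#include <stdlib.h>

#define MAX_BRIDGE_IN_SIZE 1024   /* stand-in for PVRSRV_MAX_BRIDGE_IN_SIZE */
#define ALIGN_UP(x, a)     (((x) + (a) - 1) & ~((a) - 1))

/* Returns a staging area for array arguments; *heap_allocated tells the
 * caller whether the area must be freed afterwards. */
static unsigned char *stage_args(void *in_msg, size_t in_struct_size,
                                 size_t args_size, int *heap_allocated)
{
	size_t off = ALIGN_UP(in_struct_size, sizeof(unsigned long));

	if (off < MAX_BRIDGE_IN_SIZE && args_size <= MAX_BRIDGE_IN_SIZE - off)
	{
		*heap_allocated = 0;   /* reuse slack in the input buffer */
		return (unsigned char *)in_msg + off;
	}

	*heap_allocated = 1;       /* fall back to the heap */
	return malloc(args_size);
}
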
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapIN,
+					  PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psDevicememHistoryUnmapIN->hPMR;
+	PMR * psPMRInt = NULL;
+	IMG_CHAR *uiTextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryUnmapIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto DevicememHistoryUnmap_exit;
+			}
+		}
+	}
+
+	
+	{
+		uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiTextInt, psDevicememHistoryUnmapIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto DevicememHistoryUnmap_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevicememHistoryUnmapOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psDevicememHistoryUnmapOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevicememHistoryUnmap_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevicememHistoryUnmapOUT->eError =
+		DevicememHistoryUnmapKM(
+					psPMRInt,
+					psDevicememHistoryUnmapIN->uiOffset,
+					psDevicememHistoryUnmapIN->sDevVAddr,
+					psDevicememHistoryUnmapIN->uiSize,
+					uiTextInt,
+					psDevicememHistoryUnmapIN->ui32Log2PageSize,
+					psDevicememHistoryUnmapIN->ui32AllocationIndex,
+					&psDevicememHistoryUnmapOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistoryUnmap_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeIN,
+					  PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiTextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) +
+			0;
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryMapVRangeIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto DevicememHistoryMapVRange_exit;
+			}
+		}
+	}
+
+	
+	{
+		uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiTextInt, psDevicememHistoryMapVRangeIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto DevicememHistoryMapVRange_exit;
+				}
+			}
+
+
+	psDevicememHistoryMapVRangeOUT->eError =
+		DevicememHistoryMapVRangeKM(
+					psDevicememHistoryMapVRangeIN->sBaseDevVAddr,
+					psDevicememHistoryMapVRangeIN->ui32ui32StartPage,
+					psDevicememHistoryMapVRangeIN->ui32NumPages,
+					psDevicememHistoryMapVRangeIN->uiAllocSize,
+					uiTextInt,
+					psDevicememHistoryMapVRangeIN->ui32Log2PageSize,
+					psDevicememHistoryMapVRangeIN->ui32AllocationIndex,
+					&psDevicememHistoryMapVRangeOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistoryMapVRange_exit:
+
+
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeIN,
+					  PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiTextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) +
+			0;
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryUnmapVRangeIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto DevicememHistoryUnmapVRange_exit;
+			}
+		}
+	}
+
+	
+	{
+		uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiTextInt, psDevicememHistoryUnmapVRangeIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto DevicememHistoryUnmapVRange_exit;
+				}
+			}
+
+
+	psDevicememHistoryUnmapVRangeOUT->eError =
+		DevicememHistoryUnmapVRangeKM(
+					psDevicememHistoryUnmapVRangeIN->sBaseDevVAddr,
+					psDevicememHistoryUnmapVRangeIN->ui32ui32StartPage,
+					psDevicememHistoryUnmapVRangeIN->ui32NumPages,
+					psDevicememHistoryUnmapVRangeIN->uiAllocSize,
+					uiTextInt,
+					psDevicememHistoryUnmapVRangeIN->ui32Log2PageSize,
+					psDevicememHistoryUnmapVRangeIN->ui32AllocationIndex,
+					&psDevicememHistoryUnmapVRangeOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistoryUnmapVRange_exit:
+
+
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeIN,
+					  PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psDevicememHistorySparseChangeIN->hPMR;
+	PMR * psPMRInt = NULL;
+	IMG_CHAR *uiTextInt = NULL;
+	IMG_UINT32 *ui32AllocPageIndicesInt = NULL;
+	IMG_UINT32 *ui32FreePageIndicesInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) +
+			(psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32)) +
+			(psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistorySparseChangeIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto DevicememHistorySparseChange_exit;
+			}
+		}
+	}
+
+	{
+		uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser(NULL, uiTextInt, psDevicememHistorySparseChangeIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto DevicememHistorySparseChange_exit;
+		}
+	}
+
+	if (psDevicememHistorySparseChangeIN->ui32AllocPageCount != 0)
+	{
+		ui32AllocPageIndicesInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32AllocPageIndicesInt, psDevicememHistorySparseChangeIN->pui32AllocPageIndices, psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto DevicememHistorySparseChange_exit;
+		}
+	}
+
+	if (psDevicememHistorySparseChangeIN->ui32FreePageCount != 0)
+	{
+		ui32FreePageIndicesInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32FreePageIndicesInt, psDevicememHistorySparseChangeIN->pui32FreePageIndices, psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto DevicememHistorySparseChange_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+	{
+		/* Look up the address from the handle */
+		psDevicememHistorySparseChangeOUT->eError =
+			PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						(void **) &psPMRInt,
+						hPMR,
+						PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+						IMG_TRUE);
+		if (psDevicememHistorySparseChangeOUT->eError != PVRSRV_OK)
+		{
+			UnlockHandle();
+			goto DevicememHistorySparseChange_exit;
+		}
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle();
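+
+	/* The lookup above took a reference on the handle (IMG_TRUE); the exit
+	 * path releases it once the KM call has completed.
+	 */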
+
+	psDevicememHistorySparseChangeOUT->eError =
+		DevicememHistorySparseChangeKM(
+					psPMRInt,
+					psDevicememHistorySparseChangeIN->uiOffset,
+					psDevicememHistorySparseChangeIN->sDevVAddr,
+					psDevicememHistorySparseChangeIN->uiSize,
+					uiTextInt,
+					psDevicememHistorySparseChangeIN->ui32Log2PageSize,
+					psDevicememHistorySparseChangeIN->ui32AllocPageCount,
+					ui32AllocPageIndicesInt,
+					psDevicememHistorySparseChangeIN->ui32FreePageCount,
+					ui32FreePageIndicesInt,
+					psDevicememHistorySparseChangeIN->ui32AllocationIndex,
+					&psDevicememHistorySparseChangeOUT->ui32AllocationIndexOut);
+
+DevicememHistorySparseChange_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	{
+		/* Unreference the previously looked up handle */
+		if (psPMRInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						hPMR,
+						PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+static POS_LOCK pDEVICEMEMHISTORYBridgeLock;
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* SUPPORT_DEVICEMEMHISTORY_BRIDGE */
+
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void);
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void);
+
+/*
+ * Register all DEVICEMEMHISTORY functions with services
+ */
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void)
+{
+	PVR_LOGR_IF_ERROR(OSLockCreate(&pDEVICEMEMHISTORYBridgeLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
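+	/* Each dispatch entry maps a (bridge group, function ID) pair to its
+	 * handler; every DEVICEMEMHISTORY entry is registered with the same
+	 * passive lock created above.
+	 */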
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP, PVRSRVBridgeDevicememHistoryMap,
+					pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP, PVRSRVBridgeDevicememHistoryUnmap,
+					pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE, PVRSRVBridgeDevicememHistoryMapVRange,
+					pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE, PVRSRVBridgeDevicememHistoryUnmapVRange,
+					pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE, PVRSRVBridgeDevicememHistorySparseChange,
+					pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all devicememhistory functions from services
+ */
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void)
+{
+	PVR_LOGR_IF_ERROR(OSLockDestroy(pDEVICEMEMHISTORYBridgeLock), "OSLockDestroy");
+	return PVRSRV_OK;
+}
+#else /* SUPPORT_DEVICEMEMHISTORY_BRIDGE */
+/* This bridge is conditional on SUPPORT_DEVICEMEMHISTORY_BRIDGE - when it is
+ * not defined, the dispatch table is not populated with this bridge's
+ * functions.
+ */
+#define InitDEVICEMEMHISTORYBridge() \
+	PVRSRV_OK
+
+#define DeinitDEVICEMEMHISTORYBridge() \
+	PVRSRV_OK
+
+#endif /* SUPPORT_DEVICEMEMHISTORY_BRIDGE */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/dmabuf_bridge/common_dmabuf_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/dmabuf_bridge/common_dmabuf_bridge.h
new file mode 100644
index 0000000..5647cbd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/dmabuf_bridge/common_dmabuf_bridge.h
@@ -0,0 +1,127 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for dmabuf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for dmabuf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_DMABUF_BRIDGE_H
+#define COMMON_DMABUF_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+
+
+#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST			0
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF			PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF			PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF			PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DMABUF_CMD_LAST			(PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2)
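+
+/* Command IDs are zero-based offsets from PVRSRV_BRIDGE_DMABUF_CMD_FIRST;
+ * CMD_LAST marks the end of the range handled by this bridge.
+ */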
+
+
+/*******************************************
+            PhysmemImportDmaBuf          
+ *******************************************/
+
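+/* All bridge structures are packed so that the user- and kernel-side views
+ * of the payload agree regardless of compiler struct padding.
+ */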
+/* Bridge in structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF_TAG
+{
+	IMG_INT ifd;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF;
+
+/* Bridge out structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_ALIGN_T sAlign;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF;
+
+
+/*******************************************
+            PhysmemExportDmaBuf          
+ *******************************************/
+
+/* Bridge in structure for PhysmemExportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF;
+
+/* Bridge out structure for PhysmemExportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG
+{
+	IMG_INT iFd;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF;
+
+
+/*******************************************
+            PhysmemImportSparseDmaBuf          
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportSparseDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF_TAG
+{
+	IMG_INT ifd;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+	IMG_DEVMEM_SIZE_T uiChunkSize;
+	IMG_UINT32 ui32NumPhysChunks;
+	IMG_UINT32 ui32NumVirtChunks;
+	IMG_UINT32 * pui32MappingTable;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF;
+
+/* Bridge out structure for PhysmemImportSparseDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_ALIGN_T sAlign;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF;
+
+
+#endif /* COMMON_DMABUF_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/dmabuf_bridge/server_dmabuf_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/dmabuf_bridge/server_dmabuf_bridge.c
new file mode 100644
index 0000000..afbdac2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/dmabuf_bridge/server_dmabuf_bridge.c
@@ -0,0 +1,392 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for dmabuf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for dmabuf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "physmem_dmabuf.h"
+#include "pmr.h"
+
+
+#include "common_dmabuf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufIN,
+					  PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRPtrInt = NULL;
+
+	psPhysmemImportDmaBufOUT->eError =
+		PhysmemImportDmaBuf(psConnection, OSGetDevData(psConnection),
+					psPhysmemImportDmaBufIN->ifd,
+					psPhysmemImportDmaBufIN->uiFlags,
+					&psPMRPtrInt,
+					&psPhysmemImportDmaBufOUT->uiSize,
+					&psPhysmemImportDmaBufOUT->sAlign);
+	/* Exit early if bridged call fails */
+	if(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+	{
+		goto PhysmemImportDmaBuf_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+	psPhysmemImportDmaBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psPhysmemImportDmaBufOUT->hPMRPtr,
+							(void *) psPMRPtrInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto PhysmemImportDmaBuf_exit;
+	}
+
+	/* Release now that we have created the handles. */
+	UnlockHandle();
+
+
+
+PhysmemImportDmaBuf_exit:
+
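+	/* On failure, drop the reference taken on the PMR by PhysmemImportDmaBuf
+	 * so that it is not leaked.
+	 */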
+	if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			PMRUnrefPMR(psPMRPtrInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufIN,
+					  PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psPhysmemExportDmaBufIN->hPMR;
+	PMR * psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+	{
+		/* Look up the address from the handle */
+		psPhysmemExportDmaBufOUT->eError =
+			PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						(void **) &psPMRInt,
+						hPMR,
+						PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+						IMG_TRUE);
+		if (psPhysmemExportDmaBufOUT->eError != PVRSRV_OK)
+		{
+			UnlockHandle();
+			goto PhysmemExportDmaBuf_exit;
+		}
+	}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle();
+
+	psPhysmemExportDmaBufOUT->eError =
+		PhysmemExportDmaBuf(psConnection, OSGetDevData(psConnection),
+					psPMRInt,
+					&psPhysmemExportDmaBufOUT->iFd);
+
+PhysmemExportDmaBuf_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	{
+		/* Unreference the previously looked up handle */
+		if (psPMRInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						hPMR,
+						PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufIN,
+					  PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_UINT32 *ui32MappingTableInt = NULL;
+	PMR * psPMRPtrInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) +
+			0;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPhysmemImportSparseDmaBufIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto PhysmemImportSparseDmaBuf_exit;
+			}
+		}
+	}
+
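+	/* Sub-allocate space for the caller-supplied mapping table (one
+	 * IMG_UINT32 per physical chunk) from the array-args buffer.
+	 */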
+	if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks != 0)
+	{
+		ui32MappingTableInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32MappingTableInt, psPhysmemImportSparseDmaBufIN->pui32MappingTable, psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PhysmemImportSparseDmaBuf_exit;
+		}
+	}
+
+
+	psPhysmemImportSparseDmaBufOUT->eError =
+		PhysmemImportSparseDmaBuf(psConnection, OSGetDevData(psConnection),
+					psPhysmemImportSparseDmaBufIN->ifd,
+					psPhysmemImportSparseDmaBufIN->uiFlags,
+					psPhysmemImportSparseDmaBufIN->uiChunkSize,
+					psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks,
+					psPhysmemImportSparseDmaBufIN->ui32NumVirtChunks,
+					ui32MappingTableInt,
+					&psPMRPtrInt,
+					&psPhysmemImportSparseDmaBufOUT->uiSize,
+					&psPhysmemImportSparseDmaBufOUT->sAlign);
+	/* Exit early if bridged call fails */
+	if(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)
+	{
+		goto PhysmemImportSparseDmaBuf_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+	psPhysmemImportSparseDmaBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psPhysmemImportSparseDmaBufOUT->hPMRPtr,
+							(void *) psPMRPtrInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto PhysmemImportSparseDmaBuf_exit;
+	}
+
+	/* Release now that we have created the handles. */
+	UnlockHandle();
+
+PhysmemImportSparseDmaBuf_exit:
+
+	if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			PMRUnrefPMR(psPMRPtrInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
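+/* Unlike the bridges above, the DMABUF bridge registers its dispatch
+ * entries without a dedicated per-module lock (NULL is passed to
+ * SetDispatchTableEntry below).
+ */
+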
+PVRSRV_ERROR InitDMABUFBridge(void);
+PVRSRV_ERROR DeinitDMABUFBridge(void);
+
+/*
+ * Register all DMABUF functions with services
+ */
+PVRSRV_ERROR InitDMABUFBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF, PVRSRVBridgePhysmemImportDmaBuf,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF, PVRSRVBridgePhysmemExportDmaBuf,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF, PVRSRVBridgePhysmemImportSparseDmaBuf,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all dmabuf functions from services
+ */
+PVRSRV_ERROR DeinitDMABUFBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/htbuffer_bridge/client_htbuffer_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/htbuffer_bridge/client_htbuffer_bridge.h
new file mode 100644
index 0000000..e9d7f59
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/htbuffer_bridge/client_htbuffer_bridge.h
@@ -0,0 +1,78 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for htbuffer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for htbuffer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_HTBUFFER_BRIDGE_H
+#define CLIENT_HTBUFFER_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_htbuffer_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBConfigure(IMG_HANDLE hBridge,
+							  IMG_UINT32 ui32NameSize,
+							  const IMG_CHAR *puiName,
+							  IMG_UINT32 ui32BufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBControl(IMG_HANDLE hBridge,
+							IMG_UINT32 ui32NumGroups,
+							IMG_UINT32 *pui32GroupEnable,
+							IMG_UINT32 ui32LogLevel,
+							IMG_UINT32 ui32EnablePID,
+							IMG_UINT32 ui32LogMode,
+							IMG_UINT32 ui32OpMode);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBLog(IMG_HANDLE hBridge,
+						    IMG_UINT32 ui32PID,
+						    IMG_UINT32 ui32TimeStamp,
+						    IMG_UINT32 ui32SF,
+						    IMG_UINT32 ui32NumArgs,
+						    IMG_UINT32 *pui32Args);
+
+
+#endif /* CLIENT_HTBUFFER_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/htbuffer_bridge/client_htbuffer_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/htbuffer_bridge/client_htbuffer_direct_bridge.c
new file mode 100644
index 0000000..3a44682
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/htbuffer_bridge/client_htbuffer_direct_bridge.c
@@ -0,0 +1,115 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for htbuffer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_htbuffer_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+
+#include "htbserver.h"
+
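+/* In the direct-bridge configuration the client entry points below call
+ * the kernel-mode HTB functions directly, so the bridge handle is unused.
+ */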
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBConfigure(IMG_HANDLE hBridge,
+							  IMG_UINT32 ui32NameSize,
+							  const IMG_CHAR *puiName,
+							  IMG_UINT32 ui32BufferSize)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		HTBConfigureKM(
+					ui32NameSize,
+					puiName,
+					ui32BufferSize);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBControl(IMG_HANDLE hBridge,
+							IMG_UINT32 ui32NumGroups,
+							IMG_UINT32 *pui32GroupEnable,
+							IMG_UINT32 ui32LogLevel,
+							IMG_UINT32 ui32EnablePID,
+							IMG_UINT32 ui32LogMode,
+							IMG_UINT32 ui32OpMode)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		HTBControlKM(
+					ui32NumGroups,
+					pui32GroupEnable,
+					ui32LogLevel,
+					ui32EnablePID,
+					ui32LogMode,
+					ui32OpMode);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBLog(IMG_HANDLE hBridge,
+						    IMG_UINT32 ui32PID,
+						    IMG_UINT32 ui32TimeStamp,
+						    IMG_UINT32 ui32SF,
+						    IMG_UINT32 ui32NumArgs,
+						    IMG_UINT32 *pui32Args)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		HTBLogKM(
+					ui32PID,
+					ui32TimeStamp,
+					ui32SF,
+					ui32NumArgs,
+					pui32Args);
+
+	return eError;
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/htbuffer_bridge/common_htbuffer_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/htbuffer_bridge/common_htbuffer_bridge.h
new file mode 100644
index 0000000..496d736
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/htbuffer_bridge/common_htbuffer_bridge.h
@@ -0,0 +1,125 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for htbuffer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for htbuffer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_HTBUFFER_BRIDGE_H
+#define COMMON_HTBUFFER_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST			0
+#define PVRSRV_BRIDGE_HTBUFFER_HTBCONFIGURE			PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0
+#define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL			PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1
+#define PVRSRV_BRIDGE_HTBUFFER_HTBLOG			PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+2
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST			(PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+2)
+
+
+/*******************************************
+            HTBConfigure          
+ *******************************************/
+
+/* Bridge in structure for HTBConfigure */
+typedef struct PVRSRV_BRIDGE_IN_HTBCONFIGURE_TAG
+{
+	IMG_UINT32 ui32NameSize;
+	const IMG_CHAR * puiName;
+	IMG_UINT32 ui32BufferSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HTBCONFIGURE;
+
+/* Bridge out structure for HTBConfigure */
+typedef struct PVRSRV_BRIDGE_OUT_HTBCONFIGURE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HTBCONFIGURE;
+
+
+/*******************************************
+            HTBControl          
+ *******************************************/
+
+/* Bridge in structure for HTBControl */
+typedef struct PVRSRV_BRIDGE_IN_HTBCONTROL_TAG
+{
+	IMG_UINT32 ui32NumGroups;
+	IMG_UINT32 * pui32GroupEnable;
+	IMG_UINT32 ui32LogLevel;
+	IMG_UINT32 ui32EnablePID;
+	IMG_UINT32 ui32LogMode;
+	IMG_UINT32 ui32OpMode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HTBCONTROL;
+
+/* Bridge out structure for HTBControl */
+typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HTBCONTROL;
+
+
+/*******************************************
+            HTBLog          
+ *******************************************/
+
+/* Bridge in structure for HTBLog */
+typedef struct PVRSRV_BRIDGE_IN_HTBLOG_TAG
+{
+	IMG_UINT32 ui32PID;
+	IMG_UINT32 ui32TimeStamp;
+	IMG_UINT32 ui32SF;
+	IMG_UINT32 ui32NumArgs;
+	IMG_UINT32 * pui32Args;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HTBLOG;
+
+/* Bridge out structure for HTBLog */
+typedef struct PVRSRV_BRIDGE_OUT_HTBLOG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HTBLOG;
+
+
+#endif /* COMMON_HTBUFFER_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/htbuffer_bridge/server_htbuffer_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/htbuffer_bridge/server_htbuffer_bridge.c
new file mode 100644
index 0000000..f115e765
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/htbuffer_bridge/server_htbuffer_bridge.c
@@ -0,0 +1,423 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for htbuffer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for htbuffer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "htbserver.h"
+
+
+#include "common_htbuffer_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeHTBConfigure(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HTBCONFIGURE *psHTBConfigureIN,
+					  PVRSRV_BRIDGE_OUT_HTBCONFIGURE *psHTBConfigureOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
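+	/* Total size of the variable-length arguments to be copied from user
+	 * space; the trailing "+ 0" terminates the generated sum for calls
+	 * with no array arguments.
+	 */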
+	IMG_UINT32 ui32BufferSize = 
+			(psHTBConfigureIN->ui32NameSize * sizeof(IMG_CHAR)) +
+			0;
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHTBConfigureIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHTBConfigureIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psHTBConfigureOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto HTBConfigure_exit;
+			}
+		}
+	}
+
+	if (psHTBConfigureIN->ui32NameSize != 0)
+	{
+		uiNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psHTBConfigureIN->ui32NameSize * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psHTBConfigureIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser(NULL, uiNameInt, psHTBConfigureIN->puiName, psHTBConfigureIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psHTBConfigureOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto HTBConfigure_exit;
+		}
+	}
+
+
+	psHTBConfigureOUT->eError =
+		HTBConfigureKM(
+					psHTBConfigureIN->ui32NameSize,
+					uiNameInt,
+					psHTBConfigureIN->ui32BufferSize);
+
+HTBConfigure_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HTBCONTROL *psHTBControlIN,
+					  PVRSRV_BRIDGE_OUT_HTBCONTROL *psHTBControlOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_UINT32 *ui32GroupEnableInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) +
+			0;
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHTBControlIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psHTBControlOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto HTBControl_exit;
+			}
+		}
+	}
+
+	if (psHTBControlIN->ui32NumGroups != 0)
+	{
+		ui32GroupEnableInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32GroupEnableInt, psHTBControlIN->pui32GroupEnable, psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psHTBControlOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto HTBControl_exit;
+		}
+	}
+
+
+	psHTBControlOUT->eError =
+		HTBControlKM(
+					psHTBControlIN->ui32NumGroups,
+					ui32GroupEnableInt,
+					psHTBControlIN->ui32LogLevel,
+					psHTBControlIN->ui32EnablePID,
+					psHTBControlIN->ui32LogMode,
+					psHTBControlIN->ui32OpMode);
+
+HTBControl_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHTBLog(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HTBLOG *psHTBLogIN,
+					  PVRSRV_BRIDGE_OUT_HTBLOG *psHTBLogOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_UINT32 *ui32ArgsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) +
+			0;
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHTBLogIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHTBLogIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psHTBLogOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto HTBLog_exit;
+			}
+		}
+	}
+
+	if (psHTBLogIN->ui32NumArgs != 0)
+	{
+		ui32ArgsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32ArgsInt, psHTBLogIN->pui32Args, psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psHTBLogOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto HTBLog_exit;
+		}
+	}
+
+
+	psHTBLogOUT->eError =
+		HTBLogKM(
+					psHTBLogIN->ui32PID,
+					psHTBLogIN->ui32TimeStamp,
+					psHTBLogIN->ui32SF,
+					psHTBLogIN->ui32NumArgs,
+					ui32ArgsInt);
+
+
+HTBLog_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+static POS_LOCK pHTBUFFERBridgeLock;
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_HTBUFFER_BRIDGE */
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+PVRSRV_ERROR InitHTBUFFERBridge(void);
+PVRSRV_ERROR DeinitHTBUFFERBridge(void);
+
+/*
+ * Register all HTBUFFER functions with services
+ */
+PVRSRV_ERROR InitHTBUFFERBridge(void)
+{
+	PVR_LOGR_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONFIGURE, PVRSRVBridgeHTBConfigure,
+					pHTBUFFERBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL, PVRSRVBridgeHTBControl,
+					pHTBUFFERBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG, PVRSRVBridgeHTBLog,
+					pHTBUFFERBridgeLock, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all htbuffer functions from services
+ */
+PVRSRV_ERROR DeinitHTBUFFERBridge(void)
+{
+	PVR_LOGR_IF_ERROR(OSLockDestroy(pHTBUFFERBridgeLock), "OSLockDestroy");
+	return PVRSRV_OK;
+}
+#else /* EXCLUDE_HTBUFFER_BRIDGE */
+/* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE - when it is defined,
+ * the dispatch table is not populated with this bridge's functions.
+ */
+#define InitHTBUFFERBridge() \
+	PVRSRV_OK
+
+#define DeinitHTBUFFERBridge() \
+	PVRSRV_OK
+
+#endif /* EXCLUDE_HTBUFFER_BRIDGE */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/mm_bridge/client_mm_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/mm_bridge/client_mm_bridge.h
new file mode 100644
index 0000000..050c423
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/mm_bridge/client_mm_bridge.h
@@ -0,0 +1,235 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for mm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_MM_BRIDGE_H
+#define CLIENT_MM_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_mm_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMR,
+							  IMG_HANDLE *phPMRExport,
+							  IMG_UINT64 *pui64Size,
+							  IMG_UINT32 *pui32Log2Contig,
+							  IMG_UINT64 *pui64Password);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge,
+							    IMG_HANDLE hPMRExport);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge,
+						       IMG_HANDLE hPMR,
+						       IMG_UINT64 *pui64UID);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge,
+								      IMG_HANDLE hBuffer,
+								      IMG_HANDLE *phExtMem);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge,
+									IMG_HANDLE hExtMem);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMRExport,
+							  IMG_UINT64 ui64uiPassword,
+							  IMG_UINT64 ui64uiSize,
+							  IMG_UINT32 ui32uiLog2Contig,
+							  IMG_HANDLE *phPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE hBridge,
+							       IMG_HANDLE hExtHandle,
+							       IMG_HANDLE *phPMR,
+							       IMG_DEVMEM_SIZE_T *puiSize,
+							       IMG_DEVMEM_ALIGN_T *psAlign);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge,
+							       IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
+								    IMG_DEVMEM_SIZE_T uiSize,
+								    IMG_DEVMEM_SIZE_T uiChunkSize,
+								    IMG_UINT32 ui32NumPhysChunks,
+								    IMG_UINT32 ui32NumVirtChunks,
+								    IMG_UINT32 *pui32MappingTable,
+								    IMG_UINT32 ui32Log2PageSize,
+								    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+								    IMG_UINT32 ui32AnnotationLength,
+								    const IMG_CHAR *puiAnnotation,
+								    IMG_HANDLE *phPMRPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge,
+									  IMG_DEVMEM_SIZE_T uiSize,
+									  IMG_DEVMEM_SIZE_T uiChunkSize,
+									  IMG_UINT32 ui32NumPhysChunks,
+									  IMG_UINT32 ui32NumVirtChunks,
+									  IMG_UINT32 *pui32MappingTable,
+									  IMG_UINT32 ui32Log2PageSize,
+									  PVRSRV_MEMALLOCFLAGS_T uiFlags,
+									  IMG_UINT32 ui32AnnotationLength,
+									  const IMG_CHAR *puiAnnotation,
+									  IMG_HANDLE *phPMRPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPin(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpin(IMG_HANDLE hBridge,
+							    IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPinValidate(IMG_HANDLE hBridge,
+								  IMG_HANDLE hMapping,
+								  IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge,
+								      IMG_HANDLE hMapping,
+								      IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
+								IMG_BOOL bbKernelMemoryCtx,
+								IMG_HANDLE *phDevMemServerContext,
+								IMG_HANDLE *phPrivData,
+								IMG_UINT32 *pui32CPUCacheLineSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge,
+								 IMG_HANDLE hDevmemServerContext);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
+								 IMG_HANDLE hDevmemCtx,
+								 IMG_DEV_VIRTADDR sHeapBaseAddr,
+								 IMG_DEVMEM_SIZE_T uiHeapLength,
+								 IMG_UINT32 ui32Log2DataPageSize,
+								 IMG_HANDLE *phDevmemHeapPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge,
+								  IMG_HANDLE hDevmemHeap);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+							     IMG_HANDLE hDevmemServerHeap,
+							     IMG_HANDLE hReservation,
+							     IMG_HANDLE hPMR,
+							     PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+							     IMG_HANDLE *phMapping);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge,
+							       IMG_HANDLE hMapping);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE hBridge,
+								   IMG_HANDLE hDevmemServerHeap,
+								   IMG_DEV_VIRTADDR sAddress,
+								   IMG_DEVMEM_SIZE_T uiLength,
+								   IMG_HANDLE *phReservation);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge,
+								     IMG_HANDLE hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeChangeSparseMem(IMG_HANDLE hBridge,
+							     IMG_HANDLE hSrvDevMemHeap,
+							     IMG_HANDLE hPMR,
+							     IMG_UINT32 ui32AllocPageCount,
+							     IMG_UINT32 *pui32AllocPageIndices,
+							     IMG_UINT32 ui32FreePageCount,
+							     IMG_UINT32 *pui32FreePageIndices,
+							     IMG_UINT32 ui32SparseFlags,
+							     PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							     IMG_DEV_VIRTADDR sDevVAddr,
+							     IMG_UINT64 ui64CPUVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPages(IMG_HANDLE hBridge,
+							       IMG_HANDLE hReservation,
+							       IMG_HANDLE hPMR,
+							       IMG_UINT32 ui32PageCount,
+							       IMG_UINT32 ui32PhysicalPgOffset,
+							       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							       IMG_DEV_VIRTADDR sDevVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge,
+								 IMG_HANDLE hReservation,
+								 IMG_DEV_VIRTADDR sDevVAddr,
+								 IMG_UINT32 ui32PageCount);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge,
+								   IMG_HANDLE hDevmemCtx,
+								   IMG_DEV_VIRTADDR sAddress);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge,
+								    IMG_UINT32 *pui32NumHeapConfigs);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE hBridge,
+							      IMG_UINT32 ui32HeapConfigIndex,
+							      IMG_UINT32 *pui32NumHeaps);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge,
+								   IMG_UINT32 ui32HeapConfigIndex,
+								   IMG_UINT32 ui32HeapConfigNameBufSz,
+								   IMG_CHAR *puiHeapConfigName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge,
+								IMG_UINT32 ui32HeapConfigIndex,
+								IMG_UINT32 ui32HeapIndex,
+								IMG_UINT32 ui32HeapNameBufSz,
+								IMG_CHAR *puiHeapNameOut,
+								IMG_DEV_VIRTADDR *psDevVAddrBase,
+								IMG_DEVMEM_SIZE_T *puiHeapLength,
+								IMG_UINT32 *pui32Log2DataPageSizeOut,
+								IMG_UINT32 *pui32Log2ImportAlignmentOut,
+								IMG_UINT32 *pui32Log2TilingStrideFactorOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge,
+									 IMG_HANDLE hDevmemCtx,
+									 IMG_UINT32 ui32PID,
+									 IMG_BOOL bRegister);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeGetMaxDevMemSize(IMG_HANDLE hBridge,
+							      IMG_DEVMEM_SIZE_T *puiLMASize,
+							      IMG_DEVMEM_SIZE_T *puiUMASize);
+
+
+#endif /* CLIENT_MM_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/mm_bridge/client_mm_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/mm_bridge/client_mm_direct_bridge.c
new file mode 100644
index 0000000..e3a130c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/mm_bridge/client_mm_direct_bridge.c
@@ -0,0 +1,758 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_mm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+
+
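+/* Every BridgeXxx() wrapper below follows the same direct-call pattern:
+ * the opaque IMG_HANDLE arguments are cast straight to their server-side
+ * types (PMR, DEVMEMINT_CTX, DEVMEMINT_HEAP, ...) and the underlying
+ * server function is called in-process, with no argument marshalling.
+ * Connection-scoped calls ignore hBridge; device-scoped calls treat it
+ * as the PVRSRV_DEVICE_NODE pointer. A minimal caller sketch, assuming
+ * hBridge and hPMR were obtained through the usual connect/allocation
+ * paths:
+ *
+ *	IMG_UINT64 ui64UID;
+ *	PVRSRV_ERROR eError = BridgePMRGetUID(hBridge, hPMR, &ui64UID);
+ *	if (eError != PVRSRV_OK)
+ *		return eError;
+ */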
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMR,
+							  IMG_HANDLE *phPMRExport,
+							  IMG_UINT64 *pui64Size,
+							  IMG_UINT32 *pui32Log2Contig,
+							  IMG_UINT64 *pui64Password)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PMR_EXPORT * psPMRExportInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRExportPMR(
+					psPMRInt,
+					&psPMRExportInt,
+					pui64Size,
+					pui32Log2Contig,
+					pui64Password);
+
+	*phPMRExport = psPMRExportInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge,
+							    IMG_HANDLE hPMRExport)
+{
+	PVRSRV_ERROR eError;
+	PMR_EXPORT * psPMRExportInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+	eError =
+		PMRUnexportPMR(
+					psPMRExportInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge,
+						       IMG_HANDLE hPMR,
+						       IMG_UINT64 *pui64UID)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRGetUID(
+					psPMRInt,
+					pui64UID);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge,
+								      IMG_HANDLE hBuffer,
+								      IMG_HANDLE *phExtMem)
+{
+	PVRSRV_ERROR eError;
+	PMR * psBufferInt;
+	PMR * psExtMemInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psBufferInt = (PMR *) hBuffer;
+
+	eError =
+		PMRMakeLocalImportHandle(
+					psBufferInt,
+					&psExtMemInt);
+
+	*phExtMem = psExtMemInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge,
+									IMG_HANDLE hExtMem)
+{
+	PVRSRV_ERROR eError;
+	PMR * psExtMemInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psExtMemInt = (PMR *) hExtMem;
+
+	eError =
+		PMRUnmakeLocalImportHandle(
+					psExtMemInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMRExport,
+							  IMG_UINT64 ui64uiPassword,
+							  IMG_UINT64 ui64uiSize,
+							  IMG_UINT32 ui32uiLog2Contig,
+							  IMG_HANDLE *phPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR_EXPORT * psPMRExportInt;
+	PMR * psPMRInt;
+
+	psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+	eError =
+		PhysmemImportPMR(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					psPMRExportInt,
+					ui64uiPassword,
+					ui64uiSize,
+					ui32uiLog2Contig,
+					&psPMRInt);
+
+	*phPMR = psPMRInt;
+	return eError;
+}
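+
+/* BridgePMRExportPMR and BridgePMRImportPMR form a pair: export returns
+ * the PMR's size, log2 contiguity and a password, and import must pass
+ * the same triple back for the server to validate. A sketch of the round
+ * trip, assuming both ends share hBridge and hNewPMR receives the result:
+ *
+ *	IMG_HANDLE hExport, hNewPMR;
+ *	IMG_UINT64 ui64Size, ui64Password;
+ *	IMG_UINT32 ui32Log2Contig;
+ *	eError = BridgePMRExportPMR(hBridge, hPMR, &hExport,
+ *				    &ui64Size, &ui32Log2Contig, &ui64Password);
+ *	...
+ *	eError = BridgePMRImportPMR(hBridge, hExport, ui64Password,
+ *				    ui64Size, ui32Log2Contig, &hNewPMR);
+ */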
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE hBridge,
+							       IMG_HANDLE hExtHandle,
+							       IMG_HANDLE *phPMR,
+							       IMG_DEVMEM_SIZE_T *puiSize,
+							       IMG_DEVMEM_ALIGN_T *psAlign)
+{
+	PVRSRV_ERROR eError;
+	PMR * psExtHandleInt;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psExtHandleInt = (PMR *) hExtHandle;
+
+	eError =
+		PMRLocalImportPMR(
+					psExtHandleInt,
+					&psPMRInt,
+					puiSize,
+					psAlign);
+
+	*phPMR = psPMRInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRUnrefPMR(
+					psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge,
+							       IMG_HANDLE hPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRUnrefUnlockPMR(
+					psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
+								    IMG_DEVMEM_SIZE_T uiSize,
+								    IMG_DEVMEM_SIZE_T uiChunkSize,
+								    IMG_UINT32 ui32NumPhysChunks,
+								    IMG_UINT32 ui32NumVirtChunks,
+								    IMG_UINT32 *pui32MappingTable,
+								    IMG_UINT32 ui32Log2PageSize,
+								    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+								    IMG_UINT32 ui32AnnotationLength,
+								    const IMG_CHAR *puiAnnotation,
+								    IMG_HANDLE *phPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRPtrInt;
+
+
+	eError =
+		PhysmemNewRamBackedPMR(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					uiSize,
+					uiChunkSize,
+					ui32NumPhysChunks,
+					ui32NumVirtChunks,
+					pui32MappingTable,
+					ui32Log2PageSize,
+					uiFlags,
+					ui32AnnotationLength,
+					puiAnnotation,
+					&psPMRPtrInt);
+
+	*phPMRPtr = psPMRPtrInt;
+	return eError;
+}
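+
+/* For sparse allocations the PMR spans ui32NumVirtChunks chunks of
+ * uiChunkSize bytes, of which only ui32NumPhysChunks receive physical
+ * backing; pui32MappingTable lists the virtual chunk indices that are
+ * backed. A dense allocation is the degenerate case where a single chunk
+ * covers the whole size. A sketch with hypothetical values (64KiB in one
+ * chunk, 4KiB pages so log2 page size is 12; uiFlags and hPMR declared
+ * by the caller):
+ *
+ *	IMG_UINT32 ui32MappingTable = 0;
+ *	eError = BridgePhysmemNewRamBackedPMR(hBridge, 0x10000, 0x10000,
+ *			1, 1, &ui32MappingTable, 12, uiFlags,
+ *			sizeof("alloc"), "alloc", &hPMR);
+ */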
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge,
+									  IMG_DEVMEM_SIZE_T uiSize,
+									  IMG_DEVMEM_SIZE_T uiChunkSize,
+									  IMG_UINT32 ui32NumPhysChunks,
+									  IMG_UINT32 ui32NumVirtChunks,
+									  IMG_UINT32 *pui32MappingTable,
+									  IMG_UINT32 ui32Log2PageSize,
+									  PVRSRV_MEMALLOCFLAGS_T uiFlags,
+									  IMG_UINT32 ui32AnnotationLength,
+									  const IMG_CHAR *puiAnnotation,
+									  IMG_HANDLE *phPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRPtrInt;
+
+
+	eError =
+		PhysmemNewRamBackedLockedPMR(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					uiSize,
+					uiChunkSize,
+					ui32NumPhysChunks,
+					ui32NumVirtChunks,
+					pui32MappingTable,
+					ui32Log2PageSize,
+					uiFlags,
+					ui32AnnotationLength,
+					puiAnnotation,
+					&psPMRPtrInt);
+
+	*phPMRPtr = psPMRPtrInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPin(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		DevmemIntPin(
+					psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpin(IMG_HANDLE hBridge,
+							    IMG_HANDLE hPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		DevmemIntUnpin(
+					psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPinValidate(IMG_HANDLE hBridge,
+								  IMG_HANDLE hMapping,
+								  IMG_HANDLE hPMR)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_MAPPING * psMappingInt;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		DevmemIntPinValidate(
+					psMappingInt,
+					psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge,
+								      IMG_HANDLE hMapping,
+								      IMG_HANDLE hPMR)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_MAPPING * psMappingInt;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		DevmemIntUnpinInvalidate(
+					psMappingInt,
+					psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
+								IMG_BOOL bbKernelMemoryCtx,
+								IMG_HANDLE *phDevMemServerContext,
+								IMG_HANDLE *phPrivData,
+								IMG_UINT32 *pui32CPUCacheLineSize)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX * psDevMemServerContextInt;
+	IMG_HANDLE hPrivDataInt;
+
+
+	eError =
+		DevmemIntCtxCreate(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					bbKernelMemoryCtx,
+					&psDevMemServerContextInt,
+					&hPrivDataInt,
+					pui32CPUCacheLineSize);
+
+	*phDevMemServerContext = psDevMemServerContextInt;
+	*phPrivData = hPrivDataInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge,
+								 IMG_HANDLE hDevmemServerContext)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX * psDevmemServerContextInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+	eError =
+		DevmemIntCtxDestroy(
+					psDevmemServerContextInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
+								 IMG_HANDLE hDevmemCtx,
+								 IMG_DEV_VIRTADDR sHeapBaseAddr,
+								 IMG_DEVMEM_SIZE_T uiHeapLength,
+								 IMG_UINT32 ui32Log2DataPageSize,
+								 IMG_HANDLE *phDevmemHeapPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX * psDevmemCtxInt;
+	DEVMEMINT_HEAP * psDevmemHeapPtrInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+	eError =
+		DevmemIntHeapCreate(
+					psDevmemCtxInt,
+					sHeapBaseAddr,
+					uiHeapLength,
+					ui32Log2DataPageSize,
+					&psDevmemHeapPtrInt);
+
+	*phDevmemHeapPtr = psDevmemHeapPtrInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge,
+								  IMG_HANDLE hDevmemHeap)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP * psDevmemHeapInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemHeapInt = (DEVMEMINT_HEAP *) hDevmemHeap;
+
+	eError =
+		DevmemIntHeapDestroy(
+					psDevmemHeapInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+							     IMG_HANDLE hDevmemServerHeap,
+							     IMG_HANDLE hReservation,
+							     IMG_HANDLE hPMR,
+							     PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+							     IMG_HANDLE *phMapping)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP * psDevmemServerHeapInt;
+	DEVMEMINT_RESERVATION * psReservationInt;
+	PMR * psPMRInt;
+	DEVMEMINT_MAPPING * psMappingInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+	psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		DevmemIntMapPMR(
+					psDevmemServerHeapInt,
+					psReservationInt,
+					psPMRInt,
+					uiMapFlags,
+					&psMappingInt);
+
+	*phMapping = psMappingInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge,
+							       IMG_HANDLE hMapping)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_MAPPING * psMappingInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+
+	eError =
+		DevmemIntUnmapPMR(
+					psMappingInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE hBridge,
+								   IMG_HANDLE hDevmemServerHeap,
+								   IMG_DEV_VIRTADDR sAddress,
+								   IMG_DEVMEM_SIZE_T uiLength,
+								   IMG_HANDLE *phReservation)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP * psDevmemServerHeapInt;
+	DEVMEMINT_RESERVATION * psReservationInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+
+	eError =
+		DevmemIntReserveRange(
+					psDevmemServerHeapInt,
+					sAddress,
+					uiLength,
+					&psReservationInt);
+
+	*phReservation = psReservationInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge,
+								     IMG_HANDLE hReservation)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_RESERVATION * psReservationInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+	eError =
+		DevmemIntUnreserveRange(
+					psReservationInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeChangeSparseMem(IMG_HANDLE hBridge,
+							     IMG_HANDLE hSrvDevMemHeap,
+							     IMG_HANDLE hPMR,
+							     IMG_UINT32 ui32AllocPageCount,
+							     IMG_UINT32 *pui32AllocPageIndices,
+							     IMG_UINT32 ui32FreePageCount,
+							     IMG_UINT32 *pui32FreePageIndices,
+							     IMG_UINT32 ui32SparseFlags,
+							     PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							     IMG_DEV_VIRTADDR sDevVAddr,
+							     IMG_UINT64 ui64CPUVAddr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP * psSrvDevMemHeapInt;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap;
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		DevmemIntChangeSparse(
+					psSrvDevMemHeapInt,
+					psPMRInt,
+					ui32AllocPageCount,
+					pui32AllocPageIndices,
+					ui32FreePageCount,
+					pui32FreePageIndices,
+					ui32SparseFlags,
+					uiFlags,
+					sDevVAddr,
+					ui64CPUVAddr);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPages(IMG_HANDLE hBridge,
+							       IMG_HANDLE hReservation,
+							       IMG_HANDLE hPMR,
+							       IMG_UINT32 ui32PageCount,
+							       IMG_UINT32 ui32PhysicalPgOffset,
+							       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							       IMG_DEV_VIRTADDR sDevVAddr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_RESERVATION * psReservationInt;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		DevmemIntMapPages(
+					psReservationInt,
+					psPMRInt,
+					ui32PageCount,
+					ui32PhysicalPgOffset,
+					uiFlags,
+					sDevVAddr);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge,
+								 IMG_HANDLE hReservation,
+								 IMG_DEV_VIRTADDR sDevVAddr,
+								 IMG_UINT32 ui32PageCount)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_RESERVATION * psReservationInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+	eError =
+		DevmemIntUnmapPages(
+					psReservationInt,
+					sDevVAddr,
+					ui32PageCount);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge,
+								   IMG_HANDLE hDevmemCtx,
+								   IMG_DEV_VIRTADDR sAddress)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX * psDevmemCtxInt;
+
+	psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+	eError =
+		DevmemIntIsVDevAddrValid(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					psDevmemCtxInt,
+					sAddress);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge,
+								    IMG_UINT32 *pui32NumHeapConfigs)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		HeapCfgHeapConfigCount(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					pui32NumHeapConfigs);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE hBridge,
+							      IMG_UINT32 ui32HeapConfigIndex,
+							      IMG_UINT32 *pui32NumHeaps)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		HeapCfgHeapCount(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					ui32HeapConfigIndex,
+					pui32NumHeaps);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge,
+								   IMG_UINT32 ui32HeapConfigIndex,
+								   IMG_UINT32 ui32HeapConfigNameBufSz,
+								   IMG_CHAR *puiHeapConfigName)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		HeapCfgHeapConfigName(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					ui32HeapConfigIndex,
+					ui32HeapConfigNameBufSz,
+					puiHeapConfigName);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge,
+								IMG_UINT32 ui32HeapConfigIndex,
+								IMG_UINT32 ui32HeapIndex,
+								IMG_UINT32 ui32HeapNameBufSz,
+								IMG_CHAR *puiHeapNameOut,
+								IMG_DEV_VIRTADDR *psDevVAddrBase,
+								IMG_DEVMEM_SIZE_T *puiHeapLength,
+								IMG_UINT32 *pui32Log2DataPageSizeOut,
+								IMG_UINT32 *pui32Log2ImportAlignmentOut,
+								IMG_UINT32 *pui32Log2TilingStrideFactorOut)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		HeapCfgHeapDetails(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					ui32HeapConfigIndex,
+					ui32HeapIndex,
+					ui32HeapNameBufSz,
+					puiHeapNameOut,
+					psDevVAddrBase,
+					puiHeapLength,
+					pui32Log2DataPageSizeOut,
+					pui32Log2ImportAlignmentOut,
+					pui32Log2TilingStrideFactorOut);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge,
+									 IMG_HANDLE hDevmemCtx,
+									 IMG_UINT32 ui32PID,
+									 IMG_BOOL bRegister)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX * psDevmemCtxInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+	eError =
+		DevmemIntRegisterPFNotifyKM(
+					psDevmemCtxInt,
+					ui32PID,
+					bRegister);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeGetMaxDevMemSize(IMG_HANDLE hBridge,
+							      IMG_DEVMEM_SIZE_T *puiLMASize,
+							      IMG_DEVMEM_SIZE_T *puiUMASize)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		PVRSRVGetMaxDevMemSizeKM(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					puiLMASize,
+					puiUMASize);
+
+	return eError;
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/mm_bridge/common_mm_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/mm_bridge/common_mm_bridge.h
new file mode 100644
index 0000000..824205c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/mm_bridge/common_mm_bridge.h
@@ -0,0 +1,739 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for mm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_MM_BRIDGE_H
+#define COMMON_MM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_MM_CMD_FIRST			0
+#define PVRSRV_BRIDGE_MM_PMREXPORTPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_MM_PMRGETUID			PVRSRV_BRIDGE_MM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE			PVRSRV_BRIDGE_MM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE			PVRSRV_BRIDGE_MM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_MM_PMRUNREFPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+8
+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+9
+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+10
+#define PVRSRV_BRIDGE_MM_DEVMEMINTPIN			PVRSRV_BRIDGE_MM_CMD_FIRST+11
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN			PVRSRV_BRIDGE_MM_CMD_FIRST+12
+#define PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE			PVRSRV_BRIDGE_MM_CMD_FIRST+13
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE			PVRSRV_BRIDGE_MM_CMD_FIRST+14
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE			PVRSRV_BRIDGE_MM_CMD_FIRST+15
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY			PVRSRV_BRIDGE_MM_CMD_FIRST+16
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE			PVRSRV_BRIDGE_MM_CMD_FIRST+17
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY			PVRSRV_BRIDGE_MM_CMD_FIRST+18
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+19
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+20
+#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE			PVRSRV_BRIDGE_MM_CMD_FIRST+21
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE			PVRSRV_BRIDGE_MM_CMD_FIRST+22
+#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM			PVRSRV_BRIDGE_MM_CMD_FIRST+23
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES			PVRSRV_BRIDGE_MM_CMD_FIRST+24
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES			PVRSRV_BRIDGE_MM_CMD_FIRST+25
+#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID			PVRSRV_BRIDGE_MM_CMD_FIRST+26
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT			PVRSRV_BRIDGE_MM_CMD_FIRST+27
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT			PVRSRV_BRIDGE_MM_CMD_FIRST+28
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME			PVRSRV_BRIDGE_MM_CMD_FIRST+29
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS			PVRSRV_BRIDGE_MM_CMD_FIRST+30
+#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM			PVRSRV_BRIDGE_MM_CMD_FIRST+31
+#define PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE			PVRSRV_BRIDGE_MM_CMD_FIRST+32
+#define PVRSRV_BRIDGE_MM_CMD_LAST			(PVRSRV_BRIDGE_MM_CMD_FIRST+32)
+
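+/* These offsets index the mm entries in the services bridge dispatch
+ * table: the server registers one PVRSRVBridge* handler per command, and
+ * a client request selects its handler by PVRSRV_BRIDGE_MM_CMD_FIRST
+ * plus the offset. PVRSRV_BRIDGE_MM_CMD_LAST must always equal the
+ * highest offset so the table can bound the mm range.
+ */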
+
+/*******************************************
+            PMRExportPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMREXPORTPMR_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMREXPORTPMR;
+
+/* Bridge out structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMREXPORTPMR_TAG
+{
+	IMG_HANDLE hPMRExport;
+	IMG_UINT64 ui64Size;
+	IMG_UINT32 ui32Log2Contig;
+	IMG_UINT64 ui64Password;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMREXPORTPMR;
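+
+/* Every command in this header follows the pattern above: a packed IN
+ * structure carrying the parameters and a packed OUT structure carrying
+ * the results plus a PVRSRV_ERROR. __attribute__((packed)) keeps the
+ * layout identical on both sides of the user/kernel boundary so the
+ * bridge can move each structure as a single copy.
+ */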
+
+
+/*******************************************
+            PMRUnexportPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR_TAG
+{
+	IMG_HANDLE hPMRExport;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR;
+
+/* Bridge out structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR;
+
+
+/*******************************************
+            PMRGetUID          
+ *******************************************/
+
+/* Bridge in structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_IN_PMRGETUID_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRGETUID;
+
+/* Bridge out structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_OUT_PMRGETUID_TAG
+{
+	IMG_UINT64 ui64UID;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRGETUID;
+
+
+/*******************************************
+            PMRMakeLocalImportHandle          
+ *******************************************/
+
+/* Bridge in structure for PMRMakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE_TAG
+{
+	IMG_HANDLE hBuffer;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE;
+
+/* Bridge out structure for PMRMakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE_TAG
+{
+	IMG_HANDLE hExtMem;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE;
+
+
+/*******************************************
+            PMRUnmakeLocalImportHandle          
+ *******************************************/
+
+/* Bridge in structure for PMRUnmakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE_TAG
+{
+	IMG_HANDLE hExtMem;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE;
+
+/* Bridge out structure for PMRUnmakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE;
+
+
+/*******************************************
+            PMRImportPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRIMPORTPMR_TAG
+{
+	IMG_HANDLE hPMRExport;
+	IMG_UINT64 ui64uiPassword;
+	IMG_UINT64 ui64uiSize;
+	IMG_UINT32 ui32uiLog2Contig;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRIMPORTPMR;
+
+/* Bridge out structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRIMPORTPMR_TAG
+{
+	IMG_HANDLE hPMR;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRIMPORTPMR;
+
+
+/*******************************************
+            PMRLocalImportPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR_TAG
+{
+	IMG_HANDLE hExtHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR;
+
+/* Bridge out structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_ALIGN_T sAlign;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR;
+
+
+/*******************************************
+            PMRUnrefPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFPMR_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNREFPMR;
+
+/* Bridge out structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNREFPMR;
+
+
+/*******************************************
+            PMRUnrefUnlockPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefUnlockPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR;
+
+/* Bridge out structure for PMRUnrefUnlockPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR;
+
+
+/*******************************************
+            PhysmemNewRamBackedPMR          
+ *******************************************/
+
+/* Bridge in structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_SIZE_T uiChunkSize;
+	IMG_UINT32 ui32NumPhysChunks;
+	IMG_UINT32 ui32NumVirtChunks;
+	IMG_UINT32 * pui32MappingTable;
+	IMG_UINT32 ui32Log2PageSize;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+	IMG_UINT32 ui32AnnotationLength;
+	const IMG_CHAR * puiAnnotation;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR;
+
+/* Bridge out structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR;
+
+
+/*******************************************
+            PhysmemNewRamBackedLockedPMR          
+ *******************************************/
+
+/* Bridge in structure for PhysmemNewRamBackedLockedPMR */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG
+{
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_SIZE_T uiChunkSize;
+	IMG_UINT32 ui32NumPhysChunks;
+	IMG_UINT32 ui32NumVirtChunks;
+	IMG_UINT32 * pui32MappingTable;
+	IMG_UINT32 ui32Log2PageSize;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+	IMG_UINT32 ui32AnnotationLength;
+	const IMG_CHAR * puiAnnotation;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR;
+
+/* Bridge out structure for PhysmemNewRamBackedLockedPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR;
+
+
+/*******************************************
+            DevmemIntPin          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPin */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPIN_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPIN;
+
+/* Bridge out structure for DevmemIntPin */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPIN_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPIN;
+
+
+/*******************************************
+            DevmemIntUnpin          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnpin */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN;
+
+/* Bridge out structure for DevmemIntUnpin */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN;
+
+
+/*******************************************
+            DevmemIntPinValidate          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPinValidate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE_TAG
+{
+	IMG_HANDLE hMapping;
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE;
+
+/* Bridge out structure for DevmemIntPinValidate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE;
+
+
+/*******************************************
+            DevmemIntUnpinInvalidate          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnpinInvalidate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE_TAG
+{
+	IMG_HANDLE hMapping;
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE;
+
+/* Bridge out structure for DevmemIntUnpinInvalidate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE;
+
+
+/*******************************************
+            DevmemIntCtxCreate          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE_TAG
+{
+	IMG_BOOL bbKernelMemoryCtx;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE;
+
+/* Bridge out structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE_TAG
+{
+	IMG_HANDLE hDevMemServerContext;
+	IMG_HANDLE hPrivData;
+	IMG_UINT32 ui32CPUCacheLineSize;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE;
+
+
+/*******************************************
+            DevmemIntCtxDestroy          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY_TAG
+{
+	IMG_HANDLE hDevmemServerContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY;
+
+/* Bridge out structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY;
+
+
+/*******************************************
+            DevmemIntHeapCreate          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG
+{
+	IMG_HANDLE hDevmemCtx;
+	IMG_DEV_VIRTADDR sHeapBaseAddr;
+	IMG_DEVMEM_SIZE_T uiHeapLength;
+	IMG_UINT32 ui32Log2DataPageSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE;
+
+/* Bridge out structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE_TAG
+{
+	IMG_HANDLE hDevmemHeapPtr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE;
+
+
+/*******************************************
+            DevmemIntHeapDestroy          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY_TAG
+{
+	IMG_HANDLE hDevmemHeap;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY;
+
+/* Bridge out structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY;
+
+
+/*******************************************
+            DevmemIntMapPMR          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG
+{
+	IMG_HANDLE hDevmemServerHeap;
+	IMG_HANDLE hReservation;
+	IMG_HANDLE hPMR;
+	PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR;
+
+/* Bridge out structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG
+{
+	IMG_HANDLE hMapping;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR;
+
+
+/*******************************************
+            DevmemIntUnmapPMR          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG
+{
+	IMG_HANDLE hMapping;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR;
+
+/* Bridge out structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR;
+
+
+/*******************************************
+            DevmemIntReserveRange          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG
+{
+	IMG_HANDLE hDevmemServerHeap;
+	IMG_DEV_VIRTADDR sAddress;
+	IMG_DEVMEM_SIZE_T uiLength;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE;
+
+/* Bridge out structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG
+{
+	IMG_HANDLE hReservation;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE;
+
+
+/*******************************************
+            DevmemIntUnreserveRange          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE_TAG
+{
+	IMG_HANDLE hReservation;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE;
+
+/* Bridge out structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE;
+
+
+/*******************************************
+            ChangeSparseMem          
+ *******************************************/
+
+/* Bridge in structure for ChangeSparseMem */
+typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG
+{
+	IMG_HANDLE hSrvDevMemHeap;
+	IMG_HANDLE hPMR;
+	IMG_UINT32 ui32AllocPageCount;
+	IMG_UINT32 * pui32AllocPageIndices;
+	IMG_UINT32 ui32FreePageCount;
+	IMG_UINT32 * pui32FreePageIndices;
+	IMG_UINT32 ui32SparseFlags;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_UINT64 ui64CPUVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CHANGESPARSEMEM;
+
+/* Bridge out structure for ChangeSparseMem */
+typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM;
+
+
+/*******************************************
+            DevmemIntMapPages          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES_TAG
+{
+	IMG_HANDLE hReservation;
+	IMG_HANDLE hPMR;
+	IMG_UINT32 ui32PageCount;
+	IMG_UINT32 ui32PhysicalPgOffset;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+	IMG_DEV_VIRTADDR sDevVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES;
+
+/* Bridge out structure for DevmemIntMapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES;
+
+
+/*******************************************
+            DevmemIntUnmapPages          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES_TAG
+{
+	IMG_HANDLE hReservation;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_UINT32 ui32PageCount;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES;
+
+/* Bridge out structure for DevmemIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES;
+
+
+/*******************************************
+            DevmemIsVDevAddrValid          
+ *******************************************/
+
+/* Bridge in structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID_TAG
+{
+	IMG_HANDLE hDevmemCtx;
+	IMG_DEV_VIRTADDR sAddress;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID;
+
+/* Bridge out structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID;
+
+
+/*******************************************
+            HeapCfgHeapConfigCount          
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT;
+
+/* Bridge out structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+	IMG_UINT32 ui32NumHeapConfigs;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT;
+
+
+/*******************************************
+            HeapCfgHeapCount          
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT_TAG
+{
+	IMG_UINT32 ui32HeapConfigIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT;
+
+/* Bridge out structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT_TAG
+{
+	IMG_UINT32 ui32NumHeaps;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT;
+
+
+/*******************************************
+            HeapCfgHeapConfigName          
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME_TAG
+{
+	IMG_UINT32 ui32HeapConfigIndex;
+	IMG_UINT32 ui32HeapConfigNameBufSz;
+	/* Output pointer puiHeapConfigName is also an implied input */
+	IMG_CHAR * puiHeapConfigName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME;
+
+/* Bridge out structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME_TAG
+{
+	IMG_CHAR * puiHeapConfigName;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME;
+
+
+/*******************************************
+            HeapCfgHeapDetails          
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS_TAG
+{
+	IMG_UINT32 ui32HeapConfigIndex;
+	IMG_UINT32 ui32HeapIndex;
+	IMG_UINT32 ui32HeapNameBufSz;
+	/* Output pointer puiHeapNameOut is also an implied input */
+	IMG_CHAR * puiHeapNameOut;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS;
+
+/* Bridge out structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG
+{
+	IMG_CHAR * puiHeapNameOut;
+	IMG_DEV_VIRTADDR sDevVAddrBase;
+	IMG_DEVMEM_SIZE_T uiHeapLength;
+	IMG_UINT32 ui32Log2DataPageSizeOut;
+	IMG_UINT32 ui32Log2ImportAlignmentOut;
+	IMG_UINT32 ui32Log2TilingStrideFactorOut;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS;
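+
+/* For the two name queries above the caller provides the destination
+ * buffer: the IN structure carries the buffer pointer and its size, the
+ * server writes at most that many characters back through it, and the
+ * same pointer is echoed in the OUT structure. This is what the "implied
+ * input" comments on puiHeapConfigName and puiHeapNameOut refer to.
+ */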
+
+
+/*******************************************
+            DevmemIntRegisterPFNotifyKM          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntRegisterPFNotifyKM */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG
+{
+	IMG_HANDLE hDevmemCtx;
+	IMG_UINT32 ui32PID;
+	IMG_BOOL bRegister;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM;
+
+/* Bridge out structure for DevmemIntRegisterPFNotifyKM */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM;
+
+
+/*******************************************
+            GetMaxDevMemSize          
+ *******************************************/
+
+/* Bridge in structure for GetMaxDevMemSize */
+typedef struct PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE;
+
+/* Bridge out structure for GetMaxDevMemSize */
+typedef struct PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE_TAG
+{
+	IMG_DEVMEM_SIZE_T uiLMASize;
+	IMG_DEVMEM_SIZE_T uiUMASize;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE;
+
+
+#endif /* COMMON_MM_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/mm_bridge/server_mm_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/mm_bridge/server_mm_bridge.c
new file mode 100644
index 0000000..dd8b78d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/mm_bridge/server_mm_bridge.c
@@ -0,0 +1,3280 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for mm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+
+
+#include "common_mm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+static PVRSRV_ERROR ReleasePMRExport(void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	return PVRSRV_OK;
+}
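+
+/* ReleasePMRExport is intentionally a no-op: it is attached to the
+ * cross-process export handle created in PVRSRVBridgePMRExportPMR below,
+ * while the connection-specific twin handle carries the real
+ * PMRUnexportPMR release (see the comment in that function).
+ */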
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMREXPORTPMR *psPMRExportPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMREXPORTPMR *psPMRExportPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psPMRExportPMRIN->hPMR;
+	PMR * psPMRInt = NULL;
+	PMR_EXPORT * psPMRExportInt = NULL;
+	IMG_HANDLE hPMRExportInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+				{
+					/* Look up the address from the handle */
+					psPMRExportPMROUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psPMRExportPMROUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto PMRExportPMR_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psPMRExportPMROUT->eError =
+		PMRExportPMR(
+					psPMRInt,
+					&psPMRExportInt,
+					&psPMRExportPMROUT->ui64Size,
+					&psPMRExportPMROUT->ui32Log2Contig,
+					&psPMRExportPMROUT->ui64Password);
+	/* Exit early if bridged call fails */
+	if(psPMRExportPMROUT->eError != PVRSRV_OK)
+	{
+		goto PMRExportPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+	/*
+	 * For cases where we need a cross process handle we actually allocate two.
+	 * 
+	 * The first one is a connection-specific handle and it gets given the real
+	 * release function. This handle does *NOT* get returned to the caller. Its
+	 * purpose is to release any leaked resources when we either have a bad or
+	 * abnormally terminated client. If we didn't do this then the resource
+	 * wouldn't be freed until driver unload. If the resource is freed normally,
+	 * this handle can be looked up via the cross process handle and then
+	 * released accordingly.
+	 * 
+	 * The second one is a cross process handle and it gets given a noop release
+	 * function. This handle does get returned to the caller.
+	 */
+
+	psPMRExportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+							&hPMRExportInt,
+							(void *) psPMRExportInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PMRUnexportPMR);
+	if (psPMRExportPMROUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto PMRExportPMR_exit;
+	}
+
+	psPMRExportPMROUT->eError = PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE,
+							&psPMRExportPMROUT->hPMRExport,
+							(void *) psPMRExportInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&ReleasePMRExport);
+	if (psPMRExportPMROUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto PMRExportPMR_exit;
+	}
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+PMRExportPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psPMRExportPMROUT->eError != PVRSRV_OK)
+	{
+		/* Lock over handle creation cleanup. */
+		LockHandle();
+		if (psPMRExportPMROUT->hPMRExport)
+		{
+
+
+			PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+						(IMG_HANDLE) psPMRExportPMROUT->hPMRExport,
+						PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+			if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				        "PVRSRVBridgePMRExportPMR: %s",
+				        PVRSRVGetErrorStringKM(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+		}
+
+		if (hPMRExportInt)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+						hPMRExportInt,
+						PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+			if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				        "PVRSRVBridgePMRExportPMR: %s",
+				        PVRSRVGetErrorStringKM(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psPMRExportInt = NULL;
+		}
+
+		/* Release now we have cleaned up creation handles. */
+		UnlockHandle();
+		if (psPMRExportInt)
+		{
+			PMRUnexportPMR(psPMRExportInt);
+		}
+	}
+
+
+	return 0;
+}
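+
+/* The handler above shows the generated shape shared by every entry
+ * point in this file: look up the input handles under the handle lock,
+ * drop the lock, call the implementation, re-take the lock to create any
+ * output handles, and on failure unwind whatever handles were created so
+ * the underlying resource is not leaked.
+ */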
+
+
+static IMG_INT
+PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *psPMRUnexportPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *psPMRUnexportPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR_EXPORT * psPMRExportInt = NULL;
+	IMG_HANDLE hPMRExportInt = NULL;
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+	psPMRUnexportPMROUT->eError =
+		PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+					(void **) &psPMRExportInt,
+					(IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+					IMG_FALSE);
+	if (psPMRUnexportPMROUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgePMRUnexportPMR: %s",
+		        PVRSRVGetErrorStringKM(psPMRUnexportPMROUT->eError)));
+	}
+	PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+
+	/*
+	 * Find the connection specific handle that represents the same data
+	 * as the cross process handle as releasing it will actually call the
+	 * data's real release function (see the function where the cross
+	 * process handle is allocated for more details).
+	 */
+	psPMRUnexportPMROUT->eError =
+		PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					&hPMRExportInt,
+					psPMRExportInt,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	if (psPMRUnexportPMROUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgePMRUnexportPMR: %s",
+		        PVRSRVGetErrorStringKM(psPMRUnexportPMROUT->eError)));
+	}
+	PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+
+	psPMRUnexportPMROUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					hPMRExportInt,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	if ((psPMRUnexportPMROUT->eError != PVRSRV_OK) &&
+	    (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgePMRUnexportPMR: %s",
+		        PVRSRVGetErrorStringKM(psPMRUnexportPMROUT->eError)));
+	}
+	PVR_ASSERT((psPMRUnexportPMROUT->eError == PVRSRV_OK) ||
+	           (psPMRUnexportPMROUT->eError == PVRSRV_ERROR_RETRY));
+
+
+
+
+
+	psPMRUnexportPMROUT->eError =
+		PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+					(IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	if ((psPMRUnexportPMROUT->eError != PVRSRV_OK) &&
+	    (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgePMRUnexportPMR: %s",
+		        PVRSRVGetErrorStringKM(psPMRUnexportPMROUT->eError)));
+		UnlockHandle();
+		goto PMRUnexportPMR_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+PMRUnexportPMR_exit:
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRGETUID *psPMRGetUIDIN,
+					  PVRSRV_BRIDGE_OUT_PMRGETUID *psPMRGetUIDOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psPMRGetUIDIN->hPMR;
+	PMR * psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
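+				/*
+				 * IMG_TRUE asks the lookup to take a reference on the
+				 * underlying object; the matching unreference is in the
+				 * _exit cleanup path below.
+				 */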
+				{
+					/* Look up the address from the handle */
+					psPMRGetUIDOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psPMRGetUIDOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto PMRGetUID_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psPMRGetUIDOUT->eError =
+		PMRGetUID(
+					psPMRInt,
+					&psPMRGetUIDOUT->ui64UID);
+
+
+
+
+PMRGetUID_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
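+/*
+ * Wraps a PMR local-export handle as a DEVMEM_MEM_IMPORT handle in the
+ * caller's per-process handle base; PVRSRVBridgePMRLocalImportPMR below
+ * consumes handles of that type.
+ */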
+static IMG_INT
+PVRSRVBridgePMRMakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleIN,
+					  PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hBuffer = psPMRMakeLocalImportHandleIN->hBuffer;
+	PMR * psBufferInt = NULL;
+	PMR * psExtMemInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRMakeLocalImportHandleOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psBufferInt,
+											hBuffer,
+											PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+											IMG_TRUE);
+					if(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto PMRMakeLocalImportHandle_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psPMRMakeLocalImportHandleOUT->eError =
+		PMRMakeLocalImportHandle(
+					psBufferInt,
+					&psExtMemInt);
+	/* Exit early if bridged call fails */
+	if(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+	{
+		goto PMRMakeLocalImportHandle_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psPMRMakeLocalImportHandleOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+							&psPMRMakeLocalImportHandleOUT->hExtMem,
+							(void *) psExtMemInt,
+							PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PMRUnmakeLocalImportHandle);
+	if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto PMRMakeLocalImportHandle_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+PMRMakeLocalImportHandle_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psBufferInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hBuffer,
+										PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+	{
+		if (psExtMemInt)
+		{
+			PMRUnmakeLocalImportHandle(psExtMemInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleIN,
+					  PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psPMRUnmakeLocalImportHandleOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					(IMG_HANDLE) psPMRUnmakeLocalImportHandleIN->hExtMem,
+					PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+	if ((psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_OK) &&
+	    (psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgePMRUnmakeLocalImportHandle: %s",
+		        PVRSRVGetErrorStringKM(psPMRUnmakeLocalImportHandleOUT->eError)));
+		UnlockHandle();
+		goto PMRUnmakeLocalImportHandle_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+PMRUnmakeLocalImportHandle_exit:
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRIMPORTPMR *psPMRImportPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *psPMRImportPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMRExport = psPMRImportPMRIN->hPMRExport;
+	PMR_EXPORT * psPMRExportInt = NULL;
+	PMR * psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
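+				/*
+				 * The export handle lives in the global KERNEL_HANDLE_BASE,
+				 * so any process can look it up; PhysmemImportPMR() below is
+				 * passed the caller-supplied password, size and contiguity
+				 * for validation against the exported PMR.
+				 */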
+				{
+					/* Look up the address from the handle */
+					psPMRImportPMROUT->eError =
+						PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+											(void **) &psPMRExportInt,
+											hPMRExport,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+											IMG_TRUE);
+					if(psPMRImportPMROUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto PMRImportPMR_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psPMRImportPMROUT->eError =
+		PhysmemImportPMR(psConnection, OSGetDevData(psConnection),
+					psPMRExportInt,
+					psPMRImportPMRIN->ui64uiPassword,
+					psPMRImportPMRIN->ui64uiSize,
+					psPMRImportPMRIN->ui32uiLog2Contig,
+					&psPMRInt);
+	/* Exit early if bridged call fails */
+	if(psPMRImportPMROUT->eError != PVRSRV_OK)
+	{
+		goto PMRImportPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psPMRImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psPMRImportPMROUT->hPMR,
+							(void *) psPMRInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (psPMRImportPMROUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto PMRImportPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+PMRImportPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRExportInt)
+					{
+						PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+										hPMRExport,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psPMRImportPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRInt)
+		{
+			PMRUnrefPMR(psPMRInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRLocalImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *psPMRLocalImportPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *psPMRLocalImportPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hExtHandle = psPMRLocalImportPMRIN->hExtHandle;
+	PMR * psExtHandleInt = NULL;
+	PMR * psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRLocalImportPMROUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+											(void **) &psExtHandleInt,
+											hExtHandle,
+											PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+											IMG_TRUE);
+					if(psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto PMRLocalImportPMR_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psPMRLocalImportPMROUT->eError =
+		PMRLocalImportPMR(
+					psExtHandleInt,
+					&psPMRInt,
+					&psPMRLocalImportPMROUT->uiSize,
+					&psPMRLocalImportPMROUT->sAlign);
+	/* Exit early if bridged call fails */
+	if(psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+	{
+		goto PMRLocalImportPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psPMRLocalImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psPMRLocalImportPMROUT->hPMR,
+							(void *) psPMRInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto PMRLocalImportPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+PMRLocalImportPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psExtHandleInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+										hExtHandle,
+										PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRInt)
+		{
+			PMRUnrefPMR(psPMRInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
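+/*
+ * Unref is implemented purely as handle release: PMRUnrefPMR() was
+ * registered as the PFN_HANDLE_RELEASE callback at allocation time, so
+ * releasing the handle drops the underlying PMR reference.
+ */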
+static IMG_INT
+PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRUNREFPMR *psPMRUnrefPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMRUNREFPMR *psPMRUnrefPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psPMRUnrefPMROUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psPMRUnrefPMRIN->hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	if ((psPMRUnrefPMROUT->eError != PVRSRV_OK) &&
+	    (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgePMRUnrefPMR: %s",
+		        PVRSRVGetErrorStringKM(psPMRUnrefPMROUT->eError)));
+		UnlockHandle();
+		goto PMRUnrefPMR_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+PMRUnrefPMR_exit:
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psPMRUnrefUnlockPMROUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psPMRUnrefUnlockPMRIN->hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	if ((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK) &&
+	    (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgePMRUnrefUnlockPMR: %s",
+		        PVRSRVGetErrorStringKM(psPMRUnrefUnlockPMROUT->eError)));
+		UnlockHandle();
+		goto PMRUnrefUnlockPMR_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+PMRUnrefUnlockPMR_exit:
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMRIN,
+					  PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_UINT32 *ui32MappingTableInt = NULL;
+	IMG_CHAR *uiAnnotationInt = NULL;
+	PMR * psPMRPtrInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+			(psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) +
+			(psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
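+	/*
+	 * Stage the variable-length array arguments: reuse the tail of the
+	 * fixed-size bridge input buffer when it is big enough, otherwise fall
+	 * back to a heap allocation that is freed at function exit.
+	 */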
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPhysmemNewRamBackedPMRIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto PhysmemNewRamBackedPMR_exit;
+			}
+		}
+	}
+
+	if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks != 0)
+	{
+		ui32MappingTableInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32MappingTableInt, psPhysmemNewRamBackedPMRIN->pui32MappingTable, psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto PhysmemNewRamBackedPMR_exit;
+				}
+			}
+	if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength != 0)
+	{
+		uiAnnotationInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiAnnotationInt, psPhysmemNewRamBackedPMRIN->puiAnnotation, psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto PhysmemNewRamBackedPMR_exit;
+				}
+			}
+
+
+	psPhysmemNewRamBackedPMROUT->eError =
+		PhysmemNewRamBackedPMR(psConnection, OSGetDevData(psConnection),
+					psPhysmemNewRamBackedPMRIN->uiSize,
+					psPhysmemNewRamBackedPMRIN->uiChunkSize,
+					psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks,
+					psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks,
+					ui32MappingTableInt,
+					psPhysmemNewRamBackedPMRIN->ui32Log2PageSize,
+					psPhysmemNewRamBackedPMRIN->uiFlags,
+					psPhysmemNewRamBackedPMRIN->ui32AnnotationLength,
+					uiAnnotationInt,
+					&psPMRPtrInt);
+	/* Exit early if bridged call fails */
+	if(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+	{
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psPhysmemNewRamBackedPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psPhysmemNewRamBackedPMROUT->hPMRPtr,
+							(void *) psPMRPtrInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+PhysmemNewRamBackedPMR_exit:
+
+
+
+	if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			PMRUnrefPMR(psPMRPtrInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
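+/*
+ * Same staging/copy pattern as PVRSRVBridgePhysmemNewRamBackedPMR above;
+ * here the release callback is PMRUnrefUnlockPMR since this variant creates
+ * the PMR in the locked state.
+ */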
+static IMG_INT
+PVRSRVBridgePhysmemNewRamBackedLockedPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMRIN,
+					  PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_UINT32 *ui32MappingTableInt = NULL;
+	IMG_CHAR *uiAnnotationInt = NULL;
+	PMR * psPMRPtrInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+			(psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) +
+			(psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemNewRamBackedLockedPMRIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPhysmemNewRamBackedLockedPMRIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto PhysmemNewRamBackedLockedPMR_exit;
+			}
+		}
+	}
+
+	if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks != 0)
+	{
+		ui32MappingTableInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32MappingTableInt, psPhysmemNewRamBackedLockedPMRIN->pui32MappingTable, psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto PhysmemNewRamBackedLockedPMR_exit;
+				}
+			}
+	if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength != 0)
+	{
+		uiAnnotationInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiAnnotationInt, psPhysmemNewRamBackedLockedPMRIN->puiAnnotation, psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto PhysmemNewRamBackedLockedPMR_exit;
+				}
+			}
+
+
+	psPhysmemNewRamBackedLockedPMROUT->eError =
+		PhysmemNewRamBackedLockedPMR(psConnection, OSGetDevData(psConnection),
+					psPhysmemNewRamBackedLockedPMRIN->uiSize,
+					psPhysmemNewRamBackedLockedPMRIN->uiChunkSize,
+					psPhysmemNewRamBackedLockedPMRIN->ui32NumPhysChunks,
+					psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks,
+					ui32MappingTableInt,
+					psPhysmemNewRamBackedLockedPMRIN->ui32Log2PageSize,
+					psPhysmemNewRamBackedLockedPMRIN->uiFlags,
+					psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength,
+					uiAnnotationInt,
+					&psPMRPtrInt);
+	/* Exit early if bridged call fails */
+	if(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)
+	{
+		goto PhysmemNewRamBackedLockedPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psPhysmemNewRamBackedLockedPMROUT->hPMRPtr,
+							(void *) psPMRPtrInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PMRUnrefUnlockPMR);
+	if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto PhysmemNewRamBackedLockedPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+PhysmemNewRamBackedLockedPMR_exit:
+
+
+
+	if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			PMRUnrefUnlockPMR(psPMRPtrInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPin(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTPIN *psDevmemIntPinIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *psDevmemIntPinOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psDevmemIntPinIN->hPMR;
+	PMR * psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntPinOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psDevmemIntPinOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntPin_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevmemIntPinOUT->eError =
+		DevmemIntPin(
+					psPMRInt);
+
+
+
+
+DevmemIntPin_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnpin(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *psDevmemIntUnpinIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *psDevmemIntUnpinOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psDevmemIntUnpinIN->hPMR;
+	PMR * psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntUnpinOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psDevmemIntUnpinOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntUnpin_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevmemIntUnpinOUT->eError =
+		DevmemIntUnpin(
+					psPMRInt);
+
+
+
+
+DevmemIntUnpin_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPinValidate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hMapping = psDevmemIntPinValidateIN->hMapping;
+	DEVMEMINT_MAPPING * psMappingInt = NULL;
+	IMG_HANDLE hPMR = psDevmemIntPinValidateIN->hPMR;
+	PMR * psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntPinValidateOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psMappingInt,
+											hMapping,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+											IMG_TRUE);
+					if(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntPinValidate_exit;
+					}
+				}
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntPinValidateOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntPinValidate_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevmemIntPinValidateOUT->eError =
+		DevmemIntPinValidate(
+					psMappingInt,
+					psPMRInt);
+
+
+
+
+DevmemIntPinValidate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psMappingInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hMapping,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+					}
+				}
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnpinInvalidate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hMapping = psDevmemIntUnpinInvalidateIN->hMapping;
+	DEVMEMINT_MAPPING * psMappingInt = NULL;
+	IMG_HANDLE hPMR = psDevmemIntUnpinInvalidateIN->hPMR;
+	PMR * psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntUnpinInvalidateOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psMappingInt,
+											hMapping,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+											IMG_TRUE);
+					if(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntUnpinInvalidate_exit;
+					}
+				}
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntUnpinInvalidateOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntUnpinInvalidate_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevmemIntUnpinInvalidateOUT->eError =
+		DevmemIntUnpinInvalidate(
+					psMappingInt,
+					psPMRInt);
+
+
+
+
+DevmemIntUnpinInvalidate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psMappingInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hMapping,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+					}
+				}
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DEVMEMINT_CTX * psDevMemServerContextInt = NULL;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL;
+
+	psDevmemIntCtxCreateOUT->eError =
+		DevmemIntCtxCreate(psConnection, OSGetDevData(psConnection),
+					psDevmemIntCtxCreateIN->bbKernelMemoryCtx,
+					&psDevMemServerContextInt,
+					&hPrivDataInt,
+					&psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize);
+	/* Exit early if bridged call fails */
+	if(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntCtxCreate_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psDevmemIntCtxCreateOUT->hDevMemServerContext,
+							(void *) psDevMemServerContextInt,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&DevmemIntCtxDestroy);
+	if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto DevmemIntCtxCreate_exit;
+	}
+
+
+
+
+
+
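+	/*
+	 * hPrivData is allocated as a sub-handle of hDevMemServerContext and is
+	 * therefore expected to be released along with its parent; note that the
+	 * error path below only cleans up the parent handle.
+	 */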
+	psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+							&psDevmemIntCtxCreateOUT->hPrivData,
+							(void *) hPrivDataInt,
+							PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							psDevmemIntCtxCreateOUT->hDevMemServerContext);
+	if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto DevmemIntCtxCreate_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+DevmemIntCtxCreate_exit:
+
+
+
+	if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+	{
+		/* Lock over handle creation cleanup. */
+		LockHandle();
+		if (psDevmemIntCtxCreateOUT->hDevMemServerContext)
+		{
+
+
+			PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						(IMG_HANDLE) psDevmemIntCtxCreateOUT->hDevMemServerContext,
+						PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+			if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				        "PVRSRVBridgeDevmemIntCtxCreate: %s",
+				        PVRSRVGetErrorStringKM(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psDevMemServerContextInt = NULL;
+		}
+
+
+		/* Release now we have cleaned up creation handles. */
+		UnlockHandle();
+		if (psDevMemServerContextInt)
+		{
+			DevmemIntCtxDestroy(psDevMemServerContextInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psDevmemIntCtxDestroyOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntCtxDestroyIN->hDevmemServerContext,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	if ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) &&
+	    (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeDevmemIntCtxDestroy: %s",
+		        PVRSRVGetErrorStringKM(psDevmemIntCtxDestroyOUT->eError)));
+		UnlockHandle();
+		goto DevmemIntCtxDestroy_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+DevmemIntCtxDestroy_exit:
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx;
+	DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+	DEVMEMINT_HEAP * psDevmemHeapPtrInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntHeapCreateOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psDevmemCtxInt,
+											hDevmemCtx,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+											IMG_TRUE);
+					if(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntHeapCreate_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevmemIntHeapCreateOUT->eError =
+		DevmemIntHeapCreate(
+					psDevmemCtxInt,
+					psDevmemIntHeapCreateIN->sHeapBaseAddr,
+					psDevmemIntHeapCreateIN->uiHeapLength,
+					psDevmemIntHeapCreateIN->ui32Log2DataPageSize,
+					&psDevmemHeapPtrInt);
+	/* Exit early if bridged call fails */
+	if(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntHeapCreate_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psDevmemIntHeapCreateOUT->hDevmemHeapPtr,
+							(void *) psDevmemHeapPtrInt,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&DevmemIntHeapDestroy);
+	if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto DevmemIntHeapCreate_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+DevmemIntHeapCreate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psDevmemCtxInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hDevmemCtx,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+	{
+		if (psDevmemHeapPtrInt)
+		{
+			DevmemIntHeapDestroy(psDevmemHeapPtrInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psDevmemIntHeapDestroyOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+	if ((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) &&
+	    (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeDevmemIntHeapDestroy: %s",
+		        PVRSRVGetErrorStringKM(psDevmemIntHeapDestroyOUT->eError)));
+		UnlockHandle();
+		goto DevmemIntHeapDestroy_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+DevmemIntHeapDestroy_exit:
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap;
+	DEVMEMINT_HEAP * psDevmemServerHeapInt = NULL;
+	IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation;
+	DEVMEMINT_RESERVATION * psReservationInt = NULL;
+	IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR;
+	PMR * psPMRInt = NULL;
+	DEVMEMINT_MAPPING * psMappingInt = NULL;
+
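+	/*
+	 * Three inputs are resolved under a single lock acquisition: the server
+	 * heap, the reservation covering the target virtual range, and the PMR
+	 * to be mapped into it.
+	 */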
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntMapPMROUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psDevmemServerHeapInt,
+											hDevmemServerHeap,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+											IMG_TRUE);
+					if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntMapPMR_exit;
+					}
+				}
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntMapPMROUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psReservationInt,
+											hReservation,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+											IMG_TRUE);
+					if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntMapPMR_exit;
+					}
+				}
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntMapPMROUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntMapPMR_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevmemIntMapPMROUT->eError =
+		DevmemIntMapPMR(
+					psDevmemServerHeapInt,
+					psReservationInt,
+					psPMRInt,
+					psDevmemIntMapPMRIN->uiMapFlags,
+					&psMappingInt);
+	/* Exit early if bridged call fails */
+	if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntMapPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psDevmemIntMapPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psDevmemIntMapPMROUT->hMapping,
+							(void *) psMappingInt,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&DevmemIntUnmapPMR);
+	if (psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto DevmemIntMapPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+DevmemIntMapPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psDevmemServerHeapInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hDevmemServerHeap,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+					}
+				}
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psReservationInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hReservation,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+					}
+				}
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+	{
+		if (psMappingInt)
+		{
+			DevmemIntUnmapPMR(psMappingInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psDevmemIntUnmapPMROUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntUnmapPMRIN->hMapping,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+	if ((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) &&
+	    (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeDevmemIntUnmapPMR: %s",
+		        PVRSRVGetErrorStringKM(psDevmemIntUnmapPMROUT->eError)));
+		UnlockHandle();
+		goto DevmemIntUnmapPMR_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+DevmemIntUnmapPMR_exit:
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeIN->hDevmemServerHeap;
+	DEVMEMINT_HEAP * psDevmemServerHeapInt = NULL;
+	DEVMEMINT_RESERVATION * psReservationInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntReserveRangeOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psDevmemServerHeapInt,
+											hDevmemServerHeap,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+											IMG_TRUE);
+					if(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntReserveRange_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevmemIntReserveRangeOUT->eError =
+		DevmemIntReserveRange(
+					psDevmemServerHeapInt,
+					psDevmemIntReserveRangeIN->sAddress,
+					psDevmemIntReserveRangeIN->uiLength,
+					&psReservationInt);
+	/* Exit early if bridged call fails */
+	if(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntReserveRange_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psDevmemIntReserveRangeOUT->hReservation,
+							(void *) psReservationInt,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&DevmemIntUnreserveRange);
+	if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto DevmemIntReserveRange_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+DevmemIntReserveRange_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psDevmemServerHeapInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hDevmemServerHeap,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+	{
+		if (psReservationInt)
+		{
+			DevmemIntUnreserveRange(psReservationInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psDevmemIntUnreserveRangeOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntUnreserveRangeIN->hReservation,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+	if ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) &&
+	    (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeDevmemIntUnreserveRange: %s",
+		        PVRSRVGetErrorStringKM(psDevmemIntUnreserveRangeOUT->eError)));
+		UnlockHandle();
+		goto DevmemIntUnreserveRange_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+DevmemIntUnreserveRange_exit:
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN,
+					  PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap;
+	DEVMEMINT_HEAP * psSrvDevMemHeapInt = NULL;
+	IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR;
+	PMR * psPMRInt = NULL;
+	IMG_UINT32 *ui32AllocPageIndicesInt = NULL;
+	IMG_UINT32 *ui32FreePageIndicesInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+			(psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) +
+			(psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) +
+			0;
+
+
+
+
+
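+	/*
+	 * The alloc/free page-index arrays are staged kernel-side (input-buffer
+	 * tail or heap allocation) and copied in with OSCopyFromUser() before
+	 * any handle locks are taken.
+	 */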
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psChangeSparseMemIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psChangeSparseMemIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psChangeSparseMemOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto ChangeSparseMem_exit;
+			}
+		}
+	}
+
+	if (psChangeSparseMemIN->ui32AllocPageCount != 0)
+	{
+		ui32AllocPageIndicesInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32AllocPageIndicesInt, psChangeSparseMemIN->pui32AllocPageIndices, psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto ChangeSparseMem_exit;
+				}
+			}
+	if (psChangeSparseMemIN->ui32FreePageCount != 0)
+	{
+		ui32FreePageIndicesInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32FreePageIndicesInt, psChangeSparseMemIN->pui32FreePageIndices, psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto ChangeSparseMem_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psChangeSparseMemOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psSrvDevMemHeapInt,
+											hSrvDevMemHeap,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+											IMG_TRUE);
+					if(psChangeSparseMemOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto ChangeSparseMem_exit;
+					}
+				}
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psChangeSparseMemOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psChangeSparseMemOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto ChangeSparseMem_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psChangeSparseMemOUT->eError =
+		DevmemIntChangeSparse(
+					psSrvDevMemHeapInt,
+					psPMRInt,
+					psChangeSparseMemIN->ui32AllocPageCount,
+					ui32AllocPageIndicesInt,
+					psChangeSparseMemIN->ui32FreePageCount,
+					ui32FreePageIndicesInt,
+					psChangeSparseMemIN->ui32SparseFlags,
+					psChangeSparseMemIN->uiFlags,
+					psChangeSparseMemIN->sDevVAddr,
+					psChangeSparseMemIN->ui64CPUVAddr);
+
+
+
+
+ChangeSparseMem_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psSrvDevMemHeapInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hSrvDevMemHeap,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+					}
+				}
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPages(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *psDevmemIntMapPagesIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *psDevmemIntMapPagesOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hReservation = psDevmemIntMapPagesIN->hReservation;
+	DEVMEMINT_RESERVATION * psReservationInt = NULL;
+	IMG_HANDLE hPMR = psDevmemIntMapPagesIN->hPMR;
+	PMR * psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntMapPagesOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psReservationInt,
+											hReservation,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+											IMG_TRUE);
+					if(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntMapPages_exit;
+					}
+				}
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntMapPagesOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntMapPages_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevmemIntMapPagesOUT->eError =
+		DevmemIntMapPages(
+					psReservationInt,
+					psPMRInt,
+					psDevmemIntMapPagesIN->ui32PageCount,
+					psDevmemIntMapPagesIN->ui32PhysicalPgOffset,
+					psDevmemIntMapPagesIN->uiFlags,
+					psDevmemIntMapPagesIN->sDevVAddr);
+
+
+
+
+DevmemIntMapPages_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psReservationInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hReservation,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+					}
+				}
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hReservation = psDevmemIntUnmapPagesIN->hReservation;
+	DEVMEMINT_RESERVATION * psReservationInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntUnmapPagesOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psReservationInt,
+											hReservation,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+											IMG_TRUE);
+					if(psDevmemIntUnmapPagesOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntUnmapPages_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevmemIntUnmapPagesOUT->eError =
+		DevmemIntUnmapPages(
+					psReservationInt,
+					psDevmemIntUnmapPagesIN->sDevVAddr,
+					psDevmemIntUnmapPagesIN->ui32PageCount);
+
+
+
+
+DevmemIntUnmapPages_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psReservationInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hReservation,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevmemCtx = psDevmemIsVDevAddrValidIN->hDevmemCtx;
+	DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIsVDevAddrValidOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psDevmemCtxInt,
+											hDevmemCtx,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+											IMG_TRUE);
+					if(psDevmemIsVDevAddrValidOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIsVDevAddrValid_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevmemIsVDevAddrValidOUT->eError =
+		DevmemIntIsVDevAddrValid(psConnection, OSGetDevData(psConnection),
+					psDevmemCtxInt,
+					psDevmemIsVDevAddrValidIN->sAddress);
+
+DevmemIsVDevAddrValid_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psDevmemCtxInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hDevmemCtx,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+					}
+				}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigCount(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountIN,
+					  PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psHeapCfgHeapConfigCountIN);
+
+	psHeapCfgHeapConfigCountOUT->eError =
+		HeapCfgHeapConfigCount(psConnection, OSGetDevData(psConnection),
+					&psHeapCfgHeapConfigCountOUT->ui32NumHeapConfigs);
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapCount(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountIN,
+					  PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	psHeapCfgHeapCountOUT->eError =
+		HeapCfgHeapCount(psConnection, OSGetDevData(psConnection),
+					psHeapCfgHeapCountIN->ui32HeapConfigIndex,
+					&psHeapCfgHeapCountOUT->ui32NumHeaps);
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameIN,
+					  PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *puiHeapConfigNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+			(psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR));
+
+	psHeapCfgHeapConfigNameOUT->puiHeapConfigName = psHeapCfgHeapConfigNameIN->puiHeapConfigName;
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHeapCfgHeapConfigNameIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto HeapCfgHeapConfigName_exit;
+			}
+		}
+	}
+
+	if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz != 0)
+	{
+		puiHeapConfigNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR);
+	}
+
+	psHeapCfgHeapConfigNameOUT->eError =
+		HeapCfgHeapConfigName(psConnection, OSGetDevData(psConnection),
+					psHeapCfgHeapConfigNameIN->ui32HeapConfigIndex,
+					psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz,
+					puiHeapConfigNameInt);
+
+	if ((psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) > 0)
+	{
+		if ( OSCopyToUser(NULL, psHeapCfgHeapConfigNameOUT->puiHeapConfigName, puiHeapConfigNameInt,
+			(psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK )
+		{
+			psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto HeapCfgHeapConfigName_exit;
+		}
+	}
+
+
+HeapCfgHeapConfigName_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
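The bHaveEnoughSpace logic above is an allocation-avoidance trick: the bridge input message lives in a fixed buffer of PVRSRV_MAX_BRIDGE_IN_SIZE bytes, so an array payload that fits after the word-aligned input struct can reuse that buffer, and only larger payloads pay for a heap allocation. A condensed sketch of just that decision, assuming psIN points at the input message and ui32BufferSize is the payload size:

	/* Condensed sketch of the in-buffer reuse decision used above. */
	IMG_UINT32 ui32Off   = PVR_ALIGN(sizeof(*psIN), sizeof(unsigned long));
	IMG_UINT32 ui32Spare = (ui32Off >= PVRSRV_MAX_BRIDGE_IN_SIZE) ?
				0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32Off;
	IMG_BOOL   bReuse    = (IMG_BOOL)(ui32BufferSize <= ui32Spare);
	IMG_BYTE  *pBuf      = bReuse ? (IMG_BYTE *)psIN + ui32Off
				      : OSAllocMemNoStats(ui32BufferSize);

	/* ... carve the array arguments out of pBuf ... */

	if (!bReuse && pBuf)
		OSFreeMemNoStats(pBuf); /* only the fallback path owns pBuf */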
+
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsIN,
+					  PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *puiHeapNameOutInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+			(psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR));
+
+	psHeapCfgHeapDetailsOUT->puiHeapNameOut = psHeapCfgHeapDetailsIN->puiHeapNameOut;
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHeapCfgHeapDetailsIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto HeapCfgHeapDetails_exit;
+			}
+		}
+	}
+
+	if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0)
+	{
+		puiHeapNameOutInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR);
+	}
+
+	psHeapCfgHeapDetailsOUT->eError =
+		HeapCfgHeapDetails(psConnection, OSGetDevData(psConnection),
+					psHeapCfgHeapDetailsIN->ui32HeapConfigIndex,
+					psHeapCfgHeapDetailsIN->ui32HeapIndex,
+					psHeapCfgHeapDetailsIN->ui32HeapNameBufSz,
+					puiHeapNameOutInt,
+					&psHeapCfgHeapDetailsOUT->sDevVAddrBase,
+					&psHeapCfgHeapDetailsOUT->uiHeapLength,
+					&psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut,
+					&psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut,
+					&psHeapCfgHeapDetailsOUT->ui32Log2TilingStrideFactorOut);
+
+	if ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0)
+	{
+		if ( OSCopyToUser(NULL, psHeapCfgHeapDetailsOUT->puiHeapNameOut, puiHeapNameOutInt,
+			(psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK )
+		{
+			psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto HeapCfgHeapDetails_exit;
+		}
+	}
+
+
+HeapCfgHeapDetails_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevmemCtx = psDevmemIntRegisterPFNotifyKMIN->hDevmemCtx;
+	DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntRegisterPFNotifyKMOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psDevmemCtxInt,
+											hDevmemCtx,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+											IMG_TRUE);
+					if(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntRegisterPFNotifyKM_exit;
+					}
+				}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle();
+
+	psDevmemIntRegisterPFNotifyKMOUT->eError =
+		DevmemIntRegisterPFNotifyKM(
+					psDevmemCtxInt,
+					psDevmemIntRegisterPFNotifyKMIN->ui32PID,
+					psDevmemIntRegisterPFNotifyKMIN->bRegister);
+
+DevmemIntRegisterPFNotifyKM_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psDevmemCtxInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hDevmemCtx,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+					}
+				}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeGetMaxDevMemSize(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE *psGetMaxDevMemSizeIN,
+					  PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE *psGetMaxDevMemSizeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psGetMaxDevMemSizeIN);
+
+	psGetMaxDevMemSizeOUT->eError =
+		PVRSRVGetMaxDevMemSizeKM(psConnection, OSGetDevData(psConnection),
+					&psGetMaxDevMemSizeOUT->uiLMASize,
+					&psGetMaxDevMemSizeOUT->uiUMASize);
+
+	return 0;
+}
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitMMBridge(void);
+PVRSRV_ERROR DeinitMMBridge(void);
+
+/*
+ * Register all MM functions with services
+ */
+PVRSRV_ERROR InitMMBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR, PVRSRVBridgePMRExportPMR,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR, PVRSRVBridgePMRUnexportPMR,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, PVRSRVBridgePMRGetUID,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE, PVRSRVBridgePMRMakeLocalImportHandle,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE, PVRSRVBridgePMRUnmakeLocalImportHandle,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR, PVRSRVBridgePMRImportPMR,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR, PVRSRVBridgePMRLocalImportPMR,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR, PVRSRVBridgePMRUnrefPMR,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR, PVRSRVBridgePMRUnrefUnlockPMR,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR, PVRSRVBridgePhysmemNewRamBackedPMR,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR, PVRSRVBridgePhysmemNewRamBackedLockedPMR,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN, PVRSRVBridgeDevmemIntPin,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN, PVRSRVBridgeDevmemIntUnpin,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE, PVRSRVBridgeDevmemIntPinValidate,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE, PVRSRVBridgeDevmemIntUnpinInvalidate,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE, PVRSRVBridgeDevmemIntCtxCreate,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY, PVRSRVBridgeDevmemIntCtxDestroy,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE, PVRSRVBridgeDevmemIntHeapCreate,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY, PVRSRVBridgeDevmemIntHeapDestroy,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR, PVRSRVBridgeDevmemIntMapPMR,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR, PVRSRVBridgeDevmemIntUnmapPMR,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE, PVRSRVBridgeDevmemIntReserveRange,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE, PVRSRVBridgeDevmemIntUnreserveRange,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM, PVRSRVBridgeChangeSparseMem,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES, PVRSRVBridgeDevmemIntMapPages,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES, PVRSRVBridgeDevmemIntUnmapPages,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID, PVRSRVBridgeDevmemIsVDevAddrValid,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT, PVRSRVBridgeHeapCfgHeapConfigCount,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT, PVRSRVBridgeHeapCfgHeapCount,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME, PVRSRVBridgeHeapCfgHeapConfigName,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS, PVRSRVBridgeHeapCfgHeapDetails,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM, PVRSRVBridgeDevmemIntRegisterPFNotifyKM,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE, PVRSRVBridgeGetMaxDevMemSize,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all mm functions with services
+ */
+PVRSRV_ERROR DeinitMMBridge(void)
+{
+	return PVRSRV_OK;
+}
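Each SetDispatchTableEntry call above binds one (bridge group, function ID) pair to its handler; the NULL argument means no per-bridge lock is supplied, and bUseLock leaves the global bridge locking policy in effect. The real table lives in srvcore, but a hypothetical sketch of the routing these registrations imply:

	/* Hypothetical routing sketch; BRIDGE_GROUP_COUNT, FUNCS_PER_GROUP and
	 * the table layout are invented here (the real one lives in srvcore). */
	typedef IMG_INT (*BRIDGE_FN)(IMG_UINT32 ui32Entry, void *psIn,
				     void *psOut, CONNECTION_DATA *psConn);

	static BRIDGE_FN gafnTable[BRIDGE_GROUP_COUNT][FUNCS_PER_GROUP];

	static IMG_INT DispatchSketch(IMG_UINT32 ui32Group, IMG_UINT32 ui32Func,
				      void *psIn, void *psOut,
				      CONNECTION_DATA *psConn)
	{
		BRIDGE_FN pfn = gafnTable[ui32Group][ui32Func];

		return pfn ? pfn(ui32Func, psIn, psOut, psConn) : -1;
	}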
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdump_bridge/client_pdump_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdump_bridge/client_pdump_bridge.h
new file mode 100644
index 0000000..b628b08
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdump_bridge/client_pdump_bridge.h
@@ -0,0 +1,78 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for pdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for pdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_PDUMP_BRIDGE_H
+#define CLIENT_PDUMP_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdump_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemPDumpBitmap(IMG_HANDLE hBridge,
+							       IMG_CHAR *puiFileName,
+							       IMG_UINT32 ui32FileOffset,
+							       IMG_UINT32 ui32Width,
+							       IMG_UINT32 ui32Height,
+							       IMG_UINT32 ui32StrideInBytes,
+							       IMG_DEV_VIRTADDR sDevBaseAddr,
+							       IMG_HANDLE hDevmemCtx,
+							       IMG_UINT32 ui32Size,
+							       PDUMP_PIXEL_FORMAT ePixelFormat,
+							       IMG_UINT32 ui32AddrMode,
+							       IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpComment(IMG_HANDLE hBridge,
+								IMG_CHAR *puiComment,
+								IMG_UINT32 ui32Flags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetFrame(IMG_HANDLE hBridge,
+								 IMG_UINT32 ui32Frame);
+
+
+#endif /* CLIENT_PDUMP_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdump_bridge/client_pdump_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdump_bridge/client_pdump_direct_bridge.c
new file mode 100644
index 0000000..b4977ef
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdump_bridge/client_pdump_direct_bridge.c
@@ -0,0 +1,119 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for pdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_pdump_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+
+#include "devicemem_server.h"
+#include "pdump_km.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemPDumpBitmap(IMG_HANDLE hBridge,
+							       IMG_CHAR *puiFileName,
+							       IMG_UINT32 ui32FileOffset,
+							       IMG_UINT32 ui32Width,
+							       IMG_UINT32 ui32Height,
+							       IMG_UINT32 ui32StrideInBytes,
+							       IMG_DEV_VIRTADDR sDevBaseAddr,
+							       IMG_HANDLE hDevmemCtx,
+							       IMG_UINT32 ui32Size,
+							       PDUMP_PIXEL_FORMAT ePixelFormat,
+							       IMG_UINT32 ui32AddrMode,
+							       IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX * psDevmemCtxInt;
+
+	psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+	eError =
+		DevmemIntPDumpBitmap(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge),
+					puiFileName,
+					ui32FileOffset,
+					ui32Width,
+					ui32Height,
+					ui32StrideInBytes,
+					sDevBaseAddr,
+					psDevmemCtxInt,
+					ui32Size,
+					ePixelFormat,
+					ui32AddrMode,
+					ui32PDumpFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpComment(IMG_HANDLE hBridge,
+								IMG_CHAR *puiComment,
+								IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		PDumpCommentKM(
+					puiComment,
+					ui32Flags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetFrame(IMG_HANDLE hBridge,
+								 IMG_UINT32 ui32Frame)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		PDumpSetFrameKM(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge),
+					ui32Frame);
+
+	return eError;
+}
+
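In this direct variant the client and the server are linked into the same image, so each Bridge* wrapper calls the KM function directly and, as the casts above imply, treats hBridge as the device node. A usage sketch under that assumption:

	/* Usage sketch, assuming hBridge wraps the device node as the casts
	 * in this file imply. */
	PVRSRV_ERROR eErr;

	eErr = BridgePVRSRVPDumpComment(hBridge,
					(IMG_CHAR *)"Start of frame 42", 0);
	if (eErr == PVRSRV_OK)
		eErr = BridgePVRSRVPDumpSetFrame(hBridge, 42);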
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdump_bridge/common_pdump_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdump_bridge/common_pdump_bridge.h
new file mode 100644
index 0000000..0159b82
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdump_bridge/common_pdump_bridge.h
@@ -0,0 +1,126 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for pdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for pdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_PDUMP_BRIDGE_H
+#define COMMON_PDUMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+
+
+#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST			0
+#define PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP			PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT			PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME			PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMP_CMD_LAST			(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2)
+
+
+/*******************************************
+            DevmemPDumpBitmap          
+ *******************************************/
+
+/* Bridge in structure for DevmemPDumpBitmap */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP_TAG
+{
+	IMG_CHAR * puiFileName;
+	IMG_UINT32 ui32FileOffset;
+	IMG_UINT32 ui32Width;
+	IMG_UINT32 ui32Height;
+	IMG_UINT32 ui32StrideInBytes;
+	IMG_DEV_VIRTADDR sDevBaseAddr;
+	IMG_HANDLE hDevmemCtx;
+	IMG_UINT32 ui32Size;
+	PDUMP_PIXEL_FORMAT ePixelFormat;
+	IMG_UINT32 ui32AddrMode;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP;
+
+/* Bridge out structure for DevmemPDumpBitmap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP;
+
+
+/*******************************************
+            PVRSRVPDumpComment          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpComment */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT_TAG
+{
+	IMG_CHAR * puiComment;
+	IMG_UINT32 ui32Flags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT;
+
+/* Bridge out structure for PVRSRVPDumpComment */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT;
+
+
+/*******************************************
+            PVRSRVPDumpSetFrame          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpSetFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME_TAG
+{
+	IMG_UINT32 ui32Frame;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME;
+
+/* Bridge out structure for PVRSRVPDumpSetFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME;
+
+
+#endif /* COMMON_PDUMP_BRIDGE_H */
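These IN/OUT structures are the exact wire format copied across the user/kernel boundary, which is why each one is __attribute__((packed)) and why even empty messages carry an explicit placeholder field. A hedged sketch of a client filling the SetFrame message, with SendBridgeCall and HandleFailure as invented stand-ins for the client library's ioctl transport and error path:

	/* Hypothetical client-side use of the wire structs; SendBridgeCall and
	 * HandleFailure are invented stand-ins. */
	PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME  sIn  = { 0 };
	PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME sOut = { 0 };

	sIn.ui32Frame = 42;
	SendBridgeCall(PVRSRV_BRIDGE_PDUMP,
		       PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME,
		       &sIn, sizeof(sIn), &sOut, sizeof(sOut));
	if (sOut.eError != PVRSRV_OK)
		HandleFailure(sOut.eError);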
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdump_bridge/server_pdump_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdump_bridge/server_pdump_bridge.c
new file mode 100644
index 0000000..e303ddd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdump_bridge/server_pdump_bridge.c
@@ -0,0 +1,382 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for pdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pdump_km.h"
+
+
+#include "common_pdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeDevmemPDumpBitmap(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP *psDevmemPDumpBitmapIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP *psDevmemPDumpBitmapOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiFileNameInt = NULL;
+	IMG_HANDLE hDevmemCtx = psDevmemPDumpBitmapIN->hDevmemCtx;
+	DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+			(PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR));
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevmemPDumpBitmapIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevmemPDumpBitmapIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psDevmemPDumpBitmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto DevmemPDumpBitmap_exit;
+			}
+		}
+	}
+
+	{
+		uiFileNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR) > 0)
+	{
+		if ( OSCopyFromUser(NULL, uiFileNameInt, psDevmemPDumpBitmapIN->puiFileName, PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK )
+		{
+			psDevmemPDumpBitmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto DevmemPDumpBitmap_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+				{
+					/* Look up the address from the handle */
+					psDevmemPDumpBitmapOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psDevmemCtxInt,
+											hDevmemCtx,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+											IMG_TRUE);
+					if(psDevmemPDumpBitmapOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemPDumpBitmap_exit;
+					}
+				}
+	/* Release now that we have looked up the handles. */
+	UnlockHandle();
+
+	psDevmemPDumpBitmapOUT->eError =
+		DevmemIntPDumpBitmap(psConnection, OSGetDevData(psConnection),
+					uiFileNameInt,
+					psDevmemPDumpBitmapIN->ui32FileOffset,
+					psDevmemPDumpBitmapIN->ui32Width,
+					psDevmemPDumpBitmapIN->ui32Height,
+					psDevmemPDumpBitmapIN->ui32StrideInBytes,
+					psDevmemPDumpBitmapIN->sDevBaseAddr,
+					psDevmemCtxInt,
+					psDevmemPDumpBitmapIN->ui32Size,
+					psDevmemPDumpBitmapIN->ePixelFormat,
+					psDevmemPDumpBitmapIN->ui32AddrMode,
+					psDevmemPDumpBitmapIN->ui32PDumpFlags);
+
+DevmemPDumpBitmap_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psDevmemCtxInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hDevmemCtx,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+					}
+				}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
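Note that the filename above is copied in at its full fixed size, PVRSRV_PDUMP_MAX_FILENAME_SIZE, rather than at a caller-supplied length, so the handler never trusts a size from user space. The same guard in isolation (acName and puiUserName are hypothetical local names):

	IMG_CHAR acName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];

	/* Fixed-size copy-in; any fault on the user pointer is mapped to a
	 * single bridge error code, as in the handler above. */
	if (OSCopyFromUser(NULL, acName, puiUserName,
			   sizeof(acName)) != PVRSRV_OK)
		return PVRSRV_ERROR_INVALID_PARAMS;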
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiCommentInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+			(PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR));
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPVRSRVPDumpCommentIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPVRSRVPDumpCommentIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto PVRSRVPDumpComment_exit;
+			}
+		}
+	}
+
+	{
+		uiCommentInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR) > 0)
+	{
+		if ( OSCopyFromUser(NULL, uiCommentInt, psPVRSRVPDumpCommentIN->puiComment, PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK )
+		{
+			psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PVRSRVPDumpComment_exit;
+		}
+	}
+
+
+	psPVRSRVPDumpCommentOUT->eError =
+		PDumpCommentKM(
+					uiCommentInt,
+					psPVRSRVPDumpCommentIN->ui32Flags);
+
+PVRSRVPDumpComment_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpSetFrame(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	psPVRSRVPDumpSetFrameOUT->eError =
+		PDumpSetFrameKM(psConnection, OSGetDevData(psConnection),
+					psPVRSRVPDumpSetFrameIN->ui32Frame);
+
+	return 0;
+}
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitPDUMPBridge(void);
+PVRSRV_ERROR DeinitPDUMPBridge(void);
+
+/*
+ * Register all PDUMP functions with services
+ */
+PVRSRV_ERROR InitPDUMPBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP, PVRSRVBridgeDevmemPDumpBitmap,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT, PVRSRVBridgePVRSRVPDumpComment,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME, PVRSRVBridgePVRSRVPDumpSetFrame,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all pdump functions with services
+ */
+PVRSRV_ERROR DeinitPDUMPBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpctrl_bridge/client_pdumpctrl_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpctrl_bridge/client_pdumpctrl_bridge.h
new file mode 100644
index 0000000..65ca48d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpctrl_bridge/client_pdumpctrl_bridge.h
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for pdumpctrl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for pdumpctrl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_PDUMPCTRL_BRIDGE_H
+#define CLIENT_PDUMPCTRL_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdumpctrl_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpIsCapturing(IMG_HANDLE hBridge,
+								    IMG_BOOL *pbIsCapturing);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetFrame(IMG_HANDLE hBridge,
+								 IMG_UINT32 *pui32Frame);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge,
+										IMG_UINT32 ui32Mode,
+										IMG_UINT32 ui32Start,
+										IMG_UINT32 ui32End,
+										IMG_UINT32 ui32Interval,
+										IMG_UINT32 ui32MaxParamFileSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge,
+									   IMG_BOOL *pbpbIsLastCaptureFrame);
+
+
+#endif /* CLIENT_PDUMPCTRL_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c
new file mode 100644
index 0000000..c8132fc
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c
@@ -0,0 +1,114 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for pdumpctrl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_pdumpctrl_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+
+#include "pdump_km.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpIsCapturing(IMG_HANDLE hBridge,
+								    IMG_BOOL *pbIsCapturing)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		PDumpIsCaptureFrameKM(
+					pbIsCapturing);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetFrame(IMG_HANDLE hBridge,
+								 IMG_UINT32 *pui32Frame)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		PDumpGetFrameKM(NULL, (PVRSRV_DEVICE_NODE *)((void*) hBridge),
+					pui32Frame);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge,
+										IMG_UINT32 ui32Mode,
+										IMG_UINT32 ui32Start,
+										IMG_UINT32 ui32End,
+										IMG_UINT32 ui32Interval,
+										IMG_UINT32 ui32MaxParamFileSize)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		PDumpSetDefaultCaptureParamsKM(
+					ui32Mode,
+					ui32Start,
+					ui32End,
+					ui32Interval,
+					ui32MaxParamFileSize);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge,
+									   IMG_BOOL *pbpbIsLastCaptureFrame)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		PDumpIsLastCaptureFrameKM(
+					pbpbIsLastCaptureFrame);
+
+	return eError;
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpctrl_bridge/common_pdumpctrl_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpctrl_bridge/common_pdumpctrl_bridge.h
new file mode 100644
index 0000000..a165141
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpctrl_bridge/common_pdumpctrl_bridge.h
@@ -0,0 +1,138 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for pdumpctrl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for pdumpctrl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_PDUMPCTRL_BRIDGE_H
+#define COMMON_PDUMPCTRL_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+
+#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST			0
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISCAPTURING			PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME			PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS			PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME			PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST			(PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+3)
+
+
+/*******************************************
+            PVRSRVPDumpIsCapturing          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpIsCapturing */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPISCAPTURING_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPISCAPTURING;
+
+/* Bridge out structure for PVRSRVPDumpIsCapturing */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISCAPTURING_TAG
+{
+	IMG_BOOL bIsCapturing;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISCAPTURING;
+
+
+/*******************************************
+            PVRSRVPDumpGetFrame          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpGetFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME;
+
+/* Bridge out structure for PVRSRVPDumpGetFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME_TAG
+{
+	IMG_UINT32 ui32Frame;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME;
+
+
+/*******************************************
+            PVRSRVPDumpSetDefaultCaptureParams          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpSetDefaultCaptureParams */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
+{
+	IMG_UINT32 ui32Mode;
+	IMG_UINT32 ui32Start;
+	IMG_UINT32 ui32End;
+	IMG_UINT32 ui32Interval;
+	IMG_UINT32 ui32MaxParamFileSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;
+
+/* Bridge out structure for PVRSRVPDumpSetDefaultCaptureParams */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;
+
+
+/*******************************************
+            PVRSRVPDumpIsLastCaptureFrame          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpIsLastCaptureFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME;
+
+/* Bridge out structure for PVRSRVPDumpIsLastCaptureFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
+{
+	IMG_BOOL bpbIsLastCaptureFrame;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME;
+
+
+#endif /* COMMON_PDUMPCTRL_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpctrl_bridge/server_pdumpctrl_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpctrl_bridge/server_pdumpctrl_bridge.c
new file mode 100644
index 0000000..148c726
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpctrl_bridge/server_pdumpctrl_bridge.c
@@ -0,0 +1,242 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for pdumpctrl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pdumpctrl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pdump_km.h"
+
+
+#include "common_pdumpctrl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpIsCapturing(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPISCAPTURING *psPVRSRVPDumpIsCapturingIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISCAPTURING *psPVRSRVPDumpIsCapturingOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpIsCapturingIN);
+
+	psPVRSRVPDumpIsCapturingOUT->eError =
+		PDumpIsCaptureFrameKM(
+					&psPVRSRVPDumpIsCapturingOUT->bIsCapturing);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpGetFrame(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetFrameIN);
+
+
+
+
+
+	psPVRSRVPDumpGetFrameOUT->eError =
+		PDumpGetFrameKM(psConnection, OSGetDevData(psConnection),
+					&psPVRSRVPDumpGetFrameOUT->ui32Frame);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *psPVRSRVPDumpSetDefaultCaptureParamsIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *psPVRSRVPDumpSetDefaultCaptureParamsOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+	psPVRSRVPDumpSetDefaultCaptureParamsOUT->eError =
+		PDumpSetDefaultCaptureParamsKM(
+					psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Mode,
+					psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Start,
+					psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32End,
+					psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Interval,
+					psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32MaxParamFileSize);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME *psPVRSRVPDumpIsLastCaptureFrameIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME *psPVRSRVPDumpIsLastCaptureFrameOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpIsLastCaptureFrameIN);
+
+
+
+
+
+	psPVRSRVPDumpIsLastCaptureFrameOUT->eError =
+		PDumpIsLastCaptureFrameKM(
+					&psPVRSRVPDumpIsLastCaptureFrameOUT->bpbIsLastCaptureFrame);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static POS_LOCK pPDUMPCTRLBridgeLock;
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitPDUMPCTRLBridge(void);
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(void);
+
+/*
+ * Register all PDUMPCTRL functions with services
+ */
+PVRSRV_ERROR InitPDUMPCTRLBridge(void)
+{
+	PVR_LOGR_IF_ERROR(OSLockCreate(&pPDUMPCTRLBridgeLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISCAPTURING, PVRSRVBridgePVRSRVPDumpIsCapturing,
+					pPDUMPCTRLBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME, PVRSRVBridgePVRSRVPDumpGetFrame,
+					pPDUMPCTRLBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS, PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams,
+					pPDUMPCTRLBridgeLock, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME, PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame,
+					pPDUMPCTRLBridgeLock, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PDUMPCTRL functions from services
+ */
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(void)
+{
+	PVR_LOGR_IF_ERROR(OSLockDestroy(pPDUMPCTRLBridgeLock), "OSLockDestroy");
+	return PVRSRV_OK;
+}
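
Every entry point in this generated file has the same shape: unpack the IN structure, call the matching *KM service, store the result in the OUT structure's eError field, and return 0 — errors travel back inside the OUT payload rather than as the return value. InitPDUMPCTRLBridge() then creates a single passive lock for the group and registers all four handlers with SetDispatchTableEntry(). As a rough mental model only (the real dispatcher lives in srvcore and differs in detail), invoking one of these entries could look like the hypothetical sketch below; the types mirror the entry-point signatures above, but DispatchOne and bridge_entry are invented names:

    /* Hypothetical dispatch model, for illustration only. */
    typedef IMG_INT (*BRIDGE_FN)(IMG_UINT32 ui32Entry, void *pvIn, void *pvOut,
                                 CONNECTION_DATA *psConnection);

    struct bridge_entry {
        BRIDGE_FN pfFunc;   /* e.g. PVRSRVBridgePVRSRVPDumpGetFrame */
        POS_LOCK  hLock;    /* pPDUMPCTRLBridgeLock for this group */
        IMG_BOOL  bUseLock; /* IMG_TRUE here, so calls serialise */
    };

    static IMG_INT DispatchOne(struct bridge_entry *psEntry, IMG_UINT32 ui32Idx,
                               void *pvIn, void *pvOut, CONNECTION_DATA *psConn)
    {
        IMG_INT iRet;

        if (psEntry->bUseLock)
            OSLockAcquire(psEntry->hLock);  /* one lock covers the group */

        iRet = psEntry->pfFunc(ui32Idx, pvIn, pvOut, psConn);

        if (psEntry->bUseLock)
            OSLockRelease(psEntry->hLock);

        return iRet;
    }
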
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpmm_bridge/client_pdumpmm_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpmm_bridge/client_pdumpmm_bridge.h
new file mode 100644
index 0000000..6d06374
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpmm_bridge/client_pdumpmm_bridge.h
@@ -0,0 +1,119 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for pdumpmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for pdumpmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_PDUMPMM_BRIDGE_H
+#define CLIENT_PDUMPMM_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdumpmm_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMem(IMG_HANDLE hBridge,
+							     IMG_HANDLE hPMR,
+							     IMG_DEVMEM_OFFSET_T uiOffset,
+							     IMG_DEVMEM_SIZE_T uiSize,
+							     IMG_UINT32 ui32PDumpFlags,
+							     IMG_BOOL bbZero);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue32(IMG_HANDLE hBridge,
+								    IMG_HANDLE hPMR,
+								    IMG_DEVMEM_OFFSET_T uiOffset,
+								    IMG_UINT32 ui32Value,
+								    IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue64(IMG_HANDLE hBridge,
+								    IMG_HANDLE hPMR,
+								    IMG_DEVMEM_OFFSET_T uiOffset,
+								    IMG_UINT64 ui64Value,
+								    IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSaveToFile(IMG_HANDLE hBridge,
+								IMG_HANDLE hPMR,
+								IMG_DEVMEM_OFFSET_T uiOffset,
+								IMG_DEVMEM_SIZE_T uiSize,
+								IMG_UINT32 ui32ArraySize,
+								const IMG_CHAR *puiFileName,
+								IMG_UINT32 ui32uiFileOffset);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSymbolicAddr(IMG_HANDLE hBridge,
+								  IMG_HANDLE hPMR,
+								  IMG_DEVMEM_OFFSET_T uiOffset,
+								  IMG_UINT32 ui32MemspaceNameLen,
+								  IMG_CHAR *puiMemspaceName,
+								  IMG_UINT32 ui32SymbolicAddrLen,
+								  IMG_CHAR *puiSymbolicAddr,
+								  IMG_DEVMEM_OFFSET_T *puiNewOffset,
+								  IMG_DEVMEM_OFFSET_T *puiNextSymName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpPol32(IMG_HANDLE hBridge,
+							   IMG_HANDLE hPMR,
+							   IMG_DEVMEM_OFFSET_T uiOffset,
+							   IMG_UINT32 ui32Value,
+							   IMG_UINT32 ui32Mask,
+							   PDUMP_POLL_OPERATOR eOperator,
+							   IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCBP(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR,
+							 IMG_DEVMEM_OFFSET_T uiReadOffset,
+							 IMG_DEVMEM_OFFSET_T uiWriteOffset,
+							 IMG_DEVMEM_SIZE_T uiPacketSize,
+							 IMG_DEVMEM_SIZE_T uiBufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge,
+									     IMG_HANDLE hDevmemServerContext,
+									     IMG_DEV_VIRTADDR sAddress,
+									     IMG_DEVMEM_SIZE_T uiSize,
+									     IMG_UINT32 ui32ArraySize,
+									     const IMG_CHAR *puiFileName,
+									     IMG_UINT32 ui32FileOffset,
+									     IMG_UINT32 ui32PDumpFlags);
+
+
+#endif /* CLIENT_PDUMPMM_BRIDGE_H */
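
Each declaration above is the client-side face of one pdumpmm bridge command: the hBridge handle selects the services connection, and the remaining parameters mirror the packed IN structure for that command. A hypothetical call site — hBridge and hPMR are assumed to have been obtained elsewhere, and the offset/flags values are placeholders:

    /* Hypothetical: write a 32-bit marker into a PMR in the PDump capture. */
    PVRSRV_ERROR eErr = BridgePMRPDumpLoadMemValue32(hBridge,
                                                     hPMR,
                                                     0,          /* uiOffset */
                                                     0xDEADBEEF, /* ui32Value */
                                                     0);         /* ui32PDumpFlags */
    if (eErr != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PDump load-mem-value failed: %d", eErr));
    }
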
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpmm_bridge/client_pdumpmm_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpmm_bridge/client_pdumpmm_direct_bridge.c
new file mode 100644
index 0000000..89999e2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpmm_bridge/client_pdumpmm_direct_bridge.c
@@ -0,0 +1,258 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for pdumpmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_pdumpmm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "physmem.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMem(IMG_HANDLE hBridge,
+							     IMG_HANDLE hPMR,
+							     IMG_DEVMEM_OFFSET_T uiOffset,
+							     IMG_DEVMEM_SIZE_T uiSize,
+							     IMG_UINT32 ui32PDumpFlags,
+							     IMG_BOOL bbZero)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRPDumpLoadMem(
+					psPMRInt,
+					uiOffset,
+					uiSize,
+					ui32PDumpFlags,
+					bbZero);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue32(IMG_HANDLE hBridge,
+								    IMG_HANDLE hPMR,
+								    IMG_DEVMEM_OFFSET_T uiOffset,
+								    IMG_UINT32 ui32Value,
+								    IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRPDumpLoadMemValue32(
+					psPMRInt,
+					uiOffset,
+					ui32Value,
+					ui32PDumpFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue64(IMG_HANDLE hBridge,
+								    IMG_HANDLE hPMR,
+								    IMG_DEVMEM_OFFSET_T uiOffset,
+								    IMG_UINT64 ui64Value,
+								    IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRPDumpLoadMemValue64(
+					psPMRInt,
+					uiOffset,
+					ui64Value,
+					ui32PDumpFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSaveToFile(IMG_HANDLE hBridge,
+								IMG_HANDLE hPMR,
+								IMG_DEVMEM_OFFSET_T uiOffset,
+								IMG_DEVMEM_SIZE_T uiSize,
+								IMG_UINT32 ui32ArraySize,
+								const IMG_CHAR *puiFileName,
+								IMG_UINT32 ui32uiFileOffset)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRPDumpSaveToFile(
+					psPMRInt,
+					uiOffset,
+					uiSize,
+					ui32ArraySize,
+					puiFileName,
+					ui32uiFileOffset);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSymbolicAddr(IMG_HANDLE hBridge,
+								  IMG_HANDLE hPMR,
+								  IMG_DEVMEM_OFFSET_T uiOffset,
+								  IMG_UINT32 ui32MemspaceNameLen,
+								  IMG_CHAR *puiMemspaceName,
+								  IMG_UINT32 ui32SymbolicAddrLen,
+								  IMG_CHAR *puiSymbolicAddr,
+								  IMG_DEVMEM_OFFSET_T *puiNewOffset,
+								  IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMR_PDumpSymbolicAddr(
+					psPMRInt,
+					uiOffset,
+					ui32MemspaceNameLen,
+					puiMemspaceName,
+					ui32SymbolicAddrLen,
+					puiSymbolicAddr,
+					puiNewOffset,
+					puiNextSymName);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpPol32(IMG_HANDLE hBridge,
+							   IMG_HANDLE hPMR,
+							   IMG_DEVMEM_OFFSET_T uiOffset,
+							   IMG_UINT32 ui32Value,
+							   IMG_UINT32 ui32Mask,
+							   PDUMP_POLL_OPERATOR eOperator,
+							   IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRPDumpPol32(
+					psPMRInt,
+					uiOffset,
+					ui32Value,
+					ui32Mask,
+					eOperator,
+					ui32PDumpFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCBP(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR,
+							 IMG_DEVMEM_OFFSET_T uiReadOffset,
+							 IMG_DEVMEM_OFFSET_T uiWriteOffset,
+							 IMG_DEVMEM_SIZE_T uiPacketSize,
+							 IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRPDumpCBP(
+					psPMRInt,
+					uiReadOffset,
+					uiWriteOffset,
+					uiPacketSize,
+					uiBufferSize);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge,
+									     IMG_HANDLE hDevmemServerContext,
+									     IMG_DEV_VIRTADDR sAddress,
+									     IMG_DEVMEM_SIZE_T uiSize,
+									     IMG_UINT32 ui32ArraySize,
+									     const IMG_CHAR *puiFileName,
+									     IMG_UINT32 ui32FileOffset,
+									     IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX * psDevmemServerContextInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+	eError =
+		DevmemIntPDumpSaveToFileVirtual(
+					psDevmemServerContextInt,
+					sAddress,
+					uiSize,
+					ui32ArraySize,
+					puiFileName,
+					ui32FileOffset,
+					ui32PDumpFlags);
+
+	return eError;
+}
+
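
In this direct-bridge build the "bridge" is just a function call: each wrapper casts the opaque IMG_HANDLE back to the server-side type (PMR or DEVMEMINT_CTX), ignores hBridge, and calls the service routine in the same address space. On the indirect (ioctl) path the same call is instead marshalled through the packed structures declared in the common header that follows. A sketch of that marshalling for PMRPDumpPol32 — the helper name is invented and the transport of the structures is omitted:

    /* Illustrative only: packing a PMRPDumpPol32 request into the bridge
     * IN structure from common_pdumpmm_bridge.h. */
    static void FillPol32Args(PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *psIn,
                              IMG_HANDLE hPMR,
                              IMG_DEVMEM_OFFSET_T uiOffset,
                              IMG_UINT32 ui32Value,
                              IMG_UINT32 ui32Mask,
                              PDUMP_POLL_OPERATOR eOperator,
                              IMG_UINT32 ui32PDumpFlags)
    {
        psIn->hPMR           = hPMR;     /* opaque handle, resolved server-side */
        psIn->uiOffset       = uiOffset;
        psIn->ui32Value      = ui32Value;
        psIn->ui32Mask       = ui32Mask;
        psIn->eOperator      = eOperator;
        psIn->ui32PDumpFlags = ui32PDumpFlags;

        /* The generated server entry unpacks this, resolves hPMR and calls
         * PMRPDumpPol32(); the OUT structure's eError carries the result back. */
    }
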
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpmm_bridge/common_pdumpmm_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpmm_bridge/common_pdumpmm_bridge.h
new file mode 100644
index 0000000..07e17b2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpmm_bridge/common_pdumpmm_bridge.h
@@ -0,0 +1,248 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for pdumpmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for pdumpmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_PDUMPMM_BRIDGE_H
+#define COMMON_PDUMPMM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST			0
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_PDUMPMM_CMD_LAST			(PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+7)
+
+
+/*******************************************
+            PMRPDumpLoadMem          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMem */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_BOOL bbZero;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM;
+
+/* Bridge out structure for PMRPDumpLoadMem */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM;
+
+
+/*******************************************
+            PMRPDumpLoadMemValue32          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMemValue32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_UINT32 ui32Value;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32;
+
+/* Bridge out structure for PMRPDumpLoadMemValue32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32;
+
+
+/*******************************************
+            PMRPDumpLoadMemValue64          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMemValue64 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_UINT64 ui64Value;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64;
+
+/* Bridge out structure for PMRPDumpLoadMemValue64 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64;
+
+
+/*******************************************
+            PMRPDumpSaveToFile          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpSaveToFile */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32 ui32ArraySize;
+	const IMG_CHAR * puiFileName;
+	IMG_UINT32 ui32uiFileOffset;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE;
+
+/* Bridge out structure for PMRPDumpSaveToFile */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE;
+
+
+/*******************************************
+            PMRPDumpSymbolicAddr          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpSymbolicAddr */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_UINT32 ui32MemspaceNameLen;
+	IMG_UINT32 ui32SymbolicAddrLen;
+	/* Output pointer puiMemspaceName is also an implied input */
+	IMG_CHAR * puiMemspaceName;
+	/* Output pointer puiSymbolicAddr is also an implied input */
+	IMG_CHAR * puiSymbolicAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR;
+
+/* Bridge out structure for PMRPDumpSymbolicAddr */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR_TAG
+{
+	IMG_CHAR * puiMemspaceName;
+	IMG_CHAR * puiSymbolicAddr;
+	IMG_DEVMEM_OFFSET_T uiNewOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR;
+
+
+/*******************************************
+            PMRPDumpPol32          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpPol32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPPOL32_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_UINT32 ui32Value;
+	IMG_UINT32 ui32Mask;
+	PDUMP_POLL_OPERATOR eOperator;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPPOL32;
+
+/* Bridge out structure for PMRPDumpPol32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32;
+
+
+/*******************************************
+            PMRPDumpCBP          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPCBP_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiReadOffset;
+	IMG_DEVMEM_OFFSET_T uiWriteOffset;
+	IMG_DEVMEM_SIZE_T uiPacketSize;
+	IMG_DEVMEM_SIZE_T uiBufferSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPCBP;
+
+/* Bridge out structure for PMRPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPCBP_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPCBP;
+
+
+/*******************************************
+            DevmemIntPDumpSaveToFileVirtual          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPDumpSaveToFileVirtual */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG
+{
+	IMG_HANDLE hDevmemServerContext;
+	IMG_DEV_VIRTADDR sAddress;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32 ui32ArraySize;
+	const IMG_CHAR * puiFileName;
+	IMG_UINT32 ui32FileOffset;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL;
+
+/* Bridge out structure for DevmemIntPDumpSaveToFileVirtual */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL;
+
+
+#endif /* COMMON_PDUMPMM_BRIDGE_H */
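
The command block at the top of this header assigns the eight pdumpmm commands consecutive offsets from PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST, with CMD_LAST pinned to FIRST+7, and every IN/OUT structure is declared __attribute__((packed)) so that the user and kernel sides agree on layout byte-for-byte. A hypothetical compile-time check of the command count (not part of the generated header; userspace C11 is shown, and a kernel build would use BUILD_BUG_ON instead):

    #include <assert.h>
    #include "common_pdumpmm_bridge.h"

    /* Eight commands: PMRPDUMPLOADMEM .. DEVMEMINTPDUMPSAVETOFILEVIRTUAL. */
    static_assert(PVRSRV_BRIDGE_PDUMPMM_CMD_LAST -
                  PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST + 1 == 8,
                  "pdumpmm bridge is expected to export eight commands");
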
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpmm_bridge/server_pdumpmm_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpmm_bridge/server_pdumpmm_bridge.c
new file mode 100644
index 0000000..ef5d823
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pdumpmm_bridge/server_pdumpmm_bridge.c
@@ -0,0 +1,961 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for pdumpmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pdumpmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "physmem.h"
+
+
+#include "common_pdumpmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMem(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM *psPMRPDumpLoadMemIN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM *psPMRPDumpLoadMemOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psPMRPDumpLoadMemIN->hPMR;
+	PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpLoadMemOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psPMRPDumpLoadMemOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto PMRPDumpLoadMem_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psPMRPDumpLoadMemOUT->eError =
+		PMRPDumpLoadMem(
+					psPMRInt,
+					psPMRPDumpLoadMemIN->uiOffset,
+					psPMRPDumpLoadMemIN->uiSize,
+					psPMRPDumpLoadMemIN->ui32PDumpFlags,
+					psPMRPDumpLoadMemIN->bbZero);
+
+
+
+
+PMRPDumpLoadMem_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMemValue32(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32IN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32OUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psPMRPDumpLoadMemValue32IN->hPMR;
+	PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpLoadMemValue32OUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psPMRPDumpLoadMemValue32OUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto PMRPDumpLoadMemValue32_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psPMRPDumpLoadMemValue32OUT->eError =
+		PMRPDumpLoadMemValue32(
+					psPMRInt,
+					psPMRPDumpLoadMemValue32IN->uiOffset,
+					psPMRPDumpLoadMemValue32IN->ui32Value,
+					psPMRPDumpLoadMemValue32IN->ui32PDumpFlags);
+
+
+
+
+PMRPDumpLoadMemValue32_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMemValue64(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64IN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64OUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psPMRPDumpLoadMemValue64IN->hPMR;
+	PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpLoadMemValue64OUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psPMRPDumpLoadMemValue64OUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto PMRPDumpLoadMemValue64_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psPMRPDumpLoadMemValue64OUT->eError =
+		PMRPDumpLoadMemValue64(
+					psPMRInt,
+					psPMRPDumpLoadMemValue64IN->uiOffset,
+					psPMRPDumpLoadMemValue64IN->ui64Value,
+					psPMRPDumpLoadMemValue64IN->ui32PDumpFlags);
+
+
+
+
+PMRPDumpLoadMemValue64_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileIN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psPMRPDumpSaveToFileIN->hPMR;
+	PMR * psPMRInt = NULL;
+	IMG_CHAR *uiFileNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPMRPDumpSaveToFileIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPMRPDumpSaveToFileIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto PMRPDumpSaveToFile_exit;
+			}
+		}
+	}
+
+	if (psPMRPDumpSaveToFileIN->ui32ArraySize != 0)
+	{
+		uiFileNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiFileNameInt, psPMRPDumpSaveToFileIN->puiFileName, psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto PMRPDumpSaveToFile_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpSaveToFileOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psPMRPDumpSaveToFileOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto PMRPDumpSaveToFile_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psPMRPDumpSaveToFileOUT->eError =
+		PMRPDumpSaveToFile(
+					psPMRInt,
+					psPMRPDumpSaveToFileIN->uiOffset,
+					psPMRPDumpSaveToFileIN->uiSize,
+					psPMRPDumpSaveToFileIN->ui32ArraySize,
+					uiFileNameInt,
+					psPMRPDumpSaveToFileIN->ui32uiFileOffset);
+
+
+
+
+PMRPDumpSaveToFile_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrIN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psPMRPDumpSymbolicAddrIN->hPMR;
+	PMR * psPMRInt = NULL;
+	IMG_CHAR *puiMemspaceNameInt = NULL;
+	IMG_CHAR *puiSymbolicAddrInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR)) +
+			(psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+	psPMRPDumpSymbolicAddrOUT->puiMemspaceName = psPMRPDumpSymbolicAddrIN->puiMemspaceName;
+	psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr = psPMRPDumpSymbolicAddrIN->puiSymbolicAddr;
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPMRPDumpSymbolicAddrIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPMRPDumpSymbolicAddrIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto PMRPDumpSymbolicAddr_exit;
+			}
+		}
+	}
+
+	if (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen != 0)
+	{
+		puiMemspaceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR);
+	}
+
+	if (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen != 0)
+	{
+		puiSymbolicAddrInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR);
+	}
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpSymbolicAddrOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psPMRPDumpSymbolicAddrOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto PMRPDumpSymbolicAddr_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psPMRPDumpSymbolicAddrOUT->eError =
+		PMR_PDumpSymbolicAddr(
+					psPMRInt,
+					psPMRPDumpSymbolicAddrIN->uiOffset,
+					psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen,
+					puiMemspaceNameInt,
+					psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen,
+					puiSymbolicAddrInt,
+					&psPMRPDumpSymbolicAddrOUT->uiNewOffset,
+					&psPMRPDumpSymbolicAddrOUT->uiNextSymName);
+
+
+
+	if ((psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR)) > 0)
+	{
+		if ( OSCopyToUser(NULL, psPMRPDumpSymbolicAddrOUT->puiMemspaceName, puiMemspaceNameInt,
+			(psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR))) != PVRSRV_OK )
+		{
+			psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PMRPDumpSymbolicAddr_exit;
+		}
+	}
+
+	if ((psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR)) > 0)
+	{
+		if ( OSCopyToUser(NULL, psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr, puiSymbolicAddrInt,
+			(psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR))) != PVRSRV_OK )
+		{
+			psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PMRPDumpSymbolicAddr_exit;
+		}
+	}
+
+
+PMRPDumpSymbolicAddr_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpPol32(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *psPMRPDumpPol32IN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32 *psPMRPDumpPol32OUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psPMRPDumpPol32IN->hPMR;
+	PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpPol32OUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psPMRPDumpPol32OUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto PMRPDumpPol32_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psPMRPDumpPol32OUT->eError =
+		PMRPDumpPol32(
+					psPMRInt,
+					psPMRPDumpPol32IN->uiOffset,
+					psPMRPDumpPol32IN->ui32Value,
+					psPMRPDumpPol32IN->ui32Mask,
+					psPMRPDumpPol32IN->eOperator,
+					psPMRPDumpPol32IN->ui32PDumpFlags);
+
+
+
+
+PMRPDumpPol32_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPCBP *psPMRPDumpCBPIN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPCBP *psPMRPDumpCBPOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMR = psPMRPDumpCBPIN->hPMR;
+	PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpCBPOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psPMRPDumpCBPOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto PMRPDumpCBP_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psPMRPDumpCBPOUT->eError =
+		PMRPDumpCBP(
+					psPMRInt,
+					psPMRPDumpCBPIN->uiReadOffset,
+					psPMRPDumpCBPIN->uiWriteOffset,
+					psPMRPDumpCBPIN->uiPacketSize,
+					psPMRPDumpCBPIN->uiBufferSize);
+
+
+
+
+PMRPDumpCBP_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *psDevmemIntPDumpSaveToFileVirtualIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *psDevmemIntPDumpSaveToFileVirtualOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevmemServerContext = psDevmemIntPDumpSaveToFileVirtualIN->hDevmemServerContext;
+	DEVMEMINT_CTX * psDevmemServerContextInt = NULL;
+	IMG_CHAR *uiFileNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevmemIntPDumpSaveToFileVirtualIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevmemIntPDumpSaveToFileVirtualIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psDevmemIntPDumpSaveToFileVirtualOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto DevmemIntPDumpSaveToFileVirtual_exit;
+			}
+		}
+	}
+
+	if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize != 0)
+	{
+		uiFileNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiFileNameInt, psDevmemIntPDumpSaveToFileVirtualIN->puiFileName, psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psDevmemIntPDumpSaveToFileVirtualOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto DevmemIntPDumpSaveToFileVirtual_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psDevmemServerContextInt,
+											hDevmemServerContext,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+											IMG_TRUE);
+					if(psDevmemIntPDumpSaveToFileVirtualOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto DevmemIntPDumpSaveToFileVirtual_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+		DevmemIntPDumpSaveToFileVirtual(
+					psDevmemServerContextInt,
+					psDevmemIntPDumpSaveToFileVirtualIN->sAddress,
+					psDevmemIntPDumpSaveToFileVirtualIN->uiSize,
+					psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize,
+					uiFileNameInt,
+					psDevmemIntPDumpSaveToFileVirtualIN->ui32FileOffset,
+					psDevmemIntPDumpSaveToFileVirtualIN->ui32PDumpFlags);
+
+
+
+
+DevmemIntPDumpSaveToFileVirtual_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psDevmemServerContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hDevmemServerContext,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitPDUMPMMBridge(void);
+PVRSRV_ERROR DeinitPDUMPMMBridge(void);
+
+/*
+ * Register all PDUMPMM functions with services
+ */
+PVRSRV_ERROR InitPDUMPMMBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM, PVRSRVBridgePMRPDumpLoadMem,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32, PVRSRVBridgePMRPDumpLoadMemValue32,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64, PVRSRVBridgePMRPDumpLoadMemValue64,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE, PVRSRVBridgePMRPDumpSaveToFile,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR, PVRSRVBridgePMRPDumpSymbolicAddr,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32, PVRSRVBridgePMRPDumpPol32,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP, PVRSRVBridgePMRPDumpCBP,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL, PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PDUMPMM functions from services
+ */
+PVRSRV_ERROR DeinitPDUMPMMBridge(void)
+{
+	return PVRSRV_OK;
+}
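
The entry points above with array arguments (PMRPDumpSaveToFile, PMRPDumpSymbolicAddr, DevmemIntPDumpSaveToFileVirtual) share one buffer-management idiom: size the scratch area from the IN structure's counts, reuse the word-aligned slack at the end of the fixed-size bridge input buffer when it is large enough, and fall back to OSAllocMemNoStats() otherwise — freeing only in the fallback case. Distilled into a standalone helper (BridgeArgsAcquire and the choice of IN structure are illustrative, assuming uiNeeded > 0):

    /* Illustrative distillation of the generated pattern above. */
    static IMG_BYTE *BridgeArgsAcquire(PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *psIN,
                                       IMG_UINT32 uiNeeded,
                                       IMG_BOOL *pbReused)
    {
        IMG_UINT32 uiOff = PVR_ALIGN(sizeof(*psIN), sizeof(unsigned long));

        /* Reuse the slack after the IN payload when the arrays fit,
           avoiding a heap allocation on every bridge call. */
        if (uiOff < PVRSRV_MAX_BRIDGE_IN_SIZE &&
            uiNeeded <= PVRSRV_MAX_BRIDGE_IN_SIZE - uiOff)
        {
            *pbReused = IMG_TRUE;
            return (IMG_BYTE *)psIN + uiOff;
        }

        *pbReused = IMG_FALSE;
        return OSAllocMemNoStats(uiNeeded); /* may be NULL; the caller checks
                                               and later frees the fallback
                                               with OSFreeMemNoStats() */
    }
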
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pvrtl_bridge/client_pvrtl_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pvrtl_bridge/client_pvrtl_bridge.h
new file mode 100644
index 0000000..e5a49a1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pvrtl_bridge/client_pvrtl_bridge.h
@@ -0,0 +1,99 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for pvrtl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_PVRTL_BRIDGE_H
+#define CLIENT_PVRTL_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pvrtl_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge,
+							  const IMG_CHAR *puiName,
+							  IMG_UINT32 ui32Mode,
+							  IMG_HANDLE *phSD,
+							  IMG_HANDLE *phTLPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD,
+							   IMG_UINT32 *pui32ReadOffset,
+							   IMG_UINT32 *pui32ReadLen);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD,
+							   IMG_UINT32 ui32ReadOffset,
+							   IMG_UINT32 ui32ReadLen);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDiscoverStreams(IMG_HANDLE hBridge,
+							       const IMG_CHAR *puiNamePattern,
+							       IMG_UINT32 ui32Size,
+							       IMG_CHAR *puiStreams,
+							       IMG_UINT32 *pui32NumFound);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReserveStream(IMG_HANDLE hBridge,
+							     IMG_HANDLE hSD,
+							     IMG_UINT32 *pui32BufferOffset,
+							     IMG_UINT32 ui32Size,
+							     IMG_UINT32 ui32SizeMin,
+							     IMG_UINT32 *pui32Available);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCommitStream(IMG_HANDLE hBridge,
+							    IMG_HANDLE hSD,
+							    IMG_UINT32 ui32ReqSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLWriteData(IMG_HANDLE hBridge,
+							 IMG_HANDLE hSD,
+							 IMG_UINT32 ui32Size,
+							 IMG_BYTE *psData);
+
+
+#endif /* CLIENT_PVRTL_BRIDGE_H */
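
These declarations describe the Transport Layer client lifecycle: open a stream by name (also receiving the PMR that backs its buffer), repeatedly acquire and release data windows, and finally close the stream. A hypothetical reader loop — the stream name "hwperf" and mode value 0 are placeholders, and error handling is trimmed:

    /* Hypothetical TL reader loop over the client bridge above. */
    IMG_HANDLE hSD, hTLPMR;
    IMG_UINT32 ui32Off, ui32Len;

    if (BridgeTLOpenStream(hBridge, "hwperf", 0, &hSD, &hTLPMR) == PVRSRV_OK)
    {
        while (BridgeTLAcquireData(hBridge, hSD, &ui32Off, &ui32Len) == PVRSRV_OK
               && ui32Len != 0)
        {
            /* Consume ui32Len bytes at ui32Off in the stream buffer
               exposed through hTLPMR, then hand the window back. */
            BridgeTLReleaseData(hBridge, hSD, ui32Off, ui32Len);
        }
        BridgeTLCloseStream(hBridge, hSD);
    }
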
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pvrtl_bridge/client_pvrtl_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pvrtl_bridge/client_pvrtl_direct_bridge.c
new file mode 100644
index 0000000..e644027
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pvrtl_bridge/client_pvrtl_direct_bridge.c
@@ -0,0 +1,214 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_pvrtl_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+
+#include "tlserver.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge,
+							  const IMG_CHAR *puiName,
+							  IMG_UINT32 ui32Mode,
+							  IMG_HANDLE *phSD,
+							  IMG_HANDLE *phTLPMR)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC * psSDInt;
+	PMR * psTLPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		TLServerOpenStreamKM(
+					puiName,
+					ui32Mode,
+					&psSDInt,
+					&psTLPMRInt);
+
+	*phSD = psSDInt;
+	*phTLPMR = psTLPMRInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC * psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError =
+		TLServerCloseStreamKM(
+					psSDInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD,
+							   IMG_UINT32 *pui32ReadOffset,
+							   IMG_UINT32 *pui32ReadLen)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC * psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError =
+		TLServerAcquireDataKM(
+					psSDInt,
+					pui32ReadOffset,
+					pui32ReadLen);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD,
+							   IMG_UINT32 ui32ReadOffset,
+							   IMG_UINT32 ui32ReadLen)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC * psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError =
+		TLServerReleaseDataKM(
+					psSDInt,
+					ui32ReadOffset,
+					ui32ReadLen);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDiscoverStreams(IMG_HANDLE hBridge,
+							       const IMG_CHAR *puiNamePattern,
+							       IMG_UINT32 ui32Size,
+							       IMG_CHAR *puiStreams,
+							       IMG_UINT32 *pui32NumFound)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		TLServerDiscoverStreamsKM(
+					puiNamePattern,
+					ui32Size,
+					puiStreams,
+					pui32NumFound);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReserveStream(IMG_HANDLE hBridge,
+							     IMG_HANDLE hSD,
+							     IMG_UINT32 *pui32BufferOffset,
+							     IMG_UINT32 ui32Size,
+							     IMG_UINT32 ui32SizeMin,
+							     IMG_UINT32 *pui32Available)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC * psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError =
+		TLServerReserveStreamKM(
+					psSDInt,
+					pui32BufferOffset,
+					ui32Size,
+					ui32SizeMin,
+					pui32Available);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCommitStream(IMG_HANDLE hBridge,
+							    IMG_HANDLE hSD,
+							    IMG_UINT32 ui32ReqSize)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC * psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError =
+		TLServerCommitStreamKM(
+					psSDInt,
+					ui32ReqSize);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLWriteData(IMG_HANDLE hBridge,
+							 IMG_HANDLE hSD,
+							 IMG_UINT32 ui32Size,
+							 IMG_BYTE *psData)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC * psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError =
+		TLServerWriteDataKM(
+					psSDInt,
+					ui32Size,
+					psData);
+
+	return eError;
+}
+
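Every wrapper in the direct bridge above follows the same shape: ignore the bridge handle, cast the opaque client handle back to the kernel-side descriptor, and tail-call the matching TLServer*KM entry point. Distilled to a sketch (the object type and KM function here are stand-ins, not driver symbols):

typedef struct KM_OBJECT_TAG KM_OBJECT;	/* stand-in for TL_STREAM_DESC */

static PVRSRV_ERROR KMDoOperation(KM_OBJECT *psObj)
{
	(void) psObj;
	return PVRSRV_OK;	/* stand-in for a TLServer*KM call */
}

PVRSRV_ERROR BridgeDoOperation(IMG_HANDLE hBridge, IMG_HANDLE hObj)
{
	PVR_UNREFERENCED_PARAMETER(hBridge);	/* unused when client and server link together */

	/* In a direct build the "handle" is simply the kernel pointer. */
	return KMDoOperation((KM_OBJECT *) hObj);
}

Under PVR_INDIRECT_BRIDGE_CLIENTS the same prototypes from client_pvrtl_bridge.h are instead backed by the marshalling path pulled in via pvr_bridge_client.h.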
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pvrtl_bridge/common_pvrtl_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pvrtl_bridge/common_pvrtl_bridge.h
new file mode 100644
index 0000000..c2b8426
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pvrtl_bridge/common_pvrtl_bridge.h
@@ -0,0 +1,224 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for pvrtl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_PVRTL_BRIDGE_H
+#define COMMON_PVRTL_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+
+
+#define PVRSRV_BRIDGE_PVRTL_CMD_FIRST			0
+#define PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PVRTL_TLWRITEDATA			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7
+#define PVRSRV_BRIDGE_PVRTL_CMD_LAST			(PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7)
+
+
+/*******************************************
+            TLOpenStream
+ *******************************************/
+
+/* Bridge in structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_IN_TLOPENSTREAM_TAG
+{
+	const IMG_CHAR * puiName;
+	IMG_UINT32 ui32Mode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLOPENSTREAM;
+
+/* Bridge out structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLOPENSTREAM_TAG
+{
+	IMG_HANDLE hSD;
+	IMG_HANDLE hTLPMR;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLOPENSTREAM;
+
+
+/*******************************************
+            TLCloseStream
+ *******************************************/
+
+/* Bridge in structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCLOSESTREAM_TAG
+{
+	IMG_HANDLE hSD;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLCLOSESTREAM;
+
+/* Bridge out structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCLOSESTREAM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLCLOSESTREAM;
+
+
+/*******************************************
+            TLAcquireData
+ *******************************************/
+
+/* Bridge in structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_IN_TLACQUIREDATA_TAG
+{
+	IMG_HANDLE hSD;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLACQUIREDATA;
+
+/* Bridge out structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_OUT_TLACQUIREDATA_TAG
+{
+	IMG_UINT32 ui32ReadOffset;
+	IMG_UINT32 ui32ReadLen;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLACQUIREDATA;
+
+
+/*******************************************
+            TLReleaseData
+ *******************************************/
+
+/* Bridge in structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_IN_TLRELEASEDATA_TAG
+{
+	IMG_HANDLE hSD;
+	IMG_UINT32 ui32ReadOffset;
+	IMG_UINT32 ui32ReadLen;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLRELEASEDATA;
+
+/* Bridge out structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_OUT_TLRELEASEDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLRELEASEDATA;
+
+
+/*******************************************
+            TLDiscoverStreams
+ *******************************************/
+
+/* Bridge in structure for TLDiscoverStreams */
+typedef struct PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS_TAG
+{
+	const IMG_CHAR * puiNamePattern;
+	IMG_UINT32 ui32Size;
+	/* Output pointer puiStreams is also an implied input */
+	IMG_CHAR * puiStreams;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS;
+
+/* Bridge out structure for TLDiscoverStreams */
+typedef struct PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS_TAG
+{
+	IMG_CHAR * puiStreams;
+	IMG_UINT32 ui32NumFound;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS;
+
+
+/*******************************************
+            TLReserveStream
+ *******************************************/
+
+/* Bridge in structure for TLReserveStream */
+typedef struct PVRSRV_BRIDGE_IN_TLRESERVESTREAM_TAG
+{
+	IMG_HANDLE hSD;
+	IMG_UINT32 ui32Size;
+	IMG_UINT32 ui32SizeMin;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLRESERVESTREAM;
+
+/* Bridge out structure for TLReserveStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLRESERVESTREAM_TAG
+{
+	IMG_UINT32 ui32BufferOffset;
+	IMG_UINT32 ui32Available;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLRESERVESTREAM;
+
+
+/*******************************************
+            TLCommitStream
+ *******************************************/
+
+/* Bridge in structure for TLCommitStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCOMMITSTREAM_TAG
+{
+	IMG_HANDLE hSD;
+	IMG_UINT32 ui32ReqSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLCOMMITSTREAM;
+
+/* Bridge out structure for TLCommitStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM;
+
+
+/*******************************************
+            TLWriteData
+ *******************************************/
+
+/* Bridge in structure for TLWriteData */
+typedef struct PVRSRV_BRIDGE_IN_TLWRITEDATA_TAG
+{
+	IMG_HANDLE hSD;
+	IMG_UINT32 ui32Size;
+	IMG_BYTE * psData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLWRITEDATA;
+
+/* Bridge out structure for TLWriteData */
+typedef struct PVRSRV_BRIDGE_OUT_TLWRITEDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLWRITEDATA;
+
+
+#endif /* COMMON_PVRTL_BRIDGE_H */
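Each call above is described by a packed IN structure (copied in from the caller) and a packed OUT structure (copied back), with command indices offset from PVRSRV_BRIDGE_PVRTL_CMD_FIRST. A caller-side sketch for TLReleaseData, assuming a hypothetical SubmitBridgeCall transport helper (the real marshalling lives in the client bridge code):

/* Hypothetical transport helper; not part of the driver sources. */
extern PVRSRV_ERROR SubmitBridgeCall(IMG_UINT32 ui32Cmd,
				     void *pvIn, IMG_UINT32 ui32InSize,
				     void *pvOut, IMG_UINT32 ui32OutSize);

static PVRSRV_ERROR CallTLReleaseData(IMG_HANDLE hSD,
				      IMG_UINT32 ui32ReadOffset,
				      IMG_UINT32 ui32ReadLen)
{
	PVRSRV_BRIDGE_IN_TLRELEASEDATA sIn;
	PVRSRV_BRIDGE_OUT_TLRELEASEDATA sOut;
	PVRSRV_ERROR eError;

	sIn.hSD = hSD;
	sIn.ui32ReadOffset = ui32ReadOffset;
	sIn.ui32ReadLen = ui32ReadLen;

	/* Command indices are relative to PVRSRV_BRIDGE_PVRTL_CMD_FIRST. */
	eError = SubmitBridgeCall(PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA,
				  &sIn, sizeof(sIn), &sOut, sizeof(sOut));

	return (eError != PVRSRV_OK) ? eError : sOut.eError;
}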
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pvrtl_bridge/server_pvrtl_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pvrtl_bridge/server_pvrtl_bridge.c
new file mode 100644
index 0000000..de199c1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/pvrtl_bridge/server_pvrtl_bridge.c
@@ -0,0 +1,911 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pvrtl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "tlserver.h"
+
+
+#include "common_pvrtl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLOPENSTREAM *psTLOpenStreamIN,
+					  PVRSRV_BRIDGE_OUT_TLOPENSTREAM *psTLOpenStreamOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiNameInt = NULL;
+	TL_STREAM_DESC * psSDInt = NULL;
+	PMR * psTLPMRInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+			(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+	psTLOpenStreamOUT->hSD = NULL;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use the remainder of the input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psTLOpenStreamIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psTLOpenStreamOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto TLOpenStream_exit;
+			}
+		}
+	}
+
+
+	{
+		uiNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser(NULL, uiNameInt, psTLOpenStreamIN->puiName, PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psTLOpenStreamOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto TLOpenStream_exit;
+		}
+	}
+
+
+	psTLOpenStreamOUT->eError =
+		TLServerOpenStreamKM(
+					uiNameInt,
+					psTLOpenStreamIN->ui32Mode,
+					&psSDInt,
+					&psTLPMRInt);
+	/* Exit early if bridged call fails */
+	if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+	{
+		goto TLOpenStream_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psTLOpenStreamOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psTLOpenStreamOUT->hSD,
+							(void *) psSDInt,
+							PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&TLServerCloseStreamKM);
+	if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto TLOpenStream_exit;
+	}
+
+
+
+
+
+
+	psTLOpenStreamOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+							&psTLOpenStreamOUT->hTLPMR,
+							(void *) psTLPMRInt,
+							PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,psTLOpenStreamOUT->hSD);
+	if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto TLOpenStream_exit;
+	}
+
+	/* Release now that we have created handles. */
+	UnlockHandle();
+
+
+
+TLOpenStream_exit:
+
+
+
+	if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+	{
+		/* Lock over handle creation cleanup. */
+		LockHandle();
+		if (psTLOpenStreamOUT->hSD)
+		{
+
+
+			PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						(IMG_HANDLE) psTLOpenStreamOUT->hSD,
+						PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+			if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				        "PVRSRVBridgeTLOpenStream: %s",
+				        PVRSRVGetErrorStringKM(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psSDInt = NULL;
+		}
+
+
+		/* Release now that we have cleaned up the creation handles. */
+		UnlockHandle();
+		if (psSDInt)
+		{
+			TLServerCloseStreamKM(psSDInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLCloseStream(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLCLOSESTREAM *psTLCloseStreamIN,
+					  PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *psTLCloseStreamOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psTLCloseStreamOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psTLCloseStreamIN->hSD,
+					PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	if ((psTLCloseStreamOUT->eError != PVRSRV_OK) &&
+	    (psTLCloseStreamOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeTLCloseStream: %s",
+		        PVRSRVGetErrorStringKM(psTLCloseStreamOUT->eError)));
+		UnlockHandle();
+		goto TLCloseStream_exit;
+	}
+
+	/* Release now that we have destroyed handles. */
+	UnlockHandle();
+
+
+
+TLCloseStream_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLAcquireData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLACQUIREDATA *psTLAcquireDataIN,
+					  PVRSRV_BRIDGE_OUT_TLACQUIREDATA *psTLAcquireDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSD = psTLAcquireDataIN->hSD;
+	TL_STREAM_DESC * psSDInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	{
+		/* Look up the address from the handle */
+		psTLAcquireDataOUT->eError =
+			PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						   (void **) &psSDInt,
+						   hSD,
+						   PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+						   IMG_TRUE);
+		if (psTLAcquireDataOUT->eError != PVRSRV_OK)
+		{
+			UnlockHandle();
+			goto TLAcquireData_exit;
+		}
+	}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psTLAcquireDataOUT->eError =
+		TLServerAcquireDataKM(
+					psSDInt,
+					&psTLAcquireDataOUT->ui32ReadOffset,
+					&psTLAcquireDataOUT->ui32ReadLen);
+
+
+
+
+TLAcquireData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	{
+		/* Unreference the previously looked up handle */
+		if (psSDInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hSD,
+						    PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLReleaseData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLRELEASEDATA *psTLReleaseDataIN,
+					  PVRSRV_BRIDGE_OUT_TLRELEASEDATA *psTLReleaseDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSD = psTLReleaseDataIN->hSD;
+	TL_STREAM_DESC * psSDInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	{
+		/* Look up the address from the handle */
+		psTLReleaseDataOUT->eError =
+			PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						   (void **) &psSDInt,
+						   hSD,
+						   PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+						   IMG_TRUE);
+		if (psTLReleaseDataOUT->eError != PVRSRV_OK)
+		{
+			UnlockHandle();
+			goto TLReleaseData_exit;
+		}
+	}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psTLReleaseDataOUT->eError =
+		TLServerReleaseDataKM(
+					psSDInt,
+					psTLReleaseDataIN->ui32ReadOffset,
+					psTLReleaseDataIN->ui32ReadLen);
+
+
+
+
+TLReleaseData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	{
+		/* Unreference the previously looked up handle */
+		if (psSDInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hSD,
+						    PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *psTLDiscoverStreamsIN,
+					  PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *psTLDiscoverStreamsOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiNamePatternInt = NULL;
+	IMG_CHAR *puiStreamsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+			(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) +
+			(psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) +
+			0;
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psTLDiscoverStreamsOUT->puiStreams = psTLDiscoverStreamsIN->puiStreams;
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use the remainder of the input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psTLDiscoverStreamsIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto TLDiscoverStreams_exit;
+			}
+		}
+	}
+
+
+	{
+		uiNamePatternInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser(NULL, uiNamePatternInt, psTLDiscoverStreamsIN->puiNamePattern, PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto TLDiscoverStreams_exit;
+		}
+	}
+	if (psTLDiscoverStreamsIN->ui32Size != 0)
+	{
+		puiStreamsInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR);
+	}
+
+
+
+	psTLDiscoverStreamsOUT->eError =
+		TLServerDiscoverStreamsKM(
+					uiNamePatternInt,
+					psTLDiscoverStreamsIN->ui32Size,
+					puiStreamsInt,
+					&psTLDiscoverStreamsOUT->ui32NumFound);
+
+
+
+	if ((psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) > 0)
+	{
+		if (OSCopyToUser(NULL, psTLDiscoverStreamsOUT->puiStreams, puiStreamsInt,
+			(psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR))) != PVRSRV_OK)
+		{
+			psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto TLDiscoverStreams_exit;
+		}
+	}
+
+
+TLDiscoverStreams_exit:
+
+
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLReserveStream(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLRESERVESTREAM *psTLReserveStreamIN,
+					  PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *psTLReserveStreamOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSD = psTLReserveStreamIN->hSD;
+	TL_STREAM_DESC * psSDInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	{
+		/* Look up the address from the handle */
+		psTLReserveStreamOUT->eError =
+			PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						   (void **) &psSDInt,
+						   hSD,
+						   PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+						   IMG_TRUE);
+		if (psTLReserveStreamOUT->eError != PVRSRV_OK)
+		{
+			UnlockHandle();
+			goto TLReserveStream_exit;
+		}
+	}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psTLReserveStreamOUT->eError =
+		TLServerReserveStreamKM(
+					psSDInt,
+					&psTLReserveStreamOUT->ui32BufferOffset,
+					psTLReserveStreamIN->ui32Size,
+					psTLReserveStreamIN->ui32SizeMin,
+					&psTLReserveStreamOUT->ui32Available);
+
+
+
+
+TLReserveStream_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	{
+		/* Unreference the previously looked up handle */
+		if (psSDInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hSD,
+						    PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLCommitStream(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *psTLCommitStreamIN,
+					  PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *psTLCommitStreamOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSD = psTLCommitStreamIN->hSD;
+	TL_STREAM_DESC * psSDInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	{
+		/* Look up the address from the handle */
+		psTLCommitStreamOUT->eError =
+			PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						   (void **) &psSDInt,
+						   hSD,
+						   PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+						   IMG_TRUE);
+		if (psTLCommitStreamOUT->eError != PVRSRV_OK)
+		{
+			UnlockHandle();
+			goto TLCommitStream_exit;
+		}
+	}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psTLCommitStreamOUT->eError =
+		TLServerCommitStreamKM(
+					psSDInt,
+					psTLCommitStreamIN->ui32ReqSize);
+
+
+
+
+TLCommitStream_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	{
+		/* Unreference the previously looked up handle */
+		if (psSDInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hSD,
+						    PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLWRITEDATA *psTLWriteDataIN,
+					  PVRSRV_BRIDGE_OUT_TLWRITEDATA *psTLWriteDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSD = psTLWriteDataIN->hSD;
+	TL_STREAM_DESC * psSDInt = NULL;
+	IMG_BYTE *psDataInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+			(psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psTLWriteDataIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psTLWriteDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto TLWriteData_exit;
+			}
+		}
+	}
+
+	if (psTLWriteDataIN->ui32Size != 0)
+	{
+		psDataInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser(NULL, psDataInt, psTLWriteDataIN->psData, psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK)
+		{
+			psTLWriteDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto TLWriteData_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	{
+		/* Look up the address from the handle */
+		psTLWriteDataOUT->eError =
+			PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						   (void **) &psSDInt,
+						   hSD,
+						   PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+						   IMG_TRUE);
+		if (psTLWriteDataOUT->eError != PVRSRV_OK)
+		{
+			UnlockHandle();
+			goto TLWriteData_exit;
+		}
+	}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psTLWriteDataOUT->eError =
+		TLServerWriteDataKM(
+					psSDInt,
+					psTLWriteDataIN->ui32Size,
+					psDataInt);
+
+
+
+
+TLWriteData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	{
+		/* Unreference the previously looked up handle */
+		if (psSDInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						    hSD,
+						    PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_FALSE;
+
+PVRSRV_ERROR InitPVRTLBridge(void);
+PVRSRV_ERROR DeinitPVRTLBridge(void);
+
+/*
+ * Register all PVRTL functions with services
+ */
+PVRSRV_ERROR InitPVRTLBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM, PVRSRVBridgeTLOpenStream,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM, PVRSRVBridgeTLCloseStream,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA, PVRSRVBridgeTLAcquireData,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA, PVRSRVBridgeTLReleaseData,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS, PVRSRVBridgeTLDiscoverStreams,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM, PVRSRVBridgeTLReserveStream,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM, PVRSRVBridgeTLCommitStream,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA, PVRSRVBridgeTLWriteData,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PVRTL functions with services
+ */
+PVRSRV_ERROR DeinitPVRTLBridge(void)
+{
+	return PVRSRV_OK;
+}
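One pattern worth calling out from the handlers above: variable-length arguments are staged either in the word-aligned tail of the fixed-size bridge input buffer (avoiding an allocation on the hot path) or, when they do not fit, in a temporary heap buffer that is freed on exit. The essence, reduced to a helper (a sketch, not driver code; PVR_ALIGN, PVRSRV_MAX_BRIDGE_IN_SIZE and OSAllocMemNoStats are used as in the file):

static IMG_BYTE *StageBridgeArgs(void *psInStruct, IMG_UINT32 ui32InStructSize,
				 IMG_UINT32 ui32Needed, IMG_BOOL *pbAllocated)
{
	/* Word-align past the IN struct so staged arrays are safely aligned. */
	IMG_UINT32 ui32Offset = PVR_ALIGN(ui32InStructSize, sizeof(unsigned long));
	IMG_UINT32 ui32Spare = (ui32Offset >= PVRSRV_MAX_BRIDGE_IN_SIZE) ?
				0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32Offset;

	if (ui32Needed <= ui32Spare)
	{
		/* Fits in the tail: no allocation and nothing to free later. */
		*pbAllocated = IMG_FALSE;
		return (IMG_BYTE *) psInStruct + ui32Offset;
	}

	/* Too large: fall back to a temporary allocation. */
	*pbAllocated = IMG_TRUE;
	return OSAllocMemNoStats(ui32Needed);
}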
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/regconfig_bridge/common_regconfig_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/regconfig_bridge/common_regconfig_bridge.h
new file mode 100644
index 0000000..ebd98e8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/regconfig_bridge/common_regconfig_bridge.h
@@ -0,0 +1,152 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for regconfig
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for regconfig
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_REGCONFIG_BRIDGE_H
+#define COMMON_REGCONFIG_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST			0
+#define PVRSRV_BRIDGE_REGCONFIG_RGXSETREGCONFIGTYPE			PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+0
+#define PVRSRV_BRIDGE_REGCONFIG_RGXADDREGCONFIG			PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+1
+#define PVRSRV_BRIDGE_REGCONFIG_RGXCLEARREGCONFIG			PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+2
+#define PVRSRV_BRIDGE_REGCONFIG_RGXENABLEREGCONFIG			PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+3
+#define PVRSRV_BRIDGE_REGCONFIG_RGXDISABLEREGCONFIG			PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+4
+#define PVRSRV_BRIDGE_REGCONFIG_CMD_LAST			(PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+4)
+
+
+/*******************************************
+            RGXSetRegConfigType
+ *******************************************/
+
+/* Bridge in structure for RGXSetRegConfigType */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE_TAG
+{
+	IMG_UINT8 ui8RegPowerIsland;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE;
+
+/* Bridge out structure for RGXSetRegConfigType */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE;
+
+
+/*******************************************
+            RGXAddRegconfig
+ *******************************************/
+
+/* Bridge in structure for RGXAddRegconfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXADDREGCONFIG_TAG
+{
+	IMG_UINT32 ui32RegAddr;
+	IMG_UINT64 ui64RegValue;
+	IMG_UINT64 ui64RegMask;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXADDREGCONFIG;
+
+/* Bridge out structure for RGXAddRegconfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG;
+
+
+/*******************************************
+            RGXClearRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG;
+
+/* Bridge out structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG;
+
+
+/*******************************************
+            RGXEnableRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG;
+
+/* Bridge out structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG;
+
+
+/*******************************************
+            RGXDisableRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG;
+
+/* Bridge out structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG;
+
+
+#endif /* COMMON_REGCONFIG_BRIDGE_H */
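A small detail in the structures above: calls with no real inputs (RGXClearRegConfig and friends) still carry a ui32EmptyStructPlaceholder member, so every bridge payload has a well-defined, non-zero size on both sides of the interface. A compile-time check in that spirit (the negative-array-size trick is illustrative; it is not in the driver):

/* Fails to compile if the "empty" IN struct is not exactly one word. */
typedef char ASSERT_EMPTY_IN_IS_ONE_WORD[
	(sizeof(PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG) == sizeof(IMG_UINT32)) ? 1 : -1];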
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/regconfig_bridge/server_regconfig_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/regconfig_bridge/server_regconfig_bridge.c
new file mode 100644
index 0000000..2671e78
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/regconfig_bridge/server_regconfig_bridge.c
@@ -0,0 +1,279 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for regconfig
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for regconfig
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxregconfig.h"
+
+
+#include "common_regconfig_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXSetRegConfigType(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeIN,
+					  PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+	psRGXSetRegConfigTypeOUT->eError =
+		PVRSRVRGXSetRegConfigTypeKM(psConnection, OSGetDevData(psConnection),
+					psRGXSetRegConfigTypeIN->ui8RegPowerIsland);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXAddRegconfig(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *psRGXAddRegconfigIN,
+					  PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *psRGXAddRegconfigOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+	psRGXAddRegconfigOUT->eError =
+		PVRSRVRGXAddRegConfigKM(psConnection, OSGetDevData(psConnection),
+					psRGXAddRegconfigIN->ui32RegAddr,
+					psRGXAddRegconfigIN->ui64RegValue,
+					psRGXAddRegconfigIN->ui64RegMask);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXClearRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *psRGXClearRegConfigIN,
+					  PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *psRGXClearRegConfigOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psRGXClearRegConfigIN);
+
+
+
+
+
+	psRGXClearRegConfigOUT->eError =
+		PVRSRVRGXClearRegConfigKM(psConnection, OSGetDevData(psConnection)
+					);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXEnableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *psRGXEnableRegConfigIN,
+					  PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *psRGXEnableRegConfigOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psRGXEnableRegConfigIN);
+
+
+
+
+
+	psRGXEnableRegConfigOUT->eError =
+		PVRSRVRGXEnableRegConfigKM(psConnection, OSGetDevData(psConnection)
+					);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDisableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *psRGXDisableRegConfigIN,
+					  PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *psRGXDisableRegConfigOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psRGXDisableRegConfigIN);
+
+
+
+
+
+	psRGXDisableRegConfigOUT->eError =
+		PVRSRVRGXDisableRegConfigKM(psConnection, OSGetDevData(psConnection)
+					);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_REGCONFIG_BRIDGE */
+
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+PVRSRV_ERROR InitREGCONFIGBridge(void);
+PVRSRV_ERROR DeinitREGCONFIGBridge(void);
+
+/*
+ * Register all REGCONFIG functions with services
+ */
+PVRSRV_ERROR InitREGCONFIGBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXSETREGCONFIGTYPE, PVRSRVBridgeRGXSetRegConfigType,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXADDREGCONFIG, PVRSRVBridgeRGXAddRegconfig,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXCLEARREGCONFIG, PVRSRVBridgeRGXClearRegConfig,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXENABLEREGCONFIG, PVRSRVBridgeRGXEnableRegConfig,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXDISABLEREGCONFIG, PVRSRVBridgeRGXDisableRegConfig,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all REGCONFIG functions with services
+ */
+PVRSRV_ERROR DeinitREGCONFIGBridge(void)
+{
+	return PVRSRV_OK;
+}
+#else /* EXCLUDE_REGCONFIG_BRIDGE */
+/* This bridge is conditional on EXCLUDE_REGCONFIG_BRIDGE - when defined,
+ * do not populate the dispatch table with its functions
+ */
+#define InitREGCONFIGBridge() \
+	PVRSRV_OK
+
+#define DeinitREGCONFIGBridge() \
+	PVRSRV_OK
+
+#endif /* EXCLUDE_REGCONFIG_BRIDGE */
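The EXCLUDE_REGCONFIG_BRIDGE fallback above is what lets common registration code call the Init/Deinit pair unconditionally: when the bridge is compiled out, both names become macros that evaluate to PVRSRV_OK. A caller therefore needs no #ifdefs of its own, along the lines of (a sketch; the real registration flow in srvcore differs):

static PVRSRV_ERROR RegisterOptionalBridges(void)
{
	PVRSRV_ERROR eError;

	/* Expands to `eError = PVRSRV_OK;` when the bridge is excluded. */
	eError = InitREGCONFIGBridge();
	if (eError != PVRSRV_OK)
		return eError;

	/* ... further optional bridges would be registered here ... */
	return PVRSRV_OK;
}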
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxcmp_bridge/common_rgxcmp_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxcmp_bridge/common_rgxcmp_bridge.h
new file mode 100644
index 0000000..d084f74
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxcmp_bridge/common_rgxcmp_bridge.h
@@ -0,0 +1,217 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxcmp
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxcmp
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXCMP_BRIDGE_H
+#define COMMON_RGXCMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_RGXCMP_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST			(PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6)
+
+
+/*******************************************
+            RGXCreateComputeContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT_TAG
+{
+	IMG_UINT32 ui32Priority;
+	IMG_UINT32 ui32FrameworkCmdize;
+	IMG_BYTE * psFrameworkCmd;
+	IMG_HANDLE hPrivData;
+	IMG_DEV_VIRTADDR sResumeSignalAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT;
+
+/* Bridge out structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT_TAG
+{
+	IMG_HANDLE hComputeContext;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT;
+
+
+/*******************************************
+            RGXDestroyComputeContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT;
+
+/* Bridge out structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT;
+
+
+/*******************************************
+            RGXKickCDM
+ *******************************************/
+
+/* Bridge in structure for RGXKickCDM */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM_TAG
+{
+	IMG_HANDLE hComputeContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientFenceCount;
+	IMG_HANDLE * phClientFenceUFOSyncPrimBlock;
+	IMG_UINT32 * pui32ClientFenceOffset;
+	IMG_UINT32 * pui32ClientFenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_HANDLE * phClientUpdateUFOSyncPrimBlock;
+	IMG_UINT32 * pui32ClientUpdateOffset;
+	IMG_UINT32 * pui32ClientUpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 * pui32ServerSyncFlags;
+	IMG_HANDLE * phServerSyncs;
+	PVRSRV_FENCE hCheckFenceFd;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_CHAR * puiUpdateFenceName;
+	IMG_UINT32 ui32CmdSize;
+	IMG_BYTE * psDMCmd;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_DEV_VIRTADDR ssRobustnessResetReason;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKCDM;
+
+/* Bridge out structure for RGXKickCDM */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM_TAG
+{
+	PVRSRV_FENCE hUpdateFence;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKCDM;
+
+
+/*******************************************
+            RGXFlushComputeData
+ *******************************************/
+
+/* Bridge in structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA;
+
+/* Bridge out structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA;
+
+
+/*******************************************
+            RGXSetComputeContextPriority          
+ *******************************************/
+
+/* Bridge in structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hComputeContext;
+	IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY;
+
+
+/*******************************************
+            RGXGetLastComputeContextResetReason          
+ *******************************************/
+
+/* Bridge in structure for RGXGetLastComputeContextResetReason */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON;
+
+/* Bridge out structure for RGXGetLastComputeContextResetReason */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON_TAG
+{
+	IMG_UINT32 ui32LastResetReason;
+	IMG_UINT32 ui32LastResetJobRef;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON;
+
+
+/*******************************************
+            RGXNotifyComputeWriteOffsetUpdate          
+ *******************************************/
+
+/* Bridge in structure for RGXNotifyComputeWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
+
+/* Bridge out structure for RGXNotifyComputeWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
+
+
+#endif /* COMMON_RGXCMP_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxcmp_bridge/server_rgxcmp_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxcmp_bridge/server_rgxcmp_bridge.c
new file mode 100644
index 0000000..8c1044b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxcmp_bridge/server_rgxcmp_bridge.c
@@ -0,0 +1,1208 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxcmp
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxcmp
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxcompute.h"
+
+
+#include "common_rgxcmp_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+#include "rgx_bvnc_defs_km.h"
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
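+/* Each entry point below follows the same generated pattern: check that the
+ * device exposes the compute feature, marshal any user-supplied arrays into
+ * kernel memory, resolve handles to kernel objects, call the corresponding
+ * PVRSRV...KM function, then release the handle references on exit. */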
+static IMG_INT
+PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_BYTE *psFrameworkCmdInt = NULL;
+	IMG_HANDLE hPrivData = psRGXCreateComputeContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+			0;
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXCreateComputeContext_exit;
+		}
+	}
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateComputeContextIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXCreateComputeContext_exit;
+			}
+		}
+	}
+
+	if (psRGXCreateComputeContextIN->ui32FrameworkCmdize != 0)
+	{
+		psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+	}
+
+			/* Copy the data over */
+			if (psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXCreateComputeContextIN->psFrameworkCmd, psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+				{
+					psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXCreateComputeContext_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
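+				/* Each lookup below takes a reference on the handle
+				 * (final IMG_TRUE argument); the _exit path releases it. */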
+				{
+					/* Look up the address from the handle */
+					psRGXCreateComputeContextOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &hPrivDataInt,
+											hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+											IMG_TRUE);
+					if(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXCreateComputeContext_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXCreateComputeContextOUT->eError =
+		PVRSRVRGXCreateComputeContextKM(psConnection, OSGetDevData(psConnection),
+					psRGXCreateComputeContextIN->ui32Priority,
+					psRGXCreateComputeContextIN->ui32FrameworkCmdize,
+					psFrameworkCmdInt,
+					hPrivDataInt,
+					psRGXCreateComputeContextIN->sResumeSignalAddr,
+					&psComputeContextInt);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateComputeContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRGXCreateComputeContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psRGXCreateComputeContextOUT->hComputeContext,
+							(void *) psComputeContextInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyComputeContextKM);
+	if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateComputeContext_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+RGXCreateComputeContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(hPrivDataInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPrivData,
+										PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+	{
+		if (psComputeContextInt)
+		{
+			PVRSRVRGXDestroyComputeContextKM(psComputeContextInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
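+	/* The scratch buffer is freed only when it was heap-allocated; the
+	 * fast path above borrows spare space in the bridge input message. */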
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXDestroyComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXDestroyComputeContext_exit;
+		}
+	}
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXDestroyComputeContextOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyComputeContextIN->hComputeContext,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+	if ((psRGXDestroyComputeContextOUT->eError != PVRSRV_OK) &&
+	    (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXDestroyComputeContext: %s",
+		        PVRSRVGetErrorStringKM(psRGXDestroyComputeContextOUT->eError)));
+		UnlockHandle();
+		goto RGXDestroyComputeContext_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXDestroyComputeContext_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickCDM(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXKICKCDM *psRGXKickCDMIN,
+					  PVRSRV_BRIDGE_OUT_RGXKICKCDM *psRGXKickCDMOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hComputeContext = psRGXKickCDMIN->hComputeContext;
+	RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psClientFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientFenceOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientFenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psClientUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerSyncsInt = NULL;
+	IMG_HANDLE *hServerSyncsInt2 = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_BYTE *psDMCmdInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXKickCDMIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+			(psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+			(psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+			(psRGXKickCDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+			(psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+			(32 * sizeof(IMG_CHAR)) +
+			(psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) +
+			0;
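+	/* The total scratch space for all user-supplied arrays is computed up
+	 * front so a single allocation (or the spare input-buffer space) can
+	 * serve every copy in this call. */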
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXKickCDM_exit;
+		}
+	}
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickCDMIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickCDMIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXKickCDM_exit;
+			}
+		}
+	}
+
+	if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+	{
+		psClientFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickCDMIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hClientFenceUFOSyncPrimBlockInt2, psRGXKickCDMIN->phClientFenceUFOSyncPrimBlock, psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickCDM_exit;
+				}
+			}
+	if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientFenceOffsetInt, psRGXKickCDMIN->pui32ClientFenceOffset, psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickCDM_exit;
+				}
+			}
+	if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientFenceValueInt, psRGXKickCDMIN->pui32ClientFenceValue, psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickCDM_exit;
+				}
+			}
+	if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+	{
+		psClientUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hClientUpdateUFOSyncPrimBlockInt2, psRGXKickCDMIN->phClientUpdateUFOSyncPrimBlock, psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickCDM_exit;
+				}
+			}
+	if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientUpdateOffsetInt, psRGXKickCDMIN->pui32ClientUpdateOffset, psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickCDM_exit;
+				}
+			}
+	if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientUpdateValueInt, psRGXKickCDMIN->pui32ClientUpdateValue, psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickCDM_exit;
+				}
+			}
+	if (psRGXKickCDMIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXKickCDMIN->pui32ServerSyncFlags, psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickCDM_exit;
+				}
+			}
+	if (psRGXKickCDMIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickCDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServerSyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hServerSyncsInt2, psRGXKickCDMIN->phServerSyncs, psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickCDM_exit;
+				}
+			}
+	
+	{
+		uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += 32 * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (32 * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, psRGXKickCDMIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickCDM_exit;
+				}
+			}
+	if (psRGXKickCDMIN->ui32CmdSize != 0)
+	{
+		psDMCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, psDMCmdInt, psRGXKickCDMIN->psDMCmd, psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+				{
+					psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickCDM_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXKickCDMOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psComputeContextInt,
+											hComputeContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+											IMG_TRUE);
+					if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickCDM_exit;
+					}
+				}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickCDMIN->ui32ClientFenceCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickCDMOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psClientFenceUFOSyncPrimBlockInt[i],
+											hClientFenceUFOSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickCDM_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickCDMIN->ui32ClientUpdateCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickCDMOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psClientUpdateUFOSyncPrimBlockInt[i],
+											hClientUpdateUFOSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickCDM_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickCDMIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickCDMOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psServerSyncsInt[i],
+											hServerSyncsInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+											IMG_TRUE);
+					if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickCDM_exit;
+					}
+				}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXKickCDMOUT->eError =
+		PVRSRVRGXKickCDMKM(
+					psComputeContextInt,
+					psRGXKickCDMIN->ui32ClientCacheOpSeqNum,
+					psRGXKickCDMIN->ui32ClientFenceCount,
+					psClientFenceUFOSyncPrimBlockInt,
+					ui32ClientFenceOffsetInt,
+					ui32ClientFenceValueInt,
+					psRGXKickCDMIN->ui32ClientUpdateCount,
+					psClientUpdateUFOSyncPrimBlockInt,
+					ui32ClientUpdateOffsetInt,
+					ui32ClientUpdateValueInt,
+					psRGXKickCDMIN->ui32ServerSyncCount,
+					ui32ServerSyncFlagsInt,
+					psServerSyncsInt,
+					psRGXKickCDMIN->hCheckFenceFd,
+					psRGXKickCDMIN->hUpdateTimeline,
+					&psRGXKickCDMOUT->hUpdateFence,
+					uiUpdateFenceNameInt,
+					psRGXKickCDMIN->ui32CmdSize,
+					psDMCmdInt,
+					psRGXKickCDMIN->ui32PDumpFlags,
+					psRGXKickCDMIN->ui32ExtJobRef,
+					psRGXKickCDMIN->ssRobustnessResetReason);
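+	/* On success the kernel writes the new update fence into the OUT
+	 * structure's hUpdateFence for return to the caller. */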
+
+
+
+
+RGXKickCDM_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psComputeContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hComputeContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+					}
+				}
+
+
+
+
+
+
+	if (hClientFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickCDMIN->ui32ClientFenceCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hClientFenceUFOSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hClientFenceUFOSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hClientUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickCDMIN->ui32ClientUpdateCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hClientUpdateUFOSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hClientUpdateUFOSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hServerSyncsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickCDMIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hServerSyncsInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hServerSyncsInt2[i],
+										PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					}
+				}
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataIN,
+					  PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hComputeContext = psRGXFlushComputeDataIN->hComputeContext;
+	RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXFlushComputeDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXFlushComputeData_exit;
+		}
+	}
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXFlushComputeDataOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psComputeContextInt,
+											hComputeContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+											IMG_TRUE);
+					if(psRGXFlushComputeDataOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXFlushComputeData_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXFlushComputeDataOUT->eError =
+		PVRSRVRGXFlushComputeDataKM(
+					psComputeContextInt);
+
+
+
+
+RGXFlushComputeData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psComputeContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hComputeContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityIN,
+					  PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hComputeContext = psRGXSetComputeContextPriorityIN->hComputeContext;
+	RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXSetComputeContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXSetComputeContextPriority_exit;
+		}
+	}
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXSetComputeContextPriorityOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psComputeContextInt,
+											hComputeContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+											IMG_TRUE);
+					if(psRGXSetComputeContextPriorityOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXSetComputeContextPriority_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXSetComputeContextPriorityOUT->eError =
+		PVRSRVRGXSetComputeContextPriorityKM(psConnection, OSGetDevData(psConnection),
+					psComputeContextInt,
+					psRGXSetComputeContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetComputeContextPriority_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psComputeContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hComputeContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXGetLastComputeContextResetReason(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON *psRGXGetLastComputeContextResetReasonIN,
+					  PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON *psRGXGetLastComputeContextResetReasonOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hComputeContext = psRGXGetLastComputeContextResetReasonIN->hComputeContext;
+	RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXGetLastComputeContextResetReasonOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXGetLastComputeContextResetReason_exit;
+		}
+	}
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXGetLastComputeContextResetReasonOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psComputeContextInt,
+											hComputeContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+											IMG_TRUE);
+					if(psRGXGetLastComputeContextResetReasonOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXGetLastComputeContextResetReason_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXGetLastComputeContextResetReasonOUT->eError =
+		PVRSRVRGXGetLastComputeContextResetReasonKM(
+					psComputeContextInt,
+					&psRGXGetLastComputeContextResetReasonOUT->ui32LastResetReason,
+					&psRGXGetLastComputeContextResetReasonOUT->ui32LastResetJobRef);
+
+
+
+
+RGXGetLastComputeContextResetReason_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psComputeContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hComputeContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateIN,
+					  PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hComputeContext = psRGXNotifyComputeWriteOffsetUpdateIN->hComputeContext;
+	RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+		{
+			psRGXNotifyComputeWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXNotifyComputeWriteOffsetUpdate_exit;
+		}
+	}
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psComputeContextInt,
+											hComputeContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+											IMG_TRUE);
+					if(psRGXNotifyComputeWriteOffsetUpdateOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXNotifyComputeWriteOffsetUpdate_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+		PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(
+					psComputeContextInt);
+
+
+
+
+RGXNotifyComputeWriteOffsetUpdate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psComputeContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hComputeContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
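+/* Passing bUseLock = IMG_TRUE registers every entry below to be dispatched
+ * with bridge locking enabled. */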
+
+PVRSRV_ERROR InitRGXCMPBridge(void);
+PVRSRV_ERROR DeinitRGXCMPBridge(void);
+
+/*
+ * Register all RGXCMP functions with services
+ */
+PVRSRV_ERROR InitRGXCMPBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT, PVRSRVBridgeRGXCreateComputeContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT, PVRSRVBridgeRGXDestroyComputeContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM, PVRSRVBridgeRGXKickCDM,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA, PVRSRVBridgeRGXFlushComputeData,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY, PVRSRVBridgeRGXSetComputeContextPriority,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON, PVRSRVBridgeRGXGetLastComputeContextResetReason,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE, PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxcmp functions from services
+ */
+PVRSRV_ERROR DeinitRGXCMPBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxhwperf_bridge/common_rgxhwperf_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxhwperf_bridge/common_rgxhwperf_bridge.h
new file mode 100644
index 0000000..9655bb0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxhwperf_bridge/common_rgxhwperf_bridge.h
@@ -0,0 +1,140 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxhwperf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxhwperf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXHWPERF_BRIDGE_H
+#define COMMON_RGXHWPERF_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "rgx_hwperf.h"
+
+
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST			(PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3)
+
+
+/*******************************************
+            RGXCtrlHWPerf          
+ *******************************************/
+
+/* Bridge in structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG
+{
+	IMG_UINT32 ui32StreamId;
+	IMG_BOOL bToggle;
+	IMG_UINT64 ui64Mask;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERF;
+
+/* Bridge out structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF;
+
+
+/*******************************************
+            RGXConfigEnableHWPerfCounters          
+ *******************************************/
+
+/* Bridge in structure for RGXConfigEnableHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS_TAG
+{
+	IMG_UINT32 ui32ArrayLen;
+	RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS;
+
+/* Bridge out structure for RGXConfigEnableHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS;
+
+
+/*******************************************
+            RGXCtrlHWPerfCounters          
+ *******************************************/
+
+/* Bridge in structure for RGXCtrlHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS_TAG
+{
+	IMG_BOOL bEnable;
+	IMG_UINT32 ui32ArrayLen;
+	IMG_UINT16 * pui16BlockIDs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS;
+
+/* Bridge out structure for RGXCtrlHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS;
+
+
+/*******************************************
+            RGXConfigCustomCounters          
+ *******************************************/
+
+/* Bridge in structure for RGXConfigCustomCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS_TAG
+{
+	IMG_UINT16 ui16CustomBlockID;
+	IMG_UINT16 ui16NumCustomCounters;
+	IMG_UINT32 * pui32CustomCounterIDs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS;
+
+/* Bridge out structure for RGXConfigCustomCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS;
+
+
+#endif /* COMMON_RGXHWPERF_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxhwperf_bridge/server_rgxhwperf_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxhwperf_bridge/server_rgxhwperf_bridge.c
new file mode 100644
index 0000000..b7a18ab
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxhwperf_bridge/server_rgxhwperf_bridge.c
@@ -0,0 +1,430 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxhwperf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxhwperf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxhwperf.h"
+
+
+#include "common_rgxhwperf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN,
+					  PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
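+	/* No array or handle arguments to marshal here; the control request is
+	 * forwarded straight to the kernel-mode handler. */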
+	psRGXCtrlHWPerfOUT->eError =
+		PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevData(psConnection),
+					psRGXCtrlHWPerfIN->ui32StreamId,
+					psRGXCtrlHWPerfIN->bToggle,
+					psRGXCtrlHWPerfIN->ui64Mask);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXConfigEnableHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS *psRGXConfigEnableHWPerfCountersIN,
+					  PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS *psRGXConfigEnableHWPerfCountersOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXConfigEnableHWPerfCountersIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXConfigEnableHWPerfCountersIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXConfigEnableHWPerfCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXConfigEnableHWPerfCounters_exit;
+			}
+		}
+	}
+
+	if (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen != 0)
+	{
+		psBlockConfigsInt = (RGX_HWPERF_CONFIG_CNTBLK*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK);
+	}
+
+			/* Copy the data over */
+			if (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0)
+			{
+				if ( OSCopyFromUser(NULL, psBlockConfigsInt, psRGXConfigEnableHWPerfCountersIN->psBlockConfigs, psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK )
+				{
+					psRGXConfigEnableHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXConfigEnableHWPerfCounters_exit;
+				}
+			}
+
+
+	psRGXConfigEnableHWPerfCountersOUT->eError =
+		PVRSRVRGXConfigEnableHWPerfCountersKM(psConnection, OSGetDevData(psConnection),
+					psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen,
+					psBlockConfigsInt);
+
+
+
+
+RGXConfigEnableHWPerfCounters_exit:
+
+
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCtrlHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS *psRGXCtrlHWPerfCountersIN,
+					  PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS *psRGXCtrlHWPerfCountersOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_UINT16 *ui16BlockIDsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCtrlHWPerfCountersIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCtrlHWPerfCountersIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXCtrlHWPerfCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXCtrlHWPerfCounters_exit;
+			}
+		}
+	}
+
+	if (psRGXCtrlHWPerfCountersIN->ui32ArrayLen != 0)
+	{
+		ui16BlockIDsInt = (IMG_UINT16*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16);
+	}
+
+			/* Copy the data over */
+			if (psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui16BlockIDsInt, psRGXCtrlHWPerfCountersIN->pui16BlockIDs, psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16)) != PVRSRV_OK )
+				{
+					psRGXCtrlHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXCtrlHWPerfCounters_exit;
+				}
+			}
+
+
+	psRGXCtrlHWPerfCountersOUT->eError =
+		PVRSRVRGXCtrlHWPerfCountersKM(psConnection, OSGetDevData(psConnection),
+					psRGXCtrlHWPerfCountersIN->bEnable,
+					psRGXCtrlHWPerfCountersIN->ui32ArrayLen,
+					ui16BlockIDsInt);
+
+
+
+
+RGXCtrlHWPerfCounters_exit:
+
+
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersIN,
+					  PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_UINT32 *ui32CustomCounterIDsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXConfigCustomCountersIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXConfigCustomCountersIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXConfigCustomCounters_exit;
+			}
+		}
+	}
+
+	if (psRGXConfigCustomCountersIN->ui16NumCustomCounters != 0)
+	{
+		ui32CustomCounterIDsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32CustomCounterIDsInt, psRGXConfigCustomCountersIN->pui32CustomCounterIDs, psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXConfigCustomCounters_exit;
+				}
+			}
+
+
+	psRGXConfigCustomCountersOUT->eError =
+		PVRSRVRGXConfigCustomCountersKM(psConnection, OSGetDevData(psConnection),
+					psRGXConfigCustomCountersIN->ui16CustomBlockID,
+					psRGXConfigCustomCountersIN->ui16NumCustomCounters,
+					ui32CustomCounterIDsInt);
+
+
+
+
+RGXConfigCustomCounters_exit:
+
+
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXHWPERFBridge(void);
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void);
+
+/*
+ * Register all RGXHWPERF functions with services
+ */
+PVRSRV_ERROR InitRGXHWPERFBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF, PVRSRVBridgeRGXCtrlHWPerf,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS, PVRSRVBridgeRGXConfigEnableHWPerfCounters,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS, PVRSRVBridgeRGXCtrlHWPerfCounters,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS, PVRSRVBridgeRGXConfigCustomCounters,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxhwperf functions from services
+ */
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxkicksync_bridge/common_rgxkicksync_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxkicksync_bridge/common_rgxkicksync_bridge.h
new file mode 100644
index 0000000..ee830c77
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxkicksync_bridge/common_rgxkicksync_bridge.h
@@ -0,0 +1,134 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxkicksync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxkicksync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXKICKSYNC_BRIDGE_H
+#define COMMON_RGXKICKSYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT			PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT			PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC			PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST			(PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2)
+
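+/* Bridge call IDs are offsets from PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST and
+ * index the dispatch table set up in InitRGXKICKSYNCBridge(). The in/out
+ * structures below are packed so that the user- and kernel-side layouts of
+ * each bridge message match exactly. */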
+
+/*******************************************
+            RGXCreateKickSyncContext          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+	IMG_HANDLE hKickSyncContext;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT;
+
+
+/*******************************************
+            RGXDestroyKickSyncContext          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+	IMG_HANDLE hKickSyncContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT;
+
+
+/*******************************************
+            RGXKickSync          
+ *******************************************/
+
+/* Bridge in structure for RGXKickSync */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC_TAG
+{
+	IMG_HANDLE hKickSyncContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientFenceCount;
+	IMG_HANDLE * phFenceUFOSyncPrimBlock;
+	IMG_UINT32 * pui32FenceSyncOffset;
+	IMG_UINT32 * pui32FenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_HANDLE * phUpdateUFOSyncPrimBlock;
+	IMG_UINT32 * pui32UpdateSyncOffset;
+	IMG_UINT32 * pui32UpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 * pui32ServerSyncFlags;
+	IMG_HANDLE * phServerSync;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE hTimelineFenceFD;
+	IMG_CHAR * puiUpdateFenceName;
+	IMG_UINT32 ui32ExtJobRef;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKSYNC;
+
+/* Bridge out structure for RGXKickSync */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC_TAG
+{
+	PVRSRV_FENCE hUpdateFenceFD;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKSYNC;
+
+
+#endif /* COMMON_RGXKICKSYNC_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxkicksync_bridge/server_rgxkicksync_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxkicksync_bridge/server_rgxkicksync_bridge.c
new file mode 100644
index 0000000..34c7a1d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxkicksync_bridge/server_rgxkicksync_bridge.c
@@ -0,0 +1,726 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxkicksync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxkicksync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxkicksync.h"
+
+
+#include "common_rgxkicksync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRGXCreateKickSyncContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPrivData = psRGXCreateKickSyncContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContextInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	{
+		/* Look up the address from the handle */
+		psRGXCreateKickSyncContextOUT->eError =
+			PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						(void **) &hPrivDataInt,
+						hPrivData,
+						PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+						IMG_TRUE);
+		if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+		{
+			UnlockHandle();
+			goto RGXCreateKickSyncContext_exit;
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXCreateKickSyncContextOUT->eError =
+		PVRSRVRGXCreateKickSyncContextKM(psConnection, OSGetDevData(psConnection),
+					hPrivDataInt,
+					&psKickSyncContextInt);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateKickSyncContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRGXCreateKickSyncContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psRGXCreateKickSyncContextOUT->hKickSyncContext,
+							(void *) psKickSyncContextInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyKickSyncContextKM);
+	if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateKickSyncContext_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+RGXCreateKickSyncContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	{
+		/* Unreference the previously looked up handle */
+		if (hPrivDataInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						hPrivData,
+						PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
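+	/* If anything failed after the kick sync context was created, destroy it
+	 * here so the early exit does not leak it. */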
+	if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+	{
+		if (psKickSyncContextInt)
+		{
+			PVRSRVRGXDestroyKickSyncContextKM(psKickSyncContextInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyKickSyncContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXDestroyKickSyncContextOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyKickSyncContextIN->hKickSyncContext,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+	if ((psRGXDestroyKickSyncContextOUT->eError != PVRSRV_OK) &&
+	    (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXDestroyKickSyncContext: %s",
+		        PVRSRVGetErrorStringKM(psRGXDestroyKickSyncContextOUT->eError)));
+		UnlockHandle();
+		goto RGXDestroyKickSyncContext_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXDestroyKickSyncContext_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickSync(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXKICKSYNC *psRGXKickSyncIN,
+					  PVRSRV_BRIDGE_OUT_RGXKICKSYNC *psRGXKickSyncOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hKickSyncContext = psRGXKickSyncIN->hKickSyncContext;
+	RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32FenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32FenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32UpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerSyncInt = NULL;
+	IMG_HANDLE *hServerSyncInt2 = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
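+
+	/* Each user handle array is staged twice: the h...Int2 arrays receive the
+	 * raw handles copied from user space, and the ps...Int arrays receive the
+	 * kernel pointers they resolve to during handle lookup below. */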
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
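+	/* Note: the counts below are user-supplied IMG_UINT32 values; a hardened
+	 * implementation would range-check them before summing the array sizes. */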
+	IMG_UINT32 ui32BufferSize =
+			(psRGXKickSyncIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+			(psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+			(psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+			(psRGXKickSyncIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+			(psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+			(32 * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickSyncIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickSyncIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXKickSyncOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXKickSync_exit;
+			}
+		}
+	}
+
+	if (psRGXKickSyncIN->ui32ClientFenceCount != 0)
+	{
+		psFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickSyncIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser(NULL, hFenceUFOSyncPrimBlockInt2, psRGXKickSyncIN->phFenceUFOSyncPrimBlock, psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ClientFenceCount != 0)
+	{
+		ui32FenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32FenceSyncOffsetInt, psRGXKickSyncIN->pui32FenceSyncOffset, psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ClientFenceCount != 0)
+	{
+		ui32FenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32FenceValueInt, psRGXKickSyncIN->pui32FenceValue, psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ClientUpdateCount != 0)
+	{
+		psUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser(NULL, hUpdateUFOSyncPrimBlockInt2, psRGXKickSyncIN->phUpdateUFOSyncPrimBlock, psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32UpdateSyncOffsetInt, psRGXKickSyncIN->pui32UpdateSyncOffset, psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32UpdateValueInt, psRGXKickSyncIN->pui32UpdateValue, psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXKickSyncIN->pui32ServerSyncFlags, psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+	if (psRGXKickSyncIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickSyncIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServerSyncInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser(NULL, hServerSyncInt2, psRGXKickSyncIN->phServerSync, psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+
+	{
+		uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += 32 * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (32 * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser(NULL, uiUpdateFenceNameInt, psRGXKickSyncIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	{
+		/* Look up the address from the handle */
+		psRGXKickSyncOUT->eError =
+			PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						(void **) &psKickSyncContextInt,
+						hKickSyncContext,
+						PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+						IMG_TRUE);
+		if (psRGXKickSyncOUT->eError != PVRSRV_OK)
+		{
+			UnlockHandle();
+			goto RGXKickSync_exit;
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickSyncIN->ui32ClientFenceCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickSyncOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psFenceUFOSyncPrimBlockInt[i],
+											hFenceUFOSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickSyncOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickSync_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickSyncIN->ui32ClientUpdateCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickSyncOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psUpdateUFOSyncPrimBlockInt[i],
+											hUpdateUFOSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickSyncOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickSync_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickSyncIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickSyncOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psServerSyncInt[i],
+											hServerSyncInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+											IMG_TRUE);
+					if(psRGXKickSyncOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickSync_exit;
+					}
+				}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXKickSyncOUT->eError =
+		PVRSRVRGXKickSyncKM(
+					psKickSyncContextInt,
+					psRGXKickSyncIN->ui32ClientCacheOpSeqNum,
+					psRGXKickSyncIN->ui32ClientFenceCount,
+					psFenceUFOSyncPrimBlockInt,
+					ui32FenceSyncOffsetInt,
+					ui32FenceValueInt,
+					psRGXKickSyncIN->ui32ClientUpdateCount,
+					psUpdateUFOSyncPrimBlockInt,
+					ui32UpdateSyncOffsetInt,
+					ui32UpdateValueInt,
+					psRGXKickSyncIN->ui32ServerSyncCount,
+					ui32ServerSyncFlagsInt,
+					psServerSyncInt,
+					psRGXKickSyncIN->hCheckFenceFD,
+					psRGXKickSyncIN->hTimelineFenceFD,
+					&psRGXKickSyncOUT->hUpdateFenceFD,
+					uiUpdateFenceNameInt,
+					psRGXKickSyncIN->ui32ExtJobRef);
+
+
+
+
+RGXKickSync_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	{
+		/* Unreference the previously looked up handle */
+		if (psKickSyncContextInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						hKickSyncContext,
+						PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+		}
+	}
+
+
+
+
+
+
+	if (hFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickSyncIN->ui32ClientFenceCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hFenceUFOSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hFenceUFOSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickSyncIN->ui32ClientUpdateCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hUpdateUFOSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hUpdateUFOSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hServerSyncInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickSyncIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hServerSyncInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hServerSyncInt2[i],
+										PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					}
+				}
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
+PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void);
+
+/*
+ * Register all RGXKICKSYNC functions with services
+ */
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT, PVRSRVBridgeRGXCreateKickSyncContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT, PVRSRVBridgeRGXDestroyKickSyncContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC, PVRSRVBridgeRGXKickSync,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXKICKSYNC functions from services
+ */
+PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxpdump_bridge/client_rgxpdump_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxpdump_bridge/client_rgxpdump_bridge.h
new file mode 100644
index 0000000..bedac78
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxpdump_bridge/client_rgxpdump_bridge.h
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for rgxpdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for rgxpdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_RGXPDUMP_BRIDGE_H
+#define CLIENT_RGXPDUMP_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_rgxpdump_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpTraceBuffer(IMG_HANDLE hBridge,
+							      IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpSignatureBuffer(IMG_HANDLE hBridge,
+								  IMG_UINT32 ui32PDumpFlags);
+
+
+#endif /* CLIENT_RGXPDUMP_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxpdump_bridge/client_rgxpdump_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxpdump_bridge/client_rgxpdump_direct_bridge.c
new file mode 100644
index 0000000..e669f50
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxpdump_bridge/client_rgxpdump_direct_bridge.c
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for rgxpdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_rgxpdump_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "rgx_bridge.h"
+
+#include "rgxpdump.h"
+
+
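+/* In the direct (in-process) bridge the opaque bridge handle is the device
+ * node itself, so it is cast straight back; the connection argument is NULL
+ * because no ioctl connection exists on this path. */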
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpTraceBuffer(IMG_HANDLE hBridge,
+							      IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		PVRSRVPDumpTraceBufferKM(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					 ui32PDumpFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpSignatureBuffer(IMG_HANDLE hBridge,
+								  IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		PVRSRVPDumpSignatureBufferKM(NULL, (PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					     ui32PDumpFlags);
+
+	return eError;
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxpdump_bridge/common_rgxpdump_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxpdump_bridge/common_rgxpdump_bridge.h
new file mode 100644
index 0000000..f1aacd5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxpdump_bridge/common_rgxpdump_bridge.h
@@ -0,0 +1,96 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxpdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxpdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXPDUMP_BRIDGE_H
+#define COMMON_RGXPDUMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER			PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER			PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST			(PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+1)
+
+
+/*******************************************
+            PDumpTraceBuffer          
+ *******************************************/
+
+/* Bridge in structure for PDumpTraceBuffer */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER_TAG
+{
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER;
+
+/* Bridge out structure for PDumpTraceBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER;
+
+
+/*******************************************
+            PDumpSignatureBuffer          
+ *******************************************/
+
+/* Bridge in structure for PDumpSignatureBuffer */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER_TAG
+{
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER;
+
+/* Bridge out structure for PDumpSignatureBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER;
+
+
+#endif /* COMMON_RGXPDUMP_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxpdump_bridge/server_rgxpdump_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxpdump_bridge/server_rgxpdump_bridge.c
new file mode 100644
index 0000000..152450d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxpdump_bridge/server_rgxpdump_bridge.c
@@ -0,0 +1,164 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxpdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxpdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxpdump.h"
+
+
+#include "common_rgxpdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgePDumpTraceBuffer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER *psPDumpTraceBufferIN,
+					  PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER *psPDumpTraceBufferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+	psPDumpTraceBufferOUT->eError =
+		PVRSRVPDumpTraceBufferKM(psConnection, OSGetDevData(psConnection),
+					psPDumpTraceBufferIN->ui32PDumpFlags);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePDumpSignatureBuffer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferIN,
+					  PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+	psPDumpSignatureBufferOUT->eError =
+		PVRSRVPDumpSignatureBufferKM(psConnection, OSGetDevData(psConnection),
+					psPDumpSignatureBufferIN->ui32PDumpFlags);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXPDUMPBridge(void);
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void);
+
+/*
+ * Register all RGXPDUMP functions with services
+ */
+PVRSRV_ERROR InitRGXPDUMPBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER, PVRSRVBridgePDumpTraceBuffer,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER, PVRSRVBridgePDumpSignatureBuffer,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxpdump functions with services
+ */
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxray_bridge/common_rgxray_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxray_bridge/common_rgxray_bridge.h
new file mode 100644
index 0000000..ff0748e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxray_bridge/common_rgxray_bridge.h
@@ -0,0 +1,291 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxray
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxray
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXRAY_BRIDGE_H
+#define COMMON_RGXRAY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_devmem.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_RGXRAY_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMFREELIST			PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMFREELIST			PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMCONTEXT			PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMCONTEXT			PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXRAY_RGXKICKRS			PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXRAY_RGXKICKVRDM			PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT			PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT			PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXRAY_RGXSETRAYCONTEXTPRIORITY			PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXRAY_CMD_LAST			(PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+8)
+
+
+/*******************************************
+            RGXCreateRPMFreeList          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRPMFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERPMFREELIST_TAG
+{
+	IMG_HANDLE hRPMContext;
+	IMG_UINT32 ui32InitFLPages;
+	IMG_UINT32 ui32GrowFLPages;
+	IMG_DEV_VIRTADDR sFreeListDevVAddr;
+	IMG_BOOL bIsExternal;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERPMFREELIST;
+
+/* Bridge out structure for RGXCreateRPMFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERPMFREELIST_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+	IMG_UINT32 ui32HWFreeList;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERPMFREELIST;
+
+
+/*******************************************
+            RGXDestroyRPMFreeList          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRPMFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRPMFREELIST_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRPMFREELIST;
+
+/* Bridge out structure for RGXDestroyRPMFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRPMFREELIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRPMFREELIST;
+
+
+/*******************************************
+            RGXCreateRPMContext          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRPMContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERPMCONTEXT_TAG
+{
+	IMG_UINT32 ui32TotalRPMPages;
+	IMG_UINT32 ui32Log2DopplerPageSize;
+	IMG_DEV_VIRTADDR sSceneMemoryBaseAddr;
+	IMG_DEV_VIRTADDR sDopplerHeapBaseAddr;
+	IMG_HANDLE hSceneHeap;
+	IMG_DEV_VIRTADDR sRPMPageTableBaseAddr;
+	IMG_HANDLE hRPMPageTableHeap;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERPMCONTEXT;
+
+/* Bridge out structure for RGXCreateRPMContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERPMCONTEXT_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+	IMG_HANDLE hHWMemDesc;
+	IMG_UINT32 ui32HWFrameData;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERPMCONTEXT;
+
+
+/*******************************************
+            RGXDestroyRPMContext          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRPMContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRPMCONTEXT_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRPMCONTEXT;
+
+/* Bridge out structure for RGXDestroyRPMContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRPMCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRPMCONTEXT;
+
+
+/*******************************************
+            RGXKickRS          
+ *******************************************/
+
+/* Bridge in structure for RGXKickRS */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKRS_TAG
+{
+	IMG_HANDLE hRayContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientFenceCount;
+	IMG_HANDLE * phClientFenceUFOSyncPrimBlock;
+	IMG_UINT32 * pui32ClientFenceSyncOffset;
+	IMG_UINT32 * pui32ClientFenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_HANDLE * phClientUpdateUFOSyncPrimBlock;
+	IMG_UINT32 * pui32ClientUpdateSyncOffset;
+	IMG_UINT32 * pui32ClientUpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 * pui32ServerSyncFlags;
+	IMG_HANDLE * phServerSyncs;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_CHAR * puiUpdateFenceName;
+	IMG_UINT32 ui32CmdSize;
+	IMG_BYTE * psDMCmd;
+	IMG_UINT32 ui32FCCmdSize;
+	IMG_BYTE * psFCDMCmd;
+	IMG_UINT32 ui32FrameContext;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_DEV_VIRTADDR ssRobustnessResetReason;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKRS;
+
+/* Bridge out structure for RGXKickRS */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKRS_TAG
+{
+	PVRSRV_FENCE hUpdateFence;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKRS;
+
+
+/*******************************************
+            RGXKickVRDM          
+ *******************************************/
+
+/* Bridge in structure for RGXKickVRDM */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKVRDM_TAG
+{
+	IMG_HANDLE hRayContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientFenceCount;
+	IMG_HANDLE * phClientFenceUFOSyncPrimBlock;
+	IMG_UINT32 * pui32ClientFenceSyncOffset;
+	IMG_UINT32 * pui32ClientFenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_HANDLE * phClientUpdateUFOSyncPrimBlock;
+	IMG_UINT32 * pui32ClientUpdateSyncOffset;
+	IMG_UINT32 * pui32ClientUpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 * pui32ServerSyncFlags;
+	IMG_HANDLE * phServerSyncs;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_CHAR * puiUpdateFenceName;
+	IMG_UINT32 ui32CmdSize;
+	IMG_BYTE * psDMCmd;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_DEV_VIRTADDR ssRobustnessResetReason;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKVRDM;
+
+/* Bridge out structure for RGXKickVRDM */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKVRDM_TAG
+{
+	PVRSRV_FENCE hUpdateFence;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKVRDM;
+
+
+/*******************************************
+            RGXCreateRayContext          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRayContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT_TAG
+{
+	IMG_UINT32 ui32Priority;
+	IMG_DEV_VIRTADDR sVRMCallStackAddr;
+	IMG_UINT32 ui32FrameworkCmdSize;
+	IMG_BYTE * psFrameworkCmd;
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT;
+
+/* Bridge out structure for RGXCreateRayContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT_TAG
+{
+	IMG_HANDLE hRayContext;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT;
+
+
+/*******************************************
+            RGXDestroyRayContext          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRayContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT_TAG
+{
+	IMG_HANDLE hRayContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT;
+
+/* Bridge out structure for RGXDestroyRayContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT;
+
+
+/*******************************************
+            RGXSetRayContextPriority          
+ *******************************************/
+
+/* Bridge in structure for RGXSetRayContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETRAYCONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hRayContext;
+	IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETRAYCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetRayContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRAYCONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETRAYCONTEXTPRIORITY;
+
+
+#endif /* COMMON_RGXRAY_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxray_bridge/server_rgxray_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxray_bridge/server_rgxray_bridge.c
new file mode 100644
index 0000000..2bfe1c6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxray_bridge/server_rgxray_bridge.c
@@ -0,0 +1,1932 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxray
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxray
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxray.h"
+#include "devicemem_server.h"
+
+
+#include "common_rgxray_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+#include "rgx_bvnc_defs_km.h"
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRGXCreateRPMFreeList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATERPMFREELIST *psRGXCreateRPMFreeListIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATERPMFREELIST *psRGXCreateRPMFreeListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hRPMContext = psRGXCreateRPMFreeListIN->hRPMContext;
+	RGX_SERVER_RPM_CONTEXT * psRPMContextInt = NULL;
+	RGX_RPM_FREELIST * psCleanupCookieInt = NULL;
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+		{
+			psRGXCreateRPMFreeListOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXCreateRPMFreeList_exit;
+		}
+	}
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateRPMFreeListOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psRPMContextInt,
+											hRPMContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT,
+											IMG_TRUE);
+					if(psRGXCreateRPMFreeListOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXCreateRPMFreeList_exit;
+					}
+				}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psRGXCreateRPMFreeListOUT->eError =
+		RGXCreateRPMFreeList(psConnection, OSGetDevData(psConnection),
+					psRPMContextInt,
+					psRGXCreateRPMFreeListIN->ui32InitFLPages,
+					psRGXCreateRPMFreeListIN->ui32GrowFLPages,
+					psRGXCreateRPMFreeListIN->sFreeListDevVAddr,
+					&psCleanupCookieInt,
+					&psRGXCreateRPMFreeListOUT->ui32HWFreeList,
+					psRGXCreateRPMFreeListIN->bIsExternal);
+	/* Exit early if the bridged call fails */
+	if(psRGXCreateRPMFreeListOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateRPMFreeList_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRGXCreateRPMFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psRGXCreateRPMFreeListOUT->hCleanupCookie,
+							(void *) psCleanupCookieInt,
+							PVRSRV_HANDLE_TYPE_RGX_RPM_FREELIST,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+							(PFN_HANDLE_RELEASE)&RGXDestroyRPMFreeList);
+	if (psRGXCreateRPMFreeListOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateRPMFreeList_exit;
+	}
+
+	/* Release now that we have created handles. */
+	UnlockHandle();
+
+
+
+RGXCreateRPMFreeList_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psRPMContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hRPMContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT);
+					}
+				}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+	if (psRGXCreateRPMFreeListOUT->eError != PVRSRV_OK)
+	{
+		if (psCleanupCookieInt)
+		{
+			RGXDestroyRPMFreeList(psCleanupCookieInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRPMFreeList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYRPMFREELIST *psRGXDestroyRPMFreeListIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYRPMFREELIST *psRGXDestroyRPMFreeListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+		{
+			psRGXDestroyRPMFreeListOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXDestroyRPMFreeList_exit;
+		}
+	}
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXDestroyRPMFreeListOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyRPMFreeListIN->hCleanupCookie,
+					PVRSRV_HANDLE_TYPE_RGX_RPM_FREELIST);
+	if ((psRGXDestroyRPMFreeListOUT->eError != PVRSRV_OK) &&
+	    (psRGXDestroyRPMFreeListOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXDestroyRPMFreeList: %s",
+		        PVRSRVGetErrorStringKM(psRGXDestroyRPMFreeListOUT->eError)));
+		UnlockHandle();
+		goto RGXDestroyRPMFreeList_exit;
+	}
+
+	/* Release now that we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXDestroyRPMFreeList_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRPMContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATERPMCONTEXT *psRGXCreateRPMContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATERPMCONTEXT *psRGXCreateRPMContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_SERVER_RPM_CONTEXT * psCleanupCookieInt = NULL;
+	IMG_HANDLE hSceneHeap = psRGXCreateRPMContextIN->hSceneHeap;
+	DEVMEMINT_HEAP * psSceneHeapInt = NULL;
+	IMG_HANDLE hRPMPageTableHeap = psRGXCreateRPMContextIN->hRPMPageTableHeap;
+	DEVMEMINT_HEAP * psRPMPageTableHeapInt = NULL;
+	DEVMEM_MEMDESC * psHWMemDescInt = NULL;
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+		{
+			psRGXCreateRPMContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXCreateRPMContext_exit;
+		}
+	}
+
+
+
+	psRGXCreateRPMContextOUT->hCleanupCookie = NULL;
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateRPMContextOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psSceneHeapInt,
+											hSceneHeap,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+											IMG_TRUE);
+					if(psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXCreateRPMContext_exit;
+					}
+				}
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateRPMContextOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psRPMPageTableHeapInt,
+											hRPMPageTableHeap,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+											IMG_TRUE);
+					if(psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXCreateRPMContext_exit;
+					}
+				}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psRGXCreateRPMContextOUT->eError =
+		RGXCreateRPMContext(psConnection, OSGetDevData(psConnection),
+					&psCleanupCookieInt,
+					psRGXCreateRPMContextIN->ui32TotalRPMPages,
+					psRGXCreateRPMContextIN->ui32Log2DopplerPageSize,
+					psRGXCreateRPMContextIN->sSceneMemoryBaseAddr,
+					psRGXCreateRPMContextIN->sDopplerHeapBaseAddr,
+					psSceneHeapInt,
+					psRGXCreateRPMContextIN->sRPMPageTableBaseAddr,
+					psRPMPageTableHeapInt,
+					&psHWMemDescInt,
+					&psRGXCreateRPMContextOUT->ui32HWFrameData);
+	/* Exit early if the bridged call fails */
+	if(psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateRPMContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRGXCreateRPMContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psRGXCreateRPMContextOUT->hCleanupCookie,
+							(void *) psCleanupCookieInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+							(PFN_HANDLE_RELEASE)&RGXDestroyRPMContext);
+	if (psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateRPMContext_exit;
+	}
+
+
+
+
+
+
+	psRGXCreateRPMContextOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+							&psRGXCreateRPMContextOUT->hHWMemDesc,
+							(void *) psHWMemDescInt,
+							PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+							psRGXCreateRPMContextOUT->hCleanupCookie);
+	if (psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateRPMContext_exit;
+	}
+
+	/* Release now that we have created handles. */
+	UnlockHandle();
+
+
+
+RGXCreateRPMContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psSceneHeapInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hSceneHeap,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+					}
+				}
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psRPMPageTableHeapInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hRPMPageTableHeap,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+					}
+				}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+	if (psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+	{
+		/* Lock over handle creation cleanup. */
+		LockHandle();
+		if (psRGXCreateRPMContextOUT->hCleanupCookie)
+		{
+
+
+			PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						(IMG_HANDLE) psRGXCreateRPMContextOUT->hCleanupCookie,
+						PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT);
+			if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				        "PVRSRVBridgeRGXCreateRPMContext: %s",
+				        PVRSRVGetErrorStringKM(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psCleanupCookieInt = NULL;
+		}
+
+
+		/* Release now that we have cleaned up creation handles. */
+		UnlockHandle();
+		if (psCleanupCookieInt)
+		{
+			RGXDestroyRPMContext(psCleanupCookieInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRPMContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYRPMCONTEXT *psRGXDestroyRPMContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYRPMCONTEXT *psRGXDestroyRPMContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+		{
+			psRGXDestroyRPMContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXDestroyRPMContext_exit;
+		}
+	}
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXDestroyRPMContextOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyRPMContextIN->hCleanupCookie,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT);
+	if ((psRGXDestroyRPMContextOUT->eError != PVRSRV_OK) &&
+	    (psRGXDestroyRPMContextOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXDestroyRPMContext: %s",
+		        PVRSRVGetErrorStringKM(psRGXDestroyRPMContextOUT->eError)));
+		UnlockHandle();
+		goto RGXDestroyRPMContext_exit;
+	}
+
+	/* Release now that we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXDestroyRPMContext_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickRS(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXKICKRS *psRGXKickRSIN,
+					  PVRSRV_BRIDGE_OUT_RGXKICKRS *psRGXKickRSOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hRayContext = psRGXKickRSIN->hRayContext;
+	RGX_SERVER_RAY_CONTEXT * psRayContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psClientFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientFenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientFenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psClientUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientUpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerSyncsInt = NULL;
+	IMG_HANDLE *hServerSyncsInt2 = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_BYTE *psDMCmdInt = NULL;
+	IMG_BYTE *psFCDMCmdInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
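+	/* Total scratch space needed to hold kernel-side copies of every user-supplied array for this call; a single buffer is carved up at the offsets computed below. */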
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXKickRSIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+			(psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXKickRSIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+			(psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+			(psRGXKickRSIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+			(psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+			(32 * sizeof(IMG_CHAR)) +
+			(psRGXKickRSIN->ui32CmdSize * sizeof(IMG_BYTE)) +
+			(psRGXKickRSIN->ui32FCCmdSize * sizeof(IMG_BYTE)) +
+			0;
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+		{
+			psRGXKickRSOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXKickRS_exit;
+		}
+	}
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickRSIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickRSIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXKickRSOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXKickRS_exit;
+			}
+		}
+	}
+
+	if (psRGXKickRSIN->ui32ClientFenceCount != 0)
+	{
+		psClientFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickRSIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hClientFenceUFOSyncPrimBlockInt2, psRGXKickRSIN->phClientFenceUFOSyncPrimBlock, psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickRS_exit;
+				}
+			}
+	if (psRGXKickRSIN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientFenceSyncOffsetInt, psRGXKickRSIN->pui32ClientFenceSyncOffset, psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickRS_exit;
+				}
+			}
+	if (psRGXKickRSIN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientFenceValueInt, psRGXKickRSIN->pui32ClientFenceValue, psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickRS_exit;
+				}
+			}
+	if (psRGXKickRSIN->ui32ClientUpdateCount != 0)
+	{
+		psClientUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickRSIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hClientUpdateUFOSyncPrimBlockInt2, psRGXKickRSIN->phClientUpdateUFOSyncPrimBlock, psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickRS_exit;
+				}
+			}
+	if (psRGXKickRSIN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientUpdateSyncOffsetInt, psRGXKickRSIN->pui32ClientUpdateSyncOffset, psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickRS_exit;
+				}
+			}
+	if (psRGXKickRSIN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientUpdateValueInt, psRGXKickRSIN->pui32ClientUpdateValue, psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickRS_exit;
+				}
+			}
+	if (psRGXKickRSIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXKickRSIN->pui32ServerSyncFlags, psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickRS_exit;
+				}
+			}
+	if (psRGXKickRSIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickRSIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServerSyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hServerSyncsInt2, psRGXKickRSIN->phServerSyncs, psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickRS_exit;
+				}
+			}
+	
+	{
+		uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += 32 * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (32 * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, psRGXKickRSIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickRS_exit;
+				}
+			}
+	if (psRGXKickRSIN->ui32CmdSize != 0)
+	{
+		psDMCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickRSIN->ui32CmdSize * sizeof(IMG_BYTE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickRSIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, psDMCmdInt, psRGXKickRSIN->psDMCmd, psRGXKickRSIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+				{
+					psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickRS_exit;
+				}
+			}
+	if (psRGXKickRSIN->ui32FCCmdSize != 0)
+	{
+		psFCDMCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickRSIN->ui32FCCmdSize * sizeof(IMG_BYTE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickRSIN->ui32FCCmdSize * sizeof(IMG_BYTE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, psFCDMCmdInt, psRGXKickRSIN->psFCDMCmd, psRGXKickRSIN->ui32FCCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+				{
+					psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickRS_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXKickRSOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psRayContextInt,
+											hRayContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+											IMG_TRUE);
+					if(psRGXKickRSOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickRS_exit;
+					}
+				}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickRSIN->ui32ClientFenceCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickRSOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psClientFenceUFOSyncPrimBlockInt[i],
+											hClientFenceUFOSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickRSOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickRS_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickRSIN->ui32ClientUpdateCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickRSOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psClientUpdateUFOSyncPrimBlockInt[i],
+											hClientUpdateUFOSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickRSOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickRS_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickRSIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickRSOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psServerSyncsInt[i],
+											hServerSyncsInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+											IMG_TRUE);
+					if(psRGXKickRSOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickRS_exit;
+					}
+				}
+		}
+	}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psRGXKickRSOUT->eError =
+		PVRSRVRGXKickRSKM(
+					psRayContextInt,
+					psRGXKickRSIN->ui32ClientCacheOpSeqNum,
+					psRGXKickRSIN->ui32ClientFenceCount,
+					psClientFenceUFOSyncPrimBlockInt,
+					ui32ClientFenceSyncOffsetInt,
+					ui32ClientFenceValueInt,
+					psRGXKickRSIN->ui32ClientUpdateCount,
+					psClientUpdateUFOSyncPrimBlockInt,
+					ui32ClientUpdateSyncOffsetInt,
+					ui32ClientUpdateValueInt,
+					psRGXKickRSIN->ui32ServerSyncCount,
+					ui32ServerSyncFlagsInt,
+					psServerSyncsInt,
+					psRGXKickRSIN->hCheckFenceFD,
+					psRGXKickRSIN->hUpdateTimeline,
+					&psRGXKickRSOUT->hUpdateFence,
+					uiUpdateFenceNameInt,
+					psRGXKickRSIN->ui32CmdSize,
+					psDMCmdInt,
+					psRGXKickRSIN->ui32FCCmdSize,
+					psFCDMCmdInt,
+					psRGXKickRSIN->ui32FrameContext,
+					psRGXKickRSIN->ui32PDumpFlags,
+					psRGXKickRSIN->ui32ExtJobRef,
+					psRGXKickRSIN->ssRobustnessResetReason);
+
+
+
+
+RGXKickRS_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psRayContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hRayContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+					}
+				}
+
+
+
+
+
+
+	if (hClientFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickRSIN->ui32ClientFenceCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hClientFenceUFOSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hClientFenceUFOSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hClientUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickRSIN->ui32ClientUpdateCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hClientUpdateUFOSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hClientUpdateUFOSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hServerSyncsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickRSIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hServerSyncsInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hServerSyncsInt2[i],
+										PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					}
+				}
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickVRDM(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXKICKVRDM *psRGXKickVRDMIN,
+					  PVRSRV_BRIDGE_OUT_RGXKICKVRDM *psRGXKickVRDMOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hRayContext = psRGXKickVRDMIN->hRayContext;
+	RGX_SERVER_RAY_CONTEXT * psRayContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psClientFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientFenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientFenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psClientUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientUpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerSyncsInt = NULL;
+	IMG_HANDLE *hServerSyncsInt2 = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_BYTE *psDMCmdInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
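+	/* As in RGXKickRS: total scratch space for the kernel-side copies of the user-supplied arrays. */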
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+			(psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+			(psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+			(psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+			(psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+			(32 * sizeof(IMG_CHAR)) +
+			(psRGXKickVRDMIN->ui32CmdSize * sizeof(IMG_BYTE)) +
+			0;
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+		{
+			psRGXKickVRDMOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXKickVRDM_exit;
+		}
+	}
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickVRDMIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickVRDMIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXKickVRDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXKickVRDM_exit;
+			}
+		}
+	}
+
+	if (psRGXKickVRDMIN->ui32ClientFenceCount != 0)
+	{
+		psClientFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hClientFenceUFOSyncPrimBlockInt2, psRGXKickVRDMIN->phClientFenceUFOSyncPrimBlock, psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickVRDM_exit;
+				}
+			}
+	if (psRGXKickVRDMIN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientFenceSyncOffsetInt, psRGXKickVRDMIN->pui32ClientFenceSyncOffset, psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickVRDM_exit;
+				}
+			}
+	if (psRGXKickVRDMIN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientFenceValueInt, psRGXKickVRDMIN->pui32ClientFenceValue, psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickVRDM_exit;
+				}
+			}
+	if (psRGXKickVRDMIN->ui32ClientUpdateCount != 0)
+	{
+		psClientUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hClientUpdateUFOSyncPrimBlockInt2, psRGXKickVRDMIN->phClientUpdateUFOSyncPrimBlock, psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickVRDM_exit;
+				}
+			}
+	if (psRGXKickVRDMIN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientUpdateSyncOffsetInt, psRGXKickVRDMIN->pui32ClientUpdateSyncOffset, psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickVRDM_exit;
+				}
+			}
+	if (psRGXKickVRDMIN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientUpdateValueInt, psRGXKickVRDMIN->pui32ClientUpdateValue, psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickVRDM_exit;
+				}
+			}
+	if (psRGXKickVRDMIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXKickVRDMIN->pui32ServerSyncFlags, psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickVRDM_exit;
+				}
+			}
+	if (psRGXKickVRDMIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServerSyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hServerSyncsInt2, psRGXKickVRDMIN->phServerSyncs, psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickVRDM_exit;
+				}
+			}
+	
+	{
+		uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += 32 * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (32 * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, psRGXKickVRDMIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickVRDM_exit;
+				}
+			}
+	if (psRGXKickVRDMIN->ui32CmdSize != 0)
+	{
+		psDMCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickVRDMIN->ui32CmdSize * sizeof(IMG_BYTE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickVRDMIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, psDMCmdInt, psRGXKickVRDMIN->psDMCmd, psRGXKickVRDMIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+				{
+					psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickVRDM_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXKickVRDMOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psRayContextInt,
+											hRayContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+											IMG_TRUE);
+					if(psRGXKickVRDMOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickVRDM_exit;
+					}
+				}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickVRDMIN->ui32ClientFenceCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickVRDMOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psClientFenceUFOSyncPrimBlockInt[i],
+											hClientFenceUFOSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickVRDMOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickVRDM_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickVRDMIN->ui32ClientUpdateCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickVRDMOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psClientUpdateUFOSyncPrimBlockInt[i],
+											hClientUpdateUFOSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickVRDMOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickVRDM_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickVRDMIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickVRDMOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psServerSyncsInt[i],
+											hServerSyncsInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+											IMG_TRUE);
+					if(psRGXKickVRDMOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickVRDM_exit;
+					}
+				}
+		}
+	}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psRGXKickVRDMOUT->eError =
+		PVRSRVRGXKickVRDMKM(
+					psRayContextInt,
+					psRGXKickVRDMIN->ui32ClientCacheOpSeqNum,
+					psRGXKickVRDMIN->ui32ClientFenceCount,
+					psClientFenceUFOSyncPrimBlockInt,
+					ui32ClientFenceSyncOffsetInt,
+					ui32ClientFenceValueInt,
+					psRGXKickVRDMIN->ui32ClientUpdateCount,
+					psClientUpdateUFOSyncPrimBlockInt,
+					ui32ClientUpdateSyncOffsetInt,
+					ui32ClientUpdateValueInt,
+					psRGXKickVRDMIN->ui32ServerSyncCount,
+					ui32ServerSyncFlagsInt,
+					psServerSyncsInt,
+					psRGXKickVRDMIN->hCheckFenceFD,
+					psRGXKickVRDMIN->hUpdateTimeline,
+					&psRGXKickVRDMOUT->hUpdateFence,
+					uiUpdateFenceNameInt,
+					psRGXKickVRDMIN->ui32CmdSize,
+					psDMCmdInt,
+					psRGXKickVRDMIN->ui32PDumpFlags,
+					psRGXKickVRDMIN->ui32ExtJobRef,
+					psRGXKickVRDMIN->ssRobustnessResetReason);
+
+
+
+
+RGXKickVRDM_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psRayContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hRayContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+					}
+				}
+
+
+
+
+
+
+	if (hClientFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickVRDMIN->ui32ClientFenceCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hClientFenceUFOSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hClientFenceUFOSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hClientUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickVRDMIN->ui32ClientUpdateCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hClientUpdateUFOSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hClientUpdateUFOSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hServerSyncsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickVRDMIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hServerSyncsInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hServerSyncsInt2[i],
+										PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					}
+				}
+		}
+	}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRayContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT *psRGXCreateRayContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT *psRGXCreateRayContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_BYTE *psFrameworkCmdInt = NULL;
+	IMG_HANDLE hPrivData = psRGXCreateRayContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	RGX_SERVER_RAY_CONTEXT * psRayContextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
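+	/* Scratch space needed for the kernel-side copy of the user-supplied framework command. */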
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXCreateRayContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) +
+			0;
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+		{
+			psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXCreateRayContext_exit;
+		}
+	}
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateRayContextIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateRayContextIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXCreateRayContext_exit;
+			}
+		}
+	}
+
+	if (psRGXCreateRayContextIN->ui32FrameworkCmdSize != 0)
+	{
+		psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXCreateRayContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE);
+	}
+
+			/* Copy the data over */
+			if (psRGXCreateRayContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXCreateRayContextIN->psFrameworkCmd, psRGXCreateRayContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+				{
+					psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXCreateRayContext_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateRayContextOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &hPrivDataInt,
+											hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+											IMG_TRUE);
+					if(psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXCreateRayContext_exit;
+					}
+				}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psRGXCreateRayContextOUT->eError =
+		PVRSRVRGXCreateRayContextKM(psConnection, OSGetDevData(psConnection),
+					psRGXCreateRayContextIN->ui32Priority,
+					psRGXCreateRayContextIN->sVRMCallStackAddr,
+					psRGXCreateRayContextIN->ui32FrameworkCmdSize,
+					psFrameworkCmdInt,
+					hPrivDataInt,
+					&psRayContextInt);
+	/* Exit early if the bridged call fails */
+	if(psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateRayContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRGXCreateRayContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psRGXCreateRayContextOUT->hRayContext,
+							(void *) psRayContextInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyRayContextKM);
+	if (psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateRayContext_exit;
+	}
+
+	/* Release now that we have created handles. */
+	UnlockHandle();
+
+
+
+RGXCreateRayContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(hPrivDataInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPrivData,
+										PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					}
+				}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+	if (psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+	{
+		if (psRayContextInt)
+		{
+			PVRSRVRGXDestroyRayContextKM(psRayContextInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRayContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT *psRGXDestroyRayContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT *psRGXDestroyRayContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+		{
+			psRGXDestroyRayContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXDestroyRayContext_exit;
+		}
+	}
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXDestroyRayContextOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyRayContextIN->hRayContext,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+	if ((psRGXDestroyRayContextOUT->eError != PVRSRV_OK) &&
+	    (psRGXDestroyRayContextOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXDestroyRayContext: %s",
+		        PVRSRVGetErrorStringKM(psRGXDestroyRayContextOUT->eError)));
+		UnlockHandle();
+		goto RGXDestroyRayContext_exit;
+	}
+
+	/* Release now that we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXDestroyRayContext_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSetRayContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSETRAYCONTEXTPRIORITY *psRGXSetRayContextPriorityIN,
+					  PVRSRV_BRIDGE_OUT_RGXSETRAYCONTEXTPRIORITY *psRGXSetRayContextPriorityOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hRayContext = psRGXSetRayContextPriorityIN->hRayContext;
+	RGX_SERVER_RAY_CONTEXT * psRayContextInt = NULL;
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_BIT_MASK))
+		{
+			psRGXSetRayContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXSetRayContextPriority_exit;
+		}
+	}
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXSetRayContextPriorityOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psRayContextInt,
+											hRayContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+											IMG_TRUE);
+					if(psRGXSetRayContextPriorityOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXSetRayContextPriority_exit;
+					}
+				}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psRGXSetRayContextPriorityOUT->eError =
+		PVRSRVRGXSetRayContextPriorityKM(psConnection, OSGetDevData(psConnection),
+					psRayContextInt,
+					psRGXSetRayContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetRayContextPriority_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psRayContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hRayContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+					}
+				}
+	/* Release now that we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
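+/* Passed to SetDispatchTableEntry below: IMG_TRUE dispatches each entry point with the global bridge lock held. */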
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXRAYBridge(void);
+PVRSRV_ERROR DeinitRGXRAYBridge(void);
+
+/*
+ * Register all RGXRAY functions with services
+ */
+PVRSRV_ERROR InitRGXRAYBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMFREELIST, PVRSRVBridgeRGXCreateRPMFreeList,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMFREELIST, PVRSRVBridgeRGXDestroyRPMFreeList,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMCONTEXT, PVRSRVBridgeRGXCreateRPMContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMCONTEXT, PVRSRVBridgeRGXDestroyRPMContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKRS, PVRSRVBridgeRGXKickRS,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKVRDM, PVRSRVBridgeRGXKickVRDM,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT, PVRSRVBridgeRGXCreateRayContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT, PVRSRVBridgeRGXDestroyRayContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXSETRAYCONTEXTPRIORITY, PVRSRVBridgeRGXSetRayContextPriority,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXRAY functions from services
+ */
+PVRSRV_ERROR DeinitRGXRAYBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxsignals_bridge/common_rgxsignals_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxsignals_bridge/common_rgxsignals_bridge.h
new file mode 100644
index 0000000..18d747e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxsignals_bridge/common_rgxsignals_bridge.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxsignals
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxsignals
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXSIGNALS_BRIDGE_H
+#define COMMON_RGXSIGNALS_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE			PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_LAST			(PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0)
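+
+/*
+ * Command IDs in this bridge group are offsets from
+ * PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST; CMD_LAST bounds the range so the
+ * size of the dispatch table can be derived from it.
+ */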
+
+
+/*******************************************
+            RGXNotifySignalUpdate          
+ *******************************************/
+
+/* Bridge in structure for RGXNotifySignalUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE_TAG
+{
+	IMG_HANDLE hPrivData;
+	IMG_DEV_VIRTADDR sDevSignalAddress;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE;
+
+/* Bridge out structure for RGXNotifySignalUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE;
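+
+/*
+ * Bridge IN/OUT structures cross the user/kernel boundary, so they are
+ * declared packed to keep their layout identical on both sides,
+ * independent of compiler padding.
+ */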
+
+
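+/*
+ * Illustrative sketch only, not generated code: a client fills the IN
+ * structure and receives the OUT structure through the services bridge.
+ * BridgeDispatch below is a hypothetical stand-in for the client
+ * library's dispatch entry point.
+ *
+ *   PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE sIn;
+ *   PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE sOut;
+ *
+ *   sIn.hPrivData = hDevPrivData;         // handle obtained at context setup
+ *   sIn.sDevSignalAddress = sSignalAddr;  // device-virtual signal address
+ *   BridgeDispatch(PVRSRV_BRIDGE_RGXSIGNALS,
+ *                  PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE,
+ *                  &sIn, &sOut);
+ *   if (sOut.eError != PVRSRV_OK)
+ *           // handle failure
+ */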
+#endif /* COMMON_RGXSIGNALS_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxsignals_bridge/server_rgxsignals_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxsignals_bridge/server_rgxsignals_bridge.c
new file mode 100644
index 0000000..af6989e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxsignals_bridge/server_rgxsignals_bridge.c
@@ -0,0 +1,190 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxsignals
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxsignals
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxsignals.h"
+
+
+#include "common_rgxsignals_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+#include "rgx_bvnc_defs_km.h"
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
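+/*
+ * Generated handler pattern: check any feature requirement, translate the
+ * user handle into a kernel pointer under the handle lock, drop the lock
+ * for the KM call, then re-take it to release the lookup reference.
+ */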
+static IMG_INT
+PVRSRVBridgeRGXNotifySignalUpdate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE *psRGXNotifySignalUpdateIN,
+					  PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE *psRGXNotifySignalUpdateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPrivData = psRGXNotifySignalUpdateIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK))
+		{
+			psRGXNotifySignalUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXNotifySignalUpdate_exit;
+		}
+	}
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXNotifySignalUpdateOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &hPrivDataInt,
+											hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+											IMG_TRUE);
+					if(psRGXNotifySignalUpdateOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXNotifySignalUpdate_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXNotifySignalUpdateOUT->eError =
+		PVRSRVRGXNotifySignalUpdateKM(psConnection, OSGetDevData(psConnection),
+					hPrivDataInt,
+					psRGXNotifySignalUpdateIN->sDevSignalAddress);
+
+
+
+
+RGXNotifySignalUpdate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(hPrivDataInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPrivData,
+										PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
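+
+/* bUseLock is forwarded to SetDispatchTableEntry; it is understood to make
+ * the dispatcher take the global bridge lock around these entry points. */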
+
+PVRSRV_ERROR InitRGXSIGNALSBridge(void);
+PVRSRV_ERROR DeinitRGXSIGNALSBridge(void);
+
+/*
+ * Register all RGXSIGNALS functions with services
+ */
+PVRSRV_ERROR InitRGXSIGNALSBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXSIGNALS, PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE, PVRSRVBridgeRGXNotifySignalUpdate,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXSIGNALS functions with services
+ */
+PVRSRV_ERROR DeinitRGXSIGNALSBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxta3d_bridge/common_rgxta3d_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxta3d_bridge/common_rgxta3d_bridge.h
new file mode 100644
index 0000000..597e0f4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxta3d_bridge/common_rgxta3d_bridge.h
@@ -0,0 +1,497 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxta3d
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxta3d
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXTA3D_BRIDGE_H
+#define COMMON_RGXTA3D_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "rgx_fwif_shared.h"
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATA			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATA			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERTARGET			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERTARGET			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXADDBLOCKTOFREELIST			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXREMOVEBLOCKFROMFREELIST			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+15)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+16)
+#define PVRSRV_BRIDGE_RGXTA3D_RGXGETPARTIALRENDERCOUNT			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+17)
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+17)
+
+
+/*******************************************
+            RGXCreateHWRTData          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateHWRTData */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA_TAG
+{
+	IMG_UINT32 ui32RenderTarget;
+	IMG_DEV_VIRTADDR sPMMlistDevVAddr;
+	IMG_HANDLE * phapsFreeLists;
+	IMG_UINT32 ui32PPPScreen;
+	IMG_UINT32 ui32PPPGridOffset;
+	IMG_UINT64 ui64PPPMultiSampleCtl;
+	IMG_UINT32 ui32TPCStride;
+	IMG_DEV_VIRTADDR sTailPtrsDevVAddr;
+	IMG_UINT32 ui32TPCSize;
+	IMG_UINT32 ui32TEScreen;
+	IMG_UINT32 ui32TEAA;
+	IMG_UINT32 ui32TEMTILE1;
+	IMG_UINT32 ui32TEMTILE2;
+	IMG_UINT32 ui32MTileStride;
+	IMG_UINT32 ui32ui32ISPMergeLowerX;
+	IMG_UINT32 ui32ui32ISPMergeLowerY;
+	IMG_UINT32 ui32ui32ISPMergeUpperX;
+	IMG_UINT32 ui32ui32ISPMergeUpperY;
+	IMG_UINT32 ui32ui32ISPMergeScaleX;
+	IMG_UINT32 ui32ui32ISPMergeScaleY;
+	IMG_UINT16 ui16MaxRTs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA;
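+
+/* The doubled prefixes above (e.g. ui32ui32ISPMergeLowerX) appear to be an
+ * artifact of the bridge generator prepending its own type prefix to
+ * parameter names that already carried one; the generated names are kept. */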
+
+/* Bridge out structure for RGXCreateHWRTData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+	IMG_HANDLE hRTACtlMemDesc;
+	IMG_HANDLE hsHWRTDataMemDesc;
+	IMG_UINT32 ui32FWHWRTData;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA;
+
+
+/*******************************************
+            RGXDestroyHWRTData          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyHWRTData */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA;
+
+/* Bridge out structure for RGXDestroyHWRTData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA;
+
+
+/*******************************************
+            RGXCreateRenderTarget          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRenderTarget */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET_TAG
+{
+	IMG_DEV_VIRTADDR spsVHeapTableDevVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET;
+
+/* Bridge out structure for RGXCreateRenderTarget */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET_TAG
+{
+	IMG_HANDLE hsRenderTargetMemDesc;
+	IMG_UINT32 ui32sRenderTargetFWDevVAddr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET;
+
+
+/*******************************************
+            RGXDestroyRenderTarget          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRenderTarget */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET_TAG
+{
+	IMG_HANDLE hsRenderTargetMemDesc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET;
+
+/* Bridge out structure for RGXDestroyRenderTarget */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET;
+
+
+/*******************************************
+            RGXCreateZSBuffer          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER_TAG
+{
+	IMG_HANDLE hReservation;
+	IMG_HANDLE hPMR;
+	PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER;
+
+/* Bridge out structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER_TAG
+{
+	IMG_HANDLE hsZSBufferKM;
+	IMG_UINT32 ui32sZSBufferFWDevVAddr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER;
+
+
+/*******************************************
+            RGXDestroyZSBuffer          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER_TAG
+{
+	IMG_HANDLE hsZSBufferMemDesc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER;
+
+/* Bridge out structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER;
+
+
+/*******************************************
+            RGXPopulateZSBuffer          
+ *******************************************/
+
+/* Bridge in structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER_TAG
+{
+	IMG_HANDLE hsZSBufferKM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER;
+
+/* Bridge out structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER_TAG
+{
+	IMG_HANDLE hsPopulation;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER;
+
+
+/*******************************************
+            RGXUnpopulateZSBuffer          
+ *******************************************/
+
+/* Bridge in structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER_TAG
+{
+	IMG_HANDLE hsPopulation;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER;
+
+/* Bridge out structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER;
+
+
+/*******************************************
+            RGXCreateFreeList          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG
+{
+	IMG_UINT32 ui32ui32MaxFLPages;
+	IMG_UINT32 ui32ui32InitFLPages;
+	IMG_UINT32 ui32ui32GrowFLPages;
+	IMG_UINT32 ui32ui32GrowParamThreshold;
+	IMG_HANDLE hsGlobalFreeList;
+	IMG_BOOL bbFreeListCheck;
+	IMG_DEV_VIRTADDR spsFreeListDevVAddr;
+	IMG_HANDLE hsFreeListPMR;
+	IMG_DEVMEM_OFFSET_T uiPMROffset;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEFREELIST;
+
+/* Bridge out structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST;
+
+
+/*******************************************
+            RGXDestroyFreeList          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST;
+
+/* Bridge out structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST;
+
+
+/*******************************************
+            RGXAddBlockToFreeList          
+ *******************************************/
+
+/* Bridge in structure for RGXAddBlockToFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXADDBLOCKTOFREELIST_TAG
+{
+	IMG_HANDLE hsFreeList;
+	IMG_UINT32 ui3232NumPages;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXADDBLOCKTOFREELIST;
+
+/* Bridge out structure for RGXAddBlockToFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXADDBLOCKTOFREELIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXADDBLOCKTOFREELIST;
+
+
+/*******************************************
+            RGXRemoveBlockFromFreeList          
+ *******************************************/
+
+/* Bridge in structure for RGXRemoveBlockFromFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXREMOVEBLOCKFROMFREELIST_TAG
+{
+	IMG_HANDLE hsFreeList;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXREMOVEBLOCKFROMFREELIST;
+
+/* Bridge out structure for RGXRemoveBlockFromFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXREMOVEBLOCKFROMFREELIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXREMOVEBLOCKFROMFREELIST;
+
+
+/*******************************************
+            RGXCreateRenderContext          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG
+{
+	IMG_UINT32 ui32Priority;
+	IMG_DEV_VIRTADDR sVDMCallStackAddr;
+	IMG_UINT32 ui32FrameworkCmdize;
+	IMG_BYTE * psFrameworkCmd;
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT;
+
+/* Bridge out structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG
+{
+	IMG_HANDLE hRenderContext;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT;
+
+
+/*******************************************
+            RGXDestroyRenderContext          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT;
+
+/* Bridge out structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT;
+
+
+/*******************************************
+            RGXKickTA3D          
+ *******************************************/
+
+/* Bridge in structure for RGXKickTA3D */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D_TAG
+{
+	IMG_HANDLE hRenderContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientTAFenceCount;
+	IMG_HANDLE * phClientTAFenceSyncPrimBlock;
+	IMG_UINT32 * pui32ClientTAFenceSyncOffset;
+	IMG_UINT32 * pui32ClientTAFenceValue;
+	IMG_UINT32 ui32ClientTAUpdateCount;
+	IMG_HANDLE * phClientTAUpdateSyncPrimBlock;
+	IMG_UINT32 * pui32ClientTAUpdateSyncOffset;
+	IMG_UINT32 * pui32ClientTAUpdateValue;
+	IMG_UINT32 ui32ServerTASyncPrims;
+	IMG_UINT32 * pui32ServerTASyncFlags;
+	IMG_HANDLE * phServerTASyncs;
+	IMG_UINT32 ui32Client3DFenceCount;
+	IMG_HANDLE * phClient3DFenceSyncPrimBlock;
+	IMG_UINT32 * pui32Client3DFenceSyncOffset;
+	IMG_UINT32 * pui32Client3DFenceValue;
+	IMG_UINT32 ui32Client3DUpdateCount;
+	IMG_HANDLE * phClient3DUpdateSyncPrimBlock;
+	IMG_UINT32 * pui32Client3DUpdateSyncOffset;
+	IMG_UINT32 * pui32Client3DUpdateValue;
+	IMG_UINT32 ui32Server3DSyncPrims;
+	IMG_UINT32 * pui32Server3DSyncFlags;
+	IMG_HANDLE * phServer3DSyncs;
+	IMG_HANDLE hPRFenceUFOSyncPrimBlock;
+	IMG_UINT32 ui32FRFenceUFOSyncOffset;
+	IMG_UINT32 ui32FRFenceValue;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_CHAR * puiUpdateFenceName;
+	IMG_UINT32 ui32TACmdSize;
+	IMG_BYTE * psTACmd;
+	IMG_UINT32 ui323DPRCmdSize;
+	IMG_BYTE * ps3DPRCmd;
+	IMG_UINT32 ui323DCmdSize;
+	IMG_BYTE * ps3DCmd;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_BOOL bbLastTAInScene;
+	IMG_BOOL bbKickTA;
+	IMG_BOOL bbKickPR;
+	IMG_BOOL bbKick3D;
+	IMG_BOOL bbAbort;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_HANDLE hRTDataCleanup;
+	IMG_HANDLE hZBuffer;
+	IMG_HANDLE hSBuffer;
+	IMG_HANDLE hMSAAScratchBuffer;
+	IMG_BOOL bbCommitRefCountsTA;
+	IMG_BOOL bbCommitRefCounts3D;
+	IMG_UINT32 ui32SyncPMRCount;
+	IMG_UINT32 * pui32SyncPMRFlags;
+	IMG_HANDLE * phSyncPMRs;
+	IMG_UINT32 ui32RenderTargetSize;
+	IMG_UINT32 ui32NumberOfDrawCalls;
+	IMG_UINT32 ui32NumberOfIndices;
+	IMG_UINT32 ui32NumberOfMRTs;
+	IMG_UINT64 ui64Deadline;
+	IMG_DEV_VIRTADDR ssRobustnessResetReason;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKTA3D;
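+
+/* The kick structure carries, in order: the target render context, client
+ * TA and 3D fence/update sync arrays with their counts, server sync
+ * arrays, the partial-render (PR) fence, the TA/PR/3D command payloads,
+ * kick/abort flags, cleanup handles and per-kick statistics. */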
+
+/* Bridge out structure for RGXKickTA3D */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D_TAG
+{
+	PVRSRV_FENCE hUpdateFence;
+	IMG_BOOL bbCommittedRefCountsTA;
+	IMG_BOOL bbCommittedRefCounts3D;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKTA3D;
+
+
+/*******************************************
+            RGXSetRenderContextPriority          
+ *******************************************/
+
+/* Bridge in structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hRenderContext;
+	IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY;
+
+
+/*******************************************
+            RGXGetLastRenderContextResetReason          
+ *******************************************/
+
+/* Bridge in structure for RGXGetLastRenderContextResetReason */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON_TAG
+{
+	IMG_HANDLE hRenderContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON;
+
+/* Bridge out structure for RGXGetLastRenderContextResetReason */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON_TAG
+{
+	IMG_UINT32 ui32LastResetReason;
+	IMG_UINT32 ui32LastResetJobRef;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON;
+
+
+/*******************************************
+            RGXGetPartialRenderCount          
+ *******************************************/
+
+/* Bridge in structure for RGXGetPartialRenderCount */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT_TAG
+{
+	IMG_HANDLE hHWRTDataMemDesc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT;
+
+/* Bridge out structure for RGXGetPartialRenderCount */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT_TAG
+{
+	IMG_UINT32 ui32NumPartialRenders;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT;
+
+
+#endif /* COMMON_RGXTA3D_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxta3d_bridge/server_rgxta3d_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxta3d_bridge/server_rgxta3d_bridge.c
new file mode 100644
index 0000000..e21df5b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxta3d_bridge/server_rgxta3d_bridge.c
@@ -0,0 +1,2892 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxta3d
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxta3d
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxta3d.h"
+
+
+#include "common_rgxta3d_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRGXCreateHWRTData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA *psRGXCreateHWRTDataIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA *psRGXCreateHWRTDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_FREELIST * *psapsFreeListsInt = NULL;
+	IMG_HANDLE *hapsFreeListsInt2 = NULL;
+	RGX_RTDATA_CLEANUP_DATA * psCleanupCookieInt = NULL;
+	DEVMEM_MEMDESC * psRTACtlMemDescInt = NULL;
+	DEVMEM_MEMDESC * pssHWRTDataMemDescInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
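+	/* Total marshalling space for the fixed-size freelist arrays that are
+	 * carved out of pArrayArgsBuffer below. */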
+	IMG_UINT32 ui32BufferSize = 
+			(RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *)) +
+			(RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) +
+			0;
+
+
+
+
+	psRGXCreateHWRTDataOUT->hCleanupCookie = NULL;
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateHWRTDataIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateHWRTDataIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXCreateHWRTDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXCreateHWRTData_exit;
+			}
+		}
+	}
+
+	
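+	/* Sub-allocate the lookup arrays from the single marshalling buffer;
+	 * ui32NextOffset must finish equal to ui32BufferSize (asserted at exit). */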
+	{
+		psapsFreeListsInt = (RGX_FREELIST **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *);
+		hapsFreeListsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hapsFreeListsInt2, psRGXCreateHWRTDataIN->phapsFreeLists, RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXCreateHWRTDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXCreateHWRTData_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<RGXFW_MAX_FREELISTS;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXCreateHWRTDataOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psapsFreeListsInt[i],
+											hapsFreeListsInt2[i],
+											PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+											IMG_TRUE);
+					if(psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXCreateHWRTData_exit;
+					}
+				}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXCreateHWRTDataOUT->eError =
+		RGXCreateHWRTData(psConnection, OSGetDevData(psConnection),
+					psRGXCreateHWRTDataIN->ui32RenderTarget,
+					psRGXCreateHWRTDataIN->sPMMlistDevVAddr,
+					psapsFreeListsInt,
+					&psCleanupCookieInt,
+					&psRTACtlMemDescInt,
+					psRGXCreateHWRTDataIN->ui32PPPScreen,
+					psRGXCreateHWRTDataIN->ui32PPPGridOffset,
+					psRGXCreateHWRTDataIN->ui64PPPMultiSampleCtl,
+					psRGXCreateHWRTDataIN->ui32TPCStride,
+					psRGXCreateHWRTDataIN->sTailPtrsDevVAddr,
+					psRGXCreateHWRTDataIN->ui32TPCSize,
+					psRGXCreateHWRTDataIN->ui32TEScreen,
+					psRGXCreateHWRTDataIN->ui32TEAA,
+					psRGXCreateHWRTDataIN->ui32TEMTILE1,
+					psRGXCreateHWRTDataIN->ui32TEMTILE2,
+					psRGXCreateHWRTDataIN->ui32MTileStride,
+					psRGXCreateHWRTDataIN->ui32ui32ISPMergeLowerX,
+					psRGXCreateHWRTDataIN->ui32ui32ISPMergeLowerY,
+					psRGXCreateHWRTDataIN->ui32ui32ISPMergeUpperX,
+					psRGXCreateHWRTDataIN->ui32ui32ISPMergeUpperY,
+					psRGXCreateHWRTDataIN->ui32ui32ISPMergeScaleX,
+					psRGXCreateHWRTDataIN->ui32ui32ISPMergeScaleY,
+					psRGXCreateHWRTDataIN->ui16MaxRTs,
+					&pssHWRTDataMemDescInt,
+					&psRGXCreateHWRTDataOUT->ui32FWHWRTData);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateHWRTData_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
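+	/* Registering RGXDestroyHWRTData as the release callback ties the
+	 * resource's destruction to the lifetime of the handle created here. */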
+	psRGXCreateHWRTDataOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psRGXCreateHWRTDataOUT->hCleanupCookie,
+							(void *) psCleanupCookieInt,
+							PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RGXDestroyHWRTData);
+	if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateHWRTData_exit;
+	}
+
+
+
+
+
+
+	psRGXCreateHWRTDataOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+							&psRGXCreateHWRTDataOUT->hRTACtlMemDesc,
+							(void *) psRTACtlMemDescInt,
+							PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE
+							,psRGXCreateHWRTDataOUT->hCleanupCookie);
+	if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateHWRTData_exit;
+	}
+
+
+
+
+
+
+	psRGXCreateHWRTDataOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+							&psRGXCreateHWRTDataOUT->hsHWRTDataMemDesc,
+							(void *) pssHWRTDataMemDescInt,
+							PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE
+							,psRGXCreateHWRTDataOUT->hCleanupCookie);
+	if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateHWRTData_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+RGXCreateHWRTData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+
+	if (hapsFreeListsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<RGXFW_MAX_FREELISTS;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hapsFreeListsInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hapsFreeListsInt2[i],
+										PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+					}
+				}
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+	{
+		/* Lock over handle creation cleanup. */
+		LockHandle();
+		if (psRGXCreateHWRTDataOUT->hCleanupCookie)
+		{
+
+
+			PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						(IMG_HANDLE) psRGXCreateHWRTDataOUT->hCleanupCookie,
+						PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+			if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				        "PVRSRVBridgeRGXCreateHWRTData: %s",
+				        PVRSRVGetErrorStringKM(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psCleanupCookieInt = NULL;
+		}
+
+
+		/* Release now we have cleaned up creation handles. */
+		UnlockHandle();
+		if (psCleanupCookieInt)
+		{
+			RGXDestroyHWRTData(psCleanupCookieInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyHWRTData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA *psRGXDestroyHWRTDataIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA *psRGXDestroyHWRTDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXDestroyHWRTDataOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyHWRTDataIN->hCleanupCookie,
+					PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+	if ((psRGXDestroyHWRTDataOUT->eError != PVRSRV_OK) &&
+	    (psRGXDestroyHWRTDataOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXDestroyHWRTData: %s",
+		        PVRSRVGetErrorStringKM(psRGXDestroyHWRTDataOUT->eError)));
+		UnlockHandle();
+		goto RGXDestroyHWRTData_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXDestroyHWRTData_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRenderTarget(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET *psRGXCreateRenderTargetIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET *psRGXCreateRenderTargetOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_RT_CLEANUP_DATA * pssRenderTargetMemDescInt = NULL;
+
+
+
+
+
+
+
+
+	psRGXCreateRenderTargetOUT->eError =
+		RGXCreateRenderTarget(psConnection, OSGetDevData(psConnection),
+					psRGXCreateRenderTargetIN->spsVHeapTableDevVAddr,
+					&pssRenderTargetMemDescInt,
+					&psRGXCreateRenderTargetOUT->ui32sRenderTargetFWDevVAddr);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateRenderTarget_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRGXCreateRenderTargetOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psRGXCreateRenderTargetOUT->hsRenderTargetMemDesc,
+							(void *) pssRenderTargetMemDescInt,
+							PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RGXDestroyRenderTarget);
+	if (psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateRenderTarget_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+RGXCreateRenderTarget_exit:
+
+
+
+	if (psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+	{
+		if (pssRenderTargetMemDescInt)
+		{
+			RGXDestroyRenderTarget(pssRenderTargetMemDescInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRenderTarget(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET *psRGXDestroyRenderTargetIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET *psRGXDestroyRenderTargetOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXDestroyRenderTargetOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyRenderTargetIN->hsRenderTargetMemDesc,
+					PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET);
+	if ((psRGXDestroyRenderTargetOUT->eError != PVRSRV_OK) &&
+	    (psRGXDestroyRenderTargetOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXDestroyRenderTarget: %s",
+		        PVRSRVGetErrorStringKM(psRGXDestroyRenderTargetOUT->eError)));
+		UnlockHandle();
+		goto RGXDestroyRenderTarget_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXDestroyRenderTarget_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation;
+	DEVMEMINT_RESERVATION * psReservationInt = NULL;
+	IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR;
+	PMR * psPMRInt = NULL;
+	RGX_ZSBUFFER_DATA * pssZSBufferKMInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateZSBufferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psReservationInt,
+											hReservation,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+											IMG_TRUE);
+					if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXCreateZSBuffer_exit;
+					}
+				}
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateZSBufferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRInt,
+											hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXCreateZSBuffer_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXCreateZSBufferOUT->eError =
+		RGXCreateZSBufferKM(psConnection, OSGetDevData(psConnection),
+					psReservationInt,
+					psPMRInt,
+					psRGXCreateZSBufferIN->uiMapFlags,
+					&pssZSBufferKMInt,
+					&psRGXCreateZSBufferOUT->ui32sZSBufferFWDevVAddr);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateZSBuffer_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psRGXCreateZSBufferOUT->hsZSBufferKM,
+							(void *) pssZSBufferKMInt,
+							PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RGXDestroyZSBufferKM);
+	if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateZSBuffer_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+RGXCreateZSBuffer_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psReservationInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hReservation,
+										PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+					}
+				}
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		if (pssZSBufferKMInt)
+		{
+			RGXDestroyZSBufferKM(pssZSBufferKMInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXDestroyZSBufferOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyZSBufferIN->hsZSBufferMemDesc,
+					PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+	if ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) &&
+	    (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXDestroyZSBuffer: %s",
+		        PVRSRVGetErrorStringKM(psRGXDestroyZSBufferOUT->eError)));
+		UnlockHandle();
+		goto RGXDestroyZSBuffer_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXDestroyZSBuffer_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN,
+					  PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM;
+	RGX_ZSBUFFER_DATA * pssZSBufferKMInt = NULL;
+	RGX_POPULATION * pssPopulationInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXPopulateZSBufferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &pssZSBufferKMInt,
+											hsZSBufferKM,
+											PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+											IMG_TRUE);
+					if(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXPopulateZSBuffer_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXPopulateZSBufferOUT->eError =
+		RGXPopulateZSBufferKM(
+					pssZSBufferKMInt,
+					&pssPopulationInt);
+	/* Exit early if bridged call fails */
+	if(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		goto RGXPopulateZSBuffer_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psRGXPopulateZSBufferOUT->hsPopulation,
+							(void *) pssPopulationInt,
+							PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RGXUnpopulateZSBufferKM);
+	if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXPopulateZSBuffer_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+RGXPopulateZSBuffer_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(pssZSBufferKMInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hsZSBufferKM,
+										PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		if (pssPopulationInt)
+		{
+			RGXUnpopulateZSBufferKM(pssPopulationInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN,
+					  PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXUnpopulateZSBufferOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation,
+					PVRSRV_HANDLE_TYPE_RGX_POPULATION);
+	if ((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) &&
+	    (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXUnpopulateZSBuffer: %s",
+		        PVRSRVGetErrorStringKM(psRGXUnpopulateZSBufferOUT->eError)));
+		UnlockHandle();
+		goto RGXUnpopulateZSBuffer_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXUnpopulateZSBuffer_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList;
+	RGX_FREELIST * pssGlobalFreeListInt = NULL;
+	IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR;
+	PMR * pssFreeListPMRInt = NULL;
+	RGX_FREELIST * psCleanupCookieInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
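+				/* The global freelist is optional: it is looked up (and
+				 * later released) only when the client supplied a handle. */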
+				if (psRGXCreateFreeListIN->hsGlobalFreeList)
+				{
+					/* Look up the address from the handle */
+					psRGXCreateFreeListOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &pssGlobalFreeListInt,
+											hsGlobalFreeList,
+											PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+											IMG_TRUE);
+					if(psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXCreateFreeList_exit;
+					}
+				}
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateFreeListOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &pssFreeListPMRInt,
+											hsFreeListPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXCreateFreeList_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXCreateFreeListOUT->eError =
+		RGXCreateFreeList(psConnection, OSGetDevData(psConnection),
+					psRGXCreateFreeListIN->ui32ui32MaxFLPages,
+					psRGXCreateFreeListIN->ui32ui32InitFLPages,
+					psRGXCreateFreeListIN->ui32ui32GrowFLPages,
+					psRGXCreateFreeListIN->ui32ui32GrowParamThreshold,
+					pssGlobalFreeListInt,
+					psRGXCreateFreeListIN->bbFreeListCheck,
+					psRGXCreateFreeListIN->spsFreeListDevVAddr,
+					pssFreeListPMRInt,
+					psRGXCreateFreeListIN->uiPMROffset,
+					&psCleanupCookieInt);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateFreeList_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRGXCreateFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psRGXCreateFreeListOUT->hCleanupCookie,
+							(void *) psCleanupCookieInt,
+							PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RGXDestroyFreeList);
+	if (psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateFreeList_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+RGXCreateFreeList_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				if (psRGXCreateFreeListIN->hsGlobalFreeList)
+				{
+					/* Unreference the previously looked up handle */
+					if(pssGlobalFreeListInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hsGlobalFreeList,
+										PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+					}
+				}
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(pssFreeListPMRInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hsFreeListPMR,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+	{
+		if (psCleanupCookieInt)
+		{
+			RGXDestroyFreeList(psCleanupCookieInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXDestroyFreeListOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie,
+					PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+	if ((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) &&
+	    (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXDestroyFreeList: %s",
+		        PVRSRVGetErrorStringKM(psRGXDestroyFreeListOUT->eError)));
+		UnlockHandle();
+		goto RGXDestroyFreeList_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXDestroyFreeList_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXAddBlockToFreeList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXADDBLOCKTOFREELIST *psRGXAddBlockToFreeListIN,
+					  PVRSRV_BRIDGE_OUT_RGXADDBLOCKTOFREELIST *psRGXAddBlockToFreeListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hsFreeList = psRGXAddBlockToFreeListIN->hsFreeList;
+	RGX_FREELIST * pssFreeListInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXAddBlockToFreeListOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &pssFreeListInt,
+											hsFreeList,
+											PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+											IMG_TRUE);
+					if(psRGXAddBlockToFreeListOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXAddBlockToFreeList_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXAddBlockToFreeListOUT->eError =
+		RGXAddBlockToFreeListKM(
+					pssFreeListInt,
+					psRGXAddBlockToFreeListIN->ui3232NumPages);
+
+
+
+
+RGXAddBlockToFreeList_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(pssFreeListInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hsFreeList,
+										PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXRemoveBlockFromFreeList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXREMOVEBLOCKFROMFREELIST *psRGXRemoveBlockFromFreeListIN,
+					  PVRSRV_BRIDGE_OUT_RGXREMOVEBLOCKFROMFREELIST *psRGXRemoveBlockFromFreeListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hsFreeList = psRGXRemoveBlockFromFreeListIN->hsFreeList;
+	RGX_FREELIST * pssFreeListInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXRemoveBlockFromFreeListOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &pssFreeListInt,
+											hsFreeList,
+											PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+											IMG_TRUE);
+					if(psRGXRemoveBlockFromFreeListOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXRemoveBlockFromFreeList_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXRemoveBlockFromFreeListOUT->eError =
+		RGXRemoveBlockFromFreeListKM(
+					pssFreeListInt);
+
+
+
+
+RGXRemoveBlockFromFreeList_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(pssFreeListInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hsFreeList,
+										PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_BYTE *psFrameworkCmdInt = NULL;
+	IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateRenderContextIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXCreateRenderContext_exit;
+			}
+		}
+	}
+
+	if (psRGXCreateRenderContextIN->ui32FrameworkCmdize != 0)
+	{
+		psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+	}
+
+			/* Copy the data over */
+			if (psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXCreateRenderContextIN->psFrameworkCmd, psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+				{
+					psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXCreateRenderContext_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateRenderContextOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &hPrivDataInt,
+											hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+											IMG_TRUE);
+					if(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXCreateRenderContext_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXCreateRenderContextOUT->eError =
+		PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevData(psConnection),
+					psRGXCreateRenderContextIN->ui32Priority,
+					psRGXCreateRenderContextIN->sVDMCallStackAddr,
+					psRGXCreateRenderContextIN->ui32FrameworkCmdize,
+					psFrameworkCmdInt,
+					hPrivDataInt,
+					&psRenderContextInt);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateRenderContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psRGXCreateRenderContextOUT->hRenderContext,
+							(void *) psRenderContextInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyRenderContextKM);
+	if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateRenderContext_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+RGXCreateRenderContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(hPrivDataInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPrivData,
+										PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					}
+				}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+	if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+	{
+		if (psRenderContextInt)
+		{
+			PVRSRVRGXDestroyRenderContextKM(psRenderContextInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
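The staging logic at the top of this handler is the one non-obvious part: all variable-length arguments are packed into a single scratch buffer, carved from the unused tail of the fixed-size bridge input buffer when they fit, and heap-allocated otherwise — which is why the free at the end is guarded by bHaveEnoughSpace. A condensed sketch of just that selection, with a hypothetical psIN input structure and ui32Count element count standing in for the real fields:

	/* Illustrative scratch-buffer selection, mirroring the generated code. */
	IMG_UINT32 ui32BufferSize = ui32Count * sizeof(IMG_UINT32);
	IMG_UINT32 ui32Offset     = PVR_ALIGN(sizeof(*psIN), sizeof(unsigned long));
	IMG_UINT32 ui32Excess     = (ui32Offset >= PVRSRV_MAX_BRIDGE_IN_SIZE) ?
	                                0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32Offset;
	IMG_BYTE  *pArrayArgs;

	if (ui32BufferSize <= ui32Excess)
		pArrayArgs = (IMG_BYTE *)psIN + ui32Offset;     /* reuse input-buffer tail */
	else
		pArrayArgs = OSAllocMemNoStats(ui32BufferSize); /* may return NULL */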
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXDestroyRenderContextOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyRenderContextIN->hCleanupCookie,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+	if ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) &&
+	    (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXDestroyRenderContext: %s",
+		        PVRSRVGetErrorStringKM(psRGXDestroyRenderContextOUT->eError)));
+		UnlockHandle();
+		goto RGXDestroyRenderContext_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXDestroyRenderContext_exit:
+
+
+
+
+	return 0;
+}
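One detail worth noting in the destroy path: PVRSRV_ERROR_RETRY is excluded from the error log — presumably because cleanup can be deferred while the firmware still references the context and the client simply re-issues the call — but the code is still returned to the caller through eError. Reduced to its essentials (illustrative; locking elided, hHandle is a placeholder):

	eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, hHandle,
	                                     PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
	{
		/* Only unexpected failures are logged; RETRY is a normal outcome. */
		PVR_DPF((PVR_DBG_ERROR, "%s", PVRSRVGetErrorStringKM(eError)));
	}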
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickTA3D(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXKICKTA3D *psRGXKickTA3DIN,
+					  PVRSRV_BRIDGE_OUT_RGXKICKTA3D *psRGXKickTA3DOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hRenderContext = psRGXKickTA3DIN->hRenderContext;
+	RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psClientTAFenceSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientTAFenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psClientTAUpdateSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerTASyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerTASyncsInt = NULL;
+	IMG_HANDLE *hServerTASyncsInt2 = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psClient3DFenceSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClient3DFenceSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32Client3DFenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32Client3DFenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psClient3DUpdateSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32Client3DUpdateValueInt = NULL;
+	IMG_UINT32 *ui32Server3DSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE * *psServer3DSyncsInt = NULL;
+	IMG_HANDLE *hServer3DSyncsInt2 = NULL;
+	IMG_HANDLE hPRFenceUFOSyncPrimBlock = psRGXKickTA3DIN->hPRFenceUFOSyncPrimBlock;
+	SYNC_PRIMITIVE_BLOCK * psPRFenceUFOSyncPrimBlockInt = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_BYTE *psTACmdInt = NULL;
+	IMG_BYTE *ps3DPRCmdInt = NULL;
+	IMG_BYTE *ps3DCmdInt = NULL;
+	IMG_HANDLE hRTDataCleanup = psRGXKickTA3DIN->hRTDataCleanup;
+	RGX_RTDATA_CLEANUP_DATA * psRTDataCleanupInt = NULL;
+	IMG_HANDLE hZBuffer = psRGXKickTA3DIN->hZBuffer;
+	RGX_ZSBUFFER_DATA * psZBufferInt = NULL;
+	IMG_HANDLE hSBuffer = psRGXKickTA3DIN->hSBuffer;
+	RGX_ZSBUFFER_DATA * psSBufferInt = NULL;
+	IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3DIN->hMSAAScratchBuffer;
+	RGX_ZSBUFFER_DATA * psMSAAScratchBufferInt = NULL;
+	IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+	PMR * *psSyncPMRsInt = NULL;
+	IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) +
+			(psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) +
+			(psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32)) +
+			(psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+			(psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE)) +
+			(psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE)) +
+			(psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) +
+			(psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32)) +
+			(psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+			(psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE)) +
+			(32 * sizeof(IMG_CHAR)) +
+			(psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE)) +
+			(psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE)) +
+			(psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE)) +
+			(psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+			(psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(PMR *)) +
+			(psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickTA3DIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickTA3DIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXKickTA3D_exit;
+			}
+		}
+	}
+
+	if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+	{
+		psClientTAFenceSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientTAFenceSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hClientTAFenceSyncPrimBlockInt2, psRGXKickTA3DIN->phClientTAFenceSyncPrimBlock, psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+	{
+		ui32ClientTAFenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientTAFenceSyncOffsetInt, psRGXKickTA3DIN->pui32ClientTAFenceSyncOffset, psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+	{
+		ui32ClientTAFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientTAFenceValueInt, psRGXKickTA3DIN->pui32ClientTAFenceValue, psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+	{
+		psClientTAUpdateSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClientTAUpdateSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hClientTAUpdateSyncPrimBlockInt2, psRGXKickTA3DIN->phClientTAUpdateSyncPrimBlock, psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+	{
+		ui32ClientTAUpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientTAUpdateSyncOffsetInt, psRGXKickTA3DIN->pui32ClientTAUpdateSyncOffset, psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+	{
+		ui32ClientTAUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientTAUpdateValueInt, psRGXKickTA3DIN->pui32ClientTAUpdateValue, psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32ServerTASyncPrims != 0)
+	{
+		ui32ServerTASyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ServerTASyncFlagsInt, psRGXKickTA3DIN->pui32ServerTASyncFlags, psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32ServerTASyncPrims != 0)
+	{
+		psServerTASyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServerTASyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hServerTASyncsInt2, psRGXKickTA3DIN->phServerTASyncs, psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+	{
+		psClient3DFenceSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClient3DFenceSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hClient3DFenceSyncPrimBlockInt2, psRGXKickTA3DIN->phClient3DFenceSyncPrimBlock, psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+	{
+		ui32Client3DFenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32Client3DFenceSyncOffsetInt, psRGXKickTA3DIN->pui32Client3DFenceSyncOffset, psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+	{
+		ui32Client3DFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32Client3DFenceValueInt, psRGXKickTA3DIN->pui32Client3DFenceValue, psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+	{
+		psClient3DUpdateSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hClient3DUpdateSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hClient3DUpdateSyncPrimBlockInt2, psRGXKickTA3DIN->phClient3DUpdateSyncPrimBlock, psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+	{
+		ui32Client3DUpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32Client3DUpdateSyncOffsetInt, psRGXKickTA3DIN->pui32Client3DUpdateSyncOffset, psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+	{
+		ui32Client3DUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32Client3DUpdateValueInt, psRGXKickTA3DIN->pui32Client3DUpdateValue, psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32Server3DSyncPrims != 0)
+	{
+		ui32Server3DSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32Server3DSyncFlagsInt, psRGXKickTA3DIN->pui32Server3DSyncFlags, psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32Server3DSyncPrims != 0)
+	{
+		psServer3DSyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServer3DSyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hServer3DSyncsInt2, psRGXKickTA3DIN->phServer3DSyncs, psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	
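+	/* puiUpdateFenceName is a fixed 32-character array, so the generator
+	 * emits this staging block without a count guard. */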
+	{
+		uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += 32 * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (32 * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, psRGXKickTA3DIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32TACmdSize != 0)
+	{
+		psTACmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, psTACmdInt, psRGXKickTA3DIN->psTACmd, psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui323DPRCmdSize != 0)
+	{
+		ps3DPRCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ps3DPRCmdInt, psRGXKickTA3DIN->ps3DPRCmd, psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui323DCmdSize != 0)
+	{
+		ps3DCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ps3DCmdInt, psRGXKickTA3DIN->ps3DCmd, psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32SyncPMRCount != 0)
+	{
+		ui32SyncPMRFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32SyncPMRFlagsInt, psRGXKickTA3DIN->pui32SyncPMRFlags, psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+	if (psRGXKickTA3DIN->ui32SyncPMRCount != 0)
+	{
+		psSyncPMRsInt = (PMR **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(PMR *);
+		hSyncPMRsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hSyncPMRsInt2, psRGXKickTA3DIN->phSyncPMRs, psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXKickTA3D_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psRenderContextInt,
+											hRenderContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32ClientTAFenceCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psClientTAFenceSyncPrimBlockInt[i],
+											hClientTAFenceSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32ClientTAUpdateCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psClientTAUpdateSyncPrimBlockInt[i],
+											hClientTAUpdateSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32ServerTASyncPrims;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psServerTASyncsInt[i],
+											hServerTASyncsInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32Client3DFenceCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psClient3DFenceSyncPrimBlockInt[i],
+											hClient3DFenceSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32Client3DUpdateCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psClient3DUpdateSyncPrimBlockInt[i],
+											hClient3DUpdateSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32Server3DSyncPrims;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psServer3DSyncsInt[i],
+											hServer3DSyncsInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPRFenceUFOSyncPrimBlockInt,
+											hPRFenceUFOSyncPrimBlock,
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+
+
+
+
+
+				if (psRGXKickTA3DIN->hRTDataCleanup)
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psRTDataCleanupInt,
+											hRTDataCleanup,
+											PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+
+
+
+
+
+				if (psRGXKickTA3DIN->hZBuffer)
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psZBufferInt,
+											hZBuffer,
+											PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+
+
+
+
+
+				if (psRGXKickTA3DIN->hSBuffer)
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psSBufferInt,
+											hSBuffer,
+											PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+
+
+
+
+
+				if (psRGXKickTA3DIN->hMSAAScratchBuffer)
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psMSAAScratchBufferInt,
+											hMSAAScratchBuffer,
+											PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32SyncPMRCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psSyncPMRsInt[i],
+											hSyncPMRsInt2[i],
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXKickTA3D_exit;
+					}
+				}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXKickTA3DOUT->eError =
+		PVRSRVRGXKickTA3DKM(
+					psRenderContextInt,
+					psRGXKickTA3DIN->ui32ClientCacheOpSeqNum,
+					psRGXKickTA3DIN->ui32ClientTAFenceCount,
+					psClientTAFenceSyncPrimBlockInt,
+					ui32ClientTAFenceSyncOffsetInt,
+					ui32ClientTAFenceValueInt,
+					psRGXKickTA3DIN->ui32ClientTAUpdateCount,
+					psClientTAUpdateSyncPrimBlockInt,
+					ui32ClientTAUpdateSyncOffsetInt,
+					ui32ClientTAUpdateValueInt,
+					psRGXKickTA3DIN->ui32ServerTASyncPrims,
+					ui32ServerTASyncFlagsInt,
+					psServerTASyncsInt,
+					psRGXKickTA3DIN->ui32Client3DFenceCount,
+					psClient3DFenceSyncPrimBlockInt,
+					ui32Client3DFenceSyncOffsetInt,
+					ui32Client3DFenceValueInt,
+					psRGXKickTA3DIN->ui32Client3DUpdateCount,
+					psClient3DUpdateSyncPrimBlockInt,
+					ui32Client3DUpdateSyncOffsetInt,
+					ui32Client3DUpdateValueInt,
+					psRGXKickTA3DIN->ui32Server3DSyncPrims,
+					ui32Server3DSyncFlagsInt,
+					psServer3DSyncsInt,
+					psPRFenceUFOSyncPrimBlockInt,
+					psRGXKickTA3DIN->ui32FRFenceUFOSyncOffset,
+					psRGXKickTA3DIN->ui32FRFenceValue,
+					psRGXKickTA3DIN->hCheckFenceFD,
+					psRGXKickTA3DIN->hUpdateTimeline,
+					&psRGXKickTA3DOUT->hUpdateFence,
+					uiUpdateFenceNameInt,
+					psRGXKickTA3DIN->ui32TACmdSize,
+					psTACmdInt,
+					psRGXKickTA3DIN->ui323DPRCmdSize,
+					ps3DPRCmdInt,
+					psRGXKickTA3DIN->ui323DCmdSize,
+					ps3DCmdInt,
+					psRGXKickTA3DIN->ui32ExtJobRef,
+					psRGXKickTA3DIN->bbLastTAInScene,
+					psRGXKickTA3DIN->bbKickTA,
+					psRGXKickTA3DIN->bbKickPR,
+					psRGXKickTA3DIN->bbKick3D,
+					psRGXKickTA3DIN->bbAbort,
+					psRGXKickTA3DIN->ui32PDumpFlags,
+					psRTDataCleanupInt,
+					psZBufferInt,
+					psSBufferInt,
+					psMSAAScratchBufferInt,
+					psRGXKickTA3DIN->bbCommitRefCountsTA,
+					psRGXKickTA3DIN->bbCommitRefCounts3D,
+					&psRGXKickTA3DOUT->bbCommittedRefCountsTA,
+					&psRGXKickTA3DOUT->bbCommittedRefCounts3D,
+					psRGXKickTA3DIN->ui32SyncPMRCount,
+					ui32SyncPMRFlagsInt,
+					psSyncPMRsInt,
+					psRGXKickTA3DIN->ui32RenderTargetSize,
+					psRGXKickTA3DIN->ui32NumberOfDrawCalls,
+					psRGXKickTA3DIN->ui32NumberOfIndices,
+					psRGXKickTA3DIN->ui32NumberOfMRTs,
+					psRGXKickTA3DIN->ui64Deadline,
+					psRGXKickTA3DIN->ssRobustnessResetReason);
+
+
+
+
+RGXKickTA3D_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psRenderContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hRenderContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+					}
+				}
+
+
+
+
+
+
+	if (hClientTAFenceSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32ClientTAFenceCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hClientTAFenceSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hClientTAFenceSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hClientTAUpdateSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32ClientTAUpdateCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hClientTAUpdateSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hClientTAUpdateSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hServerTASyncsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32ServerTASyncPrims;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hServerTASyncsInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hServerTASyncsInt2[i],
+										PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hClient3DFenceSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32Client3DFenceCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hClient3DFenceSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hClient3DFenceSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hClient3DUpdateSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32Client3DUpdateCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hClient3DUpdateSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hClient3DUpdateSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hServer3DSyncsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32Server3DSyncPrims;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hServer3DSyncsInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hServer3DSyncsInt2[i],
+										PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					}
+				}
+		}
+	}
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPRFenceUFOSyncPrimBlockInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPRFenceUFOSyncPrimBlock,
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+
+
+
+
+
+				if (psRGXKickTA3DIN->hRTDataCleanup)
+				{
+					/* Unreference the previously looked up handle */
+					if(psRTDataCleanupInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hRTDataCleanup,
+										PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+					}
+				}
+
+
+
+
+
+				if (psRGXKickTA3DIN->hZBuffer)
+				{
+					/* Unreference the previously looked up handle */
+					if(psZBufferInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hZBuffer,
+										PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+					}
+				}
+
+
+
+
+
+				if (psRGXKickTA3DIN->hSBuffer)
+				{
+					/* Unreference the previously looked up handle */
+					if(psSBufferInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hSBuffer,
+										PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+					}
+				}
+
+
+
+
+
+				if (psRGXKickTA3DIN->hMSAAScratchBuffer)
+				{
+					/* Unreference the previously looked up handle */
+					if(psMSAAScratchBufferInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hMSAAScratchBuffer,
+										PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+					}
+				}
+
+
+
+
+
+
+	if (hSyncPMRsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32SyncPMRCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hSyncPMRsInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hSyncPMRsInt2[i],
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+		}
+	}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
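Note that ui32BufferSize for this call sums roughly thirty count-times-size products in 32-bit arithmetic, so it appears to lean on the counts having been validated before they reach this point. Purely as an illustration — this helper is not part of the driver — an overflow-checked accumulation would look like:

	/* Illustrative only: accumulate uiCount * uiElemSize into *puiTotal,
	 * refusing the addition if the running total would wrap IMG_UINT32. */
	static IMG_BOOL AddSizeChecked(IMG_UINT32 *puiTotal,
	                               IMG_UINT32 uiCount, IMG_UINT32 uiElemSize)
	{
		IMG_UINT64 ui64New = (IMG_UINT64)*puiTotal +
		                     (IMG_UINT64)uiCount * uiElemSize;

		if (ui64New > 0xFFFFFFFFULL)
			return IMG_FALSE;
		*puiTotal = (IMG_UINT32)ui64New;
		return IMG_TRUE;
	}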
+
+
+static IMG_INT
+PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN,
+					  PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hRenderContext = psRGXSetRenderContextPriorityIN->hRenderContext;
+	RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXSetRenderContextPriorityOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psRenderContextInt,
+											hRenderContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+											IMG_TRUE);
+					if(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXSetRenderContextPriority_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXSetRenderContextPriorityOUT->eError =
+		PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevData(psConnection),
+					psRenderContextInt,
+					psRGXSetRenderContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetRenderContextPriority_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psRenderContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hRenderContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+					}
+				}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXGetLastRenderContextResetReason(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON *psRGXGetLastRenderContextResetReasonIN,
+					  PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON *psRGXGetLastRenderContextResetReasonOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hRenderContext = psRGXGetLastRenderContextResetReasonIN->hRenderContext;
+	RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXGetLastRenderContextResetReasonOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psRenderContextInt,
+											hRenderContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+											IMG_TRUE);
+					if(psRGXGetLastRenderContextResetReasonOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXGetLastRenderContextResetReason_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXGetLastRenderContextResetReasonOUT->eError =
+		PVRSRVRGXGetLastRenderContextResetReasonKM(
+					psRenderContextInt,
+					&psRGXGetLastRenderContextResetReasonOUT->ui32LastResetReason,
+					&psRGXGetLastRenderContextResetReasonOUT->ui32LastResetJobRef);
+
+
+
+
+RGXGetLastRenderContextResetReason_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psRenderContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hRenderContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+					}
+				}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXGetPartialRenderCount(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT *psRGXGetPartialRenderCountIN,
+					  PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT *psRGXGetPartialRenderCountOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hHWRTDataMemDesc = psRGXGetPartialRenderCountIN->hHWRTDataMemDesc;
+	DEVMEM_MEMDESC * psHWRTDataMemDescInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXGetPartialRenderCountOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psHWRTDataMemDescInt,
+											hHWRTDataMemDesc,
+											PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+											IMG_TRUE);
+					if(psRGXGetPartialRenderCountOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXGetPartialRenderCount_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXGetPartialRenderCountOUT->eError =
+		PVRSRVRGXGetPartialRenderCountKM(
+					psHWRTDataMemDescInt,
+					&psRGXGetPartialRenderCountOUT->ui32NumPartialRenders);
+
+
+
+
+RGXGetPartialRenderCount_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psHWRTDataMemDescInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hHWRTDataMemDesc,
+										PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC);
+					}
+				}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch-related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXTA3DBridge(void);
+PVRSRV_ERROR DeinitRGXTA3DBridge(void);
+
+/*
+ * Register all RGXTA3D functions with services
+ */
+PVRSRV_ERROR InitRGXTA3DBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATA, PVRSRVBridgeRGXCreateHWRTData,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATA, PVRSRVBridgeRGXDestroyHWRTData,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERTARGET, PVRSRVBridgeRGXCreateRenderTarget,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERTARGET, PVRSRVBridgeRGXDestroyRenderTarget,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER, PVRSRVBridgeRGXCreateZSBuffer,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER, PVRSRVBridgeRGXDestroyZSBuffer,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER, PVRSRVBridgeRGXPopulateZSBuffer,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER, PVRSRVBridgeRGXUnpopulateZSBuffer,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, PVRSRVBridgeRGXCreateFreeList,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST, PVRSRVBridgeRGXDestroyFreeList,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXADDBLOCKTOFREELIST, PVRSRVBridgeRGXAddBlockToFreeList,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXREMOVEBLOCKFROMFREELIST, PVRSRVBridgeRGXRemoveBlockFromFreeList,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, PVRSRVBridgeRGXCreateRenderContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT, PVRSRVBridgeRGXDestroyRenderContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D, PVRSRVBridgeRGXKickTA3D,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY, PVRSRVBridgeRGXSetRenderContextPriority,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON, PVRSRVBridgeRGXGetLastRenderContextResetReason,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXGETPARTIALRENDERCOUNT, PVRSRVBridgeRGXGetPartialRenderCount,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
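InitRGXTA3DBridge does nothing but register each handler against its (bridge group, command) pair; dispatch later indexes the same table to route an ioctl to the right function. The sketch below models the idea with a simplified local table — the real table and dispatcher live in srvcore, so this layout is an assumption for illustration, not the actual data structure:

	/* Simplified model of bridge dispatch (illustrative; not the srvcore layout). */
	typedef IMG_INT (*PFN_BRIDGE_FN)(IMG_UINT32 ui32DispatchTableEntry,
	                                 void *psIn, void *psOut,
	                                 CONNECTION_DATA *psConnection);

	static PFN_BRIDGE_FN gapfnRGXTA3D[PVRSRV_BRIDGE_RGXTA3D_CMD_LAST + 1];

	static IMG_INT DispatchRGXTA3D(IMG_UINT32 ui32Cmd, void *psIn, void *psOut,
	                               CONNECTION_DATA *psConn)
	{
		/* Command IDs are dense from CMD_FIRST, so the ID doubles as an index. */
		return gapfnRGXTA3D[ui32Cmd](ui32Cmd, psIn, psOut, psConn);
	}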
+
+/*
+ * Unregister all RGXTA3D functions from services
+ */
+PVRSRV_ERROR DeinitRGXTA3DBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxtq2_bridge/common_rgxtq2_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxtq2_bridge/common_rgxtq2_bridge.h
new file mode 100644
index 0000000..928a7c8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxtq2_bridge/common_rgxtq2_bridge.h
@@ -0,0 +1,181 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxtq2
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxtq2
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXTQ2_BRIDGE_H
+#define COMMON_RGXTQ2_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT			PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT			PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER			PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY			PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE			PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST			(PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4)
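The command IDs above are dense offsets from PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST, with CMD_LAST pinned at FIRST+4 to match the five commands. An illustrative compile-time guard (not present in the driver) that would keep the two in sync if entries are ever added:

	/* Fails to compile if the command range and the entry count drift apart. */
	typedef char RGXTQ2_CMD_RANGE_CHECK[
		((PVRSRV_BRIDGE_RGXTQ2_CMD_LAST - PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST) == 4) ? 1 : -1];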
+
+
+/*******************************************
+            RGXTDMCreateTransferContext          
+ *******************************************/
+
+/* Bridge in structure for RGXTDMCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG
+{
+	IMG_UINT32 ui32Priority;
+	IMG_UINT32 ui32FrameworkCmdize;
+	IMG_BYTE * psFrameworkCmd;
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT;
+
+/* Bridge out structure for RGXTDMCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT_TAG
+{
+	IMG_HANDLE hTransferContext;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT;
+
+
+/*******************************************
+            RGXTDMDestroyTransferContext          
+ *******************************************/
+
+/* Bridge in structure for RGXTDMDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT_TAG
+{
+	IMG_HANDLE hTransferContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT;
+
+/* Bridge out structure for RGXTDMDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT;
+
+
+/*******************************************
+            RGXTDMSubmitTransfer          
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32ClientFenceCount;
+	IMG_HANDLE * phFenceUFOSyncPrimBlock;
+	IMG_UINT32 * pui32FenceSyncOffset;
+	IMG_UINT32 * pui32FenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_HANDLE * phUpdateUFOSyncPrimBlock;
+	IMG_UINT32 * pui32UpdateSyncOffset;
+	IMG_UINT32 * pui32UpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 * pui32ServerSyncFlags;
+	IMG_HANDLE * phServerSync;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_CHAR * puiUpdateFenceName;
+	IMG_UINT32 ui32CommandSize;
+	IMG_UINT8 * pui8FWCommand;
+	IMG_UINT32 ui32ExternalJobReference;
+	IMG_UINT32 ui32SyncPMRCount;
+	IMG_UINT32 * pui32SyncPMRFlags;
+	IMG_HANDLE * phSyncPMRs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER;
+
+/* Bridge out structure for RGXTDMSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER_TAG
+{
+	PVRSRV_FENCE hUpdateFence;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER;
+
+
+/*******************************************
+            RGXTDMSetTransferContextPriority          
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXTDMSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY;
+
+
+/*******************************************
+            RGXTDMNotifyWriteOffsetUpdate          
+ *******************************************/
+
+/* Bridge in structure for RGXTDMNotifyWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE;
+
+/* Bridge out structure for RGXTDMNotifyWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE;
+
+
+#endif /* COMMON_RGXTQ2_BRIDGE_H */
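For reference, these packed IN/OUT pairs are exactly what client-side code marshals across the bridge. A hypothetical fill for RGXTDMSetTransferContextPriority — hCtx and ui32NewPriority are placeholders, with hCtx assumed to come from an earlier RGXTDMCreateTransferContext call:

	PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY  sIn;
	PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY sOut = { PVRSRV_OK };

	sIn.hTransferContext = hCtx;            /* from RGXTDMCreateTransferContext */
	sIn.ui32Priority     = ui32NewPriority; /* requested scheduling priority */
	/* ... submit sIn/sOut through the bridge ioctl, then check sOut.eError ... */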
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxtq2_bridge/server_rgxtq2_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxtq2_bridge/server_rgxtq2_bridge.c
new file mode 100644
index 0000000..681d6066
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxtq2_bridge/server_rgxtq2_bridge.c
@@ -0,0 +1,1121 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxtq2
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxtq2
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtdmtransfer.h"
+
+
+#include "common_rgxtq2_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+#include "rgx_bvnc_defs_km.h"
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
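+/*
+ * Generated pattern note: every entry point below first gates on the
+ * RGX_FEATURE_FASTRENDER_DM feature, then marshals any user-supplied arrays
+ * into a single staging buffer (reusing the tail of the bridge input buffer
+ * when it is large enough, otherwise allocating one), resolves handles under
+ * the handle lock, calls the corresponding *KM function, and releases the
+ * looked-up handles on the common exit path.
+ */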
+static IMG_INT
+PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_BYTE *psFrameworkCmdInt = NULL;
+	IMG_HANDLE hPrivData = psRGXTDMCreateTransferContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	RGX_SERVER_TQ_TDM_CONTEXT * psTransferContextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+			0;
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that the device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+		{
+			psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXTDMCreateTransferContext_exit;
+		}
+	}
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use the remainder of the input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXTDMCreateTransferContextIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXTDMCreateTransferContext_exit;
+			}
+		}
+	}
+
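+	/* Carve the per-array staging areas out of pArrayArgsBuffer at
+	 * successive offsets; ui32NextOffset must finish equal to
+	 * ui32BufferSize (checked by the PVR_ASSERT on the exit path). */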
+	if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize != 0)
+	{
+		psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+	}
+
+			/* Copy the data over */
+			if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXTDMCreateTransferContextIN->psFrameworkCmd, psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+				{
+					psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMCreateTransferContext_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXTDMCreateTransferContextOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &hPrivDataInt,
+											hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+											IMG_TRUE);
+					if(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXTDMCreateTransferContext_exit;
+					}
+				}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psRGXTDMCreateTransferContextOUT->eError =
+		PVRSRVRGXTDMCreateTransferContextKM(psConnection, OSGetDevData(psConnection),
+					psRGXTDMCreateTransferContextIN->ui32Priority,
+					psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize,
+					psFrameworkCmdInt,
+					hPrivDataInt,
+					&psTransferContextInt);
+	/* Exit early if bridged call fails */
+	if(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXTDMCreateTransferContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRGXTDMCreateTransferContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psRGXTDMCreateTransferContextOUT->hTransferContext,
+							(void *) psTransferContextInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVRGXTDMDestroyTransferContextKM);
+	if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXTDMCreateTransferContext_exit;
+	}
+
+	/* Release now that we have created handles. */
+	UnlockHandle();
+
+
+
+RGXTDMCreateTransferContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(hPrivDataInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPrivData,
+										PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					}
+				}
+	/* Release now that we have cleaned up looked-up handles. */
+	UnlockHandle();
+
+	if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+	{
+		if (psTransferContextInt)
+		{
+			PVRSRVRGXTDMDestroyTransferContextKM(psTransferContextInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXTDMDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that the device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+		{
+			psRGXTDMDestroyTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXTDMDestroyTransferContext_exit;
+		}
+	}
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXTDMDestroyTransferContextOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXTDMDestroyTransferContextIN->hTransferContext,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+	if ((psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_OK) &&
+	    (psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXTDMDestroyTransferContext: %s",
+		        PVRSRVGetErrorStringKM(psRGXTDMDestroyTransferContextOUT->eError)));
+		UnlockHandle();
+		goto RGXTDMDestroyTransferContext_exit;
+	}
+
+	/* Release now that we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXTDMDestroyTransferContext_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXTDMSubmitTransfer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER *psRGXTDMSubmitTransferIN,
+					  PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER *psRGXTDMSubmitTransferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hTransferContext = psRGXTDMSubmitTransferIN->hTransferContext;
+	RGX_SERVER_TQ_TDM_CONTEXT * psTransferContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32FenceSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32FenceValueInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * *psUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL;
+	IMG_UINT32 *ui32UpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerSyncInt = NULL;
+	IMG_HANDLE *hServerSyncInt2 = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_UINT8 *ui8FWCommandInt = NULL;
+	IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+	PMR * *psSyncPMRsInt = NULL;
+	IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+			(psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+			(psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+			(psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+			(psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+			(psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+			(psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+			(32 * sizeof(IMG_CHAR)) +
+			(psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8)) +
+			(psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+			(psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *)) +
+			(psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) +
+			0;
+
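+	/* ui32BufferSize aggregates every user-supplied array for this call:
+	 * client fence and update sync-block pointers, handles, offsets and
+	 * values, the server sync flags, primitives and handles, the
+	 * 32-character update fence name, the FW command bytes, and the sync
+	 * PMR flag and handle arrays. */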
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that the device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+		{
+			psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXTDMSubmitTransfer_exit;
+		}
+	}
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use the remainder of the input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXTDMSubmitTransferIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXTDMSubmitTransferIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXTDMSubmitTransfer_exit;
+			}
+		}
+	}
+
+	if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount != 0)
+	{
+		psFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hFenceUFOSyncPrimBlockInt2, psRGXTDMSubmitTransferIN->phFenceUFOSyncPrimBlock, psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMSubmitTransfer_exit;
+				}
+			}
+	if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount != 0)
+	{
+		ui32FenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32FenceSyncOffsetInt, psRGXTDMSubmitTransferIN->pui32FenceSyncOffset, psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMSubmitTransfer_exit;
+				}
+			}
+	if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount != 0)
+	{
+		ui32FenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32FenceValueInt, psRGXTDMSubmitTransferIN->pui32FenceValue, psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMSubmitTransfer_exit;
+				}
+			}
+	if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount != 0)
+	{
+		psUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hUpdateUFOSyncPrimBlockInt2, psRGXTDMSubmitTransferIN->phUpdateUFOSyncPrimBlock, psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMSubmitTransfer_exit;
+				}
+			}
+	if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32UpdateSyncOffsetInt, psRGXTDMSubmitTransferIN->pui32UpdateSyncOffset, psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMSubmitTransfer_exit;
+				}
+			}
+	if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32UpdateValueInt, psRGXTDMSubmitTransferIN->pui32UpdateValue, psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMSubmitTransfer_exit;
+				}
+			}
+	if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXTDMSubmitTransferIN->pui32ServerSyncFlags, psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMSubmitTransfer_exit;
+				}
+			}
+	if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServerSyncInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hServerSyncInt2, psRGXTDMSubmitTransferIN->phServerSync, psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMSubmitTransfer_exit;
+				}
+			}
+	
+	{
+		uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += 32 * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (32 * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, psRGXTDMSubmitTransferIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMSubmitTransfer_exit;
+				}
+			}
+	if (psRGXTDMSubmitTransferIN->ui32CommandSize != 0)
+	{
+		ui8FWCommandInt = (IMG_UINT8*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8);
+	}
+
+			/* Copy the data over */
+			if (psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui8FWCommandInt, psRGXTDMSubmitTransferIN->pui8FWCommand, psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8)) != PVRSRV_OK )
+				{
+					psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMSubmitTransfer_exit;
+				}
+			}
+	if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount != 0)
+	{
+		ui32SyncPMRFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32SyncPMRFlagsInt, psRGXTDMSubmitTransferIN->pui32SyncPMRFlags, psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMSubmitTransfer_exit;
+				}
+			}
+	if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount != 0)
+	{
+		psSyncPMRsInt = (PMR **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *);
+		hSyncPMRsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hSyncPMRsInt2, psRGXTDMSubmitTransferIN->phSyncPMRs, psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXTDMSubmitTransfer_exit;
+				}
+			}
+
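+	/* All user data is now staged kernel-side; the lookups below resolve
+	 * each handle into its server-side object pointer in one locked
+	 * section. */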
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXTDMSubmitTransferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psTransferContextInt,
+											hTransferContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+											IMG_TRUE);
+					if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXTDMSubmitTransfer_exit;
+					}
+				}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXTDMSubmitTransferIN->ui32ClientFenceCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXTDMSubmitTransferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psFenceUFOSyncPrimBlockInt[i],
+											hFenceUFOSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXTDMSubmitTransfer_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXTDMSubmitTransferIN->ui32ClientUpdateCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXTDMSubmitTransferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psUpdateUFOSyncPrimBlockInt[i],
+											hUpdateUFOSyncPrimBlockInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXTDMSubmitTransfer_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXTDMSubmitTransferIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXTDMSubmitTransferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psServerSyncInt[i],
+											hServerSyncInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+											IMG_TRUE);
+					if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXTDMSubmitTransfer_exit;
+					}
+				}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXTDMSubmitTransferIN->ui32SyncPMRCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXTDMSubmitTransferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psSyncPMRsInt[i],
+											hSyncPMRsInt2[i],
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXTDMSubmitTransfer_exit;
+					}
+				}
+		}
+	}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psRGXTDMSubmitTransferOUT->eError =
+		PVRSRVRGXTDMSubmitTransferKM(
+					psTransferContextInt,
+					psRGXTDMSubmitTransferIN->ui32PDumpFlags,
+					psRGXTDMSubmitTransferIN->ui32ClientCacheOpSeqNum,
+					psRGXTDMSubmitTransferIN->ui32ClientFenceCount,
+					psFenceUFOSyncPrimBlockInt,
+					ui32FenceSyncOffsetInt,
+					ui32FenceValueInt,
+					psRGXTDMSubmitTransferIN->ui32ClientUpdateCount,
+					psUpdateUFOSyncPrimBlockInt,
+					ui32UpdateSyncOffsetInt,
+					ui32UpdateValueInt,
+					psRGXTDMSubmitTransferIN->ui32ServerSyncCount,
+					ui32ServerSyncFlagsInt,
+					psServerSyncInt,
+					psRGXTDMSubmitTransferIN->hCheckFenceFD,
+					psRGXTDMSubmitTransferIN->hUpdateTimeline,
+					&psRGXTDMSubmitTransferOUT->hUpdateFence,
+					uiUpdateFenceNameInt,
+					psRGXTDMSubmitTransferIN->ui32CommandSize,
+					ui8FWCommandInt,
+					psRGXTDMSubmitTransferIN->ui32ExternalJobReference,
+					psRGXTDMSubmitTransferIN->ui32SyncPMRCount,
+					ui32SyncPMRFlagsInt,
+					psSyncPMRsInt);
+
+
+
+
+RGXTDMSubmitTransfer_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psTransferContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hTransferContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+					}
+				}
+
+
+
+
+
+
+	if (hFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXTDMSubmitTransferIN->ui32ClientFenceCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hFenceUFOSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hFenceUFOSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXTDMSubmitTransferIN->ui32ClientUpdateCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hUpdateUFOSyncPrimBlockInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hUpdateUFOSyncPrimBlockInt2[i],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hServerSyncInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXTDMSubmitTransferIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hServerSyncInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hServerSyncInt2[i],
+										PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					}
+				}
+		}
+	}
+
+
+
+
+
+
+	if (hSyncPMRsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXTDMSubmitTransferIN->ui32SyncPMRCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hSyncPMRsInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hSyncPMRsInt2[i],
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+		}
+	}
+	/* Release now that we have cleaned up looked-up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXTDMSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityIN,
+					  PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPriorityIN->hTransferContext;
+	RGX_SERVER_TQ_TDM_CONTEXT * psTransferContextInt = NULL;
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that the device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+		{
+			psRGXTDMSetTransferContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXTDMSetTransferContextPriority_exit;
+		}
+	}
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXTDMSetTransferContextPriorityOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psTransferContextInt,
+											hTransferContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+											IMG_TRUE);
+					if(psRGXTDMSetTransferContextPriorityOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXTDMSetTransferContextPriority_exit;
+					}
+				}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psRGXTDMSetTransferContextPriorityOUT->eError =
+		PVRSRVRGXTDMSetTransferContextPriorityKM(psConnection, OSGetDevData(psConnection),
+					psTransferContextInt,
+					psRGXTDMSetTransferContextPriorityIN->ui32Priority);
+
+
+
+
+RGXTDMSetTransferContextPriority_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psTransferContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hTransferContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+					}
+				}
+	/* Release now that we have cleaned up looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateIN,
+					  PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hTransferContext = psRGXTDMNotifyWriteOffsetUpdateIN->hTransferContext;
+	RGX_SERVER_TQ_TDM_CONTEXT * psTransferContextInt = NULL;
+
+
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+		/* Check that the device supports the required feature */
+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
+			!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+		{
+			psRGXTDMNotifyWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+			goto RGXTDMNotifyWriteOffsetUpdate_exit;
+		}
+	}
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psTransferContextInt,
+											hTransferContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+											IMG_TRUE);
+					if(psRGXTDMNotifyWriteOffsetUpdateOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXTDMNotifyWriteOffsetUpdate_exit;
+					}
+				}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+		PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+					psTransferContextInt,
+					psRGXTDMNotifyWriteOffsetUpdateIN->ui32PDumpFlags);
+
+
+
+
+RGXTDMNotifyWriteOffsetUpdate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psTransferContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hTransferContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+					}
+				}
+	/* Release now that we have cleaned up looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
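+/* IMG_TRUE registers every RGXTQ2 entry to dispatch with the global bridge
+ * lock held. */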
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXTQ2Bridge(void);
+PVRSRV_ERROR DeinitRGXTQ2Bridge(void);
+
+/*
+ * Register all RGXTQ2 functions with services
+ */
+PVRSRV_ERROR InitRGXTQ2Bridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT, PVRSRVBridgeRGXTDMCreateTransferContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT, PVRSRVBridgeRGXTDMDestroyTransferContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER, PVRSRVBridgeRGXTDMSubmitTransfer,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY, PVRSRVBridgeRGXTDMSetTransferContextPriority,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE, PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTQ2 functions from services
+ */
+PVRSRV_ERROR DeinitRGXTQ2Bridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxtq_bridge/common_rgxtq_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxtq_bridge/common_rgxtq_bridge.h
new file mode 100644
index 0000000..82fd228
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxtq_bridge/common_rgxtq_bridge.h
@@ -0,0 +1,163 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxtq
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxtq
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXTQ_BRIDGE_H
+#define COMMON_RGXTQ_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "pvrsrv_sync_km.h"
+
+
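+/* Command indices for this module are offsets from
+ * PVRSRV_BRIDGE_RGXTQ_CMD_FIRST; CMD_LAST bounds the dispatch range. */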
+#define PVRSRV_BRIDGE_RGXTQ_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT			PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT			PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER			PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY			PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST			(PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3)
+
+
+/*******************************************
+            RGXCreateTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT_TAG
+{
+	IMG_UINT32 ui32Priority;
+	IMG_UINT32 ui32FrameworkCmdize;
+	IMG_BYTE * psFrameworkCmd;
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT;
+
+/* Bridge out structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT_TAG
+{
+	IMG_HANDLE hTransferContext;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT;
+
+
+/*******************************************
+            RGXDestroyTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+	IMG_HANDLE hTransferContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT;
+
+/* Bridge out structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT;
+
+
+/*******************************************
+            RGXSubmitTransfer
+ *******************************************/
+
+/* Bridge in structure for RGXSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32ClientCacheOpSeqNum;
+	IMG_UINT32 ui32PrepareCount;
+	IMG_UINT32 * pui32ClientFenceCount;
+	IMG_HANDLE* * phFenceUFOSyncPrimBlock;
+	IMG_UINT32* * pui32FenceSyncOffset;
+	IMG_UINT32* * pui32FenceValue;
+	IMG_UINT32 * pui32ClientUpdateCount;
+	IMG_HANDLE* * phUpdateUFOSyncPrimBlock;
+	IMG_UINT32* * pui32UpdateSyncOffset;
+	IMG_UINT32* * pui32UpdateValue;
+	IMG_UINT32 * pui32ServerSyncCount;
+	IMG_UINT32* * pui32ServerSyncFlags;
+	IMG_HANDLE* * phServerSync;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_CHAR * puiUpdateFenceName;
+	IMG_UINT32 * pui32CommandSize;
+	IMG_UINT8* * pui8FWCommand;
+	IMG_UINT32 * pui32TQPrepareFlags;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_UINT32 ui32SyncPMRCount;
+	IMG_UINT32 * pui32SyncPMRFlags;
+	IMG_HANDLE * phSyncPMRs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER;
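+/* The pointer members above carry user-mode addresses; the server bridge
+ * copies their contents in with OSCopyFromUser() before use. */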
+
+/* Bridge out structure for RGXSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER_TAG
+{
+	PVRSRV_FENCE hUpdateFence;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER;
+
+
+/*******************************************
+            RGXSetTransferContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY;
+
+
+#endif /* COMMON_RGXTQ_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxtq_bridge/server_rgxtq_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxtq_bridge/server_rgxtq_bridge.c
new file mode 100644
index 0000000..29cb5f1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/rgxtq_bridge/server_rgxtq_bridge.c
@@ -0,0 +1,1425 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxtq
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxtq
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtransfer.h"
+
+
+#include "common_rgxtq_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
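+/*
+ * These entries mirror the rgxtq2 bridge but drive the general transfer
+ * queue context (RGX_SERVER_TQ_CONTEXT) and do not gate on the
+ * FASTRENDER_DM feature. RGXSubmitTransfer additionally marshals
+ * two-dimensional, per-prepare arrays: the first-dimension pointer tables
+ * are staged here, with the second dimension tracked via ui32BufferSize2
+ * and pArrayArgsBuffer2.
+ */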
+static IMG_INT
+PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_BYTE *psFrameworkCmdInt = NULL;
+	IMG_HANDLE hPrivData = psRGXCreateTransferContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	RGX_SERVER_TQ_CONTEXT * psTransferContextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use the remainder of the input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateTransferContextIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateTransferContextIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXCreateTransferContext_exit;
+			}
+		}
+	}
+
+	if (psRGXCreateTransferContextIN->ui32FrameworkCmdize != 0)
+	{
+		psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+	}
+
+			/* Copy the data over */
+			if (psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXCreateTransferContextIN->psFrameworkCmd, psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+				{
+					psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXCreateTransferContext_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateTransferContextOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &hPrivDataInt,
+											hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+											IMG_TRUE);
+					if(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXCreateTransferContext_exit;
+					}
+				}
+	/* Release now that we have looked up handles. */
+	UnlockHandle();
+
+	psRGXCreateTransferContextOUT->eError =
+		PVRSRVRGXCreateTransferContextKM(psConnection, OSGetDevData(psConnection),
+					psRGXCreateTransferContextIN->ui32Priority,
+					psRGXCreateTransferContextIN->ui32FrameworkCmdize,
+					psFrameworkCmdInt,
+					hPrivDataInt,
+					&psTransferContextInt);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateTransferContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRGXCreateTransferContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psRGXCreateTransferContextOUT->hTransferContext,
+							(void *) psTransferContextInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyTransferContextKM);
+	if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RGXCreateTransferContext_exit;
+	}
+
+	/* Release now that we have created handles. */
+	UnlockHandle();
+
+
+
+RGXCreateTransferContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(hPrivDataInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPrivData,
+										PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					}
+				}
+	/* Release now that we have cleaned up looked-up handles. */
+	UnlockHandle();
+
+	if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+	{
+		if (psTransferContextInt)
+		{
+			PVRSRVRGXDestroyTransferContextKM(psTransferContextInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRGXDestroyTransferContextOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyTransferContextIN->hTransferContext,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+	if ((psRGXDestroyTransferContextOUT->eError != PVRSRV_OK) &&
+	    (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRGXDestroyTransferContext: %s",
+		        PVRSRVGetErrorStringKM(psRGXDestroyTransferContextOUT->eError)));
+		UnlockHandle();
+		goto RGXDestroyTransferContext_exit;
+	}
+
+	/* Release now that we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RGXDestroyTransferContext_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSubmitTransfer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER *psRGXSubmitTransferIN,
+					  PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER *psRGXSubmitTransferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hTransferContext = psRGXSubmitTransferIN->hTransferContext;
+	RGX_SERVER_TQ_CONTEXT * psTransferContextInt = NULL;
+	IMG_UINT32 *ui32ClientFenceCountInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * **psFenceUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE **hFenceUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 **ui32FenceSyncOffsetInt = NULL;
+	IMG_UINT32 **ui32FenceValueInt = NULL;
+	IMG_UINT32 *ui32ClientUpdateCountInt = NULL;
+	SYNC_PRIMITIVE_BLOCK * **psUpdateUFOSyncPrimBlockInt = NULL;
+	IMG_HANDLE **hUpdateUFOSyncPrimBlockInt2 = NULL;
+	IMG_UINT32 **ui32UpdateSyncOffsetInt = NULL;
+	IMG_UINT32 **ui32UpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerSyncCountInt = NULL;
+	IMG_UINT32 **ui32ServerSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE * **psServerSyncInt = NULL;
+	IMG_HANDLE **hServerSyncInt2 = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+	IMG_UINT32 *ui32CommandSizeInt = NULL;
+	IMG_UINT8 **ui8FWCommandInt = NULL;
+	IMG_UINT32 *ui32TQPrepareFlagsInt = NULL;
+	IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+	PMR * *psSyncPMRsInt = NULL;
+	IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+	IMG_BYTE   *pArrayArgsBuffer2 = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+			(psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+			(psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+			(32 * sizeof(IMG_CHAR)) +
+			(psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+			(psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+			(psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+			(psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *)) +
+			(psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) +
+			0;
+	IMG_UINT32 ui32BufferSize2 = 0;
+	IMG_UINT32 ui32NextOffset2 = 0;
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+
+		ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+		ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE **);
+		ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+		ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+		ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+		ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE **);
+		ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+		ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+		ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+		ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SERVER_SYNC_PRIMITIVE **);
+		ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE **);
+		ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT8*);
+	}
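+	/* The additions above reserve first-dimension pointer tables, one slot
+	 * per prepare. IMG_HANDLE is a pointer type, so the later
+	 * sizeof(IMG_HANDLE) offset increments match these
+	 * sizeof(IMG_HANDLE **) reservations and the offset accounting stays
+	 * consistent. */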
+
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use the remainder of the input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXSubmitTransferIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXSubmitTransferIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXSubmitTransfer_exit;
+			}
+		}
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32ClientFenceCountInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientFenceCountInt, psRGXSubmitTransferIN->pui32ClientFenceCount, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning psFenceUFOSyncPrimBlockInt to the right offset in the pool buffer for the first dimension */
+		psFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK ***)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+		/* Assigning hFenceUFOSyncPrimBlockInt2 to the right offset in the pool buffer for the first dimension */
+		hFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32FenceSyncOffsetInt to the right offset in the pool buffer for the first dimension */
+		ui32FenceSyncOffsetInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32FenceValueInt to the right offset in the pool buffer for the first dimension */
+		ui32FenceValueInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32ClientUpdateCountInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ClientUpdateCountInt, psRGXSubmitTransferIN->pui32ClientUpdateCount, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer for the first dimension */
+		psUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK ***)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+		/* Assigning hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer for the first dimension */
+		hUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32UpdateSyncOffsetInt to the right offset in the pool buffer for the first dimension */
+		ui32UpdateSyncOffsetInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32UpdateValueInt to the right offset in the pool buffer for the first dimension */
+		ui32UpdateValueInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32ServerSyncCountInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32ServerSyncCountInt, psRGXSubmitTransferIN->pui32ServerSyncCount, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui32ServerSyncFlagsInt to the right offset in the pool buffer for first dimension */
+		ui32ServerSyncFlagsInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning psServerSyncInt to the right offset in the pool buffer for first dimension */
+		psServerSyncInt = (SERVER_SYNC_PRIMITIVE ***)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SERVER_SYNC_PRIMITIVE **);
+		/* Assigning hServerSyncInt2 to the right offset in the pool buffer for first dimension */
+		hServerSyncInt2 = (IMG_HANDLE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE);
+	}
+
+
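+	/* The update fence name is a fixed 32-character array, so it is
+	 * carved from the pool buffer and copied over unconditionally. */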
+	{
+		uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += 32 * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (32 * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, psRGXSubmitTransferIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32CommandSizeInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32CommandSizeInt, psRGXSubmitTransferIN->pui32CommandSize, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		/* Assigning ui8FWCommandInt to the right offset in the pool buffer for first dimension */
+		ui8FWCommandInt = (IMG_UINT8**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT8*);
+	}
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32TQPrepareFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32TQPrepareFlagsInt, psRGXSubmitTransferIN->pui32TQPrepareFlags, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+	if (psRGXSubmitTransferIN->ui32SyncPMRCount != 0)
+	{
+		ui32SyncPMRFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32SyncPMRFlagsInt, psRGXSubmitTransferIN->pui32SyncPMRFlags, psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+	if (psRGXSubmitTransferIN->ui32SyncPMRCount != 0)
+	{
+		psSyncPMRsInt = (PMR **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *);
+		hSyncPMRsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hSyncPMRsInt2, psRGXSubmitTransferIN->phSyncPMRs, psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+
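+	/*
+	 * All of the per-prepare counts are now resident in kernel memory, so
+	 * total up the space needed for the second-dimension arrays: the
+	 * client fence and update payloads, the server syncs and the raw FW
+	 * command bytes for every prepare.
+	 */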
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+		{
+			ui32BufferSize2 += ui32ClientFenceCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+			ui32BufferSize2 += ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE *);
+			ui32BufferSize2 += ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 += ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+			ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *);
+			ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 += ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32);
+			ui32BufferSize2 += ui32ServerSyncCountInt[i] * sizeof(SERVER_SYNC_PRIMITIVE *);
+			ui32BufferSize2 += ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE *);
+			ui32BufferSize2 += ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+		}
+	}
+
+	if (ui32BufferSize2 != 0)
+	{
+		pArrayArgsBuffer2 = OSAllocMemNoStats(ui32BufferSize2);
+
+		if(!pArrayArgsBuffer2)
+		{
+			psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+
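+	/*
+	 * Carve the second pool buffer into one second-dimension array per
+	 * prepare, mirroring the layout that was sized above.
+	 */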
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Assigning each psFenceUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */
+			psFenceUFOSyncPrimBlockInt[i] = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+			ui32NextOffset2 += ui32ClientFenceCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+			/* Assigning each hFenceUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */
+			hFenceUFOSyncPrimBlockInt2[i] = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+			ui32NextOffset2 += ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Assigning each ui32FenceSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32FenceSyncOffsetInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+			ui32NextOffset2 += ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Assigning each ui32FenceValueInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32FenceValueInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+			ui32NextOffset2 += ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Assigning each psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */
+			psUpdateUFOSyncPrimBlockInt[i] = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+			ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+			/* Assigning each hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */
+			hUpdateUFOSyncPrimBlockInt2[i] = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+			ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Assigning each ui32UpdateSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32UpdateSyncOffsetInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+			ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Assigning each ui32UpdateValueInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32UpdateValueInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+			ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Assigning each ui32ServerSyncFlagsInt to the right offset in the pool buffer (this is the second dimension) */
+			ui32ServerSyncFlagsInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+			ui32NextOffset2 += ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Assigning each psServerSyncInt to the right offset in the pool buffer (this is the second dimension) */
+			psServerSyncInt[i] = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+			ui32NextOffset2 += ui32ServerSyncCountInt[i] * sizeof(SERVER_SYNC_PRIMITIVE *);
+			/* Assigning each hServerSyncInt2 to the right offset in the pool buffer (this is the second dimension) */
+			hServerSyncInt2[i] = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+			ui32NextOffset2 += ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE);
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 i;
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Assigning each ui8FWCommandInt to the right offset in the pool buffer (this is the second dimension) */
+			ui8FWCommandInt[i] = (IMG_UINT8*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+			ui32NextOffset2 += ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+		}
+	}
+
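+	/*
+	 * Each element of the user-supplied two-dimensional arrays is itself a
+	 * user-space pointer, so fetch the pointer value first and then copy
+	 * the array it refers to into the matching second-dimension buffer.
+	 */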
+	{
+		IMG_UINT32 i;
+		IMG_HANDLE **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->phFenceUFOSyncPrimBlock[i],
+				sizeof(IMG_HANDLE **)) != PVRSRV_OK )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE)) > 0)
+			{
+				if ( OSCopyFromUser(NULL, (hFenceUFOSyncPrimBlockInt2[i]), psPtr, (ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE))) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32FenceSyncOffset[i],
+				sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32)) > 0)
+			{
+				if ( OSCopyFromUser(NULL, (ui32FenceSyncOffsetInt[i]), psPtr, (ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32FenceValue[i],
+				sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32)) > 0)
+			{
+				if ( OSCopyFromUser(NULL, (ui32FenceValueInt[i]), psPtr, (ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_HANDLE **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->phUpdateUFOSyncPrimBlock[i],
+				sizeof(IMG_HANDLE **)) != PVRSRV_OK )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE)) > 0)
+			{
+				if ( OSCopyFromUser(NULL, (hUpdateUFOSyncPrimBlockInt2[i]), psPtr, (ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE))) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32UpdateSyncOffset[i],
+				sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0)
+			{
+				if ( OSCopyFromUser(NULL, (ui32UpdateSyncOffsetInt[i]), psPtr, (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32UpdateValue[i],
+				sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0)
+			{
+				if ( OSCopyFromUser(NULL, (ui32UpdateValueInt[i]), psPtr, (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32ServerSyncFlags[i],
+				sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32)) > 0)
+			{
+				if ( OSCopyFromUser(NULL, (ui32ServerSyncFlagsInt[i]), psPtr, (ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_HANDLE **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->phServerSync[i],
+				sizeof(IMG_HANDLE **)) != PVRSRV_OK )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE)) > 0)
+			{
+				if ( OSCopyFromUser(NULL, (hServerSyncInt2[i]), psPtr, (ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE))) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT8 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui8FWCommand[i],
+				sizeof(IMG_UINT8 **)) != PVRSRV_OK )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+			/* Copy the data over */
+			if ((ui32CommandSizeInt[i] * sizeof(IMG_UINT8)) > 0)
+			{
+				if ( OSCopyFromUser(NULL, (ui8FWCommandInt[i]), psPtr, (ui32CommandSizeInt[i] * sizeof(IMG_UINT8))) != PVRSRV_OK )
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RGXSubmitTransfer_exit;
+				}
+			}
+		}
+	}
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXSubmitTransferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psTransferContextInt,
+											hTransferContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+											IMG_TRUE);
+					if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXSubmitTransfer_exit;
+					}
+				}
+
+
+
+
+
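+	/*
+	 * Translate every client fence sync block handle into its kernel-side
+	 * SYNC_PRIMITIVE_BLOCK, taking a reference on each; the references are
+	 * dropped again in the exit path below.
+	 */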
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			IMG_UINT32 j;
+			for (j=0;j<ui32ClientFenceCountInt[i];j++)
+			{
+				{
+					/* Look up the address from the handle */
+					psRGXSubmitTransferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psFenceUFOSyncPrimBlockInt[i][j],
+											hFenceUFOSyncPrimBlockInt2[i][j],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXSubmitTransfer_exit;
+					}
+				}
+			}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			IMG_UINT32 j;
+			for (j=0;j<ui32ClientUpdateCountInt[i];j++)
+			{
+				{
+					/* Look up the address from the handle */
+					psRGXSubmitTransferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psUpdateUFOSyncPrimBlockInt[i][j],
+											hUpdateUFOSyncPrimBlockInt2[i][j],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXSubmitTransfer_exit;
+					}
+				}
+			}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			IMG_UINT32 j;
+			for (j=0;j<ui32ServerSyncCountInt[i];j++)
+			{
+				{
+					/* Look up the address from the handle */
+					psRGXSubmitTransferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psServerSyncInt[i][j],
+											hServerSyncInt2[i][j],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+											IMG_TRUE);
+					if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXSubmitTransfer_exit;
+					}
+				}
+			}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXSubmitTransferIN->ui32SyncPMRCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXSubmitTransferOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psSyncPMRsInt[i],
+											hSyncPMRsInt2[i],
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXSubmitTransfer_exit;
+					}
+				}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
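+	/*
+	 * Every argument has been marshalled and every handle resolved, so
+	 * hand the whole batch of transfer prepares to the server-side
+	 * implementation in a single call.
+	 */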
+	psRGXSubmitTransferOUT->eError =
+		PVRSRVRGXSubmitTransferKM(
+					psTransferContextInt,
+					psRGXSubmitTransferIN->ui32ClientCacheOpSeqNum,
+					psRGXSubmitTransferIN->ui32PrepareCount,
+					ui32ClientFenceCountInt,
+					psFenceUFOSyncPrimBlockInt,
+					ui32FenceSyncOffsetInt,
+					ui32FenceValueInt,
+					ui32ClientUpdateCountInt,
+					psUpdateUFOSyncPrimBlockInt,
+					ui32UpdateSyncOffsetInt,
+					ui32UpdateValueInt,
+					ui32ServerSyncCountInt,
+					ui32ServerSyncFlagsInt,
+					psServerSyncInt,
+					psRGXSubmitTransferIN->hCheckFenceFD,
+					psRGXSubmitTransferIN->hUpdateTimeline,
+					&psRGXSubmitTransferOUT->hUpdateFence,
+					uiUpdateFenceNameInt,
+					ui32CommandSizeInt,
+					ui8FWCommandInt,
+					ui32TQPrepareFlagsInt,
+					psRGXSubmitTransferIN->ui32ExtJobRef,
+					psRGXSubmitTransferIN->ui32SyncPMRCount,
+					ui32SyncPMRFlagsInt,
+					psSyncPMRsInt);
+
+
+
+
+RGXSubmitTransfer_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psTransferContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hTransferContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+					}
+				}
+
+
+
+
+
+
+	if (hFenceUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			IMG_UINT32 j;
+			for (j=0;j<ui32ClientFenceCountInt[i];j++)
+			{
+				{
+					/* Unreference the previously looked up handle */
+					if(hFenceUFOSyncPrimBlockInt2[i][j])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hFenceUFOSyncPrimBlockInt2[i][j],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+			}
+		}
+	}
+
+
+
+
+
+
+	if (hUpdateUFOSyncPrimBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			IMG_UINT32 j;
+			for (j=0;j<ui32ClientUpdateCountInt[i];j++)
+			{
+				{
+					/* Unreference the previously looked up handle */
+					if(hUpdateUFOSyncPrimBlockInt2[i][j])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hUpdateUFOSyncPrimBlockInt2[i][j],
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+			}
+		}
+	}
+
+
+
+
+
+
+	if (hServerSyncInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			IMG_UINT32 j;
+			for (j=0;j<ui32ServerSyncCountInt[i];j++)
+			{
+				{
+					/* Unreference the previously looked up handle */
+					if(hServerSyncInt2[i][j])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hServerSyncInt2[i][j],
+										PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					}
+				}
+			}
+		}
+	}
+
+
+
+
+
+
+	if (hSyncPMRsInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXSubmitTransferIN->ui32SyncPMRCount;i++)
+		{
+				{
+					/* Unreference the previously looked up handle */
+					if(hSyncPMRsInt2[i])
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hSyncPMRsInt2[i],
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
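+	/* Only free the first buffer if it was really allocated rather than
+	 * borrowed from the tail of the bridge input buffer. */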
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize2 == ui32NextOffset2);
+
+	if(pArrayArgsBuffer2)
+		OSFreeMemNoStats(pArrayArgsBuffer2);
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityIN,
+					  PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hTransferContext = psRGXSetTransferContextPriorityIN->hTransferContext;
+	RGX_SERVER_TQ_CONTEXT * psTransferContextInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXSetTransferContextPriorityOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psTransferContextInt,
+											hTransferContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+											IMG_TRUE);
+					if(psRGXSetTransferContextPriorityOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RGXSetTransferContextPriority_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRGXSetTransferContextPriorityOUT->eError =
+		PVRSRVRGXSetTransferContextPriorityKM(psConnection, OSGetDevData(psConnection),
+					psTransferContextInt,
+					psRGXSetTransferContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetTransferContextPriority_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psTransferContextInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hTransferContext,
+										PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
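+/* Dispatch every RGXTQ bridge entry point with the global bridge lock held. */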
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXTQBridge(void);
+PVRSRV_ERROR DeinitRGXTQBridge(void);
+
+/*
+ * Register all RGXTQ functions with services
+ */
+PVRSRV_ERROR InitRGXTQBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT, PVRSRVBridgeRGXCreateTransferContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT, PVRSRVBridgeRGXDestroyTransferContext,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER, PVRSRVBridgeRGXSubmitTransfer,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY, PVRSRVBridgeRGXSetTransferContextPriority,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTQ functions with services
+ */
+PVRSRV_ERROR DeinitRGXTQBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/ri_bridge/client_ri_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/ri_bridge/client_ri_bridge.h
new file mode 100644
index 0000000..66517c2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/ri_bridge/client_ri_bridge.h
@@ -0,0 +1,106 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for ri
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_RI_BRIDGE_H
+#define CLIENT_RI_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_ri_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge,
+							     IMG_HANDLE hPMRHandle,
+							     IMG_UINT32 ui32TextASize,
+							     const IMG_CHAR *puiTextA,
+							     IMG_UINT64 ui64LogicalSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge,
+								 IMG_HANDLE hPMRHandle,
+								 IMG_UINT32 ui32TextBSize,
+								 const IMG_CHAR *puiTextB,
+								 IMG_UINT64 ui64Offset,
+								 IMG_UINT64 ui64Size,
+								 IMG_UINT64 ui64BackedSize,
+								 IMG_BOOL bIsImport,
+								 IMG_BOOL bIsExportable,
+								 IMG_HANDLE *phRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteProcListEntry(IMG_HANDLE hBridge,
+								  IMG_UINT32 ui32TextBSize,
+								  const IMG_CHAR *puiTextB,
+								  IMG_UINT64 ui64Size,
+								  IMG_UINT64 ui64BackedSize,
+								  IMG_UINT64 ui64DevVAddr,
+								  IMG_HANDLE *phRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge,
+								 IMG_HANDLE hRIHandle,
+								 IMG_DEV_VIRTADDR sAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCPinning(IMG_HANDLE hBridge,
+								    IMG_HANDLE hRIHandle,
+								    IMG_BOOL bIsPinned);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCBacking(IMG_HANDLE hBridge,
+								    IMG_HANDLE hRIHandle,
+								    IMG_INT32 i32NumModified);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge,
+								  IMG_HANDLE hRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge,
+							IMG_HANDLE hPMRHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge,
+							   IMG_PID ui32Pid);
+
+
+#endif /* CLIENT_RI_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/ri_bridge/client_ri_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/ri_bridge/client_ri_direct_bridge.c
new file mode 100644
index 0000000..1578a68
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/ri_bridge/client_ri_direct_bridge.c
@@ -0,0 +1,245 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_ri_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "ri_typedefs.h"
+
+#include "ri_server.h"
+
+
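+/*
+ * Direct bridge: the client runs in the same address space as the server,
+ * so each wrapper simply casts its opaque handles to the server types and
+ * calls the KM entry point directly, with no argument marshalling.
+ */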
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge,
+							     IMG_HANDLE hPMRHandle,
+							     IMG_UINT32 ui32TextASize,
+							     const IMG_CHAR *puiTextA,
+							     IMG_UINT64 ui64LogicalSize)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRHandleInt = (PMR *) hPMRHandle;
+
+	eError =
+		RIWritePMREntryKM(
+					psPMRHandleInt,
+					ui32TextASize,
+					puiTextA,
+					ui64LogicalSize);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge,
+								 IMG_HANDLE hPMRHandle,
+								 IMG_UINT32 ui32TextBSize,
+								 const IMG_CHAR *puiTextB,
+								 IMG_UINT64 ui64Offset,
+								 IMG_UINT64 ui64Size,
+								 IMG_UINT64 ui64BackedSize,
+								 IMG_BOOL bIsImport,
+								 IMG_BOOL bIsExportable,
+								 IMG_HANDLE *phRIHandle)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRHandleInt;
+	RI_HANDLE psRIHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRHandleInt = (PMR *) hPMRHandle;
+
+	eError =
+		RIWriteMEMDESCEntryKM(
+					psPMRHandleInt,
+					ui32TextBSize,
+					puiTextB,
+					ui64Offset,
+					ui64Size,
+					ui64BackedSize,
+					bIsImport,
+					bIsExportable,
+					&psRIHandleInt);
+
+	*phRIHandle = psRIHandleInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteProcListEntry(IMG_HANDLE hBridge,
+								  IMG_UINT32 ui32TextBSize,
+								  const IMG_CHAR *puiTextB,
+								  IMG_UINT64 ui64Size,
+								  IMG_UINT64 ui64BackedSize,
+								  IMG_UINT64 ui64DevVAddr,
+								  IMG_HANDLE *phRIHandle)
+{
+	PVRSRV_ERROR eError;
+	RI_HANDLE psRIHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		RIWriteProcListEntryKM(
+					ui32TextBSize,
+					puiTextB,
+					ui64Size,
+					ui64BackedSize,
+					ui64DevVAddr,
+					&psRIHandleInt);
+
+	*phRIHandle = psRIHandleInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge,
+								 IMG_HANDLE hRIHandle,
+								 IMG_DEV_VIRTADDR sAddr)
+{
+	PVRSRV_ERROR eError;
+	RI_HANDLE psRIHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+	eError =
+		RIUpdateMEMDESCAddrKM(
+					psRIHandleInt,
+					sAddr);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCPinning(IMG_HANDLE hBridge,
+								    IMG_HANDLE hRIHandle,
+								    IMG_BOOL bIsPinned)
+{
+	PVRSRV_ERROR eError;
+	RI_HANDLE psRIHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+	eError =
+		RIUpdateMEMDESCPinningKM(
+					psRIHandleInt,
+					bIsPinned);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCBacking(IMG_HANDLE hBridge,
+								    IMG_HANDLE hRIHandle,
+								    IMG_INT32 i32NumModified)
+{
+	PVRSRV_ERROR eError;
+	RI_HANDLE psRIHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+	eError =
+		RIUpdateMEMDESCBackingKM(
+					psRIHandleInt,
+					i32NumModified);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge,
+								  IMG_HANDLE hRIHandle)
+{
+	PVRSRV_ERROR eError;
+	RI_HANDLE psRIHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+	eError =
+		RIDeleteMEMDESCEntryKM(
+					psRIHandleInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge,
+							IMG_HANDLE hPMRHandle)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRHandleInt = (PMR *) hPMRHandle;
+
+	eError =
+		RIDumpListKM(
+					psPMRHandleInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		RIDumpAllKM(
+					);
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge,
+							   IMG_PID ui32Pid)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		RIDumpProcessKM(
+					ui32Pid);
+
+	return eError;
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/ri_bridge/common_ri_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/ri_bridge/common_ri_bridge.h
new file mode 100644
index 0000000..71f2fba
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/ri_bridge/common_ri_bridge.h
@@ -0,0 +1,259 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for ri
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RI_BRIDGE_H
+#define COMMON_RI_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "ri_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_RI_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY			PVRSRV_BRIDGE_RI_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY			PVRSRV_BRIDGE_RI_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY			PVRSRV_BRIDGE_RI_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR			PVRSRV_BRIDGE_RI_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCPINNING			PVRSRV_BRIDGE_RI_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCBACKING			PVRSRV_BRIDGE_RI_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY			PVRSRV_BRIDGE_RI_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RI_RIDUMPLIST			PVRSRV_BRIDGE_RI_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RI_RIDUMPALL			PVRSRV_BRIDGE_RI_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RI_RIDUMPPROCESS			PVRSRV_BRIDGE_RI_CMD_FIRST+9
+#define PVRSRV_BRIDGE_RI_CMD_LAST			(PVRSRV_BRIDGE_RI_CMD_FIRST+9)
+
+
+/*******************************************
+            RIWritePMREntry
+ *******************************************/
+
+/* Bridge in structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY_TAG
+{
+	IMG_HANDLE hPMRHandle;
+	IMG_UINT32 ui32TextASize;
+	const IMG_CHAR * puiTextA;
+	IMG_UINT64 ui64LogicalSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY;
+
+/* Bridge out structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY;
+
+
+/*******************************************
+            RIWriteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG
+{
+	IMG_HANDLE hPMRHandle;
+	IMG_UINT32 ui32TextBSize;
+	const IMG_CHAR * puiTextB;
+	IMG_UINT64 ui64Offset;
+	IMG_UINT64 ui64Size;
+	IMG_UINT64 ui64BackedSize;
+	IMG_BOOL bIsImport;
+	IMG_BOOL bIsExportable;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY;
+
+/* Bridge out structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY_TAG
+{
+	IMG_HANDLE hRIHandle;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY;
+
+
+/*******************************************
+            RIWriteProcListEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteProcListEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY_TAG
+{
+	IMG_UINT32 ui32TextBSize;
+	const IMG_CHAR * puiTextB;
+	IMG_UINT64 ui64Size;
+	IMG_UINT64 ui64BackedSize;
+	IMG_UINT64 ui64DevVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY;
+
+/* Bridge out structure for RIWriteProcListEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY_TAG
+{
+	IMG_HANDLE hRIHandle;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY;
+
+
+/*******************************************
+            RIUpdateMEMDESCAddr
+ *******************************************/
+
+/* Bridge in structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR_TAG
+{
+	IMG_HANDLE hRIHandle;
+	IMG_DEV_VIRTADDR sAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR;
+
+/* Bridge out structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR;
+
+
+/*******************************************
+            RIUpdateMEMDESCPinning
+ *******************************************/
+
+/* Bridge in structure for RIUpdateMEMDESCPinning */
+typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCPINNING_TAG
+{
+	IMG_HANDLE hRIHandle;
+	IMG_BOOL bIsPinned;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCPINNING;
+
+/* Bridge out structure for RIUpdateMEMDESCPinning */
+typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCPINNING_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCPINNING;
+
+
+/*******************************************
+            RIUpdateMEMDESCBacking
+ *******************************************/
+
+/* Bridge in structure for RIUpdateMEMDESCBacking */
+typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCBACKING_TAG
+{
+	IMG_HANDLE hRIHandle;
+	IMG_INT32 i32NumModified;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCBACKING;
+
+/* Bridge out structure for RIUpdateMEMDESCBacking */
+typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCBACKING_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCBACKING;
+
+
+/*******************************************
+            RIDeleteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY_TAG
+{
+	IMG_HANDLE hRIHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY;
+
+/* Bridge out structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY;
+
+
+/*******************************************
+            RIDumpList
+ *******************************************/
+
+/* Bridge in structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPLIST_TAG
+{
+	IMG_HANDLE hPMRHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDUMPLIST;
+
+/* Bridge out structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPLIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDUMPLIST;
+
+
+/*******************************************
+            RIDumpAll
+ *******************************************/
+
+/* Bridge in structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPALL_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDUMPALL;
+
+/* Bridge out structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPALL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDUMPALL;
+
+
+/*******************************************
+            RIDumpProcess
+ *******************************************/
+
+/* Bridge in structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPPROCESS_TAG
+{
+	IMG_PID ui32Pid;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDUMPPROCESS;
+
+/* Bridge out structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPPROCESS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDUMPPROCESS;
+
+
+#endif /* COMMON_RI_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/ri_bridge/server_ri_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/ri_bridge/server_ri_bridge.c
new file mode 100644
index 0000000..e5157c5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/ri_bridge/server_ri_bridge.c
@@ -0,0 +1,992 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for ri
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "ri_server.h"
+
+
+#include "common_ri_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRIWritePMREntry(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *psRIWritePMREntryIN,
+					  PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *psRIWritePMREntryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMRHandle = psRIWritePMREntryIN->hPMRHandle;
+	PMR * psPMRHandleInt = NULL;
+	IMG_CHAR *uiTextAInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize =
+			(psRIWritePMREntryIN->ui32TextASize * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
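+	/*
+	 * Marshal the variable-length TextA string: reuse the tail of the
+	 * fixed-size bridge input buffer when it is large enough, otherwise
+	 * fall back to a temporary allocation that is freed on exit.
+	 */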
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRIWritePMREntryIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRIWritePMREntryIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRIWritePMREntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RIWritePMREntry_exit;
+			}
+		}
+	}
+
+	if (psRIWritePMREntryIN->ui32TextASize != 0)
+	{
+		uiTextAInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRIWritePMREntryIN->ui32TextASize * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (psRIWritePMREntryIN->ui32TextASize * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiTextAInt, psRIWritePMREntryIN->puiTextA, psRIWritePMREntryIN->ui32TextASize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psRIWritePMREntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RIWritePMREntry_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRIWritePMREntryOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRHandleInt,
+											hPMRHandle,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psRIWritePMREntryOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RIWritePMREntry_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRIWritePMREntryOUT->eError =
+		RIWritePMREntryKM(
+					psPMRHandleInt,
+					psRIWritePMREntryIN->ui32TextASize,
+					uiTextAInt,
+					psRIWritePMREntryIN->ui64LogicalSize);
+
+
+
+
+RIWritePMREntry_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRHandleInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMRHandle,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryIN,
+					  PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMRHandle = psRIWriteMEMDESCEntryIN->hPMRHandle;
+	PMR * psPMRHandleInt = NULL;
+	IMG_CHAR *uiTextBInt = NULL;
+	RI_HANDLE psRIHandleInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRIWriteMEMDESCEntryIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RIWriteMEMDESCEntry_exit;
+			}
+		}
+	}
+
+	if (psRIWriteMEMDESCEntryIN->ui32TextBSize != 0)
+	{
+		uiTextBInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiTextBInt, psRIWriteMEMDESCEntryIN->puiTextB, psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RIWriteMEMDESCEntry_exit;
+				}
+			}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRIWriteMEMDESCEntryOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRHandleInt,
+											hPMRHandle,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RIWriteMEMDESCEntry_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRIWriteMEMDESCEntryOUT->eError =
+		RIWriteMEMDESCEntryKM(
+					psPMRHandleInt,
+					psRIWriteMEMDESCEntryIN->ui32TextBSize,
+					uiTextBInt,
+					psRIWriteMEMDESCEntryIN->ui64Offset,
+					psRIWriteMEMDESCEntryIN->ui64Size,
+					psRIWriteMEMDESCEntryIN->ui64BackedSize,
+					psRIWriteMEMDESCEntryIN->bIsImport,
+					psRIWriteMEMDESCEntryIN->bIsExportable,
+					&psRIHandleInt);
+	/* Exit early if bridged call fails */
+	if(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+	{
+		goto RIWriteMEMDESCEntry_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRIWriteMEMDESCEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psRIWriteMEMDESCEntryOUT->hRIHandle,
+							(void *) psRIHandleInt,
+							PVRSRV_HANDLE_TYPE_RI_HANDLE,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RIDeleteMEMDESCEntryKM);
+	if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RIWriteMEMDESCEntry_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
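+	/*
+	 * Registering RIDeleteMEMDESCEntryKM as the PFN_HANDLE_RELEASE above
+	 * ties the RI entry's lifetime to the handle: when the client handle
+	 * is destroyed (or the connection is torn down), the entry is deleted
+	 * automatically.
+	 */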
+
+
+
+RIWriteMEMDESCEntry_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRHandleInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMRHandle,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+	{
+		if (psRIHandleInt)
+		{
+			RIDeleteMEMDESCEntryKM(psRIHandleInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryIN,
+					  PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiTextBInt = NULL;
+	RI_HANDLE psRIHandleInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRIWriteProcListEntryIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RIWriteProcListEntry_exit;
+			}
+		}
+	}
+
+	if (psRIWriteProcListEntryIN->ui32TextBSize != 0)
+	{
+		uiTextBInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiTextBInt, psRIWriteProcListEntryIN->puiTextB, psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto RIWriteProcListEntry_exit;
+				}
+			}
+
+
+	psRIWriteProcListEntryOUT->eError =
+		RIWriteProcListEntryKM(
+					psRIWriteProcListEntryIN->ui32TextBSize,
+					uiTextBInt,
+					psRIWriteProcListEntryIN->ui64Size,
+					psRIWriteProcListEntryIN->ui64BackedSize,
+					psRIWriteProcListEntryIN->ui64DevVAddr,
+					&psRIHandleInt);
+	/* Exit early if bridged call fails */
+	if(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)
+	{
+		goto RIWriteProcListEntry_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psRIWriteProcListEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psRIWriteProcListEntryOUT->hRIHandle,
+							(void *) psRIHandleInt,
+							PVRSRV_HANDLE_TYPE_RI_HANDLE,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RIDeleteMEMDESCEntryKM);
+	if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto RIWriteProcListEntry_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
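+	/*
+	 * Note that proc-list entries also register RIDeleteMEMDESCEntryKM as
+	 * their PFN_HANDLE_RELEASE, so they are torn down through the same
+	 * path as MEMDESC entries when the handle goes away.
+	 */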
+
+
+
+RIWriteProcListEntry_exit:
+
+
+
+	if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK)
+	{
+		if (psRIHandleInt)
+		{
+			RIDeleteMEMDESCEntryKM(psRIHandleInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIUpdateMEMDESCAddr(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrIN,
+					  PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hRIHandle = psRIUpdateMEMDESCAddrIN->hRIHandle;
+	RI_HANDLE psRIHandleInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRIUpdateMEMDESCAddrOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psRIHandleInt,
+											hRIHandle,
+											PVRSRV_HANDLE_TYPE_RI_HANDLE,
+											IMG_TRUE);
+					if(psRIUpdateMEMDESCAddrOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RIUpdateMEMDESCAddr_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRIUpdateMEMDESCAddrOUT->eError =
+		RIUpdateMEMDESCAddrKM(
+					psRIHandleInt,
+					psRIUpdateMEMDESCAddrIN->sAddr);
+
+
+
+
+RIUpdateMEMDESCAddr_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psRIHandleInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hRIHandle,
+										PVRSRV_HANDLE_TYPE_RI_HANDLE);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIUpdateMEMDESCPinning(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCPINNING *psRIUpdateMEMDESCPinningIN,
+					  PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCPINNING *psRIUpdateMEMDESCPinningOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hRIHandle = psRIUpdateMEMDESCPinningIN->hRIHandle;
+	RI_HANDLE psRIHandleInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRIUpdateMEMDESCPinningOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psRIHandleInt,
+											hRIHandle,
+											PVRSRV_HANDLE_TYPE_RI_HANDLE,
+											IMG_TRUE);
+					if(psRIUpdateMEMDESCPinningOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RIUpdateMEMDESCPinning_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRIUpdateMEMDESCPinningOUT->eError =
+		RIUpdateMEMDESCPinningKM(
+					psRIHandleInt,
+					psRIUpdateMEMDESCPinningIN->bIsPinned);
+
+
+
+
+RIUpdateMEMDESCPinning_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psRIHandleInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hRIHandle,
+										PVRSRV_HANDLE_TYPE_RI_HANDLE);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIUpdateMEMDESCBacking(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCBACKING *psRIUpdateMEMDESCBackingIN,
+					  PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCBACKING *psRIUpdateMEMDESCBackingOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hRIHandle = psRIUpdateMEMDESCBackingIN->hRIHandle;
+	RI_HANDLE psRIHandleInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRIUpdateMEMDESCBackingOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psRIHandleInt,
+											hRIHandle,
+											PVRSRV_HANDLE_TYPE_RI_HANDLE,
+											IMG_TRUE);
+					if(psRIUpdateMEMDESCBackingOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RIUpdateMEMDESCBacking_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRIUpdateMEMDESCBackingOUT->eError =
+		RIUpdateMEMDESCBackingKM(
+					psRIHandleInt,
+					psRIUpdateMEMDESCBackingIN->i32NumModified);
+
+
+
+
+RIUpdateMEMDESCBacking_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psRIHandleInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hRIHandle,
+										PVRSRV_HANDLE_TYPE_RI_HANDLE);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIDeleteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryIN,
+					  PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psRIDeleteMEMDESCEntryOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psRIDeleteMEMDESCEntryIN->hRIHandle,
+					PVRSRV_HANDLE_TYPE_RI_HANDLE);
+	if ((psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_OK) &&
+	    (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeRIDeleteMEMDESCEntry: %s",
+		        PVRSRVGetErrorStringKM(psRIDeleteMEMDESCEntryOUT->eError)));
+		UnlockHandle();
+		goto RIDeleteMEMDESCEntry_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+RIDeleteMEMDESCEntry_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIDumpList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIDUMPLIST *psRIDumpListIN,
+					  PVRSRV_BRIDGE_OUT_RIDUMPLIST *psRIDumpListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hPMRHandle = psRIDumpListIN->hPMRHandle;
+	PMR * psPMRHandleInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRIDumpListOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psPMRHandleInt,
+											hPMRHandle,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+											IMG_TRUE);
+					if(psRIDumpListOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto RIDumpList_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psRIDumpListOUT->eError =
+		RIDumpListKM(
+					psPMRHandleInt);
+
+
+
+
+RIDumpList_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psPMRHandleInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hPMRHandle,
+										PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIDumpAll(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIDUMPALL *psRIDumpAllIN,
+					  PVRSRV_BRIDGE_OUT_RIDUMPALL *psRIDumpAllOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psRIDumpAllIN);
+
+
+
+
+
+	psRIDumpAllOUT->eError =
+		RIDumpAllKM(
+					);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIDumpProcess(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIDUMPPROCESS *psRIDumpProcessIN,
+					  PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *psRIDumpProcessOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+	psRIDumpProcessOUT->eError =
+		RIDumpProcessKM(
+					psRIDumpProcessIN->ui32Pid);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
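+/*
+ * bUseLock is the final argument to every SetDispatchTableEntry() call
+ * below; entries registered with it set are expected to be dispatched
+ * with the global services bridge lock held, which is why the handlers
+ * above only take the finer-grained handle lock themselves.
+ */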
+
+PVRSRV_ERROR InitRIBridge(void);
+PVRSRV_ERROR DeinitRIBridge(void);
+
+/*
+ * Register all RI functions with services
+ */
+PVRSRV_ERROR InitRIBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY, PVRSRVBridgeRIWritePMREntry,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY, PVRSRVBridgeRIWriteMEMDESCEntry,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY, PVRSRVBridgeRIWriteProcListEntry,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR, PVRSRVBridgeRIUpdateMEMDESCAddr,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCPINNING, PVRSRVBridgeRIUpdateMEMDESCPinning,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCBACKING, PVRSRVBridgeRIUpdateMEMDESCBacking,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY, PVRSRVBridgeRIDeleteMEMDESCEntry,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, PVRSRVBridgeRIDumpList,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, PVRSRVBridgeRIDumpAll,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS, PVRSRVBridgeRIDumpProcess,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RI functions with services
+ */
+PVRSRV_ERROR DeinitRIBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/srvcore_bridge/common_srvcore_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/srvcore_bridge/common_srvcore_bridge.h
new file mode 100644
index 0000000..30dc42b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/srvcore_bridge/common_srvcore_bridge.h
@@ -0,0 +1,331 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for srvcore
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for srvcore
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_SRVCORE_BRIDGE_H
+#define COMMON_SRVCORE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_device_types.h"
+#include "cache_ops.h"
+
+
+#define PVRSRV_BRIDGE_SRVCORE_CMD_FIRST			0
+#define PVRSRV_BRIDGE_SRVCORE_CONNECT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SRVCORE_DISCONNECT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13
+#define PVRSRV_BRIDGE_SRVCORE_CMD_LAST			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13)
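+
+/*
+ * For orientation: the command IDs above are consecutive offsets from
+ * PVRSRV_BRIDGE_SRVCORE_CMD_FIRST (0), so e.g.
+ * PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT expands to 0+5 == 5. CMD_LAST
+ * names the final command ID (13), not the number of commands.
+ */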
+
+
+/*******************************************
+            Connect          
+ *******************************************/
+
+/* Bridge in structure for Connect */
+typedef struct PVRSRV_BRIDGE_IN_CONNECT_TAG
+{
+	IMG_UINT32 ui32Flags;
+	IMG_UINT32 ui32ClientBuildOptions;
+	IMG_UINT32 ui32ClientDDKVersion;
+	IMG_UINT32 ui32ClientDDKBuild;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CONNECT;
+
+/* Bridge out structure for Connect */
+typedef struct PVRSRV_BRIDGE_OUT_CONNECT_TAG
+{
+	IMG_UINT8 ui8KernelArch;
+	IMG_UINT32 ui32CapabilityFlags;
+	IMG_UINT32 ui32PVRBridges;
+	IMG_UINT32 ui32RGXBridges;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CONNECT;
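+
+/*
+ * Every OUT structure in this header carries a PVRSRV_ERROR eError field:
+ * it is the only status channel back to the client, since the server-side
+ * entry points themselves always return 0.
+ */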
+
+
+/*******************************************
+            Disconnect          
+ *******************************************/
+
+/* Bridge in structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_IN_DISCONNECT_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DISCONNECT;
+
+/* Bridge out structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_OUT_DISCONNECT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DISCONNECT;
+
+
+/*******************************************
+            AcquireGlobalEventObject          
+ *******************************************/
+
+/* Bridge in structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT;
+
+/* Bridge out structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+	IMG_HANDLE hGlobalEventObject;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT;
+
+
+/*******************************************
+            ReleaseGlobalEventObject          
+ *******************************************/
+
+/* Bridge in structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT_TAG
+{
+	IMG_HANDLE hGlobalEventObject;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT;
+
+/* Bridge out structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT;
+
+
+/*******************************************
+            EventObjectOpen          
+ *******************************************/
+
+/* Bridge in structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN_TAG
+{
+	IMG_HANDLE hEventObject;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN;
+
+/* Bridge out structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN_TAG
+{
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN;
+
+
+/*******************************************
+            EventObjectWait          
+ *******************************************/
+
+/* Bridge in structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT_TAG
+{
+	IMG_HANDLE hOSEventKM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT;
+
+/* Bridge out structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT;
+
+
+/*******************************************
+            EventObjectClose          
+ *******************************************/
+
+/* Bridge in structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE_TAG
+{
+	IMG_HANDLE hOSEventKM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE;
+
+/* Bridge out structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE;
+
+
+/*******************************************
+            DumpDebugInfo          
+ *******************************************/
+
+/* Bridge in structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_IN_DUMPDEBUGINFO_TAG
+{
+	IMG_UINT32 ui32ui32VerbLevel;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DUMPDEBUGINFO;
+
+/* Bridge out structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO;
+
+
+/*******************************************
+            GetDevClockSpeed          
+ *******************************************/
+
+/* Bridge in structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED;
+
+/* Bridge out structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED_TAG
+{
+	IMG_UINT32 ui32ui32ClockSpeed;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED;
+
+
+/*******************************************
+            HWOpTimeout          
+ *******************************************/
+
+/* Bridge in structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_IN_HWOPTIMEOUT_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HWOPTIMEOUT;
+
+/* Bridge out structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_HWOPTIMEOUT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HWOPTIMEOUT;
+
+
+/*******************************************
+            AlignmentCheck          
+ *******************************************/
+
+/* Bridge in structure for AlignmentCheck */
+typedef struct PVRSRV_BRIDGE_IN_ALIGNMENTCHECK_TAG
+{
+	IMG_UINT32 ui32AlignChecksSize;
+	IMG_UINT32 * pui32AlignChecks;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ALIGNMENTCHECK;
+
+/* Bridge out structure for AlignmentCheck */
+typedef struct PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK;
+
+
+/*******************************************
+            GetDeviceStatus          
+ *******************************************/
+
+/* Bridge in structure for GetDeviceStatus */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVICESTATUS_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_GETDEVICESTATUS;
+
+/* Bridge out structure for GetDeviceStatus */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVICESTATUS_TAG
+{
+	IMG_UINT32 ui32DeviceSatus;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_GETDEVICESTATUS;
+
+
+/*******************************************
+            EventObjectWaitTimeout          
+ *******************************************/
+
+/* Bridge in structure for EventObjectWaitTimeout */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT_TAG
+{
+	IMG_HANDLE hOSEventKM;
+	IMG_UINT64 ui64uiTimeoutus;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT;
+
+/* Bridge out structure for EventObjectWaitTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT;
+
+
+/*******************************************
+            FindProcessMemStats          
+ *******************************************/
+
+/* Bridge in structure for FindProcessMemStats */
+typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG
+{
+	IMG_UINT32 ui32PID;
+	IMG_UINT32 ui32ArrSize;
+	IMG_BOOL bbAllProcessStats;
+	/* Output pointer pui32MemStatsArray is also an implied input */
+	IMG_UINT32 * pui32MemStatsArray;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS;
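+
+/*
+ * pui32MemStatsArray is caller-allocated output: the server handler never
+ * copies it in. It stages a kernel array of ui32ArrSize words, has
+ * PVRSRVFindProcessMemStatsKM() fill it, then copies the result back to
+ * this user pointer (echoed in the OUT structure below) via OSCopyToUser().
+ */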
+
+/* Bridge out structure for FindProcessMemStats */
+typedef struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG
+{
+	IMG_UINT32 * pui32MemStatsArray;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS;
+
+
+#endif /* COMMON_SRVCORE_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/srvcore_bridge/server_srvcore_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/srvcore_bridge/server_srvcore_bridge.c
new file mode 100644
index 0000000..890f59a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/srvcore_bridge/server_srvcore_bridge.c
@@ -0,0 +1,947 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for srvcore
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for srvcore
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "srvcore.h"
+
+
+#include "common_srvcore_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeConnect(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_CONNECT *psConnectIN,
+					  PVRSRV_BRIDGE_OUT_CONNECT *psConnectOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+	psConnectOUT->eError =
+		PVRSRVConnectKM(psConnection, OSGetDevData(psConnection),
+					psConnectIN->ui32Flags,
+					psConnectIN->ui32ClientBuildOptions,
+					psConnectIN->ui32ClientDDKVersion,
+					psConnectIN->ui32ClientDDKBuild,
+					&psConnectOUT->ui8KernelArch,
+					&psConnectOUT->ui32CapabilityFlags,
+					&psConnectOUT->ui32PVRBridges,
+					&psConnectOUT->ui32RGXBridges);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDisconnect(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DISCONNECT *psDisconnectIN,
+					  PVRSRV_BRIDGE_OUT_DISCONNECT *psDisconnectOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDisconnectIN);
+
+
+
+
+
+	psDisconnectOUT->eError =
+		PVRSRVDisconnectKM(
+					);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeAcquireGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectIN,
+					  PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hGlobalEventObjectInt = NULL;
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psAcquireGlobalEventObjectIN);
+
+
+
+
+
+	psAcquireGlobalEventObjectOUT->eError =
+		PVRSRVAcquireGlobalEventObjectKM(
+					&hGlobalEventObjectInt);
+	/* Exit early if bridged call fails */
+	if(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+	{
+		goto AcquireGlobalEventObject_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psAcquireGlobalEventObjectOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psAcquireGlobalEventObjectOUT->hGlobalEventObject,
+							(void *) hGlobalEventObjectInt,
+							PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVReleaseGlobalEventObjectKM);
+	if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto AcquireGlobalEventObject_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+AcquireGlobalEventObject_exit:
+
+
+
+	if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+	{
+		if (hGlobalEventObjectInt)
+		{
+			PVRSRVReleaseGlobalEventObjectKM(hGlobalEventObjectInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeReleaseGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectIN,
+					  PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psReleaseGlobalEventObjectOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psReleaseGlobalEventObjectIN->hGlobalEventObject,
+					PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+	if ((psReleaseGlobalEventObjectOUT->eError != PVRSRV_OK) &&
+	    (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeReleaseGlobalEventObject: %s",
+		        PVRSRVGetErrorStringKM(psReleaseGlobalEventObjectOUT->eError)));
+		UnlockHandle();
+		goto ReleaseGlobalEventObject_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+ReleaseGlobalEventObject_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeEventObjectOpen(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *psEventObjectOpenIN,
+					  PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *psEventObjectOpenOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hEventObject = psEventObjectOpenIN->hEventObject;
+	IMG_HANDLE hEventObjectInt = NULL;
+	IMG_HANDLE hOSEventInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psEventObjectOpenOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &hEventObjectInt,
+											hEventObject,
+											PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+											IMG_TRUE);
+					if(psEventObjectOpenOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto EventObjectOpen_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psEventObjectOpenOUT->eError =
+		OSEventObjectOpen(
+					hEventObjectInt,
+					&hOSEventInt);
+	/* Exit early if bridged call fails */
+	if(psEventObjectOpenOUT->eError != PVRSRV_OK)
+	{
+		goto EventObjectOpen_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psEventObjectOpenOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+							&psEventObjectOpenOUT->hOSEvent,
+							(void *) hOSEventInt,
+							PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&OSEventObjectClose);
+	if (psEventObjectOpenOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto EventObjectOpen_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+EventObjectOpen_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(hEventObjectInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hEventObject,
+										PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	if (psEventObjectOpenOUT->eError != PVRSRV_OK)
+	{
+		if (hOSEventInt)
+		{
+			OSEventObjectClose(hOSEventInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeEventObjectWait(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *psEventObjectWaitIN,
+					  PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *psEventObjectWaitOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hOSEventKM = psEventObjectWaitIN->hOSEventKM;
+	IMG_HANDLE hOSEventKMInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psEventObjectWaitOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &hOSEventKMInt,
+											hOSEventKM,
+											PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+											IMG_TRUE);
+					if(psEventObjectWaitOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto EventObjectWait_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psEventObjectWaitOUT->eError =
+		OSEventObjectWait(
+					hOSEventKMInt);
+
+
+
+
+EventObjectWait_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(hOSEventKMInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hOSEventKM,
+										PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeEventObjectClose(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *psEventObjectCloseIN,
+					  PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *psEventObjectCloseOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psEventObjectCloseOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psEventObjectCloseIN->hOSEventKM,
+					PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+	if ((psEventObjectCloseOUT->eError != PVRSRV_OK) &&
+	    (psEventObjectCloseOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeEventObjectClose: %s",
+		        PVRSRVGetErrorStringKM(psEventObjectCloseOUT->eError)));
+		UnlockHandle();
+		goto EventObjectClose_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+EventObjectClose_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDumpDebugInfo(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *psDumpDebugInfoIN,
+					  PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *psDumpDebugInfoOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+	psDumpDebugInfoOUT->eError =
+		PVRSRVDumpDebugInfoKM(psConnection, OSGetDevData(psConnection),
+					psDumpDebugInfoIN->ui32ui32VerbLevel);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeGetDevClockSpeed(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *psGetDevClockSpeedIN,
+					  PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *psGetDevClockSpeedOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psGetDevClockSpeedIN);
+
+
+
+
+
+	psGetDevClockSpeedOUT->eError =
+		PVRSRVGetDevClockSpeedKM(psConnection, OSGetDevData(psConnection),
+					&psGetDevClockSpeedOUT->ui32ui32ClockSpeed);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHWOpTimeout(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HWOPTIMEOUT *psHWOpTimeoutIN,
+					  PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *psHWOpTimeoutOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psHWOpTimeoutIN);
+
+
+
+
+
+	psHWOpTimeoutOUT->eError =
+		PVRSRVHWOpTimeoutKM(psConnection, OSGetDevData(psConnection)
+					);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *psAlignmentCheckIN,
+					  PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *psAlignmentCheckOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_UINT32 *ui32AlignChecksInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psAlignmentCheckIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psAlignmentCheckIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psAlignmentCheckOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto AlignmentCheck_exit;
+			}
+		}
+	}
+
+	if (psAlignmentCheckIN->ui32AlignChecksSize != 0)
+	{
+		ui32AlignChecksInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32);
+	}
+
+			/* Copy the data over */
+			if (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32) > 0)
+			{
+				if ( OSCopyFromUser(NULL, ui32AlignChecksInt, psAlignmentCheckIN->pui32AlignChecks, psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) != PVRSRV_OK )
+				{
+					psAlignmentCheckOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto AlignmentCheck_exit;
+				}
+			}
+
+
+	psAlignmentCheckOUT->eError =
+		PVRSRVAlignmentCheckKM(psConnection, OSGetDevData(psConnection),
+					psAlignmentCheckIN->ui32AlignChecksSize,
+					ui32AlignChecksInt);
+
+
+
+
+AlignmentCheck_exit:
+
+
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeGetDeviceStatus(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_GETDEVICESTATUS *psGetDeviceStatusIN,
+					  PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *psGetDeviceStatusOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psGetDeviceStatusIN);
+
+
+
+
+
+	psGetDeviceStatusOUT->eError =
+		PVRSRVGetDeviceStatusKM(psConnection, OSGetDevData(psConnection),
+					&psGetDeviceStatusOUT->ui32DeviceSatus);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeEventObjectWaitTimeout(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutIN,
+					  PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hOSEventKM = psEventObjectWaitTimeoutIN->hOSEventKM;
+	IMG_HANDLE hOSEventKMInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psEventObjectWaitTimeoutOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &hOSEventKMInt,
+											hOSEventKM,
+											PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+											IMG_TRUE);
+					if(psEventObjectWaitTimeoutOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto EventObjectWaitTimeout_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psEventObjectWaitTimeoutOUT->eError =
+		OSEventObjectWaitTimeout(
+					hOSEventKMInt,
+					psEventObjectWaitTimeoutIN->ui64uiTimeoutus);
+
+
+
+
+EventObjectWaitTimeout_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+				{
+					/* Unreference the previously looked up handle */
+					if(hOSEventKMInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hOSEventKM,
+										PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+					}
+				}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *psFindProcessMemStatsIN,
+					  PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *psFindProcessMemStatsOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_UINT32 *pui32MemStatsArrayInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) +
+			0;
+
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psFindProcessMemStatsOUT->pui32MemStatsArray = psFindProcessMemStatsIN->pui32MemStatsArray;
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psFindProcessMemStatsIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psFindProcessMemStatsIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto FindProcessMemStats_exit;
+			}
+		}
+	}
+
+	if (psFindProcessMemStatsIN->ui32ArrSize != 0)
+	{
+		pui32MemStatsArrayInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32);
+	}
+
+
+
+	psFindProcessMemStatsOUT->eError =
+		PVRSRVFindProcessMemStatsKM(
+					psFindProcessMemStatsIN->ui32PID,
+					psFindProcessMemStatsIN->ui32ArrSize,
+					psFindProcessMemStatsIN->bbAllProcessStats,
+					pui32MemStatsArrayInt);
+
+
+
+	if ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) > 0)
+	{
+		if ( OSCopyToUser(NULL, psFindProcessMemStatsOUT->pui32MemStatsArray, pui32MemStatsArrayInt,
+			(psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32))) != PVRSRV_OK )
+		{
+			psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto FindProcessMemStats_exit;
+		}
+	}
+
+
+FindProcessMemStats_exit:
+
+
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitSRVCOREBridge(void);
+PVRSRV_ERROR DeinitSRVCOREBridge(void);
+
+/*
+ * Register all SRVCORE functions with services
+ */
+PVRSRV_ERROR InitSRVCOREBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT, PVRSRVBridgeConnect,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT, PVRSRVBridgeDisconnect,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT, PVRSRVBridgeAcquireGlobalEventObject,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT, PVRSRVBridgeReleaseGlobalEventObject,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN, PVRSRVBridgeEventObjectOpen,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT, PVRSRVBridgeEventObjectWait,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE, PVRSRVBridgeEventObjectClose,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO, PVRSRVBridgeDumpDebugInfo,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED, PVRSRVBridgeGetDevClockSpeed,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT, PVRSRVBridgeHWOpTimeout,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK, PVRSRVBridgeAlignmentCheck,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS, PVRSRVBridgeGetDeviceStatus,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT, PVRSRVBridgeEventObjectWaitTimeout,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS, PVRSRVBridgeFindProcessMemStats,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all srvcore functions with services
+ */
+PVRSRV_ERROR DeinitSRVCOREBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/sync_bridge/client_sync_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/sync_bridge/client_sync_bridge.h
new file mode 100644
index 0000000..b533be8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/sync_bridge/client_sync_bridge.h
@@ -0,0 +1,166 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for sync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_SYNC_BRIDGE_H
+#define CLIENT_SYNC_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_sync_bridge.h"
+
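+/*
+ * Broadly, each Bridge* wrapper declared below packs its arguments into
+ * the matching IN structure from common_sync_bridge.h, submits it over
+ * the connection identified by hBridge (in-process, or via the indirect
+ * path when PVR_INDIRECT_BRIDGE_CLIENTS is defined), and returns the
+ * eError the server-side handler stored in the OUT structure.
+ */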
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge,
+								     IMG_HANDLE *phSyncHandle,
+								     IMG_UINT32 *pui32SyncPrimVAddr,
+								     IMG_UINT32 *pui32SyncPrimBlockSize,
+								     IMG_HANDLE *phhSyncPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge,
+								    IMG_HANDLE hSyncHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge,
+							 IMG_HANDLE hSyncHandle,
+							 IMG_UINT32 ui32Index,
+							 IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncPrimSet(IMG_HANDLE hBridge,
+							       IMG_HANDLE hSyncHandle,
+							       IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncAlloc(IMG_HANDLE hBridge,
+							     IMG_HANDLE *phSyncHandle,
+							     IMG_UINT32 *pui32SyncPrimVAddr,
+							     IMG_UINT32 ui32ClassNameSize,
+							     const IMG_CHAR *puiClassName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncFree(IMG_HANDLE hBridge,
+							    IMG_HANDLE hSyncHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncQueueHWOp(IMG_HANDLE hBridge,
+								 IMG_HANDLE hSyncHandle,
+								 IMG_BOOL bbUpdate,
+								 IMG_UINT32 *pui32FenceValue,
+								 IMG_UINT32 *pui32UpdateValue);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncGetStatus(IMG_HANDLE hBridge,
+								 IMG_UINT32 ui32SyncCount,
+								 IMG_HANDLE *phSyncHandle,
+								 IMG_UINT32 *pui32UID,
+								 IMG_UINT32 *pui32FWAddr,
+								 IMG_UINT32 *pui32CurrentOp,
+								 IMG_UINT32 *pui32NextOp);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpCreate(IMG_HANDLE hBridge,
+							      IMG_UINT32 ui32SyncBlockCount,
+							      IMG_HANDLE *phBlockList,
+							      IMG_UINT32 ui32ClientSyncCount,
+							      IMG_UINT32 *pui32SyncBlockIndex,
+							      IMG_UINT32 *pui32Index,
+							      IMG_UINT32 ui32ServerSyncCount,
+							      IMG_HANDLE *phServerSync,
+							      IMG_HANDLE *phServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpTake(IMG_HANDLE hBridge,
+							    IMG_HANDLE hServerCookie,
+							    IMG_UINT32 ui32ClientSyncCount,
+							    IMG_UINT32 *pui32Flags,
+							    IMG_UINT32 *pui32FenceValue,
+							    IMG_UINT32 *pui32UpdateValue,
+							    IMG_UINT32 ui32ServerSyncCount,
+							    IMG_UINT32 *pui32ServerFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpReady(IMG_HANDLE hBridge,
+							     IMG_HANDLE hServerCookie,
+							     IMG_BOOL *pbReady);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpComplete(IMG_HANDLE hBridge,
+								IMG_HANDLE hServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpDestroy(IMG_HANDLE hBridge,
+							       IMG_HANDLE hServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSyncHandle,
+							   IMG_UINT32 ui32Offset);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge,
+								IMG_HANDLE hSyncHandle,
+								IMG_UINT32 ui32Offset,
+								IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge,
+							      IMG_HANDLE hSyncHandle,
+							      IMG_UINT32 ui32Offset,
+							      IMG_UINT32 ui32Value,
+							      IMG_UINT32 ui32Mask,
+							      PDUMP_POLL_OPERATOR eOperator,
+							      PDUMP_FLAGS_T uiPDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpPDumpPol(IMG_HANDLE hBridge,
+								IMG_HANDLE hServerCookie,
+								PDUMP_POLL_OPERATOR eOperator,
+								PDUMP_FLAGS_T uiPDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge,
+							      IMG_HANDLE hSyncHandle,
+							      IMG_UINT32 ui32Offset,
+							      IMG_DEVMEM_OFFSET_T uiWriteOffset,
+							      IMG_DEVMEM_SIZE_T uiPacketSize,
+							      IMG_DEVMEM_SIZE_T uiBufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncAllocEvent(IMG_HANDLE hBridge,
+							    IMG_BOOL bServerSync,
+							    IMG_UINT32 ui32FWAddr,
+							    IMG_UINT32 ui32ClassNameSize,
+							    const IMG_CHAR *puiClassName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncFreeEvent(IMG_HANDLE hBridge,
+							   IMG_UINT32 ui32FWAddr);
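+
+/* A minimal usage sketch (illustrative only; hDevConnection stands for
+ * whatever bridge handle the caller already holds and is not defined here):
+ *
+ *   IMG_HANDLE   hSyncBlock, hSyncPMR;
+ *   IMG_UINT32   ui32VAddr, ui32BlockSize;
+ *   PVRSRV_ERROR eErr;
+ *
+ *   eErr = BridgeAllocSyncPrimitiveBlock(hDevConnection, &hSyncBlock,
+ *                                        &ui32VAddr, &ui32BlockSize,
+ *                                        &hSyncPMR);
+ *   if (eErr == PVRSRV_OK)
+ *   {
+ *       eErr = BridgeSyncPrimSet(hDevConnection, hSyncBlock, 0, 0);
+ *       ...
+ *       (void) BridgeFreeSyncPrimitiveBlock(hDevConnection, hSyncBlock);
+ *   }
+ */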
+
+
+#endif /* CLIENT_SYNC_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/sync_bridge/client_sync_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/sync_bridge/client_sync_direct_bridge.c
new file mode 100644
index 0000000..2c59511
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/sync_bridge/client_sync_direct_bridge.c
@@ -0,0 +1,526 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_sync_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_um.h"
+#include "pvrsrv_sync_km.h"
+
+#include "sync.h"
+#include "sync_server.h"
+#include "pdump.h"
+#include "pvrsrv_sync_km.h"
+#include "sync_fallback_server.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge,
+								     IMG_HANDLE *phSyncHandle,
+								     IMG_UINT32 *pui32SyncPrimVAddr,
+								     IMG_UINT32 *pui32SyncPrimBlockSize,
+								     IMG_HANDLE *phhSyncPMR)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PMR * pshSyncPMRInt;
+
+
+	eError =
+		PVRSRVAllocSyncPrimitiveBlockKM(NULL,
+					(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					&psSyncHandleInt,
+					pui32SyncPrimVAddr,
+					pui32SyncPrimBlockSize,
+					&pshSyncPMRInt);
+
+	*phSyncHandle = psSyncHandleInt;
+	*phhSyncPMR = pshSyncPMRInt;
+	return eError;
+}
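+
+/* Every wrapper in this file follows the same direct-call shape as the
+ * function above: cast the opaque client handles to the server-internal
+ * types, call the corresponding *KM entry point, and return any internal
+ * pointers back to the caller as handles. Where a device is needed, the
+ * hBridge argument itself is reinterpreted as the PVRSRV_DEVICE_NODE;
+ * otherwise it is explicitly marked unused. */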
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge,
+								    IMG_HANDLE hSyncHandle)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+		PVRSRVFreeSyncPrimitiveBlockKM(
+					psSyncHandleInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge,
+							 IMG_HANDLE hSyncHandle,
+							 IMG_UINT32 ui32Index,
+							 IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+		PVRSRVSyncPrimSetKM(
+					psSyncHandleInt,
+					ui32Index,
+					ui32Value);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncPrimSet(IMG_HANDLE hBridge,
+							       IMG_HANDLE hSyncHandle,
+							       IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+	eError =
+		PVRSRVServerSyncPrimSetKM(
+					psSyncHandleInt,
+					ui32Value);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncAlloc(IMG_HANDLE hBridge,
+							     IMG_HANDLE *phSyncHandle,
+							     IMG_UINT32 *pui32SyncPrimVAddr,
+							     IMG_UINT32 ui32ClassNameSize,
+							     const IMG_CHAR *puiClassName)
+{
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+
+
+	eError =
+		PVRSRVServerSyncAllocKM(NULL,
+					(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					&psSyncHandleInt,
+					pui32SyncPrimVAddr,
+					ui32ClassNameSize,
+					puiClassName);
+
+	*phSyncHandle = psSyncHandleInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncFree(IMG_HANDLE hBridge,
+							    IMG_HANDLE hSyncHandle)
+{
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+	eError =
+		PVRSRVServerSyncFreeKM(
+					psSyncHandleInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncQueueHWOp(IMG_HANDLE hBridge,
+								 IMG_HANDLE hSyncHandle,
+								 IMG_BOOL bbUpdate,
+								 IMG_UINT32 *pui32FenceValue,
+								 IMG_UINT32 *pui32UpdateValue)
+{
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+	eError =
+		PVRSRVServerSyncQueueHWOpKM(
+					psSyncHandleInt,
+					bbUpdate,
+					pui32FenceValue,
+					pui32UpdateValue);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncGetStatus(IMG_HANDLE hBridge,
+								 IMG_UINT32 ui32SyncCount,
+								 IMG_HANDLE *phSyncHandle,
+								 IMG_UINT32 *pui32UID,
+								 IMG_UINT32 *pui32FWAddr,
+								 IMG_UINT32 *pui32CurrentOp,
+								 IMG_UINT32 *pui32NextOp)
+{
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE * *psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SERVER_SYNC_PRIMITIVE **) phSyncHandle;
+
+	eError =
+		PVRSRVServerSyncGetStatusKM(
+					ui32SyncCount,
+					psSyncHandleInt,
+					pui32UID,
+					pui32FWAddr,
+					pui32CurrentOp,
+					pui32NextOp);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpCreate(IMG_HANDLE hBridge,
+							      IMG_UINT32 ui32SyncBlockCount,
+							      IMG_HANDLE *phBlockList,
+							      IMG_UINT32 ui32ClientSyncCount,
+							      IMG_UINT32 *pui32SyncBlockIndex,
+							      IMG_UINT32 *pui32Index,
+							      IMG_UINT32 ui32ServerSyncCount,
+							      IMG_HANDLE *phServerSync,
+							      IMG_HANDLE *phServerCookie)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * *psBlockListInt;
+	SERVER_SYNC_PRIMITIVE * *psServerSyncInt;
+	SERVER_OP_COOKIE * psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psBlockListInt = (SYNC_PRIMITIVE_BLOCK **) phBlockList;
+	psServerSyncInt = (SERVER_SYNC_PRIMITIVE **) phServerSync;
+
+	eError =
+		PVRSRVSyncPrimOpCreateKM(
+					ui32SyncBlockCount,
+					psBlockListInt,
+					ui32ClientSyncCount,
+					pui32SyncBlockIndex,
+					pui32Index,
+					ui32ServerSyncCount,
+					psServerSyncInt,
+					&psServerCookieInt);
+
+	*phServerCookie = psServerCookieInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpTake(IMG_HANDLE hBridge,
+							    IMG_HANDLE hServerCookie,
+							    IMG_UINT32 ui32ClientSyncCount,
+							    IMG_UINT32 *pui32Flags,
+							    IMG_UINT32 *pui32FenceValue,
+							    IMG_UINT32 *pui32UpdateValue,
+							    IMG_UINT32 ui32ServerSyncCount,
+							    IMG_UINT32 *pui32ServerFlags)
+{
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE * psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError =
+		PVRSRVSyncPrimOpTakeKM(
+					psServerCookieInt,
+					ui32ClientSyncCount,
+					pui32Flags,
+					pui32FenceValue,
+					pui32UpdateValue,
+					ui32ServerSyncCount,
+					pui32ServerFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpReady(IMG_HANDLE hBridge,
+							     IMG_HANDLE hServerCookie,
+							     IMG_BOOL *pbReady)
+{
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE * psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError =
+		PVRSRVSyncPrimOpReadyKM(
+					psServerCookieInt,
+					pbReady);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpComplete(IMG_HANDLE hBridge,
+								IMG_HANDLE hServerCookie)
+{
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE * psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError =
+		PVRSRVSyncPrimOpCompleteKM(
+					psServerCookieInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpDestroy(IMG_HANDLE hBridge,
+							       IMG_HANDLE hServerCookie)
+{
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE * psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError =
+		PVRSRVSyncPrimOpDestroyKM(
+					psServerCookieInt);
+
+	return eError;
+}
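+
+/* The PDump wrappers that follow are only functional in builds with PDUMP
+ * defined. In all other builds every argument is explicitly marked unused
+ * and PVRSRV_ERROR_NOT_IMPLEMENTED is returned, so a caller can discover
+ * the missing capability at runtime rather than carrying its own #ifdefs. */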
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSyncHandle,
+							   IMG_UINT32 ui32Offset)
+{
+#if defined(PDUMP)
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+		PVRSRVSyncPrimPDumpKM(
+					psSyncHandleInt,
+					ui32Offset);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge,
+								IMG_HANDLE hSyncHandle,
+								IMG_UINT32 ui32Offset,
+								IMG_UINT32 ui32Value)
+{
+#if defined(PDUMP)
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+		PVRSRVSyncPrimPDumpValueKM(
+					psSyncHandleInt,
+					ui32Offset,
+					ui32Value);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge,
+							      IMG_HANDLE hSyncHandle,
+							      IMG_UINT32 ui32Offset,
+							      IMG_UINT32 ui32Value,
+							      IMG_UINT32 ui32Mask,
+							      PDUMP_POLL_OPERATOR eOperator,
+							      PDUMP_FLAGS_T uiPDumpFlags)
+{
+#if defined(PDUMP)
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+		PVRSRVSyncPrimPDumpPolKM(
+					psSyncHandleInt,
+					ui32Offset,
+					ui32Value,
+					ui32Mask,
+					eOperator,
+					uiPDumpFlags);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpPDumpPol(IMG_HANDLE hBridge,
+								IMG_HANDLE hServerCookie,
+								PDUMP_POLL_OPERATOR eOperator,
+								PDUMP_FLAGS_T uiPDumpFlags)
+{
+#if defined(PDUMP)
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE * psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError =
+		PVRSRVSyncPrimOpPDumpPolKM(
+					psServerCookieInt,
+					eOperator,
+					uiPDumpFlags);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hServerCookie);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge,
+							      IMG_HANDLE hSyncHandle,
+							      IMG_UINT32 ui32Offset,
+							      IMG_DEVMEM_OFFSET_T uiWriteOffset,
+							      IMG_DEVMEM_SIZE_T uiPacketSize,
+							      IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+#if defined(PDUMP)
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+		PVRSRVSyncPrimPDumpCBPKM(
+					psSyncHandleInt,
+					ui32Offset,
+					uiWriteOffset,
+					uiPacketSize,
+					uiBufferSize);
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncAllocEvent(IMG_HANDLE hBridge,
+							    IMG_BOOL bServerSync,
+							    IMG_UINT32 ui32FWAddr,
+							    IMG_UINT32 ui32ClassNameSize,
+							    const IMG_CHAR *puiClassName)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		PVRSRVSyncAllocEventKM(NULL,
+					(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					bServerSync,
+					ui32FWAddr,
+					ui32ClassNameSize,
+					puiClassName);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncFreeEvent(IMG_HANDLE hBridge,
+							   IMG_UINT32 ui32FWAddr)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		PVRSRVSyncFreeEventKM(NULL,
+					(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					ui32FWAddr);
+
+	return eError;
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/sync_bridge/common_sync_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/sync_bridge/common_sync_bridge.h
new file mode 100644
index 0000000..11c6ace
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/sync_bridge/common_sync_bridge.h
@@ -0,0 +1,481 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for sync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_SYNC_BRIDGE_H
+#define COMMON_SYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_um.h"
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_SYNC_CMD_FIRST			0
+#define PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK	(PVRSRV_BRIDGE_SYNC_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK	(PVRSRV_BRIDGE_SYNC_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMSET			(PVRSRV_BRIDGE_SYNC_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCPRIMSET		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCALLOC		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+4)
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCFREE		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+5)
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCQUEUEHWOP		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+6)
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCGETSTATUS		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+7)
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCREATE		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+8)
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPTAKE		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+9)
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPREADY		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+10)
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCOMPLETE		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+11)
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPDESTROY		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+12)
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+13)
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+14)
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+15)
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPPDUMPPOL		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+16)
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+17)
+#define PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+18)
+#define PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT		(PVRSRV_BRIDGE_SYNC_CMD_FIRST+19)
+#define PVRSRV_BRIDGE_SYNC_CMD_LAST			(PVRSRV_BRIDGE_SYNC_CMD_FIRST+19)
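+
+/* The command IDs above are consecutive offsets from
+ * PVRSRV_BRIDGE_SYNC_CMD_FIRST and index the server dispatch table directly,
+ * so a new command must be appended at the end and
+ * PVRSRV_BRIDGE_SYNC_CMD_LAST bumped to match. Each ID is paired with one
+ * server handler at bridge-init time, roughly as below (illustrative shape
+ * only; the real registration lives in the server-side bridge init code):
+ *
+ *   SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET,
+ *                         PVRSRVBridgeSyncPrimSet, ...);
+ */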
+
+
+/*******************************************
+            AllocSyncPrimitiveBlock          
+ *******************************************/
+
+/* Bridge in structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32SyncPrimVAddr;
+	IMG_UINT32 ui32SyncPrimBlockSize;
+	IMG_HANDLE hhSyncPMR;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK;
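+
+/* Convention for every command in this header: one packed IN structure and
+ * one packed OUT structure, copied across the user/kernel boundary by the
+ * bridge, with the command's PVRSRV_ERROR result carried in the OUT
+ * structure's eError field. A command with no real input payload still gets
+ * ui32EmptyStructPlaceholder so the IN structure has a well-defined size. */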
+
+
+/*******************************************
+            FreeSyncPrimitiveBlock          
+ *******************************************/
+
+/* Bridge in structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK_TAG
+{
+	IMG_HANDLE hSyncHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK;
+
+
+/*******************************************
+            SyncPrimSet          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSET_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Index;
+	IMG_UINT32 ui32Value;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMSET;
+
+/* Bridge out structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSET_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMSET;
+
+
+/*******************************************
+            ServerSyncPrimSet          
+ *******************************************/
+
+/* Bridge in structure for ServerSyncPrimSet */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Value;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET;
+
+/* Bridge out structure for ServerSyncPrimSet */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET;
+
+
+/*******************************************
+            ServerSyncAlloc          
+ *******************************************/
+
+/* Bridge in structure for ServerSyncAlloc */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCALLOC_TAG
+{
+	IMG_UINT32 ui32ClassNameSize;
+	const IMG_CHAR * puiClassName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCALLOC;
+
+/* Bridge out structure for ServerSyncAlloc */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32SyncPrimVAddr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC;
+
+
+/*******************************************
+            ServerSyncFree          
+ *******************************************/
+
+/* Bridge in structure for ServerSyncFree */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCFREE_TAG
+{
+	IMG_HANDLE hSyncHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCFREE;
+
+/* Bridge out structure for ServerSyncFree */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCFREE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCFREE;
+
+
+/*******************************************
+            ServerSyncQueueHWOp          
+ *******************************************/
+
+/* Bridge in structure for ServerSyncQueueHWOp */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_BOOL bbUpdate;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP;
+
+/* Bridge out structure for ServerSyncQueueHWOp */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP_TAG
+{
+	IMG_UINT32 ui32FenceValue;
+	IMG_UINT32 ui32UpdateValue;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP;
+
+
+/*******************************************
+            ServerSyncGetStatus          
+ *******************************************/
+
+/* Bridge in structure for ServerSyncGetStatus */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS_TAG
+{
+	IMG_UINT32 ui32SyncCount;
+	IMG_HANDLE * phSyncHandle;
+	/* Output pointer pui32UID is also an implied input */
+	IMG_UINT32 * pui32UID;
+	/* Output pointer pui32FWAddr is also an implied input */
+	IMG_UINT32 * pui32FWAddr;
+	/* Output pointer pui32CurrentOp is also an implied input */
+	IMG_UINT32 * pui32CurrentOp;
+	/* Output pointer pui32NextOp is also an implied input */
+	IMG_UINT32 * pui32NextOp;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS;
+
+/* Bridge out structure for ServerSyncGetStatus */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS_TAG
+{
+	IMG_UINT32 * pui32UID;
+	IMG_UINT32 * pui32FWAddr;
+	IMG_UINT32 * pui32CurrentOp;
+	IMG_UINT32 * pui32NextOp;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS;
+
+
+/*******************************************
+            SyncPrimOpCreate          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpCreate */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE_TAG
+{
+	IMG_UINT32 ui32SyncBlockCount;
+	IMG_HANDLE * phBlockList;
+	IMG_UINT32 ui32ClientSyncCount;
+	IMG_UINT32 * pui32SyncBlockIndex;
+	IMG_UINT32 * pui32Index;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_HANDLE * phServerSync;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE;
+
+/* Bridge out structure for SyncPrimOpCreate */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE_TAG
+{
+	IMG_HANDLE hServerCookie;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE;
+
+
+/*******************************************
+            SyncPrimOpTake          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpTake */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE_TAG
+{
+	IMG_HANDLE hServerCookie;
+	IMG_UINT32 ui32ClientSyncCount;
+	IMG_UINT32 * pui32Flags;
+	IMG_UINT32 * pui32FenceValue;
+	IMG_UINT32 * pui32UpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 * pui32ServerFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE;
+
+/* Bridge out structure for SyncPrimOpTake */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE;
+
+
+/*******************************************
+            SyncPrimOpReady          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpReady */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY_TAG
+{
+	IMG_HANDLE hServerCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY;
+
+/* Bridge out structure for SyncPrimOpReady */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY_TAG
+{
+	IMG_BOOL bReady;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY;
+
+
+/*******************************************
+            SyncPrimOpComplete          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpComplete */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE_TAG
+{
+	IMG_HANDLE hServerCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE;
+
+/* Bridge out structure for SyncPrimOpComplete */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE;
+
+
+/*******************************************
+            SyncPrimOpDestroy          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpDestroy */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY_TAG
+{
+	IMG_HANDLE hServerCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY;
+
+/* Bridge out structure for SyncPrimOpDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY;
+
+
+/*******************************************
+            SyncPrimPDump          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Offset;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP;
+
+/* Bridge out structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP;
+
+
+/*******************************************
+            SyncPrimPDumpValue          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Offset;
+	IMG_UINT32 ui32Value;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE;
+
+/* Bridge out structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE;
+
+
+/*******************************************
+            SyncPrimPDumpPol          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Offset;
+	IMG_UINT32 ui32Value;
+	IMG_UINT32 ui32Mask;
+	PDUMP_POLL_OPERATOR eOperator;
+	PDUMP_FLAGS_T uiPDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL;
+
+/* Bridge out structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL;
+
+
+/*******************************************
+            SyncPrimOpPDumpPol          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL_TAG
+{
+	IMG_HANDLE hServerCookie;
+	PDUMP_POLL_OPERATOR eOperator;
+	PDUMP_FLAGS_T uiPDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL;
+
+/* Bridge out structure for SyncPrimOpPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL;
+
+
+/*******************************************
+            SyncPrimPDumpCBP          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Offset;
+	IMG_DEVMEM_OFFSET_T uiWriteOffset;
+	IMG_DEVMEM_SIZE_T uiPacketSize;
+	IMG_DEVMEM_SIZE_T uiBufferSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP;
+
+/* Bridge out structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP;
+
+
+/*******************************************
+            SyncAllocEvent          
+ *******************************************/
+
+/* Bridge in structure for SyncAllocEvent */
+typedef struct PVRSRV_BRIDGE_IN_SYNCALLOCEVENT_TAG
+{
+	IMG_BOOL bServerSync;
+	IMG_UINT32 ui32FWAddr;
+	IMG_UINT32 ui32ClassNameSize;
+	const IMG_CHAR * puiClassName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCALLOCEVENT;
+
+/* Bridge out structure for SyncAllocEvent */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT;
+
+
+/*******************************************
+            SyncFreeEvent          
+ *******************************************/
+
+/* Bridge in structure for SyncFreeEvent */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFREEEVENT_TAG
+{
+	IMG_UINT32 ui32FWAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCFREEEVENT;
+
+/* Bridge out structure for SyncFreeEvent */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFREEEVENT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCFREEEVENT;
+
+
+#endif /* COMMON_SYNC_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/sync_bridge/server_sync_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/sync_bridge/server_sync_bridge.c
new file mode 100644
index 0000000..283e583
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/sync_bridge/server_sync_bridge.c
@@ -0,0 +1,2202 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for sync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+#include "pdump.h"
+#include "pvrsrv_sync_km.h"
+#include "sync_fallback_server.h"
+
+
+#include "common_sync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockIN,
+					  PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+	PMR * pshSyncPMRInt = NULL;
+
+
+
+	PVR_UNREFERENCED_PARAMETER(psAllocSyncPrimitiveBlockIN);
+
+
+	psAllocSyncPrimitiveBlockOUT->hSyncHandle = NULL;
+
+
+
+	psAllocSyncPrimitiveBlockOUT->eError =
+		PVRSRVAllocSyncPrimitiveBlockKM(psConnection, OSGetDevData(psConnection),
+					&psSyncHandleInt,
+					&psAllocSyncPrimitiveBlockOUT->ui32SyncPrimVAddr,
+					&psAllocSyncPrimitiveBlockOUT->ui32SyncPrimBlockSize,
+					&pshSyncPMRInt);
+	/* Exit early if bridged call fails */
+	if(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+	{
+		goto AllocSyncPrimitiveBlock_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+	psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psAllocSyncPrimitiveBlockOUT->hSyncHandle,
+							(void *) psSyncHandleInt,
+							PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PVRSRVFreeSyncPrimitiveBlockKM);
+	if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto AllocSyncPrimitiveBlock_exit;
+	}
+
+	psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+							&psAllocSyncPrimitiveBlockOUT->hhSyncPMR,
+							(void *) pshSyncPMRInt,
+							PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+							psAllocSyncPrimitiveBlockOUT->hSyncHandle);
+	if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto AllocSyncPrimitiveBlock_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+AllocSyncPrimitiveBlock_exit:
+
+
+
+	if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+	{
+		/* Lock over handle creation cleanup. */
+		LockHandle();
+		if (psAllocSyncPrimitiveBlockOUT->hSyncHandle)
+		{
+
+
+			PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						(IMG_HANDLE) psAllocSyncPrimitiveBlockOUT->hSyncHandle,
+						PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				        "PVRSRVBridgeAllocSyncPrimitiveBlock: %s",
+				        PVRSRVGetErrorStringKM(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psSyncHandleInt = NULL;
+		}
+
+
+		/* Release now we have cleaned up creation handles. */
+		UnlockHandle();
+		if (psSyncHandleInt)
+		{
+			PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt);
+		}
+	}
+
+
+	return 0;
+}
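+
+/* The handler above shows the server-side pattern used throughout this
+ * file: perform the KM call first, then wrap its outputs in connection
+ * handles under the handle lock. On a handle failure the exit path releases
+ * any handle that was created (which also frees the underlying resource)
+ * and falls back to calling the KM free directly only when no handle ever
+ * took ownership of the allocation. */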
+
+
+static IMG_INT
+PVRSRVBridgeFreeSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockIN,
+					  PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+	psFreeSyncPrimitiveBlockOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psFreeSyncPrimitiveBlockIN->hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	if ((psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_OK) &&
+	    (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeFreeSyncPrimitiveBlock: %s",
+		        PVRSRVGetErrorStringKM(psFreeSyncPrimitiveBlockOUT->eError)));
+		UnlockHandle();
+		goto FreeSyncPrimitiveBlock_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+FreeSyncPrimitiveBlock_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMSET *psSyncPrimSetIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMSET *psSyncPrimSetOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSyncHandle = psSyncPrimSetIN->hSyncHandle;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+				{
+					/* Look up the address from the handle */
+					psSyncPrimSetOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psSyncHandleInt,
+											hSyncHandle,
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+											IMG_TRUE);
+					if(psSyncPrimSetOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto SyncPrimSet_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psSyncPrimSetOUT->eError =
+		PVRSRVSyncPrimSetKM(
+					psSyncHandleInt,
+					psSyncPrimSetIN->ui32Index,
+					psSyncPrimSetIN->ui32Value);
+
+
+
+
+SyncPrimSet_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psSyncHandleInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hSyncHandle,
+										PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					}
+				}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
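+
+/* Lookup-based handlers such as the one above follow a fixed sequence:
+ * take the handle lock, look the handle up with the final IMG_TRUE argument
+ * requesting a reference, drop the lock across the potentially slow KM
+ * call, then re-take it in the exit path purely to release that reference. */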
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET *psServerSyncPrimSetIN,
+					  PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET *psServerSyncPrimSetOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSyncHandle = psServerSyncPrimSetIN->hSyncHandle;
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+				{
+					/* Look up the address from the handle */
+					psServerSyncPrimSetOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psSyncHandleInt,
+											hSyncHandle,
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+											IMG_TRUE);
+					if(psServerSyncPrimSetOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto ServerSyncPrimSet_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psServerSyncPrimSetOUT->eError =
+		PVRSRVServerSyncPrimSetKM(
+					psSyncHandleInt,
+					psServerSyncPrimSetIN->ui32Value);
+
+
+
+
+ServerSyncPrimSet_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psSyncHandleInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hSyncHandle,
+										PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					}
+				}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncAlloc(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SERVERSYNCALLOC *psServerSyncAllocIN,
+					  PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC *psServerSyncAllocOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt = NULL;
+	IMG_CHAR *uiClassNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psServerSyncAllocIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psServerSyncAllocIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psServerSyncAllocOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto ServerSyncAlloc_exit;
+			}
+		}
+	}
+
+	if (psServerSyncAllocIN->ui32ClassNameSize != 0)
+	{
+		uiClassNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+	}
+
+			/* Copy the data over */
+			if (psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+			{
+				if ( OSCopyFromUser(NULL, uiClassNameInt, psServerSyncAllocIN->puiClassName, psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+				{
+					psServerSyncAllocOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto ServerSyncAlloc_exit;
+				}
+			}
+
+
+	psServerSyncAllocOUT->eError =
+		PVRSRVServerSyncAllocKM(psConnection, OSGetDevData(psConnection),
+					&psSyncHandleInt,
+					&psServerSyncAllocOUT->ui32SyncPrimVAddr,
+					psServerSyncAllocIN->ui32ClassNameSize,
+					uiClassNameInt);
+	/* Exit early if bridged call fails */
+	if(psServerSyncAllocOUT->eError != PVRSRV_OK)
+	{
+		goto ServerSyncAlloc_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+	psServerSyncAllocOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psServerSyncAllocOUT->hSyncHandle,
+							(void *) psSyncHandleInt,
+							PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PVRSRVServerSyncFreeKM);
+	if (psServerSyncAllocOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto ServerSyncAlloc_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+ServerSyncAlloc_exit:
+
+
+
+	if (psServerSyncAllocOUT->eError != PVRSRV_OK)
+	{
+		if (psSyncHandleInt)
+		{
+			PVRSRVServerSyncFreeKM(psSyncHandleInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
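+
+/* Commands with variable-length array arguments, such as ServerSyncAlloc
+ * above, stage those arrays in kernel memory before use. When the arrays
+ * fit in the unused tail of the fixed-size bridge input buffer they are
+ * placed there (bHaveEnoughSpace); only oversized requests pay for a
+ * temporary OSAllocMemNoStats allocation, which is freed again on exit. */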
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncFree(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SERVERSYNCFREE *psServerSyncFreeIN,
+					  PVRSRV_BRIDGE_OUT_SERVERSYNCFREE *psServerSyncFreeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+	psServerSyncFreeOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psServerSyncFreeIN->hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+	if ((psServerSyncFreeOUT->eError != PVRSRV_OK) &&
+	    (psServerSyncFreeOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeServerSyncFree: %s",
+		        PVRSRVGetErrorStringKM(psServerSyncFreeOUT->eError)));
+		UnlockHandle();
+		goto ServerSyncFree_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+ServerSyncFree_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncQueueHWOp(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP *psServerSyncQueueHWOpIN,
+					  PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP *psServerSyncQueueHWOpOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSyncHandle = psServerSyncQueueHWOpIN->hSyncHandle;
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+				{
+					/* Look up the address from the handle */
+					psServerSyncQueueHWOpOUT->eError =
+						PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+											(void **) &psSyncHandleInt,
+											hSyncHandle,
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+											IMG_TRUE);
+					if(psServerSyncQueueHWOpOUT->eError != PVRSRV_OK)
+					{
+						UnlockHandle();
+						goto ServerSyncQueueHWOp_exit;
+					}
+				}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psServerSyncQueueHWOpOUT->eError =
+		PVRSRVServerSyncQueueHWOpKM(
+					psSyncHandleInt,
+					psServerSyncQueueHWOpIN->bbUpdate,
+					&psServerSyncQueueHWOpOUT->ui32FenceValue,
+					&psServerSyncQueueHWOpOUT->ui32UpdateValue);
+
+
+
+
+ServerSyncQueueHWOp_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+				{
+					/* Unreference the previously looked up handle */
+					if(psSyncHandleInt)
+					{
+						PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+										hSyncHandle,
+										PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					}
+				}
+	/* Release now we have cleaned up the looked-up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncGetStatus(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS *psServerSyncGetStatusIN,
+					  PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS *psServerSyncGetStatusOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SERVER_SYNC_PRIMITIVE * *psSyncHandleInt = NULL;
+	IMG_HANDLE *hSyncHandleInt2 = NULL;
+	IMG_UINT32 *pui32UIDInt = NULL;
+	IMG_UINT32 *pui32FWAddrInt = NULL;
+	IMG_UINT32 *pui32CurrentOpInt = NULL;
+	IMG_UINT32 *pui32NextOpInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psServerSyncGetStatusIN->ui32SyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+			(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE)) +
+			(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+			(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+			(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+			(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+			0;
+
+
+
+	psServerSyncGetStatusOUT->pui32UID = psServerSyncGetStatusIN->pui32UID;
+	psServerSyncGetStatusOUT->pui32FWAddr = psServerSyncGetStatusIN->pui32FWAddr;
+	psServerSyncGetStatusOUT->pui32CurrentOp = psServerSyncGetStatusIN->pui32CurrentOp;
+	psServerSyncGetStatusOUT->pui32NextOp = psServerSyncGetStatusIN->pui32NextOp;
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psServerSyncGetStatusIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psServerSyncGetStatusIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto ServerSyncGetStatus_exit;
+			}
+		}
+	}
+
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		psSyncHandleInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+		hSyncHandleInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE);
+	}
+
+			/* Copy the data over */
+			if (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE) > 0)
+			{
+				if ( OSCopyFromUser(NULL, hSyncHandleInt2, psServerSyncGetStatusIN->phSyncHandle, psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+				{
+					psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+					goto ServerSyncGetStatus_exit;
+				}
+			}
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		pui32UIDInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+	}
+
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		pui32FWAddrInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+	}
+
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		pui32CurrentOpInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+	}
+
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		pui32NextOpInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+	}
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psServerSyncGetStatusIN->ui32SyncCount; i++)
+		{
+			/* Look up the address from the handle */
+			psServerSyncGetStatusOUT->eError =
+				PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+							(void **) &psSyncHandleInt[i],
+							hSyncHandleInt2[i],
+							PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+							IMG_TRUE);
+			if (psServerSyncGetStatusOUT->eError != PVRSRV_OK)
+			{
+				UnlockHandle();
+				goto ServerSyncGetStatus_exit;
+			}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psServerSyncGetStatusOUT->eError =
+		PVRSRVServerSyncGetStatusKM(
+					psServerSyncGetStatusIN->ui32SyncCount,
+					psSyncHandleInt,
+					pui32UIDInt,
+					pui32FWAddrInt,
+					pui32CurrentOpInt,
+					pui32NextOpInt);
+
+
+
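+	/* Copy the four status arrays back out to the caller's buffers. */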
+	if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+	{
+		if ( OSCopyToUser(NULL, psServerSyncGetStatusOUT->pui32UID, pui32UIDInt,
+			(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK )
+		{
+			psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+	if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+	{
+		if ( OSCopyToUser(NULL, psServerSyncGetStatusOUT->pui32FWAddr, pui32FWAddrInt,
+			(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK )
+		{
+			psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+	if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+	{
+		if ( OSCopyToUser(NULL, psServerSyncGetStatusOUT->pui32CurrentOp, pui32CurrentOpInt,
+			(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK )
+		{
+			psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+	if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+	{
+		if ( OSCopyToUser(NULL, psServerSyncGetStatusOUT->pui32NextOp, pui32NextOpInt,
+			(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK )
+		{
+			psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+
+ServerSyncGetStatus_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+
+	if (hSyncHandleInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psServerSyncGetStatusIN->ui32SyncCount; i++)
+		{
+			/* Unreference the previously looked up handle */
+			if (hSyncHandleInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+							hSyncHandleInt2[i],
+							PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+			}
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpCreate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE *psSyncPrimOpCreateIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE *psSyncPrimOpCreateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SYNC_PRIMITIVE_BLOCK * *psBlockListInt = NULL;
+	IMG_HANDLE *hBlockListInt2 = NULL;
+	IMG_UINT32 *ui32SyncBlockIndexInt = NULL;
+	IMG_UINT32 *ui32IndexInt = NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerSyncInt = NULL;
+	IMG_HANDLE *hServerSyncInt2 = NULL;
+	SERVER_OP_COOKIE * psServerCookieInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+			(psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE)) +
+			(psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+			(psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+			(psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+			(psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncPrimOpCreateIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psSyncPrimOpCreateIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto SyncPrimOpCreate_exit;
+			}
+		}
+	}
+
+	if (psSyncPrimOpCreateIN->ui32SyncBlockCount != 0)
+	{
+		psBlockListInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hBlockListInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser(NULL, hBlockListInt2, psSyncPrimOpCreateIN->phBlockList, psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpCreate_exit;
+		}
+	}
+	if (psSyncPrimOpCreateIN->ui32ClientSyncCount != 0)
+	{
+		ui32SyncBlockIndexInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32SyncBlockIndexInt, psSyncPrimOpCreateIN->pui32SyncBlockIndex, psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpCreate_exit;
+		}
+	}
+	if (psSyncPrimOpCreateIN->ui32ClientSyncCount != 0)
+	{
+		ui32IndexInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32IndexInt, psSyncPrimOpCreateIN->pui32Index, psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpCreate_exit;
+		}
+	}
+	if (psSyncPrimOpCreateIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+		hServerSyncInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset); 
+		ui32NextOffset += psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser(NULL, hServerSyncInt2, psSyncPrimOpCreateIN->phServerSync, psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpCreate_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psSyncPrimOpCreateIN->ui32SyncBlockCount; i++)
+		{
+			/* Look up the address from the handle */
+			psSyncPrimOpCreateOUT->eError =
+				PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+							(void **) &psBlockListInt[i],
+							hBlockListInt2[i],
+							PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+							IMG_TRUE);
+			if (psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+			{
+				UnlockHandle();
+				goto SyncPrimOpCreate_exit;
+			}
+		}
+	}
+
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psSyncPrimOpCreateIN->ui32ServerSyncCount; i++)
+		{
+			/* Look up the address from the handle */
+			psSyncPrimOpCreateOUT->eError =
+				PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+							(void **) &psServerSyncInt[i],
+							hServerSyncInt2[i],
+							PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+							IMG_TRUE);
+			if (psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+			{
+				UnlockHandle();
+				goto SyncPrimOpCreate_exit;
+			}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
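+	/* Create the server op cookie covering the supplied client and server
+	 * syncs; on success it is returned to the caller as a handle whose
+	 * release callback destroys the op. */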
+	psSyncPrimOpCreateOUT->eError =
+		PVRSRVSyncPrimOpCreateKM(
+					psSyncPrimOpCreateIN->ui32SyncBlockCount,
+					psBlockListInt,
+					psSyncPrimOpCreateIN->ui32ClientSyncCount,
+					ui32SyncBlockIndexInt,
+					ui32IndexInt,
+					psSyncPrimOpCreateIN->ui32ServerSyncCount,
+					psServerSyncInt,
+					&psServerCookieInt);
+	/* Exit early if bridged call fails */
+	if(psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+	{
+		goto SyncPrimOpCreate_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psSyncPrimOpCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psSyncPrimOpCreateOUT->hServerCookie,
+							(void *) psServerCookieInt,
+							PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PVRSRVSyncPrimOpDestroyKM);
+	if (psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto SyncPrimOpCreate_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+SyncPrimOpCreate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+
+	if (hBlockListInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psSyncPrimOpCreateIN->ui32SyncBlockCount; i++)
+		{
+			/* Unreference the previously looked up handle */
+			if (hBlockListInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+							hBlockListInt2[i],
+							PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+
+
+
+
+
+
+	if (hServerSyncInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psSyncPrimOpCreateIN->ui32ServerSyncCount; i++)
+		{
+			/* Unreference the previously looked up handle */
+			if (hServerSyncInt2[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+							hServerSyncInt2[i],
+							PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+			}
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
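+	/* If anything failed after the op was created, destroy it here rather
+	 * than leak the cookie. */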
+	if (psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+	{
+		if (psServerCookieInt)
+		{
+			PVRSRVSyncPrimOpDestroyKM(psServerCookieInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpTake(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE *psSyncPrimOpTakeIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE *psSyncPrimOpTakeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hServerCookie = psSyncPrimOpTakeIN->hServerCookie;
+	SERVER_OP_COOKIE * psServerCookieInt = NULL;
+	IMG_UINT32 *ui32FlagsInt = NULL;
+	IMG_UINT32 *ui32FenceValueInt = NULL;
+	IMG_UINT32 *ui32UpdateValueInt = NULL;
+	IMG_UINT32 *ui32ServerFlagsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+			(psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+			(psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+			(psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncPrimOpTakeIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psSyncPrimOpTakeIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto SyncPrimOpTake_exit;
+			}
+		}
+	}
+
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+	{
+		ui32FlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32FlagsInt, psSyncPrimOpTakeIN->pui32Flags, psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpTake_exit;
+		}
+	}
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+	{
+		ui32FenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32FenceValueInt, psSyncPrimOpTakeIN->pui32FenceValue, psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpTake_exit;
+		}
+	}
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+	{
+		ui32UpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32UpdateValueInt, psSyncPrimOpTakeIN->pui32UpdateValue, psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpTake_exit;
+		}
+	}
+	if (psSyncPrimOpTakeIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser(NULL, ui32ServerFlagsInt, psSyncPrimOpTakeIN->pui32ServerFlags, psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncPrimOpTake_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	/* Look up the address from the handle */
+	psSyncPrimOpTakeOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					(void **) &psServerCookieInt,
+					hServerCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+					IMG_TRUE);
+	if (psSyncPrimOpTakeOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto SyncPrimOpTake_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
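+	/* Hand the copied flag, fence-value and update-value arrays over to
+	 * the op-take implementation. */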
+	psSyncPrimOpTakeOUT->eError =
+		PVRSRVSyncPrimOpTakeKM(
+					psServerCookieInt,
+					psSyncPrimOpTakeIN->ui32ClientSyncCount,
+					ui32FlagsInt,
+					ui32FenceValueInt,
+					ui32UpdateValueInt,
+					psSyncPrimOpTakeIN->ui32ServerSyncCount,
+					ui32ServerFlagsInt);
+
+
+
+
+SyncPrimOpTake_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	/* Unreference the previously looked up handle */
+	if (psServerCookieInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hServerCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpReady(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY *psSyncPrimOpReadyIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY *psSyncPrimOpReadyOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hServerCookie = psSyncPrimOpReadyIN->hServerCookie;
+	SERVER_OP_COOKIE * psServerCookieInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	/* Look up the address from the handle */
+	psSyncPrimOpReadyOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					(void **) &psServerCookieInt,
+					hServerCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+					IMG_TRUE);
+	if (psSyncPrimOpReadyOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto SyncPrimOpReady_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psSyncPrimOpReadyOUT->eError =
+		PVRSRVSyncPrimOpReadyKM(
+					psServerCookieInt,
+					&psSyncPrimOpReadyOUT->bReady);
+
+
+
+
+SyncPrimOpReady_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	/* Unreference the previously looked up handle */
+	if (psServerCookieInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hServerCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpComplete(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE *psSyncPrimOpCompleteIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE *psSyncPrimOpCompleteOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hServerCookie = psSyncPrimOpCompleteIN->hServerCookie;
+	SERVER_OP_COOKIE * psServerCookieInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	/* Look up the address from the handle */
+	psSyncPrimOpCompleteOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					(void **) &psServerCookieInt,
+					hServerCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+					IMG_TRUE);
+	if (psSyncPrimOpCompleteOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto SyncPrimOpComplete_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psSyncPrimOpCompleteOUT->eError =
+		PVRSRVSyncPrimOpCompleteKM(
+					psServerCookieInt);
+
+
+
+
+SyncPrimOpComplete_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	/* Unreference the previously looked up handle */
+	if (psServerCookieInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hServerCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpDestroy(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY *psSyncPrimOpDestroyIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY *psSyncPrimOpDestroyOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
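+	/* Destroy is purely a handle release: the PVRSRVSyncPrimOpDestroyKM
+	 * release callback registered at create time does the actual work. */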
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psSyncPrimOpDestroyOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psSyncPrimOpDestroyIN->hServerCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+	if ((psSyncPrimOpDestroyOUT->eError != PVRSRV_OK) &&
+	    (psSyncPrimOpDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeSyncPrimOpDestroy: %s",
+		        PVRSRVGetErrorStringKM(psSyncPrimOpDestroyOUT->eError)));
+		UnlockHandle();
+		goto SyncPrimOpDestroy_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+SyncPrimOpDestroy_exit:
+
+
+
+
+	return 0;
+}
+
+
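+/* The PDump entry points below are only compiled in when PDUMP is defined;
+ * otherwise each one is stubbed out to NULL in the dispatch table. */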
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDump(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *psSyncPrimPDumpIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *psSyncPrimPDumpOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSyncHandle = psSyncPrimPDumpIN->hSyncHandle;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	/* Look up the address from the handle */
+	psSyncPrimPDumpOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					(void **) &psSyncHandleInt,
+					hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+					IMG_TRUE);
+	if (psSyncPrimPDumpOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto SyncPrimPDump_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psSyncPrimPDumpOUT->eError =
+		PVRSRVSyncPrimPDumpKM(
+					psSyncHandleInt,
+					psSyncPrimPDumpIN->ui32Offset);
+
+
+
+
+SyncPrimPDump_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	/* Unreference the previously looked up handle */
+	if (psSyncHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDump NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpValue(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSyncHandle = psSyncPrimPDumpValueIN->hSyncHandle;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	/* Look up the address from the handle */
+	psSyncPrimPDumpValueOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					(void **) &psSyncHandleInt,
+					hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+					IMG_TRUE);
+	if (psSyncPrimPDumpValueOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto SyncPrimPDumpValue_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psSyncPrimPDumpValueOUT->eError =
+		PVRSRVSyncPrimPDumpValueKM(
+					psSyncHandleInt,
+					psSyncPrimPDumpValueIN->ui32Offset,
+					psSyncPrimPDumpValueIN->ui32Value);
+
+
+
+
+SyncPrimPDumpValue_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	/* Unreference the previously looked up handle */
+	if (psSyncHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpValue NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSyncHandle = psSyncPrimPDumpPolIN->hSyncHandle;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	/* Look up the address from the handle */
+	psSyncPrimPDumpPolOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					(void **) &psSyncHandleInt,
+					hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+					IMG_TRUE);
+	if (psSyncPrimPDumpPolOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto SyncPrimPDumpPol_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psSyncPrimPDumpPolOUT->eError =
+		PVRSRVSyncPrimPDumpPolKM(
+					psSyncHandleInt,
+					psSyncPrimPDumpPolIN->ui32Offset,
+					psSyncPrimPDumpPolIN->ui32Value,
+					psSyncPrimPDumpPolIN->ui32Mask,
+					psSyncPrimPDumpPolIN->eOperator,
+					psSyncPrimPDumpPolIN->uiPDumpFlags);
+
+
+
+
+SyncPrimPDumpPol_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	/* Unreference the previously looked up handle */
+	if (psSyncHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpPol NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimOpPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL *psSyncPrimOpPDumpPolIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL *psSyncPrimOpPDumpPolOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hServerCookie = psSyncPrimOpPDumpPolIN->hServerCookie;
+	SERVER_OP_COOKIE * psServerCookieInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	/* Look up the address from the handle */
+	psSyncPrimOpPDumpPolOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					(void **) &psServerCookieInt,
+					hServerCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+					IMG_TRUE);
+	if (psSyncPrimOpPDumpPolOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto SyncPrimOpPDumpPol_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psSyncPrimOpPDumpPolOUT->eError =
+		PVRSRVSyncPrimOpPDumpPolKM(
+					psServerCookieInt,
+					psSyncPrimOpPDumpPolIN->eOperator,
+					psSyncPrimOpPDumpPolIN->uiPDumpFlags);
+
+
+
+
+SyncPrimOpPDumpPol_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	/* Unreference the previously looked up handle */
+	if (psServerCookieInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hServerCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimOpPDumpPol NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hSyncHandle = psSyncPrimPDumpCBPIN->hSyncHandle;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	/* Look up the address from the handle */
+	psSyncPrimPDumpCBPOUT->eError =
+		PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					(void **) &psSyncHandleInt,
+					hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+					IMG_TRUE);
+	if (psSyncPrimPDumpCBPOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto SyncPrimPDumpCBP_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psSyncPrimPDumpCBPOUT->eError =
+		PVRSRVSyncPrimPDumpCBPKM(
+					psSyncHandleInt,
+					psSyncPrimPDumpCBPIN->ui32Offset,
+					psSyncPrimPDumpCBPIN->uiWriteOffset,
+					psSyncPrimPDumpCBPIN->uiPacketSize,
+					psSyncPrimPDumpCBPIN->uiBufferSize);
+
+
+
+
+SyncPrimPDumpCBP_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	/* Unreference the previously looked up handle */
+	if (psSyncHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpCBP NULL
+#endif
+
+static IMG_INT
+PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *psSyncAllocEventIN,
+					  PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *psSyncAllocEventOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiClassNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncAllocEventIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psSyncAllocEventIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psSyncAllocEventOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto SyncAllocEvent_exit;
+			}
+		}
+	}
+
+	if (psSyncAllocEventIN->ui32ClassNameSize != 0)
+	{
+		uiClassNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser(NULL, uiClassNameInt, psSyncAllocEventIN->puiClassName, psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psSyncAllocEventOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncAllocEvent_exit;
+		}
+	}
+
+
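+	/* Forward the event, together with the copied class-name string, to
+	 * the KM handler for this connection's device. */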
+	psSyncAllocEventOUT->eError =
+		PVRSRVSyncAllocEventKM(psConnection, OSGetDevData(psConnection),
+					psSyncAllocEventIN->bServerSync,
+					psSyncAllocEventIN->ui32FWAddr,
+					psSyncAllocEventIN->ui32ClassNameSize,
+					uiClassNameInt);
+
+
+
+
+SyncAllocEvent_exit:
+
+
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if(pArrayArgsBuffer)
+#else
+	if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncFreeEvent(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCFREEEVENT *psSyncFreeEventIN,
+					  PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *psSyncFreeEventOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+	psSyncFreeEventOUT->eError =
+		PVRSRVSyncFreeEventKM(psConnection, OSGetDevData(psConnection),
+					psSyncFreeEventIN->ui32FWAddr);
+
+
+
+
+
+
+
+
+	return 0;
+}
+
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
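+/* Every entry point below is registered with the bridge lock enabled. */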
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitSYNCBridge(void);
+PVRSRV_ERROR DeinitSYNCBridge(void);
+
+/*
+ * Register all SYNC functions with services
+ */
+PVRSRV_ERROR InitSYNCBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK, PVRSRVBridgeAllocSyncPrimitiveBlock,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK, PVRSRVBridgeFreeSyncPrimitiveBlock,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET, PVRSRVBridgeSyncPrimSet,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCPRIMSET, PVRSRVBridgeServerSyncPrimSet,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCALLOC, PVRSRVBridgeServerSyncAlloc,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCFREE, PVRSRVBridgeServerSyncFree,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCQUEUEHWOP, PVRSRVBridgeServerSyncQueueHWOp,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCGETSTATUS, PVRSRVBridgeServerSyncGetStatus,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCREATE, PVRSRVBridgeSyncPrimOpCreate,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPTAKE, PVRSRVBridgeSyncPrimOpTake,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPREADY, PVRSRVBridgeSyncPrimOpReady,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCOMPLETE, PVRSRVBridgeSyncPrimOpComplete,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPDESTROY, PVRSRVBridgeSyncPrimOpDestroy,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP, PVRSRVBridgeSyncPrimPDump,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE, PVRSRVBridgeSyncPrimPDumpValue,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL, PVRSRVBridgeSyncPrimPDumpPol,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPPDUMPPOL, PVRSRVBridgeSyncPrimOpPDumpPol,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP, PVRSRVBridgeSyncPrimPDumpCBP,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT, PVRSRVBridgeSyncAllocEvent,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT, PVRSRVBridgeSyncFreeEvent,
+					NULL, bUseLock);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all SYNC functions from services
+ */
+PVRSRV_ERROR DeinitSYNCBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/synctracking_bridge/client_synctracking_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/synctracking_bridge/client_synctracking_bridge.h
new file mode 100644
index 0000000..ce11130
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/synctracking_bridge/client_synctracking_bridge.h
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for synctracking
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for synctracking
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_SYNCTRACKING_BRIDGE_H
+#define CLIENT_SYNCTRACKING_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_synctracking_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge,
+								      IMG_HANDLE hhRecord);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordAdd(IMG_HANDLE hBridge,
+							   IMG_HANDLE *phhRecord,
+							   IMG_HANDLE hhServerSyncPrimBlock,
+							   IMG_UINT32 ui32ui32FwBlockAddr,
+							   IMG_UINT32 ui32ui32SyncOffset,
+							   IMG_BOOL bbServerSync,
+							   IMG_UINT32 ui32ClassNameSize,
+							   const IMG_CHAR *puiClassName);
+
+
+#endif /* CLIENT_SYNCTRACKING_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/synctracking_bridge/client_synctracking_direct_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/synctracking_bridge/client_synctracking_direct_bridge.c
new file mode 100644
index 0000000..3568038
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/synctracking_bridge/client_synctracking_direct_bridge.c
@@ -0,0 +1,97 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for synctracking
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_synctracking_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+
+#include "sync.h"
+#include "sync_server.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge,
+								      IMG_HANDLE hhRecord)
+{
+	PVRSRV_ERROR eError;
+	SYNC_RECORD_HANDLE pshRecordInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	pshRecordInt = (SYNC_RECORD_HANDLE) hhRecord;
+
+	eError =
+		PVRSRVSyncRecordRemoveByHandleKM(
+					pshRecordInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordAdd(IMG_HANDLE hBridge,
+							   IMG_HANDLE *phhRecord,
+							   IMG_HANDLE hhServerSyncPrimBlock,
+							   IMG_UINT32 ui32ui32FwBlockAddr,
+							   IMG_UINT32 ui32ui32SyncOffset,
+							   IMG_BOOL bbServerSync,
+							   IMG_UINT32 ui32ClassNameSize,
+							   const IMG_CHAR *puiClassName)
+{
+	PVRSRV_ERROR eError;
+	SYNC_RECORD_HANDLE pshRecordInt;
+	SYNC_PRIMITIVE_BLOCK * pshServerSyncPrimBlockInt;
+
+	pshServerSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK *) hhServerSyncPrimBlock;
+
+	eError =
+		PVRSRVSyncRecordAddKM(NULL,
+					(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+					&pshRecordInt,
+					pshServerSyncPrimBlockInt,
+					ui32ui32FwBlockAddr,
+					ui32ui32SyncOffset,
+					bbServerSync,
+					ui32ClassNameSize,
+					puiClassName);
+
+	*phhRecord = pshRecordInt;
+	return eError;
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/synctracking_bridge/common_synctracking_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/synctracking_bridge/common_synctracking_bridge.h
new file mode 100644
index 0000000..edc856f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/synctracking_bridge/common_synctracking_bridge.h
@@ -0,0 +1,101 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for synctracking
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for synctracking
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_SYNCTRACKING_BRIDGE_H
+#define COMMON_SYNCTRACKING_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST			0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE			PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD			PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST			(PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1)
+
+
+/*******************************************
+            SyncRecordRemoveByHandle          
+ *******************************************/
+
+/* Bridge in structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+	IMG_HANDLE hhRecord;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE;
+
+/* Bridge out structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE;
+
+
+/*******************************************
+            SyncRecordAdd          
+ *******************************************/
+
+/* Bridge in structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDADD_TAG
+{
+	IMG_HANDLE hhServerSyncPrimBlock;
+	IMG_UINT32 ui32ui32FwBlockAddr;
+	IMG_UINT32 ui32ui32SyncOffset;
+	IMG_BOOL bbServerSync;
+	IMG_UINT32 ui32ClassNameSize;
+	const IMG_CHAR * puiClassName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDADD;
+
+/* Bridge out structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDADD_TAG
+{
+	IMG_HANDLE hhRecord;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDADD;
+
+
+#endif /* COMMON_SYNCTRACKING_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/synctracking_bridge/server_synctracking_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/synctracking_bridge/server_synctracking_bridge.c
new file mode 100644
index 0000000..7a111f8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/synctracking_bridge/server_synctracking_bridge.c
@@ -0,0 +1,336 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for synctracking
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for synctracking
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+
+
+#include "common_synctracking_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeSyncRecordRemoveByHandle(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleIN,
+					  PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	/* Lock over handle destruction. */
+	LockHandle();
+
+
+
+
+
+	psSyncRecordRemoveByHandleOUT->eError =
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					(IMG_HANDLE) psSyncRecordRemoveByHandleIN->hhRecord,
+					PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE);
+	if ((psSyncRecordRemoveByHandleOUT->eError != PVRSRV_OK) &&
+	    (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVBridgeSyncRecordRemoveByHandle: %s",
+		        PVRSRVGetErrorStringKM(psSyncRecordRemoveByHandleOUT->eError)));
+		UnlockHandle();
+		goto SyncRecordRemoveByHandle_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle();
+
+
+
+SyncRecordRemoveByHandle_exit:
+
+
+
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCRECORDADD *psSyncRecordAddIN,
+					  PVRSRV_BRIDGE_OUT_SYNCRECORDADD *psSyncRecordAddOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SYNC_RECORD_HANDLE pshRecordInt = NULL;
+	IMG_HANDLE hhServerSyncPrimBlock = psSyncRecordAddIN->hhServerSyncPrimBlock;
+	SYNC_PRIMITIVE_BLOCK * pshServerSyncPrimBlockInt = NULL;
+	IMG_CHAR *uiClassNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE   *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+	IMG_UINT32 ui32BufferSize = 
+			(psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) +
+			0;
+
+
+
+
+
+	if (ui32BufferSize != 0)
+	{
+#if !defined(INTEGRITY_OS)
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncRecordAddIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+			PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *)psSyncRecordAddIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+#endif
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if(!pArrayArgsBuffer)
+			{
+				psSyncRecordAddOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto SyncRecordAdd_exit;
+			}
+		}
+	}
+
+	if (psSyncRecordAddIN->ui32ClassNameSize != 0)
+	{
+		uiClassNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+		ui32NextOffset += psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser(NULL, uiClassNameInt, psSyncRecordAddIN->puiClassName, psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psSyncRecordAddOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncRecordAdd_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle();
+
+
+
+
+
+	{
+		/* Look up the address from the handle */
+		psSyncRecordAddOUT->eError =
+			PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						(void **) &pshServerSyncPrimBlockInt,
+						hhServerSyncPrimBlock,
+						PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						IMG_TRUE);
+		if (psSyncRecordAddOUT->eError != PVRSRV_OK)
+		{
+			UnlockHandle();
+			goto SyncRecordAdd_exit;
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle();
+
+	psSyncRecordAddOUT->eError =
+		PVRSRVSyncRecordAddKM(psConnection, OSGetDevData(psConnection),
+					&pshRecordInt,
+					pshServerSyncPrimBlockInt,
+					psSyncRecordAddIN->ui32ui32FwBlockAddr,
+					psSyncRecordAddIN->ui32ui32SyncOffset,
+					psSyncRecordAddIN->bbServerSync,
+					psSyncRecordAddIN->ui32ClassNameSize,
+					uiClassNameInt);
+	/* Exit early if bridged call fails */
+	if(psSyncRecordAddOUT->eError != PVRSRV_OK)
+	{
+		goto SyncRecordAdd_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle();
+
+
+
+
+
+	psSyncRecordAddOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psSyncRecordAddOUT->hhRecord,
+							(void *) pshRecordInt,
+							PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+							(PFN_HANDLE_RELEASE)
+							&PVRSRVSyncRecordRemoveByHandleKM);
+	if (psSyncRecordAddOUT->eError != PVRSRV_OK)
+	{
+		UnlockHandle();
+		goto SyncRecordAdd_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle();
+
+
+
+SyncRecordAdd_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+
+
+
+
+
+	{
+		/* Unreference the previously looked up handle */
+		if (pshServerSyncPrimBlockInt)
+		{
+			PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+						hhServerSyncPrimBlock,
+						PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle();
+
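+	/* Annotation: if anything failed after PVRSRVSyncRecordAddKM succeeded,
+	 * remove the record again so no orphaned entry is left behind.
+	 */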
+	if (psSyncRecordAddOUT->eError != PVRSRV_OK)
+	{
+		if (pshRecordInt)
+		{
+			PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+	PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+	if (pArrayArgsBuffer)
+#else
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+	return 0;
+}
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
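+/* Annotation: when IMG_TRUE, the entries registered below are dispatched
+ * with the global bridge lock held.
+ */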
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void);
+PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void);
+
+/*
+ * Register all SYNCTRACKING functions with services
+ */
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void)
+{
+
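+	/* Annotation: each entry maps a (bridge group, command id) pair to its
+	 * handler function.
+	 */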
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE, PVRSRVBridgeSyncRecordRemoveByHandle,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD, PVRSRVBridgeSyncRecordAdd,
+					NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all synctracking functions with services
+ */
+PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/timerquery_bridge/common_timerquery_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/timerquery_bridge/common_timerquery_bridge.h
new file mode 100644
index 0000000..ed544f2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/timerquery_bridge/common_timerquery_bridge.h
@@ -0,0 +1,135 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for timerquery
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for timerquery
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_TIMERQUERY_BRIDGE_H
+#define COMMON_TIMERQUERY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST			0
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXBEGINTIMERQUERY			PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXENDTIMERQUERY			PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXQUERYTIMER			PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXCURRENTTIME			PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_TIMERQUERY_CMD_LAST			(PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+3)
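+
+/* Annotation: command IDs are zero-based offsets from CMD_FIRST into the
+ * TIMERQUERY dispatch table and are kept contiguous up to CMD_LAST.
+ */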
+
+
+/*******************************************
+            RGXBeginTimerQuery          
+ *******************************************/
+
+/* Bridge in structure for RGXBeginTimerQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY_TAG
+{
+	IMG_UINT32 ui32QueryId;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY;
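+
+/* Annotation: bridge structures are packed so the user- and kernel-side
+ * layouts of the shared buffer match exactly.
+ */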
+
+/* Bridge out structure for RGXBeginTimerQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY;
+
+
+/*******************************************
+            RGXEndTimerQuery          
+ *******************************************/
+
+/* Bridge in structure for RGXEndTimerQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY;
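+
+/* Annotation: C structures may not be empty, so calls that take no inputs
+ * carry a single placeholder member.
+ */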
+
+/* Bridge out structure for RGXEndTimerQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY;
+
+
+/*******************************************
+            RGXQueryTimer          
+ *******************************************/
+
+/* Bridge in structure for RGXQueryTimer */
+typedef struct PVRSRV_BRIDGE_IN_RGXQUERYTIMER_TAG
+{
+	IMG_UINT32 ui32QueryId;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXQUERYTIMER;
+
+/* Bridge out structure for RGXQueryTimer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXQUERYTIMER_TAG
+{
+	IMG_UINT64 ui64StartTime;
+	IMG_UINT64 ui64EndTime;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXQUERYTIMER;
+
+
+/*******************************************
+            RGXCurrentTime          
+ *******************************************/
+
+/* Bridge in structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCURRENTTIME;
+
+/* Bridge out structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCURRENTTIME_TAG
+{
+	IMG_UINT64 ui64Time;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCURRENTTIME;
+
+
+#endif /* COMMON_TIMERQUERY_BRIDGE_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/timerquery_bridge/server_timerquery_bridge.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/timerquery_bridge/server_timerquery_bridge.c
new file mode 100644
index 0000000..fbd7c10
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/generated/timerquery_bridge/server_timerquery_bridge.c
@@ -0,0 +1,232 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for timerquery
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for timerquery
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtimerquery.h"
+
+
+#include "common_timerquery_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
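+/* Annotation: each entry point below unpacks its IN structure, calls the
+ * corresponding PVRSRVRGX...KM implementation and writes the result into the
+ * OUT structure that the bridge core copies back to user space.
+ */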
+static IMG_INT
+PVRSRVBridgeRGXBeginTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryIN,
+					  PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	psRGXBeginTimerQueryOUT->eError =
+		PVRSRVRGXBeginTimerQueryKM(psConnection, OSGetDevData(psConnection),
+					psRGXBeginTimerQueryIN->ui32QueryId);
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXEndTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *psRGXEndTimerQueryIN,
+					  PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *psRGXEndTimerQueryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PVR_UNREFERENCED_PARAMETER(psRGXEndTimerQueryIN);
+
+	psRGXEndTimerQueryOUT->eError =
+		PVRSRVRGXEndTimerQueryKM(psConnection, OSGetDevData(psConnection));
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXQueryTimer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXQUERYTIMER *psRGXQueryTimerIN,
+					  PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *psRGXQueryTimerOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	psRGXQueryTimerOUT->eError =
+		PVRSRVRGXQueryTimerKM(psConnection, OSGetDevData(psConnection),
+					psRGXQueryTimerIN->ui32QueryId,
+					&psRGXQueryTimerOUT->ui64StartTime,
+					&psRGXQueryTimerOUT->ui64EndTime);
+
+	return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCURRENTTIME *psRGXCurrentTimeIN,
+					  PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN);
+
+	psRGXCurrentTimeOUT->eError =
+		PVRSRVRGXCurrentTime(psConnection, OSGetDevData(psConnection),
+					&psRGXCurrentTimeOUT->ui64Time);
+
+
+	return 0;
+}
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitTIMERQUERYBridge(void);
+PVRSRV_ERROR DeinitTIMERQUERYBridge(void);
+
+/*
+ * Register all TIMERQUERY functions with services
+ */
+PVRSRV_ERROR InitTIMERQUERYBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXBEGINTIMERQUERY, PVRSRVBridgeRGXBeginTimerQuery,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXENDTIMERQUERY, PVRSRVBridgeRGXEndTimerQuery,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXQUERYTIMER, PVRSRVBridgeRGXQueryTimer,
+					NULL, bUseLock);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXCURRENTTIME, PVRSRVBridgeRGXCurrentTime,
+					NULL, bUseLock);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all timerquery functions with services
+ */
+PVRSRV_ERROR DeinitTIMERQUERYBridge(void)
+{
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.2.30.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.2.30.h
new file mode 100644
index 0000000..ade2dfa
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.2.30.h
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.2.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_2_30_H_
+#define _RGXCONFIG_KM_1_V_2_30_H_
+
+/***** Automatically generated file (17/08/2017 07:00:41): Do not edit manually ********************/
+/***** Timestamp:  (17/08/2017 07:00:41)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 30
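+
+/* Annotation: in the BVNC naming, B = branch, N = cluster count and
+ * C = config revision; V is left symbolic ("1.V.2.30") because the same
+ * kernel config applies to any version of the core.
+ */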
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_FBCDC_ALGORITHM (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_COMPUTE_OVERLAP 
+
+
+#endif /* _RGXCONFIG_KM_1_V_2_30_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.4.12.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.4.12.h
new file mode 100644
index 0000000..c71fb87
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.4.12.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.4.12
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_12_H_
+#define _RGXCONFIG_KM_1_V_4_12_H_
+
+/***** Automatically generated file (17/08/2017 07:00:41): Do not edit manually ********************/
+/***** Timestamp:  (17/08/2017 07:00:41)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 12
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (256*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_FBCDC_ALGORITHM (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_COMPUTE_OVERLAP 
+
+
+#endif /* _RGXCONFIG_KM_1_V_4_12_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.4.19.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.4.19.h
new file mode 100644
index 0000000..6d1b584
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.4.19.h
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.4.19
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_19_H_
+#define _RGXCONFIG_KM_1_V_4_19_H_
+
+/***** Automatically generated file (17/08/2017 07:00:41): Do not edit manually ********************/
+/***** Timestamp:  (17/08/2017 07:00:41)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 19
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_FBCDC_ALGORITHM (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_COMPUTE_OVERLAP 
+
+
+#endif /* _RGXCONFIG_KM_1_V_4_19_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.4.5.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.4.5.h
new file mode 100644
index 0000000..9c3011f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.4.5.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.4.5
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_5_H_
+#define _RGXCONFIG_KM_1_V_4_5_H_
+
+/***** Automatically generated file (17/08/2017 07:00:41): Do not edit manually ********************/
+/***** Timestamp:  (17/08/2017 07:00:41)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 5
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_FBCDC_ALGORITHM (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_COMPUTE_OVERLAP 
+
+
+#endif /* _RGXCONFIG_KM_1_V_4_5_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.4.6.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.4.6.h
new file mode 100644
index 0000000..eae139b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_1.V.4.6.h
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.4.6
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_6_H_
+#define _RGXCONFIG_KM_1_V_4_6_H_
+
+/***** Automatically generated file (22/02/2016 07:00:34): Do not edit manually ********************/
+/***** Timestamp:  (22/02/2016 07:00:34)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 6
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_COMPUTE_OVERLAP 
+
+
+#endif /* _RGXCONFIG_KM_1_V_4_6_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_12.V.1.20.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_12.V.1.20.h
new file mode 100644
index 0000000..94e9fcd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_12.V.1.20.h
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 12.V.1.20
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_12_V_1_20_H_
+#define _RGXCONFIG_KM_12_V_1_20_H_
+
+/***** Automatically generated file (04/09/2017 07:00:43): Do not edit manually ********************/
+/***** Timestamp:  (04/09/2017 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 12
+#define RGX_BNC_KM_N 1
+#define RGX_BNC_KM_C 20
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1)
+#define RGX_FEATURE_META (LTP217)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_ROGUEXE 
+#define RGX_FEATURE_NUM_RASTER_PIPES (1)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (0*1024)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+
+
+#endif /* _RGXCONFIG_KM_12_V_1_20_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_15.V.1.64.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_15.V.1.64.h
new file mode 100644
index 0000000..e933c24
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_15.V.1.64.h
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 15.V.1.64
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_15_V_1_64_H_
+#define _RGXCONFIG_KM_15_V_1_64_H_
+
+/***** Automatically generated file (04/09/2017 07:00:43): Do not edit manually ********************/
+/***** Timestamp:  (04/09/2017 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 15
+#define RGX_BNC_KM_N 1
+#define RGX_BNC_KM_C 64
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_META (LTP217)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_ROGUEXE 
+#define RGX_FEATURE_NUM_RASTER_PIPES (1)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_FBCDC_ALGORITHM (2)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+
+
+#endif /* _RGXCONFIG_KM_15_V_1_64_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.22.23.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.22.23.h
new file mode 100644
index 0000000..878a766
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.22.23.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 22.V.22.23
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_22_23_H_
+#define _RGXCONFIG_KM_22_V_22_23_H_
+
+/***** Automatically generated file (06/10/2017 07:00:42): Do not edit manually ********************/
+/***** Timestamp:  (06/10/2017 07:00:42)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 22
+#define RGX_BNC_KM_C 23
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_ROGUEXE 
+#define RGX_FEATURE_NUM_RASTER_PIPES (1)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT 
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 
+#define RGX_FEATURE_PBE2_IN_XE 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_MIPS 
+#define RGX_FEATURE_SINGLE_BIF 
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_PBVNC_COREID_REG 
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET 
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+
+
+#endif /* _RGXCONFIG_KM_22_V_22_23_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.22.25.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.22.25.h
new file mode 100644
index 0000000..ce39847
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.22.25.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 22.V.22.25
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_22_25_H_
+#define _RGXCONFIG_KM_22_V_22_25_H_
+
+/***** Automatically generated file (06/10/2017 07:00:43): Do not edit manually ********************/
+/***** Timestamp:  (06/10/2017 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 22
+#define RGX_BNC_KM_C 25
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_ROGUEXE 
+#define RGX_FEATURE_NUM_RASTER_PIPES (1)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT 
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 
+#define RGX_FEATURE_PBE2_IN_XE 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_MIPS 
+#define RGX_FEATURE_SINGLE_BIF 
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_PBVNC_COREID_REG 
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+
+
+#endif /* _RGXCONFIG_KM_22_V_22_25_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.22.29.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.22.29.h
new file mode 100644
index 0000000..8aba74d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.22.29.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 22.V.22.29
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_22_29_H_
+#define _RGXCONFIG_KM_22_V_22_29_H_
+
+/***** Automatically generated file (06/10/2017 07:00:42): Do not edit manually ********************/
+/***** Timestamp:  (06/10/2017 07:00:42)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 22
+#define RGX_BNC_KM_C 29
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_ROGUEXE 
+#define RGX_FEATURE_NUM_RASTER_PIPES (1)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT 
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 
+#define RGX_FEATURE_PBE2_IN_XE 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_MIPS 
+#define RGX_FEATURE_SINGLE_BIF 
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_PBVNC_COREID_REG 
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+
+
+#endif /* _RGXCONFIG_KM_22_V_22_29_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.54.24.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.54.24.h
new file mode 100644
index 0000000..6d1feb0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.54.24.h
@@ -0,0 +1,78 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 22.V.54.24
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_54_24_H_
+#define _RGXCONFIG_KM_22_V_54_24_H_
+
+/***** Automatically generated file (06/10/2017 07:00:42): Do not edit manually ********************/
+/***** Timestamp:  (06/10/2017 07:00:42)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 24
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_ROGUEXE 
+#define RGX_FEATURE_NUM_RASTER_PIPES (1)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT 
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 
+#define RGX_FEATURE_PBE2_IN_XE 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_MIPS 
+#define RGX_FEATURE_SINGLE_BIF 
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_PBVNC_COREID_REG 
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+
+
+#endif /* _RGXCONFIG_KM_22_V_54_24_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.54.30.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.54.30.h
new file mode 100644
index 0000000..b109932
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_22.V.54.30.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 22.V.54.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_22_V_54_30_H_
+#define _RGXCONFIG_KM_22_V_54_30_H_
+
+/***** Automatically generated file (06/10/2017 07:00:42): Do not edit manually ********************/
+/***** Timestamp:  (06/10/2017 07:00:42)************************************************************/
+
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 30
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_ROGUEXE 
+#define RGX_FEATURE_NUM_RASTER_PIPES (1)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT 
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1 
+#define RGX_FEATURE_PBE2_IN_XE 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_MIPS 
+#define RGX_FEATURE_SINGLE_BIF 
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_PBVNC_COREID_REG 
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (64*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+
+
+#endif /* _RGXCONFIG_KM_22_V_54_30_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_4.V.2.57.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_4.V.2.57.h
new file mode 100644
index 0000000..3ea4996
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_4.V.2.57.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 4.V.2.57
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_2_57_H_
+#define _RGXCONFIG_KM_4_V_2_57_H_
+
+/***** Automatically generated file (15/09/2017 07:00:43): Do not edit manually ********************/
+/***** Timestamp:  (15/09/2017 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 57
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_DYNAMIC_DUST_POWER 
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE 
+#define RGX_FEATURE_COMPUTE_OVERLAP 
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL 
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS 
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_FBCDC_ALGORITHM (2)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+
+
+#endif /* _RGXCONFIG_KM_4_V_2_57_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_4.V.2.58.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_4.V.2.58.h
new file mode 100644
index 0000000..1979c68
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_4.V.2.58.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 4.V.2.58
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_2_58_H_
+#define _RGXCONFIG_KM_4_V_2_58_H_
+
+/***** Automatically generated file (15/09/2017 07:00:44): Do not edit manually ********************/
+/***** Timestamp:  (15/09/2017 07:00:44)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 58
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_DYNAMIC_DUST_POWER 
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE 
+#define RGX_FEATURE_COMPUTE_OVERLAP 
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL 
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS 
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_FBCDC_ALGORITHM (2)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+
+
+#endif /* _RGXCONFIG_KM_4_V_2_58_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_4.V.4.55.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_4.V.4.55.h
new file mode 100644
index 0000000..1304292
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_4.V.4.55.h
@@ -0,0 +1,78 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 4.V.4.55
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_4_55_H_
+#define _RGXCONFIG_KM_4_V_4_55_H_
+
+/***** Automatically generated file (15/09/2017 07:00:43): Do not edit manually ********************/
+/***** Timestamp:  (15/09/2017 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 55
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_DYNAMIC_DUST_POWER 
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE 
+#define RGX_FEATURE_COMPUTE_OVERLAP 
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL 
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS 
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_FBCDC_ALGORITHM (2)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+
+
+#endif /* _RGXCONFIG_KM_4_V_4_55_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_4.V.6.62.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_4.V.6.62.h
new file mode 100644
index 0000000..db35b2a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_4.V.6.62.h
@@ -0,0 +1,81 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 4.V.6.62
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_6_62_H_
+#define _RGXCONFIG_KM_4_V_6_62_H_
+
+/***** Automatically generated file (15/09/2017 07:00:44): Do not edit manually ********************/
+/***** Timestamp:  (15/09/2017 07:00:44)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 6
+#define RGX_BNC_KM_C 62
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (6)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_DYNAMIC_DUST_POWER 
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE 
+#define RGX_FEATURE_COMPUTE_OVERLAP 
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL 
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS 
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_FBCDC_ALGORITHM (2)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_SLC_BANKS (4)
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+
+
+#endif /* _RGXCONFIG_KM_4_V_6_62_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_5.V.1.46.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_5.V.1.46.h
new file mode 100644
index 0000000..a22d094
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_5.V.1.46.h
@@ -0,0 +1,75 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 5.V.1.46
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_5_V_1_46_H_
+#define _RGXCONFIG_KM_5_V_1_46_H_
+
+/***** Automatically generated file (04/09/2017 07:00:43): Do not edit manually ********************/
+/***** Timestamp:  (04/09/2017 07:00:43)************************************************************/
+
+#define RGX_BNC_KM_B 5
+#define RGX_BNC_KM_N 1
+#define RGX_BNC_KM_C 46
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1)
+#define RGX_FEATURE_META (LTP217)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_ROGUEXE 
+#define RGX_FEATURE_NUM_RASTER_PIPES (1)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER 
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_FBCDC_ALGORITHM (2)
+
+
+#endif /* _RGXCONFIG_KM_5_V_1_46_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_6.V.4.35.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_6.V.4.35.h
new file mode 100644
index 0000000..4e0b4af
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_6.V.4.35.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 6.V.4.35
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_6_V_4_35_H_
+#define _RGXCONFIG_KM_6_V_4_35_H_
+
+/***** Automatically generated file (15/09/2017 07:00:44): Do not edit manually ********************/
+/***** Timestamp:  (15/09/2017 07:00:44)************************************************************/
+
+#define RGX_BNC_KM_B 6
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 35
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_DYNAMIC_DUST_POWER 
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE 
+#define RGX_FEATURE_COMPUTE_OVERLAP 
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL 
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS 
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_FBCDC_ALGORITHM (2)
+#define RGX_FEATURE_RAY_TRACING 
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+
+
+#endif /* _RGXCONFIG_KM_6_V_4_35_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_8.V.2.39.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_8.V.2.39.h
new file mode 100644
index 0000000..656f4b0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/configs/rgxconfig_km_8.V.2.39.h
@@ -0,0 +1,94 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 8.V.2.39
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_8_V_2_39_H_
+#define _RGXCONFIG_KM_8_V_2_39_H_
+
+/***** Automatically generated file (15/09/2017 07:00:44): Do not edit manually ********************/
+/***** Timestamp:  (15/09/2017 07:00:44)************************************************************/
+
+#define RGX_BNC_KM_B 8
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 39
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_PERFBUS 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_COMPUTE 
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE 
+#define RGX_FEATURE_COMPUTE_OVERLAP 
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL 
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS 
+#define RGX_FEATURE_SCALABLE_VDM_GPP 
+#define RGX_FEATURE_VDM_DRAWINDIRECT 
+#define RGX_FEATURE_SLC_VIVT 
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS 
+#define RGX_FEATURE_PDS_PER_DUST 
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS 
+#define RGX_FEATURE_TESSELLATION 
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS 
+#define RGX_FEATURE_PDS_TEMPSIZE8 
+#define RGX_FEATURE_META_DMA 
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4)
+#define RGX_FEATURE_META_COREMEM_BANKS (8)
+#define RGX_FEATURE_META_COREMEM_SIZE (64)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (3)
+#define RGX_FEATURE_FBCDC_ALGORITHM (2)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128)
+#define RGX_FEATURE_SLC_BANKS (2)
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1)
+#define RGX_FEATURE_SCALABLE_VCE (1)
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_S7_CACHE_HIERARCHY 
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+
+
+#endif /* _RGXCONFIG_KM_8_V_2_39_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.39.4.19.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.39.4.19.h
new file mode 100644
index 0000000..e718bee
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.39.4.19.h
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.39.4.19
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_39_4_19_H_
+#define _RGXCORE_KM_1_39_4_19_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp:  (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2784771 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.39.4.19 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 39
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 19
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+#define FIX_HW_BRN_42321
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_39_4_19_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.72.4.12.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.72.4.12.h
new file mode 100644
index 0000000..3bc0581
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.72.4.12.h
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.72.4.12
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_72_4_12_H_
+#define _RGXCORE_KM_1_72_4_12_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp:  (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2646650 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.72.4.12 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 72
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 12
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_72_4_12_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.75.2.30.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.75.2.30.h
new file mode 100644
index 0000000..17369b9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.75.2.30.h
@@ -0,0 +1,75 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.75.2.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_75_2_30_H_
+#define _RGXCORE_KM_1_75_2_30_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp:  (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2309075 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.75.2.30 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 75
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_42321
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_75_2_30_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.76.4.6.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.76.4.6.h
new file mode 100644
index 0000000..b1127c8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.76.4.6.h
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.76.4.6
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_76_4_6_H_
+#define _RGXCORE_KM_1_76_4_6_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp:  (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2318404 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.76.4.6 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 76
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 6
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+#define FIX_HW_BRN_42480
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_76_4_6_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.82.4.5.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.82.4.5.h
new file mode 100644
index 0000000..8257262
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_1.82.4.5.h
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.82.4.5
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_82_4_5_H_
+#define _RGXCORE_KM_1_82_4_5_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp:  (20/02/2017 07:01:20)************************************************************/
+/***** CS: @2503111 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.82.4.5 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 82
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 5
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_44455
+#define FIX_HW_BRN_54441
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_1_82_4_5_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_12.5.1.20.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_12.5.1.20.h
new file mode 100644
index 0000000..8a02131
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_12.5.1.20.h
@@ -0,0 +1,75 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 12.5.1.20
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_12_5_1_20_H_
+#define _RGXCORE_KM_12_5_1_20_H_
+
+/***** Automatically generated file (17/08/2017 07:00:59): Do not edit manually ********************/
+/***** Timestamp:  (17/08/2017 07:00:59)************************************************************/
+/***** CS: @3146507 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 12.5.1.20 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 12
+#define RGX_BVNC_KM_V 5
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 20
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_12_5_1_20_H_ */
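[Editor's note on the generated headers above and below: each core header pins down one GPU configuration through its BVNC tuple — roughly B (branch), V (version), N (scalability/cluster count), C (config) — via the four RGX_BVNC_KM_* defines. As a hedged illustration only (the packing layout and helper names here are assumptions, not the driver's actual macros), the four values can be folded into a single 64-bit identifier and compared against the BVNC the kernel driver reads back from the hardware at probe time:

/* Sketch, not driver code: assumes the selected core header
 * (e.g. rgxcore_km_12.5.1.20.h) has already been included so that
 * RGX_BVNC_KM_{B,V,N,C} are defined. */
#include <stdint.h>

#define SKETCH_BVNC_PACK(b, v, n, c) \
	((((uint64_t)(b)) << 48) | (((uint64_t)(v)) << 32) | \
	 (((uint64_t)(n)) << 16) |  ((uint64_t)(c)))

static inline int sketch_bvnc_matches(uint64_t hw_bvnc)
{
	/* Compare the BVNC reported by the GPU against the one the
	 * driver was built for. */
	return hw_bvnc == SKETCH_BVNC_PACK(RGX_BVNC_KM_B, RGX_BVNC_KM_V,
					   RGX_BVNC_KM_N, RGX_BVNC_KM_C);
}
]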
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_15.5.1.64.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_15.5.1.64.h
new file mode 100644
index 0000000..c77946c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_15.5.1.64.h
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 15.5.1.64
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_15_5_1_64_H_
+#define _RGXCORE_KM_15_5_1_64_H_
+
+/***** Automatically generated file (20/02/2017 07:01:20): Do not edit manually ********************/
+/***** Timestamp:  (20/02/2017 07:01:20)************************************************************/
+/***** CS: @3846532 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 15.5.1.64 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 15
+#define RGX_BVNC_KM_V 5
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 64
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_15_5_1_64_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.26.54.24.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.26.54.24.h
new file mode 100644
index 0000000..e447df7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.26.54.24.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.26.54.24
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_26_54_24_H_
+#define _RGXCORE_KM_22_26_54_24_H_
+
+/***** Automatically generated file (25/09/2017 07:00:58): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:58)************************************************************/
+/***** CS: @3943204 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.26.54.24 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 26
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 24
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_26_54_24_H_ */
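[Editor's note: the Errata and Enhancements sections of these headers act as compile-time feature flags. Each FIX_HW_BRN_nnnnn names a hardware bug (BRN) whose software workaround must be built in for that core; each HW_ERN_nnnnn names an enhancement (ERN) the core provides, allowing a faster or simpler path. A minimal sketch of how driver sources can key off these defines — the identifiers and behavior below are hypothetical, not taken from this driver:

/* Sketch: after the build selects one core header, sources gate
 * workarounds and fast paths on its defines. */
#if defined(FIX_HW_BRN_63027)
static inline void sketch_apply_brn63027_workaround(void)
{
	/* Program conservative settings for the affected block. */
}
#endif

#if defined(HW_ERN_57596)
#define SKETCH_HAS_FAST_PATH 1	/* enhancement present on this core */
#else
#define SKETCH_HAS_FAST_PATH 0	/* fall back to the generic path */
#endif
]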
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.28.22.23.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.28.22.23.h
new file mode 100644
index 0000000..3c976a9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.28.22.23.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.28.22.23
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_28_22_23_H_
+#define _RGXCORE_KM_22_28_22_23_H_
+
+/***** Automatically generated file (25/09/2017 07:00:58): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:58)************************************************************/
+/***** CS: @3969181 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.28.22.23 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 28
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 23
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_28_22_23_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.40.54.30.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.40.54.30.h
new file mode 100644
index 0000000..18456ab
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.40.54.30.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.40.54.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_40_54_30_H_
+#define _RGXCORE_KM_22_40_54_30_H_
+
+/***** Automatically generated file (25/09/2017 07:00:59): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:59)************************************************************/
+/***** CS: @4094817 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.40.54.30 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 40
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_40_54_30_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.44.22.25.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.44.22.25.h
new file mode 100644
index 0000000..49ec776
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.44.22.25.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.44.22.25
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_44_22_25_H_
+#define _RGXCORE_KM_22_44_22_25_H_
+
+/***** Automatically generated file (25/09/2017 07:00:58): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:58)************************************************************/
+/***** CS: @4137146 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.44.22.25 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 44
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 25
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_44_22_25_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.48.54.30.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.48.54.30.h
new file mode 100644
index 0000000..1b5368d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.48.54.30.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.48.54.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_48_54_30_H_
+#define _RGXCORE_KM_22_48_54_30_H_
+
+/***** Automatically generated file (25/09/2017 07:00:57): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:57)************************************************************/
+/***** CS: @4158661 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.48.54.30 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 48
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_48_54_30_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.50.22.29.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.50.22.29.h
new file mode 100644
index 0000000..ddce071
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.50.22.29.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.50.22.29
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_50_22_29_H_
+#define _RGXCORE_KM_22_50_22_29_H_
+
+/***** Automatically generated file (25/09/2017 07:00:59): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:59)************************************************************/
+/***** CS: @4156423 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.50.22.29 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 50
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 29
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* _RGXCORE_KM_22_50_22_29_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.58.22.25.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.58.22.25.h
new file mode 100644
index 0000000..8fbe459
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.58.22.25.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.58.22.25
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_58_22_25_H_
+#define _RGXCORE_KM_22_58_22_25_H_
+
+/***** Automatically generated file (25/09/2017 07:00:58): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:58)************************************************************/
+/***** CS: @4279077 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.58.22.25 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 58
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 25
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_58_22_25_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.59.54.30.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.59.54.30.h
new file mode 100644
index 0000000..c6d0d3d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.59.54.30.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.59.54.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_59_54_30_H_
+#define _RGXCORE_KM_22_59_54_30_H_
+
+/***** Automatically generated file (25/09/2017 07:00:59): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:59)************************************************************/
+/***** CS: @4317182 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.59.54.30 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 59
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_59_54_30_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.60.22.29.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.60.22.29.h
new file mode 100644
index 0000000..666f55f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.60.22.29.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.60.22.29
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_60_22_29_H_
+#define _RGXCORE_KM_22_60_22_29_H_
+
+/***** Automatically generated file (25/09/2017 07:00:57): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:57)************************************************************/
+/***** CS: @4339983 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.60.22.29 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 60
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 29
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_60_22_29_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.67.54.30.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.67.54.30.h
new file mode 100644
index 0000000..5baabe5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.67.54.30.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.67.54.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_67_54_30_H_
+#define _RGXCORE_KM_22_67_54_30_H_
+
+/***** Automatically generated file (25/09/2017 07:00:57): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:57)************************************************************/
+/***** CS: @4339986 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.67.54.30 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 67
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_67_54_30_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.68.54.30.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.68.54.30.h
new file mode 100644
index 0000000..94949bb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.68.54.30.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.68.54.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_68_54_30_H_
+#define _RGXCORE_KM_22_68_54_30_H_
+
+/***** Automatically generated file (25/09/2017 07:00:58): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:58)************************************************************/
+/***** CS: @4339984 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.68.54.30 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 68
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_68_54_30_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.69.22.25.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.69.22.25.h
new file mode 100644
index 0000000..7b64bfc
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.69.22.25.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.69.22.25
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_69_22_25_H_
+#define _RGXCORE_KM_22_69_22_25_H_
+
+/***** Automatically generated file (25/09/2017 07:00:58): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:58)************************************************************/
+/***** CS: @4339983 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.69.22.25 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 69
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 25
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_69_22_25_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.75.22.25.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.75.22.25.h
new file mode 100644
index 0000000..effa1bf
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_22.75.22.25.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.75.22.25
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_22_75_22_25_H_
+#define _RGXCORE_KM_22_75_22_25_H_
+
+/***** Automatically generated file (25/09/2017 07:00:59): Do not edit manually ********************/
+/***** Timestamp:  (25/09/2017 07:00:59)************************************************************/
+/***** CS: @4517870 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 22.75.22.25 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 75
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 25
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+#define FIX_HW_BRN_63027
+#define FIX_HW_BRN_65273
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* _RGXCORE_KM_22_75_22_25_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.31.4.55.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.31.4.55.h
new file mode 100644
index 0000000..57f8e2d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.31.4.55.h
@@ -0,0 +1,78 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.31.4.55
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_31_4_55_H_
+#define _RGXCORE_KM_4_31_4_55_H_
+
+/***** Automatically generated file (06/02/2017 07:01:06): Do not edit manually ********************/
+/***** Timestamp:  (06/02/2017 07:01:06)************************************************************/
+/***** CS: @2919104 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.31.4.55 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 31
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 55
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_31_4_55_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.41.2.57.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.41.2.57.h
new file mode 100644
index 0000000..ea14d9e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.41.2.57.h
@@ -0,0 +1,78 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.41.2.57
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_41_2_57_H_
+#define _RGXCORE_KM_4_41_2_57_H_
+
+/***** Automatically generated file (06/02/2017 07:01:05): Do not edit manually ********************/
+/***** Timestamp:  (06/02/2017 07:01:05)************************************************************/
+/***** CS: @3254338 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.41.2.57 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 41
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 57
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_41_2_57_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.43.6.62.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.43.6.62.h
new file mode 100644
index 0000000..3c55e1c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.43.6.62.h
@@ -0,0 +1,78 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.43.6.62
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_43_6_62_H_
+#define _RGXCORE_KM_4_43_6_62_H_
+
+/***** Automatically generated file (06/02/2017 07:01:06): Do not edit manually ********************/
+/***** Timestamp:  (06/02/2017 07:01:06)************************************************************/
+/***** CS: @3253129 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.43.6.62 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 43
+#define RGX_BVNC_KM_N 6
+#define RGX_BVNC_KM_C 62
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_43_6_62_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.45.2.58.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.45.2.58.h
new file mode 100644
index 0000000..0001e9b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.45.2.58.h
@@ -0,0 +1,77 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.45.2.58
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_45_2_58_H_
+#define _RGXCORE_KM_4_45_2_58_H_
+
+/***** Automatically generated file (06/02/2017 07:01:06): Do not edit manually ********************/
+/***** Timestamp:  (06/02/2017 07:01:06)************************************************************/
+/***** CS: @3547765 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.45.2.58 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 45
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 58
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_45_2_58_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.46.6.62.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.46.6.62.h
new file mode 100644
index 0000000..d196dd0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_4.46.6.62.h
@@ -0,0 +1,77 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.46.6.62
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_46_6_62_H_
+#define _RGXCORE_KM_4_46_6_62_H_
+
+/***** Automatically generated file (06/02/2017 07:01:05): Do not edit manually ********************/
+/***** Timestamp:  (06/02/2017 07:01:05)************************************************************/
+/***** CS: @4015666 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.46.6.62 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 46
+#define RGX_BVNC_KM_N 6
+#define RGX_BVNC_KM_C 62
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_46_6_62_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_5.9.1.46.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_5.9.1.46.h
new file mode 100644
index 0000000..ac693ff
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_5.9.1.46.h
@@ -0,0 +1,75 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 5.9.1.46
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_5_9_1_46_H_
+#define _RGXCORE_KM_5_9_1_46_H_
+
+/***** Automatically generated file (20/02/2017 07:01:19): Do not edit manually ********************/
+/***** Timestamp:  (20/02/2017 07:01:19)************************************************************/
+/***** CS: @2967148 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 5.9.1.46 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 5
+#define RGX_BVNC_KM_V 9
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 46
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+#define FIX_HW_BRN_43276
+#define FIX_HW_BRN_44871
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+
+
+
+#endif /* _RGXCORE_KM_5_9_1_46_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_6.34.4.35.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_6.34.4.35.h
new file mode 100644
index 0000000..7e4f836
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_6.34.4.35.h
@@ -0,0 +1,77 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 6.34.4.35
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_6_34_4_35_H_
+#define _RGXCORE_KM_6_34_4_35_H_
+
+/***** Automatically generated file (06/02/2017 07:01:05): Do not edit manually ********************/
+/***** Timestamp:  (06/02/2017 07:01:05)************************************************************/
+/***** CS: @3533654 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 6.34.4.35 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 6
+#define RGX_BVNC_KM_V 34
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 35
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_6_34_4_35_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_8.48.2.39.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_8.48.2.39.h
new file mode 100644
index 0000000..0e4e85e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/cores/rgxcore_km_8.48.2.39.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 8.48.2.39
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_8_48_2_39_H_
+#define _RGXCORE_KM_8_48_2_39_H_
+
+/***** Automatically generated file (23/02/2017 15:15:18): Do not edit manually ********************/
+/***** Timestamp:  (23/02/2017 15:15:18)************************************************************/
+/***** CS: @3753485 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 8.48.2.39 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 8
+#define RGX_BVNC_KM_V 48
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 39
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_52563
+#define FIX_HW_BRN_62204
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_45914
+#define HW_ERN_46066
+#define HW_ERN_47025
+
+
+
+#endif /* _RGXCORE_KM_8_48_2_39_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgx_bvnc_defs_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgx_bvnc_defs_km.h
new file mode 100644
index 0000000..0e749c6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgx_bvnc_defs_km.h
@@ -0,0 +1,362 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgx_bvnc_defs_km.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/***************************************************
+*       Auto generated file by rgxbvnc_tablegen.py *
+*       This file should not be edited manually    *
+****************************************************/
+
+#ifndef _RGX_BVNC_DEFS_KM_H_
+#define _RGX_BVNC_DEFS_KM_H_
+
+#include "img_types.h"
+
+#define   BVNC_FIELD_WIDTH  (16U)
+
+
+/******************************************************************************
+ * Mask and bit-position macros for features without values
+ *****************************************************************************/
+
+#define	RGX_FEATURE_AXI_ACELITE_POS                                 	(0U)
+#define	RGX_FEATURE_AXI_ACELITE_BIT_MASK                            	(IMG_UINT64_C(0x0000000000000001))
+
+#define	RGX_FEATURE_CLUSTER_GROUPING_POS                            	(1U)
+#define	RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK                       	(IMG_UINT64_C(0x0000000000000002))
+
+#define	RGX_FEATURE_COMPUTE_POS                                     	(2U)
+#define	RGX_FEATURE_COMPUTE_BIT_MASK                                	(IMG_UINT64_C(0x0000000000000004))
+
+#define	RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS                      	(3U)
+#define	RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK                 	(IMG_UINT64_C(0x0000000000000008))
+
+#define	RGX_FEATURE_COMPUTE_OVERLAP_POS                             	(4U)
+#define	RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK                        	(IMG_UINT64_C(0x0000000000000010))
+
+#define	RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS               	(5U)
+#define	RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK          	(IMG_UINT64_C(0x0000000000000020))
+
+#define	RGX_FEATURE_DYNAMIC_DUST_POWER_POS                          	(6U)
+#define	RGX_FEATURE_DYNAMIC_DUST_POWER_BIT_MASK                     	(IMG_UINT64_C(0x0000000000000040))
+
+#define	RGX_FEATURE_FASTRENDER_DM_POS                               	(7U)
+#define	RGX_FEATURE_FASTRENDER_DM_BIT_MASK                          	(IMG_UINT64_C(0x0000000000000080))
+
+#define	RGX_FEATURE_GPU_CPU_COHERENCY_POS                           	(8U)
+#define	RGX_FEATURE_GPU_CPU_COHERENCY_BIT_MASK                      	(IMG_UINT64_C(0x0000000000000100))
+
+#define	RGX_FEATURE_GPU_VIRTUALISATION_POS                          	(9U)
+#define	RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK                     	(IMG_UINT64_C(0x0000000000000200))
+
+#define	RGX_FEATURE_GS_RTA_SUPPORT_POS                              	(10U)
+#define	RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK                         	(IMG_UINT64_C(0x0000000000000400))
+
+#define	RGX_FEATURE_META_DMA_POS                                    	(11U)
+#define	RGX_FEATURE_META_DMA_BIT_MASK                               	(IMG_UINT64_C(0x0000000000000800))
+
+#define	RGX_FEATURE_MIPS_POS                                        	(12U)
+#define	RGX_FEATURE_MIPS_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000001000))
+
+#define	RGX_FEATURE_PBE2_IN_XE_POS                                  	(13U)
+#define	RGX_FEATURE_PBE2_IN_XE_BIT_MASK                             	(IMG_UINT64_C(0x0000000000002000))
+
+#define	RGX_FEATURE_PBVNC_COREID_REG_POS                            	(14U)
+#define	RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK                       	(IMG_UINT64_C(0x0000000000004000))
+
+#define	RGX_FEATURE_PDS_PER_DUST_POS                                	(15U)
+#define	RGX_FEATURE_PDS_PER_DUST_BIT_MASK                           	(IMG_UINT64_C(0x0000000000008000))
+
+#define	RGX_FEATURE_PDS_TEMPSIZE8_POS                               	(16U)
+#define	RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK                          	(IMG_UINT64_C(0x0000000000010000))
+
+#define	RGX_FEATURE_PERFBUS_POS                                     	(17U)
+#define	RGX_FEATURE_PERFBUS_BIT_MASK                                	(IMG_UINT64_C(0x0000000000020000))
+
+#define	RGX_FEATURE_RAY_TRACING_POS                                 	(18U)
+#define	RGX_FEATURE_RAY_TRACING_BIT_MASK                            	(IMG_UINT64_C(0x0000000000040000))
+
+#define	RGX_FEATURE_ROGUEXE_POS                                     	(19U)
+#define	RGX_FEATURE_ROGUEXE_BIT_MASK                                	(IMG_UINT64_C(0x0000000000080000))
+
+#define	RGX_FEATURE_S7_CACHE_HIERARCHY_POS                          	(20U)
+#define	RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK                     	(IMG_UINT64_C(0x0000000000100000))
+
+#define	RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS                       	(21U)
+#define	RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK                  	(IMG_UINT64_C(0x0000000000200000))
+
+#define	RGX_FEATURE_SCALABLE_VDM_GPP_POS                            	(22U)
+#define	RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK                       	(IMG_UINT64_C(0x0000000000400000))
+
+#define	RGX_FEATURE_SIGNAL_SNOOPING_POS                             	(23U)
+#define	RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK                        	(IMG_UINT64_C(0x0000000000800000))
+
+#define	RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_POS            	(24U)
+#define	RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_BIT_MASK       	(IMG_UINT64_C(0x0000000001000000))
+
+#define	RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_POS         	(25U)
+#define	RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_BIT_MASK    	(IMG_UINT64_C(0x0000000002000000))
+
+#define	RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_POS         	(26U)
+#define	RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_BIT_MASK    	(IMG_UINT64_C(0x0000000004000000))
+
+#define	RGX_FEATURE_SINGLE_BIF_POS                                  	(27U)
+#define	RGX_FEATURE_SINGLE_BIF_BIT_MASK                             	(IMG_UINT64_C(0x0000000008000000))
+
+#define	RGX_FEATURE_SLCSIZE8_POS                                    	(28U)
+#define	RGX_FEATURE_SLCSIZE8_BIT_MASK                               	(IMG_UINT64_C(0x0000000010000000))
+
+#define	RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_POS                 	(29U)
+#define	RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_BIT_MASK            	(IMG_UINT64_C(0x0000000020000000))
+
+#define	RGX_FEATURE_SLC_VIVT_POS                                    	(30U)
+#define	RGX_FEATURE_SLC_VIVT_BIT_MASK                               	(IMG_UINT64_C(0x0000000040000000))
+
+#define	RGX_FEATURE_SYS_BUS_SECURE_RESET_POS                        	(31U)
+#define	RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK                   	(IMG_UINT64_C(0x0000000080000000))
+
+#define	RGX_FEATURE_TESSELLATION_POS                                	(32U)
+#define	RGX_FEATURE_TESSELLATION_BIT_MASK                           	(IMG_UINT64_C(0x0000000100000000))
+
+#define	RGX_FEATURE_TLA_POS                                         	(33U)
+#define	RGX_FEATURE_TLA_BIT_MASK                                    	(IMG_UINT64_C(0x0000000200000000))
+
+#define	RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS         	(34U)
+#define	RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK    	(IMG_UINT64_C(0x0000000400000000))
+
+#define	RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS                     	(35U)
+#define	RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK                	(IMG_UINT64_C(0x0000000800000000))
+
+#define	RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_POS                  	(36U)
+#define	RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK             	(IMG_UINT64_C(0x0000001000000000))
+
+#define	RGX_FEATURE_VDM_DRAWINDIRECT_POS                            	(37U)
+#define	RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK                       	(IMG_UINT64_C(0x0000002000000000))
+
+#define	RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS                        	(38U)
+#define	RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK                   	(IMG_UINT64_C(0x0000004000000000))
+
+#define	RGX_FEATURE_XE_MEMORY_HIERARCHY_POS                         	(39U)
+#define	RGX_FEATURE_XE_MEMORY_HIERARCHY_BIT_MASK                    	(IMG_UINT64_C(0x0000008000000000))
+
+#define	RGX_FEATURE_XT_TOP_INFRASTRUCTURE_POS                       	(40U)
+#define	RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK                  	(IMG_UINT64_C(0x0000010000000000))
+
+
+/******************************************************************************
+ * Mask and bit-position macros for features with values
+ *****************************************************************************/
+
+#define	RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS                   	(0U)
+#define	RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_BIT_MASK              	(IMG_UINT64_C(0x0000000000000001))
+
+#define	RGX_FEATURE_FBCDC_ALGORITHM_POS                             	(1U)
+#define	RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK                        	(IMG_UINT64_C(0x0000000000000006))
+
+#define	RGX_FEATURE_FBCDC_ARCHITECTURE_POS                          	(3U)
+#define	RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK                     	(IMG_UINT64_C(0x0000000000000018))
+
+#define	RGX_FEATURE_META_POS                                        	(5U)
+#define	RGX_FEATURE_META_BIT_MASK                                   	(IMG_UINT64_C(0x00000000000000E0))
+
+#define	RGX_FEATURE_META_COREMEM_BANKS_POS                          	(8U)
+#define	RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK                     	(IMG_UINT64_C(0x0000000000000300))
+
+#define	RGX_FEATURE_META_COREMEM_SIZE_POS                           	(10U)
+#define	RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK                      	(IMG_UINT64_C(0x0000000000001C00))
+
+#define	RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS                      	(13U)
+#define	RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK                 	(IMG_UINT64_C(0x0000000000002000))
+
+#define	RGX_FEATURE_NUM_CLUSTERS_POS                                	(14U)
+#define	RGX_FEATURE_NUM_CLUSTERS_BIT_MASK                           	(IMG_UINT64_C(0x000000000003C000))
+
+#define	RGX_FEATURE_NUM_ISP_IPP_PIPES_POS                           	(18U)
+#define	RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK                      	(IMG_UINT64_C(0x00000000003C0000))
+
+#define	RGX_FEATURE_NUM_RASTER_PIPES_POS                            	(22U)
+#define	RGX_FEATURE_NUM_RASTER_PIPES_BIT_MASK                       	(IMG_UINT64_C(0x0000000000C00000))
+
+#define	RGX_FEATURE_PHYS_BUS_WIDTH_POS                              	(24U)
+#define	RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK                         	(IMG_UINT64_C(0x0000000007000000))
+
+#define	RGX_FEATURE_SCALABLE_TE_ARCH_POS                            	(27U)
+#define	RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK                       	(IMG_UINT64_C(0x0000000018000000))
+
+#define	RGX_FEATURE_SCALABLE_VCE_POS                                	(29U)
+#define	RGX_FEATURE_SCALABLE_VCE_BIT_MASK                           	(IMG_UINT64_C(0x00000000E0000000))
+
+#define	RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_POS             	(32U)
+#define	RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_BIT_MASK        	(IMG_UINT64_C(0x0000000300000000))
+
+#define	RGX_FEATURE_SLC_BANKS_POS                                   	(34U)
+#define	RGX_FEATURE_SLC_BANKS_BIT_MASK                              	(IMG_UINT64_C(0x0000000C00000000))
+
+#define	RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS                    	(36U)
+#define	RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK               	(IMG_UINT64_C(0x0000003000000000))
+
+#define	RGX_FEATURE_SLC_SIZE_IN_BYTES_POS                           	(38U)
+#define	RGX_FEATURE_SLC_SIZE_IN_BYTES_BIT_MASK                      	(IMG_UINT64_C(0x000001C000000000))
+
+#define	RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS                       	(41U)
+#define	RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK                  	(IMG_UINT64_C(0x0000060000000000))
+
+#define	RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS                  	(43U)
+#define	RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK             	(IMG_UINT64_C(0x0000080000000000))
+
+
+/******************************************************************************
+ * Mask and bit-position macros for ERNs and BRNs
+ *****************************************************************************/
+
+#define	HW_ERN_36400_POS                                            	(0U)
+#define	HW_ERN_36400_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000000001))
+
+#define	FIX_HW_BRN_37200_POS                                        	(1U)
+#define	FIX_HW_BRN_37200_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000002))
+
+#define	FIX_HW_BRN_37918_POS                                        	(2U)
+#define	FIX_HW_BRN_37918_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000004))
+
+#define	FIX_HW_BRN_38344_POS                                        	(3U)
+#define	FIX_HW_BRN_38344_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000008))
+
+#define	HW_ERN_41805_POS                                            	(4U)
+#define	HW_ERN_41805_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000000010))
+
+#define	HW_ERN_42290_POS                                            	(5U)
+#define	HW_ERN_42290_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000000020))
+
+#define	FIX_HW_BRN_42321_POS                                        	(6U)
+#define	FIX_HW_BRN_42321_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000040))
+
+#define	FIX_HW_BRN_42480_POS                                        	(7U)
+#define	FIX_HW_BRN_42480_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000080))
+
+#define	HW_ERN_42606_POS                                            	(8U)
+#define	HW_ERN_42606_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000000100))
+
+#define	FIX_HW_BRN_43276_POS                                        	(9U)
+#define	FIX_HW_BRN_43276_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000200))
+
+#define	FIX_HW_BRN_44455_POS                                        	(10U)
+#define	FIX_HW_BRN_44455_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000400))
+
+#define	FIX_HW_BRN_44871_POS                                        	(11U)
+#define	FIX_HW_BRN_44871_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000000800))
+
+#define	HW_ERN_44885_POS                                            	(12U)
+#define	HW_ERN_44885_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000001000))
+
+#define	HW_ERN_45914_POS                                            	(13U)
+#define	HW_ERN_45914_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000002000))
+
+#define	HW_ERN_46066_POS                                            	(14U)
+#define	HW_ERN_46066_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000004000))
+
+#define	HW_ERN_47025_POS                                            	(15U)
+#define	HW_ERN_47025_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000008000))
+
+#define	HW_ERN_49144_POS                                            	(16U)
+#define	HW_ERN_49144_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000010000))
+
+#define	HW_ERN_50539_POS                                            	(17U)
+#define	HW_ERN_50539_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000020000))
+
+#define	FIX_HW_BRN_50767_POS                                        	(18U)
+#define	FIX_HW_BRN_50767_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000040000))
+
+#define	FIX_HW_BRN_51281_POS                                        	(19U)
+#define	FIX_HW_BRN_51281_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000080000))
+
+#define	HW_ERN_51468_POS                                            	(20U)
+#define	HW_ERN_51468_BIT_MASK                                       	(IMG_UINT64_C(0x0000000000100000))
+
+#define	FIX_HW_BRN_52402_POS                                        	(21U)
+#define	FIX_HW_BRN_52402_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000200000))
+
+#define	FIX_HW_BRN_52563_POS                                        	(22U)
+#define	FIX_HW_BRN_52563_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000400000))
+
+#define	FIX_HW_BRN_54141_POS                                        	(23U)
+#define	FIX_HW_BRN_54141_BIT_MASK                                   	(IMG_UINT64_C(0x0000000000800000))
+
+#define	FIX_HW_BRN_54441_POS                                        	(24U)
+#define	FIX_HW_BRN_54441_BIT_MASK                                   	(IMG_UINT64_C(0x0000000001000000))
+
+#define	FIX_HW_BRN_55091_POS                                        	(25U)
+#define	FIX_HW_BRN_55091_BIT_MASK                                   	(IMG_UINT64_C(0x0000000002000000))
+
+#define	FIX_HW_BRN_57193_POS                                        	(26U)
+#define	FIX_HW_BRN_57193_BIT_MASK                                   	(IMG_UINT64_C(0x0000000004000000))
+
+#define	FIX_HW_BRN_57289_POS                                        	(27U)
+#define	FIX_HW_BRN_57289_BIT_MASK                                   	(IMG_UINT64_C(0x0000000008000000))
+
+#define	HW_ERN_57596_POS                                            	(28U)
+#define	HW_ERN_57596_BIT_MASK                                       	(IMG_UINT64_C(0x0000000010000000))
+
+#define	FIX_HW_BRN_60084_POS                                        	(29U)
+#define	FIX_HW_BRN_60084_BIT_MASK                                   	(IMG_UINT64_C(0x0000000020000000))
+
+#define	HW_ERN_61389_POS                                            	(30U)
+#define	HW_ERN_61389_BIT_MASK                                       	(IMG_UINT64_C(0x0000000040000000))
+
+#define	FIX_HW_BRN_61450_POS                                        	(31U)
+#define	FIX_HW_BRN_61450_BIT_MASK                                   	(IMG_UINT64_C(0x0000000080000000))
+
+#define	FIX_HW_BRN_62204_POS                                        	(32U)
+#define	FIX_HW_BRN_62204_BIT_MASK                                   	(IMG_UINT64_C(0x0000000100000000))
+
+#define	FIX_HW_BRN_63027_POS                                        	(33U)
+#define	FIX_HW_BRN_63027_BIT_MASK                                   	(IMG_UINT64_C(0x0000000200000000))
+
+#define	FIX_HW_BRN_63142_POS                                        	(34U)
+#define	FIX_HW_BRN_63142_BIT_MASK                                   	(IMG_UINT64_C(0x0000000400000000))
+
+#define	FIX_HW_BRN_65273_POS                                        	(35U)
+#define	FIX_HW_BRN_65273_BIT_MASK                                   	(IMG_UINT64_C(0x0000000800000000))
+
+
+
+#endif /*_RGX_BVNC_DEFS_KM_H_ */
+
+
+
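rgx_bvnc_defs_km.h gives every boolean feature, ERN and BRN a bit position plus a matching 64-bit mask, so a core's capabilities fit in a few packed IMG_UINT64 words; features with values occupy multi-bit fields instead. A hedged sketch of the lookups this encoding supports; the helper and parameter names are illustrative, not PVR driver API:

#include "img_types.h"
#include "rgx_bvnc_defs_km.h"

/* Boolean feature: a single bit tested against its mask. */
static int CoreHasCompute(IMG_UINT64 ui64Features)
{
	return (ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK) != 0;
}

/* Feature with a value: a multi-bit field extracted with mask and shift.
 * The result presumably indexes the matching aui32_*_values[] array in
 * rgx_bvnc_table_km.h below. */
static IMG_UINT32 CoreNumClustersIdx(IMG_UINT64 ui64FeatureValues)
{
	return (IMG_UINT32)((ui64FeatureValues & RGX_FEATURE_NUM_CLUSTERS_BIT_MASK)
	                    >> RGX_FEATURE_NUM_CLUSTERS_POS);
}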
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgx_bvnc_table_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgx_bvnc_table_km.h
new file mode 100644
index 0000000..df5e5bf
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgx_bvnc_table_km.h
@@ -0,0 +1,170 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgx_bvnc_table_km.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/***************************************************
+*       Auto generated file by rgxbvnc_tablegen.py *
+*       This file should not be edited manually    *
+****************************************************/
+
+#ifndef _RGX_BVNC_TABLE_KM_H_
+#define _RGX_BVNC_TABLE_KM_H_
+
+#include "img_types.h"
+#include "rgxdefs_km.h"
+
+#define	RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX	(1)
+static IMG_UINT32 aui32_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {2, };
+
+#define	RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX	(3)
+static IMG_UINT32 aui32_RGX_FEATURE_FBCDC_ALGORITHM_values[RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX] = {1, 2, 3, };
+
+#define	RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX	(3)
+static IMG_UINT32 aui32_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {1, 2, 3, };
+
+#define	RGX_FEATURE_META_MAX_VALUE_IDX	(4)
+static IMG_UINT32 aui32_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {LTP217, LTP218, MTP218, MTP219, };
+
+#define	RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX	(2)
+static IMG_UINT32 aui32_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {3, 8, };
+
+#define	RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX	(5)
+static IMG_UINT32 aui32_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {0, 32, 48, 64, 256, };
+
+#define	RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX	(1)
+static IMG_UINT32 aui32_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = {4, };
+
+#define	RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX	(8)
+static IMG_UINT32 aui32_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {1, 2, 3, 4, 6, 8, 12, 16, };
+
+#define	RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX	(12)
+static IMG_UINT32 aui32_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {1, 2, 3, 4, 6, 7, 8, 12, 14, 16, 24, 32, };
+
+#define	RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX	(2)
+static IMG_UINT32 aui32_RGX_FEATURE_NUM_RASTER_PIPES_values[RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX] = {1, 2, };
+
+#define	RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX	(4)
+static IMG_UINT32 aui32_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {32, 36, 38, 40, };
+
+#define	RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX	(3)
+static IMG_UINT32 aui32_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {1, 2, 4, };
+
+#define	RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX	(4)
+static IMG_UINT32 aui32_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {1, 2, 3, 4, };
+
+#define	RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX	(2)
+static IMG_UINT32 aui32_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values[RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX] = {1, 2, };
+
+#define	RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX	(3)
+static IMG_UINT32 aui32_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {1, 2, 4, };
+
+#define	RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX	(2)
+static IMG_UINT32 aui32_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX] = {512, 1024, };
+
+#define	RGX_FEATURE_SLC_SIZE_IN_BYTES_MAX_VALUE_IDX	(6)
+static IMG_UINT32 aui32_RGX_FEATURE_SLC_SIZE_IN_BYTES_values[RGX_FEATURE_SLC_SIZE_IN_BYTES_MAX_VALUE_IDX] = {0, 16, 32, 64, 128, 256, };
+
+#define	RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX	(3)
+static IMG_UINT32 aui32_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {128, 256, 512, };
+
+#define	RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX	(1)
+static IMG_UINT32 aui32_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values[RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX] = {40, };
+
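+
+/* Each aui32_* array above maps a small per-feature index onto the real
+   hardware value; a minimal decode sketch, assuming the index has already
+   been unpacked from the gaFeatures words below (the packing layout itself
+   is not spelled out in this header):
+
+       IMG_UINT32 idx = 3;
+       IMG_UINT32 ui32Clusters = aui32_RGX_FEATURE_NUM_CLUSTERS_values[idx];
+       (with idx == 3 this yields 4)
+*/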
+
+IMG_UINT64    gaFeatures[][3]=
+{
+	{ 0x000100000002001e, 0x0000000200020415, 0x0000010003084040 },	/* 1.0.2.30 */
+	{ 0x0001000000040005, 0x0000000200020414, 0x000001000308c040 },	/* 1.0.4.5 */
+	{ 0x0001000000040006, 0x0000000200020414, 0x000001000308c040 },	/* 1.0.4.6 */
+	{ 0x000100000004000c, 0x0000000200020414, 0x000001400308c040 },	/* 1.0.4.12 */
+	{ 0x0001000000040013, 0x0000000200020415, 0x000001000308c040 },	/* 1.0.4.19 */
+	{ 0x0004000000020039, 0x000001160002065f, 0x000001000318442a },	/* 4.0.2.57 */
+	{ 0x000400000002003a, 0x000001160002065f, 0x000001000318442a },	/* 4.0.2.58 */
+	{ 0x0004000000040037, 0x000001160002045e, 0x000001000318c42a },	/* 4.0.4.55 */
+	{ 0x000400000006003e, 0x000001160002065f, 0x000001080319042a },	/* 4.0.6.62 */
+	{ 0x000500000001002e, 0x00000000000a0445, 0x0000004003000002 },	/* 5.0.1.46 */
+	{ 0x0006000000040023, 0x000001160006045f, 0x000001000318c42a },	/* 6.0.4.35 */
+	{ 0x0008000000020027, 0x0000007d40738e3f, 0x00000004030c4d32 },	/* 8.0.2.39 */
+	{ 0x000c000000010014, 0x0000000000080005, 0x0000000003000000 },	/* 12.0.1.20 */
+	{ 0x000f000000010040, 0x00000000000a0645, 0x000000c003040002 },	/* 15.0.1.64 */
+	{ 0x0016000000160017, 0x000000008b0a7605, 0x0000004000000000 },	/* 22.0.22.23 */
+	{ 0x0016000000160019, 0x000000008b0a7605, 0x0000004000040000 },	/* 22.0.22.25 */
+	{ 0x001600000016001d, 0x000000008b0a7605, 0x0000004000040000 },	/* 22.0.22.29 */
+	{ 0x0016000000360018, 0x000000000b0a7605, 0x000000c000080000 },	/* 22.0.54.24 */
+	{ 0x001600000036001e, 0x000000008b0a7605, 0x000000c0000c0000 },	/* 22.0.54.30 */
+};
+
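+
+/* The first word of each gaFeatures row packs the B.V.N.C identifier as four
+   16-bit fields, which the trailing comments confirm: B=1, V=0, N=2, C=30
+   packs to 0x000100000002001e. A sketch of building that lookup key (the
+   helper name is illustrative, not part of the driver):
+
+       static IMG_UINT64 MakeBVNC64(IMG_UINT64 b, IMG_UINT64 v,
+                                    IMG_UINT64 n, IMG_UINT64 c)
+       {
+           return (b << 48) | (v << 32) | (n << 16) | c;
+       }
+
+       MakeBVNC64(1, 0, 2, 30) == IMG_UINT64_C(0x000100000002001e)
+*/
+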
+IMG_UINT64    gaErnsBrns[][2]=
+{
+	{ 0x0001002700040013, 0x0000000001000449 },	/* 1.39.4.19 */
+	{ 0x000100480004000c, 0x0000000001000401 },	/* 1.72.4.12 */
+	{ 0x0001004b0002001e, 0x0000000001000441 },	/* 1.75.2.30 */
+	{ 0x0001004c00040006, 0x0000000001000489 },	/* 1.76.4.6 */
+	{ 0x0001005200040005, 0x0000000001000401 },	/* 1.82.4.5 */
+	{ 0x0004001f00040037, 0x0000000405040121 },	/* 4.31.4.55 */
+	{ 0x0004002900020039, 0x0000000405040121 },	/* 4.41.2.57 */
+	{ 0x0004002b0006003e, 0x0000000405040121 },	/* 4.43.6.62 */
+	{ 0x0004002d0002003a, 0x0000000405000121 },	/* 4.45.2.58 */
+	{ 0x0004002e0006003e, 0x0000000404040121 },	/* 4.46.6.62 */
+	{ 0x000500090001002e, 0x0000000000000a09 },	/* 5.9.1.46 */
+	{ 0x0006002200040023, 0x0000000405000121 },	/* 6.34.4.35 */
+	{ 0x0008003000020027, 0x000000010040e121 },	/* 8.48.2.39 */
+	{ 0x000c000500010014, 0x0000000800000a01 },	/* 12.5.1.20 */
+	{ 0x000f000500010040, 0x0000000000000a01 },	/* 15.5.1.64 */
+	{ 0x0016001a00360018, 0x0000000ab0000b01 },	/* 22.26.54.24 */
+	{ 0x0016001c00160017, 0x0000000ab0000b01 },	/* 22.28.22.23 */
+	{ 0x001600280036001e, 0x0000000ab0000b01 },	/* 22.40.54.30 */
+	{ 0x0016002c00160019, 0x0000000ab0000b01 },	/* 22.44.22.25 */
+	{ 0x001600300036001e, 0x0000000ab0000b01 },	/* 22.48.54.30 */
+	{ 0x001600320016001d, 0x0000000ab0000b01 },	/* 22.50.22.29 */
+	{ 0x0016003a00160019, 0x0000000a50000b01 },	/* 22.58.22.25 */
+	{ 0x0016003b0036001e, 0x0000000a50000b01 },	/* 22.59.54.30 */
+	{ 0x0016003c0016001d, 0x0000000a50000b01 },	/* 22.60.22.29 */
+	{ 0x001600430036001e, 0x0000000a70000b01 },	/* 22.67.54.30 */
+	{ 0x001600440036001e, 0x0000000a50000b01 },	/* 22.68.54.30 */
+	{ 0x0016004500160019, 0x0000000a50000b01 },	/* 22.69.22.25 */
+	{ 0x0016004b00160019, 0x0000000a50000b01 },	/* 22.75.22.25 */
+};
+
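+
+/* gaErnsBrns is keyed the same way, with the V field now significant
+   (4.31.4.55 packs to 0x0004001f00040037), and the second word holding the
+   ERN/BRN flags. A lookup sketch; that the table is searched linearly is an
+   assumption, not stated here:
+
+       static IMG_UINT64 LookupErnsBrns(IMG_UINT64 ui64BVNC)
+       {
+           IMG_UINT32 i;
+           for (i = 0; i < sizeof(gaErnsBrns) / sizeof(gaErnsBrns[0]); i++)
+           {
+               if (gaErnsBrns[i][0] == ui64BVNC)
+                   return gaErnsBrns[i][1];
+           }
+           return 0;
+       }
+*/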
+
+#endif /* _RGX_BVNC_TABLE_KM_H_ */
+
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgx_cr_defs_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgx_cr_defs_km.h
new file mode 100644
index 0000000..03059f1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgx_cr_defs_km.h
@@ -0,0 +1,5498 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgx_cr_defs_km.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+
+#if !defined(__IMG_EXPLICIT_INCLUDE_HWDEFS)
+#error This file may only be included when __IMG_EXPLICIT_INCLUDE_HWDEFS is explicitly defined
+#endif
+
+#ifndef _RGX_CR_DEFS_KM_H_
+#define _RGX_CR_DEFS_KM_H_
+
+#include "img_types.h"
+
+
+#define RGX_CR_DEFS_KM_REVISION 1
+
+/*
+    Register RGX_CR_RASTERISATION_INDIRECT
+*/
+#define RGX_CR_RASTERISATION_INDIRECT                     (0x8238U)
+#define RGX_CR_RASTERISATION_INDIRECT_MASKFULL            (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT       (0U)
+#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK      (0XFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_PBE_INDIRECT
+*/
+#define RGX_CR_PBE_INDIRECT                               (0x83E0U)
+#define RGX_CR_PBE_INDIRECT_MASKFULL                      (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT                 (0U)
+#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK                (0XFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_PBE_PERF_INDIRECT
+*/
+#define RGX_CR_PBE_PERF_INDIRECT                          (0x83D8U)
+#define RGX_CR_PBE_PERF_INDIRECT_MASKFULL                 (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT            (0U)
+#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK           (0XFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_TPU_PERF_INDIRECT
+*/
+#define RGX_CR_TPU_PERF_INDIRECT                          (0x83F0U)
+#define RGX_CR_TPU_PERF_INDIRECT_MASKFULL                 (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT            (0U)
+#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK           (0XFFFFFFF8U)
+
+
+/*
+    Register RGX_CR_RASTERISATION_PERF_INDIRECT
+*/
+#define RGX_CR_RASTERISATION_PERF_INDIRECT                (0x8318U)
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_MASKFULL       (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT  (0U)
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_TPU_MCU_L0_PERF_INDIRECT
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT                   (0x8028U)
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL          (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT     (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK    (0XFFFFFFF8U)
+
+
+/*
+    Register RGX_CR_USC_PERF_INDIRECT
+*/
+#define RGX_CR_USC_PERF_INDIRECT                          (0x8030U)
+#define RGX_CR_USC_PERF_INDIRECT_MASKFULL                 (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT            (0U)
+#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK           (0XFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_BLACKPEARL_INDIRECT
+*/
+#define RGX_CR_BLACKPEARL_INDIRECT                        (0x8388U)
+#define RGX_CR_BLACKPEARL_INDIRECT_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK         (0XFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_BLACKPEARL_PERF_INDIRECT
+*/
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT                   (0x83F8U)
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL          (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT     (0U)
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK    (0XFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_TEXAS3_PERF_INDIRECT
+*/
+#define RGX_CR_TEXAS3_PERF_INDIRECT                       (0x83D0U)
+#define RGX_CR_TEXAS3_PERF_INDIRECT_MASKFULL              (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT         (0U)
+#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK        (0XFFFFFFF8U)
+
+
+/*
+    Register RGX_CR_TEXAS_PERF_INDIRECT
+*/
+#define RGX_CR_TEXAS_PERF_INDIRECT                        (0x8288U)
+#define RGX_CR_TEXAS_PERF_INDIRECT_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT          (0U)
+#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK         (0XFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_BX_TU_PERF_INDIRECT
+*/
+#define RGX_CR_BX_TU_PERF_INDIRECT                        (0xC900U)
+#define RGX_CR_BX_TU_PERF_INDIRECT_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK         (0XFFFFFFFCU)
+
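+
+/* The *_INDIRECT registers above hold only a small unit index, as their
+   MASKFULL values (2-4 writable bits) show. A read-modify-write sketch using
+   the SHIFT/CLRMSK pair; read32()/write32() and pvRegs are placeholder MMIO
+   accessors, and the assumption that the index selects which unit instance
+   subsequent banked accesses address is not stated in this header:
+
+       IMG_UINT32 ui32Tpu = 0;
+       IMG_UINT32 ui32Val = read32(pvRegs + RGX_CR_TPU_PERF_INDIRECT);
+       ui32Val &= RGX_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK;
+       ui32Val |= (ui32Tpu << RGX_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT);
+       write32(pvRegs + RGX_CR_TPU_PERF_INDIRECT, ui32Val);
+*/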
+
+/*
+    Register RGX_CR_CLK_CTRL
+*/
+#define RGX_CR_CLK_CTRL                                   (0x0000U)
+#define RGX_CR_CLK_CTRL__PBE2_XE__MASKFULL                (IMG_UINT64_C(0xFFFFFF003F3FFFFF))
+#define RGX_CR_CLK_CTRL__S7_TOP__MASKFULL                 (IMG_UINT64_C(0xCFCF03000F3F3F0F))
+#define RGX_CR_CLK_CTRL_MASKFULL                          (IMG_UINT64_C(0xFFFFFF003F3FFFFF))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_SHIFT                   (62U)
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_CLRMSK                  (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_ON                      (IMG_UINT64_C(0x4000000000000000))  
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_AUTO                    (IMG_UINT64_C(0x8000000000000000))  
+#define RGX_CR_CLK_CTRL_IPP_SHIFT                         (60U)
+#define RGX_CR_CLK_CTRL_IPP_CLRMSK                        (IMG_UINT64_C(0XCFFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_IPP_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_IPP_ON                            (IMG_UINT64_C(0x1000000000000000))  
+#define RGX_CR_CLK_CTRL_IPP_AUTO                          (IMG_UINT64_C(0x2000000000000000))  
+#define RGX_CR_CLK_CTRL_FBC_SHIFT                         (58U)
+#define RGX_CR_CLK_CTRL_FBC_CLRMSK                        (IMG_UINT64_C(0XF3FFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBC_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_FBC_ON                            (IMG_UINT64_C(0x0400000000000000))  
+#define RGX_CR_CLK_CTRL_FBC_AUTO                          (IMG_UINT64_C(0x0800000000000000))  
+#define RGX_CR_CLK_CTRL_FBDC_SHIFT                        (56U)
+#define RGX_CR_CLK_CTRL_FBDC_CLRMSK                       (IMG_UINT64_C(0XFCFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBDC_OFF                          (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_FBDC_ON                           (IMG_UINT64_C(0x0100000000000000))  
+#define RGX_CR_CLK_CTRL_FBDC_AUTO                         (IMG_UINT64_C(0x0200000000000000))  
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_SHIFT                  (54U)
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_CLRMSK                 (IMG_UINT64_C(0XFF3FFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_ON                     (IMG_UINT64_C(0x0040000000000000))  
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_AUTO                   (IMG_UINT64_C(0x0080000000000000))  
+#define RGX_CR_CLK_CTRL_USCS_SHIFT                        (52U)
+#define RGX_CR_CLK_CTRL_USCS_CLRMSK                       (IMG_UINT64_C(0XFFCFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_USCS_OFF                          (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_USCS_ON                           (IMG_UINT64_C(0x0010000000000000))  
+#define RGX_CR_CLK_CTRL_USCS_AUTO                         (IMG_UINT64_C(0x0020000000000000))  
+#define RGX_CR_CLK_CTRL_PBE_SHIFT                         (50U)
+#define RGX_CR_CLK_CTRL_PBE_CLRMSK                        (IMG_UINT64_C(0XFFF3FFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_PBE_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_PBE_ON                            (IMG_UINT64_C(0x0004000000000000))  
+#define RGX_CR_CLK_CTRL_PBE_AUTO                          (IMG_UINT64_C(0x0008000000000000))  
+#define RGX_CR_CLK_CTRL_MCU_L1_SHIFT                      (48U)
+#define RGX_CR_CLK_CTRL_MCU_L1_CLRMSK                     (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L1_OFF                        (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_MCU_L1_ON                         (IMG_UINT64_C(0x0001000000000000))  
+#define RGX_CR_CLK_CTRL_MCU_L1_AUTO                       (IMG_UINT64_C(0x0002000000000000))  
+#define RGX_CR_CLK_CTRL_CDM_SHIFT                         (46U)
+#define RGX_CR_CLK_CTRL_CDM_CLRMSK                        (IMG_UINT64_C(0XFFFF3FFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_CDM_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_CDM_ON                            (IMG_UINT64_C(0x0000400000000000))  
+#define RGX_CR_CLK_CTRL_CDM_AUTO                          (IMG_UINT64_C(0x0000800000000000))  
+#define RGX_CR_CLK_CTRL_SIDEKICK_SHIFT                    (44U)
+#define RGX_CR_CLK_CTRL_SIDEKICK_CLRMSK                   (IMG_UINT64_C(0XFFFFCFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_SIDEKICK_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_SIDEKICK_ON                       (IMG_UINT64_C(0x0000100000000000))  
+#define RGX_CR_CLK_CTRL_SIDEKICK_AUTO                     (IMG_UINT64_C(0x0000200000000000))  
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT                (42U)
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK               (IMG_UINT64_C(0XFFFFF3FFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_OFF                  (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_ON                   (IMG_UINT64_C(0x0000040000000000))  
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_AUTO                 (IMG_UINT64_C(0x0000080000000000))  
+#define RGX_CR_CLK_CTRL_BIF_SHIFT                         (40U)
+#define RGX_CR_CLK_CTRL_BIF_CLRMSK                        (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_BIF_ON                            (IMG_UINT64_C(0x0000010000000000))  
+#define RGX_CR_CLK_CTRL_BIF_AUTO                          (IMG_UINT64_C(0x0000020000000000))  
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT               (28U)
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF                 (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_ON                  (IMG_UINT64_C(0x0000000010000000))  
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO                (IMG_UINT64_C(0x0000000020000000))  
+#define RGX_CR_CLK_CTRL_MCU_L0_SHIFT                      (26U)
+#define RGX_CR_CLK_CTRL_MCU_L0_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L0_OFF                        (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_MCU_L0_ON                         (IMG_UINT64_C(0x0000000004000000))  
+#define RGX_CR_CLK_CTRL_MCU_L0_AUTO                       (IMG_UINT64_C(0x0000000008000000))  
+#define RGX_CR_CLK_CTRL_TPU_SHIFT                         (24U)
+#define RGX_CR_CLK_CTRL_TPU_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_TPU_ON                            (IMG_UINT64_C(0x0000000001000000))  
+#define RGX_CR_CLK_CTRL_TPU_AUTO                          (IMG_UINT64_C(0x0000000002000000))  
+#define RGX_CR_CLK_CTRL_USC_SHIFT                         (20U)
+#define RGX_CR_CLK_CTRL_USC_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_CTRL_USC_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_USC_ON                            (IMG_UINT64_C(0x0000000000100000))  
+#define RGX_CR_CLK_CTRL_USC_AUTO                          (IMG_UINT64_C(0x0000000000200000))  
+#define RGX_CR_CLK_CTRL_TLA_SHIFT                         (18U)
+#define RGX_CR_CLK_CTRL_TLA_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_CTRL_TLA_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_TLA_ON                            (IMG_UINT64_C(0x0000000000040000))  
+#define RGX_CR_CLK_CTRL_TLA_AUTO                          (IMG_UINT64_C(0x0000000000080000))  
+#define RGX_CR_CLK_CTRL_SLC_SHIFT                         (16U)
+#define RGX_CR_CLK_CTRL_SLC_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_CTRL_SLC_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_SLC_ON                            (IMG_UINT64_C(0x0000000000010000))  
+#define RGX_CR_CLK_CTRL_SLC_AUTO                          (IMG_UINT64_C(0x0000000000020000))  
+#define RGX_CR_CLK_CTRL_UVS_SHIFT                         (14U)
+#define RGX_CR_CLK_CTRL_UVS_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define RGX_CR_CLK_CTRL_UVS_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_UVS_ON                            (IMG_UINT64_C(0x0000000000004000))  
+#define RGX_CR_CLK_CTRL_UVS_AUTO                          (IMG_UINT64_C(0x0000000000008000))  
+#define RGX_CR_CLK_CTRL_PDS_SHIFT                         (12U)
+#define RGX_CR_CLK_CTRL_PDS_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define RGX_CR_CLK_CTRL_PDS_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_PDS_ON                            (IMG_UINT64_C(0x0000000000001000))  
+#define RGX_CR_CLK_CTRL_PDS_AUTO                          (IMG_UINT64_C(0x0000000000002000))  
+#define RGX_CR_CLK_CTRL_VDM_SHIFT                         (10U)
+#define RGX_CR_CLK_CTRL_VDM_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL_VDM_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_VDM_ON                            (IMG_UINT64_C(0x0000000000000400))  
+#define RGX_CR_CLK_CTRL_VDM_AUTO                          (IMG_UINT64_C(0x0000000000000800))  
+#define RGX_CR_CLK_CTRL_PM_SHIFT                          (8U)
+#define RGX_CR_CLK_CTRL_PM_CLRMSK                         (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL_PM_OFF                            (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_PM_ON                             (IMG_UINT64_C(0x0000000000000100))  
+#define RGX_CR_CLK_CTRL_PM_AUTO                           (IMG_UINT64_C(0x0000000000000200))  
+#define RGX_CR_CLK_CTRL_GPP_SHIFT                         (6U)
+#define RGX_CR_CLK_CTRL_GPP_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFFF3F))
+#define RGX_CR_CLK_CTRL_GPP_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_GPP_ON                            (IMG_UINT64_C(0x0000000000000040))  
+#define RGX_CR_CLK_CTRL_GPP_AUTO                          (IMG_UINT64_C(0x0000000000000080))  
+#define RGX_CR_CLK_CTRL_TE_SHIFT                          (4U)
+#define RGX_CR_CLK_CTRL_TE_CLRMSK                         (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL_TE_OFF                            (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_TE_ON                             (IMG_UINT64_C(0x0000000000000010))  
+#define RGX_CR_CLK_CTRL_TE_AUTO                           (IMG_UINT64_C(0x0000000000000020))  
+#define RGX_CR_CLK_CTRL_TSP_SHIFT                         (2U)
+#define RGX_CR_CLK_CTRL_TSP_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF3))
+#define RGX_CR_CLK_CTRL_TSP_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_TSP_ON                            (IMG_UINT64_C(0x0000000000000004))  
+#define RGX_CR_CLK_CTRL_TSP_AUTO                          (IMG_UINT64_C(0x0000000000000008))  
+#define RGX_CR_CLK_CTRL_ISP_SHIFT                         (0U)
+#define RGX_CR_CLK_CTRL_ISP_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL_ISP_OFF                           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_CTRL_ISP_ON                            (IMG_UINT64_C(0x0000000000000001))  
+#define RGX_CR_CLK_CTRL_ISP_AUTO                          (IMG_UINT64_C(0x0000000000000002))  
+
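+
+/* A minimal usage sketch for the two-bit OFF/ON/AUTO fields above: programming
+   one clock gate is a mask with the field's CLRMSK followed by OR-ing in one
+   of the three value macros. read64()/write64() are placeholders like the
+   32-bit accessors above, not driver API:
+
+       IMG_UINT64 ui64Clk = read64(pvRegs + RGX_CR_CLK_CTRL);
+       ui64Clk = (ui64Clk & RGX_CR_CLK_CTRL_ISP_CLRMSK) | RGX_CR_CLK_CTRL_ISP_AUTO;
+       write64(pvRegs + RGX_CR_CLK_CTRL, ui64Clk);
+*/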
+
+/*
+    Register RGX_CR_CLK_STATUS
+*/
+#define RGX_CR_CLK_STATUS                                 (0x0008U)
+#define RGX_CR_CLK_STATUS__PBE2_XE__MASKFULL              (IMG_UINT64_C(0x00000001FFF077FF))
+#define RGX_CR_CLK_STATUS__S7_TOP__MASKFULL               (IMG_UINT64_C(0x00000001B3101773))
+#define RGX_CR_CLK_STATUS_MASKFULL                        (IMG_UINT64_C(0x00000001FFF077FF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_SHIFT                  (32U)
+#define RGX_CR_CLK_STATUS_MCU_FBTC_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_MCU_FBTC_RUNNING                (IMG_UINT64_C(0x0000000100000000))  
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_SHIFT                 (31U)
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK                (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING               (IMG_UINT64_C(0x0000000080000000))  
+#define RGX_CR_CLK_STATUS_IPP_SHIFT                       (30U)
+#define RGX_CR_CLK_STATUS_IPP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_CLK_STATUS_IPP_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_IPP_RUNNING                     (IMG_UINT64_C(0x0000000040000000))  
+#define RGX_CR_CLK_STATUS_FBC_SHIFT                       (29U)
+#define RGX_CR_CLK_STATUS_FBC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBC_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_FBC_RUNNING                     (IMG_UINT64_C(0x0000000020000000))  
+#define RGX_CR_CLK_STATUS_FBDC_SHIFT                      (28U)
+#define RGX_CR_CLK_STATUS_FBDC_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBDC_GATED                      (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_FBDC_RUNNING                    (IMG_UINT64_C(0x0000000010000000))  
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_SHIFT                (27U)
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_GATED                (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_RUNNING              (IMG_UINT64_C(0x0000000008000000))  
+#define RGX_CR_CLK_STATUS_USCS_SHIFT                      (26U)
+#define RGX_CR_CLK_STATUS_USCS_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_CLK_STATUS_USCS_GATED                      (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_USCS_RUNNING                    (IMG_UINT64_C(0x0000000004000000))  
+#define RGX_CR_CLK_STATUS_PBE_SHIFT                       (25U)
+#define RGX_CR_CLK_STATUS_PBE_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define RGX_CR_CLK_STATUS_PBE_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_PBE_RUNNING                     (IMG_UINT64_C(0x0000000002000000))  
+#define RGX_CR_CLK_STATUS_MCU_L1_SHIFT                    (24U)
+#define RGX_CR_CLK_STATUS_MCU_L1_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_L1_GATED                    (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_MCU_L1_RUNNING                  (IMG_UINT64_C(0x0000000001000000))  
+#define RGX_CR_CLK_STATUS_CDM_SHIFT                       (23U)
+#define RGX_CR_CLK_STATUS_CDM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_CLK_STATUS_CDM_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_CDM_RUNNING                     (IMG_UINT64_C(0x0000000000800000))  
+#define RGX_CR_CLK_STATUS_SIDEKICK_SHIFT                  (22U)
+#define RGX_CR_CLK_STATUS_SIDEKICK_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define RGX_CR_CLK_STATUS_SIDEKICK_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_SIDEKICK_RUNNING                (IMG_UINT64_C(0x0000000000400000))  
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT              (21U)
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_GATED              (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING            (IMG_UINT64_C(0x0000000000200000))  
+#define RGX_CR_CLK_STATUS_BIF_SHIFT                       (20U)
+#define RGX_CR_CLK_STATUS_BIF_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_BIF_RUNNING                     (IMG_UINT64_C(0x0000000000100000))  
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT             (14U)
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED             (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING           (IMG_UINT64_C(0x0000000000004000))  
+#define RGX_CR_CLK_STATUS_MCU_L0_SHIFT                    (13U)
+#define RGX_CR_CLK_STATUS_MCU_L0_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_CLK_STATUS_MCU_L0_GATED                    (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_MCU_L0_RUNNING                  (IMG_UINT64_C(0x0000000000002000))  
+#define RGX_CR_CLK_STATUS_TPU_SHIFT                       (12U)
+#define RGX_CR_CLK_STATUS_TPU_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_CLK_STATUS_TPU_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_TPU_RUNNING                     (IMG_UINT64_C(0x0000000000001000))  
+#define RGX_CR_CLK_STATUS_USC_SHIFT                       (10U)
+#define RGX_CR_CLK_STATUS_USC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_STATUS_USC_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_USC_RUNNING                     (IMG_UINT64_C(0x0000000000000400))  
+#define RGX_CR_CLK_STATUS_TLA_SHIFT                       (9U)
+#define RGX_CR_CLK_STATUS_TLA_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_STATUS_TLA_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_TLA_RUNNING                     (IMG_UINT64_C(0x0000000000000200))  
+#define RGX_CR_CLK_STATUS_SLC_SHIFT                       (8U)
+#define RGX_CR_CLK_STATUS_SLC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_STATUS_SLC_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_SLC_RUNNING                     (IMG_UINT64_C(0x0000000000000100))  
+#define RGX_CR_CLK_STATUS_UVS_SHIFT                       (7U)
+#define RGX_CR_CLK_STATUS_UVS_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_STATUS_UVS_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_UVS_RUNNING                     (IMG_UINT64_C(0x0000000000000080))  
+#define RGX_CR_CLK_STATUS_PDS_SHIFT                       (6U)
+#define RGX_CR_CLK_STATUS_PDS_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_STATUS_PDS_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_PDS_RUNNING                     (IMG_UINT64_C(0x0000000000000040))  
+#define RGX_CR_CLK_STATUS_VDM_SHIFT                       (5U)
+#define RGX_CR_CLK_STATUS_VDM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_STATUS_VDM_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_VDM_RUNNING                     (IMG_UINT64_C(0x0000000000000020))  
+#define RGX_CR_CLK_STATUS_PM_SHIFT                        (4U)
+#define RGX_CR_CLK_STATUS_PM_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS_PM_GATED                        (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_PM_RUNNING                      (IMG_UINT64_C(0x0000000000000010))  
+#define RGX_CR_CLK_STATUS_GPP_SHIFT                       (3U)
+#define RGX_CR_CLK_STATUS_GPP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_STATUS_GPP_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_GPP_RUNNING                     (IMG_UINT64_C(0x0000000000000008))  
+#define RGX_CR_CLK_STATUS_TE_SHIFT                        (2U)
+#define RGX_CR_CLK_STATUS_TE_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_STATUS_TE_GATED                        (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_TE_RUNNING                      (IMG_UINT64_C(0x0000000000000004))  
+#define RGX_CR_CLK_STATUS_TSP_SHIFT                       (1U)
+#define RGX_CR_CLK_STATUS_TSP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_STATUS_TSP_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_TSP_RUNNING                     (IMG_UINT64_C(0x0000000000000002))  
+#define RGX_CR_CLK_STATUS_ISP_SHIFT                       (0U)
+#define RGX_CR_CLK_STATUS_ISP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS_ISP_GATED                       (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_STATUS_ISP_RUNNING                     (IMG_UINT64_C(0x0000000000000001))  
+
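+
+/* RGX_CR_CLK_STATUS mirrors the CTRL fields with one GATED/RUNNING bit each,
+   so a check is a single masked compare (same placeholder accessors as above):
+
+       IMG_UINT64 ui64Sts = read64(pvRegs + RGX_CR_CLK_STATUS);
+       IMG_BOOL bIspRunning =
+           (ui64Sts & ~RGX_CR_CLK_STATUS_ISP_CLRMSK) == RGX_CR_CLK_STATUS_ISP_RUNNING;
+*/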
+
+/*
+    Register RGX_CR_CORE_ID
+*/
+#define RGX_CR_CORE_ID__PBVNC                             (0x0020U)
+#define RGX_CR_CORE_ID__PBVNC__MASKFULL                   (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT            (48U)
+#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK           (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT           (32U)
+#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK          (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT (16U)
+#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT            (0U)
+#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_CORE_ID
+*/
+#define RGX_CR_CORE_ID                                    (0x0018U)
+#define RGX_CR_CORE_ID_MASKFULL                           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CORE_ID_ID_SHIFT                           (16U)
+#define RGX_CR_CORE_ID_ID_CLRMSK                          (0X0000FFFFU)
+#define RGX_CR_CORE_ID_CONFIG_SHIFT                       (0U)
+#define RGX_CR_CORE_ID_CONFIG_CLRMSK                      (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_CORE_REVISION
+*/
+#define RGX_CR_CORE_REVISION                              (0x0020U)
+#define RGX_CR_CORE_REVISION_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CORE_REVISION_DESIGNER_SHIFT               (24U)
+#define RGX_CR_CORE_REVISION_DESIGNER_CLRMSK              (0X00FFFFFFU)
+#define RGX_CR_CORE_REVISION_MAJOR_SHIFT                  (16U)
+#define RGX_CR_CORE_REVISION_MAJOR_CLRMSK                 (0XFF00FFFFU)
+#define RGX_CR_CORE_REVISION_MINOR_SHIFT                  (8U)
+#define RGX_CR_CORE_REVISION_MINOR_CLRMSK                 (0XFFFF00FFU)
+#define RGX_CR_CORE_REVISION_MAINTENANCE_SHIFT            (0U)
+#define RGX_CR_CORE_REVISION_MAINTENANCE_CLRMSK           (0XFFFFFF00U)
+
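+
+/* Read-only ID fields decode with the inverse pattern: mask with the
+   complement of the CLRMSK, then shift down. Pulling the major revision out,
+   with the same placeholder accessor:
+
+       IMG_UINT32 ui32Rev   = (IMG_UINT32)read64(pvRegs + RGX_CR_CORE_REVISION);
+       IMG_UINT32 ui32Major = (ui32Rev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK)
+                                  >> RGX_CR_CORE_REVISION_MAJOR_SHIFT;
+*/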
+
+/*
+    Register RGX_CR_DESIGNER_REV_FIELD1
+*/
+#define RGX_CR_DESIGNER_REV_FIELD1                        (0x0028U)
+#define RGX_CR_DESIGNER_REV_FIELD1_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT (0U)
+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK (00000000U)
+
+
+/*
+    Register RGX_CR_DESIGNER_REV_FIELD2
+*/
+#define RGX_CR_DESIGNER_REV_FIELD2                        (0x0030U)
+#define RGX_CR_DESIGNER_REV_FIELD2_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT (0U)
+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK (00000000U)
+
+
+/*
+    Register RGX_CR_CHANGESET_NUMBER
+*/
+#define RGX_CR_CHANGESET_NUMBER                           (0x0040U)
+#define RGX_CR_CHANGESET_NUMBER_MASKFULL                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT    (0U)
+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK   (IMG_UINT64_C(0000000000000000))
+
+
+/*
+    Register RGX_CR_CLK_XTPLUS_CTRL
+*/
+#define RGX_CR_CLK_XTPLUS_CTRL                            (0x0080U)
+#define RGX_CR_CLK_XTPLUS_CTRL_MASKFULL                   (IMG_UINT64_C(0x0000003FFFFF0000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_SHIFT                  (36U)
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK                 (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_ON                     (IMG_UINT64_C(0x0000001000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_AUTO                   (IMG_UINT64_C(0x0000002000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT                 (34U)
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK                (IMG_UINT64_C(0XFFFFFFF3FFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_OFF                   (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_ON                    (IMG_UINT64_C(0x0000000400000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_AUTO                  (IMG_UINT64_C(0x0000000800000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_SHIFT                  (32U)
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFCFFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_ON                     (IMG_UINT64_C(0x0000000100000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_AUTO                   (IMG_UINT64_C(0x0000000200000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT              (30U)
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK             (IMG_UINT64_C(0XFFFFFFFF3FFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF                (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_ON                 (IMG_UINT64_C(0x0000000040000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO               (IMG_UINT64_C(0x0000000080000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT                (28U)
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_OFF                  (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_ON                   (IMG_UINT64_C(0x0000000010000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO                 (IMG_UINT64_C(0x0000000020000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT               (26U)
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_OFF                 (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_ON                  (IMG_UINT64_C(0x0000000004000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO                (IMG_UINT64_C(0x0000000008000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT                (24U)
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_OFF                  (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_ON                   (IMG_UINT64_C(0x0000000001000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_AUTO                 (IMG_UINT64_C(0x0000000002000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT           (22U)
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFF3FFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF             (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON              (IMG_UINT64_C(0x0000000000400000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO            (IMG_UINT64_C(0x0000000000800000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT       (20U)
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF         (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON          (IMG_UINT64_C(0x0000000000100000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO        (IMG_UINT64_C(0x0000000000200000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT           (18U)
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF             (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON              (IMG_UINT64_C(0x0000000000040000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO            (IMG_UINT64_C(0x0000000000080000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT             (16U)
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF               (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON                (IMG_UINT64_C(0x0000000000010000))  
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO              (IMG_UINT64_C(0x0000000000020000))  
+
+
+/*
+    Register RGX_CR_CLK_XTPLUS_STATUS
+*/
+#define RGX_CR_CLK_XTPLUS_STATUS                          (0x0088U)
+#define RGX_CR_CLK_XTPLUS_STATUS_MASKFULL                 (IMG_UINT64_C(0x00000000000007FF))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_SHIFT                (10U)
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_GATED                (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_RUNNING              (IMG_UINT64_C(0x0000000000000400))  
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_SHIFT                (9U)
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_GATED                (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_RUNNING              (IMG_UINT64_C(0x0000000000000200))  
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT            (8U)
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED            (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING          (IMG_UINT64_C(0x0000000000000100))  
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT               (7U)
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_GATED               (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING             (IMG_UINT64_C(0x0000000000000080))  
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT              (6U)
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_GATED              (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING            (IMG_UINT64_C(0x0000000000000040))  
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT             (5U)
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_GATED             (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING           (IMG_UINT64_C(0x0000000000000020))  
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT              (4U)
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_GATED              (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING            (IMG_UINT64_C(0x0000000000000010))  
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT         (3U)
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED         (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING       (IMG_UINT64_C(0x0000000000000008))  
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT     (2U)
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED     (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING   (IMG_UINT64_C(0x0000000000000004))  
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT         (1U)
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED         (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING       (IMG_UINT64_C(0x0000000000000002))  
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT           (0U)
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED           (IMG_UINT64_C(0000000000000000))  
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING         (IMG_UINT64_C(0x0000000000000001))  
+
+
+/*
+    Register RGX_CR_SOFT_RESET
+*/
+#define RGX_CR_SOFT_RESET                                 (0x0100U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL              (IMG_UINT64_C(0xFFEFFFFFFFFFFC1D))
+#define RGX_CR_SOFT_RESET_MASKFULL                        (IMG_UINT64_C(0x00E7FFFFFFFFFC1D))
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_SHIFT   (63U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_CLRMSK  (IMG_UINT64_C(0X7FFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_EN      (IMG_UINT64_C(0X8000000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_SHIFT   (62U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_CLRMSK  (IMG_UINT64_C(0XBFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_EN      (IMG_UINT64_C(0X4000000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_SHIFT   (61U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_CLRMSK  (IMG_UINT64_C(0XDFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_EN      (IMG_UINT64_C(0X2000000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_SHIFT      (60U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_CLRMSK     (IMG_UINT64_C(0XEFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_EN         (IMG_UINT64_C(0X1000000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_SHIFT     (59U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_CLRMSK    (IMG_UINT64_C(0XF7FFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_EN        (IMG_UINT64_C(0X0800000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_SHIFT             (58U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_CLRMSK            (IMG_UINT64_C(0XFBFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_EN                (IMG_UINT64_C(0X0400000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_SHIFT             (57U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_CLRMSK            (IMG_UINT64_C(0XFDFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_EN                (IMG_UINT64_C(0X0200000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_SHIFT             (56U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_CLRMSK            (IMG_UINT64_C(0XFEFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_EN                (IMG_UINT64_C(0X0100000000000000))
+#define RGX_CR_SOFT_RESET_DPX1_CORE_SHIFT                 (55U)
+#define RGX_CR_SOFT_RESET_DPX1_CORE_CLRMSK                (IMG_UINT64_C(0XFF7FFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DPX1_CORE_EN                    (IMG_UINT64_C(0X0080000000000000))
+#define RGX_CR_SOFT_RESET_DPX0_CORE_SHIFT                 (54U)
+#define RGX_CR_SOFT_RESET_DPX0_CORE_CLRMSK                (IMG_UINT64_C(0XFFBFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DPX0_CORE_EN                    (IMG_UINT64_C(0X0040000000000000))
+#define RGX_CR_SOFT_RESET_FBA_SHIFT                       (53U)
+#define RGX_CR_SOFT_RESET_FBA_CLRMSK                      (IMG_UINT64_C(0XFFDFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FBA_EN                          (IMG_UINT64_C(0X0020000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__FB_CDC_SHIFT          (51U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__FB_CDC_CLRMSK         (IMG_UINT64_C(0XFFF7FFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__FB_CDC_EN             (IMG_UINT64_C(0X0008000000000000))
+#define RGX_CR_SOFT_RESET_SH_SHIFT                        (50U)
+#define RGX_CR_SOFT_RESET_SH_CLRMSK                       (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_SH_EN                           (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_SOFT_RESET_VRDM_SHIFT                      (49U)
+#define RGX_CR_SOFT_RESET_VRDM_CLRMSK                     (IMG_UINT64_C(0XFFFDFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_VRDM_EN                         (IMG_UINT64_C(0X0002000000000000))
+#define RGX_CR_SOFT_RESET_MCU_FBTC_SHIFT                  (48U)
+#define RGX_CR_SOFT_RESET_MCU_FBTC_CLRMSK                 (IMG_UINT64_C(0XFFFEFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_MCU_FBTC_EN                     (IMG_UINT64_C(0X0001000000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT             (47U)
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK            (IMG_UINT64_C(0XFFFF7FFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_EN                (IMG_UINT64_C(0X0000800000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT             (46U)
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK            (IMG_UINT64_C(0XFFFFBFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_EN                (IMG_UINT64_C(0X0000400000000000))
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_SHIFT             (45U)
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK            (IMG_UINT64_C(0XFFFFDFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_EN                (IMG_UINT64_C(0X0000200000000000))
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_SHIFT             (44U)
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK            (IMG_UINT64_C(0XFFFFEFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_EN                (IMG_UINT64_C(0X0000100000000000))
+#define RGX_CR_SOFT_RESET_IPP_SHIFT                       (43U)
+#define RGX_CR_SOFT_RESET_IPP_CLRMSK                      (IMG_UINT64_C(0XFFFFF7FFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_IPP_EN                          (IMG_UINT64_C(0X0000080000000000))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT                 (42U)
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK                (IMG_UINT64_C(0XFFFFFBFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN                    (IMG_UINT64_C(0X0000040000000000))
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_SHIFT              (41U)
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_CLRMSK             (IMG_UINT64_C(0XFFFFFDFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_EN                 (IMG_UINT64_C(0X0000020000000000))
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_SHIFT               (40U)
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFEFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_EN                  (IMG_UINT64_C(0X0000010000000000))
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_SHIFT               (39U)
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFF7FFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_EN                  (IMG_UINT64_C(0X0000008000000000))
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_SHIFT               (38U)
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFBFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_EN                  (IMG_UINT64_C(0X0000004000000000))
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_SHIFT               (37U)
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFDFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_EN                  (IMG_UINT64_C(0X0000002000000000))
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_SHIFT               (36U)
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_EN                  (IMG_UINT64_C(0X0000001000000000))
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_SHIFT               (35U)
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFF7FFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_EN                  (IMG_UINT64_C(0X0000000800000000))
+#define RGX_CR_SOFT_RESET_MMU_SHIFT                       (34U)
+#define RGX_CR_SOFT_RESET_MMU_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define RGX_CR_SOFT_RESET_MMU_EN                          (IMG_UINT64_C(0X0000000400000000))
+#define RGX_CR_SOFT_RESET_BIF1_SHIFT                      (33U)
+#define RGX_CR_SOFT_RESET_BIF1_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF1_EN                         (IMG_UINT64_C(0X0000000200000000))
+#define RGX_CR_SOFT_RESET_GARTEN_SHIFT                    (32U)
+#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_SOFT_RESET_GARTEN_EN                       (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_SHIFT               (31U)
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_EN                  (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_SHIFT               (30U)
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_EN                  (IMG_UINT64_C(0X0000000040000000))
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_SHIFT               (29U)
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_EN                  (IMG_UINT64_C(0X0000000020000000))
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_SHIFT                (28U)
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_EN                   (IMG_UINT64_C(0X0000000010000000))
+#define RGX_CR_SOFT_RESET_SLC_SHIFT                       (27U)
+#define RGX_CR_SOFT_RESET_SLC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define RGX_CR_SOFT_RESET_SLC_EN                          (IMG_UINT64_C(0X0000000008000000))
+#define RGX_CR_SOFT_RESET_TLA_SHIFT                       (26U)
+#define RGX_CR_SOFT_RESET_TLA_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_SOFT_RESET_TLA_EN                          (IMG_UINT64_C(0X0000000004000000))
+#define RGX_CR_SOFT_RESET_UVS_SHIFT                       (25U)
+#define RGX_CR_SOFT_RESET_UVS_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define RGX_CR_SOFT_RESET_UVS_EN                          (IMG_UINT64_C(0X0000000002000000))
+#define RGX_CR_SOFT_RESET_TE_SHIFT                        (24U)
+#define RGX_CR_SOFT_RESET_TE_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_CR_SOFT_RESET_TE_EN                           (IMG_UINT64_C(0X0000000001000000))
+#define RGX_CR_SOFT_RESET_GPP_SHIFT                       (23U)
+#define RGX_CR_SOFT_RESET_GPP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_SOFT_RESET_GPP_EN                          (IMG_UINT64_C(0X0000000000800000))
+#define RGX_CR_SOFT_RESET_FBDC_SHIFT                      (22U)
+#define RGX_CR_SOFT_RESET_FBDC_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define RGX_CR_SOFT_RESET_FBDC_EN                         (IMG_UINT64_C(0X0000000000400000))
+#define RGX_CR_SOFT_RESET_FBC_SHIFT                       (21U)
+#define RGX_CR_SOFT_RESET_FBC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_SOFT_RESET_FBC_EN                          (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_SOFT_RESET_PM_SHIFT                        (20U)
+#define RGX_CR_SOFT_RESET_PM_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define RGX_CR_SOFT_RESET_PM_EN                           (IMG_UINT64_C(0X0000000000100000))
+#define RGX_CR_SOFT_RESET_PBE_SHIFT                       (19U)
+#define RGX_CR_SOFT_RESET_PBE_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define RGX_CR_SOFT_RESET_PBE_EN                          (IMG_UINT64_C(0X0000000000080000))
+#define RGX_CR_SOFT_RESET_USC_SHARED_SHIFT                (18U)
+#define RGX_CR_SOFT_RESET_USC_SHARED_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define RGX_CR_SOFT_RESET_USC_SHARED_EN                   (IMG_UINT64_C(0X0000000000040000))
+#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT                    (17U)
+#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define RGX_CR_SOFT_RESET_MCU_L1_EN                       (IMG_UINT64_C(0X0000000000020000))
+#define RGX_CR_SOFT_RESET_BIF_SHIFT                       (16U)
+#define RGX_CR_SOFT_RESET_BIF_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define RGX_CR_SOFT_RESET_BIF_EN                          (IMG_UINT64_C(0X0000000000010000))
+#define RGX_CR_SOFT_RESET_CDM_SHIFT                       (15U)
+#define RGX_CR_SOFT_RESET_CDM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define RGX_CR_SOFT_RESET_CDM_EN                          (IMG_UINT64_C(0X0000000000008000))
+#define RGX_CR_SOFT_RESET_VDM_SHIFT                       (14U)
+#define RGX_CR_SOFT_RESET_VDM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_SOFT_RESET_VDM_EN                          (IMG_UINT64_C(0X0000000000004000))
+#define RGX_CR_SOFT_RESET_TESS_SHIFT                      (13U)
+#define RGX_CR_SOFT_RESET_TESS_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_SOFT_RESET_TESS_EN                         (IMG_UINT64_C(0X0000000000002000))
+#define RGX_CR_SOFT_RESET_PDS_SHIFT                       (12U)
+#define RGX_CR_SOFT_RESET_PDS_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_SOFT_RESET_PDS_EN                          (IMG_UINT64_C(0X0000000000001000))
+#define RGX_CR_SOFT_RESET_ISP_SHIFT                       (11U)
+#define RGX_CR_SOFT_RESET_ISP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define RGX_CR_SOFT_RESET_ISP_EN                          (IMG_UINT64_C(0X0000000000000800))
+#define RGX_CR_SOFT_RESET_TSP_SHIFT                       (10U)
+#define RGX_CR_SOFT_RESET_TSP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_SOFT_RESET_TSP_EN                          (IMG_UINT64_C(0X0000000000000400))
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT             (4U)
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_EN                (IMG_UINT64_C(0X0000000000000010))
+#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT                    (3U)
+#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_SOFT_RESET_MCU_L0_EN                       (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_SOFT_RESET_TPU_SHIFT                       (2U)
+#define RGX_CR_SOFT_RESET_TPU_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_SOFT_RESET_TPU_EN                          (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_SOFT_RESET_USC_SHIFT                       (0U)
+#define RGX_CR_SOFT_RESET_USC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_SOFT_RESET_USC_EN                          (IMG_UINT64_C(0X0000000000000001))
+
+
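+/*
+ * Editor's note: every field in this file follows the generated
+ * _SHIFT/_CLRMSK/_EN pattern: ANDing with _CLRMSK zeroes the field,
+ * _SHIFT left-aligns a new value, and _EN is the single-bit set value.
+ * A minimal read-modify-write sketch; OSReadHWReg64/OSWriteHWReg64 and
+ * pvRegBase are illustrative assumptions, not definitions from this file:
+ *
+ *     IMG_UINT64 ui64Reg = OSReadHWReg64(pvRegBase, RGX_CR_SOFT_RESET);
+ *     ui64Reg &= RGX_CR_SOFT_RESET_GARTEN_CLRMSK;
+ *     ui64Reg |= RGX_CR_SOFT_RESET_GARTEN_EN;
+ *     OSWriteHWReg64(pvRegBase, RGX_CR_SOFT_RESET, ui64Reg);
+ */
+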
+/*
+    Register RGX_CR_SOFT_RESET2
+*/
+#define RGX_CR_SOFT_RESET2                                (0x0108U)
+#define RGX_CR_SOFT_RESET2_MASKFULL                       (IMG_UINT64_C(0x00000000001FFFFF))
+#define RGX_CR_SOFT_RESET2_SPFILTER_SHIFT                 (12U)
+#define RGX_CR_SOFT_RESET2_SPFILTER_CLRMSK                (0XFFE00FFFU)
+#define RGX_CR_SOFT_RESET2_TDM_SHIFT                      (11U)
+#define RGX_CR_SOFT_RESET2_TDM_CLRMSK                     (0XFFFFF7FFU)
+#define RGX_CR_SOFT_RESET2_TDM_EN                         (0X00000800U)
+#define RGX_CR_SOFT_RESET2_ASTC_SHIFT                     (10U)
+#define RGX_CR_SOFT_RESET2_ASTC_CLRMSK                    (0XFFFFFBFFU)
+#define RGX_CR_SOFT_RESET2_ASTC_EN                        (0X00000400U)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_SHIFT               (9U)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_CLRMSK              (0XFFFFFDFFU)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_EN                  (0X00000200U)
+#define RGX_CR_SOFT_RESET2_USCPS_SHIFT                    (8U)
+#define RGX_CR_SOFT_RESET2_USCPS_CLRMSK                   (0XFFFFFEFFU)
+#define RGX_CR_SOFT_RESET2_USCPS_EN                       (0X00000100U)
+#define RGX_CR_SOFT_RESET2_IPF_SHIFT                      (7U)
+#define RGX_CR_SOFT_RESET2_IPF_CLRMSK                     (0XFFFFFF7FU)
+#define RGX_CR_SOFT_RESET2_IPF_EN                         (0X00000080U)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_SHIFT                 (6U)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_CLRMSK                (0XFFFFFFBFU)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_EN                    (0X00000040U)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_SHIFT               (5U)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_CLRMSK              (0XFFFFFFDFU)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_EN                  (0X00000020U)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_SHIFT               (4U)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_CLRMSK              (0XFFFFFFEFU)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_EN                  (0X00000010U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT           (3U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK          (0XFFFFFFF7U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_EN              (0X00000008U)
+#define RGX_CR_SOFT_RESET2_PIXEL_SHIFT                    (2U)
+#define RGX_CR_SOFT_RESET2_PIXEL_CLRMSK                   (0XFFFFFFFBU)
+#define RGX_CR_SOFT_RESET2_PIXEL_EN                       (0X00000004U)
+#define RGX_CR_SOFT_RESET2_CDM_SHIFT                      (1U)
+#define RGX_CR_SOFT_RESET2_CDM_CLRMSK                     (0XFFFFFFFDU)
+#define RGX_CR_SOFT_RESET2_CDM_EN                         (0X00000002U)
+#define RGX_CR_SOFT_RESET2_VERTEX_SHIFT                   (0U)
+#define RGX_CR_SOFT_RESET2_VERTEX_CLRMSK                  (0XFFFFFFFEU)
+#define RGX_CR_SOFT_RESET2_VERTEX_EN                      (0X00000001U)
+
+
+/*
+    Register RGX_CR_EVENT_STATUS
+*/
+#define RGX_CR_EVENT_STATUS                               (0x0130U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__MASKFULL            (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_STATUS_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT      (31U)
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK     (0X7FFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN         (0X80000000U)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT        (30U)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK       (0XBFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN           (0X40000000U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT  (29U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN     (0X20000000U)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT       (28U)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK      (0XEFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN          (0X10000000U)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT      (27U)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK     (0XF7FFFFFFU)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN         (0X08000000U)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT       (26U)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK      (0XFBFFFFFFU)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN          (0X04000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT        (25U)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK       (0XFDFFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN           (0X02000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT        (24U)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK       (0XFEFFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN           (0X01000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT        (23U)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK       (0XFF7FFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN           (0X00800000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT        (22U)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK       (0XFFBFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN           (0X00400000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT        (21U)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK       (0XFFDFFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN           (0X00200000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT        (20U)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK       (0XFFEFFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN           (0X00100000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT        (19U)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK       (0XFFF7FFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN           (0X00080000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT        (18U)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK       (0XFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN           (0X00040000U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_EN (0X00040000U)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_SHIFT            (17U)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK           (0XFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_EN               (0X00020000U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_SHIFT (17U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_EN (0X00020000U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT    (16U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK   (0XFFFEFFFFU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN       (0X00010000U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT             (15U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK            (0XFFFF7FFFU)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN                (0X00008000U)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT            (14U)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK           (0XFFFFBFFFU)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_EN               (0X00004000U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT                (13U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK               (0XFFFFDFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN                   (0X00002000U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT                (12U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK               (0XFFFFEFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN                   (0X00001000U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT             (11U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK            (0XFFFFF7FFU)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN                (0X00000800U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT          (10U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK         (0XFFFFFBFFU)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN             (0X00000400U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT          (9U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK         (0XFFFFFDFFU)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN             (0X00000200U)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT          (8U)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK         (0XFFFFFEFFU)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN             (0X00000100U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT        (7U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK       (0XFFFFFF7FU)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN           (0X00000080U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT            (6U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK           (0XFFFFFFBFU)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN               (0X00000040U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT             (5U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK            (0XFFFFFFDFU)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN                (0X00000020U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT       (4U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK      (0XFFFFFFEFU)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN          (0X00000010U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT      (3U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK     (0XFFFFFFF7U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN         (0X00000008U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT        (2U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK       (0XFFFFFFFBU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN           (0X00000004U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT         (1U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK        (0XFFFFFFFDU)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN            (0X00000002U)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT            (0U)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK           (0XFFFFFFFEU)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_EN               (0X00000001U)
+
+
+/*
+    Register RGX_CR_TIMER
+*/
+#define RGX_CR_TIMER                                      (0x0160U)
+#define RGX_CR_TIMER_MASKFULL                             (IMG_UINT64_C(0x8000FFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_SHIFT                          (63U)
+#define RGX_CR_TIMER_BIT31_CLRMSK                         (IMG_UINT64_C(0X7FFFFFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_EN                             (IMG_UINT64_C(0X8000000000000000))
+#define RGX_CR_TIMER_VALUE_SHIFT                          (0U)
+#define RGX_CR_TIMER_VALUE_CLRMSK                         (IMG_UINT64_C(0XFFFF000000000000))
+
+
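+/*
+ * Editor's note: multi-bit fields such as TIMER VALUE carry no _EN
+ * define; a sketch of extracting one, under the same illustrative
+ * accessor assumption as above (~_CLRMSK selects the field bits and
+ * _SHIFT right-aligns them):
+ *
+ *     IMG_UINT64 ui64Timer = OSReadHWReg64(pvRegBase, RGX_CR_TIMER);
+ *     IMG_UINT64 ui64Value = (ui64Timer & ~RGX_CR_TIMER_VALUE_CLRMSK)
+ *                            >> RGX_CR_TIMER_VALUE_SHIFT;
+ */
+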
+/*
+    Register RGX_CR_TLA_STATUS
+*/
+#define RGX_CR_TLA_STATUS                                 (0x0178U)
+#define RGX_CR_TLA_STATUS_MASKFULL                        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_SHIFT                (39U)
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_CLRMSK               (IMG_UINT64_C(0X0000007FFFFFFFFF))
+#define RGX_CR_TLA_STATUS_REQUEST_SHIFT                   (7U)
+#define RGX_CR_TLA_STATUS_REQUEST_CLRMSK                  (IMG_UINT64_C(0XFFFFFF800000007F))
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT             (1U)
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFFF81))
+#define RGX_CR_TLA_STATUS_BUSY_SHIFT                      (0U)
+#define RGX_CR_TLA_STATUS_BUSY_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_TLA_STATUS_BUSY_EN                         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_PM_PARTIAL_RENDER_ENABLE
+*/
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE                   (0x0338U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL          (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT          (0U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK         (0XFFFFFFFEU)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN             (0X00000001U)
+
+
+/*
+    Register RGX_CR_SIDEKICK_IDLE
+*/
+#define RGX_CR_SIDEKICK_IDLE                              (0x03C8U)
+#define RGX_CR_SIDEKICK_IDLE_MASKFULL                     (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_SHIFT                 (6U)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK                (0XFFFFFFBFU)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_EN                    (0X00000040U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_SHIFT                    (5U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_CLRMSK                   (0XFFFFFFDFU)
+#define RGX_CR_SIDEKICK_IDLE_MMU_EN                       (0X00000020U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_SHIFT                 (4U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_CLRMSK                (0XFFFFFFEFU)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_EN                    (0X00000010U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_SHIFT                    (3U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_CLRMSK                   (0XFFFFFFF7U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_EN                       (0X00000008U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_SHIFT                 (2U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_CLRMSK                (0XFFFFFFFBU)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_EN                    (0X00000004U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_SHIFT                 (1U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK                (0XFFFFFFFDU)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_EN                    (0X00000002U)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_SHIFT                  (0U)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_CLRMSK                 (0XFFFFFFFEU)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_EN                     (0X00000001U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS                   (0x0430U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL          (IMG_UINT64_C(0x00000000000000F3))
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT   (4U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK  (0XFFFFFF0FU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN    (0X00000002U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT    (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK   (0XFFFFFFFEU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN       (0X00000001U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_TASK0
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0                    (0x0438U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT   (32U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK  (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT   (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK  (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_TASK1
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1                    (0x0440U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT   (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_TASK2
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2                    (0x0448U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT  (32U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT  (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_RESUME_TASK0
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0                   (0x0450U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT  (32U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT  (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_RESUME_TASK1
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1                   (0x0458U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT  (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_RESUME_TASK2
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2                   (0x0460U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT (32U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS                   (0x04A0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL          (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN    (0X00000002U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT    (0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK   (0XFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN       (0X00000001U)
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_PDS0                           (0x04A8U)
+#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL                  (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT           (36U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK          (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT      (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE       (16U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT           (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK          (IMG_UINT64_C(0XFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT      (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE       (16U)
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_PDS1                           (0x04B0U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL      (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL                  (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0X40000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT         (29U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK        (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN            (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT         (28U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK        (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN            (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT  (28U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN     (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT              (27U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK             (0XF7FFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN                 (0X08000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0XF03FFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT        (21U)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK       (0XF81FFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0X00200000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT       (20U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK      (0XFFEFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN          (0X00100000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0XFFE00FFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT         (11U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK        (0XFFF007FFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT           (7U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK          (0XFFFFF87FU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0XFFFFF07FU)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT           (1U)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK          (0XFFFFFF81U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT               (0U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK              (0XFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_EN                  (0X00000001U)
+
+
+/*
+    Register RGX_CR_CDM_TERMINATE_PDS
+*/
+#define RGX_CR_CDM_TERMINATE_PDS                          (0x04B8U)
+#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL                 (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT          (36U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK         (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT     (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE      (16U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT          (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFFFF0000000F))
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT     (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE      (16U)
+
+
+/*
+    Register RGX_CR_CDM_TERMINATE_PDS1
+*/
+#define RGX_CR_CDM_TERMINATE_PDS1                         (0x04C0U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL    (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL                (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0X40000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT       (29U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK      (0XDFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN          (0X20000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT       (28U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK      (0XEFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN          (0X10000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN   (0X10000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT            (27U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK           (0XF7FFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_EN               (0X08000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0XF03FFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT      (21U)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK     (0XF81FFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0X00200000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT     (20U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK    (0XFFEFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN        (0X00100000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0XFFE00FFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT       (11U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK      (0XFFF007FFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT         (7U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK        (0XFFFFF87FU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0XFFFFF07FU)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT         (1U)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK        (0XFFFFFF81U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT             (0U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK            (0XFFFFFFFEU)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_EN                (0X00000001U)
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_LOAD_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0                      (0x04D8U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL             (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT      (36U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK     (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE  (16U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT      (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK     (IMG_UINT64_C(0XFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE  (16U)
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_LOAD_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1                      (0x04E0U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL             (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0X40000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT    (29U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK   (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN       (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT    (28U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK   (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN       (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT         (27U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK        (0XF7FFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN            (0X08000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0XF03FFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT   (21U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK  (0XF81FFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0X00200000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT  (20U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN     (0X00100000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0XFFE00FFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT    (11U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK   (0XFFF007FFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT      (7U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK     (0XFFFFF87FU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0XFFFFF07FU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT      (1U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK     (0XFFFFFF81U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT          (0U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK         (0XFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN             (0X00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_CONFIG
+*/
+#define RGX_CR_MIPS_WRAPPER_CONFIG                        (0x0810U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_MASKFULL               (IMG_UINT64_C(0x000000010F01FFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT     (32U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK    (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN        (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT            (25U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFF1FFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT          (24U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN             (IMG_UINT64_C(0X0000000001000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT    (16U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32   (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
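+/*
+ * Editor's note: each REMAPn region below is programmed as a
+ * CONFIG1/CONFIG2 pair: CONFIG1 takes the incoming 4KB-aligned base
+ * address and the enable bit, CONFIG2 the outgoing address, OS_ID,
+ * TRUSTED attribute and region size. A hedged sketch (ui64BaseIn,
+ * ui64AddrOut and the accessor are illustrative assumptions):
+ *
+ *     OSWriteHWReg64(pvRegBase, RGX_CR_MIPS_ADDR_REMAP1_CONFIG1,
+ *                    (ui64BaseIn & ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK) |
+ *                    RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN);
+ *     OSWriteHWReg64(pvRegBase, RGX_CR_MIPS_ADDR_REMAP1_CONFIG2,
+ *                    ui64AddrOut & ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK);
+ */
+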
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1                   (0x0818U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2                   (0x0820U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1                   (0x0828U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2                   (0x0830U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1                   (0x0838U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2                   (0x0840U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1                   (0x0848U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2                   (0x0850U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1                   (0x0858U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2                   (0x0860U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS            (0x0868U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL   (IMG_UINT64_C(0x00000001FFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT (32U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN   (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR             (0x0870U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL    (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN    (0X00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG               (0x0878U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL      (IMG_UINT64_C(0xFFFFFFF7FFFFFFBF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT (36U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT   (32U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK  (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT (11U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN    (IMG_UINT64_C(0X0000000000000800))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT (7U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF87F))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB (IMG_UINT64_C(0x0000000000000180))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB (IMG_UINT64_C(0x0000000000000280))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB (IMG_UINT64_C(0x0000000000000300))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB (IMG_UINT64_C(0x0000000000000380))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT   (1U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFFC1))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
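+/*
+ * Editor's note: RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG appears to pack one
+ * remap entry, selected by the ENTRY field, into a single 64-bit write;
+ * REGION_SIZE takes one of the _4KB.._256MB encodings above. A hedged
+ * sketch for entry 1 (accessor illustrative):
+ *
+ *     OSWriteHWReg64(pvRegBase, RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG,
+ *                    ((IMG_UINT64)1 << RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT) |
+ *                    RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB |
+ *                    RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN);
+ */
+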
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_RANGE_READ
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ                 (0x0880U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL        (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT     (1U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK    (0XFFFFFFC1U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT   (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK  (0XFFFFFFFEU)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN      (0X00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA                 (0x0888U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL        (IMG_UINT64_C(0xFFFFFFF7FFFFFF81))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT  (36U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT     (32U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK    (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT   (11U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN      (IMG_UINT64_C(0X0000000000000800))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT (7U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF87F))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN  (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_IRQ_ENABLE
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE                    (0x08A0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT        (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK       (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN           (0X00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_IRQ_STATUS
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS                    (0x08A8U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT        (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK       (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN           (0X00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_IRQ_CLEAR
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR                     (0x08B0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL            (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT         (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK        (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN            (0X00000001U)
+
+
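+/*
+ * Editor's note: the wrapper IRQ registers above form the usual
+ * ENABLE/STATUS/CLEAR triplet; a sketch of acknowledging an event
+ * (OSReadHWReg32/OSWriteHWReg32 are illustrative assumptions):
+ *
+ *     if (OSReadHWReg32(pvRegBase, RGX_CR_MIPS_WRAPPER_IRQ_STATUS) &
+ *         RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN)
+ *     {
+ *         OSWriteHWReg32(pvRegBase, RGX_CR_MIPS_WRAPPER_IRQ_CLEAR,
+ *                        RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN);
+ *     }
+ */
+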
+/*
+    Register RGX_CR_MIPS_WRAPPER_NMI_ENABLE
+*/
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE                    (0x08B8U)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT        (0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK       (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN           (0X00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_NMI_EVENT
+*/
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT                     (0x08C0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL            (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT       (0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK      (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN          (0X00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_DEBUG_CONFIG
+*/
+#define RGX_CR_MIPS_DEBUG_CONFIG                          (0x08C8U)
+#define RGX_CR_MIPS_DEBUG_CONFIG_MASKFULL                 (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT (0U)
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN   (0X00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_EXCEPTION_STATUS
+*/
+#define RGX_CR_MIPS_EXCEPTION_STATUS                      (0x08D0U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_MASKFULL             (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT       (5U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK      (0XFFFFFFDFU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN          (0X00000020U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT   (4U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK  (0XFFFFFFEFU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN      (0X00000010U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT    (3U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK   (0XFFFFFFF7U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN       (0X00000008U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT    (2U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK   (0XFFFFFFFBU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN       (0X00000004U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT         (1U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK        (0XFFFFFFFDU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN            (0X00000002U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT         (0U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK        (0XFFFFFFFEU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN            (0X00000001U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVDATAX
+*/
+#define RGX_CR_META_SP_MSLVDATAX                          (0x0A00U)
+#define RGX_CR_META_SP_MSLVDATAX_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT          (0U)
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVDATAT
+*/
+#define RGX_CR_META_SP_MSLVDATAT                          (0x0A08U)
+#define RGX_CR_META_SP_MSLVDATAT_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT          (0U)
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVCTRL0
+*/
+#define RGX_CR_META_SP_MSLVCTRL0                          (0x0A10U)
+#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT               (2U)
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK              (0X00000003U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT           (1U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK          (0XFFFFFFFDU)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN              (0X00000002U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT                 (0U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK                (0XFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_EN                    (0X00000001U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVCTRL1
+*/
+#define RGX_CR_META_SP_MSLVCTRL1                          (0x0A18U)
+#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL                 (IMG_UINT64_C(0x00000000F7F4003F))
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT       (30U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK      (0X3FFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT    (29U)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK   (0XDFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN       (0X20000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT   (28U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK  (0XEFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN      (0X10000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT       (26U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK      (0XFBFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN          (0X04000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT       (25U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK      (0XFDFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN          (0X02000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT              (24U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK             (0XFEFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_EN                 (0X01000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT           (21U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK          (0XFF1FFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT             (20U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK            (0XFFEFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN                (0X00100000U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT          (18U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK         (0XFFFBFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN             (0X00040000U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT             (4U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK            (0XFFFFFFCFU)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT         (2U)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK        (0XFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT         (0U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK        (0XFFFFFFFCU)
+
+
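+/*
+ * Editor's note: accesses through the META slave port are typically
+ * gated on MSLVCTRL1 reporting READY and GBLPORT_IDLE; a polling
+ * sketch under the same illustrative accessor assumption:
+ *
+ *     #define MSLV_READY (RGX_CR_META_SP_MSLVCTRL1_READY_EN | \
+ *                         RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN)
+ *     while ((OSReadHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL1) &
+ *             MSLV_READY) != MSLV_READY)
+ *         ;
+ */
+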
+/*
+    Register RGX_CR_META_SP_MSLVHANDSHKE
+*/
+#define RGX_CR_META_SP_MSLVHANDSHKE                       (0x0A50U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL              (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT           (2U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK          (0XFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT          (0U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK         (0XFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT0KICK
+*/
+#define RGX_CR_META_SP_MSLVT0KICK                         (0x0A80U)
+#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK       (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT0KICKI
+*/
+#define RGX_CR_META_SP_MSLVT0KICKI                        (0x0A88U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK     (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT1KICK
+*/
+#define RGX_CR_META_SP_MSLVT1KICK                         (0x0A90U)
+#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK       (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT1KICKI
+*/
+#define RGX_CR_META_SP_MSLVT1KICKI                        (0x0A98U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK     (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT2KICK
+*/
+#define RGX_CR_META_SP_MSLVT2KICK                         (0x0AA0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK       (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT2KICKI
+*/
+#define RGX_CR_META_SP_MSLVT2KICKI                        (0x0AA8U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK     (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT3KICK
+*/
+#define RGX_CR_META_SP_MSLVT3KICK                         (0x0AB0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK       (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT3KICKI
+*/
+#define RGX_CR_META_SP_MSLVT3KICKI                        (0x0AB8U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK     (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVRST
+*/
+#define RGX_CR_META_SP_MSLVRST                            (0x0AC0U)
+#define RGX_CR_META_SP_MSLVRST_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT            (0U)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK           (0XFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN               (0X00000001U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVIRQSTATUS
+*/
+#define RGX_CR_META_SP_MSLVIRQSTATUS                      (0x0AC8U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL             (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT      (3U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK     (0XFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN         (0X00000008U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT      (2U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK     (0XFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN         (0X00000004U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVIRQENABLE
+*/
+#define RGX_CR_META_SP_MSLVIRQENABLE                      (0x0AD0U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL             (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT         (3U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK        (0XFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN            (0X00000008U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT         (2U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK        (0XFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN            (0X00000004U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVIRQLEVEL
+*/
+#define RGX_CR_META_SP_MSLVIRQLEVEL                       (0x0AD8U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT            (0U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK           (0XFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN               (0X00000001U)
+
+
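+/* Editor's note: illustrative sketch only, not part of the generated file.
+ * A hedged read-modify-write that enables the EVENT0 interrupt and tests
+ * the matching TRIGVECT2 status bit; irqenable/irqstatus are assumed to be
+ * mapped pointers to RGX_CR_META_SP_MSLVIRQENABLE and
+ * RGX_CR_META_SP_MSLVIRQSTATUS, and example_handle_event0() is a
+ * hypothetical handler:
+ *
+ *   IMG_UINT32 en = *irqenable;
+ *   en &= RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK;
+ *   en |= RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN;
+ *   *irqenable = en;
+ *
+ *   if (*irqstatus & RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN)
+ *       example_handle_event0();
+ *
+ * The exact acknowledge protocol for MSLVIRQSTATUS is hardware-defined and
+ * not derivable from these macros alone.
+ */
+
+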
+/*
+    Register RGX_CR_MTS_SCHEDULE
+*/
+#define RGX_CR_MTS_SCHEDULE                               (0x0B00U)
+#define RGX_CR_MTS_SCHEDULE_MASKFULL                      (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT                    (8U)
+#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK                   (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER                 (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE_HOST_HOST                     (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT                (6U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK               (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0                 (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1                 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2                 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3                 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT                 (5U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK                (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX                 (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX                (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT                    (4U)
+#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK                   (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED              (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED                  (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE_DM_SHIFT                      (0U)
+#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK                     (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM0                        (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM1                        (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM2                        (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM3                        (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM4                        (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM5                        (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM6                        (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM7                        (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL                     (0X0000000FU)
+
+
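+/* Editor's note: illustrative sketch only, not part of the generated file.
+ * Every named value in RGX_CR_MTS_SCHEDULE (HOST_*, PRIORITY_PRTn,
+ * CONTEXT_*, TASK_*, DM_DMn) is already positioned at its field's shift,
+ * so a complete schedule word is the OR of one choice per field. A hedged
+ * compose, with the field meanings taken at face value from the names:
+ *
+ *   IMG_UINT32 kick = RGX_CR_MTS_SCHEDULE_HOST_HOST     |
+ *                     RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 |
+ *                     RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX |
+ *                     RGX_CR_MTS_SCHEDULE_TASK_COUNTED  |
+ *                     RGX_CR_MTS_SCHEDULE_DM_DM2;
+ *
+ * An arbitrary data master n encodes equivalently as
+ * (n << RGX_CR_MTS_SCHEDULE_DM_SHIFT) & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK.
+ */
+
+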
+/*
+    Register RGX_CR_MTS_SCHEDULE1
+*/
+#define RGX_CR_MTS_SCHEDULE1                              (0x10B00U)
+#define RGX_CR_MTS_SCHEDULE1_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED             (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM0                       (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE2
+*/
+#define RGX_CR_MTS_SCHEDULE2                              (0x20B00U)
+#define RGX_CR_MTS_SCHEDULE2_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED             (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM0                       (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE3
+*/
+#define RGX_CR_MTS_SCHEDULE3                              (0x30B00U)
+#define RGX_CR_MTS_SCHEDULE3_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED             (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM0                       (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE4
+*/
+#define RGX_CR_MTS_SCHEDULE4                              (0x40B00U)
+#define RGX_CR_MTS_SCHEDULE4_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED             (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM0                       (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE5
+*/
+#define RGX_CR_MTS_SCHEDULE5                              (0x50B00U)
+#define RGX_CR_MTS_SCHEDULE5_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED             (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM0                       (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE6
+*/
+#define RGX_CR_MTS_SCHEDULE6                              (0x60B00U)
+#define RGX_CR_MTS_SCHEDULE6_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED             (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM0                       (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE7
+*/
+#define RGX_CR_MTS_SCHEDULE7                              (0x70B00U)
+#define RGX_CR_MTS_SCHEDULE7_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX                (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED             (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM0                       (0X00000000U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC                 (0x0B30U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL        (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT  (0U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC                 (0x0B38U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL        (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT  (0U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC                (0x0B40U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL       (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC                (0x0B48U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL       (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG
+*/
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG                  (0x0B50U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__MASKFULL (IMG_UINT64_C(0x000FF0FFFFFFF701))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL         (IMG_UINT64_C(0x0000FFFFFFFFF001))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT (44U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0XFFFF0FFFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT (44U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0XFFF00FFFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT   (40U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK  (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT (12U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PERSISTENCE_SHIFT (9U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PERSISTENCE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF9FF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_SLC_COHERENT_SHIFT (8U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_SLC_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_SLC_COHERENT_EN (IMG_UINT64_C(0X0000000000000100))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT  (0U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META   (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS    (IMG_UINT64_C(0x0000000000000001))
+
+
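+/* Editor's note: illustrative sketch only, not part of the generated file.
+ * The 64-bit CLRMSK constants compose exactly like the 32-bit ones. A
+ * hedged update that places a fence address into FENCE_ADDR (bits 12..39)
+ * while preserving every other field; garten_cfg is an assumed pointer to
+ * the mapped 64-bit register and fence_addr an IMG_UINT64 device address
+ * assumed to be 4 KiB aligned, so masking is lossless:
+ *
+ *   IMG_UINT64 cfg = *garten_cfg;
+ *   cfg &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK;
+ *   cfg |= fence_addr &
+ *          ~RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK;
+ *   *garten_cfg = cfg;
+ */
+
+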
+/*
+    Register RGX_CR_MTS_INTCTX
+*/
+#define RGX_CR_MTS_INTCTX                                 (0x0B98U)
+#define RGX_CR_MTS_INTCTX_MASKFULL                        (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT          (22U)
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK         (0XC03FFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_PTR_SHIFT                    (18U)
+#define RGX_CR_MTS_INTCTX_DM_PTR_CLRMSK                   (0XFFC3FFFFU)
+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT             (16U)
+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK            (0XFFFCFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT         (8U)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK        (0XFFFF00FFU)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT     (0U)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX
+*/
+#define RGX_CR_MTS_BGCTX                                  (0x0BA0U)
+#define RGX_CR_MTS_BGCTX_MASKFULL                         (IMG_UINT64_C(0x0000000000003FFF))
+#define RGX_CR_MTS_BGCTX_DM_PTR_SHIFT                     (10U)
+#define RGX_CR_MTS_BGCTX_DM_PTR_CLRMSK                    (0XFFFFC3FFU)
+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT              (8U)
+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK             (0XFFFFFCFFU)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT     (0U)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE
+*/
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE                 (0x0BA8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT       (56U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK      (IMG_UINT64_C(0X00FFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT       (48U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK      (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT       (40U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK      (IMG_UINT64_C(0XFFFF00FFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT       (32U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK      (IMG_UINT64_C(0XFFFFFF00FFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT       (24U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK      (IMG_UINT64_C(0XFFFFFFFF00FFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT       (16U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT       (8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT       (0U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+    Register RGX_CR_MTS_GPU_INT_STATUS
+*/
+#define RGX_CR_MTS_GPU_INT_STATUS                         (0x0BB0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT            (0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK           (0X00000000U)
+
+
+/*
+    Register RGX_CR_META_BOOT
+*/
+#define RGX_CR_META_BOOT                                  (0x0BF8U)
+#define RGX_CR_META_BOOT_MASKFULL                         (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_BOOT_MODE_SHIFT                       (0U)
+#define RGX_CR_META_BOOT_MODE_CLRMSK                      (0XFFFFFFFEU)
+#define RGX_CR_META_BOOT_MODE_EN                          (0X00000001U)
+
+
+/*
+    Register RGX_CR_GARTEN_SLC
+*/
+#define RGX_CR_GARTEN_SLC                                 (0x0BB8U)
+#define RGX_CR_GARTEN_SLC_MASKFULL                        (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT           (0U)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK          (0XFFFFFFFEU)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN              (0X00000001U)
+
+
+/*
+    Register RGX_CR_PPP
+*/
+#define RGX_CR_PPP                                        (0x0CD0U)
+#define RGX_CR_PPP_MASKFULL                               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_CHECKSUM_SHIFT                         (0U)
+#define RGX_CR_PPP_CHECKSUM_CLRMSK                        (0X00000000U)
+
+
+#define RGX_CR_ISP_RENDER_DIR_TYPE_MASK                   (0x00000003U)
+/* Top-left to bottom-right */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_TL2BR                  (0x00000000U)
+/* Top-right to bottom-left */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_TR2BL                  (0x00000001U)
+/* Bottom-left to top-right */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_BL2TR                  (0x00000002U)
+/* Bottom-right to top-left */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_BR2TL                  (0x00000003U)
+
+
+#define RGX_CR_ISP_RENDER_MODE_TYPE_MASK                  (0x00000003U)
+/* Normal render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_NORM                  (0x00000000U)
+/* Fast 2D render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_2D               (0x00000002U)
+/* Fast scale render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE            (0x00000003U)
+
+
+/*
+    Register RGX_CR_ISP_RENDER
+*/
+#define RGX_CR_ISP_RENDER                                 (0x0F08U)
+#define RGX_CR_ISP_RENDER_MASKFULL                        (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_ISP_RENDER_RESUME_SHIFT                    (4U)
+#define RGX_CR_ISP_RENDER_RESUME_CLRMSK                   (0XFFFFFFEFU)
+#define RGX_CR_ISP_RENDER_RESUME_EN                       (0X00000010U)
+#define RGX_CR_ISP_RENDER_DIR_SHIFT                       (2U)
+#define RGX_CR_ISP_RENDER_DIR_CLRMSK                      (0XFFFFFFF3U)
+#define RGX_CR_ISP_RENDER_DIR_TL2BR                       (0X00000000U)
+#define RGX_CR_ISP_RENDER_DIR_TR2BL                       (0X00000004U)
+#define RGX_CR_ISP_RENDER_DIR_BL2TR                       (0X00000008U)
+#define RGX_CR_ISP_RENDER_DIR_BR2TL                       (0X0000000CU)
+#define RGX_CR_ISP_RENDER_MODE_SHIFT                      (0U)
+#define RGX_CR_ISP_RENDER_MODE_CLRMSK                     (0XFFFFFFFCU)
+#define RGX_CR_ISP_RENDER_MODE_NORM                       (0X00000000U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_2D                    (0X00000002U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE                 (0X00000003U)
+
+
+/*
+    Register RGX_CR_ISP_CTL
+*/
+#define RGX_CR_ISP_CTL                                    (0x0F38U)
+#define RGX_CR_ISP_CTL_MASKFULL                           (IMG_UINT64_C(0x0000000007FFF3FF))
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_SHIFT                  (26U)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK                 (0XFBFFFFFFU)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_EN                     (0X04000000U)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_SHIFT                  (25U)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_CLRMSK                 (0XFDFFFFFFU)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_EN                     (0X02000000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT          (23U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK         (0XFE7FFFFFU)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9            (0X00000000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10           (0X00800000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL            (0X01000000U)
+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT            (21U)
+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK           (0XFF9FFFFFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT                 (20U)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK                (0XFFEFFFFFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN                    (0X00100000U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT           (19U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK          (0XFFF7FFFFU)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN              (0X00080000U)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT     (18U)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK    (0XFFFBFFFFU)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN        (0X00040000U)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT          (17U)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK         (0XFFFDFFFFU)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN             (0X00020000U)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_SHIFT                   (16U)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_CLRMSK                  (0XFFFEFFFFU)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_EN                      (0X00010000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_SHIFT                  (12U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_CLRMSK                 (0XFFFF0FFFU)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE               (0X00000000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO               (0X00001000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE             (0X00002000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR              (0X00003000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE              (0X00004000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX               (0X00005000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN             (0X00006000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT             (0X00007000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE              (0X00008000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN               (0X00009000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN            (0X0000A000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE            (0X0000B000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN          (0X0000C000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN          (0X0000D000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN           (0X0000E000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN           (0X0000F000U)
+#define RGX_CR_ISP_CTL_VALID_ID_SHIFT                     (4U)
+#define RGX_CR_ISP_CTL_VALID_ID_CLRMSK                    (0XFFFFFC0FU)
+#define RGX_CR_ISP_CTL_UPASS_START_SHIFT                  (0U)
+#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK                 (0XFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_ISP_STATUS
+*/
+#define RGX_CR_ISP_STATUS                                 (0x1038U)
+#define RGX_CR_ISP_STATUS_MASKFULL                        (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_SHIFT                 (2U)
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_CLRMSK                (0XFFFFFFFBU)
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_EN                    (0X00000004U)
+#define RGX_CR_ISP_STATUS_ACTIVE_SHIFT                    (1U)
+#define RGX_CR_ISP_STATUS_ACTIVE_CLRMSK                   (0XFFFFFFFDU)
+#define RGX_CR_ISP_STATUS_ACTIVE_EN                       (0X00000002U)
+#define RGX_CR_ISP_STATUS_EOR_SHIFT                       (0U)
+#define RGX_CR_ISP_STATUS_EOR_CLRMSK                      (0XFFFFFFFEU)
+#define RGX_CR_ISP_STATUS_EOR_EN                          (0X00000001U)
+
+
+/*
+    Register group: RGX_CR_ISP_XTP_RESUME, with 64 repeats
+*/
+#define RGX_CR_ISP_XTP_RESUME_REPEATCOUNT                 (64)
+/*
+    Register RGX_CR_ISP_XTP_RESUME0
+*/
+#define RGX_CR_ISP_XTP_RESUME0                            (0x3A00U)
+#define RGX_CR_ISP_XTP_RESUME0_MASKFULL                   (IMG_UINT64_C(0x00000000003FF3FF))
+#define RGX_CR_ISP_XTP_RESUME0_TILE_X_SHIFT               (12U)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK              (0XFFC00FFFU)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT               (0U)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK              (0XFFFFFC00U)
+
+
+/*
+    Register group: RGX_CR_ISP_XTP_STORE, with 32 repeats
+*/
+#define RGX_CR_ISP_XTP_STORE_REPEATCOUNT                  (32)
+/*
+    Register RGX_CR_ISP_XTP_STORE0
+*/
+#define RGX_CR_ISP_XTP_STORE0                             (0x3C00U)
+#define RGX_CR_ISP_XTP_STORE0_MASKFULL                    (IMG_UINT64_C(0x000000007F3FF3FF))
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_SHIFT                (30U)
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK               (0XBFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_EN                   (0X40000000U)
+#define RGX_CR_ISP_XTP_STORE0_EOR_SHIFT                   (29U)
+#define RGX_CR_ISP_XTP_STORE0_EOR_CLRMSK                  (0XDFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_EOR_EN                      (0X20000000U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT             (28U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK            (0XEFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_EN                (0X10000000U)
+#define RGX_CR_ISP_XTP_STORE0_MT_SHIFT                    (24U)
+#define RGX_CR_ISP_XTP_STORE0_MT_CLRMSK                   (0XF0FFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_X_SHIFT                (12U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_X_CLRMSK               (0XFFC00FFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_Y_SHIFT                (0U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK               (0XFFFFFC00U)
+
+
+/*
+    Register group: RGX_CR_BIF_CAT_BASE, with 8 repeats
+*/
+#define RGX_CR_BIF_CAT_BASE_REPEATCOUNT                   (8)
+/*
+    Register RGX_CR_BIF_CAT_BASE0
+*/
+#define RGX_CR_BIF_CAT_BASE0                              (0x1200U)
+#define RGX_CR_BIF_CAT_BASE0_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE1
+*/
+#define RGX_CR_BIF_CAT_BASE1                              (0x1208U)
+#define RGX_CR_BIF_CAT_BASE1_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE1_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE1_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE2
+*/
+#define RGX_CR_BIF_CAT_BASE2                              (0x1210U)
+#define RGX_CR_BIF_CAT_BASE2_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE2_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE2_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE3
+*/
+#define RGX_CR_BIF_CAT_BASE3                              (0x1218U)
+#define RGX_CR_BIF_CAT_BASE3_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE3_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE3_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE4
+*/
+#define RGX_CR_BIF_CAT_BASE4                              (0x1220U)
+#define RGX_CR_BIF_CAT_BASE4_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE4_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE4_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE5
+*/
+#define RGX_CR_BIF_CAT_BASE5                              (0x1228U)
+#define RGX_CR_BIF_CAT_BASE5_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE5_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE5_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE6
+*/
+#define RGX_CR_BIF_CAT_BASE6                              (0x1230U)
+#define RGX_CR_BIF_CAT_BASE6_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE6_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE6_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE7
+*/
+#define RGX_CR_BIF_CAT_BASE7                              (0x1238U)
+#define RGX_CR_BIF_CAT_BASE7_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE7_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE7_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE               (4096U)
+
+
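+/* Editor's note: illustrative sketch only, not part of the generated file.
+ * BASE0..BASE7 sit 8 bytes apart (0x1200, 0x1208, ..., 0x1238), so entry i
+ * of the group can be addressed arithmetically, and ADDR_ALIGNSIZE
+ * (4096 == 1 << ADDR_ALIGNSHIFT) records the alignment the ADDR field
+ * expects. A hedged sketch, with cat_base_addr assumed 4 KiB aligned:
+ *
+ *   IMG_UINT32 offset = RGX_CR_BIF_CAT_BASE0 + (i * 8U);
+ *   IMG_UINT64 value  = cat_base_addr &
+ *                       ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK;
+ */
+
+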
+/*
+    Register RGX_CR_BIF_CAT_BASE_INDEX
+*/
+#define RGX_CR_BIF_CAT_BASE_INDEX                         (0x1240U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_MASKFULL                (IMG_UINT64_C(0x0007070707070707))
+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT              (48U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK             (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT               (40U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK              (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT              (32U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK             (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT               (24U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT               (16U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT             (8U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_SHIFT                (0U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_VCE0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0                       (0x1248U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL              (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT       (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK      (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT            (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK           (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT            (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN               (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT           (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN              (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_TE0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_TE0                        (0x1250U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_MASKFULL               (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT        (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK       (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT             (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK            (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT             (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN                (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT            (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_EN               (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_ALIST0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0                     (0x1260U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT     (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT          (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT          (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN             (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT         (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN            (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_VCE1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1                       (0x1268U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL              (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT       (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK      (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT            (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK           (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT            (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN               (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT           (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN              (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_TE1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_TE1                        (0x1270U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_MASKFULL               (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT        (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK       (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT             (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK            (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT             (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN                (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT            (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_EN               (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_ALIST1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1                     (0x1280U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT     (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT          (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT          (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN             (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT         (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN            (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_MMU_ENTRY_STATUS
+*/
+#define RGX_CR_BIF_MMU_ENTRY_STATUS                       (0x1288U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFF0F3))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT         (12U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK        (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT        (4U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT       (0U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
+/*
+    Register RGX_CR_BIF_MMU_ENTRY
+*/
+#define RGX_CR_BIF_MMU_ENTRY                              (0x1290U)
+#define RGX_CR_BIF_MMU_ENTRY_MASKFULL                     (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_SHIFT                 (1U)
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK                (0XFFFFFFFDU)
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_EN                    (0X00000002U)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_SHIFT                (0U)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_CLRMSK               (0XFFFFFFFEU)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_EN                   (0X00000001U)
+
+
+/*
+    Register RGX_CR_BIF_CTRL_INVAL
+*/
+#define RGX_CR_BIF_CTRL_INVAL                             (0x12A0U)
+#define RGX_CR_BIF_CTRL_INVAL_MASKFULL                    (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_SHIFT                  (3U)
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_CLRMSK                 (0XFFFFFFF7U)
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_EN                     (0X00000008U)
+#define RGX_CR_BIF_CTRL_INVAL_PC_SHIFT                    (2U)
+#define RGX_CR_BIF_CTRL_INVAL_PC_CLRMSK                   (0XFFFFFFFBU)
+#define RGX_CR_BIF_CTRL_INVAL_PC_EN                       (0X00000004U)
+#define RGX_CR_BIF_CTRL_INVAL_PD_SHIFT                    (1U)
+#define RGX_CR_BIF_CTRL_INVAL_PD_CLRMSK                   (0XFFFFFFFDU)
+#define RGX_CR_BIF_CTRL_INVAL_PD_EN                       (0X00000002U)
+#define RGX_CR_BIF_CTRL_INVAL_PT_SHIFT                    (0U)
+#define RGX_CR_BIF_CTRL_INVAL_PT_CLRMSK                   (0XFFFFFFFEU)
+#define RGX_CR_BIF_CTRL_INVAL_PT_EN                       (0X00000001U)
+
+
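+/* Editor's note: illustrative sketch only, not part of the generated file.
+ * Going by the macro names, each _EN bit selects one level of the MMU
+ * caches for invalidation, so the requests combine in a single value; the
+ * actual trigger and completion semantics are hardware-defined. A hedged
+ * compose:
+ *
+ *   IMG_UINT32 inval = RGX_CR_BIF_CTRL_INVAL_PC_EN |
+ *                      RGX_CR_BIF_CTRL_INVAL_PD_EN |
+ *                      RGX_CR_BIF_CTRL_INVAL_PT_EN;
+ */
+
+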
+/*
+    Register RGX_CR_BIF_CTRL
+*/
+#define RGX_CR_BIF_CTRL                                   (0x12A8U)
+#define RGX_CR_BIF_CTRL_MASKFULL                          (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT     (7U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK    (0XFFFFFF7FU)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN        (0X00000080U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT    (6U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK   (0XFFFFFFBFU)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN       (0X00000040U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT              (5U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK             (0XFFFFFFDFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN                 (0X00000020U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT              (4U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK             (0XFFFFFFEFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN                 (0X00000010U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_SHIFT                  (3U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK                 (0XFFFFFFF7U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_EN                     (0X00000008U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT                (2U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK               (0XFFFFFFFBU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_EN                   (0X00000004U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT              (1U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK             (0XFFFFFFFDU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN                 (0X00000002U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT              (0U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK             (0XFFFFFFFEU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN                 (0X00000001U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK0_MMU_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS                 (0x12B0U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL        (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT  (12U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT  (4U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN     (0X00000010U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT     (0U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK    (0XFFFFFFFEU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN        (0X00000001U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK0_REQ_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS                 (0x12B8U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL        (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT       (50U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK      (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN          (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT    (44U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK   (IMG_UINT64_C(0XFFFC0FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT    (40U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK   (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT   (4U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK  (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
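+/* Editorial note, not part of the original header: the ADDRESS field is
+ * stored pre-shifted right by ADDRESS_ALIGNSHIFT, i.e. in units of
+ * ADDRESS_ALIGNSIZE (16) bytes. A sketch of recovering the faulting byte
+ * address from a raw 64-bit read (ui64ReqStatus):
+ *
+ *   IMG_UINT64 ui64Addr = ui64ReqStatus &
+ *       ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK;
+ *   ui64Addr >>= RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT;
+ *   ui64Addr <<= RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT;
+ */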
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK1_MMU_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS                 (0x12C0U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL        (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT  (12U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT  (4U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN     (0X00000010U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT     (0U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK    (0XFFFFFFFEU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN        (0X00000001U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK1_REQ_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS                 (0x12C8U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL        (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT       (50U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK      (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN          (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT    (44U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK   (IMG_UINT64_C(0XFFFC0FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT    (40U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK   (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT   (4U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK  (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register RGX_CR_BIF_MMU_STATUS
+*/
+#define RGX_CR_BIF_MMU_STATUS                             (0x12D0U)
+#define RGX_CR_BIF_MMU_STATUS_MASKFULL                    (IMG_UINT64_C(0x000000001FFFFFF7))
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT              (28U)
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK             (0XEFFFFFFFU)
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_EN                 (0X10000000U)
+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT               (20U)
+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK              (0XF00FFFFFU)
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT               (12U)
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK              (0XFFF00FFFU)
+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT               (4U)
+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK              (0XFFFFF00FU)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_SHIFT               (2U)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_CLRMSK              (0XFFFFFFFBU)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_EN                  (0X00000004U)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_SHIFT                (1U)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK               (0XFFFFFFFDU)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_EN                   (0X00000002U)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_SHIFT                  (0U)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_CLRMSK                 (0XFFFFFFFEU)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_EN                     (0X00000001U)
+
+
+/*
+    Register RGX_CR_BIF_READS_EXT_STATUS
+*/
+#define RGX_CR_BIF_READS_EXT_STATUS                       (0x1320U)
+#define RGX_CR_BIF_READS_EXT_STATUS_MASKFULL              (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_SHIFT             (16U)
+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK            (0XFF80FFFFU)
+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT           (0U)
+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK          (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIF_READS_INT_STATUS
+*/
+#define RGX_CR_BIF_READS_INT_STATUS                       (0x1328U)
+#define RGX_CR_BIF_READS_INT_STATUS_MASKFULL              (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_BIF_READS_INT_STATUS_MMU_SHIFT             (16U)
+#define RGX_CR_BIF_READS_INT_STATUS_MMU_CLRMSK            (0XFF80FFFFU)
+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_SHIFT           (0U)
+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK          (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIFPM_READS_INT_STATUS
+*/
+#define RGX_CR_BIFPM_READS_INT_STATUS                     (0x1330U)
+#define RGX_CR_BIFPM_READS_INT_STATUS_MASKFULL            (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT         (0U)
+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK        (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIFPM_READS_EXT_STATUS
+*/
+#define RGX_CR_BIFPM_READS_EXT_STATUS                     (0x1338U)
+#define RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL            (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT         (0U)
+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK        (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIFPM_STATUS_MMU
+*/
+#define RGX_CR_BIFPM_STATUS_MMU                           (0x1350U)
+#define RGX_CR_BIFPM_STATUS_MMU_MASKFULL                  (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT            (0U)
+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK           (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_BIF_STATUS_MMU
+*/
+#define RGX_CR_BIF_STATUS_MMU                             (0x1358U)
+#define RGX_CR_BIF_STATUS_MMU_MASKFULL                    (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_SHIFT              (0U)
+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK             (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_READ
+*/
+#define RGX_CR_BIF_FAULT_READ                             (0x13E0U)
+#define RGX_CR_BIF_FAULT_READ_MASKFULL                    (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_SHIFT               (4U)
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_CLRMSK              (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT          (4U)
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE           (16U)
+
+
+/*
+    Register RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS
+*/
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS           (0x1430U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL  (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0X00000010U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN  (0X00000001U)
+
+
+/*
+    Register RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS
+*/
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS           (0x1438U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL  (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN    (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0XFFFC0FFFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register RGX_CR_MCU_FENCE
+*/
+#define RGX_CR_MCU_FENCE                                  (0x1740U)
+#define RGX_CR_MCU_FENCE_MASKFULL                         (IMG_UINT64_C(0x000007FFFFFFFFE0))
+#define RGX_CR_MCU_FENCE_DM_SHIFT                         (40U)
+#define RGX_CR_MCU_FENCE_DM_CLRMSK                        (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define RGX_CR_MCU_FENCE_DM_VERTEX                        (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MCU_FENCE_DM_PIXEL                         (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_MCU_FENCE_DM_COMPUTE                       (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_MCU_FENCE_DM_RAY_VERTEX                    (IMG_UINT64_C(0x0000030000000000))
+#define RGX_CR_MCU_FENCE_DM_RAY                           (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_MCU_FENCE_ADDR_SHIFT                       (5U)
+#define RGX_CR_MCU_FENCE_ADDR_CLRMSK                      (IMG_UINT64_C(0XFFFFFF000000001F))
+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT                  (5U)
+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE                   (32U)
+
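+/* Editorial note, not part of the original header: the _DM_* constants
+ * above are already positioned at the DM field (bits 42:40), so they can
+ * be OR-ed straight into the register value. Since ADDR_SHIFT equals
+ * ADDR_ALIGNSHIFT, a 32-byte-aligned byte address only needs masking.
+ * A sketch composing a fence word for the pixel data master:
+ *
+ *   IMG_UINT64 ui64Fence = RGX_CR_MCU_FENCE_DM_PIXEL |
+ *       (ui64ByteAddr & ~RGX_CR_MCU_FENCE_ADDR_CLRMSK);
+ */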
+
+/*
+    Register RGX_CR_SPFILTER_SIGNAL_DESCR
+*/
+#define RGX_CR_SPFILTER_SIGNAL_DESCR                      (0x2700U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MASKFULL             (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT           (0U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK          (0XFFFF0000U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT      (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE       (16U)
+
+
+/*
+    Register RGX_CR_SPFILTER_SIGNAL_DESCR_MIN
+*/
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN                  (0x2708U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL         (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT       (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT  (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE   (16U)
+
+
+/*
+    Register RGX_CR_SLC_CTRL_MISC
+*/
+#define RGX_CR_SLC_CTRL_MISC                              (0x3800U)
+#define RGX_CR_SLC_CTRL_MISC_MASKFULL                     (IMG_UINT64_C(0xFFFFFFFF00FF0107))
+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT          (32U)
+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK         (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT       (16U)
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 (IMG_UINT64_C(0x0000000000110000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1   (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE (IMG_UINT64_C(0x0000000000210000))
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_SHIFT                  (8U)
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_EN                     (IMG_UINT64_C(0X0000000000000100))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT  (2U)
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN     (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT (1U)
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN   (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT  (0U)
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN     (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_SLC_CTRL_FLUSH_INVAL
+*/
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL                       (0x3818U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL              (IMG_UINT64_C(0x00000000800007FF))
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT            (31U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK           (0X7FFFFFFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN               (0X80000000U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT   (10U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK  (0XFFFFFBFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN      (0X00000400U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT          (9U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK         (0XFFFFFDFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN             (0X00000200U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT          (8U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK         (0XFFFFFEFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN             (0X00000100U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT          (7U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK         (0XFFFFFF7FU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN             (0X00000080U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT          (6U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK         (0XFFFFFFBFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN             (0X00000040U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT    (5U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK   (0XFFFFFFDFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN       (0X00000020U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT          (4U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK         (0XFFFFFFEFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN             (0X00000010U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT      (3U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK     (0XFFFFFFF7U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN         (0X00000008U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT        (2U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK       (0XFFFFFFFBU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN           (0X00000004U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT           (1U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK          (0XFFFFFFFDU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN              (0X00000002U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT             (0U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK            (0XFFFFFFFEU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN                (0X00000001U)
+
+
+/*
+    Register RGX_CR_SLC_STATUS0
+*/
+#define RGX_CR_SLC_STATUS0                                (0x3820U)
+#define RGX_CR_SLC_STATUS0_MASKFULL                       (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT      (2U)
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK     (0XFFFFFFFBU)
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN         (0X00000004U)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_SHIFT            (1U)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK           (0XFFFFFFFDU)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_EN               (0X00000002U)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT            (0U)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK           (0XFFFFFFFEU)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN               (0X00000001U)
+
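+/* Editorial note, not part of the original header: a whole-cache flush
+ * and invalidate is requested through the ALL bit and its completion is
+ * observed in RGX_CR_SLC_STATUS0. A polling sketch, assuming the same
+ * hypothetical MMIO accessors as above:
+ *
+ *   OSWriteHWReg32(pvRegBase, RGX_CR_SLC_CTRL_FLUSH_INVAL,
+ *                  RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN);
+ *   while (OSReadHWReg32(pvRegBase, RGX_CR_SLC_STATUS0) &
+ *          (RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN |
+ *           RGX_CR_SLC_STATUS0_INVAL_PENDING_EN |
+ *           RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN))
+ *       ;  // spin while any flush/invalidate is pending (use a timeout in practice)
+ */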
+
+/*
+    Register RGX_CR_SLC_CTRL_BYPASS
+*/
+#define RGX_CR_SLC_CTRL_BYPASS                            (0x3828U)
+#define RGX_CR_SLC_CTRL_BYPASS_MASKFULL                   (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT        (27U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK       (0XF7FFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN           (0X08000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT               (26U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK              (0XFBFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_EN                  (0X04000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT          (25U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK         (0XFDFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN             (0X02000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT              (24U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK             (0XFEFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN                 (0X01000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT             (23U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK            (0XFF7FFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN                (0X00800000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT              (22U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK             (0XFFBFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_EN                 (0X00400000U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT             (21U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK            (0XFFDFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN                (0X00200000U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT               (20U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK              (0XFFEFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN                  (0X00100000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT              (19U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK             (0XFFF7FFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_EN                 (0X00080000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT              (18U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK             (0XFFFBFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_EN                 (0X00040000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT              (17U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK             (0XFFFDFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_EN                 (0X00020000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT           (16U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK          (0XFFFEFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN              (0X00010000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT          (15U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK         (0XFFFF7FFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN             (0X00008000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT              (14U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK             (0XFFFFBFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_EN                 (0X00004000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT             (13U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK            (0XFFFFDFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_EN                (0X00002000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT             (12U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK            (0XFFFFEFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_EN                (0X00001000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT           (11U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK          (0XFFFFF7FFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN              (0X00000800U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT           (10U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK          (0XFFFFFBFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN              (0X00000400U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT           (9U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK          (0XFFFFFDFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN              (0X00000200U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT               (8U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK              (0XFFFFFEFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_EN                  (0X00000100U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT               (7U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK              (0XFFFFFF7FU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_EN                  (0X00000080U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT               (6U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK              (0XFFFFFFBFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_EN                  (0X00000040U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT         (5U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK        (0XFFFFFFDFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN            (0X00000020U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT               (4U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK              (0XFFFFFFEFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_EN                  (0X00000010U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT           (3U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK          (0XFFFFFFF7U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN              (0X00000008U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT             (2U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK            (0XFFFFFFFBU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN                (0X00000004U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT                (1U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK               (0XFFFFFFFDU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_EN                   (0X00000002U)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_SHIFT                  (0U)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_CLRMSK                 (0XFFFFFFFEU)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_EN                     (0X00000001U)
+
+
+/*
+    Register RGX_CR_SLC_STATUS1
+*/
+#define RGX_CR_SLC_STATUS1                                (0x3870U)
+#define RGX_CR_SLC_STATUS1_MASKFULL                       (IMG_UINT64_C(0x800003FF03FFFFFF))
+#define RGX_CR_SLC_STATUS1_PAUSED_SHIFT                   (63U)
+#define RGX_CR_SLC_STATUS1_PAUSED_CLRMSK                  (IMG_UINT64_C(0X7FFFFFFFFFFFFFFF))
+#define RGX_CR_SLC_STATUS1_PAUSED_EN                      (IMG_UINT64_C(0X8000000000000000))
+#define RGX_CR_SLC_STATUS1_READS1_SHIFT                   (32U)
+#define RGX_CR_SLC_STATUS1_READS1_CLRMSK                  (IMG_UINT64_C(0XFFFFFC00FFFFFFFF))
+#define RGX_CR_SLC_STATUS1_READS0_SHIFT                   (16U)
+#define RGX_CR_SLC_STATUS1_READS0_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFFC00FFFF))
+#define RGX_CR_SLC_STATUS1_READS1_EXT_SHIFT               (8U)
+#define RGX_CR_SLC_STATUS1_READS1_EXT_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_SLC_STATUS1_READS0_EXT_SHIFT               (0U)
+#define RGX_CR_SLC_STATUS1_READS0_EXT_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+    Register RGX_CR_SLC_IDLE
+*/
+#define RGX_CR_SLC_IDLE                                   (0x3898U)
+#define RGX_CR_SLC_IDLE_MASKFULL                          (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_SLC_IDLE_IMGBV4_SHIFT                      (7U)
+#define RGX_CR_SLC_IDLE_IMGBV4_CLRMSK                     (0XFFFFFF7FU)
+#define RGX_CR_SLC_IDLE_IMGBV4_EN                         (0X00000080U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT                 (6U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK                (0XFFFFFFBFU)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_EN                    (0X00000040U)
+#define RGX_CR_SLC_IDLE_RBOFIFO_SHIFT                     (5U)
+#define RGX_CR_SLC_IDLE_RBOFIFO_CLRMSK                    (0XFFFFFFDFU)
+#define RGX_CR_SLC_IDLE_RBOFIFO_EN                        (0X00000020U)
+#define RGX_CR_SLC_IDLE_FRC_CONV_SHIFT                    (4U)
+#define RGX_CR_SLC_IDLE_FRC_CONV_CLRMSK                   (0XFFFFFFEFU)
+#define RGX_CR_SLC_IDLE_FRC_CONV_EN                       (0X00000010U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_SHIFT                    (3U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_CLRMSK                   (0XFFFFFFF7U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_EN                       (0X00000008U)
+#define RGX_CR_SLC_IDLE_VXD_CONV_SHIFT                    (2U)
+#define RGX_CR_SLC_IDLE_VXD_CONV_CLRMSK                   (0XFFFFFFFBU)
+#define RGX_CR_SLC_IDLE_VXD_CONV_EN                       (0X00000004U)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_SHIFT                   (1U)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_CLRMSK                  (0XFFFFFFFDU)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_EN                      (0X00000002U)
+#define RGX_CR_SLC_IDLE_CBAR_SHIFT                        (0U)
+#define RGX_CR_SLC_IDLE_CBAR_CLRMSK                       (0XFFFFFFFEU)
+#define RGX_CR_SLC_IDLE_CBAR_EN                           (0X00000001U)
+
+
+/*
+    Register RGX_CR_SLC_STATUS2
+*/
+#define RGX_CR_SLC_STATUS2                                (0x3908U)
+#define RGX_CR_SLC_STATUS2_MASKFULL                       (IMG_UINT64_C(0x000003FF03FFFFFF))
+#define RGX_CR_SLC_STATUS2_READS3_SHIFT                   (32U)
+#define RGX_CR_SLC_STATUS2_READS3_CLRMSK                  (IMG_UINT64_C(0XFFFFFC00FFFFFFFF))
+#define RGX_CR_SLC_STATUS2_READS2_SHIFT                   (16U)
+#define RGX_CR_SLC_STATUS2_READS2_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFFC00FFFF))
+#define RGX_CR_SLC_STATUS2_READS3_EXT_SHIFT               (8U)
+#define RGX_CR_SLC_STATUS2_READS3_EXT_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_SLC_STATUS2_READS2_EXT_SHIFT               (0U)
+#define RGX_CR_SLC_STATUS2_READS2_EXT_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+    Register RGX_CR_SLC_CTRL_MISC2
+*/
+#define RGX_CR_SLC_CTRL_MISC2                             (0x3930U)
+#define RGX_CR_SLC_CTRL_MISC2_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT         (0U)
+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK        (00000000U)
+
+
+/*
+    Register RGX_CR_SLC_CROSSBAR_LOAD_BALANCE
+*/
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE                  (0x3938U)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL         (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT     (0U)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK    (0XFFFFFFFEU)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN        (0X00000001U)
+
+
+/*
+    Register RGX_CR_USC_UVS0_CHECKSUM
+*/
+#define RGX_CR_USC_UVS0_CHECKSUM                          (0x5000U)
+#define RGX_CR_USC_UVS0_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK             (00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS1_CHECKSUM
+*/
+#define RGX_CR_USC_UVS1_CHECKSUM                          (0x5008U)
+#define RGX_CR_USC_UVS1_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK             (00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS2_CHECKSUM
+*/
+#define RGX_CR_USC_UVS2_CHECKSUM                          (0x5010U)
+#define RGX_CR_USC_UVS2_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK             (00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS3_CHECKSUM
+*/
+#define RGX_CR_USC_UVS3_CHECKSUM                          (0x5018U)
+#define RGX_CR_USC_UVS3_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK             (00000000U)
+
+
+/*
+    Register RGX_CR_PPP_SIGNATURE
+*/
+#define RGX_CR_PPP_SIGNATURE                              (0x5020U)
+#define RGX_CR_PPP_SIGNATURE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_SIGNATURE_VALUE_SHIFT                  (0U)
+#define RGX_CR_PPP_SIGNATURE_VALUE_CLRMSK                 (00000000U)
+
+
+/*
+    Register RGX_CR_TE_SIGNATURE
+*/
+#define RGX_CR_TE_SIGNATURE                               (0x5028U)
+#define RGX_CR_TE_SIGNATURE_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TE_SIGNATURE_VALUE_SHIFT                   (0U)
+#define RGX_CR_TE_SIGNATURE_VALUE_CLRMSK                  (00000000U)
+
+
+/*
+    Register RGX_CR_TE_CHECKSUM
+*/
+#define RGX_CR_TE_CHECKSUM                                (0x5110U)
+#define RGX_CR_TE_CHECKSUM_MASKFULL                       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT                    (0U)
+#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK                   (00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVB_CHECKSUM
+*/
+#define RGX_CR_USC_UVB_CHECKSUM                           (0x5118U)
+#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK              (00000000U)
+
+
+/*
+    Register RGX_CR_VCE_CHECKSUM
+*/
+#define RGX_CR_VCE_CHECKSUM                               (0x5030U)
+#define RGX_CR_VCE_CHECKSUM_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT                   (0U)
+#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK                  (00000000U)
+
+
+/*
+    Register RGX_CR_ISP_PDS_CHECKSUM
+*/
+#define RGX_CR_ISP_PDS_CHECKSUM                           (0x5038U)
+#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK              (00000000U)
+
+
+/*
+    Register RGX_CR_ISP_TPF_CHECKSUM
+*/
+#define RGX_CR_ISP_TPF_CHECKSUM                           (0x5040U)
+#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK              (00000000U)
+
+
+/*
+    Register RGX_CR_TFPU_PLANE0_CHECKSUM
+*/
+#define RGX_CR_TFPU_PLANE0_CHECKSUM                       (0x5048U)
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT           (0U)
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK          (00000000U)
+
+
+/*
+    Register RGX_CR_TFPU_PLANE1_CHECKSUM
+*/
+#define RGX_CR_TFPU_PLANE1_CHECKSUM                       (0x5050U)
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT           (0U)
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK          (00000000U)
+
+
+/*
+    Register RGX_CR_PBE_CHECKSUM
+*/
+#define RGX_CR_PBE_CHECKSUM                               (0x5058U)
+#define RGX_CR_PBE_CHECKSUM_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT                   (0U)
+#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK                  (00000000U)
+
+
+/*
+    Register RGX_CR_PDS_DOUTM_STM_SIGNATURE
+*/
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE                    (0x5060U)
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT        (0U)
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK       (00000000U)
+
+
+/*
+    Register RGX_CR_IFPU_ISP_CHECKSUM
+*/
+#define RGX_CR_IFPU_ISP_CHECKSUM                          (0x5068U)
+#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK             (00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS4_CHECKSUM
+*/
+#define RGX_CR_USC_UVS4_CHECKSUM                          (0x5100U)
+#define RGX_CR_USC_UVS4_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK             (00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS5_CHECKSUM
+*/
+#define RGX_CR_USC_UVS5_CHECKSUM                          (0x5108U)
+#define RGX_CR_USC_UVS5_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK             (00000000U)
+
+
+/*
+    Register RGX_CR_PPP_CLIP_CHECKSUM
+*/
+#define RGX_CR_PPP_CLIP_CHECKSUM                          (0x5120U)
+#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK             (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_TA_PHASE
+*/
+#define RGX_CR_PERF_TA_PHASE                              (0x6008U)
+#define RGX_CR_PERF_TA_PHASE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_PHASE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_TA_PHASE_COUNT_CLRMSK                 (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_3D_PHASE
+*/
+#define RGX_CR_PERF_3D_PHASE                              (0x6010U)
+#define RGX_CR_PERF_3D_PHASE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_PHASE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_3D_PHASE_COUNT_CLRMSK                 (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_COMPUTE_PHASE
+*/
+#define RGX_CR_PERF_COMPUTE_PHASE                         (0x6018U)
+#define RGX_CR_PERF_COMPUTE_PHASE_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT             (0U)
+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK            (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_TA_CYCLE
+*/
+#define RGX_CR_PERF_TA_CYCLE                              (0x6020U)
+#define RGX_CR_PERF_TA_CYCLE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_CYCLE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_TA_CYCLE_COUNT_CLRMSK                 (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_3D_CYCLE
+*/
+#define RGX_CR_PERF_3D_CYCLE                              (0x6028U)
+#define RGX_CR_PERF_3D_CYCLE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_CYCLE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_3D_CYCLE_COUNT_CLRMSK                 (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_COMPUTE_CYCLE
+*/
+#define RGX_CR_PERF_COMPUTE_CYCLE                         (0x6030U)
+#define RGX_CR_PERF_COMPUTE_CYCLE_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT             (0U)
+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK            (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_TA_OR_3D_CYCLE
+*/
+#define RGX_CR_PERF_TA_OR_3D_CYCLE                        (0x6038U)
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT            (0U)
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK           (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_INITIAL_TA_CYCLE
+*/
+#define RGX_CR_PERF_INITIAL_TA_CYCLE                      (0x6040U)
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC0_READ_STALL
+*/
+#define RGX_CR_PERF_SLC0_READ_STALL                       (0x60B8U)
+#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK          (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC0_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC0_WRITE_STALL                      (0x60C0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC1_READ_STALL
+*/
+#define RGX_CR_PERF_SLC1_READ_STALL                       (0x60E0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK          (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC1_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC1_WRITE_STALL                      (0x60E8U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC2_READ_STALL
+*/
+#define RGX_CR_PERF_SLC2_READ_STALL                       (0x6158U)
+#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK          (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC2_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC2_WRITE_STALL                      (0x6160U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC3_READ_STALL
+*/
+#define RGX_CR_PERF_SLC3_READ_STALL                       (0x6180U)
+#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK          (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC3_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC3_WRITE_STALL                      (0x6188U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_3D_SPINUP
+*/
+#define RGX_CR_PERF_3D_SPINUP                             (0x6220U)
+#define RGX_CR_PERF_3D_SPINUP_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_SPINUP_CYCLES_SHIFT                (0U)
+#define RGX_CR_PERF_3D_SPINUP_CYCLES_CLRMSK               (00000000U)
+
+
+/*
+    Register RGX_CR_AXI_ACE_LITE_CONFIGURATION
+*/
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION                 (0x38C0U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL        (IMG_UINT64_C(0x00001FFFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT (37U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0XFFFFE01FFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT (36U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN (IMG_UINT64_C(0X0000001000000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT (35U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK (IMG_UINT64_C(0XFFFFFFF7FFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN (IMG_UINT64_C(0X0000000800000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT (34U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN (IMG_UINT64_C(0X0000000400000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT (30U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC3FFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT (26U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC3FFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT (22U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFC3FFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT (20U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT (18U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT (16U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT (14U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT (12U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT (10U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT (8U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT (4U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT (0U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register RGX_CR_POWER_ESTIMATE_RESULT
+*/
+#define RGX_CR_POWER_ESTIMATE_RESULT                      (0x6328U)
+#define RGX_CR_POWER_ESTIMATE_RESULT_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT          (0U)
+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_TA_PERF
+*/
+#define RGX_CR_TA_PERF                                    (0x7600U)
+#define RGX_CR_TA_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TA_PERF_CLR_3_SHIFT                        (4U)
+#define RGX_CR_TA_PERF_CLR_3_CLRMSK                       (0XFFFFFFEFU)
+#define RGX_CR_TA_PERF_CLR_3_EN                           (0X00000010U)
+#define RGX_CR_TA_PERF_CLR_2_SHIFT                        (3U)
+#define RGX_CR_TA_PERF_CLR_2_CLRMSK                       (0XFFFFFFF7U)
+#define RGX_CR_TA_PERF_CLR_2_EN                           (0X00000008U)
+#define RGX_CR_TA_PERF_CLR_1_SHIFT                        (2U)
+#define RGX_CR_TA_PERF_CLR_1_CLRMSK                       (0XFFFFFFFBU)
+#define RGX_CR_TA_PERF_CLR_1_EN                           (0X00000004U)
+#define RGX_CR_TA_PERF_CLR_0_SHIFT                        (1U)
+#define RGX_CR_TA_PERF_CLR_0_CLRMSK                       (0XFFFFFFFDU)
+#define RGX_CR_TA_PERF_CLR_0_EN                           (0X00000002U)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_CLRMSK                 (0XFFFFFFFEU)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_EN                     (0X00000001U)
+
+
+/*
+    Register RGX_CR_TA_PERF_SELECT0
+*/
+#define RGX_CR_TA_PERF_SELECT0                            (0x7608U)
+#define RGX_CR_TA_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define RGX_CR_TA_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
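+/* Editorial note, not part of the original header: the PERF blocks all
+ * follow one pattern: program a SELECT register, enable counting via the
+ * CTRL_ENABLE bit, then read the matching COUNTER register. A sketch,
+ * where uiGroup/uiBit are hypothetical event selectors and the 64-bit
+ * accessor OSWriteHWReg64() is likewise assumed:
+ *
+ *   OSWriteHWReg64(pvRegBase, RGX_CR_TA_PERF_SELECT0,
+ *       ((IMG_UINT64)uiGroup << RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT) |
+ *       ((IMG_UINT64)uiBit   << RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT));
+ *   OSWriteHWReg32(pvRegBase, RGX_CR_TA_PERF, RGX_CR_TA_PERF_CTRL_ENABLE_EN);
+ *   // ... run the workload, then read the result:
+ *   IMG_UINT32 ui32Count = OSReadHWReg32(pvRegBase, RGX_CR_TA_PERF_COUNTER_0);
+ */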
+
+/*
+    Register RGX_CR_TA_PERF_SELECT1
+*/
+#define RGX_CR_TA_PERF_SELECT1                            (0x7610U)
+#define RGX_CR_TA_PERF_SELECT1_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_MODE_SHIFT                 (21U)
+#define RGX_CR_TA_PERF_SELECT1_MODE_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_MODE_EN                    (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_SELECT2
+*/
+#define RGX_CR_TA_PERF_SELECT2                            (0x7618U)
+#define RGX_CR_TA_PERF_SELECT2_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_MODE_SHIFT                 (21U)
+#define RGX_CR_TA_PERF_SELECT2_MODE_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_MODE_EN                    (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_SELECT3
+*/
+#define RGX_CR_TA_PERF_SELECT3                            (0x7620U)
+#define RGX_CR_TA_PERF_SELECT3_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_MODE_SHIFT                 (21U)
+#define RGX_CR_TA_PERF_SELECT3_MODE_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_MODE_EN                    (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_SELECTED_BITS
+*/
+#define RGX_CR_TA_PERF_SELECTED_BITS                      (0x7648U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT           (48U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK          (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT           (32U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK          (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT           (16U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK          (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_COUNTER_0
+*/
+#define RGX_CR_TA_PERF_COUNTER_0                          (0x7650U)
+#define RGX_CR_TA_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define RGX_CR_TA_PERF_COUNTER_0_REG_CLRMSK               (00000000U)
+
+
+/*
+    Register RGX_CR_TA_PERF_COUNTER_1
+*/
+#define RGX_CR_TA_PERF_COUNTER_1                          (0x7658U)
+#define RGX_CR_TA_PERF_COUNTER_1_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_1_REG_SHIFT                (0U)
+#define RGX_CR_TA_PERF_COUNTER_1_REG_CLRMSK               (00000000U)
+
+
+/*
+    Register RGX_CR_TA_PERF_COUNTER_2
+*/
+#define RGX_CR_TA_PERF_COUNTER_2                          (0x7660U)
+#define RGX_CR_TA_PERF_COUNTER_2_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_2_REG_SHIFT                (0U)
+#define RGX_CR_TA_PERF_COUNTER_2_REG_CLRMSK               (00000000U)
+
+
+/*
+    Register RGX_CR_TA_PERF_COUNTER_3
+*/
+#define RGX_CR_TA_PERF_COUNTER_3                          (0x7668U)
+#define RGX_CR_TA_PERF_COUNTER_3_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_3_REG_SHIFT                (0U)
+#define RGX_CR_TA_PERF_COUNTER_3_REG_CLRMSK               (00000000U)
+
+
+/*
+    Register RGX_CR_RASTERISATION_PERF
+*/
+#define RGX_CR_RASTERISATION_PERF                         (0x7700U)
+#define RGX_CR_RASTERISATION_PERF_MASKFULL                (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_RASTERISATION_PERF_CLR_3_SHIFT             (4U)
+#define RGX_CR_RASTERISATION_PERF_CLR_3_CLRMSK            (0XFFFFFFEFU)
+#define RGX_CR_RASTERISATION_PERF_CLR_3_EN                (0X00000010U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_SHIFT             (3U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_CLRMSK            (0XFFFFFFF7U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_EN                (0X00000008U)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_SHIFT             (2U)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_CLRMSK            (0XFFFFFFFBU)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_EN                (0X00000004U)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_SHIFT             (1U)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_CLRMSK            (0XFFFFFFFDU)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_EN                (0X00000002U)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT       (0U)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK      (0XFFFFFFFEU)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_EN          (0X00000001U)
+
+
+/*
+    Register RGX_CR_RASTERISATION_PERF_SELECT0
+*/
+#define RGX_CR_RASTERISATION_PERF_SELECT0                 (0x7708U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MASKFULL        (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT      (21U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_EN         (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_RASTERISATION_PERF_COUNTER_0
+*/
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0               (0x7750U)
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT     (0U)
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK    (00000000U)
+
+
+/*
+    Register RGX_CR_HUB_BIFPMCACHE_PERF
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF                        (0x7800U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_MASKFULL               (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT            (4U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK           (0XFFFFFFEFU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN               (0X00000010U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT            (3U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK           (0XFFFFFFF7U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN               (0X00000008U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT            (2U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK           (0XFFFFFFFBU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN               (0X00000004U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT            (1U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK           (0XFFFFFFFDU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN               (0X00000002U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT      (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK     (0XFFFFFFFEU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN         (0X00000001U)
+
+
+/*
+    Register RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0                (0x7808U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL       (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT     (21U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN        (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0              (0x7850U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT    (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK   (0x00000000U)
+
+
+/*
+    Register RGX_CR_TPU_MCU_L0_PERF
+*/
+#define RGX_CR_TPU_MCU_L0_PERF                            (0x7900U)
+#define RGX_CR_TPU_MCU_L0_PERF_MASKFULL                   (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT                (4U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK               (0XFFFFFFEFU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_EN                   (0X00000010U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT                (3U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK               (0XFFFFFFF7U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_EN                   (0X00000008U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT                (2U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK               (0XFFFFFFFBU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_EN                   (0X00000004U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT                (1U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK               (0XFFFFFFFDU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_EN                   (0X00000002U)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT          (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK         (0XFFFFFFFEU)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN             (0X00000001U)
+
+
+/*
+    Register RGX_CR_TPU_MCU_L0_PERF_SELECT0
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0                    (0x7908U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL           (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT    (48U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK   (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT    (32U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK   (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT         (21U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN            (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT   (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TPU_MCU_L0_PERF_COUNTER_0
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0                  (0x7950U)
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT        (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK       (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_PERF
+*/
+#define RGX_CR_USC_PERF                                   (0x8100U)
+#define RGX_CR_USC_PERF_MASKFULL                          (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_USC_PERF_CLR_3_SHIFT                       (4U)
+#define RGX_CR_USC_PERF_CLR_3_CLRMSK                      (0XFFFFFFEFU)
+#define RGX_CR_USC_PERF_CLR_3_EN                          (0X00000010U)
+#define RGX_CR_USC_PERF_CLR_2_SHIFT                       (3U)
+#define RGX_CR_USC_PERF_CLR_2_CLRMSK                      (0XFFFFFFF7U)
+#define RGX_CR_USC_PERF_CLR_2_EN                          (0X00000008U)
+#define RGX_CR_USC_PERF_CLR_1_SHIFT                       (2U)
+#define RGX_CR_USC_PERF_CLR_1_CLRMSK                      (0XFFFFFFFBU)
+#define RGX_CR_USC_PERF_CLR_1_EN                          (0X00000004U)
+#define RGX_CR_USC_PERF_CLR_0_SHIFT                       (1U)
+#define RGX_CR_USC_PERF_CLR_0_CLRMSK                      (0XFFFFFFFDU)
+#define RGX_CR_USC_PERF_CLR_0_EN                          (0X00000002U)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_SHIFT                 (0U)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_CLRMSK                (0XFFFFFFFEU)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_EN                    (0X00000001U)
+
+
+/*
+    Register RGX_CR_USC_PERF_SELECT0
+*/
+#define RGX_CR_USC_PERF_SELECT0                           (0x8108U)
+#define RGX_CR_USC_PERF_SELECT0_MASKFULL                  (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT           (48U)
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK          (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT           (32U)
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK          (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_MODE_SHIFT                (21U)
+#define RGX_CR_USC_PERF_SELECT0_MODE_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_MODE_EN                   (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT        (16U)
+#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT          (0U)
+#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_USC_PERF_COUNTER_0
+*/
+#define RGX_CR_USC_PERF_COUNTER_0                         (0x8150U)
+#define RGX_CR_USC_PERF_COUNTER_0_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_PERF_COUNTER_0_REG_SHIFT               (0U)
+#define RGX_CR_USC_PERF_COUNTER_0_REG_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_JONES_IDLE
+*/
+#define RGX_CR_JONES_IDLE                                 (0x8328U)
+#define RGX_CR_JONES_IDLE_MASKFULL                        (IMG_UINT64_C(0x0000000000007FFF))
+#define RGX_CR_JONES_IDLE_TDM_SHIFT                       (14U)
+#define RGX_CR_JONES_IDLE_TDM_CLRMSK                      (0XFFFFBFFFU)
+#define RGX_CR_JONES_IDLE_TDM_EN                          (0X00004000U)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT                (13U)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK               (0XFFFFDFFFU)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN                   (0X00002000U)
+#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT                    (12U)
+#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK                   (0XFFFFEFFFU)
+#define RGX_CR_JONES_IDLE_FB_CDC_EN                       (0X00001000U)
+#define RGX_CR_JONES_IDLE_MMU_SHIFT                       (11U)
+#define RGX_CR_JONES_IDLE_MMU_CLRMSK                      (0XFFFFF7FFU)
+#define RGX_CR_JONES_IDLE_MMU_EN                          (0X00000800U)
+#define RGX_CR_JONES_IDLE_TLA_SHIFT                       (10U)
+#define RGX_CR_JONES_IDLE_TLA_CLRMSK                      (0XFFFFFBFFU)
+#define RGX_CR_JONES_IDLE_TLA_EN                          (0X00000400U)
+#define RGX_CR_JONES_IDLE_GARTEN_SHIFT                    (9U)
+#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK                   (0XFFFFFDFFU)
+#define RGX_CR_JONES_IDLE_GARTEN_EN                       (0X00000200U)
+#define RGX_CR_JONES_IDLE_HOSTIF_SHIFT                    (8U)
+#define RGX_CR_JONES_IDLE_HOSTIF_CLRMSK                   (0XFFFFFEFFU)
+#define RGX_CR_JONES_IDLE_HOSTIF_EN                       (0X00000100U)
+#define RGX_CR_JONES_IDLE_SOCIF_SHIFT                     (7U)
+#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK                    (0XFFFFFF7FU)
+#define RGX_CR_JONES_IDLE_SOCIF_EN                        (0X00000080U)
+#define RGX_CR_JONES_IDLE_TILING_SHIFT                    (6U)
+#define RGX_CR_JONES_IDLE_TILING_CLRMSK                   (0XFFFFFFBFU)
+#define RGX_CR_JONES_IDLE_TILING_EN                       (0X00000040U)
+#define RGX_CR_JONES_IDLE_IPP_SHIFT                       (5U)
+#define RGX_CR_JONES_IDLE_IPP_CLRMSK                      (0XFFFFFFDFU)
+#define RGX_CR_JONES_IDLE_IPP_EN                          (0X00000020U)
+#define RGX_CR_JONES_IDLE_USCS_SHIFT                      (4U)
+#define RGX_CR_JONES_IDLE_USCS_CLRMSK                     (0XFFFFFFEFU)
+#define RGX_CR_JONES_IDLE_USCS_EN                         (0X00000010U)
+#define RGX_CR_JONES_IDLE_PM_SHIFT                        (3U)
+#define RGX_CR_JONES_IDLE_PM_CLRMSK                       (0XFFFFFFF7U)
+#define RGX_CR_JONES_IDLE_PM_EN                           (0X00000008U)
+#define RGX_CR_JONES_IDLE_CDM_SHIFT                       (2U)
+#define RGX_CR_JONES_IDLE_CDM_CLRMSK                      (0XFFFFFFFBU)
+#define RGX_CR_JONES_IDLE_CDM_EN                          (0X00000004U)
+#define RGX_CR_JONES_IDLE_VDM_SHIFT                       (1U)
+#define RGX_CR_JONES_IDLE_VDM_CLRMSK                      (0XFFFFFFFDU)
+#define RGX_CR_JONES_IDLE_VDM_EN                          (0X00000002U)
+#define RGX_CR_JONES_IDLE_BIF_SHIFT                       (0U)
+#define RGX_CR_JONES_IDLE_BIF_CLRMSK                      (0XFFFFFFFEU)
+#define RGX_CR_JONES_IDLE_BIF_EN                          (0X00000001U)
+
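+/*
+ * Usage sketch (illustrative): RGX_CR_JONES_IDLE reports one idle bit per
+ * unit, so "fully idle" is simply every implemented bit set. rgx_read32()
+ * is a hypothetical MMIO read helper; a real driver would bound the loop
+ * with a timeout.
+ *
+ *	u32 ui32Idle;
+ *
+ *	do {
+ *		ui32Idle = rgx_read32(RGX_CR_JONES_IDLE);
+ *	} while ((ui32Idle & (u32)RGX_CR_JONES_IDLE_MASKFULL) !=
+ *		 (u32)RGX_CR_JONES_IDLE_MASKFULL);
+ */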
+
+/*
+    Register RGX_CR_TORNADO_PERF
+*/
+#define RGX_CR_TORNADO_PERF                               (0x8228U)
+#define RGX_CR_TORNADO_PERF_MASKFULL                      (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TORNADO_PERF_CLR_3_SHIFT                   (4U)
+#define RGX_CR_TORNADO_PERF_CLR_3_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_TORNADO_PERF_CLR_3_EN                      (0X00000010U)
+#define RGX_CR_TORNADO_PERF_CLR_2_SHIFT                   (3U)
+#define RGX_CR_TORNADO_PERF_CLR_2_CLRMSK                  (0XFFFFFFF7U)
+#define RGX_CR_TORNADO_PERF_CLR_2_EN                      (0X00000008U)
+#define RGX_CR_TORNADO_PERF_CLR_1_SHIFT                   (2U)
+#define RGX_CR_TORNADO_PERF_CLR_1_CLRMSK                  (0XFFFFFFFBU)
+#define RGX_CR_TORNADO_PERF_CLR_1_EN                      (0X00000004U)
+#define RGX_CR_TORNADO_PERF_CLR_0_SHIFT                   (1U)
+#define RGX_CR_TORNADO_PERF_CLR_0_CLRMSK                  (0XFFFFFFFDU)
+#define RGX_CR_TORNADO_PERF_CLR_0_EN                      (0X00000002U)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT             (0U)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK            (0XFFFFFFFEU)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_EN                (0X00000001U)
+
+
+/*
+    Register RGX_CR_TORNADO_PERF_SELECT0
+*/
+#define RGX_CR_TORNADO_PERF_SELECT0                       (0x8230U)
+#define RGX_CR_TORNADO_PERF_SELECT0_MASKFULL              (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT       (48U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK      (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT       (32U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK      (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_SHIFT            (21U)
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_EN               (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT    (16U)
+#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT      (0U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TORNADO_PERF_COUNTER_0
+*/
+#define RGX_CR_TORNADO_PERF_COUNTER_0                     (0x8268U)
+#define RGX_CR_TORNADO_PERF_COUNTER_0_MASKFULL            (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT           (0U)
+#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_TEXAS_PERF
+*/
+#define RGX_CR_TEXAS_PERF                                 (0x8290U)
+#define RGX_CR_TEXAS_PERF_MASKFULL                        (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_TEXAS_PERF_CLR_5_SHIFT                     (6U)
+#define RGX_CR_TEXAS_PERF_CLR_5_CLRMSK                    (0XFFFFFFBFU)
+#define RGX_CR_TEXAS_PERF_CLR_5_EN                        (0X00000040U)
+#define RGX_CR_TEXAS_PERF_CLR_4_SHIFT                     (5U)
+#define RGX_CR_TEXAS_PERF_CLR_4_CLRMSK                    (0XFFFFFFDFU)
+#define RGX_CR_TEXAS_PERF_CLR_4_EN                        (0X00000020U)
+#define RGX_CR_TEXAS_PERF_CLR_3_SHIFT                     (4U)
+#define RGX_CR_TEXAS_PERF_CLR_3_CLRMSK                    (0XFFFFFFEFU)
+#define RGX_CR_TEXAS_PERF_CLR_3_EN                        (0X00000010U)
+#define RGX_CR_TEXAS_PERF_CLR_2_SHIFT                     (3U)
+#define RGX_CR_TEXAS_PERF_CLR_2_CLRMSK                    (0XFFFFFFF7U)
+#define RGX_CR_TEXAS_PERF_CLR_2_EN                        (0X00000008U)
+#define RGX_CR_TEXAS_PERF_CLR_1_SHIFT                     (2U)
+#define RGX_CR_TEXAS_PERF_CLR_1_CLRMSK                    (0XFFFFFFFBU)
+#define RGX_CR_TEXAS_PERF_CLR_1_EN                        (0X00000004U)
+#define RGX_CR_TEXAS_PERF_CLR_0_SHIFT                     (1U)
+#define RGX_CR_TEXAS_PERF_CLR_0_CLRMSK                    (0XFFFFFFFDU)
+#define RGX_CR_TEXAS_PERF_CLR_0_EN                        (0X00000002U)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT               (0U)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK              (0XFFFFFFFEU)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_EN                  (0X00000001U)
+
+
+/*
+    Register RGX_CR_TEXAS_PERF_SELECT0
+*/
+#define RGX_CR_TEXAS_PERF_SELECT0                         (0x8298U)
+#define RGX_CR_TEXAS_PERF_SELECT0_MASKFULL                (IMG_UINT64_C(0x3FFF3FFF803FFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT         (48U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK        (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT         (32U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK        (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_SHIFT              (31U)
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK             (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_EN                 (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT      (16U)
+#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFC0FFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT        (0U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
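+/*
+ * Note: the TEXAS (and BLACKPEARL, below) SELECT0 layout differs from the
+ * other *_PERF_SELECT0 registers in this file: MODE sits at bit 31 rather
+ * than bit 21, and GROUP_SELECT is six bits wide rather than five, which
+ * is reflected in their 0x3FFF3FFF803FFFFF MASKFULL values.
+ */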
+
+/*
+    Register RGX_CR_TEXAS_PERF_COUNTER_0
+*/
+#define RGX_CR_TEXAS_PERF_COUNTER_0                       (0x82D8U)
+#define RGX_CR_TEXAS_PERF_COUNTER_0_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT             (0U)
+#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK            (0x00000000U)
+
+
+/*
+    Register RGX_CR_JONES_PERF
+*/
+#define RGX_CR_JONES_PERF                                 (0x8330U)
+#define RGX_CR_JONES_PERF_MASKFULL                        (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_JONES_PERF_CLR_3_SHIFT                     (4U)
+#define RGX_CR_JONES_PERF_CLR_3_CLRMSK                    (0XFFFFFFEFU)
+#define RGX_CR_JONES_PERF_CLR_3_EN                        (0X00000010U)
+#define RGX_CR_JONES_PERF_CLR_2_SHIFT                     (3U)
+#define RGX_CR_JONES_PERF_CLR_2_CLRMSK                    (0XFFFFFFF7U)
+#define RGX_CR_JONES_PERF_CLR_2_EN                        (0X00000008U)
+#define RGX_CR_JONES_PERF_CLR_1_SHIFT                     (2U)
+#define RGX_CR_JONES_PERF_CLR_1_CLRMSK                    (0XFFFFFFFBU)
+#define RGX_CR_JONES_PERF_CLR_1_EN                        (0X00000004U)
+#define RGX_CR_JONES_PERF_CLR_0_SHIFT                     (1U)
+#define RGX_CR_JONES_PERF_CLR_0_CLRMSK                    (0XFFFFFFFDU)
+#define RGX_CR_JONES_PERF_CLR_0_EN                        (0X00000002U)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_SHIFT               (0U)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_CLRMSK              (0XFFFFFFFEU)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_EN                  (0X00000001U)
+
+
+/*
+    Register RGX_CR_JONES_PERF_SELECT0
+*/
+#define RGX_CR_JONES_PERF_SELECT0                         (0x8338U)
+#define RGX_CR_JONES_PERF_SELECT0_MASKFULL                (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT         (48U)
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK        (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT         (32U)
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK        (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_MODE_SHIFT              (21U)
+#define RGX_CR_JONES_PERF_SELECT0_MODE_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_MODE_EN                 (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT      (16U)
+#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT        (0U)
+#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_JONES_PERF_COUNTER_0
+*/
+#define RGX_CR_JONES_PERF_COUNTER_0                       (0x8368U)
+#define RGX_CR_JONES_PERF_COUNTER_0_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_JONES_PERF_COUNTER_0_REG_SHIFT             (0U)
+#define RGX_CR_JONES_PERF_COUNTER_0_REG_CLRMSK            (0x00000000U)
+
+
+/*
+    Register RGX_CR_BLACKPEARL_PERF
+*/
+#define RGX_CR_BLACKPEARL_PERF                            (0x8400U)
+#define RGX_CR_BLACKPEARL_PERF_MASKFULL                   (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_SHIFT                (6U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_CLRMSK               (0XFFFFFFBFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_EN                   (0X00000040U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_SHIFT                (5U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_EN                   (0X00000020U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_SHIFT                (4U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_CLRMSK               (0XFFFFFFEFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_EN                   (0X00000010U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_SHIFT                (3U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_CLRMSK               (0XFFFFFFF7U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_EN                   (0X00000008U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_SHIFT                (2U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_CLRMSK               (0XFFFFFFFBU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_EN                   (0X00000004U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_SHIFT                (1U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_CLRMSK               (0XFFFFFFFDU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_EN                   (0X00000002U)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT          (0U)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK         (0XFFFFFFFEU)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN             (0X00000001U)
+
+
+/*
+    Register RGX_CR_BLACKPEARL_PERF_SELECT0
+*/
+#define RGX_CR_BLACKPEARL_PERF_SELECT0                    (0x8408U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MASKFULL           (IMG_UINT64_C(0x3FFF3FFF803FFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT    (48U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK   (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT    (32U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK   (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT         (31U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_EN            (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC0FFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT   (0U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_BLACKPEARL_PERF_COUNTER_0
+*/
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0                  (0x8448U)
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT        (0U)
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK       (0x00000000U)
+
+
+/*
+    Register RGX_CR_PBE_PERF
+*/
+#define RGX_CR_PBE_PERF                                   (0x8478U)
+#define RGX_CR_PBE_PERF_MASKFULL                          (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_PBE_PERF_CLR_3_SHIFT                       (4U)
+#define RGX_CR_PBE_PERF_CLR_3_CLRMSK                      (0XFFFFFFEFU)
+#define RGX_CR_PBE_PERF_CLR_3_EN                          (0X00000010U)
+#define RGX_CR_PBE_PERF_CLR_2_SHIFT                       (3U)
+#define RGX_CR_PBE_PERF_CLR_2_CLRMSK                      (0XFFFFFFF7U)
+#define RGX_CR_PBE_PERF_CLR_2_EN                          (0X00000008U)
+#define RGX_CR_PBE_PERF_CLR_1_SHIFT                       (2U)
+#define RGX_CR_PBE_PERF_CLR_1_CLRMSK                      (0XFFFFFFFBU)
+#define RGX_CR_PBE_PERF_CLR_1_EN                          (0X00000004U)
+#define RGX_CR_PBE_PERF_CLR_0_SHIFT                       (1U)
+#define RGX_CR_PBE_PERF_CLR_0_CLRMSK                      (0XFFFFFFFDU)
+#define RGX_CR_PBE_PERF_CLR_0_EN                          (0X00000002U)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_SHIFT                 (0U)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_CLRMSK                (0XFFFFFFFEU)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_EN                    (0X00000001U)
+
+
+/*
+    Register RGX_CR_PBE_PERF_SELECT0
+*/
+#define RGX_CR_PBE_PERF_SELECT0                           (0x8480U)
+#define RGX_CR_PBE_PERF_SELECT0_MASKFULL                  (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT           (48U)
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK          (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT           (32U)
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK          (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_MODE_SHIFT                (21U)
+#define RGX_CR_PBE_PERF_SELECT0_MODE_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_MODE_EN                   (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT        (16U)
+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT          (0U)
+#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_PBE_PERF_COUNTER_0
+*/
+#define RGX_CR_PBE_PERF_COUNTER_0                         (0x84B0U)
+#define RGX_CR_PBE_PERF_COUNTER_0_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_PERF_COUNTER_0_REG_SHIFT               (0U)
+#define RGX_CR_PBE_PERF_COUNTER_0_REG_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_OCP_REVINFO
+*/
+#define RGX_CR_OCP_REVINFO                                (0x9000U)
+#define RGX_CR_OCP_REVINFO_MASKFULL                       (IMG_UINT64_C(0x00000007FFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT            (33U)
+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK           (IMG_UINT64_C(0XFFFFFFF9FFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT            (32U)
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK           (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_EN               (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_OCP_REVINFO_REVISION_SHIFT                 (0U)
+#define RGX_CR_OCP_REVINFO_REVISION_CLRMSK                (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_OCP_SYSCONFIG
+*/
+#define RGX_CR_OCP_SYSCONFIG                              (0x9010U)
+#define RGX_CR_OCP_SYSCONFIG_MASKFULL                     (IMG_UINT64_C(0x0000000000000FFF))
+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT     (10U)
+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK    (0XFFFFF3FFU)
+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT     (8U)
+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK    (0XFFFFFCFFU)
+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT     (6U)
+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK    (0XFFFFFF3FU)
+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT     (4U)
+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK    (0XFFFFFFCFU)
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT           (2U)
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK          (0XFFFFFFF3U)
+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT              (0U)
+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK             (0XFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_RAW_0
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_0                        (0x9020U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_RAW_1
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_1                        (0x9028U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_RAW_2
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_2                        (0x9030U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT      (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK     (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN         (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_0
+*/
+#define RGX_CR_OCP_IRQSTATUS_0                            (0x9038U)
+#define RGX_CR_OCP_IRQSTATUS_0_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN  (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_1
+*/
+#define RGX_CR_OCP_IRQSTATUS_1                            (0x9040U)
+#define RGX_CR_OCP_IRQSTATUS_1_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_2
+*/
+#define RGX_CR_OCP_IRQSTATUS_2                            (0x9048U)
+#define RGX_CR_OCP_IRQSTATUS_2_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT       (0U)
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK      (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN          (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_SET_0
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_0                        (0x9050U)
+#define RGX_CR_OCP_IRQENABLE_SET_0_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_SET_1
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_1                        (0x9058U)
+#define RGX_CR_OCP_IRQENABLE_SET_1_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_SET_2
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_2                        (0x9060U)
+#define RGX_CR_OCP_IRQENABLE_SET_2_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT   (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK  (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN      (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_CLR_0
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_0                        (0x9068U)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_CLR_1
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_1                        (0x9070U)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_CLR_2
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_2                        (0x9078U)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT  (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN     (0X00000001U)
+
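+/*
+ * Usage sketch (illustrative): the RGX_CR_OCP_IRQ* banks follow the usual
+ * raw-status / masked-status / enable-set / enable-clear layout; bank 2
+ * carries the GPU interrupt. rgx_read32()/rgx_write32() are hypothetical
+ * MMIO helpers and the acknowledge path is integration-specific.
+ *
+ *	// Unmask the GPU interrupt once, at init time.
+ *	rgx_write32(RGX_CR_OCP_IRQENABLE_SET_2,
+ *		    RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN);
+ *
+ *	// In the handler: act only when the masked status is asserted.
+ *	if (rgx_read32(RGX_CR_OCP_IRQSTATUS_2) &
+ *	    RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN) {
+ *		// service and acknowledge the GPU interrupt
+ *	}
+ *
+ *	// Mask it again, e.g. on power-down.
+ *	rgx_write32(RGX_CR_OCP_IRQENABLE_CLR_2,
+ *		    RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN);
+ */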
+
+/*
+    Register RGX_CR_OCP_IRQ_EVENT
+*/
+#define RGX_CR_OCP_IRQ_EVENT                              (0x9080U)
+#define RGX_CR_OCP_IRQ_EVENT_MASKFULL                     (IMG_UINT64_C(0x00000000000FFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT (19U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0X0000000000080000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT (18U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0X0000000000040000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT (17U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0X0000000000020000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT (16U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0X0000000000010000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT (15U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000008000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT (14U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0X0000000000004000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT (13U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0X0000000000002000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT (12U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000001000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT (11U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000000800))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT (10U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0X0000000000000400))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT (9U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0X0000000000000200))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT (8U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000000100))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT (7U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000000080))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT (6U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0X0000000000000040))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT (5U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT (4U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000000010))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT (3U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT (2U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT (1U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT (0U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_OCP_DEBUG_CONFIG
+*/
+#define RGX_CR_OCP_DEBUG_CONFIG                           (0x9088U)
+#define RGX_CR_OCP_DEBUG_CONFIG_MASKFULL                  (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_SHIFT                 (0U)
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_CLRMSK                (0XFFFFFFFEU)
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_EN                    (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_DEBUG_STATUS
+*/
+#define RGX_CR_OCP_DEBUG_STATUS                           (0x9090U)
+#define RGX_CR_OCP_DEBUG_STATUS_MASKFULL                  (IMG_UINT64_C(0x001F1F77FFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT    (51U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK   (IMG_UINT64_C(0XFFE7FFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT    (50U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK   (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN       (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT    (48U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK   (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT    (43U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK   (IMG_UINT64_C(0XFFFFE7FFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT    (42U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK   (IMG_UINT64_C(0XFFFFFBFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN       (IMG_UINT64_C(0X0000040000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT    (40U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK   (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT        (38U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK       (IMG_UINT64_C(0XFFFFFFBFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN           (IMG_UINT64_C(0X0000004000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT (37U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0XFFFFFFDFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN  (IMG_UINT64_C(0X0000002000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT (36U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN    (IMG_UINT64_C(0X0000001000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT        (34U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK       (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN           (IMG_UINT64_C(0X0000000400000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT (33U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN  (IMG_UINT64_C(0X0000000200000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT (32U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN    (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT      (31U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK     (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN         (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT         (30U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN            (IMG_UINT64_C(0X0000000040000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT      (29U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN         (IMG_UINT64_C(0X0000000020000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT      (27U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFE7FFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT      (26U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN         (IMG_UINT64_C(0X0000000004000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT      (24U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT      (23U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN         (IMG_UINT64_C(0X0000000000800000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT         (22U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN            (IMG_UINT64_C(0X0000000000400000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT      (21U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN         (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT      (19U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFE7FFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT      (18U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN         (IMG_UINT64_C(0X0000000000040000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT      (16U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT      (15U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN         (IMG_UINT64_C(0X0000000000008000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT         (14U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN            (IMG_UINT64_C(0X0000000000004000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT      (13U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN         (IMG_UINT64_C(0X0000000000002000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT      (11U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFE7FF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT      (10U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN         (IMG_UINT64_C(0X0000000000000400))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT      (8U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT      (7U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN         (IMG_UINT64_C(0X0000000000000080))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT         (6U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN            (IMG_UINT64_C(0X0000000000000040))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT      (5U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN         (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT      (3U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFE7))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT      (2U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN         (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT      (0U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT           (6U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK          (0XFFFFFFBFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN              (0X00000040U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT               (5U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK              (0XFFFFFFDFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_EN                  (0X00000020U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_SHIFT               (4U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_CLRMSK              (0XFFFFFFEFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_EN                  (0X00000010U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT             (3U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK            (0XFFFFFFF7U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN                (0X00000008U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT              (2U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK             (0XFFFFFFFBU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_EN                 (0X00000004U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT             (1U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK            (0XFFFFFFFDU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN                (0X00000002U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT                (0U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK               (0XFFFFFFFEU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_EN                   (0X00000001U)
+
+
+#define RGX_CR_BIF_TRUST_DM_MASK                          (0x0000007FU)
+
+
+/*
+    Register RGX_CR_BIF_TRUST
+*/
+#define RGX_CR_BIF_TRUST                                  (0xA000U)
+#define RGX_CR_BIF_TRUST_MASKFULL                         (IMG_UINT64_C(0x00000000001FFFFF))
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT (20U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN   (0X00100000U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT  (19U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK (0XFFF7FFFFU)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN     (0X00080000U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT       (18U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK      (0XFFFBFFFFU)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN          (0X00040000U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT         (17U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK        (0XFFFDFFFFU)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN            (0X00020000U)
+#define RGX_CR_BIF_TRUST_ENABLE_SHIFT                     (16U)
+#define RGX_CR_BIF_TRUST_ENABLE_CLRMSK                    (0XFFFEFFFFU)
+#define RGX_CR_BIF_TRUST_ENABLE_EN                        (0X00010000U)
+#define RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT                 (9U)
+#define RGX_CR_BIF_TRUST_DM_TRUSTED_CLRMSK                (0XFFFF01FFU)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT   (8U)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK  (0XFFFFFEFFU)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN      (0X00000100U)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT     (7U)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK    (0XFFFFFF7FU)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN        (0X00000080U)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT     (6U)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK    (0XFFFFFFBFU)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN        (0X00000040U)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT     (5U)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK    (0XFFFFFFDFU)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN        (0X00000020U)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT       (4U)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK      (0XFFFFFFEFU)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN          (0X00000010U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT       (3U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK      (0XFFFFFFF7U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN          (0X00000008U)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT    (2U)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK   (0XFFFFFFFBU)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN       (0X00000004U)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT      (1U)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK     (0XFFFFFFFDU)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN         (0X00000002U)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT      (0U)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK     (0XFFFFFFFEU)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN         (0X00000001U)
+
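+/*
+ * Usage sketch (illustrative): the DM_TRUSTED field of RGX_CR_BIF_TRUST is
+ * a seven-bit mask (bits 9..15) built from the RGX_CR_BIF_TRUST_DM_TYPE_*
+ * bits defined above and bounded by RGX_CR_BIF_TRUST_DM_MASK; ENABLE turns
+ * the trust checking on. rgx_write32() is a hypothetical MMIO helper.
+ *
+ *	u32 ui32Trust = RGX_CR_BIF_TRUST_ENABLE_EN;
+ *
+ *	ui32Trust |= ((RGX_CR_BIF_TRUST_DM_TYPE_META_EN |
+ *		       RGX_CR_BIF_TRUST_DM_TYPE_HOST_EN) &
+ *		      RGX_CR_BIF_TRUST_DM_MASK)
+ *		     << RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT;
+ *	rgx_write32(RGX_CR_BIF_TRUST, ui32Trust);
+ */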
+
+/*
+    Register RGX_CR_SYS_BUS_SECURE
+*/
+#define RGX_CR_SYS_BUS_SECURE                             (0xA100U)
+#define RGX_CR_SYS_BUS_SECURE__SECR__MASKFULL             (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_MASKFULL                    (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT                (0U)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK               (0XFFFFFFFEU)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN                   (0X00000001U)
+
+
+/*
+    Register RGX_CR_FBA_FC0_CHECKSUM
+*/
+#define RGX_CR_FBA_FC0_CHECKSUM                           (0xD170U)
+#define RGX_CR_FBA_FC0_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_FBA_FC1_CHECKSUM
+*/
+#define RGX_CR_FBA_FC1_CHECKSUM                           (0xD178U)
+#define RGX_CR_FBA_FC1_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_FBA_FC2_CHECKSUM
+*/
+#define RGX_CR_FBA_FC2_CHECKSUM                           (0xD180U)
+#define RGX_CR_FBA_FC2_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_FBA_FC3_CHECKSUM
+*/
+#define RGX_CR_FBA_FC3_CHECKSUM                           (0xD188U)
+#define RGX_CR_FBA_FC3_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_CLK_CTRL2
+*/
+#define RGX_CR_CLK_CTRL2                                  (0xD200U)
+#define RGX_CR_CLK_CTRL2_MASKFULL                         (IMG_UINT64_C(0x0000000000000F33))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_SHIFT                   (10U)
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_ON                      (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_AUTO                    (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_CTRL2_VRDM_SHIFT                       (8U)
+#define RGX_CR_CLK_CTRL2_VRDM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL2_VRDM_OFF                         (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_VRDM_ON                          (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL2_VRDM_AUTO                        (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL2_SH_SHIFT                         (4U)
+#define RGX_CR_CLK_CTRL2_SH_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL2_SH_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_SH_ON                            (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_CTRL2_SH_AUTO                          (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_CTRL2_FBA_SHIFT                        (0U)
+#define RGX_CR_CLK_CTRL2_FBA_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL2_FBA_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_FBA_ON                           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL2_FBA_AUTO                         (IMG_UINT64_C(0x0000000000000002))
+
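+/*
+ * Usage sketch (illustrative): unlike the single-bit enables elsewhere,
+ * each clock domain in RGX_CR_CLK_CTRL2 is a two-bit field with OFF, ON
+ * and AUTO encodings, so it is updated read-modify-write through the
+ * matching CLRMSK. rgx_read64()/rgx_write64() are hypothetical helpers.
+ *
+ *	u64 ui64Clk = rgx_read64(RGX_CR_CLK_CTRL2);
+ *
+ *	ui64Clk &= RGX_CR_CLK_CTRL2_SH_CLRMSK;
+ *	ui64Clk |= RGX_CR_CLK_CTRL2_SH_AUTO;
+ *	rgx_write64(RGX_CR_CLK_CTRL2, ui64Clk);
+ */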
+
+/*
+    Register RGX_CR_CLK_STATUS2
+*/
+#define RGX_CR_CLK_STATUS2                                (0xD208U)
+#define RGX_CR_CLK_STATUS2_MASKFULL                       (IMG_UINT64_C(0x0000000000000015))
+#define RGX_CR_CLK_STATUS2_VRDM_SHIFT                     (4U)
+#define RGX_CR_CLK_STATUS2_VRDM_CLRMSK                    (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS2_VRDM_GATED                     (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS2_VRDM_RUNNING                   (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS2_SH_SHIFT                       (2U)
+#define RGX_CR_CLK_STATUS2_SH_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_STATUS2_SH_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS2_SH_RUNNING                     (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_STATUS2_FBA_SHIFT                      (0U)
+#define RGX_CR_CLK_STATUS2_FBA_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS2_FBA_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS2_FBA_RUNNING                    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_RPM_SHF_FPL
+*/
+#define RGX_CR_RPM_SHF_FPL                                (0xD520U)
+#define RGX_CR_RPM_SHF_FPL_MASKFULL                       (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC))
+#define RGX_CR_RPM_SHF_FPL_SIZE_SHIFT                     (40U)
+#define RGX_CR_RPM_SHF_FPL_SIZE_CLRMSK                    (IMG_UINT64_C(0XC00000FFFFFFFFFF))
+#define RGX_CR_RPM_SHF_FPL_BASE_SHIFT                     (2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_CLRMSK                    (IMG_UINT64_C(0XFFFFFF0000000003))
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT                (2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSIZE                 (4U)
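+/* Sketch (an interpretation of the ALIGNSHIFT/ALIGNSIZE pair, not stated by
+ * the register list): BASE occupies bits [39:2] with 4-byte alignment, so a
+ * suitably aligned device-virtual address masks straight into place:
+ *
+ *   ui64Fpl  = ((IMG_UINT64)ui64NumEntries << RGX_CR_RPM_SHF_FPL_SIZE_SHIFT)
+ *                  & ~RGX_CR_RPM_SHF_FPL_SIZE_CLRMSK;
+ *   ui64Fpl |= sFplDevVAddr.uiAddr & ~RGX_CR_RPM_SHF_FPL_BASE_CLRMSK;
+ */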
+
+
+/*
+    Register RGX_CR_RPM_SHF_FPL_READ
+*/
+#define RGX_CR_RPM_SHF_FPL_READ                           (0xD528U)
+#define RGX_CR_RPM_SHF_FPL_READ_MASKFULL                  (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT              (22U)
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK             (0XFFBFFFFFU)
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_EN                 (0X00400000U)
+#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT              (0U)
+#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK             (0XFFC00000U)
+
+
+/*
+    Register RGX_CR_RPM_SHF_FPL_WRITE
+*/
+#define RGX_CR_RPM_SHF_FPL_WRITE                          (0xD530U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_MASKFULL                 (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT             (22U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK            (0XFFBFFFFFU)
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN                (0X00400000U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT             (0U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK            (0XFFC00000U)
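+/* Sketch: READ and WRITE form a circular-buffer pointer pair. Each carries
+ * an OFFSET plus a TOGGLE bit that flips on wrap-around -- the usual trick
+ * for distinguishing "full" from "empty" when both offsets match (this is an
+ * interpretation of the layout, not stated by the register list):
+ *
+ *   bEmpty = (ui32Rd == ui32Wr);
+ *   bFull  = ((ui32Rd ^ ui32Wr) == RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN);
+ */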
+
+
+/*
+    Register RGX_CR_RPM_SHG_FPL
+*/
+#define RGX_CR_RPM_SHG_FPL                                (0xD538U)
+#define RGX_CR_RPM_SHG_FPL_MASKFULL                       (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC))
+#define RGX_CR_RPM_SHG_FPL_SIZE_SHIFT                     (40U)
+#define RGX_CR_RPM_SHG_FPL_SIZE_CLRMSK                    (IMG_UINT64_C(0XC00000FFFFFFFFFF))
+#define RGX_CR_RPM_SHG_FPL_BASE_SHIFT                     (2U)
+#define RGX_CR_RPM_SHG_FPL_BASE_CLRMSK                    (IMG_UINT64_C(0XFFFFFF0000000003))
+#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT                (2U)
+#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSIZE                 (4U)
+
+
+/*
+    Register RGX_CR_RPM_SHG_FPL_READ
+*/
+#define RGX_CR_RPM_SHG_FPL_READ                           (0xD540U)
+#define RGX_CR_RPM_SHG_FPL_READ_MASKFULL                  (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT              (22U)
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK             (0XFFBFFFFFU)
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_EN                 (0X00400000U)
+#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT              (0U)
+#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK             (0XFFC00000U)
+
+
+/*
+    Register RGX_CR_RPM_SHG_FPL_WRITE
+*/
+#define RGX_CR_RPM_SHG_FPL_WRITE                          (0xD548U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_MASKFULL                 (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT             (22U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK            (0XFFBFFFFFU)
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN                (0X00400000U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT             (0U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK            (0XFFC00000U)
+
+
+/*
+    Register RGX_CR_SH_PERF
+*/
+#define RGX_CR_SH_PERF                                    (0xD5F8U)
+#define RGX_CR_SH_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_SH_PERF_CLR_3_SHIFT                        (4U)
+#define RGX_CR_SH_PERF_CLR_3_CLRMSK                       (0XFFFFFFEFU)
+#define RGX_CR_SH_PERF_CLR_3_EN                           (0X00000010U)
+#define RGX_CR_SH_PERF_CLR_2_SHIFT                        (3U)
+#define RGX_CR_SH_PERF_CLR_2_CLRMSK                       (0XFFFFFFF7U)
+#define RGX_CR_SH_PERF_CLR_2_EN                           (0X00000008U)
+#define RGX_CR_SH_PERF_CLR_1_SHIFT                        (2U)
+#define RGX_CR_SH_PERF_CLR_1_CLRMSK                       (0XFFFFFFFBU)
+#define RGX_CR_SH_PERF_CLR_1_EN                           (0X00000004U)
+#define RGX_CR_SH_PERF_CLR_0_SHIFT                        (1U)
+#define RGX_CR_SH_PERF_CLR_0_CLRMSK                       (0XFFFFFFFDU)
+#define RGX_CR_SH_PERF_CLR_0_EN                           (0X00000002U)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_CLRMSK                 (0XFFFFFFFEU)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_EN                     (0X00000001U)
+
+
+/*
+    Register RGX_CR_SH_PERF_SELECT0
+*/
+#define RGX_CR_SH_PERF_SELECT0                            (0xD600U)
+#define RGX_CR_SH_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define RGX_CR_SH_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_SH_PERF_COUNTER_0
+*/
+#define RGX_CR_SH_PERF_COUNTER_0                          (0xD628U)
+#define RGX_CR_SH_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SH_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define RGX_CR_SH_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
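+/* Sketch of the apparent programming model (sequencing is an assumption):
+ * choose an event via the SELECT0 GROUP_SELECT/BIT_SELECT fields, enable
+ * counting, then read back the 32-bit counter:
+ *
+ *   OSWriteHWReg64(pvRegs, RGX_CR_SH_PERF_SELECT0,
+ *       ((IMG_UINT64)ui32Group << RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT) |
+ *       ((IMG_UINT64)ui32Bits  << RGX_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT));
+ *   OSWriteHWReg32(pvRegs, RGX_CR_SH_PERF, RGX_CR_SH_PERF_CTRL_ENABLE_EN);
+ *   ui32Count = OSReadHWReg32(pvRegs, RGX_CR_SH_PERF_COUNTER_0);
+ */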
+
+
+/*
+    Register RGX_CR_SHF_SHG_CHECKSUM
+*/
+#define RGX_CR_SHF_SHG_CHECKSUM                           (0xD1C0U)
+#define RGX_CR_SHF_SHG_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHF_VERTEX_BIF_CHECKSUM
+*/
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM                    (0xD1C8U)
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT        (0U)
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHF_VARY_BIF_CHECKSUM
+*/
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM                      (0xD1D0U)
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT          (0U)
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_RPM_BIF_CHECKSUM
+*/
+#define RGX_CR_RPM_BIF_CHECKSUM                           (0xD1D8U)
+#define RGX_CR_RPM_BIF_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHG_BIF_CHECKSUM
+*/
+#define RGX_CR_SHG_BIF_CHECKSUM                           (0xD1E0U)
+#define RGX_CR_SHG_BIF_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHG_FE_BE_CHECKSUM
+*/
+#define RGX_CR_SHG_FE_BE_CHECKSUM                         (0xD1E8U)
+#define RGX_CR_SHG_FE_BE_CHECKSUM_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT             (0U)
+#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK            (0x00000000U)
+
+
+/*
+    Register DPX_CR_BF_PERF
+*/
+#define DPX_CR_BF_PERF                                    (0xC458U)
+#define DPX_CR_BF_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BF_PERF_CLR_3_SHIFT                        (4U)
+#define DPX_CR_BF_PERF_CLR_3_CLRMSK                       (0XFFFFFFEFU)
+#define DPX_CR_BF_PERF_CLR_3_EN                           (0X00000010U)
+#define DPX_CR_BF_PERF_CLR_2_SHIFT                        (3U)
+#define DPX_CR_BF_PERF_CLR_2_CLRMSK                       (0XFFFFFFF7U)
+#define DPX_CR_BF_PERF_CLR_2_EN                           (0X00000008U)
+#define DPX_CR_BF_PERF_CLR_1_SHIFT                        (2U)
+#define DPX_CR_BF_PERF_CLR_1_CLRMSK                       (0XFFFFFFFBU)
+#define DPX_CR_BF_PERF_CLR_1_EN                           (0X00000004U)
+#define DPX_CR_BF_PERF_CLR_0_SHIFT                        (1U)
+#define DPX_CR_BF_PERF_CLR_0_CLRMSK                       (0XFFFFFFFDU)
+#define DPX_CR_BF_PERF_CLR_0_EN                           (0X00000002U)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK                 (0XFFFFFFFEU)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_EN                     (0X00000001U)
+
+
+/*
+    Register DPX_CR_BF_PERF_SELECT0
+*/
+#define DPX_CR_BF_PERF_SELECT0                            (0xC460U)
+#define DPX_CR_BF_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0X0000000000200000))
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register DPX_CR_BF_PERF_COUNTER_0
+*/
+#define DPX_CR_BF_PERF_COUNTER_0                          (0xC488U)
+#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register DPX_CR_BT_PERF
+*/
+#define DPX_CR_BT_PERF                                    (0xC3D0U)
+#define DPX_CR_BT_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BT_PERF_CLR_3_SHIFT                        (4U)
+#define DPX_CR_BT_PERF_CLR_3_CLRMSK                       (0XFFFFFFEFU)
+#define DPX_CR_BT_PERF_CLR_3_EN                           (0X00000010U)
+#define DPX_CR_BT_PERF_CLR_2_SHIFT                        (3U)
+#define DPX_CR_BT_PERF_CLR_2_CLRMSK                       (0XFFFFFFF7U)
+#define DPX_CR_BT_PERF_CLR_2_EN                           (0X00000008U)
+#define DPX_CR_BT_PERF_CLR_1_SHIFT                        (2U)
+#define DPX_CR_BT_PERF_CLR_1_CLRMSK                       (0XFFFFFFFBU)
+#define DPX_CR_BT_PERF_CLR_1_EN                           (0X00000004U)
+#define DPX_CR_BT_PERF_CLR_0_SHIFT                        (1U)
+#define DPX_CR_BT_PERF_CLR_0_CLRMSK                       (0XFFFFFFFDU)
+#define DPX_CR_BT_PERF_CLR_0_EN                           (0X00000002U)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK                 (0XFFFFFFFEU)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_EN                     (0X00000001U)
+
+
+/*
+    Register DPX_CR_BT_PERF_SELECT0
+*/
+#define DPX_CR_BT_PERF_SELECT0                            (0xC3D8U)
+#define DPX_CR_BT_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0X0000000000200000))
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register DPX_CR_BT_PERF_COUNTER_0
+*/
+#define DPX_CR_BT_PERF_COUNTER_0                          (0xC420U)
+#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register DPX_CR_RQ_USC_DEBUG
+*/
+#define DPX_CR_RQ_USC_DEBUG                               (0xC110U)
+#define DPX_CR_RQ_USC_DEBUG_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT                (0U)
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK               (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS
+*/
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS                  (0xC5C8U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL         (IMG_UINT64_C(0x000000000000F775))
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT   (12U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK  (0XFFFF0FFFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT  (8U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT  (5U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT   (4U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK  (0XFFFFFFEFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN      (0X00000010U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT      (0U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK     (0XFFFFFFFEU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN         (0X00000001U)
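+/* Sketch: CLRMSK constants are clear-masks (the field's bits are the zeroes),
+ * so decoding a fault works by AND-ing with the inverted mask and shifting:
+ *
+ *   ui32PageSize = (ui32Status & ~DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK)
+ *                      >> DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT;
+ */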
+
+
+/*
+    Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS
+*/
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS                  (0xC5D0U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL         (IMG_UINT64_C(0x03FFFFFFFFFFFFF0))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT        (57U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK       (IMG_UINT64_C(0XFDFFFFFFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN           (IMG_UINT64_C(0X0200000000000000))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT     (44U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK    (IMG_UINT64_C(0XFE000FFFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT     (40U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK    (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT    (4U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK   (IMG_UINT64_C(0XFFFFFF000000000F))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register DPX_CR_BIF_MMU_STATUS
+*/
+#define DPX_CR_BIF_MMU_STATUS                             (0xC5D8U)
+#define DPX_CR_BIF_MMU_STATUS_MASKFULL                    (IMG_UINT64_C(0x000000000FFFFFF7))
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT               (20U)
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK              (0XF00FFFFFU)
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT               (12U)
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK              (0XFFF00FFFU)
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT               (4U)
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK              (0XFFFFF00FU)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT               (2U)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK              (0XFFFFFFFBU)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_EN                  (0X00000004U)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT                (1U)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK               (0XFFFFFFFDU)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN                   (0X00000002U)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT                  (0U)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK                 (0XFFFFFFFEU)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_EN                     (0X00000001U)
+
+
+/*
+    Register DPX_CR_RT_PERF
+*/
+#define DPX_CR_RT_PERF                                    (0xC700U)
+#define DPX_CR_RT_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_RT_PERF_CLR_3_SHIFT                        (4U)
+#define DPX_CR_RT_PERF_CLR_3_CLRMSK                       (0XFFFFFFEFU)
+#define DPX_CR_RT_PERF_CLR_3_EN                           (0X00000010U)
+#define DPX_CR_RT_PERF_CLR_2_SHIFT                        (3U)
+#define DPX_CR_RT_PERF_CLR_2_CLRMSK                       (0XFFFFFFF7U)
+#define DPX_CR_RT_PERF_CLR_2_EN                           (0X00000008U)
+#define DPX_CR_RT_PERF_CLR_1_SHIFT                        (2U)
+#define DPX_CR_RT_PERF_CLR_1_CLRMSK                       (0XFFFFFFFBU)
+#define DPX_CR_RT_PERF_CLR_1_EN                           (0X00000004U)
+#define DPX_CR_RT_PERF_CLR_0_SHIFT                        (1U)
+#define DPX_CR_RT_PERF_CLR_0_CLRMSK                       (0XFFFFFFFDU)
+#define DPX_CR_RT_PERF_CLR_0_EN                           (0X00000002U)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK                 (0XFFFFFFFEU)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_EN                     (0X00000001U)
+
+
+/*
+    Register DPX_CR_RT_PERF_SELECT0
+*/
+#define DPX_CR_RT_PERF_SELECT0                            (0xC708U)
+#define DPX_CR_RT_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0X0000000000200000))
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register DPX_CR_RT_PERF_COUNTER_0
+*/
+#define DPX_CR_RT_PERF_COUNTER_0                          (0xC730U)
+#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register DPX_CR_BX_TU_PERF
+*/
+#define DPX_CR_BX_TU_PERF                                 (0xC908U)
+#define DPX_CR_BX_TU_PERF_MASKFULL                        (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT                     (4U)
+#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK                    (0XFFFFFFEFU)
+#define DPX_CR_BX_TU_PERF_CLR_3_EN                        (0X00000010U)
+#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT                     (3U)
+#define DPX_CR_BX_TU_PERF_CLR_2_CLRMSK                    (0XFFFFFFF7U)
+#define DPX_CR_BX_TU_PERF_CLR_2_EN                        (0X00000008U)
+#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT                     (2U)
+#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK                    (0XFFFFFFFBU)
+#define DPX_CR_BX_TU_PERF_CLR_1_EN                        (0X00000004U)
+#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT                     (1U)
+#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK                    (0XFFFFFFFDU)
+#define DPX_CR_BX_TU_PERF_CLR_0_EN                        (0X00000002U)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT               (0U)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK              (0XFFFFFFFEU)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN                  (0X00000001U)
+
+
+/*
+    Register DPX_CR_BX_TU_PERF_SELECT0
+*/
+#define DPX_CR_BX_TU_PERF_SELECT0                         (0xC910U)
+#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL                (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT         (48U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK        (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT         (32U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK        (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT              (21U)
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN                 (IMG_UINT64_C(0X0000000000200000))
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT      (16U)
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT        (0U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register DPX_CR_BX_TU_PERF_COUNTER_0
+*/
+#define DPX_CR_BX_TU_PERF_COUNTER_0                       (0xC938U)
+#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT             (0U)
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK            (0x00000000U)
+
+
+/*
+    Register DPX_CR_RS_PDS_RR_CHECKSUM
+*/
+#define DPX_CR_RS_PDS_RR_CHECKSUM                         (0xC0F0U)
+#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT             (0U)
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK            (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT                  (0xE140U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL         (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT         (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK        (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MMU_CBASE_MAPPING
+*/
+#define RGX_CR_MMU_CBASE_MAPPING                          (0xE148U)
+#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL                 (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT          (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK         (0XF0000000U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT     (12U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE      (4096U)
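+/* Sketch: BASE_ADDR carries a 4KB-aligned physical page-catalogue address
+ * with the low ALIGNSHIFT bits dropped (an assumption read off the
+ * ALIGNSHIFT/ALIGNSIZE pair; variable names are illustrative):
+ *
+ *   OSWriteHWReg32(pvRegs, RGX_CR_MMU_CBASE_MAPPING,
+ *       (IMG_UINT32)(sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT));
+ */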
+
+
+/*
+    Register RGX_CR_MMU_FAULT_STATUS
+*/
+#define RGX_CR_MMU_FAULT_STATUS                           (0xE150U)
+#define RGX_CR_MMU_FAULT_STATUS_MASKFULL                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT             (28U)
+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK            (IMG_UINT64_C(0X000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT             (20U)
+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFF00FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT              (12U)
+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFF00FFF))
+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT              (6U)
+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT               (4U)
+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_MMU_FAULT_STATUS_RNW_SHIFT                 (3U)
+#define RGX_CR_MMU_FAULT_STATUS_RNW_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_RNW_EN                    (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT                (1U)
+#define RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_SHIFT               (0U)
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_EN                  (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_MMU_FAULT_STATUS_META
+*/
+#define RGX_CR_MMU_FAULT_STATUS_META                      (0xE158U)
+#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT        (28U)
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK       (IMG_UINT64_C(0X000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT        (20U)
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFF00FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT         (12U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFF00FFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT         (6U)
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT          (4U)
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT            (3U)
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN               (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT           (1U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT          (0U)
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN             (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_SLC3_CTRL_MISC
+*/
+#define RGX_CR_SLC3_CTRL_MISC                             (0xE200U)
+#define RGX_CR_SLC3_CTRL_MISC_MASKFULL                    (IMG_UINT64_C(0x0000000000000107))
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT        (8U)
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK       (0XFFFFFEFFU)
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN           (0X00000100U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT      (0U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK     (0XFFFFFFF8U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR     (0x00000000U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH (0X00000001U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH (0X00000002U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH (0X00000003U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH (0X00000004U)
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE
+*/
+#define RGX_CR_SLC3_SCRAMBLE                              (0xE208U)
+#define RGX_CR_SLC3_SCRAMBLE_MASKFULL                     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE_BITS_SHIFT                   (0U)
+#define RGX_CR_SLC3_SCRAMBLE_BITS_CLRMSK                  (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE2
+*/
+#define RGX_CR_SLC3_SCRAMBLE2                             (0xE210U)
+#define RGX_CR_SLC3_SCRAMBLE2_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE2_BITS_SHIFT                  (0U)
+#define RGX_CR_SLC3_SCRAMBLE2_BITS_CLRMSK                 (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE3
+*/
+#define RGX_CR_SLC3_SCRAMBLE3                             (0xE218U)
+#define RGX_CR_SLC3_SCRAMBLE3_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE3_BITS_SHIFT                  (0U)
+#define RGX_CR_SLC3_SCRAMBLE3_BITS_CLRMSK                 (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE4
+*/
+#define RGX_CR_SLC3_SCRAMBLE4                             (0xE260U)
+#define RGX_CR_SLC3_SCRAMBLE4_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE4_BITS_SHIFT                  (0U)
+#define RGX_CR_SLC3_SCRAMBLE4_BITS_CLRMSK                 (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_STATUS
+*/
+#define RGX_CR_SLC3_STATUS                                (0xE220U)
+#define RGX_CR_SLC3_STATUS_MASKFULL                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_STATUS_WRITES1_SHIFT                  (48U)
+#define RGX_CR_SLC3_STATUS_WRITES1_CLRMSK                 (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define RGX_CR_SLC3_STATUS_WRITES0_SHIFT                  (32U)
+#define RGX_CR_SLC3_STATUS_WRITES0_CLRMSK                 (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define RGX_CR_SLC3_STATUS_READS1_SHIFT                   (16U)
+#define RGX_CR_SLC3_STATUS_READS1_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define RGX_CR_SLC3_STATUS_READS0_SHIFT                   (0U)
+#define RGX_CR_SLC3_STATUS_READS0_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_SLC3_IDLE
+*/
+#define RGX_CR_SLC3_IDLE                                  (0xE228U)
+#define RGX_CR_SLC3_IDLE_MASKFULL                         (IMG_UINT64_C(0x00000000000FFFFF))
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT               (18U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK              (0XFFF3FFFFU)
+#define RGX_CR_SLC3_IDLE_MMU_SHIFT                        (17U)
+#define RGX_CR_SLC3_IDLE_MMU_CLRMSK                       (0XFFFDFFFFU)
+#define RGX_CR_SLC3_IDLE_MMU_EN                           (0X00020000U)
+#define RGX_CR_SLC3_IDLE_RDI_SHIFT                        (16U)
+#define RGX_CR_SLC3_IDLE_RDI_CLRMSK                       (0XFFFEFFFFU)
+#define RGX_CR_SLC3_IDLE_RDI_EN                           (0X00010000U)
+#define RGX_CR_SLC3_IDLE_IMGBV4_SHIFT                     (12U)
+#define RGX_CR_SLC3_IDLE_IMGBV4_CLRMSK                    (0XFFFF0FFFU)
+#define RGX_CR_SLC3_IDLE_CACHE_BANKS_SHIFT                (4U)
+#define RGX_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK               (0XFFFFF00FU)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT                (2U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK               (0XFFFFFFF3U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT               (1U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK              (0XFFFFFFFDU)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_EN                  (0X00000002U)
+#define RGX_CR_SLC3_IDLE_XBAR_SHIFT                       (0U)
+#define RGX_CR_SLC3_IDLE_XBAR_CLRMSK                      (0XFFFFFFFEU)
+#define RGX_CR_SLC3_IDLE_XBAR_EN                          (0X00000001U)
+
+
+/*
+    Register RGX_CR_SLC3_FAULT_STOP_STATUS
+*/
+#define RGX_CR_SLC3_FAULT_STOP_STATUS                     (0xE248U)
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_MASKFULL            (IMG_UINT64_C(0x0000000000001FFF))
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT           (0U)
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK          (0XFFFFE000U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_MODE
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_MODE                     (0xF048U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MASKFULL            (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT          (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK         (0XFFFFFFFCU)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX          (0x00000000U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE       (0X00000001U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST           (0X00000002U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING0
+*/
+#define RGX_CR_CONTEXT_MAPPING0                           (0xF078U)
+#define RGX_CR_CONTEXT_MAPPING0_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT                  (24U)
+#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK                 (0X00FFFFFFU)
+#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT                 (16U)
+#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK                (0XFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT                  (8U)
+#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK                 (0XFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING0_TA_SHIFT                  (0U)
+#define RGX_CR_CONTEXT_MAPPING0_TA_CLRMSK                 (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING1
+*/
+#define RGX_CR_CONTEXT_MAPPING1                           (0xF080U)
+#define RGX_CR_CONTEXT_MAPPING1_MASKFULL                  (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_CONTEXT_MAPPING1_HOST_SHIFT                (8U)
+#define RGX_CR_CONTEXT_MAPPING1_HOST_CLRMSK               (0XFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING1_TLA_SHIFT                 (0U)
+#define RGX_CR_CONTEXT_MAPPING1_TLA_CLRMSK                (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING2
+*/
+#define RGX_CR_CONTEXT_MAPPING2                           (0xF088U)
+#define RGX_CR_CONTEXT_MAPPING2_MASKFULL                  (IMG_UINT64_C(0x0000000000FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT              (16U)
+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK             (0XFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT                 (8U)
+#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK                (0XFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT                (0U)
+#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK               (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING3
+*/
+#define RGX_CR_CONTEXT_MAPPING3                           (0xF090U)
+#define RGX_CR_CONTEXT_MAPPING3_MASKFULL                  (IMG_UINT64_C(0x0000000000FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT              (16U)
+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK             (0XFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT                 (8U)
+#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK                (0XFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT                (0U)
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK               (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_BIF_JONES_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ                 (0xF098U)
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL        (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT   (0U)
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK  (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ            (0xF0A0U)
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL   (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT (0U)
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIF_DUST_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ                  (0xF0A8U)
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL         (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT    (0U)
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK   (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING4
+*/
+#define RGX_CR_CONTEXT_MAPPING4                           (0xF210U)
+#define RGX_CR_CONTEXT_MAPPING4_MASKFULL                  (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT        (40U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK       (IMG_UINT64_C(0XFFFF00FFFFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT          (32U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK         (IMG_UINT64_C(0XFFFFFF00FFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT           (24U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK          (IMG_UINT64_C(0XFFFFFFFF00FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT        (16U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT          (8U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT           (0U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+#endif /* _RGX_CR_DEFS_KM_H_ */
+
+/*****************************************************************************
+ End of file (rgx_cr_defs_km.h)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgxdefs_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgxdefs_km.h
new file mode 100644
index 0000000..c71d903
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgxdefs_km.h
@@ -0,0 +1,310 @@
+/*************************************************************************/ /*!
+@Title          Rogue hw definitions (kernel mode)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXDEFS_KM_H_
+#define _RGXDEFS_KM_H_
+
+#include RGX_BVNC_CORE_KM_HEADER
+#include RGX_BNC_CONFIG_KM_HEADER
+
+#define __IMG_EXPLICIT_INCLUDE_HWDEFS
+#if defined(__KERNEL__)
+#include "rgx_cr_defs_km.h"
+#else
+#include RGX_BVNC_CORE_HEADER
+#include RGX_BNC_CONFIG_HEADER
+#include "rgx_cr_defs.h"
+#endif
+#undef __IMG_EXPLICIT_INCLUDE_HWDEFS
+
+/* The following macros are picked up through the BVNC headers for PDUMP and
+ * no-hardware builds, to remain compatible with the old build infrastructure.
+ */
+#if defined(PDUMP) || defined(NO_HARDWARE) || !defined(SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION)
+/******************************************************************************
+ * Check for valid B.X.N.C
+ *****************************************************************************/
+#if !defined(RGX_BVNC_KM_B) || !defined(RGX_BVNC_KM_V) || !defined(RGX_BVNC_KM_N) || !defined(RGX_BVNC_KM_C)
+#error "Need to specify BVNC (RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N and RGX_BVNC_C)"
+#endif
+#endif
+
+#if defined(PDUMP) || defined(NO_HARDWARE)
+/* Check core/config compatibility */
+#if (RGX_BVNC_KM_B != RGX_BNC_KM_B) || (RGX_BVNC_KM_N != RGX_BNC_KM_N) || (RGX_BVNC_KM_C != RGX_BNC_KM_C)
+#error "BVNC headers are mismatching (KM core/config)"
+#endif
+
+#endif
+
+/******************************************************************************
+ * RGX Version name
+ *****************************************************************************/
+#define _RGX_BVNC_ST2(S)	#S
+#define _RGX_BVNC_ST(S)		_RGX_BVNC_ST2(S)
+#define RGX_BVNC_KM			_RGX_BVNC_ST(RGX_BVNC_KM_B) "." _RGX_BVNC_ST(RGX_BVNC_KM_V) "." _RGX_BVNC_ST(RGX_BVNC_KM_N) "." _RGX_BVNC_ST(RGX_BVNC_KM_C)
+#define RGX_BVNC_KM_V_ST	_RGX_BVNC_ST(RGX_BVNC_KM_V)
+
+/*
+   Start at 903GiB, with 32MB per OSID (see rgxheapconfig.h).
+   NOTE:
+		The firmware heap base and size are defined here to
+		simplify #include dependencies; see rgxheapconfig.h
+		for the full RGX virtual address space layout.
+*/
+#define RGX_FIRMWARE_HEAP_BASE			IMG_UINT64_C(0xE1C0000000)
+#define RGX_FIRMWARE_HEAP_SIZE			(1<<RGX_FW_HEAP_SHIFT)
+#define RGX_FIRMWARE_HEAP_SHIFT			RGX_FW_HEAP_SHIFT
+
+/* The default number of OSIDs is 1; a higher number implies VZ-enabled firmware */
+#if !defined(RGXFW_NATIVE) && defined(PVRSRV_VZ_NUM_OSID) && (PVRSRV_VZ_NUM_OSID + 1 > 1)
+#define RGXFW_NUM_OS PVRSRV_VZ_NUM_OSID
+#else
+#define RGXFW_NUM_OS 1
+#endif
+
+/******************************************************************************
+ * RGX Defines
+ *****************************************************************************/
+
+#define			BVNC_FIELD_MASK			((1 << BVNC_FIELD_WIDTH) - 1)
+#define         C_POSITION              (0)
+#define         N_POSITION              ((C_POSITION) + (BVNC_FIELD_WIDTH))
+#define         V_POSITION              ((N_POSITION) + (BVNC_FIELD_WIDTH))
+#define         B_POSITION              ((V_POSITION) + (BVNC_FIELD_WIDTH))
+
+#define         B_POSITION_MASK         (((IMG_UINT64)(BVNC_FIELD_MASK) << (B_POSITION)))
+#define         V_POSITION_MASK         (((IMG_UINT64)(BVNC_FIELD_MASK) << (V_POSITION)))
+#define         N_POSITION_MASK         (((IMG_UINT64)(BVNC_FIELD_MASK) << (N_POSITION)))
+#define         C_POSITION_MASK         (((IMG_UINT64)(BVNC_FIELD_MASK) << (C_POSITION)))
+
+#define         GET_B(x)                (((x) & (B_POSITION_MASK)) >> (B_POSITION))
+#define         GET_V(x)                (((x) & (V_POSITION_MASK)) >> (V_POSITION))
+#define         GET_N(x)                (((x) & (N_POSITION_MASK)) >> (N_POSITION))
+#define         GET_C(x)                (((x) & (C_POSITION_MASK)) >> (C_POSITION))
+
+#define         BVNC_PACK(B,V,N,C)      ((((IMG_UINT64)B)) << (B_POSITION) | \
+                                         (((IMG_UINT64)V)) << (V_POSITION) | \
+                                         (((IMG_UINT64)N)) << (N_POSITION) | \
+                                         (((IMG_UINT64)C)) << (C_POSITION) \
+                                        )
+
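+/* Sketch: BVNC_PACK and the GET_* accessors round-trip a B.V.N.C ID through
+ * one 64-bit word, BVNC_FIELD_WIDTH bits per field (values are illustrative):
+ *
+ *   IMG_UINT64 ui64BVNC = BVNC_PACK(4, 31, 2, 51);
+ *   GET_B(ui64BVNC) yields 4, GET_V() 31, GET_N() 2, GET_C() 51
+ */
+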
+#define RGX_CR_CORE_ID_CONFIG_N_SHIFT                     (8U)
+#define RGX_CR_CORE_ID_CONFIG_C_SHIFT                     (0U)
+
+#define RGX_CR_CORE_ID_CONFIG_N_CLRMSK                    (0XFFFF00FFU)
+#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK                    (0XFFFFFF00U)
+
+/* META cores (required for RGX_FEATURE_META) */
+#define MTP218   (1)
+#define MTP219   (2)
+#define LTP218   (3)
+#define LTP217   (4)
+
+/* META Core memory feature depending on META variants */
+#define RGX_META_COREMEM_32K      (32*1024)
+#define RGX_META_COREMEM_48K      (48*1024)
+#define RGX_META_COREMEM_64K      (64*1024)
+#define RGX_META_COREMEM_96K      (96*1024)
+#define RGX_META_COREMEM_128K     (128*1024)
+#define RGX_META_COREMEM_256K     (256*1024)
+
+#if !defined(__KERNEL__)
+#if (!defined(SUPPORT_TRUSTED_DEVICE) || defined(RGX_FEATURE_META_DMA)) && (RGX_FEATURE_META_COREMEM_SIZE != 0)
+#define RGX_META_COREMEM_SIZE     (RGX_FEATURE_META_COREMEM_SIZE*1024)
+#define RGX_META_COREMEM          (1)
+#define RGX_META_COREMEM_CODE     (1)
+#if !defined(FIX_HW_BRN_50767) && RGXFW_NUM_OS == 1
+#define RGX_META_COREMEM_DATA     (1)
+#endif
+#else
+#undef SUPPORT_META_COREMEM
+#undef RGX_FEATURE_META_COREMEM_SIZE
+#undef RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_META_COREMEM_SIZE         (0)
+#endif
+#endif
+
+/* ISP requires valid state on all three pipes regardless of the number of
+ * active pipes/tiles in flight.
+ */
+#define RGX_MAX_NUM_PIPES	3
+
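+/* Assumption: the hardware reports its cache-line size in bits; this macro
+ * converts it to bytes. */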
+#define GET_ROGUE_CACHE_LINE_SIZE(x)				((x)/8)
+
+
+#define MAX_HW_TA3DCONTEXTS	2
+
+
+/* Useful extra defines for clock ctrl */
+#define RGX_CR_CLK_CTRL_ALL_ON   (IMG_UINT64_C(0x5555555555555555) & RGX_CR_CLK_CTRL_MASKFULL)
+#define RGX_CR_CLK_CTRL_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa) & RGX_CR_CLK_CTRL_MASKFULL)
+
+#define RGX_CR_SOFT_RESET_DUST_n_CORE_EN	(RGX_CR_SOFT_RESET_DUST_A_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_B_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_C_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_D_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_E_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_F_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_G_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_H_CORE_EN)
+
+/* SOFT_RESET Rascal and DUSTs bits */
+#define RGX_CR_SOFT_RESET_RASCALDUSTS_EN	(RGX_CR_SOFT_RESET_RASCAL_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+
+
+
+/* SOFT_RESET steps as defined in the TRM */
+#define RGX_S7_SOFT_RESET_DUSTS (RGX_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+#define RGX_S7_SOFT_RESET_JONES (RGX_CR_SOFT_RESET_PM_EN  | \
+                                 RGX_CR_SOFT_RESET_VDM_EN | \
+								 RGX_CR_SOFT_RESET_ISP_EN)
+
+#define RGX_S7_SOFT_RESET_JONES_ALL (RGX_S7_SOFT_RESET_JONES  | \
+									 RGX_CR_SOFT_RESET_BIF_EN | \
+                                     RGX_CR_SOFT_RESET_SLC_EN | \
+								     RGX_CR_SOFT_RESET_GARTEN_EN)
+
+#define RGX_S7_SOFT_RESET2 (RGX_CR_SOFT_RESET2_BLACKPEARL_EN | \
+                            RGX_CR_SOFT_RESET2_PIXEL_EN | \
+							RGX_CR_SOFT_RESET2_CDM_EN | \
+							RGX_CR_SOFT_RESET2_VERTEX_EN)
+
+
+
+#define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT		(12)
+#define RGX_BIF_PM_PHYSICAL_PAGE_SIZE			(1 << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+
+#define RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT		(14)
+#define RGX_BIF_PM_VIRTUAL_PAGE_SIZE			(1 << RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT)
+
+/* To get the number of required Dusts, divide the number of clusters by 2 and round up */
+#define RGX_REQ_NUM_DUSTS(CLUSTERS)    ((CLUSTERS + 1) / 2)
+
+/* To get the number of required Bernados/Phantoms, divide the number of clusters by 4 and round up */
+#define RGX_REQ_NUM_PHANTOMS(CLUSTERS) ((CLUSTERS + 3) / 4)
+#define RGX_REQ_NUM_BERNADOS(CLUSTERS) ((CLUSTERS + 3) / 4)
+#define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) ((CLUSTERS + 3) / 4)
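+/* e.g. a 6-cluster core needs RGX_REQ_NUM_DUSTS(6) = 3 Dusts and
+ * RGX_REQ_NUM_PHANTOMS(6) = 2 Phantoms (illustrative values). */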
+
+#if !defined(__KERNEL__)
+# define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS))
+#endif
+
+
+/* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT is not defined for format 1 cores (so define it now). */
+#if !defined(RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT)
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1)
+#endif
+
+/* META second-thread feature, depending on the META variant and available CoreMem */
+#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && (RGX_FEATURE_META_COREMEM_SIZE == 256)
+#define RGXFW_META_SUPPORT_2ND_THREAD
+#endif
+
+/******************************************************************************
+ * WA HWBRNs
+ *****************************************************************************/
+#if defined(FIX_HW_BRN_36492)
+
+#undef RGX_CR_SOFT_RESET_SLC_EN
+#undef RGX_CR_SOFT_RESET_SLC_CLRMSK
+#undef RGX_CR_SOFT_RESET_SLC_SHIFT
+
+/* Remove the SOFT_RESET_SLC_EN bit from SOFT_RESET_MASKFULL */
+#undef RGX_CR_SOFT_RESET_MASKFULL
+#define RGX_CR_SOFT_RESET_MASKFULL IMG_UINT64_C(0x000001FFF7FFFC1D)
+
+#endif /* FIX_HW_BRN_36492 */
+
+
+#if defined(RGX_CR_JONES_IDLE_MASKFULL)
+/* Workaround for HW BRN 57289 */
+#if (RGX_CR_JONES_IDLE_MASKFULL != 0x0000000000007FFF)
+#error This WA must be updated if RGX_CR_JONES_IDLE is expanded!!!
+#endif
+#undef RGX_CR_JONES_IDLE_MASKFULL
+#undef RGX_CR_JONES_IDLE_TDM_SHIFT
+#undef RGX_CR_JONES_IDLE_TDM_CLRMSK
+#undef RGX_CR_JONES_IDLE_TDM_EN
+#define RGX_CR_JONES_IDLE_MASKFULL                        (IMG_UINT64_C(0x0000000000003FFF))
+#endif
+
+
+#define DPX_MAX_RAY_CONTEXTS 4 /* FIXME: should this live in a DPX-specific header? */
+#define DPX_MAX_FBA_AP 16
+#define DPX_MAX_FBA_FILTER_WIDTH 24
+
+#if !defined(__KERNEL__)
+#if !defined(RGX_FEATURE_SLC_SIZE_IN_BYTES)
+#if defined(RGX_FEATURE_SLC_SIZE_IN_KILOBYTES)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (RGX_FEATURE_SLC_SIZE_IN_KILOBYTES * 1024)
+#else
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (0)
+#endif
+#endif
+#endif
+
+#if defined(__KERNEL__)
+
+#define RGX_GET_NUM_RASTERISATION_MODULES(DEV_FEATURE_CFG) \
+	(                                                                       \
+	 ((DEV_FEATURE_CFG).ui64Features & RGX_FEATURE_ROGUEXE_BIT_MASK) != 0 ? \
+	  (DEV_FEATURE_CFG).ui32NumClusters :                                   \
+	  RGX_REQ_NUM_PHANTOMS((DEV_FEATURE_CFG).ui32NumClusters)               \
+	)
+
+#else
+
+#if defined(RGX_FEATURE_ROGUEXE)
+#define RGX_NUM_RASTERISATION_MODULES	RGX_FEATURE_NUM_CLUSTERS
+#else
+#define RGX_NUM_RASTERISATION_MODULES	RGX_NUM_PHANTOMS
+#endif
+
+#define RGX_GET_NUM_RASTERISATION_MODULES(DEV_FEATURE_CFG) RGX_NUM_RASTERISATION_MODULES
+
+#endif /* !__KERNEL__ */
+
+#endif /* _RGXDEFS_KM_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgxmmudefs_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgxmmudefs_km.h
new file mode 100644
index 0000000..43bedd2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/hwdefs/km/rgxmmudefs_km.h
@@ -0,0 +1,396 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgxmmudefs_km.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ *      rogue_bif.def
+ */
+
+
+#ifndef _RGXMMUDEFS_KM_H_
+#define _RGXMMUDEFS_KM_H_
+
+#include "img_types.h"
+
+
+#define RGXMMUDEFS_KM_REVISION 0
+
+/*
+	Encoding of DM (note value 0x6 not used)
+*/
+#define RGX_BIF_DM_ENCODING_VERTEX                        (0x00000000U)
+#define RGX_BIF_DM_ENCODING_PIXEL                         (0x00000001U)
+#define RGX_BIF_DM_ENCODING_COMPUTE                       (0x00000002U)
+#define RGX_BIF_DM_ENCODING_TLA                           (0x00000003U)
+#define RGX_BIF_DM_ENCODING_PB_VCE                        (0x00000004U)
+#define RGX_BIF_DM_ENCODING_PB_TE                         (0x00000005U)
+#define RGX_BIF_DM_ENCODING_META                          (0x00000007U)
+#define RGX_BIF_DM_ENCODING_HOST                          (0x00000008U)
+#define RGX_BIF_DM_ENCODING_PM_ALIST                      (0x00000009U)
+
+
+/*
+	Labelling of fields within virtual address
+*/
+/*
+Page Catalogue entry #
+*/
+#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT                  (30U)
+#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK                 (IMG_UINT64_C(0XFFFFFF003FFFFFFF))
+/*
+Page Directory entry #
+*/
+#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT                  (21U)
+#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFC01FFFFF))
+/*
+Page Table entry #
+*/
+#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT                  (12U)
+#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFE00FFF))
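+/* Sketch: for the 4KB page case a 40-bit device-virtual address decomposes
+ * into PC index [39:30], PD index [29:21], PT index [20:12] and page offset
+ * [11:0]; each index is extracted with the usual clear-mask idiom:
+ *
+ *   ui32PCIdx = (IMG_UINT32)((ui64DevVAddr & ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK)
+ *                   >> RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT);
+ */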
+
+
+/*
+	Number of entries in a PC
+*/
+#define RGX_MMUCTRL_ENTRIES_PC_VALUE                      (0x00000400U)
+
+
+/*
+	Number of entries in a PD
+*/
+#define RGX_MMUCTRL_ENTRIES_PD_VALUE                      (0x00000200U)
+
+
+/*
+	Number of entries in a PT
+*/
+#define RGX_MMUCTRL_ENTRIES_PT_VALUE                      (0x00000200U)
+
+
+/*
+	Size in bits of the PC entries in memory
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE                   (0x00000020U)
+
+
+/*
+	Size in bits of the PD entries in memory
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE                   (0x00000040U)
+
+
+/*
+	Size in bits of the PT entries in memory
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE                   (0x00000040U)
+
+
+/*
+	Encoding of page size field
+*/
+#define RGX_MMUCTRL_PAGE_SIZE_MASK                        (0x00000007U)
+#define RGX_MMUCTRL_PAGE_SIZE_4KB                         (0x00000000U)
+#define RGX_MMUCTRL_PAGE_SIZE_16KB                        (0x00000001U)
+#define RGX_MMUCTRL_PAGE_SIZE_64KB                        (0x00000002U)
+#define RGX_MMUCTRL_PAGE_SIZE_256KB                       (0x00000003U)
+#define RGX_MMUCTRL_PAGE_SIZE_1MB                         (0x00000004U)
+#define RGX_MMUCTRL_PAGE_SIZE_2MB                         (0x00000005U)
+
+
+/*
+	Range of bits used for 4KB Physical Page
+*/
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT                  (12U)
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK                 (IMG_UINT64_C(0XFFFFFF0000000FFF))
+
+
+/*
+
+		Range of bits used for 16KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT                 (14U)
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK                (IMG_UINT64_C(0XFFFFFF0000003FFF))
+
+
+/*
+
+		Range of bits used for 64KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT                 (16U)
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK                (IMG_UINT64_C(0XFFFFFF000000FFFF))
+
+
+/*
+
+		Range of bits used for 256KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT                (18U)
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK               (IMG_UINT64_C(0XFFFFFF000003FFFF))
+
+
+/*
+
+		Range of bits used for 1MB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT                  (20U)
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK                 (IMG_UINT64_C(0XFFFFFF00000FFFFF))
+
+
+/*
+
+		Range of bits used for 2MB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT                  (21U)
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK                 (IMG_UINT64_C(0XFFFFFF00001FFFFF))
+
+
+/*
+
+		Range of bits used for PT Base Address for 4KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT               (12U)
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK              (IMG_UINT64_C(0XFFFFFF0000000FFF))
+
+
+/*
+
+		Range of bits used for PT Base Address for 16KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT              (10U)
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK             (IMG_UINT64_C(0XFFFFFF00000003FF))
+
+
+/*
+
+		Range of bits used for PT Base Address for 64KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT              (8U)
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK             (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+
+		Range of bits used for PT Base Address for 256KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT             (6U)
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK            (IMG_UINT64_C(0XFFFFFF000000003F))
+
+
+/*
+
+		Range of bits used for PT Base Address for 1MB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT               (5U)
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK              (IMG_UINT64_C(0XFFFFFF000000001F))
+
+
+/*
+
+		Range of bits used for PT Base Address for 2MB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT               (5U)
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK              (IMG_UINT64_C(0XFFFFFF000000001F))
+
+
+/*
+
+		Format of Page Table data
+	
+*/
+/*
+PM/Meta protect bit
+*/
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT         (62U)
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK        (IMG_UINT64_C(0XBFFFFFFFFFFFFFFF))
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN            (IMG_UINT64_C(0X4000000000000000))
+/*
+Upper part of vp page field
+*/
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT              (40U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK             (IMG_UINT64_C(0XC00000FFFFFFFFFF))
+/*
+Physical page address
+*/
+#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT                    (12U)
+#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK                   (IMG_UINT64_C(0XFFFFFF0000000FFF))
+/*
+Lower part of vp page field
+*/
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT              (6U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT           (5U)
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN              (IMG_UINT64_C(0X0000000000000020))
+/*
+PM Src
+*/
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT                  (4U)
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN                     (IMG_UINT64_C(0X0000000000000010))
+/*
+SLC Bypass Ctrl
+*/
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT         (3U)
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN            (IMG_UINT64_C(0X0000000000000008))
+/*
+Cache Coherency bit
+*/
+#define RGX_MMUCTRL_PT_DATA_CC_SHIFT                      (2U)
+#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_MMUCTRL_PT_DATA_CC_EN                         (IMG_UINT64_C(0X0000000000000004))
+/*
+Read only
+*/
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT               (1U)
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN                  (IMG_UINT64_C(0X0000000000000002))
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT                   (0U)
+#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PT_DATA_VALID_EN                      (IMG_UINT64_C(0X0000000000000001))
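+
+/* Editorial sketch (illustration only): a minimal valid, writable 4KB PT
+ * entry places the 4KB-aligned physical address in the PAGE field and sets
+ * the VALID bit; the remaining attribute bits OR in the same way:
+ *
+ *     ui64PTE = (ui64PhysAddr & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK)
+ *               | RGX_MMUCTRL_PT_DATA_VALID_EN;
+ */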
+
+
+/*
+
+		Format of Page Directory data
+	
+*/
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT           (40U)
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK          (IMG_UINT64_C(0XFFFFFEFFFFFFFFFF))
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN              (IMG_UINT64_C(0X0000010000000000))
+/*
+Page Table base address
+*/
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT                 (5U)
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK                (IMG_UINT64_C(0XFFFFFF000000001F))
+/*
+Page Size
+*/
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT               (1U)
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFFF1))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB                 (IMG_UINT64_C(0x0000000000000000))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB                (IMG_UINT64_C(0x0000000000000002))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB                (IMG_UINT64_C(0x0000000000000004))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB               (IMG_UINT64_C(0x0000000000000006))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB                 (IMG_UINT64_C(0x0000000000000008))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB                 (IMG_UINT64_C(0x000000000000000a))
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT                   (0U)
+#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PD_DATA_VALID_EN                      (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+
+		Format of Page Catalogue data
+	
+*/
+/*
+Page Catalogue base address
+*/
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT                 (4U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK                (0X0000000FU)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT            (12U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE             (4096U)
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT           (1U)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK          (0XFFFFFFFDU)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN              (0X00000002U)
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT                   (0U)
+#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK                  (0XFFFFFFFEU)
+#define RGX_MMUCTRL_PC_DATA_VALID_EN                      (0X00000001U)
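+
+/* Editorial sketch (illustration only): PC entries are 32 bits wide; the
+ * 4KB-aligned PD base address is down-shifted into the field before the
+ * valid bit is OR'd in:
+ *
+ *     ui32PCE = ((IMG_UINT32)(ui64PDPhysAddr >> RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT)
+ *                << RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT)
+ *               | RGX_MMUCTRL_PC_DATA_VALID_EN;
+ */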
+
+
+#endif /* _RGXMMUDEFS_KM_H_ */
+
+/*****************************************************************************
+ End of file (rgxmmudefs_km.h)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/cache_ops.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/cache_ops.h
new file mode 100644
index 0000000..ea71bbf
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/cache_ops.h
@@ -0,0 +1,55 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services cache management header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines for cache management which are visible internally
+                and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _CACHE_OPS_H_
+#define _CACHE_OPS_H_
+#include "img_types.h"
+
+typedef IMG_UINT32 PVRSRV_CACHE_OP;				/*!< Type represents cache maintenance operation */
+#define PVRSRV_CACHE_OP_NONE				0x0	/*!< No operation */
+#define PVRSRV_CACHE_OP_CLEAN				0x1	/*!< Write-back (clean) w/o invalidate */
+#define PVRSRV_CACHE_OP_INVALIDATE			0x2	/*!< Invalidate w/o write-back */
+#define PVRSRV_CACHE_OP_FLUSH				0x3	/*!< Write-back and invalidate */
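+
+/* Editorial note: the encodings compose; PVRSRV_CACHE_OP_FLUSH (0x3) equals
+ * PVRSRV_CACHE_OP_CLEAN (0x1) | PVRSRV_CACHE_OP_INVALIDATE (0x2). */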
+
+#endif	/* _CACHE_OPS_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/config_kernel_user_mt8167.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/config_kernel_user_mt8167.h
new file mode 100644
index 0000000..aabe3e2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/config_kernel_user_mt8167.h
@@ -0,0 +1,117 @@
+#define SUPPORT_VDM_CONTEXT_STORE_BUFFER_AB 
+#define SUPPORT_PERCONTEXT_FREELIST 
+#define SUPPORT_LINUX_REFCNT_PMR_ON_IMPORT 
+#define GPUVIRT_VALIDATION_NUM_OS 8
+#define GPUVIRT_VALIDATION_NUM_REGIONS 2
+#define PVRSRV_APPHINT_OSIDREGION0MIN "0x00000000 0x04000000 0x10000000 0x18000000 0x20000000 0x28000000 0x30000000 0x38000000"
+#define PVRSRV_APPHINT_OSIDREGION0MAX "0x3FFFFFFF 0x0FFFFFFF 0x17FFFFFF 0x1FFFFFFF 0x27FFFFFF 0x2FFFFFFF 0x37FFFFFF 0x3FFFFFFF"
+#define PVRSRV_APPHINT_OSIDREGION1MIN "0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000"
+#define PVRSRV_APPHINT_OSIDREGION1MAX "0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF"
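+/* Editorial note: each OSIDREGION string carries one value per OS ID
+ * (GPUVIRT_VALIDATION_NUM_OS entries). Region 0 gives each OS ID its own
+ * address window; region 1 (0x3F000000-0x3FFFFFFF) is shared by all. */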
+#define RGX_FW_FILENAME "rgx.fw"
+#define LINUX 
+#define PVR_BUILD_DIR "mt8167s"
+#define PVR_BUILD_TYPE "release"
+#define PVRSRV_MODNAME "pvrsrvkm"
+#define SUPPORT_RGX 1
+#define RELEASE 
+#define RGX_BVNC_CORE_KM_HEADER "cores/rgxcore_km_22.40.54.30.h"
+#define RGX_BVNC_CORE_HEADER "cores/rgxcore_22.40.54.30.h"
+#define RGX_BNC_CONFIG_KM_HEADER "configs/rgxconfig_km_22.V.54.30.h"
+#define RGX_BNC_CONFIG_HEADER "configs/rgxconfig_22.V.54.30.h"
+#define SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION 
+#define SUPPORT_DBGDRV_EVENT_OBJECTS 
+#define PDUMP_STREAMBUF_MAX_SIZE_MB 16
+#define PVRSRV_NEED_PVR_DPF 
+#define PVRSRV_NEED_PVR_STACKTRACE_NATIVE 
+#define SUPPORT_GPUTRACE_EVENTS 
+#define PVRSRV_VZ_NUM_OSID 
+#define PVRSRV_APPHINT_DRIVERMODE 0x7FFFFFFF
+#define RGX_FW_HEAP_SHIFT 25
+#define RGX_FW_HEAP_GUEST_OFFSET_KCCB 0x54000U
+#define RGX_FW_HEAP_GUEST_OFFSET_FWCCB 0x53080U
+#define RGX_FW_HEAP_GUEST_OFFSET_KCCBCTL 0x53000U
+#define RGX_FW_HEAP_GUEST_OFFSET_FWCCBCTL 0x53040U
+#define FIX_DUSTS_POW_ON_INIT 
+#define SUPPORT_LINUX_X86_WRITECOMBINE 
+#define SUPPORT_LINUX_X86_PAT 
+#define PVR_LINUX_USING_WORKQUEUES 
+#define PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE 
+#define PVR_LINUX_TIMERS_USING_WORKQUEUES 
+#define PVR_LDM_PLATFORM_PRE_REGISTERED 
+#define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm"
+#define PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN 256
+#define SUPPORT_MMU_PENDING_FAULT_PROTECTION 
+#define HWR_DEFAULT_ENABLED 
+#define PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT APPHNT_BLDVAR_DBGDUMPLIMIT
+#define PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG IMG_FALSE
+#define PVRSRV_APPHINT_HTBUFFERSIZE 0x1000
+#define PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE 0x4000
+#define PVRSRV_APPHINT_ENABLESIGNATURECHECKS APPHNT_BLDVAR_ENABLESIGNATURECHECKS
+#define PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE RGXFW_SIG_BUFFER_SIZE_MIN
+#define PVRSRV_APPHINT_DISABLECLOCKGATING 0
+#define PVRSRV_APPHINT_DISABLEDMOVERLAP 0
+#define PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE 0
+#define PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH RGXFWIF_INICFG_CTXSWITCH_DM_ALL
+#define PVRSRV_APPHINT_VDMCONTEXTSWITCHMODE RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX
+#define PVRSRV_APPHINT_ENABLERDPOWERISLAND RGX_RD_POWER_ISLAND_DEFAULT
+#define PVRSRV_APPHINT_FIRMWAREPERF FW_PERF_CONF_NONE
+#define PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN
+#define PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER 0
+#define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB 2048
+#define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB 128
+#define PVRSRV_APPHINT_JONESDISABLEMASK 0
+#define PVRSRV_APPHINT_NEWFILTERINGMODE 1
+#define PVRSRV_APPHINT_TRUNCATEMODE 0
+#define PVRSRV_APPHINT_USEMETAT1 RGX_META_T1_OFF
+#define PVRSRV_APPHINT_EMUMAXFREQ 0
+#define PVRSRV_APPHINT_GPIOVALIDATIONMODE 0
+#define PVRSRV_APPHINT_RGXBVNC ""
+#define PVRSRV_APPHINT_CLEANUPTHREADPRIORITY 0
+#define PVRSRV_APPHINT_CLEANUPTHREADWEIGHT 0
+#define PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY 0
+#define PVRSRV_APPHINT_WATCHDOGTHREADWEIGHT 0
+#define PVRSRV_APPHINT_ASSERTONHWRTRIGGER IMG_FALSE
+#define PVRSRV_APPHINT_ASSERTOUTOFMEMORY IMG_FALSE
+#define PVRSRV_APPHINT_CHECKMLIST APPHNT_BLDVAR_DEBUG
+#define PVRSRV_APPHINT_DISABLEFEDLOGGING IMG_FALSE
+#define PVRSRV_APPHINT_ENABLEAPM RGX_ACTIVEPM_DEFAULT
+#define PVRSRV_APPHINT_ENABLEHTBLOGGROUP 0
+#define PVRSRV_APPHINT_ENABLELOGGROUP 0
+#define PVRSRV_APPHINT_FIRMWARELOGTYPE 0
+#define PVRSRV_APPHINT_HTBOPERATIONMODE HTB_OPMODE_DROPLATEST
+#define PVRSRV_APPHINT_HWPERFFWFILTER 0
+#define PVRSRV_APPHINT_HWPERFHOSTFILTER 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENRL 0
+#define PVRSRV_APPHINT_TIMECORRCLOCK 0
+#define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_FALSE
+#define PVRSRV_APPHINT_FWPOISONONFREEVALUE 0xBD
+#define PVRSRV_APPHINT_ZEROFREELIST IMG_FALSE
+#define PVRSRV_APPHINT_DUSTREQUESTINJECT IMG_FALSE
+#define PVRSRV_APPHINT_DISABLEPDUMPPANIC IMG_FALSE
+#define PVRSRV_APPHINT_CACHEOPCONFIG 0
+#define PVRSRV_ENABLE_PROCESS_STATS 
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO 
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD 90
+#define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240
+#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 20480
+#define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 524288
+#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256
+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM  2
+#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD  16384
+#define SUPPORT_SERVER_SYNC 
+#define PVR_DRM_NAME "pvr"
+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES 8
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D 14
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D 14
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA 15
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D 16
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RTU 15
+#define SUPPORT_BUFFER_SYNC 1
+#define MTK_CONFIG_OF
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/dbgdrvif_srv5.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/dbgdrvif_srv5.h
new file mode 100644
index 0000000..87f303e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/dbgdrvif_srv5.h
@@ -0,0 +1,263 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug driver for Services 5
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Debug Driver Interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DBGDRVIF_SRV5_
+#define _DBGDRVIF_SRV5_
+
+#if defined(_MSC_VER) 
+#pragma  warning(disable:4200)
+#endif
+
+#if defined(__linux__)
+
+#define FILE_DEVICE_UNKNOWN             0
+#define METHOD_BUFFERED                 0
+#define FILE_ANY_ACCESS                 0
+
+#define CTL_CODE( DeviceType, Function, Method, Access ) (Function) 
+#define MAKEIOCTLINDEX(i)	((i) & 0xFFF)
+
+#else
+
+#include "ioctldef.h"
+
+#endif
+
+#include "img_defs.h"
+
+
+/*****************************************************************************
+ Stream mode stuff.
+*****************************************************************************/
+#define DEBUG_CAPMODE_FRAMED			0x00000001UL /* Default capture mode, set when streams created */
+#define DEBUG_CAPMODE_CONTINUOUS		0x00000002UL /* WDDM only; streams are created with their capture mode set to this */
+
+#define DEBUG_FLAGS_USE_NONPAGED_MEM	0x00000001UL /* Only set in WDDM */
+#define DEBUG_FLAGS_NO_BUF_EXPANDSION	0x00000002UL
+#define DEBUG_FLAGS_READONLY			0x00000008UL
+#define DEBUG_FLAGS_WRITEONLY			0x00000010UL
+#define DEBUG_FLAGS_CIRCULAR			0x00000020UL
+
+/* Stream name maximum length */
+#define DEBUG_STREAM_NAME_MAX			32
+
+/*****************************************************************************
+ IOCTL values.
+*****************************************************************************/
+/* IOCTL values are defined here so that the Windows-based OS layer of PDump
+   in the server can access the GetServiceTable method.
+ */
+#define DEBUG_SERVICE_IOCTL_BASE		0x800UL
+#define DEBUG_SERVICE_GETSERVICETABLE	CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETSTREAM			CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_READ				CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_SETMARKER			CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETMARKER			CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_WAITFOREVENT		CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETFRAME			CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#if defined(__QNXNTO__)
+#define DEBUG_SERVICE_CREATESTREAM		CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_MAX_API			8
+#else
+#define DEBUG_SERVICE_MAX_API			9
+#endif
+
+
+#if defined(_WIN32)
+/*****************************************************************************
+ Debug driver device name
+*****************************************************************************/
+#if defined (DBGDRV_MODULE_NAME)
+#define REGISTRY_PATH_TO_DEBUG_DRIVER \
+	L"\\Registry\\Machine\\System\\CurrentControlSet\\Services\\" DBGDRV_MODULE_NAME
+#define DBGDRV_NT_DEVICE_NAME				L"\\Device\\" DBGDRV_MODULE_NAME
+#define DBGDRV_NT_SYMLINK					L"\\DosDevices\\" DBGDRV_MODULE_NAME
+#else
+#error Debug driver name must be specified
+/*
+#define DBGDRV_NT_DEVICE_NAME				L"\\Device\\VLDbgDrv"
+#define DBGDRV_NT_SYMLINK					L"\\DosDevices\\VLDBGDRV"
+*/
+#endif
+
+/* symbolic link name */
+#define DBGDRV_WIN32_DEVICE_NAME			"\\\\.\\VLDBGDRV"
+
+#define DBGDRV_WINCE_DEVICE_NAME			L"DBD1:"
+#endif
+
+/* A pointer type which is at least 64 bits wide. The fixed width ensures
+ * consistency in structures between 32 and 64-bit code.
+ * The UM code (be it 32 or 64 bit) can simply write to the native pointer type (pvPtr).
+ * 64-bit KM code must read ui32Ptr in the case of a 32-bit client; otherwise
+ * it can just read pvPtr, since the client is also 64-bit.
+ *
+ * ui64Ptr ensures the union is 64-bits wide in a 32-bit client.
+ *
+ * The union is explicitly 64-bit aligned as it was found gcc on x32 only
+ * aligns it to 32-bit, as the ABI permits aligning 64-bit types to a 32-bit
+ * boundary.
+ */
+typedef union
+{
+	/* native pointer type for UM to write to */
+	void *pvPtr;
+	/* the pointer written by a 32-bit client */
+	IMG_UINT32 ui32Ptr;
+	/* force the union width */
+	IMG_UINT64 ui64Ptr;
+} DBG_WIDEPTR __aligned(8);
+
+/* Helper macro for dbgdriv (KM) to get the pointer value from the WIDEPTR type,
+ * depending on whether the client is 32 or 64-bit.
+ *
+ * Note: the double cast is required to avoid a
+ * 'cast to pointer from integer of different size' warning;
+ * this is solved by first casting to an integer type.
+ */
+
+#if defined(CONFIG_COMPAT)
+#define WIDEPTR_GET_PTR(p, bCompat) (bCompat ? \
+					(void *) (uintptr_t) (p).ui32Ptr : \
+					(p).pvPtr)
+#else
+#define WIDEPTR_GET_PTR(p, bCompat) (p).pvPtr
+#endif
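+
+/* Usage sketch (editorial, not part of the original header): a 64-bit
+ * kernel-side handler recovers the client's buffer pointer from the wide
+ * type as follows, where bCompat is IMG_TRUE for a 32-bit client:
+ *
+ *     DBG_IN_READ *psIn = ...;   (hypothetical ioctl input)
+ *     void *pvOut = WIDEPTR_GET_PTR(psIn->pui8OutBuffer, bCompat);
+ */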
+
+typedef enum _DBG_EVENT_
+{
+	DBG_EVENT_STREAM_DATA = 1
+} DBG_EVENT;
+
+
+/*****************************************************************************
+ In/Out Structures
+*****************************************************************************/
+#if defined(__QNXNTO__)
+typedef struct _DBG_IN_CREATESTREAM_
+{
+	union
+	{
+		IMG_CHAR *pszName;
+		IMG_UINT64 ui64Name;
+	} u;
+	IMG_UINT32 ui32Pages;
+	IMG_UINT32 ui32CapMode;
+	IMG_UINT32 ui32OutMode;
+} DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM;
+
+typedef struct _DBG_OUT_CREATESTREAM_
+{
+	IMG_HANDLE phInit;
+	IMG_HANDLE phMain;
+	IMG_HANDLE phDeinit;
+} DBG_OUT_CREATESTREAM, *PDBG_OUT_CREATESTREAM;
+#endif
+
+typedef struct _DBG_IN_FINDSTREAM_
+{
+	IMG_CHAR pszName[DEBUG_STREAM_NAME_MAX];
+	IMG_BOOL bResetStream;
+} DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM;
+
+#define DEBUG_READ_BUFID_MAIN			0
+#define DEBUG_READ_BUFID_INIT			1
+#define DEBUG_READ_BUFID_DEINIT			2
+
+typedef struct _DBG_IN_READ_
+{
+	DBG_WIDEPTR pui8OutBuffer;
+	IMG_SID hStream;
+	IMG_UINT32 ui32BufID;
+	IMG_UINT32 ui32OutBufferSize;
+} DBG_IN_READ, *PDBG_IN_READ;
+
+typedef struct _DBG_OUT_READ_
+{
+	IMG_UINT32 ui32DataRead;
+	IMG_UINT32 ui32SplitMarker;
+} DBG_OUT_READ, *PDBG_OUT_READ;
+
+typedef struct _DBG_IN_SETMARKER_
+{
+	IMG_SID hStream;
+	IMG_UINT32 ui32Marker;
+} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER;
+
+/*
+	DBG STREAM abstract types
+*/
+
+typedef struct _DBG_STREAM_CONTROL_* PDBG_STREAM_CONTROL;
+typedef struct _DBG_STREAM_* PDBG_STREAM;
+
+/*
+	Lookup identifiers for the GetState method in the KM service table.
+ */
+#define DBG_GET_STATE_FLAG_IS_READONLY    0x03
+
+
+/*****************************************************************************
+ Kernel mode service table
+*****************************************************************************/
+typedef struct _DBGKM_SERVICE_TABLE_
+{
+	IMG_UINT32 ui32Size;
+	IMG_BOOL	(IMG_CALLCONV *pfnCreateStream)			(IMG_CHAR * pszName,IMG_UINT32 ui32Flags,IMG_UINT32 ui32Pages, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+	void		(IMG_CALLCONV *pfnDestroyStream)		(IMG_HANDLE hInit, IMG_HANDLE hMain, IMG_HANDLE hDeinit);
+	IMG_UINT32	(IMG_CALLCONV *pfnDBGDrivWrite2)		(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize);
+	void		(IMG_CALLCONV *pfnSetMarker)			(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+	void		(IMG_CALLCONV *pfnWaitForEvent)			(DBG_EVENT eEvent);
+	IMG_UINT32  (IMG_CALLCONV *pfnGetCtrlState)			(PDBG_STREAM psStream, IMG_UINT32 ui32StateID);
+	void		(IMG_CALLCONV *pfnSetFrame)				(IMG_UINT32 ui32Frame);
+} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE;
+
+#if defined(_MSC_VER) 
+#pragma  warning(default:4200)
+#endif
+
+#endif
+
+/*****************************************************************************
+ End of file
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/devicemem_typedefs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/devicemem_typedefs.h
new file mode 100644
index 0000000..108fb30
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/devicemem_typedefs.h
@@ -0,0 +1,136 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Client side part of device memory management -- this file
+                is forked from new_devmem_allocation.h as this one has to
+                reside in the top level include so that client code is able
+                to make use of the typedefs.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_TYPEDEFS_H
+#define DEVICEMEM_TYPEDEFS_H
+
+#include <powervr/mem_types.h>
+#include "img_types.h"
+#include "pvrsrv_memallocflags.h"
+
+typedef struct _DEVMEM_CONTEXT_ DEVMEM_CONTEXT;		/*!< Convenience typedef for struct _DEVMEM_CONTEXT_ */
+typedef struct _DEVMEM_HEAP_ DEVMEM_HEAP;			/*!< Convenience typedef for struct _DEVMEM_HEAP_ */
+typedef struct _DEVMEM_MEMDESC_ DEVMEM_MEMDESC;		/*!< Convenience typedef for struct _DEVMEM_MEMDESC_ */
+typedef struct _DEVMEM_PAGELIST_ DEVMEM_PAGELIST;	/*!< Convenience typedef for struct _DEVMEM_PAGELIST_ */
+typedef PVRSRV_MEMALLOCFLAGS_T DEVMEM_FLAGS_T;		/*!< Convenience typedef for PVRSRV_MEMALLOCFLAGS_T */
+
+typedef IMG_HANDLE DEVMEM_EXPORTHANDLE;             /*!< Typedef for DeviceMem Export Handle */
+typedef IMG_UINT64 DEVMEM_EXPORTKEY;                /*!< Typedef for DeviceMem Export Key */
+typedef IMG_DEVMEM_SIZE_T DEVMEM_SIZE_T;            /*!< Typedef for DeviceMem SIZE_T */
+typedef IMG_DEVMEM_LOG2ALIGN_T DEVMEM_LOG2ALIGN_T;  /*!< Typedef for DeviceMem LOG2 Alignment */
+
+typedef struct _DEVMEMX_PHYS_MEMDESC_ DEVMEMX_PHYSDESC;    /*!< Convenience typedef for DevmemX physical */
+typedef struct _DEVMEMX_VIRT_MEMDESC_ DEVMEMX_VIRTDESC;    /*!< Convenience typedef for DevmemX virtual */
+
+/*! calling code needs all the info in this struct, to be able to pass it around */
+typedef struct
+{
+    /*! A handle to the PMR. */
+    IMG_HANDLE hPMRExportHandle;
+    /*! The "key" to prove we have authorization to use this PMR */
+    IMG_UINT64 uiPMRExportPassword;
+    /*! Size and alignment properties for this PMR.  Note, these
+       numbers are not trusted in kernel, but we need to cache them
+       client-side in order to allocate from the VM arena.  The kernel
+       will know the actual alignment and size of the PMR and thus
+       would prevent client code from breaching security here.  Ditto
+       for physmem granularity (aka page size) if this is different
+       from alignment */
+    IMG_DEVMEM_SIZE_T uiSize;
+    /*! We call this "contiguity guarantee" to be more precise than
+       calling it "alignment" or "page size", terms which may seem
+       similar but have different emphasis.  The number reported here
+       is the minimum contiguity guarantee from the creator of the
+       PMR.  Now, there is no requirement to allocate that coarsely
+       from the RA.  The alignment given to the RA simply needs to be
+       at least as coarse as the device page size for the heap we
+       ultimately intend to map into.  What is important is that the
+       device MMU data page size is not greater than the minimum
+       contiguity guarantee from the PMR.  This value is reported to
+       the client in order that it can choose to make early checks and
+       perhaps decide which heap (in a variable page size scenario) it
+       would be safe to map this PMR into.  For convenience, the
+       client may choose to use this argument as the alignment of the
+       virtual range he chooses to allocate, but this is _not_
+       necessary and in many cases would be able to get away with a
+       finer alignment, should the heap into which this PMR will be
+       mapped support it. */
+    IMG_DEVMEM_LOG2ALIGN_T uiLog2ContiguityGuarantee;
+} DEVMEM_EXPORTCOOKIE;
+
+/* Enum that describes the operation associated with changing sparse memory */
+typedef enum Resize {
+	SPARSE_RESIZE_NONE = 0,
+
+	/* Set to indicate that the change requires pages to be allocated */
+	SPARSE_RESIZE_ALLOC = 1,
+
+	/* Set to indicate that the change requires pages to be freed */
+	SPARSE_RESIZE_FREE = 2,
+
+	SPARSE_RESIZE_BOTH = (SPARSE_RESIZE_ALLOC | SPARSE_RESIZE_FREE),
+
+	/* Set to silently swap the underlying physical memory without
+	 * disturbing its device or CPU virtual maps. This flag is not
+	 * supported with PDUMP and can lead to a PDUMP panic when used */
+	SPARSE_REMAP_MEM = 4,
+
+	/* Set to make the sparse changes appear in the CPU virtual map */
+	SPARSE_MAP_CPU_ADDR = 8
+} SPARSE_MEM_RESIZE_FLAGS;
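+
+/* Editorial example: the values combine as a bitmask, so a change that both
+ * frees and allocates pages and keeps the CPU view current would pass
+ * (SPARSE_RESIZE_BOTH | SPARSE_MAP_CPU_ADDR). */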
+
+/* To be used with all sparse allocations that get mapped to CPU virtual space.
+ * The sparse allocation CPU mapping is torn down and re-mapped every time the
+ * sparse allocation layout changes */
+#define PVRSRV_UNMAP_ON_SPARSE_CHANGE 1
+
+/* To use with DevmemSubAllocate() as the default factor if no
+ * over-allocation is desired. */
+#define DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER 1
+
+#endif /* #ifndef DEVICEMEM_TYPEDEFS_H */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/dllist.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/dllist.h
new file mode 100644
index 0000000..cb4efb1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/dllist.h
@@ -0,0 +1,273 @@
+/*************************************************************************/ /*!
+@File
+@Title          Double linked list header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Double linked list interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DLLIST_
+#define _DLLIST_
+
+#include "img_types.h"
+
+/*!
+	Pointer to a linked list node
+*/
+typedef struct _DLLIST_NODE_	*PDLLIST_NODE;
+
+
+/*!
+	Node in a linked list
+*/
+/*
+ * Note: the following structure's size is architecture-dependent and
+ * clients may need to create a mirror of the structure definition if it needs
+ * to be used in a structure shared between host and device. Consider such
+ * clients if any changes are made to this structure.
+ */ 
+typedef struct _DLLIST_NODE_
+{
+	struct _DLLIST_NODE_	*psPrevNode;
+	struct _DLLIST_NODE_	*psNextNode;
+} DLLIST_NODE;
+
+
+/*!
+	Static initialiser
+*/
+#define DECLARE_DLLIST(n) \
+DLLIST_NODE n = {&n, &n}
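+
+/* Editorial note: DECLARE_DLLIST(sList) yields an empty list whose head
+ * points at itself in both directions, i.e. the same state that
+ * dllist_init() establishes. */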
+
+
+/*************************************************************************/ /*!
+@Function       dllist_init
+
+@Description    Initialize a new double linked list
+
+@Input          psListHead              List head Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_init(PDLLIST_NODE psListHead)
+{
+	psListHead->psPrevNode = psListHead;
+	psListHead->psNextNode = psListHead;
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_is_empty
+
+@Description    Returns whether the list is empty
+
+@Input          psListHead              List head Node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_BOOL dllist_is_empty(PDLLIST_NODE psListHead)
+{
+	return (IMG_BOOL) ((psListHead->psPrevNode == psListHead)
+	                   && (psListHead->psNextNode == psListHead));
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_add_to_head
+
+@Description    Add psNewNode to head of list psListHead
+
+@Input          psListHead             Head Node
+@Input          psNewNode              New Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_add_to_head(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
+{
+	PDLLIST_NODE psTmp;
+
+	psTmp = psListHead->psNextNode;
+
+	psListHead->psNextNode = psNewNode;
+	psNewNode->psNextNode = psTmp;
+
+	psTmp->psPrevNode = psNewNode;
+	psNewNode->psPrevNode = psListHead;
+}
+
+
+/*************************************************************************/ /*!
+@Function       dllist_add_to_tail
+
+@Description    Add psNewNode to tail of list psListHead
+
+@Input          psListHead             Head Node
+@Input          psNewNode              New Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_add_to_tail(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
+{
+	PDLLIST_NODE psTmp;
+
+	psTmp = psListHead->psPrevNode;
+
+	psListHead->psPrevNode = psNewNode;
+	psNewNode->psPrevNode = psTmp;
+
+	psTmp->psNextNode = psNewNode;
+	psNewNode->psNextNode = psListHead;
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_node_is_in_list
+
+@Description    Returns IMG_TRUE if psNode is in a list 
+
+@Input          psNode             List node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_BOOL dllist_node_is_in_list(PDLLIST_NODE psNode)
+{
+	return (IMG_BOOL) (psNode->psNextNode != 0);
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_get_next_node
+
+@Description    Returns the list node after psListHead, or NULL if
+				psListHead is the only element in the list.
+
+@Input          psListHead             List node to start the operation
+
+*/
+/*****************************************************************************/
+static INLINE
+PDLLIST_NODE dllist_get_next_node(PDLLIST_NODE psListHead)
+{
+	if (psListHead->psNextNode == psListHead)
+	{
+		return NULL;
+	}
+	else
+	{
+		return psListHead->psNextNode;
+	}
+} 
+
+
+/*************************************************************************/ /*!
+@Function       dllist_remove_node
+
+@Description    Removes psListNode from the list where it currently belongs
+
+@Input          psListNode             List node to be removed
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_remove_node(PDLLIST_NODE psListNode)
+{
+	psListNode->psNextNode->psPrevNode = psListNode->psPrevNode;
+	psListNode->psPrevNode->psNextNode = psListNode->psNextNode;
+
+	/* Clear the node to show it's not on a list */
+	psListNode->psPrevNode = 0;
+	psListNode->psNextNode = 0;
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_replace_head
+
+@Description    Moves the list from psOldHead to psNewHead
+
+@Input          psOldHead       List node to be replaced. Will become a head
+                                node of an empty list.
+@Input          psNewHead       List node to be inserted. Must be an empty list
+                                head.
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_replace_head(PDLLIST_NODE psOldHead, PDLLIST_NODE psNewHead)
+{
+	if (dllist_is_empty(psOldHead))
+	{
+		psNewHead->psNextNode = psNewHead;
+		psNewHead->psPrevNode = psNewHead;
+	}
+	else
+	{
+		/* Change the neighbouring nodes */
+		psOldHead->psNextNode->psPrevNode = psNewHead;
+		psOldHead->psPrevNode->psNextNode = psNewHead;
+
+		/* Copy the old data to the new node */
+		psNewHead->psNextNode = psOldHead->psNextNode;
+		psNewHead->psPrevNode = psOldHead->psPrevNode;
+
+		/* Remove links to the previous list */
+		psOldHead->psNextNode = psOldHead;
+		psOldHead->psPrevNode = psOldHead;
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_foreach_node
+
+@Description    Walk through all the nodes on the list.
+				Safe against removal of (node).
+
+@Input          list_head			List node to start the operation
+@Input			node				Current list node
+@Input			next				Node after the current one
+
+*/
+/*****************************************************************************/
+#define dllist_foreach_node(list_head, node, next)						\
+	for (node = (list_head)->psNextNode, next = (node)->psNextNode;		\
+		 node != (list_head);											\
+		 node = next, next = (node)->psNextNode)
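+
+/* Usage sketch (editorial): removal-safe iteration, e.g. draining a list:
+ *
+ *     PDLLIST_NODE psNode, psNext;
+ *     dllist_foreach_node(&sListHead, psNode, psNext)
+ *     {
+ *         dllist_remove_node(psNode);
+ *     }
+ */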
+
+#endif	/* _DLLIST_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/drm/netlink.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/drm/netlink.h
new file mode 100644
index 0000000..74a7644
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/drm/netlink.h
@@ -0,0 +1,119 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          Nulldisp/Netlink interface definition
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#ifndef __NETLINK_H__
+#define __NETLINK_H__
+
+/* For multi-plane pixel formats */
+#define NLPVRDPY_MAX_NUM_PLANES 3
+
+enum nlpvrdpy_cmd {
+	__NLPVRDPY_CMD_INVALID,
+	NLPVRDPY_CMD_CONNECT,
+	NLPVRDPY_CMD_CONNECTED,
+	NLPVRDPY_CMD_DISCONNECT,
+	NLPVRDPY_CMD_FLIP,
+	NLPVRDPY_CMD_FLIPPED,
+	NLPVRDPY_CMD_COPY,
+	NLPVRDPY_CMD_COPIED,
+	__NLPVRDPY_CMD_MAX
+};
+#define NLPVRDPY_CMD_MAX (__NLPVRDPY_CMD_MAX - 1)
+
+enum nlpvrdpy_attr {
+	__NLPVRDPY_ATTR_INVALID,
+	NLPVRDPY_ATTR_NAME,
+	NLPVRDPY_ATTR_MINOR,
+	NLPVRDPY_ATTR_NUM_PLANES,
+	NLPVRDPY_ATTR_WIDTH,
+	NLPVRDPY_ATTR_HEIGHT,
+	NLPVRDPY_ATTR_PIXFMT,
+	NLPVRDPY_ATTR_YUV_CSC,
+	NLPVRDPY_ATTR_YUV_BPP,
+	NLPVRDPY_ATTR_PLANE0_ADDR,
+	NLPVRDPY_ATTR_PLANE0_SIZE,
+	NLPVRDPY_ATTR_PLANE0_OFFSET,
+	NLPVRDPY_ATTR_PLANE0_PITCH,
+	NLPVRDPY_ATTR_PLANE1_ADDR,
+	NLPVRDPY_ATTR_PLANE1_SIZE,
+	NLPVRDPY_ATTR_PLANE1_OFFSET,
+	NLPVRDPY_ATTR_PLANE1_PITCH,
+	NLPVRDPY_ATTR_PLANE2_ADDR,
+	NLPVRDPY_ATTR_PLANE2_SIZE,
+	NLPVRDPY_ATTR_PLANE2_OFFSET,
+	NLPVRDPY_ATTR_PLANE2_PITCH,
+	NLPVRDPY_ATTR_LAYOUT,
+	NLPVRDPY_ATTR_FBC,
+	NLPVRDPY_ATTR_PAD,
+	__NLPVRDPY_ATTR_MAX
+};
+#define NLPVRDPY_ATTR_MAX  (__NLPVRDPY_ATTR_MAX - 1)
+
+static struct nla_policy nlpvrdpy_policy[NLPVRDPY_ATTR_MAX + 1] = {
+	[NLPVRDPY_ATTR_NAME]          = { .type = NLA_STRING },
+	[NLPVRDPY_ATTR_MINOR]         = { .type = NLA_U32 },
+	[NLPVRDPY_ATTR_NUM_PLANES]    = { .type = NLA_U8  },
+	[NLPVRDPY_ATTR_WIDTH]         = { .type = NLA_U32 },
+	[NLPVRDPY_ATTR_HEIGHT]        = { .type = NLA_U32 },
+	[NLPVRDPY_ATTR_PIXFMT]        = { .type = NLA_U32 },
+	[NLPVRDPY_ATTR_YUV_CSC]       = { .type = NLA_U8  },
+	[NLPVRDPY_ATTR_YUV_BPP]       = { .type = NLA_U8  },
+	[NLPVRDPY_ATTR_PLANE0_ADDR]   = { .type = NLA_U64 },
+	[NLPVRDPY_ATTR_PLANE0_SIZE]   = { .type = NLA_U64 },
+	[NLPVRDPY_ATTR_PLANE0_OFFSET] = { .type = NLA_U64 },
+	[NLPVRDPY_ATTR_PLANE0_PITCH]  = { .type = NLA_U64 },
+	[NLPVRDPY_ATTR_PLANE1_ADDR]   = { .type = NLA_U64 },
+	[NLPVRDPY_ATTR_PLANE1_SIZE]   = { .type = NLA_U64 },
+	[NLPVRDPY_ATTR_PLANE1_OFFSET] = { .type = NLA_U64 },
+	[NLPVRDPY_ATTR_PLANE1_PITCH]  = { .type = NLA_U64 },
+	[NLPVRDPY_ATTR_PLANE2_ADDR]   = { .type = NLA_U64 },
+	[NLPVRDPY_ATTR_PLANE2_SIZE]   = { .type = NLA_U64 },
+	[NLPVRDPY_ATTR_PLANE2_OFFSET] = { .type = NLA_U64 },
+	[NLPVRDPY_ATTR_PLANE2_PITCH]  = { .type = NLA_U64 },
+	[NLPVRDPY_ATTR_LAYOUT]        = { .type = NLA_U32 },
+	[NLPVRDPY_ATTR_FBC]           = { .type = NLA_U32 },
+};
+
+#endif /* __NETLINK_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/drm/nulldisp_drm.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/drm/nulldisp_drm.h
new file mode 100644
index 0000000..7cda581
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/drm/nulldisp_drm.h
@@ -0,0 +1,102 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          Nulldisp DRM definitions shared between kernel and user space.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__NULLDISP_DRM_H__)
+#define __NULLDISP_DRM_H__
+
+#if defined(__KERNEL__)
+#include <drm/drm.h>
+#else
+#include <drm.h>
+#endif
+
+struct drm_nulldisp_gem_create {
+	__u64 size;   /* in */
+	__u32 flags;  /* in */
+	__u32 handle; /* out */
+};
+
+struct drm_nulldisp_gem_mmap {
+	__u32 handle; /* in */
+	__u32 pad;
+	__u64 offset; /* out */
+};
+
+#define NULLDISP_GEM_CPU_PREP_READ   (1 << 0)
+#define NULLDISP_GEM_CPU_PREP_WRITE  (1 << 1)
+#define NULLDISP_GEM_CPU_PREP_NOWAIT (1 << 2)
+
+struct drm_nulldisp_gem_cpu_prep {
+	__u32 handle; /* in */
+	__u32 flags;  /* in */
+};
+
+struct drm_nulldisp_gem_cpu_fini {
+	__u32 handle; /* in */
+	__u32 pad;
+};
+
+/*
+ * DRM command numbers, relative to DRM_COMMAND_BASE.
+ * These defines must be prefixed with "DRM_".
+ */
+#define DRM_NULLDISP_GEM_CREATE   0x00
+#define DRM_NULLDISP_GEM_MMAP     0x01
+#define DRM_NULLDISP_GEM_CPU_PREP 0x02
+#define DRM_NULLDISP_GEM_CPU_FINI 0x03
+
+/* These defines must be prefixed with "DRM_IOCTL_". */
+#define DRM_IOCTL_NULLDISP_GEM_CREATE \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_CREATE, struct drm_nulldisp_gem_create)
+
+#define DRM_IOCTL_NULLDISP_GEM_MMAP \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_MMAP, struct drm_nulldisp_gem_mmap)
+
+#define DRM_IOCTL_NULLDISP_GEM_CPU_PREP \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_CPU_PREP, struct drm_nulldisp_gem_cpu_prep)
+
+#define DRM_IOCTL_NULLDISP_GEM_CPU_FINI \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_CPU_FINI, struct drm_nulldisp_gem_cpu_fini)
+
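+/* Usage sketch (editorial, user space; 'fd' is assumed to be an open DRM
+ * device node):
+ *
+ *     struct drm_nulldisp_gem_create sCreate = { .size = 4096, .flags = 0 };
+ *     ioctl(fd, DRM_IOCTL_NULLDISP_GEM_CREATE, &sCreate);
+ *
+ *     struct drm_nulldisp_gem_mmap sMap = { .handle = sCreate.handle };
+ *     ioctl(fd, DRM_IOCTL_NULLDISP_GEM_MMAP, &sMap);
+ *
+ * sMap.offset is then the fake offset to pass to mmap() on fd.
+ */
+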
+#endif /* defined(__NULLDISP_DRM_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/drm/pdp_drm.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/drm/pdp_drm.h
new file mode 100644
index 0000000..0f47f4d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/drm/pdp_drm.h
@@ -0,0 +1,95 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PDP DRM definitions shared between kernel and user space.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_DRM_H__)
+#define __PDP_DRM_H__
+
+#if defined(__KERNEL__)
+#include <drm/drm.h>
+#else
+#include <drm.h>
+#endif
+
+struct drm_pdp_gem_create {
+	__u64 size;	/* in */
+	__u32 flags;	/* in */
+	__u32 handle;	/* out */
+};
+
+struct drm_pdp_gem_mmap {
+	__u32 handle;	/* in */
+	__u32 pad;
+	__u64 offset;	/* out */
+};
+
+#define PDP_GEM_CPU_PREP_READ	(1 << 0)
+#define PDP_GEM_CPU_PREP_WRITE	(1 << 1)
+#define PDP_GEM_CPU_PREP_NOWAIT	(1 << 2)
+
+struct drm_pdp_gem_cpu_prep {
+	__u32 handle;	/* in */
+	__u32 flags;	/* in */
+};
+
+struct drm_pdp_gem_cpu_fini {
+	__u32 handle;	/* in */
+	__u32 pad;
+};
+
+/*
+ * DRM command numbers, relative to DRM_COMMAND_BASE.
+ * These defines must be prefixed with "DRM_".
+ */
+#define DRM_PDP_GEM_CREATE		0x00
+#define DRM_PDP_GEM_MMAP		0x01
+#define DRM_PDP_GEM_CPU_PREP		0x02
+#define DRM_PDP_GEM_CPU_FINI		0x03
+
+/* These defines must be prefixed with "DRM_IOCTL_". */
+#define DRM_IOCTL_PDP_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_CREATE, struct drm_pdp_gem_create)
+#define DRM_IOCTL_PDP_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_MMAP, struct drm_pdp_gem_mmap)
+#define DRM_IOCTL_PDP_GEM_CPU_PREP	DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_PREP, struct drm_pdp_gem_cpu_prep)
+#define DRM_IOCTL_PDP_GEM_CPU_FINI	DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_FINI, struct drm_pdp_gem_cpu_fini)
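+
+/*
+ * Illustrative usage (not part of the original header): a userspace client
+ * holding an open DRM file descriptor fd might allocate and map a buffer
+ * through these ioctls roughly as follows:
+ *
+ *	struct drm_pdp_gem_create create = { .size = 4096, .flags = 0 };
+ *	struct drm_pdp_gem_mmap map = { 0 };
+ *
+ *	if (ioctl(fd, DRM_IOCTL_PDP_GEM_CREATE, &create) == 0) {
+ *		map.handle = create.handle;
+ *		if (ioctl(fd, DRM_IOCTL_PDP_GEM_MMAP, &map) == 0)
+ *			ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
+ *				   MAP_SHARED, fd, map.offset);
+ *	}
+ */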
+
+#endif /* defined(__PDP_DRM_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/drm/pvr_drm.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/drm/pvr_drm.h
new file mode 100644
index 0000000..abba65a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/drm/pvr_drm.h
@@ -0,0 +1,99 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PVR DRM definitions shared between kernel and user space.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRM_H__)
+#define __PVR_DRM_H__
+
+#include <linux/types.h>
+
+#if defined(__KERNEL__)
+#include <drm/drm.h>
+#else
+#include <drm.h>
+#endif
+
+/*
+ * IMPORTANT:
+ * All structures below are designed to be the same size when compiled for 32
+ * and/or 64 bit architectures, i.e. there should be no compiler inserted
+ * padding. This is achieved by sticking to the following rules:
+ * 1) only use fixed width types
+ * 2) always naturally align fields by arranging them appropriately and by using
+ *    padding fields when necessary
+ *
+ * These rules should _always_ be followed when modifying or adding new
+ * structures to this file.
+ */
+
+struct drm_pvr_srvkm_cmd {
+	__u32 bridge_id;
+	__u32 bridge_func_id;
+	__u64 in_data_ptr;
+	__u64 out_data_ptr;
+	__u32 in_data_size;
+	__u32 out_data_size;
+};
+
+struct drm_pvr_dbgdrv_cmd {
+	__u32 cmd;
+	__u32 pad;
+	__u64 in_data_ptr;
+	__u64 out_data_ptr;
+	__u32 in_data_size;
+	__u32 out_data_size;
+};
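+
+/*
+ * Illustrative check (not part of the original header): the layout rules
+ * above can be verified at compile time, e.g. with a C11 compiler:
+ *
+ *	_Static_assert(sizeof(struct drm_pvr_srvkm_cmd) == 32,
+ *		       "implicit padding in drm_pvr_srvkm_cmd");
+ *	_Static_assert(sizeof(struct drm_pvr_dbgdrv_cmd) == 32,
+ *		       "implicit padding in drm_pvr_dbgdrv_cmd");
+ */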
+
+/*
+ * DRM command numbers, relative to DRM_COMMAND_BASE.
+ * These defines must be prefixed with "DRM_".
+ */
+#define DRM_PVR_SRVKM_CMD		0 /* Used for PVR Services ioctls */
+#define DRM_PVR_DBGDRV_CMD		1 /* Debug driver (PDUMP) ioctls */
+
+
+/* These defines must be prefixed with "DRM_IOCTL_". */
+#define	DRM_IOCTL_PVR_SRVKM_CMD		DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SRVKM_CMD, struct drm_pvr_srvkm_cmd)
+#define	DRM_IOCTL_PVR_DBGDRV_CMD	DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_DBGDRV_CMD, struct drm_pvr_dbgdrv_cmd)
+
+#endif /* defined(__PVR_DRM_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/img_3dtypes.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/img_3dtypes.h
new file mode 100644
index 0000000..c940ff3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/img_3dtypes.h
@@ -0,0 +1,232 @@
+/*************************************************************************/ /*!
+@File
+@Title          Global 3D types for use by IMG APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines 3D types for use by IMG APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __IMG_3DTYPES_H__
+#define __IMG_3DTYPES_H__
+
+#include <powervr/buffer_attribs.h>
+
+/**
+ * Comparison functions
+ * A comparison function is applied as:
+ * A {CmpFunc} B
+ * where A is the reference value (e.g. the incoming depth) and B is the
+ * sample value (e.g. the value already in the depth buffer).
+ */
+typedef enum _IMG_COMPFUNC_
+{
+	IMG_COMPFUNC_NEVER,			/**< The comparison never succeeds */
+	IMG_COMPFUNC_LESS,			/**< The comparison is a less-than operation */
+	IMG_COMPFUNC_EQUAL,			/**< The comparison is an equal-to operation */
+	IMG_COMPFUNC_LESS_EQUAL,	/**< The comparison is a less-than or equal-to 
+									 operation */
+	IMG_COMPFUNC_GREATER,		/**< The comparison is a greater-than operation 
+								*/
+	IMG_COMPFUNC_NOT_EQUAL,		/**< The comparison is a not-equal-to operation
+								*/
+	IMG_COMPFUNC_GREATER_EQUAL,	/**< The comparison is a greater-than or 
+									 equal-to operation */
+	IMG_COMPFUNC_ALWAYS,		/**< The comparison always succeeds */
+} IMG_COMPFUNC;
+
+/**
+ * Stencil op functions
+ */
+typedef enum _IMG_STENCILOP_
+{
+	IMG_STENCILOP_KEEP,		/**< Keep original value */
+	IMG_STENCILOP_ZERO,		/**< Set stencil to 0 */
+	IMG_STENCILOP_REPLACE,	/**< Replace stencil entry */
+	IMG_STENCILOP_INCR_SAT,	/**< Increment stencil entry, clamping to max */
+	IMG_STENCILOP_DECR_SAT,	/**< Decrement stencil entry, clamping to zero */
+	IMG_STENCILOP_INVERT,	/**< Invert bits in stencil entry */
+	IMG_STENCILOP_INCR,		/**< Increment stencil entry, 
+								 wrapping if necessary */
+	IMG_STENCILOP_DECR,		/**< Decrement stencil entry, 
+								 wrapping if necessary */
+} IMG_STENCILOP;
+
+/**
+ * Alpha blending allows colours and textures on one surface
+ * to be blended with transparency onto another surface.
+ * These definitions apply to both source and destination blending
+ * states
+ */
+typedef enum _IMG_BLEND_
+{
+	IMG_BLEND_ZERO = 0,        /**< Blend factor is (0,0,0,0) */
+	IMG_BLEND_ONE,             /**< Blend factor is (1,1,1,1) */
+	IMG_BLEND_SRC_COLOUR,      /**< Blend factor is the source colour */
+	IMG_BLEND_INV_SRC_COLOUR,  /**< Blend factor is the inverted source colour
+									(i.e. 1-src_col) */
+	IMG_BLEND_SRC_ALPHA,       /**< Blend factor is the source alpha */
+	IMG_BLEND_INV_SRC_ALPHA,   /**< Blend factor is the inverted source alpha
+									(i.e. 1-src_alpha) */
+	IMG_BLEND_DEST_ALPHA,      /**< Blend factor is the destination alpha */
+	IMG_BLEND_INV_DEST_ALPHA,  /**< Blend factor is the inverted destination 
+									alpha */
+	IMG_BLEND_DEST_COLOUR,     /**< Blend factor is the destination colour */
+	IMG_BLEND_INV_DEST_COLOUR, /**< Blend factor is the inverted destination 
+									colour */
+	IMG_BLEND_SRC_ALPHASAT,    /**< Blend factor is the alpha saturation (the 
+									minimum of (Src alpha, 
+									1 - destination alpha)) */
+	IMG_BLEND_BLEND_FACTOR,    /**< Blend factor is a constant */
+	IMG_BLEND_INVBLEND_FACTOR, /**< Blend factor is a constant (inverted)*/
+	IMG_BLEND_SRC1_COLOUR,     /**< Blend factor is the colour output by
+									the pixel shader */
+	IMG_BLEND_INV_SRC1_COLOUR, /**< Blend factor is the inverted colour
+									output by the pixel shader */
+	IMG_BLEND_SRC1_ALPHA,      /**< Blend factor is the alpha output by
+									the pixel shader */
+	IMG_BLEND_INV_SRC1_ALPHA   /**< Blend factor is the inverted alpha
+									output by the pixel shader */
+} IMG_BLEND;
+
+/**
+ * The arithmetic operation to perform when blending
+ */
+typedef enum _IMG_BLENDOP_
+{
+	IMG_BLENDOP_ADD = 0,      /**< Result = (Source + Destination) */
+	IMG_BLENDOP_SUBTRACT,     /**< Result = (Source - Destination) */
+	IMG_BLENDOP_REV_SUBTRACT, /**< Result = (Destination - Source) */
+	IMG_BLENDOP_MIN,          /**< Result = min (Source, Destination) */
+	IMG_BLENDOP_MAX           /**< Result = max (Source, Destination) */
+} IMG_BLENDOP;
+
+/**
+ * Logical operation to perform when logic ops are enabled
+ */
+typedef enum _IMG_LOGICOP_
+{
+	IMG_LOGICOP_CLEAR = 0,     /**< Result = 0 */
+	IMG_LOGICOP_SET,           /**< Result = -1 */
+	IMG_LOGICOP_COPY,          /**< Result = Source */
+	IMG_LOGICOP_COPY_INVERTED, /**< Result = ~Source */
+	IMG_LOGICOP_NOOP,          /**< Result = Destination */
+	IMG_LOGICOP_INVERT,        /**< Result = ~Destination */
+	IMG_LOGICOP_AND,           /**< Result = Source & Destination */
+	IMG_LOGICOP_NAND,          /**< Result = ~(Source & Destination) */
+	IMG_LOGICOP_OR,            /**< Result = Source | Destination */
+	IMG_LOGICOP_NOR,           /**< Result = ~(Source | Destination) */
+	IMG_LOGICOP_XOR,           /**< Result = Source ^ Destination */
+	IMG_LOGICOP_EQUIV,         /**< Result = ~(Source ^ Destination) */
+	IMG_LOGICOP_AND_REVERSE,   /**< Result = Source & ~Destination */
+	IMG_LOGICOP_AND_INVERTED,  /**< Result = ~Source & Destination */
+	IMG_LOGICOP_OR_REVERSE,    /**< Result = Source | ~Destination */
+	IMG_LOGICOP_OR_INVERTED    /**< Result = ~Source | Destination */
+} IMG_LOGICOP;
+
+/**
+ * Type of fog blending supported
+ */
+typedef enum _IMG_FOGMODE_
+{
+	IMG_FOGMODE_NONE, /**< No fog blending - fog calculations are
+					   *   based on the value output from the vertex phase */
+	IMG_FOGMODE_LINEAR, /**< Linear interpolation */
+	IMG_FOGMODE_EXP, /**< Exponential */
+	IMG_FOGMODE_EXP2, /**< Exponential squared */
+} IMG_FOGMODE;
+
+/**
+ * Types of filtering
+ */
+typedef enum _IMG_FILTER_
+{
+	IMG_FILTER_DONTCARE,	/**< Any filtering mode is acceptable */
+	IMG_FILTER_POINT,		/**< Point filtering */
+	IMG_FILTER_LINEAR,		/**< Bi-linear filtering */
+	IMG_FILTER_BICUBIC,		/**< Bi-cubic filtering */
+} IMG_FILTER;
+
+/**
+ * Addressing modes for textures
+ */
+typedef enum _IMG_ADDRESSMODE_
+{
+	IMG_ADDRESSMODE_REPEAT,	/**< Texture repeats continuously */
+	IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */
+	IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */
+	IMG_ADDRESSMODE_FLIPCLAMP, /**< Flipped once, then clamp */
+	IMG_ADDRESSMODE_CLAMPBORDER, /**< Texture clamped to the border colour */
+	IMG_ADDRESSMODE_OGL_CLAMP, /**< OpenGL-style clamp */
+	IMG_ADDRESSMODE_OVG_TILEFILL, /**< OpenVG-style tile-fill addressing */
+	IMG_ADDRESSMODE_DONTCARE, /**< Any addressing mode is acceptable */
+} IMG_ADDRESSMODE;
+
+/**
+ * Culling based on winding order of triangle.
+ */
+typedef enum _IMG_CULLMODE_
+{
+	IMG_CULLMODE_NONE,			/**< Don't cull */
+	IMG_CULLMODE_FRONTFACING,	/**< Front facing triangles */
+	IMG_CULLMODE_BACKFACING,	/**< Back facing triangles */
+} IMG_CULLMODE;
+
+
+/*! ************************************************************************//**
+@brief          Specifies the MSAA resolve operation.
+*/ /**************************************************************************/
+typedef enum _IMG_RESOLVE_OP_
+{
+	IMG_RESOLVE_BLEND   = 0,          /*!< box filter on the samples */
+	IMG_RESOLVE_MIN     = 1,          /*!< minimum of the samples */
+	IMG_RESOLVE_MAX     = 2,          /*!< maximum of the samples */
+	IMG_RESOLVE_SAMPLE0 = 3,          /*!< choose sample 0 */
+	IMG_RESOLVE_SAMPLE1 = 4,          /*!< choose sample 1 */
+	IMG_RESOLVE_SAMPLE2 = 5,          /*!< choose sample 2 */
+	IMG_RESOLVE_SAMPLE3 = 6,          /*!< choose sample 3 */
+	IMG_RESOLVE_SAMPLE4 = 7,          /*!< choose sample 4 */
+	IMG_RESOLVE_SAMPLE5 = 8,          /*!< choose sample 5 */
+	IMG_RESOLVE_SAMPLE6 = 9,          /*!< choose sample 6 */
+	IMG_RESOLVE_SAMPLE7 = 10,         /*!< choose sample 7 */
+} IMG_RESOLVE_OP;
+
+
+#endif /* __IMG_3DTYPES_H__ */
+/******************************************************************************
+ End of file (img_3dtypes.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/img_defs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/img_defs.h
new file mode 100644
index 0000000..3914d66
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/img_defs.h
@@ -0,0 +1,410 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common header containing type definitions for portability
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Contains variable and structure definitions. Any platform
+                specific types should be defined in this file.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__IMG_DEFS_H__)
+#define __IMG_DEFS_H__
+
+#include <stddef.h>
+
+#include "img_types.h"
+
+#if defined (NO_INLINE_FUNCS)
+	#define	INLINE
+	#define	FORCE_INLINE
+#elif defined(INTEGRITY_OS)
+	#ifndef INLINE
+	#define	INLINE
+	#endif
+	#define	FORCE_INLINE			static
+	#define INLINE_IS_PRAGMA
+#else
+#if defined (__cplusplus)
+	#define INLINE					inline
+	#define	FORCE_INLINE			static inline
+#else
+#if	!defined(INLINE)
+	#define	INLINE					__inline
+#endif
+#if (defined(UNDER_WDDM) || defined(WINDOWS_WDF)) && defined(_X86_)
+	#define	FORCE_INLINE			__forceinline
+#else
+	#define	FORCE_INLINE			static __inline
+#endif
+#endif
+#endif
+
+/* True if the GCC version is at least the given version. False for older
+ * versions of GCC, or other compilers.
+ */
+#define GCC_VERSION_AT_LEAST(major, minor) \
+	(__GNUC__ > (major) || \
+	(__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
+
+/* Ensure Clang's __has_extension macro is defined for all compilers so we
+ * can use it safely in preprocessor conditionals.
+ */
+#if !defined(__has_extension)
+#define __has_extension(e) 0
+#endif
+
+/* Use this in any file, or use attributes under GCC - see below */
+#ifndef PVR_UNREFERENCED_PARAMETER
+#define	PVR_UNREFERENCED_PARAMETER(param) ((void)(param))
+#endif
+
+/* static_assert(condition, "message to print if it fails");
+ *
+ * Assert something at compile time. If the assertion fails, try to print
+ * the message, otherwise do nothing. static_assert is available if:
+ *
+ * - It's already defined as a macro (e.g. by <assert.h> in C11)
+ * - We're using MSVC which exposes static_assert unconditionally
+ * - We're using a C++ compiler that supports C++11
+ * - We're using GCC 4.6 and up in C mode (in which case it's available as
+ *   _Static_assert)
+ *
+ * In all other cases, fall back to an equivalent that makes an invalid
+ * declaration.
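+ * (The fallback declares an array whose size is negative exactly when the
+ * asserted expression is false, which forces a compile-time error.)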
+ */
+#if !defined(static_assert) && !defined(_MSC_VER) && \
+		(!defined(__cplusplus) || __cplusplus < 201103L)
+	/* static_assert isn't already available */
+	#if !defined(__cplusplus) && (GCC_VERSION_AT_LEAST(4, 6) || \
+								  (defined(__clang__) && __has_extension(c_static_assert)))
+		#define static_assert _Static_assert
+	#else
+		#define static_assert(expr, message) \
+			extern int _static_assert_failed[2*!!(expr) - 1] __attribute__((unused))
+	#endif
+#else
+#if defined(CONFIG_L4)
+	/* Defined but not compatible with DDK usage
+	   so undefine & ignore */
+	#undef static_assert
+	#define static_assert(expr, message)
+#endif
+#endif
+
+/*! Macro to round the supplied value _x up to the next multiple of _n, which
+ * must be a power of two.
+ *
+ * Both arguments should be of a type with the same size, otherwise the macro
+ * may truncate high bits, e.g. a 64-bit address in _x and a 32-bit value in _n.
+ */
+#define PVR_ALIGN(_x, _n)   (((_x)+((_n)-1)) & ~((_n)-1))
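+/* For example (illustrative):
+ *	PVR_ALIGN(13, 8) == 16, since (13 + 7) & ~7 == 16
+ *	PVR_ALIGN(16, 8) == 16, since (16 + 7) & ~7 == 16
+ */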
+
+#if defined(_WIN32)
+
+#if defined(WINDOWS_WDF)
+
+	/*
+	 * For WINDOWS_WDF drivers we don't want these defines to overwrite calling conventions propagated through the build system.
+	 * This 'empty' choice helps to resolve all the calling conv issues.
+	 *
+	 */
+	#define IMG_CALLCONV
+	#define C_CALLCONV
+
+	#define IMG_INTERNAL
+	#define IMG_RESTRICT __restrict
+
+	/*
+	 * The proper way of dll linking under MS compilers is made of two things:
+	 * - decorate implementation with __declspec(dllexport)
+	 *   this decoration helps compiler with making the so called 'export library'
+	 * - decorate forward-declaration (in a source dependent on a dll) with __declspec(dllimport),
+	 *   this decoration helps compiler with making faster and smaller code in terms of calling dll-imported functions
+	 *
+	 * Usually these decorations are performed by having a single macro define that expands to a proper __declspec()
+	 * depending on the translation unit, dllexport inside the dll source and dllimport outside the dll source.
+	 * Having IMG_EXPORT and IMG_IMPORT resolving to the same __declspec() makes no sense, but at least works.
+	 */
+	#define IMG_IMPORT __declspec(dllexport)
+	#define IMG_EXPORT __declspec(dllexport)
+
+#else
+
+	#define IMG_CALLCONV __stdcall
+	#define IMG_INTERNAL
+	#define	IMG_EXPORT	__declspec(dllexport)
+	#define IMG_RESTRICT __restrict
+	#define C_CALLCONV	__cdecl
+
+	/*
+	 * IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations match.
+	 * Some compilers require the header to be declared IMPORT, while the implementation is declared EXPORT 
+	 */
+	#define	IMG_IMPORT	IMG_EXPORT
+
+#endif
+
+#if defined(UNDER_WDDM)
+	#ifndef	_INC_STDLIB
+		#if defined(__mips)
+			/* do nothing */
+		#elif defined(UNDER_MSBUILD)
+			_CRTIMP __declspec(noreturn) void __cdecl abort(void);
+		#else
+			_CRTIMP void __cdecl abort(void);
+		#endif
+	#endif
+#endif /* UNDER_WDDM */
+#else
+	#if defined(LINUX) || defined(__METAG) || defined(__QNXNTO__)
+
+		#define IMG_CALLCONV
+		#define C_CALLCONV
+		#if defined(__linux__) || defined(__QNXNTO__)
+			#define IMG_INTERNAL	__attribute__((visibility("hidden")))
+		#else
+			#define IMG_INTERNAL
+		#endif
+		#define IMG_EXPORT		__attribute__((visibility("default")))
+		#define IMG_IMPORT
+		#define IMG_RESTRICT	__restrict__
+
+	#elif defined(INTEGRITY_OS)
+		#define IMG_CALLCONV
+		#define IMG_INTERNAL
+		#define IMG_EXPORT
+		#define IMG_RESTRICT
+		#define C_CALLCONV
+		#define __cdecl
+		/* IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations match.
+		 * Some compilers require the header to be declared IMPORT, while the implementation is declared EXPORT 
+		 */
+		#define	IMG_IMPORT	IMG_EXPORT 
+		#ifndef USE_CODE
+		#define IMG_ABORT()	printf("IMG_ABORT was called.\n")
+
+		#endif
+	#else
+		#error("define an OS")
+	#endif
+#endif
+
+/* Use default definition if not overridden */
+#ifndef IMG_ABORT
+	#if defined(EXIT_ON_ABORT)
+		#define IMG_ABORT()	exit(1)
+	#else
+		#define IMG_ABORT()	abort()
+	#endif
+#endif
+
+/* The best way to suppress unused parameter warnings using GCC is to use a
+ * variable attribute.  Place the __maybe_unused between the type and name of an
+ * unused parameter in a function parameter list, e.g. `int __maybe_unused var'. This
+ * should only be used in GCC build environments, for example, in files that
+ * compile only on Linux. Other files should use PVR_UNREFERENCED_PARAMETER */
+
+/* Kernel macros for compiler attributes */
+/* Note: param positions start at 1 */
+#if defined(LINUX) && defined(__KERNEL__)
+	#include <linux/compiler.h>
+#elif defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+	#define __must_check       __attribute__((warn_unused_result))
+	#define __maybe_unused     __attribute__((unused))
+	#define __malloc           __attribute__((malloc))
+
+	/* Bionic's <sys/cdefs.h> might have defined these already */
+	/* See https://android.googlesource.com/platform/bionic.git/+/master/libc/include/sys/cdefs.h */
+	#if !defined(__packed)
+		#define __packed           __attribute__((packed))
+	#endif
+	#if !defined(__aligned)
+		#define __aligned(n)       __attribute__((aligned(n)))
+	#endif
+	#if !defined(__noreturn)
+		#define __noreturn         __attribute__((noreturn))
+	#endif
+
+	/* That one compiler that supports attributes but doesn't support
+	 * the printf attribute... */
+	#if defined(__GNUC__)
+		#define __printf(fmt, va)  __attribute__((format(printf, fmt, va)))
+	#else
+		#define __printf(fmt, va)
+	#endif /* defined(__GNUC__) */
+
+#else
+	/* Silently ignore those attributes */
+	#define __printf(fmt, va)
+	#define __packed
+	#define __aligned(n)
+	#define __must_check
+	#define __maybe_unused
+	#define __malloc
+
+	#if defined(_MSC_VER) || defined(CC_ARM)
+		#define __noreturn __declspec(noreturn)
+	#else
+		#define __noreturn
+	#endif
+#endif
+
+
+/* Other attributes, following the same style */
+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+	#define __param_nonnull(...)  __attribute__((nonnull(__VA_ARGS__)))
+	#define __returns_nonnull     __attribute__((returns_nonnull))
+#else
+	#define __param_nonnull(...)
+	#define __returns_nonnull
+#endif
+
+
+/* GCC builtins */
+#if defined(LINUX) && defined(__KERNEL__)
+	#include <linux/compiler.h>
+#elif defined(__GNUC__)
+	#define likely(x)   __builtin_expect(!!(x), 1)
+	#define unlikely(x) __builtin_expect(!!(x), 0)
+
+	/* Compiler memory barrier to prevent reordering */
+	#define barrier() __asm__ __volatile__("": : :"memory")
+#else
+	#define barrier() do { static_assert(0, "barrier() isn't supported by your compiler"); } while(0)
+#endif
+
+/* That one OS that defines one but not the other... */
+#ifndef likely
+	#define likely(x)   (x)
+#endif
+#ifndef unlikely
+	#define unlikely(x) (x)
+#endif
+
+#ifndef MAX
+#define MAX(a,b) 					(((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a,b) 					(((a) < (b)) ? (a) : (b))
+#endif
+
+/* Get a structure's address from the address of one of its members */
+#define IMG_CONTAINER_OF(ptr, type, member) \
+	(type *) ((IMG_UINT8 *) (ptr) - offsetof(type, member))
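+/* For example (illustrative): given
+ *	struct item { int key; struct node link; } *psItem;
+ * a pointer psNode to psItem->link can be mapped back to the item with
+ *	IMG_CONTAINER_OF(psNode, struct item, link)
+ */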
+
+/* The number of elements in a fixed-size array (IMG's ARRAY_SIZE macro) */
+#define IMG_ARR_NUM_ELEMS(ARR) \
+	(sizeof(ARR) / sizeof((ARR)[0]))
+
+/* To guarantee that __func__ can be used, define it as a macro here if it
+   isn't already provided by the compiler. */
+#if defined(_MSC_VER)
+#define __func__ __FUNCTION__
+#endif
+
+#if defined(__cplusplus)
+/* C++ Specific:
+ * Disallow use of copy and assignment operator within a class.
+ * Should be placed under private. */
+#define IMG_DISALLOW_COPY_AND_ASSIGN(C) \
+	C(const C&); \
+	void operator=(const C&)
+#endif
+
+#if defined(SUPPORT_PVR_VALGRIND) && !defined(__METAG)
+	#include "/usr/include/valgrind/memcheck.h"
+
+	#define VG_MARK_INITIALIZED(pvData,ui32Size)  VALGRIND_MAKE_MEM_DEFINED(pvData,ui32Size)
+	#define VG_MARK_NOACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_NOACCESS(pvData,ui32Size)
+	#define VG_MARK_ACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_UNDEFINED(pvData,ui32Size)
+#else
+	#if defined(_MSC_VER)
+	#	define PVR_MSC_SUPPRESS_4127 __pragma(warning(suppress:4127))
+	#else
+	#	define PVR_MSC_SUPPRESS_4127
+	#endif
+
+	#define VG_MARK_INITIALIZED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while(0)
+	#define VG_MARK_NOACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while(0)
+	#define VG_MARK_ACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while(0)
+#endif
+
+#define _STRINGIFY(x) # x
+#define IMG_STRINGIFY(x) _STRINGIFY(x)
+
+#if defined(INTEGRITY_OS)
+	/* Definitions not present in INTEGRITY. */
+	#define PATH_MAX	200
+#endif
+
+#if defined (__clang__) || defined (__GNUC__)
+	/* __SIZEOF_POINTER__ is defined already by these compilers */
+#elif defined (INTEGRITY_OS)
+	#if defined (__Ptr_Is_64)
+		#define __SIZEOF_POINTER__ 8
+	#else
+		#define __SIZEOF_POINTER__ 4
+	#endif
+#elif defined(_WIN32)
+	#define __SIZEOF_POINTER__ sizeof(char *)
+#else
+	#warning Unknown OS - using default method to determine whether CPU arch is 64-bit.
+	#define __SIZEOF_POINTER__ sizeof(char *)
+#endif
+
+/* RDI8567: clang/llvm load/store optimisations cause issues with device
+ * memory allocations. Some pointers are made 'volatile' to prevent these
+ * optimisations from being applied to writes through that particular pointer.
+ */
+#if defined(__clang__) && (defined(__arm64__) || defined(__aarch64__))
+#define NOLDSTOPT volatile
+/* after applying 'volatile' to a pointer, we may need to cast it to 'void *'
+ * to keep it compatible with its existing uses
+ */
+#define NOLDSTOPT_VOID (void *)
+#else
+#define NOLDSTOPT
+#define NOLDSTOPT_VOID
+#endif
+
+#endif /* #if !defined (__IMG_DEFS_H__) */
+/*****************************************************************************
+ End of file (img_defs.h)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/img_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/img_types.h
new file mode 100644
index 0000000..06fbff1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/img_types.h
@@ -0,0 +1,304 @@
+/*************************************************************************/ /*!
+@File
+@Title          Global types for use by IMG APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines type aliases for use by IMG APIs.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __IMG_TYPES_H__
+#define __IMG_TYPES_H__
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* To use C99 types and definitions, there are two special cases we need to
+ * cater for:
+ *
+ * - Visual Studio: in VS2010 or later, some standard headers are available,
+ *   and MSVC has its own built-in sized types. We can define the C99 types
+ *   in terms of these.
+ *
+ * - Linux kernel code: C99 sized types are defined in <linux/types.h>, but
+ *   some other features (like macros for constants or printf format
+ *   strings) are missing, so we need to fill in the gaps ourselves.
+ *
+ * For other cases (userspace code under Linux, Android or Neutrino, or
+ * firmware code), we can include the standard headers.
+ */
+#if defined(_MSC_VER)
+	#include "msvc_types.h"
+#elif defined(LINUX) && defined(__KERNEL__)
+	#include <linux/types.h>
+	#include "kernel_types.h"
+#elif defined(LINUX) || defined(__METAG) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+	#include <stddef.h>			/* NULL */
+	#include <inttypes.h>		/* intX_t/uintX_t, format specifiers */
+	#include <limits.h>			/* INT_MIN, etc */
+#elif defined(__mips)
+	#include <stddef.h>			/* NULL */
+	#include <inttypes.h>		/* intX_t/uintX_t, format specifiers */
+#else
+	#error C99 support not set up for this build
+#endif
+
+typedef unsigned int	IMG_UINT,	*IMG_PUINT;
+typedef int				IMG_INT,	*IMG_PINT;
+
+typedef uint8_t			IMG_UINT8,	*IMG_PUINT8;
+typedef uint8_t			IMG_BYTE,	*IMG_PBYTE;
+typedef int8_t			IMG_INT8,	*IMG_PINT8;
+typedef char			IMG_CHAR,	*IMG_PCHAR;
+
+typedef uint16_t		IMG_UINT16,	*IMG_PUINT16;
+typedef int16_t			IMG_INT16,	*IMG_PINT16;
+typedef uint32_t		IMG_UINT32,	*IMG_PUINT32;
+typedef int32_t			IMG_INT32,	*IMG_PINT32;
+
+typedef uint64_t		IMG_UINT64,	*IMG_PUINT64;
+typedef int64_t			IMG_INT64,	*IMG_PINT64;
+#define IMG_INT64_C(c)	INT64_C(c)
+#define IMG_UINT64_C(c)	UINT64_C(c)
+#define IMG_UINT64_FMTSPEC PRIu64
+#define IMG_UINT64_FMTSPECX PRIX64
+#define IMG_UINT64_FMTSPECx PRIx64
+#define IMG_UINT64_FMTSPECo PRIo64
+#define IMG_INT64_FMTSPECd PRId64
+
+#define IMG_UINT16_MAX	UINT16_MAX
+#define IMG_UINT32_MAX	UINT32_MAX
+#define IMG_UINT64_MAX	UINT64_MAX
+
+#define IMG_INT16_MAX	INT16_MAX
+#define IMG_INT32_MAX	INT32_MAX
+#define IMG_INT64_MAX	INT64_MAX
+
+/* Linux kernel mode does not use floating point */
+typedef float			IMG_FLOAT,	*IMG_PFLOAT;
+typedef double			IMG_DOUBLE, *IMG_PDOUBLE;
+
+typedef union _IMG_UINT32_FLOAT_
+{
+	IMG_UINT32 ui32;
+	IMG_FLOAT f;
+} IMG_UINT32_FLOAT;
+
+typedef int				IMG_SECURE_TYPE;
+
+typedef	enum tag_img_bool
+{
+	IMG_FALSE		= 0,
+	IMG_TRUE		= 1,
+	IMG_FORCE_ALIGN = 0x7FFFFFFF
+} IMG_BOOL, *IMG_PBOOL;
+
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+typedef void            IMG_VOID, *IMG_PVOID;
+
+typedef uintptr_t		IMG_UINTPTR_T;
+typedef size_t			IMG_SIZE_T;
+
+#define IMG_SIZE_T_MAX	SIZE_MAX
+#define IMG_NULL		NULL
+
+typedef IMG_CHAR const* IMG_PCCHAR;
+#endif
+
+#if defined(_MSC_VER)
+#define IMG_SIZE_FMTSPEC  "%Iu"
+#define IMG_SIZE_FMTSPECX "%Ix"
+#else
+#define IMG_SIZE_FMTSPEC  "%zu"
+#define IMG_SIZE_FMTSPECX "%zx"
+#endif
+
+#if defined(LINUX) && defined(__KERNEL__)
+/* prints the function name when used with printk */
+#define IMG_PFN_FMTSPEC "%pf"
+#else
+#define IMG_PFN_FMTSPEC "%p"
+#endif
+
+typedef void           *IMG_HANDLE;
+
+/* services/stream ID */
+typedef IMG_UINT64      IMG_SID;
+
+/* Process IDs */
+typedef IMG_UINT32      IMG_PID;
+
+/* OS connection type */
+typedef int             IMG_OS_CONNECTION;
+
+
+/*
+ * Address types.
+ * All types used to refer to a block of memory are wrapped in structures
+ * to enforce some degree of type safety, i.e. an IMG_DEV_VIRTADDR cannot
+ * be assigned to a variable of type IMG_DEV_PHYADDR because they are not
+ * the same thing.
+ *
+ * There is an assumption that the system contains at most one non-CPU MMU,
+ * and that a memory block is only mapped by the MMU once.
+ *
+ * Different devices may have offset views of the physical address space.
+ */
+
+
+/*
+ *
+ * +------------+    +------------+      +------------+        +------------+
+ * |    CPU     |    |    DEV     |      |    DEV     |        |    DEV     |
+ * +------------+    +------------+      +------------+        +------------+
+ *       |                 |                   |                     |
+ *       | void *          |IMG_DEV_VIRTADDR   |IMG_DEV_VIRTADDR     |
+ *       |                 \-------------------/                     |
+ *       |                          |                                |
+ * +------------+             +------------+                         |
+ * |    MMU     |             |    MMU     |                         |
+ * +------------+             +------------+                         |
+ *       |                          |                                |
+ *       |                          |                                |
+ *       |                          |                                |
+ *   +--------+                +---------+                      +--------+
+ *   | Offset |                | (Offset)|                      | Offset |
+ *   +--------+                +---------+                      +--------+
+ *       |                          |                IMG_DEV_PHYADDR |
+ *       |                          |                                |
+ *       |                          | IMG_DEV_PHYADDR                |
+ * +---------------------------------------------------------------------+
+ * |                         System Address bus                          |
+ * +---------------------------------------------------------------------+
+ *
+ */
+
+#define IMG_DEV_VIRTADDR_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_SIZE_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_ALIGN_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_OFFSET_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+
+/* cpu physical address */
+typedef struct _IMG_CPU_PHYADDR
+{
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+	uintptr_t uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var)		(uintptr_t)(var)
+#elif defined(LINUX) && defined(__KERNEL__)
+	phys_addr_t uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var)		(phys_addr_t)(var)
+#else
+	IMG_UINT64 uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var)		(IMG_UINT64)(var)
+#endif
+} IMG_CPU_PHYADDR;
+
+/* device physical address */
+typedef struct _IMG_DEV_PHYADDR
+{
+	IMG_UINT64 uiAddr;
+} IMG_DEV_PHYADDR;
+
+/* system physical address */
+typedef struct _IMG_SYS_PHYADDR
+{
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+	uintptr_t uiAddr;
+#else
+	IMG_UINT64 uiAddr;
+#endif
+} IMG_SYS_PHYADDR;
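+/* Illustrative (not part of the original header): because each address kind
+ * is a distinct struct, mixing them is a compile error rather than a silent
+ * conversion:
+ *
+ *	IMG_DEV_PHYADDR sDevAddr = { .uiAddr = 0x1000 };
+ *	IMG_SYS_PHYADDR sSysAddr;
+ *
+ *	sSysAddr = sDevAddr;               // compile error: incompatible types
+ *	sSysAddr.uiAddr = sDevAddr.uiAddr; // allowed, and explicit
+ */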
+
+/* 32-bit device virtual address (e.g. MSVDX) */
+typedef struct _IMG_DEV_VIRTADDR32
+{
+	IMG_UINT32 uiAddr;
+#define IMG_CAST_TO_DEVVADDR_UINT32(var) (IMG_UINT32)(var)
+} IMG_DEV_VIRTADDR32;
+
+/*
+	rectangle structure
+*/
+typedef struct _IMG_RECT_
+{
+	IMG_INT32	x0;
+	IMG_INT32	y0;
+	IMG_INT32	x1;
+	IMG_INT32	y1;
+} IMG_RECT, *PIMG_RECT;
+
+typedef struct _IMG_RECT_16_
+{
+	IMG_INT16	x0;
+	IMG_INT16	y0;
+	IMG_INT16	x1;
+	IMG_INT16	y1;
+} IMG_RECT_16, *PIMG_RECT_16;
+
+typedef struct _IMG_RECT_32_
+{
+	IMG_FLOAT	x0;
+	IMG_FLOAT	y0;
+	IMG_FLOAT	x1;
+	IMG_FLOAT	y1;
+} IMG_RECT_F32, *PIMG_RECT_F32;
+
+/*
+ * box structure
+ */
+typedef struct _IMG_BOX_
+{
+	IMG_INT32	x0;
+	IMG_INT32	y0;
+	IMG_INT32	z0;
+	IMG_INT32	x1;
+	IMG_INT32	y1;
+	IMG_INT32	z1;
+} IMG_BOX, *PIMG_BOX;
+
+#if defined (__cplusplus)
+}
+#endif
+
+#include "img_defs.h"
+
+#endif	/* __IMG_TYPES_H__ */
+/******************************************************************************
+ End of file (img_types.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/kernel_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/kernel_types.h
new file mode 100644
index 0000000..c93b59e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/kernel_types.h
@@ -0,0 +1,138 @@
+/*************************************************************************/ /*!
+@Title          C99-compatible types and definitions for Linux kernel code
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+
+/* Limits of specified-width integer types */
+
+/* S8_MIN, etc. were added in kernel version 3.14. The fallback definitions
+ * are for earlier kernels. They can be removed once older kernels don't need to be
+ * supported.
+ */
+#ifdef S8_MIN
+	#define INT8_MIN	S8_MIN
+#else
+	#define INT8_MIN	(-128)
+#endif
+
+#ifdef S8_MAX
+	#define INT8_MAX	S8_MAX
+#else
+	#define INT8_MAX	127
+#endif
+
+#ifdef U8_MAX
+	#define UINT8_MAX	U8_MAX
+#else
+	#define UINT8_MAX	0xFF
+#endif
+
+#ifdef S16_MIN
+	#define INT16_MIN	S16_MIN
+#else
+	#define INT16_MIN	(-32768)
+#endif
+
+#ifdef S16_MAX
+	#define INT16_MAX	S16_MAX
+#else
+	#define INT16_MAX	32767
+#endif
+
+#ifdef U16_MAX
+	#define UINT16_MAX	U16_MAX
+#else
+	#define UINT16_MAX	0xFFFF
+#endif
+
+#ifdef S32_MIN
+	#define INT32_MIN	S32_MIN
+#else
+	#define INT32_MIN	(-2147483647 - 1)
+#endif
+
+#ifdef S32_MAX
+	#define INT32_MAX	S32_MAX
+#else
+	#define INT32_MAX	2147483647
+#endif
+
+#ifdef U32_MAX
+	#define UINT32_MAX	U32_MAX
+#else
+	#define UINT32_MAX	0xFFFFFFFF
+#endif
+
+#ifdef S64_MIN
+	#define INT64_MIN	S64_MIN
+#else
+	#define INT64_MIN	(-9223372036854775807LL)
+#endif
+
+#ifdef S64_MAX
+	#define INT64_MAX	S64_MAX
+#else
+	#define INT64_MAX	9223372036854775807LL
+#endif
+
+#ifdef U64_MAX
+	#define UINT64_MAX	U64_MAX
+#else
+	#define UINT64_MAX	0xFFFFFFFFFFFFFFFFULL
+#endif
+
+/* Macros for integer constants */
+#define INT8_C			S8_C
+#define UINT8_C			U8_C
+#define INT16_C			S16_C
+#define UINT16_C		U16_C
+#define INT32_C			S32_C
+#define UINT32_C		U32_C
+#define INT64_C			S64_C
+#define UINT64_C		U64_C
+
+/* Format conversion of integer types <inttypes.h> */
+
+#define PRIX64		"llX"
+#define PRIx64		"llx"
+#define PRIu64		"llu"
+#define PRId64		"lld"
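+
+/* Illustrative usage (not part of the original header): these specifiers
+ * plug into printk format strings in the same way as <inttypes.h>, e.g.
+ *
+ *	u64 ui64Val = UINT64_C(0xDEADBEEF);
+ *	printk("value = 0x%" PRIX64 "\n", ui64Val);
+ */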
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/lock_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/lock_types.h
new file mode 100644
index 0000000..1162737
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/lock_types.h
@@ -0,0 +1,93 @@
+/*************************************************************************/ /*!
+@File           lock_types.h
+@Title          Locking types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Locking specific enums, defines and structures
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LOCK_TYPES_H_
+#define _LOCK_TYPES_H_
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+/* The mutex is defined as a pointer to be compatible with the other code. This
+ * isn't ideal and usually you wouldn't do that in kernel code. */
+typedef struct mutex *POS_LOCK;
+typedef atomic_t ATOMIC_T;
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+#include "img_types.h" /* needed for IMG_INT */
+typedef struct _OS_LOCK_ *POS_LOCK;
+#if defined(LINUX)
+	typedef struct _OS_ATOMIC {IMG_INT counter;} ATOMIC_T;
+#elif defined(__QNXNTO__)
+	typedef struct _OS_ATOMIC {IMG_INT counter;} ATOMIC_T;
+#elif defined(_WIN32)
+	/*
+	 * Dummy definition. WDDM doesn't use Services, but some headers
+	 * still have to be shared. This is one such case.
+	 */
+	typedef struct _OS_ATOMIC {IMG_INT counter;} ATOMIC_T;
+#elif defined(INTEGRITY_OS)
+	/* Fixed size data type to hold the largest value */
+	typedef struct _OS_ATOMIC {IMG_UINT64 counter;} ATOMIC_T;
+#else
+	#error "Please type-define an atomic lock for this environment"
+#endif
+
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+
+typedef enum
+{
+	LOCK_TYPE_NONE 			= 0x00,
+
+	LOCK_TYPE_MASK			= 0x0F,
+	LOCK_TYPE_PASSIVE		= 0x01,		/* Passive level lock e.g. mutex, system may promote to dispatch */
+	LOCK_TYPE_DISPATCH		= 0x02,		/* Dispatch level lock e.g. spin lock, may be used in ISR/MISR */
+
+	LOCK_TYPE_INSIST_FLAG	= 0x80,		/* When set caller can guarantee lock not used in ISR/MISR */
+	LOCK_TYPE_PASSIVE_ONLY	= LOCK_TYPE_INSIST_FLAG | LOCK_TYPE_PASSIVE
+
+} LOCK_TYPE;
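+
+/* Illustrative note (not part of the original header): the low nibble holds
+ * the lock class and the top bit is a usage guarantee, so
+ * (LOCK_TYPE_PASSIVE_ONLY & LOCK_TYPE_MASK) == LOCK_TYPE_PASSIVE.
+ */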
+#endif	/* _LOCK_TYPES_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/log2.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/log2.h
new file mode 100644
index 0000000..8281c02
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/log2.h
@@ -0,0 +1,252 @@
+/*************************************************************************/ /*!
+@Title          Integer log2 and related functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef LOG2_H
+#define LOG2_H
+
+#include "img_defs.h"
+
+/**************************************************************************/ /*!
+@Description    Determine if a number is a power of two.
+@Input          n
+@Return         True if n is a power of 2, false otherwise. Note that n == 0
+                also yields true, although 0 is not a power of two.
+*/ /***************************************************************************/
+static INLINE IMG_BOOL IsPower2(uint32_t n)
+{
+	/* C++ needs this cast. */
+	return (IMG_BOOL)((n & (n - 1)) == 0);
+}
+
+/**************************************************************************/ /*!
+@Description    Determine if a number is a power of two.
+@Input          n
+@Return         True if n is a power of 2, false otherwise. Note that n == 0
+                also yields true, although 0 is not a power of two.
+*/ /***************************************************************************/
+static INLINE IMG_BOOL IsPower2_64(uint64_t n)
+{
+	/* C++ needs this cast. */
+	return (IMG_BOOL)((n & (n - 1)) == 0);
+}
+
+/**************************************************************************/ /*!
+@Description    Round a non-power-of-two number up to the next power of two.
+@Input          n
+@Return         n rounded up to the next power of two. If n is zero or
+                already a power of two, return n unmodified.
+*/ /***************************************************************************/
+static INLINE uint32_t RoundUpToNextPowerOfTwo(uint32_t n)
+{
+	n--;
+	n |= n >> 1;  /* handle  2 bit numbers */
+	n |= n >> 2;  /* handle  4 bit numbers */
+	n |= n >> 4;  /* handle  8 bit numbers */
+	n |= n >> 8;  /* handle 16 bit numbers */
+	n |= n >> 16; /* handle 32 bit numbers */
+	n++;
+
+	return n;
+}
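+
+/* Illustrative trace (not part of the original header): for n = 36, n - 1 is
+ * 35 = 0b100011; the shift/or cascade smears the top set bit downwards,
+ * giving 0b111111 = 63, and the final increment yields 64, the next power
+ * of two.
+ */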
+
+/**************************************************************************/ /*!
+@Description    Round a non-power-of-two number up to the next power of two.
+@Input          n
+@Return         n rounded up to the next power of two. If n is zero or
+                already a power of two, return n unmodified.
+*/ /***************************************************************************/
+static INLINE uint64_t RoundUpToNextPowerOfTwo_64(uint64_t n)
+{
+	n--;
+	n |= n >> 1;  /* handle  2 bit numbers */
+	n |= n >> 2;  /* handle  4 bit numbers */
+	n |= n >> 4;  /* handle  8 bit numbers */
+	n |= n >> 8;  /* handle 16 bit numbers */
+	n |= n >> 16; /* handle 32 bit numbers */
+	n |= n >> 32; /* handle 64 bit numbers */
+	n++;
+
+	return n;
+}
+
+/**************************************************************************/ /*!
+@Description    Compute floor(log2(n))
+@Input          n
+@Return         log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t FloorLog2(uint32_t n)
+{
+	uint32_t log2 = 0;
+
+	while (n >>= 1)
+		log2++;
+
+	return log2;
+}
+
+/**************************************************************************/ /*!
+@Description    Compute floor(log2(n))
+@Input          n
+@Return         log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t FloorLog2_64(uint64_t n)
+{
+	uint32_t log2 = 0;
+
+	while (n >>= 1)
+		log2++;
+
+	return log2;
+}
+
+/**************************************************************************/ /*!
+@Description    Compute ceil(log2(n))
+@Input          n
+@Return         log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t CeilLog2(uint32_t n)
+{
+	uint32_t log2 = 0;
+
+	if(n == 0)
+		return 0;
+
+	n--; /* Handle powers of 2 */
+
+	while(n)
+	{
+		log2++;
+		n >>= 1;
+	}
+
+	return log2;
+}
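+
+/* For example (illustrative): CeilLog2(8) decrements to 7 = 0b111 and counts
+ * 3 shifts, while CeilLog2(9) decrements to 8 = 0b1000 and counts 4, so
+ * exact powers of two are not rounded up.
+ */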
+
+/**************************************************************************/ /*!
+@Description    Compute ceil(log2(n))
+@Input          n
+@Return         log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t CeilLog2_64(uint64_t n)
+{
+	uint32_t log2 = 0;
+
+	if(n == 0)
+		return 0;
+
+	n--; /* Handle powers of 2 */
+
+	while(n)
+	{
+		log2++;
+		n >>= 1;
+	}
+
+	return log2;
+}
+
+/**************************************************************************/ /*!
+@Description    Compute log2(n) for exact powers of two only
+@Input          n                   Must be a power of two
+@Return         log2(n)
+*/ /***************************************************************************/
+static INLINE uint32_t ExactLog2(uint32_t n)
+{
+	static const uint32_t b[] =
+		{ 0xAAAAAAAA, 0xCCCCCCCC, 0xF0F0F0F0, 0xFF00FF00, 0xFFFF0000};
+	uint32_t r = (n & b[0]) != 0;
+
+	r |= (uint32_t) ((n & b[4]) != 0) << 4;
+	r |= (uint32_t) ((n & b[3]) != 0) << 3;
+	r |= (uint32_t) ((n & b[2]) != 0) << 2;
+	r |= (uint32_t) ((n & b[1]) != 0) << 1;
+
+	return r;
+}
+
+/**************************************************************************/ /*!
+@Description    Compute log2(n) for exact powers of two only
+@Input          n                   Must be a power of two
+@Return         log2(n)
+*/ /***************************************************************************/
+static INLINE uint32_t ExactLog2_64(uint64_t n)
+{
+	static const uint64_t b[] =
+		{ 0xAAAAAAAAAAAAAAAAULL, 0xCCCCCCCCCCCCCCCCULL,
+		  0xF0F0F0F0F0F0F0F0ULL, 0xFF00FF00FF00FF00ULL,
+		  0xFFFF0000FFFF0000ULL, 0xFFFFFFFF00000000ULL };
+	uint32_t r = (n & b[0]) != 0;
+
+	r |= (uint32_t) ((n & b[5]) != 0) << 5;
+	r |= (uint32_t) ((n & b[4]) != 0) << 4;
+	r |= (uint32_t) ((n & b[3]) != 0) << 3;
+	r |= (uint32_t) ((n & b[2]) != 0) << 2;
+	r |= (uint32_t) ((n & b[1]) != 0) << 1;
+
+	return r;
+}
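+
+/* How the mask trick works (editor's note): a power of two has exactly one
+ * bit set, and each mask b[i] covers the bit positions whose index has bit i
+ * set, so testing n against each mask reassembles log2(n) one bit at a time.
+ *
+ *   ExactLog2(16) == 4            (only b[2], the 0xF0F0F0F0 mask, hits)
+ *   ExactLog2_64(1ULL << 40) == 40
+ */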
+
+/**************************************************************************/ /*!
+@Description    Compute the number of mipmap levels, i.e.
+                floor(log2(size)) + 1, where size is the max of the three
+                dimensions. This is almost always the only valid use of
+                FloorLog2(); usually CeilLog2() should be used instead.
+                For a 5x5x1 texture, the 3 miplevels are:
+                    0:  5x5x1
+                    1:  2x2x1
+                    2:  1x1x1
+
+                For an 8x8x1 texture, the 4 miplevels are:
+                    0:  8x8x1
+                    1:  4x4x1
+                    2:  2x2x1
+                    3:  1x1x1
+
+@Input          sizeX, sizeY, sizeZ
+@Return         Count of mipmap levels for given dimensions
+*/ /***************************************************************************/
+static INLINE uint32_t NumMipLevels(uint32_t sizeX, uint32_t sizeY, uint32_t sizeZ)
+{
+	uint32_t maxSize = MAX(MAX(sizeX, sizeY), sizeZ);
+	return FloorLog2(maxSize) + 1;
+}
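+
+/* Illustrative values (editor's sketch), matching the tables above:
+ *
+ *   NumMipLevels(5, 5, 1) == 3    i.e. FloorLog2(5) + 1
+ *   NumMipLevels(8, 8, 1) == 4    i.e. FloorLog2(8) + 1
+ */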
+
+
+#endif /* LOG2_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pdumpdefs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pdumpdefs.h
new file mode 100644
index 0000000..9ab8e97
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pdumpdefs.h
@@ -0,0 +1,206 @@
+/*************************************************************************/ /*!
+@File
+@Title          PDUMP definitions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    PDUMP definitions header
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__PDUMPDEFS_H__)
+#define __PDUMPDEFS_H__
+
+/*! PDump Pixel Format Enumeration */
+typedef enum _PDUMP_PIXEL_FORMAT_
+{
+	PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
+	PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
+//	PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10,
+	PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
+	PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
+	PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
+	PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
+	PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
+	PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
+	PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
+	PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
+	PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
+	PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
+	PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
+	PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
+	PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
+	PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31,
+	PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36,
+	PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37,
+	PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888 = 39,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444 = 40,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444 = 41,
+	PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444 = 42,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555 = 43,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551 = 44,
+	PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551 = 45,
+	PVRSRV_PDUMP_PIXEL_FORMAT_BGR565 = 46,
+	PVRSRV_PDUMP_PIXEL_FORMAT_A8 = 47,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16F16 = 49,
+	PVRSRV_PDUMP_PIXEL_FORMAT_A4 = 50,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB2101010 = 51,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RSGSBS888 = 52,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32F32 = 53,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F16F16 = 54,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F32F32 = 55,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16 = 56,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32 = 57,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U8 = 58,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U8U8 = 59,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U16 = 60,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U16U16 = 61,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U16U16U16U16 = 62,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U32 = 63,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U32U32 = 64,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U32U32U32U32 = 65,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32 = 66,
+
+	PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
+
+} PDUMP_PIXEL_FORMAT;
+
+/*! PDump addrmode */
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT			0
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_MASK			0x000000FF
+
+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT			8
+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE		(1 << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT		12
+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK			0x000FF000
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT				20
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK				0x00F00000
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT			24
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT			28
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK			0xF0000000
+
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE			(0 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE1 (1 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE2 (2 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE3 (3 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE4 (4 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE5 (5 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE6 (6 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE7 (7 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED		(9 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED		(11 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE				(0 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT		(1 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT		(2 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_32X2_DIRECT		(3 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT		(4 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT		(5 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE	(6 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE	(7 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR					(1 << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_BASE			(1 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_ENHANCED		(2 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2				(3 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_SURFACE		(4 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_RESOURCE		(5 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
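+
+/* Editor's sketch of how these fields compose: an addrmode word packs a
+ * memory format, optional stride sense, BIF tile mode, FBC mode and FBC
+ * compatibility revision into the shifted fields above. The combination
+ * below is illustrative, not taken from driver code:
+ *
+ *   uint32_t ui32AddrMode = PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED |
+ *                           PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT |
+ *                           PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2;
+ */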
+
+
+/*! PDump Poll Operator */
+typedef enum _PDUMP_POLL_OPERATOR
+{
+	PDUMP_POLL_OPERATOR_EQUAL = 0,
+	PDUMP_POLL_OPERATOR_LESS = 1,
+	PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
+	PDUMP_POLL_OPERATOR_GREATER = 3,
+	PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
+	PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
+} PDUMP_POLL_OPERATOR;
+
+
+#define PVRSRV_PDUMP_MAX_FILENAME_SIZE			75  /*!< Max length of a pdump log file name */
+#define PVRSRV_PDUMP_MAX_COMMENT_SIZE			350 /*!< Max length of a pdump comment */
+
+/*!
+	PDump MMU type
+	(Maps to values listed in "PowerVR Tools.Pdump2 Script Functions.doc" Sec 2.13)
+*/
+typedef enum
+{
+	PDUMP_MMU_TYPE_4KPAGE_32BIT_STDTILE 	= 1,
+	PDUMP_MMU_TYPE_VARPAGE_32BIT_STDTILE 	= 2,
+	PDUMP_MMU_TYPE_4KPAGE_36BIT_EXTTILE 	= 3,
+	PDUMP_MMU_TYPE_4KPAGE_32BIT_EXTTILE 	= 4,
+	PDUMP_MMU_TYPE_4KPAGE_36BIT_STDTILE 	= 5,
+	PDUMP_MMU_TYPE_VARPAGE_40BIT            = 6,
+	PDUMP_MMU_TYPE_VIDEO_40BIT_STDTILE      = 7,
+	PDUMP_MMU_TYPE_VIDEO_40BIT_EXTTILE      = 8,
+	PDUMP_MMU_TYPE_MIPS_MICROAPTIV          = 9,
+	PDUMP_MMU_TYPE_LAST
+} PDUMP_MMU_TYPE;
+
+#endif /* __PDUMPDEFS_H__ */
+
+/*****************************************************************************
+ End of file (pdumpdefs.h)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/public/powervr/buffer_attribs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/public/powervr/buffer_attribs.h
new file mode 100644
index 0000000..4c75e7c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/public/powervr/buffer_attribs.h
@@ -0,0 +1,84 @@
+/*************************************************************************/ /*!
+@File
+@Title          3D types for use by IMG APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _POWERVR_BUFFER_ATTRIBS_H_
+#define _POWERVR_BUFFER_ATTRIBS_H_
+
+/**
+ * Memory layouts
+ * Defines how pixels are laid out within a surface.
+ */
+typedef enum
+{
+	IMG_MEMLAYOUT_STRIDED,       /**< Resource is strided, one row at a time */
+	IMG_MEMLAYOUT_TWIDDLED,      /**< Resource is 2D twiddled, classic style */
+	IMG_MEMLAYOUT_3DTWIDDLED,    /**< Resource is 3D twiddled, classic style */
+	IMG_MEMLAYOUT_TILED,         /**< Resource is tiled, tiling config specified elsewhere. */
+	IMG_MEMLAYOUT_PAGETILED,     /**< Resource is pagetiled */
+} IMG_MEMLAYOUT;
+
+/**
+ * Rotation types
+ */
+typedef enum
+{
+	IMG_ROTATION_0DEG = 0,
+	IMG_ROTATION_90DEG = 1,
+	IMG_ROTATION_180DEG = 2,
+	IMG_ROTATION_270DEG = 3,
+	IMG_ROTATION_FLIP_Y = 4,
+
+	IMG_ROTATION_BAD = 255,
+} IMG_ROTATION;
+
+/**
+ * Colourspace formats.
+ */
+typedef enum
+{
+	IMG_COLOURSPACE_FORMAT_UNKNOWN   =  0x00000000,  /**< Colourspace Format: Unknown */
+	IMG_COLOURSPACE_FORMAT_LINEAR    =  0x00010000,  /**< Colourspace Format: Linear */
+	IMG_COLOURSPACE_FORMAT_NONLINEAR =  0x00020000,  /**< Colourspace Format: Non-Linear */
+	IMG_COLOURSPACE_FORMAT_MASK      =  0x000F0000,  /**< Colourspace Format Mask */
+} IMG_COLOURSPACE_FORMAT;
+
+/**
+ * Types of framebuffer compression
+ */
+typedef enum
+{
+	IMG_FB_COMPRESSION_NONE,
+	IMG_FB_COMPRESSION_DIRECT_8x8,
+	IMG_FB_COMPRESSION_DIRECT_16x4,
+	IMG_FB_COMPRESSION_DIRECT_32x2,
+	IMG_FB_COMPRESSION_INDIRECT_8x8,
+	IMG_FB_COMPRESSION_INDIRECT_16x4,
+	IMG_FB_COMPRESSION_INDIRECT_4TILE_8x8,
+	IMG_FB_COMPRESSION_INDIRECT_4TILE_16x4
+} IMG_FB_COMPRESSION;
+
+
+#endif /* _POWERVR_BUFFER_ATTRIBS_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/public/powervr/mem_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/public/powervr/mem_types.h
new file mode 100644
index 0000000..ee8a289
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/public/powervr/mem_types.h
@@ -0,0 +1,62 @@
+/*************************************************************************/ /*!
+@File
+@Title          Public types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _POWERVR_TYPES_H_
+#define _POWERVR_TYPES_H_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(_MSC_VER)
+	#include "msvc_types.h"
+#elif defined(LINUX) && defined(__KERNEL__)
+	#include <linux/types.h>
+#else
+	#include <stdint.h>
+#endif
+
+typedef void *IMG_CPU_VIRTADDR;
+
+/* device virtual address */
+typedef struct _IMG_DEV_VIRTADDR
+{
+	uint64_t  uiAddr;
+#define IMG_CAST_TO_DEVVADDR_UINT(var)		(uint64_t)(var)
+
+} IMG_DEV_VIRTADDR;
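+
+/* Editor's sketch (illustrative): a device virtual address is carried as a
+ * plain 64-bit value; the helper macro just casts an integral expression to
+ * that width.
+ *
+ *   IMG_DEV_VIRTADDR sDevVAddr;
+ *   sDevVAddr.uiAddr = IMG_CAST_TO_DEVVADDR_UINT(ui32Offset) + uiBase;
+ *
+ * ui32Offset and uiBase are hypothetical names used only for this example.
+ */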
+
+typedef uint64_t IMG_DEVMEM_SIZE_T;
+typedef uint64_t IMG_DEVMEM_ALIGN_T;
+typedef uint64_t IMG_DEVMEM_OFFSET_T;
+typedef uint32_t IMG_DEVMEM_LOG2ALIGN_T;
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/public/powervr/sync_external.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/public/powervr/sync_external.h
new file mode 100644
index 0000000..f74ab15
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/public/powervr/sync_external.h
@@ -0,0 +1,86 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services external synchronisation interface header
+@Description    Defines synchronisation structures that are visible internally
+                and externally
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_EXTERNAL_
+#define _SYNC_EXTERNAL_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include <powervr/mem_types.h>
+
+/*!
+ * Maximum byte length for a sync prim name
+ */
+#define SYNC_MAX_CLASS_NAME_LEN 32
+
+/*!
+ * Number of sync primitives in operations
+ */
+#define	PVRSRV_MAX_SYNC_PRIMS 32
+
+typedef void* PVRSRV_CLIENT_SYNC_PRIM_HANDLE;
+typedef void* SYNC_BRIDGE_HANDLE;
+typedef struct SYNC_PRIM_CONTEXT *PSYNC_PRIM_CONTEXT;
+typedef struct _SYNC_OP_COOKIE_ *PSYNC_OP_COOKIE;
+
+/*!
+ * Client sync prim definition holding a CPU accessible address
+ *
+ *   Structure: #PVRSRV_CLIENT_SYNC_PRIM
+ *   Typedef: ::PVRSRV_CLIENT_SYNC_PRIM
+ */
+typedef struct PVRSRV_CLIENT_SYNC_PRIM
+{
+	volatile uint32_t	*pui32LinAddr;	/*!< User pointer to the primitive */
+} PVRSRV_CLIENT_SYNC_PRIM;
+
+/*!
+ * Bundled information for a sync prim operation
+ *
+ *   Structure: #PVRSRV_CLIENT_SYNC_PRIM_OP
+ *   Typedef: ::PVRSRV_CLIENT_SYNC_PRIM_OP
+ */
+typedef struct PVRSRV_CLIENT_SYNC_PRIM_OP
+{
+	#define PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK	(1 << 0)
+	#define PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE	(1 << 1)
+	#define PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE (PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE | (1<<2))
+	uint32_t                    ui32Flags;       /*!< Operation flags: PVRSRV_CLIENT_SYNC_PRIM_OP_XXX */
+	PVRSRV_CLIENT_SYNC_PRIM    *psSync;          /*!< Pointer to the client sync primitive */
+	uint32_t                    ui32FenceValue;  /*!< The Fence value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK is set) */
+	uint32_t                    ui32UpdateValue; /*!< The Update value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE is set) */
+} PVRSRV_CLIENT_SYNC_PRIM_OP;
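+
+/* Editor's sketch (illustrative, hypothetical values): a combined
+ * check-then-update operation on one primitive would fill the struct as:
+ *
+ *   PVRSRV_CLIENT_SYNC_PRIM_OP sOp;
+ *   sOp.ui32Flags       = PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK |
+ *                         PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE;
+ *   sOp.psSync          = psSyncPrim;   hypothetical sync prim pointer
+ *   sOp.ui32FenceValue  = 1;            value checked (CHECK flag set)
+ *   sOp.ui32UpdateValue = 2;            value written (UPDATE flag set)
+ */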
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* _SYNC_EXTERNAL_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvr_buffer_sync_shared.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvr_buffer_sync_shared.h
new file mode 100644
index 0000000..9258a45
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvr_buffer_sync_shared.h
@@ -0,0 +1,52 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR buffer sync shared
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared definitions between client and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_BUFFER_SYNC_SHARED_H__
+#define __PVR_BUFFER_SYNC_SHARED_H__
+
+#define PVR_BUFFER_FLAG_READ		(1 << 0)
+#define PVR_BUFFER_FLAG_WRITE		(1 << 1)
+#define PVR_BUFFER_FLAG_MASK		(PVR_BUFFER_FLAG_READ | \
+									 PVR_BUFFER_FLAG_WRITE)
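+
+/* Editor's note: the mask is simply the OR of the two access flags, so
+ * (ui32Flags & PVR_BUFFER_FLAG_MASK) strips anything that is not a read or
+ * write access bit (ui32Flags being a hypothetical flags word). */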
+
+#endif /* __PVR_BUFFER_SYNC_SHARED_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvr_debug.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvr_debug.h
new file mode 100644
index 0000000..bf551411
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvr_debug.h
@@ -0,0 +1,581 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Debug Declarations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides debug functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_DEBUG_H__
+#define __PVR_DEBUG_H__
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+#if defined(_MSC_VER)
+#	define MSC_SUPPRESS_4127 __pragma(warning(suppress:4127))
+#else
+#	define MSC_SUPPRESS_4127
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#define PVR_MAX_DEBUG_MESSAGE_LEN	(512)   /*!< Max length of a Debug Message */
+
+/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */
+#define DBGPRIV_FATAL			0x001UL  /*!< Debug-Fatal. Privately used by pvr_debug. */
+#define DBGPRIV_ERROR			0x002UL  /*!< Debug-Error. Privately used by pvr_debug. */
+#define DBGPRIV_WARNING			0x004UL  /*!< Debug-Warning. Privately used by pvr_debug. */
+#define DBGPRIV_MESSAGE			0x008UL  /*!< Debug-Message. Privately used by pvr_debug. */
+#define DBGPRIV_VERBOSE			0x010UL  /*!< Debug-Verbose. Privately used by pvr_debug. */
+#define DBGPRIV_CALLTRACE		0x020UL  /*!< Debug-CallTrace. Privately used by pvr_debug. */
+#define DBGPRIV_ALLOC			0x040UL  /*!< Debug-Alloc. Privately used by pvr_debug. */
+#define DBGPRIV_BUFFERED		0x080UL  /*!< Debug-Buffered. Privately used by pvr_debug. */
+#define DBGPRIV_DEBUG			0x100UL  /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */
+#define DBGPRIV_DBGDRV_MESSAGE	0x200UL  /*!< Debug-DbgDrivMessage. Privately used by pvr_debug. */
+#define DBGPRIV_LAST			0x200UL  /*!< Always set to highest mask value. Privately used by pvr_debug. */
+
+
+#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG)
+#define PVRSRV_NEED_PVR_ASSERT
+#endif
+
+#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF)
+#define PVRSRV_NEED_PVR_DPF
+#endif
+
+#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING))
+#define PVRSRV_NEED_PVR_TRACE
+#endif
+
+#if !defined(DOXYGEN)
+#if defined(__KERNEL__)
+	IMG_IMPORT const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError);
+#	define PVRSRVGETERRORSTRING PVRSRVGetErrorStringKM
+#else
+/*************************************************************************/ /*
+PVRSRVGetErrorString
+Returns a string describing the provided PVRSRV_ERROR code
+NB No doxygen comments provided as this function does not require porting
+   for other operating systems
+*/ /**************************************************************************/
+	IMG_IMPORT const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError);
+#	define PVRSRVGETERRORSTRING PVRSRVGetErrorString
+#endif
+#endif
+
+/* PVR_ASSERT() and PVR_DBG_BREAK handling */
+
+#if defined(PVRSRV_NEED_PVR_ASSERT) || defined(DOXYGEN)
+
+/* Unfortunately the klocworks static analysis checker doesn't understand our
+ * ASSERT macros, so it reports lots of false positives. Defining our assert
+ * macros in a special way when the code is analysed by klocworks avoids
+ * them. */
+#if defined(__KLOCWORK__)
+  #define PVR_ASSERT(x) do { if (!(x)) abort(); } while (0)
+#else /* !defined(__KLOCWORK__) */
+
+#if defined(_WIN32)
+#define PVR_ASSERT(expr) do 									\
+	{															\
+		MSC_SUPPRESS_4127										\
+		if (unlikely(!(expr)))								\
+		{														\
+			PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,\
+					  "*** Debug assertion failed!");			\
+			__debugbreak();										\
+		}														\
+	MSC_SUPPRESS_4127											\
+	} while (0)
+
+#else
+
+#if defined(LINUX) && defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+/* In Linux kernel mode, use WARN_ON() directly. This produces the
+   correct filename and line number in the warning message. */
+#define PVR_ASSERT(EXPR) do											\
+	{																\
+		if (unlikely(!(EXPR)))										\
+		{															\
+			PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,	\
+							  "Debug assertion failed!");			\
+			WARN_ON(1);												\
+		}															\
+	} while (0)
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugAssertFail
+@Description    Indicate to the user that a debug assertion has failed and
+                prevent the program from continuing.
+                Invoked from the macro PVR_ASSERT().
+@Input          pszFile       The name of the source file where the assertion failed
+@Input          ui32Line      The line number of the failed assertion
+@Input          pszAssertion  String describing the assertion
+@Return         NEVER!
+*/ /**************************************************************************/
+IMG_IMPORT void IMG_CALLCONV __noreturn
+PVRSRVDebugAssertFail(const IMG_CHAR *pszFile,
+                      IMG_UINT32 ui32Line,
+                      const IMG_CHAR *pszAssertion);
+
+#define PVR_ASSERT(EXPR) do										\
+	{															\
+		if (unlikely(!(EXPR)))								\
+			PVRSRVDebugAssertFail(__FILE__, __LINE__, #EXPR);	\
+	} while (0)
+
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+#endif /* defined(_WIN32) */
+#endif /* defined(__KLOCWORK__) */
+
+#if defined(__KLOCWORK__)
+	#define PVR_DBG_BREAK do { abort(); } while (0)
+#else
+	#if defined (WIN32)
+		#define PVR_DBG_BREAK __debugbreak();   /*!< Implementation of PVR_DBG_BREAK for (non-WinCE) Win32 */
+	#else
+		#if defined(PVR_DBG_BREAK_ASSERT_FAIL)
+		/*!< Implementation of PVR_DBG_BREAK that maps onto PVRSRVDebugAssertFail */
+			#if defined(_WIN32)
+				#define PVR_DBG_BREAK	DBG_BREAK
+			#else
+				#if defined(LINUX) && defined(__KERNEL__)
+					#define PVR_DBG_BREAK BUG()
+				#else
+					#define PVR_DBG_BREAK	PVRSRVDebugAssertFail(__FILE__, __LINE__, "PVR_DBG_BREAK")
+				#endif
+			#endif
+		#else
+			/*!< Null Implementation of PVR_DBG_BREAK (does nothing) */
+			#define PVR_DBG_BREAK
+		#endif
+	#endif
+#endif
+
+
+#else  /* defined(PVRSRV_NEED_PVR_ASSERT) */
+    /* Unfortunately the klocworks static analysis checker doesn't understand our
+     * ASSERT macros, so it reports lots of false positives. Defining our assert
+     * macros in a special way when the code is analysed by klocworks avoids
+     * them. */
+    #if defined(__KLOCWORK__)
+        #define PVR_ASSERT(EXPR) do { if (unlikely(!(EXPR))) abort(); } while (0)
+    #else
+        #define PVR_ASSERT(EXPR) (void)(EXPR) /*!< Null Implementation of PVR_ASSERT (does nothing) */
+    #endif
+
+    #define PVR_DBG_BREAK    /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */
+
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
+
+
+/* PVR_DPF() handling */
+
+#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN)
+
+	/* New logging mechanism */
+	#define PVR_DBG_FATAL		DBGPRIV_FATAL		/*!< Debug level passed to PVRSRVDebugPrintf() for fatal errors. */
+	#define PVR_DBG_ERROR		DBGPRIV_ERROR		/*!< Debug level passed to PVRSRVDebugPrintf() for non-fatal errors. */
+	#define PVR_DBG_WARNING		DBGPRIV_WARNING		/*!< Debug level passed to PVRSRVDebugPrintf() for warnings. */
+	#define PVR_DBG_MESSAGE		DBGPRIV_MESSAGE		/*!< Debug level passed to PVRSRVDebugPrintf() for information only. */
+	#define PVR_DBG_VERBOSE		DBGPRIV_VERBOSE		/*!< Debug level passed to PVRSRVDebugPrintf() for very low-priority debug. */
+	#define PVR_DBG_CALLTRACE	DBGPRIV_CALLTRACE
+	#define PVR_DBG_ALLOC		DBGPRIV_ALLOC
+	#define PVR_DBG_BUFFERED	DBGPRIV_BUFFERED	/*!< Debug level passed to PVRSRVDebugPrintf() when debug should be written to the debug circular buffer. */
+	#define PVR_DBG_DEBUG		DBGPRIV_DEBUG
+	#define PVR_DBGDRIV_MESSAGE	DBGPRIV_DBGDRV_MESSAGE
+
+	/* These levels are always on with PVRSRV_NEED_PVR_DPF */
+	#define __PVR_DPF_0x001UL(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__)
+	#define __PVR_DPF_0x002UL(...) PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__)
+	#define __PVR_DPF_0x080UL(...) PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__)
+
+	/*
+	  The AdHoc-Debug level is only supported when enabled in the local
+	  build environment and may need to be used in both debug and release
+	  builds. An error is generated in the formal build if it is checked in.
+	*/
+#if defined(PVR_DPF_ADHOC_DEBUG_ON)
+	#define __PVR_DPF_0x100UL(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__)
+#else
+    /* Use an undefined token here to stop compilation dead in the offending module */
+	#define __PVR_DPF_0x100UL(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing
+#endif
+
+	/* Some are compiled out completely in release builds */
+#if defined(DEBUG) || defined(DOXYGEN)
+	#define __PVR_DPF_0x004UL(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__)
+	#define __PVR_DPF_0x008UL(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__)
+	#define __PVR_DPF_0x010UL(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__)
+	#define __PVR_DPF_0x020UL(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__)
+	#define __PVR_DPF_0x040UL(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__)
+	#define __PVR_DPF_0x200UL(...) PVRSRVDebugPrintf(DBGPRIV_DBGDRV_MESSAGE, __VA_ARGS__)
+#else
+	#define __PVR_DPF_0x004UL(...)
+	#define __PVR_DPF_0x008UL(...)
+	#define __PVR_DPF_0x010UL(...)
+	#define __PVR_DPF_0x020UL(...)
+	#define __PVR_DPF_0x040UL(...)
+	#define __PVR_DPF_0x200UL(...)
+#endif
+
+	/* Translate the different log levels to separate macros
+	 * so they can each be compiled out.
+	 */
+#if defined(DEBUG)
+	#define __PVR_DPF(lvl, ...) __PVR_DPF_ ## lvl (__FILE__, __LINE__, __VA_ARGS__)
+#else
+	#define __PVR_DPF(lvl, ...) __PVR_DPF_ ## lvl ("", 0, __VA_ARGS__)
+#endif
+
+	/* Get rid of the double bracketing */
+	#define PVR_DPF(x) __PVR_DPF x
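+
+	/* Editor's note: the double brackets at call sites make the whole
+	 * argument list a single macro parameter, so the same PVR_DPF spelling
+	 * works whether or not a given level compiles to anything. Illustrative
+	 * call (ui32Size is a hypothetical variable):
+	 *
+	 *   PVR_DPF((PVR_DBG_ERROR, "alloc failed: size %u", ui32Size));
+	 */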
+
+	#define PVR_LOG_ERROR(_rc, _call) \
+		PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__));
+
+	#define PVR_LOG_IF_ERROR(_rc, _call) do \
+		{ if (unlikely(_rc != PVRSRV_OK)) \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGR_IF_NOMEM(_expr, _call) do \
+		{ if (unlikely(_expr == NULL)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \
+			return (PVRSRV_ERROR_OUT_OF_MEMORY); }\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGG_IF_NOMEM(_expr, _call, _err, _go) do \
+		{ if (unlikely(_expr == NULL)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \
+			_err = PVRSRV_ERROR_OUT_OF_MEMORY; \
+			goto _go; } \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGR_IF_ERROR(_rc, _call) do \
+		{ if (unlikely(_rc != PVRSRV_OK)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+			return (_rc); }\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGRN_IF_ERROR(_rc, _call) do \
+		{ if (unlikely(_rc != PVRSRV_OK)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+			return; }\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGG_IF_ERROR(_rc, _call, _go) do \
+		{ if (unlikely(_rc != PVRSRV_OK)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+			goto _go; }\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOG_IF_FALSE(_expr, _msg) do \
+		{ if (unlikely(!(_expr))) \
+			PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGR_IF_FALSE(_expr, _msg, _rc) do \
+		{ if (unlikely(!(_expr))) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+			return (_rc); }\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGG_IF_FALSE(_expr, _msg, _go) do \
+		{ if (unlikely(!(_expr))) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+			goto _go; }\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugPrintf
+@Description    Output a debug message to the user, using an OS-specific
+                method, to a log or console which can be read by developers
+                Invoked from the macro PVR_DPF().
+@Input          ui32DebugLevel   The debug level of the message. This can
+                                 be used to restrict the output of debug
+                                 messages based on their severity.
+                                 If this is PVR_DBG_BUFFERED, the message
+                                 should be written into a debug circular
+                                 buffer instead of being output immediately
+                                 (useful when performance would otherwise
+                                 be adversely affected).
+                                 The debug circular buffer shall only be
+                                 output when PVRSRVDebugPrintfDumpCCB() is
+                                 called.
+@Input          pszFileName      The source file containing the code that is
+                                 generating the message
+@Input          ui32Line         The line number in the source file
+@Input          pszFormat        The formatted message string
+@Input          ...              Zero or more arguments for use by the
+                                 formatted string
+@Return         None
+*/ /**************************************************************************/
+IMG_IMPORT void IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+                                               const IMG_CHAR *pszFileName,
+                                               IMG_UINT32 ui32Line,
+                                               const IMG_CHAR *pszFormat,
+                                               ...) __printf(4, 5);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugPrintfDumpCCB
+@Description    When PVRSRVDebugPrintf() is called with the ui32DebugLevel
+                specified as DBGPRIV_BUFFERED, the debug shall be written to
+                the debug circular buffer instead of being output immediately.
+                (This could be used to obtain debug without incurring a
+                performance hit by printing it at that moment).
+                This function shall dump the contents of that debug circular
+                buffer to be output in an OS-specific method to a log or
+                console which can be read by developers.
+@Return         None
+*/ /**************************************************************************/
+IMG_IMPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void);
+
+#else  /* defined(PVRSRV_NEED_PVR_DPF) */
+
+	#define PVR_DPF(X)  /*!< Null Implementation of PowerVR Debug Printf (does nothing) */
+
+	#define PVR_LOG_ERROR(_rc, _call) (void)(_rc)
+	#define PVR_LOG_IF_ERROR(_rc, _call) (void)(_rc)
+
+	#define PVR_LOGR_IF_NOMEM(_expr, _call) do { if (unlikely(_expr == NULL)) { return (PVRSRV_ERROR_OUT_OF_MEMORY); } MSC_SUPPRESS_4127 } while (0)
+	#define PVR_LOGG_IF_NOMEM(_expr, _call, _err, _go) do { if (unlikely(_expr == NULL)) { _err = PVRSRV_ERROR_OUT_OF_MEMORY; goto _go; } MSC_SUPPRESS_4127	} while (0)
+	#define PVR_LOGR_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while(0)
+	#define PVR_LOGRN_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return; } MSC_SUPPRESS_4127 } while(0)
+	#define PVR_LOGG_IF_ERROR(_rc, _call, _go) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while(0)
+
+	#define PVR_LOG_IF_FALSE(_expr, _msg) (void)(_expr)
+	#define PVR_LOGR_IF_FALSE(_expr, _msg, _rc) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while(0)
+	#define PVR_LOGG_IF_FALSE(_expr, _msg, _go) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while(0)
+
+	#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+
+#if defined(DEBUG)
+	#define PVR_LOG_WARN(_rc, _call) \
+		PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__));
+
+	#define PVR_LOG_WARN_IF_ERROR(_rc, _call) do \
+		{ if (unlikely(_rc != PVRSRV_OK)) \
+			PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+		MSC_SUPPRESS_4127\
+		} while (0)
+#else
+	#define PVR_LOG_WARN(_rc, _call) (void)(_rc)
+	#define PVR_LOG_WARN_IF_ERROR(_rc, _call) (void)(_rc)
+#endif
+
+
+#if defined(PVR_DPF_FUNCTION_TRACE_ON)
+
+	#define PVR_DPF_ENTERED \
+        PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered", __func__, __LINE__))
+
+	#define PVR_DPF_ENTERED1(p1) \
+		PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered (0x%lx)", __func__, __LINE__, ((unsigned long)p1)))
+
+	#define PVR_DPF_RETURN_RC(a) \
+        do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d", __func__, __LINE__, (_r))); return (_r); MSC_SUPPRESS_4127 } while (0)
+
+	#define PVR_DPF_RETURN_RC1(a,p1) \
+		do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d (0x%lx)", __func__, __LINE__, (_r), ((unsigned long)p1))); return (_r); MSC_SUPPRESS_4127 } while (0)
+
+	#define PVR_DPF_RETURN_VAL(a) \
+		do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned with value", __func__, __LINE__ )); return (a); MSC_SUPPRESS_4127 } while (0)
+
+	#define PVR_DPF_RETURN_OK \
+		do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned ok", __func__, __LINE__)); return PVRSRV_OK; MSC_SUPPRESS_4127 } while (0)
+
+	#define PVR_DPF_RETURN \
+		do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned", __func__, __LINE__)); return; MSC_SUPPRESS_4127 } while (0)
+
+	#if !defined(DEBUG)
+	#error PVR DPF Function trace enabled in release build, rectify
+	#endif
+
+#else /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
+
+	#define PVR_DPF_ENTERED
+	#define PVR_DPF_ENTERED1(p1)
+	#define PVR_DPF_RETURN_RC(a) 	 return (a)
+	#define PVR_DPF_RETURN_RC1(a,p1) return (a)
+	#define PVR_DPF_RETURN_VAL(a) 	 return (a)
+	#define PVR_DPF_RETURN_OK 		 return PVRSRV_OK
+	#define PVR_DPF_RETURN	 		 return
+
+#endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
+
+#if defined(__KERNEL__) || defined(DOXYGEN) || defined(__QNXNTO__)
+/* Use PVR_DPF() unless the message is necessary in release builds */
+#ifdef PVR_DISABLE_LOGGING
+#define PVR_LOG(X)
+#else
+#define PVR_LOG(X) PVRSRVReleasePrintf X;
+#endif
+
+/*************************************************************************/ /*!
+@Function       PVRSRVReleasePrintf
+@Description    Output an important message, using an OS-specific method,
+                to a log or console which can be read by developers in
+                release builds.
+                Invoked from the macro PVR_LOG().
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+@Return         None
+*/ /**************************************************************************/
+IMG_IMPORT void IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) __printf(1, 2);
+#endif
+
+/* PVR_TRACE() handling */
+
+#if defined(PVRSRV_NEED_PVR_TRACE) || defined(DOXYGEN)
+
+	#define PVR_TRACE(X)	PVRSRVTrace X    /*!< PowerVR Debug Trace Macro */
+	/* Empty string implementation that is -O0 build friendly */
+	#define PVR_TRACE_EMPTY_LINE()	PVR_TRACE(("%s", ""))
+
+/*************************************************************************/ /*!
+@Function       PVRSRVTrace
+@Description    Output a debug message to the user
+                Invoked from the macro PVR_TRACE().
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+IMG_IMPORT void IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... )
+	__printf(1, 2);
+
+#else /* defined(PVRSRV_NEED_PVR_TRACE) */
+    /*! Null Implementation of PowerVR Debug Trace Macro (does nothing) */
+	#define PVR_TRACE(X)
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+
+#if defined(PVRSRV_NEED_PVR_ASSERT)
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_64BITS_TO_32BITS)
+#endif
+	INLINE static IMG_UINT32 TRUNCATE_64BITS_TO_32BITS(IMG_UINT64 uiInput)
+	{
+		 IMG_UINT32 uiTruncated;
+
+		 uiTruncated = (IMG_UINT32)uiInput;
+		 PVR_ASSERT(uiInput == uiTruncated);
+		 return uiTruncated;
+	}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_64BITS_TO_SIZE_T)
+#endif
+	INLINE static size_t TRUNCATE_64BITS_TO_SIZE_T(IMG_UINT64 uiInput)
+	{
+		 size_t uiTruncated;
+
+		 uiTruncated = (size_t)uiInput;
+		 PVR_ASSERT(uiInput == uiTruncated);
+		 return uiTruncated;
+	}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_SIZE_T_TO_32BITS)
+#endif
+	INLINE static IMG_UINT32 TRUNCATE_SIZE_T_TO_32BITS(size_t uiInput)
+	{
+		 IMG_UINT32 uiTruncated;
+
+		 uiTruncated = (IMG_UINT32)uiInput;
+		 PVR_ASSERT(uiInput == uiTruncated);
+		 return uiTruncated;
+	}
+
+
+#else /* defined(PVRSRV_NEED_PVR_ASSERT) */
+	#define TRUNCATE_64BITS_TO_32BITS(expr) ((IMG_UINT32)(expr))
+	#define TRUNCATE_64BITS_TO_SIZE_T(expr) ((size_t)(expr))
+	#define TRUNCATE_SIZE_T_TO_32BITS(expr) ((IMG_UINT32)(expr))
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
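+
+/* Illustrative use (editor's sketch): either variant narrows the value; the
+ * assert-backed inline versions additionally check that no significant bits
+ * were lost.
+ *
+ *   IMG_UINT32 ui32Size = TRUNCATE_64BITS_TO_32BITS(uiDevMemSize);
+ *
+ * uiDevMemSize is a hypothetical IMG_UINT64 used only for this example.
+ */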
+
+/* Macros used to trace calls */
+#if defined(DEBUG)
+	#define PVR_DBG_FILELINE , (__FILE__), (__LINE__)
+	#define PVR_DBG_FILELINE_PARAM , const IMG_CHAR *pszaFile, IMG_UINT32 ui32Line
+	#define PVR_DBG_FILELINE_ARG , pszaFile, ui32Line
+	#define PVR_DBG_FILELINE_FMT " %s:%u"
+	#define PVR_DBG_FILELINE_UNREF() do { PVR_UNREFERENCED_PARAMETER(pszaFile); \
+				PVR_UNREFERENCED_PARAMETER(ui32Line); } while(0)
+#else
+	#define PVR_DBG_FILELINE
+	#define PVR_DBG_FILELINE_PARAM
+	#define PVR_DBG_FILELINE_ARG
+	#define PVR_DBG_FILELINE_FMT
+	#define PVR_DBG_FILELINE_UNREF()
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif	/* __PVR_DEBUG_H__ */
+
+/******************************************************************************
+ End of file (pvr_debug.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvr_drm_display_external.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvr_drm_display_external.h
new file mode 100644
index 0000000..e315eae
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvr_drm_display_external.h
@@ -0,0 +1,57 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services external DRM display interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services DRM display declarations and definitions that are
+                visible internally and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_PVR_DRM_DISPLAY_EXTERNAL_)
+#define _PVR_DRM_DISPLAY_EXTERNAL_
+
+#include <powervr/buffer_attribs.h>
+
+typedef enum _FB_MEMLAYOUT_
+{
+	FB_MEMLAYOUT_STRIDED = 0,	/*!< Strided buffer */
+	FB_MEMLAYOUT_COMPRESSED,	/*!< Compressed buffer */
+	FB_MEMLAYOUT_BIF_PAGE_TILED,	/*!< BIF page tiled buffer */
+} FB_MEMLAYOUT;
+
+#endif /* !defined(_PVR_DRM_DISPLAY_EXTERNAL_) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvr_intrinsics.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvr_intrinsics.h
new file mode 100644
index 0000000..ee7de67
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvr_intrinsics.h
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@File
+@Title          Intrinsics definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_INTRINSICS_H_
+#define _PVR_INTRINSICS_H_
+
+/* PVR_CTZLL:
+ * Count the number of trailing zeroes in a long long integer
+ */
+
+#if defined(__GNUC__)
+#if defined(__x86_64__)
+
+	#define PVR_CTZLL __builtin_ctzll
+#endif
+#endif
+
+/* PVR_CLZLL:
+ * Count the number of leading zeroes in a long long integer
+ */
+
+#if defined(__GNUC__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \
+					defined(__arm__) || defined(__mips)
+
+#define PVR_CLZLL __builtin_clzll
+
+#endif
+#endif
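+
+/* Editor's note (illustrative): when the macros are defined they map straight
+ * onto the GCC builtins, so callers should test for them first:
+ *
+ * #if defined(PVR_CTZLL)
+ *     PVR_CTZLL(8ULL)  == 3    -- lowest set bit of 8 is bit 3
+ * #endif
+ * #if defined(PVR_CLZLL)
+ *     PVR_CLZLL(1ULL)  == 63   -- 63 leading zeroes above bit 0
+ * #endif
+ */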
+
+#endif /* _PVR_INTRINSICS_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrmodule.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrmodule.h
new file mode 100644
index 0000000..267c7b6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrmodule.h
@@ -0,0 +1,48 @@
+/*************************************************************************/ /*!
+@Title          Module Author and License.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef	_PVRMODULE_H_
+#define	_PVRMODULE_H_
+
+MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+MODULE_LICENSE("Dual MIT/GPL");
+
+#endif	/* _PVRMODULE_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_device_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_device_types.h
new file mode 100644
index 0000000..0439c34
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_device_types.h
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR device type definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVRSRV_DEVICE_TYPES_H__)
+#define __PVRSRV_DEVICE_TYPES_H__
+
+#include "img_types.h"
+
+#define PVRSRV_MAX_DEVICES		16	/*!< Largest supported number of devices on the system */
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#endif /* __PVRSRV_DEVICE_TYPES_H__ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_devmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_devmem.h
new file mode 100644
index 0000000..c1ac238
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_devmem.h
@@ -0,0 +1,907 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management core
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Client side part of device memory management -- This
+                file defines the exposed Services API to core memory management
+                functions.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_DEVMEM_H
+#define PVRSRV_DEVMEM_H
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include <powervr/sync_external.h>
+#include "services_km.h" /* for PVRSRV_DEV_CONNECTION */
+
+
+/*
+  Device memory contexts, heaps and memory descriptors are passed
+  through to underlying memory APIs directly, but are to be regarded
+  as an opaque handle externally.
+*/
+typedef struct _PVRSRV_DEVMEMCTX_ *PVRSRV_DEVMEMCTX;       /*!< Device-Mem Client-Side Interface: Typedef for Context Ptr */
+typedef DEVMEM_HEAP *PVRSRV_HEAP;               /*!< Device-Mem Client-Side Interface: Typedef for Heap Ptr */
+typedef DEVMEM_MEMDESC *PVRSRV_MEMDESC;         /*!< Device-Mem Client-Side Interface: Typedef for Memory Descriptor Ptr */
+typedef DEVMEM_EXPORTCOOKIE PVRSRV_DEVMEM_EXPORTCOOKIE;     /*!< Device-Mem Client-Side Interface: Typedef for Export Cookie */
+typedef DEVMEM_FLAGS_T PVRSRV_MEMMAP_FLAGS_T;               /*!< Device-Mem Client-Side Interface: Typedef for Memory-Mapping Flags Enum */
+typedef IMG_HANDLE PVRSRV_REMOTE_DEVMEMCTX;                 /*!< Type to use with context export import */
+typedef struct _PVRSRV_EXPORT_DEVMEMCTX_ *PVRSRV_EXPORT_DEVMEMCTX;
+
+/* To use with PVRSRVSubAllocDeviceMem() as the default factor if no
+ * over-allocation is desired. */
+#define PVRSRV_DEVMEM_PRE_ALLOC_MULTIPLIER_NONE     DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER
+
+/* N.B.  Flags are now defined in pvrsrv_memallocflags.h as they need
+         to be omnipresent. */
+
+/*
+ *
+ *  API functions
+ *
+ */
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCreateDeviceMemContext
+@Description    Creates a device memory context.  There is a one-to-one
+                correspondence between this context data structure and the top
+                level MMU page table (known as the Page Catalogue, in the case of a
+                3-tier MMU).  It is intended that a process with its own virtual
+                space on the CPU will also have its own virtual space on the GPU.
+                Thus there is loosely a one-to-one correspondence between process
+                and device memory context, but this is not enforced at this API.
+
+                Every process must create the device memory context before any
+                memory allocations are made, and is responsible for freeing all
+                such allocations before destroying the context.
+
+                This is a wrapper function above the "bare-metal" device memory
+                context creation function which would create just a context and no
+                heaps.  This function will also create the heaps, according to the
+                heap config that the device specific initialization code has
+                nominated for use by this API.
+
+                The number of heaps thus created is returned to the caller, so
+                that the caller can allocate an array and then call in to fetch
+                the details of each heap, or look up a heap with the "Find
+                Heap" API described below.
+
+                In order to derive the details of the MMU configuration for the
+                device, and for retrieving the "bridge handle" for communication
+                internally in services, it is necessary to pass in a
+                PVRSRV_DEV_CONNECTION.
+@Input          psDevConnection Services device connection
+@Output         phCtxOut        On success, the returned DevMem Context. The
+                                caller is responsible for providing storage
+                                for this.
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVCreateDeviceMemContext(PVRSRV_DEV_CONNECTION *psDevConnection,
+                             PVRSRV_DEVMEMCTX *phCtxOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDestroyDeviceMemContext
+@Description    Destroys a device memory context. This cannot fail, provided
+                the caller has obeyed the protocol, i.e. has freed all
+                allocations made in the context beforehand.
+@Input          hCtx            Handle to a DevMem Context
+@Return         None
+*/ /***************************************************************************/
+extern IMG_IMPORT void
+PVRSRVDestroyDeviceMemContext(PVRSRV_DEVMEMCTX hCtx);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVFindHeapByName
+@Description    Returns the heap handle for the named heap, which is assumed
+                to exist in this context.
+
+                N.B.  No need for acquire/release semantics here, as when using
+                this wrapper layer, the heaps are automatically instantiated at
+                context creation time and destroyed when the context is 
+                destroyed.
+
+                The caller is required to know the heap names already as these 
+                will vary from device to device and from purpose to purpose.
+@Input          hCtx            Handle to a DevMem Context
+@Input          pszHeapName     Name of the heap to look for
+@Output         phHeapOut       A handle to the heap, for use in future calls
+                                to OpenAllocation / AllocDeviceMemory /
+                                MapDeviceClassMemory, etc. (the PVRSRV_HEAP
+                                type is to be regarded by the caller as an
+                                opaque, but strongly typed, handle)
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVFindHeapByName(PVRSRV_DEVMEMCTX hCtx,
+                     const IMG_CHAR *pszHeapName,
+                     PVRSRV_HEAP *phHeapOut);
+
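+/* Lifecycle sketch (illustrative only; the heap name below is hypothetical
+ * and device-specific, and error handling is elided):
+ *
+ *   PVRSRV_DEVMEMCTX hCtx;
+ *   PVRSRV_HEAP hHeap;
+ *
+ *   if (PVRSRVCreateDeviceMemContext(psDevConnection, &hCtx) == PVRSRV_OK)
+ *   {
+ *       PVRSRVFindHeapByName(hCtx, "General", &hHeap);
+ *       ... allocate from hHeap, free everything ...
+ *       PVRSRVDestroyDeviceMemContext(hCtx);
+ *   }
+ */
+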
+/**************************************************************************/ /*!
+@Function       PVRSRVDevmemGetHeapBaseDevVAddr
+@Description    Returns the device virtual address of the base of the heap.
+@Input          hHeap           Handle to a Heap
+@Output         pDevVAddr       On success, the device virtual address of the
+                                base of the heap.
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVDevmemGetHeapBaseDevVAddr(PVRSRV_HEAP hHeap,
+                                IMG_DEV_VIRTADDR *pDevVAddr);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVSubAllocDeviceMem
+@Description    Allocate memory from the specified heap, acquiring physical
+                memory from OS as we go and mapping this into
+                the GPU (mandatorily) and CPU (optionally)
+
+                Size must be a positive integer multiple of alignment, or, to
+                put it another way, the uiLog2Align LSBs should all be zero, but
+                at least one other bit should not be.
+
+                Caller to take charge of the PVRSRV_MEMDESC (the memory
+                descriptor) which is to be regarded as an opaque handle.
+
+                If the allocation is to be used with PVRSRVDevmemUnpin(),
+                the size must be a multiple of the page size. This is a
+                general rule whenever suballocations are to be avoided.
+
+@Input          uiPreAllocMultiplier  Size factor for internal pre-allocation
+                                      of memory, to make subsequent calls with
+                                      the same flags faster. Whatever value is
+                                      set, the function will first try to
+                                      allocate from any pre-allocated memory
+                                      and, if successful, will not pre-allocate
+                                      anything more; the factor can therefore
+                                      always be set and the correct thing will
+                                      be done internally.
+@Input          hHeap                 Handle to the heap from which memory will be
+                                      allocated
+@Input          uiSize                Amount of memory to be allocated.
+@Input          uiLog2Align           LOG2 of the required alignment
+@Input          uiMemAllocFlags       Allocation Flags
+@Input          pszText               Text to describe the allocation
+@Output         phMemDescOut          On success, the resulting memory descriptor
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVSubAllocDeviceMem(IMG_UINT8 uiPreAllocMultiplier,
+                        PVRSRV_HEAP hHeap,
+                        IMG_DEVMEM_SIZE_T uiSize,
+                        IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+                        PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags,
+                        const IMG_CHAR *pszText,
+                        PVRSRV_MEMDESC *phMemDescOut);
+
+#define PVRSRVAllocDeviceMem(...) \
+    PVRSRVSubAllocDeviceMem(PVRSRV_DEVMEM_PRE_ALLOC_MULTIPLIER_NONE, __VA_ARGS__)
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetMaxDevMemSize
+@Description    Gets the amount of device memory on the current platform,
+                in bytes. (Consider scaling down the values returned by
+                this API.)
+@Input          psConnection          Services connection
+@Output         puiLMASize            LMA memory size
+@Output         puiUMASize            UMA memory size
+@Return         None
+*/ /***************************************************************************/
+extern IMG_IMPORT void
+PVRSRVGetMaxDevMemSize(PVRSRV_DEV_CONNECTION *psConnection,
+		    IMG_DEVMEM_SIZE_T *puiLMASize,
+		    IMG_DEVMEM_SIZE_T *puiUMASize);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVFreeDeviceMem
+@Description    Frees memory allocated by PVRSRVSubAllocDeviceMem() (the
+                memory descriptor will be destroyed)
+@Input          hMemDesc            Handle to the descriptor of the memory to be
+                                    freed
+@Return         None
+*/ /***************************************************************************/
+extern IMG_IMPORT void
+PVRSRVFreeDeviceMem(PVRSRV_MEMDESC hMemDesc);
+
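+/* Allocation sketch (illustrative only; hHeap and uiFlags are hypothetical
+ * caller variables, with uiFlags built from the PVRSRV_MEMALLOCFLAG_* values
+ * in pvrsrv_memallocflags.h): allocate 64 KiB with 4 KiB alignment and no
+ * pre-allocation multiplier, then free it.
+ *
+ *   PVRSRV_MEMDESC hMemDesc;
+ *
+ *   if (PVRSRVAllocDeviceMem(hHeap,
+ *                            64 * 1024,       (size in bytes)
+ *                            12,              (log2 alignment: 4 KiB)
+ *                            uiFlags,
+ *                            "example alloc",
+ *                            &hMemDesc) == PVRSRV_OK)
+ *   {
+ *       PVRSRVFreeDeviceMem(hMemDesc);
+ *   }
+ */
+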
+/**************************************************************************/ /*!
+@Function       PVRSRVAcquireCPUMapping
+@Description    Causes the allocation referenced by this memory descriptor to
+                be mapped into CPU virtual memory, if it wasn't already, and
+                the CPU virtual address returned in the caller-provided
+                location.
+
+                The caller must call PVRSRVReleaseCPUMapping to advise when
+                they have finished with the mapping.
+
+                Does not accept unpinned allocations.
+                Returns PVRSRV_ERROR_INVALID_MAP_REQUEST if an unpinned
+                MemDesc is passed in.
+
+@Input          hMemDesc            Handle to the memory descriptor for which a
+                                    CPU mapping is required
+@Output         ppvCpuVirtAddrOut   On success, the caller's ptr is set to the
+                                    new CPU mapping
+@Return         PVRSRV_ERROR:       PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVAcquireCPUMapping(PVRSRV_MEMDESC hMemDesc,
+                        void **ppvCpuVirtAddrOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVReleaseCPUMapping
+@Description    Relinquishes the CPU mapping acquired with
+                PVRSRVAcquireCPUMapping()
+@Input          hMemDesc            Handle of the memory descriptor
+@Return         None
+*/ /***************************************************************************/
+extern IMG_IMPORT void
+PVRSRVReleaseCPUMapping(PVRSRV_MEMDESC hMemDesc);
+
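+/* CPU-mapping sketch (illustrative only; uiSize is a hypothetical caller
+ * variable holding the allocated size): acquire a CPU mapping on a pinned
+ * allocation, write through it, then release it.
+ *
+ *   void *pvCpuVAddr;
+ *
+ *   if (PVRSRVAcquireCPUMapping(hMemDesc, &pvCpuVAddr) == PVRSRV_OK)
+ *   {
+ *       memset(pvCpuVAddr, 0, uiSize);
+ *       PVRSRVReleaseCPUMapping(hMemDesc);
+ *   }
+ */
+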
+
+/**************************************************************************/ /*!
+@Function       PVRSRVMapToDevice
+@Description    Map the allocation into the device MMU. This function must
+                only be called once; any further calls will return
+                PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED
+
+                The caller must call PVRSRVReleaseDeviceMapping when they
+                are finished with the mapping.
+
+                Does not accept unpinned allocations.
+                Returns PVRSRV_ERROR_INVALID_MAP_REQUEST if an unpinned
+                MemDesc is passed in.
+
+@Input          hMemDesc            Handle of the memory descriptor
+@Input          hHeap               Device heap to map the allocation into
+@Output         psDevVirtAddrOut    Device virtual address
+@Return         PVRSRV_ERROR:       PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVMapToDevice(PVRSRV_MEMDESC hMemDesc,
+                  PVRSRV_HEAP hHeap,
+                  IMG_DEV_VIRTADDR *psDevVirtAddrOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVMapToDeviceAddress
+@Description    Same as PVRSRVMapToDevice but caller chooses the address to
+                map into.
+
+                The caller is able to overwrite existing mappings, so never
+                use this function on a heap where PVRSRVMapToDevice() has been
+                used before or will be used in the future.
+
+                In general the caller has to know which regions of the heap have
+                been mapped already and should avoid overlapping mappings.
+
+@Input          psMemDesc           Handle of the memory descriptor
+@Input          psHeap              Device heap to map the allocation into
+@Input          sDevVirtAddr        Device virtual address to map to
+@Return         PVRSRV_ERROR:       PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+                         DEVMEM_HEAP *psHeap,
+                         IMG_DEV_VIRTADDR sDevVirtAddr);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAcquireDeviceMapping
+@Description    Acquire a reference on the device mapping of the allocation,
+                returning the device virtual address in the caller-provided
+                location. If the allocation wasn't mapped into the device,
+                PVRSRV_ERROR_DEVICEMEM_NO_MAPPING will be returned, as
+                PVRSRVMapToDevice must be called first.
+
+                The caller must call PVRSRVReleaseDeviceMapping when they
+                are finished with the mapping.
+
+                Does not accept unpinned allocations.
+                Returns PVRSRV_ERROR_INVALID_MAP_REQUEST if an unpinned
+                MemDesc is passed in.
+
+@Input          hMemDesc            Handle to the memory descriptor for which a
+                                    device mapping is required
+@Output         psDevVirtAddrOut    On success, the caller's ptr is set to the
+                                    new device mapping
+@Return         PVRSRV_ERROR:       PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVAcquireDeviceMapping(PVRSRV_MEMDESC hMemDesc,
+                           IMG_DEV_VIRTADDR *psDevVirtAddrOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVReleaseDeviceMapping
+@Description    Relinquishes the device mapping acquired with
+                PVRSRVAcquireDeviceMapping or PVRSRVMapToDevice
+@Input          hMemDesc            Handle of the memory descriptor
+@Return         None
+*/ /***************************************************************************/
+extern IMG_IMPORT void
+PVRSRVReleaseDeviceMapping(PVRSRV_MEMDESC hMemDesc);
+
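+/* Device-mapping sketch (illustrative only; hHeap is a hypothetical caller
+ * variable): map once with PVRSRVMapToDevice(), take any further references
+ * with PVRSRVAcquireDeviceMapping(), and release each reference.
+ *
+ *   IMG_DEV_VIRTADDR sDevVAddr;
+ *
+ *   if (PVRSRVMapToDevice(hMemDesc, hHeap, &sDevVAddr) == PVRSRV_OK)
+ *   {
+ *       ... submit GPU work using sDevVAddr ...
+ *       PVRSRVReleaseDeviceMapping(hMemDesc);
+ *   }
+ */
+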
+/*************************************************************************/ /*!
+@Function       PVRSRVDevmemLocalImport
+
+@Description    Import a PMR that was created with this connection.
+                The general usage of this function is as follows:
+                1) Create a devmem allocation on server side.
+                2) Pass back the PMR of that allocation to client side by
+                   creating a handle of type PMR_LOCAL_EXPORT_HANDLE.
+                3) Pass the PMR_LOCAL_EXPORT_HANDLE to
+                   PVRSRVMakeLocalImportHandle() to create a new handle type
+                   (DEVMEM_MEM_IMPORT) that can be used with this function.
+
+@Input          psDevConnection         Services device connection
+
+@Input          hExtHandle              External memory handle
+
+@Input          uiFlags                 Import flags
+
+@Output         phMemDescPtr            Created MemDesc
+
+@Output         puiSizePtr              Size of the created MemDesc
+
+@Input          pszAnnotation           Annotation string for this allocation/import
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR PVRSRVDevmemLocalImport(const PVRSRV_DEV_CONNECTION *psDevConnection,
+                                     IMG_HANDLE hExtHandle,
+                                     PVRSRV_MEMMAP_FLAGS_T uiFlags,
+                                     PVRSRV_MEMDESC *phMemDescPtr,
+                                     IMG_DEVMEM_SIZE_T *puiSizePtr,
+                                     const IMG_CHAR *pszAnnotation);
+
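+/* Local-import sketch (illustrative only; hServerHandle and uiFlags are
+ * assumed to come from the caller, with hServerHandle obtained from the
+ * server as a PMR_LOCAL_EXPORT_HANDLE; error handling elided):
+ *
+ *   IMG_HANDLE hImport;
+ *   PVRSRV_MEMDESC hMemDesc;
+ *   IMG_DEVMEM_SIZE_T uiSize;
+ *
+ *   PVRSRVMakeLocalImportHandle(psDevConnection, hServerHandle, &hImport);
+ *   PVRSRVDevmemLocalImport(psDevConnection, hImport, uiFlags,
+ *                           &hMemDesc, &uiSize, "example import");
+ */
+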
+/*************************************************************************/ /*!
+@Function       PVRSRVDevmemGetImportUID
+
+@Description    Get the UID of the import that backs this MemDesc
+
+@Input          hMemDesc                MemDesc
+
+@Return         UID of import
+*/
+/*****************************************************************************/
+PVRSRV_ERROR PVRSRVDevmemGetImportUID(PVRSRV_MEMDESC hMemDesc,
+                                      IMG_UINT64 *pui64UID);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAllocExportableDevMem
+@Description    Allocate memory without mapping into device memory context.  This
+                memory is exported and ready to be mapped into the device memory
+                context of other processes, or to CPU only with 
+                PVRSRVMapMemoryToCPUOnly(). The caller agrees to later call 
+                PVRSRVFreeUnmappedExportedMemory(). The caller must give the page
+                size of the heap into which this memory may be subsequently 
+                mapped, or the largest of such page sizes if it may be mapped 
+                into multiple places.  This information is to be communicated in
+                the Log2Align field.
+
+                Size must be a positive integer multiple of the page size.
+@Input          uiLog2Align         Log2 of the alignment required
+@Input          uiLog2HeapPageSize  Log2 of the page size to allocate with.
+                                    Must match the page size of the heap that
+                                    this is going to be mapped into.
+@Input          uiSize              the amount of memory to be allocated
+@Input          uiFlags             Allocation flags
+@Input          pszText             Text to describe the allocation
+@Output         hMemDesc            On success, the resulting memory
+                                    descriptor
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAllocExportableDevMem(const PVRSRV_DEV_CONNECTION *psDevConnection,
+                            IMG_DEVMEM_SIZE_T uiSize,
+                            IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+                            IMG_UINT32 uiLog2HeapPageSize,
+                            PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                            const IMG_CHAR *pszText,
+                            PVRSRV_MEMDESC *hMemDesc);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVChangeSparseDevMem
+@Description    This function alters the underlying memory layout of the given
+                allocation by allocating/removing pages as requested. It also
+                re-writes the GPU & CPU mappings accordingly. The specific
+                actions can be controlled by the corresponding flags.
+
+@Input          psMemDesc           The memory layout that needs to be modified
+@Input          ui32AllocPageCount  New page allocation count
+@Input          pai32AllocIndices   New page allocation indices (page granularity)
+@Input          ui32FreePageCount   Number of pages that need to be freed
+@Input          pai32FreeIndices    Indices of the pages that need to be freed
+@Input          uiFlags             Flags that control the behaviour of the call
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVChangeSparseDevMem(PVRSRV_MEMDESC psMemDesc,
+                         IMG_UINT32 ui32AllocPageCount,
+                         IMG_UINT32 *pai32AllocIndices,
+                         IMG_UINT32 ui32FreePageCount,
+                         IMG_UINT32 *pai32FreeIndices,
+                         SPARSE_MEM_RESIZE_FLAGS uiFlags);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAllocSparseDevMem2
+@Description    Allocate sparse memory without mapping into device memory context.
+                Sparse memory is used where you have an allocation that has a
+                logical size (i.e. the amount of VM space it will need when
+                mapping it into a device) that is larger than the amount of
+                physical memory that the allocation will use. An example of
+                this is an NPOT texture, where the twiddling algorithm
+                requires you to round the width and height to the next POT,
+                so you know there will be pages that are never accessed.
+
+                This memory can be exported and mapped into the device
+                memory context of other processes, or to CPU.
+
+                Size must be a positive integer multiple of the page size.
+@Input          psDevMemCtx         Device memory context to allocate the
+                                    memory for
+@Input          uiSize              The logical size of the allocation
+@Input          uiChunkSize         The size of a chunk (== page size in bytes)
+@Input          ui32NumPhysChunks   The number of physical chunks required
+@Input          ui32NumVirtChunks   The number of virtual chunks required
+@Input          pui32MappingTable   Index-based mapping table
+@Input          uiLog2Align         Log2 of the required alignment
+@Input          uiLog2HeapPageSize  Log2 page size of the heap we map this into
+@Input          uiFlags             Allocation flags
+@Input          pszText             Text to describe the allocation
+@Output         hMemDesc            On success, the resulting memory
+                                    descriptor
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAllocSparseDevMem2(const PVRSRV_DEVMEMCTX psDevMemCtx,
+                         IMG_DEVMEM_SIZE_T uiSize,
+                         IMG_DEVMEM_SIZE_T uiChunkSize,
+                         IMG_UINT32 ui32NumPhysChunks,
+                         IMG_UINT32 ui32NumVirtChunks,
+                         IMG_UINT32 *pui32MappingTable,
+                         IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+                         IMG_UINT32 uiLog2HeapPageSize,
+                         PVRSRV_MEMMAP_FLAGS_T uiFlags,
+                         const IMG_CHAR *pszText,
+                         PVRSRV_MEMDESC *hMemDesc);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAllocSparseDevMem (DEPRECATED and will be removed in future)
+@Description    Allocate sparse memory without mapping into device memory context.
+                Sparse memory is used where you have an allocation that has a
+                logical size (i.e. the amount of VM space it will need when
+                mapping it into a device) that is larger than the amount of
+                physical memory that the allocation will use. An example of
+                this is an NPOT texture, where the twiddling algorithm
+                requires you to round the width and height to the next POT,
+                so you know there will be pages that are never accessed.
+
+                This memory can be exported and mapped into the device
+                memory context of other processes, or to the CPU.
+
+                Size must be a positive integer multiple of the page size.
+                This function is deprecated and should not be used in any new
+                code; it will be removed in subsequent changes.
+@Input          psDevMemCtx         Device memory context to allocate the
+                                    memory for
+@Input          uiSize              The logical size of the allocation
+@Input          uiChunkSize         The size of the chunk
+@Input          ui32NumPhysChunks   The number of physical chunks required
+@Input          ui32NumVirtChunks   The number of virtual chunks required
+@Input          pabMappingTable     boolean based Mapping table
+@Input          uiLog2Align         Log2 of the required alignment
+@Input          uiLog2HeapPageSize  Log2 page size of the heap we map this into
+@Input          uiFlags             Allocation flags
+@Input          pszText             Text to describe the allocation
+@Output         hMemDesc            On success, the resulting memory
+                                    descriptor
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAllocSparseDevMem(const PVRSRV_DEVMEMCTX psDevMemCtx,
+                        IMG_DEVMEM_SIZE_T uiSize,
+                        IMG_DEVMEM_SIZE_T uiChunkSize,
+                        IMG_UINT32 ui32NumPhysChunks,
+                        IMG_UINT32 ui32NumVirtChunks,
+                        IMG_BOOL *pabMappingTable,
+                        IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+                        IMG_UINT32 uiLog2HeapPageSize,
+                        DEVMEM_FLAGS_T uiFlags,
+                        const IMG_CHAR *pszText,
+                        PVRSRV_MEMDESC *hMemDesc);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetOSLog2PageSize
+@Description    Returns the log2 of the page size utilised by the OS.
+                Only call this AFTER setting up the connection to the kernel
+                module, otherwise it will run into an assert.
+
+@Return         The log2 of the OS page size
+*/ /***************************************************************************/
+
+IMG_UINT32 PVRSRVGetOSLog2PageSize(void);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetHeapLog2PageSize
+@Description    Queries the page size of a passed heap.
+
+@Input          hHeap             Heap that is queried
+@Output         puiLog2PageSize   The log2 page size is returned here
+
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVGetHeapLog2PageSize(PVRSRV_HEAP hHeap, IMG_UINT32* puiLog2PageSize);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetHeapTilingProperties
+@Description    Queries the import alignment and tiling stride conversion
+                factor of a passed heap.
+
+@Input          hHeap                      Heap that is queried
+@Output         puiLog2ImportAlignment     The log2 import alignment is
+                                           returned here
+@Output         puiLog2TilingStrideFactor  The log2 alignment-to-tiling-stride
+                                           conversion factor is returned here
+
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVGetHeapTilingProperties(PVRSRV_HEAP hHeap,
+                              IMG_UINT32* puiLog2ImportAlignment,
+                              IMG_UINT32* puiLog2TilingStrideFactor);
+
+/**************************************************************************/ /*!
+@Function PVRSRVMakeLocalImportHandle
+@Description    This is a "special case" function for making a local import
+                handle. The server handle is a handle to a PMR of bridge type
+                PMR_LOCAL_EXPORT_HANDLE. The returned local import handle will
+                be of the bridge type DEVMEM_MEM_IMPORT that can be used with
+                PVRSRVDevmemLocalImport().
+@Input          psConnection        Services connection
+@Input          hServerHandle       Server export handle
+@Output         hLocalImportHandle  Returned client import handle
+@Return         PVRSRV_ERROR:       PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVMakeLocalImportHandle(const PVRSRV_DEV_CONNECTION *psConnection,
+                            IMG_HANDLE hServerHandle,
+                            IMG_HANDLE *hLocalImportHandle);
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnmakeLocalImportHandle
+@Description    Destroy the hLocalImportHandle created with
+                PVRSRVMakeLocalImportHandle().
+@Input          psConnection        Services connection
+@Input          hLocalImportHandle  Local import handle
+@Return         PVRSRV_ERROR:       PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnmakeLocalImportHandle(const PVRSRV_DEV_CONNECTION *psConnection,
+                              IMG_HANDLE hLocalImportHandle);
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+/**************************************************************************/ /*!
+@Function       PVRSRVExport
+@Description    Given a memory allocation allocated with Devmem_Allocate(),
+                create a "cookie" that can be passed intact, by the caller's
+                own choice of secure IPC, to another process and used as the
+                argument to "map" to map this memory into a heap in the
+                target process. N.B. This can also be used to map into
+                multiple heaps in one process, though that's not the
+                intention.
+
+                Note that the caller must later call Unexport before freeing
+                the memory.
+@Input          hMemDesc        handle to the descriptor of the memory to be
+                                exported
+@Output         phExportCookie  On success, a handle to the exported cookie
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVExportDevMem(PVRSRV_MEMDESC hMemDesc,
+                                PVRSRV_DEVMEM_EXPORTCOOKIE *phExportCookie);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVUnexport
+@Description    Undo the export caused by "PVRSRVExport". Note that it
+                doesn't actually tear down any mapping made by processes that
+                received the export cookie; it simply makes the cookie null
+                and void and prevents further mappings.
+@Input          hMemDesc        handle to the descriptor of the memory which
+                                will no longer be exported
+@Output         phExportCookie  On success, the export cookie provided will be
+                                set to null
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVUnexportDevMem(PVRSRV_MEMDESC hMemDesc,
+                                  PVRSRV_DEVMEM_EXPORTCOOKIE *phExportCookie);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVImportDevMem
+@Description    Import memory that was previously exported with PVRSRVExport()
+                into the current process.
+
+                Note: This call only makes the memory accessible to this
+                process, it doesn't map it into the device or CPU.
+
+@Input          psConnection    Connection to services
+@Input          phExportCookie  Ptr to the handle of the export cookie
+                                identifying the exported memory
+@Output         phMemDescOut    On success, a handle to a new memory descriptor
+                                representing the memory as mapped into the
+                                local process address space.
+@Input          uiFlags         Device memory mapping flags
+@Input          pszText         Text to describe the import
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVImportDevMem(const PVRSRV_DEV_CONNECTION *psConnection,
+                                PVRSRV_DEVMEM_EXPORTCOOKIE *phExportCookie,
+                                PVRSRV_MEMMAP_FLAGS_T uiFlags,
+                                PVRSRV_MEMDESC *phMemDescOut);
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+/**************************************************************************/ /*!
+@Function       PVRSRVIsDeviceMemAddrValid
+@Description    Checks if the given device virtual memory address is valid
+                from the GPU's point of view.
+
+                This method is intended to be called by a process that imported
+                another process' memory context, hence the expected
+                PVRSRV_REMOTE_DEVMEMCTX parameter.
+
+                See PVRSRVAcquireRemoteDevMemContext for details about
+                importing memory contexts.
+
+@Input          hContext    Handle to the memory context
+@Input          sDevVAddr   Device 40-bit virtual memory address
+@Return         PVRSRV_OK if address is valid or
+                PVRSRV_ERROR_INVALID_GPU_ADDR when address is invalid
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVIsDeviceMemAddrValid(PVRSRV_REMOTE_DEVMEMCTX hContext,
+                                        IMG_DEV_VIRTADDR sDevVAddr);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDevmemPin
+@Description    This is the counterpart to PVRSRVDevmemUnpin. It is meant to be
+                called after unpinning an allocation.
+
+                It will make an unpinned allocation available again and
+                unregister it from the OS shrinker. In case the shrinker
+                was invoked by the OS while the allocation was unpinned, it
+                will allocate new physical pages.
+
+                If any GPU mapping existed before, the same virtual address
+                range will be valid again.
+
+@Input          hMemDesc        The MemDesc that is going to be pinned.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the pre-unpin content
+                                is still present and can be reused.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY if the memory has
+                                been pinned successfully but the pre-unpin
+                                content was lost.
+
+                                PVRSRV_ERROR_INVALID_PARAMS if the MemDesc is
+                                invalid e.g. NULL.
+
+                                PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES if the
+                                memory of the allocation is lost and we failed
+                                to allocate new pages.
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVDevmemPin(PVRSRV_MEMDESC hMemDesc);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDevmemUnpin
+@Description    Unpins an allocation. Unpinning means that the memory must
+                not be accessed anymore by either the CPU or the GPU.
+                The physical memory pages will be registered with a shrinker
+                and the OS is able to reclaim them in OOM situations when the
+                shrinker is invoked.
+
+                The counterpart to this is PVRSRVDevmemPin() which
+                checks if the physical pages were reclaimed by the OS and then
+                either allocates new physical pages or just unregisters the
+                allocation from the shrinker. The device virtual address range
+                (if any existed) will be kept.
+
+                The GPU mapping will be kept but is going to be invalidated.
+                It is allowed to free an unpinned allocation or to remove its
+                GPU mapping.
+
+                RESTRICTIONS:
+                - Unpinning should only be done if the caller is sure that
+                the GPU finished all pending/running operations on the allocation.
+
+                - The caller must ensure that no process other than the
+                calling one has imported or mapped the allocation, otherwise
+                the unpinning will fail.
+
+                - All CPU mappings have to be removed beforehand by the caller.
+
+                - Any attempts to map the allocation while it is unpinned are
+                forbidden.
+
+                - When using PVRSRVAllocDeviceMem() the caller must allocate
+                whole pages from the chosen heap to avoid suballocations.
+
+@Input          hMemDesc       The MemDesc that is going to be unpinned.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success.
+
+                                PVRSRV_ERROR_INVALID_PARAMS if the passed
+                                allocation is not a multiple of the heap page
+                                size but was allocated with
+                                PVRSRVAllocDeviceMem(), or if it is NULL.
+
+                                PVRSRV_ERROR_PMR_STILL_REFERENCED if the passed
+                                allocation is still referenced i.e. is still
+                                exported or mapped somewhere else.
+
+                                PVRSRV_ERROR_STILL_MAPPED will be returned if
+                                the calling process still has CPU mappings set
+                                up, or the GPU mapping was acquired more than
+                                once.
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVDevmemUnpin(PVRSRV_MEMDESC hMemDesc);
+
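+/* Pin/unpin sketch (illustrative only): unpin a page-multiple allocation
+ * while it is idle, and re-validate its content when pinning it again.
+ *
+ *   if (PVRSRVDevmemUnpin(hMemDesc) == PVRSRV_OK)
+ *   {
+ *       ... the OS may reclaim the physical pages here ...
+ *
+ *       if (PVRSRVDevmemPin(hMemDesc) == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ *       {
+ *           ... re-initialise: the pre-unpin content was lost ...
+ *       }
+ *   }
+ */
+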
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDevmemGetSize
+@Description    Returns the allocated size of this device memory allocation.
+
+@Input          hMemDesc    Handle to the memory allocation
+@Output         puiSize     Return value for the size
+@Return         PVRSRV_OK on success or
+                PVRSRV_ERROR_INVALID_PARAMS
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVDevmemGetSize(PVRSRV_MEMDESC hMemDesc, IMG_DEVMEM_SIZE_T* puiSize);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVExportDevMemContext
+@Description    Makes the given memory context available to other processes that
+                can get a handle to it via PVRSRVAcquireRemoteDevmemContext.
+                This handle can be used for e.g. the breakpoint functions.
+
+                The context will only be available to other processes that
+                are able to pass in a memory descriptor that is shared
+                between this and the importing process. We use the memory
+                descriptor to identify the
+                correct context and verify that the caller is allowed to request
+                the context.
+
+                The whole mechanism is intended to be used with a debugger
+                that can, for example, load USC breakpoint handlers into the
+                shared allocation and then use the acquired remote context
+                (that is exported here) to set/clear breakpoints in USC code.
+
+@Input          hLocalDevmemCtx    Context to export
+@Input          hSharedAllocation  A memory descriptor that points to a shared allocation
+                                   between the two processes. Must be in the given context.
+@Output         phExportCtx        A handle to the exported context that is needed for
+                                   the destruction with PVRSRVUnexportDevMemContext().
+@Return         PVRSRV_ERROR:      PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                   error code
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVExportDevMemContext(PVRSRV_DEVMEMCTX hLocalDevmemCtx,
+                          PVRSRV_MEMDESC hSharedAllocation,
+                          PVRSRV_EXPORT_DEVMEMCTX *phExportCtx);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVUnexportDevMemContext
+@Description    Removes the context from the list of shareable contexts
+                that can be imported via PVRSRVAcquireRemoteDevMemContext.
+
+@Input          hExportCtx      An export context retrieved from
+                                PVRSRVExportDevMemContext.
+*/ /***************************************************************************/
+extern void
+PVRSRVUnexportDevMemContext(PVRSRV_EXPORT_DEVMEMCTX hExportCtx);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAcquireRemoteDevMemContext
+@Description    Retrieves an exported context that has been made available with
+                PVRSRVExportDevmemContext in the remote process.
+
+                hSharedMemDesc must be a memory descriptor pointing to the same
+                physical resource as the one passed to PVRSRVExportDevmemContext
+                in the remote process.
+                The memory descriptor has to be retrieved from the remote process
+                via a secure buffer export/import mechanism like DMABuf.
+
+@Input          hDevmemCtx         Memory context of the calling process.
+@Input          hSharedAllocation  The memory descriptor used to export the context
+@Output         phRemoteCtx        Handle to the remote context.
+@Return         PVRSRV_ERROR:      PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                   error code
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVAcquireRemoteDevMemContext(PVRSRV_DEVMEMCTX hDevmemCtx,
+                                 PVRSRV_MEMDESC hSharedAllocation,
+                                 PVRSRV_REMOTE_DEVMEMCTX *phRemoteCtx);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVReleaseRemoteDevMemContext
+@Description    Releases the remote context and destroys it if this is the last
+                reference.
+
+@Input          hRemoteCtx      Handle to the remote context that will be removed.
+*/ /***************************************************************************/
+extern void
+PVRSRVReleaseRemoteDevMemContext(PVRSRV_REMOTE_DEVMEMCTX hRemoteCtx);
+
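+/* Context-sharing sketch (illustrative only; handle names are hypothetical
+ * and error handling is elided): process A exports its context using a
+ * shared allocation; process B, holding a MemDesc for the same physical
+ * memory (e.g. obtained via a secure export/import mechanism such as
+ * DMABuf), acquires the remote context.
+ *
+ *   Process A:
+ *     PVRSRVExportDevMemContext(hCtxA, hSharedMemDesc, &hExportCtx);
+ *
+ *   Process B:
+ *     PVRSRV_REMOTE_DEVMEMCTX hRemote;
+ *     PVRSRVAcquireRemoteDevMemContext(hCtxB, hSharedMemDesc, &hRemote);
+ *     ... e.g. PVRSRVIsDeviceMemAddrValid(hRemote, sDevVAddr) ...
+ *     PVRSRVReleaseRemoteDevMemContext(hRemote);
+ */
+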
+/*************************************************************************/ /*!
+@Function       PVRSRVRegisterDevmemPageFaultNotify
+@Description    Registers to be notified when a page fault occurs on a
+                specific device memory context.
+@Input          psDevmemCtx     The context to be notified about.
+@Return         PVRSRV_ERROR.
+*/ /**************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVRegisterDevmemPageFaultNotify(PVRSRV_DEVMEMCTX psDevmemCtx);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVUnregisterDevmemPageFaultNotify
+@Description    Unregisters from being notified when a page fault occurs on
+                a specific device memory context.
+@Input          psDevmemCtx     The context to be unregistered from.
+@Return         PVRSRV_ERROR.
+*/ /**************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVUnregisterDevmemPageFaultNotify(PVRSRV_DEVMEMCTX psDevmemCtx);
+
+#if defined __cplusplus
+};
+#endif
+#endif /* PVRSRV_DEVMEM_H */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_error.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_error.h
new file mode 100644
index 0000000..82ef82a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_error.h
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File           pvrsrv_error.h
+@Title          services error enumerant
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines error codes used by any/all services modules
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__PVRSRV_ERROR_H__)
+#define __PVRSRV_ERROR_H__
+
+/*!
+ *****************************************************************************
+ * Error values
+ *****************************************************************************/
+typedef enum PVRSRV_ERROR
+{
+	PVRSRV_OK,
+#define PVRE(x) x,
+#include "pvrsrv_errors.h"
+#undef PVRE
+	PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_ERROR;
+
+#endif /* !defined (__PVRSRV_ERROR_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_errors.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_errors.h
new file mode 100644
index 0000000..3537df3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_errors.h
@@ -0,0 +1,375 @@
+/*************************************************************************/ /*!
+@File           pvrsrv_errors.h
+@Title          Services error codes
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines error codes used by any/all services modules
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Don't add include guards to this file! */
+
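+/* Because there are no include guards, this file can be re-included with
+ * a different definition of PVRE() each time. As a hypothetical example
+ * (not part of this driver), a name table whose indices line up with the
+ * enum values could be generated the same way:
+ *
+ *   static const char *const apszErrorNames[] = {
+ *       "PVRSRV_OK",
+ *   #define PVRE(x) #x,
+ *   #include "pvrsrv_errors.h"
+ *   #undef PVRE
+ *   };
+ */
+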
+PVRE(PVRSRV_ERROR_OUT_OF_MEMORY)
+PVRE(PVRSRV_ERROR_TOO_FEW_BUFFERS)
+PVRE(PVRSRV_ERROR_INVALID_PARAMS)
+PVRE(PVRSRV_ERROR_INIT_FAILURE)
+PVRE(PVRSRV_ERROR_CANT_REGISTER_CALLBACK)
+PVRE(PVRSRV_ERROR_INVALID_DEVICE)
+PVRE(PVRSRV_ERROR_NOT_OWNER)
+PVRE(PVRSRV_ERROR_BAD_MAPPING)
+PVRE(PVRSRV_ERROR_TIMEOUT)
+PVRE(PVRSRV_ERROR_NOT_IMPLEMENTED)
+PVRE(PVRSRV_ERROR_FLIP_CHAIN_EXISTS)
+PVRE(PVRSRV_ERROR_INVALID_SWAPINTERVAL)
+PVRE(PVRSRV_ERROR_SCENE_INVALID)
+PVRE(PVRSRV_ERROR_STREAM_ERROR)
+PVRE(PVRSRV_ERROR_FAILED_DEPENDENCIES)
+PVRE(PVRSRV_ERROR_CMD_NOT_PROCESSED)
+PVRE(PVRSRV_ERROR_CMD_TOO_BIG)
+PVRE(PVRSRV_ERROR_DEVICE_REGISTER_FAILED)
+PVRE(PVRSRV_ERROR_TOOMANYBUFFERS)
+PVRE(PVRSRV_ERROR_NOT_SUPPORTED)
+PVRE(PVRSRV_ERROR_PROCESSING_BLOCKED)
+PVRE(PVRSRV_ERROR_CANNOT_FLUSH_QUEUE)
+PVRE(PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
+PVRE(PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS)
+PVRE(PVRSRV_ERROR_RETRY)
+PVRE(PVRSRV_ERROR_DDK_VERSION_MISMATCH)
+PVRE(PVRSRV_ERROR_DDK_BUILD_MISMATCH)
+PVRE(PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH)
+PVRE(PVRSRV_ERROR_BVNC_MISMATCH)
+PVRE(PVRSRV_ERROR_FWPROCESSOR_MISMATCH)
+PVRE(PVRSRV_ERROR_UPLOAD_TOO_BIG)
+PVRE(PVRSRV_ERROR_INVALID_FLAGS)
+PVRE(PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS)
+PVRE(PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY)
+PVRE(PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR)
+PVRE(PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED)
+PVRE(PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+PVRE(PVRSRV_ERROR_IOCTL_CALL_FAILED)
+PVRE(PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR)
+PVRE(PVRSRV_ERROR_MMU_CONFIG_IS_WRONG)
+PVRE(PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_CREATE_HEAP)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_UNMAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE)
+PVRE(PVRSRV_ERROR_MMU_LIVE_ALLOCATIONS_IN_HEAP)
+PVRE(PVRSRV_ERROR_MMU_RESERVATION_NOT_INSIDE_HEAP)
+PVRE(PVRSRV_ERROR_PMR_NEW_MEMORY)
+PVRE(PVRSRV_ERROR_PMR_STILL_REFERENCED)
+PVRE(PVRSRV_ERROR_PMR_CLIENT_NOT_TRUSTED)
+PVRE(PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES)
+PVRE(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY)
+PVRE(PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES)
+PVRE(PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE)
+PVRE(PVRSRV_ERROR_PMR_NOT_PERMITTED)
+PVRE(PVRSRV_ERROR_PMR_ALREADY_OCCUPIED)
+PVRE(PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR)
+PVRE(PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR)
+PVRE(PVRSRV_ERROR_PMR_WRONG_PMR_TYPE)
+PVRE(PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS)
+PVRE(PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE)
+PVRE(PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE)
+PVRE(PVRSRV_ERROR_PMR_MAPPINGTABLE_MISMATCH)
+PVRE(PVRSRV_ERROR_PMR_INVALID_CHUNK)
+PVRE(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING)
+PVRE(PVRSRV_ERROR_PMR_EMPTY)
+PVRE(PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND)
+PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_UNMAP_FAILED)
+PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED)
+PVRE(PVRSRV_ERROR_PMR_PAGE_POISONING_FAILED)
+PVRE(PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP)
+PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX)
+PVRE(PVRSRV_ERROR_DEVICEMEM_MAP_FAILED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_NON_ZERO_USAGE_COUNT)
+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA)
+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP)
+PVRE(PVRSRV_ERROR_INVALID_MMU_TYPE)
+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT)
+PVRE(PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_PCI_CALL_FAILED)
+PVRE(PVRSRV_ERROR_PCI_REGION_TOO_SMALL)
+PVRE(PVRSRV_ERROR_PCI_REGION_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH)
+PVRE(PVRSRV_ERROR_REGISTER_BASE_NOT_SET)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL)
+PVRE(PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES)
+PVRE(PVRSRV_ERROR_FAILED_TO_FREE_PAGES)
+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_PAGES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES)
+PVRE(PVRSRV_ERROR_STILL_MAPPED)
+PVRE(PVRSRV_ERROR_MAPPING_NOT_FOUND)
+PVRE(PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_INVALID_SEGMENT_BLOCK)
+PVRE(PVRSRV_ERROR_INVALID_GFXDEVDEVDATA)
+PVRE(PVRSRV_ERROR_INVALID_DEVINFO)
+PVRE(PVRSRV_ERROR_INVALID_MEMINFO)
+PVRE(PVRSRV_ERROR_INVALID_MISCINFO)
+PVRE(PVRSRV_ERROR_UNKNOWN_IOCTL)
+PVRE(PVRSRV_ERROR_INVALID_CONTEXT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT)
+PVRE(PVRSRV_ERROR_INVALID_HEAP)
+PVRE(PVRSRV_ERROR_INVALID_KERNELINFO)
+PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE)
+PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE)
+PVRE(PVRSRV_ERROR_INVALID_WRAP_TYPE)
+PVRE(PVRSRV_ERROR_INVALID_PHYS_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_CPU_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_HEAPINFO)
+PVRE(PVRSRV_ERROR_INVALID_PERPROC)
+PVRE(PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO)
+PVRE(PVRSRV_ERROR_INVALID_MAP_REQUEST)
+PVRE(PVRSRV_ERROR_INVALID_UNMAP_REQUEST)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP)
+PVRE(PVRSRV_ERROR_MAPPING_STILL_IN_USE)
+PVRE(PVRSRV_ERROR_EXCEEDED_HW_LIMITS)
+PVRE(PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD)
+PVRE(PVRSRV_ERROR_THREAD_READ_ERROR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR)
+PVRE(PVRSRV_ERROR_ISR_ALREADY_INSTALLED)
+PVRE(PVRSRV_ERROR_ISR_NOT_INSTALLED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE)
+PVRE(PVRSRV_ERROR_INVALID_CCB_COMMAND)
+PVRE(PVRSRV_ERROR_KERNEL_CCB_FULL)
+PVRE(PVRSRV_ERROR_FLIP_FAILED)
+PVRE(PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED)
+PVRE(PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE)
+PVRE(PVRSRV_ERROR_TIMEOUT_WAITING_FOR_CLIENT_CCB)
+PVRE(PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED)
+PVRE(PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG)
+PVRE(PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG)
+PVRE(PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID)
+PVRE(PVRSRV_ERROR_BLIT_SETUP_FAILED)
+PVRE(PVRSRV_ERROR_SUBMIT_NEEDED)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_AVAILABLE)
+PVRE(PVRSRV_ERROR_PDUMP_BUFFER_FULL)
+PVRE(PVRSRV_ERROR_PDUMP_BUF_OVERFLOW)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_ACTIVE)
+PVRE(PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES)
+PVRE(PVRSRV_ERROR_MUTEX_DESTROY_FAILED)
+PVRE(PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR)
+PVRE(PVRSRV_ERROR_INSUFFICIENT_SCRIPT_SPACE)
+PVRE(PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND)
+PVRE(PVRSRV_ERROR_PROCESS_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_PROCESS_NOT_FOUND)
+PVRE(PVRSRV_ERROR_SRV_CONNECT_FAILED)
+PVRE(PVRSRV_ERROR_SRV_DISCONNECT_FAILED)
+PVRE(PVRSRV_ERROR_DEINT_PHASE_FAILED)
+PVRE(PVRSRV_ERROR_INIT2_PHASE_FAILED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE)
+PVRE(PVRSRV_ERROR_NO_DC_DEVICES_FOUND)
+PVRE(PVRSRV_ERROR_DC_DEVICE_INACCESSIBLE)
+PVRE(PVRSRV_ERROR_DC_INVALID_MAXDEPTH)
+PVRE(PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNREGISTER_DEVICE)
+PVRE(PVRSRV_ERROR_NO_DEVICEDATA_FOUND)
+PVRE(PVRSRV_ERROR_NO_DEVICENODE_FOUND)
+PVRE(PVRSRV_ERROR_NO_CLIENTNODE_FOUND)
+PVRE(PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INIT_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_KILL_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER)
+PVRE(PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT)
+PVRE(PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION)
+PVRE(PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_ALLOCATED)
+PVRE(PVRSRV_ERROR_HANDLE_TYPE_MISMATCH)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_SHAREABLE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_INVALID_SUBHANDLE)
+PVRE(PVRSRV_ERROR_HANDLE_BATCH_IN_USE)
+PVRE(PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE)
+PVRE(PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP)
+PVRE(PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE)
+PVRE(PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE)
+PVRE(PVRSRV_ERROR_INVALID_DEVICEID)
+PVRE(PVRSRV_ERROR_DEVICEID_NOT_FOUND)
+PVRE(PVRSRV_ERROR_MEMORY_TEST_FAILED)
+PVRE(PVRSRV_ERROR_CPUPADDR_TEST_FAILED)
+PVRE(PVRSRV_ERROR_COPY_TEST_FAILED)
+PVRE(PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK)
+PVRE(PVRSRV_ERROR_CLOCK_REQUEST_FAILED)
+PVRE(PVRSRV_ERROR_DISABLE_CLOCK_FAILURE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK)
+PVRE(PVRSRV_ERROR_UNKNOWN_SGL_ERROR)
+PVRE(PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE)
+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE)
+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+PVRE(PVRSRV_ERROR_BAD_SYNC_STATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID)
+PVRE(PVRSRV_ERROR_PARAMETER_BUFFER_INVALID_ALIGNMENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ACQUIRE_CONNECTION)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CONNECTION)
+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_IN_USE)
+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_INVALID)
+PVRE(PVRSRV_ERROR_HP_REQUEST_TOO_LONG)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM_OP)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_CONTEXT)
+PVRE(PVRSRV_ERROR_BP_NOT_SET)
+PVRE(PVRSRV_ERROR_BP_ALREADY_SET)
+PVRE(PVRSRV_ERROR_FEATURE_DISABLED)
+PVRE(PVRSRV_ERROR_REG_CONFIG_ENABLED)
+PVRE(PVRSRV_ERROR_REG_CONFIG_FULL)
+PVRE(PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE)
+PVRE(PVRSRV_ERROR_MEMORY_ACCESS)
+PVRE(PVRSRV_ERROR_NO_SYSTEM_BUFFER)
+PVRE(PVRSRV_ERROR_DC_INVALID_CONFIG)
+PVRE(PVRSRV_ERROR_DC_INVALID_CROP_RECT)
+PVRE(PVRSRV_ERROR_DC_INVALID_DISPLAY_RECT)
+PVRE(PVRSRV_ERROR_DC_INVALID_BUFFER_DIMS)
+PVRE(PVRSRV_ERROR_DC_INVALID_TRANSFORM)
+PVRE(PVRSRV_ERROR_DC_INVALID_SCALE)
+PVRE(PVRSRV_ERROR_DC_INVALID_CUSTOM)
+PVRE(PVRSRV_ERROR_DC_TOO_MANY_PIPES)
+PVRE(PVRSRV_ERROR_DC_INVALID_PLANE_ALPHA)
+PVRE(PVRSRV_ERROR_NOT_READY)
+PVRE(PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_PIXEL_FORMAT)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_DIMS)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_TIMER)
+PVRE(PVRSRV_ERROR_NOT_FOUND)
+PVRE(PVRSRV_ERROR_ALREADY_OPEN)
+PVRE(PVRSRV_ERROR_STREAM_MISUSE)
+PVRE(PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG)
+PVRE(PVRSRV_ERROR_PHYSMEM_NOT_ALLOCATED)
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MAX)
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MIN)
+PVRE(PVRSRV_ERROR_INVALID_PB_CONFIG)
+PVRE(PVRSRV_ERROR_META_THREAD0_NOT_ENABLED)
+PVRE(PVRSRV_ERROR_NOT_AUTHENTICATED)
+PVRE(PVRSRV_ERROR_REQUEST_TDFWCODE_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_INIT_TDFWCODE_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_INIT_TDSECUREBUF_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_MUTEX_ALREADY_CREATED)
+PVRE(PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED)
+PVRE(PVRSRV_ERROR_ALREADY_EXISTS)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SEND_PULSE)
+PVRE(PVRSRV_ERROR_TASK_FAILED)
+PVRE(PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+PVRE(PVRSRV_ERROR_INVALID_GPU_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_OFFSET)
+PVRE(PVRSRV_ERROR_CCCB_STALLED)
+PVRE(PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_NOT_ENABLED)
+PVRE(PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL)
+PVRE(PVRSRV_ERROR_FW_IMAGE_MISMATCH)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+PVRE(PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL)
+PVRE(PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX)
+PVRE(PVRSRV_ERROR_NONZERO_REFCOUNT)
+PVRE(PVRSRV_ERROR_SETAFFINITY_FAILED)
+PVRE(PVRSRV_ERROR_INTERNAL_ERROR)
+PVRE(PVRSRV_ERROR_BRIDGE_EFAULT)
+PVRE(PVRSRV_ERROR_BRIDGE_EINVAL)
+PVRE(PVRSRV_ERROR_BRIDGE_ENOMEM)
+PVRE(PVRSRV_ERROR_BRIDGE_ERANGE)
+PVRE(PVRSRV_ERROR_BRIDGE_EPERM)
+PVRE(PVRSRV_ERROR_BRIDGE_ENOTTY)
+PVRE(PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)
+PVRE(PVRSRV_ERROR_PROBE_DEFER)
+PVRE(PVRSRV_ERROR_INVALID_ALIGNMENT)
+PVRE(PVRSRV_ERROR_CLOSE_FAILED)
+PVRE(PVRSRV_ERROR_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_CONVERSION_FAILED)
+PVRE(PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL)
+PVRE(PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED)
+PVRE(PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED)
+PVRE(PVRSRV_ERROR_OBJECT_STILL_REFERENCED)
+PVRE(PVRSRV_ERROR_BVNC_UNSUPPORTED)
+PVRE(PVRSRV_ERROR_INVALID_BVNC_PARAMS)
+PVRE(PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT)
+PVRE(PVRSRV_ERROR_PID_ALREADY_REGISTERED)
+PVRE(PVRSRV_ERROR_PID_NOT_REGISTERED)
+PVRE(PVRSRV_ERROR_SIGNAL_FAILED)
+PVRE(PVRSRV_ERROR_INVALID_NOTIF_STREAM)
+PVRE(PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED)
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_memallocflags.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_memallocflags.h
new file mode 100644
index 0000000..2bc7a88
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_memallocflags.h
@@ -0,0 +1,606 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file defines flags used on memory allocations and mappings
+                These flags are relevant throughout the memory management
+                software stack and are specified by users of services and
+                understood by all levels of the memory management in both
+                client and server.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_MEMALLOCFLAGS_H
+#define PVRSRV_MEMALLOCFLAGS_H
+
+#include "img_types.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_memallocflags.h"
+#endif
+typedef IMG_UINT32 PVRSRV_MEMALLOCFLAGS_T;
+
+/*
+ * --- MAPPING FLAGS ---
+ * | 0-3    | 4-7    | 8-10        | 11-13       | 14          |
+ * | GPU-RW | CPU-RW | GPU-Caching | CPU-Caching | KM-Mappable |
+ *
+ * --- MISC FLAGS ---
+ * | 15    | 16        | 17       | 18  | 19                | 20              |
+ * | Defer | CPU-Local | FW-Local | SVM | Sparse-Dummy-Page | CPU-Cache-Clean |
+ *
+ * --- DEV CONTROL FLAGS ---
+ * | 24-27        |
+ * | Device-Flags |
+ *
+ * --- MEMSET FLAGS ---
+ * | 29             | 30              | 31            |
+ * | Poison-On-Free | Poison-On-Alloc | Zero-On-Alloc |
+ *
+ */
+
+/*!
+ *  **********************************************************
+ *  *                                                        *
+ *  *                       MAPPING FLAGS                    *
+ *  *                                                        *
+ *  **********************************************************
+ */
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_READABLE
+ *
+ * This flag affects the device MMU protection flags, and specifies
+ * that the memory may be read by the GPU (is this always true?)
+ *
+ * Typically all device memory allocations would specify this flag.
+ *
+ * At the moment, memory allocations without this flag are not supported
+ *
+ * This flag will live with the PMR, thus subsequent mappings would
+ * honour this flag.
+ *
+ * This is a dual purpose flag.  It specifies that memory is permitted
+ * to be read by the GPU, and also requests that the allocation is
+ * mapped into the GPU as a readable mapping
+ *
+ * To be clear:
+ * - When used as an argument on PMR creation; it specifies
+ *       that GPU readable mappings will be _permitted_
+ * - When used as an argument to a "map" function: it specifies
+ *       that a GPU readable mapping is _desired_
+ * - When used as an argument to "AllocDeviceMem": it specifies
+ *       that the PMR will be created with permission to be mapped
+ *       with a GPU readable mapping, _and_ that this PMR will be
+ *       mapped with a GPU readable mapping.
+ * This distinction becomes important when (a) we export allocations;
+ * and (b) when we separate the creation of the PMR from the mapping.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_READABLE 		(1U<<0)
+#define PVRSRV_CHECK_GPU_READABLE(uiFlags) 		(((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READABLE) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE
+ *
+ * This flag affects the device MMU protection flags, and specifies
+ * that the memory may be written by the GPU
+ *
+ * Using this flag on an allocation signifies that the allocation is
+ * intended to be written by the GPU.
+ *
+ * Omitting this flag causes a read-only mapping.
+ *
+ * This flag will live with the PMR, thus subsequent mappings would
+ * honour this flag.
+ *
+ * This is a dual purpose flag.  It specifies that memory is permitted
+ * to be written by the GPU, and also requests that the allocation is
+ * mapped into the GPU as a writable mapping (see note above about
+ * permission vs. mapping mode, and why this flag causes permissions
+ * to be inferred from mapping mode on first allocation)
+ *
+ * N.B.  This flag has no relevance to the CPU's MMU mapping, if any,
+ * and would therefore not enforce read-only mapping on CPU.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE       (1U<<1) /*!< mapped as writable to the GPU */
+#define PVRSRV_CHECK_GPU_WRITEABLE(uiFlags)				(((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED  (1U<<2) /*!< can be mapped as GPU readable in another GPU mem context */
+#define PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags) 		(((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (1U<<3) /*!< can be mapped as GPU writable in another GPU mem context */
+#define PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags)		(((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_READABLE        (1U<<4) /*!< mapped as readable to the CPU */
+#define PVRSRV_CHECK_CPU_READABLE(uiFlags) 				(((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READABLE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE       (1U<<5) /*!< mapped as writable to the CPU */
+#define PVRSRV_CHECK_CPU_WRITEABLE(uiFlags)				(((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED  (1U<<6) /*!< can be mapped as CPU readable in another CPU mem context */
+#define PVRSRV_CHECK_CPU_READ_PERMITTED(uiFlags)		(((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (1U<<7) /*!< can be mapped as CPU writable in another CPU mem context */
+#define PVRSRV_CHECK_CPU_WRITE_PERMITTED(uiFlags)		(((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) != 0)
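+
+/* Illustrative example (added annotation, not from the original header):
+ * a buffer written by the CPU and read by the GPU might request
+ *
+ *   PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE
+ *
+ * With these flags PVRSRV_CHECK_GPU_READABLE(uiFlags) evaluates to
+ * nonzero and PVRSRV_CHECK_GPU_WRITEABLE(uiFlags) to zero, so the GPU
+ * mapping is read-only.
+ */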
+
+
+/*
+ *  **********************************************************
+ *  *                                                        *
+ *  *                    CACHE CONTROL FLAGS                 *
+ *  *                                                        *
+ *  **********************************************************
+ */
+
+/*
+	GPU domain
+	==========
+
+	The following defines are used to control the GPU cache bit field.
+	The defines are mutually exclusive.
+
+	A helper macro, PVRSRV_GPU_CACHE_MODE, is provided to obtain just the GPU cache
+	bit field from the flags. This should be used whenever the GPU cache mode
+	needs to be determined.
+*/
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_UNCACHED
+
+   GPU domain. Request uncached memory. This means that any writes to memory
+  allocated with this flag are written straight to memory and thus are coherent
+  for any device in the system.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED         		(0U<<8)
+#define PVRSRV_CHECK_GPU_UNCACHED(uiFlags)		 		(PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE
+
+   GPU domain. Use write combiner (if supported) to combine sequential writes
+   together to reduce memory access by doing burst writes.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE    		(1U<<8)
+#define PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags)	 		(PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT
+
+    GPU domain. This flag affects the GPU MMU protection flags.
+    The allocation will be cached.
+    Services will try to set the coherent bit in the GPU MMU tables so the
+    GPU cache is snooping the CPU cache. If coherency is not supported the
+    caller is responsible to ensure the caches are up to date.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT   		(2U<<8)
+#define PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) 		(PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT
+
+   GPU domain. Request cached memory, but not coherent (i.e. no cache snooping).
+   Services will flush the GPU internal caches after every GPU task so no
+   cache maintenance requests from the users are necessary.
+
+    Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
+    expansion.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT 		(3U<<8)
+#define PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)		(PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_CACHED
+
+    GPU domain. This flag is for internal use only and is used to indicate
+    that the underlying allocation should be cached on the GPU
+    after all the snooping and coherent checks have been done
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHED					(7U<<8)
+#define PVRSRV_CHECK_GPU_CACHED(uiFlags)				(PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK
+
+    GPU domain. GPU cache mode mask
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK  		(7U<<8)
+#define PVRSRV_GPU_CACHE_MODE(uiFlags)					((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
+
+
+/*
+	CPU domain
+	==========
+
+	The following defines are used to control the CPU cache bit field.
+	The defines are mutually exclusive.
+
+	A helper macro, PVRSRV_CPU_CACHE_MODE, is provided to obtain just the CPU cache
+	bit field from the flags. This should be used whenever the CPU cache mode
+	needs to be determined.
+*/
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
+
+   CPU domain. Request uncached memory. This means that any writes to memory
+    allocated with this flag are written straight to memory and thus are coherent
+    for any device in the system.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED         		(0U<<11)
+#define PVRSRV_CHECK_CPU_UNCACHED(uiFlags)				(PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE
+
+   CPU domain. Use write combiner (if supported) to combine sequential writes
+   together to reduce memory access by doing burst writes.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE 		   	(1U<<11)
+#define PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags)			(PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT
+
+    CPU domain. This flag affects the CPU MMU protection flags.
+    The allocation will be cached.
+    Services will try to set the coherent bit in the CPU MMU tables so the
+    CPU cache is snooping the GPU cache. If coherency is not supported the
+    caller is responsible to ensure the caches are up to date.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT   		(2U<<11)
+#define PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags)		(PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT
+
+    CPU domain. Request cached memory, but not coherent (i.e. no cache snooping).
+    This means that if the allocation needs to transition from one device
+    to another services has to be informed so it can flush/invalidate the
+    appropriate caches.
+
+    Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
+    expansion.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT 		(3U<<11)
+#define PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)		(PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_CACHED
+
+    CPU domain. This flag is for internal use only and is used to indicate
+    that the underlying allocation should be cached on the CPU
+    after all the snooping and coherent checks have been done
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHED					(7U<<11)
+#define PVRSRV_CHECK_CPU_CACHED(uiFlags)				(PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED)
+
+/*!
+	CPU domain. CPU cache mode mask
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK  		(7U<<11)
+#define PVRSRV_CPU_CACHE_MODE(uiFlags)					((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)
+
+/* Helper flags for usual cases */
+
+/*! PVRSRV_MEMALLOCFLAG_UNCACHED
+ * Memory will be uncached on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_UNCACHED             		(PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED)
+#define PVRSRV_CHECK_UNCACHED(uiFlags)					(PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED)
+
+/*! PVRSRV_MEMALLOCFLAG_WRITE_COMBINE
+ * Memory will be write-combined on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_WRITE_COMBINE        		(PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE | PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE)
+#define PVRSRV_CHECK_WRITE_COMBINE(uiFlags)				(PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_WRITE_COMBINE)
+
+/*! PVRSRV_MEMALLOCFLAG_CACHE_COHERENT
+ * Memory will be cached on CPU and GPU
+ * Services will try to set the correct flags in the MMU tables.
+ * In case there is no coherency support the caller has to ensure caches are up to date
+ */
+#define PVRSRV_MEMALLOCFLAG_CACHE_COHERENT       		(PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+#define PVRSRV_CHECK_CACHE_COHERENT(uiFlags)			(PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_COHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT
+ * Memory will be cache-incoherent on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT     		(PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT)
+#define PVRSRV_CHECK_CACHE_INCOHERENT(uiFlags)			(PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT)
+
+/*!
+	Cache mode mask
+*/
+#define PVRSRV_CACHE_MODE(uiFlags)						(PVRSRV_GPU_CACHE_MODE(uiFlags) | PVRSRV_CPU_CACHE_MODE(uiFlags))
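+
+/* Worked example (added annotation, not from the original header): for
+ *
+ *   uiFlags = PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ *             PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
+ *
+ * PVRSRV_GPU_CACHE_MODE(uiFlags) yields (3U<<8) and
+ * PVRSRV_CPU_CACHE_MODE(uiFlags) yields (0U<<11), so both
+ * PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags) and
+ * PVRSRV_CHECK_CPU_UNCACHED(uiFlags) evaluate to true.
+ */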
+
+
+/*!
+   CPU MMU Flags mask -- intended for use internal to services only
+ */
+#define PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK  (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+												PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+												PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)
+
+/*!
+   MMU Flags mask -- intended for use internal to services only - used
+   for partitioning the flags bits and determining which flags to pass
+   down to mmu_common.c
+ */
+#define PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK  (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                                PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                                PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
+
+/*!
+    PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE
+
+    Indicates that the PMR created due to this allocation will support
+    in-kernel CPU mappings.  Only privileged processes may use this
+    flag as it may cause wastage of precious kernel virtual memory on
+    some platforms.
+ */
+#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE 		(1U<<14)
+#define PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags)		(((uiFlags) & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) != 0)
+
+
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *            ALLOC MEMORY FLAGS                          *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * (Bit 15)
+ *
+ */
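+/*!
+    PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC
+
+    Added annotation (not in the original header): no OS backing pages
+    are acquired at allocation time; physical backing is deferred until
+    the memory is actually needed, matching the "Defer" bit in the layout
+    table above and the PVRSRV_CHECK_ON_DEMAND query macro below.
+ */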
+#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC			(1U<<15)
+#define PVRSRV_CHECK_ON_DEMAND(uiFlags)					(((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0)
+
+/*!
+    PVRSRV_MEMALLOCFLAG_CPU_LOCAL
+
+    Indicates that the allocation will primarily be accessed by
+    the CPU, so a UMA allocation (if available) is preferable.
+    If not set, the allocation will primarily be accessed by
+    the GPU, so LMA allocation (if available) is preferable.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_LOCAL 					(1U<<16)
+#define PVRSRV_CHECK_CPU_LOCAL(uiFlags)					(((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL) != 0)
+
+
+/*!
+    PVRSRV_MEMALLOCFLAG_FW_LOCAL
+
+    Indicates that the allocation will primarily be accessed by
+    the FW.
+ */
+#define PVRSRV_MEMALLOCFLAG_FW_LOCAL 					(1U<<17)
+#define PVRSRV_CHECK_FW_LOCAL(uiFlags)					(((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_LOCAL) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_SVM
+
+    Indicates that the allocation will be accessed by the
+    CPU and GPU using the same virtual address, i.e. for
+	all SVM allocs, IMG_CPU_VIRTADDR == IMG_DEV_VIRTADDR
+ */
+#define PVRSRV_MEMALLOCFLAG_SVM_ALLOC 					(1U<<18)
+#define PVRSRV_CHECK_SVM_ALLOC(uiFlags)					(((uiFlags) & PVRSRV_MEMALLOCFLAG_SVM_ALLOC) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING
+
+    Indicates that the memory being allocated is sparse
+    and the sparse regions should not be backed by a dummy page */
+#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING		(1U << 19)
+#define PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiFlags)		(((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) == 0)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN
+
+    Services will clean the cache for the allocated memory.
+    For performance reasons, avoid this flag if the allocation is written
+    to by the CPU anyway before the next GPU kick.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN				(1U<<20)
+#define PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags)			(((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN) != 0)
+
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *            MEMORY ZEROING AND POISONING FLAGS          *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * Zero / Poison, on alloc/free
+ *
+ * We think the following use cases are required:
+ *
+ *  don't poison or zero on alloc or free
+ *     (normal operation, also most efficient)
+ *  poison on alloc
+ *     (for helping to highlight bugs)
+ *  poison on alloc and free
+ *     (for helping to highlight bugs)
+ *  zero on alloc
+ *     (avoid leaking stale data from previous uses of the memory)
+ *  zero on alloc and poison on free
+ *     (avoid leaking stale data from previous uses of the memory,
+ *      while helping to highlight a subset of bugs, e.g. memory
+ *      freed prematurely)
+ *
+ * Since there are more than four combinations, we can't encode them in
+ * just two bits, so we might as well have a separate flag for each of
+ * the three actions.
+ */
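+
+/* Mapping the use cases above onto the flags below (illustrative):
+ *
+ *   zero on alloc:                 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
+ *   poison on alloc:               PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC
+ *   poison on alloc and free:      PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC |
+ *                                  PVRSRV_MEMALLOCFLAG_POISON_ON_FREE
+ *   zero on alloc, poison on free: PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ *                                  PVRSRV_MEMALLOCFLAG_POISON_ON_FREE
+ *
+ * ZERO_ON_ALLOC and POISON_ON_ALLOC must not be combined (see below).
+ */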
+
+/*! PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
+
+    Ensures that the memory allocated is initialised with zeroes.
+ */
+#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC 				(1U<<31)
+#define PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)				(((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0)
+#define PVRSRV_GET_ZERO_ON_ALLOC_FLAG(uiFlags)			((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+
+/*! PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC
+
+    Scribbles over the allocated memory with a poison value
+
+    Not compatible with ZERO_ON_ALLOC
+
+    Poisoning is very deliberately _not_ reflected in PDump as we want
+    a simulation to cry loudly if uninitialised data propagates to a
+    result.
+ */
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC 			(1U<<30)
+#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) 			(((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_POISON_ON_FREE
+
+    Causes memory to be trashed when freed, as a lazy man's security
+    measure.
+ */
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (1U<<29)
+#define PVRSRV_CHECK_POISON_ON_FREE(uiFlags)			(((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) != 0)
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *                Device specific MMU flags               *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * (Bits 24 to 27)
+ *
+ * Some services controlled devices have device specific control
+ * bits in their page table entries, we need to allow these flags
+ * to be passed down the memory management layers so the user
+ * can control these bits.
+ */
+
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET		24
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK		0x0f000000UL
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(n)	\
+			(((n) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
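+
+/* Example (added annotation): PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(1) expands
+ * to ((1 << 24) & 0x0f000000UL) == 0x01000000, i.e. the lowest of the
+ * four device-specific PTE control bits; values of n wider than 4 bits
+ * are masked off.
+ */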
+
+
+/*!
+ * Secure buffer mask -- Flags in the mask are allowed for secure buffers
+ * because they are not related to CPU mappings.
+ */
+#define PVRSRV_MEMALLOCFLAGS_SECBUFMASK  ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+                                           PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED)
+
+
+
+/*!
+  PMR flags mask -- for internal services use only.  This is the set
+  of flags that will be passed down and stored with the PMR, this also
+  includes the MMU flags which the PMR has to pass down to mmu_common.c
+  at PMRMap time.
+*/
+#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK  (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+                                            PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+                                            PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                            PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
+                                            PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+                                            PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                            PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
+                                            PVRSRV_MEMALLOCFLAG_FW_LOCAL | \
+                                            PVRSRV_MEMALLOCFLAG_CPU_LOCAL)
+
+/*!
+  RA differentiation mask
+
+  for use internal to services
+
+  this is the set of flags bits that are able to determine whether a
+  pair of allocations are permitted to live in the same page table.
+  Allocations whose flags differ in any of these places would be
+  allocated from separate RA Imports and therefore would never coexist
+  in the same page.
+  Special cases are zeroing and poisoning of memory. The caller is
+  responsible for setting the sub-allocations to the required values.
+  Differentiating between zeroed and poisoned RA Imports does not make
+  sense because the memory might be reused.
+
+*/
+#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \
+                                                      & \
+                                                      ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC   | \
+                                                        PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+
+/*!
+  Flags that affect _allocation_
+*/
+#define PVRSRV_MEMALLOCFLAGS_PERALLOCFLAGSMASK (0xFFFFFFFFU)
+
+/*!
+  Flags that affect _mapping_
+*/
+#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK   (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+                                                    PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING)
+
+#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0)
+#error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK
+#endif
+
+
+/*!
+  Flags that affect _physical allocations_ in the DevMemX API
+ */
+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_LOCAL | \
+                                                    PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_POISON_ON_FREE)
+
+/*!
+  Flags that affect _virtual allocations_ in the DevMemX API
+ */
+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK  (PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED | \
+                                                    PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED)
+
+#endif /* #ifndef PVRSRV_MEMALLOCFLAGS_H */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_sync_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_sync_km.h
new file mode 100644
index 0000000..b0133fe
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_sync_km.h
@@ -0,0 +1,105 @@
+/*************************************************************************/ /*!
+@File
+@Title         PVR synchronisation interface
+@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description   Types for server side code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef PVRSRV_SYNC_KM_H
+#define PVRSRV_SYNC_KM_H
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*! Implementation independent types for passing fence/timeline to Services.
+ */
+typedef int32_t PVRSRV_FENCE;
+typedef int32_t PVRSRV_TIMELINE;
+
+/*! Maximum length for an annotation name string for fence sync model objects.
+ */
+#define PVRSRV_SYNC_NAME_LENGTH 32
+
+/*! Possible states for a PVRSRV_FENCE */
+typedef enum
+{
+    PVRSRV_FENCE_NOT_SIGNALLED,             /*!< fence has not yet signalled (not all components have signalled) */
+    PVRSRV_FENCE_SIGNALLED                  /*!< fence has signalled (all components have signalled/errored) */
+} PVRSRV_FENCE_STATE;
+
+/* Typedefs for opaque pointers to implementation-specific structures
+ */
+typedef void *SYNC_TIMELINE_OBJ;
+typedef void *SYNC_FENCE_OBJ;
+
+/* Macros for Kick API callers using the fence sync model
+ */
+#define PVRSRV_NO_CHECK_FENCE_REQUIRED      -1   /*!< used when submitted work is not fenced (e.g. first kick) */
+#define PVRSRV_NO_UPDATE_TIMELINE_REQUIRED  -1   /*!< used when caller does not want an update fence generated */
+#define PVRSRV_NO_UPDATE_FENCE_REQUIRED     NULL /*!< used when caller does not want an update fence generated */
+
+/* Macros for Kick API callers NOT using the fence sync model
+ */
+#define PVRSRV_FENCE_INTERFACE_UNUSED     -1     /*!< passed in Timeline and Fence values when interface is unused */
+#define PVRSRV_FENCE_INTERFACE_PTR_UNUSED NULL   /*!< passed in update fence parameter when interface is unused */
+
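+/* Illustrative sketch of a kick call using the fence sync model; the
+ * function name and signature here are hypothetical, only the macro
+ * arguments come from this header:
+ *
+ *   eError = DevKickWork(hContext,
+ *                        PVRSRV_NO_CHECK_FENCE_REQUIRED, // nothing to wait on
+ *                        hTimeline,                      // update timeline
+ *                        &hUpdateFence,                  // receives new fence
+ *                        "annotation");
+ */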
+
+/* PVRSRV Layer internal only
+ * Negative references to timelines and fences are invalid.
+ */
+#define PVRSRV_TIMELINE_INVALID     -1
+#define PVRSRV_FENCE_INVALID        -1
+
+#if PVRSRV_NO_UPDATE_TIMELINE_REQUIRED != PVRSRV_TIMELINE_INVALID
+#error "PVRSRV_NO_UPDATE_TIMELINE_REQUIRED must equal PVRSRV_TIMELINE_INVALID"
+#endif
+
+#if PVRSRV_NO_CHECK_FENCE_REQUIRED != PVRSRV_FENCE_INVALID
+#error "PVRSRV_NO_CHECK_FENCE_REQUIRED must equal PVRSRV_FENCE_INVALID"
+#endif
+
+#if PVRSRV_FENCE_INTERFACE_UNUSED != PVRSRV_FENCE_INVALID
+#error "PVRSRV_FENCE_INTERFACE_UNUSED must equal PVRSRV_FENCE_INVALID"
+#endif
+
+
+#if defined (__cplusplus)
+}
+#endif
+#endif	/* PVRSRV_SYNC_KM_H */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_sync_um.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_sync_um.h
new file mode 100644
index 0000000..622971f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_sync_um.h
@@ -0,0 +1,378 @@
+/*************************************************************************/ /*!
+@File
+@Title         PVR synchronisation interface
+@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description   API for synchronisation functions for client side code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "pvrsrv_sync_km.h"
+
+#ifndef PVRSRV_SYNC_UM_H
+#define PVRSRV_SYNC_UM_H
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVSyncInit
+
+@Description    Initialise the synchronisation code for the calling
+                process. This has to be called before any other functions in
+                this synchronisation module.
+
+@Return         PVRSRV_OK if the initialisation was successful
+                PVRSRV_ERROR_xxx if an error occurred
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVSyncInitI(void);
+#define PVRSRVSyncInit(void) \
+		PVRSRVSyncInitI(void)
+
+/*************************************************************************/ /*!
+@Function       PVRSRVSyncDeinit
+
+@Description    Deinitialises the synchronisation code for the calling process.
+                This has to be called after successful initialisation and after
+                any other call of this module has finished.
+
+@Return         PVRSRV_OK if the deinitialisation was successful
+                PVRSRV_ERROR_xxx if an error occurred
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVSyncDeinitI(void);
+#define PVRSRVSyncDeinit(void) \
+		PVRSRVSyncDeinitI(void)
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVTimelineCreate
+
+@Description    Allocate a new synchronisation timeline.
+                The timeline value is initialised to zero.
+
+@Input          pszTimelineName     String to be used to annotate timeline
+                                    (for debug)
+
+@Output         hTimeline           Handle to created timeline
+
+@Return         PVRSRV_OK if the timeline was successfully created
+                PVRSRV_ERROR_OUT_OF_MEMORY if there was insufficient
+                    memory to create the new timeline
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVTimelineCreateI(PVRSRV_TIMELINE   *phTimeline,
+                      const IMG_CHAR    *pszTimelineName
+                      PVR_DBG_FILELINE_PARAM);
+#define PVRSRVTimelineCreate(hTimeline, pszTimelineName) \
+    PVRSRVTimelineCreateI( (hTimeline), (pszTimelineName) \
+                           PVR_DBG_FILELINE)
+
+/*************************************************************************/ /*!
+@Function       PVRSRVTimelineDestroy
+
+@Description    Destroy a timeline
+                If the timeline has no outstanding checks or updates on
+                it then it will be destroyed immediately.
+                If there are outstanding checks or updates, the timeline
+                will be flagged for destruction once all outstanding
+                checks and updates are destroyed.
+                A timeline marked for destruction may not have further
+                checks or updates created for it.
+
+@Input          hTimeline            The timeline to destroy
+
+@Return         PVRSRV_OK if a valid active timeline was specified
+                PVRSRV_ERROR_INVALID_PARAMS if an unrecognised timeline
+                    was specified
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVTimelineDestroyI(PVRSRV_TIMELINE hTimeline
+                       PVR_DBG_FILELINE_PARAM);
+#define PVRSRVTimelineDestroy(hTimeline) \
+    PVRSRVTimelineDestroyI( (hTimeline) PVR_DBG_FILELINE)
+
+/*************************************************************************/ /*!
+@Function       PVRSRVSWTimelineCreate
+
+@Description    Allocate a new software timeline for synchronisation.
+                Software timelines are different to timelines created with
+                PVRSRVTimelineCreate in that they represent a strictly ordered
+                sequence of events *progressed on the CPU* rather than the GPU
+
+                The sequence of events has to be modelled by the application
+                itself:
+                1. First the application creates a SW timeline (this call)
+                2. After creating some workload on the CPU, the application
+                can create a fence for it by calling PVRSRVSWFenceCreate,
+                passing in the software timeline.
+                3. When the workload has finished and the application wants
+                to signal potential waiters, it can call
+                PVRSRVSWTimelineAdvance, which will signal the oldest
+                fence on this software timeline (see the sketch after this
+                declaration)
+
+                Destroy with PVRSRVTimelineDestroy
+
+@Input          pszSWTimelineName    String to be used to annotate the software
+                                     timeline (for debug)
+
+@Output         phSWTimeline         Handle to created software timeline
+
+@Return         PVRSRV_OK if the timeline was successfully created
+                PVRSRV_ERROR_xxx if an error occurred
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVSWTimelineCreateI(PVRSRV_TIMELINE   *phSWTimeline,
+                        const IMG_CHAR    *pszSWTimelineName
+                        PVR_DBG_FILELINE_PARAM);
+#define PVRSRVSWTimelineCreate(hSWTimeline, pszSWTimelineName) \
+    PVRSRVSWTimelineCreateI( (hSWTimeline), (pszSWTimelineName) \
+                              PVR_DBG_FILELINE)
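+/* Illustrative software timeline usage (a sketch only: error handling is
+ * omitted, and PVRSRVSWFenceCreate/PVRSRVSWTimelineAdvance are declared
+ * elsewhere, so their argument lists are not shown here):
+ *
+ *     PVRSRV_TIMELINE hSWTimeline;
+ *     PVRSRVSWTimelineCreate(&hSWTimeline, "cpu_work_tl");
+ *     // 2. queue CPU work, then create a fence for it with
+ *     //    PVRSRVSWFenceCreate(), passing in hSWTimeline
+ *     // 3. when the work completes, PVRSRVSWTimelineAdvance() signals
+ *     //    the oldest fence on hSWTimeline
+ *     PVRSRVTimelineDestroy(hSWTimeline);
+ */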
+
+/*************************************************************************/ /*!
+@Function       PVRSRVFenceWait
+
+@Description    Wait for a fence checkpoint to be signalled.
+
+@Input          hFence              Handle to the fence
+
+@Input          ui32TimeoutInMs     Maximum time to wait (in milliseconds)
+
+@Return         PVRSRV_OK once the fence has been passed (all component
+                    checkpoints have either signalled or errored)
+                PVRSRV_ERROR_TIMEOUT if the poll has exceeded the timeout
+                PVRSRV_ERROR_INVALID_PARAMS if an unrecognised fence was specified
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVFenceWaitI(PVRSRV_FENCE hFence,
+                 IMG_UINT32 ui32TimeoutInMs
+                 PVR_DBG_FILELINE_PARAM);
+#define PVRSRVFenceWait(hFence, ui32TimeoutInMs) \
+    PVRSRVFenceWaitI( (hFence), (ui32TimeoutInMs) \
+                      PVR_DBG_FILELINE)
+
+/*************************************************************************/ /*!
+@Function       PVRSRVFenceDup
+
+@Description    Create a duplicate of the specified fence.
+                The original fence will remain unchanged.
+                The new fence will be an exact copy of the original and
+                will reference the same timeline checkpoints as the
+                source fence at the time of its creation.
+                Any OSNativeSyncs attached to the original fence will also
+                be attached to the duplicated fence.
+                NB. If the source fence is subsequently merged or deleted
+                    it will then differ from the dup'ed copy (which will
+                    be unaffected).
+
+@Input          hSourceFence        Handle of the fence to be duplicated
+
+@Output         phOutputFence       Handle of newly created duplicate fence
+
+@Return         PVRSRV_OK if the duplicate fence was successfully created
+                PVRSRV_ERROR_INVALID_PARAMS if an unrecognised fence was specified
+                PVRSRV_ERROR_NOMEM if there was insufficient memory to create
+                    the new fence
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVFenceDupI(PVRSRV_FENCE hSourceFence,
+                PVRSRV_FENCE *phOutputFence
+                PVR_DBG_FILELINE_PARAM);
+#define PVRSRVFenceDup(hSourceFence, phOutputFence) \
+    PVRSRVFenceDupI( (hSourceFence), (phOutputFence) \
+                      PVR_DBG_FILELINE)
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVFenceMerge
+
+@Description    Merges two fences to create a new third fence.
+                The original fences will remain unchanged.
+                The new fence will be merge of two original fences and
+                will reference the same timeline checkpoints as the
+                two source fences with the exception that where each
+                source fence contains a checkpoint for the same timeline
+                the output fence will only contain the later of the two
+                checkpoints.
+                Any OSNativeSyncs attached to the original fences will also
+                be attached to the resultant merged fence.
+                If only one of the two source fences is valid, the function
+                shall simply return a duplicate of the valid fence with no
+                error indicated.
+                NB. If the source fences are subsequently merged or deleted
+                    they will then differ from the merged copy (which will
+                    be unaffected).
+
+@Input          hSourceFence1       Handle of the 1st fence to be merged
+
+@Input          hSourceFence2       Handle of the 2nd fence to be merged
+
+@Input          pszFenceName        Name of the created merged fence
+
+@Output         phOutputFence       Handle of the newly created merged fence
+
+@Return         PVRSRV_OK if the merged fence was successfully created
+                PVRSRV_ERROR_INVALID_PARAMS if both source fences are unrecognised
+                PVRSRV_ERROR_NOMEM if there was insufficient memory to create
+                    the new merged fence
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVFenceMergeI(PVRSRV_FENCE hSourceFence1,
+                  PVRSRV_FENCE hSourceFence2,
+                  const IMG_CHAR *pszFenceName,
+                  PVRSRV_FENCE *phOutputFence
+                  PVR_DBG_FILELINE_PARAM);
+#define PVRSRVFenceMerge(hSourceFence1, hSourceFence2, pszFenceName, phOutputFence) \
+    PVRSRVFenceMergeI( (hSourceFence1), (hSourceFence2), (pszFenceName), (phOutputFence) \
+                       PVR_DBG_FILELINE)
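+/* Illustrative merge (a sketch only: error handling is omitted and
+ * hFenceA/hFenceB are assumed to come from earlier work submissions).
+ * The merged fence passes only once both inputs have signalled:
+ *
+ *     PVRSRV_FENCE hBoth;
+ *     PVRSRVFenceMerge(hFenceA, hFenceB, "a_and_b", &hBoth);
+ *     PVRSRVFenceWait(hBoth, 1000);    // wait up to 1000ms
+ *     PVRSRVFenceDestroy(hBoth);
+ */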
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVFenceAccumulate
+
+@Description    Same as PVRSRVFenceMerge but destroys the input fences.
+                If only one of the two source fences is valid, the function
+                shall simply return the valid fence rather than performing
+                a merge.
+
+@Input          hSourceFence1       Handle of the 1st fence to be accumulated
+
+@Input          hSourceFence2       Handle of the 2nd fence to be accumulated
+
+@Input          pszFenceName        Name of the created accumulated fence
+
+@Output         phOutputFence       Handle of the newly created fence
+
+@Return         PVRSRV_OK if the accumulated fence was successfully created or
+                    returned
+                PVRSRV_ERROR_INVALID_PARAMS if both source fences are unrecognised
+                PVRSRV_ERROR_NOMEM if there was insufficient memory to create
+                    the new merged fence
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVFenceAccumulateI(PVRSRV_FENCE hSourceFence1,
+                       PVRSRV_FENCE hSourceFence2,
+                       const IMG_CHAR *pszFenceName,
+                       PVRSRV_FENCE *phOutputFence
+                       PVR_DBG_FILELINE_PARAM);
+#define PVRSRVFenceAccumulate(hSourceFence1, hSourceFence2, pszFenceName, phOutputFence) \
+    PVRSRVFenceAccumulateI( (hSourceFence1), (hSourceFence2), (pszFenceName), (phOutputFence) \
+                            PVR_DBG_FILELINE)
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVFenceDump
+
+@Description    Dumps debug information about the specified fence.
+
+@Input          hFence              Handle to the fence
+
+@Return         PVRSRV_OK if a valid fence was specified
+                PVRSRV_ERROR_INVALID_PARAMS if an invalid fence was specified
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVFenceDumpI(PVRSRV_FENCE hFence
+                 PVR_DBG_FILELINE_PARAM);
+#define PVRSRVFenceDump(hFence) \
+    PVRSRVFenceDumpI( (hFence) PVR_DBG_FILELINE)
+
+/*************************************************************************/ /*!
+@Function       PVRSRVFenceDestroy
+
+@Description    Destroy a fence.
+                The fence will be destroyed immediately if its refCount
+                is now 0, otherwise it will be destroyed once all references
+                to it have been dropped.
+
+@Input          hFence              Handle to the fence
+
+@Return         PVRSRV_OK if the fence was successfully marked for
+                    destruction (now or later)
+                PVRSRV_ERROR_INVALID_PARAMS if an unrecognised fence was specified
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVFenceDestroyI(PVRSRV_FENCE hFence
+                    PVR_DBG_FILELINE_PARAM);
+#define PVRSRVFenceDestroy(hFence) \
+    PVRSRVFenceDestroyI( (hFence) PVR_DBG_FILELINE)
+
+/*************************************************************************/ /*!
+@Function       PVRSRVIsTimelineValid
+
+@Description    Checks whether the passed timeline handle has been set to an
+                invalid value.
+                Used to find out if the timeline can be passed into fence
+                sync functions.
+
+@Return         IMG_TRUE if the passed timeline is valid, IMG_FALSE if invalid
+*/
+/*****************************************************************************/
+IMG_BOOL PVRSRVIsTimelineValid(PVRSRV_TIMELINE iTimeline);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVIsFenceValid
+
+@Description    Checks whether the passed fence handle has been set to an
+                invalid value.
+                Used to find out if the fence can be passed into fence
+                sync functions.
+
+@Return         IMG_TRUE if the passed fence is valid, IMG_FALSE if invalid
+*/
+/*****************************************************************************/
+IMG_BOOL PVRSRVIsFenceValid(PVRSRV_FENCE iFence);
+
+
+#if defined (__cplusplus)
+}
+#endif
+#endif	/* PVRSRV_SYNC_UM_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_tlcommon.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_tlcommon.h
new file mode 100644
index 0000000..2ce53b3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_tlcommon.h
@@ -0,0 +1,229 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Transport Layer common types and definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common types and definitions included into
+                both user mode and kernel mode source.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __PVR_TLCOMMON_H__
+#define __PVR_TLCOMMON_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+
+/*! Handle type for stream descriptor objects as created by this API */
+typedef IMG_HANDLE PVRSRVTL_SD;
+
+/*! Maximum stream name length including the null byte */
+#define PRVSRVTL_MAX_STREAM_NAME_SIZE	40U
+
+/*! Packet lengths are always rounded up to a multiple of 8 bytes */
+#define PVRSRVTL_PACKET_ALIGNMENT		8U
+#define PVRSRVTL_ALIGN(x)				(((x)+PVRSRVTL_PACKET_ALIGNMENT-1) & ~(PVRSRVTL_PACKET_ALIGNMENT-1))
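+/* e.g. PVRSRVTL_ALIGN(1) == 8, PVRSRVTL_ALIGN(8) == 8, PVRSRVTL_ALIGN(13) == 16 */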
+
+
+/*! A packet is made up of a header structure followed by the data bytes.
+ * There are 3 types of packet: normal (has data), data lost and padding,
+ * see packet flags. Header kept small to reduce data overhead.
+ *
+ * If the ORDER of the structure members is changed, please UPDATE the
+ *   PVRSRVTL_PACKET_FLAG_OFFSET macro.
+ *
+ * Layout of the uiTypeSize member is:
+ *
+ * |<---------------------------32-bits------------------------------>|
+ * |<----8---->|<-----1----->|<----7--->|<------------16------------->|
+ * |    Type   | Drop-Oldest |  UNUSED  |             Size            |
+ *
+ */
+typedef struct _PVRSRVTL_PACKETHDR_
+{
+	IMG_UINT32 uiTypeSize;	/*!< Type, Drop-Oldest flag & number of bytes following header */
+	IMG_UINT32 uiReserved;	/*!< Reserved; packets and data must be 8-byte aligned */
+
+	/* First bytes of TL packet data follow header ... */
+} PVRSRVTL_PACKETHDR, *PVRSRVTL_PPACKETHDR;
+
+/* The structure size must always be a multiple of 8 as the stream buffer
+ * is still an array of IMG_UINT32s.
+ */
+static_assert((sizeof(PVRSRVTL_PACKETHDR) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+			  "sizeof(PVRSRVTL_PACKETHDR) must be a multiple of 8");
+
+/*! Packet header mask used to extract the size from the uiTypeSize member.
+ * Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_SIZE_MASK    0x0000FFFFU
+#define PVRSRVTL_MAX_PACKET_SIZE        (PVRSRVTL_PACKETHDR_SIZE_MASK & ~0xFU)
+
+
+/*! Packet header mask used to extract the type from the uiTypeSize member.
+ * Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_TYPE_MASK    0xFF000000U
+#define PVRSRVTL_PACKETHDR_TYPE_OFFSET  24U
+
+/*! Packet header mask used to check if packets before this one were dropped or not.
+ * Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK    0x00800000U
+#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET    23U
+
+/*! Packet type enumeration.
+ */
+typedef enum _PVRSRVTL_PACKETTYPE_
+{
+	/*! Undefined packet */
+	PVRSRVTL_PACKETTYPE_UNDEF = 0,
+
+	/*! Normal packet type. Indicates data follows the header.
+	 */
+	PVRSRVTL_PACKETTYPE_DATA = 1,
+
+	/*! When seen, this packet type indicates that at this point in the
+	 * stream, packet(s) could not be accepted due to space constraints and
+	 * that recent data may be lost - this depends on how the producer
+	 * handles the error. Such packets have no data; the data length is 0.
+	 */
+	PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED = 2,
+
+	/*! Packets with this type set are padding packets that contain undefined
+	 * data and must be ignored/skipped by the client. They are used when the
+	 * circular stream buffer wraps around and there is not enough space for
+	 * the data at the end of the buffer. Such packets have a length of 0 or
+	 * more.
+	 */
+	PVRSRVTL_PACKETTYPE_PADDING = 3,
+
+	/*! This packet type conveys to the stream consumer that the stream producer
+	 * has reached the end of data for that data sequence. The TLDaemon
+	 * has several options for processing these packets that can be selected
+	 * on a per stream basis.
+	 */
+	PVRSRVTL_PACKETTYPE_MARKER_EOS = 4,
+
+	/*! Packet emitted on the first stream opened by a writer. The packet
+	 * carries the name of the opened stream in the form of a
+	 * null-terminated string.
+	 */
+	PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE = 5,
+
+	/*! Packet emitted on the last stream closed by a writer. The packet
+	 * carries the name of the closed stream in the form of a
+	 * null-terminated string.
+	 */
+	PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE = 6,
+
+	PVRSRVTL_PACKETTYPE_LAST
+} PVRSRVTL_PACKETTYPE;
+
+/* The SET_PACKET_* macros rely on the layout of the uiTypeSize member of
+ * PVRSRVTL_PACKETHDR: the packet type occupies the top 8 bits and the packet
+ * size the bottom 16 bits (see the layout diagram above).
+ */
+#define PVRSRVTL_SET_PACKET_DATA(len)       ((len) | (PVRSRVTL_PACKETTYPE_DATA                     << PVRSRVTL_PACKETHDR_TYPE_OFFSET))
+#define PVRSRVTL_SET_PACKET_PADDING(len)    ((len) | (PVRSRVTL_PACKETTYPE_PADDING                  << PVRSRVTL_PACKETHDR_TYPE_OFFSET))
+#define PVRSRVTL_SET_PACKET_WRITE_FAILED    ((0)   | (PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED << PVRSRVTL_PACKETHDR_TYPE_OFFSET))
+#define PVRSRVTL_SET_PACKET_HDR(len,type)   ((len) | ((type)                                       << PVRSRVTL_PACKETHDR_TYPE_OFFSET))
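+/* e.g. PVRSRVTL_SET_PACKET_HDR(16, PVRSRVTL_PACKETTYPE_DATA) == 0x01000010 */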
+
+/*! Returns the number of bytes of data in the packet. p may be any address
+ * type. */
+#define GET_PACKET_DATA_LEN(p)	\
+	((IMG_UINT32) ((PVRSRVTL_PPACKETHDR)(p))->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK)
+
+
+/*! Returns an IMG_PBYTE pointer to the first byte of data in the packet */
+#define GET_PACKET_DATA_PTR(p)	\
+	((IMG_PBYTE) ( ((size_t)(p)) + sizeof(PVRSRVTL_PACKETHDR)) )
+
+/*! Given a PVRSRVTL_PPACKETHDR address, return the address of the next packet.
+ *  It is up to the caller to determine if the new address is within the packet
+ *  buffer.
+ */
+#define GET_NEXT_PACKET_ADDR(p) \
+	((PVRSRVTL_PPACKETHDR) ( ((IMG_UINT8 *)(p)) + sizeof(PVRSRVTL_PACKETHDR) + \
+	(((((PVRSRVTL_PPACKETHDR)(p))->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK) + \
+	(PVRSRVTL_PACKET_ALIGNMENT-1)) & (~(PVRSRVTL_PACKET_ALIGNMENT-1)) ) ))
+
+/*! Turns the packet address p into a PVRSRVTL_PPACKETHDR pointer type
+ */
+#define GET_PACKET_HDR(p)		((PVRSRVTL_PPACKETHDR)(p))
+
+/*! Get the type of the packet. p is of type PVRSRVTL_PPACKETHDR
+ */
+#define GET_PACKET_TYPE(p)		(((p)->uiTypeSize & PVRSRVTL_PACKETHDR_TYPE_MASK)>>PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+
+/*! Returns the uiTypeSize value with the PACKETS_DROPPED flag set. p is of type PVRSRVTL_PPACKETHDR.
+ */
+#define SET_PACKETS_DROPPED(p)		(((p)->uiTypeSize) | (1U<<PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET))
+
+/*! Check if packets were dropped before this packet. p is of type PVRSRVTL_PPACKETHDR
+ */
+#define CHECK_PACKETS_DROPPED(p)	(((p)->uiTypeSize & PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK)>>PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET)
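+/* Illustrative walk over a contiguous buffer of packets (a sketch only:
+ * pbData/uiBufLen stand for an acquired buffer and its length, and
+ * ProcessBytes() is a hypothetical consumer function):
+ *
+ *     PVRSRVTL_PPACKETHDR psHdr = GET_PACKET_HDR(pbData);
+ *     while ((IMG_UINT8 *)psHdr < pbData + uiBufLen)
+ *     {
+ *         if (GET_PACKET_TYPE(psHdr) == PVRSRVTL_PACKETTYPE_DATA)
+ *             ProcessBytes(GET_PACKET_DATA_PTR(psHdr), GET_PACKET_DATA_LEN(psHdr));
+ *         psHdr = GET_NEXT_PACKET_ADDR(psHdr);
+ *     }
+ */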
+
+/*! Flags for use with PVRSRVTLOpenStream
+ * 0x01 - Do not block in PVRSRVTLAcquireData() when no bytes are available.
+ * 0x02 - When the stream does not exist wait for a while (2s) in
+ *        PVRSRVTLOpenStream() and then exit with a timeout error if it still
+ *        does not exist.
+ * 0x04 - Open stream for write-only operations.
+ *        If the flag is not used the stream is opened read-only. This flag
+ *        is required if one wants to call the reserve/commit/write functions
+ *        on the stream descriptor. Reads from a stream descriptor opened
+ *        with this flag will fail.
+ * 0x08 - Disable the producer callback (behaviour inferred from the flag
+ *        name below).
+ * 0x10 - Reset stream on open.
+ *        When this flag is used the stream will drop all of the stored data.
+ */
+#define PVRSRV_STREAM_FLAG_NONE                        (0U)
+#define PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING         (1U<<0)
+#define PVRSRV_STREAM_FLAG_OPEN_WAIT                   (1U<<1)
+#define PVRSRV_STREAM_FLAG_OPEN_WO                     (1U<<2)
+#define PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK   (1U<<3)
+#define PVRSRV_STREAM_FLAG_RESET_ON_OPEN               (1U<<4)
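+/* The flags above are single-bit values and may be OR-ed together when
+ * opening a stream, e.g.:
+ *     IMG_UINT32 ui32Flags = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING |
+ *                            PVRSRV_STREAM_FLAG_OPEN_WAIT;
+ * (illustrative only; PVRSRVTLOpenStream itself is declared elsewhere) */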
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_TLCOMMON_H__ */
+/******************************************************************************
+ End of file (pvrsrv_tlcommon.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_tlstreams.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_tlstreams.h
new file mode 100644
index 0000000..a4ead13
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrsrv_tlstreams.h
@@ -0,0 +1,62 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Transport Layer stream names
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common types and definitions included into
+                both user mode and kernel mode source.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVRSRV_TLSTREAMS_H_
+#define _PVRSRV_TLSTREAMS_H_
+
+#define PVRSRV_TL_CTLR_STREAM "tlctrl"
+
+#define PVRSRV_TL_HWPERF_RGX_FW_STREAM      "hwperf_fw_"
+#define PVRSRV_TL_HWPERF_HOST_SERVER_STREAM "hwperf_host_"
+
+/* Host HWPerf client stream names are of the form 'hwperf_client_<pid>_<id>' */
+#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM         "hwperf_client_"
+#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC "hwperf_client_%u_%u"
+
+#endif /* _PVRSRV_TLSTREAMS_H_ */
+
+/******************************************************************************
+ End of file (pvrsrv_tlstreams.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrversion.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrversion.h
new file mode 100644
index 0000000..106a356
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/pvrversion.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@File
+@Title          Version numbers and strings.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Version numbers and strings for PVR Consumer services
+                components.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVRVERSION_H_
+#define _PVRVERSION_H_
+
+#define PVR_STR(X) #X
+#define PVR_STR2(X) PVR_STR(X)
+
+#define PVRVERSION_MAJ               1
+#define PVRVERSION_MIN               9
+
+#define PVRVERSION_FAMILY           "rogueddk"
+#define PVRVERSION_BRANCHNAME       "1.9"
+#define PVRVERSION_BUILD             4917962
+#define PVRVERSION_BSCONTROL        "Rogue_DDK_Linux_WS"
+
+#define PVRVERSION_STRING           "Rogue_DDK_Linux_WS rogueddk 1.9@" PVR_STR2(PVRVERSION_BUILD)
+#define PVRVERSION_STRING_SHORT     "1.9@" PVR_STR2(PVRVERSION_BUILD)
+
+#define COPYRIGHT_TXT               "Copyright (c) Imagination Technologies Ltd. All Rights Reserved."
+
+#define PVRVERSION_BUILD_HI          491
+#define PVRVERSION_BUILD_LO          7962
+#define PVRVERSION_STRING_NUMERIC    PVR_STR2(PVRVERSION_MAJ) "." PVR_STR2(PVRVERSION_MIN) "." PVR_STR2(PVRVERSION_BUILD_HI) "." PVR_STR2(PVRVERSION_BUILD_LO)
+
+#define PVRVERSION_PACK(MAJ,MIN) ((((MAJ)&0xFFFF) << 16) | (((MIN)&0xFFFF) << 0))
+#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16) & 0xFFFF)
+#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0) & 0xFFFF)
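+/* e.g. PVRVERSION_PACK(1,9) == 0x00010009, PVRVERSION_UNPACK_MAJ(0x00010009) == 1
+ * and PVRVERSION_UNPACK_MIN(0x00010009) == 9 */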
+
+#endif /* _PVRVERSION_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_common.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_common.h
new file mode 100644
index 0000000..541f14d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_common.h
@@ -0,0 +1,181 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Common Types and Defines Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Common types and definitions for RGX software
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_COMMON_H_
+#define RGX_COMMON_H_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+/* Included to get the BVNC_KM_N defined and other feature defs */
+#include "km/rgxdefs_km.h"
+
+/*! This macro represents a mask of LSBs that must be zero on data structure
+ * sizes and offsets to ensure they are 8-byte granular on types shared between
+ * the FW and host driver */
+#define RGX_FW_ALIGNMENT_LSB (7)
+
+/*! Macro to test structure size alignment */
+#define RGX_FW_STRUCT_SIZE_ASSERT(_a)	\
+	static_assert((sizeof(_a) & RGX_FW_ALIGNMENT_LSB) == 0,	\
+				  "Size of " #_a " is not properly aligned")
+
+/*! Macro to test structure member alignment */
+#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b)	\
+	static_assert((offsetof(_a, _b) & RGX_FW_ALIGNMENT_LSB) == 0,	\
+				  "Offset of " #_a "." #_b " is not properly aligned")
+
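+/* Usage sketch (EXAMPLE_FW_STRUCT is hypothetical, shown only to illustrate
+ * the asserts):
+ *     typedef struct { IMG_UINT64 ui64Stamp; IMG_UINT32 aui32Data[2]; } EXAMPLE_FW_STRUCT;
+ *     RGX_FW_STRUCT_SIZE_ASSERT(EXAMPLE_FW_STRUCT);              -- 16 bytes, OK
+ *     RGX_FW_STRUCT_OFFSET_ASSERT(EXAMPLE_FW_STRUCT, aui32Data); -- offset 8, OK
+ * Either assert fails at compile time if the size/offset is not 8-byte
+ * granular. */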
+
+/* The following enum assumes that only one of the RGX_FEATURE_TLA and
+ * RGX_FEATURE_FASTRENDER_DM features is present. If this ever stops being
+ * true, fail the build so the code can be fixed */
+#if defined (RGX_FEATURE_TLA) && defined (RGX_FEATURE_FASTRENDER_DM)
+#error "Both RGX_FEATURE_TLA and RGX_FEATURE_FASTRENDER_DM defined. Fix code to handle this!"
+#endif
+
+/*! The master definition for data masters known to the firmware of RGX.
+ * When a new DM is added to this enum, a corresponding entry should be added
+ * to the RGX_HWPERF_DM enum list.
+ * The DM in a V1 HWPerf packet uses this definition. */
+typedef enum _RGXFWIF_DM_
+{
+	RGXFWIF_DM_GP			= 0,
+
+	/* Either TDM or 2D DM is present. The above build time error is present to verify this */
+	RGXFWIF_DM_2D			= 1, /* when RGX_FEATURE_TLA defined */
+	RGXFWIF_DM_TDM			= 1, /* when RGX_FEATURE_FASTRENDER_DM defined */
+
+	RGXFWIF_DM_TA			= 2,
+	RGXFWIF_DM_3D			= 3,
+	RGXFWIF_DM_CDM			= 4,
+
+	/* present on Ray cores only */
+	RGXFWIF_DM_RTU			= 5,
+	RGXFWIF_DM_SHG			= 6,
+
+	RGXFWIF_DM_LAST,
+
+	RGXFWIF_DM_FORCE_I32  = 0x7fffffff   /*!< Force enum to be at least 32-bits wide */
+} RGXFWIF_DM;
+
+typedef enum _RGX_KICK_TYPE_DM_
+{
+	RGX_KICK_TYPE_DM_GP			= 1 << 0,
+	RGX_KICK_TYPE_DM_TDM_2D		= 1 << 1,
+	RGX_KICK_TYPE_DM_TA			= 1 << 2,
+	RGX_KICK_TYPE_DM_3D			= 1 << 3,
+	RGX_KICK_TYPE_DM_CDM		= 1 << 4,
+	RGX_KICK_TYPE_DM_RTU		= 1 << 5,
+	RGX_KICK_TYPE_DM_SHG		= 1 << 6,
+	RGX_KICK_TYPE_DM_TQ2D		= 1 << 7,
+	RGX_KICK_TYPE_DM_TQ3D		= 1 << 8,
+	RGX_KICK_TYPE_DM_LAST		= 1 << 9
+} RGX_KICK_TYPE_DM;
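+/* The RGX_KICK_TYPE_DM values are single-bit flags and may be OR-ed together,
+ * e.g. (RGX_KICK_TYPE_DM_TA | RGX_KICK_TYPE_DM_3D) selects both the TA and
+ * 3D data masters. */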
+
+/* Maximum number of DMs in use: GP, 2D/TDM, TA, 3D, CDM, SHG, RTU */
+#define RGXFWIF_DM_DEFAULT_MAX	(7)
+
+#if !defined(__KERNEL__)
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGXFWIF_DM_MAX_MTS 8
+#else
+#define RGXFWIF_DM_MAX_MTS 6
+#endif
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+/* Maximum number of DMs in use: GP, 2D/TDM, TA, 3D, CDM, SHG, RTU */
+#define RGXFWIF_DM_MAX			(7)
+#else
+/* Maximum number of DMs in use: GP, 2D/TDM, TA, 3D, CDM */
+#define RGXFWIF_DM_MAX			(5)
+#endif
+#define RGXFWIF_HWDM_MAX		(RGXFWIF_DM_MAX)
+#else
+	#define RGXFWIF_DM_MIN_MTS_CNT (6)
+	#define RGXFWIF_RAY_TRACING_DM_MTS_CNT (2)
+	#define RGXFWIF_DM_MIN_CNT			(5)
+	#define RGXFWIF_RAY_TRACING_DM_CNT	(2)
+	#define RGXFWIF_DM_MAX	(RGXFWIF_DM_MIN_CNT + RGXFWIF_RAY_TRACING_DM_CNT)
+#endif
+
+/* Min/Max number of HW DMs (all but GP) */
+#if defined(RGX_FEATURE_TLA)
+#define RGXFWIF_HWDM_MIN		(1)
+#else
+#if defined(RGX_FEATURE_FASTRENDER_DM)
+#define RGXFWIF_HWDM_MIN		(1)
+#else
+#define RGXFWIF_HWDM_MIN		(2)
+#endif
+#endif
+
+/*!
+ ******************************************************************************
+ * RGXFW Compiler alignment definitions
+ *****************************************************************************/
+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+#define RGXFW_ALIGN			__attribute__ ((aligned (8)))
+#elif defined(_MSC_VER)
+#define RGXFW_ALIGN			__declspec(align(8))
+#pragma warning (disable : 4324)
+#else
+#error "Align MACROS need to be defined for this compiler"
+#endif
+
+/*!
+ ******************************************************************************
+ * Force 8-byte alignment for structures allocated uncached.
+ *****************************************************************************/
+#define UNCACHED_ALIGN      RGXFW_ALIGN
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* RGX_COMMON_H_ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_firmware_processor.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_firmware_processor.h
new file mode 100644
index 0000000..2b2daef
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_firmware_processor.h
@@ -0,0 +1,92 @@
+/*************************************************************************/ /*!
+@File           rgx_firmware_processor.h
+@Title
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform       RGX
+@Description    Generic include file for firmware processors (META and MIPS)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#if !defined(RGX_FIRMWARE_PROCESSOR_H)
+#define RGX_FIRMWARE_PROCESSOR_H
+
+#include "km/rgxdefs_km.h"
+
+#include "rgx_meta.h"
+#include "rgx_mips.h"
+
+/* Processor-independent definitions, common to all firmware processors, belong here */
+typedef enum
+{
+	FW_PERF_CONF_NONE = 0,
+	FW_PERF_CONF_ICACHE = 1,
+	FW_PERF_CONF_DCACHE = 2,
+	FW_PERF_CONF_POLLS = 3,
+	FW_PERF_CONF_CUSTOM_TIMER = 4,
+	FW_PERF_CONF_JTLB_INSTR = 5,
+	FW_PERF_CONF_INSTRUCTIONS = 6
+} FW_PERF_CONF;
+
+#if !defined(__KERNEL__)
+	#if defined(RGX_FEATURE_MIPS)
+
+		#define FW_CORE_ID_VALUE                                      RGXMIPSFW_CORE_ID_VALUE
+		#define RGXFW_PROCESSOR                                       RGXFW_PROCESSOR_MIPS
+
+		/* Firmware to host interrupts defines */
+		#define RGXFW_CR_IRQ_STATUS                                   RGX_CR_MIPS_WRAPPER_IRQ_STATUS
+		#define RGXFW_CR_IRQ_STATUS_EVENT_EN                          RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN
+		#define RGXFW_CR_IRQ_CLEAR                                    RGX_CR_MIPS_WRAPPER_IRQ_CLEAR
+		#define RGXFW_CR_IRQ_CLEAR_MASK                               RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN
+
+	#else
+
+		#define RGXFW_PROCESSOR         RGXFW_PROCESSOR_META
+
+		/* Firmware to host interrupts defines */
+		#define RGXFW_CR_IRQ_STATUS           RGX_CR_META_SP_MSLVIRQSTATUS
+		#define RGXFW_CR_IRQ_STATUS_EVENT_EN  RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN
+		#define RGXFW_CR_IRQ_CLEAR            RGX_CR_META_SP_MSLVIRQSTATUS
+		#define RGXFW_CR_IRQ_CLEAR_MASK       (RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK & \
+													RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL)
+
+	#endif
+#endif
+
+#endif /* RGX_FIRMWARE_PROCESSOR_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_heaps.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_heaps.h
new file mode 100644
index 0000000..42fac6d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_heaps.h
@@ -0,0 +1,182 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX heap definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGX_HEAPS_H__)
+#define __RGX_HEAPS_H__
+
+#include "km/rgxdefs_km.h"
+#include "log2.h"
+#include "pvr_debug.h"
+
+/* RGX Heap IDs, note: not all heaps are available to clients */
+/* N.B. Old heap identifiers are deprecated, as is the old memory
+   management. New heap identifiers should be suitably renamed */
+#define RGX_UNDEFINED_HEAP_ID					(~0LU)			/*!< RGX Undefined Heap ID */
+#define RGX_GENERAL_SVM_HEAP_ID					0				/*!< RGX General SVM (shared virtual memory) Heap ID */
+#define RGX_GENERAL_HEAP_ID						1				/*!< RGX General Heap ID */
+#define RGX_GENERAL_NON4K_HEAP_ID				2				/*!< RGX General non-4K Heap ID */
+#define RGX_RGNHDR_BRN_63142_HEAP_ID			3				/*!< RGX RgnHdr BRN63142 Heap ID */
+#define RGX_MMU_INIA_BRN_65273_ID				4				/*!< RGX MMU INIA Heap ID */
+#define RGX_MMU_INIB_BRN_65273_ID				5				/*!< RGX MMU INIB Heap ID */
+#define RGX_PDSCODEDATA_HEAP_ID					6				/*!< RGX PDS Code/Data Heap ID */
+#define RGX_USCCODE_HEAP_ID						7				/*!< RGX USC Code Heap ID */
+#define RGX_FIRMWARE_HEAP_ID					8				/*!< RGX Firmware Heap ID */
+#define RGX_TQ3DPARAMETERS_HEAP_ID				9				/*!< RGX TQ 3D Parameters Heap ID */
+#define RGX_BIF_TILING_HEAP_1_ID				10				/*!< RGX BIF Tiling Heap 1 ID */
+#define RGX_BIF_TILING_HEAP_2_ID				11				/*!< RGX BIF Tiling Heap 2 ID */
+#define RGX_BIF_TILING_HEAP_3_ID				12				/*!< RGX BIF Tiling Heap 3 ID */
+#define RGX_BIF_TILING_HEAP_4_ID				13				/*!< RGX BIF Tiling Heap 4 ID */
+#define RGX_HWBRN37200_HEAP_ID					14				/*!< RGX HWBRN37200 */
+#define RGX_DOPPLER_HEAP_ID						15				/*!< Doppler Heap ID */
+#define RGX_DOPPLER_OVERFLOW_HEAP_ID			16				/*!< Doppler Overflow Heap ID */
+#define RGX_SERVICES_SIGNALS_HEAP_ID			17				/*!< Services Signals Heap ID */
+#define RGX_SIGNALS_HEAP_ID						18				/*!< Signals Heap ID */
+#define RGX_TDM_TPU_YUV_COEFFS_HEAP_ID          19				/*!< RGX TDM TPU YUV Coeffs Heap ID */
+#define RGX_GUEST_FIRMWARE_HEAP_ID				20				/*!< Additional OSIDs Firmware */
+#define RGX_MAX_HEAP_ID     	(RGX_GUEST_FIRMWARE_HEAP_ID + RGXFW_NUM_OS)	/*!< Max Valid Heap ID */
+
+/*
+  Identify heaps by their names
+*/
+#define RGX_GENERAL_SVM_HEAP_IDENT		"General SVM"			/*!< RGX General SVM (shared virtual memory) Heap Identifier */
+#define RGX_GENERAL_HEAP_IDENT 			"General"               /*!< RGX General Heap Identifier */
+#define RGX_GENERAL_NON4K_HEAP_IDENT	"General NON-4K"        /*!< RGX General non-4K Heap Identifier */
+#define RGX_RGNHDR_BRN_63142_HEAP_IDENT "RgnHdr BRN63142"       /*!< RGX RgnHdr BRN63142 Heap Identifier */
+#define RGX_MMU_INIA_BRN_65273_HEAP_IDENT "MMU INIA BRN65273"   /*!< MMU BRN65273 Heap A Identifier */
+#define RGX_MMU_INIB_BRN_65273_HEAP_IDENT "MMU INIB BRN65273"   /*!< MMU BRN65273 Heap B Identifier */
+#define RGX_PDSCODEDATA_HEAP_IDENT 		"PDS Code and Data"     /*!< RGX PDS Code/Data Heap Identifier */
+#define RGX_USCCODE_HEAP_IDENT			"USC Code"              /*!< RGX USC Code Heap Identifier */
+#define RGX_TQ3DPARAMETERS_HEAP_IDENT	"TQ3DParameters"        /*!< RGX TQ 3D Parameters Heap Identifier */
+#define RGX_BIF_TILING_HEAP_1_IDENT	    "BIF Tiling Heap 1"	    /*!< RGX BIF Tiling Heap 1 identifier */
+#define RGX_BIF_TILING_HEAP_2_IDENT	    "BIF Tiling Heap 2"	    /*!< RGX BIF Tiling Heap 2 identifier */
+#define RGX_BIF_TILING_HEAP_3_IDENT	    "BIF Tiling Heap 3"	    /*!< RGX BIF Tiling Heap 3 identifier */
+#define RGX_BIF_TILING_HEAP_4_IDENT	    "BIF Tiling Heap 4"	    /*!< RGX BIF Tiling Heap 4 identifier */
+#define RGX_DOPPLER_HEAP_IDENT			"Doppler"				/*!< Doppler Heap Identifier */
+#define RGX_DOPPLER_OVERFLOW_HEAP_IDENT	"Doppler Overflow"		/*!< Doppler Overflow Heap Identifier */
+#define RGX_SERVICES_SIGNALS_HEAP_IDENT	"Services Signals"		/*!< Services Signals Heap Identifier */
+#define RGX_SIGNALS_HEAP_IDENT	        "Signals"		        /*!< Signals Heap Identifier */
+#define RGX_VISTEST_HEAP_IDENT			"VisTest"				/*!< VisTest heap */
+#define RGX_TDM_TPU_YUV_COEFFS_HEAP_IDENT "TDM TPU YUV Coeffs"  /*!< TDM TPU YUV Coeffs Heap Identifier */
+
+/* BIF tiling heaps have specific buffer requirements based on their XStride
+ * configuration. This is detailed in the BIF tiling documentation and ensures
+ * that the bits swapped by the BIF tiling algorithm do not result in addresses
+ * outside the allocated buffer. The representation here reflects the diagram
+ * in the BIF tiling documentation for tiling mode '0'.
+ *
+ * For tiling mode '1', the overall tile size does not change, width increases
+ * to 2^9 but the height drops to 2^3.
+ * This means the RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE macro can be
+ * used for both modes.
+ *
+ * Previous TILING_HEAP_STRIDE macros are retired in favour of storing an
+ * alignment-to-stride factor, derived from the tiling mode, with the tiling
+ * heap configuration data.
+ *
+ * XStride is defined for a platform in sysconfig.h, but the resulting
+ * alignment and stride factor can be queried through the
+ * PVRSRVGetHeapLog2ImportAlignmentAndTilingStrideFactor() API.
+ * For reference:
+ *   Log2BufferStride = Log2Alignment - Log2AlignmentToTilingStrideFactor
+ */
+#define RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(X)       (4+(X)+1+8)
+#define RGX_BIF_TILING_HEAP_LOG2_ALIGN_TO_STRIDE_BASE              (4)
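+/* e.g. for XStride == 4 the import alignment is
+ * 2^RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(4) == 2^17 == 128KB */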
+
+/*
+ *  Supported log2 page size values for RGX_GENERAL_NON_4K_HEAP_ID
+ */
+#define RGX_HEAP_4KB_PAGE_SHIFT					(12)
+#define RGX_HEAP_16KB_PAGE_SHIFT				(14)
+#define RGX_HEAP_64KB_PAGE_SHIFT				(16)
+#define RGX_HEAP_256KB_PAGE_SHIFT				(18)
+#define RGX_HEAP_1MB_PAGE_SHIFT					(20)
+#define RGX_HEAP_2MB_PAGE_SHIFT					(21)
+
+/* Takes a log2 page size parameter and calculates a suitable page size
+ * for the RGX heaps. Returns 0 if the parameter is out of range. */
+static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize)
+{
+	IMG_BOOL bFound = IMG_FALSE;
+
+	/* The OS page shift must be at least RGX_HEAP_4KB_PAGE_SHIFT
+	 * and at most RGX_HEAP_2MB_PAGE_SHIFT */
+	if ( uiLog2PageSize == 0 ||
+		(uiLog2PageSize < RGX_HEAP_4KB_PAGE_SHIFT) ||
+		(uiLog2PageSize > RGX_HEAP_2MB_PAGE_SHIFT))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Provided incompatible log2 page size %u",
+				__FUNCTION__,
+				uiLog2PageSize));
+		PVR_ASSERT(0);
+		return 0;
+	}
+
+	do
+	{
+		switch (uiLog2PageSize)
+		{
+			case RGX_HEAP_4KB_PAGE_SHIFT:
+			case RGX_HEAP_16KB_PAGE_SHIFT:
+			case RGX_HEAP_64KB_PAGE_SHIFT:
+			case RGX_HEAP_256KB_PAGE_SHIFT:
+			case RGX_HEAP_1MB_PAGE_SHIFT:
+			case RGX_HEAP_2MB_PAGE_SHIFT:
+				/* All good, RGX page size equals given page size
+				 * => use it as default for heaps */
+				bFound = IMG_TRUE;
+				break;
+			default:
+				/* We have to fall back to a smaller device
+				 * page size than given page size because there
+				 * is no exact match for any supported size. */
+				uiLog2PageSize -= 1;
+				break;
+		}
+	} while (!bFound);
+
+	return uiLog2PageSize;
+}
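+/* For illustration: RGXHeapDerivePageSize(13) falls back to 12 (4KB) because
+ * 2^13 (8KB) is not a supported RGX heap page size, whereas
+ * RGXHeapDerivePageSize(16) returns 16 (64KB) unchanged. */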
+
+
+#endif /* __RGX_HEAPS_H__ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_hwperf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_hwperf.h
new file mode 100644
index 0000000..833d3fd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_hwperf.h
@@ -0,0 +1,1238 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HWPerf and Debug Types and Defines Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common data types definitions for hardware performance API
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_HWPERF_H_
+#define RGX_HWPERF_H_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* These structures are used on both GPU and CPU and must be a size that is a
+ * multiple of 64 bits (8 bytes), to allow the FW to write 8-byte quantities
+ * at 8-byte-aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this.
+ */
+
+/******************************************************************************
+ * 	Includes and Defines
+ *****************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+
+#include "rgx_common.h"
+#include "pvrsrv_tlcommon.h"
+#include "pvrsrv_sync_km.h"
+
+
+/* HWPerf interface assumption checks */
+static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16, "Cluster count too large for HWPerf protocol definition");
+
+
+#if !defined(__KERNEL__)
+/* User-mode and Firmware definitions only */
+
+/*! The number of indirectly addressable TPU_MSC blocks in the GPU */
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST MAX((RGX_FEATURE_NUM_CLUSTERS>>1),1)
+
+/*! The number of indirectly addressable USC blocks in the GPU */
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER (RGX_FEATURE_NUM_CLUSTERS)
+
+# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+
+ /*! Defines the number of performance counter blocks that are directly
+  * addressable in the RGX register map for S7 top infrastructure. */
+#  define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS      1 /* JONES */
+#  define RGX_HWPERF_INDIRECT_BY_PHANTOM       (RGX_NUM_PHANTOMS)
+#  define RGX_HWPERF_PHANTOM_NONDUST_BLKS      1 /* BLACKPEARL */
+#  define RGX_HWPERF_PHANTOM_DUST_BLKS         2 /* TPU, TEXAS */
+#  define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 2 /* USC, PBE */
+#  define RGX_HWPERF_DOPPLER_BX_TU_BLKS        0
+
+# elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+
+#  if defined(RGX_FEATURE_RAY_TRACING)
+  /*! Defines the number of performance counter blocks that are directly
+   * addressable in the RGX register map for Series 6XT. */
+#   define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS    6 /* TORNADO, TA, BF, BT, RT, SH */
+#   define RGX_HWPERF_DOPPLER_BX_TU_BLKS      4 /* Doppler unit unconditionally has 4 instances of BX_TU */
+#  else /* !defined(RGX_FEATURE_RAY_TRACING) */
+  /*! Defines the number of performance counter blocks that are directly
+   * addressable in the RGX register map. */
+#   define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS    2 /* TORNADO, TA */
+#   define RGX_HWPERF_DOPPLER_BX_TU_BLKS      0
+#  endif /* defined(RGX_FEATURE_RAY_TRACING) */
+
+#  define RGX_HWPERF_INDIRECT_BY_PHANTOM       (RGX_NUM_PHANTOMS)
+#  define RGX_HWPERF_PHANTOM_NONDUST_BLKS      2 /* RASTER, TEXAS */
+#  define RGX_HWPERF_PHANTOM_DUST_BLKS         1 /* TPU */
+#  define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */
+
+# else /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && ! defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) i.e. S6 */
+
+ /*! Defines the number of performance counter blocks that are
+  * addressable in the RGX register map for Series 6. */
+#  define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS      3 /* TA, RASTER, HUB */
+#  define RGX_HWPERF_INDIRECT_BY_PHANTOM       0  /* PHANTOM is not present in Rogue 1; the name is kept for consistency with later series (Rogue XT and Rogue XT+) */
+#  define RGX_HWPERF_PHANTOM_NONDUST_BLKS      0
+#  define RGX_HWPERF_PHANTOM_DUST_BLKS         1 /* TPU */
+#  define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */
+#  define RGX_HWPERF_DOPPLER_BX_TU_BLKS        0
+
+# endif
+
+/*! The number of performance counters in each layout block defined for UM/FW code */
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+# define RGX_HWPERF_CNTRS_IN_BLK 6
+#else
+# define RGX_HWPERF_CNTRS_IN_BLK 4
+#endif
+
+#else /* defined(__KERNEL__) */
+/* Kernel/server definitions - not used, hence invalid definitions */
+
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST    0xFF
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER 0xFF
+
+# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS      0xFF
+# define RGX_HWPERF_INDIRECT_BY_PHANTOM       0xFF
+# define RGX_HWPERF_PHANTOM_NONDUST_BLKS      0xFF
+# define RGX_HWPERF_PHANTOM_DUST_BLKS         0xFF
+# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 0xFF
+
+# if defined(RGX_FEATURE_RAY_TRACING)
+   /* Exception case, must have valid value since ray-tracing BX_TU unit does
+    * not vary by feature. Always read by rgx_hwperf_blk_present_raytracing()
+    * regardless of call context */
+#  define RGX_HWPERF_DOPPLER_BX_TU_BLKS       4
+# else
+#  define RGX_HWPERF_DOPPLER_BX_TU_BLKS       0
+# endif
+
+#endif
+
+/*! The number of custom non-mux counter blocks supported */
+#define RGX_HWPERF_MAX_CUSTOM_BLKS 5
+
+/*! The number of counters supported in each non-mux counter block */
+#define RGX_HWPERF_MAX_CUSTOM_CNTRS 8
+
+
+/******************************************************************************
+ * 	Packet Event Type Enumerations
+ *****************************************************************************/
+
+/*! Type used to encode the event that generated the packet.
+ * NOTE: When this type is updated the corresponding hwperfbin2json tool source
+ * needs to be updated as well. The RGX_HWPERF_EVENT_MASK_* macros will also need
+ * updating when adding new types.
+ */
+typedef enum
+{
+	RGX_HWPERF_INVALID				= 0x00,
+
+	/* FW types 0x01..0x06 */
+	RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE	= 0x01,
+
+	RGX_HWPERF_FW_BGSTART			= 0x01,
+	RGX_HWPERF_FW_BGEND				= 0x02,
+	RGX_HWPERF_FW_IRQSTART			= 0x03,
+
+	RGX_HWPERF_FW_IRQEND			= 0x04,
+	RGX_HWPERF_FW_DBGSTART			= 0x05,
+	RGX_HWPERF_FW_DBGEND			= 0x06,
+
+	RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE		= 0x06,
+
+	/* HW types 0x07..0x19 */
+	RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE	= 0x07,
+
+	RGX_HWPERF_HW_PMOOM_TAPAUSE		= 0x07,
+	RGX_HWPERF_HW_TAKICK			= 0x08,
+/*	RGX_HWPERF_HW_PMOOM_TAPAUSE		= 0x07, */
+/*	RGX_HWPERF_HW_PMOOM_TARESUME	= 0x19, */
+	RGX_HWPERF_HW_TAFINISHED		= 0x09,
+	RGX_HWPERF_HW_3DTQKICK			= 0x0A,
+/*	RGX_HWPERF_HW_3DTQFINISHED		= 0x17, */
+/*	RGX_HWPERF_HW_3DSPMKICK			= 0x11, */
+/*	RGX_HWPERF_HW_3DSPMFINISHED		= 0x18, */
+	RGX_HWPERF_HW_3DKICK			= 0x0B,
+	RGX_HWPERF_HW_3DFINISHED		= 0x0C,
+	RGX_HWPERF_HW_CDMKICK			= 0x0D,
+	RGX_HWPERF_HW_CDMFINISHED		= 0x0E,
+	RGX_HWPERF_HW_TLAKICK			= 0x0F,
+	RGX_HWPERF_HW_TLAFINISHED		= 0x10,
+	RGX_HWPERF_HW_3DSPMKICK			= 0x11,
+	RGX_HWPERF_HW_PERIODIC			= 0x12,
+	RGX_HWPERF_HW_RTUKICK			= 0x13,
+	RGX_HWPERF_HW_RTUFINISHED		= 0x14,
+	RGX_HWPERF_HW_SHGKICK			= 0x15,
+	RGX_HWPERF_HW_SHGFINISHED		= 0x16,
+	RGX_HWPERF_HW_3DTQFINISHED		= 0x17,
+	RGX_HWPERF_HW_3DSPMFINISHED		= 0x18,
+	RGX_HWPERF_HW_PMOOM_TARESUME	= 0x19,
+
+	/* HW_EVENT_RANGE0 used up. Use next empty range below to add new hardware events */
+	RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE	= 0x19,
+
+	/* other types 0x1A..0x1F */
+	RGX_HWPERF_CLKS_CHG				= 0x1A,
+	RGX_HWPERF_GPU_STATE_CHG		= 0x1B,
+
+	/* power types 0x20..0x27 */
+	RGX_HWPERF_PWR_EST_RANGE_FIRST_TYPE	= 0x20,
+	RGX_HWPERF_PWR_EST_REQUEST		= 0x20,
+	RGX_HWPERF_PWR_EST_READY		= 0x21,
+	RGX_HWPERF_PWR_EST_RESULT		= 0x22,
+	RGX_HWPERF_PWR_EST_RANGE_LAST_TYPE	= 0x22,
+
+	RGX_HWPERF_PWR_CHG				= 0x23,
+
+	/* HW_EVENT_RANGE1 0x28..0x2F, for accommodating new hardware events */
+	RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE	= 0x28,
+
+	RGX_HWPERF_HW_TDMKICK			= 0x28,
+	RGX_HWPERF_HW_TDMFINISHED		= 0x29,
+
+	RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE = 0x29,
+
+	/* context switch types 0x30..0x31 */
+	RGX_HWPERF_CSW_START			= 0x30,
+	RGX_HWPERF_CSW_FINISHED			= 0x31,
+
+	/* firmware misc 0x38..0x39 */
+	RGX_HWPERF_UFO					= 0x38,
+	RGX_HWPERF_FWACT				= 0x39,
+
+	/* last */
+	RGX_HWPERF_LAST_TYPE,
+
+	/* The highest value in this enumeration must be a power of two as it
+	 * is used in masks and a filter bit field (currently 64 bits long).
+	 */
+	RGX_HWPERF_MAX_TYPE				= 0x40
+} RGX_HWPERF_EVENT_TYPE;
+
+/* The event type values are incrementing integers for use as a shift ordinal
+ * in the event filtering process at the point events are generated.
+ * This scheme thus implies a limit of 63 event types.
+ */
+static_assert(RGX_HWPERF_LAST_TYPE < RGX_HWPERF_MAX_TYPE, "Too many HWPerf event types");
+
+/* Macro used to check if an event type ID is present in the known set of hardware type events */
+#define HWPERF_PACKET_IS_HW_TYPE(_etype)	(((_etype) >= RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) || \
+											 ((_etype) >= RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE))
+
+#define HWPERF_PACKET_IS_FW_TYPE(_etype)					\
+	((_etype) >= RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE &&	\
+	 (_etype) <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE)
+
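+/* Usage sketch (illustrative only; HandleFwEvent/HandleHwEvent are
+ * hypothetical consumer functions, and RGX_HWPERF_GET_TYPE() is defined
+ * further below):
+ *
+ *   IMG_UINT32 ui32Type = RGX_HWPERF_GET_TYPE(psHdr);
+ *   if (HWPERF_PACKET_IS_FW_TYPE(ui32Type))
+ *       HandleFwEvent(psHdr);
+ *   else if (HWPERF_PACKET_IS_HW_TYPE(ui32Type))
+ *       HandleHwEvent(psHdr);
+ */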
+
+typedef enum {
+	RGX_HWPERF_HOST_INVALID        = 0x00,
+	RGX_HWPERF_HOST_ENQ            = 0x01,
+	RGX_HWPERF_HOST_UFO            = 0x02,
+	RGX_HWPERF_HOST_ALLOC          = 0x03,
+	RGX_HWPERF_HOST_CLK_SYNC       = 0x04,
+	RGX_HWPERF_HOST_FREE           = 0x05,
+	RGX_HWPERF_HOST_MODIFY         = 0x06,
+
+	/* last */
+	RGX_HWPERF_HOST_LAST_TYPE,
+
+	/* The highest value in this enumeration must be a power of two as it
+	 * is used in masks and a filter bit field (currently 32 bits long).
+	 */
+	RGX_HWPERF_HOST_MAX_TYPE       = 0x20
+} RGX_HWPERF_HOST_EVENT_TYPE;
+
+/* The event type values are incrementing integers for use as a shift ordinal
+ * in the event filtering process at the point events are generated.
+ * This scheme thus implies a limit of 31 event types.
+ */
+static_assert(RGX_HWPERF_HOST_LAST_TYPE < RGX_HWPERF_HOST_MAX_TYPE, "Too many HWPerf host event types");
+
+
+/******************************************************************************
+ * 	Packet Header Format Version 2 Types
+ *****************************************************************************/
+
+/*! Major version number of the protocol in operation
+ */
+#define RGX_HWPERF_V2_FORMAT 2
+
+/*! Signature ASCII pattern 'HWP2' found in the first word of a HWPerfV2 packet
+ */
+#define HWPERF_PACKET_V2_SIG		0x48575032
+
+/*! Signature ASCII pattern 'HWPA' found in the first word of a HWPerfV2a packet
+ */
+#define HWPERF_PACKET_V2A_SIG		0x48575041
+
+/*! Signature ASCII pattern 'HWPB' found in the first word of a HWPerfV2b packet
+ */
+#define HWPERF_PACKET_V2B_SIG		0x48575042
+
+#define HWPERF_PACKET_ISVALID(_ptr) (((_ptr) == HWPERF_PACKET_V2_SIG) || ((_ptr) == HWPERF_PACKET_V2A_SIG) || ((_ptr) == HWPERF_PACKET_V2B_SIG))
+
+/*! Type defines the HWPerf packet header common to all events. */
+typedef struct
+{
+	IMG_UINT32  ui32Sig;        /*!< One of the HWPERF_PACKET_V2*_SIG signature values */
+	IMG_UINT32  ui32Size;       /*!< Overall packet size in bytes */
+	IMG_UINT32  eTypeId;        /*!< Event type information field */
+	IMG_UINT32  ui32Ordinal;    /*!< Sequential number of the packet */
+	IMG_UINT64  ui64Timestamp;  /*!< Event timestamp */
+} RGX_HWPERF_V2_PACKET_HDR, *RGX_PHWPERF_V2_PACKET_HDR;
+
+RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_V2_PACKET_HDR, ui64Timestamp);
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR);
+
+
+/*! Mask for use with the IMG_UINT32 ui32Size header field */
+#define RGX_HWPERF_SIZE_MASK         0xFFFFU
+
+/*! This macro defines the upper limit within which the size of the largest
+ * variable-length HWPerf packet must fall, currently 3KB (0xC00 bytes). This
+ * constant may be used to allocate a buffer to hold one packet.
+ * This upper limit is policed by packet-producing code.
+ */
+#define RGX_HWPERF_MAX_PACKET_SIZE   0xC00U
+
+/*! Defines an upper limit to the size of a variable length packet payload.
+ */
+#define RGX_HWPERF_MAX_PAYLOAD_SIZE	 ((IMG_UINT32)(RGX_HWPERF_MAX_PACKET_SIZE-\
+	sizeof(RGX_HWPERF_V2_PACKET_HDR)))
+
+
+/*! Macro which takes a structure name and provides the total packet size for
+ * a fixed-size payload packet, rounded up to 8 bytes to align packets
+ * for 64-bit architectures. */
+#define RGX_HWPERF_MAKE_SIZE_FIXED(_struct)       ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(sizeof(_struct), PVRSRVTL_PACKET_ALIGNMENT))))
+
+/*! Macro which takes the number of bytes written to the data payload of a
+ * variable-size payload packet and provides the total packet size, rounded
+ * up to 8 bytes to align packets for 64-bit architectures. */
+#define RGX_HWPERF_MAKE_SIZE_VARIABLE(_size)      ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(_size, PVRSRVTL_PACKET_ALIGNMENT))))
+
+/*! Macro to obtain the size of the packet */
+#define RGX_HWPERF_GET_SIZE(_packet_addr)         ((IMG_UINT16)(((_packet_addr)->ui32Size) & RGX_HWPERF_SIZE_MASK))
+
+/*! Macro to obtain the size of the packet data */
+#define RGX_HWPERF_GET_DATA_SIZE(_packet_addr)    (RGX_HWPERF_GET_SIZE(_packet_addr) - sizeof(RGX_HWPERF_V2_PACKET_HDR))
+
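+/* Usage sketch (illustrative only): psHdr is assumed to point at a writable
+ * RGX_HWPERF_V2_PACKET_HDR. For a fixed-size payload such as
+ * RGX_HWPERF_CLKS_CHG_DATA (defined below):
+ *
+ *   psHdr->ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_CLKS_CHG_DATA);
+ *   IMG_UINT16 ui16Pkt  = RGX_HWPERF_GET_SIZE(psHdr);      // whole packet
+ *   IMG_UINT16 ui16Data = RGX_HWPERF_GET_DATA_SIZE(psHdr); // payload only
+ */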
+
+
+/*! Masks for use with the IMG_UINT32 eTypeId header field */
+#define RGX_HWPERF_TYPEID_MASK			0x7FFFFU
+#define RGX_HWPERF_TYPEID_EVENT_MASK	0x07FFFU
+#define RGX_HWPERF_TYPEID_THREAD_MASK	0x08000U
+#define RGX_HWPERF_TYPEID_STREAM_MASK	0x70000U
+#define RGX_HWPERF_TYPEID_OSID_MASK		0xFF000000U
+
+/*! Meta thread macros for encoding the ID into the type field of a packet */
+#define RGX_HWPERF_META_THREAD_SHIFT	15U
+#define RGX_HWPERF_META_THREAD_ID0		0x0U
+#define RGX_HWPERF_META_THREAD_ID1		0x1U
+/*! Obsolete, kept for source compatibility */
+#define RGX_HWPERF_META_THREAD_MASK		0x1U
+/*! Stream ID macros for encoding the ID into the type field of a packet */
+#define RGX_HWPERF_STREAM_SHIFT			16U
+/*! OSID bit-shift macro used for encoding OSID into type field of a packet */
+#define RGX_HWPERF_OSID_SHIFT			24U
+typedef enum {
+	RGX_HWPERF_STREAM_ID0_FW,     /*!< Events from the Firmware/GPU */
+	RGX_HWPERF_STREAM_ID1_HOST,   /*!< Events from the Server host driver component */
+	RGX_HWPERF_STREAM_ID2_CLIENT, /*!< Events from the Client host driver component */
+	RGX_HWPERF_STREAM_ID_LAST,
+} RGX_HWPERF_STREAM_ID;
+
+/* Checks if all stream IDs can fit under RGX_HWPERF_TYPEID_STREAM_MASK. */
+static_assert((RGX_HWPERF_STREAM_ID_LAST - 1) < (RGX_HWPERF_TYPEID_STREAM_MASK >> RGX_HWPERF_STREAM_SHIFT),
+		"Too many HWPerf stream IDs.");
+
+/*! Macro used to set the packet type field, encoding the META thread ID (0|1), HWPerf stream ID, and OSID within it */
+#define RGX_HWPERF_MAKE_TYPEID(_stream,_type,_thread,_osid)\
+		((IMG_UINT32) ((RGX_HWPERF_TYPEID_STREAM_MASK&((_stream)<<RGX_HWPERF_STREAM_SHIFT)) | \
+		(RGX_HWPERF_TYPEID_THREAD_MASK&((_thread)<<RGX_HWPERF_META_THREAD_SHIFT)) | \
+		(RGX_HWPERF_TYPEID_EVENT_MASK&(_type)) | \
+		(RGX_HWPERF_TYPEID_OSID_MASK & ((_osid) << RGX_HWPERF_OSID_SHIFT))))
+
+/*! Obtains the event type that generated the packet */
+#define RGX_HWPERF_GET_TYPE(_packet_addr)            (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK)
+
+/*! Obtains the META Thread number that generated the packet */
+#define RGX_HWPERF_GET_THREAD_ID(_packet_addr)       (((((_packet_addr)->eTypeId)&RGX_HWPERF_TYPEID_THREAD_MASK) >> RGX_HWPERF_META_THREAD_SHIFT))
+
+/*! Obtains the guest OSID which resulted in packet generation */
+#define RGX_HWPERF_GET_OSID(_packet_addr)            (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_OSID_MASK) >> RGX_HWPERF_OSID_SHIFT)
+
+/*! Obtain stream id */
+#define RGX_HWPERF_GET_STREAM_ID(_packet_addr)       (((((_packet_addr)->eTypeId)&RGX_HWPERF_TYPEID_STREAM_MASK) >> RGX_HWPERF_STREAM_SHIFT))
+
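+/* Usage sketch (illustrative only; field values are arbitrary): encoding the
+ * eTypeId word and reading the fields back.
+ *
+ *   psHdr->eTypeId = RGX_HWPERF_MAKE_TYPEID(RGX_HWPERF_STREAM_ID0_FW,
+ *                        RGX_HWPERF_FW_BGSTART, RGX_HWPERF_META_THREAD_ID0,
+ *                        0); // OSID 0
+ *   // RGX_HWPERF_GET_TYPE(psHdr)      == RGX_HWPERF_FW_BGSTART
+ *   // RGX_HWPERF_GET_STREAM_ID(psHdr) == RGX_HWPERF_STREAM_ID0_FW
+ *   // RGX_HWPERF_GET_THREAD_ID(psHdr) == RGX_HWPERF_META_THREAD_ID0
+ */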
+/*! Macros to obtain a typed pointer to a packet or data structure given a packet address */
+#define RGX_HWPERF_GET_PACKET(_buffer_addr)            ((RGX_HWPERF_V2_PACKET_HDR*)  (_buffer_addr))
+#define RGX_HWPERF_GET_PACKET_DATA_BYTES(_packet_addr) ((IMG_BYTE*) ( ((IMG_BYTE*)(_packet_addr)) +sizeof(RGX_HWPERF_V2_PACKET_HDR) ) )
+#define RGX_HWPERF_GET_NEXT_PACKET(_packet_addr)       ((RGX_HWPERF_V2_PACKET_HDR*)  ( ((IMG_BYTE*)(_packet_addr))+(RGX_HWPERF_SIZE_MASK&(_packet_addr)->ui32Size)) )
+
+/*! Obtains a typed pointer to a packet header given the packed data address */
+#define RGX_HWPERF_GET_PACKET_HEADER(_packet_addr)     ((RGX_HWPERF_V2_PACKET_HDR*)  ( ((IMG_BYTE*)(_packet_addr)) - sizeof(RGX_HWPERF_V2_PACKET_HDR) ))
+
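+/* Usage sketch (illustrative only): walking the packets in a stream buffer.
+ * pui8Buf/ui32Bytes are assumed to hold a whole number of valid packets and
+ * ProcessPacket() is a hypothetical consumer.
+ *
+ *   RGX_HWPERF_V2_PACKET_HDR *psPkt = RGX_HWPERF_GET_PACKET(pui8Buf);
+ *   while (ui32Bytes >= sizeof(RGX_HWPERF_V2_PACKET_HDR))
+ *   {
+ *       ProcessPacket(psPkt);
+ *       ui32Bytes -= RGX_HWPERF_GET_SIZE(psPkt);
+ *       psPkt = RGX_HWPERF_GET_NEXT_PACKET(psPkt);
+ *   }
+ */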
+
+/******************************************************************************
+ * 	Other Common Defines
+ *****************************************************************************/
+
+/* This macro is not a real array size: it indicates that the array has a
+ * variable length known only at run time and always contains at least one
+ * element. The final size of the array is deduced from the size field of a
+ * packet header. */
+#define RGX_HWPERF_ONE_OR_MORE_ELEMENTS  1U
+
+/* This macro is not a real array size: it indicates that the array is
+ * optional and, if present, has a variable length known only at run time.
+ * The final size of the array is deduced from the size field of a packet
+ * header. */
+#define RGX_HWPERF_ZERO_OR_MORE_ELEMENTS 1U
+
+
+/*! Masks for use with the IMG_UINT32 ui32BlkInfo field */
+#define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK	0xFFFF0000U
+#define RGX_HWPERF_BLKINFO_BLKOFFSET_MASK	0x0000FFFFU
+
+/*! Shift for the NumBlocks and counter block offset field in ui32BlkInfo */
+#define RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT	16U
+#define RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT 0U
+
+/*! Macro used to set the block info word as a combination of two 16-bit integers */
+#define RGX_HWPERF_MAKE_BLKINFO(_numblks,_blkoffset) ((IMG_UINT32) ((RGX_HWPERF_BLKINFO_BLKCOUNT_MASK&((_numblks) << RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)) | (RGX_HWPERF_BLKINFO_BLKOFFSET_MASK&((_blkoffset) << RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT))))
+
+/*! Macro used to obtain the number of counter blocks present in the packet */
+#define RGX_HWPERF_GET_BLKCOUNT(_blkinfo)            ((_blkinfo & RGX_HWPERF_BLKINFO_BLKCOUNT_MASK) >> RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)
+
+/*! Obtains the offset of the counter block stream in the packet */
+#define RGX_HWPERF_GET_BLKOFFSET(_blkinfo)           ((_blkinfo & RGX_HWPERF_BLKINFO_BLKOFFSET_MASK) >> RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT)
+
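+/* Usage sketch (illustrative only; values are arbitrary): round-tripping the
+ * block info word.
+ *
+ *   IMG_UINT32 ui32BlkInfo = RGX_HWPERF_MAKE_BLKINFO(2, 48);
+ *   // RGX_HWPERF_GET_BLKCOUNT(ui32BlkInfo)  == 2  (counter blocks)
+ *   // RGX_HWPERF_GET_BLKOFFSET(ui32BlkInfo) == 48 (byte offset)
+ */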
+/* This macro gets the number of blocks depending on the packet version.
+ * Braced to match RGX_HWPERF_GET_CNTSTRM and avoid dangling-else misuse. */
+#define RGX_HWPERF_GET_NUMBLKS(_sig, _packet_data, _numblocks)	\
+{\
+	if (HWPERF_PACKET_V2B_SIG == (_sig))\
+	{\
+		(_numblocks) = RGX_HWPERF_GET_BLKCOUNT((_packet_data)->ui32BlkInfo);\
+	}\
+	else\
+	{\
+		IMG_UINT32 ui32VersionOffset = (((_sig) == HWPERF_PACKET_V2_SIG) ? 1 : 3);\
+		(_numblocks) = *(IMG_UINT16 *)(&((_packet_data)->ui32WorkTarget) + ui32VersionOffset);\
+	}\
+}
+
+/* This macro gets the counter stream pointer depending on the packet version */
+#define RGX_HWPERF_GET_CNTSTRM(_sig, _hw_packet_data, _cntstream_ptr)	\
+{\
+	if (HWPERF_PACKET_V2B_SIG == (_sig))\
+	{\
+		(_cntstream_ptr) = (IMG_UINT32 *)((IMG_BYTE *)(_hw_packet_data) + RGX_HWPERF_GET_BLKOFFSET((_hw_packet_data)->ui32BlkInfo));\
+	}\
+	else\
+	{\
+		IMG_UINT32 ui32BlkStreamOffsetInWords = (((_sig) == HWPERF_PACKET_V2_SIG) ? 6 : 8);\
+		(_cntstream_ptr) = ((IMG_UINT32 *)_hw_packet_data) + ui32BlkStreamOffsetInWords;\
+	}\
+}
+
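+/* Usage sketch (illustrative only): decoding the counter stream of a HW
+ * packet once the stream signature ui32Sig has been identified.
+ *
+ *   IMG_UINT32  ui32NumBlks;
+ *   IMG_UINT32 *pui32Stream;
+ *   RGX_HWPERF_GET_NUMBLKS(ui32Sig, psHWData, ui32NumBlks);
+ *   RGX_HWPERF_GET_CNTSTRM(ui32Sig, psHWData, pui32Stream);
+ *   // pui32Stream now addresses ui32NumBlks counter block records
+ */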
+/* This is the maximum number of frame contexts currently supported by the driver */
+#define RGX_HWPERF_HW_MAX_WORK_CONTEXT               2
+
+/*! Masks for use with the IMG_UINT32 ui32StreamInfo field of a UFO packet */
+#define RGX_HWPERF_UFO_STREAMSIZE_MASK 0xFFFF0000U
+#define RGX_HWPERF_UFO_STREAMOFFSET_MASK 0x0000FFFFU
+
+/*! Shift for the UFO count and data stream fields */
+#define RGX_HWPERF_UFO_STREAMSIZE_SHIFT 16U
+#define RGX_HWPERF_UFO_STREAMOFFSET_SHIFT 0U
+
+/*! Macro used to set UFO stream info word as a combination of two 16-bit integers */
+#define RGX_HWPERF_MAKE_UFOPKTINFO(_ssize,_soff)\
+        ((IMG_UINT32) ((RGX_HWPERF_UFO_STREAMSIZE_MASK&((_ssize) << RGX_HWPERF_UFO_STREAMSIZE_SHIFT)) |\
+        (RGX_HWPERF_UFO_STREAMOFFSET_MASK&((_soff) << RGX_HWPERF_UFO_STREAMOFFSET_SHIFT))))
+
+/*! Macro used to obtain the UFO count */
+#define RGX_HWPERF_GET_UFO_STREAMSIZE(_streaminfo)\
+        ((_streaminfo & RGX_HWPERF_UFO_STREAMSIZE_MASK) >> RGX_HWPERF_UFO_STREAMSIZE_SHIFT)
+
+/*! Obtains the offset of the UFO stream in the packet */
+#define RGX_HWPERF_GET_UFO_STREAMOFFSET(_streaminfo)\
+        ((_streaminfo & RGX_HWPERF_UFO_STREAMOFFSET_MASK) >> RGX_HWPERF_UFO_STREAMOFFSET_SHIFT)
+
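+/* Usage sketch (illustrative only; values are arbitrary):
+ *
+ *   IMG_UINT32 ui32Info = RGX_HWPERF_MAKE_UFOPKTINFO(3, 40);
+ *   // RGX_HWPERF_GET_UFO_STREAMSIZE(ui32Info)   == 3  (UFO tuples)
+ *   // RGX_HWPERF_GET_UFO_STREAMOFFSET(ui32Info) == 40 (byte offset)
+ */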
+
+
+/******************************************************************************
+ * 	Data Stream Common Types
+ *****************************************************************************/
+
+/* All the Data Masters that HWPerf is aware of. When a new DM is added to this
+ * list, it should be appended at the end to keep HWPerf data backward compatible */
+typedef enum _RGX_HWPERF_DM {
+
+	RGX_HWPERF_DM_GP,
+	RGX_HWPERF_DM_2D,
+	RGX_HWPERF_DM_TA,
+	RGX_HWPERF_DM_3D,
+	RGX_HWPERF_DM_CDM,
+	RGX_HWPERF_DM_RTU,
+	RGX_HWPERF_DM_SHG,
+	RGX_HWPERF_DM_TDM,
+
+	RGX_HWPERF_DM_LAST,
+
+	RGX_HWPERF_DM_INVALID = 0x1FFFFFFF
+} RGX_HWPERF_DM;
+
+/*! This structure holds the data of a firmware packet. */
+typedef struct
+{
+	RGX_HWPERF_DM eDM;				/*!< DataMaster identifier, see RGX_HWPERF_DM */
+	IMG_UINT32 ui32TxtActCyc;		/*!< Meta TXTACTCYC register value */
+	IMG_UINT32 ui32FWPerfCount0;	/*!< Meta/MIPS PERF_COUNT0 register */
+	IMG_UINT32 ui32FWPerfCount1;	/*!< Meta/MIPS PERF_COUNT1 register */
+	IMG_UINT32 ui32TimeCorrIndex;
+	IMG_UINT32 ui32Padding;
+} RGX_HWPERF_FW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA);
+
+/*! This structure holds the data of a hardware packet, including counters. */
+typedef struct
+{
+	IMG_UINT32 ui32DMCyc;         /*!< DataMaster cycle count register, 0 if none */
+	IMG_UINT32 ui32FrameNum;      /*!< Frame number, undefined on some DataMasters */
+	IMG_UINT32 ui32PID;           /*!< Process identifier */
+	IMG_UINT32 ui32DMContext;     /*!< GPU Data Master (FW) Context */
+	IMG_UINT32 ui32WorkTarget;    /*!< Render target for TA/3D; frame context for RTU; 0x0 otherwise */
+	IMG_UINT32 ui32ExtJobRef;     /*!< Externally provided job reference used to track work for debugging purposes */
+	IMG_UINT32 ui32IntJobRef;     /*!< Internally provided job reference used to track work for debugging purposes */
+	IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */
+	IMG_UINT32 ui32BlkInfo;       /*!< <31..16> NumBlocks <15..0> Counter block stream offset */
+	IMG_UINT32 ui32WorkCtx;       /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */
+	IMG_UINT32 ui32CtxPriority;   /*!< Context priority */
+	IMG_UINT32 ui32Padding1;      /* To ensure correct alignment */
+	IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Counter data */
+	IMG_UINT32 ui32Padding2;     /* To ensure correct alignment */
+} RGX_HWPERF_HW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA);
+
+/*! Mask for use with the aui32CountBlksStream field when decoding the
+ * counter block ID and mask word. */
+#define RGX_HWPERF_CNTBLK_ID_MASK	0xFFFF0000U
+#define RGX_HWPERF_CNTBLK_ID_SHIFT	16U
+
+/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address
+ * and stream index. May be used in decoding the counter block stream words of
+ * a RGX_HWPERF_HW_DATA structure. */
+#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) ((IMG_UINT16)(((_data_addr)->aui32CountBlksStream[(_idx)]&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT))
+#define RGX_HWPERF_GET_CNTBLK_IDW(_word)           ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT))
+
+/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address
+ * and stream index. May be used in decoding the counter block stream words
+ * of a RGX_HWPERF_HW_DATA structure. */
+#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) ((IMG_UINT16)((_data_addr)->aui32CountBlksStream[(_idx)]&((1<<RGX_CNTBLK_COUNTERS_MAX)-1)))
+#define RGX_HWPERF_GET_CNT_MASKW(_word)           ((IMG_UINT16)((_word)&((1<<RGX_CNTBLK_COUNTERS_MAX)-1)))
+
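+/* Usage sketch (illustrative only): decoding the first ID/mask word of a HW
+ * packet's counter stream, psHWData being a RGX_HWPERF_HW_DATA pointer.
+ *
+ *   IMG_UINT16 ui16BlkId = RGX_HWPERF_GET_CNTBLK_ID(psHWData, 0);
+ *   IMG_UINT16 ui16Mask  = RGX_HWPERF_GET_CNT_MASK(psHWData, 0);
+ *   // Bit n set in ui16Mask => a value for counter n of block ui16BlkId
+ *   // follows in the stream.
+ */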
+
+typedef struct
+{
+	RGX_HWPERF_DM	eDM;					/*!< DataMaster identifier, see RGX_HWPERF_DM */
+	IMG_UINT32		ui32DMContext;			/*!< GPU Data Master (FW) Context */
+	IMG_UINT32		ui32FrameNum;			/*!< Frame number */
+	IMG_UINT32		ui32TxtActCyc;          /*!< Meta TXTACTCYC register value */
+	IMG_UINT32		ui32PerfCycle;			/*!< Cycle count. Used to measure HW context store latency */
+	IMG_UINT32		ui32PerfPhase;			/*!< Phase. Used to determine geometry content */
+	IMG_UINT32		ui32Padding[2];			/*!< Padding to 8 DWords */
+} RGX_HWPERF_CSW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA);
+
+/*! Enumeration of clocks supporting this event */
+typedef enum
+{
+	RGX_HWPERF_CLKS_CHG_INVALID = 0,
+
+	RGX_HWPERF_CLKS_CHG_NAME_CORE = 1,
+
+	RGX_HWPERF_CLKS_CHG_LAST,
+} RGX_HWPERF_CLKS_CHG_NAME;
+
+/*! This structure holds the data of a clocks change packet. */
+typedef struct
+{
+	IMG_UINT64                ui64NewClockSpeed;         /*!< New Clock Speed (in Hz) */
+	RGX_HWPERF_CLKS_CHG_NAME  eClockName;                /*!< Clock name */
+	IMG_UINT32                ui32CalibratedClockSpeed;  /*!< Calibrated new GPU clock speed (in Hz) */
+	IMG_UINT64                ui64OSTimeStamp;           /*!< OSTimeStamp sampled by the host */
+	IMG_UINT64                ui64CRTimeStamp;           /*!< CRTimeStamp sampled by the host and
+	                                                          correlated to OSTimeStamp */
+} RGX_HWPERF_CLKS_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA);
+
+/*! Enumeration of GPU utilisation states supported by this event */
+typedef enum
+{
+	RGX_HWPERF_GPU_STATE_ACTIVE_LOW  = 0,
+	RGX_HWPERF_GPU_STATE_IDLE        = 1,
+	RGX_HWPERF_GPU_STATE_ACTIVE_HIGH = 2,
+	RGX_HWPERF_GPU_STATE_BLOCKED     = 3,
+	RGX_HWPERF_GPU_STATE_LAST,
+} RGX_HWPERF_GPU_STATE;
+
+/*! This structure holds the data of a GPU utilisation state change packet. */
+typedef struct
+{
+	RGX_HWPERF_GPU_STATE	eState;		/*!< New GPU utilisation state */
+	IMG_UINT32				uiUnused1;	/*!< Padding */
+	IMG_UINT32				uiUnused2;	/*!< Padding */
+	IMG_UINT32				uiUnused3;	/*!< Padding */
+} RGX_HWPERF_GPU_STATE_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA);
+
+
+/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */
+#define HWPERF_PWR_EST_V1_SIG	0x48504531
+
+/*! Macros to obtain a component field from a counter ID word */
+#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31)
+#define RGX_HWPERF_GET_PWR_EST_UNIT(_word)      (((_word)&0x0F000000)>>24)
+#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word)    ((_word)&0x0000FFFF)
+
+/*! This macro constructs a counter ID for a power estimate data stream from
+ * the component parts of: high word flag, unit id, counter number */
+#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _number)           \
+			((IMG_UINT32)((((_high)&0x1)<<31) | (((_unit)&0xF)<<24) | \
+			              ((_number)&0x0000FFFF)))
+
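+/* Usage sketch (illustrative only; values are arbitrary):
+ *
+ *   IMG_UINT32 ui32Id = RGX_HWPERF_MAKE_PWR_EST_COUNTERID(1, 0x2, 7);
+ *   // RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(ui32Id) == 1
+ *   // RGX_HWPERF_GET_PWR_EST_UNIT(ui32Id)      == 0x2
+ *   // RGX_HWPERF_GET_PWR_EST_NUMBER(ui32Id)    == 7
+ */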
+/*! This structure holds the data for a power estimate packet. */
+typedef struct
+{
+	IMG_UINT32  ui32StreamVersion;  /*!< HWPERF_PWR_EST_V1_SIG */
+	IMG_UINT32  ui32StreamSize;     /*!< Size of array in bytes of stream data
+	                                     held in the aui32StreamData member */
+	IMG_UINT32  aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Counter data */
+	IMG_UINT32  ui32Padding; /* To ensure correct alignment */
+} RGX_HWPERF_PWR_EST_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_EST_DATA);
+
+/*! Enumeration of the kinds of power change events that can occur */
+typedef enum
+{
+	RGX_HWPERF_PWR_UNDEFINED = 0,
+	RGX_HWPERF_PWR_ON        = 1, /*!< Whole device powered on */
+	RGX_HWPERF_PWR_OFF       = 2, /*!< Whole device powered off */
+	RGX_HWPERF_PWR_UP        = 3, /*!< Power turned on to a HW domain */
+	RGX_HWPERF_PWR_DOWN      = 4, /*!< Power turned off to a HW domain */
+
+	RGX_HWPERF_PWR_LAST,
+} RGX_HWPERF_PWR;
+
+/*! This structure holds the data of a power packet. */
+typedef struct
+{
+	RGX_HWPERF_PWR eChange;                  /*!< Defines the type of power change */
+	IMG_UINT32     ui32Domains;              /*!< HW Domains affected */
+	IMG_UINT64     ui64OSTimeStamp;          /*!< OSTimeStamp sampled by the host */
+	IMG_UINT64     ui64CRTimeStamp;          /*!< CRTimeStamp sampled by the host and
+	                                              correlated to OSTimeStamp */
+	IMG_UINT32     ui32CalibratedClockSpeed; /*!< GPU clock speed (in Hz) at the time
+	                                              the two timers were correlated */
+	IMG_UINT32     ui32Unused1;              /*!< Padding */
+} RGX_HWPERF_PWR_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_CHG_DATA);
+
+
+
+/*! Firmware Activity event. */
+typedef enum
+{
+	RGX_HWPERF_FWACT_EV_INVALID,            /*!< Invalid value. */
+	RGX_HWPERF_FWACT_EV_REGS_SET,           /*!< Registers set. */
+	RGX_HWPERF_FWACT_EV_HWR_DETECTED,       /*!< HWR detected. */
+	RGX_HWPERF_FWACT_EV_HWR_RESET_REQUIRED, /*!< Reset required. */
+	RGX_HWPERF_FWACT_EV_HWR_RECOVERED,      /*!< HWR recovered. */
+	RGX_HWPERF_FWACT_EV_HWR_FREELIST_READY, /*!< Freelist ready. */
+
+	RGX_HWPERF_FWACT_EV_LAST,               /*!< Number of elements. */
+} RGX_HWPERF_FWACT_EV;
+
+/*! Cause of the HWR event. */
+typedef enum
+{
+	RGX_HWPERF_HWR_REASON_INVALID,              /*!< Invalid value. */
+	RGX_HWPERF_HWR_REASON_LOCKUP,               /*!< Lockup. */
+	RGX_HWPERF_HWR_REASON_PAGEFAULT,            /*!< Page fault. */
+	RGX_HWPERF_HWR_REASON_POLLFAIL,             /*!< Poll fail. */
+	RGX_HWPERF_HWR_REASON_DEADLINE_OVERRUN,     /*!< Deadline overrun. */
+	RGX_HWPERF_HWR_REASON_CSW_DEADLINE_OVERRUN, /*!< Hard Context Switch deadline overrun. */
+
+	RGX_HWPERF_HWR_REASON_LAST                  /*!< Number of elements. */
+} RGX_HWPERF_HWR_REASON;
+
+/*! Sub-event's data. */
+typedef union
+{
+	struct
+	{
+		RGX_HWPERF_DM eDM;                 /*!< Data Master ID. */
+		RGX_HWPERF_HWR_REASON eReason;     /*!< Reason of the HWR. */
+		IMG_UINT32 ui32DMContext;          /*!< FW render context */
+	} sHWR;                                /*!< HWR sub-event data. */
+} RGX_HWPERF_FWACT_DETAIL;
+
+/*! This structure holds the data of a FW activity event packet */
+typedef struct
+{
+	RGX_HWPERF_FWACT_EV eEvType;           /*!< Event type. */
+	RGX_HWPERF_FWACT_DETAIL uFwActDetail;  /*!< Data of the sub-event. */
+} RGX_HWPERF_FWACT_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA);
+
+
+
+typedef enum {
+	RGX_HWPERF_UFO_EV_UPDATE,
+	RGX_HWPERF_UFO_EV_CHECK_SUCCESS,
+	RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS,
+	RGX_HWPERF_UFO_EV_CHECK_FAIL,
+	RGX_HWPERF_UFO_EV_PRCHECK_FAIL,
+
+	RGX_HWPERF_UFO_EV_LAST
+} RGX_HWPERF_UFO_EV;
+
+/*! Data stream tuple. */
+typedef union
+{
+	struct
+	{
+		IMG_UINT32 ui32FWAddr;
+		IMG_UINT32 ui32Value;
+	} sCheckSuccess;
+	struct
+	{
+		IMG_UINT32 ui32FWAddr;
+		IMG_UINT32 ui32Value;
+		IMG_UINT32 ui32Required;
+	} sCheckFail;
+	struct
+	{
+		IMG_UINT32 ui32FWAddr;
+		IMG_UINT32 ui32OldValue;
+		IMG_UINT32 ui32NewValue;
+	} sUpdate;
+} RGX_HWPERF_UFO_DATA_ELEMENT;
+
+/*! This structure holds the packet payload data for UFO event. */
+typedef struct
+{
+	RGX_HWPERF_UFO_EV eEvType;
+	IMG_UINT32 ui32TimeCorrIndex;
+	IMG_UINT32 ui32PID;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_UINT32 ui32IntJobRef;
+	IMG_UINT32 ui32DMContext;      /*!< GPU Data Master (FW) Context */
+	IMG_UINT32 ui32StreamInfo;
+	RGX_HWPERF_DM eDM;
+	IMG_UINT32 ui32Padding;
+	IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+} RGX_HWPERF_UFO_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA);
+
+
+
+typedef enum
+{
+	RGX_HWPERF_KICK_TYPE_TA3D, /*!< Replaced by separate TA and 3D types */
+	RGX_HWPERF_KICK_TYPE_TQ2D,
+	RGX_HWPERF_KICK_TYPE_TQ3D,
+	RGX_HWPERF_KICK_TYPE_CDM,
+	RGX_HWPERF_KICK_TYPE_RS,
+	RGX_HWPERF_KICK_TYPE_VRDM,
+	RGX_HWPERF_KICK_TYPE_TQTDM,
+	RGX_HWPERF_KICK_TYPE_SYNC,
+	RGX_HWPERF_KICK_TYPE_TA,
+	RGX_HWPERF_KICK_TYPE_3D,
+	RGX_HWPERF_KICK_TYPE_LAST
+} RGX_HWPERF_KICK_TYPE;
+
+typedef struct
+{
+	RGX_HWPERF_KICK_TYPE ui32EnqType;
+	IMG_UINT32 ui32PID;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_UINT32 ui32IntJobRef;
+	IMG_UINT32 ui32DMContext;      /*!< GPU Data Master (FW) Context */
+	IMG_UINT32 ui32Padding;        /* Align structure size to 8 bytes */
+	IMG_UINT32 ui32CheckFence_UID;
+	IMG_UINT32 ui32UpdateFence_UID;
+	IMG_UINT64 ui64DeadlineInus;  /*!< Workload deadline in system monotonic time, in microseconds */
+	IMG_UINT64 ui64CycleEstimate; /*!< Estimated cycle time for the workload */
+} RGX_HWPERF_HOST_ENQ_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+			  "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+	RGX_HWPERF_UFO_EV eEvType;
+	IMG_UINT32 ui32StreamInfo;
+	IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+	IMG_UINT32 ui32Padding;      /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_UFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+			  "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+	RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID,
+	RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC,      // PRIM
+	RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE,
+	RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, // Fence for use on GPU (SYNCP backed)
+	RGX_HWPERF_HOST_RESOURCE_TYPE_SYNCCP,
+
+	RGX_HWPERF_HOST_RESOURCE_TYPE_LAST
+} RGX_HWPERF_HOST_RESOURCE_TYPE;
+
+typedef union
+{
+	struct
+	{
+		IMG_UINT32 uiPid;
+		IMG_UINT32 ui32Timeline_UID1;
+		IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+	} sTimelineAlloc;
+
+	struct
+	{
+		IMG_UINT32 ui32Fence_UID;
+		IMG_UINT32 ui32CheckPt_FWAddr;
+		IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+	} sFenceAlloc;
+
+	struct
+	{
+		IMG_UINT32 ui32CheckPt_FWAddr;
+		IMG_UINT32 ui32Timeline_UID;
+		IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of original fence synCP created for */
+	} sSyncCheckPointAlloc;
+
+	struct
+	{
+		IMG_UINT32 ui32FWAddr;
+		IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+	} sSyncAlloc;
+} RGX_HWPERF_HOST_ALLOC_DETAIL;
+
+typedef struct
+{
+	RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType;
+	RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail;
+	IMG_UINT32 ui32Padding;       /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_ALLOC_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+			  "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef union
+{
+	struct
+	{
+		IMG_UINT32 uiPid;
+		IMG_UINT32 ui32Timeline_UID1;
+	} sTimelineDestroy;
+
+	struct
+	{
+		IMG_UINT32 ui32Fence_UID;
+	} sFenceDestroy;
+
+	struct
+	{
+		IMG_UINT32 ui32CheckPt_FWAddr;
+	} sSyncCheckPointFree;
+
+	struct
+	{
+		IMG_UINT32 ui32FWAddr;
+	} sSyncFree;
+} RGX_HWPERF_HOST_FREE_DETAIL;
+
+typedef struct
+{
+	RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType;
+	RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail;
+	IMG_UINT32 ui32Padding;       /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_FREE_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+			  "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+	IMG_UINT64 ui64CRTimestamp;
+	IMG_UINT64 ui64OSTimestamp;
+	IMG_UINT32 ui32ClockSpeed;
+	IMG_UINT32 ui32Padding;       /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_CLK_SYNC_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+			  "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+
+typedef union
+{
+	struct
+	{
+		IMG_UINT32 ui32NewFence_UID;
+		IMG_UINT32 ui32InFence1_UID;
+		IMG_UINT32 ui32InFence2_UID;
+		IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+	} sFenceMerge;
+} RGX_HWPERF_HOST_MODIFY_DETAIL;
+
+typedef struct
+{
+	RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType;
+	RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail;
+} RGX_HWPERF_HOST_MODIFY_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+			  "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+
+/*! This type is a union of the packet payload data structures associated
+ * with the various FW and Host events */
+typedef union
+{
+	RGX_HWPERF_FW_DATA             sFW;           /*!< Firmware event packet data */
+	RGX_HWPERF_HW_DATA             sHW;           /*!< Hardware event packet data */
+	RGX_HWPERF_CLKS_CHG_DATA       sCLKSCHG;      /*!< Clock change event packet data */
+	RGX_HWPERF_GPU_STATE_CHG_DATA  sGPUSTATECHG;  /*!< GPU utilisation state change event packet data */
+	RGX_HWPERF_PWR_EST_DATA        sPWREST;       /*!< Power estimate event packet data */
+	RGX_HWPERF_PWR_CHG_DATA        sPWR;          /*!< Power event packet data */
+	RGX_HWPERF_CSW_DATA			   sCSW;		  /*!< Context switch packet data */
+	RGX_HWPERF_UFO_DATA            sUFO;          /*!< UFO data */
+	RGX_HWPERF_FWACT_DATA          sFWACT;        /*!< Firmware activity event packet data */
+	/* Host events */
+	RGX_HWPERF_HOST_ENQ_DATA       sENQ;          /*!< Host ENQ data */
+	RGX_HWPERF_HOST_UFO_DATA       sHUFO;         /*!< Host UFO data */
+	RGX_HWPERF_HOST_ALLOC_DATA     sHALLOC;       /*!< Host Alloc data */
+	RGX_HWPERF_HOST_CLK_SYNC_DATA  sHCLKSYNC;     /*!< Host CLK_SYNC data */
+	RGX_HWPERF_HOST_FREE_DATA      sHFREE;        /*!< Host Free data */
+	RGX_HWPERF_HOST_MODIFY_DATA    sHMOD;         /*!< Host Modify data */
+} _RGX_HWPERF_V2_PACKET_DATA_, *RGX_PHWPERF_V2_PACKET_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(_RGX_HWPERF_V2_PACKET_DATA_);
+
+#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) ( ((IMG_BYTE*)(_packet_addr)) +sizeof(RGX_HWPERF_V2_PACKET_HDR) ) )
+
+
+/******************************************************************************
+ * 	API Types
+ *****************************************************************************/
+
+/*! Counter block IDs for all the hardware blocks with counters.
+ * Directly addressable blocks must have a value between 0..15.
+ * First hex digit represents a group number and the second hex digit represents
+ * the unit within the group. Group 0 is the direct group, all others are
+ * indirect groups.
+ */
+typedef enum
+{
+	/* Directly addressable counter blocks */
+	RGX_CNTBLK_ID_TA			= 0x0000,
+	RGX_CNTBLK_ID_RASTER		= 0x0001, /* Non-cluster grouping cores */
+	RGX_CNTBLK_ID_HUB			= 0x0002, /* Non-cluster grouping cores */
+	RGX_CNTBLK_ID_TORNADO		= 0x0003, /* XT cores */
+	RGX_CNTBLK_ID_JONES			= 0x0004, /* S7 cores */
+	RGX_CNTBLK_ID_BF			= 0x0005, /* Doppler unit */
+	RGX_CNTBLK_ID_BT			= 0x0006, /* Doppler unit */
+	RGX_CNTBLK_ID_RT			= 0x0007, /* Doppler unit */
+	RGX_CNTBLK_ID_SH			= 0x0008, /* Ray tracing unit */
+
+	RGX_CNTBLK_ID_DIRECT_LAST,
+
+	/* Indirectly addressable counter blocks */
+	RGX_CNTBLK_ID_TPU_MCU0		= 0x0010, /* Addressable by Dust */
+	RGX_CNTBLK_ID_TPU_MCU1		= 0x0011,
+	RGX_CNTBLK_ID_TPU_MCU2		= 0x0012,
+	RGX_CNTBLK_ID_TPU_MCU3		= 0x0013,
+	RGX_CNTBLK_ID_TPU_MCU4		= 0x0014,
+	RGX_CNTBLK_ID_TPU_MCU5		= 0x0015,
+	RGX_CNTBLK_ID_TPU_MCU6		= 0x0016,
+	RGX_CNTBLK_ID_TPU_MCU7		= 0x0017,
+	RGX_CNTBLK_ID_TPU_MCU_ALL	= 0x4010,
+
+	RGX_CNTBLK_ID_USC0			= 0x0020, /* Addressable by Cluster */
+	RGX_CNTBLK_ID_USC1			= 0x0021,
+	RGX_CNTBLK_ID_USC2			= 0x0022,
+	RGX_CNTBLK_ID_USC3			= 0x0023,
+	RGX_CNTBLK_ID_USC4			= 0x0024,
+	RGX_CNTBLK_ID_USC5			= 0x0025,
+	RGX_CNTBLK_ID_USC6			= 0x0026,
+	RGX_CNTBLK_ID_USC7			= 0x0027,
+	RGX_CNTBLK_ID_USC8			= 0x0028,
+	RGX_CNTBLK_ID_USC9			= 0x0029,
+	RGX_CNTBLK_ID_USC10			= 0x002A,
+	RGX_CNTBLK_ID_USC11			= 0x002B,
+	RGX_CNTBLK_ID_USC12			= 0x002C,
+	RGX_CNTBLK_ID_USC13			= 0x002D,
+	RGX_CNTBLK_ID_USC14			= 0x002E,
+	RGX_CNTBLK_ID_USC15			= 0x002F,
+	RGX_CNTBLK_ID_USC_ALL		= 0x4020,
+
+	RGX_CNTBLK_ID_TEXAS0		= 0x0030, /* Addressable by Phantom in XT, Dust in S7 */
+	RGX_CNTBLK_ID_TEXAS1		= 0x0031,
+	RGX_CNTBLK_ID_TEXAS2		= 0x0032,
+	RGX_CNTBLK_ID_TEXAS3		= 0x0033,
+	RGX_CNTBLK_ID_TEXAS4		= 0x0034,
+	RGX_CNTBLK_ID_TEXAS5		= 0x0035,
+	RGX_CNTBLK_ID_TEXAS6		= 0x0036,
+	RGX_CNTBLK_ID_TEXAS7		= 0x0037,
+	RGX_CNTBLK_ID_TEXAS_ALL		= 0x4030,
+
+	RGX_CNTBLK_ID_RASTER0		= 0x0040, /* Addressable by Phantom, XT only */
+	RGX_CNTBLK_ID_RASTER1		= 0x0041,
+	RGX_CNTBLK_ID_RASTER2		= 0x0042,
+	RGX_CNTBLK_ID_RASTER3		= 0x0043,
+	RGX_CNTBLK_ID_RASTER_ALL	= 0x4040,
+
+	RGX_CNTBLK_ID_BLACKPEARL0	= 0x0050, /* Addressable by Phantom, S7 only */
+	RGX_CNTBLK_ID_BLACKPEARL1	= 0x0051,
+	RGX_CNTBLK_ID_BLACKPEARL2	= 0x0052,
+	RGX_CNTBLK_ID_BLACKPEARL3	= 0x0053,
+	RGX_CNTBLK_ID_BLACKPEARL_ALL = 0x4050,
+
+	RGX_CNTBLK_ID_PBE0			= 0x0060, /* Addressable by Cluster, S7 only */
+	RGX_CNTBLK_ID_PBE1			= 0x0061,
+	RGX_CNTBLK_ID_PBE2			= 0x0062,
+	RGX_CNTBLK_ID_PBE3			= 0x0063,
+	RGX_CNTBLK_ID_PBE4			= 0x0064,
+	RGX_CNTBLK_ID_PBE5			= 0x0065,
+	RGX_CNTBLK_ID_PBE6			= 0x0066,
+	RGX_CNTBLK_ID_PBE7			= 0x0067,
+	RGX_CNTBLK_ID_PBE8			= 0x0068,
+	RGX_CNTBLK_ID_PBE9			= 0x0069,
+	RGX_CNTBLK_ID_PBE10			= 0x006A,
+	RGX_CNTBLK_ID_PBE11			= 0x006B,
+	RGX_CNTBLK_ID_PBE12			= 0x006C,
+	RGX_CNTBLK_ID_PBE13			= 0x006D,
+	RGX_CNTBLK_ID_PBE14			= 0x006E,
+	RGX_CNTBLK_ID_PBE15			= 0x006F,
+	RGX_CNTBLK_ID_PBE_ALL		= 0x4060,
+
+	RGX_CNTBLK_ID_BX_TU0		= 0x0070, /* Doppler unit, XT only */
+	RGX_CNTBLK_ID_BX_TU1		= 0x0071,
+	RGX_CNTBLK_ID_BX_TU2		= 0x0072,
+	RGX_CNTBLK_ID_BX_TU3		= 0x0073,
+	RGX_CNTBLK_ID_BX_TU_ALL		= 0x4070,
+
+	RGX_CNTBLK_ID_LAST			= 0x0074,
+
+	RGX_CNTBLK_ID_CUSTOM0		= 0x7FF0,
+	RGX_CNTBLK_ID_CUSTOM1		= 0x7FF1,
+	RGX_CNTBLK_ID_CUSTOM2		= 0x7FF2,
+	RGX_CNTBLK_ID_CUSTOM3		= 0x7FF3,
+	RGX_CNTBLK_ID_CUSTOM4_FW	= 0x7FF4	/* Custom block used for getting statistics held in the FW */
+
+} RGX_HWPERF_CNTBLK_ID;
+
+/* Masks for the counter block ID */
+#define RGX_CNTBLK_ID_GROUP_MASK     (0x00F0U)
+#define RGX_CNTBLK_ID_GROUP_SHIFT    (4)
+#define RGX_CNTBLK_ID_UNIT_ALL_MASK  (0x4000U)
+#define RGX_CNTBLK_ID_UNIT_MASK      (0x000FU)
+
+#define RGX_CNTBLK_INDIRECT_COUNT(_class, _n) ((RGX_CNTBLK_ID_ ## _class ## _n) - (RGX_CNTBLK_ID_ ## _class ## 0) + 1)
+
+/*! The number of layout blocks defined with configurable multiplexed
+ * performance counters, hence excludes custom counter blocks.
+ */
+#define RGX_HWPERF_MAX_DEFINED_BLKS  (\
+	RGX_CNTBLK_ID_DIRECT_LAST               +\
+	RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU,     7)+\
+	RGX_CNTBLK_INDIRECT_COUNT(USC,        15)+\
+	RGX_CNTBLK_INDIRECT_COUNT(TEXAS,       7)+\
+	RGX_CNTBLK_INDIRECT_COUNT(RASTER,      3)+\
+	RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL,  3)+\
+	RGX_CNTBLK_INDIRECT_COUNT(PBE,        15)+\
+	RGX_CNTBLK_INDIRECT_COUNT(BX_TU,       3) )
+
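+/* Usage sketch (illustrative only): decomposing a counter block ID, e.g.
+ * RGX_CNTBLK_ID_USC3 (0x0023).
+ *
+ *   IMG_UINT16 ui16Grp  = (RGX_CNTBLK_ID_USC3 & RGX_CNTBLK_ID_GROUP_MASK)
+ *                             >> RGX_CNTBLK_ID_GROUP_SHIFT;             // 2 (USC group)
+ *   IMG_UINT16 ui16Unit =  RGX_CNTBLK_ID_USC3 & RGX_CNTBLK_ID_UNIT_MASK; // 3
+ *   // RGX_CNTBLK_INDIRECT_COUNT(USC, 15) == 16 blocks in the USC group
+ */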
+#define RGX_HWPERF_EVENT_MASK_VALUE(e)      (((IMG_UINT64)1)<<(e))
+
+#define RGX_CUSTOM_FW_CNTRS	\
+		X(TA_LOCAL_FL_SIZE,		0x0,	RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED))	\
+		X(TA_GLOBAL_FL_SIZE,	0x1,	RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED))	\
+		X(3D_LOCAL_FL_SIZE,		0x2,	RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED))	\
+		X(3D_GLOBAL_FL_SIZE,	0x3,	RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
+										RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED))
+
+/*! Counter IDs for the firmware-held statistics */
+typedef enum
+{
+#define X(ctr, id, allow_mask)	RGX_CUSTOM_FW_CNTR_##ctr = id,
+	RGX_CUSTOM_FW_CNTRS
+#undef X
+
+	/* always the last entry in the list */
+	RGX_CUSTOM_FW_CNTR_LAST
+} RGX_HWPERF_CUSTOM_FW_CNTR_ID;
+
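+/* Usage sketch (illustrative only): the same X-macro list can be expanded
+ * again, e.g. to build a table of the per-counter event allow-masks.
+ *
+ *   static const IMG_UINT64 gaui64CntrAllowMask[] = {
+ *   #define X(ctr, id, allow_mask) allow_mask,
+ *       RGX_CUSTOM_FW_CNTRS
+ *   #undef X
+ *   };
+ */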
+/*! Identifier for each counter in a performance counting module */
+typedef enum
+{
+	RGX_CNTBLK_COUNTER0_ID	  = 0,
+	RGX_CNTBLK_COUNTER1_ID	  = 1,
+	RGX_CNTBLK_COUNTER2_ID	  = 2,
+	RGX_CNTBLK_COUNTER3_ID	  = 3,
+	RGX_CNTBLK_COUNTER4_ID	  = 4,
+	RGX_CNTBLK_COUNTER5_ID	  = 5,
+	/* MAX value used in server handling of counter config arrays */
+	RGX_CNTBLK_COUNTERS_MAX
+} RGX_HWPERF_CNTBLK_COUNTER_ID;
+
+/* Sets all the bits from bit _b1 to _b2 (inclusive) in an IMG_UINT64 value */
+#define _MASK_RANGE(_b1, _b2)	(((IMG_UINT64_C(1) << ((_b2)-(_b1)+1)) - 1) << _b1)
+#define MASK_RANGE(R)			_MASK_RANGE(R##_FIRST_TYPE, R##_LAST_TYPE)
+#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) ((IMG_UINT32)(1<<(e)))
+
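+/* Worked example (illustrative only): MASK_RANGE pastes the _FIRST_TYPE and
+ * _LAST_TYPE suffixes onto a range name, so
+ *
+ *   MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE)
+ *
+ * expands to _MASK_RANGE(0x01, 0x06) == 0x7E, i.e. bits 1..6 set.
+ */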
+/*! Mask macros for use with the RGXCtrlHWPerf() API.
+ */
+#define RGX_HWPERF_EVENT_MASK_NONE          (IMG_UINT64_C(0x0000000000000000))
+#define RGX_HWPERF_EVENT_MASK_ALL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+
+/*! HWPerf Firmware event masks
+ * Next macro covers all FW Start/End/Debug (SED) events.
+ */
+#define RGX_HWPERF_EVENT_MASK_FW_SED    (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE))
+
+#define RGX_HWPERF_EVENT_MASK_FW_UFO    (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO))
+#define RGX_HWPERF_EVENT_MASK_FW_CSW    (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\
+                                          RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED))
+#define RGX_HWPERF_EVENT_MASK_ALL_FW    (RGX_HWPERF_EVENT_MASK_FW_SED |\
+                                          RGX_HWPERF_EVENT_MASK_FW_UFO |\
+                                          RGX_HWPERF_EVENT_MASK_FW_CSW)
+
+#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC   (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC))
+#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\
+                                               MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\
+                                              ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC))
+
+#define RGX_HWPERF_EVENT_MASK_ALL_HW        (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\
+                                              RGX_HWPERF_EVENT_MASK_HW_PERIODIC)
+
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST   (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE))
+
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR       (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\
+                                              RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\
+                                              RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG))
+
+/*! HWPerf Host event masks
+ */
+#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ  (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ))
+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO   (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO))
+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR   (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC))
+
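+/* Usage sketch (illustrative only): composing a filter that passes all FW
+ * events plus the power events, suitable for a 64-bit event filter.
+ *
+ *   IMG_UINT64 ui64Filter = RGX_HWPERF_EVENT_MASK_ALL_FW |
+ *                           RGX_HWPERF_EVENT_MASK_ALL_PWR;
+ */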
+
+/*! Type used in the RGX API RGXConfigureAndEnableHWPerfCounters() */
+typedef struct _RGX_HWPERF_CONFIG_CNTBLK_
+{
+	/*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
+	IMG_UINT16 ui16BlockID;
+
+	/*! 4 or 6 LSBs used to select counters to configure in this block. */
+	IMG_UINT8  ui8CounterSelect;
+
+	/*! 4 or 6 LSBs used as MODE bits for the counters in the group. */
+	IMG_UINT8  ui8Mode;
+
+	/*! 5 or 6 LSBs used as the GROUP_SELECT value for the counter. */
+	IMG_UINT8  aui8GroupSelect[RGX_CNTBLK_COUNTERS_MAX];
+
+	/*! 16 LSBs used as the BIT_SELECT value for the counter. */
+	IMG_UINT16 aui16BitSelect[RGX_CNTBLK_COUNTERS_MAX];
+
+	/*! 14 LSBs used as the BATCH_MAX value for the counter. */
+	IMG_UINT32 aui32BatchMax[RGX_CNTBLK_COUNTERS_MAX];
+
+	/*! 14 LSBs used as the BATCH_MIN value for the counter. */
+	IMG_UINT32 aui32BatchMin[RGX_CNTBLK_COUNTERS_MAX];
+} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK);
+
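+/* Usage sketch (illustrative only; field values are arbitrary): selecting
+ * counters 0 and 1 of the TA block for configuration.
+ *
+ *   RGX_HWPERF_CONFIG_CNTBLK sBlk = {0};
+ *   sBlk.ui16BlockID      = RGX_CNTBLK_ID_TA;
+ *   sBlk.ui8CounterSelect = 0x3; // counters 0 and 1
+ *   sBlk.aui8GroupSelect[RGX_CNTBLK_COUNTER0_ID] = 0;
+ *   sBlk.aui16BitSelect[RGX_CNTBLK_COUNTER0_ID]  = 0;
+ */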
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* RGX_HWPERF_H_ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_memallocflags.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_memallocflags.h
new file mode 100644
index 0000000..130eb44
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_memallocflags.h
@@ -0,0 +1,49 @@
+/**************************************************************************/ /*!
+@File
+@Title          RGX memory allocation flags
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGX_MEMALLOCFLAGS_H_
+#define _RGX_MEMALLOCFLAGS_H_
+
+#define PMMETA_PROTECT          (1 << 0)      /* Memory that only the PM and Meta can access */
+#define FIRMWARE_CACHED         (1 << 1)      /* Memory that is cached in META/MIPS */
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_meta.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_meta.h
new file mode 100644
index 0000000..c7cf319
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_meta.h
@@ -0,0 +1,456 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX META definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX META helper definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_META_H__)
+#define __RGX_META_H__
+
+
+/***** The META HW register definitions in this file are updated manually *****/
+
+
+#include "img_defs.h"
+#include "km/rgxdefs_km.h"
+
+
+/************************************************************************
+* META registers and macros
+************************************************************************/
+#define	META_CR_CTRLREG_BASE(T)					(0x04800000 + 0x1000*(T))
+
+#define META_CR_TXPRIVEXT						(0x048000E8)
+#define META_CR_TXPRIVEXT_MINIM_EN				(0x1<<7)
+
+#define META_CR_SYSC_JTAG_THREAD				(0x04830030)
+#define META_CR_SYSC_JTAG_THREAD_PRIV_EN		(0x00000004)
+
+#define META_CR_PERF_COUNT0						(0x0480FFE0)
+#define META_CR_PERF_COUNT1						(0x0480FFE8)
+#define META_CR_PERF_COUNT_CTRL_SHIFT			(28)
+#define META_CR_PERF_COUNT_CTRL_MASK			(0xF0000000)
+#define META_CR_PERF_COUNT_CTRL_DCACHEHITS		(0x8 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEHITS		(0x9 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEMISS		(0xA << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICORE			(0xD << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_THR_SHIFT			(24)
+#define META_CR_PERF_COUNT_THR_MASK				(0x0F000000)
+#define META_CR_PERF_COUNT_THR_0				(0x1 << META_CR_PERF_COUNT_THR_SHIFT)
+#define META_CR_PERF_COUNT_THR_1				(0x2 << META_CR_PERF_COUNT_THR_SHIFT)
+
+#define META_CR_TxVECINT_BHALT					(0x04820500)
+#define META_CR_PERF_ICORE0						(0x0480FFD0)
+#define META_CR_PERF_ICORE1						(0x0480FFD8)
+#define META_CR_PERF_ICORE_DCACHEMISS			(0x8)
+
+/* The CTRL values above already include the CTRL shift, so it must not be applied again */
+#define META_CR_PERF_COUNT(CTRL, THR)			(META_CR_PERF_COUNT_CTRL_##CTRL | \
+												 ((THR) << META_CR_PERF_COUNT_THR_SHIFT))
+
+#define	META_CR_TXUXXRXDT_OFFSET				(META_CR_CTRLREG_BASE(0) + 0x0000FFF0)
+#define	META_CR_TXUXXRXRQ_OFFSET				(META_CR_CTRLREG_BASE(0) + 0x0000FFF8)
+
+#define META_CR_TXUXXRXRQ_DREADY_BIT			(0x80000000)	/* Poll for done */
+#define META_CR_TXUXXRXRQ_RDnWR_BIT  			(0x00010000)	/* Set for read  */
+#define META_CR_TXUXXRXRQ_TX_S       			(12)
+#define META_CR_TXUXXRXRQ_RX_S       			(4)
+#define META_CR_TXUXXRXRQ_UXX_S      			(0)
+
+#define META_CR_TXUIN_ID						(0x0)			/* Internal ctrl regs */
+#define META_CR_TXUD0_ID						(0x1)			/* Data unit regs */
+#define META_CR_TXUD1_ID						(0x2)			/* Data unit regs */
+#define META_CR_TXUA0_ID						(0x3)			/* Address unit regs */
+#define META_CR_TXUA1_ID						(0x4)			/* Address unit regs */
+#define META_CR_TXUPC_ID						(0x5)			/* PC registers */
+
+/* Macros to calculate register access values */
+#define META_CR_CORE_REG(Thr, RegNum, Unit)	(((Thr)			<< META_CR_TXUXXRXRQ_TX_S ) | \
+											 ((RegNum)		<< META_CR_TXUXXRXRQ_RX_S ) | \
+											 ((Unit)		<< META_CR_TXUXXRXRQ_UXX_S))
+
+#define META_CR_THR0_PC		META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID)
+#define META_CR_THR0_PCX	META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID)
+#define META_CR_THR0_SP		META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID)
+
+#define META_CR_THR1_PC		META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID)
+#define META_CR_THR1_PCX	META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID)
+#define META_CR_THR1_SP		META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID)
+
+#define SP_ACCESS(Thread)	META_CR_CORE_REG(Thread, 0, META_CR_TXUA0_ID)
+#define PC_ACCESS(Thread)	META_CR_CORE_REG(Thread, 0, META_CR_TXUPC_ID)
+
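+/* Usage sketch (illustrative only): a plausible read of thread 0's program
+ * counter through the indirect core-register interface - write the request
+ * word, poll for data ready, then read the data register. WriteReg/ReadReg
+ * are hypothetical register accessors.
+ *
+ *   WriteReg(META_CR_TXUXXRXRQ_OFFSET,
+ *            META_CR_THR0_PC | META_CR_TXUXXRXRQ_RDnWR_BIT);
+ *   // poll until META_CR_TXUXXRXRQ_DREADY_BIT is set, then:
+ *   ui32PC = ReadReg(META_CR_TXUXXRXDT_OFFSET);
+ */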
+#define	META_CR_COREREG_ENABLE			(0x0000000)
+#define	META_CR_COREREG_STATUS			(0x0000010)
+#define	META_CR_COREREG_DEFR			(0x00000A0)
+#define	META_CR_COREREG_PRIVEXT			(0x00000E8)
+
+#define	META_CR_T0ENABLE_OFFSET			(META_CR_CTRLREG_BASE(0) + META_CR_COREREG_ENABLE)
+#define	META_CR_T0STATUS_OFFSET			(META_CR_CTRLREG_BASE(0) + META_CR_COREREG_STATUS)
+#define	META_CR_T0DEFR_OFFSET			(META_CR_CTRLREG_BASE(0) + META_CR_COREREG_DEFR)
+#define	META_CR_T0PRIVEXT_OFFSET		(META_CR_CTRLREG_BASE(0) + META_CR_COREREG_PRIVEXT)
+
+#define	META_CR_T1ENABLE_OFFSET			(META_CR_CTRLREG_BASE(1) + META_CR_COREREG_ENABLE)
+#define	META_CR_T1STATUS_OFFSET			(META_CR_CTRLREG_BASE(1) + META_CR_COREREG_STATUS)
+#define	META_CR_T1DEFR_OFFSET			(META_CR_CTRLREG_BASE(1) + META_CR_COREREG_DEFR)
+#define	META_CR_T1PRIVEXT_OFFSET		(META_CR_CTRLREG_BASE(1) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_TXENABLE_ENABLE_BIT		(0x00000001)   /* Set if running */
+#define META_CR_TXSTATUS_PRIV			(0x00020000)
+#define META_CR_TXPRIVEXT_MINIM			(0x00000080)
+
+#define META_MEM_GLOBAL_RANGE_BIT				(0x80000000)
+
+
+/************************************************************************
+* META LDR Format
+************************************************************************/
+/* Block header structure */
+typedef struct 
+{
+	IMG_UINT32	ui32DevID;
+	IMG_UINT32	ui32SLCode;
+	IMG_UINT32	ui32SLData;
+	IMG_UINT16	ui16PLCtrl;
+	IMG_UINT16	ui16CRC;
+
+} RGX_META_LDR_BLOCK_HDR;
+
+/* Level 1 (high level) data stream block structure */
+typedef struct 
+{
+	IMG_UINT16	ui16Cmd;
+	IMG_UINT16	ui16Length;
+	IMG_UINT32	ui32Next;
+	IMG_UINT32	aui32CmdData[4];
+
+} RGX_META_LDR_L1_DATA_BLK;
+
+/* Level 2 data stream block structure */
+typedef struct
+{
+	IMG_UINT16	ui16Tag;
+	IMG_UINT16	ui16Length;
+	IMG_UINT32	aui32BlockData[4];
+
+} RGX_META_LDR_L2_DATA_BLK;
+
+/* Config command structure */
+typedef struct
+{
+	IMG_UINT32	ui32Type;
+	IMG_UINT32	aui32BlockData[4];
+
+} RGX_META_LDR_CFG_BLK;
+
+/* Block type definitions */
+#define RGX_META_LDR_COMMENT_TYPE_MASK			(0x0010)
+#define RGX_META_LDR_BLK_IS_COMMENT(X)			(((X) & RGX_META_LDR_COMMENT_TYPE_MASK) != 0)
+
+/* Command definitions
+	Value	Name			Description
+	0		LoadMem			Load memory with binary data.
+	1		LoadCore		Load a set of core registers.
+	2		LoadMMReg		Load a set of memory mapped registers.
+	3		StartThreads	Set each thread PC and SP, then enable threads.
+	4		ZeroMem			Zeroes a memory region.
+	5		Config			Perform a configuration command. */
+#define RGX_META_LDR_CMD_MASK				(0x000F)
+
+#define RGX_META_LDR_CMD_LOADMEM			(0x0000)
+#define RGX_META_LDR_CMD_LOADCORE			(0x0001)
+#define RGX_META_LDR_CMD_LOADMMREG			(0x0002)
+#define RGX_META_LDR_CMD_START_THREADS		(0x0003)
+#define RGX_META_LDR_CMD_ZEROMEM			(0x0004)
+#define RGX_META_LDR_CMD_CONFIG				(0x0005)
+
+/* Config Command definitions
+	Value	Name		Description
+	0		Pause		Pause for x times 100 instructions.
+	1		Read		Read a value from a register - no value return needed.
+						Utilises the side effects of issuing reads to certain registers.
+	2		Write		Write to a memory location.
+	3		MemSet		Set memory to a value.
+	4		MemCheck	Check memory for a specific value. */
+#define RGX_META_LDR_CFG_PAUSE			(0x0000)
+#define RGX_META_LDR_CFG_READ			(0x0001)
+#define RGX_META_LDR_CFG_WRITE			(0x0002)
+#define RGX_META_LDR_CFG_MEMSET			(0x0003)
+#define RGX_META_LDR_CFG_MEMCHECK		(0x0004)
+
+
+/************************************************************************
+* RGX FW segmented MMU definitions
+************************************************************************/
+/* All threads can access the segment */
+#define RGXFW_SEGMMU_ALLTHRS	(0xf << 8)
+/* Writable */
+#define RGXFW_SEGMMU_WRITEABLE	(0x1 << 1)
+/* All threads can access and writable */
+#define RGXFW_SEGMMU_ALLTHRS_WRITEABLE	(RGXFW_SEGMMU_ALLTHRS | RGXFW_SEGMMU_WRITEABLE)
+
+/* Direct map region 11 used for mapping GPU memory */
+#define RGXFW_SEGMMU_DMAP_GPU_ID			(11)
+#define RGXFW_SEGMMU_DMAP_GPU_ADDR_START	(0x07800000U)
+
+/* Segment IDs */
+#define RGXFW_SEGMMU_DATA_ID			(1)
+#define RGXFW_SEGMMU_BOOTLDR_ID			(2)
+#define RGXFW_SEGMMU_TEXT_ID			(RGXFW_SEGMMU_BOOTLDR_ID)
+
+#define RGXFW_SEGMMU_META_DM_ID			(0x7)
+
+
+/*
+ * SLC caching strategy in S7 is emitted through the segment MMU. All the segments
+ * configured through the macro RGXFW_SEGMMU_OUTADDR_TOP are CACHED in the SLC.
+ * The interface has been kept the same to simplify the code changes.
+ * The bifdm argument is ignored (no longer relevant) in S7.
+ */
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_ERN_49144(pers, coheren, mmu_ctx)  ( (((IMG_UINT64) ((pers)    & 0x3))  << 50) | \
+                                                                         (((IMG_UINT64) ((mmu_ctx) & 0xFF)) << 42) | \
+                                                                         (((IMG_UINT64) ((coheren) & 0x1))  << 40) )
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED_ERN_49144(mmu_ctx)      RGXFW_SEGMMU_OUTADDR_TOP_S7_ERN_49144(0x3, 0x0, mmu_ctx)
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED_ERN_49144(mmu_ctx)    RGXFW_SEGMMU_OUTADDR_TOP_S7_ERN_49144(0x0, 0x1, mmu_ctx)
+/* Set FW code/data cached in the SLC as default */
+#define RGXFW_SEGMMU_OUTADDR_TOP_ERN_49144(mmu_ctx, bifdm)             RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED_ERN_49144(mmu_ctx | (bifdm&0x0))
+
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_ERN_45914(pers, coheren, mmu_ctx)  ( (((IMG_UINT64) ((pers)    & 0x3))  << 52) | \
+                                                                         (((IMG_UINT64) ((mmu_ctx) & 0xFF)) << 44) | \
+                                                                         (((IMG_UINT64) ((coheren) & 0x1))  << 40) )
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED_ERN_45914(mmu_ctx)      RGXFW_SEGMMU_OUTADDR_TOP_S7_ERN_45914(0x3, 0x0, mmu_ctx)
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED_ERN_45914(mmu_ctx)    RGXFW_SEGMMU_OUTADDR_TOP_S7_ERN_45914(0x0, 0x1, mmu_ctx)
+/* Set FW code/data cached in the SLC as default */
+#define RGXFW_SEGMMU_OUTADDR_TOP_ERN_45914(mmu_ctx, bifdm)             RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED_ERN_45914(mmu_ctx | (bifdm&0x0))
+
+/* To configure the Page Catalog and BIF-DM fed into the BIF for Garten accesses through this segment */
+#define RGXFW_SEGMMU_OUTADDR_TOP_PRE_S7(pc, bifdm)              ( (((IMG_UINT64) ((pc)    & 0xF)) << 44) | \
+                                                                  (((IMG_UINT64) ((bifdm) & 0xF)) << 40) )
+
+#if !defined(__KERNEL__) && defined(RGX_FEATURE_META)
+#if defined(HW_ERN_49144)
+#define RGXFW_SEGMMU_OUTADDR_TOP                  RGXFW_SEGMMU_OUTADDR_TOP_ERN_49144
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED  RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED_ERN_49144
+#elif defined(HW_ERN_45914)
+#define RGXFW_SEGMMU_OUTADDR_TOP                  RGXFW_SEGMMU_OUTADDR_TOP_ERN_45914
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED  RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED_ERN_45914
+#else
+#define RGXFW_SEGMMU_OUTADDR_TOP                  RGXFW_SEGMMU_OUTADDR_TOP_PRE_S7
+#endif
+#endif
+
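+/* Illustrative example (derived from the macros above): composing the upper
+ * output-address bits for a pre-S7 segment, feeding page catalogue 0x1 and
+ * BIF-DM 0x0 into the wrapper.
+ *
+ *   IMG_UINT64 ui64OutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_PRE_S7(0x1, 0x0);
+ *   // == 1ULL << 44: PC in bits 47..44, BIF-DM in bits 43..40
+ */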
+
+/* META segments have 4kB minimum size */
+#define RGXFW_SEGMMU_ALIGN			(0x1000)
+
+/* Segmented MMU registers (n = segment id) */
+#define META_CR_MMCU_SEGMENTn_BASE(n)			(0x04850000 + (n)*0x10)
+#define META_CR_MMCU_SEGMENTn_LIMIT(n)			(0x04850004 + (n)*0x10)
+#define META_CR_MMCU_SEGMENTn_OUTA0(n)			(0x04850008 + (n)*0x10)
+#define META_CR_MMCU_SEGMENTn_OUTA1(n)			(0x0485000C + (n)*0x10)
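+
+/* Illustrative example (derived from the formulas above): for the FW data
+ * segment, n = RGXFW_SEGMMU_DATA_ID = 1, giving:
+ *
+ *   META_CR_MMCU_SEGMENTn_BASE(1)  == 0x04850010
+ *   META_CR_MMCU_SEGMENTn_LIMIT(1) == 0x04850014
+ */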
+
+/* The following defines must be recalculated if the Meta MMU segments
+ * used to access Host-FW data are changed
+ * Current combinations are:
+ * - SLC uncached, META cached,   FW base address 0x70000000
+ * - SLC uncached, META uncached, FW base address 0xF0000000
+ * - SLC cached,   META cached,   FW base address 0x10000000
+ * - SLC cached,   META uncached, FW base address 0x90000000
+ */
+#define RGXFW_SEGMMU_DATA_BASE_ADDRESS        (0x10000000)
+#define RGXFW_SEGMMU_DATA_META_CACHED         (0x0)
+#define RGXFW_SEGMMU_DATA_META_UNCACHED       (META_MEM_GLOBAL_RANGE_BIT) /* 0x80000000 */
+#define RGXFW_SEGMMU_DATA_META_CACHE_MASK     (META_MEM_GLOBAL_RANGE_BIT)
+/* For non-VIVT SLCs the cacheability of the FW data in the SLC is selected
+ * in the PTEs for the FW data, not in the Meta Segment MMU,
+ * which means these defines have no real effect in those cases */
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED     (0x0)
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED   (0x60000000)
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000)
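+
+/* Illustrative sketch (assumption): composing the "SLC uncached, META cached"
+ * combination from the table above.
+ *
+ *   IMG_UINT32 ui32FWDataAddr = RGXFW_SEGMMU_DATA_BASE_ADDRESS |
+ *                               RGXFW_SEGMMU_DATA_META_CACHED |
+ *                               RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED;
+ *   // == 0x70000000, matching the first combination listed above
+ */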
+
+
+/************************************************************************
+* RGX FW RGX MMU definitions
+************************************************************************/
+#if defined(RGX_FEATURE_SLC_VIVT) && defined(SUPPORT_TRUSTED_DEVICE)
+
+#define META_MMU_CONTEXT_MAPPING        (0x1) /* fw data */
+#define META_MMU_CONTEXT_MAPPING_CODE   (0x0) /* fw code */
+
+#else
+
+#define META_MMU_CONTEXT_MAPPING       (0x0)
+
+#endif
+
+/************************************************************************
+* RGX FW Bootloader defaults
+************************************************************************/
+#define RGXFW_BOOTLDR_META_ADDR		(0x40000000)
+#define RGXFW_BOOTLDR_DEVV_ADDR_0	(0xC0000000)
+#define RGXFW_BOOTLDR_DEVV_ADDR_1	(0x000000E1)
+#define RGXFW_BOOTLDR_DEVV_ADDR		((((IMG_UINT64) RGXFW_BOOTLDR_DEVV_ADDR_1) << 32) | RGXFW_BOOTLDR_DEVV_ADDR_0)
+#define RGXFW_BOOTLDR_LIMIT			(0x1FFFF000)
+
+/* Bootloader configuration offset is in dwords (512 bytes) */
+#define RGXFW_BOOTLDR_CONF_OFFSET	(0x80)
+
+
+/************************************************************************
+* RGX META Stack
+************************************************************************/
+#define RGX_META_STACK_SIZE  (0x1000)
+
+/************************************************************************
+ RGX META Core memory
+ ====================
+ Sections:
+    * Stack: Thread internal stack
+    * BSS: Internal/private FW memory (rgxfw_ctl.h and static vars)
+    * CCB Buf: DMA buffer to request CCB data
+    * Code: Functions marked with RGXFW_COREMEM_CODE_<xx>
+
+   +---------+  0
+   |         |
+   |  Stack  |
+   |         |
+   +---------+- RGX_META_COREMEM_2ND_STACK_ADDR
+   *         *
+   * 2nd Thr * #if RGXFW_META_SUPPORT_2ND_THREAD
+   *  Stack  *    
+   *         *
+   +---------+- RGX_META_COREMEM_BSS_ADDR
+   |         |
+   |   BSS   |
+   |         |
+   +---------+- RGX_META_COREMEM_CCBBUF_ADDR
+   *         *
+   * CCB Buf * #if RGX_FEATURE_META_DMA
+   *         *
+   +---------+- RGX_META_COREMEM_DATA_SIZE
+   |         |
+   |  Code   |
+   |         |
+   +---------+
+   /         /
+   / Unused  /
+   /         /
+   +---------+  RGX_META_COREMEM_SIZE
+************************************************************************/
+/* code and data both map to the same physical memory */
+#define RGX_META_COREMEM_CODE_ADDR   (0x80000000)
+#define RGX_META_COREMEM_DATA_ADDR   (0x82000000)
+
+#define RGX_META_COREMEM_STACK_ADDR      (RGX_META_COREMEM_DATA_ADDR)
+
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+	#define RGX_META_COREMEM_STACK_SIZE      (RGX_META_STACK_SIZE*2)
+	#define RGX_META_COREMEM_BSS_SIZE        (0xF40)
+	#define RGX_META_COREMEM_2ND_STACK_ADDR  (RGX_META_COREMEM_STACK_ADDR + RGX_META_STACK_SIZE)
+#else
+	#define RGX_META_COREMEM_STACK_SIZE      (RGX_META_STACK_SIZE)
+	#define RGX_META_COREMEM_BSS_SIZE        (0xE00)
+#endif
+
+#define RGX_META_COREMEM_BSS_ADDR            (RGX_META_COREMEM_STACK_ADDR + RGX_META_COREMEM_STACK_SIZE)
+
+#if defined(RGX_FEATURE_META_DMA)
+	#define RGX_META_COREMEM_CCBBUF_ADDR     (RGX_META_COREMEM_BSS_ADDR + RGX_META_COREMEM_BSS_SIZE)
+	#define RGX_META_COREMEM_CCBBUF_SIZE     (0x3C0)
+#else
+	#define RGX_META_COREMEM_CCBBUF_SIZE     (0x0)
+#endif
+
+#define RGX_META_COREMEM_DATA_SIZE       (RGX_META_COREMEM_STACK_SIZE + RGX_META_COREMEM_BSS_SIZE + RGX_META_COREMEM_CCBBUF_SIZE)
+
+#if defined (RGX_META_COREMEM_CODE)
+#define RGX_META_COREMEM_CODE_SIZE   (RGX_META_COREMEM_SIZE - RGX_META_COREMEM_DATA_SIZE)
+#endif
+
+/* because data and code share the same memory, base address for code is offset by the data */
+#define RGX_META_COREMEM_CODE_BADDR  (RGX_META_COREMEM_CODE_ADDR + RGX_META_COREMEM_DATA_SIZE)
+
+#define RGX_META_IS_COREMEM_CODE(A, B)  (((A) >= RGX_META_COREMEM_CODE_ADDR) && ((A) < (RGX_META_COREMEM_CODE_ADDR + (B))))
+#define RGX_META_IS_COREMEM_DATA(A, B)  (((A) >= RGX_META_COREMEM_DATA_ADDR) && ((A) < (RGX_META_COREMEM_DATA_ADDR + (B))))
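+
+/* Illustrative sketch (assumption): classifying a firmware address against
+ * the core memory map of a core with ui32CorememSize bytes (hypothetical):
+ *
+ *   if (RGX_META_IS_COREMEM_CODE(ui32Addr, ui32CorememSize))
+ *       ;  // address falls in the code view of the core memory
+ *   else if (RGX_META_IS_COREMEM_DATA(ui32Addr, ui32CorememSize))
+ *       ;  // address falls in the data view of the same physical memory
+ */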
+
+/************************************************************************
+* 2nd thread
+************************************************************************/
+#define RGXFW_THR1_PC		(0x18930000)
+#define RGXFW_THR1_SP		(0x78890000)
+
+/************************************************************************
+* META compatibility
+************************************************************************/
+
+#define META_CR_CORE_ID			(0x04831000)
+#define META_CR_CORE_ID_VER_SHIFT	(16U)
+#define META_CR_CORE_ID_VER_CLRMSK	(0XFF00FFFFU)
+
+#if !defined(__KERNEL__) && defined(RGX_FEATURE_META)
+
+	#if (RGX_FEATURE_META == MTP218)
+	#define RGX_CR_META_CORE_ID_VALUE 0x19
+	#elif (RGX_FEATURE_META == MTP219)
+	#define RGX_CR_META_CORE_ID_VALUE 0x1E
+	#elif (RGX_FEATURE_META == LTP218)
+	#define RGX_CR_META_CORE_ID_VALUE 0x1C
+	#elif (RGX_FEATURE_META == LTP217)
+	#define RGX_CR_META_CORE_ID_VALUE 0x1F
+	#else
+	#error "Unknown META ID"
+	#endif
+#else
+
+	#define RGX_CR_META_MTP218_CORE_ID_VALUE 0x19
+	#define RGX_CR_META_MTP219_CORE_ID_VALUE 0x1E
+	#define RGX_CR_META_LTP218_CORE_ID_VALUE 0x1C
+	#define RGX_CR_META_LTP217_CORE_ID_VALUE 0x1F
+
+#endif
+#define RGXFW_PROCESSOR_META        "META"
+
+
+#endif /*  __RGX_META_H__ */
+
+/******************************************************************************
+ End of file (rgx_meta.h)
+******************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_mips.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_mips.h
new file mode 100644
index 0000000..f2c3e43
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_mips.h
@@ -0,0 +1,456 @@
+/*************************************************************************/ /*!
+@File           rgx_mips.h
+@Title          RGX MIPS definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform       RGX
+@Description    RGX MIPS definitions, user space
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_MIPS_H__)
+#define __RGX_MIPS_H__
+
+/*
+ * Utility defines for memory management
+ */
+#define RGXMIPSFW_LOG2_PAGE_SIZE                 (12)
+#define RGXMIPSFW_LOG2_PAGE_SIZE_64K             (16)
+#define RGXMIPSFW_PAGE_SIZE                      (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_PAGE_MASK                      (RGXMIPSFW_PAGE_SIZE - 1)
+#define RGXMIPSFW_LOG2_PAGETABLE_PAGE_SIZE       (15)
+#define RGXMIPSFW_LOG2_PTE_ENTRY_SIZE            (2)
+/* Page mask MIPS register setting for bigger pages */
+#define RGXMIPSFW_PAGE_MASK_16K                  (0x00007800)
+#define RGXMIPSFW_PAGE_MASK_64K                  (0x0001F800)
+/* Total number of TLB entries */
+#define RGXMIPSFW_NUMBER_OF_TLB_ENTRIES          (16)
+/* "Uncached" caching policy */
+#define RGXMIPSFW_UNCACHED_CACHE_POLICY          (0X00000002)
+/* "Write-back write-allocate" caching policy */
+#define RGXMIPSFW_WRITEBACK_CACHE_POLICY         (0X00000003)
+/* "Write-through no write-allocate" caching policy */
+#define RGXMIPSFW_WRITETHROUGH_CACHE_POLICY      (0X00000001)
+/* Cached policy used by MIPS in case of physical bus on 32 bit */
+#define RGXMIPSFW_CACHED_POLICY                  (RGXMIPSFW_WRITEBACK_CACHE_POLICY)
+/* Cached policy used by MIPS in case of physical bus on more than 32 bit */
+#define RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT      (RGXMIPSFW_WRITETHROUGH_CACHE_POLICY)
+
+
+/*
+ * MIPS EntryLo/PTE format
+ */
+
+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_SHIFT     (31U)
+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK    (0X7FFFFFFF)
+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN        (0X80000000)
+
+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT     (30U)
+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK    (0XBFFFFFFF)
+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN        (0X40000000)
+
+/* Page Frame Number */
+#define RGXMIPSFW_ENTRYLO_PFN_SHIFT              (6)
+#define RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT         (12)
+/* Mask used for the MIPS Page Table in case of physical bus on 32 bit */
+#define RGXMIPSFW_ENTRYLO_PFN_MASK               (0x03FFFFC0)
+#define RGXMIPSFW_ENTRYLO_PFN_SIZE               (20)
+/* Mask used for the MIPS Page Table in case of physical bus on more than 32 bit */
+#define RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT   (0x3FFFFFC0)
+#define RGXMIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT   (24)
+#define RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT     (RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \
+                                                  RGXMIPSFW_ENTRYLO_PFN_SHIFT)
+
+#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT     (3U)
+#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK    (0XFFFFFFC7)
+
+#define RGXMIPSFW_ENTRYLO_DIRTY_SHIFT            (2U)
+#define RGXMIPSFW_ENTRYLO_DIRTY_CLRMSK           (0XFFFFFFFB)
+#define RGXMIPSFW_ENTRYLO_DIRTY_EN               (0X00000004)
+
+#define RGXMIPSFW_ENTRYLO_VALID_SHIFT            (1U)
+#define RGXMIPSFW_ENTRYLO_VALID_CLRMSK           (0XFFFFFFFD)
+#define RGXMIPSFW_ENTRYLO_VALID_EN               (0X00000002)
+
+#define RGXMIPSFW_ENTRYLO_GLOBAL_SHIFT           (0U)
+#define RGXMIPSFW_ENTRYLO_GLOBAL_CLRMSK          (0XFFFFFFFE)
+#define RGXMIPSFW_ENTRYLO_GLOBAL_EN              (0X00000001)
+
+#define RGXMIPSFW_ENTRYLO_DVG                    (RGXMIPSFW_ENTRYLO_DIRTY_EN | \
+                                                  RGXMIPSFW_ENTRYLO_VALID_EN | \
+                                                  RGXMIPSFW_ENTRYLO_GLOBAL_EN)
+#define RGXMIPSFW_ENTRYLO_UNCACHED               (RGXMIPSFW_UNCACHED_CACHE_POLICY << \
+                                                  RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT)
+#define RGXMIPSFW_ENTRYLO_DVG_UNCACHED           (RGXMIPSFW_ENTRYLO_DVG | RGXMIPSFW_ENTRYLO_UNCACHED)
+
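+/* Illustrative sketch (assumption): packing a 4kB-aligned physical address
+ * ui32PhysAddr (hypothetical) into an uncached, dirty/valid/global EntryLo
+ * value for the 32-bit bus case:
+ *
+ *   IMG_UINT32 ui32EntryLo =
+ *       ((ui32PhysAddr >> RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT) &
+ *        RGXMIPSFW_ENTRYLO_PFN_MASK) |
+ *       RGXMIPSFW_ENTRYLO_DVG_UNCACHED;
+ */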
+
+/* Remap Range Config Addr Out */
+/* These defines refer to the upper half of the Remap Range Config register */
+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_MASK      (0x0FFFFFF0)
+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT     (4)  /* wrt upper half of the register */
+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12)
+#define RGXMIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT     (RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \
+                                                  RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT)
+
+
+/*
+ * Firmware physical layout
+ */
+#define RGXMIPSFW_CODE_BASE_PAGE                 (0x0)
+#define RGXMIPSFW_CODE_OFFSET                    (RGXMIPSFW_CODE_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#if defined(SUPPORT_TRUSTED_DEVICE)
+/* Clean way of getting a 256K allocation (62 + 1 + 1 pages) without using too many ifdefs */
+/* This will need to be changed if the non-secure builds reach this amount of pages */
+#define RGXMIPSFW_CODE_NUMPAGES                  (62)
+#else
+#define RGXMIPSFW_CODE_NUMPAGES                  (42)
+#endif
+#define RGXMIPSFW_CODE_SIZE                      (RGXMIPSFW_CODE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE    (RGXMIPSFW_CODE_BASE_PAGE + RGXMIPSFW_CODE_NUMPAGES)
+#define RGXMIPSFW_EXCEPTIONSVECTORS_OFFSET       (RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_EXCEPTIONSVECTORS_NUMPAGES     (1)
+#define RGXMIPSFW_EXCEPTIONSVECTORS_SIZE         (RGXMIPSFW_EXCEPTIONSVECTORS_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_BOOT_NMI_CODE_BASE_PAGE        (RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE + RGXMIPSFW_EXCEPTIONSVECTORS_NUMPAGES)
+#define RGXMIPSFW_BOOT_NMI_CODE_OFFSET           (RGXMIPSFW_BOOT_NMI_CODE_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_BOOT_NMI_CODE_NUMPAGES         (1)
+#define RGXMIPSFW_BOOT_NMI_CODE_SIZE             (RGXMIPSFW_BOOT_NMI_CODE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+
+#define RGXMIPSFW_DATA_BASE_PAGE                 (0x0)
+#define RGXMIPSFW_DATA_OFFSET                    (RGXMIPSFW_DATA_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_DATA_NUMPAGES                  (17)
+#define RGXMIPSFW_DATA_SIZE                      (RGXMIPSFW_DATA_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE        (RGXMIPSFW_DATA_BASE_PAGE + RGXMIPSFW_DATA_NUMPAGES)
+#define RGXMIPSFW_BOOT_NMI_DATA_OFFSET           (RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_BOOT_NMI_DATA_NUMPAGES         (1)
+#define RGXMIPSFW_BOOT_NMI_DATA_SIZE             (RGXMIPSFW_BOOT_NMI_DATA_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_STACK_BASE_PAGE                (RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE + RGXMIPSFW_BOOT_NMI_DATA_NUMPAGES)
+#define RGXMIPSFW_STACK_OFFSET                   (RGXMIPSFW_STACK_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_STACK_NUMPAGES                 (1)
+#define RGXMIPSFW_STACK_SIZE                     (RGXMIPSFW_STACK_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+/*
+ * Pages to trampoline problematic physical addresses:
+ *   - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000
+ *   - RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000
+ *   - RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000
+ *   - (benign trampoline)               : 0x1FC0_3000
+ * that would otherwise be erroneously remapped by the MIPS wrapper
+ * (see "Firmware virtual layout and remap configuration" section below)
+ */
+
+#define RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES       (2)
+#define RGXMIPSFW_TRAMPOLINE_NUMPAGES            (1 << RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES)
+#define RGXMIPSFW_TRAMPOLINE_SIZE                (RGXMIPSFW_TRAMPOLINE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE   (RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES + RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR    (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+#define RGXMIPSFW_TRAMPOLINE_OFFSET(a)           ((a) - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+
+#define RGXMIPSFW_SENSITIVE_ADDR(a)              (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN == (~((1 << RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE) - 1) & (a)))
+
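+/* Illustrative examples (derived from the macro above): with the 16kB
+ * trampoline segment, an address is "sensitive" when its segment base
+ * collides with the boot remap input address:
+ *
+ *   RGXMIPSFW_SENSITIVE_ADDR(0x1FC02345)  // true:  within 0x1FC00000..0x1FC03FFF
+ *   RGXMIPSFW_SENSITIVE_ADDR(0x1FC04000)  // false: outside that segment
+ */
+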
+/*
+ * Firmware virtual layout and remap configuration
+ */
+/*
+ * For each remap region we define:
+ * - the virtual base used by the Firmware to access code/data through that region
+ * - the microAptivAP physical address correspondent to the virtual base address,
+ *   used as input address and remapped to the actual physical address
+ * - log2 of size of the region remapped by the MIPS wrapper, i.e. number of bits from
+ *   the bottom of the base input address that survive onto the output address
+ *   (this defines both the alignment and the maximum size of the remapped region)
+ * - one or more code/data segments within the remapped region
+ */
+
+/* Boot remap setup */
+#define RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE        (0xBFC00000)
+#define RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN        (0x1FC00000)
+#define RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE   (12)
+#define RGXMIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE     (RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE)
+
+/* Data remap setup */
+#define RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE        (0xBFC01000)
+#define RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN        (0x1FC01000)
+#define RGXMIPSFW_DATA_REMAP_LOG2_SEGMENT_SIZE   (12)
+#define RGXMIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE     (RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE)
+
+/* Code remap setup */
+#define RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE        (0x9FC02000)
+#define RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN        (0x1FC02000)
+#define RGXMIPSFW_CODE_REMAP_LOG2_SEGMENT_SIZE   (12)
+#define RGXMIPSFW_EXCEPTIONS_VIRTUAL_BASE        (RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE)
+
+/* Fixed TLB setup */
+#define RGXMIPSFW_PT_VIRTUAL_BASE                (0xCF000000)
+#define RGXMIPSFW_REGISTERS_VIRTUAL_BASE         (0xCF400000)
+#define RGXMIPSFW_STACK_VIRTUAL_BASE             (0xCF600000)
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+/* The extra fixed TLB entries are used in security builds for the FW code */
+#define RGXMIPSFW_NUMBER_OF_RESERVED_TLB         (5)
+#else
+#define RGXMIPSFW_NUMBER_OF_RESERVED_TLB         (3)
+#endif
+
+/* Firmware heap setup */
+#define RGXMIPSFW_FIRMWARE_HEAP_BASE             (0xC0000000)
+#define RGXMIPSFW_CODE_VIRTUAL_BASE              (RGXMIPSFW_FIRMWARE_HEAP_BASE)
+/* The data virtual base takes into account the exception vectors page
+ * and the boot code page mapped in the FW heap together with the FW code
+ * (we can only map Firmware code allocation as a whole) */
+#define RGXMIPSFW_DATA_VIRTUAL_BASE              (RGXMIPSFW_CODE_VIRTUAL_BASE + RGXMIPSFW_CODE_SIZE + \
+                                                  RGXMIPSFW_EXCEPTIONSVECTORS_SIZE + RGXMIPSFW_BOOT_NMI_CODE_SIZE)
+
+
+/*
+ * Bootloader configuration data
+ */
+/* Bootloader configuration offset within the bootloader/NMI data page */
+#define RGXMIPSFW_BOOTLDR_CONF_OFFSET                         (0x0)
+/* Offsets of bootloader configuration parameters in 64-bit words */
+#define RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET              (0x0)
+#define RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET              (0x1)
+#define RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET                 (0x2)
+#define RGXMIPSFW_RESERVED_FUTURE_OFFSET                      (0x3)
+#define RGXMIPSFW_FWINIT_VIRTADDR_OFFSET                      (0x4)
+
+/*
+ * MIPS Fence offset in the bootloader/NMI data page
+ */
+#define RGXMIPSFW_FENCE_OFFSET                                (0x80)
+
+/*
+ * NMI shared data
+ */
+/* Base address of the shared data within the bootloader/NMI data page */
+#define RGXMIPSFW_NMI_SHARED_DATA_BASE                        (0x100)
+/* Size used by Debug dump data */
+#define RGXMIPSFW_NMI_SHARED_SIZE                             (0x128)
+/* Offsets in the NMI shared area in 32-bit words */
+#define RGXMIPSFW_NMI_SYNC_FLAG_OFFSET                        (0x0)
+#define RGXMIPSFW_NMI_STATE_OFFSET                            (0x1)
+
+/*
+ * MIPS fault data
+ */
+/* Base address of the fault data within the bootloader/NMI data page */
+#define RGXMIPSFW_FAULT_DATA_BASE                             (0x280)
+
+/* The following definitions are excluded when compiling assembly sources */
+#if !defined (RGXMIPSFW_ASSEMBLY_CODE)
+#include "img_types.h"
+#include "km/rgxdefs_km.h"
+
+#define RGXMIPSFW_GET_OFFSET_IN_DWORDS(offset)                ((offset) / sizeof(IMG_UINT32))
+#define RGXMIPSFW_GET_OFFSET_IN_QWORDS(offset)                ((offset) / sizeof(IMG_UINT64))
+
+/* Used for compatibility checks */
+#define RGXMIPSFW_ARCHTYPE_VER_CLRMSK                         (0xFFFFE3FFU)
+#define RGXMIPSFW_ARCHTYPE_VER_SHIFT                          (10U)
+#define RGXMIPSFW_CORE_ID_VALUE                               (0x001U)
+#define RGXFW_PROCESSOR_MIPS                                  "MIPS"
+
+/* microAptivAP cache line size */
+#define RGXMIPSFW_MICROAPTIVEAP_CACHELINE_SIZE                (16U)
+
+/* The SOCIF transactions are identified with the top 16 bits of the physical address emitted by the MIPS */
+#define RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN           (16U)
+
+/* Values to put in the MIPS selectors for performance counters */
+#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0          (9U)   /* Icache accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_MISSES_C1            (9U)   /* Icache misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_ACCESSES_C0          (10U)  /* Dcache accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_MISSES_C1            (11U)  /* Dcache misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_ITLB_INSTR_ACCESSES_C0      (5U)  /* ITLB instruction accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_INSTR_MISSES_C1        (7U)  /* JTLB instruction access misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_INSTR_COMPLETED_C0          (1U)  /* Instructions completed in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_DATA_MISSES_C1         (8U)  /* JTLB data misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT                 (5U)  /* Shift for the Event field in the MIPS perf ctrl registers */
+/* Additional flags for performance counters. See MIPS manual for further reference */
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_USER_MODE             (8U)
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE           (2U)
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_EXL                   (1U)
+
+
+#define RGXMIPSFW_C0_NBHWIRQ	8
+
+/* Macros to decode C0_Cause register */
+#define RGXMIPSFW_C0_CAUSE_EXCCODE(CAUSE)       (((CAUSE) & 0x7c) >> 2)
+/* Use only when Coprocessor Unusable exception */
+#define RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(CAUSE) (((CAUSE) >> 28) & 0x3)
+#define RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(CAUSE) (((CAUSE) & 0x3fc00) >> 10)
+#define RGXMIPSFW_C0_CAUSE_FDCIPENDING          (1 << 21)
+#define RGXMIPSFW_C0_CAUSE_IV                   (1 << 23)
+#define RGXMIPSFW_C0_CAUSE_IC                   (1 << 25)
+#define RGXMIPSFW_C0_CAUSE_PCIPENDING           (1 << 26)
+#define RGXMIPSFW_C0_CAUSE_TIPENDING            (1 << 30)
+
+/* Macros to decode C0_Debug register */
+#define RGXMIPSFW_C0_DEBUG_EXCCODE(DEBUG) (((DEBUG) >> 10) & 0x1f)
+#define RGXMIPSFW_C0_DEBUG_DSS            (1 << 0)
+#define RGXMIPSFW_C0_DEBUG_DBP            (1 << 1)
+#define RGXMIPSFW_C0_DEBUG_DDBL           (1 << 2)
+#define RGXMIPSFW_C0_DEBUG_DDBS           (1 << 3)
+#define RGXMIPSFW_C0_DEBUG_DIB            (1 << 4)
+#define RGXMIPSFW_C0_DEBUG_DINT           (1 << 5)
+#define RGXMIPSFW_C0_DEBUG_DIBIMPR        (1 << 6)
+#define RGXMIPSFW_C0_DEBUG_DDBLIMPR       (1 << 18)
+#define RGXMIPSFW_C0_DEBUG_DDBSIMPR       (1 << 19)
+#define RGXMIPSFW_C0_DEBUG_IEXI           (1 << 20)
+#define RGXMIPSFW_C0_DEBUG_DBUSEP         (1 << 21)
+#define RGXMIPSFW_C0_DEBUG_CACHEEP        (1 << 22)
+#define RGXMIPSFW_C0_DEBUG_MCHECKP        (1 << 23)
+#define RGXMIPSFW_C0_DEBUG_IBUSEP         (1 << 24)
+#define RGXMIPSFW_C0_DEBUG_DM             (1 << 30)
+#define RGXMIPSFW_C0_DEBUG_DBD            (1 << 31)
+
+/* ELF format defines */
+#define ELF_PT_LOAD     (0x1U)   /* Program header identifier as Load */
+#define ELF_SHT_SYMTAB  (0x2U)   /* Section identifier as Symbol Table */
+#define ELF_SHT_STRTAB  (0x3U)   /* Section identifier as String Table */
+#define MAX_STRTAB_NUM  (0x8U)   /* Maximum number of string table in the firmware ELF file */
+
+
+/* Redefined structs of ELF format */
+typedef struct
+{
+	IMG_UINT8    ui32Eident[16];
+	IMG_UINT16   ui32Etype;
+	IMG_UINT16   ui32Emachine;
+	IMG_UINT32   ui32Eversion;
+	IMG_UINT32   ui32Eentry;
+	IMG_UINT32   ui32Ephoff;
+	IMG_UINT32   ui32Eshoff;
+	IMG_UINT32   ui32Eflags;
+	IMG_UINT16   ui32Eehsize;
+	IMG_UINT16   ui32Ephentsize;
+	IMG_UINT16   ui32Ephnum;
+	IMG_UINT16   ui32Eshentsize;
+	IMG_UINT16   ui32Eshnum;
+	IMG_UINT16   ui32Eshtrndx;
+} RGX_MIPS_ELF_HDR;
+
+
+typedef struct
+{
+	IMG_UINT32   ui32Stname;
+	IMG_UINT32   ui32Stvalue;
+	IMG_UINT32   ui32Stsize;
+	IMG_UINT8    ui32Stinfo;
+	IMG_UINT8    ui32Stother;
+	IMG_UINT16   ui32Stshndx;
+} RGX_MIPS_ELF_SYM;
+
+
+typedef struct
+{
+	IMG_UINT32   ui32Shname;
+	IMG_UINT32   ui32Shtype;
+	IMG_UINT32   ui32Shflags;
+	IMG_UINT32   ui32Shaddr;
+	IMG_UINT32   ui32Shoffset;
+	IMG_UINT32   ui32Shsize;
+	IMG_UINT32   ui32Shlink;
+	IMG_UINT32   ui32Shinfo;
+	IMG_UINT32   ui32Shaddralign;
+	IMG_UINT32   ui32Shentsize;
+} RGX_MIPS_ELF_SHDR;
+
+typedef struct
+{
+	IMG_UINT32   ui32Ptype;
+	IMG_UINT32   ui32Poffset;
+	IMG_UINT32   ui32Pvaddr;
+	IMG_UINT32   ui32Ppaddr;
+	IMG_UINT32   ui32Pfilesz;
+	IMG_UINT32   ui32Pmemsz;
+	IMG_UINT32   ui32Pflags;
+	IMG_UINT32   ui32Palign;
+} RGX_MIPS_ELF_PROGRAM_HDR;
+
+#define RGXMIPSFW_TLB_GET_MASK(ENTRY_PAGE_MASK) (((ENTRY_PAGE_MASK) >> 13) & 0xffffU)
+#define RGXMIPSFW_TLB_GET_VPN2(ENTRY_HI)        ((ENTRY_HI) >> 13)
+#define RGXMIPSFW_TLB_GET_COHERENCY(ENTRY_LO)   (((ENTRY_LO) >> 3) & 0x7U)
+#define RGXMIPSFW_TLB_GET_PFN(ENTRY_LO)         (((ENTRY_LO) >> 6) & 0xfffffU)
+#define RGXMIPSFW_TLB_GET_INHIBIT(ENTRY_LO)     (((ENTRY_LO) >> 30) & 0x3U)
+#define RGXMIPSFW_TLB_GET_DGV(ENTRY_LO)         ((ENTRY_LO) & 0x7U)
+#define RGXMIPSFW_TLB_GLOBAL                    (1U)
+#define RGXMIPSFW_TLB_VALID                     (1U << 1)
+#define RGXMIPSFW_TLB_DIRTY                     (1U << 2)
+#define RGXMIPSFW_TLB_XI                        (1U << 30)
+#define RGXMIPSFW_TLB_RI                        (1U << 31)
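+
+/* Illustrative sketch (assumption): decoding one half of a captured TLB
+ * entry; psEntry is a hypothetical pointer to the RGX_MIPS_TLB_ENTRY below.
+ *
+ *   IMG_UINT32 ui32PFN = RGXMIPSFW_TLB_GET_PFN(psEntry->ui32TLBLo0);
+ *   IMG_UINT32 ui32DVG = RGXMIPSFW_TLB_GET_DGV(psEntry->ui32TLBLo0);
+ *   IMG_BOOL   bValid  = (ui32DVG & RGXMIPSFW_TLB_VALID) != 0;
+ */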
+
+typedef struct {
+	IMG_UINT32 ui32TLBPageMask;
+	IMG_UINT32 ui32TLBHi;
+	IMG_UINT32 ui32TLBLo0;
+	IMG_UINT32 ui32TLBLo1;
+} RGX_MIPS_TLB_ENTRY;
+
+typedef struct {
+	IMG_UINT32 ui32ErrorEPC;
+	IMG_UINT32 ui32StatusRegister;
+	IMG_UINT32 ui32CauseRegister;
+	IMG_UINT32 ui32BadRegister;
+	IMG_UINT32 ui32EPC;
+	IMG_UINT32 ui32SP;
+	IMG_UINT32 ui32Debug;
+	IMG_UINT32 ui32DEPC;
+	IMG_UINT32 ui32BadInstr;
+	RGX_MIPS_TLB_ENTRY asTLB[RGXMIPSFW_NUMBER_OF_TLB_ENTRIES];
+} RGX_MIPS_STATE;
+
+typedef struct {
+	IMG_UINT32 ui32FaultPageInfo;
+	IMG_UINT32 ui32BadVAddr;
+	IMG_UINT32 ui32EntryLo0;
+	IMG_UINT32 ui32EntryLo1;
+} RGX_MIPS_FAULT_DATA;
+
+#endif  /* RGXMIPSFW_ASSEMBLY_CODE */
+
+
+#endif /* __RGX_MIPS_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_options.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_options.h
new file mode 100644
index 0000000..54417f2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgx_options.h
@@ -0,0 +1,216 @@
+/*************************************************************************/ /*!
+@File           rgx_options.h
+@Title          RGX build options
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Each build option listed here is packed into a dword which
+ * provides up to log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM
+ * and (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM.
+ * The corresponding bit is set if the build option
+ * was enabled at compile time.
+ *
+ * In order to extract the enabled build flags the INTERNAL_TEST
+ * switch should be enabled in a client program which includes this
+ * header. Then the client can test specific build flags by reading
+ * the bit value at ##OPTIONNAME##_SET_OFFSET in RGX_BUILD_OPTIONS_KM
+ * or RGX_BUILD_OPTIONS.
+ *
+ * IMPORTANT: add new options to unused bits or define a new dword
+ * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the
+ * bitfield remains backwards compatible.
+ */
+
+#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL
+
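+/* Illustrative sketch (assumption): a client built with INTERNAL_TEST could
+ * test an individual option bit like this (PDUMP shown; any *_SET_OFFSET
+ * below works the same way):
+ *
+ *   #if defined(INTERNAL_TEST)
+ *   IMG_BOOL bKMHasPDump = (RGX_BUILD_OPTIONS_KM & PDUMP_SET_OFFSET) != 0;
+ *   #endif
+ */
+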
+#if defined(NO_HARDWARE) || defined (INTERNAL_TEST)
+	#define NO_HARDWARE_SET_OFFSET	OPTIONS_BIT0
+	#define OPTIONS_BIT0		(0x1ul << 0)
+	#if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT0		0x0
+#endif /* NO_HARDWARE */
+
+
+#if defined(PDUMP) || defined (INTERNAL_TEST)
+	#define PDUMP_SET_OFFSET	OPTIONS_BIT1
+	#define OPTIONS_BIT1		(0x1ul << 1)
+	#if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT1		0x0
+#endif /* PDUMP */
+
+
+#if defined (INTERNAL_TEST)
+	#define UNUSED_SET_OFFSET	OPTIONS_BIT2
+	#define OPTIONS_BIT2		(0x1ul << 2)
+	#if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT2		0x0
+#endif /* INTERNAL_TEST */
+
+/* No longer used */
+#if defined (INTERNAL_TEST)
+	#define OPTIONS_BIT3		(0x1ul << 3)
+	#if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT3		0x0
+#endif
+
+
+#if defined(SUPPORT_RGX) || defined (INTERNAL_TEST)
+	#define SUPPORT_RGX_SET_OFFSET	OPTIONS_BIT4
+	#define OPTIONS_BIT4		(0x1ul << 4)
+	#if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT4		0x0
+#endif /* SUPPORT_RGX */
+
+
+#if defined(SUPPORT_SECURE_EXPORT) || defined (INTERNAL_TEST)
+	#define SUPPORT_SECURE_EXPORT_SET_OFFSET	OPTIONS_BIT5
+	#define OPTIONS_BIT5		(0x1ul << 5)
+	#if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT5		0x0
+#endif /* SUPPORT_SECURE_EXPORT */
+
+
+#if defined(SUPPORT_INSECURE_EXPORT) || defined (INTERNAL_TEST)
+	#define SUPPORT_INSECURE_EXPORT_SET_OFFSET	OPTIONS_BIT6
+	#define OPTIONS_BIT6		(0x1ul << 6)
+	#if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT6	0x0
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+
+#if defined(SUPPORT_VFP) || defined (INTERNAL_TEST)
+	#define SUPPORT_VFP_SET_OFFSET	OPTIONS_BIT7
+	#define OPTIONS_BIT7		(0x1ul << 7)
+	#if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT7		0x0
+#endif /* SUPPORT_VFP */
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined (INTERNAL_TEST)
+	#define SUPPORT_WORKLOAD_ESTIMATION_OFFSET	OPTIONS_BIT8
+	#define OPTIONS_BIT8		(0x1ul << 8)
+	#if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT8		0x0
+#endif /* SUPPORT_WORKLOAD_ESTIMATION */
+#define OPTIONS_WORKLOAD_ESTIMATION_MASK	(0x1ul << 8)
+
+#if defined(SUPPORT_PDVFS) || defined (INTERNAL_TEST)
+	#define SUPPORT_PDVFS_OFFSET	OPTIONS_BIT9
+	#define OPTIONS_BIT9		(0x1ul << 9)
+	#if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT9		0x0
+#endif /* SUPPORT_PDVFS */
+#define OPTIONS_PDVFS_MASK	(0x1ul << 9)
+
+#if defined(DEBUG) || defined (INTERNAL_TEST)
+	#define DEBUG_SET_OFFSET	OPTIONS_BIT10
+	#define OPTIONS_BIT10		(0x1ul << 10)
+	#if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT10		0x0
+#endif /* DEBUG */
+/* The bit position of this should be the
+ * same as DEBUG_SET_OFFSET option when
+ * defined */
+#define OPTIONS_DEBUG_MASK	(0x1ul << 10)
+
+
+#define RGX_BUILD_OPTIONS_KM	\
+	(OPTIONS_BIT0  |\
+	 OPTIONS_BIT1  |\
+	 OPTIONS_BIT2  |\
+	 OPTIONS_BIT3  |\
+	 OPTIONS_BIT4  |\
+	 OPTIONS_BIT6  |\
+	 OPTIONS_BIT7  |\
+	 OPTIONS_BIT8  |\
+	 OPTIONS_BIT9  |\
+	 OPTIONS_BIT10)
+
+
+#if defined(SUPPORT_PERCONTEXT_FREELIST) || defined (INTERNAL_TEST)
+	#define OPTIONS_BIT31		(0x1ul << 31)
+	#if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit must be outside the KM reserved range"
+	#endif
+	#define SUPPORT_PERCONTEXT_FREELIST_SET_OFFSET	OPTIONS_BIT31
+#else
+	#define OPTIONS_BIT31		0x0
+#endif /* SUPPORT_PERCONTEXT_FREELIST */
+
+#define _KM_RGX_BUILD_OPTIONS_ RGX_BUILD_OPTIONS
+
+#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31)
+
+#define OPTIONS_STRICT (RGX_BUILD_OPTIONS &                  \
+                        ~(OPTIONS_DEBUG_MASK               | \
+                          OPTIONS_WORKLOAD_ESTIMATION_MASK | \
+                          OPTIONS_PDVFS_MASK))
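+
+/* Illustrative sketch (assumption): comparing a client's build options
+ * (ui32ClientOptions, hypothetical) with the KM build while ignoring the
+ * mismatch-tolerant bits masked out above:
+ *
+ *   IMG_BOOL bCompatible =
+ *       (ui32ClientOptions & ~(OPTIONS_DEBUG_MASK |
+ *                              OPTIONS_WORKLOAD_ESTIMATION_MASK |
+ *                              OPTIONS_PDVFS_MASK)) == OPTIONS_STRICT;
+ */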
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgxscript.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgxscript.h
new file mode 100644
index 0000000..293e86c5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/rgxscript.h
@@ -0,0 +1,173 @@
+/*************************************************************************/ /*!
+@File           rgxscript.h
+@Title          RGX kernel services structures/functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX initialisation script definitions.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXSCRIPT_H__
+#define __RGXSCRIPT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#define	RGX_MAX_DEBUG_COMMANDS	(320)
+#define RGX_DBG_CMD_NAME_SIZE	(40)
+
+typedef	enum _RGX_INIT_OPERATION
+{
+	RGX_INIT_OP_ILLEGAL = 0,
+	RGX_INIT_OP_WRITE_HW_REG,
+	RGX_INIT_OP_POLL_64_HW_REG,
+	RGX_INIT_OP_POLL_HW_REG,
+	RGX_INIT_OP_COND_POLL_HW_REG,
+	RGX_INIT_OP_LOOP_POINT,
+	RGX_INIT_OP_COND_BRANCH,
+	RGX_INIT_OP_HALT,
+	RGX_INIT_OP_DBG_READ32_HW_REG,
+	RGX_INIT_OP_DBG_READ64_HW_REG,
+	RGX_INIT_OP_DBG_CALC,
+	RGX_INIT_OP_DBG_WAIT,
+	RGX_INIT_OP_DBG_STRING,
+	RGX_INIT_OP_PDUMP_HW_REG,
+} RGX_INIT_OPERATION;
+
+typedef union _RGX_INIT_COMMAND_
+{
+	RGX_INIT_OPERATION eOp;
+
+	struct {
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset;
+		IMG_UINT32 ui32Value;
+	} sWriteHWReg;
+
+	struct {
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset;
+		IMG_UINT32 ui32Value;
+	} sPDumpHWReg;
+	
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset;
+		IMG_UINT64 ui64Value;
+		IMG_UINT64 ui64Mask;
+	} sPoll64HWReg;
+
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset;
+		IMG_UINT32 ui32Value;
+		IMG_UINT32 ui32Mask;
+	} sPollHWReg;
+
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32CondOffset;
+		IMG_UINT32 ui32CondValue;
+		IMG_UINT32 ui32CondMask;
+		IMG_UINT32 ui32Offset;
+		IMG_UINT32 ui32Value;
+		IMG_UINT32 ui32Mask;
+	} sCondPollHWReg;
+
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+	} sLoopPoint;
+
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset;
+		IMG_UINT32 ui32Value;
+		IMG_UINT32 ui32Mask;
+
+	} sConditionalBranchPoint;
+
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset;
+		IMG_CHAR aszName[RGX_DBG_CMD_NAME_SIZE];
+	} sDBGReadHWReg;
+
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset1;
+		IMG_UINT32 ui32Offset2;
+		IMG_UINT32 ui32Offset3;
+		IMG_CHAR aszName[RGX_DBG_CMD_NAME_SIZE];
+	} sDBGCalc;
+
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32WaitInUs;
+	} sDBGWait;
+
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_CHAR aszString[RGX_DBG_CMD_NAME_SIZE];
+	} sDBGString;
+
+} RGX_INIT_COMMAND;
+
+typedef struct _RGX_INIT_SCRIPTS_
+{
+	RGX_INIT_COMMAND asDbgCommands[RGX_MAX_DEBUG_COMMANDS];
+} RGX_SCRIPTS;
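+
+/* Illustrative sketch (assumption): filling one script slot with a masked
+ * 32-bit register poll; the offset and values are hypothetical.
+ *
+ *   RGX_INIT_COMMAND sCmd;
+ *   sCmd.sPollHWReg.eOp        = RGX_INIT_OP_POLL_HW_REG;
+ *   sCmd.sPollHWReg.ui32Offset = 0x0000;  // register offset to poll
+ *   sCmd.sPollHWReg.ui32Value  = 0x1;     // expected value...
+ *   sCmd.sPollHWReg.ui32Mask   = 0x1;     // ...under this mask
+ */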
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __RGXSCRIPT_H__ */
+
+/*****************************************************************************
+ End of file (rgxscript.h)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/ri_typedefs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/ri_typedefs.h
new file mode 100644
index 0000000..66de0b5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/ri_typedefs.h
@@ -0,0 +1,55 @@
+/*************************************************************************/ /*!
+@File           ri_typedefs.h
+@Title          Resource Information (RI) Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Client side part of RI management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RI_TYPEDEFS_H
+#define RI_TYPEDEFS_H
+
+#include "img_types.h"
+
+#define RI_MAX_TEXT_LEN 96
+
+typedef struct RI_SUBLIST_ENTRY RI_ENTRY;
+typedef RI_ENTRY* RI_HANDLE;
+
+#endif /* #ifndef RI_TYPEDEFS_H */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/services_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/services_km.h
new file mode 100644
index 0000000..aca163b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/services_km.h
@@ -0,0 +1,149 @@
+/*************************************************************************/ /*!
+@File           services_km.h
+@Title          Services API Kernel mode Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exported services API details
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+*/ /**************************************************************************/
+
+#ifndef SERVICES_KM_H
+#define SERVICES_KM_H
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+/*! 4k page size definition */
+#define PVRSRV_4K_PAGE_SIZE					4096UL      /*!< Size of a 4K Page */
+#define PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT		12          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 16k page size definition */
+#define PVRSRV_16K_PAGE_SIZE					16384UL      /*!< Size of a 16K Page */
+#define PVRSRV_16K_PAGE_SIZE_ALIGNSHIFT		14          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 64k page size definition */
+#define PVRSRV_64K_PAGE_SIZE					65536UL      /*!< Size of a 64K Page */
+#define PVRSRV_64K_PAGE_SIZE_ALIGNSHIFT		16          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 256k page size definition */
+#define PVRSRV_256K_PAGE_SIZE					262144UL      /*!< Size of a 256K Page */
+#define PVRSRV_256K_PAGE_SIZE_ALIGNSHIFT		18          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 1MB page size definition */
+#define PVRSRV_1M_PAGE_SIZE					1048576UL      /*!< Size of a 1M Page */
+#define PVRSRV_1M_PAGE_SIZE_ALIGNSHIFT		20          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 2MB page size definition */
+#define PVRSRV_2M_PAGE_SIZE					2097152UL      /*!< Size of a 2M Page */
+#define PVRSRV_2M_PAGE_SIZE_ALIGNSHIFT		21          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+
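+/* Illustrative sketch (assumption): rounding an address ui64Addr
+ * (hypothetical) down to a page boundary with the shift constants above,
+ * e.g. for the 4K granule:
+ *
+ *   IMG_UINT64 ui64Aligned = (ui64Addr >> PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT)
+ *                                      << PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT;
+ */
+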
+/*!
+ * Forward declaration (look on connection.h)
+ */
+typedef struct _PVRSRV_DEV_CONNECTION_ PVRSRV_DEV_CONNECTION;
+
+/*!
+	Flags for Services connection.
+	Allows to define per-client policy for Services
+*/
+
+#define SRV_WORKEST_ENABLED             (1U << 2)  /*!< If Workload Estimation is enabled */
+#define SRV_PDVFS_ENABLED               (1U << 3)  /*!< If PDVFS is enabled */
+
+#define SRV_NO_HWPERF_CLIENT_STREAM     (1U << 4)  /*!< Don't create HWPerf for this connection */
+
+#define SRV_FLAGS_CLIENT_64BIT_COMPAT	(1U << 5)  /*!< This flag gets set if the client is
+                                                        64-bit compatible. */
+/* Size of pointer on a 64 bit machine */
+#define	POINTER_SIZE_64BIT	(8)
+
+/*
+ * Bits 20 - 27 are used to pass information needed for validation
+ * of the GPU Virtualisation Validation mechanism. In particular:
+ *
+ * Bits:
+ * [20 - 22]: OSid of the memory region that will be used for allocations
+ * [23 - 25]: OSid that will be emitted by the Firmware for all memory accesses
+ *            regarding that memory context.
+ *      [26]: If the AXI Protection register will be set to secure for that OSid
+ *      [27]: If the Emulator Wrapper Register checking for protection violation
+ *            will be set to secure for that OSid
+ */
+
+#define VIRTVAL_FLAG_OSID_SHIFT        (20)
+#define SRV_VIRTVAL_FLAG_OSID_MASK     (7U << VIRTVAL_FLAG_OSID_SHIFT)
+
+#define VIRTVAL_FLAG_OSIDREG_SHIFT     (23)
+#define SRV_VIRTVAL_FLAG_OSIDREG_MASK  (7U << VIRTVAL_FLAG_OSIDREG_SHIFT)
+
+#define VIRTVAL_FLAG_AXIPREG_SHIFT     (26)
+#define SRV_VIRTVAL_FLAG_AXIPREG_MASK  (1U << VIRTVAL_FLAG_AXIPREG_SHIFT)
+
+#define VIRTVAL_FLAG_AXIPTD_SHIFT      (27)
+#define SRV_VIRTVAL_FLAG_AXIPTD_MASK   (1U << VIRTVAL_FLAG_AXIPTD_SHIFT)
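+
+/* Illustrative sketch (assumption): packing an allocation OSid of 3 and a
+ * firmware OSid of 2 into hypothetical connection flags ui32Flags:
+ *
+ *   ui32Flags |= (3U << VIRTVAL_FLAG_OSID_SHIFT) & SRV_VIRTVAL_FLAG_OSID_MASK;
+ *   ui32Flags |= (2U << VIRTVAL_FLAG_OSIDREG_SHIFT) & SRV_VIRTVAL_FLAG_OSIDREG_MASK;
+ */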
+
+#define SRV_FLAGS_PDUMPCTRL             (1U << 31) /*!< PDump Ctrl client flag */
+
+/*
+    Pdump flags which are accessible to Services clients
+*/
+#define PDUMP_NONE          0x00000000UL /*!< No flags */
+
+#define PDUMP_CONT          0x40000000UL /*!< Output this entry always regardless of framed capture range,
+                                                          used by client applications being dumped. */
+#define PDUMP_PERSIST       0x80000000UL /*!< Output this entry always regardless of app and range,
+                                                          used by persistent resources created after
+                                                          driver initialisation that must appear in
+                                                          all PDump captures in that session. */
+
+/* Status of the device. */
+typedef enum
+{
+	PVRSRV_DEVICE_STATUS_UNKNOWN,        /* status of the device is unknown */
+	PVRSRV_DEVICE_STATUS_OK,             /* the device is operational */
+	PVRSRV_DEVICE_STATUS_NOT_RESPONDING, /* the device is not responding */
+	PVRSRV_DEVICE_STATUS_DEVICE_ERROR    /* the device is not operational */
+} PVRSRV_DEVICE_STATUS;
+
+#endif /* SERVICES_KM_H */
+/**************************************************************************//**
+End of file (services_km.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/servicesext.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/servicesext.h
new file mode 100644
index 0000000..a6537a0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/servicesext.h
@@ -0,0 +1,171 @@
+/*************************************************************************/ /*!
+@File           servicesext.h
+@Title          Services definitions required by external drivers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides services data structures, defines and prototypes
+                required by external drivers
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__SERVICESEXT_H__)
+#define __SERVICESEXT_H__
+
+/* include/ */
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_3dtypes.h"
+#include "pvrsrv_device_types.h"
+
+
+/*
+ * Lock buffer read/write flags
+ */
+#define PVRSRV_LOCKFLG_READONLY     	(1)		/*!< The locking process will only read the locked surface */
+
+/*!
+ *****************************************************************************
+ *	Services State
+ *****************************************************************************/
+typedef enum _PVRSRV_SERVICES_STATE_
+{
+	PVRSRV_SERVICES_STATE_OK = 0,
+	PVRSRV_SERVICES_STATE_BAD,
+} PVRSRV_SERVICES_STATE;
+
+
+/*!
+ *****************************************************************************
+ *	States for power management
+ *****************************************************************************/
+/*!
+  System Power State Enum
+ */
+typedef enum _PVRSRV_SYS_POWER_STATE_
+{
+	PVRSRV_SYS_POWER_STATE_Unspecified		= -1,	/*!< Unspecified : Uninitialised */
+	PVRSRV_SYS_POWER_STATE_OFF				= 0,	/*!< Off */
+	PVRSRV_SYS_POWER_STATE_ON				= 1,	/*!< On */
+
+	PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff   /*!< Force enum to be at least 32-bits wide */
+
+} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE; /*!< Typedef for ptr to PVRSRV_SYS_POWER_STATE */
+
+/*!
+  Device Power State Enum
+ */
+typedef enum _PVRSRV_DEV_POWER_STATE_
+{
+	PVRSRV_DEV_POWER_STATE_DEFAULT	= -1,	/*!< Default state for the device */
+	PVRSRV_DEV_POWER_STATE_OFF		= 0,	/*!< Unpowered */
+	PVRSRV_DEV_POWER_STATE_ON		= 1,	/*!< Running */
+
+	PVRSRV_DEV_POWER_STATE_FORCE_I32 = 0x7fffffff   /*!< Force enum to be at least 32-bits wide */
+
+} PVRSRV_DEV_POWER_STATE, *PPVRSRV_DEV_POWER_STATE;	/*!< Typedef for ptr to PVRSRV_DEV_POWER_STATE */ /* PRQA S 3205 */
+
+
+/* Power transition handler prototypes */
+
+/*! 
+  Typedef for a pointer to a Function that will be called before a transition
+  from one power state to another. See also PFN_POST_POWER.
+ */
+typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE				hDevHandle,
+									   PVRSRV_DEV_POWER_STATE	eNewPowerState,
+									   PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+									   IMG_BOOL					bForced);
+/*! 
+  Typedef for a pointer to a Function that will be called after a transition
+  from one power state to another. See also PFN_PRE_POWER.
+ */
+typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE				hDevHandle,
+										PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+										IMG_BOOL				bForced);
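+
+/* Illustrative sketch (not part of the original header): a device-specific
+ * pre-power handler matching PFN_PRE_POWER. The function name and its body
+ * are hypothetical.
+ */
+#if 0
+static PVRSRV_ERROR DevicePrePower(IMG_HANDLE hDevHandle,
+                                   PVRSRV_DEV_POWER_STATE eNewPowerState,
+                                   PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+                                   IMG_BOOL bForced)
+{
+	if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF &&
+	    eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)
+	{
+		/* Quiesce the device before power is removed. */
+	}
+	return PVRSRV_OK;
+}
+#endif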
+
+/* Clock speed handler prototypes */
+
+/*!
+  Typedef for a pointer to a Function that will be called before a transition
+  from one clock speed to another. See also PFN_POST_CLOCKSPEED_CHANGE.
+ */
+typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE				hDevHandle,
+												   PVRSRV_DEV_POWER_STATE	eCurrentPowerState);
+
+/*!
+  Typedef for a pointer to a Function that will be called after a transition
+  from one clock speed to another. See also PFN_PRE_CLOCKSPEED_CHANGE.
+ */
+typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE				hDevHandle,
+													PVRSRV_DEV_POWER_STATE	eCurrentPowerState);
+
+/*!
+  Typedef for a pointer to a function that will be called to transition the device
+  to a forced idle state. Used in unison with (forced) power requests, DVFS and cluster count changes.
+ */
+typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_REQUEST) (IMG_HANDLE				hDevHandle,
+							IMG_BOOL			bDeviceOffPermitted);
+
+/*!
+  Typedef for a pointer to a function that will be called to cancel a forced idle state
+  and return the firmware back to a state where the hardware can be scheduled.
+ */
+typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_CANCEL_REQUEST) (IMG_HANDLE			hDevHandle);
+
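+/*!
+  Typedef for a pointer to a function that will be called to request a change
+  to the number of active clusters (dusts) on the device.
+ */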
+typedef PVRSRV_ERROR (*PFN_DUST_COUNT_REQUEST) (IMG_HANDLE			hDevHandle,
+						IMG_UINT32			ui32DustCount);
+
+/*!
+ *****************************************************************************
+ * This structure is used for OS independent registry (profile) access
+ *****************************************************************************/
+
+typedef struct _PVRSRV_REGISTRY_INFO
+{
+	IMG_UINT32			ui32DevCookie;
+	IMG_PCHAR			pszKey;
+	IMG_PCHAR			pszValue;
+	IMG_PCHAR			pszBuf;
+	IMG_UINT32			ui32BufSize;
+} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
+
+#endif /* __SERVICESEXT_H__ */
+/*****************************************************************************
+ End of file (servicesext.h)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/sync_checkpoint_external.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/sync_checkpoint_external.h
new file mode 100644
index 0000000..951e4d4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/sync_checkpoint_external.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services external synchronisation checkpoint interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines synchronisation checkpoint structures that are visible
+				internally and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_EXTERNAL_
+#define _SYNC_CHECKPOINT_EXTERNAL_
+
+#ifndef _CHECKPOINT_TYPES_
+#define _CHECKPOINT_TYPES_
+typedef struct _SYNC_CHECKPOINT_CONTEXT *PSYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT *PSYNC_CHECKPOINT;
+#endif
+
+/* PVRSRV_SYNC_CHECKPOINT states.
+ * The OS native sync implementation should call pfnIsSignalled() to determine if a
+ * PVRSRV_SYNC_CHECKPOINT has signalled (which will return an IMG_BOOL), but it may
+ * set the state of a PVRSRV_SYNC_CHECKPOINT (one currently in the NOT_SIGNALLED
+ * state) when that PVRSRV_SYNC_CHECKPOINT represents a foreign sync.
+ */
+typedef enum
+{
+    PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED = 0x000,  /*!< checkpoint has not signalled */
+    PVRSRV_SYNC_CHECKPOINT_SIGNALLED     = 0x519,  /*!< checkpoint has signalled */
+    PVRSRV_SYNC_CHECKPOINT_ERRORED       = 0xeff   /*!< checkpoint has been errored */
+} PVRSRV_SYNC_CHECKPOINT_STATE;
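+
+/* Illustrative sketch (not part of the original header): how a native sync
+ * implementation might mark a foreign sync's checkpoint once the foreign
+ * fence completes (variable names hypothetical).
+ */
+#if 0
+if (eState == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED && bForeignFenceFired)
+{
+	eState = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+}
+#endif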
+
+#endif /* _SYNC_CHECKPOINT_EXTERNAL_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/apollo_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/apollo_regs.h
new file mode 100644
index 0000000..1b63d2c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/apollo_regs.h
@@ -0,0 +1,212 @@
+/*************************************************************************/ /*!
+@File		
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__APOLLO_REGS_H__)
+#define __APOLLO_REGS_H__
+
+/*
+ * The core clock speed is passed through a multiplier depending on the TC version.
+ *
+ * On TC_ES1: Multiplier = x3, final speed = 270MHz
+ * On TC_ES2: Multiplier = x6, final speed = 540MHz
+ * On TCF5:   Multiplier = x1, final speed = 45MHz
+ *
+ *
+ * The base (unmultiplied speed) can be adjusted using a module parameter called "sys_core_clk_speed",
+ * a number in Hz.
+ *
+ * As an example:
+ *
+ * PVR_SRVKM_PARAMS="sys_core_clk_speed=60000000" /etc/init.d/rc.pvr start
+ *
+ * would result in a core speed of 60MHz x Multiplier.
+ *
+ *
+ * The memory clock is unmultiplied and can be adjusted using a module parameter called
+ * "sys_mem_clk_speed"; this should be the number in Hz for the memory clock speed.
+ * 
+ * As an example:
+ *
+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=100000000" /etc/init.d/rc.pvr start
+ *
+ * would attempt to start the driver with the memory clock speed set to 100MHz.
+ *
+ *
+ * The same applies to the system interface clock speed, "sys_sysif_clk_speed".
+ * This is needed for TCF5 but not for TC_ES2/ES1.
+ * As an example:
+ *
+ * PVR_SRVKM_PARAMS="sys_sysif_clk_speed=45000000" /etc/init.d/rc.pvr start
+ *
+ * would attempt to start the driver with the system interface clock speed set to 45MHz.
+ *
+ *
+ * All parameters can be specified at once, eg:
+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=MEMORY_SPEED sys_core_clk_speed=CORE_SPEED sys_sysif_clk_speed=SYSIF_SPEED" /etc/init.d/rc.pvr start
+ */
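+
+/* Illustrative sketch (not part of the original header): in a Linux module the
+ * parameters described above would typically be declared with module_param();
+ * the exact declaration site in this driver is not shown here.
+ */
+#if 0
+#include <linux/moduleparam.h>
+
+static unsigned int sys_core_clk_speed;
+module_param(sys_core_clk_speed, uint, 0444);
+MODULE_PARM_DESC(sys_core_clk_speed, "Base (unmultiplied) core clock speed in Hz");
+#endif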
+
+#define RGX_TC_SYS_CLOCK_SPEED		(25000000) /*!< At the moment just used for TCF5 */
+
+#if defined(TC_APOLLO_TCF5_12_4_1_48)
+ /* TC TCF5 (12.*) */
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED	(60000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(45000000)
+ #define RGX_TC_SYS_CLOCK_SPEED		(45000000)
+#elif defined(TC_APOLLO_TCF5_22_46_54_330)
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED	(100000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(45000000)
+ #define RGX_TC_SYS_CLOCK_SPEED		(45000000)
+#elif defined(TC_APOLLO_TCF5_22_49_21_16) || defined(TC_APOLLO_TCF5_22_50_22_29) || \
+      defined(TC_APOLLO_TCF5_22_60_22_29) || defined(TC_APOLLO_TCF5_22_69_22_25) || \
+      defined(TC_APOLLO_TCF5_22_75_22_25)
+ #define RGX_TC_CORE_CLOCK_SPEED	(20000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(50000000)
+#elif defined(TC_APOLLO_TCF5_22_62_21_16) || defined(TC_APOLLO_TCF5_22_80_21_19)
+ #define RGX_TC_CORE_CLOCK_SPEED	(20000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(45000000)
+#elif defined(TC_APOLLO_TCF5_22_67_54_30) || defined(TC_APOLLO_TCF5_22_90_104_18) || defined(TC_APOLLO_TCF5_22_63_54_330) 
+ #define RGX_TC_CORE_CLOCK_SPEED	(100000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(45000000)
+#elif defined(TC_APOLLO_TCF5_22_73_104_312) || defined(TC_APOLLO_TCF5_22_78_104_212)
+ #define RGX_TC_CORE_CLOCK_SPEED	(50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(40000000)
+#elif defined(TC_APOLLO_TCF5_22_76_104_12)
+ #define RGX_TC_CORE_CLOCK_SPEED	(50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(50000000)
+#elif defined(TC_APOLLO_TCF5_22_81_104_12)
+ #define RGX_TC_CORE_CLOCK_SPEED	(50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(45000000)
+#elif defined(TC_APOLLO_TCF5_22_86_104_218)
+ #define RGX_TC_CORE_CLOCK_SPEED	(30000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(40000000)
+#elif defined(TC_APOLLO_TCF5_22_88_104_318)
+ #define RGX_TC_CORE_CLOCK_SPEED	(28000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(40000000)
+#elif defined(TC_APOLLO_TCF5_BVNC_NOT_SUPPORTED)
+ /* TC TCF5 (22.*) fallback frequencies */
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED	(20000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(50000000)
+ #define RGX_TC_SYS_CLOCK_SPEED		(25000000)
+#elif defined(TC_APOLLO_TCF5_REFERENCE)
+ /* TC TCF5 (Reference bitfile) */
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED	(50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(50000000)
+ #define RGX_TC_SYS_CLOCK_SPEED		(45000000)
+#elif defined(TC_APOLLO_BONNIE)
+ /* TC Bonnie */
+ #define RGX_TC_CORE_CLOCK_SPEED	(18000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(65000000)
+#elif defined(TC_APOLLO_ES2)
+ /* TC ES2 */
+ #define RGX_TC_CORE_CLOCK_SPEED	(90000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(104000000)
+#else
+ /* TC ES1 */
+ #define RGX_TC_CORE_CLOCK_SPEED	(90000000)
+ #define RGX_TC_MEM_CLOCK_SPEED		(65000000)
+#endif
+
+/* TC TCF5 */
+#define TC5_SYS_APOLLO_REG_PCI_BASENUM (1)
+#define TC5_SYS_APOLLO_REG_PDP2_OFFSET (0x800000)
+#define TC5_SYS_APOLLO_REG_PDP2_SIZE   (0x7C4)
+
+#define TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET (0xA00000)
+#define TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE   (0x14)
+
+#define TC5_SYS_APOLLO_REG_HDMI_OFFSET (0xC00000)
+#define TC5_SYS_APOLLO_REG_HDMI_SIZE   (0x1C)
+
+/* TC ES2 */
+#define TCF_TEMP_SENSOR_SPI_OFFSET 	0xe
+#define TCF_TEMP_SENSOR_TO_C(raw) 	(((raw) * 248 / 4096) - 54)
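+
+/* Worked example (illustrative, not part of the original header):
+ * a raw reading of 2048 gives (2048 * 248 / 4096) - 54 = 124 - 54 = 70 degrees C.
+ */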
+
+/* Number of bytes that are broken */
+#define SYS_DEV_MEM_BROKEN_BYTES	(1024 * 1024)
+#define SYS_DEV_MEM_REGION_SIZE		(0x40000000 - SYS_DEV_MEM_BROKEN_BYTES)
+
+/* Apollo reg on base register 0 */
+#define SYS_APOLLO_REG_PCI_BASENUM	(0)
+#define SYS_APOLLO_REG_REGION_SIZE	(0x00010000)
+
+#define SYS_APOLLO_REG_SYS_OFFSET	(0x0000)
+#define SYS_APOLLO_REG_SYS_SIZE		(0x0400)
+
+#define SYS_APOLLO_REG_PLL_OFFSET	(0x1000)
+#define SYS_APOLLO_REG_PLL_SIZE		(0x0400)
+
+#define SYS_APOLLO_REG_HOST_OFFSET	(0x4050)
+#define SYS_APOLLO_REG_HOST_SIZE	(0x0014)
+
+#define SYS_APOLLO_REG_PDP1_OFFSET	(0xC000)
+#define SYS_APOLLO_REG_PDP1_SIZE	(0x2000)
+
+/* Offsets for flashing Apollo PROMs from base 0 */
+#define APOLLO_FLASH_STAT_OFFSET	(0x4058)
+#define APOLLO_FLASH_DATA_WRITE_OFFSET	(0x4050)
+#define APOLLO_FLASH_RESET_OFFSET	(0x4060)
+
+#define APOLLO_FLASH_FIFO_STATUS_MASK 	 (0xF)
+#define APOLLO_FLASH_FIFO_STATUS_SHIFT 	 (0)
+#define APOLLO_FLASH_PROGRAM_STATUS_MASK (0xF)
+#define APOLLO_FLASH_PROGRAM_STATUS_SHIFT (16)
+
+#define APOLLO_FLASH_PROG_COMPLETE_BIT	(0x1)
+#define APOLLO_FLASH_PROG_PROGRESS_BIT	(0x2)
+#define APOLLO_FLASH_PROG_FAILED_BIT	(0x4)
+#define APOLLO_FLASH_INV_FILETYPE_BIT	(0x8)
+
+#define APOLLO_FLASH_FIFO_SIZE		(8)
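+
+/* Illustrative sketch (not part of the original header): decoding the flash
+ * status register with the masks and shifts above (the masks apply after
+ * shifting, as their values suggest).
+ */
+#if 0
+static inline unsigned int ApolloFlashFifoStatus(unsigned int ui32Stat)
+{
+	return (ui32Stat >> APOLLO_FLASH_FIFO_STATUS_SHIFT) & APOLLO_FLASH_FIFO_STATUS_MASK;
+}
+
+static inline unsigned int ApolloFlashProgramStatus(unsigned int ui32Stat)
+{
+	return (ui32Stat >> APOLLO_FLASH_PROGRAM_STATUS_SHIFT) & APOLLO_FLASH_PROGRAM_STATUS_MASK;
+}
+#endif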
+
+/* RGX reg on base register 1 */
+#define SYS_RGX_REG_PCI_BASENUM		(1)
+#define SYS_RGX_REG_REGION_SIZE		(0x7FFFF)
+
+/* Device memory (including HP mapping) on base register 2 */
+#define SYS_DEV_MEM_PCI_BASENUM		(2)
+
+#endif /* if !defined(__APOLLO_REGS_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/bonnie_tcf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/bonnie_tcf.h
new file mode 100644
index 0000000..f9c03ea
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/bonnie_tcf.h
@@ -0,0 +1,68 @@
+/****************************************************************************
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+****************************************************************************/
+
+/* bonnie_tcf.h - Bonnie TCF register definitions */
+
+/* tab size 4 */
+
+#ifndef _BONNIE_TCF_DEFS_H_
+#define _BONNIE_TCF_DEFS_H_
+
+#define BONNIE_TCF_OFFSET_BONNIETC_REGBANK							0x00000000
+#define BONNIE_TCF_OFFSET_TC_IFACE_COUNTERS							0x00004000
+#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_IMGV4_RTM_TOP				0x00008000
+#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_SECN		0x0000C000
+#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_DBG		0x00010000
+#define BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN							0x00014000
+#define BONNIE_TCF_OFFSET_ALIGN_DATA_TX								0x00018000
+#define BONNIE_TCF_OFFSET_SAI_RX_1									0x0001C000
+#define BONNIE_TCF_OFFSET_SAI_RX_SDR								0x00040000
+#define BONNIE_TCF_OFFSET_SAI_TX_1									0x00044000
+#define BONNIE_TCF_OFFSET_SAI_TX_SDR								0x00068000
+
+#define BONNIE_TCF_OFFSET_SAI_RX_DELTA								0x00004000
+#define BONNIE_TCF_OFFSET_SAI_TX_DELTA								0x00004000
+
+#define BONNIE_TCF_OFFSET_SAI_CLK_TAPS								0x0000000C
+#define BONNIE_TCF_OFFSET_SAI_EYES									0x00000010
+#define BONNIE_TCF_OFFSET_SAI_TRAIN_ACK								0x00000018
+
+
+#endif /* _BONNIE_TCF_DEFS_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/odin_defs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/odin_defs.h
new file mode 100644
index 0000000..98a2af1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/odin_defs.h
@@ -0,0 +1,289 @@
+/****************************************************************************
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Odin Memory Map - View from PCIe
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+****************************************************************************/
+
+#ifndef _ODIN_DEFS_H_
+#define _ODIN_DEFS_H_
+
+/* These defines have not been autogenerated */
+
+#define PCI_VENDOR_ID_ODIN                  (0x1AEE)
+#define DEVICE_ID_ODIN                      (0x1010)
+
+/* PCI BAR 0 contains the PDP regs and the Odin system regs */
+#define ODN_SYS_BAR                         0
+#define ODN_SYS_REGION_SIZE                 0x000800000 /* 8MB */
+
+#define ODN_SYS_REGS_OFFSET                 0
+#define ODN_SYS_REGS_SIZE                   0x000400000 /* 4MB */
+
+#define ODN_PDP_REGS_OFFSET                 0x000440000
+#define ODN_PDP_REGS_SIZE                   0x000040000 /* 256k */
+
+
+/* PCI BAR 2 contains the Device Under Test SOCIF 64MB region */
+#define ODN_DUT_SOCIF_BAR                   2
+#define ODN_DUT_SOCIF_OFFSET                0x000000000
+#define ODN_DUT_SOCIF_SIZE                  0x004000000 /* 64MB */
+
+/* PCI BAR 4 contains the on-board 1GB DDR memory */
+#define ODN_DDR_BAR                         4
+#define ODN_DDR_MEM_OFFSET                  0x000000000
+#define ODN_DDR_MEM_SIZE                    0x040000000 /* 1GB */
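+
+/* Illustrative sketch (not part of the original header): mapping the Odin
+ * system register region in a Linux PCI driver using the constants above.
+ */
+#if 0
+#include <linux/pci.h>
+#include <linux/io.h>
+
+static void __iomem *odin_map_sys_regs(struct pci_dev *pdev)
+{
+	resource_size_t base = pci_resource_start(pdev, ODN_SYS_BAR);
+
+	return ioremap(base + ODN_SYS_REGS_OFFSET, ODN_SYS_REGS_SIZE);
+}
+#endif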
+
+/* Odin system register banks */
+#define ODN_REG_BANK_CORE                   0x00000
+#define ODN_REG_BANK_TCF_SPI_MASTER         0x02000
+#define ODN_REG_BANK_ODN_CLK_BLK            0x0A000
+#define ODN_REG_BANK_ODN_MCU_COMMUNICATOR   0x0C000
+#define ODN_REG_BANK_DB_TYPE_ID             0x0C200
+#define ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK   0x000000C0U
+#define ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT  0x6
+#define ODN_REG_BANK_ODN_I2C                0x0E000
+#define ODN_REG_BANK_MULTI_CLK_ALIGN        0x20000
+#define ODN_REG_BANK_ALIGN_DATA_TX          0x22000
+#define ODN_REG_BANK_SAI_RX_DDR_0           0x24000
+#define ODN_REG_BANK_SAI_RX_DDR(n)          (ODN_REG_BANK_SAI_RX_DDR_0 + (0x02000*n))
+#define ODN_REG_BANK_SAI_TX_DDR_0           0x3A000
+#define ODN_REG_BANK_SAI_TX_DDR(n)          (ODN_REG_BANK_SAI_TX_DDR_0 + (0x02000*n))
+#define ODN_REG_BANK_SAI_TX_SDR             0x4E000
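+
+/* Illustrative sketch (not part of the original header): extracting the
+ * daughterboard type field and addressing a numbered SAI RX bank.
+ */
+#if 0
+unsigned int ui32DbType = (ui32RegVal & ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK)
+                                      >> ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT;
+unsigned int ui32Sai2   = ODN_REG_BANK_SAI_RX_DDR(2);	/* == 0x28000 */
+#endif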
+
+/* Odin SPI regs */
+#define ODN_SPI_MST_ADDR_RDNWR              0x0000
+#define ODN_SPI_MST_WDATA                   0x0004
+#define ODN_SPI_MST_RDATA                   0x0008
+#define ODN_SPI_MST_STATUS                  0x000C
+#define ODN_SPI_MST_GO                      0x0010
+
+
+/*
+   Odin CLK regs - the odn_clk_blk module defs are not auto-generated
+   because the module is licensed 3rd-party IP from Xilinx.
+   These defs are taken from the Odin TRM.
+ */
+#define ODN_PDP_P_CLK_OUT_DIVIDER_REG1           0x620
+#define ODN_PDP_PCLK_ODIV1_LO_TIME_MASK          0x0000003FU
+#define ODN_PDP_PCLK_ODIV1_LO_TIME_SHIFT         0
+#define ODN_PDP_PCLK_ODIV1_HI_TIME_MASK          0x00000FC0U
+#define ODN_PDP_PCLK_ODIV1_HI_TIME_SHIFT         6
+
+#define ODN_PDP_P_CLK_OUT_DIVIDER_REG2           0x624
+#define ODN_PDP_PCLK_ODIV2_NOCOUNT_MASK          0x00000040U
+#define ODN_PDP_PCLK_ODIV2_NOCOUNT_SHIFT         6
+#define ODN_PDP_PCLK_ODIV2_EDGE_MASK             0x00000080U
+#define ODN_PDP_PCLK_ODIV2_EDGE_SHIFT            7
+
+#define ODN_PDP_P_CLK_OUT_DIVIDER_REG3           0x61C
+
+#define ODN_PDP_M_CLK_OUT_DIVIDER_REG1           0x628
+#define ODN_PDP_MCLK_ODIV1_LO_TIME_MASK          0x0000003FU
+#define ODN_PDP_MCLK_ODIV1_LO_TIME_SHIFT         0
+#define ODN_PDP_MCLK_ODIV1_HI_TIME_MASK	         0x00000FC0U
+#define ODN_PDP_MCLK_ODIV1_HI_TIME_SHIFT         6
+
+#define ODN_PDP_M_CLK_OUT_DIVIDER_REG2           0x62C
+#define ODN_PDP_MCLK_ODIV2_NOCOUNT_MASK          0x00000040U
+#define ODN_PDP_MCLK_ODIV2_NOCOUNT_SHIFT         6
+#define ODN_PDP_MCLK_ODIV2_EDGE_MASK             0x00000080U
+#define ODN_PDP_MCLK_ODIV2_EDGE_SHIFT            7
+
+#define ODN_PDP_P_CLK_MULTIPLIER_REG1            0x650
+#define ODN_PDP_PCLK_MUL1_LO_TIME_MASK           0x0000003FU
+#define ODN_PDP_PCLK_MUL1_LO_TIME_SHIFT          0
+#define ODN_PDP_PCLK_MUL1_HI_TIME_MASK           0x00000FC0U
+#define ODN_PDP_PCLK_MUL1_HI_TIME_SHIFT          6
+
+#define ODN_PDP_P_CLK_MULTIPLIER_REG2            0x654
+#define ODN_PDP_PCLK_MUL2_NOCOUNT_MASK           0x00000040U
+#define ODN_PDP_PCLK_MUL2_NOCOUNT_SHIFT          6
+#define ODN_PDP_PCLK_MUL2_EDGE_MASK              0x00000080U
+#define ODN_PDP_PCLK_MUL2_EDGE_SHIFT             7
+
+#define ODN_PDP_P_CLK_MULTIPLIER_REG3            0x64C
+
+#define ODN_PDP_P_CLK_IN_DIVIDER_REG             0x658
+#define ODN_PDP_PCLK_IDIV_LO_TIME_MASK           0x0000003FU
+#define ODN_PDP_PCLK_IDIV_LO_TIME_SHIFT          0
+#define ODN_PDP_PCLK_IDIV_HI_TIME_MASK           0x00000FC0U
+#define ODN_PDP_PCLK_IDIV_HI_TIME_SHIFT          6
+#define ODN_PDP_PCLK_IDIV_NOCOUNT_MASK           0x00001000U
+#define ODN_PDP_PCLK_IDIV_NOCOUNT_SHIFT          12
+#define ODN_PDP_PCLK_IDIV_EDGE_MASK              0x00002000U
+#define ODN_PDP_PCLK_IDIV_EDGE_SHIFT             13
+
+/*
+ * DUT core clock input divider, multiplier and out divider.
+ */
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1                (0x0028)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK   (0x00000FC0U)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT  (6)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK   (0x0000003FU)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT  (0)
+
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2                (0x002C)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK      (0x00000080U)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT     (7)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK   (0x00000040U)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT  (6)
+
+#define ODN_DUT_CORE_CLK_MULTIPLIER1                 (0x0050)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_MASK    (0x00000FC0U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_SHIFT   (6)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_MASK    (0x0000003FU)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_SHIFT   (0)
+
+#define ODN_DUT_CORE_CLK_MULTIPLIER2                 (0x0054)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_MASK       (0x00007000U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_SHIFT      (12)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_MASK    (0x00000800U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_SHIFT   (11)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_MASK       (0x00000080U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_SHIFT      (7)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_MASK    (0x00000040U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_SHIFT   (6)
+
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1                 (0x0058)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_MASK       (0x00002000U)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_SHIFT      (13)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_MASK    (0x00001000U)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT   (12)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_MASK    (0x00000FC0U)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_SHIFT   (6)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_MASK    (0x0000003FU)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_SHIFT   (0)
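+
+/* Illustrative sketch (not part of the original header): Xilinx MMCM-style
+ * dividers split an integer divide value into high and low counts. Assuming
+ * the usual MMCM convention (hi = ceil(d/2), lo = floor(d/2)), a DIVIDER1
+ * value could be packed as follows.
+ */
+#if 0
+static inline unsigned int odn_dut_core_divider1(unsigned int d)
+{
+	unsigned int hi = (d + 1) / 2;
+	unsigned int lo = d / 2;
+
+	return ((hi << ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_SHIFT)
+			& ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_MASK)
+	     | ((lo << ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_SHIFT)
+			& ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_MASK);
+}
+#endif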
+
+/*
+ * DUT interface clock input divider, multiplier and out divider.
+ */
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1               (0x0220)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_MASK  (0x00000FC0U)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT (6)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_MASK  (0x0000003FU)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT (0)
+
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2               (0x0224)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_MASK     (0x00000080U)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_SHIFT    (7)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_MASK  (0x00000040U)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT (6)
+
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1                (0x0250)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_MASK   (0x00000FC0U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_SHIFT  (6)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_MASK   (0x0000003FU)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_SHIFT  (0)
+
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2                (0x0254)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_MASK      (0x00007000U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_SHIFT     (12)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_MASK   (0x00000800U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_SHIFT  (11)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_MASK      (0x00000080U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_SHIFT     (7)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_MASK   (0x00000040U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_SHIFT  (6)
+
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1                (0x0258)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_MASK      (0x00002000U)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_SHIFT     (13)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_MASK   (0x00001000U)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT  (12)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_MASK   (0x00000FC0U)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_SHIFT  (6)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_MASK   (0x0000003FU)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_SHIFT  (0)
+
+
+/*
+ * Min max values from Xilinx Virtex7 data sheet DS183, for speed grade 2
+ * All in Hz
+ */
+#define ODN_INPUT_CLOCK_SPEED                        (100000000U)
+#define ODN_INPUT_CLOCK_SPEED_MIN                    (10000000U)
+#define ODN_INPUT_CLOCK_SPEED_MAX                    (933000000U)
+#define ODN_OUTPUT_CLOCK_SPEED_MIN                   (4690000U)
+#define ODN_OUTPUT_CLOCK_SPEED_MAX                   (933000000U)
+#define ODN_VCO_MIN                                  (600000000U)
+#define ODN_VCO_MAX                                  (1440000000U)
+#define ODN_PFD_MIN                                  (10000000U)
+#define ODN_PFD_MAX                                  (500000000U)
+
+/*
+ * Max values that can be set in DRP registers
+ */
+#define ODN_OREG_VALUE_MAX                            (126.875f)
+#define ODN_MREG_VALUE_MAX                            (126.875f)
+#define ODN_DREG_VALUE_MAX                            (126U)
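+
+/* Illustrative sketch (not part of the original header): checking candidate
+ * clock settings against the limits above, assuming the usual MMCM relations
+ * f_pfd = f_in / d and f_vco = f_in * m / d.
+ */
+#if 0
+static inline int odn_mmcm_params_valid(unsigned long f_in, unsigned int m, unsigned int d)
+{
+	unsigned long f_pfd = f_in / d;
+	unsigned long f_vco = f_pfd * m;
+
+	return f_pfd >= ODN_PFD_MIN && f_pfd <= ODN_PFD_MAX &&
+	       f_vco >= ODN_VCO_MIN && f_vco <= ODN_VCO_MAX;
+}
+#endif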
+
+
+#define ODN_MMCM_LOCK_STATUS_DUT_CORE                (0x00000001U)
+#define ODN_MMCM_LOCK_STATUS_DUT_IF                  (0x00000002U)
+#define ODN_MMCM_LOCK_STATUS_PDPP                    (0x00000008U)
+
+/*
+    Odin interrupt flags
+*/
+#define ODN_INTERRUPT_ENABLE_PDP1           (1 << ODN_INTERRUPT_ENABLE_PDP1_SHIFT)
+#define ODN_INTERRUPT_ENABLE_DUT            (1 << ODN_INTERRUPT_ENABLE_DUT_SHIFT)
+#define ODN_INTERRUPT_STATUS_PDP1           (1 << ODN_INTERRUPT_STATUS_PDP1_SHIFT)
+#define ODN_INTERRUPT_STATUS_DUT            (1 << ODN_INTERRUPT_STATUS_DUT_SHIFT)
+#define ODN_INTERRUPT_CLEAR_PDP1            (1 << ODN_INTERRUPT_CLR_PDP1_SHIFT)
+#define ODN_INTERRUPT_CLEAR_DUT             (1 << ODN_INTERRUPT_CLR_DUT_SHIFT)
+
+/*
+   Other defines
+*/
+#define ODN_STREAM_OFF                      0
+#define ODN_STREAM_ON                       1
+#define ODN_SYNC_GEN_DISABLE                0
+#define ODN_SYNC_GEN_ENABLE                 1
+#define ODN_INTERLACE_DISABLE               0
+#define ODN_INTERLACE_ENABLE                1
+#define ODN_PIXEL_CLOCK_INVERTED            1
+#define ODN_HSYNC_POLARITY_ACTIVE_HIGH      1
+
+#define ODN_PDP_INTCLR_ALL                  0x000FFFFFU
+#define	ODN_PDP_INTSTAT_ALL_OURUN_MASK      0x000FFFF0U
+
+#endif /* _ODIN_DEFS_H_ */
+
+/*****************************************************************************
+ End of file (odn_defs.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/odin_pdp_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/odin_pdp_regs.h
new file mode 100644
index 0000000..2a010c9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/odin_pdp_regs.h
@@ -0,0 +1,8541 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* tab size 4 */
+
+#ifndef _ODN_PDP_REGS_H_
+#define _ODN_PDP_REGS_H_
+
+/* Odin-PDP hardware register definitions */
+
+
+#define ODN_PDP_GRPH1SURF_OFFSET					(0x0000)
+
+/* PDP, GRPH1SURF, GRPH1PIXFMT
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK		(0x0000001F)
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT			(27)
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LENGTH		(5)
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USEGAMMA
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_MASK		(0x04000000)
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SHIFT		(26)
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LENGTH		(1)
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USECSC
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_MASK			(0x02000000)
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SHIFT			(25)
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LENGTH		(1)
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1LUTRWCHOICE
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_MASK		(0x01000000)
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SHIFT	(24)
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LENGTH	(1)
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USELUT
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_MASK			(0x00800000)
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SHIFT			(23)
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LENGTH		(1)
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SIGNED_FIELD	IMG_FALSE
+
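+/* Illustrative sketch (not part of the original header): every field in this
+ * file follows the same MASK/LSBMASK/SHIFT pattern, so fields can be read and
+ * written with generic helpers such as these (names hypothetical).
+ */
+#if 0
+#define ODN_PDP_FIELD_GET(val, field) \
+	(((val) & ODN_PDP_##field##_MASK) >> ODN_PDP_##field##_SHIFT)
+#define ODN_PDP_FIELD_SET(val, field, x) \
+	(((val) & ~ODN_PDP_##field##_MASK) | \
+	 (((x) & ODN_PDP_##field##_LSBMASK) << ODN_PDP_##field##_SHIFT))
+
+/* e.g. ODN_PDP_FIELD_GET(ui32Reg, GRPH1SURF_GRPH1PIXFMT) extracts the pixel format. */
+#endif
+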
+#define ODN_PDP_GRPH2SURF_OFFSET					(0x0004)
+
+/* PDP, GRPH2SURF, GRPH2PIXFMT
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LSBMASK		(0x0000001F)
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT			(27)
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LENGTH		(5)
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USEGAMMA
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_MASK		(0x04000000)
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SHIFT		(26)
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LENGTH		(1)
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USECSC
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_MASK			(0x02000000)
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SHIFT			(25)
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LENGTH		(1)
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2LUTRWCHOICE
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_MASK		(0x01000000)
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SHIFT	(24)
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LENGTH	(1)
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USELUT
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_MASK			(0x00800000)
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SHIFT			(23)
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LENGTH		(1)
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3SURF_OFFSET					(0x0008)
+
+/* PDP, GRPH3SURF, GRPH3PIXFMT
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LSBMASK		(0x0000001F)
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SHIFT			(27)
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LENGTH		(5)
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USEGAMMA
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_MASK		(0x04000000)
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SHIFT		(26)
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LENGTH		(1)
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USECSC
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_MASK			(0x02000000)
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SHIFT			(25)
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LENGTH		(1)
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3LUTRWCHOICE
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_MASK		(0x01000000)
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SHIFT	(24)
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LENGTH	(1)
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USELUT
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_MASK			(0x00800000)
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SHIFT			(23)
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LENGTH		(1)
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4SURF_OFFSET					(0x000C)
+
+/* PDP, GRPH4SURF, GRPH4PIXFMT
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LSBMASK		(0x0000001F)
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT			(27)
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LENGTH		(5)
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USEGAMMA
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_MASK		(0x04000000)
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SHIFT		(26)
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LENGTH		(1)
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USECSC
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_MASK			(0x02000000)
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SHIFT			(25)
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LENGTH		(1)
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4LUTRWCHOICE
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_MASK		(0x01000000)
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SHIFT	(24)
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LENGTH	(1)
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USELUT
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_MASK			(0x00800000)
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SHIFT			(23)
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LENGTH		(1)
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1SURF_OFFSET						(0x0010)
+
+/* PDP, VID1SURF, VID1PIXFMT
+*/
+#define ODN_PDP_VID1SURF_VID1PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_VID1SURF_VID1PIXFMT_LSBMASK			(0x0000001F)
+#define ODN_PDP_VID1SURF_VID1PIXFMT_SHIFT			(27)
+#define ODN_PDP_VID1SURF_VID1PIXFMT_LENGTH			(5)
+#define ODN_PDP_VID1SURF_VID1PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEGAMMA
+*/
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_MASK			(0x04000000)
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_SHIFT			(26)
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_LENGTH		(1)
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USECSC
+*/
+#define ODN_PDP_VID1SURF_VID1USECSC_MASK			(0x02000000)
+#define ODN_PDP_VID1SURF_VID1USECSC_LSBMASK			(0x00000001)
+#define ODN_PDP_VID1SURF_VID1USECSC_SHIFT			(25)
+#define ODN_PDP_VID1SURF_VID1USECSC_LENGTH			(1)
+#define ODN_PDP_VID1SURF_VID1USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEI2P
+*/
+#define ODN_PDP_VID1SURF_VID1USEI2P_MASK			(0x01000000)
+#define ODN_PDP_VID1SURF_VID1USEI2P_LSBMASK			(0x00000001)
+#define ODN_PDP_VID1SURF_VID1USEI2P_SHIFT			(24)
+#define ODN_PDP_VID1SURF_VID1USEI2P_LENGTH			(1)
+#define ODN_PDP_VID1SURF_VID1USEI2P_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1COSITED
+*/
+#define ODN_PDP_VID1SURF_VID1COSITED_MASK			(0x00800000)
+#define ODN_PDP_VID1SURF_VID1COSITED_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SURF_VID1COSITED_SHIFT			(23)
+#define ODN_PDP_VID1SURF_VID1COSITED_LENGTH			(1)
+#define ODN_PDP_VID1SURF_VID1COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEHQCD
+*/
+#define ODN_PDP_VID1SURF_VID1USEHQCD_MASK			(0x00400000)
+#define ODN_PDP_VID1SURF_VID1USEHQCD_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SURF_VID1USEHQCD_SHIFT			(22)
+#define ODN_PDP_VID1SURF_VID1USEHQCD_LENGTH			(1)
+#define ODN_PDP_VID1SURF_VID1USEHQCD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEINSTREAM
+*/
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_MASK		(0x00200000)
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LSBMASK	(0x00000001)
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SHIFT		(21)
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LENGTH		(1)
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2SURF_OFFSET						(0x0014)
+
+/* PDP, VID2SURF, VID2PIXFMT
+*/
+#define ODN_PDP_VID2SURF_VID2PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_VID2SURF_VID2PIXFMT_LSBMASK			(0x0000001F)
+#define ODN_PDP_VID2SURF_VID2PIXFMT_SHIFT			(27)
+#define ODN_PDP_VID2SURF_VID2PIXFMT_LENGTH			(5)
+#define ODN_PDP_VID2SURF_VID2PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SURF, VID2COSITED
+*/
+#define ODN_PDP_VID2SURF_VID2COSITED_MASK			(0x00800000)
+#define ODN_PDP_VID2SURF_VID2COSITED_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SURF_VID2COSITED_SHIFT			(23)
+#define ODN_PDP_VID2SURF_VID2COSITED_LENGTH			(1)
+#define ODN_PDP_VID2SURF_VID2COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SURF, VID2USEGAMMA
+*/
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_MASK			(0x04000000)
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_SHIFT			(26)
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_LENGTH		(1)
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SURF, VID2USECSC
+*/
+#define ODN_PDP_VID2SURF_VID2USECSC_MASK			(0x02000000)
+#define ODN_PDP_VID2SURF_VID2USECSC_LSBMASK			(0x00000001)
+#define ODN_PDP_VID2SURF_VID2USECSC_SHIFT			(25)
+#define ODN_PDP_VID2SURF_VID2USECSC_LENGTH			(1)
+#define ODN_PDP_VID2SURF_VID2USECSC_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3SURF_OFFSET						(0x0018)
+
+/* PDP, VID3SURF, VID3PIXFMT
+*/
+#define ODN_PDP_VID3SURF_VID3PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_VID3SURF_VID3PIXFMT_LSBMASK			(0x0000001F)
+#define ODN_PDP_VID3SURF_VID3PIXFMT_SHIFT			(27)
+#define ODN_PDP_VID3SURF_VID3PIXFMT_LENGTH			(5)
+#define ODN_PDP_VID3SURF_VID3PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SURF, VID3COSITED
+*/
+#define ODN_PDP_VID3SURF_VID3COSITED_MASK			(0x00800000)
+#define ODN_PDP_VID3SURF_VID3COSITED_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SURF_VID3COSITED_SHIFT			(23)
+#define ODN_PDP_VID3SURF_VID3COSITED_LENGTH			(1)
+#define ODN_PDP_VID3SURF_VID3COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SURF, VID3USEGAMMA
+*/
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_MASK			(0x04000000)
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_SHIFT			(26)
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_LENGTH		(1)
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SURF, VID3USECSC
+*/
+#define ODN_PDP_VID3SURF_VID3USECSC_MASK			(0x02000000)
+#define ODN_PDP_VID3SURF_VID3USECSC_LSBMASK			(0x00000001)
+#define ODN_PDP_VID3SURF_VID3USECSC_SHIFT			(25)
+#define ODN_PDP_VID3SURF_VID3USECSC_LENGTH			(1)
+#define ODN_PDP_VID3SURF_VID3USECSC_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4SURF_OFFSET						(0x001C)
+
+/* PDP, VID4SURF, VID4PIXFMT
+*/
+#define ODN_PDP_VID4SURF_VID4PIXFMT_MASK			(0xF8000000)
+#define ODN_PDP_VID4SURF_VID4PIXFMT_LSBMASK			(0x0000001F)
+#define ODN_PDP_VID4SURF_VID4PIXFMT_SHIFT			(27)
+#define ODN_PDP_VID4SURF_VID4PIXFMT_LENGTH			(5)
+#define ODN_PDP_VID4SURF_VID4PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SURF, VID4COSITED
+*/
+#define ODN_PDP_VID4SURF_VID4COSITED_MASK			(0x00800000)
+#define ODN_PDP_VID4SURF_VID4COSITED_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4SURF_VID4COSITED_SHIFT			(23)
+#define ODN_PDP_VID4SURF_VID4COSITED_LENGTH			(1)
+#define ODN_PDP_VID4SURF_VID4COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SURF, VID4USEGAMMA
+*/
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_MASK			(0x04000000)
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_SHIFT			(26)
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_LENGTH		(1)
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SURF, VID4USECSC
+*/
+#define ODN_PDP_VID4SURF_VID4USECSC_MASK			(0x02000000)
+#define ODN_PDP_VID4SURF_VID4USECSC_LSBMASK			(0x00000001)
+#define ODN_PDP_VID4SURF_VID4USECSC_SHIFT			(25)
+#define ODN_PDP_VID4SURF_VID4USECSC_LENGTH			(1)
+#define ODN_PDP_VID4SURF_VID4USECSC_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1CTRL_OFFSET					(0x0020)
+
+/* PDP, GRPH1CTRL, GRPH1STREN
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_MASK			(0x80000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_SHIFT			(31)
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LENGTH			(1)
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1CKEYEN
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SHIFT			(30)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LENGTH		(1)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1CKEYSRC
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SHIFT		(29)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LENGTH		(1)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1BLEND
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_MASK			(0x18000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LSBMASK		(0x00000003)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SHIFT			(27)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LENGTH			(2)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1BLENDPOS
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK		(0x07000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT		(24)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LENGTH		(3)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1DITHEREN
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_MASK		(0x00800000)
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SHIFT		(23)
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LENGTH		(1)
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2CTRL_OFFSET					(0x0024)
+
+/* PDP, GRPH2CTRL, GRPH2STREN
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_MASK			(0x80000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SHIFT			(31)
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LENGTH			(1)
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2CKEYEN
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SHIFT			(30)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LENGTH		(1)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2CKEYSRC
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SHIFT		(29)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LENGTH		(1)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2BLEND
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_MASK			(0x18000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LSBMASK		(0x00000003)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SHIFT			(27)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LENGTH			(2)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2BLENDPOS
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK		(0x07000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT		(24)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LENGTH		(3)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2DITHEREN
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_MASK		(0x00800000)
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SHIFT		(23)
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LENGTH		(1)
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3CTRL_OFFSET					(0x0028)
+
+/* PDP, GRPH3CTRL, GRPH3STREN
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_MASK			(0x80000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SHIFT			(31)
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LENGTH			(1)
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3CKEYEN
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SHIFT			(30)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LENGTH		(1)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3CKEYSRC
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SHIFT		(29)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LENGTH		(1)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3BLEND
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_MASK			(0x18000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LSBMASK		(0x00000003)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SHIFT			(27)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LENGTH			(2)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3BLENDPOS
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_MASK		(0x07000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SHIFT		(24)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LENGTH		(3)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3DITHEREN
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_MASK		(0x00800000)
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SHIFT		(23)
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LENGTH		(1)
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4CTRL_OFFSET					(0x002C)
+
+/* PDP, GRPH4CTRL, GRPH4STREN
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_MASK			(0x80000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SHIFT			(31)
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LENGTH			(1)
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4CKEYEN
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SHIFT			(30)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LENGTH		(1)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4CKEYSRC
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SHIFT		(29)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LENGTH		(1)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4BLEND
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_MASK			(0x18000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LSBMASK		(0x00000003)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SHIFT			(27)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LENGTH			(2)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4BLENDPOS
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK		(0x07000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT		(24)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LENGTH		(3)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4DITHEREN
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_MASK		(0x00800000)
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SHIFT		(23)
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LENGTH		(1)
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1CTRL_OFFSET						(0x0030)
+
+/* PDP, VID1CTRL, VID1STREN
+*/
+#define ODN_PDP_VID1CTRL_VID1STREN_MASK				(0x80000000)
+#define ODN_PDP_VID1CTRL_VID1STREN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID1CTRL_VID1STREN_SHIFT			(31)
+#define ODN_PDP_VID1CTRL_VID1STREN_LENGTH			(1)
+#define ODN_PDP_VID1CTRL_VID1STREN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID1CTRL, VID1CKEYEN
+*/
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_SHIFT			(30)
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_LENGTH			(1)
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1CKEYSRC
+*/
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SHIFT			(29)
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LENGTH			(1)
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1BLEND
+*/
+#define ODN_PDP_VID1CTRL_VID1BLEND_MASK				(0x18000000)
+#define ODN_PDP_VID1CTRL_VID1BLEND_LSBMASK			(0x00000003)
+#define ODN_PDP_VID1CTRL_VID1BLEND_SHIFT			(27)
+#define ODN_PDP_VID1CTRL_VID1BLEND_LENGTH			(2)
+#define ODN_PDP_VID1CTRL_VID1BLEND_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID1CTRL, VID1BLENDPOS
+*/
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_MASK			(0x07000000)
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SHIFT			(24)
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LENGTH		(3)
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1DITHEREN
+*/
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_MASK			(0x00800000)
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_SHIFT			(23)
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_LENGTH		(1)
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2CTRL_OFFSET						(0x0034)
+
+/* PDP, VID2CTRL, VID2STREN
+*/
+#define ODN_PDP_VID2CTRL_VID2STREN_MASK				(0x80000000)
+#define ODN_PDP_VID2CTRL_VID2STREN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID2CTRL_VID2STREN_SHIFT			(31)
+#define ODN_PDP_VID2CTRL_VID2STREN_LENGTH			(1)
+#define ODN_PDP_VID2CTRL_VID2STREN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID2CTRL, VID2CKEYEN
+*/
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_SHIFT			(30)
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_LENGTH			(1)
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2CKEYSRC
+*/
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SHIFT			(29)
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LENGTH			(1)
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2BLEND
+*/
+#define ODN_PDP_VID2CTRL_VID2BLEND_MASK				(0x18000000)
+#define ODN_PDP_VID2CTRL_VID2BLEND_LSBMASK			(0x00000003)
+#define ODN_PDP_VID2CTRL_VID2BLEND_SHIFT			(27)
+#define ODN_PDP_VID2CTRL_VID2BLEND_LENGTH			(2)
+#define ODN_PDP_VID2CTRL_VID2BLEND_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID2CTRL, VID2BLENDPOS
+*/
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_MASK			(0x07000000)
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SHIFT			(24)
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LENGTH		(3)
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2DITHEREN
+*/
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_MASK			(0x00800000)
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_SHIFT			(23)
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_LENGTH		(1)
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3CTRL_OFFSET						(0x0038)
+
+/* PDP, VID3CTRL, VID3STREN
+*/
+#define ODN_PDP_VID3CTRL_VID3STREN_MASK				(0x80000000)
+#define ODN_PDP_VID3CTRL_VID3STREN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID3CTRL_VID3STREN_SHIFT			(31)
+#define ODN_PDP_VID3CTRL_VID3STREN_LENGTH			(1)
+#define ODN_PDP_VID3CTRL_VID3STREN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID3CTRL, VID3CKEYEN
+*/
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_SHIFT			(30)
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_LENGTH			(1)
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3CKEYSRC
+*/
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SHIFT			(29)
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LENGTH			(1)
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3BLEND
+*/
+#define ODN_PDP_VID3CTRL_VID3BLEND_MASK				(0x18000000)
+#define ODN_PDP_VID3CTRL_VID3BLEND_LSBMASK			(0x00000003)
+#define ODN_PDP_VID3CTRL_VID3BLEND_SHIFT			(27)
+#define ODN_PDP_VID3CTRL_VID3BLEND_LENGTH			(2)
+#define ODN_PDP_VID3CTRL_VID3BLEND_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID3CTRL, VID3BLENDPOS
+*/
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_MASK			(0x07000000)
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SHIFT			(24)
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LENGTH		(3)
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3DITHEREN
+*/
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_MASK			(0x00800000)
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_SHIFT			(23)
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_LENGTH		(1)
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4CTRL_OFFSET						(0x003C)
+
+/* PDP, VID4CTRL, VID4STREN
+*/
+#define ODN_PDP_VID4CTRL_VID4STREN_MASK				(0x80000000)
+#define ODN_PDP_VID4CTRL_VID4STREN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID4CTRL_VID4STREN_SHIFT			(31)
+#define ODN_PDP_VID4CTRL_VID4STREN_LENGTH			(1)
+#define ODN_PDP_VID4CTRL_VID4STREN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID4CTRL, VID4CKEYEN
+*/
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_MASK			(0x40000000)
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_LSBMASK			(0x00000001)
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_SHIFT			(30)
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_LENGTH			(1)
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4CKEYSRC
+*/
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_MASK			(0x20000000)
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SHIFT			(29)
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LENGTH			(1)
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4BLEND
+*/
+#define ODN_PDP_VID4CTRL_VID4BLEND_MASK				(0x18000000)
+#define ODN_PDP_VID4CTRL_VID4BLEND_LSBMASK			(0x00000003)
+#define ODN_PDP_VID4CTRL_VID4BLEND_SHIFT			(27)
+#define ODN_PDP_VID4CTRL_VID4BLEND_LENGTH			(2)
+#define ODN_PDP_VID4CTRL_VID4BLEND_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID4CTRL, VID4BLENDPOS
+*/
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_MASK			(0x07000000)
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LSBMASK		(0x00000007)
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SHIFT			(24)
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LENGTH		(3)
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4DITHEREN
+*/
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_MASK			(0x00800000)
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_SHIFT			(23)
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_LENGTH		(1)
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1UCTRL_OFFSET					(0x0050)
+
+/* PDP, VID1UCTRL, VID1UVHALFSTR
+*/
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_MASK		(0xC0000000)
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LSBMASK		(0x00000003)
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SHIFT		(30)
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LENGTH		(2)
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2UCTRL_OFFSET					(0x0054)
+
+/* PDP, VID2UCTRL, VID2UVHALFSTR
+*/
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_MASK		(0xC0000000)
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LSBMASK		(0x00000003)
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SHIFT		(30)
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LENGTH		(2)
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3UCTRL_OFFSET					(0x0058)
+
+/* PDP, VID3UCTRL, VID3UVHALFSTR
+*/
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_MASK		(0xC0000000)
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LSBMASK		(0x00000003)
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SHIFT		(30)
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LENGTH		(2)
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4UCTRL_OFFSET					(0x005C)
+
+/* PDP, VID4UCTRL, VID4UVHALFSTR
+*/
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_MASK		(0xC0000000)
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LSBMASK		(0x00000003)
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SHIFT		(30)
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LENGTH		(2)
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1STRIDE_OFFSET					(0x0060)
+
+/* PDP, GRPH1STRIDE, GRPH1STRIDE
+*/
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_MASK		(0xFFC00000)
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT		(22)
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LENGTH		(10)
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2STRIDE_OFFSET					(0x0064)
+
+/* PDP, GRPH2STRIDE, GRPH2STRIDE
+*/
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_MASK		(0xFFC00000)
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT		(22)
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LENGTH		(10)
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3STRIDE_OFFSET					(0x0068)
+
+/* PDP, GRPH3STRIDE, GRPH3STRIDE
+*/
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_MASK		(0xFFC00000)
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SHIFT		(22)
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LENGTH		(10)
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4STRIDE_OFFSET					(0x006C)
+
+/* PDP, GRPH4STRIDE, GRPH4STRIDE
+*/
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_MASK		(0xFFC00000)
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT		(22)
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LENGTH		(10)
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1STRIDE_OFFSET					(0x0070)
+
+/* PDP, VID1STRIDE, VID1STRIDE
+*/
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_MASK			(0xFFC00000)
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_SHIFT			(22)
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH		(10)
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2STRIDE_OFFSET					(0x0074)
+
+/* PDP, VID2STRIDE, VID2STRIDE
+*/
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_MASK			(0xFFC00000)
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_SHIFT			(22)
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_LENGTH		(10)
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3STRIDE_OFFSET					(0x0078)
+
+/* PDP, VID3STRIDE, VID3STRIDE
+*/
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_MASK			(0xFFC00000)
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_SHIFT			(22)
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_LENGTH		(10)
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4STRIDE_OFFSET					(0x007C)
+
+/* PDP, VID4STRIDE, VID4STRIDE
+*/
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_MASK			(0xFFC00000)
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_SHIFT			(22)
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_LENGTH		(10)
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1SIZE_OFFSET					(0x0080)
+
+/* PDP, GRPH1SIZE, GRPH1WIDTH
+*/
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT			(16)
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LENGTH			(12)
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SIZE, GRPH1HEIGHT
+*/
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT			(0)
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LENGTH		(12)
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2SIZE_OFFSET					(0x0084)
+
+/* PDP, GRPH2SIZE, GRPH2WIDTH
+*/
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT			(16)
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LENGTH			(12)
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SIZE, GRPH2HEIGHT
+*/
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT			(0)
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LENGTH		(12)
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3SIZE_OFFSET					(0x0088)
+
+/* PDP, GRPH3SIZE, GRPH3WIDTH
+*/
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SHIFT			(16)
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LENGTH			(12)
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SIZE, GRPH3HEIGHT
+*/
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SHIFT			(0)
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LENGTH		(12)
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4SIZE_OFFSET					(0x008C)
+
+/* PDP, GRPH4SIZE, GRPH4WIDTH
+*/
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT			(16)
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LENGTH			(12)
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SIZE, GRPH4HEIGHT
+*/
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT			(0)
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LENGTH		(12)
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1SIZE_OFFSET						(0x0090)
+
+/* PDP, VID1SIZE, VID1WIDTH
+*/
+#define ODN_PDP_VID1SIZE_VID1WIDTH_MASK				(0x0FFF0000)
+#define ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID1SIZE_VID1WIDTH_SHIFT			(16)
+#define ODN_PDP_VID1SIZE_VID1WIDTH_LENGTH			(12)
+#define ODN_PDP_VID1SIZE_VID1WIDTH_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID1SIZE, VID1HEIGHT
+*/
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_SHIFT			(0)
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_LENGTH			(12)
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2SIZE_OFFSET						(0x0094)
+
+/* PDP, VID2SIZE, VID2WIDTH
+*/
+#define ODN_PDP_VID2SIZE_VID2WIDTH_MASK				(0x0FFF0000)
+#define ODN_PDP_VID2SIZE_VID2WIDTH_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID2SIZE_VID2WIDTH_SHIFT			(16)
+#define ODN_PDP_VID2SIZE_VID2WIDTH_LENGTH			(12)
+#define ODN_PDP_VID2SIZE_VID2WIDTH_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID2SIZE, VID2HEIGHT
+*/
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_SHIFT			(0)
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_LENGTH			(12)
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3SIZE_OFFSET						(0x0098)
+
+/* PDP, VID3SIZE, VID3WIDTH
+*/
+#define ODN_PDP_VID3SIZE_VID3WIDTH_MASK				(0x0FFF0000)
+#define ODN_PDP_VID3SIZE_VID3WIDTH_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID3SIZE_VID3WIDTH_SHIFT			(16)
+#define ODN_PDP_VID3SIZE_VID3WIDTH_LENGTH			(12)
+#define ODN_PDP_VID3SIZE_VID3WIDTH_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID3SIZE, VID3HEIGHT
+*/
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_SHIFT			(0)
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_LENGTH			(12)
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4SIZE_OFFSET						(0x009C)
+
+/* PDP, VID4SIZE, VID4WIDTH
+*/
+#define ODN_PDP_VID4SIZE_VID4WIDTH_MASK				(0x0FFF0000)
+#define ODN_PDP_VID4SIZE_VID4WIDTH_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID4SIZE_VID4WIDTH_SHIFT			(16)
+#define ODN_PDP_VID4SIZE_VID4WIDTH_LENGTH			(12)
+#define ODN_PDP_VID4SIZE_VID4WIDTH_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VID4SIZE, VID4HEIGHT
+*/
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_MASK			(0x00000FFF)
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_SHIFT			(0)
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_LENGTH			(12)
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1POSN_OFFSET					(0x00A0)
+
+/* PDP, GRPH1POSN, GRPH1XSTART
+*/
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SHIFT			(16)
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LENGTH		(12)
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1POSN, GRPH1YSTART
+*/
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SHIFT			(0)
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LENGTH		(12)
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2POSN_OFFSET					(0x00A4)
+
+/* PDP, GRPH2POSN, GRPH2XSTART
+*/
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SHIFT			(16)
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LENGTH		(12)
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2POSN, GRPH2YSTART
+*/
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SHIFT			(0)
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LENGTH		(12)
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3POSN_OFFSET					(0x00A8)
+
+/* PDP, GRPH3POSN, GRPH3XSTART
+*/
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SHIFT			(16)
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LENGTH		(12)
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3POSN, GRPH3YSTART
+*/
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SHIFT			(0)
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LENGTH		(12)
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4POSN_OFFSET					(0x00AC)
+
+/* PDP, GRPH4POSN, GRPH4XSTART
+*/
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SHIFT			(16)
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LENGTH		(12)
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4POSN, GRPH4YSTART
+*/
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LSBMASK		(0x00000FFF)
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SHIFT			(0)
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LENGTH		(12)
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1POSN_OFFSET						(0x00B0)
+
+/* PDP, VID1POSN, VID1XSTART
+*/
+#define ODN_PDP_VID1POSN_VID1XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_VID1POSN_VID1XSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID1POSN_VID1XSTART_SHIFT			(16)
+#define ODN_PDP_VID1POSN_VID1XSTART_LENGTH			(12)
+#define ODN_PDP_VID1POSN_VID1XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1POSN, VID1YSTART
+*/
+#define ODN_PDP_VID1POSN_VID1YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_VID1POSN_VID1YSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID1POSN_VID1YSTART_SHIFT			(0)
+#define ODN_PDP_VID1POSN_VID1YSTART_LENGTH			(12)
+#define ODN_PDP_VID1POSN_VID1YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2POSN_OFFSET						(0x00B4)
+
+/* PDP, VID2POSN, VID2XSTART
+*/
+#define ODN_PDP_VID2POSN_VID2XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_VID2POSN_VID2XSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID2POSN_VID2XSTART_SHIFT			(16)
+#define ODN_PDP_VID2POSN_VID2XSTART_LENGTH			(12)
+#define ODN_PDP_VID2POSN_VID2XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2POSN, VID2YSTART
+*/
+#define ODN_PDP_VID2POSN_VID2YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_VID2POSN_VID2YSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID2POSN_VID2YSTART_SHIFT			(0)
+#define ODN_PDP_VID2POSN_VID2YSTART_LENGTH			(12)
+#define ODN_PDP_VID2POSN_VID2YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3POSN_OFFSET						(0x00B8)
+
+/* PDP, VID3POSN, VID3XSTART
+*/
+#define ODN_PDP_VID3POSN_VID3XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_VID3POSN_VID3XSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID3POSN_VID3XSTART_SHIFT			(16)
+#define ODN_PDP_VID3POSN_VID3XSTART_LENGTH			(12)
+#define ODN_PDP_VID3POSN_VID3XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3POSN, VID3YSTART
+*/
+#define ODN_PDP_VID3POSN_VID3YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_VID3POSN_VID3YSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID3POSN_VID3YSTART_SHIFT			(0)
+#define ODN_PDP_VID3POSN_VID3YSTART_LENGTH			(12)
+#define ODN_PDP_VID3POSN_VID3YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4POSN_OFFSET						(0x00BC)
+
+/* PDP, VID4POSN, VID4XSTART
+*/
+#define ODN_PDP_VID4POSN_VID4XSTART_MASK			(0x0FFF0000)
+#define ODN_PDP_VID4POSN_VID4XSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID4POSN_VID4XSTART_SHIFT			(16)
+#define ODN_PDP_VID4POSN_VID4XSTART_LENGTH			(12)
+#define ODN_PDP_VID4POSN_VID4XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4POSN, VID4YSTART
+*/
+#define ODN_PDP_VID4POSN_VID4YSTART_MASK			(0x00000FFF)
+#define ODN_PDP_VID4POSN_VID4YSTART_LSBMASK			(0x00000FFF)
+#define ODN_PDP_VID4POSN_VID4YSTART_SHIFT			(0)
+#define ODN_PDP_VID4POSN_VID4YSTART_LENGTH			(12)
+#define ODN_PDP_VID4POSN_VID4YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1GALPHA_OFFSET					(0x00C0)
+
+/* PDP, GRPH1GALPHA, GRPH1GALPHA
+*/
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_MASK		(0x000003FF)
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT		(0)
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LENGTH		(10)
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2GALPHA_OFFSET					(0x00C4)
+
+/* PDP, GRPH2GALPHA, GRPH2GALPHA
+*/
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_MASK		(0x000003FF)
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT		(0)
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LENGTH		(10)
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3GALPHA_OFFSET					(0x00C8)
+
+/* PDP, GRPH3GALPHA, GRPH3GALPHA
+*/
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_MASK		(0x000003FF)
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SHIFT		(0)
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LENGTH		(10)
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4GALPHA_OFFSET					(0x00CC)
+
+/* PDP, GRPH4GALPHA, GRPH4GALPHA
+*/
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_MASK		(0x000003FF)
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT		(0)
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LENGTH		(10)
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1GALPHA_OFFSET					(0x00D0)
+
+/* PDP, VID1GALPHA, VID1GALPHA
+*/
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_MASK			(0x000003FF)
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_SHIFT			(0)
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_LENGTH		(10)
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2GALPHA_OFFSET					(0x00D4)
+
+/* PDP, VID2GALPHA, VID2GALPHA
+*/
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_MASK			(0x000003FF)
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_SHIFT			(0)
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_LENGTH		(10)
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3GALPHA_OFFSET					(0x00D8)
+
+/* PDP, VID3GALPHA, VID3GALPHA
+*/
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_MASK			(0x000003FF)
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_SHIFT			(0)
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_LENGTH		(10)
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4GALPHA_OFFSET					(0x00DC)
+
+/* PDP, VID4GALPHA, VID4GALPHA
+*/
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_MASK			(0x000003FF)
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_SHIFT			(0)
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_LENGTH		(10)
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_SIGNED_FIELD	IMG_FALSE
+
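+/*
+ * Note (editorial): the colour-key registers below hold one 10-bit value
+ * per component. R sits in its own register at bits [9:0]; G and B are
+ * packed into the matching _GB register at bits [25:16] and [9:0]
+ * respectively, as the MASK and SHIFT values that follow show.
+ */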
+#define ODN_PDP_GRPH1CKEY_R_OFFSET					(0x00E0)
+
+/* PDP, GRPH1CKEY_R, GRPH1CKEY_R
+*/
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_MASK		(0x000003FF)
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SHIFT		(0)
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LENGTH		(10)
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1CKEY_GB_OFFSET					(0x00E4)
+
+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_G
+*/
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK		(0x03FF0000)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT		(16)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LENGTH		(10)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_B
+*/
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_MASK		(0x000003FF)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SHIFT		(0)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LENGTH		(10)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2CKEY_R_OFFSET					(0x00E8)
+
+/* PDP, GRPH2CKEY_R, GRPH2CKEY_R
+*/
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_MASK		(0x000003FF)
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SHIFT		(0)
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LENGTH		(10)
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2CKEY_GB_OFFSET					(0x00EC)
+
+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_G
+*/
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_MASK		(0x03FF0000)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SHIFT		(16)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LENGTH		(10)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_B
+*/
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_MASK		(0x000003FF)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SHIFT		(0)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LENGTH		(10)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3CKEY_R_OFFSET					(0x00F0)
+
+/* PDP, GRPH3CKEY_R, GRPH3CKEY_R
+*/
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_MASK		(0x000003FF)
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SHIFT		(0)
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LENGTH		(10)
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3CKEY_GB_OFFSET					(0x00F4)
+
+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_G
+*/
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_MASK		(0x03FF0000)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SHIFT		(16)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LENGTH		(10)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_B
+*/
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_MASK		(0x000003FF)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SHIFT		(0)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LENGTH		(10)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4CKEY_R_OFFSET					(0x00F8)
+
+/* PDP, GRPH4CKEY_R, GRPH4CKEY_R
+*/
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_MASK		(0x000003FF)
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SHIFT		(0)
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LENGTH		(10)
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4CKEY_GB_OFFSET					(0x00FC)
+
+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_G
+*/
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_MASK		(0x03FF0000)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SHIFT		(16)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LENGTH		(10)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_B
+*/
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_MASK		(0x000003FF)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SHIFT		(0)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LENGTH		(10)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1CKEY_R_OFFSET					(0x0100)
+
+/* PDP, VID1CKEY_R, VID1CKEY_R
+*/
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_MASK			(0x000003FF)
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SHIFT			(0)
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LENGTH		(10)
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1CKEY_GB_OFFSET					(0x0104)
+
+/* PDP, VID1CKEY_GB, VID1CKEY_G
+*/
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_MASK			(0x03FF0000)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SHIFT		(16)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LENGTH		(10)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CKEY_GB, VID1CKEY_B
+*/
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_MASK			(0x000003FF)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SHIFT		(0)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LENGTH		(10)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2CKEY_R_OFFSET					(0x0108)
+
+/* PDP, VID2CKEY_R, VID2CKEY_R
+*/
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_MASK			(0x000003FF)
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SHIFT			(0)
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LENGTH		(10)
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2CKEY_GB_OFFSET					(0x010C)
+
+/* PDP, VID2CKEY_GB, VID2CKEY_G
+*/
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_MASK			(0x03FF0000)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SHIFT		(16)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LENGTH		(10)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CKEY_GB, VID2CKEY_B
+*/
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_MASK			(0x000003FF)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SHIFT		(0)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LENGTH		(10)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3CKEY_R_OFFSET					(0x0110)
+
+/* PDP, VID3CKEY_R, VID3CKEY_R
+*/
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_MASK			(0x000003FF)
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SHIFT			(0)
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LENGTH		(10)
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3CKEY_GB_OFFSET					(0x0114)
+
+/* PDP, VID3CKEY_GB, VID3CKEY_G
+*/
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_MASK			(0x03FF0000)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SHIFT		(16)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LENGTH		(10)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CKEY_GB, VID3CKEY_B
+*/
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_MASK			(0x000003FF)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SHIFT		(0)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LENGTH		(10)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4CKEY_R_OFFSET					(0x0118)
+
+/* PDP, VID4CKEY_R, VID4CKEY_R
+*/
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_MASK			(0x000003FF)
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SHIFT			(0)
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LENGTH		(10)
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4CKEY_GB_OFFSET					(0x011C)
+
+/* PDP, VID4CKEY_GB, VID4CKEY_G
+*/
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_MASK			(0x03FF0000)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT		(16)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LENGTH		(10)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CKEY_GB, VID4CKEY_B
+*/
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_MASK			(0x000003FF)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SHIFT		(0)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LENGTH		(10)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1BLND2_R_OFFSET					(0x0120)
+
+/* PDP, GRPH1BLND2_R, GRPH1PIXDBL
+*/
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_MASK		(0x80000000)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SHIFT		(31)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LENGTH		(1)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1BLND2_R, GRPH1LINDBL
+*/
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_MASK		(0x20000000)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SHIFT		(29)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LENGTH		(1)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1BLND2_R, GRPH1CKEYMASK_R
+*/
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_MASK	(0x000003FF)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1BLND2_GB_OFFSET				(0x0124)
+
+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_G
+*/
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LENGTH (10)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_B
+*/
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LENGTH (10)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2BLND2_R_OFFSET					(0x0128)
+
+/* PDP, GRPH2BLND2_R, GRPH2PIXDBL
+*/
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_MASK		(0x80000000)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SHIFT		(31)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LENGTH		(1)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2BLND2_R, GRPH2LINDBL
+*/
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_MASK		(0x20000000)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SHIFT		(29)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LENGTH		(1)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2BLND2_R, GRPH2CKEYMASK_R
+*/
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_MASK	(0x000003FF)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2BLND2_GB_OFFSET				(0x012C)
+
+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_G
+*/
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LENGTH (10)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_B
+*/
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LENGTH (10)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3BLND2_R_OFFSET					(0x0130)
+
+/* PDP, GRPH3BLND2_R, GRPH3PIXDBL
+*/
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_MASK		(0x80000000)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SHIFT		(31)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LENGTH		(1)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3BLND2_R, GRPH3LINDBL
+*/
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_MASK		(0x20000000)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SHIFT		(29)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LENGTH		(1)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3BLND2_R, GRPH3CKEYMASK_R
+*/
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_MASK	(0x000003FF)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3BLND2_GB_OFFSET				(0x0134)
+
+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_G
+*/
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LENGTH (10)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_B
+*/
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LENGTH (10)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4BLND2_R_OFFSET					(0x0138)
+
+/* PDP, GRPH4BLND2_R, GRPH4PIXDBL
+*/
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_MASK		(0x80000000)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SHIFT		(31)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LENGTH		(1)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4BLND2_R, GRPH4LINDBL
+*/
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_MASK		(0x20000000)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LSBMASK	(0x00000001)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SHIFT		(29)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LENGTH		(1)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4BLND2_R, GRPH4CKEYMASK_R
+*/
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_MASK	(0x000003FF)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4BLND2_GB_OFFSET				(0x013C)
+
+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_G
+*/
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LENGTH (10)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_B
+*/
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LENGTH (10)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1BLND2_R_OFFSET					(0x0140)
+
+/* PDP, VID1BLND2_R, VID1CKEYMASK_R
+*/
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_MASK		(0x000003FF)
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1BLND2_GB_OFFSET					(0x0144)
+
+/* PDP, VID1BLND2_GB, VID1CKEYMASK_G
+*/
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LENGTH	(10)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID1BLND2_GB, VID1CKEYMASK_B
+*/
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LENGTH	(10)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2BLND2_R_OFFSET					(0x0148)
+
+/* PDP, VID2BLND2_R, VID2CKEYMASK_R
+*/
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_MASK		(0x000003FF)
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2BLND2_GB_OFFSET					(0x014C)
+
+/* PDP, VID2BLND2_GB, VID2CKEYMASK_G
+*/
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LENGTH	(10)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID2BLND2_GB, VID2CKEYMASK_B
+*/
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LENGTH	(10)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3BLND2_R_OFFSET					(0x0150)
+
+/* PDP, VID3BLND2_R, VID3CKEYMASK_R
+*/
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_MASK		(0x000003FF)
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3BLND2_GB_OFFSET					(0x0154)
+
+/* PDP, VID3BLND2_GB, VID3CKEYMASK_G
+*/
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LENGTH	(10)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID3BLND2_GB, VID3CKEYMASK_B
+*/
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LENGTH	(10)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4BLND2_R_OFFSET					(0x0158)
+
+/* PDP, VID4BLND2_R, VID4CKEYMASK_R
+*/
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_MASK		(0x000003FF)
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SHIFT	(0)
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LENGTH	(10)
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4BLND2_GB_OFFSET					(0x015C)
+
+/* PDP, VID4BLND2_GB, VID4CKEYMASK_G
+*/
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_MASK	(0x03FF0000)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SHIFT	(16)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LENGTH	(10)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4BLND2_GB, VID4CKEYMASK_B
+*/
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_MASK	(0x000003FF)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SHIFT	(0)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LENGTH	(10)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_OFFSET		    (0x0160)
+
+/* PDP, GRPH1INTERLEAVE_CTRL, GRPH1INTFIELD
+*/
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK	(0x00000001)
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT (0)
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LENGTH (1)
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_OFFSET		    (0x0164)
+
+/* PDP, GRPH2INTERLEAVE_CTRL, GRPH2INTFIELD
+*/
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK (0x00000001)
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT (0)
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LENGTH (1)
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_OFFSET		    (0x0168)
+
+/* PDP, GRPH3INTERLEAVE_CTRL, GRPH3INTFIELD
+*/
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_MASK (0x00000001)
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SHIFT (0)
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LENGTH (1)
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_OFFSET	    	(0x016C)
+
+/* PDP, GRPH4INTERLEAVE_CTRL, GRPH4INTFIELD
+*/
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK (0x00000001)
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT (0)
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LENGTH (1)
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1INTERLEAVE_CTRL_OFFSET		    (0x0170)
+
+/* PDP, VID1INTERLEAVE_CTRL, VID1INTFIELD
+*/
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK (0x00000001)
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT (0)
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LENGTH (1)
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2INTERLEAVE_CTRL_OFFSET		    (0x0174)
+
+/* PDP, VID2INTERLEAVE_CTRL, VID2INTFIELD
+*/
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_MASK (0x00000001)
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SHIFT (0)
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LENGTH (1)
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3INTERLEAVE_CTRL_OFFSET		    (0x0178)
+
+/* PDP, VID3INTERLEAVE_CTRL, VID3INTFIELD
+*/
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_MASK (0x00000001)
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SHIFT (0)
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LENGTH (1)
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4INTERLEAVE_CTRL_OFFSET		    (0x017C)
+
+/* PDP, VID4INTERLEAVE_CTRL, VID4INTFIELD
+*/
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_MASK (0x00000001)
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SHIFT (0)
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LENGTH (1)
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SIGNED_FIELD IMG_FALSE
+
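+/*
+ * Note (editorial): the base-address fields below occupy bits [31:5]
+ * (MASK 0xFFFFFFE0, SHIFT 5, LENGTH 27), which suggests plane base
+ * addresses are 32-byte aligned, with the low five address bits implied
+ * zero. An illustrative sketch of programming one:
+ *
+ *   reg = addr & ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK;
+ */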
+#define ODN_PDP_GRPH1BASEADDR_OFFSET		        (0x0180)
+
+/* PDP, GRPH1BASEADDR, GRPH1BASEADDR
+*/
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK	(0xFFFFFFE0)
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT	(5)
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LENGTH	(27)
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2BASEADDR_OFFSET		        (0x0184)
+
+/* PDP, GRPH2BASEADDR, GRPH2BASEADDR
+*/
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_MASK	(0xFFFFFFE0)
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SHIFT	(5)
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LENGTH	(27)
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3BASEADDR_OFFSET		        (0x0188)
+
+/* PDP, GRPH3BASEADDR, GRPH3BASEADDR
+*/
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_MASK	(0xFFFFFFE0)
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SHIFT	(5)
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LENGTH	(27)
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4BASEADDR_OFFSET		        (0x018C)
+
+/* PDP, GRPH4BASEADDR, GRPH4BASEADDR
+*/
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_MASK	(0xFFFFFFE0)
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SHIFT	(5)
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LENGTH	(27)
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1BASEADDR_OFFSET		            (0x0190)
+
+/* PDP, VID1BASEADDR, VID1BASEADDR
+*/
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SHIFT		(5)
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH	(27)
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2BASEADDR_OFFSET		            (0x0194)
+
+/* PDP, VID2BASEADDR, VID2BASEADDR
+*/
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SHIFT		(5)
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LENGTH	(27)
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3BASEADDR_OFFSET		            (0x0198)
+
+/* PDP, VID3BASEADDR, VID3BASEADDR
+*/
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SHIFT		(5)
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LENGTH	(27)
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4BASEADDR_OFFSET		            (0x019C)
+
+/* PDP, VID4BASEADDR, VID4BASEADDR
+*/
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SHIFT		(5)
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LENGTH	(27)
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1UBASEADDR_OFFSET		(0x01B0)
+
+/* PDP, VID1UBASEADDR, VID1UBASEADDR
+*/
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_MASK	(0xFFFFFFE0)
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LSBMASK	(0x07FFFFFF)
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SHIFT	(5)
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH	(27)
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2UBASEADDR_OFFSET		(0x01B4)
+
+/* PDP, VID2UBASEADDR, VID2UBASEADDR
+*/
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3UBASEADDR_OFFSET		(0x01B8)
+
+/* PDP, VID3UBASEADDR, VID3UBASEADDR
+*/
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4UBASEADDR_OFFSET		(0x01BC)
+
+/* PDP, VID4UBASEADDR, VID4UBASEADDR
+*/
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VBASEADDR_OFFSET		(0x01D0)
+
+/* PDP, VID1VBASEADDR, VID1VBASEADDR
+*/
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VBASEADDR_OFFSET		(0x01D4)
+
+/* PDP, VID2VBASEADDR, VID2VBASEADDR
+*/
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VBASEADDR_OFFSET		(0x01D8)
+
+/* PDP, VID3VBASEADDR, VID3VBASEADDR
+*/
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VBASEADDR_OFFSET		(0x01DC)
+
+/* PDP, VID4VBASEADDR, VID4VBASEADDR
+*/
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_MASK		(0xFFFFFFE0)
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SHIFT		(5)
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LENGTH		(27)
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1POSTSKIPCTRL_OFFSET		(0x0230)
+
+/* PDP, VID1POSTSKIPCTRL, VID1HPOSTCLIP
+*/
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK		(0x007F0000)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT		(16)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LENGTH		(7)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1POSTSKIPCTRL, VID1VPOSTCLIP
+*/
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_MASK		(0x0000003F)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LSBMASK		(0x0000003F)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SHIFT		(0)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LENGTH		(6)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2POSTSKIPCTRL_OFFSET		(0x0234)
+
+/* PDP, VID2POSTSKIPCTRL, VID2HPOSTCLIP
+*/
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_MASK		(0x007F0000)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SHIFT		(16)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LENGTH		(7)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2POSTSKIPCTRL, VID2VPOSTCLIP
+*/
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_MASK		(0x0000003F)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LSBMASK		(0x0000003F)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SHIFT		(0)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LENGTH		(6)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3POSTSKIPCTRL_OFFSET		(0x0238)
+
+/* PDP, VID3POSTSKIPCTRL, VID3HPOSTCLIP
+*/
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_MASK		(0x007F0000)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SHIFT		(16)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LENGTH		(7)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3POSTSKIPCTRL, VID3VPOSTCLIP
+*/
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_MASK		(0x0000003F)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LSBMASK		(0x0000003F)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SHIFT		(0)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LENGTH		(6)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4POSTSKIPCTRL_OFFSET		(0x023C)
+
+/* PDP, VID4POSTSKIPCTRL, VID4HPOSTCLIP
+*/
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_MASK		(0x007F0000)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SHIFT		(16)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LENGTH		(7)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4POSTSKIPCTRL, VID4VPOSTCLIP
+*/
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_MASK		(0x0000003F)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LSBMASK		(0x0000003F)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SHIFT		(0)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LENGTH		(6)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1DECIMATE_CTRL_OFFSET		(0x0240)
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_EN
+*/
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2DECIMATE_CTRL_OFFSET		(0x0244)
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_EN
+*/
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3DECIMATE_CTRL_OFFSET		(0x0248)
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_EN
+*/
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4DECIMATE_CTRL_OFFSET		(0x024C)
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_EN
+*/
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1DECIMATE_CTRL_OFFSET		(0x0250)
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_EN
+*/
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
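+/*
+ * Usage sketch (illustration only): several fields of one register are
+ * composed by OR-ing the individually packed values.  The helper below
+ * assembles a VID1DECIMATE_CTRL word from a 4-bit line-discard count and
+ * a pixel-halving flag; pdp_write() is a hypothetical MMIO accessor.
+ */
+#if 0	/* illustration only */
+static inline void odn_pdp_vid1_decimate(void __iomem *base, u32 discard,
+					 bool halve)
+{
+	u32 val = 0;
+
+	val |= (discard &
+		ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK)
+		<< ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT;
+	if (halve)
+		val |= ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK;
+	val |= ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK;
+	pdp_write(base, ODN_PDP_VID1DECIMATE_CTRL_OFFSET, val);
+}
+#endif
+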
+#define ODN_PDP_VID2DECIMATE_CTRL_OFFSET		(0x0254)
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_EN
+*/
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3DECIMATE_CTRL_OFFSET		(0x0258)
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_EN
+*/
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4DECIMATE_CTRL_OFFSET		(0x025C)
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_EN
+*/
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_MASK		(0x00000001)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SHIFT		(0)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LENGTH		(1)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1SKIPCTRL_OFFSET		(0x0270)
+
+/* PDP, VID1SKIPCTRL, VID1HSKIP
+*/
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_MASK		(0x0FFF0000)
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT		(16)
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LENGTH		(12)
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SKIPCTRL, VID1VSKIP
+*/
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_MASK		(0x00000FFF)
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT		(0)
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LENGTH		(12)
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2SKIPCTRL_OFFSET		(0x0274)
+
+/* PDP, VID2SKIPCTRL, VID2HSKIP
+*/
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_MASK		(0x0FFF0000)
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SHIFT		(16)
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LENGTH		(12)
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SKIPCTRL, VID2VSKIP
+*/
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_MASK		(0x00000FFF)
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SHIFT		(0)
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LENGTH		(12)
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3SKIPCTRL_OFFSET		(0x0278)
+
+/* PDP, VID3SKIPCTRL, VID3HSKIP
+*/
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_MASK		(0x0FFF0000)
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SHIFT		(16)
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LENGTH		(12)
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SKIPCTRL, VID3VSKIP
+*/
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_MASK		(0x00000FFF)
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SHIFT		(0)
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LENGTH		(12)
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4SKIPCTRL_OFFSET		(0x027C)
+
+/* PDP, VID4SKIPCTRL, VID4HSKIP
+*/
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_MASK		(0x0FFF0000)
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SHIFT		(16)
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LENGTH		(12)
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SKIPCTRL, VID4VSKIP
+*/
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_MASK		(0x00000FFF)
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SHIFT		(0)
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LENGTH		(12)
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1SCALECTRL_OFFSET		(0x0460)
+
+/* PDP, VID1SCALECTRL, VID1HSCALEBP
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_MASK		(0x80000000)
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SHIFT		(31)
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LENGTH		(1)
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VSCALEBP
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_MASK		(0x40000000)
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SHIFT		(30)
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LENGTH		(1)
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1HSBEFOREVS
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_MASK		(0x20000000)
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SHIFT		(29)
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LENGTH		(1)
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VSURUNCTRL
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_MASK		(0x08000000)
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SHIFT		(27)
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LENGTH		(1)
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1PAN_EN
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_MASK		(0x00040000)
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SHIFT		(18)
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LENGTH		(1)
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VORDER
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_MASK		(0x00030000)
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LSBMASK		(0x00000003)
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SHIFT		(16)
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LENGTH		(2)
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VPITCH
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SHIFT		(0)
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LENGTH		(16)
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VSINIT_OFFSET		(0x0464)
+
+/* PDP, VID1VSINIT, VID1VINITIAL1
+*/
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_MASK		(0xFFFF0000)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SHIFT		(16)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LENGTH		(16)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1VSINIT, VID1VINITIAL0
+*/
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_MASK		(0x0000FFFF)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SHIFT		(0)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LENGTH		(16)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF0_OFFSET		(0x0468)
+
+/* PDP, VID1VCOEFF0, VID1VCOEFF0
+*/
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF1_OFFSET		(0x046C)
+
+/* PDP, VID1VCOEFF1, VID1VCOEFF1
+*/
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF2_OFFSET		(0x0470)
+
+/* PDP, VID1VCOEFF2, VID1VCOEFF2
+*/
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF3_OFFSET		(0x0474)
+
+/* PDP, VID1VCOEFF3, VID1VCOEFF3
+*/
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF4_OFFSET		(0x0478)
+
+/* PDP, VID1VCOEFF4, VID1VCOEFF4
+*/
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF5_OFFSET		(0x047C)
+
+/* PDP, VID1VCOEFF5, VID1VCOEFF5
+*/
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF6_OFFSET		(0x0480)
+
+/* PDP, VID1VCOEFF6, VID1VCOEFF6
+*/
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF7_OFFSET		(0x0484)
+
+/* PDP, VID1VCOEFF7, VID1VCOEFF7
+*/
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF8_OFFSET		(0x0488)
+
+/* PDP, VID1VCOEFF8, VID1VCOEFF8
+*/
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_MASK		(0x000000FF)
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LENGTH		(8)
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HSINIT_OFFSET		(0x048C)
+
+/* PDP, VID1HSINIT, VID1HINITIAL
+*/
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_MASK		(0xFFFF0000)
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SHIFT		(16)
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LENGTH		(16)
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1HSINIT, VID1HPITCH
+*/
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_SHIFT		(0)
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_LENGTH		(16)
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF0_OFFSET		(0x0490)
+
+/* PDP, VID1HCOEFF0, VID1HCOEFF0
+*/
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF1_OFFSET		(0x0494)
+
+/* PDP, VID1HCOEFF1, VID1HCOEFF1
+*/
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF2_OFFSET		(0x0498)
+
+/* PDP, VID1HCOEFF2, VID1HCOEFF2
+*/
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF3_OFFSET		(0x049C)
+
+/* PDP, VID1HCOEFF3, VID1HCOEFF3
+*/
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF4_OFFSET		(0x04A0)
+
+/* PDP, VID1HCOEFF4, VID1HCOEFF4
+*/
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF5_OFFSET		(0x04A4)
+
+/* PDP, VID1HCOEFF5, VID1HCOEFF5
+*/
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF6_OFFSET		(0x04A8)
+
+/* PDP, VID1HCOEFF6, VID1HCOEFF6
+*/
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF7_OFFSET		(0x04AC)
+
+/* PDP, VID1HCOEFF7, VID1HCOEFF7
+*/
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF8_OFFSET		(0x04B0)
+
+/* PDP, VID1HCOEFF8, VID1HCOEFF8
+*/
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF9_OFFSET		(0x04B4)
+
+/* PDP, VID1HCOEFF9, VID1HCOEFF9
+*/
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF10_OFFSET		(0x04B8)
+
+/* PDP, VID1HCOEFF10, VID1HCOEFF10
+*/
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF11_OFFSET		(0x04BC)
+
+/* PDP, VID1HCOEFF11, VID1HCOEFF11
+*/
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF12_OFFSET		(0x04C0)
+
+/* PDP, VID1HCOEFF12, VID1HCOEFF12
+*/
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF13_OFFSET		(0x04C4)
+
+/* PDP, VID1HCOEFF13, VID1HCOEFF13
+*/
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF14_OFFSET		(0x04C8)
+
+/* PDP, VID1HCOEFF14, VID1HCOEFF14
+*/
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF15_OFFSET		(0x04CC)
+
+/* PDP, VID1HCOEFF15, VID1HCOEFF15
+*/
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LENGTH		(32)
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF16_OFFSET		(0x04D0)
+
+/* PDP, VID1HCOEFF16, VID1HCOEFF16
+*/
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_MASK		(0x000000FF)
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SHIFT		(0)
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LENGTH		(8)
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1SCALESIZE_OFFSET		(0x04D4)
+
+/* PDP, VID1SCALESIZE, VID1SCALEWIDTH
+*/
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_MASK		(0x0FFF0000)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SHIFT		(16)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LENGTH		(12)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALESIZE, VID1SCALEHEIGHT
+*/
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_MASK		(0x00000FFF)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SHIFT		(0)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LENGTH		(12)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CORE_ID_OFFSET		(0x04E0)
+
+/* PDP, PVR_ODN_PDP_CORE_ID, GROUP_ID
+*/
+#define ODN_PDP_CORE_ID_GROUP_ID_MASK		(0xFF000000)
+#define ODN_PDP_CORE_ID_GROUP_ID_LSBMASK		(0x000000FF)
+#define ODN_PDP_CORE_ID_GROUP_ID_SHIFT		(24)
+#define ODN_PDP_CORE_ID_GROUP_ID_LENGTH		(8)
+#define ODN_PDP_CORE_ID_GROUP_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_ODN_PDP_CORE_ID, CORE_ID
+*/
+#define ODN_PDP_CORE_ID_CORE_ID_MASK		(0x00FF0000)
+#define ODN_PDP_CORE_ID_CORE_ID_LSBMASK		(0x000000FF)
+#define ODN_PDP_CORE_ID_CORE_ID_SHIFT		(16)
+#define ODN_PDP_CORE_ID_CORE_ID_LENGTH		(8)
+#define ODN_PDP_CORE_ID_CORE_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_ODN_PDP_CORE_ID, CONFIG_ID
+*/
+#define ODN_PDP_CORE_ID_CONFIG_ID_MASK		(0x0000FFFF)
+#define ODN_PDP_CORE_ID_CONFIG_ID_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_CORE_ID_CONFIG_ID_SHIFT		(0)
+#define ODN_PDP_CORE_ID_CONFIG_ID_LENGTH		(16)
+#define ODN_PDP_CORE_ID_CONFIG_ID_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CORE_REV_OFFSET		(0x04F0)
+
+/* PDP, PVR_ODN_PDP_CORE_REV, MAJOR_REV
+*/
+#define ODN_PDP_CORE_REV_MAJOR_REV_MASK		(0x00FF0000)
+#define ODN_PDP_CORE_REV_MAJOR_REV_LSBMASK		(0x000000FF)
+#define ODN_PDP_CORE_REV_MAJOR_REV_SHIFT		(16)
+#define ODN_PDP_CORE_REV_MAJOR_REV_LENGTH		(8)
+#define ODN_PDP_CORE_REV_MAJOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_ODN_PDP_CORE_REV, MINOR_REV
+*/
+#define ODN_PDP_CORE_REV_MINOR_REV_MASK		(0x0000FF00)
+#define ODN_PDP_CORE_REV_MINOR_REV_LSBMASK		(0x000000FF)
+#define ODN_PDP_CORE_REV_MINOR_REV_SHIFT		(8)
+#define ODN_PDP_CORE_REV_MINOR_REV_LENGTH		(8)
+#define ODN_PDP_CORE_REV_MINOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_ODN_PDP_CORE_REV, MAINT_REV
+*/
+#define ODN_PDP_CORE_REV_MAINT_REV_MASK		(0x000000FF)
+#define ODN_PDP_CORE_REV_MAINT_REV_LSBMASK		(0x000000FF)
+#define ODN_PDP_CORE_REV_MAINT_REV_SHIFT		(0)
+#define ODN_PDP_CORE_REV_MAINT_REV_LENGTH		(8)
+#define ODN_PDP_CORE_REV_MAINT_REV_SIGNED_FIELD	IMG_FALSE
+
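+/*
+ * Usage sketch (illustration only): read-side fields are recovered by
+ * masking and shifting down, the inverse of the packing shown earlier.
+ * pdp_read() is a hypothetical MMIO accessor.
+ */
+#if 0	/* illustration only */
+static inline u8 odn_pdp_major_rev(void __iomem *base)
+{
+	u32 rev = pdp_read(base, ODN_PDP_CORE_REV_OFFSET);
+
+	return (rev & ODN_PDP_CORE_REV_MAJOR_REV_MASK) >>
+		ODN_PDP_CORE_REV_MAJOR_REV_SHIFT;
+}
+#endif
+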
+#define ODN_PDP_VID2SCALECTRL_OFFSET		(0x0500)
+
+/* PDP, VID2SCALECTRL, VID2HSCALEBP
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_MASK		(0x80000000)
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SHIFT		(31)
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LENGTH		(1)
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VSCALEBP
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_MASK		(0x40000000)
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SHIFT		(30)
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LENGTH		(1)
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2HSBEFOREVS
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_MASK		(0x20000000)
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SHIFT		(29)
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LENGTH		(1)
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VSURUNCTRL
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_MASK		(0x08000000)
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SHIFT		(27)
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LENGTH		(1)
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2PAN_EN
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_MASK		(0x00040000)
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SHIFT		(18)
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LENGTH		(1)
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VORDER
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_MASK		(0x00030000)
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LSBMASK		(0x00000003)
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SHIFT		(16)
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LENGTH		(2)
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VPITCH
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SHIFT		(0)
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LENGTH		(16)
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VSINIT_OFFSET		(0x0504)
+
+/* PDP, VID2VSINIT, VID2VINITIAL1
+*/
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_MASK		(0xFFFF0000)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SHIFT		(16)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_LENGTH		(16)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2VSINIT, VID2VINITIAL0
+*/
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_MASK		(0x0000FFFF)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SHIFT		(0)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LENGTH		(16)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF0_OFFSET		(0x0508)
+
+/* PDP, VID2VCOEFF0, VID2VCOEFF0
+*/
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF1_OFFSET		(0x050C)
+
+/* PDP, VID2VCOEFF1, VID2VCOEFF1
+*/
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF2_OFFSET		(0x0510)
+
+/* PDP, VID2VCOEFF2, VID2VCOEFF2
+*/
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF3_OFFSET		(0x0514)
+
+/* PDP, VID2VCOEFF3, VID2VCOEFF3
+*/
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF4_OFFSET		(0x0518)
+
+/* PDP, VID2VCOEFF4, VID2VCOEFF4
+*/
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF5_OFFSET		(0x051C)
+
+/* PDP, VID2VCOEFF5, VID2VCOEFF5
+*/
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF6_OFFSET		(0x0520)
+
+/* PDP, VID2VCOEFF6, VID2VCOEFF6
+*/
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF7_OFFSET		(0x0524)
+
+/* PDP, VID2VCOEFF7, VID2VCOEFF7
+*/
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF8_OFFSET		(0x0528)
+
+/* PDP, VID2VCOEFF8, VID2VCOEFF8
+*/
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_MASK		(0x000000FF)
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LENGTH		(8)
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HSINIT_OFFSET		(0x052C)
+
+/* PDP, VID2HSINIT, VID2HINITIAL
+*/
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_MASK		(0xFFFF0000)
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SHIFT		(16)
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LENGTH		(16)
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2HSINIT, VID2HPITCH
+*/
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_SHIFT		(0)
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_LENGTH		(16)
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF0_OFFSET		(0x0530)
+
+/* PDP, VID2HCOEFF0, VID2HCOEFF0
+*/
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF1_OFFSET		(0x0534)
+
+/* PDP, VID2HCOEFF1, VID2HCOEFF1
+*/
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF2_OFFSET		(0x0538)
+
+/* PDP, VID2HCOEFF2, VID2HCOEFF2
+*/
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF3_OFFSET		(0x053C)
+
+/* PDP, VID2HCOEFF3, VID2HCOEFF3
+*/
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF4_OFFSET		(0x0540)
+
+/* PDP, VID2HCOEFF4, VID2HCOEFF4
+*/
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF5_OFFSET		(0x0544)
+
+/* PDP, VID2HCOEFF5, VID2HCOEFF5
+*/
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF6_OFFSET		(0x0548)
+
+/* PDP, VID2HCOEFF6, VID2HCOEFF6
+*/
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF7_OFFSET		(0x054C)
+
+/* PDP, VID2HCOEFF7, VID2HCOEFF7
+*/
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF8_OFFSET		(0x0550)
+
+/* PDP, VID2HCOEFF8, VID2HCOEFF8
+*/
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF9_OFFSET		(0x0554)
+
+/* PDP, VID2HCOEFF9, VID2HCOEFF9
+*/
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF10_OFFSET		(0x0558)
+
+/* PDP, VID2HCOEFF10, VID2HCOEFF10
+*/
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF11_OFFSET		(0x055C)
+
+/* PDP, VID2HCOEFF11, VID2HCOEFF11
+*/
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF12_OFFSET		(0x0560)
+
+/* PDP, VID2HCOEFF12, VID2HCOEFF12
+*/
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF13_OFFSET		(0x0564)
+
+/* PDP, VID2HCOEFF13, VID2HCOEFF13
+*/
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF14_OFFSET		(0x0568)
+
+/* PDP, VID2HCOEFF14, VID2HCOEFF14
+*/
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF15_OFFSET		(0x056C)
+
+/* PDP, VID2HCOEFF15, VID2HCOEFF15
+*/
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LENGTH		(32)
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF16_OFFSET		(0x0570)
+
+/* PDP, VID2HCOEFF16, VID2HCOEFF16
+*/
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_MASK		(0x000000FF)
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SHIFT		(0)
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LENGTH		(8)
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2SCALESIZE_OFFSET		(0x0574)
+
+/* PDP, VID2SCALESIZE, VID2SCALEWIDTH
+*/
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_MASK		(0x0FFF0000)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SHIFT		(16)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LENGTH		(12)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALESIZE, VID2SCALEHEIGHT
+*/
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_MASK		(0x00000FFF)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SHIFT		(0)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LENGTH		(12)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3SCALECTRL_OFFSET		(0x0578)
+
+/* PDP, VID3SCALECTRL, VID3HSCALEBP
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_MASK		(0x80000000)
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SHIFT		(31)
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LENGTH		(1)
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VSCALEBP
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_MASK		(0x40000000)
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SHIFT		(30)
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LENGTH		(1)
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3HSBEFOREVS
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_MASK		(0x20000000)
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SHIFT		(29)
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LENGTH		(1)
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VSURUNCTRL
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_MASK		(0x08000000)
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SHIFT		(27)
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LENGTH		(1)
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3PAN_EN
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_MASK		(0x00040000)
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SHIFT		(18)
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LENGTH		(1)
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VORDER
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_MASK		(0x00030000)
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_LSBMASK		(0x00000003)
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SHIFT		(16)
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_LENGTH		(2)
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VPITCH
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SHIFT		(0)
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LENGTH		(16)
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VSINIT_OFFSET		(0x057C)
+
+/* PDP, VID3VSINIT, VID3VINITIAL1
+*/
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_MASK		(0xFFFF0000)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SHIFT		(16)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LENGTH		(16)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3VSINIT, VID3VINITIAL0
+*/
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_MASK		(0x0000FFFF)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SHIFT		(0)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LENGTH		(16)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF0_OFFSET		(0x0580)
+
+/* PDP, VID3VCOEFF0, VID3VCOEFF0
+*/
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF1_OFFSET		(0x0584)
+
+/* PDP, VID3VCOEFF1, VID3VCOEFF1
+*/
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF2_OFFSET		(0x0588)
+
+/* PDP, VID3VCOEFF2, VID3VCOEFF2
+*/
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF3_OFFSET		(0x058C)
+
+/* PDP, VID3VCOEFF3, VID3VCOEFF3
+*/
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF4_OFFSET		(0x0590)
+
+/* PDP, VID3VCOEFF4, VID3VCOEFF4
+*/
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF5_OFFSET		(0x0594)
+
+/* PDP, VID3VCOEFF5, VID3VCOEFF5
+*/
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF6_OFFSET		(0x0598)
+
+/* PDP, VID3VCOEFF6, VID3VCOEFF6
+*/
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF7_OFFSET		(0x059C)
+
+/* PDP, VID3VCOEFF7, VID3VCOEFF7
+*/
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF8_OFFSET		(0x05A0)
+
+/* PDP, VID3VCOEFF8, VID3VCOEFF8
+*/
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_MASK		(0x000000FF)
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LENGTH		(8)
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HSINIT_OFFSET		(0x05A4)
+
+/* PDP, VID3HSINIT, VID3HINITIAL
+*/
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_MASK		(0xFFFF0000)
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SHIFT		(16)
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LENGTH		(16)
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3HSINIT, VID3HPITCH
+*/
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_SHIFT		(0)
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_LENGTH		(16)
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF0_OFFSET		(0x05A8)
+
+/* PDP, VID3HCOEFF0, VID3HCOEFF0
+*/
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF1_OFFSET		(0x05AC)
+
+/* PDP, VID3HCOEFF1, VID3HCOEFF1
+*/
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF2_OFFSET		(0x05B0)
+
+/* PDP, VID3HCOEFF2, VID3HCOEFF2
+*/
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF3_OFFSET		(0x05B4)
+
+/* PDP, VID3HCOEFF3, VID3HCOEFF3
+*/
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF4_OFFSET		(0x05B8)
+
+/* PDP, VID3HCOEFF4, VID3HCOEFF4
+*/
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF5_OFFSET		(0x05BC)
+
+/* PDP, VID3HCOEFF5, VID3HCOEFF5
+*/
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF6_OFFSET		(0x05C0)
+
+/* PDP, VID3HCOEFF6, VID3HCOEFF6
+*/
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF7_OFFSET		(0x05C4)
+
+/* PDP, VID3HCOEFF7, VID3HCOEFF7
+*/
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF8_OFFSET		(0x05C8)
+
+/* PDP, VID3HCOEFF8, VID3HCOEFF8
+*/
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF9_OFFSET		(0x05CC)
+
+/* PDP, VID3HCOEFF9, VID3HCOEFF9
+*/
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF10_OFFSET		(0x05D0)
+
+/* PDP, VID3HCOEFF10, VID3HCOEFF10
+*/
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LENGTH	(32)
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF11_OFFSET		(0x05D4)
+
+/* PDP, VID3HCOEFF11, VID3HCOEFF11
+*/
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LENGTH	(32)
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF12_OFFSET		(0x05D8)
+
+/* PDP, VID3HCOEFF12, VID3HCOEFF12
+*/
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF13_OFFSET		(0x05DC)
+
+/* PDP, VID3HCOEFF13, VID3HCOEFF13
+*/
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF14_OFFSET		(0x05E0)
+
+/* PDP, VID3HCOEFF14, VID3HCOEFF14
+*/
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF15_OFFSET		(0x05E4)
+
+/* PDP, VID3HCOEFF15, VID3HCOEFF15
+*/
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LENGTH		(32)
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF16_OFFSET		(0x05E8)
+
+/* PDP, VID3HCOEFF16, VID3HCOEFF16
+*/
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_MASK		(0x000000FF)
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SHIFT		(0)
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LENGTH		(8)
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3SCALESIZE_OFFSET		(0x05EC)
+
+/* PDP, VID3SCALESIZE, VID3SCALEWIDTH
+*/
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK		(0x0FFF0000)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT		(16)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LENGTH		(12)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALESIZE, VID3SCALEHEIGHT
+*/
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_MASK		(0x00000FFF)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SHIFT		(0)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LENGTH		(12)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4SCALECTRL_OFFSET		(0x05F0)
+
+/* PDP, VID4SCALECTRL, VID4HSCALEBP
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_MASK		(0x80000000)
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LSBMASK	(0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SHIFT	(31)
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LENGTH	(1)
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VSCALEBP
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_MASK		(0x40000000)
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LSBMASK	(0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SHIFT	(30)
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LENGTH	(1)
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4HSBEFOREVS
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_MASK	(0x20000000)
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LSBMASK	(0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SHIFT	(29)
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LENGTH	(1)
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VSURUNCTRL
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_MASK	(0x08000000)
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LSBMASK	(0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SHIFT	(27)
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LENGTH	(1)
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4PAN_EN
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_MASK		(0x00040000)
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LSBMASK	(0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SHIFT		(18)
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LENGTH		(1)
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VORDER
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_MASK		(0x00030000)
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LSBMASK	(0x00000003)
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SHIFT		(16)
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LENGTH		(2)
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VPITCH
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LSBMASK	(0x0000FFFF)
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SHIFT		(0)
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LENGTH		(16)
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VSINIT_OFFSET			(0x05F4)
+
+/* PDP, VID4VSINIT, VID4VINITIAL1
+*/
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_MASK		(0xFFFF0000)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SHIFT		(16)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LENGTH		(16)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4VSINIT, VID4VINITIAL0
+*/
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_MASK		(0x0000FFFF)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SHIFT		(0)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LENGTH		(16)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF0_OFFSET			(0x05F8)
+
+/* PDP, VID4VCOEFF0, VID4VCOEFF0
+*/
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF1_OFFSET			(0x05FC)
+
+/* PDP, VID4VCOEFF1, VID4VCOEFF1
+*/
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF2_OFFSET			(0x0600)
+
+/* PDP, VID4VCOEFF2, VID4VCOEFF2
+*/
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF3_OFFSET			(0x0604)
+
+/* PDP, VID4VCOEFF3, VID4VCOEFF3
+*/
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF4_OFFSET			(0x0608)
+
+/* PDP, VID4VCOEFF4, VID4VCOEFF4
+*/
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF5_OFFSET			(0x060C)
+
+/* PDP, VID4VCOEFF5, VID4VCOEFF5
+*/
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF6_OFFSET			(0x0610)
+
+/* PDP, VID4VCOEFF6, VID4VCOEFF6
+*/
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF7_OFFSET			(0x0614)
+
+/* PDP, VID4VCOEFF7, VID4VCOEFF7
+*/
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF8_OFFSET			(0x0618)
+
+/* PDP, VID4VCOEFF8, VID4VCOEFF8
+*/
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_MASK		(0x000000FF)
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LENGTH		(8)
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HSINIT_OFFSET			(0x061C)
+
+/* PDP, VID4HSINIT, VID4HINITIAL
+*/
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_MASK		(0xFFFF0000)
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SHIFT		(16)
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LENGTH		(16)
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4HSINIT, VID4HPITCH
+*/
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_MASK		(0x0000FFFF)
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_SHIFT		(0)
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_LENGTH		(16)
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF0_OFFSET			(0x0620)
+
+/* PDP, VID4HCOEFF0, VID4HCOEFF0
+*/
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF1_OFFSET			(0x0624)
+
+/* PDP, VID4HCOEFF1, VID4HCOEFF1
+*/
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF2_OFFSET			(0x0628)
+
+/* PDP, VID4HCOEFF2, VID4HCOEFF2
+*/
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF3_OFFSET			(0x062C)
+
+/* PDP, VID4HCOEFF3, VID4HCOEFF3
+*/
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF4_OFFSET			(0x0630)
+
+/* PDP, VID4HCOEFF4, VID4HCOEFF4
+*/
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF5_OFFSET			(0x0634)
+
+/* PDP, VID4HCOEFF5, VID4HCOEFF5
+*/
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF6_OFFSET			(0x0638)
+
+/* PDP, VID4HCOEFF6, VID4HCOEFF6
+*/
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF7_OFFSET			(0x063C)
+
+/* PDP, VID4HCOEFF7, VID4HCOEFF7
+*/
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF8_OFFSET			(0x0640)
+
+/* PDP, VID4HCOEFF8, VID4HCOEFF8
+*/
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF9_OFFSET			(0x0644)
+
+/* PDP, VID4HCOEFF9, VID4HCOEFF9
+*/
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LENGTH		(32)
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF10_OFFSET			(0x0648)
+
+/* PDP, VID4HCOEFF10, VID4HCOEFF10
+*/
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LENGTH	(32)
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF11_OFFSET			(0x064C)
+
+/* PDP, VID4HCOEFF11, VID4HCOEFF11
+*/
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LENGTH	(32)
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF12_OFFSET			(0x0650)
+
+/* PDP, VID4HCOEFF12, VID4HCOEFF12
+*/
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LENGTH	(32)
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF13_OFFSET			(0x0654)
+
+/* PDP, VID4HCOEFF13, VID4HCOEFF13
+*/
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LENGTH	(32)
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF14_OFFSET			(0x0658)
+
+/* PDP, VID4HCOEFF14, VID4HCOEFF14
+*/
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LENGTH	(32)
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF15_OFFSET			(0x065C)
+
+/* PDP, VID4HCOEFF15, VID4HCOEFF15
+*/
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_MASK		(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LSBMASK	(0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LENGTH	(32)
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF16_OFFSET			(0x0660)
+
+/* PDP, VID4HCOEFF16, VID4HCOEFF16
+*/
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_MASK		(0x000000FF)
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LSBMASK	(0x000000FF)
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SHIFT		(0)
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LENGTH	(8)
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4SCALESIZE_OFFSET			(0x0664)
+
+/* PDP, VID4SCALESIZE, VID4SCALEWIDTH
+*/
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK		(0x0FFF0000)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT		(16)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LENGTH		(12)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALESIZE, VID4SCALEHEIGHT
+*/
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK		(0x00000FFF)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT		(0)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LENGTH		(12)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
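+
+/*
+ * Illustrative helpers (an editorial sketch, not part of the original
+ * register map): each field above is described by a *_MASK/*_SHIFT pair
+ * (with *_LSBMASK and *_LENGTH giving the right-justified view), and the
+ * conventional way to unpack and pack such a field within a 32-bit
+ * register word is shown below. The helper names are hypothetical; u32
+ * is assumed to come from <linux/types.h>.
+ */
+static inline u32 odn_pdp_field_get(u32 reg, u32 mask, u32 shift)
+{
+	/* Isolate the field bits and right-justify them. */
+	return (reg & mask) >> shift;
+}
+
+static inline u32 odn_pdp_field_set(u32 reg, u32 mask, u32 shift, u32 val)
+{
+	/* Clear the field, then merge the new value, clipped to the mask. */
+	return (reg & ~mask) | ((val << shift) & mask);
+}
+
+/*
+ * For example, a 12-bit scaled width/height pair could be packed into
+ * VID4SCALESIZE as:
+ *
+ *	u32 v = 0;
+ *
+ *	v = odn_pdp_field_set(v, ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK,
+ *			      ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT, w);
+ *	v = odn_pdp_field_set(v, ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK,
+ *			      ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT, h);
+ */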
+
+#define ODN_PDP_PORTER_BLND0_OFFSET				(0x0668)
+
+/* PDP, PORTER_BLND0, BLND0BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND0, BLND0PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND1_OFFSET				(0x066C)
+
+/* PDP, PORTER_BLND1, BLND1BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND1, BLND1PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND2_OFFSET				(0x0670)
+
+/* PDP, PORTER_BLND2, BLND2BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND2, BLND2PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND3_OFFSET				(0x0674)
+
+/* PDP, PORTER_BLND3, BLND3BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND3, BLND3PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND4_OFFSET				(0x0678)
+
+/* PDP, PORTER_BLND4, BLND4BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND4, BLND4PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND5_OFFSET				(0x067C)
+
+/* PDP, PORTER_BLND5, BLND5BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND5, BLND5PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND6_OFFSET				(0x0680)
+
+/* PDP, PORTER_BLND6, BLND6BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND6, BLND6PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND7_OFFSET				(0x0684)
+
+/* PDP, PORTER_BLND7, BLND7BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_MASK		(0x00000010)
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LSBMASK		(0x00000001)
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SHIFT		(4)
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LENGTH		(1)
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND7, BLND7PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_MASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LSBMASK		(0x0000000F)
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SHIFT		(0)
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LENGTH		(4)
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SIGNED_FIELD	IMG_FALSE
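+
+/*
+ * Usage sketch (hypothetical, for illustration only): each of the eight
+ * PORTER_BLND stages above carries a 1-bit blend type and a 4-bit Porter
+ * mode. Writing an arbitrary pair into stage 0 with the field helpers
+ * sketched earlier might look like this, where "base" is the ioremapped
+ * PDP register base:
+ *
+ *	u32 v = 0;
+ *
+ *	v = odn_pdp_field_set(v, ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK,
+ *			      ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT, type);
+ *	v = odn_pdp_field_set(v, ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_MASK,
+ *			      ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT, mode);
+ *	writel(v, base + ODN_PDP_PORTER_BLND0_OFFSET);
+ */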
+
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06C8)
+
+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_TRANS
+*/
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_OPAQUE
+*/
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_OFFSET		(0x06CC)
+
+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMAX
+*/
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_MASK		(0x03FF0000)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SHIFT		(16)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LENGTH		(10)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMIN
+*/
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_MASK		(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SHIFT		(0)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LENGTH		(10)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1LUMAKEY_C_RG_OFFSET		(0x06D0)
+
+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_R
+*/
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_MASK		(0x0FFF0000)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SHIFT		(16)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LENGTH		(12)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_G
+*/
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_MASK		(0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SHIFT		(0)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LENGTH		(12)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1LUMAKEY_C_B_OFFSET		(0x06D4)
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYALPHAMULT
+*/
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SHIFT		(29)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LENGTH		(1)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYEN
+*/
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK		(0x10000000)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SHIFT		(28)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LENGTH		(1)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYOUTOFF
+*/
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT		(16)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LENGTH	(10)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYC_B
+*/
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK		(0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT		(0)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LENGTH		(12)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06D8)
+
+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_TRANS
+*/
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_OPAQUE
+*/
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_OFFSET				(0x06DC)
+
+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMAX
+*/
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_MASK		(0x03FF0000)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SHIFT		(16)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LENGTH		(10)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMIN
+*/
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_MASK		(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SHIFT		(0)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LENGTH		(10)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2LUMAKEY_C_RG_OFFSET				(0x06E0)
+
+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_R
+*/
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_MASK		(0x0FFF0000)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SHIFT		(16)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LENGTH		(12)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_G
+*/
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_MASK		(0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SHIFT		(0)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LENGTH		(12)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2LUMAKEY_C_B_OFFSET				(0x06E4)
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYALPHAMULT
+*/
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SHIFT		(29)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LENGTH		(1)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYEN
+*/
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_MASK		(0x10000000)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SHIFT		(28)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LENGTH		(1)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYOUTOFF
+*/
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SHIFT		(16)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LENGTH	(10)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYC_B
+*/
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_MASK		(0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SHIFT		(0)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LENGTH		(12)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06E8)
+
+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_TRANS
+*/
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_OPAQUE
+*/
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_OFFSET			(0x06EC)
+
+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMAX
+*/
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_MASK		(0x03FF0000)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SHIFT		(16)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LENGTH		(10)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMIN
+*/
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_MASK		(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SHIFT		(0)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LENGTH		(10)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3LUMAKEY_C_RG_OFFSET		(0x06F0)
+
+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_R
+*/
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_MASK		(0x0FFF0000)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SHIFT		(16)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LENGTH		(12)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_G
+*/
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_MASK		(0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SHIFT		(0)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LENGTH		(12)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3LUMAKEY_C_B_OFFSET		(0x06F4)
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYALPHAMULT
+*/
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SHIFT		(29)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LENGTH		(1)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYEN
+*/
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_MASK		(0x10000000)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SHIFT		(28)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LENGTH		(1)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYOUTOFF
+*/
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SHIFT		(16)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LENGTH	(10)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYC_B
+*/
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_MASK		(0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SHIFT		(0)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LENGTH		(12)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06F8)
+
+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_TRANS
+*/
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_OPAQUE
+*/
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET		(0x06FC)
+
+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMAX
+*/
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK		(0x03FF0000)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT		(16)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LENGTH		(10)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMIN
+*/
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK		(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT		(0)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LENGTH		(10)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4LUMAKEY_C_RG_OFFSET			(0x0700)
+
+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_R
+*/
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_MASK		(0x0FFF0000)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SHIFT		(16)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LENGTH		(12)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_G
+*/
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_MASK		(0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SHIFT		(0)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LENGTH		(12)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4LUMAKEY_C_B_OFFSET		(0x0704)
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYALPHAMULT
+*/
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_MASK	(0x20000000)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LSBMASK	(0x00000001)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SHIFT	(29)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LENGTH	(1)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYEN
+*/
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_MASK		(0x10000000)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SHIFT		(28)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LENGTH		(1)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYOUTOFF
+*/
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LSBMASK	(0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SHIFT		(16)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LENGTH	(10)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYC_B
+*/
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_MASK		(0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SHIFT		(0)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LENGTH		(12)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF0_OFFSET			(0x0708)
+
+/* PDP, CSCCOEFF0, CSCCOEFFRU
+*/
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_MASK		(0x003FF800)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT		(11)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF0, CSCCOEFFRY
+*/
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_MASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT		(0)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF1_OFFSET			(0x070C)
+
+/* PDP, CSCCOEFF1, CSCCOEFFGY
+*/
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_MASK		(0x003FF800)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SHIFT		(11)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF1, CSCCOEFFRV
+*/
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_MASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SHIFT		(0)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF2_OFFSET			(0x0710)
+
+/* PDP, CSCCOEFF2, CSCCOEFFGV
+*/
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_MASK		(0x003FF800)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SHIFT		(11)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF2, CSCCOEFFGU
+*/
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_MASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SHIFT		(0)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF3_OFFSET			(0x0714)
+
+/* PDP, CSCCOEFF3, CSCCOEFFBU
+*/
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_MASK		(0x003FF800)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SHIFT		(11)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF3, CSCCOEFFBY
+*/
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_MASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SHIFT		(0)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF4_OFFSET			(0x0718)
+
+/* PDP, CSCCOEFF4, CSCCOEFFBV
+*/
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_MASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LSBMASK		(0x000007FF)
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SHIFT		(0)
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LENGTH		(11)
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_BGNDCOL_AR_OFFSET			(0x071C)
+
+/* PDP, BGNDCOL_AR, BGNDCOL_A
+*/
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_MASK		(0x03FF0000)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LSBMASK		(0x000003FF)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT		(16)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LENGTH		(10)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, BGNDCOL_AR, BGNDCOL_R
+*/
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_MASK		(0x000003FF)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT		(0)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LENGTH		(10)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_BGNDCOL_GB_OFFSET			(0x0720)
+
+/* PDP, BGNDCOL_GB, BGNDCOL_G
+*/
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_MASK		(0x03FF0000)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT		(16)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LENGTH		(10)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, BGNDCOL_GB, BGNDCOL_B
+*/
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_MASK		(0x000003FF)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT		(0)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LENGTH		(10)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_BORDCOL_R_OFFSET			(0x0724)
+
+/* PDP, BORDCOL_R, BORDCOL_R
+*/
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_MASK		(0x000003FF)
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_SHIFT		(0)
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_LENGTH		(10)
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_BORDCOL_GB_OFFSET			(0x0728)
+
+/* PDP, BORDCOL_GB, BORDCOL_G
+*/
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_MASK		(0x03FF0000)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SHIFT		(16)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LENGTH		(10)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, BORDCOL_GB, BORDCOL_B
+*/
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_MASK		(0x000003FF)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SHIFT		(0)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LENGTH		(10)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_LINESTAT_OFFSET				(0x0734)
+
+/* PDP, LINESTAT, LINENO
+*/
+#define ODN_PDP_LINESTAT_LINENO_MASK			(0x00001FFF)
+#define ODN_PDP_LINESTAT_LINENO_LSBMASK			(0x00001FFF)
+#define ODN_PDP_LINESTAT_LINENO_SHIFT			(0)
+#define ODN_PDP_LINESTAT_LINENO_LENGTH			(13)
+#define ODN_PDP_LINESTAT_LINENO_SIGNED_FIELD		IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_OFFSET	(0x0738)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C12
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_MASK		(0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C11
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_MASK		(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_OFFSET		(0x073C)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C21
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_MASK		(0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C13
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_MASK		(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_OFFSET		(0x0740)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C23
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_MASK		(0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C22
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_MASK		(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_OFFSET		(0x0744)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C32
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_MASK		(0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C31
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_MASK		(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LSBMASK	(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_OFFSET		(0x0748)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_C33
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_MASK		(0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LSBMASK		(0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LENGTH		(14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_RANGE
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_MASK		(0x00000030)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LSBMASK		(0x00000003)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SHIFT		(4)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LENGTH		(2)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_EN
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK		(0x00000001)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LENGTH		(1)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_OFFSET		(0x074C)
+
+/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_G
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_MASK		(0x0FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LSBMASK		(0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LENGTH		(12)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_B
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_MASK		(0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LSBMASK		(0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LENGTH		(12)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_OFFSET		(0x0750)
+
+/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_R, CR_PROCAMP_OUTOFF_R
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_MASK		(0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LSBMASK		(0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LENGTH		(12)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_OFFSET		(0x0754)
+
+/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_G
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_MASK		(0x03FF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SHIFT		(16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LENGTH		(10)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_B
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_MASK		(0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LENGTH		(10)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_OFFSET		(0x0758)
+
+/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_R, CR_PROCAMP_INOFF_R
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_MASK		(0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SHIFT		(0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LENGTH		(10)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_SIGNAT_R_OFFSET		(0x075C)
+
+/* PDP, SIGNAT_R, SIGNATURE_R
+*/
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_MASK		(0x000003FF)
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SHIFT		(0)
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LENGTH		(10)
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_SIGNAT_GB_OFFSET		(0x0760)
+
+/* PDP, SIGNAT_GB, SIGNATURE_G
+*/
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_MASK		(0x03FF0000)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SHIFT		(16)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LENGTH		(10)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SIGNAT_GB, SIGNATURE_B
+*/
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_MASK		(0x000003FF)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_SHIFT		(0)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LENGTH		(10)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET		(0x0764)
+
+/* PDP, REGISTER_UPDATE_CTRL, BYPASS_DOUBLE_BUFFERING
+*/
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_MASK		(0x00000004)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LSBMASK		(0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SHIFT		(2)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LENGTH		(1)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, REGISTER_UPDATE_CTRL, REGISTERS_VALID
+*/
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK		(0x00000002)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LSBMASK		(0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT		(1)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LENGTH		(1)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, REGISTER_UPDATE_CTRL, USE_VBLANK
+*/
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK		(0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LSBMASK		(0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT		(0)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LENGTH		(1)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_REGISTER_UPDATE_STATUS_OFFSET		(0x0768)
+
+/* PDP, REGISTER_UPDATE_STATUS, REGISTERS_UPDATED
+*/
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_MASK		(0x00000002)
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LSBMASK		(0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SHIFT		(1)
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LENGTH		(1)
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DBGCTRL_OFFSET		(0x076C)
+
+/* PDP, DBGCTRL, DBG_READ
+*/
+#define ODN_PDP_DBGCTRL_DBG_READ_MASK		(0x00000002)
+#define ODN_PDP_DBGCTRL_DBG_READ_LSBMASK		(0x00000001)
+#define ODN_PDP_DBGCTRL_DBG_READ_SHIFT		(1)
+#define ODN_PDP_DBGCTRL_DBG_READ_LENGTH		(1)
+#define ODN_PDP_DBGCTRL_DBG_READ_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DBGCTRL, DBG_ENAB
+*/
+#define ODN_PDP_DBGCTRL_DBG_ENAB_MASK		(0x00000001)
+#define ODN_PDP_DBGCTRL_DBG_ENAB_LSBMASK		(0x00000001)
+#define ODN_PDP_DBGCTRL_DBG_ENAB_SHIFT		(0)
+#define ODN_PDP_DBGCTRL_DBG_ENAB_LENGTH		(1)
+#define ODN_PDP_DBGCTRL_DBG_ENAB_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DBGDATA_R_OFFSET		(0x0770)
+
+/* PDP, DBGDATA_R, DBG_DATA_R
+*/
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_MASK		(0x000003FF)
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SHIFT		(0)
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LENGTH		(10)
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DBGDATA_GB_OFFSET		(0x0774)
+
+/* PDP, DBGDATA_GB, DBG_DATA_G
+*/
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_MASK		(0x03FF0000)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SHIFT		(16)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LENGTH		(10)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DBGDATA_GB, DBG_DATA_B
+*/
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_MASK		(0x000003FF)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_SHIFT		(0)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LENGTH		(10)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DBGSIDE_OFFSET				(0x0778)
+
+/* PDP, DBGSIDE, DBG_VAL
+*/
+#define ODN_PDP_DBGSIDE_DBG_VAL_MASK			(0x00000008)
+#define ODN_PDP_DBGSIDE_DBG_VAL_LSBMASK			(0x00000001)
+#define ODN_PDP_DBGSIDE_DBG_VAL_SHIFT			(3)
+#define ODN_PDP_DBGSIDE_DBG_VAL_LENGTH			(1)
+#define ODN_PDP_DBGSIDE_DBG_VAL_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, DBGSIDE, DBG_SIDE
+*/
+#define ODN_PDP_DBGSIDE_DBG_SIDE_MASK			(0x00000007)
+#define ODN_PDP_DBGSIDE_DBG_SIDE_LSBMASK		(0x00000007)
+#define ODN_PDP_DBGSIDE_DBG_SIDE_SHIFT			(0)
+#define ODN_PDP_DBGSIDE_DBG_SIDE_LENGTH			(3)
+#define ODN_PDP_DBGSIDE_DBG_SIDE_SIGNED_FIELD		IMG_FALSE
+
+#define ODN_PDP_OUTPUT_OFFSET				(0x077C)
+
+/* PDP, OUTPUT, EIGHT_BIT_OUTPUT
+*/
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_MASK		(0x00000002)
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LSBMASK		(0x00000001)
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SHIFT		(1)
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LENGTH		(1)
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OUTPUT, OUTPUT_CONFIG
+*/
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_MASK		(0x00000001)
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LSBMASK		(0x00000001)
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SHIFT		(0)
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LENGTH		(1)
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_SYNCCTRL_OFFSET				(0x0780)
+
+/* PDP, SYNCCTRL, SYNCACTIVE
+*/
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_MASK		(0x80000000)
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT		(31)
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, ODN_PDP_RST
+*/
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_MASK		(0x20000000)
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SHIFT		(29)
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, POWERDN
+*/
+#define ODN_PDP_SYNCCTRL_POWERDN_MASK			(0x10000000)
+#define ODN_PDP_SYNCCTRL_POWERDN_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_POWERDN_SHIFT			(28)
+#define ODN_PDP_SYNCCTRL_POWERDN_LENGTH			(1)
+#define ODN_PDP_SYNCCTRL_POWERDN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, SYNCCTRL, LOWPWRMODE
+*/
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_MASK		(0x08000000)
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SHIFT		(27)
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDSYNCTRL
+*/
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_MASK		(0x04000000)
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SHIFT		(26)
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDINTCTRL
+*/
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_MASK		(0x02000000)
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SHIFT		(25)
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDCTRL
+*/
+#define ODN_PDP_SYNCCTRL_UPDCTRL_MASK			(0x01000000)
+#define ODN_PDP_SYNCCTRL_UPDCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_UPDCTRL_SHIFT			(24)
+#define ODN_PDP_SYNCCTRL_UPDCTRL_LENGTH			(1)
+#define ODN_PDP_SYNCCTRL_UPDCTRL_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDWAIT
+*/
+#define ODN_PDP_SYNCCTRL_UPDWAIT_MASK			(0x000F0000)
+#define ODN_PDP_SYNCCTRL_UPDWAIT_LSBMASK		(0x0000000F)
+#define ODN_PDP_SYNCCTRL_UPDWAIT_SHIFT			(16)
+#define ODN_PDP_SYNCCTRL_UPDWAIT_LENGTH			(4)
+#define ODN_PDP_SYNCCTRL_UPDWAIT_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, SYNCCTRL, FIELD_EN
+*/
+#define ODN_PDP_SYNCCTRL_FIELD_EN_MASK			(0x00002000)
+#define ODN_PDP_SYNCCTRL_FIELD_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_FIELD_EN_SHIFT			(13)
+#define ODN_PDP_SYNCCTRL_FIELD_EN_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_FIELD_EN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, SYNCCTRL, CSYNC_EN
+*/
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_MASK			(0x00001000)
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_SHIFT			(12)
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, SYNCCTRL, CLKPOL
+*/
+#define ODN_PDP_SYNCCTRL_CLKPOL_MASK		(0x00000800)
+#define ODN_PDP_SYNCCTRL_CLKPOL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_CLKPOL_SHIFT		(11)
+#define ODN_PDP_SYNCCTRL_CLKPOL_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_CLKPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, VS_SLAVE
+*/
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_MASK		(0x00000080)
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_SHIFT		(7)
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, HS_SLAVE
+*/
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_MASK		(0x00000040)
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_SHIFT		(6)
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, BLNKPOL
+*/
+#define ODN_PDP_SYNCCTRL_BLNKPOL_MASK		(0x00000020)
+#define ODN_PDP_SYNCCTRL_BLNKPOL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_BLNKPOL_SHIFT		(5)
+#define ODN_PDP_SYNCCTRL_BLNKPOL_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_BLNKPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, BLNKDIS
+*/
+#define ODN_PDP_SYNCCTRL_BLNKDIS_MASK		(0x00000010)
+#define ODN_PDP_SYNCCTRL_BLNKDIS_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_BLNKDIS_SHIFT		(4)
+#define ODN_PDP_SYNCCTRL_BLNKDIS_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_BLNKDIS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, VSPOL
+*/
+#define ODN_PDP_SYNCCTRL_VSPOL_MASK		(0x00000008)
+#define ODN_PDP_SYNCCTRL_VSPOL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_VSPOL_SHIFT		(3)
+#define ODN_PDP_SYNCCTRL_VSPOL_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_VSPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, VSDIS
+*/
+#define ODN_PDP_SYNCCTRL_VSDIS_MASK		(0x00000004)
+#define ODN_PDP_SYNCCTRL_VSDIS_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_VSDIS_SHIFT		(2)
+#define ODN_PDP_SYNCCTRL_VSDIS_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_VSDIS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, HSPOL
+*/
+#define ODN_PDP_SYNCCTRL_HSPOL_MASK		(0x00000002)
+#define ODN_PDP_SYNCCTRL_HSPOL_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_HSPOL_SHIFT		(1)
+#define ODN_PDP_SYNCCTRL_HSPOL_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_HSPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, HSDIS
+*/
+#define ODN_PDP_SYNCCTRL_HSDIS_MASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_HSDIS_LSBMASK		(0x00000001)
+#define ODN_PDP_SYNCCTRL_HSDIS_SHIFT		(0)
+#define ODN_PDP_SYNCCTRL_HSDIS_LENGTH		(1)
+#define ODN_PDP_SYNCCTRL_HSDIS_SIGNED_FIELD	IMG_FALSE
+
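+/*
+ * Illustrative sketch only, not part of the original header: every field in
+ * this file is described by a _MASK/_SHIFT/_LSBMASK triple, and drivers
+ * conventionally combine them as below to read or update one field within a
+ * register value. The helper names are hypothetical; u32 comes from
+ * <linux/types.h>. Kept under #if 0 because it is a sketch, not code the
+ * header defines.
+ */
+#if 0
+static inline u32 odn_pdp_field_get(u32 reg, u32 mask, u32 shift)
+{
+	/* Isolate the field bits, then right-align them. */
+	return (reg & mask) >> shift;
+}
+
+static inline u32 odn_pdp_field_set(u32 reg, u32 mask, u32 shift, u32 val)
+{
+	/* Clear the old field bits, then merge in the new value. */
+	return (reg & ~mask) | ((val << shift) & mask);
+}
+
+/*
+ * Example: set negative vertical sync polarity in a SYNCCTRL value:
+ *   sync = odn_pdp_field_set(sync, ODN_PDP_SYNCCTRL_VSPOL_MASK,
+ *                            ODN_PDP_SYNCCTRL_VSPOL_SHIFT, 1);
+ */
+#endif
+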
+#define ODN_PDP_HSYNC1_OFFSET			(0x0784)
+
+/* PDP, HSYNC1, HBPS
+*/
+#define ODN_PDP_HSYNC1_HBPS_MASK		(0x1FFF0000)
+#define ODN_PDP_HSYNC1_HBPS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HSYNC1_HBPS_SHIFT		(16)
+#define ODN_PDP_HSYNC1_HBPS_LENGTH		(13)
+#define ODN_PDP_HSYNC1_HBPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HSYNC1, HT
+*/
+#define ODN_PDP_HSYNC1_HT_MASK			(0x00001FFF)
+#define ODN_PDP_HSYNC1_HT_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HSYNC1_HT_SHIFT			(0)
+#define ODN_PDP_HSYNC1_HT_LENGTH		(13)
+#define ODN_PDP_HSYNC1_HT_SIGNED_FIELD		IMG_FALSE
+
+#define ODN_PDP_HSYNC2_OFFSET			(0x0788)
+
+/* PDP, HSYNC2, HAS
+*/
+#define ODN_PDP_HSYNC2_HAS_MASK			(0x1FFF0000)
+#define ODN_PDP_HSYNC2_HAS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HSYNC2_HAS_SHIFT		(16)
+#define ODN_PDP_HSYNC2_HAS_LENGTH		(13)
+#define ODN_PDP_HSYNC2_HAS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HSYNC2, HLBS
+*/
+#define ODN_PDP_HSYNC2_HLBS_MASK		(0x00001FFF)
+#define ODN_PDP_HSYNC2_HLBS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HSYNC2_HLBS_SHIFT		(0)
+#define ODN_PDP_HSYNC2_HLBS_LENGTH		(13)
+#define ODN_PDP_HSYNC2_HLBS_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_HSYNC3_OFFSET			(0x078C)
+
+/* PDP, HSYNC3, HFPS
+*/
+#define ODN_PDP_HSYNC3_HFPS_MASK		(0x1FFF0000)
+#define ODN_PDP_HSYNC3_HFPS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HSYNC3_HFPS_SHIFT		(16)
+#define ODN_PDP_HSYNC3_HFPS_LENGTH		(13)
+#define ODN_PDP_HSYNC3_HFPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HSYNC3, HRBS
+*/
+#define ODN_PDP_HSYNC3_HRBS_MASK		(0x00001FFF)
+#define ODN_PDP_HSYNC3_HRBS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HSYNC3_HRBS_SHIFT		(0)
+#define ODN_PDP_HSYNC3_HRBS_LENGTH		(13)
+#define ODN_PDP_HSYNC3_HRBS_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VSYNC1_OFFSET			(0x0790)
+
+/* PDP, VSYNC1, VBPS
+*/
+#define ODN_PDP_VSYNC1_VBPS_MASK		(0x1FFF0000)
+#define ODN_PDP_VSYNC1_VBPS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VSYNC1_VBPS_SHIFT		(16)
+#define ODN_PDP_VSYNC1_VBPS_LENGTH		(13)
+#define ODN_PDP_VSYNC1_VBPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VSYNC1, VT
+*/
+#define ODN_PDP_VSYNC1_VT_MASK			(0x00001FFF)
+#define ODN_PDP_VSYNC1_VT_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VSYNC1_VT_SHIFT			(0)
+#define ODN_PDP_VSYNC1_VT_LENGTH		(13)
+#define ODN_PDP_VSYNC1_VT_SIGNED_FIELD		IMG_FALSE
+
+#define ODN_PDP_VSYNC2_OFFSET			(0x0794)
+
+/* PDP, VSYNC2, VAS
+*/
+#define ODN_PDP_VSYNC2_VAS_MASK			(0x1FFF0000)
+#define ODN_PDP_VSYNC2_VAS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VSYNC2_VAS_SHIFT		(16)
+#define ODN_PDP_VSYNC2_VAS_LENGTH		(13)
+#define ODN_PDP_VSYNC2_VAS_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VSYNC2, VTBS
+*/
+#define ODN_PDP_VSYNC2_VTBS_MASK		(0x00001FFF)
+#define ODN_PDP_VSYNC2_VTBS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VSYNC2_VTBS_SHIFT		(0)
+#define ODN_PDP_VSYNC2_VTBS_LENGTH		(13)
+#define ODN_PDP_VSYNC2_VTBS_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VSYNC3_OFFSET			(0x0798)
+
+/* PDP, VSYNC3, VFPS
+*/
+#define ODN_PDP_VSYNC3_VFPS_MASK		(0x1FFF0000)
+#define ODN_PDP_VSYNC3_VFPS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VSYNC3_VFPS_SHIFT		(16)
+#define ODN_PDP_VSYNC3_VFPS_LENGTH		(13)
+#define ODN_PDP_VSYNC3_VFPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VSYNC3, VBBS
+*/
+#define ODN_PDP_VSYNC3_VBBS_MASK		(0x00001FFF)
+#define ODN_PDP_VSYNC3_VBBS_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VSYNC3_VBBS_SHIFT		(0)
+#define ODN_PDP_VSYNC3_VBBS_LENGTH		(13)
+#define ODN_PDP_VSYNC3_VBBS_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_INTSTAT_OFFSET			(0x079C)
+
+/* PDP, INTSTAT, INTS_VID4ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_MASK		(0x00080000)
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SHIFT		(19)
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID3ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_MASK		(0x00040000)
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SHIFT		(18)
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID2ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_MASK		(0x00020000)
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SHIFT		(17)
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID1ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_MASK		(0x00010000)
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SHIFT		(16)
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH4ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_MASK		(0x00008000)
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SHIFT		(15)
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH3ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_MASK		(0x00004000)
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SHIFT		(14)
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH2ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_MASK		(0x00002000)
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SHIFT		(13)
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH1ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_MASK		(0x00001000)
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SHIFT		(12)
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID4URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_MASK		(0x00000800)
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_SHIFT		(11)
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID3URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_MASK		(0x00000400)
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_SHIFT		(10)
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID2URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_MASK		(0x00000200)
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_SHIFT		(9)
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID1URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_MASK		(0x00000100)
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_SHIFT		(8)
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH4URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_MASK		(0x00000080)
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SHIFT		(7)
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH3URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_MASK		(0x00000040)
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SHIFT		(6)
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH2URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_MASK		(0x00000020)
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SHIFT		(5)
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH1URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_MASK		(0x00000010)
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SHIFT		(4)
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VBLNK1
+*/
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_MASK		(0x00000008)
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_SHIFT		(3)
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VBLNK0
+*/
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_MASK		(0x00000004)
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_SHIFT		(2)
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_HBLNK1
+*/
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_MASK		(0x00000002)
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_SHIFT		(1)
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_HBLNK0
+*/
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_MASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_LSBMASK		(0x00000001)
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_SHIFT		(0)
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_LENGTH		(1)
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_INTENAB_OFFSET				(0x07A0)
+
+/* PDP, INTENAB, INTEN_VID4ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_MASK		(0x00080000)
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SHIFT		(19)
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID3ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_MASK		(0x00040000)
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SHIFT		(18)
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID2ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_MASK		(0x00020000)
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SHIFT		(17)
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID1ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_MASK		(0x00010000)
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SHIFT		(16)
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH4ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_MASK		(0x00008000)
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SHIFT		(15)
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH3ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_MASK		(0x00004000)
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SHIFT		(14)
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH2ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_MASK		(0x00002000)
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SHIFT		(13)
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH1ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_MASK		(0x00001000)
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SHIFT		(12)
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID4URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_MASK		(0x00000800)
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_SHIFT		(11)
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID3URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_MASK		(0x00000400)
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_SHIFT		(10)
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID2URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_MASK		(0x00000200)
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_SHIFT		(9)
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID1URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_MASK		(0x00000100)
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_SHIFT		(8)
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH4URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_MASK		(0x00000080)
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SHIFT		(7)
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH3URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_MASK		(0x00000040)
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SHIFT		(6)
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH2URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_MASK		(0x00000020)
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SHIFT		(5)
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH1URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_MASK		(0x00000010)
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SHIFT		(4)
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VBLNK1
+*/
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_MASK		(0x00000008)
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_SHIFT		(3)
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VBLNK0
+*/
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_MASK		(0x00000004)
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT		(2)
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_HBLNK1
+*/
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_MASK		(0x00000002)
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_SHIFT		(1)
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_HBLNK0
+*/
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_MASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_LSBMASK		(0x00000001)
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_SHIFT		(0)
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_LENGTH		(1)
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_INTCLR_OFFSET		(0x07A4)
+
+/* PDP, INTCLR, INTCLR_VID4ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_MASK		(0x00080000)
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SHIFT		(19)
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID3ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_MASK		(0x00040000)
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SHIFT		(18)
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID2ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_MASK		(0x00020000)
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SHIFT		(17)
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID1ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_MASK		(0x00010000)
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SHIFT		(16)
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH4ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_MASK		(0x00008000)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SHIFT		(15)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH3ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_MASK		(0x00004000)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SHIFT		(14)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH2ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_MASK		(0x00002000)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SHIFT		(13)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH1ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_MASK		(0x00001000)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SHIFT		(12)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID4URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_MASK		(0x00000800)
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SHIFT		(11)
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID3URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_MASK		(0x00000400)
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SHIFT		(10)
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID2URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_MASK		(0x00000200)
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SHIFT		(9)
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID1URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_MASK		(0x00000100)
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SHIFT		(8)
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH4URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_MASK		(0x00000080)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SHIFT		(7)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH3URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_MASK		(0x00000040)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SHIFT		(6)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH2URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_MASK		(0x00000020)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SHIFT		(5)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH1URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_MASK		(0x00000010)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SHIFT		(4)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VBLNK1
+*/
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_MASK		(0x00000008)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SHIFT		(3)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VBLNK0
+*/
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_MASK		(0x00000004)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SHIFT		(2)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_HBLNK1
+*/
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_MASK		(0x00000002)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SHIFT		(1)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_HBLNK0
+*/
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_MASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LSBMASK		(0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SHIFT		(0)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LENGTH		(1)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SIGNED_FIELD	IMG_FALSE
+
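+/*
+ * Illustrative sketch only: INTSTAT, INTENAB and INTCLR mirror each other
+ * bit for bit, which suggests the usual status/enable/clear interrupt
+ * pattern. Whether INTCLR is write-one-to-clear is an assumption here, as
+ * are the handler name and the ioremapped base pointer; readl()/writel()
+ * are the standard kernel MMIO accessors.
+ */
+#if 0
+static void odn_pdp_handle_vblank0(void __iomem *base)
+{
+	u32 stat = readl(base + ODN_PDP_INTSTAT_OFFSET);
+	u32 enab = readl(base + ODN_PDP_INTENAB_OFFSET);
+
+	/* Only act on sources that are both pending and enabled. */
+	if (stat & enab & ODN_PDP_INTSTAT_INTS_VBLNK0_MASK) {
+		/* ... vblank work ... */
+		/* Assumed write-one-to-clear semantics for INTCLR. */
+		writel(ODN_PDP_INTCLR_INTCLR_VBLNK0_MASK,
+		       base + ODN_PDP_INTCLR_OFFSET);
+	}
+}
+#endif
+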
+#define ODN_PDP_MEMCTRL_OFFSET		(0x07A8)
+
+/* PDP, MEMCTRL, MEMREFRESH
+*/
+#define ODN_PDP_MEMCTRL_MEMREFRESH_MASK		(0xC0000000)
+#define ODN_PDP_MEMCTRL_MEMREFRESH_LSBMASK		(0x00000003)
+#define ODN_PDP_MEMCTRL_MEMREFRESH_SHIFT		(30)
+#define ODN_PDP_MEMCTRL_MEMREFRESH_LENGTH		(2)
+#define ODN_PDP_MEMCTRL_MEMREFRESH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, MEMCTRL, BURSTLEN
+*/
+#define ODN_PDP_MEMCTRL_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_MEMCTRL_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_MEMCTRL_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_MEMCTRL_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_MEMCTRL_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_MEM_THRESH_OFFSET		(0x07AC)
+
+/* PDP, MEM_THRESH, UVTHRESHOLD
+*/
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, MEM_THRESH, YTHRESHOLD
+*/
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, MEM_THRESH, THRESHOLD
+*/
+#define ODN_PDP_MEM_THRESH_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_MEM_THRESH_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_MEM_THRESH_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_MEM_THRESH_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_MEM_THRESH_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_ALTERNATE_3D_CTRL_OFFSET		(0x07B0)
+
+/* PDP, ALTERNATE_3D_CTRL, ALT3D_ON
+*/
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_MASK		(0x00000010)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LSBMASK		(0x00000001)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SHIFT		(4)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LENGTH		(1)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, ALTERNATE_3D_CTRL, ALT3D_BLENDSEL
+*/
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_MASK		(0x00000007)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LSBMASK		(0x00000007)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SHIFT		(0)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LENGTH		(3)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA0_R_OFFSET		(0x07B4)
+
+/* PDP, GAMMA0_R, GAMMA0_R
+*/
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_SHIFT		(0)
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_LENGTH		(10)
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA0_GB_OFFSET		(0x07B8)
+
+/* PDP, GAMMA0_GB, GAMMA0_G
+*/
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SHIFT		(16)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LENGTH		(10)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA0_GB, GAMMA0_B
+*/
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SHIFT		(0)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LENGTH		(10)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SIGNED_FIELD	IMG_FALSE
+
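+/*
+ * Illustrative sketch only: as the masks above show, each gamma table entry
+ * is split across an _R register (red in bits 9:0) and a _GB register
+ * (green in bits 25:16, blue in bits 9:0). The helper below packs one
+ * 10-bit-per-component entry into the GAMMA0 pair; the function name and
+ * base pointer are assumptions.
+ */
+#if 0
+static void odn_pdp_write_gamma0(void __iomem *base, u16 r, u16 g, u16 b)
+{
+	/* Red occupies the low 10 bits of the _R register. */
+	writel(r & ODN_PDP_GAMMA0_R_GAMMA0_R_MASK,
+	       base + ODN_PDP_GAMMA0_R_OFFSET);
+	/* Green and blue share the _GB register. */
+	writel(((g << ODN_PDP_GAMMA0_GB_GAMMA0_G_SHIFT) &
+		ODN_PDP_GAMMA0_GB_GAMMA0_G_MASK) |
+	       (b & ODN_PDP_GAMMA0_GB_GAMMA0_B_MASK),
+	       base + ODN_PDP_GAMMA0_GB_OFFSET);
+}
+#endif
+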
+#define ODN_PDP_GAMMA1_R_OFFSET		(0x07BC)
+
+/* PDP, GAMMA1_R, GAMMA1_R
+*/
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_SHIFT		(0)
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_LENGTH		(10)
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA1_GB_OFFSET		(0x07C0)
+
+/* PDP, GAMMA1_GB, GAMMA1_G
+*/
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SHIFT		(16)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LENGTH		(10)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA1_GB, GAMMA1_B
+*/
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SHIFT		(0)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LENGTH		(10)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA2_R_OFFSET		(0x07C4)
+
+/* PDP, GAMMA2_R, GAMMA2_R
+*/
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_SHIFT		(0)
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_LENGTH		(10)
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA2_GB_OFFSET		(0x07C8)
+
+/* PDP, GAMMA2_GB, GAMMA2_G
+*/
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SHIFT		(16)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_LENGTH		(10)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA2_GB, GAMMA2_B
+*/
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SHIFT		(0)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LENGTH		(10)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA3_R_OFFSET		(0x07CC)
+
+/* PDP, GAMMA3_R, GAMMA3_R
+*/
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_SHIFT		(0)
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_LENGTH		(10)
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA3_GB_OFFSET		(0x07D0)
+
+/* PDP, GAMMA3_GB, GAMMA3_G
+*/
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SHIFT		(16)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LENGTH		(10)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA3_GB, GAMMA3_B
+*/
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SHIFT		(0)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LENGTH		(10)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA4_R_OFFSET		(0x07D4)
+
+/* PDP, GAMMA4_R, GAMMA4_R
+*/
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_SHIFT		(0)
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_LENGTH		(10)
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA4_GB_OFFSET		(0x07D8)
+
+/* PDP, GAMMA4_GB, GAMMA4_G
+*/
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SHIFT		(16)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LENGTH		(10)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA4_GB, GAMMA4_B
+*/
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SHIFT		(0)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LENGTH		(10)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA5_R_OFFSET		(0x07DC)
+
+/* PDP, GAMMA5_R, GAMMA5_R
+*/
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_SHIFT		(0)
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_LENGTH		(10)
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA5_GB_OFFSET		(0x07E0)
+
+/* PDP, GAMMA5_GB, GAMMA5_G
+*/
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SHIFT		(16)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LENGTH		(10)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA5_GB, GAMMA5_B
+*/
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SHIFT		(0)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LENGTH		(10)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA6_R_OFFSET		(0x07E4)
+
+/* PDP, GAMMA6_R, GAMMA6_R
+*/
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_SHIFT		(0)
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_LENGTH		(10)
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA6_GB_OFFSET		(0x07E8)
+
+/* PDP, GAMMA6_GB, GAMMA6_G
+*/
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SHIFT		(16)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LENGTH		(10)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA6_GB, GAMMA6_B
+*/
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SHIFT		(0)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LENGTH		(10)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA7_R_OFFSET		(0x07EC)
+
+/* PDP, GAMMA7_R, GAMMA7_R
+*/
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_SHIFT		(0)
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_LENGTH		(10)
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA7_GB_OFFSET		(0x07F0)
+
+/* PDP, GAMMA7_GB, GAMMA7_G
+*/
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SHIFT		(16)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LENGTH		(10)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA7_GB, GAMMA7_B
+*/
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SHIFT		(0)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LENGTH		(10)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA8_R_OFFSET		(0x07F4)
+
+/* PDP, GAMMA8_R, GAMMA8_R
+*/
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_SHIFT		(0)
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_LENGTH		(10)
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA8_GB_OFFSET		(0x07F8)
+
+/* PDP, GAMMA8_GB, GAMMA8_G
+*/
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SHIFT		(16)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LENGTH		(10)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA8_GB, GAMMA8_B
+*/
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SHIFT		(0)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LENGTH		(10)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA9_R_OFFSET		(0x07FC)
+
+/* PDP, GAMMA9_R, GAMMA9_R
+*/
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_SHIFT		(0)
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_LENGTH		(10)
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA9_GB_OFFSET		(0x0800)
+
+/* PDP, GAMMA9_GB, GAMMA9_G
+*/
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SHIFT		(16)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_LENGTH		(10)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA9_GB, GAMMA9_B
+*/
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SHIFT		(0)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LENGTH		(10)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA10_R_OFFSET		(0x0804)
+
+/* PDP, GAMMA10_R, GAMMA10_R
+*/
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_SHIFT		(0)
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_LENGTH		(10)
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA10_GB_OFFSET		(0x0808)
+
+/* PDP, GAMMA10_GB, GAMMA10_G
+*/
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SHIFT		(16)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LENGTH		(10)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA10_GB, GAMMA10_B
+*/
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SHIFT		(0)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LENGTH		(10)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA11_R_OFFSET		(0x080C)
+
+/* PDP, GAMMA11_R, GAMMA11_R
+*/
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_SHIFT		(0)
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_LENGTH		(10)
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA11_GB_OFFSET		(0x0810)
+
+/* PDP, GAMMA11_GB, GAMMA11_G
+*/
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SHIFT		(16)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LENGTH		(10)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA11_GB, GAMMA11_B
+*/
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SHIFT		(0)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LENGTH		(10)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA12_R_OFFSET		(0x0814)
+
+/* PDP, GAMMA12_R, GAMMA12_R
+*/
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_SHIFT		(0)
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_LENGTH		(10)
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA12_GB_OFFSET		(0x0818)
+
+/* PDP, GAMMA12_GB, GAMMA12_G
+*/
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SHIFT		(16)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LENGTH		(10)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA12_GB, GAMMA12_B
+*/
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SHIFT		(0)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LENGTH		(10)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA13_R_OFFSET		(0x081C)
+
+/* PDP, GAMMA13_R, GAMMA13_R
+*/
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_SHIFT		(0)
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_LENGTH		(10)
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA13_GB_OFFSET		(0x0820)
+
+/* PDP, GAMMA13_GB, GAMMA13_G
+*/
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SHIFT		(16)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LENGTH		(10)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA13_GB, GAMMA13_B
+*/
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SHIFT		(0)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LENGTH		(10)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA14_R_OFFSET		(0x0824)
+
+/* PDP, GAMMA14_R, GAMMA14_R
+*/
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_SHIFT		(0)
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_LENGTH		(10)
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA14_GB_OFFSET		(0x0828)
+
+/* PDP, GAMMA14_GB, GAMMA14_G
+*/
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SHIFT		(16)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LENGTH		(10)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA14_GB, GAMMA14_B
+*/
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SHIFT		(0)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LENGTH		(10)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA15_R_OFFSET		(0x082C)
+
+/* PDP, GAMMA15_R, GAMMA15_R
+*/
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_SHIFT		(0)
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_LENGTH		(10)
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA15_GB_OFFSET		(0x0830)
+
+/* PDP, GAMMA15_GB, GAMMA15_G
+*/
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SHIFT		(16)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LENGTH		(10)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA15_GB, GAMMA15_B
+*/
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SHIFT		(0)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LENGTH		(10)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA16_R_OFFSET		(0x0834)
+
+/* PDP, GAMMA16_R, GAMMA16_R
+*/
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_SHIFT		(0)
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_LENGTH		(10)
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA16_GB_OFFSET		(0x0838)
+
+/* PDP, GAMMA16_GB, GAMMA16_G
+*/
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SHIFT		(16)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LENGTH		(10)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA16_GB, GAMMA16_B
+*/
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SHIFT		(0)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LENGTH		(10)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA17_R_OFFSET		(0x083C)
+
+/* PDP, GAMMA17_R, GAMMA17_R
+*/
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_SHIFT		(0)
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_LENGTH		(10)
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA17_GB_OFFSET		(0x0840)
+
+/* PDP, GAMMA17_GB, GAMMA17_G
+*/
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SHIFT		(16)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LENGTH		(10)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA17_GB, GAMMA17_B
+*/
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SHIFT		(0)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LENGTH		(10)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA18_R_OFFSET		(0x0844)
+
+/* PDP, GAMMA18_R, GAMMA18_R
+*/
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_SHIFT		(0)
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_LENGTH		(10)
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA18_GB_OFFSET		(0x0848)
+
+/* PDP, GAMMA18_GB, GAMMA18_G
+*/
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SHIFT		(16)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LENGTH		(10)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA18_GB, GAMMA18_B
+*/
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SHIFT		(0)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LENGTH		(10)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA19_R_OFFSET		(0x084C)
+
+/* PDP, GAMMA19_R, GAMMA19_R
+*/
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_SHIFT		(0)
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_LENGTH		(10)
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA19_GB_OFFSET		(0x0850)
+
+/* PDP, GAMMA19_GB, GAMMA19_G
+*/
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SHIFT		(16)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LENGTH		(10)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA19_GB, GAMMA19_B
+*/
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SHIFT		(0)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LENGTH		(10)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA20_R_OFFSET		(0x0854)
+
+/* PDP, GAMMA20_R, GAMMA20_R
+*/
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_SHIFT		(0)
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_LENGTH		(10)
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA20_GB_OFFSET		(0x0858)
+
+/* PDP, GAMMA20_GB, GAMMA20_G
+*/
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SHIFT		(16)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LENGTH		(10)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA20_GB, GAMMA20_B
+*/
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SHIFT		(0)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LENGTH		(10)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA21_R_OFFSET		(0x085C)
+
+/* PDP, GAMMA21_R, GAMMA21_R
+*/
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_SHIFT		(0)
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_LENGTH		(10)
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA21_GB_OFFSET		(0x0860)
+
+/* PDP, GAMMA21_GB, GAMMA21_G
+*/
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SHIFT		(16)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LENGTH		(10)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA21_GB, GAMMA21_B
+*/
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SHIFT		(0)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LENGTH		(10)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA22_R_OFFSET		(0x0864)
+
+/* PDP, GAMMA22_R, GAMMA22_R
+*/
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_SHIFT		(0)
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_LENGTH		(10)
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA22_GB_OFFSET		(0x0868)
+
+/* PDP, GAMMA22_GB, GAMMA22_G
+*/
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SHIFT		(16)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LENGTH		(10)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA22_GB, GAMMA22_B
+*/
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SHIFT		(0)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LENGTH		(10)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA23_R_OFFSET		(0x086C)
+
+/* PDP, GAMMA23_R, GAMMA23_R
+*/
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_SHIFT		(0)
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_LENGTH		(10)
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA23_GB_OFFSET		(0x0870)
+
+/* PDP, GAMMA23_GB, GAMMA23_G
+*/
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SHIFT		(16)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LENGTH		(10)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA23_GB, GAMMA23_B
+*/
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SHIFT		(0)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LENGTH		(10)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA24_R_OFFSET		(0x0874)
+
+/* PDP, GAMMA24_R, GAMMA24_R
+*/
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_SHIFT		(0)
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_LENGTH		(10)
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA24_GB_OFFSET		(0x0878)
+
+/* PDP, GAMMA24_GB, GAMMA24_G
+*/
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SHIFT		(16)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LENGTH		(10)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA24_GB, GAMMA24_B
+*/
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SHIFT		(0)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LENGTH		(10)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA25_R_OFFSET		(0x087C)
+
+/* PDP, GAMMA25_R, GAMMA25_R
+*/
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_SHIFT		(0)
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_LENGTH		(10)
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA25_GB_OFFSET		(0x0880)
+
+/* PDP, GAMMA25_GB, GAMMA25_G
+*/
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SHIFT		(16)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LENGTH		(10)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA25_GB, GAMMA25_B
+*/
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SHIFT		(0)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LENGTH		(10)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA26_R_OFFSET		(0x0884)
+
+/* PDP, GAMMA26_R, GAMMA26_R
+*/
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_SHIFT		(0)
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_LENGTH		(10)
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA26_GB_OFFSET		(0x0888)
+
+/* PDP, GAMMA26_GB, GAMMA26_G
+*/
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_SHIFT		(16)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LENGTH		(10)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA26_GB, GAMMA26_B
+*/
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SHIFT		(0)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LENGTH		(10)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA27_R_OFFSET		(0x088C)
+
+/* PDP, GAMMA27_R, GAMMA27_R
+*/
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_SHIFT		(0)
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_LENGTH		(10)
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA27_GB_OFFSET		(0x0890)
+
+/* PDP, GAMMA27_GB, GAMMA27_G
+*/
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SHIFT		(16)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LENGTH		(10)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA27_GB, GAMMA27_B
+*/
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SHIFT		(0)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LENGTH		(10)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA28_R_OFFSET		(0x0894)
+
+/* PDP, GAMMA28_R, GAMMA28_R
+*/
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_SHIFT		(0)
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_LENGTH		(10)
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA28_GB_OFFSET		(0x0898)
+
+/* PDP, GAMMA28_GB, GAMMA28_G
+*/
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SHIFT		(16)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LENGTH		(10)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA28_GB, GAMMA28_B
+*/
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SHIFT		(0)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LENGTH		(10)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA29_R_OFFSET		(0x089C)
+
+/* PDP, GAMMA29_R, GAMMA29_R
+*/
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_SHIFT		(0)
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_LENGTH		(10)
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA29_GB_OFFSET		(0x08A0)
+
+/* PDP, GAMMA29_GB, GAMMA29_G
+*/
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SHIFT		(16)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LENGTH		(10)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA29_GB, GAMMA29_B
+*/
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SHIFT		(0)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LENGTH		(10)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA30_R_OFFSET		(0x08A4)
+
+/* PDP, GAMMA30_R, GAMMA30_R
+*/
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_SHIFT		(0)
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_LENGTH		(10)
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA30_GB_OFFSET		(0x08A8)
+
+/* PDP, GAMMA30_GB, GAMMA30_G
+*/
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SHIFT		(16)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LENGTH		(10)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA30_GB, GAMMA30_B
+*/
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SHIFT		(0)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LENGTH		(10)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA31_R_OFFSET		(0x08AC)
+
+/* PDP, GAMMA31_R, GAMMA31_R
+*/
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_SHIFT		(0)
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_LENGTH		(10)
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA31_GB_OFFSET		(0x08B0)
+
+/* PDP, GAMMA31_GB, GAMMA31_G
+*/
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SHIFT		(16)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LENGTH		(10)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA31_GB, GAMMA31_B
+*/
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SHIFT		(0)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LENGTH		(10)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA32_R_OFFSET		(0x08B4)
+
+/* PDP, GAMMA32_R, GAMMA32_R
+*/
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_SHIFT		(0)
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_LENGTH		(10)
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GAMMA32_GB_OFFSET		(0x08B8)
+
+/* PDP, GAMMA32_GB, GAMMA32_G
+*/
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_MASK		(0x03FF0000)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SHIFT		(16)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LENGTH		(10)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA32_GB, GAMMA32_B
+*/
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_MASK		(0x000003FF)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LSBMASK		(0x000003FF)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SHIFT		(0)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LENGTH		(10)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VEVENT_OFFSET				(0x08BC)
+
+/* PDP, VEVENT, VEVENT
+*/
+#define ODN_PDP_VEVENT_VEVENT_MASK			(0x1FFF0000)
+#define ODN_PDP_VEVENT_VEVENT_LSBMASK			(0x00001FFF)
+#define ODN_PDP_VEVENT_VEVENT_SHIFT			(16)
+#define ODN_PDP_VEVENT_VEVENT_LENGTH			(13)
+#define ODN_PDP_VEVENT_VEVENT_SIGNED_FIELD		IMG_FALSE
+
+/* PDP, VEVENT, VFETCH
+*/
+#define ODN_PDP_VEVENT_VFETCH_MASK			(0x00001FFF)
+#define ODN_PDP_VEVENT_VFETCH_LSBMASK			(0x00001FFF)
+#define ODN_PDP_VEVENT_VFETCH_SHIFT			(0)
+#define ODN_PDP_VEVENT_VFETCH_LENGTH			(13)
+#define ODN_PDP_VEVENT_VFETCH_SIGNED_FIELD		IMG_FALSE
+
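+/*
+ * Usage sketch: every field in this file is described by a MASK, LSBMASK,
+ * SHIFT and LENGTH, so a packed register such as VEVENT can be composed
+ * and decomposed with generic helpers. ODN_FIELD_PREP()/ODN_FIELD_GET()
+ * below are hypothetical illustrations, not defined by this header:
+ *
+ *	#define ODN_FIELD_PREP(name, val) \
+ *		(((val) & name##_LSBMASK) << name##_SHIFT)
+ *	#define ODN_FIELD_GET(name, reg) \
+ *		(((reg) & name##_MASK) >> name##_SHIFT)
+ *
+ *	reg = ODN_FIELD_PREP(ODN_PDP_VEVENT_VEVENT, vevent_line) |
+ *	      ODN_FIELD_PREP(ODN_PDP_VEVENT_VFETCH, vfetch_line);
+ */
+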
+#define ODN_PDP_HDECTRL_OFFSET				(0x08C0)
+
+/* PDP, HDECTRL, HDES
+*/
+#define ODN_PDP_HDECTRL_HDES_MASK		(0x1FFF0000)
+#define ODN_PDP_HDECTRL_HDES_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HDECTRL_HDES_SHIFT		(16)
+#define ODN_PDP_HDECTRL_HDES_LENGTH		(13)
+#define ODN_PDP_HDECTRL_HDES_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HDECTRL, HDEF
+*/
+#define ODN_PDP_HDECTRL_HDEF_MASK		(0x00001FFF)
+#define ODN_PDP_HDECTRL_HDEF_LSBMASK		(0x00001FFF)
+#define ODN_PDP_HDECTRL_HDEF_SHIFT		(0)
+#define ODN_PDP_HDECTRL_HDEF_LENGTH		(13)
+#define ODN_PDP_HDECTRL_HDEF_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VDECTRL_OFFSET			(0x08C4)
+
+/* PDP, VDECTRL, VDES
+*/
+#define ODN_PDP_VDECTRL_VDES_MASK		(0x1FFF0000)
+#define ODN_PDP_VDECTRL_VDES_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VDECTRL_VDES_SHIFT		(16)
+#define ODN_PDP_VDECTRL_VDES_LENGTH		(13)
+#define ODN_PDP_VDECTRL_VDES_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VDECTRL, VDEF
+*/
+#define ODN_PDP_VDECTRL_VDEF_MASK		(0x00001FFF)
+#define ODN_PDP_VDECTRL_VDEF_LSBMASK		(0x00001FFF)
+#define ODN_PDP_VDECTRL_VDEF_SHIFT		(0)
+#define ODN_PDP_VDECTRL_VDEF_LENGTH		(13)
+#define ODN_PDP_VDECTRL_VDEF_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_OPMASK_R_OFFSET			(0x08C8)
+
+/* PDP, OPMASK_R, MASKLEVEL
+*/
+#define ODN_PDP_OPMASK_R_MASKLEVEL_MASK		(0x80000000)
+#define ODN_PDP_OPMASK_R_MASKLEVEL_LSBMASK		(0x00000001)
+#define ODN_PDP_OPMASK_R_MASKLEVEL_SHIFT		(31)
+#define ODN_PDP_OPMASK_R_MASKLEVEL_LENGTH		(1)
+#define ODN_PDP_OPMASK_R_MASKLEVEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OPMASK_R, BLANKLEVEL
+*/
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_MASK		(0x40000000)
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_LSBMASK		(0x00000001)
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_SHIFT		(30)
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_LENGTH		(1)
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OPMASK_R, MASKR
+*/
+#define ODN_PDP_OPMASK_R_MASKR_MASK		(0x000003FF)
+#define ODN_PDP_OPMASK_R_MASKR_LSBMASK		(0x000003FF)
+#define ODN_PDP_OPMASK_R_MASKR_SHIFT		(0)
+#define ODN_PDP_OPMASK_R_MASKR_LENGTH		(10)
+#define ODN_PDP_OPMASK_R_MASKR_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_OPMASK_GB_OFFSET		(0x08CC)
+
+/* PDP, OPMASK_GB, MASKG
+*/
+#define ODN_PDP_OPMASK_GB_MASKG_MASK		(0x03FF0000)
+#define ODN_PDP_OPMASK_GB_MASKG_LSBMASK		(0x000003FF)
+#define ODN_PDP_OPMASK_GB_MASKG_SHIFT		(16)
+#define ODN_PDP_OPMASK_GB_MASKG_LENGTH		(10)
+#define ODN_PDP_OPMASK_GB_MASKG_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OPMASK_GB, MASKB
+*/
+#define ODN_PDP_OPMASK_GB_MASKB_MASK		(0x000003FF)
+#define ODN_PDP_OPMASK_GB_MASKB_LSBMASK		(0x000003FF)
+#define ODN_PDP_OPMASK_GB_MASKB_SHIFT		(0)
+#define ODN_PDP_OPMASK_GB_MASKB_LENGTH		(10)
+#define ODN_PDP_OPMASK_GB_MASKB_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_REGLD_ADDR_CTRL_OFFSET		(0x08D0)
+
+/* PDP, REGLD_ADDR_CTRL, REGLD_ADDRIN
+*/
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_MASK		(0xFFFFFFF0)
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LSBMASK		(0x0FFFFFFF)
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SHIFT		(4)
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LENGTH		(28)
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SIGNED_FIELD	IMG_FALSE
+
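+/*
+ * Usage sketch: REGLD_ADDRIN occupies bits [31:4], so the register
+ * naturally carries a 16-byte-aligned address. Using the hypothetical
+ * ODN_FIELD_PREP() helper sketched above:
+ *
+ *	reg = ODN_FIELD_PREP(ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN, addr >> 4);
+ */
+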
+#define ODN_PDP_REGLD_ADDR_STAT_OFFSET		(0x08D4)
+
+/* PDP, REGLD_ADDR_STAT, REGLD_ADDROUT
+*/
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_MASK		(0xFFFFFFF0)
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LSBMASK		(0x0FFFFFFF)
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SHIFT		(4)
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LENGTH		(28)
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_REGLD_STAT_OFFSET		(0x08D8)
+
+/* PDP, REGLD_STAT, REGLD_ADDREN
+*/
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_MASK		(0x00800000)
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LSBMASK		(0x00000001)
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SHIFT		(23)
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LENGTH		(1)
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_REGLD_CTRL_OFFSET		(0x08DC)
+
+/* PDP, REGLD_CTRL, REGLD_ADDRLEN
+*/
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_MASK		(0xFF000000)
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LSBMASK	(0x000000FF)
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SHIFT		(24)
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LENGTH		(8)
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, REGLD_CTRL, REGLD_VAL
+*/
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_MASK		(0x00800000)
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LSBMASK		(0x00000001)
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SHIFT		(23)
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LENGTH		(1)
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_UPDCTRL_OFFSET			(0x08E0)
+
+/* PDP, UPDCTRL, UPDFIELD
+*/
+#define ODN_PDP_UPDCTRL_UPDFIELD_MASK		(0x00000001)
+#define ODN_PDP_UPDCTRL_UPDFIELD_LSBMASK	(0x00000001)
+#define ODN_PDP_UPDCTRL_UPDFIELD_SHIFT		(0)
+#define ODN_PDP_UPDCTRL_UPDFIELD_LENGTH		(1)
+#define ODN_PDP_UPDCTRL_UPDFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_INTCTRL_OFFSET			(0x08E4)
+
+/* PDP, INTCTRL, HBLNK_LINE
+*/
+#define ODN_PDP_INTCTRL_HBLNK_LINE_MASK		(0x00010000)
+#define ODN_PDP_INTCTRL_HBLNK_LINE_LSBMASK	(0x00000001)
+#define ODN_PDP_INTCTRL_HBLNK_LINE_SHIFT	(16)
+#define ODN_PDP_INTCTRL_HBLNK_LINE_LENGTH	(1)
+#define ODN_PDP_INTCTRL_HBLNK_LINE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCTRL, HBLNK_LINENO
+*/
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_MASK	(0x00001FFF)
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_LSBMASK	(0x00001FFF)
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_SHIFT	(0)
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_LENGTH	(13)
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PDISETUP_OFFSET		(0x0900)
+
+/* PDP, PDISETUP, PDI_BLNKLVL
+*/
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_MASK		(0x00000040)
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SHIFT		(6)
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_BLNK
+*/
+#define ODN_PDP_PDISETUP_PDI_BLNK_MASK		(0x00000020)
+#define ODN_PDP_PDISETUP_PDI_BLNK_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_BLNK_SHIFT		(5)
+#define ODN_PDP_PDISETUP_PDI_BLNK_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_BLNK_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_PWR
+*/
+#define ODN_PDP_PDISETUP_PDI_PWR_MASK		(0x00000010)
+#define ODN_PDP_PDISETUP_PDI_PWR_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_PWR_SHIFT		(4)
+#define ODN_PDP_PDISETUP_PDI_PWR_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_PWR_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_EN
+*/
+#define ODN_PDP_PDISETUP_PDI_EN_MASK		(0x00000008)
+#define ODN_PDP_PDISETUP_PDI_EN_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_EN_SHIFT		(3)
+#define ODN_PDP_PDISETUP_PDI_EN_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_GDEN
+*/
+#define ODN_PDP_PDISETUP_PDI_GDEN_MASK		(0x00000004)
+#define ODN_PDP_PDISETUP_PDI_GDEN_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_GDEN_SHIFT		(2)
+#define ODN_PDP_PDISETUP_PDI_GDEN_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_GDEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_NFEN
+*/
+#define ODN_PDP_PDISETUP_PDI_NFEN_MASK		(0x00000002)
+#define ODN_PDP_PDISETUP_PDI_NFEN_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_NFEN_SHIFT		(1)
+#define ODN_PDP_PDISETUP_PDI_NFEN_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_NFEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_CR
+*/
+#define ODN_PDP_PDISETUP_PDI_CR_MASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_CR_LSBMASK		(0x00000001)
+#define ODN_PDP_PDISETUP_PDI_CR_SHIFT		(0)
+#define ODN_PDP_PDISETUP_PDI_CR_LENGTH		(1)
+#define ODN_PDP_PDISETUP_PDI_CR_SIGNED_FIELD	IMG_FALSE
+
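+/*
+ * Usage sketch: the single-bit PDISETUP fields OR together into one
+ * control word. For example, powering up and enabling the PDI output
+ * (write_pdp_reg() is a hypothetical helper, for illustration only):
+ *
+ *	u32 pdisetup = ODN_PDP_PDISETUP_PDI_PWR_MASK |
+ *		       ODN_PDP_PDISETUP_PDI_EN_MASK;
+ *	write_pdp_reg(base, ODN_PDP_PDISETUP_OFFSET, pdisetup);
+ */
+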
+#define ODN_PDP_PDITIMING0_OFFSET		(0x0904)
+
+/* PDP, PDITIMING0, PDI_PWRSVGD
+*/
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_MASK		(0x0F000000)
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LSBMASK		(0x0000000F)
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SHIFT		(24)
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LENGTH		(4)
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDITIMING0, PDI_LSDEL
+*/
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_MASK		(0x007F0000)
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_LSBMASK		(0x0000007F)
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_SHIFT		(16)
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_LENGTH		(7)
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDITIMING0, PDI_PWRSV2GD2
+*/
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_MASK		(0x000003FF)
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LSBMASK		(0x000003FF)
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SHIFT		(0)
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LENGTH		(10)
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PDITIMING1_OFFSET		(0x0908)
+
+/* PDP, PDITIMING1, PDI_NLDEL
+*/
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_MASK		(0x000F0000)
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_LSBMASK		(0x0000000F)
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_SHIFT		(16)
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_LENGTH		(4)
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDITIMING1, PDI_ACBDEL
+*/
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_MASK		(0x000003FF)
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LSBMASK		(0x000003FF)
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SHIFT		(0)
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LENGTH		(10)
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PDICOREID_OFFSET		(0x090C)
+
+/* PDP, PDICOREID, PDI_GROUP_ID
+*/
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_MASK		(0xFF000000)
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LSBMASK		(0x000000FF)
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SHIFT		(24)
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LENGTH		(8)
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREID, PDI_CORE_ID
+*/
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_MASK		(0x00FF0000)
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_LSBMASK		(0x000000FF)
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_SHIFT		(16)
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_LENGTH		(8)
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREID, PDI_CONFIG_ID
+*/
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_MASK		(0x0000FFFF)
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LSBMASK		(0x0000FFFF)
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SHIFT		(0)
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LENGTH		(16)
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_PDICOREREV_OFFSET		(0x0910)
+
+/* PDP, PDICOREREV, PDI_MAJOR_REV
+*/
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_MASK		(0x00FF0000)
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LSBMASK		(0x000000FF)
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SHIFT		(16)
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LENGTH		(8)
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREREV, PDI_MINOR_REV
+*/
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_MASK		(0x0000FF00)
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LSBMASK		(0x000000FF)
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SHIFT		(8)
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LENGTH		(8)
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREREV, PDI_MAINT_REV
+*/
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_MASK		(0x000000FF)
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LSBMASK		(0x000000FF)
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SHIFT		(0)
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LENGTH		(8)
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX2_OFFSET		(0x0920)
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y1
+*/
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_MASK		(0x000000C0)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LSBMASK		(0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LENGTH		(2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y1
+*/
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_MASK		(0x00000030)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LSBMASK		(0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT		(4)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LENGTH		(2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y0
+*/
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_MASK		(0x0000000C)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LSBMASK		(0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT		(2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LENGTH		(2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y0
+*/
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_MASK		(0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LSBMASK		(0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LENGTH		(2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SIGNED_FIELD	IMG_FALSE
+
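+/*
+ * Usage sketch: the four 2-bit entries of the 2x2 dither matrix pack
+ * into a single register, each at its field's SHIFT. Assuming an
+ * illustrative m[y][x] array of 2-bit values:
+ *
+ *	u32 dm2 = (m[1][1] << ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT) |
+ *		  (m[1][0] << ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT) |
+ *		  (m[0][1] << ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT) |
+ *		  (m[0][0] << ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT);
+ */
+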
+#define ODN_PDP_DITHERMATRIX4_0_OFFSET		(0x0924)
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y1
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_MASK		(0xF0000000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SHIFT		(28)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y1
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_MASK		(0x0F000000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y1
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_MASK		(0x00F00000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SHIFT		(20)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y1
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_MASK		(0x000F0000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SHIFT		(16)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y0
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_MASK		(0x0000F000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y0
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_MASK		(0x00000F00)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SHIFT		(8)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y0
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_MASK		(0x000000F0)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SHIFT		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y0
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_MASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX4_1_OFFSET		(0x0928)
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y3
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_MASK		(0xF0000000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SHIFT		(28)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y3
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_MASK		(0x0F000000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y3
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_MASK		(0x00F00000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SHIFT		(20)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y3
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_MASK		(0x000F0000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SHIFT		(16)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y2
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_MASK		(0x0000F000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y2
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_MASK		(0x00000F00)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SHIFT		(8)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y2
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_MASK		(0x000000F0)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SHIFT		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y2
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_MASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LSBMASK		(0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LENGTH		(4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_0_OFFSET		(0x092C)
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X4Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X3Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X2Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X1Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X0Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_1_OFFSET		(0x0930)
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X1Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X0Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X7Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X6Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X5Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_2_OFFSET		(0x0934)
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X6Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X5Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X4Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X3Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X2Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_3_OFFSET		(0x0938)
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X3Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X2Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X1Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X0Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X7Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_4_OFFSET		(0x093C)
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X0Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X7Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X6Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X5Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X4Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_5_OFFSET		(0x0940)
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X5Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X4Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X3Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X2Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X1Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_6_OFFSET		(0x0944)
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X2Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X1Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X0Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X7Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X6Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_7_OFFSET		(0x0948)
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X7Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X6Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X5Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X4Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X3Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_8_OFFSET		(0x094C)
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X4Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X3Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X2Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X1Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X0Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_9_OFFSET		(0x0950)
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X1Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X0Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X7Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X6Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X5Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_10_OFFSET		(0x0954)
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X6Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X5Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X4Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X3Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X2Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_11_OFFSET		(0x0958)
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X3Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_MASK		(0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SHIFT		(24)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X2Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X1Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X0Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X7Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_12_OFFSET		(0x095C)
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X7Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK		(0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT		(18)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X6Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_MASK		(0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SHIFT		(12)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X5Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_MASK		(0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SHIFT		(6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X4Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_MASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LSBMASK		(0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SHIFT		(0)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LENGTH		(6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1_MEMCTRL_OFFSET		(0x0960)
+
+/* PDP, GRPH1_MEMCTRL, GRPH1_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_MEMCTRL, GRPH1_BURSTLEN
+*/
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1_MEM_THRESH_OFFSET		(0x0964)
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_UVTHRESHOLD
+*/
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_YTHRESHOLD
+*/
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_THRESHOLD
+*/
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2_MEMCTRL_OFFSET		(0x0968)
+
+/* PDP, GRPH2_MEMCTRL, GRPH2_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_MEMCTRL, GRPH2_BURSTLEN
+*/
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2_MEM_THRESH_OFFSET		(0x096C)
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_UVTHRESHOLD
+*/
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_YTHRESHOLD
+*/
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_THRESHOLD
+*/
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3_MEMCTRL_OFFSET		(0x0970)
+
+/* PDP, GRPH3_MEMCTRL, GRPH3_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_MEMCTRL, GRPH3_BURSTLEN
+*/
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3_MEM_THRESH_OFFSET		(0x0974)
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_UVTHRESHOLD
+*/
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_YTHRESHOLD
+*/
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_THRESHOLD
+*/
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4_MEMCTRL_OFFSET		(0x0978)
+
+/* PDP, GRPH4_MEMCTRL, GRPH4_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_MEMCTRL, GRPH4_BURSTLEN
+*/
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4_MEM_THRESH_OFFSET		(0x097C)
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_UVTHRESHOLD
+*/
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_YTHRESHOLD
+*/
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_THRESHOLD
+*/
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1_MEMCTRL_OFFSET		(0x0980)
+
+/* PDP, VID1_MEMCTRL, VID1_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_MEMCTRL, VID1_BURSTLEN
+*/
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1_MEM_THRESH_OFFSET		(0x0984)
+
+/* PDP, VID1_MEM_THRESH, VID1_UVTHRESHOLD
+*/
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_MEM_THRESH, VID1_YTHRESHOLD
+*/
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_MEM_THRESH, VID1_THRESHOLD
+*/
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2_MEMCTRL_OFFSET		(0x0988)
+
+/* PDP, VID2_MEMCTRL, VID2_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_MEMCTRL, VID2_BURSTLEN
+*/
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2_MEM_THRESH_OFFSET		(0x098C)
+
+/* PDP, VID2_MEM_THRESH, VID2_UVTHRESHOLD
+*/
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_MEM_THRESH, VID2_YTHRESHOLD
+*/
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_MEM_THRESH, VID2_THRESHOLD
+*/
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3_MEMCTRL_OFFSET		(0x0990)
+
+/* PDP, VID3_MEMCTRL, VID3_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_MEMCTRL, VID3_BURSTLEN
+*/
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3_MEM_THRESH_OFFSET		(0x0994)
+
+/* PDP, VID3_MEM_THRESH, VID3_UVTHRESHOLD
+*/
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_MEM_THRESH, VID3_YTHRESHOLD
+*/
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_MEM_THRESH, VID3_THRESHOLD
+*/
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4_MEMCTRL_OFFSET		(0x0998)
+
+/* PDP, VID4_MEMCTRL, VID4_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_MEMCTRL, VID4_BURSTLEN
+*/
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_MASK		(0x000000FF)
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SHIFT		(0)
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LENGTH		(8)
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4_MEM_THRESH_OFFSET		(0x099C)
+
+/* PDP, VID4_MEM_THRESH, VID4_UVTHRESHOLD
+*/
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_MASK		(0xFF000000)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SHIFT		(24)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LENGTH		(8)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_MEM_THRESH, VID4_YTHRESHOLD
+*/
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_MASK		(0x001FF000)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SHIFT		(12)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_MEM_THRESH, VID4_THRESHOLD
+*/
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_MASK		(0x000001FF)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LSBMASK		(0x000001FF)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SHIFT		(0)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LENGTH		(9)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH1_PANIC_THRESH_OFFSET		(0x09A0)
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH2_PANIC_THRESH_OFFSET		(0x09A4)
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH3_PANIC_THRESH_OFFSET		(0x09A8)
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_GRPH4_PANIC_THRESH_OFFSET		(0x09AC)
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID1_PANIC_THRESH_OFFSET		(0x09B0)
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID2_PANIC_THRESH_OFFSET		(0x09B4)
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID3_PANIC_THRESH_OFFSET		(0x09B8)
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_VID4_PANIC_THRESH_OFFSET		(0x09BC)
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SHIFT		(31)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SHIFT		(30)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LENGTH		(1)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define ODN_PDP_BURST_BOUNDARY_OFFSET		(0x09C0)
+
+/* PDP, BURST_BOUNDARY, BURST_BOUNDARY
+*/
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK		(0x0000003F)
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LSBMASK		(0x0000003F)
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT		(0)
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LENGTH		(6)
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SIGNED_FIELD	IMG_FALSE
+
+
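+/*
+ * Illustrative sketch only (not part of the generated register map):
+ * the _MASK/_SHIFT pairs defined above are conventionally combined as
+ * below to read and write individual register fields. The helper names
+ * are hypothetical and the code is kept compiled out.
+ */
+#if 0
+static inline unsigned int odn_pdp_field_get(unsigned int reg,
+					     unsigned int mask,
+					     unsigned int shift)
+{
+	/* Isolate the field bits, then move them down to bit 0. */
+	return (reg & mask) >> shift;
+}
+
+static inline unsigned int odn_pdp_field_set(unsigned int reg,
+					     unsigned int mask,
+					     unsigned int shift,
+					     unsigned int val)
+{
+	/* Clear the field, then merge in the new value, clipped to the mask. */
+	return (reg & ~mask) | ((val << shift) & mask);
+}
+#endif
+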
+/* ------------------------ End of register definitions ------------------------ */
+
+/* NUMREG defines the extent of register address space. */
+
+#define ODN_PDP_NUMREG		((0x09C0 >> 2) + 1)
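+
+/*
+ * Worked example (editorial note, not from the .def source): the last
+ * register, BURST_BOUNDARY, sits at byte offset 0x09C0; at 4 bytes per
+ * 32-bit register that is index 0x09C0 >> 2 = 624, so 625 registers.
+ */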
+
+/* Info about video plane addresses */
+#define ODN_PDP_YADDR_BITS		(ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH)
+#define ODN_PDP_YADDR_ALIGN		5
+#define ODN_PDP_UADDR_BITS		(ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH)
+#define ODN_PDP_UADDR_ALIGN		5
+#define ODN_PDP_VADDR_BITS		(ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH)
+#define ODN_PDP_VADDR_ALIGN		5
+
+#define ODN_PDP_YSTRIDE_BITS	(ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH)
+#define ODN_PDP_YSTRIDE_ALIGN	5
+
+#define ODN_PDP_MAX_INPUT_WIDTH (ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK + 1)
+#define ODN_PDP_MAX_INPUT_HEIGHT (ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK + 1)
+
+/* Maximum 6 bytes per pixel for RGB161616 */
+#define ODN_PDP_MAX_IMAGE_BYTES (ODN_PDP_MAX_INPUT_WIDTH * ODN_PDP_MAX_INPUT_HEIGHT * 6)
+
+/* Round up to a whole number of pages */
+#define ODN_PDP_MAX_IMAGE_PAGES ((ODN_PDP_MAX_IMAGE_BYTES+PAGE_SIZE-1)/PAGE_SIZE)
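+
+/*
+ * Worked example: with 4096-byte pages an image of 4097 bytes needs
+ * (4097 + 4095) / 4096 = 2 pages; adding PAGE_SIZE - 1 before the
+ * division is what rounds up.
+ */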
+
+#define ODN_PDP_YADDR_MAX		(((1 << ODN_PDP_YADDR_BITS) - 1) << ODN_PDP_YADDR_ALIGN)
+#define ODN_PDP_UADDR_MAX		(((1 << ODN_PDP_UADDR_BITS) - 1) << ODN_PDP_UADDR_ALIGN)
+#define ODN_PDP_VADDR_MAX		(((1 << ODN_PDP_VADDR_BITS) - 1) << ODN_PDP_VADDR_ALIGN)
+#define ODN_PDP_YSTRIDE_MAX		((1 << ODN_PDP_YSTRIDE_BITS) << ODN_PDP_YSTRIDE_ALIGN)
+#define ODN_PDP_YADDR_ALIGNMASK		((1 << ODN_PDP_YADDR_ALIGN) - 1)
+#define ODN_PDP_UADDR_ALIGNMASK		((1 << ODN_PDP_UADDR_ALIGN) - 1)
+#define ODN_PDP_VADDR_ALIGNMASK		((1 << ODN_PDP_VADDR_ALIGN) - 1)
+#define ODN_PDP_YSTRIDE_ALIGNMASK	((1 << ODN_PDP_YSTRIDE_ALIGN) - 1)
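+
+/*
+ * Minimal usage sketch (illustrative only; the helper name is made up):
+ * the *_MAX and *_ALIGNMASK macros above are typically combined to
+ * check that a luma base address is 32-byte aligned and representable
+ * in the VID1BASEADDR field. Kept compiled out.
+ */
+#if 0
+static inline int odn_pdp_yaddr_is_valid(unsigned int addr)
+{
+	return ((addr & ODN_PDP_YADDR_ALIGNMASK) == 0) &&
+	       (addr <= ODN_PDP_YADDR_MAX);
+}
+#endif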
+
+/* Field Values (some are reserved for future use) */
+#define ODN_PDP_SURF_PIXFMT_RGB332					0x3
+#define ODN_PDP_SURF_PIXFMT_ARGB4444				0x4
+#define ODN_PDP_SURF_PIXFMT_ARGB1555				0x5
+#define ODN_PDP_SURF_PIXFMT_RGB888					0x6
+#define ODN_PDP_SURF_PIXFMT_RGB565					0x7
+#define ODN_PDP_SURF_PIXFMT_ARGB8888				0x8
+#define ODN_PDP_SURF_PIXFMT_420_PL8					0x9
+#define ODN_PDP_SURF_PIXFMT_420_PL8IVU				0xA
+#define ODN_PDP_SURF_PIXFMT_420_PL8IUV				0xB
+#define ODN_PDP_SURF_PIXFMT_422_UY0VY1_8888			0xC
+#define ODN_PDP_SURF_PIXFMT_422_VY0UY1_8888			0xD
+#define ODN_PDP_SURF_PIXFMT_422_Y0UY1V_8888			0xE
+#define ODN_PDP_SURF_PIXFMT_422_Y0VY1U_8888			0xF
+#define ODN_PDP_SURF_PIXFMT_AYUV8888				0x10
+#define ODN_PDP_SURF_PIXFMT_YUV101010				0x15
+#define ODN_PDP_SURF_PIXFMT_RGB101010				0x17
+#define ODN_PDP_SURF_PIXFMT_420_PL10IUV				0x18
+#define ODN_PDP_SURF_PIXFMT_420_PL10IVU				0x19
+#define ODN_PDP_SURF_PIXFMT_422_PL10IUV				0x1A
+#define ODN_PDP_SURF_PIXFMT_422_PL10IVU				0x1B
+#define ODN_PDP_SURF_PIXFMT_RGB121212				0x1E
+#define ODN_PDP_SURF_PIXFMT_RGB161616				0x1F
+
+#define ODN_PDP_CTRL_CKEYSRC_PREV					0x0
+#define ODN_PDP_CTRL_CKEYSRC_CUR					0x1
+
+#define ODN_PDP_MEMCTRL_MEMREFRESH_ALWAYS			0x0
+#define ODN_PDP_MEMCTRL_MEMREFRESH_HBLNK			0x1
+#define ODN_PDP_MEMCTRL_MEMREFRESH_VBLNK			0x2
+#define ODN_PDP_MEMCTRL_MEMREFRESH_BOTH				0x3
+
+#define ODN_PDP_3D_CTRL_BLENDSEL_BGND_WITH_POS0		0x0
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS0_WITH_POS1		0x1
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS1_WITH_POS2		0x2
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS2_WITH_POS3		0x3
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS3_WITH_POS4		0x4
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS4_WITH_POS5		0x5
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS5_WITH_POS6		0x6
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS6_WITH_POS7		0x7
+
+#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_Y_STRIDE	0x0
+#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_DOUBLE_Y_STRIDE 0x1
+#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_HALF_Y_STRIDE 0x2
+
+#define ODN_PDP_PROCAMP_OUTPUT_OFFSET_FRACTIONAL_BITS 1
+#define ODN_PDP_PROCAMP_COEFFICIENT_FRACTIONAL_BITS	10
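+
+/*
+ * For reference (worked example, assuming the usual fixed-point
+ * convention implied by the names above): with 10 fractional bits a
+ * coefficient of 1.0 encodes as 1 << 10 = 0x400 and 0.5 as 0x200.
+ */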
+
+/*-------------------------------------------------------------------------------*/
+
+#endif /* _ODN_PDP_REGS_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/odin_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/odin_regs.h
new file mode 100644
index 0000000..f6f2f0c6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/odin_regs.h
@@ -0,0 +1,843 @@
+/****************************************************************************
+@Title          Odin system control register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+@Description    Odin FPGA register defs for IMG 3rd generation TCF
+
+	Auto-generated headers, e.g. odn_core.h:
+		regconv -d . -a 8 odn_core.def
+
+	Source files:
+		odn_core.def
+		mca_debug.def
+		sai_rx_debug.def
+		sai_tx_debug.def
+		ad_tx.def
+
+	Changes:
+		Removed obsolete copyright dates
+		Changed lower case to upper case
+			(e.g. odn_core changed to ODN_CORE)
+		Changed PVR5__ to ODN_
+		Merged multiple .def files into one header
+
+****************************************************************************/
+
+/* tab size 4 */
+
+#ifndef _ODIN_REGS_H_
+#define _ODIN_REGS_H_
+
+/******************************
+  Generated from: odn_core.def
+*******************************/
+
+/*
+	Register ID
+*/
+#define ODN_CORE_ID                             0x0000
+#define ODN_ID_VARIANT_MASK                     0x0000FFFFU
+#define ODN_ID_VARIANT_SHIFT                    0
+#define ODN_ID_VARIANT_SIGNED                   0
+
+#define ODN_ID_ID_MASK                          0xFFFF0000U
+#define ODN_ID_ID_SHIFT                         16
+#define ODN_ID_ID_SIGNED                        0
+
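+/*
+ * Editorial sketch (not part of the generated header): every field in this
+ * file follows the same _MASK/_SHIFT convention, so a generic accessor can
+ * decode it. The helper name below is illustrative only:
+ *
+ *   static inline uint32_t odn_field_get(uint32_t reg, uint32_t mask,
+ *                                        uint32_t shift)
+ *   {
+ *       return (reg & mask) >> shift;
+ *   }
+ *
+ *   uint32_t variant = odn_field_get(id_reg, ODN_ID_VARIANT_MASK,
+ *                                    ODN_ID_VARIANT_SHIFT);
+ */
+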
+/*
+	Register REVISION
+*/
+#define ODN_CORE_REVISION                       0x0004
+#define ODN_REVISION_MINOR_MASK                 0x000000FFU
+#define ODN_REVISION_MINOR_SHIFT                0
+#define ODN_REVISION_MINOR_SIGNED               0
+
+#define ODN_REVISION_MAJOR_MASK                 0x00000F00U
+#define ODN_REVISION_MAJOR_SHIFT                8
+#define ODN_REVISION_MAJOR_SIGNED               0
+
+/*
+	Register CHANGE_SET
+*/
+#define ODN_CORE_CHANGE_SET                     0x0008
+#define ODN_CHANGE_SET_SET_MASK                 0xFFFFFFFFU
+#define ODN_CHANGE_SET_SET_SHIFT                0
+#define ODN_CHANGE_SET_SET_SIGNED               0
+
+/*
+	Register USER_ID
+*/
+#define ODN_CORE_USER_ID                        0x000C
+#define ODN_USER_ID_ID_MASK                     0x0000000FU
+#define ODN_USER_ID_ID_SHIFT                    0
+#define ODN_USER_ID_ID_SIGNED                   0
+
+/*
+	Register USER_BUILD
+*/
+#define ODN_CORE_USER_BUILD                     0x0010
+#define ODN_USER_BUILD_BUILD_MASK               0xFFFFFFFFU
+#define ODN_USER_BUILD_BUILD_SHIFT              0
+#define ODN_USER_BUILD_BUILD_SIGNED             0
+
+/*
+	Register INTERNAL_RESETN
+*/
+#define ODN_CORE_INTERNAL_RESETN                0x0080
+#define ODN_INTERNAL_RESETN_DDR_MASK            0x00000001U
+#define ODN_INTERNAL_RESETN_DDR_SHIFT           0
+#define ODN_INTERNAL_RESETN_DDR_SIGNED          0
+
+#define ODN_INTERNAL_RESETN_MIG0_MASK           0x00000002U
+#define ODN_INTERNAL_RESETN_MIG0_SHIFT          1
+#define ODN_INTERNAL_RESETN_MIG0_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_MIG1_MASK           0x00000004U
+#define ODN_INTERNAL_RESETN_MIG1_SHIFT          2
+#define ODN_INTERNAL_RESETN_MIG1_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_PDP1_MASK           0x00000008U
+#define ODN_INTERNAL_RESETN_PDP1_SHIFT          3
+#define ODN_INTERNAL_RESETN_PDP1_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_PDP2_MASK           0x00000010U
+#define ODN_INTERNAL_RESETN_PDP2_SHIFT          4
+#define ODN_INTERNAL_RESETN_PDP2_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_PERIP_MASK          0x00000020U
+#define ODN_INTERNAL_RESETN_PERIP_SHIFT         5
+#define ODN_INTERNAL_RESETN_PERIP_SIGNED        0
+
+#define ODN_INTERNAL_RESETN_GIST_MASK           0x00000040U
+#define ODN_INTERNAL_RESETN_GIST_SHIFT          6
+#define ODN_INTERNAL_RESETN_GIST_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_PIKE_MASK           0x00000080U
+#define ODN_INTERNAL_RESETN_PIKE_SHIFT          7
+#define ODN_INTERNAL_RESETN_PIKE_SIGNED         0
+
+
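+/*
+ * Editorial sketch: the bits above are active-low resets ("RESETN"), so a
+ * block is typically released from reset with a read-modify-write that sets
+ * its bit. readl()/writel() and the "base" pointer are assumptions, not
+ * part of this header:
+ *
+ *   uint32_t v = readl(base + ODN_CORE_INTERNAL_RESETN);
+ *   writel(v | ODN_INTERNAL_RESETN_DDR_MASK,
+ *          base + ODN_CORE_INTERNAL_RESETN);
+ */
+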
+/*
+	Register EXTERNAL_RESETN
+*/
+#define ODN_CORE_EXTERNAL_RESETN                0x0084
+#define ODN_EXTERNAL_RESETN_DUT_MASK            0x00000001U
+#define ODN_EXTERNAL_RESETN_DUT_SHIFT           0
+#define ODN_EXTERNAL_RESETN_DUT_SIGNED          0
+
+#define ODN_EXTERNAL_RESETN_DUT_SPI_MASK        0x00000002U
+#define ODN_EXTERNAL_RESETN_DUT_SPI_SHIFT       1
+#define ODN_EXTERNAL_RESETN_DUT_SPI_SIGNED      0
+
+/*
+	Register EXTERNAL_RESET
+*/
+#define ODN_CORE_EXTERNAL_RESET                 0x0088
+#define ODN_EXTERNAL_RESET_PVT_CAL_MASK         0x00000001U
+#define ODN_EXTERNAL_RESET_PVT_CAL_SHIFT        0
+#define ODN_EXTERNAL_RESET_PVT_CAL_SIGNED       0
+
+#define ODN_EXTERNAL_RESET_PLL_MASK             0x00000002U
+#define ODN_EXTERNAL_RESET_PLL_SHIFT            1
+#define ODN_EXTERNAL_RESET_PLL_SIGNED           0
+
+/*
+	Register INTERNAL_AUTO_RESETN
+*/
+#define ODN_CORE_INTERNAL_AUTO_RESETN           0x008C
+#define ODN_INTERNAL_AUTO_RESETN_AUX_MASK       0x00000001U
+#define ODN_INTERNAL_AUTO_RESETN_AUX_SHIFT      0
+#define ODN_INTERNAL_AUTO_RESETN_AUX_SIGNED     0
+
+/*
+	Register CLK_GEN_RESET
+*/
+#define ODN_CORE_CLK_GEN_RESET                  0x0090
+#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_MASK    0x00000001U
+#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SHIFT   0
+#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SIGNED  0
+
+#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK      0x00000002U
+#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SHIFT     1
+#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SIGNED    0
+
+#define ODN_CLK_GEN_RESET_MULTI_MMCM_MASK       0x00000004U
+#define ODN_CLK_GEN_RESET_MULTI_MMCM_SHIFT      2
+#define ODN_CLK_GEN_RESET_MULTI_MMCM_SIGNED     0
+
+#define ODN_CLK_GEN_RESET_PDP_MMCM_MASK         0x00000008U
+#define ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT        3
+#define ODN_CLK_GEN_RESET_PDP_MMCM_SIGNED       0
+
+/*
+	Register INTERRUPT_STATUS
+*/
+#define ODN_CORE_INTERRUPT_STATUS               0x0100
+#define ODN_INTERRUPT_STATUS_DUT_MASK           0x00000001U
+#define ODN_INTERRUPT_STATUS_DUT_SHIFT          0
+#define ODN_INTERRUPT_STATUS_DUT_SIGNED         0
+
+#define ODN_INTERRUPT_STATUS_PDP1_MASK          0x00000002U
+#define ODN_INTERRUPT_STATUS_PDP1_SHIFT         1
+#define ODN_INTERRUPT_STATUS_PDP1_SIGNED        0
+
+#define ODN_INTERRUPT_STATUS_PDP2_MASK          0x00000004U
+#define ODN_INTERRUPT_STATUS_PDP2_SHIFT         2
+#define ODN_INTERRUPT_STATUS_PDP2_SIGNED        0
+
+#define ODN_INTERRUPT_STATUS_PERIP_MASK         0x00000008U
+#define ODN_INTERRUPT_STATUS_PERIP_SHIFT        3
+#define ODN_INTERRUPT_STATUS_PERIP_SIGNED       0
+
+#define ODN_INTERRUPT_STATUS_UART_MASK          0x00000010U
+#define ODN_INTERRUPT_STATUS_UART_SHIFT         4
+#define ODN_INTERRUPT_STATUS_UART_SIGNED        0
+
+#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_MASK 0x00000020U
+#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SHIFT 5
+#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_MASK 0x00000040U
+#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SHIFT 6
+#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_MASK 0x00000080U
+#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SHIFT 7
+#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_MASK 0x00000100U
+#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SHIFT 8
+#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_IRQ_TEST_MASK      0x40000000U
+#define ODN_INTERRUPT_STATUS_IRQ_TEST_SHIFT     30
+#define ODN_INTERRUPT_STATUS_IRQ_TEST_SIGNED    0
+
+#define ODN_INTERRUPT_STATUS_MASTER_STATUS_MASK 0x80000000U
+#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SHIFT 31
+#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SIGNED 0
+
+/*
+	Register INTERRUPT_ENABLE
+*/
+#define ODN_CORE_INTERRUPT_ENABLE               0x0104
+#define ODN_INTERRUPT_ENABLE_DUT_MASK           0x00000001U
+#define ODN_INTERRUPT_ENABLE_DUT_SHIFT          0
+#define ODN_INTERRUPT_ENABLE_DUT_SIGNED         0
+
+#define ODN_INTERRUPT_ENABLE_PDP1_MASK          0x00000002U
+#define ODN_INTERRUPT_ENABLE_PDP1_SHIFT         1
+#define ODN_INTERRUPT_ENABLE_PDP1_SIGNED        0
+
+#define ODN_INTERRUPT_ENABLE_PDP2_MASK          0x00000004U
+#define ODN_INTERRUPT_ENABLE_PDP2_SHIFT         2
+#define ODN_INTERRUPT_ENABLE_PDP2_SIGNED        0
+
+#define ODN_INTERRUPT_ENABLE_PERIP_MASK         0x00000008U
+#define ODN_INTERRUPT_ENABLE_PERIP_SHIFT        3
+#define ODN_INTERRUPT_ENABLE_PERIP_SIGNED       0
+
+#define ODN_INTERRUPT_ENABLE_UART_MASK          0x00000010U
+#define ODN_INTERRUPT_ENABLE_UART_SHIFT         4
+#define ODN_INTERRUPT_ENABLE_UART_SIGNED        0
+
+#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_MASK 0x00000020U
+#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SHIFT 5
+#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_MASK 0x00000040U
+#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SHIFT 6
+#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_MASK 0x00000080U
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SHIFT 7
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_MASK 0x00000100U
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SHIFT 8
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_IRQ_TEST_MASK      0x40000000U
+#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SHIFT     30
+#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SIGNED    0
+
+#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_MASK 0x80000000U
+#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT 31
+#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SIGNED 0
+
+/*
+	Register INTERRUPT_CLR
+*/
+#define ODN_CORE_INTERRUPT_CLR                  0x010C
+#define ODN_INTERRUPT_CLR_DUT_MASK              0x00000001U
+#define ODN_INTERRUPT_CLR_DUT_SHIFT             0
+#define ODN_INTERRUPT_CLR_DUT_SIGNED            0
+
+#define ODN_INTERRUPT_CLR_PDP1_MASK             0x00000002U
+#define ODN_INTERRUPT_CLR_PDP1_SHIFT            1
+#define ODN_INTERRUPT_CLR_PDP1_SIGNED           0
+
+#define ODN_INTERRUPT_CLR_PDP2_MASK             0x00000004U
+#define ODN_INTERRUPT_CLR_PDP2_SHIFT            2
+#define ODN_INTERRUPT_CLR_PDP2_SIGNED           0
+
+#define ODN_INTERRUPT_CLR_PERIP_MASK            0x00000008U
+#define ODN_INTERRUPT_CLR_PERIP_SHIFT           3
+#define ODN_INTERRUPT_CLR_PERIP_SIGNED          0
+
+#define ODN_INTERRUPT_CLR_UART_MASK             0x00000010U
+#define ODN_INTERRUPT_CLR_UART_SHIFT            4
+#define ODN_INTERRUPT_CLR_UART_SIGNED           0
+
+#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_MASK  0x00000020U
+#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SHIFT 5
+#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_MASK   0x00000040U
+#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SHIFT  6
+#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_MASK 0x00000080U
+#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SHIFT 7
+#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_MASK  0x00000100U
+#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SHIFT 8
+#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_IRQ_TEST_MASK         0x40000000U
+#define ODN_INTERRUPT_CLR_IRQ_TEST_SHIFT        30
+#define ODN_INTERRUPT_CLR_IRQ_TEST_SIGNED       0
+
+#define ODN_INTERRUPT_CLR_MASTER_CLEAR_MASK     0x80000000U
+#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SHIFT    31
+#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SIGNED   0
+
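+/*
+ * Editorial sketch: STATUS/ENABLE/CLR form the usual interrupt triple. A
+ * handler might read the enabled status bits and acknowledge them by
+ * writing the same value to the clear register (readl/writel and "base"
+ * are assumptions, not part of this header):
+ *
+ *   uint32_t st = readl(base + ODN_CORE_INTERRUPT_STATUS) &
+ *                 readl(base + ODN_CORE_INTERRUPT_ENABLE);
+ *   writel(st, base + ODN_CORE_INTERRUPT_CLR);
+ */
+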
+/*
+	Register INTERRUPT_TEST
+*/
+#define ODN_CORE_INTERRUPT_TEST                 0x0110
+#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_MASK  0x00000001U
+#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SHIFT 0
+#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SIGNED 0
+
+/*
+	Register NUM_GPIO
+*/
+#define ODN_CORE_NUM_GPIO                       0x0180
+#define ODN_NUM_GPIO_NUMBER_MASK                0x0000000FU
+#define ODN_NUM_GPIO_NUMBER_SHIFT               0
+#define ODN_NUM_GPIO_NUMBER_SIGNED              0
+
+/*
+	Register GPIO_EN
+*/
+#define ODN_CORE_GPIO_EN                        0x0184
+#define ODN_GPIO_EN_DIRECTION_MASK              0x000000FFU
+#define ODN_GPIO_EN_DIRECTION_SHIFT             0
+#define ODN_GPIO_EN_DIRECTION_SIGNED            0
+
+/*
+	Register GPIO
+*/
+#define ODN_CORE_GPIO                           0x0188
+#define ODN_GPIO_GPIO_MASK                      0x000000FFU
+#define ODN_GPIO_GPIO_SHIFT                     0
+#define ODN_GPIO_GPIO_SIGNED                    0
+
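+/*
+ * Editorial sketch: assuming GPIO_EN holds the per-pin direction bits and
+ * GPIO holds the pin values (the polarity is not documented here, so this
+ * is an assumption), driving pin 0 could look like:
+ *
+ *   writel(readl(base + ODN_CORE_GPIO_EN) | 0x1, base + ODN_CORE_GPIO_EN);
+ *   writel(readl(base + ODN_CORE_GPIO) | 0x1, base + ODN_CORE_GPIO);
+ */
+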
+/*
+	Register NUM_DUT_CTRL
+*/
+#define ODN_CORE_NUM_DUT_CTRL                   0x0190
+#define ODN_NUM_DUT_CTRL_NUM_PINS_MASK          0xFFFFFFFFU
+#define ODN_NUM_DUT_CTRL_NUM_PINS_SHIFT         0
+#define ODN_NUM_DUT_CTRL_NUM_PINS_SIGNED        0
+
+/*
+	Register DUT_CTRL1
+*/
+#define ODN_CORE_DUT_CTRL1                      0x0194
+#define ODN_DUT_CTRL1_CONTROL1_MASK             0x3FFFFFFFU
+#define ODN_DUT_CTRL1_CONTROL1_SHIFT            0
+#define ODN_DUT_CTRL1_CONTROL1_SIGNED           0
+
+#define ODN_DUT_CTRL1_FBDC_BYPASS_MASK          0x40000000U
+#define ODN_DUT_CTRL1_FBDC_BYPASS_SHIFT         30
+#define ODN_DUT_CTRL1_FBDC_BYPASS_SIGNED        0
+
+#define ODN_DUT_CTRL1_DUT_MST_OFFSET_MASK       0x80000000U
+#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SHIFT      31
+#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SIGNED     0
+
+/*
+	Register DUT_CTRL2
+*/
+#define ODN_CORE_DUT_CTRL2                      0x0198
+#define ODN_DUT_CTRL2_CONTROL2_MASK             0xFFFFFFFFU
+#define ODN_DUT_CTRL2_CONTROL2_SHIFT            0
+#define ODN_DUT_CTRL2_CONTROL2_SIGNED           0
+
+/*
+	Register NUM_DUT_STAT
+*/
+#define ODN_CORE_NUM_DUT_STAT                   0x019C
+#define ODN_NUM_DUT_STAT_NUM_PINS_MASK          0xFFFFFFFFU
+#define ODN_NUM_DUT_STAT_NUM_PINS_SHIFT         0
+#define ODN_NUM_DUT_STAT_NUM_PINS_SIGNED        0
+
+/*
+	Register DUT_STAT1
+*/
+#define ODN_CORE_DUT_STAT1                      0x01A0
+#define ODN_DUT_STAT1_STATUS1_MASK              0xFFFFFFFFU
+#define ODN_DUT_STAT1_STATUS1_SHIFT             0
+#define ODN_DUT_STAT1_STATUS1_SIGNED            0
+
+/*
+	Register DUT_STAT2
+*/
+#define ODN_CORE_DUT_STAT2                      0x01A4
+#define ODN_DUT_STAT2_STATUS2_MASK              0xFFFFFFFFU
+#define ODN_DUT_STAT2_STATUS2_SHIFT             0
+#define ODN_DUT_STAT2_STATUS2_SIGNED            0
+
+/*
+	Register DASH_LEDS
+*/
+#define ODN_CORE_DASH_LEDS                      0x01A8
+#define ODN_DASH_LEDS_REPA_MASK                 0xFFF00000U
+#define ODN_DASH_LEDS_REPA_SHIFT                20
+#define ODN_DASH_LEDS_REPA_SIGNED               0
+
+#define ODN_DASH_LEDS_PIKE_MASK                 0x00000FFFU
+#define ODN_DASH_LEDS_PIKE_SHIFT                0
+#define ODN_DASH_LEDS_PIKE_SIGNED               0
+
+/*
+	Register CORE_STATUS
+*/
+#define ODN_CORE_CORE_STATUS                    0x0200
+#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_MASK   0x00000001U
+#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SHIFT  0
+#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SIGNED 0
+
+#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_MASK 0x00000010U
+#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SHIFT 4
+#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SIGNED 0
+
+#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_MASK 0x00000020U
+#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SHIFT 5
+#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SIGNED 0
+
+#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_MASK 0x00000040U
+#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SHIFT 6
+#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SIGNED 0
+
+#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_MASK 0x00000080U
+#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SHIFT 7
+#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SIGNED 0
+
+#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_MASK 0x00000100U
+#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SHIFT 8
+#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SIGNED 0
+
+#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_MASK 0x00000200U
+#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SHIFT 9
+#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SIGNED 0
+
+#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_MASK 0x00001000U
+#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SHIFT 12
+#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SIGNED 0
+
+#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_MASK 0x00002000U
+#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SHIFT 13
+#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SIGNED 0
+
+/*
+	Register CORE_CONTROL
+*/
+#define ODN_CORE_CORE_CONTROL                   0x0204
+#define ODN_CORE_CONTROL_BAR4_OFFSET_MASK       0x0000001FU
+#define ODN_CORE_CONTROL_BAR4_OFFSET_SHIFT      0
+#define ODN_CORE_CONTROL_BAR4_OFFSET_SIGNED     0
+
+#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_MASK 0x00000300U
+#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SHIFT 8
+#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SIGNED 0
+
+#define ODN_CORE_CONTROL_HDMI_MODULE_EN_MASK    0x00001C00U
+#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT   10
+#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SIGNED  0
+
+#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_MASK 0x00002000U
+#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT 13
+#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SIGNED 0
+
+#define ODN_CORE_CONTROL_PDP1_OFFSET_MASK       0x00070000U
+#define ODN_CORE_CONTROL_PDP1_OFFSET_SHIFT      16
+#define ODN_CORE_CONTROL_PDP1_OFFSET_SIGNED     0
+
+#define ODN_CORE_CONTROL_PDP2_OFFSET_MASK       0x00700000U
+#define ODN_CORE_CONTROL_PDP2_OFFSET_SHIFT      20
+#define ODN_CORE_CONTROL_PDP2_OFFSET_SIGNED     0
+
+#define ODN_CORE_CONTROL_DUT_OFFSET_MASK        0x07000000U
+#define ODN_CORE_CONTROL_DUT_OFFSET_SHIFT       24
+#define ODN_CORE_CONTROL_DUT_OFFSET_SIGNED      0
+
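+/*
+ * Editorial sketch: multi-bit fields such as DUT_OFFSET are written with
+ * the usual clear-then-insert pattern (readl/writel, "base" and "offset"
+ * are illustrative names, not part of this header):
+ *
+ *   uint32_t v = readl(base + ODN_CORE_CORE_CONTROL);
+ *   v &= ~ODN_CORE_CONTROL_DUT_OFFSET_MASK;
+ *   v |= (offset << ODN_CORE_CONTROL_DUT_OFFSET_SHIFT) &
+ *        ODN_CORE_CONTROL_DUT_OFFSET_MASK;
+ *   writel(v, base + ODN_CORE_CORE_CONTROL);
+ */
+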
+/*
+	Register REG_BANK_STATUS
+*/
+#define ODN_CORE_REG_BANK_STATUS                0x0208
+#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_MASK 0xFFFFFFFFU
+#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SHIFT 0
+#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SIGNED 0
+
+/*
+	Register MMCM_LOCK_STATUS
+*/
+#define ODN_CORE_MMCM_LOCK_STATUS               0x020C
+
+
+/****************************
+  Generated from: ad_tx.def
+*****************************/
+
+/*
+	Register ADT_CONTROL
+*/
+#define ODN_AD_TX_DEBUG_ADT_CONTROL             0x0000
+#define ODN_SET_ADTX_READY_MASK                 0x00000004U
+#define ODN_SET_ADTX_READY_SHIFT                2
+#define ODN_SET_ADTX_READY_SIGNED               0
+
+#define ODN_SEND_ALIGN_DATA_MASK                0x00000002U
+#define ODN_SEND_ALIGN_DATA_SHIFT               1
+#define ODN_SEND_ALIGN_DATA_SIGNED              0
+
+#define ODN_ENABLE_FLUSHING_MASK                0x00000001U
+#define ODN_ENABLE_FLUSHING_SHIFT               0
+#define ODN_ENABLE_FLUSHING_SIGNED              0
+
+/*
+	Register ADT_STATUS
+*/
+#define ODN_AD_TX_DEBUG_ADT_STATUS              0x0004
+#define ODN_REQUEST_COMPLETE_MASK               0x00000001U
+#define ODN_REQUEST_COMPLETE_SHIFT              0
+#define ODN_REQUEST_COMPLETE_SIGNED             0
+
+
+
+/******************************
+ Generated from: mca_debug.def
+*******************************/
+
+/*
+	Register MCA_CONTROL
+*/
+#define ODN_MCA_DEBUG_MCA_CONTROL               0x0000
+#define ODN_ALIGN_START_MASK                    0x00000001U
+#define ODN_ALIGN_START_SHIFT                   0
+#define ODN_ALIGN_START_SIGNED                  0
+
+/*
+	Register MCA_STATUS
+*/
+#define ODN_MCA_DEBUG_MCA_STATUS                0x0004
+#define ODN_TCHECK_SDEBUG_MASK                  0x40000000U
+#define ODN_TCHECK_SDEBUG_SHIFT                 30
+#define ODN_TCHECK_SDEBUG_SIGNED                0
+
+#define ODN_CHECK_SDEBUG_MASK                   0x20000000U
+#define ODN_CHECK_SDEBUG_SHIFT                  29
+#define ODN_CHECK_SDEBUG_SIGNED                 0
+
+#define ODN_ALIGN_SDEBUG_MASK                   0x10000000U
+#define ODN_ALIGN_SDEBUG_SHIFT                  28
+#define ODN_ALIGN_SDEBUG_SIGNED                 0
+
+#define ODN_FWAIT_SDEBUG_MASK                   0x08000000U
+#define ODN_FWAIT_SDEBUG_SHIFT                  27
+#define ODN_FWAIT_SDEBUG_SIGNED                 0
+
+#define ODN_IDLE_SDEBUG_MASK                    0x04000000U
+#define ODN_IDLE_SDEBUG_SHIFT                   26
+#define ODN_IDLE_SDEBUG_SIGNED                  0
+
+#define ODN_FIFO_FULL_MASK                      0x03FF0000U
+#define ODN_FIFO_FULL_SHIFT                     16
+#define ODN_FIFO_FULL_SIGNED                    0
+
+#define ODN_FIFO_EMPTY_MASK                     0x0000FFC0U
+#define ODN_FIFO_EMPTY_SHIFT                    6
+#define ODN_FIFO_EMPTY_SIGNED                   0
+
+#define ODN_TAG_CHECK_ERROR_MASK                0x00000020U
+#define ODN_TAG_CHECK_ERROR_SHIFT               5
+#define ODN_TAG_CHECK_ERROR_SIGNED              0
+
+#define ODN_ALIGN_CHECK_ERROR_MASK              0x00000010U
+#define ODN_ALIGN_CHECK_ERROR_SHIFT             4
+#define ODN_ALIGN_CHECK_ERROR_SIGNED            0
+
+#define ODN_ALIGN_ERROR_MASK                    0x00000008U
+#define ODN_ALIGN_ERROR_SHIFT                   3
+#define ODN_ALIGN_ERROR_SIGNED                  0
+
+#define ODN_TAG_CHECKING_OK_MASK                0x00000004U
+#define ODN_TAG_CHECKING_OK_SHIFT               2
+#define ODN_TAG_CHECKING_OK_SIGNED              0
+
+#define ODN_ALIGN_CHECK_OK_MASK                 0x00000002U
+#define ODN_ALIGN_CHECK_OK_SHIFT                1
+#define ODN_ALIGN_CHECK_OK_SIGNED               0
+
+#define ODN_ALIGNMENT_FOUND_MASK                0x00000001U
+#define ODN_ALIGNMENT_FOUND_SHIFT               0
+#define ODN_ALIGNMENT_FOUND_SIGNED              0
+
+
+/*********************************
+ Generated from: sai_rx_debug.def
+**********************************/
+
+/*
+	Register SIG_RESULT
+*/
+#define ODN_SAI_RX_DEBUG_SIG_RESULT             0x0000
+#define ODN_SIG_RESULT_VALUE_MASK               0xFFFFFFFFU
+#define ODN_SIG_RESULT_VALUE_SHIFT              0
+#define ODN_SIG_RESULT_VALUE_SIGNED             0
+
+/*
+	Register INIT_SIG
+*/
+#define ODN_SAI_RX_DEBUG_INIT_SIG               0x0004
+#define ODN_INIT_SIG_VALUE_MASK                 0x00000001U
+#define ODN_INIT_SIG_VALUE_SHIFT                0
+#define ODN_INIT_SIG_VALUE_SIGNED               0
+
+/*
+	Register SAI_BYPASS
+*/
+#define ODN_SAI_RX_DEBUG_SAI_BYPASS             0x0008
+#define ODN_BYPASS_CLK_TAPS_VALUE_MASK          0x000003FFU
+#define ODN_BYPASS_CLK_TAPS_VALUE_SHIFT         0
+#define ODN_BYPASS_CLK_TAPS_VALUE_SIGNED        0
+
+#define ODN_BYPASS_SET_MASK                     0x00010000U
+#define ODN_BYPASS_SET_SHIFT                    16
+#define ODN_BYPASS_SET_SIGNED                   0
+
+#define ODN_BYPASS_EN_MASK                      0x00100000U
+#define ODN_BYPASS_EN_SHIFT                     20
+#define ODN_BYPASS_EN_SIGNED                    0
+
+#define ODN_EN_STATUS_MASK                      0x01000000U
+#define ODN_EN_STATUS_SHIFT                     24
+#define ODN_EN_STATUS_SIGNED                    0
+
+/*
+	Register SAI_CLK_TAPS
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CLK_TAPS           0x000C
+#define ODN_CLK_TAPS_VALUE_MASK                 0x000003FFU
+#define ODN_CLK_TAPS_VALUE_SHIFT                0
+#define ODN_CLK_TAPS_VALUE_SIGNED               0
+
+#define ODN_TRAINING_COMPLETE_MASK              0x00010000U
+#define ODN_TRAINING_COMPLETE_SHIFT             16
+#define ODN_TRAINING_COMPLETE_SIGNED            0
+
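+/*
+ * Editorial sketch: TRAINING_COMPLETE suggests a simple poll until the RX
+ * side has trained (timeout handling omitted; readl()/cpu_relax() and
+ * "base" are assumptions, not part of this header):
+ *
+ *   while (!(readl(base + ODN_SAI_RX_DEBUG_SAI_CLK_TAPS) &
+ *            ODN_TRAINING_COMPLETE_MASK))
+ *       cpu_relax();
+ */
+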
+/*
+	Register SAI_EYES
+*/
+#define ODN_SAI_RX_DEBUG_SAI_EYES               0x0010
+#define ODN_MIN_EYE_END_MASK                    0x0000FFFFU
+#define ODN_MIN_EYE_END_SHIFT                   0
+#define ODN_MIN_EYE_END_SIGNED                  0
+
+#define ODN_MAX_EYE_START_MASK                  0xFFFF0000U
+#define ODN_MAX_EYE_START_SHIFT                 16
+#define ODN_MAX_EYE_START_SIGNED                0
+
+/*
+	Register SAI_DDR_INVERT
+*/
+#define ODN_SAI_RX_DEBUG_SAI_DDR_INVERT         0x0014
+#define ODN_DDR_INVERT_MASK                     0x00000001U
+#define ODN_DDR_INVERT_SHIFT                    0
+#define ODN_DDR_INVERT_SIGNED                   0
+
+#define ODN_OVERIDE_VALUE_MASK                  0x00010000U
+#define ODN_OVERIDE_VALUE_SHIFT                 16
+#define ODN_OVERIDE_VALUE_SIGNED                0
+
+#define ODN_INVERT_OVERIDE_MASK                 0x00100000U
+#define ODN_INVERT_OVERIDE_SHIFT                20
+#define ODN_INVERT_OVERIDE_SIGNED               0
+
+/*
+	Register SAI_TRAIN_ACK
+*/
+#define ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK          0x0018
+#define ODN_TRAIN_ACK_FAIL_MASK                 0x00000001U
+#define ODN_TRAIN_ACK_FAIL_SHIFT                0
+#define ODN_TRAIN_ACK_FAIL_SIGNED               0
+
+#define ODN_TRAIN_ACK_FAIL_COUNT_MASK           0x000000F0U
+#define ODN_TRAIN_ACK_FAIL_COUNT_SHIFT          4
+#define ODN_TRAIN_ACK_FAIL_COUNT_SIGNED         0
+
+#define ODN_TRAIN_ACK_COMPLETE_MASK             0x00000100U
+#define ODN_TRAIN_ACK_COMPLETE_SHIFT            8
+#define ODN_TRAIN_ACK_COMPLETE_SIGNED           0
+
+#define ODN_TRAIN_ACK_OVERIDE_MASK              0x00001000U
+#define ODN_TRAIN_ACK_OVERIDE_SHIFT             12
+#define ODN_TRAIN_ACK_OVERIDE_SIGNED            0
+
+/*
+	Register SAI_TRAIN_ACK_COUNT
+*/
+#define ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK_COUNT    0x001C
+#define ODN_TRAIN_COUNT_MASK                    0xFFFFFFFFU
+#define ODN_TRAIN_COUNT_SHIFT                   0
+#define ODN_TRAIN_COUNT_SIGNED                  0
+
+/*
+	Register SAI_CHANNEL_NUMBER
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_NUMBER     0x0020
+#define ODN_CHANNEL_NUMBER_MASK                 0x0000FFFFU
+#define ODN_CHANNEL_NUMBER_SHIFT                0
+#define ODN_CHANNEL_NUMBER_SIGNED               0
+
+/*
+	Register SAI_CHANNEL_EYE_START
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_START  0x0024
+#define ODN_CHANNEL_EYE_START_MASK              0xFFFFFFFFU
+#define ODN_CHANNEL_EYE_START_SHIFT             0
+#define ODN_CHANNEL_EYE_START_SIGNED            0
+
+/*
+	Register SAI_CHANNEL_EYE_END
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_END    0x0028
+#define ODN_CHANNEL_EYE_END_MASK                0xFFFFFFFFU
+#define ODN_CHANNEL_EYE_END_SHIFT               0
+#define ODN_CHANNEL_EYE_END_SIGNED              0
+
+/*
+	Register SAI_CHANNEL_EYE_PATTERN
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_PATTERN 0x002C
+#define ODN_CHANNEL_EYE_PATTERN_MASK            0xFFFFFFFFU
+#define ODN_CHANNEL_EYE_PATTERN_SHIFT           0
+#define ODN_CHANNEL_EYE_PATTERN_SIGNED          0
+
+/*
+	Register SAI_CHANNEL_EYE_DEBUG
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_DEBUG  0x0030
+#define ODN_CHANNEL_EYE_SENSE_MASK              0x00000001U
+#define ODN_CHANNEL_EYE_SENSE_SHIFT             0
+#define ODN_CHANNEL_EYE_SENSE_SIGNED            0
+
+#define ODN_CHANNEL_EYE_COMPLETE_MASK           0x00000002U
+#define ODN_CHANNEL_EYE_COMPLETE_SHIFT          1
+#define ODN_CHANNEL_EYE_COMPLETE_SIGNED         0
+
+
+/*********************************
+ Generated from: sai_tx_debug.def
+**********************************/
+
+/*
+	Register SIG_RESULT
+*/
+#define ODN_SAI_TX_DEBUG_SIG_RESULT             0x0000
+#define ODN_TX_SIG_RESULT_VALUE_MASK            0xFFFFFFFFU
+#define ODN_TX_SIG_RESULT_VALUE_SHIFT           0
+#define ODN_TX_SIG_RESULT_VALUE_SIGNED          0
+
+/*
+	Register INIT_SIG
+*/
+#define ODN_SAI_TX_DEBUG_INIT_SIG               0x0004
+#define ODN_TX_INIT_SIG_VALUE_MASK              0x00000001U
+#define ODN_TX_INIT_SIG_VALUE_SHIFT             0
+#define ODN_TX_INIT_SIG_VALUE_SIGNED            0
+
+/*
+	Register SAI_BYPASS
+*/
+#define ODN_SAI_TX_DEBUG_SAI_BYPASS             0x0008
+#define ODN_TX_BYPASS_EN_MASK                   0x00000001U
+#define ODN_TX_BYPASS_EN_SHIFT                  0
+#define ODN_TX_BYPASS_EN_SIGNED                 0
+
+#define ODN_TX_ACK_RESEND_MASK                  0x00000002U
+#define ODN_TX_ACK_RESEND_SHIFT                 1
+#define ODN_TX_ACK_RESEND_SIGNED                0
+
+#define ODN_TX_DISABLE_ACK_SEND_MASK            0x00000004U
+#define ODN_TX_DISABLE_ACK_SEND_SHIFT           2
+#define ODN_TX_DISABLE_ACK_SEND_SIGNED          0
+
+/*
+	Register SAI_STATUS
+*/
+#define ODN_SAI_TX_DEBUG_SAI_STATUS             0x000C
+#define ODN_TX_TRAINING_COMPLETE_MASK           0x00000001U
+#define ODN_TX_TRAINING_COMPLETE_SHIFT          0
+#define ODN_TX_TRAINING_COMPLETE_SIGNED         0
+
+#define ODN_TX_TRAINING_ACK_COMPLETE_MASK       0x00000002U
+#define ODN_TX_TRAINING_ACK_COMPLETE_SHIFT      1
+#define ODN_TX_TRAINING_ACK_COMPLETE_SIGNED     0
+
+
+
+#endif /* _ODIN_REGS_H_ */
+
+/*****************************************************************************
+ End of file (odn_regs.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/pdp_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/pdp_regs.h
new file mode 100644
index 0000000..bd26b06
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/pdp_regs.h
@@ -0,0 +1,75 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_REGS_H__)
+#define __PDP_REGS_H__
+
+/*************************************************************************/ /*!
+ PCI Device Information
+*/ /**************************************************************************/
+
+#define DCPDP_VENDOR_ID_POWERVR			(0x1010)
+
+#define DCPDP_DEVICE_ID_PCI_APOLLO_FPGA		(0x1CF1)
+#define DCPDP_DEVICE_ID_PCIE_APOLLO_FPGA	(0x1CF2)
+
+/*************************************************************************/ /*!
+ PCI Device Base Address Information
+*/ /**************************************************************************/
+
+/* PLL and PDP registers on base address register 0 */
+#define DCPDP_REG_PCI_BASENUM			(0)
+
+#define DCPDP_PCI_PLL_REG_OFFSET		(0x1000)
+#define DCPDP_PCI_PLL_REG_SIZE			(0x0400)
+
+#define DCPDP_PCI_PDP_REG_OFFSET		(0xC000)
+#define DCPDP_PCI_PDP_REG_SIZE			(0x2000)
+
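+/*
+ * Editorial sketch: on Linux the PDP register block would typically be
+ * mapped from BAR0 using these offsets ("pdev" is an assumed
+ * struct pci_dev pointer; error handling omitted):
+ *
+ *   void __iomem *pdp_regs =
+ *       ioremap(pci_resource_start(pdev, DCPDP_REG_PCI_BASENUM) +
+ *               DCPDP_PCI_PDP_REG_OFFSET, DCPDP_PCI_PDP_REG_SIZE);
+ */
+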
+/*************************************************************************/ /*!
+ Misc register information
+*/ /**************************************************************************/
+
+/* This information isn't captured in tcf_rgbpdp_regs.h so define it here */
+#define DCPDP_STR1SURF_FORMAT_ARGB8888		(0xE)
+#define DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT	(4)
+#define DCPDP_STR1POSN_STRIDE_SHIFT		(4)
+
+#endif /* !defined(__PDP_REGS_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/tcf_clk_ctrl.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/tcf_clk_ctrl.h
new file mode 100644
index 0000000..a42d23e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/tcf_clk_ctrl.h
@@ -0,0 +1,808 @@
+/*************************************************************************/ /*!
+@Title          Test Chip Framework system control register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Autogenerated C -- do not edit
+                Generated from: tcf_clk_ctrl.def
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_TCF_CLK_CTRL_H_)
+#define _TCF_CLK_CTRL_H_
+
+/*
+	Register FPGA_ID_REG
+*/
+#define TCF_CLK_CTRL_FPGA_ID_REG            0x0000
+#define FPGA_ID_REG_CORE_CFG_MASK           0x0000FFFFU
+#define FPGA_ID_REG_CORE_CFG_SHIFT          0
+#define FPGA_ID_REG_CORE_CFG_SIGNED         0
+
+#define FPGA_ID_REG_CORE_ID_MASK            0xFFFF0000U
+#define FPGA_ID_REG_CORE_ID_SHIFT           16
+#define FPGA_ID_REG_CORE_ID_SIGNED          0
+
+/*
+	Register FPGA_REV_REG
+*/
+#define TCF_CLK_CTRL_FPGA_REV_REG           0x0008
+#define FPGA_REV_REG_MAINT_MASK             0x000000FFU
+#define FPGA_REV_REG_MAINT_SHIFT            0
+#define FPGA_REV_REG_MAINT_SIGNED           0
+
+#define FPGA_REV_REG_MINOR_MASK             0x0000FF00U
+#define FPGA_REV_REG_MINOR_SHIFT            8
+#define FPGA_REV_REG_MINOR_SIGNED           0
+
+#define FPGA_REV_REG_MAJOR_MASK             0x00FF0000U
+#define FPGA_REV_REG_MAJOR_SHIFT            16
+#define FPGA_REV_REG_MAJOR_SIGNED           0
+
+#define FPGA_REV_REG_DESIGNER_MASK          0xFF000000U
+#define FPGA_REV_REG_DESIGNER_SHIFT         24
+#define FPGA_REV_REG_DESIGNER_SIGNED        0
+
+/*
+	Register FPGA_DES_REV_1
+*/
+#define TCF_CLK_CTRL_FPGA_DES_REV_1         0x0010
+#define FPGA_DES_REV_1_MASK                 0xFFFFFFFFU
+#define FPGA_DES_REV_1_SHIFT                0
+#define FPGA_DES_REV_1_SIGNED               0
+
+/*
+	Register FPGA_DES_REV_2
+*/
+#define TCF_CLK_CTRL_FPGA_DES_REV_2         0x0018
+#define FPGA_DES_REV_2_MASK                 0xFFFFFFFFU
+#define FPGA_DES_REV_2_SHIFT                0
+#define FPGA_DES_REV_2_SIGNED               0
+
+/*
+	Register TCF_CORE_ID_REG
+*/
+#define TCF_CLK_CTRL_TCF_CORE_ID_REG        0x0020
+#define TCF_CORE_ID_REG_CORE_CFG_MASK       0x0000FFFFU
+#define TCF_CORE_ID_REG_CORE_CFG_SHIFT      0
+#define TCF_CORE_ID_REG_CORE_CFG_SIGNED     0
+
+#define TCF_CORE_ID_REG_CORE_ID_MASK        0xFFFF0000U
+#define TCF_CORE_ID_REG_CORE_ID_SHIFT       16
+#define TCF_CORE_ID_REG_CORE_ID_SIGNED      0
+
+/*
+	Register TCF_CORE_REV_REG
+*/
+#define TCF_CLK_CTRL_TCF_CORE_REV_REG       0x0028
+#define TCF_CORE_REV_REG_MAINT_MASK         0x000000FFU
+#define TCF_CORE_REV_REG_MAINT_SHIFT        0
+#define TCF_CORE_REV_REG_MAINT_SIGNED       0
+
+#define TCF_CORE_REV_REG_MINOR_MASK         0x0000FF00U
+#define TCF_CORE_REV_REG_MINOR_SHIFT        8
+#define TCF_CORE_REV_REG_MINOR_SIGNED       0
+
+#define TCF_CORE_REV_REG_MAJOR_MASK         0x00FF0000U
+#define TCF_CORE_REV_REG_MAJOR_SHIFT        16
+#define TCF_CORE_REV_REG_MAJOR_SIGNED       0
+
+#define TCF_CORE_REV_REG_DESIGNER_MASK      0xFF000000U
+#define TCF_CORE_REV_REG_DESIGNER_SHIFT     24
+#define TCF_CORE_REV_REG_DESIGNER_SIGNED    0
+
+/*
+	Register TCF_CORE_DES_REV_1
+*/
+#define TCF_CLK_CTRL_TCF_CORE_DES_REV_1     0x0030
+#define TCF_CORE_DES_REV_1_MASK             0xFFFFFFFFU
+#define TCF_CORE_DES_REV_1_SHIFT            0
+#define TCF_CORE_DES_REV_1_SIGNED           0
+
+/*
+	Register TCF_CORE_DES_REV_2
+*/
+#define TCF_CLK_CTRL_TCF_CORE_DES_REV_2     0x0038
+#define TCF_CORE_DES_REV_2_MASK             0xFFFFFFFFU
+#define TCF_CORE_DES_REV_2_SHIFT            0
+#define TCF_CORE_DES_REV_2_SIGNED           0
+
+/*
+	Register SCB_GENERAL_CONTROL
+*/
+#define TCF_CLK_CTRL_SCB_GENERAL_CONTROL    0x0040
+#define SCB_GC_TRANS_HALT_MASK              0x00000200U
+#define SCB_GC_TRANS_HALT_SHIFT             9
+#define SCB_GC_TRANS_HALT_SIGNED            0
+
+#define SCB_GC_CKD_REGS_MASK                0x00000100U
+#define SCB_GC_CKD_REGS_SHIFT               8
+#define SCB_GC_CKD_REGS_SIGNED              0
+
+#define SCB_GC_CKD_SLAVE_MASK               0x00000080U
+#define SCB_GC_CKD_SLAVE_SHIFT              7
+#define SCB_GC_CKD_SLAVE_SIGNED             0
+
+#define SCB_GC_CKD_MASTER_MASK              0x00000040U
+#define SCB_GC_CKD_MASTER_SHIFT             6
+#define SCB_GC_CKD_MASTER_SIGNED            0
+
+#define SCB_GC_CKD_XDATA_MASK               0x00000020U
+#define SCB_GC_CKD_XDATA_SHIFT              5
+#define SCB_GC_CKD_XDATA_SIGNED             0
+
+#define SCB_GC_SFR_REG_MASK                 0x00000010U
+#define SCB_GC_SFR_REG_SHIFT                4
+#define SCB_GC_SFR_REG_SIGNED               0
+
+#define SCB_GC_SFR_SLAVE_MASK               0x00000008U
+#define SCB_GC_SFR_SLAVE_SHIFT              3
+#define SCB_GC_SFR_SLAVE_SIGNED             0
+
+#define SCB_GC_SFR_MASTER_MASK              0x00000004U
+#define SCB_GC_SFR_MASTER_SHIFT             2
+#define SCB_GC_SFR_MASTER_SIGNED            0
+
+#define SCB_GC_SFR_DET_DATA_MASK            0x00000002U
+#define SCB_GC_SFR_DET_DATA_SHIFT           1
+#define SCB_GC_SFR_DET_DATA_SIGNED          0
+
+#define SCB_GC_SFR_GEN_DATA_MASK            0x00000001U
+#define SCB_GC_SFR_GEN_DATA_SHIFT           0
+#define SCB_GC_SFR_GEN_DATA_SIGNED          0
+
+/*
+	Register SCB_MASTER_READ_COUNT
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_READ_COUNT  0x0048
+#define MASTER_READ_COUNT_MASK              0x0000FFFFU
+#define MASTER_READ_COUNT_SHIFT             0
+#define MASTER_READ_COUNT_SIGNED            0
+
+/*
+	Register SCB_MASTER_READ_DATA
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_READ_DATA   0x0050
+#define MASTER_READ_DATA_MASK               0x000000FFU
+#define MASTER_READ_DATA_SHIFT              0
+#define MASTER_READ_DATA_SIGNED             0
+
+/*
+	Register SCB_MASTER_ADDRESS
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_ADDRESS     0x0058
+#define SCB_MASTER_ADDRESS_MASK             0x000003FFU
+#define SCB_MASTER_ADDRESS_SHIFT            0
+#define SCB_MASTER_ADDRESS_SIGNED           0
+
+/*
+	Register SCB_MASTER_WRITE_DATA
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_WRITE_DATA  0x0060
+#define MASTER_WRITE_DATA_MASK              0x000000FFU
+#define MASTER_WRITE_DATA_SHIFT             0
+#define MASTER_WRITE_DATA_SIGNED            0
+
+/*
+	Register SCB_MASTER_WRITE_COUNT
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_WRITE_COUNT 0x0068
+#define MASTER_WRITE_COUNT_MASK             0x0000FFFFU
+#define MASTER_WRITE_COUNT_SHIFT            0
+#define MASTER_WRITE_COUNT_SIGNED           0
+
+/*
+	Register SCB_BUS_SELECT
+*/
+#define TCF_CLK_CTRL_SCB_BUS_SELECT         0x0070
+#define BUS_SELECT_MASK                     0x00000003U
+#define BUS_SELECT_SHIFT                    0
+#define BUS_SELECT_SIGNED                   0
+
+/*
+	Register SCB_MASTER_FILL_STATUS
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_FILL_STATUS 0x0078
+#define MASTER_WRITE_FIFO_EMPTY_MASK        0x00000008U
+#define MASTER_WRITE_FIFO_EMPTY_SHIFT       3
+#define MASTER_WRITE_FIFO_EMPTY_SIGNED      0
+
+#define MASTER_WRITE_FIFO_FULL_MASK         0x00000004U
+#define MASTER_WRITE_FIFO_FULL_SHIFT        2
+#define MASTER_WRITE_FIFO_FULL_SIGNED       0
+
+#define MASTER_READ_FIFO_EMPTY_MASK         0x00000002U
+#define MASTER_READ_FIFO_EMPTY_SHIFT        1
+#define MASTER_READ_FIFO_EMPTY_SIGNED       0
+
+#define MASTER_READ_FIFO_FULL_MASK          0x00000001U
+#define MASTER_READ_FIFO_FULL_SHIFT         0
+#define MASTER_READ_FIFO_FULL_SIGNED        0
+
+/*
+	Register CLK_AND_RST_CTRL
+*/
+#define TCF_CLK_CTRL_CLK_AND_RST_CTRL       0x0080
+#define GLB_CLKG_EN_MASK                    0x00020000U
+#define GLB_CLKG_EN_SHIFT                   17
+#define GLB_CLKG_EN_SIGNED                  0
+
+#define CLK_GATE_CNTL_MASK                  0x00010000U
+#define CLK_GATE_CNTL_SHIFT                 16
+#define CLK_GATE_CNTL_SIGNED                0
+
+#define DUT_DCM_RESETN_MASK                 0x00000400U
+#define DUT_DCM_RESETN_SHIFT                10
+#define DUT_DCM_RESETN_SIGNED               0
+
+#define MEM_RESYNC_BYPASS_MASK              0x00000200U
+#define MEM_RESYNC_BYPASS_SHIFT             9
+#define MEM_RESYNC_BYPASS_SIGNED            0
+
+#define SYS_RESYNC_BYPASS_MASK              0x00000100U
+#define SYS_RESYNC_BYPASS_SHIFT             8
+#define SYS_RESYNC_BYPASS_SIGNED            0
+
+#define SCB_RESETN_MASK                     0x00000010U
+#define SCB_RESETN_SHIFT                    4
+#define SCB_RESETN_SIGNED                   0
+
+#define PDP2_RESETN_MASK                    0x00000008U
+#define PDP2_RESETN_SHIFT                   3
+#define PDP2_RESETN_SIGNED                  0
+
+#define PDP1_RESETN_MASK                    0x00000004U
+#define PDP1_RESETN_SHIFT                   2
+#define PDP1_RESETN_SIGNED                  0
+
+#define DDR_RESETN_MASK                     0x00000002U
+#define DDR_RESETN_SHIFT                    1
+#define DDR_RESETN_SIGNED                   0
+
+#define DUT_RESETN_MASK                     0x00000001U
+#define DUT_RESETN_SHIFT                    0
+#define DUT_RESETN_SIGNED                   0
+
+/*
+	Register TEST_REG_OUT
+*/
+#define TCF_CLK_CTRL_TEST_REG_OUT           0x0088
+#define TEST_REG_OUT_MASK                   0xFFFFFFFFU
+#define TEST_REG_OUT_SHIFT                  0
+#define TEST_REG_OUT_SIGNED                 0
+
+/*
+	Register TEST_REG_IN
+*/
+#define TCF_CLK_CTRL_TEST_REG_IN            0x0090
+#define TEST_REG_IN_MASK                    0xFFFFFFFFU
+#define TEST_REG_IN_SHIFT                   0
+#define TEST_REG_IN_SIGNED                  0
+
+/*
+	Register TEST_CTRL
+*/
+#define TCF_CLK_CTRL_TEST_CTRL              0x0098
+#define PCI_TEST_OFFSET_MASK                0xF8000000U
+#define PCI_TEST_OFFSET_SHIFT               27
+#define PCI_TEST_OFFSET_SIGNED              0
+
+#define HOST_PHY_MODE_MASK                  0x00000100U
+#define HOST_PHY_MODE_SHIFT                 8
+#define HOST_PHY_MODE_SIGNED                0
+
+#define HOST_ONLY_MODE_MASK                 0x00000080U
+#define HOST_ONLY_MODE_SHIFT                7
+#define HOST_ONLY_MODE_SIGNED               0
+
+#define PCI_TEST_MODE_MASK                  0x00000040U
+#define PCI_TEST_MODE_SHIFT                 6
+#define PCI_TEST_MODE_SIGNED                0
+
+#define TURN_OFF_DDR_MASK                   0x00000020U
+#define TURN_OFF_DDR_SHIFT                  5
+#define TURN_OFF_DDR_SIGNED                 0
+
+#define SYS_RD_CLK_INV_MASK                 0x00000010U
+#define SYS_RD_CLK_INV_SHIFT                4
+#define SYS_RD_CLK_INV_SIGNED               0
+
+#define MEM_REQ_CLK_INV_MASK                0x00000008U
+#define MEM_REQ_CLK_INV_SHIFT               3
+#define MEM_REQ_CLK_INV_SIGNED              0
+
+#define BURST_SPLIT_MASK                    0x00000004U
+#define BURST_SPLIT_SHIFT                   2
+#define BURST_SPLIT_SIGNED                  0
+
+#define CLK_INVERSION_MASK                  0x00000002U
+#define CLK_INVERSION_SHIFT                 1
+#define CLK_INVERSION_SIGNED                0
+
+#define ADDRESS_FORCE_MASK                  0x00000001U
+#define ADDRESS_FORCE_SHIFT                 0
+#define ADDRESS_FORCE_SIGNED                0
+
+/*
+	Register CLEAR_HOST_MEM_SIG
+*/
+#define TCF_CLK_CTRL_CLEAR_HOST_MEM_SIG     0x00A0
+#define SIGNATURE_TAG_ID_MASK               0x00000F00U
+#define SIGNATURE_TAG_ID_SHIFT              8
+#define SIGNATURE_TAG_ID_SIGNED             0
+
+#define CLEAR_HOST_MEM_SIGNATURE_MASK       0x00000001U
+#define CLEAR_HOST_MEM_SIGNATURE_SHIFT      0
+#define CLEAR_HOST_MEM_SIGNATURE_SIGNED     0
+
+/*
+	Register HOST_MEM_SIGNATURE
+*/
+#define TCF_CLK_CTRL_HOST_MEM_SIGNATURE     0x00A8
+#define HOST_MEM_SIGNATURE_MASK             0xFFFFFFFFU
+#define HOST_MEM_SIGNATURE_SHIFT            0
+#define HOST_MEM_SIGNATURE_SIGNED           0
+
+/*
+	Register INTERRUPT_STATUS
+*/
+#define TCF_CLK_CTRL_INTERRUPT_STATUS       0x00C8
+#define INTERRUPT_MASTER_STATUS_MASK        0x80000000U
+#define INTERRUPT_MASTER_STATUS_SHIFT       31
+#define INTERRUPT_MASTER_STATUS_SIGNED      0
+
+#define OTHER_INTS_MASK                     0x7FFE0000U
+#define OTHER_INTS_SHIFT                    17
+#define OTHER_INTS_SIGNED                   0
+
+#define HOST_MST_NORESPONSE_MASK            0x00010000U
+#define HOST_MST_NORESPONSE_SHIFT           16
+#define HOST_MST_NORESPONSE_SIGNED          0
+
+#define PDP2_INT_MASK                       0x00008000U
+#define PDP2_INT_SHIFT                      15
+#define PDP2_INT_SIGNED                     0
+
+#define PDP1_INT_MASK                       0x00004000U
+#define PDP1_INT_SHIFT                      14
+#define PDP1_INT_SIGNED                     0
+
+#define EXT_INT_MASK                        0x00002000U
+#define EXT_INT_SHIFT                       13
+#define EXT_INT_SIGNED                      0
+
+#define SCB_MST_HLT_BIT_MASK                0x00001000U
+#define SCB_MST_HLT_BIT_SHIFT               12
+#define SCB_MST_HLT_BIT_SIGNED              0
+
+#define SCB_SLV_EVENT_MASK                  0x00000800U
+#define SCB_SLV_EVENT_SHIFT                 11
+#define SCB_SLV_EVENT_SIGNED                0
+
+#define SCB_TDONE_RX_MASK                   0x00000400U
+#define SCB_TDONE_RX_SHIFT                  10
+#define SCB_TDONE_RX_SIGNED                 0
+
+#define SCB_SLV_WT_RD_DAT_MASK              0x00000200U
+#define SCB_SLV_WT_RD_DAT_SHIFT             9
+#define SCB_SLV_WT_RD_DAT_SIGNED            0
+
+#define SCB_SLV_WT_PRV_RD_MASK              0x00000100U
+#define SCB_SLV_WT_PRV_RD_SHIFT             8
+#define SCB_SLV_WT_PRV_RD_SIGNED            0
+
+#define SCB_SLV_WT_WR_DAT_MASK              0x00000080U
+#define SCB_SLV_WT_WR_DAT_SHIFT             7
+#define SCB_SLV_WT_WR_DAT_SIGNED            0
+
+#define SCB_MST_WT_RD_DAT_MASK              0x00000040U
+#define SCB_MST_WT_RD_DAT_SHIFT             6
+#define SCB_MST_WT_RD_DAT_SIGNED            0
+
+#define SCB_ADD_ACK_ERR_MASK                0x00000020U
+#define SCB_ADD_ACK_ERR_SHIFT               5
+#define SCB_ADD_ACK_ERR_SIGNED              0
+
+#define SCB_WR_ACK_ERR_MASK                 0x00000010U
+#define SCB_WR_ACK_ERR_SHIFT                4
+#define SCB_WR_ACK_ERR_SIGNED               0
+
+#define SCB_SDAT_LO_TIM_MASK                0x00000008U
+#define SCB_SDAT_LO_TIM_SHIFT               3
+#define SCB_SDAT_LO_TIM_SIGNED              0
+
+#define SCB_SCLK_LO_TIM_MASK                0x00000004U
+#define SCB_SCLK_LO_TIM_SHIFT               2
+#define SCB_SCLK_LO_TIM_SIGNED              0
+
+#define SCB_UNEX_START_BIT_MASK             0x00000002U
+#define SCB_UNEX_START_BIT_SHIFT            1
+#define SCB_UNEX_START_BIT_SIGNED           0
+
+#define SCB_BUS_INACTIVE_MASK               0x00000001U
+#define SCB_BUS_INACTIVE_SHIFT              0
+#define SCB_BUS_INACTIVE_SIGNED             0
+
+/*
+	Register INTERRUPT_OP_CFG
+*/
+#define TCF_CLK_CTRL_INTERRUPT_OP_CFG       0x00D0
+#define PULSE_NLEVEL_MASK                   0x80000000U
+#define PULSE_NLEVEL_SHIFT                  31
+#define PULSE_NLEVEL_SIGNED                 0
+
+#define INT_SENSE_MASK                      0x40000000U
+#define INT_SENSE_SHIFT                     30
+#define INT_SENSE_SIGNED                    0
+
+#define INTERRUPT_DEST_MASK                 0x0000000FU
+#define INTERRUPT_DEST_SHIFT                0
+#define INTERRUPT_DEST_SIGNED               0
+
+/*
+	Register INTERRUPT_ENABLE
+*/
+#define TCF_CLK_CTRL_INTERRUPT_ENABLE       0x00D8
+#define INTERRUPT_MASTER_ENABLE_MASK        0x80000000U
+#define INTERRUPT_MASTER_ENABLE_SHIFT       31
+#define INTERRUPT_MASTER_ENABLE_SIGNED      0
+
+#define INTERRUPT_ENABLE_MASK               0x7FFFFFFFU
+#define INTERRUPT_ENABLE_SHIFT              0
+#define INTERRUPT_ENABLE_SIGNED             0
+
+/*
+	Register INTERRUPT_CLEAR
+*/
+#define TCF_CLK_CTRL_INTERRUPT_CLEAR        0x00E0
+#define INTERRUPT_MASTER_CLEAR_MASK         0x80000000U
+#define INTERRUPT_MASTER_CLEAR_SHIFT        31
+#define INTERRUPT_MASTER_CLEAR_SIGNED       0
+
+#define INTERRUPT_CLEAR_MASK                0x7FFFFFFFU
+#define INTERRUPT_CLEAR_SHIFT               0
+#define INTERRUPT_CLEAR_SIGNED              0
+
+/*
+	Register YCC_RGB_CTRL
+*/
+#define TCF_CLK_CTRL_YCC_RGB_CTRL           0x00E8
+#define RGB_CTRL1_MASK                      0x000001FFU
+#define RGB_CTRL1_SHIFT                     0
+#define RGB_CTRL1_SIGNED                    0
+
+#define RGB_CTRL2_MASK                      0x01FF0000U
+#define RGB_CTRL2_SHIFT                     16
+#define RGB_CTRL2_SIGNED                    0
+
+/*
+	Register EXP_BRD_CTRL
+*/
+#define TCF_CLK_CTRL_EXP_BRD_CTRL           0x00F8
+#define PDP1_DATA_EN_MASK                   0x00000003U
+#define PDP1_DATA_EN_SHIFT                  0
+#define PDP1_DATA_EN_SIGNED                 0
+
+#define PDP2_DATA_EN_MASK                   0x00000030U
+#define PDP2_DATA_EN_SHIFT                  4
+#define PDP2_DATA_EN_SIGNED                 0
+
+#define EXP_BRD_OUTPUT_MASK                 0xFFFFFF00U
+#define EXP_BRD_OUTPUT_SHIFT                8
+#define EXP_BRD_OUTPUT_SIGNED               0
+
+/*
+	Register HOSTIF_CONTROL
+*/
+#define TCF_CLK_CTRL_HOSTIF_CONTROL         0x0100
+#define HOSTIF_CTRL_MASK                    0x000000FFU
+#define HOSTIF_CTRL_SHIFT                   0
+#define HOSTIF_CTRL_SIGNED                  0
+
+/*
+	Register DUT_CONTROL_1
+*/
+#define TCF_CLK_CTRL_DUT_CONTROL_1          0x0108
+#define DUT_CTRL_1_MASK                     0xFFFFFFFFU
+#define DUT_CTRL_1_SHIFT                    0
+#define DUT_CTRL_1_SIGNED                   0
+
+/* TC ES2 additionally needs these: */
+#define DUT_CTRL_TEST_MODE_SHIFT            0
+#define DUT_CTRL_TEST_MODE_MASK             0x3
+
+#define DUT_CTRL_VCC_0V9EN                  (1<<12)
+#define DUT_CTRL_VCC_1V8EN                  (1<<13)
+#define DUT_CTRL_VCC_IO_INH                 (1<<14)
+#define DUT_CTRL_VCC_CORE_INH               (1<<15)
+
+/*
+	Register DUT_STATUS_1
+*/
+#define TCF_CLK_CTRL_DUT_STATUS_1           0x0110
+#define DUT_STATUS_1_MASK                   0xFFFFFFFFU
+#define DUT_STATUS_1_SHIFT                  0
+#define DUT_STATUS_1_SIGNED                 0
+
+/*
+	Register DUT_CTRL_NOT_STAT_1
+*/
+#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_1    0x0118
+#define DUT_STAT_NOT_CTRL_1_MASK            0xFFFFFFFFU
+#define DUT_STAT_NOT_CTRL_1_SHIFT           0
+#define DUT_STAT_NOT_CTRL_1_SIGNED          0
+
+/*
+	Register DUT_CONTROL_2
+*/
+#define TCF_CLK_CTRL_DUT_CONTROL_2          0x0120
+#define DUT_CTRL_2_MASK                     0xFFFFFFFFU
+#define DUT_CTRL_2_SHIFT                    0
+#define DUT_CTRL_2_SIGNED                   0
+
+/*
+	Register DUT_STATUS_2
+*/
+#define TCF_CLK_CTRL_DUT_STATUS_2           0x0128
+#define DUT_STATUS_2_MASK                   0xFFFFFFFFU
+#define DUT_STATUS_2_SHIFT                  0
+#define DUT_STATUS_2_SIGNED                 0
+
+/*
+	Register DUT_CTRL_NOT_STAT_2
+*/
+#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_2    0x0130
+#define DUT_CTRL_NOT_STAT_2_MASK            0xFFFFFFFFU
+#define DUT_CTRL_NOT_STAT_2_SHIFT           0
+#define DUT_CTRL_NOT_STAT_2_SIGNED          0
+
+/*
+	Register BUS_CAP_BASE_ADDR
+*/
+#define TCF_CLK_CTRL_BUS_CAP_BASE_ADDR      0x0138
+#define BUS_CAP_BASE_ADDR_MASK              0xFFFFFFFFU
+#define BUS_CAP_BASE_ADDR_SHIFT             0
+#define BUS_CAP_BASE_ADDR_SIGNED            0
+
+/*
+	Register BUS_CAP_ENABLE
+*/
+#define TCF_CLK_CTRL_BUS_CAP_ENABLE         0x0140
+#define BUS_CAP_ENABLE_MASK                 0x00000001U
+#define BUS_CAP_ENABLE_SHIFT                0
+#define BUS_CAP_ENABLE_SIGNED               0
+
+/*
+	Register BUS_CAP_COUNT
+*/
+#define TCF_CLK_CTRL_BUS_CAP_COUNT          0x0148
+#define BUS_CAP_COUNT_MASK                  0xFFFFFFFFU
+#define BUS_CAP_COUNT_SHIFT                 0
+#define BUS_CAP_COUNT_SIGNED                0
+
+/*
+	Register DCM_LOCK_STATUS
+*/
+#define TCF_CLK_CTRL_DCM_LOCK_STATUS        0x0150
+#define DCM_LOCK_STATUS_MASK                0x00000007U
+#define DCM_LOCK_STATUS_SHIFT               0
+#define DCM_LOCK_STATUS_SIGNED              0
+
+/*
+	Register AUX_DUT_RESETNS
+*/
+#define TCF_CLK_CTRL_AUX_DUT_RESETNS        0x0158
+#define AUX_DUT_RESETNS_MASK                0x0000000FU
+#define AUX_DUT_RESETNS_SHIFT               0
+#define AUX_DUT_RESETNS_SIGNED              0
+
+/*
+	Register TCF_SPI_MST_ADDR_RDNWR
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR 0x0160
+#define TCF_SPI_MST_ADDR_MASK               0x00000FFFU
+#define TCF_SPI_MST_ADDR_SHIFT              0
+#define TCF_SPI_MST_ADDR_SIGNED             0
+
+#define TCF_SPI_MST_RDNWR_MASK              0x00001000U
+#define TCF_SPI_MST_RDNWR_SHIFT             12
+#define TCF_SPI_MST_RDNWR_SIGNED            0
+
+#define TCF_SPI_MST_SLAVE_ID_MASK           0x00010000U
+#define TCF_SPI_MST_SLAVE_ID_SHIFT          16
+#define TCF_SPI_MST_SLAVE_ID_SIGNED         0
+
+/*
+	Register TCF_SPI_MST_WDATA
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_WDATA      0x0168
+#define TCF_SPI_MST_WDATA_MASK              0xFFFFFFFFU
+#define TCF_SPI_MST_WDATA_SHIFT             0
+#define TCF_SPI_MST_WDATA_SIGNED            0
+
+/*
+	Register TCF_SPI_MST_RDATA
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_RDATA      0x0170
+#define TCF_SPI_MST_RDATA_MASK              0xFFFFFFFFU
+#define TCF_SPI_MST_RDATA_SHIFT             0
+#define TCF_SPI_MST_RDATA_SIGNED            0
+
+/*
+	Register TCF_SPI_MST_STATUS
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_STATUS     0x0178
+#define TCF_SPI_MST_STATUS_MASK             0x0000000FU
+#define TCF_SPI_MST_STATUS_SHIFT            0
+#define TCF_SPI_MST_STATUS_SIGNED           0
+
+/*
+	Register TCF_SPI_MST_GO
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_GO         0x0180
+#define TCF_SPI_MST_GO_MASK                 0x00000001U
+#define TCF_SPI_MST_GO_SHIFT                0
+#define TCF_SPI_MST_GO_SIGNED               0
+
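+/*
+	Illustrative sequence inferred from the register names (not taken from
+	hardware documentation): a write through the SPI master would program
+	TCF_SPI_MST_ADDR_RDNWR with the slave address and RDNWR cleared, load
+	TCF_SPI_MST_WDATA, write 1 to TCF_SPI_MST_GO, then poll
+	TCF_SPI_MST_STATUS until the transfer completes.
+*/
+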
+/*
+	Register EXT_SIG_CTRL
+*/
+#define TCF_CLK_CTRL_EXT_SIG_CTRL           0x0188
+#define EXT_SYS_REQ_SIG_START_MASK          0x00000001U
+#define EXT_SYS_REQ_SIG_START_SHIFT         0
+#define EXT_SYS_REQ_SIG_START_SIGNED        0
+
+#define EXT_SYS_RD_SIG_START_MASK           0x00000002U
+#define EXT_SYS_RD_SIG_START_SHIFT          1
+#define EXT_SYS_RD_SIG_START_SIGNED         0
+
+#define EXT_MEM_REQ_SIG_START_MASK          0x00000004U
+#define EXT_MEM_REQ_SIG_START_SHIFT         2
+#define EXT_MEM_REQ_SIG_START_SIGNED        0
+
+#define EXT_MEM_RD_SIG_START_MASK           0x00000008U
+#define EXT_MEM_RD_SIG_START_SHIFT          3
+#define EXT_MEM_RD_SIG_START_SIGNED         0
+
+/*
+	Register EXT_SYS_REQ_SIG
+*/
+#define TCF_CLK_CTRL_EXT_SYS_REQ_SIG        0x0190
+#define EXT_SYS_REQ_SIG_MASK                0xFFFFFFFFU
+#define EXT_SYS_REQ_SIG_SHIFT               0
+#define EXT_SYS_REQ_SIG_SIGNED              0
+
+/*
+	Register EXT_SYS_RD_SIG
+*/
+#define TCF_CLK_CTRL_EXT_SYS_RD_SIG         0x0198
+#define EXT_SYS_RD_SIG_MASK                 0xFFFFFFFFU
+#define EXT_SYS_RD_SIG_SHIFT                0
+#define EXT_SYS_RD_SIG_SIGNED               0
+
+/*
+	Register EXT_MEM_REQ_SIG
+*/
+#define TCF_CLK_CTRL_EXT_MEM_REQ_SIG        0x01A0
+#define EXT_MEM_REQ_SIG_MASK                0xFFFFFFFFU
+#define EXT_MEM_REQ_SIG_SHIFT               0
+#define EXT_MEM_REQ_SIG_SIGNED              0
+
+/*
+	Register EXT_MEM_RD_SIG
+*/
+#define TCF_CLK_CTRL_EXT_MEM_RD_SIG         0x01A8
+#define EXT_MEM_RD_SIG_MASK                 0xFFFFFFFFU
+#define EXT_MEM_RD_SIG_SHIFT                0
+#define EXT_MEM_RD_SIG_SIGNED               0
+
+/*
+	Register EXT_SYS_REQ_WR_CNT
+*/
+#define TCF_CLK_CTRL_EXT_SYS_REQ_WR_CNT     0x01B0
+#define EXT_SYS_REQ_WR_CNT_MASK             0xFFFFFFFFU
+#define EXT_SYS_REQ_WR_CNT_SHIFT            0
+#define EXT_SYS_REQ_WR_CNT_SIGNED           0
+
+/*
+	Register EXT_SYS_REQ_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_SYS_REQ_RD_CNT     0x01B8
+#define EXT_SYS_REQ_RD_CNT_MASK             0xFFFFFFFFU
+#define EXT_SYS_REQ_RD_CNT_SHIFT            0
+#define EXT_SYS_REQ_RD_CNT_SIGNED           0
+
+/*
+	Register EXT_SYS_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_SYS_RD_CNT         0x01C0
+#define EXT_SYS_RD_CNT_MASK                 0xFFFFFFFFU
+#define EXT_SYS_RD_CNT_SHIFT                0
+#define EXT_SYS_RD_CNT_SIGNED               0
+
+/*
+	Register EXT_MEM_REQ_WR_CNT
+*/
+#define TCF_CLK_CTRL_EXT_MEM_REQ_WR_CNT     0x01C8
+#define EXT_MEM_REQ_WR_CNT_MASK             0xFFFFFFFFU
+#define EXT_MEM_REQ_WR_CNT_SHIFT            0
+#define EXT_MEM_REQ_WR_CNT_SIGNED           0
+
+/*
+	Register EXT_MEM_REQ_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_MEM_REQ_RD_CNT     0x01D0
+#define EXT_MEM_REQ_RD_CNT_MASK             0xFFFFFFFFU
+#define EXT_MEM_REQ_RD_CNT_SHIFT            0
+#define EXT_MEM_REQ_RD_CNT_SIGNED           0
+
+/*
+	Register EXT_MEM_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_MEM_RD_CNT         0x01D8
+#define EXT_MEM_RD_CNT_MASK                 0xFFFFFFFFU
+#define EXT_MEM_RD_CNT_SHIFT                0
+#define EXT_MEM_RD_CNT_SIGNED               0
+
+/*
+	Register TCF_CORE_TARGET_BUILD_CFG
+*/
+#define TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG 0x01E0
+#define TCF_CORE_TARGET_BUILD_ID_MASK       0x000000FFU
+#define TCF_CORE_TARGET_BUILD_ID_SHIFT      0
+#define TCF_CORE_TARGET_BUILD_ID_SIGNED     0
+
+/*
+	Register MEM_THROUGH_SYS
+*/
+#define TCF_CLK_CTRL_MEM_THROUGH_SYS        0x01E8
+#define MEM_THROUGH_SYS_MASK                0x00000001U
+#define MEM_THROUGH_SYS_SHIFT               0
+#define MEM_THROUGH_SYS_SIGNED              0
+
+/*
+	Register HOST_PHY_OFFSET
+*/
+#define TCF_CLK_CTRL_HOST_PHY_OFFSET        0x01F0
+#define HOST_PHY_OFFSET_MASK                0xFFFFFFFFU
+#define HOST_PHY_OFFSET_SHIFT               0
+#define HOST_PHY_OFFSET_SIGNED              0
+
+#endif /* !defined(_TCF_CLK_CTRL_H_) */
+
+/*****************************************************************************
+ End of file (tcf_clk_ctrl.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/tcf_pll.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/tcf_pll.h
new file mode 100644
index 0000000..fa0f823
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/tcf_pll.h
@@ -0,0 +1,311 @@
+/*************************************************************************/ /*!
+@Title          Test Chip Framework PLL register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Autogenerated C -- do not edit
+                Generated from tcf_pll.def
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_TCF_PLL_H_)
+#define _TCF_PLL_H_
+
+/*
+	Register PLL_DDR2_CLK0
+*/
+#define TCF_PLL_PLL_DDR2_CLK0               0x0000
+#define DDR2_PLL_CLK0_PHS_MASK              0x00300000U
+#define DDR2_PLL_CLK0_PHS_SHIFT             20
+#define DDR2_PLL_CLK0_PHS_SIGNED            0
+
+#define DDR2_PLL_CLK0_MS_MASK               0x00030000U
+#define DDR2_PLL_CLK0_MS_SHIFT              16
+#define DDR2_PLL_CLK0_MS_SIGNED             0
+
+#define DDR2_PLL_CLK0_FREQ_MASK             0x000001FFU
+#define DDR2_PLL_CLK0_FREQ_SHIFT            0
+#define DDR2_PLL_CLK0_FREQ_SIGNED           0
+
+/*
+	Register PLL_DDR2_CLK1TO5
+*/
+#define TCF_PLL_PLL_DDR2_CLK1TO5            0x0008
+#define DDR2_PLL_CLK1TO5_PHS_MASK           0x3FF00000U
+#define DDR2_PLL_CLK1TO5_PHS_SHIFT          20
+#define DDR2_PLL_CLK1TO5_PHS_SIGNED         0
+
+#define DDR2_PLL_CLK1TO5_MS_MASK            0x000FFC00U
+#define DDR2_PLL_CLK1TO5_MS_SHIFT           10
+#define DDR2_PLL_CLK1TO5_MS_SIGNED          0
+
+#define DDR2_PLL_CLK1TO5_FREQ_MASK          0x000003FFU
+#define DDR2_PLL_CLK1TO5_FREQ_SHIFT         0
+#define DDR2_PLL_CLK1TO5_FREQ_SIGNED        0
+
+/*
+	Register PLL_DDR2_DRP_GO
+*/
+#define TCF_PLL_PLL_DDR2_DRP_GO             0x0010
+#define PLL_DDR2_DRP_GO_MASK                0x00000001U
+#define PLL_DDR2_DRP_GO_SHIFT               0
+#define PLL_DDR2_DRP_GO_SIGNED              0
+
+/*
+	Register PLL_PDP_CLK0
+*/
+#define TCF_PLL_PLL_PDP_CLK0                0x0018
+#define PDP_PLL_CLK0_PHS_MASK               0x00300000U
+#define PDP_PLL_CLK0_PHS_SHIFT              20
+#define PDP_PLL_CLK0_PHS_SIGNED             0
+
+#define PDP_PLL_CLK0_MS_MASK                0x00030000U
+#define PDP_PLL_CLK0_MS_SHIFT               16
+#define PDP_PLL_CLK0_MS_SIGNED              0
+
+#define PDP_PLL_CLK0_FREQ_MASK              0x000001FFU
+#define PDP_PLL_CLK0_FREQ_SHIFT             0
+#define PDP_PLL_CLK0_FREQ_SIGNED            0
+
+/*
+	Register PLL_PDP_CLK1TO5
+*/
+#define TCF_PLL_PLL_PDP_CLK1TO5             0x0020
+#define PDP_PLL_CLK1TO5_PHS_MASK            0x3FF00000U
+#define PDP_PLL_CLK1TO5_PHS_SHIFT           20
+#define PDP_PLL_CLK1TO5_PHS_SIGNED          0
+
+#define PDP_PLL_CLK1TO5_MS_MASK             0x000FFC00U
+#define PDP_PLL_CLK1TO5_MS_SHIFT            10
+#define PDP_PLL_CLK1TO5_MS_SIGNED           0
+
+#define PDP_PLL_CLK1TO5_FREQ_MASK           0x000003FFU
+#define PDP_PLL_CLK1TO5_FREQ_SHIFT          0
+#define PDP_PLL_CLK1TO5_FREQ_SIGNED         0
+
+/*
+	Register PLL_PDP_DRP_GO
+*/
+#define TCF_PLL_PLL_PDP_DRP_GO              0x0028
+#define PLL_PDP_DRP_GO_MASK                 0x00000001U
+#define PLL_PDP_DRP_GO_SHIFT                0
+#define PLL_PDP_DRP_GO_SIGNED               0
+
+/*
+	Register PLL_PDP2_CLK0
+*/
+#define TCF_PLL_PLL_PDP2_CLK0               0x0030
+#define PDP2_PLL_CLK0_PHS_MASK              0x00300000U
+#define PDP2_PLL_CLK0_PHS_SHIFT             20
+#define PDP2_PLL_CLK0_PHS_SIGNED            0
+
+#define PDP2_PLL_CLK0_MS_MASK               0x00030000U
+#define PDP2_PLL_CLK0_MS_SHIFT              16
+#define PDP2_PLL_CLK0_MS_SIGNED             0
+
+#define PDP2_PLL_CLK0_FREQ_MASK             0x000001FFU
+#define PDP2_PLL_CLK0_FREQ_SHIFT            0
+#define PDP2_PLL_CLK0_FREQ_SIGNED           0
+
+/*
+	Register PLL_PDP2_CLK1TO5
+*/
+#define TCF_PLL_PLL_PDP2_CLK1TO5            0x0038
+#define PDP2_PLL_CLK1TO5_PHS_MASK           0x3FF00000U
+#define PDP2_PLL_CLK1TO5_PHS_SHIFT          20
+#define PDP2_PLL_CLK1TO5_PHS_SIGNED         0
+
+#define PDP2_PLL_CLK1TO5_MS_MASK            0x000FFC00U
+#define PDP2_PLL_CLK1TO5_MS_SHIFT           10
+#define PDP2_PLL_CLK1TO5_MS_SIGNED          0
+
+#define PDP2_PLL_CLK1TO5_FREQ_MASK          0x000003FFU
+#define PDP2_PLL_CLK1TO5_FREQ_SHIFT         0
+#define PDP2_PLL_CLK1TO5_FREQ_SIGNED        0
+
+/*
+	Register PLL_PDP2_DRP_GO
+*/
+#define TCF_PLL_PLL_PDP2_DRP_GO             0x0040
+#define PLL_PDP2_DRP_GO_MASK                0x00000001U
+#define PLL_PDP2_DRP_GO_SHIFT               0
+#define PLL_PDP2_DRP_GO_SIGNED              0
+
+/*
+	Register PLL_CORE_CLK0
+*/
+#define TCF_PLL_PLL_CORE_CLK0               0x0048
+#define CORE_PLL_CLK0_PHS_MASK              0x00300000U
+#define CORE_PLL_CLK0_PHS_SHIFT             20
+#define CORE_PLL_CLK0_PHS_SIGNED            0
+
+#define CORE_PLL_CLK0_MS_MASK               0x00030000U
+#define CORE_PLL_CLK0_MS_SHIFT              16
+#define CORE_PLL_CLK0_MS_SIGNED             0
+
+#define CORE_PLL_CLK0_FREQ_MASK             0x000001FFU
+#define CORE_PLL_CLK0_FREQ_SHIFT            0
+#define CORE_PLL_CLK0_FREQ_SIGNED           0
+
+/*
+	Register PLL_CORE_CLK1TO5
+*/
+#define TCF_PLL_PLL_CORE_CLK1TO5            0x0050
+#define CORE_PLL_CLK1TO5_PHS_MASK           0x3FF00000U
+#define CORE_PLL_CLK1TO5_PHS_SHIFT          20
+#define CORE_PLL_CLK1TO5_PHS_SIGNED         0
+
+#define CORE_PLL_CLK1TO5_MS_MASK            0x000FFC00U
+#define CORE_PLL_CLK1TO5_MS_SHIFT           10
+#define CORE_PLL_CLK1TO5_MS_SIGNED          0
+
+#define CORE_PLL_CLK1TO5_FREQ_MASK          0x000003FFU
+#define CORE_PLL_CLK1TO5_FREQ_SHIFT         0
+#define CORE_PLL_CLK1TO5_FREQ_SIGNED        0
+
+/*
+	Register PLL_CORE_DRP_GO
+*/
+#define TCF_PLL_PLL_CORE_DRP_GO             0x0058
+#define PLL_CORE_DRP_GO_MASK                0x00000001U
+#define PLL_CORE_DRP_GO_SHIFT               0
+#define PLL_CORE_DRP_GO_SIGNED              0
+
+/*
+	Register PLL_SYSIF_CLK0
+*/
+#define TCF_PLL_PLL_SYSIF_CLK0              0x0060
+#define SYSIF_PLL_CLK0_PHS_MASK             0x00300000U
+#define SYSIF_PLL_CLK0_PHS_SHIFT            20
+#define SYSIF_PLL_CLK0_PHS_SIGNED           0
+
+#define SYSIF_PLL_CLK0_MS_MASK              0x00030000U
+#define SYSIF_PLL_CLK0_MS_SHIFT             16
+#define SYSIF_PLL_CLK0_MS_SIGNED            0
+
+#define SYSIF_PLL_CLK0_FREQ_MASK            0x000001FFU
+#define SYSIF_PLL_CLK0_FREQ_SHIFT           0
+#define SYSIF_PLL_CLK0_FREQ_SIGNED          0
+
+/*
+	Register PLL_SYSIF_CLK1TO5
+*/
+#define TCF_PLL_PLL_SYSIF_CLK1TO5           0x0068
+#define SYSIF_PLL_CLK1TO5_PHS_MASK          0x3FF00000U
+#define SYSIF_PLL_CLK1TO5_PHS_SHIFT         20
+#define SYSIF_PLL_CLK1TO5_PHS_SIGNED        0
+
+#define SYSIF_PLL_CLK1TO5_MS_MASK           0x000FFC00U
+#define SYSIF_PLL_CLK1TO5_MS_SHIFT          10
+#define SYSIF_PLL_CLK1TO5_MS_SIGNED         0
+
+#define SYSIF_PLL_CLK1TO5_FREQ_MASK         0x000003FFU
+#define SYSIF_PLL_CLK1TO5_FREQ_SHIFT        0
+#define SYSIF_PLL_CLK1TO5_FREQ_SIGNED       0
+
+/*
+	Register PLL_SYS_DRP_GO
+*/
+#define TCF_PLL_PLL_SYS_DRP_GO              0x0070
+#define PLL_SYS_DRP_GO_MASK                 0x00000001U
+#define PLL_SYS_DRP_GO_SHIFT                0
+#define PLL_SYS_DRP_GO_SIGNED               0
+
+/*
+	Register PLL_MEMIF_CLK0
+*/
+#define TCF_PLL_PLL_MEMIF_CLK0              0x0078
+#define MEMIF_PLL_CLK0_PHS_MASK             0x00300000U
+#define MEMIF_PLL_CLK0_PHS_SHIFT            20
+#define MEMIF_PLL_CLK0_PHS_SIGNED           0
+
+#define MEMIF_PLL_CLK0_MS_MASK              0x00030000U
+#define MEMIF_PLL_CLK0_MS_SHIFT             16
+#define MEMIF_PLL_CLK0_MS_SIGNED            0
+
+#define MEMIF_PLL_CLK0_FREQ_MASK            0x000001FFU
+#define MEMIF_PLL_CLK0_FREQ_SHIFT           0
+#define MEMIF_PLL_CLK0_FREQ_SIGNED          0
+
+/*
+	Register PLL_MEMIF_CLK1TO5
+*/
+#define TCF_PLL_PLL_MEMIF_CLK1TO5           0x0080
+#define MEMIF_PLL_CLK1TO5_PHS_MASK          0x3FF00000U
+#define MEMIF_PLL_CLK1TO5_PHS_SHIFT         20
+#define MEMIF_PLL_CLK1TO5_PHS_SIGNED        0
+
+#define MEMIF_PLL_CLK1TO5_MS_MASK           0x000FFC00U
+#define MEMIF_PLL_CLK1TO5_MS_SHIFT          10
+#define MEMIF_PLL_CLK1TO5_MS_SIGNED         0
+
+#define MEMIF_PLL_CLK1TO5_FREQ_MASK         0x000003FFU
+#define MEMIF_PLL_CLK1TO5_FREQ_SHIFT        0
+#define MEMIF_PLL_CLK1TO5_FREQ_SIGNED       0
+
+/*
+	Register PLL_MEM_DRP_GO
+*/
+#define TCF_PLL_PLL_MEM_DRP_GO              0x0088
+#define PLL_MEM_DRP_GO_MASK                 0x00000001U
+#define PLL_MEM_DRP_GO_SHIFT                0
+#define PLL_MEM_DRP_GO_SIGNED               0
+
+/*
+	Register PLL_ALL_DRP_GO
+*/
+#define TCF_PLL_PLL_ALL_DRP_GO              0x0090
+#define PLL_ALL_DRP_GO_MASK                 0x00000001U
+#define PLL_ALL_DRP_GO_SHIFT                0
+#define PLL_ALL_DRP_GO_SIGNED               0
+
+/*
+	Register PLL_DRP_STATUS
+*/
+#define TCF_PLL_PLL_DRP_STATUS              0x0098
+#define PLL_LOCKS_MASK                      0x00003F00U
+#define PLL_LOCKS_SHIFT                     8
+#define PLL_LOCKS_SIGNED                    0
+
+#define PLL_DRP_GOOD_MASK                   0x0000003FU
+#define PLL_DRP_GOOD_SHIFT                  0
+#define PLL_DRP_GOOD_SIGNED                 0
+
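+/*
+	Illustrative check inferred from the field names: after pulsing one of
+	the *_DRP_GO registers above, software would typically poll
+	PLL_DRP_STATUS, e.g.
+
+	  locked = (val & PLL_LOCKS_MASK) >> PLL_LOCKS_SHIFT;
+	  good = (val & PLL_DRP_GOOD_MASK) >> PLL_DRP_GOOD_SHIFT;
+*/
+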
+#endif /* !defined(_TCF_PLL_H_) */
+
+/*****************************************************************************
+ End of file (tcf_pll.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/tcf_rgbpdp_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/tcf_rgbpdp_regs.h
new file mode 100644
index 0000000..f3e6d54
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/include/system/rgx_tc/tcf_rgbpdp_regs.h
@@ -0,0 +1,559 @@
+/*************************************************************************/ /*!
+@Title          Test Chip Framework PDP register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Autogenerated C -- do not edit
+                Generated from: tcf_rgbpdp_regs.def
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_TCF_RGBPDP_REGS_H_)
+#define _TCF_RGBPDP_REGS_H_
+
+/*
+	Register PVR_TCF_RGBPDP_STR1SURF
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF  0x0000
+#define STR1HEIGHT_MASK                     0x000007FFU
+#define STR1HEIGHT_SHIFT                    0
+#define STR1HEIGHT_SIGNED                   0
+
+#define STR1WIDTH_MASK                      0x003FF800U
+#define STR1WIDTH_SHIFT                     11
+#define STR1WIDTH_SIGNED                    0
+
+#define STR1PIXFMT_MASK                     0x0F000000U
+#define STR1PIXFMT_SHIFT                    24
+#define STR1PIXFMT_SIGNED                   0
+
+/*
+	Register PVR_TCF_RGBPDP_STR1ADDRCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL 0x0004
+#define STR1BASE_MASK                       0x03FFFFFFU
+#define STR1BASE_SHIFT                      0
+#define STR1BASE_SIGNED                     0
+
+#define STR1INTFIELD_MASK                   0x40000000U
+#define STR1INTFIELD_SHIFT                  30
+#define STR1INTFIELD_SIGNED                 0
+
+#define STR1STREN_MASK                      0x80000000U
+#define STR1STREN_SHIFT                     31
+#define STR1STREN_SIGNED                    0
+
+/*
+	Register PVR_PDP_STR1POSN
+*/
+#define TCF_RGBPDP_PVR_PDP_STR1POSN         0x0008
+#define STR1STRIDE_MASK                     0x000003FFU
+#define STR1STRIDE_SHIFT                    0
+#define STR1STRIDE_SIGNED                   0
+
+/*
+	Register PVR_TCF_RGBPDP_MEMCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_MEMCTRL   0x000C
+#define MEMREFRESH_MASK                     0xC0000000U
+#define MEMREFRESH_SHIFT                    30
+#define MEMREFRESH_SIGNED                   0
+
+/*
+	Register PVR_TCF_RGBPDP_STRCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL   0x0010
+#define BURSTLEN_GFX_MASK                   0x000000FFU
+#define BURSTLEN_GFX_SHIFT                  0
+#define BURSTLEN_GFX_SIGNED                 0
+
+#define THRESHOLD_GFX_MASK                  0x0000FF00U
+#define THRESHOLD_GFX_SHIFT                 8
+#define THRESHOLD_GFX_SIGNED                0
+
+/*
+	Register PVR_TCF_RGBPDP_SYNCCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL  0x0014
+#define HSDIS_MASK                          0x00000001U
+#define HSDIS_SHIFT                         0
+#define HSDIS_SIGNED                        0
+
+#define HSPOL_MASK                          0x00000002U
+#define HSPOL_SHIFT                         1
+#define HSPOL_SIGNED                        0
+
+#define VSDIS_MASK                          0x00000004U
+#define VSDIS_SHIFT                         2
+#define VSDIS_SIGNED                        0
+
+#define VSPOL_MASK                          0x00000008U
+#define VSPOL_SHIFT                         3
+#define VSPOL_SIGNED                        0
+
+#define BLNKDIS_MASK                        0x00000010U
+#define BLNKDIS_SHIFT                       4
+#define BLNKDIS_SIGNED                      0
+
+#define BLNKPOL_MASK                        0x00000020U
+#define BLNKPOL_SHIFT                       5
+#define BLNKPOL_SIGNED                      0
+
+#define HS_SLAVE_MASK                       0x00000040U
+#define HS_SLAVE_SHIFT                      6
+#define HS_SLAVE_SIGNED                     0
+
+#define VS_SLAVE_MASK                       0x00000080U
+#define VS_SLAVE_SHIFT                      7
+#define VS_SLAVE_SIGNED                     0
+
+#define INTERLACE_MASK                      0x00000100U
+#define INTERLACE_SHIFT                     8
+#define INTERLACE_SIGNED                    0
+
+#define FIELDPOL_MASK                       0x00000200U
+#define FIELDPOL_SHIFT                      9
+#define FIELDPOL_SIGNED                     0
+
+#define CLKPOL_MASK                         0x00000800U
+#define CLKPOL_SHIFT                        11
+#define CLKPOL_SIGNED                       0
+
+#define CSYNC_EN_MASK                       0x00001000U
+#define CSYNC_EN_SHIFT                      12
+#define CSYNC_EN_SIGNED                     0
+
+#define FIELD_EN_MASK                       0x00002000U
+#define FIELD_EN_SHIFT                      13
+#define FIELD_EN_SIGNED                     0
+
+#define UPDWAIT_MASK                        0x000F0000U
+#define UPDWAIT_SHIFT                       16
+#define UPDWAIT_SIGNED                      0
+
+#define UPDCTRL_MASK                        0x01000000U
+#define UPDCTRL_SHIFT                       24
+#define UPDCTRL_SIGNED                      0
+
+#define UPDINTCTRL_MASK                     0x02000000U
+#define UPDINTCTRL_SHIFT                    25
+#define UPDINTCTRL_SIGNED                   0
+
+#define UPDSYNCTRL_MASK                     0x04000000U
+#define UPDSYNCTRL_SHIFT                    26
+#define UPDSYNCTRL_SIGNED                   0
+
+#define POWERDN_MASK                        0x10000000U
+#define POWERDN_SHIFT                       28
+#define POWERDN_SIGNED                      0
+
+#define DISP_RST_MASK                       0x20000000U
+#define DISP_RST_SHIFT                      29
+#define DISP_RST_SIGNED                     0
+
+#define SYNCACTIVE_MASK                     0x80000000U
+#define SYNCACTIVE_SHIFT                    31
+#define SYNCACTIVE_SIGNED                   0
+
+/*
+	Register PVR_TCF_RGBPDP_BORDCOL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL   0x0018
+#define BORDCOL_MASK                        0x00FFFFFFU
+#define BORDCOL_SHIFT                       0
+#define BORDCOL_SIGNED                      0
+
+/*
+	Register PVR_TCF_RGBPDP_UPDCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL   0x001C
+#define UPDFIELD_MASK                       0x00000001U
+#define UPDFIELD_SHIFT                      0
+#define UPDFIELD_SIGNED                     0
+
+/*
+	Register PVR_TCF_RGBPDP_HSYNC1
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1    0x0020
+#define HT_MASK                             0x00000FFFU
+#define HT_SHIFT                            0
+#define HT_SIGNED                           0
+
+#define HBPS_MASK                           0x0FFF0000U
+#define HBPS_SHIFT                          16
+#define HBPS_SIGNED                         0
+
+/*
+	Register PVR_TCF_RGBPDP_HSYNC2
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2    0x0024
+#define HLBS_MASK                           0x00000FFFU
+#define HLBS_SHIFT                          0
+#define HLBS_SIGNED                         0
+
+#define HAS_MASK                            0x0FFF0000U
+#define HAS_SHIFT                           16
+#define HAS_SIGNED                          0
+
+/*
+	Register PVR_TCF_RGBPDP_HSYNC3
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3    0x0028
+#define HRBS_MASK                           0x00000FFFU
+#define HRBS_SHIFT                          0
+#define HRBS_SIGNED                         0
+
+#define HFPS_MASK                           0x0FFF0000U
+#define HFPS_SHIFT                          16
+#define HFPS_SIGNED                         0
+
+/*
+	Register PVR_TCF_RGBPDP_VSYNC1
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1    0x002C
+#define VT_MASK                             0x00000FFFU
+#define VT_SHIFT                            0
+#define VT_SIGNED                           0
+
+#define VBPS_MASK                           0x0FFF0000U
+#define VBPS_SHIFT                          16
+#define VBPS_SIGNED                         0
+
+/*
+	Register PVR_TCF_RGBPDP_VSYNC2
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2    0x0030
+#define VTBS_MASK                           0x00000FFFU
+#define VTBS_SHIFT                          0
+#define VTBS_SIGNED                         0
+
+#define VAS_MASK                            0x0FFF0000U
+#define VAS_SHIFT                           16
+#define VAS_SIGNED                          0
+
+/*
+	Register PVR_TCF_RGBPDP_VSYNC3
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3    0x0034
+#define VBBS_MASK                           0x00000FFFU
+#define VBBS_SHIFT                          0
+#define VBBS_SIGNED                         0
+
+#define VFPS_MASK                           0x0FFF0000U
+#define VFPS_SHIFT                          16
+#define VFPS_SIGNED                         0
+
+/*
+	Register PVR_TCF_RGBPDP_HDECTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL   0x0038
+#define HDEF_MASK                           0x00000FFFU
+#define HDEF_SHIFT                          0
+#define HDEF_SIGNED                         0
+
+#define HDES_MASK                           0x0FFF0000U
+#define HDES_SHIFT                          16
+#define HDES_SIGNED                         0
+
+/*
+	Register PVR_TCF_RGBPDP_VDECTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL   0x003C
+#define VDEF_MASK                           0x00000FFFU
+#define VDEF_SHIFT                          0
+#define VDEF_SIGNED                         0
+
+#define VDES_MASK                           0x0FFF0000U
+#define VDES_SHIFT                          16
+#define VDES_SIGNED                         0
+
+/*
+	Register PVR_TCF_RGBPDP_VEVENT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT    0x0040
+#define VFETCH_MASK                         0x00000FFFU
+#define VFETCH_SHIFT                        0
+#define VFETCH_SIGNED                       0
+
+#define VEVENT_MASK                         0x0FFF0000U
+#define VEVENT_SHIFT                        16
+#define VEVENT_SIGNED                       0
+
+/*
+	Register PVR_TCF_RGBPDP_OPMASK
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_OPMASK    0x0044
+#define MASKR_MASK                          0x000000FFU
+#define MASKR_SHIFT                         0
+#define MASKR_SIGNED                        0
+
+#define MASKG_MASK                          0x0000FF00U
+#define MASKG_SHIFT                         8
+#define MASKG_SIGNED                        0
+
+#define MASKB_MASK                          0x00FF0000U
+#define MASKB_SHIFT                         16
+#define MASKB_SIGNED                        0
+
+#define BLANKLEVEL_MASK                     0x40000000U
+#define BLANKLEVEL_SHIFT                    30
+#define BLANKLEVEL_SIGNED                   0
+
+#define MASKLEVEL_MASK                      0x80000000U
+#define MASKLEVEL_SHIFT                     31
+#define MASKLEVEL_SIGNED                    0
+
+/*
+	Register PVR_TCF_RGBPDP_INTSTAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT   0x0048
+#define INTS_HBLNK0_MASK                    0x00000001U
+#define INTS_HBLNK0_SHIFT                   0
+#define INTS_HBLNK0_SIGNED                  0
+
+#define INTS_HBLNK1_MASK                    0x00000002U
+#define INTS_HBLNK1_SHIFT                   1
+#define INTS_HBLNK1_SIGNED                  0
+
+#define INTS_VBLNK0_MASK                    0x00000004U
+#define INTS_VBLNK0_SHIFT                   2
+#define INTS_VBLNK0_SIGNED                  0
+
+#define INTS_VBLNK1_MASK                    0x00000008U
+#define INTS_VBLNK1_SHIFT                   3
+#define INTS_VBLNK1_SIGNED                  0
+
+#define INTS_STR1URUN_MASK                  0x00000010U
+#define INTS_STR1URUN_SHIFT                 4
+#define INTS_STR1URUN_SIGNED                0
+
+#define INTS_STR1ORUN_MASK                  0x00000020U
+#define INTS_STR1ORUN_SHIFT                 5
+#define INTS_STR1ORUN_SIGNED                0
+
+#define INTS_DISPURUN_MASK                  0x00000040U
+#define INTS_DISPURUN_SHIFT                 6
+#define INTS_DISPURUN_SIGNED                0
+
+/*
+	Register PVR_TCF_RGBPDP_INTENAB
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB   0x004C
+#define INTEN_HBLNK0_MASK                   0x00000001U
+#define INTEN_HBLNK0_SHIFT                  0
+#define INTEN_HBLNK0_SIGNED                 0
+
+#define INTEN_HBLNK1_MASK                   0x00000002U
+#define INTEN_HBLNK1_SHIFT                  1
+#define INTEN_HBLNK1_SIGNED                 0
+
+#define INTEN_VBLNK0_MASK                   0x00000004U
+#define INTEN_VBLNK0_SHIFT                  2
+#define INTEN_VBLNK0_SIGNED                 0
+
+#define INTEN_VBLNK1_MASK                   0x00000008U
+#define INTEN_VBLNK1_SHIFT                  3
+#define INTEN_VBLNK1_SIGNED                 0
+
+#define INTEN_STR1URUN_MASK                 0x00000010U
+#define INTEN_STR1URUN_SHIFT                4
+#define INTEN_STR1URUN_SIGNED               0
+
+#define INTEN_STR1ORUN_MASK                 0x00000020U
+#define INTEN_STR1ORUN_SHIFT                5
+#define INTEN_STR1ORUN_SIGNED               0
+
+#define INTEN_DISPURUN_MASK                 0x00000040U
+#define INTEN_DISPURUN_SHIFT                6
+#define INTEN_DISPURUN_SIGNED               0
+
+/*
+	Register PVR_TCF_RGBPDP_INTCLEAR
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR  0x0050
+#define INTCLR_HBLNK0_MASK                  0x00000001U
+#define INTCLR_HBLNK0_SHIFT                 0
+#define INTCLR_HBLNK0_SIGNED                0
+
+#define INTCLR_HBLNK1_MASK                  0x00000002U
+#define INTCLR_HBLNK1_SHIFT                 1
+#define INTCLR_HBLNK1_SIGNED                0
+
+#define INTCLR_VBLNK0_MASK                  0x00000004U
+#define INTCLR_VBLNK0_SHIFT                 2
+#define INTCLR_VBLNK0_SIGNED                0
+
+#define INTCLR_VBLNK1_MASK                  0x00000008U
+#define INTCLR_VBLNK1_SHIFT                 3
+#define INTCLR_VBLNK1_SIGNED                0
+
+#define INTCLR_STR1URUN_MASK                0x00000010U
+#define INTCLR_STR1URUN_SHIFT               4
+#define INTCLR_STR1URUN_SIGNED              0
+
+#define INTCLR_STR1ORUN_MASK                0x00000020U
+#define INTCLR_STR1ORUN_SHIFT               5
+#define INTCLR_STR1ORUN_SIGNED              0
+
+#define INTCLR_DISPURUN_MASK                0x00000040U
+#define INTCLR_DISPURUN_SHIFT               6
+#define INTCLR_DISPURUN_SIGNED              0
+
+/*
+	Register PVR_TCF_RGBPDP_INTCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCTRL   0x0054
+#define HBLNK_LINENO_MASK                   0x00000FFFU
+#define HBLNK_LINENO_SHIFT                  0
+#define HBLNK_LINENO_SIGNED                 0
+
+#define HBLNK_LINE_MASK                     0x00010000U
+#define HBLNK_LINE_SHIFT                    16
+#define HBLNK_LINE_SIGNED                   0
+
+/*
+	Register PVR_TCF_RGBPDP_SIGNAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_SIGNAT    0x0058
+#define SIGNATURE_MASK                      0xFFFFFFFFU
+#define SIGNATURE_SHIFT                     0
+#define SIGNATURE_SIGNED                    0
+
+/*
+	Register PVR_TCF_RGBPDP_LINESTAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_LINESTAT  0x005C
+#define LINENO_MASK                         0x00000FFFU
+#define LINENO_SHIFT                        0
+#define LINENO_SIGNED                       0
+
+/*
+	Register PVR_TCF_RGBPDP_DBGCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGCTRL   0x0060
+#define DBG_ENAB_MASK                       0x00000001U
+#define DBG_ENAB_SHIFT                      0
+#define DBG_ENAB_SIGNED                     0
+
+#define DBG_READ_MASK                       0x00000002U
+#define DBG_READ_SHIFT                      1
+#define DBG_READ_SIGNED                     0
+
+/*
+	Register PVR_TCF_RGBPDP_DBGDATA
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGDATA   0x0064
+#define DBG_DATA_MASK                       0x00FFFFFFU
+#define DBG_DATA_SHIFT                      0
+#define DBG_DATA_SIGNED                     0
+
+/*
+	Register PVR_TCF_RGBPDP_DBGSIDE
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGSIDE   0x0068
+#define DBG_SIDE_MASK                       0x00000007U
+#define DBG_SIDE_SHIFT                      0
+#define DBG_SIDE_SIGNED                     0
+
+#define DBG_VAL_MASK                        0x00000008U
+#define DBG_VAL_SHIFT                       3
+#define DBG_VAL_SIGNED                      0
+
+/*
+	Register PVR_TCF_RGBPDP_REGLD_STAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_STAT 0x0070
+#define REGLD_ADDROUT_MASK                  0x00FFFFFFU
+#define REGLD_ADDROUT_SHIFT                 0
+#define REGLD_ADDROUT_SIGNED                0
+
+#define REGLD_ADDREN_MASK                   0x80000000U
+#define REGLD_ADDREN_SHIFT                  31
+#define REGLD_ADDREN_SIGNED                 0
+
+/*
+	Register PVR_TCF_RGBPDP_REGLD_CTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_CTRL 0x0074
+#define REGLD_ADDRIN_MASK                   0x00FFFFFFU
+#define REGLD_ADDRIN_SHIFT                  0
+#define REGLD_ADDRIN_SIGNED                 0
+
+#define REGLD_VAL_MASK                      0x01000000U
+#define REGLD_VAL_SHIFT                     24
+#define REGLD_VAL_SIGNED                    0
+
+#define REGLD_ADDRLEN_MASK                  0xFE000000U
+#define REGLD_ADDRLEN_SHIFT                 25
+#define REGLD_ADDRLEN_SIGNED                0
+
+/*
+	Register PVR_TCF_RGBPDP_CORE_ID
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_ID   0x0078
+#define CONFIG_ID_MASK                      0x0000FFFFU
+#define CONFIG_ID_SHIFT                     0
+#define CONFIG_ID_SIGNED                    0
+
+#define CORE_ID_MASK                        0x00FF0000U
+#define CORE_ID_SHIFT                       16
+#define CORE_ID_SIGNED                      0
+
+#define GROUP_ID_MASK                       0xFF000000U
+#define GROUP_ID_SHIFT                      24
+#define GROUP_ID_SIGNED                     0
+
+/*
+	Register PVR_TCF_RGBPDP_CORE_REV
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_REV  0x007C
+#define MAINT_REV_MASK                      0x000000FFU
+#define MAINT_REV_SHIFT                     0
+#define MAINT_REV_SIGNED                    0
+
+#define MINOR_REV_MASK                      0x0000FF00U
+#define MINOR_REV_SHIFT                     8
+#define MINOR_REV_SIGNED                    0
+
+#define MAJOR_REV_MASK                      0x00FF0000U
+#define MAJOR_REV_SHIFT                     16
+#define MAJOR_REV_SIGNED                    0
+
+#endif /* !defined(_TCF_RGBPDP_REGS_H_) */
+
+/*****************************************************************************
+ End of file (tcf_rgbpdp_regs.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/Kbuild.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/Kbuild.mk
new file mode 100644
index 0000000..ec7b97a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/Kbuild.mk
@@ -0,0 +1,122 @@
+########################################################################### ###
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+# 
+# The contents of this file are subject to the MIT license as set out below.
+# 
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+# 
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# 
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+# 
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+# 
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+# 
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+
+ccflags-y := \
+ -I$(TOP)/kernel/drivers/staging/imgtec \
+ -I$(TOP)/kernel/drivers/staging/imgtec/tc \
+ -I$(TOP)/kernel/drivers/staging/imgtec/rk3368 \
+ -I$(TOP)/kernel/drivers/staging/imgtec/plato \
+ -I$(TOP)/kernel/drivers/staging/imgtec/sunxi \
+ -I$(TOP)/include/system/rgx_tc \
+ -I$(TOP)/include/drm \
+ -I$(TOP)/hwdefs \
+ $(ccflags-y)
+
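+# Each <module>-y list below names the objects linked into the matching
+# <module>.ko; the modules themselves are declared in the Linux.mk that
+# sits alongside this file.
+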
+adf_fbdev-y += \
+ kernel/drivers/staging/imgtec/adf_fbdev.o \
+ kernel/drivers/staging/imgtec/adf_common.o
+
+adf_pdp-y += \
+ kernel/drivers/staging/imgtec/tc/adf_pdp.o \
+ kernel/drivers/staging/imgtec/adf_common.o \
+ kernel/drivers/staging/imgtec/debugfs_dma_buf.o
+
+adf_tc5_pdp-y += \
+ kernel/drivers/staging/imgtec/tc/adf_tc5_pdp.o \
+ kernel/drivers/staging/imgtec/adf_common.o \
+ kernel/drivers/staging/imgtec/debugfs_dma_buf.o
+
+tc-y += \
+ kernel/drivers/staging/imgtec/tc/tc_apollo.o \
+ kernel/drivers/staging/imgtec/tc/tc_odin.o \
+ kernel/drivers/staging/imgtec/tc/tc_drv.o
+
+ifeq ($(SUPPORT_APOLLO_FPGA),1)
+tc-y += \
+ kernel/drivers/staging/imgtec/tc/tc_apollo_debugfs.o
+endif
+
+ifeq ($(SUPPORT_ION),1)
+tc-y += \
+ kernel/drivers/staging/imgtec/tc/tc_ion.o \
+ kernel/drivers/staging/imgtec/tc/ion_lma_heap.o \
+ kernel/drivers/staging/imgtec/ion_fbcdc_clear.o
+endif
+
+adf_sunxi-y += \
+ kernel/drivers/staging/imgtec/sunxi/adf_sunxi.o \
+ kernel/drivers/staging/imgtec/adf_common.o
+
+drm_nulldisp-y += \
+ kernel/drivers/staging/imgtec/drm_nulldisp_drv.o \
+ kernel/drivers/staging/imgtec/pvr_sw_fence.o \
+ kernel/drivers/staging/imgtec/drm_nulldisp_netlink.o \
+ kernel/drivers/staging/imgtec/drm_netlink_gem.o
+
+ifeq ($(LMA),1)
+drm_nulldisp-y += \
+ kernel/drivers/staging/imgtec/tc/drm_pdp_gem.o
+else
+drm_nulldisp-y += \
+ kernel/drivers/staging/imgtec/drm_nulldisp_gem.o
+endif
+
+drm_pdp-y += \
+ kernel/drivers/staging/imgtec/tc/drm_pdp_debugfs.o \
+ kernel/drivers/staging/imgtec/tc/drm_pdp_drv.o \
+ kernel/drivers/staging/imgtec/pvr_sw_fence.o \
+ kernel/drivers/staging/imgtec/tc/drm_pdp_gem.o \
+ kernel/drivers/staging/imgtec/tc/drm_pdp_modeset.o \
+ kernel/drivers/staging/imgtec/tc/drm_pdp_crtc.o \
+ kernel/drivers/staging/imgtec/tc/drm_pdp_dvi.o \
+ kernel/drivers/staging/imgtec/tc/drm_pdp_tmds.o
+
+plato-y += \
+ kernel/drivers/staging/imgtec/plato/plato_drv.o \
+ kernel/drivers/staging/imgtec/plato/plato_init.o
+
+drm_rk-y += \
+ kernel/drivers/staging/imgtec/rk3368/drm_rk_drv.o \
+ kernel/drivers/staging/imgtec/pvr_sw_fence.o \
+ kernel/drivers/staging/imgtec/rk3368/drm_rk_gem.o \
+ kernel/drivers/staging/imgtec/rk3368/drm_rk_modeset.o \
+ kernel/drivers/staging/imgtec/rk3368/drm_rk_crtc.o \
+ kernel/drivers/staging/imgtec/rk3368/drm_rk_hdmi.o \
+ kernel/drivers/staging/imgtec/rk3368/drm_rk_encoder.o
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/Linux.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/Linux.mk
new file mode 100644
index 0000000..3365887
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/Linux.mk
@@ -0,0 +1,77 @@
+########################################################################### ###
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+# 
+# The contents of this file are subject to the MIT license as set out below.
+# 
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+# 
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# 
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+# 
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+# 
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+# 
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+
+modules := adf_fbdev adf_pdp adf_tc5_pdp tc adf_sunxi drm_nulldisp drm_pdp plato drm_rk
+
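+# For each name in $(modules) the build system expects matching
+# <name>_type, <name>_target and <name>_makefile variables; every module
+# here is a kernel module built from the shared Kbuild.mk in this
+# directory.
+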
+adf_fbdev_type := kernel_module
+adf_fbdev_target := adf_fbdev.ko
+adf_fbdev_makefile := $(THIS_DIR)/Kbuild.mk
+
+adf_pdp_type := kernel_module
+adf_pdp_target := adf_pdp.ko
+adf_pdp_makefile := $(THIS_DIR)/Kbuild.mk
+
+adf_tc5_pdp_type := kernel_module
+adf_tc5_pdp_target := adf_tc5_pdp.ko
+adf_tc5_pdp_makefile := $(THIS_DIR)/Kbuild.mk
+
+tc_type := kernel_module
+tc_target := tc.ko
+tc_makefile := $(THIS_DIR)/Kbuild.mk
+
+adf_sunxi_type := kernel_module
+adf_sunxi_target := adf_sunxi.ko
+adf_sunxi_makefile := $(THIS_DIR)/Kbuild.mk
+
+drm_nulldisp_type := kernel_module
+drm_nulldisp_target := drm_nulldisp.ko
+drm_nulldisp_makefile := $(THIS_DIR)/Kbuild.mk
+
+drm_pdp_type := kernel_module
+drm_pdp_target := drm_pdp.ko
+drm_pdp_makefile := $(THIS_DIR)/Kbuild.mk
+
+plato_type := kernel_module
+plato_target := plato.ko
+plato_makefile := $(THIS_DIR)/Kbuild.mk
+
+drm_rk_type := kernel_module
+drm_rk_target := drm_rk.ko
+drm_rk_makefile := $(THIS_DIR)/Kbuild.mk
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_netlink_gem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_netlink_gem.c
new file mode 100644
index 0000000..41884b4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_netlink_gem.c
@@ -0,0 +1,126 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "drm_netlink_gem.h"
+
+#include <linux/capability.h>
+#include "kernel_compatibility.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+int netlink_gem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv = file->private_data;
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_vma_offset_node *node;
+	struct drm_gem_object *obj = NULL;
+	int err;
+
+	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+					   vma->vm_pgoff,
+					   vma_pages(vma));
+	if (node) {
+		obj = container_of(node, struct drm_gem_object, vma_node);
+
+		/* Don't mmap an object that is being destroyed */
+		if (!kref_get_unless_zero(&obj->refcount))
+			obj = NULL;
+	}
+	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+
+	if (!obj)
+		return -EINVAL;
+
+	/* Allow Netlink clients to mmap any object for reading */
+	if (!capable(CAP_SYS_RAWIO) || (vma->vm_flags & VM_WRITE)) {
+		if (!drm_vma_node_is_allowed(node, file_priv)) {
+			err = -EACCES;
+			goto exit_unref_obj;
+		}
+	}
+
+	err = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
+
+exit_unref_obj:
+	drm_gem_object_unreference_unlocked(obj);
+	return err;
+}
+#else	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) */
+int netlink_gem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv = file->private_data;
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_vma_offset_node *node;
+	struct drm_gem_object *obj;
+	int err;
+
+	mutex_lock(&dev->struct_mutex);
+
+	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+					   vma->vm_pgoff,
+					   vma_pages(vma));
+	if (!node) {
+		err = -EINVAL;
+		goto exit_unlock;
+	}
+
+	/* Allow Netlink clients to mmap any object for reading */
+	if (!capable(CAP_SYS_RAWIO) || (vma->vm_flags & VM_WRITE)) {
+		if (!drm_vma_node_is_allowed(node, file_priv)) {
+			err = -EACCES;
+			goto exit_unlock;
+		}
+	}
+
+	obj = container_of(node, struct drm_gem_object, vma_node);
+
+	err = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
+
+exit_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return err;
+}
+#endif	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_netlink_gem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_netlink_gem.h
new file mode 100644
index 0000000..940f47b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_netlink_gem.h
@@ -0,0 +1,59 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__DRM_NETLINK_GEM_H__)
+#define __DRM_NETLINK_GEM_H__
+
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+#include <drm/drm_gem.h>
+#endif
+
+int netlink_gem_mmap(struct file *file, struct vm_area_struct *vma);
+
+#endif	/* !defined(__DRM_NETLINK_GEM_H__) */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_drv.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_drv.c
new file mode 100644
index 0000000..1688b44
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_drv.c
@@ -0,0 +1,1984 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/jiffies.h>
+#include "pvr_linux_fence.h"
+#include "pvr_sw_fence.h"
+#include <linux/reservation.h>
+#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#include <linux/mutex.h>
+#include <linux/capability.h>
+#include <linux/completion.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include <pvr_drm_display_external.h>
+#include <pvrversion.h>
+
+#include "drm_nulldisp_drv.h"
+#if defined(LMA)
+#include "tc_drv.h"
+#include "drm_pdp_gem.h"
+#include "pdp_drm.h"
+#else
+#include "drm_nulldisp_gem.h"
+#endif
+#include "nulldisp_drm.h"
+#include "drm_netlink_gem.h"
+#include "drm_nulldisp_netlink.h"
+
+#include "kernel_compatibility.h"
+
+#define DRIVER_NAME "nulldisp"
+#define DRIVER_DESC "Imagination Technologies Null DRM Display Driver"
+#define DRIVER_DATE "20150612"
+
+#define NULLDISP_FB_WIDTH_MIN 0
+#define NULLDISP_FB_WIDTH_MAX 2048
+#define NULLDISP_FB_HEIGHT_MIN 0
+#define NULLDISP_FB_HEIGHT_MAX 2048
+
+#define NULLDISP_MAX_PLANES 3
+
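+/*
+ * Page flip state machine: NONE -> PENDING (flip scheduled, waiting on
+ * fences and the remote consumer) -> DONE (base address updated; the
+ * completion event is delivered from the next emulated vblank, or
+ * immediately for async flips) -> NONE.
+ */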
+enum nulldisp_crtc_flip_status {
+	NULLDISP_CRTC_FLIP_STATUS_NONE = 0,
+	NULLDISP_CRTC_FLIP_STATUS_PENDING,
+	NULLDISP_CRTC_FLIP_STATUS_DONE,
+};
+
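+/*
+ * Per-flip bookkeeping: wait_fence is the old buffer's exclusive fence,
+ * which must signal before the flip work is queued; complete_fence is
+ * attached to the old buffer's reservation and signalled once the flip
+ * has fully completed, fencing any reuse of the old buffer.
+ */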
+struct nulldisp_flip_data {
+	struct dma_fence_cb base;
+	struct drm_crtc *crtc;
+	struct dma_fence *wait_fence;
+	struct dma_fence *complete_fence;
+};
+
+struct nulldisp_crtc {
+	struct drm_crtc base;
+	struct delayed_work vb_work;
+	struct work_struct flip_work;
+	struct delayed_work flip_to_work;
+	struct delayed_work copy_to_work;
+
+	struct completion flip_scheduled;
+	struct completion copy_done;
+
+	/* Reuse the drm_device event_lock to protect these */
+	atomic_t flip_status;
+	struct drm_pending_vblank_event *flip_event;
+	struct drm_framebuffer *old_fb;
+	struct nulldisp_flip_data *flip_data;
+	bool flip_async;
+};
+
+struct nulldisp_display_device {
+	struct drm_device *dev;
+
+	struct drm_property *mem_layout_prop;
+	struct drm_property *fbc_format_prop;
+
+	struct pvr_sw_fence_context *fctx;
+
+	struct workqueue_struct *workqueue;
+	struct nulldisp_crtc *nulldisp_crtc;
+	struct nlpvrdpy *nlpvrdpy;
+#if defined(LMA)
+	struct pdp_gem_private *pdp_gem_priv;
+#endif
+};
+
+struct nulldisp_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj[NULLDISP_MAX_PLANES];
+};
+
+#define to_nulldisp_crtc(crtc) \
+	container_of(crtc, struct nulldisp_crtc, base)
+#define to_nulldisp_framebuffer(framebuffer) \
+	container_of(framebuffer, struct nulldisp_framebuffer, base)
+
+#if defined(LMA)
+#define	obj_to_resv(obj) pdp_gem_get_resv(obj)
+#else
+#define	obj_to_resv(obj) nulldisp_gem_get_resv(obj)
+#endif
+
+#define PARAM_STRING_LEN 128
+
+static const struct drm_prop_enum_list nulldisp_mem_layout_enum_list[] = {
+	{ FB_MEMLAYOUT_STRIDED,		"strided" },
+	{ FB_MEMLAYOUT_COMPRESSED,	"compressed" },
+	{ FB_MEMLAYOUT_BIF_PAGE_TILED,	"bif_page_tiled" }
+};
+
+static char param_mem_layout[PARAM_STRING_LEN + 1] = "strided";
+
+module_param_string(mem_layout, param_mem_layout, PARAM_STRING_LEN, S_IRUGO);
+MODULE_PARM_DESC(mem_layout,
+		 "Preferred memory layout (strided, compressed or bif_page_tiled)");
+
+static const struct drm_prop_enum_list nulldisp_fbc_format_enum_list[] = {
+	{ IMG_FB_COMPRESSION_NONE,			"none" },
+	{ IMG_FB_COMPRESSION_DIRECT_8x8,		"direct_8x8" },
+	{ IMG_FB_COMPRESSION_DIRECT_16x4,		"direct_16x4" },
+	{ IMG_FB_COMPRESSION_DIRECT_32x2,		"direct_32x2" },
+	{ IMG_FB_COMPRESSION_INDIRECT_8x8,		"indirect_8x8" },
+	{ IMG_FB_COMPRESSION_INDIRECT_16x4,		"indirect_16x4" },
+	{ IMG_FB_COMPRESSION_INDIRECT_4TILE_8x8,	"indirect_4tile_8x8" },
+	{ IMG_FB_COMPRESSION_INDIRECT_4TILE_16x4,	"indirect_4tile_16x4" }
+};
+
+static char param_fbc_format[PARAM_STRING_LEN + 1] = "none";
+
+module_param_string(fbc_format, param_fbc_format, PARAM_STRING_LEN, S_IRUGO);
+MODULE_PARM_DESC(fbc_format,
+		 "Specifies the preferred framebuffer compression format "
+		 "(none, direct_8x8, direct_16x4, direct_32x2, indirect_8x8, "
+		 "indirect_16x4, indirect_4tile_8x8 or indirect_4tile_16x4)");
+
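+/*
+ * Both parameters are read-only once the module is loaded. A
+ * hypothetical load-time override (assuming the module is named
+ * nulldisp) would look like:
+ *   modprobe nulldisp mem_layout=compressed fbc_format=direct_8x8
+ */
+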
+
+static const uint32_t nulldisp_modeset_formats[] = {
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV21,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YUV444,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_RGB565,
+};
+
+/******************************************************************************
+ * Linux compatibility functions
+ ******************************************************************************/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+static inline void nulldisp_drm_fb_set_format(struct drm_framebuffer *fb,
+					      u32 pixel_format)
+{
+	fb->format = drm_format_info(pixel_format);
+}
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+static inline void nulldisp_drm_fb_set_format(struct drm_framebuffer *fb,
+					      u32 pixel_format)
+{
+	const struct drm_format_info *format = drm_format_info(pixel_format);
+
+	fb->pixel_format = pixel_format;
+
+	fb->depth = format->depth;
+	fb->bits_per_pixel = format->depth ? (format->cpp[0] * 8) : 0;
+}
+#else
+static inline void nulldisp_drm_fb_set_format(struct drm_framebuffer *fb,
+					      u32 pixel_format)
+{
+	fb->pixel_format = pixel_format;
+
+	switch (pixel_format) {
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_YUYV:
+		/* Unused for YUV formats */
+		fb->depth = 0;
+		fb->bits_per_pixel = 0;
+		break;
+
+	default: /* RGB */
+		drm_fb_get_bpp_depth(pixel_format,
+				     &fb->depth,
+				     &fb->bits_per_pixel);
+	}
+}
+#endif
+
+/******************************************************************************
+ * CRTC functions
+ ******************************************************************************/
+
+static void nulldisp_crtc_helper_dpms(struct drm_crtc *crtc,
+				      int mode)
+{
+	/*
+	 * Change the power state of the display/pipe/port/etc. If the mode
+	 * passed in is unsupported, the provider must use the next lowest
+	 * power level.
+	 */
+}
+
+static void nulldisp_crtc_helper_prepare(struct drm_crtc *crtc)
+{
+	drm_crtc_vblank_off(crtc);
+
+	/*
+	 * Prepare the display/pipe/port/etc for a mode change, e.g. put
+	 * them in a low power state or turn them off.
+	 */
+}
+
+static void nulldisp_crtc_helper_commit(struct drm_crtc *crtc)
+{
+	/* Turn the display/pipe/port/etc back on */
+
+	drm_crtc_vblank_on(crtc);
+}
+
+static bool
+nulldisp_crtc_helper_mode_fixup(struct drm_crtc *crtc,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	/*
+	 * Fix up mode so that it's compatible with the hardware. The results
+	 * should be stored in adjusted_mode (i.e. mode should be untouched).
+	 */
+	return true;
+}
+
+static int
+nulldisp_crtc_helper_mode_set_base_atomic(struct drm_crtc *crtc,
+					  struct drm_framebuffer *fb,
+					  int x, int y,
+					  enum mode_set_atomic atomic)
+{
+	/* Set the display base address or offset from the base address */
+	return 0;
+}
+
+static int nulldisp_crtc_helper_mode_set_base(struct drm_crtc *crtc,
+					      int x, int y,
+					      struct drm_framebuffer *old_fb)
+{
+	return nulldisp_crtc_helper_mode_set_base_atomic(crtc,
+							 crtc->primary->fb,
+							 x,
+							 y,
+							 0);
+}
+
+static int
+nulldisp_crtc_helper_mode_set(struct drm_crtc *crtc,
+			      struct drm_display_mode *mode,
+			      struct drm_display_mode *adjusted_mode,
+			      int x, int y,
+			      struct drm_framebuffer *old_fb)
+{
+	/* Set up the new mode and/or framebuffer */
+	return nulldisp_crtc_helper_mode_set_base(crtc, x, y, old_fb);
+}
+
+static void nulldisp_crtc_helper_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void nulldisp_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc);
+
+	DRM_DEBUG_DRIVER("[CRTC:%d]\n", crtc->base.id);
+
+	drm_crtc_cleanup(crtc);
+
+	BUG_ON(atomic_read(&nulldisp_crtc->flip_status) !=
+	       NULLDISP_CRTC_FLIP_STATUS_NONE);
+
+	kfree(nulldisp_crtc);
+}
+
+static int nulldisp_crtc_set_config(struct drm_mode_set *mode_set)
+{
+	return drm_crtc_helper_set_config(mode_set);
+}
+
+static void nulldisp_crtc_flip_complete(struct drm_crtc *crtc)
+{
+	struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc);
+	unsigned long flags;
+	struct dma_fence *fence;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+	/* The flipping process has been completed so reset the flip status */
+	atomic_set(&nulldisp_crtc->flip_status, NULLDISP_CRTC_FLIP_STATUS_NONE);
+
+	fence = nulldisp_crtc->flip_data->complete_fence;
+
+	dma_fence_put(nulldisp_crtc->flip_data->wait_fence);
+	kfree(nulldisp_crtc->flip_data);
+	nulldisp_crtc->flip_data = NULL;
+
+	if (nulldisp_crtc->flip_event) {
+		drm_crtc_send_vblank_event(crtc, nulldisp_crtc->flip_event);
+		nulldisp_crtc->flip_event = NULL;
+	}
+
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	WARN_ON(dma_fence_signal(fence));
+	dma_fence_put(fence);
+}
+
+static void nulldisp_crtc_flip_done(struct nulldisp_crtc *nulldisp_crtc)
+{
+	struct drm_crtc *crtc = &nulldisp_crtc->base;
+
+	struct drm_framebuffer *old_fb;
+
+	WARN_ON(atomic_read(&nulldisp_crtc->flip_status) !=
+		NULLDISP_CRTC_FLIP_STATUS_PENDING);
+
+	old_fb = nulldisp_crtc->old_fb;
+	nulldisp_crtc->old_fb = NULL;
+
+	(void) nulldisp_crtc_helper_mode_set_base(crtc, crtc->x, crtc->y,
+						  old_fb);
+
+	atomic_set(&nulldisp_crtc->flip_status, NULLDISP_CRTC_FLIP_STATUS_DONE);
+
+	if (nulldisp_crtc->flip_async)
+		nulldisp_crtc_flip_complete(crtc);
+}
+
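+/*
+ * Watchdog period for the remote (netlink) display consumer: if a flip
+ * or copy is not acknowledged within this time, it is completed locally.
+ */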
+static inline unsigned long nulldisp_netlink_timeout(void)
+{
+	return msecs_to_jiffies(30000);
+}
+
+static bool nulldisp_set_flip_to(struct nulldisp_crtc *nulldisp_crtc)
+{
+	struct drm_crtc *crtc = &nulldisp_crtc->base;
+	struct nulldisp_display_device *nulldisp_dev = crtc->dev->dev_private;
+
+	/* Returns false if work already queued, else true */
+	return queue_delayed_work(nulldisp_dev->workqueue,
+				  &nulldisp_crtc->flip_to_work,
+				  nulldisp_netlink_timeout());
+}
+
+static bool nulldisp_set_copy_to(struct nulldisp_crtc *nulldisp_crtc)
+{
+	struct drm_crtc *crtc = &nulldisp_crtc->base;
+	struct nulldisp_display_device *nulldisp_dev = crtc->dev->dev_private;
+
+	/* Returns false if work already queued, else true */
+	return queue_delayed_work(nulldisp_dev->workqueue,
+				  &nulldisp_crtc->copy_to_work,
+				  nulldisp_netlink_timeout());
+}
+
+static void nulldisp_flip_to_work(struct work_struct *w)
+{
+	struct delayed_work *dw =
+		container_of(w, struct delayed_work, work);
+	struct nulldisp_crtc *nulldisp_crtc =
+		container_of(dw, struct nulldisp_crtc, flip_to_work);
+
+	if (atomic_read(&nulldisp_crtc->flip_status) ==
+	    NULLDISP_CRTC_FLIP_STATUS_PENDING)
+		nulldisp_crtc_flip_done(nulldisp_crtc);
+}
+
+static void nulldisp_copy_to_work(struct work_struct *w)
+{
+	struct delayed_work *dw =
+		container_of(w, struct delayed_work, work);
+	struct nulldisp_crtc *nulldisp_crtc =
+		container_of(dw, struct nulldisp_crtc, copy_to_work);
+
+	complete(&nulldisp_crtc->copy_done);
+}
+
+static int nulldisp_object_property_get_value_u32(struct drm_mode_object *obj,
+						  struct drm_property *property,
+						  u32 *val32)
+{
+	u64 val64;
+	int err;
+
+	if (!obj)
+		return -EINVAL;
+
+	err = drm_object_property_get_value(obj, property, &val64);
+	if (!err)
+		*val32 = (u32) val64;
+
+	return err;
+}
+
+static void nulldisp_get_crtc_properties(struct drm_crtc *crtc,
+					 u32 *layout, u32 *fbc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct nulldisp_display_device *nulldisp_dev = dev->dev_private;
+
+	if (nulldisp_object_property_get_value_u32(&crtc->base,
+						   nulldisp_dev->mem_layout_prop,
+						   layout)) {
+		*layout = FB_MEMLAYOUT_STRIDED;
+	}
+
+	if (nulldisp_object_property_get_value_u32(&crtc->base,
+						   nulldisp_dev->fbc_format_prop,
+						   fbc)) {
+		*fbc = IMG_FB_COMPRESSION_NONE;
+	}
+}
+
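+/*
+ * Deferred flip handler: resolves an mmap offset for each framebuffer
+ * plane, arms the flip watchdog and forwards the flip over netlink to
+ * the userspace consumer. If the request cannot be sent, the flip is
+ * completed locally instead.
+ */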
+static void nulldisp_flip_work(struct work_struct *w)
+{
+	struct nulldisp_crtc *nulldisp_crtc =
+		container_of(w, struct nulldisp_crtc, flip_work);
+	struct drm_crtc *crtc = &nulldisp_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	struct nulldisp_display_device *nulldisp_dev = dev->dev_private;
+	struct nulldisp_framebuffer *nulldisp_fb =
+		to_nulldisp_framebuffer(crtc->primary->fb);
+	u32 layout, fbc;
+	u64 addr[NULLDISP_MAX_PLANES],
+	    size[NULLDISP_MAX_PLANES];
+	int i;
+
+	nulldisp_get_crtc_properties(crtc, &layout, &fbc);
+
+	/*
+	 * To prevent races with disconnect requests from user space,
+	 * set the timeout before sending the flip request.
+	 */
+	for (i = 0; i < nulldisp_drm_fb_num_planes(crtc->primary->fb); i++) {
+		struct drm_gem_object *obj = nulldisp_fb->obj[i];
+
+		if (drm_gem_create_mmap_offset(obj)) {
+			DRM_ERROR("Failed to get mmap offset for buffer[%d] = %p\n", i, obj);
+			goto fail_cancel;
+		}
+
+		addr[i] = drm_vma_node_offset_addr(&obj->vma_node);
+		size[i] = obj->size;
+	}
+
+	nulldisp_set_flip_to(nulldisp_crtc);
+
+	if (nlpvrdpy_send_flip(nulldisp_dev->nlpvrdpy,
+			       &nulldisp_fb->base,
+			       &addr[0],
+			       &size[0],
+			       layout,
+			       fbc))
+		goto fail_cancel;
+
+	return;
+
+fail_cancel:
+	/*
+	 * We can't flush the work, as we are running on the same
+	 * single threaded workqueue as the work to be flushed.
+	 */
+	cancel_delayed_work(&nulldisp_crtc->flip_to_work);
+
+	nulldisp_crtc_flip_done(nulldisp_crtc);
+}
+
+static void nulldisp_crtc_flip_cb(struct dma_fence *fence,
+				  struct dma_fence_cb *cb)
+{
+	struct nulldisp_flip_data *flip_data =
+		container_of(cb, struct nulldisp_flip_data, base);
+	struct drm_crtc *crtc = flip_data->crtc;
+	struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct nulldisp_display_device *nulldisp_dev = dev->dev_private;
+
+	(void) queue_work(nulldisp_dev->workqueue,
+			  &nulldisp_crtc->flip_work);
+
+	complete_all(&nulldisp_crtc->flip_scheduled);
+}
+
+static void nulldisp_crtc_flip_schedule_cb(struct dma_fence *fence,
+					   struct dma_fence_cb *cb)
+{
+	struct nulldisp_flip_data *flip_data =
+		container_of(cb, struct nulldisp_flip_data, base);
+	int err = 0;
+
+	if (flip_data->wait_fence)
+		err = dma_fence_add_callback(flip_data->wait_fence,
+					     &flip_data->base,
+					     nulldisp_crtc_flip_cb);
+
+	if (!flip_data->wait_fence || err) {
+		if (err && err != -ENOENT)
+			DRM_ERROR("flip failed to wait on old buffer\n");
+		nulldisp_crtc_flip_cb(flip_data->wait_fence, &flip_data->base);
+	}
+}
+
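+/*
+ * Schedule a page flip: complete_fence is added to the old buffer's
+ * reservation so the buffer stays fenced until the flip completes, and
+ * the flip work itself only runs once the new buffer's exclusive
+ * (render) fence and then the old buffer's exclusive fence have
+ * signalled.
+ */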
+static int nulldisp_crtc_flip_schedule(struct drm_crtc *crtc,
+				       struct drm_gem_object *obj,
+				       struct drm_gem_object *old_obj)
+{
+	struct nulldisp_display_device *nulldisp_dev = crtc->dev->dev_private;
+	struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc);
+	struct reservation_object *resv = obj_to_resv(obj);
+	struct reservation_object *old_resv = obj_to_resv(old_obj);
+	struct nulldisp_flip_data *flip_data;
+	struct dma_fence *fence;
+	int err;
+
+	flip_data = kmalloc(sizeof(*flip_data), GFP_KERNEL);
+	if (!flip_data)
+		return -ENOMEM;
+
+	flip_data->crtc = crtc;
+
+	flip_data->complete_fence = pvr_sw_fence_create(nulldisp_dev->fctx);
+	if (!flip_data->complete_fence) {
+		err = -ENOMEM;
+		goto err_free_fence_data;
+	}
+
+	ww_mutex_lock(&old_resv->lock, NULL);
+	err = reservation_object_reserve_shared(old_resv);
+	if (err) {
+		ww_mutex_unlock(&old_resv->lock);
+		goto err_complete_fence_put;
+	}
+
+	reservation_object_add_shared_fence(old_resv,
+					    flip_data->complete_fence);
+
+	flip_data->wait_fence =
+		dma_fence_get(reservation_object_get_excl(old_resv));
+
+	if (old_resv != resv) {
+		ww_mutex_unlock(&old_resv->lock);
+		ww_mutex_lock(&resv->lock, NULL);
+	}
+
+	fence = dma_fence_get(reservation_object_get_excl(resv));
+	ww_mutex_unlock(&resv->lock);
+
+	nulldisp_crtc->flip_data = flip_data;
+	reinit_completion(&nulldisp_crtc->flip_scheduled);
+	atomic_set(&nulldisp_crtc->flip_status,
+		   NULLDISP_CRTC_FLIP_STATUS_PENDING);
+
+	if (fence) {
+		err = dma_fence_add_callback(fence, &flip_data->base,
+					     nulldisp_crtc_flip_schedule_cb);
+		dma_fence_put(fence);
+		if (err && err != -ENOENT)
+			goto err_set_flip_status_none;
+	}
+
+	if (!fence || err == -ENOENT) {
+		nulldisp_crtc_flip_schedule_cb(fence, &flip_data->base);
+		err = 0;
+	}
+
+	return err;
+
+err_set_flip_status_none:
+	atomic_set(&nulldisp_crtc->flip_status, NULLDISP_CRTC_FLIP_STATUS_NONE);
+	dma_fence_put(flip_data->wait_fence);
+err_complete_fence_put:
+	dma_fence_put(flip_data->complete_fence);
+err_free_fence_data:
+	kfree(flip_data);
+	return err;
+}
+
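+/*
+ * Synchronous flips hold a vblank reference and have their completion
+ * event delivered from the emulated vblank handler; flips with
+ * DRM_MODE_PAGE_FLIP_ASYNC set complete as soon as the flip work has
+ * finished.
+ */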
+static int nulldisp_crtc_page_flip(struct drm_crtc *crtc,
+				   struct drm_framebuffer *fb,
+				   struct drm_pending_vblank_event *event,
+				   uint32_t page_flip_flags)
+{
+	struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc);
+	struct nulldisp_framebuffer *nulldisp_fb = to_nulldisp_framebuffer(fb);
+	struct nulldisp_framebuffer *nulldisp_old_fb =
+		to_nulldisp_framebuffer(crtc->primary->fb);
+	enum nulldisp_crtc_flip_status status;
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	status = atomic_read(&nulldisp_crtc->flip_status);
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	if (status != NULLDISP_CRTC_FLIP_STATUS_NONE)
+		return -EBUSY;
+
+	if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) {
+		err = drm_crtc_vblank_get(crtc);
+		if (err)
+			return err;
+	}
+
+	nulldisp_crtc->old_fb = crtc->primary->fb;
+	nulldisp_crtc->flip_event = event;
+	nulldisp_crtc->flip_async = !!(page_flip_flags &
+				       DRM_MODE_PAGE_FLIP_ASYNC);
+
+	/* Set the crtc to point to the new framebuffer */
+	crtc->primary->fb = fb;
+
+	err = nulldisp_crtc_flip_schedule(crtc, nulldisp_fb->obj[0],
+					  nulldisp_old_fb->obj[0]);
+	if (err) {
+		crtc->primary->fb = nulldisp_crtc->old_fb;
+		nulldisp_crtc->old_fb = NULL;
+		nulldisp_crtc->flip_event = NULL;
+		nulldisp_crtc->flip_async = false;
+
+		DRM_ERROR("failed to schedule flip (err=%d)\n", err);
+		goto err_vblank_put;
+	}
+
+	return 0;
+
+err_vblank_put:
+	if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
+		drm_crtc_vblank_put(crtc);
+	return err;
+}
+
+static void nulldisp_crtc_helper_disable(struct drm_crtc *crtc)
+{
+	struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc);
+
+	if (atomic_read(&nulldisp_crtc->flip_status) ==
+	    NULLDISP_CRTC_FLIP_STATUS_PENDING)
+		wait_for_completion(&nulldisp_crtc->flip_scheduled);
+
+	/*
+	 * Flush any outstanding page flip related work. The order in which
+	 * this is done is important, to ensure there are no outstanding
+	 * page flips.
+	 */
+	flush_work(&nulldisp_crtc->flip_work);
+	flush_delayed_work(&nulldisp_crtc->flip_to_work);
+	flush_delayed_work(&nulldisp_crtc->vb_work);
+
+	drm_crtc_vblank_off(crtc);
+	flush_delayed_work(&nulldisp_crtc->vb_work);
+
+	/*
+	 * Vblank has been disabled, so the vblank handler shouldn't be
+	 * able to reschedule itself.
+	 */
+	BUG_ON(cancel_delayed_work(&nulldisp_crtc->vb_work));
+
+	BUG_ON(atomic_read(&nulldisp_crtc->flip_status) !=
+	       NULLDISP_CRTC_FLIP_STATUS_NONE);
+
+	/* Flush any remaining dirty FB work */
+	flush_delayed_work(&nulldisp_crtc->copy_to_work);
+}
+
+static const struct drm_crtc_helper_funcs nulldisp_crtc_helper_funcs = {
+	.dpms = nulldisp_crtc_helper_dpms,
+	.prepare = nulldisp_crtc_helper_prepare,
+	.commit = nulldisp_crtc_helper_commit,
+	.mode_fixup = nulldisp_crtc_helper_mode_fixup,
+	.mode_set = nulldisp_crtc_helper_mode_set,
+	.mode_set_base = nulldisp_crtc_helper_mode_set_base,
+	.load_lut = nulldisp_crtc_helper_load_lut,
+	.mode_set_base_atomic = nulldisp_crtc_helper_mode_set_base_atomic,
+	.disable = nulldisp_crtc_helper_disable,
+};
+
+static const struct drm_crtc_funcs nulldisp_crtc_funcs = {
+	.reset = NULL,
+	.cursor_set = NULL,
+	.cursor_move = NULL,
+	.gamma_set = NULL,
+	.destroy = nulldisp_crtc_destroy,
+	.set_config = nulldisp_crtc_set_config,
+	.page_flip = nulldisp_crtc_page_flip,
+};
+
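+/*
+ * There is no real display hardware, so vblank events are emulated with
+ * a delayed work item that re-arms itself at the current mode's vertical
+ * refresh rate (falling back to 60Hz).
+ */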
+static bool nulldisp_queue_vblank_work(struct nulldisp_crtc *nulldisp_crtc)
+{
+	struct drm_crtc *crtc = &nulldisp_crtc->base;
+	struct nulldisp_display_device *nulldisp_dev = crtc->dev->dev_private;
+	int vrefresh;
+	const int vrefresh_default = 60;
+
+	if (crtc->hwmode.vrefresh) {
+		vrefresh = crtc->hwmode.vrefresh;
+	} else {
+		vrefresh = vrefresh_default;
+		DRM_ERROR("vertical refresh rate is zero, defaulting to %d\n",
+			  vrefresh);
+	}
+
+	/* Returns false if work already queued, else true */
+	return queue_delayed_work(nulldisp_dev->workqueue,
+				  &nulldisp_crtc->vb_work,
+				  usecs_to_jiffies(1000000/vrefresh));
+}
+
+static void nulldisp_handle_vblank(struct work_struct *w)
+{
+	struct delayed_work *dw =
+		container_of(w, struct delayed_work, work);
+	struct nulldisp_crtc *nulldisp_crtc =
+		container_of(dw, struct nulldisp_crtc, vb_work);
+	struct drm_crtc *crtc = &nulldisp_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	enum nulldisp_crtc_flip_status status;
+
+	/*
+	 * Reschedule the handler, if necessary. This is done before
+	 * calling drm_crtc_vblank_put, so that the work can be cancelled
+	 * if vblank events are disabled.
+	 */
+	if (drm_handle_vblank(dev, 0))
+		(void) nulldisp_queue_vblank_work(nulldisp_crtc);
+
+	status = atomic_read(&nulldisp_crtc->flip_status);
+	if (status == NULLDISP_CRTC_FLIP_STATUS_DONE) {
+		if (!nulldisp_crtc->flip_async)
+			nulldisp_crtc_flip_complete(crtc);
+		drm_crtc_vblank_put(crtc);
+	}
+}
+
+static struct nulldisp_crtc *
+nulldisp_crtc_create(struct nulldisp_display_device *nulldisp_dev)
+{
+	struct nulldisp_crtc *nulldisp_crtc;
+	struct drm_crtc *crtc;
+	struct drm_plane *primary;
+
+	nulldisp_crtc = kzalloc(sizeof(*nulldisp_crtc), GFP_KERNEL);
+	if (!nulldisp_crtc)
+		goto err_return;
+
+	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+	if (!primary)
+		goto err_free_crtc;
+
+	crtc = &nulldisp_crtc->base;
+
+	atomic_set(&nulldisp_crtc->flip_status, NULLDISP_CRTC_FLIP_STATUS_NONE);
+	init_completion(&nulldisp_crtc->flip_scheduled);
+	init_completion(&nulldisp_crtc->copy_done);
+
+	if (drm_universal_plane_init(nulldisp_dev->dev, primary, 0,
+				     &drm_primary_helper_funcs,
+				     nulldisp_modeset_formats,
+				     ARRAY_SIZE(nulldisp_modeset_formats),
+				     DRM_PLANE_TYPE_PRIMARY, NULL)) {
+		goto err_free_primary;
+	}
+
+	if (drm_crtc_init_with_planes(nulldisp_dev->dev, crtc, primary,
+				      NULL, &nulldisp_crtc_funcs, NULL)) {
+		goto err_cleanup_plane;
+	}
+
+	drm_crtc_helper_add(crtc, &nulldisp_crtc_helper_funcs);
+
+	if (nulldisp_dev->mem_layout_prop) {
+		int value = FB_MEMLAYOUT_STRIDED;
+		int i;
+
+		for (i = 0;
+		     i < ARRAY_SIZE(nulldisp_mem_layout_enum_list);
+		     i++) {
+			if (strncmp(nulldisp_mem_layout_enum_list[i].name,
+				    param_mem_layout,
+				    PARAM_STRING_LEN) == 0) {
+				DRM_INFO("set default mem_layout to '%s'\n",
+					 param_mem_layout);
+				value = nulldisp_mem_layout_enum_list[i].type;
+				break;
+			}
+		}
+
+		if (i == ARRAY_SIZE(nulldisp_mem_layout_enum_list))
+			DRM_INFO("mem_layout unrecognised value '%s'\n",
+				 param_mem_layout);
+
+		drm_object_attach_property(&crtc->base,
+					   nulldisp_dev->mem_layout_prop,
+					   value);
+	}
+
+	if (nulldisp_dev->fbc_format_prop) {
+		int value = IMG_FB_COMPRESSION_NONE;
+		int i;
+
+		for (i = 0;
+		     i < ARRAY_SIZE(nulldisp_fbc_format_enum_list);
+		     i++) {
+			if (strncmp(nulldisp_fbc_format_enum_list[i].name,
+				    param_fbc_format,
+				    PARAM_STRING_LEN) == 0) {
+				DRM_INFO("set default fbc_format to '%s'\n",
+					 param_fbc_format);
+				value = nulldisp_fbc_format_enum_list[i].type;
+				break;
+			}
+		}
+
+		if (i == ARRAY_SIZE(nulldisp_fbc_format_enum_list))
+			DRM_INFO("fbc_format unrecognised value '%s'\n",
+				 param_fbc_format);
+
+		drm_object_attach_property(&crtc->base,
+					   nulldisp_dev->fbc_format_prop,
+					   value);
+	}
+
+	INIT_DELAYED_WORK(&nulldisp_crtc->vb_work, nulldisp_handle_vblank);
+	INIT_WORK(&nulldisp_crtc->flip_work, nulldisp_flip_work);
+	INIT_DELAYED_WORK(&nulldisp_crtc->flip_to_work, nulldisp_flip_to_work);
+	INIT_DELAYED_WORK(&nulldisp_crtc->copy_to_work, nulldisp_copy_to_work);
+
+	DRM_DEBUG_DRIVER("[CRTC:%d]\n", crtc->base.id);
+
+	return nulldisp_crtc;
+
+err_cleanup_plane:
+	drm_plane_cleanup(primary);
+err_free_primary:
+	kfree(primary);
+err_free_crtc:
+	kfree(nulldisp_crtc);
+err_return:
+	return NULL;
+}
+
+
+/******************************************************************************
+ * Connector functions
+ ******************************************************************************/
+
+static int
+nulldisp_connector_helper_get_modes(struct drm_connector *connector)
+{
+	/*
+	 * Gather modes. Here we can get the EDID data from the monitor and
+	 * turn it into drm_display_mode structures.
+	 */
+	return 0;
+}
+
+static int
+nulldisp_connector_helper_mode_valid(struct drm_connector *connector,
+				     struct drm_display_mode *mode)
+{
+	/*
+	 * This function is called on each gathered mode (e.g. via EDID)
+	 * and gives the driver a chance to reject it if the hardware
+	 * cannot support it.
+	 */
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+nulldisp_connector_helper_best_encoder(struct drm_connector *connector)
+{
+	/* Pick the first encoder we find */
+	if (connector->encoder_ids[0] != 0) {
+		struct drm_mode_object *mode_object;
+
+		mode_object = drm_mode_object_find(connector->dev,
+						   connector->encoder_ids[0],
+						   DRM_MODE_OBJECT_ENCODER);
+		if (mode_object) {
+			struct drm_encoder *encoder =
+				obj_to_encoder(mode_object);
+
+			DRM_DEBUG_DRIVER("[ENCODER:%d:%s] best for "
+					 "[CONNECTOR:%d:%s]\n",
+					 encoder->base.id,
+					 encoder->name,
+					 connector->base.id,
+					 connector->name);
+			return encoder;
+		}
+	}
+
+	return NULL;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+static enum drm_connector_status
+nulldisp_connector_detect(struct drm_connector *connector,
+			  bool force)
+{
+	/* Return whether or not a monitor is attached to the connector */
+	return connector_status_connected;
+}
+#endif
+
+static void nulldisp_connector_destroy(struct drm_connector *connector)
+{
+	DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n",
+			 connector->base.id,
+			 connector->name);
+
+	drm_connector_unregister(connector);
+
+	drm_mode_connector_update_edid_property(connector, NULL);
+	drm_connector_cleanup(connector);
+
+	kfree(connector);
+}
+
+static void nulldisp_connector_force(struct drm_connector *connector)
+{
+}
+
+static const struct drm_connector_helper_funcs
+nulldisp_connector_helper_funcs = {
+	.get_modes = nulldisp_connector_helper_get_modes,
+	.mode_valid = nulldisp_connector_helper_mode_valid,
+	.best_encoder = nulldisp_connector_helper_best_encoder,
+};
+
+static const struct drm_connector_funcs nulldisp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.reset = NULL,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	.detect = nulldisp_connector_detect,
+#endif
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = nulldisp_connector_destroy,
+	.force = nulldisp_connector_force,
+};
+
+static struct drm_connector *
+nulldisp_connector_create(struct nulldisp_display_device *nulldisp_dev,
+			  int type)
+{
+	struct drm_connector *connector;
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+	if (!connector)
+		return NULL;
+
+	drm_connector_init(nulldisp_dev->dev,
+			   connector,
+			   &nulldisp_connector_funcs,
+			   type);
+	drm_connector_helper_add(connector, &nulldisp_connector_helper_funcs);
+
+	connector->dpms = DRM_MODE_DPMS_OFF;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+	connector->display_info.subpixel_order = SubPixelUnknown;
+
+	DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n",
+			 connector->base.id,
+			 connector->name);
+
+	return connector;
+}
+
+
+/******************************************************************************
+ * Encoder functions
+ ******************************************************************************/
+
+static void nulldisp_encoder_helper_dpms(struct drm_encoder *encoder,
+					 int mode)
+{
+	/*
+	 * Set the display power state or active encoder based on the mode. If
+	 * the mode passed in is unsupported, the provider must use the next
+	 * lowest power level.
+	 */
+}
+
+static bool
+nulldisp_encoder_helper_mode_fixup(struct drm_encoder *encoder,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	/*
+	 * Fix up mode so that it's compatible with the hardware. The results
+	 * should be stored in adjusted_mode (i.e. mode should be untouched).
+	 */
+	return true;
+}
+
+static void nulldisp_encoder_helper_prepare(struct drm_encoder *encoder)
+{
+	/*
+	 * Prepare the encoder for a mode change e.g. set the active encoder
+	 * accordingly/turn the encoder off
+	 */
+}
+
+static void nulldisp_encoder_helper_commit(struct drm_encoder *encoder)
+{
+	/* Turn the encoder back on/set the active encoder */
+}
+
+static void
+nulldisp_encoder_helper_mode_set(struct drm_encoder *encoder,
+				 struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+	/* Set up the encoder for the new mode */
+}
+
+static void nulldisp_encoder_destroy(struct drm_encoder *encoder)
+{
+	DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n", encoder->base.id, encoder->name);
+
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nulldisp_encoder_helper_funcs = {
+	.dpms = nulldisp_encoder_helper_dpms,
+	.mode_fixup = nulldisp_encoder_helper_mode_fixup,
+	.prepare = nulldisp_encoder_helper_prepare,
+	.commit = nulldisp_encoder_helper_commit,
+	.mode_set = nulldisp_encoder_helper_mode_set,
+	.get_crtc = NULL,
+	.detect = NULL,
+	.disable = NULL,
+};
+
+static const struct drm_encoder_funcs nulldisp_encoder_funcs = {
+	.reset = NULL,
+	.destroy = nulldisp_encoder_destroy,
+};
+
+static struct drm_encoder *
+nulldisp_encoder_create(struct nulldisp_display_device *nulldisp_dev,
+			int type)
+{
+	struct drm_encoder *encoder;
+	int err;
+
+	encoder = kzalloc(sizeof(*encoder), GFP_KERNEL);
+	if (!encoder)
+		return ERR_PTR(-ENOMEM);
+
+	err = drm_encoder_init(nulldisp_dev->dev,
+			       encoder,
+			       &nulldisp_encoder_funcs,
+			       type,
+			       NULL);
+	if (err) {
+		DRM_ERROR("Failed to initialise encoder\n");
+		return ERR_PTR(err);
+	}
+	drm_encoder_helper_add(encoder, &nulldisp_encoder_helper_funcs);
+
+	/*
+	 * This is a bit field that's used to determine which
+	 * CRTCs can drive this encoder.
+	 */
+	encoder->possible_crtcs = 0x1;
+
+	DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n", encoder->base.id, encoder->name);
+
+	return encoder;
+}
+
+
+/******************************************************************************
+ * Framebuffer functions
+ ******************************************************************************/
+
+static void nulldisp_framebuffer_destroy(struct drm_framebuffer *framebuffer)
+{
+	struct nulldisp_framebuffer *nulldisp_framebuffer =
+		to_nulldisp_framebuffer(framebuffer);
+	int i;
+
+	DRM_DEBUG_DRIVER("[FB:%d]\n", framebuffer->base.id);
+
+	drm_framebuffer_cleanup(framebuffer);
+
+	for (i = 0; i < nulldisp_drm_fb_num_planes(framebuffer); i++)
+		drm_gem_object_unreference_unlocked(nulldisp_framebuffer->obj[i]);
+
+	kfree(nulldisp_framebuffer);
+}
+
+static int
+nulldisp_framebuffer_create_handle(struct drm_framebuffer *framebuffer,
+				   struct drm_file *file_priv,
+				   unsigned int *handle)
+{
+	struct nulldisp_framebuffer *nulldisp_framebuffer =
+		to_nulldisp_framebuffer(framebuffer);
+
+	DRM_DEBUG_DRIVER("[FB:%d]\n", framebuffer->base.id);
+
+	return drm_gem_handle_create(file_priv,
+				     nulldisp_framebuffer->obj[0],
+				     handle);
+}
+
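+/*
+ * DIRTYFB handler: forwards a copy request to the userspace consumer
+ * over netlink and blocks until the copy is acknowledged (or the
+ * watchdog fires), so front buffer updates are not lost.
+ */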
+static int
+nulldisp_framebuffer_dirty(struct drm_framebuffer *framebuffer,
+			   struct drm_file *file_priv,
+			   unsigned flags,
+			   unsigned color,
+			   struct drm_clip_rect *clips,
+			   unsigned num_clips)
+{
+	struct nulldisp_framebuffer *nulldisp_fb =
+		to_nulldisp_framebuffer(framebuffer);
+	struct nulldisp_display_device *nulldisp_dev =
+		framebuffer->dev->dev_private;
+	struct nulldisp_crtc *nulldisp_crtc = nulldisp_dev->nulldisp_crtc;
+	u32 layout, fbc;
+	u64 addr[NULLDISP_MAX_PLANES],
+	    size[NULLDISP_MAX_PLANES];
+	int i;
+
+	nulldisp_get_crtc_properties(&nulldisp_crtc->base, &layout, &fbc);
+
+	/*
+	 * To prevent races with disconnect requests from user space,
+	 * set the timeout before sending the copy request.
+	 */
+	for (i = 0; i < nulldisp_drm_fb_num_planes(framebuffer); i++) {
+		struct drm_gem_object *obj = nulldisp_fb->obj[i];
+
+		if (drm_gem_create_mmap_offset(obj)) {
+			DRM_ERROR("Failed to get mmap offset for buffer[%d] = %p\n", i, obj);
+			goto fail_flush;
+		}
+
+		addr[i] = drm_vma_node_offset_addr(&obj->vma_node);
+		size[i] = obj->size;
+	}
+
+	nulldisp_set_copy_to(nulldisp_crtc);
+
+	if (nlpvrdpy_send_copy(nulldisp_dev->nlpvrdpy,
+			       &nulldisp_fb->base,
+			       &addr[0],
+			       &size[0],
+			       layout,
+			       fbc))
+		goto fail_flush;
+
+	wait_for_completion(&nulldisp_crtc->copy_done);
+
+	return 0;
+
+fail_flush:
+	flush_delayed_work(&nulldisp_crtc->copy_to_work);
+
+	wait_for_completion(&nulldisp_crtc->copy_done);
+
+	return 0;
+}
+
+static const struct drm_framebuffer_funcs nulldisp_framebuffer_funcs = {
+	.destroy = nulldisp_framebuffer_destroy,
+	.create_handle = nulldisp_framebuffer_create_handle,
+	.dirty = nulldisp_framebuffer_dirty,
+};
+
+static int
+nulldisp_framebuffer_init(struct drm_device *dev,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \
+	(defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)))
+			  const
+#endif
+			  struct drm_mode_fb_cmd2 *mode_cmd,
+			  struct nulldisp_framebuffer *nulldisp_framebuffer,
+			  struct drm_gem_object **obj)
+{
+	struct drm_framebuffer *fb = &nulldisp_framebuffer->base;
+	int err;
+	int i;
+
+	fb->dev = dev;
+
+	nulldisp_drm_fb_set_format(fb, mode_cmd->pixel_format);
+
+	fb->width        = mode_cmd->width;
+	fb->height       = mode_cmd->height;
+	fb->flags        = mode_cmd->flags;
+
+	for (i = 0; i < nulldisp_drm_fb_num_planes(fb); i++) {
+		fb->pitches[i]  = mode_cmd->pitches[i];
+		fb->offsets[i]  = mode_cmd->offsets[i];
+
+		nulldisp_framebuffer->obj[i] = obj[i];
+	}
+
+	err = drm_framebuffer_init(dev, fb, &nulldisp_framebuffer_funcs);
+	if (err) {
+		DRM_ERROR("failed to initialise framebuffer structure (%d)\n",
+			  err);
+		return err;
+	}
+
+	DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id);
+
+	return 0;
+}
+
+static struct drm_framebuffer *
+nulldisp_fb_create(struct drm_device *dev,
+		   struct drm_file *file_priv,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \
+	(defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)))
+		   const
+#endif
+		   struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj[NULLDISP_MAX_PLANES];
+	struct nulldisp_framebuffer *nulldisp_framebuffer;
+	int err;
+	int i;
+
+	nulldisp_framebuffer = kzalloc(sizeof(*nulldisp_framebuffer),
+				       GFP_KERNEL);
+	if (!nulldisp_framebuffer) {
+		err = -ENOMEM;
+		goto fail_exit;
+	}
+
+	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+		obj[i] = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
+		if (!obj[i]) {
+			DRM_ERROR("failed to find buffer with handle %u\n",
+				  mode_cmd->handles[i]);
+			err = -ENOENT;
+			goto fail_unreference;
+		}
+	}
+
+	err = nulldisp_framebuffer_init(dev,
+					mode_cmd,
+					nulldisp_framebuffer,
+					obj);
+	if (err)
+		goto fail_unreference;
+
+	DRM_DEBUG_DRIVER("[FB:%d]\n", nulldisp_framebuffer->base.base.id);
+
+	return &nulldisp_framebuffer->base;
+
+fail_unreference:
+	kfree(nulldisp_framebuffer);
+
+	while (i--)
+		drm_gem_object_unreference_unlocked(obj[i]);
+
+fail_exit:
+	return ERR_PTR(err);
+}
+
+static const struct drm_mode_config_funcs nulldisp_mode_config_funcs = {
+	.fb_create = nulldisp_fb_create,
+	.output_poll_changed = NULL,
+};
+
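+/*
+ * Netlink callbacks from the userspace consumer. Flushing the relevant
+ * watchdog work runs it immediately, so an acknowledged (or abandoned)
+ * flip/copy is completed without waiting out the full timeout.
+ */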
+static int nulldisp_nl_flipped_cb(void *data)
+{
+	struct nulldisp_crtc *nulldisp_crtc = data;
+
+	flush_delayed_work(&nulldisp_crtc->flip_to_work);
+	flush_delayed_work(&nulldisp_crtc->vb_work);
+
+	return 0;
+}
+
+static int nulldisp_nl_copied_cb(void *data)
+{
+	struct nulldisp_crtc *nulldisp_crtc = data;
+
+	flush_delayed_work(&nulldisp_crtc->copy_to_work);
+
+	return 0;
+}
+
+static void nulldisp_nl_disconnect_cb(void *data)
+{
+	struct nulldisp_crtc *nulldisp_crtc = data;
+
+	flush_delayed_work(&nulldisp_crtc->flip_to_work);
+	flush_delayed_work(&nulldisp_crtc->copy_to_work);
+}
+
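+/*
+ * Driver load: build a single CRTC/connector/encoder pipeline, create
+ * the software fence context and the watchdog workqueue, then bring up
+ * the netlink link to the userspace display consumer.
+ */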
+static int nulldisp_load(struct drm_device *dev, unsigned long flags)
+{
+	struct nulldisp_display_device *nulldisp_dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	int err;
+
+	platform_set_drvdata(dev->platformdev, dev);
+
+	nulldisp_dev = kzalloc(sizeof(*nulldisp_dev), GFP_KERNEL);
+	if (!nulldisp_dev)
+		return -ENOMEM;
+
+	dev->dev_private = nulldisp_dev;
+	nulldisp_dev->dev = dev;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.funcs = (void *)&nulldisp_mode_config_funcs;
+	dev->mode_config.min_width = NULLDISP_FB_WIDTH_MIN;
+	dev->mode_config.max_width = NULLDISP_FB_WIDTH_MAX;
+	dev->mode_config.min_height = NULLDISP_FB_HEIGHT_MIN;
+	dev->mode_config.max_height = NULLDISP_FB_HEIGHT_MAX;
+	dev->mode_config.fb_base = 0;
+	dev->mode_config.async_page_flip = true;
+
+	nulldisp_dev->mem_layout_prop =
+		drm_property_create_enum(dev,
+					 DRM_MODE_PROP_IMMUTABLE,
+					 "mem_layout",
+					 nulldisp_mem_layout_enum_list,
+					 ARRAY_SIZE(nulldisp_mem_layout_enum_list));
+	if (!nulldisp_dev->mem_layout_prop) {
+		DRM_ERROR("failed to create memory layout property.\n");
+		err = -ENOMEM;
+		goto err_config_cleanup;
+	}
+
+	nulldisp_dev->fbc_format_prop =
+		drm_property_create_enum(dev,
+					 DRM_MODE_PROP_IMMUTABLE,
+					 "fbc_format",
+					 nulldisp_fbc_format_enum_list,
+					 ARRAY_SIZE(nulldisp_fbc_format_enum_list));
+	if (!nulldisp_dev->fbc_format_prop) {
+		DRM_ERROR("failed to create FBC format property.\n");
+
+		err = -ENOMEM;
+		goto err_config_cleanup;
+	}
+
+	nulldisp_dev->nulldisp_crtc = nulldisp_crtc_create(nulldisp_dev);
+	if (!nulldisp_dev->nulldisp_crtc) {
+		DRM_ERROR("failed to create a CRTC.\n");
+
+		err = -ENOMEM;
+		goto err_config_cleanup;
+	}
+
+	connector = nulldisp_connector_create(nulldisp_dev,
+					      DRM_MODE_CONNECTOR_Unknown);
+	if (!connector) {
+		DRM_ERROR("failed to create a connector.\n");
+
+		err = -ENOMEM;
+		goto err_config_cleanup;
+	}
+
+	encoder = nulldisp_encoder_create(nulldisp_dev,
+					  DRM_MODE_ENCODER_NONE);
+	if (IS_ERR(encoder)) {
+		DRM_ERROR("failed to create an encoder.\n");
+
+		err = PTR_ERR(encoder);
+		goto err_config_cleanup;
+	}
+
+	err = drm_mode_connector_attach_encoder(connector, encoder);
+	if (err) {
+		DRM_ERROR("failed to attach [ENCODER:%d:%s] to "
+			  "[CONNECTOR:%d:%s] (err=%d)\n",
+			  encoder->base.id,
+			  encoder->name,
+			  connector->base.id,
+			  connector->name,
+			  err);
+		goto err_config_cleanup;
+	}
+
+	err = drm_connector_register(connector);
+	if (err) {
+		DRM_ERROR("[CONNECTOR:%d:%s] failed to register (err=%d)\n",
+			  connector->base.id,
+			  connector->name,
+			  err);
+		goto err_config_cleanup;
+	}
+
+#if defined(LMA)
+	nulldisp_dev->pdp_gem_priv = pdp_gem_init(dev);
+	if (!nulldisp_dev->pdp_gem_priv) {
+		err = -ENOMEM;
+		goto err_config_cleanup;
+	}
+#endif
+	nulldisp_dev->fctx =
+		pvr_sw_fence_context_create("nulldisp-nohw", "nulldisp");
+
+	if (!nulldisp_dev->fctx) {
+		err = -ENOMEM;
+		goto err_gem_cleanup;
+	}
+
+	nulldisp_dev->workqueue =
+		create_singlethread_workqueue(DRIVER_NAME);
+	if (!nulldisp_dev->workqueue) {
+		DRM_ERROR("failed to create work queue\n");
+		err = -ENOMEM;
+		goto err_fence_context_cleanup;
+	}
+
+	err = drm_vblank_init(nulldisp_dev->dev, 1);
+	if (err) {
+		DRM_ERROR("failed to complete vblank init (err=%d)\n", err);
+		goto err_workqueue_cleanup;
+	}
+
+	dev->irq_enabled = true;
+
+	nulldisp_dev->nlpvrdpy = nlpvrdpy_create(dev,
+						 nulldisp_nl_disconnect_cb,
+						 nulldisp_dev->nulldisp_crtc,
+						 nulldisp_nl_flipped_cb,
+						 nulldisp_dev->nulldisp_crtc,
+						 nulldisp_nl_copied_cb,
+						 nulldisp_dev->nulldisp_crtc);
+	if (!nulldisp_dev->nlpvrdpy) {
+		DRM_ERROR("Netlink initialisation failed\n");
+		err = -ENOMEM;
+		goto err_vblank_cleanup;
+	}
+
+	nlpvrdpy_start();
+
+	return 0;
+
+err_vblank_cleanup:
+	drm_vblank_cleanup(dev);
+err_workqueue_cleanup:
+	destroy_workqueue(nulldisp_dev->workqueue);
+	dev->irq_enabled = false;
+err_fence_context_cleanup:
+	pvr_sw_fence_context_destroy(nulldisp_dev->fctx);
+err_gem_cleanup:
+#if defined(LMA)
+	pdp_gem_cleanup(nulldisp_dev->pdp_gem_priv);
+#endif
+err_config_cleanup:
+	drm_mode_config_cleanup(dev);
+	kfree(nulldisp_dev);
+	return err;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+static int nulldisp_unload(struct drm_device *dev)
+#else
+static void nulldisp_unload(struct drm_device *dev)
+#endif
+{
+	struct nulldisp_display_device *nulldisp_dev = dev->dev_private;
+
+	nlpvrdpy_send_disconnect(nulldisp_dev->nlpvrdpy);
+	nlpvrdpy_destroy(nulldisp_dev->nlpvrdpy);
+
+	drm_vblank_cleanup(dev);
+
+	destroy_workqueue(nulldisp_dev->workqueue);
+
+	dev->irq_enabled = false;
+
+	pvr_sw_fence_context_destroy(nulldisp_dev->fctx);
+
+#if defined(LMA)
+	pdp_gem_cleanup(nulldisp_dev->pdp_gem_priv);
+#endif
+	drm_mode_config_cleanup(dev);
+
+	kfree(nulldisp_dev);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+	return 0;
+#endif
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+static void
+nulldisp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file)
+{
+	struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+	if (nulldisp_crtc->flip_event &&
+	    nulldisp_crtc->flip_event->base.file_priv == file) {
+		struct drm_pending_event *pending_event =
+			&nulldisp_crtc->flip_event->base;
+
+		pending_event->destroy(pending_event);
+		nulldisp_crtc->flip_event = NULL;
+	}
+
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+static void nulldisp_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		nulldisp_crtc_flip_event_cancel(crtc, file);
+}
+#endif
+
+static void nulldisp_lastclose(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc->primary->fb) {
+			struct drm_mode_set mode_set = { .crtc = crtc };
+			int err;
+
+			err = drm_mode_set_config_internal(&mode_set);
+			if (err)
+				DRM_ERROR("failed to disable crtc %p (err=%d)\n",
+					  crtc, err);
+		}
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \
+	(defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)))
+static int nulldisp_enable_vblank(struct drm_device *dev, unsigned int crtc)
+#else
+static int nulldisp_enable_vblank(struct drm_device *dev, int crtc)
+#endif
+{
+	struct nulldisp_display_device *nulldisp_dev = dev->dev_private;
+
+	switch (crtc) {
+	case 0:
+		break;
+	default:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+		DRM_ERROR("invalid crtc %u\n", crtc);
+#else
+		DRM_ERROR("invalid crtc %d\n", crtc);
+#endif
+		return -EINVAL;
+	}
+
+	if (!nulldisp_queue_vblank_work(nulldisp_dev->nulldisp_crtc)) {
+		DRM_ERROR("work already queued\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \
+	(defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)))
+static void nulldisp_disable_vblank(struct drm_device *dev, unsigned int crtc)
+#else
+static void nulldisp_disable_vblank(struct drm_device *dev, int crtc)
+#endif
+{
+	struct nulldisp_display_device *nulldisp_dev = dev->dev_private;
+
+	switch (crtc) {
+	case 0:
+		break;
+	default:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+		DRM_ERROR("invalid crtc %u\n", crtc);
+#else
+		DRM_ERROR("invalid crtc %d\n", crtc);
+#endif
+		return;
+	}
+
+	/*
+	 * Vblank events may be disabled from within the vblank handler,
+	 * so don't wait for the work to complete.
+	 */
+	(void) cancel_delayed_work(&nulldisp_dev->nulldisp_crtc->vb_work);
+}
+
+static const struct vm_operations_struct nulldisp_gem_vm_ops = {
+#if defined(LMA)
+	.fault	= pdp_gem_object_vm_fault,
+	.open	= drm_gem_vm_open,
+	.close	= drm_gem_vm_close,
+#else
+	.fault	= nulldisp_gem_object_vm_fault,
+	.open	= nulldisp_gem_vm_open,
+	.close	= nulldisp_gem_vm_close,
+#endif
+};
+
+#if defined(LMA)
+static int pdp_gem_dumb_create(struct drm_file *file,
+			       struct drm_device *dev,
+			       struct drm_mode_create_dumb *args)
+{
+	struct nulldisp_display_device *nulldisp_dev = dev->dev_private;
+
+	return pdp_gem_dumb_create_priv(file,
+					dev,
+					nulldisp_dev->pdp_gem_priv,
+					args);
+}
+
+static int nulldisp_gem_object_create_ioctl(struct drm_device *dev,
+					    void *data,
+					    struct drm_file *file)
+{
+	struct drm_nulldisp_gem_create *args = data;
+	struct nulldisp_display_device *nulldisp_dev = dev->dev_private;
+	struct drm_pdp_gem_create pdp_args;
+	int err;
+
+	if (args->flags) {
+		DRM_ERROR("invalid flags: %#08x\n", args->flags);
+		return -EINVAL;
+	}
+
+	if (args->handle) {
+		DRM_ERROR("invalid handle (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Remap the nulldisp create args to pdp create args.
+	 *
+	 * Note: even though the nulldisp and pdp args are currently
+	 * identical, they may diverge in the future.
+	 */
+	pdp_args.size = args->size;
+	pdp_args.flags = args->flags;
+	pdp_args.handle = args->handle;
+
+	err = pdp_gem_object_create_ioctl_priv(dev,
+					       nulldisp_dev->pdp_gem_priv,
+					       &pdp_args,
+					       file);
+
+	if (!err)
+		args->handle = pdp_args.handle;
+
+	return err;
+}
+
+static int nulldisp_gem_object_mmap_ioctl(struct drm_device *dev,
+					  void *data,
+					  struct drm_file *file)
+{
+	struct drm_nulldisp_gem_mmap *args = data;
+	struct drm_pdp_gem_mmap pdp_args;
+	int err;
+
+	pdp_args.handle = args->handle;
+	pdp_args.pad = args->pad;
+	pdp_args.offset = args->offset;
+
+	err = pdp_gem_object_mmap_ioctl(dev, &pdp_args, file);
+
+	if (!err)
+		args->offset = pdp_args.offset;
+
+	return err;
+}
+
+static int nulldisp_gem_object_cpu_prep_ioctl(struct drm_device *dev,
+					      void *data,
+					      struct drm_file *file)
+{
+	struct drm_nulldisp_gem_cpu_prep *args =
+		(struct drm_nulldisp_gem_cpu_prep *)data;
+	struct drm_pdp_gem_cpu_prep pdp_args;
+
+	pdp_args.handle = args->handle;
+	pdp_args.flags = args->flags;
+
+	return pdp_gem_object_cpu_prep_ioctl(dev, &pdp_args, file);
+}
+
+static int nulldisp_gem_object_cpu_fini_ioctl(struct drm_device *dev,
+				       void *data,
+				       struct drm_file *file)
+{
+	struct drm_nulldisp_gem_cpu_fini *args =
+		(struct drm_nulldisp_gem_cpu_fini *)data;
+	struct drm_pdp_gem_cpu_fini pdp_args;
+
+	pdp_args.handle = args->handle;
+	pdp_args.pad = args->pad;
+
+	return pdp_gem_object_cpu_fini_ioctl(dev, &pdp_args, file);
+}
+
+static void pdp_gem_object_free(struct drm_gem_object *obj)
+{
+	struct nulldisp_display_device *nulldisp_dev = obj->dev->dev_private;
+
+	pdp_gem_object_free_priv(nulldisp_dev->pdp_gem_priv, obj);
+}
+#endif
+
+static const struct drm_ioctl_desc nulldisp_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(NULLDISP_GEM_CREATE, nulldisp_gem_object_create_ioctl, DRM_AUTH | DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(NULLDISP_GEM_MMAP, nulldisp_gem_object_mmap_ioctl, DRM_AUTH | DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(NULLDISP_GEM_CPU_PREP, nulldisp_gem_object_cpu_prep_ioctl, DRM_AUTH | DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(NULLDISP_GEM_CPU_FINI, nulldisp_gem_object_cpu_fini_ioctl, DRM_AUTH | DRM_UNLOCKED),
+};
+
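+/*
+ * mmap is handled by the netlink GEM helper; for non-LMA objects, a
+ * reference is additionally taken on the object's backing pages after a
+ * successful mapping.
+ */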
+static int nulldisp_gem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int err;
+
+	err = netlink_gem_mmap(file, vma);
+#if !defined(LMA)
+	if (!err) {
+		struct drm_file *file_priv = file->private_data;
+		struct drm_device *dev = file_priv->minor->dev;
+		struct drm_gem_object *obj;
+
+		mutex_lock(&dev->struct_mutex);
+		obj = vma->vm_private_data;
+		(void) nulldisp_gem_object_get_pages(obj);
+		mutex_unlock(&dev->struct_mutex);
+	}
+#endif
+	return err;
+}
+
+static const struct file_operations nulldisp_driver_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+	.mmap		= nulldisp_gem_mmap,
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.llseek		= noop_llseek,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+};
+
+static struct drm_driver nulldisp_drm_driver = {
+	.load				= nulldisp_load,
+	.unload				= nulldisp_unload,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+	.preclose			= nulldisp_preclose,
+#endif
+	.lastclose			= nulldisp_lastclose,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+	.set_busid			= drm_platform_set_busid,
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	.get_vblank_counter		= drm_vblank_no_hw_counter,
+#else
+	.get_vblank_counter		= drm_vblank_count,
+#endif
+	.enable_vblank			= nulldisp_enable_vblank,
+	.disable_vblank			= nulldisp_disable_vblank,
+
+	.prime_handle_to_fd		= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle		= drm_gem_prime_fd_to_handle,
+
+#if defined(LMA)
+	.gem_free_object		= pdp_gem_object_free,
+	.gem_prime_export		= pdp_gem_prime_export,
+	.gem_prime_import		= pdp_gem_prime_import,
+	.gem_prime_import_sg_table	= pdp_gem_prime_import_sg_table,
+
+	.dumb_create			= pdp_gem_dumb_create,
+	.dumb_map_offset		= pdp_gem_dumb_map_offset,
+#else
+	.gem_free_object		= nulldisp_gem_object_free,
+	.gem_prime_export		= nulldisp_gem_prime_export,
+	.gem_prime_import		= drm_gem_prime_import,
+	.gem_prime_pin			= nulldisp_gem_prime_pin,
+	.gem_prime_unpin		= nulldisp_gem_prime_unpin,
+	.gem_prime_get_sg_table		= nulldisp_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table	= nulldisp_gem_prime_import_sg_table,
+	.gem_prime_vmap			= nulldisp_gem_prime_vmap,
+	.gem_prime_vunmap		= nulldisp_gem_prime_vunmap,
+	.gem_prime_mmap			= nulldisp_gem_prime_mmap,
+	.gem_prime_res_obj		= nulldisp_gem_prime_res_obj,
+
+	.dumb_create			= nulldisp_gem_dumb_create,
+	.dumb_map_offset		= nulldisp_gem_dumb_map_offset,
+#endif
+	.dumb_destroy			= drm_gem_dumb_destroy,
+
+	.gem_vm_ops			= &nulldisp_gem_vm_ops,
+
+	.name				= DRIVER_NAME,
+	.desc				= DRIVER_DESC,
+	.date				= DRIVER_DATE,
+	.major				= PVRVERSION_MAJ,
+	.minor				= PVRVERSION_MIN,
+	.patchlevel			= PVRVERSION_BUILD,
+
+	.driver_features		= DRIVER_GEM |
+					  DRIVER_MODESET |
+					  DRIVER_PRIME,
+	.ioctls				= nulldisp_ioctls,
+	.num_ioctls			= ARRAY_SIZE(nulldisp_ioctls),
+	.fops				= &nulldisp_driver_fops,
+};
+
+static int nulldisp_probe(struct platform_device *pdev)
+{
+	return drm_platform_init(&nulldisp_drm_driver, pdev);
+}
+
+static int nulldisp_remove(struct platform_device *pdev)
+{
+	struct drm_device *dev = platform_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+
+	return 0;
+}
+
+static void nulldisp_shutdown(struct platform_device *pdev)
+{
+}
+
+static struct platform_device_id nulldisp_platform_device_id_table[] = {
+#if defined(LMA)
+	{ .name = APOLLO_DEVICE_NAME_PDP, .driver_data = 0 },
+	{ .name = ODN_DEVICE_NAME_PDP, .driver_data = 0 },
+#else
+	{ .name = "nulldisp", .driver_data = 0 },
+#endif
+	{ },
+};
+
+static struct platform_driver nulldisp_platform_driver = {
+	.probe		= nulldisp_probe,
+	.remove		= nulldisp_remove,
+	.shutdown	= nulldisp_shutdown,
+	.driver		= {
+		.owner  = THIS_MODULE,
+		.name	= DRIVER_NAME,
+	},
+	.id_table	= nulldisp_platform_device_id_table,
+};
+
+
+static struct platform_device_info nulldisp_device_info = {
+	.name		= "nulldisp",
+	.id		= -1,
+	.dma_mask	= DMA_BIT_MASK(32),
+};
+
+static struct platform_device *nulldisp_dev;
+
+static int __init nulldisp_init(void)
+{
+	int err;
+
+	err = nlpvrdpy_register();
+	if (err) {
+		DRM_ERROR("failed to register with netlink (err=%d)\n", err);
+		return err;
+	}
+
+	nulldisp_dev = platform_device_register_full(&nulldisp_device_info);
+	if (IS_ERR(nulldisp_dev)) {
+		err = PTR_ERR(nulldisp_dev);
+		nulldisp_dev = NULL;
+		goto err_unregister_family;
+	}
+
+	err = platform_driver_register(&nulldisp_platform_driver);
+	if (err)
+		goto err_unregister_device;
+
+	return 0;
+
+err_unregister_device:
+	platform_device_unregister(nulldisp_dev);
+	nulldisp_dev = NULL;
+err_unregister_family:
+	(void) nlpvrdpy_unregister();
+	return err;
+}
+
+static void __exit nulldisp_exit(void)
+{
+	int err;
+
+	err = nlpvrdpy_unregister();
+	BUG_ON(err);
+
+	if (nulldisp_dev)
+		platform_device_unregister(nulldisp_dev);
+
+	platform_driver_unregister(&nulldisp_platform_driver);
+}
+
+module_init(nulldisp_init);
+module_exit(nulldisp_exit);
+
+MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_drv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_drv.h
new file mode 100644
index 0000000..8d8d0fb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_drv.h
@@ -0,0 +1,78 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __DRM_NULLDISP_DRV_H__
+#define __DRM_NULLDISP_DRV_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+#include <drm/drm_fourcc.h>
+#endif
+
+struct drm_framebuffer;
+
+/******************************************************************************
+ * Linux compatibility functions
+ ******************************************************************************/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+static inline u32 nulldisp_drm_fb_format(struct drm_framebuffer *fb)
+{
+	return fb->format->format;
+}
+#else	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */
+static inline u32 nulldisp_drm_fb_format(struct drm_framebuffer *fb)
+{
+	return fb->pixel_format;
+}
+#endif	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */
+
+/******************************************************************************
+ * DRM framebuffer support functions
+ ******************************************************************************/
+static inline int nulldisp_drm_fb_num_planes(struct drm_framebuffer *fb)
+{
+	return drm_format_num_planes(nulldisp_drm_fb_format(fb));
+}
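+
+/*
+ * Note: drm_format_num_planes() was removed from the DRM core in later
+ * kernels (around v5.3) in favour of fb->format->num_planes, so this
+ * helper would need a further compatibility branch there; the kernels
+ * targeted here still provide it.
+ */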
+#endif /* __DRM_NULLDISP_DRV_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_gem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_gem.c
new file mode 100644
index 0000000..1bbc27c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_gem.c
@@ -0,0 +1,563 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/atomic.h>
+#include <linux/mm_types.h>
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/mutex.h>
+#include <linux/capability.h>
+
+#include "drm_nulldisp_gem.h"
+#include "nulldisp_drm.h"
+#include "kernel_compatibility.h"
+
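+/*
+ * pg_refcnt counts users of the backing store: the first call to
+ * nulldisp_gem_object_get_pages() pins the shmem pages and DMA-maps them;
+ * the final nulldisp_gem_object_put_pages() unmaps and releases them.
+ */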
+struct nulldisp_gem_object {
+	struct drm_gem_object base;
+
+	atomic_t pg_refcnt;
+	struct page **pages;
+	dma_addr_t *addrs;
+
+	struct reservation_object _resv;
+	struct reservation_object *resv;
+
+	bool cpu_prep;
+};
+
+#define to_nulldisp_obj(obj) \
+	container_of(obj, struct nulldisp_gem_object, base)
+
+struct page **nulldisp_gem_object_get_pages(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj);
+	struct page **pages;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (atomic_inc_return(&nulldisp_obj->pg_refcnt) == 1) {
+		unsigned int npages = obj->size >> PAGE_SHIFT;
+		dma_addr_t *addrs;
+		unsigned int i;
+
+		pages = drm_gem_get_pages(obj);
+		if (IS_ERR(pages))
+			goto dec_refcnt;
+
+		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
+		if (!addrs) {
+			pages = ERR_PTR(-ENOMEM);
+			goto free_pages;
+		}
+
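+		/*
+		 * Note: dma_map_page() failures are not checked below; a
+		 * more defensive implementation would test each mapping
+		 * with dma_mapping_error() and unwind on error.
+		 */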
+		for (i = 0; i < npages; i++) {
+			addrs[i] = dma_map_page(dev->dev, pages[i],
+						0, PAGE_SIZE,
+						DMA_BIDIRECTIONAL);
+		}
+
+		nulldisp_obj->pages = pages;
+		nulldisp_obj->addrs = addrs;
+	}
+
+	return nulldisp_obj->pages;
+
+free_pages:
+	drm_gem_put_pages(obj, pages, false, false);
+dec_refcnt:
+	atomic_dec(&nulldisp_obj->pg_refcnt);
+	return pages;
+}
+
+static void nulldisp_gem_object_put_pages(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj);
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) == 0))
+		return;
+
+	if (atomic_dec_and_test(&nulldisp_obj->pg_refcnt)) {
+		unsigned int npages = obj->size >> PAGE_SHIFT;
+		unsigned int i;
+
+		for (i = 0; i < npages; i++) {
+			dma_unmap_page(dev->dev, nulldisp_obj->addrs[i],
+				       PAGE_SIZE, DMA_BIDIRECTIONAL);
+		}
+
+		kfree(nulldisp_obj->addrs);
+		nulldisp_obj->addrs = NULL;
+
+		drm_gem_put_pages(obj, nulldisp_obj->pages, true, true);
+		nulldisp_obj->pages = NULL;
+	}
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+int nulldisp_gem_object_vm_fault(struct vm_fault *vmf)
+#else
+int nulldisp_gem_object_vm_fault(struct vm_area_struct *vma,
+				 struct vm_fault *vmf)
+#endif
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+	struct vm_area_struct *vma = vmf->vma;
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	unsigned long addr = vmf->address;
+#else
+	unsigned long addr = (unsigned long)vmf->virtual_address;
+#endif
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj);
+	unsigned long pg_off;
+	struct page *page;
+
+	/*
+	 * nulldisp_gem_object_get_pages should have been called in
+	 * nulldisp_gem_mmap so there's no need to do it here.
+	 */
+	if (WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) == 0))
+		return VM_FAULT_SIGBUS;
+
+	pg_off = (addr - vma->vm_start) >> PAGE_SHIFT;
+	page = nulldisp_obj->pages[pg_off];
+
+	get_page(page);
+	vmf->page = page;
+
+	return 0;
+}
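+
+/*
+ * Illustrative only (not part of the original sources): these handlers
+ * would typically be wired into the driver's VMA operations, e.g.:
+ *
+ *	static const struct vm_operations_struct nulldisp_gem_vm_ops = {
+ *		.fault = nulldisp_gem_object_vm_fault,
+ *		.open  = nulldisp_gem_vm_open,
+ *		.close = nulldisp_gem_vm_close,
+ *	};
+ *
+ * (On kernels where .fault returns vm_fault_t, a small wrapper or the
+ * driver's kernel_compatibility.h glue is needed.)
+ */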
+
+void nulldisp_gem_vm_open(struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+
+	drm_gem_vm_open(vma);
+
+	mutex_lock(&dev->struct_mutex);
+	(void) nulldisp_gem_object_get_pages(obj);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+void nulldisp_gem_vm_close(struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	nulldisp_gem_object_put_pages(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_gem_vm_close(vma);
+}
+
+void nulldisp_gem_object_free(struct drm_gem_object *obj)
+{
+	struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj);
+
+	WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) != 0);
+
+	reservation_object_fini(nulldisp_obj->resv);
+
+	drm_gem_object_release(obj);
+
+	kfree(nulldisp_obj);
+}
+
+int nulldisp_gem_prime_pin(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct page **pages;
+
+	mutex_lock(&dev->struct_mutex);
+	pages = nulldisp_gem_object_get_pages(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return IS_ERR(pages) ? PTR_ERR(pages) : 0;
+}
+
+void nulldisp_gem_prime_unpin(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	nulldisp_gem_object_put_pages(obj);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+struct sg_table *
+nulldisp_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj);
+	int nr_pages = obj->size >> PAGE_SHIFT;
+
+	/*
+	 * nulldisp_gem_prime_pin should have been called in which case we don't
+	 * need to call nulldisp_gem_object_get_pages.
+	 */
+	if (WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) == 0))
+		return NULL;
+
+	return drm_prime_pages_to_sg(nulldisp_obj->pages, nr_pages);
+}
+
+struct drm_gem_object *
+nulldisp_gem_prime_import_sg_table(struct drm_device *dev,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+				   struct dma_buf_attachment *attach,
+#else
+				   size_t size,
+#endif
+				   struct sg_table *sgt)
+{
+	/* No support for importing dma-bufs from other devices or drivers */
+	return NULL;
+}
+
+struct dma_buf *nulldisp_gem_prime_export(struct drm_device *dev,
+					  struct drm_gem_object *obj,
+					  int flags)
+{
+	/* Read/write access required */
+	flags |= O_RDWR;
+	return drm_gem_prime_export(dev, obj, flags);
+}
+
+void *nulldisp_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj);
+	int nr_pages = obj->size >> PAGE_SHIFT;
+
+	/*
+	 * nulldisp_gem_prime_pin should have been called in which case we don't
+	 * need to call nulldisp_gem_object_get_pages.
+	 */
+	if (WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) == 0))
+		return NULL;
+
+	return vmap(nulldisp_obj->pages, nr_pages, 0, PAGE_KERNEL);
+}
+
+void nulldisp_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	vunmap(vaddr);
+}
+
+int nulldisp_gem_prime_mmap(struct drm_gem_object *obj,
+			    struct vm_area_struct *vma)
+{
+	int err;
+
+	mutex_lock(&obj->dev->struct_mutex);
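+	/*
+	 * The return value is deliberately discarded: if no pages could be
+	 * obtained, the fault handler later reports VM_FAULT_SIGBUS.
+	 */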
+	(void) nulldisp_gem_object_get_pages(obj);
+	err = drm_gem_mmap_obj(obj, obj->size, vma);
+	mutex_unlock(&obj->dev->struct_mutex);
+
+	return err;
+}
+
+struct reservation_object *
+nulldisp_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj);
+
+	return nulldisp_obj->resv;
+}
+
+int nulldisp_gem_object_mmap_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file)
+{
+	struct drm_nulldisp_gem_mmap *args = data;
+
+	if (args->pad) {
+		DRM_ERROR("invalid pad (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	if (args->offset) {
+		DRM_ERROR("invalid offset (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	return nulldisp_gem_dumb_map_offset(file, dev, args->handle,
+					    &args->offset);
+}
+
+int nulldisp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file)
+{
+	struct drm_nulldisp_gem_cpu_prep *args = data;
+
+	struct drm_gem_object *obj;
+	struct nulldisp_gem_object *nulldisp_obj;
+	bool write = !!(args->flags & NULLDISP_GEM_CPU_PREP_WRITE);
+	bool wait = !(args->flags & NULLDISP_GEM_CPU_PREP_NOWAIT);
+	int err;
+
+	if (args->flags & ~(NULLDISP_GEM_CPU_PREP_READ |
+			    NULLDISP_GEM_CPU_PREP_WRITE |
+			    NULLDISP_GEM_CPU_PREP_NOWAIT)) {
+		DRM_ERROR("invalid flags: %#08x\n", args->flags);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj = drm_gem_object_lookup(file, args->handle);
+	if (!obj) {
+		err = -ENOENT;
+		goto exit_unlock;
+	}
+
+	nulldisp_obj = to_nulldisp_obj(obj);
+
+	if (nulldisp_obj->cpu_prep) {
+		err = -EBUSY;
+		goto exit_unref;
+	}
+
+	if (wait) {
+		long lerr;
+
+		lerr = reservation_object_wait_timeout_rcu(nulldisp_obj->resv,
+							   write,
+							   true,
+							   30 * HZ);
+
+		/* remap return value (0 indicates busy state, > 0 success) */
+		if (lerr > 0)
+			err = 0;
+		else if (!lerr)
+			err = -EBUSY;
+		else
+			err = lerr;
+	} else {
+		/* remap return value (false indicates busy state, true success) */
+		if (!reservation_object_test_signaled_rcu(nulldisp_obj->resv,
+							  write))
+			err = -EBUSY;
+		else
+			err = 0;
+	}
+
+	if (!err)
+		nulldisp_obj->cpu_prep = true;
+exit_unref:
+	drm_gem_object_unreference_unlocked(obj);
+exit_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return err;
+}
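+
+/*
+ * Illustrative only: userspace is expected to bracket direct CPU access
+ * with the prep/fini ioctls above, either waiting up to 30 s for GPU work
+ * tracked in the reservation object or polling with NOWAIT. Sketch, with
+ * hypothetical ioctl request macros (the real names live in the UAPI
+ * header nulldisp_drm.h):
+ *
+ *	struct drm_nulldisp_gem_cpu_prep prep = {
+ *		.handle = handle,
+ *		.flags = NULLDISP_GEM_CPU_PREP_READ,
+ *	};
+ *
+ *	if (!ioctl(fd, DRM_IOCTL_NULLDISP_GEM_CPU_PREP, &prep)) {
+ *		// ... read the buffer ...
+ *		struct drm_nulldisp_gem_cpu_fini fini = { .handle = handle };
+ *		ioctl(fd, DRM_IOCTL_NULLDISP_GEM_CPU_FINI, &fini);
+ *	}
+ */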
+
+int nulldisp_gem_object_cpu_fini_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file)
+{
+	struct drm_nulldisp_gem_cpu_fini *args = data;
+
+	struct drm_gem_object *obj;
+	struct nulldisp_gem_object *nulldisp_obj;
+	int err;
+
+	if (args->pad) {
+		DRM_ERROR("invalid pad (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj = drm_gem_object_lookup(file, args->handle);
+	if (!obj) {
+		err = -ENOENT;
+		goto exit_unlock;
+	}
+
+	nulldisp_obj = to_nulldisp_obj(obj);
+
+	if (!nulldisp_obj->cpu_prep) {
+		err = -EINVAL;
+		goto exit_unref;
+	}
+
+	nulldisp_obj->cpu_prep = false;
+	err = 0;
+exit_unref:
+	drm_gem_object_unreference_unlocked(obj);
+exit_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return err;
+}
+
+static int nulldisp_gem_object_create_priv(struct drm_file *file,
+					   struct drm_device *dev,
+					   u64 size,
+					   u32 *handle)
+{
+	struct nulldisp_gem_object *nulldisp_obj;
+	struct drm_gem_object *obj;
+	struct address_space *mapping;
+	int err;
+
+	nulldisp_obj = kzalloc(sizeof(*nulldisp_obj), GFP_KERNEL);
+	if (!nulldisp_obj)
+		return -ENOMEM;
+
+	nulldisp_obj->resv = &nulldisp_obj->_resv;
+	obj = &nulldisp_obj->base;
+
+	err = drm_gem_object_init(dev, obj, size);
+	if (err) {
+		kfree(nulldisp_obj);
+		return err;
+	}
+
+	mapping = file_inode(obj->filp)->i_mapping;
+	mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
+
+	/*
+	 * Initialise the reservation object before the handle is published:
+	 * nulldisp_gem_object_free() unconditionally calls
+	 * reservation_object_fini(), which must not see an uninitialised
+	 * object if drm_gem_handle_create() fails (or if another thread
+	 * looks up the new handle before this function returns).
+	 */
+	reservation_object_init(nulldisp_obj->resv);
+
+	err = drm_gem_handle_create(file, obj, handle);
+
+	drm_gem_object_unreference_unlocked(obj);
+	return err;
+}
+
+int nulldisp_gem_object_create_ioctl(struct drm_device *dev,
+				     void *data,
+				     struct drm_file *file)
+{
+	struct drm_nulldisp_gem_create *args = data;
+	u32 handle;
+	int err;
+	u64 aligned_size;
+
+	if (args->flags) {
+		DRM_ERROR("invalid flags: %#08x\n", args->flags);
+		return -EINVAL;
+	}
+
+	if (args->handle) {
+		DRM_ERROR("invalid handle (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	aligned_size = PAGE_ALIGN(args->size);
+
+	err = nulldisp_gem_object_create_priv(file, dev, aligned_size, &handle);
+	if (!err)
+		args->handle = handle;
+
+	return err;
+}
+
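+/*
+ * Example of the arithmetic below: a 1920x1080 request with bpp = 32
+ * gives pitch = 1920 * (32 / 8) = 7680 bytes and
+ * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes (2025 pages of 4 KiB).
+ */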
+int nulldisp_gem_dumb_create(struct drm_file *file,
+			     struct drm_device *dev,
+			     struct drm_mode_create_dumb *args)
+{
+	u32 handle;
+	u32 pitch;
+	size_t size;
+	int err;
+
+	pitch = args->width * (ALIGN(args->bpp, 8) >> 3);
+	size = PAGE_ALIGN(pitch * args->height);
+
+	err = nulldisp_gem_object_create_priv(file, dev, size, &handle);
+	if (!err) {
+		args->handle = handle;
+		args->pitch = pitch;
+		args->size = size;
+	}
+
+	return err;
+}
+
+int nulldisp_gem_dumb_map_offset(struct drm_file *file,
+				 struct drm_device *dev,
+				 uint32_t handle,
+				 uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int err;
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj = drm_gem_object_lookup(file, handle);
+	if (!obj) {
+		err = -ENOENT;
+		goto exit_unlock;
+	}
+
+	err = drm_gem_create_mmap_offset(obj);
+	if (err)
+		goto exit_obj_unref;
+
+	*offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+exit_obj_unref:
+	drm_gem_object_unreference_unlocked(obj);
+exit_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return err;
+}
+
+struct reservation_object *nulldisp_gem_get_resv(struct drm_gem_object *obj)
+{
+	return to_nulldisp_obj(obj)->resv;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_gem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_gem.h
new file mode 100644
index 0000000..f7f56e9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_gem.h
@@ -0,0 +1,128 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__DRM_NULLDISP_GEM_H__)
+#define __DRM_NULLDISP_GEM_H__
+
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+#include <drm/drm_gem.h>
+#endif
+
+struct page **nulldisp_gem_object_get_pages(struct drm_gem_object *obj);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+int nulldisp_gem_object_vm_fault(struct vm_fault *vmf);
+#else
+int nulldisp_gem_object_vm_fault(struct vm_area_struct *vma,
+				 struct vm_fault *vmf);
+#endif
+
+void nulldisp_gem_vm_open(struct vm_area_struct *vma);
+
+void nulldisp_gem_vm_close(struct vm_area_struct *vma);
+
+void nulldisp_gem_object_free(struct drm_gem_object *obj);
+
+int nulldisp_gem_prime_pin(struct drm_gem_object *obj);
+
+void nulldisp_gem_prime_unpin(struct drm_gem_object *obj);
+
+struct sg_table *nulldisp_gem_prime_get_sg_table(struct drm_gem_object *obj);
+
+struct drm_gem_object *
+nulldisp_gem_prime_import_sg_table(struct drm_device *dev,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+				   struct dma_buf_attachment *attach,
+#else
+				   size_t size,
+#endif
+				   struct sg_table *sgt);
+
+struct dma_buf *nulldisp_gem_prime_export(struct drm_device *dev,
+					  struct drm_gem_object *obj,
+					  int flags);
+
+void *nulldisp_gem_prime_vmap(struct drm_gem_object *obj);
+
+void nulldisp_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+int nulldisp_gem_prime_mmap(struct drm_gem_object *obj,
+			    struct vm_area_struct *vma);
+
+struct reservation_object *
+nulldisp_gem_prime_res_obj(struct drm_gem_object *obj);
+
+int nulldisp_gem_dumb_create(struct drm_file *file,
+			     struct drm_device *dev,
+			     struct drm_mode_create_dumb *args);
+
+int nulldisp_gem_dumb_map_offset(struct drm_file *file,
+				 struct drm_device *dev,
+				 uint32_t handle,
+				 uint64_t *offset);
+
+/* internal interfaces */
+struct reservation_object *nulldisp_gem_get_resv(struct drm_gem_object *obj);
+
+int nulldisp_gem_object_mmap_ioctl(struct drm_device *dev,
+				   void *data,
+				   struct drm_file *file);
+
+int nulldisp_gem_object_cpu_prep_ioctl(struct drm_device *dev,
+				       void *data,
+				       struct drm_file *file);
+
+int nulldisp_gem_object_cpu_fini_ioctl(struct drm_device *dev,
+				       void *data,
+				       struct drm_file *file);
+
+int nulldisp_gem_object_create_ioctl(struct drm_device *dev,
+				     void *data,
+				     struct drm_file *file);
+
+#endif	/* !defined(__DRM_NULLDISP_GEM_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.c
new file mode 100644
index 0000000..e383aa6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.c
@@ -0,0 +1,569 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/types.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <linux/bug.h>
+
+#include "drm_nulldisp_drv.h"
+#include "drm_nulldisp_netlink.h"
+#include "kernel_compatibility.h"
+
+#include "netlink.h"
+
+struct nlpvrdpy {
+	atomic_t connected;
+	struct net *net;
+	u32 dst_portid;
+	struct drm_device *dev;
+	nlpvrdpy_disconnect_cb disconnect_cb;
+	void *disconnect_cb_data;
+	nlpvrdpy_flipped_cb flipped_cb;
+	void *flipped_cb_data;
+	nlpvrdpy_copied_cb copied_cb;
+	void *copied_cb_data;
+	struct mutex mutex;
+	struct list_head nl_list;
+};
+#define NLPVRDPY_MINOR(nlpvrdpy) ((unsigned)((nlpvrdpy)->dev->primary->index))
+
+/* Command internal flags */
+#define	NLPVRDPY_CIF_NLPVRDPY_NOT_CONNECTED	0x00000001
+#define	NLPVRDPY_CIF_NLPVRDPY			0x00000002
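+/*
+ * As interpreted by nlpvrdpy_pre_cmd(): NOT_CONNECTED marks admin-only
+ * commands that may arrive before a client has connected, while NLPVRDPY
+ * marks commands that require an established connection from the same
+ * netlink port and network namespace.
+ */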
+
+static atomic_t nlpvrdpy_started = ATOMIC_INIT(0);
+static DECLARE_COMPLETION(nlpvrdpy_wait_for_start);
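+/*
+ * Commands that may arrive before any client has connected block on this
+ * completion until nlpvrdpy_start() signals that the driver is ready.
+ */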
+
+static LIST_HEAD(nlpvrdpy_list);
+static DEFINE_MUTEX(nlpvrdpy_list_mutex);
+
+static inline void nlpvrdpy_lock(struct nlpvrdpy *nlpvrdpy)
+{
+	mutex_lock(&nlpvrdpy->mutex);
+}
+
+static inline void nlpvrdpy_unlock(struct nlpvrdpy *nlpvrdpy)
+{
+	mutex_unlock(&nlpvrdpy->mutex);
+}
+
+struct nlpvrdpy *nlpvrdpy_create(struct drm_device *dev,
+					nlpvrdpy_disconnect_cb disconnect_cb,
+					void *disconnect_cb_data,
+					nlpvrdpy_flipped_cb flipped_cb,
+					void *flipped_cb_data,
+					nlpvrdpy_copied_cb copied_cb,
+					void *copied_cb_data)
+{
+	struct nlpvrdpy *nlpvrdpy = kzalloc(sizeof(*nlpvrdpy), GFP_KERNEL);
+
+	if (!nlpvrdpy)
+		return NULL;
+
+	mutex_init(&nlpvrdpy->mutex);
+	INIT_LIST_HEAD(&nlpvrdpy->nl_list);
+
+	atomic_set(&nlpvrdpy->connected, 0);
+
+	nlpvrdpy->dev = dev;
+	nlpvrdpy->disconnect_cb = disconnect_cb;
+	nlpvrdpy->disconnect_cb_data = disconnect_cb_data;
+	nlpvrdpy->flipped_cb = flipped_cb;
+	nlpvrdpy->flipped_cb_data = flipped_cb_data;
+	nlpvrdpy->copied_cb = copied_cb;
+	nlpvrdpy->copied_cb_data = copied_cb_data;
+
+	mutex_lock(&nlpvrdpy_list_mutex);
+	list_add_tail(&nlpvrdpy->nl_list, &nlpvrdpy_list);
+	mutex_unlock(&nlpvrdpy_list_mutex);
+
+	return nlpvrdpy;
+}
+
+void nlpvrdpy_destroy(struct nlpvrdpy *nlpvrdpy)
+{
+	if (!nlpvrdpy)
+		return;
+
+	mutex_lock(&nlpvrdpy_list_mutex);
+	nlpvrdpy_lock(nlpvrdpy);
+	list_del(&nlpvrdpy->nl_list);
+	nlpvrdpy_unlock(nlpvrdpy);
+	mutex_unlock(&nlpvrdpy_list_mutex);
+
+	mutex_destroy(&nlpvrdpy->mutex);
+
+	kfree(nlpvrdpy);
+}
+
+static struct nlpvrdpy *nlpvrdpy_lookup(u32 minor)
+{
+	struct nlpvrdpy *nlpvrdpy = NULL;
+	struct nlpvrdpy *iter;
+
+	mutex_lock(&nlpvrdpy_list_mutex);
+	list_for_each_entry(iter, &nlpvrdpy_list, nl_list) {
+		if (NLPVRDPY_MINOR(iter) == minor) {
+			nlpvrdpy = iter;
+			nlpvrdpy_lock(nlpvrdpy);
+			break;
+		}
+	}
+	mutex_unlock(&nlpvrdpy_list_mutex);
+
+	return nlpvrdpy;
+}
+
+static int nlpvrdpy_pre_cmd(const struct genl_ops *ops,
+				struct sk_buff *skb,
+				struct genl_info *info)
+{
+	struct nlattr **attrs = info->attrs;
+	struct nlpvrdpy *nlpvrdpy = NULL;
+	int ret;
+
+	if (ops->internal_flags & NLPVRDPY_CIF_NLPVRDPY_NOT_CONNECTED) {
+		if (!(ops->flags & GENL_ADMIN_PERM))
+			return -EINVAL;
+
+		if (!atomic_read(&nlpvrdpy_started))
+			wait_for_completion(&nlpvrdpy_wait_for_start);
+	}
+
+	if (ops->internal_flags & (NLPVRDPY_CIF_NLPVRDPY_NOT_CONNECTED |
+					NLPVRDPY_CIF_NLPVRDPY)) {
+		u32 minor;
+
+		if (!attrs[NLPVRDPY_ATTR_MINOR])
+			return -EINVAL;
+
+		minor = nla_get_u32(attrs[NLPVRDPY_ATTR_MINOR]);
+
+		nlpvrdpy = nlpvrdpy_lookup(minor);
+		if (!nlpvrdpy)
+			return -ENODEV;
+
+		if (ops->internal_flags & NLPVRDPY_CIF_NLPVRDPY) {
+			if (!atomic_read(&nlpvrdpy->connected)) {
+				ret = -ENOTCONN;
+				goto err_unlock;
+			}
+			if ((nlpvrdpy->net != genl_info_net(info)) ||
+				(nlpvrdpy->dst_portid != info->snd_portid)) {
+				ret = -EPROTO;
+				goto err_unlock;
+			}
+		}
+
+		info->user_ptr[0] = nlpvrdpy;
+	}
+
+	ret = 0;
+
+err_unlock:
+	if (nlpvrdpy)
+		nlpvrdpy_unlock(nlpvrdpy);
+	return ret;
+}
+
+static void nlpvrdpy_post_cmd(const struct genl_ops *ops,
+				struct sk_buff *skb,
+				struct genl_info *info)
+{
+}
+
+static struct genl_family nlpvrdpy_family = {
+	.name = "nlpvrdpy",
+	.version = 1,
+	.maxattr = NLPVRDPY_ATTR_MAX,
+	.pre_doit = &nlpvrdpy_pre_cmd,
+	.post_doit = &nlpvrdpy_post_cmd
+};
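+/*
+ * The ops table, its size and the owning module are filled in by
+ * nlpvrdpy_register() immediately before genl_register_family() is
+ * called, matching the modern (v4.10+) genetlink API in which a family
+ * carries its own ops.
+ */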
+
+/* Must be called with the struct nlpvrdpy mutex held */
+static int nlpvrdpy_send_msg_locked(struct nlpvrdpy *nlpvrdpy,
+					struct sk_buff *msg)
+{
+	int err;
+
+	if (atomic_read(&nlpvrdpy->connected)) {
+		err = genlmsg_unicast(nlpvrdpy->net, msg, nlpvrdpy->dst_portid);
+		if (err == -ECONNREFUSED)
+			atomic_set(&nlpvrdpy->connected, 0);
+	} else {
+		err = -ENOTCONN;
+		nlmsg_free(msg);
+	}
+
+	return err;
+}
+
+static int nlpvrdpy_send_msg(struct nlpvrdpy *nlpvrdpy, struct sk_buff *msg)
+{
+	int err;
+
+	nlpvrdpy_lock(nlpvrdpy);
+	err = nlpvrdpy_send_msg_locked(nlpvrdpy, msg);
+	nlpvrdpy_unlock(nlpvrdpy);
+
+	return err;
+}
+
+void nlpvrdpy_send_disconnect(struct nlpvrdpy *nlpvrdpy)
+{
+	struct sk_buff *msg;
+	void *hdr;
+	int err;
+
+	if (!atomic_read(&nlpvrdpy->connected))
+		return;
+
+	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!msg)
+		return;
+
+	hdr = genlmsg_put(msg, nlpvrdpy->dst_portid, 0,
+				&nlpvrdpy_family, 0, NLPVRDPY_CMD_DISCONNECT);
+	if (!hdr)
+		goto err_msg_free;
+
+	err = nla_put_u32(msg, NLPVRDPY_ATTR_MINOR, NLPVRDPY_MINOR(nlpvrdpy));
+	if (err)
+		goto err_msg_free;
+
+	genlmsg_end(msg, hdr);
+
+	nlpvrdpy_lock(nlpvrdpy);
+
+	(void) nlpvrdpy_send_msg_locked(nlpvrdpy, msg);
+
+	atomic_set(&nlpvrdpy->connected, 0);
+	nlpvrdpy->net = NULL;
+	nlpvrdpy->dst_portid = 0;
+
+	nlpvrdpy_unlock(nlpvrdpy);
+
+	return;
+
+err_msg_free:
+	nlmsg_free(msg);
+}
+
+static int nlpvrdpy_put_fb_attributes(struct sk_buff *msg,
+                                      struct drm_framebuffer *fb,
+                                      struct nlpvrdpy *nlpvrdpy,
+                                      u64 *plane_addr,
+                                      u64 *plane_size,
+                                      u32 layout,
+                                      u32 fbc)
+{
+#define RETURN_ON_ERROR(f) \
+	do { \
+		int err = (f); \
+		if (err) { \
+			pr_err("%s: command failed: %s\n", __func__, #f); \
+			return err; \
+		} \
+	} while (0)
+
+	const int num_planes = nulldisp_drm_fb_num_planes(fb);
+
+	RETURN_ON_ERROR(nla_put_u32(msg, NLPVRDPY_ATTR_MINOR, NLPVRDPY_MINOR(nlpvrdpy)));
+
+	RETURN_ON_ERROR(nla_put_u8(msg, NLPVRDPY_ATTR_NUM_PLANES, num_planes));
+
+	RETURN_ON_ERROR(nla_put_u32(msg, NLPVRDPY_ATTR_WIDTH,  fb->width));
+	RETURN_ON_ERROR(nla_put_u32(msg, NLPVRDPY_ATTR_HEIGHT, fb->height));
+	RETURN_ON_ERROR(nla_put_u32(msg, NLPVRDPY_ATTR_PIXFMT, nulldisp_drm_fb_format(fb)));
+
+	/*
+	 * TODO YUV: get the actual CSC and BPP
+	 * for now only 8-bit BT601 short range is supported
+	 */
+	RETURN_ON_ERROR(nla_put_u8(msg, NLPVRDPY_ATTR_YUV_CSC, 1));  /* IMG_COLORSPACE_BT601_CONFORMANT_RANGE */
+	RETURN_ON_ERROR(nla_put_u8(msg, NLPVRDPY_ATTR_YUV_BPP, 8));  /* 8-bit per sample */
+
+	RETURN_ON_ERROR(nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE0_ADDR,    plane_addr[0], NLPVRDPY_ATTR_PAD));
+	RETURN_ON_ERROR(nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE0_SIZE,    plane_size[0], NLPVRDPY_ATTR_PAD));
+	RETURN_ON_ERROR(nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE0_OFFSET, fb->offsets[0], NLPVRDPY_ATTR_PAD));
+	RETURN_ON_ERROR(nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE0_PITCH,  fb->pitches[0], NLPVRDPY_ATTR_PAD));
+
+	if (num_planes > 1) {
+		RETURN_ON_ERROR(nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE1_ADDR,    plane_addr[1], NLPVRDPY_ATTR_PAD));
+		RETURN_ON_ERROR(nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE1_SIZE,    plane_size[1], NLPVRDPY_ATTR_PAD));
+		RETURN_ON_ERROR(nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE1_OFFSET, fb->offsets[1], NLPVRDPY_ATTR_PAD));
+		RETURN_ON_ERROR(nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE1_PITCH,  fb->pitches[1], NLPVRDPY_ATTR_PAD));
+	}
+
+	if (num_planes > 2) {
+		RETURN_ON_ERROR(nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE2_ADDR,    plane_addr[2], NLPVRDPY_ATTR_PAD));
+		RETURN_ON_ERROR(nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE2_SIZE,    plane_size[2], NLPVRDPY_ATTR_PAD));
+		RETURN_ON_ERROR(nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE2_OFFSET, fb->offsets[2], NLPVRDPY_ATTR_PAD));
+		RETURN_ON_ERROR(nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE2_PITCH,  fb->pitches[2], NLPVRDPY_ATTR_PAD));
+	}
+
+	WARN_ON_ONCE(num_planes > NLPVRDPY_MAX_NUM_PLANES);
+
+	RETURN_ON_ERROR(nla_put_u32(msg, NLPVRDPY_ATTR_LAYOUT, layout));
+
+	RETURN_ON_ERROR(nla_put_u32(msg, NLPVRDPY_ATTR_FBC, fbc));
+
+	return 0;
+#undef RETURN_ON_ERROR
+}
+
+int nlpvrdpy_send_flip(struct nlpvrdpy *nlpvrdpy,
+			struct drm_framebuffer *fb,
+			u64 *plane_addr,
+			u64 *plane_size,
+			u32 layout,
+			u32 fbc)
+{
+	struct sk_buff *msg;
+	void *hdr;
+	int err;
+
+	if (!atomic_read(&nlpvrdpy->connected))
+		return -ENOTCONN;
+
+	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	hdr = genlmsg_put(msg, nlpvrdpy->dst_portid, 0,
+				&nlpvrdpy_family, 0, NLPVRDPY_CMD_FLIP);
+	if (!hdr) {
+		err = -ENOMEM;
+		goto err_msg_free;
+	}
+
+	err = nlpvrdpy_put_fb_attributes(msg, fb, nlpvrdpy, plane_addr,
+						plane_size, layout, fbc);
+	if (err)
+		goto err_msg_free;
+
+	genlmsg_end(msg, hdr);
+
+	return nlpvrdpy_send_msg(nlpvrdpy, msg);
+
+err_msg_free:
+	nlmsg_free(msg);
+	return err;
+}
+
+int nlpvrdpy_send_copy(struct nlpvrdpy *nlpvrdpy,
+			struct drm_framebuffer *fb,
+			u64 *plane_addr,
+			u64 *plane_size,
+			u32 layout,
+			u32 fbc)
+{
+	struct sk_buff *msg;
+	void *hdr;
+	int err;
+
+	if (!atomic_read(&nlpvrdpy->connected))
+		return -ENOTCONN;
+
+	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	hdr = genlmsg_put(msg, nlpvrdpy->dst_portid, 0,
+				&nlpvrdpy_family, 0, NLPVRDPY_CMD_COPY);
+	if (!hdr) {
+		err = -ENOMEM;
+		goto err_msg_free;
+	}
+
+	err = nlpvrdpy_put_fb_attributes(msg, fb, nlpvrdpy, plane_addr,
+						plane_size, layout, fbc);
+	if (err)
+		goto err_msg_free;
+
+	genlmsg_end(msg, hdr);
+
+	return nlpvrdpy_send_msg(nlpvrdpy, msg);
+
+err_msg_free:
+	nlmsg_free(msg);
+	return err;
+}
+
+static int nlpvrdpy_cmd_connect(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlpvrdpy *nlpvrdpy = info->user_ptr[0];
+	struct sk_buff *msg;
+	void *hdr;
+	int err;
+
+	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	hdr = genlmsg_put_reply(msg, info, &nlpvrdpy_family,
+						0, NLPVRDPY_CMD_CONNECTED);
+	if (!hdr) {
+		err = -ENOMEM;
+		goto err_msg_free;
+	}
+
+	err = nla_put_string(msg, NLPVRDPY_ATTR_NAME,
+				nlpvrdpy->dev->driver->name);
+	if (err)
+		goto err_msg_free;
+
+	genlmsg_end(msg, hdr);
+
+	err = genlmsg_reply(msg, info);
+
+	if (!err) {
+		nlpvrdpy_lock(nlpvrdpy);
+
+		nlpvrdpy->net = genl_info_net(info);
+		nlpvrdpy->dst_portid = info->snd_portid;
+		atomic_set(&nlpvrdpy->connected, 1);
+
+		nlpvrdpy_unlock(nlpvrdpy);
+	}
+
+	return err;
+
+err_msg_free:
+	nlmsg_free(msg);
+	return err;
+}
+
+static int nlpvrdpy_cmd_disconnect(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlpvrdpy *nlpvrdpy = info->user_ptr[0];
+
+	atomic_set(&nlpvrdpy->connected, 0);
+
+	if (nlpvrdpy->disconnect_cb)
+		nlpvrdpy->disconnect_cb(nlpvrdpy->disconnect_cb_data);
+
+	return 0;
+}
+
+static int nlpvrdpy_cmd_flipped(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlpvrdpy *nlpvrdpy = info->user_ptr[0];
+
+	return (nlpvrdpy->flipped_cb) ?
+			nlpvrdpy->flipped_cb(nlpvrdpy->flipped_cb_data) :
+			0;
+}
+
+static int nlpvrdpy_cmd_copied(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlpvrdpy *nlpvrdpy = info->user_ptr[0];
+
+	return (nlpvrdpy->copied_cb) ?
+			nlpvrdpy->copied_cb(nlpvrdpy->copied_cb_data) :
+			0;
+}
+
+static struct genl_ops nlpvrdpy_ops[] = {
+	{
+		.cmd = NLPVRDPY_CMD_CONNECT,
+		.policy = nlpvrdpy_policy,
+		.doit = nlpvrdpy_cmd_connect,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NLPVRDPY_CIF_NLPVRDPY_NOT_CONNECTED
+	},
+	{
+		.cmd = NLPVRDPY_CMD_DISCONNECT,
+		.policy = nlpvrdpy_policy,
+		.doit = nlpvrdpy_cmd_disconnect,
+		.flags = 0,
+		.internal_flags = NLPVRDPY_CIF_NLPVRDPY
+	},
+	{
+		.cmd = NLPVRDPY_CMD_FLIPPED,
+		.policy = nlpvrdpy_policy,
+		.doit = nlpvrdpy_cmd_flipped,
+		.flags = 0,
+		.internal_flags = NLPVRDPY_CIF_NLPVRDPY
+	},
+	{
+		.cmd = NLPVRDPY_CMD_COPIED,
+		.policy = nlpvrdpy_policy,
+		.doit = nlpvrdpy_cmd_copied,
+		.flags = 0,
+		.internal_flags = NLPVRDPY_CIF_NLPVRDPY
+	}
+};
+
+int nlpvrdpy_register(void)
+{
+	nlpvrdpy_family.module = THIS_MODULE;
+	nlpvrdpy_family.ops = nlpvrdpy_ops;
+	nlpvrdpy_family.n_ops = ARRAY_SIZE(nlpvrdpy_ops);
+
+	return genl_register_family(&nlpvrdpy_family);
+}
+
+void nlpvrdpy_start(void)
+{
+	if (!atomic_read(&nlpvrdpy_started)) {
+		atomic_set(&nlpvrdpy_started, 1);
+		complete_all(&nlpvrdpy_wait_for_start);
+	}
+}
+
+int nlpvrdpy_unregister(void)
+{
+	nlpvrdpy_start();
+
+	return genl_unregister_family(&nlpvrdpy_family);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.h
new file mode 100644
index 0000000..0a9d6ee
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/drm_nulldisp_netlink.h
@@ -0,0 +1,87 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __DRM_NULLDISP_NETLINK_H__
+#define __DRM_NULLDISP_NETLINK_H__
+
+#include <linux/types.h>
+#include <linux/version.h>
+
+/* Forward declarations so this header does not depend on drmP.h */
+struct drm_device;
+struct drm_framebuffer;
+struct nlpvrdpy;
+
+typedef void (*nlpvrdpy_disconnect_cb)(void *data);
+typedef int (*nlpvrdpy_flipped_cb)(void *data);
+typedef int (*nlpvrdpy_copied_cb)(void *data);
+
+struct nlpvrdpy *nlpvrdpy_create(struct drm_device *dev,
+					nlpvrdpy_disconnect_cb disconnect_cb,
+					void *disconnect_cb_data,
+					nlpvrdpy_flipped_cb flipped_cb,
+					void *flipped_cb_data,
+					nlpvrdpy_copied_cb copied_cb,
+					void *copied_cb_data);
+
+void nlpvrdpy_destroy(struct nlpvrdpy *nlpvrdpy);
+
+int nlpvrdpy_send_flip(struct nlpvrdpy *nlpvrdpy,
+			struct drm_framebuffer *fb,
+			u64 *plane_addr,
+			u64 *plane_size,
+			u32 layout,
+			u32 fbc);
+
+int nlpvrdpy_send_copy(struct nlpvrdpy *nlpvrdpy,
+			struct drm_framebuffer *fb,
+			u64 *plane_addr,
+			u64 *plane_size,
+			u32 layout,
+			u32 fbc);
+
+void nlpvrdpy_send_disconnect(struct nlpvrdpy *nlpvrdpy);
+
+int nlpvrdpy_register(void);
+
+void nlpvrdpy_start(void);
+
+int nlpvrdpy_unregister(void);
+
+#endif /* __DRM_NULLDISP_NETLINK_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/plato/pdp2_mmu_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/plato/pdp2_mmu_regs.h
new file mode 100644
index 0000000..6164c58
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/plato/pdp2_mmu_regs.h
@@ -0,0 +1,764 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#ifndef _PDP2_MMU_REGS_H
+#define _PDP2_MMU_REGS_H
+
+/* Hardware register definitions */
+
+#define PDP_BIF_DIR_BASE_ADDR_OFFSET		(0x0020)
+#define PDP_BIF_DIR_BASE_ADDR_STRIDE		(4)
+#define PDP_BIF_DIR_BASE_ADDR_NO_ENTRIES		(4)
+
+/* PDP_BIF, DIR_BASE_ADDR, MMU_DIR_BASE_ADDR
+Base address in physical memory for MMU Directory n Entries. When
+MMU_ENABLE_EXT_ADDRESSING is '1', bits 31:0 are assigned to address
+31+EXT_ADDR_RANGE:0+EXT_ADDR_RANGE, but any address offset within a page
+is forced to 0. When MMU_ENABLE_EXT_ADDRESSING is '0', bits 31:12 are
+assigned to address 31:12.
+*/
+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_MASK		(0xFFFFFFFF)
+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_SHIFT		(0)
+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_LENGTH		(32)
+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_SIGNED_FIELD	IMG_FALSE
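+
+/*
+ * Illustrative only: fields described by the *_MASK/*_SHIFT pairs in this
+ * file are typically accessed with helpers along these lines (the helper
+ * names are hypothetical, not part of this header):
+ *
+ *	#define PDP_FIELD_GET(val, field) \
+ *		(((val) & field##_MASK) >> field##_SHIFT)
+ *	#define PDP_FIELD_SET(val, field, x) \
+ *		(((val) & ~field##_MASK) | \
+ *		 (((x) << field##_SHIFT) & field##_MASK))
+ */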
+
+#define PDP_BIF_TILE_CFG_OFFSET		(0x0040)
+#define PDP_BIF_TILE_CFG_STRIDE		(4)
+#define PDP_BIF_TILE_CFG_NO_ENTRIES		(4)
+
+/* PDP_BIF, TILE_CFG, TILE_128INTERLEAVE
+*/
+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_MASK		(0x00000010)
+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_LSBMASK		(0x00000001)
+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_SHIFT		(4)
+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_LENGTH		(1)
+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, TILE_CFG, TILE_ENABLE
+*/
+#define PDP_BIF_TILE_CFG_TILE_ENABLE_MASK		(0x00000008)
+#define PDP_BIF_TILE_CFG_TILE_ENABLE_LSBMASK		(0x00000001)
+#define PDP_BIF_TILE_CFG_TILE_ENABLE_SHIFT		(3)
+#define PDP_BIF_TILE_CFG_TILE_ENABLE_LENGTH		(1)
+#define PDP_BIF_TILE_CFG_TILE_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, TILE_CFG, TILE_STRIDE
+*/
+#define PDP_BIF_TILE_CFG_TILE_STRIDE_MASK		(0x00000007)
+#define PDP_BIF_TILE_CFG_TILE_STRIDE_LSBMASK		(0x00000007)
+#define PDP_BIF_TILE_CFG_TILE_STRIDE_SHIFT		(0)
+#define PDP_BIF_TILE_CFG_TILE_STRIDE_LENGTH		(3)
+#define PDP_BIF_TILE_CFG_TILE_STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_TILE_MIN_ADDR_OFFSET		(0x0050)
+#define PDP_BIF_TILE_MIN_ADDR_STRIDE		(4)
+#define PDP_BIF_TILE_MIN_ADDR_NO_ENTRIES		(4)
+
+/* PDP_BIF, TILE_MIN_ADDR, TILE_MIN_ADDR
+*/
+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_MASK		(0xFFFFFFFF)
+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_SHIFT		(0)
+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_LENGTH		(32)
+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_TILE_MAX_ADDR_OFFSET		(0x0060)
+#define PDP_BIF_TILE_MAX_ADDR_STRIDE		(4)
+#define PDP_BIF_TILE_MAX_ADDR_NO_ENTRIES		(4)
+
+/* PDP_BIF, TILE_MAX_ADDR, TILE_MAX_ADDR
+*/
+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_MASK		(0xFFFFFFFF)
+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_SHIFT		(0)
+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_LENGTH		(32)
+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_CONTROL0_OFFSET		(0x0000)
+
+/* PDP_BIF, CONTROL0, MMU_TILING_SCHEME
+*/
+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_MASK		(0x00000001)
+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_SHIFT		(0)
+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_LENGTH		(1)
+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL0, MMU_CACHE_POLICY
+*/
+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_MASK		(0x00000100)
+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_SHIFT		(8)
+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_LENGTH		(1)
+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL0, FORCE_CACHE_POLICY_BYPASS
+*/
+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_MASK		(0x00000200)
+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_SHIFT		(9)
+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_LENGTH		(1)
+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL0, STALL_ON_PROTOCOL_FAULT
+*/
+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_MASK		(0x00001000)
+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_SHIFT		(12)
+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_LENGTH		(1)
+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_CONTROL1_OFFSET		(0x0008)
+
+/* PDP_BIF, CONTROL1, MMU_FLUSH0
+*/
+#define PDP_BIF_CONTROL1_MMU_FLUSH0_MASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_FLUSH0_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_FLUSH0_SHIFT		(0)
+#define PDP_BIF_CONTROL1_MMU_FLUSH0_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_FLUSH0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_FLUSH1
+*/
+#define PDP_BIF_CONTROL1_MMU_FLUSH1_MASK		(0x00000002)
+#define PDP_BIF_CONTROL1_MMU_FLUSH1_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_FLUSH1_SHIFT		(1)
+#define PDP_BIF_CONTROL1_MMU_FLUSH1_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_FLUSH1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_FLUSH2
+*/
+#define PDP_BIF_CONTROL1_MMU_FLUSH2_MASK		(0x00000004)
+#define PDP_BIF_CONTROL1_MMU_FLUSH2_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_FLUSH2_SHIFT		(2)
+#define PDP_BIF_CONTROL1_MMU_FLUSH2_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_FLUSH2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_FLUSH3
+*/
+#define PDP_BIF_CONTROL1_MMU_FLUSH3_MASK		(0x00000008)
+#define PDP_BIF_CONTROL1_MMU_FLUSH3_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_FLUSH3_SHIFT		(3)
+#define PDP_BIF_CONTROL1_MMU_FLUSH3_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_FLUSH3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_INVALDC0
+*/
+#define PDP_BIF_CONTROL1_MMU_INVALDC0_MASK		(0x00000100)
+#define PDP_BIF_CONTROL1_MMU_INVALDC0_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_INVALDC0_SHIFT		(8)
+#define PDP_BIF_CONTROL1_MMU_INVALDC0_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_INVALDC0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_INVALDC1
+*/
+#define PDP_BIF_CONTROL1_MMU_INVALDC1_MASK		(0x00000200)
+#define PDP_BIF_CONTROL1_MMU_INVALDC1_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_INVALDC1_SHIFT		(9)
+#define PDP_BIF_CONTROL1_MMU_INVALDC1_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_INVALDC1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_INVALDC2
+*/
+#define PDP_BIF_CONTROL1_MMU_INVALDC2_MASK		(0x00000400)
+#define PDP_BIF_CONTROL1_MMU_INVALDC2_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_INVALDC2_SHIFT		(10)
+#define PDP_BIF_CONTROL1_MMU_INVALDC2_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_INVALDC2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_INVALDC3
+*/
+#define PDP_BIF_CONTROL1_MMU_INVALDC3_MASK		(0x00000800)
+#define PDP_BIF_CONTROL1_MMU_INVALDC3_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_INVALDC3_SHIFT		(11)
+#define PDP_BIF_CONTROL1_MMU_INVALDC3_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_INVALDC3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_FAULT_CLEAR
+*/
+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_MASK		(0x00010000)
+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_SHIFT		(16)
+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, PROTOCOL_FAULT_CLEAR
+*/
+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_MASK		(0x00100000)
+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_SHIFT		(20)
+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_LENGTH		(1)
+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_PAUSE_SET
+*/
+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_MASK		(0x01000000)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_SHIFT		(24)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_PAUSE_CLEAR
+*/
+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_MASK		(0x02000000)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_SHIFT		(25)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONTROL1, MMU_SOFT_RESET
+*/
+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_MASK		(0x10000000)
+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_LSBMASK		(0x00000001)
+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_SHIFT		(28)
+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_LENGTH		(1)
+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_BANK_INDEX_OFFSET		(0x0010)
+
+/* PDP_BIF, BANK_INDEX, MMU_BANK_INDEX
+*/
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_MASK		(0xC0000000)
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_LSBMASK		(0x00000003)
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SHIFT		(30)
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_LENGTH		(2)
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SIGNED_FIELD	IMG_FALSE
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_NO_REPS		(16)
+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SIZE		(2)
+
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_OFFSET		(0x0018)
+
+/* PDP_BIF, REQUEST_PRIORITY_ENABLE, CMD_PRIORITY_ENABLE
+*/
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_MASK		(0x00008000)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_LSBMASK		(0x00000001)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SHIFT		(15)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_LENGTH		(1)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SIGNED_FIELD	IMG_FALSE
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_NO_REPS		(16)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SIZE		(1)
+
+/* PDP_BIF, REQUEST_PRIORITY_ENABLE, CMD_MMU_PRIORITY_ENABLE
+*/
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_MASK		(0x00010000)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_LSBMASK		(0x00000001)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_SHIFT		(16)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_LENGTH		(1)
+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_OFFSET		(0x001C)
+
+/* PDP_BIF, REQUEST_LIMITED_THROUGHPUT, LIMITED_WORDS
+*/
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_MASK		(0x000003FF)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_LSBMASK		(0x000003FF)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_SHIFT		(0)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_LENGTH		(10)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, REQUEST_LIMITED_THROUGHPUT, REQUEST_GAP
+*/
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_MASK		(0x0FFF0000)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_LSBMASK		(0x00000FFF)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_SHIFT		(16)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_LENGTH		(12)
+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_ADDRESS_CONTROL_OFFSET		(0x0070)
+
+/* PDP_BIF, ADDRESS_CONTROL, MMU_BYPASS
+*/
+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_MASK		(0x00000001)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_LSBMASK		(0x00000001)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SHIFT		(0)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_LENGTH		(1)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, ADDRESS_CONTROL, MMU_ENABLE_EXT_ADDRESSING
+*/
+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_MASK		(0x00000010)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_LSBMASK		(0x00000001)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SHIFT		(4)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_LENGTH		(1)
+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, ADDRESS_CONTROL, UPPER_ADDRESS_FIXED
+*/
+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_MASK		(0x00FF0000)
+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_LSBMASK		(0x000000FF)
+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SHIFT		(16)
+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_LENGTH		(8)
+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_CONFIG0_OFFSET		(0x0080)
+
+/* PDP_BIF, CONFIG0, NUM_REQUESTORS
+*/
+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_MASK		(0x0000000F)
+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_LSBMASK		(0x0000000F)
+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_SHIFT		(0)
+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_LENGTH		(4)
+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, EXTENDED_ADDR_RANGE
+*/
+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_MASK		(0x000000F0)
+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_LSBMASK		(0x0000000F)
+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_SHIFT		(4)
+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_LENGTH		(4)
+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, GROUP_OVERRIDE_SIZE
+*/
+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_MASK		(0x00000700)
+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_LSBMASK		(0x00000007)
+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_SHIFT		(8)
+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_LENGTH		(3)
+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, ADDR_COHERENCY_SUPPORTED
+*/
+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_MASK		(0x00001000)
+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_SHIFT		(12)
+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_LENGTH		(1)
+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, MMU_SUPPORTED
+*/
+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_MASK		(0x00002000)
+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_SHIFT		(13)
+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_LENGTH		(1)
+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, TILE_ADDR_GRANULARITY
+*/
+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_MASK		(0x001F0000)
+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_LSBMASK		(0x0000001F)
+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_SHIFT		(16)
+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_LENGTH		(5)
+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, NO_READ_REORDER
+*/
+#define PDP_BIF_CONFIG0_NO_READ_REORDER_MASK		(0x00200000)
+#define PDP_BIF_CONFIG0_NO_READ_REORDER_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG0_NO_READ_REORDER_SHIFT		(21)
+#define PDP_BIF_CONFIG0_NO_READ_REORDER_LENGTH		(1)
+#define PDP_BIF_CONFIG0_NO_READ_REORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG0, TAGS_SUPPORTED
+*/
+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_MASK		(0xFFC00000)
+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_LSBMASK		(0x000003FF)
+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_SHIFT		(22)
+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_LENGTH		(10)
+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_CONFIG1_OFFSET		(0x0084)
+
+/* PDP_BIF, CONFIG1, PAGE_SIZE
+*/
+#define PDP_BIF_CONFIG1_PAGE_SIZE_MASK		(0x0000000F)
+#define PDP_BIF_CONFIG1_PAGE_SIZE_LSBMASK		(0x0000000F)
+#define PDP_BIF_CONFIG1_PAGE_SIZE_SHIFT		(0)
+#define PDP_BIF_CONFIG1_PAGE_SIZE_LENGTH		(4)
+#define PDP_BIF_CONFIG1_PAGE_SIZE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG1, PAGE_CACHE_ENTRIES
+*/
+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_MASK		(0x0000FF00)
+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_LSBMASK		(0x000000FF)
+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_SHIFT		(8)
+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_LENGTH		(8)
+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG1, DIR_CACHE_ENTRIES
+*/
+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_MASK		(0x001F0000)
+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_LSBMASK		(0x0000001F)
+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SHIFT		(16)
+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_LENGTH		(5)
+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG1, BANDWIDTH_COUNT_SUPPORTED
+*/
+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_MASK		(0x01000000)
+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_SHIFT		(24)
+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_LENGTH		(1)
+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG1, STALL_COUNT_SUPPORTED
+*/
+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_MASK		(0x02000000)
+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_SHIFT		(25)
+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_LENGTH		(1)
+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG1, LATENCY_COUNT_SUPPORTED
+*/
+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_MASK		(0x04000000)
+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_SHIFT		(26)
+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_LENGTH		(1)
+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, CONFIG1, SUPPORT_READ_INTERLEAVE
+*/
+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_MASK		(0x10000000)
+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_LSBMASK		(0x00000001)
+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_SHIFT		(28)
+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_LENGTH		(1)
+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_STATUS0_OFFSET		(0x0088)
+
+/* PDP_BIF, STATUS0, MMU_PF_N_RW
+*/
+#define PDP_BIF_STATUS0_MMU_PF_N_RW_MASK		(0x00000001)
+#define PDP_BIF_STATUS0_MMU_PF_N_RW_LSBMASK		(0x00000001)
+#define PDP_BIF_STATUS0_MMU_PF_N_RW_SHIFT		(0)
+#define PDP_BIF_STATUS0_MMU_PF_N_RW_LENGTH		(1)
+#define PDP_BIF_STATUS0_MMU_PF_N_RW_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, STATUS0, MMU_FAULT_ADDR
+*/
+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_MASK		(0xFFFFF000)
+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_LSBMASK		(0x000FFFFF)
+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_SHIFT		(12)
+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_LENGTH		(20)
+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_STATUS1_OFFSET		(0x008C)
+
+/* PDP_BIF, STATUS1, MMU_FAULT_REQ_STAT
+*/
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_MASK		(0x0000FFFF)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_LSBMASK		(0x0000FFFF)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_SHIFT		(0)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_LENGTH		(16)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, STATUS1, MMU_FAULT_REQ_ID
+*/
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_MASK		(0x000F0000)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_LSBMASK		(0x0000000F)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_SHIFT		(16)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_LENGTH		(4)
+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, STATUS1, MMU_FAULT_INDEX
+*/
+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_MASK		(0x03000000)
+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_LSBMASK		(0x00000003)
+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_SHIFT		(24)
+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_LENGTH		(2)
+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, STATUS1, MMU_FAULT_RNW
+*/
+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_MASK		(0x10000000)
+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_LSBMASK		(0x00000001)
+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_SHIFT		(28)
+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_LENGTH		(1)
+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_MEM_REQ_OFFSET		(0x0090)
+
+/* PDP_BIF, MEM_REQ, TAG_OUTSTANDING
+*/
+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_MASK		(0x000003FF)
+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_LSBMASK		(0x000003FF)
+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_SHIFT		(0)
+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_LENGTH		(10)
+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, MEM_REQ, EXT_WRRESP_FAULT
+*/
+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_MASK		(0x00001000)
+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_LSBMASK		(0x00000001)
+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_SHIFT		(12)
+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_LENGTH		(1)
+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, MEM_REQ, EXT_RDRESP_FAULT
+*/
+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_MASK		(0x00002000)
+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_LSBMASK		(0x00000001)
+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_SHIFT		(13)
+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_LENGTH		(1)
+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, MEM_REQ, EXT_READ_BURST_FAULT
+*/
+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_MASK		(0x00004000)
+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_LSBMASK		(0x00000001)
+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_SHIFT		(14)
+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_LENGTH		(1)
+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, MEM_REQ, INT_PROTOCOL_FAULT
+*/
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_MASK		(0x80000000)
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_LSBMASK		(0x00000001)
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SHIFT		(31)
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_LENGTH		(1)
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SIGNED_FIELD	IMG_FALSE
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_NO_REPS		(16)
+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SIZE		(1)
+
+#define PDP_BIF_MEM_EXT_OUTSTANDING_OFFSET		(0x0094)
+
+/* PDP_BIF, MEM_EXT_OUTSTANDING, READ_WORDS_OUTSTANDING
+*/
+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_MASK		(0x0000FFFF)
+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_LSBMASK		(0x0000FFFF)
+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_SHIFT		(0)
+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_LENGTH		(16)
+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_FAULT_SELECT_OFFSET		(0x00A0)
+
+/* PDP_BIF, FAULT_SELECT, MMU_FAULT_SELECT
+*/
+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_MASK		(0x0000000F)
+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_LSBMASK		(0x0000000F)
+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_SHIFT		(0)
+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_LENGTH		(4)
+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_PROTOCOL_FAULT_OFFSET		(0x00A8)
+
+/* PDP_BIF, PROTOCOL_FAULT, FAULT_PAGE_BREAK
+*/
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_MASK		(0x00000001)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_LSBMASK		(0x00000001)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_SHIFT		(0)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_LENGTH		(1)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, PROTOCOL_FAULT, FAULT_WRITE
+*/
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_MASK		(0x00000010)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_LSBMASK		(0x00000001)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_SHIFT		(4)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_LENGTH		(1)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, PROTOCOL_FAULT, FAULT_READ
+*/
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_MASK		(0x00000020)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_LSBMASK		(0x00000001)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_SHIFT		(5)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_LENGTH		(1)
+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_TOTAL_READ_REQ_OFFSET		(0x0100)
+
+/* PDP_BIF, TOTAL_READ_REQ, TOTAL_READ_REQ
+*/
+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_MASK		(0xFFFFFFFF)
+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_SHIFT		(0)
+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_LENGTH		(32)
+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_TOTAL_WRITE_REQ_OFFSET		(0x0104)
+
+/* PDP_BIF, TOTAL_WRITE_REQ, TOTAL_WRITE_REQ
+*/
+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_MASK		(0xFFFFFFFF)
+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_SHIFT		(0)
+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_LENGTH		(32)
+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_READS_LESS_64_REQ_OFFSET		(0x0108)
+
+/* PDP_BIF, READS_LESS_64_REQ, READS_LESS_64_REQ
+*/
+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_MASK		(0xFFFFFFFF)
+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_SHIFT		(0)
+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_LENGTH		(32)
+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_WRITES_LESS_64_REQ_OFFSET		(0x010C)
+
+/* PDP_BIF, WRITES_LESS_64_REQ, WRITES_LESS_64_REQ
+*/
+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_MASK		(0xFFFFFFFF)
+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_SHIFT		(0)
+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_LENGTH		(32)
+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_EXT_CMD_STALL_OFFSET		(0x0120)
+
+/* PDP_BIF, EXT_CMD_STALL, EXT_CMD_STALL
+*/
+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_MASK		(0xFFFFFFFF)
+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_SHIFT		(0)
+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_LENGTH		(32)
+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_WRITE_REQ_STALL_OFFSET		(0x0124)
+
+/* PDP_BIF, WRITE_REQ_STALL, WRITE_REQ_STALL
+*/
+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_MASK		(0xFFFFFFFF)
+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_SHIFT		(0)
+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_LENGTH		(32)
+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_MISS_STALL_OFFSET		(0x0128)
+
+/* PDP_BIF, MISS_STALL, MMU_MISS_STALL
+*/
+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_MASK		(0xFFFFFFFF)
+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_SHIFT		(0)
+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_LENGTH		(32)
+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_ADDRESS_STALL_OFFSET		(0x012C)
+
+/* PDP_BIF, ADDRESS_STALL, ADDRESS_STALL
+*/
+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_MASK		(0xFFFFFFFF)
+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_SHIFT		(0)
+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_LENGTH		(32)
+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_TAG_STALL_OFFSET		(0x0130)
+
+/* PDP_BIF, TAG_STALL, TAG_STALL
+*/
+#define PDP_BIF_TAG_STALL_TAG_STALL_MASK		(0xFFFFFFFF)
+#define PDP_BIF_TAG_STALL_TAG_STALL_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_TAG_STALL_TAG_STALL_SHIFT		(0)
+#define PDP_BIF_TAG_STALL_TAG_STALL_LENGTH		(32)
+#define PDP_BIF_TAG_STALL_TAG_STALL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_PEAK_READ_OUTSTANDING_OFFSET		(0x0140)
+
+/* PDP_BIF, PEAK_READ_OUTSTANDING, PEAK_TAG_OUTSTANDING
+*/
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_MASK		(0x000003FF)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_LSBMASK		(0x000003FF)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_SHIFT		(0)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_LENGTH		(10)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, PEAK_READ_OUTSTANDING, PEAK_READ_LATENCY
+*/
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_MASK		(0xFFFF0000)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_LSBMASK		(0x0000FFFF)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_SHIFT		(16)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_LENGTH		(16)
+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_AVERAGE_READ_LATENCY_OFFSET		(0x0144)
+
+/* PDP_BIF, AVERAGE_READ_LATENCY, AVERAGE_READ_LATENCY
+*/
+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_MASK		(0xFFFFFFFF)
+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_LSBMASK		(0xFFFFFFFF)
+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_SHIFT		(0)
+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_LENGTH		(32)
+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_STATISTICS_CONTROL_OFFSET		(0x0160)
+
+/* PDP_BIF, STATISTICS_CONTROL, BANDWIDTH_STATS_INIT
+*/
+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_MASK		(0x00000001)
+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_LSBMASK		(0x00000001)
+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_SHIFT		(0)
+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_LENGTH		(1)
+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, STATISTICS_CONTROL, STALL_STATS_INIT
+*/
+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_MASK		(0x00000002)
+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_LSBMASK		(0x00000001)
+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_SHIFT		(1)
+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_LENGTH		(1)
+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, STATISTICS_CONTROL, LATENCY_STATS_INIT
+*/
+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_MASK		(0x00000004)
+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_LSBMASK		(0x00000001)
+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_SHIFT		(2)
+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_LENGTH		(1)
+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BIF_VERSION_OFFSET		(0x01D0)
+
+/* PDP_BIF, VERSION, MMU_MAJOR_REV
+*/
+#define PDP_BIF_VERSION_MMU_MAJOR_REV_MASK		(0x00FF0000)
+#define PDP_BIF_VERSION_MMU_MAJOR_REV_LSBMASK		(0x000000FF)
+#define PDP_BIF_VERSION_MMU_MAJOR_REV_SHIFT		(16)
+#define PDP_BIF_VERSION_MMU_MAJOR_REV_LENGTH		(8)
+#define PDP_BIF_VERSION_MMU_MAJOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, VERSION, MMU_MINOR_REV
+*/
+#define PDP_BIF_VERSION_MMU_MINOR_REV_MASK		(0x0000FF00)
+#define PDP_BIF_VERSION_MMU_MINOR_REV_LSBMASK		(0x000000FF)
+#define PDP_BIF_VERSION_MMU_MINOR_REV_SHIFT		(8)
+#define PDP_BIF_VERSION_MMU_MINOR_REV_LENGTH		(8)
+#define PDP_BIF_VERSION_MMU_MINOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP_BIF, VERSION, MMU_MAINT_REV
+*/
+#define PDP_BIF_VERSION_MMU_MAINT_REV_MASK		(0x000000FF)
+#define PDP_BIF_VERSION_MMU_MAINT_REV_LSBMASK		(0x000000FF)
+#define PDP_BIF_VERSION_MMU_MAINT_REV_SHIFT		(0)
+#define PDP_BIF_VERSION_MMU_MAINT_REV_LENGTH		(8)
+#define PDP_BIF_VERSION_MMU_MAINT_REV_SIGNED_FIELD	IMG_FALSE
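+
+/*
+ * Example (illustrative only, not part of the generated interface):
+ * unpacking the three revision fields from a raw VERSION register value
+ * using the mask/shift pairs above. "version" is assumed to hold the
+ * 32-bit value read from PDP_BIF_VERSION_OFFSET.
+ *
+ *	u32 major = (version & PDP_BIF_VERSION_MMU_MAJOR_REV_MASK)
+ *			>> PDP_BIF_VERSION_MMU_MAJOR_REV_SHIFT;
+ *	u32 minor = (version & PDP_BIF_VERSION_MMU_MINOR_REV_MASK)
+ *			>> PDP_BIF_VERSION_MMU_MINOR_REV_SHIFT;
+ *	u32 maint = (version & PDP_BIF_VERSION_MMU_MAINT_REV_MASK)
+ *			>> PDP_BIF_VERSION_MMU_MAINT_REV_SHIFT;
+ */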
+
+#endif /* _PDP2_MMU_REGS_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/plato/pdp2_regs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/plato/pdp2_regs.h
new file mode 100644
index 0000000..bf85386
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/plato/pdp2_regs.h
@@ -0,0 +1,8565 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#ifndef _PDP2_REGS_H
+#define _PDP2_REGS_H
+
+/*
+ * Bitfield operations
+ * For each argument field, the following preprocessor macros must exist:
+ * field##_MASK - the bitmask covering the field, in its register position
+ * field##_SHIFT - the bit offset of the field's least significant bit
+ */
+#define PLACE_FIELD(field, val) \
+	(((u32)(val) << (field##_SHIFT)) & (field##_MASK))
+
+#define ADJ_FIELD(x, field, val) \
+	(((x) & ~(field##_MASK)) \
+	| PLACE_FIELD(field, val))
+
+#define SET_FIELD(x, field, val) \
+	(x) = ADJ_FIELD(x, field, val)
+
+#define GET_FIELD(x, field) \
+	(((x) & (field##_MASK)) >> (field##_SHIFT))
+
+/* Moves the l2 most significant bits of a field of length l1 at offset o1
+ * to offset o2 (the least significant bits are discarded)
+ */
+#define MOVE_FIELD(x, o1, l1, o2, l2) \
+	(((x) >> ((o1) + (l1) - (l2))) << (o2))
+
+#define MAX_FIELD_VALUE(field) \
+	((field##_MASK) >> (field##_SHIFT))
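+
+/*
+ * Example (illustrative only): reading and updating the GRPH1PIXFMT field
+ * of the GRPH1SURF register, defined below, with the helpers above.
+ * pdp_read/pdp_write are hypothetical stand-ins for whatever MMIO
+ * accessors the surrounding driver provides.
+ *
+ *	u32 surf = pdp_read(base + PDP_GRPH1SURF_OFFSET);
+ *	u32 fmt  = GET_FIELD(surf, PDP_GRPH1SURF_GRPH1PIXFMT);
+ *	SET_FIELD(surf, PDP_GRPH1SURF_GRPH1PIXFMT, fmt);
+ *	pdp_write(base + PDP_GRPH1SURF_OFFSET, surf);
+ */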
+
+/* Hardware register definitions */
+
+#define PDP_GRPH1SURF_OFFSET		(0x0000)
+
+/* PDP, GRPH1SURF, GRPH1PIXFMT
+*/
+#define PDP_GRPH1SURF_GRPH1PIXFMT_MASK		(0xF8000000)
+#define PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT		(27)
+#define PDP_GRPH1SURF_GRPH1PIXFMT_LENGTH		(5)
+#define PDP_GRPH1SURF_GRPH1PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USEGAMMA
+*/
+#define PDP_GRPH1SURF_GRPH1USEGAMMA_MASK		(0x04000000)
+#define PDP_GRPH1SURF_GRPH1USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_GRPH1SURF_GRPH1USEGAMMA_SHIFT		(26)
+#define PDP_GRPH1SURF_GRPH1USEGAMMA_LENGTH		(1)
+#define PDP_GRPH1SURF_GRPH1USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USECSC
+*/
+#define PDP_GRPH1SURF_GRPH1USECSC_MASK		(0x02000000)
+#define PDP_GRPH1SURF_GRPH1USECSC_LSBMASK		(0x00000001)
+#define PDP_GRPH1SURF_GRPH1USECSC_SHIFT		(25)
+#define PDP_GRPH1SURF_GRPH1USECSC_LENGTH		(1)
+#define PDP_GRPH1SURF_GRPH1USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1LUTRWCHOICE
+*/
+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_MASK		(0x01000000)
+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LSBMASK		(0x00000001)
+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SHIFT		(24)
+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LENGTH		(1)
+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USELUT
+*/
+#define PDP_GRPH1SURF_GRPH1USELUT_MASK		(0x00800000)
+#define PDP_GRPH1SURF_GRPH1USELUT_LSBMASK		(0x00000001)
+#define PDP_GRPH1SURF_GRPH1USELUT_SHIFT		(23)
+#define PDP_GRPH1SURF_GRPH1USELUT_LENGTH		(1)
+#define PDP_GRPH1SURF_GRPH1USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2SURF_OFFSET		(0x0004)
+
+/* PDP, GRPH2SURF, GRPH2PIXFMT
+*/
+#define PDP_GRPH2SURF_GRPH2PIXFMT_MASK		(0xF8000000)
+#define PDP_GRPH2SURF_GRPH2PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT		(27)
+#define PDP_GRPH2SURF_GRPH2PIXFMT_LENGTH		(5)
+#define PDP_GRPH2SURF_GRPH2PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USEGAMMA
+*/
+#define PDP_GRPH2SURF_GRPH2USEGAMMA_MASK		(0x04000000)
+#define PDP_GRPH2SURF_GRPH2USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_GRPH2SURF_GRPH2USEGAMMA_SHIFT		(26)
+#define PDP_GRPH2SURF_GRPH2USEGAMMA_LENGTH		(1)
+#define PDP_GRPH2SURF_GRPH2USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USECSC
+*/
+#define PDP_GRPH2SURF_GRPH2USECSC_MASK		(0x02000000)
+#define PDP_GRPH2SURF_GRPH2USECSC_LSBMASK		(0x00000001)
+#define PDP_GRPH2SURF_GRPH2USECSC_SHIFT		(25)
+#define PDP_GRPH2SURF_GRPH2USECSC_LENGTH		(1)
+#define PDP_GRPH2SURF_GRPH2USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2LUTRWCHOICE
+*/
+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_MASK		(0x01000000)
+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LSBMASK		(0x00000001)
+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SHIFT		(24)
+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LENGTH		(1)
+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USELUT
+*/
+#define PDP_GRPH2SURF_GRPH2USELUT_MASK		(0x00800000)
+#define PDP_GRPH2SURF_GRPH2USELUT_LSBMASK		(0x00000001)
+#define PDP_GRPH2SURF_GRPH2USELUT_SHIFT		(23)
+#define PDP_GRPH2SURF_GRPH2USELUT_LENGTH		(1)
+#define PDP_GRPH2SURF_GRPH2USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3SURF_OFFSET		(0x0008)
+
+/* PDP, GRPH3SURF, GRPH3PIXFMT
+*/
+#define PDP_GRPH3SURF_GRPH3PIXFMT_MASK		(0xF8000000)
+#define PDP_GRPH3SURF_GRPH3PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_GRPH3SURF_GRPH3PIXFMT_SHIFT		(27)
+#define PDP_GRPH3SURF_GRPH3PIXFMT_LENGTH		(5)
+#define PDP_GRPH3SURF_GRPH3PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USEGAMMA
+*/
+#define PDP_GRPH3SURF_GRPH3USEGAMMA_MASK		(0x04000000)
+#define PDP_GRPH3SURF_GRPH3USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_GRPH3SURF_GRPH3USEGAMMA_SHIFT		(26)
+#define PDP_GRPH3SURF_GRPH3USEGAMMA_LENGTH		(1)
+#define PDP_GRPH3SURF_GRPH3USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USECSC
+*/
+#define PDP_GRPH3SURF_GRPH3USECSC_MASK		(0x02000000)
+#define PDP_GRPH3SURF_GRPH3USECSC_LSBMASK		(0x00000001)
+#define PDP_GRPH3SURF_GRPH3USECSC_SHIFT		(25)
+#define PDP_GRPH3SURF_GRPH3USECSC_LENGTH		(1)
+#define PDP_GRPH3SURF_GRPH3USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3LUTRWCHOICE
+*/
+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_MASK		(0x01000000)
+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LSBMASK		(0x00000001)
+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SHIFT		(24)
+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LENGTH		(1)
+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USELUT
+*/
+#define PDP_GRPH3SURF_GRPH3USELUT_MASK		(0x00800000)
+#define PDP_GRPH3SURF_GRPH3USELUT_LSBMASK		(0x00000001)
+#define PDP_GRPH3SURF_GRPH3USELUT_SHIFT		(23)
+#define PDP_GRPH3SURF_GRPH3USELUT_LENGTH		(1)
+#define PDP_GRPH3SURF_GRPH3USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4SURF_OFFSET		(0x000C)
+
+/* PDP, GRPH4SURF, GRPH4PIXFMT
+*/
+#define PDP_GRPH4SURF_GRPH4PIXFMT_MASK		(0xF8000000)
+#define PDP_GRPH4SURF_GRPH4PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT		(27)
+#define PDP_GRPH4SURF_GRPH4PIXFMT_LENGTH		(5)
+#define PDP_GRPH4SURF_GRPH4PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USEGAMMA
+*/
+#define PDP_GRPH4SURF_GRPH4USEGAMMA_MASK		(0x04000000)
+#define PDP_GRPH4SURF_GRPH4USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_GRPH4SURF_GRPH4USEGAMMA_SHIFT		(26)
+#define PDP_GRPH4SURF_GRPH4USEGAMMA_LENGTH		(1)
+#define PDP_GRPH4SURF_GRPH4USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USECSC
+*/
+#define PDP_GRPH4SURF_GRPH4USECSC_MASK		(0x02000000)
+#define PDP_GRPH4SURF_GRPH4USECSC_LSBMASK		(0x00000001)
+#define PDP_GRPH4SURF_GRPH4USECSC_SHIFT		(25)
+#define PDP_GRPH4SURF_GRPH4USECSC_LENGTH		(1)
+#define PDP_GRPH4SURF_GRPH4USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4LUTRWCHOICE
+*/
+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_MASK		(0x01000000)
+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LSBMASK		(0x00000001)
+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SHIFT		(24)
+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LENGTH		(1)
+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USELUT
+*/
+#define PDP_GRPH4SURF_GRPH4USELUT_MASK		(0x00800000)
+#define PDP_GRPH4SURF_GRPH4USELUT_LSBMASK		(0x00000001)
+#define PDP_GRPH4SURF_GRPH4USELUT_SHIFT		(23)
+#define PDP_GRPH4SURF_GRPH4USELUT_LENGTH		(1)
+#define PDP_GRPH4SURF_GRPH4USELUT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1SURF_OFFSET		(0x0010)
+
+/* PDP, VID1SURF, VID1PIXFMT
+*/
+#define PDP_VID1SURF_VID1PIXFMT_MASK		(0xF8000000)
+#define PDP_VID1SURF_VID1PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_VID1SURF_VID1PIXFMT_SHIFT		(27)
+#define PDP_VID1SURF_VID1PIXFMT_LENGTH		(5)
+#define PDP_VID1SURF_VID1PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEGAMMA
+*/
+#define PDP_VID1SURF_VID1USEGAMMA_MASK		(0x04000000)
+#define PDP_VID1SURF_VID1USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_VID1SURF_VID1USEGAMMA_SHIFT		(26)
+#define PDP_VID1SURF_VID1USEGAMMA_LENGTH		(1)
+#define PDP_VID1SURF_VID1USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USECSC
+*/
+#define PDP_VID1SURF_VID1USECSC_MASK		(0x02000000)
+#define PDP_VID1SURF_VID1USECSC_LSBMASK		(0x00000001)
+#define PDP_VID1SURF_VID1USECSC_SHIFT		(25)
+#define PDP_VID1SURF_VID1USECSC_LENGTH		(1)
+#define PDP_VID1SURF_VID1USECSC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEI2P
+*/
+#define PDP_VID1SURF_VID1USEI2P_MASK		(0x01000000)
+#define PDP_VID1SURF_VID1USEI2P_LSBMASK		(0x00000001)
+#define PDP_VID1SURF_VID1USEI2P_SHIFT		(24)
+#define PDP_VID1SURF_VID1USEI2P_LENGTH		(1)
+#define PDP_VID1SURF_VID1USEI2P_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1COSITED
+*/
+#define PDP_VID1SURF_VID1COSITED_MASK		(0x00800000)
+#define PDP_VID1SURF_VID1COSITED_LSBMASK		(0x00000001)
+#define PDP_VID1SURF_VID1COSITED_SHIFT		(23)
+#define PDP_VID1SURF_VID1COSITED_LENGTH		(1)
+#define PDP_VID1SURF_VID1COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEHQCD
+*/
+#define PDP_VID1SURF_VID1USEHQCD_MASK		(0x00400000)
+#define PDP_VID1SURF_VID1USEHQCD_LSBMASK		(0x00000001)
+#define PDP_VID1SURF_VID1USEHQCD_SHIFT		(22)
+#define PDP_VID1SURF_VID1USEHQCD_LENGTH		(1)
+#define PDP_VID1SURF_VID1USEHQCD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEINSTREAM
+*/
+#define PDP_VID1SURF_VID1USEINSTREAM_MASK		(0x00200000)
+#define PDP_VID1SURF_VID1USEINSTREAM_LSBMASK		(0x00000001)
+#define PDP_VID1SURF_VID1USEINSTREAM_SHIFT		(21)
+#define PDP_VID1SURF_VID1USEINSTREAM_LENGTH		(1)
+#define PDP_VID1SURF_VID1USEINSTREAM_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2SURF_OFFSET		(0x0014)
+
+/* PDP, VID2SURF, VID2PIXFMT
+*/
+#define PDP_VID2SURF_VID2PIXFMT_MASK		(0xF8000000)
+#define PDP_VID2SURF_VID2PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_VID2SURF_VID2PIXFMT_SHIFT		(27)
+#define PDP_VID2SURF_VID2PIXFMT_LENGTH		(5)
+#define PDP_VID2SURF_VID2PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SURF, VID2COSITED
+*/
+#define PDP_VID2SURF_VID2COSITED_MASK		(0x00800000)
+#define PDP_VID2SURF_VID2COSITED_LSBMASK		(0x00000001)
+#define PDP_VID2SURF_VID2COSITED_SHIFT		(23)
+#define PDP_VID2SURF_VID2COSITED_LENGTH		(1)
+#define PDP_VID2SURF_VID2COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SURF, VID2USEGAMMA
+*/
+#define PDP_VID2SURF_VID2USEGAMMA_MASK		(0x04000000)
+#define PDP_VID2SURF_VID2USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_VID2SURF_VID2USEGAMMA_SHIFT		(26)
+#define PDP_VID2SURF_VID2USEGAMMA_LENGTH		(1)
+#define PDP_VID2SURF_VID2USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SURF, VID2USECSC
+*/
+#define PDP_VID2SURF_VID2USECSC_MASK		(0x02000000)
+#define PDP_VID2SURF_VID2USECSC_LSBMASK		(0x00000001)
+#define PDP_VID2SURF_VID2USECSC_SHIFT		(25)
+#define PDP_VID2SURF_VID2USECSC_LENGTH		(1)
+#define PDP_VID2SURF_VID2USECSC_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3SURF_OFFSET		(0x0018)
+
+/* PDP, VID3SURF, VID3PIXFMT
+*/
+#define PDP_VID3SURF_VID3PIXFMT_MASK		(0xF8000000)
+#define PDP_VID3SURF_VID3PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_VID3SURF_VID3PIXFMT_SHIFT		(27)
+#define PDP_VID3SURF_VID3PIXFMT_LENGTH		(5)
+#define PDP_VID3SURF_VID3PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SURF, VID3COSITED
+*/
+#define PDP_VID3SURF_VID3COSITED_MASK		(0x00800000)
+#define PDP_VID3SURF_VID3COSITED_LSBMASK		(0x00000001)
+#define PDP_VID3SURF_VID3COSITED_SHIFT		(23)
+#define PDP_VID3SURF_VID3COSITED_LENGTH		(1)
+#define PDP_VID3SURF_VID3COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SURF, VID3USEGAMMA
+*/
+#define PDP_VID3SURF_VID3USEGAMMA_MASK		(0x04000000)
+#define PDP_VID3SURF_VID3USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_VID3SURF_VID3USEGAMMA_SHIFT		(26)
+#define PDP_VID3SURF_VID3USEGAMMA_LENGTH		(1)
+#define PDP_VID3SURF_VID3USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SURF, VID3USECSC
+*/
+#define PDP_VID3SURF_VID3USECSC_MASK		(0x02000000)
+#define PDP_VID3SURF_VID3USECSC_LSBMASK		(0x00000001)
+#define PDP_VID3SURF_VID3USECSC_SHIFT		(25)
+#define PDP_VID3SURF_VID3USECSC_LENGTH		(1)
+#define PDP_VID3SURF_VID3USECSC_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4SURF_OFFSET		(0x001C)
+
+/* PDP, VID4SURF, VID4PIXFMT
+*/
+#define PDP_VID4SURF_VID4PIXFMT_MASK		(0xF8000000)
+#define PDP_VID4SURF_VID4PIXFMT_LSBMASK		(0x0000001F)
+#define PDP_VID4SURF_VID4PIXFMT_SHIFT		(27)
+#define PDP_VID4SURF_VID4PIXFMT_LENGTH		(5)
+#define PDP_VID4SURF_VID4PIXFMT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SURF, VID4COSITED
+*/
+#define PDP_VID4SURF_VID4COSITED_MASK		(0x00800000)
+#define PDP_VID4SURF_VID4COSITED_LSBMASK		(0x00000001)
+#define PDP_VID4SURF_VID4COSITED_SHIFT		(23)
+#define PDP_VID4SURF_VID4COSITED_LENGTH		(1)
+#define PDP_VID4SURF_VID4COSITED_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SURF, VID4USEGAMMA
+*/
+#define PDP_VID4SURF_VID4USEGAMMA_MASK		(0x04000000)
+#define PDP_VID4SURF_VID4USEGAMMA_LSBMASK		(0x00000001)
+#define PDP_VID4SURF_VID4USEGAMMA_SHIFT		(26)
+#define PDP_VID4SURF_VID4USEGAMMA_LENGTH		(1)
+#define PDP_VID4SURF_VID4USEGAMMA_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SURF, VID4USECSC
+*/
+#define PDP_VID4SURF_VID4USECSC_MASK		(0x02000000)
+#define PDP_VID4SURF_VID4USECSC_LSBMASK		(0x00000001)
+#define PDP_VID4SURF_VID4USECSC_SHIFT		(25)
+#define PDP_VID4SURF_VID4USECSC_LENGTH		(1)
+#define PDP_VID4SURF_VID4USECSC_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1CTRL_OFFSET		(0x0020)
+
+/* PDP, GRPH1CTRL, GRPH1STREN
+*/
+#define PDP_GRPH1CTRL_GRPH1STREN_MASK		(0x80000000)
+#define PDP_GRPH1CTRL_GRPH1STREN_LSBMASK		(0x00000001)
+#define PDP_GRPH1CTRL_GRPH1STREN_SHIFT		(31)
+#define PDP_GRPH1CTRL_GRPH1STREN_LENGTH		(1)
+#define PDP_GRPH1CTRL_GRPH1STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1CKEYEN
+*/
+#define PDP_GRPH1CTRL_GRPH1CKEYEN_MASK		(0x40000000)
+#define PDP_GRPH1CTRL_GRPH1CKEYEN_LSBMASK		(0x00000001)
+#define PDP_GRPH1CTRL_GRPH1CKEYEN_SHIFT		(30)
+#define PDP_GRPH1CTRL_GRPH1CKEYEN_LENGTH		(1)
+#define PDP_GRPH1CTRL_GRPH1CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1CKEYSRC
+*/
+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_MASK		(0x20000000)
+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_SHIFT		(29)
+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_LENGTH		(1)
+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1BLEND
+*/
+#define PDP_GRPH1CTRL_GRPH1BLEND_MASK		(0x18000000)
+#define PDP_GRPH1CTRL_GRPH1BLEND_LSBMASK		(0x00000003)
+#define PDP_GRPH1CTRL_GRPH1BLEND_SHIFT		(27)
+#define PDP_GRPH1CTRL_GRPH1BLEND_LENGTH		(2)
+#define PDP_GRPH1CTRL_GRPH1BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1BLENDPOS
+*/
+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK		(0x07000000)
+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT		(24)
+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_LENGTH		(3)
+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1DITHEREN
+*/
+#define PDP_GRPH1CTRL_GRPH1DITHEREN_MASK		(0x00800000)
+#define PDP_GRPH1CTRL_GRPH1DITHEREN_LSBMASK		(0x00000001)
+#define PDP_GRPH1CTRL_GRPH1DITHEREN_SHIFT		(23)
+#define PDP_GRPH1CTRL_GRPH1DITHEREN_LENGTH		(1)
+#define PDP_GRPH1CTRL_GRPH1DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2CTRL_OFFSET		(0x0024)
+
+/* PDP, GRPH2CTRL, GRPH2STREN
+*/
+#define PDP_GRPH2CTRL_GRPH2STREN_MASK		(0x80000000)
+#define PDP_GRPH2CTRL_GRPH2STREN_LSBMASK		(0x00000001)
+#define PDP_GRPH2CTRL_GRPH2STREN_SHIFT		(31)
+#define PDP_GRPH2CTRL_GRPH2STREN_LENGTH		(1)
+#define PDP_GRPH2CTRL_GRPH2STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2CKEYEN
+*/
+#define PDP_GRPH2CTRL_GRPH2CKEYEN_MASK		(0x40000000)
+#define PDP_GRPH2CTRL_GRPH2CKEYEN_LSBMASK		(0x00000001)
+#define PDP_GRPH2CTRL_GRPH2CKEYEN_SHIFT		(30)
+#define PDP_GRPH2CTRL_GRPH2CKEYEN_LENGTH		(1)
+#define PDP_GRPH2CTRL_GRPH2CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2CKEYSRC
+*/
+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_MASK		(0x20000000)
+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_SHIFT		(29)
+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_LENGTH		(1)
+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2BLEND
+*/
+#define PDP_GRPH2CTRL_GRPH2BLEND_MASK		(0x18000000)
+#define PDP_GRPH2CTRL_GRPH2BLEND_LSBMASK		(0x00000003)
+#define PDP_GRPH2CTRL_GRPH2BLEND_SHIFT		(27)
+#define PDP_GRPH2CTRL_GRPH2BLEND_LENGTH		(2)
+#define PDP_GRPH2CTRL_GRPH2BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2BLENDPOS
+*/
+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK		(0x07000000)
+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT		(24)
+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_LENGTH		(3)
+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2DITHEREN
+*/
+#define PDP_GRPH2CTRL_GRPH2DITHEREN_MASK		(0x00800000)
+#define PDP_GRPH2CTRL_GRPH2DITHEREN_LSBMASK		(0x00000001)
+#define PDP_GRPH2CTRL_GRPH2DITHEREN_SHIFT		(23)
+#define PDP_GRPH2CTRL_GRPH2DITHEREN_LENGTH		(1)
+#define PDP_GRPH2CTRL_GRPH2DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3CTRL_OFFSET		(0x0028)
+
+/* PDP, GRPH3CTRL, GRPH3STREN
+*/
+#define PDP_GRPH3CTRL_GRPH3STREN_MASK		(0x80000000)
+#define PDP_GRPH3CTRL_GRPH3STREN_LSBMASK		(0x00000001)
+#define PDP_GRPH3CTRL_GRPH3STREN_SHIFT		(31)
+#define PDP_GRPH3CTRL_GRPH3STREN_LENGTH		(1)
+#define PDP_GRPH3CTRL_GRPH3STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3CKEYEN
+*/
+#define PDP_GRPH3CTRL_GRPH3CKEYEN_MASK		(0x40000000)
+#define PDP_GRPH3CTRL_GRPH3CKEYEN_LSBMASK		(0x00000001)
+#define PDP_GRPH3CTRL_GRPH3CKEYEN_SHIFT		(30)
+#define PDP_GRPH3CTRL_GRPH3CKEYEN_LENGTH		(1)
+#define PDP_GRPH3CTRL_GRPH3CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3CKEYSRC
+*/
+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_MASK		(0x20000000)
+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_SHIFT		(29)
+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_LENGTH		(1)
+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3BLEND
+*/
+#define PDP_GRPH3CTRL_GRPH3BLEND_MASK		(0x18000000)
+#define PDP_GRPH3CTRL_GRPH3BLEND_LSBMASK		(0x00000003)
+#define PDP_GRPH3CTRL_GRPH3BLEND_SHIFT		(27)
+#define PDP_GRPH3CTRL_GRPH3BLEND_LENGTH		(2)
+#define PDP_GRPH3CTRL_GRPH3BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3BLENDPOS
+*/
+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_MASK		(0x07000000)
+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_SHIFT		(24)
+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_LENGTH		(3)
+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3DITHEREN
+*/
+#define PDP_GRPH3CTRL_GRPH3DITHEREN_MASK		(0x00800000)
+#define PDP_GRPH3CTRL_GRPH3DITHEREN_LSBMASK		(0x00000001)
+#define PDP_GRPH3CTRL_GRPH3DITHEREN_SHIFT		(23)
+#define PDP_GRPH3CTRL_GRPH3DITHEREN_LENGTH		(1)
+#define PDP_GRPH3CTRL_GRPH3DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4CTRL_OFFSET		(0x002C)
+
+/* PDP, GRPH4CTRL, GRPH4STREN
+*/
+#define PDP_GRPH4CTRL_GRPH4STREN_MASK		(0x80000000)
+#define PDP_GRPH4CTRL_GRPH4STREN_LSBMASK		(0x00000001)
+#define PDP_GRPH4CTRL_GRPH4STREN_SHIFT		(31)
+#define PDP_GRPH4CTRL_GRPH4STREN_LENGTH		(1)
+#define PDP_GRPH4CTRL_GRPH4STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4CKEYEN
+*/
+#define PDP_GRPH4CTRL_GRPH4CKEYEN_MASK		(0x40000000)
+#define PDP_GRPH4CTRL_GRPH4CKEYEN_LSBMASK		(0x00000001)
+#define PDP_GRPH4CTRL_GRPH4CKEYEN_SHIFT		(30)
+#define PDP_GRPH4CTRL_GRPH4CKEYEN_LENGTH		(1)
+#define PDP_GRPH4CTRL_GRPH4CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4CKEYSRC
+*/
+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_MASK		(0x20000000)
+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_SHIFT		(29)
+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_LENGTH		(1)
+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4BLEND
+*/
+#define PDP_GRPH4CTRL_GRPH4BLEND_MASK		(0x18000000)
+#define PDP_GRPH4CTRL_GRPH4BLEND_LSBMASK		(0x00000003)
+#define PDP_GRPH4CTRL_GRPH4BLEND_SHIFT		(27)
+#define PDP_GRPH4CTRL_GRPH4BLEND_LENGTH		(2)
+#define PDP_GRPH4CTRL_GRPH4BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4BLENDPOS
+*/
+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK		(0x07000000)
+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT		(24)
+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_LENGTH		(3)
+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4DITHEREN
+*/
+#define PDP_GRPH4CTRL_GRPH4DITHEREN_MASK		(0x00800000)
+#define PDP_GRPH4CTRL_GRPH4DITHEREN_LSBMASK		(0x00000001)
+#define PDP_GRPH4CTRL_GRPH4DITHEREN_SHIFT		(23)
+#define PDP_GRPH4CTRL_GRPH4DITHEREN_LENGTH		(1)
+#define PDP_GRPH4CTRL_GRPH4DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1CTRL_OFFSET		(0x0030)
+
+/* PDP, VID1CTRL, VID1STREN
+*/
+#define PDP_VID1CTRL_VID1STREN_MASK		(0x80000000)
+#define PDP_VID1CTRL_VID1STREN_LSBMASK		(0x00000001)
+#define PDP_VID1CTRL_VID1STREN_SHIFT		(31)
+#define PDP_VID1CTRL_VID1STREN_LENGTH		(1)
+#define PDP_VID1CTRL_VID1STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1CKEYEN
+*/
+#define PDP_VID1CTRL_VID1CKEYEN_MASK		(0x40000000)
+#define PDP_VID1CTRL_VID1CKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID1CTRL_VID1CKEYEN_SHIFT		(30)
+#define PDP_VID1CTRL_VID1CKEYEN_LENGTH		(1)
+#define PDP_VID1CTRL_VID1CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1CKEYSRC
+*/
+#define PDP_VID1CTRL_VID1CKEYSRC_MASK		(0x20000000)
+#define PDP_VID1CTRL_VID1CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_VID1CTRL_VID1CKEYSRC_SHIFT		(29)
+#define PDP_VID1CTRL_VID1CKEYSRC_LENGTH		(1)
+#define PDP_VID1CTRL_VID1CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1BLEND
+*/
+#define PDP_VID1CTRL_VID1BLEND_MASK		(0x18000000)
+#define PDP_VID1CTRL_VID1BLEND_LSBMASK		(0x00000003)
+#define PDP_VID1CTRL_VID1BLEND_SHIFT		(27)
+#define PDP_VID1CTRL_VID1BLEND_LENGTH		(2)
+#define PDP_VID1CTRL_VID1BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1BLENDPOS
+*/
+#define PDP_VID1CTRL_VID1BLENDPOS_MASK		(0x07000000)
+#define PDP_VID1CTRL_VID1BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_VID1CTRL_VID1BLENDPOS_SHIFT		(24)
+#define PDP_VID1CTRL_VID1BLENDPOS_LENGTH		(3)
+#define PDP_VID1CTRL_VID1BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CTRL, VID1DITHEREN
+*/
+#define PDP_VID1CTRL_VID1DITHEREN_MASK		(0x00800000)
+#define PDP_VID1CTRL_VID1DITHEREN_LSBMASK		(0x00000001)
+#define PDP_VID1CTRL_VID1DITHEREN_SHIFT		(23)
+#define PDP_VID1CTRL_VID1DITHEREN_LENGTH		(1)
+#define PDP_VID1CTRL_VID1DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2CTRL_OFFSET		(0x0034)
+
+/* PDP, VID2CTRL, VID2STREN
+*/
+#define PDP_VID2CTRL_VID2STREN_MASK		(0x80000000)
+#define PDP_VID2CTRL_VID2STREN_LSBMASK		(0x00000001)
+#define PDP_VID2CTRL_VID2STREN_SHIFT		(31)
+#define PDP_VID2CTRL_VID2STREN_LENGTH		(1)
+#define PDP_VID2CTRL_VID2STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2CKEYEN
+*/
+#define PDP_VID2CTRL_VID2CKEYEN_MASK		(0x40000000)
+#define PDP_VID2CTRL_VID2CKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID2CTRL_VID2CKEYEN_SHIFT		(30)
+#define PDP_VID2CTRL_VID2CKEYEN_LENGTH		(1)
+#define PDP_VID2CTRL_VID2CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2CKEYSRC
+*/
+#define PDP_VID2CTRL_VID2CKEYSRC_MASK		(0x20000000)
+#define PDP_VID2CTRL_VID2CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_VID2CTRL_VID2CKEYSRC_SHIFT		(29)
+#define PDP_VID2CTRL_VID2CKEYSRC_LENGTH		(1)
+#define PDP_VID2CTRL_VID2CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2BLEND
+*/
+#define PDP_VID2CTRL_VID2BLEND_MASK		(0x18000000)
+#define PDP_VID2CTRL_VID2BLEND_LSBMASK		(0x00000003)
+#define PDP_VID2CTRL_VID2BLEND_SHIFT		(27)
+#define PDP_VID2CTRL_VID2BLEND_LENGTH		(2)
+#define PDP_VID2CTRL_VID2BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2BLENDPOS
+*/
+#define PDP_VID2CTRL_VID2BLENDPOS_MASK		(0x07000000)
+#define PDP_VID2CTRL_VID2BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_VID2CTRL_VID2BLENDPOS_SHIFT		(24)
+#define PDP_VID2CTRL_VID2BLENDPOS_LENGTH		(3)
+#define PDP_VID2CTRL_VID2BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CTRL, VID2DITHEREN
+*/
+#define PDP_VID2CTRL_VID2DITHEREN_MASK		(0x00800000)
+#define PDP_VID2CTRL_VID2DITHEREN_LSBMASK		(0x00000001)
+#define PDP_VID2CTRL_VID2DITHEREN_SHIFT		(23)
+#define PDP_VID2CTRL_VID2DITHEREN_LENGTH		(1)
+#define PDP_VID2CTRL_VID2DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3CTRL_OFFSET		(0x0038)
+
+/* PDP, VID3CTRL, VID3STREN
+*/
+#define PDP_VID3CTRL_VID3STREN_MASK		(0x80000000)
+#define PDP_VID3CTRL_VID3STREN_LSBMASK		(0x00000001)
+#define PDP_VID3CTRL_VID3STREN_SHIFT		(31)
+#define PDP_VID3CTRL_VID3STREN_LENGTH		(1)
+#define PDP_VID3CTRL_VID3STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3CKEYEN
+*/
+#define PDP_VID3CTRL_VID3CKEYEN_MASK		(0x40000000)
+#define PDP_VID3CTRL_VID3CKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID3CTRL_VID3CKEYEN_SHIFT		(30)
+#define PDP_VID3CTRL_VID3CKEYEN_LENGTH		(1)
+#define PDP_VID3CTRL_VID3CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3CKEYSRC
+*/
+#define PDP_VID3CTRL_VID3CKEYSRC_MASK		(0x20000000)
+#define PDP_VID3CTRL_VID3CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_VID3CTRL_VID3CKEYSRC_SHIFT		(29)
+#define PDP_VID3CTRL_VID3CKEYSRC_LENGTH		(1)
+#define PDP_VID3CTRL_VID3CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3BLEND
+*/
+#define PDP_VID3CTRL_VID3BLEND_MASK		(0x18000000)
+#define PDP_VID3CTRL_VID3BLEND_LSBMASK		(0x00000003)
+#define PDP_VID3CTRL_VID3BLEND_SHIFT		(27)
+#define PDP_VID3CTRL_VID3BLEND_LENGTH		(2)
+#define PDP_VID3CTRL_VID3BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3BLENDPOS
+*/
+#define PDP_VID3CTRL_VID3BLENDPOS_MASK		(0x07000000)
+#define PDP_VID3CTRL_VID3BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_VID3CTRL_VID3BLENDPOS_SHIFT		(24)
+#define PDP_VID3CTRL_VID3BLENDPOS_LENGTH		(3)
+#define PDP_VID3CTRL_VID3BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CTRL, VID3DITHEREN
+*/
+#define PDP_VID3CTRL_VID3DITHEREN_MASK		(0x00800000)
+#define PDP_VID3CTRL_VID3DITHEREN_LSBMASK		(0x00000001)
+#define PDP_VID3CTRL_VID3DITHEREN_SHIFT		(23)
+#define PDP_VID3CTRL_VID3DITHEREN_LENGTH		(1)
+#define PDP_VID3CTRL_VID3DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4CTRL_OFFSET		(0x003C)
+
+/* PDP, VID4CTRL, VID4STREN
+*/
+#define PDP_VID4CTRL_VID4STREN_MASK		(0x80000000)
+#define PDP_VID4CTRL_VID4STREN_LSBMASK		(0x00000001)
+#define PDP_VID4CTRL_VID4STREN_SHIFT		(31)
+#define PDP_VID4CTRL_VID4STREN_LENGTH		(1)
+#define PDP_VID4CTRL_VID4STREN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4CKEYEN
+*/
+#define PDP_VID4CTRL_VID4CKEYEN_MASK		(0x40000000)
+#define PDP_VID4CTRL_VID4CKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID4CTRL_VID4CKEYEN_SHIFT		(30)
+#define PDP_VID4CTRL_VID4CKEYEN_LENGTH		(1)
+#define PDP_VID4CTRL_VID4CKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4CKEYSRC
+*/
+#define PDP_VID4CTRL_VID4CKEYSRC_MASK		(0x20000000)
+#define PDP_VID4CTRL_VID4CKEYSRC_LSBMASK		(0x00000001)
+#define PDP_VID4CTRL_VID4CKEYSRC_SHIFT		(29)
+#define PDP_VID4CTRL_VID4CKEYSRC_LENGTH		(1)
+#define PDP_VID4CTRL_VID4CKEYSRC_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4BLEND
+*/
+#define PDP_VID4CTRL_VID4BLEND_MASK		(0x18000000)
+#define PDP_VID4CTRL_VID4BLEND_LSBMASK		(0x00000003)
+#define PDP_VID4CTRL_VID4BLEND_SHIFT		(27)
+#define PDP_VID4CTRL_VID4BLEND_LENGTH		(2)
+#define PDP_VID4CTRL_VID4BLEND_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4BLENDPOS
+*/
+#define PDP_VID4CTRL_VID4BLENDPOS_MASK		(0x07000000)
+#define PDP_VID4CTRL_VID4BLENDPOS_LSBMASK		(0x00000007)
+#define PDP_VID4CTRL_VID4BLENDPOS_SHIFT		(24)
+#define PDP_VID4CTRL_VID4BLENDPOS_LENGTH		(3)
+#define PDP_VID4CTRL_VID4BLENDPOS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CTRL, VID4DITHEREN
+*/
+#define PDP_VID4CTRL_VID4DITHEREN_MASK		(0x00800000)
+#define PDP_VID4CTRL_VID4DITHEREN_LSBMASK		(0x00000001)
+#define PDP_VID4CTRL_VID4DITHEREN_SHIFT		(23)
+#define PDP_VID4CTRL_VID4DITHEREN_LENGTH		(1)
+#define PDP_VID4CTRL_VID4DITHEREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1UCTRL_OFFSET		(0x0050)
+
+/* PDP, VID1UCTRL, VID1UVHALFSTR
+*/
+#define PDP_VID1UCTRL_VID1UVHALFSTR_MASK		(0xC0000000)
+#define PDP_VID1UCTRL_VID1UVHALFSTR_LSBMASK		(0x00000003)
+#define PDP_VID1UCTRL_VID1UVHALFSTR_SHIFT		(30)
+#define PDP_VID1UCTRL_VID1UVHALFSTR_LENGTH		(2)
+#define PDP_VID1UCTRL_VID1UVHALFSTR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2UCTRL_OFFSET		(0x0054)
+
+/* PDP, VID2UCTRL, VID2UVHALFSTR
+*/
+#define PDP_VID2UCTRL_VID2UVHALFSTR_MASK		(0xC0000000)
+#define PDP_VID2UCTRL_VID2UVHALFSTR_LSBMASK		(0x00000003)
+#define PDP_VID2UCTRL_VID2UVHALFSTR_SHIFT		(30)
+#define PDP_VID2UCTRL_VID2UVHALFSTR_LENGTH		(2)
+#define PDP_VID2UCTRL_VID2UVHALFSTR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3UCTRL_OFFSET		(0x0058)
+
+/* PDP, VID3UCTRL, VID3UVHALFSTR
+*/
+#define PDP_VID3UCTRL_VID3UVHALFSTR_MASK		(0xC0000000)
+#define PDP_VID3UCTRL_VID3UVHALFSTR_LSBMASK		(0x00000003)
+#define PDP_VID3UCTRL_VID3UVHALFSTR_SHIFT		(30)
+#define PDP_VID3UCTRL_VID3UVHALFSTR_LENGTH		(2)
+#define PDP_VID3UCTRL_VID3UVHALFSTR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4UCTRL_OFFSET		(0x005C)
+
+/* PDP, VID4UCTRL, VID4UVHALFSTR
+*/
+#define PDP_VID4UCTRL_VID4UVHALFSTR_MASK		(0xC0000000)
+#define PDP_VID4UCTRL_VID4UVHALFSTR_LSBMASK		(0x00000003)
+#define PDP_VID4UCTRL_VID4UVHALFSTR_SHIFT		(30)
+#define PDP_VID4UCTRL_VID4UVHALFSTR_LENGTH		(2)
+#define PDP_VID4UCTRL_VID4UVHALFSTR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1STRIDE_OFFSET		(0x0060)
+
+/* PDP, GRPH1STRIDE, GRPH1STRIDE
+*/
+#define PDP_GRPH1STRIDE_GRPH1STRIDE_MASK		(0xFFC00000)
+#define PDP_GRPH1STRIDE_GRPH1STRIDE_LSBMASK		(0x000003FF)
+#define PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT		(22)
+#define PDP_GRPH1STRIDE_GRPH1STRIDE_LENGTH		(10)
+#define PDP_GRPH1STRIDE_GRPH1STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2STRIDE_OFFSET		(0x0064)
+
+/* PDP, GRPH2STRIDE, GRPH2STRIDE
+*/
+#define PDP_GRPH2STRIDE_GRPH2STRIDE_MASK		(0xFFC00000)
+#define PDP_GRPH2STRIDE_GRPH2STRIDE_LSBMASK		(0x000003FF)
+#define PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT		(22)
+#define PDP_GRPH2STRIDE_GRPH2STRIDE_LENGTH		(10)
+#define PDP_GRPH2STRIDE_GRPH2STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3STRIDE_OFFSET		(0x0068)
+
+/* PDP, GRPH3STRIDE, GRPH3STRIDE
+*/
+#define PDP_GRPH3STRIDE_GRPH3STRIDE_MASK		(0xFFC00000)
+#define PDP_GRPH3STRIDE_GRPH3STRIDE_LSBMASK		(0x000003FF)
+#define PDP_GRPH3STRIDE_GRPH3STRIDE_SHIFT		(22)
+#define PDP_GRPH3STRIDE_GRPH3STRIDE_LENGTH		(10)
+#define PDP_GRPH3STRIDE_GRPH3STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4STRIDE_OFFSET		(0x006C)
+
+/* PDP, GRPH4STRIDE, GRPH4STRIDE
+*/
+#define PDP_GRPH4STRIDE_GRPH4STRIDE_MASK		(0xFFC00000)
+#define PDP_GRPH4STRIDE_GRPH4STRIDE_LSBMASK		(0x000003FF)
+#define PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT		(22)
+#define PDP_GRPH4STRIDE_GRPH4STRIDE_LENGTH		(10)
+#define PDP_GRPH4STRIDE_GRPH4STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1STRIDE_OFFSET		(0x0070)
+
+/* PDP, VID1STRIDE, VID1STRIDE
+*/
+#define PDP_VID1STRIDE_VID1STRIDE_MASK		(0xFFC00000)
+#define PDP_VID1STRIDE_VID1STRIDE_LSBMASK		(0x000003FF)
+#define PDP_VID1STRIDE_VID1STRIDE_SHIFT		(22)
+#define PDP_VID1STRIDE_VID1STRIDE_LENGTH		(10)
+#define PDP_VID1STRIDE_VID1STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2STRIDE_OFFSET		(0x0074)
+
+/* PDP, VID2STRIDE, VID2STRIDE
+*/
+#define PDP_VID2STRIDE_VID2STRIDE_MASK		(0xFFC00000)
+#define PDP_VID2STRIDE_VID2STRIDE_LSBMASK		(0x000003FF)
+#define PDP_VID2STRIDE_VID2STRIDE_SHIFT		(22)
+#define PDP_VID2STRIDE_VID2STRIDE_LENGTH		(10)
+#define PDP_VID2STRIDE_VID2STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3STRIDE_OFFSET		(0x0078)
+
+/* PDP, VID3STRIDE, VID3STRIDE
+*/
+#define PDP_VID3STRIDE_VID3STRIDE_MASK		(0xFFC00000)
+#define PDP_VID3STRIDE_VID3STRIDE_LSBMASK		(0x000003FF)
+#define PDP_VID3STRIDE_VID3STRIDE_SHIFT		(22)
+#define PDP_VID3STRIDE_VID3STRIDE_LENGTH		(10)
+#define PDP_VID3STRIDE_VID3STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4STRIDE_OFFSET		(0x007C)
+
+/* PDP, VID4STRIDE, VID4STRIDE
+*/
+#define PDP_VID4STRIDE_VID4STRIDE_MASK		(0xFFC00000)
+#define PDP_VID4STRIDE_VID4STRIDE_LSBMASK		(0x000003FF)
+#define PDP_VID4STRIDE_VID4STRIDE_SHIFT		(22)
+#define PDP_VID4STRIDE_VID4STRIDE_LENGTH		(10)
+#define PDP_VID4STRIDE_VID4STRIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1SIZE_OFFSET		(0x0080)
+
+/* PDP, GRPH1SIZE, GRPH1WIDTH
+*/
+#define PDP_GRPH1SIZE_GRPH1WIDTH_MASK		(0x0FFF0000)
+#define PDP_GRPH1SIZE_GRPH1WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT		(16)
+#define PDP_GRPH1SIZE_GRPH1WIDTH_LENGTH		(12)
+#define PDP_GRPH1SIZE_GRPH1WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1SIZE, GRPH1HEIGHT
+*/
+#define PDP_GRPH1SIZE_GRPH1HEIGHT_MASK		(0x00000FFF)
+#define PDP_GRPH1SIZE_GRPH1HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT		(0)
+#define PDP_GRPH1SIZE_GRPH1HEIGHT_LENGTH		(12)
+#define PDP_GRPH1SIZE_GRPH1HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2SIZE_OFFSET		(0x0084)
+
+/* PDP, GRPH2SIZE, GRPH2WIDTH
+*/
+#define PDP_GRPH2SIZE_GRPH2WIDTH_MASK		(0x0FFF0000)
+#define PDP_GRPH2SIZE_GRPH2WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT		(16)
+#define PDP_GRPH2SIZE_GRPH2WIDTH_LENGTH		(12)
+#define PDP_GRPH2SIZE_GRPH2WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2SIZE, GRPH2HEIGHT
+*/
+#define PDP_GRPH2SIZE_GRPH2HEIGHT_MASK		(0x00000FFF)
+#define PDP_GRPH2SIZE_GRPH2HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT		(0)
+#define PDP_GRPH2SIZE_GRPH2HEIGHT_LENGTH		(12)
+#define PDP_GRPH2SIZE_GRPH2HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3SIZE_OFFSET		(0x0088)
+
+/* PDP, GRPH3SIZE, GRPH3WIDTH
+*/
+#define PDP_GRPH3SIZE_GRPH3WIDTH_MASK		(0x0FFF0000)
+#define PDP_GRPH3SIZE_GRPH3WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_GRPH3SIZE_GRPH3WIDTH_SHIFT		(16)
+#define PDP_GRPH3SIZE_GRPH3WIDTH_LENGTH		(12)
+#define PDP_GRPH3SIZE_GRPH3WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3SIZE, GRPH3HEIGHT
+*/
+#define PDP_GRPH3SIZE_GRPH3HEIGHT_MASK		(0x00000FFF)
+#define PDP_GRPH3SIZE_GRPH3HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_GRPH3SIZE_GRPH3HEIGHT_SHIFT		(0)
+#define PDP_GRPH3SIZE_GRPH3HEIGHT_LENGTH		(12)
+#define PDP_GRPH3SIZE_GRPH3HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4SIZE_OFFSET		(0x008C)
+
+/* PDP, GRPH4SIZE, GRPH4WIDTH
+*/
+#define PDP_GRPH4SIZE_GRPH4WIDTH_MASK		(0x0FFF0000)
+#define PDP_GRPH4SIZE_GRPH4WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT		(16)
+#define PDP_GRPH4SIZE_GRPH4WIDTH_LENGTH		(12)
+#define PDP_GRPH4SIZE_GRPH4WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4SIZE, GRPH4HEIGHT
+*/
+#define PDP_GRPH4SIZE_GRPH4HEIGHT_MASK		(0x00000FFF)
+#define PDP_GRPH4SIZE_GRPH4HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT		(0)
+#define PDP_GRPH4SIZE_GRPH4HEIGHT_LENGTH		(12)
+#define PDP_GRPH4SIZE_GRPH4HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1SIZE_OFFSET		(0x0090)
+
+/* PDP, VID1SIZE, VID1WIDTH
+*/
+#define PDP_VID1SIZE_VID1WIDTH_MASK		(0x0FFF0000)
+#define PDP_VID1SIZE_VID1WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID1SIZE_VID1WIDTH_SHIFT		(16)
+#define PDP_VID1SIZE_VID1WIDTH_LENGTH		(12)
+#define PDP_VID1SIZE_VID1WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SIZE, VID1HEIGHT
+*/
+#define PDP_VID1SIZE_VID1HEIGHT_MASK		(0x00000FFF)
+#define PDP_VID1SIZE_VID1HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID1SIZE_VID1HEIGHT_SHIFT		(0)
+#define PDP_VID1SIZE_VID1HEIGHT_LENGTH		(12)
+#define PDP_VID1SIZE_VID1HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2SIZE_OFFSET		(0x0094)
+
+/* PDP, VID2SIZE, VID2WIDTH
+*/
+#define PDP_VID2SIZE_VID2WIDTH_MASK		(0x0FFF0000)
+#define PDP_VID2SIZE_VID2WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID2SIZE_VID2WIDTH_SHIFT		(16)
+#define PDP_VID2SIZE_VID2WIDTH_LENGTH		(12)
+#define PDP_VID2SIZE_VID2WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SIZE, VID2HEIGHT
+*/
+#define PDP_VID2SIZE_VID2HEIGHT_MASK		(0x00000FFF)
+#define PDP_VID2SIZE_VID2HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID2SIZE_VID2HEIGHT_SHIFT		(0)
+#define PDP_VID2SIZE_VID2HEIGHT_LENGTH		(12)
+#define PDP_VID2SIZE_VID2HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3SIZE_OFFSET		(0x0098)
+
+/* PDP, VID3SIZE, VID3WIDTH
+*/
+#define PDP_VID3SIZE_VID3WIDTH_MASK		(0x0FFF0000)
+#define PDP_VID3SIZE_VID3WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID3SIZE_VID3WIDTH_SHIFT		(16)
+#define PDP_VID3SIZE_VID3WIDTH_LENGTH		(12)
+#define PDP_VID3SIZE_VID3WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SIZE, VID3HEIGHT
+*/
+#define PDP_VID3SIZE_VID3HEIGHT_MASK		(0x00000FFF)
+#define PDP_VID3SIZE_VID3HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID3SIZE_VID3HEIGHT_SHIFT		(0)
+#define PDP_VID3SIZE_VID3HEIGHT_LENGTH		(12)
+#define PDP_VID3SIZE_VID3HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4SIZE_OFFSET		(0x009C)
+
+/* PDP, VID4SIZE, VID4WIDTH
+*/
+#define PDP_VID4SIZE_VID4WIDTH_MASK		(0x0FFF0000)
+#define PDP_VID4SIZE_VID4WIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID4SIZE_VID4WIDTH_SHIFT		(16)
+#define PDP_VID4SIZE_VID4WIDTH_LENGTH		(12)
+#define PDP_VID4SIZE_VID4WIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SIZE, VID4HEIGHT
+*/
+#define PDP_VID4SIZE_VID4HEIGHT_MASK		(0x00000FFF)
+#define PDP_VID4SIZE_VID4HEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID4SIZE_VID4HEIGHT_SHIFT		(0)
+#define PDP_VID4SIZE_VID4HEIGHT_LENGTH		(12)
+#define PDP_VID4SIZE_VID4HEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1POSN_OFFSET		(0x00A0)
+
+/* PDP, GRPH1POSN, GRPH1XSTART
+*/
+#define PDP_GRPH1POSN_GRPH1XSTART_MASK		(0x0FFF0000)
+#define PDP_GRPH1POSN_GRPH1XSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH1POSN_GRPH1XSTART_SHIFT		(16)
+#define PDP_GRPH1POSN_GRPH1XSTART_LENGTH		(12)
+#define PDP_GRPH1POSN_GRPH1XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1POSN, GRPH1YSTART
+*/
+#define PDP_GRPH1POSN_GRPH1YSTART_MASK		(0x00000FFF)
+#define PDP_GRPH1POSN_GRPH1YSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH1POSN_GRPH1YSTART_SHIFT		(0)
+#define PDP_GRPH1POSN_GRPH1YSTART_LENGTH		(12)
+#define PDP_GRPH1POSN_GRPH1YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2POSN_OFFSET		(0x00A4)
+
+/* PDP, GRPH2POSN, GRPH2XSTART
+*/
+#define PDP_GRPH2POSN_GRPH2XSTART_MASK		(0x0FFF0000)
+#define PDP_GRPH2POSN_GRPH2XSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH2POSN_GRPH2XSTART_SHIFT		(16)
+#define PDP_GRPH2POSN_GRPH2XSTART_LENGTH		(12)
+#define PDP_GRPH2POSN_GRPH2XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2POSN, GRPH2YSTART
+*/
+#define PDP_GRPH2POSN_GRPH2YSTART_MASK		(0x00000FFF)
+#define PDP_GRPH2POSN_GRPH2YSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH2POSN_GRPH2YSTART_SHIFT		(0)
+#define PDP_GRPH2POSN_GRPH2YSTART_LENGTH		(12)
+#define PDP_GRPH2POSN_GRPH2YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3POSN_OFFSET		(0x00A8)
+
+/* PDP, GRPH3POSN, GRPH3XSTART
+*/
+#define PDP_GRPH3POSN_GRPH3XSTART_MASK		(0x0FFF0000)
+#define PDP_GRPH3POSN_GRPH3XSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH3POSN_GRPH3XSTART_SHIFT		(16)
+#define PDP_GRPH3POSN_GRPH3XSTART_LENGTH		(12)
+#define PDP_GRPH3POSN_GRPH3XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3POSN, GRPH3YSTART
+*/
+#define PDP_GRPH3POSN_GRPH3YSTART_MASK		(0x00000FFF)
+#define PDP_GRPH3POSN_GRPH3YSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH3POSN_GRPH3YSTART_SHIFT		(0)
+#define PDP_GRPH3POSN_GRPH3YSTART_LENGTH		(12)
+#define PDP_GRPH3POSN_GRPH3YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4POSN_OFFSET		(0x00AC)
+
+/* PDP, GRPH4POSN, GRPH4XSTART
+*/
+#define PDP_GRPH4POSN_GRPH4XSTART_MASK		(0x0FFF0000)
+#define PDP_GRPH4POSN_GRPH4XSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH4POSN_GRPH4XSTART_SHIFT		(16)
+#define PDP_GRPH4POSN_GRPH4XSTART_LENGTH		(12)
+#define PDP_GRPH4POSN_GRPH4XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4POSN, GRPH4YSTART
+*/
+#define PDP_GRPH4POSN_GRPH4YSTART_MASK		(0x00000FFF)
+#define PDP_GRPH4POSN_GRPH4YSTART_LSBMASK		(0x00000FFF)
+#define PDP_GRPH4POSN_GRPH4YSTART_SHIFT		(0)
+#define PDP_GRPH4POSN_GRPH4YSTART_LENGTH		(12)
+#define PDP_GRPH4POSN_GRPH4YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1POSN_OFFSET		(0x00B0)
+
+/* PDP, VID1POSN, VID1XSTART
+*/
+#define PDP_VID1POSN_VID1XSTART_MASK		(0x0FFF0000)
+#define PDP_VID1POSN_VID1XSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID1POSN_VID1XSTART_SHIFT		(16)
+#define PDP_VID1POSN_VID1XSTART_LENGTH		(12)
+#define PDP_VID1POSN_VID1XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1POSN, VID1YSTART
+*/
+#define PDP_VID1POSN_VID1YSTART_MASK		(0x00000FFF)
+#define PDP_VID1POSN_VID1YSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID1POSN_VID1YSTART_SHIFT		(0)
+#define PDP_VID1POSN_VID1YSTART_LENGTH		(12)
+#define PDP_VID1POSN_VID1YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2POSN_OFFSET		(0x00B4)
+
+/* PDP, VID2POSN, VID2XSTART
+*/
+#define PDP_VID2POSN_VID2XSTART_MASK		(0x0FFF0000)
+#define PDP_VID2POSN_VID2XSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID2POSN_VID2XSTART_SHIFT		(16)
+#define PDP_VID2POSN_VID2XSTART_LENGTH		(12)
+#define PDP_VID2POSN_VID2XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2POSN, VID2YSTART
+*/
+#define PDP_VID2POSN_VID2YSTART_MASK		(0x00000FFF)
+#define PDP_VID2POSN_VID2YSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID2POSN_VID2YSTART_SHIFT		(0)
+#define PDP_VID2POSN_VID2YSTART_LENGTH		(12)
+#define PDP_VID2POSN_VID2YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3POSN_OFFSET		(0x00B8)
+
+/* PDP, VID3POSN, VID3XSTART
+*/
+#define PDP_VID3POSN_VID3XSTART_MASK		(0x0FFF0000)
+#define PDP_VID3POSN_VID3XSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID3POSN_VID3XSTART_SHIFT		(16)
+#define PDP_VID3POSN_VID3XSTART_LENGTH		(12)
+#define PDP_VID3POSN_VID3XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3POSN, VID3YSTART
+*/
+#define PDP_VID3POSN_VID3YSTART_MASK		(0x00000FFF)
+#define PDP_VID3POSN_VID3YSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID3POSN_VID3YSTART_SHIFT		(0)
+#define PDP_VID3POSN_VID3YSTART_LENGTH		(12)
+#define PDP_VID3POSN_VID3YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4POSN_OFFSET		(0x00BC)
+
+/* PDP, VID4POSN, VID4XSTART
+*/
+#define PDP_VID4POSN_VID4XSTART_MASK		(0x0FFF0000)
+#define PDP_VID4POSN_VID4XSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID4POSN_VID4XSTART_SHIFT		(16)
+#define PDP_VID4POSN_VID4XSTART_LENGTH		(12)
+#define PDP_VID4POSN_VID4XSTART_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4POSN, VID4YSTART
+*/
+#define PDP_VID4POSN_VID4YSTART_MASK		(0x00000FFF)
+#define PDP_VID4POSN_VID4YSTART_LSBMASK		(0x00000FFF)
+#define PDP_VID4POSN_VID4YSTART_SHIFT		(0)
+#define PDP_VID4POSN_VID4YSTART_LENGTH		(12)
+#define PDP_VID4POSN_VID4YSTART_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1GALPHA_OFFSET		(0x00C0)
+
+/* PDP, GRPH1GALPHA, GRPH1GALPHA
+*/
+#define PDP_GRPH1GALPHA_GRPH1GALPHA_MASK		(0x000003FF)
+#define PDP_GRPH1GALPHA_GRPH1GALPHA_LSBMASK		(0x000003FF)
+#define PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT		(0)
+#define PDP_GRPH1GALPHA_GRPH1GALPHA_LENGTH		(10)
+#define PDP_GRPH1GALPHA_GRPH1GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2GALPHA_OFFSET		(0x00C4)
+
+/* PDP, GRPH2GALPHA, GRPH2GALPHA
+*/
+#define PDP_GRPH2GALPHA_GRPH2GALPHA_MASK		(0x000003FF)
+#define PDP_GRPH2GALPHA_GRPH2GALPHA_LSBMASK		(0x000003FF)
+#define PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT		(0)
+#define PDP_GRPH2GALPHA_GRPH2GALPHA_LENGTH		(10)
+#define PDP_GRPH2GALPHA_GRPH2GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3GALPHA_OFFSET		(0x00C8)
+
+/* PDP, GRPH3GALPHA, GRPH3GALPHA
+*/
+#define PDP_GRPH3GALPHA_GRPH3GALPHA_MASK		(0x000003FF)
+#define PDP_GRPH3GALPHA_GRPH3GALPHA_LSBMASK		(0x000003FF)
+#define PDP_GRPH3GALPHA_GRPH3GALPHA_SHIFT		(0)
+#define PDP_GRPH3GALPHA_GRPH3GALPHA_LENGTH		(10)
+#define PDP_GRPH3GALPHA_GRPH3GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4GALPHA_OFFSET		(0x00CC)
+
+/* PDP, GRPH4GALPHA, GRPH4GALPHA
+*/
+#define PDP_GRPH4GALPHA_GRPH4GALPHA_MASK		(0x000003FF)
+#define PDP_GRPH4GALPHA_GRPH4GALPHA_LSBMASK		(0x000003FF)
+#define PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT		(0)
+#define PDP_GRPH4GALPHA_GRPH4GALPHA_LENGTH		(10)
+#define PDP_GRPH4GALPHA_GRPH4GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1GALPHA_OFFSET		(0x00D0)
+
+/* PDP, VID1GALPHA, VID1GALPHA
+*/
+#define PDP_VID1GALPHA_VID1GALPHA_MASK		(0x000003FF)
+#define PDP_VID1GALPHA_VID1GALPHA_LSBMASK		(0x000003FF)
+#define PDP_VID1GALPHA_VID1GALPHA_SHIFT		(0)
+#define PDP_VID1GALPHA_VID1GALPHA_LENGTH		(10)
+#define PDP_VID1GALPHA_VID1GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2GALPHA_OFFSET		(0x00D4)
+
+/* PDP, VID2GALPHA, VID2GALPHA
+*/
+#define PDP_VID2GALPHA_VID2GALPHA_MASK		(0x000003FF)
+#define PDP_VID2GALPHA_VID2GALPHA_LSBMASK		(0x000003FF)
+#define PDP_VID2GALPHA_VID2GALPHA_SHIFT		(0)
+#define PDP_VID2GALPHA_VID2GALPHA_LENGTH		(10)
+#define PDP_VID2GALPHA_VID2GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3GALPHA_OFFSET		(0x00D8)
+
+/* PDP, VID3GALPHA, VID3GALPHA
+*/
+#define PDP_VID3GALPHA_VID3GALPHA_MASK		(0x000003FF)
+#define PDP_VID3GALPHA_VID3GALPHA_LSBMASK		(0x000003FF)
+#define PDP_VID3GALPHA_VID3GALPHA_SHIFT		(0)
+#define PDP_VID3GALPHA_VID3GALPHA_LENGTH		(10)
+#define PDP_VID3GALPHA_VID3GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4GALPHA_OFFSET		(0x00DC)
+
+/* PDP, VID4GALPHA, VID4GALPHA
+*/
+#define PDP_VID4GALPHA_VID4GALPHA_MASK		(0x000003FF)
+#define PDP_VID4GALPHA_VID4GALPHA_LSBMASK		(0x000003FF)
+#define PDP_VID4GALPHA_VID4GALPHA_SHIFT		(0)
+#define PDP_VID4GALPHA_VID4GALPHA_LENGTH		(10)
+#define PDP_VID4GALPHA_VID4GALPHA_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1CKEY_R_OFFSET		(0x00E0)
+
+/* PDP, GRPH1CKEY_R, GRPH1CKEY_R
+*/
+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_MASK		(0x000003FF)
+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_SHIFT		(0)
+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_LENGTH		(10)
+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1CKEY_GB_OFFSET		(0x00E4)
+
+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_G
+*/
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK		(0x03FF0000)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT		(16)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LENGTH		(10)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_B
+*/
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_MASK		(0x000003FF)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SHIFT		(0)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LENGTH		(10)
+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2CKEY_R_OFFSET		(0x00E8)
+
+/* PDP, GRPH2CKEY_R, GRPH2CKEY_R
+*/
+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_MASK		(0x000003FF)
+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_SHIFT		(0)
+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_LENGTH		(10)
+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2CKEY_GB_OFFSET		(0x00EC)
+
+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_G
+*/
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_MASK		(0x03FF0000)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SHIFT		(16)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LENGTH		(10)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_B
+*/
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_MASK		(0x000003FF)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SHIFT		(0)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LENGTH		(10)
+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3CKEY_R_OFFSET		(0x00F0)
+
+/* PDP, GRPH3CKEY_R, GRPH3CKEY_R
+*/
+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_MASK		(0x000003FF)
+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_SHIFT		(0)
+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_LENGTH		(10)
+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3CKEY_GB_OFFSET		(0x00F4)
+
+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_G
+*/
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_MASK		(0x03FF0000)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SHIFT		(16)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LENGTH		(10)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_B
+*/
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_MASK		(0x000003FF)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SHIFT		(0)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LENGTH		(10)
+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4CKEY_R_OFFSET		(0x00F8)
+
+/* PDP, GRPH4CKEY_R, GRPH4CKEY_R
+*/
+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_MASK		(0x000003FF)
+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_SHIFT		(0)
+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_LENGTH		(10)
+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4CKEY_GB_OFFSET		(0x00FC)
+
+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_G
+*/
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_MASK		(0x03FF0000)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SHIFT		(16)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LENGTH		(10)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_B
+*/
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_MASK		(0x000003FF)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SHIFT		(0)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LENGTH		(10)
+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1CKEY_R_OFFSET		(0x0100)
+
+/* PDP, VID1CKEY_R, VID1CKEY_R
+*/
+#define PDP_VID1CKEY_R_VID1CKEY_R_MASK		(0x000003FF)
+#define PDP_VID1CKEY_R_VID1CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_VID1CKEY_R_VID1CKEY_R_SHIFT		(0)
+#define PDP_VID1CKEY_R_VID1CKEY_R_LENGTH		(10)
+#define PDP_VID1CKEY_R_VID1CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1CKEY_GB_OFFSET		(0x0104)
+
+/* PDP, VID1CKEY_GB, VID1CKEY_G
+*/
+#define PDP_VID1CKEY_GB_VID1CKEY_G_MASK		(0x03FF0000)
+#define PDP_VID1CKEY_GB_VID1CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_VID1CKEY_GB_VID1CKEY_G_SHIFT		(16)
+#define PDP_VID1CKEY_GB_VID1CKEY_G_LENGTH		(10)
+#define PDP_VID1CKEY_GB_VID1CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1CKEY_GB, VID1CKEY_B
+*/
+#define PDP_VID1CKEY_GB_VID1CKEY_B_MASK		(0x000003FF)
+#define PDP_VID1CKEY_GB_VID1CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_VID1CKEY_GB_VID1CKEY_B_SHIFT		(0)
+#define PDP_VID1CKEY_GB_VID1CKEY_B_LENGTH		(10)
+#define PDP_VID1CKEY_GB_VID1CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2CKEY_R_OFFSET		(0x0108)
+
+/* PDP, VID2CKEY_R, VID2CKEY_R
+*/
+#define PDP_VID2CKEY_R_VID2CKEY_R_MASK		(0x000003FF)
+#define PDP_VID2CKEY_R_VID2CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_VID2CKEY_R_VID2CKEY_R_SHIFT		(0)
+#define PDP_VID2CKEY_R_VID2CKEY_R_LENGTH		(10)
+#define PDP_VID2CKEY_R_VID2CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2CKEY_GB_OFFSET		(0x010C)
+
+/* PDP, VID2CKEY_GB, VID2CKEY_G
+*/
+#define PDP_VID2CKEY_GB_VID2CKEY_G_MASK		(0x03FF0000)
+#define PDP_VID2CKEY_GB_VID2CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_VID2CKEY_GB_VID2CKEY_G_SHIFT		(16)
+#define PDP_VID2CKEY_GB_VID2CKEY_G_LENGTH		(10)
+#define PDP_VID2CKEY_GB_VID2CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2CKEY_GB, VID2CKEY_B
+*/
+#define PDP_VID2CKEY_GB_VID2CKEY_B_MASK		(0x000003FF)
+#define PDP_VID2CKEY_GB_VID2CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_VID2CKEY_GB_VID2CKEY_B_SHIFT		(0)
+#define PDP_VID2CKEY_GB_VID2CKEY_B_LENGTH		(10)
+#define PDP_VID2CKEY_GB_VID2CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3CKEY_R_OFFSET		(0x0110)
+
+/* PDP, VID3CKEY_R, VID3CKEY_R
+*/
+#define PDP_VID3CKEY_R_VID3CKEY_R_MASK		(0x000003FF)
+#define PDP_VID3CKEY_R_VID3CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_VID3CKEY_R_VID3CKEY_R_SHIFT		(0)
+#define PDP_VID3CKEY_R_VID3CKEY_R_LENGTH		(10)
+#define PDP_VID3CKEY_R_VID3CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3CKEY_GB_OFFSET		(0x0114)
+
+/* PDP, VID3CKEY_GB, VID3CKEY_G
+*/
+#define PDP_VID3CKEY_GB_VID3CKEY_G_MASK		(0x03FF0000)
+#define PDP_VID3CKEY_GB_VID3CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_VID3CKEY_GB_VID3CKEY_G_SHIFT		(16)
+#define PDP_VID3CKEY_GB_VID3CKEY_G_LENGTH		(10)
+#define PDP_VID3CKEY_GB_VID3CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3CKEY_GB, VID3CKEY_B
+*/
+#define PDP_VID3CKEY_GB_VID3CKEY_B_MASK		(0x000003FF)
+#define PDP_VID3CKEY_GB_VID3CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_VID3CKEY_GB_VID3CKEY_B_SHIFT		(0)
+#define PDP_VID3CKEY_GB_VID3CKEY_B_LENGTH		(10)
+#define PDP_VID3CKEY_GB_VID3CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4CKEY_R_OFFSET		(0x0118)
+
+/* PDP, VID4CKEY_R, VID4CKEY_R
+*/
+#define PDP_VID4CKEY_R_VID4CKEY_R_MASK		(0x000003FF)
+#define PDP_VID4CKEY_R_VID4CKEY_R_LSBMASK		(0x000003FF)
+#define PDP_VID4CKEY_R_VID4CKEY_R_SHIFT		(0)
+#define PDP_VID4CKEY_R_VID4CKEY_R_LENGTH		(10)
+#define PDP_VID4CKEY_R_VID4CKEY_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4CKEY_GB_OFFSET		(0x011C)
+
+/* PDP, VID4CKEY_GB, VID4CKEY_G
+*/
+#define PDP_VID4CKEY_GB_VID4CKEY_G_MASK		(0x03FF0000)
+#define PDP_VID4CKEY_GB_VID4CKEY_G_LSBMASK		(0x000003FF)
+#define PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT		(16)
+#define PDP_VID4CKEY_GB_VID4CKEY_G_LENGTH		(10)
+#define PDP_VID4CKEY_GB_VID4CKEY_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4CKEY_GB, VID4CKEY_B
+*/
+#define PDP_VID4CKEY_GB_VID4CKEY_B_MASK		(0x000003FF)
+#define PDP_VID4CKEY_GB_VID4CKEY_B_LSBMASK		(0x000003FF)
+#define PDP_VID4CKEY_GB_VID4CKEY_B_SHIFT		(0)
+#define PDP_VID4CKEY_GB_VID4CKEY_B_LENGTH		(10)
+#define PDP_VID4CKEY_GB_VID4CKEY_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1BLND2_R_OFFSET		(0x0120)
+
+/* PDP, GRPH1BLND2_R, GRPH1PIXDBL
+*/
+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_MASK		(0x80000000)
+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_SHIFT		(31)
+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_LENGTH		(1)
+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1BLND2_R, GRPH1LINDBL
+*/
+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_MASK		(0x20000000)
+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_SHIFT		(29)
+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_LENGTH		(1)
+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1BLND2_R, GRPH1CKEYMASK_R
+*/
+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SHIFT		(0)
+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LENGTH		(10)
+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1BLND2_GB_OFFSET		(0x0124)
+
+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_G
+*/
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SHIFT		(16)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LENGTH		(10)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_B
+*/
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SHIFT		(0)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LENGTH		(10)
+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2BLND2_R_OFFSET		(0x0128)
+
+/* PDP, GRPH2BLND2_R, GRPH2PIXDBL
+*/
+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_MASK		(0x80000000)
+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_SHIFT		(31)
+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_LENGTH		(1)
+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2BLND2_R, GRPH2LINDBL
+*/
+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_MASK		(0x20000000)
+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_SHIFT		(29)
+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_LENGTH		(1)
+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2BLND2_R, GRPH2CKEYMASK_R
+*/
+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SHIFT		(0)
+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LENGTH		(10)
+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2BLND2_GB_OFFSET		(0x012C)
+
+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_G
+*/
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SHIFT		(16)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LENGTH		(10)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_B
+*/
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SHIFT		(0)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LENGTH		(10)
+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3BLND2_R_OFFSET		(0x0130)
+
+/* PDP, GRPH3BLND2_R, GRPH3PIXDBL
+*/
+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_MASK		(0x80000000)
+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_SHIFT		(31)
+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_LENGTH		(1)
+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3BLND2_R, GRPH3LINDBL
+*/
+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_MASK		(0x20000000)
+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_SHIFT		(29)
+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_LENGTH		(1)
+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3BLND2_R, GRPH3CKEYMASK_R
+*/
+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SHIFT		(0)
+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LENGTH		(10)
+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3BLND2_GB_OFFSET		(0x0134)
+
+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_G
+*/
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SHIFT		(16)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LENGTH		(10)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_B
+*/
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SHIFT		(0)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LENGTH		(10)
+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4BLND2_R_OFFSET		(0x0138)
+
+/* PDP, GRPH4BLND2_R, GRPH4PIXDBL
+*/
+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_MASK		(0x80000000)
+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_SHIFT		(31)
+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_LENGTH		(1)
+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4BLND2_R, GRPH4LINDBL
+*/
+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_MASK		(0x20000000)
+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_LSBMASK		(0x00000001)
+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_SHIFT		(29)
+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_LENGTH		(1)
+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4BLND2_R, GRPH4CKEYMASK_R
+*/
+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SHIFT		(0)
+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LENGTH		(10)
+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4BLND2_GB_OFFSET		(0x013C)
+
+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_G
+*/
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SHIFT		(16)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LENGTH		(10)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_B
+*/
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SHIFT		(0)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LENGTH		(10)
+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1BLND2_R_OFFSET		(0x0140)
+
+/* PDP, VID1BLND2_R, VID1CKEYMASK_R
+*/
+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_SHIFT		(0)
+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_LENGTH		(10)
+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1BLND2_GB_OFFSET		(0x0144)
+
+/* PDP, VID1BLND2_GB, VID1CKEYMASK_G
+*/
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_SHIFT		(16)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_LENGTH		(10)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1BLND2_GB, VID1CKEYMASK_B
+*/
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_SHIFT		(0)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_LENGTH		(10)
+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2BLND2_R_OFFSET		(0x0148)
+
+/* PDP, VID2BLND2_R, VID2CKEYMASK_R
+*/
+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_SHIFT		(0)
+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_LENGTH		(10)
+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2BLND2_GB_OFFSET		(0x014C)
+
+/* PDP, VID2BLND2_GB, VID2CKEYMASK_G
+*/
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_SHIFT		(16)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_LENGTH		(10)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2BLND2_GB, VID2CKEYMASK_B
+*/
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_SHIFT		(0)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_LENGTH		(10)
+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3BLND2_R_OFFSET		(0x0150)
+
+/* PDP, VID3BLND2_R, VID3CKEYMASK_R
+*/
+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_SHIFT		(0)
+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_LENGTH		(10)
+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3BLND2_GB_OFFSET		(0x0154)
+
+/* PDP, VID3BLND2_GB, VID3CKEYMASK_G
+*/
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_SHIFT		(16)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_LENGTH		(10)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3BLND2_GB, VID3CKEYMASK_B
+*/
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_SHIFT		(0)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_LENGTH		(10)
+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4BLND2_R_OFFSET		(0x0158)
+
+/* PDP, VID4BLND2_R, VID4CKEYMASK_R
+*/
+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_MASK		(0x000003FF)
+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_LSBMASK		(0x000003FF)
+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_SHIFT		(0)
+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_LENGTH		(10)
+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4BLND2_GB_OFFSET		(0x015C)
+
+/* PDP, VID4BLND2_GB, VID4CKEYMASK_G
+*/
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_MASK		(0x03FF0000)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_LSBMASK		(0x000003FF)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_SHIFT		(16)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_LENGTH		(10)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4BLND2_GB, VID4CKEYMASK_B
+*/
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_MASK		(0x000003FF)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_LSBMASK		(0x000003FF)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_SHIFT		(0)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_LENGTH		(10)
+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1INTERLEAVE_CTRL_OFFSET		(0x0160)
+
+/* PDP, GRPH1INTERLEAVE_CTRL, GRPH1INTFIELD
+*/
+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK		(0x00000001)
+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LSBMASK		(0x00000001)
+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT		(0)
+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LENGTH		(1)
+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2INTERLEAVE_CTRL_OFFSET		(0x0164)
+
+/* PDP, GRPH2INTERLEAVE_CTRL, GRPH2INTFIELD
+*/
+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK		(0x00000001)
+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LSBMASK		(0x00000001)
+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT		(0)
+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LENGTH		(1)
+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3INTERLEAVE_CTRL_OFFSET		(0x0168)
+
+/* PDP, GRPH3INTERLEAVE_CTRL, GRPH3INTFIELD
+*/
+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_MASK		(0x00000001)
+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LSBMASK		(0x00000001)
+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SHIFT		(0)
+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LENGTH		(1)
+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4INTERLEAVE_CTRL_OFFSET		(0x016C)
+
+/* PDP, GRPH4INTERLEAVE_CTRL, GRPH4INTFIELD
+*/
+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK		(0x00000001)
+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LSBMASK		(0x00000001)
+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT		(0)
+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LENGTH		(1)
+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1INTERLEAVE_CTRL_OFFSET		(0x0170)
+
+/* PDP, VID1INTERLEAVE_CTRL, VID1INTFIELD
+*/
+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK		(0x00000001)
+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LSBMASK		(0x00000001)
+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT		(0)
+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LENGTH		(1)
+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2INTERLEAVE_CTRL_OFFSET		(0x0174)
+
+/* PDP, VID2INTERLEAVE_CTRL, VID2INTFIELD
+*/
+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_MASK		(0x00000001)
+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LSBMASK		(0x00000001)
+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SHIFT		(0)
+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LENGTH		(1)
+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3INTERLEAVE_CTRL_OFFSET		(0x0178)
+
+/* PDP, VID3INTERLEAVE_CTRL, VID3INTFIELD
+*/
+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_MASK		(0x00000001)
+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LSBMASK		(0x00000001)
+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SHIFT		(0)
+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LENGTH		(1)
+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4INTERLEAVE_CTRL_OFFSET		(0x017C)
+
+/* PDP, VID4INTERLEAVE_CTRL, VID4INTFIELD
+*/
+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_MASK		(0x00000001)
+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LSBMASK		(0x00000001)
+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SHIFT		(0)
+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LENGTH		(1)
+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1BASEADDR_OFFSET		(0x0180)
+
+/* PDP, GRPH1BASEADDR, GRPH1BASEADDR
+*/
+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT		(5)
+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_LENGTH		(27)
+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2BASEADDR_OFFSET		(0x0184)
+
+/* PDP, GRPH2BASEADDR, GRPH2BASEADDR
+*/
+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_SHIFT		(5)
+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_LENGTH		(27)
+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3BASEADDR_OFFSET		(0x0188)
+
+/* PDP, GRPH3BASEADDR, GRPH3BASEADDR
+*/
+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_SHIFT		(5)
+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_LENGTH		(27)
+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4BASEADDR_OFFSET		(0x018C)
+
+/* PDP, GRPH4BASEADDR, GRPH4BASEADDR
+*/
+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_SHIFT		(5)
+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_LENGTH		(27)
+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1BASEADDR_OFFSET		(0x0190)
+
+/* PDP, VID1BASEADDR, VID1BASEADDR
+*/
+#define PDP_VID1BASEADDR_VID1BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID1BASEADDR_VID1BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID1BASEADDR_VID1BASEADDR_SHIFT		(5)
+#define PDP_VID1BASEADDR_VID1BASEADDR_LENGTH		(27)
+#define PDP_VID1BASEADDR_VID1BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2BASEADDR_OFFSET		(0x0194)
+
+/* PDP, VID2BASEADDR, VID2BASEADDR
+*/
+#define PDP_VID2BASEADDR_VID2BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID2BASEADDR_VID2BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID2BASEADDR_VID2BASEADDR_SHIFT		(5)
+#define PDP_VID2BASEADDR_VID2BASEADDR_LENGTH		(27)
+#define PDP_VID2BASEADDR_VID2BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3BASEADDR_OFFSET		(0x0198)
+
+/* PDP, VID3BASEADDR, VID3BASEADDR
+*/
+#define PDP_VID3BASEADDR_VID3BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID3BASEADDR_VID3BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID3BASEADDR_VID3BASEADDR_SHIFT		(5)
+#define PDP_VID3BASEADDR_VID3BASEADDR_LENGTH		(27)
+#define PDP_VID3BASEADDR_VID3BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4BASEADDR_OFFSET		(0x019C)
+
+/* PDP, VID4BASEADDR, VID4BASEADDR
+*/
+#define PDP_VID4BASEADDR_VID4BASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID4BASEADDR_VID4BASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID4BASEADDR_VID4BASEADDR_SHIFT		(5)
+#define PDP_VID4BASEADDR_VID4BASEADDR_LENGTH		(27)
+#define PDP_VID4BASEADDR_VID4BASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1UBASEADDR_OFFSET		(0x01B0)
+
+/* PDP, VID1UBASEADDR, VID1UBASEADDR
+*/
+#define PDP_VID1UBASEADDR_VID1UBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID1UBASEADDR_VID1UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID1UBASEADDR_VID1UBASEADDR_SHIFT		(5)
+#define PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH		(27)
+#define PDP_VID1UBASEADDR_VID1UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2UBASEADDR_OFFSET		(0x01B4)
+
+/* PDP, VID2UBASEADDR, VID2UBASEADDR
+*/
+#define PDP_VID2UBASEADDR_VID2UBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID2UBASEADDR_VID2UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID2UBASEADDR_VID2UBASEADDR_SHIFT		(5)
+#define PDP_VID2UBASEADDR_VID2UBASEADDR_LENGTH		(27)
+#define PDP_VID2UBASEADDR_VID2UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3UBASEADDR_OFFSET		(0x01B8)
+
+/* PDP, VID3UBASEADDR, VID3UBASEADDR
+*/
+#define PDP_VID3UBASEADDR_VID3UBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID3UBASEADDR_VID3UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID3UBASEADDR_VID3UBASEADDR_SHIFT		(5)
+#define PDP_VID3UBASEADDR_VID3UBASEADDR_LENGTH		(27)
+#define PDP_VID3UBASEADDR_VID3UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4UBASEADDR_OFFSET		(0x01BC)
+
+/* PDP, VID4UBASEADDR, VID4UBASEADDR
+*/
+#define PDP_VID4UBASEADDR_VID4UBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID4UBASEADDR_VID4UBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID4UBASEADDR_VID4UBASEADDR_SHIFT		(5)
+#define PDP_VID4UBASEADDR_VID4UBASEADDR_LENGTH		(27)
+#define PDP_VID4UBASEADDR_VID4UBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VBASEADDR_OFFSET		(0x01D0)
+
+/* PDP, VID1VBASEADDR, VID1VBASEADDR
+*/
+#define PDP_VID1VBASEADDR_VID1VBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID1VBASEADDR_VID1VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID1VBASEADDR_VID1VBASEADDR_SHIFT		(5)
+#define PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH		(27)
+#define PDP_VID1VBASEADDR_VID1VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VBASEADDR_OFFSET		(0x01D4)
+
+/* PDP, VID2VBASEADDR, VID2VBASEADDR
+*/
+#define PDP_VID2VBASEADDR_VID2VBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID2VBASEADDR_VID2VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID2VBASEADDR_VID2VBASEADDR_SHIFT		(5)
+#define PDP_VID2VBASEADDR_VID2VBASEADDR_LENGTH		(27)
+#define PDP_VID2VBASEADDR_VID2VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VBASEADDR_OFFSET		(0x01D8)
+
+/* PDP, VID3VBASEADDR, VID3VBASEADDR
+*/
+#define PDP_VID3VBASEADDR_VID3VBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID3VBASEADDR_VID3VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID3VBASEADDR_VID3VBASEADDR_SHIFT		(5)
+#define PDP_VID3VBASEADDR_VID3VBASEADDR_LENGTH		(27)
+#define PDP_VID3VBASEADDR_VID3VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VBASEADDR_OFFSET		(0x01DC)
+
+/* PDP, VID4VBASEADDR, VID4VBASEADDR
+*/
+#define PDP_VID4VBASEADDR_VID4VBASEADDR_MASK		(0xFFFFFFE0)
+#define PDP_VID4VBASEADDR_VID4VBASEADDR_LSBMASK		(0x07FFFFFF)
+#define PDP_VID4VBASEADDR_VID4VBASEADDR_SHIFT		(5)
+#define PDP_VID4VBASEADDR_VID4VBASEADDR_LENGTH		(27)
+#define PDP_VID4VBASEADDR_VID4VBASEADDR_SIGNED_FIELD	IMG_FALSE
+
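+/*
+ * Note (illustration, not from the original header): every *BASEADDR
+ * field above occupies bits [31:5] (MASK 0xFFFFFFE0, SHIFT 5), so the
+ * surface base addresses must be 32-byte aligned. Assuming the field is
+ * programmed with the address shifted down by 5, a hypothetical sketch
+ * using the PDP_FIELD_SET helper introduced earlier:
+ *
+ *	u32 v = PDP_FIELD_SET(0, PDP_VID1BASEADDR_VID1BASEADDR,
+ *			      (u32)(paddr >> 5));
+ *	writel(v, pdp_regs + PDP_VID1BASEADDR_OFFSET);
+ */
+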
+#define PDP_VID1POSTSKIPCTRL_OFFSET		(0x0230)
+
+/* PDP, VID1POSTSKIPCTRL, VID1HPOSTCLIP
+*/
+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK		(0x007F0000)
+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LSBMASK		(0x0000007F)
+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT		(16)
+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LENGTH		(7)
+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1POSTSKIPCTRL, VID1VPOSTCLIP
+*/
+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_MASK		(0x0000003F)
+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LSBMASK		(0x0000003F)
+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SHIFT		(0)
+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LENGTH		(6)
+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2POSTSKIPCTRL_OFFSET		(0x0234)
+
+/* PDP, VID2POSTSKIPCTRL, VID2HPOSTCLIP
+*/
+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_MASK		(0x007F0000)
+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LSBMASK		(0x0000007F)
+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SHIFT		(16)
+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LENGTH		(7)
+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2POSTSKIPCTRL, VID2VPOSTCLIP
+*/
+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_MASK		(0x0000003F)
+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LSBMASK		(0x0000003F)
+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SHIFT		(0)
+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LENGTH		(6)
+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3POSTSKIPCTRL_OFFSET		(0x0238)
+
+/* PDP, VID3POSTSKIPCTRL, VID3HPOSTCLIP
+*/
+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_MASK		(0x007F0000)
+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LSBMASK		(0x0000007F)
+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SHIFT		(16)
+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LENGTH		(7)
+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3POSTSKIPCTRL, VID3VPOSTCLIP
+*/
+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_MASK		(0x0000003F)
+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LSBMASK		(0x0000003F)
+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SHIFT		(0)
+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LENGTH		(6)
+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4POSTSKIPCTRL_OFFSET		(0x023C)
+
+/* PDP, VID4POSTSKIPCTRL, VID4HPOSTCLIP
+*/
+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_MASK		(0x007F0000)
+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LSBMASK		(0x0000007F)
+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SHIFT		(16)
+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LENGTH		(7)
+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4POSTSKIPCTRL, VID4VPOSTCLIP
+*/
+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_MASK		(0x0000003F)
+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LSBMASK		(0x0000003F)
+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SHIFT		(0)
+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LENGTH		(6)
+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1DECIMATE_CTRL_OFFSET		(0x0240)
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_PIXEL_HALVE
+*/
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_EN
+*/
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_MASK		(0x00000001)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SHIFT		(0)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LENGTH		(1)
+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2DECIMATE_CTRL_OFFSET		(0x0244)
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_PIXEL_HALVE
+*/
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_EN
+*/
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_MASK		(0x00000001)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SHIFT		(0)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LENGTH		(1)
+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3DECIMATE_CTRL_OFFSET		(0x0248)
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_PIXEL_HALVE
+*/
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_EN
+*/
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_MASK		(0x00000001)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SHIFT		(0)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LENGTH		(1)
+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4DECIMATE_CTRL_OFFSET		(0x024C)
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_PIXEL_HALVE
+*/
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_EN
+*/
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_MASK		(0x00000001)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SHIFT		(0)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LENGTH		(1)
+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1DECIMATE_CTRL_OFFSET		(0x0250)
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_PIXEL_HALVE
+*/
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_EN
+*/
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK		(0x00000001)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SHIFT		(0)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LENGTH		(1)
+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2DECIMATE_CTRL_OFFSET		(0x0254)
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_PIXEL_HALVE
+*/
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_EN
+*/
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_MASK		(0x00000001)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SHIFT		(0)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LENGTH		(1)
+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3DECIMATE_CTRL_OFFSET		(0x0258)
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_PIXEL_HALVE
+*/
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_EN
+*/
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_MASK		(0x00000001)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SHIFT		(0)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LENGTH		(1)
+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4DECIMATE_CTRL_OFFSET		(0x025C)
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_COUNT
+*/
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_MASK		(0x000000F0)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LSBMASK		(0x0000000F)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SHIFT		(4)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LENGTH		(4)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_MODE
+*/
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_MASK		(0x00000008)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LSBMASK		(0x00000001)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SHIFT		(3)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LENGTH		(1)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_PIXEL_HALVE
+*/
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_MASK		(0x00000004)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LSBMASK		(0x00000001)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SHIFT		(2)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LENGTH		(1)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_EN
+*/
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_MASK		(0x00000001)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LSBMASK		(0x00000001)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SHIFT		(0)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LENGTH		(1)
+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SIGNED_FIELD	IMG_FALSE
+
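+/*
+ * Illustration only: each *DECIMATE_CTRL register above packs four
+ * fields (EN at bit 0, PIXEL_HALVE at bit 2, LINE_DISCARD_MODE at
+ * bit 3, LINE_DISCARD_COUNT at bits [7:4]). A multi-field value can be
+ * composed with the hypothetical PDP_FIELD_SET helper, e.g.:
+ *
+ *	u32 v = 0;
+ *	v = PDP_FIELD_SET(v, PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN, 1);
+ *	v = PDP_FIELD_SET(v,
+ *		PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT, 1);
+ */
+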
+#define PDP_VID1SKIPCTRL_OFFSET		(0x0270)
+
+/* PDP, VID1SKIPCTRL, VID1HSKIP
+*/
+#define PDP_VID1SKIPCTRL_VID1HSKIP_MASK		(0x0FFF0000)
+#define PDP_VID1SKIPCTRL_VID1HSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT		(16)
+#define PDP_VID1SKIPCTRL_VID1HSKIP_LENGTH		(12)
+#define PDP_VID1SKIPCTRL_VID1HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SKIPCTRL, VID1VSKIP
+*/
+#define PDP_VID1SKIPCTRL_VID1VSKIP_MASK		(0x00000FFF)
+#define PDP_VID1SKIPCTRL_VID1VSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT		(0)
+#define PDP_VID1SKIPCTRL_VID1VSKIP_LENGTH		(12)
+#define PDP_VID1SKIPCTRL_VID1VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2SKIPCTRL_OFFSET		(0x0274)
+
+/* PDP, VID2SKIPCTRL, VID2HSKIP
+*/
+#define PDP_VID2SKIPCTRL_VID2HSKIP_MASK		(0x0FFF0000)
+#define PDP_VID2SKIPCTRL_VID2HSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID2SKIPCTRL_VID2HSKIP_SHIFT		(16)
+#define PDP_VID2SKIPCTRL_VID2HSKIP_LENGTH		(12)
+#define PDP_VID2SKIPCTRL_VID2HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SKIPCTRL, VID2VSKIP
+*/
+#define PDP_VID2SKIPCTRL_VID2VSKIP_MASK		(0x00000FFF)
+#define PDP_VID2SKIPCTRL_VID2VSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID2SKIPCTRL_VID2VSKIP_SHIFT		(0)
+#define PDP_VID2SKIPCTRL_VID2VSKIP_LENGTH		(12)
+#define PDP_VID2SKIPCTRL_VID2VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3SKIPCTRL_OFFSET		(0x0278)
+
+/* PDP, VID3SKIPCTRL, VID3HSKIP
+*/
+#define PDP_VID3SKIPCTRL_VID3HSKIP_MASK		(0x0FFF0000)
+#define PDP_VID3SKIPCTRL_VID3HSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID3SKIPCTRL_VID3HSKIP_SHIFT		(16)
+#define PDP_VID3SKIPCTRL_VID3HSKIP_LENGTH		(12)
+#define PDP_VID3SKIPCTRL_VID3HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SKIPCTRL, VID3VSKIP
+*/
+#define PDP_VID3SKIPCTRL_VID3VSKIP_MASK		(0x00000FFF)
+#define PDP_VID3SKIPCTRL_VID3VSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID3SKIPCTRL_VID3VSKIP_SHIFT		(0)
+#define PDP_VID3SKIPCTRL_VID3VSKIP_LENGTH		(12)
+#define PDP_VID3SKIPCTRL_VID3VSKIP_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4SKIPCTRL_OFFSET		(0x027C)
+
+/* PDP, VID4SKIPCTRL, VID4HSKIP
+*/
+#define PDP_VID4SKIPCTRL_VID4HSKIP_MASK		(0x0FFF0000)
+#define PDP_VID4SKIPCTRL_VID4HSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID4SKIPCTRL_VID4HSKIP_SHIFT		(16)
+#define PDP_VID4SKIPCTRL_VID4HSKIP_LENGTH		(12)
+#define PDP_VID4SKIPCTRL_VID4HSKIP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SKIPCTRL, VID4VSKIP
+*/
+#define PDP_VID4SKIPCTRL_VID4VSKIP_MASK		(0x00000FFF)
+#define PDP_VID4SKIPCTRL_VID4VSKIP_LSBMASK		(0x00000FFF)
+#define PDP_VID4SKIPCTRL_VID4VSKIP_SHIFT		(0)
+#define PDP_VID4SKIPCTRL_VID4VSKIP_LENGTH		(12)
+#define PDP_VID4SKIPCTRL_VID4VSKIP_SIGNED_FIELD	IMG_FALSE
+
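+/*
+ * Illustrative helpers (an editorial sketch, not part of the original
+ * register map): every field above is described by a MASK/LSBMASK/SHIFT
+ * triple, so one generic pair of accessors covers all of them.  The
+ * helper names below are assumptions chosen for this example only.
+ */
+static inline unsigned int pdp_field_get(unsigned int reg_val,
+					 unsigned int mask,
+					 unsigned int shift)
+{
+	/* Isolate the field bits, then align them down to bit 0. */
+	return (reg_val & mask) >> shift;
+}
+
+static inline unsigned int pdp_field_set(unsigned int reg_val,
+					 unsigned int mask,
+					 unsigned int shift,
+					 unsigned int val)
+{
+	/* Clear the field, then insert the new value at its position. */
+	return (reg_val & ~mask) | ((val << shift) & mask);
+}
+
+/*
+ * Usage, assuming a hypothetical pdp_read() MMIO accessor:
+ *
+ *	vskip = pdp_field_get(pdp_read(base, PDP_VID1SKIPCTRL_OFFSET),
+ *			      PDP_VID1SKIPCTRL_VID1VSKIP_MASK,
+ *			      PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT);
+ */
+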
+#define PDP_VID1SCALECTRL_OFFSET		(0x0460)
+
+/* PDP, VID1SCALECTRL, VID1HSCALEBP
+*/
+#define PDP_VID1SCALECTRL_VID1HSCALEBP_MASK		(0x80000000)
+#define PDP_VID1SCALECTRL_VID1HSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID1SCALECTRL_VID1HSCALEBP_SHIFT		(31)
+#define PDP_VID1SCALECTRL_VID1HSCALEBP_LENGTH		(1)
+#define PDP_VID1SCALECTRL_VID1HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VSCALEBP
+*/
+#define PDP_VID1SCALECTRL_VID1VSCALEBP_MASK		(0x40000000)
+#define PDP_VID1SCALECTRL_VID1VSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID1SCALECTRL_VID1VSCALEBP_SHIFT		(30)
+#define PDP_VID1SCALECTRL_VID1VSCALEBP_LENGTH		(1)
+#define PDP_VID1SCALECTRL_VID1VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1HSBEFOREVS
+*/
+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_MASK		(0x20000000)
+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_LSBMASK		(0x00000001)
+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_SHIFT		(29)
+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_LENGTH		(1)
+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VSURUNCTRL
+*/
+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_MASK		(0x08000000)
+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_LSBMASK		(0x00000001)
+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_SHIFT		(27)
+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_LENGTH		(1)
+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1PAN_EN
+*/
+#define PDP_VID1SCALECTRL_VID1PAN_EN_MASK		(0x00040000)
+#define PDP_VID1SCALECTRL_VID1PAN_EN_LSBMASK		(0x00000001)
+#define PDP_VID1SCALECTRL_VID1PAN_EN_SHIFT		(18)
+#define PDP_VID1SCALECTRL_VID1PAN_EN_LENGTH		(1)
+#define PDP_VID1SCALECTRL_VID1PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VORDER
+*/
+#define PDP_VID1SCALECTRL_VID1VORDER_MASK		(0x00030000)
+#define PDP_VID1SCALECTRL_VID1VORDER_LSBMASK		(0x00000003)
+#define PDP_VID1SCALECTRL_VID1VORDER_SHIFT		(16)
+#define PDP_VID1SCALECTRL_VID1VORDER_LENGTH		(2)
+#define PDP_VID1SCALECTRL_VID1VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VPITCH
+*/
+#define PDP_VID1SCALECTRL_VID1VPITCH_MASK		(0x0000FFFF)
+#define PDP_VID1SCALECTRL_VID1VPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID1SCALECTRL_VID1VPITCH_SHIFT		(0)
+#define PDP_VID1SCALECTRL_VID1VPITCH_LENGTH		(16)
+#define PDP_VID1SCALECTRL_VID1VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VSINIT_OFFSET		(0x0464)
+
+/* PDP, VID1VSINIT, VID1VINITIAL1
+*/
+#define PDP_VID1VSINIT_VID1VINITIAL1_MASK		(0xFFFF0000)
+#define PDP_VID1VSINIT_VID1VINITIAL1_LSBMASK		(0x0000FFFF)
+#define PDP_VID1VSINIT_VID1VINITIAL1_SHIFT		(16)
+#define PDP_VID1VSINIT_VID1VINITIAL1_LENGTH		(16)
+#define PDP_VID1VSINIT_VID1VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1VSINIT, VID1VINITIAL0
+*/
+#define PDP_VID1VSINIT_VID1VINITIAL0_MASK		(0x0000FFFF)
+#define PDP_VID1VSINIT_VID1VINITIAL0_LSBMASK		(0x0000FFFF)
+#define PDP_VID1VSINIT_VID1VINITIAL0_SHIFT		(0)
+#define PDP_VID1VSINIT_VID1VINITIAL0_LENGTH		(16)
+#define PDP_VID1VSINIT_VID1VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF0_OFFSET		(0x0468)
+
+/* PDP, VID1VCOEFF0, VID1VCOEFF0
+*/
+#define PDP_VID1VCOEFF0_VID1VCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF0_VID1VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF0_VID1VCOEFF0_SHIFT		(0)
+#define PDP_VID1VCOEFF0_VID1VCOEFF0_LENGTH		(32)
+#define PDP_VID1VCOEFF0_VID1VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF1_OFFSET		(0x046C)
+
+/* PDP, VID1VCOEFF1, VID1VCOEFF1
+*/
+#define PDP_VID1VCOEFF1_VID1VCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF1_VID1VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF1_VID1VCOEFF1_SHIFT		(0)
+#define PDP_VID1VCOEFF1_VID1VCOEFF1_LENGTH		(32)
+#define PDP_VID1VCOEFF1_VID1VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF2_OFFSET		(0x0470)
+
+/* PDP, VID1VCOEFF2, VID1VCOEFF2
+*/
+#define PDP_VID1VCOEFF2_VID1VCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF2_VID1VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF2_VID1VCOEFF2_SHIFT		(0)
+#define PDP_VID1VCOEFF2_VID1VCOEFF2_LENGTH		(32)
+#define PDP_VID1VCOEFF2_VID1VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF3_OFFSET		(0x0474)
+
+/* PDP, VID1VCOEFF3, VID1VCOEFF3
+*/
+#define PDP_VID1VCOEFF3_VID1VCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF3_VID1VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF3_VID1VCOEFF3_SHIFT		(0)
+#define PDP_VID1VCOEFF3_VID1VCOEFF3_LENGTH		(32)
+#define PDP_VID1VCOEFF3_VID1VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF4_OFFSET		(0x0478)
+
+/* PDP, VID1VCOEFF4, VID1VCOEFF4
+*/
+#define PDP_VID1VCOEFF4_VID1VCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF4_VID1VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF4_VID1VCOEFF4_SHIFT		(0)
+#define PDP_VID1VCOEFF4_VID1VCOEFF4_LENGTH		(32)
+#define PDP_VID1VCOEFF4_VID1VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF5_OFFSET		(0x047C)
+
+/* PDP, VID1VCOEFF5, VID1VCOEFF5
+*/
+#define PDP_VID1VCOEFF5_VID1VCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF5_VID1VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF5_VID1VCOEFF5_SHIFT		(0)
+#define PDP_VID1VCOEFF5_VID1VCOEFF5_LENGTH		(32)
+#define PDP_VID1VCOEFF5_VID1VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF6_OFFSET		(0x0480)
+
+/* PDP, VID1VCOEFF6, VID1VCOEFF6
+*/
+#define PDP_VID1VCOEFF6_VID1VCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF6_VID1VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF6_VID1VCOEFF6_SHIFT		(0)
+#define PDP_VID1VCOEFF6_VID1VCOEFF6_LENGTH		(32)
+#define PDP_VID1VCOEFF6_VID1VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF7_OFFSET		(0x0484)
+
+/* PDP, VID1VCOEFF7, VID1VCOEFF7
+*/
+#define PDP_VID1VCOEFF7_VID1VCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF7_VID1VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1VCOEFF7_VID1VCOEFF7_SHIFT		(0)
+#define PDP_VID1VCOEFF7_VID1VCOEFF7_LENGTH		(32)
+#define PDP_VID1VCOEFF7_VID1VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1VCOEFF8_OFFSET		(0x0488)
+
+/* PDP, VID1VCOEFF8, VID1VCOEFF8
+*/
+#define PDP_VID1VCOEFF8_VID1VCOEFF8_MASK		(0x000000FF)
+#define PDP_VID1VCOEFF8_VID1VCOEFF8_LSBMASK		(0x000000FF)
+#define PDP_VID1VCOEFF8_VID1VCOEFF8_SHIFT		(0)
+#define PDP_VID1VCOEFF8_VID1VCOEFF8_LENGTH		(8)
+#define PDP_VID1VCOEFF8_VID1VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HSINIT_OFFSET		(0x048C)
+
+/* PDP, VID1HSINIT, VID1HINITIAL
+*/
+#define PDP_VID1HSINIT_VID1HINITIAL_MASK		(0xFFFF0000)
+#define PDP_VID1HSINIT_VID1HINITIAL_LSBMASK		(0x0000FFFF)
+#define PDP_VID1HSINIT_VID1HINITIAL_SHIFT		(16)
+#define PDP_VID1HSINIT_VID1HINITIAL_LENGTH		(16)
+#define PDP_VID1HSINIT_VID1HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1HSINIT, VID1HPITCH
+*/
+#define PDP_VID1HSINIT_VID1HPITCH_MASK		(0x0000FFFF)
+#define PDP_VID1HSINIT_VID1HPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID1HSINIT_VID1HPITCH_SHIFT		(0)
+#define PDP_VID1HSINIT_VID1HPITCH_LENGTH		(16)
+#define PDP_VID1HSINIT_VID1HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF0_OFFSET		(0x0490)
+
+/* PDP, VID1HCOEFF0, VID1HCOEFF0
+*/
+#define PDP_VID1HCOEFF0_VID1HCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF0_VID1HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF0_VID1HCOEFF0_SHIFT		(0)
+#define PDP_VID1HCOEFF0_VID1HCOEFF0_LENGTH		(32)
+#define PDP_VID1HCOEFF0_VID1HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF1_OFFSET		(0x0494)
+
+/* PDP, VID1HCOEFF1, VID1HCOEFF1
+*/
+#define PDP_VID1HCOEFF1_VID1HCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF1_VID1HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF1_VID1HCOEFF1_SHIFT		(0)
+#define PDP_VID1HCOEFF1_VID1HCOEFF1_LENGTH		(32)
+#define PDP_VID1HCOEFF1_VID1HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF2_OFFSET		(0x0498)
+
+/* PDP, VID1HCOEFF2, VID1HCOEFF2
+*/
+#define PDP_VID1HCOEFF2_VID1HCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF2_VID1HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF2_VID1HCOEFF2_SHIFT		(0)
+#define PDP_VID1HCOEFF2_VID1HCOEFF2_LENGTH		(32)
+#define PDP_VID1HCOEFF2_VID1HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF3_OFFSET		(0x049C)
+
+/* PDP, VID1HCOEFF3, VID1HCOEFF3
+*/
+#define PDP_VID1HCOEFF3_VID1HCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF3_VID1HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF3_VID1HCOEFF3_SHIFT		(0)
+#define PDP_VID1HCOEFF3_VID1HCOEFF3_LENGTH		(32)
+#define PDP_VID1HCOEFF3_VID1HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF4_OFFSET		(0x04A0)
+
+/* PDP, VID1HCOEFF4, VID1HCOEFF4
+*/
+#define PDP_VID1HCOEFF4_VID1HCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF4_VID1HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF4_VID1HCOEFF4_SHIFT		(0)
+#define PDP_VID1HCOEFF4_VID1HCOEFF4_LENGTH		(32)
+#define PDP_VID1HCOEFF4_VID1HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF5_OFFSET		(0x04A4)
+
+/* PDP, VID1HCOEFF5, VID1HCOEFF5
+*/
+#define PDP_VID1HCOEFF5_VID1HCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF5_VID1HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF5_VID1HCOEFF5_SHIFT		(0)
+#define PDP_VID1HCOEFF5_VID1HCOEFF5_LENGTH		(32)
+#define PDP_VID1HCOEFF5_VID1HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF6_OFFSET		(0x04A8)
+
+/* PDP, VID1HCOEFF6, VID1HCOEFF6
+*/
+#define PDP_VID1HCOEFF6_VID1HCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF6_VID1HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF6_VID1HCOEFF6_SHIFT		(0)
+#define PDP_VID1HCOEFF6_VID1HCOEFF6_LENGTH		(32)
+#define PDP_VID1HCOEFF6_VID1HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF7_OFFSET		(0x04AC)
+
+/* PDP, VID1HCOEFF7, VID1HCOEFF7
+*/
+#define PDP_VID1HCOEFF7_VID1HCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF7_VID1HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF7_VID1HCOEFF7_SHIFT		(0)
+#define PDP_VID1HCOEFF7_VID1HCOEFF7_LENGTH		(32)
+#define PDP_VID1HCOEFF7_VID1HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF8_OFFSET		(0x04B0)
+
+/* PDP, VID1HCOEFF8, VID1HCOEFF8
+*/
+#define PDP_VID1HCOEFF8_VID1HCOEFF8_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF8_VID1HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF8_VID1HCOEFF8_SHIFT		(0)
+#define PDP_VID1HCOEFF8_VID1HCOEFF8_LENGTH		(32)
+#define PDP_VID1HCOEFF8_VID1HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF9_OFFSET		(0x04B4)
+
+/* PDP, VID1HCOEFF9, VID1HCOEFF9
+*/
+#define PDP_VID1HCOEFF9_VID1HCOEFF9_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF9_VID1HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF9_VID1HCOEFF9_SHIFT		(0)
+#define PDP_VID1HCOEFF9_VID1HCOEFF9_LENGTH		(32)
+#define PDP_VID1HCOEFF9_VID1HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF10_OFFSET		(0x04B8)
+
+/* PDP, VID1HCOEFF10, VID1HCOEFF10
+*/
+#define PDP_VID1HCOEFF10_VID1HCOEFF10_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF10_VID1HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF10_VID1HCOEFF10_SHIFT		(0)
+#define PDP_VID1HCOEFF10_VID1HCOEFF10_LENGTH		(32)
+#define PDP_VID1HCOEFF10_VID1HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF11_OFFSET		(0x04BC)
+
+/* PDP, VID1HCOEFF11, VID1HCOEFF11
+*/
+#define PDP_VID1HCOEFF11_VID1HCOEFF11_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF11_VID1HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF11_VID1HCOEFF11_SHIFT		(0)
+#define PDP_VID1HCOEFF11_VID1HCOEFF11_LENGTH		(32)
+#define PDP_VID1HCOEFF11_VID1HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF12_OFFSET		(0x04C0)
+
+/* PDP, VID1HCOEFF12, VID1HCOEFF12
+*/
+#define PDP_VID1HCOEFF12_VID1HCOEFF12_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF12_VID1HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF12_VID1HCOEFF12_SHIFT		(0)
+#define PDP_VID1HCOEFF12_VID1HCOEFF12_LENGTH		(32)
+#define PDP_VID1HCOEFF12_VID1HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF13_OFFSET		(0x04C4)
+
+/* PDP, VID1HCOEFF13, VID1HCOEFF13
+*/
+#define PDP_VID1HCOEFF13_VID1HCOEFF13_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF13_VID1HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF13_VID1HCOEFF13_SHIFT		(0)
+#define PDP_VID1HCOEFF13_VID1HCOEFF13_LENGTH		(32)
+#define PDP_VID1HCOEFF13_VID1HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF14_OFFSET		(0x04C8)
+
+/* PDP, VID1HCOEFF14, VID1HCOEFF14
+*/
+#define PDP_VID1HCOEFF14_VID1HCOEFF14_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF14_VID1HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF14_VID1HCOEFF14_SHIFT		(0)
+#define PDP_VID1HCOEFF14_VID1HCOEFF14_LENGTH		(32)
+#define PDP_VID1HCOEFF14_VID1HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF15_OFFSET		(0x04CC)
+
+/* PDP, VID1HCOEFF15, VID1HCOEFF15
+*/
+#define PDP_VID1HCOEFF15_VID1HCOEFF15_MASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF15_VID1HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID1HCOEFF15_VID1HCOEFF15_SHIFT		(0)
+#define PDP_VID1HCOEFF15_VID1HCOEFF15_LENGTH		(32)
+#define PDP_VID1HCOEFF15_VID1HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1HCOEFF16_OFFSET		(0x04D0)
+
+/* PDP, VID1HCOEFF16, VID1HCOEFF16
+*/
+#define PDP_VID1HCOEFF16_VID1HCOEFF16_MASK		(0x000000FF)
+#define PDP_VID1HCOEFF16_VID1HCOEFF16_LSBMASK		(0x000000FF)
+#define PDP_VID1HCOEFF16_VID1HCOEFF16_SHIFT		(0)
+#define PDP_VID1HCOEFF16_VID1HCOEFF16_LENGTH		(8)
+#define PDP_VID1HCOEFF16_VID1HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1SCALESIZE_OFFSET		(0x04D4)
+
+/* PDP, VID1SCALESIZE, VID1SCALEWIDTH
+*/
+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_MASK		(0x0FFF0000)
+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_SHIFT		(16)
+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_LENGTH		(12)
+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1SCALESIZE, VID1SCALEHEIGHT
+*/
+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_MASK		(0x00000FFF)
+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SHIFT		(0)
+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LENGTH		(12)
+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CORE_ID_OFFSET		(0x04E0)
+
+/* PDP, PVR_PDP_CORE_ID, GROUP_ID
+*/
+#define PDP_CORE_ID_GROUP_ID_MASK		(0xFF000000)
+#define PDP_CORE_ID_GROUP_ID_LSBMASK		(0x000000FF)
+#define PDP_CORE_ID_GROUP_ID_SHIFT		(24)
+#define PDP_CORE_ID_GROUP_ID_LENGTH		(8)
+#define PDP_CORE_ID_GROUP_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_PDP_CORE_ID, CORE_ID
+*/
+#define PDP_CORE_ID_CORE_ID_MASK		(0x00FF0000)
+#define PDP_CORE_ID_CORE_ID_LSBMASK		(0x000000FF)
+#define PDP_CORE_ID_CORE_ID_SHIFT		(16)
+#define PDP_CORE_ID_CORE_ID_LENGTH		(8)
+#define PDP_CORE_ID_CORE_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_PDP_CORE_ID, CONFIG_ID
+*/
+#define PDP_CORE_ID_CONFIG_ID_MASK		(0x0000FFFF)
+#define PDP_CORE_ID_CONFIG_ID_LSBMASK		(0x0000FFFF)
+#define PDP_CORE_ID_CONFIG_ID_SHIFT		(0)
+#define PDP_CORE_ID_CONFIG_ID_LENGTH		(16)
+#define PDP_CORE_ID_CONFIG_ID_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CORE_REV_OFFSET		(0x04F0)
+
+/* PDP, PVR_PDP_CORE_REV, MAJOR_REV
+*/
+#define PDP_CORE_REV_MAJOR_REV_MASK		(0x00FF0000)
+#define PDP_CORE_REV_MAJOR_REV_LSBMASK		(0x000000FF)
+#define PDP_CORE_REV_MAJOR_REV_SHIFT		(16)
+#define PDP_CORE_REV_MAJOR_REV_LENGTH		(8)
+#define PDP_CORE_REV_MAJOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_PDP_CORE_REV, MINOR_REV
+*/
+#define PDP_CORE_REV_MINOR_REV_MASK		(0x0000FF00)
+#define PDP_CORE_REV_MINOR_REV_LSBMASK		(0x000000FF)
+#define PDP_CORE_REV_MINOR_REV_SHIFT		(8)
+#define PDP_CORE_REV_MINOR_REV_LENGTH		(8)
+#define PDP_CORE_REV_MINOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_PDP_CORE_REV, MAINT_REV
+*/
+#define PDP_CORE_REV_MAINT_REV_MASK		(0x000000FF)
+#define PDP_CORE_REV_MAINT_REV_LSBMASK		(0x000000FF)
+#define PDP_CORE_REV_MAINT_REV_SHIFT		(0)
+#define PDP_CORE_REV_MAINT_REV_LENGTH		(8)
+#define PDP_CORE_REV_MAINT_REV_SIGNED_FIELD	IMG_FALSE
+
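+/*
+ * Illustrative sketch (an editorial assumption, not part of the original
+ * header): the CORE_ID and CORE_REV fields above identify the PDP core
+ * and its revision, and would typically be decoded once at probe time.
+ * pdp_read() is a hypothetical MMIO accessor.
+ *
+ *	id    = pdp_read(base, PDP_CORE_ID_OFFSET);
+ *	rev   = pdp_read(base, PDP_CORE_REV_OFFSET);
+ *	core  = (id  & PDP_CORE_ID_CORE_ID_MASK)
+ *			>> PDP_CORE_ID_CORE_ID_SHIFT;
+ *	major = (rev & PDP_CORE_REV_MAJOR_REV_MASK)
+ *			>> PDP_CORE_REV_MAJOR_REV_SHIFT;
+ *	minor = (rev & PDP_CORE_REV_MINOR_REV_MASK)
+ *			>> PDP_CORE_REV_MINOR_REV_SHIFT;
+ */
+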
+#define PDP_VID2SCALECTRL_OFFSET		(0x0500)
+
+/* PDP, VID2SCALECTRL, VID2HSCALEBP
+*/
+#define PDP_VID2SCALECTRL_VID2HSCALEBP_MASK		(0x80000000)
+#define PDP_VID2SCALECTRL_VID2HSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID2SCALECTRL_VID2HSCALEBP_SHIFT		(31)
+#define PDP_VID2SCALECTRL_VID2HSCALEBP_LENGTH		(1)
+#define PDP_VID2SCALECTRL_VID2HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VSCALEBP
+*/
+#define PDP_VID2SCALECTRL_VID2VSCALEBP_MASK		(0x40000000)
+#define PDP_VID2SCALECTRL_VID2VSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID2SCALECTRL_VID2VSCALEBP_SHIFT		(30)
+#define PDP_VID2SCALECTRL_VID2VSCALEBP_LENGTH		(1)
+#define PDP_VID2SCALECTRL_VID2VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2HSBEFOREVS
+*/
+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_MASK		(0x20000000)
+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_LSBMASK		(0x00000001)
+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_SHIFT		(29)
+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_LENGTH		(1)
+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VSURUNCTRL
+*/
+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_MASK		(0x08000000)
+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_LSBMASK		(0x00000001)
+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_SHIFT		(27)
+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_LENGTH		(1)
+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2PAN_EN
+*/
+#define PDP_VID2SCALECTRL_VID2PAN_EN_MASK		(0x00040000)
+#define PDP_VID2SCALECTRL_VID2PAN_EN_LSBMASK		(0x00000001)
+#define PDP_VID2SCALECTRL_VID2PAN_EN_SHIFT		(18)
+#define PDP_VID2SCALECTRL_VID2PAN_EN_LENGTH		(1)
+#define PDP_VID2SCALECTRL_VID2PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VORDER
+*/
+#define PDP_VID2SCALECTRL_VID2VORDER_MASK		(0x00030000)
+#define PDP_VID2SCALECTRL_VID2VORDER_LSBMASK		(0x00000003)
+#define PDP_VID2SCALECTRL_VID2VORDER_SHIFT		(16)
+#define PDP_VID2SCALECTRL_VID2VORDER_LENGTH		(2)
+#define PDP_VID2SCALECTRL_VID2VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VPITCH
+*/
+#define PDP_VID2SCALECTRL_VID2VPITCH_MASK		(0x0000FFFF)
+#define PDP_VID2SCALECTRL_VID2VPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID2SCALECTRL_VID2VPITCH_SHIFT		(0)
+#define PDP_VID2SCALECTRL_VID2VPITCH_LENGTH		(16)
+#define PDP_VID2SCALECTRL_VID2VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VSINIT_OFFSET		(0x0504)
+
+/* PDP, VID2VSINIT, VID2VINITIAL1
+*/
+#define PDP_VID2VSINIT_VID2VINITIAL1_MASK		(0xFFFF0000)
+#define PDP_VID2VSINIT_VID2VINITIAL1_LSBMASK		(0x0000FFFF)
+#define PDP_VID2VSINIT_VID2VINITIAL1_SHIFT		(16)
+#define PDP_VID2VSINIT_VID2VINITIAL1_LENGTH		(16)
+#define PDP_VID2VSINIT_VID2VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2VSINIT, VID2VINITIAL0
+*/
+#define PDP_VID2VSINIT_VID2VINITIAL0_MASK		(0x0000FFFF)
+#define PDP_VID2VSINIT_VID2VINITIAL0_LSBMASK		(0x0000FFFF)
+#define PDP_VID2VSINIT_VID2VINITIAL0_SHIFT		(0)
+#define PDP_VID2VSINIT_VID2VINITIAL0_LENGTH		(16)
+#define PDP_VID2VSINIT_VID2VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF0_OFFSET		(0x0508)
+
+/* PDP, VID2VCOEFF0, VID2VCOEFF0
+*/
+#define PDP_VID2VCOEFF0_VID2VCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF0_VID2VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF0_VID2VCOEFF0_SHIFT		(0)
+#define PDP_VID2VCOEFF0_VID2VCOEFF0_LENGTH		(32)
+#define PDP_VID2VCOEFF0_VID2VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF1_OFFSET		(0x050C)
+
+/* PDP, VID2VCOEFF1, VID2VCOEFF1
+*/
+#define PDP_VID2VCOEFF1_VID2VCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF1_VID2VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF1_VID2VCOEFF1_SHIFT		(0)
+#define PDP_VID2VCOEFF1_VID2VCOEFF1_LENGTH		(32)
+#define PDP_VID2VCOEFF1_VID2VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF2_OFFSET		(0x0510)
+
+/* PDP, VID2VCOEFF2, VID2VCOEFF2
+*/
+#define PDP_VID2VCOEFF2_VID2VCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF2_VID2VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF2_VID2VCOEFF2_SHIFT		(0)
+#define PDP_VID2VCOEFF2_VID2VCOEFF2_LENGTH		(32)
+#define PDP_VID2VCOEFF2_VID2VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF3_OFFSET		(0x0514)
+
+/* PDP, VID2VCOEFF3, VID2VCOEFF3
+*/
+#define PDP_VID2VCOEFF3_VID2VCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF3_VID2VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF3_VID2VCOEFF3_SHIFT		(0)
+#define PDP_VID2VCOEFF3_VID2VCOEFF3_LENGTH		(32)
+#define PDP_VID2VCOEFF3_VID2VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF4_OFFSET		(0x0518)
+
+/* PDP, VID2VCOEFF4, VID2VCOEFF4
+*/
+#define PDP_VID2VCOEFF4_VID2VCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF4_VID2VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF4_VID2VCOEFF4_SHIFT		(0)
+#define PDP_VID2VCOEFF4_VID2VCOEFF4_LENGTH		(32)
+#define PDP_VID2VCOEFF4_VID2VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF5_OFFSET		(0x051C)
+
+/* PDP, VID2VCOEFF5, VID2VCOEFF5
+*/
+#define PDP_VID2VCOEFF5_VID2VCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF5_VID2VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF5_VID2VCOEFF5_SHIFT		(0)
+#define PDP_VID2VCOEFF5_VID2VCOEFF5_LENGTH		(32)
+#define PDP_VID2VCOEFF5_VID2VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF6_OFFSET		(0x0520)
+
+/* PDP, VID2VCOEFF6, VID2VCOEFF6
+*/
+#define PDP_VID2VCOEFF6_VID2VCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF6_VID2VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF6_VID2VCOEFF6_SHIFT		(0)
+#define PDP_VID2VCOEFF6_VID2VCOEFF6_LENGTH		(32)
+#define PDP_VID2VCOEFF6_VID2VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF7_OFFSET		(0x0524)
+
+/* PDP, VID2VCOEFF7, VID2VCOEFF7
+*/
+#define PDP_VID2VCOEFF7_VID2VCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF7_VID2VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2VCOEFF7_VID2VCOEFF7_SHIFT		(0)
+#define PDP_VID2VCOEFF7_VID2VCOEFF7_LENGTH		(32)
+#define PDP_VID2VCOEFF7_VID2VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2VCOEFF8_OFFSET		(0x0528)
+
+/* PDP, VID2VCOEFF8, VID2VCOEFF8
+*/
+#define PDP_VID2VCOEFF8_VID2VCOEFF8_MASK		(0x000000FF)
+#define PDP_VID2VCOEFF8_VID2VCOEFF8_LSBMASK		(0x000000FF)
+#define PDP_VID2VCOEFF8_VID2VCOEFF8_SHIFT		(0)
+#define PDP_VID2VCOEFF8_VID2VCOEFF8_LENGTH		(8)
+#define PDP_VID2VCOEFF8_VID2VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HSINIT_OFFSET		(0x052C)
+
+/* PDP, VID2HSINIT, VID2HINITIAL
+*/
+#define PDP_VID2HSINIT_VID2HINITIAL_MASK		(0xFFFF0000)
+#define PDP_VID2HSINIT_VID2HINITIAL_LSBMASK		(0x0000FFFF)
+#define PDP_VID2HSINIT_VID2HINITIAL_SHIFT		(16)
+#define PDP_VID2HSINIT_VID2HINITIAL_LENGTH		(16)
+#define PDP_VID2HSINIT_VID2HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2HSINIT, VID2HPITCH
+*/
+#define PDP_VID2HSINIT_VID2HPITCH_MASK		(0x0000FFFF)
+#define PDP_VID2HSINIT_VID2HPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID2HSINIT_VID2HPITCH_SHIFT		(0)
+#define PDP_VID2HSINIT_VID2HPITCH_LENGTH		(16)
+#define PDP_VID2HSINIT_VID2HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF0_OFFSET		(0x0530)
+
+/* PDP, VID2HCOEFF0, VID2HCOEFF0
+*/
+#define PDP_VID2HCOEFF0_VID2HCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF0_VID2HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF0_VID2HCOEFF0_SHIFT		(0)
+#define PDP_VID2HCOEFF0_VID2HCOEFF0_LENGTH		(32)
+#define PDP_VID2HCOEFF0_VID2HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF1_OFFSET		(0x0534)
+
+/* PDP, VID2HCOEFF1, VID2HCOEFF1
+*/
+#define PDP_VID2HCOEFF1_VID2HCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF1_VID2HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF1_VID2HCOEFF1_SHIFT		(0)
+#define PDP_VID2HCOEFF1_VID2HCOEFF1_LENGTH		(32)
+#define PDP_VID2HCOEFF1_VID2HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF2_OFFSET		(0x0538)
+
+/* PDP, VID2HCOEFF2, VID2HCOEFF2
+*/
+#define PDP_VID2HCOEFF2_VID2HCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF2_VID2HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF2_VID2HCOEFF2_SHIFT		(0)
+#define PDP_VID2HCOEFF2_VID2HCOEFF2_LENGTH		(32)
+#define PDP_VID2HCOEFF2_VID2HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF3_OFFSET		(0x053C)
+
+/* PDP, VID2HCOEFF3, VID2HCOEFF3
+*/
+#define PDP_VID2HCOEFF3_VID2HCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF3_VID2HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF3_VID2HCOEFF3_SHIFT		(0)
+#define PDP_VID2HCOEFF3_VID2HCOEFF3_LENGTH		(32)
+#define PDP_VID2HCOEFF3_VID2HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF4_OFFSET		(0x0540)
+
+/* PDP, VID2HCOEFF4, VID2HCOEFF4
+*/
+#define PDP_VID2HCOEFF4_VID2HCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF4_VID2HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF4_VID2HCOEFF4_SHIFT		(0)
+#define PDP_VID2HCOEFF4_VID2HCOEFF4_LENGTH		(32)
+#define PDP_VID2HCOEFF4_VID2HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF5_OFFSET		(0x0544)
+
+/* PDP, VID2HCOEFF5, VID2HCOEFF5
+*/
+#define PDP_VID2HCOEFF5_VID2HCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF5_VID2HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF5_VID2HCOEFF5_SHIFT		(0)
+#define PDP_VID2HCOEFF5_VID2HCOEFF5_LENGTH		(32)
+#define PDP_VID2HCOEFF5_VID2HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF6_OFFSET		(0x0548)
+
+/* PDP, VID2HCOEFF6, VID2HCOEFF6
+*/
+#define PDP_VID2HCOEFF6_VID2HCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF6_VID2HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF6_VID2HCOEFF6_SHIFT		(0)
+#define PDP_VID2HCOEFF6_VID2HCOEFF6_LENGTH		(32)
+#define PDP_VID2HCOEFF6_VID2HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF7_OFFSET		(0x054C)
+
+/* PDP, VID2HCOEFF7, VID2HCOEFF7
+*/
+#define PDP_VID2HCOEFF7_VID2HCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF7_VID2HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF7_VID2HCOEFF7_SHIFT		(0)
+#define PDP_VID2HCOEFF7_VID2HCOEFF7_LENGTH		(32)
+#define PDP_VID2HCOEFF7_VID2HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF8_OFFSET		(0x0550)
+
+/* PDP, VID2HCOEFF8, VID2HCOEFF8
+*/
+#define PDP_VID2HCOEFF8_VID2HCOEFF8_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF8_VID2HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF8_VID2HCOEFF8_SHIFT		(0)
+#define PDP_VID2HCOEFF8_VID2HCOEFF8_LENGTH		(32)
+#define PDP_VID2HCOEFF8_VID2HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF9_OFFSET		(0x0554)
+
+/* PDP, VID2HCOEFF9, VID2HCOEFF9
+*/
+#define PDP_VID2HCOEFF9_VID2HCOEFF9_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF9_VID2HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF9_VID2HCOEFF9_SHIFT		(0)
+#define PDP_VID2HCOEFF9_VID2HCOEFF9_LENGTH		(32)
+#define PDP_VID2HCOEFF9_VID2HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF10_OFFSET		(0x0558)
+
+/* PDP, VID2HCOEFF10, VID2HCOEFF10
+*/
+#define PDP_VID2HCOEFF10_VID2HCOEFF10_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF10_VID2HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF10_VID2HCOEFF10_SHIFT		(0)
+#define PDP_VID2HCOEFF10_VID2HCOEFF10_LENGTH		(32)
+#define PDP_VID2HCOEFF10_VID2HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF11_OFFSET		(0x055C)
+
+/* PDP, VID2HCOEFF11, VID2HCOEFF11
+*/
+#define PDP_VID2HCOEFF11_VID2HCOEFF11_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF11_VID2HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF11_VID2HCOEFF11_SHIFT		(0)
+#define PDP_VID2HCOEFF11_VID2HCOEFF11_LENGTH		(32)
+#define PDP_VID2HCOEFF11_VID2HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF12_OFFSET		(0x0560)
+
+/* PDP, VID2HCOEFF12, VID2HCOEFF12
+*/
+#define PDP_VID2HCOEFF12_VID2HCOEFF12_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF12_VID2HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF12_VID2HCOEFF12_SHIFT		(0)
+#define PDP_VID2HCOEFF12_VID2HCOEFF12_LENGTH		(32)
+#define PDP_VID2HCOEFF12_VID2HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF13_OFFSET		(0x0564)
+
+/* PDP, VID2HCOEFF13, VID2HCOEFF13
+*/
+#define PDP_VID2HCOEFF13_VID2HCOEFF13_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF13_VID2HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF13_VID2HCOEFF13_SHIFT		(0)
+#define PDP_VID2HCOEFF13_VID2HCOEFF13_LENGTH		(32)
+#define PDP_VID2HCOEFF13_VID2HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF14_OFFSET		(0x0568)
+
+/* PDP, VID2HCOEFF14, VID2HCOEFF14
+*/
+#define PDP_VID2HCOEFF14_VID2HCOEFF14_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF14_VID2HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF14_VID2HCOEFF14_SHIFT		(0)
+#define PDP_VID2HCOEFF14_VID2HCOEFF14_LENGTH		(32)
+#define PDP_VID2HCOEFF14_VID2HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF15_OFFSET		(0x056C)
+
+/* PDP, VID2HCOEFF15, VID2HCOEFF15
+*/
+#define PDP_VID2HCOEFF15_VID2HCOEFF15_MASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF15_VID2HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID2HCOEFF15_VID2HCOEFF15_SHIFT		(0)
+#define PDP_VID2HCOEFF15_VID2HCOEFF15_LENGTH		(32)
+#define PDP_VID2HCOEFF15_VID2HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2HCOEFF16_OFFSET		(0x0570)
+
+/* PDP, VID2HCOEFF16, VID2HCOEFF16
+*/
+#define PDP_VID2HCOEFF16_VID2HCOEFF16_MASK		(0x000000FF)
+#define PDP_VID2HCOEFF16_VID2HCOEFF16_LSBMASK		(0x000000FF)
+#define PDP_VID2HCOEFF16_VID2HCOEFF16_SHIFT		(0)
+#define PDP_VID2HCOEFF16_VID2HCOEFF16_LENGTH		(8)
+#define PDP_VID2HCOEFF16_VID2HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2SCALESIZE_OFFSET		(0x0574)
+
+/* PDP, VID2SCALESIZE, VID2SCALEWIDTH
+*/
+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_MASK		(0x0FFF0000)
+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_SHIFT		(16)
+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_LENGTH		(12)
+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2SCALESIZE, VID2SCALEHEIGHT
+*/
+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_MASK		(0x00000FFF)
+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SHIFT		(0)
+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LENGTH		(12)
+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3SCALECTRL_OFFSET		(0x0578)
+
+/* PDP, VID3SCALECTRL, VID3HSCALEBP
+*/
+#define PDP_VID3SCALECTRL_VID3HSCALEBP_MASK		(0x80000000)
+#define PDP_VID3SCALECTRL_VID3HSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID3SCALECTRL_VID3HSCALEBP_SHIFT		(31)
+#define PDP_VID3SCALECTRL_VID3HSCALEBP_LENGTH		(1)
+#define PDP_VID3SCALECTRL_VID3HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VSCALEBP
+*/
+#define PDP_VID3SCALECTRL_VID3VSCALEBP_MASK		(0x40000000)
+#define PDP_VID3SCALECTRL_VID3VSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID3SCALECTRL_VID3VSCALEBP_SHIFT		(30)
+#define PDP_VID3SCALECTRL_VID3VSCALEBP_LENGTH		(1)
+#define PDP_VID3SCALECTRL_VID3VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3HSBEFOREVS
+*/
+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_MASK		(0x20000000)
+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_LSBMASK		(0x00000001)
+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_SHIFT		(29)
+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_LENGTH		(1)
+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VSURUNCTRL
+*/
+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_MASK		(0x08000000)
+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_LSBMASK		(0x00000001)
+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_SHIFT		(27)
+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_LENGTH		(1)
+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3PAN_EN
+*/
+#define PDP_VID3SCALECTRL_VID3PAN_EN_MASK		(0x00040000)
+#define PDP_VID3SCALECTRL_VID3PAN_EN_LSBMASK		(0x00000001)
+#define PDP_VID3SCALECTRL_VID3PAN_EN_SHIFT		(18)
+#define PDP_VID3SCALECTRL_VID3PAN_EN_LENGTH		(1)
+#define PDP_VID3SCALECTRL_VID3PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VORDER
+*/
+#define PDP_VID3SCALECTRL_VID3VORDER_MASK		(0x00030000)
+#define PDP_VID3SCALECTRL_VID3VORDER_LSBMASK		(0x00000003)
+#define PDP_VID3SCALECTRL_VID3VORDER_SHIFT		(16)
+#define PDP_VID3SCALECTRL_VID3VORDER_LENGTH		(2)
+#define PDP_VID3SCALECTRL_VID3VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VPITCH
+*/
+#define PDP_VID3SCALECTRL_VID3VPITCH_MASK		(0x0000FFFF)
+#define PDP_VID3SCALECTRL_VID3VPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID3SCALECTRL_VID3VPITCH_SHIFT		(0)
+#define PDP_VID3SCALECTRL_VID3VPITCH_LENGTH		(16)
+#define PDP_VID3SCALECTRL_VID3VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VSINIT_OFFSET		(0x057C)
+
+/* PDP, VID3VSINIT, VID3VINITIAL1
+*/
+#define PDP_VID3VSINIT_VID3VINITIAL1_MASK		(0xFFFF0000)
+#define PDP_VID3VSINIT_VID3VINITIAL1_LSBMASK		(0x0000FFFF)
+#define PDP_VID3VSINIT_VID3VINITIAL1_SHIFT		(16)
+#define PDP_VID3VSINIT_VID3VINITIAL1_LENGTH		(16)
+#define PDP_VID3VSINIT_VID3VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3VSINIT, VID3VINITIAL0
+*/
+#define PDP_VID3VSINIT_VID3VINITIAL0_MASK		(0x0000FFFF)
+#define PDP_VID3VSINIT_VID3VINITIAL0_LSBMASK		(0x0000FFFF)
+#define PDP_VID3VSINIT_VID3VINITIAL0_SHIFT		(0)
+#define PDP_VID3VSINIT_VID3VINITIAL0_LENGTH		(16)
+#define PDP_VID3VSINIT_VID3VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF0_OFFSET		(0x0580)
+
+/* PDP, VID3VCOEFF0, VID3VCOEFF0
+*/
+#define PDP_VID3VCOEFF0_VID3VCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF0_VID3VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF0_VID3VCOEFF0_SHIFT		(0)
+#define PDP_VID3VCOEFF0_VID3VCOEFF0_LENGTH		(32)
+#define PDP_VID3VCOEFF0_VID3VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF1_OFFSET		(0x0584)
+
+/* PDP, VID3VCOEFF1, VID3VCOEFF1
+*/
+#define PDP_VID3VCOEFF1_VID3VCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF1_VID3VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF1_VID3VCOEFF1_SHIFT		(0)
+#define PDP_VID3VCOEFF1_VID3VCOEFF1_LENGTH		(32)
+#define PDP_VID3VCOEFF1_VID3VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF2_OFFSET		(0x0588)
+
+/* PDP, VID3VCOEFF2, VID3VCOEFF2
+*/
+#define PDP_VID3VCOEFF2_VID3VCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF2_VID3VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF2_VID3VCOEFF2_SHIFT		(0)
+#define PDP_VID3VCOEFF2_VID3VCOEFF2_LENGTH		(32)
+#define PDP_VID3VCOEFF2_VID3VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF3_OFFSET		(0x058C)
+
+/* PDP, VID3VCOEFF3, VID3VCOEFF3
+*/
+#define PDP_VID3VCOEFF3_VID3VCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF3_VID3VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF3_VID3VCOEFF3_SHIFT		(0)
+#define PDP_VID3VCOEFF3_VID3VCOEFF3_LENGTH		(32)
+#define PDP_VID3VCOEFF3_VID3VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF4_OFFSET		(0x0590)
+
+/* PDP, VID3VCOEFF4, VID3VCOEFF4
+*/
+#define PDP_VID3VCOEFF4_VID3VCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF4_VID3VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF4_VID3VCOEFF4_SHIFT		(0)
+#define PDP_VID3VCOEFF4_VID3VCOEFF4_LENGTH		(32)
+#define PDP_VID3VCOEFF4_VID3VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF5_OFFSET		(0x0594)
+
+/* PDP, VID3VCOEFF5, VID3VCOEFF5
+*/
+#define PDP_VID3VCOEFF5_VID3VCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF5_VID3VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF5_VID3VCOEFF5_SHIFT		(0)
+#define PDP_VID3VCOEFF5_VID3VCOEFF5_LENGTH		(32)
+#define PDP_VID3VCOEFF5_VID3VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF6_OFFSET		(0x0598)
+
+/* PDP, VID3VCOEFF6, VID3VCOEFF6
+*/
+#define PDP_VID3VCOEFF6_VID3VCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF6_VID3VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF6_VID3VCOEFF6_SHIFT		(0)
+#define PDP_VID3VCOEFF6_VID3VCOEFF6_LENGTH		(32)
+#define PDP_VID3VCOEFF6_VID3VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF7_OFFSET		(0x059C)
+
+/* PDP, VID3VCOEFF7, VID3VCOEFF7
+*/
+#define PDP_VID3VCOEFF7_VID3VCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF7_VID3VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3VCOEFF7_VID3VCOEFF7_SHIFT		(0)
+#define PDP_VID3VCOEFF7_VID3VCOEFF7_LENGTH		(32)
+#define PDP_VID3VCOEFF7_VID3VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3VCOEFF8_OFFSET		(0x05A0)
+
+/* PDP, VID3VCOEFF8, VID3VCOEFF8
+*/
+#define PDP_VID3VCOEFF8_VID3VCOEFF8_MASK		(0x000000FF)
+#define PDP_VID3VCOEFF8_VID3VCOEFF8_LSBMASK		(0x000000FF)
+#define PDP_VID3VCOEFF8_VID3VCOEFF8_SHIFT		(0)
+#define PDP_VID3VCOEFF8_VID3VCOEFF8_LENGTH		(8)
+#define PDP_VID3VCOEFF8_VID3VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HSINIT_OFFSET		(0x05A4)
+
+/* PDP, VID3HSINIT, VID3HINITIAL
+*/
+#define PDP_VID3HSINIT_VID3HINITIAL_MASK		(0xFFFF0000)
+#define PDP_VID3HSINIT_VID3HINITIAL_LSBMASK		(0x0000FFFF)
+#define PDP_VID3HSINIT_VID3HINITIAL_SHIFT		(16)
+#define PDP_VID3HSINIT_VID3HINITIAL_LENGTH		(16)
+#define PDP_VID3HSINIT_VID3HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3HSINIT, VID3HPITCH
+*/
+#define PDP_VID3HSINIT_VID3HPITCH_MASK		(0x0000FFFF)
+#define PDP_VID3HSINIT_VID3HPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID3HSINIT_VID3HPITCH_SHIFT		(0)
+#define PDP_VID3HSINIT_VID3HPITCH_LENGTH		(16)
+#define PDP_VID3HSINIT_VID3HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF0_OFFSET		(0x05A8)
+
+/* PDP, VID3HCOEFF0, VID3HCOEFF0
+*/
+#define PDP_VID3HCOEFF0_VID3HCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF0_VID3HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF0_VID3HCOEFF0_SHIFT		(0)
+#define PDP_VID3HCOEFF0_VID3HCOEFF0_LENGTH		(32)
+#define PDP_VID3HCOEFF0_VID3HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF1_OFFSET		(0x05AC)
+
+/* PDP, VID3HCOEFF1, VID3HCOEFF1
+*/
+#define PDP_VID3HCOEFF1_VID3HCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF1_VID3HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF1_VID3HCOEFF1_SHIFT		(0)
+#define PDP_VID3HCOEFF1_VID3HCOEFF1_LENGTH		(32)
+#define PDP_VID3HCOEFF1_VID3HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF2_OFFSET		(0x05B0)
+
+/* PDP, VID3HCOEFF2, VID3HCOEFF2
+*/
+#define PDP_VID3HCOEFF2_VID3HCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF2_VID3HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF2_VID3HCOEFF2_SHIFT		(0)
+#define PDP_VID3HCOEFF2_VID3HCOEFF2_LENGTH		(32)
+#define PDP_VID3HCOEFF2_VID3HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF3_OFFSET		(0x05B4)
+
+/* PDP, VID3HCOEFF3, VID3HCOEFF3
+*/
+#define PDP_VID3HCOEFF3_VID3HCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF3_VID3HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF3_VID3HCOEFF3_SHIFT		(0)
+#define PDP_VID3HCOEFF3_VID3HCOEFF3_LENGTH		(32)
+#define PDP_VID3HCOEFF3_VID3HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF4_OFFSET		(0x05B8)
+
+/* PDP, VID3HCOEFF4, VID3HCOEFF4
+*/
+#define PDP_VID3HCOEFF4_VID3HCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF4_VID3HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF4_VID3HCOEFF4_SHIFT		(0)
+#define PDP_VID3HCOEFF4_VID3HCOEFF4_LENGTH		(32)
+#define PDP_VID3HCOEFF4_VID3HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF5_OFFSET		(0x05BC)
+
+/* PDP, VID3HCOEFF5, VID3HCOEFF5
+*/
+#define PDP_VID3HCOEFF5_VID3HCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF5_VID3HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF5_VID3HCOEFF5_SHIFT		(0)
+#define PDP_VID3HCOEFF5_VID3HCOEFF5_LENGTH		(32)
+#define PDP_VID3HCOEFF5_VID3HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF6_OFFSET		(0x05C0)
+
+/* PDP, VID3HCOEFF6, VID3HCOEFF6
+*/
+#define PDP_VID3HCOEFF6_VID3HCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF6_VID3HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF6_VID3HCOEFF6_SHIFT		(0)
+#define PDP_VID3HCOEFF6_VID3HCOEFF6_LENGTH		(32)
+#define PDP_VID3HCOEFF6_VID3HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF7_OFFSET		(0x05C4)
+
+/* PDP, VID3HCOEFF7, VID3HCOEFF7
+*/
+#define PDP_VID3HCOEFF7_VID3HCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF7_VID3HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF7_VID3HCOEFF7_SHIFT		(0)
+#define PDP_VID3HCOEFF7_VID3HCOEFF7_LENGTH		(32)
+#define PDP_VID3HCOEFF7_VID3HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF8_OFFSET		(0x05C8)
+
+/* PDP, VID3HCOEFF8, VID3HCOEFF8
+*/
+#define PDP_VID3HCOEFF8_VID3HCOEFF8_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF8_VID3HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF8_VID3HCOEFF8_SHIFT		(0)
+#define PDP_VID3HCOEFF8_VID3HCOEFF8_LENGTH		(32)
+#define PDP_VID3HCOEFF8_VID3HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF9_OFFSET		(0x05CC)
+
+/* PDP, VID3HCOEFF9, VID3HCOEFF9
+*/
+#define PDP_VID3HCOEFF9_VID3HCOEFF9_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF9_VID3HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF9_VID3HCOEFF9_SHIFT		(0)
+#define PDP_VID3HCOEFF9_VID3HCOEFF9_LENGTH		(32)
+#define PDP_VID3HCOEFF9_VID3HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF10_OFFSET		(0x05D0)
+
+/* PDP, VID3HCOEFF10, VID3HCOEFF10
+*/
+#define PDP_VID3HCOEFF10_VID3HCOEFF10_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF10_VID3HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF10_VID3HCOEFF10_SHIFT		(0)
+#define PDP_VID3HCOEFF10_VID3HCOEFF10_LENGTH		(32)
+#define PDP_VID3HCOEFF10_VID3HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF11_OFFSET		(0x05D4)
+
+/* PDP, VID3HCOEFF11, VID3HCOEFF11
+*/
+#define PDP_VID3HCOEFF11_VID3HCOEFF11_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF11_VID3HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF11_VID3HCOEFF11_SHIFT		(0)
+#define PDP_VID3HCOEFF11_VID3HCOEFF11_LENGTH		(32)
+#define PDP_VID3HCOEFF11_VID3HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF12_OFFSET		(0x05D8)
+
+/* PDP, VID3HCOEFF12, VID3HCOEFF12
+*/
+#define PDP_VID3HCOEFF12_VID3HCOEFF12_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF12_VID3HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF12_VID3HCOEFF12_SHIFT		(0)
+#define PDP_VID3HCOEFF12_VID3HCOEFF12_LENGTH		(32)
+#define PDP_VID3HCOEFF12_VID3HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF13_OFFSET		(0x05DC)
+
+/* PDP, VID3HCOEFF13, VID3HCOEFF13
+*/
+#define PDP_VID3HCOEFF13_VID3HCOEFF13_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF13_VID3HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF13_VID3HCOEFF13_SHIFT		(0)
+#define PDP_VID3HCOEFF13_VID3HCOEFF13_LENGTH		(32)
+#define PDP_VID3HCOEFF13_VID3HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF14_OFFSET		(0x05E0)
+
+/* PDP, VID3HCOEFF14, VID3HCOEFF14
+*/
+#define PDP_VID3HCOEFF14_VID3HCOEFF14_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF14_VID3HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF14_VID3HCOEFF14_SHIFT		(0)
+#define PDP_VID3HCOEFF14_VID3HCOEFF14_LENGTH		(32)
+#define PDP_VID3HCOEFF14_VID3HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF15_OFFSET		(0x05E4)
+
+/* PDP, VID3HCOEFF15, VID3HCOEFF15
+*/
+#define PDP_VID3HCOEFF15_VID3HCOEFF15_MASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF15_VID3HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID3HCOEFF15_VID3HCOEFF15_SHIFT		(0)
+#define PDP_VID3HCOEFF15_VID3HCOEFF15_LENGTH		(32)
+#define PDP_VID3HCOEFF15_VID3HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3HCOEFF16_OFFSET		(0x05E8)
+
+/* PDP, VID3HCOEFF16, VID3HCOEFF16
+*/
+#define PDP_VID3HCOEFF16_VID3HCOEFF16_MASK		(0x000000FF)
+#define PDP_VID3HCOEFF16_VID3HCOEFF16_LSBMASK		(0x000000FF)
+#define PDP_VID3HCOEFF16_VID3HCOEFF16_SHIFT		(0)
+#define PDP_VID3HCOEFF16_VID3HCOEFF16_LENGTH		(8)
+#define PDP_VID3HCOEFF16_VID3HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3SCALESIZE_OFFSET		(0x05EC)
+
+/* PDP, VID3SCALESIZE, VID3SCALEWIDTH
+*/
+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK		(0x0FFF0000)
+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT		(16)
+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_LENGTH		(12)
+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3SCALESIZE, VID3SCALEHEIGHT
+*/
+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_MASK		(0x00000FFF)
+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SHIFT		(0)
+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LENGTH		(12)
+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4SCALECTRL_OFFSET		(0x05F0)
+
+/* PDP, VID4SCALECTRL, VID4HSCALEBP
+*/
+#define PDP_VID4SCALECTRL_VID4HSCALEBP_MASK		(0x80000000)
+#define PDP_VID4SCALECTRL_VID4HSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID4SCALECTRL_VID4HSCALEBP_SHIFT		(31)
+#define PDP_VID4SCALECTRL_VID4HSCALEBP_LENGTH		(1)
+#define PDP_VID4SCALECTRL_VID4HSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VSCALEBP
+*/
+#define PDP_VID4SCALECTRL_VID4VSCALEBP_MASK		(0x40000000)
+#define PDP_VID4SCALECTRL_VID4VSCALEBP_LSBMASK		(0x00000001)
+#define PDP_VID4SCALECTRL_VID4VSCALEBP_SHIFT		(30)
+#define PDP_VID4SCALECTRL_VID4VSCALEBP_LENGTH		(1)
+#define PDP_VID4SCALECTRL_VID4VSCALEBP_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4HSBEFOREVS
+*/
+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_MASK		(0x20000000)
+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_LSBMASK		(0x00000001)
+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_SHIFT		(29)
+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_LENGTH		(1)
+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VSURUNCTRL
+*/
+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_MASK		(0x08000000)
+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_LSBMASK		(0x00000001)
+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_SHIFT		(27)
+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_LENGTH		(1)
+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4PAN_EN
+*/
+#define PDP_VID4SCALECTRL_VID4PAN_EN_MASK		(0x00040000)
+#define PDP_VID4SCALECTRL_VID4PAN_EN_LSBMASK		(0x00000001)
+#define PDP_VID4SCALECTRL_VID4PAN_EN_SHIFT		(18)
+#define PDP_VID4SCALECTRL_VID4PAN_EN_LENGTH		(1)
+#define PDP_VID4SCALECTRL_VID4PAN_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VORDER
+*/
+#define PDP_VID4SCALECTRL_VID4VORDER_MASK		(0x00030000)
+#define PDP_VID4SCALECTRL_VID4VORDER_LSBMASK		(0x00000003)
+#define PDP_VID4SCALECTRL_VID4VORDER_SHIFT		(16)
+#define PDP_VID4SCALECTRL_VID4VORDER_LENGTH		(2)
+#define PDP_VID4SCALECTRL_VID4VORDER_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VPITCH
+*/
+#define PDP_VID4SCALECTRL_VID4VPITCH_MASK		(0x0000FFFF)
+#define PDP_VID4SCALECTRL_VID4VPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID4SCALECTRL_VID4VPITCH_SHIFT		(0)
+#define PDP_VID4SCALECTRL_VID4VPITCH_LENGTH		(16)
+#define PDP_VID4SCALECTRL_VID4VPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VSINIT_OFFSET		(0x05F4)
+
+/* PDP, VID4VSINIT, VID4VINITIAL1
+*/
+#define PDP_VID4VSINIT_VID4VINITIAL1_MASK		(0xFFFF0000)
+#define PDP_VID4VSINIT_VID4VINITIAL1_LSBMASK		(0x0000FFFF)
+#define PDP_VID4VSINIT_VID4VINITIAL1_SHIFT		(16)
+#define PDP_VID4VSINIT_VID4VINITIAL1_LENGTH		(16)
+#define PDP_VID4VSINIT_VID4VINITIAL1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4VSINIT, VID4VINITIAL0
+*/
+#define PDP_VID4VSINIT_VID4VINITIAL0_MASK		(0x0000FFFF)
+#define PDP_VID4VSINIT_VID4VINITIAL0_LSBMASK		(0x0000FFFF)
+#define PDP_VID4VSINIT_VID4VINITIAL0_SHIFT		(0)
+#define PDP_VID4VSINIT_VID4VINITIAL0_LENGTH		(16)
+#define PDP_VID4VSINIT_VID4VINITIAL0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF0_OFFSET		(0x05F8)
+
+/* PDP, VID4VCOEFF0, VID4VCOEFF0
+*/
+#define PDP_VID4VCOEFF0_VID4VCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF0_VID4VCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF0_VID4VCOEFF0_SHIFT		(0)
+#define PDP_VID4VCOEFF0_VID4VCOEFF0_LENGTH		(32)
+#define PDP_VID4VCOEFF0_VID4VCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF1_OFFSET		(0x05FC)
+
+/* PDP, VID4VCOEFF1, VID4VCOEFF1
+*/
+#define PDP_VID4VCOEFF1_VID4VCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF1_VID4VCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF1_VID4VCOEFF1_SHIFT		(0)
+#define PDP_VID4VCOEFF1_VID4VCOEFF1_LENGTH		(32)
+#define PDP_VID4VCOEFF1_VID4VCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF2_OFFSET		(0x0600)
+
+/* PDP, VID4VCOEFF2, VID4VCOEFF2
+*/
+#define PDP_VID4VCOEFF2_VID4VCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF2_VID4VCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF2_VID4VCOEFF2_SHIFT		(0)
+#define PDP_VID4VCOEFF2_VID4VCOEFF2_LENGTH		(32)
+#define PDP_VID4VCOEFF2_VID4VCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF3_OFFSET		(0x0604)
+
+/* PDP, VID4VCOEFF3, VID4VCOEFF3
+*/
+#define PDP_VID4VCOEFF3_VID4VCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF3_VID4VCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF3_VID4VCOEFF3_SHIFT		(0)
+#define PDP_VID4VCOEFF3_VID4VCOEFF3_LENGTH		(32)
+#define PDP_VID4VCOEFF3_VID4VCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF4_OFFSET		(0x0608)
+
+/* PDP, VID4VCOEFF4, VID4VCOEFF4
+*/
+#define PDP_VID4VCOEFF4_VID4VCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF4_VID4VCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF4_VID4VCOEFF4_SHIFT		(0)
+#define PDP_VID4VCOEFF4_VID4VCOEFF4_LENGTH		(32)
+#define PDP_VID4VCOEFF4_VID4VCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF5_OFFSET		(0x060C)
+
+/* PDP, VID4VCOEFF5, VID4VCOEFF5
+*/
+#define PDP_VID4VCOEFF5_VID4VCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF5_VID4VCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF5_VID4VCOEFF5_SHIFT		(0)
+#define PDP_VID4VCOEFF5_VID4VCOEFF5_LENGTH		(32)
+#define PDP_VID4VCOEFF5_VID4VCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF6_OFFSET		(0x0610)
+
+/* PDP, VID4VCOEFF6, VID4VCOEFF6
+*/
+#define PDP_VID4VCOEFF6_VID4VCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF6_VID4VCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF6_VID4VCOEFF6_SHIFT		(0)
+#define PDP_VID4VCOEFF6_VID4VCOEFF6_LENGTH		(32)
+#define PDP_VID4VCOEFF6_VID4VCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF7_OFFSET		(0x0614)
+
+/* PDP, VID4VCOEFF7, VID4VCOEFF7
+*/
+#define PDP_VID4VCOEFF7_VID4VCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF7_VID4VCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4VCOEFF7_VID4VCOEFF7_SHIFT		(0)
+#define PDP_VID4VCOEFF7_VID4VCOEFF7_LENGTH		(32)
+#define PDP_VID4VCOEFF7_VID4VCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4VCOEFF8_OFFSET		(0x0618)
+
+/* PDP, VID4VCOEFF8, VID4VCOEFF8
+*/
+#define PDP_VID4VCOEFF8_VID4VCOEFF8_MASK		(0x000000FF)
+#define PDP_VID4VCOEFF8_VID4VCOEFF8_LSBMASK		(0x000000FF)
+#define PDP_VID4VCOEFF8_VID4VCOEFF8_SHIFT		(0)
+#define PDP_VID4VCOEFF8_VID4VCOEFF8_LENGTH		(8)
+#define PDP_VID4VCOEFF8_VID4VCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HSINIT_OFFSET		(0x061C)
+
+/* PDP, VID4HSINIT, VID4HINITIAL
+*/
+#define PDP_VID4HSINIT_VID4HINITIAL_MASK		(0xFFFF0000)
+#define PDP_VID4HSINIT_VID4HINITIAL_LSBMASK		(0x0000FFFF)
+#define PDP_VID4HSINIT_VID4HINITIAL_SHIFT		(16)
+#define PDP_VID4HSINIT_VID4HINITIAL_LENGTH		(16)
+#define PDP_VID4HSINIT_VID4HINITIAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4HSINIT, VID4HPITCH
+*/
+#define PDP_VID4HSINIT_VID4HPITCH_MASK		(0x0000FFFF)
+#define PDP_VID4HSINIT_VID4HPITCH_LSBMASK		(0x0000FFFF)
+#define PDP_VID4HSINIT_VID4HPITCH_SHIFT		(0)
+#define PDP_VID4HSINIT_VID4HPITCH_LENGTH		(16)
+#define PDP_VID4HSINIT_VID4HPITCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF0_OFFSET		(0x0620)
+
+/* PDP, VID4HCOEFF0, VID4HCOEFF0
+*/
+#define PDP_VID4HCOEFF0_VID4HCOEFF0_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF0_VID4HCOEFF0_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF0_VID4HCOEFF0_SHIFT		(0)
+#define PDP_VID4HCOEFF0_VID4HCOEFF0_LENGTH		(32)
+#define PDP_VID4HCOEFF0_VID4HCOEFF0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF1_OFFSET		(0x0624)
+
+/* PDP, VID4HCOEFF1, VID4HCOEFF1
+*/
+#define PDP_VID4HCOEFF1_VID4HCOEFF1_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF1_VID4HCOEFF1_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF1_VID4HCOEFF1_SHIFT		(0)
+#define PDP_VID4HCOEFF1_VID4HCOEFF1_LENGTH		(32)
+#define PDP_VID4HCOEFF1_VID4HCOEFF1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF2_OFFSET		(0x0628)
+
+/* PDP, VID4HCOEFF2, VID4HCOEFF2
+*/
+#define PDP_VID4HCOEFF2_VID4HCOEFF2_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF2_VID4HCOEFF2_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF2_VID4HCOEFF2_SHIFT		(0)
+#define PDP_VID4HCOEFF2_VID4HCOEFF2_LENGTH		(32)
+#define PDP_VID4HCOEFF2_VID4HCOEFF2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF3_OFFSET		(0x062C)
+
+/* PDP, VID4HCOEFF3, VID4HCOEFF3
+*/
+#define PDP_VID4HCOEFF3_VID4HCOEFF3_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF3_VID4HCOEFF3_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF3_VID4HCOEFF3_SHIFT		(0)
+#define PDP_VID4HCOEFF3_VID4HCOEFF3_LENGTH		(32)
+#define PDP_VID4HCOEFF3_VID4HCOEFF3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF4_OFFSET		(0x0630)
+
+/* PDP, VID4HCOEFF4, VID4HCOEFF4
+*/
+#define PDP_VID4HCOEFF4_VID4HCOEFF4_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF4_VID4HCOEFF4_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF4_VID4HCOEFF4_SHIFT		(0)
+#define PDP_VID4HCOEFF4_VID4HCOEFF4_LENGTH		(32)
+#define PDP_VID4HCOEFF4_VID4HCOEFF4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF5_OFFSET		(0x0634)
+
+/* PDP, VID4HCOEFF5, VID4HCOEFF5
+*/
+#define PDP_VID4HCOEFF5_VID4HCOEFF5_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF5_VID4HCOEFF5_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF5_VID4HCOEFF5_SHIFT		(0)
+#define PDP_VID4HCOEFF5_VID4HCOEFF5_LENGTH		(32)
+#define PDP_VID4HCOEFF5_VID4HCOEFF5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF6_OFFSET		(0x0638)
+
+/* PDP, VID4HCOEFF6, VID4HCOEFF6
+*/
+#define PDP_VID4HCOEFF6_VID4HCOEFF6_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF6_VID4HCOEFF6_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF6_VID4HCOEFF6_SHIFT		(0)
+#define PDP_VID4HCOEFF6_VID4HCOEFF6_LENGTH		(32)
+#define PDP_VID4HCOEFF6_VID4HCOEFF6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF7_OFFSET		(0x063C)
+
+/* PDP, VID4HCOEFF7, VID4HCOEFF7
+*/
+#define PDP_VID4HCOEFF7_VID4HCOEFF7_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF7_VID4HCOEFF7_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF7_VID4HCOEFF7_SHIFT		(0)
+#define PDP_VID4HCOEFF7_VID4HCOEFF7_LENGTH		(32)
+#define PDP_VID4HCOEFF7_VID4HCOEFF7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF8_OFFSET		(0x0640)
+
+/* PDP, VID4HCOEFF8, VID4HCOEFF8
+*/
+#define PDP_VID4HCOEFF8_VID4HCOEFF8_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF8_VID4HCOEFF8_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF8_VID4HCOEFF8_SHIFT		(0)
+#define PDP_VID4HCOEFF8_VID4HCOEFF8_LENGTH		(32)
+#define PDP_VID4HCOEFF8_VID4HCOEFF8_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF9_OFFSET		(0x0644)
+
+/* PDP, VID4HCOEFF9, VID4HCOEFF9
+*/
+#define PDP_VID4HCOEFF9_VID4HCOEFF9_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF9_VID4HCOEFF9_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF9_VID4HCOEFF9_SHIFT		(0)
+#define PDP_VID4HCOEFF9_VID4HCOEFF9_LENGTH		(32)
+#define PDP_VID4HCOEFF9_VID4HCOEFF9_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF10_OFFSET		(0x0648)
+
+/* PDP, VID4HCOEFF10, VID4HCOEFF10
+*/
+#define PDP_VID4HCOEFF10_VID4HCOEFF10_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF10_VID4HCOEFF10_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF10_VID4HCOEFF10_SHIFT		(0)
+#define PDP_VID4HCOEFF10_VID4HCOEFF10_LENGTH		(32)
+#define PDP_VID4HCOEFF10_VID4HCOEFF10_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF11_OFFSET		(0x064C)
+
+/* PDP, VID4HCOEFF11, VID4HCOEFF11
+*/
+#define PDP_VID4HCOEFF11_VID4HCOEFF11_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF11_VID4HCOEFF11_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF11_VID4HCOEFF11_SHIFT		(0)
+#define PDP_VID4HCOEFF11_VID4HCOEFF11_LENGTH		(32)
+#define PDP_VID4HCOEFF11_VID4HCOEFF11_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF12_OFFSET		(0x0650)
+
+/* PDP, VID4HCOEFF12, VID4HCOEFF12
+*/
+#define PDP_VID4HCOEFF12_VID4HCOEFF12_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF12_VID4HCOEFF12_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF12_VID4HCOEFF12_SHIFT		(0)
+#define PDP_VID4HCOEFF12_VID4HCOEFF12_LENGTH		(32)
+#define PDP_VID4HCOEFF12_VID4HCOEFF12_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF13_OFFSET		(0x0654)
+
+/* PDP, VID4HCOEFF13, VID4HCOEFF13
+*/
+#define PDP_VID4HCOEFF13_VID4HCOEFF13_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF13_VID4HCOEFF13_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF13_VID4HCOEFF13_SHIFT		(0)
+#define PDP_VID4HCOEFF13_VID4HCOEFF13_LENGTH		(32)
+#define PDP_VID4HCOEFF13_VID4HCOEFF13_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF14_OFFSET		(0x0658)
+
+/* PDP, VID4HCOEFF14, VID4HCOEFF14
+*/
+#define PDP_VID4HCOEFF14_VID4HCOEFF14_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF14_VID4HCOEFF14_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF14_VID4HCOEFF14_SHIFT		(0)
+#define PDP_VID4HCOEFF14_VID4HCOEFF14_LENGTH		(32)
+#define PDP_VID4HCOEFF14_VID4HCOEFF14_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF15_OFFSET		(0x065C)
+
+/* PDP, VID4HCOEFF15, VID4HCOEFF15
+*/
+#define PDP_VID4HCOEFF15_VID4HCOEFF15_MASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF15_VID4HCOEFF15_LSBMASK		(0xFFFFFFFF)
+#define PDP_VID4HCOEFF15_VID4HCOEFF15_SHIFT		(0)
+#define PDP_VID4HCOEFF15_VID4HCOEFF15_LENGTH		(32)
+#define PDP_VID4HCOEFF15_VID4HCOEFF15_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4HCOEFF16_OFFSET		(0x0660)
+
+/* PDP, VID4HCOEFF16, VID4HCOEFF16
+*/
+#define PDP_VID4HCOEFF16_VID4HCOEFF16_MASK		(0x000000FF)
+#define PDP_VID4HCOEFF16_VID4HCOEFF16_LSBMASK		(0x000000FF)
+#define PDP_VID4HCOEFF16_VID4HCOEFF16_SHIFT		(0)
+#define PDP_VID4HCOEFF16_VID4HCOEFF16_LENGTH		(8)
+#define PDP_VID4HCOEFF16_VID4HCOEFF16_SIGNED_FIELD	IMG_FALSE
+
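+/*
+ * Editor's sketch (not part of the generated register description): every
+ * field in this header obeys the invariant MASK == (LSBMASK << SHIFT) and
+ * spans LENGTH bits, so a generic accessor pair can be built from the macro
+ * sets alone. The helper names below are illustrative, not an existing API.
+ */
+static inline unsigned int pdp_field_get(unsigned int reg,
+					 unsigned int mask,
+					 unsigned int shift)
+{
+	/* Isolate the field bits, then right-align them. */
+	return (reg & mask) >> shift;
+}
+
+static inline unsigned int pdp_field_set(unsigned int reg,
+					 unsigned int mask,
+					 unsigned int shift,
+					 unsigned int val)
+{
+	/* Clear the field, then merge in the shifted new value. */
+	return (reg & ~mask) | ((val << shift) & mask);
+}
+
+/*
+ * Illustrative use (hypothetical variables):
+ *   w = pdp_field_set(w, PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK,
+ *		       PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT, width);
+ */
+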
+#define PDP_VID4SCALESIZE_OFFSET		(0x0664)
+
+/* PDP, VID4SCALESIZE, VID4SCALEWIDTH
+*/
+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK		(0x0FFF0000)
+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_LSBMASK		(0x00000FFF)
+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT		(16)
+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_LENGTH		(12)
+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4SCALESIZE, VID4SCALEHEIGHT
+*/
+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK		(0x00000FFF)
+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LSBMASK		(0x00000FFF)
+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT		(0)
+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LENGTH		(12)
+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND0_OFFSET		(0x0668)
+
+/* PDP, PORTER_BLND0, BLND0BLENDTYPE
+*/
+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND0, BLND0PORTERMODE
+*/
+#define PDP_PORTER_BLND0_BLND0PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND0_BLND0PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND0_BLND0PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND1_OFFSET		(0x066C)
+
+/* PDP, PORTER_BLND1, BLND1BLENDTYPE
+*/
+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND1, BLND1PORTERMODE
+*/
+#define PDP_PORTER_BLND1_BLND1PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND1_BLND1PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND1_BLND1PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND1_BLND1PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND1_BLND1PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND2_OFFSET		(0x0670)
+
+/* PDP, PORTER_BLND2, BLND2BLENDTYPE
+*/
+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND2, BLND2PORTERMODE
+*/
+#define PDP_PORTER_BLND2_BLND2PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND2_BLND2PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND2_BLND2PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND2_BLND2PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND2_BLND2PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND3_OFFSET		(0x0674)
+
+/* PDP, PORTER_BLND3, BLND3BLENDTYPE
+*/
+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND3, BLND3PORTERMODE
+*/
+#define PDP_PORTER_BLND3_BLND3PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND3_BLND3PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND3_BLND3PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND3_BLND3PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND3_BLND3PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND4_OFFSET		(0x0678)
+
+/* PDP, PORTER_BLND4, BLND4BLENDTYPE
+*/
+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND4, BLND4PORTERMODE
+*/
+#define PDP_PORTER_BLND4_BLND4PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND4_BLND4PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND4_BLND4PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND4_BLND4PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND4_BLND4PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND5_OFFSET		(0x067C)
+
+/* PDP, PORTER_BLND5, BLND5BLENDTYPE
+*/
+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND5, BLND5PORTERMODE
+*/
+#define PDP_PORTER_BLND5_BLND5PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND5_BLND5PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND5_BLND5PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND5_BLND5PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND5_BLND5PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND6_OFFSET		(0x0680)
+
+/* PDP, PORTER_BLND6, BLND6BLENDTYPE
+*/
+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND6, BLND6PORTERMODE
+*/
+#define PDP_PORTER_BLND6_BLND6PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND6_BLND6PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND6_BLND6PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND6_BLND6PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND6_BLND6PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PORTER_BLND7_OFFSET		(0x0684)
+
+/* PDP, PORTER_BLND7, BLND7BLENDTYPE
+*/
+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_MASK		(0x00000010)
+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_LSBMASK		(0x00000001)
+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_SHIFT		(4)
+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_LENGTH		(1)
+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PORTER_BLND7, BLND7PORTERMODE
+*/
+#define PDP_PORTER_BLND7_BLND7PORTERMODE_MASK		(0x0000000F)
+#define PDP_PORTER_BLND7_BLND7PORTERMODE_LSBMASK		(0x0000000F)
+#define PDP_PORTER_BLND7_BLND7PORTERMODE_SHIFT		(0)
+#define PDP_PORTER_BLND7_BLND7PORTERMODE_LENGTH		(4)
+#define PDP_PORTER_BLND7_BLND7PORTERMODE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06C8)
+
+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_TRANS
+*/
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_OPAQUE
+*/
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_OFFSET		(0x06CC)
+
+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMAX
+*/
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_MASK		(0x03FF0000)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LSBMASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SHIFT		(16)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LENGTH		(10)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMIN
+*/
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_MASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LSBMASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SHIFT		(0)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LENGTH		(10)
+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1LUMAKEY_C_RG_OFFSET		(0x06D0)
+
+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_R
+*/
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_MASK		(0x0FFF0000)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SHIFT		(16)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LENGTH		(12)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_G
+*/
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_MASK		(0x00000FFF)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SHIFT		(0)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LENGTH		(12)
+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1LUMAKEY_C_B_OFFSET		(0x06D4)
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYALPHAMULT
+*/
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SHIFT		(29)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LENGTH		(1)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYEN
+*/
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK		(0x10000000)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SHIFT		(28)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LENGTH		(1)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYOUTOFF
+*/
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LSBMASK		(0x000003FF)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT		(16)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LENGTH		(10)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYC_B
+*/
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK		(0x00000FFF)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT		(0)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LENGTH		(12)
+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06D8)
+
+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_TRANS
+*/
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_OPAQUE
+*/
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_OFFSET		(0x06DC)
+
+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMAX
+*/
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_MASK		(0x03FF0000)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LSBMASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SHIFT		(16)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LENGTH		(10)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMIN
+*/
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_MASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LSBMASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SHIFT		(0)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LENGTH		(10)
+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2LUMAKEY_C_RG_OFFSET		(0x06E0)
+
+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_R
+*/
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_MASK		(0x0FFF0000)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SHIFT		(16)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LENGTH		(12)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_G
+*/
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_MASK		(0x00000FFF)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SHIFT		(0)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LENGTH		(12)
+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2LUMAKEY_C_B_OFFSET		(0x06E4)
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYALPHAMULT
+*/
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SHIFT		(29)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LENGTH		(1)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYEN
+*/
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_MASK		(0x10000000)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SHIFT		(28)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LENGTH		(1)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYOUTOFF
+*/
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LSBMASK		(0x000003FF)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SHIFT		(16)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LENGTH		(10)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYC_B
+*/
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_MASK		(0x00000FFF)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SHIFT		(0)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LENGTH		(12)
+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06E8)
+
+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_TRANS
+*/
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_OPAQUE
+*/
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_OFFSET		(0x06EC)
+
+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMAX
+*/
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_MASK		(0x03FF0000)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LSBMASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SHIFT		(16)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LENGTH		(10)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMIN
+*/
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_MASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LSBMASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SHIFT		(0)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LENGTH		(10)
+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3LUMAKEY_C_RG_OFFSET		(0x06F0)
+
+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_R
+*/
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_MASK		(0x0FFF0000)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SHIFT		(16)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LENGTH		(12)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_G
+*/
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_MASK		(0x00000FFF)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SHIFT		(0)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LENGTH		(12)
+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3LUMAKEY_C_B_OFFSET		(0x06F4)
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYALPHAMULT
+*/
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SHIFT		(29)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LENGTH		(1)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYEN
+*/
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_MASK		(0x10000000)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SHIFT		(28)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LENGTH		(1)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYOUTOFF
+*/
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LSBMASK		(0x000003FF)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SHIFT		(16)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LENGTH		(10)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYC_B
+*/
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_MASK		(0x00000FFF)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SHIFT		(0)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LENGTH		(12)
+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET		(0x06F8)
+
+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_TRANS
+*/
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_MASK		(0x03FF0000)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LSBMASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SHIFT		(16)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LENGTH		(10)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_OPAQUE
+*/
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_MASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LSBMASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SHIFT		(0)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LENGTH		(10)
+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET		(0x06FC)
+
+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMAX
+*/
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK		(0x03FF0000)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LSBMASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT		(16)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LENGTH		(10)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMIN
+*/
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LSBMASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT		(0)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LENGTH		(10)
+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4LUMAKEY_C_RG_OFFSET		(0x0700)
+
+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_R
+*/
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_MASK		(0x0FFF0000)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LSBMASK		(0x00000FFF)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SHIFT		(16)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LENGTH		(12)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_G
+*/
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_MASK		(0x00000FFF)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LSBMASK		(0x00000FFF)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SHIFT		(0)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LENGTH		(12)
+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4LUMAKEY_C_B_OFFSET		(0x0704)
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYALPHAMULT
+*/
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_MASK		(0x20000000)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LSBMASK		(0x00000001)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SHIFT		(29)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LENGTH		(1)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYEN
+*/
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_MASK		(0x10000000)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LSBMASK		(0x00000001)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SHIFT		(28)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LENGTH		(1)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYOUTOFF
+*/
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_MASK		(0x03FF0000)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LSBMASK		(0x000003FF)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SHIFT		(16)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LENGTH		(10)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYC_B
+*/
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_MASK		(0x00000FFF)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LSBMASK		(0x00000FFF)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SHIFT		(0)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LENGTH		(12)
+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CSCCOEFF0_OFFSET		(0x0708)
+
+/* PDP, CSCCOEFF0, CSCCOEFFRU
+*/
+#define PDP_CSCCOEFF0_CSCCOEFFRU_MASK		(0x003FF800)
+#define PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT		(11)
+#define PDP_CSCCOEFF0_CSCCOEFFRU_LENGTH		(11)
+#define PDP_CSCCOEFF0_CSCCOEFFRU_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF0, CSCCOEFFRY
+*/
+#define PDP_CSCCOEFF0_CSCCOEFFRY_MASK		(0x000007FF)
+#define PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT		(0)
+#define PDP_CSCCOEFF0_CSCCOEFFRY_LENGTH		(11)
+#define PDP_CSCCOEFF0_CSCCOEFFRY_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CSCCOEFF1_OFFSET		(0x070C)
+
+/* PDP, CSCCOEFF1, CSCCOEFFGY
+*/
+#define PDP_CSCCOEFF1_CSCCOEFFGY_MASK		(0x003FF800)
+#define PDP_CSCCOEFF1_CSCCOEFFGY_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF1_CSCCOEFFGY_SHIFT		(11)
+#define PDP_CSCCOEFF1_CSCCOEFFGY_LENGTH		(11)
+#define PDP_CSCCOEFF1_CSCCOEFFGY_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF1, CSCCOEFFRV
+*/
+#define PDP_CSCCOEFF1_CSCCOEFFRV_MASK		(0x000007FF)
+#define PDP_CSCCOEFF1_CSCCOEFFRV_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF1_CSCCOEFFRV_SHIFT		(0)
+#define PDP_CSCCOEFF1_CSCCOEFFRV_LENGTH		(11)
+#define PDP_CSCCOEFF1_CSCCOEFFRV_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CSCCOEFF2_OFFSET		(0x0710)
+
+/* PDP, CSCCOEFF2, CSCCOEFFGV
+*/
+#define PDP_CSCCOEFF2_CSCCOEFFGV_MASK		(0x003FF800)
+#define PDP_CSCCOEFF2_CSCCOEFFGV_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF2_CSCCOEFFGV_SHIFT		(11)
+#define PDP_CSCCOEFF2_CSCCOEFFGV_LENGTH		(11)
+#define PDP_CSCCOEFF2_CSCCOEFFGV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF2, CSCCOEFFGU
+*/
+#define PDP_CSCCOEFF2_CSCCOEFFGU_MASK		(0x000007FF)
+#define PDP_CSCCOEFF2_CSCCOEFFGU_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF2_CSCCOEFFGU_SHIFT		(0)
+#define PDP_CSCCOEFF2_CSCCOEFFGU_LENGTH		(11)
+#define PDP_CSCCOEFF2_CSCCOEFFGU_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CSCCOEFF3_OFFSET		(0x0714)
+
+/* PDP, CSCCOEFF3, CSCCOEFFBU
+*/
+#define PDP_CSCCOEFF3_CSCCOEFFBU_MASK		(0x003FF800)
+#define PDP_CSCCOEFF3_CSCCOEFFBU_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF3_CSCCOEFFBU_SHIFT		(11)
+#define PDP_CSCCOEFF3_CSCCOEFFBU_LENGTH		(11)
+#define PDP_CSCCOEFF3_CSCCOEFFBU_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CSCCOEFF3, CSCCOEFFBY
+*/
+#define PDP_CSCCOEFF3_CSCCOEFFBY_MASK		(0x000007FF)
+#define PDP_CSCCOEFF3_CSCCOEFFBY_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF3_CSCCOEFFBY_SHIFT		(0)
+#define PDP_CSCCOEFF3_CSCCOEFFBY_LENGTH		(11)
+#define PDP_CSCCOEFF3_CSCCOEFFBY_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CSCCOEFF4_OFFSET		(0x0718)
+
+/* PDP, CSCCOEFF4, CSCCOEFFBV
+*/
+#define PDP_CSCCOEFF4_CSCCOEFFBV_MASK		(0x000007FF)
+#define PDP_CSCCOEFF4_CSCCOEFFBV_LSBMASK		(0x000007FF)
+#define PDP_CSCCOEFF4_CSCCOEFFBV_SHIFT		(0)
+#define PDP_CSCCOEFF4_CSCCOEFFBV_LENGTH		(11)
+#define PDP_CSCCOEFF4_CSCCOEFFBV_SIGNED_FIELD	IMG_FALSE
+
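+/*
+ * Editor's note, inferred from the names only: CSCCOEFF0..CSCCOEFF4 above
+ * pack the nine 11-bit colour-space-conversion coefficients two per
+ * register, i.e. the rows (RY RU RV), (GY GU GV), (BY BU BV) of the 3x3
+ * matrix the PDP would apply during YUV-to-RGB conversion.
+ */
+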
+#define PDP_BGNDCOL_AR_OFFSET		(0x071C)
+
+/* PDP, BGNDCOL_AR, BGNDCOL_A
+*/
+#define PDP_BGNDCOL_AR_BGNDCOL_A_MASK		(0x03FF0000)
+#define PDP_BGNDCOL_AR_BGNDCOL_A_LSBMASK		(0x000003FF)
+#define PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT		(16)
+#define PDP_BGNDCOL_AR_BGNDCOL_A_LENGTH		(10)
+#define PDP_BGNDCOL_AR_BGNDCOL_A_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, BGNDCOL_AR, BGNDCOL_R
+*/
+#define PDP_BGNDCOL_AR_BGNDCOL_R_MASK		(0x000003FF)
+#define PDP_BGNDCOL_AR_BGNDCOL_R_LSBMASK		(0x000003FF)
+#define PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT		(0)
+#define PDP_BGNDCOL_AR_BGNDCOL_R_LENGTH		(10)
+#define PDP_BGNDCOL_AR_BGNDCOL_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BGNDCOL_GB_OFFSET		(0x0720)
+
+/* PDP, BGNDCOL_GB, BGNDCOL_G
+*/
+#define PDP_BGNDCOL_GB_BGNDCOL_G_MASK		(0x03FF0000)
+#define PDP_BGNDCOL_GB_BGNDCOL_G_LSBMASK		(0x000003FF)
+#define PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT		(16)
+#define PDP_BGNDCOL_GB_BGNDCOL_G_LENGTH		(10)
+#define PDP_BGNDCOL_GB_BGNDCOL_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, BGNDCOL_GB, BGNDCOL_B
+*/
+#define PDP_BGNDCOL_GB_BGNDCOL_B_MASK		(0x000003FF)
+#define PDP_BGNDCOL_GB_BGNDCOL_B_LSBMASK		(0x000003FF)
+#define PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT		(0)
+#define PDP_BGNDCOL_GB_BGNDCOL_B_LENGTH		(10)
+#define PDP_BGNDCOL_GB_BGNDCOL_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BORDCOL_R_OFFSET		(0x0724)
+
+/* PDP, BORDCOL_R, BORDCOL_R
+*/
+#define PDP_BORDCOL_R_BORDCOL_R_MASK		(0x000003FF)
+#define PDP_BORDCOL_R_BORDCOL_R_LSBMASK		(0x000003FF)
+#define PDP_BORDCOL_R_BORDCOL_R_SHIFT		(0)
+#define PDP_BORDCOL_R_BORDCOL_R_LENGTH		(10)
+#define PDP_BORDCOL_R_BORDCOL_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BORDCOL_GB_OFFSET		(0x0728)
+
+/* PDP, BORDCOL_GB, BORDCOL_G
+*/
+#define PDP_BORDCOL_GB_BORDCOL_G_MASK		(0x03FF0000)
+#define PDP_BORDCOL_GB_BORDCOL_G_LSBMASK		(0x000003FF)
+#define PDP_BORDCOL_GB_BORDCOL_G_SHIFT		(16)
+#define PDP_BORDCOL_GB_BORDCOL_G_LENGTH		(10)
+#define PDP_BORDCOL_GB_BORDCOL_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, BORDCOL_GB, BORDCOL_B
+*/
+#define PDP_BORDCOL_GB_BORDCOL_B_MASK		(0x000003FF)
+#define PDP_BORDCOL_GB_BORDCOL_B_LSBMASK		(0x000003FF)
+#define PDP_BORDCOL_GB_BORDCOL_B_SHIFT		(0)
+#define PDP_BORDCOL_GB_BORDCOL_B_LENGTH		(10)
+#define PDP_BORDCOL_GB_BORDCOL_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_LINESTAT_OFFSET		(0x0734)
+
+/* PDP, LINESTAT, LINENO
+*/
+#define PDP_LINESTAT_LINENO_MASK		(0x00001FFF)
+#define PDP_LINESTAT_LINENO_LSBMASK		(0x00001FFF)
+#define PDP_LINESTAT_LINENO_SHIFT		(0)
+#define PDP_LINESTAT_LINENO_LENGTH		(13)
+#define PDP_LINESTAT_LINENO_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_C11C12_OFFSET		(0x0738)
+
+/* PDP, CR_PDP_PROCAMP_C11C12, CR_PROCAMP_C12
+*/
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_MASK		(0x3FFF0000)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_C11C12, CR_PROCAMP_C11
+*/
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_MASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_C13C21_OFFSET		(0x073C)
+
+/* PDP, CR_PDP_PROCAMP_C13C21, CR_PROCAMP_C21
+*/
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_MASK		(0x3FFF0000)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_C13C21, CR_PROCAMP_C13
+*/
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_MASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_C22C23_OFFSET		(0x0740)
+
+/* PDP, CR_PDP_PROCAMP_C22C23, CR_PROCAMP_C23
+*/
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_MASK		(0x3FFF0000)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_C22C23, CR_PROCAMP_C22
+*/
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_MASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_C31C32_OFFSET		(0x0744)
+
+/* PDP, CR_PDP_PROCAMP_C31C32, CR_PROCAMP_C32
+*/
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_MASK		(0x3FFF0000)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_C31C32, CR_PROCAMP_C31
+*/
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_MASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_C33_OFFSET		(0x0748)
+
+/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_C33
+*/
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_MASK		(0x3FFF0000)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_LSBMASK		(0x00003FFF)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_LENGTH		(14)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_RANGE
+*/
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_MASK		(0x00000030)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LSBMASK		(0x00000003)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SHIFT		(4)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LENGTH		(2)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_EN
+*/
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK		(0x00000001)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_LSBMASK		(0x00000001)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_LENGTH		(1)
+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_OFFSET		(0x074C)
+
+/* PDP, CR_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_G
+*/
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_MASK		(0x0FFF0000)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LSBMASK		(0x00000FFF)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LENGTH		(12)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_B
+*/
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_MASK		(0x00000FFF)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LSBMASK		(0x00000FFF)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LENGTH		(12)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_OFFSET		(0x0750)
+
+/* PDP, CR_PDP_PROCAMP_OUTOFFSET_R, CR_PROCAMP_OUTOFF_R
+*/
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_MASK		(0x00000FFF)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LSBMASK		(0x00000FFF)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LENGTH		(12)
+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_OFFSET		(0x0754)
+
+/* PDP, CR_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_G
+*/
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_MASK		(0x03FF0000)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LSBMASK		(0x000003FF)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SHIFT		(16)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LENGTH		(10)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, CR_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_B
+*/
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_MASK		(0x000003FF)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LSBMASK		(0x000003FF)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LENGTH		(10)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_OFFSET		(0x0758)
+
+/* PDP, CR_PDP_PROCAMP_INOFFSET_R, CR_PROCAMP_INOFF_R
+*/
+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_MASK		(0x000003FF)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LSBMASK		(0x000003FF)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SHIFT		(0)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LENGTH		(10)
+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_SIGNAT_R_OFFSET		(0x075C)
+
+/* PDP, SIGNAT_R, SIGNATURE_R
+*/
+#define PDP_SIGNAT_R_SIGNATURE_R_MASK		(0x000003FF)
+#define PDP_SIGNAT_R_SIGNATURE_R_LSBMASK		(0x000003FF)
+#define PDP_SIGNAT_R_SIGNATURE_R_SHIFT		(0)
+#define PDP_SIGNAT_R_SIGNATURE_R_LENGTH		(10)
+#define PDP_SIGNAT_R_SIGNATURE_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_SIGNAT_GB_OFFSET		(0x0760)
+
+/* PDP, SIGNAT_GB, SIGNATURE_G
+*/
+#define PDP_SIGNAT_GB_SIGNATURE_G_MASK		(0x03FF0000)
+#define PDP_SIGNAT_GB_SIGNATURE_G_LSBMASK		(0x000003FF)
+#define PDP_SIGNAT_GB_SIGNATURE_G_SHIFT		(16)
+#define PDP_SIGNAT_GB_SIGNATURE_G_LENGTH		(10)
+#define PDP_SIGNAT_GB_SIGNATURE_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SIGNAT_GB, SIGNATURE_B
+*/
+#define PDP_SIGNAT_GB_SIGNATURE_B_MASK		(0x000003FF)
+#define PDP_SIGNAT_GB_SIGNATURE_B_LSBMASK		(0x000003FF)
+#define PDP_SIGNAT_GB_SIGNATURE_B_SHIFT		(0)
+#define PDP_SIGNAT_GB_SIGNATURE_B_LENGTH		(10)
+#define PDP_SIGNAT_GB_SIGNATURE_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_REGISTER_UPDATE_CTRL_OFFSET		(0x0764)
+
+/* PDP, REGISTER_UPDATE_CTRL, BYPASS_DOUBLE_BUFFERING
+*/
+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_MASK		(0x00000004)
+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LSBMASK		(0x00000001)
+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SHIFT		(2)
+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LENGTH		(1)
+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, REGISTER_UPDATE_CTRL, REGISTERS_VALID
+*/
+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK		(0x00000002)
+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LSBMASK		(0x00000001)
+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT		(1)
+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LENGTH		(1)
+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, REGISTER_UPDATE_CTRL, USE_VBLANK
+*/
+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK		(0x00000001)
+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LSBMASK		(0x00000001)
+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT		(0)
+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LENGTH		(1)
+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_REGISTER_UPDATE_STATUS_OFFSET		(0x0768)
+
+/* PDP, REGISTER_UPDATE_STATUS, REGISTERS_UPDATED
+*/
+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_MASK		(0x00000002)
+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LSBMASK		(0x00000001)
+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SHIFT		(1)
+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LENGTH		(1)
+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SIGNED_FIELD	IMG_FALSE
+
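+/*
+ * Editor's note, inferred from the field names only: the two registers
+ * above suggest a double-buffered commit handshake -- software programs
+ * the shadow registers, raises REGISTERS_VALID (synchronised to vblank
+ * when USE_VBLANK is set), and hardware acknowledges the swap through
+ * REGISTERS_UPDATED; BYPASS_DOUBLE_BUFFERING presumably makes register
+ * writes take effect immediately instead.
+ */
+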
+#define PDP_DBGCTRL_OFFSET		(0x076C)
+
+/* PDP, DBGCTRL, DBG_READ
+*/
+#define PDP_DBGCTRL_DBG_READ_MASK		(0x00000002)
+#define PDP_DBGCTRL_DBG_READ_LSBMASK		(0x00000001)
+#define PDP_DBGCTRL_DBG_READ_SHIFT		(1)
+#define PDP_DBGCTRL_DBG_READ_LENGTH		(1)
+#define PDP_DBGCTRL_DBG_READ_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DBGCTRL, DBG_ENAB
+*/
+#define PDP_DBGCTRL_DBG_ENAB_MASK		(0x00000001)
+#define PDP_DBGCTRL_DBG_ENAB_LSBMASK		(0x00000001)
+#define PDP_DBGCTRL_DBG_ENAB_SHIFT		(0)
+#define PDP_DBGCTRL_DBG_ENAB_LENGTH		(1)
+#define PDP_DBGCTRL_DBG_ENAB_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DBGDATA_R_OFFSET		(0x0770)
+
+/* PDP, DBGDATA_R, DBG_DATA_R
+*/
+#define PDP_DBGDATA_R_DBG_DATA_R_MASK		(0x000003FF)
+#define PDP_DBGDATA_R_DBG_DATA_R_LSBMASK		(0x000003FF)
+#define PDP_DBGDATA_R_DBG_DATA_R_SHIFT		(0)
+#define PDP_DBGDATA_R_DBG_DATA_R_LENGTH		(10)
+#define PDP_DBGDATA_R_DBG_DATA_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DBGDATA_GB_OFFSET		(0x0774)
+
+/* PDP, DBGDATA_GB, DBG_DATA_G
+*/
+#define PDP_DBGDATA_GB_DBG_DATA_G_MASK		(0x03FF0000)
+#define PDP_DBGDATA_GB_DBG_DATA_G_LSBMASK		(0x000003FF)
+#define PDP_DBGDATA_GB_DBG_DATA_G_SHIFT		(16)
+#define PDP_DBGDATA_GB_DBG_DATA_G_LENGTH		(10)
+#define PDP_DBGDATA_GB_DBG_DATA_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DBGDATA_GB, DBG_DATA_B
+*/
+#define PDP_DBGDATA_GB_DBG_DATA_B_MASK		(0x000003FF)
+#define PDP_DBGDATA_GB_DBG_DATA_B_LSBMASK		(0x000003FF)
+#define PDP_DBGDATA_GB_DBG_DATA_B_SHIFT		(0)
+#define PDP_DBGDATA_GB_DBG_DATA_B_LENGTH		(10)
+#define PDP_DBGDATA_GB_DBG_DATA_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DBGSIDE_OFFSET		(0x0778)
+
+/* PDP, DBGSIDE, DBG_VAL
+*/
+#define PDP_DBGSIDE_DBG_VAL_MASK		(0x00000008)
+#define PDP_DBGSIDE_DBG_VAL_LSBMASK		(0x00000001)
+#define PDP_DBGSIDE_DBG_VAL_SHIFT		(3)
+#define PDP_DBGSIDE_DBG_VAL_LENGTH		(1)
+#define PDP_DBGSIDE_DBG_VAL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DBGSIDE, DBG_SIDE
+*/
+#define PDP_DBGSIDE_DBG_SIDE_MASK		(0x00000007)
+#define PDP_DBGSIDE_DBG_SIDE_LSBMASK		(0x00000007)
+#define PDP_DBGSIDE_DBG_SIDE_SHIFT		(0)
+#define PDP_DBGSIDE_DBG_SIDE_LENGTH		(3)
+#define PDP_DBGSIDE_DBG_SIDE_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_OUTPUT_OFFSET		(0x077C)
+
+/* PDP, OUTPUT, EIGHT_BIT_OUTPUT
+*/
+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_MASK		(0x00000002)
+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_LSBMASK		(0x00000001)
+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_SHIFT		(1)
+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_LENGTH		(1)
+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OUTPUT, OUTPUT_CONFIG
+*/
+#define PDP_OUTPUT_OUTPUT_CONFIG_MASK		(0x00000001)
+#define PDP_OUTPUT_OUTPUT_CONFIG_LSBMASK		(0x00000001)
+#define PDP_OUTPUT_OUTPUT_CONFIG_SHIFT		(0)
+#define PDP_OUTPUT_OUTPUT_CONFIG_LENGTH		(1)
+#define PDP_OUTPUT_OUTPUT_CONFIG_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_SYNCCTRL_OFFSET		(0x0780)
+
+/* PDP, SYNCCTRL, SYNCACTIVE
+*/
+#define PDP_SYNCCTRL_SYNCACTIVE_MASK		(0x80000000)
+#define PDP_SYNCCTRL_SYNCACTIVE_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_SYNCACTIVE_SHIFT		(31)
+#define PDP_SYNCCTRL_SYNCACTIVE_LENGTH		(1)
+#define PDP_SYNCCTRL_SYNCACTIVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, PDP_RST
+*/
+#define PDP_SYNCCTRL_PDP_RST_MASK		(0x20000000)
+#define PDP_SYNCCTRL_PDP_RST_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_PDP_RST_SHIFT		(29)
+#define PDP_SYNCCTRL_PDP_RST_LENGTH		(1)
+#define PDP_SYNCCTRL_PDP_RST_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, POWERDN
+*/
+#define PDP_SYNCCTRL_POWERDN_MASK		(0x10000000)
+#define PDP_SYNCCTRL_POWERDN_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_POWERDN_SHIFT		(28)
+#define PDP_SYNCCTRL_POWERDN_LENGTH		(1)
+#define PDP_SYNCCTRL_POWERDN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, LOWPWRMODE
+*/
+#define PDP_SYNCCTRL_LOWPWRMODE_MASK		(0x08000000)
+#define PDP_SYNCCTRL_LOWPWRMODE_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_LOWPWRMODE_SHIFT		(27)
+#define PDP_SYNCCTRL_LOWPWRMODE_LENGTH		(1)
+#define PDP_SYNCCTRL_LOWPWRMODE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDSYNCTRL
+*/
+#define PDP_SYNCCTRL_UPDSYNCTRL_MASK		(0x04000000)
+#define PDP_SYNCCTRL_UPDSYNCTRL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_UPDSYNCTRL_SHIFT		(26)
+#define PDP_SYNCCTRL_UPDSYNCTRL_LENGTH		(1)
+#define PDP_SYNCCTRL_UPDSYNCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDINTCTRL
+*/
+#define PDP_SYNCCTRL_UPDINTCTRL_MASK		(0x02000000)
+#define PDP_SYNCCTRL_UPDINTCTRL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_UPDINTCTRL_SHIFT		(25)
+#define PDP_SYNCCTRL_UPDINTCTRL_LENGTH		(1)
+#define PDP_SYNCCTRL_UPDINTCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDCTRL
+*/
+#define PDP_SYNCCTRL_UPDCTRL_MASK		(0x01000000)
+#define PDP_SYNCCTRL_UPDCTRL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_UPDCTRL_SHIFT		(24)
+#define PDP_SYNCCTRL_UPDCTRL_LENGTH		(1)
+#define PDP_SYNCCTRL_UPDCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDWAIT
+*/
+#define PDP_SYNCCTRL_UPDWAIT_MASK		(0x000F0000)
+#define PDP_SYNCCTRL_UPDWAIT_LSBMASK		(0x0000000F)
+#define PDP_SYNCCTRL_UPDWAIT_SHIFT		(16)
+#define PDP_SYNCCTRL_UPDWAIT_LENGTH		(4)
+#define PDP_SYNCCTRL_UPDWAIT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, FIELD_EN
+*/
+#define PDP_SYNCCTRL_FIELD_EN_MASK		(0x00002000)
+#define PDP_SYNCCTRL_FIELD_EN_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_FIELD_EN_SHIFT		(13)
+#define PDP_SYNCCTRL_FIELD_EN_LENGTH		(1)
+#define PDP_SYNCCTRL_FIELD_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, CSYNC_EN
+*/
+#define PDP_SYNCCTRL_CSYNC_EN_MASK		(0x00001000)
+#define PDP_SYNCCTRL_CSYNC_EN_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_CSYNC_EN_SHIFT		(12)
+#define PDP_SYNCCTRL_CSYNC_EN_LENGTH		(1)
+#define PDP_SYNCCTRL_CSYNC_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, CLKPOL
+*/
+#define PDP_SYNCCTRL_CLKPOL_MASK		(0x00000800)
+#define PDP_SYNCCTRL_CLKPOL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_CLKPOL_SHIFT		(11)
+#define PDP_SYNCCTRL_CLKPOL_LENGTH		(1)
+#define PDP_SYNCCTRL_CLKPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, VS_SLAVE
+*/
+#define PDP_SYNCCTRL_VS_SLAVE_MASK		(0x00000080)
+#define PDP_SYNCCTRL_VS_SLAVE_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_VS_SLAVE_SHIFT		(7)
+#define PDP_SYNCCTRL_VS_SLAVE_LENGTH		(1)
+#define PDP_SYNCCTRL_VS_SLAVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, HS_SLAVE
+*/
+#define PDP_SYNCCTRL_HS_SLAVE_MASK		(0x00000040)
+#define PDP_SYNCCTRL_HS_SLAVE_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_HS_SLAVE_SHIFT		(6)
+#define PDP_SYNCCTRL_HS_SLAVE_LENGTH		(1)
+#define PDP_SYNCCTRL_HS_SLAVE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, BLNKPOL
+*/
+#define PDP_SYNCCTRL_BLNKPOL_MASK		(0x00000020)
+#define PDP_SYNCCTRL_BLNKPOL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_BLNKPOL_SHIFT		(5)
+#define PDP_SYNCCTRL_BLNKPOL_LENGTH		(1)
+#define PDP_SYNCCTRL_BLNKPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, BLNKDIS
+*/
+#define PDP_SYNCCTRL_BLNKDIS_MASK		(0x00000010)
+#define PDP_SYNCCTRL_BLNKDIS_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_BLNKDIS_SHIFT		(4)
+#define PDP_SYNCCTRL_BLNKDIS_LENGTH		(1)
+#define PDP_SYNCCTRL_BLNKDIS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, VSPOL
+*/
+#define PDP_SYNCCTRL_VSPOL_MASK		(0x00000008)
+#define PDP_SYNCCTRL_VSPOL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_VSPOL_SHIFT		(3)
+#define PDP_SYNCCTRL_VSPOL_LENGTH		(1)
+#define PDP_SYNCCTRL_VSPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, VSDIS
+*/
+#define PDP_SYNCCTRL_VSDIS_MASK		(0x00000004)
+#define PDP_SYNCCTRL_VSDIS_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_VSDIS_SHIFT		(2)
+#define PDP_SYNCCTRL_VSDIS_LENGTH		(1)
+#define PDP_SYNCCTRL_VSDIS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, HSPOL
+*/
+#define PDP_SYNCCTRL_HSPOL_MASK		(0x00000002)
+#define PDP_SYNCCTRL_HSPOL_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_HSPOL_SHIFT		(1)
+#define PDP_SYNCCTRL_HSPOL_LENGTH		(1)
+#define PDP_SYNCCTRL_HSPOL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, SYNCCTRL, HSDIS
+*/
+#define PDP_SYNCCTRL_HSDIS_MASK		(0x00000001)
+#define PDP_SYNCCTRL_HSDIS_LSBMASK		(0x00000001)
+#define PDP_SYNCCTRL_HSDIS_SHIFT		(0)
+#define PDP_SYNCCTRL_HSDIS_LENGTH		(1)
+#define PDP_SYNCCTRL_HSDIS_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_HSYNC1_OFFSET		(0x0784)
+
+/* PDP, HSYNC1, HBPS
+*/
+#define PDP_HSYNC1_HBPS_MASK		(0x1FFF0000)
+#define PDP_HSYNC1_HBPS_LSBMASK		(0x00001FFF)
+#define PDP_HSYNC1_HBPS_SHIFT		(16)
+#define PDP_HSYNC1_HBPS_LENGTH		(13)
+#define PDP_HSYNC1_HBPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HSYNC1, HT
+*/
+#define PDP_HSYNC1_HT_MASK		(0x00001FFF)
+#define PDP_HSYNC1_HT_LSBMASK		(0x00001FFF)
+#define PDP_HSYNC1_HT_SHIFT		(0)
+#define PDP_HSYNC1_HT_LENGTH		(13)
+#define PDP_HSYNC1_HT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_HSYNC2_OFFSET		(0x0788)
+
+/* PDP, HSYNC2, HAS
+*/
+#define PDP_HSYNC2_HAS_MASK		(0x1FFF0000)
+#define PDP_HSYNC2_HAS_LSBMASK		(0x00001FFF)
+#define PDP_HSYNC2_HAS_SHIFT		(16)
+#define PDP_HSYNC2_HAS_LENGTH		(13)
+#define PDP_HSYNC2_HAS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HSYNC2, HLBS
+*/
+#define PDP_HSYNC2_HLBS_MASK		(0x00001FFF)
+#define PDP_HSYNC2_HLBS_LSBMASK		(0x00001FFF)
+#define PDP_HSYNC2_HLBS_SHIFT		(0)
+#define PDP_HSYNC2_HLBS_LENGTH		(13)
+#define PDP_HSYNC2_HLBS_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_HSYNC3_OFFSET		(0x078C)
+
+/* PDP, HSYNC3, HFPS
+*/
+#define PDP_HSYNC3_HFPS_MASK		(0x1FFF0000)
+#define PDP_HSYNC3_HFPS_LSBMASK		(0x00001FFF)
+#define PDP_HSYNC3_HFPS_SHIFT		(16)
+#define PDP_HSYNC3_HFPS_LENGTH		(13)
+#define PDP_HSYNC3_HFPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HSYNC3, HRBS
+*/
+#define PDP_HSYNC3_HRBS_MASK		(0x00001FFF)
+#define PDP_HSYNC3_HRBS_LSBMASK		(0x00001FFF)
+#define PDP_HSYNC3_HRBS_SHIFT		(0)
+#define PDP_HSYNC3_HRBS_LENGTH		(13)
+#define PDP_HSYNC3_HRBS_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VSYNC1_OFFSET		(0x0790)
+
+/* PDP, VSYNC1, VBPS
+*/
+#define PDP_VSYNC1_VBPS_MASK		(0x1FFF0000)
+#define PDP_VSYNC1_VBPS_LSBMASK		(0x00001FFF)
+#define PDP_VSYNC1_VBPS_SHIFT		(16)
+#define PDP_VSYNC1_VBPS_LENGTH		(13)
+#define PDP_VSYNC1_VBPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VSYNC1, VT
+*/
+#define PDP_VSYNC1_VT_MASK		(0x00001FFF)
+#define PDP_VSYNC1_VT_LSBMASK		(0x00001FFF)
+#define PDP_VSYNC1_VT_SHIFT		(0)
+#define PDP_VSYNC1_VT_LENGTH		(13)
+#define PDP_VSYNC1_VT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VSYNC2_OFFSET		(0x0794)
+
+/* PDP, VSYNC2, VAS
+*/
+#define PDP_VSYNC2_VAS_MASK		(0x1FFF0000)
+#define PDP_VSYNC2_VAS_LSBMASK		(0x00001FFF)
+#define PDP_VSYNC2_VAS_SHIFT		(16)
+#define PDP_VSYNC2_VAS_LENGTH		(13)
+#define PDP_VSYNC2_VAS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VSYNC2, VTBS
+*/
+#define PDP_VSYNC2_VTBS_MASK		(0x00001FFF)
+#define PDP_VSYNC2_VTBS_LSBMASK		(0x00001FFF)
+#define PDP_VSYNC2_VTBS_SHIFT		(0)
+#define PDP_VSYNC2_VTBS_LENGTH		(13)
+#define PDP_VSYNC2_VTBS_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VSYNC3_OFFSET		(0x0798)
+
+/* PDP, VSYNC3, VFPS
+*/
+#define PDP_VSYNC3_VFPS_MASK		(0x1FFF0000)
+#define PDP_VSYNC3_VFPS_LSBMASK		(0x00001FFF)
+#define PDP_VSYNC3_VFPS_SHIFT		(16)
+#define PDP_VSYNC3_VFPS_LENGTH		(13)
+#define PDP_VSYNC3_VFPS_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VSYNC3, VBBS
+*/
+#define PDP_VSYNC3_VBBS_MASK		(0x00001FFF)
+#define PDP_VSYNC3_VBBS_LSBMASK		(0x00001FFF)
+#define PDP_VSYNC3_VBBS_SHIFT		(0)
+#define PDP_VSYNC3_VBBS_LENGTH		(13)
+#define PDP_VSYNC3_VBBS_SIGNED_FIELD	IMG_FALSE
+
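+/*
+ * Editor's sketch (hypothetical values): each sync register above packs
+ * two 13-bit timing fields, so a full word is composed as, e.g. for
+ * HSYNC1:
+ *
+ *   hsync1 = ((hbps << PDP_HSYNC1_HBPS_SHIFT) & PDP_HSYNC1_HBPS_MASK) |
+ *	      (ht & PDP_HSYNC1_HT_MASK);
+ */
+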
+#define PDP_INTSTAT_OFFSET		(0x079C)
+
+/* PDP, INTSTAT, INTS_VID4ORUN
+*/
+#define PDP_INTSTAT_INTS_VID4ORUN_MASK		(0x00080000)
+#define PDP_INTSTAT_INTS_VID4ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID4ORUN_SHIFT		(19)
+#define PDP_INTSTAT_INTS_VID4ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID3ORUN
+*/
+#define PDP_INTSTAT_INTS_VID3ORUN_MASK		(0x00040000)
+#define PDP_INTSTAT_INTS_VID3ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID3ORUN_SHIFT		(18)
+#define PDP_INTSTAT_INTS_VID3ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID2ORUN
+*/
+#define PDP_INTSTAT_INTS_VID2ORUN_MASK		(0x00020000)
+#define PDP_INTSTAT_INTS_VID2ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID2ORUN_SHIFT		(17)
+#define PDP_INTSTAT_INTS_VID2ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID1ORUN
+*/
+#define PDP_INTSTAT_INTS_VID1ORUN_MASK		(0x00010000)
+#define PDP_INTSTAT_INTS_VID1ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID1ORUN_SHIFT		(16)
+#define PDP_INTSTAT_INTS_VID1ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH4ORUN
+*/
+#define PDP_INTSTAT_INTS_GRPH4ORUN_MASK		(0x00008000)
+#define PDP_INTSTAT_INTS_GRPH4ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH4ORUN_SHIFT		(15)
+#define PDP_INTSTAT_INTS_GRPH4ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH3ORUN
+*/
+#define PDP_INTSTAT_INTS_GRPH3ORUN_MASK		(0x00004000)
+#define PDP_INTSTAT_INTS_GRPH3ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH3ORUN_SHIFT		(14)
+#define PDP_INTSTAT_INTS_GRPH3ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH2ORUN
+*/
+#define PDP_INTSTAT_INTS_GRPH2ORUN_MASK		(0x00002000)
+#define PDP_INTSTAT_INTS_GRPH2ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH2ORUN_SHIFT		(13)
+#define PDP_INTSTAT_INTS_GRPH2ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH1ORUN
+*/
+#define PDP_INTSTAT_INTS_GRPH1ORUN_MASK		(0x00001000)
+#define PDP_INTSTAT_INTS_GRPH1ORUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH1ORUN_SHIFT		(12)
+#define PDP_INTSTAT_INTS_GRPH1ORUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID4URUN
+*/
+#define PDP_INTSTAT_INTS_VID4URUN_MASK		(0x00000800)
+#define PDP_INTSTAT_INTS_VID4URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID4URUN_SHIFT		(11)
+#define PDP_INTSTAT_INTS_VID4URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID3URUN
+*/
+#define PDP_INTSTAT_INTS_VID3URUN_MASK		(0x00000400)
+#define PDP_INTSTAT_INTS_VID3URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID3URUN_SHIFT		(10)
+#define PDP_INTSTAT_INTS_VID3URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID2URUN
+*/
+#define PDP_INTSTAT_INTS_VID2URUN_MASK		(0x00000200)
+#define PDP_INTSTAT_INTS_VID2URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID2URUN_SHIFT		(9)
+#define PDP_INTSTAT_INTS_VID2URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID1URUN
+*/
+#define PDP_INTSTAT_INTS_VID1URUN_MASK		(0x00000100)
+#define PDP_INTSTAT_INTS_VID1URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VID1URUN_SHIFT		(8)
+#define PDP_INTSTAT_INTS_VID1URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VID1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH4URUN
+*/
+#define PDP_INTSTAT_INTS_GRPH4URUN_MASK		(0x00000080)
+#define PDP_INTSTAT_INTS_GRPH4URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH4URUN_SHIFT		(7)
+#define PDP_INTSTAT_INTS_GRPH4URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH3URUN
+*/
+#define PDP_INTSTAT_INTS_GRPH3URUN_MASK		(0x00000040)
+#define PDP_INTSTAT_INTS_GRPH3URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH3URUN_SHIFT		(6)
+#define PDP_INTSTAT_INTS_GRPH3URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH2URUN
+*/
+#define PDP_INTSTAT_INTS_GRPH2URUN_MASK		(0x00000020)
+#define PDP_INTSTAT_INTS_GRPH2URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH2URUN_SHIFT		(5)
+#define PDP_INTSTAT_INTS_GRPH2URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH1URUN
+*/
+#define PDP_INTSTAT_INTS_GRPH1URUN_MASK		(0x00000010)
+#define PDP_INTSTAT_INTS_GRPH1URUN_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_GRPH1URUN_SHIFT		(4)
+#define PDP_INTSTAT_INTS_GRPH1URUN_LENGTH		(1)
+#define PDP_INTSTAT_INTS_GRPH1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VBLNK1
+*/
+#define PDP_INTSTAT_INTS_VBLNK1_MASK		(0x00000008)
+#define PDP_INTSTAT_INTS_VBLNK1_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VBLNK1_SHIFT		(3)
+#define PDP_INTSTAT_INTS_VBLNK1_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VBLNK0
+*/
+#define PDP_INTSTAT_INTS_VBLNK0_MASK		(0x00000004)
+#define PDP_INTSTAT_INTS_VBLNK0_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_VBLNK0_SHIFT		(2)
+#define PDP_INTSTAT_INTS_VBLNK0_LENGTH		(1)
+#define PDP_INTSTAT_INTS_VBLNK0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_HBLNK1
+*/
+#define PDP_INTSTAT_INTS_HBLNK1_MASK		(0x00000002)
+#define PDP_INTSTAT_INTS_HBLNK1_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_HBLNK1_SHIFT		(1)
+#define PDP_INTSTAT_INTS_HBLNK1_LENGTH		(1)
+#define PDP_INTSTAT_INTS_HBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTSTAT, INTS_HBLNK0
+*/
+#define PDP_INTSTAT_INTS_HBLNK0_MASK		(0x00000001)
+#define PDP_INTSTAT_INTS_HBLNK0_LSBMASK		(0x00000001)
+#define PDP_INTSTAT_INTS_HBLNK0_SHIFT		(0)
+#define PDP_INTSTAT_INTS_HBLNK0_LENGTH		(1)
+#define PDP_INTSTAT_INTS_HBLNK0_SIGNED_FIELD	IMG_FALSE
+
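+/*
+ * Illustrative sketch (example only, compiled out): the _MASK/_SHIFT
+ * pairs in this header are typically combined through generic helpers
+ * like the hypothetical PDP_FIELD_GET/PDP_FIELD_SET below. These two
+ * macros are an assumption for illustration, not definitions that the
+ * original header provides.
+ */
+#if 0
+#define PDP_FIELD_GET(val, field) \
+	(((val) & field##_MASK) >> field##_SHIFT)
+#define PDP_FIELD_SET(val, field, x) \
+	(((val) & ~field##_MASK) | \
+	 (((x) << field##_SHIFT) & field##_MASK))
+
+/* e.g. test the VID4URUN status bit in an INTSTAT readback: */
+/* if (PDP_FIELD_GET(intstat, PDP_INTSTAT_INTS_VID4URUN)) ... */
+#endif
+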
+#define PDP_INTENAB_OFFSET		(0x07A0)
+
+/* PDP, INTENAB, INTEN_VID4ORUN
+*/
+#define PDP_INTENAB_INTEN_VID4ORUN_MASK		(0x00080000)
+#define PDP_INTENAB_INTEN_VID4ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID4ORUN_SHIFT		(19)
+#define PDP_INTENAB_INTEN_VID4ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID3ORUN
+*/
+#define PDP_INTENAB_INTEN_VID3ORUN_MASK		(0x00040000)
+#define PDP_INTENAB_INTEN_VID3ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID3ORUN_SHIFT		(18)
+#define PDP_INTENAB_INTEN_VID3ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID2ORUN
+*/
+#define PDP_INTENAB_INTEN_VID2ORUN_MASK		(0x00020000)
+#define PDP_INTENAB_INTEN_VID2ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID2ORUN_SHIFT		(17)
+#define PDP_INTENAB_INTEN_VID2ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID1ORUN
+*/
+#define PDP_INTENAB_INTEN_VID1ORUN_MASK		(0x00010000)
+#define PDP_INTENAB_INTEN_VID1ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID1ORUN_SHIFT		(16)
+#define PDP_INTENAB_INTEN_VID1ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH4ORUN
+*/
+#define PDP_INTENAB_INTEN_GRPH4ORUN_MASK		(0x00008000)
+#define PDP_INTENAB_INTEN_GRPH4ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH4ORUN_SHIFT		(15)
+#define PDP_INTENAB_INTEN_GRPH4ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH3ORUN
+*/
+#define PDP_INTENAB_INTEN_GRPH3ORUN_MASK		(0x00004000)
+#define PDP_INTENAB_INTEN_GRPH3ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH3ORUN_SHIFT		(14)
+#define PDP_INTENAB_INTEN_GRPH3ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH2ORUN
+*/
+#define PDP_INTENAB_INTEN_GRPH2ORUN_MASK		(0x00002000)
+#define PDP_INTENAB_INTEN_GRPH2ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH2ORUN_SHIFT		(13)
+#define PDP_INTENAB_INTEN_GRPH2ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH1ORUN
+*/
+#define PDP_INTENAB_INTEN_GRPH1ORUN_MASK		(0x00001000)
+#define PDP_INTENAB_INTEN_GRPH1ORUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH1ORUN_SHIFT		(12)
+#define PDP_INTENAB_INTEN_GRPH1ORUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID4URUN
+*/
+#define PDP_INTENAB_INTEN_VID4URUN_MASK		(0x00000800)
+#define PDP_INTENAB_INTEN_VID4URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID4URUN_SHIFT		(11)
+#define PDP_INTENAB_INTEN_VID4URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID3URUN
+*/
+#define PDP_INTENAB_INTEN_VID3URUN_MASK		(0x00000400)
+#define PDP_INTENAB_INTEN_VID3URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID3URUN_SHIFT		(10)
+#define PDP_INTENAB_INTEN_VID3URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID2URUN
+*/
+#define PDP_INTENAB_INTEN_VID2URUN_MASK		(0x00000200)
+#define PDP_INTENAB_INTEN_VID2URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID2URUN_SHIFT		(9)
+#define PDP_INTENAB_INTEN_VID2URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID1URUN
+*/
+#define PDP_INTENAB_INTEN_VID1URUN_MASK		(0x00000100)
+#define PDP_INTENAB_INTEN_VID1URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VID1URUN_SHIFT		(8)
+#define PDP_INTENAB_INTEN_VID1URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VID1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH4URUN
+*/
+#define PDP_INTENAB_INTEN_GRPH4URUN_MASK		(0x00000080)
+#define PDP_INTENAB_INTEN_GRPH4URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH4URUN_SHIFT		(7)
+#define PDP_INTENAB_INTEN_GRPH4URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH3URUN
+*/
+#define PDP_INTENAB_INTEN_GRPH3URUN_MASK		(0x00000040)
+#define PDP_INTENAB_INTEN_GRPH3URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH3URUN_SHIFT		(6)
+#define PDP_INTENAB_INTEN_GRPH3URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH2URUN
+*/
+#define PDP_INTENAB_INTEN_GRPH2URUN_MASK		(0x00000020)
+#define PDP_INTENAB_INTEN_GRPH2URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH2URUN_SHIFT		(5)
+#define PDP_INTENAB_INTEN_GRPH2URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH1URUN
+*/
+#define PDP_INTENAB_INTEN_GRPH1URUN_MASK		(0x00000010)
+#define PDP_INTENAB_INTEN_GRPH1URUN_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_GRPH1URUN_SHIFT		(4)
+#define PDP_INTENAB_INTEN_GRPH1URUN_LENGTH		(1)
+#define PDP_INTENAB_INTEN_GRPH1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VBLNK1
+*/
+#define PDP_INTENAB_INTEN_VBLNK1_MASK		(0x00000008)
+#define PDP_INTENAB_INTEN_VBLNK1_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VBLNK1_SHIFT		(3)
+#define PDP_INTENAB_INTEN_VBLNK1_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VBLNK0
+*/
+#define PDP_INTENAB_INTEN_VBLNK0_MASK		(0x00000004)
+#define PDP_INTENAB_INTEN_VBLNK0_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_VBLNK0_SHIFT		(2)
+#define PDP_INTENAB_INTEN_VBLNK0_LENGTH		(1)
+#define PDP_INTENAB_INTEN_VBLNK0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_HBLNK1
+*/
+#define PDP_INTENAB_INTEN_HBLNK1_MASK		(0x00000002)
+#define PDP_INTENAB_INTEN_HBLNK1_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_HBLNK1_SHIFT		(1)
+#define PDP_INTENAB_INTEN_HBLNK1_LENGTH		(1)
+#define PDP_INTENAB_INTEN_HBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTENAB, INTEN_HBLNK0
+*/
+#define PDP_INTENAB_INTEN_HBLNK0_MASK		(0x00000001)
+#define PDP_INTENAB_INTEN_HBLNK0_LSBMASK		(0x00000001)
+#define PDP_INTENAB_INTEN_HBLNK0_SHIFT		(0)
+#define PDP_INTENAB_INTEN_HBLNK0_LENGTH		(1)
+#define PDP_INTENAB_INTEN_HBLNK0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_INTCLR_OFFSET		(0x07A4)
+
+/* PDP, INTCLR, INTCLR_VID4ORUN
+*/
+#define PDP_INTCLR_INTCLR_VID4ORUN_MASK		(0x00080000)
+#define PDP_INTCLR_INTCLR_VID4ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID4ORUN_SHIFT		(19)
+#define PDP_INTCLR_INTCLR_VID4ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID3ORUN
+*/
+#define PDP_INTCLR_INTCLR_VID3ORUN_MASK		(0x00040000)
+#define PDP_INTCLR_INTCLR_VID3ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID3ORUN_SHIFT		(18)
+#define PDP_INTCLR_INTCLR_VID3ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID2ORUN
+*/
+#define PDP_INTCLR_INTCLR_VID2ORUN_MASK		(0x00020000)
+#define PDP_INTCLR_INTCLR_VID2ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID2ORUN_SHIFT		(17)
+#define PDP_INTCLR_INTCLR_VID2ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID1ORUN
+*/
+#define PDP_INTCLR_INTCLR_VID1ORUN_MASK		(0x00010000)
+#define PDP_INTCLR_INTCLR_VID1ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID1ORUN_SHIFT		(16)
+#define PDP_INTCLR_INTCLR_VID1ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH4ORUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH4ORUN_MASK		(0x00008000)
+#define PDP_INTCLR_INTCLR_GRPH4ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH4ORUN_SHIFT		(15)
+#define PDP_INTCLR_INTCLR_GRPH4ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH4ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH3ORUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH3ORUN_MASK		(0x00004000)
+#define PDP_INTCLR_INTCLR_GRPH3ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH3ORUN_SHIFT		(14)
+#define PDP_INTCLR_INTCLR_GRPH3ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH3ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH2ORUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH2ORUN_MASK		(0x00002000)
+#define PDP_INTCLR_INTCLR_GRPH2ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH2ORUN_SHIFT		(13)
+#define PDP_INTCLR_INTCLR_GRPH2ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH2ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH1ORUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH1ORUN_MASK		(0x00001000)
+#define PDP_INTCLR_INTCLR_GRPH1ORUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH1ORUN_SHIFT		(12)
+#define PDP_INTCLR_INTCLR_GRPH1ORUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH1ORUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID4URUN
+*/
+#define PDP_INTCLR_INTCLR_VID4URUN_MASK		(0x00000800)
+#define PDP_INTCLR_INTCLR_VID4URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID4URUN_SHIFT		(11)
+#define PDP_INTCLR_INTCLR_VID4URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID3URUN
+*/
+#define PDP_INTCLR_INTCLR_VID3URUN_MASK		(0x00000400)
+#define PDP_INTCLR_INTCLR_VID3URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID3URUN_SHIFT		(10)
+#define PDP_INTCLR_INTCLR_VID3URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID2URUN
+*/
+#define PDP_INTCLR_INTCLR_VID2URUN_MASK		(0x00000200)
+#define PDP_INTCLR_INTCLR_VID2URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID2URUN_SHIFT		(9)
+#define PDP_INTCLR_INTCLR_VID2URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID1URUN
+*/
+#define PDP_INTCLR_INTCLR_VID1URUN_MASK		(0x00000100)
+#define PDP_INTCLR_INTCLR_VID1URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VID1URUN_SHIFT		(8)
+#define PDP_INTCLR_INTCLR_VID1URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VID1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH4URUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH4URUN_MASK		(0x00000080)
+#define PDP_INTCLR_INTCLR_GRPH4URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH4URUN_SHIFT		(7)
+#define PDP_INTCLR_INTCLR_GRPH4URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH4URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH3URUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH3URUN_MASK		(0x00000040)
+#define PDP_INTCLR_INTCLR_GRPH3URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH3URUN_SHIFT		(6)
+#define PDP_INTCLR_INTCLR_GRPH3URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH3URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH2URUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH2URUN_MASK		(0x00000020)
+#define PDP_INTCLR_INTCLR_GRPH2URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH2URUN_SHIFT		(5)
+#define PDP_INTCLR_INTCLR_GRPH2URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH2URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH1URUN
+*/
+#define PDP_INTCLR_INTCLR_GRPH1URUN_MASK		(0x00000010)
+#define PDP_INTCLR_INTCLR_GRPH1URUN_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_GRPH1URUN_SHIFT		(4)
+#define PDP_INTCLR_INTCLR_GRPH1URUN_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_GRPH1URUN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VBLNK1
+*/
+#define PDP_INTCLR_INTCLR_VBLNK1_MASK		(0x00000008)
+#define PDP_INTCLR_INTCLR_VBLNK1_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VBLNK1_SHIFT		(3)
+#define PDP_INTCLR_INTCLR_VBLNK1_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VBLNK0
+*/
+#define PDP_INTCLR_INTCLR_VBLNK0_MASK		(0x00000004)
+#define PDP_INTCLR_INTCLR_VBLNK0_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_VBLNK0_SHIFT		(2)
+#define PDP_INTCLR_INTCLR_VBLNK0_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_VBLNK0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_HBLNK1
+*/
+#define PDP_INTCLR_INTCLR_HBLNK1_MASK		(0x00000002)
+#define PDP_INTCLR_INTCLR_HBLNK1_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_HBLNK1_SHIFT		(1)
+#define PDP_INTCLR_INTCLR_HBLNK1_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_HBLNK1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_HBLNK0
+*/
+#define PDP_INTCLR_INTCLR_HBLNK0_MASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_HBLNK0_LSBMASK		(0x00000001)
+#define PDP_INTCLR_INTCLR_HBLNK0_SHIFT		(0)
+#define PDP_INTCLR_INTCLR_HBLNK0_LENGTH		(1)
+#define PDP_INTCLR_INTCLR_HBLNK0_SIGNED_FIELD	IMG_FALSE
+
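+/*
+ * Illustrative sketch (example only, compiled out): a minimal VBLNK0
+ * enable/acknowledge path built on the INTSTAT/INTENAB/INTCLR fields
+ * above. Assumes readl()/writel() from <linux/io.h> and that
+ * PDP_INTSTAT_OFFSET is defined earlier in this header alongside its
+ * field macros.
+ */
+#if 0
+static void pdp_enable_and_ack_vblank0(void __iomem *base)
+{
+	u32 en, stat;
+
+	/* Unmask VBLNK0 without disturbing the other enable bits. */
+	en = readl(base + PDP_INTENAB_OFFSET);
+	writel(en | PDP_INTENAB_INTEN_VBLNK0_MASK, base + PDP_INTENAB_OFFSET);
+
+	/* On interrupt: check status, then acknowledge via INTCLR. */
+	stat = readl(base + PDP_INTSTAT_OFFSET);
+	if (stat & PDP_INTSTAT_INTS_VBLNK0_MASK)
+		writel(PDP_INTCLR_INTCLR_VBLNK0_MASK,
+		       base + PDP_INTCLR_OFFSET);
+}
+#endif
+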
+#define PDP_MEMCTRL_OFFSET		(0x07A8)
+
+/* PDP, MEMCTRL, MEMREFRESH
+*/
+#define PDP_MEMCTRL_MEMREFRESH_MASK		(0xC0000000)
+#define PDP_MEMCTRL_MEMREFRESH_LSBMASK		(0x00000003)
+#define PDP_MEMCTRL_MEMREFRESH_SHIFT		(30)
+#define PDP_MEMCTRL_MEMREFRESH_LENGTH		(2)
+#define PDP_MEMCTRL_MEMREFRESH_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, MEMCTRL, BURSTLEN
+*/
+#define PDP_MEMCTRL_BURSTLEN_MASK		(0x000000FF)
+#define PDP_MEMCTRL_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_MEMCTRL_BURSTLEN_SHIFT		(0)
+#define PDP_MEMCTRL_BURSTLEN_LENGTH		(8)
+#define PDP_MEMCTRL_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_MEM_THRESH_OFFSET		(0x07AC)
+
+/* PDP, MEM_THRESH, UVTHRESHOLD
+*/
+#define PDP_MEM_THRESH_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_MEM_THRESH_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_MEM_THRESH_UVTHRESHOLD_SHIFT		(24)
+#define PDP_MEM_THRESH_UVTHRESHOLD_LENGTH		(8)
+#define PDP_MEM_THRESH_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, MEM_THRESH, YTHRESHOLD
+*/
+#define PDP_MEM_THRESH_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_MEM_THRESH_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_MEM_THRESH_YTHRESHOLD_SHIFT		(12)
+#define PDP_MEM_THRESH_YTHRESHOLD_LENGTH		(9)
+#define PDP_MEM_THRESH_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, MEM_THRESH, THRESHOLD
+*/
+#define PDP_MEM_THRESH_THRESHOLD_MASK		(0x000001FF)
+#define PDP_MEM_THRESH_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_MEM_THRESH_THRESHOLD_SHIFT		(0)
+#define PDP_MEM_THRESH_THRESHOLD_LENGTH		(9)
+#define PDP_MEM_THRESH_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
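+/*
+ * Illustrative sketch (example only, compiled out): a read-modify-write
+ * of the MEMREFRESH field, showing how the _LSBMASK bounds the unshifted
+ * value while the _MASK clears the field in place. Assumes
+ * readl()/writel() from <linux/io.h>.
+ */
+#if 0
+static void pdp_set_memrefresh(void __iomem *base, u32 mode)
+{
+	u32 val = readl(base + PDP_MEMCTRL_OFFSET);
+
+	val &= ~PDP_MEMCTRL_MEMREFRESH_MASK;
+	val |= (mode & PDP_MEMCTRL_MEMREFRESH_LSBMASK)
+		<< PDP_MEMCTRL_MEMREFRESH_SHIFT;
+	writel(val, base + PDP_MEMCTRL_OFFSET);
+}
+#endif
+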
+#define PDP_ALTERNATE_3D_CTRL_OFFSET		(0x07B0)
+
+/* PDP, ALTERNATE_3D_CTRL, ALT3D_ON
+*/
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_MASK		(0x00000010)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LSBMASK		(0x00000001)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SHIFT		(4)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LENGTH		(1)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, ALTERNATE_3D_CTRL, ALT3D_BLENDSEL
+*/
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_MASK		(0x00000007)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LSBMASK		(0x00000007)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SHIFT		(0)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LENGTH		(3)
+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA0_R_OFFSET		(0x07B4)
+
+/* PDP, GAMMA0_R, GAMMA0_R
+*/
+#define PDP_GAMMA0_R_GAMMA0_R_MASK		(0x000003FF)
+#define PDP_GAMMA0_R_GAMMA0_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA0_R_GAMMA0_R_SHIFT		(0)
+#define PDP_GAMMA0_R_GAMMA0_R_LENGTH		(10)
+#define PDP_GAMMA0_R_GAMMA0_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA0_GB_OFFSET		(0x07B8)
+
+/* PDP, GAMMA0_GB, GAMMA0_G
+*/
+#define PDP_GAMMA0_GB_GAMMA0_G_MASK		(0x03FF0000)
+#define PDP_GAMMA0_GB_GAMMA0_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA0_GB_GAMMA0_G_SHIFT		(16)
+#define PDP_GAMMA0_GB_GAMMA0_G_LENGTH		(10)
+#define PDP_GAMMA0_GB_GAMMA0_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA0_GB, GAMMA0_B
+*/
+#define PDP_GAMMA0_GB_GAMMA0_B_MASK		(0x000003FF)
+#define PDP_GAMMA0_GB_GAMMA0_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA0_GB_GAMMA0_B_SHIFT		(0)
+#define PDP_GAMMA0_GB_GAMMA0_B_LENGTH		(10)
+#define PDP_GAMMA0_GB_GAMMA0_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA1_R_OFFSET		(0x07BC)
+
+/* PDP, GAMMA1_R, GAMMA1_R
+*/
+#define PDP_GAMMA1_R_GAMMA1_R_MASK		(0x000003FF)
+#define PDP_GAMMA1_R_GAMMA1_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA1_R_GAMMA1_R_SHIFT		(0)
+#define PDP_GAMMA1_R_GAMMA1_R_LENGTH		(10)
+#define PDP_GAMMA1_R_GAMMA1_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA1_GB_OFFSET		(0x07C0)
+
+/* PDP, GAMMA1_GB, GAMMA1_G
+*/
+#define PDP_GAMMA1_GB_GAMMA1_G_MASK		(0x03FF0000)
+#define PDP_GAMMA1_GB_GAMMA1_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA1_GB_GAMMA1_G_SHIFT		(16)
+#define PDP_GAMMA1_GB_GAMMA1_G_LENGTH		(10)
+#define PDP_GAMMA1_GB_GAMMA1_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA1_GB, GAMMA1_B
+*/
+#define PDP_GAMMA1_GB_GAMMA1_B_MASK		(0x000003FF)
+#define PDP_GAMMA1_GB_GAMMA1_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA1_GB_GAMMA1_B_SHIFT		(0)
+#define PDP_GAMMA1_GB_GAMMA1_B_LENGTH		(10)
+#define PDP_GAMMA1_GB_GAMMA1_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA2_R_OFFSET		(0x07C4)
+
+/* PDP, GAMMA2_R, GAMMA2_R
+*/
+#define PDP_GAMMA2_R_GAMMA2_R_MASK		(0x000003FF)
+#define PDP_GAMMA2_R_GAMMA2_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA2_R_GAMMA2_R_SHIFT		(0)
+#define PDP_GAMMA2_R_GAMMA2_R_LENGTH		(10)
+#define PDP_GAMMA2_R_GAMMA2_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA2_GB_OFFSET		(0x07C8)
+
+/* PDP, GAMMA2_GB, GAMMA2_G
+*/
+#define PDP_GAMMA2_GB_GAMMA2_G_MASK		(0x03FF0000)
+#define PDP_GAMMA2_GB_GAMMA2_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA2_GB_GAMMA2_G_SHIFT		(16)
+#define PDP_GAMMA2_GB_GAMMA2_G_LENGTH		(10)
+#define PDP_GAMMA2_GB_GAMMA2_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA2_GB, GAMMA2_B
+*/
+#define PDP_GAMMA2_GB_GAMMA2_B_MASK		(0x000003FF)
+#define PDP_GAMMA2_GB_GAMMA2_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA2_GB_GAMMA2_B_SHIFT		(0)
+#define PDP_GAMMA2_GB_GAMMA2_B_LENGTH		(10)
+#define PDP_GAMMA2_GB_GAMMA2_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA3_R_OFFSET		(0x07CC)
+
+/* PDP, GAMMA3_R, GAMMA3_R
+*/
+#define PDP_GAMMA3_R_GAMMA3_R_MASK		(0x000003FF)
+#define PDP_GAMMA3_R_GAMMA3_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA3_R_GAMMA3_R_SHIFT		(0)
+#define PDP_GAMMA3_R_GAMMA3_R_LENGTH		(10)
+#define PDP_GAMMA3_R_GAMMA3_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA3_GB_OFFSET		(0x07D0)
+
+/* PDP, GAMMA3_GB, GAMMA3_G
+*/
+#define PDP_GAMMA3_GB_GAMMA3_G_MASK		(0x03FF0000)
+#define PDP_GAMMA3_GB_GAMMA3_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA3_GB_GAMMA3_G_SHIFT		(16)
+#define PDP_GAMMA3_GB_GAMMA3_G_LENGTH		(10)
+#define PDP_GAMMA3_GB_GAMMA3_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA3_GB, GAMMA3_B
+*/
+#define PDP_GAMMA3_GB_GAMMA3_B_MASK		(0x000003FF)
+#define PDP_GAMMA3_GB_GAMMA3_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA3_GB_GAMMA3_B_SHIFT		(0)
+#define PDP_GAMMA3_GB_GAMMA3_B_LENGTH		(10)
+#define PDP_GAMMA3_GB_GAMMA3_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA4_R_OFFSET		(0x07D4)
+
+/* PDP, GAMMA4_R, GAMMA4_R
+*/
+#define PDP_GAMMA4_R_GAMMA4_R_MASK		(0x000003FF)
+#define PDP_GAMMA4_R_GAMMA4_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA4_R_GAMMA4_R_SHIFT		(0)
+#define PDP_GAMMA4_R_GAMMA4_R_LENGTH		(10)
+#define PDP_GAMMA4_R_GAMMA4_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA4_GB_OFFSET		(0x07D8)
+
+/* PDP, GAMMA4_GB, GAMMA4_G
+*/
+#define PDP_GAMMA4_GB_GAMMA4_G_MASK		(0x03FF0000)
+#define PDP_GAMMA4_GB_GAMMA4_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA4_GB_GAMMA4_G_SHIFT		(16)
+#define PDP_GAMMA4_GB_GAMMA4_G_LENGTH		(10)
+#define PDP_GAMMA4_GB_GAMMA4_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA4_GB, GAMMA4_B
+*/
+#define PDP_GAMMA4_GB_GAMMA4_B_MASK		(0x000003FF)
+#define PDP_GAMMA4_GB_GAMMA4_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA4_GB_GAMMA4_B_SHIFT		(0)
+#define PDP_GAMMA4_GB_GAMMA4_B_LENGTH		(10)
+#define PDP_GAMMA4_GB_GAMMA4_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA5_R_OFFSET		(0x07DC)
+
+/* PDP, GAMMA5_R, GAMMA5_R
+*/
+#define PDP_GAMMA5_R_GAMMA5_R_MASK		(0x000003FF)
+#define PDP_GAMMA5_R_GAMMA5_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA5_R_GAMMA5_R_SHIFT		(0)
+#define PDP_GAMMA5_R_GAMMA5_R_LENGTH		(10)
+#define PDP_GAMMA5_R_GAMMA5_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA5_GB_OFFSET		(0x07E0)
+
+/* PDP, GAMMA5_GB, GAMMA5_G
+*/
+#define PDP_GAMMA5_GB_GAMMA5_G_MASK		(0x03FF0000)
+#define PDP_GAMMA5_GB_GAMMA5_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA5_GB_GAMMA5_G_SHIFT		(16)
+#define PDP_GAMMA5_GB_GAMMA5_G_LENGTH		(10)
+#define PDP_GAMMA5_GB_GAMMA5_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA5_GB, GAMMA5_B
+*/
+#define PDP_GAMMA5_GB_GAMMA5_B_MASK		(0x000003FF)
+#define PDP_GAMMA5_GB_GAMMA5_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA5_GB_GAMMA5_B_SHIFT		(0)
+#define PDP_GAMMA5_GB_GAMMA5_B_LENGTH		(10)
+#define PDP_GAMMA5_GB_GAMMA5_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA6_R_OFFSET		(0x07E4)
+
+/* PDP, GAMMA6_R, GAMMA6_R
+*/
+#define PDP_GAMMA6_R_GAMMA6_R_MASK		(0x000003FF)
+#define PDP_GAMMA6_R_GAMMA6_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA6_R_GAMMA6_R_SHIFT		(0)
+#define PDP_GAMMA6_R_GAMMA6_R_LENGTH		(10)
+#define PDP_GAMMA6_R_GAMMA6_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA6_GB_OFFSET		(0x07E8)
+
+/* PDP, GAMMA6_GB, GAMMA6_G
+*/
+#define PDP_GAMMA6_GB_GAMMA6_G_MASK		(0x03FF0000)
+#define PDP_GAMMA6_GB_GAMMA6_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA6_GB_GAMMA6_G_SHIFT		(16)
+#define PDP_GAMMA6_GB_GAMMA6_G_LENGTH		(10)
+#define PDP_GAMMA6_GB_GAMMA6_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA6_GB, GAMMA6_B
+*/
+#define PDP_GAMMA6_GB_GAMMA6_B_MASK		(0x000003FF)
+#define PDP_GAMMA6_GB_GAMMA6_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA6_GB_GAMMA6_B_SHIFT		(0)
+#define PDP_GAMMA6_GB_GAMMA6_B_LENGTH		(10)
+#define PDP_GAMMA6_GB_GAMMA6_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA7_R_OFFSET		(0x07EC)
+
+/* PDP, GAMMA7_R, GAMMA7_R
+*/
+#define PDP_GAMMA7_R_GAMMA7_R_MASK		(0x000003FF)
+#define PDP_GAMMA7_R_GAMMA7_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA7_R_GAMMA7_R_SHIFT		(0)
+#define PDP_GAMMA7_R_GAMMA7_R_LENGTH		(10)
+#define PDP_GAMMA7_R_GAMMA7_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA7_GB_OFFSET		(0x07F0)
+
+/* PDP, GAMMA7_GB, GAMMA7_G
+*/
+#define PDP_GAMMA7_GB_GAMMA7_G_MASK		(0x03FF0000)
+#define PDP_GAMMA7_GB_GAMMA7_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA7_GB_GAMMA7_G_SHIFT		(16)
+#define PDP_GAMMA7_GB_GAMMA7_G_LENGTH		(10)
+#define PDP_GAMMA7_GB_GAMMA7_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA7_GB, GAMMA7_B
+*/
+#define PDP_GAMMA7_GB_GAMMA7_B_MASK		(0x000003FF)
+#define PDP_GAMMA7_GB_GAMMA7_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA7_GB_GAMMA7_B_SHIFT		(0)
+#define PDP_GAMMA7_GB_GAMMA7_B_LENGTH		(10)
+#define PDP_GAMMA7_GB_GAMMA7_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA8_R_OFFSET		(0x07F4)
+
+/* PDP, GAMMA8_R, GAMMA8_R
+*/
+#define PDP_GAMMA8_R_GAMMA8_R_MASK		(0x000003FF)
+#define PDP_GAMMA8_R_GAMMA8_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA8_R_GAMMA8_R_SHIFT		(0)
+#define PDP_GAMMA8_R_GAMMA8_R_LENGTH		(10)
+#define PDP_GAMMA8_R_GAMMA8_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA8_GB_OFFSET		(0x07F8)
+
+/* PDP, GAMMA8_GB, GAMMA8_G
+*/
+#define PDP_GAMMA8_GB_GAMMA8_G_MASK		(0x03FF0000)
+#define PDP_GAMMA8_GB_GAMMA8_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA8_GB_GAMMA8_G_SHIFT		(16)
+#define PDP_GAMMA8_GB_GAMMA8_G_LENGTH		(10)
+#define PDP_GAMMA8_GB_GAMMA8_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA8_GB, GAMMA8_B
+*/
+#define PDP_GAMMA8_GB_GAMMA8_B_MASK		(0x000003FF)
+#define PDP_GAMMA8_GB_GAMMA8_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA8_GB_GAMMA8_B_SHIFT		(0)
+#define PDP_GAMMA8_GB_GAMMA8_B_LENGTH		(10)
+#define PDP_GAMMA8_GB_GAMMA8_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA9_R_OFFSET		(0x07FC)
+
+/* PDP, GAMMA9_R, GAMMA9_R
+*/
+#define PDP_GAMMA9_R_GAMMA9_R_MASK		(0x000003FF)
+#define PDP_GAMMA9_R_GAMMA9_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA9_R_GAMMA9_R_SHIFT		(0)
+#define PDP_GAMMA9_R_GAMMA9_R_LENGTH		(10)
+#define PDP_GAMMA9_R_GAMMA9_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA9_GB_OFFSET		(0x0800)
+
+/* PDP, GAMMA9_GB, GAMMA9_G
+*/
+#define PDP_GAMMA9_GB_GAMMA9_G_MASK		(0x03FF0000)
+#define PDP_GAMMA9_GB_GAMMA9_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA9_GB_GAMMA9_G_SHIFT		(16)
+#define PDP_GAMMA9_GB_GAMMA9_G_LENGTH		(10)
+#define PDP_GAMMA9_GB_GAMMA9_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA9_GB, GAMMA9_B
+*/
+#define PDP_GAMMA9_GB_GAMMA9_B_MASK		(0x000003FF)
+#define PDP_GAMMA9_GB_GAMMA9_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA9_GB_GAMMA9_B_SHIFT		(0)
+#define PDP_GAMMA9_GB_GAMMA9_B_LENGTH		(10)
+#define PDP_GAMMA9_GB_GAMMA9_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA10_R_OFFSET		(0x0804)
+
+/* PDP, GAMMA10_R, GAMMA10_R
+*/
+#define PDP_GAMMA10_R_GAMMA10_R_MASK		(0x000003FF)
+#define PDP_GAMMA10_R_GAMMA10_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA10_R_GAMMA10_R_SHIFT		(0)
+#define PDP_GAMMA10_R_GAMMA10_R_LENGTH		(10)
+#define PDP_GAMMA10_R_GAMMA10_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA10_GB_OFFSET		(0x0808)
+
+/* PDP, GAMMA10_GB, GAMMA10_G
+*/
+#define PDP_GAMMA10_GB_GAMMA10_G_MASK		(0x03FF0000)
+#define PDP_GAMMA10_GB_GAMMA10_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA10_GB_GAMMA10_G_SHIFT		(16)
+#define PDP_GAMMA10_GB_GAMMA10_G_LENGTH		(10)
+#define PDP_GAMMA10_GB_GAMMA10_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA10_GB, GAMMA10_B
+*/
+#define PDP_GAMMA10_GB_GAMMA10_B_MASK		(0x000003FF)
+#define PDP_GAMMA10_GB_GAMMA10_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA10_GB_GAMMA10_B_SHIFT		(0)
+#define PDP_GAMMA10_GB_GAMMA10_B_LENGTH		(10)
+#define PDP_GAMMA10_GB_GAMMA10_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA11_R_OFFSET		(0x080C)
+
+/* PDP, GAMMA11_R, GAMMA11_R
+*/
+#define PDP_GAMMA11_R_GAMMA11_R_MASK		(0x000003FF)
+#define PDP_GAMMA11_R_GAMMA11_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA11_R_GAMMA11_R_SHIFT		(0)
+#define PDP_GAMMA11_R_GAMMA11_R_LENGTH		(10)
+#define PDP_GAMMA11_R_GAMMA11_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA11_GB_OFFSET		(0x0810)
+
+/* PDP, GAMMA11_GB, GAMMA11_G
+*/
+#define PDP_GAMMA11_GB_GAMMA11_G_MASK		(0x03FF0000)
+#define PDP_GAMMA11_GB_GAMMA11_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA11_GB_GAMMA11_G_SHIFT		(16)
+#define PDP_GAMMA11_GB_GAMMA11_G_LENGTH		(10)
+#define PDP_GAMMA11_GB_GAMMA11_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA11_GB, GAMMA11_B
+*/
+#define PDP_GAMMA11_GB_GAMMA11_B_MASK		(0x000003FF)
+#define PDP_GAMMA11_GB_GAMMA11_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA11_GB_GAMMA11_B_SHIFT		(0)
+#define PDP_GAMMA11_GB_GAMMA11_B_LENGTH		(10)
+#define PDP_GAMMA11_GB_GAMMA11_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA12_R_OFFSET		(0x0814)
+
+/* PDP, GAMMA12_R, GAMMA12_R
+*/
+#define PDP_GAMMA12_R_GAMMA12_R_MASK		(0x000003FF)
+#define PDP_GAMMA12_R_GAMMA12_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA12_R_GAMMA12_R_SHIFT		(0)
+#define PDP_GAMMA12_R_GAMMA12_R_LENGTH		(10)
+#define PDP_GAMMA12_R_GAMMA12_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA12_GB_OFFSET		(0x0818)
+
+/* PDP, GAMMA12_GB, GAMMA12_G
+*/
+#define PDP_GAMMA12_GB_GAMMA12_G_MASK		(0x03FF0000)
+#define PDP_GAMMA12_GB_GAMMA12_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA12_GB_GAMMA12_G_SHIFT		(16)
+#define PDP_GAMMA12_GB_GAMMA12_G_LENGTH		(10)
+#define PDP_GAMMA12_GB_GAMMA12_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA12_GB, GAMMA12_B
+*/
+#define PDP_GAMMA12_GB_GAMMA12_B_MASK		(0x000003FF)
+#define PDP_GAMMA12_GB_GAMMA12_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA12_GB_GAMMA12_B_SHIFT		(0)
+#define PDP_GAMMA12_GB_GAMMA12_B_LENGTH		(10)
+#define PDP_GAMMA12_GB_GAMMA12_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA13_R_OFFSET		(0x081C)
+
+/* PDP, GAMMA13_R, GAMMA13_R
+*/
+#define PDP_GAMMA13_R_GAMMA13_R_MASK		(0x000003FF)
+#define PDP_GAMMA13_R_GAMMA13_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA13_R_GAMMA13_R_SHIFT		(0)
+#define PDP_GAMMA13_R_GAMMA13_R_LENGTH		(10)
+#define PDP_GAMMA13_R_GAMMA13_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA13_GB_OFFSET		(0x0820)
+
+/* PDP, GAMMA13_GB, GAMMA13_G
+*/
+#define PDP_GAMMA13_GB_GAMMA13_G_MASK		(0x03FF0000)
+#define PDP_GAMMA13_GB_GAMMA13_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA13_GB_GAMMA13_G_SHIFT		(16)
+#define PDP_GAMMA13_GB_GAMMA13_G_LENGTH		(10)
+#define PDP_GAMMA13_GB_GAMMA13_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA13_GB, GAMMA13_B
+*/
+#define PDP_GAMMA13_GB_GAMMA13_B_MASK		(0x000003FF)
+#define PDP_GAMMA13_GB_GAMMA13_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA13_GB_GAMMA13_B_SHIFT		(0)
+#define PDP_GAMMA13_GB_GAMMA13_B_LENGTH		(10)
+#define PDP_GAMMA13_GB_GAMMA13_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA14_R_OFFSET		(0x0824)
+
+/* PDP, GAMMA14_R, GAMMA14_R
+*/
+#define PDP_GAMMA14_R_GAMMA14_R_MASK		(0x000003FF)
+#define PDP_GAMMA14_R_GAMMA14_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA14_R_GAMMA14_R_SHIFT		(0)
+#define PDP_GAMMA14_R_GAMMA14_R_LENGTH		(10)
+#define PDP_GAMMA14_R_GAMMA14_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA14_GB_OFFSET		(0x0828)
+
+/* PDP, GAMMA14_GB, GAMMA14_G
+*/
+#define PDP_GAMMA14_GB_GAMMA14_G_MASK		(0x03FF0000)
+#define PDP_GAMMA14_GB_GAMMA14_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA14_GB_GAMMA14_G_SHIFT		(16)
+#define PDP_GAMMA14_GB_GAMMA14_G_LENGTH		(10)
+#define PDP_GAMMA14_GB_GAMMA14_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA14_GB, GAMMA14_B
+*/
+#define PDP_GAMMA14_GB_GAMMA14_B_MASK		(0x000003FF)
+#define PDP_GAMMA14_GB_GAMMA14_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA14_GB_GAMMA14_B_SHIFT		(0)
+#define PDP_GAMMA14_GB_GAMMA14_B_LENGTH		(10)
+#define PDP_GAMMA14_GB_GAMMA14_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA15_R_OFFSET		(0x082C)
+
+/* PDP, GAMMA15_R, GAMMA15_R
+*/
+#define PDP_GAMMA15_R_GAMMA15_R_MASK		(0x000003FF)
+#define PDP_GAMMA15_R_GAMMA15_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA15_R_GAMMA15_R_SHIFT		(0)
+#define PDP_GAMMA15_R_GAMMA15_R_LENGTH		(10)
+#define PDP_GAMMA15_R_GAMMA15_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA15_GB_OFFSET		(0x0830)
+
+/* PDP, GAMMA15_GB, GAMMA15_G
+*/
+#define PDP_GAMMA15_GB_GAMMA15_G_MASK		(0x03FF0000)
+#define PDP_GAMMA15_GB_GAMMA15_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA15_GB_GAMMA15_G_SHIFT		(16)
+#define PDP_GAMMA15_GB_GAMMA15_G_LENGTH		(10)
+#define PDP_GAMMA15_GB_GAMMA15_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA15_GB, GAMMA15_B
+*/
+#define PDP_GAMMA15_GB_GAMMA15_B_MASK		(0x000003FF)
+#define PDP_GAMMA15_GB_GAMMA15_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA15_GB_GAMMA15_B_SHIFT		(0)
+#define PDP_GAMMA15_GB_GAMMA15_B_LENGTH		(10)
+#define PDP_GAMMA15_GB_GAMMA15_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA16_R_OFFSET		(0x0834)
+
+/* PDP, GAMMA16_R, GAMMA16_R
+*/
+#define PDP_GAMMA16_R_GAMMA16_R_MASK		(0x000003FF)
+#define PDP_GAMMA16_R_GAMMA16_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA16_R_GAMMA16_R_SHIFT		(0)
+#define PDP_GAMMA16_R_GAMMA16_R_LENGTH		(10)
+#define PDP_GAMMA16_R_GAMMA16_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA16_GB_OFFSET		(0x0838)
+
+/* PDP, GAMMA16_GB, GAMMA16_G
+*/
+#define PDP_GAMMA16_GB_GAMMA16_G_MASK		(0x03FF0000)
+#define PDP_GAMMA16_GB_GAMMA16_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA16_GB_GAMMA16_G_SHIFT		(16)
+#define PDP_GAMMA16_GB_GAMMA16_G_LENGTH		(10)
+#define PDP_GAMMA16_GB_GAMMA16_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA16_GB, GAMMA16_B
+*/
+#define PDP_GAMMA16_GB_GAMMA16_B_MASK		(0x000003FF)
+#define PDP_GAMMA16_GB_GAMMA16_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA16_GB_GAMMA16_B_SHIFT		(0)
+#define PDP_GAMMA16_GB_GAMMA16_B_LENGTH		(10)
+#define PDP_GAMMA16_GB_GAMMA16_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA17_R_OFFSET		(0x083C)
+
+/* PDP, GAMMA17_R, GAMMA17_R
+*/
+#define PDP_GAMMA17_R_GAMMA17_R_MASK		(0x000003FF)
+#define PDP_GAMMA17_R_GAMMA17_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA17_R_GAMMA17_R_SHIFT		(0)
+#define PDP_GAMMA17_R_GAMMA17_R_LENGTH		(10)
+#define PDP_GAMMA17_R_GAMMA17_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA17_GB_OFFSET		(0x0840)
+
+/* PDP, GAMMA17_GB, GAMMA17_G
+*/
+#define PDP_GAMMA17_GB_GAMMA17_G_MASK		(0x03FF0000)
+#define PDP_GAMMA17_GB_GAMMA17_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA17_GB_GAMMA17_G_SHIFT		(16)
+#define PDP_GAMMA17_GB_GAMMA17_G_LENGTH		(10)
+#define PDP_GAMMA17_GB_GAMMA17_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA17_GB, GAMMA17_B
+*/
+#define PDP_GAMMA17_GB_GAMMA17_B_MASK		(0x000003FF)
+#define PDP_GAMMA17_GB_GAMMA17_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA17_GB_GAMMA17_B_SHIFT		(0)
+#define PDP_GAMMA17_GB_GAMMA17_B_LENGTH		(10)
+#define PDP_GAMMA17_GB_GAMMA17_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA18_R_OFFSET		(0x0844)
+
+/* PDP, GAMMA18_R, GAMMA18_R
+*/
+#define PDP_GAMMA18_R_GAMMA18_R_MASK		(0x000003FF)
+#define PDP_GAMMA18_R_GAMMA18_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA18_R_GAMMA18_R_SHIFT		(0)
+#define PDP_GAMMA18_R_GAMMA18_R_LENGTH		(10)
+#define PDP_GAMMA18_R_GAMMA18_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA18_GB_OFFSET		(0x0848)
+
+/* PDP, GAMMA18_GB, GAMMA18_G
+*/
+#define PDP_GAMMA18_GB_GAMMA18_G_MASK		(0x03FF0000)
+#define PDP_GAMMA18_GB_GAMMA18_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA18_GB_GAMMA18_G_SHIFT		(16)
+#define PDP_GAMMA18_GB_GAMMA18_G_LENGTH		(10)
+#define PDP_GAMMA18_GB_GAMMA18_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA18_GB, GAMMA18_B
+*/
+#define PDP_GAMMA18_GB_GAMMA18_B_MASK		(0x000003FF)
+#define PDP_GAMMA18_GB_GAMMA18_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA18_GB_GAMMA18_B_SHIFT		(0)
+#define PDP_GAMMA18_GB_GAMMA18_B_LENGTH		(10)
+#define PDP_GAMMA18_GB_GAMMA18_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA19_R_OFFSET		(0x084C)
+
+/* PDP, GAMMA19_R, GAMMA19_R
+*/
+#define PDP_GAMMA19_R_GAMMA19_R_MASK		(0x000003FF)
+#define PDP_GAMMA19_R_GAMMA19_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA19_R_GAMMA19_R_SHIFT		(0)
+#define PDP_GAMMA19_R_GAMMA19_R_LENGTH		(10)
+#define PDP_GAMMA19_R_GAMMA19_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA19_GB_OFFSET		(0x0850)
+
+/* PDP, GAMMA19_GB, GAMMA19_G
+*/
+#define PDP_GAMMA19_GB_GAMMA19_G_MASK		(0x03FF0000)
+#define PDP_GAMMA19_GB_GAMMA19_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA19_GB_GAMMA19_G_SHIFT		(16)
+#define PDP_GAMMA19_GB_GAMMA19_G_LENGTH		(10)
+#define PDP_GAMMA19_GB_GAMMA19_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA19_GB, GAMMA19_B
+*/
+#define PDP_GAMMA19_GB_GAMMA19_B_MASK		(0x000003FF)
+#define PDP_GAMMA19_GB_GAMMA19_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA19_GB_GAMMA19_B_SHIFT		(0)
+#define PDP_GAMMA19_GB_GAMMA19_B_LENGTH		(10)
+#define PDP_GAMMA19_GB_GAMMA19_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA20_R_OFFSET		(0x0854)
+
+/* PDP, GAMMA20_R, GAMMA20_R
+*/
+#define PDP_GAMMA20_R_GAMMA20_R_MASK		(0x000003FF)
+#define PDP_GAMMA20_R_GAMMA20_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA20_R_GAMMA20_R_SHIFT		(0)
+#define PDP_GAMMA20_R_GAMMA20_R_LENGTH		(10)
+#define PDP_GAMMA20_R_GAMMA20_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA20_GB_OFFSET		(0x0858)
+
+/* PDP, GAMMA20_GB, GAMMA20_G
+*/
+#define PDP_GAMMA20_GB_GAMMA20_G_MASK		(0x03FF0000)
+#define PDP_GAMMA20_GB_GAMMA20_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA20_GB_GAMMA20_G_SHIFT		(16)
+#define PDP_GAMMA20_GB_GAMMA20_G_LENGTH		(10)
+#define PDP_GAMMA20_GB_GAMMA20_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA20_GB, GAMMA20_B
+*/
+#define PDP_GAMMA20_GB_GAMMA20_B_MASK		(0x000003FF)
+#define PDP_GAMMA20_GB_GAMMA20_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA20_GB_GAMMA20_B_SHIFT		(0)
+#define PDP_GAMMA20_GB_GAMMA20_B_LENGTH		(10)
+#define PDP_GAMMA20_GB_GAMMA20_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA21_R_OFFSET		(0x085C)
+
+/* PDP, GAMMA21_R, GAMMA21_R
+*/
+#define PDP_GAMMA21_R_GAMMA21_R_MASK		(0x000003FF)
+#define PDP_GAMMA21_R_GAMMA21_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA21_R_GAMMA21_R_SHIFT		(0)
+#define PDP_GAMMA21_R_GAMMA21_R_LENGTH		(10)
+#define PDP_GAMMA21_R_GAMMA21_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA21_GB_OFFSET		(0x0860)
+
+/* PDP, GAMMA21_GB, GAMMA21_G
+*/
+#define PDP_GAMMA21_GB_GAMMA21_G_MASK		(0x03FF0000)
+#define PDP_GAMMA21_GB_GAMMA21_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA21_GB_GAMMA21_G_SHIFT		(16)
+#define PDP_GAMMA21_GB_GAMMA21_G_LENGTH		(10)
+#define PDP_GAMMA21_GB_GAMMA21_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA21_GB, GAMMA21_B
+*/
+#define PDP_GAMMA21_GB_GAMMA21_B_MASK		(0x000003FF)
+#define PDP_GAMMA21_GB_GAMMA21_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA21_GB_GAMMA21_B_SHIFT		(0)
+#define PDP_GAMMA21_GB_GAMMA21_B_LENGTH		(10)
+#define PDP_GAMMA21_GB_GAMMA21_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA22_R_OFFSET		(0x0864)
+
+/* PDP, GAMMA22_R, GAMMA22_R
+*/
+#define PDP_GAMMA22_R_GAMMA22_R_MASK		(0x000003FF)
+#define PDP_GAMMA22_R_GAMMA22_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA22_R_GAMMA22_R_SHIFT		(0)
+#define PDP_GAMMA22_R_GAMMA22_R_LENGTH		(10)
+#define PDP_GAMMA22_R_GAMMA22_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA22_GB_OFFSET		(0x0868)
+
+/* PDP, GAMMA22_GB, GAMMA22_G
+*/
+#define PDP_GAMMA22_GB_GAMMA22_G_MASK		(0x03FF0000)
+#define PDP_GAMMA22_GB_GAMMA22_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA22_GB_GAMMA22_G_SHIFT		(16)
+#define PDP_GAMMA22_GB_GAMMA22_G_LENGTH		(10)
+#define PDP_GAMMA22_GB_GAMMA22_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA22_GB, GAMMA22_B
+*/
+#define PDP_GAMMA22_GB_GAMMA22_B_MASK		(0x000003FF)
+#define PDP_GAMMA22_GB_GAMMA22_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA22_GB_GAMMA22_B_SHIFT		(0)
+#define PDP_GAMMA22_GB_GAMMA22_B_LENGTH		(10)
+#define PDP_GAMMA22_GB_GAMMA22_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA23_R_OFFSET		(0x086C)
+
+/* PDP, GAMMA23_R, GAMMA23_R
+*/
+#define PDP_GAMMA23_R_GAMMA23_R_MASK		(0x000003FF)
+#define PDP_GAMMA23_R_GAMMA23_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA23_R_GAMMA23_R_SHIFT		(0)
+#define PDP_GAMMA23_R_GAMMA23_R_LENGTH		(10)
+#define PDP_GAMMA23_R_GAMMA23_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA23_GB_OFFSET		(0x0870)
+
+/* PDP, GAMMA23_GB, GAMMA23_G
+*/
+#define PDP_GAMMA23_GB_GAMMA23_G_MASK		(0x03FF0000)
+#define PDP_GAMMA23_GB_GAMMA23_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA23_GB_GAMMA23_G_SHIFT		(16)
+#define PDP_GAMMA23_GB_GAMMA23_G_LENGTH		(10)
+#define PDP_GAMMA23_GB_GAMMA23_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA23_GB, GAMMA23_B
+*/
+#define PDP_GAMMA23_GB_GAMMA23_B_MASK		(0x000003FF)
+#define PDP_GAMMA23_GB_GAMMA23_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA23_GB_GAMMA23_B_SHIFT		(0)
+#define PDP_GAMMA23_GB_GAMMA23_B_LENGTH		(10)
+#define PDP_GAMMA23_GB_GAMMA23_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA24_R_OFFSET		(0x0874)
+
+/* PDP, GAMMA24_R, GAMMA24_R
+*/
+#define PDP_GAMMA24_R_GAMMA24_R_MASK		(0x000003FF)
+#define PDP_GAMMA24_R_GAMMA24_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA24_R_GAMMA24_R_SHIFT		(0)
+#define PDP_GAMMA24_R_GAMMA24_R_LENGTH		(10)
+#define PDP_GAMMA24_R_GAMMA24_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA24_GB_OFFSET		(0x0878)
+
+/* PDP, GAMMA24_GB, GAMMA24_G
+*/
+#define PDP_GAMMA24_GB_GAMMA24_G_MASK		(0x03FF0000)
+#define PDP_GAMMA24_GB_GAMMA24_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA24_GB_GAMMA24_G_SHIFT		(16)
+#define PDP_GAMMA24_GB_GAMMA24_G_LENGTH		(10)
+#define PDP_GAMMA24_GB_GAMMA24_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA24_GB, GAMMA24_B
+*/
+#define PDP_GAMMA24_GB_GAMMA24_B_MASK		(0x000003FF)
+#define PDP_GAMMA24_GB_GAMMA24_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA24_GB_GAMMA24_B_SHIFT		(0)
+#define PDP_GAMMA24_GB_GAMMA24_B_LENGTH		(10)
+#define PDP_GAMMA24_GB_GAMMA24_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA25_R_OFFSET		(0x087C)
+
+/* PDP, GAMMA25_R, GAMMA25_R
+*/
+#define PDP_GAMMA25_R_GAMMA25_R_MASK		(0x000003FF)
+#define PDP_GAMMA25_R_GAMMA25_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA25_R_GAMMA25_R_SHIFT		(0)
+#define PDP_GAMMA25_R_GAMMA25_R_LENGTH		(10)
+#define PDP_GAMMA25_R_GAMMA25_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA25_GB_OFFSET		(0x0880)
+
+/* PDP, GAMMA25_GB, GAMMA25_G
+*/
+#define PDP_GAMMA25_GB_GAMMA25_G_MASK		(0x03FF0000)
+#define PDP_GAMMA25_GB_GAMMA25_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA25_GB_GAMMA25_G_SHIFT		(16)
+#define PDP_GAMMA25_GB_GAMMA25_G_LENGTH		(10)
+#define PDP_GAMMA25_GB_GAMMA25_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA25_GB, GAMMA25_B
+*/
+#define PDP_GAMMA25_GB_GAMMA25_B_MASK		(0x000003FF)
+#define PDP_GAMMA25_GB_GAMMA25_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA25_GB_GAMMA25_B_SHIFT		(0)
+#define PDP_GAMMA25_GB_GAMMA25_B_LENGTH		(10)
+#define PDP_GAMMA25_GB_GAMMA25_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA26_R_OFFSET		(0x0884)
+
+/* PDP, GAMMA26_R, GAMMA26_R
+*/
+#define PDP_GAMMA26_R_GAMMA26_R_MASK		(0x000003FF)
+#define PDP_GAMMA26_R_GAMMA26_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA26_R_GAMMA26_R_SHIFT		(0)
+#define PDP_GAMMA26_R_GAMMA26_R_LENGTH		(10)
+#define PDP_GAMMA26_R_GAMMA26_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA26_GB_OFFSET		(0x0888)
+
+/* PDP, GAMMA26_GB, GAMMA26_G
+*/
+#define PDP_GAMMA26_GB_GAMMA26_G_MASK		(0x03FF0000)
+#define PDP_GAMMA26_GB_GAMMA26_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA26_GB_GAMMA26_G_SHIFT		(16)
+#define PDP_GAMMA26_GB_GAMMA26_G_LENGTH		(10)
+#define PDP_GAMMA26_GB_GAMMA26_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA26_GB, GAMMA26_B
+*/
+#define PDP_GAMMA26_GB_GAMMA26_B_MASK		(0x000003FF)
+#define PDP_GAMMA26_GB_GAMMA26_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA26_GB_GAMMA26_B_SHIFT		(0)
+#define PDP_GAMMA26_GB_GAMMA26_B_LENGTH		(10)
+#define PDP_GAMMA26_GB_GAMMA26_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA27_R_OFFSET		(0x088C)
+
+/* PDP, GAMMA27_R, GAMMA27_R
+*/
+#define PDP_GAMMA27_R_GAMMA27_R_MASK		(0x000003FF)
+#define PDP_GAMMA27_R_GAMMA27_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA27_R_GAMMA27_R_SHIFT		(0)
+#define PDP_GAMMA27_R_GAMMA27_R_LENGTH		(10)
+#define PDP_GAMMA27_R_GAMMA27_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA27_GB_OFFSET		(0x0890)
+
+/* PDP, GAMMA27_GB, GAMMA27_G
+*/
+#define PDP_GAMMA27_GB_GAMMA27_G_MASK		(0x03FF0000)
+#define PDP_GAMMA27_GB_GAMMA27_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA27_GB_GAMMA27_G_SHIFT		(16)
+#define PDP_GAMMA27_GB_GAMMA27_G_LENGTH		(10)
+#define PDP_GAMMA27_GB_GAMMA27_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA27_GB, GAMMA27_B
+*/
+#define PDP_GAMMA27_GB_GAMMA27_B_MASK		(0x000003FF)
+#define PDP_GAMMA27_GB_GAMMA27_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA27_GB_GAMMA27_B_SHIFT		(0)
+#define PDP_GAMMA27_GB_GAMMA27_B_LENGTH		(10)
+#define PDP_GAMMA27_GB_GAMMA27_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA28_R_OFFSET		(0x0894)
+
+/* PDP, GAMMA28_R, GAMMA28_R
+*/
+#define PDP_GAMMA28_R_GAMMA28_R_MASK		(0x000003FF)
+#define PDP_GAMMA28_R_GAMMA28_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA28_R_GAMMA28_R_SHIFT		(0)
+#define PDP_GAMMA28_R_GAMMA28_R_LENGTH		(10)
+#define PDP_GAMMA28_R_GAMMA28_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA28_GB_OFFSET		(0x0898)
+
+/* PDP, GAMMA28_GB, GAMMA28_G
+*/
+#define PDP_GAMMA28_GB_GAMMA28_G_MASK		(0x03FF0000)
+#define PDP_GAMMA28_GB_GAMMA28_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA28_GB_GAMMA28_G_SHIFT		(16)
+#define PDP_GAMMA28_GB_GAMMA28_G_LENGTH		(10)
+#define PDP_GAMMA28_GB_GAMMA28_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA28_GB, GAMMA28_B
+*/
+#define PDP_GAMMA28_GB_GAMMA28_B_MASK		(0x000003FF)
+#define PDP_GAMMA28_GB_GAMMA28_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA28_GB_GAMMA28_B_SHIFT		(0)
+#define PDP_GAMMA28_GB_GAMMA28_B_LENGTH		(10)
+#define PDP_GAMMA28_GB_GAMMA28_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA29_R_OFFSET		(0x089C)
+
+/* PDP, GAMMA29_R, GAMMA29_R
+*/
+#define PDP_GAMMA29_R_GAMMA29_R_MASK		(0x000003FF)
+#define PDP_GAMMA29_R_GAMMA29_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA29_R_GAMMA29_R_SHIFT		(0)
+#define PDP_GAMMA29_R_GAMMA29_R_LENGTH		(10)
+#define PDP_GAMMA29_R_GAMMA29_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA29_GB_OFFSET		(0x08A0)
+
+/* PDP, GAMMA29_GB, GAMMA29_G
+*/
+#define PDP_GAMMA29_GB_GAMMA29_G_MASK		(0x03FF0000)
+#define PDP_GAMMA29_GB_GAMMA29_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA29_GB_GAMMA29_G_SHIFT		(16)
+#define PDP_GAMMA29_GB_GAMMA29_G_LENGTH		(10)
+#define PDP_GAMMA29_GB_GAMMA29_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA29_GB, GAMMA29_B
+*/
+#define PDP_GAMMA29_GB_GAMMA29_B_MASK		(0x000003FF)
+#define PDP_GAMMA29_GB_GAMMA29_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA29_GB_GAMMA29_B_SHIFT		(0)
+#define PDP_GAMMA29_GB_GAMMA29_B_LENGTH		(10)
+#define PDP_GAMMA29_GB_GAMMA29_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA30_R_OFFSET		(0x08A4)
+
+/* PDP, GAMMA30_R, GAMMA30_R
+*/
+#define PDP_GAMMA30_R_GAMMA30_R_MASK		(0x000003FF)
+#define PDP_GAMMA30_R_GAMMA30_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA30_R_GAMMA30_R_SHIFT		(0)
+#define PDP_GAMMA30_R_GAMMA30_R_LENGTH		(10)
+#define PDP_GAMMA30_R_GAMMA30_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA30_GB_OFFSET		(0x08A8)
+
+/* PDP, GAMMA30_GB, GAMMA30_G
+*/
+#define PDP_GAMMA30_GB_GAMMA30_G_MASK		(0x03FF0000)
+#define PDP_GAMMA30_GB_GAMMA30_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA30_GB_GAMMA30_G_SHIFT		(16)
+#define PDP_GAMMA30_GB_GAMMA30_G_LENGTH		(10)
+#define PDP_GAMMA30_GB_GAMMA30_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA30_GB, GAMMA30_B
+*/
+#define PDP_GAMMA30_GB_GAMMA30_B_MASK		(0x000003FF)
+#define PDP_GAMMA30_GB_GAMMA30_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA30_GB_GAMMA30_B_SHIFT		(0)
+#define PDP_GAMMA30_GB_GAMMA30_B_LENGTH		(10)
+#define PDP_GAMMA30_GB_GAMMA30_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA31_R_OFFSET		(0x08AC)
+
+/* PDP, GAMMA31_R, GAMMA31_R
+*/
+#define PDP_GAMMA31_R_GAMMA31_R_MASK		(0x000003FF)
+#define PDP_GAMMA31_R_GAMMA31_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA31_R_GAMMA31_R_SHIFT		(0)
+#define PDP_GAMMA31_R_GAMMA31_R_LENGTH		(10)
+#define PDP_GAMMA31_R_GAMMA31_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA31_GB_OFFSET		(0x08B0)
+
+/* PDP, GAMMA31_GB, GAMMA31_G
+*/
+#define PDP_GAMMA31_GB_GAMMA31_G_MASK		(0x03FF0000)
+#define PDP_GAMMA31_GB_GAMMA31_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA31_GB_GAMMA31_G_SHIFT		(16)
+#define PDP_GAMMA31_GB_GAMMA31_G_LENGTH		(10)
+#define PDP_GAMMA31_GB_GAMMA31_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA31_GB, GAMMA31_B
+*/
+#define PDP_GAMMA31_GB_GAMMA31_B_MASK		(0x000003FF)
+#define PDP_GAMMA31_GB_GAMMA31_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA31_GB_GAMMA31_B_SHIFT		(0)
+#define PDP_GAMMA31_GB_GAMMA31_B_LENGTH		(10)
+#define PDP_GAMMA31_GB_GAMMA31_B_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA32_R_OFFSET		(0x08B4)
+
+/* PDP, GAMMA32_R, GAMMA32_R
+*/
+#define PDP_GAMMA32_R_GAMMA32_R_MASK		(0x000003FF)
+#define PDP_GAMMA32_R_GAMMA32_R_LSBMASK		(0x000003FF)
+#define PDP_GAMMA32_R_GAMMA32_R_SHIFT		(0)
+#define PDP_GAMMA32_R_GAMMA32_R_LENGTH		(10)
+#define PDP_GAMMA32_R_GAMMA32_R_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GAMMA32_GB_OFFSET		(0x08B8)
+
+/* PDP, GAMMA32_GB, GAMMA32_G
+*/
+#define PDP_GAMMA32_GB_GAMMA32_G_MASK		(0x03FF0000)
+#define PDP_GAMMA32_GB_GAMMA32_G_LSBMASK		(0x000003FF)
+#define PDP_GAMMA32_GB_GAMMA32_G_SHIFT		(16)
+#define PDP_GAMMA32_GB_GAMMA32_G_LENGTH		(10)
+#define PDP_GAMMA32_GB_GAMMA32_G_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GAMMA32_GB, GAMMA32_B
+*/
+#define PDP_GAMMA32_GB_GAMMA32_B_MASK		(0x000003FF)
+#define PDP_GAMMA32_GB_GAMMA32_B_LSBMASK		(0x000003FF)
+#define PDP_GAMMA32_GB_GAMMA32_B_SHIFT		(0)
+#define PDP_GAMMA32_GB_GAMMA32_B_LENGTH		(10)
+#define PDP_GAMMA32_GB_GAMMA32_B_SIGNED_FIELD	IMG_FALSE
+
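+/*
+ * Illustrative sketch (example only, compiled out): loading the 33-entry
+ * gamma LUT defined above. Each entry is a (GAMMAn_R, GAMMAn_GB) register
+ * pair spaced 8 bytes apart, with R in bits 9:0 of the first register and
+ * G/B packed into bits 25:16 and 9:0 of the second. Assumes writel() from
+ * <linux/io.h>.
+ */
+#if 0
+static void pdp_load_gamma(void __iomem *base, const u16 r[33],
+			   const u16 g[33], const u16 b[33])
+{
+	u32 i;
+
+	for (i = 0; i < 33; i++) {
+		u32 off = PDP_GAMMA0_R_OFFSET + i * 8;
+
+		writel(r[i] & PDP_GAMMA0_R_GAMMA0_R_LSBMASK, base + off);
+		writel(((g[i] & PDP_GAMMA0_GB_GAMMA0_G_LSBMASK)
+			<< PDP_GAMMA0_GB_GAMMA0_G_SHIFT) |
+		       (b[i] & PDP_GAMMA0_GB_GAMMA0_B_LSBMASK),
+		       base + off + 4);
+	}
+}
+#endif
+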
+#define PDP_VEVENT_OFFSET		(0x08BC)
+
+/* PDP, VEVENT, VEVENT
+*/
+#define PDP_VEVENT_VEVENT_MASK		(0x1FFF0000)
+#define PDP_VEVENT_VEVENT_LSBMASK		(0x00001FFF)
+#define PDP_VEVENT_VEVENT_SHIFT		(16)
+#define PDP_VEVENT_VEVENT_LENGTH		(13)
+#define PDP_VEVENT_VEVENT_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VEVENT, VFETCH
+*/
+#define PDP_VEVENT_VFETCH_MASK		(0x00001FFF)
+#define PDP_VEVENT_VFETCH_LSBMASK		(0x00001FFF)
+#define PDP_VEVENT_VFETCH_SHIFT		(0)
+#define PDP_VEVENT_VFETCH_LENGTH		(13)
+#define PDP_VEVENT_VFETCH_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_HDECTRL_OFFSET		(0x08C0)
+
+/* PDP, HDECTRL, HDES
+*/
+#define PDP_HDECTRL_HDES_MASK		(0x1FFF0000)
+#define PDP_HDECTRL_HDES_LSBMASK		(0x00001FFF)
+#define PDP_HDECTRL_HDES_SHIFT		(16)
+#define PDP_HDECTRL_HDES_LENGTH		(13)
+#define PDP_HDECTRL_HDES_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, HDECTRL, HDEF
+*/
+#define PDP_HDECTRL_HDEF_MASK		(0x00001FFF)
+#define PDP_HDECTRL_HDEF_LSBMASK		(0x00001FFF)
+#define PDP_HDECTRL_HDEF_SHIFT		(0)
+#define PDP_HDECTRL_HDEF_LENGTH		(13)
+#define PDP_HDECTRL_HDEF_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VDECTRL_OFFSET		(0x08C4)
+
+/* PDP, VDECTRL, VDES
+*/
+#define PDP_VDECTRL_VDES_MASK		(0x1FFF0000)
+#define PDP_VDECTRL_VDES_LSBMASK		(0x00001FFF)
+#define PDP_VDECTRL_VDES_SHIFT		(16)
+#define PDP_VDECTRL_VDES_LENGTH		(13)
+#define PDP_VDECTRL_VDES_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VDECTRL, VDEF
+*/
+#define PDP_VDECTRL_VDEF_MASK		(0x00001FFF)
+#define PDP_VDECTRL_VDEF_LSBMASK		(0x00001FFF)
+#define PDP_VDECTRL_VDEF_SHIFT		(0)
+#define PDP_VDECTRL_VDEF_LENGTH		(13)
+#define PDP_VDECTRL_VDEF_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_OPMASK_R_OFFSET		(0x08C8)
+
+/* PDP, OPMASK_R, MASKLEVEL
+*/
+#define PDP_OPMASK_R_MASKLEVEL_MASK		(0x80000000)
+#define PDP_OPMASK_R_MASKLEVEL_LSBMASK		(0x00000001)
+#define PDP_OPMASK_R_MASKLEVEL_SHIFT		(31)
+#define PDP_OPMASK_R_MASKLEVEL_LENGTH		(1)
+#define PDP_OPMASK_R_MASKLEVEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OPMASK_R, BLANKLEVEL
+*/
+#define PDP_OPMASK_R_BLANKLEVEL_MASK		(0x40000000)
+#define PDP_OPMASK_R_BLANKLEVEL_LSBMASK		(0x00000001)
+#define PDP_OPMASK_R_BLANKLEVEL_SHIFT		(30)
+#define PDP_OPMASK_R_BLANKLEVEL_LENGTH		(1)
+#define PDP_OPMASK_R_BLANKLEVEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OPMASK_R, MASKR
+*/
+#define PDP_OPMASK_R_MASKR_MASK		(0x000003FF)
+#define PDP_OPMASK_R_MASKR_LSBMASK		(0x000003FF)
+#define PDP_OPMASK_R_MASKR_SHIFT		(0)
+#define PDP_OPMASK_R_MASKR_LENGTH		(10)
+#define PDP_OPMASK_R_MASKR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_OPMASK_GB_OFFSET		(0x08CC)
+
+/* PDP, OPMASK_GB, MASKG
+*/
+#define PDP_OPMASK_GB_MASKG_MASK		(0x03FF0000)
+#define PDP_OPMASK_GB_MASKG_LSBMASK		(0x000003FF)
+#define PDP_OPMASK_GB_MASKG_SHIFT		(16)
+#define PDP_OPMASK_GB_MASKG_LENGTH		(10)
+#define PDP_OPMASK_GB_MASKG_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, OPMASK_GB, MASKB
+*/
+#define PDP_OPMASK_GB_MASKB_MASK		(0x000003FF)
+#define PDP_OPMASK_GB_MASKB_LSBMASK		(0x000003FF)
+#define PDP_OPMASK_GB_MASKB_SHIFT		(0)
+#define PDP_OPMASK_GB_MASKB_LENGTH		(10)
+#define PDP_OPMASK_GB_MASKB_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_REGLD_ADDR_CTRL_OFFSET		(0x08D0)
+
+/* PDP, REGLD_ADDR_CTRL, REGLD_ADDRIN
+*/
+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_MASK		(0xFFFFFFF0)
+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LSBMASK		(0x0FFFFFFF)
+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SHIFT		(4)
+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LENGTH		(28)
+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_REGLD_ADDR_STAT_OFFSET		(0x08D4)
+
+/* PDP, REGLD_ADDR_STAT, REGLD_ADDROUT
+*/
+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_MASK		(0xFFFFFFF0)
+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LSBMASK		(0x0FFFFFFF)
+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SHIFT		(4)
+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LENGTH		(28)
+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_REGLD_STAT_OFFSET		(0x08D8)
+
+/* PDP, REGLD_STAT, REGLD_ADDREN
+*/
+#define PDP_REGLD_STAT_REGLD_ADDREN_MASK		(0x00800000)
+#define PDP_REGLD_STAT_REGLD_ADDREN_LSBMASK		(0x00000001)
+#define PDP_REGLD_STAT_REGLD_ADDREN_SHIFT		(23)
+#define PDP_REGLD_STAT_REGLD_ADDREN_LENGTH		(1)
+#define PDP_REGLD_STAT_REGLD_ADDREN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_REGLD_CTRL_OFFSET		(0x08DC)
+
+/* PDP, REGLD_CTRL, REGLD_ADDRLEN
+*/
+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_MASK		(0xFF000000)
+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_LSBMASK		(0x000000FF)
+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_SHIFT		(24)
+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_LENGTH		(8)
+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, REGLD_CTRL, REGLD_VAL
+*/
+#define PDP_REGLD_CTRL_REGLD_VAL_MASK		(0x00800000)
+#define PDP_REGLD_CTRL_REGLD_VAL_LSBMASK		(0x00000001)
+#define PDP_REGLD_CTRL_REGLD_VAL_SHIFT		(23)
+#define PDP_REGLD_CTRL_REGLD_VAL_LENGTH		(1)
+#define PDP_REGLD_CTRL_REGLD_VAL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_UPDCTRL_OFFSET		(0x08E0)
+
+/* PDP, UPDCTRL, UPDFIELD
+*/
+#define PDP_UPDCTRL_UPDFIELD_MASK		(0x00000001)
+#define PDP_UPDCTRL_UPDFIELD_LSBMASK		(0x00000001)
+#define PDP_UPDCTRL_UPDFIELD_SHIFT		(0)
+#define PDP_UPDCTRL_UPDFIELD_LENGTH		(1)
+#define PDP_UPDCTRL_UPDFIELD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_INTCTRL_OFFSET		(0x08E4)
+
+/* PDP, PVR_PDP_INTCTRL, HBLNK_LINE
+*/
+#define PDP_INTCTRL_HBLNK_LINE_MASK		(0x00010000)
+#define PDP_INTCTRL_HBLNK_LINE_LSBMASK		(0x00000001)
+#define PDP_INTCTRL_HBLNK_LINE_SHIFT		(16)
+#define PDP_INTCTRL_HBLNK_LINE_LENGTH		(1)
+#define PDP_INTCTRL_HBLNK_LINE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PVR_PDP_INTCTRL, HBLNK_LINENO
+*/
+#define PDP_INTCTRL_HBLNK_LINENO_MASK		(0x00001FFF)
+#define PDP_INTCTRL_HBLNK_LINENO_LSBMASK		(0x00001FFF)
+#define PDP_INTCTRL_HBLNK_LINENO_SHIFT		(0)
+#define PDP_INTCTRL_HBLNK_LINENO_LENGTH		(13)
+#define PDP_INTCTRL_HBLNK_LINENO_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PDISETUP_OFFSET		(0x0900)
+
+/* PDP, PDISETUP, PDI_BLNKLVL
+*/
+#define PDP_PDISETUP_PDI_BLNKLVL_MASK		(0x00000040)
+#define PDP_PDISETUP_PDI_BLNKLVL_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_BLNKLVL_SHIFT		(6)
+#define PDP_PDISETUP_PDI_BLNKLVL_LENGTH		(1)
+#define PDP_PDISETUP_PDI_BLNKLVL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_BLNK
+*/
+#define PDP_PDISETUP_PDI_BLNK_MASK		(0x00000020)
+#define PDP_PDISETUP_PDI_BLNK_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_BLNK_SHIFT		(5)
+#define PDP_PDISETUP_PDI_BLNK_LENGTH		(1)
+#define PDP_PDISETUP_PDI_BLNK_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_PWR
+*/
+#define PDP_PDISETUP_PDI_PWR_MASK		(0x00000010)
+#define PDP_PDISETUP_PDI_PWR_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_PWR_SHIFT		(4)
+#define PDP_PDISETUP_PDI_PWR_LENGTH		(1)
+#define PDP_PDISETUP_PDI_PWR_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_EN
+*/
+#define PDP_PDISETUP_PDI_EN_MASK		(0x00000008)
+#define PDP_PDISETUP_PDI_EN_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_EN_SHIFT		(3)
+#define PDP_PDISETUP_PDI_EN_LENGTH		(1)
+#define PDP_PDISETUP_PDI_EN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_GDEN
+*/
+#define PDP_PDISETUP_PDI_GDEN_MASK		(0x00000004)
+#define PDP_PDISETUP_PDI_GDEN_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_GDEN_SHIFT		(2)
+#define PDP_PDISETUP_PDI_GDEN_LENGTH		(1)
+#define PDP_PDISETUP_PDI_GDEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_NFEN
+*/
+#define PDP_PDISETUP_PDI_NFEN_MASK		(0x00000002)
+#define PDP_PDISETUP_PDI_NFEN_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_NFEN_SHIFT		(1)
+#define PDP_PDISETUP_PDI_NFEN_LENGTH		(1)
+#define PDP_PDISETUP_PDI_NFEN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDISETUP, PDI_CR
+*/
+#define PDP_PDISETUP_PDI_CR_MASK		(0x00000001)
+#define PDP_PDISETUP_PDI_CR_LSBMASK		(0x00000001)
+#define PDP_PDISETUP_PDI_CR_SHIFT		(0)
+#define PDP_PDISETUP_PDI_CR_LENGTH		(1)
+#define PDP_PDISETUP_PDI_CR_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PDITIMING0_OFFSET		(0x0904)
+
+/* PDP, PDITIMING0, PDI_PWRSVGD
+*/
+#define PDP_PDITIMING0_PDI_PWRSVGD_MASK		(0x0F000000)
+#define PDP_PDITIMING0_PDI_PWRSVGD_LSBMASK		(0x0000000F)
+#define PDP_PDITIMING0_PDI_PWRSVGD_SHIFT		(24)
+#define PDP_PDITIMING0_PDI_PWRSVGD_LENGTH		(4)
+#define PDP_PDITIMING0_PDI_PWRSVGD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDITIMING0, PDI_LSDEL
+*/
+#define PDP_PDITIMING0_PDI_LSDEL_MASK		(0x007F0000)
+#define PDP_PDITIMING0_PDI_LSDEL_LSBMASK		(0x0000007F)
+#define PDP_PDITIMING0_PDI_LSDEL_SHIFT		(16)
+#define PDP_PDITIMING0_PDI_LSDEL_LENGTH		(7)
+#define PDP_PDITIMING0_PDI_LSDEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDITIMING0, PDI_PWRSV2GD2
+*/
+#define PDP_PDITIMING0_PDI_PWRSV2GD2_MASK		(0x000003FF)
+#define PDP_PDITIMING0_PDI_PWRSV2GD2_LSBMASK		(0x000003FF)
+#define PDP_PDITIMING0_PDI_PWRSV2GD2_SHIFT		(0)
+#define PDP_PDITIMING0_PDI_PWRSV2GD2_LENGTH		(10)
+#define PDP_PDITIMING0_PDI_PWRSV2GD2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PDITIMING1_OFFSET		(0x0908)
+
+/* PDP, PDITIMING1, PDI_NLDEL
+*/
+#define PDP_PDITIMING1_PDI_NLDEL_MASK		(0x000F0000)
+#define PDP_PDITIMING1_PDI_NLDEL_LSBMASK		(0x0000000F)
+#define PDP_PDITIMING1_PDI_NLDEL_SHIFT		(16)
+#define PDP_PDITIMING1_PDI_NLDEL_LENGTH		(4)
+#define PDP_PDITIMING1_PDI_NLDEL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDITIMING1, PDI_ACBDEL
+*/
+#define PDP_PDITIMING1_PDI_ACBDEL_MASK		(0x000003FF)
+#define PDP_PDITIMING1_PDI_ACBDEL_LSBMASK		(0x000003FF)
+#define PDP_PDITIMING1_PDI_ACBDEL_SHIFT		(0)
+#define PDP_PDITIMING1_PDI_ACBDEL_LENGTH		(10)
+#define PDP_PDITIMING1_PDI_ACBDEL_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PDICOREID_OFFSET		(0x090C)
+
+/* PDP, PDICOREID, PDI_GROUP_ID
+*/
+#define PDP_PDICOREID_PDI_GROUP_ID_MASK		(0xFF000000)
+#define PDP_PDICOREID_PDI_GROUP_ID_LSBMASK		(0x000000FF)
+#define PDP_PDICOREID_PDI_GROUP_ID_SHIFT		(24)
+#define PDP_PDICOREID_PDI_GROUP_ID_LENGTH		(8)
+#define PDP_PDICOREID_PDI_GROUP_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREID, PDI_CORE_ID
+*/
+#define PDP_PDICOREID_PDI_CORE_ID_MASK		(0x00FF0000)
+#define PDP_PDICOREID_PDI_CORE_ID_LSBMASK		(0x000000FF)
+#define PDP_PDICOREID_PDI_CORE_ID_SHIFT		(16)
+#define PDP_PDICOREID_PDI_CORE_ID_LENGTH		(8)
+#define PDP_PDICOREID_PDI_CORE_ID_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREID, PDI_CONFIG_ID
+*/
+#define PDP_PDICOREID_PDI_CONFIG_ID_MASK		(0x0000FFFF)
+#define PDP_PDICOREID_PDI_CONFIG_ID_LSBMASK		(0x0000FFFF)
+#define PDP_PDICOREID_PDI_CONFIG_ID_SHIFT		(0)
+#define PDP_PDICOREID_PDI_CONFIG_ID_LENGTH		(16)
+#define PDP_PDICOREID_PDI_CONFIG_ID_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_PDICOREREV_OFFSET		(0x0910)
+
+/* PDP, PDICOREREV, PDI_MAJOR_REV
+*/
+#define PDP_PDICOREREV_PDI_MAJOR_REV_MASK		(0x00FF0000)
+#define PDP_PDICOREREV_PDI_MAJOR_REV_LSBMASK		(0x000000FF)
+#define PDP_PDICOREREV_PDI_MAJOR_REV_SHIFT		(16)
+#define PDP_PDICOREREV_PDI_MAJOR_REV_LENGTH		(8)
+#define PDP_PDICOREREV_PDI_MAJOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREREV, PDI_MINOR_REV
+*/
+#define PDP_PDICOREREV_PDI_MINOR_REV_MASK		(0x0000FF00)
+#define PDP_PDICOREREV_PDI_MINOR_REV_LSBMASK		(0x000000FF)
+#define PDP_PDICOREREV_PDI_MINOR_REV_SHIFT		(8)
+#define PDP_PDICOREREV_PDI_MINOR_REV_LENGTH		(8)
+#define PDP_PDICOREREV_PDI_MINOR_REV_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, PDICOREREV, PDI_MAINT_REV
+*/
+#define PDP_PDICOREREV_PDI_MAINT_REV_MASK		(0x000000FF)
+#define PDP_PDICOREREV_PDI_MAINT_REV_LSBMASK		(0x000000FF)
+#define PDP_PDICOREREV_PDI_MAINT_REV_SHIFT		(0)
+#define PDP_PDICOREREV_PDI_MAINT_REV_LENGTH		(8)
+#define PDP_PDICOREREV_PDI_MAINT_REV_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX2_OFFSET		(0x0920)
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y1
+*/
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_MASK		(0x000000C0)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LSBMASK		(0x00000003)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT		(6)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LENGTH		(2)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y1
+*/
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_MASK		(0x00000030)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LSBMASK		(0x00000003)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT		(4)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LENGTH		(2)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y0
+*/
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_MASK		(0x0000000C)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LSBMASK		(0x00000003)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT		(2)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LENGTH		(2)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y0
+*/
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_MASK		(0x00000003)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LSBMASK		(0x00000003)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT		(0)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LENGTH		(2)
+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX4_0_OFFSET		(0x0924)
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y1
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_MASK		(0xF0000000)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SHIFT		(28)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y1
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_MASK		(0x0F000000)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SHIFT		(24)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y1
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_MASK		(0x00F00000)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SHIFT		(20)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y1
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_MASK		(0x000F0000)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SHIFT		(16)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y0
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_MASK		(0x0000F000)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SHIFT		(12)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y0
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_MASK		(0x00000F00)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SHIFT		(8)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y0
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_MASK		(0x000000F0)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SHIFT		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y0
+*/
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_MASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SHIFT		(0)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LENGTH		(4)
+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX4_1_OFFSET		(0x0928)
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y3
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_MASK		(0xF0000000)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SHIFT		(28)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y3
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_MASK		(0x0F000000)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SHIFT		(24)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y3
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_MASK		(0x00F00000)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SHIFT		(20)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y3
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_MASK		(0x000F0000)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SHIFT		(16)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y2
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_MASK		(0x0000F000)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SHIFT		(12)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y2
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_MASK		(0x00000F00)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SHIFT		(8)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y2
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_MASK		(0x000000F0)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SHIFT		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y2
+*/
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_MASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LSBMASK		(0x0000000F)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SHIFT		(0)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LENGTH		(4)
+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_0_OFFSET		(0x092C)
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X4Y0
+*/
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SHIFT		(24)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X3Y0
+*/
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SHIFT		(18)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X2Y0
+*/
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SHIFT		(12)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X1Y0
+*/
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SHIFT		(6)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X0Y0
+*/
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SHIFT		(0)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_1_OFFSET		(0x0930)
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X1Y1
+*/
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SHIFT		(24)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X0Y1
+*/
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SHIFT		(18)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X7Y0
+*/
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SHIFT		(12)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X6Y0
+*/
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SHIFT		(6)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X5Y0
+*/
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SHIFT		(0)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LENGTH		(6)
+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_2_OFFSET		(0x0934)
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X6Y1
+*/
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SHIFT		(24)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X5Y1
+*/
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SHIFT		(18)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X4Y1
+*/
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SHIFT		(12)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X3Y1
+*/
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SHIFT		(6)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X2Y1
+*/
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SHIFT		(0)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_3_OFFSET		(0x0938)
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X3Y2
+*/
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SHIFT		(24)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X2Y2
+*/
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SHIFT		(18)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X1Y2
+*/
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SHIFT		(12)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X0Y2
+*/
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SHIFT		(6)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X7Y1
+*/
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SHIFT		(0)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LENGTH		(6)
+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_4_OFFSET		(0x093C)
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X0Y3
+*/
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SHIFT		(24)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X7Y2
+*/
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SHIFT		(18)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X6Y2
+*/
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SHIFT		(12)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X5Y2
+*/
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SHIFT		(6)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X4Y2
+*/
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SHIFT		(0)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LENGTH		(6)
+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_5_OFFSET		(0x0940)
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X5Y3
+*/
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SHIFT		(24)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X4Y3
+*/
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SHIFT		(18)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X3Y3
+*/
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SHIFT		(12)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X2Y3
+*/
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SHIFT		(6)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X1Y3
+*/
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SHIFT		(0)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_6_OFFSET		(0x0944)
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X2Y4
+*/
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT		(24)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X1Y4
+*/
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SHIFT		(18)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X0Y4
+*/
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SHIFT		(12)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X7Y3
+*/
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SHIFT		(6)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X6Y3
+*/
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SHIFT		(0)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LENGTH		(6)
+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_7_OFFSET		(0x0948)
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X7Y4
+*/
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SHIFT		(24)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X6Y4
+*/
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SHIFT		(18)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X5Y4
+*/
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SHIFT		(12)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X4Y4
+*/
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SHIFT		(6)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X3Y4
+*/
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SHIFT		(0)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LENGTH		(6)
+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_8_OFFSET		(0x094C)
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X4Y5
+*/
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SHIFT		(24)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X3Y5
+*/
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SHIFT		(18)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X2Y5
+*/
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SHIFT		(12)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X1Y5
+*/
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SHIFT		(6)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X0Y5
+*/
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SHIFT		(0)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_9_OFFSET		(0x0950)
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X1Y6
+*/
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SHIFT		(24)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X0Y6
+*/
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SHIFT		(18)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X7Y5
+*/
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SHIFT		(12)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X6Y5
+*/
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SHIFT		(6)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X5Y5
+*/
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SHIFT		(0)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LENGTH		(6)
+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_10_OFFSET		(0x0954)
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X6Y6
+*/
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SHIFT		(24)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X5Y6
+*/
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SHIFT		(18)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X4Y6
+*/
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SHIFT		(12)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X3Y6
+*/
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SHIFT		(6)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X2Y6
+*/
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SHIFT		(0)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_11_OFFSET		(0x0958)
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X3Y7
+*/
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_MASK		(0x3F000000)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SHIFT		(24)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X2Y7
+*/
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SHIFT		(18)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X1Y7
+*/
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SHIFT		(12)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X0Y7
+*/
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SHIFT		(6)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X7Y6
+*/
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SHIFT		(0)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LENGTH		(6)
+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_DITHERMATRIX8_12_OFFSET		(0x095C)
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X7Y7
+*/
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK		(0x00FC0000)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT		(18)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X6Y7
+*/
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_MASK		(0x0003F000)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SHIFT		(12)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X5Y7
+*/
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_MASK		(0x00000FC0)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SHIFT		(6)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X4Y7
+*/
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_MASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LSBMASK		(0x0000003F)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SHIFT		(0)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LENGTH		(6)
+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1_MEMCTRL_OFFSET		(0x0960)
+
+/* PDP, GRPH1_MEMCTRL, GRPH1_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_MEMCTRL, GRPH1_BURSTLEN
+*/
+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_MASK		(0x000000FF)
+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SHIFT		(0)
+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LENGTH		(8)
+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1_MEM_THRESH_OFFSET		(0x0964)
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_UVTHRESHOLD
+*/
+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SHIFT		(24)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LENGTH		(8)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_YTHRESHOLD
+*/
+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT		(12)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LENGTH		(9)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_THRESHOLD
+*/
+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_MASK		(0x000001FF)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SHIFT		(0)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LENGTH		(9)
+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2_MEMCTRL_OFFSET		(0x0968)
+
+/* PDP, GRPH2_MEMCTRL, GRPH2_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_MEMCTRL, GRPH2_BURSTLEN
+*/
+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_MASK		(0x000000FF)
+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SHIFT		(0)
+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LENGTH		(8)
+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2_MEM_THRESH_OFFSET		(0x096C)
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_UVTHRESHOLD
+*/
+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SHIFT		(24)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LENGTH		(8)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_YTHRESHOLD
+*/
+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SHIFT		(12)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LENGTH		(9)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_THRESHOLD
+*/
+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_MASK		(0x000001FF)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SHIFT		(0)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LENGTH		(9)
+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3_MEMCTRL_OFFSET		(0x0970)
+
+/* PDP, GRPH3_MEMCTRL, GRPH3_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_MEMCTRL, GRPH3_BURSTLEN
+*/
+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_MASK		(0x000000FF)
+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SHIFT		(0)
+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LENGTH		(8)
+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3_MEM_THRESH_OFFSET		(0x0974)
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_UVTHRESHOLD
+*/
+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SHIFT		(24)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LENGTH		(8)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_YTHRESHOLD
+*/
+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SHIFT		(12)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LENGTH		(9)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_THRESHOLD
+*/
+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_MASK		(0x000001FF)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SHIFT		(0)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LENGTH		(9)
+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4_MEMCTRL_OFFSET		(0x0978)
+
+/* PDP, GRPH4_MEMCTRL, GRPH4_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_MEMCTRL, GRPH4_BURSTLEN
+*/
+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_MASK		(0x000000FF)
+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SHIFT		(0)
+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LENGTH		(8)
+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4_MEM_THRESH_OFFSET		(0x097C)
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_UVTHRESHOLD
+*/
+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SHIFT		(24)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LENGTH		(8)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_YTHRESHOLD
+*/
+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SHIFT		(12)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LENGTH		(9)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_THRESHOLD
+*/
+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_MASK		(0x000001FF)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SHIFT		(0)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LENGTH		(9)
+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1_MEMCTRL_OFFSET		(0x0980)
+
+/* PDP, VID1_MEMCTRL, VID1_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_MEMCTRL, VID1_BURSTLEN
+*/
+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_MASK		(0x000000FF)
+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_SHIFT		(0)
+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_LENGTH		(8)
+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1_MEM_THRESH_OFFSET		(0x0984)
+
+/* PDP, VID1_MEM_THRESH, VID1_UVTHRESHOLD
+*/
+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SHIFT		(24)
+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LENGTH		(8)
+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_MEM_THRESH, VID1_YTHRESHOLD
+*/
+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SHIFT		(12)
+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LENGTH		(9)
+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_MEM_THRESH, VID1_THRESHOLD
+*/
+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_MASK		(0x000001FF)
+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SHIFT		(0)
+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LENGTH		(9)
+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2_MEMCTRL_OFFSET		(0x0988)
+
+/* PDP, VID2_MEMCTRL, VID2_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_MEMCTRL, VID2_BURSTLEN
+*/
+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_MASK		(0x000000FF)
+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_SHIFT		(0)
+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_LENGTH		(8)
+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2_MEM_THRESH_OFFSET		(0x098C)
+
+/* PDP, VID2_MEM_THRESH, VID2_UVTHRESHOLD
+*/
+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SHIFT		(24)
+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LENGTH		(8)
+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_MEM_THRESH, VID2_YTHRESHOLD
+*/
+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SHIFT		(12)
+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LENGTH		(9)
+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_MEM_THRESH, VID2_THRESHOLD
+*/
+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_MASK		(0x000001FF)
+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SHIFT		(0)
+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LENGTH		(9)
+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3_MEMCTRL_OFFSET		(0x0990)
+
+/* PDP, VID3_MEMCTRL, VID3_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_MEMCTRL, VID3_BURSTLEN
+*/
+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_MASK		(0x000000FF)
+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_SHIFT		(0)
+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_LENGTH		(8)
+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3_MEM_THRESH_OFFSET		(0x0994)
+
+/* PDP, VID3_MEM_THRESH, VID3_UVTHRESHOLD
+*/
+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SHIFT		(24)
+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LENGTH		(8)
+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_MEM_THRESH, VID3_YTHRESHOLD
+*/
+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SHIFT		(12)
+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LENGTH		(9)
+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_MEM_THRESH, VID3_THRESHOLD
+*/
+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_MASK		(0x000001FF)
+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SHIFT		(0)
+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LENGTH		(9)
+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4_MEMCTRL_OFFSET		(0x0998)
+
+/* PDP, VID4_MEMCTRL, VID4_LOCAL_GLOBAL_MEMCTRL
+*/
+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_MASK		(0x80000000)
+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LSBMASK		(0x00000001)
+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SHIFT		(31)
+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LENGTH		(1)
+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_MEMCTRL, VID4_BURSTLEN
+*/
+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_MASK		(0x000000FF)
+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_LSBMASK		(0x000000FF)
+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_SHIFT		(0)
+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_LENGTH		(8)
+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4_MEM_THRESH_OFFSET		(0x099C)
+
+/* PDP, VID4_MEM_THRESH, VID4_UVTHRESHOLD
+*/
+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_MASK		(0xFF000000)
+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LSBMASK		(0x000000FF)
+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SHIFT		(24)
+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LENGTH		(8)
+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_MEM_THRESH, VID4_YTHRESHOLD
+*/
+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_MASK		(0x001FF000)
+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SHIFT		(12)
+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LENGTH		(9)
+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_MEM_THRESH, VID4_THRESHOLD
+*/
+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_MASK		(0x000001FF)
+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LSBMASK		(0x000001FF)
+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SHIFT		(0)
+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LENGTH		(9)
+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH1_PANIC_THRESH_OFFSET		(0x09A0)
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_ENABLE
+*/
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_ENABLE
+*/
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH2_PANIC_THRESH_OFFSET		(0x09A4)
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_ENABLE
+*/
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_ENABLE
+*/
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH3_PANIC_THRESH_OFFSET		(0x09A8)
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_ENABLE
+*/
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_ENABLE
+*/
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_GRPH4_PANIC_THRESH_OFFSET		(0x09AC)
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_ENABLE
+*/
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_ENABLE
+*/
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID1_PANIC_THRESH_OFFSET		(0x09B0)
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_ENABLE
+*/
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_ENABLE
+*/
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID2_PANIC_THRESH_OFFSET		(0x09B4)
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_ENABLE
+*/
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_ENABLE
+*/
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID3_PANIC_THRESH_OFFSET		(0x09B8)
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_ENABLE
+*/
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_ENABLE
+*/
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_VID4_PANIC_THRESH_OFFSET		(0x09BC)
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_ENABLE
+*/
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_MASK		(0x80000000)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SHIFT		(31)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LENGTH		(1)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_ENABLE
+*/
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_MASK		(0x40000000)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LSBMASK		(0x00000001)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SHIFT		(30)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LENGTH		(1)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MAX
+*/
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_MASK		(0x3F800000)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LSBMASK		(0x0000007F)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SHIFT		(23)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LENGTH		(7)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MIN
+*/
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_MASK		(0x007F0000)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LSBMASK		(0x0000007F)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SHIFT		(16)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LENGTH		(7)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MAX
+*/
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_MASK		(0x0000FF00)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LSBMASK		(0x000000FF)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SHIFT		(8)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LENGTH		(8)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD	IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MIN
+*/
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_MASK		(0x000000FF)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LSBMASK		(0x000000FF)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SHIFT		(0)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LENGTH		(8)
+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD	IMG_FALSE
+
+#define PDP_BURST_BOUNDARY_OFFSET		(0x09C0)
+
+/* PDP, BURST_BOUNDARY, BURST_BOUNDARY
+*/
+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK		(0x0000003F)
+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_LSBMASK		(0x0000003F)
+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT		(0)
+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_LENGTH		(6)
+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_SIGNED_FIELD	IMG_FALSE
+
+
+/* ------------------------ End of register definitions ------------------------ */
+
+/* NUMREG defines the extent of the register address space. */
+
+#define		PDP_NUMREG	   ((0x09C0 >> 2) + 1)
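+
+/*
+ * Derivation: the last register, PDP_BURST_BOUNDARY, lives at byte offset
+ * 0x09C0.  Shifting right by 2 turns the byte offset into an index of
+ * 32-bit registers (0x09C0 >> 2 == 624) and the "+ 1" makes the count
+ * inclusive of that final register, giving 625 entries.
+ */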
+
+/* Info about video plane addresses */
+#define PDP_YADDR_BITS		(PDP_VID1BASEADDR_VID1BASEADDR_LENGTH)
+#define PDP_YADDR_ALIGN		5
+#define PDP_UADDR_BITS		(PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH)
+#define PDP_UADDR_ALIGN		5
+#define PDP_VADDR_BITS		(PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH)
+#define PDP_VADDR_ALIGN		5
+
+#define PDP_YSTRIDE_BITS	(PDP_VID1STRIDE_VID1STRIDE_LENGTH)
+#define PDP_YSTRIDE_ALIGN	5
+
+#define PDP_MAX_INPUT_WIDTH (PDP_VID1SIZE_VID1WIDTH_LSBMASK + 1)
+#define PDP_MAX_INPUT_HEIGHT (PDP_VID1SIZE_VID1HEIGHT_LSBMASK + 1)
+
+/* Maximum 6 bytes per pixel for RGB161616 */
+#define PDP_MAX_IMAGE_BYTES (PDP_MAX_INPUT_WIDTH * PDP_MAX_INPUT_HEIGHT * 6)
+
+/* Round up */
+#define PDP_MAX_IMAGE_PAGES ((PDP_MAX_IMAGE_BYTES + PAGE_SIZE - 1) / PAGE_SIZE)
+
+#define PDP_YADDR_MAX		(((1 << PDP_YADDR_BITS) - 1) << PDP_YADDR_ALIGN)
+#define PDP_UADDR_MAX		(((1 << PDP_UADDR_BITS) - 1) << PDP_UADDR_ALIGN)
+#define PDP_VADDR_MAX		(((1 << PDP_VADDR_BITS) - 1) << PDP_VADDR_ALIGN)
+#define PDP_YSTRIDE_MAX		((1 << PDP_YSTRIDE_BITS) << PDP_YSTRIDE_ALIGN)
+#define PDP_YADDR_ALIGNMASK	((1 << PDP_YADDR_ALIGN) - 1)
+#define PDP_UADDR_ALIGNMASK	((1 << PDP_UADDR_ALIGN) - 1)
+#define PDP_VADDR_ALIGNMASK	((1 << PDP_VADDR_ALIGN) - 1)
+#define PDP_YSTRIDE_ALIGNMASK	((1 << PDP_YSTRIDE_ALIGN) - 1)
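+
+/*
+ * Illustrative sketch (not part of this header) of how the *_ALIGN and
+ * *_ALIGNMASK values above are typically used when programming a luma
+ * base address; "addr" is a hypothetical byte address:
+ *
+ *	u32 pack_yaddr(u32 addr)
+ *	{
+ *		// must sit on a 32-byte (1 << PDP_YADDR_ALIGN) boundary
+ *		WARN_ON(addr & PDP_YADDR_ALIGNMASK);
+ *		// the register holds the address with the low bits dropped
+ *		return addr >> PDP_YADDR_ALIGN;
+ *	}
+ */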
+
+/* Field Values */
+#define PDP_SURF_PIXFMT_RGB332					  0x3
+#define PDP_SURF_PIXFMT_ARGB4444				  0x4
+#define PDP_SURF_PIXFMT_ARGB1555				  0x5
+#define PDP_SURF_PIXFMT_RGB888					  0x6
+#define PDP_SURF_PIXFMT_RGB565					  0x7
+#define PDP_SURF_PIXFMT_ARGB8888				  0x8
+#define PDP_SURF_PIXFMT_420_PL8					  0x9
+#define PDP_SURF_PIXFMT_420_PL8IVU				  0xA
+#define PDP_SURF_PIXFMT_420_PL8IUV				  0xB
+#define PDP_SURF_PIXFMT_422_UY0VY1_8888			  0xC
+#define PDP_SURF_PIXFMT_422_VY0UY1_8888			  0xD
+#define PDP_SURF_PIXFMT_422_Y0UY1V_8888			  0xE
+#define PDP_SURF_PIXFMT_422_Y0VY1U_8888			  0xF
+#define PDP_SURF_PIXFMT_AYUV8888				  0x10
+#define PDP_SURF_PIXFMT_YUV101010				  0x15
+#define PDP_SURF_PIXFMT_RGB101010				  0x17
+#define PDP_SURF_PIXFMT_420_PL10IUV				  0x18
+#define PDP_SURF_PIXFMT_420_PL10IVU				  0x19
+#define PDP_SURF_PIXFMT_422_PL10IUV				  0x1A
+#define PDP_SURF_PIXFMT_422_PL10IVU				  0x1B
+#define PDP_SURF_PIXFMT_RGB121212				  0x1E
+#define PDP_SURF_PIXFMT_RGB161616				  0x1F
+
+#define PDP_CTRL_CKEYSRC_PREV					  0x0
+#define PDP_CTRL_CKEYSRC_CUR					  0x1
+
+#define PDP_MEMCTRL_MEMREFRESH_ALWAYS			  0x0
+#define PDP_MEMCTRL_MEMREFRESH_HBLNK			  0x1
+#define PDP_MEMCTRL_MEMREFRESH_VBLNK			  0x2
+#define PDP_MEMCTRL_MEMREFRESH_BOTH				  0x3
+
+#define PDP_3D_CTRL_BLENDSEL_BGND_WITH_POS0		  0x0
+#define PDP_3D_CTRL_BLENDSEL_POS0_WITH_POS1		  0x1
+#define PDP_3D_CTRL_BLENDSEL_POS1_WITH_POS2		  0x2
+#define PDP_3D_CTRL_BLENDSEL_POS2_WITH_POS3		  0x3
+#define PDP_3D_CTRL_BLENDSEL_POS3_WITH_POS4		  0x4
+#define PDP_3D_CTRL_BLENDSEL_POS4_WITH_POS5		  0x5
+#define PDP_3D_CTRL_BLENDSEL_POS5_WITH_POS6		  0x6
+#define PDP_3D_CTRL_BLENDSEL_POS6_WITH_POS7		  0x7
+
+#define PDP_UADDR_UV_STRIDE_EQUAL_TO_Y_STRIDE		  0x0
+#define PDP_UADDR_UV_STRIDE_EQUAL_TO_DOUBLE_Y_STRIDE  0x1
+#define PDP_UADDR_UV_STRIDE_EQUAL_TO_HALF_Y_STRIDE	  0x2
+
+#define PDP_PROCAMP_OUTPUT_OFFSET_FRACTIONAL_BITS 1
+#define PDP_PROCAMP_COEFFICIENT_FRACTIONAL_BITS	  10
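+
+/*
+ * These are fixed-point encodings: with 10 fractional bits, a procamp
+ * coefficient of 1.0 is stored as 1 << 10 == 1024 (0.5 as 512), while
+ * the output offset, with a single fractional bit, is stored in steps
+ * of 0.5.
+ */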
+
+/*-------------------------------------------------------------------------------*/
+
+#endif /* _PDP2_REGS_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/plato/plato_drv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/plato/plato_drv.h
new file mode 100644
index 0000000..9fc77cb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/plato/plato_drv.h
@@ -0,0 +1,510 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File           plato_drv.h
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PLATO_DRV_H
+#define _PLATO_DRV_H
+
+/*
+ * This header contains the hooks for the Plato PCI driver, as used by
+ * the Rogue and PDP sub-devices, and the platform data passed to each
+ * of their drivers.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+
+#define PLATO_INIT_SUCCESS	0
+#define PLATO_INIT_FAILURE	1
+#define PLATO_INIT_RETRY	2
+
+#define PCI_VENDOR_ID_PLATO				(0x1AEE)
+#define PCI_DEVICE_ID_PLATO				(0x0003)
+
+#define PLATO_SYSTEM_NAME				"Plato"
+
+/* Interrupt defines */
+typedef enum _PLATO_INTERRUPT_ {
+	PLATO_INTERRUPT_GPU = 0,
+	PLATO_INTERRUPT_PDP,
+	PLATO_INTERRUPT_HDMI,
+	PLATO_INTERRUPT_MAX,
+} PLATO_INTERRUPT;
+
+#define PLATO_INT_SHIFT_GPU				(0)
+#define PLATO_INT_SHIFT_PDP				(8)
+#define PLATO_INT_SHIFT_HDMI			(9)
+#define PLATO_INT_SHIFT_HDMI_WAKEUP		(11)
+#define PLATO_INT_SHIFT_TEMP_A			(12)
+
+
+typedef struct plato_region_ {
+	resource_size_t base;
+	resource_size_t size;
+} plato_region;
+
+typedef struct plato_io_region_ {
+	plato_region region;
+	void __iomem *registers;
+} plato_io_region;
+
+/* The following structs are initialised and passed down by the parent plato
+ * driver to the respective sub-drivers
+ */
+
+#define PLATO_DEVICE_NAME_PDP			"plato_pdp"
+#define PLATO_PDP_RESOURCE_REGS			"pdp-regs"
+#define PLATO_PDP_RESOURCE_BIF_REGS		"pdp-bif-regs"
+
+#define PLATO_DEVICE_NAME_HDMI			"plato_hdmi"
+#define PLATO_HDMI_RESOURCE_REGS		"hdmi-regs"
+
+struct plato_pdp_platform_data {
+	resource_size_t memory_base;
+
+	/* The following is used by the drm_pdp driver as it manages the
+	 * pdp memory
+	 */
+	resource_size_t pdp_heap_memory_base;
+	resource_size_t pdp_heap_memory_size;
+};
+
+typedef struct plato_hdmi_platform_data_ {
+	resource_size_t plato_memory_base;
+} plato_hdmi_platform_data;
+
+
+#define PLATO_DEVICE_NAME_ROGUE			"plato_rogue"
+#define PLATO_ROGUE_RESOURCE_REGS		"rogue-regs"
+
+typedef struct plato_rogue_platform_data_ {
+
+	/* The base address of the plato memory (CPU physical address) -
+	 * used to convert from CPU-Physical to device-physical addresses
+	 */
+	resource_size_t plato_memory_base;
+
+	/* The following is used to setup the services heaps */
+	int has_nonmappable;
+	plato_region rogue_heap_mappable;
+	resource_size_t rogue_heap_dev_addr;
+	plato_region rogue_heap_nonmappable;
+#if defined(SUPPORT_PLATO_DISPLAY)
+	plato_region pdp_heap;
+#endif
+} plato_rogue_platform_data;
+
+typedef struct plato_interrupt_handler_ {
+	bool enabled;
+	void (*handler_function)(void *);
+	void *handler_data;
+} plato_interrupt_handler;
+
+typedef struct plato_device_ {
+	struct pci_dev *pdev;
+
+	plato_io_region sys_io;
+	plato_io_region aon_regs;
+
+	spinlock_t interrupt_handler_lock;
+	spinlock_t interrupt_enable_lock;
+
+	plato_interrupt_handler
+		interrupt_handlers[PLATO_INTERRUPT_MAX];
+
+	plato_region rogue_mem;
+	plato_region rogue_heap_mappable;
+	plato_region rogue_heap_nonmappable;
+	int has_nonmappable;
+
+	resource_size_t dev_mem_base; /* Device memory base address */
+
+	struct platform_device *rogue_dev;
+
+#if defined(SUPPORT_PLATO_DISPLAY)
+	struct platform_device *pdp_dev;
+	plato_region pdp_heap;
+
+	struct platform_device *hdmi_dev;
+#endif
+
+#if defined(CONFIG_MTRR) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	int mtrr;
+#endif
+
+#if defined(PLATO_DEBUGFS)
+	struct dentry *debugfs_plato_dir;
+	struct dentry *debugfs_rogue_name;
+#endif
+} plato_device;
+
+#if defined(PLATO_LOG_CHECKPOINTS)
+#define PLATO_CHECKPOINT(p) dev_info(&(p)->pdev->dev, "- %s: %d", __func__, __LINE__)
+#else
+#define PLATO_CHECKPOINT(p) do { } while (0)
+#endif
+
+#define plato_write_reg32(base, offset, value) \
+	iowrite32(value, (base) + (offset))
+#define plato_read_reg32(base, offset) ioread32((base) + (offset))
+#define plato_sleep_ms(x) msleep(x)
+#define plato_sleep_us(x) msleep((x) / 1000)
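+
+/*
+ * Note that plato_sleep_us() is only millisecond-accurate: the request
+ * is divided by 1000 and passed to msleep(), so plato_sleep_us(1500)
+ * sleeps for roughly 1 ms and anything below 1000 us degenerates to
+ * msleep(0).
+ */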
+
+/* Valid values for the PLATO_MEMORY_CONFIG configuration option */
+#define PLATO_MEMORY_LOCAL			(1)
+#define PLATO_MEMORY_HOST			(2)
+#define PLATO_MEMORY_HYBRID			(3)
+
+#if defined(PLATO_MEMORY_CONFIG)
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+#define PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL 2
+#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL)
+#define PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL 1
+#endif
+#endif /* PLATO_MEMORY_CONFIG */
+
+#define DCPDP_PHYS_HEAP_ID PVRSRV_DEVICE_PHYS_HEAP_PDP_LOCAL
+
+#define PLATO_PDP_MEM_SIZE			(32 * 1024 * 1024)
+
+#define SYS_PLATO_REG_PCI_BASENUM	(1)
+#define SYS_PLATO_REG_REGION_SIZE	(4 * 1024 * 1024)
+
+/*
+ * Give system region a whole span of the reg space including
+ * RGX registers. That's because there are sys register segments
+ * both before and after the RGX segment.
+ */
+#define SYS_PLATO_REG_SYS_OFFSET			(0x0)
+#define SYS_PLATO_REG_SYS_SIZE				(4 * 1024 * 1024)
+
+/* Entire Peripheral region */
+#define SYS_PLATO_REG_PERIP_OFFSET			(0x20000)
+#define SYS_PLATO_REG_PERIP_SIZE			(164 * 1024)
+
+/* Chip level registers */
+#define SYS_PLATO_REG_CHIP_LEVEL_OFFSET		(SYS_PLATO_REG_PERIP_OFFSET)
+#define SYS_PLATO_REG_CHIP_LEVEL_SIZE		(64 * 1024)
+
+#define SYS_PLATO_REG_TEMPA_OFFSET			(0x80000)
+#define SYS_PLATO_REG_TEMPA_SIZE			(64 * 1024)
+
+/* USB, DMA not included */
+
+#define SYS_PLATO_REG_DDR_A_CTRL_OFFSET		(0x120000)
+#define SYS_PLATO_REG_DDR_A_CTRL_SIZE		(64 * 1024)
+
+#define SYS_PLATO_REG_DDR_B_CTRL_OFFSET		(0x130000)
+#define SYS_PLATO_REG_DDR_B_CTRL_SIZE		(64 * 1024)
+
+#define SYS_PLATO_REG_DDR_A_PUBL_OFFSET		(0x140000)
+#define SYS_PLATO_REG_DDR_A_PUBL_SIZE		(64 * 1024)
+
+#define SYS_PLATO_REG_DDR_B_PUBL_OFFSET		(0x150000)
+#define SYS_PLATO_REG_DDR_B_PUBL_SIZE		(64 * 1024)
+
+#define SYS_PLATO_REG_NOC_OFFSET			(0x160000)
+#define SYS_PLATO_REG_NOC_SIZE		        (64 * 1024)
+
+/* Debug NOC registers */
+#define SYS_PLATO_REG_NOC_DBG_DDR_A_CTRL_OFFSET (0x1500)
+#define SYS_PLATO_REG_NOC_DBG_DDR_A_DATA_OFFSET (0x1580)
+#define SYS_PLATO_REG_NOC_DBG_DDR_A_PUBL_OFFSET (0x1600)
+#define SYS_PLATO_REG_NOC_DBG_DDR_B_CTRL_OFFSET (0x1680)
+#define SYS_PLATO_REG_NOC_DBG_DDR_B_DATA_OFFSET (0x1700)
+#define SYS_PLATO_REG_NOC_DBG_DDR_B_PUBL_OFFSET (0x1780)
+#define SYS_PLATO_REG_NOC_DBG_DISPLAY_S_OFFSET  (0x1800)
+#define SYS_PLATO_REG_NOC_DBG_GPIO_0_S_OFFSET   (0x1900)
+#define SYS_PLATO_REG_NOC_DBG_GPIO_1_S_OFFSET   (0x1980)
+#define SYS_PLATO_REG_NOC_DBG_GPU_S_OFFSET      (0x1A00)
+#define SYS_PLATO_REG_NOC_DBG_PCI_PHY_OFFSET    (0x1A80)
+#define SYS_PLATO_REG_NOC_DBG_PCI_REG_OFFSET    (0x1B00)
+#define SYS_PLATO_REG_NOC_DBG_PCI_S_OFFSET      (0x1B80)
+#define SYS_PLATO_REG_NOC_DBG_PERIPH_S_OFFSET   (0x1C00)
+#define SYS_PLATO_REG_NOC_DBG_RET_REG_OFFSET    (0x1D00)
+#define SYS_PLATO_REG_NOC_DBG_SERVICE_OFFSET    (0x1E00)
+
+#define SYS_PLATO_REG_RGX_OFFSET			(0x170000)
+#define SYS_PLATO_REG_RGX_SIZE				(64 * 1024)
+
+#define SYS_PLATO_REG_AON_OFFSET			(0x180000)
+#define SYS_PLATO_REG_AON_SIZE				(64 * 1024)
+
+#define SYS_PLATO_REG_PDP_OFFSET			(0x200000)
+#define SYS_PLATO_REG_PDP_SIZE				(128 * 1024)
+
+#define SYS_PLATO_REG_PDP_BIF_OFFSET        (SYS_PLATO_REG_PDP_OFFSET + SYS_PLATO_REG_PDP_SIZE)
+#define SYS_PLATO_REG_PDP_BIF_SIZE          (0x200)
+
+#define SYS_PLATO_REG_HDMI_OFFSET           (SYS_PLATO_REG_PDP_OFFSET + 0x20000)
+#define SYS_PLATO_REG_HDMI_SIZE             (128 * 1024)
+
+/* Device memory (including HP mapping) on base register 4 */
+#define SYS_DEV_MEM_PCI_BASENUM		(4)
+
+/* Device memory size */
+#define ONE_GB_IN_BYTES					(0x40000000ULL)
+#define SYS_DEV_MEM_REGION_SIZE			(PLATO_MEMORY_SIZE_GIGABYTES * ONE_GB_IN_BYTES)
+
+/* Plato DDR offset in device memory map at 32GB */
+#define PLATO_DDR_DEV_PHYSICAL_BASE		(0x800000000)
+
+/* DRAM is split at 48GB */
+#define PLATO_DRAM_SPLIT_ADDR			(0xc00000000)
+
+/*
+ * The Plato DDR region is aliased if less than 32GB of memory is
+ * present.  This defines the memory base closest to the DRAM split
+ * point.  If 32GB is present, this is equal to
+ * PLATO_DDR_DEV_PHYSICAL_BASE.
+ */
+#define PLATO_DDR_ALIASED_DEV_PHYSICAL_BASE \
+	(PLATO_DRAM_SPLIT_ADDR - (SYS_DEV_MEM_REGION_SIZE >> 1))
+
+#define PLATO_DDR_ALIASED_DEV_PHYSICAL_END \
+	(PLATO_DRAM_SPLIT_ADDR + (SYS_DEV_MEM_REGION_SIZE >> 1))
+
+#define PLATO_DDR_ALIASED_DEV_SEGMENT_SIZE \
+	((32ULL / PLATO_MEMORY_SIZE_GIGABYTES) * ONE_GB_IN_BYTES)
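+
+/*
+ * Worked example of the aliasing arithmetic: with, for instance,
+ * PLATO_MEMORY_SIZE_GIGABYTES == 8, the aliased base is
+ * 48GB - 4GB = 44GB (0xB00000000), the aliased end is
+ * 48GB + 4GB = 52GB (0xD00000000), and each alias segment spans
+ * (32 / 8) * 1GB = 4GB, keeping the populated window centred on the
+ * DRAM split point.
+ */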
+
+/* Plato Host memory offset in device memory map at 512GB */
+#define PLATO_HOSTRAM_DEV_PHYSICAL_BASE (0x8000000000)
+
+/* Plato PLL, DDR/GPU, PDP and HDMI-SFR/CEC clocks */
+#define PLATO_PLL_REF_CLOCK_SPEED	(19200000)
+
+/* 600 MHz */
+#define PLATO_MEM_CLOCK_SPEED		(600000000)
+#define PLATO_MIN_MEM_CLOCK_SPEED	(600000000)
+#define PLATO_MAX_MEM_CLOCK_SPEED	(800000000)
+
+/* 396 MHz (~400 MHz) on HW, around 1 MHz on the emulator */
+#if defined(EMULATOR) || defined(VIRTUAL_PLATFORM)
+#define	PLATO_RGX_CORE_CLOCK_SPEED	(1000000)
+#else
+
+#define	PLATO_RGX_CORE_CLOCK_SPEED	(396000000)
+#define	PLATO_RGX_MIN_CORE_CLOCK_SPEED	(396000000)
+#define	PLATO_RGX_MAX_CORE_CLOCK_SPEED	(742500000)
+#endif
+
+#define PLATO_MIN_PDP_CLOCK_SPEED		(165000000)
+#define PLATO_TARGET_HDMI_SFR_CLOCK_SPEED	(27000000)
+#define PLATO_TARGET_HDMI_CEC_CLOCK_SPEED	(32768)
+
+#define REG_TO_CELSIUS(reg)			(((reg) * 352 / 4096) - 109)
+#define CELSIUS_TO_REG(temp)		((((temp) + 109) * 4096) / 352)
+#define PLATO_MAX_TEMP_CELSIUS		(100)
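+
+/*
+ * Worked example: a raw reading of 2048 converts to
+ * ((2048 * 352) / 4096) - 109 = 176 - 109 = 67 degrees Celsius, and
+ * CELSIUS_TO_REG(100) gives ((100 + 109) * 4096) / 352 = 2432, the raw
+ * value corresponding to PLATO_MAX_TEMP_CELSIUS.
+ */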
+
+#define PLATO_LMA_HEAP_REGION_MAPPABLE			0
+#define PLATO_LMA_HEAP_REGION_NONMAPPABLE		1
+
+typedef struct {
+	char *description;
+	unsigned int offset;
+	unsigned int value;
+} PLATO_DBG_REG;
+
+#if defined(ENABLE_PLATO_HDMI)
+
+#if defined(HDMI_PDUMP)
+/* Hard coded video formats for pdump type run only */
+#define VIDEO_FORMAT_1280_720p          0
+#define VIDEO_FORMAT_1920_1080p         1
+#define DC_DEFAULT_VIDEO_FORMAT     (VIDEO_FORMAT_1920_1080p)
+#endif
+
+#define DEFAULT_BORDER_WIDTH_PIXELS     (0)
+#define HDMI_DTD_MAX                    (20)
+
+typedef enum _ENCODING_T_ {
+	ENCODING_RGB = 0,
+	ENCODING_YCC444,
+	ENCODING_YCC422,
+	ENCODING_YCC420
+} ENCODING_T;
+
+typedef enum _COLORIMETRY_T_ {
+	ITU601 = 1,
+	ITU709,
+	EXTENDED_COLORIMETRY
+} COLORIMETRY_T;
+
+typedef struct _DTD_ {
+	/** VIC code */
+	u32 mCode;
+	/** Identifies modes that can ONLY be displayed in YCC 4:2:0 */
+	u8 mLimitedToYcc420;
+	/** Identifies modes that can also be displayed in YCC 4:2:0 */
+	u8 mYcc420;
+	u16 mPixelRepetitionInput;
+	/** In units of 10 kHz */
+	u16 mPixelClock;
+	/** 1 for interlaced, 0 progressive */
+	u8 mInterlaced;
+	u16 mHActive;
+	u16 mHBlanking;
+	u16 mHBorder;
+	u16 mHImageSize;
+	u16 mHFrontPorchWidth;
+	u16 mHBackPorchWidth;
+	u16 mHSyncPulseWidth;
+	/** 0 for active low, 1 for active high */
+	u8 mHSyncPolarity;
+	u16 mVActive;
+	u16 mVBlanking;
+	u16 mVBorder;
+	u16 mVImageSize;
+	u16 mVFrontPorchWidth;
+	u16 mVBackPorchWidth;
+	u16 mVSyncPulseWidth;
+	/** 0 for active low, 1 for active high */
+	u8 mVSyncPolarity;
+
+	u8 mValid;
+} DTD;
+
+/* Monitor range limits come from the Display Descriptor (EDID) */
+typedef struct _MONITOR_RANGE_LIMITS {
+	u8 vMinRate;
+	u8 vMaxRate;
+	u8 hMinRate;
+	u8 hMaxRate;
+	u8 maxPixelClock;
+	u8 valid;
+} MONITOR_RANGE_LIMITS;
+
+typedef struct _VIDEO_PARAMS {
+	u8                      mHdmi;
+	ENCODING_T              mEncodingOut;
+	ENCODING_T              mEncodingIn;
+	u8                      mColorResolution;
+	u8                      mPixelRepetitionFactor;
+	DTD                     mDtdList[HDMI_DTD_MAX];
+	u8                      mDtdIndex;
+	u8                      mDtdActiveIndex;
+	u8                      mRgbQuantizationRange;
+	u8                      mPixelPackingDefaultPhase;
+	u8                      mColorimetry;
+	u8                      mScanInfo;
+	u8                      mActiveFormatAspectRatio;
+	u8                      mNonUniformScaling;
+	u8                      mExtColorimetry;
+	u8                      mItContent;
+	u16                     mEndTopBar;
+	u16                     mStartBottomBar;
+	u16                     mEndLeftBar;
+	u16                     mStartRightBar;
+	u16                     mCscFilter;
+	u16                     mCscA[4];
+	u16                     mCscC[4];
+	u16                     mCscB[4];
+	u16                     mCscScale;
+	u8                      mHdmiVideoFormat;
+	u8                      m3dStructure;
+	u8                      m3dExtData;
+	u8                      mHdmiVic;
+	u8                      mDataEnablePolarity;
+	MONITOR_RANGE_LIMITS    mRangeLimits;
+	u8                      mPreferredTimingIncluded;
+} VIDEO_PARAMS;
+
+#endif /* ENABLE_PLATO_HDMI */
+
+/* Exposed APIs */
+int plato_enable(struct device *dev);
+void plato_disable(struct device *dev);
+
+int plato_enable_interrupt(struct device *dev, PLATO_INTERRUPT interrupt_id);
+int plato_disable_interrupt(struct device *dev, PLATO_INTERRUPT interrupt_id);
+
+int plato_set_interrupt_handler(struct device *dev,
+	PLATO_INTERRUPT interrupt_id,
+	void (*handler_function)(void *),
+	void *handler_data);
+unsigned int plato_core_clock_speed(struct device *dev);
+unsigned int plato_mem_clock_speed(struct device *dev);
+unsigned int plato_pll_clock_speed(struct device *dev,
+	unsigned int clock_speed);
+void plato_enable_pdp_clock(struct device *dev);
+
+int plato_debug_info(struct device *dev,
+	PLATO_DBG_REG *noc_dbg_regs,
+	PLATO_DBG_REG *aon_dbg_regs);
+
+/* Internal */
+int plato_memory_init(plato_device *plato);
+void plato_memory_deinit(plato_device *plato);
+int plato_cfg_init(plato_device *plato);
+int request_pci_io_addr(struct pci_dev *pdev, u32 index,
+	resource_size_t offset, resource_size_t length);
+void release_pci_io_addr(struct pci_dev *pdev, u32 index,
+	resource_size_t start, resource_size_t length);
+
+#if defined(PLATO_SYSTEM_PDUMP)
+#include "plato_pdump.h"
+
+static inline void plato_write_reg32_pdump(void *base, u32 offset, u32 value)
+{
+	plato_write_reg32(base, offset, value);
+	plato_pdump_reg32(base, offset, value, PLATO_SYSTEM_NAME);
+}
+
+static inline void plato_sleep_ms_pdump(u32 intrvl)
+{
+	msleep(intrvl);
+	plato_pdump_idl(intrvl);
+}
+#undef plato_write_reg32
+#undef plato_sleep_ms
+
+#define plato_write_reg32 plato_write_reg32_pdump
+#define plato_sleep_ms plato_sleep_ms_pdump
+#endif
+
+#endif /* _PLATO_DRV_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_buffer_sync.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_buffer_sync.c
new file mode 100644
index 0000000..737d7ff
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_buffer_sync.c
@@ -0,0 +1,1035 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          Linux buffer sync interface
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "services_kernel_client.h"
+#include "pvr_buffer_sync.h"
+#include "pvr_buffer_sync_shared.h"
+#include "pvr_fence.h"
+
+
+struct pvr_buffer_sync_context {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	struct mutex ctx_lock;
+#endif
+	struct pvr_fence_context *fence_ctx;
+	struct ww_acquire_ctx acquire_ctx;
+};
+
+struct pvr_buffer_sync_check_data {
+	struct dma_fence_cb base;
+
+	u32 nr_fences;
+	struct pvr_fence **fences;
+};
+
+struct pvr_buffer_sync_append_data {
+	bool appended;
+
+	struct pvr_buffer_sync_context *ctx;
+
+	u32 nr_checks;
+	struct _RGXFWIF_DEV_VIRTADDR_ *check_ufo_addrs;
+	u32 *check_values;
+
+	u32 nr_updates;
+	struct _RGXFWIF_DEV_VIRTADDR_ *update_ufo_addrs;
+	u32 *update_values;
+
+	u32 nr_pmrs;
+	struct _PMR_ **pmrs;
+	u32 *pmr_flags;
+
+	struct pvr_fence *update_fence;
+};
+
+
+static struct reservation_object *
+pmr_reservation_object_get(struct _PMR_ *pmr)
+{
+	struct dma_buf *dmabuf;
+
+	dmabuf = PhysmemGetDmaBuf(pmr);
+	if (dmabuf)
+		return dmabuf->resv;
+
+	return NULL;
+}
+
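+/*
+ * Lock the reservation objects of all the PMRs using the wait/wound
+ * mutex protocol: if any lock attempt returns -EDEADLK, every lock taken
+ * so far is dropped, the contended lock is re-acquired with
+ * ww_mutex_lock_slow_interruptible() (which sleeps until it becomes
+ * available) and the whole sequence is retried.  This guarantees forward
+ * progress when several contexts lock overlapping buffer sets in
+ * different orders.
+ */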
+static int
+pvr_buffer_sync_pmrs_lock(struct pvr_buffer_sync_context *ctx,
+			  u32 nr_pmrs,
+			  struct _PMR_ **pmrs)
+{
+	struct reservation_object *resv, *cresv = NULL, *lresv = NULL;
+	int i, err;
+	struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	mutex_lock(&ctx->ctx_lock);
+#endif
+
+	ww_acquire_init(acquire_ctx, &reservation_ww_class);
+retry:
+	for (i = 0; i < nr_pmrs; i++) {
+		resv = pmr_reservation_object_get(pmrs[i]);
+		if (!resv) {
+			pr_err("%s: Failed to get reservation object from pmr %p\n",
+			       __func__, pmrs[i]);
+			err = -EINVAL;
+			goto fail;
+		}
+
+		if (resv != lresv) {
+			err = ww_mutex_lock_interruptible(&resv->lock,
+							  acquire_ctx);
+			if (err) {
+				cresv = (err == -EDEADLK) ? resv : NULL;
+				goto fail;
+			}
+		} else {
+			lresv = NULL;
+		}
+	}
+
+	ww_acquire_done(acquire_ctx);
+
+	return 0;
+
+fail:
+	while (i--) {
+		resv = pmr_reservation_object_get(pmrs[i]);
+		if (WARN_ON_ONCE(!resv))
+			continue;
+		ww_mutex_unlock(&resv->lock);
+	}
+
+	if (lresv)
+		ww_mutex_unlock(&lresv->lock);
+
+	if (cresv) {
+		err = ww_mutex_lock_slow_interruptible(&cresv->lock,
+						       acquire_ctx);
+		if (!err) {
+			lresv = cresv;
+			cresv = NULL;
+			goto retry;
+		}
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	mutex_unlock(&ctx->ctx_lock);
+#endif
+	return err;
+}
+
+static void
+pvr_buffer_sync_pmrs_unlock(struct pvr_buffer_sync_context *ctx,
+			    u32 nr_pmrs,
+			    struct _PMR_ **pmrs)
+{
+	struct reservation_object *resv;
+	int i;
+	struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx;
+
+	for (i = 0; i < nr_pmrs; i++) {
+		resv = pmr_reservation_object_get(pmrs[i]);
+		if (WARN_ON_ONCE(!resv))
+			continue;
+		ww_mutex_unlock(&resv->lock);
+	}
+
+	ww_acquire_fini(acquire_ctx);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	mutex_unlock(&ctx->ctx_lock);
+#endif
+}
+
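+/*
+ * Count the fences a kick has to wait for.  A writer (exclusive access)
+ * waits for all shared (reader) fences, falling back to the exclusive
+ * fence only when no shared fences are present; a reader only waits for
+ * the current exclusive (writer) fence.
+ */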
+static u32
+pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, struct _PMR_ **pmrs,
+				 u32 *pmr_flags)
+{
+	struct reservation_object *resv;
+	struct reservation_object_list *resv_list;
+	struct dma_fence *fence;
+	u32 fence_count = 0;
+	bool exclusive;
+	int i;
+
+	for (i = 0; i < nr_pmrs; i++) {
+		exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
+
+		resv = pmr_reservation_object_get(pmrs[i]);
+		if (WARN_ON_ONCE(!resv))
+			continue;
+
+		resv_list = reservation_object_get_list(resv);
+		fence = reservation_object_get_excl(resv);
+
+		if (fence &&
+		    (!exclusive || !resv_list || !resv_list->shared_count))
+			fence_count++;
+
+		if (exclusive && resv_list)
+			fence_count += resv_list->shared_count;
+	}
+
+	return fence_count;
+}
+
+static struct pvr_buffer_sync_check_data *
+pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx,
+				    u32 nr_pmrs,
+				    struct _PMR_ **pmrs,
+				    u32 *pmr_flags)
+{
+	struct pvr_buffer_sync_check_data *data;
+	struct reservation_object *resv;
+	struct reservation_object_list *resv_list;
+	struct dma_fence *fence;
+	u32 fence_count;
+	bool exclusive;
+	int i, j;
+	int err;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	fence_count = pvr_buffer_sync_pmrs_fence_count(nr_pmrs, pmrs,
+						       pmr_flags);
+	if (fence_count) {
+		data->fences = kcalloc(fence_count, sizeof(*data->fences),
+				       GFP_KERNEL);
+		if (!data->fences)
+			goto err_check_data_free;
+	}
+
+	for (i = 0; i < nr_pmrs; i++) {
+		resv = pmr_reservation_object_get(pmrs[i]);
+		if (WARN_ON_ONCE(!resv))
+			continue;
+
+		exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
+		if (!exclusive) {
+			err = reservation_object_reserve_shared(resv);
+			if (err)
+				goto err_destroy_fences;
+		}
+
+		resv_list = reservation_object_get_list(resv);
+		fence = reservation_object_get_excl(resv);
+
+		if (fence &&
+		    (!exclusive || !resv_list || !resv_list->shared_count)) {
+			data->fences[data->nr_fences++] =
+				pvr_fence_create_from_fence(fence_ctx,
+							    fence,
+							    "exclusive check fence");
+			if (!data->fences[data->nr_fences - 1]) {
+				data->nr_fences--;
+				PVR_FENCE_TRACE(fence,
+						"waiting on exclusive fence\n");
+				WARN_ON(dma_fence_wait(fence, true) <= 0);
+			}
+		}
+
+		if (exclusive && resv_list) {
+			for (j = 0; j < resv_list->shared_count; j++) {
+				fence = rcu_dereference_protected(resv_list->shared[j],
+								  reservation_object_held(resv));
+				data->fences[data->nr_fences++] =
+					pvr_fence_create_from_fence(fence_ctx,
+								    fence,
+								    "check fence");
+				if (!data->fences[data->nr_fences - 1]) {
+					data->nr_fences--;
+					PVR_FENCE_TRACE(fence,
+							"waiting on non-exclusive fence\n");
+					WARN_ON(dma_fence_wait(fence, true) <= 0);
+				}
+			}
+		}
+	}
+
+	WARN_ON((i != nr_pmrs) || (data->nr_fences != fence_count));
+
+	return data;
+
+err_destroy_fences:
+	for (i = 0; i < data->nr_fences; i++)
+		pvr_fence_destroy(data->fences[i]);
+	kfree(data->fences);
+err_check_data_free:
+	kfree(data);
+	return NULL;
+}
+
+static void
+pvr_buffer_sync_check_fences_destroy(struct pvr_buffer_sync_check_data *data)
+{
+	int i;
+
+	for (i = 0; i < data->nr_fences; i++)
+		pvr_fence_destroy(data->fences[i]);
+
+	kfree(data->fences);
+	kfree(data);
+}
+
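+/*
+ * Callback attached to the update fence.  As the update fence is the
+ * last fence to be signalled for a kick, this single callback is enough
+ * to release all of the check fences taken for that kick, together with
+ * the update fence itself.
+ */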
+static void
+pvr_buffer_sync_check_data_cleanup(struct dma_fence *fence,
+				   struct dma_fence_cb *cb)
+{
+	struct pvr_buffer_sync_check_data *data =
+		container_of(cb, struct pvr_buffer_sync_check_data, base);
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+	pvr_buffer_sync_check_fences_destroy(data);
+
+	pvr_fence_destroy(pvr_fence);
+}
+
+static int
+pvr_buffer_sync_append_fences(u32 nr,
+			      struct _RGXFWIF_DEV_VIRTADDR_ *ufo_addrs,
+			      u32 *values,
+			      u32 nr_fences,
+			      struct pvr_fence **pvr_fences,
+			      u32 *nr_out,
+			      struct _RGXFWIF_DEV_VIRTADDR_ **ufo_addrs_out,
+			      u32 **values_out)
+{
+	u32 nr_new = nr + nr_fences;
+	struct _RGXFWIF_DEV_VIRTADDR_ *ufo_addrs_new = NULL;
+	u32 *values_new = NULL;
+	int i;
+	int err;
+
+	if (!nr_new)
+		goto finish;
+
+	ufo_addrs_new = kmalloc_array(nr_new, sizeof(*ufo_addrs_new),
+				      GFP_KERNEL);
+	if (!ufo_addrs_new)
+		return -ENOMEM;
+
+	values_new = kmalloc_array(nr_new, sizeof(*values_new), GFP_KERNEL);
+	if (!values_new) {
+		err = -ENOMEM;
+		goto err_free_ufo_addrs;
+	}
+
+	/* Copy the original data */
+	if (nr) {
+		memcpy(ufo_addrs_new, ufo_addrs, sizeof(*ufo_addrs) * nr);
+		memcpy(values_new, values, sizeof(*values) * nr);
+	}
+
+	/* Append the fence data */
+	for (i = 0; i < nr_fences; i++) {
+		u32 sync_addr;
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		PVRSRV_ERROR srv_err;
+
+		srv_err = SyncPrimGetFirmwareAddr(pvr_fences[i]->sync,
+						  &sync_addr);
+		if (srv_err != PVRSRV_OK) {
+			err = -EINVAL;
+			goto err_free_values;
+		}
+#else
+		sync_addr = SyncCheckpointGetFirmwareAddr(pvr_fences[i]->sync_checkpoint);
+#endif
+		ufo_addrs_new[i + nr].ui32Addr = sync_addr;
+
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		values_new[i + nr] = PVR_FENCE_SYNC_VAL_SIGNALED;
+#else
+		values_new[i + nr] = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+		SyncCheckpointCCBEnqueued(pvr_fences[i]->sync_checkpoint);
+#endif
+	}
+
+finish:
+	*nr_out = nr_new;
+	*ufo_addrs_out = ufo_addrs_new;
+	*values_out = values_new;
+
+	return 0;
+
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+err_free_values:
+	kfree(values_new);
+#endif
+err_free_ufo_addrs:
+	kfree(ufo_addrs_new);
+	return err;
+}
+
+struct pvr_buffer_sync_context *
+pvr_buffer_sync_context_create(void *dev_cookie)
+{
+	struct pvr_buffer_sync_context *ctx;
+	int err;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		err = -ENOMEM;
+		goto err_exit;
+	}
+
+	ctx->fence_ctx = pvr_fence_context_create(dev_cookie, "rogue-gpu");
+	if (!ctx->fence_ctx) {
+		err = -ENOMEM;
+		goto err_free_ctx;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	mutex_init(&ctx->ctx_lock);
+#endif
+
+	return ctx;
+
+err_free_ctx:
+	kfree(ctx);
+err_exit:
+	return ERR_PTR(err);
+}
+
+void pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx)
+{
+	pvr_fence_context_destroy(ctx->fence_ctx);
+	kfree(ctx);
+}
+
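+/*
+ * Typical call sequence for users of this interface (a sketch based on
+ * the functions below; error handling elided):
+ *
+ *	ctx = pvr_buffer_sync_context_create(dev_cookie);
+ *	...
+ *	pvr_buffer_sync_append_start(ctx, nr_pmrs, pmrs, pmr_flags,
+ *				     nr_checks, check_ufo_addrs, check_values,
+ *				     nr_updates, update_ufo_addrs,
+ *				     update_values, &data);
+ *	// submit the kick using the combined check/update UFO lists in data
+ *	pvr_buffer_sync_append_finish(data);	// or _abort() on failure
+ *	...
+ *	pvr_buffer_sync_context_destroy(ctx);
+ */
+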
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+int
+pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx,
+					  u32 nr_pmrs,
+					  struct _PMR_ **pmrs,
+					  u32 *pmr_flags,
+					  u32 *nr_checks,
+					  PSYNC_CHECKPOINT **fence_checkpoint_handles,
+					  PSYNC_CHECKPOINT *update_checkpoint_handle,
+					  struct pvr_buffer_sync_append_data **data_out)
+{
+	struct pvr_buffer_sync_append_data *data;
+	PSYNC_CHECKPOINT *fence_checkpoints = NULL;
+	struct pvr_buffer_sync_check_data *check_data;
+	struct pvr_fence *update_fence;
+	int i;
+	int err = 0;
+
+	//pr_err("%s: ->ENTRY (%d pmrs)\n", __func__, nr_pmrs);
+
+	if ((nr_pmrs && !(pmrs && pmr_flags)) ||
+	    ((!nr_checks) || (!fence_checkpoint_handles) || (!update_checkpoint_handle)))
+		return -EINVAL;
+
+	for (i = 0; i < nr_pmrs; i++) {
+		if (!(pmr_flags[i] & PVR_BUFFER_FLAG_MASK)) {
+			pr_err("%s: Invalid flags %#08x for pmr %p\n",
+			       __func__, pmr_flags[i], pmrs[i]);
+			return -EINVAL;
+		}
+	}
+
+	/* Initialise the outputs so early-out paths return sane values */
+	*nr_checks = 0;
+	*fence_checkpoint_handles = NULL;
+	*update_checkpoint_handle = NULL;
+	*data_out = NULL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->appended = true;
+	data->ctx = ctx;
+	data->nr_pmrs = nr_pmrs;
+	data->pmrs = pmrs;
+	data->pmr_flags = pmr_flags;
+
+#if defined(NO_HARDWARE)
+	/*
+	 * For NO_HARDWARE there's no checking or updating of sync checkpoints
+	 * which means SW waits on our fences will cause a deadlock (since they
+	 * will never be signalled). Avoid this by not creating any fences.
+	 */
+	*nr_checks = 0;
+	*fence_checkpoint_handles = NULL;
+	goto err_nohw;
+#endif
+
+	if (nr_pmrs > 0) {
+		/* Allocate memory for 32 sync checkpoints - this will be freed by the caller */
+		fence_checkpoints = kcalloc(32, sizeof(*fence_checkpoints),
+					    GFP_KERNEL);
+		if (!fence_checkpoints) {
+			err = -ENOMEM;
+			goto err_free_mem;
+		}
+		//pr_err("%s: ...done\n", __func__);
+
+		//pr_err("%s: Locking pmrs...\n", __func__);
+		err = pvr_buffer_sync_pmrs_lock(ctx, nr_pmrs, pmrs);
+		if (err) {
+			pr_err("%s: failed to lock pmrs (errno=%d)\n",
+				   __func__, err);
+			goto err_free_mem;
+		}
+		//pr_err("%s: done...\n", __func__);
+
+		//pr_err("%s: Creating the check fence data...\n", __func__);
+		/* create the check data */
+		check_data = pvr_buffer_sync_check_fences_create(ctx->fence_ctx,
+								 nr_pmrs,
+								 pmrs,
+								 pmr_flags);
+		if (!check_data) {
+			err = -ENOMEM;
+			goto err_pmrs_unlock;
+		}
+		//pr_err("%s: done (check_data=<%p>, check_data->fences=<%p>...\n", __func__, (void*)check_data, (void*)check_data->fences);
+
+		//pr_err("%s: Creating the update fence...\n", __func__);
+		/* create the update fence */
+		update_fence = pvr_fence_create(ctx->fence_ctx, "update fence");
+		if (!update_fence) {
+			err = -ENOMEM;
+			goto err_free_check_data;
+		}
+		//pr_err("%s: done (update_fence=<%p>)...\n", __func__, (void*)update_fence);
+
+		//pr_err("%s: check_data->fences=%d\n", __func__, check_data->nr_fences);
+		if (check_data->nr_fences > 0) {
+			//pr_err("%s: Copying checkpoints from check_data->fences<%p> into fence_checkpoints<%p>...\n", __func__, (void*)check_data->fences, (void*)fence_checkpoints);
+			//pr_err("%s: Calling pvr_fence_get_checkpoints()...\n", __func__);
+			/* Copy check data sync checkpoints into fence_checkpoints */
+			pvr_fence_get_checkpoints(check_data->fences, check_data->nr_fences, fence_checkpoints);
+			//pr_err("%s: done...\n", __func__);
+			*fence_checkpoint_handles = fence_checkpoints;
+
+			if (0) { /* Enable to dump the list of sync checkpoints */
+				PSYNC_CHECKPOINT *next_checkpoint = fence_checkpoints;
+				int iii;
+
+				for (iii = 0; iii < check_data->nr_fences; iii++)
+					pr_err("%s: fence_checkpoints[%d] = <%p>\n",
+					       __func__, iii,
+					       (void *)*next_checkpoint++);
+			}
+		} else {
+			/* No checkpoints to return, free the memory allocated for them */
+			//pr_err("%s: Freeing memory for 32 sync checkpoints...\n", __func__);
+			kfree(fence_checkpoints);
+			*fence_checkpoint_handles = NULL;
+		}
+		*nr_checks = check_data->nr_fences;
+
+		/* copy the sync checkpoint used in update_fence into update_checkpoint_handle */
+		*update_checkpoint_handle = pvr_fence_get_checkpoint(update_fence);
+
+		data->update_fence = update_fence;
+		*data_out = data;
+
+		/*
+		 * We need to clean up the fences once the HW has finished with them.
+		 * We can do this using fence callbacks. However, instead of adding a
+		 * callback to every fence, which would result in more work, we can
+		 * simply add one to the update fence since this will be the last fence
+		 * to be signalled. This callback can do all the necessary clean up.
+		 *
+		 * Note: we take an additional reference on the update fence in case
+		 * it signals before we can add it to a reservation object.
+		 */
+		dma_fence_get(&data->update_fence->base);
+
+		err = dma_fence_add_callback(&data->update_fence->base, &check_data->base,
+					 pvr_buffer_sync_check_data_cleanup);
+
+		//pr_err("%s: <-EXIT (err = %d, *data_out=<%p>)\n", __func__, err, (void*)*data_out);
+		return err;
+
+err_free_check_data:
+		pvr_buffer_sync_check_fences_destroy(check_data);
+err_pmrs_unlock:
+		pvr_buffer_sync_pmrs_unlock(ctx, nr_pmrs, pmrs);
+err_free_mem:
+		//pr_err("%s: Freeing memory for 32 sync checkpoints...\n", __func__);
+		kfree (fence_checkpoints);
+	}
+#if defined(NO_HARDWARE)
+err_nohw:
+#endif
+	kfree(data);
+	//pr_err("%s: <-EXIT (err = %d)\n", __func__, err);
+	return err;
+}
+
+void
+pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data_in)
+{
+	struct reservation_object *resv;
+	int i;
+
+	//pr_err("%s: called, data_in=<%p>\n", __func__, (void*)data_in);
+	//pr_err("%s: called, update_fence=<%p>, nr_pmrs=%d\n", __func__, (void*)data_in->update_fence, data_in->nr_pmrs);
+	if (data_in->nr_pmrs > 0) {
+		for (i = 0; i < data_in->nr_pmrs; i++) {
+			resv = pmr_reservation_object_get(data_in->pmrs[i]);
+			if (WARN_ON_ONCE(!resv))
+				continue;
+
+			if (data_in->pmr_flags[i] & PVR_BUFFER_FLAG_WRITE) {
+				//pr_err("%s: added exclusive fence (%s) to resv %p\n", __func__, data_in->update_fence->name, resv);
+				PVR_FENCE_TRACE(&data_in->update_fence->base,
+						"added exclusive fence (%s) to resv %p\n",
+						data_in->update_fence->name, resv);
+				reservation_object_add_excl_fence(resv,
+								  &data_in->update_fence->base);
+			} else if (data_in->pmr_flags[i] & PVR_BUFFER_FLAG_READ) {
+				//pr_err("%s: added non-exclusive fence (%s) to resv %p\n", __func__, data_in->update_fence->name, resv);
+				PVR_FENCE_TRACE(&data_in->update_fence->base,
+						"added non-exclusive fence (%s) to resv %p\n",
+						data_in->update_fence->name, resv);
+				reservation_object_add_shared_fence(resv,
+								    &data_in->update_fence->base);
+			}
+		}
+
+		//pr_err("%s: unlock pmrs\n", __func__);
+		/*
+		 * Now that the fence has been added to the necessary reservation
+		 * objects we can safely drop the extra reference we took in
+		 * pvr_buffer_sync_append_start.
+		 */
+		dma_fence_put(&data_in->update_fence->base);
+		pvr_buffer_sync_pmrs_unlock(data_in->ctx, data_in->nr_pmrs,
+					    data_in->pmrs);
+	}
+}
+
+void
+pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data_in)
+{
+	dma_fence_put(&data_in->update_fence->base);
+	//pr_err("%s: unlock pmrs\n", __func__);
+	if (data_in->nr_pmrs > 0) {
+		pvr_buffer_sync_pmrs_unlock(data_in->ctx, data_in->nr_pmrs, data_in->pmrs);
+	}
+}
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+
+int
+pvr_buffer_sync_append_start(struct pvr_buffer_sync_context *ctx,
+			     u32 nr_pmrs,
+			     struct _PMR_ **pmrs,
+			     u32 *pmr_flags,
+			     u32 nr_checks,
+			     struct _RGXFWIF_DEV_VIRTADDR_ *check_ufo_addrs,
+			     u32 *check_values,
+			     u32 nr_updates,
+			     struct _RGXFWIF_DEV_VIRTADDR_ *update_ufo_addrs,
+			     u32 *update_values,
+			     struct pvr_buffer_sync_append_data **data_out)
+{
+	struct pvr_buffer_sync_append_data *data;
+	struct pvr_buffer_sync_check_data *check_data;
+	const size_t data_size = sizeof(*data);
+	const size_t pmrs_size = sizeof(*pmrs) * nr_pmrs;
+	const size_t pmr_flags_size = sizeof(*pmr_flags) * nr_pmrs;
+	int i;
+	int j;
+	int err;
+
+	if ((nr_pmrs && !(pmrs && pmr_flags)) ||
+	    (nr_updates && (!update_ufo_addrs || !update_values)) ||
+	    (nr_checks && (!check_ufo_addrs || !check_values)))
+		return -EINVAL;
+
+	for (i = 0; i < nr_pmrs; i++) {
+		if (!(pmr_flags[i] & PVR_BUFFER_FLAG_MASK)) {
+			pr_err("%s: Invalid flags %#08x for pmr %p\n",
+			       __func__, pmr_flags[i], pmrs[i]);
+			return -EINVAL;
+		}
+	}
+
+	data = kzalloc(data_size + pmrs_size + pmr_flags_size, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+#if defined(NO_HARDWARE)
+	/*
+	 * For NO_HARDWARE there's no checking or updating of client sync prims
+	 * which means SW waits on our fences will cause a deadlock (since they
+	 * will never be signalled). Avoid this by not creating any fences.
+	 */
+	nr_pmrs = 0;
+#endif
+
+	if (!nr_pmrs) {
+		data->appended = false;
+		data->nr_checks = nr_checks;
+		data->check_ufo_addrs = check_ufo_addrs;
+		data->check_values = check_values;
+		data->nr_updates = nr_updates;
+		data->update_ufo_addrs = update_ufo_addrs;
+		data->update_values = update_values;
+		goto finish;
+	}
+
+	data->appended = true;
+	data->ctx = ctx;
+	data->pmrs = (struct _PMR_ **)((char *)data + data_size);
+	data->pmr_flags = (u32 *)((char *)data->pmrs + pmrs_size);
+
+	/*
+	 * It's expected that user space will provide a set of unique PMRs
+	 * but, as a PMR can have multiple handles, it's still possible to
+	 * end up here with duplicates. Take this opportunity to filter out
+	 * any remaining duplicates (updating flags when necessary) before
+	 * trying to process them further.
+	 */
+	for (i = 0; i < nr_pmrs; i++) {
+		for (j = 0; j < data->nr_pmrs; j++) {
+			if (data->pmrs[j] == pmrs[i]) {
+				data->pmr_flags[j] |= pmr_flags[i];
+				break;
+			}
+		}
+
+		if (j == data->nr_pmrs) {
+			data->pmrs[j] = pmrs[i];
+			data->pmr_flags[j] = pmr_flags[i];
+			data->nr_pmrs++;
+		}
+	}
+
+	err = pvr_buffer_sync_pmrs_lock(ctx,
+					data->nr_pmrs,
+					data->pmrs);
+	if (err) {
+		pr_err("%s: failed to lock pmrs (errno=%d)\n",
+		       __func__, err);
+		goto err_free_data;
+	}
+
+	check_data = pvr_buffer_sync_check_fences_create(ctx->fence_ctx,
+							 data->nr_pmrs,
+							 data->pmrs,
+							 data->pmr_flags);
+	if (!check_data) {
+		err = -ENOMEM;
+		goto err_pmrs_unlock;
+	}
+
+	data->update_fence = pvr_fence_create(ctx->fence_ctx, "update fence");
+	if (!data->update_fence) {
+		err = -ENOMEM;
+		goto err_free_check_data;
+	}
+
+	err = pvr_buffer_sync_append_fences(nr_checks,
+					    check_ufo_addrs,
+					    check_values,
+					    check_data->nr_fences,
+					    check_data->fences,
+					    &data->nr_checks,
+					    &data->check_ufo_addrs,
+					    &data->check_values);
+	if (err)
+		goto err_cleanup_update_fence;
+
+	err = pvr_buffer_sync_append_fences(nr_updates,
+					    update_ufo_addrs,
+					    update_values,
+					    1,
+					    &data->update_fence,
+					    &data->nr_updates,
+					    &data->update_ufo_addrs,
+					    &data->update_values);
+	if (err)
+		goto err_free_data_checks;
+
+	/*
+	 * We need to clean up the fences once the HW has finished with them.
+	 * We can do this using fence callbacks. However, instead of adding a
+	 * callback to every fence, which would result in more work, we can
+	 * simply add one to the update fence since this will be the last fence
+	 * to be signalled. This callback can do all the necessary clean up.
+	 *
+	 * Note: we take an additional reference on the update fence in case
+	 * it signals before we can add it to a reservation object.
+	 */
+	dma_fence_get(&data->update_fence->base);
+
+	err = dma_fence_add_callback(&data->update_fence->base,
+				     &check_data->base,
+				     pvr_buffer_sync_check_data_cleanup);
+	if (err) {
+		/*
+		 * We should only ever get -ENOENT if the fence has already
+		 * signalled, which should *never* be the case as we've not
+		 * even inserted the fence into a CCB yet!
+		 */
+		WARN_ON(err != -ENOENT);
+		/* Drop the extra reference taken above before bailing out. */
+		dma_fence_put(&data->update_fence->base);
+		goto err_free_data_updates;
+	}
+
+finish:
+	*data_out = data;
+	return 0;
+
+err_free_data_updates:
+	kfree(data->update_ufo_addrs);
+	kfree(data->update_values);
+err_free_data_checks:
+	kfree(data->check_ufo_addrs);
+	kfree(data->check_values);
+err_cleanup_update_fence:
+	pvr_fence_destroy(data->update_fence);
+err_free_check_data:
+	pvr_buffer_sync_check_fences_destroy(check_data);
+err_pmrs_unlock:
+	pvr_buffer_sync_pmrs_unlock(ctx,
+				    data->nr_pmrs,
+				    data->pmrs);
+err_free_data:
+	kfree(data);
+	return err;
+}
+
+void
+pvr_buffer_sync_append_finish(struct pvr_buffer_sync_append_data *data)
+{
+	struct reservation_object *resv;
+	int i;
+
+	if (!data->appended)
+		goto finish;
+
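+	/*
+	 * Attach the update fence to each buffer's reservation object:
+	 * exclusively for writes, shared for reads.
+	 */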
+	for (i = 0; i < data->nr_pmrs; i++) {
+		resv = pmr_reservation_object_get(data->pmrs[i]);
+		if (WARN_ON_ONCE(!resv))
+			continue;
+
+		if (data->pmr_flags[i] & PVR_BUFFER_FLAG_WRITE) {
+			PVR_FENCE_TRACE(&data->update_fence->base,
+					"added exclusive fence (%s) to resv %p\n",
+					data->update_fence->name, resv);
+			reservation_object_add_excl_fence(resv,
+							  &data->update_fence->base);
+		} else if (data->pmr_flags[i] & PVR_BUFFER_FLAG_READ) {
+			PVR_FENCE_TRACE(&data->update_fence->base,
+					"added non-exclusive fence (%s) to resv %p\n",
+					data->update_fence->name, resv);
+			reservation_object_add_shared_fence(resv,
+							    &data->update_fence->base);
+		}
+	}
+
+	/*
+	 * Now that the fence has been added to the necessary reservation
+	 * objects we can safely drop the extra reference we took in
+	 * pvr_buffer_sync_append_start.
+	 */
+	dma_fence_put(&data->update_fence->base);
+
+	pvr_buffer_sync_pmrs_unlock(data->ctx,
+				    data->nr_pmrs,
+				    data->pmrs);
+
+	kfree(data->check_ufo_addrs);
+	kfree(data->check_values);
+	kfree(data->update_ufo_addrs);
+	kfree(data->update_values);
+
+finish:
+	kfree(data);
+}
+
+void
+pvr_buffer_sync_append_abort(struct pvr_buffer_sync_append_data *data)
+{
+	if (!data)
+		return;
+
+	if (!data->appended)
+		goto finish;
+
+	/*
+	 * Signal the fence to trigger clean-up and drop the additional
+	 * reference taken in pvr_buffer_sync_append_start.
+	 */
+	pvr_fence_sync_sw_signal(data->update_fence);
+	dma_fence_put(&data->update_fence->base);
+	pvr_buffer_sync_pmrs_unlock(data->ctx,
+				    data->nr_pmrs,
+				    data->pmrs);
+
+	kfree(data->check_ufo_addrs);
+	kfree(data->check_values);
+	kfree(data->update_ufo_addrs);
+	kfree(data->update_values);
+
+finish:
+	kfree(data);
+}
+
+void
+pvr_buffer_sync_append_checks_get(struct pvr_buffer_sync_append_data *data,
+				  u32 *nr_checks_out,
+				  struct _RGXFWIF_DEV_VIRTADDR_ **check_ufo_addrs_out,
+				  u32 **check_values_out)
+{
+	*nr_checks_out = data->nr_checks;
+	*check_ufo_addrs_out = data->check_ufo_addrs;
+	*check_values_out = data->check_values;
+}
+
+void
+pvr_buffer_sync_append_updates_get(struct pvr_buffer_sync_append_data *data,
+				   u32 *nr_updates_out,
+				   struct _RGXFWIF_DEV_VIRTADDR_ **update_ufo_addrs_out,
+				   u32 **update_values_out)
+{
+	*nr_updates_out = data->nr_updates;
+	*update_ufo_addrs_out = data->update_ufo_addrs;
+	*update_values_out = data->update_values;
+}
+
+void
+pvr_buffer_sync_wait_handle_get(struct pvr_buffer_sync_context *ctx,
+				struct _PMR_ *pmr,
+				void **wait_handle)
+{
+	struct reservation_object *resv;
+	struct reservation_object_list *resv_list = NULL;
+	struct dma_fence *fence, *wait_fence = NULL;
+	unsigned int seq;
+	int i;
+
+	resv = pmr_reservation_object_get(pmr);
+	if (!resv)
+		goto exit;
+
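+	/*
+	 * Lockless read of the reservation object: sample its seqcount,
+	 * dereference the fence lists under RCU and retry if the object
+	 * changed underneath us.
+	 */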
+retry:
+	seq = read_seqcount_begin(&resv->seq);
+	rcu_read_lock();
+	resv_list = rcu_dereference(resv->fence);
+
+	if (read_seqcount_retry(&resv->seq, seq))
+		goto unlock_retry;
+
+	if (resv_list) {
+		for (i = 0; i < resv_list->shared_count; i++) {
+			fence = rcu_dereference(resv_list->shared[i]);
+			if (is_our_fence(ctx->fence_ctx, fence) &&
+			    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				      &fence->flags)) {
+				wait_fence = dma_fence_get_rcu(fence);
+				if (!wait_fence)
+					goto unlock_retry;
+				break;
+			}
+		}
+	}
+
+	if (!wait_fence) {
+		fence = rcu_dereference(resv->fence_excl);
+
+		if (read_seqcount_retry(&resv->seq, seq))
+			goto unlock_retry;
+
+		if (fence &&
+		    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+			wait_fence = dma_fence_get_rcu(fence);
+			if (!wait_fence)
+				goto unlock_retry;
+		}
+	}
+	rcu_read_unlock();
+
+exit:
+	*wait_handle = wait_fence;
+	return;
+
+unlock_retry:
+	rcu_read_unlock();
+	goto retry;
+}
+
+void
+pvr_buffer_sync_wait_handle_put(void *wait_handle)
+{
+	dma_fence_put(wait_handle);
+}
+
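+/*
+ * Wait for the fence behind @wait_handle to signal. A zero @timeout is
+ * rejected with -EINVAL; otherwise this returns 0 on success (including
+ * when there is nothing to wait on), -EBUSY if the timeout expired, or
+ * the error from dma_fence_wait_timeout().
+ */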
+int
+pvr_buffer_sync_wait(void *wait_handle,
+		     bool intr,
+		     unsigned long timeout)
+{
+	if (!timeout)
+		return -EINVAL;
+
+	if (wait_handle) {
+		struct dma_fence *wait_fence = wait_handle;
+		long lerr;
+
+		if (dma_fence_is_signaled(wait_fence))
+			lerr = timeout;
+		else
+			lerr = dma_fence_wait_timeout(wait_fence, intr,
+						      timeout);
+
+		if (!lerr)
+			return -EBUSY;
+		else if (lerr < 0)
+			return (int)lerr;
+	}
+
+	return 0;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_buffer_sync.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_buffer_sync.h
new file mode 100644
index 0000000..6075f74
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_buffer_sync.h
@@ -0,0 +1,99 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR Linux buffer sync interface
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_BUFFER_SYNC_H__)
+#define __PVR_BUFFER_SYNC_H__
+
+#include <linux/types.h>
+
+struct _RGXFWIF_DEV_VIRTADDR_;
+struct _PMR_;
+struct pvr_buffer_sync_context;
+struct pvr_buffer_sync_append_data;
+
+struct pvr_buffer_sync_context *
+pvr_buffer_sync_context_create(void *dev_cookie);
+void pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx);
+
+int pvr_buffer_sync_append_start(struct pvr_buffer_sync_context *ctx,
+				 u32 nr_pmrs,
+				 struct _PMR_ **pmrs,
+				 u32 *pmr_flags,
+				 u32 nr_checks,
+				 struct _RGXFWIF_DEV_VIRTADDR_ *check_ufo_addrs,
+				 u32 *check_values,
+				 u32 nr_updates,
+				 struct _RGXFWIF_DEV_VIRTADDR_ *update_ufo_addrs,
+				 u32 *update_values,
+				 struct pvr_buffer_sync_append_data **data_out);
+void pvr_buffer_sync_append_finish(struct pvr_buffer_sync_append_data *data);
+void pvr_buffer_sync_append_abort(struct pvr_buffer_sync_append_data *data);
+void pvr_buffer_sync_append_checks_get(struct pvr_buffer_sync_append_data *data,
+				       u32 *nr_checks_out,
+				       struct _RGXFWIF_DEV_VIRTADDR_ **check_ufo_addrs_out,
+				       u32 **check_values_out);
+void pvr_buffer_sync_append_updates_get(struct pvr_buffer_sync_append_data *data,
+					u32 *nr_updates_out,
+					struct _RGXFWIF_DEV_VIRTADDR_ **update_ufo_addrs_out,
+					u32 **update_values_out);
+
+void pvr_buffer_sync_wait_handle_get(struct pvr_buffer_sync_context *ctx,
+				     struct _PMR_ *pmr,
+				     void **wait_handle);
+void pvr_buffer_sync_wait_handle_put(void *wait_handle);
+int pvr_buffer_sync_wait(void *wait_handle, bool intr, unsigned long timeout);
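+
+/*
+ * Typical call sequence (a sketch only; the variable names are illustrative
+ * and error handling is omitted):
+ *
+ *	struct pvr_buffer_sync_append_data *data;
+ *
+ *	pvr_buffer_sync_append_start(ctx, nr_pmrs, pmrs, pmr_flags,
+ *				     nr_checks, check_ufo_addrs, check_values,
+ *				     nr_updates, update_ufo_addrs,
+ *				     update_values, &data);
+ *	pvr_buffer_sync_append_checks_get(data, &nr_checks,
+ *					  &check_ufo_addrs, &check_values);
+ *	pvr_buffer_sync_append_updates_get(data, &nr_updates,
+ *					   &update_ufo_addrs, &update_values);
+ *	... kick the hardware with the returned checks and updates ...
+ *	pvr_buffer_sync_append_finish(data);
+ *
+ * with pvr_buffer_sync_append_abort(data) used instead of _finish() if the
+ * kick was never submitted.
+ */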
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+int pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx,
+					      u32 nr_pmrs,
+					      struct _PMR_ **pmrs,
+					      u32 *pmr_flags,
+					      u32 *nr_checks,
+					      PSYNC_CHECKPOINT **fence_checkpoint_handles,
+					      PSYNC_CHECKPOINT *update_checkpoint_handle,
+					      struct pvr_buffer_sync_append_data **data_out);
+
+void pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data_in);
+void pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data_in);
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* !defined(__PVR_BUFFER_SYNC_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_drm.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_drm.c
new file mode 100644
index 0000000..c3de396
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_drm.c
@@ -0,0 +1,276 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR DRM driver
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <drm/drm.h>
+#include <drm/drmP.h> /* include before drm_crtc.h for kernels older than 3.9 */
+#include <drm/drm_crtc.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/version.h>
+
+#include "module_common.h"
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+#include "pvrversion.h"
+#include "services_kernel_client.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+#define DRIVER_RENDER 0
+#define DRM_RENDER_ALLOW 0
+#endif
+
+#define PVR_DRM_DRIVER_NAME PVR_DRM_NAME
+#define PVR_DRM_DRIVER_DESC "Imagination Technologies PVR DRM"
+#define PVR_DRM_DRIVER_DATE "20110701"
+
+
+static int pvr_pm_suspend(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+
+	DRM_DEBUG_DRIVER("device %p\n", dev);
+
+	return PVRSRVCommonDeviceSuspend(ddev->dev_private);
+}
+
+static int pvr_pm_resume(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+
+	DRM_DEBUG_DRIVER("device %p\n", dev);
+
+	return PVRSRVCommonDeviceResume(ddev->dev_private);
+}
+
+const struct dev_pm_ops pvr_pm_ops = {
+	.suspend = pvr_pm_suspend,
+	.resume = pvr_pm_resume,
+};
+
+
+static int pvr_drm_load(struct drm_device *ddev, unsigned long flags)
+{
+	struct _PVRSRV_DEVICE_NODE_ *dev_node;
+	enum PVRSRV_ERROR srv_err;
+	int err, deviceId;
+
+	DRM_DEBUG_DRIVER("device %p\n", ddev->dev);
+
+	/*
+	 * The equivalent is done for PCI modesetting drivers by
+	 * drm_get_pci_dev()
+	 */
+	platform_set_drvdata(to_platform_device(ddev->dev), ddev);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+	/*
+	 * Older kernels do not have the render drm_minor member in
+	 * drm_device, so fall back to the primary node for device
+	 * identification.
+	 */
+	deviceId = ddev->primary->index;
+#else
+	if (ddev->render)
+		deviceId = ddev->render->index;
+	else /* when the render node is NULL, fall back to the primary node */
+		deviceId = ddev->primary->index;
+#endif
+
+	srv_err = PVRSRVDeviceCreate(ddev->dev, deviceId, &dev_node);
+	if (srv_err != PVRSRV_OK) {
+		DRM_ERROR("failed to create device node for device %p (%s)\n",
+			  ddev->dev, PVRSRVGetErrorStringKM(srv_err));
+		if (srv_err == PVRSRV_ERROR_PROBE_DEFER)
+			err = -EPROBE_DEFER;
+		else
+			err = -ENODEV;
+		goto err_exit;
+	}
+
+	err = PVRSRVCommonDeviceInit(dev_node);
+	if (err) {
+		DRM_ERROR("device %p initialisation failed (err=%d)\n",
+			  ddev->dev, err);
+		goto err_device_destroy;
+	}
+
+	drm_mode_config_init(ddev);
+	ddev->dev_private = dev_node;
+
+	return 0;
+
+err_device_destroy:
+	PVRSRVDeviceDestroy(dev_node);
+err_exit:
+	return err;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+static int pvr_drm_unload(struct drm_device *ddev)
+#else
+static void pvr_drm_unload(struct drm_device *ddev)
+#endif
+{
+	DRM_DEBUG_DRIVER("device %p\n", ddev->dev);
+
+	drm_mode_config_cleanup(ddev);
+
+	PVRSRVCommonDeviceDeinit(ddev->dev_private);
+
+	PVRSRVDeviceDestroy(ddev->dev_private);
+	ddev->dev_private = NULL;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+	return 0;
+#endif
+}
+
+static int pvr_drm_open(struct drm_device *ddev, struct drm_file *dfile)
+{
+	int err;
+
+	if (!try_module_get(THIS_MODULE)) {
+		DRM_ERROR("failed to get module reference\n");
+		return -ENOENT;
+	}
+
+	err = PVRSRVCommonDeviceOpen(ddev->dev_private, dfile);
+	if (err)
+		module_put(THIS_MODULE);
+
+	return err;
+}
+
+static void pvr_drm_release(struct drm_device *ddev, struct drm_file *dfile)
+{
+	PVRSRVCommonDeviceRelease(ddev->dev_private, dfile);
+
+	module_put(THIS_MODULE);
+}
+
+/*
+ * The DRM global lock is taken for ioctls unless the DRM_UNLOCKED flag is set.
+ * If you revise one of the driver-specific ioctls, or add a new one, that has
+ * DRM_UNLOCKED set then consider whether the gPVRSRVLock mutex needs to be
+ * taken.
+ */
+static struct drm_ioctl_desc pvr_drm_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(PVR_SRVKM_CMD, PVRSRV_BridgeDispatchKM, DRM_RENDER_ALLOW | DRM_UNLOCKED),
+#if defined(PDUMP)
+	DRM_IOCTL_DEF_DRV(PVR_DBGDRV_CMD, dbgdrv_ioctl, DRM_RENDER_ALLOW | DRM_AUTH | DRM_UNLOCKED),
+#endif
+};
+
+#if defined(CONFIG_COMPAT)
+#if defined(PDUMP)
+static drm_ioctl_compat_t *pvr_drm_compat_ioctls[] = {
+	[DRM_PVR_DBGDRV_CMD] = dbgdrv_ioctl_compat,
+};
+#endif
+
+static long pvr_compat_ioctl(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+
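+	/* Core DRM ioctls live below DRM_COMMAND_BASE; let DRM handle them. */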
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(file, cmd, arg);
+
+#if defined(PDUMP)
+	if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(pvr_drm_compat_ioctls)) {
+		drm_ioctl_compat_t *pfnBridge;
+
+		pfnBridge = pvr_drm_compat_ioctls[nr - DRM_COMMAND_BASE];
+		if (pfnBridge)
+			return pfnBridge(file, cmd, arg);
+	}
+#endif
+
+	return drm_ioctl(file, cmd, arg);
+}
+#endif /* defined(CONFIG_COMPAT) */
+
+static const struct file_operations pvr_drm_fops = {
+	.owner			= THIS_MODULE,
+	.open			= drm_open,
+	.release		= drm_release,
+	/*
+	 * FIXME:
+	 * Wrap this in a function that checks enough data has been
+	 * supplied with the ioctl (e.g. _IOCDIR(nr) != _IOC_NONE &&
+	 * _IOC_SIZE(nr) == size).
+	 */
+	.unlocked_ioctl		= drm_ioctl,
+#if defined(CONFIG_COMPAT)
+	.compat_ioctl		= pvr_compat_ioctl,
+#endif
+	.mmap			= PVRSRV_MMap,
+	.poll			= drm_poll,
+	.read			= drm_read,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+	.fasync			= drm_fasync,
+#endif
+};
+
+const struct drm_driver pvr_drm_generic_driver = {
+	.driver_features	= DRIVER_MODESET | DRIVER_RENDER,
+
+	.dev_priv_size		= 0,
+	.load			= pvr_drm_load,
+	.unload			= pvr_drm_unload,
+	.open			= pvr_drm_open,
+	.postclose		= pvr_drm_release,
+
+	.ioctls			= pvr_drm_ioctls,
+	.num_ioctls		= ARRAY_SIZE(pvr_drm_ioctls),
+	.fops			= &pvr_drm_fops,
+
+	.name			= PVR_DRM_DRIVER_NAME,
+	.desc			= PVR_DRM_DRIVER_DESC,
+	.date			= PVR_DRM_DRIVER_DATE,
+	.major			= PVRVERSION_MAJ,
+	.minor			= PVRVERSION_MIN,
+	.patchlevel		= PVRVERSION_BUILD,
+};
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_drv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_drv.h
new file mode 100644
index 0000000..e92cfe9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_drv.h
@@ -0,0 +1,69 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR DRM driver
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRV_H__)
+#define __PVR_DRV_H__
+
+#include <drm/drmP.h>
+#include <linux/pm.h>
+
+struct file;
+struct vm_area_struct;
+
+extern const struct dev_pm_ops pvr_pm_ops;
+extern const struct drm_driver pvr_drm_generic_driver;
+
+#if defined(PDUMP)
+int dbgdrv_init(void);
+void dbgdrv_cleanup(void);
+int dbgdrv_ioctl(struct drm_device *dev, void *arg, struct drm_file *file);
+int dbgdrv_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg);
+#endif
+
+int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg,
+			    struct drm_file *file);
+int PVRSRV_MMap(struct file *file, struct vm_area_struct *ps_vma);
+
+#endif /* !defined(__PVR_DRV_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_fence.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_fence.c
new file mode 100644
index 0000000..40f5f86
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_fence.c
@@ -0,0 +1,903 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR Linux fence interface
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "pvr_fence.h"
+#include "services_kernel_client.h"
+
+#include "kernel_compatibility.h"
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+#define	PVR_FENCE_CONTEXT_DESTROY_INITIAL_WAIT_MS	100
+#define	PVR_FENCE_CONTEXT_DESTROY_RETRIES		5
+#endif
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \
+	do {                                                             \
+		if (pfnDumpDebugPrintf)                                  \
+			pfnDumpDebugPrintf(pvDumpDebugFile, fmt,         \
+					   ## __VA_ARGS__);              \
+		else                                                     \
+			pr_err(fmt "\n", ## __VA_ARGS__);                \
+	} while (0)
+
+static inline u32
+pvr_fence_sync_value_get(struct pvr_fence *pvr_fence)
+{
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	return *pvr_fence->sync->pui32LinAddr;
+#else
+	if (SyncCheckpointIsErrored(pvr_fence->sync_checkpoint))
+		return PVRSRV_SYNC_CHECKPOINT_ERRORED;
+	else if (SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint))
+		return PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+	else
+		return PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED;
+#endif
+}
+
+static inline bool
+pvr_fence_sync_value_met(struct pvr_fence *pvr_fence,
+			 enum pvr_fence_sync_val value)
+{
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	return *pvr_fence->sync->pui32LinAddr == value;
+#else
+	(void)value;
+	return SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint);
+#endif
+}
+
+static inline void
+pvr_fence_sync_value_set(struct pvr_fence *pvr_fence,
+			 enum pvr_fence_sync_val value)
+{
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	SyncPrimSet(pvr_fence->sync, value);
+#else
+	(void)value;
+	SyncCheckpointSignal(pvr_fence->sync_checkpoint);
+#endif
+}
+
+static void
+pvr_fence_context_check_status(struct work_struct *data)
+{
+	PVRSRVCheckStatus(NULL);
+}
+
+static void
+pvr_fence_context_fences_dump(struct pvr_fence_context *fctx,
+			      DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			      void *pvDumpDebugFile)
+{
+	struct pvr_fence *pvr_fence;
+	unsigned long flags;
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	u32 sync_addr;
+#endif
+
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) {
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		(void)SyncPrimGetFirmwareAddr(pvr_fence->sync, &sync_addr);
+		PVR_DUMPDEBUG_LOG(
+			pfnDumpDebugPrintf, pvDumpDebugFile,
+			"f %llu#%u: (%s%s) Refs = %u, FWAddr = %#08x, Current = %#08x, Next = %#08x, %s %s",
+			(u64) pvr_fence->fence->context,
+			pvr_fence->fence->seqno,
+			test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+				 &pvr_fence->fence->flags) ? "+" : "-",
+			test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				 &pvr_fence->fence->flags) ? "+" : "-",
+			refcount_read(&pvr_fence->fence->refcount.refcount),
+			sync_addr,
+			pvr_fence_sync_value_get(pvr_fence),
+			PVR_FENCE_SYNC_VAL_SIGNALED,
+			pvr_fence->name,
+			(&pvr_fence->base != pvr_fence->fence) ? "(foreign)" : "");
+#else
+		PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+				  "f %llu#%u: (%s%s) Refs = %u, FWAddr = %#08x, Current = %#08x, Next = %#08x, %s %s",
+				  (u64) pvr_fence->fence->context,
+				  pvr_fence->fence->seqno,
+				  test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &pvr_fence->fence->flags) ? "+" : "-",
+				  test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &pvr_fence->fence->flags) ? "+" : "-",
+				  refcount_read(&pvr_fence->fence->refcount.refcount),
+				  SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint),
+				  pvr_fence_sync_value_get(pvr_fence),
+				  PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+				  pvr_fence->name,
+				  (&pvr_fence->base != pvr_fence->fence) ? "(foreign)" : "");
+#endif
+	}
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+}
+
+static void
+pvr_fence_context_queue_signal_work(void *data)
+{
+	struct pvr_fence_context *fctx = (struct pvr_fence_context *)data;
+
+	queue_work(fctx->fence_wq, &fctx->signal_work);
+}
+
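+/* Returns the pre-increment value, so the first fence gets seqno 0. */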
+static inline unsigned int
+pvr_fence_context_seqno_next(struct pvr_fence_context *fctx)
+{
+	return atomic_inc_return(&fctx->fence_seqno) - 1;
+}
+
+static inline void
+pvr_fence_context_free_deferred(struct pvr_fence_context *fctx)
+{
+	struct pvr_fence *pvr_fence, *tmp;
+	LIST_HEAD(deferred_free_list);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_for_each_entry_safe(pvr_fence, tmp,
+				 &fctx->deferred_free_list,
+				 fence_head)
+		list_move(&pvr_fence->fence_head, &deferred_free_list);
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+	list_for_each_entry_safe(pvr_fence, tmp,
+				 &deferred_free_list,
+				 fence_head) {
+		list_del(&pvr_fence->fence_head);
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		SyncPrimFree(pvr_fence->sync);
+#else
+		SyncCheckpointFree(pvr_fence->sync_checkpoint);
+#endif
+		dma_fence_free(&pvr_fence->base);
+	}
+}
+
+static void
+pvr_fence_context_signal_fences(struct work_struct *data)
+{
+	struct pvr_fence_context *fctx =
+		container_of(data, struct pvr_fence_context, signal_work);
+	struct pvr_fence *pvr_fence, *tmp;
+	unsigned long flags;
+	LIST_HEAD(signal_list);
+
+	/*
+	 * We can't call fence_signal while holding the lock as we can end up
+	 * in a situation whereby pvr_fence_foreign_signal_sync, which also
+	 * takes the list lock, ends up being called as a result of the
+	 * fence_signal below, i.e. fence_signal(fence) -> fence->callback()
+	 *  -> fence_signal(foreign_fence) -> foreign_fence->callback() where
+	 * the foreign_fence callback is pvr_fence_foreign_signal_sync.
+	 *
+	 * So extract the items we intend to signal and add them to their own
+	 * queue.
+	 */
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_for_each_entry_safe(pvr_fence, tmp, &fctx->signal_list, signal_head) {
+		if (pvr_fence_sync_value_met(pvr_fence,
+					      PVR_FENCE_SYNC_VAL_SIGNALED))
+			list_move(&pvr_fence->signal_head, &signal_list);
+	}
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+	list_for_each_entry_safe(pvr_fence, tmp, &signal_list, signal_head) {
+		PVR_FENCE_TRACE(&pvr_fence->base, "signalled fence (%s)\n",
+				pvr_fence->name);
+		list_del(&pvr_fence->signal_head);
+		dma_fence_signal(pvr_fence->fence);
+		dma_fence_put(pvr_fence->fence);
+	}
+
+	/*
+	 * Take this opportunity to free up any fence objects we
+	 * have deferred freeing.
+	 */
+	pvr_fence_context_free_deferred(fctx);
+}
+
+static void
+pvr_fence_context_destroy_work(struct work_struct *data)
+{
+	struct delayed_work *dwork =
+		container_of(data, struct delayed_work, work);
+	struct pvr_fence_context *fctx =
+		container_of(dwork, struct pvr_fence_context, destroy_work);
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	PVRSRV_ERROR srv_err;
+#endif
+
+	PVR_FENCE_CTX_TRACE(fctx, "destroyed fence context (%s)\n", fctx->name);
+
+	pvr_fence_context_free_deferred(fctx);
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/*
+	 * Ensure any outstanding calls to SyncCheckpointFree have completed
+	 * on other workqueues before calling SyncCheckpointContextDestroy,
+	 * to avoid an unnecessary retry.
+	 */
+	flush_workqueue(fctx->fence_wq);
+
+	srv_err = SyncCheckpointContextDestroy(fctx->sync_checkpoint_context);
+	if (srv_err != PVRSRV_OK) {
+		if (fctx->module_got && fctx->destroy_retries_left) {
+			unsigned long destroy_delay_jiffies =
+				msecs_to_jiffies(fctx->destroy_delay_ms);
+
+			pr_debug("%s: SyncCheckpointContextDestroy of %p failed, retrying in %ums\n",
+				 __func__, fctx->sync_checkpoint_context,
+				 fctx->destroy_delay_ms);
+
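+			/* Exponential back-off between retries. */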
+			fctx->destroy_retries_left--;
+			fctx->destroy_delay_ms *= 2;
+
+			schedule_delayed_work(&fctx->destroy_work,
+						destroy_delay_jiffies);
+			return;
+		}
+	}
+#endif
+
+	if (WARN_ON(!list_empty_careful(&fctx->fence_list)))
+		pvr_fence_context_fences_dump(fctx, NULL, NULL);
+
+	PVRSRVUnregisterDbgRequestNotify(fctx->dbg_request_handle);
+	PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle);
+
+	destroy_workqueue(fctx->fence_wq);
+
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	SyncPrimContextDestroy(fctx->sync_prim_context);
+#else
+	if (fctx->module_got) {
+		if (srv_err == PVRSRV_OK) {
+			unsigned int retries =
+				PVR_FENCE_CONTEXT_DESTROY_RETRIES -
+				fctx->destroy_retries_left;
+
+			if (retries)
+				pr_debug("%s: SyncCheckpointContextDestroy of %p successful after %u %s\n",
+					 __func__,
+					 fctx->sync_checkpoint_context,
+					 retries,
+					 (retries == 1) ? "retry" : "retries");
+
+			module_put(THIS_MODULE);
+		} else {
+			pr_err("%s: SyncCheckpointContextDestroy of %p failed, module unloadable\n",
+			       __func__, fctx->sync_checkpoint_context);
+		}
+	} else {
+		if (srv_err != PVRSRV_OK)
+			pr_err("%s: SyncCheckpointContextDestroy of %p failed, context may be leaked\n",
+			       __func__, fctx->sync_checkpoint_context);
+	}
+#endif
+
+	kfree(fctx);
+}
+
+static void
+pvr_fence_context_debug_request(void *data, u32 verbosity,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	struct pvr_fence_context *fctx = (struct pvr_fence_context *)data;
+
+	if (verbosity == DEBUG_REQUEST_VERBOSITY_MEDIUM)
+		pvr_fence_context_fences_dump(fctx, pfnDumpDebugPrintf,
+					      pvDumpDebugFile);
+}
+
+/**
+ * pvr_fence_context_create - creates a PVR fence context
+ * @dev_cookie: services device cookie
+ * @name: context name (used for debugging)
+ *
+ * Creates a PVR fence context that can be used to create PVR fences or to
+ * create PVR fences from an existing fence.
+ *
+ * pvr_fence_context_destroy should be called to clean up the fence context.
+ *
+ * Returns NULL if a context cannot be created.
+ */
+struct pvr_fence_context *
+pvr_fence_context_create(void *dev_cookie,
+			 const char *name)
+{
+	struct pvr_fence_context *fctx;
+	PVRSRV_ERROR srv_err;
+
+	fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return NULL;
+
+	spin_lock_init(&fctx->lock);
+	atomic_set(&fctx->fence_seqno, 0);
+	INIT_WORK(&fctx->check_status_work, pvr_fence_context_check_status);
+	INIT_WORK(&fctx->signal_work, pvr_fence_context_signal_fences);
+	INIT_DELAYED_WORK(&fctx->destroy_work, pvr_fence_context_destroy_work);
+	spin_lock_init(&fctx->list_lock);
+	INIT_LIST_HEAD(&fctx->signal_list);
+	INIT_LIST_HEAD(&fctx->fence_list);
+	INIT_LIST_HEAD(&fctx->deferred_free_list);
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	fctx->destroy_retries_left = PVR_FENCE_CONTEXT_DESTROY_RETRIES;
+	fctx->destroy_delay_ms = PVR_FENCE_CONTEXT_DESTROY_INITIAL_WAIT_MS;
+#endif
+
+	fctx->fence_context = dma_fence_context_alloc(1);
+	fctx->name = name;
+
+	fctx->fence_wq =
+		create_freezable_workqueue("pvr_fence_sync_workqueue");
+	if (!fctx->fence_wq) {
+		pr_err("%s: failed to create fence workqueue\n", __func__);
+		goto err_free_fctx;
+	}
+
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	srv_err = SyncPrimContextCreate(dev_cookie, &fctx->sync_prim_context);
+	if (srv_err != PVRSRV_OK) {
+		pr_err("%s: failed to create sync prim context (%s)\n",
+		       __func__, PVRSRVGetErrorStringKM(srv_err));
+		goto err_destroy_workqueue;
+	}
+#else
+	srv_err = SyncCheckpointContextCreate(dev_cookie,
+					      &fctx->sync_checkpoint_context);
+	if (srv_err != PVRSRV_OK) {
+		pr_err("%s: failed to create sync checkpoint context (%s)\n",
+		       __func__, PVRSRVGetErrorStringKM(srv_err));
+		goto err_destroy_workqueue;
+	}
+#endif
+
+	srv_err = PVRSRVRegisterCmdCompleteNotify(&fctx->cmd_complete_handle,
+						  pvr_fence_context_queue_signal_work,
+						  fctx);
+	if (srv_err != PVRSRV_OK) {
+		pr_err("%s: failed to register command complete callback (%s)\n",
+		       __func__, PVRSRVGetErrorStringKM(srv_err));
+		goto err_sync_prim_context_destroy;
+	}
+
+	srv_err = PVRSRVRegisterDbgRequestNotify(&fctx->dbg_request_handle,
+						 dev_cookie,
+						 pvr_fence_context_debug_request,
+						 DEBUG_REQUEST_LINUXFENCE,
+						 fctx);
+	if (srv_err != PVRSRV_OK) {
+		pr_err("%s: failed to register debug request callback (%s)\n",
+		       __func__, PVRSRVGetErrorStringKM(srv_err));
+		goto err_unregister_cmd_complete_notify;
+	}
+
+	kref_init(&fctx->kref);
+
+	PVR_FENCE_CTX_TRACE(fctx, "created fence context (%s)\n", name);
+
+	return fctx;
+
+err_unregister_cmd_complete_notify:
+	PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle);
+err_sync_prim_context_destroy:
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	SyncPrimContextDestroy(fctx->sync_prim_context);
+#else
+	SyncCheckpointContextDestroy(fctx->sync_checkpoint_context);
+#endif
+err_destroy_workqueue:
+	destroy_workqueue(fctx->fence_wq);
+err_free_fctx:
+	kfree(fctx);
+	return NULL;
+}
+
+static void pvr_fence_context_destroy_kref(struct kref *kref)
+{
+	struct pvr_fence_context *fctx =
+		container_of(kref, struct pvr_fence_context, kref);
+
+	PVR_FENCE_CTX_TRACE(fctx,
+			    "scheduling destruction of fence context (%s)\n",
+			    fctx->name);
+
+	schedule_delayed_work(&fctx->destroy_work, 0);
+}
+
+/**
+ * pvr_fence_context_destroy - destroys a context
+ * @fctx: PVR fence context to destroy
+ *
+ * Destroys a PVR fence context with the expectation that all fences have been
+ * destroyed.
+ */
+void
+pvr_fence_context_destroy(struct pvr_fence_context *fctx)
+{
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	fctx->module_got = try_module_get(THIS_MODULE);
+#endif
+
+	kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+}
+
+static const char *
+pvr_fence_get_driver_name(struct dma_fence *fence)
+{
+	return PVR_LDM_DRIVER_REGISTRATION_NAME;
+}
+
+static const char *
+pvr_fence_get_timeline_name(struct dma_fence *fence)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+	return pvr_fence->fctx->name;
+}
+
+static bool
+pvr_fence_enable_signaling(struct dma_fence *fence)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+	struct pvr_fence_context *fctx = pvr_fence->fctx;
+	unsigned long flags;
+
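+	/* The dma_fence core calls this with the fence lock (fctx->lock) held. */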
+	WARN_ON_SMP(!spin_is_locked(&fctx->lock));
+
+	if (pvr_fence_sync_value_met(pvr_fence,
+				     PVR_FENCE_SYNC_VAL_SIGNALED))
+		return false;
+
+	dma_fence_get(&pvr_fence->base);
+
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_add_tail(&pvr_fence->signal_head, &fctx->signal_list);
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+	PVR_FENCE_TRACE(&pvr_fence->base, "signalling enabled (%s)\n",
+			pvr_fence->name);
+
+	return true;
+}
+
+static bool
+pvr_fence_is_signaled(struct dma_fence *fence)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+	return pvr_fence_sync_value_met(pvr_fence, PVR_FENCE_SYNC_VAL_SIGNALED);
+}
+
+static void
+pvr_fence_release(struct dma_fence *fence)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+	struct pvr_fence_context *fctx = pvr_fence->fctx;
+	unsigned long flags;
+
+	PVR_FENCE_TRACE(&pvr_fence->base, "released fence (%s)\n",
+			pvr_fence->name);
+
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	pvr_fence_sync_value_set(pvr_fence, PVR_FENCE_SYNC_VAL_DONE);
+#endif
+
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_move(&pvr_fence->fence_head, &fctx->deferred_free_list);
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+	kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+}
+
+const struct dma_fence_ops pvr_fence_ops = {
+	.get_driver_name = pvr_fence_get_driver_name,
+	.get_timeline_name = pvr_fence_get_timeline_name,
+	.enable_signaling = pvr_fence_enable_signaling,
+	.signaled = pvr_fence_is_signaled,
+	.wait = dma_fence_default_wait,
+	.release = pvr_fence_release,
+};
+
+/**
+ * pvr_fence_create - creates a PVR fence
+ * @fctx: PVR fence context on which the PVR fence should be created
+ * @name: PVR fence name (used for debugging)
+ *
+ * Creates a PVR fence.
+ *
+ * Once the fence is finished with pvr_fence_destroy should be called.
+ *
+ * Returns NULL if a PVR fence cannot be created.
+ */
+struct pvr_fence *
+pvr_fence_create(struct pvr_fence_context *fctx, const char *name)
+{
+	struct pvr_fence *pvr_fence;
+	unsigned int seqno;
+	unsigned long flags;
+	PVRSRV_ERROR srv_err;
+
+	pvr_fence = kzalloc(sizeof(*pvr_fence), GFP_KERNEL);
+	if (!pvr_fence)
+		return NULL;
+
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	srv_err = SyncPrimAlloc(fctx->sync_prim_context, &pvr_fence->sync,
+				name);
+#else
+	srv_err = SyncCheckpointAlloc(fctx->sync_checkpoint_context, -1, name,
+				      &pvr_fence->sync_checkpoint);
+#endif
+	if (srv_err != PVRSRV_OK)
+		goto err_free_fence;
+
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	pvr_fence_sync_value_set(pvr_fence, PVR_FENCE_SYNC_VAL_INIT);
+#endif
+
+	INIT_LIST_HEAD(&pvr_fence->fence_head);
+	INIT_LIST_HEAD(&pvr_fence->signal_head);
+	pvr_fence->fctx = fctx;
+	pvr_fence->name = name;
+	pvr_fence->fence = &pvr_fence->base;
+
+	seqno = pvr_fence_context_seqno_next(fctx);
+	dma_fence_init(&pvr_fence->base, &pvr_fence_ops, &fctx->lock,
+		       fctx->fence_context, seqno);
+
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_add_tail(&pvr_fence->fence_head, &fctx->fence_list);
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+	kref_get(&fctx->kref);
+
+	PVR_FENCE_TRACE(&pvr_fence->base, "created fence (%s)\n", name);
+
+	return pvr_fence;
+
+err_free_fence:
+	kfree(pvr_fence);
+	return NULL;
+}
+
+static const char *
+pvr_fence_foreign_get_driver_name(struct dma_fence *fence)
+{
+	return "unknown";
+}
+
+static const char *
+pvr_fence_foreign_get_timeline_name(struct dma_fence *fence)
+{
+	return "unknown";
+}
+
+static bool
+pvr_fence_foreign_enable_signaling(struct dma_fence *fence)
+{
+	WARN_ON("cannot enable signalling on foreign fence");
+	return false;
+}
+
+static signed long
+pvr_fence_foreign_wait(struct dma_fence *fence, bool intr, signed long timeout)
+{
+	WARN_ON("cannot wait on foreign fence");
+	return 0;
+}
+
+static void
+pvr_fence_foreign_release(struct dma_fence *fence)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+	struct pvr_fence_context *fctx = pvr_fence->fctx;
+	unsigned long flags;
+
+	PVR_FENCE_TRACE(&pvr_fence->base,
+			"released fence for foreign fence %llu#%d (%s)\n",
+			(u64) pvr_fence->fence->context,
+			pvr_fence->fence->seqno, pvr_fence->name);
+
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	pvr_fence_sync_value_set(pvr_fence, PVR_FENCE_SYNC_VAL_DONE);
+#endif
+
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_move(&pvr_fence->fence_head, &fctx->deferred_free_list);
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+	kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+}
+
+const struct dma_fence_ops pvr_fence_foreign_ops = {
+	.get_driver_name = pvr_fence_foreign_get_driver_name,
+	.get_timeline_name = pvr_fence_foreign_get_timeline_name,
+	.enable_signaling = pvr_fence_foreign_enable_signaling,
+	.wait = pvr_fence_foreign_wait,
+	.release = pvr_fence_foreign_release,
+};
+
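+/*
+ * Callback invoked when a foreign fence signals: set our shadow sync to
+ * signalled and queue check_status_work, which calls PVRSRVCheckStatus.
+ */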
+static void
+pvr_fence_foreign_signal_sync(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct pvr_fence *pvr_fence = container_of(cb, struct pvr_fence, cb);
+	struct pvr_fence_context *fctx = pvr_fence->fctx;
+
+	if (WARN_ON_ONCE(is_pvr_fence(fence)))
+		return;
+
+	pvr_fence_sync_value_set(pvr_fence, PVR_FENCE_SYNC_VAL_SIGNALED);
+
+	queue_work(fctx->fence_wq, &fctx->check_status_work);
+
+	PVR_FENCE_TRACE(&pvr_fence->base,
+			"foreign fence %llu#%d signalled (%s)\n",
+			(u64) pvr_fence->fence->context,
+			pvr_fence->fence->seqno, pvr_fence->name);
+
+	/* Drop the reference on the base fence */
+	dma_fence_put(&pvr_fence->base);
+}
+
+/**
+ * pvr_fence_create_from_fence - creates a PVR fence from a fence
+ * @fctx: PVR fence context on which the PVR fence should be created
+ * @fence: fence from which the PVR fence should be created
+ * @name: PVR fence name (used for debugging)
+ *
+ * Creates a PVR fence from an existing fence. If the fence is a foreign fence,
+ * i.e. one that doesn't originate from a PVR fence context, then a new PVR
+ * fence will be created. Otherwise, a reference will be taken on the underlying
+ * fence and the PVR fence will be returned.
+ *
+ * Once the fence is finished with pvr_fence_destroy should be called.
+ *
+ * Returns NULL if a PVR fence cannot be created.
+ */
+struct pvr_fence *
+pvr_fence_create_from_fence(struct pvr_fence_context *fctx,
+			    struct dma_fence *fence,
+			    const char *name)
+{
+	struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+	unsigned int seqno;
+	unsigned long flags;
+	PVRSRV_ERROR srv_err;
+	int err;
+
+	if (pvr_fence) {
+		if (WARN_ON(fence->ops == &pvr_fence_foreign_ops))
+			return NULL;
+		dma_fence_get(fence);
+
+		PVR_FENCE_TRACE(fence, "created fence from PVR fence (%s)\n",
+				name);
+		return pvr_fence;
+	}
+
+	pvr_fence = kzalloc(sizeof(*pvr_fence), GFP_KERNEL);
+	if (!pvr_fence)
+		return NULL;
+
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	srv_err = SyncPrimAlloc(fctx->sync_prim_context, &pvr_fence->sync,
+				name);
+#else
+	srv_err = SyncCheckpointAlloc(fctx->sync_checkpoint_context, -1, name,
+				      &pvr_fence->sync_checkpoint);
+#endif
+	if (srv_err != PVRSRV_OK)
+		goto err_free_pvr_fence;
+
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	pvr_fence_sync_value_set(pvr_fence, PVR_FENCE_SYNC_VAL_INIT);
+#endif
+	INIT_LIST_HEAD(&pvr_fence->fence_head);
+	INIT_LIST_HEAD(&pvr_fence->signal_head);
+	pvr_fence->fctx = fctx;
+	pvr_fence->name = name;
+	pvr_fence->fence = fence;
+	/*
+	 * We use the base fence to refcount the PVR fence and to do the
+	 * necessary clean up once the refcount drops to 0.
+	 */
+	seqno = pvr_fence_context_seqno_next(fctx);
+	dma_fence_init(&pvr_fence->base, &pvr_fence_foreign_ops, &fctx->lock,
+		       fctx->fence_context, seqno);
+
+	/*
+	 * Take an extra reference on the base fence that gets dropped when the
+	 * foreign fence is signalled.
+	 */
+	dma_fence_get(&pvr_fence->base);
+
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_add_tail(&pvr_fence->fence_head, &fctx->fence_list);
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+	kref_get(&fctx->kref);
+
+	PVR_FENCE_TRACE(&pvr_fence->base,
+			"created fence from foreign fence %llu#%d (%s)\n",
+			(u64) pvr_fence->fence->context,
+			pvr_fence->fence->seqno, name);
+
+	err = dma_fence_add_callback(fence, &pvr_fence->cb,
+				     pvr_fence_foreign_signal_sync);
+	if (err) {
+		if (err != -ENOENT)
+			goto err_put_ref;
+
+		/*
+		 * The fence has already signalled so set the sync as signalled.
+		 */
+		pvr_fence_sync_value_set(pvr_fence,
+					 PVR_FENCE_SYNC_VAL_SIGNALED);
+		PVR_FENCE_TRACE(&pvr_fence->base,
+				"foreign fence %llu#%d already signaled (%s)\n",
+				(u64) pvr_fence->fence->context,
+				pvr_fence->fence->seqno,
+				name);
+		dma_fence_put(&pvr_fence->base);
+	}
+
+	return pvr_fence;
+
+err_put_ref:
+	kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+	spin_lock_irqsave(&fctx->list_lock, flags);
+	list_del(&pvr_fence->fence_head);
+	spin_unlock_irqrestore(&fctx->list_lock, flags);
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	SyncPrimFree(pvr_fence->sync);
+#else
+	SyncCheckpointFree(pvr_fence->sync_checkpoint);
+#endif
+err_free_pvr_fence:
+	kfree(pvr_fence);
+	return NULL;
+}
+
+/**
+ * pvr_fence_destroy - destroys a PVR fence
+ * @pvr_fence: PVR fence to destroy
+ *
+ * Destroys a PVR fence. Upon return, the PVR fence may still exist if something
+ * else still references the underlying fence, e.g. a reservation object, or if
+ * software signalling has been enabled and the fence hasn't yet been signalled.
+ */
+void
+pvr_fence_destroy(struct pvr_fence *pvr_fence)
+{
+	PVR_FENCE_TRACE(&pvr_fence->base, "destroyed fence (%s)\n",
+			pvr_fence->name);
+
+	dma_fence_put(&pvr_fence->base);
+}
+
+/**
+ * pvr_fence_sync_sw_signal - signals a PVR fence sync
+ * @pvr_fence: PVR fence to signal
+ *
+ * Sets the PVR fence sync value to signalled.
+ *
+ * Returns -EINVAL if the PVR fence represents a foreign fence.
+ */
+int
+pvr_fence_sync_sw_signal(struct pvr_fence *pvr_fence)
+{
+	if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base))
+		return -EINVAL;
+
+	pvr_fence_sync_value_set(pvr_fence, PVR_FENCE_SYNC_VAL_SIGNALED);
+
+	queue_work(pvr_fence->fctx->fence_wq,
+		   &pvr_fence->fctx->check_status_work);
+
+	PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync signalled (%s)\n",
+			pvr_fence->name);
+
+	return 0;
+}
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+int
+pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences,
+			  PSYNC_CHECKPOINT *fence_checkpoints)
+{
+	PSYNC_CHECKPOINT *next_fence_checkpoint = fence_checkpoints;
+	struct pvr_fence **next_pvr_fence = pvr_fences;
+	struct pvr_fence *next_fence;
+	u32 fence_checkpoint_idx;
+
+	for (fence_checkpoint_idx = 0; fence_checkpoint_idx < nr_fences;
+	     fence_checkpoint_idx++) {
+		next_fence = *next_pvr_fence++;
+		*next_fence_checkpoint++ = next_fence->sync_checkpoint;
+		/*
+		 * Take a reference on the sync checkpoint; it will be
+		 * dropped later by the kick code.
+		 */
+		SyncCheckpointTakeRef(next_fence->sync_checkpoint);
+	}
+
+	return 0;
+}
+
+PSYNC_CHECKPOINT
+pvr_fence_get_checkpoint(struct pvr_fence *update_fence)
+{
+	return update_fence->sync_checkpoint;
+}
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_fence.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_fence.h
new file mode 100644
index 0000000..31ebf9a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_fence.h
@@ -0,0 +1,248 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR Linux fence interface
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_FENCE_H__)
+#define __PVR_FENCE_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+static inline void pvr_fence_cleanup(void)
+{
+}
+#else
+#include "pvr_linux_fence.h"
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+#include "pvrsrv_sync_km.h"
+#include "sync_checkpoint_external.h"
+#endif
+
+struct SYNC_PRIM_CONTEXT;
+struct PVRSRV_CLIENT_SYNC_PRIM;
+
+/**
+ * pvr_fence_context - PVR fence context used to create and manage PVR fences
+ * @lock: protects the context and fences created on the context
+ * @name: fence context name (used for debugging)
+ * @dbg_request_handle: handle for callback used to dump debug data
+ * @sync_prim_context: sync prim context used to create services client syncs
+ * @fence_context: fence context with which to associate fences
+ * @fence_seqno: sequence number to use for the next fence
+ * @fence_wq: work queue for signalled fence work
+ * @check_status_work: work item used to inform services when a foreign fence
+ * has signalled
+ * @cmd_complete_handle: handle for callback used to signal fences when fence
+ * syncs are met
+ * @signal_work: work item used to signal fences when fence syncs are met
+ * @list_lock: protects the active and active foreign lists
+ * @signal_list: list of fences waiting to be signalled
+ * @fence_list: list of fences (used for debugging)
+ * @deferred_free_list: list of fences to be freed once we are no longer
+ * holding any spinlocks. The deferred frees are performed when an update
+ * fence is signalled or when the context is destroyed.
+ * @kref: reference count on the context
+ * @destroy_work: delayed work item used to destroy the context
+ */
+struct pvr_fence_context {
+	spinlock_t lock;
+	const char *name;
+	void *dbg_request_handle;
+
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	struct SYNC_PRIM_CONTEXT *sync_prim_context;
+#else
+	/* Context used to create sync checkpoints. */
+	PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_context;
+#endif
+	u64 fence_context;
+	atomic_t fence_seqno;
+
+	struct workqueue_struct *fence_wq;
+	struct work_struct check_status_work;
+
+	void *cmd_complete_handle;
+	struct work_struct signal_work;
+
+	spinlock_t list_lock;
+	struct list_head signal_list;
+	struct list_head fence_list;
+	struct list_head deferred_free_list;
+
+	struct kref kref;
+	struct delayed_work destroy_work;
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	unsigned int destroy_retries_left;
+	unsigned int destroy_delay_ms;
+	bool module_got;
+#endif
+};
+
+/**
+ * pvr_fence - PVR fence that represents both native and foreign fences
+ * @base: fence structure
+ * @fctx: fence context on which this fence was created
+ * @name: fence name (used for debugging)
+ * @fence: pointer to base fence structure or foreign fence
+ * @sync: services sync primitive used by hardware
+ * @fence_head: entry on the context fence and deferred free list
+ * @signal_head: entry on the context signal list
+ * @cb: foreign fence callback to set the sync to signalled
+ */
+struct pvr_fence {
+	struct dma_fence base;
+	struct pvr_fence_context *fctx;
+	const char *name;
+
+	struct dma_fence *fence;
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	struct PVRSRV_CLIENT_SYNC_PRIM *sync;
+#else
+	PSYNC_CHECKPOINT sync_checkpoint;
+#endif
+
+	struct list_head fence_head;
+	struct list_head signal_head;
+	struct dma_fence_cb cb;
+};
+
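+/*
+ * Marker values written to a fence's sync: INIT when the fence is
+ * created, SIGNALED once it has signalled and DONE once the fence has
+ * been finished with. The distinctive bit patterns appear to be chosen
+ * so they are easy to spot in memory dumps.
+ */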
+enum pvr_fence_sync_val {
+	PVR_FENCE_SYNC_VAL_INIT = 0x60606060,
+	PVR_FENCE_SYNC_VAL_SIGNALED = 0x90909090,
+	PVR_FENCE_SYNC_VAL_DONE = 0xDEADDEAD,
+};
+
+extern const struct dma_fence_ops pvr_fence_ops;
+extern const struct dma_fence_ops pvr_fence_foreign_ops;
+
+static inline bool is_our_fence(struct pvr_fence_context *fctx,
+				struct dma_fence *fence)
+{
+	return (fence->context == fctx->fence_context);
+}
+
+static inline bool is_pvr_fence(struct dma_fence *fence)
+{
+	return ((fence->ops == &pvr_fence_ops) ||
+		(fence->ops == &pvr_fence_foreign_ops));
+}
+
+static inline struct pvr_fence *to_pvr_fence(struct dma_fence *fence)
+{
+	if (is_pvr_fence(fence))
+		return container_of(fence, struct pvr_fence, base);
+
+	return NULL;
+}
+
+struct pvr_fence_context *pvr_fence_context_create(void *dev_cookie,
+						   const char *name);
+void pvr_fence_context_destroy(struct pvr_fence_context *fctx);
+
+struct pvr_fence *pvr_fence_create(struct pvr_fence_context *fctx,
+				   const char *name);
+struct pvr_fence *pvr_fence_create_from_fence(struct pvr_fence_context *fctx,
+					      struct dma_fence *fence,
+					      const char *name);
+void pvr_fence_destroy(struct pvr_fence *pvr_fence);
+int pvr_fence_sync_sw_signal(struct pvr_fence *pvr_fence);
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+int pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences,
+			      PSYNC_CHECKPOINT *fence_checkpoints);
+PSYNC_CHECKPOINT pvr_fence_get_checkpoint(struct pvr_fence *update_fence);
+#endif
+
+static inline void pvr_fence_cleanup(void)
+{
+	/*
+	 * Ensure all PVR fence contexts have been destroyed, by flushing
+	 * the global workqueue.
+	 * For those versions of the DDK that don't use PVR fences, this
+	 * isn't necessary, but should be harmless.
+	 */
+	flush_scheduled_work();
+}
+
+#if defined(PVR_FENCE_DEBUG)
+#define PVR_FENCE_CTX_TRACE(c, fmt, ...)                                   \
+	do {                                                               \
+		struct pvr_fence_context *__fctx = (c);                    \
+		pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context,  \
+		       ## __VA_ARGS__);                                    \
+	} while (0)
+#else
+#define PVR_FENCE_CTX_TRACE(c, fmt, ...)
+#endif
+
+#define PVR_FENCE_CTX_WARN(c, fmt, ...)                                    \
+	do {                                                               \
+		struct pvr_fence_context *__fctx = (c);                    \
+		pr_warn("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \
+			## __VA_ARGS__);                                   \
+	} while (0)
+
+#define PVR_FENCE_CTX_ERR(c, fmt, ...)                                     \
+	do {                                                               \
+		struct pvr_fence_context *__fctx = (c);                    \
+		pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context,  \
+		       ## __VA_ARGS__);                                    \
+	} while (0)
+
+#if defined(PVR_FENCE_DEBUG)
+#define PVR_FENCE_TRACE(f, fmt, ...)                                       \
+	DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__)
+#else
+#define PVR_FENCE_TRACE(f, fmt, ...)
+#endif
+
+#define PVR_FENCE_WARN(f, fmt, ...)                                        \
+	DMA_FENCE_WARN(f, "(PVR) " fmt, ## __VA_ARGS__)
+
+#define PVR_FENCE_ERR(f, fmt, ...)                                         \
+	DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
+#endif /* !defined(__PVR_FENCE_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_linux_fence.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_linux_fence.h
new file mode 100644
index 0000000..69a3df9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_linux_fence.h
@@ -0,0 +1,96 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR Linux fence compatibility header
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_LINUX_FENCE_H__)
+#define __PVR_LINUX_FENCE_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \
+	(!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)))
+#include <linux/fence.h>
+#else
+#include <linux/dma-fence.h>
+#endif
+
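+/*
+ * Kernels before v4.10 provided this functionality in <linux/fence.h>
+ * under the plain "fence" naming; ChromiumOS kernels carry the dma_fence
+ * rename from v4.4 onwards. Map the dma_fence names onto the old API so
+ * the rest of the driver can use the modern naming unconditionally.
+ */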
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \
+	(!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)))
+/* Structures */
+#define dma_fence fence
+#define dma_fence_cb fence_cb
+#define dma_fence_ops fence_ops
+
+/* Defines and Enums */
+#define DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT
+#define DMA_FENCE_FLAG_SIGNALED_BIT FENCE_FLAG_SIGNALED_BIT
+#define DMA_FENCE_FLAG_USER_BITS FENCE_FLAG_USER_BITS
+
+#define DMA_FENCE_ERR FENCE_ERR
+#define DMA_FENCE_TRACE FENCE_TRACE
+#define DMA_FENCE_WARN FENCE_WARN
+
+/* Functions */
+#define dma_fence_add_callback fence_add_callback
+#define dma_fence_context_alloc fence_context_alloc
+#define dma_fence_default_wait fence_default_wait
+#define dma_fence_is_signaled fence_is_signaled
+#define dma_fence_free fence_free
+#define dma_fence_get fence_get
+#define dma_fence_get_rcu fence_get_rcu
+#define dma_fence_init fence_init
+#define dma_fence_put fence_put
+#define dma_fence_signal fence_signal
+#define dma_fence_wait fence_wait
+#define dma_fence_wait_timeout fence_wait_timeout
+
+#endif
+
+#endif /* !defined(__PVR_LINUX_FENCE_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_platform_drv.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_platform_drv.c
new file mode 100644
index 0000000..25a8b30
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_platform_drv.c
@@ -0,0 +1,279 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR DRM platform driver
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+
+#include "module_common.h"
+#include "pvr_drv.h"
+#include "pvrmodule.h"
+#include "sysinfo.h"
+
+static struct drm_driver pvr_drm_platform_driver;
+
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+/*
+ * This is an arbitrary value. If it's changed then the 'num_devices' module
+ * parameter description should also be updated to match.
+ */
+#define MAX_DEVICES 16
+
+static unsigned int pvr_num_devices = 1;
+static struct platform_device **pvr_devices;
+
+#if defined(NO_HARDWARE)
+static int pvr_num_devices_set(const char *val,
+			       const struct kernel_param *param)
+{
+	int err;
+
+	err = param_set_uint(val, param);
+	if (err)
+		return err;
+
+	if (pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES)
+		return -EINVAL;
+
+	return 0;
+}
+
+static const struct kernel_param_ops pvr_num_devices_ops = {
+	.set = pvr_num_devices_set,
+	.get = param_get_uint,
+};
+
+module_param_cb(num_devices, &pvr_num_devices_ops, &pvr_num_devices,
+		S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(num_devices,
+		 "Number of platform devices to register (default: 1 - max: 16)");
+#endif /* defined(NO_HARDWARE) */
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+
+static int pvr_devices_register(void)
+{
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+	struct platform_device_info pvr_dev_info = {
+		.name = SYS_RGX_DEV_NAME,
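+		/* -2 is PLATFORM_DEVID_AUTO: the core assigns device ids */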
+		.id = -2,
+#if defined(NO_HARDWARE)
+		/* Not all cores have 40 bit physical support, but this
+		 * works as long as no address above 32 bits is returned on
+		 * those cores. In the future this will be handled properly.
+		 */
+		.dma_mask = DMA_BIT_MASK(40),
+#else
+		.dma_mask = DMA_BIT_MASK(32),
+#endif
+	};
+	unsigned int i;
+
+	BUG_ON(pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES);
+
+	pvr_devices = kmalloc_array(pvr_num_devices, sizeof(*pvr_devices),
+				    GFP_KERNEL);
+	if (!pvr_devices)
+		return -ENOMEM;
+
+	for (i = 0; i < pvr_num_devices; i++) {
+		pvr_devices[i] = platform_device_register_full(&pvr_dev_info);
+		if (IS_ERR(pvr_devices[i])) {
+			DRM_ERROR("unable to register device %u (err=%ld)\n",
+				  i, PTR_ERR(pvr_devices[i]));
+			/* Unwind the devices registered so far. */
+			while (i--)
+				platform_device_unregister(pvr_devices[i]);
+			kfree(pvr_devices);
+			pvr_devices = NULL;
+			return -ENODEV;
+		}
+	}
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+
+	return 0;
+}
+
+static void pvr_devices_unregister(void)
+{
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+	unsigned int i;
+
+	BUG_ON(!pvr_devices);
+
+	for (i = 0; i < pvr_num_devices && pvr_devices[i]; i++)
+		platform_device_unregister(pvr_devices[i]);
+
+	kfree(pvr_devices);
+	pvr_devices = NULL;
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+}
+
+static int pvr_probe(struct platform_device *pdev)
+{
+	struct drm_device *dev;
+
+	DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+	dev = drm_dev_alloc(&pvr_drm_platform_driver, &pdev->dev);
+	if (IS_ERR_OR_NULL(dev))
+		return dev ? PTR_ERR(dev) : -ENOMEM;
+
+	/*
+	 * pvr_remove() and pvr_shutdown() look the DRM device up via the
+	 * platform drvdata, so the drm_device is what must be stored here.
+	 */
+	platform_set_drvdata(pdev, dev);
+
+	return drm_dev_register(dev, 0);
+}
+
+static int pvr_remove(struct platform_device *pdev)
+{
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+
+	DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+	drm_put_dev(ddev);
+
+	return 0;
+}
+
+static void pvr_shutdown(struct platform_device *pdev)
+{
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+
+	DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+	PVRSRVCommonDeviceShutdown(ddev->dev_private);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+static const struct of_device_id pvr_of_ids[] = {
+#if defined(SYS_RGX_OF_COMPATIBLE)
+	{ .compatible = SYS_RGX_OF_COMPATIBLE, },
+#endif
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, pvr_of_ids);
+#endif
+
+static const struct platform_device_id pvr_platform_ids[] = {
+#if defined(SYS_RGX_DEV_NAME)
+	{ SYS_RGX_DEV_NAME, 0 },
+#endif
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, pvr_platform_ids);
+
+static struct platform_driver pvr_platform_driver = {
+	.driver = {
+		.name		= DRVNAME,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+		.of_match_table	= of_match_ptr(pvr_of_ids),
+#endif
+		.pm		= &pvr_pm_ops,
+	},
+	.id_table		= pvr_platform_ids,
+	.probe			= pvr_probe,
+	.remove			= pvr_remove,
+	.shutdown		= pvr_shutdown,
+};
+
+static int __init pvr_init(void)
+{
+	int err;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	pvr_drm_platform_driver = pvr_drm_generic_driver;
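+	/*
+	 * drm_platform_set_busid only exists on kernels in the
+	 * v3.18..v4.5 range; outside that window the DRM core takes
+	 * care of the busid itself.
+	 */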
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+	pvr_drm_platform_driver.set_busid = drm_platform_set_busid;
+#endif
+
+	err = PVRSRVCommonDriverInit();
+	if (err)
+		return err;
+
+	err = platform_driver_register(&pvr_platform_driver);
+	if (err)
+		goto err_driver_deinit;
+
+	err = pvr_devices_register();
+	if (err)
+		goto err_driver_unregister;
+
+	MTKCommonDisablePowerDomain();
+
+	return 0;
+
+err_driver_unregister:
+	platform_driver_unregister(&pvr_platform_driver);
+err_driver_deinit:
+	PVRSRVCommonDriverDeinit();
+	return err;
+}
+
+static void __exit pvr_exit(void)
+{
+	DRM_DEBUG_DRIVER("\n");
+
+	pvr_devices_unregister();
+	platform_driver_unregister(&pvr_platform_driver);
+	PVRSRVCommonDriverDeinit();
+
+	DRM_DEBUG_DRIVER("done\n");
+}
+
+late_initcall(pvr_init);
+module_exit(pvr_exit);
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_sw_fence.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_sw_fence.c
new file mode 100644
index 0000000..e43d626
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_sw_fence.c
@@ -0,0 +1,175 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/spinlock_types.h>
+#include <linux/atomic.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+
+#include "pvr_sw_fence.h"
+
+struct pvr_sw_fence_context {
+	struct kref kref;
+	unsigned int context;
+	const char *context_name;
+	const char *driver_name;
+	atomic_t seqno;
+	atomic_t fence_count;
+};
+
+struct pvr_sw_fence {
+	struct dma_fence base;
+	struct pvr_sw_fence_context *fence_context;
+	spinlock_t lock;
+};
+
+#define to_pvr_sw_fence(fence) container_of(fence, struct pvr_sw_fence, base)
+
+static inline unsigned int
+pvr_sw_fence_context_seqno_next(struct pvr_sw_fence_context *fence_context)
+{
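+	/* Post-increment: hand out the current seqno, then advance it. */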
+	return atomic_inc_return(&fence_context->seqno) - 1;
+}
+
+static const char *pvr_sw_fence_get_driver_name(struct dma_fence *fence)
+{
+	struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+	return pvr_sw_fence->fence_context->driver_name;
+}
+
+static const char *pvr_sw_fence_get_timeline_name(struct dma_fence *fence)
+{
+	struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+	return pvr_sw_fence->fence_context->context_name;
+}
+
+static bool pvr_sw_fence_enable_signaling(struct dma_fence *fence)
+{
+	return true;
+}
+
+static void pvr_sw_fence_context_destroy_kref(struct kref *kref)
+{
+	struct pvr_sw_fence_context *fence_context =
+		container_of(kref, struct pvr_sw_fence_context, kref);
+	unsigned int fence_count;
+
+	fence_count = atomic_read(&fence_context->fence_count);
+	if (WARN_ON(fence_count))
+		pr_debug("%s context has %u fence(s) remaining\n",
+			 fence_context->context_name, fence_count);
+
+	kfree(fence_context);
+}
+
+static void pvr_sw_fence_release(struct dma_fence *fence)
+{
+	struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+	atomic_dec(&pvr_sw_fence->fence_context->fence_count);
+	kref_put(&pvr_sw_fence->fence_context->kref,
+		pvr_sw_fence_context_destroy_kref);
+	kfree(pvr_sw_fence);
+}
+
+static const struct dma_fence_ops pvr_sw_fence_ops = {
+	.get_driver_name = pvr_sw_fence_get_driver_name,
+	.get_timeline_name = pvr_sw_fence_get_timeline_name,
+	.enable_signaling = pvr_sw_fence_enable_signaling,
+	.wait = dma_fence_default_wait,
+	.release = pvr_sw_fence_release,
+};
+
+struct pvr_sw_fence_context *
+pvr_sw_fence_context_create(const char *context_name, const char *driver_name)
+{
+	struct pvr_sw_fence_context *fence_context;
+
+	fence_context = kmalloc(sizeof(*fence_context), GFP_KERNEL);
+	if (!fence_context)
+		return NULL;
+
+	fence_context->context = dma_fence_context_alloc(1);
+	fence_context->context_name = context_name;
+	fence_context->driver_name = driver_name;
+	atomic_set(&fence_context->seqno, 0);
+	atomic_set(&fence_context->fence_count, 0);
+	kref_init(&fence_context->kref);
+
+	return fence_context;
+}
+
+void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context)
+{
+	kref_put(&fence_context->kref, pvr_sw_fence_context_destroy_kref);
+}
+
+struct dma_fence *
+pvr_sw_fence_create(struct pvr_sw_fence_context *fence_context)
+{
+	struct pvr_sw_fence *pvr_sw_fence;
+	unsigned int seqno;
+
+	pvr_sw_fence = kmalloc(sizeof(*pvr_sw_fence), GFP_KERNEL);
+	if (!pvr_sw_fence)
+		return NULL;
+
+	spin_lock_init(&pvr_sw_fence->lock);
+	pvr_sw_fence->fence_context = fence_context;
+
+	seqno = pvr_sw_fence_context_seqno_next(fence_context);
+	dma_fence_init(&pvr_sw_fence->base, &pvr_sw_fence_ops,
+		       &pvr_sw_fence->lock, fence_context->context, seqno);
+
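+	/* Each fence holds a reference on its context until it is released. */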
+	atomic_inc(&fence_context->fence_count);
+	kref_get(&fence_context->kref);
+
+	return &pvr_sw_fence->base;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_sw_fence.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_sw_fence.h
new file mode 100644
index 0000000..c915b09
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/pvr_sw_fence.h
@@ -0,0 +1,58 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_SW_FENCES_H__)
+#define __PVR_SW_FENCES_H__
+
+#include "pvr_linux_fence.h"
+
+struct pvr_sw_fence_context;
+
+struct pvr_sw_fence_context *pvr_sw_fence_context_create(const char *name,
+						const char *driver_name);
+void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context);
+struct dma_fence *
+pvr_sw_fence_create(struct pvr_sw_fence_context *fence_context);
+
+#endif /* !defined(__PVR_SW_FENCES_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/services_kernel_client.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/services_kernel_client.h
new file mode 100644
index 0000000..e256807
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/services_kernel_client.h
@@ -0,0 +1,346 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File           services_kernel_client.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* This file contains a partial redefinition of the PowerVR Services 5
+ * interface for use by components which are checkpatch clean. This
+ * header is included by the unrefined, non-checkpatch clean headers
+ * to ensure that prototype/typedef/macro changes break the build.
+ */
+
+#ifndef __SERVICES_KERNEL_CLIENT__
+#define __SERVICES_KERNEL_CLIENT__
+
+#include "pvrsrv_error.h"
+
+#include <linux/types.h>
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+#include "pvrsrv_sync_km.h"
+#include "sync_checkpoint_external.h"
+#endif
+
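+/*
+ * If the full Services headers have already been seen they will have
+ * defined __pvrsrv_defined_struct_enum__ along with the real types, in
+ * which case forward declarations suffice below; otherwise provide
+ * minimal standalone definitions.
+ */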
+#ifndef __pvrsrv_defined_struct_enum__
+
+/* rgx_fwif_shared.h */
+
+struct _RGXFWIF_DEV_VIRTADDR_ {
+	__u32 ui32Addr;
+};
+
+/* sync_external.h */
+
+struct PVRSRV_CLIENT_SYNC_PRIM {
+	volatile __u32 *pui32LinAddr;
+};
+
+struct PVRSRV_CLIENT_SYNC_PRIM_OP {
+	__u32 ui32Flags;
+	struct pvrsrv_sync_prim *psSync;
+	__u32 ui32FenceValue;
+	__u32 ui32UpdateValue;
+};
+
+typedef enum tag_img_bool
+{
+	IMG_FALSE		= 0,
+	IMG_TRUE		= 1,
+	IMG_FORCE_ALIGN = 0x7FFFFFFF
+} IMG_BOOL, *IMG_PBOOL;
+
+#else /* __pvrsrv_defined_struct_enum__ */
+
+struct _RGXFWIF_DEV_VIRTADDR_;
+
+struct PVRSRV_CLIENT_SYNC_PRIM;
+struct PVRSRV_CLIENT_SYNC_PRIM_OP;
+
+enum tag_img_bool;
+
+#endif /* __pvrsrv_defined_struct_enum__ */
+
+struct _PMR_;
+struct _PVRSRV_DEVICE_NODE_;
+struct dma_buf;
+struct SYNC_PRIM_CONTEXT;
+
+
+/* pvr_notifier.h */
+
+#ifndef _CMDCOMPNOTIFY_PFN_
+typedef void (*PFN_CMDCOMP_NOTIFY)(void *hCmdCompHandle);
+#define _CMDCOMPNOTIFY_PFN_
+#endif
+enum PVRSRV_ERROR PVRSRVRegisterCmdCompleteNotify(void **phNotify,
+	PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, void *hPrivData);
+enum PVRSRV_ERROR PVRSRVUnregisterCmdCompleteNotify(void *hNotify);
+void PVRSRVCheckStatus(void *hCmdCompCallerHandle);
+
+#define DEBUG_REQUEST_DC               0
+#define DEBUG_REQUEST_SERVERSYNC       1
+#define DEBUG_REQUEST_SYS              2
+#define DEBUG_REQUEST_ANDROIDSYNC      3
+#define DEBUG_REQUEST_LINUXFENCE       4
+#define DEBUG_REQUEST_SYNCCHECKPOINT   5
+#define DEBUG_REQUEST_HTB              6
+#define DEBUG_REQUEST_APPHINT          7
+#define DEBUG_REQUEST_FALLBACKSYNC     8
+
+#define DEBUG_REQUEST_VERBOSITY_LOW    0
+#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1
+#define DEBUG_REQUEST_VERBOSITY_HIGH   2
+#define DEBUG_REQUEST_VERBOSITY_MAX    DEBUG_REQUEST_VERBOSITY_HIGH
+
+#ifndef _DBGNOTIFY_PFNS_
+typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile,
+	const char *fmt, ...) __printf(2, 3);
+typedef void (*PFN_DBGREQ_NOTIFY) (void *hDebugRequestHandle,
+	__u32 ui32VerbLevel,
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+	void *pvDumpDebugFile);
+#define _DBGNOTIFY_PFNS_
+#endif
+enum PVRSRV_ERROR PVRSRVRegisterDbgRequestNotify(void **phNotify,
+	struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+	PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+	__u32 ui32RequesterID,
+	void *hDbgRequestHandle);
+enum PVRSRV_ERROR PVRSRVUnregisterDbgRequestNotify(void *hNotify);
+
+/* physmem_dmabuf.h */
+
+struct dma_buf *PhysmemGetDmaBuf(struct _PMR_ *psPMR);
+
+/* pvrsrv.h */
+
+enum PVRSRV_ERROR PVRSRVAcquireGlobalEventObjectKM(void **phGlobalEventObject);
+enum PVRSRV_ERROR PVRSRVReleaseGlobalEventObjectKM(void *hGlobalEventObject);
+
+/* sync.h */
+
+enum PVRSRV_ERROR SyncPrimContextCreate(
+	struct _PVRSRV_DEVICE_NODE_ *psDevConnection,
+	struct SYNC_PRIM_CONTEXT **phSyncPrimContext);
+void SyncPrimContextDestroy(struct SYNC_PRIM_CONTEXT *hSyncPrimContext);
+
+enum PVRSRV_ERROR SyncPrimAlloc(struct SYNC_PRIM_CONTEXT *hSyncPrimContext,
+	struct PVRSRV_CLIENT_SYNC_PRIM **ppsSync, const char *pszClassName);
+enum PVRSRV_ERROR SyncPrimFree(struct PVRSRV_CLIENT_SYNC_PRIM *psSync);
+enum PVRSRV_ERROR SyncPrimGetFirmwareAddr(
+	struct PVRSRV_CLIENT_SYNC_PRIM *psSync,
+	__u32 *sync_addr);
+enum PVRSRV_ERROR SyncPrimSet(struct PVRSRV_CLIENT_SYNC_PRIM *psSync,
+	__u32 ui32Value);
+
+/* pdump_km.h */
+
+#ifdef PDUMP
+enum PVRSRV_ERROR __printf(1, 2) PDumpComment(char *fmt, ...);
+#else
+static inline enum PVRSRV_ERROR __printf(1, 2) PDumpComment(char *fmt, ...)
+{
+	return PVRSRV_OK;
+}
+#endif
+
+/* osfunc.h */
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+void OSAcquireBridgeLock(void);
+void OSReleaseBridgeLock(void);
+#endif
+enum PVRSRV_ERROR OSEventObjectWait(void *hOSEventKM);
+enum PVRSRV_ERROR OSEventObjectOpen(void *hEventObject, void **phOSEventKM);
+enum PVRSRV_ERROR OSEventObjectClose(void *hOSEventKM);
+
+/* srvkm.h */
+
+enum PVRSRV_ERROR PVRSRVDeviceCreate(void *pvOSDevice,
+	int i32UMIdentifier,
+	struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode);
+enum PVRSRV_ERROR PVRSRVDeviceDestroy(
+	struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+const char *PVRSRVGetErrorStringKM(enum PVRSRV_ERROR eError);
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+
+/* This is the function that kick code will call in order to obtain a list of the PSYNC_CHECKPOINTs
+ * for a given PVRSRV_FENCE passed to a kick function.
+ * The OS native sync code will allocate the memory to hold the returned list of PSYNC_CHECKPOINT ptrs.
+ * The caller will free this memory once it has finished referencing it.
+ *
+ * Input: fence                     The input (check) fence
+ * Output: nr_checkpoints           The number of PVRSRV_SYNC_CHECKPOINT ptrs returned in the
+ *                                  checkpoint_handles parameter.
+ * Output: fence_uid                Unique ID of the check fence
+ * Input/Output: checkpoint_handles The returned list of PVRSRV_SYNC_CHECKPOINTs.
+ */
+enum PVRSRV_ERROR
+pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+		       PVRSRV_FENCE fence, u32 *nr_checkpoints,
+		       PSYNC_CHECKPOINT **checkpoint_handles, u32 *fence_uid);
+#ifndef _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(
+	PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, PVRSRV_FENCE fence,
+	u32 *nr_checkpoints, PSYNC_CHECKPOINT **checkpoint_handles,
+	u32 *fence_uid);
+#endif
+
+/* This is the function that kick code will call in order to obtain a new PVRSRV_FENCE from the
+ * OS native sync code and the PSYNC_CHECKPOINT used in that fence.
+ * The OS native sync code needs to implement a function meeting this specification.
+ *
+ * Input: fence_name               A string to annotate the fence with (for debug).
+ * Input: timeline                 The timeline on which the new fence is to be created.
+ * Output: new_fence               The new PVRSRV_FENCE to be returned by the kick call.
+ * Output: fence_uid               Unique ID of the update fence.
+ * Output: fence_finalise_data     Pointer to data needed to finalise the fence.
+ * Output: new_checkpoint_handle   The PSYNC_CHECKPOINT used by the new fence.
+ */
+enum PVRSRV_ERROR
+pvr_sync_create_fence(const char *fence_name,
+                      PVRSRV_TIMELINE timeline,
+                      PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                      PVRSRV_FENCE *new_fence,
+                      u32 *fence_uid,
+                      void **fence_finalise_data,
+                      PSYNC_CHECKPOINT *new_checkpoint_handle,
+                      void **timeline_update_sync,
+                      __u32 *timeline_update_value);
+#ifndef _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(
+		const char *fence_name,
+		PVRSRV_TIMELINE timeline,
+		PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+		PVRSRV_FENCE *new_fence,
+		u32 *fence_uid,
+		void **fence_finalise_data,
+		PSYNC_CHECKPOINT *new_checkpoint_handle,
+		void **timeline_update_sync,
+		__u32 *timeline_update_value);
+#endif
+
+/* This is the function that kick code will call in order to 'rollback' a created
+ * output fence should an error occur when submitting the kick.
+ * The OS native sync code needs to implement a function meeting this specification.
+ *
+ * Input: fence_to_rollback   The PVRSRV_FENCE to be 'rolled back'. The fence
+ *                            should be destroyed and any actions taken due to
+ *                            its creation that need to be undone should be
+ *                            reverted.
+ * Input: finalise_data       The finalise data for the fence to be 'rolled back'.
+ */
+enum PVRSRV_ERROR
+pvr_sync_rollback_fence_data(PVRSRV_FENCE fence_to_rollback,
+			     void *finalise_data);
+#ifndef _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(
+	PVRSRV_FENCE fence_to_rollback, void *finalise_data);
+#endif
+
+/* This is the function that kick code will call in order to 'finalise' a created
+ * output fence just prior to returning from the kick function.
+ * The OS native sync code needs to implement a function meeting this
+ * specification - the implementation may be a nop if the OS does not need to
+ * perform any actions at this point.
+ *
+ * Input: fence_fd            The PVRSRV_FENCE to be 'finalised'. This value
+ *                            will have been returned by an earlier call to
+ *                            pvr_sync_create_fence().
+ * Input: finalise_data       The finalise data returned by an earlier call
+ *                            to pvr_sync_create_fence().
+ */
+enum PVRSRV_ERROR
+pvr_sync_finalise_fence(PVRSRV_FENCE fence_fd, void *finalise_data);
+#ifndef _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(
+	PVRSRV_FENCE fence_to_finalise, void *finalise_data);
+#endif
+
+/* This is the function that kick code will call in a NO_HARDWARE build only after
+ * sync checkpoints have been manually signalled, to allow the OS native sync
+ * implementation to update its timelines (as the usual callback notification
+ * of signalled checkpoints is not supported for NO_HARDWARE).
+ */
+#ifndef _CHECKPOINT_PFNS_
+typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data);
+typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr);
+#define _CHECKPOINT_PFNS_
+#endif
+enum PVRSRV_ERROR SyncCheckpointRegisterFunctions(
+	PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve,
+	PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate,
+	PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback,
+	PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise,
+	PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines,
+	PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem);
+
+/* sync_checkpoint.h */
+enum PVRSRV_ERROR SyncCheckpointContextCreate(
+	struct _PVRSRV_DEVICE_NODE_ *psDevConnection,
+	PSYNC_CHECKPOINT_CONTEXT *phSyncCheckpointContext);
+enum PVRSRV_ERROR SyncCheckpointContextDestroy(
+	PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext);
+enum PVRSRV_ERROR SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
+	PVRSRV_TIMELINE timeline, const char *pszCheckpointName,
+	PSYNC_CHECKPOINT *ppsSyncCheckpoint);
+void SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint);
+void SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint);
+enum tag_img_bool SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint);
+enum tag_img_bool SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint);
+enum PVRSRV_ERROR SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+enum PVRSRV_ERROR SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+void SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+PVRSRV_TIMELINE SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+
+#endif /* __SERVICES_KERNEL_CLIENT__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_crtc.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_crtc.c
new file mode 100644
index 0000000..4fb9e04
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_crtc.c
@@ -0,0 +1,1381 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_linux_fence.h"
+#include "pvr_sw_fence.h"
+
+#include <linux/reservation.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "drm_pdp_drv.h"
+#include "drm_pdp_gem.h"
+
+#include "pdp_apollo.h"
+#include "pdp_odin.h"
+
+#include "plato_drv.h"
+#include "pdp2_regs.h"
+#include "pdp2_mmu_regs.h"
+
+#include "kernel_compatibility.h"
+
+#define PDP_STRIDE_SHIFT 4
+#define PDP_BASE_ADDR_SHIFT 4
+#define PLATO_PDP_STRIDE_SHIFT 5
+#define PDP_REDUCED_BLANKING_VEVENT 1
+
+#define PLATO_PDP_PIXEL_FORMAT_G		(0x00)
+#define PLATO_PDP_PIXEL_FORMAT_ARGB4	(0x04)
+#define PLATO_PDP_PIXEL_FORMAT_ARGB1555	(0x05)
+#define PLATO_PDP_PIXEL_FORMAT_RGB8		(0x06)
+#define PLATO_PDP_PIXEL_FORMAT_RGB565	(0x07)
+#define PLATO_PDP_PIXEL_FORMAT_ARGB8	(0x08)
+#define PLATO_PDP_PIXEL_FORMAT_AYUV8	(0x10)
+#define PLATO_PDP_PIXEL_FORMAT_YUV10	(0x15)
+#define PLATO_PDP_PIXEL_FORMAT_RGBA8	(0x16)
+
+enum pdp_crtc_flip_status {
+	PDP_CRTC_FLIP_STATUS_NONE = 0,
+	PDP_CRTC_FLIP_STATUS_PENDING,
+	PDP_CRTC_FLIP_STATUS_DONE,
+};
+
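+/*
+ * Per-flip bookkeeping: wait_fence is the fence the flip waits on before
+ * being performed and complete_fence is the fence signalled once the flip
+ * has been handled.
+ */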
+struct pdp_flip_data {
+	struct dma_fence_cb base;
+	struct drm_crtc *crtc;
+	struct dma_fence *wait_fence;
+	struct dma_fence *complete_fence;
+};
+
+/* Returns true on success, false on failure. */
+static bool pdp_clocks_set(struct drm_crtc *crtc,
+			   struct drm_display_mode *adjusted_mode)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	bool res;
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN: {
+		pdp_odin_set_updates_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg, false);
+		res = pdp_odin_clocks_set(crtc->dev->dev,
+				pdp_crtc->pdp_reg, pdp_crtc->pll_reg,
+				0,                       /* apollo only */
+				pdp_crtc->odn_core_reg,  /* odin only */
+				adjusted_mode->hdisplay,
+				adjusted_mode->vdisplay);
+		pdp_odin_set_updates_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg, true);
+
+		break;
+	}
+	case PDP_VERSION_APOLLO: {
+		int clock_in_mhz = adjusted_mode->clock / 1000;
+
+		pdp_apollo_set_updates_enabled(crtc->dev->dev,
+					       pdp_crtc->pdp_reg, false);
+		res = pdp_apollo_clocks_set(crtc->dev->dev,
+				pdp_crtc->pdp_reg, pdp_crtc->pll_reg,
+				clock_in_mhz,           /* apollo only */
+				NULL,                   /* odin only */
+				adjusted_mode->hdisplay,
+				adjusted_mode->vdisplay);
+		pdp_apollo_set_updates_enabled(crtc->dev->dev,
+					       pdp_crtc->pdp_reg, true);
+
+		DRM_DEBUG_DRIVER("pdp clock set to %dMHz\n", clock_in_mhz);
+
+		break;
+	}
+	case PDP_VERSION_PLATO:
+		/*plato_enable_pdp_clock(dev_priv->dev->dev->parent);*/
+		res = true;
+		break;
+	default:
+		BUG();
+	}
+
+	return res;
+}
+
+void pdp_crtc_set_plane_enabled(struct drm_crtc *crtc, bool enable)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	uint32_t value;
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN:
+		pdp_odin_set_plane_enabled(crtc->dev->dev,
+					   pdp_crtc->pdp_reg,
+					   0, enable);
+		break;
+	case PDP_VERSION_APOLLO:
+		pdp_apollo_set_plane_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg,
+					     0, enable);
+		break;
+	case PDP_VERSION_PLATO:
+		dev_info(crtc->dev->dev, "Set plane: %s\n",
+			enable ? "enable" : "disable");
+
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_GRPH1CTRL_OFFSET);
+		value = REG_VALUE_SET(value,
+				enable ? 0x1 : 0x0,
+				PDP_GRPH1CTRL_GRPH1STREN_SHIFT,
+				PDP_GRPH1CTRL_GRPH1STREN_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_GRPH1CTRL_OFFSET,
+				value);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void pdp_crtc_set_syncgen_enabled(struct drm_crtc *crtc, bool enable)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	uint32_t value;
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN:
+		pdp_odin_set_syncgen_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg,
+					     enable);
+		break;
+	case PDP_VERSION_APOLLO:
+		pdp_apollo_set_syncgen_enabled(crtc->dev->dev,
+					       pdp_crtc->pdp_reg,
+					       enable);
+		break;
+	case PDP_VERSION_PLATO:
+		dev_info(crtc->dev->dev, "Set syncgen: %s\n",
+			enable ? "enable" : "disable");
+
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+			PDP_SYNCCTRL_OFFSET);
+		/* Starts Sync Generator. */
+		value = REG_VALUE_SET(value,
+			enable ? 0x1 : 0x0,
+			PDP_SYNCCTRL_SYNCACTIVE_SHIFT,
+			PDP_SYNCCTRL_SYNCACTIVE_MASK);
+		/* Controls polarity of pixel clock: Pixel clock is inverted */
+		value = REG_VALUE_SET(value, 0x01,
+			PDP_SYNCCTRL_CLKPOL_SHIFT,
+			PDP_SYNCCTRL_CLKPOL_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+			PDP_SYNCCTRL_OFFSET,
+			value);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void pdp_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+
+	if (enable) {
+		pdp_crtc_set_syncgen_enabled(crtc, enable);
+		pdp_crtc_set_plane_enabled(crtc, dev_priv->display_enabled);
+		drm_crtc_vblank_on(crtc);
+	} else {
+		drm_crtc_vblank_off(crtc);
+		pdp_crtc_set_plane_enabled(crtc, enable);
+		pdp_crtc_set_syncgen_enabled(crtc, enable);
+	}
+}
+
+
+static void pdp_crtc_helper_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void pdp_crtc_helper_prepare(struct drm_crtc *crtc)
+{
+	pdp_crtc_set_enabled(crtc, false);
+}
+
+static void pdp_crtc_helper_commit(struct drm_crtc *crtc)
+{
+	pdp_crtc_set_enabled(crtc, true);
+}
+
+static bool pdp_crtc_helper_mode_fixup(struct drm_crtc *crtc,
+					const struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+
+	if (dev_priv->version == PDP_VERSION_ODIN
+		&& mode->hdisplay == 1920
+		&& mode->vdisplay == 1080) {
+
+		/* Standard CEA-861 1080p60 timings: 2200x1125 total */
+		const int h_total = 2200;
+		const int h_active_start = 192;
+		const int h_back_porch_start = 44;
+		const int v_total = 1125;
+		const int v_active_start = 41;
+		const int v_back_porch_start = 5;
+
+		adjusted_mode->htotal = h_total;
+		adjusted_mode->hsync_start = adjusted_mode->htotal -
+						h_active_start;
+		adjusted_mode->hsync_end = adjusted_mode->hsync_start +
+						h_back_porch_start;
+		adjusted_mode->vtotal = v_total;
+		adjusted_mode->vsync_start = adjusted_mode->vtotal -
+						v_active_start;
+		adjusted_mode->vsync_end = adjusted_mode->vsync_start +
+						v_back_porch_start;
+	}
+	return true;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+static inline unsigned int pdp_drm_fb_cpp(struct drm_framebuffer *fb)
+{
+	return fb->format->cpp[0];
+}
+
+static inline u32 pdp_drm_fb_format(struct drm_framebuffer *fb)
+{
+	return fb->format->format;
+}
+#else
+static inline unsigned int pdp_drm_fb_cpp(struct drm_framebuffer *fb)
+{
+	return fb->bits_per_pixel / 8;
+}
+
+static inline u32 pdp_drm_fb_format(struct drm_framebuffer *fb)
+{
+	return fb->pixel_format;
+}
+#endif
+
+static int pdp_crtc_helper_mode_set_base_atomic(struct drm_crtc *crtc,
+						struct drm_framebuffer *fb,
+						int x, int y,
+						enum mode_set_atomic atomic)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb);
+	unsigned int pitch = fb->pitches[0];
+	uint64_t address = pdp_gem_get_dev_addr(pdp_fb->obj);
+	uint32_t value;
+
+	/*
+	 * User space specifies 'x' and 'y' and this is used to tell the display
+	 * to scan out from part way through a buffer.
+	 */
+	address += ((y * pitch) + (x * (pdp_drm_fb_cpp(fb))));
+
+	/*
+	 * NOTE: If the buffer dimensions are less than the current mode then
+	 * the output will appear in the top left of the screen. This can be
+	 * centered by adjusting horizontal active start, right border start,
+	 * vertical active start and bottom border start. At this point it's
+	 * not entirely clear where this should be done. On the one hand it's
+	 * related to pdp_crtc_helper_mode_set but on the other hand there
+	 * might not always be a call to pdp_crtc_helper_mode_set. This needs
+	 * to be investigated.
+	 */
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN:
+		switch (pdp_drm_fb_format(fb)) {
+		case DRM_FORMAT_ARGB8888:
+		case DRM_FORMAT_XRGB8888:
+			break;
+		default:
+			DRM_ERROR("unsupported pixel format (format = %d)\n",
+				pdp_drm_fb_format(fb));
+			return -EINVAL;
+		}
+
+		pdp_odin_set_updates_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg, false);
+
+		pdp_odin_reset_planes(crtc->dev->dev,
+				      pdp_crtc->pdp_reg);
+		pdp_odin_set_surface(crtc->dev->dev,
+			pdp_crtc->pdp_reg,
+			0,
+			address,
+			0, 0,
+			fb->width,
+			fb->height, pitch,
+			ODN_PDP_SURF_PIXFMT_ARGB8888,
+			255,
+			false);
+		pdp_odin_set_plane_enabled(crtc->dev->dev,
+					   pdp_crtc->pdp_reg,
+					   0, dev_priv->display_enabled);
+
+		pdp_odin_set_updates_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg, true);
+
+		break;
+	case PDP_VERSION_APOLLO:
+		switch (pdp_drm_fb_format(fb)) {
+		case DRM_FORMAT_ARGB8888:
+		case DRM_FORMAT_XRGB8888:
+			break;
+		default:
+			DRM_ERROR("unsupported pixel format (format = %d)\n",
+				pdp_drm_fb_format(fb));
+			return -EINVAL;
+		}
+
+		pdp_apollo_set_updates_enabled(crtc->dev->dev,
+					       pdp_crtc->pdp_reg, false);
+
+		pdp_apollo_reset_planes(crtc->dev->dev,
+					pdp_crtc->pdp_reg);
+		pdp_apollo_set_surface(crtc->dev->dev,
+			pdp_crtc->pdp_reg,
+			0,
+			address,
+			0, 0,
+			fb->width, fb->height,
+			pitch,
+			0xE,
+			255,
+			false);
+		pdp_apollo_set_plane_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg,
+					     0, dev_priv->display_enabled);
+
+		pdp_apollo_set_updates_enabled(crtc->dev->dev,
+					       pdp_crtc->pdp_reg, true);
+
+		break;
+	case PDP_VERSION_PLATO:
+		/* Set the frame buffer base address */
+		if (address & 0xF) {
+			dev_warn(crtc->dev->dev,
+				 "frame buffer address is not 16-byte aligned\n");
+		}
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_GRPH1BASEADDR_OFFSET,
+				address & PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK);
+
+		/* Write 8 msb of the address to address extension bits in the
+		 * PDP MMU control register
+		 */
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+					 SYS_PLATO_REG_PDP_SIZE +
+					 PDP_BIF_ADDRESS_CONTROL_OFFSET);
+		value = REG_VALUE_SET(value,
+			address >> 32,
+			PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SHIFT,
+			PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_MASK);
+		value = REG_VALUE_SET(value,
+			0x00,
+			PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SHIFT,
+			PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_MASK);
+		value = REG_VALUE_SET(value,
+			0x01,
+			PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SHIFT,
+			PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+			SYS_PLATO_REG_PDP_SIZE + PDP_BIF_ADDRESS_CONTROL_OFFSET,
+			value);
+
+		/* Set the framebuffer pixel format */
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_GRPH1SURF_OFFSET);
+
+		switch (pdp_drm_fb_format(fb)) {
+		case DRM_FORMAT_ARGB8888:
+			value = REG_VALUE_SET(value,
+				PLATO_PDP_PIXEL_FORMAT_ARGB8,
+				PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT,
+				PDP_GRPH1SURF_GRPH1PIXFMT_MASK);
+			break;
+		default:
+			DRM_ERROR("unsupported pixel format (format = %d)\n",
+				pdp_drm_fb_format(fb));
+			return -EINVAL;
+		}
+
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_GRPH1SURF_OFFSET, value);
+		/*
+		 * Set the framebuffer size (this might be smaller than the
+		 * resolution)
+		 */
+		value = REG_VALUE_SET(0,
+				fb->width - 1,
+				PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT,
+				PDP_GRPH1SIZE_GRPH1WIDTH_MASK);
+		value = REG_VALUE_SET(value,
+				fb->height - 1,
+				PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT,
+				PDP_GRPH1SIZE_GRPH1HEIGHT_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_GRPH1SIZE_OFFSET,
+				value);
+
+		/* Set the framebuffer stride in 16byte words */
+		value = REG_VALUE_SET(0,
+				(pitch >> PLATO_PDP_STRIDE_SHIFT) - 1,
+				PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT,
+				PDP_GRPH1STRIDE_GRPH1STRIDE_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_GRPH1STRIDE_OFFSET, value);
+
+		/* Issues with NoC sending interleaved read responses to PDP
+		 * require burst to be 1
+		 */
+		value = REG_VALUE_SET(0,
+				0x02,
+				PDP_MEMCTRL_MEMREFRESH_SHIFT,
+				PDP_MEMCTRL_MEMREFRESH_MASK);
+		value = REG_VALUE_SET(value,
+				0x01,
+				PDP_MEMCTRL_BURSTLEN_SHIFT,
+				PDP_MEMCTRL_BURSTLEN_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_MEMCTRL_OFFSET,
+				value);
+		break;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static int pdp_crtc_helper_mode_set_base(struct drm_crtc *crtc,
+					 int x, int y,
+					 struct drm_framebuffer *old_fb)
+{
+	if (!crtc->primary->fb) {
+		DRM_ERROR("no framebuffer\n");
+		return 0;
+	}
+
+	return pdp_crtc_helper_mode_set_base_atomic(crtc,
+						    crtc->primary->fb,
+						    x, y,
+						    0);
+}
+
+static int pdp_crtc_helper_mode_set(struct drm_crtc *crtc,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode,
+				    int x, int y,
+				    struct drm_framebuffer *old_fb)
+{
+	/*
+	 * ht   = horizontal total
+	 * hbps = horizontal back porch start
+	 * has  = horizontal active start
+	 * hlbs = horizontal left border start
+	 * hfps = horizontal front porch start
+	 * hrbs = horizontal right border start
+	 *
+	 * vt   = vertical total
+	 * vbps = vertical back porch start
+	 * vas  = vertical active start
+	 * vtbs = vertical top border start
+	 * vfps = vertical front porch start
+	 * vbbs = vertical bottom border start
+	 */
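+	/*
+	 * The PDP counts positions from the start of the sync pulse: the
+	 * sync width becomes the back porch start, the active area starts
+	 * at <total> - <sync start>, and the borders are zero width (hence
+	 * hlbs == has and hrbs == hfps below).
+	 */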
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	uint32_t ht = adjusted_mode->htotal;
+	uint32_t hbps = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
+	uint32_t has = (adjusted_mode->htotal - adjusted_mode->hsync_start);
+	uint32_t hlbs = has;
+	uint32_t hfps = (hlbs + adjusted_mode->hdisplay);
+	uint32_t hrbs = hfps;
+	uint32_t vt = adjusted_mode->vtotal;
+	uint32_t vbps = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
+	uint32_t vas = (adjusted_mode->vtotal - adjusted_mode->vsync_start);
+	uint32_t vtbs = vas;
+	uint32_t vfps = (vtbs + adjusted_mode->vdisplay);
+	uint32_t vbbs = vfps;
+	uint32_t value;
+	bool ok;
+
+	ok = pdp_clocks_set(crtc, adjusted_mode);
+
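+	/*
+	 * A clock setup failure is only logged; 0 (success) is still
+	 * returned, so the mode set is silently abandoned.
+	 */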
+	if (!ok) {
+		dev_err(crtc->dev->dev, "failed to set pdp clocks\n");
+		return 0;
+	}
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN:
+		pdp_odin_set_updates_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg, false);
+		pdp_odin_reset_planes(crtc->dev->dev,
+				      pdp_crtc->pdp_reg);
+		pdp_odin_mode_set(crtc->dev->dev,
+			     pdp_crtc->pdp_reg,
+			     adjusted_mode->hdisplay, adjusted_mode->vdisplay,
+			     hbps, ht, has,
+			     hlbs, hfps, hrbs,
+			     vbps, vt, vas,
+			     vtbs, vfps, vbbs,
+			     adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC,
+			     adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC);
+		pdp_odin_set_powerdwn_enabled(crtc->dev->dev,
+					      pdp_crtc->pdp_reg, false);
+		pdp_odin_set_updates_enabled(crtc->dev->dev,
+					     pdp_crtc->pdp_reg, true);
+		break;
+	case PDP_VERSION_APOLLO:
+		pdp_apollo_set_updates_enabled(crtc->dev->dev,
+					       pdp_crtc->pdp_reg, false);
+		pdp_apollo_reset_planes(crtc->dev->dev,
+					pdp_crtc->pdp_reg);
+		pdp_apollo_mode_set(crtc->dev->dev,
+			     pdp_crtc->pdp_reg,
+			     adjusted_mode->hdisplay, adjusted_mode->vdisplay,
+			     hbps, ht, has,
+			     hlbs, hfps, hrbs,
+			     vbps, vt, vas,
+			     vtbs, vfps, vbbs,
+			     adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC,
+			     adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC);
+		pdp_apollo_set_powerdwn_enabled(crtc->dev->dev,
+						pdp_crtc->pdp_reg, false);
+		pdp_apollo_set_updates_enabled(crtc->dev->dev,
+					       pdp_crtc->pdp_reg, true);
+		break;
+	case PDP_VERSION_PLATO:
+		dev_info(crtc->dev->dev,
+			 "setting mode to %dx%d\n",
+			 adjusted_mode->hdisplay, adjusted_mode->vdisplay);
+
+		/* Update control */
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_REGISTER_UPDATE_CTRL_OFFSET);
+		value = REG_VALUE_SET(value, 0x0,
+				PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT,
+				PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_REGISTER_UPDATE_CTRL_OFFSET, value);
+
+		/* Set hsync timings */
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_HSYNC1_OFFSET);
+		value = REG_VALUE_SET(value,
+				hbps,
+				PDP_HSYNC1_HBPS_SHIFT,
+				PDP_HSYNC1_HBPS_MASK);
+		value = REG_VALUE_SET(value,
+				ht,
+				PDP_HSYNC1_HT_SHIFT,
+				PDP_HSYNC1_HT_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_HSYNC1_OFFSET, value);
+
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_HSYNC2_OFFSET);
+		value = REG_VALUE_SET(value,
+				has,
+				PDP_HSYNC2_HAS_SHIFT,
+				PDP_HSYNC2_HAS_MASK);
+		value = REG_VALUE_SET(value,
+				hlbs,
+				PDP_HSYNC2_HLBS_SHIFT,
+				PDP_HSYNC2_HLBS_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_HSYNC2_OFFSET, value);
+
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_HSYNC3_OFFSET);
+		value = REG_VALUE_SET(value,
+				hfps,
+				PDP_HSYNC3_HFPS_SHIFT,
+				PDP_HSYNC3_HFPS_MASK);
+		value = REG_VALUE_SET(value,
+				hrbs,
+				PDP_HSYNC3_HRBS_SHIFT,
+				PDP_HSYNC3_HRBS_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_HSYNC3_OFFSET, value);
+
+		/* Set vsync timings */
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_VSYNC1_OFFSET);
+		value = REG_VALUE_SET(value,
+				vbps,
+				PDP_VSYNC1_VBPS_SHIFT,
+				PDP_VSYNC1_VBPS_MASK);
+		value = REG_VALUE_SET(value,
+				vt,
+				PDP_VSYNC1_VT_SHIFT,
+				PDP_VSYNC1_VT_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_VSYNC1_OFFSET, value);
+
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_VSYNC2_OFFSET);
+		value = REG_VALUE_SET(value,
+				vas,
+				PDP_VSYNC2_VAS_SHIFT,
+				PDP_VSYNC2_VAS_MASK);
+		value = REG_VALUE_SET(value,
+				vtbs,
+				PDP_VSYNC2_VTBS_SHIFT,
+				PDP_VSYNC2_VTBS_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_VSYNC2_OFFSET, value);
+
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_VSYNC3_OFFSET);
+		value = REG_VALUE_SET(value,
+				vfps,
+				PDP_VSYNC3_VFPS_SHIFT,
+				PDP_VSYNC3_VFPS_MASK);
+		value = REG_VALUE_SET(value,
+				vbbs,
+				PDP_VSYNC3_VBBS_SHIFT,
+				PDP_VSYNC3_VBBS_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_VSYNC3_OFFSET, value);
+
+		/* Horizontal data enable */
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_HDECTRL_OFFSET);
+		value = REG_VALUE_SET(value,
+				has,
+				PDP_HDECTRL_HDES_SHIFT,
+				PDP_HDECTRL_HDES_MASK);
+		value = REG_VALUE_SET(value,
+				hrbs,
+				PDP_HDECTRL_HDEF_SHIFT,
+				PDP_HDECTRL_HDEF_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_HDECTRL_OFFSET, value);
+
+		/* Vertical data enable */
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_VDECTRL_OFFSET);
+		value = REG_VALUE_SET(value,
+				vtbs, /* XXX: effectively VAS (vtbs == vas) */
+				PDP_VDECTRL_VDES_SHIFT,
+				PDP_VDECTRL_VDES_MASK);
+		value = REG_VALUE_SET(value,
+				vfps, /* XXX: effectively VBBS (vfps == vbbs) */
+				PDP_VDECTRL_VDEF_SHIFT,
+				PDP_VDECTRL_VDEF_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_VDECTRL_OFFSET, value);
+
+		/* Vertical event start and vertical fetch start */
+		/* XXX: Review this */
+		if (pdp_crtc->reduced_blanking) {
+			value = 0;
+			value = REG_VALUE_SET(value,
+				vbbs + PDP_REDUCED_BLANKING_VEVENT,
+				PDP_VEVENT_VEVENT_SHIFT,
+				PDP_VEVENT_VEVENT_MASK);
+			value = REG_VALUE_SET(value,
+				vbps / 2,
+				PDP_VEVENT_VFETCH_SHIFT,
+				PDP_VEVENT_VFETCH_MASK);
+			plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_VEVENT_OFFSET, value);
+		} else {
+			value = 0;
+			value = REG_VALUE_SET(value,
+				0,
+				PDP_VEVENT_VEVENT_SHIFT,
+				PDP_VEVENT_VEVENT_MASK);
+			value = REG_VALUE_SET(value,
+				vbps,
+				PDP_VEVENT_VFETCH_SHIFT,
+				PDP_VEVENT_VFETCH_MASK);
+			plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_VEVENT_OFFSET, value);
+		}
+
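+		/*
+		 * Note that this read-modify-write reprograms VEVENT and
+		 * VFETCH, overriding the values written just above (see the
+		 * XXX comment).
+		 */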
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_VEVENT_OFFSET);
+		value = REG_VALUE_SET(value,
+				vbps,
+				PDP_VEVENT_VFETCH_SHIFT,
+				PDP_VEVENT_VFETCH_MASK);
+		value = REG_VALUE_SET(value,
+				vfps,
+				PDP_VEVENT_VEVENT_SHIFT,
+				PDP_VEVENT_VEVENT_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_VEVENT_OFFSET, value);
+
+		/* Set up polarities of sync/blank */
+		value = REG_VALUE_SET(0,
+				0x1,
+				PDP_SYNCCTRL_BLNKPOL_SHIFT,
+				PDP_SYNCCTRL_BLNKPOL_MASK);
+
+		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+			value = REG_VALUE_SET(value, 0x1,
+				PDP_SYNCCTRL_HSPOL_SHIFT,
+				PDP_SYNCCTRL_HSPOL_MASK);
+
+		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+			value = REG_VALUE_SET(value, 0x1,
+				PDP_SYNCCTRL_VSPOL_SHIFT,
+				PDP_SYNCCTRL_VSPOL_MASK);
+
+		plato_write_reg32(pdp_crtc->pdp_reg,
+			PDP_SYNCCTRL_OFFSET,
+			value);
+		break;
+	default:
+		BUG();
+	}
+	return pdp_crtc_helper_mode_set_base(crtc, x, y, old_fb);
+}
+
+static void pdp_crtc_helper_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void pdp_crtc_flip_complete(struct drm_crtc *crtc);
+
+static void pdp_crtc_helper_disable(struct drm_crtc *crtc)
+{
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	enum pdp_crtc_flip_status status;
+
+	pdp_crtc_set_enabled(crtc, false);
+
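+	/*
+	 * If a flip is in flight, wait (up to 30 seconds) for it to leave
+	 * the pending state before tearing down, and complete it here for
+	 * synchronous flips.
+	 */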
+	status = atomic_read(&pdp_crtc->flip_status);
+	if (status != PDP_CRTC_FLIP_STATUS_NONE) {
+		long lerr;
+
+		lerr = wait_event_timeout(
+			pdp_crtc->flip_pending_wait_queue,
+			atomic_read(&pdp_crtc->flip_status)
+					!= PDP_CRTC_FLIP_STATUS_PENDING,
+			30 * HZ);
+		if (!lerr)
+			DRM_ERROR("Failed to wait for pending flip\n");
+		else if (!pdp_crtc->flip_async)
+			pdp_crtc_flip_complete(crtc);
+	}
+}
+
+static void pdp_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+
+	DRM_DEBUG_DRIVER("[CRTC:%d]\n", crtc->base.id);
+
+	drm_crtc_cleanup(crtc);
+
+	if (pdp_crtc->pll_reg)
+		iounmap(pdp_crtc->pll_reg);
+	if (pdp_crtc->pdp_bif_reg)
+		iounmap(pdp_crtc->pdp_bif_reg);
+	if (pdp_crtc->odn_core_reg)
+		iounmap(pdp_crtc->odn_core_reg);
+	iounmap(pdp_crtc->pdp_reg);
+	release_mem_region(pdp_crtc->pdp_reg_phys_base, pdp_crtc->pdp_reg_size);
+
+	kfree(pdp_crtc->primary_plane);
+	kfree(pdp_crtc);
+	dev_priv->crtc = NULL;
+}
+
+static void pdp_crtc_flip_complete(struct drm_crtc *crtc)
+{
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	unsigned long flags;
+	struct dma_fence *fence;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+	/* The flipping process has been completed so reset the flip status */
+	atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE);
+
+	fence = pdp_crtc->flip_data->complete_fence;
+
+	dma_fence_put(pdp_crtc->flip_data->wait_fence);
+	kfree(pdp_crtc->flip_data);
+	pdp_crtc->flip_data = NULL;
+
+	if (pdp_crtc->flip_event) {
+		drm_crtc_send_vblank_event(crtc, pdp_crtc->flip_event);
+		pdp_crtc->flip_event = NULL;
+	}
+
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	WARN_ON(dma_fence_signal(fence));
+	dma_fence_put(fence);
+}
+
+static void pdp_crtc_flip(struct drm_crtc *crtc)
+{
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	struct drm_framebuffer *old_fb;
+
+	WARN_ON(atomic_read(&pdp_crtc->flip_status)
+			!= PDP_CRTC_FLIP_STATUS_PENDING);
+
+	old_fb = pdp_crtc->old_fb;
+	pdp_crtc->old_fb = NULL;
+
+	/*
+	 * The graphics stream registers latch on vsync so we can go ahead and
+	 * do the flip now.
+	 */
+	(void) pdp_crtc_helper_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+
+	atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE);
+	wake_up(&pdp_crtc->flip_pending_wait_queue);
+
+	if (pdp_crtc->flip_async)
+		pdp_crtc_flip_complete(crtc);
+}
+
+static void pdp_crtc_flip_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct pdp_flip_data *flip_data =
+		container_of(cb, struct pdp_flip_data, base);
+
+	pdp_crtc_flip(flip_data->crtc);
+}
+
+static void pdp_crtc_flip_schedule_cb(struct dma_fence *fence,
+				      struct dma_fence_cb *cb)
+{
+	struct pdp_flip_data *flip_data =
+		container_of(cb, struct pdp_flip_data, base);
+	int err = 0;
+
+	if (flip_data->wait_fence)
+		err = dma_fence_add_callback(flip_data->wait_fence,
+					     &flip_data->base,
+					     pdp_crtc_flip_cb);
+
+	if (!flip_data->wait_fence || err) {
+		if (err && err != -ENOENT)
+			DRM_ERROR("flip failed to wait on old buffer\n");
+		pdp_crtc_flip_cb(flip_data->wait_fence, &flip_data->base);
+	}
+}
+
+static int pdp_crtc_flip_schedule(struct drm_crtc *crtc,
+				  struct drm_gem_object *obj,
+				  struct drm_gem_object *old_obj)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	struct reservation_object *resv = pdp_gem_get_resv(obj);
+	struct reservation_object *old_resv = pdp_gem_get_resv(old_obj);
+	struct pdp_flip_data *flip_data;
+	struct dma_fence *fence;
+	int err;
+
+	flip_data = kmalloc(sizeof(*flip_data), GFP_KERNEL);
+	if (!flip_data)
+		return -ENOMEM;
+
+	flip_data->crtc = crtc;
+
+	flip_data->complete_fence = pvr_sw_fence_create(dev_priv->dev_fctx);
+	if (!flip_data->complete_fence) {
+		err = -ENOMEM;
+		goto err_free_fence_data;
+	}
+
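+	/*
+	 * Publish the flip-completion fence on the old buffer's reservation
+	 * object so other users of that buffer wait for the flip to finish,
+	 * and record the old buffer's exclusive fence to wait on before
+	 * actually flipping.
+	 */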
+	ww_mutex_lock(&old_resv->lock, NULL);
+	err = reservation_object_reserve_shared(old_resv);
+	if (err) {
+		ww_mutex_unlock(&old_resv->lock);
+		goto err_complete_fence_put;
+	}
+
+	reservation_object_add_shared_fence(old_resv,
+					    flip_data->complete_fence);
+
+	flip_data->wait_fence =
+		dma_fence_get(reservation_object_get_excl(old_resv));
+
+	if (old_resv != resv) {
+		ww_mutex_unlock(&old_resv->lock);
+		ww_mutex_lock(&resv->lock, NULL);
+	}
+
+	fence = dma_fence_get(reservation_object_get_excl(resv));
+	ww_mutex_unlock(&resv->lock);
+
+	pdp_crtc->flip_data = flip_data;
+	atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_PENDING);
+
+	if (fence) {
+		err = dma_fence_add_callback(fence, &flip_data->base,
+					     pdp_crtc_flip_schedule_cb);
+		dma_fence_put(fence);
+		if (err && err != -ENOENT)
+			goto err_set_flip_status_none;
+	}
+
+	if (!fence || err == -ENOENT) {
+		pdp_crtc_flip_schedule_cb(fence, &flip_data->base);
+		err = 0;
+	}
+
+	return err;
+
+err_set_flip_status_none:
+	atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE);
+	dma_fence_put(flip_data->wait_fence);
+err_complete_fence_put:
+	dma_fence_put(flip_data->complete_fence);
+err_free_fence_data:
+	kfree(flip_data);
+	return err;
+}
+
+static int pdp_crtc_page_flip(struct drm_crtc *crtc,
+			      struct drm_framebuffer *fb,
+			      struct drm_pending_vblank_event *event,
+			      uint32_t page_flip_flags)
+{
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb);
+	struct pdp_framebuffer *pdp_old_fb =
+		to_pdp_framebuffer(crtc->primary->fb);
+	enum pdp_crtc_flip_status status;
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	status = atomic_read(&pdp_crtc->flip_status);
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	if (status != PDP_CRTC_FLIP_STATUS_NONE)
+		return -EBUSY;
+
+	if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) {
+		err = drm_crtc_vblank_get(crtc);
+		if (err)
+			return err;
+	}
+
+	pdp_crtc->old_fb = crtc->primary->fb;
+	pdp_crtc->flip_event = event;
+	pdp_crtc->flip_async = !!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC);
+
+	/* Set the crtc primary plane to point to the new framebuffer */
+	crtc->primary->fb = fb;
+
+	err = pdp_crtc_flip_schedule(crtc, pdp_fb->obj, pdp_old_fb->obj);
+	if (err) {
+		crtc->primary->fb = pdp_crtc->old_fb;
+		pdp_crtc->old_fb = NULL;
+		pdp_crtc->flip_event = NULL;
+		pdp_crtc->flip_async = false;
+
+		DRM_ERROR("failed to schedule flip (err=%d)\n", err);
+		goto err_vblank_put;
+	}
+
+	return 0;
+
+err_vblank_put:
+	if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
+		drm_crtc_vblank_put(crtc);
+	return err;
+}
+
+static const struct drm_crtc_helper_funcs pdp_crtc_helper_funcs = {
+	.dpms = pdp_crtc_helper_dpms,
+	.prepare = pdp_crtc_helper_prepare,
+	.commit = pdp_crtc_helper_commit,
+	.mode_fixup = pdp_crtc_helper_mode_fixup,
+	.mode_set = pdp_crtc_helper_mode_set,
+	.mode_set_base = pdp_crtc_helper_mode_set_base,
+	.load_lut = pdp_crtc_helper_load_lut,
+	.mode_set_base_atomic = pdp_crtc_helper_mode_set_base_atomic,
+	.disable = pdp_crtc_helper_disable,
+};
+
+static const struct drm_crtc_funcs pdp_crtc_funcs = {
+	.reset = NULL,
+	.cursor_set = NULL,
+	.cursor_move = NULL,
+	.gamma_set = NULL,
+	.destroy = pdp_crtc_destroy,
+	.set_config = drm_crtc_helper_set_config,
+	.page_flip = pdp_crtc_page_flip,
+};
+
+
+struct drm_crtc *pdp_crtc_create(struct drm_device *dev, uint32_t number)
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	struct pdp_crtc *pdp_crtc;
+	const char *crtc_name = NULL;
+	int err;
+
+	pdp_crtc = kzalloc(sizeof(*pdp_crtc), GFP_KERNEL);
+	if (!pdp_crtc) {
+		err = -ENOMEM;
+		goto err_exit;
+	}
+
+	init_waitqueue_head(&pdp_crtc->flip_pending_wait_queue);
+	atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE);
+	pdp_crtc->number = number;
+
+	switch (number) {
+	case 0:
+	{
+		struct resource *regs;
+
+		regs = platform_get_resource_byname(dev->platformdev,
+						    IORESOURCE_MEM,
+						    "pdp-regs");
+		if (!regs) {
+			DRM_ERROR("missing pdp register info\n");
+			err = -ENXIO;
+			goto err_crtc_free;
+		}
+
+		pdp_crtc->pdp_reg_phys_base = regs->start;
+		pdp_crtc->pdp_reg_size = resource_size(regs);
+
+		if (dev_priv->version == PDP_VERSION_ODIN ||
+			dev_priv->version == PDP_VERSION_APOLLO) {
+			regs = platform_get_resource_byname(dev->platformdev,
+							    IORESOURCE_MEM,
+							    "pll-regs");
+			if (!regs) {
+				DRM_ERROR("missing pll register info\n");
+				err = -ENXIO;
+				goto err_crtc_free;
+			}
+
+			pdp_crtc->pll_reg_phys_base = regs->start;
+			pdp_crtc->pll_reg_size = resource_size(regs);
+
+			pdp_crtc->pll_reg =
+				ioremap_nocache(pdp_crtc->pll_reg_phys_base,
+						pdp_crtc->pll_reg_size);
+			if (!pdp_crtc->pll_reg) {
+				DRM_ERROR("failed to map pll registers\n");
+				err = -ENOMEM;
+				goto err_crtc_free;
+			}
+		} else if (dev_priv->version == PDP_VERSION_PLATO) {
+			regs = platform_get_resource_byname(dev->platformdev,
+				    IORESOURCE_MEM,
+				    PLATO_PDP_RESOURCE_BIF_REGS);
+			if (!regs) {
+				DRM_ERROR("missing pdp-bif register info\n");
+				err = -ENXIO;
+				goto err_crtc_free;
+			}
+
+			pdp_crtc->pdp_bif_reg_phys_base = regs->start;
+			pdp_crtc->pdp_bif_reg_size = resource_size(regs);
+
+			/* crtc_name is still NULL here; name the region directly */
+			if (!request_mem_region(pdp_crtc->pdp_bif_reg_phys_base,
+						pdp_crtc->pdp_bif_reg_size,
+						"pdp-bif")) {
+				DRM_ERROR("failed to reserve pdp-bif registers\n");
+				err = -EBUSY;
+				goto err_crtc_free;
+			}
+
+			pdp_crtc->pdp_bif_reg =
+				ioremap_nocache(pdp_crtc->pdp_bif_reg_phys_base,
+						pdp_crtc->pdp_bif_reg_size);
+			if (!pdp_crtc->pdp_bif_reg) {
+				DRM_ERROR("failed to map pdp-bif registers\n");
+				err = -ENOMEM;
+				goto err_iounmap_regs;
+			}
+		}
+
+		if (dev_priv->version == PDP_VERSION_ODIN) {
+			regs = platform_get_resource_byname(dev->platformdev,
+							    IORESOURCE_MEM,
+							    "odn-core");
+			if (!regs) {
+				DRM_ERROR("missing odn-core info\n");
+				err = -ENXIO;
+				goto err_crtc_free;
+			}
+
+			pdp_crtc->odn_core_phys_base = regs->start;
+			pdp_crtc->odn_core_size = resource_size(regs);
+
+			pdp_crtc->odn_core_reg
+				= ioremap_nocache(pdp_crtc->odn_core_phys_base,
+						  pdp_crtc->odn_core_size);
+			if (!pdp_crtc->odn_core_reg) {
+				DRM_ERROR("failed to map pdp reset register\n");
+				err = -ENOMEM;
+				goto err_iounmap_regs;
+			}
+		}
+
+		crtc_name = "crtc-0";
+		break;
+	}
+	default:
+		DRM_ERROR("invalid crtc number %u\n", number);
+		err = -EINVAL;
+		goto err_crtc_free;
+	}
+
+	if (!request_mem_region(pdp_crtc->pdp_reg_phys_base,
+				pdp_crtc->pdp_reg_size,
+				crtc_name)) {
+		DRM_ERROR("failed to reserve pdp registers\n");
+		err = -EBUSY;
+		goto err_crtc_free;
+	}
+
+	pdp_crtc->pdp_reg = ioremap_nocache(pdp_crtc->pdp_reg_phys_base,
+					    pdp_crtc->pdp_reg_size);
+	if (!pdp_crtc->pdp_reg) {
+		DRM_ERROR("failed to map pdp registers\n");
+		err = -ENOMEM;
+		goto err_release_mem_region;
+	}
+
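+	/*
+	 * Plato registers an explicit ARGB8888-only primary plane and
+	 * initialises the CRTC with it; the other devices use the legacy
+	 * drm_crtc_init() path, which creates an implicit primary plane.
+	 */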
+	if (dev_priv->version == PDP_VERSION_PLATO) {
+		const uint32_t format = DRM_FORMAT_ARGB8888;
+
+		pdp_crtc->primary_plane = kzalloc(
+			sizeof(*pdp_crtc->primary_plane),
+			GFP_KERNEL);
+		if (pdp_crtc->primary_plane == NULL) {
+			DRM_ERROR("failed to allocate primary plane\n");
+			err = -ENOMEM;
+			goto err_iounmap_regs;
+		}
+
+		err = drm_universal_plane_init(dev, pdp_crtc->primary_plane,
+					       0, &drm_primary_helper_funcs,
+					       &format,
+					       1, DRM_PLANE_TYPE_PRIMARY, NULL);
+		if (err) {
+			DRM_ERROR("universal plane init failed\n");
+			goto err_free_primary_plane;
+		}
+
+		err = drm_crtc_init_with_planes(dev, &pdp_crtc->base,
+			pdp_crtc->primary_plane, NULL, &pdp_crtc_funcs, NULL);
+		if (err) {
+			DRM_ERROR("crtc init with planes failed\n");
+			goto err_free_primary_plane;
+		}
+	} else {
+		err = drm_crtc_init(dev, &pdp_crtc->base, &pdp_crtc_funcs);
+		if (err)
+			goto err_iounmap_regs;
+	}
+
+	drm_crtc_helper_add(&pdp_crtc->base, &pdp_crtc_helper_funcs);
+
+	DRM_DEBUG_DRIVER("[CRTC:%d]\n", pdp_crtc->base.base.id);
+
+	return &pdp_crtc->base;
+
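+	/*
+	 * XXX: the unwind below is not exact for every failure point: a
+	 * Plato pdp-bif or Odin odn-core mapping failure reaches
+	 * err_iounmap_regs before pdp_reg is mapped or its region is
+	 * requested, and the pdp-bif region is never released.
+	 */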
+err_free_primary_plane:
+	kfree(pdp_crtc->primary_plane);
+err_iounmap_regs:
+	iounmap(pdp_crtc->pdp_reg);
+	if (pdp_crtc->odn_core_reg)
+		iounmap(pdp_crtc->odn_core_reg);
+	if (pdp_crtc->pdp_bif_reg)
+		iounmap(pdp_crtc->pdp_bif_reg);
+err_release_mem_region:
+	release_mem_region(pdp_crtc->pdp_reg_phys_base, pdp_crtc->pdp_reg_size);
+err_crtc_free:
+	kfree(pdp_crtc);
+err_exit:
+	return ERR_PTR(err);
+}
+
+void pdp_crtc_set_vblank_enabled(struct drm_crtc *crtc, bool enable)
+{
+	struct pdp_drm_private *dev_priv = crtc->dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	uint32_t value;
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN:
+		pdp_odin_set_vblank_enabled(crtc->dev->dev,
+					    pdp_crtc->pdp_reg,
+					    enable);
+		break;
+	case PDP_VERSION_APOLLO:
+		pdp_apollo_set_vblank_enabled(crtc->dev->dev,
+					    pdp_crtc->pdp_reg,
+					    enable);
+		break;
+	case PDP_VERSION_PLATO:
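+		/* Clear any stale interrupts before changing the vblank mask */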
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_INTCLR_OFFSET,
+				0xFFFFFFFF);
+
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_INTENAB_OFFSET);
+		value = REG_VALUE_SET(value,
+				enable ? 0x1 : 0x0,
+				PDP_INTENAB_INTEN_VBLNK0_SHIFT,
+				PDP_INTENAB_INTEN_VBLNK0_MASK);
+		plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_INTENAB_OFFSET, value);
+		break;
+	default:
+		BUG();
+	}
+}
+
+void pdp_crtc_irq_handler(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	uint32_t value;
+
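+	/*
+	 * On a vblank from this CRTC: notify DRM and, if a flip has latched
+	 * (status DONE), complete it and drop the vblank reference taken at
+	 * page flip time.
+	 */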
+	switch (dev_priv->version) {
+	case PDP_VERSION_ODIN:
+		if (pdp_odin_check_and_clear_vblank(
+			crtc->dev->dev,
+			pdp_crtc->pdp_reg)) {
+
+			enum pdp_crtc_flip_status status;
+
+			drm_handle_vblank(dev, pdp_crtc->number);
+
+			status = atomic_read(&pdp_crtc->flip_status);
+			if (status == PDP_CRTC_FLIP_STATUS_DONE) {
+				if (!pdp_crtc->flip_async)
+					pdp_crtc_flip_complete(crtc);
+				drm_crtc_vblank_put(crtc);
+			}
+		}
+		break;
+	case PDP_VERSION_APOLLO:
+		if (pdp_apollo_check_and_clear_vblank(
+			crtc->dev->dev,
+			pdp_crtc->pdp_reg)) {
+
+			enum pdp_crtc_flip_status status;
+
+			drm_handle_vblank(dev, pdp_crtc->number);
+
+			status = atomic_read(&pdp_crtc->flip_status);
+			if (status == PDP_CRTC_FLIP_STATUS_DONE) {
+				if (!pdp_crtc->flip_async)
+					pdp_crtc_flip_complete(crtc);
+				drm_crtc_vblank_put(crtc);
+			}
+		}
+		break;
+	case PDP_VERSION_PLATO:
+		value = plato_read_reg32(pdp_crtc->pdp_reg,
+				PDP_INTSTAT_OFFSET);
+
+		if (REG_VALUE_GET(value,
+				PDP_INTSTAT_INTS_VBLNK0_SHIFT,
+				PDP_INTSTAT_INTS_VBLNK0_MASK)) {
+			enum pdp_crtc_flip_status status;
+
+			plato_write_reg32(pdp_crtc->pdp_reg,
+				PDP_INTCLR_OFFSET,
+				(1 << PDP_INTCLR_INTCLR_VBLNK0_SHIFT));
+
+			drm_handle_vblank(dev, pdp_crtc->number);
+
+			status = atomic_read(&pdp_crtc->flip_status);
+			if (status == PDP_CRTC_FLIP_STATUS_DONE) {
+				if (!pdp_crtc->flip_async)
+					pdp_crtc_flip_complete(crtc);
+				drm_crtc_vblank_put(crtc);
+			}
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+void pdp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file)
+{
+	struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+	if (pdp_crtc->flip_event &&
+	    pdp_crtc->flip_event->base.file_priv == file) {
+		pdp_crtc->flip_event->base.destroy(&pdp_crtc->flip_event->base);
+		pdp_crtc->flip_event = NULL;
+	}
+
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_debugfs.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_debugfs.c
new file mode 100644
index 0000000..295adf4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_debugfs.c
@@ -0,0 +1,170 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/debugfs.h>
+
+#include "drm_pdp_drv.h"
+
+#define PDP_DEBUGFS_DISPLAY_ENABLED "display_enabled"
+
+static int display_enabled_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+static ssize_t display_enabled_read(struct file *file,
+				    char __user *user_buffer,
+				    size_t count,
+				    loff_t *position_ptr)
+{
+	struct drm_device *dev = file->private_data;
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	loff_t position = *position_ptr;
+	char buffer[] = "N\n";
+	size_t buffer_size = ARRAY_SIZE(buffer) - 1; /* exclude the NUL */
+	int err;
+
+	if (position < 0)
+		return -EINVAL;
+	else if (position >= buffer_size || count == 0)
+		return 0;
+
+	if (dev_priv->display_enabled)
+		buffer[0] = 'Y';
+
+	if (count > buffer_size - position)
+		count = buffer_size - position;
+
+	err = copy_to_user(user_buffer, &buffer[position], count);
+	if (err)
+		return -EFAULT;
+
+	*position_ptr = position + count;
+
+	return count;
+}
+
+static ssize_t display_enabled_write(struct file *file,
+				     const char __user *user_buffer,
+				     size_t count,
+				     loff_t *position)
+{
+	struct drm_device *dev = file->private_data;
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	char buffer[3];
+	int err;
+
+	count = min(count, ARRAY_SIZE(buffer) - 1);
+
+	err = copy_from_user(buffer, user_buffer, count);
+	if (err)
+		return -EFAULT;
+	buffer[count] = '\0';
+
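+	/*
+	 * strtobool() accepts y/Y/n/N/1/0 input; on success, propagate the
+	 * new state to the CRTC's primary plane if one has been created.
+	 */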
+	if (!strtobool(buffer, &dev_priv->display_enabled) && dev_priv->crtc)
+		pdp_crtc_set_plane_enabled(dev_priv->crtc, dev_priv->display_enabled);
+
+	return count;
+}
+
+static const struct file_operations pdp_display_enabled_fops = {
+	.owner = THIS_MODULE,
+	.open = display_enabled_open,
+	.read = display_enabled_read,
+	.write = display_enabled_write,
+	.llseek = default_llseek,
+};
+
+static int pdp_debugfs_create(struct drm_minor *minor, const char *name,
+			      umode_t mode, const struct file_operations *fops)
+{
+	struct drm_info_node *node;
+
+	/*
+	 * Our driver private data is not accessible when this function is
+	 * called, so fake up a node here to allow the entries to be cleaned
+	 * up later on.
+	 */
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->dent = debugfs_create_file(name, mode, minor->debugfs_root,
+					 minor->dev, fops);
+	if (!node->dent) {
+		kfree(node);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->info_ent = (void *) fops;
+
+	mutex_lock(&minor->debugfs_lock);
+	list_add(&node->list, &minor->debugfs_list);
+	mutex_unlock(&minor->debugfs_lock);
+
+	return 0;
+}
+
+int pdp_debugfs_init(struct drm_minor *minor)
+{
+	int err;
+
+	err = pdp_debugfs_create(minor, PDP_DEBUGFS_DISPLAY_ENABLED,
+				 S_IFREG | S_IRUGO | S_IWUSR,
+				 &pdp_display_enabled_fops);
+	if (err) {
+		DRM_INFO("failed to create '%s' debugfs entry\n",
+			 PDP_DEBUGFS_DISPLAY_ENABLED);
+	}
+
+	return err;
+}
+
+void pdp_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files((struct drm_info_list *) &pdp_display_enabled_fops,
+				 1, minor);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.c
new file mode 100644
index 0000000..6ad70db
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.c
@@ -0,0 +1,532 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_linux_fence.h"
+#include "pvr_sw_fence.h"
+
+#include <linux/module.h>
+#include <linux/reservation.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+#include <drm/drm_gem.h>
+#endif
+
+#include "tc_drv.h"
+#include "pvrversion.h"
+
+#include "drm_pdp_drv.h"
+#include "drm_pdp_gem.h"
+#include "pdp_drm.h"
+
+#include "odin_defs.h"
+
+#if defined(SUPPORT_PLATO_DISPLAY)
+#include "plato_drv.h"
+#include "pdp2_regs.h"
+#include "pdp2_mmu_regs.h"
+#endif
+
+#define DRIVER_NAME "pdp"
+#define DRIVER_DESC "Imagination Technologies PDP DRM Display Driver"
+#define DRIVER_DATE "20150612"
+
+static bool display_enable = true;
+
+module_param(display_enable, bool, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(display_enable, "Enable all displays (default: Y)");
+
+
+static void pdp_irq_handler(void *data)
+{
+	struct drm_device *dev = data;
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		pdp_crtc_irq_handler(crtc);
+}
+
+static int pdp_load(struct drm_device *dev, unsigned long flags)
+{
+	struct pdp_drm_private *dev_priv;
+	int err;
+
+	DRM_INFO("loading %s device\n", dev->platformdev->name);
+
+	platform_set_drvdata(dev->platformdev, dev);
+
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (!dev_priv)
+		return -ENOMEM;
+
+	dev->dev_private = dev_priv;
+	dev_priv->dev = dev;
+	dev_priv->version =
+		(enum pdp_version) dev->platformdev->id_entry->driver_data;
+	dev_priv->display_enabled = display_enable;
+
+	if (dev_priv->version == PDP_VERSION_APOLLO ||
+		dev_priv->version == PDP_VERSION_ODIN) {
+#if !defined(SUPPORT_PLATO_DISPLAY)
+		err = tc_enable(dev->dev->parent);
+		if (err) {
+			DRM_ERROR("failed to enable parent device (err=%d)\n", err);
+			goto err_dev_priv_free;
+		}
+#endif
+	}
+#if defined(SUPPORT_PLATO_DISPLAY)
+	else if (dev_priv->version == PDP_VERSION_PLATO) {
+		err = plato_enable(dev->dev->parent);
+		if (err) {
+			DRM_ERROR("failed to enable parent device (err=%d)\n", err);
+			goto err_dev_priv_free;
+		}
+	}
+#endif
+
+	dev_priv->gem_priv = pdp_gem_init(dev);
+	if (!dev_priv->gem_priv) {
+		DRM_ERROR("gem initialisation failed\n");
+		err = -ENOMEM;
+		goto err_disable_parent_device;
+	}
+
+	dev_priv->dev_fctx = pvr_sw_fence_context_create("pdp-hw", "pdp");
+	if (!dev_priv->dev_fctx) {
+		err = -ENOMEM;
+		goto err_gem_cleanup;
+	}
+
+	err = pdp_modeset_init(dev_priv);
+	if (err) {
+		DRM_ERROR("modeset initialisation failed (err=%d)\n", err);
+		goto err_dev_fence_context_destroy;
+	}
+
+	err = drm_vblank_init(dev_priv->dev, 1);
+	if (err) {
+		DRM_ERROR("failed to complete vblank init (err=%d)\n", err);
+		goto err_modeset_deinit;
+	}
+
+	if (dev_priv->version == PDP_VERSION_APOLLO ||
+		dev_priv->version == PDP_VERSION_ODIN) {
+#if !defined(SUPPORT_PLATO_DISPLAY)
+		err = tc_set_interrupt_handler(dev->dev->parent,
+					   TC_INTERRUPT_PDP,
+					   pdp_irq_handler,
+					   dev);
+		if (err) {
+			DRM_ERROR("failed to set interrupt handler (err=%d)\n",
+				  err);
+			goto err_vblank_cleanup;
+		}
+
+		err = tc_enable_interrupt(dev->dev->parent, TC_INTERRUPT_PDP);
+		if (err) {
+			DRM_ERROR("failed to enable pdp interrupts (err=%d)\n",
+				  err);
+			goto err_uninstall_interrupt_handle;
+		}
+#endif
+	}
+#if defined(SUPPORT_PLATO_DISPLAY)
+	else if (dev_priv->version == PDP_VERSION_PLATO) {
+		err = plato_set_interrupt_handler(dev->dev->parent,
+							PLATO_INTERRUPT_PDP,
+							pdp_irq_handler,
+							dev);
+		if (err) {
+			DRM_ERROR("failed to set interrupt handler (err=%d)\n",
+				  err);
+			goto err_vblank_cleanup;
+		}
+
+		err = plato_enable_interrupt(dev->dev->parent, PLATO_INTERRUPT_PDP);
+		if (err) {
+			DRM_ERROR("failed to enable pdp interrupts (err=%d)\n",
+				  err);
+			goto err_uninstall_interrupt_handle;
+		}
+	}
+#endif
+
+	dev->irq_enabled = true;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
+	dev->vblank_disable_allowed = 1;
+#endif
+
+	return 0;
+
+err_uninstall_interrupt_handle:
+	if (dev_priv->version == PDP_VERSION_APOLLO ||
+		dev_priv->version == PDP_VERSION_ODIN) {
+#if !defined(SUPPORT_PLATO_DISPLAY)
+		tc_set_interrupt_handler(dev->dev->parent,
+					     TC_INTERRUPT_PDP,
+					     NULL,
+					     NULL);
+#endif
+	}
+#if defined(SUPPORT_PLATO_DISPLAY)
+	else if (dev_priv->version == PDP_VERSION_PLATO) {
+		plato_set_interrupt_handler(dev->dev->parent,
+				PLATO_INTERRUPT_PDP,
+				NULL,
+				NULL);
+	}
+#endif
+err_vblank_cleanup:
+	drm_vblank_cleanup(dev_priv->dev);
+err_modeset_deinit:
+	pdp_modeset_cleanup(dev_priv);
+err_dev_fence_context_destroy:
+	pvr_sw_fence_context_destroy(dev_priv->dev_fctx);
+err_gem_cleanup:
+	pdp_gem_cleanup(dev_priv->gem_priv);
+err_disable_parent_device:
+	if (dev_priv->version == PDP_VERSION_APOLLO ||
+		dev_priv->version == PDP_VERSION_ODIN) {
+#if !defined(SUPPORT_PLATO_DISPLAY)
+		tc_disable(dev->dev->parent);
+#endif
+	}
+#if defined(SUPPORT_PLATO_DISPLAY)
+	else if (dev_priv->version == PDP_VERSION_PLATO) {
+		plato_disable(dev->dev->parent);
+	}
+#endif
+err_dev_priv_free:
+	kfree(dev_priv);
+	return err;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+static int pdp_unload(struct drm_device *dev)
+#else
+static void pdp_unload(struct drm_device *dev)
+#endif
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->version == PDP_VERSION_APOLLO ||
+		dev_priv->version == PDP_VERSION_ODIN) {
+#if !defined(SUPPORT_PLATO_DISPLAY)
+		tc_disable_interrupt(dev->dev->parent, TC_INTERRUPT_PDP);
+		tc_set_interrupt_handler(dev->dev->parent,
+					     TC_INTERRUPT_PDP,
+					     NULL,
+					     NULL);
+#endif
+	}
+#if defined(SUPPORT_PLATO_DISPLAY)
+	else if (dev_priv->version == PDP_VERSION_PLATO) {
+		plato_disable_interrupt(dev->dev->parent, PLATO_INTERRUPT_PDP);
+		plato_set_interrupt_handler(dev->dev->parent,
+						PLATO_INTERRUPT_PDP,
+						NULL,
+						NULL);
+	}
+#endif
+
+	drm_vblank_cleanup(dev_priv->dev);
+
+	pdp_modeset_cleanup(dev_priv);
+	pdp_gem_cleanup(dev_priv->gem_priv);
+	pvr_sw_fence_context_destroy(dev_priv->dev_fctx);
+
+	if (dev_priv->version == PDP_VERSION_APOLLO ||
+		dev_priv->version == PDP_VERSION_ODIN) {
+#if !defined(SUPPORT_PLATO_DISPLAY)
+		tc_disable(dev->dev->parent);
+#endif
+	}
+#if defined(SUPPORT_PLATO_DISPLAY)
+	else if (dev_priv->version == PDP_VERSION_PLATO) {
+		plato_disable(dev->dev->parent);
+	}
+#endif
+
+	kfree(dev_priv);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+	return 0;
+#endif
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+static void pdp_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		pdp_crtc_flip_event_cancel(crtc, file);
+}
+#endif
+
+static void pdp_lastclose(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
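+	/*
+	 * Detach any framebuffer still bound to a CRTC when the last DRM
+	 * client goes away, disabling the display output.
+	 */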
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc->primary->fb) {
+			struct drm_mode_set mode_set = { .crtc = crtc };
+			int err;
+
+			err = drm_mode_set_config_internal(&mode_set);
+			if (err)
+				DRM_ERROR("failed to disable crtc %p (err=%d)\n",
+					  crtc, err);
+		}
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+static int pdp_enable_vblank(struct drm_device *dev, unsigned int crtc)
+#else
+static int pdp_enable_vblank(struct drm_device *dev, int crtc)
+#endif
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+
+	switch (crtc) {
+	case 0:
+		pdp_crtc_set_vblank_enabled(dev_priv->crtc, true);
+		break;
+	default:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+		DRM_ERROR("invalid crtc %u\n", crtc);
+#else
+		DRM_ERROR("invalid crtc %d\n", crtc);
+#endif
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_DRIVER("vblank interrupts enabled for crtc %d\n", crtc);
+
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+static void pdp_disable_vblank(struct drm_device *dev, unsigned int crtc)
+#else
+static void pdp_disable_vblank(struct drm_device *dev, int crtc)
+#endif
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+
+	switch (crtc) {
+	case 0:
+		pdp_crtc_set_vblank_enabled(dev_priv->crtc, false);
+		break;
+	default:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+		DRM_ERROR("invalid crtc %u\n", crtc);
+#else
+		DRM_ERROR("invalid crtc %d\n", crtc);
+#endif
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("vblank interrupts disabled for crtc %d\n", crtc);
+}
+
+static int pdp_gem_object_create_ioctl(struct drm_device *dev,
+				       void *data,
+				       struct drm_file *file)
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+
+	return pdp_gem_object_create_ioctl_priv(dev,
+						dev_priv->gem_priv,
+						data,
+						file);
+}
+
+static int pdp_gem_dumb_create(struct drm_file *file,
+			       struct drm_device *dev,
+			       struct drm_mode_create_dumb *args)
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+
+	return pdp_gem_dumb_create_priv(file,
+					dev,
+					dev_priv->gem_priv,
+					args);
+}
+
+static void pdp_gem_object_free(struct drm_gem_object *obj)
+{
+	struct pdp_drm_private *dev_priv = obj->dev->dev_private;
+
+	pdp_gem_object_free_priv(dev_priv->gem_priv, obj);
+}
+
+static const struct vm_operations_struct pdp_gem_vm_ops = {
+	.fault	= pdp_gem_object_vm_fault,
+	.open	= drm_gem_vm_open,
+	.close	= drm_gem_vm_close,
+};
+
+static const struct drm_ioctl_desc pdp_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(PDP_GEM_CREATE, pdp_gem_object_create_ioctl, DRM_AUTH | DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(PDP_GEM_MMAP, pdp_gem_object_mmap_ioctl, DRM_AUTH | DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(PDP_GEM_CPU_PREP, pdp_gem_object_cpu_prep_ioctl, DRM_AUTH | DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(PDP_GEM_CPU_FINI, pdp_gem_object_cpu_fini_ioctl, DRM_AUTH | DRM_UNLOCKED),
+};
+
+static const struct file_operations pdp_driver_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+	.mmap		= drm_gem_mmap,
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.llseek		= noop_llseek,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+};
+
+static struct drm_driver pdp_drm_driver = {
+	.load				= pdp_load,
+	.unload				= pdp_unload,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+	.preclose			= pdp_preclose,
+#endif
+	.lastclose			= pdp_lastclose,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+	.set_busid			= drm_platform_set_busid,
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	.get_vblank_counter		= drm_vblank_no_hw_counter,
+#else
+	.get_vblank_counter		= drm_vblank_count,
+#endif
+	.enable_vblank			= pdp_enable_vblank,
+	.disable_vblank			= pdp_disable_vblank,
+
+	.debugfs_init			= pdp_debugfs_init,
+	.debugfs_cleanup		= pdp_debugfs_cleanup,
+
+	.gem_free_object		= pdp_gem_object_free,
+
+	.prime_handle_to_fd		= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle		= drm_gem_prime_fd_to_handle,
+	.gem_prime_export		= pdp_gem_prime_export,
+	.gem_prime_import		= pdp_gem_prime_import,
+	.gem_prime_import_sg_table	= pdp_gem_prime_import_sg_table,
+
+	.dumb_create			= pdp_gem_dumb_create,
+	.dumb_map_offset		= pdp_gem_dumb_map_offset,
+	.dumb_destroy			= drm_gem_dumb_destroy,
+
+	.gem_vm_ops			= &pdp_gem_vm_ops,
+
+	.name				= DRIVER_NAME,
+	.desc				= DRIVER_DESC,
+	.date				= DRIVER_DATE,
+	.major				= PVRVERSION_MAJ,
+	.minor				= PVRVERSION_MIN,
+	.patchlevel			= PVRVERSION_BUILD,
+
+	.driver_features		= DRIVER_GEM |
+					  DRIVER_MODESET |
+					  DRIVER_PRIME,
+	.ioctls				= pdp_ioctls,
+	.num_ioctls			= ARRAY_SIZE(pdp_ioctls),
+	.fops				= &pdp_driver_fops,
+};
+
+
+static int pdp_probe(struct platform_device *pdev)
+{
+	return drm_platform_init(&pdp_drm_driver, pdev);
+}
+
+static int pdp_remove(struct platform_device *pdev)
+{
+	struct drm_device *dev = platform_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+
+	return 0;
+}
+
+static void pdp_shutdown(struct platform_device *pdev)
+{
+}
+
+static struct platform_device_id pdp_platform_device_id_table[] = {
+	{ .name = APOLLO_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_APOLLO },
+	{ .name = ODN_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_ODIN },
+//	{ .name = PLATO_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_PLATO },
+	{ },
+};
+
+static struct platform_driver pdp_platform_driver = {
+	.probe		= pdp_probe,
+	.remove		= pdp_remove,
+	.shutdown	= pdp_shutdown,
+	.driver		= {
+		.owner  = THIS_MODULE,
+		.name	= DRIVER_NAME,
+	},
+	.id_table	= pdp_platform_device_id_table,
+};
+
+module_platform_driver(pdp_platform_driver);
+
+MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_DEVICE_TABLE(platform, pdp_platform_device_id_table);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.h
new file mode 100644
index 0000000..98de9d7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_drv.h
@@ -0,0 +1,149 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__DRM_PDP_DRV_H__)
+#define __DRM_PDP_DRV_H__
+
+#include <linux/version.h>
+#include <linux/wait.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_mm.h>
+
+#include "pdp_common.h"
+
+struct pdp_gem_context;
+enum pdp_crtc_flip_status;
+struct pdp_flip_data;
+struct pdp_gem_private;
+
+#if !defined(SUPPORT_PLATO_DISPLAY)
+struct tc_pdp_platform_data;
+#else
+struct plato_pdp_platform_data;
+#endif
+
+struct pdp_drm_private {
+	struct drm_device *dev;
+
+	enum pdp_version version;
+
+	struct pvr_sw_fence_context *dev_fctx;
+
+	/* created by pdp_gem_init */
+	struct pdp_gem_private	*gem_priv;
+
+	/* initialised by pdp_modeset_init */
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	bool display_enabled;
+};
+
+struct pdp_crtc {
+	struct drm_crtc base;
+
+	uint32_t number;
+
+	resource_size_t pdp_reg_size;
+	resource_size_t pdp_reg_phys_base;
+	void __iomem *pdp_reg;
+
+	resource_size_t pdp_bif_reg_size;
+	resource_size_t pdp_bif_reg_phys_base;
+	void __iomem *pdp_bif_reg;
+
+	resource_size_t pll_reg_size;
+	resource_size_t pll_reg_phys_base;
+	void __iomem *pll_reg;
+
+	resource_size_t odn_core_size; /* needed for odin pdp clk reset */
+	resource_size_t odn_core_phys_base;
+	void __iomem *odn_core_reg;
+
+	wait_queue_head_t flip_pending_wait_queue;
+
+	/* Reuse the drm_device event_lock to protect these */
+	atomic_t flip_status;
+	struct drm_pending_vblank_event *flip_event;
+	struct drm_framebuffer *old_fb;
+	struct pdp_flip_data *flip_data;
+	bool flip_async;
+	bool reduced_blanking;
+
+	struct drm_plane *primary_plane;
+};
+
+struct pdp_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+#define to_pdp_crtc(crtc) container_of(crtc, struct pdp_crtc, base)
+#define to_pdp_framebuffer(fb) container_of(fb, struct pdp_framebuffer, base)
+
+int pdp_debugfs_init(struct drm_minor *minor);
+void pdp_debugfs_cleanup(struct drm_minor *minor);
+
+struct drm_crtc *pdp_crtc_create(struct drm_device *dev, uint32_t number);
+void pdp_crtc_set_plane_enabled(struct drm_crtc *crtc, bool enable);
+void pdp_crtc_set_vblank_enabled(struct drm_crtc *crtc, bool enable);
+void pdp_crtc_irq_handler(struct drm_crtc *crtc);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+void pdp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file);
+#endif
+
+struct drm_connector *pdp_dvi_connector_create(struct drm_device *dev);
+
+struct drm_encoder *pdp_tmds_encoder_create(struct drm_device *dev);
+
+int pdp_modeset_init(struct pdp_drm_private *dev_priv);
+void pdp_modeset_cleanup(struct pdp_drm_private *dev_priv);
+
+#endif /* !defined(__DRM_PDP_DRV_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_dvi.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_dvi.c
new file mode 100644
index 0000000..243d3b63
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_dvi.c
@@ -0,0 +1,286 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "drm_pdp_drv.h"
+
+struct pdp_mode_data {
+	int hdisplay;
+	int vdisplay;
+	int vrefresh;
+	bool reduced_blanking;
+	bool interlaced;
+	bool margins;
+};
+
+static const struct pdp_mode_data pdp_extra_modes[] = {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
+	{
+		.hdisplay = 1280,
+		.vdisplay = 720,
+		.vrefresh = 60,
+		.reduced_blanking = false,
+		.interlaced = false,
+		.margins = false,
+	},
+	{
+		.hdisplay = 1920,
+		.vdisplay = 1080,
+		.vrefresh = 60,
+		.reduced_blanking = false,
+		.interlaced = false,
+		.margins = false,
+	},
+#endif
+};
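+/*
+ * Kernels before 4.2 do not include these common HD modes in the no-EDID
+ * mode list, so 720p60 and 1080p60 CVT modes are added by hand (presumably
+ * the reason for the version guard above).
+ */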
+
+static char preferred_mode_name[DRM_DISPLAY_MODE_LEN] = "\0";
+
+module_param_string(dvi_preferred_mode,
+		    preferred_mode_name,
+		    DRM_DISPLAY_MODE_LEN,
+		    S_IRUSR | S_IRGRP | S_IROTH);
+
+MODULE_PARM_DESC(dvi_preferred_mode,
+		 "Specify the preferred mode (if supported), e.g. 1280x1024.");
+
+
+static int pdp_dvi_add_extra_modes(struct drm_connector *connector)
+{
+	struct drm_display_mode *mode;
+	int num_modes;
+	int i;
+
+	for (i = 0, num_modes = 0; i < ARRAY_SIZE(pdp_extra_modes); i++) {
+		mode = drm_cvt_mode(connector->dev,
+				    pdp_extra_modes[i].hdisplay,
+				    pdp_extra_modes[i].vdisplay,
+				    pdp_extra_modes[i].vrefresh,
+				    pdp_extra_modes[i].reduced_blanking,
+				    pdp_extra_modes[i].interlaced,
+				    pdp_extra_modes[i].margins);
+		if (mode) {
+			drm_mode_probed_add(connector, mode);
+			num_modes++;
+		}
+	}
+
+	return num_modes;
+}
+
+static int pdp_dvi_connector_helper_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	int num_modes;
+	int len = strlen(preferred_mode_name);
+
+	if (len)
+		dev_info(dev->dev, "detected dvi_preferred_mode=%s\n",
+					preferred_mode_name);
+	else
+		dev_info(dev->dev, "no dvi_preferred_mode\n");
+
+	num_modes = drm_add_modes_noedid(connector,
+					 dev->mode_config.max_width,
+					 dev->mode_config.max_height);
+
+	num_modes += pdp_dvi_add_extra_modes(connector);
+	if (num_modes) {
+		struct drm_display_mode *pref_mode = NULL;
+
+		if (len) {
+			struct drm_display_mode *mode;
+			struct list_head *entry;
+
+			list_for_each(entry, &connector->probed_modes) {
+				mode = list_entry(entry,
+						  struct drm_display_mode,
+						  head);
+				if (!strcmp(mode->name, preferred_mode_name)) {
+					pref_mode = mode;
+					break;
+				}
+			}
+		}
+
+		if (pref_mode)
+			pref_mode->type |= DRM_MODE_TYPE_PREFERRED;
+		else
+			drm_set_preferred_mode(connector,
+					       dev->mode_config.max_width,
+					       dev->mode_config.max_height);
+	}
+
+	drm_mode_sort(&connector->probed_modes);
+
+	DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s] found %d modes\n",
+			 connector->base.id,
+			 connector->name,
+			 num_modes);
+
+	return num_modes;
+}
+
+static int pdp_dvi_connector_helper_mode_valid(struct drm_connector *connector,
+					       struct drm_display_mode *mode)
+{
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return MODE_NO_INTERLACE;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+pdp_dvi_connector_helper_best_encoder(struct drm_connector *connector)
+{
+	/* Pick the first encoder we find */
+	if (connector->encoder_ids[0] != 0) {
+		struct drm_mode_object *mode_obj;
+
+		mode_obj = drm_mode_object_find(connector->dev,
+						connector->encoder_ids[0],
+						DRM_MODE_OBJECT_ENCODER);
+		if (mode_obj) {
+			struct drm_encoder *encoder =
+				obj_to_encoder(mode_obj);
+
+			DRM_DEBUG_DRIVER("[ENCODER:%d:%s] best for "
+					 "[CONNECTOR:%d:%s]\n",
+					 encoder->base.id,
+					 encoder->name,
+					 connector->base.id,
+					 connector->name);
+			return encoder;
+		}
+	}
+
+	return NULL;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+static enum drm_connector_status
+pdp_dvi_connector_detect(struct drm_connector *connector,
+			 bool force)
+{
+	/*
+	 * There appears to be no way to determine whether a monitor is
+	 * connected. Report the status as connected, otherwise DPMS never
+	 * gets set to ON.
+	 */
+	return connector_status_connected;
+}
+#endif
+
+static void pdp_dvi_connector_destroy(struct drm_connector *connector)
+{
+	struct pdp_drm_private *dev_priv = connector->dev->dev_private;
+
+	DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n",
+			 connector->base.id,
+			 connector->name);
+
+	drm_connector_unregister(connector);
+
+	drm_connector_cleanup(connector);
+
+	kfree(connector);
+	dev_priv->connector = NULL;
+}
+
+static void pdp_dvi_connector_force(struct drm_connector *connector)
+{
+}
+
+static struct drm_connector_helper_funcs pdp_dvi_connector_helper_funcs = {
+	.get_modes = pdp_dvi_connector_helper_get_modes,
+	.mode_valid = pdp_dvi_connector_helper_mode_valid,
+	.best_encoder = pdp_dvi_connector_helper_best_encoder,
+};
+
+static const struct drm_connector_funcs pdp_dvi_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.reset = NULL,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	.detect = pdp_dvi_connector_detect,
+#endif
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = pdp_dvi_connector_destroy,
+	.force = pdp_dvi_connector_force,
+};
+
+
+struct drm_connector *
+pdp_dvi_connector_create(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+	if (!connector)
+		return ERR_PTR(-ENOMEM);
+
+	drm_connector_init(dev,
+			   connector,
+			   &pdp_dvi_connector_funcs,
+			   DRM_MODE_CONNECTOR_DVID);
+	drm_connector_helper_add(connector, &pdp_dvi_connector_helper_funcs);
+
+	connector->dpms = DRM_MODE_DPMS_OFF;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+
+	DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n",
+			 connector->base.id,
+			 connector->name);
+
+	return connector;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.c
new file mode 100644
index 0000000..b82e80d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.c
@@ -0,0 +1,671 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/capability.h>
+
+#include <drm/drm_mm.h>
+
+#if defined(SUPPORT_PLATO_DISPLAY)
+#include "plato_drv.h"
+#else
+#include "tc_drv.h"
+#endif
+
+#include "drm_pdp_drv.h"
+#include "drm_pdp_gem.h"
+#include "pdp_drm.h"
+#include "kernel_compatibility.h"
+
+struct pdp_gem_object {
+	struct drm_gem_object base;
+
+	/* Non-null if backing originated from this driver */
+	struct drm_mm_node *vram;
+
+	/* Non-null if backing was imported */
+	struct sg_table *sgt;
+
+	phys_addr_t cpu_addr;
+	dma_addr_t dev_addr;
+
+	struct reservation_object _resv;
+	struct reservation_object *resv;
+
+	bool cpu_prep;
+};
+
+#define to_pdp_obj(obj) container_of(obj, struct pdp_gem_object, base)
+
+#if defined(SUPPORT_PLATO_DISPLAY)
+	typedef struct plato_pdp_platform_data pdp_gem_platform_data;
+#else
+	typedef struct tc_pdp_platform_data pdp_gem_platform_data;
+#endif
+
+struct pdp_gem_private {
+	struct mutex			vram_lock;
+	struct drm_mm			vram;
+};
+
+static struct pdp_gem_object *
+pdp_gem_private_object_create(struct drm_device *dev,
+			      size_t size)
+{
+	struct pdp_gem_object *pdp_obj;
+
+	WARN_ON(PAGE_ALIGN(size) != size);
+
+	pdp_obj = kzalloc(sizeof(*pdp_obj), GFP_KERNEL);
+	if (!pdp_obj)
+		return ERR_PTR(-ENOMEM);
+
+	drm_gem_private_object_init(dev, &pdp_obj->base, size);
+	reservation_object_init(&pdp_obj->_resv);
+
+	return pdp_obj;
+}
+
+static struct drm_gem_object *pdp_gem_object_create(struct drm_device *dev,
+					struct pdp_gem_private *gem_priv,
+					size_t size,
+					u32 flags)
+{
+	pdp_gem_platform_data *pdata = dev->platformdev->dev.platform_data;
+	struct pdp_gem_object *pdp_obj;
+	struct drm_mm_node *node;
+	int err = 0;
+
+	pdp_obj = pdp_gem_private_object_create(dev, size);
+	if (IS_ERR(pdp_obj)) {
+		err = PTR_ERR(pdp_obj);
+		goto err_exit;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node) {
+		err = -ENOMEM;
+		goto err_unref;
+	}
+
+	mutex_lock(&gem_priv->vram_lock);
+	err = drm_mm_insert_node(&gem_priv->vram, node, size);
+	mutex_unlock(&gem_priv->vram_lock);
+	if (err)
+		goto err_free_node;
+
+	pdp_obj->vram = node;
+	pdp_obj->dev_addr = pdp_obj->vram->start;
+	pdp_obj->cpu_addr = pdata->memory_base + pdp_obj->dev_addr;
+	pdp_obj->resv = &pdp_obj->_resv;
+
+	return &pdp_obj->base;
+
+err_free_node:
+	kfree(node);
+err_unref:
+	drm_gem_object_unreference_unlocked(&pdp_obj->base);
+err_exit:
+	return ERR_PTR(err);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+int pdp_gem_object_vm_fault(struct vm_fault *vmf)
+#else
+int pdp_gem_object_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+#endif
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+	struct vm_area_struct *vma = vmf->vma;
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	unsigned long addr = vmf->address;
+#else
+	unsigned long addr = (unsigned long)vmf->virtual_address;
+#endif
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
+	unsigned long off;
+	unsigned long pfn;
+	int err;
+
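+	/*
+	 * The backing store is physically contiguous, so the faulting PFN
+	 * can be derived directly from the offset into the mapping.
+	 */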
+	off = addr - vma->vm_start;
+	pfn = (pdp_obj->cpu_addr + off) >> PAGE_SHIFT;
+
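+	/*
+	 * vm_insert_pfn() returns -EBUSY if another thread has already
+	 * installed a mapping for this address; treat that as success.
+	 */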
+	err = vm_insert_pfn(vma, addr, pfn);
+	switch (err) {
+	case 0:
+	case -EBUSY:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+void pdp_gem_object_free_priv(struct pdp_gem_private *gem_priv,
+			      struct drm_gem_object *obj)
+{
+	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
+
+	drm_gem_free_mmap_offset(obj);
+
+	if (&pdp_obj->_resv == pdp_obj->resv)
+		reservation_object_fini(pdp_obj->resv);
+
+	if (pdp_obj->vram) {
+		mutex_lock(&gem_priv->vram_lock);
+		drm_mm_remove_node(pdp_obj->vram);
+		mutex_unlock(&gem_priv->vram_lock);
+
+		kfree(pdp_obj->vram);
+	} else if (obj->import_attach) {
+		drm_prime_gem_destroy(obj, pdp_obj->sgt);
+	}
+
+	drm_gem_object_release(&pdp_obj->base);
+	kfree(pdp_obj);
+}
+
+static int pdp_gem_prime_attach(struct dma_buf *dma_buf, struct device *dev,
+				struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	/* Restrict access to Rogue */
+	if (WARN_ON(!obj->dev->dev->parent) ||
+	    obj->dev->dev->parent != dev->parent)
+		return -EPERM;
+
+	return 0;
+}
+
+static struct sg_table *
+pdp_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+			  enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
+	struct sg_table *sgt;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return NULL;
+
+	if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+		goto err_free_sgt;
+
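+	/* The buffer is contiguous card memory, so one entry describes it all */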
+	sg_dma_address(sgt->sgl) = pdp_obj->dev_addr;
+	sg_dma_len(sgt->sgl) = obj->size;
+
+	return sgt;
+
+err_free_sgt:
+	kfree(sgt);
+	return NULL;
+}
+
+static void pdp_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+					struct sg_table *sgt,
+					enum dma_data_direction dir)
+{
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+
+static void *pdp_gem_prime_kmap_atomic(struct dma_buf *dma_buf,
+				       unsigned long page_num)
+{
+	return NULL;
+}
+
+static void *pdp_gem_prime_kmap(struct dma_buf *dma_buf,
+				unsigned long page_num)
+{
+	return NULL;
+}
+
+static int pdp_gem_prime_mmap(struct dma_buf *dma_buf,
+			      struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	int err;
+
+	mutex_lock(&obj->dev->struct_mutex);
+	err = drm_gem_mmap_obj(obj, obj->size, vma);
+	mutex_unlock(&obj->dev->struct_mutex);
+
+	return err;
+}
+
+static void *pdp_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
+	void *vaddr;
+
+	mutex_lock(&obj->dev->struct_mutex);
+
+	vaddr = ioremap(pdp_obj->cpu_addr, obj->size);
+	if (vaddr == NULL)
+		DRM_DEBUG_DRIVER("ioremap failed");
+
+	mutex_unlock(&obj->dev->struct_mutex);
+
+	return vaddr;
+}
+
+static void pdp_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	mutex_lock(&obj->dev->struct_mutex);
+	iounmap(vaddr);
+	mutex_unlock(&obj->dev->struct_mutex);
+}
+
+static const struct dma_buf_ops pdp_gem_prime_dmabuf_ops = {
+	.attach		= pdp_gem_prime_attach,
+	.map_dma_buf	= pdp_gem_prime_map_dma_buf,
+	.unmap_dma_buf	= pdp_gem_prime_unmap_dma_buf,
+	.release	= drm_gem_dmabuf_release,
+	.kmap_atomic	= pdp_gem_prime_kmap_atomic,
+	.kmap		= pdp_gem_prime_kmap,
+	.mmap		= pdp_gem_prime_mmap,
+	.vmap		= pdp_gem_prime_vmap,
+	.vunmap		= pdp_gem_prime_vunmap
+};
+
+
+static int
+pdp_gem_lookup_our_object(struct drm_file *file, u32 handle,
+			  struct drm_gem_object **objp)
+
+{
+	struct drm_gem_object *obj;
+
+	obj = drm_gem_object_lookup(file, handle);
+	if (!obj)
+		return -ENOENT;
+
+	if (obj->import_attach) {
+		/*
+		 * The dmabuf associated with the object is not one of
+		 * ours. Our own buffers are handled differently on import.
+		 */
+		drm_gem_object_unreference_unlocked(obj);
+		return -EINVAL;
+	}
+
+	*objp = obj;
+	return 0;
+}
+
+struct dma_buf *pdp_gem_prime_export(struct drm_device *dev,
+				     struct drm_gem_object *obj,
+				     int flags)
+{
+	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	DEFINE_DMA_BUF_EXPORT_INFO(export_info);
+
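+	/*
+	 * Share the GEM object's reservation object with the dma-buf so
+	 * that fences are visible to importers.
+	 */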
+	export_info.ops = &pdp_gem_prime_dmabuf_ops;
+	export_info.size = obj->size;
+	export_info.flags = flags;
+	export_info.resv = pdp_obj->resv;
+	export_info.priv = obj;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+	return drm_gem_dmabuf_export(dev, &export_info);
+#else
+	return dma_buf_export(&export_info);
+#endif
+#else
+	return dma_buf_export(obj, &pdp_gem_prime_dmabuf_ops, obj->size,
+			      flags, pdp_obj->resv);
+#endif
+}
+
+struct drm_gem_object *
+pdp_gem_prime_import(struct drm_device *dev,
+		     struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	if (obj->dev == dev) {
+		BUG_ON(dma_buf->ops != &pdp_gem_prime_dmabuf_ops);
+
+		/*
+		 * The dmabuf is one of ours, so return the associated
+		 * PDP GEM object, rather than create a new one.
+		 */
+		drm_gem_object_reference(obj);
+
+		return obj;
+	}
+
+	return drm_gem_prime_import(dev, dma_buf);
+}
+
+struct drm_gem_object *
+pdp_gem_prime_import_sg_table(struct drm_device *dev,
+			      struct dma_buf_attachment *attach,
+			      struct sg_table *sgt)
+{
+	pdp_gem_platform_data *pdata = dev->platformdev->dev.platform_data;
+	struct pdp_gem_object *pdp_obj;
+	int err;
+
+	pdp_obj = pdp_gem_private_object_create(dev, attach->dmabuf->size);
+	if (IS_ERR(pdp_obj)) {
+		err = PTR_ERR(pdp_obj);
+		goto err_exit;
+	}
+
+	pdp_obj->sgt = sgt;
+
+	/* We only expect a single entry for card memory */
+	if (pdp_obj->sgt->nents != 1) {
+		err = -EINVAL;
+		goto err_obj_unref;
+	}
+
+	pdp_obj->dev_addr = sg_dma_address(pdp_obj->sgt->sgl);
+	pdp_obj->cpu_addr = pdata->memory_base + pdp_obj->dev_addr;
+	pdp_obj->resv = attach->dmabuf->resv;
+
+	return &pdp_obj->base;
+
+err_obj_unref:
+	drm_gem_object_unreference_unlocked(&pdp_obj->base);
+err_exit:
+	return ERR_PTR(err);
+}
+
+int pdp_gem_dumb_create_priv(struct drm_file *file,
+			     struct drm_device *dev,
+			     struct pdp_gem_private *gem_priv,
+			     struct drm_mode_create_dumb *args)
+{
+	struct drm_gem_object *obj;
+	u32 handle;
+	u32 pitch;
+	size_t size;
+	int err;
+
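+	/* Round bpp up to a whole number of bytes before computing the pitch */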
+	pitch = args->width * (ALIGN(args->bpp, 8) >> 3);
+	size = PAGE_ALIGN(pitch * args->height);
+
+	obj = pdp_gem_object_create(dev, gem_priv, size, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
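+	/* Read the interrupt status and, if VBLANK0 fired, acknowledge it */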
+	err = drm_gem_handle_create(file, obj, &handle);
+	if (err)
+		goto exit;
+
+	args->handle = handle;
+	args->pitch = pitch;
+	args->size = size;
+
+exit:
+	drm_gem_object_unreference_unlocked(obj);
+	return err;
+}
+
+int pdp_gem_dumb_map_offset(struct drm_file *file,
+			    struct drm_device *dev,
+			    uint32_t handle,
+			    uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int err;
+
+	mutex_lock(&dev->struct_mutex);
+
+	err = pdp_gem_lookup_our_object(file, handle, &obj);
+	if (err)
+		goto exit_unlock;
+
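+	/* Allocate a fake mmap offset for the object in the DRM VMA manager */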
+	err = drm_gem_create_mmap_offset(obj);
+	if (err)
+		goto exit_obj_unref;
+
+	*offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+exit_obj_unref:
+	drm_gem_object_unreference_unlocked(obj);
+exit_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return err;
+}
+
+struct pdp_gem_private *pdp_gem_init(struct drm_device *dev)
+{
+	pdp_gem_platform_data *pdata = dev->platformdev->dev.platform_data;
+	struct pdp_gem_private *gem_priv =
+					kmalloc(sizeof(*gem_priv), GFP_KERNEL);
+
+	if (!gem_priv)
+		return NULL;
+
+	mutex_init(&gem_priv->vram_lock);
+
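+	/*
+	 * Manage the PDP heap with a drm_mm range allocator; node offsets
+	 * are relative to the start of card memory.
+	 */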
+	drm_mm_init(&gem_priv->vram,
+		    pdata->pdp_heap_memory_base - pdata->memory_base,
+		    pdata->pdp_heap_memory_size);
+
+	DRM_INFO("%s has %pa bytes of allocatable memory\n",
+		 dev->driver->name, &pdata->pdp_heap_memory_size);
+
+	return gem_priv;
+}
+
+void pdp_gem_cleanup(struct pdp_gem_private *gem_priv)
+{
+	drm_mm_takedown(&gem_priv->vram);
+	mutex_destroy(&gem_priv->vram_lock);
+
+	kfree(gem_priv);
+}
+
+struct reservation_object *pdp_gem_get_resv(struct drm_gem_object *obj)
+{
+	return to_pdp_obj(obj)->resv;
+}
+
+u64 pdp_gem_get_dev_addr(struct drm_gem_object *obj)
+{
+	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
+
+	return pdp_obj->dev_addr;
+}
+
+int pdp_gem_object_create_ioctl_priv(struct drm_device *dev,
+				struct pdp_gem_private *gem_priv,
+				void *data,
+				struct drm_file *file)
+{
+	struct drm_pdp_gem_create *args = data;
+	struct drm_gem_object *obj;
+	int err;
+
+	if (args->flags) {
+		DRM_ERROR("invalid flags: %#08x\n", args->flags);
+		return -EINVAL;
+	}
+
+	if (args->handle) {
+		DRM_ERROR("invalid handle (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	obj = pdp_gem_object_create(dev,
+					gem_priv,
+					PAGE_ALIGN(args->size),
+					args->flags);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	err = drm_gem_handle_create(file, obj, &args->handle);
+	drm_gem_object_unreference_unlocked(obj);
+
+	return err;
+
+}
+
+int pdp_gem_object_mmap_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file)
+{
+	struct drm_pdp_gem_mmap *args = (struct drm_pdp_gem_mmap *)data;
+
+	if (args->pad) {
+		DRM_ERROR("invalid pad (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	if (args->offset) {
+		DRM_ERROR("invalid offset (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	return pdp_gem_dumb_map_offset(file, dev, args->handle, &args->offset);
+}
+
+int pdp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file)
+{
+	struct drm_pdp_gem_cpu_prep *args = (struct drm_pdp_gem_cpu_prep *)data;
+	struct drm_gem_object *obj;
+	struct pdp_gem_object *pdp_obj;
+	bool write = !!(args->flags & PDP_GEM_CPU_PREP_WRITE);
+	bool wait = !(args->flags & PDP_GEM_CPU_PREP_NOWAIT);
+	int err = 0;
+
+	if (args->flags & ~(PDP_GEM_CPU_PREP_READ |
+			    PDP_GEM_CPU_PREP_WRITE |
+			    PDP_GEM_CPU_PREP_NOWAIT)) {
+		DRM_ERROR("invalid flags: %#08x\n", args->flags);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	err = pdp_gem_lookup_our_object(file, args->handle, &obj);
+	if (err)
+		goto exit_unlock;
+
+	pdp_obj = to_pdp_obj(obj);
+
+	if (pdp_obj->cpu_prep) {
+		err = -EBUSY;
+		goto exit_unref;
+	}
+
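+	/*
+	 * Either block (for up to 30 seconds) until all fences on the
+	 * reservation object have signalled, or, for NOWAIT, just test them.
+	 */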
+	if (wait) {
+		long lerr;
+
+		lerr = reservation_object_wait_timeout_rcu(pdp_obj->resv,
+							   write,
+							   true,
+							   30 * HZ);
+		if (!lerr)
+			err = -EBUSY;
+		else if (lerr < 0)
+			err = lerr;
+	} else {
+		if (!reservation_object_test_signaled_rcu(pdp_obj->resv,
+							  write))
+			err = -EBUSY;
+	}
+
+	if (!err)
+		pdp_obj->cpu_prep = true;
+
+exit_unref:
+	drm_gem_object_unreference_unlocked(obj);
+exit_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return err;
+}
+
+int pdp_gem_object_cpu_fini_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file)
+{
+	struct drm_pdp_gem_cpu_fini *args = (struct drm_pdp_gem_cpu_fini *)data;
+	struct drm_gem_object *obj;
+	struct pdp_gem_object *pdp_obj;
+	int err = 0;
+
+	if (args->pad) {
+		DRM_ERROR("invalid pad (this should always be 0)\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	err = pdp_gem_lookup_our_object(file, args->handle, &obj);
+	if (err)
+		goto exit_unlock;
+
+	pdp_obj = to_pdp_obj(obj);
+
+	if (!pdp_obj->cpu_prep) {
+		err = -EINVAL;
+		goto exit_unref;
+	}
+
+	pdp_obj->cpu_prep = false;
+
+exit_unref:
+	drm_gem_object_unreference_unlocked(obj);
+exit_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return err;
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.h
new file mode 100644
index 0000000..d878c8b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_gem.h
@@ -0,0 +1,108 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__DRM_PDP_GEM_H__)
+#define __DRM_PDP_GEM_H__
+
+#include <linux/version.h>
+#include <drm/drmP.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+#include <drm/drm_gem.h>
+#endif
+
+struct pdp_gem_private;
+
+struct pdp_gem_private *pdp_gem_init(struct drm_device *dev);
+
+void pdp_gem_cleanup(struct pdp_gem_private *dev_priv);
+
+/* ioctl functions */
+int pdp_gem_object_create_ioctl_priv(struct drm_device *dev,
+				     struct pdp_gem_private *gem_priv,
+				     void *data,
+				     struct drm_file *file);
+int pdp_gem_object_mmap_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file);
+int pdp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file);
+int pdp_gem_object_cpu_fini_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file);
+
+/* drm driver functions */
+void pdp_gem_object_free_priv(struct pdp_gem_private *gem_priv,
+			      struct drm_gem_object *obj);
+
+struct dma_buf *pdp_gem_prime_export(struct drm_device *dev,
+				     struct drm_gem_object *obj,
+				     int flags);
+
+struct drm_gem_object *pdp_gem_prime_import(struct drm_device *dev,
+					    struct dma_buf *dma_buf);
+
+struct drm_gem_object *
+pdp_gem_prime_import_sg_table(struct drm_device *dev,
+			      struct dma_buf_attachment *attach,
+			      struct sg_table *sgt);
+
+int pdp_gem_dumb_create_priv(struct drm_file *file,
+			     struct drm_device *dev,
+			     struct pdp_gem_private *gem_priv,
+			     struct drm_mode_create_dumb *args);
+
+int pdp_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+			    uint32_t handle, uint64_t *offset);
+
+/* vm operation functions */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+int pdp_gem_object_vm_fault(struct vm_fault *vmf);
+#else
+int pdp_gem_object_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+#endif
+
+/* internal interfaces */
+struct reservation_object *pdp_gem_get_resv(struct drm_gem_object *obj);
+u64 pdp_gem_get_dev_addr(struct drm_gem_object *obj);
+
+#endif /* !defined(__DRM_PDP_GEM_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_modeset.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_modeset.c
new file mode 100644
index 0000000..764c08a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_modeset.c
@@ -0,0 +1,315 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+#include <drm/drm_gem.h>
+#endif
+
+#include "drm_pdp_drv.h"
+#include "kernel_compatibility.h"
+
+#define PDP_WIDTH_MIN			640
+#define PDP_WIDTH_MAX			1280
+#define PDP_HEIGHT_MIN			480
+#define PDP_HEIGHT_MAX			1024
+
+#define ODIN_PDP_WIDTH_MAX		1920
+#define ODIN_PDP_HEIGHT_MAX		1080
+
+#define PLATO_PDP_WIDTH_MAX		1920
+#define PLATO_PDP_HEIGHT_MAX		1080
+
+static bool async_flip_enable = true;
+
+module_param(async_flip_enable,
+	     bool,
+	     S_IRUSR | S_IRGRP | S_IROTH);
+
+MODULE_PARM_DESC(async_flip_enable,
+		 "Enable support for 'faked' async flipping (default: Y)");
+
+
+static void pdp_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb);
+
+	DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id);
+
+	drm_framebuffer_cleanup(fb);
+
+	drm_gem_object_unreference_unlocked(pdp_fb->obj);
+
+	kfree(pdp_fb);
+}
+
+static int pdp_framebuffer_create_handle(struct drm_framebuffer *fb,
+					 struct drm_file *file,
+					 unsigned int *handle)
+{
+	struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb);
+
+	DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id);
+
+	return drm_gem_handle_create(file, pdp_fb->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs pdp_framebuffer_funcs = {
+	.destroy = pdp_framebuffer_destroy,
+	.create_handle = pdp_framebuffer_create_handle,
+	.dirty = NULL,
+};
+
+static int pdp_framebuffer_init(struct pdp_drm_private *dev_priv,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \
+    (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)))
+				const
+#endif
+				struct drm_mode_fb_cmd2 *mode_cmd,
+				struct pdp_framebuffer *pdp_fb,
+				struct drm_gem_object *obj)
+{
+	int err;
+
+	switch (mode_cmd->pixel_format) {
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_XRGB8888:
+		break;
+	default:
+		DRM_ERROR("pixel format not supported (format = %u)\n",
+			  mode_cmd->pixel_format);
+		return -EINVAL;
+	}
+
+	if (mode_cmd->flags & DRM_MODE_FB_INTERLACED) {
+		DRM_ERROR("interlaced framebuffers not supported\n");
+		return -EINVAL;
+	}
+
+	pdp_fb->base.dev = dev_priv->dev;
+	drm_helper_mode_fill_fb_struct(dev_priv->dev, &pdp_fb->base, mode_cmd);
+	pdp_fb->obj = obj;
+
+	err = drm_framebuffer_init(dev_priv->dev,
+				   &pdp_fb->base,
+				   &pdp_framebuffer_funcs);
+	if (err) {
+		DRM_ERROR("failed to initialise framebuffer (err=%d)\n",
+			  err);
+		return err;
+	}
+
+	DRM_DEBUG_DRIVER("[FB:%d]\n", pdp_fb->base.base.id);
+
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+ DRM mode config callbacks
+*/ /**************************************************************************/
+
+static struct drm_framebuffer *
+pdp_fb_create(struct drm_device *dev,
+			struct drm_file *file,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \
+    (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)))
+			const
+#endif
+			struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct pdp_drm_private *dev_priv = dev->dev_private;
+	struct pdp_framebuffer *pdp_fb;
+	struct drm_gem_object *bo;
+	int err;
+
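+	/*
+	 * drm_gem_object_lookup() takes a reference, which the framebuffer
+	 * keeps until it is destroyed.
+	 */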
+	bo = drm_gem_object_lookup(file, mode_cmd->handles[0]);
+	if (!bo) {
+		DRM_ERROR("failed to find buffer with handle %u\n",
+			  mode_cmd->handles[0]);
+		return ERR_PTR(-ENOENT);
+	}
+
+	pdp_fb = kzalloc(sizeof(*pdp_fb), GFP_KERNEL);
+	if (!pdp_fb) {
+		drm_gem_object_unreference_unlocked(bo);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	err = pdp_framebuffer_init(dev_priv, mode_cmd, pdp_fb, bo);
+	if (err) {
+		kfree(pdp_fb);
+		drm_gem_object_unreference_unlocked(bo);
+		return ERR_PTR(err);
+	}
+
+	DRM_DEBUG_DRIVER("[FB:%d]\n", pdp_fb->base.base.id);
+
+	return &pdp_fb->base;
+}
+
+static const struct drm_mode_config_funcs pdp_mode_config_funcs = {
+	.fb_create = pdp_fb_create,
+	.output_poll_changed = NULL,
+};
+
+
+int pdp_modeset_init(struct pdp_drm_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int err;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.funcs = &pdp_mode_config_funcs;
+	dev->mode_config.min_width = PDP_WIDTH_MIN;
+	dev->mode_config.min_height = PDP_HEIGHT_MIN;
+
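+	/* The maximum resolution depends on which test chip drives the PDP */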
+	switch (dev_priv->version) {
+	case PDP_VERSION_APOLLO:
+		dev->mode_config.max_width = PDP_WIDTH_MAX;
+		dev->mode_config.max_height = PDP_HEIGHT_MAX;
+		break;
+	case PDP_VERSION_ODIN:
+		dev->mode_config.max_width = ODIN_PDP_WIDTH_MAX;
+		dev->mode_config.max_height = ODIN_PDP_HEIGHT_MAX;
+		break;
+	case PDP_VERSION_PLATO:
+		dev->mode_config.max_width = PLATO_PDP_WIDTH_MAX;
+		dev->mode_config.max_height = PLATO_PDP_HEIGHT_MAX;
+		break;
+	default:
+		BUG();
+	}
+
+	DRM_INFO("max_width is %d\n",
+		dev->mode_config.max_width);
+	DRM_INFO("max_height is %d\n",
+		dev->mode_config.max_height);
+
+	dev->mode_config.fb_base = 0;
+	dev->mode_config.async_page_flip = async_flip_enable;
+
+	DRM_INFO("%s async flip support is %s\n",
+		 dev->driver->name, async_flip_enable ? "enabled" : "disabled");
+
+	dev_priv->crtc = pdp_crtc_create(dev, 0);
+	if (IS_ERR(dev_priv->crtc)) {
+		DRM_ERROR("failed to create a CRTC\n");
+		err = PTR_ERR(dev_priv->crtc);
+		goto err_config_cleanup;
+	}
+
+	switch (dev_priv->version) {
+	case PDP_VERSION_APOLLO:
+	case PDP_VERSION_ODIN:
+		dev_priv->connector = pdp_dvi_connector_create(dev);
+		if (IS_ERR(dev_priv->connector)) {
+			DRM_ERROR("failed to create a connector\n");
+			err = PTR_ERR(dev_priv->connector);
+			goto err_config_cleanup;
+		}
+
+		dev_priv->encoder = pdp_tmds_encoder_create(dev);
+		if (IS_ERR(dev_priv->encoder)) {
+			DRM_ERROR("failed to create an encoder\n");
+			err = PTR_ERR(dev_priv->encoder);
+			goto err_config_cleanup;
+		}
+
+		err = drm_mode_connector_attach_encoder(dev_priv->connector,
+							dev_priv->encoder);
+		if (err) {
+			DRM_ERROR("failed to attach [ENCODER:%d:%s] to "
+				  "[CONNECTOR:%d:%s] (err=%d)\n",
+				  dev_priv->encoder->base.id,
+				  dev_priv->encoder->name,
+				  dev_priv->connector->base.id,
+				  dev_priv->connector->name,
+				  err);
+			goto err_config_cleanup;
+		}
+
+		err = drm_connector_register(dev_priv->connector);
+		if (err) {
+			DRM_ERROR("[CONNECTOR:%d:%s] failed to register (err=%d)\n",
+				  dev_priv->connector->base.id,
+				  dev_priv->connector->name,
+				  err);
+			goto err_config_cleanup;
+		}
+		break;
+	case PDP_VERSION_PLATO:
+		/* dev_priv->connector = pdp_hdmi_connector_create(dev);
+		if (IS_ERR(dev_priv->connector)) {
+			DRM_ERROR("failed to create a connector\n");
+			err = PTR_ERR(dev_priv->connector);
+			goto err_config_cleanup;
+		} */
+		break;
+	default:
+		BUG();
+	}
+
+	DRM_DEBUG_DRIVER("initialised\n");
+
+	return 0;
+
+err_config_cleanup:
+	drm_mode_config_cleanup(dev);
+
+	return err;
+}
+
+void pdp_modeset_cleanup(struct pdp_drm_private *dev_priv)
+{
+	drm_mode_config_cleanup(dev_priv->dev);
+
+	DRM_DEBUG_DRIVER("cleaned up\n");
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_tmds.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_tmds.c
new file mode 100644
index 0000000..b7a2d93
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/drm_pdp_tmds.c
@@ -0,0 +1,141 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+#include "drm_pdp_drv.h"
+
+#include "kernel_compatibility.h"
+
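+/*
+ * The TMDS encoder needs no hardware programming, so the helper callbacks
+ * below are intentionally empty stubs.
+ */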
+static void pdp_tmds_encoder_helper_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool
+pdp_tmds_encoder_helper_mode_fixup(struct drm_encoder *encoder,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void pdp_tmds_encoder_helper_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void pdp_tmds_encoder_helper_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+pdp_tmds_encoder_helper_mode_set(struct drm_encoder *encoder,
+				 struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void pdp_tmds_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct pdp_drm_private *dev_priv = encoder->dev->dev_private;
+
+	DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n",
+			 encoder->base.id,
+			 encoder->name);
+
+	drm_encoder_cleanup(encoder);
+
+	kfree(encoder);
+	dev_priv->encoder = NULL;
+}
+
+static const struct drm_encoder_helper_funcs pdp_tmds_encoder_helper_funcs = {
+	.dpms = pdp_tmds_encoder_helper_dpms,
+	.mode_fixup = pdp_tmds_encoder_helper_mode_fixup,
+	.prepare = pdp_tmds_encoder_helper_prepare,
+	.commit = pdp_tmds_encoder_helper_commit,
+	.mode_set = pdp_tmds_encoder_helper_mode_set,
+	.get_crtc = NULL,
+	.detect = NULL,
+	.disable = NULL,
+};
+
+static const struct drm_encoder_funcs pdp_tmds_encoder_funcs = {
+	.reset = NULL,
+	.destroy = pdp_tmds_encoder_destroy,
+};
+
+struct drm_encoder *
+pdp_tmds_encoder_create(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+	int err;
+
+	encoder = kzalloc(sizeof(*encoder), GFP_KERNEL);
+	if (!encoder)
+		return ERR_PTR(-ENOMEM);
+
+	err = drm_encoder_init(dev,
+			       encoder,
+			       &pdp_tmds_encoder_funcs,
+			       DRM_MODE_ENCODER_TMDS,
+			       NULL);
+	if (err) {
+		DRM_ERROR("failed to initialise encoder (err=%d)\n", err);
+		kfree(encoder);
+		return ERR_PTR(err);
+	}
+	drm_encoder_helper_add(encoder, &pdp_tmds_encoder_helper_funcs);
+
+	/*
+	 * This is a bit field that's used to determine which
+	 * CRTCs can drive this encoder.
+	 */
+	encoder->possible_crtcs = 0x1;
+
+	DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n",
+			 encoder->base.id,
+			 encoder->name);
+
+	return encoder;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/pdp_apollo.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/pdp_apollo.h
new file mode 100644
index 0000000..d8731df
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/pdp_apollo.h
@@ -0,0 +1,517 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_APOLLO_H__)
+#define __PDP_APOLLO_H__
+
+#include "pdp_common.h"
+#include "pdp_regs.h"
+#include "tcf_rgbpdp_regs.h"
+#include "tcf_pll.h"
+
+/* Map a register to the "pll-regs" region */
+#define PLL_REG(n) ((n) - TCF_PLL_PLL_PDP_CLK0)
+
+/* Apollo register R-W */
+static inline u32 pdp_rreg32(void __iomem *base, resource_size_t reg)
+{
+	return ioread32(base + reg);
+}
+
+static inline void pdp_wreg32(void __iomem *base, resource_size_t reg,
+							  u32 value)
+{
+	iowrite32(value, base + reg);
+}
+
+static inline u32 pll_rreg32(void __iomem *base, resource_size_t reg)
+{
+	return ioread32(base + reg);
+}
+
+static inline void pll_wreg32(void __iomem *base, resource_size_t reg,
+				u32 value)
+{
+	iowrite32(value, base + reg);
+}
+
+static bool pdp_apollo_clocks_set(struct device *dev,
+				void __iomem *pdp_reg, void __iomem *pll_reg,
+				u32 clock_in_mhz,
+				void __iomem *odn_core_reg,
+				u32 hdisplay, u32 vdisplay)
+{
+	u32 clock;
+
+	/*
+	 * Setup TCF_CR_PLL_PDP_CLK1TO5 based on the main clock speed
+	 * (clock 0 or 3)
+	 */
+	clock = (clock_in_mhz >= 50) ? 0 : 0x3;
+
+	/* Set phase 0, ratio 50:50 and frequency in MHz */
+	pll_wreg32(pll_reg,
+			   PLL_REG(TCF_PLL_PLL_PDP_CLK0), clock_in_mhz);
+
+	pll_wreg32(pll_reg,
+			   PLL_REG(TCF_PLL_PLL_PDP_CLK1TO5), clock);
+
+	/* Now initiate reprogramming of the PLLs */
+	pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_DRP_GO),
+			   0x1);
+
+	udelay(1000);
+
+	pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_DRP_GO),
+			   0x0);
+
+	return true;
+}
+
+static void pdp_apollo_set_updates_enabled(struct device *dev,
+				void __iomem *pdp_reg, bool enable)
+{
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set updates: %s\n",
+			 enable ? "enable" : "disable");
+#endif
+
+	/* nothing to do here */
+}
+
+static void pdp_apollo_set_syncgen_enabled(struct device *dev,
+				void __iomem *pdp_reg, bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set syncgen: %s\n",
+		enable ? "enable" : "disable");
+#endif
+
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL);
+	value = REG_VALUE_SET(value,
+						  enable ? 0x1 : 0x0,
+						  SYNCACTIVE_SHIFT,
+						  SYNCACTIVE_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL,
+			   value);
+}
+
+static void pdp_apollo_set_powerdwn_enabled(struct device *dev,
+				void __iomem *pdp_reg, bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set powerdwn: %s\n",
+		enable ? "enable" : "disable");
+#endif
+
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL);
+	value = REG_VALUE_SET(value,
+						  enable ? 0x1 : 0x0,
+						  POWERDN_SHIFT,
+						  POWERDN_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL,
+			   value);
+}
+
+static void pdp_apollo_set_vblank_enabled(struct device *dev,
+				void __iomem *pdp_reg, bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set vblank: %s\n",
+		enable ? "enable" : "disable");
+#endif
+
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB);
+	value = REG_VALUE_SET(value,
+						  enable ? 0x1 : 0x0,
+						  INTEN_VBLNK0_SHIFT,
+						  INTEN_VBLNK0_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB, value);
+}
+
+static bool pdp_apollo_check_and_clear_vblank(struct device *dev,
+				void __iomem *pdp_reg)
+{
+	u32 value;
+
+	value = pdp_rreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT);
+
+	if (REG_VALUE_GET(value,
+					INTS_VBLNK0_SHIFT,
+					INTS_VBLNK0_MASK)) {
+		value = REG_VALUE_SET(0,
+					0x1,
+					INTCLR_VBLNK0_SHIFT,
+					INTCLR_VBLNK0_MASK);
+		pdp_wreg32(pdp_reg,
+				   TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR,
+				   value);
+		return true;
+	}
+	return false;
+}
+
+static void pdp_apollo_set_plane_enabled(struct device *dev,
+				void __iomem *pdp_reg, u32 plane, bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set plane %u: %s\n",
+		 plane, enable ? "enable" : "disable");
+#endif
+
+	if (plane > 0) {
+		dev_err(dev,
+			"Maximum of 1 plane is supported\n");
+		return;
+	}
+
+	value = pdp_rreg32(pdp_reg,
+					TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL);
+	value = REG_VALUE_SET(value,
+					enable ? 0x1 : 0x0,
+					STR1STREN_SHIFT,
+					STR1STREN_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL,
+			   value);
+}
+
+static void pdp_apollo_reset_planes(struct device *dev,
+				void __iomem *pdp_reg)
+{
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Reset planes\n");
+#endif
+
+	pdp_apollo_set_plane_enabled(dev, pdp_reg, 0, false);
+}
+
+static void pdp_apollo_set_surface(struct device *dev,
+				void __iomem *pdp_reg,
+				u32 plane,
+				u32 address,
+				u32 posx, u32 posy,
+				u32 width, u32 height, u32 stride,
+				u32 format,
+				u32 alpha,
+				bool blend)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set surface: size=%dx%d stride=%d format=%d address=0x%x\n",
+			 width, height, stride, format, address);
+#endif
+
+	if (plane > 0) {
+		dev_err(dev,
+			"Maximum of 1 plane is supported\n");
+		return;
+	}
+
+	/* Size & format */
+	value = pdp_rreg32(pdp_reg,
+				TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF);
+	value = REG_VALUE_SET(value,
+				width - 1,
+				STR1WIDTH_SHIFT,
+				STR1WIDTH_MASK);
+	value = REG_VALUE_SET(value,
+				height - 1,
+				STR1HEIGHT_SHIFT,
+				STR1HEIGHT_MASK);
+	value = REG_VALUE_SET(value,
+				format,
+				STR1PIXFMT_SHIFT,
+				STR1PIXFMT_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF,
+			   value);
+	/* Stride */
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_PDP_STR1POSN);
+	value = REG_VALUE_SET(value,
+				(stride >> DCPDP_STR1POSN_STRIDE_SHIFT) - 1,
+				STR1STRIDE_SHIFT,
+				STR1STRIDE_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_PDP_STR1POSN, value);
+	/* Disable interlaced output */
+	value = pdp_rreg32(pdp_reg,
+				TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL);
+	value = REG_VALUE_SET(value,
+				0x0,
+				STR1INTFIELD_SHIFT,
+				STR1INTFIELD_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL,
+			   value);
+	/* Frame buffer base address */
+	value = REG_VALUE_SET(0,
+				address >> DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT,
+				STR1BASE_SHIFT,
+				STR1BASE_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL,
+			   value);
+}
+
+static void pdp_apollo_mode_set(struct device *dev,
+				void __iomem *pdp_reg,
+				u32 h_display, u32 v_display,
+				u32 hbps, u32 ht, u32 has,
+				u32 hlbs, u32 hfps, u32 hrbs,
+				u32 vbps, u32 vt, u32 vas,
+				u32 vtbs, u32 vfps, u32 vbbs,
+				bool nhsync, bool nvsync)
+{
+	u32 value;
+
+	dev_info(dev, "Set mode: %dx%d\n", h_display, v_display);
+#ifdef PDP_VERBOSE
+	dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n",
+			 ht, hbps, has, hlbs, hfps, hrbs);
+	dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n",
+			 vt, vbps, vas, vtbs, vfps, vbbs);
+#endif
+
+#if 0
+	/* Carried over from the Android implementation (absent from the
+	 * Linux one); its purpose is unclear and it does not appear to be
+	 * necessary.
+	 */
+	if (pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL)
+		!= 0x0000C010) {
+		/* Buffer request threshold */
+		pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL,
+				   0x00001C10);
+	}
+#endif
+
+	/* Border colour */
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL);
+	value = REG_VALUE_SET(value,
+						  0x0,
+						  BORDCOL_SHIFT,
+						  BORDCOL_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL, value);
+
+	/* Update control */
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL);
+	value = REG_VALUE_SET(value,
+						  0x0,
+						  UPDFIELD_SHIFT,
+						  UPDFIELD_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL, value);
+
+	/* Set hsync timings */
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1);
+	value = REG_VALUE_SET(value,
+						  hbps,
+						  HBPS_SHIFT,
+						  HBPS_MASK);
+	value = REG_VALUE_SET(value,
+						  ht,
+						  HT_SHIFT,
+						  HT_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1, value);
+
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2);
+	value = REG_VALUE_SET(value,
+						  has,
+						  HAS_SHIFT,
+						  HAS_MASK);
+	value = REG_VALUE_SET(value,
+						  hlbs,
+						  HLBS_SHIFT,
+						  HLBS_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2, value);
+
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3);
+	value = REG_VALUE_SET(value,
+						  hfps,
+						  HFPS_SHIFT,
+						  HFPS_MASK);
+	value = REG_VALUE_SET(value,
+						  hrbs,
+						  HRBS_SHIFT,
+						  HRBS_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3, value);
+
+	/* Set vsync timings */
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1);
+	value = REG_VALUE_SET(value,
+						  vbps,
+						  VBPS_SHIFT,
+						  VBPS_MASK);
+	value = REG_VALUE_SET(value,
+						  vt,
+						  VT_SHIFT,
+						  VT_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1, value);
+
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2);
+	value = REG_VALUE_SET(value,
+						  vas,
+						  VAS_SHIFT,
+						  VAS_MASK);
+	value = REG_VALUE_SET(value,
+						  vtbs,
+						  VTBS_SHIFT,
+						  VTBS_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2, value);
+
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3);
+	value = REG_VALUE_SET(value,
+						  vfps,
+						  VFPS_SHIFT,
+						  VFPS_MASK);
+	value = REG_VALUE_SET(value,
+						  vbbs,
+						  VBBS_SHIFT,
+						  VBBS_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3, value);
+
+	/* Horizontal data enable */
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL);
+	value = REG_VALUE_SET(value,
+						  hlbs,
+						  HDES_SHIFT,
+						  HDES_MASK);
+	value = REG_VALUE_SET(value,
+						  hfps,
+						  HDEF_SHIFT,
+						  HDEF_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL, value);
+
+	/* Vertical data enable */
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL);
+	value = REG_VALUE_SET(value,
+						  vtbs,
+						  VDES_SHIFT,
+						  VDES_MASK);
+	value = REG_VALUE_SET(value,
+						  vfps,
+						  VDEF_SHIFT,
+						  VDEF_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL, value);
+
+	/* Vertical event start and vertical fetch start */
+	value = pdp_rreg32(pdp_reg,
+					   TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT);
+	value = REG_VALUE_SET(value,
+						  vbps,
+						  VFETCH_SHIFT,
+						  VFETCH_MASK);
+	value = REG_VALUE_SET(value,
+						  vfps,
+						  VEVENT_SHIFT,
+						  VEVENT_MASK);
+	pdp_wreg32(pdp_reg,
+			   TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT, value);
+
+	/* Set up polarities of sync/blank */
+	value = REG_VALUE_SET(0,
+						  0x1,
+						  BLNKPOL_SHIFT,
+						  BLNKPOL_MASK);
+	/* Enable this if you want vblnk1. You also need to change to vblnk1
+	 * in the interrupt handler.
+	 */
+#if 0
+	value = REG_VALUE_SET(value,
+						  0x1,
+						  FIELDPOL_SHIFT,
+						  FIELDPOL_MASK);
+#endif
+	if (nhsync)
+		value = REG_VALUE_SET(value,
+							  0x1,
+							  HSPOL_SHIFT,
+							  HSPOL_MASK);
+	if (nvsync)
+		value = REG_VALUE_SET(value,
+							  0x1,
+							  VSPOL_SHIFT,
+							  VSPOL_MASK);
+	pdp_wreg32(pdp_reg,
+		TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL,
+		value);
+}
+
+#endif /* __PDP_APOLLO_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/pdp_common.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/pdp_common.h
new file mode 100644
index 0000000..3cd2b7de
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/pdp_common.h
@@ -0,0 +1,63 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_COMMON_H__)
+#define __PDP_COMMON_H__
+
+/*#define PDP_VERBOSE*/
+
+#define REG_VALUE_GET(v, s, m) \
+	(u32)(((v) & (m)) >> (s))
+#define REG_VALUE_SET(v, b, s, m) \
+	(u32)(((v) & (u32)~(m)) | (u32)(((b) << (s)) & (m)))
+/* Active low */
+#define REG_VALUE_LO(v, b, s, m) \
+	(u32)((v) & ~(u32)(((b) << (s)) & (m)))
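+
+/* Illustrative usage sketch (hypothetical 4-bit field at shift 4 with
+ * mask 0xf0, not a real PDP register field):
+ *   v = REG_VALUE_SET(v, 0x3, 4, 0xf0);   bits [7:4] of v now hold 0x3
+ *   f = REG_VALUE_GET(v, 4, 0xf0);        f == 0x3
+ */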
+
+enum pdp_version {
+	PDP_VERSION_APOLLO,
+	PDP_VERSION_ODIN,
+	PDP_VERSION_PLATO,
+};
+
+#endif /* __PDP_COMMON_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/pdp_odin.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/pdp_odin.h
new file mode 100644
index 0000000..88844f5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/pdp_odin.h
@@ -0,0 +1,1061 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_ODIN_H__)
+#define __PDP_ODIN_H__
+
+#include "pdp_common.h"
+#include "odin_pdp_regs.h"
+#include "odin_regs.h"
+#include "odin_defs.h"
+
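+/* The mapped PLL io region starts at ODN_PDP_P_CLK_OUT_DIVIDER_REG1, so
+ * translate an absolute PLL register address into an offset within that
+ * mapping.
+ */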
+#define ODIN_PLL_REG(n)	((n) - ODN_PDP_P_CLK_OUT_DIVIDER_REG1)
+
+struct odin_displaymode {
+	int w;		/* display width */
+	int h;		/* display height */
+	int id;		/* pixel clock input divider */
+	int m;		/* pixel clock multiplier */
+	int od1;	/* pixel clock output divider */
+	int od2;	/* mem clock output divider */
+};
+
+/*
+ * For Odin, only the listed modes below are supported.
+ * 1080p id=5, m=37, od1=5, od2=5
+ * 720p id=5, m=37, od1=10, od2=5
+ * 1280x1024 id=1, m=14, od1=13, od2=10
+ * 1440x900 id=5, m=53, od1=10, od2=8
+ * 1280x960 id=3, m=40, od1=13, od2=9
+ * 1024x768 id=1, m=13, od1=20, od2=10
+ * 800x600 id=2, m=20, od1=25, od2=7
+ * 640x480 id=1, m=12, od1=48, od2=9
+ * ... where id is the PDP_P_CLK input divider,
+ * m is PDP_P_CLK multiplier regs 1 to 3
+ * od1 is PDP_P_clk output divider regs 1 to 3
+ * od2 is PDP_M_clk output divider regs 1 to 2
+ */
+static const struct odin_displaymode odin_modes[] = {
+	{.w = 1920, .h = 1080, .id = 5, .m = 37, .od1 = 5, .od2 = 5},
+	{.w = 1280, .h = 720, .id = 5, .m = 37, .od1 = 10, .od2 = 5},
+	{.w = 1280, .h = 1024, .id = 1, .m = 14, .od1 = 13, .od2 = 10},
+	{.w = 1440, .h = 900, .id = 5, .m = 53, .od1 = 10, .od2 = 8},
+	{.w = 1280, .h = 960, .id = 3, .m = 40, .od1 = 13, .od2 = 9},
+	{.w = 1024, .h = 768, .id = 1, .m = 13, .od1 = 20, .od2 = 10},
+	{.w = 800, .h = 600, .id = 2, .m = 20, .od1 = 25, .od2 = 7},
+	{.w = 640, .h = 480, .id = 1, .m = 12, .od1 = 48, .od2 = 9},
+	{.w = 0, .h = 0, .id = 0, .m = 0, .od1 = 0, .od2 = 0}
+};
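+
+/* Note: assuming a fixed reference clock Fref into the PLL, the pixel
+ * clock for a mode is roughly Fref * m / (id * od1), e.g. for the 1080p
+ * entry above Fref * 37 / (5 * 5). This is illustrative only; the
+ * reference frequency is board dependent.
+ */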
+
+static const u32 GRPH_SURF_OFFSET[] = {
+	ODN_PDP_GRPH1SURF_OFFSET,
+	ODN_PDP_GRPH2SURF_OFFSET,
+	ODN_PDP_VID1SURF_OFFSET,
+	ODN_PDP_GRPH4SURF_OFFSET
+};
+static const u32 GRPH_SURF_GRPH_PIXFMT_SHIFT[] = {
+	ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT,
+	ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT,
+	ODN_PDP_VID1SURF_VID1PIXFMT_SHIFT,
+	ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT
+};
+static const u32 GRPH_SURF_GRPH_PIXFMT_MASK[] = {
+	ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK,
+	ODN_PDP_GRPH2SURF_GRPH2PIXFMT_MASK,
+	ODN_PDP_VID1SURF_VID1PIXFMT_MASK,
+	ODN_PDP_GRPH4SURF_GRPH4PIXFMT_MASK
+};
+static const u32 GRPH_GALPHA_OFFSET[] = {
+	ODN_PDP_GRPH1GALPHA_OFFSET,
+	ODN_PDP_GRPH2GALPHA_OFFSET,
+	ODN_PDP_VID1GALPHA_OFFSET,
+	ODN_PDP_GRPH4GALPHA_OFFSET
+};
+static const u32 GRPH_GALPHA_GRPH_GALPHA_SHIFT[] = {
+	ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT,
+	ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT,
+	ODN_PDP_VID1GALPHA_VID1GALPHA_SHIFT,
+	ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT
+};
+static const u32 GRPH_GALPHA_GRPH_GALPHA_MASK[] = {
+	ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_MASK,
+	ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_MASK,
+	ODN_PDP_VID1GALPHA_VID1GALPHA_MASK,
+	ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_MASK
+};
+static const u32 GRPH_CTRL_OFFSET[] = {
+	ODN_PDP_GRPH1CTRL_OFFSET,
+	ODN_PDP_GRPH2CTRL_OFFSET,
+	ODN_PDP_VID1CTRL_OFFSET,
+	ODN_PDP_GRPH4CTRL_OFFSET,
+};
+static const u32 GRPH_CTRL_GRPH_BLEND_SHIFT[] = {
+	ODN_PDP_GRPH1CTRL_GRPH1BLEND_SHIFT,
+	ODN_PDP_GRPH2CTRL_GRPH2BLEND_SHIFT,
+	ODN_PDP_VID1CTRL_VID1BLEND_SHIFT,
+	ODN_PDP_GRPH4CTRL_GRPH4BLEND_SHIFT
+};
+static const u32 GRPH_CTRL_GRPH_BLEND_MASK[] = {
+	ODN_PDP_GRPH1CTRL_GRPH1BLEND_MASK,
+	ODN_PDP_GRPH2CTRL_GRPH2BLEND_MASK,
+	ODN_PDP_VID1CTRL_VID1BLEND_MASK,
+	ODN_PDP_GRPH4CTRL_GRPH4BLEND_MASK
+};
+static const u32 GRPH_CTRL_GRPH_BLENDPOS_SHIFT[] = {
+	ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT,
+	ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT,
+	ODN_PDP_VID1CTRL_VID1BLENDPOS_SHIFT,
+	ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT
+};
+static const u32 GRPH_CTRL_GRPH_BLENDPOS_MASK[] = {
+	ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK,
+	ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK,
+	ODN_PDP_VID1CTRL_VID1BLENDPOS_MASK,
+	ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK
+};
+static const u32 GRPH_CTRL_GRPH_STREN_SHIFT[] = {
+	ODN_PDP_GRPH1CTRL_GRPH1STREN_SHIFT,
+	ODN_PDP_GRPH2CTRL_GRPH2STREN_SHIFT,
+	ODN_PDP_VID1CTRL_VID1STREN_SHIFT,
+	ODN_PDP_GRPH4CTRL_GRPH4STREN_SHIFT
+};
+static const u32 GRPH_CTRL_GRPH_STREN_MASK[] = {
+	ODN_PDP_GRPH1CTRL_GRPH1STREN_MASK,
+	ODN_PDP_GRPH2CTRL_GRPH2STREN_MASK,
+	ODN_PDP_VID1CTRL_VID1STREN_MASK,
+	ODN_PDP_GRPH4CTRL_GRPH4STREN_MASK
+};
+static const u32 GRPH_POSN_OFFSET[] = {
+	ODN_PDP_GRPH1POSN_OFFSET,
+	ODN_PDP_GRPH2POSN_OFFSET,
+	ODN_PDP_VID1POSN_OFFSET,
+	ODN_PDP_GRPH4POSN_OFFSET
+};
+static const u32 GRPH_POSN_GRPH_XSTART_SHIFT[] = {
+	ODN_PDP_GRPH1POSN_GRPH1XSTART_SHIFT,
+	ODN_PDP_GRPH2POSN_GRPH2XSTART_SHIFT,
+	ODN_PDP_VID1POSN_VID1XSTART_SHIFT,
+	ODN_PDP_GRPH4POSN_GRPH4XSTART_SHIFT,
+};
+static const u32 GRPH_POSN_GRPH_XSTART_MASK[] = {
+	ODN_PDP_GRPH1POSN_GRPH1XSTART_MASK,
+	ODN_PDP_GRPH2POSN_GRPH2XSTART_MASK,
+	ODN_PDP_VID1POSN_VID1XSTART_MASK,
+	ODN_PDP_GRPH4POSN_GRPH4XSTART_MASK,
+};
+static const u32 GRPH_POSN_GRPH_YSTART_SHIFT[] = {
+	ODN_PDP_GRPH1POSN_GRPH1YSTART_SHIFT,
+	ODN_PDP_GRPH2POSN_GRPH2YSTART_SHIFT,
+	ODN_PDP_VID1POSN_VID1YSTART_SHIFT,
+	ODN_PDP_GRPH4POSN_GRPH4YSTART_SHIFT,
+};
+static const u32 GRPH_POSN_GRPH_YSTART_MASK[] = {
+	ODN_PDP_GRPH1POSN_GRPH1YSTART_MASK,
+	ODN_PDP_GRPH2POSN_GRPH2YSTART_MASK,
+	ODN_PDP_VID1POSN_VID1YSTART_MASK,
+	ODN_PDP_GRPH4POSN_GRPH4YSTART_MASK,
+};
+static const u32 GRPH_SIZE_OFFSET[] = {
+	ODN_PDP_GRPH1SIZE_OFFSET,
+	ODN_PDP_GRPH2SIZE_OFFSET,
+	ODN_PDP_VID1SIZE_OFFSET,
+	ODN_PDP_GRPH4SIZE_OFFSET,
+};
+static const u32 GRPH_SIZE_GRPH_WIDTH_SHIFT[] = {
+	ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT,
+	ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT,
+	ODN_PDP_VID1SIZE_VID1WIDTH_SHIFT,
+	ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT
+};
+static const u32 GRPH_SIZE_GRPH_WIDTH_MASK[] = {
+	ODN_PDP_GRPH1SIZE_GRPH1WIDTH_MASK,
+	ODN_PDP_GRPH2SIZE_GRPH2WIDTH_MASK,
+	ODN_PDP_VID1SIZE_VID1WIDTH_MASK,
+	ODN_PDP_GRPH4SIZE_GRPH4WIDTH_MASK
+};
+static const u32 GRPH_SIZE_GRPH_HEIGHT_SHIFT[] = {
+	ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT,
+	ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT,
+	ODN_PDP_VID1SIZE_VID1HEIGHT_SHIFT,
+	ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT
+};
+static const u32 GRPH_SIZE_GRPH_HEIGHT_MASK[] = {
+	ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_MASK,
+	ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_MASK,
+	ODN_PDP_VID1SIZE_VID1HEIGHT_MASK,
+	ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_MASK
+};
+static const u32 GRPH_STRIDE_OFFSET[] = {
+	ODN_PDP_GRPH1STRIDE_OFFSET,
+	ODN_PDP_GRPH2STRIDE_OFFSET,
+	ODN_PDP_VID1STRIDE_OFFSET,
+	ODN_PDP_GRPH4STRIDE_OFFSET
+};
+static const u32 GRPH_STRIDE_GRPH_STRIDE_SHIFT[] = {
+	ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT,
+	ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT,
+	ODN_PDP_VID1STRIDE_VID1STRIDE_SHIFT,
+	ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT
+};
+static const u32 GRPH_STRIDE_GRPH_STRIDE_MASK[] = {
+	ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_MASK,
+	ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_MASK,
+	ODN_PDP_VID1STRIDE_VID1STRIDE_MASK,
+	ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_MASK
+};
+static const u32 GRPH_INTERLEAVE_CTRL_OFFSET[] = {
+	ODN_PDP_GRPH1INTERLEAVE_CTRL_OFFSET,
+	ODN_PDP_GRPH2INTERLEAVE_CTRL_OFFSET,
+	ODN_PDP_VID1INTERLEAVE_CTRL_OFFSET,
+	ODN_PDP_GRPH4INTERLEAVE_CTRL_OFFSET
+};
+static const u32 GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_SHIFT[] = {
+	ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT,
+	ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT,
+	ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT,
+	ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT
+};
+static const u32 GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_MASK[] = {
+	ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK,
+	ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK,
+	ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK,
+	ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK
+};
+static const u32 GRPH_BASEADDR_OFFSET[] = {
+	ODN_PDP_GRPH1BASEADDR_OFFSET,
+	ODN_PDP_GRPH2BASEADDR_OFFSET,
+	ODN_PDP_VID1BASEADDR_OFFSET,
+	ODN_PDP_GRPH4BASEADDR_OFFSET
+};
+
+/* Odin register read/write helpers */
+static inline u32 odin_pdp_rreg32(void __iomem *base, resource_size_t reg)
+{
+	return ioread32(base + reg);
+}
+
+static inline void odin_pdp_wreg32(void __iomem *base, resource_size_t reg,
+								   u32 value)
+{
+	iowrite32(value, base + reg);
+}
+
+static inline u32 odin_pll_rreg32(void __iomem *base, resource_size_t reg)
+{
+	return ioread32(base + reg);
+}
+
+static inline void odin_pll_wreg32(void __iomem *base, resource_size_t reg,
+								   u32 value)
+{
+	iowrite32(value, base + reg);
+}
+
+static inline u32 odin_core_rreg32(void __iomem *base, resource_size_t reg)
+{
+	return ioread32(base + reg);
+}
+
+static inline void odin_core_wreg32(void __iomem *base,
+				resource_size_t reg,
+				u32 value)
+{
+	iowrite32(value, base + reg);
+}
+
+static void get_odin_clock_settings(u32 value, u32 *lo_time, u32 *hi_time,
+				u32 *no_count, u32 *edge)
+{
+	u32 lt, ht;
+
+	/* If the value is 1, High Time & Low Time are both set to 1
+	 * and the NOCOUNT bit is set to 1.
+	 */
+	if (value == 1) {
+		*lo_time = 1;
+		*hi_time = 1;
+
+		/* If od is an odd number then write 1 to NO_COUNT,
+		 * otherwise write 0.
+		 */
+		*no_count = 1;
+
+		/* If m is an odd number then write 1 to the EDGE bit of MR2,
+		 * otherwise write 0.
+		 * If id is an odd number then write 1 to the EDGE bit of ID,
+		 * otherwise write 0.
+		 */
+		*edge = 0;
+		return;
+	}
+	*no_count = 0;
+
+	/* High Time & Low Time are each half the value listed for the mode */
+	lt = value >> 1;
+	ht = lt;
+
+	/* If the value is odd, Low Time is rounded up to nearest integer
+	 * and High Time is rounded down, and Edge is set to 1.
+	 */
+	if (value & 1) {
+		lt++;
+
+		/* If m is an odd number then write 1 to the EDGE bit of MR2,
+		 * otherwise write 0.
+		 * If id is an odd number then write 1 to the EDGE bit of ID,
+		 * otherwise write 0.
+		 */
+		*edge = 1;
+
+	} else {
+		*edge = 0;
+	}
+	*hi_time = ht;
+	*lo_time = lt;
+}
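+
+/* Worked example: a divider value of 5 gives lo_time=3, hi_time=2,
+ * no_count=0 and edge=1; a value of 1 is the special case, giving
+ * lo_time=hi_time=1 with no_count=1 and edge=0.
+ */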
+
+static const struct odin_displaymode *get_odin_mode(int w, int h)
+{
+	int n = 0;
+
+	do {
+		if ((odin_modes[n].w == w) && (odin_modes[n].h == h))
+			return odin_modes + n;
+
+	} while (odin_modes[n++].w);
+
+	return NULL;
+}
+
+static bool pdp_odin_clocks_set(struct device *dev,
+				void __iomem *pdp_reg, void __iomem *pll_reg,
+				u32 clock_freq,
+				void __iomem *odn_core_reg,
+				u32 hdisplay, u32 vdisplay)
+{
+	u32 value;
+	const struct odin_displaymode *odispl;
+	u32 hi_time, lo_time, no_count, edge;
+	u32 core_id, core_rev;
+
+	core_id = odin_pdp_rreg32(pdp_reg,
+				ODN_PDP_CORE_ID_OFFSET);
+	dev_info(dev, "Odin-PDP CORE_ID  %08X\n", core_id);
+
+	core_rev = odin_pdp_rreg32(odn_core_reg,
+				ODN_PDP_CORE_REV_OFFSET);
+	dev_info(dev, "Odin-PDP CORE_REV %08X\n", core_rev);
+
+	odispl = get_odin_mode(hdisplay, vdisplay);
+	if (!odispl) {
+		dev_err(dev,
+				"Error - display mode not supported.\n");
+		return false;
+	}
+
+	/* The PDP uses a Xilinx clock that requires read-modify-write
+	 * for all registers, so it is essential that only the specified
+	 * bits are changed because other bits are in use.
+	 * To change PDP clocks reset PDP & PDP mmcm (PLL) first,
+	 * then apply changes and then un-reset mmcm & PDP.
+	 * Warm reset will keep the changes.
+	 *    wr 0x000080 0x1f7 ; # reset pdp
+	 *    wr 0x000090 8 ; # reset pdp  mmcm
+	 * then apply clock changes, then
+	 *    wr 0x000090 0x0 ; # un-reset pdp mmcm
+	 *    wr 0x000080 0x1ff ; # un-reset pdp
+	 */
+
+	/* Hold Odin PDP1 in reset while changing the clock regs.
+	 * Set the PDP1 bit of ODN_CORE_INTERNAL_RESETN low to reset.
+	 * set bit 3 to 0 (active low)
+	 */
+	value = odin_core_rreg32(odn_core_reg,
+				ODN_CORE_INTERNAL_RESETN);
+	value = REG_VALUE_LO(value, 1, ODN_INTERNAL_RESETN_PDP1_SHIFT,
+				ODN_INTERNAL_RESETN_PDP1_MASK);
+	odin_core_wreg32(odn_core_reg,
+				ODN_CORE_INTERNAL_RESETN, value);
+
+	/* Hold the PDP MMCM in reset while changing the clock regs.
+	 * Set the PDP1 bit of ODN_CORE_CLK_GEN_RESET high to reset.
+	 */
+	value = odin_core_rreg32(odn_core_reg,
+				ODN_CORE_CLK_GEN_RESET);
+	value = REG_VALUE_SET(value, 0x1,
+				ODN_INTERNAL_RESETN_PDP1_SHIFT,
+				ODN_INTERNAL_RESETN_PDP1_MASK);
+	odin_core_wreg32(odn_core_reg,
+				ODN_CORE_CLK_GEN_RESET, value);
+
+	/* Pixel clock Input divider */
+	get_odin_clock_settings(odispl->id, &lo_time, &hi_time,
+				&no_count, &edge);
+
+	value = odin_pll_rreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_P_CLK_IN_DIVIDER_REG));
+	value = REG_VALUE_SET(value, lo_time,
+				ODN_PDP_PCLK_IDIV_LO_TIME_SHIFT,
+				ODN_PDP_PCLK_IDIV_LO_TIME_MASK);
+	value = REG_VALUE_SET(value, hi_time,
+				ODN_PDP_PCLK_IDIV_HI_TIME_SHIFT,
+				ODN_PDP_PCLK_IDIV_HI_TIME_MASK);
+	value = REG_VALUE_SET(value, no_count,
+				ODN_PDP_PCLK_IDIV_NOCOUNT_SHIFT,
+				ODN_PDP_PCLK_IDIV_NOCOUNT_MASK);
+	value = REG_VALUE_SET(value, edge,
+				ODN_PDP_PCLK_IDIV_EDGE_SHIFT,
+				ODN_PDP_PCLK_IDIV_EDGE_MASK);
+	odin_pll_wreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_P_CLK_IN_DIVIDER_REG),
+				value);
+
+	/* Pixel clock Output divider */
+	get_odin_clock_settings(odispl->od1, &lo_time, &hi_time,
+				&no_count, &edge);
+
+	/* Pixel clock Output divider reg1 */
+	value = odin_pll_rreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG1));
+	value = REG_VALUE_SET(value, lo_time,
+				ODN_PDP_PCLK_ODIV1_LO_TIME_SHIFT,
+				ODN_PDP_PCLK_ODIV1_LO_TIME_MASK);
+	value = REG_VALUE_SET(value, hi_time,
+				ODN_PDP_PCLK_ODIV1_HI_TIME_SHIFT,
+				ODN_PDP_PCLK_ODIV1_HI_TIME_MASK);
+	odin_pll_wreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG1),
+				value);
+
+	/* Pixel clock Output divider reg2 */
+	value = odin_pll_rreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG2));
+	value = REG_VALUE_SET(value, no_count,
+				ODN_PDP_PCLK_ODIV2_NOCOUNT_SHIFT,
+				ODN_PDP_PCLK_ODIV2_NOCOUNT_MASK);
+	value = REG_VALUE_SET(value, edge,
+				ODN_PDP_PCLK_ODIV2_EDGE_SHIFT,
+				ODN_PDP_PCLK_ODIV2_EDGE_MASK);
+	odin_pll_wreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG2),
+				value);
+
+	/* Pixel clock Multiplier */
+	get_odin_clock_settings(odispl->m, &lo_time, &hi_time,
+				&no_count, &edge);
+
+	/* Pixel clock Multiplier reg1 */
+	value = odin_pll_rreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG1));
+	value = REG_VALUE_SET(value, lo_time,
+				ODN_PDP_PCLK_MUL1_LO_TIME_SHIFT,
+				ODN_PDP_PCLK_MUL1_LO_TIME_MASK);
+	value = REG_VALUE_SET(value, hi_time,
+				ODN_PDP_PCLK_MUL1_HI_TIME_SHIFT,
+				ODN_PDP_PCLK_MUL1_HI_TIME_MASK);
+	odin_pll_wreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG1),
+				value);
+
+	/* Pixel clock Multiplier reg2 */
+	value = odin_pll_rreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG2));
+	value = REG_VALUE_SET(value, no_count,
+				ODN_PDP_PCLK_MUL2_NOCOUNT_SHIFT,
+				ODN_PDP_PCLK_MUL2_NOCOUNT_MASK);
+	value = REG_VALUE_SET(value, edge,
+				ODN_PDP_PCLK_MUL2_EDGE_SHIFT,
+				ODN_PDP_PCLK_MUL2_EDGE_MASK);
+	odin_pll_wreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG2),
+				value);
+
+	/* Mem clock Output divider */
+	get_odin_clock_settings(odispl->od2, &lo_time, &hi_time,
+				&no_count, &edge);
+
+	/* Mem clock Output divider reg1 */
+	value = odin_pll_rreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG1));
+	value = REG_VALUE_SET(value, lo_time,
+				ODN_PDP_MCLK_ODIV1_LO_TIME_SHIFT,
+				ODN_PDP_MCLK_ODIV1_LO_TIME_MASK);
+	value = REG_VALUE_SET(value, hi_time,
+				ODN_PDP_MCLK_ODIV1_HI_TIME_SHIFT,
+				ODN_PDP_MCLK_ODIV1_HI_TIME_MASK);
+	odin_pll_wreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG1),
+				value);
+
+	/* Mem clock Output divider reg2 */
+	value = odin_pll_rreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG2));
+	value = REG_VALUE_SET(value, no_count,
+				ODN_PDP_MCLK_ODIV2_NOCOUNT_SHIFT,
+				ODN_PDP_MCLK_ODIV2_NOCOUNT_MASK);
+	value = REG_VALUE_SET(value, edge,
+				ODN_PDP_MCLK_ODIV2_EDGE_SHIFT,
+				ODN_PDP_MCLK_ODIV2_EDGE_MASK);
+	odin_pll_wreg32(pll_reg,
+				ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG2),
+				value);
+
+	/* Take the PDP MMCM out of reset.
+	 * Set the PDP1 bit of ODN_CORE_CLK_GEN_RESET to 0.
+	 */
+	value = odin_core_rreg32(odn_core_reg,
+				ODN_CORE_CLK_GEN_RESET);
+	value = REG_VALUE_LO(value, 1, ODN_INTERNAL_RESETN_PDP1_SHIFT,
+				ODN_INTERNAL_RESETN_PDP1_MASK);
+	odin_core_wreg32(odn_core_reg,
+				ODN_CORE_CLK_GEN_RESET, value);
+
+	/* Wait until MMCM_LOCK_STATUS_PDPP bit is '1' in register
+	 * MMCM_LOCK_STATUS. Issue an error if this does not
+	 * go to '1' within 500ms.
+	 */
+	{
+		int count;
+		bool locked = false;
+
+		for (count = 0; count < 10; count++) {
+			value = odin_core_rreg32(odn_core_reg,
+						ODN_CORE_MMCM_LOCK_STATUS);
+			if (value & ODN_MMCM_LOCK_STATUS_PDPP) {
+				locked = true;
+				break;
+			}
+			msleep(50);
+		}
+
+		if (!locked) {
+			dev_err(dev,
+					"Error - the MMCM pll did not lock\n");
+			return false;
+		}
+	}
+
+	/* Take Odin-PDP1 out of reset:
+	 * Set the PDP1 bit of ODN_CORE_INTERNAL_RESETN to 1.
+	 */
+	value = odin_core_rreg32(odn_core_reg,
+				ODN_CORE_INTERNAL_RESETN);
+	value = REG_VALUE_SET(value, 1, ODN_INTERNAL_RESETN_PDP1_SHIFT,
+				ODN_INTERNAL_RESETN_PDP1_MASK);
+	odin_core_wreg32(odn_core_reg,
+				ODN_CORE_INTERNAL_RESETN, value);
+
+	return true;
+}
+
+static void pdp_odin_set_updates_enabled(struct device *dev,
+				void __iomem *pdp_reg, bool enable)
+{
+	u32 value = enable ?
+		(1 << ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT |
+		 1 << ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT) :
+		0x0;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set updates: %s\n",
+			 enable ? "enable" : "disable");
+#endif
+
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET,
+			value);
+}
+
+static void pdp_odin_set_syncgen_enabled(struct device *dev,
+				void __iomem *pdp_reg, bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set syncgen: %s\n",
+		enable ? "enable" : "disable");
+#endif
+
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_SYNCCTRL_OFFSET);
+
+	value = REG_VALUE_SET(value,
+		enable ? ODN_SYNC_GEN_ENABLE : ODN_SYNC_GEN_DISABLE,
+		ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT,
+		ODN_PDP_SYNCCTRL_SYNCACTIVE_MASK);
+
+	/* Invert the pixel clock */
+	value = REG_VALUE_SET(value, ODN_PIXEL_CLOCK_INVERTED,
+		ODN_PDP_SYNCCTRL_CLKPOL_SHIFT,
+		ODN_PDP_SYNCCTRL_CLKPOL_MASK);
+
+	/* Set the Horizontal Sync Polarity to active high */
+	value = REG_VALUE_LO(value, ODN_HSYNC_POLARITY_ACTIVE_HIGH,
+			ODN_PDP_SYNCCTRL_HSPOL_SHIFT,
+			ODN_PDP_SYNCCTRL_HSPOL_MASK);
+
+	odin_pdp_wreg32(pdp_reg,
+		ODN_PDP_SYNCCTRL_OFFSET,
+		value);
+
+	/* Check for underruns when the sync generator
+	 * is being turned off.
+	 */
+	if (!enable) {
+		value = odin_pdp_rreg32(pdp_reg,
+				ODN_PDP_INTSTAT_OFFSET);
+		value &= ODN_PDP_INTSTAT_ALL_OURUN_MASK;
+
+		if (value)
+			dev_warn(dev,
+				"underruns detected. status=0x%08X\n",
+				value);
+		else
+			dev_info(dev,
+				"no underruns detected\n");
+	}
+}
+
+static void pdp_odin_set_powerdwn_enabled(struct device *dev,
+				void __iomem *pdp_reg, bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set powerdwn: %s\n",
+		enable ? "enable" : "disable");
+#endif
+
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_SYNCCTRL_OFFSET);
+
+	value = REG_VALUE_SET(value,
+			enable ? 0x1 : 0x0,
+			ODN_PDP_SYNCCTRL_POWERDN_SHIFT,
+			ODN_PDP_SYNCCTRL_POWERDN_MASK);
+
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_SYNCCTRL_OFFSET,
+			value);
+}
+
+static void pdp_odin_set_vblank_enabled(struct device *dev,
+				void __iomem *pdp_reg, bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set vblank: %s\n",
+		enable ? "enable" : "disable");
+#endif
+
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_INTCLR_OFFSET,
+			ODN_PDP_INTCLR_ALL);
+
+	value = enable ? (1 << ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT) : 0;
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_INTENAB_OFFSET, value);
+}
+
+static bool pdp_odin_check_and_clear_vblank(struct device *dev,
+				void __iomem *pdp_reg)
+{
+	u32 value;
+
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_INTSTAT_OFFSET);
+
+	if (REG_VALUE_GET(value,
+			ODN_PDP_INTSTAT_INTS_VBLNK0_SHIFT,
+			ODN_PDP_INTSTAT_INTS_VBLNK0_MASK)) {
+		odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_INTCLR_OFFSET,
+			(1 << ODN_PDP_INTCLR_INTCLR_VBLNK0_SHIFT));
+
+		return true;
+	}
+	return false;
+}
+
+static void pdp_odin_set_plane_enabled(struct device *dev,
+				void __iomem *pdp_reg, u32 plane, bool enable)
+{
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Set plane %u: %s\n",
+		 plane, enable ? "enable" : "disable");
+#endif
+
+	if (plane > 3) {
+		dev_err(dev,
+			"Maximum of 4 planes are supported\n");
+		return;
+	}
+
+	value = odin_pdp_rreg32(pdp_reg,
+			GRPH_CTRL_OFFSET[plane]);
+	value = REG_VALUE_SET(value,
+			enable ? 0x1 : 0x0,
+			GRPH_CTRL_GRPH_STREN_SHIFT[plane],
+			GRPH_CTRL_GRPH_STREN_MASK[plane]);
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_CTRL_OFFSET[plane], value);
+}
+
+static void pdp_odin_reset_planes(struct device *dev,
+				void __iomem *pdp_reg)
+{
+#ifdef PDP_VERBOSE
+	dev_info(dev, "Reset planes\n");
+#endif
+
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_CTRL_OFFSET[0], 0x00000000);
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_CTRL_OFFSET[1], 0x01000000);
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_CTRL_OFFSET[2], 0x02000000);
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_CTRL_OFFSET[3], 0x03000000);
+}
+
+static void pdp_odin_set_surface(struct device *dev,
+				void __iomem *pdp_reg,
+				u32 plane,
+				u32 address,
+				u32 posx, u32 posy,
+				u32 width, u32 height,
+				u32 stride,
+				u32 format,
+				u32 alpha,
+				bool blend)
+{
+	/* Use a blender based on the plane number (this defines the Z
+	 * ordering)
+	 */
+	static const int GRPH_BLEND_POS[] = { 0x0, 0x1, 0x2, 0x3 };
+	u32 blend_mode;
+	u32 value;
+
+#ifdef PDP_VERBOSE
+	dev_info(dev,
+		 "Set surface: plane=%d pos=%d:%d size=%dx%d stride=%d format=%d alpha=%d address=0x%x\n",
+		 plane, posx, posy, width, height, stride,
+		 format, alpha, address);
+#endif
+
+	if (plane > 3) {
+		dev_err(dev,
+			"Maximum of 4 planes are supported\n");
+		return;
+	}
+
+	if (address & 0xf) {
+		dev_warn(dev,
+			 "The frame buffer address is not aligned\n");
+	}
+
+	/* Frame buffer base address */
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_BASEADDR_OFFSET[plane],
+			address);
+	/* Pos */
+	value = REG_VALUE_SET(0x0,
+			posx,
+			GRPH_POSN_GRPH_XSTART_SHIFT[plane],
+			GRPH_POSN_GRPH_XSTART_MASK[plane]);
+	value = REG_VALUE_SET(value,
+			posy,
+			GRPH_POSN_GRPH_YSTART_SHIFT[plane],
+			GRPH_POSN_GRPH_YSTART_MASK[plane]);
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_POSN_OFFSET[plane],
+			value);
+	/* Size */
+	value = REG_VALUE_SET(0x0,
+			width - 1,
+			GRPH_SIZE_GRPH_WIDTH_SHIFT[plane],
+			GRPH_SIZE_GRPH_WIDTH_MASK[plane]);
+	value = REG_VALUE_SET(value,
+			height - 1,
+			GRPH_SIZE_GRPH_HEIGHT_SHIFT[plane],
+			GRPH_SIZE_GRPH_HEIGHT_MASK[plane]);
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_SIZE_OFFSET[plane],
+			value);
+	/* Stride */
+	value = REG_VALUE_SET(0x0,
+			(stride >> 4) - 1,
+			GRPH_STRIDE_GRPH_STRIDE_SHIFT[plane],
+			GRPH_STRIDE_GRPH_STRIDE_MASK[plane]);
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_STRIDE_OFFSET[plane], value);
+	/* Interlace mode: progressive */
+	value = REG_VALUE_SET(0x0,
+			ODN_INTERLACE_DISABLE,
+			GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_SHIFT[plane],
+			GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_MASK[plane]);
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_INTERLEAVE_CTRL_OFFSET[plane],
+			value);
+	/* Format */
+	value = REG_VALUE_SET(0x0,
+			      format,
+			      GRPH_SURF_GRPH_PIXFMT_SHIFT[plane],
+			      GRPH_SURF_GRPH_PIXFMT_MASK[plane]);
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_SURF_OFFSET[plane], value);
+	/* Global alpha (0...1023) */
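+	/* The integer scaling below maps alpha 0..255 onto the register's
+	 * 0..1023 range (255 -> 1023).
+	 */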
+	value = REG_VALUE_SET(0x0,
+			      ((1024*256)/255 * alpha)/256,
+			      GRPH_GALPHA_GRPH_GALPHA_SHIFT[plane],
+			      GRPH_GALPHA_GRPH_GALPHA_MASK[plane]);
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_GALPHA_OFFSET[plane], value);
+	value = odin_pdp_rreg32(pdp_reg,
+				GRPH_CTRL_OFFSET[plane]);
+	/* Blend mode */
+	if (blend) {
+		if (alpha != 255)
+			blend_mode = 0x2; /* 0b10 = global alpha blending */
+		else
+			blend_mode = 0x3; /* 0b11 = pixel alpha blending */
+	} else {
+		blend_mode = 0x0; /* 0b00 = no blending */
+	}
+	value = REG_VALUE_SET(0x0,
+			      blend_mode,
+			      GRPH_CTRL_GRPH_BLEND_SHIFT[plane],
+			      GRPH_CTRL_GRPH_BLEND_MASK[plane]);
+	/* Blend position */
+	value = REG_VALUE_SET(value,
+			      GRPH_BLEND_POS[plane],
+			      GRPH_CTRL_GRPH_BLENDPOS_SHIFT[plane],
+			      GRPH_CTRL_GRPH_BLENDPOS_MASK[plane]);
+	odin_pdp_wreg32(pdp_reg,
+			GRPH_CTRL_OFFSET[plane], value);
+}
+
+static void pdp_odin_mode_set(struct device *dev,
+				void __iomem *pdp_reg,
+				u32 h_display, u32 v_display,
+				u32 hbps, u32 ht, u32 has,
+				u32 hlbs, u32 hfps, u32 hrbs,
+				u32 vbps, u32 vt, u32 vas,
+				u32 vtbs, u32 vfps, u32 vbbs,
+				bool nhsync, bool nvsync)
+{
+	u32 value;
+
+	dev_info(dev, "Set mode: %dx%d\n", h_display, v_display);
+#ifdef PDP_VERBOSE
+	dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n",
+			 ht, hbps, has, hlbs, hfps, hrbs);
+	dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n",
+			 vt, vbps, vas, vtbs, vfps, vbbs);
+#endif
+
+	/* Border colour: 10bits per channel */
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_BORDCOL_R_OFFSET, 0x0);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_BORDCOL_GB_OFFSET, 0x0);
+	/* Background: 10bits per channel */
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_BGNDCOL_AR_OFFSET);
+	value = REG_VALUE_SET(value, 0x3ff,
+			ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT,
+			ODN_PDP_BGNDCOL_AR_BGNDCOL_A_MASK);
+	value = REG_VALUE_SET(value, 0x0,
+			ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT,
+			ODN_PDP_BGNDCOL_AR_BGNDCOL_R_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_BGNDCOL_AR_OFFSET, value);
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_BGNDCOL_GB_OFFSET);
+	value = REG_VALUE_SET(value, 0x0,
+			ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT,
+			ODN_PDP_BGNDCOL_GB_BGNDCOL_G_MASK);
+	value = REG_VALUE_SET(value, 0x0,
+			ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT,
+			ODN_PDP_BGNDCOL_GB_BGNDCOL_B_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_BGNDCOL_GB_OFFSET, value);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_BORDCOL_GB_OFFSET, 0x0);
+	/* Update control */
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_UPDCTRL_OFFSET);
+	value = REG_VALUE_SET(value, 0x0,
+			ODN_PDP_UPDCTRL_UPDFIELD_SHIFT,
+			ODN_PDP_UPDCTRL_UPDFIELD_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_UPDCTRL_OFFSET, value);
+
+	/* Horizontal timing */
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_HSYNC1_OFFSET);
+	value = REG_VALUE_SET(value, hbps,
+			ODN_PDP_HSYNC1_HBPS_SHIFT,
+			ODN_PDP_HSYNC1_HBPS_MASK);
+	value = REG_VALUE_SET(value, ht,
+			ODN_PDP_HSYNC1_HT_SHIFT,
+			ODN_PDP_HSYNC1_HT_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_HSYNC1_OFFSET, value);
+
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_HSYNC2_OFFSET);
+	value = REG_VALUE_SET(value, has,
+			ODN_PDP_HSYNC2_HAS_SHIFT,
+			ODN_PDP_HSYNC2_HAS_MASK);
+	value = REG_VALUE_SET(value, hlbs,
+			ODN_PDP_HSYNC2_HLBS_SHIFT,
+			ODN_PDP_HSYNC2_HLBS_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_HSYNC2_OFFSET, value);
+
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_HSYNC3_OFFSET);
+	value = REG_VALUE_SET(value, hfps,
+			ODN_PDP_HSYNC3_HFPS_SHIFT,
+			ODN_PDP_HSYNC3_HFPS_MASK);
+	value = REG_VALUE_SET(value, hrbs,
+			ODN_PDP_HSYNC3_HRBS_SHIFT,
+			ODN_PDP_HSYNC3_HRBS_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_HSYNC3_OFFSET, value);
+
+	/* Vertical timing */
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_VSYNC1_OFFSET);
+	value = REG_VALUE_SET(value, vbps,
+			ODN_PDP_VSYNC1_VBPS_SHIFT,
+			ODN_PDP_VSYNC1_VBPS_MASK);
+	value = REG_VALUE_SET(value, vt,
+			ODN_PDP_VSYNC1_VT_SHIFT,
+			ODN_PDP_VSYNC1_VT_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_VSYNC1_OFFSET, value);
+
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_VSYNC2_OFFSET);
+	value = REG_VALUE_SET(value, vas,
+			ODN_PDP_VSYNC2_VAS_SHIFT,
+			ODN_PDP_VSYNC2_VAS_MASK);
+	value = REG_VALUE_SET(value, vtbs,
+			ODN_PDP_VSYNC2_VTBS_SHIFT,
+			ODN_PDP_VSYNC2_VTBS_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_VSYNC2_OFFSET, value);
+
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_VSYNC3_OFFSET);
+	value = REG_VALUE_SET(value, vfps,
+			ODN_PDP_VSYNC3_VFPS_SHIFT,
+			ODN_PDP_VSYNC3_VFPS_MASK);
+	value = REG_VALUE_SET(value, vbbs,
+			ODN_PDP_VSYNC3_VBBS_SHIFT,
+			ODN_PDP_VSYNC3_VBBS_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_VSYNC3_OFFSET, value);
+
+	/* Horizontal data enable */
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_HDECTRL_OFFSET);
+	value = REG_VALUE_SET(value, hlbs,
+			ODN_PDP_HDECTRL_HDES_SHIFT,
+			ODN_PDP_HDECTRL_HDES_MASK);
+	value = REG_VALUE_SET(value, hfps,
+			ODN_PDP_HDECTRL_HDEF_SHIFT,
+			ODN_PDP_HDECTRL_HDEF_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_HDECTRL_OFFSET, value);
+
+	/* Vertical data enable */
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_VDECTRL_OFFSET);
+	value = REG_VALUE_SET(value, vtbs,
+			ODN_PDP_VDECTRL_VDES_SHIFT,
+			ODN_PDP_VDECTRL_VDES_MASK);
+	value = REG_VALUE_SET(value, vfps,
+			ODN_PDP_VDECTRL_VDEF_SHIFT,
+			ODN_PDP_VDECTRL_VDEF_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_VDECTRL_OFFSET, value);
+
+	/* Vertical event start and vertical fetch start */
+	value = odin_pdp_rreg32(pdp_reg,
+			ODN_PDP_VEVENT_OFFSET);
+	value = REG_VALUE_SET(value, vbps,
+			ODN_PDP_VEVENT_VFETCH_SHIFT,
+			ODN_PDP_VEVENT_VFETCH_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_VEVENT_OFFSET, value);
+
+	/* Set up polarities of sync/blank */
+	value = REG_VALUE_SET(0, 0x1,
+			ODN_PDP_SYNCCTRL_BLNKPOL_SHIFT,
+			ODN_PDP_SYNCCTRL_BLNKPOL_MASK);
+	if (nhsync)
+		value = REG_VALUE_SET(value, 0x1,
+			ODN_PDP_SYNCCTRL_HSPOL_SHIFT,
+			ODN_PDP_SYNCCTRL_HSPOL_MASK);
+	if (nvsync)
+		value = REG_VALUE_SET(value, 0x1,
+			ODN_PDP_SYNCCTRL_VSPOL_SHIFT,
+			ODN_PDP_SYNCCTRL_VSPOL_MASK);
+	odin_pdp_wreg32(pdp_reg,
+			ODN_PDP_SYNCCTRL_OFFSET,
+			value);
+}
+
+#endif /* __PDP_ODIN_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_apollo.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_apollo.c
new file mode 100644
index 0000000..0408bf7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_apollo.c
@@ -0,0 +1,1204 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*
+ * This is a device driver for the apollo testchip framework. It creates
+ * platform devices for the pdp and ext sub-devices, and exports functions to
+ * manage the shared interrupt handling
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/thermal.h>
+
+#include "tc_drv_internal.h"
+#include "tc_apollo.h"
+#include "tc_ion.h"
+
+#include "apollo_regs.h"
+#include "tcf_clk_ctrl.h"
+#include "tcf_pll.h"
+
+#if defined(SUPPORT_APOLLO_FPGA)
+#include "tc_apollo_debugfs.h"
+#endif /* defined(SUPPORT_APOLLO_FPGA) */
+
+#define TC_INTERRUPT_FLAG_PDP      (1 << PDP1_INT_SHIFT)
+#define TC_INTERRUPT_FLAG_EXT      (1 << EXT_INT_SHIFT)
+
+#define PCI_VENDOR_ID_POWERVR      0x1010
+#define DEVICE_ID_PCI_APOLLO_FPGA  0x1CF1
+#define DEVICE_ID_PCIE_APOLLO_FPGA 0x1CF2
+
+#define APOLLO_MEM_PCI_BASENUM	   (2)
+
+static struct {
+	struct thermal_zone_device *thermal_zone;
+
+#if defined(SUPPORT_APOLLO_FPGA)
+	struct tc_io_region fpga;
+	struct apollo_debugfs_fpga_entries fpga_entries;
+#endif
+} apollo_pdata;
+
+#if defined(SUPPORT_APOLLO_FPGA)
+
+#define APOLLO_DEVICE_NAME_FPGA "apollo_fpga"
+
+struct apollo_fpga_platform_data {
+	resource_size_t tc_memory_base;
+
+	resource_size_t pdp_heap_memory_base;
+	resource_size_t pdp_heap_memory_size;
+};
+
+#endif /* defined(SUPPORT_APOLLO_FPGA) */
+
+static void spi_write(struct tc_device *tc, u32 off, u32 val)
+{
+	iowrite32(off, tc->tcf.registers
+		  + TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR);
+	iowrite32(val, tc->tcf.registers
+		  + TCF_CLK_CTRL_TCF_SPI_MST_WDATA);
+	iowrite32(TCF_SPI_MST_GO_MASK, tc->tcf.registers
+		  + TCF_CLK_CTRL_TCF_SPI_MST_GO);
+	udelay(1000);
+}
+
+static int spi_read(struct tc_device *tc, u32 off, u32 *val)
+{
+	int cnt = 0;
+	u32 spi_mst_status;
+
+	iowrite32(0x40000 | off, tc->tcf.registers
+		  + TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR);
+	iowrite32(TCF_SPI_MST_GO_MASK, tc->tcf.registers
+		  + TCF_CLK_CTRL_TCF_SPI_MST_GO);
+
+	udelay(100);
+
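+	/* Poll the SPI master status until the read completes (status
+	 * becomes 0x08), giving up after 10000 polls rather than spinning
+	 * forever.
+	 */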
+	do {
+		spi_mst_status = ioread32(tc->tcf.registers
+					  + TCF_CLK_CTRL_TCF_SPI_MST_STATUS);
+
+		if (cnt++ > 10000) {
+			dev_err(&tc->pdev->dev,
+				"spi_read: Time out reading SPI reg (0x%x)\n",
+				off);
+			return -1;
+		}
+
+	} while (spi_mst_status != 0x08);
+
+	*val = ioread32(tc->tcf.registers
+			+ TCF_CLK_CTRL_TCF_SPI_MST_RDATA);
+
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+static int apollo_thermal_get_temp(struct thermal_zone_device *thermal,
+				   unsigned long *t)
+#else
+static int apollo_thermal_get_temp(struct thermal_zone_device *thermal,
+				   int *t)
+#endif
+{
+	struct tc_device *tc;
+	int err = -ENODEV;
+	u32 tmp;
+
+	if (!thermal)
+		goto err_out;
+
+	tc = (struct tc_device *)thermal->devdata;
+
+	if (!tc)
+		goto err_out;
+
+	if (spi_read(tc, TCF_TEMP_SENSOR_SPI_OFFSET, &tmp)) {
+		dev_err(&tc->pdev->dev,
+				"Failed to read apollo temperature sensor\n");
+
+		goto err_out;
+	}
+
+	/* Report this in millidegree Celsius */
+	*t = TCF_TEMP_SENSOR_TO_C(tmp) * 1000;
+
+	err = 0;
+
+err_out:
+	return err;
+}
+
+static struct thermal_zone_device_ops apollo_thermal_dev_ops = {
+	.get_temp = apollo_thermal_get_temp,
+};
+
+#if defined(SUPPORT_RGX)
+
+static void pll_write_reg(struct tc_device *tc,
+	resource_size_t reg_offset, u32 reg_value)
+{
+	BUG_ON(reg_offset < TCF_PLL_PLL_CORE_CLK0);
+	BUG_ON(reg_offset > tc->tcf_pll.region.size +
+		TCF_PLL_PLL_CORE_CLK0 - 4);
+
+	/* Tweak the offset because we haven't mapped the full pll region */
+	iowrite32(reg_value, tc->tcf_pll.registers +
+		reg_offset - TCF_PLL_PLL_CORE_CLK0);
+}
+
+static u32 sai_read_es2(struct tc_device *tc, u32 addr)
+{
+	iowrite32(0x200 | addr, tc->tcf.registers + 0x300);
+	iowrite32(0x1 | addr, tc->tcf.registers + 0x318);
+	return ioread32(tc->tcf.registers + 0x310);
+}
+
+static int apollo_align_interface_es2(struct tc_device *tc)
+{
+	u32 reg = 0;
+	u32 reg_reset_n;
+	int reset_cnt = 0;
+	int err = -EFAULT;
+	bool aligned = false;
+
+	/* Try to enable the core clock PLL */
+	spi_write(tc, 0x1, 0x0);
+	reg  = ioread32(tc->tcf.registers + 0x320);
+	reg |= 0x1;
+	iowrite32(reg, tc->tcf.registers + 0x320);
+	reg &= 0xfffffffe;
+	iowrite32(reg, tc->tcf.registers + 0x320);
+	msleep(1000);
+
+	if (spi_read(tc, 0x2, &reg)) {
+		dev_err(&tc->pdev->dev,
+				"Unable to read PLL status\n");
+		goto err_out;
+	}
+
+	if (reg == 0x1) {
+		/* Select DUT PLL as core clock */
+		reg  = ioread32(tc->tcf.registers +
+			TCF_CLK_CTRL_DUT_CONTROL_1);
+		reg &= 0xfffffff7;
+		iowrite32(reg, tc->tcf.registers +
+			TCF_CLK_CTRL_DUT_CONTROL_1);
+	} else {
+		dev_err(&tc->pdev->dev,
+			"PLL has failed to lock, status = %x\n", reg);
+		goto err_out;
+	}
+
+	reg_reset_n = ioread32(tc->tcf.registers +
+		TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+
+	while (!aligned && reset_cnt < 10 &&
+			tc->version != APOLLO_VERSION_TCF_5) {
+		int bank;
+		u32 eyes;
+		u32 clk_taps;
+		u32 train_ack;
+
+		++reset_cnt;
+
+		/* Reset the DUT to allow the SAI to retrain */
+		reg_reset_n &= ~(0x1 << DUT_RESETN_SHIFT);
+		iowrite32(reg_reset_n, tc->tcf.registers +
+			  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+		udelay(100);
+		reg_reset_n |= (0x1 << DUT_RESETN_SHIFT);
+		iowrite32(reg_reset_n, tc->tcf.registers +
+			  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+		udelay(100);
+
+		/* Assume alignment passed; if any bank fails on either the
+		 * DUT or the FPGA we set this to false and retry, up to a
+		 * maximum of 10 attempts.
+		 */
+		aligned = true;
+
+		/* For each of the banks */
+		for (bank = 0; bank < 10; bank++) {
+			int bank_aligned = 0;
+			/* Check alignment on the DUT */
+			u32 bank_base = 0x7000 + (0x1000 * bank);
+
+			spi_read(tc, bank_base + 0x4, &eyes);
+			spi_read(tc, bank_base + 0x3, &clk_taps);
+			spi_read(tc, bank_base + 0x6, &train_ack);
+
+			bank_aligned = tc_is_interface_aligned(
+					eyes, clk_taps, train_ack);
+			if (!bank_aligned) {
+				dev_warn(&tc->pdev->dev,
+					"Alignment check failed, retrying\n");
+				aligned = false;
+				break;
+			}
+
+			/* Check alignment on the FPGA */
+			bank_base = 0xb0 + (0x10 * bank);
+
+			eyes = sai_read_es2(tc, bank_base + 0x4);
+			clk_taps = sai_read_es2(tc, bank_base + 0x3);
+			train_ack = sai_read_es2(tc, bank_base + 0x6);
+
+			bank_aligned = tc_is_interface_aligned(
+					eyes, clk_taps, train_ack);
+
+			if (!bank_aligned) {
+				dev_warn(&tc->pdev->dev,
+					"Alignment check failed, retrying\n");
+				aligned = false;
+				break;
+			}
+		}
+	}
+
+	if (!aligned) {
+		dev_err(&tc->pdev->dev, "Unable to initialise the testchip (interface alignment failure), please restart the system.\n");
+		/* We are not returning an error here because the VP doesn't
+		 * implement the necessary registers, although it claims to be
+		 * TC compatible.
+		 */
+	}
+
+	if (reset_cnt > 1) {
+		dev_dbg(&tc->pdev->dev, "Note: The testchip required more than one reset to find a good interface alignment!\n");
+		dev_dbg(&tc->pdev->dev, "      This should be harmless, but if you do suspect foul play, please reset the machine.\n");
+		dev_dbg(&tc->pdev->dev, "      If you continue to see this message you may want to report it to IMGWORKS.\n");
+	}
+
+	err = 0;
+err_out:
+	return err;
+}
+
+static void apollo_set_clocks(struct tc_device *tc,
+			      int core_clock, int mem_clock, int sys_clock)
+{
+	u32 val;
+
+	/* This is disabled for TCF2 since the current FPGA builds do not
+	 * like their core clocks being set (it takes apollo down).
+	 */
+	if (tc->version != APOLLO_VERSION_TCF_2) {
+		val = core_clock / 1000000;
+		pll_write_reg(tc, TCF_PLL_PLL_CORE_CLK0, val);
+
+		val = 0x1 << PLL_CORE_DRP_GO_SHIFT;
+		pll_write_reg(tc, TCF_PLL_PLL_CORE_DRP_GO, val);
+	}
+
+	val = mem_clock / 1000000;
+	pll_write_reg(tc, TCF_PLL_PLL_MEMIF_CLK0, val);
+
+	val = 0x1 << PLL_MEM_DRP_GO_SHIFT;
+	pll_write_reg(tc, TCF_PLL_PLL_MEM_DRP_GO, val);
+
+	if (tc->version == APOLLO_VERSION_TCF_5) {
+		val = sys_clock / 1000000;
+		pll_write_reg(tc, TCF_PLL_PLL_SYSIF_CLK0, val);
+
+		val = 0x1 << PLL_MEM_DRP_GO_SHIFT;
+		pll_write_reg(tc, TCF_PLL_PLL_SYS_DRP_GO, val);
+	}
+
+	dev_info(&tc->pdev->dev, "Setting clocks to %uMHz/%uMHz\n",
+			 core_clock / 1000000,
+			 mem_clock / 1000000);
+	udelay(400);
+}
+
+#endif /* defined(SUPPORT_RGX) */
+
+static void apollo_set_mem_mode(struct tc_device *tc)
+{
+	u32 val;
+
+	val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL);
+	val &= ~(ADDRESS_FORCE_MASK | PCI_TEST_MODE_MASK | HOST_ONLY_MODE_MASK
+		| HOST_PHY_MODE_MASK);
+	val |= (0x1 << ADDRESS_FORCE_SHIFT);
+	iowrite32(val, tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL);
+}
+
+static int apollo_hard_reset(struct tc_device *tc,
+			     int core_clock, int mem_clock, int sys_clock)
+{
+	u32 reg;
+	u32 reg_reset_n = 0;
+
+	/* For displaying some build info */
+	u32 build_inc;
+	u32 build_owner;
+
+	int err = 0;
+
+	/* This is required for SPI reset which is not yet implemented. */
+	/*u32 aux_reset_n;*/
+
+	if (tc->version == APOLLO_VERSION_TCF_2) {
+		/* Power down */
+		reg = ioread32(tc->tcf.registers +
+			TCF_CLK_CTRL_DUT_CONTROL_1);
+		reg &= ~DUT_CTRL_VCC_0V9EN;
+		reg &= ~DUT_CTRL_VCC_1V8EN;
+		reg |= DUT_CTRL_VCC_IO_INH;
+		reg |= DUT_CTRL_VCC_CORE_INH;
+		iowrite32(reg, tc->tcf.registers +
+			TCF_CLK_CTRL_DUT_CONTROL_1);
+		msleep(500);
+	}
+
+	/* Put everything into reset */
+	iowrite32(reg_reset_n, tc->tcf.registers +
+		TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+
+	/* Take PDP1 and PDP2 out of reset */
+	reg_reset_n |= (0x1 << PDP1_RESETN_SHIFT);
+	reg_reset_n |= (0x1 << PDP2_RESETN_SHIFT);
+
+	iowrite32(reg_reset_n, tc->tcf.registers +
+		TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+	msleep(100);
+
+	/* Take DDR out of reset */
+	reg_reset_n |= (0x1 << DDR_RESETN_SHIFT);
+	iowrite32(reg_reset_n, tc->tcf.registers +
+		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+
+#if defined(SUPPORT_RGX)
+	/* Set clock speed here, before reset. */
+	apollo_set_clocks(tc, core_clock, mem_clock, sys_clock);
+
+	/* Take GLB_CLKG and SCB out of reset */
+	reg_reset_n |= (0x1 << GLB_CLKG_EN_SHIFT);
+	reg_reset_n |= (0x1 << SCB_RESETN_SHIFT);
+	iowrite32(reg_reset_n, tc->tcf.registers +
+		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+	msleep(100);
+
+	if (tc->version == APOLLO_VERSION_TCF_2) {
+		/* Enable the voltage control regulators on DUT */
+		reg = ioread32(tc->tcf.registers +
+			TCF_CLK_CTRL_DUT_CONTROL_1);
+		reg |= DUT_CTRL_VCC_0V9EN;
+		reg |= DUT_CTRL_VCC_1V8EN;
+		reg &= ~DUT_CTRL_VCC_IO_INH;
+		reg &= ~DUT_CTRL_VCC_CORE_INH;
+		iowrite32(reg, tc->tcf.registers +
+			TCF_CLK_CTRL_DUT_CONTROL_1);
+		msleep(300);
+	}
+
+	/* Take DUT_DCM out of reset */
+	reg_reset_n |= (0x1 << DUT_DCM_RESETN_SHIFT);
+	iowrite32(reg_reset_n, tc->tcf.registers +
+		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+	msleep(100);
+
+	err = tc_iopol32_nonzero(DCM_LOCK_STATUS_MASK,
+		tc->tcf.registers + TCF_CLK_CTRL_DCM_LOCK_STATUS);
+
+	if (err != 0)
+		goto err_out;
+
+	if (tc->version == APOLLO_VERSION_TCF_2) {
+		/* Set ODT to a specific value that seems to provide the most
+		 * stable signals.
+		 */
+		spi_write(tc, 0x11, 0x413130);
+	}
+
+	/* Take DUT out of reset */
+	reg_reset_n |= (0x1 << DUT_RESETN_SHIFT);
+	iowrite32(reg_reset_n, tc->tcf.registers +
+		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
+	msleep(100);
+
+	if (tc->version != APOLLO_VERSION_TCF_5) {
+		err = apollo_align_interface_es2(tc);
+		if (err)
+			goto err_out;
+	}
+
+#endif /* defined(SUPPORT_RGX) */
+
+	if (tc->version == APOLLO_VERSION_TCF_2) {
+		/* Enable the temperature sensor */
+		spi_write(tc, 0xc, 0); /* power up */
+		spi_write(tc, 0xc, 2); /* reset */
+		spi_write(tc, 0xc, 6); /* init & run */
+
+		/* Register a new thermal zone */
+		apollo_pdata.thermal_zone =
+			thermal_zone_device_register("apollo", 0, 0, tc,
+						     &apollo_thermal_dev_ops,
+						     NULL, 0, 0);
+		if (IS_ERR(apollo_pdata.thermal_zone)) {
+			dev_warn(&tc->pdev->dev, "Couldn't register thermal zone");
+			apollo_pdata.thermal_zone = NULL;
+		}
+	}
+
+	/* Check the build */
+	reg = ioread32(tc->tcf.registers + 0x10);
+	build_inc = (reg >> 12) & 0xff;
+	build_owner = (reg >> 20) & 0xf;
+
+	if (build_inc) {
+		dev_alert(&tc->pdev->dev,
+			"BE WARNED: You are not running a tagged release of the FPGA!\n");
+
+		dev_alert(&tc->pdev->dev, "Owner: 0x%01x, Inc: 0x%02x\n",
+			  build_owner, build_inc);
+	}
+
+	dev_info(&tc->pdev->dev, "FPGA Release: %u.%02u\n", reg >> 8 & 0xf,
+		reg & 0xff);
+
+#if defined(SUPPORT_RGX)
+err_out:
+#endif /* defined(SUPPORT_RGX) */
+	return err;
+}
+
+static int apollo_hw_init(struct tc_device *tc,
+			  int core_clock, int mem_clock, int sys_clock)
+{
+	int err = 0;
+
+	err = apollo_hard_reset(tc, core_clock, mem_clock, sys_clock);
+	if (err)
+		goto err_out;
+
+	apollo_set_mem_mode(tc);
+
+#if defined(SUPPORT_RGX)
+	if (tc->version == APOLLO_VERSION_TCF_BONNIE) {
+		u32 reg;
+		/* Enable ASTC via SPI */
+		if (spi_read(tc, 0xf, &reg)) {
+			dev_err(&tc->pdev->dev,
+				"Failed to read apollo ASTC register\n");
+			err = -ENODEV;
+			goto err_out;
+		}
+
+		reg |= 0x1 << 4;
+		spi_write(tc, 0xf, reg);
+	}
+#endif /* defined(SUPPORT_RGX) */
+
+err_out:
+	return err;
+}
+
+static int apollo_enable_irq(struct tc_device *tc)
+{
+	int err = 0;
+
+#if defined(TC_FAKE_INTERRUPTS)
+	setup_timer(&tc->timer, tc_irq_fake_wrapper,
+		(unsigned long)tc);
+	mod_timer(&tc->timer,
+		jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS));
+#else
+	{
+		u32 val;
+
+		iowrite32(0, tc->tcf.registers +
+			TCF_CLK_CTRL_INTERRUPT_ENABLE);
+		iowrite32(0xffffffff, tc->tcf.registers +
+			TCF_CLK_CTRL_INTERRUPT_CLEAR);
+
+		/* Set sense to active high */
+		val = ioread32(tc->tcf.registers +
+			TCF_CLK_CTRL_INTERRUPT_OP_CFG) & ~(INT_SENSE_MASK);
+		iowrite32(val, tc->tcf.registers +
+			TCF_CLK_CTRL_INTERRUPT_OP_CFG);
+
+		err = request_irq(tc->pdev->irq, apollo_irq_handler,
+			IRQF_SHARED, DRV_NAME, tc);
+	}
+#endif
+	return err;
+}
+
+static void apollo_disable_irq(struct tc_device *tc)
+{
+#if defined(TC_FAKE_INTERRUPTS)
+	del_timer_sync(&tc->timer);
+#else
+	iowrite32(0, tc->tcf.registers +
+		TCF_CLK_CTRL_INTERRUPT_ENABLE);
+	iowrite32(0xffffffff, tc->tcf.registers +
+		TCF_CLK_CTRL_INTERRUPT_CLEAR);
+
+	free_irq(tc->pdev->irq, tc);
+#endif
+}
+
+static enum tc_version_t
+apollo_detect_tc_version(struct tc_device *tc)
+{
+	u32 val = ioread32(tc->tcf.registers +
+		       TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG);
+
+	switch (val) {
+	default:
+		dev_err(&tc->pdev->dev,
+			"Unknown TCF core target build ID (0x%x) - assuming Hood ES2 - PLEASE REPORT TO ANDROID TEAM\n",
+			val);
+		/* Fall-through */
+	case 5:
+		dev_err(&tc->pdev->dev, "Looks like a Hood ES2 TC\n");
+		return APOLLO_VERSION_TCF_2;
+	case 1:
+		dev_err(&tc->pdev->dev, "Looks like a TCF5\n");
+		return APOLLO_VERSION_TCF_5;
+	case 6:
+		dev_err(&tc->pdev->dev, "Looks like a Bonnie TC\n");
+		return APOLLO_VERSION_TCF_BONNIE;
+	}
+}
+
+static u32 apollo_interrupt_id_to_flag(int interrupt_id)
+{
+	switch (interrupt_id) {
+	case TC_INTERRUPT_PDP:
+		return TC_INTERRUPT_FLAG_PDP;
+	case TC_INTERRUPT_EXT:
+		return TC_INTERRUPT_FLAG_EXT;
+	default:
+		BUG();
+	}
+}
+
+static int apollo_dev_init(struct tc_device *tc, struct pci_dev *pdev,
+			   int pdp_mem_size, int secure_mem_size)
+{
+	int err;
+
+	/* Reserve and map the tcf_clk / "sys" registers */
+	err = setup_io_region(pdev, &tc->tcf,
+		SYS_APOLLO_REG_PCI_BASENUM,
+		SYS_APOLLO_REG_SYS_OFFSET, SYS_APOLLO_REG_SYS_SIZE);
+	if (err)
+		goto err_out;
+
+	/* Reserve and map the tcf_pll registers */
+	err = setup_io_region(pdev, &tc->tcf_pll,
+		SYS_APOLLO_REG_PCI_BASENUM,
+		SYS_APOLLO_REG_PLL_OFFSET + TCF_PLL_PLL_CORE_CLK0,
+		TCF_PLL_PLL_DRP_STATUS - TCF_PLL_PLL_CORE_CLK0 + 4);
+	if (err)
+		goto err_unmap_sys_registers;
+
+#if defined(SUPPORT_APOLLO_FPGA)
+#define FPGA_REGISTERS_SIZE 4
+	/* If this is a special 'fpga' build, have the apollo driver manage
+	 * the second register bar.
+	 */
+	err = setup_io_region(pdev, &apollo_pdata.fpga,
+		SYS_RGX_REG_PCI_BASENUM, 0, FPGA_REGISTERS_SIZE);
+	if (err)
+		goto err_unmap_pll_registers;
+#endif
+
+	/* Detect testchip version */
+	tc->version = apollo_detect_tc_version(tc);
+
+	/* Setup card memory */
+	tc->tc_mem.base =
+		pci_resource_start(pdev, APOLLO_MEM_PCI_BASENUM);
+	tc->tc_mem.size =
+		pci_resource_len(pdev, APOLLO_MEM_PCI_BASENUM);
+
+	if (tc->tc_mem.size < pdp_mem_size) {
+		dev_err(&pdev->dev,
+			"Apollo MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu",
+			APOLLO_MEM_PCI_BASENUM,
+			(unsigned long)tc->tc_mem.size,
+			(unsigned long)pdp_mem_size);
+		err = -EIO;
+		goto err_unmap_fpga_registers;
+	}
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	if (tc->tc_mem.size <
+	    (pdp_mem_size + secure_mem_size)) {
+		dev_err(&pdev->dev,
+			"Apollo MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu plus the requested secure heap size %lu",
+			APOLLO_MEM_PCI_BASENUM,
+			(unsigned long)tc->tc_mem.size,
+			(unsigned long)pdp_mem_size,
+			(unsigned long)secure_mem_size);
+		err = -EIO;
+		goto err_unmap_fpga_registers;
+	}
+#endif
+
+	err = tc_mtrr_setup(tc);
+	if (err)
+		goto err_unmap_fpga_registers;
+
+	/* Setup ranges for the device heaps */
+	tc->pdp_heap_mem_size = pdp_mem_size;
+
+	/* We know ext_heap_mem_size won't underflow as we've compared
+	 * tc_mem.size against the pdp_mem_size value earlier
+	 */
+	tc->ext_heap_mem_size =
+		tc->tc_mem.size - tc->pdp_heap_mem_size;
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	tc->ext_heap_mem_size -= secure_mem_size;
+#endif
+
+	if (tc->ext_heap_mem_size < TC_EXT_MINIMUM_MEM_SIZE) {
+		dev_warn(&pdev->dev,
+			"Apollo MEM region (bar %d) has size of %lu, with %lu pdp_mem_size only %lu bytes are left for ext device, which looks too small",
+			APOLLO_MEM_PCI_BASENUM,
+			(unsigned long)tc->tc_mem.size,
+			(unsigned long)pdp_mem_size,
+			(unsigned long)tc->ext_heap_mem_size);
+		/* Continue as this is only a 'helpful warning' not a hard
+		 * requirement
+		 */
+	}
+
+	tc->ext_heap_mem_base = tc->tc_mem.base;
+	tc->pdp_heap_mem_base =
+		tc->tc_mem.base + tc->ext_heap_mem_size;
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	tc->secure_heap_mem_base = tc->pdp_heap_mem_base +
+		tc->pdp_heap_mem_size;
+	tc->secure_heap_mem_size = secure_mem_size;
+#endif
+
+#if defined(SUPPORT_ION)
+	err = tc_ion_init(tc, APOLLO_MEM_PCI_BASENUM);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialise ION\n");
+		goto err_unmap_fpga_registers;
+	}
+#endif
+
+#if defined(SUPPORT_APOLLO_FPGA)
+	apollo_debugfs_add_fpga_entries(tc, &apollo_pdata.fpga,
+					&apollo_pdata.fpga_entries);
+#endif /* defined(SUPPORT_APOLLO_FPGA) */
+
+err_out:
+	return err;
+err_unmap_fpga_registers:
+#if defined(SUPPORT_APOLLO_FPGA)
+	iounmap(apollo_pdata.fpga.registers);
+	release_pci_io_addr(pdev, SYS_RGX_REG_PCI_BASENUM,
+		apollo_pdata.fpga.region.base, apollo_pdata.fpga.region.size);
+err_unmap_pll_registers:
+#endif /* defined(SUPPORT_APOLLO_FPGA) */
+	iounmap(tc->tcf_pll.registers);
+	release_pci_io_addr(pdev, SYS_APOLLO_REG_PCI_BASENUM,
+		tc->tcf_pll.region.base, tc->tcf_pll.region.size);
+err_unmap_sys_registers:
+	iounmap(tc->tcf.registers);
+	release_pci_io_addr(pdev, SYS_APOLLO_REG_PCI_BASENUM,
+		tc->tcf.region.base, tc->tcf.region.size);
+	goto err_out;
+}
+
+static void apollo_dev_cleanup(struct tc_device *tc)
+{
+#if defined(SUPPORT_APOLLO_FPGA)
+	apollo_debugfs_remove_fpga_entries(&apollo_pdata.fpga_entries);
+#endif
+
+#if defined(SUPPORT_ION)
+	tc_ion_deinit(tc, APOLLO_MEM_PCI_BASENUM);
+#endif
+
+	tc_mtrr_cleanup(tc);
+
+#if defined(SUPPORT_APOLLO_FPGA)
+	iounmap(apollo_pdata.fpga.registers);
+	release_pci_io_addr(tc->pdev, SYS_RGX_REG_PCI_BASENUM,
+		apollo_pdata.fpga.region.base, apollo_pdata.fpga.region.size);
+#endif
+
+	iounmap(tc->tcf_pll.registers);
+	release_pci_io_addr(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM,
+		tc->tcf_pll.region.base, tc->tcf_pll.region.size);
+
+	iounmap(tc->tcf.registers);
+	release_pci_io_addr(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM,
+		tc->tcf.region.base, tc->tcf.region.size);
+
+	if (apollo_pdata.thermal_zone)
+		thermal_zone_device_unregister(apollo_pdata.thermal_zone);
+}
+
+int apollo_init(struct tc_device *tc, struct pci_dev *pdev,
+		int core_clock, int mem_clock, int sys_clock,
+		int pdp_mem_size, int secure_mem_size)
+{
+	int err = 0;
+
+	err = apollo_dev_init(tc, pdev, pdp_mem_size, secure_mem_size);
+	if (err) {
+		dev_err(&pdev->dev, "apollo_dev_init failed\n");
+		goto err_out;
+	}
+
+	err = apollo_hw_init(tc, core_clock, mem_clock, sys_clock);
+	if (err) {
+		dev_err(&pdev->dev, "apollo_hw_init failed\n");
+		goto err_dev_cleanup;
+	}
+
+	err = apollo_enable_irq(tc);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Failed to initialise IRQ\n");
+		goto err_dev_cleanup;
+	}
+
+err_out:
+	return err;
+
+err_dev_cleanup:
+	apollo_dev_cleanup(tc);
+	goto err_out;
+}
+
+int apollo_cleanup(struct tc_device *tc)
+{
+	apollo_disable_irq(tc);
+	apollo_dev_cleanup(tc);
+
+	return 0;
+}
+
+int apollo_register_pdp_device(struct tc_device *tc)
+{
+	int err = 0;
+	resource_size_t reg_start =
+		pci_resource_start(tc->pdev,
+				   SYS_APOLLO_REG_PCI_BASENUM);
+	struct resource pdp_resources_es2[] = {
+		DEFINE_RES_MEM_NAMED(reg_start + SYS_APOLLO_REG_PDP1_OFFSET,
+				SYS_APOLLO_REG_PDP1_SIZE, "pdp-regs"),
+		DEFINE_RES_MEM_NAMED(reg_start +
+				SYS_APOLLO_REG_PLL_OFFSET +
+				TCF_PLL_PLL_PDP_CLK0,
+				TCF_PLL_PLL_PDP2_DRP_GO -
+				TCF_PLL_PLL_PDP_CLK0 + 4, "pll-regs"),
+	};
+	struct resource pdp_resources_tcf5[] = {
+		DEFINE_RES_MEM_NAMED(reg_start + SYS_APOLLO_REG_PDP1_OFFSET,
+				SYS_APOLLO_REG_PDP1_SIZE, "pdp-regs"),
+		DEFINE_RES_MEM_NAMED(reg_start +
+				SYS_APOLLO_REG_PLL_OFFSET +
+				TCF_PLL_PLL_PDP_CLK0,
+				TCF_PLL_PLL_PDP2_DRP_GO -
+				TCF_PLL_PLL_PDP_CLK0 + 4, "pll-regs"),
+		DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
+				TC5_SYS_APOLLO_REG_PCI_BASENUM)
+				+ TC5_SYS_APOLLO_REG_PDP2_OFFSET,
+			TC5_SYS_APOLLO_REG_PDP2_SIZE, "tc5-pdp2-regs"),
+
+		DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
+				TC5_SYS_APOLLO_REG_PCI_BASENUM)
+				+ TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET,
+				TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE,
+				"tc5-pdp2-fbdc-regs"),
+
+		DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
+				TC5_SYS_APOLLO_REG_PCI_BASENUM)
+				+ TC5_SYS_APOLLO_REG_HDMI_OFFSET,
+				TC5_SYS_APOLLO_REG_HDMI_SIZE,
+				"tc5-adv5711-regs"),
+	};
+
+	struct tc_pdp_platform_data pdata = {
+#if defined(SUPPORT_ION)
+		.ion_device = tc->ion_device,
+		.ion_heap_id = ION_HEAP_TC_PDP,
+#endif
+		.memory_base = tc->tc_mem.base,
+		.pdp_heap_memory_base = tc->pdp_heap_mem_base,
+		.pdp_heap_memory_size = tc->pdp_heap_mem_size,
+	};
+	struct platform_device_info pdp_device_info = {
+		.parent = &tc->pdev->dev,
+		.name = APOLLO_DEVICE_NAME_PDP,
+		.id = -2,
+		.data = &pdata,
+		.size_data = sizeof(pdata),
+		.dma_mask = DMA_BIT_MASK(32),
+	};
+
+	if (tc->version == APOLLO_VERSION_TCF_5) {
+		pdp_device_info.res = pdp_resources_tcf5;
+		pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_tcf5);
+	} else if (tc->version == APOLLO_VERSION_TCF_2 ||
+			tc->version == APOLLO_VERSION_TCF_BONNIE) {
+		pdp_device_info.res = pdp_resources_es2;
+		pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_es2);
+	} else {
+		dev_err(&tc->pdev->dev,
+			"Unable to set PDP resource info for unknown apollo device\n");
+	}
+
+	tc->pdp_dev = platform_device_register_full(&pdp_device_info);
+	if (IS_ERR(tc->pdp_dev)) {
+		err = PTR_ERR(tc->pdp_dev);
+		dev_err(&tc->pdev->dev,
+			"Failed to register PDP device (%d)\n", err);
+		tc->pdp_dev = NULL;
+		goto err;
+	}
+err:
+	return err;
+}
+
+#if defined(SUPPORT_RGX)
+
+int apollo_register_ext_device(struct tc_device *tc)
+{
+	int err = 0;
+	struct resource rogue_resources[] = {
+		DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
+				SYS_RGX_REG_PCI_BASENUM),
+			 SYS_RGX_REG_REGION_SIZE, "rogue-regs"),
+	};
+	struct tc_rogue_platform_data pdata = {
+#if defined(SUPPORT_ION)
+		.ion_device = tc->ion_device,
+		.ion_heap_id = ION_HEAP_TC_ROGUE,
+#endif
+		.tc_memory_base = tc->tc_mem.base,
+		.pdp_heap_memory_base = tc->pdp_heap_mem_base,
+		.pdp_heap_memory_size = tc->pdp_heap_mem_size,
+		.rogue_heap_memory_base = tc->ext_heap_mem_base,
+		.rogue_heap_memory_size = tc->ext_heap_mem_size,
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+		.secure_heap_memory_base = tc->secure_heap_mem_base,
+		.secure_heap_memory_size = tc->secure_heap_mem_size,
+#endif
+	};
+	struct platform_device_info rogue_device_info = {
+		.parent = &tc->pdev->dev,
+		.name = TC_DEVICE_NAME_ROGUE,
+		.id = -2,
+		.res = rogue_resources,
+		.num_res = ARRAY_SIZE(rogue_resources),
+		.data = &pdata,
+		.size_data = sizeof(pdata),
+	};
+
+	tc->ext_dev
+		= platform_device_register_full(&rogue_device_info);
+
+	if (IS_ERR(tc->ext_dev)) {
+		err = PTR_ERR(tc->ext_dev);
+		dev_err(&tc->pdev->dev,
+			"Failed to register rogue device (%d)\n", err);
+		tc->ext_dev = NULL;
+	}
+	return err;
+}
+
+#elif defined(SUPPORT_APOLLO_FPGA)
+
+int apollo_register_ext_device(struct tc_device *tc)
+{
+	int err = 0;
+	struct resource fpga_resources[] = {
+		/* FIXME: Don't overload SYS_RGX_REG_xxx for FPGA */
+		DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
+				SYS_RGX_REG_PCI_BASENUM),
+			 SYS_RGX_REG_REGION_SIZE, "fpga-regs"),
+	};
+	struct apollo_fpga_platform_data pdata = {
+		.tc_memory_base = tc->tc_mem.base,
+		.pdp_heap_memory_base = tc->pdp_heap_mem_base,
+		.pdp_heap_memory_size = tc->pdp_heap_mem_size,
+	};
+	struct platform_device_info fpga_device_info = {
+		.parent = &tc->pdev->dev,
+		.name = APOLLO_DEVICE_NAME_FPGA,
+		.id = -1,
+		.res = fpga_resources,
+		.num_res = ARRAY_SIZE(fpga_resources),
+		.data = &pdata,
+		.size_data = sizeof(pdata),
+		.dma_mask = DMA_BIT_MASK(32),
+	};
+
+	tc->ext_dev = platform_device_register_full(&fpga_device_info);
+	if (IS_ERR(tc->ext_dev)) {
+		err = PTR_ERR(tc->ext_dev);
+		dev_err(&tc->pdev->dev,
+			"Failed to register fpga device (%d)\n", err);
+		tc->ext_dev = NULL;
+		/* Fall through */
+	}
+
+	return err;
+}
+
+#else /* defined(SUPPORT_APOLLO_FPGA) */
+
+int apollo_register_ext_device(struct tc_device *tc)
+{
+	return 0;
+}
+
+#endif /* defined(SUPPORT_RGX) */
+
+void apollo_enable_interrupt_register(struct tc_device *tc,
+				      int interrupt_id)
+{
+	u32 val;
+
+	if (interrupt_id == TC_INTERRUPT_PDP ||
+		interrupt_id == TC_INTERRUPT_EXT) {
+		val = ioread32(
+			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
+		val |= apollo_interrupt_id_to_flag(interrupt_id);
+		iowrite32(val,
+			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
+	}
+}
+
+void apollo_disable_interrupt_register(struct tc_device *tc,
+				       int interrupt_id)
+{
+	u32 val;
+
+	if (interrupt_id == TC_INTERRUPT_PDP ||
+		interrupt_id == TC_INTERRUPT_EXT) {
+		val = ioread32(
+			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
+		val &= ~(apollo_interrupt_id_to_flag(interrupt_id));
+		iowrite32(val,
+			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
+	}
+}
+
+irqreturn_t apollo_irq_handler(int irq, void *data)
+{
+	u32 interrupt_status;
+	u32 interrupt_clear = 0;
+	unsigned long flags;
+	irqreturn_t ret = IRQ_NONE;
+	struct tc_device *tc = (struct tc_device *)data;
+
+	spin_lock_irqsave(&tc->interrupt_handler_lock, flags);
+
+#if defined(TC_FAKE_INTERRUPTS)
+	/* If we're faking interrupts pretend we got both ext and PDP ints */
+	interrupt_status = TC_INTERRUPT_FLAG_EXT
+		| TC_INTERRUPT_FLAG_PDP;
+#else
+	interrupt_status = ioread32(tc->tcf.registers
+			+ TCF_CLK_CTRL_INTERRUPT_STATUS);
+#endif
+
+	if (interrupt_status & TC_INTERRUPT_FLAG_EXT) {
+		struct tc_interrupt_handler *ext_int =
+			&tc->interrupt_handlers[TC_INTERRUPT_EXT];
+
+		if (ext_int->enabled && ext_int->handler_function) {
+			ext_int->handler_function(ext_int->handler_data);
+			interrupt_clear |= TC_INTERRUPT_FLAG_EXT;
+		}
+		ret = IRQ_HANDLED;
+	}
+	if (interrupt_status & TC_INTERRUPT_FLAG_PDP) {
+		struct tc_interrupt_handler *pdp_int =
+			&tc->interrupt_handlers[TC_INTERRUPT_PDP];
+
+		if (pdp_int->enabled && pdp_int->handler_function) {
+			pdp_int->handler_function(pdp_int->handler_data);
+			interrupt_clear |= TC_INTERRUPT_FLAG_PDP;
+		}
+		ret = IRQ_HANDLED;
+	}
+
+	if (tc->version == APOLLO_VERSION_TCF_5) {
+		/* On TC5 the interrupt is not generated by the TC framework,
+		 * but by the PDP itself. So we always have to call back into
+		 * the TC5 PDP code regardless of the interrupt status of the
+		 * TCF.
+		 */
+		struct tc_interrupt_handler *pdp_int =
+			&tc->interrupt_handlers[TC_INTERRUPT_TC5_PDP];
+
+		if (pdp_int->enabled && pdp_int->handler_function) {
+			pdp_int->handler_function(pdp_int->handler_data);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	if (interrupt_clear)
+		iowrite32(0xffffffff,
+			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_CLEAR);
+
+	spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags);
+
+	return ret;
+}
+
+int apollo_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll)
+{
+	int err = 0;
+
+	*tmp = 0;
+	*pll = 0;
+
+	if (tc->version == APOLLO_VERSION_TCF_5)
+		/* Not implemented on TCF5 */
+		goto err_out;
+	else if (tc->version == APOLLO_VERSION_TCF_2) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+		unsigned long t;
+#else
+		int t;
+#endif
+
+		err = apollo_thermal_get_temp(apollo_pdata.thermal_zone, &t);
+		if (err)
+			goto err_out;
+		*tmp = t / 1000;
+	}
+
+	if (spi_read(tc, 0x2, pll)) {
+		dev_err(&tc->pdev->dev, "Failed to read PLL status\n");
+		err = -ENODEV;
+		goto err_out;
+	}
+
+err_out:
+	return err;
+}
+
+int apollo_sys_strings(struct tc_device *tc,
+		       char *str_fpga_rev, size_t size_fpga_rev,
+		       char *str_tcf_core_rev, size_t size_tcf_core_rev,
+		       char *str_tcf_core_target_build_id,
+		       size_t size_tcf_core_target_build_id,
+		       char *str_pci_ver, size_t size_pci_ver,
+		       char *str_macro_ver, size_t size_macro_ver)
+{
+	int err = 0;
+	u32 val;
+	resource_size_t host_fpga_base;
+	void __iomem *host_fpga_registers;
+
+	/* To get some of the version information we need to read from a
+	 * register that we don't normally have mapped. Map it temporarily
+	 * (without trying to reserve it) to get the information we need.
+	 */
+	host_fpga_base =
+		pci_resource_start(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM)
+				+ 0x40F0;
+
+	host_fpga_registers = ioremap_nocache(host_fpga_base, 0x04);
+	if (!host_fpga_registers) {
+		dev_err(&tc->pdev->dev,
+			"Failed to map host fpga registers\n");
+		err = -EIO;
+		goto err_out;
+	}
+
+	/* Create the components of the PCI and macro versions */
+	val = ioread32(host_fpga_registers);
+	snprintf(str_pci_ver, size_pci_ver, "%d",
+		 HEX2DEC((val & 0x00FF0000) >> 16));
+	snprintf(str_macro_ver, size_macro_ver, "%d.%d",
+		 (val & 0x00000F00) >> 8,
+		 HEX2DEC((val & 0x000000FF) >> 0));
+
+	/* Unmap the register now that we no longer need it */
+	iounmap(host_fpga_registers);
+
+	/* Create the components of the FPGA revision number */
+	val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_FPGA_REV_REG);
+	snprintf(str_fpga_rev, size_fpga_rev, "%d.%d.%d",
+		 HEX2DEC((val & FPGA_REV_REG_MAJOR_MASK)
+			 >> FPGA_REV_REG_MAJOR_SHIFT),
+		 HEX2DEC((val & FPGA_REV_REG_MINOR_MASK)
+			 >> FPGA_REV_REG_MINOR_SHIFT),
+		 HEX2DEC((val & FPGA_REV_REG_MAINT_MASK)
+			 >> FPGA_REV_REG_MAINT_SHIFT));
+
+	/* Create the components of the TCF core revision number */
+	val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TCF_CORE_REV_REG);
+	snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d.%d.%d",
+		 HEX2DEC((val & TCF_CORE_REV_REG_MAJOR_MASK)
+			 >> TCF_CORE_REV_REG_MAJOR_SHIFT),
+		 HEX2DEC((val & TCF_CORE_REV_REG_MINOR_MASK)
+			 >> TCF_CORE_REV_REG_MINOR_SHIFT),
+		 HEX2DEC((val & TCF_CORE_REV_REG_MAINT_MASK)
+			 >> TCF_CORE_REV_REG_MAINT_SHIFT));
+
+	/* Create the component of the TCF core target build ID */
+	val = ioread32(tc->tcf.registers +
+		       TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG);
+	snprintf(str_tcf_core_target_build_id, size_tcf_core_target_build_id,
+		"%d",
+		(val & TCF_CORE_TARGET_BUILD_ID_MASK)
+		>> TCF_CORE_TARGET_BUILD_ID_SHIFT);
+
+err_out:
+	return err;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_apollo.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_apollo.h
new file mode 100644
index 0000000..487a03d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_apollo.h
@@ -0,0 +1,78 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _APOLLO_DRV_H
+#define _APOLLO_DRV_H
+
+#include "tc_drv_internal.h"
+#include "apollo_regs.h"
+
+#if defined(SUPPORT_RGX) && defined(SUPPORT_APOLLO_FPGA)
+#error Define either SUPPORT_RGX or SUPPORT_APOLLO_FPGA, not both
+#endif
+
+int apollo_init(struct tc_device *tc, struct pci_dev *pdev,
+		int core_clock, int mem_clock, int sys_clock,
+		int pdp_mem_size, int secure_mem_size);
+int apollo_cleanup(struct tc_device *tc);
+
+int apollo_register_pdp_device(struct tc_device *tc);
+int apollo_register_ext_device(struct tc_device *tc);
+
+void apollo_enable_interrupt_register(struct tc_device *tc,
+				      int interrupt_id);
+void apollo_disable_interrupt_register(struct tc_device *tc,
+				       int interrupt_id);
+
+irqreturn_t apollo_irq_handler(int irq, void *data);
+
+int apollo_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll);
+int apollo_sys_strings(struct tc_device *tc,
+		       char *str_fpga_rev, size_t size_fpga_rev,
+		       char *str_tcf_core_rev, size_t size_tcf_core_rev,
+		       char *str_tcf_core_target_build_id,
+		       size_t size_tcf_core_target_build_id,
+		       char *str_pci_ver, size_t size_pci_ver,
+		       char *str_macro_ver, size_t size_macro_ver);
+
+#endif /* _APOLLO_DRV_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_drv.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_drv.c
new file mode 100644
index 0000000..1432164
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_drv.c
@@ -0,0 +1,774 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*
+ * This is a device driver for the testchip framework. It creates platform
+ * devices for the pdp and ext sub-devices, and exports functions to manage the
+ * shared interrupt handling
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+
+#if defined(CONFIG_MTRR)
+#include <asm/mtrr.h>
+#endif
+
+#include "pvrmodule.h"
+
+#include "tc_apollo.h"
+#include "tc_odin.h"
+
+/* How much memory to give to the PDP heap (used for pdp buffers). */
+#define TC_PDP_MEM_SIZE_BYTES           ((TC_DISPLAY_MEM_SIZE)*1024*1024)
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+/* How much memory to give to the secure heap. */
+#define TC_SECURE_MEM_SIZE_BYTES        ((TC_SECURE_MEM_SIZE)*1024*1024)
+#endif
+
+#define PCI_VENDOR_ID_POWERVR		0x1010
+#define DEVICE_ID_PCI_APOLLO_FPGA	0x1CF1
+#define DEVICE_ID_PCIE_APOLLO_FPGA	0x1CF2
+
+MODULE_DESCRIPTION("PowerVR testchip framework driver");
+
+static int tc_core_clock = RGX_TC_CORE_CLOCK_SPEED;
+module_param(tc_core_clock, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(tc_core_clock, "TC core clock speed");
+
+static int tc_mem_clock = RGX_TC_MEM_CLOCK_SPEED;
+module_param(tc_mem_clock, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(tc_mem_clock, "TC memory clock speed");
+
+static int tc_sys_clock = RGX_TC_SYS_CLOCK_SPEED;
+module_param(tc_sys_clock, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(tc_sys_clock, "TC system clock speed (TCF5 only)");
+
+static unsigned long tc_pdp_mem_size = TC_PDP_MEM_SIZE_BYTES;
+module_param(tc_pdp_mem_size, ulong, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(tc_pdp_mem_size,
+	"TC PDP reserved memory size in bytes");
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+static unsigned long tc_secure_mem_size = TC_SECURE_MEM_SIZE_BYTES;
+module_param(tc_secure_mem_size, ulong, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(tc_secure_mem_size,
+	"TC secure reserved memory size in bytes");
+#endif
+
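+/* All of the above parameters can be overridden at module load time, e.g.
+ * "modprobe tc tc_pdp_mem_size=<bytes>" (assuming the module is built as
+ * "tc"; the module name depends on the build configuration).
+ */
+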
+static struct debugfs_blob_wrapper tc_debugfs_rogue_name_blobs[] = {
+	[APOLLO_VERSION_TCF_2] = {
+		.data = "hood", /* probably */
+		.size = sizeof("hood") - 1,
+	},
+	[APOLLO_VERSION_TCF_5] = {
+		.data = "fpga (unknown)",
+		.size = sizeof("fpga (unknown)") - 1,
+	},
+	[APOLLO_VERSION_TCF_BONNIE] = {
+		.data = "bonnie",
+		.size = sizeof("bonnie") - 1,
+	},
+	[ODIN_VERSION_TCF_BONNIE] = {
+		.data = "bonnie",
+		.size = sizeof("bonnie") - 1,
+	},
+	[ODIN_VERSION_FPGA] = {
+		.data = "fpga (unknown)",
+		.size = sizeof("fpga (unknown)") - 1,
+	},
+};
+
+#if defined(CONFIG_MTRR) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
+/*
+ * A return value of:
+ *      0 or more means success
+ *     -1 means we were unable to add an mtrr but we should continue
+ *     -2 means we were unable to add an mtrr but we shouldn't continue
+ */
+static int mtrr_setup(struct pci_dev *pdev,
+		      resource_size_t mem_start,
+		      resource_size_t mem_size)
+{
+	int err;
+	int mtrr;
+
+	/* Reset MTRR */
+	mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_UNCACHABLE, 0);
+	if (mtrr < 0) {
+		dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
+			__LINE__, __func__, mtrr);
+		mtrr = -2;
+		goto err_out;
+	}
+
+	err = mtrr_del(mtrr, mem_start, mem_size);
+	if (err < 0) {
+		dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n",
+			__LINE__, __func__, err);
+		mtrr = -2;
+		goto err_out;
+	}
+
+	mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRBACK, 0);
+	if (mtrr < 0) {
+		/* Stop, but not an error as this may already be set up */
+		dev_dbg(&pdev->dev,
+			"%d - %s: mtrr_add failed (%d) - probably means the mtrr is already setup\n",
+			__LINE__, __func__, mtrr);
+		mtrr = -1;
+		goto err_out;
+	}
+
+	err = mtrr_del(mtrr, mem_start, mem_size);
+	if (err < 0) {
+		dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n",
+			__LINE__, __func__, err);
+		mtrr = -2;
+		goto err_out;
+	}
+
+	if (mtrr == 0) {
+		/* Replace 0 with a non-overlapping WRBACK mtrr */
+		err = mtrr_add(0, mem_start, MTRR_TYPE_WRBACK, 0);
+		if (err < 0) {
+			dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
+				__LINE__, __func__, err);
+			mtrr = -2;
+			goto err_out;
+		}
+	}
+
+	mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRCOMB, 0);
+	if (mtrr < 0) {
+		dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
+			__LINE__, __func__, mtrr);
+		mtrr = -1;
+	}
+
+err_out:
+	return mtrr;
+}
+#endif /* defined(CONFIG_MTRR) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) */
+
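+/* Mark the testchip local memory as write-combined: on v4.1+ kernels use
+ * arch_phys_wc_add() (with the v4.9+ memtype reservation), otherwise fall
+ * back to raw MTRR manipulation when CONFIG_MTRR is set.
+ */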
+int tc_mtrr_setup(struct tc_device *tc)
+{
+	int err = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+	/* Register the LMA as write combined */
+	err = arch_io_reserve_memtype_wc(tc->tc_mem.base,
+					 tc->tc_mem.size);
+	if (err)
+		return -ENODEV;
+#endif
+	/* Enable write combining */
+	tc->mtrr = arch_phys_wc_add(tc->tc_mem.base,
+				    tc->tc_mem.size);
+	if (tc->mtrr < 0) {
+		err = -ENODEV;
+		goto err_out;
+	}
+
+#elif defined(CONFIG_MTRR)
+	/* Enable mtrr region caching */
+	tc->mtrr = mtrr_setup(tc->pdev,
+			      tc->tc_mem.base,
+			      tc->tc_mem.size);
+	if (tc->mtrr == -2) {
+		err = -ENODEV;
+		goto err_out;
+	}
+#endif
+	return err;
+
+err_out:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+	arch_io_free_memtype_wc(tc->tc_mem.base,
+				tc->tc_mem.size);
+#endif
+	return err;
+}
+
+void tc_mtrr_cleanup(struct tc_device *tc)
+{
+	if (tc->mtrr >= 0) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+		arch_phys_wc_del(tc->mtrr);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+		arch_io_free_memtype_wc(tc->tc_mem.base,
+					tc->tc_mem.size);
+#endif
+#elif defined(CONFIG_MTRR)
+		int err;
+
+		err = mtrr_del(tc->mtrr,
+			       tc->tc_mem.base,
+			       tc->tc_mem.size);
+		if (err < 0)
+			dev_err(&tc->pdev->dev,
+				"mtrr_del failed (%d)\n", err);
+#endif
+	}
+}
+
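+/* Heuristic check that an SAI RX bank has trained: training and training
+ * ack must have completed, the eye readings must be sane, the ack pattern
+ * must not have failed more than 4 times, and the eye must span at least
+ * 7 taps. Returns 1 when aligned, 0 otherwise.
+ */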
+int tc_is_interface_aligned(u32 eyes, u32 clk_taps, u32 train_ack)
+{
+	u32	max_eye_start = eyes >> 16;
+	u32	min_eye_end   = eyes & 0xffff;
+
+	/* If either the training or training ack failed, we haven't aligned */
+	if (!(clk_taps & 0x10000) || !(train_ack & 0x100))
+		return 0;
+
+	/* If the max eye >= min eye it means the readings are nonsense */
+	if (max_eye_start >= min_eye_end)
+		return 0;
+
+	/* If we failed the ack pattern more than 4 times */
+	if (((train_ack & 0xf0) >> 4) > 4)
+		return 0;
+
+	/* If there are fewer than 7 taps (240 ps at 40 ps/tap; this number
+	 * should be lower for the FPGA, since its taps are bigger). We should
+	 * really calculate the "7" based on the interface clock speed.
+	 */
+	if ((min_eye_end - max_eye_start) < 7)
+		return 0;
+
+	return 1;
+}
+
+int tc_iopol32_nonzero(u32 mask, void __iomem *addr)
+{
+	int polnum;
+	u32 read_value;
+
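+	/* Poll at 20 ms intervals; 50 polls gives a timeout of ~1 second. */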
+	for (polnum = 0; polnum < 50; polnum++) {
+		read_value = ioread32(addr) & mask;
+		if (read_value != 0)
+			break;
+		msleep(20);
+	}
+	if (polnum == 50) {
+		pr_err(DRV_NAME " iopol32_nonzero timeout\n");
+		return -ETIME;
+	}
+	return 0;
+}
+
+int request_pci_io_addr(struct pci_dev *pdev, u32 index,
+	resource_size_t offset, resource_size_t length)
+{
+	resource_size_t start, end;
+
+	start = pci_resource_start(pdev, index);
+	end = pci_resource_end(pdev, index);
+
+	if ((start + offset + length - 1) > end)
+		return -EIO;
+	if (pci_resource_flags(pdev, index) & IORESOURCE_IO) {
+		if (request_region(start + offset, length, DRV_NAME) == NULL)
+			return -EIO;
+	} else {
+		if (request_mem_region(start + offset, length, DRV_NAME)
+			== NULL)
+			return -EIO;
+	}
+	return 0;
+}
+
+void release_pci_io_addr(struct pci_dev *pdev, u32 index,
+	resource_size_t start, resource_size_t length)
+{
+	if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
+		release_region(start, length);
+	else
+		release_mem_region(start, length);
+}
+
+int setup_io_region(struct pci_dev *pdev,
+	struct tc_io_region *region, u32 index,
+	resource_size_t offset,	resource_size_t size)
+{
+	int err;
+	resource_size_t pci_phys_addr;
+
+	err = request_pci_io_addr(pdev, index, offset, size);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Failed to request tc registers (err=%d)\n", err);
+		return -EIO;
+	}
+	pci_phys_addr = pci_resource_start(pdev, index);
+	region->region.base = pci_phys_addr + offset;
+	region->region.size = size;
+
+	region->registers
+		= ioremap_nocache(region->region.base, region->region.size);
+
+	if (!region->registers) {
+		dev_err(&pdev->dev, "Failed to map tc registers\n");
+		release_pci_io_addr(pdev, index,
+			region->region.base, region->region.size);
+		return -EIO;
+	}
+	return 0;
+}
+
+#if defined(TC_FAKE_INTERRUPTS)
+void tc_irq_fake_wrapper(unsigned long data)
+{
+	struct tc_device *tc = (struct tc_device *)data;
+
+	if (tc->odin)
+		odin_irq_handler(0, tc);
+	else
+		apollo_irq_handler(0, tc);
+
+	mod_timer(&tc->timer,
+		jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS));
+}
+#endif
+
+static int tc_register_pdp_device(struct tc_device *tc)
+{
+	int err = 0;
+
+	if (tc->odin)
+		err = odin_register_pdp_device(tc);
+	else
+		err = apollo_register_pdp_device(tc);
+
+	return err;
+}
+
+static int tc_register_ext_device(struct tc_device *tc)
+{
+	int err = 0;
+
+	if (tc->odin)
+		err = odin_register_ext_device(tc);
+	else
+		err = apollo_register_ext_device(tc);
+
+	return err;
+}
+
+static void tc_devres_release(struct device *dev, void *res)
+{
+	/* No extra cleanup needed */
+}
+
+static int tc_cleanup(struct pci_dev *pdev)
+{
+	struct tc_device *tc = devres_find(&pdev->dev,
+					   tc_devres_release, NULL, NULL);
+	int i, err = 0;
+
+	if (!tc) {
+		dev_err(&pdev->dev, "No tc device resources found\n");
+		return -ENODEV;
+	}
+
+	debugfs_remove(tc->debugfs_rogue_name);
+
+	for (i = 0; i < TC_INTERRUPT_COUNT; i++)
+		if (tc->interrupt_handlers[i].enabled)
+			tc_disable_interrupt(&pdev->dev, i);
+
+	if (tc->odin)
+		err = odin_cleanup(tc);
+	else
+		err = apollo_cleanup(tc);
+
+	debugfs_remove(tc->debugfs_tc_dir);
+
+	return err;
+}
+
+static int tc_init(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct tc_device *tc;
+	int err = 0;
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	int sec_mem_size = TC_SECURE_MEM_SIZE_BYTES;
+#else /* defined(SUPPORT_FAKE_SECURE_ION_HEAP) */
+	int sec_mem_size = 0;
+#endif /* defined(SUPPORT_FAKE_SECURE_ION_HEAP) */
+
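+	/* Open a devres group so that everything allocated during probe can
+	 * be released in one step on the error path.
+	 */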
+	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
+	tc = devres_alloc(tc_devres_release,
+		sizeof(*tc), GFP_KERNEL);
+	if (!tc) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	devres_add(&pdev->dev, tc);
+
+	err = tc_enable(&pdev->dev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"tc_enable failed %d\n", err);
+		goto err_release;
+	}
+
+	tc->pdev = pdev;
+
+	spin_lock_init(&tc->interrupt_handler_lock);
+	spin_lock_init(&tc->interrupt_enable_lock);
+
+	tc->debugfs_tc_dir = debugfs_create_dir(DRV_NAME, NULL);
+
+	if (pdev->vendor == PCI_VENDOR_ID_ODIN &&
+	    pdev->device == DEVICE_ID_ODIN) {
+
+		dev_info(&pdev->dev, "Odin detected");
+		tc->odin = true;
+
+		err = odin_init(tc, pdev,
+				tc_core_clock, tc_mem_clock,
+				tc_pdp_mem_size, sec_mem_size);
+		if (err)
+			goto err_dev_cleanup;
+
+	} else {
+		dev_info(&pdev->dev, "Apollo detected");
+		tc->odin = false;
+
+		err = apollo_init(tc, pdev,
+				  tc_core_clock, tc_mem_clock, tc_sys_clock,
+				  tc_pdp_mem_size, sec_mem_size);
+		if (err)
+			goto err_dev_cleanup;
+	}
+
+	/* Add the rogue name debugfs entry */
+	tc->debugfs_rogue_name =
+		debugfs_create_blob("rogue-name", S_IRUGO,
+			tc->debugfs_tc_dir,
+			&tc_debugfs_rogue_name_blobs[tc->version]);
+
+#if defined(TC_FAKE_INTERRUPTS)
+	dev_warn(&pdev->dev, "WARNING: Faking interrupts every %d ms",
+		FAKE_INTERRUPT_TIME_MS);
+#endif
+
+	/* Register pdp and ext platform devices */
+	err = tc_register_pdp_device(tc);
+	if (err)
+		goto err_dev_cleanup;
+
+	err = tc_register_ext_device(tc);
+	if (err)
+		goto err_dev_cleanup;
+
+	devres_remove_group(&pdev->dev, NULL);
+
+err_out:
+	if (err)
+		dev_err(&pdev->dev, "tc_init failed\n");
+
+	return err;
+
+err_dev_cleanup:
+	tc_cleanup(pdev);
+	tc_disable(&pdev->dev);
+err_release:
+	devres_release_group(&pdev->dev, NULL);
+	goto err_out;
+}
+
+static void tc_exit(struct pci_dev *pdev)
+{
+	struct tc_device *tc = devres_find(&pdev->dev,
+					   tc_devres_release, NULL, NULL);
+
+	if (!tc) {
+		dev_err(&pdev->dev, "No tc device resources found\n");
+		return;
+	}
+
+	if (tc->pdp_dev)
+		platform_device_unregister(tc->pdp_dev);
+
+	if (tc->ext_dev)
+		platform_device_unregister(tc->ext_dev);
+
+	tc_cleanup(pdev);
+
+	tc_disable(&pdev->dev);
+}
+
+struct pci_device_id tc_pci_tbl[] = {
+	{ PCI_VDEVICE(POWERVR, DEVICE_ID_PCI_APOLLO_FPGA) },
+	{ PCI_VDEVICE(POWERVR, DEVICE_ID_PCIE_APOLLO_FPGA) },
+	{ PCI_VDEVICE(ODIN, DEVICE_ID_ODIN) },
+	{ },
+};
+
+static struct pci_driver tc_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= tc_pci_tbl,
+	.probe		= tc_init,
+	.remove		= tc_exit,
+};
+
+module_pci_driver(tc_pci_driver);
+
+MODULE_DEVICE_TABLE(pci, tc_pci_tbl);
+
+int tc_enable(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return pci_enable_device(pdev);
+}
+EXPORT_SYMBOL(tc_enable);
+
+void tc_disable(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	pci_disable_device(pdev);
+}
+EXPORT_SYMBOL(tc_disable);
+
+int tc_set_interrupt_handler(struct device *dev, int interrupt_id,
+	void (*handler_function)(void *), void *data)
+{
+	struct tc_device *tc = devres_find(dev, tc_devres_release,
+		NULL, NULL);
+	int err = 0;
+	unsigned long flags;
+
+	if (!tc) {
+		dev_err(dev, "No tc device resources found\n");
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) {
+		dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id);
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	spin_lock_irqsave(&tc->interrupt_handler_lock, flags);
+
+	tc->interrupt_handlers[interrupt_id].handler_function =
+		handler_function;
+	tc->interrupt_handlers[interrupt_id].handler_data = data;
+
+	spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags);
+
+err_out:
+	return err;
+}
+EXPORT_SYMBOL(tc_set_interrupt_handler);
+
+int tc_enable_interrupt(struct device *dev, int interrupt_id)
+{
+	struct tc_device *tc = devres_find(dev, tc_devres_release,
+		NULL, NULL);
+	int err = 0;
+	unsigned long flags;
+
+	if (!tc) {
+		dev_err(dev, "No tc device resources found\n");
+		err = -ENODEV;
+		goto err_out;
+	}
+	if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) {
+		dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id);
+		err = -EINVAL;
+		goto err_out;
+	}
+	spin_lock_irqsave(&tc->interrupt_enable_lock, flags);
+
+	if (tc->interrupt_handlers[interrupt_id].enabled) {
+		dev_warn(dev, "Interrupt ID %d already enabled\n",
+			interrupt_id);
+		err = -EEXIST;
+		goto err_unlock;
+	}
+	tc->interrupt_handlers[interrupt_id].enabled = true;
+
+	if (tc->odin)
+		odin_enable_interrupt_register(tc, interrupt_id);
+	else
+		apollo_enable_interrupt_register(tc, interrupt_id);
+
+err_unlock:
+	spin_unlock_irqrestore(&tc->interrupt_enable_lock, flags);
+err_out:
+	return err;
+}
+EXPORT_SYMBOL(tc_enable_interrupt);
+
+int tc_disable_interrupt(struct device *dev, int interrupt_id)
+{
+	struct tc_device *tc = devres_find(dev, tc_devres_release,
+		NULL, NULL);
+	int err = 0;
+	unsigned long flags;
+
+	if (!tc) {
+		dev_err(dev, "No tc device resources found\n");
+		err = -ENODEV;
+		goto err_out;
+	}
+	if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) {
+		dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id);
+		err = -EINVAL;
+		goto err_out;
+	}
+	spin_lock_irqsave(&tc->interrupt_enable_lock, flags);
+
+	if (!tc->interrupt_handlers[interrupt_id].enabled) {
+		dev_warn(dev, "Interrupt ID %d already disabled\n",
+			interrupt_id);
+	}
+	tc->interrupt_handlers[interrupt_id].enabled = false;
+
+	if (tc->odin)
+		odin_disable_interrupt_register(tc, interrupt_id);
+	else
+		apollo_disable_interrupt_register(tc, interrupt_id);
+
+	spin_unlock_irqrestore(&tc->interrupt_enable_lock, flags);
+err_out:
+	return err;
+}
+EXPORT_SYMBOL(tc_disable_interrupt);
+
+int tc_sys_info(struct device *dev, u32 *tmp, u32 *pll)
+{
+	int err = -ENODEV;
+	struct tc_device *tc = devres_find(dev, tc_devres_release,
+		NULL, NULL);
+
+	if (!tc) {
+		dev_err(dev, "No tc device resources found\n");
+		goto err_out;
+	}
+
+	if (tc->odin)
+		err = odin_sys_info(tc, tmp, pll);
+	else
+		err = apollo_sys_info(tc, tmp, pll);
+
+err_out:
+	return err;
+}
+EXPORT_SYMBOL(tc_sys_info);
+
+int tc_sys_strings(struct device *dev,
+		   char *str_fpga_rev, size_t size_fpga_rev,
+		   char *str_tcf_core_rev, size_t size_tcf_core_rev,
+		   char *str_tcf_core_target_build_id,
+		   size_t size_tcf_core_target_build_id,
+		   char *str_pci_ver, size_t size_pci_ver,
+		   char *str_macro_ver, size_t size_macro_ver)
+{
+	int err = -ENODEV;
+
+	struct tc_device *tc = devres_find(dev, tc_devres_release,
+		NULL, NULL);
+
+	if (!tc) {
+		dev_err(dev, "No tc device resources found\n");
+		goto err_out;
+	}
+
+	if (!str_fpga_rev ||
+	    !size_fpga_rev ||
+	    !str_tcf_core_rev ||
+	    !size_tcf_core_rev ||
+	    !str_tcf_core_target_build_id ||
+	    !size_tcf_core_target_build_id ||
+	    !str_pci_ver ||
+	    !size_pci_ver ||
+	    !str_macro_ver ||
+	    !size_macro_ver) {
+
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	if (tc->odin) {
+		err = odin_sys_strings(tc,
+				 str_fpga_rev, size_fpga_rev,
+				 str_tcf_core_rev, size_tcf_core_rev,
+				 str_tcf_core_target_build_id,
+				 size_tcf_core_target_build_id,
+				 str_pci_ver, size_pci_ver,
+				 str_macro_ver, size_macro_ver);
+	} else {
+		err = apollo_sys_strings(tc,
+				 str_fpga_rev, size_fpga_rev,
+				 str_tcf_core_rev, size_tcf_core_rev,
+				 str_tcf_core_target_build_id,
+				 size_tcf_core_target_build_id,
+				 str_pci_ver, size_pci_ver,
+				 str_macro_ver, size_macro_ver);
+	}
+
+err_out:
+	return err;
+}
+EXPORT_SYMBOL(tc_sys_strings);
+
+int tc_core_clock_speed(struct device *dev)
+{
+	return tc_core_clock;
+}
+EXPORT_SYMBOL(tc_core_clock_speed);
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_drv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_drv.h
new file mode 100644
index 0000000..e843287
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_drv.h
@@ -0,0 +1,144 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _TC_DRV_H
+#define _TC_DRV_H
+
+/*
+ * This contains the hooks for the testchip driver, as used by the Rogue and
+ * PDP sub-devices, and the platform data passed to each of their drivers
+ */
+
+#include <linux/pci.h>
+#include <linux/device.h>
+
+#if defined(SUPPORT_ION)
+
+#include PVR_ANDROID_ION_HEADER
+
+/* NOTE: This should be kept in sync with the user side (in buffer_generic.c) */
+#if defined(SUPPORT_RGX)
+#define ION_HEAP_TC_ROGUE    (ION_HEAP_TYPE_CUSTOM+1)
+#endif
+#define ION_HEAP_TC_PDP      (ION_HEAP_TYPE_CUSTOM+2)
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+#define ION_HEAP_TC_SECURE   (ION_HEAP_TYPE_CUSTOM+3)
+#endif
+
+#endif /* defined(SUPPORT_ION) */
+
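+/* Interrupt sources multiplexed onto the shared testchip interrupt line */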
+#define TC_INTERRUPT_PDP     0
+#define TC_INTERRUPT_EXT     1
+#define TC_INTERRUPT_TC5_PDP 2
+#define TC_INTERRUPT_COUNT   3
+
+int tc_enable(struct device *dev);
+void tc_disable(struct device *dev);
+
+int tc_enable_interrupt(struct device *dev, int interrupt_id);
+int tc_disable_interrupt(struct device *dev, int interrupt_id);
+
+int tc_set_interrupt_handler(struct device *dev, int interrupt_id,
+	void (*handler_function)(void *), void *handler_data);
+
+int tc_sys_info(struct device *dev, u32 *tmp, u32 *pll);
+int tc_sys_strings(struct device *dev,
+	char *str_fpga_rev, size_t size_fpga_rev, char *str_tcf_core_rev,
+	size_t size_tcf_core_rev, char *str_tcf_core_target_build_id,
+	size_t size_tcf_core_target_build_id, char *str_pci_ver,
+	size_t size_pci_ver, char *str_macro_ver, size_t size_macro_ver);
+int tc_core_clock_speed(struct device *dev);
+
+#define APOLLO_DEVICE_NAME_PDP   "apollo_pdp"
+#define ODN_DEVICE_NAME_PDP      "odin_pdp"
+
+/* The following structs are initialised and passed down by the parent tc
+ * driver to the respective sub-drivers
+ */
+
+struct tc_pdp_platform_data {
+#if defined(SUPPORT_ION)
+	struct ion_device *ion_device;
+	int ion_heap_id;
+#endif
+	resource_size_t memory_base;
+
+	/* The following is used by the drm_pdp driver as it manages the
+	 * pdp memory
+	 */
+	resource_size_t pdp_heap_memory_base;
+	resource_size_t pdp_heap_memory_size;
+};
+
+#if defined(SUPPORT_RGX)
+
+#define TC_DEVICE_NAME_ROGUE "tc_rogue"
+
+struct tc_rogue_platform_data {
+#if defined(SUPPORT_ION)
+	struct ion_device *ion_device;
+	int ion_heap_id;
+#endif
+
+	/* The base address of the testchip memory (CPU physical address) -
+	 * used to convert from CPU-Physical to device-physical addresses
+	 */
+	resource_size_t tc_memory_base;
+
+	/* The following is used to setup the services heaps that map to the
+	 * ion heaps
+	 */
+	resource_size_t pdp_heap_memory_base;
+	resource_size_t pdp_heap_memory_size;
+	resource_size_t rogue_heap_memory_base;
+	resource_size_t rogue_heap_memory_size;
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	resource_size_t secure_heap_memory_base;
+	resource_size_t secure_heap_memory_size;
+#endif
+};
+
+#endif /* defined(SUPPORT_RGX) */
+
+#endif /* _TC_DRV_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_drv_internal.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_drv_internal.h
new file mode 100644
index 0000000..c0c5327
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_drv_internal.h
@@ -0,0 +1,175 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _TC_DRV_INTERNAL_H
+#define _TC_DRV_INTERNAL_H
+
+#include "tc_drv.h"
+
+#include <linux/version.h>
+
+#if defined(TC_FAKE_INTERRUPTS)
+#define FAKE_INTERRUPT_TIME_MS 16
+#include <linux/timer.h>
+#include <linux/time.h>
+#endif
+
+#define DRV_NAME "tc"
+
+/* This is a guess of what's a minimum sensible size for the ext heap
+ * It is only used for a warning if the ext heap is smaller, and does
+ * not affect the functional logic in any way
+ */
+#define TC_EXT_MINIMUM_MEM_SIZE (10*1024*1024)
+
+#if defined(SUPPORT_ION)
+ #if defined(SUPPORT_RGX)
+  #if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+   #define TC_ION_HEAP_COUNT 4
+  #else
+   #define TC_ION_HEAP_COUNT 3
+  #endif
+ #else
+  #define TC_ION_HEAP_COUNT 2
+ #endif
+#endif
+
+/* Convert a byte offset to a 32 bit dword offset */
+#define DWORD_OFFSET(byte_offset)  ((byte_offset)>>2)
+
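+/* Treat each hex nibble as a decimal digit (BCD), e.g. HEX2DEC(0x42) == 42;
+ * used when decoding the version registers.
+ */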
+#define HEX2DEC(v)                 ((((v) >> 4) * 10) + ((v) & 0x0F))
+
+enum tc_version_t {
+	APOLLO_VERSION_TCF_2 = 0,
+	APOLLO_VERSION_TCF_5,
+	APOLLO_VERSION_TCF_BONNIE,
+	ODIN_VERSION_TCF_BONNIE,
+	ODIN_VERSION_FPGA
+};
+
+struct tc_interrupt_handler {
+	bool enabled;
+	void (*handler_function)(void *);
+	void *handler_data;
+};
+
+struct tc_region {
+	resource_size_t base;
+	resource_size_t size;
+};
+
+struct tc_io_region {
+	struct tc_region region;
+	void __iomem *registers;
+};
+
+struct tc_device {
+	struct pci_dev *pdev;
+
+	enum tc_version_t version;
+	bool odin;
+
+	struct tc_io_region tcf;
+	struct tc_io_region tcf_pll;
+
+	struct tc_region tc_mem;
+
+	struct platform_device *pdp_dev;
+
+	resource_size_t pdp_heap_mem_base;
+	resource_size_t pdp_heap_mem_size;
+
+	struct platform_device *ext_dev;
+
+	resource_size_t ext_heap_mem_base;
+	resource_size_t ext_heap_mem_size;
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	resource_size_t secure_heap_mem_base;
+	resource_size_t secure_heap_mem_size;
+#endif
+
+#if defined(CONFIG_MTRR) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	int mtrr;
+#endif
+	spinlock_t interrupt_handler_lock;
+	spinlock_t interrupt_enable_lock;
+
+	struct tc_interrupt_handler
+		interrupt_handlers[TC_INTERRUPT_COUNT];
+
+#if defined(TC_FAKE_INTERRUPTS)
+	struct timer_list timer;
+#endif
+
+#if defined(SUPPORT_ION)
+	struct ion_device *ion_device;
+	struct ion_heap *ion_heaps[TC_ION_HEAP_COUNT];
+	int ion_heap_count;
+#endif
+
+	struct dentry *debugfs_tc_dir;
+	struct dentry *debugfs_rogue_name;
+};
+
+int tc_mtrr_setup(struct tc_device *tc);
+void tc_mtrr_cleanup(struct tc_device *tc);
+
+int tc_is_interface_aligned(u32 eyes, u32 clk_taps, u32 train_ack);
+
+int tc_iopol32_nonzero(u32 mask, void __iomem *addr);
+
+int request_pci_io_addr(struct pci_dev *pdev, u32 index,
+	resource_size_t offset, resource_size_t length);
+void release_pci_io_addr(struct pci_dev *pdev, u32 index,
+	resource_size_t start, resource_size_t length);
+
+int setup_io_region(struct pci_dev *pdev,
+	struct tc_io_region *region, u32 index,
+	resource_size_t offset,	resource_size_t size);
+
+#if defined(TC_FAKE_INTERRUPTS)
+void tc_irq_fake_wrapper(unsigned long data);
+#endif /* defined(TC_FAKE_INTERRUPTS) */
+
+#endif /* _TC_DRV_INTERNAL_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_ion.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_ion.h
new file mode 100644
index 0000000..d24b51a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_ion.h
@@ -0,0 +1,54 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _TC_ION_H
+#define _TC_ION_H
+
+struct ion_client;
+struct tc_device;
+
+int tc_ion_init(struct tc_device *tc, int mem_bar);
+
+void tc_ion_deinit(struct tc_device *tc, int mem_bar);
+
+#endif /* _TC_ION_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_odin.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_odin.c
new file mode 100644
index 0000000..75319ea
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_odin.c
@@ -0,0 +1,1237 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*
+ * This is a device driver for the odin testchip framework. It creates
+ * platform devices for the pdp and ext sub-devices, and exports functions
+ * to manage the shared interrupt handling.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+
+#include "tc_drv_internal.h"
+#include "tc_odin.h"
+#include "tc_ion.h"
+
+/* Odin (3rd gen TCF FPGA) */
+#include "odin_defs.h"
+#include "odin_regs.h"
+#include "bonnie_tcf.h"
+
+/* Macros to set and get register fields */
+#define REG_FIELD_GET(v, str) \
+	(u32)(((v) & (str##_MASK)) >> (str##_SHIFT))
+#define REG_FIELD_SET(v, f, str) \
+	v = (u32)(((v) & (u32)~(str##_MASK)) | \
+		  (u32)(((f) << (str##_SHIFT)) & (str##_MASK)))
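+
+/* Fields are named by prefix: REG_FIELD_GET(v, FOO) expands to
+ * ((v & FOO_MASK) >> FOO_SHIFT) for a hypothetical field FOO.
+ */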
+
+#define SAI_STATUS_UNALIGNED 0
+#define SAI_STATUS_ALIGNED   1
+#define SAI_STATUS_ERROR     2
+
+#if defined(SUPPORT_RGX)
+
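+/* Read a DUT register over the TCF SPI master: write the register offset
+ * with the read flag (0x40000) set, kick the GO bit, poll the status
+ * register until the transfer completes (0x08), then fetch the data.
+ * Gives up after ~10000 polls.
+ */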
+static int spi_read(struct tc_device *tc, u32 off, u32 *val)
+{
+	int cnt = 0;
+	u32 spi_mst_status;
+
+	iowrite32(0x40000 | off, tc->tcf.registers
+		  + ODN_REG_BANK_TCF_SPI_MASTER
+		  + ODN_SPI_MST_ADDR_RDNWR);
+	iowrite32(0x1, tc->tcf.registers
+		  + ODN_REG_BANK_TCF_SPI_MASTER
+		  + ODN_SPI_MST_GO);
+	udelay(100);
+
+	do {
+		spi_mst_status = ioread32(tc->tcf.registers
+					  + ODN_REG_BANK_TCF_SPI_MASTER
+					  + ODN_SPI_MST_STATUS);
+
+		if (cnt++ > 10000) {
+			dev_err(&tc->pdev->dev,
+				"spi_read: Time out reading SPI reg (0x%x)\n",
+				off);
+			return -1;
+		}
+
+	} while (spi_mst_status != 0x08);
+
+	*val = ioread32(tc->tcf.registers
+			+ ODN_REG_BANK_TCF_SPI_MASTER
+			+ ODN_SPI_MST_RDATA);
+
+	return 0;
+}
+
+/* returns 1 for aligned, 0 for unaligned */
+static int get_odin_sai_status(struct tc_device *tc, int bank)
+{
+	void __iomem *bank_addr = tc->tcf.registers
+					+ ODN_REG_BANK_SAI_RX_DDR(bank);
+	void __iomem *reg_addr;
+	u32 eyes;
+	u32 clk_taps;
+	u32 train_ack;
+
+	reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_EYES;
+	eyes = ioread32(reg_addr);
+
+	reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_CLK_TAPS;
+	clk_taps = ioread32(reg_addr);
+
+	reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK;
+	train_ack = ioread32(reg_addr);
+
+#if 0 /* enable this to get debug info if the board is not aligning */
+	dev_info(&tc->pdev->dev,
+		"odin bank %d align: eyes=%08x clk_taps=%08x train_ack=%08x\n",
+		bank, eyes, clk_taps, train_ack);
+#endif
+
+	if (tc_is_interface_aligned(eyes, clk_taps, train_ack))
+		return SAI_STATUS_ALIGNED;
+
+	dev_warn(&tc->pdev->dev, "odin bank %d is unaligned\n", bank);
+	return SAI_STATUS_UNALIGNED;
+}
+
+/* Read the odin multi clocked bank align status.
+ * Returns non-zero for aligned, 0 for unaligned.
+ */
+static int read_odin_mca_status(struct tc_device *tc)
+{
+	void __iomem *bank_addr = tc->tcf.registers
+					+ ODN_REG_BANK_MULTI_CLK_ALIGN;
+	void __iomem *reg_addr = bank_addr + ODN_MCA_DEBUG_MCA_STATUS;
+	u32 mca_status;
+
+	mca_status = ioread32(reg_addr);
+
+#if 0 /* Enable this if there are alignment issues */
+	dev_info(&tc->pdev->dev,
+		"Odin MCA_STATUS = %08x\n", mca_status);
+#endif
+	return mca_status & ODN_ALIGNMENT_FOUND_MASK;
+}
+
+/* Read the DUT multi clocked bank align status.
+ * Returns 1 for aligned, 0 for unaligned
+ */
+static int read_dut_mca_status(struct tc_device *tc)
+{
+	u32 mca_status;
+	const int mca_status_register_offset = 1; /* not in bonnie_tcf.h */
+	const int spi_address = DWORD_OFFSET(BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN)
+				+ mca_status_register_offset;
+
+	spi_read(tc, spi_address, &mca_status);
+
+#if 0 /* Enable this if there are alignment issues */
+	dev_info(&tc->pdev->dev,
+		"DUT MCA_STATUS = %08x\n", mca_status);
+#endif
+	return mca_status & 1;  /* 'alignment found' status is bit 0 */
+}
+
+/* returns SAI_STATUS_ALIGNED, SAI_STATUS_UNALIGNED or SAI_STATUS_ERROR */
+static int get_dut_sai_status(struct tc_device *tc, int bank)
+{
+	u32 eyes;
+	u32 clk_taps;
+	u32 train_ack;
+	const u32 bank_base = DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_RX_1
+				+ (BONNIE_TCF_OFFSET_SAI_RX_DELTA * bank));
+	int spi_timeout;
+
+	spi_timeout = spi_read(tc, bank_base
+		+ DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_EYES), &eyes);
+	if (spi_timeout)
+		return SAI_STATUS_ERROR;
+
+	spi_read(tc, bank_base
+		+ DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_CLK_TAPS), &clk_taps);
+	spi_read(tc, bank_base
+		+ DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_TRAIN_ACK), &train_ack);
+
+#if 0 /* enable this to get debug info if the board is not aligning */
+	dev_info(&tc->pdev->dev,
+		"dut  bank %d align: eyes=%08x clk_taps=%08x train_ack=%08x\n",
+		bank, eyes, clk_taps, train_ack);
+#endif
+
+	if (tc_is_interface_aligned(eyes, clk_taps, train_ack))
+		return SAI_STATUS_ALIGNED;
+
+	dev_warn(&tc->pdev->dev, "dut bank %d is unaligned\n", bank);
+	return SAI_STATUS_UNALIGNED;
+}
+
+/*
+ * Returns the divider group register fields for the specified counter value.
+ * See Xilinx Application Note xapp888.
+ */
+static void odin_mmcm_reg_param_calc(u32 value, u32 *low, u32 *high,
+				     u32 *edge, u32 *no_count)
+{
+	if (value == 1U) {
+		*no_count = 1U;
+		*edge = 0;
+		*high = 0;
+		*low = 0;
+	} else {
+		*no_count = 0;
+		*edge = value % 2U;
+		*high = value >> 1;
+		*low = (value + *edge) >> 1U;
+	}
+}
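+
+/*
+ * Worked example: value = 5 gives no_count = 0, edge = 5 % 2 = 1,
+ * high = 5 >> 1 = 2 and low = (5 + 1) >> 1 = 3, i.e. high + low = 5
+ * counter cycles, with the edge bit covering the odd cycle (see xapp888).
+ */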
+
+/*
+ * Returns the MMCM Input Divider, FB Multiplier and Output Divider values for
+ * the specified input frequency and target output frequency.
+ * The function does not support fractional values for the multiplier or the
+ * output divider. As per the Xilinx 7 series FPGAs clocking resources user
+ * guide, it aims for the highest VCO and the smallest D and M.
+ * Configured for Xilinx Virtex7 speed grade 2.
+ */
+static int odin_mmcm_counter_calc(struct device *dev,
+				  u32 freq_input, u32 freq_output,
+				  u32 *d, u32 *m, u32 *o)
+{
+	u32 d_min, d_max;
+	u32 m_min, m_max, m_ideal;
+	u32 vco_max_freq;
+	u32 i, j, o_tmp;
+
+	/*
+	 * Check specified input frequency is within range
+	 */
+	if (freq_input < ODN_INPUT_CLOCK_SPEED_MIN) {
+		dev_err(dev, "Input frequency (%u hz) below minimum supported value (%u hz)\n",
+			freq_input, ODN_INPUT_CLOCK_SPEED_MIN);
+		return -EINVAL;
+	}
+	if (freq_input > ODN_INPUT_CLOCK_SPEED_MAX) {
+		dev_err(dev, "Input frequency (%u hz) above maximum supported value (%u hz)\n",
+			freq_input, ODN_INPUT_CLOCK_SPEED_MAX);
+		return -EINVAL;
+	}
+
+	/*
+	 * Check specified target frequency is within range
+	 */
+	if (freq_output < ODN_OUTPUT_CLOCK_SPEED_MIN) {
+		dev_err(dev, "Output frequency (%u hz) below minimum supported value (%u hz)\n",
+			freq_output, ODN_OUTPUT_CLOCK_SPEED_MIN);
+		return -EINVAL;
+	}
+	if (freq_output > ODN_OUTPUT_CLOCK_SPEED_MAX) {
+		dev_err(dev, "Output frequency (%u hz) above maximum supported value (%u hz)\n",
+			freq_output, ODN_OUTPUT_CLOCK_SPEED_MAX);
+		return -EINVAL;
+	}
+
+	/*
+	 * Determine upper limit of VCO from spec and register
+	 */
+	vco_max_freq = min(ODN_VCO_MAX,
+			   (freq_output * (u32)ODN_OREG_VALUE_MAX));
+
+	/*
+	 * Calculate min and max for Input Divider.
+	 * Refer to the Xilinx 7 series FPGAs clocking resources user guide,
+	 * equations 3-6 and 3-7.
+	 */
+	d_min = DIV_ROUND_UP(freq_input, ODN_PFD_MAX);
+	d_max = min(freq_input/ODN_PFD_MIN, (u32)ODN_DREG_VALUE_MAX);
+
+	/*
+	 * Calculate min and max for the multiplier.
+	 * Refer to the Xilinx 7 series FPGAs clocking resources user guide,
+	 * equations 3-8 and 3-9.
+	 */
+	m_min = DIV_ROUND_UP((ODN_VCO_MIN * d_min), freq_input);
+	m_max = min(((ODN_VCO_MAX * d_max) / freq_input),
+		    (u32)ODN_MREG_VALUE_MAX);
+
+	for (i = d_min; i <= d_max; i++) {
+		/*
+		 * Refer to the Xilinx 7 series FPGAs clocking resources user
+		 * guide, equation 3-10.
+		 */
+		m_ideal = min(((i * ODN_VCO_MAX)/freq_input), m_max);
+
+		for (j = m_ideal; j >= m_min; j -= 1) {
+			/*
+			 * Skip if the VCO for the given 'm' and 'd' values is
+			 * not an integer, since fractional components are not
+			 * supported.
+			 */
+			if (((freq_input * j) % i) != 0)
+				continue;
+
+			/*
+			 * Skip if the divider for the given 'm' and 'd'
+			 * values is not an integer, since fractional
+			 * components are not supported.
+			 */
+			if ((freq_input * j) % (i * freq_output) != 0)
+				continue;
+
+			/* Calculate the output divider value. */
+			o_tmp = (freq_input * j)/(i * freq_output);
+
+			*d = i;
+			*m = j;
+			*o = o_tmp;
+			return 0;
+		}
+	}
+
+	dev_err(dev, "Unable to find integer values for d, m and o for requested frequency (%u)\n",
+		freq_output);
+
+	return -ERANGE;
+}
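+
+/*
+ * Illustrative numbers only (the real limits come from odin_defs.h): with
+ * freq_input = 100 MHz and freq_output = 50 MHz, a VCO window admitting
+ * d = 1 and m = 10 gives VCO = 100 * 10 / 1 = 1000 MHz and
+ * o = 1000 / 50 = 20, satisfying the integer-only constraints above.
+ */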
+
+static int odin_fpga_set_dut_core_clk(struct tc_device *tc,
+				      u32 input_clk, u32 output_clk)
+{
+	int err = 0;
+	u32 in_div, mul, out_div;
+	u32 high_time, low_time, edge, no_count;
+	u32 value;
+	void __iomem *base = tc->tcf.registers;
+	void __iomem *clk_blk_base = base + ODN_REG_BANK_ODN_CLK_BLK;
+	struct device *dev = &tc->pdev->dev;
+
+	err = odin_mmcm_counter_calc(dev, input_clk, output_clk, &in_div,
+				     &mul, &out_div);
+	if (err != 0)
+		return err;
+
+	/* Put DUT into reset */
+	iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK,
+		  base + ODN_CORE_EXTERNAL_RESETN);
+	msleep(20);
+
+	/* Put DUT Core MMCM into reset */
+	iowrite32(ODN_CLK_GEN_RESET_DUT_CORE_MMCM_MASK,
+		  base + ODN_CORE_CLK_GEN_RESET);
+	msleep(20);
+
+	/* Calculate the register fields for output divider */
+	odin_mmcm_reg_param_calc(out_div, &high_time, &low_time,
+				 &edge, &no_count);
+
+	/* Read-modify-write the required fields to output divider register 1 */
+	value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER1);
+	REG_FIELD_SET(value, high_time,
+			ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME);
+	iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER1);
+
+	/* Read-modify-write the required fields to output divider register 2 */
+	value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER2);
+	REG_FIELD_SET(value, edge,
+			ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE);
+	REG_FIELD_SET(value, no_count,
+			ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT);
+	iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER2);
+
+	/* Calculate the register fields for multiplier */
+	odin_mmcm_reg_param_calc(mul, &high_time, &low_time,
+				 &edge, &no_count);
+
+	/* Read-modify-write the required fields to multiplier register 1 */
+	value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1);
+	REG_FIELD_SET(value, high_time,
+			ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME);
+	iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1);
+
+	/* Read-modify-write the required fields to multiplier register 2 */
+	value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2);
+	REG_FIELD_SET(value, edge,
+			ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE);
+	REG_FIELD_SET(value, no_count,
+			ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT);
+	iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2);
+
+	/* Calculate the register fields for input divider */
+	odin_mmcm_reg_param_calc(in_div, &high_time, &low_time,
+				 &edge, &no_count);
+
+	/* Read-modify-write the required fields to input divider register 1 */
+	value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1);
+	REG_FIELD_SET(value, high_time,
+			 ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			 ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME);
+	REG_FIELD_SET(value, edge,
+			 ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE);
+	REG_FIELD_SET(value, no_count,
+			 ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT);
+	iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1);
+
+	/* Bring DUT clock MMCM out of reset */
+	iowrite32(0, tc->tcf.registers + ODN_CORE_CLK_GEN_RESET);
+
+	err = tc_iopol32_nonzero(ODN_MMCM_LOCK_STATUS_DUT_CORE,
+				 base + ODN_CORE_MMCM_LOCK_STATUS);
+	if (err != 0) {
+		dev_err(dev, "MMCM failed to lock for DUT core\n");
+		return err;
+	}
+
+	/* Bring DUT out of reset */
+	iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK |
+		  ODN_EXTERNAL_RESETN_DUT_MASK,
+		  tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN);
+	msleep(20);
+
+	dev_info(dev, "DUT core clock set-up successful\n");
+
+	return err;
+}
+
+static int odin_fpga_set_dut_if_clk(struct tc_device *tc,
+				    u32 input_clk, u32 output_clk)
+{
+	int err = 0;
+	u32 in_div, mul, out_div;
+	u32 high_time, low_time, edge, no_count;
+	u32 value;
+	void __iomem *base = tc->tcf.registers;
+	void __iomem *clk_blk_base = base + ODN_REG_BANK_ODN_CLK_BLK;
+	struct device *dev = &tc->pdev->dev;
+
+	err = odin_mmcm_counter_calc(dev, input_clk, output_clk,
+				     &in_div, &mul, &out_div);
+	if (err != 0)
+		return err;
+
+	/* Put DUT into reset */
+	iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK,
+		  base + ODN_CORE_EXTERNAL_RESETN);
+	msleep(20);
+
+	/* Put DUT Core MMCM into reset */
+	iowrite32(ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK,
+		  base + ODN_CORE_CLK_GEN_RESET);
+	msleep(20);
+
+	/* Calculate the register fields for output divider */
+	odin_mmcm_reg_param_calc(out_div, &high_time, &low_time,
+				 &edge, &no_count);
+
+	/* Read-modify-write the required fields to output divider register 1 */
+	value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1);
+	REG_FIELD_SET(value, high_time,
+			ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME);
+	iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1);
+
+	/* Read-modify-write the required fields to output divider register 2 */
+	value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2);
+	REG_FIELD_SET(value, edge,
+			ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE);
+	REG_FIELD_SET(value, no_count,
+			ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT);
+	iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2);
+
+	/* Calculate the register fields for multiplier */
+	odin_mmcm_reg_param_calc(mul, &high_time, &low_time, &edge, &no_count);
+
+	/* Read-modify-write the required fields to multiplier register 1 */
+	value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1);
+	REG_FIELD_SET(value, high_time,
+			ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME);
+	iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1);
+
+	/* Read-modify-write the required fields to multiplier register 2 */
+	value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2);
+	REG_FIELD_SET(value, edge,
+			ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE);
+	REG_FIELD_SET(value, no_count,
+			ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT);
+	iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2);
+
+	/* Calculate the register fields for input divider */
+	odin_mmcm_reg_param_calc(in_div, &high_time, &low_time,
+				 &edge, &no_count);
+
+	/* Read-modify-write the required fields to input divider register 1 */
+	value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1);
+	REG_FIELD_SET(value, high_time,
+			 ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			 ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME);
+	REG_FIELD_SET(value, edge,
+			 ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE);
+	REG_FIELD_SET(value, no_count,
+			 ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT);
+	iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1);
+
+	/* Bring DUT interface clock MMCM out of reset */
+	iowrite32(0, tc->tcf.registers + ODN_CORE_CLK_GEN_RESET);
+
+	err = tc_iopol32_nonzero(ODN_MMCM_LOCK_STATUS_DUT_IF,
+				 base + ODN_CORE_MMCM_LOCK_STATUS);
+	if (err != 0) {
+		dev_err(dev, "MMCM failed to lock for DUT IF\n");
+		return err;
+	}
+
+	/* Bring DUT out of reset */
+	iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK |
+		  ODN_EXTERNAL_RESETN_DUT_MASK,
+		  tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN);
+	msleep(20);
+
+	dev_info(dev, "DUT IF clock set-up successful\n");
+
+	return err;
+}
+
+static int odin_hard_reset_fpga(struct tc_device *tc,
+				int core_clock, int mem_clock)
+{
+	int err = 0;
+
+	err = odin_fpga_set_dut_core_clk(tc, ODN_INPUT_CLOCK_SPEED, core_clock);
+	if (err != 0)
+		goto err_out;
+
+	err = odin_fpga_set_dut_if_clk(tc, ODN_INPUT_CLOCK_SPEED, mem_clock);
+
+err_out:
+	return err;
+}
+
+static int odin_hard_reset_bonnie(struct tc_device *tc)
+{
+	int reset_cnt = 0;
+	bool aligned = false;
+	int alignment_found;
+
+	msleep(100);
+
+	/* It is essential to do an SPI reset once on power-up before
+	 * doing any DUT reads via the SPI interface.
+	 */
+	iowrite32(1, tc->tcf.registers		/* set bit 1 low */
+			+ ODN_CORE_EXTERNAL_RESETN);
+	msleep(20);
+
+	iowrite32(3, tc->tcf.registers		/* set bit 1 high */
+			+ ODN_CORE_EXTERNAL_RESETN);
+	msleep(20);
+
+	while (!aligned && (reset_cnt < 20)) {
+
+		int bank;
+
+		/* Reset the DUT to allow the SAI to retrain */
+		iowrite32(2, /* set bit 0 low */
+			tc->tcf.registers
+			+ ODN_CORE_EXTERNAL_RESETN);
+
+		/* Hold the DUT in reset for 50mS */
+		msleep(50);
+
+		/* Take the DUT out of reset */
+		iowrite32(3, /* set bit 0 high */
+			tc->tcf.registers
+			+ ODN_CORE_EXTERNAL_RESETN);
+		reset_cnt++;
+
+		/* Wait 200mS for the DUT to stabilise */
+		msleep(200);
+
+		/* Check the odin Multi Clocked bank Align status */
+		alignment_found = read_odin_mca_status(tc);
+		dev_info(&tc->pdev->dev,
+				"Odin mca_status indicates %s\n",
+				alignment_found ? "aligned" : "UNALIGNED");
+
+		/* Check the DUT MCA status */
+		alignment_found = read_dut_mca_status(tc);
+		dev_info(&tc->pdev->dev,
+				"DUT mca_status indicates %s\n",
+				alignment_found ? "aligned" : "UNALIGNED");
+
+		/* If all banks have aligned then the reset was successful */
+		for (bank = 0; bank < 10; bank++) {
+
+			int dut_aligned = 0;
+			int odin_aligned = 0;
+
+			odin_aligned = get_odin_sai_status(tc, bank);
+			dut_aligned = get_dut_sai_status(tc, bank);
+
+			if (dut_aligned == SAI_STATUS_ERROR)
+				return SAI_STATUS_ERROR;
+
+			if (!dut_aligned || !odin_aligned) {
+				aligned = false;
+				break;
+			}
+			aligned = true;
+		}
+
+		if (aligned) {
+			dev_info(&tc->pdev->dev,
+				"all banks have aligned\n");
+			break;
+		}
+
+		dev_warn(&tc->pdev->dev,
+			"Not all banks have aligned, trying again\n");
+	}
+
+	if (!aligned)
+		dev_warn(&tc->pdev->dev, "odin_hard_reset failed\n");
+
+	return (aligned) ? 0 : 1; /* return 0 for success */
+}
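+
+/* Note that the SAI_STATUS_ERROR (2) early-out above also reaches the caller
+ * as a non-zero value, which odin_hw_init() treats as failure like any other.
+ */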
+
+#endif /* defined(SUPPORT_RGX) */
+
+static void odin_set_mem_mode(struct tc_device *tc)
+{
+	u32 val;
+
+	if (tc->version != ODIN_VERSION_FPGA)
+		return;
+
+	/* Enable memory offset to be applied to DUT and PDP1 */
+	iowrite32(0x80000A10, tc->tcf.registers + ODN_CORE_DUT_CTRL1);
+
+	/* Apply memory offset to GPU and PDP1 to point to DDR memory.
+	 * Enable HDMI.
+	 */
+	val = (0x4 << ODN_CORE_CONTROL_DUT_OFFSET_SHIFT) |
+	      (0x4 << ODN_CORE_CONTROL_PDP1_OFFSET_SHIFT) |
+	      (0x2 << ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT) |
+	      (0x1 << ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT);
+	iowrite32(val, tc->tcf.registers + ODN_CORE_CORE_CONTROL);
+}
+
+/* Do a hard reset on the DUT */
+static int odin_hard_reset(struct tc_device *tc, int core_clock, int mem_clock)
+{
+#if defined(SUPPORT_RGX)
+	if (tc->version == ODIN_VERSION_TCF_BONNIE)
+		return odin_hard_reset_bonnie(tc);
+	if (tc->version == ODIN_VERSION_FPGA)
+		return odin_hard_reset_fpga(tc, core_clock, mem_clock);
+
+	dev_err(&tc->pdev->dev, "Invalid Odin version\n");
+	return 1;
+#else /* defined(SUPPORT_RGX) */
+	return 0;
+#endif /* defined(SUPPORT_RGX) */
+}
+
+static int odin_hw_init(struct tc_device *tc, int core_clock, int mem_clock)
+{
+	int err;
+
+	err = odin_hard_reset(tc, core_clock, mem_clock);
+	if (err) {
+		dev_err(&tc->pdev->dev, "Failed to initialise Odin\n");
+		goto err_out;
+	}
+
+	odin_set_mem_mode(tc);
+
+err_out:
+	return err;
+}
+
+static int odin_enable_irq(struct tc_device *tc)
+{
+	int err = 0;
+
+#if defined(TC_FAKE_INTERRUPTS)
+	setup_timer(&tc->timer, tc_irq_fake_wrapper,
+		(unsigned long)tc);
+	mod_timer(&tc->timer,
+		jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS));
+#else
+	iowrite32(0, tc->tcf.registers +
+		ODN_CORE_INTERRUPT_ENABLE);
+	iowrite32(0xffffffff, tc->tcf.registers +
+		ODN_CORE_INTERRUPT_CLR);
+
+	dev_info(&tc->pdev->dev,
+		"Registering IRQ %d for use by Odin\n",
+		tc->pdev->irq);
+
+	err = request_irq(tc->pdev->irq, odin_irq_handler,
+		IRQF_SHARED, DRV_NAME, tc);
+
+	if (err) {
+		dev_err(&tc->pdev->dev,
+			"Error - IRQ %d failed to register\n",
+			tc->pdev->irq);
+	} else {
+		dev_info(&tc->pdev->dev,
+			"IRQ %d was successfully registered for use by Odin\n",
+			tc->pdev->irq);
+	}
+#endif
+	return err;
+}
+
+static void odin_disable_irq(struct tc_device *tc)
+{
+#if defined(TC_FAKE_INTERRUPTS)
+	del_timer_sync(&tc->timer);
+#else
+	iowrite32(0, tc->tcf.registers +
+			ODN_CORE_INTERRUPT_ENABLE);
+	iowrite32(0xffffffff, tc->tcf.registers +
+			ODN_CORE_INTERRUPT_CLR);
+
+	free_irq(tc->pdev->irq, tc);
+#endif
+}
+
+static enum tc_version_t
+odin_detect_daughterboard_version(struct tc_device *tc)
+{
+	u32 val = ioread32(tc->tcf.registers + ODN_REG_BANK_DB_TYPE_ID);
+
+	val = (val & ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK) >>
+		ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT;
+
+	switch (val) {
+	default:
+		dev_err(&tc->pdev->dev,
+			"Unknown odin version ID type (0x%x)\n",
+			val);
+		BUG();
+		return 0;
+	case 1:
+		dev_info(&tc->pdev->dev, "DUT: Bonnie TC\n");
+		return ODIN_VERSION_TCF_BONNIE;
+	case 2:
+	case 3:
+		dev_info(&tc->pdev->dev, "DUT: FPGA\n");
+		return ODIN_VERSION_FPGA;
+	}
+}
+
+static int odin_dev_init(struct tc_device *tc, struct pci_dev *pdev,
+			 int pdp_mem_size, int secure_mem_size)
+{
+	int err;
+	u32 val;
+
+	/* Reserve and map the tcf system registers */
+	err = setup_io_region(pdev, &tc->tcf,
+		ODN_SYS_BAR, ODN_SYS_REGS_OFFSET, ODN_SYS_REGS_SIZE);
+	if (err)
+		goto err_out;
+
+	tc->version = odin_detect_daughterboard_version(tc);
+
+	/* Setup card memory */
+	tc->tc_mem.base = pci_resource_start(pdev, ODN_DDR_BAR);
+	tc->tc_mem.size = pci_resource_len(pdev, ODN_DDR_BAR);
+
+	if (tc->tc_mem.size < pdp_mem_size) {
+		dev_err(&pdev->dev,
+			"Odin MEM region (bar %d) has a size of %lu, which is smaller than the requested PDP heap of %lu\n",
+			ODN_DDR_BAR,
+			(unsigned long)tc->tc_mem.size,
+			(unsigned long)pdp_mem_size);
+
+		err = -EIO;
+		goto err_odin_unmap_sys_registers;
+	}
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	if (tc->tc_mem.size <
+	    (pdp_mem_size + secure_mem_size)) {
+		dev_err(&pdev->dev,
+			"Odin MEM region (bar %d) has a size of %lu, which is smaller than the requested PDP heap of %lu plus the requested secure heap size of %lu\n",
+			ODN_DDR_BAR,
+			(unsigned long)tc->tc_mem.size,
+			(unsigned long)pdp_mem_size,
+			(unsigned long)secure_mem_size);
+		err = -EIO;
+		goto err_odin_unmap_sys_registers;
+	}
+#endif
+
+	err = tc_mtrr_setup(tc);
+	if (err)
+		goto err_odin_unmap_sys_registers;
+
+	/* Setup ranges for the device heaps */
+	tc->pdp_heap_mem_size = pdp_mem_size;
+
+	/* We know ext_heap_mem_size won't underflow as we've compared
+	 * tc_mem.size against the pdp_mem_size value earlier
+	 */
+	tc->ext_heap_mem_size =
+		tc->tc_mem.size - tc->pdp_heap_mem_size;
+
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	tc->ext_heap_mem_size -= secure_mem_size;
+#endif
+
+	if (tc->ext_heap_mem_size < TC_EXT_MINIMUM_MEM_SIZE) {
+		dev_warn(&pdev->dev,
+			"Odin MEM region (bar 4) has a size of %lu; with a pdp_mem_size of %lu, only %lu bytes are left for the ext device, which looks too small\n",
+			(unsigned long)tc->tc_mem.size,
+			(unsigned long)pdp_mem_size,
+			(unsigned long)tc->ext_heap_mem_size);
+		/* Continue as this is only a 'helpful warning' not a hard
+		 * requirement
+		 */
+	}
+	tc->ext_heap_mem_base = tc->tc_mem.base;
+	tc->pdp_heap_mem_base =
+		tc->tc_mem.base + tc->ext_heap_mem_size;
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+	tc->secure_heap_mem_base = tc->pdp_heap_mem_base +
+		tc->pdp_heap_mem_size;
+	tc->secure_heap_mem_size = secure_mem_size;
+#endif
+
+#if defined(SUPPORT_ION)
+	err = tc_ion_init(tc, ODN_DDR_BAR);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialise ION\n");
+		goto err_odin_unmap_sys_registers;
+	}
+#endif
+
+	val = ioread32(tc->tcf.registers + ODN_CORE_REVISION);
+	dev_info(&pdev->dev, "ODN_CORE_REVISION = %08x\n", val);
+
+	val = ioread32(tc->tcf.registers + ODN_CORE_CHANGE_SET);
+	dev_info(&pdev->dev, "ODN_CORE_CHANGE_SET = %08x\n", val);
+
+	val = ioread32(tc->tcf.registers + ODN_CORE_USER_ID);
+	dev_info(&pdev->dev, "ODN_CORE_USER_ID = %08x\n", val);
+
+	val = ioread32(tc->tcf.registers + ODN_CORE_USER_BUILD);
+	dev_info(&pdev->dev, "ODN_CORE_USER_BUILD = %08x\n", val);
+
+err_out:
+	return err;
+
+err_odin_unmap_sys_registers:
+	dev_info(&pdev->dev,
+		"odin_dev_init failed, unmapping the IO regions\n");
+
+	iounmap(tc->tcf.registers);
+	release_pci_io_addr(pdev, ODN_SYS_BAR,
+			 tc->tcf.region.base, tc->tcf.region.size);
+	goto err_out;
+}
+
+static void odin_dev_cleanup(struct tc_device *tc)
+{
+#if defined(SUPPORT_ION)
+	tc_ion_deinit(tc, ODN_DDR_BAR);
+#endif
+
+	tc_mtrr_cleanup(tc);
+
+	iounmap(tc->tcf.registers);
+
+	release_pci_io_addr(tc->pdev,
+			ODN_SYS_BAR,
+			tc->tcf.region.base,
+			tc->tcf.region.size);
+}
+
+static u32 odin_interrupt_id_to_flag(int interrupt_id)
+{
+	switch (interrupt_id) {
+	case TC_INTERRUPT_PDP:
+		return ODN_INTERRUPT_ENABLE_PDP1;
+	case TC_INTERRUPT_EXT:
+		return ODN_INTERRUPT_ENABLE_DUT;
+	default:
+		BUG();
+	}
+}
+
+int odin_init(struct tc_device *tc, struct pci_dev *pdev,
+	      int core_clock, int mem_clock,
+	      int pdp_mem_size, int secure_mem_size)
+{
+	int err = 0;
+
+	err = odin_dev_init(tc, pdev, pdp_mem_size, secure_mem_size);
+	if (err) {
+		dev_err(&pdev->dev, "odin_dev_init failed\n");
+		goto err_out;
+	}
+
+	err = odin_hw_init(tc, core_clock, mem_clock);
+	if (err) {
+		dev_err(&pdev->dev, "odin_hw_init failed\n");
+		goto err_dev_cleanup;
+	}
+
+	err = odin_enable_irq(tc);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Failed to initialise IRQ\n");
+		goto err_dev_cleanup;
+	}
+
+err_out:
+	return err;
+
+err_dev_cleanup:
+	odin_dev_cleanup(tc);
+	goto err_out;
+}
+
+int odin_cleanup(struct tc_device *tc)
+{
+	odin_disable_irq(tc);
+	odin_dev_cleanup(tc);
+
+	return 0;
+}
+
+int odin_register_pdp_device(struct tc_device *tc)
+{
+	int err = 0;
+	resource_size_t reg_start = pci_resource_start(tc->pdev, ODN_SYS_BAR);
+	struct resource pdp_resources_odin[] = {
+		DEFINE_RES_MEM_NAMED(reg_start +
+				ODN_PDP_REGS_OFFSET, /* start */
+				ODN_PDP_REGS_SIZE, /* size */
+				"pdp-regs"),
+		DEFINE_RES_MEM_NAMED(reg_start +
+				ODN_SYS_REGS_OFFSET +
+				ODN_REG_BANK_ODN_CLK_BLK +
+				ODN_PDP_P_CLK_OUT_DIVIDER_REG1, /* start */
+				ODN_PDP_P_CLK_IN_DIVIDER_REG -
+				ODN_PDP_P_CLK_OUT_DIVIDER_REG1 + 4, /* size */
+				"pll-regs"),
+		DEFINE_RES_MEM_NAMED(reg_start +
+				ODN_SYS_REGS_OFFSET +
+				ODN_REG_BANK_CORE, /* start */
+				ODN_CORE_MMCM_LOCK_STATUS + 4, /* size */
+				"odn-core"),
+	};
+
+	struct tc_pdp_platform_data pdata = {
+#if defined(SUPPORT_ION)
+		.ion_device = tc->ion_device,
+		.ion_heap_id = ION_HEAP_TC_PDP,
+#endif
+		.memory_base = tc->tc_mem.base,
+		.pdp_heap_memory_base = tc->pdp_heap_mem_base,
+		.pdp_heap_memory_size = tc->pdp_heap_mem_size,
+	};
+	struct platform_device_info pdp_device_info = {
+		.parent = &tc->pdev->dev,
+		.name = ODN_DEVICE_NAME_PDP,
+		.id = -2,
+		.data = &pdata,
+		.size_data = sizeof(pdata),
+		.dma_mask = DMA_BIT_MASK(32),
+	};
+
+	pdp_device_info.res = pdp_resources_odin;
+	pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_odin);
+
+	tc->pdp_dev = platform_device_register_full(&pdp_device_info);
+	if (IS_ERR(tc->pdp_dev)) {
+		err = PTR_ERR(tc->pdp_dev);
+		dev_err(&tc->pdev->dev,
+			"Failed to register PDP device (%d)\n", err);
+		tc->pdp_dev = NULL;
+		goto err_out;
+	}
+
+err_out:
+	return err;
+}
+
+int odin_register_ext_device(struct tc_device *tc)
+{
+#if defined(SUPPORT_RGX)
+	int err = 0;
+	struct resource odin_rogue_resources[] = {
+		DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
+							ODN_DUT_SOCIF_BAR),
+				     ODN_DUT_SOCIF_SIZE, "rogue-regs"),
+	};
+	struct tc_rogue_platform_data pdata = {
+#if defined(SUPPORT_ION)
+		.ion_device = tc->ion_device,
+		.ion_heap_id = ION_HEAP_TC_ROGUE,
+#endif
+		.tc_memory_base = tc->tc_mem.base,
+		.pdp_heap_memory_base = tc->pdp_heap_mem_base,
+		.pdp_heap_memory_size = tc->pdp_heap_mem_size,
+		.rogue_heap_memory_base = tc->ext_heap_mem_base,
+		.rogue_heap_memory_size = tc->ext_heap_mem_size,
+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
+		.secure_heap_memory_base = tc->secure_heap_mem_base,
+		.secure_heap_memory_size = tc->secure_heap_mem_size,
+#endif
+	};
+	struct platform_device_info odin_rogue_dev_info = {
+		.parent = &tc->pdev->dev,
+		.name = TC_DEVICE_NAME_ROGUE,
+		.id = -2,
+		.res = odin_rogue_resources,
+		.num_res = ARRAY_SIZE(odin_rogue_resources),
+		.data = &pdata,
+		.size_data = sizeof(pdata),
+	};
+
+	tc->ext_dev
+		= platform_device_register_full(&odin_rogue_dev_info);
+
+	if (IS_ERR(tc->ext_dev)) {
+		err = PTR_ERR(tc->ext_dev);
+		dev_err(&tc->pdev->dev,
+			"Failed to register rogue device (%d)\n", err);
+		tc->ext_dev = NULL;
+	}
+	return err;
+#else /* defined(SUPPORT_RGX) */
+	return 0;
+#endif /* defined(SUPPORT_RGX) */
+}
+
+void odin_enable_interrupt_register(struct tc_device *tc,
+				    int interrupt_id)
+{
+	u32 val;
+	u32 flag;
+
+	switch (interrupt_id) {
+	case TC_INTERRUPT_PDP:
+		dev_info(&tc->pdev->dev,
+			"Enabling Odin PDP interrupts\n");
+		break;
+	case TC_INTERRUPT_EXT:
+		dev_info(&tc->pdev->dev,
+			"Enabling Odin DUT interrupts\n");
+		break;
+	default:
+		dev_err(&tc->pdev->dev,
+			"Error - illegal interrupt id\n");
+		return;
+	}
+
+	val = ioread32(tc->tcf.registers +
+		       ODN_CORE_INTERRUPT_ENABLE);
+	flag = odin_interrupt_id_to_flag(interrupt_id);
+	val |= flag;
+	iowrite32(val, tc->tcf.registers +
+		  ODN_CORE_INTERRUPT_ENABLE);
+}
+
+void odin_disable_interrupt_register(struct tc_device *tc,
+				     int interrupt_id)
+{
+	u32 val;
+
+	switch (interrupt_id) {
+	case TC_INTERRUPT_PDP:
+		dev_info(&tc->pdev->dev,
+			"Disabling Odin PDP interrupts\n");
+		break;
+	case TC_INTERRUPT_EXT:
+		dev_info(&tc->pdev->dev,
+			"Disabling Odin DUT interrupts\n");
+		break;
+	default:
+		dev_err(&tc->pdev->dev,
+			"Error - illegal interrupt id\n");
+		return;
+	}
+	val = ioread32(tc->tcf.registers +
+		       ODN_CORE_INTERRUPT_ENABLE);
+	val &= ~(odin_interrupt_id_to_flag(interrupt_id));
+	iowrite32(val, tc->tcf.registers +
+		  ODN_CORE_INTERRUPT_ENABLE);
+}
+
+irqreturn_t odin_irq_handler(int irq, void *data)
+{
+	u32 interrupt_status;
+	u32 interrupt_clear = 0;
+	unsigned long flags;
+	irqreturn_t ret = IRQ_NONE;
+	struct tc_device *tc = (struct tc_device *)data;
+
+	spin_lock_irqsave(&tc->interrupt_handler_lock, flags);
+
+#if defined(TC_FAKE_INTERRUPTS)
+	/* If we're faking interrupts pretend we got both ext and PDP ints */
+	interrupt_status = ODN_INTERRUPT_STATUS_DUT
+		| ODN_INTERRUPT_STATUS_PDP1;
+#else
+	interrupt_status = ioread32(tc->tcf.registers +
+				    ODN_CORE_INTERRUPT_STATUS);
+#endif
+
+	if (interrupt_status & ODN_INTERRUPT_STATUS_DUT) {
+		struct tc_interrupt_handler *ext_int =
+			&tc->interrupt_handlers[TC_INTERRUPT_EXT];
+
+		if (ext_int->enabled && ext_int->handler_function) {
+			ext_int->handler_function(ext_int->handler_data);
+			interrupt_clear |= ODN_INTERRUPT_CLEAR_DUT;
+		}
+		ret = IRQ_HANDLED;
+	}
+	if (interrupt_status & ODN_INTERRUPT_STATUS_PDP1) {
+		struct tc_interrupt_handler *pdp_int =
+			&tc->interrupt_handlers[TC_INTERRUPT_PDP];
+
+		if (pdp_int->enabled && pdp_int->handler_function) {
+			pdp_int->handler_function(pdp_int->handler_data);
+			interrupt_clear |= ODN_INTERRUPT_CLEAR_PDP1;
+		}
+		ret = IRQ_HANDLED;
+	}
+
+	if (interrupt_clear)
+		iowrite32(interrupt_clear,
+			  tc->tcf.registers + ODN_CORE_INTERRUPT_CLR);
+
+	spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags);
+
+	return ret;
+}
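+
+/*
+ * Note: the handler returns IRQ_NONE when neither status bit is set so that,
+ * on the shared line (IRQF_SHARED above), other handlers still get a chance
+ * to claim the interrupt.
+ */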
+
+int odin_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll)
+{
+	*tmp = 0;
+	*pll = 0;
+	return 0;
+}
+
+int odin_sys_strings(struct tc_device *tc,
+		     char *str_fpga_rev, size_t size_fpga_rev,
+		     char *str_tcf_core_rev, size_t size_tcf_core_rev,
+		     char *str_tcf_core_target_build_id,
+		     size_t size_tcf_core_target_build_id,
+		     char *str_pci_ver, size_t size_pci_ver,
+		     char *str_macro_ver, size_t size_macro_ver)
+{
+	u32 val;
+	char temp_str[12];
+
+	/* Read the Odin major and minor revision ID register Rx-xx */
+	val = ioread32(tc->tcf.registers + ODN_CORE_REVISION);
+
+	snprintf(str_tcf_core_rev,
+		 size_tcf_core_rev,
+		 "%d.%d",
+		 HEX2DEC((val & ODN_REVISION_MAJOR_MASK)
+			 >> ODN_REVISION_MAJOR_SHIFT),
+		 HEX2DEC((val & ODN_REVISION_MINOR_MASK)
+			 >> ODN_REVISION_MINOR_SHIFT));
+
+	dev_info(&tc->pdev->dev, "Odin core revision %s\n",
+		 str_tcf_core_rev);
+
+	/* Read the Odin register containing the Perforce changelist
+	 * value that the FPGA build was generated from
+	 */
+	val = ioread32(tc->tcf.registers + ODN_CORE_CHANGE_SET);
+
+	snprintf(str_tcf_core_target_build_id,
+		 size_tcf_core_target_build_id,
+		 "%d",
+		 (val & ODN_CHANGE_SET_SET_MASK)
+		 >> ODN_CHANGE_SET_SET_SHIFT);
+
+	/* Read the Odin User_ID register containing the User ID for
+	 * identification of a modified build
+	 */
+	val = ioread32(tc->tcf.registers + ODN_CORE_USER_ID);
+
+	snprintf(temp_str,
+		 sizeof(temp_str),
+		 "%d",
+		 HEX2DEC((val & ODN_USER_ID_ID_MASK)
+			 >> ODN_USER_ID_ID_SHIFT));
+
+	/* Read the Odin User_Build register containing the User build
+	 * number for identification of modified builds
+	 */
+	val = ioread32(tc->tcf.registers + ODN_CORE_USER_BUILD);
+
+	snprintf(temp_str,
+		 sizeof(temp_str),
+		 "%d",
+		 HEX2DEC((val & ODN_USER_BUILD_BUILD_MASK)
+			 >> ODN_USER_BUILD_BUILD_SHIFT));
+
+	return 0;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_odin.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_odin.h
new file mode 100644
index 0000000..40cc113
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/kernel/drivers/staging/imgtec/tc/tc_odin.h
@@ -0,0 +1,74 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle    LinuxKernel
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _ODIN_DRV_H
+#define _ODIN_DRV_H
+
+#include "tc_drv_internal.h"
+#include "odin_defs.h"
+
+int odin_init(struct tc_device *tc, struct pci_dev *pdev,
+	      int core_clock, int mem_clock,
+	      int pdp_mem_size, int secure_mem_size);
+int odin_cleanup(struct tc_device *tc);
+
+int odin_register_pdp_device(struct tc_device *tc);
+int odin_register_ext_device(struct tc_device *tc);
+
+void odin_enable_interrupt_register(struct tc_device *tc,
+				    int interrupt_id);
+void odin_disable_interrupt_register(struct tc_device *tc,
+				     int interrupt_id);
+
+irqreturn_t odin_irq_handler(int irq, void *data);
+
+int odin_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll);
+int odin_sys_strings(struct tc_device *tc,
+		     char *str_fpga_rev, size_t size_fpga_rev,
+		     char *str_tcf_core_rev, size_t size_tcf_core_rev,
+		     char *str_tcf_core_target_build_id,
+		     size_t size_tcf_core_target_build_id,
+		     char *str_pci_ver, size_t size_pci_ver,
+		     char *str_macro_ver, size_t size_macro_ver);
+
+#endif /* _ODIN_DRV_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/htbuffer_sf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/htbuffer_sf.h
new file mode 100644
index 0000000..517c384
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/htbuffer_sf.h
@@ -0,0 +1,218 @@
+/*************************************************************************/ /*!
+@File           htbuffer_sf.h
+@Title          Host Trace Buffer interface string format specifiers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the Host Trace Buffer logging messages. The following
+                list contains the messages the host driver prints. Changing
+                anything other than the first column (even fixing spelling
+                mistakes in the strings) will break compatibility with log
+                files created with older/newer driver versions.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_SF_H__
+#define __HTBUFFER_SF_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*****************************************************************************
+ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you
+ *          WILL BREAK host tracing message compatibility with previous
+ *          driver versions. Only add new ones, if so required.
+ ****************************************************************************/
+
+
+/* String used in pvrdebug -h output */
+#define HTB_LOG_GROUPS_STRING_LIST   "ctrl,mmu,sync,main,brg"
+
+/* Used in print statements to display log group state, one %s per group defined */
+#define HTB_LOG_ENABLED_GROUPS_LIST_PFSPEC  "%s%s%s%s%s"
+
+/* Available log groups - Master template
+ *
+ * Group usage is as follows:
+ *    CTRL  - Internal Host Trace information and synchronisation data
+ *    MMU   - MMU page mapping information
+ *    SYNC  - Synchronisation debug
+ *    MAIN  - Data master kicks, etc., tying in with the MAIN group in FWTrace
+ *    BRG   - Bridge call logging
+ *    DBG   - Temporary debugging group; logs are not to be left in the driver
+ *
+ */
+#define HTB_LOG_SFGROUPLIST                               \
+	X( HTB_GROUP_NONE,     NONE  )                        \
+/*     gid,                group flag / apphint name */   \
+	X( HTB_GROUP_CTRL,     CTRL  )                        \
+	X( HTB_GROUP_MMU,      MMU   )                        \
+	X( HTB_GROUP_SYNC,     SYNC  )                        \
+	X( HTB_GROUP_MAIN,     MAIN  )                        \
+	X( HTB_GROUP_BRG,      BRG  )                         \
+/* Debug group HTB_GROUP_DBG must always be last */       \
+	X( HTB_GROUP_DBG,      DBG   )
+
+
+/* Table of String Format specifiers, the group they belong and the number of
+ * arguments each expects. Xmacro styled macros are used to generate what is
+ * needed without requiring hand editing.
+ *
+ * id		: unique id within a group
+ * gid		: group id as defined above
+ * sym name	: symbolic name of enumerations used to identify message strings
+ * string	: Actual string
+ * #args	: number of arguments the string format requires
+ */
+#define HTB_LOG_SFIDLIST \
+/*id,  gid,             sym name,                       string,                           # arguments */ \
+X( 0,  HTB_GROUP_NONE,  HTB_SF_FIRST,                   "You should not use this string", 0) \
+\
+X( 1,  HTB_GROUP_CTRL,  HTB_SF_CTRL_LOGMODE,            "HTB log mode set to %d (1- all PID, 2 - restricted PID)\n", 1) \
+X( 2,  HTB_GROUP_CTRL,  HTB_SF_CTRL_ENABLE_PID,         "HTB enable logging for PID %d\n", 1) \
+X( 3,  HTB_GROUP_CTRL,  HTB_SF_CTRL_ENABLE_GROUP,       "HTB enable logging groups 0x%08x\n", 1) \
+X( 4,  HTB_GROUP_CTRL,  HTB_SF_CTRL_LOG_LEVEL,          "HTB log level set to %d\n", 1) \
+X( 5,  HTB_GROUP_CTRL,  HTB_SF_CTRL_OPMODE,             "HTB operating mode set to %d (1 - droplatest, 2 - drop oldest, 3 - block)\n", 1) \
+X( 6,  HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_SCALE,       "HTBFWSync OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \
+X( 7,  HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_SCALE_RPT,   "FW Sync scale info OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \
+X( 8,  HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_MARK,        "FW Sync Partition marker: %d\n", 1) \
+X( 9,  HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_MARK_RPT,    "FW Sync Partition repeat: %d\n", 1) \
+\
+X( 1,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_TABLE,       "MMU page op table entry page_id=%08x%08x index=%d level=%d val=%08x%08x map=%d\n", 7) \
+X( 2,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_ALLOC,       "MMU allocating DevVAddr from %08x%08x to %08x%08x\n", 4) \
+X( 3,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_FREE,        "MMU freeing DevVAddr from %08x%08x to %08x%08x\n", 4) \
+X( 4,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_MAP,         "MMU mapping DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \
+X( 5,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_PMRMAP,      "MMU mapping PMR DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \
+X( 6,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_UNMAP,       "MMU unmapping DevVAddr %08x%08x\n", 2) \
+\
+X( 1,  HTB_GROUP_SYNC,  HTB_SF_SYNC_SERVER_ALLOC,       "Server sync allocation [%08X]\n", 1) \
+X( 2,  HTB_GROUP_SYNC,  HTB_SF_SYNC_SERVER_UNREF,       "Server sync unreferenced [%08X]\n", 1) \
+X( 3,  HTB_GROUP_SYNC,  HTB_SF_SYNC_PRIM_OP_CREATE,     "Sync OP create 0x%08x, block count=%d, server syncs=%d, client syncs=%d\n", 4) \
+X( 4,  HTB_GROUP_SYNC,  HTB_SF_SYNC_PRIM_OP_TAKE,       "Sync OP take 0x%08x server syncs=%d, client syncs=%d\n", 3) \
+X( 5,  HTB_GROUP_SYNC,  HTB_SF_SYNC_PRIM_OP_COMPLETE,   "Sync OP complete 0x%08x\n", 1) \
+X( 6,  HTB_GROUP_SYNC,  HTB_SF_SYNC_PRIM_OP_DESTROY,    "Sync OP destroy 0x%08x\n", 1) \
+\
+X( 1,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_TA,            "Kick TA: FWCtx %08X @ %d\n", 2) \
+X( 2,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_3D,            "Kick 3D: FWCtx %08X @ %d\n", 2) \
+X( 3,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_CDM,           "Kick CDM: FWCtx %08X @ %d\n", 2) \
+X( 4,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_RTU,           "Kick RTU: FWCtx %08X @ %d\n", 2) \
+X( 5,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_SHG,           "Kick SHG: FWCtx %08X @ %d\n", 2) \
+X( 6,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_2D,            "Kick 2D: FWCtx %08X @ %d\n", 2) \
+X( 7,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_UNCOUNTED,     "Kick (uncounted) for all DMs\n", 0) \
+X( 8,  HTB_GROUP_MAIN,  HTB_SF_MAIN_FWCCB_CMD,          "FW CCB Cmd: %d\n", 1) \
+\
+X( 1,  HTB_GROUP_BRG,   HTB_SF_BRG_BRIDGE_CALL,         "Bridge call: start: %010u: bid %03d fid %d\n", 3) \
+X( 2,  HTB_GROUP_BRG,   HTB_SF_BRG_BRIDGE_CALL_ERR,     "Bridge call: start: %010u: bid %03d fid %d error %d\n", 4) \
+\
+X( 1,  HTB_GROUP_DBG,   HTB_SF_DBG_INTPAIR,             "0x%8.8x 0x%8.8x\n", 2) \
+\
+X( 65535, HTB_GROUP_NONE, HTB_SF_LAST,                  "You should not use this string\n", 15)
+
+
+
+/* gid - Group numbers */
+typedef enum _HTB_LOG_SFGROUPS {
+#define X(A,B) A,
+	HTB_LOG_SFGROUPLIST
+#undef X
+} HTB_LOG_SFGROUPS;
+
+
+/* group flags are stored in an array of elements */
+/* each of which have a certain number of bits */
+#define HTB_FLAG_EL_T                   IMG_UINT32
+#define HTB_FLAG_NUM_BITS_IN_EL         ( sizeof(HTB_FLAG_EL_T) * 8 )
+
+#define HTB_LOG_GROUP_FLAG_GROUP(gid)   ( ((gid) - 1) / HTB_FLAG_NUM_BITS_IN_EL )
+#define HTB_LOG_GROUP_FLAG(gid)         ( (gid) ? (0x1 << (((gid) - 1) % HTB_FLAG_NUM_BITS_IN_EL)) : 0 )
+#define HTB_LOG_GROUP_FLAG_NAME(gid)    HTB_LOG_TYPE_ ## gid
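+
+/* Worked example: HTB_GROUP_MMU enumerates to 2 in HTB_LOG_SFGROUPS, so
+ * HTB_LOG_GROUP_FLAG_GROUP(HTB_GROUP_MMU) = (2-1)/32 = 0 (first flag
+ * element) and HTB_LOG_GROUP_FLAG(HTB_GROUP_MMU) = 0x1 << 1 = 0x2.
+ */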
+
+/* group enable flags */
+typedef enum _HTB_LOG_TYPE {
+#define X(a, b) HTB_LOG_GROUP_FLAG_NAME(b) = HTB_LOG_GROUP_FLAG(a),
+	HTB_LOG_SFGROUPLIST
+#undef X
+} HTB_LOG_TYPE;
+
+
+
+/*  The symbolic names found in the table above are assigned a ui32 value of
+ *  the following format:
+ *
+ *  31  30..28  27..20  19..16  15..12  11..0   bits
+ *  --  ------  ------  ------  ------  -----
+ *     0-11: id number
+ *    12-15: group id number
+ *    16-19: number of parameters
+ *    20-27: unused
+ *    28-30: active: identifies an SF packet, otherwise a regular int32
+ *       31: reserved for signed/unsigned compatibility
+ *
+ *   The following macro assigns those values to the enum generated SF ids list.
+ */
+#define HTB_LOG_IDMARKER            (0x70000000)
+#define HTB_LOG_CREATESFID(a,b,e)   (((a) | ((b) << 12) | ((e) << 16)) | HTB_LOG_IDMARKER)
+
+#define HTB_LOG_IDMASK              (0xFFF00000)
+#define HTB_LOG_VALIDID(I)          ( ((I) & HTB_LOG_IDMASK) == HTB_LOG_IDMARKER )
+
+typedef enum HTB_LOG_SFids {
+#define X(a, b, c, d, e) c = HTB_LOG_CREATESFID(a,b,e),
+	HTB_LOG_SFIDLIST
+#undef X
+} HTB_LOG_SFids;
+
+/* Return the group id that the given (enum generated) id belongs to */
+#define HTB_SF_GID(x) (((x)>>12) & 0xf)
+/* future improvement to support log levels */
+#define HTB_SF_LVL(x) (0)
+/* Returns how many arguments the SF(string format) for the given (enum generated) id requires */
+#define HTB_SF_PARAMNUM(x) (((x)>>16) & 0xf)
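+
+/* Worked example: HTB_SF_CTRL_ENABLE_PID is HTB_LOG_CREATESFID(2, 1, 1)
+ * (id 2, group CTRL = 1, one parameter) = 0x2 | (1 << 12) | (1 << 16)
+ * | 0x70000000 = 0x70011002, for which HTB_LOG_VALIDID() holds,
+ * HTB_SF_GID() yields 1 and HTB_SF_PARAMNUM() yields 1.
+ */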
+
+/* format of messages is: SF:PID:TIME:[PARn]*
+ */
+#define HTB_LOG_HEADER_SIZE         3
+#define HTB_LOG_MAX_PARAMS          15
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_SF_H__ */
+/*****************************************************************************
+ End of file (htbuffer_sf.h)
+*****************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/htbuffer_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/htbuffer_types.h
new file mode 100644
index 0000000..c4f19b3e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/htbuffer_types.h
@@ -0,0 +1,124 @@
+/*************************************************************************/ /*!
+@File           htbuffer_types.h
+@Title          Host Trace Buffer types.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provide the mechanism to
+                retrieve the trace data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_TYPES_H__
+#define __HTBUFFER_TYPES_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "htbuffer_sf.h"
+
+/* the group flags array of ints large enough to store all the group flags */
+#define HTB_FLAG_NUM_EL ( ((HTB_GROUP_DBG-1) / HTB_FLAG_NUM_BITS_IN_EL) + 1 )
+extern IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL];
+
+#define HTB_GROUP_ENABLED(SF) (g_auiHTBGroupEnable[HTB_LOG_GROUP_FLAG_GROUP(HTB_SF_GID(SF))] & HTB_LOG_GROUP_FLAG(HTB_SF_GID(SF)))
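+
+/* For example, HTB_GROUP_ENABLED(HTB_SF_MMU_PAGE_OP_MAP) is non-zero exactly
+ * when the MMU group flag is set in g_auiHTBGroupEnable[0].
+ */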
+
+/*************************************************************************/ /*!
+ Host Trace Buffer operation mode
+ Care must be taken if changing this enum to ensure the MapFlags[] array
+ in htbserver.c is kept in-step.
+*/ /**************************************************************************/
+typedef enum
+{
+	/*! Undefined operation mode */
+	HTB_OPMODE_UNDEF = 0,
+
+	/*! Drop latest, intended for continuous logging to a UM daemon.
+	 *  If the daemon does not keep up, the most recent log data
+	 *  will be dropped
+	 */
+	HTB_OPMODE_DROPLATEST,
+
+	/*! Drop oldest, intended for crash logging.
+	 *  Data will be continuously written to a circular buffer.
+	 *  After a crash the buffer will contain events leading up to the crash
+	 */
+	HTB_OPMODE_DROPOLDEST,
+
+	/*! Block write if buffer is full
+	 */
+	HTB_OPMODE_BLOCK,
+
+	HTB_OPMODE_LAST = HTB_OPMODE_BLOCK
+} HTB_OPMODE_CTRL;
+
+
+/*************************************************************************/ /*!
+ Host Trace Buffer log mode control
+*/ /**************************************************************************/
+typedef enum
+{
+	/*! Undefined log mode, used if update is not applied */
+	HTB_LOGMODE_UNDEF = 0,
+
+	/*! Log trace messages for all PIDs.
+	 */
+	HTB_LOGMODE_ALLPID,
+
+	/*! Log trace messages for specific PIDs only.
+	 */
+	HTB_LOGMODE_RESTRICTEDPID,
+
+	HTB_LOGMODE_LAST = HTB_LOGMODE_RESTRICTEDPID
+} HTB_LOGMODE_CTRL;
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_TYPES_H__ */
+
+/*****************************************************************************
+ End of file (htbuffer_types.h)
+*****************************************************************************/
+
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/info_page_defs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/info_page_defs.h
new file mode 100644
index 0000000..9873ee7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/info_page_defs.h
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@File
+@Title          Kernel/User mode general purpose shared memory.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    General purpose memory shared between kernel driver and user
+                mode.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _INFO_PAGE_DEFS_H_
+#define _INFO_PAGE_DEFS_H_
+
+/* The CacheOp information page entries; each entry is sizeof(IMG_UINT32)
+ * on both 32- and 64-bit CPUs */
+
+#define CACHEOP_INFO_SIZE         sizeof(IMG_UINT32)
+
+#define CACHEOP_INFO_GFSEQNUM0    0x00 /*!< Current global flush sequence number */
+#define CACHEOP_INFO_GFSEQNUM1    0x01 /*!< Validity global flush sequence number */
+#define CACHEOP_INFO_UMRBFONLY    0x02 /*!< Use UM flush only (i.e. no KM GF) */
+#define CACHEOP_INFO_UMKMTHRESHLD 0x03 /*!< UM=>KM routing threshold in bytes */
+#define CACHEOP_INFO_KMGFTHRESHLD 0x04 /*!< KM/GF threshold in bytes */
+#define CACHEOP_INFO_KMDFTHRESHLD 0x05 /*!< KM/DF threshold in bytes */
+#define CACHEOP_INFO_LINESIZE     0x06 /*!< CPU d-cache line size */
+#define CACHEOP_INFO_PGSIZE       0x07 /*!< CPU MMU page size */
+
+/* HWPerf information page entries */
+
+#define HWPERF_INFO_IDX_START      0x08
+#define HWPERF_FILTER_SERVICES_IDX (HWPERF_INFO_IDX_START + 0)
+#define HWPERF_FILTER_EGL_IDX      (HWPERF_INFO_IDX_START + 1)
+#define HWPERF_FILTER_OPENGLES_IDX (HWPERF_INFO_IDX_START + 2)
+#define HWPERF_FILTER_OPENCL_IDX   (HWPERF_INFO_IDX_START + 3)
+#define HWPERF_FILTER_OPENRL_IDX   (HWPERF_INFO_IDX_START + 4)
+#define HWPERF_INFO_IDX_END        (HWPERF_INFO_IDX_START + 5)
+
+#endif /* _INFO_PAGE_DEFS_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/km_apphint_defs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/km_apphint_defs.h
new file mode 100644
index 0000000..6eb365c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/km_apphint_defs.h
@@ -0,0 +1,305 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services AppHint definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#ifndef __KM_APPHINT_DEFS_H__
+#define __KM_APPHINT_DEFS_H__
+
+/* NB: The 'DEVICE' AppHints must be last in this list as they will be
+ * duplicated in the case of a driver supporting multiple devices.
+ */
+#define APPHINT_LIST_ALL \
+	APPHINT_LIST_BUILDVAR \
+	APPHINT_LIST_MODPARAM \
+	APPHINT_LIST_DEBUGFS \
+	APPHINT_LIST_DEBUGFS_DEVICE
+
+/*
+*******************************************************************************
+ Build variables
+ All of these should be configurable only through the 'default' value
+******************************************************************************/
+#define APPHINT_LIST_BUILDVAR \
+/* name,                            type,           class,       default,                                         helper,         */ \
+X(HWRDebugDumpLimit,                UINT32,         DEBUG,       PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT,                NULL             ) \
+X(EnableTrustedDeviceAceConfig,     BOOL,           GPUVIRT_VAL, PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG,     NULL             ) \
+X(HTBufferSize,                     UINT32,         ALWAYS,      PVRSRV_APPHINT_HTBUFFERSIZE,                     NULL             ) \
+X(CleanupThreadPriority,            UINT32,         NEVER,       PVRSRV_APPHINT_CLEANUPTHREADPRIORITY,            NULL             ) \
+X(CleanupThreadWeight,              UINT32,         NEVER,       PVRSRV_APPHINT_CLEANUPTHREADWEIGHT,              NULL             ) \
+X(WatchdogThreadPriority,           UINT32,         NEVER,       PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY,           NULL             ) \
+X(WatchdogThreadWeight,             UINT32,         NEVER,       PVRSRV_APPHINT_WATCHDOGTHREADWEIGHT,             NULL             ) \
+
+/*
+*******************************************************************************
+ Module parameters
+******************************************************************************/
+#define APPHINT_LIST_MODPARAM \
+/* name,                            type,           class,       default,                                         helper,         */ \
+X(EnableSignatureChecks,            BOOL,           PDUMP,       PVRSRV_APPHINT_ENABLESIGNATURECHECKS,            NULL             ) \
+X(SignatureChecksBufSize,           UINT32,         PDUMP,       PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE,           NULL             ) \
+\
+X(DisableClockGating,               BOOL,           FWDBGCTRL,   PVRSRV_APPHINT_DISABLECLOCKGATING,               NULL             ) \
+X(DisableDMOverlap,                 BOOL,           FWDBGCTRL,   PVRSRV_APPHINT_DISABLEDMOVERLAP,                 NULL             ) \
+\
+X(EnableCDMKillingRandMode,         BOOL,           VALIDATION,  PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE,         NULL             ) \
+X(EnableFWContextSwitch,            UINT32,         FWDBGCTRL,   PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH,            NULL             ) \
+X(VDMContextSwitchMode,             UINT32,         VALIDATION,  PVRSRV_APPHINT_VDMCONTEXTSWITCHMODE,             NULL             ) \
+X(EnableRDPowerIsland,              UINT32,         FWDBGCTRL,   PVRSRV_APPHINT_ENABLERDPOWERISLAND,              NULL             ) \
+\
+X(GeneralNon4KHeapPageSize,         UINT32,         ALWAYS,      PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE,     NULL             ) \
+\
+X(DriverMode,                       UINT32,         ALWAYS,      PVRSRV_APPHINT_DRIVERMODE,                       NULL             ) \
+\
+X(FirmwarePerf,                     UINT32,         VALIDATION,  PVRSRV_APPHINT_FIRMWAREPERF,                     NULL             ) \
+X(FWContextSwitchProfile,           UINT32,         VALIDATION,  PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE,           NULL             ) \
+X(HWPerfDisableCustomCounterFilter, BOOL,           VALIDATION,  PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER, NULL             ) \
+X(HWPerfFWBufSizeInKB,              UINT32,         VALIDATION,  PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB,              NULL             ) \
+X(HWPerfHostBufSizeInKB,            UINT32,         VALIDATION,  PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB,            NULL             ) \
+\
+X(JonesDisableMask,                 UINT32,         VALIDATION,  PVRSRV_APPHINT_JONESDISABLEMASK,                 NULL             ) \
+X(NewFilteringMode,                 BOOL,           VALIDATION,  PVRSRV_APPHINT_NEWFILTERINGMODE,                 NULL             ) \
+X(TruncateMode,                     UINT32,         VALIDATION,  PVRSRV_APPHINT_TRUNCATEMODE,                     NULL             ) \
+X(UseMETAT1,                        UINT32,         VALIDATION,  PVRSRV_APPHINT_USEMETAT1,                        NULL             ) \
+X(EmuMaxFreq,                       UINT32,         ALWAYS,      PVRSRV_APPHINT_EMUMAXFREQ,                       NULL             ) \
+X(GPIOValidationMode,               UINT32,         VALIDATION,  PVRSRV_APPHINT_GPIOVALIDATIONMODE,               NULL             ) \
+X(RGXBVNC,                          STRING,         ALWAYS,      PVRSRV_APPHINT_RGXBVNC,                          NULL             ) \
+\
+X(FWContextSwitchCrossDM,           UINT32,         ALWAYS,      0,                                               NULL             ) \
+\
+X(OSidRegion0Min,                   STRING,         GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION0MIN,                   NULL             ) \
+X(OSidRegion0Max,                   STRING,         GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION0MAX,                   NULL             ) \
+X(OSidRegion1Min,                   STRING,         GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION1MIN,                   NULL             ) \
+X(OSidRegion1Max,                   STRING,         GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION1MAX,                   NULL             ) \
+
+/*
+*******************************************************************************
+ Debugfs parameters - driver configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGFS \
+/* name,                            type,           class,       default,                                         helper,         */ \
+X(EnableHTBLogGroup,                UINT32Bitfield, ALWAYS,      PVRSRV_APPHINT_ENABLEHTBLOGGROUP,                htb_loggroup_tbl ) \
+X(HTBOperationMode,                 UINT32List,     ALWAYS,      PVRSRV_APPHINT_HTBOPERATIONMODE,                 htb_opmode_tbl   ) \
+X(HWPerfFWFilter,                   UINT64,         ALWAYS,      PVRSRV_APPHINT_HWPERFFWFILTER,                   NULL             ) \
+X(HWPerfHostFilter,                 UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFHOSTFILTER,                 NULL             ) \
+X(HWPerfClientFilter_Services,      UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES,      NULL             ) \
+X(HWPerfClientFilter_EGL,           UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL,           NULL             ) \
+X(HWPerfClientFilter_OpenGLES,      UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES,      NULL             ) \
+X(HWPerfClientFilter_OpenCL,        UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL,        NULL             ) \
+X(HWPerfClientFilter_OpenRL,        UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENRL,        NULL             ) \
+X(CacheOpConfig,                    UINT32,         ALWAYS,      PVRSRV_APPHINT_CACHEOPCONFIG,                    NULL             ) \
+X(TimeCorrClock,                    UINT32List,     ALWAYS,      PVRSRV_APPHINT_TIMECORRCLOCK,                    timecorr_clk_tbl )
+
+/*
+*******************************************************************************
+ Debugfs parameters - device configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGFS_DEVICE \
+/* name,                            type,           class,       default,                                         helper,         */ \
+/* Device Firmware config */\
+X(AssertOnHWRTrigger,               BOOL,           ALWAYS,      PVRSRV_APPHINT_ASSERTONHWRTRIGGER,               NULL             ) \
+X(AssertOutOfMemory,                BOOL,           ALWAYS,      PVRSRV_APPHINT_ASSERTOUTOFMEMORY,                NULL             ) \
+X(CheckMList,                       BOOL,           ALWAYS,      PVRSRV_APPHINT_CHECKMLIST,                       NULL             ) \
+X(EnableHWR,                        BOOL,           ALWAYS,      APPHNT_BLDVAR_ENABLEHWR,                         NULL             ) \
+X(EnableLogGroup,                   UINT32Bitfield, ALWAYS,      PVRSRV_APPHINT_ENABLELOGGROUP,                   fwt_loggroup_tbl ) \
+X(FirmwareLogType,                  UINT32List,     ALWAYS,      PVRSRV_APPHINT_FIRMWARELOGTYPE,                  fwt_logtype_tbl  ) \
+/* Device host config */ \
+X(EnableAPM,                        UINT32,         ALWAYS,      PVRSRV_APPHINT_ENABLEAPM,                        NULL             ) \
+X(DisableFEDLogging,                BOOL,           ALWAYS,      PVRSRV_APPHINT_DISABLEFEDLOGGING,                NULL             ) \
+X(ZeroFreelist,                     BOOL,           ALWAYS,      PVRSRV_APPHINT_ZEROFREELIST,                     NULL             ) \
+X(DustRequestInject,                BOOL,           VALIDATION,  PVRSRV_APPHINT_DUSTREQUESTINJECT,                NULL             ) \
+X(DisablePDumpPanic,                BOOL,           PDUMP,       PVRSRV_APPHINT_DISABLEPDUMPPANIC,                NULL             ) \
+X(EnableFWPoisonOnFree,             BOOL,           ALWAYS,      PVRSRV_APPHINT_ENABLEFWPOISONONFREE,             NULL             ) \
+X(FWPoisonOnFreeValue,              UINT32,         ALWAYS,      PVRSRV_APPHINT_FWPOISONONFREEVALUE,              NULL             ) \
+
+/*
+*******************************************************************************
+ Types used in the APPHINT_LIST_<GROUP> lists must be defined here.
+ New types require specific handling code to be added.
+******************************************************************************/
+#define APPHINT_DATA_TYPE_LIST \
+X(BOOL) \
+X(UINT64) \
+X(UINT32) \
+X(UINT32Bitfield) \
+X(UINT32List) \
+X(STRING)
+
+#define APPHINT_CLASS_LIST \
+X(ALWAYS) \
+X(NEVER) \
+X(DEBUG) \
+X(FWDBGCTRL) \
+X(PDUMP) \
+X(VALIDATION) \
+X(GPUVIRT_VAL)
+
+/*
+*******************************************************************************
+ Visibility control for module parameters
+ These bind build variables to AppHint Visibility Groups.
+******************************************************************************/
+#define APPHINT_ENABLED_CLASS_ALWAYS IMG_TRUE
+#define APPHINT_ENABLED_CLASS_NEVER IMG_FALSE
+#define apphint_modparam_class_ALWAYS(a, b, c) apphint_modparam_enable(a, b, c)
+#if defined(DEBUG)
+	#define APPHINT_ENABLED_CLASS_DEBUG IMG_TRUE
+	#define apphint_modparam_class_DEBUG(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+	#define APPHINT_ENABLED_CLASS_DEBUG IMG_FALSE
+	#define apphint_modparam_class_DEBUG(a, b, c)
+#endif
+#if defined(SUPPORT_FWDBGCTRL)
+	#define APPHINT_ENABLED_CLASS_FWDBGCTRL IMG_TRUE
+	#define apphint_modparam_class_FWDBGCTRL(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+	#define APPHINT_ENABLED_CLASS_FWDBGCTRL IMG_FALSE
+	#define apphint_modparam_class_FWDBGCTRL(a, b, c)
+#endif
+#if defined(PDUMP)
+	#define APPHINT_ENABLED_CLASS_PDUMP IMG_TRUE
+	#define apphint_modparam_class_PDUMP(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+	#define APPHINT_ENABLED_CLASS_PDUMP IMG_FALSE
+	#define apphint_modparam_class_PDUMP(a, b, c)
+#endif
+#if defined(SUPPORT_VALIDATION)
+	#define APPHINT_ENABLED_CLASS_VALIDATION IMG_TRUE
+	#define apphint_modparam_class_VALIDATION(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+	#define APPHINT_ENABLED_CLASS_VALIDATION IMG_FALSE
+	#define apphint_modparam_class_VALIDATION(a, b, c)
+#endif
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	#define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_TRUE
+	#define apphint_modparam_class_GPUVIRT_VAL(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+	#define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_FALSE
+	#define apphint_modparam_class_GPUVIRT_VAL(a, b, c)
+#endif
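A hedged sketch of how these class gates are typically consumed; the X-macro walk below is illustrative and not necessarily the driver's exact registration code:

	/* Sketch: registering module parameters through the class gates.
	 * In a release build (DEBUG undefined), apphint_modparam_class_DEBUG
	 * expands to nothing, so DEBUG-class hints produce no parameter. */
	#define X(a, b, c, d, e) apphint_modparam_class_ ## c(a, b, d)
	APPHINT_LIST_MODPARAM
	#undef X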
+
+/*
+*******************************************************************************
+ AppHint defaults based on other build parameters
+******************************************************************************/
+#if defined(HWR_DEFAULT_ENABLED)
+	#define APPHNT_BLDVAR_ENABLEHWR         1
+#else
+	#define APPHNT_BLDVAR_ENABLEHWR         0
+#endif
+#if defined(DEBUG)
+	#define APPHNT_BLDVAR_DEBUG             1
+	#define APPHNT_BLDVAR_DBGDUMPLIMIT      RGXFWIF_HWR_DEBUG_DUMP_ALL
+#else
+	#define APPHNT_BLDVAR_DEBUG             0
+	#define APPHNT_BLDVAR_DBGDUMPLIMIT      1
+#endif
+#if defined(DEBUG) || defined(PDUMP)
+#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS     IMG_TRUE
+#else
+#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS     IMG_FALSE
+#endif
+
+/*
+*******************************************************************************
+
+ Table generated enums
+
+******************************************************************************/
+/* Unique ID for all AppHints */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_ID_ ## a,
+	APPHINT_LIST_ALL
+#undef X
+	APPHINT_ID_MAX
+} APPHINT_ID;
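For readers unfamiliar with the X-macro idiom, the enum above expands roughly as follows (first few BUILDVAR entries shown, illustrative only):

	typedef enum {
		APPHINT_ID_HWRDebugDumpLimit,
		APPHINT_ID_EnableTrustedDeviceAceConfig,
		APPHINT_ID_HTBufferSize,
		/* ... one enumerator per X() entry in APPHINT_LIST_ALL ... */
		APPHINT_ID_MAX
	} APPHINT_ID;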
+
+/* ID for build variable Apphints - used for build variable only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_BUILDVAR_ID_ ## a,
+	APPHINT_LIST_BUILDVAR
+#undef X
+	APPHINT_BUILDVAR_ID_MAX
+} APPHINT_BUILDVAR_ID;
+
+/* ID for Modparam Apphints - used for modparam only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_MODPARAM_ID_ ## a,
+	APPHINT_LIST_MODPARAM
+#undef X
+	APPHINT_MODPARAM_ID_MAX
+} APPHINT_MODPARAM_ID;
+
+/* ID for Debugfs Apphints - used for debugfs only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_DEBUGFS_ID_ ## a,
+	APPHINT_LIST_DEBUGFS
+#undef X
+	APPHINT_DEBUGFS_ID_MAX
+} APPHINT_DEBUGFS_ID;
+
+/* ID for Debugfs Device Apphints - used for debugfs device only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_DEBUGFS_DEVICE_ID_ ## a,
+	APPHINT_LIST_DEBUGFS_DEVICE
+#undef X
+	APPHINT_DEBUGFS_DEVICE_ID_MAX
+} APPHINT_DEBUGFS_DEVICE_ID;
+
+/* data types and actions */
+typedef enum {
+	APPHINT_DATA_TYPE_INVALID = 0,
+#define X(a) APPHINT_DATA_TYPE_ ## a,
+	APPHINT_DATA_TYPE_LIST
+#undef X
+	APPHINT_DATA_TYPE_MAX
+} APPHINT_DATA_TYPE;
+
+typedef enum {
+#define X(a) APPHINT_CLASS_ ## a,
+	APPHINT_CLASS_LIST
+#undef X
+	APPHINT_CLASS_MAX
+} APPHINT_CLASS;
+
+#endif /* __KM_APPHINT_DEFS_H__ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/mm_common.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/mm_common.h
new file mode 100644
index 0000000..4de0a19
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/mm_common.h
@@ -0,0 +1,51 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common memory management definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common memory management definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef MM_COMMON_H
+#define MM_COMMON_H
+
+#define DEVICEMEM_HISTORY_TEXT_BUFSZ 40
+#define DEVICEMEM_HISTORY_ALLOC_INDEX_NONE 0xFFFFFFFF
+
+#endif
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/os_cpu_cache.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/os_cpu_cache.h
new file mode 100644
index 0000000..1c47632
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/os_cpu_cache.h
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS and CPU d-cache maintenance mechanisms
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines for cache management which are visible internally only
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _OS_CPU_CACHE_H_
+#define _OS_CPU_CACHE_H_
+
+#include "info_page_defs.h"
+
+#define PVRSRV_CACHE_OP_GLOBAL				0x4 /*!< Extends cache_ops.h with explicit global flush w/ invalidate */
+#define PVRSRV_CACHE_OP_TIMELINE			0x8 /*!< Request SW_SYNC timeline notification when executed */
+
+#define CACHEFLUSH_ISA_X86					0x1	/*!< x86/x64 specific UM range-based cache flush */
+#define CACHEFLUSH_ISA_ARM64				0x2	/*!< AArch64 specific UM range-based cache flush */
+#define CACHEFLUSH_ISA_GENERIC				0x3	/*!< Other ISAs without UM range-based cache flush */
+#ifndef CACHEFLUSH_ISA_TYPE
+	#if defined(__i386__) || defined(__x86_64__)
+		#define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_X86
+	#elif defined(__arm64__) || defined(__aarch64__)
+		#define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_ARM64
+	#else
+		#define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_GENERIC
+	#endif
+#endif
+
+#if (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_X86) || (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_ARM64)
+#define CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH		/*!< x86/x86_64/ARM64 support user-mode d-cache flush */
+#endif
+
+#if !defined(__mips__)
+#define CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH	/*!< Defined for all ISAs except MIPS32/64, which has no global d-cache flush */
+#endif
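A minimal sketch of how these capability macros drive compile-time dispatch; the function below is hypothetical and only illustrates the selection logic:

	/* Sketch: pick a d-cache maintenance strategy at compile time. */
	static void FlushCpuCacheRange(void *pvStart, void *pvEnd)
	{
		(void)pvStart; (void)pvEnd;
	#if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH)
		/* x86/AArch64: walk the range with user-mode cache instructions */
	#elif defined(CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH)
		/* no UM flush, but a kernel-assisted global flush is available */
	#else
		/* e.g. MIPS: per-range maintenance via kernel services only */
	#endif
	}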
+
+#endif	/* _OS_CPU_CACHE_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/os_srvinit_param.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/os_srvinit_param.h
new file mode 100644
index 0000000..694c415
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/os_srvinit_param.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services initialisation parameters header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services initialisation parameter support for the Linux kernel.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __OS_SRVINIT_PARAM_H__
+#define __OS_SRVINIT_PARAM_H__
+
+#include "km_apphint_defs.h"
+
+#define SrvInitParamOpen() 0
+#define SrvInitParamClose(pvState) ((void)(pvState))
+
+#define	SrvInitParamGetBOOL(state, name, value) \
+	(void) pvr_apphint_get_bool(APPHINT_ID_ ## name, &value)
+
+#define	SrvInitParamGetUINT32(state, name, value) \
+	(void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)
+
+#define	SrvInitParamGetUINT64(state, name, value) \
+	(void) pvr_apphint_get_uint64(APPHINT_ID_ ## name, &value)
+
+#define SrvInitParamGetSTRING(state, name, buffer, size) \
+	(void) pvr_apphint_get_string(APPHINT_ID_ ## name, buffer, size)
+
+#define	SrvInitParamGetUINT32BitField(state, name, value) \
+	(void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)
+
+#define	SrvInitParamGetUINT32List(state, name, value) \
+	(void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)
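A hedged usage sketch: ZeroFreelist and HTBufferSize are AppHints from the lists in km_apphint_defs.h, and the pvr_apphint_get_* helpers are assumed to be declared elsewhere in the driver:

	void *pvState = SrvInitParamOpen();
	IMG_BOOL bZeroFreelist = IMG_FALSE;
	IMG_UINT32 ui32HTBSize = 0;

	/* each macro expands to pvr_apphint_get_*(APPHINT_ID_<name>, &value) */
	SrvInitParamGetBOOL(pvState, ZeroFreelist, bZeroFreelist);
	SrvInitParamGetUINT32(pvState, HTBufferSize, ui32HTBSize);

	SrvInitParamClose(pvState);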
+
+
+#endif /* __OS_SRVINIT_PARAM_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/pdump.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/pdump.h
new file mode 100644
index 0000000..47e615e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/pdump.h
@@ -0,0 +1,83 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _SERVICES_PDUMP_H_
+#define _SERVICES_PDUMP_H_
+
+#include "img_types.h"
+#include "services_km.h"
+
+typedef IMG_UINT32 PDUMP_FLAGS_T;
+
+#define PDUMP_FLAGS_NONE            0x00000000UL   /*!< Output this entry with no special treatment, i.e. output only if in frame range */
+
+#define PDUMP_FLAGS_DEINIT          0x20000000UL   /*!< Output this entry to the de-initialisation section */
+
+#define PDUMP_FLAGS_POWER           0x08000000UL   /*!< Output this entry even when a power transition is ongoing */
+
+#define PDUMP_FLAGS_CONTINUOUS      PDUMP_CONT     /*!< Output this entry always, regardless of the framed capture range;
+                                                          used by client applications being dumped. */
+
+#define PDUMP_FLAGS_PERSISTENT      PDUMP_PERSIST  /*!< Output this entry always, regardless of app and range;
+                                                          used by persistent resources created after
+                                                          driver initialisation that must appear in
+                                                          all PDump captures in that session. */
+
+#define PDUMP_FLAGS_DEBUG           0x00010000U    /*!< For internal debugging use */
+
+#define PDUMP_FLAGS_NOHW            0x00000001U    /*!< For internal use: skip sending instructions to the hardware */
+
+#define PDUMP_FILEOFFSET_FMTSPEC    "0x%08X"
+typedef IMG_UINT32 PDUMP_FILEOFFSET_T;
+
+#define PDUMP_PARAM_CHANNEL_NAME    "ParamChannel2"
+#define PDUMP_SCRIPT_CHANNEL_NAME   "ScriptChannel2"
+
+#define PDUMP_CHANNEL_PARAM         0
+#define PDUMP_CHANNEL_SCRIPT        1
+#define PDUMP_NUM_CHANNELS          2
+
+#define PDUMP_PARAM_0_FILE_NAME     "%%0%%.prm"      /*!< Initial Param filename used in PDump capture */
+#define PDUMP_PARAM_N_FILE_NAME     "%%0%%_%02u.prm" /*!< Param filename used when PRM file split */
+#define PDUMP_PARAM_MAX_FILE_NAME   32               /*!< Max Size of parameter name used in out2.txt */
+
+#define PDUMP_IS_CONTINUOUS(flags) (((flags) & PDUMP_FLAGS_CONTINUOUS) != 0)
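A small sketch of combining and testing these flags (PDUMP_CONT is supplied by services_km.h, included above):

	PDUMP_FLAGS_T uiFlags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER;

	if (PDUMP_IS_CONTINUOUS(uiFlags))
	{
		/* entry is emitted regardless of the framed capture range */
	}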
+
+#endif /* _SERVICES_PDUMP_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/physheap.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/physheap.h
new file mode 100644
index 0000000..76b9cf1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/physheap.h
@@ -0,0 +1,159 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physical heap management header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the interface for the physical heap management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#ifndef _PHYSHEAP_H_
+#define _PHYSHEAP_H_
+
+typedef struct _PHYS_HEAP_ PHYS_HEAP;
+
+typedef void (*CpuPAddrToDevPAddr)(IMG_HANDLE hPrivData,
+								   IMG_UINT32 ui32NumOfAddr,
+								   IMG_DEV_PHYADDR *psDevPAddr,
+								   IMG_CPU_PHYADDR *psCpuPAddr);
+
+typedef void (*DevPAddrToCpuPAddr)(IMG_HANDLE hPrivData,
+								   IMG_UINT32 ui32NumOfAddr,
+								   IMG_CPU_PHYADDR *psCpuPAddr,
+								   IMG_DEV_PHYADDR *psDevPAddr);
+
+typedef IMG_UINT32 (*GetRegionId)(IMG_HANDLE hPrivData,
+								   PVRSRV_MEMALLOCFLAGS_T uiAllocationFlags);
+
+typedef struct _PHYS_HEAP_FUNCTIONS_
+{
+	/*! Translate CPU physical address to device physical address */
+	CpuPAddrToDevPAddr	pfnCpuPAddrToDevPAddr;
+	/*! Translate device physical address to CPU physical address */
+	DevPAddrToCpuPAddr	pfnDevPAddrToCpuPAddr;
+	/*! Return id of heap region to allocate from */
+	GetRegionId			pfnGetRegionId;
+} PHYS_HEAP_FUNCTIONS;
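A hedged sketch of a trivial translation callback for a UMA system where CPU and device physical addresses coincide; the function name is hypothetical, and the uiAddr member is assumed from the usual IMG address structs:

	static void UMACpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
	                                  IMG_UINT32 ui32NumOfAddr,
	                                  IMG_DEV_PHYADDR *psDevPAddr,
	                                  IMG_CPU_PHYADDR *psCpuPAddr)
	{
		IMG_UINT32 i;
		(void)hPrivData;
		for (i = 0; i < ui32NumOfAddr; i++)
		{
			psDevPAddr[i].uiAddr = psCpuPAddr[i].uiAddr; /* identity map */
		}
	}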
+
+typedef enum _PHYS_HEAP_TYPE_
+{
+	PHYS_HEAP_TYPE_UNKNOWN = 0,
+	PHYS_HEAP_TYPE_UMA,
+	PHYS_HEAP_TYPE_LMA,
+	PHYS_HEAP_TYPE_DMA,
+#if defined(SUPPORT_WRAP_EXTMEMOBJECT)
+	PHYS_HEAP_TYPE_WRAP,
+#endif
+} PHYS_HEAP_TYPE;
+
+typedef struct _PHYS_HEAP_REGION_
+{
+	IMG_CPU_PHYADDR			sStartAddr;
+	IMG_DEV_PHYADDR			sCardBase;
+	IMG_UINT64				uiSize;
+	IMG_HANDLE				hPrivData;
+	IMG_BOOL				bDynAlloc;
+} PHYS_HEAP_REGION;
+
+typedef struct _PHYS_HEAP_CONFIG_
+{
+	IMG_UINT32				ui32PhysHeapID;
+	PHYS_HEAP_TYPE			eType;
+	IMG_CHAR				*pszPDumpMemspaceName;
+	PHYS_HEAP_FUNCTIONS		*psMemFuncs;
+
+	PHYS_HEAP_REGION		*pasRegions;
+	IMG_UINT32				ui32NumOfRegions;
+
+	IMG_HANDLE				hPrivData;
+} PHYS_HEAP_CONFIG;
+
+PVRSRV_ERROR PhysHeapRegister(PHYS_HEAP_CONFIG *psConfig,
+							  PHYS_HEAP **ppsPhysHeap);
+
+void PhysHeapUnregister(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapAcquire(IMG_UINT32 ui32PhysHeapID,
+							 PHYS_HEAP **ppsPhysHeap);
+
+void PhysHeapRelease(PHYS_HEAP *psPhysHeap);
+
+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapRegionGetCpuPAddr(PHYS_HEAP *psPhysHeap,
+									   IMG_UINT32 ui32RegionId,
+									   IMG_CPU_PHYADDR *psCpuPAddr);
+
+PVRSRV_ERROR PhysHeapRegionGetSize(PHYS_HEAP *psPhysHeap,
+								   IMG_UINT32 ui32RegionId,
+								   IMG_UINT64 *puiSize);
+
+PVRSRV_ERROR PhysHeapRegionGetDevPAddr(PHYS_HEAP *psPhysHeap,
+									   IMG_UINT32 ui32RegionId,
+									   IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_UINT32 PhysHeapNumberOfRegions(PHYS_HEAP *psPhysHeap);
+
+void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
+								IMG_UINT32 ui32NumOfAddr,
+								IMG_DEV_PHYADDR *psDevPAddr,
+								IMG_CPU_PHYADDR *psCpuPAddr);
+
+void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap,
+								IMG_UINT32 ui32NumOfAddr,
+								IMG_CPU_PHYADDR *psCpuPAddr,
+								IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_UINT32 PhysHeapGetRegionId(PHYS_HEAP *psPhysHeap,
+							   PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
+
+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapInit(void);
+PVRSRV_ERROR PhysHeapDeinit(void);
+
+#endif /* _PHYSHEAP_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/pvr_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/pvr_bridge.h
new file mode 100644
index 0000000..0953484
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/pvr_bridge.h
@@ -0,0 +1,469 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the PVR Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_BRIDGE_H__
+#define __PVR_BRIDGE_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "pvrsrv_error.h"
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "common_dc_bridge.h"
+#  if defined(SUPPORT_DCPLAT_BRIDGE)
+#    include "common_dcplat_bridge.h"
+#  endif
+#endif
+#include "common_mm_bridge.h"
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+#include "common_mmplat_bridge.h"
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+#include "common_mmextmem_bridge.h"
+#endif
+#if !defined(EXCLUDE_CMM_BRIDGE)
+#include "common_cmm_bridge.h"
+#endif
+#if defined(LINUX)
+#include "common_dmabuf_bridge.h"
+#endif
+#if defined(PDUMP)
+#include "common_pdump_bridge.h"
+#include "common_pdumpctrl_bridge.h"
+#include "common_pdumpmm_bridge.h"
+#endif
+#include "common_cache_bridge.h"
+#include "common_srvcore_bridge.h"
+#include "common_sync_bridge.h"
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+#include "common_syncexport_bridge.h"
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "common_syncsexport_bridge.h"
+#endif
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "common_smm_bridge.h"
+#endif
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+#include "common_htbuffer_bridge.h"
+#endif
+#include "common_pvrtl_bridge.h"
+#if defined(PVR_RI_DEBUG)
+#include "common_ri_bridge.h"
+#endif
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+#include "common_validation_bridge.h"
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+#include "common_tutils_bridge.h"
+#endif
+
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+#include "common_devicememhistory_bridge.h"
+#endif
+
+#if defined(SUPPORT_SYNCTRACKING_BRIDGE)
+#include "common_synctracking_bridge.h"
+#endif
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#include "common_syncfallback_bridge.h"
+#endif
+
+/*
+ * Bridge command IDs
+ */
+
+
+/* Note: The pattern
+ *   #define PVRSRV_BRIDGE_FEATURE (PVRSRV_BRIDGE_PREVFEATURE + 1)
+ *   #if defined(SUPPORT_FEATURE)
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST	(PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST + 1)
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST	(PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST + PVRSRV_BRIDGE_FEATURE_CMD_LAST)
+ *   #else
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST	0
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST	(PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST)
+ *   #endif
+ * is used in the macro definitions below to make PVRSRV_BRIDGE_FEATURE_*
+ * take up no space in the dispatch table if SUPPORT_FEATURE is disabled.
+ *
+ * Note however that a bridge always defines PVRSRV_BRIDGE_FEATURE, even where 
+ * the feature is not enabled (each bridge group retains its own ioctl number).
+ */
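To make the collapsing behaviour concrete, consider a build without PDUMP, using the definitions below:

	/* With PDUMP undefined:
	 *   PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST == 0
	 *   PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST  == PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST
	 * so PVRSRV_BRIDGE_MM_DISPATCH_FIRST continues straight after the
	 * SYNCSEXPORT entries and the PDUMPCTRL group consumes no dispatch
	 * slots, while PVRSRV_BRIDGE_PDUMPCTRL itself keeps ioctl group
	 * number 5.
	 */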
+
+#define PVRSRV_BRIDGE_FIRST                                     0UL
+
+/*	 0:	Default handler */
+#define PVRSRV_BRIDGE_DEFAULT					0UL
+#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST	0UL
+#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST		(PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST)
+/*   1: CORE functions  */
+#define PVRSRV_BRIDGE_SRVCORE					1UL
+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST	(PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST+1)
+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST		(PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST + PVRSRV_BRIDGE_SRVCORE_CMD_LAST)
+
+/*   2: SYNC functions  */
+#define PVRSRV_BRIDGE_SYNC					2UL
+#define PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST	(PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNC_DISPATCH_LAST	(PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNC_CMD_LAST)
+
+/*   3: SYNCEXPORT functions  */
+#define PVRSRV_BRIDGE_SYNCEXPORT			3UL
+#if defined(SUPPORT_INSECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST	(PVRSRV_BRIDGE_SYNC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST	(PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCEXPORT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST	(PVRSRV_BRIDGE_SYNC_DISPATCH_LAST)
+#endif
+
+/*   4: SYNCSEXPORT functions  */
+#define PVRSRV_BRIDGE_SYNCSEXPORT		    4UL
+#if defined(SUPPORT_SECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST	 (PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCSEXPORT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST	 (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST)
+#endif
+
+/*   5: PDUMP CTRL layer functions*/
+#define PVRSRV_BRIDGE_PDUMPCTRL				5UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST	(PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST	(PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST	0
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST	(PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST)
+#endif
+
+/*   6: Memory Management functions */
+#define PVRSRV_BRIDGE_MM      				6UL
+#define PVRSRV_BRIDGE_MM_DISPATCH_FIRST		(PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MM_DISPATCH_LAST		(PVRSRV_BRIDGE_MM_DISPATCH_FIRST + PVRSRV_BRIDGE_MM_CMD_LAST)
+
+/*   7: Non-Linux Memory Management functions */
+#define PVRSRV_BRIDGE_MMPLAT          		7UL
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST	(PVRSRV_BRIDGE_MM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST	(PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_MMPLAT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST	(PVRSRV_BRIDGE_MM_DISPATCH_LAST)
+#endif
+
+/*   8: Context Memory Management functions */
+#define PVRSRV_BRIDGE_CMM      				8UL
+#if !defined(EXCLUDE_CMM_BRIDGE)
+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST	(PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST		(PVRSRV_BRIDGE_CMM_DISPATCH_FIRST + PVRSRV_BRIDGE_CMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST	 (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST)
+#endif
+
+/*   9: PDUMP Memory Management functions */
+#define PVRSRV_BRIDGE_PDUMPMM      			9UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST	 (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST	 (PVRSRV_BRIDGE_CMM_DISPATCH_LAST)
+#endif
+
+/*   10: PDUMP functions */
+#define PVRSRV_BRIDGE_PDUMP      			10UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST	(PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMP_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST	(PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST)
+#endif
+
+/*  11: DMABUF functions */
+#define PVRSRV_BRIDGE_DMABUF					11UL
+#if defined(LINUX)
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST	(PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST + PVRSRV_BRIDGE_DMABUF_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST	(PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST)
+#endif
+
+/*  12: Display Class functions */
+#define PVRSRV_BRIDGE_DC						12UL
+#if defined(SUPPORT_DISPLAY_CLASS)
+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST     (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST		(PVRSRV_BRIDGE_DC_DISPATCH_FIRST + PVRSRV_BRIDGE_DC_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST     0
+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST		(PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST)
+#endif
+
+/*  13: Cache interface functions */
+#define PVRSRV_BRIDGE_CACHE					13UL
+#define PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST (PVRSRV_BRIDGE_DC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_CACHE_DISPATCH_LAST  (PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST + PVRSRV_BRIDGE_CACHE_CMD_LAST)
+
+/*  14: Secure Memory Management functions*/
+#define PVRSRV_BRIDGE_SMM					14UL
+#if defined(SUPPORT_SECURE_EXPORT)
+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST    (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST  	(PVRSRV_BRIDGE_SMM_DISPATCH_FIRST + PVRSRV_BRIDGE_SMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST   0
+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST  	(PVRSRV_BRIDGE_CACHE_DISPATCH_LAST)
+#endif
+
+/*  15: Transport Layer interface functions */
+#define PVRSRV_BRIDGE_PVRTL					15UL
+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST  (PVRSRV_BRIDGE_SMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST  	(PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST + PVRSRV_BRIDGE_PVRTL_CMD_LAST)
+
+/*  16: Resource Information (RI) interface functions */
+#define PVRSRV_BRIDGE_RI						16UL
+#if defined(PVR_RI_DEBUG)
+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST     (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST  	(PVRSRV_BRIDGE_RI_DISPATCH_FIRST + PVRSRV_BRIDGE_RI_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST     0
+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST  	(PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST)
+#endif
+
+/*  17: Validation interface functions */
+#define PVRSRV_BRIDGE_VALIDATION				17UL
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST (PVRSRV_BRIDGE_RI_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST  (PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST + PVRSRV_BRIDGE_VALIDATION_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST 0 
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST  (PVRSRV_BRIDGE_RI_DISPATCH_LAST)
+#endif
+
+/*  18: TUTILS interface functions */
+#define PVRSRV_BRIDGE_TUTILS					18UL
+#if defined(PVR_TESTING_UTILS)
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST  (PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST + PVRSRV_BRIDGE_TUTILS_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST  (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST)
+#endif
+
+/*  19: DevMem history interface functions */
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY		19UL
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST  (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST + PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST  (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST)
+#endif
+
+/*  20: Host Trace Buffer interface functions */
+#define PVRSRV_BRIDGE_HTBUFFER                 20UL
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST  (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST   (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST + PVRSRV_BRIDGE_HTBUFFER_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST  (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST)
+#endif
+
+/*  21: Non-Linux Display functions */
+#define PVRSRV_BRIDGE_DCPLAT          		21UL
+#if defined(SUPPORT_DISPLAY_CLASS) && defined (SUPPORT_DCPLAT_BRIDGE)
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST	(PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST	(PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_DCPLAT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST	(PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST)
+#endif
+
+/*  22: Extmem functions */
+#define PVRSRV_BRIDGE_MMEXTMEM				   22UL
+#if defined(SUPPORT_WRAP_EXTMEM)
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST  (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST + PVRSRV_BRIDGE_MMEXTMEM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST)
+#endif
+
+/*  23: Sync tracking functions */
+#define PVRSRV_BRIDGE_SYNCTRACKING				   23UL
+#if defined(SUPPORT_SYNCTRACKING_BRIDGE)
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST)
+#endif
+
+/*  24: Sync fallback functions */
+#define PVRSRV_BRIDGE_SYNCFALLBACK				   24UL
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCFALLBACK_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST)
+#endif
+
+/* NB PVRSRV_BRIDGE_LAST below must be the last bridge group defined above (PVRSRV_BRIDGE_FEATURE) */
+#define PVRSRV_BRIDGE_LAST       			(PVRSRV_BRIDGE_SYNCFALLBACK)
+/* NB PVRSRV_BRIDGE_DISPATCH LAST below must be the last dispatch entry defined above (PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST) */
+#define PVRSRV_BRIDGE_DISPATCH_LAST			(PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST)
+
+/* bit mask representing the enabled PVR bridges */
+
+static const IMG_UINT32 gui32PVRBridges =
+	  (1U << (PVRSRV_BRIDGE_DEFAULT - PVRSRV_BRIDGE_FIRST))
+	| (1U << (PVRSRV_BRIDGE_SRVCORE - PVRSRV_BRIDGE_FIRST))
+	| (1U << (PVRSRV_BRIDGE_SYNC - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_INSECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+	| (1U << (PVRSRV_BRIDGE_SYNCEXPORT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_SECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+	| (1U << (PVRSRV_BRIDGE_SYNCSEXPORT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PDUMP)
+	| (1U << (PVRSRV_BRIDGE_PDUMPCTRL - PVRSRV_BRIDGE_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_MM - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+	| (1U << (PVRSRV_BRIDGE_MMPLAT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_CMM)
+	| (1U << (PVRSRV_BRIDGE_CMM - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PDUMP)
+	| (1U << (PVRSRV_BRIDGE_PDUMPMM - PVRSRV_BRIDGE_FIRST))
+	| (1U << (PVRSRV_BRIDGE_PDUMP - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(LINUX)
+	| (1U << (PVRSRV_BRIDGE_DMABUF - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS)
+	| (1U << (PVRSRV_BRIDGE_DC - PVRSRV_BRIDGE_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_CACHE - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_SECURE_EXPORT)
+	| (1U << (PVRSRV_BRIDGE_SMM - PVRSRV_BRIDGE_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_PVRTL - PVRSRV_BRIDGE_FIRST))
+#if defined(PVR_RI_DEBUG)
+	| (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_VALIDATION)
+	| (1U << (PVRSRV_BRIDGE_VALIDATION - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PVR_TESTING_UTILS)
+	| (1U << (PVRSRV_BRIDGE_TUTILS - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+	| (1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_HTBUFFER)
+	| (1U << (PVRSRV_BRIDGE_HTBUFFER - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS) && defined (SUPPORT_DCPLAT_BRIDGE)
+	| (1U << (PVRSRV_BRIDGE_DCPLAT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+	| (1U << (PVRSRV_BRIDGE_MMEXTMEM - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_SYNCTRACKING_BRIDGE)
+	| (1U << (PVRSRV_BRIDGE_SYNCTRACKING - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	| (1U << (PVRSRV_BRIDGE_SYNCFALLBACK - PVRSRV_BRIDGE_FIRST))
+#endif
+	;
+
+/* bit field representing which PVR bridge groups may optionally not
+ * be present in the server
+ */
+#define PVR_BRIDGES_OPTIONAL \
+	( \
+		(1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST)) | \
+		(1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST)) | \
+		(1U << (PVRSRV_BRIDGE_SYNCTRACKING - PVRSRV_BRIDGE_FIRST)) \
+	)
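+
+/* Illustrative sketch (not part of the original header): a caller could use
+ * the gui32PVRBridges mask built above to test whether a bridge group was
+ * compiled in before dispatching to it. Only names defined above are used.
+ */
+#if 0	/* example only */
+static INLINE IMG_BOOL PVRBridgeGroupEnabled(IMG_UINT32 ui32BridgeGroup)
+{
+	if ((ui32BridgeGroup < PVRSRV_BRIDGE_FIRST) ||
+	    (ui32BridgeGroup > PVRSRV_BRIDGE_LAST))
+	{
+		return IMG_FALSE;
+	}
+	return (gui32PVRBridges &
+	        (1U << (ui32BridgeGroup - PVRSRV_BRIDGE_FIRST))) ? IMG_TRUE : IMG_FALSE;
+}
+#endif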
+
+/******************************************************************************
+ * Generic bridge structures
+ *****************************************************************************/
+
+
+/******************************************************************************
+ *	bridge packaging structure
+ *****************************************************************************/
+typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
+{
+	IMG_UINT32				ui32BridgeID;			/*!< ioctl bridge group */
+	IMG_UINT32				ui32FunctionID;         /*!< ioctl function index */
+	IMG_UINT32				ui32Size;				/*!< size of structure */
+	void					*pvParamIn;				/*!< input data buffer */
+	IMG_UINT32				ui32InBufferSize;		/*!< size of input data buffer */
+	void					*pvParamOut;			/*!< output data buffer */
+	IMG_UINT32				ui32OutBufferSize;		/*!< size of output data buffer */
+} PVRSRV_BRIDGE_PACKAGE;
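+
+/* Usage sketch (an assumption; the actual ioctl plumbing lives elsewhere in
+ * the driver): a caller fills one PVRSRV_BRIDGE_PACKAGE per bridge call,
+ * naming the bridge group, the function within that group, and the in/out
+ * buffers. sIn/sOut are hypothetical caller-provided structures.
+ */
+#if 0	/* example only */
+	PVRSRV_BRIDGE_PACKAGE sPkg;
+
+	sPkg.ui32BridgeID      = PVRSRV_BRIDGE_SRVCORE;	/* bridge group */
+	sPkg.ui32FunctionID    = 0;						/* first cmd in that group */
+	sPkg.ui32Size          = sizeof(sPkg);
+	sPkg.pvParamIn         = &sIn;					/* hypothetical input struct */
+	sPkg.ui32InBufferSize  = sizeof(sIn);
+	sPkg.pvParamOut        = &sOut;					/* hypothetical output struct */
+	sPkg.ui32OutBufferSize = sizeof(sOut);
+#endif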
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_BRIDGE_H__ */
+
+/******************************************************************************
+ End of file (pvr_bridge.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_bridge.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_bridge.h
new file mode 100644
index 0000000..e47515e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_bridge.h
@@ -0,0 +1,235 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the rgx Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGX_BRIDGE_H__)
+#define __RGX_BRIDGE_H__
+
+#include "pvr_bridge.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "rgx_fwif.h"
+
+#define RGXFWINITPARAMS_VERSION   1
+#define RGXFWINITPARAMS_EXTENSION 128
+
+#include "common_rgxta3d_bridge.h"
+#include "common_rgxcmp_bridge.h"
+
+#include "common_rgxtq2_bridge.h"
+#include "common_rgxtq_bridge.h"
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+#include "common_breakpoint_bridge.h"
+#endif
+#include "common_debugmisc_bridge.h"
+#if defined(PDUMP)
+#include "common_rgxpdump_bridge.h"
+#endif
+#include "common_rgxhwperf_bridge.h"
+#include "common_rgxray_bridge.h"
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+#include "common_regconfig_bridge.h"
+#endif
+#include "common_timerquery_bridge.h"
+#include "common_rgxkicksync_bridge.h"
+
+#include "common_rgxsignals_bridge.h"
+
+
+/*
+ * Bridge Cmd Ids
+ */
+
+/* *REMEMBER* to update PVRSRV_BRIDGE_RGX_LAST if you add/remove a bridge
+ * group!
+ * Also you need to ensure all PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST
+ * offsets follow on from the previous bridge group's commands!
+ *
+ * If a bridge group is optional, ensure you *ALWAYS* define its index
+ * (e.g. PVRSRV_BRIDGE_RGXCMP is always 129, even if the feature is
+ * not defined). If an optional bridge group is not defined you must
+ * still define PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST for it with an
+ * assigned value of 0.
+ */
+
+/* The RGX bridge groups start at 128 (PVRSRV_BRIDGE_RGX_FIRST) rather than
+ * following on from the other non-device bridge groups, so that they won't be
+ * displaced if further non-device bridge groups are added.
+ */
+
+#define PVRSRV_BRIDGE_RGX_FIRST                  128UL
+
+/* 128: RGX TQ interface functions */
+#define PVRSRV_BRIDGE_RGXTQ                      128UL
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST       (PVRSRV_BRIDGE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST        (PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ_CMD_LAST)
+
+
+/* 129: RGX Compute interface functions */
+#define PVRSRV_BRIDGE_RGXCMP                     129UL
+#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST      (PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST       (PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXCMP_CMD_LAST)
+
+
+/* 130: RGX TA/3D interface functions */
+#define PVRSRV_BRIDGE_RGXTA3D                    130UL
+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST     (PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST      (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTA3D_CMD_LAST)
+
+/* 131: RGX Breakpoint interface functions */
+#define PVRSRV_BRIDGE_BREAKPOINT                 131UL
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST  (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST   (PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST + PVRSRV_BRIDGE_BREAKPOINT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST  0
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST   (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST)
+#endif
+
+/* 132: RGX Debug/Misc interface functions */
+#define PVRSRV_BRIDGE_DEBUGMISC                  132UL
+#define PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_FIRST   (PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST    (PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_FIRST + PVRSRV_BRIDGE_DEBUGMISC_CMD_LAST)
+
+/* 133: RGX PDump interface functions */
+#define PVRSRV_BRIDGE_RGXPDUMP                   133UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST    (PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST     (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST    0
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST     (PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST)
+#endif
+
+/* 134: RGX HWPerf interface functions */
+#define PVRSRV_BRIDGE_RGXHWPERF                  134UL
+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST   (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST    (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST)
+
+/* 135: RGX Ray Tracing interface functions */
+#define PVRSRV_BRIDGE_RGXRAY                     135UL
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST      (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST       (PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXRAY_CMD_LAST)
+
+/* 136: RGX Register Configuration interface functions */
+#define PVRSRV_BRIDGE_REGCONFIG                  136UL
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST   (PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST    (PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST + PVRSRV_BRIDGE_REGCONFIG_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST   0
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST    (PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST)
+#endif
+
+/* 137: RGX Timer Query interface functions */
+#define PVRSRV_BRIDGE_TIMERQUERY                 137UL
+#define PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_FIRST  (PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST   (PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_FIRST + PVRSRV_BRIDGE_TIMERQUERY_CMD_LAST)
+
+/* 138: RGX kicksync interface */
+#define PVRSRV_BRIDGE_RGXKICKSYNC                138UL
+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST  (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST)
+
+/* 139: RGX signals interface */
+#define PVRSRV_BRIDGE_RGXSIGNALS                139UL
+#define PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST  (PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXSIGNALS_CMD_LAST)
+
+
+/* 140: RGX TQ2 interface functions */
+#define PVRSRV_BRIDGE_RGXTQ2                      140UL
+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST       (PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST        (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ2_CMD_LAST)
+
+#define PVRSRV_BRIDGE_RGX_LAST                   (PVRSRV_BRIDGE_RGXTQ2)
+#define PVRSRV_BRIDGE_RGX_DISPATCH_LAST          (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST)
+
+/* bit mask representing the enabled RGX bridges */
+
+static const IMG_UINT32 gui32RGXBridges =
+	  (1U << (PVRSRV_BRIDGE_RGXTQ - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(RGX_FEATURE_COMPUTE)
+	| (1U << (PVRSRV_BRIDGE_RGXCMP - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_RGXTA3D - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(SUPPORT_BREAKPOINT)
+	| (1U << (PVRSRV_BRIDGE_BREAKPOINT - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(SUPPORT_DEBUGMISC)
+	| (1U << (PVRSRV_BRIDGE_DEBUGMISC - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(PDUMP)
+	| (1U << (PVRSRV_BRIDGE_RGXPDUMP - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_RGXHWPERF - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(RGX_FEATURE_RAY_TRACING)
+	| (1U << (PVRSRV_BRIDGE_RGXRAY - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(SUPPORT_REGCONFIG)
+	| (1U << (PVRSRV_BRIDGE_REGCONFIG - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(SUPPORT_TIMERQUERY)
+	| (1U << (PVRSRV_BRIDGE_TIMERQUERY - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_RGXKICKSYNC - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(RGX_FEATURE_SIGNAL_SNOOPING)
+	| (1U << (PVRSRV_BRIDGE_RGXSIGNALS - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+	| (1U << (PVRSRV_BRIDGE_RGXTQ2 - PVRSRV_BRIDGE_RGX_FIRST));
+
+/* bit field representing which RGX bridge groups may optionally not
+ * be present in the server
+ */
+
+#define RGX_BRIDGES_OPTIONAL \
+	( \
+		0 /* no RGX bridges are currently optional */ \
+	)
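+
+/* Illustrative sketch (an assumption about the flat dispatch-table layout
+ * implied by the *_DISPATCH_FIRST/_LAST pairs above): a command's global
+ * dispatch slot is its group's first offset plus its per-group function ID.
+ */
+#if 0	/* example only */
+static INLINE IMG_UINT32 RGXBridgeDispatchIndex(IMG_UINT32 ui32GroupDispatchFirst,
+                                                IMG_UINT32 ui32FunctionID)
+{
+	/* e.g. RGXBridgeDispatchIndex(PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST, n)
+	 * lands in [PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST,
+	 *           PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST] for valid n */
+	return ui32GroupDispatchFirst + ui32FunctionID;
+}
+#endif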
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __RGX_BRIDGE_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif.h
new file mode 100644
index 0000000..203a911
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif.h
@@ -0,0 +1,538 @@
+/*************************************************************************/ /*!
+@File			rgx_fwif.h
+@Title          RGX firmware interface structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware interface structures used by srvinit and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_H__)
+#define __RGX_FWIF_H__
+
+#include "rgx_firmware_processor.h"
+#include "rgx_fwif_shared.h"
+
+/*************************************************************************/ /*!
+ Logging type
+*/ /**************************************************************************/
+#define RGXFWIF_LOG_TYPE_NONE			0x00000000
+#define RGXFWIF_LOG_TYPE_TRACE			0x00000001
+#define RGXFWIF_LOG_TYPE_GROUP_MAIN		0x00000002
+#define RGXFWIF_LOG_TYPE_GROUP_MTS		0x00000004
+#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP	0x00000008
+#define RGXFWIF_LOG_TYPE_GROUP_CSW		0x00000010
+#define RGXFWIF_LOG_TYPE_GROUP_BIF		0x00000020
+#define RGXFWIF_LOG_TYPE_GROUP_PM		0x00000040
+#define RGXFWIF_LOG_TYPE_GROUP_RTD		0x00000080
+#define RGXFWIF_LOG_TYPE_GROUP_SPM		0x00000100
+#define RGXFWIF_LOG_TYPE_GROUP_POW		0x00000200
+#define RGXFWIF_LOG_TYPE_GROUP_HWR		0x00000400
+#define RGXFWIF_LOG_TYPE_GROUP_HWP		0x00000800
+#define RGXFWIF_LOG_TYPE_GROUP_RPM		0x00001000
+#define RGXFWIF_LOG_TYPE_GROUP_DMA		0x00002000
+#define RGXFWIF_LOG_TYPE_GROUP_DEBUG	0x80000000
+#define RGXFWIF_LOG_TYPE_GROUP_MASK		0x80003FFE
+#define RGXFWIF_LOG_TYPE_MASK			0x80003FFF
+
+/* String used in pvrdebug -h output */
+#define RGXFWIF_LOG_GROUPS_STRING_LIST   "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,debug"
+
+/* Table entry to map log group strings to log type value */
+typedef struct {
+	const IMG_CHAR* pszLogGroupName;
+	IMG_UINT32      ui32LogGroupType;
+} RGXFWIF_LOG_GROUP_MAP_ENTRY;
+
+/*
+  Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup
+  table where needed. Keep log group names short, no more than 20 chars.
+*/
+#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none",    RGXFWIF_LOG_TYPE_NONE }, \
+                                         { "main",    RGXFWIF_LOG_TYPE_GROUP_MAIN }, \
+                                         { "mts",     RGXFWIF_LOG_TYPE_GROUP_MTS }, \
+                                         { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \
+                                         { "csw",     RGXFWIF_LOG_TYPE_GROUP_CSW }, \
+                                         { "bif",     RGXFWIF_LOG_TYPE_GROUP_BIF }, \
+                                         { "pm",      RGXFWIF_LOG_TYPE_GROUP_PM }, \
+                                         { "rtd",     RGXFWIF_LOG_TYPE_GROUP_RTD }, \
+                                         { "spm",     RGXFWIF_LOG_TYPE_GROUP_SPM }, \
+                                         { "pow",     RGXFWIF_LOG_TYPE_GROUP_POW }, \
+                                         { "hwr",     RGXFWIF_LOG_TYPE_GROUP_HWR }, \
+                                         { "hwp",     RGXFWIF_LOG_TYPE_GROUP_HWP }, \
+                                         { "rpm",     RGXFWIF_LOG_TYPE_GROUP_RPM }, \
+                                         { "dma",     RGXFWIF_LOG_TYPE_GROUP_DMA }, \
+                                         { "debug",   RGXFWIF_LOG_TYPE_GROUP_DEBUG }
+
+
+/* Used in print statements to display log group state, one %s per group defined */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC  "%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+
+/* Used in a print statement to display log group state, one per group */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types)  (((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN)	?("main ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_MTS)		?("mts ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP)	?("cleanup ")	:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_CSW)		?("csw ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_BIF)		?("bif ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_PM)		?("pm ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_RTD)		?("rtd ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_SPM)		?("spm ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_POW)		?("pow ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_HWR)		?("hwr ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_HWP)		?("hwp ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_RPM)		?("rpm ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_DMA)		?("dma ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG)	?("debug ")		:(""))
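+
+/* Usage sketch (an assumption, written as a host-side tool might use the two
+ * macros above; asLogGroups and ui32LogType are local names introduced here):
+ */
+#if 0	/* example only */
+	static const RGXFWIF_LOG_GROUP_MAP_ENTRY asLogGroups[] = {
+		RGXFWIF_LOG_GROUP_NAME_VALUE_MAP
+	};
+
+	/* one %s per group: prints e.g. "main mts hwr " for that mask */
+	printf("Enabled log groups: " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "\n",
+	       RGXFWIF_LOG_ENABLED_GROUPS_LIST(ui32LogType));
+#endif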
+
+
+/*! Logging function */
+typedef void (*PFN_RGXFW_LOG) (const IMG_CHAR* pszFmt, ...);
+
+
+/************************************************************************
+* RGX FW signature checks
+************************************************************************/
+#define RGXFW_SIG_BUFFER_SIZE_MIN       (1024)
+
+/*!
+ ******************************************************************************
+ * HWPERF
+ *****************************************************************************/
+/* Size bounds of the Firmware L1 HWPERF buffer, in KB. Accessed by the
+ * Firmware and host driver. */
+#define RGXFW_HWPERF_L1_SIZE_MIN        (16U)
+#define RGXFW_HWPERF_L1_SIZE_DEFAULT    PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB
+#define RGXFW_HWPERF_L1_SIZE_MAX        (12288U)
+
+/* This padding value must always be large enough to hold the biggest
+ * variable sized packet. */
+#define RGXFW_HWPERF_L1_PADDING_DEFAULT (RGX_HWPERF_MAX_PACKET_SIZE)
+
+
+/*!
+ ******************************************************************************
+ * Trace Buffer
+ *****************************************************************************/
+
+/*! Number of elements on each line when dumping the trace buffer */
+#define RGXFW_TRACE_BUFFER_LINESIZE	(30)
+
+/*! Total size of the RGXFWIF_TRACEBUF trace buffer, in dwords (needs to be a multiple of RGXFW_TRACE_BUFFER_LINESIZE) */
+#define RGXFW_TRACE_BUFFER_SIZE		(400*RGXFW_TRACE_BUFFER_LINESIZE)
+#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+#define RGXFW_THREAD_NUM 2
+#else
+#define RGXFW_THREAD_NUM 1
+#endif
+
+#define RGXFW_POLL_TYPE_SET 0x80000000
+
+typedef struct _RGXFWIF_ASSERTBUF_
+{
+	IMG_CHAR	szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
+	IMG_CHAR	szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
+	IMG_UINT32	ui32LineNum;
+} UNCACHED_ALIGN RGXFWIF_ASSERTBUF;
+
+typedef struct _RGXFWIF_TRACEBUF_SPACE_
+{
+	IMG_UINT32			ui32TracePointer;
+
+#if defined (RGX_FIRMWARE)
+	IMG_UINT32 *pui32RGXFWIfTraceBuffer;		/* To be used by firmware for writing into trace buffer */
+#else
+	RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer;
+#endif
+	IMG_PUINT32             pui32TraceBuffer;	/* To be used by host when reading from trace buffer */
+
+	RGXFWIF_ASSERTBUF	sAssertBuf;
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE;
+
+#define RGXFWIF_POW_STATES \
+  X(RGXFWIF_POW_OFF)			/* idle and handshaked with the host (ready to full power down) */ \
+  X(RGXFWIF_POW_ON)				/* running HW cmds */ \
+  X(RGXFWIF_POW_FORCED_IDLE)	/* forced idle */ \
+  X(RGXFWIF_POW_IDLE)			/* idle waiting for host handshake */
+
+typedef enum _RGXFWIF_POW_STATE_
+{
+#define X(NAME) NAME,
+	RGXFWIF_POW_STATES
+#undef X
+} RGXFWIF_POW_STATE;
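+
+/* Sketch (an assumption): a second expansion the X-macro list above makes
+ * possible, generating matching state-name strings for debug output.
+ */
+#if 0	/* example only */
+static const IMG_CHAR *const apszPowStateNames[] = {
+#define X(NAME) #NAME,
+	RGXFWIF_POW_STATES
+#undef X
+};
+#endif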
+
+/* Firmware HWR states */
+#define RGXFWIF_HWR_HARDWARE_OK		(0x1 << 0)	/*!< Tells if the HW state is ok or locked up */
+#define RGXFWIF_HWR_ANALYSIS_DONE	(0x1 << 2)	/*!< Tells if the analysis of a GPU lockup has already been performed */
+#define RGXFWIF_HWR_GENERAL_LOCKUP	(0x1 << 3)	/*!< Tells if a DM unrelated lockup has been detected */
+#define RGXFWIF_HWR_DM_RUNNING_OK	(0x1 << 4)	/*!< Tells if at least one DM is running without being close to a lockup */
+#define RGXFWIF_HWR_DM_STALLING		(0x1 << 5)	/*!< Tells if at least one DM is close to lockup */
+typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS;
+
+/* Firmware per-DM HWR states */
+#define RGXFWIF_DM_STATE_WORKING 					(0x00)		/*!< DM is working if all flags are cleared */
+#define RGXFWIF_DM_STATE_READY_FOR_HWR 				(0x1 << 0)	/*!< DM is idle and ready for HWR */
+#define RGXFWIF_DM_STATE_NEEDS_SKIP					(0x1 << 2)	/*!< DM need to skip to next cmd before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP			(0x1 << 3)	/*!< DM need partial render cleanup before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR			(0x1 << 4)	/*!< DM need to increment Recovery Count once fully recovered */
+#define RGXFWIF_DM_STATE_GUILTY_LOCKUP				(0x1 << 5)	/*!< DM was identified as locking up and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP			(0x1 << 6)	/*!< DM was innocently affected by another lockup which caused HWR */
+#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING			(0x1 << 7)	/*!< DM was identified as over-running and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING		(0x1 << 8)	/*!< DM was innocently affected by another DM over-running which caused HWR */
+
+/* Per-OSid States */
+#define RGXFW_OS_STATE_ACTIVE_OS						(1 << 0)    /*!< Non active operating systems should not be served by the FW */
+#define RGXFW_OS_STATE_FREELIST_OK						(1 << 1)    /*!< Pending freelist reconstruction from that particular OS */
+#define RGXFW_OS_STATE_OFFLOADING						(1 << 2)    /*!< Transient state while all the OS resources in the FW are cleaned up */
+#define RGXFW_OS_STATE_GROW_REQUEST_PENDING				(1 << 3)    /*!< Signifies whether a request to grow a freelist is pending completion */
+
+typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS;
+
+typedef struct _RGXFWIF_TRACEBUF_
+{
+	IMG_UINT32				ui32LogType;
+	volatile RGXFWIF_POW_STATE		ePowState;
+	RGXFWIF_TRACEBUF_SPACE	sTraceBuf[RGXFW_THREAD_NUM];
+
+	IMG_UINT32				aui32HwrDmLockedUpCount[RGXFWIF_DM_DEFAULT_MAX];
+	IMG_UINT32				aui32HwrDmOverranCount[RGXFWIF_DM_DEFAULT_MAX];
+	IMG_UINT32				aui32HwrDmRecoveredCount[RGXFWIF_DM_DEFAULT_MAX];
+	IMG_UINT32				aui32HwrDmFalseDetectCount[RGXFWIF_DM_DEFAULT_MAX];
+	IMG_UINT32				ui32HwrCounter;
+
+	IMG_UINT32				aui32CrPollAddr[RGXFW_THREAD_NUM];
+	IMG_UINT32				aui32CrPollMask[RGXFW_THREAD_NUM];
+
+	RGXFWIF_HWR_STATEFLAGS		ui32HWRStateFlags;
+	RGXFWIF_HWR_RECOVERYFLAGS	aui32HWRRecoveryFlags[RGXFWIF_DM_DEFAULT_MAX];
+
+	volatile IMG_UINT32		ui32HWPerfRIdx;
+	volatile IMG_UINT32		ui32HWPerfWIdx;
+	volatile IMG_UINT32		ui32HWPerfWrapCount;
+	IMG_UINT32				ui32HWPerfSize;       /* Constant after setup, needed in FW */
+	IMG_UINT32				ui32HWPerfDropCount;  /* The number of times the FW drops a packet due to buffer full */
+
+	/* These next three items are only valid at runtime when the FW is built
+	 * with RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined
+	 * in rgxfw_hwperf.c */
+	IMG_UINT32				ui32HWPerfUt;         /* Buffer utilisation, high watermark of bytes in use */
+	IMG_UINT32				ui32FirstDropOrdinal; /* The ordinal of the first packet the FW dropped */
+	IMG_UINT32				ui32LastDropOrdinal;  /* The ordinal of the last packet the FW dropped */
+
+	volatile IMG_UINT32			aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads */
+	IMG_UINT32				ui32KCCBCmdsExecuted;
+	IMG_UINT64 RGXFW_ALIGN			ui64StartIdleTime;
+	IMG_UINT32				ui32PowMonEnergy;	/* Non-volatile power monitor energy count */
+
+#define RGXFWIF_MAX_PCX 16
+	IMG_UINT32				ui32T1PCX[RGXFWIF_MAX_PCX];
+	IMG_UINT32				ui32T1PCXWOff;
+
+	IMG_UINT32                  ui32OSStateFlags[RGXFW_NUM_OS];		/*!< State flags for each Operating System */
+
+	IMG_UINT32				ui32MMUFlushCounter;
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF;
+
+
+/*!
+ ******************************************************************************
+ * GPU Utilisation
+ *****************************************************************************/
+#define RGXFWIF_GPU_STATS_MAX_VALUE_OF_STATE  10000
+
+#define RGXFWIF_GPU_UTIL_STATE_ACTIVE_LOW     (0U)
+#define RGXFWIF_GPU_UTIL_STATE_IDLE           (1U)
+#define RGXFWIF_GPU_UTIL_STATE_ACTIVE_HIGH    (2U)
+#define RGXFWIF_GPU_UTIL_STATE_BLOCKED        (3U)
+#define RGXFWIF_GPU_UTIL_STATE_NUM            (4U)
+
+#define RGXFWIF_GPU_UTIL_TIME_MASK            IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)
+#define RGXFWIF_GPU_UTIL_STATE_MASK           IMG_UINT64_C(0x0000000000000003)
+
+#define RGXFWIF_GPU_UTIL_GET_TIME(word)       ((word) & RGXFWIF_GPU_UTIL_TIME_MASK)
+#define RGXFWIF_GPU_UTIL_GET_STATE(word)      ((word) & RGXFWIF_GPU_UTIL_STATE_MASK)
+
+/* The OS timestamps computed by the FW are approximations of the real time,
+ * which means they could be slightly behind or ahead of the real timer on the
+ * Host. In some cases we perform subtractions between FW-approximated
+ * timestamps and real OS timestamps, so we need a form of protection against
+ * negative results if, for instance, the FW timestamp is a bit ahead of time.
+ */
+#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \
+	((newtime) > (oldtime) ? ((newtime) - (oldtime)) : 0)
+
+#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \
+	(RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state))
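+
+/* Worked example (values are arbitrary): the low two bits of a utilisation
+ * word carry the state and the remaining bits the timestamp, so packing then
+ * unpacking round-trips both fields.
+ */
+#if 0	/* example only */
+	IMG_UINT64 ui64Word =
+		RGXFWIF_GPU_UTIL_MAKE_WORD(ui64OSTimestamp, /* hypothetical timestamp */
+		                           RGXFWIF_GPU_UTIL_STATE_ACTIVE_HIGH);
+
+	/* RGXFWIF_GPU_UTIL_GET_STATE(ui64Word) == RGXFWIF_GPU_UTIL_STATE_ACTIVE_HIGH */
+	/* RGXFWIF_GPU_UTIL_GET_TIME(ui64Word)  == (ui64OSTimestamp & ~3) */
+#endif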
+
+
+/* The timer correlation array must be big enough to ensure old entries won't be
+ * overwritten before all the HWPerf events linked to those entries are processed
+ * by the MISR. The update frequency of this array depends on how fast the system
+ * can change state (basically how small the APM latency is) and perform DVFS transitions.
+ *
+ * The minimum size is 2 (not 1) to avoid race conditions between the FW reading
+ * an entry while the Host is updating it. With 2 entries, in the worst case the
+ * FW reads old data, which is still acceptable provided the Host is updating
+ * the timer correlation at that time.
+ */
+#define RGXFWIF_TIME_CORR_ARRAY_SIZE            256
+#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount)  ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE)
+
+/* Make sure the timer correlation array size is a power of 2 */
+static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1)) == 0,
+			  "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two");
+
+typedef struct _RGXFWIF_GPU_UTIL_FWCB_
+{
+	RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE];
+	IMG_UINT32        ui32TimeCorrSeqCount;
+
+	/* Last GPU state + OS time of the last state update */
+	IMG_UINT64 RGXFW_ALIGN ui64LastWord;
+
+	/* Counters for the amount of time the GPU was active/idle/blocked */
+	IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM];
+} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB;
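+
+/* Update sketch (an assumption about the Host-side sequence, based on the
+ * race discussion above): the Host bumps the sequence count first and then
+ * writes the entry that count now indexes, so the FW, reading an older index,
+ * only ever sees fully-written entries.
+ */
+#if 0	/* example only */
+	psGpuUtilFWCB->ui32TimeCorrSeqCount++;
+	psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(
+			psGpuUtilFWCB->ui32TimeCorrSeqCount)] = sNewTimeCorr;
+	/* sNewTimeCorr is a hypothetical, caller-provided RGXFWIF_TIME_CORR */
+#endif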
+
+
+/*!
+ ******************************************************************************
+ * HWR Data
+ *****************************************************************************/
+typedef enum _RGX_HWRTYPE_
+{
+	RGX_HWRTYPE_UNKNOWNFAILURE  = 0,
+	RGX_HWRTYPE_OVERRUN         = 1,
+	RGX_HWRTYPE_POLLFAILURE     = 2,
+	RGX_HWRTYPE_BIF0FAULT       = 3,
+	RGX_HWRTYPE_BIF1FAULT       = 4,
+	RGX_HWRTYPE_TEXASBIF0FAULT	= 5,
+	RGX_HWRTYPE_DPXMMUFAULT		= 6,
+	RGX_HWRTYPE_MMUFAULT        = 7,
+	RGX_HWRTYPE_MMUMETAFAULT    = 8,
+} RGX_HWRTYPE;
+
+#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) ((eHWRType == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1 )
+
+#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((eHWRType == RGX_HWRTYPE_BIF0FAULT      ||       \
+                                                   eHWRType == RGX_HWRTYPE_BIF1FAULT      ||       \
+                                                   eHWRType == RGX_HWRTYPE_TEXASBIF0FAULT ||       \
+                                                   eHWRType == RGX_HWRTYPE_MMUFAULT       ||       \
+                                                   eHWRType == RGX_HWRTYPE_MMUMETAFAULT) ? 1 : 0 )
+
+typedef struct _RGX_BIFINFO_
+{
+	IMG_UINT64	RGXFW_ALIGN		ui64BIFReqStatus;
+	IMG_UINT64	RGXFW_ALIGN		ui64BIFMMUStatus;
+	IMG_UINT64	RGXFW_ALIGN		ui64PCAddress; /*!< phys address of the page catalogue */
+} RGX_BIFINFO;
+
+typedef struct _RGX_MMUINFO_
+{
+	IMG_UINT64	RGXFW_ALIGN		ui64MMUStatus;
+	IMG_UINT64	RGXFW_ALIGN		ui64PCAddress; /*!< phys address of the page catalogue */
+} RGX_MMUINFO;
+
+typedef struct _RGX_POLLINFO_
+{
+	IMG_UINT32	ui32ThreadNum;
+	IMG_UINT32 	ui32CrPollAddr;
+	IMG_UINT32 	ui32CrPollMask;
+	IMG_UINT32 	ui32CrPollLastValue;
+} UNCACHED_ALIGN RGX_POLLINFO;
+
+typedef struct _RGX_HWRINFO_
+{
+	union
+	{
+		RGX_BIFINFO  sBIFInfo;
+		RGX_MMUINFO  sMMUInfo;
+		RGX_POLLINFO sPollInfo;
+	} uHWRData;
+
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimer;
+	IMG_UINT64 RGXFW_ALIGN ui64OSTimer;
+	IMG_UINT32             ui32FrameNum;
+	IMG_UINT32             ui32PID;
+	IMG_UINT32             ui32ActiveHWRTData;
+	IMG_UINT32             ui32HWRNumber;
+	IMG_UINT32             ui32EventStatus;
+	IMG_UINT32             ui32HWRRecoveryFlags;
+	RGX_HWRTYPE            eHWRType;
+	RGXFWIF_DM             eDM;
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick;
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart;
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish;
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady;
+} UNCACHED_ALIGN RGX_HWRINFO;
+
+#define RGXFWIF_HWINFO_MAX_FIRST 8							/* Number of first HWR logs recorded (never overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX_LAST 8							/* Number of latest HWR logs (older logs are overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST)	/* Total number of HWR logs stored in a buffer */
+#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1)	/* Index of the last log in the HWR log buffer */
+typedef struct _RGXFWIF_HWRINFOBUF_
+{
+	RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX];
+
+	IMG_UINT32	ui32FirstCrPollAddr[RGXFW_THREAD_NUM];
+	IMG_UINT32	ui32FirstCrPollMask[RGXFW_THREAD_NUM];
+	IMG_UINT32	ui32FirstCrPollLastValue[RGXFW_THREAD_NUM];
+	IMG_UINT32	ui32WriteIndex;
+	IMG_UINT32	ui32DDReqCount;
+} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF;
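+
+/* Index policy sketch (an assumption mirroring the split described above: the
+ * first RGXFWIF_HWINFO_MAX_FIRST logs are kept forever, while later logs wrap
+ * around the remaining RGXFWIF_HWINFO_MAX_LAST slots):
+ */
+#if 0	/* example only */
+static INLINE IMG_UINT32 RGXHWRInfoNextWriteIndex(IMG_UINT32 ui32WriteIndex)
+{
+	if (ui32WriteIndex < RGXFWIF_HWINFO_MAX_FIRST)
+	{
+		return ui32WriteIndex + 1;	/* still filling the never-overwritten slots */
+	}
+	/* wrap within the circular region [MAX_FIRST, MAX - 1] */
+	return RGXFWIF_HWINFO_MAX_FIRST +
+	       ((ui32WriteIndex + 1 - RGXFWIF_HWINFO_MAX_FIRST) %
+	        RGXFWIF_HWINFO_MAX_LAST);
+}
+#endif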
+
+
+#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN		(1)
+#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN		(2)
+#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN		(3)
+#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN	(4)
+
+/*!
+ ******************************************************************************
+ * RGX firmware Init Config Data
+ *****************************************************************************/
+#define RGXFWIF_INICFG_CTXSWITCH_TA_EN				(0x1 << 0)
+#define RGXFWIF_INICFG_CTXSWITCH_3D_EN				(0x1 << 1)
+#define RGXFWIF_INICFG_CTXSWITCH_CDM_EN				(0x1 << 2)
+#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND			(0x1 << 3)
+#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN			(0x1 << 4)
+#define RGXFWIF_INICFG_USE_EXTENDED					(0x1 << 5)
+#define RGXFWIF_INICFG_POW_RASCALDUST				(0x1 << 6)
+#define RGXFWIF_INICFG_HWPERF_EN					(0x1 << 7)
+#define RGXFWIF_INICFG_HWR_EN						(0x1 << 8)
+#define RGXFWIF_INICFG_CHECK_MLIST_EN				(0x1 << 9)
+#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN 		(0x1 << 10)
+#define RGXFWIF_INICFG_POLL_COUNTERS_EN				(0x1 << 11)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX		(RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX << 12)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE	(RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE << 12)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST		(RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST << 12)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_CLRMSK	(0xFFFFCFFFU)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT		(12)
+#define RGXFWIF_INICFG_SHG_BYPASS_EN				(0x1 << 14)
+#define RGXFWIF_INICFG_RTU_BYPASS_EN				(0x1 << 15)
+#define RGXFWIF_INICFG_REGCONFIG_EN					(0x1 << 16)
+#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY		(0x1 << 17)
+#define RGXFWIF_INICFG_HWP_DISABLE_FILTER			(0x1 << 18)
+#define RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN			(0x1 << 19)
+#define RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN		(0x1 << 20)
+#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP			(0x1 << 21)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT		(22)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST		(RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM		(RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW		(RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY	(RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK		(0x7 << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_METAT1_SHIFT					(25)
+#define RGXFWIF_INICFG_METAT1_MAIN					(RGX_META_T1_MAIN  << RGXFWIF_INICFG_METAT1_SHIFT)
+#define RGXFWIF_INICFG_METAT1_DUMMY					(RGX_META_T1_DUMMY << RGXFWIF_INICFG_METAT1_SHIFT)
+#define RGXFWIF_INICFG_METAT1_ENABLED				(RGXFWIF_INICFG_METAT1_MAIN | RGXFWIF_INICFG_METAT1_DUMMY)
+#define RGXFWIF_INICFG_METAT1_MASK					(RGXFWIF_INICFG_METAT1_ENABLED >> RGXFWIF_INICFG_METAT1_SHIFT)
+#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER		(0x1 << 27)
+#define RGXFWIF_INICFG_WORKEST_V1					(0x1 << 28)
+#define RGXFWIF_INICFG_WORKEST_V2					(0x1 << 29)
+#define RGXFWIF_INICFG_PDVFS_V1						(0x1 << 30)
+#define RGXFWIF_INICFG_PDVFS_V2						(0x1 << 31)
+#define RGXFWIF_INICFG_ALL							(0xFFFFFFDFU)
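+
+/* Composition sketch (flag choice is arbitrary): init config words are
+ * OR-combined bit fields, with the context-switch profile occupying the
+ * 3-bit field at RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT.
+ */
+#if 0	/* example only */
+	IMG_UINT32 ui32ConfigFlags = RGXFWIF_INICFG_CTXSWITCH_TA_EN |
+	                             RGXFWIF_INICFG_CTXSWITCH_3D_EN |
+	                             RGXFWIF_INICFG_HWR_EN |
+	                             RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM;
+
+	/* recover the profile field (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN here) */
+	IMG_UINT32 ui32Profile = (ui32ConfigFlags &
+	                          RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK) >>
+	                         RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT;
+#endif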
+
+#define RGXFWIF_INICFG_EXT_USE_EXTENDED				(0x1 << 31)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TDM			(0x1 <<  0)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TA			(0x1 <<  1)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_3D			(0x1 <<  2)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_CDM			(0x1 <<  3)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_SHG			(0x1 <<  4)
+
+#define RGXFWIF_SRVCFG_DISABLE_PDP_EN 		(0x1 << 31)
+#define RGXFWIF_SRVCFG_ALL					(0x80000000U)
+#define RGXFWIF_FILTCFG_TRUNCATE_HALF		(0x1 << 3)
+#define RGXFWIF_FILTCFG_TRUNCATE_INT		(0x1 << 2)
+#define RGXFWIF_FILTCFG_NEW_FILTER_MODE		(0x1 << 1)
+
+#define RGXFWIF_INICFG_CTXSWITCH_DM_ALL		(RGXFWIF_INICFG_CTXSWITCH_TA_EN | \
+											 RGXFWIF_INICFG_CTXSWITCH_3D_EN | \
+											 RGXFWIF_INICFG_CTXSWITCH_CDM_EN)
+
+#define RGXFWIF_INICFG_CTXSWITCH_CLRMSK		~(RGXFWIF_INICFG_CTXSWITCH_DM_ALL | \
+											 RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \
+											 RGXFWIF_INICFG_CTXSWITCH_SRESET_EN)
+
+typedef enum
+{
+	RGX_ACTIVEPM_FORCE_OFF = 0,
+	RGX_ACTIVEPM_FORCE_ON = 1,
+	RGX_ACTIVEPM_DEFAULT = 2
+} RGX_ACTIVEPM_CONF;
+
+typedef enum
+{
+	RGX_RD_POWER_ISLAND_FORCE_OFF = 0,
+	RGX_RD_POWER_ISLAND_FORCE_ON = 1,
+	RGX_RD_POWER_ISLAND_DEFAULT = 2
+} RGX_RD_POWER_ISLAND_CONF;
+
+typedef enum
+{
+	RGX_META_T1_OFF   = 0x0,           /*!< No thread 1 running (unless 2nd thread is used for HWPerf) */
+	RGX_META_T1_MAIN  = 0x1,           /*!< Run the main thread 0 code on thread 1 (and vice versa if 2nd thread is used for HWPerf) */
+	RGX_META_T1_DUMMY = 0x2            /*!< Run dummy test code on thread 1 */
+} RGX_META_T1_CONF;
+
+/*!
+ ******************************************************************************
+ * Querying DM state
+ *****************************************************************************/
+
+typedef enum _RGXFWIF_DM_STATE_
+{
+	RGXFWIF_DM_STATE_NORMAL			= 0,
+	RGXFWIF_DM_STATE_LOCKEDUP		= 1
+} RGXFWIF_DM_STATE;
+
+typedef struct
+{
+	IMG_UINT16  ui16RegNum;				/*!< Register number */
+	IMG_UINT16  ui16IndirectRegNum;		/*!< Indirect register number (or 0 if not used) */
+	IMG_UINT16  ui16IndirectStartVal;	/*!< Start value for indirect register */
+	IMG_UINT16  ui16IndirectEndVal;		/*!< End value for indirect register */
+} RGXFW_REGISTER_LIST;
+
+#endif /*  __RGX_FWIF_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_alignchecks.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_alignchecks.h
new file mode 100644
index 0000000..936e187
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_alignchecks.h
@@ -0,0 +1,190 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX fw interface alignment checks
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Checks to avoid misalignment in RGX fw data structures
+                shared with the host
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_ALIGNCHECKS_H__)
+#define __RGX_FWIF_ALIGNCHECKS_H__
+
+/* for the offsetof macro */
+#include <stddef.h>
+
+/*!
+ ******************************************************************************
+ * Alignment UM/FW checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_UM_MAX 128
+
+#define RGXFW_ALIGN_CHECKS_INIT0						\
+		sizeof(RGXFWIF_TRACEBUF),						\
+		offsetof(RGXFWIF_TRACEBUF, ui32LogType),		\
+		offsetof(RGXFWIF_TRACEBUF, sTraceBuf),			\
+		offsetof(RGXFWIF_TRACEBUF, aui32HwrDmLockedUpCount),	\
+		offsetof(RGXFWIF_TRACEBUF, aui32HwrDmOverranCount),	\
+		offsetof(RGXFWIF_TRACEBUF, aui32HwrDmRecoveredCount),	\
+		offsetof(RGXFWIF_TRACEBUF, aui32HwrDmFalseDetectCount),	\
+														\
+		/* RGXFWIF_CMDTA checks */						\
+		sizeof(RGXFWIF_CMDTA),							\
+		offsetof(RGXFWIF_CMDTA, sTARegs),				\
+														\
+		/* RGXFWIF_CMD3D checks */						\
+		sizeof(RGXFWIF_CMD3D),							\
+		offsetof(RGXFWIF_CMD3D, s3DRegs),				\
+														\
+		/* RGXFWIF_CMDTRANSFER checks */                \
+		sizeof(RGXFWIF_CMDTRANSFER),                    \
+		offsetof(RGXFWIF_CMDTRANSFER, sTransRegs),      \
+														\
+														\
+		/* RGXFWIF_CMD_COMPUTE checks */				\
+		sizeof(RGXFWIF_CMD_COMPUTE),					\
+		offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs),		\
+									\
+		sizeof(RGXFWIF_FREELIST), \
+		offsetof(RGXFWIF_FREELIST, psFreeListDevVAddr),\
+		offsetof(RGXFWIF_FREELIST, ui32MaxPages),\
+		offsetof(RGXFWIF_FREELIST, ui32CurrentPages),\
+		offsetof(RGXFWIF_FREELIST, ui32HWRCounter),\
+									\
+		sizeof(RGXFWIF_RENDER_TARGET),\
+		offsetof(RGXFWIF_RENDER_TARGET, psVHeapTableDevVAddr), \
+							\
+		sizeof(RGXFWIF_HWRTDATA), \
+		offsetof(RGXFWIF_HWRTDATA, psPMMListDevVAddr), \
+		offsetof(RGXFWIF_HWRTDATA, apsFreeLists),\
+		offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase), \
+		offsetof(RGXFWIF_HWRTDATA, psParentRenderTarget), \
+		offsetof(RGXFWIF_HWRTDATA, eState), \
+		offsetof(RGXFWIF_HWRTDATA, ui32NumPartialRenders), \
+							\
+		sizeof(RGXFWIF_HWPERF_CTL_BLK), \
+		offsetof(RGXFWIF_HWPERF_CTL_BLK, aui64CounterCfg), \
+							\
+		sizeof(RGXFWIF_REGISTER_GUESTOS_OFFSETS), \
+		offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, ui32OSid), \
+		offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, sKCCBCtl), \
+		offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, sKCCB), \
+		offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, sFirmwareCCBCtl), \
+		offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, sFirmwareCCB), \
+\
+		sizeof(RGXFWIF_HWPERF_CTL), \
+		offsetof(RGXFWIF_HWPERF_CTL, SelCntr)
+
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGXFW_ALIGN_CHECKS_INIT1                           \
+		RGXFW_ALIGN_CHECKS_INIT0,                          \
+		sizeof(RGXFWIF_RPM_FREELIST),                      \
+		offsetof(RGXFWIF_RPM_FREELIST, sFreeListDevVAddr), \
+		offsetof(RGXFWIF_RPM_FREELIST, ui32MaxPages),      \
+		offsetof(RGXFWIF_RPM_FREELIST, ui32CurrentPages),  \
+		offsetof(RGXFWIF_RPM_FREELIST, ui32GrowPages)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT1		RGXFW_ALIGN_CHECKS_INIT0
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+
+#if defined(RGX_FEATURE_TLA)
+#define RGXFW_ALIGN_CHECKS_INIT2                   \
+		RGXFW_ALIGN_CHECKS_INIT1,                  \
+		/* RGXFWIF_CMD2D checks */                 \
+		sizeof(RGXFWIF_CMD2D),                     \
+		offsetof(RGXFWIF_CMD2D, s2DRegs)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT2		RGXFW_ALIGN_CHECKS_INIT1
+#endif	/* RGX_FEATURE_TLA */
+
+
+#if defined(RGX_FEATURE_FASTRENDER_DM)
+#define RGXFW_ALIGN_CHECKS_INIT                    \
+		RGXFW_ALIGN_CHECKS_INIT2,                  \
+		/* RGXFWIF_CMDTDM checks */                \
+		sizeof(RGXFWIF_CMDTDM),                    \
+		offsetof(RGXFWIF_CMDTDM, sTDMRegs)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT		RGXFW_ALIGN_CHECKS_INIT2
+#endif /* ! RGX_FEATURE_FASTRENDER_DM */
+
+
+
+/*!
+ ******************************************************************************
+ * Alignment KM checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_INIT_KM                                           \
+		sizeof(RGXFWIF_INIT),                                        \
+		offsetof(RGXFWIF_INIT, sFaultPhysAddr),                      \
+		offsetof(RGXFWIF_INIT, sPDSExecBase),                        \
+		offsetof(RGXFWIF_INIT, sUSCExecBase),                        \
+		offsetof(RGXFWIF_INIT, psKernelCCBCtl),                      \
+		offsetof(RGXFWIF_INIT, psKernelCCB),                         \
+		offsetof(RGXFWIF_INIT, psFirmwareCCBCtl),                    \
+		offsetof(RGXFWIF_INIT, psFirmwareCCB),                       \
+		offsetof(RGXFWIF_INIT, asSigBufCtl),                         \
+		offsetof(RGXFWIF_INIT, sTraceBufCtl),                        \
+		offsetof(RGXFWIF_INIT, sRGXCompChecks),                      \
+		                                                             \
+		/* RGXFWIF_FWRENDERCONTEXT checks */                         \
+		sizeof(RGXFWIF_FWRENDERCONTEXT),                             \
+		offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),               \
+		offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),               \
+		                                                             \
+		sizeof(RGXFWIF_FWCOMMONCONTEXT),                             \
+		offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext),           \
+		offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode),                 \
+		offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB),                    \
+		                                                             \
+		sizeof(RGXFWIF_MMUCACHEDATA),                                \
+		offsetof(RGXFWIF_MMUCACHEDATA,psMemoryContext),              \
+		offsetof(RGXFWIF_MMUCACHEDATA,ui32Flags),                    \
+		offsetof(RGXFWIF_MMUCACHEDATA,sMMUCacheSync),                \
+		offsetof(RGXFWIF_MMUCACHEDATA,ui16MMUCacheSyncUpdateValue)
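+
+/* Usage sketch (an assumption about how the initialiser macros above might be
+ * consumed; paui32FWAlignChecks stands for a hypothetical FW-provided array):
+ */
+#if 0	/* example only */
+	static const IMG_UINT32 aui32AlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM };
+	IMG_UINT32 i;
+
+	/* each entry is a sizeof()/offsetof() value both sides must agree on;
+	 * any mismatch indicates a host/FW structure misalignment */
+	for (i = 0; i < sizeof(aui32AlignChecksKM)/sizeof(aui32AlignChecksKM[0]); i++)
+	{
+		if (aui32AlignChecksKM[i] != paui32FWAlignChecks[i])
+		{
+			/* report the disagreement at index i */
+		}
+	}
+#endif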
+
+
+#endif /*  __RGX_FWIF_ALIGNCHECKS_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_alignchecks.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_hwperf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_hwperf.h
new file mode 100644
index 0000000..155e44a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_hwperf.h
@@ -0,0 +1,242 @@
+/*************************************************************************/ /*!
+@File           rgx_fwif_hwperf.h
+@Title          RGX HWPerf support
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared header between RGX firmware and Init process
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_FWIF_HWPERF_H
+#define RGX_FWIF_HWPERF_H
+
+#include "rgx_fwif_shared.h"
+#include "rgx_hwperf.h"
+#include "rgxdefs_km.h"
+
+
+/*****************************************************************************/
+
+/* Structure to hold a block's parameters for passing between the BG context
+ * and the IRQ context when applying a configuration request. */
+typedef struct _RGXFWIF_HWPERF_CTL_BLK_
+{
+	IMG_BOOL                bValid;
+	IMG_BOOL                bEnabled;
+	IMG_UINT32              eBlockID;
+	IMG_UINT32              uiCounterMask;
+	IMG_UINT64  RGXFW_ALIGN aui64CounterCfg[RGX_CNTBLK_COUNTERS_MAX];
+}  RGXFWIF_HWPERF_CTL_BLK;
+
+/* Structure used to hold the configuration of the non-mux counters blocks */
+typedef struct _RGXFW_HWPERF_SELECT_
+{
+	IMG_UINT32            ui32NumSelectedCounters;
+	IMG_UINT32            aui32SelectedCountersIDs[RGX_HWPERF_MAX_CUSTOM_CNTRS];
+} RGXFW_HWPERF_SELECT;
+
+/* Structure to hold the whole configuration request details for all blocks
+ * The block masks and counts are used to optimise reading of this data. */
+typedef struct _RGXFWIF_HWPERF_CTL_
+{
+	IMG_BOOL                           bResetOrdinal;
+
+	IMG_UINT32                         ui32SelectedCountersBlockMask;
+	RGXFW_HWPERF_SELECT RGXFW_ALIGN    SelCntr[RGX_HWPERF_MAX_CUSTOM_BLKS];
+
+	IMG_UINT32                         ui32EnabledBlksCount;
+	RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[RGX_HWPERF_MAX_DEFINED_BLKS];
+} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL;
+
+/* NOTE: The switch statement in this function must be kept in step with the
+ * RGX_HWPERF_CNTBLK_ID enumeration defined in rgx_hwperf.h; a mismatch may
+ * trigger ASSERTs.
+ * The function is a hash lookup mapping a block ID to a handle on that
+ * block's configuration entry in the global store.
+ */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(rgxfw_hwperf_get_block_ctl)
+#endif
+static INLINE RGXFWIF_HWPERF_CTL_BLK* rgxfw_hwperf_get_block_ctl(
+		RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData)
+{
+	IMG_INT32 i32Idx = -1;
+
+	/* Hash the block ID into a control configuration array index */
+	switch(eBlockID)
+	{
+		case RGX_CNTBLK_ID_TA:
+		case RGX_CNTBLK_ID_RASTER:
+		case RGX_CNTBLK_ID_HUB:
+		case RGX_CNTBLK_ID_TORNADO:
+		case RGX_CNTBLK_ID_JONES:
+		case RGX_CNTBLK_ID_BF:
+		case RGX_CNTBLK_ID_BT:
+		case RGX_CNTBLK_ID_RT:
+		case RGX_CNTBLK_ID_SH:
+		{
+			i32Idx = eBlockID;
+			break;
+		}
+		case RGX_CNTBLK_ID_TPU_MCU0:
+		case RGX_CNTBLK_ID_TPU_MCU1:
+		case RGX_CNTBLK_ID_TPU_MCU2:
+		case RGX_CNTBLK_ID_TPU_MCU3:
+		case RGX_CNTBLK_ID_TPU_MCU4:
+		case RGX_CNTBLK_ID_TPU_MCU5:
+		case RGX_CNTBLK_ID_TPU_MCU6:
+		case RGX_CNTBLK_ID_TPU_MCU7:
+		{
+			i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		case RGX_CNTBLK_ID_USC0:
+		case RGX_CNTBLK_ID_USC1:
+		case RGX_CNTBLK_ID_USC2:
+		case RGX_CNTBLK_ID_USC3:
+		case RGX_CNTBLK_ID_USC4:
+		case RGX_CNTBLK_ID_USC5:
+		case RGX_CNTBLK_ID_USC6:
+		case RGX_CNTBLK_ID_USC7:
+		case RGX_CNTBLK_ID_USC8:
+		case RGX_CNTBLK_ID_USC9:
+		case RGX_CNTBLK_ID_USC10:
+		case RGX_CNTBLK_ID_USC11:
+		case RGX_CNTBLK_ID_USC12:
+		case RGX_CNTBLK_ID_USC13:
+		case RGX_CNTBLK_ID_USC14:
+		case RGX_CNTBLK_ID_USC15:
+		{
+			i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		case RGX_CNTBLK_ID_TEXAS0:
+		case RGX_CNTBLK_ID_TEXAS1:
+		case RGX_CNTBLK_ID_TEXAS2:
+		case RGX_CNTBLK_ID_TEXAS3:
+		case RGX_CNTBLK_ID_TEXAS4:
+		case RGX_CNTBLK_ID_TEXAS5:
+		case RGX_CNTBLK_ID_TEXAS6:
+		case RGX_CNTBLK_ID_TEXAS7:
+		{
+			i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		case RGX_CNTBLK_ID_RASTER0:
+		case RGX_CNTBLK_ID_RASTER1:
+		case RGX_CNTBLK_ID_RASTER2:
+		case RGX_CNTBLK_ID_RASTER3:
+		{
+			i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+						RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		case RGX_CNTBLK_ID_BLACKPEARL0:
+		case RGX_CNTBLK_ID_BLACKPEARL1:
+		case RGX_CNTBLK_ID_BLACKPEARL2:
+		case RGX_CNTBLK_ID_BLACKPEARL3:
+		{
+			i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+						RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		case RGX_CNTBLK_ID_PBE0:
+		case RGX_CNTBLK_ID_PBE1:
+		case RGX_CNTBLK_ID_PBE2:
+		case RGX_CNTBLK_ID_PBE3:
+		case RGX_CNTBLK_ID_PBE4:
+		case RGX_CNTBLK_ID_PBE5:
+		case RGX_CNTBLK_ID_PBE6:
+		case RGX_CNTBLK_ID_PBE7:
+		case RGX_CNTBLK_ID_PBE8:
+		case RGX_CNTBLK_ID_PBE9:
+		case RGX_CNTBLK_ID_PBE10:
+		case RGX_CNTBLK_ID_PBE11:
+		case RGX_CNTBLK_ID_PBE12:
+		case RGX_CNTBLK_ID_PBE13:
+		case RGX_CNTBLK_ID_PBE14:
+		case RGX_CNTBLK_ID_PBE15:
+		{
+			i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+						RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+						RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		case RGX_CNTBLK_ID_BX_TU0:
+		case RGX_CNTBLK_ID_BX_TU1:
+		case RGX_CNTBLK_ID_BX_TU2:
+		case RGX_CNTBLK_ID_BX_TU3:
+		{
+			i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+						RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+						RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+						RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+						RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) +
+						RGX_CNTBLK_INDIRECT_COUNT(PBE, 15) +
+						(eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+			break;
+		}
+		default:
+		{
+			return NULL;
+		}
+	}
+	if ((i32Idx < 0) || (i32Idx >= RGX_HWPERF_MAX_DEFINED_BLKS))
+	{
+		return NULL;
+	}
+	return &psHWPerfInitData->sBlkCfg[i32Idx];
+}
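+
+/* Illustrative note (not from the original source): the sBlkCfg[] index space
+ * computed above is laid out as the direct blocks [0 .. RGX_CNTBLK_ID_DIRECT_LAST)
+ * followed by the indirect groups in a fixed order (TPU_MCU, USC, TEXAS,
+ * RASTER, BLACKPEARL, BX_TU). For example, for a USC unit:
+ *
+ *   i32Idx = RGX_CNTBLK_ID_DIRECT_LAST
+ *          + RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7)   (skip all TPU_MCU slots)
+ *          + (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);   (unit number within USC)
+ */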
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_km.h
new file mode 100644
index 0000000..4265987
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_km.h
@@ -0,0 +1,1067 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware interface structures used by pvrsrvkm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware interface structures used by pvrsrvkm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_KM_H__)
+#define __RGX_FWIF_KM_H__
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+#include "rgxdefs_km.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+#include "rgx_firmware_processor.h"
+
+#if !defined(__KERNEL__)
+/* The following defines the offsets for the KCCB, KCCBCtl, FWCCB and FWCCBCtl
+ * for the various guests in a virtualisation environment. It is assumed that each
+ * guest is built the same way and so all their offsets will match. If the code
+ * at host level changes and the offsets change, the defines here need to be updated.
+ */
+
+#if defined(RGX_FEATURE_META)
+#define RGXFWIF_GUEST_OFFSET_KCCB           (RGXFW_SEGMMU_DATA_BASE_ADDRESS | \
+                                             RGXFW_SEGMMU_DATA_META_CACHED | \
+                                             RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED | \
+                                             RGX_FW_HEAP_GUEST_OFFSET_KCCB)
+#define RGXFWIF_GUEST_OFFSET_KCCBCTL        (RGXFW_SEGMMU_DATA_BASE_ADDRESS | \
+                                             RGXFW_SEGMMU_DATA_META_UNCACHED | \
+                                             RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED | \
+                                             RGX_FW_HEAP_GUEST_OFFSET_KCCBCTL)
+#define RGXFWIF_GUEST_OFFSET_FWCCB          (RGXFW_SEGMMU_DATA_BASE_ADDRESS | \
+                                             RGXFW_SEGMMU_DATA_META_UNCACHED | \
+                                             RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED | \
+                                             RGX_FW_HEAP_GUEST_OFFSET_FWCCB)
+#define RGXFWIF_GUEST_OFFSET_FWCCBCTL       (RGXFW_SEGMMU_DATA_BASE_ADDRESS | \
+                                             RGXFW_SEGMMU_DATA_META_UNCACHED | \
+                                             RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED | \
+                                             RGX_FW_HEAP_GUEST_OFFSET_FWCCBCTL)
+#else
+/* In case of MIPS we will need to define proper values for these offsets */
+#define RGXFWIF_GUEST_OFFSET_KCCB           (0x0)
+#define RGXFWIF_GUEST_OFFSET_KCCBCTL        (0x0)
+#define RGXFWIF_GUEST_OFFSET_FWCCB          (0x0)
+#define RGXFWIF_GUEST_OFFSET_FWCCBCTL       (0x0)
+#endif
+
+#endif
+
+#if !defined(ALIGN)
+#define ALIGN(val, align) (((val) + ((align) - 1)) & ~((align) - 1))
+#endif
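+
+/* Worked example (illustrative, not from the original source): ALIGN rounds
+ * 'val' up to the next multiple of 'align' (align must be a power of two):
+ *   ALIGN(13, 8) == (13 + 7) & ~7 == 20 & ~7 == 16
+ *   ALIGN(16, 8) == (16 + 7) & ~7 == 23 & ~7 == 16
+ */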
+
+#if defined(RGX_FIRMWARE)
+typedef DLLIST_NODE							RGXFWIF_DLLIST_NODE;
+#else
+typedef struct {
+	RGXFWIF_DEV_VIRTADDR p;
+	RGXFWIF_DEV_VIRTADDR n;
+} RGXFWIF_DLLIST_NODE;
+#endif
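+
+/* Illustrative note (assumption, not from the original source): the host-side
+ * variant above mirrors the FW's DLLIST_NODE (previous/next links) using FW
+ * virtual addresses, so both builds agree on the structure layout and size.
+ */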
+
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_SIGBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_TRACEBUF;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_HWPERFBUF;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_HWRINFOBUF;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_RUNTIME_CFG;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_GPU_UTIL_FWCB;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_REG_CFG;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_HWPERF_CTL;
+typedef RGXFWIF_DEV_VIRTADDR				PRGX_HWPERF_CONFIG_CNTBLK;
+typedef RGXFWIF_DEV_VIRTADDR				PRGX_HWPERF_SELECT_CUSTOM_CNTRS;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_CCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_CCB;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_FWMEMCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_FWCOMMONCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_ZSBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_INIT;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_COMMONCTX_STATE;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_RF_CMD;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_COMPCHECKS;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_ALIGNCHECK;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_CORE_CLK_RATE;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_OS_CONFIG;
+
+/*!
+ * This number is used to represent an invalid page catalogue physical address
+ */
+#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
+
+/*!
+    Firmware memory context.
+*/
+typedef struct _RGXFWIF_FWMEMCONTEXT_
+{
+	IMG_DEV_PHYADDR			RGXFW_ALIGN sPCDevPAddr;	/*!< device physical address of context's page catalogue */
+	IMG_INT32				uiPageCatBaseRegID;	/*!< associated page catalog base register (-1 == unallocated) */
+	IMG_UINT32				uiBreakpointAddr; /*!< breakpoint address */
+	IMG_UINT32				uiBPHandlerAddr;  /*!< breakpoint handler address */
+	IMG_UINT32				uiBreakpointCtl; /*!< DM and enable control for BP */
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+    IMG_UINT32              ui32OSid;
+    IMG_BOOL                bOSidAxiProt;
+#endif
+
+} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT;
+
+
+/*!
+ * 	FW context state flags
+ */
+#define	RGXFWIF_CONTEXT_TAFLAGS_NEED_RESUME			(0x00000001)
+#define	RGXFWIF_CONTEXT_RENDERFLAGS_NEED_RESUME		(0x00000002)
+#define RGXFWIF_CONTEXT_CDMFLAGS_NEED_RESUME		(0x00000004)
+#define RGXFWIF_CONTEXT_SHGFLAGS_NEED_RESUME		(0x00000008)
+#define RGXFWIF_CONTEXT_TDMFLAGS_CONTEXT_STORED		(0x00000010)
+#define RGXFWIF_CONTEXT_ALLFLAGS_NEED_RESUME		(0x0000001F)
+
+
+typedef struct _RGXFWIF_TACTX_STATE_
+{
+	/* FW-accessible TA state which must be written out to memory on context store */
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER;		 /* To store in mid-TA */
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER_Init;	 /* Initial value (in case it is 'lost' due to a lock-up) */
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VDM_BATCH;
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VBS_SO_PRIM0;
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VBS_SO_PRIM1;
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VBS_SO_PRIM2;
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VBS_SO_PRIM3;
+#if defined(SUPPORT_VDM_CONTEXT_STORE_BUFFER_AB)
+	IMG_UINT16	RGXFW_ALIGN ui16TACurrentIdx;
+#endif
+} UNCACHED_ALIGN RGXFWIF_TACTX_STATE;
+
+
+typedef struct _RGXFWIF_3DCTX_STATE_
+{
+	/* FW-accessible ISP state which must be written out to memory on context store */
+	IMG_UINT32	RGXFW_ALIGN au3DReg_ISP_STORE[64];
+	IMG_UINT64	RGXFW_ALIGN u3DReg_PM_DEALLOCATED_MASK_STATUS;
+	IMG_UINT64	RGXFW_ALIGN u3DReg_PM_PDS_MTILEFREE_STATUS;
+} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE;
+
+
+
+typedef struct _RGXFWIF_COMPUTECTX_STATE_
+{
+	IMG_BOOL	RGXFW_ALIGN	bBufferB;
+} RGXFWIF_COMPUTECTX_STATE;
+
+
+typedef struct _RGXFWIF_VRDMCTX_STATE_
+{
+	/* FW-accessible TA state which must be written out to memory on context store */
+	IMG_UINT64	RGXFW_ALIGN uVRDMReg_VRM_CALL_STACK_POINTER;
+	IMG_UINT64	RGXFW_ALIGN uVRDMReg_VRM_BATCH;
+} UNCACHED_ALIGN RGXFWIF_VRDMCTX_STATE;
+
+
+typedef struct _RGXFWIF_FWCOMMONCONTEXT_
+{
+	/*
+		Used by bg and irq context
+	*/
+	/* CCB details for this firmware context */
+	PRGXFWIF_CCCB_CTL		psCCBCtl;				/*!< CCB control */
+	PRGXFWIF_CCCB			psCCB;					/*!< CCB base */
+	RGXFWIF_DMA_ADDR		sCCBMetaDMAAddr;
+
+	/*
+		Used by the bg context only
+	*/
+	RGXFWIF_DLLIST_NODE		RGXFW_ALIGN sWaitingNode;			/*!< List entry for the waiting list */
+
+	/*
+		Used by the irq context only
+	*/
+	RGXFWIF_DLLIST_NODE		sRunNode;				/*!< List entry for the run list */
+
+	PRGXFWIF_FWMEMCONTEXT	psFWMemContext;			/*!< Memory context */
+
+	/* Context suspend state */
+	PRGXFWIF_COMMONCTX_STATE	RGXFW_ALIGN psContextState;		/*!< TA/3D context suspend state, read/written by FW */
+
+	/* Framework state
+	 */
+	PRGXFWIF_RF_CMD		RGXFW_ALIGN psRFCmd;		/*!< Register updates for Framework */
+
+	/*
+	 * 	Flags e.g. for context switching
+	 */
+	IMG_UINT32				ui32Flags;
+	IMG_UINT32				ui32Priority;
+	IMG_UINT32				ui32PrioritySeqNum;
+	IMG_UINT64		RGXFW_ALIGN 	ui64MCUFenceAddr;
+
+	/* References to the host side originators */
+	IMG_UINT32				ui32ServerCommonContextID;			/*!< the Server Common Context */
+	IMG_UINT32				ui32PID;							/*!< associated process ID */
+
+	/* Statistic updates waiting to be passed back to the host... */
+	IMG_BOOL				bStatsPending;						/*!< True when some stats are pending */
+	IMG_INT32				i32StatsNumStores;					/*!< Number of stores on this context since last update */
+	IMG_INT32				i32StatsNumOutOfMemory;				/*!< Number of OOMs on this context since last update */
+	IMG_INT32				i32StatsNumPartialRenders;			/*!< Number of PRs on this context since last update */
+	RGXFWIF_DM				eDM;								/*!< Data Master type */
+	IMG_UINT64        RGXFW_ALIGN           ui64WaitSignalAddress;        /*!< Device Virtual Address of the signal the context is waiting on */
+	RGXFWIF_DLLIST_NODE                     sWaitSignalNode;              /*!< List entry for the wait-signal list */
+	RGXFWIF_DLLIST_NODE		RGXFW_ALIGN  sBufStalledNode;		/*!< List entry for the buffer stalled list */
+	IMG_UINT64    		    RGXFW_ALIGN  ui64CBufQueueCtrlAddr;	/*!< Address of the circular buffer queue pointers */
+	IMG_UINT64    		    RGXFW_ALIGN  ui64ResumeSignalAddr;	/*!< Address of the Services Signal for resuming the buffer */
+	IMG_BOOL    		    bReadOffsetNeedsReset;				/*!< Following HWR, the circular buffer read-offset needs resetting */
+} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT;
+
+/*!
+	Firmware render context.
+*/
+typedef struct _RGXFWIF_FWRENDERCONTEXT_
+{
+	RGXFWIF_FWCOMMONCONTEXT	sTAContext;				/*!< Firmware context for the TA */
+	RGXFWIF_FWCOMMONCONTEXT	s3DContext;				/*!< Firmware context for the 3D */
+
+	/*
+	 * Note: The following fields keep track of OOM and partial render statistics.
+	 * Because these data structures are allocated cache-incoherent,
+	 * and because these fields are updated by the firmware,
+	 * the host will read valid values only after an SLC flush/inval.
+	 * This is only guaranteed to happen while destroying the render-context.
+	 */
+
+	/* The following field has been reused to avoid breaking compatibility.
+	 *
+	 * It was previously:
+	 * IMG_UINT32 ui32TotalNumPartialRenders; (total number of partial renders)
+	 *
+	 * and is now:
+	 */
+	IMG_UINT32			ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */
+
+	IMG_UINT32			ui32TotalNumOutOfMemory;	/*!< Total number of OOMs */
+
+} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT;
+
+/*!
+	Firmware ray context.
+*/
+typedef struct _RGXFWIF_FWRAYCONTEXT_
+{
+	IMG_UINT32				ui32ActiveFCMask; /* placed first so that fwrayctx and shgctx do not end up at the same address */
+	IMG_UINT32				ui32NextFC;
+	RGXFWIF_FWCOMMONCONTEXT	sSHGContext;				/*!< Firmware context for the SHG */
+	RGXFWIF_FWCOMMONCONTEXT	sRTUContext;				/*!< Firmware context for the RTU */
+	PRGXFWIF_CCCB_CTL		psCCBCtl[DPX_MAX_RAY_CONTEXTS];
+	PRGXFWIF_CCCB			psCCB[DPX_MAX_RAY_CONTEXTS];
+} UNCACHED_ALIGN RGXFWIF_FWRAYCONTEXT;
+
+#define RGXFWIF_INVALID_FRAME_CONTEXT (0xFFFFFFFF)
+
+/*!
+	BIF tiling mode
+*/
+typedef enum _RGXFWIF_BIFTILINGMODE_
+{
+	RGXFWIF_BIFTILINGMODE_NONE      = 0,
+	RGXFWIF_BIFTILINGMODE_256x16    = 0,
+	RGXFWIF_BIFTILINGMODE_512x8     = 1
+} RGXFWIF_BIFTILINGMODE;
+
+/*!
+	BIF requester selection
+*/
+typedef enum _RGXFWIF_BIFREQ_
+{
+	RGXFWIF_BIFREQ_TA		= 0,
+	RGXFWIF_BIFREQ_3D		= 1,
+	RGXFWIF_BIFREQ_CDM		= 2,
+	RGXFWIF_BIFREQ_2D		= 3,
+	RGXFWIF_BIFREQ_TDM		= 3,
+	RGXFWIF_BIFREQ_HOST		= 4,
+	RGXFWIF_BIFREQ_RTU		= 5,
+	RGXFWIF_BIFREQ_SHG		= 6,
+	RGXFWIF_BIFREQ_MAX		= 7
+} RGXFWIF_BIFREQ;
+
+typedef enum _RGXFWIF_PM_DM_
+{
+	RGXFWIF_PM_DM_TA	= 0,
+	RGXFWIF_PM_DM_3D	= 1,
+} RGXFWIF_PM_DM;
+
+typedef enum _RGXFWIF_RPM_DM_
+{
+	RGXFWIF_RPM_DM_SHF	= 0,
+	RGXFWIF_RPM_DM_SHG	= 1,
+	RGXFWIF_RPM_DM_MAX,
+} RGXFWIF_RPM_DM;
+
+/*!
+ ******************************************************************************
+ * Kernel CCB control for RGX
+ *****************************************************************************/
+typedef struct _RGXFWIF_CCB_CTL_
+{
+	volatile IMG_UINT32		ui32WriteOffset;		/*!< write offset into array of commands (MUST be aligned to 16 bytes!) */
+	volatile IMG_UINT32		ui32ReadOffset;			/*!< read offset into array of commands */
+	IMG_UINT32				ui32WrapMask;			/*!< Offset wrapping mask (Total capacity of the CCB - 1) */
+	IMG_UINT32				ui32CmdSize;			/*!< size of each command in bytes */
+} UNCACHED_ALIGN RGXFWIF_CCB_CTL;
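+
+/* Illustrative sketch (assumption, not from the original source): with
+ * ui32WrapMask == capacity - 1 (capacity a power of two), an offset advances
+ * one command at a time as
+ *   ui32WriteOffset = (ui32WriteOffset + 1) & ui32WrapMask;
+ * and the CCB is empty when ui32ReadOffset == ui32WriteOffset.
+ */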
+
+/*!
+ ******************************************************************************
+ * Kernel CCB command structure for RGX
+ *****************************************************************************/
+
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PT      (0x1) /* MMU_CTRL_INVAL_PT_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PD      (0x2) /* MMU_CTRL_INVAL_PD_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PC      (0x4) /* MMU_CTRL_INVAL_PC_EN */
+
+#if !defined(__KERNEL__)
+
+#if !defined(RGX_FEATURE_SLC_VIVT)
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB   (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB     (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX(C)  (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x0) /* not used */
+
+#else /* RGX_FEATURE_SLC_VIVT */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB   (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB     (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX(C)  ((C) << 0x3) /* MMU_CTRL_INVAL_CONTEXT_SHIFT */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#endif
+
+#else
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB   (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB     (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX(C)  ((C) << 0x3) /* MMU_CTRL_INVAL_CONTEXT_SHIFT */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#endif
+
+#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000) /* indicates FW should interrupt the host */
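+
+/* Usage sketch (illustrative, not from the original source): the flags above
+ * combine by OR, e.g. to invalidate page tables and have the FW interrupt the
+ * host on completion:
+ *   ui32Flags = RGXFWIF_MMUCACHEDATA_FLAGS_PT |
+ *               RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT;
+ */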
+
+typedef struct _RGXFWIF_MMUCACHEDATA_
+{
+	PRGXFWIF_FWMEMCONTEXT		psMemoryContext;
+	IMG_UINT32			ui32Flags;
+	RGXFWIF_DEV_VIRTADDR		sMMUCacheSync;
+	IMG_UINT16			ui16MMUCacheSyncUpdateValue;
+} __attribute__ ((packed)) RGXFWIF_MMUCACHEDATA;
+
+typedef struct _RGXFWIF_SLCBPCTLDATA_
+{
+	IMG_BOOL               bSetBypassed;        /*!< Should SLC be/not be bypassed for indicated units? */
+	IMG_UINT32             uiFlags;             /*!< Units to enable/disable */
+} RGXFWIF_SLCBPCTLDATA;
+
+#define RGXFWIF_BPDATA_FLAGS_WRITE	(1 << 0)
+#define RGXFWIF_BPDATA_FLAGS_CTL	(1 << 1)
+#define RGXFWIF_BPDATA_FLAGS_REGS	(1 << 2)
+
+typedef struct _RGXFWIF_FWBPDATA_
+{
+	PRGXFWIF_FWMEMCONTEXT	psFWMemContext;			/*!< Memory context */
+	IMG_UINT32		ui32BPAddr;			/*!< Breakpoint address */
+	IMG_UINT32		ui32HandlerAddr;		/*!< Breakpoint handler */
+	IMG_UINT32		ui32BPDM;			/*!< Breakpoint control */
+	IMG_BOOL		bEnable;
+	IMG_UINT32		ui32Flags;
+	IMG_UINT32		ui32TempRegs;		/*!< Number of temporary registers to overallocate */
+	IMG_UINT32		ui32SharedRegs;		/*!< Number of shared registers to overallocate */
+} RGXFWIF_BPDATA;
+
+#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS 4
+
+typedef struct _RGXFWIF_KCCB_CMD_KICK_DATA_
+{
+	PRGXFWIF_FWCOMMONCONTEXT	psContext;			/*!< address of the firmware context */
+	IMG_UINT32					ui32CWoffUpdate;	/*!< Client CCB woff update */
+	IMG_UINT32					ui32NumCleanupCtl;		/*!< number of CleanupCtl pointers attached */
+	PRGXFWIF_CLEANUP_CTL		apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */
+	PRGXFWIF_WORKLOAD_DATA		sWorkloadDataFWAddress;   /*!< deprecated, kept for compatibility. */
+	IMG_UINT32					ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */
+} RGXFWIF_KCCB_CMD_KICK_DATA;
+
+typedef struct _RGXFWIF_KCCB_CMD_FENCE_DATA_
+{
+	RGXFWIF_DEV_VIRTADDR sSyncObjDevVAddr;
+	IMG_UINT32 uiUpdateVal;
+} RGXFWIF_KCCB_CMD_SYNC_DATA;
+
+typedef enum _RGXFWIF_CLEANUP_TYPE_
+{
+	RGXFWIF_CLEANUP_FWCOMMONCONTEXT,		/*!< FW common context cleanup */
+	RGXFWIF_CLEANUP_HWRTDATA,				/*!< FW HW RT data cleanup */
+	RGXFWIF_CLEANUP_FREELIST,				/*!< FW freelist cleanup */
+	RGXFWIF_CLEANUP_ZSBUFFER,				/*!< FW ZS Buffer cleanup */
+	RGXFWIF_CLEANUP_HWFRAMEDATA,			/*!< FW RPM/RTU frame data */
+	RGXFWIF_CLEANUP_RPM_FREELIST,			/*!< FW RPM freelist */
+} RGXFWIF_CLEANUP_TYPE;
+
+#define RGXFWIF_CLEANUP_RUN		(1 << 0)	/*!< The requested cleanup command has run on the FW */
+#define RGXFWIF_CLEANUP_BUSY	(1 << 1)	/*!< The requested resource is busy */
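+
+/* Illustrative sketch (assumption, not from the original source): the host can
+ * poll the request's sync primitive (sSyncObjDevVAddr below) for these bits:
+ *   if (ui32SyncVal & RGXFWIF_CLEANUP_BUSY)      resource still busy, retry;
+ *   else if (ui32SyncVal & RGXFWIF_CLEANUP_RUN)  cleanup has run on the FW.
+ */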
+
+typedef struct _RGXFWIF_CLEANUP_REQUEST_
+{
+	RGXFWIF_CLEANUP_TYPE			eCleanupType;			/*!< Cleanup type */
+	union {
+		PRGXFWIF_FWCOMMONCONTEXT 	psContext;				/*!< FW common context to cleanup */
+		PRGXFWIF_HWRTDATA 			psHWRTData;				/*!< HW RT to cleanup */
+		PRGXFWIF_FREELIST 			psFreelist;				/*!< Freelist to cleanup */
+		PRGXFWIF_ZSBUFFER 			psZSBuffer;				/*!< ZS Buffer to cleanup */
+		PRGXFWIF_RAY_FRAME_DATA		psHWFrameData;			/*!< RPM/RTU frame data to cleanup */
+		PRGXFWIF_RPM_FREELIST 		psRPMFreelist;			/*!< RPM Freelist to cleanup */
+	} uCleanupData;
+	RGXFWIF_DEV_VIRTADDR						sSyncObjDevVAddr;		/*!< sync primitive used to indicate state of the request */
+} RGXFWIF_CLEANUP_REQUEST;
+
+typedef enum _RGXFWIF_POWER_TYPE_
+{
+	RGXFWIF_POW_OFF_REQ = 1,
+	RGXFWIF_POW_FORCED_IDLE_REQ,
+	RGXFWIF_POW_NUMDUST_CHANGE,
+	RGXFWIF_POW_APM_LATENCY_CHANGE
+} RGXFWIF_POWER_TYPE;
+
+typedef enum
+{
+	RGXFWIF_OS_ONLINE = 1,
+	RGXFWIF_OS_OFFLINE
+} RGXFWIF_OS_STATE_CHANGE;
+
+typedef enum
+{
+	RGXFWIF_POWER_FORCE_IDLE = 1,
+	RGXFWIF_POWER_CANCEL_FORCED_IDLE,
+	RGXFWIF_POWER_HOST_TIMEOUT,
+} RGXFWIF_POWER_FORCE_IDLE_TYPE;
+
+typedef struct _RGXFWIF_POWER_REQUEST_
+{
+	RGXFWIF_POWER_TYPE					ePowType;					/*!< Type of power request */
+	union
+	{
+		IMG_UINT32						ui32NumOfDusts;			/*!< Number of active Dusts */
+		IMG_BOOL						bForced;				/*!< If the operation is mandatory */
+		RGXFWIF_POWER_FORCE_IDLE_TYPE	ePowRequestType;		/*!< Type of request: Force Idle, Cancel Forced Idle, or Host Timeout */
+		IMG_UINT32						ui32ActivePMLatencyms;	/*!< Number of milliseconds to set APM latency */
+	} uPoweReqData;
+} RGXFWIF_POWER_REQUEST;
+
+typedef struct _RGXFWIF_SLCFLUSHINVALDATA_
+{
+	PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */
+	IMG_BOOL    bInval;                 /*!< Invalidate the cache as well as flushing */
+	IMG_BOOL    bDMContext;             /*!< The data to flush/invalidate belongs to a specific DM context */
+	RGXFWIF_DM  eDM;                    /*!< DM to flush entries for (only useful when bDMContext == TRUE) */
+} RGXFWIF_SLCFLUSHINVALDATA;
+
+typedef struct _RGXFWIF_HCS_CTL_
+{
+	IMG_UINT32  ui32HCSDeadlineMS;  /* New number of milliseconds a hard context switch (C/S) is allowed to last */
+} RGXFWIF_HCS_CTL;
+
+typedef struct _RGXFWIF_HWPERF_CTRL_
+{
+	IMG_BOOL	 			bToggle; 	/*!< Toggle masked bits or apply full mask? */
+	IMG_UINT64	RGXFW_ALIGN	ui64Mask;   /*!< Mask of events to toggle */
+} RGXFWIF_HWPERF_CTRL;
+
+typedef struct _RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS_
+{
+	IMG_UINT32                ui32NumBlocks;    /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */
+	PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs;    /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */
+} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS;
+
+typedef struct _RGXFWIF_CORECLKSPEEDCHANGE_DATA_
+{
+	IMG_UINT32	ui32NewClockSpeed; 			/*!< New clock speed */
+} RGXFWIF_CORECLKSPEEDCHANGE_DATA;
+
+#define RGXFWIF_HWPERF_CTRL_BLKS_MAX	16
+
+typedef struct _RGXFWIF_HWPERF_CTRL_BLKS_
+{
+	IMG_BOOL	bEnable;
+	IMG_UINT32	ui32NumBlocks;                              /*!< Number of block IDs in the array */
+	IMG_UINT16	aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX];   /*!< Array of RGX_HWPERF_CNTBLK_ID values */
+} RGXFWIF_HWPERF_CTRL_BLKS;
+
+
+typedef struct _RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS_
+{
+	IMG_UINT16                      ui16CustomBlock;
+	IMG_UINT16                      ui16NumCounters;
+	PRGX_HWPERF_SELECT_CUSTOM_CNTRS sCustomCounterIDs;
+} RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS;
+
+typedef struct _RGXFWIF_ZSBUFFER_BACKING_DATA_
+{
+	RGXFWIF_DEV_VIRTADDR	sZSBufferFWDevVAddr; 				/*!< ZS-Buffer FW address */
+	IMG_UINT32				bDone;								/*!< whether the backing/unbacking action succeeded */
+} RGXFWIF_ZSBUFFER_BACKING_DATA;
+
+typedef struct
+{
+	IMG_UINT32 ui32IsolationPriorityThreshold;
+} RGXFWIF_OSID_ISOLATION_GROUP_DATA;
+
+/*
+ * Flags to pass in the unused bits of the page size grow request
+ */
+#define RGX_FREELIST_GSDATA_RPM_RESTART_EN		(1 << 31)		/*!< Restart RPM after freelist grow command */
+#define RGX_FREELIST_GSDATA_RPM_PAGECNT_MASK	(0x3FFFFFU)		/*!< Mask for page count. */
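+
+/* Packing sketch (illustrative, not from the original source): the page count
+ * and the RPM restart flag share one 32-bit word, e.g.
+ *   ui32GrowData = RGX_FREELIST_GSDATA_RPM_RESTART_EN |
+ *                  (uiPageCount & RGX_FREELIST_GSDATA_RPM_PAGECNT_MASK);
+ * where uiPageCount is a hypothetical caller-supplied value.
+ */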
+
+typedef struct _RGXFWIF_FREELIST_GS_DATA_
+{
+	RGXFWIF_DEV_VIRTADDR	sFreeListFWDevVAddr; 				/*!< Freelist FW address */
+	IMG_UINT32				ui32DeltaPages;						/*!< Amount of the Freelist change */
+	IMG_UINT32				ui32NewPages;						/*!< New amount of pages on the freelist */
+	IMG_UINT32              ui32ReadyPages;                     /*!< Number of pages to be added to the ready bank of the freelist */
+	IMG_UINT32              ui32ReadyOOMLimit;                  /*!< Number of pages used as threshold to trigger OOM */
+} RGXFWIF_FREELIST_GS_DATA;
+
+#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000
+
+typedef struct _RGXFWIF_FREELISTS_RECONSTRUCTION_DATA_
+{
+	IMG_UINT32			ui32FreelistsCount;
+	IMG_UINT32			aui32FreelistIDs[MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS];
+} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA;
+
+
+typedef struct _RGXFWIF_SIGNAL_UPDATE_DATA_
+{
+	IMG_DEV_VIRTADDR RGXFW_ALIGN       sDevSignalAddress; /*!< device virtual address of the updated signal */
+	PRGXFWIF_FWMEMCONTEXT              psFWMemContext; /*!< Memory context */
+} UNCACHED_ALIGN RGXFWIF_SIGNAL_UPDATE_DATA;
+
+
+typedef struct _RGXFWIF_WRITE_OFFSET_UPDATE_DATA_
+{
+	PRGXFWIF_FWCOMMONCONTEXT  psContext; /*!< Context that may need to be resumed following a write offset update */
+} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA;
+
+typedef struct _RGXFWIF_WORKEST_FWCCB_CMD_
+{
+	IMG_UINT64 RGXFW_ALIGN ui64ReturnDataIndex; /*!< Index for return data array */
+	IMG_UINT64 RGXFW_ALIGN ui64CyclesTaken;     /*!< The cycles the workload took on the hardware */
+} RGXFWIF_WORKEST_FWCCB_CMD;
+
+
+/*!
+ ******************************************************************************
+ * Proactive DVFS Structures
+ *****************************************************************************/
+#define NUM_OPP_VALUES 16
+
+typedef struct _PDVFS_OPP_
+{
+	IMG_UINT32			ui32Volt; /* V  */
+	IMG_UINT32			ui32Freq; /* Hz */
+} UNCACHED_ALIGN PDVFS_OPP;
+
+typedef struct _RGXFWIF_PDVFS_OPP_
+{
+	PDVFS_OPP		asOPPValues[NUM_OPP_VALUES];
+	IMG_UINT32		ui32MaxOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP;
+
+typedef struct _RGXFWIF_PDVFS_OPP_DATA_
+{
+	RGXFWIF_PDVFS_OPP sPDFVSOppInfo;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP_DATA;
+
+typedef struct _RGXFWIF_PDVFS_MAX_FREQ_DATA_
+{
+	IMG_UINT32 ui32MaxOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA;
+
+/*!
+ ******************************************************************************
+ * Register configuration structures
+ *****************************************************************************/
+
+#define RGXFWIF_REG_CFG_MAX_SIZE 512
+
+typedef enum _RGXFWIF_REGDATA_CMD_TYPE_
+{
+	RGXFWIF_REGCFG_CMD_ADD 				= 101,
+	RGXFWIF_REGCFG_CMD_CLEAR 			= 102,
+	RGXFWIF_REGCFG_CMD_ENABLE 			= 103,
+	RGXFWIF_REGCFG_CMD_DISABLE 			= 104
+} RGXFWIF_REGDATA_CMD_TYPE;
+
+typedef struct _RGXFWIF_REGCONFIG_DATA_
+{
+	RGXFWIF_REGDATA_CMD_TYPE         eCmdType;
+	RGXFWIF_REG_CFG_TYPE             eRegConfigType;
+	RGXFWIF_REG_CFG_REC RGXFW_ALIGN  sRegConfig;
+
+} RGXFWIF_REGCONFIG_DATA;
+
+typedef struct _RGXFWIF_REG_CFG_
+{
+	/**
+	 * PDump WRW command write granularity is 32 bits.
+	 * Add padding to ensure array size is 32 bit granular.
+	 */
+	IMG_UINT8            RGXFW_ALIGN  aui8NumRegsType[ALIGN(RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))];
+	RGXFWIF_REG_CFG_REC	 RGXFW_ALIGN  asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE];
+} UNCACHED_ALIGN RGXFWIF_REG_CFG;
+
+typedef struct _RGXFWIF_REGISTER_GUESTOS_OFFSETS_
+{
+	IMG_UINT32                        ui32OSid;
+	RGXFWIF_DEV_VIRTADDR RGXFW_ALIGN  sKCCBCtl;
+	RGXFWIF_DEV_VIRTADDR              sKCCB;
+	RGXFWIF_DEV_VIRTADDR              sFirmwareCCBCtl;
+	RGXFWIF_DEV_VIRTADDR              sFirmwareCCB;
+} UNCACHED_ALIGN RGXFWIF_REGISTER_GUESTOS_OFFSETS;
+
+/* OSid Scheduling Priority Change */
+typedef struct _RGXFWIF_OSID_PRIORITY_DATA_
+{
+	IMG_UINT32			ui32OSidNum;
+	IMG_UINT32			ui32Priority;
+} RGXFWIF_OSID_PRIORITY_DATA;
+
+typedef struct
+{
+	IMG_UINT32 ui32OSid;
+	RGXFWIF_OS_STATE_CHANGE eNewOSState;
+} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA;
+
+typedef struct
+{
+	PRGXFWIF_OS_CONFIG sOSConfig;
+}  RGXFW_ALIGN RGXFWIF_OS_CONFIG_DATA;
+
+typedef enum _RGXFWIF_KCCB_CMD_TYPE_
+{
+	RGXFWIF_KCCB_CMD_KICK						= 101,
+	RGXFWIF_KCCB_CMD_MMUCACHE					= 102,
+	RGXFWIF_KCCB_CMD_BP							= 104,
+	RGXFWIF_KCCB_CMD_SLCBPCTL   				= 106, /*!< slc bypass control. Requires sSLCBPCtlData. For validation */
+	RGXFWIF_KCCB_CMD_SYNC       				= 107, /*!< host sync command. Requires sSyncData. */
+	RGXFWIF_KCCB_CMD_SLCFLUSHINVAL				= 108, /*!< slc flush and invalidation request */
+	RGXFWIF_KCCB_CMD_CLEANUP					= 109, /*!< Requests cleanup of a FW resource (type specified in the command data) */
+	RGXFWIF_KCCB_CMD_POW						= 110, /*!< Power request */
+	RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG			= 111, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */
+	RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS	= 112, /*!< Configure, clear and enable multiple HWPerf blocks */
+	RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS			= 113, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */
+	RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE			= 114, /*!< CORE clock speed change event */
+	RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE	= 115, /*!< Backing for on-demand ZS-Buffer done */
+	RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE	= 116, /*!< Unbacking for on-demand ZS-Buffer done */
+	RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE		= 117, /*!< Freelist Grow done */
+	RGXFWIF_KCCB_CMD_FREELIST_SHRINK_UPDATE		= 118, /*!< Freelist Shrink done */
+	RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE	= 119, /*!< Freelists Reconstruction done */
+	RGXFWIF_KCCB_CMD_HEALTH_CHECK               = 120, /*!< Health check request */
+	RGXFWIF_KCCB_CMD_REGCONFIG                  = 121,
+	RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 122, /*!< Configure the custom counters for HWPerf */
+	RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT	= 123, /*!< Configure, clear and enable multiple HWPerf blocks during the init process */
+	RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE             = 124, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */
+	RGXFWIF_KCCB_CMD_WORKEST_CLEAR_BUFFER		= 125,
+	RGXFWIF_KCCB_CMD_PDVFS_PASS_OPP				= 126,
+	RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ				= 127,
+	RGXFWIF_KCCB_CMD_PDVFS_REQUEST_REACTIVE_UPDATE	= 129,
+	RGXFWIF_KCCB_CMD_DOPPLER_MEMORY_GROW		= 130,
+
+	RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE       = 131, /*!< Informs the firmware that the host has performed a signal update */
+
+	RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE	= 132, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */
+
+	RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE       = 133, /*!< Changes the relative scheduling priority for a particular OSid. It can only be serviced for the Host DDK */
+	RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL            = 134, /*!< Set or clear firmware state flags */
+	RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE           = 135, /*!< Set hard context switching deadline */
+	RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE  = 136, /*!< Changes the configuration of (or even disables) the OSid Isolation scheduling group. It can only be serviced for the Host DDK */
+	RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE  = 137, /*!< Informs the FW that a Guest OS has come online / offline. It can only be serviced for the Host DDK */
+	RGXFWIF_KCCB_CMD_OS_CFG_INIT                = 138, /*!< First kick of the DDK which initializes all OS specific data on the FW */
+} RGXFWIF_KCCB_CMD_TYPE;
+
+/* Kernel CCB command packet */
+typedef struct _RGXFWIF_KCCB_CMD_
+{
+	RGXFWIF_KCCB_CMD_TYPE  eCmdType; /*!< Command type */
+	RGXFWIF_DM             eDM;      /*!< DM associated with the command */
+
+	union
+	{
+		RGXFWIF_KCCB_CMD_KICK_DATA			sCmdKickData;			/*!< Data for Kick command */
+		RGXFWIF_MMUCACHEDATA				sMMUCacheData;			/*!< Data for MMUCACHE command */
+		RGXFWIF_BPDATA						sBPData;				/*!< Data for Breakpoint Commands */
+		RGXFWIF_SLCBPCTLDATA       			sSLCBPCtlData;  		/*!< Data for SLC Bypass Control */
+		RGXFWIF_KCCB_CMD_SYNC_DATA 			sSyncData;          	/*!< Data for host sync commands */
+		RGXFWIF_SLCFLUSHINVALDATA			sSLCFlushInvalData;		/*!< Data for SLC Flush/Inval commands */
+		RGXFWIF_CLEANUP_REQUEST				sCleanupData; 			/*!< Data for cleanup commands */
+		RGXFWIF_POWER_REQUEST				sPowData;				/*!< Data for power request commands */
+		RGXFWIF_HWPERF_CTRL					sHWPerfCtrl;			/*!< Data for HWPerf control command */
+		RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS	sHWPerfCfgEnableBlks;	/*!< Data for HWPerf configure, clear and enable performance counter block command */
+		RGXFWIF_HWPERF_CTRL_BLKS			sHWPerfCtrlBlks;		/*!< Data for HWPerf enable or disable performance counter block commands */
+		RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS  sHWPerfSelectCstmCntrs; /*!< Data for HWPerf configure the custom counters to read */
+		RGXFWIF_CORECLKSPEEDCHANGE_DATA		sCORECLKSPEEDCHANGEData;/*!< Data for CORE clock speed change */
+		RGXFWIF_ZSBUFFER_BACKING_DATA		sZSBufferBackingData;	/*!< Feedback for Z/S Buffer backing/unbacking */
+		RGXFWIF_FREELIST_GS_DATA			sFreeListGSData;		/*!< Feedback for Freelist grow/shrink */
+		RGXFWIF_FREELISTS_RECONSTRUCTION_DATA	sFreeListsReconstructionData;	/*!< Feedback for Freelists reconstruction */
+		RGXFWIF_REGCONFIG_DATA				sRegConfigData;			/*!< Data for custom register configuration */
+		RGXFWIF_REGISTER_GUESTOS_OFFSETS    sRegisterGuestOsOffests;/*!< Data for registering a guestOS with the FW */
+		RGXFWIF_SIGNAL_UPDATE_DATA          sSignalUpdateData;      /*!< Data for informing the FW about the signal update */
+		RGXFWIF_WRITE_OFFSET_UPDATE_DATA    sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */
+		RGXFWIF_PDVFS_OPP_DATA				sPDVFSOppData;
+		RGXFWIF_PDVFS_MAX_FREQ_DATA			sPDVFSMaxFreqData;
+		RGXFWIF_OSID_PRIORITY_DATA			sCmdOSidPriorityData;	/*!< Data for updating an OSid priority */
+		RGXFWIF_HCS_CTL						sHCSCtrl;				/*!< Data for Hard Context Switching */
+		RGXFWIF_OSID_ISOLATION_GROUP_DATA   sCmdOSidIsolationData;  /*!< Data for updating the OSid isolation group */
+		RGXFWIF_OS_STATE_CHANGE_DATA        sCmdOSOnlineStateData;  /*!< Data for updating the Guest Online states */
+		RGXFWIF_OS_CONFIG_DATA              sCmdOSConfigData;       /*!< Data for the OS-specific initialization part of the FW */
+	} UNCACHED_ALIGN uCmdData;
+} UNCACHED_ALIGN RGXFWIF_KCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD);
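+
+/* Population sketch (illustrative, not from the original source; field and
+ * enum names are as declared above, sSyncAddr is a hypothetical caller value):
+ *   RGXFWIF_KCCB_CMD sCmd = {0};
+ *   sCmd.eCmdType = RGXFWIF_KCCB_CMD_SYNC;
+ *   sCmd.uCmdData.sSyncData.sSyncObjDevVAddr = sSyncAddr;
+ *   sCmd.uCmdData.sSyncData.uiUpdateVal      = 1;
+ */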
+
+/*!
+ ******************************************************************************
+ * Firmware CCB command structure for RGX
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA_
+{
+	IMG_UINT32				ui32ZSBufferID;
+	IMG_BOOL				bPopulate;
+} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA_
+{
+	IMG_UINT32				ui32FreelistID;
+} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA_
+{
+	IMG_UINT32			ui32FreelistsCount;
+	IMG_UINT32			ui32HwrCounter;
+	IMG_UINT32			aui32FreelistIDs[MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS];
+} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA_
+{
+	IMG_UINT32						ui32ServerCommonContextID;	/*!< Context affected by the reset */
+	RGXFWIF_CONTEXT_RESET_REASON	eResetReason;				/*!< Reason for reset */
+	IMG_UINT32						ui32ResetJobRef;			/*!< Job ref running at the time of reset */
+	IMG_BOOL						bPageFault;					/*!< Did a page fault happen */
+	IMG_UINT64 RGXFW_ALIGN			ui64PCAddress;				/*!< At what page catalog address */
+} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA;
+
+typedef enum _RGXFWIF_FWCCB_CMD_TYPE_
+{
+	RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING				= 101, 	/*!< Requests ZSBuffer to be backed with physical pages */
+	RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING			= 102, 	/*!< Requests ZSBuffer to be unbacked */
+	RGXFWIF_FWCCB_CMD_FREELIST_GROW					= 103, 	/*!< Requests an on-demand freelist grow/shrink */
+	RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION		= 104, 	/*!< Requests freelists reconstruction */
+	RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION	= 105,	/*!< Notifies host of a HWR event on a context */
+	RGXFWIF_FWCCB_CMD_DEBUG_DUMP					= 106,	/*!< Requests an on-demand debug dump */
+	RGXFWIF_FWCCB_CMD_UPDATE_STATS					= 107,	/*!< Requests an on-demand update on process stats */
+
+	RGXFWIF_FWCCB_CMD_DOPPLER_MEMORY_GROW			= 108, 	/*!< Requests an on-demand RPM freelist grow */
+	RGXFWIF_FWCCB_CMD_WORKLOAD_FINISHED				= 109,	/*!< Supplies data for the workload matching algorithm */
+	RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE			= 110,
+	RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM					= 111,
+} RGXFWIF_FWCCB_CMD_TYPE;
+
+typedef enum
+{
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1,		/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY,			/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES,				/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES,				/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_SH_STORES,				/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumSHStores stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES				/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE;
+
+
+/* Firmware CCB command packet */
+
+typedef struct
+{
+    RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE		eElementToUpdate;			/*!< Element to update */
+    IMG_PID									pidOwner;					/*!< The pid of the process whose stats are being updated */
+    IMG_INT32								i32AdjustmentValue;			/*!< Adjustment to be made to the statistic */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA;
+/*!
+ ******************************************************************************
+ * Workload Estimation Structures
+ *****************************************************************************/
+
+typedef struct
+{
+	IMG_UINT64 RGXFW_ALIGN /*uintptr_t DEVMEM_MEMDESC*/	ui64WorkloadDataMemdesc;
+} RGXFWIF_FWCCB_CMD_WORKLOAD_FINISHED_DATA;
+
+/*!
+ ******************************************************************************
+ * Proactive DVFS Structures
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA_
+{
+	IMG_UINT32 ui32CoreClkRate;
+} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM_DATA_
+{
+	IMG_UINT64 RGXFW_ALIGN ui64MemDesc;
+} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_
+{
+	RGXFWIF_FWCCB_CMD_TYPE					eCmdType;	/*!< Command type */
+	union
+	{
+		RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA				sCmdZSBufferBacking;			/*!< Data for Z/S-Buffer on-demand (un)backing*/
+		RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA					sCmdFreeListGS;					/*!< Data for on-demand freelist grow/shrink */
+		RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA		sCmdFreeListsReconstruction;	/*!< Data for freelists reconstruction */
+		RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA				sCmdContextResetNotification;	/*!< Data for context reset notification */
+		RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA                 sCmdUpdateStatsData;            /*!< Data for updating process stats */
+		RGXFWIF_FWCCB_CMD_WORKLOAD_FINISHED_DATA			sCmdWorkEstWorkloadFinished;			/*!< Data for workload matching */
+		RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA			sCmdCoreClkRateChange;
+		RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM_DATA				sCmdPDVFSFreeMem;
+	} RGXFW_ALIGN uCmdData;
+} RGXFW_ALIGN RGXFWIF_FWCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD);
+
+/*!
+ ******************************************************************************
+ * Signature and Checksums Buffer
+ *****************************************************************************/
+typedef struct _RGXFWIF_SIGBUF_CTL_
+{
+	PRGXFWIF_SIGBUFFER		sBuffer;			/*!< Ptr to Signature Buffer memory */
+	IMG_UINT32				ui32LeftSizeInRegs;	/*!< Amount of space left for storing regs in the buffer */
+} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL;
+
+/*!
+ ******************************************************************************
+ * Updated configuration post FW data init.
+ *****************************************************************************/
+typedef struct _RGXFWIF_RUNTIME_CFG_
+{
+	IMG_UINT32         ui32ActivePMLatencyms;      /* APM latency in ms before signalling IDLE to the host */
+	IMG_BOOL           bActivePMLatencyPersistant; /* If set, APM latency does not reset to system default each GPU power transition */
+	IMG_UINT32         ui32CoreClockSpeed;         /* Core clock speed, currently only used to calculate timer ticks */
+	IMG_UINT32         ui32DefaultDustsNumInit;    /* Last number of dusts change requested by the host */
+	PRGXFWIF_HWPERFBUF sHWPerfBuf;                 /* On-demand allocated HWPerf buffer address, to be passed to the FW */
+} RGXFWIF_RUNTIME_CFG;
+
+/*!
+ *****************************************************************************
+ * Control data for RGX
+ *****************************************************************************/
+
+#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999)
+
+#if defined(PDUMP)
+
+#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32
+
+typedef enum _RGXFWIF_PID_FILTER_MODE_
+{
+	RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+	RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT
+} RGXFWIF_PID_FILTER_MODE;
+
+typedef struct _RGXFWIF_PID_FILTER_ITEM_
+{
+	IMG_PID uiPID;
+	IMG_UINT32 ui32OSID;
+} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM;
+
+typedef struct _RGXFWIF_PID_FILTER_
+{
+	RGXFWIF_PID_FILTER_MODE eMode;
+	/* Each process in the filter list is specified by a PID and OS ID pair.
+	 * Each PID/OS pair is an item in the items array (asItems).
+	 * If the array contains fewer than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries
+	 * then it must be terminated by an item with a PID of zero.
+	 */
+	RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS];
+} RGXFW_ALIGN RGXFWIF_PID_FILTER;
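+
+/* Example list (illustrative, not from the original source): two filtered
+ * processes followed by the zero-PID terminator described above:
+ *   asItems = { { 1234, 0 }, { 5678, 0 }, { 0, 0 }, ... }
+ */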
+#endif
+
+typedef struct
+{
+	IMG_UINT32              ui32ConfigFlags;        /*!< Configuration flags from host */
+	RGXFWIF_DEV_VIRTADDR    sPowerSync;
+	IMG_UINT32              ui32ConfigFlagsExt;     /*!< Extended configuration flags from host */
+} RGXFWIF_OS_CONFIG;
+
+typedef enum
+{
+	RGXFWIF_GPIO_VAL_OFF           = 0, /*!< No GPIO validation */
+	RGXFWIF_GPIO_VAL_GENERAL       = 1, /*!< Simple test case that
+	                                         initiates by sending data via the
+	                                         GPIO and then sends back any data
+	                                         received over the GPIO */
+	RGXFWIF_GPIO_VAL_AP            = 2, /*!< More complex test case that writes
+	                                         and reads data across the entire
+	                                         GPIO AP address range.*/
+	RGXFWIF_GPIO_VAL_LAST
+} RGXFWIF_GPIO_VAL_MODE;
+
+typedef struct _RGXFWIF_INIT_
+{
+
+	PRGXFWIF_OS_CONFIG      sOSConfig;              /*!< OS configuration data for the FW initialization */
+
+	IMG_DEV_PHYADDR         RGXFW_ALIGN sFaultPhysAddr;
+
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN sPDSExecBase;
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN sUSCExecBase;
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN sResultDumpBase;
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN sDPXControlStreamBase;
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN sRTUHeapBase;
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN sTDMTPUYUVCeoffsHeapBase;
+
+	IMG_BOOL                bFirstTA;
+	IMG_BOOL                bFirstRender;
+	IMG_BOOL                bFrameworkAfterInit;
+	IMG_BOOL                bDisableFilterHWPerfCustomCounter;
+
+	IMG_UINT32              ui32FilterFlags;
+
+	/* Kernel CCB */
+	PRGXFWIF_CCB_CTL        psKernelCCBCtl;
+	PRGXFWIF_CCB            psKernelCCB;
+
+	/* Firmware CCB */
+	PRGXFWIF_CCB_CTL        psFirmwareCCBCtl;
+	PRGXFWIF_CCB            psFirmwareCCB;
+
+	RGXFWIF_SIGBUF_CTL	asSigBufCtl[RGXFWIF_DM_DEFAULT_MAX];
+
+	IMG_BOOL                bEnableLogging;
+
+	IMG_UINT32              ui32BreakpointTemps;
+	IMG_UINT32              ui32BreakpointShareds;
+	IMG_UINT32              ui32HWRDebugDumpLimit;
+
+	RGXFWIF_BIFTILINGMODE   eBifTilingMode;
+	struct
+	{
+		IMG_UINT64 uiBase;
+		IMG_UINT64 uiLen;
+		IMG_UINT64 uiXStride;
+	}                       RGXFW_ALIGN sBifTilingCfg[RGXFWIF_NUM_BIF_TILING_CONFIGS];
+
+	PRGXFWIF_RUNTIME_CFG    sRuntimeCfg;
+
+	PRGXFWIF_TRACEBUF       sTraceBufCtl;
+	IMG_UINT64              RGXFW_ALIGN ui64HWPerfFilter;
+
+	PRGXFWIF_HWRINFOBUF     sRGXFWIfHWRInfoBufCtl;
+	PRGXFWIF_GPU_UTIL_FWCB  sGpuUtilFWCbCtl;
+	PRGXFWIF_REG_CFG        sRegCfg;
+	PRGXFWIF_HWPERF_CTL     sHWPerfCtl;
+
+	RGXFWIF_DEV_VIRTADDR    sAlignChecks;
+
+	/* Core clock speed at FW boot time */
+	IMG_UINT32              ui32InitialCoreClockSpeed;
+
+	/* APM latency in ms before signalling IDLE to the host */
+	IMG_UINT32              ui32ActivePMLatencyms;
+
+	/* Flag to be set by the Firmware after successful start */
+	IMG_BOOL                bFirmwareStarted;
+
+	IMG_UINT32              ui32MarkerVal;
+
+	IMG_UINT32              ui32FirmwareStartedTimeStamp;
+
+	IMG_UINT32              ui32JonesDisableMask;
+
+	/* Compatibility checks to be populated by the Firmware */
+	RGXFWIF_COMPCHECKS      sRGXCompChecks;
+
+	RGXFWIF_DMA_ADDR        sCorememDataStore;
+
+	FW_PERF_CONF            eFirmwarePerf;
+
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN sSLC3FenceDevVAddr;
+
+	RGXFWIF_DEV_VIRTADDR    sT1Stack;
+
+	RGXFWIF_PDVFS_OPP       sPDVFSOPPInfo;
+
+	/**
+	 * FW pointer to memory containing the core clock rate in Hz.
+	 * The firmware (PDVFS) updates this memory when running on the non-primary
+	 * FW thread, to communicate the rate to the host driver.
+	 */
+	PRGXFWIF_CORE_CLK_RATE  sCoreClockRate;
+
+#if defined(PDUMP)
+	RGXFWIF_PID_FILTER      sPIDFilter;
+#endif
+
+	/* Workload Estimation Firmware CCB */
+	PRGXFWIF_CCB_CTL        psWorkEstFirmwareCCBCtl;
+	PRGXFWIF_CCB            psWorkEstFirmwareCCB;
+
+	RGXFWIF_GPIO_VAL_MODE       eGPIOValidationMode;
+
+} UNCACHED_ALIGN RGXFWIF_INIT;
+
+
+/*!
+ ******************************************************************************
+ * Client CCB commands which are only required by the kernel
+ *****************************************************************************/
+typedef struct _RGXFWIF_CMD_PRIORITY_
+{
+    IMG_UINT32				ui32Priority;
+} RGXFWIF_CMD_PRIORITY;
+
+#endif /*  __RGX_FWIF_KM_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_km.h)
+******************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_resetframework.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_resetframework.h
new file mode 100644
index 0000000..dceeb48
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_resetframework.h
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@File			rgx_fwif_resetframework.h
+@Title         	Post-reset work-around framework FW interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_RGX_FWIF_RESETFRAMEWORK_H)
+#define _RGX_FWIF_RESETFRAMEWORK_H
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+
+typedef struct _RGXFWIF_RF_REGISTERS_
+{
+#if RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT == 2
+	IMG_UINT64	uCDMReg_CDM_CB_QUEUE;
+	IMG_UINT64	uCDMReg_CDM_CB_BASE;
+	IMG_UINT64	uCDMReg_CDM_CB;
+#else
+	IMG_UINT64  uCDMReg_CDM_CTRL_STREAM_BASE;
+#endif
+} RGXFWIF_RF_REGISTERS;
+
+#define RGXFWIF_RF_FLAG_ENABLE 0x00000001 /*!< enables the reset framework in the firmware */
+
+typedef struct _RGXFWIF_RF_CMD_
+{
+	IMG_UINT32           ui32Flags;
+
+	/* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */
+	RGXFWIF_RF_REGISTERS RGXFW_ALIGN sFWRegisters;
+
+} RGXFWIF_RF_CMD;
+
+/* used by the kernel to opaquely allocate and copy the reset framework command */
+#define RGXFWIF_RF_CMD_SIZE  sizeof(RGXFWIF_RF_CMD)
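+
+/* Illustrative note (assumption, not from the original source): because
+ * sFWRegisters is the last member, the kernel can allocate RGXFWIF_RF_CMD_SIZE
+ * bytes and copy a caller-supplied register block into the tail of the command
+ * without knowing the register layout for the current core.
+ */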
+
+#endif /* _RGX_FWIF_RESETFRAMEWORK_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_sf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_sf.h
new file mode 100644
index 0000000..31b7309
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_sf.h
@@ -0,0 +1,633 @@
+/*************************************************************************/ /*!
+@File			rgx_fwif_sf.h
+@Title          RGX firmware interface string format specifiers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX firmware logging messages. The following
+				list contains the messages the firmware prints. Only the first
+				column and spelling mistakes in the strings may be changed;
+				any other change will break compatibility with log files
+				created with older/newer firmware versions.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_FWIF_SF_H
+#define RGX_FWIF_SF_H
+
+/*****************************************************************************
+ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you
+ *  		 WILL BREAK fw tracing message compatibility with previous
+ *  		 fw versions. Only append new entries, if so required.
+/* Available log groups */
+#define RGXFW_LOG_SFGROUPLIST       \
+	X(RGXFW_GROUP_NULL,NULL)        \
+	X(RGXFW_GROUP_MAIN,MAIN)        \
+	X(RGXFW_GROUP_CLEANUP,CLEANUP)  \
+	X(RGXFW_GROUP_CSW,CSW)          \
+	X(RGXFW_GROUP_PM, PM)           \
+	X(RGXFW_GROUP_RTD,RTD)          \
+	X(RGXFW_GROUP_SPM,SPM)          \
+	X(RGXFW_GROUP_MTS,MTS)          \
+	X(RGXFW_GROUP_BIF,BIF)          \
+	X(RGXFW_GROUP_DATA,DATA)        \
+	X(RGXFW_GROUP_POW,POW)          \
+	X(RGXFW_GROUP_HWR,HWR)          \
+	X(RGXFW_GROUP_HWP,HWP)          \
+	X(RGXFW_GROUP_RPM,RPM)          \
+	X(RGXFW_GROUP_DMA,DMA)          \
+	X(RGXFW_GROUP_DBG,DBG)
+
+enum RGXFW_LOG_SFGROUPS {
+#define X(A,B) A,
+	RGXFW_LOG_SFGROUPLIST
+#undef X
+};
+
+/* Table of String Format specifiers, the group they belong to, and the number
+ * of arguments each expects. X-macro style macros are used to generate what
+ * is needed without requiring hand editing.
+ *
+ * id		: id within a group
+ * gid		: group id
+ * Sym name	: name of enumerations used to identify message strings
+ * String	: Actual string
+ * #args	: number of arguments the string format requires
+ */
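+
+/* Consumer sketch (illustrative, not from the original source): like the
+ * group list above, the table below can be expanded with an X macro, e.g. to
+ * build an enum of message identifiers:
+ *   #define X(id, gid, name, str, nargs) name,
+ *   enum { RGXFW_LOG_SFIDLIST };
+ *   #undef X
+ */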
+#define RGXFW_LOG_SFIDLIST \
+/*id, gid,              id name,        string,                           # arguments */ \
+X( 0, RGXFW_GROUP_NULL, RGXFW_SF_FIRST, "You should not use this string\n", 0) \
+\
+X( 1,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8X @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d\n", 6) \
+X( 2,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%d, HWRTData1State=%d\n", 2) \
+X( 3,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8X @ %d, CSW resume:%d, prio: %d\n", 4) \
+X( 4,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished\n", 0) \
+X( 5,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8X @ %d, prio: %d\n", 3) \
+X( 6,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED, "Compute finished\n", 0) \
+X( 7,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8X @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d\n", 7) \
+X( 8,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED, "TA finished\n", 0) \
+X( 9,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render\n", 0) \
+X(10,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render\n", 0) \
+X(11,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OOM, "Out of memory! Context 0x%08x, HWRTData 0x%x\n", 2) \
+X(12,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8X @ %d, prio:%d\n", 3) \
+X(13,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TLA_FINISHED, "TLA finished\n", 0) \
+X(14,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8X\n", 3) \
+X(16,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx %08.8X @ %d\n", 2) \
+X(17,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK, "UFO Check: [%08.8X] is %08.8X requires %08.8X\n", 3) \
+X(18,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded\n", 0) \
+X(19,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [%08.8X] is %08.8X requires >= %08.8X\n", 3) \
+X(20,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx %08.8X\n", 1) \
+X(21,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [%08.8X] is %08.8X requires >= ????????, [%08.8X] is ???????? requires %08.8X\n", 4) \
+X(22,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx %08.8X @ %d\n", 2) \
+X(23,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE, "UFO Update: [%08.8X] = %08.8X\n", 2) \
+X(24,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %d of: \n", 1) \
+X(25,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%d, FWCtx: %08.8X\n", 2) \
+X(26,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%d, FWCtx: %08.8X, MemCtx: %08.8X\n", 3) \
+X(27,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW\n", 0) \
+X(28,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.\n", 0) \
+X(29,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u\n", 1) \
+X(30,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %d failed: host = %X, fw = %X\n", 3) \
+X(31,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered\n", 0) \
+X(32,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler\n", 2) \
+X(33,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8X\n", 1) \
+X(34,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_STORE, "Store breakpoint state\n", 0) \
+X(35,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_UNSET, "Unsetting BP Registers\n", 0) \
+X(36,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NONZERO_RT, "Active RTs expected to be zero, actually %u\n", 1) \
+X(37,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_PRESENT, "RTC present, %u active render targets\n", 1) \
+X(38,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_EST_POWER, "Estimated Power 0x%x\n", 1) \
+X(39,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_TARGET, "RTA render target %u\n", 1) \
+X(40,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u\n", 2) \
+X(41,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SIZES_CHECK, "HWR sizes check %d failed: addresses = %d, sizes = %d\n", 3) \
+X(42,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%X\n", 1) \
+X(43,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %d, Units: 0x%08.8X\n", 2) \
+X(44,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %d to %d\n", 2) \
+X(45,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down\n", 0) \
+X(46,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)\n", 2) \
+X(47,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer used for partial render (store)\n", 0) \
+X(48,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)\n", 0) \
+X(49,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%d FWCtx: %08.8X \n", 2) \
+X(50,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d\n", 7) \
+X(51,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%d checker: MList[%d] = 0x%08x\n", 3) \
+X(52,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%d OK\n", 1) \
+X(53,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%d is empty\n", 1) \
+X(54,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%d checker: CatBase TE=0x%08X%08X, VCE=0x%08x%08X, ALIST=0x%08x%08X, IsTA=%d\n", 8) \
+X(55,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_40480KICK, "3D OQ flush kick\n", 0) \
+X(56,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device\n", 1) \
+X(57,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8X DM%u\n", 2) \
+X(58,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8X @ %d, prio: %d\n", 3) \
+X(59,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_FINISHED, "RDM finished on context %u\n", 1) \
+X(60,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8X @ %d, prio: %d\n", 3) \
+X(61,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SHG_FINISHED, "SHG finished\n", 0) \
+X(62,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBA_FINISHED, "FBA finished on context %u\n", 1) \
+X(63,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed\n", 0) \
+X(64,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%d start\n", 1) \
+X(65,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%d complete\n", 1) \
+X(66,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FC_CCB_UPDATE, "FC%u cCCB Woff update = %u\n", 2) \
+X(67,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8X @ %d, prio: %d, Frame Context: %d\n", 4) \
+X(68,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIDEKICK_INIT, "Sidekick init\n", 0) \
+X(69,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RD_INIT, "Rascal+Dusts init (# dusts mask: %X)\n", 1) \
+X(70,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d\n", 3) \
+X(71,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x\n", 3) \
+X(72,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %d. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)\n", 1) \
+X(73,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.\n", 0) \
+X(74,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP, "GPU has locked up (see HWR logs for more info)\n", 0) \
+X(75,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)\n", 0) \
+X(76,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll (see HWR logs)\n", 0) \
+X(77,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DOPPLER_OOM, "Doppler out of memory event for FC %u\n", 1) \
+X(78,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [%08.8X] is %08.8X requires >= %08.8X\n", 3) \
+X(79,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [%08.8X] is %08.8X requires %08.8X\n", 3) \
+X(80,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [%08.8X]\n", 1) \
+X(81,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx %08.8X @ %d\n", 2) \
+X(82,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE, "UFO Update: [%08.8X] = %08.8X\n", 2) \
+X(83,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8X @ %d\n", 2) \
+X(84,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM, "RPM Out of memory! Context 0x%08x, SH requestor %d\n", 2) \
+X(85,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD, "Discard RTU due to RPM abort: FWCtx 0x%08.8X @ %d, prio: %d, Frame Context: %d\n", 4) \
+X(86,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)\n", 4) \
+X(87,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN, "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)\n", 4) \
+X(88,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)\n", 4) \
+X(89,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 3) \
+X(90,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 3) \
+X(91,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %d\n", 1) \
+X(92,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %d\n", 1) \
+X(93,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %d Hz\n", 1) \
+X(94,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8X, prio: %d, queue: 0x%08X%08X (Roff = %u, Woff = %u, Size = %u)\n", 7) \
+X(95,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE, "Signal check failed, Required Data: 0x%X, Address: 0x%08x%08x\n", 3) \
+X(96,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x\n", 5) \
+X(97,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNALED, "Signalled the previously waiting FWCtx: 0x%08.8X, OSId: %u, Signal Address: 0x%08x%08x\n", 4) \
+X(98,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED_DEPRECATED, "Compute stalled\n", 0) \
+X(99,  RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED, "Compute stalled (Roff = %u, Woff = %u, Size = %u)\n", 3) \
+X(100, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED_FROM_STALL, "Compute resumed (Roff = %u, Woff = %u, Size = %u)\n", 3) \
+X(101, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x\n", 4) \
+X(102, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_OSID_DM, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x\n", 4) \
+X(103, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM, "DM: %u signal check failed\n", 1) \
+X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8X @ %d, prio:%d\n", 3) \
+X(105, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED, "TDM finished\n", 0) \
+X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS, "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]:  0x%08X 0x%08X)\n", 4) \
+X(107, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_HIT, "BRN 54141 HIT\n", 0) \
+X(108, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA, "BRN 54141 Dummy TA kicked\n", 0) \
+X(109, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_RESUME_TA, "BRN 54141 resume TA\n", 0) \
+X(110, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DOUBLE_HIT, "BRN 54141 double hit after applying WA\n", 0) \
+X(111, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DUMMY_TA_VDM_BASE, "BRN 54141 Dummy TA VDM base address: 0x%08x%08x\n", 2) \
+X(112, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%X, Current Data: 0x%X, Address: 0x%08x%08x\n", 4) \
+X(113, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled (Roff = %u, Woff = %u)\n", 2) \
+X(114, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx %08.8X\n", 1) \
+X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing OSid %d's priority from %u to %u \n", 3) \
+X(116, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed\n", 0) \
+X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 7) \
+X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 7) \
+X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8X @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 11) \
+X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8X @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 10) \
+X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8X @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 8) \
+X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, ext:0x%08X, int:0x%08X)\n", 6) \
+X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU, "Kick RTU: FWCtx 0x%08.8X @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 8) \
+X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG, "Kick SHG: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 7) \
+X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %d.\n", 1) \
+X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %d.\n", 1) \
+X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %d.\n", 1) \
+X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%d failed\n", 1) \
+X(130, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 2) \
+X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)\n", 3) \
+X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs).\n", 1) \
+X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET, "HCS changed to %d ms\n", 1) \
+X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)\n", 4) \
+X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, "  Phantom %d: USCTiles=%d\n", 2) \
+X(136, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_OFF, "Isolation grouping is disabled \n", 0) \
+X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF, "Isolation group configured with a priority threshold of %d\n", 1) \
+X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE, "OS %d has come online \n", 1) \
+X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE, "OS %d has gone offline \n", 1) \
+X(140, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8X, OSId: %u, Signal Address: 0x%08x%08x\n", 4) \
+X(141, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS, "TDM Queue: FWCtx 0x%08.8X, prio: %d, queue: 0x%08X%08X (Roff = %u, Woff = %u, Size = %u)\n", 7) \
+X(142, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8X, queue: 0x%08X%08X (Roff = %u becomes 0, Woff = %u, Size = %u)\n", 6) \
+X(143, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8X, queue: 0x%08X%08X (Roff = %u, StreamStartOffset = %u)\n", 5) \
+X(144, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIDEKICK_DEINIT, "Sidekick deinit\n", 0) \
+X(145, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RD_DEINIT, "Rascal+Dusts deinit\n", 0) \
+X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %d with config flags 0x%08x\n", 2) \
+X(147, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHKPT_LIMIT, "Fence checkpoint UFO limit exceeded %d/%d\n", 2) \
+X(148, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store\n", 0) \
+X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x\n", 3) \
+\
+X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %d\n", 2) \
+X( 2, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u\n", 1) \
+X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK, "Irq Task DM = %u, Breq = %d, SBIrq = 0x%X\n", 3) \
+X( 4, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u\n", 1) \
+X( 5, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_BG_ALL, "Kick MTS Bg task DM=All\n", 0) \
+X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%d \n", 1) \
+X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %d\n", 2) \
+X( 8, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x\n", 2) \
+X( 9, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = %x, cmd = %x\n", 3) \
+X(10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x\n", 3) \
+X(11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE, "Ready queue debug DM = %u, celltype = %d, OSid = %u\n", 3) \
+X(12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task DM = %u, counted = %d, OSid = %u\n", 3) \
+X(13, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE, "Bg Task complete DM Bitfield: %u\n", 1) \
+X(14, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.\n", 0) \
+\
+X( 1, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned\n", 1) \
+X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d\n", 3) \
+X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP, "HWRTData [0x%08x] for DM=%d, received cleanup request\n", 2) \
+X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d\n", 3) \
+X( 5, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy\n", 2) \
+X( 6, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned\n", 2) \
+X( 7, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned\n", 1) \
+X( 8, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned\n", 1) \
+X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d\n", 3) \
+X(10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d\n", 4) \
+X(11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP, "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request\n", 2) \
+X(12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d\n", 3) \
+X(13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d\n", 4) \
+X(14, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned\n", 2) \
+\
+X( 1, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8X needs resume\n", 1) \
+X( 2, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME, "*** CDM FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X\n", 3) \
+X( 3, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SHARED, "CDM FWCtx shared alloc size load 0x%X\n", 1) \
+X( 4, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_COMPLETE, "*** CDM FWCtx store complete\n", 0) \
+X( 5, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_START, "*** CDM FWCtx store start\n", 0) \
+X( 6, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SOFT_RESET, "CDM Soft Reset\n", 0) \
+X( 7, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_NEEDS_RESUME, "3D FWCtx 0x%08.8X needs resume\n", 1) \
+X( 8, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME, "*** 3D FWCtx 0x%08.8X resume\n", 1) \
+X( 9, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_COMPLETE, "*** 3D context store complete\n", 0) \
+X(10, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED, "3D context store pipe state: 0x%08.8X 0x%08.8X 0x%08.8X\n", 3) \
+X(11, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START, "*** 3D context store start\n", 0) \
+X(12, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_TQ_RESUME, "*** 3D TQ FWCtx 0x%08.8X resume\n", 1) \
+X(13, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_NEEDS_RESUME, "TA FWCtx 0x%08.8X needs resume\n", 1) \
+X(14, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_RESUME, "*** TA FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X\n", 3) \
+X(15, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_SHARED, "TA context shared alloc size store 0x%X, load 0x%X\n", 2) \
+X(16, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_COMPLETE, "*** TA context store complete\n", 0) \
+X(17, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_START, "*** TA context store start\n", 0) \
+X(18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED_AGAIN, "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d\n", 3) \
+X(19, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u\n", 2) \
+X(20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe%d state: 0x%08.8X\n", 2) \
+X(21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe%d state: 0x%08.8X\n", 2) \
+X(22, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_NEEDS_RESUME, "SHG FWCtx 0x%08.8X needs resume\n", 1) \
+X(23, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_RESUME, "*** SHG FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X\n", 3) \
+X(24, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_SHARED, "SHG context shared alloc size store 0x%X, load 0x%X\n", 2) \
+X(25, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_COMPLETE, "*** SHG context store complete\n", 0) \
+X(26, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_START, "*** SHG context store start\n", 0) \
+X(27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %d\n", 1) \
+X(28, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context store hit ctrl stream terminate. Skip resume.\n", 0) \
+X(29, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X, shader state %u\n", 4) \
+X(30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%d->%d)\n", 2) \
+X(31, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_52563_HIT, "TA context store hit BRN 52563: vertex store tasks outstanding\n", 0) \
+X(32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %d)\n", 1) \
+X(33, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_DEFERRED, "TA context store deferred due to BRN 54141.", 0) \
+X(34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u\n", 7) \
+X(35, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_START, "*** TDM context store start\n", 0) \
+X(36, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete\n", 0) \
+X(37, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_NEEDS_RESUME, "TDM context needs resume, header [%08.8X, %08.8X]\n", 2) \
+X(38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u\n", 8) \
+\
+X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE, "Activate MemCtx=0x%08x BIFreq=%d secure=%d\n", 3) \
+X( 2, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x \n", 1) \
+X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC, "Alloc PC reg %d\n", 1) \
+X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_GRAB, "Grab reg %d refcount now %d\n", 2) \
+X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_UNGRAB, "Ungrab reg %d refcount now %d\n", 2) \
+X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG, "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x\n", 6) \
+X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST, "Trust enabled:%d, for BIFreq=%d\n", 2) \
+X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG, "BIF Tiling Cfg %d base %08x%08x len %08x%08x enable %d stride %d --> %08x%08x\n", 9) \
+X( 9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now %08x %08x\n", 4) \
+X(10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %d to OSID1, Context  %d, Register's contents are now %04x\n", 3) \
+X(11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32OSid = %u, Catbase = %u, Reg Address = %x, Reg index = %u, Bitshift index = %u, Val = %08x%08x\n", 7) \
+\
+X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)\n", 10) \
+X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d, mmu:%d\n", 8) \
+X( 3, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_DEPRECATED, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)\n", 14) \
+X( 4, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_DEPRECATED, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)\n", 14) \
+X( 5, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_COMPLETE, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x\n", 5) \
+X( 6, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_DENIED, "Grow for freelist ID=0x%08x denied by host\n", 1) \
+X( 7, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x\n", 5) \
+X( 8, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed\n", 1) \
+X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d\n", 2) \
+X(10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: %08x, status(1:success, 0:fail): %d\n", 2) \
+X(11, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X(12, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X(13, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X(14, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X(15, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x\n", 1) \
+X(16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d\n", 7) \
+X(17, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)\n", 10) \
+X(18, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)\n", 10) \
+\
+X( 1, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_DYNAMIC_STATUS, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x\n", 3) \
+X( 2, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_STATIC_STATUS, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x\n", 3) \
+X( 3, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_GROW, "RPM request failed. Waiting for freelist grow.\n", 0) \
+X( 4, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_ABORT, "RPM request failed. Aborting the current frame.\n", 0) \
+X( 5, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW, "RPM waiting for pending grow on freelist 0x%08x\n", 1) \
+X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW, "Request freelist grow [0x%08x] current pages %d, grow size %d\n", 3) \
+X( 7, RGXFW_GROUP_RPM, RGXFW_SF_RPM_FREELIST_LOAD, "Freelist load: SHF = 0x%08x, SHG = 0x%08x\n", 2) \
+X( 8, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08X.%08X\n", 2) \
+X( 9, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08X.%08X\n", 2) \
+X(10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST, "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)\n", 5) \
+X(11, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_RESTART, "Restarting SHG\n", 0) \
+X(12, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_ABORTED, "Grow failed, aborting the current frame.\n", 0) \
+X(13, RGXFW_GROUP_RPM, RGXFW_SF_RPM_ABORT_COMPLETE, "RPM abort complete on HWFrameData [0x%08x].\n", 1) \
+X(14, RGXFW_GROUP_RPM, RGXFW_SF_RPM_CLEANUP_NEEDS_ABORT, "RPM freelist cleanup [0x%08x] requires abort to proceed.\n", 1) \
+X(15, RGXFW_GROUP_RPM, RGXFW_SF_RPM_RPM_PT, "RPM page table base register: 0x%08X.%08X\n", 2) \
+X(16, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_ABORT, "Issuing RPM abort.\n", 0) \
+X(17, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_TOGGLE_CHECK_FULL, "RPM OOM received but toggle bits indicate free pages available\n", 0) \
+X(18, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_HW_TIMEOUT, "RPM hardware timeout. Unable to process OOM event.\n", 0) \
+X(19, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_LOAD, "SHF FL (0x%08x) load, FPL: 0x%08X.%08X, roff: 0x%08X, woff: 0x%08X\n", 5) \
+X(20, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_LOAD, "SHG FL (0x%08x) load, FPL: 0x%08X.%08X, roff: 0x%08X, woff: 0x%08X\n", 5) \
+X(21, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_STORE, "SHF FL (0x%08x) store, roff: 0x%08X, woff: 0x%08X\n", 3) \
+X(22, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_STORE, "SHG FL (0x%08x) store, roff: 0x%08X, woff: 0x%08X\n", 3) \
+\
+X( 1, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u\n", 2) \
+X( 2, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u\n", 2) \
+X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to %X, FL different between TA/3D: local: %d, global: %d, mmu: %d\n", 4) \
+X( 4, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_3D, "Loading VFP table 0x%08x%08x for 3D\n", 2) \
+X( 5, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_TA, "Loading VFP table 0x%08x%08x for TA\n", 2) \
+X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 10) \
+X( 7, RGXFW_GROUP_RTD, RGXFW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store\n", 0) \
+X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%d: Load=No, Store=No\n", 2) \
+X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No\n", 2) \
+X(10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes \n", 3) \
+X(11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes \n", 3) \
+X(12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x\n", 5) \
+X(13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 10) \
+X(14, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u\n", 2) \
+X(15, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u\n", 2) \
+X(16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+X(17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load  Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+X(18, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG, "Freelist 0x%X RESET!!!!!!!!\n", 1) \
+X(19, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG2, "Freelist 0x%X stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 5) \
+X(20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%X type: %d (0:local,1:global,2:mmu) on HW context %u\n", 3) \
+X(21, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)\n", 1) \
+X(22, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed\n", 0) \
+X(23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d\n", 3) \
+X(24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)\n", 3) \
+X(25, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)\n", 8) \
+X(26, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_LOADED, "3D RTData 0x%08x loaded on HW context %u\n", 2) \
+X(27, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)\n", 4) \
+X(28, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=%d, HWRTData1State=%d\n", 2) \
+X(29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to %X, FL different between TA/3D: local: %d, global: %d\n", 3) \
+X(30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%X type: %d (0:local,1:global) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+X(31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load  Freelist 0x%X type: %d (0:local,1:global) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+\
+X( 1, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_DEPRECATED, "Force Z-Load for partial render\n", 0) \
+X( 2, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_DEPRECATED, "Force Z-Store for partial render\n", 0) \
+X( 3, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_LOCAL, "3D MemFree: Local FL 0x%08x\n", 1) \
+X( 4, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_MMU, "3D MemFree: MMU FL 0x%08x\n", 1) \
+X( 5, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_GLOBAL, "3D MemFree: Global FL 0x%08x\n", 1) \
+X( 6, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [%08.8X] is %08.8X requires %08.8X, HardwareSync Fence [%08.8X] is %08.8X requires %08.8X\n", 6) \
+X( 7, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x\n", 3) \
+X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL, "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x\n", 5) \
+X( 9, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_AVOIDED, "Partial render avoided\n", 0) \
+X(10, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_DISCARDED, "Partial render discarded\n", 0) \
+X(11, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_FINISHED, "Partial Render finished\n", 0) \
+X(12, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DBG, "SPM Owner = 3D-BG\n", 0) \
+X(13, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DIRQ, "SPM Owner = 3D-IRQ\n", 0) \
+X(14, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_NONE, "SPM Owner = NONE\n", 0) \
+X(15, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TABG, "SPM Owner = TA-BG\n", 0) \
+X(16, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TAIRQ, "SPM Owner = TA-IRQ\n", 0) \
+X(17, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_ADDRESS, "ZStore address 0x%08x%08x\n", 2) \
+X(18, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SSTORE_ADDRESS, "SStore address 0x%08x%08x\n", 2) \
+X(19, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_ADDRESS, "ZLoad address 0x%08x%08x\n", 2) \
+X(20, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SLOAD_ADDRESS, "SLoad address 0x%08x%08x\n", 2) \
+X(21, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_ZSBUFFER, "No deferred ZS Buffer provided\n", 0) \
+X(22, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POPULATED, "ZS Buffer successfully populated (ID=0x%08x)\n", 1) \
+X(23, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POP_UNNEEDED, "No need to populate ZS Buffer (ID=0x%08x)\n", 1) \
+X(24, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOPULATED, "ZS Buffer successfully unpopulated (ID=0x%08x)\n", 1) \
+X(25, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOP_UNNEEDED, "No need to unpopulate ZS Buffer (ID=0x%08x)\n", 1) \
+X(26, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST, "Send ZS-Buffer backing request to host (ID=0x%08x)\n", 1) \
+X(27, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST, "Send ZS-Buffer unbacking request to host (ID=0x%08x)\n", 1) \
+X(28, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_PENDING, "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)\n", 1) \
+X(29, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_PENDING, "Don't send ZS-Buffer unbacking request. Previous request still pending (ID=0x%08x)\n", 1) \
+X(30, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZBUFFER_NOT_READY, "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)\n", 1) \
+X(31, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SBUFFER_NOT_READY, "Partial Render waiting for SBuffer to be backed (ID=0x%08x)\n", 1) \
+X(32, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_NONE, "SPM State = none\n", 0) \
+X(33, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_BLOCKED, "SPM State = PR blocked\n", 0) \
+X(34, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_GROW, "SPM State = wait for grow\n", 0) \
+X(35, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_HW, "SPM State = wait for HW\n", 0) \
+X(36, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_RUNNING, "SPM State = PR running\n", 0) \
+X(37, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_AVOIDED, "SPM State = PR avoided\n", 0) \
+X(38, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_EXECUTED, "SPM State = PR executed\n", 0) \
+X(39, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FREELIST_MATCH, "3DMemFree matches freelist 0x%08x (FL type = %u)\n", 2) \
+X(40, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDedected flag\n", 0) \
+X(41, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_PENDING_GROW, "Wait for pending grow on Freelist 0x%08x\n", 1) \
+X(42, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_FAILED, "ZS Buffer failed to be populated (ID=0x%08x)\n", 1) \
+X(43, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FL_GROW_DEBUG, "Grow update inconsistency: FL addr: 0x%08x%08x, curr pages: %u, ready: %u, new: %u\n", 5) \
+X(44, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA, "OOM: Resumed TA with ready pages, FL addr: 0x%08x%08x, current pages: %u, SP : %u, \n", 4) \
+X(45, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE, "Received grow update, FL addr: 0x%08x%08x, current pages: %u, ready pages: %u, threshold: %u\n", 5) \
+\
+X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED_AGAIN, "Check Pow state DM%d int: 0x%X, ext: 0x%X, pow flags: 0x%X\n", 4) \
+X( 2, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_IDLE, "Sidekick idle (might be powered down). Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %d), DM%d, pow flags: 0x%8.8X\n", 3) \
+X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %d %d %d %d\n", 4) \
+X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %d, Any RD-DM Active? %d\n", 2) \
+X( 6, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_OFF, "Sidekick ready to be powered down. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %d, Units: 0x%08.8X\n", 2) \
+X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %d (Power flags=%d)\n", 2) \
+X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %d to %d\n", 2) \
+X(11, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init\n", 0) \
+X(12, RGXFW_GROUP_POW, RGXFW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: %X)\n", 1) \
+X(13, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.\n", 0) \
+X(14, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.\n", 0) \
+X(15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %d, Any RD-DM Active? %d\n", 2) \
+X(16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %d, TLA-DM Active? %d\n", 2) \
+X(17, RGXFW_GROUP_POW, RGXFW_SF_POW_BRN37566, "Request power up due to BRN37566. Pow stat int: 0x%X\n", 1) \
+X(18, RGXFW_GROUP_POW, RGXFW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%X, ext: 0x%X, pow flags: 0x%X\n", 3) \
+X(19, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%X\n", 1) \
+X(20, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%X\n", 1) \
+X(21, RGXFW_GROUP_POW, RGXFW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X(22, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X(23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %dms. Core clock: %d Hz\n", 2) \
+X(24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%X, %d dusts powered.\n", 2) \
+X(25, RGXFW_GROUP_POW, RGXFW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.\n", 0) \
+X(26, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_ENERGY, "Power monitor: Estimate of dynamic energy %u\n", 1) \
+X(27, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state: Int: 0x%X, Ext: 0x%X, Pow flags: 0x%X\n", 3) \
+X(28, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_DEADLINE, "Proactive DVFS: New deadline, time = 0x%08x%08x\n", 2) \
+X(29, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_WORKLOAD, "Proactive DVFS: New workload, cycles = 0x%08x%08x\n", 2) \
+X(30, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_CALCULATE, "Proactive DVFS: Proactive frequency calculated = 0x%08x\n", 1) \
+X(31, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UTILISATION, "Proactive DVFS: Reactive utilisation = 0x%08x\n", 1) \
+X(32, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_REACT, "Proactive DVFS: Reactive frequency calculated = 0x%08x%08x\n", 2) \
+X(33, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND_DEPRECATED, "Proactive DVFS: OPP Point Sent = 0x%x\n", 1) \
+X(34, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEADLINE_REMOVED, "Proactive DVFS: Deadline removed = 0x%08x%08x\n", 2) \
+X(35, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_WORKLOAD_REMOVED, "Proactive DVFS: Workload removed = 0x%08x%08x\n", 2) \
+X(36, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_THROTTLE, "Proactive DVFS: Throttle to a maximum = 0x%x\n", 1) \
+X(37, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_FAILURE, "Proactive DVFS: Failed to pass OPP point via GPIO.\n", 0) \
+X(38, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_INVALID_NODE, "Proactive DVFS: Invalid node passed to function.\n", 0) \
+X(39, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GUEST_BAD_ACCESS, "Proactive DVFS: Guest OS attempted to do a privileged action. OSid = %u\n", 1) \
+X(40, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: 0x%x\n", 1) \
+X(41, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: 0x%x\n", 1) \
+X(42, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.\n", 0) \
+X(43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %d, Ticks: %d\n", 2) \
+X(44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042, "Allowed number of dusts is %d due to BRN59042.\n", 1) \
+X(45, RGXFW_GROUP_POW, RGXFW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X(46, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK, "Check Pow state: Int: 0x%X, Ext: 0x%X, Pow flags: 0x%X, Fence Counters: Check: %u - Update: %u\n", 5) \
+X(47, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x, Success = %x\n", 2) \
+X(48, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_IDLE, "Proactive DVFS: GPU transitioned to idle\n", 0) \
+X(49, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_ACTIVE, "Proactive DVFS: GPU transitioned to active\n", 0) \
+\
+X(1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%d, FWCtx: %08.8X\n", 2) \
+X(2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%d, FWCtx: %08.8X, MemCtx: %08.8X\n", 3) \
+X(3, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW\n", 0) \
+X(4, RGXFW_GROUP_HWR, RGXFW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.\n", 0) \
+X(5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%d FWCtx: %08.8X \n", 2) \
+X(6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%d->%d), PER-DM(0x%08X->0x%08X)\n", 4) \
+X(7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%d->%d), PER-DM(0x%08X)\n", 3) \
+X(8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due lockup: GLB(%d), PER-DM(0x%08X->0x%08X)\n", 3) \
+X(9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%d->%d), PER-DM(0x%08X->0x%08X)\n", 4) \
+X(10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%d->%d), PER-DM(0x%08X->0x%08X)\n", 4) \
+X(11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08X)\n", 3) \
+X(12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08X)\n", 4) \
+X(13, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 1-Set) failed (reg:0x%08X val:0x%08X)\n", 3) \
+X(14, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08X\n", 2) \
+X(15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08X (st: %d), FWCtx 0x%08X @ %d\n", 6) \
+X(16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE, "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d\n", 2) \
+X(17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8X - local (0x%08.8X): %d, global (0x%08.8X): %d\n", 5) \
+X(18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8X, discard: %d - local (0x%08.8X): s%d?=c%d, global (0x%08.8X): s%d?=c%d\n", 8) \
+X(19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8X c%d\n", 2) \
+X(20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8X @ %d, RTD 0x%08x.\n", 3) \
+X(21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%d, extmem: %d)\n", 2) \
+X(22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: %08.8X (TPC addr: %08X%08X, size: %d bytes)\n", 4) \
+X(23, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08X\n", 2) \
+X(24, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08X \n", 5) \
+X(25, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered\n", 1) \
+X(26, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP, "DM%u: Hold scheduling due to R-Flag = 0x%08x\n", 2) \
+X(27, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_RECONSTRUCTION, "Analysis: Need freelist reconstruction\n", 0) \
+X(28, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP, "Analysis DM%u: Lockup FWCtx: %08.8X. Need to skip to next command\n", 2) \
+X(29, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP_OOM_TA, "Analysis DM%u: Lockup while TA is OOM FWCtx: %08.8X. Need to skip to next command\n", 2) \
+X(30, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_PR_CLEANUP, "Analysis DM%u: Lockup while partial render FWCtx: %08.8X. Need PR cleanup\n", 2) \
+X(31, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP, "GPU has locked up\n", 0) \
+X(32, RGXFW_GROUP_HWR, RGXFW_SF_HWR_READY, "DM%u ready for HWR\n", 1) \
+X(33, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_UPDATE_RECOVERY, "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08X\n", 2) \
+X(34, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729, "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08X)\n", 1) \
+X(35, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT, "DM%u timed out\n", 1) \
+X(36, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EVENT_STATUS_REG, "RGX_CR_EVENT_STATUS=0x%08x\n", 1) \
+X(37, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_FALSE_LOCKUP, "DM%u lockup falsely detected, R-Flags=0x%08X\n", 2) \
+X(38, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline\n", 0) \
+X(39, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll\n", 0) \
+X(40, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x\n", 2) \
+X(41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (loop:%d, poll failures: 0x%08X)\n", 2) \
+X(42, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08X\n", 1) \
+X(43, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08X)\n", 1) \
+X(44, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. Proceeding to full register checking (DM: %u).\n", 1) \
+X(45, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_META_FAULT, "Meta MMU page fault detected (Meta MMU Status: 0x%08X%08X)\n", 2) \
+X(46, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK, "Fast CRC Check result for DM%u is HWRNeeded=%u\n", 2) \
+X(47, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK, "Full Signature Check result for DM%u is HWRNeeded=%u\n", 2) \
+X(48, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u\n", 3) \
+X(49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d\n", 3) \
+X(50, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "Deadline counter for DM%u is HWRDeadline=%u\n", 2) \
+X(51, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction\n", 1) \
+X(52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%X (ID=%d)\n", 2) \
+X(53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%d complete\n", 1) \
+X(54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%X (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u\n", 4) \
+X(55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%d failed\n", 1) \
+X(56, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02X, StallingMask=0x%02X, PDS_CTRL=0x%08X%08X)\n", 4) \
+X(57, RGXFW_GROUP_HWR, RGXFW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02X, StallingMask=0x%02X, PDS_CTRL=0x%08X%08X)\n", 4) \
+X(58, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_USED, "USC slots: %u used by DM%u\n", 2) \
+X(59, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty\n", 1) \
+X(60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%d's Context Switch failed to meet deadline. Current time: %08x%08x, deadline: %08x%08x\n", 5) \
+X(61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%d)\n", 1) \
+X(62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%d)\n", 1) \
+X(63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction\n", 2) \
+X(64, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8X, queue: 0x%08X%08X (Roff = %u becomes StreamStartOffset = %u)\n", 5) \
+X(65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%X (ID=%d) type: %d (0:local,1:global) on HW context %u\n", 4) \
+X(66, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MIPS_FAULT, "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)\n", 3) \
+X(67, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance\n", 1) \
+X(68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%X (ID=%d)\n", 2) \
+\
+X( 1, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGBLK, "Block 0x%x mapped to Config Idx %u\n", 2) \
+X( 2, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_OMTBLK, "Block 0x%x omitted from event - not enabled in HW\n", 1) \
+X( 3, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INCBLK, "Block 0x%x included in event - enabled in HW\n", 1) \
+X( 4, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELREG, "Select register state hi_0x%x lo_0x%x\n", 2) \
+X( 5, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CSBHDR, "Counter stream block header word 0x%x\n", 1) \
+X( 6, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTROFF, "Counter register offset 0x%x\n", 1) \
+X( 7, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGSKP, "Block 0x%x config unset, skipping\n", 1) \
+X( 8, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK, "Accessing Indirect block 0x%x\n", 1) \
+X( 9, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DIRBLK, "Accessing Direct block 0x%x\n", 1) \
+X(10, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CNTPRG, "Programmed counter select register at offset 0x%x\n", 1) \
+X(11, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKPRG, "Block register offset 0x%x and value 0x%x\n", 2) \
+X(12, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKCG, "Reading config block from driver 0x%x\n", 1) \
+X(13, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKRG, "Reading block range 0x%x to 0x%x\n", 2) \
+X(14, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKREC, "Recording block 0x%x config from driver\n", 1) \
+X(15, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKED, "Finished reading config block from driver\n", 0) \
+X(16, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: %x   value: %x \n", 2) \
+X(17, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u  ID:%x\n", 2) \
+X(18, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID %x is not allowed. The package [b:%u, n:%u] will be discarded\n", 3) \
+X(19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Custom Counters filter status %d\n", 1) \
+X(20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded\n", 2) \
+X(21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %d counters IDs while the upper limit is %d\n", 2) \
+X(22, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHECK_FILTER, "Check Filter %x is %x ?\n", 2) \
+X(23, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset\n", 1) \
+X(24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD, "Encountered an invalid command (%d)\n", 1) \
+X(25, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 2) \
+X(26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)\n", 3) \
+X(27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %d \n", 1) \
+\
+X( 1, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST, "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u\n", 5) \
+X( 2, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_COMPLETE, "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u\n", 4) \
+X( 3, RGXFW_GROUP_DMA, RGXFW_SF_DMA_INT_REG, "DMA Interrupt register 0x%08x\n", 1) \
+X( 4, RGXFW_GROUP_DMA, RGXFW_SF_DMA_WAIT, "Waiting for transfer ID %u completion...\n", 1) \
+X( 5, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOADING_FAILED, "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed\n", 3) \
+X( 6, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOAD_INVALID, "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)\n", 3) \
+X( 7, RGXFW_GROUP_DMA, RGXFW_SF_DMA_POLL_FAILED, "Transfer 0x%02x request poll failure\n", 1) \
+\
+X(1, RGXFW_GROUP_DBG, RGXFW_SF_DBG_INTPAIR, "0x%8.8x 0x%8.8x\n", 2) \
+\
+X(65535, RGXFW_GROUP_NULL, RGXFW_SF_LAST, "You should not use this string\n", 15)
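+
+/* Per the compatibility warning at the top of this list, existing entries
+ * must not be renumbered or reworded; a new message is appended with an id
+ * unused within its group, for example (hypothetical entry):
+ *
+ *   X(150, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MY_NEW_EVENT, "My event: %u\n", 1)
+ */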
+
+
+/*  The symbolic names found in the table above are assigned a ui32 value in
+ *  the following format:
+ *
+ *   31 | 30..28 | 27..20 | 19..16 | 15..12 | 11..0    bits
+ *
+ *     0-11: id number within the group
+ *    12-15: group id number
+ *    16-19: number of parameters
+ *    20-27: unused (zero for a valid id)
+ *    28-30: active: identify an SF packet, otherwise a regular int32
+ *       31: reserved for signed/unsigned compatibility
+ *
+ *   The following macro assigns those values to the enum-generated SF id list.
+ */
+#define RGXFW_LOG_IDMARKER			(0x70000000)
+#define RGXFW_LOG_CREATESFID(a,b,e) (((a) | ((b) << 12) | ((e) << 16)) | RGXFW_LOG_IDMARKER)
+
+#define RGXFW_LOG_IDMASK			(0xFFF00000)
+#define RGXFW_LOG_VALIDID(I)		(((I) & RGXFW_LOG_IDMASK) == RGXFW_LOG_IDMARKER)
+
+typedef enum RGXFW_LOG_SFids {
+#define X(a, b, c, d, e) c = RGXFW_LOG_CREATESFID(a,b,e),
+	RGXFW_LOG_SFIDLIST
+#undef X
+} RGXFW_LOG_SFids;
+
+/* Return the group id that the given (enum generated) id belongs to */
+#define RGXFW_SF_GID(x) (((x)>>12) & 0xf)
+/* Returns how many arguments the SF(string format) for the given (enum generated) id requires */
+#define RGXFW_SF_PARAMNUM(x) (((x)>>16) & 0xf)
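+
+/* Usage sketch (illustrative, not part of the original header): given an SF
+ * id built by RGXFW_LOG_CREATESFID, the helper macros above recover the group
+ * and the expected argument count before unpacking a trace entry, e.g.:
+ *
+ *   IMG_UINT32 ui32SFId = RGXFW_SF_HWP_I_BLKPRG;              // group HWP, 2 args
+ *   if (RGXFW_LOG_VALIDID(ui32SFId))
+ *   {
+ *       IMG_UINT32 ui32Group  = RGXFW_SF_GID(ui32SFId);       // == RGXFW_GROUP_HWP
+ *       IMG_UINT32 ui32NumArg = RGXFW_SF_PARAMNUM(ui32SFId);  // == 2
+ *       // read ui32NumArg parameter words from the trace buffer here
+ *   }
+ */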
+
+#endif /* RGX_FWIF_SF_H */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_shared.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_shared.h
new file mode 100644
index 0000000..42c0750
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_shared.h
@@ -0,0 +1,623 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware interface structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware interface structures shared by both host client
+                and host server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_SHARED_H__)
+#define __RGX_FWIF_SHARED_H__
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "img_types.h"
+#include "rgx_common.h"
+#include "devicemem_typedefs.h"
+
+/*
+ * Firmware binary block unit in bytes.
+ * Raw data stored in FW binary will be aligned on this size.
+ */
+#define FW_BLOCK_SIZE 4096L
+
+/* Offset for BVNC struct from the end of the FW binary */
+#define FW_BVNC_BACKWARDS_OFFSET (FW_BLOCK_SIZE)
+
+/*!
+ ******************************************************************************
+ * Device state flags
+ *****************************************************************************/
+#define RGXKMIF_DEVICE_STATE_ZERO_FREELIST          (0x1 << 0)		/*!< Zeroing the physical pages of reconstructed free lists */
+#define RGXKMIF_DEVICE_STATE_FTRACE_EN              (0x1 << 1)		/*!< Used to enable production of GPT FTrace from HWPerf events in the MISR */
+#define RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN  (0x1 << 2)		/*!< Used to disable the Devices Watchdog logging */
+#define RGXKMIF_DEVICE_STATE_DUST_REQUEST_INJECT_EN (0x1 << 3)		/*!< Used for validation to inject dust requests every TA/3D kick */
+#define RGXKMIF_DEVICE_STATE_HWPERF_HOST_EN         (0x1 << 4)		/*!< Used to enable host-side-only HWPerf stream */
+
+/* Required memory alignment for 64-bit variables accessible by Meta
+  (the Meta gcc aligns 64-bit variables to 64 bits; therefore, memory shared
+   between the host and Meta that contains 64-bit variables has to maintain
+   this alignment) */
+#define RGXFWIF_FWALLOC_ALIGN	sizeof(IMG_UINT64)
+
+typedef struct _RGXFWIF_DEV_VIRTADDR_
+{
+	IMG_UINT32	ui32Addr;
+} RGXFWIF_DEV_VIRTADDR;
+
+typedef struct _RGXFWIF_DMA_ADDR_
+{
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN psDevVirtAddr;
+	RGXFWIF_DEV_VIRTADDR    pbyFWAddr;
+} UNCACHED_ALIGN RGXFWIF_DMA_ADDR;
+
+typedef IMG_UINT8	RGXFWIF_CCCB;
+
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CCCB;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CCCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_RENDER_TARGET;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_HWRTDATA;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_FREELIST;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_RAY_FRAME_DATA;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_RPM_FREELIST;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_RTA_CTL;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_UFO_ADDR;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CLEANUP_CTL;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_TIMESTAMP_ADDR;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_WORKLOAD_DATA;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_DEADLINE_LIST_NODE;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_WORKLOAD_LIST_NODE;
+
+/* FIXME PRGXFWIF_UFO_ADDR and RGXFWIF_UFO should move back into rgx_fwif_client.h */
+typedef struct _RGXFWIF_UFO_
+{
+	PRGXFWIF_UFO_ADDR	puiAddrUFO;
+	IMG_UINT32			ui32Value;
+} RGXFWIF_UFO;
+
+
+/*!
+	Last reset reason for a context.
+*/
+typedef enum _RGXFWIF_CONTEXT_RESET_REASON_
+{
+	RGXFWIF_CONTEXT_RESET_REASON_NONE					= 0,	/*!< No reset reason recorded */
+	RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP			= 1,	/*!< Caused a reset due to locking up */
+	RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP		= 2,	/*!< Affected by another context locking up */
+	RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING		= 3,	/*!< Overran the global deadline */
+	RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING	= 4,	/*!< Affected by another context overrunning */
+} RGXFWIF_CONTEXT_RESET_REASON;
+
+
+/*!
+	HWRTData state the render is in
+*/
+typedef enum
+{
+	RGXFWIF_RTDATA_STATE_NONE = 0,
+	RGXFWIF_RTDATA_STATE_KICKTA,
+	RGXFWIF_RTDATA_STATE_KICKTAFIRST,
+	RGXFWIF_RTDATA_STATE_TAFINISHED,
+	RGXFWIF_RTDATA_STATE_KICK3D,
+	RGXFWIF_RTDATA_STATE_3DFINISHED,
+	RGXFWIF_RTDATA_STATE_TAOUTOFMEM,
+	RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED,
+	RGXFWIF_RTDATA_STATE_HWR					/*!< In case of HWR, we can't set the RTDATA state to NONE,
+													 as this would cause any TA to become a first TA.
+													 To ensure all related TAs are skipped, we use the HWR state */
+} RGXFWIF_RTDATA_STATE;
+
+typedef struct _RGXFWIF_CLEANUP_CTL_
+{
+	IMG_UINT32				ui32SubmittedCommands;	/*!< Number of commands received by the FW */
+	IMG_UINT32				ui32ExecutedCommands;	/*!< Number of commands executed by the FW */
+} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL;
+
+
+/*!
+ ******************************************************************************
+ * Client CCB control for RGX
+ *****************************************************************************/
+typedef struct _RGXFWIF_CCCB_CTL_
+{
+	IMG_UINT32				ui32WriteOffset;	/*!< write offset into array of commands (MUST be aligned to 16 bytes!) */
+	IMG_UINT32				ui32ReadOffset;		/*!< read offset into array of commands */
+	IMG_UINT32				ui32DepOffset;		/*!< Dependency offset */
+	IMG_UINT32				ui32WrapMask;		/*!< Offset wrapping mask (Total capacity of the CCB - 1) */
+} UNCACHED_ALIGN RGXFWIF_CCCB_CTL;
+
+typedef enum
+{
+	RGXFW_LOCAL_FREELIST = 0,
+	RGXFW_GLOBAL_FREELIST = 1,
+	RGXFW_MAX_FREELISTS
+} RGXFW_FREELIST_TYPE;
+
+typedef struct _RGXFWIF_RTA_CTL_
+{
+	IMG_UINT32           ui32RenderTargetIndex;		//Render number
+	IMG_UINT32           ui32CurrentRenderTarget;	//index in RTA
+	IMG_UINT32           ui32ActiveRenderTargets;	//total active RTs
+	IMG_UINT32           ui32CumulActiveRenderTargets;   //total active RTs from the first TA kick, for OOM
+	RGXFWIF_DEV_VIRTADDR sValidRenderTargets;  //Array of valid RT indices
+	RGXFWIF_DEV_VIRTADDR sNumRenders;  //Array of number of occurred partial renders per render target
+	IMG_UINT16           ui16MaxRTs;   //Number of render targets in the array
+} UNCACHED_ALIGN RGXFWIF_RTA_CTL;
+
+typedef struct _RGXFWIF_FREELIST_
+{
+	IMG_DEV_VIRTADDR	RGXFW_ALIGN psFreeListDevVAddr;
+	IMG_UINT64			RGXFW_ALIGN ui64CurrentDevVAddr;
+	IMG_UINT32			ui32CurrentStackTop;
+	IMG_UINT32			ui32MaxPages;
+	IMG_UINT32			ui32GrowPages;
+	IMG_UINT32			ui32CurrentPages; /* HW pages */
+	IMG_UINT32			ui32AllocatedPageCount;
+	IMG_UINT32			ui32AllocatedMMUPageCount;
+	IMG_UINT32			ui32HWRCounter;
+	IMG_UINT32			ui32FreeListID;
+	IMG_BOOL            bGrowPending;
+	IMG_UINT32          ui32ReadyPages;
+	/* Some more pages that should be used only when OOM is reached */
+	IMG_UINT32          ui32OOMReadyLimit;
+} UNCACHED_ALIGN RGXFWIF_FREELIST;
+
+typedef enum
+{
+	RGXFW_RPM_SHF_FREELIST = 0,
+	RGXFW_RPM_SHG_FREELIST = 1,
+} RGXFW_RPM_FREELIST_TYPE;
+
+#define		RGXFW_MAX_RPM_FREELISTS		(2)
+
+typedef struct _RGXFWIF_RPM_FREELIST_
+{
+	IMG_DEV_VIRTADDR	RGXFW_ALIGN sFreeListDevVAddr;		/*!< device base address */
+	//IMG_DEV_VIRTADDR	RGXFW_ALIGN sRPMPageListDevVAddr;	/*!< device base address for RPM pages in-use */
+	IMG_UINT32			sSyncAddr;				/*!< Free list sync object for OOM event */
+	IMG_UINT32			ui32MaxPages;			/*!< maximum size */
+	IMG_UINT32			ui32GrowPages;			/*!< grow size = maximum pages which may be added later */
+	IMG_UINT32			ui32CurrentPages;		/*!< number of pages */
+	IMG_UINT32			ui32ReadOffset;			/*!< head: where to read alloc'd pages */
+	IMG_UINT32			ui32WriteOffset;		/*!< tail: where to write de-alloc'd pages */
+	IMG_BOOL			bReadToggle;			/*!< toggle bit for circular buffer */
+	IMG_BOOL			bWriteToggle;
+	IMG_UINT32			ui32AllocatedPageCount; /*!< TODO: not sure yet if this is useful */
+	IMG_UINT32			ui32HWRCounter;
+	IMG_UINT32			ui32FreeListID;			/*!< unique ID per device, e.g. rolling counter */
+	IMG_BOOL			bGrowPending;			/*!< FW is waiting for host to grow the freelist */
+} UNCACHED_ALIGN RGXFWIF_RPM_FREELIST;
+
+typedef struct _RGXFWIF_RAY_FRAME_DATA_
+{
+	/* state manager for shared state between vertex and ray processing */
+
+	/* TODO: not sure if this will be useful, link it here for now */
+	IMG_UINT32		sRPMFreeLists[RGXFW_MAX_RPM_FREELISTS];
+
+	IMG_BOOL		bAbortOccurred;
+
+	/* cleanup state.
+	 * Both the SHG and RTU must complete or discard any outstanding work
+	 * which references this frame data.
+	 */
+	RGXFWIF_CLEANUP_CTL		sCleanupStateSHG;
+	RGXFWIF_CLEANUP_CTL		sCleanupStateRTU;
+	IMG_UINT32				ui32CleanupStatus;
+#define HWFRAMEDATA_SHG_CLEAN	(1 << 0)
+#define HWFRAMEDATA_RTU_CLEAN	(1 << 1)
+
+} UNCACHED_ALIGN RGXFWIF_RAY_FRAME_DATA;
+
+
+typedef struct _RGXFWIF_RENDER_TARGET_
+{
+	IMG_DEV_VIRTADDR	RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap Data Store */
+	IMG_BOOL			bTACachesNeedZeroing;			  /*!< Whether RTC and TPC caches (on mem) need to be zeroed on next first TA kick */
+
+} UNCACHED_ALIGN RGXFWIF_RENDER_TARGET;
+
+
+typedef struct _RGXFWIF_HWRTDATA_
+{
+	RGXFWIF_RTDATA_STATE				eState;
+
+	IMG_UINT32							ui32NumPartialRenders; /*!< Number of partial renders. Used to setup ZLS bits correctly */
+	IMG_BOOL							bLastWasPartial; /*!< Whether the last render was a partial render */
+	IMG_DEV_VIRTADDR					RGXFW_ALIGN psPMMListDevVAddr; /*!< MList Data Store */
+
+	IMG_UINT64							RGXFW_ALIGN ui64VCECatBase[4];
+	IMG_UINT64							RGXFW_ALIGN ui64VCELastCatBase[4];
+	IMG_UINT64							RGXFW_ALIGN ui64TECatBase[4];
+	IMG_UINT64							RGXFW_ALIGN ui64TELastCatBase[4];
+	IMG_UINT64							RGXFW_ALIGN ui64AlistCatBase;
+	IMG_UINT64							RGXFW_ALIGN ui64AlistLastCatBase;
+
+	IMG_UINT64							RGXFW_ALIGN ui64PMAListStackPointer;
+	IMG_UINT32							ui32PMMListStackPointer;
+
+	PRGXFWIF_FREELIST 					RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS];
+	IMG_UINT32							aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS];
+
+	PRGXFWIF_RENDER_TARGET				psParentRenderTarget;
+
+	RGXFWIF_CLEANUP_CTL					sTACleanupState;
+	RGXFWIF_CLEANUP_CTL					s3DCleanupState;
+	IMG_UINT32							ui32CleanupStatus;
+#define HWRTDATA_TA_CLEAN	(1 << 0)
+#define HWRTDATA_3D_CLEAN	(1 << 1)
+
+	PRGXFWIF_RTA_CTL					psRTACtl;
+
+	IMG_UINT32							bHasLastTA;
+	IMG_BOOL							bPartialRendered;
+
+	IMG_UINT32							ui32PPPScreen;
+	IMG_UINT32							ui32PPPGridOffset;
+	IMG_UINT64							RGXFW_ALIGN ui64PPPMultiSampleCtl;
+	IMG_UINT32							ui32TPCStride;
+	IMG_DEV_VIRTADDR					RGXFW_ALIGN sTailPtrsDevVAddr;
+	IMG_UINT32							ui32TPCSize;
+	IMG_UINT32							ui32TEScreen;
+	IMG_UINT32							ui32MTileStride;
+	IMG_UINT32							ui32TEAA;
+	IMG_UINT32							ui32TEMTILE1;
+	IMG_UINT32							ui32TEMTILE2;
+	IMG_UINT32							ui32ISPMergeLowerX;
+	IMG_UINT32							ui32ISPMergeLowerY;
+	IMG_UINT32							ui32ISPMergeUpperX;
+	IMG_UINT32							ui32ISPMergeUpperY;
+	IMG_UINT32							ui32ISPMergeScaleX;
+	IMG_UINT32							ui32ISPMergeScaleY;
+	IMG_BOOL							bDisableTileReordering;
+#if defined(RGX_FIRMWARE)
+	struct _RGXFWIF_FWCOMMONCONTEXT_*	psOwnerTA;
+#else
+	RGXFWIF_DEV_VIRTADDR				pui32OwnerTANotUsedByHost;
+#endif
+} UNCACHED_ALIGN RGXFWIF_HWRTDATA;
+
+typedef enum
+{
+	RGXFWIF_ZSBUFFER_UNBACKED = 0,
+	RGXFWIF_ZSBUFFER_BACKED,
+	RGXFWIF_ZSBUFFER_BACKING_PENDING,
+	RGXFWIF_ZSBUFFER_UNBACKING_PENDING,
+}RGXFWIF_ZSBUFFER_STATE;
+
+typedef struct _RGXFWIF_ZSBUFFER_
+{
+	IMG_UINT32				ui32ZSBufferID;				/*!< Buffer ID*/
+	IMG_BOOL				bOnDemand;					/*!< Needs On-demand ZS Buffer allocation */
+	RGXFWIF_ZSBUFFER_STATE	eState;						/*!< Z/S-Buffer state */
+	RGXFWIF_CLEANUP_CTL		sCleanupState;				/*!< Cleanup state */
+} UNCACHED_ALIGN RGXFWIF_FWZSBUFFER;
+
+/* Number of BIF tiling configurations / heaps */
+#define RGXFWIF_NUM_BIF_TILING_CONFIGS 4
+
+/*!
+ *****************************************************************************
+ * RGX Compatibility checks
+ *****************************************************************************/
+/* WARNING: RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX may only ever be increased, and
+		must always equal (N * sizeof(IMG_UINT32) - 1) */
+#define RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX 7
+
+/* WARNING: whenever the layout of RGXFWIF_COMPCHECKS_BVNC changes, the
+	following define must be incremented by 1 to signal the layout change
+	to the compatibility logic */
+#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 2
+
+typedef struct _RGXFWIF_COMPCHECKS_BVNC_
+{
+	IMG_UINT32	ui32LayoutVersion; /* WARNING: This field must be defined as first one in this structure */
+	IMG_UINT32  ui32VLenMax;
+	IMG_UINT64	RGXFW_ALIGN ui64BNC;
+	IMG_CHAR	aszV[RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX + 1];
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC;
+
+#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \
+	RGXFWIF_COMPCHECKS_BVNC name = { \
+		RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \
+		RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX, \
+		0, \
+		{ 0 }, \
+	}
+#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \
+	do { \
+		(name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \
+		(name).ui32VLenMax = RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX; \
+		(name).ui64BNC = 0; \
+		(name).aszV[0] = 0; \
+	} while (0)
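+
+/* Usage sketch (illustrative, not part of the original header): either
+ * declare-and-initialise in one step, or re-initialise an existing struct:
+ *
+ *   RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sFWBVNC);
+ *
+ *   RGXFWIF_COMPCHECKS_BVNC sHWBVNC;
+ *   RGXFWIF_COMPCHECKS_BVNC_INIT(sHWBVNC);
+ */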
+
+typedef struct _RGXFWIF_COMPCHECKS_
+{
+	RGXFWIF_COMPCHECKS_BVNC		sHWBVNC;				 /*!< hardware BNC (from the RGX registers) */
+	RGXFWIF_COMPCHECKS_BVNC		sFWBVNC;				 /*!< firmware BNC */
+	IMG_UINT32					ui32FWProcessorVersion;  /*!< identifier of the MIPS/META version */
+	IMG_UINT32					ui32DDKVersion;			 /*!< software DDK version */
+	IMG_UINT32					ui32DDKBuild;			 /*!< software DDK build no. */
+	IMG_UINT32					ui32BuildOptions;		 /*!< build options bit-field */
+	IMG_BOOL					bUpdated;				 /*!< Information is valid */
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS;
+
+
+#define GET_CCB_SPACE(WOff, ROff, CCBSize) \
+	((((ROff) - (WOff)) + ((CCBSize) - 1)) & ((CCBSize) - 1))
+
+#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \
+	(Off) = (((Off) + (PacketSize)) & ((CCBSize) - 1))
+
+#define RESERVED_CCB_SPACE 		(sizeof(IMG_UINT32))
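+
+/* Worked example (illustrative, not part of the original header): for a
+ * 1024-byte CCB with ui32WriteOffset = 768 and ui32ReadOffset = 256, the
+ * free space is ((256 - 768) + 1023) & 1023 = 511 bytes; the CCB is
+ * deliberately never reported 100% full, so read == write always means
+ * empty. Advancing the write offset past the end wraps it:
+ *
+ *   UPDATE_CCB_OFFSET(ui32WriteOffset, 512, 1024);  // 768 -> 256
+ */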
+
+
+/* Defines relating to the per-context CCBs */
+
+/* This size is to be used when a client CCB is found to consume very little
+ * space (e.g. a few hundred bytes to a few KB - less than a page). In such a
+ * case, instead of allocating a CCB of only a few KB, we allocate at least
+ * this much to stay risk-free against future growth. */
+#define MIN_SAFE_CCB_SIZE_LOG2	13	/* 8K (2 Pages) */
+
+#define RGX_TQ3D_CCB_SIZE_LOG2		PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D
+static_assert(RGX_TQ3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "TQ3D CCB size is too small");
+#define RGX_TQ2D_CCB_SIZE_LOG2		PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D
+static_assert(RGX_TQ2D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "TQ2D CCB size is too small");
+#define RGX_CDM_CCB_SIZE_LOG2		PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM
+static_assert(RGX_CDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "CDM CCB size is too small");
+#define RGX_TA_CCB_SIZE_LOG2		PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA
+static_assert(RGX_TA_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "TA CCB size is too small");
+#define RGX_3D_CCB_SIZE_LOG2		PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D
+static_assert(RGX_3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "3D CCB size is too small");
+#define RGX_KICKSYNC_CCB_SIZE_LOG2	PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC
+static_assert(RGX_KICKSYNC_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "KickSync CCB size is too small");
+#define RGX_RTU_CCB_SIZE_LOG2		PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RTU
+static_assert(RGX_RTU_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "RTU CCB size is too small");
+/*!
+ ******************************************************************************
+ * Client CCB commands for RGX
+ *****************************************************************************/
+
+#define RGX_CCB_TYPE_TASK			(1 << 31)
+#define RGX_CCB_FWALLOC_ALIGN(size)	(((size) + (RGXFWIF_FWALLOC_ALIGN-1)) & ~(RGXFWIF_FWALLOC_ALIGN - 1))
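+/* E.g. (illustrative): with RGXFWIF_FWALLOC_ALIGN == sizeof(IMG_UINT64) == 8,
+ * RGX_CCB_FWALLOC_ALIGN(13) rounds up to 16 and RGX_CCB_FWALLOC_ALIGN(16)
+ * stays 16. */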
+
+typedef enum _RGXFWIF_CCB_CMD_TYPE_
+{
+	RGXFWIF_CCB_CMD_TYPE_TA			= 201 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_3D			= 202 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_CDM		= 203 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_TQ_3D		= 204 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_TQ_2D		= 205 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_3D_PR		= 206 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_NULL		= 207 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_SHG		= 208 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_RTU		= 209 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_RTU_FC		  = 210 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP = 211 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_TQ_TDM     = 212 | RGX_CCB_TYPE_TASK,
+
+/* Leave a gap between CCB specific commands and generic commands */
+	RGXFWIF_CCB_CMD_TYPE_FENCE          = 213,
+	RGXFWIF_CCB_CMD_TYPE_UPDATE         = 214,
+	RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE     = 215,
+	RGXFWIF_CCB_CMD_TYPE_FENCE_PR       = 216,
+	RGXFWIF_CCB_CMD_TYPE_PRIORITY       = 217,
+/* Pre and Post timestamp commands are supposed to sandwich the DM cmd. The
+   padding code with the CCB wrap upsets the FW if we don't have the task type
+   bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types.
+*/
+	RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP = 218,
+	RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE = 219,
+	RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE = 220,
+
+	RGXFWIF_CCB_CMD_TYPE_PADDING	= 221,
+} RGXFWIF_CCB_CMD_TYPE;
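+
+/* Illustrative sketch (not part of the original header): the task bit makes
+ * it cheap to tell DM task commands apart from generic CCB commands when
+ * walking a CCB, e.g.:
+ *
+ *   static IMG_BOOL IsTaskCmd(RGXFWIF_CCB_CMD_TYPE eType)
+ *   {
+ *       return (eType & RGX_CCB_TYPE_TASK) ? IMG_TRUE : IMG_FALSE;
+ *   }
+ */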
+
+typedef struct _RGXFWIF_WORKLOAD_DATA_
+{
+	/* Workload characteristics data */
+	IMG_UINT64 RGXFW_ALIGN                    ui64WorkloadCharacteristics;
+	/* Deadline for the workload */
+	IMG_UINT64 RGXFW_ALIGN                    ui64DeadlineInus;
+	/* Bool for whether the workload was completed */
+	IMG_BOOL                                  bComplete;
+	/* Predicted time taken to do the work in cycles */
+	IMG_UINT64 RGXFW_ALIGN                    ui64CyclesPrediction;
+	/* The actual time taken in cycles */
+	IMG_UINT64 RGXFW_ALIGN                    ui64CyclesTaken;
+	/* The memory descriptor for this workload */
+	IMG_UINT64 RGXFW_ALIGN                    ui64SelfMemDesc;
+	/* Memory descriptor to be able to chain workload data */
+	IMG_UINT64 RGXFW_ALIGN                    ui64NextNodeMemdesc;
+	/* Reference to Host side data */
+	IMG_UINT64 RGXFW_ALIGN                    ui64WorkloadHostData;
+	/* Reference to Specific Hash table */
+	IMG_UINT64 RGXFW_ALIGN                    ui64WorkloadMatchingData;
+	/* The following are for the memory management of the PDVFS workload
+	 * tree in the firmware */
+	PRGXFWIF_DEADLINE_LIST_NODE RGXFW_ALIGN  sDeadlineNodeFWAddress;
+	PRGXFWIF_WORKLOAD_LIST_NODE RGXFW_ALIGN  sWorkloadNodeFWAddress;
+	IMG_UINT64 RGXFW_ALIGN                    ui64DeadlineNodeMemDesc;
+	IMG_UINT64 RGXFW_ALIGN                    ui64WorkloadNodeMemDesc;
+} RGXFWIF_WORKLOAD_DATA;
+
+typedef struct _RGXFWIF_WORKEST_KICK_DATA_
+{
+	/* Index for the KM Workload estimation return data array */
+	IMG_UINT64 RGXFW_ALIGN                    ui64ReturnDataIndex;
+	/* Deadline for the workload */
+	IMG_UINT64 RGXFW_ALIGN                    ui64DeadlineInus;
+	/* Predicted time taken to do the work in cycles */
+	IMG_UINT64 RGXFW_ALIGN                    ui64CyclesPrediction;
+} RGXFWIF_WORKEST_KICK_DATA;
+
+typedef struct _RGXFWIF_WORKLOAD_LIST_NODE_ RGXFWIF_WORKLOAD_LIST_NODE;
+typedef struct _RGXFWIF_DEADLINE_LIST_NODE_ RGXFWIF_DEADLINE_LIST_NODE;
+
+struct _RGXFWIF_WORKLOAD_LIST_NODE_
+{
+	IMG_UINT64 RGXFW_ALIGN ui64Cycles;
+	IMG_UINT64 RGXFW_ALIGN ui64SelfMemDesc;
+	IMG_UINT64 RGXFW_ALIGN ui64WorkloadDataMemDesc;
+	IMG_BOOL					bReleased;
+	RGXFWIF_WORKLOAD_LIST_NODE *psNextNode;
+};
+
+struct _RGXFWIF_DEADLINE_LIST_NODE_
+{
+	IMG_UINT64 RGXFW_ALIGN ui64DeadlineInus;
+	RGXFWIF_WORKLOAD_LIST_NODE *psWorkloadList;
+	IMG_UINT64 RGXFW_ALIGN ui64SelfMemDesc;
+	IMG_UINT64 RGXFW_ALIGN ui64WorkloadDataMemDesc;
+	IMG_BOOL					bReleased;
+	RGXFWIF_DEADLINE_LIST_NODE *psNextNode;
+};
+typedef struct _RGXFWIF_CCB_CMD_HEADER_
+{
+	RGXFWIF_CCB_CMD_TYPE				eCmdType;
+	IMG_UINT32							ui32CmdSize;
+	IMG_UINT32							ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */
+	IMG_UINT32							ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */
+	PRGXFWIF_WORKLOAD_DATA RGXFW_ALIGN	sWorkloadDataFWAddr;
+	RGXFWIF_WORKEST_KICK_DATA			sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */
+	IMG_DEV_VIRTADDR					sRobustnessResetReason; /*!< Address to write reset reason to */
+} RGXFWIF_CCB_CMD_HEADER;
+
+typedef enum _RGXFWIF_REG_CFG_TYPE_
+{
+	RGXFWIF_REG_CFG_TYPE_PWR_ON=0,       /* Sidekick power event */
+	RGXFWIF_REG_CFG_TYPE_DUST_CHANGE,    /* Rascal / dust power event */
+	RGXFWIF_REG_CFG_TYPE_TA,	         /* TA kick */
+	RGXFWIF_REG_CFG_TYPE_3D,	         /* 3D kick */
+	RGXFWIF_REG_CFG_TYPE_CDM,	         /* Compute kick */
+	RGXFWIF_REG_CFG_TYPE_TLA,	         /* TLA kick */
+	RGXFWIF_REG_CFG_TYPE_TDM,	         /* TDM kick */
+	RGXFWIF_REG_CFG_TYPE_ALL             /* Applies to all types. Keep as last element */
+} RGXFWIF_REG_CFG_TYPE;
+
+typedef struct _RGXFWIF_REG_CFG_REC_
+{
+	IMG_UINT64		ui64Addr;
+	IMG_UINT64		ui64Mask;
+	IMG_UINT64		ui64Value;
+} RGXFWIF_REG_CFG_REC;
+
+
+typedef struct _RGXFWIF_TIME_CORR_
+{
+	IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp;
+	IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp;
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp;
+
+	/* Utility variable used to convert CR timer deltas to OS timer deltas (nS),
+	 * where the deltas are relative to the timestamps above:
+	 * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */
+	IMG_UINT64 RGXFW_ALIGN ui64CRDeltaToOSDeltaKNs;
+
+	IMG_UINT32             ui32CoreClockSpeed;
+} UNCACHED_ALIGN RGXFWIF_TIME_CORR;
+
+
+/* These macros are used to help converting FW timestamps to the Host time domain.
+ * On the FW the RGX_CR_TIMER counter is used to keep track of the time;
+ * it increments by 1 every 256 GPU clock ticks, so the general formula
+ * to perform the conversion is:
+ *
+ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS,
+ *   otherwise if (scale == 10^6) then deltaOS is in uS ]
+ *
+ *             deltaCR * 256                                   256 * scale
+ *  deltaOS = --------------- * scale = deltaCR * K    [ K = --------------- ]
+ *             GPUclockspeed                                  GPUclockspeed
+ *
+ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20)
+ * to get some better accuracy and to avoid returning 0 in the integer
+ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz.
+ * This is the same as keeping K as a decimal number.
+ *
+ * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies
+ * (deltaCR * K is more or less a constant), and it's relative to
+ * the base OS timestamp sampled as a part of the timer correlation data.
+ * This base is refreshed on GPU power-on, DVFS transition and
+ * periodic frequency calibration (executed every few seconds if the FW is
+ * doing some work), so as long as the GPU is doing something and one of these
+ * events is triggered then deltaCR * K will not overflow and deltaOS will be
+ * correct.
+ */
+
+#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT  (20)
+
+#define RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS(clockfreq, remainder) \
+	OSDivide64((256000000ULL << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT), \
+			   ((clockfreq) + 500) / 1000, \
+			   &(remainder))
+
+#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \
+	( ((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
+
+#define RGXFWIF_GET_DELTA_OSTIME_US(deltacr, clockfreq, remainder) \
+	OSDivide64r64((deltacr) * 256000, ((clockfreq) + 500) / 1000, &(remainder))
+
+/* Use this macro to get a more realistic GPU core clock speed than
+ * the one given by the upper layers (used when doing GPU frequency
+ * calibration)
+ */
+#define RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(deltacr_us, deltaos_us, remainder) \
+    OSDivide64((deltacr_us) * 256000000, (deltaos_us), &(remainder))
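+
+/* Worked example (illustrative, not part of the original header): with a
+ * 400MHz GPU clock, K = (256 * 10^9 * 2^20) / 400000000 = 640 * 2^20, so a
+ * CR delta of 1562500 ticks converts to
+ * RGXFWIF_GET_DELTA_OSTIME_NS(1562500, K) = (1562500 * 640 * 2^20) >> 20
+ * = 1000000000nS, i.e. exactly one second of OS time, as expected since
+ * 1562500 * 256 = 400000000 GPU clock ticks.
+ */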
+
+/*
+	The maximum size configurable via RGX_FW_HEAP_SHIFT is
+	32MiB (1<<25) and the minimum is 4MiB (1<<22); the
+	default firmware heap size is set to the maximum of 32MiB.
+*/
+#if (RGX_FW_HEAP_SHIFT < 22 || RGX_FW_HEAP_SHIFT > 25)
+#error "RGX_FW_HEAP_SHIFT is outside valid range [22, 25]"
+#endif
+
+#endif /*  __RGX_FWIF_SHARED_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_shared.h)
+******************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_sig.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_sig.h
new file mode 100644
index 0000000..4d736ed
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_fwif_sig.h
@@ -0,0 +1,168 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware signature checks
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware interface structures used by srvinit and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_SIG_H__)
+#define __RGX_FWIF_SIG_H__
+
+#include "rgxdefs_km.h"
+
+/************************************************************************
+* RGX FW signature checks
+************************************************************************/
+
+#if defined(__KERNEL__)
+
+#if defined(PDUMP)
+
+#define SIG_REG_TA_MAX_COUNT	(12)
+static RGXFW_REGISTER_LIST asTASigRegList[SIG_REG_TA_MAX_COUNT];
+static IMG_UINT32 gui32TASigRegCount = 0;
+
+#define SIG_REG_3D_MAX_COUNT	(6)
+static RGXFW_REGISTER_LIST as3DSigRegList[SIG_REG_3D_MAX_COUNT];
+static IMG_UINT32	gui323DSigRegCount = 0;
+
+#endif /* PDUMP */
+
+#else
+
+/* List of TA signature and checksum register addresses */
+static const RGXFW_REGISTER_LIST asTASigRegList[] =
+{	/* Register */						/* Indirect_Reg */			/* Start, End */
+#if defined(RGX_FEATURE_SCALABLE_VDM_GPP)
+	{RGX_CR_USC_UVB_CHECKSUM,			RGX_CR_BLACKPEARL_INDIRECT,	0, RGX_NUM_PHANTOMS-1},
+#else
+	{RGX_CR_USC_UVS0_CHECKSUM,			0,							0, 0},
+	{RGX_CR_USC_UVS1_CHECKSUM,			0,							0, 0},
+	{RGX_CR_USC_UVS2_CHECKSUM,			0,							0, 0},
+	{RGX_CR_USC_UVS3_CHECKSUM,			0,							0, 0},
+	{RGX_CR_USC_UVS4_CHECKSUM,			0,							0, 0},
+	{RGX_CR_USC_UVS5_CHECKSUM,			0,							0, 0},
+#endif
+#if defined(RGX_FEATURE_SCALABLE_TE_ARCH)
+#if defined(RGX_FEATURE_SCALABLE_VDM_GPP)
+	{RGX_CR_PPP_CLIP_CHECKSUM,			RGX_CR_BLACKPEARL_INDIRECT,	0, RGX_NUM_PHANTOMS-1},
+#else
+	{RGX_CR_PPP,						0,							0, 0},
+#endif
+	{RGX_CR_TE_CHECKSUM,				0,							0, 0},
+#else
+	{RGX_CR_PPP_SIGNATURE,				0,							0, 0},
+	{RGX_CR_TE_SIGNATURE,				0,							0, 0},
+#endif
+	{RGX_CR_VCE_CHECKSUM,				0,							0, 0},
+#if !defined(RGX_FEATURE_PDS_PER_DUST) || !defined(FIX_HW_BRN_62204)
+	{RGX_CR_PDS_DOUTM_STM_SIGNATURE,	0,							0, 0},
+#endif
+};
+
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_ROGUEXE)
+#define HWR_SIG_RAST_INDIRECT (0)
+#define HWR_SIG_RAST_INDIRECT_NUM (0)
+
+#if defined(RGX_FEATURE_PBE2_IN_XE) && RGX_FEATURE_NUM_CLUSTERS > 1
+#define HWR_SIG_PBE_INDIRECT (0)
+#define HWR_SIG_PBE_INDIRECT_NUM (0)
+#else
+#define HWR_SIG_PBE_INDIRECT (RGX_CR_PBE_INDIRECT)
+#define HWR_SIG_PBE_INDIRECT_NUM (RGX_FEATURE_NUM_CLUSTERS-1)
+#endif /* !(defined(RGX_FEATURE_PBE2_IN_XE) && RGX_FEATURE_NUM_CLUSTERS > 1) */
+
+#else
+
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_ROGUEXE)
+#define HWR_SIG_RAST_INDIRECT (RGX_CR_RASTERISATION_INDIRECT)
+#define HWR_SIG_RAST_INDIRECT_NUM (RGX_NUM_RASTERISATION_MODULES-1)
+#else
+#define HWR_SIG_RAST_INDIRECT (RGX_CR_BLACKPEARL_INDIRECT)
+#define HWR_SIG_RAST_INDIRECT_NUM (RGX_NUM_PHANTOMS-1)
+#endif /* !(defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_ROGUEXE)) */
+
+#define HWR_SIG_PBE_INDIRECT (RGX_CR_PBE_INDIRECT)
+#define HWR_SIG_PBE_INDIRECT_NUM (RGX_FEATURE_NUM_CLUSTERS-1)
+
+#endif /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_ROGUEXE) */
+
+/* List of 3D signature and checksum register addresses */
+static const RGXFW_REGISTER_LIST as3DSigRegList[] =
+{	/* Register */						/* Indirect_Reg */			/* Start, End */
+	{RGX_CR_ISP_PDS_CHECKSUM,           HWR_SIG_RAST_INDIRECT,  0, HWR_SIG_RAST_INDIRECT_NUM},
+	{RGX_CR_ISP_TPF_CHECKSUM,           HWR_SIG_RAST_INDIRECT,  0, HWR_SIG_RAST_INDIRECT_NUM},
+	{RGX_CR_TFPU_PLANE0_CHECKSUM,       HWR_SIG_RAST_INDIRECT,  0, HWR_SIG_RAST_INDIRECT_NUM},
+	{RGX_CR_TFPU_PLANE1_CHECKSUM,       HWR_SIG_RAST_INDIRECT,  0, HWR_SIG_RAST_INDIRECT_NUM},
+	{RGX_CR_PBE_CHECKSUM,               HWR_SIG_PBE_INDIRECT,   0, HWR_SIG_PBE_INDIRECT_NUM},
+	{RGX_CR_IFPU_ISP_CHECKSUM,          HWR_SIG_RAST_INDIRECT,  0, HWR_SIG_RAST_INDIRECT_NUM}
+};
+#endif /* !__KERNEL__ */
+
+#if defined (RGX_FEATURE_RAY_TRACING) || defined(__KERNEL__)
+/* List of RTU signature and checksum register addresses */
+static const RGXFW_REGISTER_LIST asRTUSigRegList[] =
+{	/* Register */						/* Indirect_Reg */			/* Start, End */
+	{DPX_CR_RS_PDS_RR_CHECKSUM,				0,							0, 0},
+	{RGX_CR_FBA_FC0_CHECKSUM,				0,							0, 0},
+	{RGX_CR_FBA_FC1_CHECKSUM,				0,							0, 0},
+	{RGX_CR_FBA_FC2_CHECKSUM,				0,							0, 0},
+	{RGX_CR_FBA_FC3_CHECKSUM,				0,							0, 0},
+	{DPX_CR_RQ_USC_DEBUG,					0,							0, 0},
+};
+
+/* List of SHG signature and checksum register addresses */
+static const RGXFW_REGISTER_LIST asSHGSigRegList[] =
+{	/* Register */						/* Indirect_Reg */			/* Start, End */
+	{RGX_CR_SHF_SHG_CHECKSUM,			0,							0, 0},
+	{RGX_CR_SHF_VERTEX_BIF_CHECKSUM,	0,							0, 0},
+	{RGX_CR_SHF_VARY_BIF_CHECKSUM,		0,							0, 0},
+	{RGX_CR_RPM_BIF_CHECKSUM,			0,							0, 0},
+	{RGX_CR_SHG_BIF_CHECKSUM,			0,							0, 0},
+	{RGX_CR_SHG_FE_BE_CHECKSUM,			0,							0, 0},
+};
+#endif /* defined(RGX_FEATURE_RAY_TRACING) || defined(__KERNEL__) */
+
+#endif /*  __RGX_FWIF_SIG_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_sig.h)
+******************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_pdump_panics.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_pdump_panics.h
new file mode 100644
index 0000000..828e886
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_pdump_panics.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX PDump panic definitions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX PDump panic definitions header
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (RGX_PDUMP_PANICS_H_)
+#define RGX_PDUMP_PANICS_H_
+
+
+/*! Unique device specific IMG_UINT16 panic IDs to identify the cause of a
+ * RGX PDump panic in a PDump script. */
+typedef enum
+{
+	RGX_PDUMP_PANIC_UNDEFINED = 0,
+
+	/* These panics occur when test parameters and driver configuration
+	 * enable features that require the firmware and host driver to
+	 * communicate. Such features are not supported with off-line playback.
+	 */
+	RGX_PDUMP_PANIC_ZSBUFFER_BACKING         = 101, /*!< Requests ZSBuffer to be backed with physical pages */
+	RGX_PDUMP_PANIC_ZSBUFFER_UNBACKING       = 102, /*!< Requests ZSBuffer to be unbacked */
+	RGX_PDUMP_PANIC_FREELIST_GROW            = 103, /*!< Requests an on-demand freelist grow/shrink */
+	RGX_PDUMP_PANIC_FREELISTS_RECONSTRUCTION = 104, /*!< Requests freelists reconstruction */
+	RGX_PDUMP_PANIC_SPARSEMEM_SWAP           = 105, /*!< Requests sparse remap memory swap feature */
+} RGX_PDUMP_PANIC;
+
+
+#endif /* RGX_PDUMP_PANICS_H_ */
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_tq_shared.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_tq_shared.h
new file mode 100644
index 0000000..bd3460c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgx_tq_shared.h
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX transfer queue shared
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared definitions between client and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGX_TQ_SHARED_H__
+#define __RGX_TQ_SHARED_H__
+
+#define TQ_MAX_PREPARES_PER_SUBMIT		16
+
+#define TQ_PREP_FLAGS_COMMAND_3D		0x0
+#define TQ_PREP_FLAGS_COMMAND_2D		0x1
+#define TQ_PREP_FLAGS_COMMAND_MASK		(0xf)
+#define TQ_PREP_FLAGS_COMMAND_SHIFT		0
+#define TQ_PREP_FLAGS_PDUMPCONTINUOUS	(1 << 4)
+#define TQ_PREP_FLAGS_START				(1 << 5)
+#define TQ_PREP_FLAGS_END				(1 << 6)
+
+#define TQ_PREP_FLAGS_COMMAND_SET(m) \
+	((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK)
+
+#define TQ_PREP_FLAGS_COMMAND_IS(m,n) \
+	(((m & TQ_PREP_FLAGS_COMMAND_MASK) >> TQ_PREP_FLAGS_COMMAND_SHIFT)  == TQ_PREP_FLAGS_COMMAND_##n)
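+
+/* Usage sketch (illustrative, not part of the original header):
+ *
+ *   IMG_UINT32 ui32PrepFlags = TQ_PREP_FLAGS_COMMAND_SET(3D)
+ *                            | TQ_PREP_FLAGS_START
+ *                            | TQ_PREP_FLAGS_END;
+ *
+ *   if (TQ_PREP_FLAGS_COMMAND_IS(ui32PrepFlags, 3D))
+ *   {
+ *       // prepare a single, self-contained 3D transfer
+ *   }
+ */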
+
+#endif /* __RGX_TQ_SHARED_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgxapi_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgxapi_km.h
new file mode 100644
index 0000000..4c53926
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgxapi_km.h
@@ -0,0 +1,305 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX API Header kernel mode
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exported RGX API details
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXAPI_KM_H__
+#define __RGXAPI_KM_H__
+
+#if defined(SUPPORT_SHARED_SLC)
+/*!
+******************************************************************************
+
+ @Function	RGXInitSLC
+
+ @Description Initialise the SLC after a power up. Calling this function is
+              required when using SUPPORT_SHARED_SLC; otherwise it must not
+              be called.
+
+ @Input	   hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle);
+#endif
+
+#include "rgx_hwperf.h"
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Control API(s)
+ *****************************************************************************/
+
+typedef struct _RGX_HWPERF_DEVICE_
+{
+	IMG_CHAR pszName[20];	/* Helps identify this device uniquely */
+	IMG_HANDLE hDevData;	/* Handle for the server */
+
+	struct _RGX_HWPERF_DEVICE_ *psNext;
+} RGX_HWPERF_DEVICE;
+
+typedef struct
+{
+	RGX_HWPERF_DEVICE *psHWPerfDevList;
+} RGX_HWPERF_CONNECTION;
+
+/**************************************************************************/ /*!
+@Function      RGXHWPerfLazyConnect
+@Description   Obtain a HWPerf connection object to the RGX device(s). The
+			   connections to devices are not actually opened until
+			   RGXHWPerfOpen() is called.
+@Output        ppsHWPerfConnection     Address of a HWPerf connection object
+@Return        PVRSRV_ERROR:  for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/**************************************************************************/ /*!
+@Function      RGXHWPerfOpen
+@Description   Opens connection(s) to the RGX device(s). A valid handle to the
+			   connection object has to be provided, which means this function
+			   must be preceded by a call to RGXHWPerfLazyConnect().
+@Input         psHWPerfConnection      HWPerf connection object
+@Return        PVRSRV_ERROR:  for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION* psHWPerfConnection);
+
+
+/**************************************************************************/ /*!
+@Function      RGXHWPerfConnect
+@Description   Obtain a connection object to the RGX HWPerf module. Allocated
+			   connection object(s) reference opened connection(s).
+			   Calling this function is equivalent to calling
+			   RGXHWPerfLazyConnect followed by RGXHWPerfOpen.
+			   This connect should be used when the caller will be retrieving
+			   event data.
+@Output        ppsHWPerfConnection      Address of HWPerf connection object
+@Return        PVRSRV_ERROR:  for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfFreeConnection
+@Description    Frees the HWPerf connection object
+@Input          ppsHWPerfConnection    Pointer to connection object as returned
+                                      from RGXHWPerfLazyConnect()
+@Return         PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfClose
+@Description    Closes all the opened connection(s) to RGX device(s)
+@Input          psHWPerfConnection    Pointer to HWPerf connection object as
+                                      returned from RGXHWPerfConnect() or RGXHWPerfOpen()
+@Return         PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection);
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfDisconnect
+@Description    Disconnect from the RGX device. Calling this function is
+                equivalent to calling RGXHWPerfClose followed by
+                RGXHWPerfFreeConnection.
+@Input          ppsHWPerfConnection   Pointer to HWPerf connection object as
+                                      returned from RGXHWPerfConnect() or
+                                      RGXHWPerfOpen()
+@Return         PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
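+
+/* Usage sketch (illustrative, not part of the original header; error handling
+ * elided): the two-step and one-step connection paths are equivalent:
+ *
+ *   RGX_HWPERF_CONNECTION *psConn = NULL;
+ *
+ *   // two-step:                          // one-step:
+ *   RGXHWPerfLazyConnect(&psConn);        // RGXHWPerfConnect(&psConn);
+ *   RGXHWPerfOpen(psConn);
+ *   ...
+ *   RGXHWPerfClose(psConn);               // RGXHWPerfDisconnect(&psConn);
+ *   RGXHWPerfFreeConnection(&psConn);
+ */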
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfControl
+@Description    Enable or disable the generation of RGX HWPerf event packets.
+                 See RGXCtrlHWPerf().
+@Input          psHWPerfConnection Pointer to HWPerf connection object
+@Input			eStreamId		 ID of the HWPerf stream
+@Input          bToggle          Switch to toggle or apply mask.
+@Input          ui64Mask         Mask of events to control.
+@Return         PVRSRV_ERROR:    for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfControl(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		RGX_HWPERF_STREAM_ID eStreamId,
+		IMG_BOOL             bToggle,
+		IMG_UINT64           ui64Mask);
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfGetFilter
+@Description    Reads HWPerf stream filter where stream is identified by
+                the given stream ID.
+@Input          hDevData        Handle to connection/device object
+@Input          eStreamId       ID of the HWPerf stream
+@Output         ui64Filter      HWPerf filter value
+@Return         PVRSRV_ERROR:   for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfGetFilter(
+		IMG_HANDLE  hDevData,
+		RGX_HWPERF_STREAM_ID eStreamId,
+		IMG_UINT64 *ui64Filter
+);
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfConfigureAndEnableCounters
+@Description    Enable and configure the performance counter block for
+                 one or more device layout modules.
+                 See RGXConfigureAndEnableHWPerfCounters().
+@Input          psHWPerfConnection Pointer to HWPerf connection object
+@Input          ui32NumBlocks    Number of elements in the array
+@Input          asBlockConfigs   Address of the array of configuration blocks
+@Return         PVRSRV_ERROR:    for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfConfigureAndEnableCounters(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		IMG_UINT32                 ui32NumBlocks,
+		RGX_HWPERF_CONFIG_CNTBLK*  asBlockConfigs);
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfDisableCounters
+@Description    Disable the performance counter block for one or more
+                 device layout modules. See RGXDisableHWPerfCounters().
+@Input          psHWPerfConnection Pointer to HWPerf connection object
+@Input          ui32NumBlocks   Number of elements in the array
+@Input          aeBlockIDs      An array of bytes with values taken from
+                                 the RGX_HWPERF_CNTBLK_ID enumeration.
+@Return         PVRSRV_ERROR:   for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfDisableCounters(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		IMG_UINT32   ui32NumBlocks,
+		IMG_UINT16*   aeBlockIDs);
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfEnableCounters
+@Description    Enable the performance counter block for one or more
+                 device layout modules. See RGXEnableHWPerfCounters().
+@Input          hDevData        Handle to connection/device object
+@Input          ui32NumBlocks   Number of elements in the array
+@Input          aeBlockIDs      An array of bytes with values taken from
+                                 the RGX_HWPERF_CNTBLK_ID enumeration.
+@Return         PVRSRV_ERROR:   for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfEnableCounters(
+		IMG_HANDLE   hDevData,
+		IMG_UINT32   ui32NumBlocks,
+		IMG_UINT16*   aeBlockIDs);
+
+/******************************************************************************
+ * RGX HW Performance Profiling Retrieval API(s)
+ *
+ * The client must ensure that its use of this acquire/release API for a
+ * single connection/stream is not shared between multiple execution contexts,
+ * e.g. between a kernel thread and an ISR handler. It is the client's
+ * responsibility to ensure this API is not interrupted by a high priority
+ * thread/ISR.
+ *****************************************************************************/
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfAcquireEvents
+@Description    When there is data available to read, this call returns with OK
+                 and the address and length of the data buffer the
+                 client can safely read. This buffer may contain one or more
+                 event packets.
+                 When there is no data to read, this call returns with OK
+                 and sets *pui32BufLen to 0 on exit.
+                 Clients must pair this call with an RGXHWPerfReleaseEvents call.
+@Input          hDevData        Handle to connection/device object
+@Input          eStreamId       ID of the HWPerf stream
+@Output         ppBuf           Address of a pointer to a byte buffer. On exit
+                                it contains the address of buffer to read from
+@Output         pui32BufLen     Pointer to an integer. On exit it is the size
+                                of the data to read from the buffer
+@Return         PVRSRV_ERROR:   for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfAcquireEvents(
+		IMG_HANDLE  hDevData,
+		RGX_HWPERF_STREAM_ID eStreamId,
+		IMG_PBYTE*  ppBuf,
+		IMG_UINT32* pui32BufLen);
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfReleaseEvents
+@Description    Called after client has read the event data out of the buffer
+                 retrieved from the Acquire Events call to release resources.
+@Input          hDevData        Handle to connection/device object
+@Input          eStreamId       ID of the HWPerf stream
+@Return         PVRSRV_ERROR:   for system error codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXHWPerfReleaseEvents(
+		IMG_HANDLE hDevData,
+		RGX_HWPERF_STREAM_ID eStreamId);
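+
+/* Usage sketch (illustrative, not part of the original header; error handling
+ * elided): a typical retrieval loop pairs every Acquire with a Release on the
+ * same stream, e.g.:
+ *
+ *   IMG_PBYTE  pBuf;
+ *   IMG_UINT32 ui32Len;
+ *
+ *   if (RGXHWPerfAcquireEvents(hDevData, eStreamId, &pBuf, &ui32Len) == PVRSRV_OK)
+ *   {
+ *       if (ui32Len > 0)
+ *       {
+ *           // parse ui32Len bytes of event packets starting at pBuf
+ *       }
+ *       RGXHWPerfReleaseEvents(hDevData, eStreamId);
+ *   }
+ */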
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfConvertCRTimeStamp
+@Description    Converts the timestamp given by FW events to the common OS
+                timestamp. The first three inputs are obtained via
+                a CLK_SYNC event, ui64CRTimeStamp is the CR timestamp
+                from the FW event to be converted.
+@Input          ui32ClkSpeed            Clock speed given by sync event
+@Input          ui64CorrCRTimeStamp     CR Timestamp given by sync event
+@Input          ui64CorrOSTimeStamp     Correlating OS Timestamp given by sync
+                                        event
+@Input          ui64CRTimeStamp         CR Timestamp to convert
+@Return         IMG_UINT64:             Calculated OS Timestamp
+ */ /**************************************************************************/
+IMG_UINT64 RGXHWPerfConvertCRTimeStamp(
+		IMG_UINT32 ui32ClkSpeed,
+		IMG_UINT64 ui64CorrCRTimeStamp,
+		IMG_UINT64 ui64CorrOSTimeStamp,
+		IMG_UINT64 ui64CRTimeStamp);
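+
+/* Conceptual conversion (a sketch, not the exact driver arithmetic): the
+ * correlation sample anchors a CR value to an OS time, so the result is
+ * approximately
+ *
+ *   ui64CorrOSTimeStamp
+ *       + (ui64CRTimeStamp - ui64CorrCRTimeStamp) * uiNsPerCRTick
+ *
+ * where uiNsPerCRTick is a hypothetical per-tick period derived from
+ * ui32ClkSpeed.
+ */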
+
+#endif /* __RGXAPI_KM_H__ */
+
+/******************************************************************************
+ End of file (rgxapi_km.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgxfw_log_helper.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgxfw_log_helper.h
new file mode 100644
index 0000000..a68366a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/rgxfw_log_helper.h
@@ -0,0 +1,88 @@
+/*************************************************************************/ /*!
+@File           rgxfw_log_helper.h
+@Title          Firmware TBI logging helper function
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform       Generic
+@Description    This file contains some helper code to make TBI logging
+                possible. Specifically, it uses the SFIDLIST xmacro to trace
+                ids back to the original format strings.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _RGXFW_LOG_HELPER_H_
+#define _RGXFW_LOG_HELPER_H_
+
+#include "rgx_fwif_sf.h"
+
+static IMG_CHAR *const groups[] = {
+#define X(A,B) #B,
+	RGXFW_LOG_SFGROUPLIST
+#undef X
+};
+
+typedef struct {
+	IMG_UINT32 id;
+	IMG_CHAR *name;
+} tuple; /* pair of string format id and its format string */
+
+/* The tuple pairs that will be generated using XMacros will be stored here.
+ * This macro definition must match the definition of SFids in rgx_fwif_sf.h. */
+static const tuple SFs[] = {
+#define X(a, b, c, d, e) { RGXFW_LOG_CREATESFID(a,b,e) , d },
+	RGXFW_LOG_SFIDLIST
+#undef X
+};
+
+/* idToStringID: search the SFs tuples {id, string} for a matching id.
+ * Returns the index into the array if found, or RGXFW_SF_LAST if not.
+ * bsearch could be used, as the ids are in increasing order. */
+static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData)
+{
+	IMG_UINT32 i;
+	for (i = 0; SFs[i].id != RGXFW_SF_LAST; i++)
+	{
+		if (ui32CheckData == SFs[i].id)
+		{
+			return i;
+		}
+	}
+	/* Nothing found, return max value */
+	return RGXFW_SF_LAST;
+}
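+
+/* Illustrative use by a hypothetical caller: map a firmware SFID back to
+ * its format string before printing a trace line:
+ *
+ *   IMG_UINT32 idx = idToStringID(ui32SFId);
+ *   if (idx != RGXFW_SF_LAST)
+ *       pszFmt = SFs[idx].name;
+ */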
+
+#endif /* _RGXFW_LOG_HELPER_H_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/shared/allocmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/shared/allocmem.h
new file mode 100644
index 0000000..decef99
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/shared/allocmem.h
@@ -0,0 +1,176 @@
+/*************************************************************************/ /*!
+@File           allocmem.h
+@Title          memory allocation header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Memory-Allocation API definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __ALLOCMEM_H__
+#define __ALLOCMEM_H__
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) || !defined(DEBUG) || !defined(PVRSRV_ENABLE_PROCESS_STATS) || !defined(PVRSRV_ENABLE_MEMORY_STATS)
+/**************************************************************************/ /*!
+@Function       OSAllocMem
+@Description    Allocates CPU memory. Contents are uninitialized.
+                If passed a size of zero, function should not assert,
+                but just return a NULL pointer.
+@Input          ui32Size        Size of required allocation (in bytes)
+@Return         Pointer to allocated memory on success.
+                Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocMem(IMG_UINT32 ui32Size);
+/**************************************************************************/ /*!
+@Function       OSAllocZMem
+@Description    Allocates CPU memory and initializes the contents to zero.
+                If passed a size of zero, function should not assert,
+                but just return a NULL pointer.
+@Input          ui32Size        Size of required allocation (in bytes)
+@Return         Pointer to allocated memory on success.
+                Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocZMem(IMG_UINT32 ui32Size);
+#else
+void *_OSAllocMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine);
+void *_OSAllocZMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine);
+#define OSAllocMem(_size) \
+    _OSAllocMem ((_size), (__FILE__), (__LINE__))
+#define OSAllocZMem(_size) \
+    _OSAllocZMem ((_size), (__FILE__), (__LINE__))
+#endif
+
+/**************************************************************************/ /*!
+@Function       OSAllocMemNoStats
+@Description    Allocates CPU memory. Contents are uninitialized.
+                If passed a size of zero, function should not assert,
+                but just return a NULL pointer.
+                The allocated memory is not accounted for by process stats.
+                Process stats are an optional feature (enabled only when
+                PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+                of memory allocated to help in debugging. Where this is not
+                required, OSAllocMem() and OSAllocMemNoStats() equate to
+                the same operation.
+@Input          ui32Size        Size of required allocation (in bytes)
+@Return         Pointer to allocated memory on success.
+                Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocMemNoStats(IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+@Function       OSAllocZMemNoStats
+@Description    Allocates CPU memory and initializes the contents to zero.
+                If passed a size of zero, function should not assert,
+                but just return a NULL pointer.
+                The allocated memory is not accounted for by process stats.
+                Process stats are an optional feature (enabled only when
+                PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+                of memory allocated to help in debugging. Where this is not
+                required, OSAllocZMem() and OSAllocZMemNoStats() equate to
+                the same operation.
+@Input          ui32Size        Size of required allocation (in bytes)
+@Return         Pointer to allocated memory on success.
+                Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocZMemNoStats(IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+@Function       OSFreeMem
+@Description    Frees previously allocated CPU memory.
+@Input          pvCpuVAddr       Pointer to the memory to be freed.
+@Return         None.
+ */ /**************************************************************************/
+void OSFreeMem(void *pvCpuVAddr);
+
+/**************************************************************************/ /*!
+@Function       OSFreeMemNoStats
+@Description    Frees previously allocated CPU memory.
+                The freed memory does not update the figures in process stats.
+                Process stats are an optional feature (enabled only when
+                PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+                of memory allocated to help in debugging. Where this is not
+                required, OSFreeMem() and OSFreeMemNoStats() equate to the
+                same operation.
+@Input          pvCpuVAddr       Pointer to the memory to be freed.
+@Return         None.
+ */ /**************************************************************************/
+void OSFreeMemNoStats(void *pvCpuVAddr);
+
+/*
+ * These macros allow us to catch double-free bugs on DEBUG builds and
+ * prevent crashes on RELEASE builds.
+ */
+
+#if defined(DEBUG)
+#define double_free_sentinel (void*) &OSFreeMem
+#define ALLOCMEM_ASSERT(exp) PVR_ASSERT(exp)
+#else
+#define double_free_sentinel NULL
+#define ALLOCMEM_ASSERT(exp) do {} while(0)
+#endif
+
+#define OSFreeMem(_ptr) do { \
+		ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \
+		(OSFreeMem)(_ptr); \
+		(_ptr) = double_free_sentinel; \
+		MSC_SUPPRESS_4127 \
+	} while (0)
+
+#define OSFreeMemNoStats(_ptr) do { \
+		ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \
+		(OSFreeMemNoStats)(_ptr); \
+		(_ptr) = double_free_sentinel; \
+		MSC_SUPPRESS_4127 \
+	} while (0)
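+
+/* Illustrative usage (a sketch): because OSFreeMem() is a statement-like
+ * macro, the pointer passed in is overwritten with the sentinel after the
+ * free, so an accidental double free trips ALLOCMEM_ASSERT on DEBUG builds:
+ *
+ *   IMG_UINT32 *pui32Tmp = OSAllocZMem(16 * sizeof(IMG_UINT32));
+ *   if (pui32Tmp == NULL)
+ *       return PVRSRV_ERROR_OUT_OF_MEMORY;
+ *   ...
+ *   OSFreeMem(pui32Tmp);   // pui32Tmp now holds the sentinel
+ */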
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __ALLOCMEM_H__ */
+
+/******************************************************************************
+ End of file (allocmem.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/shared/hash.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/shared/hash.h
new file mode 100644
index 0000000..6c8171b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/shared/hash.h
@@ -0,0 +1,229 @@
+/*************************************************************************/ /*!
+@File
+@Title          Self scaling hash tables
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements simple self scaling hash tables.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _HASH_H_
+#define _HASH_H_
+
+/* include5/ */
+#include "img_types.h"
+
+/* services/client/include/ or services/server/include/ */
+#include "osfunc.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Keys passed to the comparison function are only guaranteed to
+ * be aligned on an uintptr_t boundary.
+ */
+typedef IMG_UINT32 HASH_FUNC(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+typedef IMG_BOOL HASH_KEY_COMP(size_t uKeySize, void *pKey1, void *pKey2);
+
+typedef struct _HASH_TABLE_ HASH_TABLE;
+
+typedef PVRSRV_ERROR (*HASH_pfnCallback) (
+	uintptr_t k,
+	uintptr_t v
+);
+
+/*************************************************************************/ /*!
+@Function       HASH_Func_Default
+@Description    Hash function intended for hashing keys composed of
+                uintptr_t arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey         A pointer to the key to hash.
+@Input          uHashTabLen  The length of the hash table. 
+@Return         The hash value.
+*/ /**************************************************************************/
+IMG_UINT32 HASH_Func_Default (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+
+/*************************************************************************/ /*!
+@Function       HASH_Key_Comp_Default
+@Description    Compares keys composed of uintptr_t arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey1        Pointer to first hash key to compare.
+@Input          pKey2        Pointer to second hash key to compare.
+@Return         IMG_TRUE  - the keys match.
+                IMG_FALSE - the keys don't match.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Key_Comp_Default (size_t uKeySize, void *pKey1, void *pKey2);
+
+/*************************************************************************/ /*!
+@Function       HASH_Create_Extended
+@Description    Create a self scaling hash table, using the supplied
+                key size, and the supplied hash and key comparison
+                functions.
+@Input          uInitialLen   Initial and minimum length of the
+                              hash table, where the length refers to the number
+                              of entries in the hash table, not its size in
+                              bytes.
+@Input          uKeySize      The size of the key, in bytes.
+@Input          pfnHashFunc   Pointer to hash function.
+@Input          pfnKeyComp    Pointer to key comparison function.
+@Return         NULL or hash table handle.
+*/ /**************************************************************************/
+HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
+
+/*************************************************************************/ /*!
+@Function       HASH_Create
+@Description    Create a self scaling hash table with a key
+                consisting of a single uintptr_t, and using
+                the default hash and key comparison functions.
+@Input          uInitialLen   Initial and minimum length of the
+                              hash table, where the length refers to the
+                              number of entries in the hash table, not its size
+                              in bytes.
+@Return         NULL or hash table handle.
+*/ /**************************************************************************/
+HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen);
+
+/*************************************************************************/ /*!
+@Function       HASH_Delete
+@Description    Delete a hash table created by HASH_Create_Extended or
+                HASH_Create.  All entries in the table must have been
+                removed before calling this function.
+@Input          pHash         Hash table
+*/ /**************************************************************************/
+void HASH_Delete (HASH_TABLE *pHash);
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert_Extended
+@Description    Insert a key value pair into a hash table created
+                with HASH_Create_Extended.
+@Input          pHash         The hash table.
+@Input          pKey          Pointer to the key.
+@Input          v             The value associated with the key.
+@Return         IMG_TRUE  - success
+                IMG_FALSE  - failure
+*/ /**************************************************************************/
+IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, void *pKey, uintptr_t v);
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert
+
+@Description    Insert a key value pair into a hash table created with
+                HASH_Create.
+@Input          pHash         The hash table.
+@Input          k             The key value.
+@Input          v             The value associated with the key.
+@Return         IMG_TRUE - success.
+                IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Insert (HASH_TABLE *pHash, uintptr_t k, uintptr_t v);
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove_Extended
+@Description    Remove a key from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash         The hash table.
+@Input          pKey          Pointer to key.
+@Return         0 if the key is missing, or the value associated
+                with the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey);
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove
+@Description    Remove a key value pair from a hash table created
+                with HASH_Create.
+@Input          pHash         The hash table.
+@Input          k             The key value.
+@Return         0 if the key is missing, or the value associated
+                with the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Remove (HASH_TABLE *pHash, uintptr_t k);
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve_Extended
+@Description    Retrieve a value from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash         The hash table.
+@Input          pKey          Pointer to key.
+@Return         0 if the key is missing, or the value associated with
+                the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Retrieve_Extended (HASH_TABLE *pHash, void *pKey);
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve
+@Description    Retrieve a value from a hash table created with
+                HASH_Create.
+@Input          pHash         The hash table.
+@Input          k             The key value.
+@Return         0 if the key is missing, or the value associated with
+                the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Retrieve (HASH_TABLE *pHash, uintptr_t k);
+
+/*************************************************************************/ /*!
+@Function       HASH_Iterate
+@Description    Iterate over every entry in the hash table.
+@Input          pHash         Hash table to iterate over.
+@Input          pfnCallback   Callback to call with the key and data for
+                              each entry in the hash table.
+@Return         Callback error if any, otherwise PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback);
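+
+/* Illustrative lifecycle (a sketch with hypothetical key/value data):
+ *
+ *   HASH_TABLE *pHash = HASH_Create(64);
+ *   if (pHash != NULL)
+ *   {
+ *       HASH_Insert(pHash, (uintptr_t) ui32Key, (uintptr_t) pvData);
+ *       pvData = (void *) HASH_Retrieve(pHash, (uintptr_t) ui32Key);
+ *       HASH_Remove(pHash, (uintptr_t) ui32Key);
+ *       HASH_Delete(pHash);  // all entries must be removed first
+ *   }
+ */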
+
+#ifdef HASH_TRACE
+/*************************************************************************/ /*!
+@Function       HASH_Dump
+@Description    Dump out some information about a hash table.
+@Input          pHash         The hash table.
+*/ /**************************************************************************/
+void HASH_Dump (HASH_TABLE *pHash);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* _HASH_H_ */
+
+/******************************************************************************
+ End of file (hash.h)
+******************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/shared/lock.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/shared/lock.h
new file mode 100644
index 0000000..0f202d3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/shared/lock.h
@@ -0,0 +1,346 @@
+/*************************************************************************/ /*!
+@File           lock.h
+@Title          Locking interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal locking interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LOCK_H_
+#define _LOCK_H_
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#include "lock_types.h"
+
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include "allocmem.h"
+#include <asm/atomic.h>
+
+#define OSLockCreateNoStats(phLock, eLockType) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(phLock) = OSAllocMemNoStats(sizeof(struct mutex)); \
+	if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+	e;})
+#define OSLockCreate(phLock, eLockType) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(phLock) = OSAllocMem(sizeof(struct mutex)); \
+	if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+	e;})
+#define OSLockDestroy(hLock) ({mutex_destroy((hLock)); OSFreeMem((hLock)); PVRSRV_OK;})
+#define OSLockDestroyNoStats(hLock) ({mutex_destroy((hLock)); OSFreeMemNoStats((hLock)); PVRSRV_OK;})
+
+#define OSLockAcquire(hLock) ({mutex_lock((hLock)); PVRSRV_OK;})
+#define OSLockAcquireNested(hLock, subclass) ({mutex_lock_nested((hLock), (subclass)); PVRSRV_OK;})
+#define OSLockRelease(hLock) ({mutex_unlock((hLock)); PVRSRV_OK;})
+
+#define OSLockIsLocked(hLock) ((mutex_is_locked((hLock)) == 1) ? IMG_TRUE : IMG_FALSE)
+#define OSTryLockAcquire(hLock) ((mutex_trylock(hLock) == 1) ? IMG_TRUE : IMG_FALSE)
+
+/* These _may_ be reordered or optimized away entirely by the compiler/hw */
+#define OSAtomicRead(pCounter)	atomic_read(pCounter)
+#define OSAtomicWrite(pCounter, i)	atomic_set(pCounter, i)
+
+/* The following atomic operations, in addition to being SMP-safe, also
+   imply a memory barrier around the operation  */
+#define OSAtomicIncrement(pCounter) atomic_inc_return(pCounter)
+#define OSAtomicDecrement(pCounter) atomic_dec_return(pCounter)
+#define OSAtomicCompareExchange(pCounter, oldv, newv) atomic_cmpxchg(pCounter,oldv,newv)
+
+#define OSAtomicAdd(pCounter, incr) atomic_add_return(incr,pCounter)
+#define OSAtomicAddUnless(pCounter, incr, test) __atomic_add_unless(pCounter,incr,test)
+
+#define OSAtomicSubtract(pCounter, incr) atomic_add_return(-(incr),pCounter)
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/**************************************************************************/ /*!
+@Function       OSLockCreate
+@Description    Creates an operating system lock object.
+@Output         phLock           The created lock.
+@Input          eLockType        The type of lock required. This may be:
+                                 LOCK_TYPE_PASSIVE - the lock will not be used
+                                 in interrupt context or
+                                 LOCK_TYPE_DISPATCH - the lock may be used
+                                 in interrupt context.
+@Return         PVRSRV_OK on success. PVRSRV_ERROR_OUT_OF_MEMORY if the driver
+                cannot allocate CPU memory needed for the lock.
+                PVRSRV_ERROR_INIT_FAILURE if the Operating System fails to
+                allocate the lock.
+ */ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR OSLockCreate(POS_LOCK *phLock, LOCK_TYPE eLockType);
+#if defined(INTEGRITY_OS)
+#define OSLockCreateNoStats OSLockCreate
+#endif
+
+/**************************************************************************/ /*!
+@Function       OSLockDestroy
+@Description    Destroys an operating system lock object.
+@Input          hLock            The lock to be destroyed.
+@Return         None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR OSLockDestroy(POS_LOCK hLock);
+
+#if defined(INTEGRITY_OS)
+#define OSLockDestroyNoStats OSLockDestroy
+#endif
+/**************************************************************************/ /*!
+@Function       OSLockAcquire
+@Description    Acquires an operating system lock.
+                NB. This function must not return until the lock is acquired
+                (meaning the implementation should not timeout or return with
+                an error, as the caller will assume they have the lock).
+@Input          hLock            The lock to be acquired.
+@Return         None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+void OSLockAcquire(POS_LOCK hLock);
+
+/**************************************************************************/ /*!
+@Function       OSTryLockAcquire
+@Description    Try to acquire an operating system lock.
+                NB. If the lock is acquired successfully on the first
+                attempt, the function returns IMG_TRUE, otherwise IMG_FALSE.
+@Input          hLock            The lock to be acquired.
+@Return         IMG_TRUE if lock acquired successfully,
+                IMG_FALSE otherwise.
+ */ /**************************************************************************/
+IMG_INTERNAL
+IMG_BOOL OSTryLockAcquire(POS_LOCK hLock);
+
+/* Nested notation isn't used in UM or on other OSes */
+/**************************************************************************/ /*!
+@Function       OSLockAcquireNested
+@Description    For operating systems other than Linux, this equates to an
+                OSLockAcquire() call. On Linux, this function wraps a call
+                to mutex_lock_nested(). This recognises the scenario where
+                there may be multiple subclasses within a particular class
+                of lock. In such cases, the order in which locks belonging
+                to these various subclasses are acquired is important and
+                must be validated.
+@Input          hLock            The lock to be acquired.
+@Input          subclass         The subclass of the lock.
+@Return         None.
+ */ /**************************************************************************/
+#define OSLockAcquireNested(hLock, subclass) OSLockAcquire((hLock))
+
+/**************************************************************************/ /*!
+@Function       OSLockRelease
+@Description    Releases an operating system lock.
+@Input          hLock            The lock to be released.
+@Return         None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+void OSLockRelease(POS_LOCK hLock);
+
+/**************************************************************************/ /*!
+@Function       OSLockIsLocked
+@Description    Tests whether or not an operating system lock is currently
+                locked.
+@Input          hLock            The lock to be tested.
+@Return         IMG_TRUE if locked, IMG_FALSE if not locked.
+ */ /**************************************************************************/
+IMG_INTERNAL
+IMG_BOOL OSLockIsLocked(POS_LOCK hLock);
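+
+/* Illustrative lifecycle (a sketch; error handling elided):
+ *
+ *   POS_LOCK hLock;
+ *   if (OSLockCreate(&hLock, LOCK_TYPE_PASSIVE) == PVRSRV_OK)
+ *   {
+ *       OSLockAcquire(hLock);
+ *       ...                      // critical section
+ *       OSLockRelease(hLock);
+ *       OSLockDestroy(hLock);
+ *   }
+ */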
+
+#if defined(LINUX)
+
+/* Use GCC intrinsics (read/write semantics consistent with kernel-side implementation) */
+#define OSAtomicRead(pCounter) (*(volatile int *)&(pCounter)->counter)
+#define OSAtomicWrite(pCounter, i) ((pCounter)->counter = (IMG_INT) i)
+#define OSAtomicIncrement(pCounter) __sync_add_and_fetch((&(pCounter)->counter), 1)
+#define OSAtomicDecrement(pCounter) __sync_sub_and_fetch((&(pCounter)->counter), 1)
+#define OSAtomicCompareExchange(pCounter, oldv, newv) \
+	__sync_val_compare_and_swap((&(pCounter)->counter), oldv, newv)
+
+#define OSAtomicAdd(pCounter, incr) __sync_add_and_fetch((&(pCounter)->counter), incr)
+#define OSAtomicAddUnless(pCounter, incr, test) ({ \
+	int c; int old; \
+	c = OSAtomicRead(pCounter); \
+	while (1) { \
+		if (c == (test)) break; \
+		old = OSAtomicCompareExchange(pCounter, c, c+(incr)); \
+		if (old == c) break; \
+		c = old; \
+	} c; })
+
+#define OSAtomicSubtract(pCounter, incr) OSAtomicAdd(pCounter, -(incr))
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
+
+#else
+
+/* These _may_ be reordered or optimized away entirely by the compiler/hw */
+/*************************************************************************/ /*!
+@Function       OSAtomicRead
+@Description    Read the value of a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to read
+@Return         The value of the atomic variable
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicRead(const ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicWrite
+@Description    Write the value of a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be written to
+@Input          v               The value to write
+@Return         None
+*/ /**************************************************************************/
+IMG_INTERNAL
+void OSAtomicWrite(ATOMIC_T *pCounter, IMG_INT v);
+
+/* The following atomic operations, in addition to being SMP-safe,
+   must also have a memory barrier around each operation. */
+/*************************************************************************/ /*!
+@Function       OSAtomicIncrement
+@Description    Increment the value of a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be incremented
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicIncrement(ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicDecrement
+@Description    Decrement the value of a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be decremented
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicDecrement(ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicAdd
+@Description    Add a specified value to a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to add the value to
+@Input          v               The value to be added
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicAdd(ATOMIC_T *pCounter, IMG_INT v);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicAddUnless
+@Description    Add a specified value to a variable atomically unless it
+                already equals a particular value.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to add the value to
+@Input          v               The value to be added to 'pCounter'
+@Input          t               The test value. If 'pCounter' equals this,
+                                its value will not be adjusted
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicAddUnless(ATOMIC_T *pCounter, IMG_INT v, IMG_INT t);
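+
+/* Illustrative use (a sketch; psObj is hypothetical): take a reference only
+ * if the object is still live, i.e. increment unless the count has already
+ * dropped to zero. The returned value is the count before the operation:
+ *
+ *   if (OSAtomicAddUnless(&psObj->hRefCount, 1, 0) == 0)
+ *       return NULL;   // object already on its way to being freed
+ */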
+
+/*************************************************************************/ /*!
+@Function       OSAtomicSubtract
+@Description    Subtract a specified value from a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to subtract the value from
+@Input          v               The value to be subtracted
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicSubtract(ATOMIC_T *pCounter, IMG_INT v);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicSubtractUnless
+@Description    Subtract a specified value from a variable atomically unless
+                it already equals a particular value.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to subtract the value from
+@Input          v               The value to be subtracted from 'pCounter'
+@Input          t               The test value. If 'pCounter' equals this,
+                                its value will not be adjusted
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicSubtractUnless(ATOMIC_T *pCounter, IMG_INT v, IMG_INT t);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicCompareExchange
+@Description    Set a variable to a given value only if it is currently
+                equal to a specified value. The whole operation must be atomic.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be checked and
+                                possibly updated
+@Input          oldv            The value the atomic variable must have in
+                                order to be modified
+@Input          newv            The value to write to the atomic variable if
+                                it equals 'oldv'
+@Return         The value of *pCounter immediately before the exchange
+                (equals 'oldv' if the exchange took place).
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT OSAtomicCompareExchange(ATOMIC_T *pCounter, IMG_INT oldv, IMG_INT newv);
+
+#endif /* defined(LINUX) */
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+
+#endif	/* _LOCK_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/shared/ra.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/shared/ra.h
new file mode 100644
index 0000000..cfccd59
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/shared/ra.h
@@ -0,0 +1,206 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Allocator API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RA_H_
+#define _RA_H_
+
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/** Resource arena.
+ *  struct _RA_ARENA_ deliberately opaque
+ */
+typedef struct _RA_ARENA_ RA_ARENA;			//PRQA S 3313
+
+/*
+ * Per-Arena handle - this is private data for the caller of the RA.
+ * The RA knows nothing about this data.  It is given it upon
+ * RA_Create, and promises to pass it to calls to the ImportAlloc and
+ * ImportFree callbacks
+ */
+typedef IMG_HANDLE RA_PERARENA_HANDLE;
+/*
+ * Per-Import handle - this is private data for the caller of the RA.
+ * The RA knows nothing about this data.  It is given it on a
+ * per-import basis, either the "initial" import at RA_Create time, or
+ * further imports via the ImportAlloc callback.  It sends it back via
+ * the ImportFree callback, and also provides it in answer to any
+ * RA_Alloc request to signify from which "import" the allocation came
+ */
+typedef IMG_HANDLE RA_PERISPAN_HANDLE;
+
+typedef IMG_UINT64 RA_BASE_T;
+typedef IMG_UINT32 RA_LOG2QUANTUM_T;
+typedef IMG_UINT64 RA_LENGTH_T;
+
+/* Lock classes: describes the level of nesting between different arenas. */
+#define RA_LOCKCLASS_0 0
+#define RA_LOCKCLASS_1 1
+#define RA_LOCKCLASS_2 2
+
+#define RA_NO_IMPORT_MULTIPLIER 1
+
+/*
+ * Flags in an "import" must match the flags for an allocation
+ */
+typedef IMG_UINT32 RA_FLAGS_T;
+
+/**
+ *  @Function   RA_Create
+ *
+ *  @Description
+ *
+ *  To create a resource arena.
+ *
+ *  @Input name - the name of the arena for diagnostic purposes.
+ *  @Input uLog2Quantum - the arena allocation quantum, as a log2 value.
+ *  @Input ui32LockClass - the lock class level this arena uses.
+ *  @Input imp_alloc - a resource allocation callback or 0.
+ *  @Input imp_free - a resource de-allocation callback or 0.
+ *  @Input per_arena_handle - user private handle passed to imp_alloc and
+ *         imp_free, or 0.
+ *  @Input bNoSplit - Disable splitting up imports.
+ *  @Return pointer to arena, or NULL.
+ */
+RA_ARENA *
+RA_Create (IMG_CHAR *name,
+           /* subsequent imports: */
+           RA_LOG2QUANTUM_T uLog2Quantum,
+           IMG_UINT32 ui32LockClass,
+           PVRSRV_ERROR (*imp_alloc)(RA_PERARENA_HANDLE _h,
+                                 RA_LENGTH_T uSize,
+                                 RA_FLAGS_T uFlags,
+                                 const IMG_CHAR *pszAnnotation,
+                                 RA_BASE_T *pBase,
+                                 RA_LENGTH_T *pActualSize,
+                                 RA_PERISPAN_HANDLE *phPriv),
+           void (*imp_free) (RA_PERARENA_HANDLE,
+                                 RA_BASE_T,
+                                 RA_PERISPAN_HANDLE),
+           RA_PERARENA_HANDLE per_arena_handle,
+           IMG_BOOL bNoSplit);
+
+/**
+ *  @Function   RA_Delete
+ *
+ *  @Description
+ *
+ *  To delete a resource arena. All resources allocated from the arena
+ *  must be freed before deleting the arena.
+ *
+ *  @Input  pArena - the arena to delete.
+ *  @Return None
+ */
+void
+RA_Delete (RA_ARENA *pArena);
+
+/**
+ *  @Function   RA_Add
+ *
+ *  @Description
+ *
+ *  To add a resource span to an arena. The span must not overlap with
+ *  any span previously added to the arena.
+ *
+ *  @Input pArena - the arena to add a span into.
+ *  @Input base - the base of the span.
+ *  @Input uSize - the extent of the span.
+ *  @Input uFlags - the flags to associate with the span.
+ *  @Input hPriv - handle associated with the span (reserved for user use).
+ *  @Return IMG_TRUE - success, IMG_FALSE - failure
+ */
+IMG_BOOL
+RA_Add (RA_ARENA *pArena,
+		RA_BASE_T base,
+		RA_LENGTH_T uSize,
+		RA_FLAGS_T uFlags,
+		RA_PERISPAN_HANDLE hPriv);
+
+/**
+ *  @Function   RA_Alloc
+ *
+ *  @Description
+ *
+ *  To allocate resource from an arena.
+ *
+ *  @Input  pArena - the arena
+ *  @Input  uSize - the size of resource segment requested.
+ *  @Input  uImportMultiplier - Import x-times of the uSize
+ *          for future RA_Alloc calls.
+ *          Use RA_NO_IMPORT_MULTIPLIER to import the exact size.
+ *  @Output pActualSize - the actual size of resource segment allocated,
+ *          typically rounded up by quantum.
+ *  @Input  uFlags - flags influencing allocation policy.
+ *  @Input  uAlignment - the alignment constraint required for the
+ *          allocated segment, use 0 if alignment not required.
+ *  @Input  pszAnnotation - a string to describe the allocation
+ *  @Output pBase - allocated base resource
+ *  @Output phPriv - the user reference associated with allocated
+ *          resource span.
+ *  @Return PVRSRV_OK - success
+ */
+PVRSRV_ERROR
+RA_Alloc (RA_ARENA *pArena, 
+          RA_LENGTH_T uSize,
+          IMG_UINT8 uImportMultiplier,
+          RA_FLAGS_T uFlags,
+          RA_LENGTH_T uAlignment,
+          const IMG_CHAR *pszAnnotation,
+          RA_BASE_T *pBase,
+          RA_LENGTH_T *pActualSize,
+          RA_PERISPAN_HANDLE *phPriv);
+
+/**
+ *  @Function   RA_Free
+ *
+ *  @Description    To free a resource segment.
+ *  
+ *  @Input  pArena - the arena the segment was originally allocated from.
+ *  @Input  base - the base of the resource span to free.
+ *
+ *  @Return None
+ */
+void
+RA_Free (RA_ARENA *pArena, RA_BASE_T base);
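+
+/* Illustrative usage (a sketch with hypothetical base/size values): a simple
+ * arena over one fixed span, with no import callbacks:
+ *
+ *   RA_ARENA *pArena = RA_Create("example", 12,   // log2(4KB) quantum
+ *                                RA_LOCKCLASS_0, NULL, NULL, NULL,
+ *                                IMG_FALSE);
+ *   RA_BASE_T uiBase;
+ *   RA_Add(pArena, 0x80000000ULL, 0x100000ULL, 0, NULL);
+ *   if (RA_Alloc(pArena, 0x1000, RA_NO_IMPORT_MULTIPLIER, 0, 0,
+ *                "example alloc", &uiBase, NULL, NULL) == PVRSRV_OK)
+ *       RA_Free(pArena, uiBase);
+ *   RA_Delete(pArena);
+ */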
+
+#endif
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/sync_checkpoint_internal.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/sync_checkpoint_internal.h
new file mode 100644
index 0000000..515cc86
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/sync_checkpoint_internal.h
@@ -0,0 +1,238 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services internal synchronisation checkpoint interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the internal server interface for services
+                synchronisation checkpoints.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __SYNC_CHECKPOINT__
+#define __SYNC_CHECKPOINT__
+
+#include "img_types.h"
+#include "sync_checkpoint_internal_fw.h"
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "ra.h"
+#include "dllist.h"
+#include "lock.h"
+#include "devicemem.h"
+#include "rgx_fwif_shared.h"
+typedef struct _PVRSRV_DEVICE_NODE_ PVRSRV_DEVICE_NODE;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+struct SYNC_CHECKPOINT_RECORD;
+#endif
+
+/*
+	Private structures
+*/
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_ _SYNC_CHECKPOINT_CONTEXT_CTL;
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT_
+{
+	PVRSRV_DEVICE_NODE     			*psDevNode;
+	IMG_CHAR						azName[PVRSRV_SYNC_NAME_LENGTH];       /*!< Name of the RA */
+	RA_ARENA						*psSubAllocRA;                         /*!< RA context */
+	IMG_CHAR						azSpanName[PVRSRV_SYNC_NAME_LENGTH];   /*!< Name of the span RA */
+	RA_ARENA						*psSpanRA;                             /*!< RA used for span management of SubAllocRA */
+	ATOMIC_T						hRefCount;                             /*!< Ref count for this context */
+	ATOMIC_T						hCheckpointCount;                      /*!< Checkpoint count for this context */
+	POS_LOCK						hLock;
+	_SYNC_CHECKPOINT_CONTEXT_CTL	*psContextCtl;
+} _SYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT_BLOCK_
+{
+	ATOMIC_T                  hRefCount;                  /*!< Ref count for this sync block */
+	POS_LOCK                  hLock;
+	_SYNC_CHECKPOINT_CONTEXT  *psContext;                 /*!< Our copy of the services connection */
+	PVRSRV_DEVICE_NODE        *psDevNode;
+	IMG_UINT32                ui32SyncBlockSize;          /*!< Size of the sync checkpoint block */
+	IMG_UINT32                ui32FirmwareAddr;           /*!< Firmware address */
+	DEVMEM_MEMDESC            *hMemDesc;                  /*!< DevMem allocation for block */
+	volatile IMG_UINT32       *pui32LinAddr;              /*!< Server-code CPU mapping */
+	IMG_UINT64                uiSpanBase;                 /*!< Base of this import (FW DevMem) in the span RA */
+	DLLIST_NODE               sListNode;                  /*!< List node for the sync chkpt block list */
+} SYNC_CHECKPOINT_BLOCK;
+
+typedef struct SYNC_CHECKPOINT_RECORD* PSYNC_CHECKPOINT_RECORD_HANDLE;
+
+typedef struct _SYNC_CHECKPOINT_
+{
+	//_SYNC_CHECKPOINT_CONTEXT		*psContext;				/*!< pointer to the parent context of this checkpoint */
+	/* A sync checkpoint is assigned a unique ID, to avoid any confusion should
+	 * the same memory be re-used later for a different checkpoint
+	 */
+	IMG_UINT32                      ui32UID;                 /*!< Unique ID assigned to sync checkpoint (to distinguish checkpoints if memory is re-used)*/
+	POS_LOCK                        hLock;
+	ATOMIC_T                        hRefCount;               /*!< Ref count for this sync */
+	ATOMIC_T                        hEnqueuedCCBCount;       /*!< Num times sync has been put in CCBs */
+	SYNC_CHECKPOINT_BLOCK           *psSyncCheckpointBlock;  /*!< Synchronisation block this checkpoint is allocated on */
+	IMG_UINT64                      uiSpanAddr;              /*!< Span address of the sync */
+	volatile _SYNC_CHECKPOINT_FW_OBJ *psSyncCheckpointFwObj; /*!< CPU view of the data held in the sync block */
+	PRGXFWIF_UFO_ADDR               sCheckpointUFOAddr;      /*!< PRGXFWIF_UFO_ADDR struct used to pass update address to FW */
+	IMG_CHAR                        azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the checkpoint */
+	PVRSRV_TIMELINE                 hTimeline;               /*!< Timeline on which this sync checkpoint was created */
+	IMG_UINT32                      ui32ValidationCheck;
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	PSYNC_CHECKPOINT_RECORD_HANDLE  hRecord;                /*!< Sync record handle */
+#endif
+	DLLIST_NODE                     sListNode;              /*!< List node for the global sync chkpt list */
+	DLLIST_NODE                     sDeferredFreeListNode;  /*!< List node for the deferred free sync chkpt list */
+} _SYNC_CHECKPOINT;
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetFirmwareAddr
+
+@Description    Returns the firmware address of the given sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the firmware address of
+
+@Return         The firmware address of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetFirmwareAddrFromList
+
+@Description    Returns the firmware address of the given sync checkpoint
+                (list variant of SyncCheckpointGetFirmwareAddr).
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint from
+                                        which to get the firmware address
+
+@Return         The firmware address of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetFirmwareAddrFromList(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointCCBEnqueued
+
+@Description    Increment the CCB enqueued reference count for a
+                synchronisation checkpoint. This indicates how many FW
+                operations (checks/update) have been placed into CCBs for the
+                sync checkpoint.
+                When the FW services these operations, it increments its own
+                reference count. When the two values are equal, we know
+                there are no outstanding FW operations for the checkpoint
+                in any CCB.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint for which
+                                        to increment the enqueued reference
+                                        count
+
+@Return         None
+
+*/
+/*****************************************************************************/
+void
+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint);
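+/* A minimal usage sketch (illustrative only, not part of this interface):
+   a caller with access to the internal _SYNC_CHECKPOINT view, here assumed
+   to be psSyncCheckpointInt, could test for outstanding FW operations by
+   comparing the two counts described above:
+
+       if (OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount) ==
+           psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)
+       {
+           // no outstanding FW operations for this checkpoint in any CCB
+       }
+ */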
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetEnqueuedCount
+
+@Description    Returns the number of FW operations (checks/updates)
+                currently enqueued in CCBs for the given sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the enqueued count of
+
+@Return         The enqueued count of the sync checkpoint
+                (i.e. the number of FW operations (checks or updates)
+                 currently enqueued in CCBs for the sync checkpoint)
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetId
+
+@Description    Returns the unique Id of the given sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the unique Id of
+
+@Return         The unique Id of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetTimeline
+
+@Description    Returns the parent timeline of the given sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the parent timeline of
+
+@Return         The parent timeline of the sync checkpoint
+
+*/
+/*****************************************************************************/
+PVRSRV_TIMELINE
+SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetRGXFWIFUFOAddr
+
+@Description    Returns the PRGXFWIF_UFO_ADDR of the given sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the PRGXFWIF_UFO_ADDR of
+
+@Return         The PRGXFWIF_UFO_ADDR of the sync checkpoint, used when
+                providing the update in server kick code.
+
+*/
+/*****************************************************************************/
+PRGXFWIF_UFO_ADDR*
+SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+#endif	/* __SYNC_CHECKPOINT__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/sync_checkpoint_internal_fw.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/sync_checkpoint_internal_fw.h
new file mode 100644
index 0000000..f4810f8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/include/sync_checkpoint_internal_fw.h
@@ -0,0 +1,66 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services internal synchronisation checkpoint FW obj header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the internal FW object structure for services
+                synchronisation checkpoints.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_INTERNAL_FW_
+#define _SYNC_CHECKPOINT_INTERNAL_FW_
+
+#include "img_types.h"
+
+/* Sync_checkpoint firmware object.
+ * This is the FW-addressable structure used to hold the sync checkpoint's
+ * state and other information which needs to be accessed by the firmware.
+ */
+typedef struct _SYNC_CHECKPOINT_FW_OBJ_
+{
+	IMG_UINT32	ui32State;          /*!< Holds the current state of the sync checkpoint */
+	IMG_UINT32	ui32FwRefCount;     /*!< Holds the FW reference count (num of fences/updates processed) */
+} _SYNC_CHECKPOINT_FW_OBJ;
+
+/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */
+#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0)
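+/* Illustrative firmware-side test (a sketch; the real FW code is not part
+   of this file):
+
+       IMG_BOOL bDone = (psFwObj->ui32State & SYNC_CHECKPOINT_SIGNALLED_MASK)
+                            ? IMG_TRUE : IMG_FALSE;
+
+   i.e. a single masked read of ui32State tells the firmware whether the
+   checkpoint has signalled or errored. */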
+
+/* Maximum number of sync checkpoints the firmware supports in one fence */
+#define MAX_SYNC_CHECKPOINTS_PER_FENCE 20
+
+#endif	/* _SYNC_CHECKPOINT_INTERNAL_FW_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/cache_km.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/cache_km.c
new file mode 100644
index 0000000..e284956
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/cache_km.c
@@ -0,0 +1,3208 @@
+/*************************************************************************/ /*!
+@File           cache_km.c
+@Title          CPU d-cache maintenance operations framework
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements server side code for CPU d-cache maintenance taking
+                into account the idiosyncrasies of the various types of CPU
+                d-cache instruction-set architecture (ISA) maintenance
+                mechanisms.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined(LINUX)
+#include <asm/uaccess.h>
+#include <asm/current.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#endif
+
+#include "pmr.h"
+#include "log2.h"
+#include "device.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "cache_km.h"
+#include "pvr_debug.h"
+#include "lock_types.h"
+#include "allocmem.h"
+#include "process_stats.h"
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+#include "ri_server.h"
+#endif
+#include "devicemem.h"
+#include "pvrsrv_apphint.h"
+#include "pvrsrv_sync_server.h"
+
+/* Top-level file-local build definitions */
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS) && defined(LINUX)
+#define CACHEOP_DEBUG
+#define CACHEOP_STATS_ITEMS_MAX 			32
+#define INCR_WRAP(x)						((x+1) >= CACHEOP_STATS_ITEMS_MAX ? 0 : (x+1))
+#define DECR_WRAP(x)						((x-1) < 0 ? (CACHEOP_STATS_ITEMS_MAX-1) : (x-1))
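+/* Illustrative wrap behaviour (with CACHEOP_STATS_ITEMS_MAX == 32):
+   INCR_WRAP(31) == 0 and DECR_WRAP(0) == 31, so the statistics table is
+   traversed as a circular buffer in both directions. */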
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+/* Refer to CacheOpStatsExecLogHeader() for header item names */
+#define CACHEOP_RI_PRINTF_HEADER			"%-8s %-10s %-10s %-5s %-16s %-16s %-10s %-10s %-18s %-18s %-12s"
+#define CACHEOP_RI_PRINTF					"%-8d %-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-18llu %-18llu 0x%-10x\n"
+#else
+#define CACHEOP_PRINTF_HEADER				"%-8s %-10s %-10s %-5s %-10s %-10s %-18s %-18s %-12s"
+#define CACHEOP_PRINTF						"%-8d %-10s %-10s %-5s 0x%-8llx 0x%-8llx %-18llu %-18llu 0x%-10x\n"
+#endif
+#endif
+
+//#define CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING		/* Force OS page (not cache line) flush granularity */
+#define CACHEOP_THREAD_WAIT_TIMEOUT			500000ULL	/* Wait 500ms between waits unless woken up on demand */
+#define CACHEOP_FENCE_WAIT_TIMEOUT			1000ULL		/* Wait 1ms between wait events unless woken up */
+#define CACHEOP_FENCE_RETRY_ABORT			1000ULL		/* Number of fence retries after which the fence operation aborts */
+#define CACHEOP_SEQ_MIDPOINT (IMG_UINT32)	0x7FFFFFFF	/* Point at which seqNum(s) are rebased/compared */
+#define CACHEOP_ABORT_FENCE_ERROR_STRING	"detected stalled client, retrying cacheop fence"
+#define CACHEOP_NO_GFLUSH_ERROR_STRING		"global flush requested on CPU without support"
+#define CACHEOP_DEVMEM_OOR_ERROR_STRING		"cacheop device memory request is out of range"
+#define CACHEOP_MAX_DEBUG_MESSAGE_LEN		160
+
+typedef struct _CACHEOP_WORK_ITEM_
+{
+	PMR *psPMR;
+	IMG_UINT32 ui32GFSeqNum;
+	IMG_UINT32 ui32OpSeqNum;
+	IMG_DEVMEM_SIZE_T uiSize;
+	PVRSRV_CACHE_OP uiCacheOp;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	PVRSRV_TIMELINE iTimeline;
+	SYNC_TIMELINE_OBJ sSWTimelineObj;
+#if defined(CACHEOP_DEBUG)
+	IMG_UINT64 ui64EnqueuedTime;
+	IMG_UINT64 ui64DequeuedTime;
+	IMG_UINT64 ui64ExecuteTime;
+	IMG_BOOL bDeferred;
+	IMG_BOOL bKMReq;
+	IMG_BOOL bRBF;
+	IMG_BOOL bUMF;
+	IMG_PID pid;
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+	RGXFWIF_DM eFenceOpType;
+#endif
+#endif
+} CACHEOP_WORK_ITEM;
+
+typedef struct _CACHEOP_STATS_EXEC_ITEM_
+{
+	IMG_PID pid;
+	IMG_UINT32 ui32OpSeqNum;
+	PVRSRV_CACHE_OP uiCacheOp;
+	IMG_DEVMEM_SIZE_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT64 ui64EnqueuedTime;
+	IMG_UINT64 ui64DequeuedTime;
+	IMG_UINT64 ui64ExecuteTime;
+	IMG_BOOL bIsFence;
+	IMG_BOOL bKMReq;
+	IMG_BOOL bRBF;
+	IMG_BOOL bUMF;
+	IMG_BOOL bDeferred;
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_DEV_PHYADDR sDevPAddr;
+	RGXFWIF_DM eFenceOpType;
+#endif
+} CACHEOP_STATS_EXEC_ITEM;
+
+typedef enum _CACHEOP_CONFIG_
+{
+	CACHEOP_CONFIG_DEFAULT = 0,
+	/* cache flush mechanism types */
+	CACHEOP_CONFIG_KRBF    = 1,
+	CACHEOP_CONFIG_KGF     = 2,
+	CACHEOP_CONFIG_URBF    = 4,
+	/* sw-emulated deferred flush mechanism */
+	CACHEOP_CONFIG_KDF     = 8,
+	/* pseudo configuration items */
+	CACHEOP_CONFIG_LAST    = 16,
+	CACHEOP_CONFIG_KLOG    = 16,
+	CACHEOP_CONFIG_ALL     = 31
+} CACHEOP_CONFIG;
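+/* Illustrative example: a runtime configuration value of
+   (CACHEOP_CONFIG_KRBF | CACHEOP_CONFIG_KDF), i.e. 9, selects kernel
+   range-based flushing with the sw-emulated deferred flush mechanism,
+   whereas CACHEOP_CONFIG_DEFAULT (0) lets CacheOpConfigUpdate() derive a
+   default from the detected KGF/URBF capabilities. */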
+
+typedef struct _CACHEOP_WORK_QUEUE_
+{
+/*
+ * Init. state & the primary device node that the
+ * framework is anchored on.
+ */
+	IMG_BOOL bInit;
+/*
+  MMU page size/shift & d-cache line size
+ */
+	size_t uiPageSize;
+	IMG_UINT32 uiLineSize;
+	IMG_UINT32 uiLineShift;
+	IMG_UINT32 uiPageShift;
+	PVRSRV_CACHE_OP_ADDR_TYPE uiCacheOpAddrType;
+/*
+  CacheOp deferred queueing protocol
+  + Implementation geared for performance, atomic counter based
+	- Value Space is 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> n.
+	- Index Space is 0 -> 1 -> 2 -> 3 -> 0 -> 1 -> 2 -> 3 -> 0 -> m
+	  (illustrated here for a 4-entry queue).
+		- Index = Value modulo CACHEOP_INDICES_MAX.
+  + Write counter never collides with read counter in index space
+	- Unless at start of day when both are initialised to zero.
+	- This means we sacrifice one entry when the queue is full.
+	- Incremented by producer
+		- Value space tracks total number of CacheOps queued.
+		- Index space identifies CacheOp CCB queue index.
+  + Read counter increments towards write counter in value space
+	- Empty queue occurs when read equals write counter.
+	- Wrap-round logic handled by consumer as/when needed.
+	- Incremented by consumer
+		- Value space tracks total # of CacheOps executed.
+		- Index space identifies CacheOp CCB queue index.
+  + Total queued size adjusted up/down during write/read activity
+	- Counter might overflow but does not compromise framework.
+ */
+	ATOMIC_T hReadCounter;
+	ATOMIC_T hWriteCounter;
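+/*
+  Worked example (with CACHEOP_INDICES_LOG2_SIZE == 4, i.e. a 16-entry
+  queue): hWriteCounter == 18 maps to index 18 & 15 == 2, hReadCounter == 15
+  maps to index 15, so 18 - 15 == 3 CacheOps are pending and the consumer
+  dequeues from index 15 next.
+ */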
+/*
+  CacheOp sequence numbers
+  + hCommonSeqNum:
+	- Common sequence, numbers every CacheOp operation in both UM/KM.
+	- In KM
+		- Every deferred CacheOp (on behalf of UM) gets a unique seqNum.
+		- Last executed deferred CacheOp updates gsCwq.hCompletedSeqNum.
+		- Every GF operation (if supported) also gets a unique seqNum.
+		- Last executed GF operation updates CACHEOP_INFO_GFSEQNUM0.
+		- Under debug, all CacheOp gets a unique seqNum for tracking.
+		- This includes all UM/KM synchronous non-deferred CacheOp(s)
+	- In UM
+		- If the processor architecture supports GF maintenance (in KM)
+		- All UM CacheOp samples CACHEOP_INFO_GFSEQNUM0 via info. page.
+		- CacheOp(s) discarded if another GF occurs before execution.
+		- CacheOp(s) discarding happens in both UM and KM space.
+  + hCompletedSeqNum:
+	- Tracks last executed KM/deferred RBF/Global<timeline> CacheOp(s)
+  + hDeferredSize:
+	- Running total of size of currently deferred CacheOp in queue.
+ */
+	ATOMIC_T hDeferredSize;
+	ATOMIC_T hCommonSeqNum;
+	ATOMIC_T hCompletedSeqNum;
+/*
+  CacheOp information page
+  + psInfoPageMemDesc:
+	- Single system-wide OS page that is multi-mapped in UM/KM.
+	- Mapped into clients using read-only memory protection.
+	- Mapped into server using read/write memory protection.
+	- Contains information pertaining to cache framework.
+  + pui32InfoPage:
+	- Server linear address pointer to said information page.
+	- Each info-page entry currently of sizeof(IMG_UINT32).
+ */
+	PMR *psInfoPagePMR;
+	IMG_UINT32 *pui32InfoPage;
+	DEVMEM_MEMDESC *psInfoPageMemDesc;
+/*
+  CacheOp deferred work-item queue
+  + CACHEOP_INDICES_LOG2_SIZE
+	- Sized using GF/RBF ratio
+ */
+#define CACHEOP_INDICES_LOG2_SIZE	(4)
+#define CACHEOP_INDICES_MAX			(1 << CACHEOP_INDICES_LOG2_SIZE)
+#define CACHEOP_INDICES_MASK		(CACHEOP_INDICES_MAX-1)
+	CACHEOP_WORK_ITEM asWorkItems[CACHEOP_INDICES_MAX];
+#if defined(CACHEOP_DEBUG)
+/*
+  CacheOp statistics
+ */
+	void *pvStatsEntry;
+	IMG_HANDLE hStatsExecLock;
+	IMG_UINT32 ui32ServerASync;
+	IMG_UINT32 ui32ServerSyncVA;
+	IMG_UINT32 ui32ServerSync;
+	IMG_UINT32 ui32ServerRBF;
+	IMG_UINT32 ui32ServerGF;
+	IMG_UINT32 ui32ServerDGF;
+	IMG_UINT32 ui32ServerDTL;
+	IMG_UINT32 ui32ClientSync;
+	IMG_UINT32 ui32ClientRBF;
+	IMG_UINT32 ui32KMDiscards;
+	IMG_UINT32 ui32UMDiscards;
+	IMG_UINT32 ui32TotalFenceOps;
+	IMG_UINT32 ui32TotalExecOps;
+	IMG_UINT32 ui32AvgExecTime;
+	IMG_UINT32 ui32AvgFenceTime;
+	IMG_INT32 i32StatsExecWriteIdx;
+	CACHEOP_STATS_EXEC_ITEM asStatsExecuted[CACHEOP_STATS_ITEMS_MAX];
+#endif
+/*
+  CacheOp (re)configuration
+ */
+	void *pvConfigTune;
+	IMG_HANDLE hConfigLock;
+/*
+  CacheOp deferred worker thread
+  + eConfig
+	- Runtime configuration
+  + hWorkerThread
+	- CacheOp thread handler
+  + hThreadWakeUpEvtObj
+	- Event object to drive CacheOp worker thread sleep/wake-ups.
+  + hClientWakeUpEvtObj
+	- Event object to unblock stalled clients waiting on queue.
+  +  uiWorkerThreadPid
+	- CacheOp thread process id
+ */
+	CACHEOP_CONFIG	eConfig;
+	IMG_UINT32		ui32Config;
+	IMG_BOOL		bConfigTuning;
+	IMG_HANDLE		hWorkerThread;
+	IMG_HANDLE 		hDeferredLock;
+	IMG_HANDLE 		hGlobalFlushLock;
+	IMG_PID			uiWorkerThreadPid;
+	IMG_HANDLE		hThreadWakeUpEvtObj;
+	IMG_HANDLE		hClientWakeUpEvtObj;
+	IMG_UINT32		ui32FenceWaitTimeUs;
+	IMG_UINT32		ui32FenceRetryAbort;
+	IMG_BOOL		bNoGlobalFlushImpl;
+	IMG_BOOL		bSupportsUMFlush;
+} CACHEOP_WORK_QUEUE;
+
+/* Top-level CacheOp framework object */
+static CACHEOP_WORK_QUEUE gsCwq = {0};
+
+#define CacheOpConfigSupports(e) ((gsCwq.eConfig & (e)) ? IMG_TRUE : IMG_FALSE)
+
+static INLINE IMG_UINT32 CacheOpIdxRead(ATOMIC_T *phCounter)
+{
+	IMG_UINT32 ui32Idx = OSAtomicRead(phCounter);
+	return ui32Idx & CACHEOP_INDICES_MASK;
+}
+
+static INLINE IMG_UINT32 CacheOpIdxIncrement(ATOMIC_T *phCounter)
+{
+	IMG_UINT32 ui32Idx = OSAtomicIncrement(phCounter);
+	return ui32Idx & CACHEOP_INDICES_MASK;
+}
+
+static INLINE IMG_UINT32 CacheOpIdxNext(ATOMIC_T *phCounter)
+{
+	IMG_UINT32 ui32Idx = OSAtomicRead(phCounter);
+	return ++ui32Idx & CACHEOP_INDICES_MASK;
+}
+
+static INLINE IMG_UINT32 CacheOpIdxSpan(ATOMIC_T *phLhs, ATOMIC_T *phRhs)
+{
+	return OSAtomicRead(phLhs) - OSAtomicRead(phRhs);
+}
+
+static INLINE IMG_UINT64 DivBy10(IMG_UINT64 uiNum)
+{
+	IMG_UINT64 uiQuot;
+	IMG_UINT64 uiRem;
+
+	uiQuot = (uiNum >> 1) + (uiNum >> 2);
+	uiQuot = uiQuot + (uiQuot >> 4);
+	uiQuot = uiQuot + (uiQuot >> 8);
+	uiQuot = uiQuot + (uiQuot >> 16);
+	uiQuot = uiQuot >> 3;
+	uiRem  = uiNum - (((uiQuot << 2) + uiQuot) << 1);
+
+	return uiQuot + (uiRem > 9);
+}
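+
+/* DivBy10 is a shifts/adds approximation of unsigned division by ten: the
+   first four steps compute uiQuot ~= uiNum * 0.8, the >> 3 turns that into
+   ~uiNum / 10, and the final remainder test corrects the (for the
+   magnitudes used here) at-most-one under-estimate. Worked example:
+   DivBy10(1000000000) == 100000000, which is how the stats code below
+   converts nano-seconds to micro-seconds by applying it three times. */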
+
+#if defined(CACHEOP_DEBUG)
+static INLINE void CacheOpStatsExecLogHeader(IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN])
+{
+	OSSNPrintf(szBuffer, CACHEOP_MAX_DEBUG_MESSAGE_LEN,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+				CACHEOP_RI_PRINTF_HEADER,
+#else
+				CACHEOP_PRINTF_HEADER,
+#endif
+				"Pid",
+				"CacheOp",
+				"  Type",
+				"Mode",
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+				"DevVAddr",
+				"DevPAddr",
+#endif
+				"Offset",
+				"Size",
+				"xTime (us)",
+				"qTime (us)",
+				"SeqNum");
+}
+
+static INLINE void CacheOpStatsExecLogWrite(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+	IMG_UINT64 ui64ExecuteTime;
+	IMG_UINT64 ui64EnqueuedTime;
+	IMG_INT32 i32WriteOffset;
+
+	if (!psCacheOpWorkItem->ui32OpSeqNum && !psCacheOpWorkItem->uiCacheOp)
+	{
+		/* This breaks the logic of read-out, so we do not queue items
+		   with zero sequence number and no CacheOp */
+		return;
+	}
+	else if (psCacheOpWorkItem->bKMReq && !CacheOpConfigSupports(CACHEOP_CONFIG_KLOG))
+	{
+		/* KM requests spam the history due to their frequency, so drop them unless KLOG is enabled */
+		return;
+	}
+
+	OSLockAcquire(gsCwq.hStatsExecLock);
+
+	i32WriteOffset = gsCwq.i32StatsExecWriteIdx;
+	gsCwq.asStatsExecuted[i32WriteOffset].pid = psCacheOpWorkItem->pid;
+	gsCwq.i32StatsExecWriteIdx = INCR_WRAP(gsCwq.i32StatsExecWriteIdx);
+	gsCwq.asStatsExecuted[i32WriteOffset].bRBF = psCacheOpWorkItem->bRBF;
+	gsCwq.asStatsExecuted[i32WriteOffset].bUMF = psCacheOpWorkItem->bUMF;
+	gsCwq.asStatsExecuted[i32WriteOffset].uiSize = psCacheOpWorkItem->uiSize;
+	gsCwq.asStatsExecuted[i32WriteOffset].bKMReq = psCacheOpWorkItem->bKMReq;
+	gsCwq.asStatsExecuted[i32WriteOffset].uiOffset	= psCacheOpWorkItem->uiOffset;
+	gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp = psCacheOpWorkItem->uiCacheOp;
+	gsCwq.asStatsExecuted[i32WriteOffset].bDeferred = psCacheOpWorkItem->bDeferred;
+	gsCwq.asStatsExecuted[i32WriteOffset].ui32OpSeqNum	= psCacheOpWorkItem->ui32OpSeqNum;
+	gsCwq.asStatsExecuted[i32WriteOffset].ui64ExecuteTime = psCacheOpWorkItem->ui64ExecuteTime;
+	gsCwq.asStatsExecuted[i32WriteOffset].ui64EnqueuedTime = psCacheOpWorkItem->ui64EnqueuedTime;
+	gsCwq.asStatsExecuted[i32WriteOffset].ui64DequeuedTime = psCacheOpWorkItem->ui64DequeuedTime;
+	/* During early system initialisation, only non-fence & non-PMR CacheOps are processed */
+	gsCwq.asStatsExecuted[i32WriteOffset].bIsFence = gsCwq.bInit && !psCacheOpWorkItem->psPMR;
+	PVR_ASSERT(gsCwq.asStatsExecuted[i32WriteOffset].pid);
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+	if (gsCwq.bInit && psCacheOpWorkItem->psPMR)
+	{
+		IMG_CPU_PHYADDR sDevPAddr;
+		PVRSRV_ERROR eError;
+		IMG_BOOL bValid;
+
+		/* Get more detailed information regarding the sub-allocations that
+		   the PMR has from the RI manager, for the process that requested the CacheOp */
+		eError = RIDumpProcessListKM(psCacheOpWorkItem->psPMR,
+									 gsCwq.asStatsExecuted[i32WriteOffset].pid,
+									 gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
+									 &gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+
+		/* (Re)lock here as some PMR might have not been locked */
+		eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+
+		eError = PMR_CpuPhysAddr(psCacheOpWorkItem->psPMR,
+								 gsCwq.uiPageShift,
+								 1,
+								 gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
+								 &sDevPAddr,
+								 &bValid);
+		if (eError != PVRSRV_OK)
+		{
+			eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+			PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+			goto e0;
+		}
+
+		eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+		PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+
+		gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr.uiAddr = sDevPAddr.uiAddr;
+	}
+
+	if (gsCwq.asStatsExecuted[i32WriteOffset].bIsFence)
+	{
+		gsCwq.asStatsExecuted[i32WriteOffset].eFenceOpType = psCacheOpWorkItem->eFenceOpType;
+	}
+#endif
+
+	/* Convert timing from nano-seconds to micro-seconds */
+	ui64ExecuteTime = gsCwq.asStatsExecuted[i32WriteOffset].ui64ExecuteTime;
+	ui64EnqueuedTime = gsCwq.asStatsExecuted[i32WriteOffset].ui64EnqueuedTime;
+	ui64ExecuteTime = DivBy10(DivBy10(DivBy10(ui64ExecuteTime)));
+	ui64EnqueuedTime = DivBy10(DivBy10(DivBy10(ui64EnqueuedTime)));
+
+	/* Coalesced (to global) deferred CacheOps do not contribute to statistics,
+	   as the enqueue/execute times are identical for these CacheOps */
+	if (!gsCwq.asStatsExecuted[i32WriteOffset].bIsFence)
+	{
+		/* Calculate the rolling approximate average execution time */
+		IMG_UINT32 ui32Time = ui64EnqueuedTime < ui64ExecuteTime ?
+									ui64ExecuteTime - ui64EnqueuedTime :
+									ui64EnqueuedTime - ui64ExecuteTime;
+		if (gsCwq.ui32TotalExecOps > 2 && ui32Time)
+		{
+			gsCwq.ui32AvgExecTime -= (gsCwq.ui32AvgExecTime / gsCwq.ui32TotalExecOps);
+			gsCwq.ui32AvgExecTime += (ui32Time / gsCwq.ui32TotalExecOps);
+		}
+		else if (ui32Time)
+		{
+			gsCwq.ui32AvgExecTime = (IMG_UINT32)ui32Time;
+		}
+	}
+
+	if (! gsCwq.asStatsExecuted[i32WriteOffset].bKMReq)
+	{
+		/* This operation queues only UM CacheOp in per-PID process statistics database */
+		PVRSRVStatsUpdateCacheOpStats(gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp,
+						gsCwq.asStatsExecuted[i32WriteOffset].ui32OpSeqNum,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+						gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr,
+						gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr,
+						gsCwq.asStatsExecuted[i32WriteOffset].eFenceOpType,
+#endif
+						gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
+						gsCwq.asStatsExecuted[i32WriteOffset].uiSize,
+						ui64EnqueuedTime < ui64ExecuteTime ?
+							ui64ExecuteTime - ui64EnqueuedTime:
+							ui64EnqueuedTime - ui64ExecuteTime,
+						gsCwq.asStatsExecuted[i32WriteOffset].bRBF,
+						gsCwq.asStatsExecuted[i32WriteOffset].bUMF,
+						gsCwq.asStatsExecuted[i32WriteOffset].bIsFence,
+						psCacheOpWorkItem->pid);
+	}
+
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+e0:
+#endif
+	OSLockRelease(gsCwq.hStatsExecLock);
+}
+
+static void CacheOpStatsExecLogRead(void *pvFilePtr, void *pvData,
+								OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	IMG_CHAR *pszFlushype;
+	IMG_CHAR *pszCacheOpType;
+	IMG_CHAR *pszFlushSource;
+	IMG_INT32 i32ReadOffset;
+	IMG_INT32 i32WriteOffset;
+	IMG_UINT64 ui64EnqueuedTime;
+	IMG_UINT64 ui64DequeuedTime;
+	IMG_UINT64 ui64ExecuteTime;
+	IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN] = {0};
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	OSLockAcquire(gsCwq.hStatsExecLock);
+
+	pfnOSStatsPrintf(pvFilePtr,
+			"Primary CPU d-cache architecture: LSZ: 0x%d, URBF: %s, KGF: %s, KRBF: %s\n",
+			gsCwq.uiLineSize,
+			gsCwq.bSupportsUMFlush ? "Yes" : "No",
+			!gsCwq.bNoGlobalFlushImpl ? "Yes" : "No",
+			"Yes" /* KRBF mechanism always available */
+		);
+
+	pfnOSStatsPrintf(pvFilePtr,
+			"Configuration: QSZ: %d, UKT: %d, KDFT: %d, KGFT: %d, KDF: %s, URBF: %s, KGF: %s, KRBF: %s\n",
+			CACHEOP_INDICES_MAX,
+			gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD],
+			gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD],
+			gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD],
+			gsCwq.eConfig & CACHEOP_CONFIG_KDF  ? "Yes" : "No",
+			gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No",
+			gsCwq.eConfig & CACHEOP_CONFIG_KGF  ? "Yes" : "No",
+			gsCwq.eConfig & CACHEOP_CONFIG_KRBF ? "Yes" : "No"
+		);
+
+	pfnOSStatsPrintf(pvFilePtr,
+			"Summary: OP[F][TL] (tot.avg): %d.%d/%d.%d/%d, [KM][UM][A]SYNC: %d.%d/%d/%d, RBF (um/km): %d/%d, [D]GF (km): %d/%d, DSC (um/km): %d/%d\n",
+			gsCwq.ui32TotalExecOps, gsCwq.ui32AvgExecTime, gsCwq.ui32TotalFenceOps, gsCwq.ui32AvgFenceTime, gsCwq.ui32ServerDTL,
+			gsCwq.ui32ServerSync, gsCwq.ui32ServerSyncVA, gsCwq.ui32ClientSync,	gsCwq.ui32ServerASync,
+			gsCwq.ui32ClientRBF,   gsCwq.ui32ServerRBF,
+			gsCwq.ui32ServerDGF,   gsCwq.ui32ServerGF,
+			gsCwq.ui32UMDiscards,  gsCwq.ui32KMDiscards
+		);
+
+	CacheOpStatsExecLogHeader(szBuffer);
+	pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+
+	i32WriteOffset = gsCwq.i32StatsExecWriteIdx;
+	for (i32ReadOffset = DECR_WRAP(i32WriteOffset);
+		 i32ReadOffset != i32WriteOffset;
+		 i32ReadOffset = DECR_WRAP(i32ReadOffset))
+	{
+		if (!gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum &&
+			!gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp)
+		{
+			break;
+		}
+
+		/* Convert from nano-seconds to micro-seconds */
+		ui64ExecuteTime = gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime;
+		ui64EnqueuedTime = gsCwq.asStatsExecuted[i32ReadOffset].ui64EnqueuedTime;
+		ui64DequeuedTime = gsCwq.asStatsExecuted[i32ReadOffset].ui64DequeuedTime;
+		ui64ExecuteTime = DivBy10(DivBy10(DivBy10(ui64ExecuteTime)));
+		ui64EnqueuedTime = DivBy10(DivBy10(DivBy10(ui64EnqueuedTime)));
+		ui64DequeuedTime = ui64DequeuedTime ? DivBy10(DivBy10(DivBy10(ui64DequeuedTime))) : 0;
+
+		if (gsCwq.asStatsExecuted[i32ReadOffset].bIsFence)
+		{
+			IMG_CHAR *pszMode = "";
+			IMG_CHAR *pszFenceType = "";
+			pszCacheOpType = "Fence";
+
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+			pszMode = gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp != PVRSRV_CACHE_OP_GLOBAL ? "" : "  GF  ";
+			switch (gsCwq.asStatsExecuted[i32ReadOffset].eFenceOpType)
+			{
+				case RGXFWIF_DM_GP:
+					pszFenceType = " GP/GF";
+					break;
+
+				case RGXFWIF_DM_TDM:
+					pszFenceType = "  TDM ";
+					break;
+
+				case RGXFWIF_DM_TA:
+					pszFenceType = "  TA ";
+					break;
+
+				case RGXFWIF_DM_3D:
+					pszFenceType = "  PDM ";
+					break;
+
+				case RGXFWIF_DM_CDM:
+					pszFenceType = "  CDM ";
+					break;
+
+				case RGXFWIF_DM_RTU:
+					pszFenceType = "  RTU ";
+					break;
+
+				case RGXFWIF_DM_SHG:
+					pszFenceType = "  SHG ";
+					break;
+
+				default:
+					PVR_ASSERT(0);
+					break;
+			}
+#else
+			/* The CacheOp fence operation also triggered a global cache flush operation */
+			pszFenceType =
+				gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp != PVRSRV_CACHE_OP_GLOBAL ? "" : "   GF ";
+#endif
+			pfnOSStatsPrintf(pvFilePtr,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+							CACHEOP_RI_PRINTF,
+#else
+							CACHEOP_PRINTF,
+#endif
+							gsCwq.asStatsExecuted[i32ReadOffset].pid,
+							pszCacheOpType,
+							pszFenceType,
+							pszMode,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+							"",
+							"",
+#endif
+							gsCwq.asStatsExecuted[i32ReadOffset].uiOffset,
+							gsCwq.asStatsExecuted[i32ReadOffset].uiSize,
+							ui64EnqueuedTime < ui64ExecuteTime ?
+									ui64ExecuteTime - ui64EnqueuedTime
+										:
+									ui64EnqueuedTime - ui64ExecuteTime,
+							ui64EnqueuedTime < ui64DequeuedTime ?
+									ui64DequeuedTime - ui64EnqueuedTime
+										:
+									!ui64DequeuedTime ? 0 : ui64EnqueuedTime - ui64DequeuedTime,
+							gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum);
+		}
+		else
+		{
+			if (gsCwq.asStatsExecuted[i32ReadOffset].bRBF)
+			{
+				IMG_DEVMEM_SIZE_T ui64NumOfPages;
+
+				ui64NumOfPages = gsCwq.asStatsExecuted[i32ReadOffset].uiSize >> gsCwq.uiPageShift;
+				if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC)
+				{
+					pszFlushype = "RBF.Fast";
+				}
+				else
+				{
+					pszFlushype = "RBF.Slow";
+				}
+			}
+			else
+			{
+				pszFlushype = "   GF ";
+			}
+
+			if (gsCwq.asStatsExecuted[i32ReadOffset].bUMF)
+			{
+				pszFlushSource = " UM";
+			}
+			else
+			{
+				/*
+				   - Request originates directly from a KM thread or in KM (KM<), or
+				   - Request originates from a UM thread and is KM deferred (KM+), or
+				   - Request is/was discarded due to an 'else-[when,where]' GFlush
+				     - i.e. GF occurs either (a)sync to current UM/KM thread
+				*/
+				pszFlushSource =
+					gsCwq.asStatsExecuted[i32ReadOffset].bKMReq ? " KM<" :
+					gsCwq.asStatsExecuted[i32ReadOffset].bDeferred && gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime ? " KM+" :
+					!gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime ? " KM-" : " KM";
+			}
+
+			switch (gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp)
+			{
+				case PVRSRV_CACHE_OP_NONE:
+					pszCacheOpType = "None";
+					break;
+				case PVRSRV_CACHE_OP_CLEAN:
+					pszCacheOpType = "Clean";
+					break;
+				case PVRSRV_CACHE_OP_INVALIDATE:
+					pszCacheOpType = "Invalidate";
+					break;
+				case PVRSRV_CACHE_OP_FLUSH:
+					pszCacheOpType = "Flush";
+					break;
+				case PVRSRV_CACHE_OP_GLOBAL:
+					pszCacheOpType = "GFlush";
+					break;
+				case PVRSRV_CACHE_OP_TIMELINE:
+					pszCacheOpType = "Timeline";
+					pszFlushype = "      ";
+					break;
+				default:
+					if ((IMG_UINT32)gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp == (IMG_UINT32)(PVRSRV_CACHE_OP_GLOBAL|PVRSRV_CACHE_OP_TIMELINE))
+					{
+						pszCacheOpType = "Timeline";
+					}
+					else
+					{
+						pszCacheOpType = "Unknown";
+						gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum =
+								(IMG_UINT32) gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp;
+					}
+					break;
+			}
+
+			pfnOSStatsPrintf(pvFilePtr,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+							CACHEOP_RI_PRINTF,
+#else
+							CACHEOP_PRINTF,
+#endif
+							gsCwq.asStatsExecuted[i32ReadOffset].pid,
+							pszCacheOpType,
+							pszFlushype,
+							pszFlushSource,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+							gsCwq.asStatsExecuted[i32ReadOffset].sDevVAddr.uiAddr,
+							gsCwq.asStatsExecuted[i32ReadOffset].sDevPAddr.uiAddr,
+#endif
+							gsCwq.asStatsExecuted[i32ReadOffset].uiOffset,
+							gsCwq.asStatsExecuted[i32ReadOffset].uiSize,
+							ui64EnqueuedTime < ui64ExecuteTime ?
+										ui64ExecuteTime - ui64EnqueuedTime
+											:
+										ui64EnqueuedTime - ui64ExecuteTime,
+							ui64EnqueuedTime < ui64DequeuedTime ?
+									ui64DequeuedTime - ui64EnqueuedTime
+										:
+									!ui64DequeuedTime ? 0 : ui64EnqueuedTime - ui64DequeuedTime,
+							gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum);
+		}
+	}
+
+	OSLockRelease(gsCwq.hStatsExecLock);
+}
+#endif /* defined(CACHEOP_DEBUG) */
+
+static void CacheOpStatsReset(void)
+{
+#if defined(CACHEOP_DEBUG)
+	gsCwq.ui32KMDiscards    = 0;
+	gsCwq.ui32UMDiscards    = 0;
+	gsCwq.ui32TotalExecOps  = 0;
+	gsCwq.ui32TotalFenceOps = 0;
+	gsCwq.ui32AvgExecTime   = 0;
+	gsCwq.ui32AvgFenceTime  = 0;
+	gsCwq.ui32ClientRBF     = 0;
+	gsCwq.ui32ClientSync    = 0;
+	gsCwq.ui32ServerRBF     = 0;
+	gsCwq.ui32ServerASync   = 0;
+	gsCwq.ui32ServerSyncVA   = 0;
+	gsCwq.ui32ServerSync    = 0;
+	gsCwq.ui32ServerGF      = 0;
+	gsCwq.ui32ServerDGF     = 0;
+	gsCwq.ui32ServerDTL     = 0;
+	gsCwq.i32StatsExecWriteIdx = 0;
+	OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted));
+#endif
+}
+
+static void CacheOpConfigUpdate(IMG_UINT32 ui32Config)
+{
+	OSLockAcquire(gsCwq.hConfigLock);
+
+	/* Step 0, set the gsCwq.eConfig bits */
+	if (!(ui32Config & (CACHEOP_CONFIG_LAST - 1)))
+	{
+		gsCwq.bConfigTuning = IMG_FALSE;
+		gsCwq.eConfig = CACHEOP_CONFIG_KRBF | CACHEOP_CONFIG_KDF;
+		if (! gsCwq.bNoGlobalFlushImpl)
+		{
+			gsCwq.eConfig |= CACHEOP_CONFIG_KGF;
+		}
+		if (gsCwq.bSupportsUMFlush)
+		{
+			gsCwq.eConfig |= CACHEOP_CONFIG_URBF;
+		}
+	}
+	else
+	{
+		gsCwq.bConfigTuning = IMG_TRUE;
+
+		if (ui32Config & CACHEOP_CONFIG_KRBF)
+		{
+			gsCwq.eConfig |= CACHEOP_CONFIG_KRBF;
+		}
+		else
+		{
+			gsCwq.eConfig &= ~CACHEOP_CONFIG_KRBF;
+		}
+
+		if (ui32Config & CACHEOP_CONFIG_KDF)
+		{
+			gsCwq.eConfig |= CACHEOP_CONFIG_KDF;
+		}
+		else
+		{
+			gsCwq.eConfig &= ~CACHEOP_CONFIG_KDF;
+		}
+
+		if (!gsCwq.bNoGlobalFlushImpl && (ui32Config & CACHEOP_CONFIG_KGF))
+		{
+			gsCwq.eConfig |= CACHEOP_CONFIG_KGF;
+		}
+		else
+		{
+			gsCwq.eConfig &= ~CACHEOP_CONFIG_KGF;
+		}
+
+		if (gsCwq.bSupportsUMFlush && (ui32Config & CACHEOP_CONFIG_URBF))
+		{
+			gsCwq.eConfig |= CACHEOP_CONFIG_URBF;
+		}
+		else
+		{
+			gsCwq.eConfig &= ~CACHEOP_CONFIG_URBF;
+		}
+	}
+
+	if (ui32Config & CACHEOP_CONFIG_KLOG)
+	{
+		/* Include logs from KM callers in the history */
+		gsCwq.eConfig |= CACHEOP_CONFIG_KLOG;
+	}
+	else
+	{
+		gsCwq.eConfig &= ~CACHEOP_CONFIG_KLOG;
+	}
+
+	/* Step 1, set gsCwq.ui32Config based on gsCwq.eConfig */
+	ui32Config = 0;
+	if (gsCwq.eConfig & CACHEOP_CONFIG_KRBF)
+	{
+		ui32Config |= CACHEOP_CONFIG_KRBF;
+	}
+	if (gsCwq.eConfig & CACHEOP_CONFIG_KDF)
+	{
+		ui32Config |= CACHEOP_CONFIG_KDF;
+	}
+	if (gsCwq.eConfig & CACHEOP_CONFIG_KGF)
+	{
+		ui32Config |= CACHEOP_CONFIG_KGF;
+	}
+	if (gsCwq.eConfig & CACHEOP_CONFIG_URBF)
+	{
+		ui32Config |= CACHEOP_CONFIG_URBF;
+	}
+	if (gsCwq.eConfig & CACHEOP_CONFIG_KLOG)
+	{
+		ui32Config |= CACHEOP_CONFIG_KLOG;
+	}
+	gsCwq.ui32Config = ui32Config;
+
+	/* Step 2, Bar RBF promotion to GF, unless a GF is implemented */
+	gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] = (IMG_UINT32)~0;
+	if (! gsCwq.bNoGlobalFlushImpl)
+	{
+		gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] = (IMG_UINT32)PVR_DIRTY_BYTES_FLUSH_THRESHOLD;
+	}
+
+	/* Step 3, in certain cases where a CacheOp/VA is provided, this threshold determines at what point
+	   the optimisation due to the presence of said VA (i.e. not having to remap the PMR pages in KM)
+	   is clawed back because of the overhead of maintaining such a large request, which might stall the
+	   user thread; to hide this latency, have these CacheOps executed on the deferred CacheOp thread */
+	gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] = (IMG_UINT32)(PVR_DIRTY_BYTES_FLUSH_THRESHOLD >> 2);
+
+	/* Step 4, if there is no UM support, all requests are done in KM, so zero these, forcing all client
+	   requests to come down into the KM for maintenance */
+	gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = (IMG_UINT32)0;
+	gsCwq.pui32InfoPage[CACHEOP_INFO_UMRBFONLY] = 0;
+	if (gsCwq.bSupportsUMFlush)
+	{
+		/* If URBF has been selected exclusively OR selected but there is no GF implementation */
+		if ((gsCwq.eConfig & CACHEOP_CONFIG_URBF) &&
+			(gsCwq.bNoGlobalFlushImpl || !((gsCwq.ui32Config & (CACHEOP_CONFIG_LAST-1)) & ~CACHEOP_CONFIG_URBF)))
+		{
+			/* If only URBF has been selected, simulate without GF support OR no GF support means all client
+			   requests should be done in UM. In both cases, set this threshold to the highest value to
+			   prevent any client requests coming down to the server for maintenance. */
+			gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = (IMG_UINT32)~0;
+			gsCwq.pui32InfoPage[CACHEOP_INFO_UMRBFONLY] = 1;
+		}
+		/* This is the default entry for setting the UM info. page entries */
+		else if ((gsCwq.eConfig & CACHEOP_CONFIG_URBF) && !gsCwq.bNoGlobalFlushImpl)
+		{
+			/* Set the UM/KM threshold; all request sizes above it go to the server for GF maintenance _only_.
+			   Because UM flushes already have a VA acquired, no cost is incurred in per-page (re)mapping
+			   of the to-be-maintained PMR/page(s) as is the case with KM flushing, so disallow KDF */
+#if defined(ARM64) || defined(__aarch64__) || defined(__arm64__)
+			/* This value is set to be higher for ARM64 due to a very optimised UM flush implementation */
+			gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] << 5;
+#else
+			/* For others, assume an average UM flush performance, anything above should be promoted to GF.
+			   For x86 UMA/LMA, we avoid KDF because remapping PMR/pages in KM might fail due to exhausted
+			   or fragmented VMALLOC kernel VA space */
+			gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD];
+#endif
+			gsCwq.pui32InfoPage[CACHEOP_INFO_UMRBFONLY] = 0;
+		}
+	}
+
+	/* Step 5, reset stats. */
+	CacheOpStatsReset();
+
+	OSLockRelease(gsCwq.hConfigLock);
+}
+
+static void CacheOpConfigRead(void *pvFilePtr, void *pvData,
+							OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+	pfnOSStatsPrintf(pvFilePtr,
+			"KDF: %s, URBF: %s, KGF: %s, KRBF: %s\n",
+			gsCwq.eConfig & CACHEOP_CONFIG_KDF  ? "Yes" : "No",
+			gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No",
+			gsCwq.eConfig & CACHEOP_CONFIG_KGF  ? "Yes" : "No",
+			gsCwq.eConfig & CACHEOP_CONFIG_KRBF ? "Yes" : "No"
+		);
+}
+
+static PVRSRV_ERROR CacheOpConfigQuery(const PVRSRV_DEVICE_NODE *psDevNode,
+									   const void *psPrivate,
+									   IMG_UINT32 *pui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+	*pui32Value = gsCwq.ui32Config;
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR CacheOpConfigSet(const PVRSRV_DEVICE_NODE *psDevNode,
+									const void *psPrivate,
+									IMG_UINT32 ui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+	CacheOpConfigUpdate(ui32Value & CACHEOP_CONFIG_ALL);
+	return PVRSRV_OK;
+}
+
+static INLINE void CacheOpQItemRecycle(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+	PVRSRV_ERROR eError;
+	eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+	PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+	/* Set to max as a precaution should recycling this CacheOp index fail
+	   to reset it; this purely safeguards against having to discard
+	   subsequent deferred CacheOps or signal the sw sync timeline */
+	psCacheOpWorkItem->iTimeline = PVRSRV_NO_UPDATE_TIMELINE_REQUIRED;
+	psCacheOpWorkItem->ui32GFSeqNum = (IMG_UINT32)~0;
+	psCacheOpWorkItem->ui32OpSeqNum = (IMG_UINT32)~0;
+#if defined(CACHEOP_DEBUG)
+	psCacheOpWorkItem->psPMR = (void *)(uintptr_t)~0;
+#endif
+}
+
+static INLINE void CacheOpQItemReadCheck(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+#if defined(CACHEOP_DEBUG)
+	PVR_ASSERT(psCacheOpWorkItem->psPMR);
+	PVR_ASSERT(psCacheOpWorkItem->psPMR != (void *)(uintptr_t)~0);
+	PVR_ASSERT(psCacheOpWorkItem->ui32OpSeqNum != (IMG_UINT32)~0);
+	if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF))
+	{
+		PVR_ASSERT(psCacheOpWorkItem->ui32GFSeqNum != (IMG_UINT32)~0);
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(psCacheOpWorkItem);
+#endif
+}
+
+static INLINE void CacheOpQItemWriteCheck(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+#if defined(CACHEOP_DEBUG)
+	PVR_ASSERT(psCacheOpWorkItem->psPMR == (void *)(uintptr_t)~0);
+	PVR_ASSERT(psCacheOpWorkItem->ui32OpSeqNum == (IMG_UINT32)~0);
+	PVR_ASSERT(psCacheOpWorkItem->ui32GFSeqNum == (IMG_UINT32)~0);
+	PVR_ASSERT(psCacheOpWorkItem->iTimeline == PVRSRV_NO_UPDATE_TIMELINE_REQUIRED);
+#else
+	PVR_UNREFERENCED_PARAMETER(psCacheOpWorkItem);
+#endif
+}
+
+static INLINE IMG_UINT32 CacheOpGetNextCommonSeqNum(void)
+{
+	IMG_UINT32 ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum);
+	if (! ui32SeqNum)
+	{
+		/* Zero is _not_ a valid sequence value; skipping it simplifies _all_
+		   subsequent fence checking when no cache maintenance operation
+		   is outstanding, as in that case a fence value of zero is supplied. */
+		if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF))
+		{
+			/* Also when seqNum wraps around/crosses zero, this requires us to
+			   ensure that GFSEQNUM is not erroneously higher than any/all client
+			   seqNum(s) in the system during this wrap-around transition so we
+			   disable both momentarily until the next GF comes along. This has
+			   the effect that all subsequent in-flight discards using ">" is
+			   never true seeing zero is _not_ greater than anything and all
+			   "<=" comparison are always true seeing zero is always less than
+			   all non-zero integers. The additional GF here is done mostly to
+			   account for race condition(s) during this transition for all
+			   pending seqNum(s) that are still behind zero. */
+			gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] = 0;
+			gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM1] = 0;
+			ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum);
+			(void) OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+		}
+		else
+		{
+			ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum);
+		}
+	}
+	return ui32SeqNum;
+}
+
+static INLINE PVRSRV_ERROR CacheOpGlobalFlush(void)
+{
+#if !defined(CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH)
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#else
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32OpSeqNum = gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+
+	if (! CacheOpConfigSupports(CACHEOP_CONFIG_KGF))
+	{
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+	}
+
+	OSLockAcquire(gsCwq.hGlobalFlushLock);
+	if (ui32OpSeqNum < gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0])
+	{
+#if defined(CACHEOP_DEBUG)
+		gsCwq.ui32KMDiscards += 1;
+#endif
+		goto exit;
+	}
+
+	/* User space sampling the information-page seqNumbers after this point
+	   and before the corresponding GFSEQNUM0 update leads to an invalid
+	   sampling which must be discarded by UM. This implements a lockless
+	   critical region for a single KM(writer) & multiple UM/KM(readers) */
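+	/* Reader-side sketch (an assumption about the UM protocol, shown for
+	   clarity): a reader samples GFSEQNUM0, does its work, then re-checks
+	   GFSEQNUM0 == GFSEQNUM1; observing a mismatch means it raced this
+	   writer and must discard the sampled value. */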
+	ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+	gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM1] = ui32OpSeqNum;
+
+	eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+	PVR_LOGR_IF_ERROR(eError, "OSCPUOperation(PVRSRV_CACHE_OP_FLUSH)");
+
+	gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] = ui32OpSeqNum;
+	OSAtomicWrite(&gsCwq.hDeferredSize, 0);
+#if defined(CACHEOP_DEBUG)
+	gsCwq.ui32ServerGF += 1;
+#endif
+
+exit:
+	OSLockRelease(gsCwq.hGlobalFlushLock);
+	return PVRSRV_OK;
+#endif
+}
+
+static INLINE void CacheOpExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode,
+										PVRSRV_CACHE_OP uiCacheOp,
+										IMG_BYTE *pbCpuVirtAddr,
+										IMG_CPU_PHYADDR sCpuPhyAddr,
+										IMG_DEVMEM_OFFSET_T uiPgAlignedOffset,
+										IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset,
+										IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset)
+{
+	IMG_BYTE *pbCpuVirtAddrEnd;
+	IMG_BYTE *pbCpuVirtAddrStart;
+	IMG_CPU_PHYADDR sCpuPhyAddrEnd;
+	IMG_CPU_PHYADDR sCpuPhyAddrStart;
+	IMG_DEVMEM_SIZE_T uiRelFlushSize;
+	IMG_DEVMEM_OFFSET_T uiRelFlushOffset;
+	IMG_DEVMEM_SIZE_T uiNextPgAlignedOffset;
+
+	/* These quantities allow us to perform cache operations
+	   at cache-line granularity, thereby ensuring we do not
+	   perform more than is necessary */
+	PVR_ASSERT(uiPgAlignedOffset < uiCLAlignedEndOffset);
+	uiRelFlushSize = (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize;
+	uiRelFlushOffset = 0;
+
+	if (uiCLAlignedStartOffset > uiPgAlignedOffset)
+	{
+		/* Zero unless initially starting at an in-page offset */
+		uiRelFlushOffset = uiCLAlignedStartOffset - uiPgAlignedOffset;
+		uiRelFlushSize -= uiRelFlushOffset;
+	}
+
+	/* uiRelFlushSize is gsCwq.uiPageSize unless the current outstanding CacheOp
+	   size is smaller. The 1st case handles an in-page CacheOp range and
+	   the 2nd case handles a multiple-page CacheOp range with a last
+	   CacheOp size that is less than gsCwq.uiPageSize */
+	uiNextPgAlignedOffset = uiPgAlignedOffset + (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize;
+	if (uiNextPgAlignedOffset < uiPgAlignedOffset)
+	{
+		/* uiNextPgAlignedOffset is greater than uiCLAlignedEndOffset
+		   by implication of this wrap-round; this only happens when
+		   uiPgAlignedOffset is the last page aligned offset */
+		uiRelFlushSize = uiRelFlushOffset ?
+				uiCLAlignedEndOffset - uiCLAlignedStartOffset :
+				uiCLAlignedEndOffset - uiPgAlignedOffset;
+	}
+	else
+	{
+		if (uiNextPgAlignedOffset > uiCLAlignedEndOffset)
+		{
+			uiRelFlushSize = uiRelFlushOffset ?
+					uiCLAlignedEndOffset - uiCLAlignedStartOffset :
+					uiCLAlignedEndOffset - uiPgAlignedOffset;
+		}
+	}
+
+	/* More efficient to request cache maintenance operation for full
+	   relative range as opposed to multiple cache-aligned ranges */
+	sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + uiRelFlushOffset;
+	sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr + uiRelFlushSize;
+	if (pbCpuVirtAddr)
+	{
+		pbCpuVirtAddrStart = pbCpuVirtAddr + uiRelFlushOffset;
+		pbCpuVirtAddrEnd = pbCpuVirtAddrStart + uiRelFlushSize;
+	}
+	else
+	{
+		/* Some OS/Env layer support functions expect NULL(s) */
+		pbCpuVirtAddrStart = NULL;
+		pbCpuVirtAddrEnd = NULL;
+	}
+
+	/* Perform requested CacheOp on the CPU data cache for successive cache
+	   line worth of bytes up to page or in-page cache-line boundary */
+	switch (uiCacheOp)
+	{
+		case PVRSRV_CACHE_OP_CLEAN:
+			OSCPUCacheCleanRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+									sCpuPhyAddrStart, sCpuPhyAddrEnd);
+			break;
+		case PVRSRV_CACHE_OP_INVALIDATE:
+			OSCPUCacheInvalidateRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+									sCpuPhyAddrStart, sCpuPhyAddrEnd);
+			break;
+		case PVRSRV_CACHE_OP_FLUSH:
+			OSCPUCacheFlushRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+									sCpuPhyAddrStart, sCpuPhyAddrEnd);
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,	"%s: Invalid cache operation type %d",
+					__FUNCTION__, uiCacheOp));
+			break;
+	}
+
+#if defined(CACHEOP_DEBUG)
+	/* Tracks the number of kernel-mode cacheline maintenance instructions */
+	gsCwq.ui32ServerRBF += (uiRelFlushSize & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift;
+#endif
+}
+
+static INLINE void CacheOpExecRangeBasedVA(PVRSRV_DEVICE_NODE *psDevNode,
+										 IMG_CPU_VIRTADDR pvAddress,
+										 IMG_DEVMEM_SIZE_T uiSize,
+										 PVRSRV_CACHE_OP uiCacheOp)
+{
+	IMG_CPU_PHYADDR sCpuPhyAddrUnused = {(uintptr_t)0xCAFEF00DDEADBEEF};
+	IMG_BYTE *pbEnd = (IMG_BYTE*)((uintptr_t)pvAddress + (uintptr_t)uiSize);
+	IMG_BYTE *pbStart = (IMG_BYTE*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiLineSize-1));
+
+	/*
+	  If the start/end address isn't aligned to cache line size, round it up to the
+	  nearest multiple; this ensures that we flush all the cache lines affected by
+	  unaligned start/end addresses.
+	 */
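+	/*
+	  Worked example (assuming a 64-byte cache line): a request covering
+	  [0x1010, 0x1058) yields pbStart == 0x1000 and pbEnd == 0x1080, i.e.
+	  every line touched by the unaligned request is maintained.
+	 */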
+	pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)gsCwq.uiLineSize);
+	switch (uiCacheOp)
+	{
+		case PVRSRV_CACHE_OP_CLEAN:
+			OSCPUCacheCleanRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused);
+			break;
+		case PVRSRV_CACHE_OP_INVALIDATE:
+			OSCPUCacheInvalidateRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused);
+			break;
+		case PVRSRV_CACHE_OP_FLUSH:
+			OSCPUCacheFlushRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused);
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,	"%s: Invalid cache operation type %d", __FUNCTION__, uiCacheOp));
+			break;
+	}
+
+#if defined(CACHEOP_DEBUG)
+	/* Tracks the number of kernel-mode cacheline maintenance instructions */
+	gsCwq.ui32ServerRBF += (uiSize & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift;
+#endif
+}
+
+static IMG_CPU_VIRTADDR CacheOpValidateVAOffset(PMR *psPMR,
+												IMG_CPU_VIRTADDR pvAddress,
+												IMG_DEVMEM_OFFSET_T uiOffset,
+												IMG_DEVMEM_SIZE_T uiSize)
+{
+#if defined(LINUX) && !defined(CACHEFLUSH_NO_KMRBF_USING_UMVA)
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+#endif
+
+	if (! pvAddress)
+	{
+		return NULL;
+	}
+
+#if !defined(LINUX) || defined(CACHEFLUSH_NO_KMRBF_USING_UMVA)
+	pvAddress = NULL;
+#else
+	/* Offset VA, validate UM/VA, skip all KM/VA as it's pre-validated */
+	pvAddress = (void*)(uintptr_t)((uintptr_t)pvAddress + uiOffset);
+	if (access_ok(VERIFY_READ, pvAddress, uiSize))
+	{
+		down_read(&mm->mmap_sem);
+		vma = find_vma(mm, (unsigned long)(uintptr_t)pvAddress);
+		if (!vma ||
+			vma->vm_start > (unsigned long)(uintptr_t)pvAddress ||
+			vma->vm_end - vma->vm_start > (unsigned long)(uintptr_t)uiSize)
+		{
+			/* Out of range mm_struct->vm_area VA */
+			pvAddress = NULL;
+		}
+		else if (vma->vm_private_data != psPMR)
+		{
+			/*
+			   Unknown mm_struct->vm_area VA; we can't risk dcache maintenance using
+			   this VA, as the client user space mapping could be removed without
+			   us knowing, which might induce a CPU fault during cache maintenance.
+			*/
+			pvAddress = NULL;
+		}
+		up_read(&mm->mmap_sem);
+	}
+	/* Fail if access is not OK and the supplied address is a client user space VA */
+	else if ((IMG_UINT64)(uintptr_t)pvAddress <= OSGetCurrentProcessVASpaceSize())
+	{
+		pvAddress = NULL;
+	}
+#endif
+
+	return pvAddress;
+}
+
+static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR,
+									IMG_CPU_VIRTADDR pvAddress,
+									IMG_DEVMEM_OFFSET_T uiOffset,
+									IMG_DEVMEM_SIZE_T uiSize,
+									PVRSRV_CACHE_OP uiCacheOp,
+									IMG_UINT32 ui32GFlushSeqNum,
+									IMG_BOOL bIsRequestValidated,
+									IMG_BOOL *pbUsedGlobalFlush)
+{
+	IMG_HANDLE hPrivOut;
+	IMG_BOOL bPMRIsSparse;
+	IMG_UINT32 ui32PageIndex;
+	IMG_UINT32 ui32NumOfPages;
+	IMG_DEVMEM_SIZE_T uiOutSize;
+	PVRSRV_DEVICE_NODE *psDevNode;
+	IMG_DEVMEM_SIZE_T uiPgAlignedSize;
+	IMG_DEVMEM_OFFSET_T uiPgAlignedOffset;
+	IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset;
+	IMG_DEVMEM_OFFSET_T uiPgAlignedEndOffset;
+	IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset;
+	IMG_DEVMEM_OFFSET_T uiPgAlignedStartOffset;
+	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_CPU_PHYADDR asCpuPhyAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_CPU_PHYADDR *psCpuPhyAddr = asCpuPhyAddr;
+	IMG_BOOL bIsPMRInfoValid = IMG_FALSE;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BYTE *pbCpuVirtAddr = NULL;
+	IMG_BOOL *pbValid = abValid;
+
+	if (uiCacheOp == PVRSRV_CACHE_OP_NONE || uiCacheOp == PVRSRV_CACHE_OP_TIMELINE)
+	{
+		return PVRSRV_OK;
+	}
+	/* Some cache ISA(s) support a privileged kernel-mode global flush; if it fails, fall back to KRBF */
+	else if (uiCacheOp == PVRSRV_CACHE_OP_GLOBAL || uiSize >= gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD])
+	{
+		if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32GFlushSeqNum)
+		{
+			*pbUsedGlobalFlush = IMG_FALSE;
+#if defined(CACHEOP_DEBUG)
+			gsCwq.ui32KMDiscards += 1;
+#endif
+			return PVRSRV_OK;
+		}
+		else
+		{
+			eError = CacheOpGlobalFlush();
+			if (eError == PVRSRV_OK)
+			{
+				*pbUsedGlobalFlush = IMG_TRUE;
+				return PVRSRV_OK;
+			}
+			else if (uiCacheOp == PVRSRV_CACHE_OP_GLOBAL)
+			{
+				/* Cannot fall back to KRBF as an explicit KGF was erroneously requested */
+				*pbUsedGlobalFlush = IMG_FALSE;
+				PVR_ASSERT(eError == PVRSRV_OK);
+				PVR_LOGR_IF_ERROR(eError, CACHEOP_NO_GFLUSH_ERROR_STRING);
+			}
+		}
+	}
+	else if (! uiSize)
+	{
+		/* GF are queued with !size, so check for GF first before !size */
+		return PVRSRV_OK;
+	}
+
+	if (! bIsRequestValidated)
+	{
+		IMG_DEVMEM_SIZE_T uiLogicalSize;
+
+		/* Need to validate parameters before proceeding */
+		eError = PMR_LogicalSize(psPMR, &uiLogicalSize);
+		PVR_LOGR_IF_ERROR(eError, "PMR_LogicalSize");
+
+		PVR_LOGR_IF_FALSE(((uiOffset+uiSize) <= uiLogicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE);
+
+		eError = PMRLockSysPhysAddresses(psPMR);
+		PVR_LOGR_IF_ERROR(eError, "PMRLockSysPhysAddresses");
+	}
+
+	/* Note we're using KRBF Flush */
+	*pbUsedGlobalFlush = IMG_FALSE;
+
+	/* Fast track if VA is provided and CPU ISA supports VA only maintenance */
+	pvAddress = CacheOpValidateVAOffset(psPMR, pvAddress, uiOffset, uiSize);
+	if (pvAddress && gsCwq.uiCacheOpAddrType == PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+	{
+		CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pvAddress, uiSize, uiCacheOp);
+		if (! bIsRequestValidated)
+		{
+			eError = PMRUnlockSysPhysAddresses(psPMR);
+			PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+		}
+#if defined(CACHEOP_DEBUG)
+		gsCwq.ui32ServerSyncVA += 1;
+#endif
+		return PVRSRV_OK;
+	}
+	else if (pvAddress)
+	{
+		/* Round the incoming VA (if any) down to the nearest page-aligned VA */
+		pvAddress = (void*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1));
+#if defined(CACHEOP_DEBUG)
+		gsCwq.ui32ServerSyncVA += 1;
+#endif
+	}
+
+	/* Need this for kernel mapping */
+	bPMRIsSparse = PMR_IsSparse(psPMR);
+	psDevNode = PMR_DeviceNode(psPMR);
+
+	/* Round the incoming offsets to cache-line / page aligned addresses (start down, end up) */
+	uiCLAlignedEndOffset = uiOffset + uiSize;
+	uiCLAlignedEndOffset = PVR_ALIGN(uiCLAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiLineSize);
+	uiCLAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiLineSize-1));
+
+	uiPgAlignedEndOffset = uiCLAlignedEndOffset;
+	uiPgAlignedEndOffset = PVR_ALIGN(uiPgAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize);
+	uiPgAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiPageSize-1));
+	uiPgAlignedSize = uiPgAlignedEndOffset - uiPgAlignedStartOffset;
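+
+	/* Illustrative example, assuming 64-byte cache lines and 4K pages: for
+	   uiOffset=0x1234, uiSize=0x100 the CL-aligned span becomes [0x1200,0x1340)
+	   and the page-aligned span [0x1000,0x2000), i.e. uiPgAlignedSize=0x1000 */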
+
+#if defined(CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING)
+	/* For internal debug if cache-line optimised
+	   flushing is suspected of causing data corruption */
+	uiCLAlignedStartOffset = uiPgAlignedStartOffset;
+	uiCLAlignedEndOffset = uiPgAlignedEndOffset;
+#endif
+
+	/* Number of device pages spanned by the page-aligned PMR range */
+	ui32NumOfPages = uiPgAlignedSize >> gsCwq.uiPageShift;
+	if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		/* The pbValid array is allocated first as it is needed in
+		   both physical/virtual cache maintenance methods */
+		pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL));
+		if (! pbValid)
+		{
+			pbValid = abValid;
+		}
+		else if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+		{
+			psCpuPhyAddr = OSAllocZMem(ui32NumOfPages * sizeof(IMG_CPU_PHYADDR));
+			if (! psCpuPhyAddr)
+			{
+				psCpuPhyAddr = asCpuPhyAddr;
+				OSFreeMem(pbValid);
+				pbValid = abValid;
+			}
+		}
+	}
+
+	/* We always retrieve PMR data in bulk, up-front, if the number of pages is
+	   within PMR_MAX_TRANSLATION_STACK_ALLOC limits; otherwise we check that a
+	   dynamic buffer has been allocated to satisfy requests outside those limits */
+	if (ui32NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC || pbValid != abValid)
+	{
+		if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+		{
+			/* Look-up PMR CpuPhyAddr once, if possible */
+			eError = PMR_CpuPhysAddr(psPMR,
+									 gsCwq.uiPageShift,
+									 ui32NumOfPages,
+									 uiPgAlignedStartOffset,
+									 psCpuPhyAddr,
+									 pbValid);
+			if (eError == PVRSRV_OK)
+			{
+				bIsPMRInfoValid = IMG_TRUE;
+			}
+		}
+		else
+		{
+			/* Look-up PMR per-page validity once, if possible */
+			eError = PMR_IsOffsetValid(psPMR,
+									   gsCwq.uiPageShift,
+									   ui32NumOfPages,
+									   uiPgAlignedStartOffset,
+									   pbValid);
+			bIsPMRInfoValid = (eError == PVRSRV_OK) ? IMG_TRUE : IMG_FALSE;
+		}
+	}
+
+	/* For each device page, carry out the requested cache maintenance operation */
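+	/* ui32PageIndex indexes the bulk-translated arrays; it is reset to 0 on
+	   each iteration when lookups are instead performed per-page below */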
+	for (uiPgAlignedOffset = uiPgAlignedStartOffset, ui32PageIndex = 0;
+		 uiPgAlignedOffset < uiPgAlignedEndOffset;
+		 uiPgAlignedOffset += (IMG_DEVMEM_OFFSET_T) gsCwq.uiPageSize, ui32PageIndex += 1)
+	{
+		/* Just before issuing the CacheOp RBF, check if it can be discarded */
+		if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32GFlushSeqNum)
+		{
+#if defined(CACHEOP_DEBUG)
+			gsCwq.ui32KMDiscards += 1;
+#endif
+			break;
+		}
+
+		if (! bIsPMRInfoValid)
+		{
+			/* Never cross a page boundary without looking up the corresponding PMR
+			   page physical address and/or page validity, if these were not looked
+			   up in bulk up-front */
+			ui32PageIndex = 0;
+			if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+			{
+				eError = PMR_CpuPhysAddr(psPMR,
+										 gsCwq.uiPageShift,
+										 1,
+										 uiPgAlignedOffset,
+										 psCpuPhyAddr,
+										 pbValid);
+				PVR_LOGG_IF_ERROR(eError, "PMR_CpuPhysAddr", e0);
+			}
+			else
+			{
+				eError = PMR_IsOffsetValid(psPMR,
+										  gsCwq.uiPageShift,
+										  1,
+										  uiPgAlignedOffset,
+										  pbValid);
+				PVR_LOGG_IF_ERROR(eError, "PMR_IsOffsetValid", e0);
+			}
+		}
+
+		/* Skip invalid PMR pages (i.e. sparse) */
+		if (pbValid[ui32PageIndex] == IMG_FALSE)
+		{
+			PVR_ASSERT(bPMRIsSparse);
+			continue;
+		}
+
+		if (pvAddress)
+		{
+			/* The caller has supplied a KM/UM CpuVA, so use it unconditionally */
+			pbCpuVirtAddr =
+				(void *)(uintptr_t)((uintptr_t)pvAddress + (uintptr_t)(uiPgAlignedOffset-uiPgAlignedStartOffset));
+		}
+		/* Skip CpuVA acquire if CacheOp can be maintained entirely using CpuPA */
+		else if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL)
+		{
+			if (bPMRIsSparse)
+			{
+				eError =
+					PMRAcquireSparseKernelMappingData(psPMR,
+													  uiPgAlignedOffset,
+													  gsCwq.uiPageSize,
+													  (void **)&pbCpuVirtAddr,
+													  (size_t*)&uiOutSize,
+													  &hPrivOut);
+				PVR_LOGG_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0);
+			}
+			else
+			{
+				eError =
+					PMRAcquireKernelMappingData(psPMR,
+												uiPgAlignedOffset,
+												gsCwq.uiPageSize,
+												(void **)&pbCpuVirtAddr,
+												(size_t*)&uiOutSize,
+												&hPrivOut);
+				PVR_LOGG_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0);
+			}
+		}
+
+		/* Issue actual cache maintenance for PMR */
+		CacheOpExecRangeBased(psDevNode,
+							uiCacheOp,
+							pbCpuVirtAddr,
+							(gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL) ?
+								psCpuPhyAddr[ui32PageIndex] : psCpuPhyAddr[0],
+							uiPgAlignedOffset,
+							uiCLAlignedStartOffset,
+							uiCLAlignedEndOffset);
+
+		if (! pvAddress)
+		{
+			/* The caller has not supplied a KM/UM CpuVA, so release the acquired mapping */
+			if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL)
+			{
+				eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+				PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+			}
+		}
+	}
+
+e0:
+	if (psCpuPhyAddr != asCpuPhyAddr)
+	{
+		OSFreeMem(psCpuPhyAddr);
+	}
+
+	if (pbValid != abValid)
+	{
+		OSFreeMem(pbValid);
+	}
+
+	if (! bIsRequestValidated)
+	{
+		eError = PMRUnlockSysPhysAddresses(psPMR);
+		PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+	}
+
+	return eError;
+}
+
+static INLINE IMG_BOOL CacheOpFenceCheck(IMG_UINT32 ui32CompletedSeqNum,
+										 IMG_UINT32 ui32FenceSeqNum)
+{
+	IMG_UINT32 ui32RebasedCompletedNum;
+	IMG_UINT32 ui32RebasedFenceNum;
+	IMG_UINT32 ui32Rebase;
+
+	if (ui32FenceSeqNum == 0)
+	{
+		return IMG_TRUE;
+	}
+
+	/*
+	   The problem statement is how to compare two values
+	   on a numerical sequentially incrementing timeline in
+	   the presence of wrap around arithmetic semantics using
+	   a single ui32 counter & atomic (increment) operations.
+
+	   The rationale for the solution here is to rebase the
+	   incoming values to the sequence midpoint and perform
+	   comparisons there; this allows us to handle overflow
+	   or underflow wrap-round using only a single integer.
+
+	   NOTE: We assume that the absolute value of the
+	   difference between the two incoming values is _not_
+	   greater than CACHEOP_SEQ_MIDPOINT. This assumption
+	   holds as it implies that it is very _unlikely_ that 2
+	   billion CacheOp requests could have been made between
+	   a single client's CacheOp request & the corresponding
+	   fence check. This code sequence is hopefully a _more_
+	   hand optimised (branchless) version of this:
+
+		   x = ui32CompletedOpSeqNum
+		   y = ui32FenceOpSeqNum
+
+		   if (|x - y| < CACHEOP_SEQ_MIDPOINT)
+			   return (x - y) >= 0 ? true : false
+		   else
+			   return (y - x) >= 0 ? true : false
+	 */
+	ui32Rebase = CACHEOP_SEQ_MIDPOINT - ui32CompletedSeqNum;
+
+	/* ui32Rebase could be either positive or negative; in
+	   either case we still perform the operation using unsigned
+	   semantics, as 2's complement arithmetic always means
+	   we end up with the correct result */
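+	/* Worked example, assuming CACHEOP_SEQ_MIDPOINT is 0x80000000: with
+	   ui32CompletedSeqNum=0x00000005 (counter has wrapped) and
+	   ui32FenceSeqNum=0xFFFFFFFB, ui32Rebase=0x7FFFFFFB; the rebased values
+	   are 0x80000000 and 0x7FFFFFF6 respectively, so the fence is correctly
+	   reported as satisfied despite the wrap-around */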
+	ui32RebasedCompletedNum = ui32Rebase + ui32CompletedSeqNum;
+	ui32RebasedFenceNum = ui32Rebase + ui32FenceSeqNum;
+
+	return (ui32RebasedCompletedNum >= ui32RebasedFenceNum);
+}
+
+static INLINE PVRSRV_ERROR CacheOpTimelineBind(CACHEOP_WORK_ITEM *psCacheOpWorkItem,
+											   PVRSRV_TIMELINE iTimeline)
+{
+	PVRSRV_ERROR eError;
+
+	/* Always default the incoming CacheOp work-item to safe values */
+	psCacheOpWorkItem->sSWTimelineObj = (SYNC_TIMELINE_OBJ)(uintptr_t)NULL;
+	psCacheOpWorkItem->iTimeline = PVRSRV_NO_UPDATE_TIMELINE_REQUIRED;
+	if (iTimeline == PVRSRV_NO_UPDATE_TIMELINE_REQUIRED)
+	{
+		return PVRSRV_OK;
+	}
+
+	psCacheOpWorkItem->iTimeline = iTimeline;
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	if (! PVRSRVIsTimelineValidKM(iTimeline))
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	eError = SyncSWGetTimelineObj(iTimeline, &psCacheOpWorkItem->sSWTimelineObj);
+	PVR_LOG_IF_ERROR(eError, "SyncSWGetTimelineObj");
+#else
+	eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+
+	return eError;
+}
+
+static INLINE PVRSRV_ERROR CacheOpTimelineExec(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+	PVRSRV_ERROR eError;
+
+	if (psCacheOpWorkItem->iTimeline == PVRSRV_NO_UPDATE_TIMELINE_REQUIRED)
+	{
+		return PVRSRV_OK;
+	}
+	PVR_ASSERT(psCacheOpWorkItem->sSWTimelineObj);
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	eError = SyncSWTimelineAdvanceKM(psCacheOpWorkItem->sSWTimelineObj);
+	(void) SyncSWTimelineReleaseKM(psCacheOpWorkItem->sSWTimelineObj);
+#else
+	eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+
+	return eError;
+}
+
+static INLINE PVRSRV_ERROR CacheOpQListExecGlobal(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32NumOfEntries;
+	CACHEOP_WORK_ITEM *psCacheOpWorkItem;
+#if defined(CACHEOP_DEBUG)
+	IMG_UINT64 uiTimeNow = 0;
+	IMG_UINT64 ui64DequeuedTime;
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+#endif
+	PVR_ASSERT(!gsCwq.bNoGlobalFlushImpl);
+
+	/* Take the current snapshot of queued CacheOps before we issue a global cache
+	   flush operation, so that we retire the right number of CacheOps that have
+	   been affected by the to-be-executed global CacheOp */
+	ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter);
+	if (OSAtomicRead(&gsCwq.hWriteCounter) < OSAtomicRead(&gsCwq.hReadCounter))
+	{
+		/* This branch handles the case where the write-counter has wrapped around
+		   in value space. The logic works because the read-counter does not change
+		   value for the duration of this function, so it cannot also wrap round
+		   whilst the number of entries is being determined; that is to say, the
+		   consumer in this framework is single threaded and this function is that
+		   consumer thread */
+		ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hReadCounter, &gsCwq.hWriteCounter);
+
+		/* Two's complement arithmetic gives the number of entries */
+		ui32NumOfEntries = CACHEOP_INDICES_MAX - ui32NumOfEntries;
+	}
+	if (! ui32NumOfEntries)
+	{
+		return PVRSRV_OK;
+	}
+#if defined(CACHEOP_DEBUG)
+	PVR_ASSERT(ui32NumOfEntries < CACHEOP_INDICES_MAX);
+#endif
+
+	/* Use the current/latest queue-tail work-item's GF/SeqNum to predicate GF */
+	psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxRead(&gsCwq.hWriteCounter)];
+	CacheOpQItemReadCheck(psCacheOpWorkItem);
+#if defined(CACHEOP_DEBUG)
+	/* The time waiting in the queue to be serviced */
+	ui64DequeuedTime = OSClockns64();
+#endif
+
+	/* Check if items between [hRead/hWrite]Counter can be discarded before issuing GF */
+	if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > psCacheOpWorkItem->ui32GFSeqNum)
+	{
+		/* The currently discarded CacheOp item updates gsCwq.hCompletedSeqNum */
+		OSAtomicWrite(&gsCwq.hCompletedSeqNum, psCacheOpWorkItem->ui32OpSeqNum);
+#if defined(CACHEOP_DEBUG)
+		gsCwq.ui32KMDiscards += ui32NumOfEntries;
+#endif
+	}
+	else
+	{
+		eError = CacheOpGlobalFlush();
+		PVR_LOGR_IF_ERROR(eError, "CacheOpGlobalFlush");
+#if defined(CACHEOP_DEBUG)
+		uiTimeNow = OSClockns64();
+		sCacheOpWorkItem.bDeferred = IMG_TRUE;
+		sCacheOpWorkItem.ui64ExecuteTime = uiTimeNow;
+		sCacheOpWorkItem.psPMR = gsCwq.psInfoPagePMR;
+		sCacheOpWorkItem.pid = OSGetCurrentProcessID();
+		sCacheOpWorkItem.uiCacheOp = PVRSRV_CACHE_OP_GLOBAL;
+		sCacheOpWorkItem.ui64DequeuedTime = ui64DequeuedTime;
+		sCacheOpWorkItem.ui64EnqueuedTime = psCacheOpWorkItem->ui64EnqueuedTime;
+		sCacheOpWorkItem.ui32OpSeqNum = gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+#endif
+	}
+
+	while (ui32NumOfEntries)
+	{
+		psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hReadCounter)];
+		CacheOpQItemReadCheck(psCacheOpWorkItem);
+
+#if defined(CACHEOP_DEBUG)
+		if (psCacheOpWorkItem->uiCacheOp != PVRSRV_CACHE_OP_GLOBAL)
+		{
+			psCacheOpWorkItem->bRBF = IMG_FALSE;
+			if (! uiTimeNow)
+			{
+				/* Measure deferred queueing overhead only */
+				uiTimeNow = OSClockns64();
+			}
+			psCacheOpWorkItem->ui64ExecuteTime = uiTimeNow;
+			psCacheOpWorkItem->ui64DequeuedTime = ui64DequeuedTime;
+			CacheOpStatsExecLogWrite(psCacheOpWorkItem);
+		}
+		/* Something's gone horribly wrong if these 2 counters are identical at this point */
+		PVR_ASSERT(OSAtomicRead(&gsCwq.hWriteCounter) != OSAtomicRead(&gsCwq.hReadCounter));
+#endif
+
+		/* If CacheOp is timeline(d), notify timeline waiters */
+		eError = CacheOpTimelineExec(psCacheOpWorkItem);
+		PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+
+		/* Mark index as ready for recycling for next CacheOp */
+		CacheOpQItemRecycle(psCacheOpWorkItem);
+		(void) CacheOpIdxIncrement(&gsCwq.hReadCounter);
+		ui32NumOfEntries = ui32NumOfEntries - 1;
+	}
+
+#if defined(CACHEOP_DEBUG)
+	if (uiTimeNow)
+	{
+		/* Only log GF that was actually executed */
+		CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+	}
+#endif
+
+	return eError;
+}
+
+static PVRSRV_ERROR CacheOpQListExecRangeBased(void)
+{
+	IMG_UINT32 ui32NumOfEntries;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32WriteCounter = ~0;
+	IMG_BOOL bUsedGlobalFlush = IMG_FALSE;
+	CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+#if defined(CACHEOP_DEBUG)
+	IMG_UINT64 uiTimeNow = 0;
+#endif
+
+	/* Take a snapshot of the current count of deferred entries at this juncture */
+	ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter);
+	if (! ui32NumOfEntries)
+	{
+		return PVRSRV_OK;
+	}
+#if defined(CACHEOP_DEBUG)
+	PVR_ASSERT(ui32NumOfEntries < CACHEOP_INDICES_MAX);
+#endif
+
+	while (ui32NumOfEntries)
+	{
+		if (! OSAtomicRead(&gsCwq.hReadCounter))
+		{
+			/* Normally, the read-counter will trail the write-counter until the write
+			   counter wraps around to zero. Under this condition we (re)calculate, as the
+			   read-counter too is wrapping around at this point */
+			ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter);
+		}
+#if defined(CACHEOP_DEBUG)
+		/* Something's gone horribly wrong if these 2 counters are identical at this point */
+		PVR_ASSERT(OSAtomicRead(&gsCwq.hWriteCounter) != OSAtomicRead(&gsCwq.hReadCounter));
+#endif
+
+		/* Select the next pending deferred work-item for RBF cache maintenance */
+		psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hReadCounter)];
+		CacheOpQItemReadCheck(psCacheOpWorkItem);
+#if defined(CACHEOP_DEBUG)
+		/* The time waiting in the queue to be serviced */
+		psCacheOpWorkItem->ui64DequeuedTime = OSClockns64();
+#endif
+
+		/* The following CacheOpPMRExec() could trigger a GF, so we (re)read this
+		   counter just in case, so that we know which pending CacheOp(s) will
+		   benefit from the soon-to-be-executed GF */
+		ui32WriteCounter = CacheOpConfigSupports(CACHEOP_CONFIG_KGF) ?
+								OSAtomicRead(&gsCwq.hWriteCounter) : ui32WriteCounter;
+
+		eError = CacheOpPMRExec(psCacheOpWorkItem->psPMR,
+								NULL, /* No UM virtual address */
+								psCacheOpWorkItem->uiOffset,
+								psCacheOpWorkItem->uiSize,
+								psCacheOpWorkItem->uiCacheOp,
+								psCacheOpWorkItem->ui32GFSeqNum,
+								IMG_TRUE, /* PMR is pre-validated */
+								&bUsedGlobalFlush);
+		if (eError != PVRSRV_OK)
+		{
+#if defined(CACHEOP_DEBUG)
+			PVR_LOG(("Deferred CacheOpPMRExec failed: PID:%d PMR:%p Offset:%" IMG_UINT64_FMTSPECX " Size:%" IMG_UINT64_FMTSPECX " CacheOp:%d, error: %d",
+					(IMG_UINT32)psCacheOpWorkItem->pid,
+#else
+			PVR_LOG(("Deferred CacheOpPMRExec failed: PMR:%p Offset: %" IMG_UINT64_FMTSPECX "Size:%" IMG_UINT64_FMTSPECX " CacheOp:%d, error: %d",
+#endif
+					psCacheOpWorkItem->psPMR,
+					psCacheOpWorkItem->uiOffset,
+					psCacheOpWorkItem->uiSize,
+					psCacheOpWorkItem->uiCacheOp,
+					eError));
+		}
+		else if (bUsedGlobalFlush)
+		{
+#if defined(CACHEOP_DEBUG)
+			psCacheOpWorkItem->ui32OpSeqNum = gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+#endif
+			break;
+		}
+
+#if defined(CACHEOP_DEBUG)
+		if (psCacheOpWorkItem->uiCacheOp != PVRSRV_CACHE_OP_GLOBAL)
+		{
+			psCacheOpWorkItem->bRBF = IMG_TRUE;
+			psCacheOpWorkItem->ui64ExecuteTime = OSClockns64();
+			CacheOpStatsExecLogWrite(psCacheOpWorkItem);
+		}
+		else
+		{
+			PVR_ASSERT(!gsCwq.bNoGlobalFlushImpl);
+		}
+#endif
+
+		/* The currently executed CacheOp item updates gsCwq.hCompletedSeqNum.
+		   NOTE: This CacheOp item might be a discard item, if so its seqNum
+		   still updates the gsCwq.hCompletedSeqNum */
+		OSAtomicWrite(&gsCwq.hCompletedSeqNum, psCacheOpWorkItem->ui32OpSeqNum);
+		OSAtomicSubtract(&gsCwq.hDeferredSize, psCacheOpWorkItem->uiSize);
+
+		/* If CacheOp is timeline(d), notify timeline waiters */
+		eError = CacheOpTimelineExec(psCacheOpWorkItem);
+		PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+
+		/* Indicate that this CCB work-item slot is now free for (re)use */
+		CacheOpQItemRecycle(psCacheOpWorkItem);
+		(void) CacheOpIdxIncrement(&gsCwq.hReadCounter);
+		ui32NumOfEntries = ui32NumOfEntries - 1;
+	}
+
+	if (bUsedGlobalFlush)
+	{
+#if defined(CACHEOP_DEBUG)
+		uiTimeNow = OSClockns64();
+		PVR_ASSERT(OSAtomicRead(&gsCwq.hWriteCounter) != OSAtomicRead(&gsCwq.hReadCounter));
+#endif
+
+		/* Snapshot of queued CacheOps before the global cache flush was issued */
+		ui32NumOfEntries = ui32WriteCounter - OSAtomicRead(&gsCwq.hReadCounter);
+		if (ui32WriteCounter < OSAtomicRead(&gsCwq.hReadCounter))
+		{
+			/* Branch handles when the write-counter has wrapped-around in value space */
+			ui32NumOfEntries = OSAtomicRead(&gsCwq.hReadCounter) - ui32WriteCounter;
+			ui32NumOfEntries = CACHEOP_INDICES_MAX - ui32NumOfEntries;
+		}
+
+		while (ui32NumOfEntries)
+		{
+			CacheOpQItemReadCheck(psCacheOpWorkItem);
+
+#if defined(CACHEOP_DEBUG)
+			psCacheOpWorkItem->bRBF = IMG_FALSE;
+			psCacheOpWorkItem->ui64ExecuteTime = uiTimeNow;
+			if (psCacheOpWorkItem->uiCacheOp == PVRSRV_CACHE_OP_GLOBAL)
+			{
+				PVR_ASSERT(!gsCwq.bNoGlobalFlushImpl);
+				psCacheOpWorkItem->pid = OSGetCurrentProcessID();
+			}
+			CacheOpStatsExecLogWrite(psCacheOpWorkItem);
+#endif
+
+			eError = CacheOpTimelineExec(psCacheOpWorkItem);
+			PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+
+			/* Mark index as ready for recycling for next CacheOp */
+			CacheOpQItemRecycle(psCacheOpWorkItem);
+			(void) CacheOpIdxIncrement(&gsCwq.hReadCounter);
+			ui32NumOfEntries = ui32NumOfEntries - 1;
+			psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hReadCounter)];
+		}
+	}
+
+	return eError;
+}
+
+static INLINE PVRSRV_ERROR CacheOpQListExec(void)
+{
+	PVRSRV_ERROR eError;
+
+	if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF) &&
+		(!CacheOpConfigSupports(CACHEOP_CONFIG_KRBF)
+		 || OSAtomicRead(&gsCwq.hDeferredSize) >= gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD]))
+	{
+		eError = CacheOpQListExecGlobal();
+		PVR_LOG_IF_ERROR(eError, "CacheOpQListExecGlobal");
+	}
+	else
+	{
+		eError = CacheOpQListExecRangeBased();
+		PVR_LOG_IF_ERROR(eError, "CacheOpQListExecRangeBased");
+	}
+
+	/* Signal any waiting threads blocked on CacheOp fence checks update
+	   completed sequence number to last queue work item */
+	eError = OSEventObjectSignal(gsCwq.hClientWakeUpEvtObj);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+	return eError;
+}
+
+static void CacheOpThread(void *pvData)
+{
+	PVRSRV_DATA *psPVRSRVData = pvData;
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR eError;
+
+	/* Store the process id (pid) of the CacheOp worker thread */
+	gsCwq.uiWorkerThreadPid = OSGetCurrentProcessID();
+
+	/* Open CacheOp thread event object, abort driver if event object open fails */
+	eError = OSEventObjectOpen(gsCwq.hThreadWakeUpEvtObj, &hOSEvent);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectOpen");
+
+	/* While driver is in good state & loaded, perform pending cache maintenance */
+	while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && gsCwq.bInit)
+	{
+		/* Sleep-wait here until signalled for new queued CacheOp work items;
+		   when woken up, drain the deferred queue completely before the next event-wait */
+		eError = OSEventObjectWaitTimeout(hOSEvent, CACHEOP_THREAD_WAIT_TIMEOUT);
+		while (CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter))
+		{
+			eError = CacheOpQListExec();
+			PVR_LOG_IF_ERROR(eError, "CacheOpQListExec");
+		}
+	}
+
+	eError = CacheOpQListExec();
+	PVR_LOG_IF_ERROR(eError, "CacheOpQListExec");
+
+	eError = OSEventObjectClose(hOSEvent);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+
+static PVRSRV_ERROR CacheOpBatchExecTimeline(PVRSRV_TIMELINE iTimeline,
+											 IMG_BOOL bUsedGlobalFlush,
+											 IMG_UINT32 ui32CurrentFenceSeqNum,
+											 IMG_UINT32 *pui32NextFenceSeqNum)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32NextIdx;
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+
+	eError = CacheOpTimelineBind(&sCacheOpWorkItem, iTimeline);
+	PVR_LOGR_IF_ERROR(eError, "CacheOpTimelineBind");
+
+	OSLockAcquire(gsCwq.hDeferredLock);
+
+	/*
+	   Check that there is deferred queueing space available and that nothing is
+	   currently queued. The second check is required because Android, where
+	   timelines are used, imposes a signalling deadline of 1000ms on timelines
+	   and complains otherwise. Seeing we cannot be sure how long the CacheOp(s)
+	   presently in the queue will take, we should not send this timeline down
+	   the queue as well.
+	 */
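+	/* Below: a zero CacheOpIdxSpan() means nothing is currently queued, whilst
+	   ui32NextIdx colliding with the read index would mean the queue is full */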
+	ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter);
+	if (!CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter) &&
+		CacheOpIdxRead(&gsCwq.hReadCounter) != ui32NextIdx)
+	{
+		psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx];
+		CacheOpQItemWriteCheck(psCacheOpWorkItem);
+
+		psCacheOpWorkItem->sSWTimelineObj = sCacheOpWorkItem.sSWTimelineObj;
+		psCacheOpWorkItem->iTimeline = sCacheOpWorkItem.iTimeline;
+		psCacheOpWorkItem->ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+		psCacheOpWorkItem->uiCacheOp = PVRSRV_CACHE_OP_TIMELINE;
+		psCacheOpWorkItem->uiOffset = (IMG_DEVMEM_OFFSET_T)0;
+		psCacheOpWorkItem->uiSize = (IMG_DEVMEM_SIZE_T)0;
+		psCacheOpWorkItem->ui32GFSeqNum = 0;
+		/* Defer timeline using information page PMR */
+		psCacheOpWorkItem->psPMR = gsCwq.psInfoPagePMR;
+		eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+		PVR_LOGG_IF_ERROR(eError, "PMRLockSysPhysAddresses", e0);
+#if defined(CACHEOP_DEBUG)
+		psCacheOpWorkItem->pid = OSGetCurrentClientProcessIDKM();
+		psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64();
+		gsCwq.ui32ServerASync += 1;
+		gsCwq.ui32ServerDTL += 1;
+#endif
+
+		/* Mark index ready for cache maintenance */
+		(void) CacheOpIdxIncrement(&gsCwq.hWriteCounter);
+
+		OSLockRelease(gsCwq.hDeferredLock);
+
+		/* Signal the CacheOp thread to ensure this timeline gets processed */
+		eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+	}
+	else
+	{
+		IMG_BOOL bExecTimeline = IMG_TRUE;
+		IMG_UINT32 ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum);
+
+		OSLockRelease(gsCwq.hDeferredLock);
+
+		/*
+		   This pathway requires careful handling here as the client CacheOp(s) predicated on this
+		   timeline might have been broken-up (i.e. batched) into several server requests by client:
+		   1 - In the first case, a CacheOp from an earlier batch is still in-flight, so we check if
+		   this is the case because even though we might have executed all the CacheOps in this batch
+		   synchronously, we cannot be sure that any in-flight CacheOp pending on this client is not
+		   predicated on this timeline hence we need to synchronise here for safety by fencing until
+		   all in-flight CacheOps are completed. NOTE: On Android, this might cause issues due to
+		   timelines notification deadlines so we do not fence (i.e. cannot sleep or wait) here to
+		   synchronise, instead nudge services client to retry the request if there is no GF support.
+		   2 - In the second case, there is no in-flight CacheOp for this client in which case just
+		   continue processing as normal.
+		 */
+		if (!bUsedGlobalFlush && !CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32CurrentFenceSeqNum))
+		{
+#if defined(ANDROID)
+			bExecTimeline = IMG_TRUE;
+			if (CacheOpGlobalFlush() != PVRSRV_OK)
+			{
+				bExecTimeline = IMG_FALSE;
+				eError = PVRSRV_ERROR_RETRY;
+			}
+#else
+			eError = CacheOpFence ((RGXFWIF_DM)0, ui32CurrentFenceSeqNum);
+			PVR_LOG_IF_ERROR(eError, "CacheOpFence");
+
+			/* CacheOpFence() might have triggered a GF so we take advantage of it */
+			if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32CurrentFenceSeqNum)
+			{
+				*pui32NextFenceSeqNum = 0;
+			}
+#endif
+		}
+
+		if (bExecTimeline)
+		{
+			/* CacheOp fence requirement met, signal timeline */
+			eError = CacheOpTimelineExec(&sCacheOpWorkItem);
+			PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+		}
+	}
+
+	return eError;
+e0:
+	if (psCacheOpWorkItem)
+	{
+		/* Need to ensure we leave this CacheOp QItem in the proper recycled state */
+		CacheOpQItemRecycle(psCacheOpWorkItem);
+		OSLockRelease(gsCwq.hDeferredLock);
+	}
+
+	return eError;
+}
+
+static PVRSRV_ERROR CacheOpBatchExecRangeBased(PMR **ppsPMR,
+											IMG_CPU_VIRTADDR *pvAddress,
+											IMG_DEVMEM_OFFSET_T *puiOffset,
+											IMG_DEVMEM_SIZE_T *puiSize,
+											PVRSRV_CACHE_OP *puiCacheOp,
+											IMG_UINT32 ui32NumCacheOps,
+											PVRSRV_TIMELINE uiTimeline,
+											IMG_UINT32 ui32GlobalFlushSeqNum,
+											IMG_UINT32 uiCurrentFenceSeqNum,
+											IMG_UINT32 *pui32NextFenceSeqNum)
+{
+	IMG_UINT32 ui32Idx;
+	IMG_UINT32 ui32NextIdx;
+	IMG_BOOL bBatchHasTimeline;
+	IMG_BOOL bCacheOpConfigKDF;
+	IMG_BOOL bCacheOpConfigKRBF;
+	IMG_DEVMEM_SIZE_T uiLogicalSize;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bUseGlobalFlush = IMG_FALSE;
+	CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+#if defined(CACHEOP_DEBUG)
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	IMG_UINT32 ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+	sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+#endif
+
+	/* Check if batch has an associated timeline update */
+	bBatchHasTimeline = puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_TIMELINE;
+	puiCacheOp[ui32NumCacheOps-1] &= ~(PVRSRV_CACHE_OP_GLOBAL | PVRSRV_CACHE_OP_TIMELINE);
+
+	/* Check if config. supports kernel deferring of cacheops */
+	bCacheOpConfigKDF = CacheOpConfigSupports(CACHEOP_CONFIG_KDF);
+	bCacheOpConfigKRBF = CacheOpConfigSupports(CACHEOP_CONFIG_KRBF);
+
+	/*
+	   The client expects the next fence seqNum to be zero unless the server has
+	   deferred at least one CacheOp in the submitted queue, in which case the
+	   server informs the client of the last CacheOp seqNum deferred in this batch.
+	*/
+	for (*pui32NextFenceSeqNum = 0, ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+	{
+		if (bCacheOpConfigKDF)
+		{
+			/* Check if there is deferred queueing space available */
+			ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter);
+			if (ui32NextIdx != CacheOpIdxRead(&gsCwq.hReadCounter))
+			{
+				psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx];
+			}
+		}
+
+		/*
+		   Normally we would like to defer client CacheOp(s), but we may not always
+		   be in a position to do so, or it may not be necessary, for the following
+		   reasons:
+		   0 - There is currently no queueing space left to enqueue this CacheOp; this
+		       might imply the system is queueing more requests than can be consumed
+		       by the CacheOp thread in time.
+		   1 - Batch has a timeline; action this now due to Android timeline
+		       signalling deadlines.
+		   2 - Configuration does not support deferring of cache maintenance
+		       operations, so we execute the batch synchronously/immediately.
+		   3 - CacheOp has an INVALIDATE; as this is used to transfer device memory
+		       buffer ownership back to the processor, we cannot defer it so action
+		       it immediately.
+		   4 - CacheOp size is too small (a single OS page) to warrant the overhead
+		       of deferment; this is not considered if KRBF is absent, as that
+		       implies defer-all.
+		   5 - CacheOp size is OK for deferment, but a client virtual address is
+		       supplied, so we might as well take advantage of said VA and flush
+		       immediately in UM context.
+		   6 - Prevent DoS attack if a malicious client queues something very large,
+		       say 1GiB, and the processor cache ISA does not have a global flush
+		       implementation; here we upper-bound this threshold to
+		       PVR_DIRTY_BYTES_FLUSH_THRESHOLD.
+		   7 - Ensure QoS (load balancing) by not over-loading the queue with too
+		       many requests; here the (pseudo) alternate queue is the user context,
+		       so we execute directly on it if the processor cache ISA does not have
+		       a global flush implementation.
+		*/
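+		/* The predicate below tests the reasons above in order: queueing space
+		   (0), timeline (1), KDF config (2), INVALIDATE (3), small size (4),
+		   client VA (5) and the two no-GF upper bounds (6/7) */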
+		if (!psCacheOpWorkItem  ||
+			bBatchHasTimeline   ||
+			!bCacheOpConfigKDF  ||
+			puiCacheOp[ui32Idx] & PVRSRV_CACHE_OP_INVALIDATE ||
+			(bCacheOpConfigKRBF && puiSize[ui32Idx] <= (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize) ||
+			(pvAddress[ui32Idx] && puiSize[ui32Idx] < (IMG_DEVMEM_SIZE_T)gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD]) ||
+			(gsCwq.bNoGlobalFlushImpl && puiSize[ui32Idx] >= (IMG_DEVMEM_SIZE_T)(gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] << 2)) ||
+			(gsCwq.bNoGlobalFlushImpl && OSAtomicRead(&gsCwq.hDeferredSize) >= gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] << CACHEOP_INDICES_LOG2_SIZE))
+		{
+			/* When the CacheOp thread is not keeping up, trash the d-cache */
+			bUseGlobalFlush = !psCacheOpWorkItem && bCacheOpConfigKDF ? IMG_TRUE : IMG_FALSE;
+#if defined(CACHEOP_DEBUG)
+			sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+			gsCwq.ui32ServerSync += 1;
+#endif
+			psCacheOpWorkItem = NULL;
+
+			eError = CacheOpPMRExec(ppsPMR[ui32Idx],
+									pvAddress[ui32Idx],
+									puiOffset[ui32Idx],
+									puiSize[ui32Idx],
+									puiCacheOp[ui32Idx],
+									ui32GlobalFlushSeqNum,
+									IMG_FALSE,
+									&bUseGlobalFlush);
+			PVR_LOGG_IF_ERROR(eError, "CacheOpExecPMR", e0);
+
+#if defined(CACHEOP_DEBUG)
+			sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+			sCacheOpWorkItem.bRBF = !bUseGlobalFlush;
+			sCacheOpWorkItem.ui32OpSeqNum = bUseGlobalFlush ?
+				gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] : ui32OpSeqNum;
+			sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx];
+			sCacheOpWorkItem.uiSize = puiSize[ui32Idx];
+			sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx];
+			sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx];
+			CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#endif
+
+			if (bUseGlobalFlush) break;
+			continue;
+		}
+
+		/* Need to validate request parameters here before enqueueing */
+		eError = PMR_LogicalSize(ppsPMR[ui32Idx], &uiLogicalSize);
+		PVR_LOGG_IF_ERROR(eError, "PMR_LogicalSize", e0);
+		eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+		PVR_LOGG_IF_FALSE(((puiOffset[ui32Idx]+puiSize[ui32Idx]) <= uiLogicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, e0);
+		eError = PVRSRV_OK;
+
+		/* For safety, take reference here in user context */
+		eError = PMRLockSysPhysAddresses(ppsPMR[ui32Idx]);
+		PVR_LOGG_IF_ERROR(eError, "PMRLockSysPhysAddresses", e0);
+
+		OSLockAcquire(gsCwq.hDeferredLock);
+
+		/* Select next item off the queue to defer with */
+		ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter);
+		if (ui32NextIdx != CacheOpIdxRead(&gsCwq.hReadCounter))
+		{
+			psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx];
+			CacheOpQItemWriteCheck(psCacheOpWorkItem);
+		}
+		else
+		{
+			/* Retry, disable KDF for this batch */
+			OSLockRelease(gsCwq.hDeferredLock);
+			bCacheOpConfigKDF = IMG_FALSE;
+			psCacheOpWorkItem = NULL;
+			ui32Idx = ui32Idx - 1;
+			continue;
+		}
+
+		/* The timeline needs to be looked-up (i.e. bound) in the user context
+		   before deferring into the CacheOp thread kernel context */
+		eError = CacheOpTimelineBind(psCacheOpWorkItem, PVRSRV_NO_UPDATE_TIMELINE_REQUIRED);
+		PVR_LOGG_IF_ERROR(eError, "CacheOpTimelineBind", e1);
+
+		/* Prepare & enqueue next deferred work item for CacheOp thread */
+		psCacheOpWorkItem->ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+		*pui32NextFenceSeqNum = psCacheOpWorkItem->ui32OpSeqNum;
+		psCacheOpWorkItem->ui32GFSeqNum = ui32GlobalFlushSeqNum;
+		psCacheOpWorkItem->uiCacheOp = puiCacheOp[ui32Idx];
+		psCacheOpWorkItem->uiOffset = puiOffset[ui32Idx];
+		psCacheOpWorkItem->uiSize = puiSize[ui32Idx];
+		psCacheOpWorkItem->psPMR = ppsPMR[ui32Idx];
+#if defined(CACHEOP_DEBUG)
+		psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64();
+		psCacheOpWorkItem->pid = sCacheOpWorkItem.pid;
+		psCacheOpWorkItem->bDeferred = IMG_TRUE;
+		psCacheOpWorkItem->bKMReq = IMG_FALSE;
+		psCacheOpWorkItem->bUMF = IMG_FALSE;
+		gsCwq.ui32ServerASync += 1;
+#endif
+
+		/* Increment deferred size & mark index ready for cache maintenance */
+		OSAtomicAdd(&gsCwq.hDeferredSize, (IMG_UINT32)puiSize[ui32Idx]);
+		(void) CacheOpIdxIncrement(&gsCwq.hWriteCounter);
+
+		OSLockRelease(gsCwq.hDeferredLock);
+		psCacheOpWorkItem = NULL;
+	}
+
+	/* Signal the CacheOp thread to ensure these items get processed */
+	eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+	if (bUseGlobalFlush)
+	{
+#if defined(CACHEOP_DEBUG)
+		/* GF was already logged in the loop above, so the rest, if any, are discards */
+		sCacheOpWorkItem.ui64ExecuteTime = sCacheOpWorkItem.ui64EnqueuedTime;
+		sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+		while (++ui32Idx < ui32NumCacheOps)
+		{
+			sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx];
+			sCacheOpWorkItem.uiSize = puiSize[ui32Idx];
+			sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx];
+			sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx];
+			CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+			gsCwq.ui32KMDiscards += 1;
+		}
+#endif
+
+		/* No next UM fence seqNum */
+		*pui32NextFenceSeqNum = 0;
+	}
+
+e1:
+	if (psCacheOpWorkItem)
+	{
+		/* Need to ensure we leave this CacheOp QItem in the proper recycled state */
+		CacheOpQItemRecycle(psCacheOpWorkItem);
+		OSLockRelease(gsCwq.hDeferredLock);
+	}
+e0:
+	if (bBatchHasTimeline)
+	{
+		PVRSRV_ERROR eError2;
+		eError2 = CacheOpBatchExecTimeline(uiTimeline, bUseGlobalFlush, uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
+		eError = (eError2 == PVRSRV_ERROR_RETRY) ? eError2 : eError;
+	}
+
+	return eError;
+}
+
+static PVRSRV_ERROR CacheOpBatchExecGlobal(PMR **ppsPMR,
+									IMG_CPU_VIRTADDR *pvAddress,
+									IMG_DEVMEM_OFFSET_T *puiOffset,
+									IMG_DEVMEM_SIZE_T *puiSize,
+									PVRSRV_CACHE_OP *puiCacheOp,
+									IMG_UINT32 ui32NumCacheOps,
+									PVRSRV_TIMELINE uiTimeline,
+									IMG_UINT32 ui32GlobalFlushSeqNum,
+									IMG_UINT32 uiCurrentFenceSeqNum,
+									IMG_UINT32 *pui32NextFenceSeqNum)
+{
+	IMG_UINT32 ui32Idx;
+	IMG_BOOL bBatchHasTimeline;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bUseGlobalFlush = IMG_FALSE;
+	CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+#if	defined(CACHEOP_DEBUG)
+	IMG_DEVMEM_SIZE_T uiTotalSize = 0;
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+#endif
+#if !defined(CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH)
+	PVR_LOGR_IF_ERROR(PVRSRV_ERROR_NOT_SUPPORTED, CACHEOP_NO_GFLUSH_ERROR_STRING);
+#endif
+	PVR_UNREFERENCED_PARAMETER(pvAddress);
+
+	/* Check if batch has an associated timeline update request */
+	bBatchHasTimeline = puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_TIMELINE;
+	puiCacheOp[ui32NumCacheOps-1] &= ~(PVRSRV_CACHE_OP_GLOBAL | PVRSRV_CACHE_OP_TIMELINE);
+
+	/* Skip the operation if an else-when GF has occurred in the interim */
+	if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32GlobalFlushSeqNum)
+	{
+#if	defined(CACHEOP_DEBUG)
+		sCacheOpWorkItem.ui32OpSeqNum = ui32GlobalFlushSeqNum;
+#endif
+		bUseGlobalFlush = IMG_TRUE;
+		*pui32NextFenceSeqNum = 0;
+		goto exec_timeline;
+	}
+
+	/* Here we need to check whether the client batch contains an INVALIDATE CacheOp */
+	for (*pui32NextFenceSeqNum = 0, ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+	{
+#if	defined(CACHEOP_DEBUG)
+		IMG_DEVMEM_SIZE_T uiLogicalSize;
+		uiTotalSize += puiSize[ui32Idx];
+		/* There is no need to validate request parameters as we are about
+		   to issue a GF, but skipping this might lead to issues being
+		   reproducible in one config but not the other, so validate under debug */
+		eError = PMR_LogicalSize(ppsPMR[ui32Idx], &uiLogicalSize);
+		PVR_LOGG_IF_ERROR(eError, "PMR_LogicalSize", e0);
+		eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+		PVR_LOGG_IF_FALSE(((puiOffset[ui32Idx]+puiSize[ui32Idx]) <= uiLogicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, e0);
+		eError = PVRSRV_OK;
+#endif
+		if (puiCacheOp[ui32Idx] & PVRSRV_CACHE_OP_INVALIDATE)
+		{
+			/* Invalidates cannot be deferred */
+			bUseGlobalFlush = IMG_TRUE;
+#if	!defined(CACHEOP_DEBUG)
+			break;
+#endif
+		}
+	}
+
+	OSLockAcquire(gsCwq.hDeferredLock);
+
+	/*
+	   Normally we would like to defer client CacheOp(s), but we may not always be
+	   in a position to do so, for the following reasons:
+	   0 - Batch has an INVALIDATE; as this is used to transfer device memory buffer
+	       ownership back to the processor, we cannot defer it so action it
+	       immediately.
+	   1 - Configuration does not support deferring of cache maintenance operations,
+	       so we execute synchronously/immediately.
+	   2 - There is currently no queueing space left to enqueue this CacheOp; this
+	       might imply the system is queueing more requests than can be consumed by
+	       the CacheOp thread in time.
+	   3 - Batch has a timeline and there is currently something queued; we cannot
+	       defer because the currently queued operation(s) might take quite a while
+	       to action, which might cause a timeline deadline timeout.
+	*/
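+	/* The predicate below tests the reasons above in order: INVALIDATE seen
+	   (0), KDF config (1), queue full (2), timeline with non-empty queue (3) */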
+	if (bUseGlobalFlush ||
+		!CacheOpConfigSupports(CACHEOP_CONFIG_KDF) ||
+		CacheOpIdxNext(&gsCwq.hWriteCounter) == CacheOpIdxRead(&gsCwq.hReadCounter) ||
+		(bBatchHasTimeline && CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter)))
+	{
+		OSLockRelease(gsCwq.hDeferredLock);
+#if	defined(CACHEOP_DEBUG)
+		sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+		sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+#endif
+		eError = CacheOpGlobalFlush();
+		PVR_LOGG_IF_ERROR(eError, "CacheOpGlobalFlush", e0);
+		bUseGlobalFlush = IMG_TRUE;
+#if	defined(CACHEOP_DEBUG)
+		sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+		gsCwq.ui32ServerSync += 1;
+#endif
+		goto exec_timeline;
+	}
+
+	/* Select next item off queue to defer this GF and possibly timeline with */
+	psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hWriteCounter)];
+	CacheOpQItemWriteCheck(psCacheOpWorkItem);
+
+	/* Defer the GF using information page PMR */
+	psCacheOpWorkItem->psPMR = gsCwq.psInfoPagePMR;
+	eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+	PVR_LOGG_IF_ERROR(eError, "PMRLockSysPhysAddresses", e0);
+
+	/* Timeline object has to be looked-up here in user context */
+	eError = CacheOpTimelineBind(psCacheOpWorkItem, uiTimeline);
+	PVR_LOGG_IF_ERROR(eError, "CacheOpTimelineBind", e0);
+
+	/* Prepare & enqueue next deferred work item for CacheOp thread */
+	*pui32NextFenceSeqNum = CacheOpGetNextCommonSeqNum();
+	psCacheOpWorkItem->ui32OpSeqNum = *pui32NextFenceSeqNum;
+	psCacheOpWorkItem->ui32GFSeqNum = ui32GlobalFlushSeqNum;
+	psCacheOpWorkItem->uiCacheOp = PVRSRV_CACHE_OP_GLOBAL;
+	psCacheOpWorkItem->uiOffset = (IMG_DEVMEM_OFFSET_T)0;
+	psCacheOpWorkItem->uiSize = (IMG_DEVMEM_SIZE_T)0;
+#if defined(CACHEOP_DEBUG)
+	/* Note client pid & queueing time of deferred GF CacheOp */
+	psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64();
+	psCacheOpWorkItem->pid = sCacheOpWorkItem.pid;
+	OSAtomicAdd(&gsCwq.hDeferredSize, uiTotalSize);
+	psCacheOpWorkItem->uiSize = uiTotalSize;
+	psCacheOpWorkItem->bDeferred = IMG_TRUE;
+	psCacheOpWorkItem->bKMReq = IMG_FALSE;
+	psCacheOpWorkItem->bUMF = IMG_FALSE;
+	/* Client CacheOp is logged using the deferred seqNum */
+	sCacheOpWorkItem.ui32OpSeqNum = *pui32NextFenceSeqNum;
+	sCacheOpWorkItem.ui64EnqueuedTime = psCacheOpWorkItem->ui64EnqueuedTime;
+	sCacheOpWorkItem.ui64ExecuteTime = psCacheOpWorkItem->ui64EnqueuedTime;
+	/* Update the CacheOp statistics */
+	gsCwq.ui32ServerASync += 1;
+	gsCwq.ui32ServerDGF += 1;
+#endif
+
+	/* Mark index ready for cache maintenance */
+	(void) CacheOpIdxIncrement(&gsCwq.hWriteCounter);
+
+	OSLockRelease(gsCwq.hDeferredLock);
+
+	/* Signal the CacheOp thread to ensure this GF gets processed */
+	eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+exec_timeline:
+	if (bUseGlobalFlush && bBatchHasTimeline)
+	{
+		eError = CacheOpBatchExecTimeline(uiTimeline, bUseGlobalFlush, uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
+	}
+
+#if	defined(CACHEOP_DEBUG)
+	for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+	{
+		sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx];
+		sCacheOpWorkItem.uiSize = puiSize[ui32Idx];
+		sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx];
+		sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx];
+		if (bUseGlobalFlush)
+		{
+			if (sCacheOpWorkItem.ui64ExecuteTime && ui32Idx)
+			{
+				/* Only first item carries the real execution time, rest are discards */
+				sCacheOpWorkItem.ui64EnqueuedTime = sCacheOpWorkItem.ui64ExecuteTime;
+			}
+			gsCwq.ui32KMDiscards += !sCacheOpWorkItem.ui64ExecuteTime ? 1 : ui32Idx ? 1 : 0;
+		}
+		CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+	}
+#endif
+
+	return eError;
+e0:
+	if (psCacheOpWorkItem)
+	{
+		/* Need to ensure we leave this CacheOp QItem in the proper recycled state */
+		CacheOpQItemRecycle(psCacheOpWorkItem);
+		OSLockRelease(gsCwq.hDeferredLock);
+	}
+
+	if (bBatchHasTimeline)
+	{
+		PVRSRV_ERROR eError2;
+		eError2 = CacheOpBatchExecTimeline(uiTimeline, IMG_FALSE, uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
+		eError = (eError2 == PVRSRV_ERROR_RETRY) ? eError2 : eError;
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR CacheOpExecKM (PPVRSRV_DEVICE_NODE psDevNode,
+							void *pvVirtStart,
+							void *pvVirtEnd,
+							IMG_CPU_PHYADDR sCPUPhysStart,
+							IMG_CPU_PHYADDR sCPUPhysEnd,
+							PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_RETRY;
+#if	defined(CACHEOP_DEBUG)
+	IMG_BOOL bUsedGlobalFlush = IMG_FALSE;
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+#endif
+
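+	/* Attempt a single GF for sufficiently large requests; eError starts out
+	   as PVRSRV_ERROR_RETRY above, so a failed or absent GF falls through to
+	   the range-based maintenance below */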
+	if (gsCwq.bInit)
+	{
+		IMG_DEVMEM_SIZE_T uiSize = sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr;
+		if (uiSize >= (IMG_DEVMEM_SIZE_T)gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD])
+		{
+			eError = CacheOpGlobalFlush();
+		}
+	}
+
+	if (eError == PVRSRV_OK)
+	{
+#if	defined(CACHEOP_DEBUG)
+		bUsedGlobalFlush = IMG_TRUE;
+#endif
+	}
+	else
+	{
+		switch (uiCacheOp)
+		{
+			case PVRSRV_CACHE_OP_CLEAN:
+				OSCPUCacheCleanRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+				break;
+			case PVRSRV_CACHE_OP_INVALIDATE:
+				OSCPUCacheInvalidateRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+				break;
+			case PVRSRV_CACHE_OP_FLUSH:
+				OSCPUCacheFlushRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+				break;
+			default:
+				PVR_DPF((PVR_DBG_ERROR,	"%s: Invalid cache operation type %d", __FUNCTION__, uiCacheOp));
+				break;
+		}
+		eError = PVRSRV_OK;
+	}
+
+#if	defined(CACHEOP_DEBUG)
+	if (! CacheOpConfigSupports(CACHEOP_CONFIG_KLOG))
+	{
+		if (bUsedGlobalFlush)
+		{
+			/* Undo the accounting for server GF done in CacheOpGlobalFlush() */
+			gsCwq.ui32ServerGF -= 1;
+		}
+	}
+	else
+	{
+		gsCwq.ui32TotalExecOps += 1;
+		if (! bUsedGlobalFlush)
+		{
+			gsCwq.ui32ServerSync += 1;
+			gsCwq.ui32ServerRBF +=
+				((sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr) & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift;
+		}
+		sCacheOpWorkItem.uiOffset = 0;
+		sCacheOpWorkItem.bKMReq = IMG_TRUE;
+		sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+		sCacheOpWorkItem.bRBF = !bUsedGlobalFlush;
+		/* Use information page PMR for logging KM request */
+		sCacheOpWorkItem.psPMR = gsCwq.psInfoPagePMR;
+		sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+		sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+		sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+		sCacheOpWorkItem.uiSize = (sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr);
+		CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+	}
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR CacheOpExec(PMR *psPMR,
+						 IMG_UINT64 uiAddress,
+						 IMG_DEVMEM_OFFSET_T uiOffset,
+						 IMG_DEVMEM_SIZE_T uiSize,
+						 PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_ERROR eError;
+	IMG_CPU_VIRTADDR pvAddress = (IMG_CPU_VIRTADDR)(uintptr_t)uiAddress;
+	IMG_BOOL bUseGlobalFlush = uiSize >= gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD];
+#if	defined(CACHEOP_DEBUG)
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	gsCwq.ui32TotalExecOps += 1;
+	gsCwq.ui32ServerSync += 1;
+	sCacheOpWorkItem.psPMR = psPMR;
+	sCacheOpWorkItem.uiSize = uiSize;
+	sCacheOpWorkItem.uiOffset = uiOffset;
+	sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+	sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+	sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+	sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+#endif
+
+	eError = CacheOpPMRExec(psPMR,
+							pvAddress,
+							uiOffset,
+							uiSize,
+							uiCacheOp,
+							gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0],
+							IMG_FALSE,
+							&bUseGlobalFlush);
+	PVR_LOGG_IF_ERROR(eError, "CacheOpPMRExec", e0);
+
+#if	defined(CACHEOP_DEBUG)
+	sCacheOpWorkItem.bRBF = !bUseGlobalFlush;
+	sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+	CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#endif
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR CacheOpQueue (IMG_UINT32 ui32NumCacheOps,
+						   PMR **ppsPMR,
+						   IMG_UINT64 *puiAddress,
+						   IMG_DEVMEM_OFFSET_T *puiOffset,
+						   IMG_DEVMEM_SIZE_T *puiSize,
+						   PVRSRV_CACHE_OP *puiCacheOp,
+						   IMG_UINT32 ui32OpTimeline,
+						   IMG_UINT32 ui32ClientGFSeqNum,
+						   IMG_UINT32 uiCurrentFenceSeqNum,
+						   IMG_UINT32 *pui32NextFenceSeqNum)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_TIMELINE uiTimeline = (PVRSRV_TIMELINE)ui32OpTimeline;
+	IMG_CPU_VIRTADDR *pvAddress = (IMG_CPU_VIRTADDR*)(uintptr_t)puiAddress;
+#if !defined(CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH)
+	PVR_ASSERT(ui32ClientGFSeqNum == 0);
+#endif
+#if defined(CACHEOP_DEBUG)
+	gsCwq.ui32TotalExecOps += ui32NumCacheOps;
+#endif
+
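+	/* Dispatch: lone timeline requests are actioned immediately, explicit GF
+	   requests (or GF-only configs) go through CacheOpBatchExecGlobal(), and
+	   everything else defaults to CacheOpBatchExecRangeBased() */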
+	if (! gsCwq.bInit)
+	{
+		PVR_LOG(("CacheOp framework not initialised, failing request"));
+		return PVRSRV_ERROR_NOT_INITIALISED;
+	}
+	else if (! ui32NumCacheOps)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	/* Ensure any single timeline CacheOp request is processed immediately */
+	else if (ui32NumCacheOps == 1 && puiCacheOp[0] == PVRSRV_CACHE_OP_TIMELINE)
+	{
+		eError = CacheOpBatchExecTimeline(uiTimeline, IMG_TRUE, uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
+	}
+	/* The services client explicitly requested a GF, or the config is GF-only (i.e. no KRBF support); this takes priority */
+	else if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF) &&
+			 ((puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_GLOBAL) || !CacheOpConfigSupports(CACHEOP_CONFIG_KRBF)))
+	{
+		eError =
+			CacheOpBatchExecGlobal(ppsPMR,
+								   pvAddress,
+								   puiOffset,
+								   puiSize,
+								   puiCacheOp,
+								   ui32NumCacheOps,
+								   uiTimeline,
+								   ui32ClientGFSeqNum,
+								   uiCurrentFenceSeqNum,
+								   pui32NextFenceSeqNum);
+	}
+	/* This is the default entry for all client requests */
+	else
+	{
+		if (!(gsCwq.eConfig & (CACHEOP_CONFIG_LAST-1)))
+		{
+			/* default the configuration before execution */
+			CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT);
+		}
+
+		eError =
+			CacheOpBatchExecRangeBased(ppsPMR,
+									   pvAddress,
+									   puiOffset,
+									   puiSize,
+									   puiCacheOp,
+									   ui32NumCacheOps,
+									   uiTimeline,
+									   ui32ClientGFSeqNum,
+									   uiCurrentFenceSeqNum,
+									   pui32NextFenceSeqNum);
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR CacheOpFence (RGXFWIF_DM eFenceOpType, IMG_UINT32 ui32FenceOpSeqNum)
+{
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR eError2;
+	IMG_UINT32 ui32RetryAbort;
+	IMG_UINT32 ui32CompletedOpSeqNum;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+#if defined(CACHEOP_DEBUG)
+	IMG_UINT64 uiTimeNow;
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+	sCacheOpWorkItem.ui32OpSeqNum = ui32FenceOpSeqNum;
+	sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+	uiTimeNow = sCacheOpWorkItem.ui64EnqueuedTime;
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+	sCacheOpWorkItem.eFenceOpType = eFenceOpType;
+#endif
+	sCacheOpWorkItem.uiSize = (uintptr_t) OSAtomicRead(&gsCwq.hCompletedSeqNum);
+	sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+#endif
+	PVR_UNREFERENCED_PARAMETER(eFenceOpType);
+
+	/* The CacheOp(s) this thread is fencing for have already been satisfied by an
+	   else-when GF. Another way of looking at this: if the last else-when GF is
+	   logically behind, or momentarily disabled (zero), then we have to flush
+	   the cache */
+	if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32FenceOpSeqNum)
+	{
+#if defined(CACHEOP_DEBUG)
+		sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+#endif
+		goto e0;
+	}
+
+	/* If initial fence check fails, then wait-and-retry in loop */
+	ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum);
+	if (CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum))
+	{
+#if defined(CACHEOP_DEBUG)
+		sCacheOpWorkItem.uiSize = (uintptr_t) ui32CompletedOpSeqNum;
+#endif
+		goto e0;
+	}
+
+	/* Open CacheOp update event object, if event open fails return error */
+	eError2 = OSEventObjectOpen(gsCwq.hClientWakeUpEvtObj, &hOSEvent);
+	PVR_LOGG_IF_ERROR(eError2, "OSEventObjectOpen", e0);
+
+	/* Linear back-off (use exponential?), upper-bounds the user wait */
+	for (ui32RetryAbort = gsCwq.ui32FenceRetryAbort; ;--ui32RetryAbort)
+	{
+		/* (Re)read completed CacheOp sequence number before waiting */
+		ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum);
+		if (CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum))
+		{
+#if defined(CACHEOP_DEBUG)
+			sCacheOpWorkItem.uiSize = (uintptr_t) ui32CompletedOpSeqNum;
+#endif
+			break;
+		}
+
+		/*
+		   For cache ISAs with GF support, the wait must be set to around 25%
+		   of GF overhead, and as such there is no point waiting longer; we just
+		   perform a GF as it means the CacheOp thread is really lagging behind.
+		   Lastly, we cannot (or should not) hang the client thread indefinitely,
+		   so after a certain duration we just give up. What this duration should
+		   be is hard to state, but for now we set it to 1 second, which is the
+		   product of CACHEOP_FENCE_[WAIT_TIMEOUT * RETRY_ABORT]. We ask the
+		   client to retry the operation by exiting with PVRSRV_ERROR_RETRY.
+		*/
+		(void) OSEventObjectWaitTimeout(hOSEvent, gsCwq.ui32FenceWaitTimeUs);
+		if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32FenceOpSeqNum)
+		{
+#if defined(CACHEOP_DEBUG)
+			sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+			uiTimeNow = OSClockns64();
+#endif
+			break;
+		}
+		else if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF))
+		{
+			eError2 = CacheOpGlobalFlush();
+			PVR_LOG_IF_ERROR(eError2, "CacheOpGlobalFlush");
+#if defined(CACHEOP_DEBUG)
+			sCacheOpWorkItem.uiCacheOp = PVRSRV_CACHE_OP_GLOBAL;
+			sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+			uiTimeNow = OSClockns64();
+#endif
+			break;
+		}
+		else if (! ui32RetryAbort)
+		{
+#if defined(CACHEOP_DEBUG)
+			sCacheOpWorkItem.uiSize = (uintptr_t) OSAtomicRead(&gsCwq.hCompletedSeqNum);
+			sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+			uiTimeNow = OSClockns64();
+#endif
+			PVR_LOG(("CacheOpFence() event: "CACHEOP_ABORT_FENCE_ERROR_STRING));
+			eError = PVRSRV_ERROR_RETRY;
+			break;
+		}
+		else
+		{
+#if defined(CACHEOP_DEBUG)
+			uiTimeNow = OSClockns64();
+#endif
+		}
+	}
+
+	eError2 = OSEventObjectClose(hOSEvent);
+	PVR_LOG_IF_ERROR(eError2, "OSEventObjectOpen");
+
+e0:
+#if defined(CACHEOP_DEBUG)
+	sCacheOpWorkItem.ui64ExecuteTime = uiTimeNow;
+	if (ui32FenceOpSeqNum)
+	{
+		/* Only fence(s) pending on CacheOp(s) contribute towards statistics,
+		   here we calculate the rolling approximate average waiting time
+		   for these fence(s) */
+		IMG_UINT64 ui64EnqueuedTime = sCacheOpWorkItem.ui64EnqueuedTime;
+		IMG_UINT64 ui64ExecuteTime = sCacheOpWorkItem.ui64ExecuteTime;
+		IMG_UINT32 ui32Time = (IMG_UINT32)(ui64EnqueuedTime < ui64ExecuteTime ?
+									ui64ExecuteTime - ui64EnqueuedTime :
+									ui64EnqueuedTime - ui64ExecuteTime);
+		ui32Time = DivBy10(DivBy10(DivBy10(ui32Time)));
+		gsCwq.ui32TotalFenceOps += 1;
+		if (gsCwq.ui32TotalFenceOps > 2)
+		{
+			gsCwq.ui32AvgFenceTime -= (gsCwq.ui32AvgFenceTime / gsCwq.ui32TotalFenceOps);
+			gsCwq.ui32AvgFenceTime += (ui32Time / gsCwq.ui32TotalFenceOps);
+		}
+		else if (ui32Time)
+		{
+			gsCwq.ui32AvgFenceTime = (IMG_UINT32)ui32Time;
+		}
+	}
+	CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR CacheOpLog (PMR *psPMR,
+						 IMG_UINT64 puiAddress,
+						 IMG_DEVMEM_OFFSET_T uiOffset,
+						 IMG_DEVMEM_SIZE_T uiSize,
+						 IMG_UINT64 ui64EnqueuedTimeUs,
+						 IMG_UINT64 ui64ExecuteTimeUs,
+						 IMG_UINT32 ui32NumRBF,
+						 IMG_BOOL bIsDiscard,
+						 PVRSRV_CACHE_OP uiCacheOp)
+{
+#if defined(CACHEOP_DEBUG)
+	CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+	PVR_UNREFERENCED_PARAMETER(puiAddress);
+
+	sCacheOpWorkItem.psPMR = psPMR;
+	sCacheOpWorkItem.uiSize = uiSize;
+	sCacheOpWorkItem.uiOffset = uiOffset;
+	sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+	sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+	sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+
+	sCacheOpWorkItem.ui64EnqueuedTime = ui64EnqueuedTimeUs;
+	sCacheOpWorkItem.ui64ExecuteTime = ui64ExecuteTimeUs;
+	sCacheOpWorkItem.bUMF = IMG_TRUE;
+	sCacheOpWorkItem.bRBF = bIsDiscard ? IMG_FALSE : IMG_TRUE;
+	gsCwq.ui32UMDiscards += bIsDiscard ? 1 : 0;
+	gsCwq.ui32ClientRBF += bIsDiscard ? 0 : ui32NumRBF;
+	gsCwq.ui32ClientSync += 1;
+	gsCwq.ui32TotalExecOps += 1;
+
+	CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#else
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiCacheOp);
+	PVR_UNREFERENCED_PARAMETER(ui32NumRBF);
+	PVR_UNREFERENCED_PARAMETER(puiAddress);
+	PVR_UNREFERENCED_PARAMETER(ui64ExecuteTimeUs);
+	PVR_UNREFERENCED_PARAMETER(ui64EnqueuedTimeUs);
+#endif
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR CacheOpInit2 (void)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* Create an event object for pending CacheOp work items */
+	eError = OSEventObjectCreate("PVRSRV_CACHEOP_EVENTOBJECT", &gsCwq.hThreadWakeUpEvtObj);
+	PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", e0);
+
+	/* Create an event object for updating pending fence checks on CacheOp */
+	eError = OSEventObjectCreate("PVRSRV_CACHEOP_EVENTOBJECT", &gsCwq.hClientWakeUpEvtObj);
+	PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", e0);
+
+	/* Appending work-items is not concurrent, lock protects against this */
+	eError = OSLockCreate((POS_LOCK*)&gsCwq.hDeferredLock, LOCK_TYPE_PASSIVE);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+	/* Apphint read/write is not concurrent, so lock protects against this */
+	eError = OSLockCreate((POS_LOCK*)&gsCwq.hConfigLock, LOCK_TYPE_PASSIVE);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+	/* Determine which CPU cache ISA maintenance mechanisms are available: GF and UMF */
+	gsCwq.bNoGlobalFlushImpl = (IMG_BOOL)OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+	if (! gsCwq.bNoGlobalFlushImpl)
+	{
+		IMG_UINT64 uiIdx;
+		IMG_UINT64 uiTime = 0;
+		IMG_UINT64 uiTimeAfter;
+		IMG_UINT64 uiTimeBefore;
+
+		for (uiIdx = 0; uiIdx < 4; uiIdx++)
+		{
+			/* Take average of four GF */
+			uiTimeBefore = OSClockns64();
+			(void) OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+			uiTimeAfter = OSClockns64();
+
+			uiTimeBefore = DivBy10(DivBy10(DivBy10(uiTimeBefore)));
+			uiTimeAfter = DivBy10(DivBy10(DivBy10(uiTimeAfter)));
+			uiTime += uiTimeBefore < uiTimeAfter ?
+								uiTimeAfter  - uiTimeBefore :
+								uiTimeBefore - uiTimeAfter;
+		}
+
+		gsCwq.ui32FenceWaitTimeUs = (IMG_UINT32)(uiTime >> 2);
+		gsCwq.ui32FenceRetryAbort = ~0;
+	}
+	else
+	{
+		gsCwq.ui32FenceWaitTimeUs = CACHEOP_FENCE_WAIT_TIMEOUT;
+		gsCwq.ui32FenceRetryAbort = CACHEOP_FENCE_RETRY_ABORT;
+	}
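+
+	/* NOTE: the branch above calibrates the fence wait time from four
+	   global flush (GF) samples; each DivBy10(DivBy10(DivBy10(x))) pass
+	   converts a nanosecond clock value to microseconds, and
+	   "uiTime >> 2" takes the mean of the four samples. */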
+#if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH)
+	gsCwq.bSupportsUMFlush = IMG_TRUE;
+#else
+	gsCwq.bSupportsUMFlush = IMG_FALSE;
+#endif
+
+	gsCwq.psInfoPageMemDesc = psPVRSRVData->psInfoPageMemDesc;
+	gsCwq.pui32InfoPage = psPVRSRVData->pui32InfoPage;
+	gsCwq.psInfoPagePMR = psPVRSRVData->psInfoPagePMR;
+
+	/* Normally, platforms should use their default configurations, put exceptions here */
+#if defined(__i386__) || defined(__x86_64__)
+#if !defined(TC_MEMORY_CONFIG)
+	CacheOpConfigUpdate(CACHEOP_CONFIG_URBF | CACHEOP_CONFIG_KGF | CACHEOP_CONFIG_KDF);
+#else
+	CacheOpConfigUpdate(CACHEOP_CONFIG_KGF | CACHEOP_CONFIG_KDF);
+#endif
+#else /* !defined(__i386__) && !defined(__x86_64__) */
+	CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT);
+#endif
+
+	/* Initialise the remaining occupants of the CacheOp information page */
+	gsCwq.pui32InfoPage[CACHEOP_INFO_PGSIZE]    = (IMG_UINT32)gsCwq.uiPageSize;
+	gsCwq.pui32InfoPage[CACHEOP_INFO_LINESIZE]  = (IMG_UINT32)gsCwq.uiLineSize;
+	gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] = (IMG_UINT32)0;
+	gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM1] = (IMG_UINT32)0;
+
+	/* Set before spawning thread */
+	gsCwq.bInit = IMG_TRUE;
+
+	/* Create a thread which is used to execute the deferred CacheOp(s),
+	   these are CacheOp(s) executed by the server on behalf of clients
+	   asynchronously. All clients synchronise with the server before
+	   submitting any HW operation (i.e. device kicks) to ensure that
+	   client device work-load memory is coherent */
+	eError = OSThreadCreatePriority(&gsCwq.hWorkerThread,
+									"pvr_cacheop",
+									CacheOpThread,
+									psPVRSRVData,
+									OS_THREAD_HIGHEST_PRIORITY);
+	PVR_LOGG_IF_ERROR(eError, "OSThreadCreatePriority", e0);
+
+	/* Writing the unsigned integer binary encoding of CACHEOP_CONFIG
+	   into this file cycles through the available configurations */
+	gsCwq.pvConfigTune = OSCreateStatisticEntry("cacheop_config",
+											NULL,
+											CacheOpConfigRead,
+											NULL,
+											NULL,
+											NULL);
+	PVR_LOGG_IF_FALSE(gsCwq.pvConfigTune, "OSCreateStatisticEntry", e0);
+
+	/* Register the CacheOp framework (re)configuration handlers */
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpConfig,
+										CacheOpConfigQuery,
+										CacheOpConfigSet,
+										APPHINT_OF_DRIVER_NO_DEVICE,
+										NULL);
+
+	return PVRSRV_OK;
+e0:
+	CacheOpDeInit2();
+	return eError;
+}
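+
+/* Illustrative bring-up order (a sketch, not mandated by this file alone):
+ * CacheOpInit() runs first to cache the page/line geometry, CacheOpInit2()
+ * follows once the PVRSRV data and info page exist, and teardown pairs
+ * CacheOpDeInit2() with CacheOpDeInit() in reverse order. */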
+
+PVRSRV_ERROR CacheOpAcquireInfoPage(PMR **ppsPMR)
+{
+    PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+
+    PVR_LOGR_IF_FALSE(psData->psInfoPageMemDesc != NULL, "invalid MEMDESC"
+                      " handle", PVRSRV_ERROR_INVALID_PARAMS);
+    PVR_LOGR_IF_FALSE(psData->psInfoPagePMR != NULL, "invalid PMR handle",
+                      PVRSRV_ERROR_INVALID_PARAMS);
+
+    /* Copy the PMR import handle back */
+    *ppsPMR = psData->psInfoPagePMR;
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR CacheOpReleaseInfoPage(PMR *psPMR)
+{
+    /* Nothing to do here as the PMR is a singleton */
+    PVR_UNREFERENCED_PARAMETER(psPMR);
+    return PVRSRV_OK;
+}
+
+void CacheOpDeInit2 (void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	gsCwq.bInit = IMG_FALSE;
+
+	if (gsCwq.hThreadWakeUpEvtObj)
+	{
+		eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+	}
+
+	if (gsCwq.hClientWakeUpEvtObj)
+	{
+		eError = OSEventObjectSignal(gsCwq.hClientWakeUpEvtObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+	}
+
+	if (gsCwq.hWorkerThread)
+	{
+		LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+		{
+			eError = OSThreadDestroy(gsCwq.hWorkerThread);
+			if (PVRSRV_OK == eError)
+			{
+				gsCwq.hWorkerThread = NULL;
+				break;
+			}
+			OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+		PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+		gsCwq.hWorkerThread = NULL;
+	}
+
+	if (gsCwq.hClientWakeUpEvtObj)
+	{
+		eError = OSEventObjectDestroy(gsCwq.hClientWakeUpEvtObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+		gsCwq.hClientWakeUpEvtObj = NULL;
+	}
+
+	if (gsCwq.hThreadWakeUpEvtObj)
+	{
+		eError = OSEventObjectDestroy(gsCwq.hThreadWakeUpEvtObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+		gsCwq.hThreadWakeUpEvtObj = NULL;
+	}
+
+	if (gsCwq.hConfigLock)
+	{
+		eError = OSLockDestroy(gsCwq.hConfigLock);
+		PVR_LOG_IF_ERROR(eError, "OSLockDestroy");
+		gsCwq.hConfigLock = NULL;
+	}
+
+	if (gsCwq.hDeferredLock)
+	{
+		eError = OSLockDestroy(gsCwq.hDeferredLock);
+		PVR_LOG_IF_ERROR(eError, "OSLockDestroy");
+		gsCwq.hDeferredLock = NULL;
+	}
+
+	if (gsCwq.pvConfigTune)
+	{
+		OSRemoveStatisticEntry(gsCwq.pvConfigTune);
+		gsCwq.pvConfigTune = NULL;
+	}
+
+	gsCwq.psInfoPageMemDesc = NULL;
+	gsCwq.pui32InfoPage = NULL;
+	gsCwq.psInfoPagePMR = NULL;
+}
+
+PVRSRV_ERROR CacheOpInit (void)
+{
+	IMG_UINT32 idx;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* DDK initialisation is anticipated to be performed on the boot
+	   processor (the little core in big/little systems) though this may
+	   not always be the case. On platforms with mismatched d-cache line
+	   sizes, the value cached here is therefore the system-wide safe
+	   (i.e. smallest) L1 d-cache line size */
+	gsCwq.uiPageSize = OSGetPageSize();
+	gsCwq.uiPageShift = OSGetPageShift();
+	gsCwq.uiLineSize = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+	gsCwq.uiLineShift = ExactLog2(gsCwq.uiLineSize);
+	PVR_LOGR_IF_FALSE((gsCwq.uiLineSize && gsCwq.uiPageSize && gsCwq.uiPageShift), "invalid d-cache line size or page size", PVRSRV_ERROR_INIT_FAILURE);
+	gsCwq.uiCacheOpAddrType = OSCPUCacheOpAddressType();
+
+	/* More information regarding these atomic counters can be found
+	   in the CACHEOP_WORK_QUEUE type definition at the top of this file */
+	OSAtomicWrite(&gsCwq.hCompletedSeqNum, 0);
+	OSAtomicWrite(&gsCwq.hCommonSeqNum, 0);
+	OSAtomicWrite(&gsCwq.hDeferredSize, 0);
+	OSAtomicWrite(&gsCwq.hWriteCounter, 0);
+	OSAtomicWrite(&gsCwq.hReadCounter, 0);
+
+	for (idx = 0; idx < CACHEOP_INDICES_MAX; idx++)
+	{
+		gsCwq.asWorkItems[idx].iTimeline = PVRSRV_NO_UPDATE_TIMELINE_REQUIRED;
+		gsCwq.asWorkItems[idx].psPMR = (void *)(uintptr_t)~0;
+		gsCwq.asWorkItems[idx].ui32OpSeqNum = (IMG_UINT32)~0;
+		gsCwq.asWorkItems[idx].ui32GFSeqNum = (IMG_UINT32)~0;
+	}
+
+	/* Lock prevents multiple threads from issuing surplus to requirement GF */
+	eError = OSLockCreate((POS_LOCK*)&gsCwq.hGlobalFlushLock, LOCK_TYPE_PASSIVE);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+#if defined(CACHEOP_DEBUG)
+	/* debugfs file read-out is not concurrent, so lock protects against this */
+	eError = OSLockCreate((POS_LOCK*)&gsCwq.hStatsExecLock, LOCK_TYPE_PASSIVE);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+	gsCwq.i32StatsExecWriteIdx = 0;
+	OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted));
+
+	/* File captures the most recent subset of CacheOp(s) executed */
+	gsCwq.pvStatsEntry = OSCreateStatisticEntry("cacheop_history",
+												NULL,
+												CacheOpStatsExecLogRead,
+												NULL,
+												NULL,
+												NULL);
+	PVR_LOGG_IF_FALSE(gsCwq.pvStatsEntry, "OSCreateStatisticEntry", e0);
+#endif
+
+e0:
+	return eError;
+}
+
+void CacheOpDeInit (void)
+{
+#if defined(CACHEOP_DEBUG)
+	if (gsCwq.hStatsExecLock)
+	{
+		(void) OSLockDestroy(gsCwq.hStatsExecLock);
+		gsCwq.hStatsExecLock = NULL;
+	}
+
+	if (gsCwq.pvStatsEntry)
+	{
+		OSRemoveStatisticEntry(gsCwq.pvStatsEntry);
+		gsCwq.pvStatsEntry = NULL;
+	}
+#endif
+	if (gsCwq.hGlobalFlushLock)
+	{
+		(void) OSLockDestroy(gsCwq.hGlobalFlushLock);
+		gsCwq.hGlobalFlushLock = NULL;
+	}
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/connection_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/connection_server.c
new file mode 100644
index 0000000..4eaff90
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/connection_server.c
@@ -0,0 +1,415 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Handles connections coming from the client and the
+                management of connection-based information
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "handle.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "osconnection_server.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "sync_server.h"
+#include "process_stats.h"
+#include "pdump_km.h"
+#include "lists.h"
+#include "osfunc.h"
+#include "tlstream.h"
+
+/* PID associated with Connection currently being purged by Cleanup thread */
+static IMG_PID gCurrentPurgeConnectionPid = 0;
+
+static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection)
+{
+	PVRSRV_ERROR eError;
+	PROCESS_HANDLE_BASE *psProcessHandleBase;
+	IMG_UINT64 ui64MaxBridgeTime;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if(psPVRSRVData->bUnload)
+	{
+		/* driver is unloading so do not allow the bridge lock to be released */
+		ui64MaxBridgeTime = 0;
+	}
+	else
+	{
+		ui64MaxBridgeTime = CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS;
+	}
+
+	if (psConnection == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "ConnectionDataDestroy: Missing connection!"));
+		PVR_ASSERT(0);
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Close the process statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	if (psConnection->hProcessStats != NULL)
+	{
+		PVRSRVStatsDeregisterProcess(psConnection->hProcessStats);
+		psConnection->hProcessStats = NULL;
+	}
+#endif
+
+	/* Close HWPerfClient stream here even though we created it in
+	 * PVRSRVConnectKM(). */
+	if (psConnection->hClientTLStream)
+	{
+		TLStreamClose(psConnection->hClientTLStream);
+		psConnection->hClientTLStream = NULL;
+		PVR_DPF((PVR_DBG_MESSAGE, "Destroyed private stream."));
+	}
+
+	/* Get process handle base to decrement the refcount */
+	psProcessHandleBase = psConnection->psProcessHandleBase;
+
+	if (psProcessHandleBase != NULL)
+	{
+		/* acquire the lock now to ensure unref and removal from the
+		 * hash table is atomic.
+		 * if the refcount becomes zero then the lock needs to be held
+		 * until the entry is removed from the hash table.
+		 */
+		OSLockAcquire(psPVRSRVData->hProcessHandleBase_Lock);
+
+		/* In case the refcount becomes 0 we can remove the process handle base */
+		if (OSAtomicDecrement(&psProcessHandleBase->iRefCount) == 0)
+		{
+			uintptr_t uiHashValue;
+
+			uiHashValue = HASH_Remove(psPVRSRVData->psProcessHandleBase_Table, psConnection->pid);
+			OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+
+			if (!uiHashValue)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Failed to remove handle base from hash table.",
+						__func__));
+				return PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE;
+			}
+
+			eError = PVRSRVFreeHandleBase(psProcessHandleBase->psHandleBase, ui64MaxBridgeTime);
+			if (eError != PVRSRV_OK)
+			{
+				if (eError != PVRSRV_ERROR_RETRY)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+						 "ConnectionDataDestroy: Couldn't free handle base for process (%d)",
+						 eError));
+				}
+
+				return eError;
+			}
+
+			OSFreeMem(psProcessHandleBase);
+		}
+		else
+		{
+			OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+		}
+
+		psConnection->psProcessHandleBase = NULL;
+	}
+
+	/* Free handle base for this connection */
+	if (psConnection->psHandleBase != NULL)
+	{
+		eError = PVRSRVFreeHandleBase(psConnection->psHandleBase, ui64MaxBridgeTime);
+		if (eError != PVRSRV_OK)
+		{
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "ConnectionDataDestroy: Couldn't free handle base for connection (%d)",
+					 eError));
+			}
+
+			return eError;
+		}
+
+		psConnection->psHandleBase = NULL;
+	}
+
+	if (psConnection->psSyncConnectionData != NULL)
+	{
+		SyncUnregisterConnection(psConnection->psSyncConnectionData);
+		psConnection->psSyncConnectionData = NULL;
+	}
+
+	if (psConnection->psPDumpConnectionData != NULL)
+	{
+		PDumpUnregisterConnection(psConnection->psPDumpConnectionData);
+		psConnection->psPDumpConnectionData = NULL;
+	}
+
+	/* Call environment specific connection data deinit function */
+	if (psConnection->hOsPrivateData != NULL)
+	{
+		eError = OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+				 "PVRSRVConnectionDataDestroy: OSConnectionPrivateDataDeInit failed (%d)",
+				 eError));
+
+			return eError;
+		}
+
+		psConnection->hOsPrivateData = NULL;
+	}
+
+	OSFreeMem(psConnection);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVConnectionConnect(void **ppvPrivData, void *pvOSData)
+{
+	CONNECTION_DATA *psConnection;
+	PVRSRV_ERROR eError;
+	PROCESS_HANDLE_BASE *psProcessHandleBase;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* Allocate connection data area */
+	psConnection = OSAllocZMem(sizeof(*psConnection));
+	if (psConnection == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVConnectionConnect: Couldn't allocate connection data"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Call environment specific connection data init function */
+	eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, pvOSData);
+	if (eError != PVRSRV_OK)
+	{
+		 PVR_DPF((PVR_DBG_ERROR,
+			  "PVRSRVConnectionConnect: OSConnectionPrivateDataInit failed (%d)",
+			  eError));
+		goto failure;
+	}
+
+	psConnection->pid = OSGetCurrentClientProcessIDKM();
+
+	/* Register this connection with the sync core */
+	eError = SyncRegisterConnection(&psConnection->psSyncConnectionData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVConnectionConnect: Couldn't register the sync data"));
+		goto failure;
+	}
+
+	/*
+	 * Register this connection with the pdump core. Pass in the sync connection data
+	 * as it will be needed later when we only get passed in the PDump connection data.
+	 */
+	eError = PDumpRegisterConnection(psConnection->psSyncConnectionData,
+					 &psConnection->psPDumpConnectionData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVConnectionConnect: Couldn't register the PDump data"));
+		goto failure;
+	}
+
+	/* Allocate handle base for this connection */
+	eError = PVRSRVAllocHandleBase(&psConnection->psHandleBase,
+	                               PVRSRV_HANDLE_BASE_TYPE_CONNECTION);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVConnectionConnect: Couldn't allocate handle base for connection (%d)",
+			 eError));
+		goto failure;
+	}
+
+	/* Try to get process handle base if it already exists */
+	OSLockAcquire(psPVRSRVData->hProcessHandleBase_Lock);
+	psProcessHandleBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(PVRSRVGetPVRSRVData()->psProcessHandleBase_Table,
+	                                                           psConnection->pid);
+
+	/* In case there is none we are going to allocate one */
+	if (psProcessHandleBase == NULL)
+	{
+		psProcessHandleBase = OSAllocZMem(sizeof(PROCESS_HANDLE_BASE));
+		if (psProcessHandleBase == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to allocate handle base, oom.",
+					__func__));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto failureLock;
+		}
+
+		/* Allocate handle base for this process */
+		eError = PVRSRVAllocHandleBase(&psProcessHandleBase->psHandleBase,
+		                               PVRSRV_HANDLE_BASE_TYPE_PROCESS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Couldn't allocate handle base for process (%d)",
+			         __func__,
+			         eError));
+			OSFreeMem(psProcessHandleBase);
+			goto failureLock;
+		}
+
+		/* Insert the handle base into the global hash table */
+		if (!HASH_Insert(PVRSRVGetPVRSRVData()->psProcessHandleBase_Table,
+		                 psConnection->pid,
+		                 (uintptr_t) psProcessHandleBase))
+		{
+			eError = PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE;
+
+			PVRSRVFreeHandleBase(psProcessHandleBase->psHandleBase, 0);
+
+			OSFreeMem(psProcessHandleBase);
+			goto failureLock;
+		}
+	}
+	OSAtomicIncrement(&psProcessHandleBase->iRefCount);
+
+	OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+
+	psConnection->psProcessHandleBase = psProcessHandleBase;
+
+
+	/* Allocate process statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	eError = PVRSRVStatsRegisterProcess(&psConnection->hProcessStats);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVConnectionConnect: Couldn't register process statistics (%d)",
+			 eError));
+		goto failure;
+	}
+#endif
+
+	*ppvPrivData = psConnection;
+
+	return eError;
+
+failureLock:
+	OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+failure:
+	ConnectionDataDestroy(psConnection);
+
+	return eError;
+}
+
+static PVRSRV_ERROR _CleanupThreadPurgeConnectionData(void *pvConnectionData)
+{
+	PVRSRV_ERROR eErrorConnection, eErrorKernel;
+	CONNECTION_DATA *psConnectionData = pvConnectionData;
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#endif
+
+	gCurrentPurgeConnectionPid = psConnectionData->pid;
+
+	eErrorConnection = ConnectionDataDestroy(psConnectionData);
+	if (eErrorConnection != PVRSRV_OK)
+	{
+		if (eErrorConnection == PVRSRV_ERROR_RETRY)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+				 "_CleanupThreadPurgeConnectionData: Failed to purge connection data %p "
+				 "(deferring destruction)",
+				 psConnectionData));
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,
+			 "_CleanupThreadPurgeConnectionData: Connection data %p deferred destruction finished",
+			 psConnectionData));
+	}
+
+	/* If possible, resize the global handle base by purging unused handles */
+	eErrorKernel = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
+	if (eErrorKernel != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "_CleanupThreadPurgeConnectionData: Purge of global handle pool failed (%d)",
+			 eErrorKernel));
+	}
+
+	gCurrentPurgeConnectionPid = 0;
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+
+	return eErrorConnection;
+}
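+
+/* A note on the deferred-cleanup contract used below: the cleanup thread
+ * invokes pfnFree (_CleanupThreadPurgeConnectionData above), and a
+ * PVRSRV_ERROR_RETRY return re-queues the work item, bounded by
+ * ui32RetryCount, rather than failing the disconnect outright. */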
+
+void PVRSRVConnectionDisconnect(void *pvDataPtr)
+{
+	CONNECTION_DATA *psConnectionData = pvDataPtr;
+
+	/* Notify the PDump core if the pdump control client is disconnecting */
+	if (psConnectionData->ui32ClientFlags & SRV_FLAGS_PDUMPCTRL)
+	{
+		PDumpDisconnectionNotify();
+	}
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+#endif
+	{
+		/* Defer the release of the connection data */
+		psConnectionData->sCleanupThreadFn.pfnFree = _CleanupThreadPurgeConnectionData;
+		psConnectionData->sCleanupThreadFn.pvData = psConnectionData;
+		psConnectionData->sCleanupThreadFn.ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT;
+		psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_FALSE;
+		PVRSRVCleanupThreadAddWork(&psConnectionData->sCleanupThreadFn);
+	}
+}
+
+IMG_PID PVRSRVGetPurgeConnectionPid(void)
+{
+	return gCurrentPurgeConnectionPid;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/devicemem_heapcfg.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/devicemem_heapcfg.c
new file mode 100644
index 0000000..b584283
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/devicemem_heapcfg.c
@@ -0,0 +1,140 @@
+/*************************************************************************/ /*!
+@File           devicemem_heapcfg.c
+@Title          Device memory heap configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+/* our exported API */
+#include "devicemem_heapcfg.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "connection_server.h"
+
+PVRSRV_ERROR
+HeapCfgHeapConfigCount(CONNECTION_DATA * psConnection,
+    const PVRSRV_DEVICE_NODE *psDeviceNode,
+    IMG_UINT32 *puiNumHeapConfigsOut
+)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+    *puiNumHeapConfigsOut = psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs;
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapCount(CONNECTION_DATA * psConnection,
+    const PVRSRV_DEVICE_NODE *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 *puiNumHeapsOut
+)
+{
+    if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+    {
+        return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+    }
+
+    *puiNumHeapsOut = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps;
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapConfigName(CONNECTION_DATA * psConnection,
+    const PVRSRV_DEVICE_NODE *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 uiHeapConfigNameBufSz,
+    IMG_CHAR *pszHeapConfigNameOut
+)
+{
+    if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+    {
+        return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+    }
+
+    OSSNPrintf(pszHeapConfigNameOut, uiHeapConfigNameBufSz, "%s", psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].pszName);
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapDetails(CONNECTION_DATA * psConnection,
+    const PVRSRV_DEVICE_NODE *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 uiHeapIndex,
+    IMG_UINT32 uiHeapNameBufSz,
+    IMG_CHAR *pszHeapNameOut,
+    IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+    IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+    IMG_UINT32 *puiLog2DataPageSizeOut,
+    IMG_UINT32 *puiLog2ImportAlignmentOut,
+    IMG_UINT32 *puiLog2TilingStrideFactorOut
+)
+{
+    DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint;
+
+    if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+    {
+        return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+    }
+
+    if (uiHeapIndex >= psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps)
+    {
+        return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+    }
+
+    psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex];
+
+    OSSNPrintf(pszHeapNameOut, uiHeapNameBufSz, "%s", psHeapBlueprint->pszName);
+    *psDevVAddrBaseOut = psHeapBlueprint->sHeapBaseAddr;
+    *puiHeapLengthOut = psHeapBlueprint->uiHeapLength;
+    *puiLog2DataPageSizeOut = psHeapBlueprint->uiLog2DataPageSize;
+    *puiLog2ImportAlignmentOut = psHeapBlueprint->uiLog2ImportAlignment;
+    *puiLog2TilingStrideFactorOut = psHeapBlueprint->uiLog2TilingStrideFactor;
+
+    return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/devicemem_history_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/devicemem_history_server.c
new file mode 100644
index 0000000..07283ef
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/devicemem_history_server.c
@@ -0,0 +1,1917 @@
+/*************************************************************************/ /*!
+@File
+@Title          Devicemem history functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Devicemem history functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h"
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+#include "pvr_debug.h"
+#include "devicemem_server.h"
+#include "lock.h"
+#include "devicemem_history_server.h"
+#include "pdump_km.h"
+
+#define ALLOCATION_LIST_NUM_ENTRIES 10000
+
+/* data type to hold an allocation index.
+ * we make it 16 bits wide if possible
+ */
+#if ALLOCATION_LIST_NUM_ENTRIES <= 0xFFFF
+typedef uint16_t ALLOC_INDEX_T;
+#else
+typedef uint32_t ALLOC_INDEX_T;
+#endif
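+
+/* With ALLOCATION_LIST_NUM_ENTRIES == 10000 (<= 0xFFFF) the index type
+ * above resolves to uint16_t, so each prev/next link stored per
+ * allocation record costs 2 bytes rather than 4. */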
+
+/* a record describing a single allocation known to DeviceMemHistory.
+ * this is an element in a doubly linked list of allocations
+ */
+typedef struct _RECORD_ALLOCATION_
+{
+	/* time when this RECORD_ALLOCATION was created/initialised */
+	IMG_UINT64 ui64CreationTime;
+	/* serial number of the PMR relating to this allocation */
+	IMG_UINT64 ui64Serial;
+	/* base DevVAddr of this allocation */
+	IMG_DEV_VIRTADDR sDevVAddr;
+	/* size in bytes of this allocation */
+	IMG_DEVMEM_SIZE_T uiSize;
+	/* Log2 page size of this allocation's GPU pages */
+	IMG_UINT32 ui32Log2PageSize;
+	/* Process ID (PID) this allocation belongs to */
+	IMG_PID uiPID;
+	/* index of previous allocation in the list */
+	ALLOC_INDEX_T ui32Prev;
+	/* index of next allocation in the list */
+	ALLOC_INDEX_T ui32Next;
+	/* annotation/name of this allocation */
+	IMG_CHAR szName[DEVICEMEM_HISTORY_TEXT_BUFSZ];
+} RECORD_ALLOCATION;
+
+/* each command in the circular buffer is prefixed with an 8-bit value
+ * denoting the command type
+ */
+typedef enum _COMMAND_TYPE_
+{
+	COMMAND_TYPE_NONE,
+	COMMAND_TYPE_TIMESTAMP,
+	COMMAND_TYPE_MAP_ALL,
+	COMMAND_TYPE_UNMAP_ALL,
+	COMMAND_TYPE_MAP_RANGE,
+	COMMAND_TYPE_UNMAP_RANGE,
+	/* sentinel value */
+	COMMAND_TYPE_COUNT,
+} COMMAND_TYPE;
+
+/* Timestamp command:
+ * This command is inserted into the circular buffer to provide an updated
+ * timestamp.
+ * The nanosecond-accuracy timestamp is packed into a 56-bit integer, in order
+ * for the whole command to fit into 8 bytes.
+ */
+typedef struct _COMMAND_TIMESTAMP_
+{
+	IMG_UINT8 aui8TimeNs[7];
+} COMMAND_TIMESTAMP;
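+
+/* 56 bits of nanoseconds wrap after 2^56 ns, roughly 834 days, so the
+ * truncation only matters when comparing against a full 64-bit timestamp
+ * (see TIME_STAMP_MASK below). */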
+
+/* MAP_ALL command:
+ * This command denotes the allocation at the given index was wholly mapped
+ * in to the GPU MMU
+ */
+typedef struct _COMMAND_MAP_ALL_
+{
+	ALLOC_INDEX_T uiAllocIndex;
+} COMMAND_MAP_ALL;
+
+/* UNMAP_ALL command:
+ * This command denotes the allocation at the given index was wholly unmapped
+ * from the GPU MMU
+ * Note: COMMAND_MAP_ALL and COMMAND_UNMAP_ALL commands have the same layout.
+ */
+typedef COMMAND_MAP_ALL COMMAND_UNMAP_ALL;
+
+/* packing attributes for the MAP_RANGE command */
+#define MAP_RANGE_MAX_START ((1 << 18) - 1)
+#define MAP_RANGE_MAX_RANGE ((1 << 12) - 1)
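+
+/* Worked example: the 18-bit start field expresses page indices up to
+ * 262143 (about 1GiB into an allocation at 4KiB pages) and the 12-bit
+ * range field up to 4095 pages per command; longer runs are split across
+ * several commands (see VRangeInsertMapUnmapCommands()). */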
+
+/* MAP_RANGE command:
+ * Denotes a range of pages within the given allocation being mapped.
+ * The range is expressed as [Page Index] + [Page Count]
+ * This information is packed into a 40-bit integer, in order to make
+ * the command size 8 bytes.
+ */
+
+typedef struct _COMMAND_MAP_RANGE_
+{
+	IMG_UINT8 aui8Data[5];
+	ALLOC_INDEX_T uiAllocIndex;
+} COMMAND_MAP_RANGE;
+
+/* UNMAP_RANGE command:
+ * Denotes a range of pages within the given allocation being unmapped.
+ * The range is expressed as [Page Index] + [Page Count]
+ * This information is packed into a 40-bit integer, in order to make
+ * the command size 8 bytes.
+ * Note: COMMAND_MAP_RANGE and COMMAND_UNMAP_RANGE commands have the same layout.
+ */
+typedef COMMAND_MAP_RANGE COMMAND_UNMAP_RANGE;
+
+/* wrapper structure for a command */
+typedef struct _COMMAND_WRAPPER_
+{
+	IMG_UINT8 ui8Type;
+	union {
+		COMMAND_TIMESTAMP sTimeStamp;
+		COMMAND_MAP_ALL sMapAll;
+		COMMAND_UNMAP_ALL sUnmapAll;
+		COMMAND_MAP_RANGE sMapRange;
+		COMMAND_UNMAP_RANGE sUnmapRange;
+	} u;
+} COMMAND_WRAPPER;
+
+/* target size for the circular buffer of commands */
+#define CIRCULAR_BUFFER_SIZE_KB 2048
+/* turn the circular buffer target size into a number of commands */
+#define CIRCULAR_BUFFER_NUM_COMMANDS ((CIRCULAR_BUFFER_SIZE_KB * 1024) / sizeof(COMMAND_WRAPPER))
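+
+/* Assuming COMMAND_WRAPPER packs to 8 bytes (the command layouts above are
+ * sized with this in mind), the 2048KiB target holds (2048 * 1024) / 8 =
+ * 262144 commands before the head wraps over the oldest entries. */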
+
+/* index value denoting the end of a list */
+#define END_OF_LIST 0xFFFFFFFF
+#define ALLOC_INDEX_TO_PTR(idx) (&(gsDevicememHistoryData.sRecords.pasAllocations[idx]))
+#define CHECK_ALLOC_INDEX(idx) (idx < ALLOCATION_LIST_NUM_ENTRIES)
+
+/* wrapper structure for the allocation records and the commands circular buffer */
+typedef struct _RECORDS_
+{
+	RECORD_ALLOCATION *pasAllocations;
+	IMG_UINT32 ui32AllocationsListHead;
+
+	IMG_UINT32 ui32Head;
+	IMG_UINT32 ui32Tail;
+	COMMAND_WRAPPER *pasCircularBuffer;
+} RECORDS;
+
+typedef struct _DEVICEMEM_HISTORY_DATA_
+{
+	/* debugfs entry */
+	void *pvStatsEntry;
+
+	RECORDS sRecords;
+	POS_LOCK hLock;
+} DEVICEMEM_HISTORY_DATA;
+
+static DEVICEMEM_HISTORY_DATA gsDevicememHistoryData = { 0 };
+
+static void DevicememHistoryLock(void)
+{
+	OSLockAcquire(gsDevicememHistoryData.hLock);
+}
+
+static void DevicememHistoryUnlock(void)
+{
+	OSLockRelease(gsDevicememHistoryData.hLock);
+}
+
+/* given a time stamp, calculate the age in nanoseconds */
+static IMG_UINT64 _CalculateAge(IMG_UINT64 ui64Now,
+						IMG_UINT64 ui64Then,
+						IMG_UINT64 ui64Max)
+{
+	if(ui64Now >= ui64Then)
+	{
+		/* no clock wrap */
+		return ui64Now - ui64Then;
+	}
+	else
+	{
+		/* clock has wrapped */
+		return (ui64Max - ui64Then) + ui64Now + 1;
+	}
+}
+
+/* AcquireCBSlot:
+ * Acquire the next slot in the circular buffer and
+ * move the circular buffer head along by one
+ * Returns a pointer to the acquired slot.
+ */
+static COMMAND_WRAPPER *AcquireCBSlot(void)
+{
+	COMMAND_WRAPPER *psSlot;
+
+	psSlot = &gsDevicememHistoryData.sRecords.pasCircularBuffer[gsDevicememHistoryData.sRecords.ui32Head];
+
+	gsDevicememHistoryData.sRecords.ui32Head =
+		(gsDevicememHistoryData.sRecords.ui32Head + 1)
+				% CIRCULAR_BUFFER_NUM_COMMANDS;
+
+	return psSlot;
+}
+
+/* TimeStampPack:
+ * Packs the given timestamp value into the COMMAND_TIMESTAMP structure.
+ * This takes a 64-bit nanosecond timestamp and packs it in to a 56-bit
+ * integer in the COMMAND_TIMESTAMP command.
+ */
+static void TimeStampPack(COMMAND_TIMESTAMP *psTimeStamp, IMG_UINT64 ui64Now)
+{
+	IMG_UINT32 i;
+
+	for(i = 0; i < IMG_ARR_NUM_ELEMS(psTimeStamp->aui8TimeNs); i++)
+	{
+		psTimeStamp->aui8TimeNs[i] = ui64Now & 0xFF;
+		ui64Now >>= 8;
+	}
+}
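+
+/* e.g. ui64Now = 0x0123456789ABCDEF is stored least significant byte
+ * first as EF,CD,AB,89,67,45,23, silently dropping the top byte (0x01);
+ * TimeStampUnpack() below reverses the process. */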
+
+/* packing a 64-bit nanosecond into a 7-byte integer loses the
+ * top 8 bits of data. This must be taken into account when
+ * comparing a full timestamp against an unpacked timestamp
+ */
+#define TIME_STAMP_MASK ((1LLU << 56) - 1)
+#define DO_TIME_STAMP_MASK(ns64) (ns64 & TIME_STAMP_MASK)
+
+/* TimeStampUnpack:
+ * Unpack the timestamp value from the given COMMAND_TIMESTAMP command
+ */
+static IMG_UINT64 TimeStampUnpack(COMMAND_TIMESTAMP *psTimeStamp)
+{
+	IMG_UINT64 ui64TimeNs = 0;
+	IMG_UINT32 i;
+
+	for(i = IMG_ARR_NUM_ELEMS(psTimeStamp->aui8TimeNs); i > 0; i--)
+	{
+		ui64TimeNs <<= 8;
+		ui64TimeNs |= (IMG_UINT64) psTimeStamp->aui8TimeNs[i - 1];
+	}
+
+	return ui64TimeNs;
+}
+
+#if defined(PDUMP)
+
+static void EmitPDumpAllocation(IMG_UINT32 ui32AllocationIndex,
+					RECORD_ALLOCATION *psAlloc)
+{
+	PDUMPCOMMENT("[SrvPFD] Allocation: %u"
+			" Addr: " IMG_DEV_VIRTADDR_FMTSPEC
+			" Size: " IMG_DEVMEM_SIZE_FMTSPEC
+			" Page size: %u"
+			" PID: %u"
+			" Process: %s"
+			" Name: %s",
+			ui32AllocationIndex,
+			psAlloc->sDevVAddr.uiAddr,
+			psAlloc->uiSize,
+			1U << psAlloc->ui32Log2PageSize,
+			psAlloc->uiPID,
+			OSGetCurrentClientProcessNameKM(),
+			psAlloc->szName);
+}
+
+static void EmitPDumpMapUnmapAll(COMMAND_TYPE eType,
+					IMG_UINT32 ui32AllocationIndex)
+{
+	const IMG_CHAR *pszOpName;
+
+	switch(eType)
+	{
+		case COMMAND_TYPE_MAP_ALL:
+			pszOpName = "MAP_ALL";
+			break;
+		case COMMAND_TYPE_UNMAP_ALL:
+			pszOpName = "UNMAP_ALL";
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapAll: Invalid type: %u",
+										eType));
+			return;
+
+	}
+
+	PDUMPCOMMENT("[SrvPFD] Op: %s Allocation: %u",
+								pszOpName,
+								ui32AllocationIndex);
+}
+
+static void EmitPDumpMapUnmapRange(COMMAND_TYPE eType,
+					IMG_UINT32 ui32AllocationIndex,
+					IMG_UINT32 ui32StartPage,
+					IMG_UINT32 ui32Count)
+{
+	const IMG_CHAR *pszOpName;
+
+	switch(eType)
+	{
+		case COMMAND_TYPE_MAP_RANGE:
+			pszOpName = "MAP_RANGE";
+			break;
+		case COMMAND_TYPE_UNMAP_RANGE:
+			pszOpName = "UNMAP_RANGE";
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapRange: Invalid type: %u",
+										eType));
+			return;
+	}
+
+	PDUMPCOMMENT("[SrvPFD] Op: %s Allocation: %u Start Page: %u Count: %u",
+									pszOpName,
+									ui32AllocationIndex,
+									ui32StartPage,
+									ui32Count);
+}
+
+#endif
+
+/* InsertTimeStampCommand:
+ * Insert a timestamp command into the circular buffer.
+ */
+static void InsertTimeStampCommand(IMG_UINT64 ui64Now)
+{
+	COMMAND_WRAPPER *psCommand;
+
+	psCommand = AcquireCBSlot();
+
+	psCommand->ui8Type = COMMAND_TYPE_TIMESTAMP;
+
+	TimeStampPack(&psCommand->u.sTimeStamp, ui64Now);
+}
+
+/* InsertMapAllCommand:
+ * Insert a "MAP_ALL" command for the given allocation into the circular buffer
+ */
+static void InsertMapAllCommand(IMG_UINT32 ui32AllocIndex)
+{
+	COMMAND_WRAPPER *psCommand;
+
+	psCommand = AcquireCBSlot();
+
+	psCommand->ui8Type = COMMAND_TYPE_MAP_ALL;
+	psCommand->u.sMapAll.uiAllocIndex = ui32AllocIndex;
+
+#if defined(PDUMP)
+	EmitPDumpMapUnmapAll(COMMAND_TYPE_MAP_ALL, ui32AllocIndex);
+#endif
+}
+
+/* InsertUnmapAllCommand:
+ * Insert a "UNMAP_ALL" command for the given allocation into the circular buffer
+ */
+static void InsertUnmapAllCommand(IMG_UINT32 ui32AllocIndex)
+{
+	COMMAND_WRAPPER *psCommand;
+
+	psCommand = AcquireCBSlot();
+
+	psCommand->ui8Type = COMMAND_TYPE_UNMAP_ALL;
+	psCommand->u.sUnmapAll.uiAllocIndex = ui32AllocIndex;
+
+#if defined(PDUMP)
+	EmitPDumpMapUnmapAll(COMMAND_TYPE_UNMAP_ALL, ui32AllocIndex);
+#endif
+}
+
+/* MapRangePack:
+ * Pack the given StartPage and Count values into the 40-bit representation
+ * in the MAP_RANGE command.
+ */
+static void MapRangePack(COMMAND_MAP_RANGE *psMapRange,
+						IMG_UINT32 ui32StartPage,
+						IMG_UINT32 ui32Count)
+{
+	IMG_UINT64 ui64Data;
+	IMG_UINT32 i;
+
+	/* we must encode the data into 40 bits:
+	 *   18 bits for the start page index
+	 *   12 bits for the range
+	*/
+
+	PVR_ASSERT(ui32StartPage <= MAP_RANGE_MAX_START);
+	PVR_ASSERT(ui32Count <= MAP_RANGE_MAX_RANGE);
+
+	ui64Data = (((IMG_UINT64) ui32StartPage) << 12) | ui32Count;
+
+	for(i = 0; i < IMG_ARR_NUM_ELEMS(psMapRange->aui8Data); i++)
+	{
+		psMapRange->aui8Data[i] = ui64Data & 0xFF;
+		ui64Data >>= 8;
+	}
+}
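+
+/* e.g. ui32StartPage = 5, ui32Count = 3 packs to (5 << 12) | 3 = 0x5003,
+ * stored least significant byte first in aui8Data[]. */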
+
+/* MapRangeUnpack:
+ * Unpack the StartPage and Count values from the 40-bit representation
+ * in the MAP_RANGE command.
+ */
+static void MapRangeUnpack(COMMAND_MAP_RANGE *psMapRange,
+						IMG_UINT32 *pui32StartPage,
+						IMG_UINT32 *pui32Count)
+{
+	IMG_UINT64 ui64Data = 0;
+	IMG_UINT32 i;
+
+	for(i = IMG_ARR_NUM_ELEMS(psMapRange->aui8Data); i > 0; i--)
+	{
+		ui64Data <<= 8;
+		ui64Data |= (IMG_UINT64) psMapRange->aui8Data[i - 1];
+	}
+
+	*pui32StartPage = (ui64Data >> 12);
+	*pui32Count = ui64Data & ((1 << 12) - 1);
+}
+
+/* InsertMapRangeCommand:
+ * Insert a MAP_RANGE command into the circular buffer with the given
+ * StartPage and Count values.
+ */
+static void InsertMapRangeCommand(IMG_UINT32 ui32AllocIndex,
+						IMG_UINT32 ui32StartPage,
+						IMG_UINT32 ui32Count)
+{
+	COMMAND_WRAPPER *psCommand;
+
+	psCommand = AcquireCBSlot();
+
+	psCommand->ui8Type = COMMAND_TYPE_MAP_RANGE;
+	psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;
+
+	MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);
+
+#if defined(PDUMP)
+	EmitPDumpMapUnmapRange(COMMAND_TYPE_MAP_RANGE,
+							ui32AllocIndex,
+							ui32StartPage,
+							ui32Count);
+#endif
+}
+
+/* InsertUnmapRangeCommand:
+ * Insert a UNMAP_RANGE command into the circular buffer with the given
+ * StartPage and Count values.
+ */
+static void InsertUnmapRangeCommand(IMG_UINT32 ui32AllocIndex,
+						IMG_UINT32 ui32StartPage,
+						IMG_UINT32 ui32Count)
+{
+	COMMAND_WRAPPER *psCommand;
+
+	psCommand = AcquireCBSlot();
+
+	psCommand->ui8Type = COMMAND_TYPE_UNMAP_RANGE;
+	psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;
+
+	MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);
+
+#if defined(PDUMP)
+	EmitPDumpMapUnmapRange(COMMAND_TYPE_UNMAP_RANGE,
+							ui32AllocIndex,
+							ui32StartPage,
+							ui32Count);
+#endif
+}
+
+/* InsertAllocationToList:
+ * Helper function for the allocation list.
+ * Inserts the given allocation at the head of the list, whose current head is
+ * pointed to by pui32ListHead
+ */
+static void InsertAllocationToList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
+{
+	RECORD_ALLOCATION *psAlloc;
+
+	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+	if(*pui32ListHead == END_OF_LIST)
+	{
+		/* list is currently empty, so just replace it */
+		*pui32ListHead = ui32Alloc;
+		psAlloc->ui32Next = psAlloc->ui32Prev = *pui32ListHead;
+	}
+	else
+	{
+		RECORD_ALLOCATION *psHeadAlloc;
+		RECORD_ALLOCATION *psTailAlloc;
+
+		psHeadAlloc = ALLOC_INDEX_TO_PTR(*pui32ListHead);
+		psTailAlloc = ALLOC_INDEX_TO_PTR(psHeadAlloc->ui32Prev);
+
+		/* make the new alloc point forwards to the previous head */
+		psAlloc->ui32Next = *pui32ListHead;
+		/* make the new alloc point backwards to the previous tail */
+		psAlloc->ui32Prev = psHeadAlloc->ui32Prev;
+
+		/* the head is now our new alloc */
+		*pui32ListHead = ui32Alloc;
+
+		/* the old head now points back to the new head */
+		psHeadAlloc->ui32Prev = *pui32ListHead;
+
+		/* the tail now points forward to the new head */
+		psTailAlloc->ui32Next = ui32Alloc;
+	}
+}
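+
+/* Because the list is circular, the head's ui32Prev always points at the
+ * tail, which lets GetOldestBusyAllocation() below reach the LRU entry
+ * in O(1) without walking the list. */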
+
+static void InsertAllocationToBusyList(IMG_UINT32 ui32Alloc)
+{
+	InsertAllocationToList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
+}
+
+/* RemoveAllocationFromList:
+ * Helper function for the allocation list.
+ * Removes the given allocation from the list, whose head is
+ * pointed to by pui32ListHead
+ */
+static void RemoveAllocationFromList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
+{
+	RECORD_ALLOCATION *psAlloc;
+
+	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+	/* if this is the only element in the list then just make the list empty */
+	if((*pui32ListHead == ui32Alloc) && (psAlloc->ui32Next == ui32Alloc))
+	{
+		*pui32ListHead = END_OF_LIST;
+	}
+	else
+	{
+		RECORD_ALLOCATION *psPrev, *psNext;
+
+		psPrev = ALLOC_INDEX_TO_PTR(psAlloc->ui32Prev);
+		psNext = ALLOC_INDEX_TO_PTR(psAlloc->ui32Next);
+
+		/* remove the allocation from the list */
+		psPrev->ui32Next = psAlloc->ui32Next;
+		psNext->ui32Prev = psAlloc->ui32Prev;
+
+		/* if this allocation is the head then update the head */
+		if(*pui32ListHead == ui32Alloc)
+		{
+			*pui32ListHead = psAlloc->ui32Prev;
+		}
+	}
+}
+
+static void RemoveAllocationFromBusyList(IMG_UINT32 ui32Alloc)
+{
+	RemoveAllocationFromList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
+}
+
+/* TouchBusyAllocation:
+ * Move the given allocation to the head of the list
+ */
+static void TouchBusyAllocation(IMG_UINT32 ui32Alloc)
+{
+	RemoveAllocationFromBusyList(ui32Alloc);
+	InsertAllocationToBusyList(ui32Alloc);
+}
+
+static INLINE IMG_BOOL IsAllocationListEmpty(IMG_UINT32 ui32ListHead)
+{
+	return ui32ListHead == END_OF_LIST;
+}
+
+/* GetOldestBusyAllocation:
+ * Returns the index of the oldest allocation in the MRU list
+ */
+static IMG_UINT32 GetOldestBusyAllocation(void)
+{
+	IMG_UINT32 ui32Alloc;
+	RECORD_ALLOCATION *psAlloc;
+
+	ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+
+	if(ui32Alloc == END_OF_LIST)
+	{
+		return END_OF_LIST;
+	}
+
+	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+	return psAlloc->ui32Prev;
+}
+
+static IMG_UINT32 GetFreeAllocation(void)
+{
+	IMG_UINT32 ui32Alloc;
+
+	ui32Alloc = GetOldestBusyAllocation();
+
+	return ui32Alloc;
+}
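+
+/* Note: there is no true free list; a "free" slot is simply the least
+ * recently used record, so the oldest allocation history is recycled
+ * once all ALLOCATION_LIST_NUM_ENTRIES records are in use. */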
+
+
+/* InitialiseAllocation:
+ * Initialise the given allocation structure with the given properties
+ */
+static void InitialiseAllocation(RECORD_ALLOCATION *psAlloc,
+							const IMG_CHAR *pszName,
+							IMG_UINT64 ui64Serial,
+							IMG_PID uiPID,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_UINT32 ui32Log2PageSize)
+{
+	OSStringNCopy(psAlloc->szName, pszName, sizeof(psAlloc->szName));
+	psAlloc->szName[sizeof(psAlloc->szName) - 1] = '\0';
+	psAlloc->ui64Serial = ui64Serial;
+	psAlloc->uiPID = uiPID;
+	psAlloc->sDevVAddr = sDevVAddr;
+	psAlloc->uiSize = uiSize;
+	psAlloc->ui32Log2PageSize = ui32Log2PageSize;
+	psAlloc->ui64CreationTime = OSClockns64();
+}
+
+/* CreateAllocation:
+ * Creates a new allocation with the given properties then outputs the
+ * index of the allocation
+ */
+static PVRSRV_ERROR CreateAllocation(const IMG_CHAR *pszName,
+							IMG_UINT64 ui64Serial,
+							IMG_PID uiPID,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_BOOL bAutoPurge,
+							IMG_UINT32 *puiAllocationIndex)
+{
+	IMG_UINT32 ui32Alloc;
+	RECORD_ALLOCATION *psAlloc;
+
+	ui32Alloc = GetFreeAllocation();
+
+	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+	InitialiseAllocation(ALLOC_INDEX_TO_PTR(ui32Alloc),
+						pszName,
+						ui64Serial,
+						uiPID,
+						sDevVAddr,
+						uiSize,
+						ui32Log2PageSize);
+
+	/* put the newly initialised allocation at the front of the MRU list */
+	TouchBusyAllocation(ui32Alloc);
+
+	*puiAllocationIndex = ui32Alloc;
+
+#if defined(PDUMP)
+	EmitPDumpAllocation(ui32Alloc, psAlloc);
+#endif
+
+	return PVRSRV_OK;
+}
+
+/* MatchAllocation:
+ * Tests if the allocation at the given index matches the supplied properties.
+ * Returns IMG_TRUE if it is a match, otherwise IMG_FALSE.
+ */
+static IMG_BOOL MatchAllocation(IMG_UINT32 ui32AllocationIndex,
+						IMG_UINT64 ui64Serial,
+						IMG_DEV_VIRTADDR sDevVAddr,
+						IMG_DEVMEM_SIZE_T uiSize,
+						const IMG_CHAR *pszName,
+						IMG_UINT32 ui32Log2PageSize,
+						IMG_PID uiPID)
+{
+	RECORD_ALLOCATION *psAlloc;
+
+	psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocationIndex);
+
+	return 	(psAlloc->ui64Serial == ui64Serial) &&
+			(psAlloc->sDevVAddr.uiAddr == sDevVAddr.uiAddr) &&
+			(psAlloc->uiSize == uiSize) &&
+			(psAlloc->ui32Log2PageSize == ui32Log2PageSize) &&
+			(OSStringCompare(psAlloc->szName, pszName) == 0);
+}
+
+/* FindOrCreateAllocation:
+ * Convenience function.
+ * Given a set of allocation properties (serial, DevVAddr, size, name, etc),
+ * this function will look for an existing record of this allocation and
+ * create the allocation if there is no existing record
+ */
+static PVRSRV_ERROR FindOrCreateAllocation(IMG_UINT32 ui32AllocationIndexHint,
+							IMG_UINT64 ui64Serial,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char *pszName,
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_PID uiPID,
+							IMG_BOOL bSparse,
+							IMG_UINT32 *pui32AllocationIndexOut,
+							IMG_BOOL *pbCreated)
+{
+	IMG_UINT32 ui32AllocationIndex;
+	PVRSRV_ERROR eError;
+
+	if(ui32AllocationIndexHint != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE)
+	{
+		IMG_BOOL bHaveAllocation;
+
+		/* first, try to match against the index given by the client.
+		 * if the caller provided a hint but the allocation record is no longer
+		 * there, it must have been purged, so go ahead and create a new allocation
+		 */
+		bHaveAllocation = MatchAllocation(ui32AllocationIndexHint,
+								ui64Serial,
+								sDevVAddr,
+								uiSize,
+								pszName,
+								ui32Log2PageSize,
+								uiPID);
+		if(bHaveAllocation)
+		{
+			*pbCreated = IMG_FALSE;
+			*pui32AllocationIndexOut = ui32AllocationIndexHint;
+			return PVRSRV_OK;
+		}
+	}
+
+	/* if there is no record of the allocation then we
+	 * create it now
+	 */
+	eError = CreateAllocation(pszName,
+					ui64Serial,
+					uiPID,
+					sDevVAddr,
+					uiSize,
+					ui32Log2PageSize,
+					IMG_TRUE,
+					&ui32AllocationIndex);
+
+	if(eError == PVRSRV_OK)
+	{
+		*pui32AllocationIndexOut = ui32AllocationIndex;
+		*pbCreated = IMG_TRUE;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed to create record for allocation %s",
+								__func__,
+								pszName));
+	}
+
+	return eError;
+}
+
+/* GenerateMapUnmapCommandsForSparsePMR:
+ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the PMR's
+ * current mapping table
+ *
+ * PMR: The PMR whose mapping table to read.
+ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP range commands to.
+ * bMap: Set to TRUE for mapping or IMG_FALSE for unmapping
+ *
+ * This function goes through every page in the PMR's mapping table and looks for
+ * virtually contiguous ranges to record as being mapped or unmapped.
+ */
+static void GenerateMapUnmapCommandsForSparsePMR(PMR *psPMR,
+							IMG_UINT32 ui32AllocIndex,
+							IMG_BOOL bMap)
+{
+	PMR_MAPPING_TABLE *psMappingTable;
+	IMG_UINT32 ui32DonePages = 0;
+	IMG_UINT32 ui32NumPages;
+	IMG_UINT32 i;
+	IMG_BOOL bInARun = IMG_FALSE;
+	IMG_UINT32 ui32CurrentStart = 0;
+	IMG_UINT32 ui32RunCount = 0;
+
+	psMappingTable = PMR_GetMappigTable(psPMR);
+	ui32NumPages = psMappingTable->ui32NumPhysChunks;
+
+	if(ui32NumPages == 0)
+	{
+		/* nothing to do */
+		return;
+	}
+
+	for(i = 0; i < psMappingTable->ui32NumVirtChunks; i++)
+	{
+		if(psMappingTable->aui32Translation[i] != TRANSLATION_INVALID)
+		{
+			if(!bInARun)
+			{
+				bInARun = IMG_TRUE;
+				ui32CurrentStart = i;
+				ui32RunCount = 1;
+			}
+			else
+			{
+				ui32RunCount++;
+			}
+		}
+
+		if(bInARun)
+		{
+			/* test if we need to end this current run and generate the command,
+			 * either because the next page is not virtually contiguous
+			 * to the current page, we have reached the maximum range,
+			 * or this is the last page in the mapping table
+			 */
+			if((psMappingTable->aui32Translation[i] == TRANSLATION_INVALID) ||
+						(ui32RunCount == MAP_RANGE_MAX_RANGE) ||
+						(i == (psMappingTable->ui32NumVirtChunks - 1)))
+			{
+				if(bMap)
+				{
+					InsertMapRangeCommand(ui32AllocIndex,
+										ui32CurrentStart,
+										ui32RunCount);
+				}
+				else
+				{
+					InsertUnmapRangeCommand(ui32AllocIndex,
+										ui32CurrentStart,
+										ui32RunCount);
+				}
+
+				ui32DonePages += ui32RunCount;
+
+				if(ui32DonePages == ui32NumPages)
+				{
+					 break;
+				}
+
+				bInARun = IMG_FALSE;
+			}
+		}
+	}
+
+}
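+
+/* e.g. a mapping table of [valid, valid, invalid, valid] emits two range
+ * commands: start 0 / count 2, then start 3 / count 1. */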
+
+/* GenerateMapUnmapCommandsForChangeList:
+ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the
+ * list of page change (page map or page unmap) indices given.
+ *
+ * ui32NumPages: Number of pages which have changed.
+ * pui32PageList: List of indices of the pages which have changed.
+ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP range commands to.
+ * bMap: Set to TRUE for mapping or IMG_FALSE for unmapping
+ *
+ * This function goes through every page in the list and looks for
+ * virtually contiguous ranges to record as being mapped or unmapped.
+ */
+static void GenerateMapUnmapCommandsForChangeList(IMG_UINT32 ui32NumPages,
+							IMG_UINT32 *pui32PageList,
+							IMG_UINT32 ui32AllocIndex,
+							IMG_BOOL bMap)
+{
+	IMG_UINT32 i;
+	IMG_BOOL bInARun = IMG_FALSE;
+	IMG_UINT32 ui32CurrentStart = 0;
+	IMG_UINT32 ui32RunCount = 0;
+
+	for(i = 0; i < ui32NumPages; i++)
+	{
+		if(!bInARun)
+		{
+			bInARun = IMG_TRUE;
+			ui32CurrentStart = pui32PageList[i];
+		}
+
+		ui32RunCount++;
+
+		/* we flush if:
+		 * - the next page in the list is not one greater than the current page
+		 * - this is the last page in the list
+		 * - we have reached the maximum range size
+		 */
+		if((i == (ui32NumPages - 1)) ||
+			((pui32PageList[i] + 1) != pui32PageList[i + 1]) ||
+			(ui32RunCount == MAP_RANGE_MAX_RANGE))
+		{
+			if(bMap)
+			{
+				InsertMapRangeCommand(ui32AllocIndex,
+									ui32CurrentStart,
+									ui32RunCount);
+			}
+			else
+			{
+				InsertUnmapRangeCommand(ui32AllocIndex,
+									ui32CurrentStart,
+									ui32RunCount);
+			}
+
+			bInARun = IMG_FALSE;
+			ui32RunCount = 0;
+		}
+	}
+}
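+
+/* e.g. a change list of pages {4, 5, 9} emits two commands:
+ * start 4 / count 2, then start 9 / count 1. */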
+
+/* DevicememHistoryMapKM:
+ * Entry point for when an allocation is mapped into the GPU MMU
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ *                      We will use this as a short-cut to find the allocation
+ *                      in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ *                          This may be a new value if we just created the
+ *                          allocation record.
+ */
+PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR,
+							IMG_UINT32 ui32Offset,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut)
+{
+	IMG_BOOL bSparse = PMR_IsSparse(psPMR);
+	IMG_UINT64 ui64Serial;
+	IMG_PID uiPID = OSGetCurrentProcessID();
+	PVRSRV_ERROR eError;
+	IMG_BOOL bCreated;
+
+	if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+				!CHECK_ALLOC_INDEX(ui32AllocationIndex))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+								__func__,
+								ui32AllocationIndex));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PMRGetUID(psPMR, &ui64Serial);
+
+	DevicememHistoryLock();
+
+	eError = FindOrCreateAllocation(ui32AllocationIndex,
+						ui64Serial,
+						sDevVAddr,
+						uiSize,
+						szName,
+						ui32Log2PageSize,
+						uiPID,
+						bSparse,
+						&ui32AllocationIndex,
+						&bCreated);
+
+	if((eError == PVRSRV_OK) && !bCreated)
+	{
+		/* touch the allocation so it goes to the head of our MRU list */
+		TouchBusyAllocation(ui32AllocationIndex);
+	}
+	else if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+									__func__,
+									szName,
+									PVRSRVGETERRORSTRING(eError)));
+		goto out_unlock;
+	}
+
+	if(!bSparse)
+	{
+		InsertMapAllCommand(ui32AllocationIndex);
+	}
+	else
+	{
+		GenerateMapUnmapCommandsForSparsePMR(psPMR,
+								ui32AllocationIndex,
+								IMG_TRUE);
+	}
+
+	InsertTimeStampCommand(OSClockns64());
+
+	*pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+	DevicememHistoryUnlock();
+
+	return eError;
+}
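+
+/* The net effect of a successful call above is a single MapAll command (or
+ * a series of MapRange commands for a sparse PMR) followed by a timestamp
+ * in the circular buffer, so a later query can place the event in time.
+ */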
+
+static void VRangeInsertMapUnmapCommands(IMG_BOOL bMap,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_DEV_VIRTADDR sBaseDevVAddr,
+							IMG_UINT32 ui32StartPage,
+							IMG_UINT32 ui32NumPages,
+							const IMG_CHAR *pszName)
+{
+	while(ui32NumPages > 0)
+	{
+		IMG_UINT32 ui32PagesToAdd;
+
+		ui32PagesToAdd = MIN(ui32NumPages, MAP_RANGE_MAX_RANGE);
+
+		if(ui32StartPage > MAP_RANGE_MAX_START)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "Cannot record %s range beginning at page "
+									"%u on allocation %s",
+									bMap ? "map" : "unmap",
+									ui32StartPage,
+									pszName));
+			return;
+		}
+
+		if(bMap)
+		{
+			InsertMapRangeCommand(ui32AllocationIndex,
+								ui32StartPage,
+								ui32PagesToAdd);
+		}
+		else
+		{
+			InsertUnmapRangeCommand(ui32AllocationIndex,
+								ui32StartPage,
+								ui32PagesToAdd);
+		}
+
+		ui32StartPage += ui32PagesToAdd;
+		ui32NumPages -= ui32PagesToAdd;
+	}
+}
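+
+/* Chunking example (illustrative): if MAP_RANGE_MAX_RANGE were 8, a request
+ * covering pages 0..19 would be recorded as three commands spanning pages
+ * 0-7, 8-15 and 16-19. Ranges starting beyond MAP_RANGE_MAX_START cannot be
+ * encoded in the packed command format, so they are dropped with a warning.
+ */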
+
+PVRSRV_ERROR DevicememHistoryMapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+						IMG_UINT32 ui32StartPage,
+						IMG_UINT32 ui32NumPages,
+						IMG_DEVMEM_SIZE_T uiAllocSize,
+						const IMG_CHAR szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+						IMG_UINT32 ui32Log2PageSize,
+						IMG_UINT32 ui32AllocationIndex,
+						IMG_UINT32 *pui32AllocationIndexOut)
+{
+	IMG_PID uiPID = OSGetCurrentProcessID();
+	PVRSRV_ERROR eError;
+	IMG_BOOL bCreated;
+
+	if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+				!CHECK_ALLOC_INDEX(ui32AllocationIndex))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+								__func__,
+							ui32AllocationIndex));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	DevicememHistoryLock();
+
+	eError = FindOrCreateAllocation(ui32AllocationIndex,
+						0,
+						sBaseDevVAddr,
+						uiAllocSize,
+						szName,
+						ui32Log2PageSize,
+						uiPID,
+						IMG_FALSE,
+						&ui32AllocationIndex,
+						&bCreated);
+
+	if((eError == PVRSRV_OK) && !bCreated)
+	{
+		/* touch the allocation so it goes to the head of our MRU list */
+		TouchBusyAllocation(ui32AllocationIndex);
+	}
+	else if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+									__func__,
+									szName,
+									PVRSRVGETERRORSTRING(eError)));
+		goto out_unlock;
+	}
+
+	VRangeInsertMapUnmapCommands(IMG_TRUE,
+						ui32AllocationIndex,
+						sBaseDevVAddr,
+						ui32StartPage,
+						ui32NumPages,
+						szName);
+
+	*pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+	DevicememHistoryUnlock();
+
+	return eError;
+}
+
+PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+						IMG_UINT32 ui32StartPage,
+						IMG_UINT32 ui32NumPages,
+						IMG_DEVMEM_SIZE_T uiAllocSize,
+						const IMG_CHAR szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+						IMG_UINT32 ui32Log2PageSize,
+						IMG_UINT32 ui32AllocationIndex,
+						IMG_UINT32 *pui32AllocationIndexOut)
+{
+	IMG_PID uiPID = OSGetCurrentProcessID();
+	PVRSRV_ERROR eError;
+	IMG_BOOL bCreated;
+
+	if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+				!CHECK_ALLOC_INDEX(ui32AllocationIndex))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+								__func__,
+							ui32AllocationIndex));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	DevicememHistoryLock();
+
+	eError = FindOrCreateAllocation(ui32AllocationIndex,
+						0,
+						sBaseDevVAddr,
+						uiAllocSize,
+						szName,
+						ui32Log2PageSize,
+						uiPID,
+						IMG_FALSE,
+						&ui32AllocationIndex,
+						&bCreated);
+
+	if((eError == PVRSRV_OK) && !bCreated)
+	{
+		/* touch the allocation so it goes to the head of our MRU list */
+		TouchBusyAllocation(ui32AllocationIndex);
+	}
+	else if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+									__func__,
+									szName,
+									PVRSRVGETERRORSTRING(eError)));
+		goto out_unlock;
+	}
+
+	VRangeInsertMapUnmapCommands(IMG_FALSE,
+						ui32AllocationIndex,
+						sBaseDevVAddr,
+						ui32StartPage,
+						ui32NumPages,
+						szName);
+
+	*pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+	DevicememHistoryUnlock();
+
+	return eError;
+}
+
+/* DevicememHistoryUnmapKM:
+ * Entry point for when an allocation is unmapped from the GPU MMU
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ *                      We will use this as a short-cut to find the allocation
+ *                      in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ *                          This may be a new value if we just created the
+ *                          allocation record.
+ */
+PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR,
+							IMG_UINT32 ui32Offset,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut)
+{
+	IMG_BOOL bSparse = PMR_IsSparse(psPMR);
+	IMG_UINT64 ui64Serial;
+	IMG_PID uiPID = OSGetCurrentProcessID();
+	PVRSRV_ERROR eError;
+	IMG_BOOL bCreated;
+
+	if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+				!CHECK_ALLOC_INDEX(ui32AllocationIndex))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+								__func__,
+								ui32AllocationIndex));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PMRGetUID(psPMR, &ui64Serial);
+
+	DevicememHistoryLock();
+
+	eError = FindOrCreateAllocation(ui32AllocationIndex,
+						ui64Serial,
+						sDevVAddr,
+						uiSize,
+						szName,
+						ui32Log2PageSize,
+						uiPID,
+						bSparse,
+						&ui32AllocationIndex,
+						&bCreated);
+
+	if((eError == PVRSRV_OK) && !bCreated)
+	{
+		/* touch the allocation so it goes to the head of our MRU list */
+		TouchBusyAllocation(ui32AllocationIndex);
+	}
+	else if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+									__func__,
+									szName,
+									PVRSRVGETERRORSTRING(eError)));
+		goto out_unlock;
+	}
+
+	if(!bSparse)
+	{
+		InsertUnmapAllCommand(ui32AllocationIndex);
+	}
+	else
+	{
+		GenerateMapUnmapCommandsForSparsePMR(psPMR,
+								ui32AllocationIndex,
+								IMG_FALSE);
+	}
+
+	InsertTimeStampCommand(OSClockns64());
+
+	*pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+	DevicememHistoryUnlock();
+
+	return eError;
+}
+
+/* DevicememHistorySparseChangeKM:
+ * Entry point for when a sparse allocation is changed, such that some of the
+ * pages within the sparse allocation are mapped or unmapped.
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocPageCount: Number of pages which have been mapped.
+ * paui32AllocPageIndices: Indices of pages which have been mapped.
+ * ui32FreePageCount: Number of pages which have been unmapped.
+ * paui32FreePageIndices: Indices of pages which have been unmapped.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ *                      We will use this as a short-cut to find the allocation
+ *                      in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ *                          This may be a new value if we just created the
+ *                          allocation record.
+ */
+PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR,
+							IMG_UINT32 ui32Offset,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocPageCount,
+							IMG_UINT32 *paui32AllocPageIndices,
+							IMG_UINT32 ui32FreePageCount,
+							IMG_UINT32 *paui32FreePageIndices,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut)
+{
+	IMG_UINT64 ui64Serial;
+	IMG_PID uiPID = OSGetCurrentProcessID();
+	PVRSRV_ERROR eError;
+	IMG_BOOL bCreated;
+
+	if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+				!CHECK_ALLOC_INDEX(ui32AllocationIndex))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+								__func__,
+								ui32AllocationIndex));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PMRGetUID(psPMR, &ui64Serial);
+
+	DevicememHistoryLock();
+
+	eError = FindOrCreateAllocation(ui32AllocationIndex,
+						ui64Serial,
+						sDevVAddr,
+						uiSize,
+						szName,
+						ui32Log2PageSize,
+						uiPID,
+						IMG_TRUE /* bSparse */,
+						&ui32AllocationIndex,
+						&bCreated);
+
+	if((eError == PVRSRV_OK) && !bCreated)
+	{
+		/* touch the allocation so it goes to the head of our MRU list */
+		TouchBusyAllocation(ui32AllocationIndex);
+	}
+	else if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+									__func__,
+									szName,
+									PVRSRVGETERRORSTRING(eError)));
+		goto out_unlock;
+	}
+
+	GenerateMapUnmapCommandsForChangeList(ui32AllocPageCount,
+							paui32AllocPageIndices,
+							ui32AllocationIndex,
+							IMG_TRUE);
+
+	GenerateMapUnmapCommandsForChangeList(ui32FreePageCount,
+							paui32FreePageIndices,
+							ui32AllocationIndex,
+							IMG_FALSE);
+
+	InsertTimeStampCommand(OSClockns64());
+
+	*pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+	DevicememHistoryUnlock();
+
+	return eError;
+}
+
+/* CircularBufferIterateStart:
+ * Initialise local state for iterating over the circular buffer
+ */
+static void CircularBufferIterateStart(IMG_UINT32 *pui32Head, IMG_UINT32 *pui32Iter)
+{
+	*pui32Head = gsDevicememHistoryData.sRecords.ui32Head;
+
+	if(*pui32Head != 0)
+	{
+		*pui32Iter = *pui32Head - 1;
+	}
+	else
+	{
+		*pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
+	}
+}
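+
+/* For illustration: if ui32Head is 0, the most recently written entry is in
+ * the last slot (CIRCULAR_BUFFER_NUM_COMMANDS - 1); otherwise it is at
+ * ui32Head - 1. Iteration therefore begins at the newest command and walks
+ * backwards in time.
+ */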
+
+/* CircularBufferIteratePrevious:
+ * Iterate to the previous item in the circular buffer.
+ * This is called repeatedly to iterate over the whole circular buffer.
+ */
+static COMMAND_WRAPPER *CircularBufferIteratePrevious(IMG_UINT32 ui32Head,
+							IMG_UINT32 *pui32Iter,
+							COMMAND_TYPE *peType,
+							IMG_BOOL *pbLast)
+{
+	IMG_UINT8 *pui8Header;
+	COMMAND_WRAPPER *psOut = NULL;
+
+	psOut = gsDevicememHistoryData.sRecords.pasCircularBuffer + *pui32Iter;
+
+	pui8Header = (IMG_UINT8 *) psOut;
+
+	/* sanity check the command looks valid.
+	 * this condition should never happen, but check for it anyway
+	 * and try to handle it
+	 */
+	if(*pui8Header >= COMMAND_TYPE_COUNT)
+	{
+		/* invalid header detected. Circular buffer corrupted? */
+		PVR_DPF((PVR_DBG_ERROR, "CircularBufferIteratePrevious: "
+							"Invalid header: %u",
+							*pui8Header));
+		*pbLast = IMG_TRUE;
+		return NULL;
+	}
+
+	*peType = *pui8Header;
+
+	if(*pui32Iter != 0)
+	{
+		(*pui32Iter)--;
+	}
+	else
+	{
+		*pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
+	}
+
+	/* inform the caller this is the last command if either we have reached
+	 * the head (where we started) or if we have reached an empty command,
+	 * which means we have covered all populated entries
+	 */
+	if((*pui32Iter == ui32Head) || (*peType == COMMAND_TYPE_NONE))
+	{
+		/* this is the final iteration */
+		*pbLast = IMG_TRUE;
+	}
+
+	return psOut;
+}
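+
+/* Iteration ends either when the iterator wraps back around to the head or
+ * when a COMMAND_TYPE_NONE entry is reached: the buffer is zero-initialised
+ * (see InitialiseRecords below), so an empty slot marks the boundary of the
+ * populated region.
+ */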
+
+/* MapUnmapCommandGetInfo:
+ * Helper function to get the address and mapping information from a MAP_ALL, UNMAP_ALL,
+ * MAP_RANGE or UNMAP_RANGE command
+ */
+static void MapUnmapCommandGetInfo(COMMAND_WRAPPER *psCommand,
+					COMMAND_TYPE eType,
+					IMG_DEV_VIRTADDR *psDevVAddrStart,
+					IMG_DEV_VIRTADDR *psDevVAddrEnd,
+					IMG_BOOL *pbMap,
+					IMG_UINT32 *pui32AllocIndex)
+{
+	if((eType == COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL))
+	{
+		COMMAND_MAP_ALL *psMapAll = &psCommand->u.sMapAll;
+		RECORD_ALLOCATION *psAlloc;
+
+		*pbMap = (eType == COMMAND_TYPE_MAP_ALL);
+		*pui32AllocIndex = psMapAll->uiAllocIndex;
+
+		psAlloc = ALLOC_INDEX_TO_PTR(psMapAll->uiAllocIndex);
+
+		*psDevVAddrStart = psAlloc->sDevVAddr;
+		psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr + psAlloc->uiSize - 1;
+	}
+	else if((eType == COMMAND_TYPE_MAP_RANGE) || (eType == COMMAND_TYPE_UNMAP_RANGE))
+	{
+		COMMAND_MAP_RANGE *psMapRange = &psCommand->u.sMapRange;
+		RECORD_ALLOCATION *psAlloc;
+		IMG_UINT32 ui32StartPage, ui32Count;
+
+		*pbMap = (eType == COMMAND_TYPE_MAP_RANGE);
+		*pui32AllocIndex = psMapRange->uiAllocIndex;
+
+		psAlloc = ALLOC_INDEX_TO_PTR(psMapRange->uiAllocIndex);
+
+		MapRangeUnpack(psMapRange, &ui32StartPage, &ui32Count);
+
+		psDevVAddrStart->uiAddr = psAlloc->sDevVAddr.uiAddr +
+				((1U << psAlloc->ui32Log2PageSize) * ui32StartPage);
+
+		psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr +
+				((1U << psAlloc->ui32Log2PageSize) * ui32Count) - 1;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid command type: %u",
+								__func__,
+								eType));
+	}
+}
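+
+/* Worked example of the range decode above (illustrative values): for a 4K
+ * page allocation (ui32Log2PageSize = 12) based at 0x100000, a range of 3
+ * pages starting at page 2 yields
+ *	start = 0x100000 + (1 << 12) * 2 = 0x102000
+ *	end   = 0x102000 + (1 << 12) * 3 - 1 = 0x104FFF
+ * i.e. the reported end address is inclusive.
+ */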
+
+/* DevicememHistoryQuery:
+ * Entry point for rgxdebug to look up addresses relating to a page fault
+ */
+IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
+                               DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
+                               IMG_UINT32 ui32PageSizeBytes,
+                               IMG_BOOL bMatchAnyAllocInPage)
+{
+	IMG_UINT32 ui32Head, ui32Iter;
+	COMMAND_TYPE eType = COMMAND_TYPE_NONE;
+	COMMAND_WRAPPER *psCommand = NULL;
+	IMG_BOOL bLast = IMG_FALSE;
+	IMG_UINT64 ui64StartTime = OSClockns64();
+	IMG_UINT64 ui64TimeNs = 0;
+
+	/* initialise the results count for the caller */
+	psQueryOut->ui32NumResults = 0;
+
+	DevicememHistoryLock();
+
+	/* if the search is constrained to a particular PID then we
+	 * first search the list of allocations to see if this
+	 * PID is known to us
+	 */
+	if(psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY)
+	{
+		IMG_UINT32 ui32Alloc;
+		ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+
+		while(ui32Alloc != END_OF_LIST)
+		{
+			RECORD_ALLOCATION *psAlloc;
+
+			psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+			if(psAlloc->uiPID == psQueryIn->uiPID)
+			{
+				goto found_pid;
+			}
+
+			/* advance to the next allocation; the list is circular, so
+			 * arriving back at the head means the whole list was searched
+			 */
+			ui32Alloc = psAlloc->ui32Next;
+
+			if(ui32Alloc == gsDevicememHistoryData.sRecords.ui32AllocationsListHead)
+			{
+				/* gone through whole list */
+				break;
+			}
+		}
+
+		/* PID not found, so we do not have any suitable data for this
+		 * page fault
+		 */
+		goto out_unlock;
+	}
+
+found_pid:
+
+	CircularBufferIterateStart(&ui32Head, &ui32Iter);
+
+	while(!bLast)
+	{
+		psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast);
+
+		if(eType == COMMAND_TYPE_TIMESTAMP)
+		{
+			ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp);
+			continue;
+		}
+
+		if((eType == COMMAND_TYPE_MAP_ALL) ||
+			(eType == COMMAND_TYPE_UNMAP_ALL) ||
+			(eType == COMMAND_TYPE_MAP_RANGE) ||
+			(eType == COMMAND_TYPE_UNMAP_RANGE))
+		{
+			RECORD_ALLOCATION *psAlloc;
+			IMG_DEV_VIRTADDR sAllocStartAddrOrig, sAllocEndAddrOrig;
+			IMG_DEV_VIRTADDR sAllocStartAddr, sAllocEndAddr;
+			IMG_BOOL bMap;
+			IMG_UINT32 ui32AllocIndex;
+
+			MapUnmapCommandGetInfo(psCommand,
+							eType,
+							&sAllocStartAddrOrig,
+							&sAllocEndAddrOrig,
+							&bMap,
+							&ui32AllocIndex);
+
+			sAllocStartAddr = sAllocStartAddrOrig;
+			sAllocEndAddr = sAllocEndAddrOrig;
+
+			psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex);
+
+			/* skip this command if we need to search within
+			 * a particular PID, and this allocation is not from
+			 * that PID
+			 */
+			if((psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY) &&
+				(psAlloc->uiPID != psQueryIn->uiPID))
+			{
+				continue;
+			}
+
+			/* if the allocation was created after this event, then this
+			 * event must be for an old/removed allocation, so skip it
+			 */
+			if(DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs)
+			{
+				continue;
+			}
+
+			/* if the caller wants us to match any allocation in the
+			 * same page as the allocation then tweak the real start/end
+			 * addresses of the allocation here
+			 */
+			if(bMatchAnyAllocInPage)
+			{
+				sAllocStartAddr.uiAddr = sAllocStartAddr.uiAddr & ~(IMG_UINT64) (ui32PageSizeBytes - 1);
+				sAllocEndAddr.uiAddr = (sAllocEndAddr.uiAddr + ui32PageSizeBytes - 1) & ~(IMG_UINT64) (ui32PageSizeBytes - 1);
+			}
+
+			if((psQueryIn->sDevVAddr.uiAddr >= sAllocStartAddr.uiAddr) &&
+				(psQueryIn->sDevVAddr.uiAddr <  sAllocEndAddr.uiAddr))
+			{
+				DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult = &psQueryOut->sResults[psQueryOut->ui32NumResults];
+
+				OSStringNCopy(psResult->szString, psAlloc->szName, sizeof(psResult->szString));
+				psResult->szString[DEVICEMEM_HISTORY_TEXT_BUFSZ - 1] = '\0';
+				psResult->sBaseDevVAddr = psAlloc->sDevVAddr;
+				psResult->uiSize = psAlloc->uiSize;
+				psResult->bMap = bMap;
+				psResult->ui64Age = _CalculateAge(ui64StartTime, ui64TimeNs, TIME_STAMP_MASK);
+				psResult->ui64When = ui64TimeNs;
+				/* write the responsible PID in the placeholder */
+				psResult->sProcessInfo.uiPID = psAlloc->uiPID;
+
+				if((eType == COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL))
+				{
+					psResult->bRange = IMG_FALSE;
+					psResult->bAll = IMG_TRUE;
+				}
+				else
+				{
+					psResult->bRange = IMG_TRUE;
+					MapRangeUnpack(&psCommand->u.sMapRange,
+										&psResult->ui32StartPage,
+										&psResult->ui32PageCount);
+					psResult->bAll = (psResult->ui32PageCount * (1U << psAlloc->ui32Log2PageSize))
+											== psAlloc->uiSize;
+					psResult->sMapStartAddr = sAllocStartAddrOrig;
+					psResult->sMapEndAddr = sAllocEndAddrOrig;
+				}
+
+				psQueryOut->ui32NumResults++;
+
+				if(psQueryOut->ui32NumResults == DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS)
+				{
+					break;
+				}
+			}
+		}
+	}
+
+out_unlock:
+	DevicememHistoryUnlock();
+
+	return psQueryOut->ui32NumResults > 0;
+}
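+
+/* Hedged usage sketch (hypothetical caller, e.g. from rgxdebug; values and
+ * helpers outside this file are assumptions):
+ *
+ *	DEVICEMEM_HISTORY_QUERY_IN sIn;
+ *	DEVICEMEM_HISTORY_QUERY_OUT sOut;
+ *
+ *	sIn.uiPID = DEVICEMEM_HISTORY_PID_ANY;
+ *	sIn.sDevVAddr = sFaultAddress;
+ *	if (DevicememHistoryQuery(&sIn, &sOut, ui32PageSizeBytes, IMG_TRUE))
+ *	{
+ *		// inspect sOut.sResults[0 .. sOut.ui32NumResults - 1]
+ *	}
+ */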
+
+static void DeviceMemHistoryFmt(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN],
+							IMG_PID uiPID,
+							const IMG_CHAR *pszName,
+							const IMG_CHAR *pszAction,
+							IMG_DEV_VIRTADDR sDevVAddrStart,
+							IMG_DEV_VIRTADDR sDevVAddrEnd,
+							IMG_UINT64 ui64TimeNs)
+{
+
+	szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN - 1] = '\0';
+	OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+				/* PID NAME MAP/UNMAP MIN-MAX SIZE AbsUS AgeUS*/
+				"%04u %-40s %-10s "
+				IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC " "
+				"0x%08llX "
+				"%013llu", /* 13 digits is over 2 hours of ns */
+				uiPID,
+				pszName,
+				pszAction,
+				sDevVAddrStart.uiAddr,
+				sDevVAddrEnd.uiAddr,
+				sDevVAddrEnd.uiAddr - sDevVAddrStart.uiAddr,
+				ui64TimeNs);
+}
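+
+/* For illustration, a line formatted above looks roughly like:
+ *   0042 MyBuffer    MapRange   0000000000102000-0000000000104FFF 0x00002FFF 0000001234567
+ * where the exact address format depends on IMG_DEV_VIRTADDR_FMTSPEC and
+ * the size column is the end-start difference printed in hex.
+ */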
+
+static void DeviceMemHistoryFmtHeader(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN])
+{
+	OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+				"%-4s %-40s %-6s   %10s   %10s   %8s %13s",
+				"PID",
+				"NAME",
+				"ACTION",
+				"ADDR MIN",
+				"ADDR MAX",
+				"SIZE",
+				"ABS NS");
+}
+
+static const char *CommandTypeToString(COMMAND_TYPE eType)
+{
+	switch(eType)
+	{
+		case COMMAND_TYPE_MAP_ALL:
+			return "MapAll";
+		case COMMAND_TYPE_UNMAP_ALL:
+			return "UnmapAll";
+		case COMMAND_TYPE_MAP_RANGE:
+			return "MapRange";
+		case COMMAND_TYPE_UNMAP_RANGE:
+			return "UnmapRange";
+		case COMMAND_TYPE_TIMESTAMP:
+			return "TimeStamp";
+		default:
+			return "???";
+	}
+}
+
+static void DevicememHistoryPrintAll(void *pvFilePtr, OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+	IMG_UINT32 ui32Iter;
+	IMG_UINT32 ui32Head;
+	IMG_BOOL bLast = IMG_FALSE;
+	IMG_UINT64 ui64TimeNs = 0;
+	IMG_UINT64 ui64StartTime = OSClockns64();
+
+	DeviceMemHistoryFmtHeader(szBuffer);
+	pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+
+	CircularBufferIterateStart(&ui32Head, &ui32Iter);
+
+	while(!bLast)
+	{
+		COMMAND_WRAPPER *psCommand;
+		COMMAND_TYPE eType = COMMAND_TYPE_NONE;
+
+		psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast);
+
+		if(eType == COMMAND_TYPE_TIMESTAMP)
+		{
+			ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp);
+			continue;
+		}
+
+		if((eType == COMMAND_TYPE_MAP_ALL) ||
+			(eType == COMMAND_TYPE_UNMAP_ALL) ||
+			(eType == COMMAND_TYPE_MAP_RANGE) ||
+			(eType == COMMAND_TYPE_UNMAP_RANGE))
+		{
+			RECORD_ALLOCATION *psAlloc;
+			IMG_DEV_VIRTADDR sDevVAddrStart, sDevVAddrEnd;
+			IMG_BOOL bMap;
+			IMG_UINT32 ui32AllocIndex;
+
+			MapUnmapCommandGetInfo(psCommand,
+								eType,
+								&sDevVAddrStart,
+								&sDevVAddrEnd,
+								&bMap,
+								&ui32AllocIndex);
+
+			psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex);
+
+			if(DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs)
+			{
+				/* if this event relates to an allocation we
+				 * are no longer tracking then do not print it
+				 */
+				continue;
+			}
+
+			DeviceMemHistoryFmt(szBuffer,
+								psAlloc->uiPID,
+								psAlloc->szName,
+								CommandTypeToString(eType),
+								sDevVAddrStart,
+								sDevVAddrEnd,
+								ui64TimeNs);
+
+			pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+		}
+	}
+
+	pfnOSStatsPrintf(pvFilePtr, "\nTimestamp reference: %013llu\n", ui64StartTime);
+}
+
+static void DevicememHistoryPrintAllWrapper(void *pvFilePtr, void *pvData, OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+	DevicememHistoryLock();
+	DevicememHistoryPrintAll(pvFilePtr, pfnOSStatsPrintf);
+	DevicememHistoryUnlock();
+}
+
+static PVRSRV_ERROR CreateRecords(void)
+{
+	gsDevicememHistoryData.sRecords.pasAllocations =
+			OSAllocMem(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES);
+
+	if(gsDevicememHistoryData.sRecords.pasAllocations == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	gsDevicememHistoryData.sRecords.pasCircularBuffer =
+			OSAllocMem(sizeof(COMMAND_WRAPPER) * CIRCULAR_BUFFER_NUM_COMMANDS);
+
+	if(gsDevicememHistoryData.sRecords.pasCircularBuffer == NULL)
+	{
+		OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations);
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	return PVRSRV_OK;
+}
+
+static void DestroyRecords(void)
+{
+	OSFreeMem(gsDevicememHistoryData.sRecords.pasCircularBuffer);
+	OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations);
+}
+
+static void InitialiseRecords(void)
+{
+	IMG_UINT32 i;
+
+	/* initialise the allocations list */
+
+	gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Prev = ALLOCATION_LIST_NUM_ENTRIES - 1;
+	gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Next = 1;
+
+	for(i = 1; i < ALLOCATION_LIST_NUM_ENTRIES; i++)
+	{
+		gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Prev = i - 1;
+		gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Next = i + 1;
+	}
+
+	gsDevicememHistoryData.sRecords.pasAllocations[ALLOCATION_LIST_NUM_ENTRIES - 1].ui32Next = 0;
+
+	gsDevicememHistoryData.sRecords.ui32AllocationsListHead = 0;
+
+	/* initialise the circular buffer with zeros so every command
+	 * is initialised as a command of type COMMAND_TYPE_NONE
+	 */
+	OSCachedMemSet(gsDevicememHistoryData.sRecords.pasCircularBuffer,
+								COMMAND_TYPE_NONE,
+			sizeof(gsDevicememHistoryData.sRecords.pasCircularBuffer[0]) * CIRCULAR_BUFFER_NUM_COMMANDS);
+}
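+
+/* After initialisation the allocation records form a single circular,
+ * doubly-linked list; with four entries (illustrative) the links are
+ *   head -> 0 <-> 1 <-> 2 <-> 3 <-> back to 0
+ * Busy allocations are later moved to the head so the list stays in MRU
+ * order and the least recently used entries can be recycled.
+ */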
+
+PVRSRV_ERROR DevicememHistoryInitKM(void)
+{
+	PVRSRV_ERROR eError;
+
+	eError = OSLockCreate(&gsDevicememHistoryData.hLock, LOCK_TYPE_PASSIVE);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DevicememHistoryInitKM: Failed to create lock"));
+		goto err_lock;
+	}
+
+	eError = CreateRecords();
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DevicememHistoryInitKM: Failed to create records"));
+		goto err_allocations;
+	}
+
+	InitialiseRecords();
+
+	gsDevicememHistoryData.pvStatsEntry = OSCreateStatisticEntry("devicemem_history",
+						NULL,
+						DevicememHistoryPrintAllWrapper,
+						NULL,
+						NULL,
+						NULL);
+
+	return PVRSRV_OK;
+
+err_allocations:
+	OSLockDestroy(gsDevicememHistoryData.hLock);
+err_lock:
+	return eError;
+}
+
+void DevicememHistoryDeInitKM(void)
+{
+	if(gsDevicememHistoryData.pvStatsEntry != NULL)
+	{
+		OSRemoveStatisticEntry(gsDevicememHistoryData.pvStatsEntry);
+	}
+
+	DestroyRecords();
+
+	OSLockDestroy(gsDevicememHistoryData.hLock);
+}
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/devicemem_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/devicemem_server.c
new file mode 100644
index 0000000..8676631
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/devicemem_server.c
@@ -0,0 +1,1603 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Server-side component of the Device Memory Management.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* our exported API */
+#include "devicemem_server.h"
+#include "devicemem_utils.h"
+#include "devicemem.h"
+
+#include "device.h" /* For device node */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "mmu_common.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "physmem.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "lock.h"
+
+#include "rgx_bvnc_defs_km.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include <linux/sched.h>
+#include "pvr_buffer_sync.h"
+#endif
+
+struct _DEVMEMINT_CTX_
+{
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	/* MMU common code needs to have a context. There's a one-to-one
+	   correspondence between device memory context and MMU context,
+	   but we have the abstraction here so that we don't need to care
+	   what the MMU does with its context, and the MMU code need not
+	   know about us at all. */
+	MMU_CONTEXT *psMMUContext;
+
+	ATOMIC_T hRefCount;
+
+	/* This handle is for devices that require notification when a new
+	   memory context is created and they need to store private data that
+	   is associated with the context. */
+	IMG_HANDLE hPrivData;
+
+	/* The following tracks UM applications that need to be notified of a
+	 * page fault */
+	DLLIST_NODE sProcessNotifyListHead;
+	/* The following is a node for the list of registered devmem contexts */
+	DLLIST_NODE sPageFaultNotifyListElem;
+};
+
+struct _DEVMEMINT_CTX_EXPORT_
+{
+	DEVMEMINT_CTX *psDevmemCtx;
+	PMR *psPMR;
+	ATOMIC_T hRefCount;
+	DLLIST_NODE sNode;
+};
+
+struct _DEVMEMINT_HEAP_
+{
+	struct _DEVMEMINT_CTX_ *psDevmemCtx;
+	IMG_UINT32 uiLog2PageSize;
+	ATOMIC_T hRefCount;
+};
+
+struct _DEVMEMINT_RESERVATION_
+{
+	struct _DEVMEMINT_HEAP_ *psDevmemHeap;
+	IMG_DEV_VIRTADDR sBase;
+	IMG_DEVMEM_SIZE_T uiLength;
+};
+
+struct _DEVMEMINT_MAPPING_
+{
+	struct _DEVMEMINT_RESERVATION_ *psReservation;
+	PMR *psPMR;
+	IMG_UINT32 uiNumPages;
+#if defined(SUPPORT_BUFFER_SYNC)
+	void *pvWaitHandle;
+#endif
+};
+
+struct _DEVMEMINT_PF_NOTIFY_
+{
+	IMG_UINT32  ui32PID;
+	DLLIST_NODE sProcessNotifyListElem;
+};
+
+/*************************************************************************/ /*!
+@Function       _DevmemIntCtxAcquire
+@Description    Acquire a reference to the provided device memory context.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntCtxAcquire(DEVMEMINT_CTX *psDevmemCtx)
+{
+	OSAtomicIncrement(&psDevmemCtx->hRefCount);
+}
+
+/*************************************************************************/ /*!
+@Function       _DevmemIntCtxRelease
+@Description    Release the reference to the provided device memory context.
+                If this is the last reference which was taken then the
+                memory context will be freed.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx)
+{
+	if (OSAtomicDecrement(&psDevmemCtx->hRefCount) == 0)
+	{
+		/* The last reference has gone, destroy the context */
+		PVRSRV_DEVICE_NODE *psDevNode = psDevmemCtx->psDevNode;
+		DLLIST_NODE *psNode, *psNodeNext;
+
+		/* If any PIDs are registered for page fault notification,
+		 * loop through the registered PIDs and free each one */
+		dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+		{
+			DEVMEMINT_PF_NOTIFY *psNotifyNode =
+				IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+			dllist_remove_node(psNode);
+			OSFreeMem(psNotifyNode);
+		}
+
+		/* If this context is in the list registered for a debugger, remove
+		 * from that list */
+		if (dllist_node_is_in_list(&psDevmemCtx->sPageFaultNotifyListElem))
+		{
+			dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem);
+		}
+
+		if (psDevNode->pfnUnregisterMemoryContext)
+		{
+			psDevNode->pfnUnregisterMemoryContext(psDevmemCtx->hPrivData);
+		}
+		MMU_ContextDestroy(psDevmemCtx->psMMUContext);
+
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed memory context %p", __FUNCTION__, psDevmemCtx));
+		OSFreeMem(psDevmemCtx);
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       _DevmemIntHeapAcquire
+@Description    Acquire a reference to the provided device memory heap.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntHeapAcquire(DEVMEMINT_HEAP *psDevmemHeap)
+{
+	OSAtomicIncrement(&psDevmemHeap->hRefCount);
+}
+
+/*************************************************************************/ /*!
+@Function       _DevmemIntHeapRelease
+@Description    Release the reference to the provided device memory heap.
+                If this is the last reference which was taken then the
+                memory context will be freed.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntHeapRelease(DEVMEMINT_HEAP *psDevmemHeap)
+{
+	OSAtomicDecrement(&psDevmemHeap->hRefCount);
+}
+
+PVRSRV_ERROR
+DevmemIntUnpin(PMR *psPMR)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Unpin */
+	eError = PMRUnpinPMR(psPMR, IMG_FALSE);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	eError = PMRUnpinPMR(psPMR, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e_exit;
+	}
+
+	/* Invalidate mapping */
+	eError = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+	                            psDevmemMapping->psReservation->sBase,
+	                            psDevmemMapping->uiNumPages,
+	                            psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize,
+	                            IMG_FALSE, /* !< Choose to invalidate PT entries */
+	                            psPMR);
+
+e_exit:
+	return eError;
+}
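+
+/* Note: DevmemIntUnpinInvalidate is the unpin variant used while a GPU
+ * mapping still exists. Marking the PT entries invalid means any GPU access
+ * after the unpin faults rather than touching memory that may be reclaimed.
+ */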
+
+PVRSRV_ERROR
+DevmemIntPin(PMR *psPMR)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Start the pinning */
+	eError = PMRPinPMR(psPMR);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_ERROR eErrorMMU = PVRSRV_OK;
+	IMG_UINT32 uiLog2PageSize = psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize;
+
+	/* Start the pinning */
+	eError = PMRPinPMR(psPMR);
+
+	if (eError == PVRSRV_OK)
+	{
+		/* Make mapping valid again */
+		eErrorMMU = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+		                            psDevmemMapping->psReservation->sBase,
+		                            psDevmemMapping->uiNumPages,
+		                            uiLog2PageSize,
+		                            IMG_TRUE, /* !< Choose to make PT entries valid again */
+		                            psPMR);
+	}
+	else if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+	{
+		/* If we lost the physical backing we have to map it again because
+		 * the old physical addresses are not valid anymore. */
+		IMG_UINT32 uiFlags;
+		uiFlags = PMR_Flags(psPMR);
+
+		eErrorMMU = MMU_MapPages(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+		                         uiFlags,
+		                         psDevmemMapping->psReservation->sBase,
+		                         psPMR,
+		                         0,
+		                         psDevmemMapping->uiNumPages,
+		                         NULL,
+		                         uiLog2PageSize);
+	}
+
+	/* Just overwrite eError if the mappings failed.
+	 * PMR_NEW_MEMORY has to be propagated to the user. */
+	if (eErrorMMU != PVRSRV_OK)
+	{
+		eError = eErrorMMU;
+	}
+
+	return eError;
+}
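+
+/* Note on the pin path above: PVRSRV_ERROR_PMR_NEW_MEMORY is not a failure
+ * as such. It signals that fresh physical backing was allocated (the old
+ * content is lost), so the pages are re-mapped with MMU_MapPages and the
+ * status is still propagated to the caller; only an MMU error replaces it.
+ */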
+
+/*************************************************************************/ /*!
+@Function       DevmemServerGetImportHandle
+@Description    For a given exportable memory descriptor, returns the PMR handle.
+@Return         Memory is exportable - Success
+                PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+                            IMG_HANDLE *phImport)
+{
+	PVRSRV_ERROR eError;
+
+	if ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+		goto e0;
+	}
+
+	*phImport = psMemDesc->psImport->hPMR;
+	return PVRSRV_OK;
+
+e0:
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemServerGetHeapHandle
+@Description    For a given reservation, returns the heap handle.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
+                          IMG_HANDLE *phHeap)
+{
+	*phHeap = psReservation->psDevmemHeap;
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxCreate
+@Description    Creates and initialises a device memory context.
+@Return         valid Device Memory context handle - Success
+                PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxCreate(CONNECTION_DATA *psConnection,
+                   PVRSRV_DEVICE_NODE *psDeviceNode,
+                   IMG_BOOL bKernelMemoryCtx,
+                   DEVMEMINT_CTX **ppsDevmemCtxPtr,
+                   IMG_HANDLE *hPrivData,
+                   IMG_UINT32 *pui32CPUCacheLineSize)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX *psDevmemCtx;
+	IMG_HANDLE hPrivDataInt = NULL;
+	MMU_DEVICEATTRIBS      *psMMUDevAttrs;
+
+	if((psDeviceNode->pfnCheckDeviceFeature) &&
+			psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_MIPS_BIT_MASK))
+	{
+		psMMUDevAttrs = bKernelMemoryCtx ? psDeviceNode->psFirmwareMMUDevAttrs :
+											psDeviceNode->psMMUDevAttrs;
+	}
+	else
+	{
+		psMMUDevAttrs = psDeviceNode->psMMUDevAttrs;
+		PVR_UNREFERENCED_PARAMETER(bKernelMemoryCtx);
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s", __FUNCTION__));
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* allocate a Devmem context */
+	psDevmemCtx = OSAllocMem(sizeof *psDevmemCtx);
+	if (psDevmemCtx == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF ((PVR_DBG_ERROR, "%s: Alloc failed", __FUNCTION__));
+		goto fail_alloc;
+	}
+
+	OSAtomicWrite(&psDevmemCtx->hRefCount, 1);
+	psDevmemCtx->psDevNode = psDeviceNode;
+
+	/* Call down to MMU context creation */
+
+	eError = MMU_ContextCreate(psDeviceNode,
+	                           &psDevmemCtx->psMMUContext,
+	                           psMMUDevAttrs);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: MMU_ContextCreate failed", __FUNCTION__));
+		goto fail_mmucontext;
+	}
+
+
+	if (psDeviceNode->pfnRegisterMemoryContext)
+	{
+		eError = psDeviceNode->pfnRegisterMemoryContext(psDeviceNode, psDevmemCtx->psMMUContext, &hPrivDataInt);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register MMU context", __FUNCTION__));
+			goto fail_register;
+		}
+	}
+
+	/* Store the private data as it is required to unregister the memory context */
+	psDevmemCtx->hPrivData = hPrivDataInt;
+	*hPrivData = hPrivDataInt;
+	*ppsDevmemCtxPtr = psDevmemCtx;
+
+	/* Pass the CPU cache line size through the bridge to the user mode as it can't be queried in user mode.*/
+	*pui32CPUCacheLineSize = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+
+	/* Initialise the PID notify list */
+	dllist_init(&(psDevmemCtx->sProcessNotifyListHead));
+	psDevmemCtx->sPageFaultNotifyListElem.psNextNode = NULL;
+	psDevmemCtx->sPageFaultNotifyListElem.psPrevNode = NULL;
+
+	return PVRSRV_OK;
+
+fail_register:
+	MMU_ContextDestroy(psDevmemCtx->psMMUContext);
+fail_mmucontext:
+	OSFreeMem(psDevmemCtx);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntHeapCreate
+@Description    Creates and initialises a device memory heap.
+@Return         valid Device Memory heap handle - Success
+                PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx,
+                    IMG_DEV_VIRTADDR sHeapBaseAddr,
+                    IMG_DEVMEM_SIZE_T uiHeapLength,
+                    IMG_UINT32 uiLog2DataPageSize,
+                    DEVMEMINT_HEAP **ppsDevmemHeapPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP *psDevmemHeap;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: DevmemIntHeap_Create", __FUNCTION__));
+
+	/* allocate a Devmem heap */
+	psDevmemHeap = OSAllocMem(sizeof *psDevmemHeap);
+	if (psDevmemHeap == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF ((PVR_DBG_ERROR, "%s: Alloc failed", __FUNCTION__));
+		goto fail_alloc;
+	}
+
+	psDevmemHeap->psDevmemCtx = psDevmemCtx;
+
+	_DevmemIntCtxAcquire(psDevmemHeap->psDevmemCtx);
+
+	OSAtomicWrite(&psDevmemHeap->hRefCount, 1);
+
+	psDevmemHeap->uiLog2PageSize = uiLog2DataPageSize;
+
+	*ppsDevmemHeapPtr = psDevmemHeap;
+
+	return PVRSRV_OK;
+
+fail_alloc:
+	return eError;
+}
+
+#define PVR_DUMMY_PAGE_INIT_VALUE	(0x0)
+
+static PVRSRV_ERROR DevmemIntAllocDummyPage(PVRSRV_DEVICE_NODE *psDevNode,
+                                            IMG_BOOL bInitPage)
+{
+	IMG_UINT32 ui32Dummyref;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* The refcount cannot overflow: there will never be ~4G sparse PMRs.
+	 * This function also depends on being called under the global lock and
+	 * the PMR lock, and is therefore safe from re-entrancy */
+	ui32Dummyref = OSAtomicIncrement(&psDevNode->sDummyPage.atRefCounter);
+
+	if (1 == ui32Dummyref)
+	{
+		IMG_DEV_PHYADDR	sDevPhysAddr;
+
+		/*Acquire the lock */
+		OSLockAcquire(psDevNode->sDummyPage.psDummyPgLock);
+
+#if defined(PDUMP)
+		PDUMPCOMMENT("Alloc Dummy page object");
+#endif
+		/*Allocate the dummy page required for sparse backing */
+		eError = DevPhysMemAlloc(psDevNode,
+		                         (1 << psDevNode->sDummyPage.ui32Log2DummyPgSize),
+		                         0,
+		                         PVR_DUMMY_PAGE_INIT_VALUE,
+		                         bInitPage,
+#if	defined(PDUMP)
+		                         psDevNode->psMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+		                         DUMMY_PAGE,
+		                         &psDevNode->sDummyPage.hPdumpDummyPg,
+#endif
+		                         &psDevNode->sDummyPage.sDummyPageHandle,
+		                         &sDevPhysAddr);
+		if(PVRSRV_OK != eError)
+		{
+			OSAtomicDecrement(&psDevNode->sDummyPage.atRefCounter);
+		}
+		else
+		{
+			psDevNode->sDummyPage.ui64DummyPgPhysAddr = sDevPhysAddr.uiAddr;
+		}
+
+		/*Release the lock */
+		OSLockRelease(psDevNode->sDummyPage.psDummyPgLock);
+	}
+	return eError;
+}
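+
+/* Refcount behaviour (for illustration): the first caller, taking the count
+ * from 0 to 1, allocates the single device-wide dummy page; later callers
+ * only add a reference. DevmemIntFreeDummyPage below mirrors this and frees
+ * the page once the count drops back to zero.
+ */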
+
+static void DevmemIntFreeDummyPage(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	IMG_UINT32 ui32Dummyref = 0;
+
+	ui32Dummyref = OSAtomicRead(&psDevNode->sDummyPage.atRefCounter);
+
+	/* For the cases where the dummy page allocation failed due to lack of
+	 * memory, the refcount can still be 0 even for a sparse allocation */
+	if (0 != ui32Dummyref)
+	{
+		OSLockAcquire(psDevNode->sDummyPage.psDummyPgLock);
+
+		/* As above, the 32-bit refcount is ample: there will never be ~4G sparse PMRs */
+		ui32Dummyref = OSAtomicDecrement(&psDevNode->sDummyPage.atRefCounter);
+
+		if (0 == ui32Dummyref)
+		{
+			PDUMPCOMMENT("Free Dummy page object");
+
+			/* Free the dummy page when refcount reaches zero */
+			DevPhysMemFree(psDevNode,
+#if defined(PDUMP)
+			               psDevNode->sDummyPage.hPdumpDummyPg,
+#endif
+			               &psDevNode->sDummyPage.sDummyPageHandle);
+
+#if defined(PDUMP)
+			psDevNode->sDummyPage.hPdumpDummyPg = NULL;
+#endif
+			psDevNode->sDummyPage.ui64DummyPgPhysAddr = MMU_BAD_PHYS_ADDR;
+		}
+
+		OSLockRelease(psDevNode->sDummyPage.psDummyPgLock);
+	}
+
+}
+
+PVRSRV_ERROR
+DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
+                  PMR *psPMR,
+                  IMG_UINT32 ui32PageCount,
+                  IMG_UINT32 ui32PhysicalPgOffset,
+                  PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                  IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+	PVRSRV_ERROR eError;
+
+	if (psReservation->psDevmemHeap->uiLog2PageSize > PMR_GetLog2Contiguity(psPMR))
+	{
+		PVR_DPF ((PVR_DBG_ERROR,
+				"%s: Device heap and PMR have incompatible Log2Contiguity (%u - %u). "
+				"PMR contiguity must be a multiple of the heap contiguity!",
+				__func__,
+				psReservation->psDevmemHeap->uiLog2PageSize,
+				PMR_GetLog2Contiguity(psPMR) ));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	eError = MMU_MapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+	                      uiFlags,
+	                      sDevVAddrBase,
+	                      psPMR,
+	                      ui32PhysicalPgOffset,
+	                      ui32PageCount,
+	                      NULL,
+	                      psReservation->psDevmemHeap->uiLog2PageSize);
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
+                    IMG_DEV_VIRTADDR sDevVAddrBase,
+                    IMG_UINT32 ui32PageCount)
+{
+	/*Unmap the pages and mark them invalid in the MMU PTE */
+	MMU_UnmapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+	               0,
+	               sDevVAddrBase,
+	               ui32PageCount,
+	               NULL,
+	               psReservation->psDevmemHeap->uiLog2PageSize,
+	               IMG_FALSE);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
+                DEVMEMINT_RESERVATION *psReservation,
+                PMR *psPMR,
+                PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+                DEVMEMINT_MAPPING **ppsMappingPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_MAPPING *psMapping;
+	/* number of pages (device pages) that allocation spans */
+	IMG_UINT32 ui32NumDevPages;
+	/* device virtual address of start of allocation */
+	IMG_DEV_VIRTADDR sAllocationDevVAddr;
+	/* and its length */
+	IMG_DEVMEM_SIZE_T uiAllocationSize;
+	IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize;
+	IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PMR_FLAGS_T uiPMRFlags;
+
+	if (uiLog2HeapContiguity > PMR_GetLog2Contiguity(psPMR))
+	{
+		PVR_DPF ((PVR_DBG_ERROR,
+				"%s: Device heap and PMR have incompatible contiguity (%u - %u). "
+				"Heap contiguity must be a multiple of the heap contiguity!",
+				__func__,
+				uiLog2HeapContiguity,
+				PMR_GetLog2Contiguity(psPMR) ));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+	psDevNode = psDevmemHeap->psDevmemCtx->psDevNode;
+
+	/* allocate memory to record the mapping info */
+	psMapping = OSAllocMem(sizeof *psMapping);
+	if (psMapping == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF ((PVR_DBG_ERROR, "DevmemIntMapPMR: Alloc failed"));
+		goto e0;
+	}
+
+	uiAllocationSize = psReservation->uiLength;
+
+
+	ui32NumDevPages = 0xffffffffU & ( ( (uiAllocationSize - 1) >> uiLog2HeapContiguity) + 1);
+	PVR_ASSERT(ui32NumDevPages << uiLog2HeapContiguity == uiAllocationSize);
+
+	eError = PMRLockSysPhysAddresses(psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	sAllocationDevVAddr = psReservation->sBase;
+
+	/*Check if the PMR that needs to be mapped is sparse */
+	bIsSparse = PMR_IsSparse(psPMR);
+	if (bIsSparse)
+	{
+		/*Get the flags*/
+		uiPMRFlags = PMR_Flags(psPMR);
+		bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+		if (bNeedBacking)
+		{
+			/* Any failure is logged within the function itself. If the
+			 * allocation fails we must fail the map request and return
+			 * an appropriate error.
+			 *
+			 * The dummy page is allocated after physically locking down
+			 * the PMR pages, as that ordering favours the common case in
+			 * which the dummy page allocation succeeds.
+			 * If the dummy page allocation does fail, we have to unlock
+			 * the physical addresses again, which costs a little more in
+			 * the on-demand mode of operation */
+			eError = DevmemIntAllocDummyPage(psDevmemHeap->psDevmemCtx->psDevNode, IMG_TRUE);
+			if (PVRSRV_OK != eError)
+			{
+				goto e3;
+			}
+		}
+
+		/*  N.B.  We pass mapping permission flags to MMU_MapPages and let
+		 *  it reject the mapping if the permissions on the PMR are not compatible. */
+		eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
+		                      uiMapFlags,
+		                      sAllocationDevVAddr,
+		                      psPMR,
+		                      0,
+		                      ui32NumDevPages,
+		                      NULL,
+		                      uiLog2HeapContiguity);
+		if (PVRSRV_OK != eError)
+		{
+			goto e4;
+		}
+	}
+	else
+	{
+		eError = MMU_MapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext,
+		                        sAllocationDevVAddr,
+		                        psPMR,
+		                        ui32NumDevPages << uiLog2HeapContiguity,
+		                        uiMapFlags,
+		                        uiLog2HeapContiguity);
+		if (PVRSRV_OK != eError)
+		{
+			goto e3;
+		}
+	}
+
+	psMapping->psReservation = psReservation;
+	psMapping->uiNumPages = ui32NumDevPages;
+	psMapping->psPMR = psPMR;
+#if defined(SUPPORT_BUFFER_SYNC)
+	psMapping->pvWaitHandle = NULL;
+#endif
+
+	/* Don't bother with refcount on reservation, as a reservation
+	   only ever holds one mapping, so we directly increment the
+	   refcount on the heap instead */
+	_DevmemIntHeapAcquire(psMapping->psReservation->psDevmemHeap);
+
+	*ppsMappingPtr = psMapping;
+
+	return PVRSRV_OK;
+e4:
+	if (bNeedBacking)
+	{
+		/* if the mapping failed, the dummy page reference taken above
+		 * needs to be dropped again */
+		DevmemIntFreeDummyPage(psDevmemHeap->psDevmemCtx->psDevNode);
+	}
+e3:
+	{
+		PVRSRV_ERROR eError1=PVRSRV_OK;
+		eError1 = PMRUnlockSysPhysAddresses(psPMR);
+		if (PVRSRV_OK != eError1)
+		{
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Failed to unlock the physical addresses",__func__));
+		}
+		*ppsMappingPtr = NULL;
+	}
+e2:
+	OSFreeMem(psMapping);
+
+e0:
+	PVR_ASSERT (eError != PVRSRV_OK);
+	return eError;
+}
+
+
+PVRSRV_ERROR
+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP *psDevmemHeap = psMapping->psReservation->psDevmemHeap;
+	/* device virtual address of start of allocation */
+	IMG_DEV_VIRTADDR sAllocationDevVAddr;
+	/* number of pages (device pages) that allocation spans */
+	IMG_UINT32 ui32NumDevPages;
+	IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
+	PMR_FLAGS_T uiPMRFlags;
+#if defined(SUPPORT_BUFFER_SYNC)
+	IMG_INT iErr;
+
+	if (!psMapping->pvWaitHandle)
+	{
+		PVRSRV_DEVICE_NODE *psDevNode = psDevmemHeap->psDevmemCtx->psDevNode;
+
+		pvr_buffer_sync_wait_handle_get(psDevNode->psBufferSyncContext,
+										psMapping->psPMR,
+										&psMapping->pvWaitHandle);
+	}
+
+	iErr = pvr_buffer_sync_wait(psMapping->pvWaitHandle, false, 1);
+	if (iErr == -EBUSY)
+	{
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	pvr_buffer_sync_wait_handle_put(psMapping->pvWaitHandle);
+	psMapping->pvWaitHandle = NULL;
+
+	if (iErr)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to unmap PMR from device (errno=%d)",
+				 __FUNCTION__, iErr));
+
+		return PVRSRV_ERROR_STILL_MAPPED;
+	}
+#endif
+
+	ui32NumDevPages = psMapping->uiNumPages;
+	sAllocationDevVAddr = psMapping->psReservation->sBase;
+
+	/*Check if the PMR that needs to be mapped is sparse */
+	bIsSparse = PMR_IsSparse(psMapping->psPMR);
+
+	if(bIsSparse)
+	{
+		/*Get the flags*/
+		uiPMRFlags = PMR_Flags(psMapping->psPMR);
+		bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+		if(bNeedBacking)
+		{
+			DevmemIntFreeDummyPage(psDevmemHeap->psDevmemCtx->psDevNode);
+		}
+
+		MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+				0,
+				sAllocationDevVAddr,
+				ui32NumDevPages,
+				NULL,
+				psMapping->psReservation->psDevmemHeap->uiLog2PageSize,
+				IMG_FALSE);
+	}
+	else
+	{
+		MMU_UnmapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext,
+		                 sAllocationDevVAddr,
+		                 ui32NumDevPages,
+		                 psMapping->psReservation->psDevmemHeap->uiLog2PageSize);
+	}
+
+	eError = PMRUnlockSysPhysAddresses(psMapping->psPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Don't bother with refcount on reservation, as a reservation
+	   only ever holds one mapping, so we directly decrement the
+	   refcount on the heap instead */
+	_DevmemIntHeapRelease(psDevmemHeap);
+
+	OSFreeMem(psMapping);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+                      IMG_DEV_VIRTADDR sAllocationDevVAddr,
+                      IMG_DEVMEM_SIZE_T uiAllocationSize,
+                      DEVMEMINT_RESERVATION **ppsReservationPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_RESERVATION *psReservation;
+
+	/* allocate memory to record the reservation info */
+	psReservation = OSAllocMem(sizeof *psReservation);
+	if (psReservation == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF ((PVR_DBG_ERROR, "DevmemIntReserveRange: Alloc failed"));
+		goto e0;
+	}
+
+	psReservation->sBase = sAllocationDevVAddr;
+	psReservation->uiLength = uiAllocationSize;
+
+	eError = MMU_Alloc(psDevmemHeap->psDevmemCtx->psMMUContext,
+	                   uiAllocationSize,
+	                   &uiAllocationSize,
+	                   0, /* IMG_UINT32 uiProtFlags */
+	                   0, /* alignment is n/a since we supply devvaddr */
+	                   &sAllocationDevVAddr,
+	                   psDevmemHeap->uiLog2PageSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	/* since we supplied the virt addr, MMU_Alloc shouldn't have
+	   chosen a new one for us */
+	PVR_ASSERT(sAllocationDevVAddr.uiAddr == psReservation->sBase.uiAddr);
+
+	_DevmemIntHeapAcquire(psDevmemHeap);
+
+	psReservation->psDevmemHeap = psDevmemHeap;
+	*ppsReservationPtr = psReservation;
+
+	return PVRSRV_OK;
+
+	/*
+	 *  error exit paths follow
+	 */
+
+e1:
+	OSFreeMem(psReservation);
+
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psReservation)
+{
+	IMG_DEV_VIRTADDR sBase        = psReservation->sBase;
+	IMG_UINT32 uiLength           = psReservation->uiLength;
+	IMG_UINT32 uiLog2DataPageSize = psReservation->psDevmemHeap->uiLog2PageSize;
+
+	MMU_Free(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+	         sBase,
+	         uiLength,
+	         uiLog2DataPageSize);
+
+	_DevmemIntHeapRelease(psReservation->psDevmemHeap);
+	OSFreeMem(psReservation);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap)
+{
+	if (OSAtomicRead(&psDevmemHeap->hRefCount) != 1)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "BUG!  %s called but has too many references (%d) "
+		         "which probably means allocations have been made from the heap and not freed",
+		         __FUNCTION__,
+		         OSAtomicRead(&psDevmemHeap->hRefCount)));
+
+		/*
+		 * Try again later when you've freed all the memory
+		 *
+		 * Note:
+		 * We don't expect the application to retry (after all this call would
+		 * succeed if the client had freed all the memory which it should have
+		 * done before calling this function). However, given there should be
+		 * an associated handle, when the handle base is destroyed it will free
+		 * any allocations leaked by the client and then it will retry this call,
+		 * which should then succeed.
+		 */
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	PVR_ASSERT(OSAtomicRead(&psDevmemHeap->hRefCount) == 1);
+
+	_DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed heap %p", __FUNCTION__, psDevmemHeap));
+	OSFreeMem(psDevmemHeap);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
+                      PMR *psPMR,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *pai32AllocIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pai32FreeIndices,
+                      SPARSE_MEM_RESIZE_FLAGS uiSparseFlags,
+                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                      IMG_DEV_VIRTADDR sDevVAddrBase,
+                      IMG_UINT64 sCpuVAddrBase)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	IMG_UINT32 uiLog2PMRContiguity = PMR_GetLog2Contiguity(psPMR);
+	IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize;
+	IMG_UINT32 uiOrderDiff = uiLog2PMRContiguity - uiLog2HeapContiguity;
+	IMG_UINT32 uiPagesPerOrder = 1 << uiOrderDiff;
+
+	IMG_UINT32 *pai32MapIndices = pai32AllocIndices;
+	IMG_UINT32 *pai32UnmapIndices = pai32FreeIndices;
+	IMG_UINT32 uiMapPageCount = ui32AllocPageCount;
+	IMG_UINT32 uiUnmapPageCount = ui32FreePageCount;
+
+	/* Special case:
+	 * Adjust indices if we map into a heap that uses smaller page sizes
+	 * than the physical allocation itself.
+	 * The incoming parameters are all based on the page size of the PMR
+	 * but the mapping functions expects parameters to be in terms of heap page sizes. */
+	if (uiOrderDiff != 0)
+	{
+		IMG_UINT32 uiPgIdx, uiPgOffset;
+
+		uiMapPageCount = (uiMapPageCount << uiOrderDiff);
+		uiUnmapPageCount = (uiUnmapPageCount << uiOrderDiff);
+
+		pai32MapIndices = OSAllocMem(uiMapPageCount * sizeof(*pai32MapIndices));
+		if (!pai32MapIndices)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		pai32UnmapIndices = OSAllocMem(uiUnmapPageCount * sizeof(*pai32UnmapIndices));
+		if (!pai32UnmapIndices)
+		{
+			OSFreeMem(pai32MapIndices);
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		/* Every chunk index needs to be translated from physical indices
+		 * into heap-based indices. */
+		for (uiPgIdx = 0; uiPgIdx < ui32AllocPageCount; uiPgIdx++)
+		{
+			for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++)
+			{
+				pai32MapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] =
+						pai32AllocIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset;
+			}
+		}
+
+		for (uiPgIdx = 0; uiPgIdx < ui32FreePageCount; uiPgIdx++)
+		{
+			for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++)
+			{
+				pai32UnmapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] =
+						pai32FreeIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset;
+			}
+		}
+	}
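+
+	/* Illustrative worked example for the translation above (hypothetical
+	 * numbers, not taken from any particular platform): with a PMR
+	 * contiguity of 16KB (log2 = 14) mapped into a heap using 4KB pages
+	 * (log2 = 12), uiOrderDiff is 2 and uiPagesPerOrder is 4, so a single
+	 * incoming PMR index of 3 expands to four heap-based indices:
+	 *
+	 *     pai32MapIndices[] = { 12, 13, 14, 15 };    // 3*4 + 0..3
+	 *
+	 * i.e. each PMR-sized chunk covers a run of consecutive heap pages. */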
+
+	/*
+	 * The order of steps in which this request is processed is given below
+	 * and is very important in this case:
+	 *
+	 * 1. The parameters are validated in PMR_ChangeSparseMem below.
+	 *    A successful return indicates that all the parameters are correct.
+	 *    On failure we bail out from here without processing further.
+	 * 2. On success, carry out the PMR-specific operations. This includes the
+	 *    page allocs, page frees and the corresponding PMR status changes.
+	 *    When this call fails, it is guaranteed that the prior state of the
+	 *    PMR is not disturbed. If it succeeds, we can go ahead with the
+	 *    subsequent steps.
+	 * 3. Invalidate the GPU page table entries for the pages to be freed.
+	 * 4. Write the GPU page table entries for the pages that got allocated.
+	 * 5. Change the corresponding CPU address space map.
+	 *
+	 * The above steps can be selectively controlled using flags.
+	 */
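+	/* For reference (an illustrative summary, not an exhaustive list of
+	 * combinations): a grow-only request would typically arrive here with
+	 * uiSparseFlags containing SPARSE_RESIZE_ALLOC and ui32FreePageCount
+	 * equal to 0, a shrink-only request with SPARSE_RESIZE_FREE, and a
+	 * combined grow+shrink request with SPARSE_RESIZE_BOTH. */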
+	if (uiSparseFlags & (SPARSE_REMAP_MEM | SPARSE_RESIZE_BOTH))
+	{
+		/* Do the PMR specific changes first */
+		eError = PMR_ChangeSparseMem(psPMR,
+		                             ui32AllocPageCount,
+		                             pai32AllocIndices,
+		                             ui32FreePageCount,
+		                             pai32FreeIndices,
+		                             uiSparseFlags);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+					"%s: Failed to do PMR specific changes.",
+					__func__));
+			goto e1;
+		}
+
+		/* Invalidate the page table entries for the freed pages.
+		 * A later optimisation would be not to touch the ones that get re-mapped */
+		if ((0 != ui32FreePageCount) && (uiSparseFlags & SPARSE_RESIZE_FREE))
+		{
+			PMR_FLAGS_T uiPMRFlags;
+			IMG_BOOL bNeedBacking = IMG_FALSE;
+
+			/* Get the flags */
+			uiPMRFlags = PMR_Flags(psPMR);
+			bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+			if (SPARSE_REMAP_MEM != (uiSparseFlags & SPARSE_REMAP_MEM))
+			{
+				/* Unmap the pages and mark them invalid in the MMU PTE */
+				MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+				                uiFlags,
+				                sDevVAddrBase,
+				                uiUnmapPageCount,
+				                pai32UnmapIndices,
+				                uiLog2HeapContiguity,
+				                bNeedBacking);
+			}
+		}
+
+		/* Wire up the page tables for the pages that got allocated */
+		if ((0 != ui32AllocPageCount) && (uiSparseFlags & SPARSE_RESIZE_ALLOC))
+		{
+			/* Map the pages and mark them Valid in the MMU PTE */
+			eError = MMU_MapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+			                       uiFlags,
+			                       sDevVAddrBase,
+			                       psPMR,
+			                       0,
+			                       uiMapPageCount,
+			                       pai32MapIndices,
+			                       uiLog2HeapContiguity);
+
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE,
+						"%s: Failed to map alloc indices.",
+						__func__));
+				goto e1;
+			}
+		}
+
+		/* Currently only used for debug */
+		if (SPARSE_REMAP_MEM == (uiSparseFlags & SPARSE_REMAP_MEM))
+		{
+			eError = MMU_MapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+			                       uiFlags,
+			                       sDevVAddrBase,
+			                       psPMR,
+			                       0,
+			                       uiMapPageCount,
+			                       pai32UnmapIndices,
+			                       uiLog2HeapContiguity);
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE,
+						"%s: Failed to map Free indices.",
+						__func__));
+				goto e1;
+			}
+		}
+	}
+
+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+	/* Apply the sparse changes to the CPU virtual map accordingly */
+	if (uiSparseFlags & SPARSE_MAP_CPU_ADDR)
+	{
+		if (sCpuVAddrBase != 0)
+		{
+			eError = PMR_ChangeSparseMemCPUMap(psPMR,
+			                                   sCpuVAddrBase,
+			                                   ui32AllocPageCount,
+			                                   pai32AllocIndices,
+			                                   ui32FreePageCount,
+			                                   pai32FreeIndices);
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE,
+				        "%s: Failed to map to CPU addr space.",
+				        __func__));
+				/* Go via e1 so any translated index arrays
+				 * allocated above are freed */
+				goto e1;
+			}
+		}
+	}
+#endif
+
+e1:
+	if (pai32MapIndices != pai32AllocIndices)
+	{
+		OSFreeMem(pai32MapIndices);
+	}
+	if (pai32UnmapIndices != pai32FreeIndices)
+	{
+		OSFreeMem(pai32UnmapIndices);
+	}
+e0:
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxDestroy
+@Description    Destroy a context created by DevmemIntCtxCreate
+@Input          psDevmemCtx   Device Memory context
+@Return         PVRSRV_OK (this call cannot fail)
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx)
+{
+	/*
+	   We can't determine here whether we should be freeing the context,
+	   as a refcount != 1 could be due either to heap(s) remaining with
+	   allocations on them, or to this memory context having been
+	   exported. The client couldn't do anything useful with that
+	   information anyway, and the refcount ensures we only free the
+	   context when _all_ references have been released, so don't bother
+	   checking and just return OK regardless.
+	   */
+	_DevmemIntCtxRelease(psDevmemCtx);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      DEVMEMINT_CTX *psDevMemContext,
+                                      IMG_DEV_VIRTADDR sDevAddr)
+{
+	IMG_UINT32 i, j, uiLog2HeapPageSize = 0;
+	DEVICE_MEMORY_INFO *psDinfo = &psDevNode->sDevMemoryInfo;
+	DEVMEM_HEAP_CONFIG *psConfig = psDinfo->psDeviceMemoryHeapConfigArray;
+
+	IMG_BOOL bFound = IMG_FALSE;
+
+	for (i = 0;
+	     i < psDinfo->uiNumHeapConfigs && !bFound;
+	     i++)
+	{
+		for (j = 0;
+		     j < psConfig[i].uiNumHeaps && !bFound;
+		     j++)
+		{
+			IMG_DEV_VIRTADDR uiBase =
+					psConfig[i].psHeapBlueprintArray[j].sHeapBaseAddr;
+			IMG_DEVMEM_SIZE_T uiSize =
+					psConfig[i].psHeapBlueprintArray[j].uiHeapLength;
+
+			if ((sDevAddr.uiAddr >= uiBase.uiAddr) &&
+			    (sDevAddr.uiAddr < (uiBase.uiAddr + uiSize)))
+			{
+				uiLog2HeapPageSize =
+						psConfig[i].psHeapBlueprintArray[j].uiLog2DataPageSize;
+				bFound = IMG_TRUE;
+			}
+		}
+	}
+
+	if (uiLog2HeapPageSize == 0)
+	{
+		return PVRSRV_ERROR_INVALID_GPU_ADDR;
+	}
+
+	return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext,
+	                           uiLog2HeapPageSize,
+	                           sDevAddr) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR;
+}
+
+
+static void _DevmemIntExportCtxGetList(PDLLIST_NODE *ppsListHead)
+{
+	static DECLARE_DLLIST(sListHead);
+
+	*ppsListHead = &sListHead;
+}
+
+PVRSRV_ERROR
+DevmemIntExportCtx(DEVMEMINT_CTX *psContext,
+                   PMR *psPMR,
+                   DEVMEMINT_CTX_EXPORT **ppsContextExport)
+{
+	PDLLIST_NODE psListHead;
+	DEVMEMINT_CTX_EXPORT *psCtxExport;
+
+	psCtxExport = OSAllocMem(sizeof(DEVMEMINT_CTX_EXPORT));
+	if (psCtxExport == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Failed to export context. System currently out of memory",
+		         __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Only take the references once the allocation has succeeded, so the
+	 * error path above does not leak a context or PMR reference */
+	_DevmemIntCtxAcquire(psContext);
+	PMRRefPMR(psPMR);
+
+	_DevmemIntExportCtxGetList(&psListHead);
+
+	psCtxExport->psDevmemCtx = psContext;
+	psCtxExport->psPMR = psPMR;
+	dllist_add_to_tail(psListHead, &psCtxExport->sNode);
+
+	*ppsContextExport = psCtxExport;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport)
+{
+	PDLLIST_NODE psListHead;
+
+	_DevmemIntExportCtxGetList(&psListHead);
+
+	PMRUnrefPMR(psContextExport->psPMR);
+	_DevmemIntCtxRelease(psContextExport->psDevmemCtx);
+	dllist_remove_node(&psContextExport->sNode);
+	OSFreeMem(psContextExport);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntAcquireRemoteCtx(PMR *psPMR,
+                          DEVMEMINT_CTX **ppsContext,
+                          IMG_HANDLE *phPrivData)
+{
+	PDLLIST_NODE psListHead;
+	PDLLIST_NODE psListNode, psListNodeNext;
+	DEVMEMINT_CTX_EXPORT *psCtxExport;
+
+	_DevmemIntExportCtxGetList(&psListHead);
+
+	/* Find context from list using PMR as key */
+	dllist_foreach_node(psListHead, psListNode, psListNodeNext)
+	{
+		psCtxExport = IMG_CONTAINER_OF(psListNode, DEVMEMINT_CTX_EXPORT, sNode);
+		if (psCtxExport->psPMR == psPMR)
+		{
+			_DevmemIntCtxAcquire(psCtxExport->psDevmemCtx);
+			*ppsContext = psCtxExport->psDevmemCtx;
+			*phPrivData = psCtxExport->psDevmemCtx->hPrivData;
+			return PVRSRV_OK;
+		}
+	}
+
+	/* Unable to find exported context, return error */
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed to acquire remote context. Could not retrieve context with given PMR",
+			__func__));
+	return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
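+/* Illustrative usage of the export/import path above (hypothetical caller,
+ * not part of this file): one party exports a context against a PMR and
+ * another acquires it using the same PMR as the lookup key:
+ *
+ *     DEVMEMINT_CTX_EXPORT *psExport;
+ *     DEVMEMINT_CTX *psRemoteCtx;
+ *     IMG_HANDLE hPriv;
+ *
+ *     eError = DevmemIntExportCtx(psCtx, psPMR, &psExport);
+ *     ...
+ *     eError = DevmemIntAcquireRemoteCtx(psPMR, &psRemoteCtx, &hPriv);
+ *     ...
+ *     eError = DevmemIntUnexportCtx(psExport);
+ */
+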
+/*************************************************************************/ /*!
+@Function       DevmemIntRegisterPFNotifyKM
+@Description    Registers a PID to be notified when a page fault occurs on a
+                specific device memory context.
+@Input          psDevmemCtx    The context for which page fault notification is requested.
+@Input          ui32PID        The PID of the process that would like to be
+                               notified.
+@Input          bRegister      If true, register. If false, de-register.
+@Return         PVRSRV_ERROR.
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
+                                         IMG_INT32     ui32PID,
+                                         IMG_BOOL      bRegister)
+{
+	PVRSRV_DEVICE_NODE *psDevNode;
+	DLLIST_NODE         *psNode, *psNodeNext;
+	DEVMEMINT_PF_NOTIFY *psNotifyNode;
+	IMG_BOOL            bPresent = IMG_FALSE;
+
+	if (psDevmemCtx == NULL)
+	{
+		PVR_ASSERT(!"Devmem Context Missing");
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevNode = psDevmemCtx->psDevNode;
+
+	if (bRegister)
+	{
+		/* If this is the first PID in the list, the device memory context
+		 * needs to be registered for notification */
+		if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead))
+		{
+			dllist_add_to_tail(&psDevNode->sMemoryContextPageFaultNotifyListHead,
+			                   &psDevmemCtx->sPageFaultNotifyListElem);
+		}
+	}
+
+	/* Loop through the registered PIDs and check whether this one is
+	 * present */
+	dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+	{
+		psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+
+		if (psNotifyNode->ui32PID == ui32PID)
+		{
+			bPresent = IMG_TRUE;
+			break;
+		}
+	}
+
+	if (bRegister == IMG_TRUE)
+	{
+		if (bPresent)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Trying to register a PID that is already registered",
+			         __func__));
+			return PVRSRV_ERROR_PID_ALREADY_REGISTERED;
+		}
+
+		psNotifyNode = OSAllocMem(sizeof(*psNotifyNode));
+		if (psNotifyNode == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Unable to allocate memory for the notify list",
+			          __func__));
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		psNotifyNode->ui32PID = ui32PID;
+		dllist_add_to_tail(&(psDevmemCtx->sProcessNotifyListHead), &(psNotifyNode->sProcessNotifyListElem));
+	}
+	else
+	{
+		if (!bPresent)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Trying to unregister a PID that is not registered",
+			         __func__));
+			return PVRSRV_ERROR_PID_NOT_REGISTERED;
+		}
+		dllist_remove_node(psNode);
+		psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+		OSFreeMem(psNotifyNode);
+	}
+
+	if (!bRegister)
+	{
+		/* If the last process in the list is being unregistered, then also
+		 * unregister the device memory context from the notify list. */
+		if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead))
+		{
+			dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem);
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntPFNotify
+@Description    Notifies any processes that have registered themselves to be
+                notified when a page fault happens on a specific device memory
+                context.
+@Input          *psDevNode           The device node.
+@Input          ui64FaultedPCAddress The page catalogue address that faulted.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode,
+                               IMG_UINT64         ui64FaultedPCAddress)
+{
+	DLLIST_NODE         *psNode, *psNodeNext;
+	DEVMEMINT_PF_NOTIFY *psNotifyNode;
+	PVRSRV_ERROR        eError;
+	DEVMEMINT_CTX       *psDevmemCtx = NULL;
+	IMG_BOOL            bFailed = IMG_FALSE;
+
+	if (dllist_is_empty(&(psDevNode->sMemoryContextPageFaultNotifyListHead)))
+	{
+		return PVRSRV_OK;
+	}
+
+	dllist_foreach_node(&(psDevNode->sMemoryContextPageFaultNotifyListHead), psNode, psNodeNext)
+	{
+		DEVMEMINT_CTX *psThisContext =
+			IMG_CONTAINER_OF(psNode, DEVMEMINT_CTX, sPageFaultNotifyListElem);
+		IMG_DEV_PHYADDR sPCDevPAddr;
+
+		eError = MMU_AcquireBaseAddr(psThisContext->psMMUContext, &sPCDevPAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Failed to Acquire Base Address (%s)",
+			         __func__,
+			         PVRSRVGetErrorStringKM(eError)));
+			return eError;
+		}
+
+		if (sPCDevPAddr.uiAddr == ui64FaultedPCAddress)
+		{
+			psDevmemCtx = psThisContext;
+			break;
+		}
+	}
+
+	if (psDevmemCtx == NULL)
+	{
+		/* Not found, just return */
+		return PVRSRV_OK;
+	}
+
+	/* Loop through each registered PID and send a signal to the process */
+	dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+	{
+		psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+
+		eError = OSDebugSignalPID(psNotifyNode->ui32PID);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Unable to signal process for PID: %u",
+			         __func__,
+			         psNotifyNode->ui32PID));
+
+			PVR_ASSERT(!"Unable to signal process");
+
+			bFailed = IMG_TRUE;
+		}
+	}
+
+	if (bFailed)
+	{
+		return PVRSRV_ERROR_SIGNAL_FAILED;
+	}
+
+	return PVRSRV_OK;
+}
+
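+/* Illustrative flow (hypothetical caller, not part of this file): the
+ * server's page fault handler is expected to resolve the faulting page
+ * catalogue address and then call
+ *
+ *     eError = DevmemIntPFNotify(psDevNode, ui64FaultedPCAddress);
+ *
+ * which signals every PID previously registered via
+ * DevmemIntRegisterPFNotifyKM(psDevmemCtx, ui32PID, IMG_TRUE). */
+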
+#if defined(PDUMP)
+IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext)
+{
+	IMG_UINT32 ui32MMUContextID;
+	MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32MMUContextID);
+	return ui32MMUContextID;
+}
+
+PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+                                IMG_DEV_VIRTADDR sDevAddrStart,
+                                IMG_DEVMEM_SIZE_T uiSize,
+                                IMG_UINT32 ui32ArraySize,
+                                const IMG_CHAR *pszFilename,
+                                IMG_UINT32 ui32FileOffset,
+                                IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 uiPDumpMMUCtx;
+
+	PVR_UNREFERENCED_PARAMETER(ui32ArraySize);
+
+	eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext,
+			&uiPDumpMMUCtx);
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/*
+	   The following SYSMEM refers to the 'MMU Context'; hence it should be
+	   the MMU context, not the PMR, that determines the PDump MemSpace tag.
+	   From a PDump point of view it doesn't matter which namespace we use,
+	   as long as that MemSpace is used on the 'MMU Context' we're dumping
+	   from.
+	   */
+	eError = PDumpMMUSAB(psDevmemCtx->psDevNode->sDevId.pszPDumpDevName,
+	                     uiPDumpMMUCtx,
+	                     sDevAddrStart,
+	                     uiSize,
+	                     pszFilename,
+	                     ui32FileOffset,
+	                     ui32PDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext);
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection,
+                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                     IMG_CHAR *pszFileName,
+                     IMG_UINT32 ui32FileOffset,
+                     IMG_UINT32 ui32Width,
+                     IMG_UINT32 ui32Height,
+                     IMG_UINT32 ui32StrideInBytes,
+                     IMG_DEV_VIRTADDR sDevBaseAddr,
+                     DEVMEMINT_CTX *psDevMemContext,
+                     IMG_UINT32 ui32Size,
+                     PDUMP_PIXEL_FORMAT ePixelFormat,
+                     IMG_UINT32 ui32AddrMode,
+                     IMG_UINT32 ui32PDumpFlags)
+{
+	IMG_UINT32 ui32ContextID;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DevmemIntPDumpBitmap: Failed to acquire MMU context"));
+		return PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID;
+	}
+
+	eError = PDumpBitmapKM(psDeviceNode,
+	                       pszFileName,
+	                       ui32FileOffset,
+	                       ui32Width,
+	                       ui32Height,
+	                       ui32StrideInBytes,
+	                       sDevBaseAddr,
+	                       ui32ContextID,
+	                       ui32Size,
+	                       ePixelFormat,
+	                       ui32AddrMode,
+	                       ui32PDumpFlags);
+
+	/* Don't care about return value */
+	MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext);
+
+	return eError;
+}
+
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/handle.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/handle.c
new file mode 100644
index 0000000..fc2a2d0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/handle.c
@@ -0,0 +1,2348 @@
+/*************************************************************************/ /*!
+@File
+@Title		Resource Handle Manager
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Provide resource handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+/* See handle.h for a description of the handle API. */
+
+/*
+ * The implementation supports movable handle structures, allowing the address
+ * of a handle structure to change without having to fix up pointers in
+ * any of the handle structures. For example, the linked list mechanism
+ * used to link subhandles together uses handle array indices rather than
+ * pointers to the structures themselves.
+ */
+
+#include <stddef.h>
+
+#include "handle.h"
+#include "handle_impl.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+
+#define	HANDLE_HASH_TAB_INIT_SIZE		32
+
+#define	SET_FLAG(v, f)				((void)((v) |= (f)))
+#define	CLEAR_FLAG(v, f)			((void)((v) &= (IMG_UINT)~(f)))
+#define	TEST_FLAG(v, f)				((IMG_BOOL)(((v) & (f)) != 0))
+
+#define	TEST_ALLOC_FLAG(psHandleData, f)	TEST_FLAG((psHandleData)->eFlag, f)
+
+#if !defined(ARRAY_SIZE)
+#define ARRAY_SIZE(a)				(sizeof(a) / sizeof((a)[0]))
+#endif
+
+
+/* Linked list structure. Used for both the list head and list items */
+typedef struct _HANDLE_LIST_
+{
+	IMG_HANDLE hPrev;
+	IMG_HANDLE hNext;
+	IMG_HANDLE hParent;
+} HANDLE_LIST;
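+
+/* A HANDLE_LIST embedded in a handle structure links handles by IMG_HANDLE
+ * value rather than by pointer, so a handle structure can move in memory
+ * without any list fix-ups. An empty list is one whose hPrev and hNext both
+ * refer back to the owning handle; see HandleListInit() and
+ * HandleListIsEmpty() below. */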
+
+typedef struct _HANDLE_DATA_
+{
+	/* The handle that represents this structure */
+	IMG_HANDLE hHandle;
+
+	/* Handle type */
+	PVRSRV_HANDLE_TYPE eType;
+
+	/* Flags specified when the handle was allocated */
+	PVRSRV_HANDLE_ALLOC_FLAG eFlag;
+
+	/* Pointer to the data that the handle represents */
+	void *pvData;
+
+	/*
+	 * Callback specified at handle allocation time to
+	 * release/destroy/free the data represented by the
+	 * handle when its reference count reaches 0. This
+	 * should always be NULL for subhandles.
+	 */
+	PFN_HANDLE_RELEASE pfnReleaseData;
+
+	/* List head for subhandles of this handle */
+	HANDLE_LIST sChildren;
+
+	/* List entry for sibling subhandles */
+	HANDLE_LIST sSiblings;
+
+	/* Reference count. The pfnReleaseData callback gets called when the
+	 * reference count hits zero
+	 */
+	IMG_UINT32 ui32RefCount;
+} HANDLE_DATA;
+
+struct _HANDLE_BASE_
+{
+	/* Pointer to a handle implementations base structure */
+	HANDLE_IMPL_BASE *psImplBase;
+
+	/*
+	 * Pointer to handle hash table.
+	 * The hash table is used to do reverse lookups, converting data
+	 * pointers to handles.
+	 */
+	HASH_TABLE *psHashTab;
+
+	/* Can be connection, process, global */
+	PVRSRV_HANDLE_BASE_TYPE eType;
+};
+
+/*
+ * The key for the handle hash table is an array of three elements, the
+ * pointer to the resource, the resource type and the parent handle (or
+ * NULL if there is no parent). The eHandKey enumeration gives the
+ * array indices of the elements making up the key.
+ */
+enum eHandKey
+{
+	HAND_KEY_DATA = 0,
+	HAND_KEY_TYPE,
+	HAND_KEY_PARENT,
+	HAND_KEY_LEN		/* Must be last item in list */
+};
+
+/* HAND_KEY is the type of the hash table key */
+typedef uintptr_t HAND_KEY[HAND_KEY_LEN];
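+
+/* Illustrative sketch of a reverse lookup using such a key (this mirrors
+ * InitKey()/FindHandle() below rather than adding new behaviour):
+ *
+ *     HAND_KEY aKey;
+ *     aKey[HAND_KEY_DATA]   = (uintptr_t)pvData;
+ *     aKey[HAND_KEY_TYPE]   = (uintptr_t)eType;
+ *     aKey[HAND_KEY_PARENT] = (uintptr_t)hParent;  // NULL if no parent
+ *     hHandle = (IMG_HANDLE)HASH_Retrieve_Extended(psBase->psHashTab, aKey);
+ */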
+
+/* Stores a pointer to the function table of the handle back-end in use */
+static HANDLE_IMPL_FUNCTAB const *gpsHandleFuncs = NULL;
+
+/*
+ * Global lock used to serialise calls to the handle management functions,
+ * which would otherwise be safe only in a single-threaded context.
+ */
+static POS_LOCK gHandleLock;
+static IMG_BOOL gbLockInitialised = IMG_FALSE;
+
+void LockHandle(void)
+{
+	OSLockAcquire(gHandleLock);
+}
+
+void UnlockHandle(void)
+{
+	OSLockRelease(gHandleLock);
+}
+
+/*
+ * Kernel handle base structure. This is used for handles that are not
+ * allocated on behalf of a particular process.
+ */
+PVRSRV_HANDLE_BASE *gpsKernelHandleBase = NULL;
+
+/* Increase the reference count on the given handle.
+ * The handle lock must already be acquired.
+ * Returns: the reference count after the increment
+ */
+static inline IMG_UINT32 _HandleRef(HANDLE_DATA *psHandleData)
+{
+#if defined(PVRSRV_DEBUG_HANDLE_LOCK)
+	if (!OSLockIsLocked(gHandleLock))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__));
+		OSDumpStack();
+	}
+#endif
+	psHandleData->ui32RefCount++;
+	return psHandleData->ui32RefCount;
+}
+
+/* Decrease the reference count on the given handle.
+ * The handle lock must already be acquired.
+ * Returns: the reference count after the decrement
+ */
+static inline IMG_UINT32 _HandleUnref(HANDLE_DATA *psHandleData)
+{
+#if defined(PVRSRV_DEBUG_HANDLE_LOCK)
+	if (!OSLockIsLocked(gHandleLock))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__));
+		OSDumpStack();
+	}
+#endif
+	PVR_ASSERT(psHandleData->ui32RefCount > 0);
+	psHandleData->ui32RefCount--;
+
+	return psHandleData->ui32RefCount;
+}
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+static const IMG_CHAR *HandleTypeToString(PVRSRV_HANDLE_TYPE eType)
+{
+	#define HANDLETYPE(x) \
+			case PVRSRV_HANDLE_TYPE_##x: \
+				return #x;
+	switch (eType)
+	{
+		#include "handle_types.h"
+		#undef HANDLETYPE
+
+		default:
+			return "INVALID";
+	}
+}
+#endif /* PVRSRV_NEED_PVR_DPF */
+
+/*!
+******************************************************************************
+
+ @Function	GetHandleData
+
+ @Description	Get the handle data structure for a given handle
+
+ @Input		psBase - pointer to handle base structure
+		ppsHandleData - location to return pointer to handle data structure
+		hHandle - handle from client
+		eType - handle type or PVRSRV_HANDLE_TYPE_NONE if the
+			handle type is not to be checked.
+
+ @Output	ppsHandleData - points to a pointer to the handle data structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleData)
+#endif
+static INLINE
+PVRSRV_ERROR GetHandleData(PVRSRV_HANDLE_BASE *psBase,
+			   HANDLE_DATA **ppsHandleData,
+			   IMG_HANDLE hHandle,
+			   PVRSRV_HANDLE_TYPE eType)
+{
+	HANDLE_DATA *psHandleData;
+	PVRSRV_ERROR eError;
+
+	eError = gpsHandleFuncs->pfnGetHandleData(psBase->psImplBase,
+						  hHandle,
+						  (void **)&psHandleData);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/*
+	 * Unless PVRSRV_HANDLE_TYPE_NONE was passed in to this function,
+	 * check handle is of the correct type.
+	 */
+	if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandleData->eType)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "GetHandleData: Type mismatch. Lookup request: Handle %p, type: %s (%u) but stored handle is type %s (%u)",
+			 hHandle,
+			 HandleTypeToString(eType),
+			 eType,
+			 HandleTypeToString(psHandleData->eType),
+			 psHandleData->eType));
+		return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH;
+	}
+
+	/* Return the handle structure */
+	*ppsHandleData = psHandleData;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListInit
+
+ @Description	Initialise a linked list structure embedded in a handle
+		structure.
+
+ @Input		hHandle - handle containing the linked list structure
+		psList - pointer to linked list structure
+		hParent - parent handle or NULL
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInit)
+#endif
+static INLINE
+void HandleListInit(IMG_HANDLE hHandle, HANDLE_LIST *psList, IMG_HANDLE hParent)
+{
+	psList->hPrev = hHandle;
+	psList->hNext = hHandle;
+	psList->hParent = hParent;
+}
+
+/*!
+******************************************************************************
+
+ @Function	InitParentList
+
+ @Description	Initialise the children list head in a handle structure.
+		The children are the subhandles of this handle.
+
+ @Input		psHandleData - pointer to handle data structure
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitParentList)
+#endif
+static INLINE
+void InitParentList(HANDLE_DATA *psHandleData)
+{
+	IMG_HANDLE hParent = psHandleData->hHandle;
+
+	HandleListInit(hParent, &psHandleData->sChildren, hParent);
+}
+
+/*!
+******************************************************************************
+
+ @Function	InitChildEntry
+
+ @Description	Initialise the child list entry in a handle structure.
+		The list entry is used to link together subhandles of
+		a given handle.
+
+ @Input		psHandleData - pointer to handle data structure
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitChildEntry)
+#endif
+static INLINE
+void InitChildEntry(HANDLE_DATA *psHandleData)
+{
+	HandleListInit(psHandleData->hHandle, &psHandleData->sSiblings, NULL);
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListIsEmpty
+
+ @Description	Determine whether a given linked list is empty.
+
+ @Input		hHandle - handle containing the list head
+		psList - pointer to the list head
+
+ @Return	IMG_TRUE if the list is empty, IMG_FALSE if it isn't.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIsEmpty)
+#endif
+/* Instead of passing in the handle can we not just do
+ * (psList->hPrev == psList->hNext) ? IMG_TRUE : IMG_FALSE ??? */
+static INLINE
+IMG_BOOL HandleListIsEmpty(IMG_HANDLE hHandle, HANDLE_LIST *psList)
+{
+	IMG_BOOL bIsEmpty;
+
+	bIsEmpty = (IMG_BOOL)(psList->hNext == hHandle);
+
+#ifdef	DEBUG
+	{
+		IMG_BOOL bIsEmpty2;
+
+		bIsEmpty2 = (IMG_BOOL)(psList->hPrev == hHandle);
+		PVR_ASSERT(bIsEmpty == bIsEmpty2);
+	}
+#endif
+
+	return bIsEmpty;
+}
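+
+/* Note on the question above: (psList->hPrev == psList->hNext) would not be
+ * sufficient on its own, because it is also true for a list containing
+ * exactly one entry (both then point at that entry rather than back at
+ * hHandle), so the owning handle is still needed to identify the empty
+ * case. */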
+
+#ifdef DEBUG
+/*!
+******************************************************************************
+
+ @Function	NoChildren
+
+ @Description	Determine whether a handle has any subhandles
+
+ @Input		psHandleData - pointer to handle data structure
+
+ @Return	IMG_TRUE if the handle has no subhandles, IMG_FALSE if it does.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoChildren)
+#endif
+static INLINE
+IMG_BOOL NoChildren(HANDLE_DATA *psHandleData)
+{
+	PVR_ASSERT(psHandleData->sChildren.hParent == psHandleData->hHandle);
+
+	return HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sChildren);
+}
+
+/*!
+******************************************************************************
+
+ @Function	NoParent
+
+ @Description	Determine whether a handle is a subhandle
+
+ @Input		psHandleData - pointer to handle data structure
+
+ @Return	IMG_TRUE if the handle is not a subhandle, IMG_FALSE if it is.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoParent)
+#endif
+static INLINE
+IMG_BOOL NoParent(HANDLE_DATA *psHandleData)
+{
+	if (HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sSiblings))
+	{
+		PVR_ASSERT(psHandleData->sSiblings.hParent == NULL);
+
+		return IMG_TRUE;
+	}
+	else
+	{
+		PVR_ASSERT(psHandleData->sSiblings.hParent != NULL);
+	}
+	return IMG_FALSE;
+}
+#endif /*DEBUG*/
+
+/*!
+******************************************************************************
+
+ @Function	ParentHandle
+
+ @Description	Determine the parent of a handle
+
+ @Input		psHandleData - pointer to handle data structure
+
+ @Return	Parent handle, or NULL if the handle is not a subhandle.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentHandle)
+#endif
+static INLINE
+IMG_HANDLE ParentHandle(HANDLE_DATA *psHandleData)
+{
+	return psHandleData->sSiblings.hParent;
+}
+
+/*
+ * GetHandleListFromHandleAndOffset is used to generate either a
+ * pointer to the subhandle list head, or a pointer to the linked list
+ * structure of an item on a subhandle list.
+ * The list head is itself on the list, but is at a different offset
+ * in the handle structure to the linked list structure for items on
+ * the list. The two linked list structures are differentiated by
+ * the third parameter, containing the parent handle. The parent field
+ * in the list head structure references the handle structure that contains
+ * it. For items on the list, the parent field in the linked list structure
+ * references the parent handle, which will be different from the handle
+ * containing the linked list structure.
+ */
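+/* Illustrative layout (not code): for a parent handle P with subhandles A
+ * and B, the chain visits P's sChildren head and the sSiblings entries of
+ * A and B:
+ *
+ *     P.sChildren.hNext -> A,  A.sSiblings.hNext -> B,  B.sSiblings.hNext -> P
+ *
+ * All three carry hParent == P, so GetHandleListFromHandleAndOffset() below
+ * picks the list head offset when the entry being looked up is the parent
+ * itself, and the sibling entry offset otherwise. */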
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleListFromHandleAndOffset)
+#endif
+static INLINE
+HANDLE_LIST *GetHandleListFromHandleAndOffset(PVRSRV_HANDLE_BASE *psBase,
+					      IMG_HANDLE hEntry,
+					      IMG_HANDLE hParent,
+					      size_t uiParentOffset,
+					      size_t uiEntryOffset)
+{
+	HANDLE_DATA *psHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psBase != NULL);
+
+	eError = GetHandleData(psBase,
+			       &psHandleData,
+			       hEntry,
+			       PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		return NULL;
+	}
+
+	if (hEntry == hParent)
+	{
+		return (HANDLE_LIST *)((IMG_CHAR *)psHandleData + uiParentOffset);
+	}
+	else
+	{
+		return (HANDLE_LIST *)((IMG_CHAR *)psHandleData + uiEntryOffset);
+	}
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListInsertBefore
+
+ @Description	Insert a handle before a handle currently on the list.
+
+ @Input		hEntry - existing handle to insert before
+		psEntry - pointer to handle structure to insert before
+		uiParentOffset - offset to list head struct in handle structure
+		hNewEntry - handle to be inserted
+		psNewEntry - pointer to handle structure of item to be inserted
+		uiEntryOffset - offset of list item struct in handle structure
+		hParent - parent handle of hNewEntry
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInsertBefore)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase,
+				    IMG_HANDLE hEntry,
+				    HANDLE_LIST *psEntry,
+				    size_t uiParentOffset,
+				    IMG_HANDLE hNewEntry,
+				    HANDLE_LIST *psNewEntry,
+				    size_t uiEntryOffset,
+				    IMG_HANDLE hParent)
+{
+	HANDLE_LIST *psPrevEntry;
+
+	if (psBase == NULL || psEntry == NULL || psNewEntry == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psPrevEntry = GetHandleListFromHandleAndOffset(psBase,
+						       psEntry->hPrev,
+						       hParent,
+						       uiParentOffset,
+						       uiEntryOffset);
+	if (psPrevEntry == NULL)
+	{
+		return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+	}
+
+	PVR_ASSERT(psNewEntry->hParent == NULL);
+	PVR_ASSERT(hEntry == psPrevEntry->hNext);
+
+#if defined(DEBUG)
+	{
+		HANDLE_LIST *psParentList;
+
+		psParentList = GetHandleListFromHandleAndOffset(psBase,
+								hParent,
+								hParent,
+								uiParentOffset,
+								uiParentOffset);
+		PVR_ASSERT(psParentList && psParentList->hParent == hParent);
+	}
+#endif /* defined(DEBUG) */
+
+	psNewEntry->hPrev = psEntry->hPrev;
+	psEntry->hPrev = hNewEntry;
+
+	psNewEntry->hNext = hEntry;
+	psPrevEntry->hNext = hNewEntry;
+
+	psNewEntry->hParent = hParent;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	AdoptChild
+
+ @Description	Assign a subhandle to a handle
+
+ @Input		psParentData - pointer to handle structure of parent handle
+		psChildData - pointer to handle structure of child subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(AdoptChild)
+#endif
+static INLINE
+PVRSRV_ERROR AdoptChild(PVRSRV_HANDLE_BASE *psBase,
+			HANDLE_DATA *psParentData,
+			HANDLE_DATA *psChildData)
+{
+	IMG_HANDLE hParent = psParentData->sChildren.hParent;
+
+	PVR_ASSERT(hParent == psParentData->hHandle);
+
+	return HandleListInsertBefore(psBase,
+				      hParent,
+				      &psParentData->sChildren,
+				      offsetof(HANDLE_DATA, sChildren),
+				      psChildData->hHandle,
+				      &psChildData->sSiblings,
+				      offsetof(HANDLE_DATA, sSiblings),
+				      hParent);
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListRemove
+
+ @Description	Remove a handle from a list
+
+ @Input		hEntry - handle to be removed
+		psEntry - pointer to handle structure of item to be removed
+		uiEntryOffset - offset of list item struct in handle structure
+		uiParentOffset - offset to list head struct in handle structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListRemove)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListRemove(PVRSRV_HANDLE_BASE *psBase,
+			      IMG_HANDLE hEntry,
+			      HANDLE_LIST *psEntry,
+			      size_t uiEntryOffset,
+			      size_t uiParentOffset)
+{
+	if (psBase == NULL || psEntry == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (!HandleListIsEmpty(hEntry, psEntry))
+	{
+		HANDLE_LIST *psPrev;
+		HANDLE_LIST *psNext;
+
+		psPrev = GetHandleListFromHandleAndOffset(psBase,
+							  psEntry->hPrev,
+							  psEntry->hParent,
+							  uiParentOffset,
+							  uiEntryOffset);
+		if (psPrev == NULL)
+		{
+			return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+		}
+
+		psNext = GetHandleListFromHandleAndOffset(psBase,
+							  psEntry->hNext,
+							  psEntry->hParent,
+							  uiParentOffset,
+							  uiEntryOffset);
+		if (psNext == NULL)
+		{
+			return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+		}
+
+		/*
+		 * The list head is on the list, and we don't want to
+		 * remove it.
+		 */
+		PVR_ASSERT(psEntry->hParent != NULL);
+
+		psPrev->hNext = psEntry->hNext;
+		psNext->hPrev = psEntry->hPrev;
+
+		HandleListInit(hEntry, psEntry, NULL);
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	UnlinkFromParent
+
+ @Description	Remove a subhandle from its parents list
+
+ @Input		psHandleData - pointer to handle data structure of child subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(UnlinkFromParent)
+#endif
+static INLINE
+PVRSRV_ERROR UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase,
+			      HANDLE_DATA *psHandleData)
+{
+	return HandleListRemove(psBase,
+				psHandleData->hHandle,
+				&psHandleData->sSiblings,
+				offsetof(HANDLE_DATA, sSiblings),
+				offsetof(HANDLE_DATA, sChildren));
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListIterate
+
+ @Description	Iterate over the items in a list
+
+ @Input		psHead - pointer to list head
+		uiParentOffset - offset to list head struct in handle structure
+		uiEntryOffset - offset of list item struct in handle structure
+		pfnIterFunc - function to be called for each handle in the list
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIterate)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase,
+			       HANDLE_LIST *psHead,
+			       size_t uiParentOffset,
+			       size_t uiEntryOffset,
+			       PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
+{
+	IMG_HANDLE hHandle = psHead->hNext;
+	IMG_HANDLE hParent = psHead->hParent;
+	IMG_HANDLE hNext;
+
+	PVR_ASSERT(psHead->hParent != NULL);
+
+	/*
+	 * Follow the next chain from the list head until we reach
+	 * the list head again, which signifies the end of the list.
+	 */
+	while (hHandle != hParent)
+	{
+		HANDLE_LIST *psEntry;
+		PVRSRV_ERROR eError;
+
+		psEntry = GetHandleListFromHandleAndOffset(psBase,
+							   hHandle,
+							   hParent,
+							   uiParentOffset,
+							   uiEntryOffset);
+		if (psEntry == NULL)
+		{
+			return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+		}
+
+		PVR_ASSERT(psEntry->hParent == psHead->hParent);
+
+		/*
+		 * Get the next index now, in case the list item is
+		 * modified by the iteration function.
+		 */
+		hNext = psEntry->hNext;
+
+		eError = (*pfnIterFunc)(psBase, hHandle);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+
+		hHandle = hNext;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	IterateOverChildren
+
+ @Description	Iterate over the subhandles of a parent handle
+
+ @Input		psParentData - pointer to parent handle structure
+		pfnIterFunc - function to be called for each subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(IterateOverChildren)
+#endif
+static INLINE
+PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase,
+				 HANDLE_DATA *psParentData,
+				 PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
+{
+	 return HandleListIterate(psBase,
+				  &psParentData->sChildren,
+				  offsetof(HANDLE_DATA, sChildren),
+				  offsetof(HANDLE_DATA, sSiblings),
+				  pfnIterFunc);
+}
+
+/*!
+******************************************************************************
+
+ @Function	ParentIfPrivate
+
+ @Description	Return the parent handle if the handle was allocated
+		with PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, else return
+		NULL
+
+ @Input		psHandleData - pointer to handle data structure
+
+ @Return	Parent handle, or NULL
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentIfPrivate)
+#endif
+static INLINE
+IMG_HANDLE ParentIfPrivate(HANDLE_DATA *psHandleData)
+{
+	return TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
+			ParentHandle(psHandleData) : NULL;
+}
+
+/*!
+******************************************************************************
+
+ @Function	InitKey
+
+ @Description	Initialise a hash table key for the current process
+
+ @Input		psBase - pointer to handle base structure
+		aKey - pointer to key
+		pvData - pointer to the resource the handle represents
+		eType - type of resource
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitKey)
+#endif
+static INLINE
+void InitKey(HAND_KEY aKey,
+	     PVRSRV_HANDLE_BASE *psBase,
+	     void *pvData,
+	     PVRSRV_HANDLE_TYPE eType,
+	     IMG_HANDLE hParent)
+{
+	PVR_UNREFERENCED_PARAMETER(psBase);
+
+	aKey[HAND_KEY_DATA] = (uintptr_t)pvData;
+	aKey[HAND_KEY_TYPE] = (uintptr_t)eType;
+	aKey[HAND_KEY_PARENT] = (uintptr_t)hParent;
+}
+
+static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle);
+
+/*!
+******************************************************************************
+
+ @Function	FreeHandle
+
+ @Description	Free a handle data structure.
+
+ @Input		psBase - Pointer to handle base structure
+		hHandle - Handle to be freed
+		eType - Type of the handle to be freed
+		ppvData - Location for data associated with the freed handle
+
+ @Output 		ppvData - Points to data that was associated with the freed handle
+
+ @Return	PVRSRV_OK or PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase,
+			       IMG_HANDLE hHandle,
+			       PVRSRV_HANDLE_TYPE eType,
+			       void **ppvData)
+{
+	HANDLE_DATA *psHandleData = NULL;
+	HANDLE_DATA *psReleasedHandleData;
+	PVRSRV_ERROR eError;
+
+	eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	if (_HandleUnref(psHandleData) > 0)
+	{
+		/* this handle still has references so do not destroy it
+		 * or the underlying object yet
+		 */
+		return PVRSRV_OK;
+	}
+
+	/* Call the release data callback for each reference on the handle */
+	if (psHandleData->pfnReleaseData != NULL)
+	{
+		eError = psHandleData->pfnReleaseData(psHandleData->pvData);
+		if (eError == PVRSRV_ERROR_RETRY)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+				 "FreeHandle: "
+				 "Got retry while calling release data callback for %p (type = %d)",
+				 hHandle,
+				 (IMG_UINT32)psHandleData->eType));
+
+			/* the caller should retry, so retain a reference on the handle */
+			_HandleRef(psHandleData);
+
+			return eError;
+		}
+		else if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		HAND_KEY aKey;
+		IMG_HANDLE hRemovedHandle;
+
+		InitKey(aKey, psBase, psHandleData->pvData, psHandleData->eType, ParentIfPrivate(psHandleData));
+
+		hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psBase->psHashTab, aKey);
+
+		PVR_ASSERT(hRemovedHandle != NULL);
+		PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
+		PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
+	}
+
+	eError = UnlinkFromParent(psBase, psHandleData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "FreeHandle: Error whilst unlinking from parent handle (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* Free children */
+	eError = IterateOverChildren(psBase, psHandleData, FreeHandleWrapper);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "FreeHandle: Error whilst freeing subhandles (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	eError = gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase,
+						  psHandleData->hHandle,
+						  (void **)&psReleasedHandleData);
+	if (eError == PVRSRV_OK)
+	{
+		PVR_ASSERT(psReleasedHandleData == psHandleData);
+	}
+
+	if (ppvData)
+	{
+		*ppvData = psHandleData->pvData;
+	}
+
+	OSFreeMem(psHandleData);
+
+	return eError;
+}
+
+static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle)
+{
+	return FreeHandle(psBase, hHandle, PVRSRV_HANDLE_TYPE_NONE, NULL);
+}
+
+/*!
+******************************************************************************
+
+ @Function	FindHandle
+
+ @Description	Find handle corresponding to a resource pointer
+
+ @Input		psBase - pointer to handle base structure
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+
+ @Return	the handle, or NULL if not found
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(FindHandle)
+#endif
+static INLINE
+IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase,
+		      void *pvData,
+		      PVRSRV_HANDLE_TYPE eType,
+		      IMG_HANDLE hParent)
+{
+	HAND_KEY aKey;
+
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+
+	InitKey(aKey, psBase, pvData, eType, hParent);
+
+	return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
+}
+
+/*!
+******************************************************************************
+
+ @Function	AllocHandle
+
+ @Description	Allocate a new handle
+
+ @Input		phHandle - location for new handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+		hParent - parent handle or NULL
+		pfnReleaseData - Function to release resource at handle release
+		                 time
+
+ @Output	phHandle - points to new handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase,
+				IMG_HANDLE *phHandle,
+				void *pvData,
+				PVRSRV_HANDLE_TYPE eType,
+				PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+				IMG_HANDLE hParent,
+				PFN_HANDLE_RELEASE pfnReleaseData)
+{
+	HANDLE_DATA *psNewHandleData;
+	IMG_HANDLE hHandle;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(psBase != NULL && psBase->psHashTab != NULL);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		/* Handle must not already exist */
+		PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == NULL);
+	}
+
+	psNewHandleData = OSAllocZMem(sizeof(*psNewHandleData));
+	if (psNewHandleData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't allocate handle data"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	eError = gpsHandleFuncs->pfnAcquireHandle(psBase->psImplBase, &hHandle, psNewHandleData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Failed to acquire a handle"));
+		goto ErrorFreeHandleData;
+	}
+
+	/*
+	 * If a data pointer can be associated with multiple handles, we
+	 * don't put the handle in the hash table, as the data pointer
+	 * may not map to a unique handle
+	 */
+	if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		HAND_KEY aKey;
+
+		/* Initialise hash key */
+		InitKey(aKey, psBase, pvData, eType, hParent);
+
+		/* Put the new handle in the hash table */
+		if (!HASH_Insert_Extended(psBase->psHashTab, aKey, (uintptr_t)hHandle))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't add handle to hash table"));
+			eError = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+			goto ErrorReleaseHandle;
+		}
+	}
+
+	psNewHandleData->hHandle = hHandle;
+	psNewHandleData->eType = eType;
+	psNewHandleData->eFlag = eFlag;
+	psNewHandleData->pvData = pvData;
+	psNewHandleData->pfnReleaseData = pfnReleaseData;
+	psNewHandleData->ui32RefCount = 1;
+
+	InitParentList(psNewHandleData);
+#if defined(DEBUG)
+	PVR_ASSERT(NoChildren(psNewHandleData));
+#endif
+
+	InitChildEntry(psNewHandleData);
+#if defined(DEBUG)
+	PVR_ASSERT(NoParent(psNewHandleData));
+#endif
+
+	/* Return the new handle to the client */
+	*phHandle = psNewHandleData->hHandle;
+
+	return PVRSRV_OK;
+
+ErrorReleaseHandle:
+	(void)gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, hHandle, NULL);
+
+ErrorFreeHandleData:
+	OSFreeMem(psNewHandleData);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocHandle
+
+ @Description	Allocate a handle
+
+ @Input		phHandle - location for new handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+		pfnReleaseData - Function to release resource at handle release
+		                 time
+
+ @Output	phHandle - points to new handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase,
+			       IMG_HANDLE *phHandle,
+			       void *pvData,
+			       PVRSRV_HANDLE_TYPE eType,
+			       PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+			       PFN_HANDLE_RELEASE pfnReleaseData)
+{
+	PVRSRV_ERROR eError;
+
+	LockHandle();
+	eError = PVRSRVAllocHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, pfnReleaseData);
+	UnlockHandle();
+
+	return eError;
+}
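+
+/* Illustrative caller-side sketch (hypothetical resource type, handle type
+ * and release callback, shown only to document the API contract):
+ *
+ *     static PVRSRV_ERROR _MyResRelease(void *pvData)
+ *     {
+ *         OSFreeMem(pvData);              // free the hypothetical resource
+ *         return PVRSRV_OK;
+ *     }
+ *
+ *     IMG_HANDLE hRes;
+ *     eError = PVRSRVAllocHandle(psBase, &hRes, psRes,
+ *                                PVRSRV_HANDLE_TYPE_XXX,  // placeholder type
+ *                                PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ *                                _MyResRelease);
+ *
+ * The release callback runs when the handle's reference count drops to zero
+ * (see FreeHandle() above). */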
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocHandleUnlocked
+
+ @Description	Allocate a handle without acquiring/releasing the handle
+		lock. The function assumes you hold the lock when called.
+
+ @Input		phHandle - location for new handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+		pfnReleaseData - Function to release resource at handle release
+		                 time
+
+ @Output	phHandle - points to new handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+			       IMG_HANDLE *phHandle,
+			       void *pvData,
+			       PVRSRV_HANDLE_TYPE eType,
+			       PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+			       PFN_HANDLE_RELEASE pfnReleaseData)
+{
+	PVRSRV_ERROR eError;
+
+	*phHandle = NULL;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Exit;
+	}
+
+	if (pfnReleaseData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Missing release function"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Exit;
+	}
+
+	eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, NULL, pfnReleaseData);
+
+Exit:
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocSubHandle
+
+ @Description	Allocate a subhandle
+
+ @Input		phHandle - location for new subhandle
+		pvData - pointer to resource to be associated with the subhandle
+		eType - the type of resource
+		hParent - parent handle
+
+ @Output	phHandle - points to new subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
+				  IMG_HANDLE *phHandle,
+				  void *pvData,
+				  PVRSRV_HANDLE_TYPE eType,
+				  PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+				  IMG_HANDLE hParent)
+{
+	PVRSRV_ERROR eError;
+
+	LockHandle();
+	eError = PVRSRVAllocSubHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, hParent);
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocSubHandleUnlocked
+
+ @Description	Allocate a subhandle without acquiring/releasing the
+		handle lock. The function assumes you hold the lock when called.
+
+ @Input		phHandle - location for new subhandle
+		pvData - pointer to resource to be associated with the subhandle
+		eType - the type of resource
+		hParent - parent handle
+
+ @Output	phHandle - points to new subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+				  IMG_HANDLE *phHandle,
+				  void *pvData,
+				  PVRSRV_HANDLE_TYPE eType,
+				  PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+				  IMG_HANDLE hParent)
+{
+	HANDLE_DATA *psPHandleData = NULL;
+	HANDLE_DATA *psCHandleData = NULL;
+	IMG_HANDLE hParentKey;
+	IMG_HANDLE hHandle;
+	PVRSRV_ERROR eError;
+
+	*phHandle = NULL;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Exit;
+	}
+
+	hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? hParent : NULL;
+
+	/* Lookup the parent handle */
+	eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Failed to get parent handle structure"));
+		goto Exit;
+	}
+
+	eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey, NULL);
+	if (eError != PVRSRV_OK)
+	{
+		goto Exit;
+	}
+
+	eError = GetHandleData(psBase, &psCHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Failed to get parent handle structure"));
+
+		/* If we were able to allocate the handle then there should be no reason why we
+		   can't also get it's handle structure. Otherwise something has gone badly wrong. */
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		goto Exit;
+	}
+
+	/*
+	 * Get the parent handle structure again, in case the handle
+	 * structure has moved (depending on the implementation
+	 * of AllocHandle).
+	 */
+	eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Failed to get parent handle structure"));
+
+		(void)FreeHandle(psBase, hHandle, eType, NULL);
+		goto Exit;
+	}
+
+	eError = AdoptChild(psBase, psPHandleData, psCHandleData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Parent handle failed to adopt subhandle"));
+
+		(void)FreeHandle(psBase, hHandle, eType, NULL);
+		goto Exit;
+	}
+
+	*phHandle = hHandle;
+
+	eError = PVRSRV_OK;
+
+Exit:
+	return eError;
+}
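+
+/* Illustrative usage sketch (assumed caller, variables, flag and type names;
+ * not part of this file): allocate a handle for a resource, then attach a
+ * subhandle so the parent/child relationship is tracked by the base:
+ *
+ *   eError = PVRSRVAllocHandle(psBase, &hParent, psRes,
+ *                              PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ *                              PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ *                              pfnReleaseRes);
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       eError = PVRSRVAllocSubHandle(psBase, &hSub, psSubRes,
+ *                                     PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ *                                     PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ *                                     hParent);
+ *   }
+ *
+ * Note subhandles take no release callback; AllocHandle is called with a
+ * NULL pfnReleaseData and cleanup is driven through the parent. */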
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVFindHandle
+
+ @Description	Find handle corresponding to a resource pointer
+
+ @Input		phHandle - location for returned handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+
+ @Output	phHandle - points to handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase,
+			      IMG_HANDLE *phHandle,
+			      void *pvData,
+			      PVRSRV_HANDLE_TYPE eType)
+{
+	PVRSRV_ERROR eError;
+
+	LockHandle();
+	eError = PVRSRVFindHandleUnlocked(psBase, phHandle, pvData, eType);
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVFindHandleUnlocked
+
+ @Description	Find handle corresponding to a resource pointer without
+		acquiring/releasing the handle lock. The function assumes you hold
+		the lock when called.
+
+ @Input		phHandle - location for returned handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+
+ @Output	phHandle - points to handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+			      IMG_HANDLE *phHandle,
+			      void *pvData,
+			      PVRSRV_HANDLE_TYPE eType)
+{
+	IMG_HANDLE hHandle;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVFindHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Exit;
+	}
+
+	/* See if there is a handle for this data pointer */
+	hHandle = FindHandle(psBase, pvData, eType, NULL);
+	if (hHandle == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVFindHandle: Error finding handle. Type %u",
+			 eType));
+
+		eError = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+		goto Exit;
+	}
+
+	*phHandle = hHandle;
+
+	eError = PVRSRV_OK;
+
+Exit:
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVLookupHandle
+
+ @Description	Lookup the data pointer corresponding to a handle
+
+ @Input		ppvData - location to return data pointer
+		hHandle - handle from client
+		eType - handle type
+		bRef - If TRUE, a reference will be added on the handle if the
+		       lookup is successful.
+
+ @Output	ppvData - points to the data pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase,
+				void **ppvData,
+				IMG_HANDLE hHandle,
+				PVRSRV_HANDLE_TYPE eType,
+				IMG_BOOL bRef)
+{
+	PVRSRV_ERROR eError;
+
+	LockHandle();
+	eError = PVRSRVLookupHandleUnlocked(psBase, ppvData, hHandle, eType, bRef);
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVLookupHandleUnlocked
+
+ @Description	Lookup the data pointer corresponding to a handle without
+		acquiring/releasing the handle lock. The function assumes you
+		hold the lock when called.
+
+ @Input		ppvData - location to return data pointer
+		hHandle - handle from client
+		eType - handle type
+		bRef - If TRUE, a reference will be added on the handle if the
+		       lookup is successful.
+
+ @Output	ppvData - points to the data pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+				void **ppvData,
+				IMG_HANDLE hHandle,
+				PVRSRV_HANDLE_TYPE eType,
+				IMG_BOOL bRef)
+{
+	HANDLE_DATA *psHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Exit;
+	}
+
+	eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVLookupHandle: Error looking up handle (%s). Handle %p, type %u",
+			 PVRSRVGetErrorStringKM(eError),
+			 (void*) hHandle,
+			 eType));
+#if defined(DEBUG) || defined(PVRSRV_NEED_PVR_DPF)
+		OSDumpStack();
+#endif
+		goto Exit;
+	}
+
+	if (bRef)
+	{
+		_HandleRef(psHandleData);
+	}
+
+	*ppvData = psHandleData->pvData;
+
+	eError = PVRSRV_OK;
+
+Exit:
+
+	return eError;
+}
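+
+/* Illustrative lookup sketch (assumed caller and variables; not part of this
+ * file): translate a client handle back into its data pointer, taking a
+ * reference so the resource cannot be freed from under the caller:
+ *
+ *   void *pvRes;
+ *   eError = PVRSRVLookupHandle(psBase, &pvRes, hHandle,
+ *                               PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ *
+ * Passing IMG_TRUE for bRef adds a reference that must be balanced by a
+ * later release of the handle. */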
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVLookupSubHandle
+
+ @Description	Lookup the data pointer corresponding to a subhandle
+
+ @Input		ppvData - location to return data pointer
+		hHandle - handle from client
+		eType - handle type
+		hAncestor - ancestor handle
+
+ @Output	ppvData - points to the data pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase,
+				   void **ppvData,
+				   IMG_HANDLE hHandle,
+				   PVRSRV_HANDLE_TYPE eType,
+				   IMG_HANDLE hAncestor)
+{
+	HANDLE_DATA *psPHandleData = NULL;
+	HANDLE_DATA *psCHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ExitUnlock;
+	}
+
+	eError = GetHandleData(psBase, &psCHandleData, hHandle, eType);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVLookupSubHandle: Error looking up subhandle (%s). Handle %p, type %u",
+			 PVRSRVGetErrorStringKM(eError),
+			 (void*) hHandle,
+			 eType));
+		OSDumpStack();
+		goto ExitUnlock;
+	}
+
+	/* Look for hAncestor among the handle's ancestors */
+	for (psPHandleData = psCHandleData; ParentHandle(psPHandleData) != hAncestor; )
+	{
+		eError = GetHandleData(psBase, &psPHandleData, ParentHandle(psPHandleData), PVRSRV_HANDLE_TYPE_NONE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor"));
+			eError = PVRSRV_ERROR_INVALID_SUBHANDLE;
+			goto ExitUnlock;
+		}
+	}
+
+	*ppvData = psCHandleData->pvData;
+
+	eError = PVRSRV_OK;
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVGetParentHandle
+
+ @Description	Lookup the parent of a handle
+
+ @Input		phParent - location for returning parent handle
+		hHandle - handle for which the parent handle is required
+		eType - handle type
+
+ @Output	*phParent - parent handle, or NULL if there is no parent
+
+ @Return	Error code or PVRSRV_OK.  Note that not having a parent is
+		not regarded as an error.
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase,
+				   IMG_HANDLE *phParent,
+				   IMG_HANDLE hHandle,
+				   PVRSRV_HANDLE_TYPE eType)
+{
+	HANDLE_DATA *psHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetParentHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ExitUnlock;
+	}
+
+	eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVGetParentHandle: Error looking up subhandle (%s). Type %u",
+			 PVRSRVGetErrorStringKM(eError),
+			 eType));
+		OSDumpStack();
+		goto ExitUnlock;
+	}
+
+	*phParent = ParentHandle(psHandleData);
+
+	eError = PVRSRV_OK;
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVReleaseHandle
+
+ @Description	Release a handle that is no longer needed
+
+ @Input 	hHandle - handle from client
+		eType - handle type
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
+				 IMG_HANDLE hHandle,
+				 PVRSRV_HANDLE_TYPE eType)
+{
+	PVRSRV_ERROR eError;
+
+	LockHandle();
+	eError = PVRSRVReleaseHandleUnlocked(psBase, hHandle, eType);
+	UnlockHandle();
+
+	return eError;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVReleaseHandleUnlocked
+
+ @Description	Release a handle that is no longer needed without
+		acquiring/releasing the handle lock. The function assumes you
+		hold the lock when called.
+
+ @Input 	hHandle - handle from client
+		eType - handle type
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+				 IMG_HANDLE hHandle,
+				 PVRSRV_HANDLE_TYPE eType)
+{
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Exit;
+	}
+
+	eError = FreeHandle(psBase, hHandle, eType, NULL);
+
+Exit:
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPurgeHandles
+
+ @Description	Purge handles for a given handle base
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ExitUnlock;
+	}
+
+	eError = gpsHandleFuncs->pfnPurgeHandles(psBase->psImplBase);
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocHandleBase
+
+ @Description	Allocate a handle base structure for a process
+
+ @Input 	ppsBase - pointer to handle base structure pointer
+
+ @Output	ppsBase - points to handle base structure pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase,
+                                   PVRSRV_HANDLE_BASE_TYPE eType)
+{
+	PVRSRV_HANDLE_BASE *psBase;
+	PVRSRV_ERROR eError;
+
+	if (gpsHandleFuncs == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Handle management not initialised"));
+		return PVRSRV_ERROR_NOT_READY;
+	}
+
+	LockHandle();
+
+	if (ppsBase == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ErrorUnlock;
+	}
+
+	psBase = OSAllocZMem(sizeof(*psBase));
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't allocate handle base"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorUnlock;
+	}
+
+	psBase->eType = eType;
+
+	eError = gpsHandleFuncs->pfnCreateHandleBase(&psBase->psImplBase);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorFreeHandleBase;
+	}
+
+	psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE,
+						 sizeof(HAND_KEY),
+						 HASH_Func_Default,
+						 HASH_Key_Comp_Default);
+	if (psBase->psHashTab == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't create data pointer hash table"));
+		eError = PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE;
+		goto ErrorDestroyHandleBase;
+	}
+
+	*ppsBase = psBase;
+
+	UnlockHandle();
+
+	return PVRSRV_OK;
+
+ErrorDestroyHandleBase:
+	(void)gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
+
+ErrorFreeHandleBase:
+	OSFreeMem(psBase);
+
+ErrorUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+#if defined(DEBUG)
+typedef struct _COUNT_HANDLE_DATA_
+{
+	PVRSRV_HANDLE_BASE *psBase;
+	IMG_UINT32 uiHandleDataCount;
+} COUNT_HANDLE_DATA;
+
+/* Used to count the number of handles that have data associated with them */
+static PVRSRV_ERROR CountHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
+{
+	COUNT_HANDLE_DATA *psData = (COUNT_HANDLE_DATA *)pvData;
+	HANDLE_DATA *psHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psData == NULL ||
+	    psData->psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "CountHandleDataWrapper: Missing free data"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = GetHandleData(psData->psBase,
+			       &psHandleData,
+			       hHandle,
+			       PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "CountHandleDataWrapper: Couldn't get handle data for handle"));
+		return eError;
+	}
+
+	if (psHandleData != NULL)
+	{
+		psData->uiHandleDataCount++;
+	}
+
+	return PVRSRV_OK;
+}
+
+/* Print a handle in the handle base. Used with the iterator callback. */
+static PVRSRV_ERROR ListHandlesInBase(IMG_HANDLE hHandle, void *pvData)
+{
+	PVRSRV_HANDLE_BASE *psBase = (PVRSRV_HANDLE_BASE*) pvData;
+	HANDLE_DATA *psHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing base", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = GetHandleData(psBase,
+			       &psHandleData,
+			       hHandle,
+			       PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't get handle data for handle", __func__));
+		return eError;
+	}
+
+	if (psHandleData != NULL)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "    Handle: %6u, Refs: %3u, Type: %s (%u)",
+				(IMG_UINT32) (uintptr_t) psHandleData->hHandle,
+				psHandleData->ui32RefCount,
+				HandleTypeToString(psHandleData->eType),
+				psHandleData->eType));
+	}
+
+	return PVRSRV_OK;
+}
+
+#endif /* defined(DEBUG) */
+
+typedef struct FREE_HANDLE_DATA_TAG
+{
+	PVRSRV_HANDLE_BASE *psBase;
+	PVRSRV_HANDLE_TYPE eHandleFreeType;
+	/* timing data (ns) to release bridge lock upon the deadline */
+	IMG_UINT64 ui64TimeStart;
+	IMG_UINT64 ui64MaxBridgeTime;
+} FREE_HANDLE_DATA;
+
+static INLINE IMG_BOOL _CheckIfMaxTimeExpired(IMG_UINT64 ui64TimeStart, IMG_UINT64 ui64MaxBridgeTime)
+{
+	IMG_UINT64 ui64Diff;
+	IMG_UINT64 ui64Now = OSClockns64();
+
+	if (ui64Now >= ui64TimeStart)
+	{
+		ui64Diff = ui64Now - ui64TimeStart;
+	}
+	else
+	{
+		/* time has wrapped around; add 1 for the wrap from the maximum
+		   value back to zero */
+		ui64Diff = (0xFFFFFFFFFFFFFFFFULL - ui64TimeStart) + ui64Now + 1;
+	}
+
+	return ui64Diff >= ui64MaxBridgeTime;
+}
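+
+/* Illustrative wrap-around check (assumed values; not part of the driver):
+ * with ui64TimeStart = 0xFFFFFFFFFFFFFFF0 and ui64Now = 0x10 the elapsed
+ * time is 0x20 ns, so a ui64MaxBridgeTime of 0x20 or less has expired. */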
+
+static PVRSRV_ERROR FreeHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
+{
+	FREE_HANDLE_DATA *psData = (FREE_HANDLE_DATA *)pvData;
+	HANDLE_DATA *psHandleData = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psData == NULL ||
+	    psData->psBase == NULL ||
+	    psData->eHandleFreeType == PVRSRV_HANDLE_TYPE_NONE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FreeHandleDataWrapper: Missing free data"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = GetHandleData(psData->psBase,
+			       &psHandleData,
+			       hHandle,
+			       PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FreeHandleDataWrapper: Couldn't get handle data for handle"));
+		return eError;
+	}
+
+	if (psHandleData == NULL || psHandleData->eType != psData->eHandleFreeType)
+	{
+		return PVRSRV_OK;
+	}
+
+	PVR_ASSERT(psHandleData->ui32RefCount > 0);
+
+	while (psHandleData->ui32RefCount != 0)
+	{
+		if (psHandleData->pfnReleaseData != NULL)
+		{
+			eError = psHandleData->pfnReleaseData(psHandleData->pvData);
+			if (eError == PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE,
+					 "FreeHandleDataWrapper: "
+					 "Got retry while calling release data callback for %p (type = %d)",
+					 hHandle,
+					 (IMG_UINT32)psHandleData->eType));
+
+				return eError;
+			}
+			else if (eError != PVRSRV_OK)
+			{
+				return eError;
+			}
+		}
+
+		_HandleUnref(psHandleData);
+	}
+
+	if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		HAND_KEY aKey;
+		IMG_HANDLE hRemovedHandle;
+
+		InitKey(aKey,
+			psData->psBase,
+			psHandleData->pvData,
+			psHandleData->eType,
+			ParentIfPrivate(psHandleData));
+
+		hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psData->psBase->psHashTab, aKey);
+
+		PVR_ASSERT(hRemovedHandle != NULL);
+		PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
+		PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
+	}
+
+	eError = gpsHandleFuncs->pfnSetHandleData(psData->psBase->psImplBase, hHandle, NULL);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	OSFreeMem(psHandleData);
+
+	/* If we have reached the end of the time slice we can release the global
+	 * lock, invoke the scheduler and reacquire the lock */
+	if ((psData->ui64MaxBridgeTime != 0) && _CheckIfMaxTimeExpired(psData->ui64TimeStart, psData->ui64MaxBridgeTime))
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "FreeResourceByCriteria: Lock timeout (timeout: %" IMG_UINT64_FMTSPEC")",
+								            psData->ui64MaxBridgeTime));
+		UnlockHandle();
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSReleaseBridgeLock();
+#endif
+		/* Invoke the scheduler to check if other processes are waiting for the lock */
+		OSReleaseThreadQuanta();
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSAcquireBridgeLock();
+#endif
+		LockHandle();
+		/* Restart the timeout by resetting the start timestamp */
+		psData->ui64TimeStart = OSClockns64();
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Lock acquired again", __func__));
+	}
+
+	return PVRSRV_OK;
+}
+
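+/* Handle types are listed in dependency order: handles for resources that
+ * reference other resources (event objects, firmware memory, server contexts)
+ * appear before the reservations, PMRs and device contexts they depend on,
+ * so nothing is destroyed while a live handle still refers to it. */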
+static PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] =
+{
+	PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+	PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+	PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+	PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+	PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+	PVRSRV_HANDLE_TYPE_RGX_RPM_FREELIST,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_MEMORY_BLOCK,
+	PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+	PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+	PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RI_HANDLE,
+	PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+	PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+	PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+	PVRSRV_HANDLE_TYPE_SERVER_SYNC_EXPORT,
+	PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+	PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
+	PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+	PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_PAGELIST,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+	PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+	PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+	PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE,
+	PVRSRV_HANDLE_TYPE_DC_BUFFER,
+	PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT,
+	PVRSRV_HANDLE_TYPE_DC_DEVICE,
+	PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+	PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP
+};
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVFreeHandleBase
+
+ @Description	Free a handle base structure
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime)
+{
+#if defined(DEBUG)
+	COUNT_HANDLE_DATA sCountData = { 0 };
+#endif
+	FREE_HANDLE_DATA sHandleData = { 0 };
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+
+	sHandleData.psBase = psBase;
+	sHandleData.ui64TimeStart = OSClockns64();
+	sHandleData.ui64MaxBridgeTime = ui64MaxBridgeTime;
+
+#if defined(DEBUG)
+
+	sCountData.psBase = psBase;
+
+	eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+						       &CountHandleDataWrapper,
+						       (void *)&sCountData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVFreeHandleBase: Failed to perform handle count (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		goto ExitUnlock;
+	}
+
+	if (sCountData.uiHandleDataCount != 0)
+	{
+		IMG_BOOL bList = sCountData.uiHandleDataCount < HANDLE_DEBUG_LISTING_MAX_NUM;
+
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: %u remaining handles in handle base 0x%p "
+			 "(PVRSRV_HANDLE_BASE_TYPE %u).%s",
+			 __func__,
+			 sCountData.uiHandleDataCount,
+			 psBase,
+			 psBase->eType,
+			 bList ? "": " Skipping details, too many items..."));
+
+		if (bList)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "-------- Listing Handles --------"));
+			eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+			                                               &ListHandlesInBase,
+			                                               psBase);
+			PVR_DPF((PVR_DBG_WARNING, "-------- Done Listing    --------"));
+		}
+	}
+
+#endif /* defined(DEBUG) */
+
+	/*
+	 * As we're freeing handles based on type, make sure all
+	 * handles have actually had their data freed to avoid
+	 * resources being leaked
+	 */
+	for (i = 0; i < ARRAY_SIZE(g_aeOrderedFreeList); i++)
+	{
+		sHandleData.eHandleFreeType = g_aeOrderedFreeList[i];
+
+		/* Make sure all handles have been freed before destroying the handle base */
+		eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+							       &FreeHandleDataWrapper,
+							       (void *)&sHandleData);
+		if (eError != PVRSRV_OK)
+		{
+			goto ExitUnlock;
+		}
+	}
+
+	if (psBase->psHashTab != NULL)
+	{
+		HASH_Delete(psBase->psHashTab);
+	}
+
+	eError = gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
+	if (eError != PVRSRV_OK)
+	{
+		goto ExitUnlock;
+	}
+
+	OSFreeMem(psBase);
+
+	eError = PVRSRV_OK;
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVHandleInit
+
+ @Description	Initialise handle management
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVHandleInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsKernelHandleBase == NULL);
+	PVR_ASSERT(gpsHandleFuncs == NULL);
+	PVR_ASSERT(!gbLockInitialised);
+
+	eError = OSLockCreate(&gHandleLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVHandleInit: Creation of handle global lock failed (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+	gbLockInitialised = IMG_TRUE;
+
+	eError = PVRSRVHandleGetFuncTable(&gpsHandleFuncs);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVHandleInit: PVRSRVHandleGetFuncTable failed (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		goto ErrorHandleDeinit;
+	}
+
+	eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase,
+	                               PVRSRV_HANDLE_BASE_TYPE_GLOBAL);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVHandleInit: PVRSRVAllocHandleBase failed (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		goto ErrorHandleDeinit;
+	}
+
+	eError = gpsHandleFuncs->pfnEnableHandlePurging(gpsKernelHandleBase->psImplBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVHandleInit: PVRSRVEnableHandlePurging failed (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		goto ErrorHandleDeinit;
+	}
+
+	return PVRSRV_OK;
+
+ErrorHandleDeinit:
+	(void) PVRSRVHandleDeInit();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVHandleDeInit
+
+ @Description	De-initialise handle management
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVHandleDeInit(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (gpsHandleFuncs != NULL)
+	{
+		if (gpsKernelHandleBase != NULL)
+		{
+			eError = PVRSRVFreeHandleBase(gpsKernelHandleBase, 0 /* do not release bridge lock */);
+			if (eError == PVRSRV_OK)
+			{
+				gpsKernelHandleBase = NULL;
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVHandleDeInit: FreeHandleBase failed (%s)",
+					 PVRSRVGetErrorStringKM(eError)));
+			}
+		}
+
+		if (eError == PVRSRV_OK)
+		{
+			gpsHandleFuncs = NULL;
+		}
+	}
+	else
+	{
+		/* If we don't have a handle function table we shouldn't have a handle base either */
+		PVR_ASSERT(gpsKernelHandleBase == NULL);
+	}
+
+	if (gbLockInitialised)
+	{
+		OSLockDestroy(gHandleLock);
+		gbLockInitialised = IMG_FALSE;
+	}
+
+	return eError;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/htbserver.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/htbserver.c
new file mode 100644
index 0000000..fcc8580
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/htbserver.c
@@ -0,0 +1,702 @@
+/*************************************************************************/ /*!
+@File           htbserver.c
+@Title          Host Trace Buffer server implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provides the mechanism to
+                retrieve the trace data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "htbserver.h"
+#include "htbuffer.h"
+#include "htbuffer_types.h"
+#include "tlstream.h"
+#include "pvrsrv_tlcommon.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrsrv_apphint.h"
+
+/* size of circular buffer controlling the maximum number of concurrent PIDs logged */
+#define HTB_MAX_NUM_PID 8
+
+/* number of times to try rewriting a log entry */
+#define HTB_LOG_RETRY_COUNT 5
+
+/*************************************************************************/ /*!
+  Host Trace Buffer control information structure
+*/ /**************************************************************************/
+typedef struct
+{
+	IMG_CHAR *pszBufferName;        /*!< Name to use for the trace buffer,
+                                         this will be required to request
+                                         trace data from TL.
+                                         Once set this may not be changed */
+
+	IMG_UINT32 ui32BufferSize;      /*!< Requested buffer size in bytes
+                                         Once set this may not be changed */
+
+	HTB_OPMODE_CTRL eOpMode;        /*!< Control what trace data is dropped if
+                                         the buffer is full.
+                                         Once set this may not be changed */
+
+/*	IMG_UINT32 ui32GroupEnable; */  /*!< Flags word controlling groups to be
+                                         logged */
+
+	IMG_UINT32 ui32LogLevel;        /*!< Log level to control messages logged */
+
+	IMG_UINT32 aui32EnablePID[HTB_MAX_NUM_PID]; /*!< PIDs to enable logging for
+                                                     a specific set of processes */
+
+	IMG_UINT32 ui32PIDCount;        /*!< Current number of PIDs being logged */
+
+	IMG_UINT32 ui32PIDHead;         /*!< Head of the PID circular buffer */
+
+	HTB_LOGMODE_CTRL eLogMode;      /*!< Logging mode control */
+
+	IMG_BOOL bLogDropSignalled;     /*!< Flag indicating if a log message has
+                                         been signalled as dropped */
+
+	/* synchronisation parameters */
+	IMG_UINT64 ui64SyncOSTS;
+	IMG_UINT64 ui64SyncCRTS;
+	IMG_UINT32 ui32SyncCalcClkSpd;
+	IMG_UINT32 ui32SyncMarker;
+
+} HTB_CTRL_INFO;
+
+
+/*************************************************************************/ /*!
+*/ /**************************************************************************/
+static const IMG_UINT32 MapFlags[] =
+{
+	0,                          /* HTB_OPMODE_UNDEF = 0 */
+	TL_OPMODE_DROP_NEWER, /* HTB_OPMODE_DROPLATEST */
+	TL_OPMODE_DROP_OLDEST,/* HTB_OPMODE_DROPOLDEST */
+	TL_OPMODE_BLOCK       /* HTB_OPMODE_BLOCK */
+};
+
+static_assert(0 == HTB_OPMODE_UNDEF,      "Unexpected value for HTB_OPMODE_UNDEF");
+static_assert(1 == HTB_OPMODE_DROPLATEST, "Unexpected value for HTB_OPMODE_DROPLATEST");
+static_assert(2 == HTB_OPMODE_DROPOLDEST, "Unexpected value for HTB_OPMODE_DROPOLDEST");
+static_assert(3 == HTB_OPMODE_BLOCK,      "Unexpected value for HTB_OPMODE_BLOCK");
+
+static const IMG_UINT32 g_ui32TLBaseFlags = 0; /* optionally TL_FLAG_NO_SIGNAL_ON_COMMIT */
+
+/* Minimum TL buffer size,
+ * large enough for around 60 worst case messages or 200 average messages
+ */
+#define HTB_TL_BUFFER_SIZE_MIN	(0x10000)
+
+
+static HTB_CTRL_INFO g_sCtrl  = {0};
+static IMG_BOOL g_bConfigured = IMG_FALSE;
+static IMG_HANDLE g_hTLStream = NULL;
+
+
+/************************************************************************/ /*!
+ @Function      _LookupFlags
+ @Description   Convert HTBuffer Operation mode to TLStream flags
+
+ @Input         eMode           Operation Mode
+
+ @Return        IMG_UINT32      TLStream flags
+*/ /**************************************************************************/
+static IMG_UINT32
+_LookupFlags( HTB_OPMODE_CTRL eMode )
+{
+	return (eMode < (sizeof(MapFlags) / sizeof(MapFlags[0]))) ? MapFlags[eMode] : 0;
+}
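+
+/* Illustrative mapping (assumed calls; not part of this file):
+ *   _LookupFlags(HTB_OPMODE_BLOCK)    -> TL_OPMODE_BLOCK
+ *   _LookupFlags((HTB_OPMODE_CTRL)99) -> 0 for any out-of-range mode
+ */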
+
+
+/************************************************************************/ /*!
+ @Function      _HTBLogDebugInfo
+ @Description   Debug dump handler used to dump the state of the HTB module.
+                Called for each verbosity level during a debug dump. Function
+                only prints state when called for High verbosity.
+
+ @Input         hDebugRequestHandle See PFN_DBGREQ_NOTIFY
+
+ @Input         ui32VerbLevel       See PFN_DBGREQ_NOTIFY
+
+ @Input         pfnDumpDebugPrintf  See PFN_DBGREQ_NOTIFY
+
+ @Input         pvDumpDebugFile     See PFN_DBGREQ_NOTIFY
+
+*/ /**************************************************************************/
+static void _HTBLogDebugInfo(
+		PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+		IMG_UINT32 ui32VerbLevel,
+		DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+		void *pvDumpDebugFile
+)
+{
+	PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle);
+
+	if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH)
+	{
+
+		if (g_bConfigured)
+		{
+			IMG_INT i;
+
+			PVR_DUMPDEBUG_LOG("------[ HTB Log state: On ]------");
+
+			PVR_DUMPDEBUG_LOG("HTB Log mode: %d", g_sCtrl.eLogMode);
+			PVR_DUMPDEBUG_LOG("HTB Log level: %d", g_sCtrl.ui32LogLevel);
+			PVR_DUMPDEBUG_LOG("HTB Buffer Opmode: %d", g_sCtrl.eOpMode);
+
+			for (i=0; i < HTB_FLAG_NUM_EL; i++)
+			{
+				PVR_DUMPDEBUG_LOG("HTB Log group %d: %x", i, g_auiHTBGroupEnable[i]);
+			}
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG("------[ HTB Log state: Off ]------");
+		}
+	}
+}
+
+/************************************************************************/ /*!
+ @Function      HTBDeviceCreate
+ @Description   Initialisation actions for HTB at device creation.
+
+ @Input         psDeviceNode    Reference to the device node in context
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeviceCreate(
+		PVRSRV_DEVICE_NODE *psDeviceNode
+)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hHtbDbgReqNotify,
+			psDeviceNode, &_HTBLogDebugInfo, DEBUG_REQUEST_HTB, NULL);
+	PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+	return eError;
+}
+
+/************************************************************************/ /*!
+ @Function      HTBDeviceDestroy
+ @Description   De-initialisation actions for HTB at device destruction.
+
+ @Input         psDeviceNode    Reference to the device node in context
+
+*/ /**************************************************************************/
+void
+HTBDeviceDestroy(
+		PVRSRV_DEVICE_NODE *psDeviceNode
+)
+{
+	if (psDeviceNode->hHtbDbgReqNotify)
+	{
+	/* Not much we can do if it fails, the driver is unloading */
+		(void)PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hHtbDbgReqNotify);
+		psDeviceNode->hHtbDbgReqNotify = NULL;
+	}
+}
+
+
+/************************************************************************/ /*!
+ @Function      HTBDeInit
+ @Description   Close the Host Trace Buffer and free all resources. Must
+                perform a no-op if already de-initialised.
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeInit( void )
+{
+	if (g_hTLStream)
+	{
+		TLStreamClose( g_hTLStream );
+		g_hTLStream = NULL;
+	}
+
+	if (g_sCtrl.pszBufferName)
+	{
+		OSFreeMem( g_sCtrl.pszBufferName );
+		g_sCtrl.pszBufferName = NULL;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+ AppHint interface functions
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                             const void *psPrivate,
+                             IMG_UINT32 ui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	return HTBControlKM(1, &ui32Value, 0, 0,
+	                    HTB_LOGMODE_UNDEF, HTB_OPMODE_UNDEF);
+}
+
+static
+PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const void *psPrivate,
+                              IMG_UINT32 *pui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	*pui32Value = g_auiHTBGroupEnable[0];
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                           const void *psPrivate,
+                           IMG_UINT32 ui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	return HTBControlKM(0, NULL, 0, 0, HTB_LOGMODE_UNDEF, ui32Value);
+}
+
+static
+PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                            const void *psPrivate,
+                            IMG_UINT32 *pui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	*pui32Value = (IMG_UINT32)g_sCtrl.eOpMode;
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+ @Function      HTBConfigureKM
+ @Description   Configure or update the configuration of the Host Trace Buffer
+
+ @Input         ui32NameSize    Size of the pszName string
+
+ @Input         pszName         Name to use for the underlying data buffer
+
+ @Input         ui32BufferSize  Size of the underlying data buffer
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBConfigureKM(
+		IMG_UINT32 ui32NameSize,
+		const IMG_CHAR * pszName,
+		const IMG_UINT32 ui32BufferSize
+)
+{
+	if ( !g_sCtrl.pszBufferName )
+	{
+		g_sCtrl.ui32BufferSize = (ui32BufferSize < HTB_TL_BUFFER_SIZE_MIN)? HTB_TL_BUFFER_SIZE_MIN: ui32BufferSize;
+		ui32NameSize = (ui32NameSize > PRVSRVTL_MAX_STREAM_NAME_SIZE)? PRVSRVTL_MAX_STREAM_NAME_SIZE: ui32NameSize;
+		g_sCtrl.pszBufferName = OSAllocMem(ui32NameSize * sizeof(IMG_CHAR));
+		if (g_sCtrl.pszBufferName == NULL)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		OSStringNCopy(g_sCtrl.pszBufferName, pszName, ui32NameSize);
+		g_sCtrl.pszBufferName[ui32NameSize-1] = 0;
+
+		/* initialise rest of state */
+		g_sCtrl.eOpMode = HTB_OPMODE_DROPLATEST;
+		g_sCtrl.ui32LogLevel = 0;
+		g_sCtrl.ui32PIDCount = 0;
+		g_sCtrl.ui32PIDHead = 0;
+		g_sCtrl.eLogMode = HTB_LOGMODE_ALLPID;
+		g_sCtrl.bLogDropSignalled = IMG_FALSE;
+
+		PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableHTBLogGroup,
+		                                    _HTBReadLogGroup,
+		                                    _HTBSetLogGroup,
+		                                    APPHINT_OF_DRIVER_NO_DEVICE,
+		                                    NULL);
+		PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBOperationMode,
+		                                    _HTBReadOpMode,
+		                                    _HTBSetOpMode,
+		                                    APPHINT_OF_DRIVER_NO_DEVICE,
+		                                    NULL);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HTBConfigureKM: Reconfiguration is not supported\n"));
+	}
+
+	return PVRSRV_OK;
+}
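+
+/* Illustrative configuration sketch (assumed caller and values; not part of
+ * this file):
+ *
+ *   static const IMG_CHAR szName[] = "PVRHTBuffer";
+ *   eError = HTBConfigureKM(sizeof(szName), szName, 0x20000);
+ *
+ * Requested sizes below HTB_TL_BUFFER_SIZE_MIN are rounded up, and the TL
+ * stream itself is only created on the first HTBControlKM() call. */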
+
+
+static void
+_OnTLReaderOpenCallback( void *pvArg )
+{
+	if ( g_hTLStream )
+	{
+		PVRSRV_ERROR eError;
+		IMG_UINT32 ui32Time = OSClockus();
+		eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE,
+		                ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)),
+		                ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+		                ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)),
+		                ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+		                g_sCtrl.ui32SyncCalcClkSpd);
+		PVR_LOG_IF_ERROR(eError, "HTBLog");
+	}
+
+	PVR_UNREFERENCED_PARAMETER(pvArg);
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBControlKM
+ @Description   Update the configuration of the Host Trace Buffer
+
+ @Input         ui32NumFlagGroups Number of group enable flags words
+
+ @Input         aui32GroupEnable  Flags words controlling groups to be logged
+
+ @Input         ui32LogLevel    Log level to record
+
+ @Input         ui32EnablePID   PID to enable logging for a specific process
+
+ @Input         eLogMode        Enable logging for all or specific processes,
+
+ @Input         eOpMode         Control the behaviour of the data buffer
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBControlKM(
+	const IMG_UINT32 ui32NumFlagGroups,
+	const IMG_UINT32 * aui32GroupEnable,
+	const IMG_UINT32 ui32LogLevel,
+	const IMG_UINT32 ui32EnablePID,
+	const HTB_LOGMODE_CTRL eLogMode,
+	const HTB_OPMODE_CTRL eOpMode
+)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
+	IMG_UINT32 i;
+	IMG_UINT32 ui32Time = OSClockus();
+
+	if ( !g_bConfigured && g_sCtrl.pszBufferName && ui32NumFlagGroups )
+	{
+		eError = TLStreamCreate(
+				&g_hTLStream,
+				PVRSRVGetPVRSRVData()->psHostMemDeviceNode,
+				g_sCtrl.pszBufferName,
+				g_sCtrl.ui32BufferSize,
+				_LookupFlags(HTB_OPMODE_DROPOLDEST) | g_ui32TLBaseFlags,
+				_OnTLReaderOpenCallback, NULL, NULL, NULL );
+		PVR_LOGR_IF_ERROR( eError, "TLStreamCreate");
+		g_bConfigured = IMG_TRUE;
+	}
+
+	if ( HTB_OPMODE_UNDEF != eOpMode && g_sCtrl.eOpMode != eOpMode)
+	{
+		g_sCtrl.eOpMode = eOpMode;
+		eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode) | g_ui32TLBaseFlags);
+		while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
+		{
+			OSReleaseThreadQuanta();
+			eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode) | g_ui32TLBaseFlags);
+		}
+		PVR_LOGR_IF_ERROR( eError, "TLStreamReconfigure");
+	}
+
+	if ( ui32EnablePID )
+	{
+		g_sCtrl.aui32EnablePID[g_sCtrl.ui32PIDHead] = ui32EnablePID;
+		g_sCtrl.ui32PIDHead++;
+		g_sCtrl.ui32PIDHead %= HTB_MAX_NUM_PID;
+		g_sCtrl.ui32PIDCount++;
+		if ( g_sCtrl.ui32PIDCount > HTB_MAX_NUM_PID )
+		{
+			g_sCtrl.ui32PIDCount = HTB_MAX_NUM_PID;
+		}
+	}
+
+	/* HTB_LOGMODE_ALLPID overrides ui32EnablePID */
+	if ( HTB_LOGMODE_ALLPID == eLogMode )
+	{
+		OSCachedMemSet(g_sCtrl.aui32EnablePID, 0, sizeof(g_sCtrl.aui32EnablePID));
+		g_sCtrl.ui32PIDCount = 0;
+		g_sCtrl.ui32PIDHead = 0;
+	}
+	if ( HTB_LOGMODE_UNDEF != eLogMode )
+	{
+		g_sCtrl.eLogMode = eLogMode;
+	}
+
+	if ( ui32NumFlagGroups )
+	{
+		for (i = 0; i < HTB_FLAG_NUM_EL && i < ui32NumFlagGroups; i++)
+		{
+			g_auiHTBGroupEnable[i] = aui32GroupEnable[i];
+		}
+		for (; i < HTB_FLAG_NUM_EL; i++)
+		{
+			g_auiHTBGroupEnable[i] = 0;
+		}
+	}
+
+	if ( ui32LogLevel )
+	{
+		g_sCtrl.ui32LogLevel = ui32LogLevel;
+	}
+
+	/* Dump the current configuration state */
+	eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode);
+	PVR_LOG_IF_ERROR( eError, "HTBLog");
+	eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]);
+	PVR_LOG_IF_ERROR( eError, "HTBLog");
+	eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel);
+	PVR_LOG_IF_ERROR( eError, "HTBLog");
+	eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode);
+	PVR_LOG_IF_ERROR( eError, "HTBLog");
+	for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
+	{
+		eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]);
+		PVR_LOG_IF_ERROR( eError, "HTBLog");
+	}
+
+	if (0 != g_sCtrl.ui32SyncMarker && 0 != g_sCtrl.ui32SyncCalcClkSpd)
+	{
+		eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_FWSYNC_MARK_RPT,
+				g_sCtrl.ui32SyncMarker);
+		PVR_LOG_IF_ERROR( eError, "HTBLog");
+		eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE_RPT,
+				((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+				((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+				g_sCtrl.ui32SyncCalcClkSpd);
+		PVR_LOG_IF_ERROR( eError, "HTBLog");
+	}
+
+	return eError;
+}
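+
+/* Illustrative control sketch (assumed caller and values; not part of this
+ * file): enable every group in the first flags word, leave the log level and
+ * PID filter alone, and switch the buffer to drop-oldest behaviour:
+ *
+ *   IMG_UINT32 aui32Enable[] = { 0xFFFFFFFF };
+ *   eError = HTBControlKM(1, aui32Enable, 0, 0,
+ *                         HTB_LOGMODE_UNDEF, HTB_OPMODE_DROPOLDEST);
+ */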
+
+/*************************************************************************/ /*!
+*/ /**************************************************************************/
+static IMG_BOOL
+_ValidPID( IMG_UINT32 PID )
+{
+	IMG_UINT32 i;
+
+	for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
+	{
+		if ( g_sCtrl.aui32EnablePID[i] == PID )
+		{
+			return IMG_TRUE;
+		}
+	}
+	return IMG_FALSE;
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncPartitionMarker
+ @Description   Write an HTB sync partition marker to the HTB log
+
+ @Input         ui32Marker      Marker value
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarker(
+	const IMG_UINT32 ui32Marker
+)
+{
+	g_sCtrl.ui32SyncMarker = ui32Marker;
+	if ( g_hTLStream )
+	{
+		PVRSRV_ERROR eError;
+		IMG_UINT32 ui32Time = OSClockus();
+		eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_FWSYNC_MARK, ui32Marker);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "HTBLog", PVRSRVGETERRORSTRING(eError), __func__));
+		}
+		if (0 != g_sCtrl.ui32SyncCalcClkSpd)
+		{
+			eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE,
+					((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+					((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+					g_sCtrl.ui32SyncCalcClkSpd);
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "HTBLog", PVRSRVGETERRORSTRING(eError), __func__));
+			}
+		}
+	}
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncScale
+ @Description   Write FW-Host synchronisation data to the HTB log when clocks
+                change or are re-calibrated
+
+ @Input         bLogValues      IMG_TRUE if value should be immediately written
+                                out to the log
+
+ @Input         ui64OSTS        OS Timestamp
+
+ @Input         ui64CRTS        Rogue timestamp
+
+ @Input         ui32CalcClkSpd  Calculated clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncScale(
+	const IMG_BOOL bLogValues,
+	const IMG_UINT64 ui64OSTS,
+	const IMG_UINT64 ui64CRTS,
+	const IMG_UINT32 ui32CalcClkSpd
+)
+{
+	g_sCtrl.ui64SyncOSTS = ui64OSTS;
+	g_sCtrl.ui64SyncCRTS = ui64CRTS;
+	g_sCtrl.ui32SyncCalcClkSpd = ui32CalcClkSpd;
+	if ( g_hTLStream && bLogValues)
+	{
+		PVRSRV_ERROR eError;
+		IMG_UINT32 ui32Time = OSClockus();
+		eError = HTBLog(0, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE,
+				((IMG_UINT32)((ui64OSTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64OSTS&0xffffffff)),
+				((IMG_UINT32)((ui64CRTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64CRTS&0xffffffff)),
+				ui32CalcClkSpd);
+		PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "HTBLog", PVRSRVGETERRORSTRING(eError), __func__));
+	}
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLogKM
+ @Description   Record a Host Trace Buffer log event
+
+ @Input         PID             The PID of the process the event is associated
+                                with. This is provided as an argument rather
+                                than querying internally so that events associated
+                                with a particular process, but performed by
+                                another can be logged correctly.
+
+ @Input         ui32TimeStamp   The timestamp to be associated with this log
+                                event
+
+ @Input         SF              The log event ID
+
+ @Input         ui32NumArgs     Number of elements in aui32Args
+
+ @Input         aui32Args       Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBLogKM(
+		IMG_UINT32 PID,
+		IMG_UINT32 ui32TimeStamp,
+		HTB_LOG_SFids SF,
+		IMG_UINT32 ui32NumArgs,
+		IMG_UINT32 * aui32Args
+)
+{
+	/* format of messages is: SF:PID:TIME:[PARn]*
+	 * 32-bit timestamp (us) gives about 1h before looping
+	 * Buffer allocated on the stack so don't need a semaphore to guard it
+	 */
+	IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS];
+
+	PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_ENABLED;
+	IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
+	IMG_UINT32 * pui32Message = aui32MessageBuffer;
+	IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs);
+
+	if ( g_hTLStream
+			&& ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) )
+/*			&& ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */
+/*			&& ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */
+			)
+	{
+		*pui32Message++ = SF;
+		*pui32Message++ = PID;
+		*pui32Message++ = ui32TimeStamp;
+		while ( ui32NumArgs )
+		{
+			ui32NumArgs--;
+			pui32Message[ui32NumArgs] = aui32Args[ui32NumArgs];
+		}
+
+		eError = TLStreamWrite( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize );
+		while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
+		{
+			OSReleaseThreadQuanta();
+			eError = TLStreamWrite( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize );
+		}
+
+		if ( PVRSRV_OK == eError )
+		{
+			g_sCtrl.bLogDropSignalled = IMG_FALSE;
+		}
+		else if ( PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG != eError || !g_sCtrl.bLogDropSignalled )
+		{
+			PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__));
+		}
+		if ( PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG == eError )
+		{
+			g_sCtrl.bLogDropSignalled = IMG_TRUE;
+		}
+	}
+
+	return eError;
+}
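+
+/* Illustrative log call (assumed caller; OSGetCurrentProcessID() is assumed
+ * to be the usual services helper for the caller's PID and is not defined in
+ * this file):
+ *
+ *   IMG_UINT32 aui32Args[] = { ui32Marker };
+ *   eError = HTBLogKM(OSGetCurrentProcessID(), OSClockus(),
+ *                     HTB_SF_CTRL_FWSYNC_MARK, 1, aui32Args);
+ *
+ * The resulting stream entry is SF:PID:TIME:PAR0, i.e. four 32-bit words. */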
+
+/* EOF */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/info_page_km.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/info_page_km.c
new file mode 100644
index 0000000..db27a52
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/info_page_km.c
@@ -0,0 +1,112 @@
+/*************************************************************************/ /*!
+@File           info_page_km.c
+@Title          Kernel/User space shared memory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements general purpose shared memory between kernel driver
+                and user mode.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "info_page_defs.h"
+#include "info_page.h"
+#include "pvrsrv.h"
+#include "devicemem.h"
+#include "pmr.h"
+
+PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData)
+{
+    const DEVMEM_FLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                      PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                      PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                      PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
+                                      PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                      PVRSRV_MEMALLOCFLAG_CPU_LOCAL;
+    PVRSRV_ERROR eError;
+
+    PVR_ASSERT(psData != NULL);
+
+    /* Create the CacheOp information page */
+    eError = DevmemAllocateExportable(psData->psHostMemDeviceNode,
+                                      OSGetPageSize(),
+                                      OSGetPageSize(),
+                                      OSGetPageShift(),
+                                      uiMemFlags,
+                                      "PVRSRVInfoPage",
+                                      &psData->psInfoPageMemDesc);
+    PVR_LOGG_IF_ERROR(eError, "DevmemAllocateExportable", e0);
+
+    eError = DevmemAcquireCpuVirtAddr(psData->psInfoPageMemDesc,
+                                      (void **) &psData->pui32InfoPage);
+    PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e0);
+
+    /* This PMR is also used for deferring timelines, global flush & logging KM
+     * requests in debug */
+    eError = DevmemLocalGetImportHandle(psData->psInfoPageMemDesc,
+                                        (void **) &psData->psInfoPagePMR);
+    PVR_LOGG_IF_ERROR(eError, "DevmemLocalGetImportHandle", e0);
+
+    eError = OSLockCreate(&psData->hInfoPageLock, LOCK_TYPE_PASSIVE);
+    PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+    return PVRSRV_OK;
+
+e0:
+    InfoPageDestroy(psData);
+    return eError;
+}
+
+void InfoPageDestroy(PVRSRV_DATA *psData)
+{
+    if (psData->psInfoPageMemDesc)
+    {
+        if (psData->pui32InfoPage != NULL)
+        {
+            DevmemReleaseCpuVirtAddr(psData->psInfoPageMemDesc);
+            psData->pui32InfoPage = NULL;
+        }
+
+        DevmemFree(psData->psInfoPageMemDesc);
+        psData->psInfoPageMemDesc = NULL;
+    }
+
+    if (psData->hInfoPageLock)
+    {
+        OSLockDestroy(psData->hInfoPageLock);
+        psData->hInfoPageLock = NULL;
+    }
+}
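+
+/* Usage note (illustrative, not part of the API contract): InfoPageCreate()
+ * and InfoPageDestroy() are intended as a create/destroy pair around the
+ * lifetime of PVRSRV_DATA; InfoPageCreate() already calls InfoPageDestroy()
+ * on its own failure path, so callers need only one cleanup call. */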
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/lists.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/lists.c
new file mode 100644
index 0000000..e8e7088
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/lists.c
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linked list shared functions implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implementation of the list iterators for types shared among
+                more than one file in the services code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "lists.h"
+
+/*===================================================================
+  LIST ITERATOR FUNCTIONS USED IN MORE THAN ONE FILE (those used just
+  once are implemented locally).
+  ===================================================================*/
+
+IMPLEMENT_LIST_ANY(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE)
+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE)
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/mmu_common.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/mmu_common.c
new file mode 100644
index 0000000..c458cac
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/mmu_common.c
@@ -0,0 +1,4194 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common MMU Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "devicemem_server_utils.h"
+
+/* Our own interface */
+#include "mmu_common.h"
+
+#include "rgx_bvnc_defs_km.h"
+#include "rgxmmudefs_km.h"
+/*
+Interfaces to other modules:
+
+Let's keep this graph up-to-date:
+
+   +-----------+
+   | devicemem |
+   +-----------+
+         |
+   +============+
+   | mmu_common |
+   +============+
+         |
+         +-----------------+
+         |                 |
+    +---------+      +----------+
+    |   pmr   |      |  device  |
+    +---------+      +----------+
+*/
+
+#include "img_types.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#if defined(PDUMP)
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#endif
+#include "pmr.h"
+/* include/ */
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv.h"
+#include "htbuffer.h"
+
+#include "rgxdevice.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "physmem_lma.h"
+#endif
+
+#include "dllist.h"
+
+// #define MMU_OBJECT_REFCOUNT_DEBUGGING 1
+#if defined (MMU_OBJECT_REFCOUNT_DEBUGGING)
+#define MMU_OBJ_DBG(x)	PVR_DPF(x);
+#else
+#define MMU_OBJ_DBG(x)
+#endif
+
+typedef IMG_UINT32 MMU_FLAGS_T;
+
+typedef enum _MMU_MOD_
+{
+	MMU_MOD_UNKNOWN = 0,
+	MMU_MOD_MAP,
+	MMU_MOD_UNMAP,
+} MMU_MOD;
+
+
+/*!
+ * Refcounted structure that is shared between the context and
+ * the cleanup thread items.
+ * It is used to keep track of all cleanup items and whether the creating
+ * MMU context has been destroyed and therefore is not allowed to be
+ * accessed anymore.
+ *
+ * The cleanup thread is used to defer the freeing of the page tables
+ * because we have to make sure that the MMU cache has been invalidated.
+ * If we don't take care of this the MMU might partially access cached
+ * and uncached tables which might lead to inconsistencies and in the
+ * worst case to MMU pending faults on random memory.
+ */
+typedef struct _MMU_CTX_CLEANUP_DATA_
+{
+	/*! Refcount to know when this structure can be destroyed */
+	ATOMIC_T iRef;
+	/*! Protect items in this structure, especially the refcount */
+	POS_LOCK hCleanupLock;
+	/*! List of all cleanup items currently in flight */
+	DLLIST_NODE sMMUCtxCleanupItemsHead;
+	/*! Was the MMU context destroyed and should not be accessed anymore? */
+	IMG_BOOL bMMUContextExists;
+} MMU_CTX_CLEANUP_DATA;
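+
+/* Sketch of the refcount protocol used below (illustrative, derived from
+ * the cleanup code in this file): every deferred cleanup item takes a
+ * reference with OSAtomicIncrement(&iRef) when it is queued and drops it
+ * with OSAtomicDecrement(&iRef) once it has run; whichever decrement
+ * reaches 0 destroys hCleanupLock and frees this structure.
+ * bMMUContextExists tells late-running items that the owning context is
+ * gone and device structures must not be touched any more. */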
+
+
+/*!
+ * Structure holding one or more page tables that need to be
+ * freed after the MMU cache has been flushed which is signalled when
+ * the stored sync has a value that is <= the required value.
+ */
+typedef struct _MMU_CLEANUP_ITEM_
+{
+	/*! Cleanup thread data */
+	PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
+	/*! List to hold all the MMU_MEMORY_MAPPINGs, i.e. page tables */
+	DLLIST_NODE sMMUMappingHead;
+	/*! Node of the cleanup item list for the context */
+	DLLIST_NODE sMMUCtxCleanupItem;
+	/*! Pointer to the cleanup meta data */
+	MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData;
+	/*! Sync to query if the MMU cache was flushed */
+	PVRSRV_CLIENT_SYNC_PRIM *psSync;
+	/*! The update value of the sync to signal that the cache was flushed */
+	IMG_UINT16 uiRequiredSyncVal;
+	/*! The device node needed to free the page tables */
+	PVRSRV_DEVICE_NODE *psDevNode;
+} MMU_CLEANUP_ITEM;
+
+/*!
+	All physical allocations and frees are relative to this context, so
+	we would get all the allocations of PCs, PDs, and PTs from the same
+	RA.
+
+	We have one per MMU context in case we have mixed UMA/LMA devices
+	within the same system.
+*/
+typedef struct _MMU_PHYSMEM_CONTEXT_
+{
+	/*! Parent device node */
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	/*! Refcount so we know when to free up the arena */
+	IMG_UINT32 uiNumAllocations;
+
+	/*! Arena from which physical memory is derived */
+	RA_ARENA *psPhysMemRA;
+	/*! Arena name */
+	IMG_CHAR *pszPhysMemRAName;
+	/*! Size of arena name string */
+	size_t uiPhysMemRANameAllocSize;
+
+	/*! Meta data for deferred cleanup */
+	MMU_CTX_CLEANUP_DATA *psCleanupData;
+	/*! Temporary list of all deferred MMU_MEMORY_MAPPINGs. */
+	DLLIST_NODE sTmpMMUMappingHead;
+
+} MMU_PHYSMEM_CONTEXT;
+
+/*!
+	Mapping structure for MMU memory allocation
+*/
+typedef struct _MMU_MEMORY_MAPPING_
+{
+	/*! Physmem context to allocate from */
+	MMU_PHYSMEM_CONTEXT		*psContext;
+	/*! OS/system Handle for this allocation */
+	PG_HANDLE				sMemHandle;
+	/*! CPU virtual address of this allocation */
+	void					*pvCpuVAddr;
+	/*! Device physical address of this allocation */
+	IMG_DEV_PHYADDR			sDevPAddr;
+	/*! Size of this allocation */
+	size_t					uiSize;
+	/*! Number of current mappings of this allocation */
+	IMG_UINT32				uiCpuVAddrRefCount;
+	/*! Node for the defer free list */
+	DLLIST_NODE				sMMUMappingItem;
+} MMU_MEMORY_MAPPING;
+
+/*!
+	Memory descriptor for MMU objects. There can be more than one memory
+	descriptor per MMU memory allocation.
+*/
+typedef struct _MMU_MEMORY_DESC_
+{
+	/* NB: bValid is set if this descriptor describes physical
+	   memory.  This allows "empty" descriptors to exist, such that we
+	   can allocate them in batches.  */
+	/*! Does this MMU object have physical backing */
+	IMG_BOOL				bValid;
+	/*! Device Physical address of physical backing */
+	IMG_DEV_PHYADDR			sDevPAddr;
+	/*! CPU virtual address of physical backing */
+	void					*pvCpuVAddr;
+	/*! Mapping data for this MMU object */
+	MMU_MEMORY_MAPPING		*psMapping;
+	/*! Memdesc offset into the psMapping */
+	IMG_UINT32 uiOffset;
+	/*! Size of the Memdesc */
+	IMG_UINT32 uiSize;
+} MMU_MEMORY_DESC;
+
+/*!
+	MMU levelx structure. This is generic and is used
+	for all levels (PC, PD, PT).
+*/
+typedef struct _MMU_Levelx_INFO_
+{
+	/*! The Number of entries in this level */
+	IMG_UINT32 ui32NumOfEntries;
+
+	/*! Number of times this level has been referenced. Note: For Level1 (PTE)
+	    we still take/drop the reference when setting up the page tables rather
+	    than at map/unmap time as this simplifies things */
+	IMG_UINT32 ui32RefCount;
+
+	/*! MemDesc for this level */
+	MMU_MEMORY_DESC sMemDesc;
+
+	/*! Array of infos for the next level. Must be last member in structure */
+	struct _MMU_Levelx_INFO_ *apsNextLevel[1];
+} MMU_Levelx_INFO;
+
+/*!
+	MMU context structure
+*/
+struct _MMU_CONTEXT_
+{
+	/*! Parent device node */
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	MMU_DEVICEATTRIBS *psDevAttrs;
+
+	/*! For allocation and deallocation of the physical memory where
+	    the pagetables live */
+	struct _MMU_PHYSMEM_CONTEXT_ *psPhysMemCtx;
+
+#if defined(PDUMP)
+	/*! PDump context ID (required for PDump commands with virtual addresses) */
+	IMG_UINT32 uiPDumpContextID;
+
+	/*! The refcount of the PDump context ID */
+	IMG_UINT32 ui32PDumpContextIDRefCount;
+#endif
+
+	/*! Data that is passed back during device specific callbacks */
+	IMG_HANDLE hDevData;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	IMG_UINT32	ui32OSid;
+	IMG_UINT32	ui32OSidReg;
+	IMG_BOOL	bOSidAxiProt;
+#endif
+
+	/*! Lock to ensure exclusive access when manipulating the MMU context or
+	 * reading and using its content
+	 */
+	POS_LOCK hLock;
+
+	/*! Base level info structure. Must be last member in structure */
+	MMU_Levelx_INFO sBaseLevelInfo;
+	/* NO OTHER MEMBERS AFTER THIS STRUCTURE ! */
+};
+
+static const IMG_DEV_PHYADDR gsBadDevPhyAddr = {MMU_BAD_PHYS_ADDR};
+
+#if defined(DEBUG)
+#include "log2.h"
+#endif
+
+
+/*****************************************************************************
+ *                          Utility functions                                *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _FreeMMUMapping
+
+@Description    Free a given dllist of MMU_MEMORY_MAPPINGs and the page tables
+                they represent.
+
+@Input          psDevNode           Device node
+
+@Input          psTmpMMUMappingHead List of MMU_MEMORY_MAPPINGs to free
+*/
+/*****************************************************************************/
+static void
+_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode,
+                PDLLIST_NODE psTmpMMUMappingHead)
+{
+	PDLLIST_NODE psNode, psNextNode;
+
+	/* Free the current list unconditionally */
+	dllist_foreach_node(psTmpMMUMappingHead,
+						psNode,
+						psNextNode)
+	{
+		MMU_MEMORY_MAPPING *psMapping = IMG_CONTAINER_OF(psNode,
+														 MMU_MEMORY_MAPPING,
+														 sMMUMappingItem);
+
+		psDevNode->pfnDevPxFree(psDevNode, &psMapping->sMemHandle);
+		dllist_remove_node(psNode);
+		OSFreeMem(psMapping);
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       _CleanupThread_FreeMMUMapping
+
+@Description    Function to be executed by the cleanup thread to free
+                MMU_MEMORY_MAPPINGs after the MMU cache has been invalidated.
+
+                This function will request an MMU cache invalidate once and
+                retry to free the MMU_MEMORY_MAPPINGs until the invalidate
+                has been executed.
+
+                If the memory context that created this cleanup item has been
+                destroyed in the meantime this function will directly free the
+                MMU_MEMORY_MAPPINGs without waiting for any MMU cache
+                invalidation.
+
+@Input          pvData           Cleanup data in form of a MMU_CLEANUP_ITEM
+
+@Return         PVRSRV_OK if successful, otherwise PVRSRV_ERROR_RETRY
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR
+_CleanupThread_FreeMMUMapping(void* pvData)
+{
+	PVRSRV_ERROR eError;
+	MMU_CLEANUP_ITEM *psCleanup = (MMU_CLEANUP_ITEM *) pvData;
+	MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData = psCleanup->psMMUCtxCleanupData;
+	PVRSRV_DEVICE_NODE *psDevNode = psCleanup->psDevNode;
+	IMG_BOOL bFreeNow;
+	IMG_UINT32 uiSyncCurrent;
+	IMG_UINT32 uiSyncReq;
+
+	OSLockAcquire(psMMUCtxCleanupData->hCleanupLock);
+
+	/* Don't attempt to free anything when the context has been destroyed.
+	 * Especially don't access any device specific structures anymore!*/
+	if (!psMMUCtxCleanupData->bMMUContextExists)
+	{
+		OSFreeMem(psCleanup);
+		eError = PVRSRV_OK;
+		goto e0;
+	}
+
+	if (psCleanup->psSync == NULL)
+	{
+		/* Kick to invalidate the MMU caches and get sync info */
+		psDevNode->pfnMMUCacheInvalidateKick(psDevNode,
+											 &psCleanup->uiRequiredSyncVal,
+											 IMG_TRUE);
+		psCleanup->psSync = psDevNode->psMMUCacheSyncPrim;
+	}
+
+	uiSyncCurrent = *(psCleanup->psSync->pui32LinAddr);
+	uiSyncReq = psCleanup->uiRequiredSyncVal;
+
+	/* Either the invalidate has been executed ... */
+	bFreeNow = (uiSyncCurrent >= uiSyncReq) ? IMG_TRUE :
+			/* ... with the counter wrapped around ... */
+			(uiSyncReq - uiSyncCurrent) > 0xEFFFFFFFUL ? IMG_TRUE :
+			/* ... or are we still waiting for the invalidate? */
+			IMG_FALSE;
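+
+	/* Worked example of the wrap-around test above (illustrative values):
+	 * if uiSyncReq = 0xFFFFFFF0 was captured just before the 32-bit sync
+	 * counter wrapped and the sync now reads uiSyncCurrent = 0x00000005,
+	 * the first comparison fails, but (uiSyncReq - uiSyncCurrent) equals
+	 * 0xFFFFFFEB > 0xEFFFFFFF, so the invalidate is treated as done. */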
+
+#if defined(NO_HARDWARE)
+	/* In NOHW the syncs will never be updated so just free the tables */
+	bFreeNow = IMG_TRUE;
+#endif
+
+	if (bFreeNow)
+	{
+		_FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead);
+
+		dllist_remove_node(&psCleanup->sMMUCtxCleanupItem);
+		OSFreeMem(psCleanup);
+
+		eError = PVRSRV_OK;
+	}
+	else
+	{
+		eError = PVRSRV_ERROR_RETRY;
+	}
+
+e0:
+
+	/* If this cleanup task has been successfully executed we can
+	 * decrease the context cleanup data refcount. Successful here
+	 * means that the MMU_MEMORY_MAPPINGs have been freed, either by
+	 * this cleanup task or when the MMU context was destroyed. */
+	if (eError == PVRSRV_OK)
+	{
+		OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
+
+		if (OSAtomicDecrement(&psMMUCtxCleanupData->iRef) == 0)
+		{
+			OSLockDestroy(psMMUCtxCleanupData->hCleanupLock);
+			OSFreeMem(psMMUCtxCleanupData);
+		}
+	}
+	else
+	{
+		OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
+	}
+
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _SetupCleanup_FreeMMUMapping
+
+@Description    Set up a cleanup item for the cleanup thread that will
+                kick off an MMU invalidate request and free the associated
+                MMU_MEMORY_MAPPINGs once the invalidate has completed.
+
+@Input          psDevNode           Device node
+
+@Input          psPhysMemCtx        The current MMU physmem context
+*/
+/*****************************************************************************/
+static void
+_SetupCleanup_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode,
+                             MMU_PHYSMEM_CONTEXT *psPhysMemCtx)
+{
+
+	MMU_CLEANUP_ITEM *psCleanupItem;
+	MMU_CTX_CLEANUP_DATA *psCleanupData = psPhysMemCtx->psCleanupData;
+
+	if (dllist_is_empty(&psPhysMemCtx->sTmpMMUMappingHead))
+	{
+		goto e0;
+	}
+
+#if !defined(SUPPORT_MMU_PENDING_FAULT_PROTECTION)
+	/* If users deactivated this we immediately free the page tables */
+	goto e1;
+#endif
+
+	/* Don't defer the freeing if we are currently unloading the driver
+	 * or if the sync has been destroyed */
+	if (PVRSRVGetPVRSRVData()->bUnload ||
+	    psDevNode->psMMUCacheSyncPrim == NULL)
+	{
+		goto e1;
+	}
+
+	/* Allocate a cleanup item */
+	psCleanupItem = OSAllocMem(sizeof(*psCleanupItem));
+	if(!psCleanupItem)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to get memory for deferred page table cleanup. "
+				 "Freeing tables immediately",
+				 __FUNCTION__));
+		goto e1;
+	}
+
+	/* Set sync to NULL to indicate we did not interact with
+	 * the FW yet. Kicking off an MMU cache invalidate should
+	 * be done in the cleanup thread to not waste time here. */
+	psCleanupItem->psSync = NULL;
+	psCleanupItem->uiRequiredSyncVal = 0;
+	psCleanupItem->psDevNode = psDevNode;
+	psCleanupItem->psMMUCtxCleanupData = psCleanupData;
+
+	OSAtomicIncrement(&psCleanupData->iRef);
+
+	/* Move the page tables to free to the cleanup item */
+	dllist_replace_head(&psPhysMemCtx->sTmpMMUMappingHead,
+	                    &psCleanupItem->sMMUMappingHead);
+
+	/* Add the cleanup item itself to the context list */
+	dllist_add_to_tail(&psCleanupData->sMMUCtxCleanupItemsHead,
+	                   &psCleanupItem->sMMUCtxCleanupItem);
+
+	/* Setup the cleanup thread data and add the work item */
+	psCleanupItem->sCleanupThreadFn.pfnFree = _CleanupThread_FreeMMUMapping;
+	psCleanupItem->sCleanupThreadFn.pvData = psCleanupItem;
+	psCleanupItem->sCleanupThreadFn.ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT;
+	psCleanupItem->sCleanupThreadFn.bDependsOnHW = IMG_TRUE;
+
+	PVRSRVCleanupThreadAddWork(&psCleanupItem->sCleanupThreadFn);
+
+	return;
+
+e1:
+	/* Free the page tables now */
+	_FreeMMUMapping(psDevNode, &psPhysMemCtx->sTmpMMUMappingHead);
+e0:
+	return;
+}
+
+/*************************************************************************/ /*!
+@Function       _CalcPCEIdx
+
+@Description    Calculate the page catalogue index
+
+@Input          sDevVAddr           Device virtual address
+
+@Input          psDevVAddrConfig    Configuration of the virtual address
+
+@Input          bRoundUp            Round up the index
+
+@Return         The page catalogue index
+*/
+/*****************************************************************************/
+static IMG_UINT32 _CalcPCEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	IMG_UINT32 ui32RetVal;
+
+	sTmpDevVAddr = sDevVAddr;
+
+	if (bRoundUp)
+	{
+		sTmpDevVAddr.uiAddr--;
+	}
+	ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPCIndexMask)
+		>> psDevVAddrConfig->uiPCIndexShift);
+
+	if (bRoundUp)
+	{
+		ui32RetVal++;
+	}
+
+	return ui32RetVal;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _CalcPDEIdx
+
+@Description    Calculate the page directory index
+
+@Input          sDevVAddr           Device virtual address
+
+@Input          psDevVAddrConfig    Configuration of the virtual address
+
+@Input          bRoundUp            Round up the index
+
+@Return         The page directory index
+*/
+/*****************************************************************************/
+static IMG_UINT32 _CalcPDEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	IMG_UINT32 ui32RetVal;
+
+	sTmpDevVAddr = sDevVAddr;
+
+	if (bRoundUp)
+	{
+		sTmpDevVAddr.uiAddr--;
+	}
+	ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPDIndexMask)
+		>> psDevVAddrConfig->uiPDIndexShift);
+
+	if (bRoundUp)
+	{
+		ui32RetVal++;
+	}
+
+	return ui32RetVal;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _CalcPTEIdx
+
+@Description    Calculate the page entry index
+
+@Input          sDevVAddr           Device virtual address
+
+@Input          psDevVAddrConfig    Configuration of the virtual address
+
+@Input          bRoundUp            Round up the index
+
+@Return         The page entry index
+*/
+/*****************************************************************************/
+static IMG_UINT32 _CalcPTEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	IMG_UINT32 ui32RetVal;
+
+	sTmpDevVAddr = sDevVAddr;
+	sTmpDevVAddr.uiAddr -= psDevVAddrConfig->uiOffsetInBytes;
+	if (bRoundUp)
+	{
+		sTmpDevVAddr.uiAddr--;
+	}
+	ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPTIndexMask)
+		>> psDevVAddrConfig->uiPTIndexShift);
+
+	if (bRoundUp)
+	{
+		ui32RetVal++;
+	}
+
+	return ui32RetVal;
+}
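+
+/* Worked example for the three index helpers above, using illustrative
+ * values (the real masks and shifts come from the device's
+ * MMU_DEVVADDR_CONFIG): with 4KB pages and uiPTIndexShift = 12,
+ * uiPDIndexShift = 21, uiPCIndexShift = 30, the device virtual address
+ * 0x1234567000 splits into the bit-fields
+ *
+ *   PC index = (0x1234567000 >> 30) & 0x3FF = 0x48
+ *   PD index = (0x1234567000 >> 21) & 0x1FF = 0x1A2
+ *   PT index = (0x1234567000 >> 12) & 0x1FF = 0x167
+ *
+ * (the code masks first and then shifts, which is equivalent). */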
+
+/*****************************************************************************
+ *         MMU memory allocation/management functions (mem desc)             *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMem_RAImportAlloc
+
+@Description    Imports MMU Px memory into the RA. This is where the
+                actual allocation of physical memory happens.
+
+@Input          hArenaHandle    Handle that was passed in during the
+                                creation of the RA
+
+@Input          uiSize          Size of the memory to import
+
+@Input          uiFlags         Flags that were passed with the allocation.
+
+@Output         puiBase         Base device physical address of the import
+
+@Output         puiActualSize   The actual size of the import
+
+@Output         phPriv          Handle which will be passed back when
+                                this import is freed
+
+@Return         PVRSRV_OK if import alloc was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle,
+                                           RA_LENGTH_T uiSize,
+                                           RA_FLAGS_T uiFlags,
+                                           const IMG_CHAR *pszAnnotation,
+                                           RA_BASE_T *puiBase,
+                                           RA_LENGTH_T *puiActualSize,
+                                           RA_PERISPAN_HANDLE *phPriv)
+{
+	MMU_PHYSMEM_CONTEXT *psCtx = (MMU_PHYSMEM_CONTEXT *) hArenaHandle;
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) psCtx->psDevNode;
+	MMU_MEMORY_MAPPING *psMapping;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+	psMapping = OSAllocMem(sizeof(MMU_MEMORY_MAPPING));
+	if (psMapping == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	eError = psDevNode->pfnDevPxAlloc(psDevNode, TRUNCATE_64BITS_TO_SIZE_T(uiSize), &psMapping->sMemHandle,
+										&psMapping->sDevPAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	psMapping->psContext = psCtx;
+	psMapping->uiSize = TRUNCATE_64BITS_TO_SIZE_T(uiSize);
+
+	psMapping->uiCpuVAddrRefCount = 0;
+
+	*phPriv = (RA_PERISPAN_HANDLE) psMapping;
+
+	/* Note: This assumes this memory never gets paged out */
+	*puiBase = (RA_BASE_T)psMapping->sDevPAddr.uiAddr;
+	*puiActualSize = uiSize;
+
+	return PVRSRV_OK;
+
+e1:
+	OSFreeMem(psMapping);
+e0:
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMem_RAImportFree
+
+@Description    Frees MMU Px memory that was imported into the RA. The
+                pages are not released immediately but queued on the
+                context's defer-free list.
+
+@Input          hArenaHandle    Handle that was passed in during the
+                                creation of the RA
+
+@Input          uiBase          Base address of the import to be freed
+
+@Input          hPriv           Private data that the import alloc provided
+
+@Return         None
+*/
+/*****************************************************************************/
+static void _MMU_PhysMem_RAImportFree(RA_PERARENA_HANDLE hArenaHandle,
+									  RA_BASE_T uiBase,
+									  RA_PERISPAN_HANDLE hPriv)
+{
+	MMU_MEMORY_MAPPING *psMapping = (MMU_MEMORY_MAPPING *) hPriv;
+	MMU_PHYSMEM_CONTEXT *psCtx = (MMU_PHYSMEM_CONTEXT *) hArenaHandle;
+
+	PVR_UNREFERENCED_PARAMETER(uiBase);
+
+	/* Check we have dropped all CPU mappings */
+	PVR_ASSERT(psMapping->uiCpuVAddrRefCount == 0);
+
+	/* Add mapping to defer free list */
+	psMapping->psContext = NULL;
+	dllist_add_to_tail(&psCtx->sTmpMMUMappingHead, &psMapping->sMMUMappingItem);
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMemAlloc
+
+@Description    Allocates physical memory for MMU objects
+
+@Input          psCtx           Physmem context to do the allocation from
+
+@Output         psMemDesc       Allocation description
+
+@Input          uiBytes         Size of the allocation in bytes
+
+@Input          uiAlignment     Alignment requirement of this allocation
+
+@Return         PVRSRV_OK if allocation was successful
+*/
+/*****************************************************************************/
+
+static PVRSRV_ERROR _MMU_PhysMemAlloc(MMU_PHYSMEM_CONTEXT *psCtx,
+                                      MMU_MEMORY_DESC *psMemDesc,
+                                      size_t uiBytes,
+                                      size_t uiAlignment)
+{
+	PVRSRV_ERROR eError;
+	RA_BASE_T uiPhysAddr;
+
+	if (!psMemDesc || psMemDesc->bValid)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = RA_Alloc(psCtx->psPhysMemRA,
+	                  uiBytes,
+	                  RA_NO_IMPORT_MULTIPLIER,
+	                  0, /* flags */
+	                  uiAlignment,
+	                  "",
+	                  &uiPhysAddr,
+	                  NULL,
+	                  (RA_PERISPAN_HANDLE *) &psMemDesc->psMapping);
+	if(PVRSRV_OK != eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_MMU_PhysMemAlloc: ERROR call to RA_Alloc() failed"));
+		return eError;
+	}
+
+	psMemDesc->bValid = IMG_TRUE;
+	psMemDesc->pvCpuVAddr = NULL;
+	psMemDesc->sDevPAddr.uiAddr = (IMG_UINT64) uiPhysAddr;
+
+	if (psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
+	{
+		eError = psCtx->psDevNode->pfnDevPxMap(psCtx->psDevNode,
+		                                       &psMemDesc->psMapping->sMemHandle,
+		                                       psMemDesc->psMapping->uiSize,
+		                                       &psMemDesc->psMapping->sDevPAddr,
+		                                       &psMemDesc->psMapping->pvCpuVAddr);
+		if (eError != PVRSRV_OK)
+		{
+			RA_Free(psCtx->psPhysMemRA, psMemDesc->sDevPAddr.uiAddr);
+			return eError;
+		}
+	}
+
+	psMemDesc->psMapping->uiCpuVAddrRefCount++;
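+	/* Derive the CPU virtual address of this sub-allocation: the RA returns
+	 * a device physical address inside one imported mapping, so the offset
+	 * of that address from the mapping's physical base (e.g. 0x400 for an
+	 * illustrative sub-allocation 1KB into the mapping) is the same offset
+	 * to apply to the mapping's CPU virtual base. */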
+	psMemDesc->pvCpuVAddr = (IMG_UINT8 *) psMemDesc->psMapping->pvCpuVAddr
+	                        + (psMemDesc->sDevPAddr.uiAddr - psMemDesc->psMapping->sDevPAddr.uiAddr);
+	psMemDesc->uiOffset = (psMemDesc->sDevPAddr.uiAddr - psMemDesc->psMapping->sDevPAddr.uiAddr);
+	psMemDesc->uiSize = uiBytes;
+	PVR_ASSERT(psMemDesc->pvCpuVAddr != NULL);
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMemFree
+
+@Description    Frees physical memory for MMU objects
+
+@Input          psCtx           Physmem context to do the free on
+
+@Input          psMemDesc       Allocation description
+
+@Return         None
+*/
+/*****************************************************************************/
+static void _MMU_PhysMemFree(MMU_PHYSMEM_CONTEXT *psCtx,
+							 MMU_MEMORY_DESC *psMemDesc)
+{
+	RA_BASE_T uiPhysAddr;
+
+	PVR_ASSERT(psMemDesc->bValid);
+
+	if (--psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
+	{
+		psCtx->psDevNode->pfnDevPxUnMap(psCtx->psDevNode, &psMemDesc->psMapping->sMemHandle,
+								psMemDesc->psMapping->pvCpuVAddr);
+	}
+
+	psMemDesc->pvCpuVAddr = NULL;
+
+	uiPhysAddr = psMemDesc->sDevPAddr.uiAddr;
+	RA_Free(psCtx->psPhysMemRA, uiPhysAddr);
+
+	psMemDesc->bValid = IMG_FALSE;
+}
+
+
+/*****************************************************************************
+ *              MMU object allocation/management functions                   *
+ *****************************************************************************/
+
+static INLINE PVRSRV_ERROR _MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate,
+                                                   PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+                                                   MMU_PROTFLAGS_T *uiMMUProtFlags,
+                                                   MMU_CONTEXT *psMMUContext)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 uiGPUCacheMode;
+
+	/* Do flag conversion between devmem flags and MMU generic flags */
+	if (bInvalidate == IMG_FALSE)
+	{
+		*uiMMUProtFlags |= ( (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
+							>> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET)
+							<< MMU_PROTFLAGS_DEVICE_OFFSET;
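+
+		/* e.g. (illustrative): a device-specific flag held at bit
+		 * PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET + n of uiMappingFlags is
+		 * re-based to bit MMU_PROTFLAGS_DEVICE_OFFSET + n of the generic
+		 * MMU protection flags by the shift pair above. */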
+
+		if (PVRSRV_CHECK_GPU_READABLE(uiMappingFlags))
+		{
+			*uiMMUProtFlags |= MMU_PROTFLAGS_READABLE;
+		}
+		if (PVRSRV_CHECK_GPU_WRITEABLE(uiMappingFlags))
+		{
+			*uiMMUProtFlags |= MMU_PROTFLAGS_WRITEABLE;
+		}
+
+		eError = DevmemDeviceCacheMode(psMMUContext->psDevNode,
+		                               uiMappingFlags,
+		                               &uiGPUCacheMode);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+
+		switch (uiGPUCacheMode)
+		{
+			case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
+			case PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE:
+					break;
+			case PVRSRV_MEMALLOCFLAG_GPU_CACHED:
+					*uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
+					break;
+			default:
+					PVR_DPF((PVR_DBG_ERROR,"_MMU_DerivePTProtFlags: Wrong parameters"));
+					return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		if (DevmemDeviceCacheCoherency(psMMUContext->psDevNode, uiMappingFlags))
+		{
+			*uiMMUProtFlags |= MMU_PROTFLAGS_CACHE_COHERENT;
+		}
+
+		if ((psMMUContext->psDevNode->pfnCheckDeviceFeature) &&
+				psMMUContext->psDevNode->pfnCheckDeviceFeature(psMMUContext->psDevNode, RGX_FEATURE_MIPS_BIT_MASK))
+		{
+			/*
+				If we are allocating on the MMU of the firmware processor, the cached/uncached attributes
+				must depend on the FIRMWARE_CACHED allocation flag.
+			 */
+			if (psMMUContext->psDevAttrs == psMMUContext->psDevNode->psFirmwareMMUDevAttrs)
+			{
+				if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED))
+				{
+					*uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
+				}
+				else
+				{
+					*uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHED;
+				}
+				*uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHE_COHERENT;
+			}
+		}
+	}
+	else
+	{
+		*uiMMUProtFlags |= MMU_PROTFLAGS_INVALID;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       _PxMemAlloc
+
+@Description    Allocates physical memory for MMU objects, initialises
+                and PDumps it.
+
+@Input          psMMUContext    MMU context
+
+@Input          uiNumEntries    Number of entries to allocate
+
+@Input          psConfig        MMU Px config
+
+@Input          eMMULevel       MMU level that the allocation is for
+
+@Output         psMemDesc       Description of allocation
+
+@Input          uiLog2Align     Log2 of the alignment required by the
+                                entries of the level above
+
+@Return         PVRSRV_OK if allocation was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext,
+								IMG_UINT32 uiNumEntries,
+								const MMU_PxE_CONFIG *psConfig,
+								MMU_LEVEL eMMULevel,
+								MMU_MEMORY_DESC *psMemDesc,
+								IMG_UINT32 uiLog2Align)
+{
+	PVRSRV_ERROR eError;
+	size_t uiBytes;
+	size_t uiAlign;
+
+	PVR_ASSERT(psConfig->uiBytesPerEntry != 0);
+
+	uiBytes = uiNumEntries * psConfig->uiBytesPerEntry;
+	/* We need the alignment of the previous level here because its entries
+	 * must be able to address the object we allocate */
+	uiAlign = 1 << uiLog2Align;
+
+	/* 
+	 * If the hardware specifies an alignment requirement for a page table then
+	 * it also requires that all memory up to the next aligned address is
+	 * zeroed.
+	 *
+	 * Failing to do this can result in uninitialised data outside of the actual
+	 * page table range being read by the MMU and treated as valid, e.g. the
+	 * pending flag.
+	 *
+	 * Typically this will affect 1MiB, 2MiB PT pages which have a size of 16
+	 * and 8 bytes respectively but an alignment requirement of 64 bytes each.
+	 */
+	uiBytes = PVR_ALIGN(uiBytes, uiAlign);
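+
+	/* Worked example (from the note above): a PT for 2MiB pages may need
+	 * only 8 bytes of entries, but with uiLog2Align = 6 (64-byte alignment)
+	 * PVR_ALIGN pads uiBytes from 8 up to 64, so the whole region the MMU
+	 * may fetch is allocated and zeroed below. */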
+
+	/*  allocate the object */
+	eError = _MMU_PhysMemAlloc(psMMUContext->psPhysMemCtx,
+								psMemDesc, uiBytes, uiAlign);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_PxMemAlloc: failed to allocate memory for the  MMU object"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	/*
+		Clear the object
+		Note: if any MMUs are cleared with non-zero values then we will need
+		a custom clear function
+		Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is
+		unlikely
+	*/
+	OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, uiBytes);
+
+	eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+	                                                &psMemDesc->psMapping->sMemHandle,
+	                                                psMemDesc->uiOffset,
+	                                                psMemDesc->uiSize);
+	if(eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Alloc MMU object");
+
+	PDumpMMUMalloc(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+	               eMMULevel,
+	               &psMemDesc->sDevPAddr,
+	               uiBytes,
+	               uiAlign,
+	               psMMUContext->psDevAttrs->eMMUType);
+
+	PDumpMMUDumpPxEntries(eMMULevel,
+	                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+	                      psMemDesc->pvCpuVAddr,
+	                      psMemDesc->sDevPAddr,
+	                      0,
+	                      uiNumEntries,
+	                      NULL, NULL, 0, /* pdump symbolic info is irrelevant here */
+	                      psConfig->uiBytesPerEntry,
+	                      uiLog2Align,
+	                      psConfig->uiAddrShift,
+	                      psConfig->uiAddrMask,
+	                      psConfig->uiProtMask,
+	                      psConfig->uiValidEnMask,
+	                      0,
+	                      psMMUContext->psDevAttrs->eMMUType);
+#endif
+
+	return PVRSRV_OK;
+e1:
+	_MMU_PhysMemFree(psMMUContext->psPhysMemCtx,
+	                 psMemDesc);
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _PxMemFree
+
+@Description    Frees physical memory for MMU objects, de-initialises
+                and PDumps it.
+
+@Input          psMMUContext    MMU context
+
+@Input          psMemDesc       Description of allocation
+
+@Input          eMMULevel       Level of the MMU object to free
+
+@Return         None
+*/
+/*****************************************************************************/
+
+static void _PxMemFree(MMU_CONTEXT *psMMUContext,
+					   MMU_MEMORY_DESC *psMemDesc, MMU_LEVEL eMMULevel)
+{
+#if defined(MMU_CLEARMEM_ON_FREE)
+	PVRSRV_ERROR eError;
+
+	/*
+		Clear the MMU object
+		Note: if any MMUs are cleared with non-zero values then we will need
+		a custom clear function
+		Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is
+		unlikely
+	*/
+	OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, psMemDesc->uiSize);
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Clear MMU object before freeing it");
+#endif
+#endif/* MMU_CLEARMEM_ON_FREE */
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Free MMU object");
+	{
+		PDumpMMUFree(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+		             eMMULevel,
+		             &psMemDesc->sDevPAddr,
+		             psMMUContext->psDevAttrs->eMMUType);
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(eMMULevel);
+#endif
+	/* free the Px */
+	_MMU_PhysMemFree(psMMUContext->psPhysMemCtx, psMemDesc);
+}
+
+static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext,
+                              MMU_Levelx_INFO *psLevel,
+                              IMG_UINT32 uiIndex,
+                              const MMU_PxE_CONFIG *psConfig,
+                              const IMG_DEV_PHYADDR *psDevPAddr,
+                              IMG_BOOL bUnmap,
+#if defined(PDUMP)
+                              const IMG_CHAR *pszMemspaceName,
+                              const IMG_CHAR *pszSymbolicAddr,
+                              IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
+#endif
+                              IMG_UINT64 uiProtFlags)
+{
+	MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
+	IMG_UINT64 ui64PxE64;
+	IMG_UINT64 uiAddr = psDevPAddr->uiAddr;
+
+	if (psMMUContext->psDevNode->pfnCheckDeviceFeature(psMMUContext->psDevNode,
+			RGX_FEATURE_MIPS_BIT_MASK))
+	{
+		/*
+		 * If mapping for the MIPS FW context, check for sensitive PAs
+		 */
+		if (psMMUContext->psDevAttrs == psMMUContext->psDevNode->psFirmwareMMUDevAttrs
+			&& RGXMIPSFW_SENSITIVE_ADDR(uiAddr))
+		{
+			PVRSRV_RGXDEV_INFO *psDevice = (PVRSRV_RGXDEV_INFO *)psMMUContext->psDevNode->pvDevice;
+
+			uiAddr = psDevice->sTrampoline.sPhysAddr.uiAddr + RGXMIPSFW_TRAMPOLINE_OFFSET(uiAddr);
+		}
+	}
+
+	/* Calculate Entry */
+	ui64PxE64 =    uiAddr /* Calculate the offset to that base */
+	            >> psConfig->uiAddrLog2Align /* Shift away the useless bits, because the alignment is very coarse and we address by alignment */
+	            << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */
+	             & psConfig->uiAddrMask; /* Delete unused bits */
+	ui64PxE64 |= uiProtFlags;
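+
+	/* Illustrative example of the packing above: with uiAddrLog2Align = 12,
+	 * uiAddrShift = 4 and a 4KB-aligned page at 0x80012000, the address
+	 * field becomes (0x80012000 >> 12) << 4 = 0x800120, which is then
+	 * OR'ed with uiProtFlags to form the final PTE. */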
+
+	/* Set the entry */
+	if (psConfig->uiBytesPerEntry == 8)
+	{
+		IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+		pui64Px[uiIndex] = ui64PxE64;
+	}
+	else if (psConfig->uiBytesPerEntry == 4)
+	{
+		IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+		/* assert that the result fits into 32 bits before writing
+		   it into the 32-bit array with a cast */
+		PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
+
+		pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
+	}
+	else
+	{
+		return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+	}
+
+
+	/* Log modification */
+	HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+		HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+		uiIndex, MMU_LEVEL_1,
+		HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
+		!bUnmap);
+
+#if defined (PDUMP)
+	PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+	                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+	                      psMemDesc->pvCpuVAddr,
+	                      psMemDesc->sDevPAddr,
+	                      uiIndex,
+	                      1,
+	                      pszMemspaceName,
+	                      pszSymbolicAddr,
+	                      uiSymbolicAddrOffset,
+	                      psConfig->uiBytesPerEntry,
+	                      psConfig->uiAddrLog2Align,
+	                      psConfig->uiAddrShift,
+	                      psConfig->uiAddrMask,
+	                      psConfig->uiProtMask,
+	                      psConfig->uiValidEnMask,
+	                      0,
+	                      psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       _SetupPxE
+
+@Description    Sets up an entry of an MMU object to point to the
+                provided address
+
+@Input          psMMUContext    MMU context to operate on
+
+@Input          psLevel         Level info for MMU object
+
+@Input          uiIndex         Index into the MMU object to setup
+
+@Input          psConfig        MMU Px config
+
+@Input          eMMULevel       Level of MMU object
+
+@Input          psDevPAddr      Address to setup the MMU object to point to
+
+@Input          pszMemspaceName Name of the PDump memory space that the entry
+                                will point to
+
+@Input          pszSymbolicAddr PDump symbolic address that the entry will
+                                point to
+
+@Input          uiProtFlags     MMU protection flags
+
+@Input          uiLog2DataPageSize  Log2 of the data page size, used when
+                                    deriving 8-byte protection bits
+
+@Return         PVRSRV_OK if the setup was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext,
+								MMU_Levelx_INFO *psLevel,
+								IMG_UINT32 uiIndex,
+								const MMU_PxE_CONFIG *psConfig,
+								MMU_LEVEL eMMULevel,
+								const IMG_DEV_PHYADDR *psDevPAddr,
+#if defined(PDUMP)
+								const IMG_CHAR *pszMemspaceName,
+								const IMG_CHAR *pszSymbolicAddr,
+								IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
+#endif
+								MMU_FLAGS_T uiProtFlags,
+								IMG_UINT32 uiLog2DataPageSize)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psDevNode;
+	MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
+
+	IMG_UINT32 (*pfnDerivePxEProt4)(IMG_UINT32);
+	IMG_UINT64 (*pfnDerivePxEProt8)(IMG_UINT32, IMG_UINT32);
+
+	if (!psDevPAddr)
+	{
+		/* Invalidate entry */
+		if (~uiProtFlags & MMU_PROTFLAGS_INVALID)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Error, no physical address specified, but not invalidating entry"));
+			uiProtFlags |= MMU_PROTFLAGS_INVALID;
+		}
+		psDevPAddr = &gsBadDevPhyAddr;
+	}
+	else
+	{
+		if (uiProtFlags & MMU_PROTFLAGS_INVALID)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "A physical address was specified when requesting invalidation of entry"));
+			uiProtFlags |= MMU_PROTFLAGS_INVALID;
+		}
+	}
+
+	switch(eMMULevel)
+	{
+		case MMU_LEVEL_3:
+				pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePCEProt4;
+				pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePCEProt8;
+				break;
+
+		case MMU_LEVEL_2:
+				pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePDEProt4;
+				pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePDEProt8;
+				break;
+
+		case MMU_LEVEL_1:
+				pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePTEProt4;
+				pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePTEProt8;
+				break;
+
+		default:
+				PVR_DPF((PVR_DBG_ERROR, "%s: invalid MMU level", __func__));
+				return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* How big is a PxE in bytes? */
+	/* Filling the actual Px entry with an address */
+	switch(psConfig->uiBytesPerEntry)
+	{
+		case 4:
+		{
+			IMG_UINT32 *pui32Px;
+			IMG_UINT64 ui64PxE64;
+
+			pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+			ui64PxE64 = psDevPAddr->uiAddr               /* Calculate the offset to that base */
+							>> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */
+							<< psConfig->uiAddrShift     /* Shift back to fit address in the Px entry */
+							& psConfig->uiAddrMask;      /* Delete unused higher bits */
+
+			ui64PxE64 |= (IMG_UINT64)pfnDerivePxEProt4(uiProtFlags);
+			/* assert that the result fits into 32 bits before writing
+			   it into the 32-bit array with a cast */
+			PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
+
+			/* We should never invalidate an invalid page */
+			if (uiProtFlags & MMU_PROTFLAGS_INVALID)
+			{
+				PVR_ASSERT(pui32Px[uiIndex] != ui64PxE64);
+			}
+			pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
+			HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+				HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+				uiIndex, eMMULevel,
+				HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
+				(uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1);
+			break;
+		}
+		case 8:
+		{
+			IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+			pui64Px[uiIndex] = psDevPAddr->uiAddr             /* Calculate the offset to that base */
+								>> psConfig->uiAddrLog2Align  /* Shift away the unnecessary bits of the address */
+								<< psConfig->uiAddrShift      /* Shift back to fit address in the Px entry */
+								& psConfig->uiAddrMask;       /* Delete unused higher bits */
+			pui64Px[uiIndex] |= pfnDerivePxEProt8(uiProtFlags, uiLog2DataPageSize);
+
+			HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+				HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+				uiIndex, eMMULevel,
+				HTBLOG_U64_BITS_HIGH(pui64Px[uiIndex]), HTBLOG_U64_BITS_LOW(pui64Px[uiIndex]),
+				(uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1);
+			break;
+		}
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "%s: PxE size not supported (%d) for level %d",
+									__func__, psConfig->uiBytesPerEntry, eMMULevel));
+
+			return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+	}
+
+#if defined (PDUMP)
+	PDumpMMUDumpPxEntries(eMMULevel,
+	                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+	                      psMemDesc->pvCpuVAddr,
+	                      psMemDesc->sDevPAddr,
+	                      uiIndex,
+	                      1,
+	                      pszMemspaceName,
+	                      pszSymbolicAddr,
+	                      uiSymbolicAddrOffset,
+	                      psConfig->uiBytesPerEntry,
+	                      psConfig->uiAddrLog2Align,
+	                      psConfig->uiAddrShift,
+	                      psConfig->uiAddrMask,
+	                      psConfig->uiProtMask,
+	                      psConfig->uiValidEnMask,
+	                      0,
+	                      psMMUContext->psDevAttrs->eMMUType);
+#endif
+
+	psDevNode->pfnMMUCacheInvalidate(psDevNode, psMMUContext->hDevData,
+									 eMMULevel,
+									 (uiProtFlags & MMU_PROTFLAGS_INVALID)?IMG_TRUE:IMG_FALSE);
+
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ *                   MMU host control functions (Level Info)                 *
+ *****************************************************************************/
+
+
+/*************************************************************************/ /*!
+@Function       _MMU_FreeLevel
+
+@Description    Recursively frees the specified range of Px entries. If any
+                level has its last reference dropped then the MMU object
+                memory and the MMU_Levelx_Info will be freed.
+
+				At each level we might be crossing a boundary from one Px to
+				another. The values in auiStartArray should be used for
+				the first call into each level and the values in auiEndArray
+				should only be used in the last call for each level.
+				In order to determine if this is the first/last call we pass
+				in bFirst and bLast.
+				When one level calls down to the next, bFirst/bLast is only
+				set for the next recursion if it is set at this level and we
+				are in the first/last iteration of the loop at this level.
+				This means that each iteration has the knowledge of the
+				previous level, which is required.
+
+@Input          psMMUContext    MMU context to operate on
+
+@Input          psLevel                 Level info on which to free the
+                                        specified range
+
+@Input          auiStartArray           Array of start indexes (one for each level)
+
+@Input          auiEndArray             Array of end indexes (one for each level)
+
+@Input          auiEntriesPerPxArray    Array of number of entries for the Px
+                                        (one for each level)
+
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+
+@Input          pui32CurrentLevel       Pointer to a variable which is set to our
+                                        current level
+
+@Input          uiStartIndex            Start index of the range to free
+
+@Input          uiEndIndex              End index of the range to free
+
+@Input			bFirst                  This is the first call for this level
+
+@Input			bLast                   This is the last call for this level
+
+@Return         IMG_TRUE if the last reference to psLevel was dropped
+*/
+/*****************************************************************************/
+static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext,
+							   MMU_Levelx_INFO *psLevel,
+							   IMG_UINT32 auiStartArray[],
+							   IMG_UINT32 auiEndArray[],
+							   IMG_UINT32 auiEntriesPerPxArray[],
+							   const MMU_PxE_CONFIG *apsConfig[],
+							   MMU_LEVEL aeMMULevel[],
+							   IMG_UINT32 *pui32CurrentLevel,
+							   IMG_UINT32 uiStartIndex,
+							   IMG_UINT32 uiEndIndex,
+							   IMG_BOOL bFirst,
+							   IMG_BOOL bLast,
+							   IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_UINT32 uiThisLevel = *pui32CurrentLevel;
+	const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel];
+	IMG_UINT32 i;
+	IMG_BOOL bFreed = IMG_FALSE;
+
+	/* Sanity check */
+	PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
+	PVR_ASSERT(psLevel != NULL);
+
+	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel: level = %d, range %d - %d, refcount = %d",
+				aeMMULevel[uiThisLevel], uiStartIndex,
+				uiEndIndex, psLevel->ui32RefCount));
+
+	for (i = uiStartIndex;(i < uiEndIndex) && (psLevel != NULL);i++)
+	{
+		if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+		{
+			MMU_Levelx_INFO *psNextLevel = psLevel->apsNextLevel[i];
+			IMG_UINT32 uiNextStartIndex;
+			IMG_UINT32 uiNextEndIndex;
+			IMG_BOOL bNextFirst;
+			IMG_BOOL bNextLast;
+
+			/* If we're crossing a Px then the start index changes */
+			if (bFirst && (i == uiStartIndex))
+			{
+				uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+				bNextFirst = IMG_TRUE;
+			}
+			else
+			{
+				uiNextStartIndex = 0;
+				bNextFirst = IMG_FALSE;
+			}
+
+			/* If we're crossing a Px then the end index changes */
+			if (bLast && (i == (uiEndIndex - 1)))
+			{
+				uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+				bNextLast = IMG_TRUE;
+			}
+			else
+			{
+				uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+				bNextLast = IMG_FALSE;
+			}
+
+			/* Recurse into the next level */
+			(*pui32CurrentLevel)++;
+			if (_MMU_FreeLevel(psMMUContext, psNextLevel, auiStartArray,
+								auiEndArray, auiEntriesPerPxArray,
+								apsConfig, aeMMULevel, pui32CurrentLevel,
+								uiNextStartIndex, uiNextEndIndex,
+								bNextFirst, bNextLast, uiLog2DataPageSize))
+			{
+				PVRSRV_ERROR eError;
+
+				/* Un-wire the entry */
+				eError = _SetupPxE(psMMUContext,
+								psLevel,
+								i,
+								psConfig,
+								aeMMULevel[uiThisLevel],
+								NULL,
+#if defined(PDUMP)
+								NULL,	/* Only required for data page */
+								NULL,	/* Only required for data page */
+								0,      /* Only required for data page */
+#endif
+								MMU_PROTFLAGS_INVALID,
+								uiLog2DataPageSize);
+
+				PVR_ASSERT(eError == PVRSRV_OK);
+
+				/* Free table of the level below, pointed to by this table entry.
+				 * We don't destroy the table inside the above _MMU_FreeLevel call because we
+				 * first have to set the table entry of the level above to invalid. */
+				_PxMemFree(psMMUContext, &psNextLevel->sMemDesc, aeMMULevel[*pui32CurrentLevel]);
+				OSFreeMem(psNextLevel);
+
+				/* The level below us is empty, drop the refcount and clear the pointer */
+				psLevel->ui32RefCount--;
+				psLevel->apsNextLevel[i] = NULL;
+
+				/* Check we haven't wrapped around */
+				PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+			}
+			(*pui32CurrentLevel)--;
+		}
+		else
+		{
+			psLevel->ui32RefCount--;
+		}
+
+		/*
+		   Free this level if it is no longer referenced, unless it's the base
+		   level in which case it's part of the MMU context and should be freed
+		   when the MMU context is freed
+		*/
+		if ((psLevel->ui32RefCount == 0) && (psLevel != &psMMUContext->sBaseLevelInfo))
+		{
+			bFreed = IMG_TRUE;
+		}
+	}
+
+	/* Level one flushing is done when we actually write the table entries */
+	if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+	{
+		psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+		                                       &psLevel->sMemDesc.psMapping->sMemHandle,
+		                                       uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+		                                       (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry);
+	}
+
+	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel end: level = %d, refcount = %d",
+				aeMMULevel[uiThisLevel], bFreed?0:psLevel->ui32RefCount));
+
+	return bFreed;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_AllocLevel
+
+@Description    Recursively allocates the specified range of Px entries,
+                creating the MMU object memory and the MMU_Levelx_INFO for
+                any level that does not exist yet; anything that was set up
+                is unwound again in reverse order if an allocation fails.
+
+                At each level we might be crossing a boundary from one Px to
+                another. The values in auiStartArray should only be used for
+                the first call into each level and the values in auiEndArray
+                should only be used in the last call for each level.
+                In order to determine if this is the first/last call we pass
+                in bFirst and bLast.
+                When one level calls down to the next, bFirst/bLast is only
+                set for the next recursion if it is set at this level and
+                this is the first/last iteration of the loop at this level.
+                This gives each iteration the knowledge of the previous
+                level that it requires.
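+
+                Illustrative example (hypothetical 2-level layout with 4
+                entries per Px, end indexes exclusive): a range covering PD
+                entries 1..2 inclusive, starting at PT index 2 under PD
+                entry 1 and covering PT indexes 0..1 under PD entry 2,
+                gives auiStartArray = {1, 2} and auiEndArray = {3, 2}.
+                Only the recursion for PD entry 1 sees bFirst set (PT range
+                2..4) and only the one for PD entry 2 sees bLast set (PT
+                range 0..2); any PD entry in between would use the full PT
+                range 0..4.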
+
+@Input          psMMUContext    MMU context to operate on
+
+@Input          psLevel                 Level info on which to allocate the
+                                        specified range
+
+@Input          auiStartArray           Array of start indexes (one for each level)
+
+@Input          auiEndArray             Array of end indexes (one for each level)
+
+@Input          auiEntriesPerPxArray    Array of number of entries for the Px
+                                        (one for each level)
+
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+
+@Input          pui32CurrentLevel       Pointer to a variable which is set to our
+                                        current level
+
+@Input          uiStartIndex            Start index of the range to allocate
+
+@Input          uiEndIndex              End index of the range to allocate
+
+@Input          bFirst                  This is the first call for this level
+
+@Input          bLast                   This is the last call for this level
+
+@Return         PVRSRV_OK if the allocation was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext,
+									MMU_Levelx_INFO *psLevel,
+									IMG_UINT32 auiStartArray[],
+									IMG_UINT32 auiEndArray[],
+									IMG_UINT32 auiEntriesPerPxArray[],
+									const MMU_PxE_CONFIG *apsConfig[],
+									MMU_LEVEL aeMMULevel[],
+									IMG_UINT32 *pui32CurrentLevel,
+									IMG_UINT32 uiStartIndex,
+									IMG_UINT32 uiEndIndex,
+									IMG_BOOL bFirst,
+									IMG_BOOL bLast,
+									IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_UINT32 uiThisLevel = *pui32CurrentLevel; /* Starting with 0 */
+	const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; /* The table config for the current level */
+	PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	IMG_UINT32 uiAllocState = 99; /* Debug info to check what progress was made in the function. Updated during this function. */
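+	/* On failure, uiAllocState records how far the failing iteration got:
+	   0 = level-info alloc failed, 1 = Px memory alloc failed,
+	   2 = entry setup or recursion failed; 3 is used while unwinding to
+	   mean "fully initialised entry". */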
+	IMG_UINT32 i;
+
+	/* Sanity check */
+	PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
+
+	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel: level = %d, range %d - %d, refcount = %d",
+				aeMMULevel[uiThisLevel], uiStartIndex,
+				uiEndIndex, psLevel->ui32RefCount));
+
+	/* Go from uiStartIndex to uiEndIndex through the Px */
+	for (i = uiStartIndex; i < uiEndIndex; i++)
+	{
+		/* Only try an allocation if this is not the last (PT) level,
+		   because the PT was already allocated when the PD entry pointing
+		   at it was set up */
+		if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+		{
+			IMG_UINT32 uiNextStartIndex;
+			IMG_UINT32 uiNextEndIndex;
+			IMG_BOOL bNextFirst;
+			IMG_BOOL bNextLast;
+
+			/* If there is already a next Px level existing, do not allocate it */
+			if (!psLevel->apsNextLevel[i])
+			{
+				MMU_Levelx_INFO *psNextLevel;
+				IMG_UINT32 ui32AllocSize;
+				IMG_UINT32 uiNextEntries;
+
+				/* Allocate and setup the next level */
+				uiNextEntries = auiEntriesPerPxArray[uiThisLevel + 1];
+				ui32AllocSize = sizeof(MMU_Levelx_INFO);
+				if (aeMMULevel[uiThisLevel + 1] != MMU_LEVEL_1)
+				{
+					ui32AllocSize += sizeof(MMU_Levelx_INFO *) * (uiNextEntries - 1);
+				}
+				psNextLevel = OSAllocZMem(ui32AllocSize);
+				if (psNextLevel == NULL)
+				{
+					uiAllocState = 0;
+					goto e0;
+				}
+
+				/* Hook in this level for next time */
+				psLevel->apsNextLevel[i] = psNextLevel;
+
+				psNextLevel->ui32NumOfEntries = uiNextEntries;
+				psNextLevel->ui32RefCount = 0;
+				/* Allocate Px memory for a sub level*/
+				eError = _PxMemAlloc(psMMUContext, uiNextEntries, apsConfig[uiThisLevel + 1],
+										aeMMULevel[uiThisLevel + 1],
+										&psNextLevel->sMemDesc,
+										psConfig->uiAddrLog2Align);
+				if (eError != PVRSRV_OK)
+				{
+					uiAllocState = 1;
+					goto e0;
+				}
+
+				/* Wire up the entry */
+				eError = _SetupPxE(psMMUContext,
+									psLevel,
+									i,
+									psConfig,
+									aeMMULevel[uiThisLevel],
+									&psNextLevel->sMemDesc.sDevPAddr,
+#if defined(PDUMP)
+									NULL, /* Only required for data page */
+									NULL, /* Only required for data page */
+									0,    /* Only required for data page */
+#endif
+									0,
+									uiLog2DataPageSize);
+
+				if (eError != PVRSRV_OK)
+				{
+					uiAllocState = 2;
+					goto e0;
+				}
+
+				psLevel->ui32RefCount++;
+			}
+
+			/* If we're crossing a Px then the start index changes */
+			if (bFirst && (i == uiStartIndex))
+			{
+				uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+				bNextFirst = IMG_TRUE;
+			}
+			else
+			{
+				uiNextStartIndex = 0;
+				bNextFirst = IMG_FALSE;
+			}
+
+			/* If we're crossing a Px then the end index changes */
+			if (bLast && (i == (uiEndIndex - 1)))
+			{
+				uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+				bNextLast = IMG_TRUE;
+			}
+			else
+			{
+				uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+				bNextLast = IMG_FALSE;
+			}
+
+			/* Recurse into the next level */
+			(*pui32CurrentLevel)++;
+			eError = _MMU_AllocLevel(psMMUContext, psLevel->apsNextLevel[i],
+									 auiStartArray,
+									 auiEndArray,
+									 auiEntriesPerPxArray,
+									 apsConfig,
+									 aeMMULevel,
+									 pui32CurrentLevel,
+									 uiNextStartIndex,
+									 uiNextEndIndex,
+									 bNextFirst,
+									 bNextLast,
+									 uiLog2DataPageSize);
+			(*pui32CurrentLevel)--;
+			if (eError != PVRSRV_OK)
+			{
+				uiAllocState = 2;
+				goto e0;
+			}
+		}
+		else
+		{
+			/* All we need to do for level 1 is bump the refcount */
+			psLevel->ui32RefCount++;
+		}
+		PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+	}
+
+	/* Level one flushing is done when we actually write the table entries */
+	if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+	{
+		eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+		                                                &psLevel->sMemDesc.psMapping->sMemHandle,
+		                                                uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+		                                                (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry);
+		if (eError != PVRSRV_OK)
+			goto e0;
+	}
+
+	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel end: level = %d, refcount = %d",
+				aeMMULevel[uiThisLevel], psLevel->ui32RefCount));
+	return PVRSRV_OK;
+
+e0:
+	/* Sanity check that we've not come down this route unexpectedly */
+	PVR_ASSERT(uiAllocState != 99);
+	PVR_DPF((PVR_DBG_ERROR, "_MMU_AllocLevel: Error %d allocating Px for level %d in stage %d",
+							eError, aeMMULevel[uiThisLevel], uiAllocState));
+
+	/* The start value of the index variable i is deliberately not
+	   re-initialised: this loop tears down, in reverse order, whatever was
+	   set up before the failure, so i already holds the right value. */
+	for (/* i already set */ ; i >= uiStartIndex && i < uiEndIndex; i--)
+	{
+		switch(uiAllocState)
+		{
+			IMG_UINT32 uiNextStartIndex;
+			IMG_UINT32 uiNextEndIndex;
+			IMG_BOOL bNextFirst;
+			IMG_BOOL bNextLast;
+
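+			/* Note: the cases below fall through intentionally so that an
+			   entry that reached state N is also unwound through all the
+			   earlier states */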
+			case 3:
+					/* If we're crossing a Px then the start index changes */
+					if (bFirst && (i == uiStartIndex))
+					{
+						uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+						bNextFirst = IMG_TRUE;
+					}
+					else
+					{
+						uiNextStartIndex = 0;
+						bNextFirst = IMG_FALSE;
+					}
+
+					/* If we're crossing a Px then the end index changes */
+					if (bLast && (i == (uiEndIndex - 1)))
+					{
+						uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+						bNextLast = IMG_TRUE;
+					}
+					else
+					{
+						uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+						bNextLast = IMG_FALSE;
+					}
+
+					if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+					{
+						(*pui32CurrentLevel)++;
+						if (_MMU_FreeLevel(psMMUContext, psLevel->apsNextLevel[i],
+											auiStartArray, auiEndArray,
+											auiEntriesPerPxArray, apsConfig,
+											aeMMULevel, pui32CurrentLevel,
+											uiNextStartIndex, uiNextEndIndex,
+											bNextFirst, bNextLast, uiLog2DataPageSize))
+						{
+							psLevel->ui32RefCount--;
+							psLevel->apsNextLevel[i] = NULL;
+
+							/* Check we haven't wrapped around */
+							PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+						}
+						(*pui32CurrentLevel)--;
+					}
+					else
+					{
+						/* We should never come down this path, but it's here
+						   for completeness */
+						psLevel->ui32RefCount--;
+
+						/* Check we haven't wrapped around */
+						PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+					}
+			case 2:
+					if (psLevel->apsNextLevel[i] != NULL  &&
+					    psLevel->apsNextLevel[i]->ui32RefCount == 0)
+					{
+						/* Free the Px memory of the partially set-up child
+						   level, not of psLevel itself */
+						_PxMemFree(psMMUContext, &psLevel->apsNextLevel[i]->sMemDesc,
+									aeMMULevel[uiThisLevel + 1]);
+					}
+			case 1:
+					if (psLevel->apsNextLevel[i] != NULL  &&
+					    psLevel->apsNextLevel[i]->ui32RefCount == 0)
+					{
+						OSFreeMem(psLevel->apsNextLevel[i]);
+						psLevel->apsNextLevel[i] = NULL;
+					}
+			case 0:
+					uiAllocState = 3;
+					break;
+		}
+	}
+	return eError;
+}
+
+/*****************************************************************************
+ *                   MMU page table functions                                *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _MMU_GetLevelData
+
+@Description    Gets all the level data and calculates the indexes for the
+                specified address range
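+
+                For illustration (hypothetical 3-level layout, 4 KB pages,
+                with the PC/PD/PT indexes taken from VA bits 39:30, 29:21
+                and 20:12): sDevVAddrStart = 0x0040201000 yields
+                auiStartArray = { 1, 1, 1 }, i.e. PC entry 1, PD entry 1,
+                PT entry 1.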
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrStart          Start device virtual address
+
+@Input          sDevVAddrEnd            End device virtual address
+
+@Input          uiLog2DataPageSize      Log2 of the page size to use
+
+@Input          auiStartArray           Array of start indexes (one for each level)
+
+@Input          auiEndArray             Array of end indexes (one for each level)
+
+@Input          auiEntriesPerPx         Array of number of entries for the Px
+                                        (one for each level)
+
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+
+@Output         ppsMMUDevVAddrConfig    Device virtual address config
+
+@Output         phPriv                  Private data of the page size config
+
+@Return         None
+*/
+/*****************************************************************************/
+static void _MMU_GetLevelData(MMU_CONTEXT *psMMUContext,
+									IMG_DEV_VIRTADDR sDevVAddrStart,
+									IMG_DEV_VIRTADDR sDevVAddrEnd,
+									IMG_UINT32 uiLog2DataPageSize,
+									IMG_UINT32 auiStartArray[],
+									IMG_UINT32 auiEndArray[],
+									IMG_UINT32 auiEntriesPerPx[],
+									const MMU_PxE_CONFIG *apsConfig[],
+									MMU_LEVEL aeMMULevel[],
+									const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+									IMG_HANDLE *phPriv)
+{
+	const MMU_PxE_CONFIG *psMMUPDEConfig;
+	const MMU_PxE_CONFIG *psMMUPTEConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i = 0;
+
+	eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
+														&psMMUPDEConfig,
+														&psMMUPTEConfig,
+														ppsMMUDevVAddrConfig,
+														phPriv);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	psDevVAddrConfig = *ppsMMUDevVAddrConfig;
+
+	if (psDevVAddrConfig->uiPCIndexMask != 0)
+	{
+		auiStartArray[i] = _CalcPCEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+		auiEndArray[i] = _CalcPCEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+		auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPC;
+		apsConfig[i] = psDevAttrs->psBaseConfig;
+		aeMMULevel[i] = MMU_LEVEL_3;
+		i++;
+	}
+
+	if (psDevVAddrConfig->uiPDIndexMask != 0)
+	{
+		auiStartArray[i] = _CalcPDEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+		auiEndArray[i] = _CalcPDEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+		auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPD;
+		if (i == 0)
+		{
+			apsConfig[i] = psDevAttrs->psBaseConfig;
+		}
+		else
+		{
+			apsConfig[i] = psMMUPDEConfig;
+		}
+		aeMMULevel[i] = MMU_LEVEL_2;
+		i++;
+	}
+
+	/*
+		There is always a PTE entry, so we have slightly different behaviour
+		than above. E.g. for 2 MB RGX pages the uiPTIndexMask is
+		0x0000000000 but there is still a PT with one entry.
+	*/
+	auiStartArray[i] = _CalcPTEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+	if (psDevVAddrConfig->uiPTIndexMask != 0)
+	{
+		auiEndArray[i] = _CalcPTEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+	}
+	else
+	{
+		/*
+			If the PTE mask is zero it means there is only 1 PTE and thus,
+			as an exclusive bound, the end array index is equal to the
+			start index + 1.
+		*/
+		auiEndArray[i] = auiStartArray[i] + 1;
+	}
+
+	auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPT;
+
+	if (i == 0)
+	{
+		apsConfig[i] = psDevAttrs->psBaseConfig;
+	}
+	else
+	{
+		apsConfig[i] = psMMUPTEConfig;
+	}
+	aeMMULevel[i] = MMU_LEVEL_1;
+}
+
+static void _MMU_PutLevelData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hPriv)
+{
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+
+	psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+}
+
+/*************************************************************************/ /*!
+@Function       _AllocPageTables
+
+@Description    Allocate page tables and any higher level MMU objects required
+                for the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrStart          Start device virtual address
+
+@Input          sDevVAddrEnd            End device virtual address
+
+@Input          uiLog2DataPageSize      Page size of the data pages
+
+@Return         PVRSRV_OK if the allocation was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR
+_AllocPageTables(MMU_CONTEXT *psMMUContext,
+                 IMG_DEV_VIRTADDR sDevVAddrStart,
+                 IMG_DEV_VIRTADDR sDevVAddrEnd,
+                 IMG_UINT32 uiLog2DataPageSize)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
+	IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
+	IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
+	MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
+	const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
+	const MMU_DEVVADDR_CONFIG	*psDevVAddrConfig;
+	IMG_HANDLE hPriv;
+	IMG_UINT32 ui32CurrentLevel = 0;
+
+
+	PVR_DPF((PVR_DBG_ALLOC,
+			 "_AllocPageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
+			 sDevVAddrStart.uiAddr,
+			 sDevVAddrEnd.uiAddr
+			 ));
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Allocating page tables for %llu bytes virtual range: "
+				IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
+				(IMG_UINT64)sDevVAddrEnd.uiAddr - (IMG_UINT64)sDevVAddrStart.uiAddr,
+                 (IMG_UINT64)sDevVAddrStart.uiAddr,
+                 (IMG_UINT64)sDevVAddrEnd.uiAddr);
+#endif
+
+	_MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
+						(IMG_UINT32) uiLog2DataPageSize, auiStartArray, auiEndArray,
+						auiEntriesPerPx, apsConfig, aeMMULevel,
+						&psDevVAddrConfig, &hPriv);
+
+	HTBLOGK(HTB_SF_MMU_PAGE_OP_ALLOC,
+		HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr),
+		HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr));
+
+	eError = _MMU_AllocLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
+								auiStartArray, auiEndArray, auiEntriesPerPx,
+								apsConfig, aeMMULevel, &ui32CurrentLevel,
+								auiStartArray[0], auiEndArray[0],
+								IMG_TRUE, IMG_TRUE, uiLog2DataPageSize);
+
+	_MMU_PutLevelData(psMMUContext, hPriv);
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _FreePageTables
+
+@Description    Free page tables and any higher level MMU objects that are
+                no longer referenced for the specified virtual range.
+                This will fill the temporary free list of the MMU context which
+                needs cleanup after the call.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrStart          Start device virtual address
+
+@Input          sDevVAddrEnd            End device virtual address
+
+@Input          uiLog2DataPageSize      Page size of the data pages
+
+@Return         None
+*/
+/*****************************************************************************/
+static void _FreePageTables(MMU_CONTEXT *psMMUContext,
+							IMG_DEV_VIRTADDR sDevVAddrStart,
+							IMG_DEV_VIRTADDR sDevVAddrEnd,
+							IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
+	IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
+	IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
+	MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
+	const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
+	const MMU_DEVVADDR_CONFIG	*psDevVAddrConfig;
+	IMG_UINT32 ui32CurrentLevel = 0;
+	IMG_HANDLE hPriv;
+
+
+	PVR_DPF((PVR_DBG_ALLOC,
+			 "_FreePageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
+			 sDevVAddrStart.uiAddr,
+			 sDevVAddrEnd.uiAddr
+			 ));
+
+	_MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
+						uiLog2DataPageSize, auiStartArray, auiEndArray,
+						auiEntriesPerPx, apsConfig, aeMMULevel,
+						&psDevVAddrConfig, &hPriv);
+
+	HTBLOGK(HTB_SF_MMU_PAGE_OP_FREE,
+		HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr),
+		HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr));
+
+	_MMU_FreeLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
+					auiStartArray, auiEndArray, auiEntriesPerPx,
+					apsConfig, aeMMULevel, &ui32CurrentLevel,
+					auiStartArray[0], auiEndArray[0],
+					IMG_TRUE, IMG_TRUE, uiLog2DataPageSize);
+
+	_MMU_PutLevelData(psMMUContext, hPriv);
+}
+
+
+/*************************************************************************/ /*!
+@Function       _MMU_GetPTInfo
+
+@Description    Get the PT level information and PT entry index for the specified
+                virtual address
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddr               Device virtual address to get the PTE info
+                                        from.
+
+@Input          psDevVAddrConfig        The current virtual address config obtained
+                                        by another function call before.
+
+@Output         psLevel                 Level info of the PT
+
+@Output         pui32PTEIndex           Index into the PT the address corresponds to
+
+@Return         None
+*/
+/*****************************************************************************/
+static INLINE void _MMU_GetPTInfo(MMU_CONTEXT                *psMMUContext,
+								  IMG_DEV_VIRTADDR            sDevVAddr,
+								  const MMU_DEVVADDR_CONFIG  *psDevVAddrConfig,
+								  MMU_Levelx_INFO           **psLevel,
+								  IMG_UINT32                 *pui32PTEIndex)
+{
+	MMU_Levelx_INFO *psLocalLevel = NULL;
+
+	IMG_UINT32 uiPCEIndex;
+	IMG_UINT32 uiPDEIndex;
+
+	switch(psMMUContext->psDevAttrs->eTopLevel)
+	{
+		case MMU_LEVEL_3:
+			/* find the page directory containing the PCE */
+			uiPCEIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+			psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPCEIndex];
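+			/* fall through */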
+
+		case MMU_LEVEL_2:
+			/* find the page table containing the PDE */
+			uiPDEIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+			if (psLocalLevel != NULL)
+			{
+				psLocalLevel = psLocalLevel->apsNextLevel[uiPDEIndex];
+			}
+			else
+			{
+				psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPDEIndex];
+			}
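+			/* fall through */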
+
+		case MMU_LEVEL_1:
+			/* find PTE index into page table */
+			*pui32PTEIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+			if (psLocalLevel == NULL)
+			{
+				psLocalLevel = &psMMUContext->sBaseLevelInfo;
+			}
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTInfo: Invalid MMU level"));
+			return;
+	}
+
+	*psLevel = psLocalLevel;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_GetPTConfig
+
+@Description    Get the level config. Call _MMU_PutPTConfig after use!
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uiLog2DataPageSize      Log 2 of the page size
+
+@Output         ppsConfig               Config of the PTE
+
+@Output         phPriv                  Private data handle to be passed back
+                                        when the info is put
+
+@Output         ppsDevVAddrConfig       Config of the device virtual addresses
+
+@Return         None
+*/
+/*****************************************************************************/
+static INLINE void _MMU_GetPTConfig(MMU_CONTEXT               *psMMUContext,
+									IMG_UINT32                  uiLog2DataPageSize,
+									const MMU_PxE_CONFIG      **ppsConfig,
+									IMG_HANDLE                 *phPriv,
+									const MMU_DEVVADDR_CONFIG **ppsDevVAddrConfig)
+{
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	const MMU_PxE_CONFIG *psPDEConfig;
+	const MMU_PxE_CONFIG *psPTEConfig;
+
+	if (psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
+	                                            &psPDEConfig,
+	                                            &psPTEConfig,
+	                                            &psDevVAddrConfig,
+	                                            phPriv) != PVRSRV_OK)
+	{
+		/*
+		   There should be no way we got here unless uiLog2DataPageSize
+		   has changed after the MMU_Alloc call (in which case it's a bug in
+		   the MM code)
+		*/
+		PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTConfig: Could not get valid page size config"));
+		PVR_ASSERT(0);
+	}
+
+	*ppsConfig = psPTEConfig;
+	*ppsDevVAddrConfig = psDevVAddrConfig;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PutPTConfig
+
+@Description    Put the level info. Has to be called after _MMU_GetPTConfig to
+                ensure correct refcounting.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          phPriv                  Private data handle created by
+                                        _MMU_GetPTConfig.
+
+@Return         None
+*/
+/*****************************************************************************/
+static INLINE void _MMU_PutPTConfig(MMU_CONTEXT *psMMUContext,
+                                 IMG_HANDLE hPriv)
+{
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+
+	if (psDevAttrs->pfnPutPageSizeConfiguration(hPriv) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_MMU_PutPTConfig: Could not put page size config"));
+		PVR_ASSERT(0);
+	}
+
+}
+
+
+/*****************************************************************************
+ *                     Public interface functions                            *
+ *****************************************************************************/
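+
+/*
+	Typical lifecycle of these entry points (an illustrative sketch only;
+	variable setup and error handling are omitted and 4 KB pages, i.e.
+	uiLog2PageSize = 12, are assumed):
+
+		MMU_CONTEXT *psCtx;
+		MMU_ContextCreate(psDevNode, &psCtx, psDevAttrs);
+		MMU_Alloc(psCtx, uiSize, &uiActualSize, uiProtFlags,
+		          uiAlign, &sDevVAddr, 12);
+		MMU_MapPages(psCtx, uiFlags, sDevVAddr, psPMR, 0,
+		             ui32NumPages, NULL, 12);
+		MMU_UnmapPages(psCtx, 0, sDevVAddr, ui32NumPages, NULL, 12,
+		               IMG_FALSE);
+		MMU_Free(psCtx, sDevVAddr, uiSize, 12);
+		MMU_ContextDestroy(psCtx);
+*/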
+
+/*
+	MMU_ContextCreate
+*/
+PVRSRV_ERROR
+MMU_ContextCreate(PVRSRV_DEVICE_NODE *psDevNode,
+                  MMU_CONTEXT **ppsMMUContext,
+                  MMU_DEVICEATTRIBS *psDevAttrs)
+{
+	MMU_CONTEXT *psMMUContext;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	const MMU_PxE_CONFIG *psConfig;
+	MMU_PHYSMEM_CONTEXT *psCtx;
+	IMG_UINT32 ui32BaseObjects;
+	IMG_UINT32 ui32Size;
+	IMG_CHAR sBuf[40];
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	psConfig = psDevAttrs->psBaseConfig;
+	psDevVAddrConfig = psDevAttrs->psTopLevelDevVAddrConfig;
+
+	switch(psDevAttrs->eTopLevel)
+	{
+		case MMU_LEVEL_3:
+			ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPC;
+			break;
+
+		case MMU_LEVEL_2:
+			ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPD;
+			break;
+
+		case MMU_LEVEL_1:
+			ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPT;
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Invalid MMU config"));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto e0;
+	}
+
+	/* Allocate the MMU context together with its base level Px infos */
+	ui32Size = sizeof(MMU_CONTEXT) +
+						((ui32BaseObjects - 1) * sizeof(MMU_Levelx_INFO *));
+
+	psMMUContext = OSAllocZMem(ui32Size);
+	if (psMMUContext == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: ERROR call to OSAllocZMem failed"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+#if defined(PDUMP)
+	/* Clear the refcount */
+	psMMUContext->ui32PDumpContextIDRefCount = 0;
+#endif
+	/* Record Device specific attributes in the context for subsequent use */
+	psMMUContext->psDevAttrs = psDevAttrs;
+	psMMUContext->psDevNode = psDevNode;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	{
+		IMG_UINT32 ui32OSid, ui32OSidReg;
+		IMG_BOOL bOSidAxiProt;
+
+		RetrieveOSidsfromPidList(OSGetCurrentClientProcessIDKM(), &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+		MMU_SetOSids(psMMUContext, ui32OSid, ui32OSidReg, bOSidAxiProt);
+	}
+#endif
+
+	/*
+	  Allocate physmem context and set it up
+	 */
+	psCtx = OSAllocZMem(sizeof(MMU_PHYSMEM_CONTEXT));
+	if (psCtx == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: ERROR call to OSAllocZMem failed"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+	psMMUContext->psPhysMemCtx = psCtx;
+
+	psCtx->psDevNode = psDevNode;
+
+	OSSNPrintf(sBuf, sizeof(sBuf)-1, "pgtables %p", psCtx);
+	psCtx->uiPhysMemRANameAllocSize = OSStringLength(sBuf)+1;
+	psCtx->pszPhysMemRAName = OSAllocMem(psCtx->uiPhysMemRANameAllocSize);
+	if (psCtx->pszPhysMemRAName == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Out of memory"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e2;
+	}
+
+	OSStringCopy(psCtx->pszPhysMemRAName, sBuf);
+
+	psCtx->psPhysMemRA = RA_Create(psCtx->pszPhysMemRAName,
+									/* subsequent import */
+									psDevNode->uiMMUPxLog2AllocGran,
+									RA_LOCKCLASS_1,
+									_MMU_PhysMem_RAImportAlloc,
+									_MMU_PhysMem_RAImportFree,
+									psCtx, /* priv */
+									IMG_FALSE);
+	if (psCtx->psPhysMemRA == NULL)
+	{
+		OSFreeMem(psCtx->pszPhysMemRAName);
+		psCtx->pszPhysMemRAName = NULL;
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e3;
+	}
+
+	/* Setup cleanup meta data to check if a MMU context
+	 * has been destroyed and should not be accessed anymore */
+	psCtx->psCleanupData = OSAllocMem(sizeof(*(psCtx->psCleanupData)));
+	if (psCtx->psCleanupData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: ERROR call to OSAllocMem failed"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e4;
+	}
+
+	OSLockCreate(&psCtx->psCleanupData->hCleanupLock, LOCK_TYPE_PASSIVE);
+	psCtx->psCleanupData->bMMUContextExists = IMG_TRUE;
+	dllist_init(&psCtx->psCleanupData->sMMUCtxCleanupItemsHead);
+	OSAtomicWrite(&psCtx->psCleanupData->iRef, 1);
+
+	/* allocate the base level object */
+	/*
+	   Note: Although this is not required by this file until the first
+	         allocation is made, a device specific callback might request
+	         the base object address, so we allocate it up front.
+	*/
+	if (_PxMemAlloc(psMMUContext,
+							ui32BaseObjects,
+							psConfig,
+							psDevAttrs->eTopLevel,
+							&psMMUContext->sBaseLevelInfo.sMemDesc,
+							psDevAttrs->ui32BaseAlign))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Failed to alloc level 1 object"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e5;
+	}
+
+	dllist_init(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead);
+
+	psMMUContext->sBaseLevelInfo.ui32NumOfEntries = ui32BaseObjects;
+	psMMUContext->sBaseLevelInfo.ui32RefCount = 0;
+
+	eError = OSLockCreate(&psMMUContext->hLock, LOCK_TYPE_PASSIVE);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Failed to create lock for MMU_CONTEXT"));
+		goto e6;
+	}
+
+	/* return context */
+	*ppsMMUContext = psMMUContext;
+
+	return PVRSRV_OK;
+
+e6:
+	_PxMemFree(psMMUContext, &psMMUContext->sBaseLevelInfo.sMemDesc, psDevAttrs->eTopLevel);
+e5:
+	/* Destroy the cleanup lock created above before freeing its holder */
+	OSLockDestroy(psCtx->psCleanupData->hCleanupLock);
+	OSFreeMem(psCtx->psCleanupData);
+e4:
+	RA_Delete(psCtx->psPhysMemRA);
+e3:
+	OSFreeMem(psCtx->pszPhysMemRAName);
+e2:
+	OSFreeMem(psCtx);
+e1:
+	OSFreeMem(psMMUContext);
+e0:
+	return eError;
+}
+
+/*
+	MMU_ContextDestroy
+*/
+void
+MMU_ContextDestroy (MMU_CONTEXT *psMMUContext)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PDLLIST_NODE psNode, psNextNode;
+
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) psMMUContext->psDevNode;
+	MMU_CTX_CLEANUP_DATA *psCleanupData = psMMUContext->psPhysMemCtx->psCleanupData;
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "MMU_ContextDestroy: Enter"));
+
+	if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
+	{
+		/* There should be no way to get here with live pages unless
+		   there is a bug in this module or the MM code */
+		PVR_ASSERT(psMMUContext->sBaseLevelInfo.ui32RefCount == 0);
+	}
+
+	/* Cleanup lock must be acquired before MMUContext lock. Reverse order
+	 * may lead to a deadlock and is reported by lockdep. */
+	OSLockAcquire(psCleanupData->hCleanupLock);
+	OSLockAcquire(psMMUContext->hLock);
+
+	/* Free the top level MMU object - will be put on defer free list.
+	 * This has to be done before the step below that will empty the
+	 * defer-free list. */
+	_PxMemFree(psMMUContext,
+	           &psMMUContext->sBaseLevelInfo.sMemDesc,
+	           psMMUContext->psDevAttrs->eTopLevel);
+
+	/* Empty the temporary defer-free list of Px */
+	_FreeMMUMapping(psDevNode, &psMMUContext->psPhysMemCtx->sTmpMMUMappingHead);
+	PVR_ASSERT(dllist_is_empty(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead));
+
+	/* Empty the defer free list so the cleanup thread will
+	 * not have to access any MMU context related structures anymore */
+	dllist_foreach_node(&psCleanupData->sMMUCtxCleanupItemsHead,
+	                    psNode,
+	                    psNextNode)
+	{
+		MMU_CLEANUP_ITEM *psCleanup = IMG_CONTAINER_OF(psNode,
+		                                               MMU_CLEANUP_ITEM,
+		                                               sMMUCtxCleanupItem);
+
+		_FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead);
+
+		dllist_remove_node(psNode);
+	}
+	PVR_ASSERT(dllist_is_empty(&psCleanupData->sMMUCtxCleanupItemsHead));
+
+	psCleanupData->bMMUContextExists = IMG_FALSE;
+
+	OSLockRelease(psCleanupData->hCleanupLock);
+
+	if (OSAtomicDecrement(&psCleanupData->iRef) == 0)
+	{
+		OSLockDestroy(psCleanupData->hCleanupLock);
+		OSFreeMem(psCleanupData);
+	}
+
+	/* Free physmem context */
+	RA_Delete(psMMUContext->psPhysMemCtx->psPhysMemRA);
+	psMMUContext->psPhysMemCtx->psPhysMemRA = NULL;
+	OSFreeMem(psMMUContext->psPhysMemCtx->pszPhysMemRAName);
+	psMMUContext->psPhysMemCtx->pszPhysMemRAName = NULL;
+
+	OSFreeMem(psMMUContext->psPhysMemCtx);
+
+	OSLockRelease(psMMUContext->hLock);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	RemovePidOSidCoupling(OSGetCurrentClientProcessIDKM());
+#endif
+
+	OSLockDestroy(psMMUContext->hLock);
+
+	/* free the context itself. */
+	OSFreeMem(psMMUContext);
+	/* Not NULLing the pointer; it is a copy on the caller's stack */
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "MMU_ContextDestroy: Exit"));
+}
+
+/*
+	MMU_Alloc
+*/
+PVRSRV_ERROR
+MMU_Alloc (MMU_CONTEXT *psMMUContext,
+		   IMG_DEVMEM_SIZE_T uSize,
+		   IMG_DEVMEM_SIZE_T *puActualSize,
+           IMG_UINT32 uiProtFlags,
+		   IMG_DEVMEM_SIZE_T uDevVAddrAlignment,
+		   IMG_DEV_VIRTADDR *psDevVAddr,
+		   IMG_UINT32 uiLog2PageSize)
+{
+	PVRSRV_ERROR eError;
+	IMG_DEV_VIRTADDR sDevVAddrEnd;
+
+	const MMU_PxE_CONFIG *psPDEConfig;
+	const MMU_PxE_CONFIG *psPTEConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+
+	MMU_DEVICEATTRIBS *psDevAttrs;
+	IMG_HANDLE hPriv;
+
+#if !defined (DEBUG)
+	PVR_UNREFERENCED_PARAMETER(uDevVAddrAlignment);
+#endif
+	PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Alloc: uSize=" IMG_DEVMEM_SIZE_FMTSPEC
+		", uiProtFlags=0x%x, align="IMG_DEVMEM_ALIGN_FMTSPEC, uSize, uiProtFlags, uDevVAddrAlignment));
+
+	/* check params */
+	if (!psMMUContext || !psDevVAddr || !puActualSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: invalid params"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevAttrs = psMMUContext->psDevAttrs;
+
+	eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2PageSize,
+													&psPDEConfig,
+													&psPTEConfig,
+													&psDevVAddrConfig,
+													&hPriv);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Failed to get config info (%d)", eError));
+		return eError;
+	}
+
+	/* size and alignment must be datapage granular */
+	if (((psDevVAddr->uiAddr & psDevVAddrConfig->uiPageOffsetMask) != 0)
+	|| ((uSize & psDevVAddrConfig->uiPageOffsetMask) != 0))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: invalid address or size granularity"));
+		psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	sDevVAddrEnd = *psDevVAddr;
+	sDevVAddrEnd.uiAddr += uSize;
+
+	OSLockAcquire(psMMUContext->hLock);
+	eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize);
+	OSLockRelease(psMMUContext->hLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _AllocPageTables failed"));
+		psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+		return PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES;
+	}
+
+	psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+
+	return PVRSRV_OK;
+}
+
+/*
+	MMU_Free
+*/
+void
+MMU_Free (MMU_CONTEXT *psMMUContext,
+          IMG_DEV_VIRTADDR sDevVAddr,
+          IMG_DEVMEM_SIZE_T uiSize,
+          IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_DEV_VIRTADDR sDevVAddrEnd;
+
+	if (psMMUContext == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter"));
+		return;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "MMU_Free: Freeing DevVAddr " IMG_DEV_VIRTADDR_FMTSPEC,
+			 sDevVAddr.uiAddr));
+
+	/* ensure the address range to free is inside the heap */
+	sDevVAddrEnd = sDevVAddr;
+	sDevVAddrEnd.uiAddr += uiSize;
+
+	/* The Cleanup lock has to be taken before the MMUContext hLock to
+	 * prevent deadlock scenarios. It is necessary only for parts of
+	 * _SetupCleanup_FreeMMUMapping though.*/
+	OSLockAcquire(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock);
+
+	OSLockAcquire(psMMUContext->hLock);
+
+	_FreePageTables(psMMUContext,
+	                sDevVAddr,
+	                sDevVAddrEnd,
+	                uiLog2DataPageSize);
+
+	_SetupCleanup_FreeMMUMapping(psMMUContext->psDevNode,
+	                             psMMUContext->psPhysMemCtx);
+
+	OSLockRelease(psMMUContext->hLock);
+
+	OSLockRelease(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock);
+
+}
+
+PVRSRV_ERROR
+MMU_MapPages(MMU_CONTEXT *psMMUContext,
+             PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+             IMG_DEV_VIRTADDR sDevVAddrBase,
+             PMR *psPMR,
+             IMG_UINT32 ui32PhysPgOffset,
+             IMG_UINT32 ui32MapPageCount,
+             IMG_UINT32 *paui32MapIndices,
+             IMG_UINT32 uiLog2HeapPageSize)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hPriv;
+
+	MMU_Levelx_INFO *psLevel = NULL;
+
+	MMU_Levelx_INFO *psPrevLevel = NULL;
+
+	IMG_UINT32 uiPTEIndex = 0;
+	IMG_UINT32 uiPageSize = (1 << uiLog2HeapPageSize);
+	IMG_UINT32 uiLoop = 0;
+	IMG_UINT32 ui32MappedCount = 0;
+	IMG_UINT32 uiPgOffset = 0;
+	IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0;
+
+	IMG_UINT64 uiProtFlags = 0;
+	MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+
+	IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+
+	IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEV_PHYADDR *psDevPAddr;
+	IMG_DEV_PHYADDR sDevPAddr;
+	IMG_BOOL *pbValid;
+	IMG_BOOL bValid;
+	IMG_BOOL bDummyBacking = IMG_FALSE;
+	IMG_BOOL bNeedBacking = IMG_FALSE;
+
+#if defined(PDUMP)
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+
+	PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages (%lld bytes)",
+	              (IMG_UINT64)(ui32MapPageCount * uiPageSize));
+#endif /*PDUMP*/
+
+#if defined(TC_MEMORY_CONFIG) || defined(PLATO_MEMORY_CONFIG)
+	/* We're aware that on TC based platforms, accesses from GPU to CPU_LOCAL
+	 * allocated DevMem fail, so we forbid mapping such a PMR into device mmu */
+	if (PMR_Flags(psPMR) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "%s: Mapping a CPU_LOCAL PMR to device is forbidden on this platform", __func__));
+		return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+	}
+#endif
+
+	/* Validate the most essential parameters */
+	if((NULL == psMMUContext) || (0 == sDevVAddrBase.uiAddr) || (NULL == psPMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Invalid mapping parameter issued", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	/* Allocate memory for page-frame-numbers and validity states,
+	   N.B. assert could be triggered by an illegal uiSizeBytes */
+	if (ui32MapPageCount > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		psDevPAddr = OSAllocMem(ui32MapPageCount * sizeof(IMG_DEV_PHYADDR));
+		if (psDevPAddr == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN list"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		pbValid = OSAllocMem(ui32MapPageCount * sizeof(IMG_BOOL));
+		if (pbValid == NULL)
+		{
+			/* Should allocation fail, clean-up here before exit */
+			PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN state"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			OSFreeMem(psDevPAddr);
+			goto e0;
+		}
+	}
+	else
+	{
+		psDevPAddr = asDevPAddr;
+		pbValid	= abValid;
+	}
+
+	/* Get the Device physical addresses of the pages we are trying to map
+	 * In the case of non indexed mapping we can get all addresses at once */
+	if(NULL == paui32MapIndices)
+	{
+		eError = PMR_DevPhysAddr(psPMR,
+		                         uiLog2HeapPageSize,
+		                         ui32MapPageCount,
+		                         (ui32PhysPgOffset << uiLog2HeapPageSize),
+		                         psDevPAddr,
+		                         pbValid);
+		if (eError != PVRSRV_OK)
+		{
+			goto e1;
+		}
+	}
+
+	/*Get the Page table level configuration */
+	_MMU_GetPTConfig(psMMUContext,
+	                 (IMG_UINT32) uiLog2HeapPageSize,
+	                 &psConfig,
+	                 &hPriv,
+	                 &psDevVAddrConfig);
+
+	eError = _MMU_ConvertDevMemFlags(IMG_FALSE,
+	                                 uiMappingFlags,
+	                                 &uiMMUProtFlags,
+	                                 psMMUContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	/* Callback to get device specific protection flags */
+	if (psConfig->uiBytesPerEntry == 8)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize);
+	}
+	else if (psConfig->uiBytesPerEntry == 4)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: The page table entry byte length is not supported", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e2;
+	}
+
+	if (PMR_IsSparse(psPMR))
+	{
+		/* For sparse PMRs, decide whether invalid pages should be backed by the dummy page */
+		bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags);
+	}
+
+	OSLockAcquire(psMMUContext->hLock);
+
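+	/* PT cache flushes are batched: [uiFlushStart, uiFlushEnd] tracks the
+	   range of entries written in the current page table, which is only
+	   cleaned when the loop moves on to a different page table (and once
+	   more after the loop for the last table touched). */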
+	for(uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++)
+	{
+
+#if defined(PDUMP)
+		IMG_DEVMEM_OFFSET_T uiNextSymName;
+#endif /*PDUMP*/
+
+		if(NULL != paui32MapIndices)
+		{
+			uiPgOffset = paui32MapIndices[uiLoop];
+
+			/*Calculate the Device Virtual Address of the page */
+			sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + (uiPgOffset * uiPageSize);
+			/* Get the physical address to map */
+			eError = PMR_DevPhysAddr(psPMR,
+			                         uiLog2HeapPageSize,
+			                         1,
+			                         uiPgOffset * uiPageSize,
+			                         &sDevPAddr,
+			                         &bValid);
+			if (eError != PVRSRV_OK)
+			{
+				goto e3;
+			}
+		}
+		else
+		{
+			uiPgOffset = uiLoop + ui32PhysPgOffset;
+			sDevPAddr = psDevPAddr[uiLoop];
+			bValid = pbValid[uiLoop];
+		}
+
+		/*
+			The default value of the entry is invalid, so we don't need to
+			mark it as such if the page wasn't valid; we just advance past
+			that address
+		*/
+		if (bValid || bDummyBacking)
+		{
+
+			if(!bValid)
+			{
+				sDevPAddr.uiAddr = psMMUContext->psDevNode->sDummyPage.ui64DummyPgPhysAddr;
+			}
+			else
+			{
+				/* check the physical alignment of the memory to map */
+				PVR_ASSERT((sDevPAddr.uiAddr & (uiPageSize-1)) == 0);
+			}
+
+#if defined(DEBUG)
+{
+			IMG_INT32	i32FeatureVal = 0;
+			IMG_UINT32 ui32BitLength = FloorLog2(sDevPAddr.uiAddr);
+
+			i32FeatureVal = psMMUContext->psDevNode->pfnGetDeviceFeatureValue(psMMUContext->psDevNode, \
+														RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK);
+			do {
+				/* i32FeatureVal can be negative where this feature is undefined;
+				 * in that case we bail out rather than go ahead with the debug comparison */
+				if(0 > i32FeatureVal)
+					break;
+
+				if (ui32BitLength > i32FeatureVal )
+				{
+					PVR_DPF((PVR_DBG_ERROR,"_MMU_MapPage Failed. The physical address bitlength (%d) "
+							 "is greater than what the chip can handle (%d).",
+							 ui32BitLength, i32FeatureVal));
+
+					PVR_ASSERT(ui32BitLength <= i32FeatureVal );
+					eError = PVRSRV_ERROR_INVALID_PARAMS;
+					goto e3;
+				}
+			}while(0);
+}
+#endif /*DEBUG*/
+
+#if defined(PDUMP)
+			if(bValid)
+			{
+				eError = PMR_PDumpSymbolicAddr(psPMR, uiPgOffset * uiPageSize,
+											   sizeof(aszMemspaceName), &aszMemspaceName[0],
+											   sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+											   &uiSymbolicAddrOffset,
+											   &uiNextSymName);
+				PVR_ASSERT(eError == PVRSRV_OK);
+			}
+#endif /*PDUMP*/
+
+			psPrevLevel = psLevel;
+			/* Calculate PT index and get new table descriptor */
+			_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+						   &psLevel, &uiPTEIndex);
+
+			if (psPrevLevel == psLevel)
+			{
+				uiFlushEnd = uiPTEIndex;
+			}
+			else
+			{
+				/* Flush if we moved to another psLevel, i.e. page table */
+				if (psPrevLevel != NULL)
+				{
+					eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+					                                                &psPrevLevel->sMemDesc.psMapping->sMemHandle,
+					                                                uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset,
+					                                                (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+					if (eError != PVRSRV_OK)
+						goto e3;
+				}
+
+				uiFlushStart = uiPTEIndex;
+				uiFlushEnd = uiFlushStart;
+			}
+
+			HTBLOGK(HTB_SF_MMU_PAGE_OP_MAP,
+				HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr),
+				HTBLOG_U64_BITS_HIGH(sDevPAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevPAddr.uiAddr));
+
+			eError = _SetupPTE(psMMUContext,
+			                   psLevel,
+			                   uiPTEIndex,
+			                   psConfig,
+			                   &sDevPAddr,
+			                   IMG_FALSE,
+#if defined(PDUMP)
+			                   (bValid)?aszMemspaceName:(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName),
+			                   (bValid)?aszSymbolicAddress:DUMMY_PAGE,
+			                   (bValid)?uiSymbolicAddrOffset:0,
+#endif /*PDUMP*/
+			                   uiProtFlags);
+
+
+			if(eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Mapping failed", __func__));
+				goto e3;
+			}
+
+			if(bValid)
+			{
+				PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+				PVR_DPF ((PVR_DBG_MESSAGE,
+						  "%s: devVAddr=" IMG_DEV_VIRTADDR_FMTSPEC ", size=0x%x",
+						  __func__,
+						  sDevVAddr.uiAddr,
+						  uiPgOffset * uiPageSize));
+
+				ui32MappedCount++;
+			}
+		}
+
+		sDevVAddr.uiAddr += uiPageSize;
+	}
+
+	/* Flush the last level we touched */
+	if (psLevel != NULL)
+	{
+		eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+		                                                &psLevel->sMemDesc.psMapping->sMemHandle,
+		                                                uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+		                                                (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+		if (eError != PVRSRV_OK)
+			goto e3;
+	}
+
+	OSLockRelease(psMMUContext->hLock);
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	if (psDevPAddr != asDevPAddr)
+	{
+		OSFreeMem(pbValid);
+		OSFreeMem(psDevPAddr);
+	}
+
+	/* Flush TLB for PTs*/
+	psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+	                                               psMMUContext->hDevData,
+	                                               MMU_LEVEL_1,
+	                                               IMG_FALSE);
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, ui32MapPageCount);
+#endif /*PDUMP*/
+
+	return PVRSRV_OK;
+
+e3:
+	OSLockRelease(psMMUContext->hLock);
+
+	if(PMR_IsSparse(psPMR) && PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags))
+	{
+		bNeedBacking = IMG_TRUE;
+	}
+
+	MMU_UnmapPages(psMMUContext,(bNeedBacking)?uiMappingFlags:0, sDevVAddrBase, uiLoop, paui32MapIndices, uiLog2HeapPageSize, bNeedBacking);
+e2:
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+e1:
+	if (psDevPAddr != asDevPAddr)
+	{
+		OSFreeMem(pbValid);
+		OSFreeMem(psDevPAddr);
+	}
+e0:
+	return eError;
+}
+
+/*
+	MMU_UnmapPages
+*/
+void
+MMU_UnmapPages (MMU_CONTEXT *psMMUContext,
+				PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+                IMG_DEV_VIRTADDR sDevVAddrBase,
+                IMG_UINT32 ui32PageCount,
+                IMG_UINT32 *pai32FreeIndices,
+                IMG_UINT32 uiLog2PageSize,
+                IMG_BOOL bDummyBacking)
+{
+	IMG_UINT32 uiPTEIndex = 0, ui32Loop=0;
+	IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+	IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0;
+	MMU_Levelx_INFO *psLevel = NULL;
+	MMU_Levelx_INFO *psPrevLevel = NULL;
+	IMG_HANDLE hPriv;
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	IMG_UINT64 uiProtFlags = 0;
+	MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+	IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+	IMG_DEV_PHYADDR sDummyPgDevPhysAddr;
+	IMG_BOOL bUnmap = IMG_TRUE;
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010llX to 0x%010llX",
+	             ui32PageCount,
+	             (IMG_UINT64)sDevVAddr.uiAddr,
+	             ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1);
+#endif
+
+	sDummyPgDevPhysAddr.uiAddr = psMMUContext->psDevNode->sDummyPage.ui64DummyPgPhysAddr;
+	bUnmap = (bDummyBacking)?IMG_FALSE:IMG_TRUE;
+	/* Get PT and address configs */
+	_MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+	                 &psConfig, &hPriv, &psDevVAddrConfig);
+
+	if (_MMU_ConvertDevMemFlags(bUnmap,
+	                               uiMappingFlags,
+	                               &uiMMUProtFlags,
+	                               psMMUContext) != PVRSRV_OK)
+	{
+		return;
+	}
+
+	/* Callback to get device specific protection flags */
+	if (psConfig->uiBytesPerEntry == 4)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+	}
+	else if (psConfig->uiBytesPerEntry == 8)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+	}
+
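+	/* Unmapped entries are rewritten rather than zeroed: they point at the
+	   dummy page when bDummyBacking is set (sparse allocations) and at the
+	   known bad physical address gsBadDevPhyAddr otherwise. */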
+
+	OSLockAcquire(psMMUContext->hLock);
+
+	/* Unmap page by page */
+	while (ui32Loop < ui32PageCount)
+	{
+		if(NULL != pai32FreeIndices)
+		{
+			/*Calculate the Device Virtual Address of the page */
+			sDevVAddr.uiAddr = sDevVAddrBase.uiAddr +
+										pai32FreeIndices[ui32Loop] * uiPageSize;
+		}
+
+		psPrevLevel = psLevel;
+		/* Calculate PT index and get new table descriptor */
+		_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+					   &psLevel, &uiPTEIndex);
+
+		if (psPrevLevel == psLevel)
+		{
+			uiFlushEnd = uiPTEIndex;
+		}
+		else
+		{
+			/* Flush if we moved to another psLevel, i.e. page table */
+			if (psPrevLevel != NULL)
+			{
+				psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+				                                       &psPrevLevel->sMemDesc.psMapping->sMemHandle,
+				                                       uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset,
+				                                       (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+			}
+
+			uiFlushStart = uiPTEIndex;
+			uiFlushEnd = uiFlushStart;
+		}
+
+		HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
+			HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr));
+
+		if (_SetupPTE(psMMUContext,
+		              psLevel,
+		              uiPTEIndex,
+		              psConfig,
+		              (bDummyBacking)?&sDummyPgDevPhysAddr:&gsBadDevPhyAddr,
+		              bUnmap,
+#if defined(PDUMP)
+		              (bDummyBacking)?(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName):NULL,
+		              (bDummyBacking)?DUMMY_PAGE:NULL,
+		              0U,
+#endif
+		              uiProtFlags) != PVRSRV_OK )
+		{
+			goto e0;
+		}
+
+		/* Check we haven't wrapped around */
+		PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+		ui32Loop++;
+		sDevVAddr.uiAddr += uiPageSize;
+	}
+
+	/* Flush the last level we touched */
+	if (psLevel != NULL)
+	{
+		psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+		                                       &psLevel->sMemDesc.psMapping->sMemHandle,
+		                                       uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+		                                       (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+	}
+
+	OSLockRelease(psMMUContext->hLock);
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	/* Flush TLB for PTs*/
+	psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+	                                               psMMUContext->hDevData,
+	                                               MMU_LEVEL_1,
+	                                               IMG_TRUE);
+
+	return;
+
+e0:
+	OSLockRelease(psMMUContext->hLock);
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+	PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to unmap page table"));
+	PVR_ASSERT(0);
+	return;
+}
+
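+/*
+	MMU_MapPMRFast
+*/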
+PVRSRV_ERROR
+MMU_MapPMRFast (MMU_CONTEXT *psMMUContext,
+            IMG_DEV_VIRTADDR sDevVAddrBase,
+            const PMR *psPMR,
+            IMG_DEVMEM_SIZE_T uiSizeBytes,
+            PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+            IMG_UINT32 uiLog2HeapPageSize)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 uiCount, i;
+	IMG_UINT32 uiPageSize = 1 << uiLog2HeapPageSize;
+	IMG_UINT32 uiPTEIndex = 0;
+	IMG_UINT64 uiProtFlags;
+	MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+	MMU_Levelx_INFO *psLevel = NULL;
+	IMG_HANDLE hPriv;
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+	IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEV_PHYADDR *psDevPAddr;
+	IMG_BOOL *pbValid;
+	IMG_UINT32 uiFlushStart = 0;
+
+#if defined(PDUMP)
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+	IMG_UINT32 ui32MappedCount = 0;
+	PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages (%lld bytes)", uiSizeBytes);
+#endif /*PDUMP*/
+
+	/* We should verify the size and contiguity when supporting variable page size */
+
+	PVR_ASSERT (psMMUContext != NULL);
+	PVR_ASSERT (psPMR != NULL);
+
+#if defined(TC_MEMORY_CONFIG) || defined(PLATO_MEMORY_CONFIG)
+	/* We're aware that on TC based platforms, accesses from GPU to CPU_LOCAL
+	 * allocated DevMem fail, so we forbid mapping such a PMR into device mmu */
+	if (PMR_Flags(psPMR) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "%s: Mapping a CPU_LOCAL PMR to device is forbidden on this platform", __func__));
+		return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+	}
+#endif
+
+	/* Allocate memory for page-frame-numbers and validity states,
+	   N.B. assert could be triggered by an illegal uiSizeBytes */
+	uiCount = uiSizeBytes >> uiLog2HeapPageSize;
+	PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiCount << uiLog2HeapPageSize == uiSizeBytes);
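+	/* The assert above catches a uiSizeBytes that is not a whole multiple
+	   of the heap page size */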
+	if (uiCount > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		psDevPAddr = OSAllocMem(uiCount * sizeof(IMG_DEV_PHYADDR));
+		if (psDevPAddr == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN list"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		pbValid = OSAllocMem(uiCount * sizeof(IMG_BOOL));
+		if (pbValid == NULL)
+		{
+			/* Should allocation fail, clean-up here before exit */
+			PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN state"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			OSFreeMem(psDevPAddr);
+			goto e0;
+		}
+	}
+	else
+	{
+		psDevPAddr = asDevPAddr;
+		pbValid	= abValid;
+	}
+
+	/* Get general PT and address configs */
+	_MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2HeapPageSize,
+	                 &psConfig, &hPriv, &psDevVAddrConfig);
+
+	eError = _MMU_ConvertDevMemFlags(IMG_FALSE,
+	                                 uiMappingFlags,
+	                                 &uiMMUProtFlags,
+	                                 psMMUContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	/* Callback to get device specific protection flags */
+
+	if (psConfig->uiBytesPerEntry == 8)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize);
+	}
+	else if (psConfig->uiBytesPerEntry == 4)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: The page table entry byte length is not supported", __func__));
+		eError = PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+		goto e1;
+	}
+
+
+	/* "uiSizeBytes" implies the amount of contiguity in the underlying
+	   pages. Normally this would be constant for the system, but that
+	   constant needs to be communicated in case it's ever different;
+	   the caller guarantees that PMRLockSysPhysAddr() has already been
+	   called */
+	eError = PMR_DevPhysAddr(psPMR,
+							 uiLog2HeapPageSize,
+							 uiCount,
+							 0,
+							 psDevPAddr,
+							 pbValid);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	OSLockAcquire(psMMUContext->hLock);
+
+	_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+				   &psLevel, &uiPTEIndex);
+	uiFlushStart = uiPTEIndex;
+
+	/* Map in all pages of that PMR page by page*/
+	for (i = 0, uiCount = 0; uiCount < uiSizeBytes; i++)
+	{
+#if defined(DEBUG)
+{
+	IMG_INT32	i32FeatureVal = 0;
+	IMG_UINT32 ui32BitLength = FloorLog2(psDevPAddr[i].uiAddr);
+	i32FeatureVal = psMMUContext->psDevNode->pfnGetDeviceFeatureValue(psMMUContext->psDevNode, \
+			RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK);
+	do {
+		if(0 > i32FeatureVal)
+			break;
+
+		if (ui32BitLength > i32FeatureVal )
+		{
+			PVR_DPF((PVR_DBG_ERROR,"_MMU_MapPage Failed. The physical address bitlength (%d) "
+					"is greater than what the chip can handle (%d).",
+					ui32BitLength, i32FeatureVal));
+
+			PVR_ASSERT(ui32BitLength <= i32FeatureVal );
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			OSLockRelease(psMMUContext->hLock);
+			goto e1;
+		}
+	}while(0);
+}
+#endif /*DEBUG*/
+#if defined(PDUMP)
+		{
+			IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+			eError = PMR_PDumpSymbolicAddr(psPMR, uiCount,
+										   sizeof(aszMemspaceName), &aszMemspaceName[0],
+										   sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+										   &uiSymbolicAddrOffset,
+										   &uiNextSymName);
+			PVR_ASSERT(eError == PVRSRV_OK);
+			ui32MappedCount++;
+		}
+#endif /*PDUMP*/
+
+		HTBLOGK(HTB_SF_MMU_PAGE_OP_PMRMAP,
+			HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr),
+			HTBLOG_U64_BITS_HIGH(psDevPAddr[i].uiAddr), HTBLOG_U64_BITS_LOW(psDevPAddr[i].uiAddr));
+
+		/* Set the PT entry with the specified address and protection flags */
+		eError = _SetupPTE(psMMUContext, psLevel, uiPTEIndex,
+		                   psConfig, &psDevPAddr[i], IMG_FALSE,
+#if defined(PDUMP)
+		                   aszMemspaceName,
+		                   aszSymbolicAddress,
+		                   uiSymbolicAddrOffset,
+#endif /*PDUMP*/
+						   uiProtFlags);
+		if (eError != PVRSRV_OK)
+			goto e2;
+
+		sDevVAddr.uiAddr += uiPageSize;
+		uiCount += uiPageSize;
+
+		/* Calculate PT index and get new table descriptor */
+		if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (uiCount != uiSizeBytes))
+		{
+			uiPTEIndex++;
+		}
+		else
+		{
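+			/* Either the end of this page table or the end of the mapping
+			   has been reached: clean the CPU cache for the range of PTEs
+			   written since the last flush, then look up the next table. */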
+			eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+			                                                &psLevel->sMemDesc.psMapping->sMemHandle,
+			                                                uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+			                                                (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+			if (eError != PVRSRV_OK)
+				goto e2;
+
+
+			_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+						   &psLevel, &uiPTEIndex);
+			uiFlushStart = uiPTEIndex;
+		}
+	}
+
+	OSLockRelease(psMMUContext->hLock);
+
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	if (psDevPAddr != asDevPAddr)
+	{
+		OSFreeMem(pbValid);
+		OSFreeMem(psDevPAddr);
+	}
+
+	/* Flush TLB for PTs*/
+	psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+	                                               psMMUContext->hDevData,
+	                                               MMU_LEVEL_1,
+	                                               IMG_FALSE);
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, i);
+#endif /*PDUMP*/
+
+	return PVRSRV_OK;
+
+e2:
+	OSLockRelease(psMMUContext->hLock);
+	MMU_UnmapPMRFast(psMMUContext,
+	                 sDevVAddrBase,
+	                 uiSizeBytes >> uiLog2HeapPageSize,
+	                 uiLog2HeapPageSize);
+e1:
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	if (psDevPAddr != asDevPAddr)
+	{
+		OSFreeMem(pbValid);
+		OSFreeMem(psDevPAddr);
+	}
+e0:
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+/*
+    MMU_UnmapPMRFast
+*/
+void
+MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext,
+                 IMG_DEV_VIRTADDR sDevVAddrBase,
+                 IMG_UINT32 ui32PageCount,
+                 IMG_UINT32 uiLog2PageSize)
+{
+	IMG_UINT32 uiPTEIndex = 0, ui32Loop=0;
+	IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+	MMU_Levelx_INFO *psLevel = NULL;
+	IMG_HANDLE hPriv;
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+	IMG_UINT64 uiProtFlags = 0;
+	MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+	IMG_UINT64 uiEntry = 0;
+	IMG_UINT32 uiFlushStart = 0;
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010llX to 0x%010llX",
+				 ui32PageCount,
+				 (IMG_UINT64)sDevVAddr.uiAddr,
+				 ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1);
+#endif
+
+	/* Get PT and address configs */
+	_MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+					 &psConfig, &hPriv, &psDevVAddrConfig);
+
+	if (_MMU_ConvertDevMemFlags(IMG_TRUE,
+							0,
+							&uiMMUProtFlags,
+							psMMUContext) != PVRSRV_OK)
+	{
+		return;
+	}
+
+	/* Callback to get device specific protection flags */
+
+	if (psConfig->uiBytesPerEntry == 8)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+
+		/* Fill the entry with a bad address but leave space for protection flags */
+		uiEntry = (gsBadDevPhyAddr.uiAddr & ~psConfig->uiProtMask) | uiProtFlags;
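+		/* Illustrative note: the derived unmap protection flags leave the
+		   entry invalid, so a stale walk lands on the platform-defined
+		   "bad" physical address rather than on a random page. */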
+	}
+	else if (psConfig->uiBytesPerEntry == 4)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+
+		/* Fill the entry with a bad address but leave space for protection flags */
+		uiEntry = (((IMG_UINT32) gsBadDevPhyAddr.uiAddr) & ~psConfig->uiProtMask) | (IMG_UINT32) uiProtFlags;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: The page table entry byte length is not supported", __func__));
+		goto e0;
+	}
+
+	OSLockAcquire(psMMUContext->hLock);
+
+	_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+				   &psLevel, &uiPTEIndex);
+	uiFlushStart = uiPTEIndex;
+
+	/* Unmap page by page and keep the loop as quick as possible.
+	 * Only use parts of _SetupPTE that need to be executed. */
+	while (ui32Loop < ui32PageCount)
+	{
+
+		/* Set the PT entry to invalid and poison it with a bad address */
+		if (psConfig->uiBytesPerEntry == 8)
+		{
+			((IMG_UINT64*) psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = uiEntry;
+		}
+		else if (psConfig->uiBytesPerEntry == 4)
+		{
+			((IMG_UINT32*) psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = (IMG_UINT32) uiEntry;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: The page table entry byte length is not supported", __func__));
+			goto e1;
+		}
+
+		/* Log modifications */
+		HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
+			HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr));
+
+		HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+			HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+			uiPTEIndex, MMU_LEVEL_1,
+			HTBLOG_U64_BITS_HIGH(uiEntry), HTBLOG_U64_BITS_LOW(uiEntry),
+			IMG_FALSE);
+
+#if defined (PDUMP)
+		PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+		                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+		                      psLevel->sMemDesc.pvCpuVAddr,
+		                      psLevel->sMemDesc.sDevPAddr,
+		                      uiPTEIndex,
+		                      1,
+		                      NULL,
+		                      NULL,
+		                      0,
+		                      psConfig->uiBytesPerEntry,
+		                      psConfig->uiAddrLog2Align,
+		                      psConfig->uiAddrShift,
+		                      psConfig->uiAddrMask,
+		                      psConfig->uiProtMask,
+		                      psConfig->uiValidEnMask,
+		                      0,
+		                      psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+		sDevVAddr.uiAddr += uiPageSize;
+		ui32Loop++;
+
+		/* Calculate PT index and get new table descriptor */
+		if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (ui32Loop != ui32PageCount))
+		{
+			uiPTEIndex++;
+		}
+		else
+		{
+			psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+			                                       &psLevel->sMemDesc.psMapping->sMemHandle,
+			                                       uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+			                                       (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+
+			_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+						   &psLevel, &uiPTEIndex);
+			uiFlushStart = uiPTEIndex;
+		}
+	}
+
+	OSLockRelease(psMMUContext->hLock);
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	/* Flush TLB for PTs*/
+	psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+												   psMMUContext->hDevData,
+												   MMU_LEVEL_1,
+												   IMG_TRUE);
+
+	return;
+
+e1:
+	OSLockRelease(psMMUContext->hLock);
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+e0:
+	PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to map/unmap page table"));
+	PVR_ASSERT(0);
+	return;
+}
+
+/*
+	MMU_ChangeValidity
+*/
+PVRSRV_ERROR
+MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
+                   IMG_DEV_VIRTADDR sDevVAddr,
+                   IMG_DEVMEM_SIZE_T uiNumPages,
+                   IMG_UINT32 uiLog2PageSize,
+                   IMG_BOOL bMakeValid,
+                   PMR *psPMR)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	IMG_HANDLE hPriv;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	const MMU_PxE_CONFIG *psConfig;
+	MMU_Levelx_INFO *psLevel = NULL;
+	IMG_UINT32 uiFlushStart = 0;
+	IMG_UINT32 uiPTIndex = 0;
+	IMG_UINT32 i;
+	IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+	IMG_BOOL bValid;
+
+#if defined(PDUMP)
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+	PDUMPCOMMENT("Change valid bit of the data pages to %d (0x%llX - 0x%llX)",
+			bMakeValid,
+			sDevVAddr.uiAddr,
+			sDevVAddr.uiAddr + (uiNumPages<<uiLog2PageSize) - 1 );
+#endif /*PDUMP*/
+
+	/* We should verify the size and contiguity when supporting variable page size */
+	PVR_ASSERT (psMMUContext != NULL);
+	PVR_ASSERT (psPMR != NULL);
+
+	/* Get general PT and address configs */
+	_MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+	                 &psConfig, &hPriv, &psDevVAddrConfig);
+
+	_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+					&psLevel, &uiPTIndex);
+	uiFlushStart = uiPTIndex;
+
+	/* Do a page table walk and change attribute for every page in range. */
+	for (i=0; i < uiNumPages; )
+	{
+
+		/* Set the entry */
+		if (bMakeValid == IMG_TRUE)
+		{
+			/* Only set valid if physical address exists (sparse allocs might have none)*/
+			eError = PMR_IsOffsetValid(psPMR, uiLog2PageSize, 1, i<<uiLog2PageSize, &bValid);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Cannot determine validity of page table entries page"));
+				goto e_exit;
+			}
+
+			if (bValid)
+			{
+				if (psConfig->uiBytesPerEntry == 8)
+				{
+					((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
+				}
+				else if (psConfig->uiBytesPerEntry == 4)
+				{
+					((IMG_UINT32 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
+				}
+				else
+				{
+					eError = PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+					PVR_DPF((PVR_DBG_ERROR, "Cannot change page table entries due to wrong configuration"));
+					goto e_exit;
+				}
+			}
+		}
+		else
+		{
+			if (psConfig->uiBytesPerEntry == 8)
+			{
+				((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
+			}
+			else if (psConfig->uiBytesPerEntry == 4)
+			{
+				((IMG_UINT32 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
+			}
+			else
+			{
+				eError = PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+				PVR_DPF((PVR_DBG_ERROR, "Cannot change page table entries due to wrong configuration"));
+				goto e_exit;
+			}
+		}
+
+#if defined(PDUMP)
+		PMR_PDumpSymbolicAddr(psPMR, i<<uiLog2PageSize,
+		                      sizeof(aszMemspaceName), &aszMemspaceName[0],
+		                      sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+		                      &uiSymbolicAddrOffset,
+		                      &uiNextSymName);
+
+		PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+		                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+		                      psLevel->sMemDesc.pvCpuVAddr,
+		                      psLevel->sMemDesc.sDevPAddr,
+		                      uiPTIndex,
+		                      1,
+		                      aszMemspaceName,
+		                      aszSymbolicAddress,
+		                      uiSymbolicAddrOffset,
+		                      psConfig->uiBytesPerEntry,
+		                      psConfig->uiAddrLog2Align,
+		                      psConfig->uiAddrShift,
+		                      psConfig->uiAddrMask,
+		                      psConfig->uiProtMask,
+		                      psConfig->uiValidEnMask,
+		                      0,
+		                      psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+		sDevVAddr.uiAddr += uiPageSize;
+		i++;
+
+		/* Calculate PT index and get new table descriptor */
+		if (uiPTIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (i != uiNumPages))
+		{
+			uiPTIndex++;
+		}
+		else
+		{
+
+			eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+			                                                &psLevel->sMemDesc.psMapping->sMemHandle,
+			                                                uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+			                                                (uiPTIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+			if (eError != PVRSRV_OK)
+				goto e_exit;
+
+			_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+						   &psLevel, &uiPTIndex);
+			uiFlushStart = uiPTIndex;
+		}
+	}
+
+e_exit:
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	/* Flush TLB for PTs*/
+	psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+	                                               psMMUContext->hDevData,
+	                                               MMU_LEVEL_1,
+	                                               !bMakeValid);
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+	MMU_AcquireBaseAddr
+*/
+PVRSRV_ERROR
+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr)
+{
+	if (!psMMUContext)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	*psPhysAddr = psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr;
+	return PVRSRV_OK;
+}
+
+/*
+	MMU_ReleaseBaseAddr
+*/
+void
+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext)
+{
+	PVR_UNREFERENCED_PARAMETER(psMMUContext);
+}
+
+/*
+	MMU_SetDeviceData
+*/
+void MMU_SetDeviceData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hDevData)
+{
+	psMMUContext->hDevData = hDevData;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+    MMU_SetOSid, MMU_GetOSid
+*/
+
+void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt)
+{
+    psMMUContext->ui32OSid     = ui32OSid;
+    psMMUContext->ui32OSidReg  = ui32OSidReg;
+    psMMUContext->bOSidAxiProt = bOSidAxiProt;
+
+    return;
+}
+
+void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt)
+{
+    *pui32OSid     = psMMUContext->ui32OSid;
+    *pui32OSidReg  = psMMUContext->ui32OSidReg;
+    *pbOSidAxiProt = psMMUContext->bOSidAxiProt;
+
+    return;
+}
+
+#endif
+
+/*
+	MMU_CheckFaultAddress
+*/
+void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext,
+				IMG_DEV_VIRTADDR *psDevVAddr,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	/* Ideally the RGX defs should be via callbacks, but the function is only called from RGX. */
+	#define MMU_VALID_STR(entry,level) \
+						  (apszMMUValidStr[((((entry)&(RGX_MMUCTRL_##level##_DATA_ENTRY_PENDING_EN))!=0) << 1)| \
+						                   ((((entry)&(RGX_MMUCTRL_##level##_DATA_VALID_EN))!=0) << 0)])
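+	/* The table below is indexed with bit 1 = PENDING and bit 0 = VALID,
+	   as assembled by MMU_VALID_STR above. */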
+	static const IMG_PCHAR apszMMUValidStr[1<<2] = {/*--*/ "not valid",
+	                                                /*-V*/ "valid",
+	                                                /*P-*/ "pending",
+	                                                /*PV*/ "inconsistent (pending and valid)"};
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_PxE_CONFIG *psMMUPDEConfig;
+	const MMU_PxE_CONFIG *psMMUPTEConfig;
+	const MMU_DEVVADDR_CONFIG *psMMUDevVAddrConfig;
+	IMG_HANDLE hPriv;
+	MMU_Levelx_INFO *psLevel = NULL;
+	PVRSRV_ERROR eError;
+	IMG_UINT64 uiIndex;
+	IMG_UINT32 ui32PCIndex;
+	IMG_UINT32 ui32PDIndex;
+	IMG_UINT32 ui32PTIndex;
+	IMG_UINT32 ui32Log2PageSize;
+
+	OSLockAcquire(psMMUContext->hLock);
+
+	/*
+		At this point we don't know the page size so assume it's 4K.
+		When we get the PD level (MMU_LEVEL_2) we can check to see
+		if this assumption is correct.
+	*/
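+	/* Note: a log2 page size of 12 corresponds to 4KB (1 << 12 bytes). */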
+	eError = psDevAttrs->pfnGetPageSizeConfiguration(12,
+													 &psMMUPDEConfig,
+													 &psMMUPTEConfig,
+													 &psMMUDevVAddrConfig,
+													 &hPriv);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("Failed to get the page size info for log2 page sizeof 12"));
+	}
+
+	psLevel = &psMMUContext->sBaseLevelInfo;
+	psConfig = psDevAttrs->psBaseConfig;
+
+	switch(psMMUContext->psDevAttrs->eTopLevel)
+	{
+		case MMU_LEVEL_3:
+			/* Determine the PC index */
+			uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexMask;
+			uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexShift;
+			ui32PCIndex = (IMG_UINT32) uiIndex;
+			PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PCIndex));
+
+			if (ui32PCIndex >= psLevel->ui32NumOfEntries)
+			{
+				PVR_DUMPDEBUG_LOG("PC index (%d) out of bounds (%d)", ui32PCIndex, psLevel->ui32NumOfEntries);
+				break;
+			}
+
+			if (psConfig->uiBytesPerEntry == 4)
+			{
+				IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_DUMPDEBUG_LOG("PCE for index %d = 0x%08x and is %s",
+						 ui32PCIndex,
+						 pui32Ptr[ui32PCIndex],
+						 MMU_VALID_STR(pui32Ptr[ui32PCIndex], PC));
+			}
+			else
+			{
+				IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_DUMPDEBUG_LOG("PCE for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+						 ui32PCIndex,
+						 pui64Ptr[ui32PCIndex],
+						 MMU_VALID_STR(pui64Ptr[ui32PCIndex], PC));
+			}
+
+			psLevel = psLevel->apsNextLevel[ui32PCIndex];
+			if (!psLevel)
+			{
+				break;
+			}
+			psConfig = psMMUPDEConfig;
+			/* Fall through */
+
+		case MMU_LEVEL_2:
+			/* Determine the PD index */
+			uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexMask;
+			uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexShift;
+			ui32PDIndex = (IMG_UINT32) uiIndex;
+			PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PDIndex));
+
+			if (ui32PDIndex >= psLevel->ui32NumOfEntries)
+			{
+				PVR_DUMPDEBUG_LOG("PD index (%d) out of bounds (%d)", ui32PDIndex, psLevel->ui32NumOfEntries);
+				break;
+			}
+
+			if (psConfig->uiBytesPerEntry == 4)
+			{
+				IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_DUMPDEBUG_LOG("PDE for index %d = 0x%08x and is %s",
+						 ui32PDIndex,
+						 pui32Ptr[ui32PDIndex],
+						 MMU_VALID_STR(pui32Ptr[ui32PDIndex], PD));
+
+				if (psDevAttrs->pfnGetPageSizeFromPDE4(pui32Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
+				{
+					PVR_LOG(("Failed to get the page size from the PDE"));
+				}
+			}
+			else
+			{
+				IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_DUMPDEBUG_LOG("PDE for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+						 ui32PDIndex,
+						 pui64Ptr[ui32PDIndex],
+						 MMU_VALID_STR(pui64Ptr[ui32PDIndex], PD));
+
+				if (psDevAttrs->pfnGetPageSizeFromPDE8(pui64Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
+				{
+					PVR_LOG(("Failed to get the page size from the PDE"));
+				}
+			}
+
+			/*
+				We assumed the page size was 4K, now we have the actual size
+				from the PDE we can confirm if our assumption was correct.
+				Until now it hasn't mattered as the PC and PD are the same
+				regardless of the page size
+			*/
+			if (ui32Log2PageSize != 12)
+			{
+				/* Put the 4K page size data */
+				psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+
+				/* Get the correct size data */
+				eError = psDevAttrs->pfnGetPageSizeConfiguration(ui32Log2PageSize,
+																 &psMMUPDEConfig,
+																 &psMMUPTEConfig,
+																 &psMMUDevVAddrConfig,
+																 &hPriv);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_LOG(("Failed to get the page size info for log2 page sizeof %d", ui32Log2PageSize));
+					break;
+				}
+			}
+			psLevel = psLevel->apsNextLevel[ui32PDIndex];
+			if (!psLevel)
+			{
+				break;
+			}
+			psConfig = psMMUPTEConfig;
+			/* Fall through */
+
+		case MMU_LEVEL_1:
+			/* Determine the PT index */
+			uiIndex = psDevVAddr->uiAddr & psMMUDevVAddrConfig->uiPTIndexMask;
+			uiIndex = uiIndex >> psMMUDevVAddrConfig->uiPTIndexShift;
+			ui32PTIndex = (IMG_UINT32) uiIndex;
+			PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PTIndex));
+
+			if (ui32PTIndex >= psLevel->ui32NumOfEntries)
+			{
+				PVR_DUMPDEBUG_LOG("PT index (%d) out of bounds (%d)", ui32PTIndex, psLevel->ui32NumOfEntries);
+				break;
+			}
+
+			if (psConfig->uiBytesPerEntry == 4)
+			{
+				IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_DUMPDEBUG_LOG("PTE for index %d = 0x%08x and is %s",
+						 ui32PTIndex,
+						 pui32Ptr[ui32PTIndex],
+						 MMU_VALID_STR(pui32Ptr[ui32PTIndex], PT));
+			}
+			else
+			{
+				IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_DUMPDEBUG_LOG("PTE for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+						 ui32PTIndex,
+						 pui64Ptr[ui32PTIndex],
+						 MMU_VALID_STR(pui64Ptr[ui32PTIndex], PT));
+			}
+
+			break;
+
+		default:
+			PVR_LOG(("Unsupported MMU setup"));
+			break;
+	}
+
+	OSLockRelease(psMMUContext->hLock);
+}
+
+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
+                             IMG_UINT32 uiLog2PageSize,
+                             IMG_DEV_VIRTADDR sDevVAddr)
+{
+    MMU_Levelx_INFO *psLevel = NULL;
+    const MMU_PxE_CONFIG *psConfig;
+    const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+    IMG_HANDLE hPriv;
+    IMG_UINT32 uiIndex = 0;
+    IMG_BOOL bStatus = IMG_FALSE;
+
+    _MMU_GetPTConfig(psMMUContext, uiLog2PageSize, &psConfig, &hPriv, &psDevVAddrConfig);
+
+    OSLockAcquire(psMMUContext->hLock);
+
+    switch(psMMUContext->psDevAttrs->eTopLevel)
+    {
+        case MMU_LEVEL_3:
+            uiIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+            psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
+            if (psLevel == NULL)
+                break;
+            /* fall through */
+        case MMU_LEVEL_2:
+            uiIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+
+            if (psLevel != NULL)
+                psLevel = psLevel->apsNextLevel[uiIndex];
+            else
+                psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
+
+            if (psLevel == NULL)
+                break;
+            /* fall through */
+        case MMU_LEVEL_1:
+            uiIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+
+            if (psLevel == NULL)
+                psLevel = &psMMUContext->sBaseLevelInfo;
+
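+            /* Note: this read appears to assume 8-byte PTEs; 4-byte entry
+               configurations are not handled separately here. */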
+            bStatus = ((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiIndex]
+                      & psConfig->uiValidEnMask;
+            break;
+        default:
+            PVR_LOG(("MMU_IsVDevAddrValid: Unsupported MMU setup"));
+            break;
+    }
+
+    OSLockRelease(psMMUContext->hLock);
+
+    _MMU_PutPTConfig(psMMUContext, hPriv);
+
+    return bStatus;
+}
+
+#if defined(PDUMP)
+/*
+	MMU_ContextDerivePCPDumpSymAddr
+*/
+PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
+                                             IMG_CHAR *pszPDumpSymbolicNameBuffer,
+                                             size_t uiPDumpSymbolicNameBufferSize)
+{
+    size_t uiCount;
+    IMG_UINT64 ui64PhysAddr;
+	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+    if (!psMMUContext->sBaseLevelInfo.sMemDesc.bValid)
+    {
+        /* We don't have any allocations.  You're not allowed to ask
+           for the page catalogue base address until you've made at
+           least one allocation */
+        return PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR;
+    }
+
+    ui64PhysAddr = (IMG_UINT64)psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr.uiAddr;
+
+    PVR_ASSERT(uiPDumpSymbolicNameBufferSize >= (IMG_UINT32)(21 + OSStringLength(psDevId->pszPDumpDevName)));
+
+    /* The page catalogue symbolic name is formed from the page
+       catalogue phys addr prefixed with MMUPC_. */
+
+    uiCount = OSSNPrintf(pszPDumpSymbolicNameBuffer,
+                         uiPDumpSymbolicNameBufferSize,
+                         ":%s:%s%016llX",
+                         psDevId->pszPDumpDevName,
+                         psMMUContext->sBaseLevelInfo.sMemDesc.bValid?"MMUPC_":"XXX",
+                         ui64PhysAddr);
+
+    if (uiCount + 1 > uiPDumpSymbolicNameBufferSize)
+    {
+        return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+    return PVRSRV_OK;
+}
+
+/*
+	MMU_PDumpWritePageCatBase
+*/
+PVRSRV_ERROR
+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+                          const IMG_CHAR *pszSpaceName,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT32 ui32WordSize,
+                          IMG_UINT32 ui32AlignShift,
+                          IMG_UINT32 ui32Shift,
+                          PDUMP_FLAGS_T uiPdumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszPageCatBaseSymbolicAddr[100];
+	const IMG_CHAR *pszPDumpDevName = psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName;
+
+	eError = MMU_ContextDerivePCPDumpSymAddr(psMMUContext,
+                                             &aszPageCatBaseSymbolicAddr[0],
+                                             sizeof(aszPageCatBaseSymbolicAddr));
+	if (eError ==  PVRSRV_OK)
+	{
+		eError = PDumpWriteSymbAddress(pszSpaceName,
+		                               uiOffset,
+		                               aszPageCatBaseSymbolicAddr,
+		                               0, /* offset -- Could be non-zero for var. pgsz */
+		                               pszPDumpDevName,
+		                               ui32WordSize,
+		                               ui32AlignShift,
+		                               ui32Shift,
+		                               uiPdumpFlags | PDUMP_FLAGS_CONTINUOUS);
+	}
+
+	return eError;
+}
+
+/*
+	MMU_AcquirePDumpMMUContext
+*/
+PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext,
+                                        IMG_UINT32 *pui32PDumpMMUContextID)
+{
+	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+	if (!psMMUContext->ui32PDumpContextIDRefCount)
+	{
+		PDUMP_MMU_ALLOC_MMUCONTEXT(psDevId->pszPDumpDevName,
+                                           psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr,
+                                           psMMUContext->psDevAttrs->eMMUType,
+                                           &psMMUContext->uiPDumpContextID);
+	}
+
+	psMMUContext->ui32PDumpContextIDRefCount++;
+	*pui32PDumpMMUContextID = psMMUContext->uiPDumpContextID;
+
+	return PVRSRV_OK;
+}
+
+/*
+	MMU_ReleasePDumpMMUContext
+*/
+PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext)
+{
+	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+	PVR_ASSERT(psMMUContext->ui32PDumpContextIDRefCount != 0);
+	psMMUContext->ui32PDumpContextIDRefCount--;
+
+	if (psMMUContext->ui32PDumpContextIDRefCount == 0)
+	{
+		PDUMP_MMU_FREE_MMUCONTEXT(psDevId->pszPDumpDevName,
+									psMMUContext->uiPDumpContextID);
+	}
+
+	return PVRSRV_OK;
+}
+#endif
+
+/******************************************************************************
+ End of file (mmu_common.c)
+******************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pdump_common.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pdump_common.c
new file mode 100644
index 0000000..f87ebd3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pdump_common.c
@@ -0,0 +1,3508 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common Server PDump functions layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+   
+#if defined(PDUMP)
+#include <stdarg.h>
+
+#include "pvrversion.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "pdump_physmem.h"
+#include "hash.h"
+#include "connection_server.h"
+#include "sync_server.h"
+#include "services_km.h"
+/* pdump headers */
+#include "dbgdrvif_srv5.h"
+#include "pdump_osfunc.h"
+#include "pdump_km.h"
+
+/* Allow temporary buffer size override */
+#if !defined(PDUMP_TEMP_BUFFER_SIZE)
+#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024U)
+#endif
+
+/* DEBUG */
+#if 0
+#define PDUMP_DBG(a)   PDumpOSDebugPrintf (a)
+#else
+#define PDUMP_DBG(a)
+#endif
+
+
+#define	PTR_PLUS(t, p, x) ((t)(((IMG_CHAR *)(p)) + (x)))
+#define	VPTR_PLUS(p, x) PTR_PLUS(void *, p, x)
+#define	VPTR_INC(p, x) ((p) = VPTR_PLUS(p, x))
+#define MAX_PDUMP_MMU_CONTEXTS	(32)
+static void *gpvTempBuffer = NULL;
+
+#define PRM_FILE_SIZE_MAX	0x7FDFFFFFU /*!< Default maximum file size to split output files, 2GB-2MB as fwrite limits it to 2GB-1 on 32bit systems */
+#define FRAME_UNSET			0xFFFFFFFFU /*!< Used to signify no or invalid frame number */
+
+
+static IMG_BOOL		g_PDumpInitialised = IMG_FALSE;
+static IMG_UINT32	g_ConnectionCount = 0;
+
+
+typedef struct
+{
+	PDUMP_CHANNEL sCh;         /*!< Channel handles */
+} PDUMP_SCRIPT;
+
+typedef struct
+{
+	IMG_UINT32    ui32Init;    /*!< Count of bytes written to the init phase stream */
+	IMG_UINT32    ui32Main;    /*!< Count of bytes written to the main stream */
+	IMG_UINT32    ui32Deinit;  /*!< Count of bytes written to the deinit stream */
+} PDUMP_CHANNEL_WOFFSETS;
+
+typedef struct
+{
+	PDUMP_CHANNEL          sCh;             /*!< Channel handles */
+	PDUMP_CHANNEL_WOFFSETS sWOff;           /*!< Channel file write offsets */
+	IMG_UINT32             ui32FileIdx;     /*!< File index used when file size limit reached and a new file is started, parameter channel only */
+	IMG_UINT32             ui32MaxFileSize; /*!< Maximum file size for parameter files */
+
+	PDUMP_FILEOFFSET_T     uiZeroPageOffset; /*!< Offset of the zero page in the parameter file */
+	size_t                 uiZeroPageSize;   /*!< Size of the zero page in the parameter file */
+	IMG_CHAR               szZeroPageFilename[PDUMP_PARAM_MAX_FILE_NAME]; /*!< PRM file name where the zero page was pdumped */
+} PDUMP_PARAMETERS;
+
+static PDUMP_SCRIPT     g_PDumpScript    = { { 0, 0, 0} };
+static PDUMP_PARAMETERS g_PDumpParameters = { { 0, 0, 0}, {0, 0, 0}, 0, PRM_FILE_SIZE_MAX};
+
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+/* counter increments each time debug write is called */
+IMG_UINT32 g_ui32EveryLineCounter = 1U;
+#endif
+
+#if defined(PDUMP_DEBUG) || defined(REFCOUNT_DEBUG)
+#define PDUMP_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define PDUMP_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+/* Prototype for the test/debug state dump routine used in debugging */
+void PDumpCommonDumpState(IMG_BOOL bDumpOSLayerState);
+#undef PDUMP_TRACE_STATE
+
+
+/*****************************************************************************/
+/*	PDump Control Module Definitions                                         */
+/*****************************************************************************/
+
+typedef struct _PDUMP_CAPTURE_RANGE_
+{
+	IMG_UINT32 ui32Start;       /*!< Start frame number of range */
+	IMG_UINT32 ui32End;         /*!< End frame number of range */
+	IMG_UINT32 ui32Interval;    /*!< Frame sample rate interval */
+} PDUMP_CAPTURE_RANGE;
+
+/* No direct access to members from outside the control module - please */
+typedef struct _PDUMP_CTRL_STATE_
+{
+	IMG_BOOL            bInitPhaseActive;   /*!< State of driver initialisation phase */
+	IMG_UINT32          ui32Flags;          /*!< Unused */
+
+	IMG_UINT32          ui32DefaultCapMode; /*!< Capture mode of the dump */
+	PDUMP_CAPTURE_RANGE sCaptureRange;      /*!< The capture range for capture mode 'framed' */
+	IMG_UINT32          ui32CurrentFrame;   /*!< Current frame number */
+
+	IMG_BOOL            bCaptureOn;         /*!< Current capture status, is current frame in range */
+	IMG_BOOL            bSuspended;         /*!< Suspend flag set on unrecoverable error */
+	IMG_BOOL            bInPowerTransition; /*!< Device power transition state */
+	POS_LOCK            hLock;              /*!< Exclusive lock to this structure */
+} PDUMP_CTRL_STATE;
+
+static PDUMP_CTRL_STATE g_PDumpCtrl =
+{
+	IMG_TRUE,
+	0,
+
+	0,              /*!< Value obtained from OS PDump layer during initialisation */
+	{
+		FRAME_UNSET,
+		FRAME_UNSET,
+		1
+	},
+	0,
+
+	IMG_FALSE,
+	IMG_FALSE,
+	IMG_FALSE,
+	NULL
+};
+
+static PVRSRV_ERROR PDumpCtrlInit(IMG_UINT32 ui32InitCapMode)
+{
+	g_PDumpCtrl.ui32DefaultCapMode = ui32InitCapMode;
+	PVR_ASSERT(g_PDumpCtrl.ui32DefaultCapMode != 0);
+
+	/* Create lock for PDUMP_CTRL_STATE struct, which is shared between pdump client
+	   and PDumping app. This lock will help us serialize calls from pdump client
+	   and PDumping app */
+	PVR_LOGR_IF_ERROR(OSLockCreate(&g_PDumpCtrl.hLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+	
+	return PVRSRV_OK;
+}
+
+static void PDumpCtrlDeInit(void)
+{
+	if (g_PDumpCtrl.hLock)
+	{
+		OSLockDestroy(g_PDumpCtrl.hLock);
+		g_PDumpCtrl.hLock = NULL;
+	}
+}
+
+static INLINE void PDumpCtrlLockAcquire(void)
+{
+	OSLockAcquire(g_PDumpCtrl.hLock);
+}
+
+static INLINE void PDumpCtrlLockRelease(void)
+{
+	OSLockRelease(g_PDumpCtrl.hLock);
+}
+
+/**********************************************************************************************************
+	NOTE:
+	The following PDumpCtrl*** functions require the PDUMP_CTRL_STATE lock be acquired BEFORE they are
+	called. This is because the PDUMP_CTRL_STATE data is shared between the PDumping App and the PDump
+	client, hence an exclusive access is required. The lock can be acquired and released by using the
+	PDumpCtrlLockAcquire & PDumpCtrlLockRelease functions respectively.
+**********************************************************************************************************/
+
+static void PDumpCtrlUpdateCaptureStatus(void)
+{
+	if (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_FRAMED)
+	{
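+		/* e.g. Start=2, End=10, Interval=4 captures frames 2, 6 and 10
+		   only; every other frame leaves bCaptureOn cleared. */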
+		if ((g_PDumpCtrl.ui32CurrentFrame >= g_PDumpCtrl.sCaptureRange.ui32Start) &&
+			(g_PDumpCtrl.ui32CurrentFrame <= g_PDumpCtrl.sCaptureRange.ui32End) &&
+			(((g_PDumpCtrl.ui32CurrentFrame - g_PDumpCtrl.sCaptureRange.ui32Start) % g_PDumpCtrl.sCaptureRange.ui32Interval) == 0))
+		{
+			g_PDumpCtrl.bCaptureOn = IMG_TRUE;
+		}
+		else
+		{
+			g_PDumpCtrl.bCaptureOn = IMG_FALSE;
+		}
+	}
+	else if (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_CONTINUOUS)
+	{
+		g_PDumpCtrl.bCaptureOn = IMG_TRUE;
+	}
+	else
+	{
+		g_PDumpCtrl.bCaptureOn = IMG_FALSE;
+		PVR_DPF((PVR_DBG_ERROR, "PDumpCtrlSetCurrentFrame: Unexpected capture mode (%x)", g_PDumpCtrl.ui32DefaultCapMode));
+	}
+
+}
+
+static void PDumpCtrlSetCurrentFrame(IMG_UINT32 ui32Frame)
+{
+	g_PDumpCtrl.ui32CurrentFrame = ui32Frame;
+	/* Mirror the value into the debug driver */
+	PDumpOSSetFrame(ui32Frame);
+
+	PDumpCtrlUpdateCaptureStatus();
+
+#if defined(PDUMP_TRACE_STATE)	
+	PDumpCommonDumpState(IMG_FALSE);
+#endif
+}
+
+static void PDumpCtrlSetDefaultCaptureParams(IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval)
+{
+	PVR_ASSERT(ui32Interval > 0);
+	PVR_ASSERT(ui32End >= ui32Start);
+	PVR_ASSERT((ui32Mode == DEBUG_CAPMODE_FRAMED) || (ui32Mode == DEBUG_CAPMODE_CONTINUOUS));
+
+	/* Set the capture range to that supplied by the PDump client tool
+	 */
+	g_PDumpCtrl.ui32DefaultCapMode = ui32Mode;
+	g_PDumpCtrl.sCaptureRange.ui32Start = ui32Start;
+	g_PDumpCtrl.sCaptureRange.ui32End = ui32End;
+	g_PDumpCtrl.sCaptureRange.ui32Interval = ui32Interval;
+
+	/* Reset the current frame on reset of the capture range; this helps to
+	 * avoid inter-pdump start frame issues when the driver is not reloaded.
+	 * No need to call PDumpCtrlUpdateCaptureStatus() direct as the set
+	 * current frame call will.
+	 */
+	PDumpCtrlSetCurrentFrame(0);
+}
+
+static INLINE IMG_BOOL PDumpCtrlCapModIsFramed(void)
+{
+	return g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_FRAMED;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCapModIsContinuous(void)
+{
+	return g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_CONTINUOUS;
+}
+
+static IMG_UINT32 PDumpCtrlGetCurrentFrame(void)
+{
+	return g_PDumpCtrl.ui32CurrentFrame;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCaptureOn(void)
+{
+	return !g_PDumpCtrl.bSuspended && g_PDumpCtrl.bCaptureOn;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCaptureRangePast(void)
+{
+	return (g_PDumpCtrl.ui32CurrentFrame > g_PDumpCtrl.sCaptureRange.ui32End);
+}
+
+/* Used to infer whether the PDump client is connected or not. */
+static INLINE IMG_BOOL PDumpCtrlCaptureRangeUnset(void)
+{
+	return ((g_PDumpCtrl.sCaptureRange.ui32Start == FRAME_UNSET) &&
+			(g_PDumpCtrl.sCaptureRange.ui32End == FRAME_UNSET));
+}
+
+static IMG_BOOL PDumpCtrlIsLastCaptureFrame(void)
+{
+	if (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_FRAMED)
+	{
+		/* Is the next capture frame within the range end limit? */
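+		/* e.g. Start=2, End=10, Interval=4: at frame 10 the next capture
+		   frame would be 14 (beyond End), so this is the last one. */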
+		if ((g_PDumpCtrl.ui32CurrentFrame + g_PDumpCtrl.sCaptureRange.ui32Interval) > g_PDumpCtrl.sCaptureRange.ui32End)
+		{
+			return IMG_TRUE;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PDumpCtrIsLastCaptureFrame: Unexpected capture mode (%x)", g_PDumpCtrl.ui32DefaultCapMode));
+	}
+
+	/* Return false for continuous capture mode, or in framed mode when further capture frames remain in the range */
+	return IMG_FALSE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlInitPhaseComplete(void)
+{
+	return !g_PDumpCtrl.bInitPhaseActive;
+}
+
+static INLINE void PDumpCtrlSetInitPhaseComplete(IMG_BOOL bIsComplete)
+{
+	if (bIsComplete)
+	{
+		g_PDumpCtrl.bInitPhaseActive = IMG_FALSE;
+		PDUMP_HEREA(102);
+	}
+	else
+	{
+		g_PDumpCtrl.bInitPhaseActive = IMG_TRUE;
+		PDUMP_HEREA(103);
+	}
+}
+
+static INLINE void PDumpCtrlSuspend(void)
+{
+	PDUMP_HEREA(104);
+	g_PDumpCtrl.bSuspended = IMG_TRUE;
+}
+
+static INLINE void PDumpCtrlResume(void)
+{
+	PDUMP_HEREA(105);
+	g_PDumpCtrl.bSuspended = IMG_FALSE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlIsDumpSuspended(void)
+{
+	return g_PDumpCtrl.bSuspended;
+}
+
+static INLINE void PDumpCtrlPowerTransitionStart(void)
+{
+	g_PDumpCtrl.bInPowerTransition = IMG_TRUE;
+}
+
+static INLINE void PDumpCtrlPowerTransitionEnd(void)
+{
+	g_PDumpCtrl.bInPowerTransition = IMG_FALSE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlInPowerTransition(void)
+{
+	return g_PDumpCtrl.bInPowerTransition;
+}
+
+static PVRSRV_ERROR PDumpCtrlIsCaptureFrame(IMG_BOOL *bIsCapturing)
+{
+	*bIsCapturing = PDumpCtrlCaptureOn();
+	return PVRSRV_OK;
+}
+
+/********************************************************************************
+	End of PDumpCtrl*** functions
+*********************************************************************************/
+
+/*
+	Wrapper functions which need to be exposed in pdump_km.h for use in other
+	pdump_*** modules safely. These functions call the specific PDumpCtrl layer
+	function after acquiring the PDUMP_CTRL_STATE lock, hence making the calls 
+	from other modules hassle free by avoiding the acquire/release CtrlLock
+	calls.
+*/
+
+void PDumpPowerTransitionStart(void)
+{
+	PDumpCtrlLockAcquire();
+	PDumpCtrlPowerTransitionStart();
+	PDumpCtrlLockRelease();
+}
+
+void PDumpPowerTransitionEnd(void)
+{
+	PDumpCtrlLockAcquire();
+	PDumpCtrlPowerTransitionEnd();
+	PDumpCtrlLockRelease();
+}
+
+IMG_BOOL PDumpInPowerTransition(void)
+{
+	IMG_BOOL bPDumpInPowerTransition = IMG_FALSE;
+	
+	PDumpCtrlLockAcquire();
+	bPDumpInPowerTransition = PDumpCtrlInPowerTransition();
+	PDumpCtrlLockRelease();
+
+	return bPDumpInPowerTransition;
+}
+
+IMG_BOOL PDumpIsDumpSuspended(void)
+{
+	IMG_BOOL bPDumpIsDumpSuspended;
+
+	PDumpCtrlLockAcquire();
+	bPDumpIsDumpSuspended = PDumpCtrlIsDumpSuspended();
+	PDumpCtrlLockRelease();
+
+	return bPDumpIsDumpSuspended;
+}
+
+/*****************************************************************************/
+/*	PDump Common Write Layer just above PDump OS Layer                       */
+/*****************************************************************************/
+
+
+/* 
+	Checks in this method were seeded from the original PDumpWriteILock()
+	and DBGDrivWriteCM() and have grown since to ensure PDump output
+	matches legacy output.
+	Note: the order of the checks in this method is important as some
+	writes have multiple pdump flags set!
+ */
+static IMG_BOOL PDumpWriteAllowed(IMG_UINT32 ui32Flags)
+{
+	/* Lock down the PDUMP_CTRL_STATE struct before calling the following
+	   PDumpCtrl*** functions. This is to avoid updates to the Control data
+	   while we are reading from it */
+	PDumpCtrlLockAcquire();
+
+	/* No writes if in framed mode and the capture range is past */
+	if (PDumpCtrlCaptureRangePast())
+	{
+		PDUMP_HERE(10);
+		goto unlockAndReturnFalse;
+	}
+
+	/* No writes while writing is suspended */
+	if (PDumpCtrlIsDumpSuspended())
+	{
+		PDUMP_HERE(11);
+		goto unlockAndReturnFalse;
+	}
+
+	/* Prevent PDumping during a power transition */
+	if (PDumpCtrlInPowerTransition())
+	{	/* except when it's flagged */
+		if (ui32Flags & PDUMP_FLAGS_POWER)
+		{
+			PDUMP_HERE(20);
+			goto unlockAndReturnTrue;
+		}
+		PDUMP_HERE(16);
+		goto unlockAndReturnFalse;
+	}
+
+	/* Always allow dumping in init phase and when persistent flagged */
+	if (ui32Flags & PDUMP_FLAGS_PERSISTENT)
+	{
+		PDUMP_HERE(12);
+		goto unlockAndReturnTrue;
+	}
+	if (!PDumpCtrlInitPhaseComplete())
+	{
+		PDUMP_HERE(15);
+		goto unlockAndReturnTrue;
+	}
+
+	/* The following checks are made when the driver has completed initialisation */
+
+	/* If PDump client connected allow continuous flagged writes */
+	if (PDUMP_IS_CONTINUOUS(ui32Flags))
+	{
+		if (PDumpCtrlCaptureRangeUnset()) /* Is client connected? */
+		{
+			PDUMP_HERE(13);
+			goto unlockAndReturnFalse;
+		}
+		PDUMP_HERE(14);
+		goto unlockAndReturnTrue;
+	}
+
+	/* No last/deinit statements allowed when not in initialisation phase */
+	if (ui32Flags & PDUMP_FLAGS_DEINIT)
+	{
+		if (PDumpCtrlInitPhaseComplete())
+		{
+			PDUMP_HERE(17);
+			PVR_DPF((PVR_DBG_ERROR, "PDumpWriteAllowed: DEINIT flag used at the wrong time outside of initialisation!"));
+			goto unlockAndReturnFalse;
+		}
+	}
+
+	/* 
+		If no flags are provided then it is FRAMED output and the frame
+		range must be checked to match the expected behaviour.
+	 */
+	if (PDumpCtrlCapModIsFramed() && !PDumpCtrlCaptureOn())
+	{
+		PDUMP_HERE(18);
+		goto unlockAndReturnFalse;
+	}
+
+	PDUMP_HERE(19);
+
+unlockAndReturnTrue:
+	/* Allow the write to take place */
+	PDumpCtrlLockRelease();
+	return IMG_TRUE;
+
+unlockAndReturnFalse:
+	PDumpCtrlLockRelease();
+	return IMG_FALSE;
+}
+
+#undef PDUMP_DEBUG_SCRIPT_LINES
+
+#if defined(PDUMP_DEBUG_SCRIPT_LINES)
+#define PDUMPOSDEBUGDRIVERWRITE(a,b,c,d) _PDumpOSDebugDriverWrite(a,b,c,d)
+static IMG_UINT32 _PDumpOSDebugDriverWrite( IMG_HANDLE psStream,
+									IMG_UINT8 *pui8Data,
+									IMG_UINT32 ui32BCount,
+									IMG_UINT32 ui32Flags)
+{
+	IMG_CHAR tmp1[80];
+	IMG_CHAR* streamName = "unkn";
+
+	if (g_PDumpScript.sCh.hDeinit == psStream)
+		streamName = "dein";
+	else if (g_PDumpScript.sCh.hInit == psStream)
+		streamName = "init";
+	else if (g_PDumpScript.sCh.hMain == psStream)
+		streamName = "main";
+
+	(void) PDumpOSSprintf(tmp1, 80, "-- %s, %x\n", streamName, ui32Flags);
+	(void) PDumpOSDebugDriverWrite(psStream, tmp1, OSStringLength(tmp1));
+
+	return PDumpOSDebugDriverWrite(psStream, pui8Data, ui32BCount);
+}
+#else
+#define PDUMPOSDEBUGDRIVERWRITE(a,b,c,d) PDumpOSDebugDriverWrite(a,b,c)
+#endif
+
+
+/**************************************************************************/ /*!
+ @Function		PDumpWriteToBuffer
+ @Description	Write the supplied data to the PDump stream buffer and attempt
+                to handle any buffer full conditions to ensure all the data
+                requested to be written, is.
+
+ @Input			psStream	The address of the PDump stream buffer to write to
+ @Input			pui8Data    Pointer to the data to be written
+ @Input			ui32BCount	Number of bytes to write
+ @Input			ui32Flags	PDump statement flags.
+
+ @Return 		IMG_UINT32  Actual number of bytes written, may be less than
+ 	 	 	 	 	 	 	ui32BCount when a buffer-full condition could not
+ 	 	 	 	 	 	 	be avoided.
+*/ /***************************************************************************/
+static IMG_UINT32 PDumpWriteToBuffer(IMG_HANDLE psStream, IMG_UINT8 *pui8Data,
+		IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags)
+{
+	IMG_UINT32	ui32BytesWritten = 0;
+	IMG_UINT32	ui32Off = 0;
+
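+	/* Loop until every byte is consumed: the debug driver may accept only
+	   part of the data, return 0 (buffer full, so back off and retry) or
+	   return 0xFFFFFFFF (unrecoverable error). */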
+	while (ui32BCount > 0)
+	{
+		ui32BytesWritten = PDUMPOSDEBUGDRIVERWRITE(psStream, &pui8Data[ui32Off], ui32BCount, ui32Flags);
+
+		if (ui32BytesWritten == 0)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "PDumpWriteToBuffer: Zero bytes written - release execution"));
+			PDumpOSReleaseExecution();
+		}
+
+		if (ui32BytesWritten != 0xFFFFFFFFU)
+		{
+			if (ui32BCount != ui32BytesWritten)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "PDumpWriteToBuffer: partial write of %d bytes of %d bytes", ui32BytesWritten, ui32BCount));
+			}
+			ui32Off += ui32BytesWritten;
+			ui32BCount -= ui32BytesWritten;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToBuffer: Unrecoverable error received from the debug driver"));
+			if( PDumpOSGetCtrlState(psStream, DBG_GET_STATE_FLAG_IS_READONLY) )
+			{
+				/* Fatal - suspend PDump to prevent flooding the kernel log buffer */
+				PVR_LOG(("PDump suspended, debug driver out of memory"));
+				/*
+					Acquire the control lock before updating "suspended" state. This may not be required
+					because "this" is the context which checks the "suspended" state in PDumpWriteAllowed
+					before calling this function. So, this update is mainly for other contexts.
+					Also, all the other contexts which will/wish-to read the "suspended" state ought to be
+					waiting on the bridge lock first and then the PDUMP_OSLOCK (to pdump into script or 
+					parameter buffer). However, this acquire may be useful in case the PDump call is being
+					made from a direct bridge
+				*/
+				PDumpCtrlLockAcquire();
+				PDumpCtrlSuspend();
+				PDumpCtrlLockRelease();
+			}
+			return 0;
+		}
+	}
+
+	/* return the total number of bytes consumed and reset the working counters */
+	ui32BCount = ui32Off; ui32Off = 0; ui32BytesWritten = 0;
+
+	return ui32BCount;
+}
+
+
+/**************************************************************************/ /*!
+ @Function		PDumpWriteToChannel
+ @Description	Write the supplied data to the PDump channel specified obeying
+ 	            flags to write to the necessary channel buffers.
+
+ @Input			psChannel	The address of the script or parameter channel object
+ @Input/Output	psWOff		The address of the channel write offsets object to
+                            update on successful writing
+ @Input			pui8Data    Pointer to the data to be written
+ @Input			ui32Size	Number of bytes to write
+ @Input			ui32Flags	PDump statement flags. They may be clear (no
+                            flags, implying framed data), continuous flagged,
+                            persistent flagged, or continuous AND persistent
+                            flagged; the flags determine how the data is
+                            output.
+                            On the first test app run after driver load, the
+                            Display Controller dumps a resource that is both
+                            continuous and persistent and this needs writing to
+                            both the init (persistent) and main (continuous)
+                            channel buffers to ensure the data is dumped in
+                            subsequent test runs without reloading the driver.
+    						In subsequent runs the PDump client 'freezes' the
+    						init buffer so that only one dump of persistent data
+    						for the "extended init phase" is captured to the
+    						init buffer.
+
+ @Return 		IMG_BOOL    True when the data has been consumed, false otherwise
+*/ /***************************************************************************/
+static IMG_BOOL PDumpWriteToChannel(PDUMP_CHANNEL* psChannel, PDUMP_CHANNEL_WOFFSETS* psWOff,
+		IMG_UINT8* pui8Data, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags)
+{
+	IMG_UINT32   ui32BytesWritten = 0;
+
+	PDUMP_HERE(210);
+
+	/* Dump data to deinit buffer when flagged as deinit */
+	if (ui32Flags & PDUMP_FLAGS_DEINIT)
+	{
+		PDUMP_HERE(211);
+		ui32BytesWritten = PDumpWriteToBuffer(psChannel->hDeinit, pui8Data, ui32Size, ui32Flags);
+		if (ui32BytesWritten != ui32Size)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: DEINIT Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+			PDUMP_HERE(212);
+			return IMG_FALSE;
+		}
+
+		if (psWOff)
+		{
+			psWOff->ui32Deinit += ui32Size;
+		}
+
+	}
+	else
+	{
+		IMG_BOOL bDumpedToInitAlready = IMG_FALSE;
+		IMG_HANDLE*  phStream = NULL;
+		IMG_UINT32*  pui32Offset = NULL;
+
+		/* Always append persistent data to init phase so it's available on
+		 * subsequent app runs, but also to the main stream if client connected */
+		if (ui32Flags & PDUMP_FLAGS_PERSISTENT)
+		{
+			PDUMP_HERE(213);
+			ui32BytesWritten = PDumpWriteToBuffer(	psChannel->hInit, pui8Data, ui32Size, ui32Flags);
+			if (ui32BytesWritten != ui32Size)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: PERSIST Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+				PDUMP_HERE(214);
+				return IMG_FALSE;
+			}
+
+			bDumpedToInitAlready = IMG_TRUE;
+			if (psWOff)
+			{
+				psWOff->ui32Init += ui32Size;
+			}
+
+			/* Don't write continuous data if client not connected */
+			PDumpCtrlLockAcquire();
+			if (PDUMP_IS_CONTINUOUS(ui32Flags) && PDumpCtrlCaptureRangeUnset())
+			{
+				PDumpCtrlLockRelease();
+				return IMG_TRUE;
+			}
+			PDumpCtrlLockRelease();
+		}
+
+		/* Prepare to write the data to the main stream for
+		 * persistent, continuous or framed data. Override and use init
+		 * stream if driver still in init phase and we have not written 
+		 * to it yet.*/
+		PDumpCtrlLockAcquire();
+		if (!PDumpCtrlInitPhaseComplete() && !bDumpedToInitAlready)
+		{
+			PDUMP_HERE(215);
+			phStream = &psChannel->hInit;
+			if (psWOff)
+			{
+				pui32Offset = &psWOff->ui32Init;
+			}
+		}
+		else
+		{
+			PDUMP_HERE(216);
+			phStream = &psChannel->hMain;
+			if (psWOff)
+			{
+				pui32Offset = &psWOff->ui32Main;
+			}
+		}
+		PDumpCtrlLockRelease();
+
+		/* Write the data to the stream */
+		ui32BytesWritten = PDumpWriteToBuffer(*phStream, pui8Data, ui32Size, ui32Flags);
+		if (ui32BytesWritten != ui32Size)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: MAIN Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+			PDUMP_HERE(217);
+			return IMG_FALSE;
+		}
+
+		if (pui32Offset)
+		{
+			*pui32Offset += ui32BytesWritten;
+		}
+	}
+
+	return IMG_TRUE;
+}
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+
+static IMG_UINT32 _GenerateChecksum(void *pvData, size_t uiSize)
+{
+	IMG_UINT32 ui32Sum = 0;
+	IMG_UINT32 *pui32Data = pvData;
+	IMG_UINT8 *pui8Data = pvData;
+	IMG_UINT32 i;
+	IMG_UINT32 ui32LeftOver;
+
+	for(i = 0; i < uiSize / sizeof(IMG_UINT32); i++)
+	{
+		ui32Sum += pui32Data[i];
+	}
+
+	ui32LeftOver = uiSize % sizeof(IMG_UINT32);
+
+	while(ui32LeftOver)
+	{
+		ui32Sum += pui8Data[uiSize - ui32LeftOver];
+		ui32LeftOver--;
+	}
+
+	return ui32Sum;
+}
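+/* Example: for the six bytes { 0x01, 0x00, 0x00, 0x00, 0x02, 0x00 } the sum
+   is the 32-bit word 0x00000001 plus the left-over bytes 0x02 and 0x00,
+   giving 3 on a little-endian CPU (the checksum is endian-dependent). */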
+
+#endif
+
+PVRSRV_ERROR PDumpWriteParameter(IMG_UINT8 *pui8Data, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags,
+		IMG_UINT32* pui32FileOffset, IMG_CHAR* aszFilenameStr)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bPDumpCtrlInitPhaseComplete = IMG_FALSE;
+
+	PVR_ASSERT(pui8Data && (ui32Size!=0));
+	PVR_ASSERT(pui32FileOffset && aszFilenameStr);
+
+	PDUMP_HERE(1);
+
+	if (!PDumpWriteAllowed(ui32Flags))
+	{
+		/* Abort the write for the above reason but indicate what happened to
+		 * the caller to avoid disrupting the driver; the caller should treat
+		 * it as OK but skip any related PDump writes to the script file. */
+		return PVRSRV_ERROR_PDUMP_NOT_ALLOWED;
+	}
+
+	PDUMP_HERE(2);
+
+	PDumpCtrlLockAcquire();
+	bPDumpCtrlInitPhaseComplete = PDumpCtrlInitPhaseComplete();
+	PDumpCtrlLockRelease();
+
+	if (!bPDumpCtrlInitPhaseComplete || (ui32Flags & PDUMP_FLAGS_PERSISTENT))
+	{
+		PDUMP_HERE(3);
+
+		/* Init phase stream not expected to get above the file size max */
+		PVR_ASSERT(g_PDumpParameters.sWOff.ui32Init < g_PDumpParameters.ui32MaxFileSize);
+
+		/* Return the file write offset at which the parameter data was dumped */
+		*pui32FileOffset = g_PDumpParameters.sWOff.ui32Init;
+	}
+	else
+	{
+		PDUMP_HERE(4);
+
+		/* Do we need to signal the PDump client that a split is required? */
+		if (g_PDumpParameters.sWOff.ui32Main + ui32Size > g_PDumpParameters.ui32MaxFileSize)
+		{
+			PDUMP_HERE(5);
+			PDumpOSSetSplitMarker(g_PDumpParameters.sCh.hMain, g_PDumpParameters.sWOff.ui32Main);
+			g_PDumpParameters.ui32FileIdx++;
+			g_PDumpParameters.sWOff.ui32Main = 0;
+		}
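+		/* After a split the main write offset restarts at zero and the
+		 * incremented file index is used to build the parameter file name
+		 * further below. */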
+
+		/* Return the file write offset at which the parameter data was dumped */
+		*pui32FileOffset = g_PDumpParameters.sWOff.ui32Main;
+	}
+
+	/* Create the parameter file name, based on index, to be used in the script */
+	if (g_PDumpParameters.ui32FileIdx == 0)
+	{
+		eError = PDumpOSSprintf(aszFilenameStr, PDUMP_PARAM_MAX_FILE_NAME, PDUMP_PARAM_0_FILE_NAME);
+	}
+	else
+	{
+		PDUMP_HERE(6);
+		eError = PDumpOSSprintf(aszFilenameStr, PDUMP_PARAM_MAX_FILE_NAME, PDUMP_PARAM_N_FILE_NAME, g_PDumpParameters.ui32FileIdx);
+	}
+	PVR_LOGG_IF_ERROR(eError, "PDumpOSSprintf", errExit);
+
+	/* Write the parameter data to the parameter channel */
+	eError = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+	if (!PDumpWriteToChannel(&g_PDumpParameters.sCh, &g_PDumpParameters.sWOff, pui8Data, ui32Size, ui32Flags))
+	{
+		PDUMP_HERE(7);
+		PVR_LOGG_IF_ERROR(eError, "PDumpWrite", errExit);
+	}
+#if defined(PDUMP_DEBUG_OUTFILES)
+	else
+	{
+		IMG_UINT32 ui32Checksum;
+		PDUMP_GET_SCRIPT_STRING();
+
+		ui32Checksum = _GenerateChecksum(pui8Data, ui32Size);
+
+		/* CHK CHKSUM SIZE PRMOFFSET PRMFILE */
+		eError = PDumpOSBufprintf(hScript, ui32MaxLen, "-- CHK 0x%08X 0x%08X 0x%08X %s",
+									ui32Checksum,
+									ui32Size,
+									*pui32FileOffset,
+									aszFilenameStr);
+		if(eError != PVRSRV_OK)
+		{
+			goto errExit;
+		}
+
+		PDumpWriteScript(hScript, ui32Flags);
+	}
+#endif
+
+	return PVRSRV_OK;
+
+errExit:
+	return eError;
+}
+
+
+IMG_BOOL PDumpWriteScript(IMG_HANDLE hString, IMG_UINT32 ui32Flags)
+{
+	PVR_ASSERT(hString);
+
+	PDUMP_HERE(201);
+
+	if (!PDumpWriteAllowed(ui32Flags))
+	{
+		/* Abort the write for the above reasons but indicate it was OK to
+		 * the caller to avoid disrupting the driver */
+		return IMG_TRUE;
+	}
+
+	return PDumpWriteToChannel(&g_PDumpScript.sCh, NULL, (IMG_UINT8*) hString, (IMG_UINT32) OSStringLength((IMG_CHAR*) hString), ui32Flags);
+}
+
+
+/*****************************************************************************/
+
+
+
+
+
+
+struct _PDUMP_CONNECTION_DATA_ {
+	IMG_UINT32				ui32RefCount;
+	POS_LOCK				hLock;
+	DLLIST_NODE				sListHead;
+	IMG_BOOL				bLastInto;
+	IMG_UINT32				ui32LastSetFrameNumber;
+	IMG_BOOL				bWasInCaptureRange;
+	IMG_BOOL				bIsInCaptureRange;
+	IMG_BOOL				bLastTransitionFailed;
+	SYNC_CONNECTION_DATA	*psSyncConnectionData;
+};
+
+static PDUMP_CONNECTION_DATA * _PDumpConnectionAcquire(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psPDumpConnectionData->hLock);
+	ui32RefCount = ++psPDumpConnectionData->ui32RefCount;
+	OSLockRelease(psPDumpConnectionData->hLock);
+
+	PDUMP_REFCOUNT_PRINT("%s: PDump connection %p, refcount = %d",
+						 __FUNCTION__, psPDumpConnectionData, ui32RefCount);
+
+	return psPDumpConnectionData;
+}
+
+static void _PDumpConnectionRelease(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psPDumpConnectionData->hLock);
+	ui32RefCount = --psPDumpConnectionData->ui32RefCount;
+	OSLockRelease(psPDumpConnectionData->hLock);
+
+	if (ui32RefCount == 0)
+	{
+		OSLockDestroy(psPDumpConnectionData->hLock);
+		PVR_ASSERT(dllist_is_empty(&psPDumpConnectionData->sListHead));
+		OSFreeMem(psPDumpConnectionData);
+	}
+
+	PDUMP_REFCOUNT_PRINT("%s: PDump connection %p, refcount = %d",
+						 __FUNCTION__, psPDumpConnectionData, ui32RefCount);
+}
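+
+/* Illustrative sketch (guarded out, never compiled): the expected
+ * acquire/release pairing around any use of the connection data. The
+ * wrapper function and the guard macro are hypothetical, not part of
+ * the driver.
+ */
+#if defined(PDUMP_DOC_EXAMPLES)
+static void _ExampleConnectionUse(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+	/* Take a reference so the structure cannot be freed while in use */
+	PDUMP_CONNECTION_DATA *psRef = _PDumpConnectionAcquire(psPDumpConnectionData);
+
+	/* ... operate on the connection data ... */
+
+	/* Drop the reference; the final release destroys the lock and frees it */
+	_PDumpConnectionRelease(psRef);
+}
+#endif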
+
+/**************************************************************************
+ * Function Name  : GetTempBuffer
+ * Inputs         : None
+ * Outputs        : None
+ * Returns        : Temporary buffer address, or NULL
+ * Description    : Get temporary buffer address.
+**************************************************************************/
+static void *GetTempBuffer(void)
+{
+	/*
+	 * Allocate the temporary buffer, if it hasn't been allocated already.
+	 * Return the address of the temporary buffer, or NULL if it
+	 * couldn't be allocated.
+	 * It is expected that the buffer will be allocated once, at driver
+	 * load time, and left in place until the driver unloads.
+	 */
+
+	if (gpvTempBuffer == NULL)
+	{
+		gpvTempBuffer = OSAllocMem(PDUMP_TEMP_BUFFER_SIZE);
+		if (gpvTempBuffer == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "GetTempBuffer: OSAllocMem failed"));
+		}
+	}
+
+	return gpvTempBuffer;
+}
+
+static void FreeTempBuffer(void)
+{
+
+	if (gpvTempBuffer != NULL)
+	{
+		OSFreeMem(gpvTempBuffer);
+		gpvTempBuffer = NULL;
+	}
+}
+
+/**************************************************************************
+ * Function Name  : PDumpParameterChannelZeroedPageBlock
+ * Inputs         : None
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Set up the zero page block in the parameter stream
+**************************************************************************/
+static PVRSRV_ERROR PDumpParameterChannelZeroedPageBlock(void)
+{
+	IMG_UINT8 aui8Zero[32] = { 0 };
+	size_t uiBytesToWrite;
+	PVRSRV_ERROR eError;
+
+	g_PDumpParameters.uiZeroPageSize = OSGetPageSize();
+
+	/* Ensure the zero page size is a multiple of the zero source on the stack */
+	PVR_ASSERT(g_PDumpParameters.uiZeroPageSize % sizeof(aui8Zero) == 0);
+
+	/* the first write gets the parameter file name and stream offset,
+	 * then subsequent writes do not need to know this as the data is
+	 * contiguous in the stream
+	 */
+	PDUMP_LOCK();
+	eError = PDumpWriteParameter(aui8Zero,
+							sizeof(aui8Zero),
+							0,
+							&g_PDumpParameters.uiZeroPageOffset,
+							g_PDumpParameters.szZeroPageFilename);
+
+	if(eError != PVRSRV_OK)
+	{
+		/* Also treat PVRSRV_ERROR_PDUMP_NOT_ALLOWED as an error in this case
+		 * as it should never happen since all writes during driver Init are allowed.
+		 */
+		goto err_write;
+	}
+
+	uiBytesToWrite = g_PDumpParameters.uiZeroPageSize - sizeof(aui8Zero);
+
+	while(uiBytesToWrite)
+	{
+		IMG_BOOL bOK;
+
+		bOK = PDumpWriteToChannel(&g_PDumpParameters.sCh, &g_PDumpParameters.sWOff,
+									aui8Zero,
+									sizeof(aui8Zero), 0);
+
+		if(!bOK)
+		{
+			eError = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+			goto err_write;
+		}
+
+		uiBytesToWrite -= sizeof(aui8Zero);
+	}
+
+err_write:
+	PDUMP_UNLOCK();
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to initialise parameter stream zero block"));
+	}
+
+	return eError;
+}
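+
+/* Worked example: on a 4096-byte page with the 32-byte zero source above,
+ * the initial PDumpWriteParameter() covers the first 32 bytes and the loop
+ * then issues (4096 - 32) / 32 = 127 further 32-byte writes, leaving a full
+ * page of zeros contiguous in the parameter stream.
+ */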
+
+/**************************************************************************
+ * Function Name  : PDumpGetParameterZeroPageInfo
+ * Inputs         : None
+ * Outputs        : puiZeroPageOffset: will be set to the offset of the zero page
+ *                : puiZeroPageSize: will be set to the size of the zero page
+ *                : ppszZeroPageFilename: will be set to a pointer to the PRM file name
+ *                :                       containing the zero page
+ * Returns        : None
+ * Description    : Get information about the zero page
+**************************************************************************/
+void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset,
+					size_t *puiZeroPageSize,
+					const IMG_CHAR **ppszZeroPageFilename)
+{
+	*puiZeroPageOffset = g_PDumpParameters.uiZeroPageOffset;
+	*puiZeroPageSize = g_PDumpParameters.uiZeroPageSize;
+	*ppszZeroPageFilename = g_PDumpParameters.szZeroPageFilename;
+}
+
+PVRSRV_ERROR PDumpInitCommon(void)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32InitCapMode = 0;
+	IMG_CHAR* pszEnvComment = NULL;
+
+	PDUMP_HEREA(2010);
+
+	/* Allocate temporary buffer for copying from user space */
+	(void) GetTempBuffer();
+
+	/* create the global PDump lock */
+	eError = PDumpCreateLockKM();
+	PVR_LOGG_IF_ERROR(eError, "PDumpCreateLockKM", errExit);
+
+	/* Call environment specific PDump initialisation */
+	eError = PDumpOSInit(&g_PDumpParameters.sCh, &g_PDumpScript.sCh, &ui32InitCapMode, &pszEnvComment);
+	PVR_LOGG_IF_ERROR(eError, "PDumpOSInit", errExitLock);
+
+	/* Initialise PDump control module in common layer */
+	eError = PDumpCtrlInit(ui32InitCapMode);
+	PVR_LOGG_IF_ERROR(eError, "PDumpCtrlInit", errExitOSDeInit);
+
+	/* Test PDump initialised and ready by logging driver details */
+	eError = PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Driver Product Version: %s - %s (%s)", PVRVERSION_STRING, PVR_BUILD_DIR, PVR_BUILD_TYPE);
+	PVR_LOGG_IF_ERROR(eError, "PDumpCommentWithFlags", errExitCtrl);
+	if (pszEnvComment != NULL)
+	{
+		eError = PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "%s", pszEnvComment);
+		PVR_LOGG_IF_ERROR(eError, "PDumpCommentWithFlags", errExitCtrl);
+	}
+	eError = PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Start of Init Phase");
+	PVR_LOGG_IF_ERROR(eError, "PDumpCommentWithFlags", errExitCtrl);
+
+	eError = PDumpParameterChannelZeroedPageBlock();
+	PVR_LOGG_IF_ERROR(eError, "PDumpParameterChannelZeroedPageBlock", errExitCtrl);
+
+	g_PDumpInitialised = IMG_TRUE;
+
+	PDUMP_HEREA(2011);
+
+	return PVRSRV_OK;
+
+errExitCtrl:
+	PDumpCtrlDeInit();
+errExitOSDeInit:
+	PDUMP_HEREA(2018);
+	PDumpOSDeInit(&g_PDumpParameters.sCh, &g_PDumpScript.sCh);
+errExitLock:
+	PDUMP_HEREA(2019);
+	PDumpDestroyLockKM();
+errExit:
+	return eError;
+}
+
+void PDumpDeInitCommon(void)
+{
+	PDUMP_HEREA(2020);
+
+	g_PDumpInitialised = IMG_FALSE;
+
+	/* Free temporary buffer */
+	FreeTempBuffer();
+
+	/* DeInit the PDUMP_CTRL_STATE data */
+	PDumpCtrlDeInit();
+
+	/* Call environment specific PDump Deinitialisation */
+	PDumpOSDeInit(&g_PDumpParameters.sCh, &g_PDumpScript.sCh);
+
+	/* take down the global PDump lock */
+	PDumpDestroyLockKM();
+}
+
+IMG_BOOL PDumpReady(void)
+{
+	return g_PDumpInitialised;
+}
+
+void PDumpStopInitPhase(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient)
+{
+	/* Check with the OS we are running on */
+	if (PDumpOSAllowInitPhaseToComplete(bPDumpClient, bInitClient))
+	{
+		if (bInitClient)
+		{
+			/* We only output this once, for OSs that end the init phase via bInitClient */
+			PDUMPCOMMENT("Stop Init Phase");
+		}
+		PDumpCtrlLockAcquire();
+		PDumpCtrlSetInitPhaseComplete(IMG_TRUE);
+		PDumpCtrlLockRelease();
+	}
+}
+
+PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame)
+{
+	PDumpCtrlLockAcquire();
+	*pbIsLastCaptureFrame = PDumpCtrlIsLastCaptureFrame();
+	PDumpCtrlLockRelease();
+
+	return PVRSRV_OK;
+}
+
+
+
+typedef struct _PDUMP_Transition_DATA_ {
+	PFN_PDUMP_TRANSITION	pfnCallback;
+	void					*hPrivData;
+	PDUMP_CONNECTION_DATA	*psPDumpConnectionData;
+	DLLIST_NODE				sNode;
+} PDUMP_Transition_DATA;
+
+PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+											  PFN_PDUMP_TRANSITION pfnCallback,
+											  void *hPrivData,
+											  void **ppvHandle)
+{
+	PDUMP_Transition_DATA *psData;
+	PVRSRV_ERROR eError;
+
+	psData = OSAllocMem(sizeof(*psData));
+	if (psData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	/* Setup the callback and add it to the list for this process */
+	psData->pfnCallback = pfnCallback;
+	psData->hPrivData = hPrivData;
+
+	OSLockAcquire(psPDumpConnectionData->hLock);
+	dllist_add_to_head(&psPDumpConnectionData->sListHead, &psData->sNode);
+	OSLockRelease(psPDumpConnectionData->hLock);
+
+	/* Take a reference on the connection so it doesn't get freed too early */
+	psData->psPDumpConnectionData = _PDumpConnectionAcquire(psPDumpConnectionData);
+	*ppvHandle = psData;
+
+	return PVRSRV_OK;
+
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+void PDumpUnregisterTransitionCallback(void *pvHandle)
+{
+	PDUMP_Transition_DATA *psData = pvHandle;
+
+	OSLockAcquire(psData->psPDumpConnectionData->hLock);
+	dllist_remove_node(&psData->sNode);
+	OSLockRelease(psData->psPDumpConnectionData->hLock);
+	_PDumpConnectionRelease(psData->psPDumpConnectionData);
+	OSFreeMem(psData);
+}
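+
+/* Illustrative sketch (guarded out, never compiled): a minimal register/
+ * unregister pairing for a Transition callback. The callback body, the NULL
+ * private data and the guard macro are hypothetical; the callback signature
+ * is inferred from the pfnCallback call site in PDumpTransition() below.
+ */
+#if defined(PDUMP_DOC_EXAMPLES)
+static PVRSRV_ERROR _ExampleTransitionCB(void *hPrivData, IMG_BOOL bInto,
+                                         IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+	PVR_UNREFERENCED_PARAMETER(bInto);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	/* Re-emit or tidy up state when entering/leaving the capture range */
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _ExampleUse(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+	void *pvHandle;
+	PVRSRV_ERROR eError;
+
+	eError = PDumpRegisterTransitionCallback(psPDumpConnectionData,
+	                                         _ExampleTransitionCB,
+	                                         NULL, &pvHandle);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* ... later, when the owning resource is destroyed ... */
+	PDumpUnregisterTransitionCallback(pvHandle);
+	return PVRSRV_OK;
+}
+#endif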
+
+PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags)
+{
+	DLLIST_NODE *psNode, *psNext;
+	PVRSRV_ERROR eError;
+
+	/* Only call the callbacks if we've really done a Transition */
+	if (bInto != psPDumpConnectionData->bLastInto)
+	{
+		OSLockAcquire(psPDumpConnectionData->hLock);
+		/* We're Transitioning either into or out of capture range */
+		dllist_foreach_node(&psPDumpConnectionData->sListHead, psNode, psNext)
+		{
+			PDUMP_Transition_DATA *psData =
+				IMG_CONTAINER_OF(psNode, PDUMP_Transition_DATA, sNode);
+
+			eError = psData->pfnCallback(psData->hPrivData, bInto, ui32PDumpFlags);
+
+			if (eError != PVRSRV_OK)
+			{
+				OSLockRelease(psPDumpConnectionData->hLock);
+				return eError;
+			}
+		}
+		OSLockRelease(psPDumpConnectionData->hLock);
+
+		if (bInto)
+		{
+			SyncConnectionPDumpSyncBlocks(psPDumpConnectionData->psSyncConnectionData);
+		}
+		psPDumpConnectionData->bLastInto = bInto;
+	}
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing)
+{
+	PDumpCtrlLockAcquire();
+	PDumpCtrlIsCaptureFrame(bIsCapturing);
+	PDumpCtrlLockRelease();
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+                                     IMG_UINT32 ui32Frame)
+{
+	PDUMP_CONNECTION_DATA *psPDumpConnectionData = psConnection->psPDumpConnectionData;
+	IMG_BOOL bWasInCaptureRange = IMG_FALSE;
+	IMG_BOOL bIsInCaptureRange = IMG_FALSE;
+	PVRSRV_ERROR eError;
+
+	/*
+		Note:
+		As we can't test whether the new frame will be in capture range
+		before we set the frame number, and we don't want to roll back
+		the frame number if we fail, we have to save the "transient"
+		data which decides if we're entering or exiting capture range,
+		along with a failure boolean, so we know what's required on a
+		retry.
+	*/
+	if (psPDumpConnectionData->ui32LastSetFrameNumber != ui32Frame)
+	{
+		(void) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Set pdump frame %u", ui32Frame);
+
+		/*
+			The boolean values below decide whether the PDump transition
+			should trigger because of the current context setting the
+			frame number; hence the calls below must execute atomically,
+			giving no other context a chance to transition in between.
+		*/
+		PDumpCtrlLockAcquire();
+
+		PDumpCtrlIsCaptureFrame(&bWasInCaptureRange);
+		PDumpCtrlSetCurrentFrame(ui32Frame);
+		PDumpCtrlIsCaptureFrame(&bIsInCaptureRange);
+
+		PDumpCtrlLockRelease();
+
+		psPDumpConnectionData->ui32LastSetFrameNumber = ui32Frame;
+
+		/* Save the Transition data in case we fail the Transition */
+		psPDumpConnectionData->bWasInCaptureRange = bWasInCaptureRange;
+		psPDumpConnectionData->bIsInCaptureRange = bIsInCaptureRange;
+	}
+	else if (psPDumpConnectionData->bLastTransitionFailed)
+	{
+		/* Load the Transition data so we can try again */
+		bWasInCaptureRange = psPDumpConnectionData->bWasInCaptureRange;
+		bIsInCaptureRange = psPDumpConnectionData->bIsInCaptureRange;
+	}
+	else
+	{
+		/* New frame is the same as the last frame set and the last
+		 * transition succeeded, no need to perform another transition.
+		 */
+		return PVRSRV_OK;
+	}
+
+	if (!bWasInCaptureRange && bIsInCaptureRange)
+	{
+		eError = PDumpTransition(psPDumpConnectionData, IMG_TRUE, PDUMP_FLAGS_NONE);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_Transition;
+		}
+	}
+	else if (bWasInCaptureRange && !bIsInCaptureRange)
+	{
+		eError = PDumpTransition(psPDumpConnectionData, IMG_FALSE, PDUMP_FLAGS_NONE);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_Transition;
+		}
+	}
+	else
+	{
+		/* Here both previous and current frames are in or out of range.
+		 * There is no transition in this case.
+		 */
+	}
+
+	psPDumpConnectionData->bLastTransitionFailed = IMG_FALSE;
+	return PVRSRV_OK;
+
+fail_Transition:
+	psPDumpConnectionData->bLastTransitionFailed = IMG_TRUE;
+	return eError;
+}
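+
+/* Transition decision above, summarised as (was-in-range, is-in-range):
+ *   (no,  yes) -> PDumpTransition(..., IMG_TRUE,  ...)  enter capture range
+ *   (yes, no ) -> PDumpTransition(..., IMG_FALSE, ...)  leave capture range
+ *   (no,  no ) or (yes, yes) -> no transition required
+ */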
+
+PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+                             PVRSRV_DEVICE_NODE * psDeviceNode,
+                             IMG_UINT32 ui32Frame)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if defined(PDUMP_TRACE_STATE)
+	PVR_DPF((PVR_DBG_WARNING, "PDumpSetFrameKM: ui32Frame( %d )", ui32Frame));
+#endif
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+	(void) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Set pdump frame %u (pre)", ui32Frame);
+#endif
+
+	eError = _PDumpSetFrameKM(psConnection, ui32Frame);
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_LOG_ERROR(eError, "_PDumpSetFrameKM");
+	}
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+	(void) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Set pdump frame %u (post)", ui32Frame);
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+                             PVRSRV_DEVICE_NODE * psDeviceNode,
+                             IMG_UINT32* pui32Frame)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/*
+		It may be safe to avoid acquiring this lock here, as all the other
+		calls which read/modify the current frame wait on the PDump Control
+		bridge lock first. Also, as of now, the PDumping app never modifies
+		the current frame through a call which acquires the global bridge
+		lock. Still, as a legacy, we acquire the lock and then read.
+	*/
+	PDumpCtrlLockAcquire();
+
+	*pui32Frame = PDumpCtrlGetCurrentFrame();
+
+	PDumpCtrlLockRelease();
+	return eError;
+}
+
+PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+                                           IMG_UINT32 ui32Start,
+                                           IMG_UINT32 ui32End,
+                                           IMG_UINT32 ui32Interval,
+                                           IMG_UINT32 ui32MaxParamFileSize)
+{
+	/*
+		Acquire PDUMP_CTRL_STATE struct lock before modifications as a 
+		PDumping app may be reading the state data for some checks
+	*/
+	PDumpCtrlLockAcquire();
+	PDumpCtrlSetDefaultCaptureParams(ui32Mode, ui32Start, ui32End, ui32Interval);
+	PDumpCtrlLockRelease();
+
+	if (ui32MaxParamFileSize == 0)
+	{
+		g_PDumpParameters.ui32MaxFileSize = PRM_FILE_SIZE_MAX;
+	}
+	else
+	{
+		g_PDumpParameters.ui32MaxFileSize = ui32MaxParamFileSize;
+	}
+	return PVRSRV_OK;
+}
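+
+/* Usage note: passing 0 for ui32MaxParamFileSize selects the default
+ * PRM_FILE_SIZE_MAX; any other value overrides the split threshold used by
+ * PDumpWriteParameter() above. Illustrative call (argument values
+ * hypothetical):
+ *     PDumpSetDefaultCaptureParamsKM(0 /* mode */, 0 /* start */,
+ *                                    10 /* end */, 1 /* interval */,
+ *                                    0 /* default max PRM size */);
+ */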
+
+
+/**************************************************************************
+ * Function Name  : PDumpReg32
+ * Inputs         : pszPDumpRegName, register offset, and value to write
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a register write
+**************************************************************************/
+PVRSRV_ERROR PDumpReg32(IMG_CHAR	*pszPDumpRegName,
+						IMG_UINT32	ui32Reg,
+						IMG_UINT32	ui32Data,
+						IMG_UINT32	ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpReg32"));
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X", pszPDumpRegName, ui32Reg, ui32Data);
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
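+
+/* Worked example (register bank name and values illustrative): a call such as
+ *     PDumpReg32("SYSREG", 0x0A00, 0x00000001, PDUMP_FLAGS_CONTINUOUS);
+ * emits the script line
+ *     WRW :SYSREG:0x00000A00 0x00000001
+ */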
+
+
+/**************************************************************************
+ * Function Name  : PDumpReg64
+ * Inputs         : pszPDumpRegName, register offset, and value to write
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a register write
+**************************************************************************/
+PVRSRV_ERROR PDumpReg64(IMG_CHAR	*pszPDumpRegName,
+						IMG_UINT32	ui32Reg,
+						IMG_UINT64	ui64Data,
+						IMG_UINT32	ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpRegKM"));
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X 0x%010llX", pszPDumpRegName, ui32Reg, ui64Data);
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpRegLabelToReg64
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a register write from a register label
+**************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToReg64(IMG_CHAR *pszPDumpRegName,
+                                  IMG_UINT32 ui32RegDst,
+                                  IMG_UINT32 ui32RegSrc,
+                                  IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpRegLabelToReg64"));
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X :%s:0x%08X", pszPDumpRegName, ui32RegDst, pszPDumpRegName, ui32RegSrc);
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+
+}
+
+/**************************************************************************
+ * Function Name  : PDumpRegLabelToMem32
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a memory write from a register label
+**************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName,
+                                  IMG_UINT32 ui32Reg,
+                                  PMR *psPMR,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                  IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpRegLabelToMem32"));
+
+	eErr = PMR_PDumpSymbolicAddr(psPMR,
+                                     uiLogicalOffset,
+                                     PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                     aszMemspaceName,
+                                     PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                     aszSymbolicName,
+                                     &uiPDumpSymbolicOffset,
+                                     &uiNextSymName);
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%llX :%s:0x%08X",aszMemspaceName, aszSymbolicName,
+							uiPDumpSymbolicOffset, pszPDumpRegName, ui32Reg);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpRegLabelToMem64
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a memory write from a register label
+**************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName,
+								  IMG_UINT32 ui32Reg,
+								  PMR *psPMR,
+								  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+								  IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpRegLabelToMem64"));
+
+	eErr = PMR_PDumpSymbolicAddr(psPMR,
+									 uiLogicalOffset,
+									 PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+									 aszMemspaceName,
+									 PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+									 aszSymbolicName,
+									 &uiPDumpSymbolicOffset,
+									 &uiNextSymName);
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:%s:0x%llX :%s:0x%08X",aszMemspaceName, aszSymbolicName,
+							uiPDumpSymbolicOffset, pszPDumpRegName, ui32Reg);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpPhysHandleToInternalVar64
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents an internal var
+                    write using a PDump pages handle
+**************************************************************************/
+PVRSRV_ERROR PDumpPhysHandleToInternalVar64(IMG_CHAR *pszInternalVar,
+                                            IMG_HANDLE hPdumpPages,
+                                            IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	IMG_CHAR *pszSymbolicName;
+
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpPhysHandleToInternalVar64"));
+
+	eErr = PDumpGetSymbolicAddr(hPdumpPages,
+	                            &pszSymbolicName);
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+	                        "WRW %s %s:0x%llX",
+	                        pszInternalVar, pszSymbolicName, 0llu);
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpMemLabelToInternalVar64
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents an internal var write using a memory label
+**************************************************************************/
+PVRSRV_ERROR PDumpMemLabelToInternalVar64(IMG_CHAR *pszInternalVar,
+                                          PMR *psPMR,
+                                          IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                          IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpMemLabelToInternalVar"));
+
+	eErr = PMR_PDumpSymbolicAddr(psPMR,
+                                     uiLogicalOffset,
+                                     PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                     aszMemspaceName,
+                                     PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                     aszSymbolicName,
+                                     &uiPDumpSymbolicOffset,
+                                     &uiNextSymName);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW %s :%s:%s:0x%llX", pszInternalVar,
+							aszMemspaceName, aszSymbolicName, uiPDumpSymbolicOffset);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PDumpWriteVarORValueOp
+
+ @Description
+
+ Emits the PDump commands for the logical OR operation
+ Var <- Var OR Value
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarORValueOp	(const IMG_CHAR *pszInternalVariable,
+                                         const IMG_UINT64 ui64Value,
+                                         const IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript,
+			ui32MaxLen,
+			"OR %s %s 0x%llX",
+			pszInternalVariable,
+			pszInternalVariable,
+			ui64Value);
+
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript( hScript, ui32PDumpFlags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+
+/*******************************************************************************************************
+ * Function Name  : PDumpRegLabelToInternalVar
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which writes a register label into an internal variable
+********************************************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToInternalVar(IMG_CHAR *pszPDumpRegName,
+                                        IMG_UINT32 ui32Reg,
+                                        IMG_CHAR *pszInternalVar,
+                                        IMG_UINT32 ui32Flags)
+
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpRegLabelToInternalVar"));
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW %s :%s:0x%08X", pszInternalVar, pszPDumpRegName, ui32Reg);
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+
+}
+
+/*******************************************************************************************************
+ * Function Name  : PDumpInternalVarToReg32
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a register write from an internal variable
+********************************************************************************************************/
+PVRSRV_ERROR PDumpInternalVarToReg32(IMG_CHAR *pszPDumpRegName,
+                                     IMG_UINT32	ui32Reg,
+                                     IMG_CHAR *pszInternalVar,
+                                     IMG_UINT32	ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpInternalVarToReg32"));
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:0x%08X %s", pszPDumpRegName, ui32Reg, pszInternalVar);
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/*******************************************************************************************************
+ * Function Name  : PDumpInternalVarToReg64
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a register write from an internal variable
+********************************************************************************************************/
+PVRSRV_ERROR PDumpInternalVarToReg64(IMG_CHAR *pszPDumpRegName,
+                                     IMG_UINT32	ui32Reg,
+                                     IMG_CHAR *pszInternalVar,
+                                     IMG_UINT32	ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpInternalVarToReg64"));
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X %s", pszPDumpRegName, ui32Reg, pszInternalVar);
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+
+
+/*******************************************************************************************************
+ * Function Name  : PDumpMemLabelToMem32
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a memory write from a memory label
+********************************************************************************************************/
+PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource,
+                                  PMR *psPMRDest,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+                                  IMG_UINT32	ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource;
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+	IMG_DEVMEM_OFFSET_T uiNextSymNameSource;
+	IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+
+
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpMemLabelToMem32"));
+
+	eErr = PMR_PDumpSymbolicAddr(psPMRSource,
+                                     uiLogicalOffsetSource,
+                                     PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                     aszMemspaceNameSource,
+                                     PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                     aszSymbolicNameSource,
+                                     &uiPDumpSymbolicOffsetSource,
+                                     &uiNextSymNameSource);
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+                                     uiLogicalOffsetDest,
+                                     PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                     aszMemspaceNameDest,
+                                     PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                     aszSymbolicNameDest,
+                                     &uiPDumpSymbolicOffsetDest,
+                                     &uiNextSymNameDest);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%llX :%s:%s:0x%llX",aszMemspaceNameDest, aszSymbolicNameDest,
+							uiPDumpSymbolicOffsetDest, aszMemspaceNameSource, aszSymbolicNameSource,
+							uiPDumpSymbolicOffsetSource);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/*******************************************************************************************************
+ * Function Name  : PDumpMemLabelToMem64
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a memory write from a memory label
+********************************************************************************************************/
+PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource,
+								  PMR *psPMRDest,
+								  IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+								  IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+								  IMG_UINT32	ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource;
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+	IMG_DEVMEM_OFFSET_T uiNextSymNameSource;
+	IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+
+
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpMemLabelToMem64"));
+
+	eErr = PMR_PDumpSymbolicAddr(psPMRSource,
+									 uiLogicalOffsetSource,
+									 PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+									 aszMemspaceNameSource,
+									 PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+									 aszSymbolicNameSource,
+									 &uiPDumpSymbolicOffsetSource,
+									 &uiNextSymNameSource);
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+									 uiLogicalOffsetDest,
+									 PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+									 aszMemspaceNameDest,
+									 PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+									 aszSymbolicNameDest,
+									 &uiPDumpSymbolicOffsetDest,
+									 &uiNextSymNameDest);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:%s:0x%llX :%s:%s:0x%llX",aszMemspaceNameDest, aszSymbolicNameDest,
+							uiPDumpSymbolicOffsetDest, aszMemspaceNameSource, aszSymbolicNameSource,
+							uiPDumpSymbolicOffsetSource);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+
+
+/*!
+******************************************************************************
+
+ @Function	PDumpWriteVarSHRValueOp
+
+ @Description
+
+ Emits the PDump commands for the logical SHR operation
+ Var <-  Var SHR Value
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarSHRValueOp (const IMG_CHAR *pszInternalVariable,
+                                      const IMG_UINT64 ui64Value,
+                                      const IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript,
+			ui32MaxLen,
+			"SHR %s %s 0x%llX",
+			pszInternalVariable,
+			pszInternalVariable,
+			ui64Value);
+
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript( hScript, ui32PDumpFlags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PDumpWriteVarANDValueOp
+
+ @Description
+
+ Emits the PDump commands for the logical AND operation
+ Var <-  Var AND Value
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarANDValueOp (const IMG_CHAR *pszInternalVariable,
+                                      const IMG_UINT64 ui64Value,
+                                      const IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript,
+			ui32MaxLen,
+			"AND %s %s 0x%llX",
+			pszInternalVariable,
+			pszInternalVariable,
+			ui64Value);
+
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript( hScript, ui32PDumpFlags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpSAW
+ * Inputs         : pszDevSpaceName -- device space from which to output
+ *                  ui32Offset -- offset value from register base
+ *                  ui32NumSaveBytes -- number of bytes to output
+ *                  pszOutfileName -- name of file to output to
+ *                  ui32OutfileOffsetByte -- offset into output file to write
+ *                  uiPDumpFlags -- flags to pass to PDumpOSWriteScript
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Dumps the contents of a register bank into a file
+ *                  NB: ui32NumSaveBytes must be divisible by 4
+**************************************************************************/
+PVRSRV_ERROR PDumpSAW(IMG_CHAR      *pszDevSpaceName,
+                      IMG_UINT32    ui32HPOffsetBytes,
+                      IMG_UINT32    ui32NumSaveBytes,
+                      IMG_CHAR      *pszOutfileName,
+                      IMG_UINT32    ui32OutfileOffsetByte,
+                      PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	PVR_DPF((PVR_DBG_ERROR, "PDumpSAW\n"));
+
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "SAW :%s:0x%x 0x%x 0x%x %s\n",
+	                          pszDevSpaceName,
+	                          ui32HPOffsetBytes,
+	                          ui32NumSaveBytes / (IMG_UINT32)sizeof(IMG_UINT32),
+	                          ui32OutfileOffsetByte,
+	                          pszOutfileName);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PDumpSAW PDumpOSBufprintf failed: eError=%u\n", eError));
+		PDUMP_UNLOCK();
+		return eError;
+	}
+
+	if(! PDumpWriteScript(hScript, uiPDumpFlags))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PDumpSAW PDumpWriteScript failed!\n"));
+	}
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpRegPolKM
+ * Inputs         : Description of what this register read is trying to do
+ *					pszPDumpDevName
+ *					Register offset
+ *					expected value
+ *					mask for that value
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string which represents a register poll
+ *					for an expected value
+**************************************************************************/
+PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR				*pszPDumpRegName,
+						   IMG_UINT32			ui32RegAddr, 
+						   IMG_UINT32			ui32RegValue, 
+						   IMG_UINT32			ui32Mask,
+						   IMG_UINT32			ui32Flags,
+						   PDUMP_POLL_OPERATOR	eOperator)
+{
+	/* Timings correct for linux and XP */
+	/* Timings should be passed in */
+	#define POLL_DELAY			1000U
+	#define POLL_COUNT_LONG		(2000000000U / POLL_DELAY)
+	#define POLL_COUNT_SHORT	(1000000U / POLL_DELAY)
+
+	PVRSRV_ERROR eErr;
+	IMG_UINT32	ui32PollCount;
+
+	PDUMP_GET_SCRIPT_STRING();
+	PDUMP_DBG(("PDumpRegPolKM"));
+
+	ui32PollCount = POLL_COUNT_LONG;
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "POL :%s:0x%08X 0x%08X 0x%08X %d %u %d",
+							pszPDumpRegName, ui32RegAddr, ui32RegValue,
+							ui32Mask, eOperator, ui32PollCount, POLL_DELAY);
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
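+
+/* Worked example: with POLL_DELAY at 1000, POLL_COUNT_LONG is
+ * 2000000000 / 1000 = 2000000, so a poll on register 0x0000 for value 0x1
+ * with mask 0x1 and operator 0 emits (register bank name illustrative):
+ *     POL :SYSREG:0x00000000 0x00000001 0x00000001 0 2000000 1000
+ */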
+
+/* Never call direct, needs caller to hold OS Lock.
+ * Use PDumpCommentWithFlags() from within the server.
+ * Clients call this via the bridge and PDumpCommentKM().
+ */
+static PVRSRV_ERROR _PDumpWriteComment(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+#if defined(PDUMP_DEBUG_OUTFILES)
+	IMG_CHAR pszTemp[256];
+#endif
+	PDUMP_GET_SCRIPT_STRING();
+	PDUMP_DBG(("PDumpCommentKM"));
+
+	if((pszComment == NULL) || (PDumpOSBuflen(pszComment, ui32MaxLen) == 0))
+	{
+		/* PDumpOSVerifyLineEnding silently fails if pszComment is too short to
+		   actually hold the line endings that it's trying to enforce, so
+		   short circuit it and force safety */
+		pszComment = "\n";
+	}
+	else
+	{
+		/* Put line ending sequence at the end if it isn't already there */
+		PDumpOSVerifyLineEnding(pszComment, ui32MaxLen);
+	}
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+	/* Prefix comment with PID and line number */
+	eErr = PDumpOSSprintf(pszTemp, 256, "%u %u:%lu %s: %s",
+		g_ui32EveryLineCounter,
+		OSGetCurrentClientProcessIDKM(),
+		(unsigned long)OSGetCurrentClientThreadIDKM(),
+		OSGetCurrentClientProcessNameKM(),
+		pszComment);
+
+	/* Append the comment to the script stream */
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- %s",
+		pszTemp);
+#else
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- %s",
+		pszComment);
+#endif
+	if( (eErr != PVRSRV_OK) &&
+		(eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW))
+	{
+		PVR_LOGG_IF_ERROR(eErr, "PDumpOSBufprintf", ErrUnlock);
+	}
+
+	if (!PDumpWriteScript(hScript, ui32Flags))
+	{
+		if(PDUMP_IS_CONTINUOUS(ui32Flags))
+		{
+			eErr = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+			PVR_LOGG_IF_ERROR(eErr, "PDumpWriteScript", ErrUnlock);
+		}
+		else
+		{
+			eErr = PVRSRV_ERROR_CMD_NOT_PROCESSED;
+			PVR_LOGG_IF_ERROR(eErr, "PDumpWriteScript", ErrUnlock);
+		}
+	}
+
+ErrUnlock:
+	return eErr;
+}
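+
+/* Worked example: _PDumpWriteComment("Hello", ...) appends the script line
+ *     -- Hello
+ * With PDUMP_DEBUG_OUTFILES defined, the comment is first prefixed with the
+ * line counter, PID:TID and process name, e.g. (values illustrative):
+ *     -- 42 1234:5678 myproc: Hello
+ */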
+
+/**************************************************************************
+ * Function Name  : PDumpCommentKM
+ * Inputs         : pszComment, ui32Flags
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Dumps a pre-formatted comment, primarily called from the
+ *                : bridge.
+**************************************************************************/
+PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+
+	PDUMP_LOCK();
+
+	eErr =  _PDumpWriteComment(pszComment, ui32Flags);
+
+	PDUMP_UNLOCK();
+	return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpCommentWithFlags
+ * Inputs         : ui32Flags - PDump flags
+ *				  : pszFormat - format string for comment
+ *				  : ... - args for format string
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps a comment
+**************************************************************************/
+PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...)
+{
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+	va_list args;
+
+	va_start(args, pszFormat);
+	eErr = PDumpCommentWithFlagsVA(ui32Flags, pszFormat, args);
+	va_end(args);
+
+	return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpCommentWithFlagsVA
+ * Inputs         : ui32Flags - PDump flags
+ *				  : pszFormat - format string for comment
+ *				  : args      - pre-started va_list args for format string
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps a comment
+**************************************************************************/
+PVRSRV_ERROR PDumpCommentWithFlagsVA(IMG_UINT32 ui32Flags, const IMG_CHAR * pszFormat, va_list args)
+{
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+	PDUMP_GET_MSG_STRING();
+
+	PDUMP_LOCK();
+
+	/* Construct the string */
+	eErr = PDumpOSVSprintf(pszMsg, ui32MaxLen, pszFormat, args);
+
+	if(eErr != PVRSRV_OK)
+	{
+		goto Unlock;
+	}
+
+	eErr =  _PDumpWriteComment(pszMsg, ui32Flags);
+
+Unlock:
+	PDUMP_UNLOCK();
+	return eErr;
+}
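+
+/* Usage sketch: callers format comments directly, as elsewhere in this file:
+ *     PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Set pdump frame %u", ui32Frame);
+ * which reaches _PDumpWriteComment() via PDumpCommentWithFlagsVA() above.
+ */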
+
+/*************************************************************************/ /*!
+ * Function Name  : PDumpPanic
+ * Inputs         : ui32PanicNo - Unique number for panic condition
+ *				  : pszPanicMsg - Panic reason message limited to ~90 chars
+ *				  : pszPPFunc   - Function name string where panic occurred
+ *				  : ui32PPline  - Source line number where panic occurred
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps a panic assertion. Used when the host driver
+ *                : detects a condition that will lead to an invalid PDump
+ *                : script that cannot be played back off-line.
+ */ /*************************************************************************/
+PVRSRV_ERROR PDumpPanic(IMG_UINT32      ui32PanicNo,
+						IMG_CHAR*       pszPanicMsg,
+						const IMG_CHAR* pszPPFunc,
+						IMG_UINT32      ui32PPline)
+{
+	PVRSRV_ERROR   eError = PVRSRV_OK;
+	PDUMP_FLAGS_T  uiPDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+	IMG_CHAR       pszConsoleMsg[] =
+"COM ***************************************************************************\n"
+"COM Script invalid and not compatible with off-line playback. Check test \n"
+"COM parameters and driver configuration, stop imminent.\n"
+"COM ***************************************************************************\n";
+	PDUMP_GET_SCRIPT_STRING();
+
+	/* Log the panic condition to the live kern.log in both REL and DEB mode
+	 * to aid user PDump troubleshooting. */
+	PVR_LOG(("PDUMP PANIC %08x: %s", ui32PanicNo, pszPanicMsg));
+	PVR_DPF((PVR_DBG_MESSAGE, "PDUMP PANIC start %s:%d", pszPPFunc, ui32PPline));
+
+	/* Check the supplied panic reason string is within length limits */
+	PVR_ASSERT(OSStringLength(pszPanicMsg)+sizeof("PANIC   ") < PVRSRV_PDUMP_MAX_COMMENT_SIZE-1);
+
+	/* Obtain lock to keep the multi-line
+	 * panic statement together in a single atomic write */
+	PDUMP_LOCK();
+
+
+	/* Write -- Panic start (Function:line) */
+	eError = PDumpOSBufprintf(hScript, ui32MaxLen, "-- Panic start (%s:%d)", pszPPFunc, ui32PPline);
+	PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+	(void)PDumpWriteScript(hScript, uiPDumpFlags);
+
+	/* Write COM <message> x4 */
+	eError = PDumpOSBufprintf(hScript, ui32MaxLen, "%s", pszConsoleMsg);
+	PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+	(void)PDumpWriteScript(hScript, uiPDumpFlags);
+
+	/* Write PANIC no msg command */
+	eError = PDumpOSBufprintf(hScript, ui32MaxLen, "PANIC %08x %s", ui32PanicNo, pszPanicMsg);
+	PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+	(void)PDumpWriteScript(hScript, uiPDumpFlags);
+
+	/* Write -- Panic end */
+	eError = PDumpOSBufprintf(hScript, ui32MaxLen, "-- Panic end");
+	PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+	(void)PDumpWriteScript(hScript, uiPDumpFlags);
+
+e1:
+	PDUMP_UNLOCK();
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+ * Function Name  : PDumpCaptureError
+ * Inputs         : ui32ErrorNo - Unique number for the error condition
+ *                : pszErrorMsg - Error reason message limited to ~90 chars
+ *                : pszPPFunc   - Function name string where the error occurred
+ *                : ui32PPline  - Source line number where the error occurred
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps an error string to the script file to interrupt
+ *                : play back to inform user of a fatal issue that occurred
+ *                : during PDump capture.
+ */ /*************************************************************************/
+PVRSRV_ERROR PDumpCaptureError(PVRSRV_ERROR    ui32ErrorNo,
+                       IMG_CHAR*       pszErrorMsg,
+                       const IMG_CHAR* pszPPFunc,
+                       IMG_UINT32      ui32PPline)
+{
+	IMG_CHAR*       pszFormatStr = "DRIVER_ERROR: %3d: %s";
+	PDUMP_FLAGS_T   uiPDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+
+	/* Need to return an error using this macro */
+	PDUMP_GET_SCRIPT_STRING();
+
+	/* Check the supplied error reason string is within length limits
+	 * (measure the format string; sizeof a pointer would be wrong) */
+	PVR_ASSERT(OSStringLength(pszErrorMsg) + OSStringLength(pszFormatStr) < PVRSRV_PDUMP_MAX_COMMENT_SIZE-1);
+
+	/* Obtain lock so the driver error message
+	 * goes out in a single atomic write */
+	PDUMP_LOCK();
+
+	/* Write driver error message to the script file */
+	(void) PDumpOSBufprintf(hScript, ui32MaxLen, pszFormatStr, ui32ErrorNo, pszErrorMsg);
+	(void) PDumpWriteScript(hScript, uiPDumpFlags);
+
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PDumpBitmapKM
+
+ @Description
+
+ Dumps a bitmap from device memory to a file
+
+ @Input    psDevId
+ @Input    pszFileName
+ @Input    ui32FileOffset
+ @Input    ui32Width
+ @Input    ui32Height
+ @Input    ui32StrideInBytes
+ @Input    sDevBaseAddr
+ @Input    ui32Size
+ @Input    ePixelFormat
+ @Input    eMemFormat
+ @Input    ui32PDumpFlags
+
+ @Return   PVRSRV_ERROR			:
+
+******************************************************************************/
+PVRSRV_ERROR PDumpBitmapKM(	PVRSRV_DEVICE_NODE *psDeviceNode,
+							IMG_CHAR *pszFileName,
+							IMG_UINT32 ui32FileOffset,
+							IMG_UINT32 ui32Width,
+							IMG_UINT32 ui32Height,
+							IMG_UINT32 ui32StrideInBytes,
+							IMG_DEV_VIRTADDR sDevBaseAddr,
+							IMG_UINT32 ui32MMUContextID,
+							IMG_UINT32 ui32Size,
+							PDUMP_PIXEL_FORMAT ePixelFormat,
+							IMG_UINT32 ui32AddrMode,
+							IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psDeviceNode->sDevId;
+	PVRSRV_ERROR eErr=0;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDumpCommentWithFlags(ui32PDumpFlags, "Dump bitmap of render.");
+	
+	switch (ePixelFormat)
+	{
+		case PVRSRV_PDUMP_PIXEL_FORMAT_YUV8:
+		{
+			PDumpCommentWithFlags(ui32PDumpFlags, "YUV data. Switching from SII to SAB. Width=0x%08X Height=0x%08X Stride=0x%08X",
+							 						ui32Width, ui32Height, ui32StrideInBytes);
+			PDUMP_LOCK();
+			eErr = PDumpOSBufprintf(hScript,
+									ui32MaxLen,
+									"SAB :%s:v%x:0x%010llX 0x%08X 0x%08X %s.bin\n",
+									psDevId->pszPDumpDevName,
+									ui32MMUContextID,
+									sDevBaseAddr.uiAddr,
+									ui32Size,
+									ui32FileOffset,
+									pszFileName);
+			
+			if (eErr != PVRSRV_OK)
+			{
+				PDUMP_UNLOCK();
+				return eErr;
+			}
+			
+			PDumpWriteScript( hScript, ui32PDumpFlags);
+			PDUMP_UNLOCK();		
+			break;
+		}
+		case PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8: // YUV420 2 planes
+		{
+			const IMG_UINT32 ui32Plane0Size = ui32StrideInBytes*ui32Height;
+			const IMG_UINT32 ui32Plane1Size = ui32Plane0Size>>1; // YUV420
+			const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32Plane0Size;
+			const IMG_UINT32 ui32Plane1MemOffset = ui32Plane0Size;
+			
+			PDumpCommentWithFlags(ui32PDumpFlags, "YUV420 2-plane. Width=0x%08X Height=0x%08X Stride=0x%08X",
+							 						ui32Width, ui32Height, ui32StrideInBytes);
+			PDUMP_LOCK();
+			eErr = PDumpOSBufprintf(hScript,
+						ui32MaxLen,
+						"SII %s %s.bin :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+						pszFileName,
+						pszFileName,
+						
+						// Plane 0 (Y)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// Context id
+						sDevBaseAddr.uiAddr,		// virtaddr
+						ui32Plane0Size,				// size
+						ui32FileOffset,				// fileoffset
+						
+						// Plane 1 (UV)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// Context id
+						sDevBaseAddr.uiAddr+ui32Plane1MemOffset,	// virtaddr
+						ui32Plane1Size,				// size
+						ui32Plane1FileOffset,		// fileoffset
+						
+						ePixelFormat,
+						ui32Width,
+						ui32Height,
+						ui32StrideInBytes,
+						ui32AddrMode);
+						
+			if (eErr != PVRSRV_OK)
+			{
+				PDUMP_UNLOCK();
+				return eErr;
+			}
+			
+			PDumpWriteScript( hScript, ui32PDumpFlags);
+			PDUMP_UNLOCK();
+			break;
+		}
+		
+		case PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12: // YUV420 3 planes
+		{
+			const IMG_UINT32 ui32Plane0Size = ui32StrideInBytes*ui32Height;
+			const IMG_UINT32 ui32Plane1Size = ui32Plane0Size>>2; // YUV420
+			const IMG_UINT32 ui32Plane2Size = ui32Plane1Size;
+			const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32Plane0Size;
+			const IMG_UINT32 ui32Plane2FileOffset = ui32Plane1FileOffset + ui32Plane1Size;
+			const IMG_UINT32 ui32Plane1MemOffset = ui32Plane0Size;
+			const IMG_UINT32 ui32Plane2MemOffset = ui32Plane0Size+ui32Plane1Size;
+	
+			PDumpCommentWithFlags(ui32PDumpFlags, "YUV420 3-plane. Width=0x%08X Height=0x%08X Stride=0x%08X",
+							 						ui32Width, ui32Height, ui32StrideInBytes);
+			PDUMP_LOCK();
+			eErr = PDumpOSBufprintf(hScript,
+						ui32MaxLen,
+						"SII %s %s.bin :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+						pszFileName,
+						pszFileName,
+						
+						// Plane 0 (Y)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr,		// virtaddr
+						ui32Plane0Size,				// size
+						ui32FileOffset,				// fileoffset
+						
+						// Plane 1 (U)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr+ui32Plane1MemOffset,	// virtaddr
+						ui32Plane1Size,				// size
+						ui32Plane1FileOffset,		// fileoffset
+						
+						// Plane 2 (V)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr+ui32Plane2MemOffset,	// virtaddr
+						ui32Plane2Size,				// size
+						ui32Plane2FileOffset,		// fileoffset
+						
+						ePixelFormat,
+						ui32Width,
+						ui32Height,
+						ui32StrideInBytes,
+						ui32AddrMode);
+						
+			if (eErr != PVRSRV_OK)
+			{
+				PDUMP_UNLOCK();
+				return eErr;
+			}
+			
+			PDumpWriteScript( hScript, ui32PDumpFlags);
+			PDUMP_UNLOCK();
+			break;
+		}
+		
+		case PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32: // YV32 - 4 contiguous planes in the order VUYA, stride can be > width.
+		{
+			const IMG_UINT32 ui32PlaneSize = ui32StrideInBytes*ui32Height; // All 4 planes are the same size
+			const IMG_UINT32 ui32Plane0FileOffset = ui32FileOffset + (ui32PlaneSize<<1);		// SII plane 0 is Y, which is YV32 plane 2
+			const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32PlaneSize;				// SII plane 1 is U, which is YV32 plane 1
+			const IMG_UINT32 ui32Plane2FileOffset = ui32FileOffset;								// SII plane 2 is V, which is YV32 plane 0
+			const IMG_UINT32 ui32Plane3FileOffset = ui32Plane0FileOffset + ui32PlaneSize;		// SII plane 3 is A, which is YV32 plane 3
+			const IMG_UINT32 ui32Plane0MemOffset = ui32PlaneSize<<1;
+			const IMG_UINT32 ui32Plane1MemOffset = ui32PlaneSize;
+			const IMG_UINT32 ui32Plane2MemOffset = 0;
+			const IMG_UINT32 ui32Plane3MemOffset = ui32Plane0MemOffset + ui32PlaneSize;
+							 						
+			PDumpCommentWithFlags(ui32PDumpFlags, "YV32 4 planes. Width=0x%08X Height=0x%08X Stride=0x%08X",
+							 						ui32Width, ui32Height, ui32StrideInBytes);
+			
+			PDumpCommentWithFlags(ui32PDumpFlags, "YV32 plane size is 0x%08X", ui32PlaneSize);
+			
+			PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 0 Mem Offset=0x%08X", ui32Plane0MemOffset);
+			PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 1 Mem Offset=0x%08X", ui32Plane1MemOffset);
+			PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 2 Mem Offset=0x%08X", ui32Plane2MemOffset);
+			PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 3 Mem Offset=0x%08X", ui32Plane3MemOffset);
+			
+			/*
+				SII <imageset> <filename>	:<memsp1>:v<id1>:<virtaddr1> <size1> <fileoffset1>		Y
+											:<memsp2>:v<id2>:<virtaddr2> <size2> <fileoffset2>		U
+											:<memsp3>:v<id3>:<virtaddr3> <size3> <fileoffset3>		V
+											:<memsp4>:v<id4>:<virtaddr4> <size4> <fileoffset4>		A
+											<pixfmt> <width> <height> <stride> <addrmode>
+			*/
+			PDUMP_LOCK();
+			eErr = PDumpOSBufprintf(hScript,
+						ui32MaxLen,
+						"SII %s %s.bin :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+						pszFileName,
+						pszFileName,
+						
+						// Plane 0 (Y: YV32 plane 2)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr+ui32Plane0MemOffset,	// virtaddr
+						ui32PlaneSize,				// size
+						ui32Plane0FileOffset,		// fileoffset
+						
+						// Plane 1 (U)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr+ui32Plane1MemOffset,	// virtaddr
+						ui32PlaneSize,				// size
+						ui32Plane1FileOffset,		// fileoffset
+						
+						// Plane 2 (V: YV32 plane 0)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr+ui32Plane2MemOffset,	// virtaddr
+						ui32PlaneSize,				// size
+						ui32Plane2FileOffset,		// fileoffset
+						
+						// Plane 3 (A)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr+ui32Plane3MemOffset,	// virtaddr
+						ui32PlaneSize,				// size
+						ui32Plane3FileOffset,		// fileoffset
+						
+						ePixelFormat,
+						ui32Width,
+						ui32Height,
+						ui32StrideInBytes,
+						ui32AddrMode);
+						
+			if (eErr != PVRSRV_OK)
+			{
+				PDUMP_UNLOCK();
+				return eErr;
+			}
+			
+			PDumpWriteScript( hScript, ui32PDumpFlags);
+			PDUMP_UNLOCK();
+			break;
+		}
+				
+		default: // Single plane formats
+		{
+			PDUMP_LOCK();
+			eErr = PDumpOSBufprintf(hScript,
+						ui32MaxLen,
+						"SII %s %s.bin :%s:v%x:0x%010llX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+						pszFileName,
+						pszFileName,
+						psDevId->pszPDumpDevName,
+						ui32MMUContextID,
+						sDevBaseAddr.uiAddr,
+						ui32Size,
+						ui32FileOffset,
+						ePixelFormat,
+						ui32Width,
+						ui32Height,
+						ui32StrideInBytes,
+						ui32AddrMode);
+						
+			if (eErr != PVRSRV_OK)
+			{
+				PDUMP_UNLOCK();
+				return eErr;
+			}
+
+			PDumpWriteScript( hScript, ui32PDumpFlags);
+			PDUMP_UNLOCK();
+			break;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PDumpReadRegKM
+
+ @Description
+
+ Dumps a read from a device register to a file
+
+ @Input    pszPDumpRegName	: PDump register bank name
+ @Input    pszFileName
+ @Input    ui32FileOffset
+ @Input    ui32Address
+ @Input    ui32Size
+ @Input    ui32PDumpFlags
+
+ @Return   PVRSRV_ERROR			:
+
+******************************************************************************/
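+/* Illustration only (hypothetical names/values): a call with register bank
+ * "SGXREG", address 0x4000, file offset 0 and filename "reg.bin" emits:
+ *     SAB :SGXREG:0x00004000 0x00000000 reg.bin
+ */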
+PVRSRV_ERROR PDumpReadRegKM		(	IMG_CHAR *pszPDumpRegName,
+									IMG_CHAR *pszFileName,
+									IMG_UINT32 ui32FileOffset,
+									IMG_UINT32 ui32Address,
+									IMG_UINT32 ui32Size,
+									IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript,
+			ui32MaxLen,
+			"SAB :%s:0x%08X 0x%08X %s",
+			pszPDumpRegName,
+			ui32Address,
+			ui32FileOffset,
+			pszFileName);
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript( hScript, ui32PDumpFlags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name		PDumpRegRead32
+ @brief		Dump 32-bit register read to script
+ @param		pszPDumpRegName - pdump register bank name
+ @param		ui32RegOffset - register offset
+ @param		ui32Flags - pdump flags
+ @return	Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegRead32(IMG_CHAR *pszPDumpRegName,
+							const IMG_UINT32 ui32RegOffset,
+							IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :%s:0x%X",
+							pszPDumpRegName, 
+							ui32RegOffset);
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name		PDumpRegRead64
+ @brief		Dump 64-bit register read to script
+ @param		pszPDumpRegName - pdump register bank name
+ @param		ui32RegOffset - register offset
+ @param		ui32Flags - pdump flags
+ @return	Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegRead64(IMG_CHAR *pszPDumpRegName,
+							const IMG_UINT32 ui32RegOffset,
+							IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW64 :%s:0x%X",
+							pszPDumpRegName, 
+							ui32RegOffset);
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+
+/*****************************************************************************
+ FUNCTION	: PDumpWriteShiftedMaskedValue
+
+ PURPOSE	: Emits the PDump commands for writing a masked shifted address
+              into another location
+
+ PARAMETERS	: PDump symbolic name and offset of target word
+              PDump symbolic name and offset of source address
+              right shift amount
+              left shift amount
+              mask
+
+ RETURNS	: PVRSRV_ERROR
+*****************************************************************************/
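+/* Illustration only (hypothetical regspace/symbol names): with uiSHRAmount=4,
+ * uiSHLAmount=0, uiMask=0xFFFFF000 and uiWordSize=4, the emitted script is:
+ *
+ *     WRW :DSTSPC:$1 :SRCSPC:SRCSYM:<off>
+ *     SHR :DSTSPC:$1 :DSTSPC:$1 0x4
+ *     AND :DSTSPC:$1 :DSTSPC:$1 0xFFFFF000
+ *     WRW :DSTSPC:DSTSYM:<off> :DSTSPC:$1
+ */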
+PVRSRV_ERROR
+PDumpWriteShiftedMaskedValue(const IMG_CHAR *pszDestRegspaceName,
+                             const IMG_CHAR *pszDestSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiDestOffset,
+                             const IMG_CHAR *pszRefRegspaceName,
+                             const IMG_CHAR *pszRefSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiRefOffset,
+                             IMG_UINT32 uiSHRAmount,
+                             IMG_UINT32 uiSHLAmount,
+                             IMG_UINT32 uiMask,
+                             IMG_DEVMEM_SIZE_T uiWordSize,
+                             IMG_UINT32 uiPDumpFlags)
+{
+	PVRSRV_ERROR         eError;
+
+    /* Suffix of WRW command in PDump (i.e. WRW or WRW64) */
+    const IMG_CHAR       *pszWrwSuffix;
+
+    /* Internal PDump register used for interim calculation */
+    const IMG_CHAR       *pszPDumpIntRegSpace;
+    IMG_UINT32           uiPDumpIntRegNum;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+    if ((uiWordSize != 4) && (uiWordSize != 8))
+    {
+        return PVRSRV_ERROR_NOT_SUPPORTED;
+    }
+
+    pszWrwSuffix = (uiWordSize == 8) ? "64" : "";
+
+    /* Should really "Acquire" a pdump register here */
+    pszPDumpIntRegSpace = pszDestRegspaceName;
+    uiPDumpIntRegNum = 1;
+        
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript,
+                              ui32MaxLen,
+                              /* Should this be "MOV" instead? */
+                              "WRW :%s:$%d :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+                              /* dest */
+                              pszPDumpIntRegSpace,
+                              uiPDumpIntRegNum,
+                              /* src */
+                              pszRefRegspaceName,
+                              pszRefSymbolicName,
+                              uiRefOffset);
+    if (eError != PVRSRV_OK)
+    {
+        goto ErrUnlock;
+    }
+
+    PDumpWriteScript(hScript, uiPDumpFlags);
+
+    if (uiSHRAmount > 0)
+    {
+        eError = PDumpOSBufprintf(hScript,
+                                  ui32MaxLen,
+                                  "SHR :%s:$%d :%s:$%d 0x%X\n",
+                                  /* dest */
+                                  pszPDumpIntRegSpace,
+                                  uiPDumpIntRegNum,
+                                  /* src A */
+                                  pszPDumpIntRegSpace,
+                                  uiPDumpIntRegNum,
+                                  /* src B */
+                                  uiSHRAmount);
+        if (eError != PVRSRV_OK)
+        {
+            goto ErrUnlock;
+        }
+        PDumpWriteScript(hScript, uiPDumpFlags);
+    }
+    
+    if (uiSHLAmount > 0)
+    {
+        eError = PDumpOSBufprintf(hScript,
+                                  ui32MaxLen,
+                                  "SHL :%s:$%d :%s:$%d 0x%X\n",
+                                  /* dest */
+                                  pszPDumpIntRegSpace,
+                                  uiPDumpIntRegNum,
+                                  /* src A */
+                                  pszPDumpIntRegSpace,
+                                  uiPDumpIntRegNum,
+                                  /* src B */
+                                  uiSHLAmount);
+        if (eError != PVRSRV_OK)
+        {
+            goto ErrUnlock;
+        }
+        PDumpWriteScript(hScript, uiPDumpFlags);
+    }
+    
+    /* Skip the AND when the mask covers the whole word; the full-word mask
+       is computed without shifting by 64, which is undefined for 8-byte words */
+    if (uiMask != ((uiWordSize == 8) ? ~0ULL : ((1ULL << (8*uiWordSize))-1)))
+    {
+        eError = PDumpOSBufprintf(hScript,
+                                  ui32MaxLen,
+                                  "AND :%s:$%d :%s:$%d 0x%X\n",
+                                  /* dest */
+                                  pszPDumpIntRegSpace,
+                                  uiPDumpIntRegNum,
+                                  /* src A */
+                                  pszPDumpIntRegSpace,
+                                  uiPDumpIntRegNum,
+                                  /* src B */
+                                  uiMask);
+        if (eError != PVRSRV_OK)
+        {
+            goto ErrUnlock;
+        }
+        PDumpWriteScript(hScript, uiPDumpFlags);
+    }
+
+    eError = PDumpOSBufprintf(hScript,
+                              ui32MaxLen,
+                              "WRW%s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " :%s:$%d\n",
+                              pszWrwSuffix,
+                              /* dest */
+                              pszDestRegspaceName,
+                              pszDestSymbolicName,
+                              uiDestOffset,
+                              /* src */
+                              pszPDumpIntRegSpace,
+                              uiPDumpIntRegNum);
+    if(eError != PVRSRV_OK)
+    {
+        goto ErrUnlock;
+    }
+    PDumpWriteScript(hScript, uiPDumpFlags);
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+
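+/*****************************************************************************
+ FUNCTION	: PDumpWriteSymbAddress
+
+ PURPOSE	: Emits the PDump commands for writing a symbolic address into
+              another location, rebasing it through an internal register with
+              a SHR/SHL pair when the alignment shift and target shift differ
+
+ PARAMETERS	: PDump memory space name and offset of target word
+              PDump symbolic name and offset of source address
+              word size, alignment shift, target shift, pdump flags
+
+ RETURNS	: PVRSRV_ERROR
+*****************************************************************************/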
+PVRSRV_ERROR
+PDumpWriteSymbAddress(const IMG_CHAR *pszDestSpaceName,
+                      IMG_DEVMEM_OFFSET_T uiDestOffset,
+                      const IMG_CHAR *pszRefSymbolicName,
+                      IMG_DEVMEM_OFFSET_T uiRefOffset,
+                      const IMG_CHAR *pszPDumpDevName,
+                      IMG_UINT32 ui32WordSize,
+                      IMG_UINT32 ui32AlignShift,
+                      IMG_UINT32 ui32Shift,
+                      IMG_UINT32 uiPDumpFlags)
+{
+    const IMG_CHAR       *pszWrwSuffix = "";
+	PVRSRV_ERROR         eError = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+    if (ui32WordSize == 8)
+    {
+        pszWrwSuffix = "64";
+    }
+
+    PDUMP_LOCK();
+
+    if (ui32AlignShift != ui32Shift)
+    {
+    	/* Write physical address into a variable */
+    	eError = PDumpOSBufprintf(hScript,
+    							ui32MaxLen,
+    							"WRW%s :%s:$1 %s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+    							pszWrwSuffix,
+    							/* dest */
+    							pszPDumpDevName,
+    							/* src */
+    							pszRefSymbolicName,
+    							uiRefOffset);
+		if (eError != PVRSRV_OK)
+		{
+			goto symbAddress_error;
+		}
+    	PDumpWriteScript(hScript, uiPDumpFlags);
+
+    	/* apply address alignment  */
+    	eError = PDumpOSBufprintf(hScript,
+    							ui32MaxLen,
+    							"SHR :%s:$1 :%s:$1 0x%X",
+    							/* dest */
+    							pszPDumpDevName,
+    							/* src A */
+    							pszPDumpDevName,
+    							/* src B */
+    							ui32AlignShift);
+		if (eError != PVRSRV_OK)
+		{
+			goto symbAddress_error;
+		}
+    	PDumpWriteScript(hScript, uiPDumpFlags);
+
+    	/* apply address shift  */
+    	eError = PDumpOSBufprintf(hScript,
+    							ui32MaxLen,
+    							"SHL :%s:$1 :%s:$1 0x%X",
+    							/* dest */
+    							pszPDumpDevName,
+    							/* src A */
+    							pszPDumpDevName,
+    							/* src B */
+    							ui32Shift);
+		if (eError != PVRSRV_OK)
+		{
+			goto symbAddress_error;
+		}
+    	PDumpWriteScript(hScript, uiPDumpFlags);
+
+
+    	/* write result to register */
+    	eError = PDumpOSBufprintf(hScript,
+    							ui32MaxLen,
+    							"WRW%s :%s:0x%08X :%s:$1",
+    							pszWrwSuffix,
+    							pszDestSpaceName,
+    							(IMG_UINT32)uiDestOffset,
+    							pszPDumpDevName);
+		if (eError != PVRSRV_OK)
+		{
+			goto symbAddress_error;
+		}
+    	PDumpWriteScript(hScript, uiPDumpFlags);
+    }
+    else
+    {
+		eError = PDumpOSBufprintf(hScript,
+								  ui32MaxLen,
+								  "WRW%s :%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+								  pszWrwSuffix,
+								  /* dest */
+								  pszDestSpaceName,
+								  uiDestOffset,
+								  /* src */
+								  pszRefSymbolicName,
+								  uiRefOffset);
+		if (eError != PVRSRV_OK)
+		{
+			goto symbAddress_error;
+		}
+	    PDumpWriteScript(hScript, uiPDumpFlags);
+    }
+
+symbAddress_error:
+
+    PDUMP_UNLOCK();
+
+	return eError;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpIDLWithFlags
+ * Inputs         : Idle time in clocks
+ * Outputs        : None
+ * Returns        : Error
+ * Description    : Dump IDL command to script
+**************************************************************************/
+PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+	PDUMP_DBG(("PDumpIDLWithFlags"));
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IDL %u", ui32Clocks);
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpIDL
+ * Inputs         : Idle time in clocks
+ * Outputs        : None
+ * Returns        : Error
+ * Description    : Dump IDL command to script
+**************************************************************************/
+PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks)
+{
+	return PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
+}
+
+/*****************************************************************************
+ FUNCTION	: PDumpRegBasedCBP
+    
+ PURPOSE	: Dump CBP command to script
+
+ PARAMETERS	: pszPDumpRegName, ui32RegOffset, ui32WPosVal,
+			  ui32PacketSize, ui32BufferSize, ui32Flags
+
+ RETURNS	: PVRSRV_ERROR
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR		*pszPDumpRegName,
+							  IMG_UINT32	ui32RegOffset,
+							  IMG_UINT32	ui32WPosVal,
+							  IMG_UINT32	ui32PacketSize,
+							  IMG_UINT32	ui32BufferSize,
+							  IMG_UINT32	ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript,
+			 ui32MaxLen,
+			 "CBP :%s:0x%08X 0x%08X 0x%08X 0x%08X",
+			 pszPDumpRegName,
+			 ui32RegOffset,
+			 ui32WPosVal,
+			 ui32PacketSize,
+			 ui32BufferSize);
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
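+/*****************************************************************************
+ FUNCTION	: PDumpTRG
+
+ PURPOSE	: Dump TRG command to script: when bEnable is set the region is
+              programmed with its virtual address range and stride, otherwise
+              only the context and region IDs are emitted to disable it
+
+ RETURNS	: PVRSRV_ERROR
+*****************************************************************************/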
+PVRSRV_ERROR PDumpTRG(IMG_CHAR *pszMemSpace,
+                      IMG_UINT32 ui32MMUCtxID,
+                      IMG_UINT32 ui32RegionID,
+                      IMG_BOOL bEnable,
+                      IMG_UINT64 ui64VAddr,
+                      IMG_UINT64 ui64LenBytes,
+                      IMG_UINT32 ui32XStride,
+                      IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	if(bEnable)
+	{
+		eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+		                 "TRG :%s:v%u %u 0x%08llX 0x%08llX %u",
+		                 pszMemSpace, ui32MMUCtxID, ui32RegionID,
+		                 ui64VAddr, ui64LenBytes, ui32XStride);
+	}
+	else
+	{
+		eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+		                 "TRG :%s:v%u %u",
+		                 pszMemSpace, ui32MMUCtxID, ui32RegionID);
+
+	}
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpConnectionNotify
+ * Description    : Called by the srvcore to tell PDump core that the
+ *                  PDump capture and control client has connected
+ **************************************************************************/
+void PDumpConnectionNotify(void)
+{
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE	*psThis;
+
+	/* Give PDump control a chance to end the init phase, depends on OS */
+	if (!PDumpCtrlInitPhaseComplete())
+	{
+		PDumpStopInitPhase(IMG_TRUE, IMG_FALSE);
+	}
+
+	g_ConnectionCount++;
+	PVR_LOG(("PDump has connected (%u)", g_ConnectionCount));
+	
+	/* Reset the parameter file attributes */
+	g_PDumpParameters.sWOff.ui32Main = g_PDumpParameters.sWOff.ui32Init;
+	g_PDumpParameters.ui32FileIdx = 0;
+
+	/* Loop over all known devices */
+	psThis = psPVRSRVData->psDeviceNodeList;
+	while (psThis)
+	{
+		if (psThis->pfnPDumpInitDevice)
+		{
+			/* Reset pdump according to connected device */
+			psThis->pfnPDumpInitDevice(psThis);
+		}
+		psThis = psThis->psNext;
+	}
+}
+
+/**************************************************************************
+ * Function Name  : PDumpDisconnectionNotify
+ * Description    : Called by the connection_server to tell PDump core that
+ *                  the PDump capture and control client has disconnected
+ **************************************************************************/
+void PDumpDisconnectionNotify(void)
+{
+	PVRSRV_ERROR eErr;
+
+	if (PDumpCtrlCaptureOn())
+	{
+		PVR_LOG(("PDump killed, output files may be invalid or incomplete!"));
+
+		/* Disable capture in server, in case PDump client was killed and did
+		 * not get a chance to reset the capture parameters.
+		 */
+		eErr = PDumpSetDefaultCaptureParamsKM( DEBUG_CAPMODE_FRAMED,
+		                                       FRAME_UNSET, FRAME_UNSET, 1, 0);
+		PVR_LOG_IF_ERROR(eErr, "PDumpSetDefaultCaptureParamsKM");
+	}
+	else
+	{
+		PVR_LOG(("PDump disconnected"));
+	}
+}
+
+/**************************************************************************
+ * Function Name  : PDumpIfKM
+ * Inputs         : pszPDumpCond - string for condition
+ * Outputs        : None
+ * Returns        : None
+ * Description    : Create a PDUMP string which represents an IF command
+					with the given condition.
+**************************************************************************/
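+/* Illustration only (hypothetical condition name): together with PDumpElseKM
+ * and PDumpFiKM below, this brackets a conditional section of script:
+ *     IF MY_COND
+ *     ...
+ *     ELSE MY_COND
+ *     ...
+ *     FI MY_COND
+ */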
+PVRSRV_ERROR PDumpIfKM(IMG_CHAR		*pszPDumpCond)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpIfKM"));
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IF %s\n", pszPDumpCond);
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpElseKM
+ * Inputs         : pszPDumpCond - string for condition
+ * Outputs        : None
+ * Returns        : None
+ * Description    : Create a PDUMP string which represents an ELSE command
+					with the given condition.
+**************************************************************************/
+PVRSRV_ERROR PDumpElseKM(IMG_CHAR		*pszPDumpCond)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpElseKM"));
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "ELSE %s\n", pszPDumpCond);
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpFiKM
+ * Inputs         : pszPDumpCond - string for condition
+ * Outputs        : None
+ * Returns        : None
+ * Description    : Create a PDUMP string which represents an FI command
+					with the given condition.
+**************************************************************************/
+PVRSRV_ERROR PDumpFiKM(IMG_CHAR		*pszPDumpCond)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpFiKM"));
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FI %s\n", pszPDumpCond);
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpCreateLockKM(void)
+{
+	return PDumpOSCreateLock();
+}
+
+void PDumpDestroyLockKM(void)
+{
+	PDumpOSDestroyLock();
+}
+
+void PDumpLock(void)
+{
+	PDumpOSLock();
+}
+
+void PDumpUnlock(void)
+{
+	PDumpOSUnlock();
+}
+
+#if defined(PVR_TESTING_UTILS)
+extern void PDumpOSDumpState(void);
+
+#if !defined(LINUX)
+/* Stub matching the (void) declaration above and the call site below */
+void PDumpOSDumpState(void)
+{
+}
+#endif
+
+void PDumpCommonDumpState(IMG_BOOL bDumpOSLayerState)
+{
+	PVR_LOG(("--- PDUMP COMMON: g_PDumpInitialised( %d )",
+			g_PDumpInitialised) );
+	PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.hInit( %p ) g_PDumpScript.sCh.hMain( %p ) g_PDumpScript.sCh.hDeinit( %p )",
+			g_PDumpScript.sCh.hInit, g_PDumpScript.sCh.hMain, g_PDumpScript.sCh.hDeinit) );
+	PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sCh.hInit( %p ) g_PDumpParameters.sCh.hMain( %p ) g_PDumpParameters.sCh.hDeinit( %p )",
+			g_PDumpParameters.sCh.hInit, g_PDumpParameters.sCh.hMain, g_PDumpParameters.sCh.hDeinit) );
+	PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sWOff.ui32Init( %d ) g_PDumpParameters.sWOff.ui32Main( %d ) g_PDumpParameters.sWOff.ui32Deinit( %d )",
+			g_PDumpParameters.sWOff.ui32Init, g_PDumpParameters.sWOff.ui32Main, g_PDumpParameters.sWOff.ui32Deinit) );
+	PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.ui32FileIdx( %d )",
+			g_PDumpParameters.ui32FileIdx) );
+
+	PVR_LOG(("--- PDUMP COMMON: g_PDumpCtrl( %p ) bInitPhaseActive( %d ) ui32Flags( %x )",
+			&g_PDumpCtrl, g_PDumpCtrl.bInitPhaseActive, g_PDumpCtrl.ui32Flags) );
+	PVR_LOG(("--- PDUMP COMMON: ui32DefaultCapMode( %d ) ui32CurrentFrame( %d )",
+			g_PDumpCtrl.ui32DefaultCapMode, g_PDumpCtrl.ui32CurrentFrame) );
+	PVR_LOG(("--- PDUMP COMMON: sCaptureRange.ui32Start( %x ) sCaptureRange.ui32End( %x ) sCaptureRange.ui32Interval( %u )",
+			g_PDumpCtrl.sCaptureRange.ui32Start, g_PDumpCtrl.sCaptureRange.ui32End, g_PDumpCtrl.sCaptureRange.ui32Interval) );
+	PVR_LOG(("--- PDUMP COMMON: bCaptureOn( %d ) bSuspended( %d ) bInPowerTransition( %d )",
+			g_PDumpCtrl.bCaptureOn, g_PDumpCtrl.bSuspended, g_PDumpCtrl.bInPowerTransition) );
+
+	if (bDumpOSLayerState)
+	{
+		PDumpOSDumpState();
+	}
+}
+#endif
+
+
+PVRSRV_ERROR PDumpRegisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData,
+									 PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
+{
+	PDUMP_CONNECTION_DATA *psPDumpConnectionData;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ppsPDumpConnectionData != NULL);
+
+	psPDumpConnectionData = OSAllocMem(sizeof(*psPDumpConnectionData));
+	if (psPDumpConnectionData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	eError = OSLockCreate(&psPDumpConnectionData->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lockcreate;
+	}
+
+	dllist_init(&psPDumpConnectionData->sListHead);
+	psPDumpConnectionData->ui32RefCount = 1;
+	psPDumpConnectionData->bLastInto = IMG_FALSE;
+	psPDumpConnectionData->ui32LastSetFrameNumber = FRAME_UNSET;
+	psPDumpConnectionData->bLastTransitionFailed = IMG_FALSE;
+
+	/*
+	 * Although we don't take a ref count here, handle base destruction
+	 * will ensure that any resource that might trigger us to do a
+	 * Transition will have been freed before the sync blocks which
+	 * are keeping the sync connection data alive.
+	 */
+	psPDumpConnectionData->psSyncConnectionData = psSyncConnectionData;
+	*ppsPDumpConnectionData = psPDumpConnectionData;
+
+	return PVRSRV_OK;
+
+fail_lockcreate:
+	OSFreeMem(psPDumpConnectionData);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+	_PDumpConnectionRelease(psPDumpConnectionData);
+}
+
+
+
+#else	/* defined(PDUMP) */
+/* disable warning about empty module */
+#ifdef	_WIN32
+#pragma warning (disable:4206)
+#endif
+#endif	/* defined(PDUMP) */
+/*****************************************************************************
+ End of file (pdump_common.c)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pdump_mmu.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pdump_mmu.c
new file mode 100644
index 0000000..218c6b2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pdump_mmu.c
@@ -0,0 +1,1323 @@
+/*************************************************************************/ /*!
+@File
+@Title		MMU PDump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Common PDump (MMU specific) functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if defined (PDUMP)
+
+#include "img_types.h"
+#include "pdump_mmu.h"
+#include "pdump_osfunc.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#define MAX_PDUMP_MMU_CONTEXTS	(10)
+static IMG_UINT32 guiPDumpMMUContextAvailabilityMask = (1<<MAX_PDUMP_MMU_CONTEXTS)-1;
+
+
+#define MMUPX_FMT(X) ((X<3) ? ((X<2) ?  "MMUPT_\0" : "MMUPD_\0") : "MMUPC_\0")
+#define MIPSMMUPX_FMT(X) ((X<3) ? ((X<2) ?  "MIPSMMUPT_\0" : "MIPSMMUPD_\0") : "MIPSMMUPC_\0")
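+/* Select the symbolic-name prefix for an MMU level: level 1 (page table)
+ * maps to "MMUPT_", level 2 (page directory) to "MMUPD_" and level 3
+ * (page catalogue) to "MMUPC_"; the MIPS variants follow the same pattern. */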
+
+
+/* Array used to look-up debug strings from MMU_LEVEL */
+static IMG_CHAR ai8MMULevelStringLookup[MMU_LEVEL_LAST][15] =
+		{
+				"MMU_LEVEL_0",
+				"PAGE_TABLE",
+				"PAGE_DIRECTORY",
+				"PAGE_CATALOGUE",
+		};
+
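+/**************************************************************************
+ * Function Name  : _ContiguousPDumpBytes
+ * Returns        : PVRSRV_ERROR
+ * Description    : Accumulates byte runs that are contiguous in both the
+ *                  source buffer and the symbolic address space, flushing
+ *                  them to the parameter file as a single LDB when bFlush
+ *                  is set or contiguity is broken. State is static, so the
+ *                  caller must hold the PDump lock.
+**************************************************************************/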
+static PVRSRV_ERROR 
+_ContiguousPDumpBytes(const IMG_CHAR *pszSymbolicName,
+                      IMG_UINT32 ui32SymAddrOffset,
+                      IMG_BOOL bFlush,
+                      IMG_UINT32 uiNumBytes,
+                      void *pvBytes,
+                      IMG_UINT32 ui32Flags)
+{
+    static const IMG_CHAR *pvBeyondLastPointer;
+    static const IMG_CHAR *pvBasePointer;
+    static IMG_UINT32 ui32BeyondLastOffset;
+    static IMG_UINT32 ui32BaseOffset;
+    static IMG_UINT32 uiAccumulatedBytes = 0;
+	IMG_UINT32 ui32ParamOutPos;
+    PVRSRV_ERROR eErr = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_AND_FILE_STRING();
+	PVR_UNREFERENCED_PARAMETER(ui32MaxLenFileName);
+
+    /* Caller has PDUMP_LOCK */
+
+    if (!bFlush && uiAccumulatedBytes > 0)
+    {
+        /* do some tests for contiguity.  If it fails, we flush anyway */
+
+        if (pvBeyondLastPointer != pvBytes ||
+            ui32SymAddrOffset != ui32BeyondLastOffset
+            /* NB: ought to check that symbolic name agrees too, but
+               we know this always to be the case in the current use-case */
+            )
+        {
+            bFlush = IMG_TRUE;
+        }
+    }
+
+    /* Flush if necessary */
+    if (bFlush && uiAccumulatedBytes > 0)
+    {        
+        eErr = PDumpWriteParameter((IMG_UINT8 *)(uintptr_t)pvBasePointer,
+                               uiAccumulatedBytes, ui32Flags,
+                               &ui32ParamOutPos, pszFileName);
+    	if (eErr == PVRSRV_OK)
+    	{
+			eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript,
+									"LDB %s:0x%X 0x%X 0x%X %s",
+									/* dest */
+									pszSymbolicName,
+									ui32BaseOffset,
+									/* size */
+									uiAccumulatedBytes,
+									/* file offset */
+									ui32ParamOutPos,
+									/* filename */
+									pszFileName);
+			PVR_LOGG_IF_ERROR(eErr, "PDumpOSBufprintf", ErrOut);
+
+			PDumpWriteScript(hScript, ui32Flags);
+
+    	}
+        else if (eErr != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+        {
+     		PVR_LOGG_IF_ERROR(eErr, "PDumpWriteParameter", ErrOut);
+        }
+        else
+		{
+			/* else Write to parameter file prevented under the flags and
+			 * current state of the driver so skip write to script and error IF.
+			 */
+			eErr = PVRSRV_OK;
+		}
+
+		uiAccumulatedBytes = 0;
+    }
+
+
+    /* Initialise offsets and pointers if necessary */
+    if (uiAccumulatedBytes == 0)
+    {
+        ui32BaseOffset = ui32BeyondLastOffset = ui32SymAddrOffset;
+        pvBeyondLastPointer = pvBasePointer = (const IMG_CHAR *)pvBytes;
+    }
+
+    /* Accumulate some bytes */
+    ui32BeyondLastOffset += uiNumBytes;
+    pvBeyondLastPointer += uiNumBytes;
+    uiAccumulatedBytes += uiNumBytes;
+
+ErrOut:
+    return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUMalloc
+ * Inputs         : pszPDumpDevName, eMMULevel, psDevPAddr, ui32Size,
+ *                  ui32Align, eMMUType
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Dumps a MALLOC command for an MMU page-level object
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUMalloc(const IMG_CHAR			*pszPDumpDevName,
+							MMU_LEVEL 				eMMULevel,
+							IMG_DEV_PHYADDR			*psDevPAddr,
+							IMG_UINT32				ui32Size,
+							IMG_UINT32				ui32Align,
+							PDUMP_MMU_TYPE          eMMUType)
+{
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+	IMG_UINT64 ui64SymbolicAddr;
+	IMG_CHAR *pszMMUPX;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	if (eMMULevel >= MMU_LEVEL_LAST)
+	{
+		eErr = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ErrOut;
+	}
+
+	PDUMP_LOCK();
+
+	/*
+		Write a comment to the PDump2 script streams indicating the memory allocation
+	*/
+	eErr = PDumpOSBufprintf(hScript,
+							ui32MaxLen,
+							"-- MALLOC :%s:%s Size=0x%08X Alignment=0x%08X DevPAddr=0x%08llX",
+							pszPDumpDevName,
+							ai8MMULevelStringLookup[eMMULevel],
+							ui32Size,
+							ui32Align,
+							psDevPAddr->uiAddr);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+
+	/*
+		construct the symbolic address
+	*/
+	ui64SymbolicAddr = (IMG_UINT64)psDevPAddr->uiAddr;
+
+	/*
+		Write to the MMU script stream indicating the memory allocation
+	*/
+	if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+	{
+		pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+	}
+	else
+	{
+		pszMMUPX = MMUPX_FMT(eMMULevel);
+	}
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:%s%016llX 0x%X 0x%X",
+											pszPDumpDevName,
+											pszMMUPX,
+											ui64SymbolicAddr,
+											ui32Size,
+											ui32Align
+											/* don't need this sDevPAddr.uiAddr*/);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+ErrOut:
+	return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUFree
+ * Inputs         : pszPDumpDevName, eMMULevel, psDevPAddr, eMMUType
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Dumps a FREE command for an MMU page-level object
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFree(const IMG_CHAR				*pszPDumpDevName,
+							MMU_LEVEL 					eMMULevel,
+							IMG_DEV_PHYADDR				*psDevPAddr,
+							PDUMP_MMU_TYPE              eMMUType)
+{
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+	IMG_UINT64 ui64SymbolicAddr;
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+	IMG_CHAR *pszMMUPX;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	if (eMMULevel >= MMU_LEVEL_LAST)
+	{
+		eErr = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ErrOut;
+	}
+
+	PDUMP_LOCK();
+	/*
+		Write a comment to the PDUMP2 script streams indicating the memory free
+	*/
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:%s", 
+							pszPDumpDevName, ai8MMULevelStringLookup[eMMULevel]);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+
+	/*
+		construct the symbolic address
+	*/
+	ui64SymbolicAddr = (IMG_UINT64)psDevPAddr->uiAddr;
+
+	/*
+		Write to the MMU script stream indicating the memory free
+	*/
+	if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+	{
+		pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+	}
+	else
+	{
+		pszMMUPX = MMUPX_FMT(eMMULevel);
+	}
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:%s%016llX",
+							pszPDumpDevName,
+							pszMMUPX,
+							ui64SymbolicAddr);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+ErrOut:
+	return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUMalloc2
+ * Inputs         : pszPDumpDevName, pszTableType, pszSymbolicAddr,
+ *                  ui32Size, ui32Align
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Dumps a MALLOC command using a caller-supplied symbolic
+ *                  address
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUMalloc2(const IMG_CHAR			*pszPDumpDevName,
+							const IMG_CHAR			*pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+                             const IMG_CHAR *pszSymbolicAddr,
+                             IMG_UINT32				ui32Size,
+							 IMG_UINT32				ui32Align)
+{
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	/*
+		Write a comment to the PDump2 script streams indicating the memory allocation
+	*/
+	eErr = PDumpOSBufprintf(hScript,
+							ui32MaxLen,
+							"-- MALLOC :%s:%s Size=0x%08X Alignment=0x%08X\n",
+							pszPDumpDevName,
+							pszTableType,
+							ui32Size,
+							ui32Align);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+
+	/*
+		Write to the MMU script stream indicating the memory allocation
+	*/
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:%s 0x%X 0x%X\n",
+											pszPDumpDevName,
+											pszSymbolicAddr,
+											ui32Size,
+											ui32Align
+											/* don't need this sDevPAddr.uiAddr*/);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+	return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUFree2
+ * Inputs         : pszPDumpDevName, pszTableType, pszSymbolicAddr
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Dumps a FREE command using a caller-supplied symbolic
+ *                  address
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFree2(const IMG_CHAR				*pszPDumpDevName,
+							const IMG_CHAR				*pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+                           const IMG_CHAR *pszSymbolicAddr)
+{
+	PVRSRV_ERROR eErr  = PVRSRV_OK;
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	/*
+		Write a comment to the PDUMP2 script streams indicating the memory free
+	*/
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:%s\n", 
+							pszPDumpDevName, pszTableType);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+
+	/*
+		Write to the MMU script stream indicating the memory free
+	*/
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:%s\n",
+                            pszPDumpDevName,
+							pszSymbolicAddr);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+	return eErr;
+}
+
+/*******************************************************************************************************
+ * Function Name  : PDumpPTBaseObjectToMem64
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string representing a 64-bit write of the
+ *					page-table base address into memory, for the MIPS MMU
+ *					device type
+********************************************************************************************************/
+PVRSRV_ERROR PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName,
+									PMR *psPMRDest,
+								  IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+								  IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+								  IMG_UINT32 ui32Flags,								  
+								  MMU_LEVEL eMMULevel,
+								  IMG_UINT64 ui64PxSymAddr)
+{
+
+	IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+	IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+									 uiLogicalOffsetDest,
+									 PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+									 aszMemspaceNameDest,
+									 PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+									 aszSymbolicNameDest,
+									 &uiPDumpSymbolicOffsetDest,
+									 &uiNextSymNameDest);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:%s:0x%llX :%s:%s%016llX:0x%llX",aszMemspaceNameDest, aszSymbolicNameDest,
+							uiPDumpSymbolicOffsetDest, pszPDumpDevName, MIPSMMUPX_FMT(eMMULevel), ui64PxSymAddr,
+							(IMG_UINT64)0);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUDumpPxEntries
+ * Inputs         : MMU level, Px memory, entry range, and the address and
+ *                  protection masks needed to decode each entry
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Dumps a range of page-level entries, emitting symbolic
+ *                  address fix-ups for valid entries and raw LDB data for
+ *                  invalid ones
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUDumpPxEntries(MMU_LEVEL eMMULevel,
+								   const IMG_CHAR *pszPDumpDevName,
+                                   void *pvPxMem,
+                                   IMG_DEV_PHYADDR sPxDevPAddr,
+                                   IMG_UINT32 uiFirstEntry,
+                                   IMG_UINT32 uiNumEntries,
+                                   const IMG_CHAR *pszMemspaceName,
+                                   const IMG_CHAR *pszSymbolicAddr,
+                                   IMG_UINT64 uiSymbolicAddrOffset,
+                                   IMG_UINT32 uiBytesPerEntry,
+                                   IMG_UINT32 uiLog2Align,
+                                   IMG_UINT32 uiAddrShift,
+                                   IMG_UINT64 uiAddrMask,
+                                   IMG_UINT64 uiPxEProtMask,
+                                   IMG_UINT64 uiDataValidEnable,
+                                   IMG_UINT32 ui32Flags,
+                                   PDUMP_MMU_TYPE eMMUType)
+{
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+    IMG_UINT64 ui64PxSymAddr;
+    IMG_UINT64 ui64PxEValueSymAddr;
+    IMG_UINT32 ui32SymAddrOffset = 0;
+    IMG_UINT32 *pui32PxMem;
+    IMG_UINT64 *pui64PxMem;
+    IMG_BOOL   bPxEValid;
+    IMG_UINT32 uiPxEIdx;
+    IMG_INT32  iShiftAmount;
+    IMG_CHAR   *pszWrwSuffix = 0;
+    void *pvRawBytes = 0;
+    IMG_CHAR aszPxSymbolicAddr[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+    IMG_UINT64 ui64PxE64;
+    IMG_UINT64 ui64Protflags64;
+    IMG_CHAR *pszMMUPX;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	if (!PDumpReady())
+	{
+		eErr = PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+		goto ErrOut;
+	}
+
+
+	if (PDumpIsDumpSuspended())
+	{
+		eErr = PVRSRV_OK;
+		goto ErrOut;
+	}
+
+    if (pvPxMem == NULL)
+    {
+        eErr = PVRSRV_ERROR_INVALID_PARAMS;
+        goto ErrOut;
+    }
+
+
+	/*
+		create the symbolic address of the Px
+	*/
+	ui64PxSymAddr = sPxDevPAddr.uiAddr;
+
+	if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+	{
+		pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+	}
+	else
+	{
+		pszMMUPX = MMUPX_FMT(eMMULevel);
+	}
+    OSSNPrintf(aszPxSymbolicAddr,
+               PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+               ":%s:%s%016llX",
+               pszPDumpDevName,
+               pszMMUPX,
+               ui64PxSymAddr);
+
+    PDUMP_LOCK();
+
+	/*
+		traverse PxEs, dumping entries
+	*/
+	for(uiPxEIdx = uiFirstEntry;
+        uiPxEIdx < uiFirstEntry + uiNumEntries;
+        uiPxEIdx++)
+	{
+		/* Calc the symbolic address offset of the PxE location
+		   This is what we have to add to the table address to get to a certain entry */
+		ui32SymAddrOffset = (uiPxEIdx*uiBytesPerEntry);
+
+		/* Calc the symbolic address of the PxE value and HW protflags */
+		/* just read it here */
+		switch(uiBytesPerEntry)
+		{
+			case 4:
+			{
+			 	pui32PxMem = pvPxMem;
+                ui64PxE64 = pui32PxMem[uiPxEIdx];
+                pszWrwSuffix = "";
+                pvRawBytes = &pui32PxMem[uiPxEIdx];
+				break;
+			}
+			case 8:
+			{
+			 	pui64PxMem = pvPxMem;
+                ui64PxE64 = pui64PxMem[uiPxEIdx];
+                pszWrwSuffix = "64";
+                pvRawBytes = &pui64PxMem[uiPxEIdx];
+				break;
+			}
+			default:
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "PDumpMMUDumpPxEntries: unsupported PxE size (%u bytes)",
+						 uiBytesPerEntry));
+				ui64PxE64 = 0;
+				break;
+			}
+		}
+
+        ui64PxEValueSymAddr = (ui64PxE64 & uiAddrMask) >> uiAddrShift << uiLog2Align;
+        ui64Protflags64 = ui64PxE64 & uiPxEProtMask;
+	bPxEValid = (ui64Protflags64 & uiDataValidEnable) ? IMG_TRUE : IMG_FALSE;
+        if(bPxEValid)
+        {
+            _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_TRUE,
+                                  0, 0,
+                                  ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+            iShiftAmount = (IMG_INT32)(uiLog2Align - uiAddrShift);
+
+            /* First put the symbolic representation of the actual
+               address of the entry into a pdump internal register */
+            /* MOV seemed cleaner here, since (a) it's 64-bit; (b) the
+               target is not memory.  However, MOV cannot do the
+               "reference" of the symbolic address.  Apparently WRW is
+               correct. */
+
+			if (pszSymbolicAddr == NULL)
+			{
+				pszSymbolicAddr = "none";
+			}
+
+            if (eMMULevel == MMU_LEVEL_1)
+            {
+             	if (iShiftAmount == 0)
+			    {
+             		eErr = PDumpOSBufprintf(hScript,
+											ui32MaxLen,
+											"WRW%s :%s:%s%016llX:0x%08X :%s:%s:0x%llx | 0x%llX\n",
+										  	pszWrwSuffix,
+											/* dest */
+											pszPDumpDevName,
+											pszMMUPX,
+											ui64PxSymAddr,
+											ui32SymAddrOffset,
+											/* src */
+											pszMemspaceName,
+											pszSymbolicAddr,
+											uiSymbolicAddrOffset,
+											/* ORing prot flags */
+											ui64Protflags64);
+                }
+                else
+                {
+                	eErr = PDumpOSBufprintf(hScript,
+					                        ui32MaxLen,
+					                        "WRW :%s:$1 :%s:%s:0x%llx\n",
+					                        /* dest */
+					                        pszPDumpDevName,
+										    /* src */
+									        pszMemspaceName,
+											pszSymbolicAddr,
+											uiSymbolicAddrOffset);
+                }
+            }
+            else
+            {
+		if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+		{
+			pszMMUPX = MIPSMMUPX_FMT(eMMULevel - 1);
+		}
+		else
+		{
+			pszMMUPX = MMUPX_FMT(eMMULevel - 1);
+		}
+            	eErr = PDumpOSBufprintf(hScript,
+                                    ui32MaxLen,
+                                    "WRW :%s:$1 :%s:%s%016llX:0x0",
+                                    /* dest */
+                                    pszPDumpDevName,
+                                    /* src */
+                                    pszPDumpDevName,
+                                    pszMMUPX,
+                                    ui64PxEValueSymAddr);
+		if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+		{
+			pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+		}
+		else
+		{
+			pszMMUPX = MMUPX_FMT(eMMULevel);
+		}
+            }
+            if (eErr != PVRSRV_OK)
+            {
+                goto ErrUnlock;
+            }
+            PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+            /* Now shift it to the right place, if necessary: */
+            /* Now shift that value down, by the "Align shift"
+               amount, to get it into units (ought to assert that
+               we get an integer - i.e. we don't shift any bits
+               off the bottom, don't know how to do PDUMP
+               assertions yet) and then back up by the right
+               amount to get it into the position of the field.
+               This is optimised into a single shift right by the
+               difference between the two. */
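+            /* Illustration only: with uiLog2Align=12 and uiAddrShift=4,
+               iShiftAmount is 8 and a single "SHR :<dev>:$1 :<dev>:$1 0x8"
+               is emitted instead of an SHR by 12 followed by an SHL by 4. */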
+            if (iShiftAmount > 0)
+            {
+                /* Page X Address is specified in units larger
+                   than the position in the PxE would suggest.  */
+                eErr = PDumpOSBufprintf(hScript,
+                                        ui32MaxLen,
+                                        "SHR :%s:$1 :%s:$1 0x%X",
+                                        /* dest */
+                                        pszPDumpDevName,
+                                        /* src A */
+                                        pszPDumpDevName,
+                                        /* src B */
+                                        iShiftAmount);
+                if (eErr != PVRSRV_OK)
+                {
+                    goto ErrUnlock;
+                }
+                PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+            }
+            else if (iShiftAmount < 0)
+            {
+                /* Page X Address is specified in units smaller
+                   than the position in the PxE would suggest.  */
+                eErr = PDumpOSBufprintf(hScript,
+                                        ui32MaxLen,
+                                        "SHL :%s:$1 :%s:$1 0x%X",
+                                        /* dest */
+                                        pszPDumpDevName,
+                                        /* src A */
+                                        pszPDumpDevName,
+                                        /* src B */
+                                        -iShiftAmount);
+                if (eErr != PVRSRV_OK)
+                {
+                    goto ErrUnlock;
+                }
+                PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+            }
+
+            if (eMMULevel == MMU_LEVEL_1)
+            {
+            	if( iShiftAmount != 0)
+            	{
+			/* Now we can "or" in the protection flags */
+			eErr = PDumpOSBufprintf(hScript,
+                                                ui32MaxLen,
+                                                "OR :%s:$1 :%s:$1 0x%llX",
+                                                /* dest */
+                                                pszPDumpDevName,
+                                                /* src A */
+                                                pszPDumpDevName,
+                                                /* src B */
+                                               ui64Protflags64);
+			if (eErr != PVRSRV_OK)
+			{
+				goto ErrUnlock;
+			}
+			PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+			eErr = PDumpOSBufprintf(hScript,
+                                                ui32MaxLen,
+                                                "WRW%s :%s:%s%016llX:0x%08X :%s:$1 ",
+                                                pszWrwSuffix,
+                                                /* dest */
+                                                pszPDumpDevName,
+                                                pszMMUPX,
+                                                ui64PxSymAddr,
+                                                ui32SymAddrOffset,
+                                                /* src */
+                                                pszPDumpDevName);
+			if(eErr != PVRSRV_OK)
+			{
+				goto ErrUnlock;
+			}
+			PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+            	}
+             }
+            else
+            {
+            	/* Now we can "or" in the protection flags */
+            	eErr = PDumpOSBufprintf(hScript,
+                                    	ui32MaxLen,
+                                    	"OR :%s:$1 :%s:$1 0x%llX",
+                                    	/* dest */
+                                    	pszPDumpDevName,
+                                    	/* src A */
+                                    	pszPDumpDevName,
+                                    	/* src B */
+                                        ui64Protflags64);
+            	if (eErr != PVRSRV_OK)
+            	{
+                	goto ErrUnlock;
+            	}
+                PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+                /* Finally, we write the register into the actual PxE */
+            	eErr = PDumpOSBufprintf(hScript,
+                                        ui32MaxLen,
+                                        "WRW%s :%s:%s%016llX:0x%08X :%s:$1",
+                                        pszWrwSuffix,
+                                        /* dest */
+                                        pszPDumpDevName,
+                                        pszMMUPX,
+                                        ui64PxSymAddr,
+                                        ui32SymAddrOffset,
+                                        /* src */
+                                        pszPDumpDevName);
+				if(eErr != PVRSRV_OK)
+				{
+					goto ErrUnlock;
+				}
+				PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+        	}
+        }
+        else
+        {
+            /* If the entry was "invalid", simply write the actual
+               value found to the memory location */
+            eErr = _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_FALSE,
+                                         uiBytesPerEntry, pvRawBytes,
+                                         ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+            if (eErr != PVRSRV_OK)
+            {
+                goto ErrUnlock;
+            }
+        }
+	}
+
+    /* flush out any partly accumulated stuff for LDB */
+    _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_TRUE,
+                          0, 0,
+                          ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+ErrOut:
+	return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : _PdumpAllocMMUContext
+ * Inputs         : pui32MMUContextID
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : pdump util to allocate MMU contexts
+**************************************************************************/
+static PVRSRV_ERROR _PdumpAllocMMUContext(IMG_UINT32 *pui32MMUContextID)
+{
+	IMG_UINT32 i;
+
+	/* there are MAX_PDUMP_MMU_CONTEXTS contexts available, find one */
+	for(i=0; i<MAX_PDUMP_MMU_CONTEXTS; i++)
+	{
+		if((guiPDumpMMUContextAvailabilityMask & (1U << i)))
+		{
+			/* mark in use */
+			guiPDumpMMUContextAvailabilityMask &= ~(1U << i);
+			*pui32MMUContextID = i;
+			return PVRSRV_OK;
+		}
+	}
+
+	PVR_DPF((PVR_DBG_ERROR, "_PdumpAllocMMUContext: no free MMU context ids"));
+
+	return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+}
+
+
+/**************************************************************************
+ * Function Name  : _PdumpFreeMMUContext
+ * Inputs         : ui32MMUContextID
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : pdump util to free MMU contexts
+**************************************************************************/
+static PVRSRV_ERROR _PdumpFreeMMUContext(IMG_UINT32 ui32MMUContextID)
+{
+	if(ui32MMUContextID < MAX_PDUMP_MMU_CONTEXTS)
+	{
+		/* free the id */
+        PVR_ASSERT (!(guiPDumpMMUContextAvailabilityMask & (1U << ui32MMUContextID)));
+		guiPDumpMMUContextAvailabilityMask |= (1U << ui32MMUContextID);
+		return PVRSRV_OK;
+	}
+
+	PVR_DPF((PVR_DBG_ERROR, "_PdumpFreeMMUContext: MMU context ids invalid"));
+
+	return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUAllocMMUContext
+ * Inputs         :
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Alloc MMU Context
+**************************************************************************/
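+/* Illustration only (hypothetical addresses): for context 0 and a page
+ * catalogue at physical address 0xDEADB000 the emitted line looks like
+ *     MMU :SYSMEM:v0 <type> :SYSMEM:MMUPC_00000000DEADB000
+ * where <type> is the numeric PDUMP_MMU_TYPE value. */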
+PVRSRV_ERROR PDumpMMUAllocMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+                                     IMG_DEV_PHYADDR sPCDevPAddr,
+                                     PDUMP_MMU_TYPE eMMUType,
+                                     IMG_UINT32 *pui32MMUContextID)
+{
+    IMG_UINT64 ui64PCSymAddr;
+    IMG_CHAR *pszMMUPX;
+
+	IMG_UINT32 ui32MMUContextID;
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+	PDUMP_GET_SCRIPT_STRING();
+
+	eErr = _PdumpAllocMMUContext(&ui32MMUContextID);
+	if(eErr != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: _PdumpAllocMMUContext failed: %d",
+				 __func__, eErr));
+        PVR_DBG_BREAK;
+		goto ErrOut;
+	}
+
+	/*
+		create the symbolic address of the PC
+    */
+	ui64PCSymAddr = sPCDevPAddr.uiAddr;
+
+	if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+	{
+		pszMMUPX = MIPSMMUPX_FMT(1);
+		/* Giving it a mock value until the Pdump player implements
+		   the support for the MIPS microAptiv MMU*/
+		eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT;
+	}
+	else
+	{
+		pszMMUPX = MMUPX_FMT(3);
+	}
+
+	PDUMP_LOCK();
+
+	eErr = PDumpOSBufprintf(hScript,
+                            ui32MaxLen, 
+                            "MMU :%s:v%d %d :%s:%s%016llX",
+                            /* mmu context */
+                            pszPDumpMemSpaceName,
+                            ui32MMUContextID,
+                            /* mmu type */
+                            eMMUType,
+                            /* PC base address */
+                            pszPDumpMemSpaceName,
+                            pszMMUPX,
+                            ui64PCSymAddr);
+	if(eErr != PVRSRV_OK)
+	{
+	    PDUMP_UNLOCK();
+        PVR_DBG_BREAK;
+		goto ErrOut;
+	}
+
+	PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+    PDUMP_UNLOCK();
+
+	/* return the MMU Context ID */
+	*pui32MMUContextID = ui32MMUContextID;
+
+ErrOut:
+	return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUFreeMMUContext
+ * Inputs         :
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Free MMU Context
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFreeMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+                                    IMG_UINT32 ui32MMUContextID)
+{
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	eErr = PDumpOSBufprintf(hScript,
+                            ui32MaxLen,
+                            "-- Clear MMU Context for memory space %s", pszPDumpMemSpaceName);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+
+	PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+
+	eErr = PDumpOSBufprintf(hScript,
+                            ui32MaxLen, 
+                            "MMU :%s:v%d",
+                            pszPDumpMemSpaceName,
+                            ui32MMUContextID);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+
+	PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+
+	eErr = _PdumpFreeMMUContext(ui32MMUContextID);
+	if(eErr != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: _PdumpFreeMMUContext failed: %d",
+				 __func__, eErr));
+		goto ErrUnlock;
+	}
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+	return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUActivateCatalog
+ * Inputs         : pszPDumpRegSpaceName, pszPDumpRegName, uiRegAddr,
+ *                  pszPDumpPCSymbolicName
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Writes the page catalogue base address to the given
+ *                  register
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUActivateCatalog(const IMG_CHAR *pszPDumpRegSpaceName,
+                                     const IMG_CHAR *pszPDumpRegName,
+                                     IMG_UINT32 uiRegAddr,
+                                     const IMG_CHAR *pszPDumpPCSymbolicName)
+{
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	if (!PDumpReady())
+	{
+		eErr = PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+		goto ErrOut;
+	}
+
+
+	if (PDumpIsDumpSuspended())
+	{
+		goto ErrOut;
+	}
+
+	PDUMP_LOCK();
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+							"-- Write Page Catalogue Address to %s",
+							pszPDumpRegName);
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+
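+	/* The line emitted below has the form (hypothetical names)
+	 * "WRW :REGBANK:0x0ABC :SYSMEM:PCNAME:0", i.e. on playback, write
+	 * the page catalogue's symbolic address (offset 0) to the register
+	 * at uiRegAddr in the named register space.
+	 */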
+	eErr = PDumpOSBufprintf(hScript,
+	                        ui32MaxLen,
+	                        "WRW :%s:0x%04X %s:0",
+	                        /* dest */
+	                        pszPDumpRegSpaceName,
+	                        uiRegAddr,
+	                        /* src */
+	                        pszPDumpPCSymbolicName);
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+ErrOut:
+	return eErr;
+}
+
+
+PVRSRV_ERROR
+PDumpMMUSAB(const IMG_CHAR *pszPDumpMemNamespace,
+               IMG_UINT32 uiPDumpMMUCtx,
+               IMG_DEV_VIRTADDR sDevAddrStart,
+               IMG_DEVMEM_SIZE_T uiSize,
+               const IMG_CHAR *pszFilename,
+               IMG_UINT32 uiFileOffset,
+			   IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	if (!PDumpReady())
+	{
+		eErr = PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+		goto ErrOut;
+	}
+
+
+	if (PDumpIsDumpSuspended())
+	{
+		eErr = PVRSRV_OK;
+		goto ErrOut;
+	}
+
+	PDUMP_LOCK();
+
+	eErr = PDumpOSBufprintf(hScript,
+	                        ui32MaxLen,
+	                        "SAB :%s:v%x:" IMG_DEV_VIRTADDR_FMTSPEC " "
+	                        IMG_DEVMEM_SIZE_FMTSPEC " "
+	                        "0x%x %s.bin\n",
+	                        pszPDumpMemNamespace,
+	                        uiPDumpMMUCtx,
+	                        sDevAddrStart.uiAddr,
+	                        uiSize,
+	                        uiFileOffset,
+	                        pszFilename);
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+
+	PDumpWriteScript(hScript, ui32PDumpFlags);
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+ErrOut:
+	return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : PdumpWireUpMipsTLB
+**************************************************************************/
+PVRSRV_ERROR PdumpWireUpMipsTLB(PMR *psPMRSource,
+								PMR *psPMRDest,
+								IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+								IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+								IMG_UINT32 ui32AllocationFlags,
+								IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+	IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource;
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+	IMG_DEVMEM_OFFSET_T uiNextSymNameSource;
+	IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	eErr = PMR_PDumpSymbolicAddr(psPMRSource,
+									 uiLogicalOffsetSource,
+									 PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+									 aszMemspaceNameSource,
+									 PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+									 aszSymbolicNameSource,
+									 &uiPDumpSymbolicOffsetSource,
+									 &uiNextSymNameSource);
+
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrOut;
+	}
+
+	eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+									 uiLogicalOffsetDest,
+									 PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+									 aszMemspaceNameDest,
+									 PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+									 aszSymbolicNameDest,
+									 &uiPDumpSymbolicOffsetDest,
+									 &uiNextSymNameDest);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrOut;
+	}
+
+	PDUMP_LOCK();
+
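+	/* The sequence below builds a MIPS TLB entry in the PDump internal
+	 * register $1: load the source physical address, shift it right by 6
+	 * and mask with 0x03FFFFC0 to form the PFN field, OR in the
+	 * allocation flags (valid/cache-policy bits), then write the result
+	 * into the destination TLB slot. The field layout is inferred from
+	 * the constants used here.
+	 */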
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:$1 :%s:%s:0x%llX", aszMemspaceNameSource,
+							aszMemspaceNameSource, aszSymbolicNameSource,
+							uiPDumpSymbolicOffsetSource);
+
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "SHR :%s:$1 :%s:$1 0x6", aszMemspaceNameSource,
+							aszMemspaceNameSource);
+
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "AND :%s:$1 :%s:$1 0x03FFFFC0", aszMemspaceNameSource,
+							aszMemspaceNameSource);
+
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "OR :%s:$1 :%s:$1 0x%X", aszMemspaceNameSource,
+							aszMemspaceNameSource, ui32AllocationFlags);
+
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%llX :%s:$1",aszMemspaceNameDest, aszSymbolicNameDest,
+							uiPDumpSymbolicOffsetDest, aszMemspaceNameSource);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+ErrOut:
+	return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : PdumpInvalidateMipsTLB
+**************************************************************************/
+PVRSRV_ERROR PdumpInvalidateMipsTLB(PMR *psPMRDest,
+									IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+									IMG_UINT32 ui32MipsTLBValidClearMask,
+									IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+	IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+	IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+									 uiLogicalOffsetDest,
+									 PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+									 aszMemspaceNameDest,
+									 PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+									 aszSymbolicNameDest,
+									 &uiPDumpSymbolicOffsetDest,
+									 &uiNextSymNameDest);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrOut;
+	}
+
+	PDUMP_LOCK();
+
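+	/* Read-modify-write: load the TLB entry into $1, AND it with
+	 * ui32MipsTLBValidClearMask to clear the valid bit(s), then write it
+	 * back, invalidating the entry on playback.
+	 */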
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:$1 :%s:%s:0x%llX", aszMemspaceNameDest,
+							aszMemspaceNameDest, aszSymbolicNameDest,
+							uiPDumpSymbolicOffsetDest);
+
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "AND :%s:$1 :%s:$1 0x%X", aszMemspaceNameDest,
+							aszMemspaceNameDest, ui32MipsTLBValidClearMask);
+
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%llX :%s:$1",aszMemspaceNameDest, aszSymbolicNameDest,
+							uiPDumpSymbolicOffsetDest, aszMemspaceNameDest);
+
+
+	if (eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+ErrOut:
+	return eErr;
+}
+
+
+#endif /* #if defined (PDUMP) */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pdump_physmem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pdump_physmem.c
new file mode 100644
index 0000000..a99ee3f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pdump_physmem.c
@@ -0,0 +1,634 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physmem PDump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common PDump (PMR specific) functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if defined(PDUMP)
+
+#if defined(LINUX)
+#include <linux/ctype.h>
+#else
+#include <ctype.h>
+#endif
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "pdump_physmem.h"
+#include "pdump_osfunc.h"
+#include "pdump_km.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+
+/* #define MAX_PDUMP_MMU_CONTEXTS	(10) */
+/* static IMG_UINT32 guiPDumpMMUContextAvailabilityMask = (1<<MAX_PDUMP_MMU_CONTEXTS)-1; */
+
+
+struct _PDUMP_PHYSMEM_INFO_T_
+{
+	IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+	IMG_UINT64 ui64Size;
+	IMG_UINT32 ui32Align;
+	IMG_UINT32 ui32SerialNum;
+};
+
+static IMG_BOOL _IsAllowedSym(IMG_CHAR sym)
+{
+	/* Letters, digits and '_' are allowed */
+	if (isalnum(sym) || sym == '_')
+		return IMG_TRUE;
+	else
+		return IMG_FALSE;
+}
+
+static IMG_BOOL _IsLowerCaseSym(IMG_CHAR sym)
+{
+	if (sym >= 'a' && sym <= 'z')
+		return IMG_TRUE;
+	else
+		return IMG_FALSE;
+}
+
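+/* Sanitise a string for use as a PDump symbolic name: characters other
+ * than letters, digits and '_' become '_', and lower-case letters are
+ * converted to upper case.
+ */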
+void PDumpMakeStringValid(IMG_CHAR *pszString,
+                          IMG_UINT32 ui32StrLen)
+{
+	IMG_UINT32 i;
+	for (i = 0; i < ui32StrLen; i++)
+	{
+		if (_IsAllowedSym(pszString[i]))
+		{
+			/* Up-case lower-case letters (ASCII 'a' - 'A' == 32);
+			 * upper-case letters and digits are kept as-is. */
+			if (_IsLowerCaseSym(pszString[i]))
+				pszString[i] = pszString[i] - 32;
+		}
+		else
+		{
+			pszString[i] = '_';
+		}
+	}
+}
+
+/**************************************************************************
+ * Function Name  : PDumpGetSymbolicAddr
+ * Inputs         :
+ * Outputs        :
+ * Returns        : PVRSRV_ERROR
+ * Description    : Return a pointer to the allocation's symbolic address
+**************************************************************************/
+PVRSRV_ERROR PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle,
+                                  IMG_CHAR **ppszSymbolicAddress)
+{
+	PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+	if (!hPhysmemPDumpHandle)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psPDumpAllocationInfo = (PDUMP_PHYSMEM_INFO_T *)hPhysmemPDumpHandle;
+	*ppszSymbolicAddress = psPDumpAllocationInfo->aszSymbolicAddress;
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpMalloc
+ * Inputs         :
+ * Outputs        :
+ * Returns        : PVRSRV_ERROR
+ * Description    : Emit a MALLOC/CALLOC to the PDump script stream
+**************************************************************************/
+PVRSRV_ERROR PDumpMalloc(const IMG_CHAR *pszDevSpace,
+                         const IMG_CHAR *pszSymbolicAddress,
+                         IMG_UINT64 ui64Size,
+                         IMG_DEVMEM_ALIGN_T uiAlign,
+                         IMG_BOOL bInitialise,
+                         IMG_UINT32 ui32InitValue,
+                         IMG_HANDLE *phHandlePtr,
+                         IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	psPDumpAllocationInfo = OSAllocMem(sizeof(*psPDumpAllocationInfo));
+	PVR_ASSERT(psPDumpAllocationInfo != NULL);
+
+	/*
+		Set continuous flag because there is no way of knowing beforehand which
+		allocation is needed for playback of the captured range.
+	*/
+	ui32PDumpFlags |= PDUMP_CONT;
+
+	/*
+		Construct the symbolic address.
+	*/
+
+	OSSNPrintf(psPDumpAllocationInfo->aszSymbolicAddress,
+	           sizeof(psPDumpAllocationInfo->aszSymbolicAddress),
+	           ":%s:%s",
+	           pszDevSpace,
+	           pszSymbolicAddress);
+
+	/*
+		Write to the MMU script stream indicating the memory allocation
+	*/
+	PDUMP_LOCK();
+	if (bInitialise)
+	{
+		eError = PDumpOSBufprintf(hScript, ui32MaxLen, "CALLOC %s 0x%llX 0x%llX 0x%X\n",
+								psPDumpAllocationInfo->aszSymbolicAddress,
+								ui64Size,
+								uiAlign,
+								ui32InitValue);
+	}
+	else
+	{
+		eError = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC %s 0x%llX 0x%llX\n",
+								psPDumpAllocationInfo->aszSymbolicAddress,
+								ui64Size,
+								uiAlign);
+	}
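+	/* CALLOC additionally instructs the PDump player to fill the
+	 * allocation with ui32InitValue; MALLOC leaves the contents
+	 * undefined (semantics inferred from the parameters above).
+	 */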
+
+	if(eError != PVRSRV_OK)
+	{
+		OSFreeMem(psPDumpAllocationInfo);
+		goto _return;
+	}
+
+	PDumpWriteScript(hScript, ui32PDumpFlags);
+
+	psPDumpAllocationInfo->ui64Size = ui64Size;
+	psPDumpAllocationInfo->ui32Align = TRUNCATE_64BITS_TO_32BITS(uiAlign);
+
+	*phHandlePtr = (IMG_HANDLE)psPDumpAllocationInfo;
+
+_return:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpFree
+ * Inputs         :
+ * Outputs        :
+ * Returns        : PVRSRV_ERROR
+ * Description    : Emit a FREE to the PDump script and release the info
+**************************************************************************/
+PVRSRV_ERROR PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+	PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	psPDumpAllocationInfo = (PDUMP_PHYSMEM_INFO_T *)hPDumpAllocationInfoHandle;
+
+	/*
+		Write to the MMU script stream indicating the memory free
+	*/
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE %s\n",
+	                          psPDumpAllocationInfo->aszSymbolicAddress);
+	if(eError != PVRSRV_OK)
+	{
+		goto _return;
+	}
+
+	PDumpWriteScript(hScript, ui32Flags);
+	OSFreeMem(psPDumpAllocationInfo);
+
+_return:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRWRW32(const IMG_CHAR *pszDevSpace,
+              const IMG_CHAR *pszSymbolicName,
+              IMG_DEVMEM_OFFSET_T uiOffset,
+              IMG_UINT32 ui32Value,
+              PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_STRING()
+
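+	/* Emits, e.g., "WRW :SYSMEM:SYMNAME:0x10 0xCAFEF00D" (hypothetical
+	 * names): on playback, write the 32-bit value to byte offset 0x10 of
+	 * the named allocation. The WRW64/RDW variants below follow the same
+	 * pattern.
+	 */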
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "WRW :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+	                          PMR_VALUE32_FMTSPEC " ",
+	                          pszDevSpace,
+	                          pszSymbolicName,
+	                          uiOffset,
+	                          ui32Value);
+	if(eError != PVRSRV_OK)
+	{
+		goto _return;
+	}
+
+	PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRWRW32InternalVarToMem(const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              const IMG_CHAR *pszInternalVar,
+                              PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "WRW :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s ",
+	                          pszDevSpace,
+	                          pszSymbolicName,
+	                          uiOffset,
+	                          pszInternalVar);
+	if(eError != PVRSRV_OK)
+	{
+		goto _return;
+	}
+
+	PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRRDW32MemToInternalVar(const IMG_CHAR *pszInternalVar,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "RDW %s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " ",
+	                          pszInternalVar,
+	                          pszDevSpace,
+	                          pszSymbolicName,
+	                          uiOffset);
+	if(eError != PVRSRV_OK)
+	{
+		goto _return;
+	}
+
+	PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRWRW64(const IMG_CHAR *pszDevSpace,
+              const IMG_CHAR *pszSymbolicName,
+              IMG_DEVMEM_OFFSET_T uiOffset,
+              IMG_UINT64 ui64Value,
+              PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "WRW64 :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+	                          PMR_VALUE64_FMTSPEC " ",
+	                          pszDevSpace,
+	                          pszSymbolicName,
+	                          uiOffset,
+	                          ui64Value);
+	if(eError != PVRSRV_OK)
+	{
+		goto _return;
+	}
+
+	PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRWRW64InternalVarToMem(const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              const IMG_CHAR *pszInternalVar,
+                              PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "WRW64 :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s ",
+	                          pszDevSpace,
+	                          pszSymbolicName,
+	                          uiOffset,
+	                          pszInternalVar);
+	if(eError != PVRSRV_OK)
+	{
+		goto _return;
+	}
+
+	PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRRDW64MemToInternalVar(const IMG_CHAR *pszInternalVar,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "RDW64 %s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " ",
+	                          pszInternalVar,
+	                          pszDevSpace,
+	                          pszSymbolicName,
+	                          uiOffset);
+	if(eError != PVRSRV_OK)
+	{
+		goto _return;
+	}
+
+	PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRLDB(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFilename,
+            IMG_UINT32 uiFileOffset,
+            PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_STRING()
+
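+	/* Emits, e.g., "LDB :SYSMEM:SYMNAME:0x0 0x1000 0x0 out.prm"
+	 * (hypothetical values): on playback, load uiSize bytes from
+	 * uiFileOffset in pszFilename into the allocation at uiOffset.
+	 */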
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "LDB :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+	                          IMG_DEVMEM_SIZE_FMTSPEC " "
+	                          PDUMP_FILEOFFSET_FMTSPEC " %s\n",
+	                          pszDevSpace,
+	                          pszSymbolicName,
+	                          uiOffset,
+	                          uiSize,
+	                          uiFileOffset,
+	                          pszFilename);
+	if(eError != PVRSRV_OK)
+	{
+		goto _return;
+	}
+
+	PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+PVRSRV_ERROR PDumpPMRSAB(const IMG_CHAR *pszDevSpace,
+                         const IMG_CHAR *pszSymbolicName,
+                         IMG_DEVMEM_OFFSET_T uiOffset,
+                         IMG_DEVMEM_SIZE_T uiSize,
+                         const IMG_CHAR *pszFileName,
+                         IMG_UINT32 uiFileOffset)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 uiPDumpFlags;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	uiPDumpFlags = 0;
+
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "SAB :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+	                          IMG_DEVMEM_SIZE_FMTSPEC " "
+	                          "0x%08X %s.bin\n",
+	                          pszDevSpace,
+	                          pszSymbolicName,
+	                          uiOffset,
+	                          uiSize,
+	                          uiFileOffset,
+	                          pszFileName);
+	if(eError != PVRSRV_OK)
+	{
+		goto _return;
+	}
+
+	PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRPOL(const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT32 ui32Value,
+            IMG_UINT32 ui32Mask,
+            PDUMP_POLL_OPERATOR eOperator,
+            IMG_UINT32 uiCount,
+            IMG_UINT32 uiDelay,
+            PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_STRING()
+
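+	/* POL polls the 32-bit word at the symbolic address until
+	 * (value & ui32Mask) compares against ui32Value under eOperator,
+	 * retrying up to uiCount times with uiDelay between reads
+	 * (semantics inferred from the operand order).
+	 */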
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "POL :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+	                          "0x%08X 0x%08X %d %d %d\n",
+	                          pszMemspaceName,
+	                          pszSymbolicName,
+	                          uiOffset,
+	                          ui32Value,
+	                          ui32Mask,
+	                          eOperator,
+	                          uiCount,
+	                          uiDelay);
+	if(eError != PVRSRV_OK)
+	{
+		goto _return;
+	}
+
+	PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRCBP(const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PDUMP_FLAGS_T uiPDumpFlags = 0;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	PDUMP_LOCK();
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "CBP :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+	                          IMG_DEVMEM_OFFSET_FMTSPEC " " IMG_DEVMEM_SIZE_FMTSPEC " " IMG_DEVMEM_SIZE_FMTSPEC "\n",
+	                          pszMemspaceName,
+	                          pszSymbolicName,
+	                          uiReadOffset,
+	                          uiWriteOffset,
+	                          uiPacketSize,
+	                          uiBufferSize);
+
+	if(eError != PVRSRV_OK)
+	{
+		goto _return;
+	}
+
+	PDumpWriteScript(hScript, uiPDumpFlags);
+
+_return:
+	PDUMP_UNLOCK();
+	return eError;
+}
+
+PVRSRV_ERROR
+PDumpWriteBuffer(IMG_UINT8 *pcBuffer,
+                 size_t uiNumBytes,
+                 PDUMP_FLAGS_T uiPDumpFlags,
+                 IMG_CHAR *pszFilenameOut,
+                 size_t uiFilenameBufSz,
+                 PDUMP_FILEOFFSET_T *puiOffsetOut)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(uiFilenameBufSz);
+
+	if (!PDumpReady())
+	{
+		return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+	}
+
+	PVR_ASSERT(uiNumBytes > 0);
+
+	/* PRQA S 3415 1 */ /* side effects desired */
+	if (PDumpIsDumpSuspended())
+	{
+		return PVRSRV_ERROR_PDUMP_NOT_ALLOWED;
+	}
+
+	PVR_ASSERT(uiFilenameBufSz <= PDUMP_PARAM_MAX_FILE_NAME);
+
+	PDUMP_LOCK();
+
+	eError = PDumpWriteParameter(pcBuffer, uiNumBytes, uiPDumpFlags, puiOffsetOut, pszFilenameOut);
+
+	PDUMP_UNLOCK();
+
+	if ((eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED) && (eError != PVRSRV_OK))
+	{
+		PVR_LOGR_IF_ERROR(eError, "PDumpWriteParameter");
+	}
+	/* Otherwise the write to the parameter file succeeded, or was prevented
+	 * under the current flags and driver state; skip further writes and let
+	 * the caller know.
+	 */
+	return eError;
+}
+
+#endif /* PDUMP */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physheap.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physheap.c
new file mode 100644
index 0000000..1f6e4fe
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physheap.c
@@ -0,0 +1,349 @@
+/*************************************************************************/ /*!
+@File           physheap.c
+@Title          Physical heap management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Management functions for the physical heap(s). A heap contains
+                all the information required by services when using memory from
+                that heap (such as CPU <> Device physical address translation).
+                A system must register one heap but can have more than one,
+                which is why each heap must register with a (system) unique ID.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#include "img_types.h"
+#include "physheap.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+
+struct _PHYS_HEAP_
+{
+	/*! ID of this physical memory heap */
+	IMG_UINT32					ui32PhysHeapID;
+	/*! The type of this heap */
+	PHYS_HEAP_TYPE			eType;
+
+	/*! PDump name of this physical memory heap */
+	IMG_CHAR					*pszPDumpMemspaceName;
+	/*! Private data for the translate routines */
+	IMG_HANDLE					hPrivData;
+	/*! Function callbacks */
+	PHYS_HEAP_FUNCTIONS			*psMemFuncs;
+
+	/*! Array of sub-regions of the heap */
+	PHYS_HEAP_REGION			*pasRegions;
+	IMG_UINT32					ui32NumOfRegions;
+
+	/*! Refcount */
+	IMG_UINT32					ui32RefCount;
+	/*! Pointer to next physical heap */
+	struct _PHYS_HEAP_		*psNext;
+};
+
+static PHYS_HEAP *g_psPhysHeapList;
+static POS_LOCK g_hPhysHeapLock;
+
+#if defined(REFCOUNT_DEBUG)
+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...)	\
+	PVRSRVDebugPrintf(PVR_DBG_WARNING,	\
+			  __FILE__,		\
+			  __LINE__,		\
+			  fmt,			\
+			  __VA_ARGS__)
+#else
+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+
+PVRSRV_ERROR PhysHeapRegister(PHYS_HEAP_CONFIG *psConfig,
+							  PHYS_HEAP **ppsPhysHeap)
+{
+	PHYS_HEAP *psNew;
+	PHYS_HEAP *psTmp;
+
+	PVR_DPF_ENTERED;
+
+	if (psConfig->eType == PHYS_HEAP_TYPE_UNKNOWN)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
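+	/* Note: the heap list is walked here without taking g_hPhysHeapLock;
+	 * registration is presumably only called during single-threaded
+	 * driver initialisation.
+	 */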
+	/* Check this heap ID isn't already in use */
+	psTmp = g_psPhysHeapList;
+	while (psTmp)
+	{
+		if (psTmp->ui32PhysHeapID == psConfig->ui32PhysHeapID)
+		{
+			return PVRSRV_ERROR_PHYSHEAP_ID_IN_USE;
+		}
+		psTmp = psTmp->psNext;
+	}
+
+	psNew = OSAllocMem(sizeof(PHYS_HEAP));
+	if (psNew == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psNew->ui32PhysHeapID = psConfig->ui32PhysHeapID;
+	psNew->eType = psConfig->eType;
+	psNew->psMemFuncs = psConfig->psMemFuncs;
+	psNew->hPrivData = psConfig->hPrivData;
+	psNew->ui32RefCount = 0;
+	psNew->pszPDumpMemspaceName = psConfig->pszPDumpMemspaceName;
+
+	psNew->pasRegions = psConfig->pasRegions;
+	psNew->ui32NumOfRegions = psConfig->ui32NumOfRegions;
+
+	psNew->psNext = g_psPhysHeapList;
+	g_psPhysHeapList = psNew;
+
+	*ppsPhysHeap = psNew;
+
+	PVR_DPF_RETURN_RC1(PVRSRV_OK, *ppsPhysHeap);
+}
+
+void PhysHeapUnregister(PHYS_HEAP *psPhysHeap)
+{
+	PVR_DPF_ENTERED1(psPhysHeap);
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+#endif
+	{
+		PVR_ASSERT(psPhysHeap->ui32RefCount == 0);
+	}
+
+	if (g_psPhysHeapList == psPhysHeap)
+	{
+		g_psPhysHeapList = psPhysHeap->psNext;
+	}
+	else
+	{
+		PHYS_HEAP *psTmp = g_psPhysHeapList;
+
+		while(psTmp->psNext != psPhysHeap)
+		{
+			psTmp = psTmp->psNext;
+		}
+		psTmp->psNext = psPhysHeap->psNext;
+	}
+
+	OSFreeMem(psPhysHeap);
+
+	PVR_DPF_RETURN;
+}
+
+PVRSRV_ERROR PhysHeapAcquire(IMG_UINT32 ui32PhysHeapID,
+							 PHYS_HEAP **ppsPhysHeap)
+{
+	PHYS_HEAP *psTmp = g_psPhysHeapList;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_DPF_ENTERED1(ui32PhysHeapID);
+
+	OSLockAcquire(g_hPhysHeapLock);
+
+	while (psTmp)
+	{
+		if (psTmp->ui32PhysHeapID == ui32PhysHeapID)
+		{
+			break;
+		}
+		psTmp = psTmp->psNext;
+	}
+
+	if (psTmp == NULL)
+	{
+		eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
+	}
+	else
+	{
+		psTmp->ui32RefCount++;
+		PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", __FUNCTION__, psTmp, psTmp->ui32RefCount);
+	}
+
+	OSLockRelease(g_hPhysHeapLock);
+
+	*ppsPhysHeap = psTmp;
+	PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap);
+}
+
+void PhysHeapRelease(PHYS_HEAP *psPhysHeap)
+{
+	PVR_DPF_ENTERED1(psPhysHeap);
+
+	OSLockAcquire(g_hPhysHeapLock);
+	psPhysHeap->ui32RefCount--;
+	PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", __FUNCTION__, psPhysHeap, psPhysHeap->ui32RefCount);
+	OSLockRelease(g_hPhysHeapLock);
+
+	PVR_DPF_RETURN;
+}
+
+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap)
+{
+	return psPhysHeap->eType;
+}
+
+/*
+ * This function sets psDevPAddr to whatever the system layer has
+ * configured for the referenced region. It does not validate the
+ * address itself; it only fails if the region ID is out of range.
+ */
+PVRSRV_ERROR PhysHeapRegionGetDevPAddr(PHYS_HEAP *psPhysHeap,
+								IMG_UINT32 ui32RegionId,
+								IMG_DEV_PHYADDR *psDevPAddr)
+{
+	if (ui32RegionId < psPhysHeap->ui32NumOfRegions)
+	{
+		*psDevPAddr = psPhysHeap->pasRegions[ui32RegionId].sCardBase;
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+}
+
+/*
+ * This function sets psCpuPAddr to whatever the system layer has
+ * configured for the referenced region. It does not validate the
+ * address itself; it only fails if the region ID is out of range.
+ */
+PVRSRV_ERROR PhysHeapRegionGetCpuPAddr(PHYS_HEAP *psPhysHeap,
+								IMG_UINT32 ui32RegionId,
+								IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	if (ui32RegionId < psPhysHeap->ui32NumOfRegions)
+	{
+		*psCpuPAddr = psPhysHeap->pasRegions[ui32RegionId].sStartAddr;
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+}
+
+PVRSRV_ERROR PhysHeapRegionGetSize(PHYS_HEAP *psPhysHeap,
+								   IMG_UINT32 ui32RegionId,
+								   IMG_UINT64 *puiSize)
+{
+	if (ui32RegionId < psPhysHeap->ui32NumOfRegions)
+	{
+		*puiSize = psPhysHeap->pasRegions[ui32RegionId].uiSize;
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+}
+
+void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
+								IMG_UINT32 ui32NumOfAddr,
+								IMG_DEV_PHYADDR *psDevPAddr,
+								IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	psPhysHeap->psMemFuncs->pfnCpuPAddrToDevPAddr(psPhysHeap->hPrivData,
+												 ui32NumOfAddr,
+												 psDevPAddr,
+												 psCpuPAddr);
+}
+
+void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap,
+								IMG_UINT32 ui32NumOfAddr,
+								IMG_CPU_PHYADDR *psCpuPAddr,
+								IMG_DEV_PHYADDR *psDevPAddr)
+{
+	psPhysHeap->psMemFuncs->pfnDevPAddrToCpuPAddr(psPhysHeap->hPrivData,
+												 ui32NumOfAddr,
+												 psCpuPAddr,
+												 psDevPAddr);
+}
+
+IMG_UINT32 PhysHeapGetRegionId(PHYS_HEAP *psPhysHeap,
+								PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+	if (psPhysHeap->psMemFuncs->pfnGetRegionId == NULL)
+	{
+		return 0;
+	}
+
+	return psPhysHeap->psMemFuncs->pfnGetRegionId(psPhysHeap->hPrivData,
+												 uiAllocFlags);
+}
+
+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap)
+{
+	return psPhysHeap->pszPDumpMemspaceName;
+}
+
+PVRSRV_ERROR PhysHeapInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	g_psPhysHeapList = NULL;
+
+	eError = OSLockCreate(&g_hPhysHeapLock, LOCK_TYPE_NONE);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PhysHeapLock: %s",
+										__func__,
+										PVRSRVGETERRORSTRING(eError)));
+		return eError;
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PhysHeapDeinit(void)
+{
+	PVR_ASSERT(g_psPhysHeapList == NULL);
+
+	OSLockDestroy(g_hPhysHeapLock);
+
+	return PVRSRV_OK;
+}
+
+IMG_UINT32 PhysHeapNumberOfRegions(PHYS_HEAP *psPhysHeap)
+{
+	return psPhysHeap->ui32NumOfRegions;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physmem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physmem.c
new file mode 100644
index 0000000..95413cf
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physmem.c
@@ -0,0 +1,614 @@
+/*************************************************************************/ /*!
+@File           physmem.c
+@Title          Physmem
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common entry point for creation of RAM backed PMR's
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "device.h"
+#include "physmem.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "pdump_physmem.h"
+#include "pdump_km.h"
+#include "rgx_heaps.h"
+
+#if defined(DEBUG)
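+/* Debug fault injection: when set non-zero (e.g. from a debugger, an
+ * assumption about how it is driven), the gPMRAllocFail-th and all
+ * subsequent calls to PhysmemNewRamBackedPMR() below fail with
+ * PVRSRV_ERROR_OUT_OF_MEMORY.
+ */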
+IMG_UINT32 gPMRAllocFail = 0;
+#endif /* defined(DEBUG) */
+
+PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE	*psDevNode,
+                             IMG_UINT32 ui32MemSize,
+                             IMG_UINT32 ui32Log2Align,
+                             const IMG_UINT8 u8Value,
+                             IMG_BOOL bInitPage,
+#if defined(PDUMP)
+                             const IMG_CHAR *pszDevSpace,
+                             const IMG_CHAR *pszSymbolicAddress,
+                             IMG_HANDLE *phHandlePtr,
+#endif
+                             IMG_HANDLE hMemHandle,
+                             IMG_DEV_PHYADDR *psDevPhysAddr)
+{
+	void *pvCpuVAddr;
+	PVRSRV_ERROR eError;
+#if defined(PDUMP)
+	IMG_CHAR szFilenameOut[PDUMP_PARAM_MAX_FILE_NAME];
+	PDUMP_FILEOFFSET_T uiOffsetOut;
+	IMG_UINT32 ui32PageSize;
+	IMG_UINT32 ui32PDumpMemSize = ui32MemSize;
+#endif
+	PG_HANDLE *psMemHandle;
+	IMG_UINT64 uiMask;
+	IMG_DEV_PHYADDR sDevPhysAddr_int;
+
+	psMemHandle = hMemHandle;
+
+	/* Allocate the pages */
+	eError = psDevNode->pfnDevPxAlloc(psDevNode,
+	                                  TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize),
+	                                  psMemHandle,
+	                                  &sDevPhysAddr_int);
+	if (PVRSRV_OK != eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Unable to allocate the pages"));
+		return eError;
+	}
+
+	/* Check whether the page allocator returned pages with our desired
+	 * alignment, which is likely but not guaranteed.
+	 */
+	uiMask = (1ULL << ui32Log2Align) - 1;
+	if (ui32Log2Align && (sDevPhysAddr_int.uiAddr & uiMask))
+	{
+		/* Fall back to over-allocating and aligning up. */
+		psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+
+		ui32MemSize += (IMG_UINT32) uiMask;
+		eError = psDevNode->pfnDevPxAlloc(psDevNode,
+		                                  TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize),
+		                                  psMemHandle,
+		                                  &sDevPhysAddr_int);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Unable to over-allocate the pages"));
+			return eError;
+		}
+
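+		/* Adding uiMask and then clearing the low bits rounds the
+		 * address up to the next 2^ui32Log2Align boundary within the
+		 * over-sized allocation. */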
+		sDevPhysAddr_int.uiAddr += uiMask;
+		sDevPhysAddr_int.uiAddr &= ~uiMask;
+	}
+	*psDevPhysAddr = sDevPhysAddr_int;
+
+#if defined(PDUMP)
+	ui32PageSize = ui32Log2Align ? (1 << ui32Log2Align) : OSGetPageSize();
+	eError = PDumpMalloc(pszDevSpace,
+								pszSymbolicAddress,
+								ui32PDumpMemSize,
+								ui32PageSize,
+								IMG_FALSE,
+								0,
+								phHandlePtr,
+								PDUMP_NONE);
+	if (PVRSRV_OK != eError)
+	{
+		PDUMPCOMMENT("Allocating pages failed");
+		*phHandlePtr = NULL;
+	}
+#endif
+
+	if (bInitPage)
+	{
+		/* Map the page into the CPU VA space. */
+		eError = psDevNode->pfnDevPxMap(psDevNode,
+		                                psMemHandle,
+		                                ui32MemSize,
+		                                &sDevPhysAddr_int,
+		                                &pvCpuVAddr);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Unable to map the allocated page"));
+			psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+			return eError;
+		}
+
+		/* Fill the memory with the given content. */
+		OSDeviceMemSet(pvCpuVAddr, u8Value, ui32MemSize);
+
+		/* Clean the CPU cache so the device sees the new contents. */
+		eError = psDevNode->pfnDevPxClean(psDevNode,
+		                                  psMemHandle,
+		                                  0,
+		                                  ui32MemSize);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Unable to clean the allocated page"));
+			psDevNode->pfnDevPxUnMap(psDevNode, psMemHandle, pvCpuVAddr);
+			psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+			return eError;
+		}
+
+#if defined(PDUMP)
+		/* PDumping of the page contents can be done in two ways:
+		 * 1. Store the single-byte init value in the .prm file and
+		 *    load that value repeatedly across the entire page buffer.
+		 *    This method requires a lot of LDBs inserted into out2.txt.
+		 *
+		 * 2. Store the entire contents of the buffer in the .prm file
+		 *    and load them back with a single LDB instruction.
+		 *    This method was chosen; the .prm file may grow, but not
+		 *    significantly for an allocation of this size.
+		 */
+		/* Write the buffer contents to the .prm file. */
+		eError = PDumpWriteBuffer(pvCpuVAddr,
+		                          ui32PDumpMemSize,
+		                          PDUMP_FLAGS_CONTINUOUS,
+		                          szFilenameOut,
+		                          sizeof(szFilenameOut),
+		                          &uiOffsetOut);
+		if (PVRSRV_OK == eError)
+		{
+			/* Load the buffer back to the allocated memory when playing the pdump */
+			eError = PDumpPMRLDB(pszDevSpace,
+			                     pszSymbolicAddress,
+			                     0,
+			                     ui32PDumpMemSize,
+			                     szFilenameOut,
+			                     uiOffsetOut,
+			                     PDUMP_FLAGS_CONTINUOUS);
+			if (PVRSRV_OK != eError)
+			{
+				PDUMP_ERROR(eError, "Failed to write LDB statement to script file");
+				PVR_DPF((PVR_DBG_ERROR, "Failed to write LDB statement to script file, error %d", eError));
+			}
+
+		}
+		else if (eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+		{
+			PDUMP_ERROR(eError, "Failed to write device allocation to parameter file");
+			PVR_DPF((PVR_DBG_ERROR, "Failed to write device allocation to parameter file, error %d", eError));
+		}
+		else
+		{
+			/* The write to the parameter file was prevented under the
+			 * current flags and driver state, so skip the script write
+			 * and report success.
+			 */
+			eError = PVRSRV_OK;
+		}
+#endif
+
+		/* Unmap the page. */
+		psDevNode->pfnDevPxUnMap(psDevNode,
+		                         psMemHandle,
+		                         pvCpuVAddr);
+	}
+
+	return PVRSRV_OK;
+}
+
+void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode,
+#if defined(PDUMP)
+							IMG_HANDLE hPDUMPMemHandle,
+#endif
+							IMG_HANDLE	hMemHandle)
+{
+	PG_HANDLE *psMemHandle;
+
+	psMemHandle = hMemHandle;
+	psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+#if defined(PDUMP)
+	if (NULL != hPDUMPMemHandle)
+	{
+		PDumpFree(hPDUMPMemHandle);
+	}
+#endif
+
+}
+
+
+/* Checks the input parameters and adjusts them if possible and necessary */
+static inline PVRSRV_ERROR _ValidateParams(IMG_UINT32 ui32NumPhysChunks,
+                                           IMG_UINT32 ui32NumVirtChunks,
+                                           PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                           IMG_UINT32 *puiLog2AllocPageSize,
+                                           IMG_DEVMEM_SIZE_T *puiSize,
+                                           PMR_SIZE_T *puiChunkSize)
+{
+	IMG_UINT32 uiLog2AllocPageSize = *puiLog2AllocPageSize;
+	IMG_DEVMEM_SIZE_T uiSize = *puiSize;
+	PMR_SIZE_T uiChunkSize = *puiChunkSize;
+	/* An allocation is sparse if the numbers of virtual and physical chunks
+	 * differ, or, more generally, if it has more than one virtual chunk. */
+	IMG_BOOL bIsSparse = (ui32NumVirtChunks != ui32NumPhysChunks ||
+			ui32NumVirtChunks > 1) ? IMG_TRUE : IMG_FALSE;
+
+	/* Protect against ridiculous page sizes */
+	if (uiLog2AllocPageSize > RGX_HEAP_2MB_PAGE_SHIFT)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Page size is too big: 2^%u.", uiLog2AllocPageSize));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Sanity check of the alloc size */
+	if (uiSize >= 0x1000000000ULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Cancelling allocation request of over 64 GB. "
+				 "This is likely a bug."
+				, __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Fail if requesting coherency on one side but uncached on the other */
+	if ( (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) &&
+	         (PVRSRV_CHECK_GPU_UNCACHED(uiFlags) || PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags))) )
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Request for CPU coherency but specifying GPU uncached "
+				"Please use GPU cached flags for coherency."));
+		return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+	}
+
+	if ( (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) &&
+	         (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags))) )
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Request for GPU coherency but specifying CPU uncached "
+				"Please use CPU cached flags for coherency."));
+		return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+	}
+
+	if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) && PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Zero on Alloc and Poison on Alloc are mutually exclusive.",
+				__func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (bIsSparse)
+	{
+		/* For sparse allocations we need valid parameters, starting with a suitable page size... */
+		if (OSGetPageShift() > uiLog2AllocPageSize)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Invalid log2-contiguity for sparse allocation. "
+					"Requested %u, required minimum %zd",
+					__func__,
+					uiLog2AllocPageSize,
+					OSGetPageShift() ));
+
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		/* ... the chunk size must be equal to the page size ... */
+		if ( uiChunkSize != (1 << uiLog2AllocPageSize) )
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Invalid chunk size for sparse allocation. "
+					"Requested %#llx, must be same as page size %#x.",
+					__func__,
+					(long long unsigned) uiChunkSize,
+					1 << uiLog2AllocPageSize ));
+
+			return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+		}
+
+		if (ui32NumVirtChunks * uiChunkSize != uiSize)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Total alloc size (%#llx) is not qual "
+					"to virtual chunks * chunk size (%#llx)",
+					__func__,
+					(long long unsigned) uiSize,
+					(long long unsigned) (ui32NumVirtChunks * uiChunkSize) ));
+
+			return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+		}
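+		/* Worked example (hypothetical numbers): a 2 MB sparse
+		 * allocation with 4 KB pages must be requested with
+		 * uiChunkSize = 0x1000 and ui32NumVirtChunks = 512, since
+		 * 512 * 0x1000 == 0x200000 == uiSize. */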
+
+		if (ui32NumPhysChunks > ui32NumVirtChunks)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Number of physical chunks (%u) must not be greater "
+					"than number of virtual chunks (%u)",
+					__func__,
+					ui32NumPhysChunks,
+					ui32NumVirtChunks));
+
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+	else
+	{
+		/*
+		 * Silently round up the alignment/page size if the request was less
+		 * than PAGE_SHIFT, because it is never harmful for memory to be
+		 * _more_ contiguous than was desired.
+		 */
+		uiLog2AllocPageSize = OSGetPageShift() > uiLog2AllocPageSize ?
+				OSGetPageShift() : uiLog2AllocPageSize;
+
+		/* Same for total size */
+		uiSize = PVR_ALIGN(uiSize, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+		*puiChunkSize = uiSize;
+	}
+
+	if ((uiSize & ((1ULL << uiLog2AllocPageSize) - 1)) != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+							"%s: Total size (%#llx) must be a multiple "
+							"of the requested contiguity (%u)",
+							__func__,
+							(long long unsigned) uiSize,
+							(1 << uiLog2AllocPageSize)));
+		return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+	}
+
+	*puiLog2AllocPageSize = uiLog2AllocPageSize;
+	*puiSize = uiSize;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection,
+                       PVRSRV_DEVICE_NODE *psDevNode,
+                       IMG_DEVMEM_SIZE_T uiSize,
+                       PMR_SIZE_T uiChunkSize,
+                       IMG_UINT32 ui32NumPhysChunks,
+                       IMG_UINT32 ui32NumVirtChunks,
+                       IMG_UINT32 *pui32MappingTable,
+                       IMG_UINT32 uiLog2AllocPageSize,
+                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                       IMG_UINT32 uiAnnotationLength,
+                       const IMG_CHAR *pszAnnotation,
+                       PMR **ppsPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx;
+	PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize =
+		psDevNode->psDevConfig->pfnCheckMemAllocSize;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(uiAnnotationLength);
+
+	eError = _ValidateParams(ui32NumPhysChunks,
+	                         ui32NumVirtChunks,
+	                         uiFlags,
+	                         &uiLog2AllocPageSize,
+	                         &uiSize,
+	                         &uiChunkSize);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* Lookup the requested physheap index to use for this PMR allocation */
+	if (PVRSRV_CHECK_FW_LOCAL(uiFlags))
+	{
+		ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	}
+	else if (PVRSRV_CHECK_CPU_LOCAL(uiFlags))
+	{
+		ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL;
+	}
+	else
+	{
+		ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL;
+	}
+
+	if (NULL == psDevNode->apsPhysHeap[ePhysHeapIdx])
+	{
+		/* In case a heap hasn't been acquired for this type, return invalid heap error */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Requested allocation on device node (%p) from "
+		        "an invalid heap (HeapIndex=%d)",
+		        __func__, psDevNode, ePhysHeapIdx));
+		return PVRSRV_ERROR_INVALID_HEAP;
+	}
+
+	/* Apply memory budgeting policy */
+	if (pfnCheckMemAllocSize)
+	{
+		IMG_UINT64 uiMemSize = (IMG_UINT64)uiChunkSize * ui32NumPhysChunks;
+
+		eError = pfnCheckMemAllocSize(psDevNode->psDevConfig->hSysData, uiMemSize);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+#if defined(DEBUG)
+	if (gPMRAllocFail > 0)
+	{
+		static IMG_UINT32 ui32AllocCount = 1;
+
+		if (ui32AllocCount < gPMRAllocFail)
+		{
+			ui32AllocCount++;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s failed on %d allocation.",
+			         __func__, ui32AllocCount));
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+	}
+#endif /* defined(DEBUG) */
+
+	return psDevNode->pfnCreateRamBackedPMR[ePhysHeapIdx](psDevNode,
+											uiSize,
+											uiChunkSize,
+											ui32NumPhysChunks,
+											ui32NumVirtChunks,
+											pui32MappingTable,
+											uiLog2AllocPageSize,
+											uiFlags,
+											pszAnnotation,
+											ppsPMRPtr);
+}
+
+PVRSRV_ERROR
+PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection,
+							PVRSRV_DEVICE_NODE *psDevNode,
+							IMG_DEVMEM_SIZE_T uiSize,
+							PMR_SIZE_T uiChunkSize,
+							IMG_UINT32 ui32NumPhysChunks,
+							IMG_UINT32 ui32NumVirtChunks,
+							IMG_UINT32 *pui32MappingTable,
+							IMG_UINT32 uiLog2PageSize,
+							PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							IMG_UINT32 uiAnnotationLength,
+							const IMG_CHAR *pszAnnotation,
+							PMR **ppsPMRPtr)
+{
+
+	PVRSRV_ERROR eError;
+	eError = PhysmemNewRamBackedPMR(psConnection,
+									psDevNode,
+									uiSize,
+									uiChunkSize,
+									ui32NumPhysChunks,
+									ui32NumVirtChunks,
+									pui32MappingTable,
+									uiLog2PageSize,
+									uiFlags,
+									uiAnnotationLength,
+									pszAnnotation,
+									ppsPMRPtr);
+
+	if (eError == PVRSRV_OK)
+	{
+		eError = PMRLockSysPhysAddresses(*ppsPMRPtr);
+	}
+
+	return eError;
+}
+
+static void GetLMASize( IMG_DEVMEM_SIZE_T *puiLMASize,
+			PVRSRV_DEVICE_NODE *psDevNode )
+{
+	IMG_UINT uiRegionIndex = 0, uiNumRegions = 0;
+	PVR_ASSERT(psDevNode);
+
+	uiNumRegions = psDevNode->psDevConfig->pasPhysHeaps[0].ui32NumOfRegions;
+
+	for (uiRegionIndex = 0; uiRegionIndex < uiNumRegions; ++uiRegionIndex)
+	{
+		*puiLMASize += psDevNode->psDevConfig->pasPhysHeaps[0].pasRegions[uiRegionIndex].uiSize;
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVGetMaxDevMemSizeKM( CONNECTION_DATA * psConnection,
+			  PVRSRV_DEVICE_NODE *psDevNode,
+			  IMG_DEVMEM_SIZE_T *puiLMASize,
+			  IMG_DEVMEM_SIZE_T *puiUMASize )
+{
+	IMG_BOOL bLMA = IMG_FALSE, bUMA = IMG_FALSE;
+
+	*puiLMASize = 0;
+	*puiUMASize = 0;
+
+#if defined(TC_MEMORY_CONFIG)			/* For TC2 */
+#if (TC_MEMORY_CONFIG == TC_MEMORY_LOCAL)
+	bLMA = IMG_TRUE;
+#elif (TC_MEMORY_CONFIG == TC_MEMORY_HOST)
+	bUMA = IMG_TRUE;
+#else
+	bUMA = IMG_TRUE;
+	bLMA = IMG_TRUE;
+#endif
+
+#elif defined(PLATO_MEMORY_CONFIG)		/* For Plato TC */
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL)
+	bLMA = IMG_TRUE;
+#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST)
+	bUMA = IMG_TRUE;
+#else
+	bUMA = IMG_TRUE;
+	bLMA = IMG_TRUE;
+#endif
+
+#elif defined(LMA)				/* For emu, vp_linux */
+	bLMA = IMG_TRUE;
+
+#else						/* For all other platforms */
+	bUMA = IMG_TRUE;
+#endif
+
+	if (bLMA) { GetLMASize(puiLMASize, psDevNode); }
+	if (bUMA) { *puiUMASize = OSGetRAMSize(); }
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	return PVRSRV_OK;
+}
+
+/* 'Wrapper' function to call PMRImportPMR(), which
+ * first checks the PMR is for the current device.
+ * This avoids the need to do this in pmr.c, which
+ * would then need PVRSRV_DEVICE_NODE (defining this
+ * type in pmr.h causes a typedef redefinition issue).
+ */
+PVRSRV_ERROR
+PhysmemImportPMR(CONNECTION_DATA *psConnection,
+             PVRSRV_DEVICE_NODE *psDevNode,
+             PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (PMRGetExportDeviceNode(psPMRExport) != psDevNode)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device\n", __func__));
+		return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+	}
+
+	return PMRImportPMR(psPMRExport,
+	                    uiPassword,
+	                    uiSize,
+	                    uiLog2Contig,
+	                    ppsPMR);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physmem_hostmem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physmem_hostmem.c
new file mode 100644
index 0000000..604bdbc
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physmem_hostmem.c
@@ -0,0 +1,144 @@
+/*************************************************************************/ /*!
+@File           physmem_hostmem.c
+@Title          Host memory device node functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Functions relevant to device memory allocations made from host
+                mem device node.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "physmem_hostmem.h"
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "physheap.h"
+#include "pvrsrv_device.h"
+
+static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr);
+
+/* heap callbacks for host driver's device's heap */
+static PHYS_HEAP_FUNCTIONS gsHostMemDevPhysHeapFuncs =
+{
+	/* pfnCpuPAddrToDevPAddr */
+	HostMemCpuPAddrToDevPAddr,
+	/* pfnDevPAddrToCpuPAddr */
+	HostMemDevPAddrToCpuPAddr,
+	/* pfnGetRegionId */
+	NULL,
+};
+
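+/* Forward declaration: the heap config below keeps a back-reference to
+ * this device config in its private data handle. */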
+static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[];
+
+/* heap configuration for host driver's device */
+static PHYS_HEAP_CONFIG gsPhysHeapConfigHostMemDevice[] =
+{
+	{
+		PHYS_HEAP_ID_HOSTMEM,
+		PHYS_HEAP_TYPE_UMA,
+		"SYSMEM",
+		&gsHostMemDevPhysHeapFuncs,
+		NULL,
+		0,
+		(IMG_HANDLE)&gsHostMemDevConfig[0],
+	}
+};
+
+/* device configuration for host driver's device */
+static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[] =
+{
+	{
+		.pszName = "HostMemDevice",
+		.eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE,
+		.pasPhysHeaps = &gsPhysHeapConfigHostMemDevice[0],
+		.ui32PhysHeapCount = IMG_ARR_NUM_ELEMS(gsPhysHeapConfigHostMemDevice),
+		.aui32PhysHeapID = {
+			PHYS_HEAP_ID_HOSTMEM,
+			PHYS_HEAP_ID_HOSTMEM,
+			PHYS_HEAP_ID_HOSTMEM
+		},
+	}
+};
+
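+/* The host memory device is UMA: CPU and device physical addresses are
+ * identical, so the translations below are straight copies. */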
+static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+	/* Optimise common case */
+	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+		}
+	}
+}
+
+static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+	/* Optimise common case */
+	psCpuPAddr[0].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[0].uiAddr);
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psCpuPAddr[ui32Idx].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[ui32Idx].uiAddr);
+		}
+	}
+}
+
+PVRSRV_DEVICE_CONFIG* HostMemGetDeviceConfig(void)
+{
+	return &gsHostMemDevConfig[0];
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physmem_lma.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physmem_lma.c
new file mode 100644
index 0000000..a564499
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physmem_lma.c
@@ -0,0 +1,1669 @@
+/*************************************************************************/ /*!
+@File           physmem_lma.c
+@Title          Local card memory allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for local card memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "rgx_pdump_panics.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "devicemem_server_utils.h"
+#include "physmem_lma.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "rgxutils.h"
+#endif
+
+/* Since 0x0 is a valid DevPAddr, we rely on max 64-bit value to be an invalid
+ * page address */
+#define INVALID_PAGE_ADDR ~((IMG_UINT64)0x0)
+
+typedef struct _PMR_LMALLOCARRAY_DATA_ {
+	PVRSRV_DEVICE_NODE *psDevNode;
+	IMG_INT32 iNumPagesAllocated;
+	/*
+	 * uiTotalNumPages:
+	 * Total number of pages supported by this PMR (currently fixed, due to
+	 * the fixed page table array size).
+	 */
+	IMG_UINT32 uiTotalNumPages;
+	IMG_UINT32 uiPagesToAlloc;
+
+	IMG_UINT32 uiLog2AllocSize;
+	IMG_UINT32 uiContigAllocSize;
+	IMG_DEV_PHYADDR *pasDevPAddr;
+
+	IMG_BOOL bZeroOnAlloc;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bFwLocalAlloc;
+
+	IMG_BOOL bOnDemand;
+
+	/*
+	  record at alloc time whether poisoning will be required when the
+	  PMR is freed.
+	*/
+	IMG_BOOL bPoisonOnFree;
+
+	/* Physical heap and arena pointers for this allocation */
+	PHYS_HEAP* psPhysHeap;
+	RA_ARENA* psArena;
+	PVRSRV_MEMALLOCFLAGS_T uiAllocFlags;
+
+} PMR_LMALLOCARRAY_DATA;
+
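+/* Map an allocation's physical pages into the kernel address space so they
+ * can be poisoned, zeroed or copied. */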
+static PVRSRV_ERROR _MapAlloc(PVRSRV_DEVICE_NODE *psDevNode, 
+							  IMG_DEV_PHYADDR *psDevPAddr,
+							  size_t uiSize,
+							  IMG_BOOL bFwLocalAlloc,
+							  PMR_FLAGS_T ulFlags,
+							  void **pvPtr)
+{
+	IMG_UINT32 ui32CPUCacheFlags;
+	IMG_CPU_PHYADDR sCpuPAddr;
+	PHYS_HEAP *psPhysHeap;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemCPUCacheMode(psDevNode, ulFlags, &ui32CPUCacheFlags);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	if (bFwLocalAlloc)
+	{
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	}
+	else
+	{
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+	}
+
+	PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr);
+
+	*pvPtr = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags);
+	if (*pvPtr == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	else
+	{
+		return PVRSRV_OK;
+	}
+}
+
+static void _UnMapAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+						size_t uiSize,
+						IMG_BOOL bFwLocalAlloc,
+						PMR_FLAGS_T ulFlags,
+						void *pvPtr)
+{
+	OSUnMapPhysToLin(pvPtr, uiSize, PVRSRV_CPU_CACHE_MODE(ulFlags));
+}
+
+static PVRSRV_ERROR
+_PoisonAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+			 IMG_DEV_PHYADDR *psDevPAddr,
+			 IMG_BOOL bFwLocalAlloc,
+			 IMG_UINT32 uiContigAllocSize,
+			 const IMG_CHAR *pacPoisonData,
+			 size_t uiPoisonSize)
+{
+	IMG_UINT32 uiSrcByteIndex;
+	IMG_UINT32 uiDestByteIndex;
+	void *pvKernLin = NULL;
+	IMG_CHAR *pcDest = NULL;
+
+	PVRSRV_ERROR eError;
+
+	eError = _MapAlloc(psDevNode,
+					   psDevPAddr,
+					   uiContigAllocSize,
+					   bFwLocalAlloc,
+					   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+					   &pvKernLin);
+	if (eError != PVRSRV_OK)
+	{
+		goto map_failed;
+	}
+	pcDest = pvKernLin;
+
+	uiSrcByteIndex = 0;
+
+	for (uiDestByteIndex=0; uiDestByteIndex<uiContigAllocSize; uiDestByteIndex++)
+	{
+		pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
+		uiSrcByteIndex++;
+		if (uiSrcByteIndex == uiPoisonSize)
+		{
+			uiSrcByteIndex = 0;
+		}
+	}
+
+	_UnMapAlloc(psDevNode, uiContigAllocSize, bFwLocalAlloc, 0, pvKernLin);
+
+	return PVRSRV_OK;
+
+map_failed:
+	PVR_DPF((PVR_DBG_ERROR, "Failed to poison allocation"));
+	return eError;
+}
+
+static PVRSRV_ERROR
+_ZeroAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+		   IMG_DEV_PHYADDR *psDevPAddr,
+		   IMG_BOOL bFwLocalAlloc,
+		   IMG_UINT32 uiContigAllocSize)
+{
+	void *pvKernLin = NULL;
+	PVRSRV_ERROR eError;
+
+	eError = _MapAlloc(psDevNode, 
+					   psDevPAddr,
+					   uiContigAllocSize,
+					   bFwLocalAlloc,
+					   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+					   &pvKernLin);
+	if (eError != PVRSRV_OK)
+	{
+		goto map_failed;
+	}
+
+	OSDeviceMemSet(pvKernLin, 0, uiContigAllocSize);
+
+	_UnMapAlloc(psDevNode, uiContigAllocSize, bFwLocalAlloc, 0, pvKernLin);
+
+	return PVRSRV_OK;
+
+map_failed:
+	PVR_DPF((PVR_DBG_ERROR, "Failed to zero allocation"));
+	return eError;
+}
+
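+/* Patterns written over pages when poison-on-alloc / poison-on-free is
+ * requested. */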
+static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
+static const IMG_UINT32 _AllocPoisonSize = 7;
+static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
+static const IMG_UINT32 _FreePoisonSize = 11;
+
+static PVRSRV_ERROR
+_AllocLMPageArray(PVRSRV_DEVICE_NODE *psDevNode,
+			  PMR_SIZE_T uiSize,
+			  PMR_SIZE_T uiChunkSize,
+			  IMG_UINT32 ui32NumPhysChunks,
+			  IMG_UINT32 ui32NumVirtChunks,
+			  IMG_UINT32 *pabMappingTable,
+			  IMG_UINT32 uiLog2AllocPageSize,
+			  IMG_BOOL bZero,
+			  IMG_BOOL bPoisonOnAlloc,
+			  IMG_BOOL bPoisonOnFree,
+			  IMG_BOOL bContig,
+			  IMG_BOOL bOnDemand,
+			  IMG_BOOL bFwLocalAlloc,
+			  PHYS_HEAP* psPhysHeap,
+			  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags,
+			  PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr
+			  )
+{
+	PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL;
+	IMG_UINT32 ui32Index;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(!bZero || !bPoisonOnAlloc);
+	PVR_ASSERT(OSGetPageShift() <= uiLog2AllocPageSize);
+
+	psPageArrayData = OSAllocZMem(sizeof(PMR_LMALLOCARRAY_DATA));
+	if (psPageArrayData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errorOnAllocArray;
+	}
+
+	if (bContig)
+	{
+		/*
+			Some allocations require kernel mappings in which case in order
+			to be virtually contiguous we also have to be physically contiguous.
+		*/
+		psPageArrayData->uiTotalNumPages = 1;
+		psPageArrayData->uiPagesToAlloc = psPageArrayData->uiTotalNumPages;
+		psPageArrayData->uiContigAllocSize = TRUNCATE_64BITS_TO_32BITS(uiSize);
+		psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize;
+	}
+	else
+	{
+		IMG_UINT32 uiNumPages;
+
+		/* Use of cast below is justified by the assertion that follows to
+		prove that no significant bits have been truncated */
+		uiNumPages = (IMG_UINT32) ( ((uiSize - 1) >> uiLog2AllocPageSize) + 1);
+		PVR_ASSERT( ((PMR_SIZE_T) uiNumPages << uiLog2AllocPageSize) == uiSize);
+
+		psPageArrayData->uiTotalNumPages = uiNumPages;
+
+		if ((ui32NumVirtChunks != ui32NumPhysChunks) || (1 < ui32NumVirtChunks))
+		{
+			psPageArrayData->uiPagesToAlloc = ui32NumPhysChunks;
+		}
+		else
+		{
+			psPageArrayData->uiPagesToAlloc = uiNumPages;
+		}
+		psPageArrayData->uiContigAllocSize = 1 << uiLog2AllocPageSize;
+		psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize;
+	}
+	psPageArrayData->psDevNode = psDevNode;
+	psPageArrayData->pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR) *
+												psPageArrayData->uiTotalNumPages);
+	if (psPageArrayData->pasDevPAddr == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errorOnAllocAddr;
+	}
+
+	/* Since no pages are allocated yet, initialise page addresses to INVALID_PAGE_ADDR */
+	for (ui32Index = 0; ui32Index < psPageArrayData->uiTotalNumPages; ui32Index++)
+	{
+		psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+	}
+
+	psPageArrayData->iNumPagesAllocated = 0;
+	psPageArrayData->bZeroOnAlloc = bZero;
+	psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
+	psPageArrayData->bPoisonOnFree = bPoisonOnFree;
+	psPageArrayData->bOnDemand = bOnDemand;
+	psPageArrayData->bFwLocalAlloc = bFwLocalAlloc;
+	psPageArrayData->psPhysHeap = psPhysHeap;
+	psPageArrayData->uiAllocFlags = uiAllocFlags;
+
+	*ppsPageArrayDataPtr = psPageArrayData;
+
+	return PVRSRV_OK;
+
+	/*
+	  error exit paths follow:
+	*/
+
+errorOnAllocAddr:
+	OSFreeMem(psPageArrayData);
+
+errorOnAllocArray:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+static PVRSRV_ERROR
+_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable)
+{
+	PVRSRV_ERROR eError;
+	RA_BASE_T uiCardAddr;
+	RA_LENGTH_T uiActualSize;
+	IMG_UINT32 i, ui32Index = 0;
+	IMG_UINT32 uiContigAllocSize;
+	IMG_UINT32 uiLog2AllocSize;
+	IMG_UINT32 uiRegionId;
+	PVRSRV_DEVICE_NODE *psDevNode;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bZeroOnAlloc;
+	RA_ARENA *pArena;
+
+	PVR_ASSERT(NULL != psPageArrayData);
+	PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+
+	uiContigAllocSize = psPageArrayData->uiContigAllocSize;
+	uiLog2AllocSize = psPageArrayData->uiLog2AllocSize;
+	psDevNode = psPageArrayData->psDevNode;
+	bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc;
+	bZeroOnAlloc = psPageArrayData->bZeroOnAlloc;
+
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) && psPageArrayData->bFwLocalAlloc)
+	{
+		PVR_ASSERT(psDevNode->uiKernelFwRAIdx < RGXFW_NUM_OS);
+		pArena = psDevNode->psKernelFwMemArena[psDevNode->uiKernelFwRAIdx];
+		psDevNode->uiKernelFwRAIdx = 0;
+	}
+	else
+	{
+		/* Get suitable local memory region for this allocation */
+		uiRegionId = PhysHeapGetRegionId(psPageArrayData->psPhysHeap,
+		                                 psPageArrayData->uiAllocFlags);
+
+		PVR_ASSERT(uiRegionId < psDevNode->ui32NumOfLocalMemArenas);
+		pArena = psDevNode->apsLocalDevMemArenas[uiRegionId];
+	}
+
+	if (psPageArrayData->uiTotalNumPages <
+			(psPageArrayData->iNumPagesAllocated + psPageArrayData->uiPagesToAlloc))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Requested pages do not fit within the PMR allocation. "
+				"Allocated: %u + Requested: %u > Total allowed: %u",
+				psPageArrayData->iNumPagesAllocated,
+				psPageArrayData->uiPagesToAlloc,
+				psPageArrayData->uiTotalNumPages));
+		eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+		return eError;
+	}
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	{
+		IMG_UINT32  ui32OSid=0, ui32OSidReg=0;
+		IMG_BOOL    bOSidAxiProt;
+		IMG_PID     pId;
+
+		pId=OSGetCurrentClientProcessIDKM();
+		RetrieveOSidsfromPidList(pId, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+		pArena=psDevNode->psOSidSubArena[ui32OSid];
+		PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Giving from OS slot %d",ui32OSid));
+	}
+#endif
+
+	psPageArrayData->psArena = pArena;
+
+	for (i = 0; i < psPageArrayData->uiPagesToAlloc; i++)
+	{
+
+		/* Determine the target index before allocating the page, so the
+		 * error paths stay simple. */
+		if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
+		{
+			ui32Index = i;
+		}
+		else
+		{
+			if (NULL == pui32MapTable)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Mapping table cannot be null"));
+				eError = PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY;
+				goto errorOnRAAlloc;
+			}
+
+			ui32Index = pui32MapTable[i];
+			if (ui32Index >= psPageArrayData->uiTotalNumPages)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Page alloc request Index out of bounds for PMR @0x%p",
+						__func__,
+						psPageArrayData));
+				eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+				goto errorOnRAAlloc;
+			}
+
+			if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Mapping already exists"));
+				eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
+				goto errorOnRAAlloc;
+			}
+		}
+
+		eError = RA_Alloc(pArena,
+		                  uiContigAllocSize,
+		                  RA_NO_IMPORT_MULTIPLIER,
+		                  0,                       /* No flags */
+		                  1ULL << uiLog2AllocSize,
+		                  "LMA_Page_Alloc",
+		                  &uiCardAddr,
+		                  &uiActualSize,
+		                  NULL);                   /* No private handle */
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"Failed to Allocate the page @index:%d",
+					ui32Index));
+			eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+			goto errorOnRAAlloc;
+		}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+					"(GPU Virtualization Validation): Address: %llu\n",
+					uiCardAddr));
+		}
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+		/* Allocation is done a page at a time */
+		PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiActualSize);
+#else
+		{
+			IMG_CPU_PHYADDR sLocalCpuPAddr;
+
+			sLocalCpuPAddr.uiAddr = (IMG_UINT64)uiCardAddr;
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+									 NULL,
+									 sLocalCpuPAddr,
+									 uiActualSize,
+									 NULL);
+		}
+#endif
+#endif
+
+		psPageArrayData->pasDevPAddr[ui32Index].uiAddr = uiCardAddr;
+		if (bPoisonOnAlloc)
+		{
+			eError = _PoisonAlloc(psDevNode,
+								  &psPageArrayData->pasDevPAddr[ui32Index],
+								  psPageArrayData->bFwLocalAlloc,
+								  uiContigAllocSize,
+								  _AllocPoison,
+								  _AllocPoisonSize);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Failed to poison the page"));
+				goto errorOnPoison;
+			}
+		}
+
+		if (bZeroOnAlloc)
+		{
+			eError = _ZeroAlloc(psDevNode,
+								&psPageArrayData->pasDevPAddr[ui32Index],
+								psPageArrayData->bFwLocalAlloc,
+								uiContigAllocSize);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Failed to zero the page"));
+				goto errorOnZero;
+			}
+		}
+	}
+	psPageArrayData->iNumPagesAllocated += psPageArrayData->uiPagesToAlloc;
+
+	return PVRSRV_OK;
+
+	/*
+	  error exit paths follow:
+	*/
+errorOnZero:
+errorOnPoison:
+	eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+errorOnRAAlloc:
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s: alloc_pages failed to honour request %d @index: %d of %d pages: (%s)",
+			__func__,
+			ui32Index,
+			i,
+			psPageArrayData->uiPagesToAlloc,
+			PVRSRVGetErrorStringKM(eError)));
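+	/* Unwind loop: relies on IMG_UINT32 wrap-around; once i reaches 0,
+	 * --i wraps past uiPagesToAlloc and the loop terminates. */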
+	while (--i < psPageArrayData->uiPagesToAlloc)
+	{
+		if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
+		{
+			ui32Index = i;
+		}
+		else
+		{
+			ui32Index = pui32MapTable[i];
+		}
+
+		if (ui32Index < psPageArrayData->uiTotalNumPages)
+		{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			/* Allocation is done a page at a time */
+			PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+			                            uiContigAllocSize);
+#else
+			{
+				PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+				                                psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+			}
+#endif
+#endif
+			RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+			psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+		}
+	}
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static PVRSRV_ERROR
+_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData)
+{
+	OSFreeMem(psPageArrayData->pasDevPAddr);
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+			"physmem_lma.c: freed local memory array structure for PMR @0x%p",
+			psPageArrayData));
+
+	OSFreeMem(psPageArrayData);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData,
+             IMG_UINT32 *pui32FreeIndices,
+             IMG_UINT32 ui32FreePageCount)
+{
+	IMG_UINT32 uiContigAllocSize;
+	IMG_UINT32 i, ui32PagesToFree=0, ui32PagesFreed=0, ui32Index=0;
+	RA_ARENA *pArena = psPageArrayData->psArena;
+
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE))
+	{
+		PVRSRV_DEVICE_NODE *psDevNode = psPageArrayData->psDevNode;
+		if (psPageArrayData->bFwLocalAlloc)
+		{
+			PVR_ASSERT(psDevNode->uiKernelFwRAIdx < RGXFW_NUM_OS);
+			pArena = psDevNode->psKernelFwMemArena[psDevNode->uiKernelFwRAIdx];
+			psDevNode->uiKernelFwRAIdx = 0;
+		}
+	}
+
+	PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);
+
+	uiContigAllocSize = psPageArrayData->uiContigAllocSize;
+
+	ui32PagesToFree = (NULL == pui32FreeIndices) ?
+			psPageArrayData->uiTotalNumPages : ui32FreePageCount;
+
+	for (i = 0; i < ui32PagesToFree; i++)
+	{
+		if (NULL == pui32FreeIndices)
+		{
+			ui32Index = i;
+		}
+		else
+		{
+			ui32Index = pui32FreeIndices[i];
+		}
+
+		if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
+		{
+			ui32PagesFreed++;
+			if (psPageArrayData->bPoisonOnFree)
+			{
+				_PoisonAlloc(psPageArrayData->psDevNode,
+							 &psPageArrayData->pasDevPAddr[ui32Index],
+							 psPageArrayData->bFwLocalAlloc,
+							 uiContigAllocSize,
+							 _FreePoison,
+							 _FreePoisonSize);
+			}
+
+			RA_Free(pArena,	psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			/* Allocation is done a page at a time */
+			PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+			                            uiContigAllocSize);
+#else
+			{
+				PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+				                                psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+			}
+#endif
+#endif
+			psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+		}
+	}
+	psPageArrayData->iNumPagesAllocated -= ui32PagesFreed;
+
+	PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+			"%s: freed %d bytes of local memory for PMR @0x%p",
+			__func__,
+			(ui32PagesFreed * uiContigAllocSize),
+
+	return PVRSRV_OK;
+}
+
+/*
+ *
+ * Implementation of callback functions
+ *
+ */
+
+/* destructor func is called after last reference disappears, but
+   before PMR itself is freed. */
+static PVRSRV_ERROR
+PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVRSRV_ERROR eError;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+
+	psLMAllocArrayData = pvPriv;
+
+	/* We can't free the pages until now. */
+	if (psLMAllocArrayData->iNumPagesAllocated != 0)
+	{
+		eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
+		PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+	}
+
+	eError = _FreeLMPageArray(psLMAllocArrayData);
+	PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+
+	return PVRSRV_OK;
+}
+
+/* Callback function for locking the system physical page addresses.
+   For LMA there is normally nothing to do, since the driver controls
+   physical memory; the exception is on-demand allocations, which are
+   performed here. */
+static PVRSRV_ERROR
+PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVRSRV_ERROR eError;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+
+	psLMAllocArrayData = pvPriv;
+
+	if (psLMAllocArrayData->bOnDemand)
+	{
+		/* Allocate memory for deferred allocation */
+		eError = _AllocLMPages(psLMAllocArrayData, NULL);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+
+	psLMAllocArrayData = pvPriv;
+
+	if (psLMAllocArrayData->bOnDemand)
+	{
+		/* Free memory for deferred allocation */
+		eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+/* N.B.  It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function! */
+static PVRSRV_ERROR
+PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+					   IMG_UINT32 ui32Log2PageSize,
+					   IMG_UINT32 ui32NumOfPages,
+					   IMG_DEVMEM_OFFSET_T *puiOffset,
+					   IMG_BOOL *pbValid,
+					   IMG_DEV_PHYADDR *psDevPAddr)
+{
+	IMG_UINT32 idx;
+	IMG_UINT32 uiLog2AllocSize;
+	IMG_UINT32 uiNumAllocs;
+	IMG_UINT64 uiAllocIndex;
+	IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv;
+
+	if (psLMAllocArrayData->uiLog2AllocSize < ui32Log2PageSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Requested physical addresses from PMR "
+		         "for incompatible contiguity %u!",
+		         __func__,
+		         ui32Log2PageSize));
+		return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+	}
+
+	uiNumAllocs = psLMAllocArrayData->uiTotalNumPages;
+	if (uiNumAllocs > 1)
+	{
+		PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
+		uiLog2AllocSize = psLMAllocArrayData->uiLog2AllocSize;
+
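+		/* Decompose each offset into an allocation index and an offset
+		 * within that allocation. */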
+		for (idx=0; idx < ui32NumOfPages; idx++)
+		{
+			if (pbValid[idx])
+			{
+				uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize;
+				uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize);
+
+				PVR_ASSERT(uiAllocIndex < uiNumAllocs);
+				PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize));
+
+				psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[uiAllocIndex].uiAddr + uiInAllocOffset;
+			}
+		}
+	}
+	else
+	{
+		for (idx=0; idx < ui32NumOfPages; idx++)
+		{
+			if (pbValid[idx])
+			{
+				psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[0].uiAddr + puiOffset[idx];
+			}
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+								 size_t uiOffset,
+								 size_t uiSize,
+								 void **ppvKernelAddressOut,
+								 IMG_HANDLE *phHandleOut,
+								 PMR_FLAGS_T ulFlags)
+{
+	PVRSRV_ERROR eError;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+	void *pvKernLinAddr = NULL;
+	IMG_UINT32 ui32PageIndex = 0;
+	size_t uiOffsetMask = uiOffset;
+
+	psLMAllocArrayData = pvPriv;
+
+	/* Check that we can map this in contiguously */
+	if (psLMAllocArrayData->uiTotalNumPages != 1)
+	{
+		size_t uiStart = uiOffset;
+		size_t uiEnd = uiOffset + uiSize - 1;
+		size_t uiPageMask = ~(((size_t)1 << psLMAllocArrayData->uiLog2AllocSize) - 1);
+
+		/* We can still map if only one page is required */
+		if ((uiStart & uiPageMask) != (uiEnd & uiPageMask))
+		{
+			eError = PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+			goto e0;
+		}
+
+		/* Locate the desired physical page to map in */
+		ui32PageIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
+		uiOffsetMask = (1U << psLMAllocArrayData->uiLog2AllocSize) - 1;
+	}
+
+	PVR_ASSERT(ui32PageIndex < psLMAllocArrayData->uiTotalNumPages);
+
+	eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+						&psLMAllocArrayData->pasDevPAddr[ui32PageIndex],
+						psLMAllocArrayData->uiContigAllocSize,
+						psLMAllocArrayData->bFwLocalAlloc,
+						ulFlags,
+						&pvKernLinAddr);
+
+	*ppvKernelAddressOut = ((IMG_CHAR *) pvKernLinAddr) + (uiOffset & uiOffsetMask);
+	*phHandleOut = pvKernLinAddr;
+
+	return eError;
+
+	/*
+	  error exit paths follow
+	*/
+
+ e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+												 IMG_HANDLE hHandle)
+{
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+	void *pvKernLinAddr = NULL;
+
+	psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv;
+	pvKernLinAddr = (void *) hHandle;
+
+	_UnMapAlloc(psLMAllocArrayData->psDevNode,
+				psLMAllocArrayData->uiContigAllocSize,
+				psLMAllocArrayData->bFwLocalAlloc, 
+				0,
+				pvKernLinAddr);
+}
+
+
+static PVRSRV_ERROR
+CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+				  IMG_DEVMEM_OFFSET_T uiOffset,
+				  IMG_UINT8 *pcBuffer,
+				  size_t uiBufSz,
+				  size_t *puiNumBytes,
+				  void (*pfnCopyBytes)(IMG_UINT8 *pcBuffer,
+									   IMG_UINT8 *pcPMR,
+									   size_t uiSize))
+{
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+	size_t uiBytesCopied;
+	size_t uiBytesToCopy;
+	size_t uiBytesCopyableFromAlloc;
+	void *pvMapping = NULL;
+	IMG_UINT8 *pcKernelPointer = NULL;
+	size_t uiBufferOffset;
+	IMG_UINT64 uiAllocIndex;
+	IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+	PVRSRV_ERROR eError;
+
+	psLMAllocArrayData = pvPriv;
+
+	uiBytesCopied = 0;
+	uiBytesToCopy = uiBufSz;
+	uiBufferOffset = 0;
+
+	if (psLMAllocArrayData->uiTotalNumPages > 1)
+	{
+		while (uiBytesToCopy > 0)
+		{
+			/* we have to map one alloc in at a time */
+			PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
+			uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
+			uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2AllocSize);
+			uiBytesCopyableFromAlloc = uiBytesToCopy;
+			if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2AllocSize))
+			{
+				uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2AllocSize)-uiInAllocOffset);
+			}
+
+			PVR_ASSERT(uiBytesCopyableFromAlloc != 0);
+			PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumPages);
+			PVR_ASSERT(uiInAllocOffset < (1ULL << psLMAllocArrayData->uiLog2AllocSize));
+
+			eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+								&psLMAllocArrayData->pasDevPAddr[uiAllocIndex],
+								psLMAllocArrayData->uiContigAllocSize,
+								psLMAllocArrayData->bFwLocalAlloc,
+								PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+								&pvMapping);
+			if (eError != PVRSRV_OK)
+			{
+				goto e0;
+			}
+			pcKernelPointer = pvMapping;
+			pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc);
+
+			_UnMapAlloc(psLMAllocArrayData->psDevNode, 
+						psLMAllocArrayData->uiContigAllocSize,
+						psLMAllocArrayData->bFwLocalAlloc,
+						0,
+						pvMapping);
+
+			uiBufferOffset += uiBytesCopyableFromAlloc;
+			uiBytesToCopy -= uiBytesCopyableFromAlloc;
+			uiOffset += uiBytesCopyableFromAlloc;
+			uiBytesCopied += uiBytesCopyableFromAlloc;
+		}
+	}
+	else
+	{
+		PVR_ASSERT((uiOffset + uiBufSz) <= psLMAllocArrayData->uiContigAllocSize);
+		PVR_ASSERT(psLMAllocArrayData->uiContigAllocSize != 0);
+		eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+							&psLMAllocArrayData->pasDevPAddr[0],
+							psLMAllocArrayData->uiContigAllocSize,
+							psLMAllocArrayData->bFwLocalAlloc,
+							PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+							&pvMapping);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+		pcKernelPointer = pvMapping;
+		pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz);
+
+		_UnMapAlloc(psLMAllocArrayData->psDevNode,
+					psLMAllocArrayData->uiContigAllocSize,
+					psLMAllocArrayData->bFwLocalAlloc,
+					0,
+					pvMapping);
+
+		uiBytesCopied = uiBufSz;
+	}
+	*puiNumBytes = uiBytesCopied;
+	return PVRSRV_OK;
+e0:
+	*puiNumBytes = uiBytesCopied;
+	return eError;
+}
+
+static void ReadLocalMem(IMG_UINT8 *pcBuffer,
+						 IMG_UINT8 *pcPMR,
+						 size_t uiSize)
+{
+	/* NOTE: 'CachedMemCopy' means the operating system default memcpy, which
+	 *       we *assume* in the LMA code will be faster, and doesn't need to
+	 *       worry about ARM64.
+	 */
+	OSCachedMemCopy(pcBuffer, pcPMR, uiSize);
+}
+
+static PVRSRV_ERROR
+PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+				  IMG_DEVMEM_OFFSET_T uiOffset,
+				  IMG_UINT8 *pcBuffer,
+				  size_t uiBufSz,
+				  size_t *puiNumBytes)
+{
+	return CopyBytesLocalMem(pvPriv,
+							 uiOffset,
+							 pcBuffer,
+							 uiBufSz,
+							 puiNumBytes,
+							 ReadLocalMem);
+}
+
+static void WriteLocalMem(IMG_UINT8 *pcBuffer,
+						  IMG_UINT8 *pcPMR,
+						  size_t uiSize)
+{
+	/* NOTE: 'CachedMemCopy' means the operating system default memcpy, which
+	 *       we *assume* in the LMA code will be faster, and doesn't need to
+	 *       worry about ARM64.
+	 */
+	OSCachedMemCopy(pcPMR, pcBuffer, uiSize);
+}
+
+static PVRSRV_ERROR
+PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+					  IMG_DEVMEM_OFFSET_T uiOffset,
+					  IMG_UINT8 *pcBuffer,
+					  size_t uiBufSz,
+					  size_t *puiNumBytes)
+{
+	return CopyBytesLocalMem(pvPriv,
+							 uiOffset,
+							 pcBuffer,
+							 uiBufSz,
+							 puiNumBytes,
+							 WriteLocalMem);
+}
+
+/*************************************************************************/ /*!
+@Function       PMRChangeSparseMemLocalMem
+@Description    Changes the sparse mapping by allocating and freeing pages.
+                It also updates the GPU mappings accordingly.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv,
+                           const PMR *psPMR,
+                           IMG_UINT32 ui32AllocPageCount,
+                           IMG_UINT32 *pai32AllocIndices,
+                           IMG_UINT32 ui32FreePageCount,
+                           IMG_UINT32 *pai32FreeIndices,
+                           IMG_UINT32 uiFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+	IMG_UINT32 ui32AdtnlAllocPages = 0;
+	IMG_UINT32 ui32AdtnlFreePages = 0;
+	IMG_UINT32 ui32CommonRequestCount = 0;
+	IMG_UINT32 ui32Loop = 0;
+	IMG_UINT32 ui32Index = 0;
+	IMG_UINT32 uiAllocpgidx;
+	IMG_UINT32 uiFreepgidx;
+
+	PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
+	IMG_DEV_PHYADDR sPhyAddr;
+
+#if defined(DEBUG)
+	IMG_BOOL bPoisonFail = IMG_FALSE;
+	IMG_BOOL bZeroFail = IMG_FALSE;
+#endif
+
+	/* Fetch the Page table array represented by the PMR */
+	IMG_DEV_PHYADDR *psPageArray = psPMRPageArrayData->pasDevPAddr;
+	PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR);
+
+	/* The incoming request is classified into two operations independent of
+	 * each other: alloc & free pages.
+	 * These operations can be combined with two mapping operations as well
+	 * which are GPU & CPU space mappings.
+	 *
+	 * From the alloc and free page requests, the net amount of pages to be
+	 * allocated or freed is computed. Pages that were requested to be freed
+	 * will be reused to fulfil alloc requests.
+	 *
+	 * The order of operations is:
+	 * 1. Allocate new pages from the OS
+	 * 2. Move the free pages from free request to alloc positions.
+	 * 3. Free the rest of the pages not used for alloc
+	 *
+	 * Alloc parameters are validated at the time of allocation
+	 * and any error will be handled then. */
+
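+	/* Pages covered by both an alloc and a free request are "common": each
+	 * freed page is reused at an alloc index rather than being returned to
+	 * the RA and re-allocated. */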
+	if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
+	{
+		ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ?
+				ui32FreePageCount : ui32AllocPageCount;
+
+		PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
+	}
+
+	if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
+	{
+		ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount;
+	}
+	else
+	{
+		ui32AllocPageCount = 0;
+	}
+
+	if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
+	{
+		ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount;
+	}
+	else
+	{
+		ui32FreePageCount = 0;
+	}
+
+	if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		return eError;
+	}
+
+	{
+		/* Validate the free page indices */
+		if (ui32FreePageCount)
+		{
+			if (NULL != pai32FreeIndices)
+			{
+				for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+				{
+					uiFreepgidx = pai32FreeIndices[ui32Loop];
+
+					if (uiFreepgidx >= psPMRPageArrayData->uiTotalNumPages)
+					{
+						eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+						goto e0;
+					}
+
+					if (INVALID_PAGE_ADDR == psPageArray[uiFreepgidx].uiAddr)
+					{
+						eError = PVRSRV_ERROR_INVALID_PARAMS;
+						goto e0;
+					}
+				}
+			}
+			else
+			{
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				return eError;
+			}
+		}
+
+		/* Validate the common alloc page indices (those paired with free requests) */
+		for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
+		{
+			uiAllocpgidx = pai32AllocIndices[ui32Loop];
+			if (uiAllocpgidx >= psPMRPageArrayData->uiTotalNumPages)
+			{
+				eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+				goto e0;
+			}
+
+			if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+			{
+				if ((INVALID_PAGE_ADDR != psPageArray[uiAllocpgidx].uiAddr) ||
+						(TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
+				{
+					eError = PVRSRV_ERROR_INVALID_PARAMS;
+					goto e0;
+				}
+			}
+			else
+			{
+				if ((INVALID_PAGE_ADDR ==  psPageArray[uiAllocpgidx].uiAddr) ||
+				    (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]))
+				{
+					eError = PVRSRV_ERROR_INVALID_PARAMS;
+					goto e0;
+				}
+			}
+		}
+
+
+		ui32Loop = 0;
+
+		/* Allocate new pages */
+		if (0 != ui32AdtnlAllocPages)
+		{
+			/* Say how many pages to allocate */
+			psPMRPageArrayData->uiPagesToAlloc = ui32AdtnlAllocPages;
+
+			eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices);
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				         "%s: Additional page allocation failed",
+				         __func__));
+				goto e0;
+			}
+
+			/* Mark the corresponding pages of translation table as valid */
+			for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
+			{
+				psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
+			}
+
+			psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages;
+		}
+
+		ui32Index = ui32Loop;
+
+		/* Move the corresponding free pages to alloc request */
+		for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++)
+		{
+
+			uiAllocpgidx = pai32AllocIndices[ui32Index];
+			uiFreepgidx =  pai32FreeIndices[ui32Loop];
+			sPhyAddr = psPageArray[uiAllocpgidx];
+			psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
+
+			/* Is remap mem used in any real-world scenario, or should it become
+			 * a debug-only feature? The condition check could also be hoisted
+			 * out of the loop; deferred pending further analysis. */
+			if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+			{
+				psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
+				psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+				psPageArray[uiFreepgidx].uiAddr = INVALID_PAGE_ADDR;
+			}
+			else
+			{
+				psPageArray[uiFreepgidx] = sPhyAddr;
+				psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
+				psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+			}
+
+			/* Be sure to honour the attributes associated with the allocation
+			 * such as zeroing, poisoning etc. */
+			if (psPMRPageArrayData->bPoisonOnAlloc)
+			{
+				eError = _PoisonAlloc(psPMRPageArrayData->psDevNode,
+				                      &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
+				                      psPMRPageArrayData->bFwLocalAlloc,
+				                      psPMRPageArrayData->uiContigAllocSize,
+				                      _AllocPoison,
+				                      _AllocPoisonSize);
+
+				/* Treat this as a soft failure: carry on, but log an error */
+				if (eError != PVRSRV_OK)
+				{
+#if defined(DEBUG)
+					bPoisonFail = IMG_TRUE;
+#endif
+				}
+			}
+			else
+			{
+				if (psPMRPageArrayData->bZeroOnAlloc)
+				{
+					eError = _ZeroAlloc(psPMRPageArrayData->psDevNode,
+					                    &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
+					                    psPMRPageArrayData->bFwLocalAlloc,
+					                    psPMRPageArrayData->uiContigAllocSize);
+					/* Treat this as a soft failure: carry on, but log an error */
+					if (eError != PVRSRV_OK)
+					{
+#if defined(DEBUG)
+						/* No need to zero any further pages */
+						bZeroFail = IMG_TRUE;
+#endif
+					}
+				}
+			}
+		}
+
+		/* Free the remaining pages from the free request */
+		if (0 != ui32AdtnlFreePages)
+		{
+			ui32Index = ui32Loop;
+			_FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages);
+			ui32Loop = 0;
+
+			while (ui32Loop++ < ui32AdtnlFreePages)
+			{
+				/* Set the corresponding mapping table entry to the invalid address */
+				psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID;
+			}
+
+			psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages;
+		}
+
+	}
+
+#if defined(DEBUG)
+	if (bPoisonFail)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the page", __func__));
+	}
+
+	if (bZeroFail)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the page", __func__));
+	}
+#endif
+
+	/* All requested changes have been applied */
+	eError = PVRSRV_OK;
+
+e0:
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       PMRChangeSparseMemCPUMapLocalMem
+@Description    Changes the CPU mappings of a sparse allocation accordingly.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR PMRChangeSparseMemCPUMapLocalMem(PMR_IMPL_PRIVDATA pPriv,
+                                              const PMR *psPMR,
+                                              IMG_UINT64 sCpuVAddrBase,
+                                              IMG_UINT32 ui32AllocPageCount,
+                                              IMG_UINT32 *pai32AllocIndices,
+                                              IMG_UINT32 ui32FreePageCount,
+                                              IMG_UINT32 *pai32FreeIndices)
+{
+	IMG_DEV_PHYADDR *psPageArray;
+	PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
+	uintptr_t sCpuVABase = sCpuVAddrBase;
+	IMG_CPU_PHYADDR sCpuAddrPtr;
+	IMG_BOOL bValid;
+
+	/* Get the base address of the heap */
+	PMR_CpuPhysAddr(psPMR,
+	                psPMRPageArrayData->uiLog2AllocSize,
+	                1,
+	                0,	/* offset zero here means the first page in the PMR */
+	                &sCpuAddrPtr,
+	                &bValid);
+
+	/* The heap's base physical address is computed by subtracting this page's
+	 * offset: the physical address of any page = heap base address + page offset */
+	sCpuAddrPtr.uiAddr -= psPMRPageArrayData->pasDevPAddr[0].uiAddr;
+	psPageArray = psPMRPageArrayData->pasDevPAddr;
+
+	return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
+	                                   sCpuVABase,
+	                                   sCpuAddrPtr,
+	                                   ui32AllocPageCount,
+	                                   pai32AllocIndices,
+	                                   ui32FreePageCount,
+	                                   pai32FreeIndices,
+	                                   IMG_TRUE);
+}
+
+
+static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = {
+	/* pfnLockPhysAddresses */
+	&PMRLockSysPhysAddressesLocalMem,
+	/* pfnUnlockPhysAddresses */
+	&PMRUnlockSysPhysAddressesLocalMem,
+	/* pfnDevPhysAddr */
+	&PMRSysPhysAddrLocalMem,
+	/* pfnAcquireKernelMappingData */
+	&PMRAcquireKernelMappingDataLocalMem,
+	/* pfnReleaseKernelMappingData */
+	&PMRReleaseKernelMappingDataLocalMem,
+#if defined(INTEGRITY_OS)
+	/* pfnMapMemoryObject */
+    NULL,
+	/* pfnUnmapMemoryObject */
+    NULL,
+#endif
+	/* pfnReadBytes */
+	&PMRReadBytesLocalMem,
+	/* pfnWriteBytes */
+	&PMRWriteBytesLocalMem,
+	/* .pfnUnpinMem */
+	NULL,
+	/* .pfnPinMem */
+	NULL,
+	/* pfnChangeSparseMem*/
+	&PMRChangeSparseMemLocalMem,
+	/* pfnChangeSparseMemCPUMap */
+	&PMRChangeSparseMemCPUMapLocalMem,
+	/* pfnMMap */
+	NULL,
+	/* pfnFinalize */
+	&PMRFinalizeLocalMem
+};
+
+PVRSRV_ERROR
+PhysmemNewLocalRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_DEVMEM_SIZE_T uiChunkSize,
+							IMG_UINT32 ui32NumPhysChunks,
+							IMG_UINT32 ui32NumVirtChunks,
+							IMG_UINT32 *pui32MappingTable,
+							IMG_UINT32 uiLog2AllocPageSize,
+							PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							const IMG_CHAR *pszAnnotation,
+							PMR **ppsPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eError2;
+	PMR *psPMR = NULL;
+	PMR_LMALLOCARRAY_DATA *psPrivData = NULL;
+	PMR_FLAGS_T uiPMRFlags;
+	PHYS_HEAP *psPhysHeap;
+	IMG_BOOL bZero;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bPoisonOnFree;
+	IMG_BOOL bOnDemand;
+	IMG_BOOL bContig;
+	IMG_BOOL bFwLocalAlloc;
+	IMG_BOOL bCpuLocalAlloc;
+
+	/* For sparse requests we have to do the allocation
+	 * in chunks rather than requesting one contiguous block */
+	if (ui32NumPhysChunks != ui32NumVirtChunks || ui32NumVirtChunks > 1)
+	{
+		if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags))
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: LMA kernel mapping functions currently "
+					"don't work with discontiguous memory.",
+					__func__));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto errorOnParam;
+		}
+		bContig = IMG_FALSE;
+	}
+	else
+	{
+		bContig = IMG_TRUE;
+	}
+
+	bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bFwLocalAlloc = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bCpuLocalAlloc = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE;
+
+	if (bFwLocalAlloc)
+	{
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	}
+	else if (bCpuLocalAlloc)
+	{
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL];
+	}
+	else
+	{
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+	}
+
+	/* Create Array structure that holds the physical pages */
+	eError = _AllocLMPageArray(psDevNode,
+	                           uiChunkSize * ui32NumVirtChunks,
+	                           uiChunkSize,
+	                           ui32NumPhysChunks,
+	                           ui32NumVirtChunks,
+	                           pui32MappingTable,
+	                           uiLog2AllocPageSize,
+	                           bZero,
+	                           bPoisonOnAlloc,
+	                           bPoisonOnFree,
+	                           bContig,
+	                           bOnDemand,
+	                           bFwLocalAlloc,
+	                           psPhysHeap,
+	                           uiFlags,
+	                           &psPrivData);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnAllocPageArray;
+	}
+
+	if (!bOnDemand)
+	{
+		/* Allocate the physical pages */
+		eError = _AllocLMPages(psPrivData,pui32MappingTable);
+		if (eError != PVRSRV_OK)
+		{
+			goto errorOnAllocPages;
+		}
+	}
+
+	/* In this instance, we simply pass flags straight through.
+
+	   Generically, uiFlags can include things that control the PMR
+	   factory, but we don't need any such thing (at the time of
+	   writing!), and our caller specifies all PMR flags so we don't
+	   need to meddle with what was given to us.
+	*/
+	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+	/* check no significant bits were lost in cast due to different
+	   bit widths for flags */
+	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+	if (bOnDemand)
+	{
+		PDUMPCOMMENT("Deferred Allocation PMR (LMA)");
+	}
+
+
+	eError = PMRCreatePMR(psDevNode,
+	                      psPhysHeap,
+	                      uiSize,
+	                      uiChunkSize,
+	                      ui32NumPhysChunks,
+	                      ui32NumVirtChunks,
+	                      pui32MappingTable,
+	                      uiLog2AllocPageSize,
+	                      uiPMRFlags,
+	                      pszAnnotation,
+	                      &_sPMRLMAFuncTab,
+	                      psPrivData,
+	                      PMR_TYPE_LMA,
+	                      &psPMR,
+	                      PDUMP_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"PhysmemNewLocalRamBackedPMR: Unable to create PMR (status=%d)",
+				eError));
+		goto errorOnCreate;
+	}
+
+	*ppsPMRPtr = psPMR;
+	return PVRSRV_OK;
+
+errorOnCreate:
+	if (!bOnDemand && psPrivData->iNumPagesAllocated)
+	{
+		eError2 = _FreeLMPages(psPrivData, NULL, 0);
+		PVR_ASSERT(eError2 == PVRSRV_OK);
+	}
+
+errorOnAllocPages:
+	eError2 = _FreeLMPageArray(psPrivData);
+	PVR_ASSERT(eError2 == PVRSRV_OK);
+
+errorOnAllocPageArray:
+errorOnParam:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+
+struct PidOSidCouplingList
+{
+	IMG_PID     pId;
+	IMG_UINT32  ui32OSid;
+	IMG_UINT32  ui32OSidReg;
+	IMG_BOOL    bOSidAxiProt;
+
+	struct PidOSidCouplingList *psNext;
+};
+typedef struct PidOSidCouplingList PidOSidCouplingList;
+
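+/* Singly-linked list of PID-to-OSid couplings; separate head and tail
+ * pointers allow O(1) append. */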
+static PidOSidCouplingList *psPidOSidHead=NULL;
+static PidOSidCouplingList *psPidOSidTail=NULL;
+
+void InsertPidOSidsCoupling(IMG_PID pId, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt)
+{
+	PidOSidCouplingList *psTmp;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "(GPU Virtualization Validation): Inserting (PID/ OSid/ OSidReg/ IsSecure) (%d/ %d/ %d/ %s) into list",
+	         pId, ui32OSid, ui32OSidReg, (bOSidAxiProt) ? "Yes" : "No"));
+
+	psTmp = OSAllocMem(sizeof(PidOSidCouplingList));
+
+	if (psTmp == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "(GPU Virtualization Validation): Memory allocation failed. No list insertion => program will execute normally."));
+		return;
+	}
+
+	psTmp->pId = pId;
+	psTmp->ui32OSid = ui32OSid;
+	psTmp->ui32OSidReg = ui32OSidReg;
+	psTmp->bOSidAxiProt = bOSidAxiProt;
+
+	psTmp->psNext = NULL;
+	if (psPidOSidHead == NULL)
+	{
+		psPidOSidHead = psTmp;
+		psPidOSidTail = psTmp;
+	}
+	else
+	{
+		psPidOSidTail->psNext = psTmp;
+		psPidOSidTail = psTmp;
+	}
+
+	return;
+}
+
+void RetrieveOSidsfromPidList(IMG_PID pId, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt)
+{
+	PidOSidCouplingList *psTmp;
+
+	for (psTmp = psPidOSidHead; psTmp != NULL; psTmp = psTmp->psNext)
+	{
+		if (psTmp->pId == pId)
+		{
+			(*pui32OSid)     = psTmp->ui32OSid;
+			(*pui32OSidReg)  = psTmp->ui32OSidReg;
+			(*pbOSidAxiProt) = psTmp->bOSidAxiProt;
+
+			return;
+		}
+	}
+
+	(*pui32OSid) = 0;
+	(*pui32OSidReg) = 0;
+	(*pbOSidAxiProt) = IMG_FALSE;
+
+	return;
+}
+
+void RemovePidOSidCoupling(IMG_PID pId)
+{
+	PidOSidCouplingList *psTmp, *psPrev = NULL;
+
+	for (psTmp = psPidOSidHead; psTmp != NULL; psTmp = psTmp->psNext)
+	{
+		if (psTmp->pId == pId) break;
+		psPrev = psTmp;
+	}
+
+	if (psTmp == NULL)
+	{
+		return;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "(GPU Virtualization Validation): Deleting Pairing %d / (%d - %d) from list", psTmp->pId, psTmp->ui32OSid, psTmp->ui32OSidReg));
+
+	if (psTmp == psPidOSidHead)
+	{
+		if (psPidOSidHead->psNext == NULL)
+		{
+			psPidOSidHead = NULL;
+			psPidOSidTail = NULL;
+			OSFreeMem(psTmp);
+
+			return;
+		}
+
+		psPidOSidHead = psPidOSidHead->psNext;
+		OSFreeMem(psTmp);
+		return;
+	}
+
+	if (psPrev == NULL) return;
+
+	psPrev->psNext = psTmp->psNext;
+	if (psTmp == psPidOSidTail)
+	{
+		psPidOSidTail = psPrev;
+	}
+
+	OSFreeMem(psTmp);
+
+	return;
+}
+
+#endif
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physmem_tdsecbuf.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physmem_tdsecbuf.c
new file mode 100644
index 0000000..5b43b09
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/physmem_tdsecbuf.c
@@ -0,0 +1,591 @@
+/*************************************************************************/ /*!
+@File
+@Title          Implementation of PMR functions for Trusted Device secure memory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for physical memory imported
+                from a trusted environment. The driver cannot acquire CPU
+                mappings for this secure memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "physmem_tdsecbuf.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+
+#if !defined(NO_HARDWARE)
+
+typedef struct _PMR_TDSECBUF_DATA_ {
+	PVRSRV_DEVICE_NODE    *psDevNode;
+	PHYS_HEAP             *psTDSecBufPhysHeap;
+	IMG_CPU_PHYADDR       sCpuPAddr;
+	IMG_DEV_PHYADDR       sDevPAddr;
+	IMG_UINT64            ui64Size;
+	IMG_UINT32            ui32Log2PageSize;
+	IMG_UINT64            ui64SecBufHandle;
+} PMR_TDSECBUF_DATA;
+
+
+/*
+ * Implementation of callback functions
+ */
+
+static PVRSRV_ERROR PMRSysPhysAddrTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+                                              IMG_UINT32 ui32Log2PageSize,
+                                              IMG_UINT32 ui32NumOfPages,
+                                              IMG_DEVMEM_OFFSET_T *puiOffset,
+                                              IMG_BOOL *pbValid,
+                                              IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+	IMG_UINT32 i;
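+	/* Note: pbValid is not written here. The PMR core appears to compute
+	 * validity from its mapping table (see _PMRLogicalOffsetToPhysicalOffset
+	 * in pmr.c), and this PMR is created with a single physical chunk, so
+	 * every offset is valid and only the device addresses are filled in. */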
+
+	if (psPrivData->ui32Log2PageSize != ui32Log2PageSize)
+	{
+		return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+	}
+
+	for (i = 0; i < ui32NumOfPages; i++)
+	{
+		psDevPAddr[i].uiAddr = psPrivData->sDevPAddr.uiAddr + puiOffset[i];
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRFinalizeTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+	PVRSRV_DEVICE_CONFIG *psDevConfig = psPrivData->psDevNode->psDevConfig;
+	PVRSRV_ERROR eError;
+
+	eError = psDevConfig->pfnTDSecureBufFree(psDevConfig->hSysData,
+											 psPrivData->ui64SecBufHandle);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PMRFinalizeTDSecBufMem: TDSecBufFree not implemented on the Trusted Device!"));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PMRFinalizeTDSecBufMem: TDSecBufFree cannot free the resource!"));
+		}
+		return eError;
+	}
+
+	PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+	OSFreeMem(psPrivData);
+
+	return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRTDSecBufFuncTab = {
+	.pfnDevPhysAddr = &PMRSysPhysAddrTDSecBufMem,
+	.pfnFinalize = &PMRFinalizeTDSecBufMem,
+};
+
+
+/*
+ * Public functions
+ */
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      IMG_DEVMEM_SIZE_T uiSize,
+                                      PMR_LOG2ALIGN_T uiLog2Align,
+                                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                      PMR **ppsPMRPtr,
+                                      IMG_UINT64 *pui64SecBufHandle)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+	RGX_DATA *psRGXData = (RGX_DATA *)(psDevConfig->hDevData);
+	PMR_TDSECBUF_DATA *psPrivData = NULL;
+	PMR *psPMR = NULL;
+	IMG_UINT32 uiMappingTable = 0;
+	PMR_FLAGS_T uiPMRFlags;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* In this instance, we simply pass flags straight through.
+	 * Generically, uiFlags can include things that control the PMR
+	 * factory, but we don't need any such thing (at the time of
+	 * writing!), and our caller specifies all PMR flags so we don't
+	 * need to meddle with what was given to us.
+	 */
+	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+	/* Check no significant bits were lost in cast due to different bit widths for flags */
+	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+	/* Many flags can be dropped as the driver cannot access this memory
+	 * and it is assumed that the trusted zone is physically contiguous
+	 */
+	uiPMRFlags &= ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+	                PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+	                PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC |
+	                PVRSRV_MEMALLOCFLAG_POISON_ON_FREE |
+	                PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK);
+
+	psPrivData = OSAllocZMem(sizeof(PMR_TDSECBUF_DATA));
+	if (psPrivData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errorOnAllocData;
+	}
+
+	/* Get required info for the TD Secure Buffer physical heap */
+	if (!psRGXData->bHasTDSecureBufPhysHeap)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Trusted Device physical heap not available!"));
+		eError = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+		goto errorOnAcquireHeap;
+	}
+
+	eError = PhysHeapAcquire(psRGXData->uiTDSecureBufPhysHeapID,
+	                         &psPrivData->psTDSecBufPhysHeap);
+	if (eError != PVRSRV_OK) goto errorOnAcquireHeap;
+
+	psPrivData->ui64Size = uiSize;
+
+	if (psDevConfig->pfnTDSecureBufAlloc && psDevConfig->pfnTDSecureBufFree)
+	{
+		PVRSRV_TD_SECBUF_PARAMS sTDSecBufParams;
+
+		psPrivData->psDevNode = psDevNode;
+
+		/* Ask the Trusted Device to allocate secure memory */
+		sTDSecBufParams.uiSize = uiSize;
+		sTDSecBufParams.uiAlign = 1 << uiLog2Align;
+
+		/* These will be returned by pfnTDSecureBufAlloc on success */
+		sTDSecBufParams.psSecBufAddr = &psPrivData->sCpuPAddr;
+		sTDSecBufParams.pui64SecBufHandle = &psPrivData->ui64SecBufHandle;
+
+		eError = psDevConfig->pfnTDSecureBufAlloc(psDevConfig->hSysData,
+												  &sTDSecBufParams);
+		if (eError != PVRSRV_OK)
+		{
+			if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufAlloc not implemented on the Trusted Device!"));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufAlloc cannot allocate the resource!"));
+			}
+			goto errorOnAlloc;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufAlloc/Free not implemented!"));
+		eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+		goto errorOnAlloc;
+	}
+
+	PhysHeapCpuPAddrToDevPAddr(psPrivData->psTDSecBufPhysHeap,
+	                           1,
+	                           &psPrivData->sDevPAddr,
+	                           &psPrivData->sCpuPAddr);
+
+	/* Check that the secure buffer has the requested alignment
+	 * (i.e. that it is aligned to a Rogue cache line) */
+	if ((((1ULL << uiLog2Align) - 1) & psPrivData->sCpuPAddr.uiAddr) != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "Trusted Device physical heap has the wrong alignment! "
+				 "Physical address 0x%llx, alignment mask 0x%llx",
+				 (unsigned long long) psPrivData->sCpuPAddr.uiAddr,
+				 ((1ULL << uiLog2Align) - 1)));
+		eError = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+		goto errorOnCheckAlign;
+	}
+
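+	/* The whole secure buffer is exposed as one physically contiguous
+	 * allocation, so its "page size" is simply the requested alignment. */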
+	psPrivData->ui32Log2PageSize = uiLog2Align;
+
+	eError = PMRCreatePMR(psDevNode,
+	                      psPrivData->psTDSecBufPhysHeap,
+	                      psPrivData->ui64Size,
+	                      psPrivData->ui64Size,
+	                      1,                 /* ui32NumPhysChunks */
+	                      1,                 /* ui32NumVirtChunks */
+	                      &uiMappingTable,   /* pui32MappingTable (not used) */
+	                      uiLog2Align,
+	                      uiPMRFlags,
+	                      "TDSECUREBUF_PMR",
+	                      &_sPMRTDSecBufFuncTab,
+	                      psPrivData,
+	                      PMR_TYPE_TDSECBUF,
+	                      &psPMR,
+	                      PDUMP_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnCreatePMR;
+	}
+
+#if defined(PVR_RI_DEBUG)
+	eError = RIWritePMREntryKM(psPMR,
+	                           sizeof("TDSecureBuffer"),
+	                           "TDSecureBuffer",
+	                           psPrivData->ui64Size);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+		         "%s: Failed to write PMR entry (%s)",
+		         __func__, PVRSRVGetErrorStringKM(eError)));
+	}
+#endif
+
+	*ppsPMRPtr = psPMR;
+	*pui64SecBufHandle = psPrivData->ui64SecBufHandle;
+
+	return PVRSRV_OK;
+
+errorOnCreatePMR:
+errorOnCheckAlign:
+	{
+		/* Use a separate error code here: overwriting eError with a
+		 * successful free result would make this failure path return
+		 * PVRSRV_OK with *ppsPMRPtr left unset */
+		PVRSRV_ERROR eFreeError;
+
+		eFreeError = psDevConfig->pfnTDSecureBufFree(psDevConfig->hSysData,
+		                                             psPrivData->ui64SecBufHandle);
+		if (eFreeError != PVRSRV_OK)
+		{
+			if (eFreeError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufFree not implemented on the Trusted Device!"));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufFree cannot free the resource!"));
+			}
+		}
+	}
+errorOnAlloc:
+	PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+errorOnAcquireHeap:
+	OSFreeMem(psPrivData);
+
+errorOnAllocData:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+#else /* NO_HARDWARE */
+
+#include "physmem_osmem.h"
+
+typedef struct _PMR_TDSECBUF_DATA_ {
+	PHYS_HEAP  *psTDSecBufPhysHeap;
+	PMR        *psOSMemPMR;
+	IMG_UINT32 ui32Log2PageSize;
+} PMR_TDSECBUF_DATA;
+
+
+/*
+ * Implementation of callback functions
+ */
+
+static PVRSRV_ERROR
+PMRLockPhysAddressesTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+	return PMRLockSysPhysAddresses(psPrivData->psOSMemPMR);
+}
+
+static PVRSRV_ERROR
+PMRUnlockPhysAddressesTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+	return PMRUnlockSysPhysAddresses(psPrivData->psOSMemPMR);
+}
+
+static PVRSRV_ERROR
+PMRSysPhysAddrTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+                          IMG_UINT32 ui32Log2PageSize,
+                          IMG_UINT32 ui32NumOfPages,
+                          IMG_DEVMEM_OFFSET_T *puiOffset,
+                          IMG_BOOL *pbValid,
+                          IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+	/* On the assumption that this PMR was created with
+	 * NumPhysChunks == NumVirtChunks then
+	 * puiOffset[0] == uiLogicalOffset
+	 */
+
+	return PMR_DevPhysAddr(psPrivData->psOSMemPMR,
+	                       ui32Log2PageSize,
+	                       ui32NumOfPages,
+	                       puiOffset[0],
+	                       psDevPAddr,
+	                       pbValid);
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+                                       size_t uiOffset,
+                                       size_t uiSize,
+                                       void **ppvKernelAddressOut,
+                                       IMG_HANDLE *phHandleOut,
+                                       PMR_FLAGS_T ulFlags)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+	size_t uiLengthOut;
+
+	PVR_UNREFERENCED_PARAMETER(ulFlags);
+
+	return PMRAcquireKernelMappingData(psPrivData->psOSMemPMR,
+	                                   uiOffset,
+	                                   uiSize,
+	                                   ppvKernelAddressOut,
+	                                   &uiLengthOut,
+	                                   phHandleOut);
+}
+
+static void
+PMRReleaseKernelMappingDataTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+                                       IMG_HANDLE hHandle)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+	PMRReleaseKernelMappingData(psPrivData->psOSMemPMR, hHandle);
+}
+
+static PVRSRV_ERROR PMRFinalizeTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+	PMRUnrefPMR(psPrivData->psOSMemPMR);
+	PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+	OSFreeMem(psPrivData);
+
+	return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRTDSecBufFuncTab = {
+	.pfnLockPhysAddresses = &PMRLockPhysAddressesTDSecBufMem,
+	.pfnUnlockPhysAddresses = &PMRUnlockPhysAddressesTDSecBufMem,
+	.pfnDevPhysAddr = &PMRSysPhysAddrTDSecBufMem,
+	.pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataTDSecBufMem,
+	.pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataTDSecBufMem,
+	.pfnFinalize = &PMRFinalizeTDSecBufMem,
+};
+
+
+/*
+ * Public functions
+ */
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      IMG_DEVMEM_SIZE_T uiSize,
+                                      PMR_LOG2ALIGN_T uiLog2Align,
+                                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                      PMR **ppsPMRPtr,
+                                      IMG_UINT64 *pui64SecBufHandle)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+	RGX_DATA *psRGXData = (RGX_DATA *)(psDevConfig->hDevData);
+	PMR_TDSECBUF_DATA *psPrivData = NULL;
+	PMR *psPMR = NULL;
+	PMR *psOSPMR = NULL;
+	IMG_UINT32 uiMappingTable = 0;
+	PMR_FLAGS_T uiPMRFlags;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* In this instance, we simply pass flags straight through.
+	 * Generically, uiFlags can include things that control the PMR
+	 * factory, but we don't need any such thing (at the time of
+	 * writing!), and our caller specifies all PMR flags so we don't
+	 * need to meddle with what was given to us.
+	 */
+	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+	/* Check no significant bits were lost in cast due to different bit widths for flags */
+	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+	psPrivData = OSAllocZMem(sizeof(PMR_TDSECBUF_DATA));
+	if (psPrivData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errorOnAllocData;
+	}
+
+	/* Get required info for the TD Secure Buffer physical heap */
+	if (!psRGXData->bHasTDSecureBufPhysHeap)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Trusted Device physical heap not available!"));
+		eError = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+		goto errorOnAcquireHeap;
+	}
+
+	eError = PhysHeapAcquire(psRGXData->uiTDSecureBufPhysHeapID,
+	                         &psPrivData->psTDSecBufPhysHeap);
+	if (eError != PVRSRV_OK) goto errorOnAcquireHeap;
+
+	psPrivData->ui32Log2PageSize = uiLog2Align;
+
+	/* Note that this PMR is only used to copy the FW blob to memory and
+	 * to dump this memory to pdump, it doesn't need to have the alignment
+	 * requested by the caller
+	 */
+	eError = PhysmemNewOSRamBackedPMR(psDevNode,
+	                                  uiSize,
+	                                  uiSize,
+	                                  1,                 /* ui32NumPhysChunks */
+	                                  1,                 /* ui32NumVirtChunks */
+	                                  &uiMappingTable,
+	                                  psPrivData->ui32Log2PageSize,
+	                                  uiFlags,
+	                                  "TDSECUREBUF_OSMEM",
+	                                  &psOSPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnCreateOSPMR;
+	}
+
+	/* This is the primary PMR dumped with correct memspace and alignment */
+	eError = PMRCreatePMR(psDevNode,
+	                      psPrivData->psTDSecBufPhysHeap,
+	                      uiSize,
+	                      uiSize,
+	                      1,               /* ui32NumPhysChunks */
+	                      1,               /* ui32NumVirtChunks */
+	                      &uiMappingTable, /* pui32MappingTable (not used) */
+	                      uiLog2Align,
+	                      uiPMRFlags,
+	                      "TDSECUREBUF_PMR",
+	                      &_sPMRTDSecBufFuncTab,
+	                      psPrivData,
+	                      PMR_TYPE_TDSECBUF,
+	                      &psPMR,
+	                      PDUMP_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnCreateTDPMR;
+	}
+
+#if defined(PVR_RI_DEBUG)
+	eError = RIWritePMREntryKM(psPMR,
+	                           sizeof("TDSecureBuffer"),
+	                           "TDSecureBuffer",
+	                           uiSize); /* the NO_HARDWARE private data has no ui64Size field */
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+		         "%s: Failed to write PMR entry (%s)",
+		         __func__, PVRSRVGetErrorStringKM(eError)));
+	}
+#endif
+
+	psPrivData->psOSMemPMR = psOSPMR;
+	*ppsPMRPtr = psPMR;
+	*pui64SecBufHandle = 0x0ULL;
+
+	return PVRSRV_OK;
+
+errorOnCreateTDPMR:
+	PMRUnrefPMR(psOSPMR);
+
+errorOnCreateOSPMR:
+	PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+
+errorOnAcquireHeap:
+	OSFreeMem(psPrivData);
+
+errorOnAllocData:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+#endif /* NO_HARDWARE */
+
+#else /* SUPPORT_TRUSTED_DEVICE */
+
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      IMG_DEVMEM_SIZE_T uiSize,
+                                      PMR_LOG2ALIGN_T uiLog2Align,
+                                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                      PMR **ppsPMRPtr,
+                                      IMG_UINT64 *pui64SecBufHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiLog2Align);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+	PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+	PVR_UNREFERENCED_PARAMETER(pui64SecBufHandle);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+#endif
+
+PVRSRV_ERROR PhysmemImportSecBuf(CONNECTION_DATA *psConnection,
+                                 PVRSRV_DEVICE_NODE *psDevNode,
+                                 IMG_DEVMEM_SIZE_T uiSize,
+                                 IMG_UINT32 ui32Log2Align,
+                                 PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                 PMR **ppsPMRPtr,
+                                 IMG_UINT64 *pui64SecBufHandle)
+{
+	return PhysmemNewTDSecureBufPMR(psConnection,
+	                                psDevNode,
+	                                uiSize,
+	                                (PMR_LOG2ALIGN_T)ui32Log2Align,
+	                                uiFlags,
+	                                ppsPMRPtr,
+	                                pui64SecBufHandle);
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pmr.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pmr.c
new file mode 100644
index 0000000..48326c5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pmr.c
@@ -0,0 +1,3528 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physmem (PMR) abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management.  This module is responsible for
+                the "PMR" abstraction.  A PMR (Physical Memory Resource)
+                represents some unit of physical memory which is
+                allocated/freed/mapped/unmapped as an indivisible unit
+                (higher software levels provide an abstraction above that
+                to deal with dividing this down into smaller manageable units).
+                Importantly, this module knows nothing of virtual memory, or
+                of MMUs etc., with one excusable exception.  We have the
+                concept of a "page size", which really means nothing in
+                physical memory, but represents a "contiguity quantum" such
+                that the higher level modules which map this memory are able
+                to verify that it matches the needs of the page size for the
+                virtual realm into which it is being mapped.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "devicemem_server_utils.h"
+
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "pmr_impl.h"
+#include "pmr_os.h"
+#include "pvrsrv.h"
+
+#include "allocmem.h"
+#include "lock.h"
+
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "secure_export.h"
+#include "ossecure_export.h"
+#endif
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+#define PMR_STRUCTURE_ASSERT
+
+#define PMR_SIGNATURE_LIVE 0x504D524C
+#define PMR_SIGNATURE_DEAD 0x504D5244
+
+
+/* ourselves */
+#include "pmr.h"
+
+/* A "context" for the physical memory block resource allocator.
+
+   Context is probably the wrong word.
+
+   There is almost certainly only one of these, ever, in the system.
+   But, let's keep the notion of a context anyway, "just-in-case".
+*/
+static struct _PMR_CTX_
+{
+    /* For debugging, and PDump, etc., let's issue a forever
+       incrementing serial number to each allocation. */
+    IMG_UINT64 uiNextSerialNum;
+
+    /* For security, we only allow a PMR to be mapped if the caller
+       knows its key.  We can pseudo-randomly generate keys */
+    IMG_UINT64 uiNextKey;
+
+    /* For debugging only, I guess:  Number of live PMRs */
+    IMG_UINT32 uiNumLivePMRs;
+
+	/* Lock for this structure */
+	POS_LOCK hLock;
+
+    /* In order to seed the uiNextKey, we enforce initialisation at
+       driver load time.  Also, we can debug check at driver unload
+       that the PMR count is zero. */
+    IMG_BOOL bModuleInitialised;
+} _gsSingletonPMRContext = { 1, 0, 0, NULL, IMG_FALSE };
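+/* Initialiser order above: uiNextSerialNum = 1, uiNextKey = 0,
+   uiNumLivePMRs = 0, hLock = NULL, bModuleInitialised = IMG_FALSE */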
+
+
+/* A PMR. One per physical allocation.  May be "shared".
+
+   "shared" is ambiguous.  We need to be careful with terminology.
+   There are two ways in which a PMR may be "shared" and we need to be
+   sure that we are clear which we mean.
+
+   i)   multiple small allocations living together inside one PMR;
+
+   ii)  one single allocation filling a PMR but mapped into multiple
+        memory contexts.
+
+   This is more important further up the stack - at this level, all we
+   care is that the PMR is being referenced multiple times.
+*/
+struct _PMR_
+{
+    /* This object is strictly refcounted.  References include:
+       - mapping
+       - live handles (to this object)
+       - live export handles
+       (thus it is normal for allocated and exported memory to have a refcount of 3)
+       The object is destroyed when and only when the refcount reaches 0
+    */
+
+	/* Device node on which this PMR was created and is valid */
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+    /*
+       Physical address translation (device <> cpu) is done on a per device
+       basis which means we need the physical heap info
+    */
+    PHYS_HEAP *psPhysHeap;
+
+	/* Signature value to show validity of structure */
+	IMG_UINT32 ui32PMRSignature;
+
+    ATOMIC_T iRefCount;
+
+    /* lock count - this is the number of times
+       PMRLockSysPhysAddresses() has been called, less the number of
+       PMRUnlockSysPhysAddresses() calls.  This is arguably here for
+       debug reasons only, as the refcount is already incremented as a
+       matter of course.  Really, this just allows us to trap protocol
+       errors: i.e. calling PMRSysPhysAddr(),
+       without a lock, or calling PMRUnlockSysPhysAddresses() too many
+       or too few times. */
+    ATOMIC_T iLockCount;
+
+	/* Lock for this structure */
+	POS_LOCK hLock;
+
+    /* Incrementing serial number to each allocation. */
+    IMG_UINT64 uiSerialNum;
+
+    /* For security, we only allow a PMR to be mapped if the caller
+       knows its key.  We can pseudo-randomly generate keys */
+    PMR_PASSWORD_T uiKey;
+
+    /* Callbacks for per-flavour functions */
+    const PMR_IMPL_FUNCTAB *psFuncTab;
+
+    /* Data associated with the "subtype" */
+    PMR_IMPL_PRIVDATA pvFlavourData;
+
+    /* What kind of PMR do we have? */
+    PMR_IMPL_TYPE eFlavour;
+
+    /* And for pdump */
+    const IMG_CHAR *pszPDumpDefaultMemspaceName;
+#if defined(PDUMP)
+    /* Allocation annotation */
+    IMG_CHAR *pszAnnotation;
+
+    IMG_HANDLE hPDumpAllocHandle;
+
+	/* Whether PDumping of this PMR must be persistent
+	 * (i.e. it must be present in every future PDump stream as well)
+	 */
+	IMG_BOOL	bForcePersistent;
+
+	IMG_UINT32 uiNumPDumpBlocks;
+#endif
+
+    /* Logical size of allocation.  "logical", because a PMR can
+       represent memory that will never physically exist.  This is the
+       amount of virtual space that the PMR would consume when it's
+       mapped into a virtual allocation. */
+    PMR_SIZE_T uiLogicalSize;
+
+	/* Mapping table for the allocation.
+	   PMR's can be sparse in which case not all the "logic" addresses
+	   in it are valid. We need to know which addresses are and aren't
+	   valid when mapping or reading the PMR.
+	   The mapping table translates "logical" offsets into physical
+	   offsets which is what we always pass to the PMR factory
+	   (so it doesn't have to be concerned about sparseness issues) */
+    PMR_MAPPING_TABLE *psMappingTable;
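+
+	/* Illustrative example (hypothetical numbers): a sparse PMR with four
+	   virtual chunks of which only chunks 0 and 2 are backed would have
+	   aui32Translation = { 0, TRANSLATION_INVALID, 2, TRANSLATION_INVALID },
+	   so logical offsets falling in chunks 1 and 3 resolve as invalid. */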
+
+    /* Indicates whether this PMR has been allocated as sparse.
+     * The condition for this variable to be set at allocation time is:
+     * (numVirtChunks != numPhysChunks) || (numVirtChunks > 1)
+     */
+    IMG_BOOL bSparseAlloc;
+
+    /* Minimum Physical Contiguity Guarantee.  Might be called "page
+       size", but that would be incorrect, as page size is something
+       meaningful only in virtual realm.  This contiguity guarantee
+       provides an inequality that can be verified/asserted/whatever
+       to ensure that this PMR conforms to the page size requirement
+       of the place the PMR gets mapped.  (May be used to select an
+       appropriate heap in variable page size systems)
+
+       The absolutely necessary condition is this:
+
+       device MMU page size <= actual physical contiguity.
+
+       We go one step further in order to be able to provide an early warning / early compatibility check and say this:
+
+       device MMU page size <= 2**(uiLog2ContiguityGuarantee) <= actual physical contiguity.
+
+       In this way, it is possible to make the page table reservation
+       in the device MMU without even knowing the granularity of the
+       physical memory (i.e. useful for being able to allocate virtual
+       before physical)
+    */
+    PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee;
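+
+    /* Illustrative example (hypothetical numbers): with a 4 KB device MMU
+       page size and uiLog2ContiguityGuarantee == 16, the inequality
+       4096 <= 2**16 <= actual physical contiguity requires the factory to
+       back this PMR with physically contiguous runs of at least 64 KB. */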
+
+    /* Flags.  We store a copy of the "PMR flags" (usually a subset of
+       the flags given at allocation time) and return them to any
+       caller of PMR_Flags().  The intention of these flags is that
+       the ones stored here are used to represent permissions, such
+       that no one is able to map a PMR in a mode in which they are not
+       allowed, e.g. writeable for a read-only PMR, etc. */
+    PMR_FLAGS_T uiFlags;
+
+    /* Do we really need this? For now we'll keep it, until we know we don't. */
+    /* NB: this is not the "memory context" in client terms - this is
+       _purely_ the "PMR" context, of which there is almost certainly only
+       ever one per system as a whole, but we'll keep the concept
+       anyway, just-in-case. */
+    struct _PMR_CTX_ *psContext;
+
+#if defined(PVR_RI_DEBUG)
+    /*
+	 * Stored handle to PMR RI entry
+	 */
+	void		*hRIHandle;
+#endif
+	IMG_BOOL bPathTaken;
+};
+
+/* do we need a struct for the export handle?  I'll use one for now, but if nothing goes in it, we'll lose it */
+struct _PMR_EXPORT_
+{
+    struct _PMR_ *psPMR;
+};
+
+struct _PMR_PAGELIST_
+{
+	struct _PMR_ *psReferencePMR;
+};
+
+PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR)
+{
+	PPVRSRV_DEVICE_NODE psReturnedDeviceNode = NULL;
+	PVR_ASSERT(psExportPMR != NULL);
+	PVR_ASSERT(psExportPMR->psPMR != NULL);
+	PVR_ASSERT(OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0);
+
+	if (psExportPMR && psExportPMR->psPMR &&
+	    (OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0))
+	{
+		psReturnedDeviceNode = PMR_DeviceNode(psExportPMR->psPMR);
+	}
+	return psReturnedDeviceNode;
+}
+
+void PMRSetPath(PMR *psPMR)
+{
+	psPMR->bPathTaken = IMG_TRUE;
+}
+
+int PMRRefCount(const PMR *psPMR)
+{
+	return OSAtomicRead(&psPMR->iRefCount);
+}
+
+
+#ifdef PMR_STRUCTURE_ASSERT
+static IMG_BOOL
+_PMRAssert(const PMR *psPMR)
+{
+	if (psPMR
+	    && PMR_SIGNATURE_LIVE == psPMR->ui32PMRSignature
+	    && (OSAtomicRead(&psPMR->iRefCount) >= 0))
+	{
+		return IMG_TRUE;
+	}
+
+	if (!psPMR)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PMR %p NULL", __func__, psPMR));
+	}
+	else if (PMR_SIGNATURE_DEAD == psPMR->ui32PMRSignature)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PMR %p DEAD", __func__, psPMR));
+		PVR_LOG(("PMR : %p  Ref Value: %d Path Taken: %s\n", psPMR, PMRRefCount(psPMR), (psPMR->bPathTaken)?"yes":"no"));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PMR %p CORRUPT %08x", __func__,
+		         psPMR, psPMR->ui32PMRSignature));
+		PVR_LOG(("PMR : %p  Ref Value: %d Path Taken: %s\n", psPMR, PMRRefCount(psPMR), (psPMR->bPathTaken)?"yes":"no"));
+	}
+
+	OSWarnOn(IMG_TRUE);
+	return IMG_FALSE;
+}
+#else
+#define _PMRAssert(a) PVR_ASSERT(a)
+#endif
+
+#define MIN3(a,b,c)	(((a) < (b)) ? (((a) < (c)) ? (a):(c)) : (((b) < (c)) ? (b):(c)))
+
+static PVRSRV_ERROR
+_PMRCreate(PMR_SIZE_T uiLogicalSize,
+           PMR_SIZE_T uiChunkSize,
+           IMG_UINT32 ui32NumPhysChunks,
+           IMG_UINT32 ui32NumVirtChunks,
+           IMG_UINT32 *pui32MappingTable,
+           PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+           PMR_FLAGS_T uiFlags,
+           PMR **ppsPMR)
+{
+    void *pvPMRLinAddr;
+    PMR *psPMR;
+    PMR_MAPPING_TABLE *psMappingTable;
+    struct _PMR_CTX_ *psContext;
+    IMG_UINT32 i, ui32Temp = 0;
+    IMG_UINT32 ui32Remainder;
+    PVRSRV_ERROR eError;
+    IMG_BOOL bSparse = IMG_FALSE;
+
+    psContext = &_gsSingletonPMRContext;
+
+	/* Do we have a sparse allocation? */
+    if ((ui32NumVirtChunks != ui32NumPhysChunks) ||
+        (ui32NumVirtChunks > 1))
+    {
+        bSparse = IMG_TRUE;
+    }
+
+	/* Extra checks required for sparse PMRs */
+	if (uiLogicalSize != uiChunkSize)
+	{
+		/* Check the logical size and chunk information agree with each other */
+		if (uiLogicalSize != (uiChunkSize * ui32NumVirtChunks))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Bad mapping size (uiLogicalSize = 0x%llx, uiChunkSize = 0x%llx, ui32NumVirtChunks = %d)",
+					__FUNCTION__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumVirtChunks));
+			return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+		}
+
+		/* Check that the chunk size is a multiple of the contiguity */
+		OSDivide64(uiChunkSize, (1 << uiLog2ContiguityGuarantee), &ui32Remainder);
+		if (ui32Remainder)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Bad chunk size, must be a multiple of the contiguity "
+					"(uiChunkSize = 0x%llx, uiLog2ContiguityGuarantee = %u)",
+					__FUNCTION__,
+					(unsigned long long) uiChunkSize,
+					uiLog2ContiguityGuarantee));
+			return PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE;
+		}
+	}
+
+	pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + sizeof(IMG_UINT32) * ui32NumVirtChunks);
+
+	if (pvPMRLinAddr == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psPMR = (PMR *) pvPMRLinAddr;
+	psMappingTable = (PMR_MAPPING_TABLE *) (((IMG_CHAR *) pvPMRLinAddr) + sizeof(*psPMR));
+
+	eError = OSLockCreate(&psPMR->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		OSFreeMem(psPMR);
+		return eError;
+	}
+
+	/* Setup the mapping table */
+	psMappingTable->uiChunkSize = uiChunkSize;
+	psMappingTable->ui32NumVirtChunks = ui32NumVirtChunks;
+	psMappingTable->ui32NumPhysChunks = ui32NumPhysChunks;
+	OSCachedMemSet(&psMappingTable->aui32Translation[0], 0xFF,
+	               sizeof(psMappingTable->aui32Translation[0]) * ui32NumVirtChunks);
+	for (i=0; i<ui32NumPhysChunks; i++)
+	{
+		ui32Temp = pui32MappingTable[i];
+		psMappingTable->aui32Translation[ui32Temp] = ui32Temp;
+	}
+
+	/* Setup the PMR */
+	OSAtomicWrite(&psPMR->iRefCount, 0);
+	OSAtomicWrite(&psPMR->iLockCount, 0);
+	psPMR->psContext = psContext;
+	psPMR->uiLogicalSize = uiLogicalSize;
+	psPMR->uiLog2ContiguityGuarantee = uiLog2ContiguityGuarantee;
+	psPMR->uiFlags = uiFlags;
+	psPMR->psMappingTable = psMappingTable;
+	psPMR->bSparseAlloc = bSparse;
+	psPMR->uiKey = psContext->uiNextKey;
+	psPMR->uiSerialNum = psContext->uiNextSerialNum;
+	psPMR->ui32PMRSignature = PMR_SIGNATURE_LIVE;
+
+#if defined(PVR_RI_DEBUG)
+	psPMR->hRIHandle = NULL;
+#endif
+
+	OSLockAcquire(psContext->hLock);
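+	/* Evolve the context key with a multiply/xor mix of the previous key
+	   and the new PMR's address: pseudo-random, not cryptographic. */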
+	psContext->uiNextKey = (0x80200003 * psContext->uiNextKey)
+		^ (0xf00f0081 * (uintptr_t)pvPMRLinAddr);
+	psContext->uiNextSerialNum++;
+	*ppsPMR = psPMR;
+	PVR_DPF((PVR_DBG_MESSAGE, "pmr.c: created PMR @0x%p", psPMR));
+	/* Increment live PMR count */
+	psContext->uiNumLivePMRs++;
+	OSLockRelease(psContext->hLock);
+
+	return PVRSRV_OK;
+}
+
+/* This function returns IMG_TRUE if the PMR is in use and IMG_FALSE otherwise.
+ * It is not thread safe: the caller must ensure thread safety, either by
+ * explicitly taking the lock on the PMR or through other means */
+IMG_BOOL  PMRIsPMRLive(PMR *psPMR)
+{
+	return (OSAtomicRead(&psPMR->iRefCount) > 0);
+}
+
+
+static IMG_UINT32
+_Ref(PMR *psPMR)
+{
+	PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) >= 0);
+	/* We need to ensure that this function is always executed under
+	 * PMRLock. The only exception acceptable is the unloading of the driver.
+	 */
+	return OSAtomicIncrement(&psPMR->iRefCount);
+}
+
+static IMG_UINT32
+_Unref(PMR *psPMR)
+{
+	PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) > 0);
+	/* We need to ensure that this function is always executed under
+	 * PMRLock. The only exception acceptable is the unloading of the driver.
+	 */
+	return OSAtomicDecrement(&psPMR->iRefCount);
+}
+
+static void
+_UnrefAndMaybeDestroy(PMR *psPMR)
+{
+	PVRSRV_ERROR eError2;
+	struct _PMR_CTX_ *psCtx;
+	IMG_INT iRefCount;
+
+	PVR_ASSERT(psPMR != NULL);
+
+	iRefCount = _Unref(psPMR);
+
+	if (iRefCount == 0)
+	{
+		if (psPMR->psFuncTab->pfnFinalize != NULL)
+		{
+			eError2 = psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData);
+
+			/* PMR unref can be called asynchronously by the kernel or other
+			 * third-party modules (e.g. display) that don't go through the
+			 * usual services bridge. The same PMR can be referenced
+			 * simultaneously on a different path, resulting in a race
+			 * condition. Depending on the race, a factory may refuse to
+			 * destroy the resource associated with this PMR if a reference
+			 * on it was taken prior to the unref; in that case the PMR
+			 * factory function returns this error.
+			 *
+			 * When such an error is encountered, the factory must leave the
+			 * state associated with the PMR undisturbed. At this point we
+			 * just bail out of freeing the PMR itself. The PMR handle will
+			 * then be freed at a later point, when the same PMR is
+			 * unreferenced again.
+			 */
+			if (PVRSRV_ERROR_PMR_STILL_REFERENCED == eError2)
+			{
+				return;
+			}
+			PVR_ASSERT (eError2 == PVRSRV_OK); /* can we do better? */
+		}
+
+		psPMR->ui32PMRSignature = PMR_SIGNATURE_DEAD;
+#if defined(PDUMP)
+		PDumpPMRFreePMR(psPMR,
+		                psPMR->uiLogicalSize,
+		                (1 << psPMR->uiLog2ContiguityGuarantee),
+		                psPMR->uiLog2ContiguityGuarantee,
+		                psPMR->hPDumpAllocHandle);
+
+		OSFreeMem(psPMR->pszAnnotation);
+#endif
+
+#if defined (PVRSRV_ENABLE_LINUX_MMAP_STATS)
+		/* This PMR is about to die, update its mmap stats record (if present) to avoid
+		 * dangling pointer. Additionally, this is required because mmap stats are
+		 * identified by PMRs and a new PMR down the line "might" get the same address
+		 * as the one we're about to free and we'd like 2 different entries in mmaps
+		 * stats for such cases */
+		MMapStatsRemovePMR(psPMR);
+#endif
+
+#ifdef PVRSRV_NEED_PVR_ASSERT
+		PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) == 0);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+		{
+			PVRSRV_ERROR eError;
+
+			/* Delete RI entry */
+			if (psPMR->hRIHandle)
+			{
+				eError = RIDeletePMREntryKM (psPMR->hRIHandle);
+
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: RIDeletePMREntryKM failed: %s",
+												__func__,
+												PVRSRVGetErrorStringKM(eError)));
+					/* continue destroying the PMR */
+				}
+			}
+		}
+#endif /* if defined(PVR_RI_DEBUG) */
+		psCtx = psPMR->psContext;
+
+		OSLockDestroy(psPMR->hLock);
+
+		OSFreeMem(psPMR);
+
+		/* Decrement live PMR count.  Probably only of interest for debugging */
+		PVR_ASSERT(psCtx->uiNumLivePMRs > 0);
+
+		OSLockAcquire(psCtx->hLock);
+		psCtx->uiNumLivePMRs --;
+		OSLockRelease(psCtx->hLock);
+	}
+}
+
+static IMG_BOOL _PMRIsSparse(const PMR *psPMR)
+{
+	return psPMR->bSparseAlloc;
+}
+
+PVRSRV_ERROR
+PMRCreatePMR(PVRSRV_DEVICE_NODE *psDevNode,
+             PHYS_HEAP *psPhysHeap,
+             PMR_SIZE_T uiLogicalSize,
+             PMR_SIZE_T uiChunkSize,
+             IMG_UINT32 ui32NumPhysChunks,
+             IMG_UINT32 ui32NumVirtChunks,
+             IMG_UINT32 *pui32MappingTable,
+             PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+             PMR_FLAGS_T uiFlags,
+             const IMG_CHAR *pszAnnotation,
+             const PMR_IMPL_FUNCTAB *psFuncTab,
+             PMR_IMPL_PRIVDATA pvPrivData,
+             PMR_IMPL_TYPE eType,
+             PMR **ppsPMRPtr,
+             IMG_UINT32 ui32PDumpFlags)
+{
+	PMR *psPMR = NULL;
+	PVRSRV_ERROR eError;
+
+	eError = _PMRCreate(uiLogicalSize,
+	                    uiChunkSize,
+	                    ui32NumPhysChunks,
+	                    ui32NumVirtChunks,
+	                    pui32MappingTable,
+	                    uiLog2ContiguityGuarantee,
+	                    uiFlags,
+	                    &psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	psPMR->psDevNode = psDevNode;
+	psPMR->psPhysHeap = psPhysHeap;
+	psPMR->psFuncTab = psFuncTab;
+	psPMR->pszPDumpDefaultMemspaceName = PhysHeapPDumpMemspaceName(psPhysHeap);
+	psPMR->pvFlavourData = pvPrivData;
+	psPMR->eFlavour = eType;
+	OSAtomicWrite(&psPMR->iRefCount, 1);
+
+#if defined(PDUMP)
+	{
+		/* Local copy renamed to avoid shadowing the uiFlags parameter */
+		PMR_FLAGS_T uiPMRFlags = psPMR->uiFlags;
+		IMG_BOOL bInitialise = IMG_FALSE;
+		IMG_UINT32 ui32InitValue = 0;
+
+		psPMR->pszAnnotation = OSAllocMem(sizeof(IMG_CHAR) * PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH);
+		if (psPMR->pszAnnotation == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e1;
+		}
+
+		/* OSStringNCopy does not guarantee termination at maximum length */
+		OSStringNCopy(psPMR->pszAnnotation, pszAnnotation, PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH);
+		psPMR->pszAnnotation[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH - 1] = '\0';
+
+		if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiPMRFlags))
+		{
+			bInitialise = IMG_TRUE;
+		}
+		else if (PVRSRV_CHECK_POISON_ON_ALLOC(uiPMRFlags))
+		{
+			ui32InitValue = 0xDEADBEEF;
+			bInitialise = IMG_TRUE;
+		}
+
+		PDumpPMRMallocPMR(psPMR,
+		                  (uiChunkSize * ui32NumVirtChunks),
+		                  1ULL<<uiLog2ContiguityGuarantee,
+		                  uiChunkSize,
+		                  ui32NumPhysChunks,
+		                  ui32NumVirtChunks,
+		                  pui32MappingTable,
+		                  uiLog2ContiguityGuarantee,
+		                  bInitialise,
+		                  ui32InitValue,
+		                  &psPMR->hPDumpAllocHandle,
+                          ui32PDumpFlags);
+	}
+#endif
+
+	*ppsPMRPtr = psPMR;
+
+	return PVRSRV_OK;
+
+	/*
+	 * error exit paths follow
+	 */
+#if defined(PDUMP)
+e1:
+	PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate memory for PMR PDump Annotation, OOM.", __func__));
+#endif
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR,
+                                           IMG_UINT32 ui32NestingLevel)
+{
+    PVRSRV_ERROR eError;
+
+	_PMRAssert(psPMR);
+
+	/* Note: taking this lock is not required to protect the PMR reference count,
+	 * because the PMR reference count is atomic.
+	 * Rather, taking the lock here guarantees that no caller will exit this function
+	 * without the underlying physical addresses being locked.
+	 */
+	OSLockAcquireNested(psPMR->hLock, ui32NestingLevel);
+    /* We also count the locks as references, so that the PMR is not
+       freed while someone is using a physical address. */
+    /* "lock" here simply means incrementing the refcount.  It means
+       the refcount is multipurpose, but that's okay.  We only have to
+       promise that physical addresses are valid after this point, and
+       remain valid until the corresponding
+       PMRUnlockSysPhysAddressesOSMem() */
+    _Ref(psPMR);
+
+    /* Also count locks separately from other types of references, to
+       allow for debug assertions */
+
+    /* Only call callback if lockcount transitions from 0 to 1 */
+    if (OSAtomicIncrement(&psPMR->iLockCount) == 1)
+    {
+        if (psPMR->psFuncTab->pfnLockPhysAddresses != NULL)
+        {
+            /* must always have lock and unlock in pairs! */
+            PVR_ASSERT(psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL);
+
+            eError = psPMR->psFuncTab->pfnLockPhysAddresses(psPMR->pvFlavourData);
+
+            if (eError != PVRSRV_OK)
+            {
+                goto e1;
+            }
+        }
+#if defined(PVR_RI_DEBUG)
+        /* Update RI debug to indicate that the PMR now has physical backing */
+        RIPMRPhysicalBackingKM(psPMR, IMG_TRUE);
+#endif
+    }
+	OSLockRelease(psPMR->hLock);
+
+    return PVRSRV_OK;
+
+ e1:
+    OSAtomicDecrement(&psPMR->iLockCount);
+    _Unref(psPMR);
+    PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) != 0);
+    OSLockRelease(psPMR->hLock);
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMRLockSysPhysAddresses(PMR *psPMR)
+{
+	return PMRLockSysPhysAddressesNested(psPMR, 0);
+}
+
+PVRSRV_ERROR
+PMRUnlockSysPhysAddresses(PMR *psPMR)
+{
+	return PMRUnlockSysPhysAddressesNested(psPMR, 2);
+}
+
+PVRSRV_ERROR
+PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel)
+{
+    PVRSRV_ERROR eError;
+
+	_PMRAssert(psPMR);
+
+	/* Acquiring the lock here, as well as during the Lock operation ensures
+	 * the lock count hitting zero and the unlocking of the phys addresses is
+	 * an atomic operation
+	 */
+	OSLockAcquireNested(psPMR->hLock, ui32NestingLevel);
+	PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > 0);
+
+    if (OSAtomicDecrement(&psPMR->iLockCount) == 0)
+    {
+        if (psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL)
+        {
+            PVR_ASSERT(psPMR->psFuncTab->pfnLockPhysAddresses != NULL);
+
+            eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData);
+            /* must never fail */
+            PVR_ASSERT(eError == PVRSRV_OK);
+        }
+#if defined(PVR_RI_DEBUG)
+        /* Update RI debug to indicate that the PMR no longer has physical backing */
+        RIPMRPhysicalBackingKM(psPMR, IMG_FALSE);
+#endif
+    }
+
+    OSLockRelease(psPMR->hLock);
+
+    /* We also count the locks as references, so that the PMR is not
+       freed while someone is using a physical address. */
+    _UnrefAndMaybeDestroy(psPMR);
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	_PMRAssert(psPMR);
+
+	OSLockAcquire(psPMR->hLock);
+	/* Stop if we still have references on the PMR */
+	if ((bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 2))
+	    || (!bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 1)))
+	{
+		OSLockRelease(psPMR->hLock);
+        PVR_DPF((PVR_DBG_ERROR,
+                 "%s: PMR is still referenced %u times. "
+                 "That means this PMR is probably exported or used somewhere else. "
+                 "Allowed are 2 references if it is mapped to device, otherwise 1.",
+                 __func__,
+                 OSAtomicRead(&psPMR->iRefCount)));
+
+		eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+		goto e_exit;
+	}
+	OSLockRelease(psPMR->hLock);
+
+	if (psPMR->psFuncTab->pfnUnpinMem != NULL)
+	{
+		eError = psPMR->psFuncTab->pfnUnpinMem(psPMR->pvFlavourData);
+	}
+
+e_exit:
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRPinPMR(PMR *psPMR)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	_PMRAssert(psPMR);
+
+	if (psPMR->psFuncTab->pfnPinMem != NULL)
+	{
+		eError = psPMR->psFuncTab->pfnPinMem(psPMR->pvFlavourData,
+											psPMR->psMappingTable);
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRMakeLocalImportHandle(PMR *psPMR,
+                         PMR **ppsPMR)
+{
+	PMRRefPMR(psPMR);
+	*ppsPMR = psPMR;
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnmakeLocalImportHandle(PMR *psPMR)
+{
+	PMRUnrefPMR(psPMR);
+	return PVRSRV_OK;
+}
+
+/*
+	Note:
+	We pass back the PMR as it was passed in as a different handle type
+	(DEVMEM_MEM_IMPORT) and it allows us to change the import structure
+	type if we should need to embed any meta data in it.
+*/
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+				  PMR **ppsPMR,
+				  IMG_DEVMEM_SIZE_T *puiSize,
+				  IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	_PMRAssert(psPMR);
+	_Ref(psPMR);
+
+	/* Return the PMR */
+	*ppsPMR = psPMR;
+	*puiSize = psPMR->uiLogicalSize;
+	*puiAlign = 1ULL << psPMR->uiLog2ContiguityGuarantee;
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRGetUID(PMR *psPMR,
+		  IMG_UINT64 *pui64UID)
+{
+	_PMRAssert(psPMR);
+
+	*pui64UID = psPMR->uiSerialNum;
+
+	return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+             PMR_EXPORT **ppsPMRExportPtr,
+             PMR_SIZE_T *puiSize,
+             PMR_LOG2ALIGN_T *puiLog2Contig,
+             PMR_PASSWORD_T *puiPassword)
+{
+    IMG_UINT64 uiPassword;
+    PMR_EXPORT *psPMRExport;
+
+	_PMRAssert(psPMR);
+    uiPassword = psPMR->uiKey;
+
+    psPMRExport = OSAllocMem(sizeof(*psPMRExport));
+    if (psPMRExport == NULL)
+    {
+        return PVRSRV_ERROR_OUT_OF_MEMORY;
+    }
+
+    psPMRExport->psPMR = psPMR;
+    _Ref(psPMR);
+
+    *ppsPMRExportPtr = psPMRExport;
+    *puiSize = psPMR->uiLogicalSize;
+    *puiLog2Contig = psPMR->uiLog2ContiguityGuarantee;
+    *puiPassword = uiPassword;
+
+    return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport)
+{
+    PVR_ASSERT(psPMRExport != NULL);
+	_PMRAssert(psPMRExport->psPMR);
+    PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0);
+
+	_UnrefAndMaybeDestroy(psPMRExport->psPMR);
+
+    OSFreeMem(psPMRExport);
+
+    return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR)
+{
+    PMR *psPMR;
+
+    _PMRAssert(psPMRExport->psPMR);
+    PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0);
+
+    psPMR = psPMRExport->psPMR;
+
+    if (psPMR->uiKey != uiPassword)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "PMRImport: password given = %016" IMG_UINT64_FMTSPECx ", expected = %016" IMG_UINT64_FMTSPECx "\n",
+                 uiPassword,
+                 psPMR->uiKey));
+        return PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR;
+    }
+
+    if (psPMR->uiLogicalSize != uiSize || psPMR->uiLog2ContiguityGuarantee != uiLog2Contig)
+    {
+        return PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES;
+    }
+
+    _Ref(psPMR);
+
+    *ppsPMR = psPMR;
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR)
+{
+	_PMRAssert(psPMR);
+    _UnrefAndMaybeDestroy(psPMR);
+
+    return PVRSRV_OK;
+}
+
+#else /* if defined(SUPPORT_INSECURE_EXPORT) */
+
+PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+             PMR_EXPORT **ppsPMRExportPtr,
+             PMR_SIZE_T *puiSize,
+             PMR_LOG2ALIGN_T *puiLog2Contig,
+             PMR_PASSWORD_T *puiPassword)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(ppsPMRExportPtr);
+	PVR_UNREFERENCED_PARAMETER(puiSize);
+	PVR_UNREFERENCED_PARAMETER(puiLog2Contig);
+	PVR_UNREFERENCED_PARAMETER(puiPassword);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMRExport);
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMRExport);
+	PVR_UNREFERENCED_PARAMETER(uiPassword);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiLog2Contig);
+	PVR_UNREFERENCED_PARAMETER(ppsPMR);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	return PVRSRV_OK;
+}
+#endif /* if defined(SUPPORT_INSECURE_EXPORT) */
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection,
+                                PVRSRV_DEVICE_NODE * psDevNode,
+								PMR *psPMR,
+								IMG_SECURE_TYPE *phSecure,
+								PMR **ppsPMR,
+								CONNECTION_DATA **ppsSecureConnection)
+{
+	PVRSRV_ERROR eError;
+
+	_PMRAssert(psPMR);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+	/* We are acquiring reference to PMR here because OSSecureExport
+	 * releases bridge lock and PMR lock for a moment and we don't want PMR
+	 * to be removed by other thread in the meantime. */
+	_Ref(psPMR);
+
+	eError = OSSecureExport(psConnection,
+							(void *) psPMR,
+							phSecure,
+							ppsSecureConnection);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	*ppsPMR = psPMR;
+
+	return PVRSRV_OK;
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	_UnrefAndMaybeDestroy(psPMR);
+	return eError;
+}
+
+PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR)
+{
+	_PMRAssert(psPMR);
+	_UnrefAndMaybeDestroy(psPMR);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PMRSecureImportPMR(CONNECTION_DATA *psConnection,
+								PVRSRV_DEVICE_NODE *psDevNode,
+								IMG_SECURE_TYPE hSecure,
+								PMR **ppsPMR,
+								IMG_DEVMEM_SIZE_T *puiSize,
+								IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMR;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	eError = OSSecureImport(hSecure, (void **) &psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	_PMRAssert(psPMR);
+	if (psPMR->psDevNode != psDevNode)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device\n", __func__));
+		return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+	}
+
+	_Ref(psPMR);
+
+	/* Return the PMR */
+	*ppsPMR = psPMR;
+	*puiSize = psPMR->uiLogicalSize;
+	*puiAlign = 1ULL << psPMR->uiLog2ContiguityGuarantee;
+	return PVRSRV_OK;
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR)
+{
+	_PMRAssert(psPMR);
+	_UnrefAndMaybeDestroy(psPMR);
+	return PVRSRV_OK;
+}
+#endif
+
+#if defined(PVR_RI_DEBUG)
+PVRSRV_ERROR
+PMRStoreRIHandle(PMR *psPMR,
+				 void *hRIHandle)
+{
+    _PMRAssert(psPMR);
+
+    psPMR->hRIHandle = hRIHandle;
+    return PVRSRV_OK;
+}
+#endif
+
+static PVRSRV_ERROR
+_PMRAcquireKernelMappingData(PMR *psPMR,
+                            size_t uiLogicalOffset,
+                            size_t uiSize,
+                            void **ppvKernelAddressOut,
+                            size_t *puiLengthOut,
+                            IMG_HANDLE *phPrivOut,
+                            IMG_BOOL bMapSparse)
+{
+    PVRSRV_ERROR eError;
+    void *pvKernelAddress;
+    IMG_HANDLE hPriv;
+
+	_PMRAssert(psPMR);
+
+    if (_PMRIsSparse(psPMR) && !bMapSparse)
+    {
+        /* Generally we don't support mapping of sparse allocations but if there
+           is a justified need we can do that by passing IMG_TRUE in bMapSparse.
+           Although the callback is supported by the PMR it will always map
+           the physical 1:1 as sparseness issues are handled here in the core */
+        return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+    }
+
+    /* Acquire/Release functions must be overridden in pairs */
+    if (psPMR->psFuncTab->pfnAcquireKernelMappingData == NULL)
+    {
+        PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData == NULL);
+
+        /* If PMR implementation does not supply this pair of
+           functions, it means they do not permit the PMR to be mapped
+           into kernel memory at all */
+        eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+        goto e0;
+    }
+    PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL);
+
+    eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+                                                           uiLogicalOffset,
+                                                           uiSize,
+                                                           &pvKernelAddress,
+                                                           &hPriv,
+                                                           psPMR->uiFlags);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    *ppvKernelAddressOut = pvKernelAddress;
+    if (uiSize == 0)
+    {
+        /* Zero size means map the whole PMR in ...*/
+        *puiLengthOut = (size_t)psPMR->uiLogicalSize;
+    }
+    else if (uiSize > (1 << psPMR->uiLog2ContiguityGuarantee))
+    {
+    	/* ... map in the requested pages ...*/
+		*puiLengthOut = uiSize;
+    }
+    else
+    {
+        /* ... otherwise we just map in one page */
+        *puiLengthOut = 1 << psPMR->uiLog2ContiguityGuarantee;
+    }
+    *phPrivOut = hPriv;
+
+    return PVRSRV_OK;
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMRAcquireKernelMappingData(PMR *psPMR,
+                            size_t uiLogicalOffset,
+                            size_t uiSize,
+                            void **ppvKernelAddressOut,
+                            size_t *puiLengthOut,
+                            IMG_HANDLE *phPrivOut)
+{
+    return _PMRAcquireKernelMappingData(psPMR,
+                                        uiLogicalOffset,
+                                        uiSize,
+                                        ppvKernelAddressOut,
+                                        puiLengthOut,
+                                        phPrivOut,
+                                        IMG_FALSE);
+}
+
+PVRSRV_ERROR
+PMRAcquireSparseKernelMappingData(PMR *psPMR,
+                                  size_t uiLogicalOffset,
+                                  size_t uiSize,
+                                  void **ppvKernelAddressOut,
+                                  size_t *puiLengthOut,
+                                  IMG_HANDLE *phPrivOut)
+{
+    return _PMRAcquireKernelMappingData(psPMR,
+                                        uiLogicalOffset,
+                                        uiSize,
+                                        ppvKernelAddressOut,
+                                        puiLengthOut,
+                                        phPrivOut,
+                                        IMG_TRUE);
+}
+
+PVRSRV_ERROR
+PMRReleaseKernelMappingData(PMR *psPMR,
+                            IMG_HANDLE hPriv)
+{
+	_PMRAssert(psPMR);
+    PVR_ASSERT (psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL);
+    PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL);
+
+    psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+                                                  hPriv);
+
+    return PVRSRV_OK;
+}
+
+#if defined(INTEGRITY_OS)
+
+PVRSRV_ERROR
+PMRMapMemoryObject(PMR *psPMR,
+                   IMG_HANDLE *phMemObj,
+                   void **pvClientAddr,
+                   IMG_HANDLE hPriv)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT (psPMR->psFuncTab->pfnMapMemoryObject != NULL);
+
+	eError = psPMR->psFuncTab->pfnMapMemoryObject(hPriv, phMemObj, pvClientAddr);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PMRUnmapMemoryObject(PMR *psPMR,
+                     IMG_HANDLE hPriv)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT (psPMR->psFuncTab->pfnUnmapMemoryObject != NULL);
+
+	eError = psPMR->psFuncTab->pfnUnmapMemoryObject(hPriv);
+
+	return eError;
+}
+
+#if defined(USING_HYPERVISOR)
+IMG_HANDLE PMRGetPmr(PMR *psPMR, size_t ulOffset)
+{
+    PVR_ASSERT(psPMR->psFuncTab->pfnGetPmr != NULL);
+    return psPMR->psFuncTab->pfnGetPmr(psPMR->pvFlavourData, ulOffset);
+}
+#endif
+#endif /* INTEGRITY_OS */
+
+/*
+	_PMRLogicalOffsetToPhysicalOffset
+
+	Translate between the "logical" offset which the upper levels
+	provide and the physical offset which the PMR factories work on.
+
+	As well as returning the physical offset, we return the number of
+	bytes remaining until the next chunk and whether this chunk is valid.
+
+	For multi-page operations, the upper layers pass their Log2PageSize;
+	otherwise the argument is redundant (set to zero).
+*/
+
+static void
+_PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR,
+								  IMG_UINT32 ui32Log2PageSize,
+								  IMG_UINT32 ui32NumOfPages,
+								  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+								  IMG_DEVMEM_OFFSET_T *puiPhysicalOffset,
+								  IMG_UINT32 *pui32BytesRemain,
+								  IMG_BOOL *bValid)
+{
+	PMR_MAPPING_TABLE *psMappingTable = psPMR->psMappingTable;
+	IMG_DEVMEM_OFFSET_T uiPageSize = 1ULL << ui32Log2PageSize;
+	IMG_DEVMEM_OFFSET_T uiOffset = uiLogicalOffset;
+	IMG_UINT64 ui64ChunkIndex;
+	IMG_UINT32 ui32Remain;
+	IMG_UINT32 idx;
+
+	/* Must be translating at least a page */
+	PVR_ASSERT(ui32NumOfPages);
+
+	if (psMappingTable->ui32NumPhysChunks == psMappingTable->ui32NumVirtChunks)
+	{
+		/* Fast path for the common case: since logical and physical offsets
+		   are equal, we assume the ui32NumOfPages span is also valid */
+		*pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiOffset);
+		puiPhysicalOffset[0] = uiOffset;
+		bValid[0] = IMG_TRUE;
+
+		if (ui32NumOfPages > 1)
+		{
+			/* initial offset may not be page aligned, round down */
+			uiOffset &= ~(uiPageSize-1);
+			for (idx=1; idx < ui32NumOfPages; idx++)
+			{
+				uiOffset += uiPageSize;
+				puiPhysicalOffset[idx] = uiOffset;
+				bValid[idx] = IMG_TRUE;
+			}
+		}
+	}
+	else
+	{
+		for (idx=0; idx < ui32NumOfPages; idx++)
+		{
+			ui64ChunkIndex = OSDivide64r64(
+					uiOffset,
+					TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize),
+					&ui32Remain);
+
+			if (psMappingTable->aui32Translation[ui64ChunkIndex] == TRANSLATION_INVALID)
+			{
+				bValid[idx] = IMG_FALSE;
+			}
+			else
+			{
+				bValid[idx] = IMG_TRUE;
+			}
+
+			if (idx == 0)
+			{
+				if (ui32Remain == 0)
+				{
+					/* Start of chunk so return the chunk size */
+					*pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize);
+				}
+				else
+				{
+					*pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize - ui32Remain);
+				}
+
+				puiPhysicalOffset[idx] = (psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize) + ui32Remain;
+
+				/* initial offset may not be page aligned, round down */
+				uiOffset &= ~(uiPageSize-1);
+			}
+			else
+			{
+				puiPhysicalOffset[idx] = psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize + ui32Remain;
+			}
+			uiOffset += uiPageSize;
+		}
+	}
+}
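+
+/* Worked example: with uiChunkSize = 0x10000 and a translation table
+ * { [0] = 2, [1] = TRANSLATION_INVALID, [2] = 0 } (values chosen purely
+ * for illustration), logical offset 0x10010 lands in virtual chunk 1 and
+ * is reported invalid, while logical offset 0x20010 lands in virtual
+ * chunk 2, which maps to physical chunk 0, giving physical offset
+ * 0 * 0x10000 + 0x10 = 0x10 and *pui32BytesRemain = 0x10000 - 0x10 = 0xFFF0.
+ */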
+
+static PVRSRV_ERROR
+_PMR_ReadBytesPhysical(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+                       IMG_UINT8 *pcBuffer,
+                       size_t uiBufSz,
+                       size_t *puiNumBytes)
+{
+	PVRSRV_ERROR eError;
+
+    if (psPMR->psFuncTab->pfnReadBytes != NULL)
+    {
+        /* defer to callback if present */
+
+        eError = PMRLockSysPhysAddresses(psPMR);
+        if (eError != PVRSRV_OK)
+        {
+            goto e0;
+        }
+
+        eError = psPMR->psFuncTab->pfnReadBytes(psPMR->pvFlavourData,
+                                                uiPhysicalOffset,
+                                                pcBuffer,
+                                                uiBufSz,
+                                                puiNumBytes);
+        PMRUnlockSysPhysAddresses(psPMR);
+        if (eError != PVRSRV_OK)
+        {
+            goto e0;
+        }
+    }
+    else if (psPMR->psFuncTab->pfnAcquireKernelMappingData)
+    {
+        /* "default" handler for reading bytes */
+
+        IMG_HANDLE hKernelMappingHandle;
+        IMG_UINT8 *pcKernelAddress;
+
+        eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+                                                               (size_t) uiPhysicalOffset,
+                                                               uiBufSz,
+                                                               (void **)&pcKernelAddress,
+                                                               &hKernelMappingHandle,
+                                                               psPMR->uiFlags);
+        if (eError != PVRSRV_OK)
+        {
+            goto e0;
+        }
+
+        /* Use the conservative 'DeviceMemCopy' here because we can't know
+         * if this PMR will be mapped cached.
+         */
+
+        OSDeviceMemCopy(&pcBuffer[0], pcKernelAddress, uiBufSz);
+        *puiNumBytes = uiBufSz;
+
+        psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+                                                      hKernelMappingHandle);
+    }
+    else
+    {
+        PVR_DPF((PVR_DBG_ERROR, "_PMR_ReadBytesPhysical: can't read from this PMR"));
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        OSPanic();
+        goto e0;
+    }
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMR_ReadBytes(PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT8 *pcBuffer,
+              size_t uiBufSz,
+              size_t *puiNumBytes)
+{
+    PVRSRV_ERROR eError = PVRSRV_OK;
+    IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+    size_t uiBytesCopied = 0;
+
+	_PMRAssert(psPMR);
+    if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize)
+    {
+		uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset);
+    }
+    PVR_ASSERT(uiBufSz > 0);
+    PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize);
+
+    /*
+      PMR implementations can override this.  If they don't, a
+      "default" handler uses kernel virtual mappings.  If the kernel
+      can't provide a kernel virtual mapping, this function fails
+    */
+    PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL ||
+               psPMR->psFuncTab->pfnReadBytes != NULL);
+
+	while (uiBytesCopied != uiBufSz)
+	{
+		IMG_UINT32 ui32Remain;
+		size_t uiBytesToCopy;
+		size_t uiRead;
+		IMG_BOOL bValid;
+
+		_PMRLogicalOffsetToPhysicalOffset(psPMR,
+										  0,
+										  1,
+										  uiLogicalOffset,
+										  &uiPhysicalOffset,
+										  &ui32Remain,
+										  &bValid);
+		/*
+			Copy until either the end of the
+			chunk or the end of the buffer
+		*/
+		uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain);
+
+		if (bValid)
+		{
+			/* Read the data from the PMR */
+			eError = _PMR_ReadBytesPhysical(psPMR,
+											uiPhysicalOffset,
+											&pcBuffer[uiBytesCopied],
+											uiBytesToCopy,
+											&uiRead);
+			if ((eError != PVRSRV_OK) || (uiRead != uiBytesToCopy))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to read chunk (eError = %s, uiRead = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")",
+						 __FUNCTION__,
+						 PVRSRVGetErrorStringKM(eError),
+						 uiRead,
+						 uiBytesToCopy));
+				/* Bail out as soon as we hit an error */
+				break;
+			}
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					"%s: Invalid phys offset at logical offset (" IMG_DEVMEM_OFFSET_FMTSPEC ") logical size (" IMG_DEVMEM_OFFSET_FMTSPEC ")",
+					__FUNCTION__,
+					uiLogicalOffset,
+					psPMR->uiLogicalSize));
+			/* Fill invalid chunks with 0 */
+			OSCachedMemSet(&pcBuffer[uiBytesCopied], 0, uiBytesToCopy);
+			uiRead = uiBytesToCopy;
+			eError = PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR;
+		}
+		uiLogicalOffset += uiRead;
+		uiBytesCopied += uiRead;
+	}
+
+	*puiNumBytes = uiBytesCopied;
+    return eError;
+}
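+
+/* Usage sketch (assumes psPMR is valid and referenced): read the first
+ * 16 bytes of a PMR into a local buffer. For sparse PMRs, unbacked
+ * chunks are zero-filled and PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR is
+ * returned, while *puiNumBytes still counts those bytes.
+ *
+ *    IMG_UINT8 aui8Buf[16];
+ *    size_t uiRead;
+ *
+ *    eError = PMR_ReadBytes(psPMR, 0, aui8Buf, sizeof(aui8Buf), &uiRead);
+ */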
+
+static PVRSRV_ERROR
+_PMR_WriteBytesPhysical(PMR *psPMR,
+						IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+						IMG_UINT8 *pcBuffer,
+						size_t uiBufSz,
+						size_t *puiNumBytes)
+{
+	PVRSRV_ERROR eError;
+
+    if (psPMR->psFuncTab->pfnWriteBytes != NULL)
+    {
+        /* defer to callback if present */
+
+        eError = PMRLockSysPhysAddresses(psPMR);
+        if (eError != PVRSRV_OK)
+        {
+            goto e0;
+        }
+
+        eError = psPMR->psFuncTab->pfnWriteBytes(psPMR->pvFlavourData,
+												 uiPhysicalOffset,
+                                                 pcBuffer,
+                                                 uiBufSz,
+                                                 puiNumBytes);
+        PMRUnlockSysPhysAddresses(psPMR);
+        if (eError != PVRSRV_OK)
+        {
+            goto e0;
+        }
+    }
+    else if (psPMR->psFuncTab->pfnAcquireKernelMappingData)
+    {
+        /* "default" handler for reading bytes */
+
+        IMG_HANDLE hKernelMappingHandle;
+        IMG_UINT8 *pcKernelAddress;
+
+        eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+                                                               (size_t) uiPhysicalOffset,
+                                                               uiBufSz,
+                                                               (void **)&pcKernelAddress,
+                                                               &hKernelMappingHandle,
+                                                               psPMR->uiFlags);
+        if (eError != PVRSRV_OK)
+        {
+            goto e0;
+        }
+
+		/* Use the conservative 'DeviceMemCopy' here because we can't know
+		 * if this PMR will be mapped cached.
+		 */
+
+		OSDeviceMemCopy(pcKernelAddress, &pcBuffer[0], uiBufSz);
+        *puiNumBytes = uiBufSz;
+
+        psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+                                                      hKernelMappingHandle);
+    }
+    else
+    {
+		/*
+			The write callback is optional as it's only required by the debug
+			tools
+		*/
+        PVR_DPF((PVR_DBG_ERROR, "_PMR_WriteBytesPhysical: can't write to this PMR"));
+        eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+        OSPanic();
+        goto e0;
+    }
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMR_WriteBytes(PMR *psPMR,
+			   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+               IMG_UINT8 *pcBuffer,
+               size_t uiBufSz,
+               size_t *puiNumBytes)
+{
+    PVRSRV_ERROR eError = PVRSRV_OK;
+    IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+    size_t uiBytesCopied = 0;
+
+	_PMRAssert(psPMR);
+    if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize)
+    {
+        uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset);
+    }
+    PVR_ASSERT(uiBufSz > 0);
+    PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize);
+
+    /*
+      PMR implementations can override this.  If they don't, a
+      "default" handler uses kernel virtual mappings.  If the kernel
+      can't provide a kernel virtual mapping, this function fails
+    */
+    PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL ||
+               psPMR->psFuncTab->pfnWriteBytes != NULL);
+
+	while (uiBytesCopied != uiBufSz)
+	{
+		IMG_UINT32 ui32Remain;
+		size_t uiBytesToCopy;
+		size_t uiWrite;
+		IMG_BOOL bValid;
+
+		_PMRLogicalOffsetToPhysicalOffset(psPMR,
+										  0,
+										  1,
+										  uiLogicalOffset,
+										  &uiPhysicalOffset,
+										  &ui32Remain,
+										  &bValid);
+
+		/*
+			Copy until either the end of the
+			chunk or the end of the buffer
+		*/
+		uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain);
+
+		if (bValid)
+		{
+			/* Write the data to the PMR */
+			eError = _PMR_WriteBytesPhysical(psPMR,
+											 uiPhysicalOffset,
+											 &pcBuffer[uiBytesCopied],
+											 uiBytesToCopy,
+											 &uiWrite);
+			if ((eError != PVRSRV_OK) || (uiWrite != uiBytesToCopy))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to write chunk (eError = %s, uiWrite = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")",
+						 __FUNCTION__,
+						 PVRSRVGetErrorStringKM(eError),
+						 uiWrite,
+						 uiBytesToCopy));
+				/* Bail out as soon as we hit an error */
+				break;
+			}
+		}
+		else
+		{
+			/* Ignore writes to invalid pages */
+			uiWrite = uiBytesToCopy;
+		}
+		uiLogicalOffset += uiWrite;
+		uiBytesCopied += uiWrite;
+	}
+
+	*puiNumBytes = uiBytesCopied;
+    return eError;
+}
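+
+/* Usage sketch for the symmetric write path: note that writes landing on
+ * unbacked chunks of a sparse PMR are silently skipped but still counted
+ * in *puiNumBytes, so callers cannot distinguish them from real writes.
+ *
+ *    eError = PMR_WriteBytes(psPMR, 0, aui8Buf, sizeof(aui8Buf), &uiWritten);
+ */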
+
+PVRSRV_ERROR
+PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
+{
+	_PMRAssert(psPMR);
+	if (psPMR->psFuncTab->pfnMMap)
+	{
+		return psPMR->psFuncTab->pfnMMap(psPMR->pvFlavourData, psPMR, pOSMMapData);
+	}
+
+	return OSMMapPMRGeneric(psPMR, pOSMMapData);
+}
+
+void
+PMRRefPMR(PMR *psPMR)
+{
+	_PMRAssert(psPMR);
+	_Ref(psPMR);
+}
+
+PVRSRV_ERROR
+PMRUnrefPMR(PMR *psPMR)
+{
+	_PMRAssert(psPMR);
+	_UnrefAndMaybeDestroy(psPMR);
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnrefUnlockPMR(PMR *psPMR)
+{
+	PMRUnlockSysPhysAddresses(psPMR);
+
+	PMRUnrefPMR(psPMR);
+
+	return PVRSRV_OK;
+}
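+
+/* Lifecycle sketch: every PMRRefPMR() must be balanced by a PMRUnrefPMR();
+ * the PMR is destroyed when the last reference drops. PMRUnrefUnlockPMR()
+ * is a convenience for callers holding both a physical-address lock and a
+ * reference:
+ *
+ *    PMRRefPMR(psPMR);
+ *    eError = PMRLockSysPhysAddresses(psPMR);
+ *    ... use the physical addresses ...
+ *    eError = PMRUnrefUnlockPMR(psPMR);    (unlock, then unref)
+ */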
+
+PVRSRV_DEVICE_NODE *
+PMR_DeviceNode(const PMR *psPMR)
+{
+	_PMRAssert(psPMR);
+
+    return psPMR->psDevNode;
+}
+
+PMR_FLAGS_T
+PMR_Flags(const PMR *psPMR)
+{
+	_PMRAssert(psPMR);
+
+    return psPMR->uiFlags;
+}
+
+IMG_BOOL
+PMR_IsSparse(const PMR *psPMR)
+{
+	_PMRAssert(psPMR);
+
+    return _PMRIsSparse(psPMR);
+}
+
+PVRSRV_ERROR
+PMR_LogicalSize(const PMR *psPMR,
+				IMG_DEVMEM_SIZE_T *puiLogicalSize)
+{
+	_PMRAssert(psPMR);
+
+    *puiLogicalSize = psPMR->uiLogicalSize;
+    return PVRSRV_OK;
+}
+
+PHYS_HEAP *
+PMR_PhysHeap(const PMR *psPMR)
+{
+	_PMRAssert(psPMR);
+	return psPMR->psPhysHeap;
+}
+
+PVRSRV_ERROR
+PMR_IsOffsetValid(const PMR *psPMR,
+				IMG_UINT32 ui32Log2PageSize,
+				IMG_UINT32 ui32NumOfPages,
+				IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+				IMG_BOOL *pbValid)
+{
+	IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_UINT32 aui32BytesRemain[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset;
+	IMG_UINT32 *pui32BytesRemain = aui32BytesRemain;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	_PMRAssert(psPMR);
+	PVR_ASSERT(psPMR->uiLogicalSize >= uiLogicalOffset);
+
+	if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T));
+		if (puiPhysicalOffset == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		pui32BytesRemain = OSAllocMem(ui32NumOfPages * sizeof(IMG_UINT32));
+		if (pui32BytesRemain == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+	}
+
+	_PMRLogicalOffsetToPhysicalOffset(psPMR,
+									ui32Log2PageSize,
+									ui32NumOfPages,
+									uiLogicalOffset,
+									puiPhysicalOffset,
+									pui32BytesRemain,
+									pbValid);
+
+e0:
+	if (puiPhysicalOffset != auiPhysicalOffset && puiPhysicalOffset != NULL)
+	{
+		OSFreeMem(puiPhysicalOffset);
+	}
+
+	if (pui32BytesRemain != aui32BytesRemain && pui32BytesRemain != NULL)
+	{
+		OSFreeMem(pui32BytesRemain);
+	}
+
+	return eError;
+}
+
+PMR_MAPPING_TABLE *
+PMR_GetMappigTable(const PMR *psPMR)
+{
+	_PMRAssert(psPMR);
+	return psPMR->psMappingTable;
+}
+
+IMG_UINT32
+PMR_GetLog2Contiguity(const PMR *psPMR)
+{
+	_PMRAssert(psPMR);
+	return psPMR->uiLog2ContiguityGuarantee;
+}
+
+PMR_IMPL_TYPE
+PMR_GetType(const PMR *psPMR)
+{
+	_PMRAssert(psPMR);
+	return psPMR->eFlavour;
+}
+
+/* must have called PMRLockSysPhysAddresses() before calling this! */
+PVRSRV_ERROR
+PMR_DevPhysAddr(const PMR *psPMR,
+				IMG_UINT32 ui32Log2PageSize,
+				IMG_UINT32 ui32NumOfPages,
+				IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+				IMG_DEV_PHYADDR *psDevAddrPtr,
+				IMG_BOOL *pbValid)
+{
+	IMG_UINT32 ui32Remain;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset;
+
+	_PMRAssert(psPMR);
+	PVR_ASSERT(ui32NumOfPages > 0);
+	PVR_ASSERT(psPMR->psFuncTab->pfnDevPhysAddr != NULL);
+
+#ifdef PVRSRV_NEED_PVR_ASSERT
+	PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > 0);
+#endif
+
+	if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T));
+		if (puiPhysicalOffset == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+	}
+
+	_PMRLogicalOffsetToPhysicalOffset(psPMR,
+									 ui32Log2PageSize,
+									 ui32NumOfPages,
+									 uiLogicalOffset,
+									 puiPhysicalOffset,
+									 &ui32Remain,
+									 pbValid);
+	if (*pbValid || _PMRIsSparse(psPMR))
+	{
+		/* Sparse PMR may not always have the first page valid */
+		eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData,
+												  ui32Log2PageSize,
+												  ui32NumOfPages,
+												  puiPhysicalOffset,
+												  pbValid,
+												  psDevAddrPtr);
+#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES)
+    /* Currently excluded from the default build because of performance concerns.
+     * Not all systems need this translation because, on most, the GPU has the
+     * same address view of system RAM as the CPU.
+     * Alternatively this could be implemented directly in the PMR factories. */
+
+		if (PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_UMA ||
+		    PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_DMA)
+		{
+			IMG_UINT32 i;
+			IMG_DEV_PHYADDR sDevPAddrCorrected;
+
+			/* Copy the translated addresses to the correct array */
+			for (i = 0; i < ui32NumOfPages; i++)
+			{
+				PhysHeapCpuPAddrToDevPAddr(psPMR->psPhysHeap,
+										   1,
+										   &sDevPAddrCorrected,
+										   (IMG_CPU_PHYADDR *) &psDevAddrPtr[i]);
+				psDevAddrPtr[i].uiAddr = sDevPAddrCorrected.uiAddr;
+			}
+
+		}
+#endif
+	}
+
+	if (puiPhysicalOffset != auiPhysicalOffset)
+	{
+		OSFreeMem(puiPhysicalOffset);
+	}
+
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    return PVRSRV_OK;
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
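+
+/* Usage sketch: physical addresses may only be queried between
+ * PMRLockSysPhysAddresses() and PMRUnlockSysPhysAddresses(). Here a
+ * single 4K page (log2 = 12) at offset 0 is looked up:
+ *
+ *    IMG_DEV_PHYADDR sDevPAddr;
+ *    IMG_BOOL bValid;
+ *
+ *    eError = PMRLockSysPhysAddresses(psPMR);
+ *    eError = PMR_DevPhysAddr(psPMR, 12, 1, 0, &sDevPAddr, &bValid);
+ *    eError = PMRUnlockSysPhysAddresses(psPMR);
+ */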
+
+PVRSRV_ERROR
+PMR_CpuPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_CPU_PHYADDR *psCpuAddrPtr,
+                IMG_BOOL *pbValid)
+{
+    PVRSRV_ERROR eError;
+	IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEV_PHYADDR *psDevPAddr = asDevPAddr;
+
+	_PMRAssert(psPMR);
+    if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+    {
+        psDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR));
+        if (psDevPAddr == NULL)
+        {
+            eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+            goto e0;
+        }
+    }
+
+    eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, ui32NumOfPages,
+							 uiLogicalOffset, psDevPAddr, pbValid);
+    if (eError != PVRSRV_OK)
+    {
+        goto e1;
+    }
+	PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, ui32NumOfPages, psCpuAddrPtr, psDevPAddr);
+
+	if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		OSFreeMem(psDevPAddr);
+	}
+
+    return PVRSRV_OK;
+e1:
+	if (psDevPAddr != asDevPAddr)
+	{
+		OSFreeMem(psDevPAddr);
+	}
+e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
+                                 IMG_UINT32 ui32AllocPageCount,
+                                 IMG_UINT32 *pai32AllocIndices,
+                                 IMG_UINT32 ui32FreePageCount,
+                                 IMG_UINT32 *pai32FreeIndices,
+                                 IMG_UINT32 uiFlags)
+{
+	PVRSRV_ERROR eError;
+
+	_PMRAssert(psPMR);
+	if (NULL == psPMR->psFuncTab->pfnChangeSparseMem)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "%s: This type of sparse PMR cannot be changed.",
+		         __func__));
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	eError = psPMR->psFuncTab->pfnChangeSparseMem(psPMR->pvFlavourData,
+	                                              psPMR,
+	                                              ui32AllocPageCount,
+	                                              pai32AllocIndices,
+	                                              ui32FreePageCount,
+	                                              pai32FreeIndices,
+	                                              uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+#if defined(PDUMP)
+	{
+		IMG_BOOL bInitialise = IMG_FALSE;
+		IMG_UINT32 ui32InitValue = 0;
+
+		if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags))
+		{
+			bInitialise = IMG_TRUE;
+		}
+		else if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
+		{
+			ui32InitValue = 0xDEADBEEF;
+			bInitialise = IMG_TRUE;
+		}
+
+		PDumpPMRChangeSparsePMR(psPMR,
+		                        1 << psPMR->uiLog2ContiguityGuarantee,
+		                        ui32AllocPageCount,
+		                        pai32AllocIndices,
+		                        ui32FreePageCount,
+		                        pai32FreeIndices,
+		                        bInitialise,
+		                        ui32InitValue,
+		                        &psPMR->hPDumpAllocHandle);
+	}
+
+#endif
+
+e0:
+	return eError;
+}
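+
+/* Usage sketch: back pages 0 and 2 of a sparse PMR and release page 1.
+ * Indices are in units of the PMR's chunk size; uiSparseFlags stands in
+ * for whatever allocation flags the caller would normally pass.
+ *
+ *    IMG_UINT32 aui32Alloc[] = { 0, 2 };
+ *    IMG_UINT32 aui32Free[]  = { 1 };
+ *
+ *    eError = PMR_ChangeSparseMem(psPMR, 2, aui32Alloc,
+ *                                 1, aui32Free, uiSparseFlags);
+ */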
+
+
+PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR,
+                                       IMG_UINT64 sCpuVAddrBase,
+                                       IMG_UINT32 ui32AllocPageCount,
+                                       IMG_UINT32 *pai32AllocIndices,
+                                       IMG_UINT32 ui32FreePageCount,
+                                       IMG_UINT32 *pai32FreeIndices)
+{
+	PVRSRV_ERROR eError;
+
+	_PMRAssert(psPMR);
+	if ((NULL == psPMR->psFuncTab) ||
+	    (NULL == psPMR->psFuncTab->pfnChangeSparseMemCPUMap))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "%s: This type of sparse PMR cannot be changed.",
+		         __func__));
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	eError = psPMR->psFuncTab->pfnChangeSparseMemCPUMap(psPMR->pvFlavourData,
+	                                                    psPMR,
+	                                                    sCpuVAddrBase,
+	                                                    ui32AllocPageCount,
+	                                                    pai32AllocIndices,
+	                                                    ui32FreePageCount,
+	                                                    pai32FreeIndices);
+
+	return eError;
+}
+
+#if defined(PDUMP)
+
+static PVRSRV_ERROR
+_PMR_PDumpSymbolicAddrPhysical(const PMR *psPMR,
+                               IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+                               IMG_UINT32 ui32MemspaceNameLen,
+                               IMG_CHAR *pszMemspaceName,
+                               IMG_UINT32 ui32SymbolicAddrLen,
+                               IMG_CHAR *pszSymbolicAddr,
+                               IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                               IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+	if (DevmemCPUCacheCoherency(psPMR->psDevNode, psPMR->uiFlags) ||
+		DevmemDeviceCacheCoherency(psPMR->psDevNode, psPMR->uiFlags))
+	{
+		OSSNPrintf(pszMemspaceName,
+		           ui32MemspaceNameLen,
+		           PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC,
+		           psPMR->pszPDumpDefaultMemspaceName);
+	}
+	else
+	{
+		OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC,
+		           psPMR->pszPDumpDefaultMemspaceName);
+	}
+
+	OSSNPrintf(pszSymbolicAddr,
+	           ui32SymbolicAddrLen,
+	           PMR_SYMBOLICADDR_FMTSPEC,
+	           PMR_DEFAULT_PREFIX,
+	           psPMR->uiSerialNum,
+	           uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR),
+	           psPMR->pszAnnotation ? psPMR->pszAnnotation : "");
+	PDumpMakeStringValid(pszSymbolicAddr, OSStringLength(pszSymbolicAddr));
+
+	*puiNewOffset = uiPhysicalOffset & ((1 << PMR_GetLog2Contiguity(psPMR))-1);
+	*puiNextSymName = (IMG_DEVMEM_OFFSET_T) (((uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR))+1)
+	                                          << PMR_GetLog2Contiguity(psPMR));
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                      IMG_UINT32 ui32MemspaceNameLen,
+                      IMG_CHAR *pszMemspaceName,
+                      IMG_UINT32 ui32SymbolicAddrLen,
+                      IMG_CHAR *pszSymbolicAddr,
+                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                      IMG_DEVMEM_OFFSET_T *puiNextSymName
+                      )
+{
+    IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+    IMG_UINT32 ui32Remain;
+    IMG_BOOL bValid;
+
+    PVR_ASSERT(uiLogicalOffset < psPMR->uiLogicalSize);
+
+    _PMRLogicalOffsetToPhysicalOffset(psPMR,
+								      0,
+								      1,
+								      uiLogicalOffset,
+								      &uiPhysicalOffset,
+								      &ui32Remain,
+								      &bValid);
+
+	if (!bValid)
+	{
+		/* We should never be asked for the symbolic address of an invalid chunk */
+		PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Invalid chunk (PVRSRV_ERROR_PMR_INVALID_CHUNK)? May be sparse memory");
+		/*	For sparse allocations, a given logical address may have no
+		 *	physical memory backing, yet the virtual range can still be valid.
+		 */
+		uiPhysicalOffset = uiLogicalOffset;
+	}
+
+	return _PMR_PDumpSymbolicAddrPhysical(psPMR,
+										  uiPhysicalOffset,
+										  ui32MemspaceNameLen,
+										  pszMemspaceName,
+										  ui32SymbolicAddrLen,
+										  pszSymbolicAddr,
+										  puiNewOffset,
+										  puiNextSymName);
+}
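+
+/* Worked example: for a PMR with a 4K contiguity guarantee
+ * (PMR_GetLog2Contiguity() == 12), physical offset 0x5010 lies in
+ * symbolic block 5, so *puiNewOffset = 0x5010 & 0xFFF = 0x10 and
+ * *puiNextSymName = (5 + 1) << 12 = 0x6000; subsequent LDB/WRW commands
+ * then address symbolic block 5 at offset 0x10.
+ */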
+
+/*!
+ * @brief Writes a WRW command to the script2 buffer, representing a
+ * 		  dword write to a physical allocation. Size is always
+ * 		  sizeof(IMG_UINT32).
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - offset
+ * @param ui32Value - value to write
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+                     IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                     IMG_UINT32 ui32Value,
+                     PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+    IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+    IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+    IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+    IMG_DEVMEM_OFFSET_T uiNextSymName;
+    IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+    PVR_ASSERT(uiLogicalOffset + sizeof(ui32Value) <= psPMR->uiLogicalSize);
+    /* Especially make sure to not cross a block boundary */
+    PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value))
+               <= uiPMRPageSize));
+
+    eError = PMRLockSysPhysAddresses(psPMR);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Get the symbolic address of the PMR */
+	eError = PMR_PDumpSymbolicAddr(psPMR,
+								   uiLogicalOffset,
+								   sizeof(aszMemspaceName),
+								   &aszMemspaceName[0],
+								   sizeof(aszSymbolicName),
+								   &aszSymbolicName[0],
+								   &uiPDumpSymbolicOffset,
+				                   &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Write the WRW script command */
+	eError = PDumpPMRWRW32(aszMemspaceName,
+						 aszSymbolicName,
+						 uiPDumpSymbolicOffset,
+						 ui32Value,
+						 uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+    eError = PMRUnlockSysPhysAddresses(psPMR);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+    return PVRSRV_OK;
+}
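+
+/* Usage sketch: emit a 32-bit pdump write at byte offset 0x100 of an
+ * allocation into the continuous section of the script (the offset must
+ * not make the dword cross a contiguity-block boundary):
+ *
+ *    eError = PMRPDumpLoadMemValue32(psPMR, 0x100, 0xCAFEF00D,
+ *                                    PDUMP_FLAGS_CONTINUOUS);
+ */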
+
+/*!
+ * @brief Writes a RDW followed by a WRW command to the pdump script to perform
+ *        an effective copy from memory to memory. Memory copied is of size
+ *        sizeof (IMG_UINT32)
+ *
+ * @param psDstPMR - PMR object representing allocation of destination
+ * @param uiDstLogicalOffset - destination offset
+ * @param psSrcPMR - PMR object representing allocation of source
+ * @param uiSrcLogicalOffset - source offset
+ * @param pszTmpVar - pdump temporary variable used during the copy
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpCopyMem32(PMR *psDstPMR,
+                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+                  PMR *psSrcPMR,
+                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+                  const IMG_CHAR *pszTmpVar,
+                  PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee;
+	const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee;
+
+	PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize);
+	/* Especially make sure to not cross a block boundary */
+	PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32))
+	             <= uiSrcPMRPageSize));
+
+	PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize);
+	/* Especially make sure to not cross a block boundary */
+	PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32))
+	             <= uiDstPMRPageSize));
+
+
+	eError = PMRLockSysPhysAddresses(psSrcPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Get the symbolic address of the source PMR */
+	eError = PMR_PDumpSymbolicAddr(psSrcPMR,
+	                               uiSrcLogicalOffset,
+	                               sizeof(aszMemspaceName),
+	                               &aszMemspaceName[0],
+	                               sizeof(aszSymbolicName),
+	                               &aszSymbolicName[0],
+	                               &uiPDumpSymbolicOffset,
+	                               &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Issue PDump read command */
+	eError = PDumpPMRRDW32MemToInternalVar(pszTmpVar,
+	                                       aszMemspaceName,
+	                                       aszSymbolicName,
+	                                       uiPDumpSymbolicOffset,
+	                                       uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+    eError = PMRUnlockSysPhysAddresses(psSrcPMR);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+	eError = PMRLockSysPhysAddresses(psDstPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	/* Get the symbolic address of the destination PMR */
+	eError = PMR_PDumpSymbolicAddr(psDstPMR,
+	                               uiDstLogicalOffset,
+	                               sizeof(aszMemspaceName),
+	                               &aszMemspaceName[0],
+	                               sizeof(aszSymbolicName),
+	                               &aszSymbolicName[0],
+	                               &uiPDumpSymbolicOffset,
+	                               &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	/* Write the WRW script command */
+	eError = PDumpPMRWRW32InternalVarToMem(aszMemspaceName,
+	                                       aszSymbolicName,
+	                                       uiPDumpSymbolicOffset,
+	                                       pszTmpVar,
+	                                       uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+    eError = PMRUnlockSysPhysAddresses(psDstPMR);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+    return PVRSRV_OK;
+}
+
+/*!
+ * @brief Writes a WRW64 command to the script2 buffer, representing a
+ * 		  qword write to a physical allocation. Size is always
+ * 		  sizeof(IMG_UINT64).
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - offset
+ * @param ui64Value - value to write
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+                     IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                     IMG_UINT64 ui64Value,
+                     PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+    IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+    IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+    IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+    IMG_DEVMEM_OFFSET_T uiNextSymName;
+    IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+
+    PVR_ASSERT(uiLogicalOffset + sizeof(ui64Value) <= psPMR->uiLogicalSize);
+    /* Especially make sure to not cross a block boundary */
+    PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui64Value))
+               <= uiPMRPageSize));
+
+    eError = PMRLockSysPhysAddresses(psPMR);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Get the symbolic address of the PMR */
+	eError = PMR_PDumpSymbolicAddr(psPMR,
+								   uiLogicalOffset,
+								   sizeof(aszMemspaceName),
+								   &aszMemspaceName[0],
+								   sizeof(aszSymbolicName),
+								   &aszSymbolicName[0],
+								   &uiPDumpSymbolicOffset,
+				                   &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Write the WRW script command */
+	eError = PDumpPMRWRW64(aszMemspaceName,
+						 aszSymbolicName,
+						 uiPDumpSymbolicOffset,
+						 ui64Value,
+						 uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+    eError = PMRUnlockSysPhysAddresses(psPMR);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+    return PVRSRV_OK;
+}
+
+/*!
+ * @brief Writes a RDW64 followed by a WRW64 command to the pdump script to
+ *        perform an effective copy from memory to memory. Memory copied is of
+ *        size sizeof(IMG_UINT64)
+ *
+ * @param psDstPMR - PMR object representing allocation of destination
+ * @param uiDstLogicalOffset - destination offset
+ * @param psSrcPMR - PMR object representing allocation of source
+ * @param uiSrcLogicalOffset - source offset
+ * @param pszTmpVar - pdump temporary variable used during the copy
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpCopyMem64(PMR *psDstPMR,
+                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+                  PMR *psSrcPMR,
+                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+                  const IMG_CHAR *pszTmpVar,
+                  PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee;
+	const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee;
+
+	PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT64) <= psSrcPMR->uiLogicalSize);
+	/* Especially make sure to not cross a block boundary */
+	PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT64))
+	             <= uiSrcPMRPageSize));
+
+	PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT64) <= psDstPMR->uiLogicalSize);
+	/* Especially make sure to not cross a block boundary */
+	PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT64))
+	             <= uiDstPMRPageSize));
+
+
+	eError = PMRLockSysPhysAddresses(psSrcPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Get the symbolic address of the source PMR */
+	eError = PMR_PDumpSymbolicAddr(psSrcPMR,
+	                               uiSrcLogicalOffset,
+	                               sizeof(aszMemspaceName),
+	                               &aszMemspaceName[0],
+	                               sizeof(aszSymbolicName),
+	                               &aszSymbolicName[0],
+	                               &uiPDumpSymbolicOffset,
+	                               &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Issue PDump read command */
+	eError = PDumpPMRRDW64MemToInternalVar(pszTmpVar,
+	                                       aszMemspaceName,
+	                                       aszSymbolicName,
+	                                       uiPDumpSymbolicOffset,
+	                                       uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+    eError = PMRUnlockSysPhysAddresses(psSrcPMR);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+	eError = PMRLockSysPhysAddresses(psDstPMR);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	/* Get the symbolic address of the destination PMR */
+	eError = PMR_PDumpSymbolicAddr(psDstPMR,
+	                               uiDstLogicalOffset,
+	                               sizeof(aszMemspaceName),
+	                               &aszMemspaceName[0],
+	                               sizeof(aszSymbolicName),
+	                               &aszSymbolicName[0],
+	                               &uiPDumpSymbolicOffset,
+	                               &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	/* Write the WRW script command */
+	eError = PDumpPMRWRW64InternalVarToMem(aszMemspaceName,
+	                                       aszSymbolicName,
+	                                       uiPDumpSymbolicOffset,
+	                                       pszTmpVar,
+	                                       uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+    eError = PMRUnlockSysPhysAddresses(psDstPMR);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+    return PVRSRV_OK;
+}
+
+/*!
+ * @brief PDumps the contents of the given allocation.
+ * If bZero is IMG_TRUE then the zero page in the parameter stream is used
+ * as the source of data, rather than the allocation's actual backing.
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - Offset to write at
+ * @param uiSize - Number of bytes to write
+ * @param uiPDumpFlags - PDump flags
+ * @param bZero - Use the PDump zero page as the source
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEVMEM_SIZE_T uiSize,
+                PDUMP_FLAGS_T uiPDumpFlags,
+                IMG_BOOL bZero)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiOutOffset;
+	IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName = 0;
+	const IMG_CHAR *pszParamStreamFileName;
+	PDUMP_FILEOFFSET_T uiParamStreamFileOffset;
+
+	/* required when !bZero */
+	#define PMR_MAX_PDUMP_BUFSZ (1<<14)
+	IMG_CHAR aszParamStreamFilename[PDUMP_PARAM_MAX_FILE_NAME];
+	IMG_UINT8 *pcBuffer = NULL;
+	size_t uiBufSz;
+	size_t uiNumBytes;
+	IMG_BOOL bValid;
+
+	PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
+
+	/* Get the correct PDump stream file name */
+	if (bZero)
+	{
+		PDumpCommentWithFlags(uiPDumpFlags,
+		                      "Zeroing allocation (%llu bytes)",
+		                      (unsigned long long) uiSize);
+
+		/* get the zero page information. it is constant for this function */
+		PDumpGetParameterZeroPageInfo(&uiParamStreamFileOffset,
+		                              &uiBufSz,
+		                              &pszParamStreamFileName);
+	}
+	else
+	{
+
+		uiBufSz = 1 << PMR_GetLog2Contiguity(psPMR);
+		PVR_ASSERT((1 << PMR_GetLog2Contiguity(psPMR)) <= PMR_MAX_PDUMP_BUFSZ);
+
+		pcBuffer = OSAllocMem(uiBufSz);
+
+		PVR_LOGR_IF_NOMEM(pcBuffer, "OSAllocMem");
+
+		eError = PMRLockSysPhysAddresses(psPMR);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		pszParamStreamFileName = aszParamStreamFilename;
+	}
+
+	/* Loop over all touched symbolic addresses of the PMR and
+	 * emit LDBs to load the contents. */
+	while (uiCurrentOffset < (uiLogicalOffset + uiSize))
+	{
+		/* Get the correct symbolic name for the current offset */
+		eError = PMR_PDumpSymbolicAddr(psPMR,
+		                               uiCurrentOffset,
+		                               sizeof(aszMemspaceName),
+		                               &aszMemspaceName[0],
+		                               sizeof(aszSymbolicName),
+		                               &aszSymbolicName[0],
+		                               &uiOutOffset,
+		                               &uiNextSymName);
+		PVR_ASSERT(eError == PVRSRV_OK);
+		PVR_ASSERT((uiNextSymName - uiCurrentOffset) <= uiBufSz);
+
+		PMR_IsOffsetValid(psPMR,
+		                  0,
+		                  1,
+		                  uiCurrentOffset,
+		                  &bValid);
+
+		/* Either just LDB the zeros or read from the PMR and store that
+		 * in the pdump stream */
+		if (bValid)
+		{
+			if (bZero)
+			{
+				uiNumBytes = MIN(uiSize, uiNextSymName - uiCurrentOffset);
+			}
+			else
+			{
+				IMG_DEVMEM_OFFSET_T uiReadOffset;
+				uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ?
+				                 uiLogicalOffset + uiSize - uiCurrentOffset :
+				                 uiNextSymName - uiCurrentOffset);
+
+				eError = PMR_ReadBytes(psPMR,
+				                       uiCurrentOffset,
+				                       pcBuffer,
+				                       uiReadOffset,
+				                       &uiNumBytes);
+				PVR_ASSERT(eError == PVRSRV_OK);
+
+				eError = PDumpWriteBuffer(pcBuffer,
+				                          uiNumBytes,
+				                          uiPDumpFlags,
+				                          &aszParamStreamFilename[0],
+				                          sizeof(aszParamStreamFilename),
+				                          &uiParamStreamFileOffset);
+				if (eError == PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+				{
+					/* Write to parameter file prevented under the flags and
+					 * current state of the driver so skip further writes.
+					 */
+					eError = PVRSRV_OK;
+				}
+				else if (eError != PVRSRV_OK)
+				{
+					PDUMP_ERROR(eError, "Failed to write PMR memory to parameter file");
+				}
+			}
+
+			/* Emit the LDB command to the current symbolic address*/
+			eError = PDumpPMRLDB(aszMemspaceName,
+			                     aszSymbolicName,
+			                     uiOutOffset,
+			                     uiNumBytes,
+			                     pszParamStreamFileName,
+			                     uiParamStreamFileOffset,
+			                     uiPDumpFlags);
+		}
+		uiCurrentOffset = uiNextSymName;
+	}
+
+	if (!bZero)
+	{
+		eError = PMRUnlockSysPhysAddresses(psPMR);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		OSFreeMem(pcBuffer);
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+                   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   IMG_UINT32 uiArraySize,
+                   const IMG_CHAR *pszFilename,
+                   IMG_UINT32 uiFileOffset)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiOutOffset;
+	IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName = 0;
+
+	PVR_UNREFERENCED_PARAMETER(uiArraySize);
+
+	PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
+
+	while (uiCurrentOffset < (uiLogicalOffset + uiSize))
+	{
+		IMG_DEVMEM_OFFSET_T uiReadOffset;
+
+		eError = PMR_PDumpSymbolicAddr(psPMR,
+		                               uiCurrentOffset,
+		                               sizeof(aszMemspaceName),
+		                               &aszMemspaceName[0],
+		                               sizeof(aszSymbolicName),
+		                               &aszSymbolicName[0],
+		                               &uiOutOffset,
+		                               &uiNextSymName);
+		PVR_ASSERT(eError == PVRSRV_OK);
+		PVR_ASSERT(uiNextSymName <= psPMR->uiLogicalSize);
+
+		uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ?
+		                 uiLogicalOffset + uiSize - uiCurrentOffset :
+		                 uiNextSymName - uiCurrentOffset);
+
+		eError = PDumpPMRSAB(aszMemspaceName,
+		                     aszSymbolicName,
+		                     uiOutOffset,
+		                     uiReadOffset,
+		                     pszFilename,
+		                     uiFileOffset);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		uiCurrentOffset = uiNextSymName;
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT32 ui32Value,
+              IMG_UINT32 ui32Mask,
+              PDUMP_POLL_OPERATOR eOperator,
+              PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+    IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+    IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+    IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+    IMG_DEVMEM_OFFSET_T uiNextSymName;
+    IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+    /* Make sure to not cross a block boundary */
+    PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value))
+               <= uiPMRPageSize));
+
+    eError = PMR_PDumpSymbolicAddr(psPMR,
+                                   uiLogicalOffset,
+                                   sizeof(aszMemspaceName),
+                                   &aszMemspaceName[0],
+                                   sizeof(aszSymbolicName),
+                                   &aszSymbolicName[0],
+                                   &uiPDumpOffset,
+                                   &uiNextSymName);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+#define _MEMPOLL_DELAY		(1000)
+#define _MEMPOLL_COUNT		(2000000000 / _MEMPOLL_DELAY)
+
+    eError = PDumpPMRPOL(aszMemspaceName,
+                         aszSymbolicName,
+                         uiPDumpOffset,
+                         ui32Value,
+                         ui32Mask,
+                         eOperator,
+                         _MEMPOLL_COUNT,
+                         _MEMPOLL_DELAY,
+                         uiPDumpFlags);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+    PVRSRV_ERROR eError;
+    IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+    IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+    IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+    IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+    eError = PMR_PDumpSymbolicAddr(psPMR,
+                                   uiReadOffset,
+                                   sizeof(aszMemspaceName),
+                                   &aszMemspaceName[0],
+                                   sizeof(aszSymbolicName),
+                                   &aszSymbolicName[0],
+                                   &uiPDumpOffset,
+                                   &uiNextSymName);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    eError = PDumpPMRCBP(aszMemspaceName,
+                         aszSymbolicName,
+                         uiPDumpOffset,
+                         uiWriteOffset,
+                         uiPacketSize,
+                         uiBufferSize);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+IMG_INTERNAL void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+                        IMG_UINT32 uiBlockSize,
+                        IMG_UINT32 ui32AllocPageCount,
+                        IMG_UINT32 *pai32AllocIndices,
+                        IMG_UINT32 ui32FreePageCount,
+                        IMG_UINT32 *pai32FreeIndices,
+                        IMG_BOOL bInitialise,
+                        IMG_UINT32 ui32InitValue,
+                        IMG_HANDLE *phPDumpAllocInfoOut)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE *phPDumpAllocInfo = (IMG_HANDLE*) psPMR->hPDumpAllocHandle;
+
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	IMG_UINT32 i, uiIndex;
+
+	/* Remove pages from the PMR */
+	for (i = 0; i < ui32FreePageCount; i++)
+	{
+		uiIndex = pai32FreeIndices[i];
+
+		eError = PDumpFree(phPDumpAllocInfo[uiIndex]);
+		PVR_ASSERT(eError == PVRSRV_OK);
+		phPDumpAllocInfo[uiIndex] = NULL;
+	}
+
+	/* Add new pages to the PMR */
+	for (i = 0; i < ui32AllocPageCount; i++)
+	{
+		uiIndex = pai32AllocIndices[i];
+
+		PVR_ASSERT(phPDumpAllocInfo[uiIndex] == NULL);
+
+		eError = PMR_PDumpSymbolicAddr(psPMR,
+		                               uiIndex * uiBlockSize,
+		                               sizeof(aszMemspaceName),
+		                               &aszMemspaceName[0],
+		                               sizeof(aszSymbolicName),
+		                               &aszSymbolicName[0],
+		                               &uiOffset,
+		                               &uiNextSymName);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		eError = PDumpMalloc(aszMemspaceName,
+		                     aszSymbolicName,
+		                     uiBlockSize,
+		                     uiBlockSize,
+		                     bInitialise,
+		                     ui32InitValue,
+		                     &phPDumpAllocInfo[uiIndex],
+		                     PDUMP_NONE);
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+
+	/* (IMG_HANDLE) <- (IMG_HANDLE*) */
+	*phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo;
+}
+
+IMG_INTERNAL void
+PDumpPMRFreePMR(PMR *psPMR,
+                IMG_DEVMEM_SIZE_T uiSize,
+                IMG_DEVMEM_ALIGN_T uiBlockSize,
+                IMG_UINT32 uiLog2Contiguity,
+                IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+
+	/* (IMG_HANDLE*) <- (IMG_HANDLE) */
+	IMG_HANDLE *ahPDumpAllocHandleArray = (IMG_HANDLE*) hPDumpAllocationInfoHandle;
+
+	for (i = 0; i < psPMR->uiNumPDumpBlocks; i++)
+	{
+		if (ahPDumpAllocHandleArray[i] != NULL)
+		{
+			eError = PDumpFree(ahPDumpAllocHandleArray[i]);
+			PVR_ASSERT(eError == PVRSRV_OK);
+			ahPDumpAllocHandleArray[i] = NULL;
+		}
+	}
+
+	OSFreeMem(ahPDumpAllocHandleArray);
+}
+
+
+IMG_INTERNAL void
+PDumpPMRMallocPMR(PMR *psPMR,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiBlockSize,
+                  IMG_UINT32 ui32ChunkSize,
+                  IMG_UINT32 ui32NumPhysChunks,
+                  IMG_UINT32 ui32NumVirtChunks,
+                  IMG_UINT32 *puiMappingTable,
+                  IMG_UINT32 uiLog2Contiguity,
+                  IMG_BOOL bInitialise,
+                  IMG_UINT32 ui32InitValue,
+                  IMG_HANDLE *phPDumpAllocInfoOut,
+                  IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE *phPDumpAllocInfo;
+
+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+	IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	IMG_UINT32 uiNumPhysBlocks;
+	IMG_UINT32 uiNumVirtBlocks;
+	IMG_UINT32 i, uiIndex;
+
+
+	if (PMR_IsSparse(psPMR))
+	{
+		uiNumPhysBlocks = (ui32ChunkSize * ui32NumPhysChunks) >> uiLog2Contiguity;
+		/* Make sure we did not cut off anything */
+		PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == (ui32ChunkSize * ui32NumPhysChunks));
+	}
+	else
+	{
+		uiNumPhysBlocks = uiSize >> uiLog2Contiguity;
+		/* Make sure we did not cut off anything */
+		PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == uiSize);
+	}
+
+	uiNumVirtBlocks = uiSize >> uiLog2Contiguity;
+	PVR_ASSERT(uiNumVirtBlocks << uiLog2Contiguity == uiSize);
+
+	psPMR->uiNumPDumpBlocks = uiNumVirtBlocks;
+
+	phPDumpAllocInfo = (IMG_HANDLE*) OSAllocZMem(uiNumVirtBlocks * sizeof(IMG_HANDLE));
+	PVR_ASSERT(phPDumpAllocInfo != NULL);
+
+	for (i = 0; i < uiNumPhysBlocks; i++)
+	{
+		uiIndex = PMR_IsSparse(psPMR) ? puiMappingTable[i] : i;
+
+		eError = PMR_PDumpSymbolicAddr(psPMR,
+		                               uiIndex * uiBlockSize,
+		                               sizeof(aszMemspaceName),
+		                               &aszMemspaceName[0],
+		                               sizeof(aszSymbolicName),
+		                               &aszSymbolicName[0],
+		                               &uiOffset,
+		                               &uiNextSymName);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		eError = PDumpMalloc(aszMemspaceName,
+		                     aszSymbolicName,
+		                     uiBlockSize,
+		                     uiBlockSize,
+		                     bInitialise,
+		                     ui32InitValue,
+		                     &phPDumpAllocInfo[uiIndex],
+		                     ui32PDumpFlags);
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+
+	/* (IMG_HANDLE) <- (IMG_HANDLE*) */
+	*phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo;
+}
+#endif	/* PDUMP */
+
+
+void *PMRGetPrivateData(const PMR *psPMR,
+                        const PMR_IMPL_FUNCTAB *psFuncTab)
+{
+	_PMRAssert(psPMR);
+    return (psFuncTab == psPMR->psFuncTab) ? psPMR->pvFlavourData : NULL;
+}
+
+#define PMR_PM_WORD_SIZE 4
+
+PVRSRV_ERROR
+PMRWritePMPageList(/* Target PMR, offset, and length */
+                   PMR *psPageListPMR,
+                   IMG_DEVMEM_OFFSET_T uiTableOffset,
+                   IMG_DEVMEM_SIZE_T  uiTableLength,
+                   /* Referenced PMR, and "page" granularity */
+                   PMR *psReferencePMR,
+                   IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+                   PMR_PAGELIST **ppsPageList)
+{
+    PVRSRV_ERROR eError;
+    IMG_DEVMEM_SIZE_T uiWordSize;
+    IMG_UINT32 uiNumPages;
+    IMG_UINT32 uiPageIndex;
+    PMR_FLAGS_T uiFlags = psPageListPMR->uiFlags;
+    PMR_PAGELIST *psPageList;
+#if defined(PDUMP)
+    IMG_CHAR aszTableEntryMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+    IMG_CHAR aszTableEntrySymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+    IMG_DEVMEM_OFFSET_T uiTableEntryPDumpOffset;
+    IMG_CHAR aszPageMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+    IMG_CHAR aszPageSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+    IMG_DEVMEM_OFFSET_T uiPagePDumpOffset;
+    IMG_DEVMEM_OFFSET_T uiNextSymName;
+#endif
+#if !defined(NO_HARDWARE)
+    IMG_UINT32 uiPageListPageSize = 1 << psPageListPMR->uiLog2ContiguityGuarantee;
+    IMG_UINT64 uiPageListPMRPage = 0;
+    IMG_UINT64 uiPrevPageListPMRPage = 0;
+    IMG_HANDLE hPrivData = NULL;
+    void *pvKernAddr = NULL;
+	IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+    IMG_DEV_PHYADDR *pasDevAddrPtr;
+    IMG_UINT32 *pui32DataPtr = NULL;
+    IMG_BOOL *pbPageIsValid;
+#endif
+
+    /* check we're being asked to write the same number of 4-byte units as there are pages */
+    uiNumPages = (IMG_UINT32)(psReferencePMR->uiLogicalSize >> uiLog2PageSize);
+
+    if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psReferencePMR->uiLogicalSize)
+    {
+		/* Strictly speaking, it's possible to provoke this error in two ways:
+			(i) if the size is not a whole multiple of the page size; or
+			(ii) if there are more than 4 billion pages.
+			The latter is unlikely, but the check is required to justify the cast.
+		*/
+        eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+        goto e0;
+    }
+    uiWordSize = (IMG_UINT32)uiTableLength / uiNumPages;
+    if (uiNumPages * uiWordSize != uiTableLength)
+    {
+        eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+        goto e0;
+    }
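+
+/* Worked example: a 2MB reference PMR with uiLog2PageSize = 12 yields
+ * uiNumPages = 512; a 2048-byte table then gives uiWordSize = 4, and
+ * 512 * 4 == 2048, so both checks above pass.
+ */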
+
+    /* Check we're not being asked to write off the end of the PMR */
+    if (uiTableOffset + uiTableLength > psPageListPMR->uiLogicalSize)
+    {
+        /* table memory insufficient to store all the entries */
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto e0;
+    }
+
+    /* the PMR into which we are writing must not be user CPU mappable: */
+	if (PVRSRV_CHECK_CPU_READABLE(uiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(uiFlags))
+    {
+		PVR_DPF((PVR_DBG_ERROR, "masked flags = 0x%08x", (uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE))));
+		PVR_DPF((PVR_DBG_ERROR, "Page list PMR allows CPU mapping (0x%08x)", uiFlags));
+		eError = PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS;
+        goto e0;
+    }
+
+	if (_PMRIsSparse(psPageListPMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PageList PMR is sparse"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	if (_PMRIsSparse(psReferencePMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Reference PMR is sparse"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	psPageList = OSAllocMem(sizeof(PMR_PAGELIST));
+	if (psPageList == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page list"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+	psPageList->psReferencePMR = psReferencePMR;
+
+    /* Need to lock down the physical addresses of the reference PMR */
+    /* N.B.  This also checks that the requested "contiguity" is achievable */
+    eError = PMRLockSysPhysAddresses(psReferencePMR);
+    if (eError != PVRSRV_OK)
+    {
+        goto e1;
+    }
+
+#if !defined(NO_HARDWARE)
+    if (uiNumPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+	    pasDevAddrPtr = OSAllocMem(uiNumPages * sizeof(IMG_DEV_PHYADDR));
+		if (pasDevAddrPtr == NULL)
+		{
+			 PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page list"));
+			 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			 goto e2;
+		}
+
+		pbPageIsValid = OSAllocMem(uiNumPages * sizeof(IMG_BOOL));
+		if (pbPageIsValid == NULL)
+		{
+			/* Clean-up before exit */
+			 OSFreeMem(pasDevAddrPtr);
+
+			 PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page state"));
+			 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			 goto e2;
+		}
+	}
+	else
+	{
+		pasDevAddrPtr = asDevPAddr;
+		pbPageIsValid = abValid;
+	}
+
+
+	eError = PMR_DevPhysAddr(psReferencePMR, uiLog2PageSize, uiNumPages, 0,
+							 pasDevAddrPtr, pbPageIsValid);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses"));
+		goto e3;
+	}
+#endif
+
+    for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+    {
+        IMG_DEVMEM_OFFSET_T uiPMROffset = uiTableOffset + (uiWordSize * uiPageIndex);
+#if defined(PDUMP)
+        eError = PMR_PDumpSymbolicAddr(psPageListPMR,
+                                       uiPMROffset,
+                                       sizeof(aszTableEntryMemspaceName),
+                                       &aszTableEntryMemspaceName[0],
+                                       sizeof(aszTableEntrySymbolicName),
+                                       &aszTableEntrySymbolicName[0],
+                                       &uiTableEntryPDumpOffset,
+                                       &uiNextSymName);
+        PVR_ASSERT(eError == PVRSRV_OK);
+
+        eError = PMR_PDumpSymbolicAddr(psReferencePMR,
+                                       (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
+                                       sizeof(aszPageMemspaceName),
+                                       &aszPageMemspaceName[0],
+                                       sizeof(aszPageSymbolicName),
+                                       &aszPageSymbolicName[0],
+                                       &uiPagePDumpOffset,
+                                       &uiNextSymName);
+        PVR_ASSERT(eError == PVRSRV_OK);
+
+        eError = PDumpWriteShiftedMaskedValue(/* destination */
+                                              aszTableEntryMemspaceName,
+                                              aszTableEntrySymbolicName,
+                                              uiTableEntryPDumpOffset,
+                                              /* source */
+                                              aszPageMemspaceName,
+                                              aszPageSymbolicName,
+                                              uiPagePDumpOffset,
+                                              /* shift right */
+                                              uiLog2PageSize,
+                                              /* shift left */
+                                              0,
+                                              /* mask */
+                                              0xffffffff,
+                                              /* word size */
+                                              uiWordSize,
+                                              /* flags */
+                                              PDUMP_FLAGS_CONTINUOUS);
+        PVR_ASSERT(eError == PVRSRV_OK);
+#else
+		PVR_UNREFERENCED_PARAMETER(uiPMROffset);
+#endif
+#if !defined(NO_HARDWARE)
+
+		/*
+			Sparse PMRs were rejected at function entry, but since the
+			information is available here, also check that every page
+			is valid.
+		*/
+		PVR_ASSERT(pbPageIsValid[uiPageIndex]);
+        PVR_ASSERT(pasDevAddrPtr[uiPageIndex].uiAddr != 0);
+        PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0);
+
+        /* Index of the page-list PMR page holding this table entry. Use the
+           page-list PMR's own contiguity so that it matches the mapping
+           granularity (uiPageListPageSize) used below. */
+        uiPageListPMRPage = uiPMROffset >> psPageListPMR->uiLog2ContiguityGuarantee;
+
+        if ((pui32DataPtr == NULL) || (uiPageListPMRPage != uiPrevPageListPMRPage))
+        {
+            size_t uiMappingOffset = uiPMROffset & (~(uiPageListPageSize - 1));
+            size_t uiMappedSize;
+
+			/* If we already had a page list mapped, we need to unmap it... */
+            if (pui32DataPtr != NULL)
+            {
+                PMRReleaseKernelMappingData(psPageListPMR, hPrivData);
+            }
+
+            eError = PMRAcquireKernelMappingData(psPageListPMR,
+                                                 uiMappingOffset,
+                                                 uiPageListPageSize,
+                                                 &pvKernAddr,
+                                                 &uiMappedSize,
+                                                 &hPrivData);
+            if (eError != PVRSRV_OK)
+            {
+                PVR_DPF((PVR_DBG_ERROR, "Error mapping page list PMR page (%" IMG_UINT64_FMTSPEC ") into kernel (%d)",
+                         uiPageListPMRPage, eError));
+                goto e3;
+            }
+
+            uiPrevPageListPMRPage = uiPageListPMRPage;
+            PVR_ASSERT(uiMappedSize >= uiPageListPageSize);
+            PVR_ASSERT(pvKernAddr != NULL);
+
+			pui32DataPtr = (IMG_UINT32 *) (((IMG_CHAR *) pvKernAddr) + (uiPMROffset & (uiPageListPageSize - 1)));
+        }
+
+        PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0);
+
+        /* Write the physical page index into the page list PMR */
+        *pui32DataPtr++ = TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize);
+
+        /* Last page so unmap */
+        if (uiPageIndex == (uiNumPages - 1))
+        {
+            PMRReleaseKernelMappingData(psPageListPMR, hPrivData);
+        }
+#endif
+    }
+
+#if !defined(NO_HARDWARE)
+    if (pasDevAddrPtr != asDevPAddr)
+	{
+		OSFreeMem(pbPageIsValid);
+		OSFreeMem(pasDevAddrPtr);
+	}
+#endif
+    *ppsPageList = psPageList;
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+#if !defined(NO_HARDWARE)
+e3:
+    if (pasDevAddrPtr != asDevPAddr)
+	{
+		OSFreeMem(pbPageIsValid);
+		OSFreeMem(pasDevAddrPtr);
+	}
+ e2:
+   PMRUnlockSysPhysAddresses(psReferencePMR);
+#endif
+ e1:
+	OSFreeMem(psPageList);
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
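+
+/* Editor's sketch (not part of the driver): a typical caller pairs the
+ * write with the unwrite on teardown. All names other than the two PMR
+ * functions below are hypothetical.
+ *
+ *    PMR_PAGELIST *psPageList;
+ *    PVRSRV_ERROR eErr;
+ *
+ *    eErr = PMRWritePMPageList(psTablePMR,      // PMR holding the table
+ *                              uiTableOffset,   // byte offset of the table
+ *                              uiTableLength,   // uiNumPages * PMR_PM_WORD_SIZE
+ *                              psBackingPMR,    // PMR whose pages are listed
+ *                              uiLog2PageSize,  // e.g. 12 for 4KB pages
+ *                              &psPageList);
+ *    if (eErr == PVRSRV_OK)
+ *    {
+ *        // ... use the page list ...
+ *        (void) PMRUnwritePMPageList(psPageList); // drops the phys-addr lock
+ *    }
+ */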
+
+
+PVRSRV_ERROR /* FIXME: should be void */
+PMRUnwritePMPageList(PMR_PAGELIST *psPageList)
+{
+    PVRSRV_ERROR eError2;
+
+    eError2 = PMRUnlockSysPhysAddresses(psPageList->psReferencePMR);
+    PVR_ASSERT(eError2 == PVRSRV_OK);
+	OSFreeMem(psPageList);
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRZeroingPMR(PMR *psPMR,
+				IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
+{
+    IMG_UINT32 uiNumPages;
+    IMG_UINT32 uiPageIndex;
+    IMG_UINT32 ui32PageSize = 1 << uiLog2PageSize;
+    IMG_HANDLE hPrivData = NULL;
+    void *pvKernAddr = NULL;
+    PVRSRV_ERROR eError = PVRSRV_OK;
+    size_t uiMappedSize;
+
+	_PMRAssert(psPMR);
+
+    /* Calculate number of pages in this PMR */
+	uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
+
+	/* Verify the logical size is a multiple of the physical page size */
+    if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
+    {
+		PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: PMR size is not a multiple of %u", ui32PageSize));
+        eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+        goto MultiPage_Error;
+    }
+
+	if (_PMRIsSparse(psPMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: PMR is sparse"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Sparse_Error;
+	}
+
+	/* Scan through all pages of the PMR */
+    for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+    {
+        /* map the physical page (for a given PMR offset) into kernel space */
+        eError = PMRAcquireKernelMappingData(psPMR,
+                                             (size_t)uiPageIndex << uiLog2PageSize,
+                                             ui32PageSize,
+                                             &pvKernAddr,
+                                             &uiMappedSize,
+                                             &hPrivData);
+        if (eError != PVRSRV_OK)
+        {
+    		PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: AcquireKernelMapping failed with error %u", eError));
+        	goto AcquireKernelMapping_Error;
+        }
+
+        /* ensure the mapped page size is the same as the physical page size */
+        if (uiMappedSize != ui32PageSize)
+        {
+    		PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: Physical Page size = 0x%08x, Size of Mapping = 0x%016" IMG_UINT64_FMTSPECx,
+    								ui32PageSize,
+    								(IMG_UINT64)uiMappedSize));
+    		eError = PVRSRV_ERROR_INVALID_PARAMS;
+        	goto MappingSize_Error;
+        }
+
+        /* Use the conservative 'DeviceMemSet' here because we can't know
+         * if this PMR will be mapped cached.
+         */
+
+        OSDeviceMemSet(pvKernAddr, 0, ui32PageSize);
+
+        /* release mapping */
+        PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+    }
+
+    PVR_DPF((PVR_DBG_MESSAGE,"PMRZeroingPMR: Zeroing PMR %p done (num pages %u, page size %u)",
+    						psPMR,
+    						uiNumPages,
+    						ui32PageSize));
+
+    return PVRSRV_OK;
+
+
+    /* Error handling */
+
+MappingSize_Error:
+	PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+AcquireKernelMapping_Error:
+Sparse_Error:
+MultiPage_Error:
+
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
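+
+/* Editor's sketch of the acquire/touch/release pattern PMRZeroingPMR relies
+ * on; any CPU-side access to PMR contents is expected to take this shape
+ * (the local variable names are illustrative):
+ *
+ *    void *pvKernAddr;
+ *    size_t uiMappedSize;
+ *    IMG_HANDLE hPriv;
+ *
+ *    if (PMRAcquireKernelMappingData(psPMR, uiOffset, uiSize,
+ *                                    &pvKernAddr, &uiMappedSize,
+ *                                    &hPriv) == PVRSRV_OK)
+ *    {
+ *        OSDeviceMemSet(pvKernAddr, 0, uiMappedSize); // or read/modify
+ *        PMRReleaseKernelMappingData(psPMR, hPriv);
+ *    }
+ */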
+
+PVRSRV_ERROR
+PMRDumpPageList(PMR *psPMR,
+					IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
+{
+    IMG_DEV_PHYADDR sDevAddrPtr;
+    IMG_UINT32 uiNumPages;
+    IMG_UINT32 uiPageIndex;
+    IMG_BOOL bPageIsValid;
+    IMG_UINT32 ui32Col = 16;
+    IMG_UINT32 ui32SizePerCol = 11;
+    IMG_UINT32 ui32ByteCount = 0;
+    IMG_CHAR pszBuffer[16 /* ui32Col */ * 11 /* ui32SizePerCol */ + 1];
+    PVRSRV_ERROR eError = PVRSRV_OK;
+
+    /* Get number of pages */
+	uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
+
+	/* Verify the logical size is a multiple of the physical page size */
+    if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
+    {
+		PVR_DPF((PVR_DBG_ERROR, "PMRDumpPageList: PMR size is not a multiple of %u", 1 << uiLog2PageSize));
+        eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+        goto MultiPage_Error;
+    }
+
+	if (_PMRIsSparse(psPMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PMRDumpPageList: PMR is sparse"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Sparse_Error;
+	}
+
+	PVR_LOG(("    PMR %p, Number of pages %u, Log2PageSize %u", psPMR, uiNumPages, uiLog2PageSize));
+
+	/* Print the address of the physical pages */
+    for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+    {
+    	/* Get Device physical Address */
+        eError = PMR_DevPhysAddr(psPMR,
+                        uiLog2PageSize,
+                        1,
+                        (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
+                        &sDevAddrPtr,
+                        &bPageIsValid);
+        if (eError != PVRSRV_OK)
+        {
+    		PVR_DPF((PVR_DBG_ERROR, "PMRDumpPageList: PMR %p failed to get DevPhysAddr with error %u",
+    								psPMR,
+    								eError));
+        	goto DevPhysAddr_Error;
+        }
+
+        ui32ByteCount += OSSNPrintf(pszBuffer + ui32ByteCount, ui32SizePerCol + 1, "%08x ", (IMG_UINT32)(sDevAddrPtr.uiAddr >> uiLog2PageSize));
+        PVR_ASSERT(ui32ByteCount < ui32Col * ui32SizePerCol);
+
+		if (uiPageIndex % ui32Col == ui32Col - 1)
+		{
+			PVR_LOG(("      Phys Page: %s", pszBuffer));
+			ui32ByteCount = 0;
+		}
+    }
+    if (ui32ByteCount > 0)
+    {
+		PVR_LOG(("      Phys Page: %s", pszBuffer));
+    }
+
+    return PVRSRV_OK;
+
+    /* Error handling */
+DevPhysAddr_Error:
+Sparse_Error:
+MultiPage_Error:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMRInit(void)
+{
+	PVRSRV_ERROR eError;
+
+    if (_gsSingletonPMRContext.bModuleInitialised)
+    {
+        PVR_DPF((PVR_DBG_ERROR, "%s: Error: Singleton PMR context already initialized", __func__));
+        eError = PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+        goto out;
+    }
+
+	eError = OSLockCreate(&_gsSingletonPMRContext.hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error: failed to create lock", __func__));
+		goto out;
+	}
+
+    _gsSingletonPMRContext.uiNextSerialNum = 1;
+
+    _gsSingletonPMRContext.uiNextKey = 0x8300f001 * (uintptr_t)&_gsSingletonPMRContext;
+
+    _gsSingletonPMRContext.bModuleInitialised = IMG_TRUE;
+
+    _gsSingletonPMRContext.uiNumLivePMRs = 0;
+
+out:
+    PVR_ASSERT(eError == PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMRDeInit(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		goto out;
+	}
+
+    if (!_gsSingletonPMRContext.bModuleInitialised)
+    {
+        PVR_DPF((PVR_DBG_ERROR, "%s: Error: Singleton PMR context is not initialized", __func__));
+        eError = PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+        goto out;
+    }
+
+    if (_gsSingletonPMRContext.uiNumLivePMRs != 0)
+    {
+        PVR_DPF((PVR_DBG_ERROR, "%s: Error: %d live PMRs remain",
+						__func__,
+						_gsSingletonPMRContext.uiNumLivePMRs));
+        PVR_DPF((PVR_DBG_ERROR, "%s: This is an unrecoverable error; a subsequent crash is inevitable",
+						__func__));
+        eError = PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+        goto out;
+    }
+
+	OSLockDestroy(_gsSingletonPMRContext.hLock);
+
+    _gsSingletonPMRContext.bModuleInitialised = IMG_FALSE;
+
+out:
+    PVR_ASSERT(eError == PVRSRV_OK);
+    return eError;
+}
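+
+/* Editor's note: PMRInit()/PMRDeInit() bracket the lifetime of the singleton
+ * PMR context and are expected to be called once each, e.g. (illustrative):
+ *
+ *    eError = PMRInit();    // during services initialisation
+ *    // ... driver lifetime: PMRs created and destroyed ...
+ *    eError = PMRDeInit();  // at deinit, after every live PMR is freed
+ */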
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/power.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/power.c
new file mode 100644
index 0000000..4c71bbf
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/power.c
@@ -0,0 +1,1009 @@
+/*************************************************************************/ /*!
+@File           power.c
+@Title          Power management functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for power management functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "allocmem.h"
+#include "osfunc.h"
+
+#include "lists.h"
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "process_stats.h"
+
+
+struct _PVRSRV_POWER_DEV_TAG_
+{
+	PFN_PRE_POWER					pfnDevicePrePower;
+	PFN_POST_POWER					pfnDevicePostPower;
+	PFN_SYS_DEV_PRE_POWER			pfnSystemPrePower;
+	PFN_SYS_DEV_POST_POWER			pfnSystemPostPower;
+	PFN_PRE_CLOCKSPEED_CHANGE		pfnPreClockSpeedChange;
+	PFN_POST_CLOCKSPEED_CHANGE		pfnPostClockSpeedChange;
+	PFN_FORCED_IDLE_REQUEST			pfnForcedIdleRequest;
+	PFN_FORCED_IDLE_CANCEL_REQUEST	pfnForcedIdleCancelRequest;
+	PFN_DUST_COUNT_REQUEST			pfnDustCountRequest;
+	IMG_HANDLE						hSysData;
+	IMG_HANDLE						hDevCookie;
+	PVRSRV_DEV_POWER_STATE 			eDefaultPowerState;
+	PVRSRV_DEV_POWER_STATE 			eCurrentPowerState;
+};
+
+
+static inline IMG_UINT64 PVRSRVProcessStatsGetTimeNs(void)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	return OSClockns64();
+#else
+	return 0;
+#endif
+}
+
+
+static inline IMG_UINT64 PVRSRVProcessStatsGetTimeUs(void)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	return OSClockus();
+#else
+	return 0;
+#endif
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	_IsSystemStatePowered
+
+ @Description	Tests whether a given system state represents powered-up.
+
+ @Input		eSystemPowerState : a system power state
+
+ @Return	IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState)
+{
+	return (eSystemPowerState == PVRSRV_SYS_POWER_STATE_ON);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPowerLock
+
+ @Description	Obtain the mutex for power transitions. Only allowed when
+                system power is on.
+
+ @Return	PVRSRV_ERROR_RETRY or PVRSRV_OK
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPowerLock(PCPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	OSLockAcquire(psDeviceNode->hPowerLock);
+
+	/* Only allow the power lock to be taken when the system power is on */
+	if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState))
+	{
+		return PVRSRV_OK;
+	}
+
+	OSLockRelease(psDeviceNode->hPowerLock);
+
+	return PVRSRV_ERROR_RETRY;
+}
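+
+/* Editor's sketch: because PVRSRVPowerLock() returns PVRSRV_ERROR_RETRY while
+ * the system is powered down, callers that expect the system to power back up
+ * may retry with a small delay; others simply propagate the error. The delay
+ * value below is illustrative:
+ *
+ *    PVRSRV_ERROR eErr;
+ *    do
+ *    {
+ *        eErr = PVRSRVPowerLock(psDeviceNode);
+ *        if (eErr == PVRSRV_ERROR_RETRY)
+ *        {
+ *            OSWaitus(100);
+ *        }
+ *    } while (eErr == PVRSRV_ERROR_RETRY);
+ */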
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVForcedPowerLock
+
+ @Description	Obtain the mutex for power transitions regardless of
+                system power state
+
+ @Return	void
+
+******************************************************************************/
+IMG_EXPORT
+void PVRSRVForcedPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	OSLockAcquire(psDeviceNode->hPowerLock);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPowerUnlock
+
+ @Description	Release the mutex for power transitions
+
+ @Return	void
+
+******************************************************************************/
+IMG_EXPORT
+void PVRSRVPowerUnlock(PCPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	OSLockRelease(psDeviceNode->hPowerLock);
+}
+
+IMG_EXPORT
+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice)
+{
+	return (psPowerDevice->eDefaultPowerState == PVRSRV_DEV_POWER_STATE_OFF);
+}
+
+/*!
+******************************************************************************
+
+ @Function      PVRSRVSetDeviceDefaultPowerState
+
+ @Description   Set the default device power state to eNewPowerState
+
+ @Input		    psDeviceNode : Device node
+ @Input         eNewPowerState : New power state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+					PVRSRV_DEV_POWER_STATE eNewPowerState)
+{
+	PVRSRV_POWER_DEV *psPowerDevice;
+
+	psPowerDevice = psDeviceNode->psPowerDev;
+	if (psPowerDevice == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_DEVICE;
+	}
+
+	psPowerDevice->eDefaultPowerState = eNewPowerState;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDeviceIdleRequestKM
+
+ @Description
+
+ Perform device-specific processing required to force the device idle.
+
+ @Input		psDeviceNode : Device node
+ @Input		pfnIsDefaultStateOff : Filter function used to determine whether a forced idle is required for the device
+ @Input		bDeviceOffPermitted :	IMG_TRUE if the request should succeed even
+					when the device is off; IMG_FALSE if it should fail
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+					PFN_SYS_DEV_IS_DEFAULT_STATE_OFF	pfnIsDefaultStateOff,
+					IMG_BOOL				bDeviceOffPermitted)
+{
+	PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
+
+	if (psPowerDev && psPowerDev->pfnForcedIdleRequest)
+	{
+		if (!pfnIsDefaultStateOff || pfnIsDefaultStateOff(psPowerDev))
+		{
+			return psPowerDev->pfnForcedIdleRequest(psPowerDev->hDevCookie,
+													bDeviceOffPermitted);
+		}
+	}
+
+	return PVRSRV_OK;
+}
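+
+/* Editor's sketch of the filter-function parameter: a predicate makes the
+ * forced-idle request conditional, NULL makes it unconditional.
+ *
+ *    // Idle only devices whose default power state is OFF:
+ *    eError = PVRSRVDeviceIdleRequestKM(psDeviceNode,
+ *                                       PVRSRVDeviceIsDefaultStateOFF,
+ *                                       IMG_TRUE);
+ *
+ *    // Idle unconditionally; fail if the device is powered off:
+ *    eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_FALSE);
+ */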
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDeviceIdleCancelRequestKM
+
+ @Description
+
+ Perform device-specific processing required to cancel the forced idle state on the device, returning to normal operation.
+
+ @Input		psDeviceNode : Device node
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
+
+	if (psPowerDev && psPowerDev->pfnForcedIdleCancelRequest)
+	{
+		return psPowerDev->pfnForcedIdleCancelRequest(psPowerDev->hDevCookie);
+	}
+
+	return PVRSRV_OK;
+}
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDevicePrePowerStateKM
+
+ @Description
+
+ Perform device-specific processing required before a power transition
+
+ @Input		psPowerDevice : Power device
+ @Input		eNewPowerState : New power state
+ @Input		bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static
+PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV		*psPowerDevice,
+										 PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										 IMG_BOOL				bForced)
+{
+	IMG_UINT64 ui64SysTimer1 = 0;
+	IMG_UINT64 ui64SysTimer2 = 0;
+	IMG_UINT64 ui64DevTimer1 = 0;
+	IMG_UINT64 ui64DevTimer2 = 0;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+	if (psPowerDevice->pfnDevicePrePower != NULL)
+	{
+		ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+		/* Call the device's power callback. */
+		eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie,
+												  eNewPowerState,
+												  psPowerDevice->eCurrentPowerState,
+												  bForced);
+
+		ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	/* Do any required system-layer processing. */
+	if (psPowerDevice->pfnSystemPrePower != NULL)
+	{
+		ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+		eError = psPowerDevice->pfnSystemPrePower(psPowerDevice->hSysData,
+												  eNewPowerState,
+												  psPowerDevice->eCurrentPowerState,
+												  bForced);
+
+		ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2,
+							 ui64DevTimer1, ui64DevTimer2,
+							 bForced,
+							 eNewPowerState == PVRSRV_DEV_POWER_STATE_ON,
+							 IMG_TRUE);
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDevicePostPowerStateKM
+
+ @Description
+
+ Perform device-specific processing required after a power transition
+
+ @Input		psPowerDevice : Power device
+ @Input		eNewPowerState : New power state
+ @Input		bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static
+PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(PVRSRV_POWER_DEV			*psPowerDevice,
+										  PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										  IMG_BOOL					bForced)
+{
+	IMG_UINT64 ui64SysTimer1 = 0;
+	IMG_UINT64 ui64SysTimer2 = 0;
+	IMG_UINT64 ui64DevTimer1 = 0;
+	IMG_UINT64 ui64DevTimer2 = 0;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+	/* Do any required system-layer processing. */
+	if (psPowerDevice->pfnSystemPostPower != NULL)
+	{
+		ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+		eError = psPowerDevice->pfnSystemPostPower(psPowerDevice->hSysData,
+												   eNewPowerState,
+												   psPowerDevice->eCurrentPowerState,
+												   bForced);
+
+		ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	if (psPowerDevice->pfnDevicePostPower != NULL)
+	{
+		ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+		/* Call the device's power callback. */
+		eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie,
+												   eNewPowerState,
+												   psPowerDevice->eCurrentPowerState,
+												   bForced);
+
+		ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2,
+							 ui64DevTimer1, ui64DevTimer2,
+							 bForced,
+							 eNewPowerState == PVRSRV_DEV_POWER_STATE_ON,
+							 IMG_FALSE);
+
+	psPowerDevice->eCurrentPowerState = eNewPowerState;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVSetDevicePowerStateKM
+
+ @Description	Set the device into a new power state
+
+ @Input		psDeviceNode : Device node
+ @Input		eNewPowerState : New power state
+ @Input		bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+										 PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										 IMG_BOOL				bForced)
+{
+	PVRSRV_ERROR	eError;
+	PVRSRV_DATA*    psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_POWER_DEV *psPowerDevice;
+
+	psPowerDevice = psDeviceNode->psPowerDev;
+	if (!psPowerDevice)
+	{
+		return PVRSRV_OK;
+	}
+
+	if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
+	{
+		eNewPowerState = psPowerDevice->eDefaultPowerState;
+	}
+
+	if (psPowerDevice->eCurrentPowerState != eNewPowerState)
+	{
+		eError = PVRSRVDevicePrePowerStateKM(psPowerDevice,
+											 eNewPowerState,
+											 bForced);
+		if (eError != PVRSRV_OK)
+		{
+			goto ErrorExit;
+		}
+
+		eError = PVRSRVDevicePostPowerStateKM(psPowerDevice,
+											  eNewPowerState,
+											  bForced);
+		if (eError != PVRSRV_OK)
+		{
+			goto ErrorExit;
+		}
+
+		/* Signal Device Watchdog Thread about power mode change. */
+		if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON)
+		{
+			psPVRSRVData->ui32DevicesWatchdogPwrTrans++;
+
+			if (psPVRSRVData->ui32DevicesWatchdogTimeout == DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT)
+			{
+				if (psPVRSRVData->hDevicesWatchdogEvObj)
+				{
+					eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj);
+					PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+				}
+			}
+		}
+	}
+
+	return PVRSRV_OK;
+
+ErrorExit:
+
+	if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,
+				 "%s: Transition to %d was denied, Forced=%d",
+				 __func__, eNewPowerState, bForced));
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: Transition to %d FAILED (%s)",
+				 __func__, eNewPowerState, PVRSRVGetErrorStringKM(eError)));
+	}
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function     PVRSRVSetDeviceSystemPowerState
+@Description  Set the device into a new power state based on the system's
+              power state
+@Input        psDeviceNode       Device node
+@Input        eNewSysPowerState  New system power state
+@Return       PVRSRV_ERROR       PVRSRV_OK on success or an error otherwise
+*/ /**************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode,
+											 PVRSRV_SYS_POWER_STATE eNewSysPowerState)
+{
+	PVRSRV_ERROR	eError;
+	IMG_UINT        uiStage = 0;
+
+	PVRSRV_DEV_POWER_STATE eNewDevicePowerState =
+	  _IsSystemStatePowered(eNewSysPowerState) ? PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF;
+
+	/* If setting devices to default state, force idle all devices whose default state is off */
+	PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff =
+	  (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? PVRSRVDeviceIsDefaultStateOFF : NULL;
+
+	/* require a proper power state */
+	if (eNewSysPowerState == PVRSRV_SYS_POWER_STATE_Unspecified)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Prevent simultaneous SetPowerStateKM calls */
+	PVRSRVForcedPowerLock(psDeviceNode);
+
+	/* no power transition requested, so do nothing */
+	if (eNewSysPowerState == psDeviceNode->eCurrentSysPowerState)
+	{
+		PVRSRVPowerUnlock(psDeviceNode);
+		return PVRSRV_OK;
+	}
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = PVRSRVDeviceIdleRequestKM(psDeviceNode,
+										   pfnIsDefaultStateOff, IMG_TRUE);
+
+		if (eError == PVRSRV_OK)
+		{
+			break;
+		}
+		else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+		{
+			PVRSRVPowerUnlock(psDeviceNode);
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+			PVRSRVForcedPowerLock(psDeviceNode);
+		}
+		else
+		{
+			uiStage++;
+			goto ErrorExit;
+		}
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle DENIED", __func__));
+		uiStage++;
+		goto ErrorExit;
+	}
+
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, eNewDevicePowerState,
+										 IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		uiStage++;
+		goto ErrorExit;
+	}
+
+	psDeviceNode->eCurrentSysPowerState = eNewSysPowerState;
+
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	return PVRSRV_OK;
+
+ErrorExit:
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Transition from %d to %d FAILED (%s) at stage %u. Dumping debug info.",
+			 __func__, psDeviceNode->eCurrentSysPowerState, eNewSysPowerState,
+			 PVRSRVGetErrorStringKM(eError), uiStage));
+
+	PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+
+	return eError;
+}
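+
+/* Editor's sketch: a typical system-layer suspend/resume pairing built on
+ * this function (the surrounding pm hooks are assumed, not shown):
+ *
+ *    // suspend path
+ *    eError = PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+ *                                             PVRSRV_SYS_POWER_STATE_OFF);
+ *
+ *    // resume path
+ *    eError = PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+ *                                             PVRSRV_SYS_POWER_STATE_ON);
+ */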
+
+
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode,
+									   PFN_PRE_POWER				pfnDevicePrePower,
+									   PFN_POST_POWER				pfnDevicePostPower,
+									   PFN_SYS_DEV_PRE_POWER		pfnSystemPrePower,
+									   PFN_SYS_DEV_POST_POWER		pfnSystemPostPower,
+									   PFN_PRE_CLOCKSPEED_CHANGE	pfnPreClockSpeedChange,
+									   PFN_POST_CLOCKSPEED_CHANGE	pfnPostClockSpeedChange,
+									   PFN_FORCED_IDLE_REQUEST	pfnForcedIdleRequest,
+									   PFN_FORCED_IDLE_CANCEL_REQUEST	pfnForcedIdleCancelRequest,
+									   PFN_DUST_COUNT_REQUEST	pfnDustCountRequest,
+									   IMG_HANDLE					hDevCookie,
+									   PVRSRV_DEV_POWER_STATE		eCurrentPowerState,
+									   PVRSRV_DEV_POWER_STATE		eDefaultPowerState)
+{
+	PVRSRV_POWER_DEV *psPowerDevice;
+
+	PVR_ASSERT(!psDeviceNode->psPowerDev);
+
+	PVR_ASSERT(eCurrentPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+	PVR_ASSERT(eDefaultPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+	psPowerDevice = OSAllocMem(sizeof(PVRSRV_POWER_DEV));
+	if (psPowerDevice == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to alloc PVRSRV_POWER_DEV", __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* setup device for power manager */
+	psPowerDevice->pfnDevicePrePower = pfnDevicePrePower;
+	psPowerDevice->pfnDevicePostPower = pfnDevicePostPower;
+	psPowerDevice->pfnSystemPrePower = pfnSystemPrePower;
+	psPowerDevice->pfnSystemPostPower = pfnSystemPostPower;
+	psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange;
+	psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange;
+	psPowerDevice->pfnForcedIdleRequest = pfnForcedIdleRequest;
+	psPowerDevice->pfnForcedIdleCancelRequest = pfnForcedIdleCancelRequest;
+	psPowerDevice->pfnDustCountRequest = pfnDustCountRequest;
+	psPowerDevice->hSysData = psDeviceNode->psDevConfig->hSysData;
+	psPowerDevice->hDevCookie = hDevCookie;
+	psPowerDevice->eCurrentPowerState = eCurrentPowerState;
+	psPowerDevice->eDefaultPowerState = eDefaultPowerState;
+
+	psDeviceNode->psPowerDev = psPowerDevice;
+
+	return (PVRSRV_OK);
+}
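+
+/* Editor's sketch of a registration call; the callback names below are
+ * hypothetical placeholders for whatever the device layer provides:
+ *
+ *    eError = PVRSRVRegisterPowerDevice(psDeviceNode,
+ *                                       DevPrePowerState,   // PFN_PRE_POWER
+ *                                       DevPostPowerState,  // PFN_POST_POWER
+ *                                       NULL,               // no system pre hook
+ *                                       NULL,               // no system post hook
+ *                                       DevPreClockSpeed,
+ *                                       DevPostClockSpeed,
+ *                                       DevForcedIdle,
+ *                                       DevForcedIdleCancel,
+ *                                       DevDustCount,
+ *                                       hDevCookie,
+ *                                       PVRSRV_DEV_POWER_STATE_OFF,  // current
+ *                                       PVRSRV_DEV_POWER_STATE_OFF); // default
+ */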
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVRemovePowerDevice
+
+ @Description
+
+ Removes the device from the power management registry.
+
+ @Input		psDeviceNode : Device node
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	if (psDeviceNode->psPowerDev)
+	{
+		OSFreeMem(psDeviceNode->psPowerDev);
+		psDeviceNode->psPowerDev = NULL;
+	}
+
+	return (PVRSRV_OK);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVGetDevicePowerState
+
+ @Description
+
+	Return the device power state
+
+ @Input		psDeviceNode : Device node
+ @Output	pePowerState : Current power state
+
+ @Return	PVRSRV_ERROR_UNKNOWN_POWER_STATE if device could not be found. PVRSRV_OK otherwise.
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+									   PPVRSRV_DEV_POWER_STATE pePowerState)
+{
+	PVRSRV_POWER_DEV *psPowerDevice;
+
+	psPowerDevice = psDeviceNode->psPowerDev;
+	if (psPowerDevice == NULL)
+	{
+		return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+	}
+
+	*pePowerState = psPowerDevice->eCurrentPowerState;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVIsDevicePowered
+
+ @Description
+
+	Whether the device is powered, for the purposes of lockup detection.
+
+ @Input		psDeviceNode : Device node
+
+ @Return	IMG_BOOL
+
+******************************************************************************/
+IMG_EXPORT
+IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	PVRSRV_DEV_POWER_STATE ePowerState;
+
+	if (OSLockIsLocked(psDeviceNode->hPowerLock))
+	{
+		return IMG_FALSE;
+	}
+
+	if (PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState) != PVRSRV_OK)
+	{
+		return IMG_FALSE;
+	}
+
+	return (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDevicePreClockSpeedChange
+
+ @Description
+
+	Notification from system layer that a device clock speed change is about to happen.
+
+ @Input		psDeviceNode : Device node
+ @Input		bIdleDevice : whether the device should be idled
+ @Input		pvInfo : unused
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+											 IMG_BOOL	bIdleDevice,
+											 void	*pvInfo)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_POWER_DEV	*psPowerDevice;
+	IMG_UINT64			ui64StartTimer, ui64StopTimer;
+
+	PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+	ui64StartTimer = PVRSRVProcessStatsGetTimeUs();
+
+	/* This mutex is released in PVRSRVDevicePostClockSpeedChange. */
+	eError = PVRSRVPowerLock(psDeviceNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: failed to acquire lock (%s)",
+				 __func__, PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	psPowerDevice = psDeviceNode->psPowerDev;
+	if (psPowerDevice)
+	{
+		if ((psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+		{
+			LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+			{	/* We can change the clock speed if the device is either IDLE or OFF */
+				eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
+
+				if (eError == PVRSRV_OK)
+				{
+					break;
+				}
+				else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+				{
+					PVRSRV_ERROR	eError2;
+
+					PVRSRVPowerUnlock(psDeviceNode);
+					OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+					eError2 = PVRSRVPowerLock(psDeviceNode);
+
+					if (eError2 != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR,
+								 "%s: failed to acquire lock (%s)",
+								 __func__, PVRSRVGetErrorStringKM(eError2)));
+						return eError2;
+					}
+				}
+				else
+				{
+					PVRSRVPowerUnlock(psDeviceNode);
+					return eError;
+				}
+			} END_LOOP_UNTIL_TIMEOUT();
+
+			if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle DENIED", __func__));
+				PVRSRVPowerUnlock(psDeviceNode);
+				return eError;
+			}
+		}
+
+		eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie,
+		                                               psPowerDevice->eCurrentPowerState);
+	}
+
+	ui64StopTimer = PVRSRVProcessStatsGetTimeUs();
+
+	InsertPowerTimeStatisticExtraPre(ui64StartTimer, ui64StopTimer);
+
+	return eError;
+}
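+
+/* Editor's sketch: the pre/post notifications bracket the actual clock change
+ * and carry the power lock between them, so a DVFS transition looks like
+ * (SetGpuClockRate is a hypothetical platform call):
+ *
+ *    if (PVRSRVDevicePreClockSpeedChange(psDeviceNode, IMG_TRUE, NULL)
+ *        == PVRSRV_OK)
+ *    {
+ *        SetGpuClockRate(ui32NewRateHz);
+ *        PVRSRVDevicePostClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);
+ *    }
+ */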
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDevicePostClockSpeedChange
+
+ @Description
+
+	Notification from system layer that a device clock speed change has just happened.
+
+ @Input		psDeviceNode : Device node
+ @Input		bIdleDevice : whether the device had been idled
+ @Input		pvInfo : unused
+
+ @Return	void
+
+******************************************************************************/
+void PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+									  IMG_BOOL		bIdleDevice,
+									  void		*pvInfo)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_POWER_DEV	*psPowerDevice;
+	IMG_UINT64			ui64StartTimer, ui64StopTimer;
+
+	PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+	ui64StartTimer = PVRSRVProcessStatsGetTimeUs();
+
+	psPowerDevice = psDeviceNode->psPowerDev;
+	if (psPowerDevice)
+	{
+		eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie,
+														psPowerDevice->eCurrentPowerState);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)",
+					 __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+		}
+
+		if ((psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+		{
+			eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to cancel forced IDLE.", __func__));
+			}
+		}
+	}
+
+	/* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. */
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	OSAtomicIncrement(&psDeviceNode->iNumClockSpeedChanges);
+
+	ui64StopTimer = PVRSRVProcessStatsGetTimeUs();
+
+	InsertPowerTimeStatisticExtraPost(ui64StartTimer, ui64StopTimer);
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDeviceDustCountChange
+
+ @Description
+
+	Request from the system layer to change the device's dust count.
+
+ @Input		psDeviceNode : Device node
+ @Input		ui32DustCount : dust count to be set
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceDustCountChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+						IMG_UINT32	ui32DustCount)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_POWER_DEV	*psPowerDevice;
+
+	psPowerDevice = psDeviceNode->psPowerDev;
+	if (psPowerDevice)
+	{
+		PVRSRV_DEV_POWER_STATE eDevicePowerState;
+
+		eError = PVRSRVPowerLock(psDeviceNode);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)",
+					 __func__, PVRSRVGetErrorStringKM(eError)));
+			return eError;
+		}
+
+		eDevicePowerState = psPowerDevice->eCurrentPowerState;
+		if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON)
+		{
+			/* Device must be idle to change dust count */
+			LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+			{
+				eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_FALSE);
+				if (eError == PVRSRV_OK)
+				{
+					break;
+				}
+				else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+				{
+					PVRSRV_ERROR	eError2;
+
+					PVRSRVPowerUnlock(psDeviceNode);
+					OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+					eError2 = PVRSRVPowerLock(psDeviceNode);
+
+					if (eError2 != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)",
+								 __func__, PVRSRVGetErrorStringKM(eError2)));
+						return eError2;
+					}
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s: error occurred whilst forcing idle (%s)",
+							 __func__, PVRSRVGetErrorStringKM(eError)));
+					goto ErrorExit;
+				}
+			} END_LOOP_UNTIL_TIMEOUT();
+
+			if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle DENIED", __func__));
+				goto ErrorExit;
+			}
+		}
+
+		if (psPowerDevice->pfnDustCountRequest != NULL)
+		{
+			PVRSRV_ERROR	eError2 = psPowerDevice->pfnDustCountRequest(psPowerDevice->hDevCookie, ui32DustCount);
+
+			if (eError2 != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)",
+						 __func__, psDeviceNode,
+						 PVRSRVGetErrorStringKM(eError2)));
+			}
+		}
+
+		if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON)
+		{
+			eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to cancel forced IDLE.", __func__));
+				goto ErrorExit;
+			}
+		}
+
+		PVRSRVPowerUnlock(psDeviceNode);
+	}
+
+	return eError;
+
+ErrorExit:
+	PVRSRVPowerUnlock(psDeviceNode);
+	return eError;
+}
+
+
+/******************************************************************************
+ End of file (power.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/process_stats.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/process_stats.c
new file mode 100644
index 0000000..355eff8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/process_stats.c
@@ -0,0 +1,3459 @@
+/*************************************************************************/ /*!
+@File
+@Title          Process based statistics
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Manages a collection of statistics based around a process
+                and referenced via OS agnostic methods.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "lock.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "lists.h"
+#include "process_stats.h"
+#include "ri_server.h"
+#include "hash.h"
+#include "connection_server.h"
+#include "pvrsrv.h"
+#include "proc_stats.h"
+
+/* Enabled OS Statistics entries: DEBUGFS on Linux, undefined for other OSs */
+#if defined(LINUX) && ( \
+	defined(PVRSRV_ENABLE_PERPID_STATS) || \
+	defined(PVRSRV_ENABLE_CACHEOP_STATS) || \
+	defined(PVRSRV_ENABLE_MEMORY_STATS) || \
+	defined(PVR_RI_DEBUG) )
+#define ENABLE_DEBUGFS_PIDS
+#endif
+
+/*
+ *  Maximum history of process statistics that will be kept.
+ */
+#define MAX_DEAD_LIST_PROCESSES  (10)
+
+/*
+ * Definition of all the strings used to format process based statistics.
+ */
+
+/* Array of Process stat type defined using the X-Macro */
+#define X(stat_type, stat_str) stat_str,
+const IMG_CHAR *const pszProcessStatType[PVRSRV_PROCESS_STAT_TYPE_COUNT] = { PVRSRV_PROCESS_STAT_KEY };
+#undef X
+
+/* Array of Driver stat type defined using the X-Macro */
+#define X(stat_type, stat_str) stat_str,
+const IMG_CHAR *const pszDriverStatType[PVRSRV_DRIVER_STAT_TYPE_COUNT] = { PVRSRV_DRIVER_STAT_KEY };
+#undef X
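+
+/* Editor's note: the X-macro pattern above assumes each PVRSRV_*_STAT_KEY is
+ * a list of X(stat_type, stat_str) pairs, e.g. (hypothetical entries):
+ *
+ *    #define PVRSRV_PROCESS_STAT_KEY \
+ *        X(PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS,    "Connections") \
+ *        X(PVRSRV_PROCESS_STAT_TYPE_MAXCONNECTIONS, "ConnectionsMax")
+ *
+ * Defining X to keep stat_str expands the key into the string arrays here,
+ * while the matching enum is produced elsewhere by keeping stat_type.
+ */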
+
+
+static const IMG_CHAR *const pszProcessStatFmt[] = {
+	"Connections                       %10d\n", /* PVRSRV_STAT_TYPE_CONNECTIONS */
+	"ConnectionsMax                    %10d\n", /* PVRSRV_STAT_TYPE_MAXCONNECTIONS */
+
+	"RenderContextOutOfMemoryEvents    %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_OOMS */
+	"RenderContextPartialRenders       %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_PRS */
+	"RenderContextGrows                %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_GROWS */
+	"RenderContextPushGrows            %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS */
+	"RenderContextTAStores             %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES */
+	"RenderContext3DStores             %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES */
+	"RenderContextSHStores             %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES */
+	"RenderContextCDMStores            %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES */
+	"ZSBufferRequestsByApp             %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP */
+	"ZSBufferRequestsByFirmware        %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW */
+	"FreeListGrowRequestsByApp         %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP */
+	"FreeListGrowRequestsByFirmware    %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW */
+	"FreeListInitialPages              %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT */
+	"FreeListMaxPages                  %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES */
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+	"MemoryUsageKMalloc                %10d\n", /* PVRSRV_STAT_TYPE_KMALLOC */
+	"MemoryUsageKMallocMax             %10d\n", /* PVRSRV_STAT_TYPE_MAX_KMALLOC */
+	"MemoryUsageVMalloc                %10d\n", /* PVRSRV_STAT_TYPE_VMALLOC */
+	"MemoryUsageVMallocMax             %10d\n", /* PVRSRV_STAT_TYPE_MAX_VMALLOC */
+#else
+	"","","","",                                /* Empty strings if these stats are not logged */
+#endif
+	"MemoryUsageAllocPTMemoryUMA       %10d\n", /* PVRSRV_STAT_TYPE_ALLOC_PAGES_PT_UMA */
+	"MemoryUsageAllocPTMemoryUMAMax    %10d\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_PAGES_PT_UMA */
+	"MemoryUsageVMapPTUMA              %10d\n", /* PVRSRV_STAT_TYPE_VMAP_PT_UMA */
+	"MemoryUsageVMapPTUMAMax           %10d\n", /* PVRSRV_STAT_TYPE_MAX_VMAP_PT_UMA */
+	"MemoryUsageAllocPTMemoryLMA       %10d\n", /* PVRSRV_STAT_TYPE_ALLOC_PAGES_PT_LMA */
+	"MemoryUsageAllocPTMemoryLMAMax    %10d\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_PAGES_PT_LMA */
+	"MemoryUsageIORemapPTLMA           %10d\n", /* PVRSRV_STAT_TYPE_IOREMAP_PT_LMA */
+	"MemoryUsageIORemapPTLMAMax        %10d\n", /* PVRSRV_STAT_TYPE_MAX_IOREMAP_PT_LMA */
+	"MemoryUsageAllocGPUMemLMA         %10d\n", /* PVRSRV_STAT_TYPE_ALLOC_LMA_PAGES */
+	"MemoryUsageAllocGPUMemLMAMax      %10d\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_LMA_PAGES */
+	"MemoryUsageAllocGPUMemUMA         %10d\n", /* PVRSRV_STAT_TYPE_ALLOC_UMA_PAGES */
+	"MemoryUsageAllocGPUMemUMAMax      %10d\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_UMA_PAGES */
+	"MemoryUsageMappedGPUMemUMA/LMA    %10d\n", /* PVRSRV_STAT_TYPE_MAP_UMA_LMA_PAGES */
+	"MemoryUsageMappedGPUMemUMA/LMAMax %10d\n", /* PVRSRV_STAT_TYPE_MAX_MAP_UMA_LMA_PAGES */
+};
+
+static_assert((sizeof(pszProcessStatFmt)/sizeof(pszProcessStatFmt[0])) == PVRSRV_PROCESS_STAT_TYPE_COUNT,
+	      "pszProcessStatFmt must have PVRSRV_PROCESS_STAT_TYPE_COUNT entries");
+
+/* Structure used in the hash table to track statistic entries */
+typedef struct {
+	size_t	   uiSizeInBytes;
+	IMG_PID	   uiPid;
+} _PVR_STATS_TRACKING_HASH_ENTRY;
+
+/* Function used internally to decrement tracked per-process statistic entries */
+static void _StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry,
+                                    PVRSRV_MEM_ALLOC_TYPE eAllocType);
+
+/*
+ *  Functions for printing the information stored...
+ */
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+void  ProcessStatsPrintElements(void *pvFile,
+								void *pvStatPtr,
+								OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+void RawProcessStatsPrintElements(void *pvFile,
+                                  void *pvStatPtr,
+                                  OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+#endif
+
+void  MemStatsPrintElements(void *pvFile,
+							void *pvStatPtr,
+							OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void  RIMemStatsPrintElements(void *pvFile,
+							  void *pvStatPtr,
+							  OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void  PowerStatsPrintElements(void *pvFile,
+							  void *pvStatPtr,
+							  OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void  GlobalStatsPrintElements(void *pvFile,
+							   void *pvStatPtr,
+							   OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void  CacheOpStatsPrintElements(void *pvFile,
+							  void *pvStatPtr,
+							  OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+static void StripBadChars( IMG_CHAR *psStr);
+#endif
+
+/* Macro for fetching stat values */
+#define GET_GLOBAL_STAT_VALUE(idx) gsGlobalStats.ui32StatValue[idx]
+/*
+ *  Macros for updating stat values.
+ */
+#define UPDATE_MAX_VALUE(a,b)					do { if ((b) > (a)) {(a) = (b);} } while(0)
+#define INCREASE_STAT_VALUE(ptr,var,val)		do { (ptr)->i32StatValue[(var)] += (val); if ((ptr)->i32StatValue[(var)] > (ptr)->i32StatValue[(var##_MAX)]) {(ptr)->i32StatValue[(var##_MAX)] = (ptr)->i32StatValue[(var)];} } while(0)
+#define INCREASE_GLOBAL_STAT_VALUE(var,idx,val)		do { (var).ui32StatValue[(idx)] += (val); if ((var).ui32StatValue[(idx)] > (var).ui32StatValue[(idx##_MAX)]) {(var).ui32StatValue[(idx##_MAX)] = (var).ui32StatValue[(idx)];} } while(0)
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+/* Allow stats to go negative */
+#define DECREASE_STAT_VALUE(ptr,var,val)		do { (ptr)->i32StatValue[(var)] -= (val); } while(0)
+#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val)		do { (var).ui32StatValue[(idx)] -= (val); } while(0)
+#else
+#define DECREASE_STAT_VALUE(ptr,var,val)		do { if ((ptr)->i32StatValue[(var)] >= (val)) { (ptr)->i32StatValue[(var)] -= (val); } else { (ptr)->i32StatValue[(var)] = 0; } } while(0)
+#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val)		do { if ((var).ui32StatValue[(idx)] >= (val)) { (var).ui32StatValue[(idx)] -= (val); } else { (var).ui32StatValue[(idx)] = 0; } } while(0)
+#endif
+#define MAX_CACHEOP_STAT 16
+#define INCREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x+1) >= MAX_CACHEOP_STAT ? 0 : (x+1))
+#define DECREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x-1) < 0 ? (MAX_CACHEOP_STAT-1) : (x-1))
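+
+/* Editor's note: INCREASE_STAT_VALUE and INCREASE_GLOBAL_STAT_VALUE rely on
+ * token pasting (var##_MAX / idx##_MAX), so every tracked index passed to
+ * them must have a companion <index>_MAX enumerator tracking its high-water
+ * mark, e.g. (hypothetical names):
+ *
+ *    INCREASE_STAT_VALUE(psProcessStats,
+ *                        PVRSRV_PROCESS_STAT_TYPE_KMALLOC, // needs ..._KMALLOC_MAX
+ *                        uiBytes);
+ */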
+
+/*
+ * Structures for holding statistics...
+ */
+typedef enum
+{
+	PVRSRV_STAT_STRUCTURE_PROCESS = 1,
+	PVRSRV_STAT_STRUCTURE_RENDER_CONTEXT = 2,
+	PVRSRV_STAT_STRUCTURE_MEMORY = 3,
+	PVRSRV_STAT_STRUCTURE_RIMEMORY = 4,
+	PVRSRV_STAT_STRUCTURE_CACHEOP = 5
+} PVRSRV_STAT_STRUCTURE_TYPE;
+
+#define MAX_PROC_NAME_LENGTH   (32)
+
+typedef struct _PVRSRV_PROCESS_STATS_ {
+	/* Structure type (must be first!) */
+	PVRSRV_STAT_STRUCTURE_TYPE			eStructureType;
+
+	/* Linked list pointers */
+	struct _PVRSRV_PROCESS_STATS_*		psNext;
+	struct _PVRSRV_PROCESS_STATS_*		psPrev;
+
+	/* Per-process lock that must be held
+	 * when editing its members */
+	POS_LOCK							hLock;
+
+	/* OS level process ID */
+	IMG_PID								pid;
+	IMG_UINT32							ui32RefCount;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	ATOMIC_T							iMemRefCount;
+#else
+	IMG_UINT32							ui32MemRefCount;
+#endif
+
+	/* Folder name used to store the statistic */
+	IMG_CHAR							szFolderName[MAX_PROC_NAME_LENGTH];
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+	/* OS specific data */
+	void								*pvOSPidFolderData;
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+	void								*pvOSPidEntryData;
+#endif
+#endif
+
+	/* Stats... */
+	IMG_INT32							i32StatValue[PVRSRV_PROCESS_STAT_TYPE_COUNT];
+	IMG_UINT32							ui32StatAllocFlags;
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+	struct _CACHEOP_STRUCT_  {
+		PVRSRV_CACHE_OP uiCacheOp;
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+		IMG_DEV_VIRTADDR sDevVAddr;
+		IMG_DEV_PHYADDR sDevPAddr;
+		RGXFWIF_DM eFenceOpType;
+#endif
+		IMG_DEVMEM_SIZE_T uiOffset;
+		IMG_DEVMEM_SIZE_T uiSize;
+		IMG_UINT64 ui64ExecuteTime;
+		IMG_BOOL bRangeBasedFlush;
+		IMG_BOOL bUserModeFlush;
+		IMG_UINT32 ui32OpSeqNum;
+		IMG_BOOL bIsFence;
+		IMG_PID ownerPid;
+	} 									asCacheOp[MAX_CACHEOP_STAT];
+	IMG_INT32 							uiCacheOpWriteIndex;
+	struct _PVRSRV_CACHEOP_STATS_*		psCacheOpStats;
+#endif
+
+	/* Other statistics structures */
+	struct _PVRSRV_MEMORY_STATS_*		psMemoryStats;
+	struct _PVRSRV_RI_MEMORY_STATS_*	psRIMemoryStats;
+} PVRSRV_PROCESS_STATS;
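+/* Note: the "must be first!" requirement on eStructureType exists because
+ * generic stat callbacks receive an opaque void* and recover the concrete
+ * type by dereferencing the first member; see _PVRSRVIncrMemStatRefCount()
+ * below, which casts the same pointer to both PVRSRV_STAT_STRUCTURE_TYPE*
+ * and PVRSRV_PROCESS_STATS*. */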
+
+typedef struct _PVRSRV_MEM_ALLOC_REC_
+{
+    PVRSRV_MEM_ALLOC_TYPE  eAllocType;
+    IMG_UINT64			ui64Key;
+	void				*pvCpuVAddr;
+	IMG_CPU_PHYADDR		sCpuPAddr;
+	size_t				uiBytes;
+	void				*pvPrivateData;
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+	void				*pvAllocdFromFile;
+	IMG_UINT32			ui32AllocdFromLine;
+#endif
+	IMG_PID				pid;
+	struct _PVRSRV_MEM_ALLOC_REC_	*psNext;
+	struct _PVRSRV_MEM_ALLOC_REC_	**ppsThis;
+} PVRSRV_MEM_ALLOC_REC;
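+/* Note: ui64Key identifies the allocation when its record is removed again.
+ * As the switch in PVRSRVStatsAddMemAllocRecord() below shows, it holds the
+ * CPU virtual address for virtually-mapped allocation types (KMALLOC,
+ * VMALLOC, VMAP/IOREMAP, MAP_UMA_LMA_PAGES) and the CPU physical address
+ * for the page-based LMA/UMA allocation types. */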
+
+typedef struct _PVRSRV_MEMORY_STATS_ {
+	/* Structure type (must be first!) */
+	PVRSRV_STAT_STRUCTURE_TYPE  eStructureType;
+
+	/* OS specific data */
+	void						*pvOSMemEntryData;
+
+	/* Stats... */
+	PVRSRV_MEM_ALLOC_REC		*psMemoryRecords;
+} PVRSRV_MEMORY_STATS;
+
+typedef struct _PVRSRV_RI_MEMORY_STATS_ {
+	/* Structure type (must be first!) */
+	PVRSRV_STAT_STRUCTURE_TYPE  eStructureType;
+
+	/* OS level process ID */
+	IMG_PID						pid;
+
+#if defined(PVR_RI_DEBUG) && defined(ENABLE_DEBUGFS_PIDS)
+	/* OS specific data */
+	void						*pvOSRIMemEntryData;
+#endif
+} PVRSRV_RI_MEMORY_STATS;
+
+typedef struct _PVRSRV_CACHEOP_STATS_ {
+	/* Structure type (must be first!) */
+	PVRSRV_STAT_STRUCTURE_TYPE  eStructureType;
+
+	/* OS specific data */
+	void						*pvOSCacheOpEntryData;
+} PVRSRV_CACHEOP_STATS;
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+static IMPLEMENT_LIST_INSERT(PVRSRV_MEM_ALLOC_REC)
+static IMPLEMENT_LIST_REMOVE(PVRSRV_MEM_ALLOC_REC)
+#endif
+
+/*
+ *  Global Boolean to flag when the statistics are ready to monitor
+ *  memory allocations.
+ */
+static  IMG_BOOL  bProcessStatsInitialised = IMG_FALSE;
+
+/*
+ * Linked lists for process stats. Live stats are for processes which are still running
+ * and the dead list holds those that have exited.
+ */
+static PVRSRV_PROCESS_STATS*  g_psLiveList = NULL;
+static PVRSRV_PROCESS_STATS*  g_psDeadList = NULL;
+
+static POS_LOCK g_psLinkedListLock = NULL;
+/* The kernel's lockdep feature cannot differentiate between different instances of the
+ * same lock type, so it groups all such instances under one class. The consequence is
+ * that, if lock acquisition is nested on different instances, lockdep generates a false
+ * warning about a possible deadlock due to recursive lock acquisition.
+ * Hence we create the following subclasses to explicitly inform lockdep of such safe lock nesting. */
+#define PROCESS_LOCK_SUBCLASS_CURRENT	1
+#define PROCESS_LOCK_SUBCLASS_PREV 		2
+#define PROCESS_LOCK_SUBCLASS_NEXT 		3
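+/* For example, _AddProcessStatsToFrontOfLiveList() below takes the new
+ * element's lock with PROCESS_LOCK_SUBCLASS_CURRENT and then the current
+ * list head's lock with PROCESS_LOCK_SUBCLASS_PREV; without distinct
+ * subclasses, lockdep would report this nesting of two process-stats locks
+ * as a recursive acquisition. */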
+#if defined(ENABLE_DEBUGFS_PIDS)
+/*
+ * Pointer to OS folder to hold PID folders.
+ */
+static IMG_CHAR *pszOSLivePidFolderName = "pids";
+static IMG_CHAR *pszOSDeadPidFolderName = "pids_retired";
+static void *pvOSLivePidFolder = NULL;
+static void *pvOSDeadPidFolder = NULL;
+#endif
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+static void *pvOSProcStats = NULL;
+#endif
+
+/* Global driver statistics */
+typedef struct _GLOBAL_STATS_
+{
+	IMG_UINT32  ui32StatValue[PVRSRV_DRIVER_STAT_TYPE_COUNT];
+	POS_LOCK   hGlobalStatsLock;
+} GLOBAL_STATS;
+
+static void *pvOSGlobalMemEntryRef = NULL;
+static IMG_CHAR* const pszDriverStatFilename = "driver_stats";
+static GLOBAL_STATS gsGlobalStats;
+
+#define HASH_INITIAL_SIZE 5
+/* A hash table used to store the size of any vmalloc'd allocation
+ * against its address (not needed for kmallocs as we can use ksize()) */
+static HASH_TABLE* gpsSizeTrackingHashTable;
+static POS_LOCK	 gpsSizeTrackingHashTableLock;
+
+static void _AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats);
+static void _AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats);
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+static IMG_UINT32 _PVRSRVIncrMemStatRefCount(void *pvStatPtr);
+static IMG_UINT32 _PVRSRVDecrMemStatRefCount(void *pvStatPtr);
+#endif
+#if defined(PVRSRV_ENABLE_PERPID_STATS) || !defined(ENABLE_DEBUGFS_PIDS)
+static void _DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats);
+#endif
+static void _RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats);
+#if defined(ENABLE_DEBUGFS_PIDS)
+static void _RemovePIDOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats);
+static void _CreatePIDOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats, void *pvOSPidFolder);
+#endif
+static void _DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                   PVRSRV_PROCESS_STATS* psProcessStats,
+                                   IMG_UINT32 uiBytes);
+/*
+ * Power statistics related definitions
+ */
+
+/* For the time being, use an exponentially weighted moving average with a
+ * 1/4 weighting for the new measurement.
+ */
+#define MEAN_TIME(A, B)     ( ((3*(A))/4) + ((1 * (B))/4) )
+
+#define UPDATE_TIME(time, newtime) \
+	((time) > 0 ? MEAN_TIME((time),(newtime)) : (newtime))
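+/* Worked example (integer arithmetic): with a previous average of 100 and a
+ * new measurement of 200 (in the caller's clock units),
+ *     MEAN_TIME(100, 200) = (3*100)/4 + 200/4 = 75 + 50 = 125
+ * while UPDATE_TIME(0, 200) simply seeds the average with 200. The divisions
+ * truncate, so a little precision is lost on each update. */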
+
+/* Enum to be used as input to GET_POWER_STAT_INDEX */
+typedef enum
+{
+	DEVICE     = 0,
+	SYSTEM     = 1,
+	POST_POWER = 0,
+	PRE_POWER  = 2,
+	POWER_OFF  = 0,
+	POWER_ON   = 4,
+	NOT_FORCED = 0,
+	FORCED     = 8,
+} PVRSRV_POWER_STAT_TYPE;
+
+/* Macro used to access one of the power timing statistics inside an array */
+#define GET_POWER_STAT_INDEX(forced,powon,prepow,system) \
+	((forced) + (powon) + (prepow) + (system))
+
+/* For the power timing stats we need 16 variables to store all the
+ * combinations of forced/not forced, power-on/power-off, pre-power/post-power
+ * and device/system statistics
+ */
+#define NUM_POWER_STATS        (16)
+static IMG_UINT32 aui32PowerTimingStats[NUM_POWER_STATS];
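+/* Worked example: a forced power-on, measured before the power transition,
+ * on the system timer lands at index
+ *     GET_POWER_STAT_INDEX(FORCED, POWER_ON, PRE_POWER, SYSTEM) = 8+4+2+1 = 15
+ * i.e. the enum values act as independent bit flags packed into a 4-bit
+ * index, which is why exactly NUM_POWER_STATS (16) slots are needed. */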
+
+static void *pvOSPowerStatsEntryData = NULL;
+
+typedef struct _EXTRA_POWER_STATS_
+{
+	IMG_UINT64	ui64PreClockSpeedChangeDuration;
+	IMG_UINT64	ui64BetweenPreEndingAndPostStartingDuration;
+	IMG_UINT64	ui64PostClockSpeedChangeDuration;
+} EXTRA_POWER_STATS;
+
+#define NUM_EXTRA_POWER_STATS	10
+
+static EXTRA_POWER_STATS asClockSpeedChanges[NUM_EXTRA_POWER_STATS];
+static IMG_UINT32	ui32ClockSpeedIndexStart = 0, ui32ClockSpeedIndexEnd = 0;
+
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+                              IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+                              IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower)
+{
+	IMG_UINT32 *pui32Stat;
+	IMG_UINT64 ui64DeviceDiff = ui64DevEndTime - ui64DevStartTime;
+	IMG_UINT64 ui64SystemDiff = ui64SysEndTime - ui64SysStartTime;
+	IMG_UINT32 ui32Index;
+
+	ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
+	                                 bPowerOn ? POWER_ON : POWER_OFF,
+	                                 bPrePower ? PRE_POWER : POST_POWER,
+	                                 DEVICE);
+	pui32Stat = &aui32PowerTimingStats[ui32Index];
+	*pui32Stat = UPDATE_TIME(*pui32Stat, ui64DeviceDiff);
+
+	ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
+	                                 bPowerOn ? POWER_ON : POWER_OFF,
+	                                 bPrePower ? PRE_POWER : POST_POWER,
+	                                 SYSTEM);
+	pui32Stat = &aui32PowerTimingStats[ui32Index];
+	*pui32Stat = UPDATE_TIME(*pui32Stat, ui64SystemDiff);
+}
+
+static IMG_UINT64 ui64PreClockSpeedChangeMark = 0;
+
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
+{
+	asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PreClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
+
+	ui64PreClockSpeedChangeMark = OSClockus();
+}
+
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
+{
+	IMG_UINT64 ui64Duration = ui64StartTimer - ui64PreClockSpeedChangeMark;
+
+	PVR_ASSERT(ui64PreClockSpeedChangeMark > 0);
+
+	asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64BetweenPreEndingAndPostStartingDuration = ui64Duration;
+	asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PostClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
+
+	ui32ClockSpeedIndexEnd = (ui32ClockSpeedIndexEnd + 1) % NUM_EXTRA_POWER_STATS;
+
+	if (ui32ClockSpeedIndexEnd == ui32ClockSpeedIndexStart)
+	{
+		ui32ClockSpeedIndexStart = (ui32ClockSpeedIndexStart + 1) % NUM_EXTRA_POWER_STATS;
+	}
+
+	ui64PreClockSpeedChangeMark = 0;
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       _FindProcessStatsInLiveList
+@Description    Searches the Live Process List for a statistics structure that
+                matches the PID given.
+@Input          pid  Process to search for.
+@Return         Pointer to stats structure for the process, or NULL if not found.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStatsInLiveList(IMG_PID pid)
+{
+	PVRSRV_PROCESS_STATS*  psProcessStats = g_psLiveList;
+
+	while (psProcessStats != NULL)
+	{
+		if (psProcessStats->pid == pid)
+		{
+			return psProcessStats;
+		}
+
+		psProcessStats = psProcessStats->psNext;
+	}
+
+	return NULL;
+} /* _FindProcessStatsInLiveList */
+
+/*************************************************************************/ /*!
+@Function       _FindProcessStatsInDeadList
+@Description    Searches the Dead Process List for a statistics structure that
+                matches the PID given.
+@Input          pid  Process to search for.
+@Return         Pointer to stats structure for the process, or NULL if not found.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStatsInDeadList(IMG_PID pid)
+{
+	PVRSRV_PROCESS_STATS*  psProcessStats = g_psDeadList;
+
+	while (psProcessStats != NULL)
+	{
+		if (psProcessStats->pid == pid)
+		{
+			return psProcessStats;
+		}
+
+		psProcessStats = psProcessStats->psNext;
+	}
+
+	return NULL;
+} /* _FindProcessStatsInDeadList */
+
+/*************************************************************************/ /*!
+@Function       _FindProcessStats
+@Description    Searches the Live and Dead Process Lists for a statistics
+                structure that matches the PID given.
+@Input          pid  Process to search for.
+@Return         Pointer to stats structure for the process, or NULL if not found.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStats(IMG_PID pid)
+{
+	PVRSRV_PROCESS_STATS*  psProcessStats = _FindProcessStatsInLiveList(pid);
+
+	if (psProcessStats == NULL)
+	{
+		psProcessStats = _FindProcessStatsInDeadList(pid);
+	}
+
+	return psProcessStats;
+} /* _FindProcessStats */
+
+/*************************************************************************/ /*!
+@Function       _CompressMemoryUsage
+@Description    Reduces memory usage by deleting old statistics data.
+                This function requires that the list lock is not held!
+*/ /**************************************************************************/
+static void
+_CompressMemoryUsage(void)
+{
+	PVRSRV_PROCESS_STATS*  psProcessStats;
+	PVRSRV_PROCESS_STATS*  psProcessStatsToBeFreed;
+	IMG_UINT32  ui32ItemsRemaining;
+
+	/*
+	 *  We hold the lock whilst checking the list, but we'll release it
+	 *  before freeing memory (as that will require the lock too)!
+	 */
+	OSLockAcquire(g_psLinkedListLock);
+
+	/* Check that the dead list is not bigger than the max size... */
+	psProcessStats          = g_psDeadList;
+	psProcessStatsToBeFreed = NULL;
+	ui32ItemsRemaining      = MAX_DEAD_LIST_PROCESSES;
+
+	while (psProcessStats != NULL  &&  ui32ItemsRemaining > 0)
+	{
+		ui32ItemsRemaining--;
+		if (ui32ItemsRemaining == 0)
+		{
+			/* This is the last allowed process, cut the linked list here! */
+			psProcessStatsToBeFreed = psProcessStats->psNext;
+			psProcessStats->psNext  = NULL;
+		}
+		else
+		{
+			psProcessStats = psProcessStats->psNext;
+		}
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+
+	/* Any processes stats remaining will need to be destroyed... */
+	while (psProcessStatsToBeFreed != NULL)
+	{
+		PVRSRV_PROCESS_STATS*  psNextProcessStats = psProcessStatsToBeFreed->psNext;
+
+		psProcessStatsToBeFreed->psNext = NULL;
+#if defined(ENABLE_DEBUGFS_PIDS)
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockAcquire(psProcessStatsToBeFreed->hLock);
+#endif
+		_RemovePIDOSStatisticEntries(psProcessStatsToBeFreed);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psProcessStatsToBeFreed->hLock);
+#endif
+#else
+		_DestroyProcessStat(psProcessStatsToBeFreed);
+#endif
+		psProcessStatsToBeFreed = psNextProcessStats;
+	}
+} /* _CompressMemoryUsage */
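+/* Example: if the dead list holds more than MAX_DEAD_LIST_PROCESSES entries,
+ * the loop above cuts the list after the last permitted element and the
+ * surplus (oldest) entries are then destroyed outside g_psLinkedListLock. */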
+
+/* These functions move the process stats from the live to the dead list.
+ * _MoveProcessToDeadList moves the entry in the global lists and
+ * it needs to be protected by g_psLinkedListLock.
+ * _MoveProcessToDeadListDebugFS performs the OS calls and it
+ * shouldn't be used under g_psLinkedListLock because this could generate a
+ * lockdep warning. */
+static void
+_MoveProcessToDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	/* Take the element out of the live list and add to the front of the dead list... */
+	_RemoveProcessStatsFromList(psProcessStats);
+	_AddProcessStatsToFrontOfDeadList(psProcessStats);
+} /* _MoveProcessToDeadList */
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+static void
+_MoveProcessToDeadListDebugFS(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	/* Transfer the OS entries to the folder for dead processes... */
+	_RemovePIDOSStatisticEntries(psProcessStats);
+	_CreatePIDOSStatisticEntries(psProcessStats, pvOSDeadPidFolder);
+} /* _MoveProcessToDeadListDebugFS */
+#endif
+
+/* These functions move the process stats from the dead to the live list.
+ * _MoveProcessToLiveList moves the entry in the global lists and
+ * it needs to be protected by g_psLinkedListLock.
+ * _MoveProcessToLiveListDebugFS performs the OS calls and it
+ * shouldn't be used under g_psLinkedListLock because this could generate a
+ * lockdep warning. */
+static void
+_MoveProcessToLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	/* Take the element out of the dead list and add to the front of the live list... */
+	_RemoveProcessStatsFromList(psProcessStats);
+	_AddProcessStatsToFrontOfLiveList(psProcessStats);
+} /* _MoveProcessToLiveList */
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+static void
+_MoveProcessToLiveListDebugFS(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	/* Transfer the OS entries to the folder for live processes... */
+	_RemovePIDOSStatisticEntries(psProcessStats);
+	_CreatePIDOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+} /* _MoveProcessToLiveListDebugFS */
+#endif
+
+/*************************************************************************/ /*!
+@Function       _AddProcessStatsToFrontOfLiveList
+@Description    Add a statistic to the live list head.
+@Input          psProcessStats  Process stats to add.
+*/ /**************************************************************************/
+static void
+_AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	/* This function should always be called under the global list lock g_psLinkedListLock. */
+	PVR_ASSERT(psProcessStats != NULL);
+
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+	if (g_psLiveList != NULL)
+	{
+		PVR_ASSERT(psProcessStats != g_psLiveList);
+		OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+		g_psLiveList->psPrev     = psProcessStats;
+		OSLockRelease(g_psLiveList->hLock);
+		psProcessStats->psNext = g_psLiveList;
+	}
+
+	g_psLiveList = psProcessStats;
+
+	OSLockRelease(psProcessStats->hLock);
+} /* _AddProcessStatsToFrontOfLiveList */
+
+/*************************************************************************/ /*!
+@Function       _AddProcessStatsToFrontOfDeadList
+@Description    Add a statistic to the dead list head.
+@Input          psProcessStats  Process stats to add.
+*/ /**************************************************************************/
+static void
+_AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	PVR_ASSERT(psProcessStats != NULL);
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+	if (g_psDeadList != NULL)
+	{
+		PVR_ASSERT(psProcessStats != g_psDeadList);
+		OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+		g_psDeadList->psPrev     = psProcessStats;
+		OSLockRelease(g_psDeadList->hLock);
+		psProcessStats->psNext = g_psDeadList;
+	}
+
+	g_psDeadList = psProcessStats;
+
+	OSLockRelease(psProcessStats->hLock);
+} /* _AddProcessStatsToFrontOfDeadList */
+
+/*************************************************************************/ /*!
+@Function       _RemoveProcessStatsFromList
+@Description    Detaches a process from either the live or dead list.
+@Input          psProcessStats  Process stats to remove.
+*/ /**************************************************************************/
+static void
+_RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	PVR_ASSERT(psProcessStats != NULL);
+
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+	/* Remove the item from the linked lists... */
+	if (g_psLiveList == psProcessStats)
+	{
+		g_psLiveList = psProcessStats->psNext;
+
+		if (g_psLiveList != NULL)
+		{
+			PVR_ASSERT(psProcessStats != g_psLiveList);
+			OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+			g_psLiveList->psPrev = NULL;
+			OSLockRelease(g_psLiveList->hLock);
+
+		}
+	}
+	else if (g_psDeadList == psProcessStats)
+	{
+		g_psDeadList = psProcessStats->psNext;
+
+		if (g_psDeadList != NULL)
+		{
+			PVR_ASSERT(psProcessStats != g_psDeadList);
+			OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+			g_psDeadList->psPrev = NULL;
+			OSLockRelease(g_psDeadList->hLock);
+		}
+	}
+	else
+	{
+		PVRSRV_PROCESS_STATS*  psNext = psProcessStats->psNext;
+		PVRSRV_PROCESS_STATS*  psPrev = psProcessStats->psPrev;
+
+		if (psProcessStats->psNext != NULL)
+		{
+			PVR_ASSERT(psProcessStats != psNext);
+			OSLockAcquireNested(psNext->hLock, PROCESS_LOCK_SUBCLASS_NEXT);
+			psProcessStats->psNext->psPrev = psPrev;
+			OSLockRelease(psNext->hLock);
+		}
+		if (psProcessStats->psPrev != NULL)
+		{
+			PVR_ASSERT(psProcessStats != psPrev);
+			OSLockAcquireNested(psPrev->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+			psProcessStats->psPrev->psNext = psNext;
+			OSLockRelease(psPrev->hLock);
+		}
+	}
+
+
+	/* Reset the pointers in this cell, as it is not attached to anything */
+	psProcessStats->psNext = NULL;
+	psProcessStats->psPrev = NULL;
+
+	OSLockRelease(psProcessStats->hLock);
+
+} /* _RemoveProcessStatsFromList */
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+/*************************************************************************/ /*!
+@Function       _CreatePIDOSStatisticEntries
+@Description    Creates all OS entries for this statistic.
+@Input          psProcessStats  Process stats to create OS entries for.
+@Input          pvOSPidFolder   Pointer to OS folder to place the entries in.
+*/ /**************************************************************************/
+static void
+_CreatePIDOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats,
+						  void *pvOSPidFolder)
+{
+	void								*pvOSPidFolderData;
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+	void								*pvOSPidEntryData;
+#endif
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	void								*pvOSMemEntryData;
+#endif
+#if defined(PVR_RI_DEBUG)
+	void								*pvOSRIMemEntryData;
+#endif
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+	void								*pvOSCacheOpEntryData;
+#endif
+
+	PVR_ASSERT(psProcessStats != NULL);
+
+	pvOSPidFolderData = OSCreateStatisticFolder(psProcessStats->szFolderName, pvOSPidFolder);
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+	pvOSPidEntryData  = OSCreateStatisticEntry("process_stats",
+												pvOSPidFolderData,
+												ProcessStatsPrintElements,
+												_PVRSRVIncrMemStatRefCount,
+												_PVRSRVDecrMemStatRefCount,
+												(void *) psProcessStats);
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	pvOSMemEntryData = OSCreateStatisticEntry("mem_area",
+											  pvOSPidFolderData,
+											  MemStatsPrintElements,
+											  NULL,
+											  NULL,
+											  (void *) psProcessStats->psMemoryStats);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	pvOSRIMemEntryData = OSCreateStatisticEntry("ri_mem_area",
+												 pvOSPidFolderData,
+												 RIMemStatsPrintElements,
+												 NULL,
+												 NULL,
+												 (void *) psProcessStats->psRIMemoryStats);
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+	pvOSCacheOpEntryData = OSCreateStatisticEntry("cache_ops_exec",
+												 pvOSPidFolderData,
+												 CacheOpStatsPrintElements,
+												 NULL,
+												 NULL,
+												 (void *) psProcessStats);
+#endif
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+#endif
+
+	psProcessStats->pvOSPidFolderData = pvOSPidFolderData;
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+	psProcessStats->pvOSPidEntryData  = pvOSPidEntryData;
+#endif
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	psProcessStats->psMemoryStats->pvOSMemEntryData = pvOSMemEntryData;
+#endif
+#if defined(PVR_RI_DEBUG)
+	psProcessStats->psRIMemoryStats->pvOSRIMemEntryData = pvOSRIMemEntryData;
+#endif
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+	psProcessStats->psCacheOpStats->pvOSCacheOpEntryData = pvOSCacheOpEntryData;
+#endif
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psProcessStats->hLock);
+#endif
+} /* _CreatePIDOSStatisticEntries */
+
+/*************************************************************************/ /*!
+@Function       _RemovePIDOSStatisticEntries
+@Description    Removes all OS entries used by this statistic.
+@Input          psProcessStats  Process stats whose OS entries are removed.
+*/ /**************************************************************************/
+static void
+_RemovePIDOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	PVR_ASSERT(psProcessStats != NULL);
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+	OSRemoveStatisticEntry(psProcessStats->psCacheOpStats->pvOSCacheOpEntryData);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	OSRemoveStatisticEntry(psProcessStats->psRIMemoryStats->pvOSRIMemEntryData);
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	OSRemoveStatisticEntry(psProcessStats->psMemoryStats->pvOSMemEntryData);
+#endif
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+	if (psProcessStats->pvOSPidEntryData != NULL)
+	{
+		OSRemoveStatisticEntry(psProcessStats->pvOSPidEntryData);
+	}
+#endif
+
+	if (psProcessStats->pvOSPidFolderData != NULL)
+	{
+		OSRemoveStatisticFolder(&psProcessStats->pvOSPidFolderData);
+	}
+} /* _RemovePIDOSStatisticEntries */
+#endif /* defined(ENABLE_DEBUGFS_PIDS) */
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS) || !defined(ENABLE_DEBUGFS_PIDS)
+/*************************************************************************/ /*!
+@Function       _DestroyProcessStat
+@Description    Frees memory and resources held by a process statistic.
+@Input          psProcessStats  Process stats to destroy.
+*/ /**************************************************************************/
+static void
+_DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	PVR_ASSERT(psProcessStats != NULL);
+
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+	/* Free the memory statistics... */
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	while (psProcessStats->psMemoryStats->psMemoryRecords)
+	{
+		List_PVRSRV_MEM_ALLOC_REC_Remove(psProcessStats->psMemoryStats->psMemoryRecords);
+	}
+	OSFreeMemNoStats(psProcessStats->psMemoryStats);
+#endif
+#if defined(PVR_RI_DEBUG)
+	OSFreeMemNoStats(psProcessStats->psRIMemoryStats);
+#endif
+	OSLockRelease(psProcessStats->hLock);
+
+	/* Destroy the lock */
+	OSLockDestroyNoStats(psProcessStats->hLock);
+
+	/* Free the memory... */
+	OSFreeMemNoStats(psProcessStats);
+} /* _DestroyProcessStat */
+#endif
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+static IMG_UINT32 _PVRSRVIncrMemStatRefCount(void *pvStatPtr)
+{
+	PVRSRV_STAT_STRUCTURE_TYPE*  peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+	PVRSRV_PROCESS_STATS*  psProcessStats = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+	IMG_UINT32 ui32Res = 0;
+
+	switch (*peStructureType)
+	{
+		case PVRSRV_STAT_STRUCTURE_PROCESS:
+		{
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			ui32Res = OSAtomicIncrement(&psProcessStats->iMemRefCount);
+#else
+			OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+			ui32Res = ++psProcessStats->ui32MemRefCount;
+			OSLockRelease(psProcessStats->hLock);
+#endif
+			break;
+		}
+		default:
+		{
+			/* _PVRSRVIncrMemStatRefCount was passed a pointer to an unrecognised struct */
+			PVR_ASSERT(0);
+			break;
+		}
+	}
+
+	return ui32Res;
+}
+
+static IMG_UINT32 _PVRSRVDecrMemStatRefCount(void *pvStatPtr)
+{
+	PVRSRV_STAT_STRUCTURE_TYPE*  peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+	PVRSRV_PROCESS_STATS*  psProcessStats = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+    IMG_UINT32 ui32Res = 0;
+
+	switch (*peStructureType)
+	{
+		case PVRSRV_STAT_STRUCTURE_PROCESS:
+		{
+			/* Decrement stat memory refCount and free if now zero */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			ui32Res = OSAtomicDecrement(&psProcessStats->iMemRefCount);
+#else
+			OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+			ui32Res = --psProcessStats->ui32MemRefCount;
+			OSLockRelease(psProcessStats->hLock);
+#endif
+			if (ui32Res == 0)
+			{
+				_DestroyProcessStat(psProcessStats);
+			}
+			break;
+		}
+		default:
+		{
+			/* _PVRSRVDecrMemStatRefCount was passed a pointer to an unrecognised struct */
+			PVR_ASSERT(0);
+			break;
+		}
+	}
+	return ui32Res;
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsInitialise
+@Description    Entry point for initialising the statistics module.
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVStatsInitialise(void)
+{
+	PVRSRV_ERROR error;
+
+	PVR_ASSERT(g_psLiveList == NULL);
+	PVR_ASSERT(g_psDeadList == NULL);
+	PVR_ASSERT(g_psLinkedListLock == NULL);
+	PVR_ASSERT(gpsSizeTrackingHashTable == NULL);
+	PVR_ASSERT(bProcessStatsInitialised == IMG_FALSE);
+
+	/* We need a lock to protect the linked lists... */
+	error = OSLockCreate(&g_psLinkedListLock, LOCK_TYPE_NONE);
+	if (error == PVRSRV_OK)
+	{
+		/* We also need a lock to protect the hash table used for size tracking.. */
+		error = OSLockCreate(&gpsSizeTrackingHashTableLock, LOCK_TYPE_NONE);
+
+		if (error != PVRSRV_OK)
+		{
+			goto e0;
+		}
+
+		/* We also need a lock to protect the GlobalStat counters */
+		error = OSLockCreate(&gsGlobalStats.hGlobalStatsLock, LOCK_TYPE_NONE);
+		if (error != PVRSRV_OK)
+		{
+			goto e1;
+		}
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+		/* Create the PID folders for putting the PID files in... */
+		pvOSLivePidFolder = OSCreateStatisticFolder(pszOSLivePidFolderName, NULL);
+		pvOSDeadPidFolder = OSCreateStatisticFolder(pszOSDeadPidFolderName, NULL);
+#endif
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+		pvOSProcStats = OSCreateRawStatisticEntry("memtrack_stats", NULL,
+		                                          RawProcessStatsPrintElements);
+#endif
+
+		/* Create power stats entry... */
+		pvOSPowerStatsEntryData = OSCreateStatisticEntry("power_timing_stats",
+														 NULL,
+														 PowerStatsPrintElements,
+														 NULL,
+														 NULL,
+														 NULL);
+
+		pvOSGlobalMemEntryRef = OSCreateStatisticEntry(pszDriverStatFilename,
+													   NULL,
+													   GlobalStatsPrintElements,
+													   NULL,
+													   NULL,
+													   NULL);
+
+		gpsSizeTrackingHashTable = HASH_Create(HASH_INITIAL_SIZE);
+
+		OSCachedMemSet(asClockSpeedChanges, 0, sizeof(asClockSpeedChanges));
+
+		/* Flag that we are ready to start monitoring memory allocations. */
+		bProcessStatsInitialised = IMG_TRUE;
+	}
+	return error;
+e1:
+	OSLockDestroy(gpsSizeTrackingHashTableLock);
+	gpsSizeTrackingHashTableLock = NULL;
+e0:
+	OSLockDestroy(g_psLinkedListLock);
+	g_psLinkedListLock = NULL;
+	return error;
+
+} /* PVRSRVStatsInitialise */
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsDestroy
+@Description    Method for destroying the statistics module data.
+*/ /**************************************************************************/
+void
+PVRSRVStatsDestroy(void)
+{
+	PVR_ASSERT(bProcessStatsInitialised == IMG_TRUE);
+
+	/* Stop monitoring memory allocations... */
+	bProcessStatsInitialised = IMG_FALSE;
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+	if (pvOSProcStats)
+	{
+		OSRemoveRawStatisticEntry(pvOSProcStats);
+		pvOSProcStats = NULL;
+	}
+#endif
+
+	/* Destroy the power stats entry... */
+	if (pvOSPowerStatsEntryData != NULL)
+	{
+		OSRemoveStatisticEntry(pvOSPowerStatsEntryData);
+		pvOSPowerStatsEntryData = NULL;
+	}
+
+	/* Destroy the global data entry */
+	if (pvOSGlobalMemEntryRef != NULL)
+	{
+		OSRemoveStatisticEntry(pvOSGlobalMemEntryRef);
+		pvOSGlobalMemEntryRef = NULL;
+	}
+
+	/* Destroy the locks... */
+	if (g_psLinkedListLock != NULL)
+	{
+		OSLockDestroy(g_psLinkedListLock);
+		g_psLinkedListLock = NULL;
+	}
+
+	/* Free the live and dead lists... */
+	while (g_psLiveList != NULL)
+	{
+		PVRSRV_PROCESS_STATS*  psProcessStats = g_psLiveList;
+
+		_RemoveProcessStatsFromList(psProcessStats);
+#if defined(ENABLE_DEBUGFS_PIDS)
+		_RemovePIDOSStatisticEntries(psProcessStats);
+#else
+		_DestroyProcessStat(psProcessStats);
+#endif
+	}
+
+	while (g_psDeadList != NULL)
+	{
+		PVRSRV_PROCESS_STATS*  psProcessStats = g_psDeadList;
+
+		_RemoveProcessStatsFromList(psProcessStats);
+#if defined(ENABLE_DEBUGFS_PIDS)
+		_RemovePIDOSStatisticEntries(psProcessStats);
+#else
+		_DestroyProcessStat(psProcessStats);
+#endif
+	}
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+	/* Remove the OS folders used by the PID folders...
+	 * OSRemoveStatisticFolder will NULL the pointers */
+	OSRemoveStatisticFolder(&pvOSLivePidFolder);
+	OSRemoveStatisticFolder(&pvOSDeadPidFolder);
+#endif
+
+	if (gpsSizeTrackingHashTable != NULL)
+	{
+		HASH_Delete(gpsSizeTrackingHashTable);
+	}
+	if (gpsSizeTrackingHashTableLock != NULL)
+	{
+		OSLockDestroy(gpsSizeTrackingHashTableLock);
+		gpsSizeTrackingHashTableLock = NULL;
+	}
+
+	if (gsGlobalStats.hGlobalStatsLock != NULL)
+	{
+		OSLockDestroy(gsGlobalStats.hGlobalStatsLock);
+		gsGlobalStats.hGlobalStatsLock = NULL;
+	}
+
+} /* PVRSRVStatsDestroy */
+
+static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								  size_t uiBytes)
+{
+	OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+	switch (eAllocType)
+	{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes);
+			break;
+#else
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			break;
+#endif
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes);
+			break;
+
+		default:
+			PVR_ASSERT(0);
+			break;
+	}
+	OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+}
+
+static void _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								  size_t uiBytes)
+{
+	OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+	switch (eAllocType)
+	{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes);
+			break;
+#else
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			break;
+#endif
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes);
+			break;
+
+		default:
+			PVR_ASSERT(0);
+			break;
+	}
+	OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsRegisterProcess
+@Description    Registers a process in the process statistics list.
+@Output         phProcessStats  Handle to the process statistics, used to deregister.
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats)
+{
+	PVRSRV_PROCESS_STATS*	psProcessStats=NULL;
+	PVRSRV_ERROR			eError;
+	IMG_PID					currentPid = OSGetCurrentClientProcessIDKM();
+	IMG_BOOL				bMoveProcess = IMG_FALSE;
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	IMG_CHAR				acFolderName[30];
+	IMG_CHAR				*pszProcName = OSGetCurrentProcessName();
+
+	strncpy(acFolderName, pszProcName, sizeof(acFolderName) - 1);
+	acFolderName[sizeof(acFolderName) - 1] = '\0'; /* strncpy does not guarantee NUL termination */
+	StripBadChars(acFolderName);
+#endif
+
+	PVR_ASSERT(phProcessStats != NULL);
+
+	/* Check the PID has not already moved to the dead list... */
+	OSLockAcquire(g_psLinkedListLock);
+	psProcessStats = _FindProcessStatsInDeadList(currentPid);
+	if (psProcessStats != NULL)
+	{
+		/* Move it back onto the live list! */
+		_RemoveProcessStatsFromList(psProcessStats);
+		_AddProcessStatsToFrontOfLiveList(psProcessStats);
+
+		/* We can perform the OS operation outside the lock */
+		bMoveProcess = IMG_TRUE;
+	}
+	else
+	{
+		/* Check the PID is not already registered in the live list... */
+		psProcessStats = _FindProcessStatsInLiveList(currentPid);
+	}
+
+	/* If the PID is on the live list then just increment the ref count and return... */
+	if (psProcessStats != NULL)
+	{
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+		psProcessStats->ui32RefCount++;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
+		UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS],
+		                 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]);
+		OSLockRelease(psProcessStats->hLock);
+		OSLockRelease(g_psLinkedListLock);
+
+		*phProcessStats = psProcessStats;
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+		/* Check if we need to perform any OS operation */
+		if (bMoveProcess)
+		{
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockAcquire(psProcessStats->hLock);
+#endif
+			/* Transfer the OS entries back to the folder for live processes... */
+			_RemovePIDOSStatisticEntries(psProcessStats);
+			_CreatePIDOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psProcessStats->hLock);
+#endif
+		}
+#endif
+
+		return PVRSRV_OK;
+	}
+	OSLockRelease(g_psLinkedListLock);
+
+	/* Allocate a new node structure and initialise it... */
+	psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
+	if (psProcessStats == NULL)
+	{
+		*phProcessStats = 0;
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psProcessStats->eStructureType  = PVRSRV_STAT_STRUCTURE_PROCESS;
+	psProcessStats->pid             = currentPid;
+	psProcessStats->ui32RefCount    = 1;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAtomicWrite(&psProcessStats->iMemRefCount, 1);
+#else
+	psProcessStats->ui32MemRefCount = 1;
+#endif
+
+	psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]     = 1;
+	psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+
+	eError = OSLockCreateNoStats(&psProcessStats->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	psProcessStats->psMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_MEMORY_STATS));
+	if (psProcessStats->psMemoryStats == NULL)
+	{
+		OSLockDestroyNoStats(psProcessStats->hLock);
+		goto e0;
+	}
+
+	psProcessStats->psMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_MEMORY;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	psProcessStats->psRIMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_RI_MEMORY_STATS));
+	if (psProcessStats->psRIMemoryStats == NULL)
+	{
+		OSLockDestroyNoStats(psProcessStats->hLock);
+		OSFreeMemNoStats(psProcessStats->psMemoryStats);
+		goto e0;
+	}
+	psProcessStats->psRIMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_RIMEMORY;
+	psProcessStats->psRIMemoryStats->pid            = currentPid;
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+	psProcessStats->psCacheOpStats = OSAllocZMemNoStats(sizeof(PVRSRV_CACHEOP_STATS));
+	if (psProcessStats->psCacheOpStats == NULL)
+	{
+		OSLockDestroyNoStats(psProcessStats->hLock);
+		OSFreeMemNoStats(psProcessStats->psMemoryStats);
+		OSFreeMemNoStats(psProcessStats->psRIMemoryStats);
+		goto e0;
+	}
+	psProcessStats->psCacheOpStats->eStructureType = PVRSRV_STAT_STRUCTURE_CACHEOP;
+#endif
+
+	/* Add it to the live list... */
+	OSLockAcquire(g_psLinkedListLock);
+	_AddProcessStatsToFrontOfLiveList(psProcessStats);
+	OSLockRelease(g_psLinkedListLock);
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+	/* Create the process stat in the OS... */
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+	OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+			   "%d_%s", currentPid, acFolderName);
+#else
+	OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+			   "%d", currentPid);
+#endif
+	_CreatePIDOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+#endif
+
+	/* Done */
+	*phProcessStats = (IMG_HANDLE) psProcessStats;
+
+	return PVRSRV_OK;
+
+e0:
+	OSFreeMemNoStats(psProcessStats);
+	*phProcessStats = 0;
+	return PVRSRV_ERROR_OUT_OF_MEMORY;
+} /* PVRSRVStatsRegisterProcess */
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsDeregisterProcess
+@Input          hProcessStats  Handle to the process returned when registered.
+@Description    Method for deregistering a process from the statistics module.
+*/ /**************************************************************************/
+void
+PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats)
+{
+	IMG_BOOL    bMoveProcess = IMG_FALSE;
+
+	if (hProcessStats != 0)
+	{
+		PVRSRV_PROCESS_STATS*  psProcessStats = (PVRSRV_PROCESS_STATS*) hProcessStats;
+
+		/* Lower the reference count, if zero then move it to the dead list */
+		OSLockAcquire(g_psLinkedListLock);
+		if (psProcessStats->ui32RefCount > 0)
+		{
+			OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+			psProcessStats->ui32RefCount--;
+			psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
+
+#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+			if (psProcessStats->ui32RefCount == 0)
+			{
+				OSLockRelease(psProcessStats->hLock);
+				_MoveProcessToDeadList(psProcessStats);
+				bMoveProcess = IMG_TRUE;
+			}
+			else
+#endif
+			{
+				OSLockRelease(psProcessStats->hLock);
+			}
+		}
+		OSLockRelease(g_psLinkedListLock);
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+		/* The OS calls need to be performed without g_psLinkedListLock */
+		if (bMoveProcess == IMG_TRUE)
+		{
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockAcquire(psProcessStats->hLock);
+#endif
+			_MoveProcessToDeadListDebugFS(psProcessStats);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psProcessStats->hLock);
+#endif
+		}
+#endif
+
+		/* Check if the dead list needs to be reduced */
+		_CompressMemoryUsage();
+	}
+} /* PVRSRVStatsDeregisterProcess */
+
+void
+PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+							 void *pvCpuVAddr,
+							 IMG_CPU_PHYADDR sCpuPAddr,
+							 size_t uiBytes,
+							 void *pvPrivateData)
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+{
+	_PVRSRVStatsAddMemAllocRecord(eAllocType, pvCpuVAddr, sCpuPAddr, uiBytes, pvPrivateData, NULL, 0);
+}
+void
+_PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+							  void *pvCpuVAddr,
+							  IMG_CPU_PHYADDR sCpuPAddr,
+							  size_t uiBytes,
+							  void *pvPrivateData,
+							  void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine)
+#endif
+{
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	IMG_PID				   currentPid = OSGetCurrentClientProcessIDKM();
+	IMG_PID				   currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+	PVRSRV_DATA*		   psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_MEM_ALLOC_REC*  psRecord   = NULL;
+	PVRSRV_PROCESS_STATS*  psProcessStats;
+	PVRSRV_MEMORY_STATS*   psMemoryStats;
+	IMG_BOOL			   bResurrectProcess = IMG_FALSE;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	/*
+	 *  To prevent a recursive loop, we make the memory allocations
+	 *  for our memstat records via OSAllocMemNoStats(), which does not try to
+	 *  create a memstat record entry.
+	 */
+
+	/* Allocate the memory record... */
+	psRecord = OSAllocZMemNoStats(sizeof(PVRSRV_MEM_ALLOC_REC));
+	if (psRecord == NULL)
+	{
+		return;
+	}
+
+	psRecord->eAllocType       = eAllocType;
+	psRecord->pvCpuVAddr       = pvCpuVAddr;
+	psRecord->sCpuPAddr.uiAddr = sCpuPAddr.uiAddr;
+	psRecord->uiBytes          = uiBytes;
+	psRecord->pvPrivateData    = pvPrivateData;
+
+	psRecord->pid = currentPid;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+	psRecord->pvAllocdFromFile = pvAllocFromFile;
+	psRecord->ui32AllocdFromLine = ui32AllocFromLine;
+#endif
+
+	_increase_global_stat(eAllocType, uiBytes);
+	/* Lock while we find the correct process... */
+	OSLockAcquire(g_psLinkedListLock);
+
+	if (psPVRSRVData)
+	{
+		if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+			   (currentCleanupPid != 0))
+		{
+			psProcessStats = _FindProcessStats(currentCleanupPid);
+		}
+		else
+		{
+			psProcessStats = _FindProcessStatsInLiveList(currentPid);
+			if (!psProcessStats)
+			{
+				psProcessStats = _FindProcessStatsInDeadList(currentPid);
+				bResurrectProcess = IMG_TRUE;
+			}
+		}
+	}
+	else
+	{
+		psProcessStats = _FindProcessStatsInLiveList(currentPid);
+		if (!psProcessStats)
+		{
+			psProcessStats = _FindProcessStatsInDeadList(currentPid);
+			bResurrectProcess = IMG_TRUE;
+		}
+	}
+
+	if (psProcessStats == NULL)
+	{
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		PVRSRV_ERROR	eError;
+		IMG_CHAR				acFolderName[30];
+		IMG_CHAR				*pszProcName = OSGetCurrentProcessName();
+
+		strncpy(acFolderName, pszProcName, sizeof(acFolderName) - 1);
+		acFolderName[sizeof(acFolderName) - 1] = '\0'; /* strncpy does not guarantee NUL termination */
+		StripBadChars(acFolderName);
+
+		psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
+		if (psProcessStats == NULL)
+		{
+			OSLockRelease(g_psLinkedListLock);
+			return;
+		}
+
+		psProcessStats->eStructureType  = PVRSRV_STAT_STRUCTURE_PROCESS;
+		psProcessStats->pid             = currentPid;
+		psProcessStats->ui32RefCount    = 1;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSAtomicWrite(&psProcessStats->iMemRefCount, 1);
+#else
+		psProcessStats->ui32MemRefCount = 1;
+#endif
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]     = 1;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+
+		eError = OSLockCreateNoStats(&psProcessStats->hLock, LOCK_TYPE_NONE);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+		psProcessStats->psMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_MEMORY_STATS));
+		if (psProcessStats->psMemoryStats == NULL)
+		{
+			OSLockRelease(g_psLinkedListLock);
+			OSLockDestroyNoStats(psProcessStats->hLock);
+			psProcessStats->hLock = NULL;
+			goto e0;
+		}
+
+		psProcessStats->psMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_MEMORY;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+		psProcessStats->psRIMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_RI_MEMORY_STATS));
+		if (psProcessStats->psRIMemoryStats == NULL)
+		{
+			OSFreeMemNoStats(psProcessStats->psMemoryStats);
+			OSLockDestroyNoStats(psProcessStats->hLock);
+			psProcessStats->hLock = NULL;
+			OSLockRelease(g_psLinkedListLock);
+			goto e0;
+		}
+
+		psProcessStats->psRIMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_RIMEMORY;
+		psProcessStats->psRIMemoryStats->pid            = currentPid;
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+		psProcessStats->psCacheOpStats = OSAllocZMemNoStats(sizeof(PVRSRV_CACHEOP_STATS));
+		if (psProcessStats->psCacheOpStats == NULL)
+		{
+			OSFreeMemNoStats(psProcessStats->psRIMemoryStats);
+			OSFreeMemNoStats(psProcessStats->psMemoryStats);
+			OSLockDestroyNoStats(psProcessStats->hLock);
+			psProcessStats->hLock = NULL;
+			OSLockRelease(g_psLinkedListLock);
+			goto e0;
+		}
+
+		psProcessStats->psCacheOpStats->eStructureType = PVRSRV_STAT_STRUCTURE_CACHEOP;
+#endif
+
+		/* Add it to the live list (this must be done under g_psLinkedListLock)... */
+		_AddProcessStatsToFrontOfLiveList(psProcessStats);
+		OSLockRelease(g_psLinkedListLock);
+
+		/* Create the process stat in the OS... */
+		OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+				   "%d_%s", currentPid, acFolderName);
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+		_CreatePIDOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+#endif
+#else  /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+		OSLockRelease(g_psLinkedListLock);
+#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+	}
+	else
+	{
+		OSLockRelease(g_psLinkedListLock);
+	}
+
+	if (psProcessStats == NULL)
+	{
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		PVR_DPF((PVR_DBG_ERROR, "%s UNABLE TO CREATE process_stats entry for pid %d [%s] (" IMG_SIZE_FMTSPEC " bytes)", __FUNCTION__, currentPid, OSGetCurrentProcessName(), uiBytes));
+#endif
+		if (psRecord != NULL)
+		{
+			OSFreeMemNoStats(psRecord);
+		}
+		return;
+	}
+
+	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+	psMemoryStats = psProcessStats->psMemoryStats;
+
+	/* Insert the memory record... */
+	if (psRecord != NULL)
+	{
+		List_PVRSRV_MEM_ALLOC_REC_Insert(&psMemoryStats->psMemoryRecords, psRecord);
+	}
+
+	/* Update the memory watermarks... */
+	switch (eAllocType)
+	{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		{
+			if (psRecord != NULL)
+			{
+				if (pvCpuVAddr == NULL)
+				{
+					break;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+		{
+			if (psRecord != NULL)
+			{
+				if (pvCpuVAddr == NULL)
+				{
+					break;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+#else
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+		break;
+#endif
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+		{
+			if (psRecord != NULL)
+			{
+				if (pvCpuVAddr == NULL)
+				{
+					break;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+		{
+			if (psRecord != NULL)
+			{
+				if (pvCpuVAddr == NULL)
+				{
+					break;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+		{
+			if (psRecord != NULL)
+			{
+				psRecord->ui64Key = sCpuPAddr.uiAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+		{
+			if (psRecord != NULL)
+			{
+				if (pvCpuVAddr == NULL)
+				{
+					break;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+		{
+			if (psRecord != NULL)
+			{
+				psRecord->ui64Key = sCpuPAddr.uiAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+		{
+			if (psRecord != NULL)
+			{
+				psRecord->ui64Key = sCpuPAddr.uiAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+		{
+			if (psRecord != NULL)
+			{
+				if (pvCpuVAddr == NULL)
+				{
+					break;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+			psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+		}
+		break;
+
+		default:
+		{
+			PVR_ASSERT(0);
+		}
+		break;
+	}
+	OSLockRelease(psProcessStats->hLock);
+	if (bResurrectProcess)
+	{
+		/* Move process from dead list to live list */
+		OSLockAcquire(g_psLinkedListLock);
+		_MoveProcessToLiveList(psProcessStats);
+		OSLockRelease(g_psLinkedListLock);
+#if defined(ENABLE_DEBUGFS_PIDS)
+		_MoveProcessToLiveListDebugFS(psProcessStats);
+#endif
+	}
+	return;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+e0:
+	OSFreeMemNoStats(psRecord);
+	OSFreeMemNoStats(psProcessStats);
+	return;
+#endif
+#endif
+} /* PVRSRVStatsAddMemAllocRecord */
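+/* Usage sketch (illustrative only; the real callers are the OS allocation
+ * wrappers elsewhere in the driver). In the non-debug configuration, a
+ * hypothetical kmalloc wrapper would pair the two calls using the CPU
+ * virtual address as the key:
+ *
+ *     pvAddr = kmalloc(uiBytes, GFP_KERNEL);
+ *     sCpuPAddr.uiAddr = 0;
+ *     PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ *                                  pvAddr, sCpuPAddr, uiBytes, NULL);
+ *     ...
+ *     PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ *                                     (IMG_UINT64)(uintptr_t)pvAddr);
+ */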
+
+void
+PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								IMG_UINT64 ui64Key)
+{
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	IMG_PID				   currentPid	  = OSGetCurrentClientProcessIDKM();
+	IMG_PID				   currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+	PVRSRV_DATA*		   psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_PROCESS_STATS*  psProcessStats = NULL;
+	PVRSRV_MEMORY_STATS*   psMemoryStats  = NULL;
+	PVRSRV_MEM_ALLOC_REC*  psRecord		  = NULL;
+	IMG_BOOL			   bFound	      = IMG_FALSE;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	/* Lock while we find the correct process and remove this record... */
+	OSLockAcquire(g_psLinkedListLock);
+
+	if (psPVRSRVData)
+	{
+		if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+			 (currentCleanupPid != 0))
+		{
+			psProcessStats = _FindProcessStats(currentCleanupPid);
+		}
+		else
+		{
+			psProcessStats = _FindProcessStats(currentPid);
+		}
+	}
+	else
+	{
+		psProcessStats = _FindProcessStats(currentPid);
+	}
+	if (psProcessStats != NULL)
+	{
+		psMemoryStats = psProcessStats->psMemoryStats;
+		psRecord      = psMemoryStats->psMemoryRecords;
+		while (psRecord != NULL)
+		{
+			if (psRecord->ui64Key == ui64Key  &&  psRecord->eAllocType == eAllocType)
+			{
+				bFound = IMG_TRUE;
+				break;
+			}
+
+			psRecord = psRecord->psNext;
+		}
+	}
+
+	/* If not found, we need to do a full search in case it was allocated to a different PID... */
+	if (!bFound)
+	{
+		PVRSRV_PROCESS_STATS*  psProcessStatsAlreadyChecked = psProcessStats;
+
+		/* Search all live lists first... */
+		psProcessStats = g_psLiveList;
+		while (psProcessStats != NULL)
+		{
+			if (psProcessStats != psProcessStatsAlreadyChecked)
+			{
+				psMemoryStats = psProcessStats->psMemoryStats;
+				psRecord      = psMemoryStats->psMemoryRecords;
+				while (psRecord != NULL)
+				{
+					if (psRecord->ui64Key == ui64Key  &&  psRecord->eAllocType == eAllocType)
+					{
+						bFound = IMG_TRUE;
+						break;
+					}
+
+					psRecord = psRecord->psNext;
+				}
+			}
+
+			if (bFound)
+			{
+				break;
+			}
+
+			psProcessStats = psProcessStats->psNext;
+		}
+
+		/* If not found, then search all dead lists next... */
+		if (!bFound)
+		{
+			psProcessStats = g_psDeadList;
+			while (psProcessStats != NULL)
+			{
+				if (psProcessStats != psProcessStatsAlreadyChecked)
+				{
+					psMemoryStats = psProcessStats->psMemoryStats;
+					psRecord      = psMemoryStats->psMemoryRecords;
+					while (psRecord != NULL)
+					{
+						if (psRecord->ui64Key == ui64Key  &&  psRecord->eAllocType == eAllocType)
+						{
+							bFound = IMG_TRUE;
+							break;
+						}
+
+						psRecord = psRecord->psNext;
+					}
+				}
+
+				if (bFound)
+				{
+					break;
+				}
+
+				psProcessStats = psProcessStats->psNext;
+			}
+		}
+	}
+
+	/* Update the watermark and remove this record...*/
+	if (bFound)
+	{
+		_decrease_global_stat(eAllocType, psRecord->uiBytes);
+
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+		_DecreaseProcStatValue(eAllocType,
+		                       psProcessStats,
+		                       psRecord->uiBytes);
+
+		List_PVRSRV_MEM_ALLOC_REC_Remove(psRecord);
+		OSLockRelease(psProcessStats->hLock);
+		OSLockRelease(g_psLinkedListLock);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		/* If all stats are now zero, remove the entry for this thread */
+		if (psProcessStats->ui32StatAllocFlags == 0)
+		{
+			OSLockAcquire(g_psLinkedListLock);
+			_MoveProcessToDeadList(psProcessStats);
+			OSLockRelease(g_psLinkedListLock);
+#if defined(ENABLE_DEBUGFS_PIDS)
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockAcquire(psProcessStats->hLock);
+#endif
+			_MoveProcessToDeadListDebugFS(psProcessStats);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psProcessStats->hLock);
+#endif
+#endif
+
+			/* Check if the dead list needs to be reduced */
+			_CompressMemoryUsage();
+		}
+#endif
+		/*
+		 * Free the record outside the lock so we don't deadlock and so we
+		 * reduce the time the lock is held.
+		 */
+		OSFreeMemNoStats(psRecord);
+	}
+	else
+	{
+		OSLockRelease(g_psLinkedListLock);
+	}
+
+#else
+	PVR_UNREFERENCED_PARAMETER(eAllocType);
+	PVR_UNREFERENCED_PARAMETER(ui64Key);
+#endif
+} /* PVRSRVStatsRemoveMemAllocRecord */
+
+void
+PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+									size_t uiBytes,
+									IMG_UINT64 uiCpuVAddr)
+{
+	IMG_BOOL bRes = IMG_FALSE;
+	_PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = NULL;
+
+	if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL) )
+	{
+		return;
+	}
+
+	/* Alloc untracked memory for the new hash table entry */
+	psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)OSAllocMemNoStats(sizeof(*psNewTrackingHashEntry));
+	if (psNewTrackingHashEntry)
+	{
+		/* Fill-in the size of the allocation and PID of the allocating process */
+		psNewTrackingHashEntry->uiSizeInBytes = uiBytes;
+		psNewTrackingHashEntry->uiPid = OSGetCurrentClientProcessIDKM();
+		OSLockAcquire(gpsSizeTrackingHashTableLock);
+		/* Insert address of the new struct into the hash table */
+		bRes = HASH_Insert(gpsSizeTrackingHashTable, uiCpuVAddr, (uintptr_t)psNewTrackingHashEntry);
+		OSLockRelease(gpsSizeTrackingHashTableLock);
+	}
+
+	if (psNewTrackingHashEntry)
+	{
+		if (bRes)
+		{
+			PVRSRVStatsIncrMemAllocStat(eAllocType, uiBytes);
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d HASH_Insert() failed!!", __func__, __LINE__));
+			/* The entry was not inserted into the hash table, free it to avoid a leak */
+			OSFreeMemNoStats(psNewTrackingHashEntry);
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d Failed to alloc memory for psNewTrackingHashEntry!!", __FUNCTION__, __LINE__));
+	}
+}
+
+void
+PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+									size_t uiBytes)
+{
+	IMG_PID				   currentPid = OSGetCurrentClientProcessIDKM();
+	IMG_PID				   currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+	PVRSRV_DATA* 		   psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_PROCESS_STATS*  psProcessStats = NULL;
+	IMG_BOOL			   bResurrectProcess = IMG_FALSE;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	_increase_global_stat(eAllocType, uiBytes);
+	OSLockAcquire(g_psLinkedListLock);
+	if (psPVRSRVData)
+	{
+		if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+			 (currentCleanupPid != 0))
+		{
+			psProcessStats = _FindProcessStats(currentCleanupPid);
+		}
+		else
+		{
+			psProcessStats = _FindProcessStatsInLiveList(currentPid);
+			if (!psProcessStats)
+			{
+				psProcessStats = _FindProcessStatsInDeadList(currentPid);
+				bResurrectProcess = IMG_TRUE;
+			}
+		}
+	}
+	else
+	{
+		psProcessStats = _FindProcessStatsInLiveList(currentPid);
+		if (!psProcessStats)
+		{
+			psProcessStats = _FindProcessStatsInDeadList(currentPid);
+			bResurrectProcess = IMG_TRUE;
+		}
+	}
+
+	if (psProcessStats == NULL)
+	{
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		PVRSRV_ERROR eError;
+		IMG_CHAR				acFolderName[30];
+		IMG_CHAR				*pszProcName = OSGetCurrentProcessName();
+
+		/* strncpy() does not NUL-terminate when the source fills the buffer */
+		strncpy(acFolderName, pszProcName, sizeof(acFolderName) - 1);
+		acFolderName[sizeof(acFolderName) - 1] = '\0';
+		StripBadChars(acFolderName);
+
+		if (bProcessStatsInitialised)
+		{
+			psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
+			if (psProcessStats == NULL)
+			{
+				return;
+			}
+
+			psProcessStats->eStructureType  = PVRSRV_STAT_STRUCTURE_PROCESS;
+			psProcessStats->pid             = currentPid;
+			psProcessStats->ui32RefCount    = 1;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSAtomicWrite(&psProcessStats->iMemRefCount, 1);
+#else
+			psProcessStats->ui32MemRefCount = 1;
+#endif
+			psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]     = 1;
+			psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+
+			eError = OSLockCreateNoStats(&psProcessStats->hLock ,LOCK_TYPE_NONE);
+			if (eError != PVRSRV_OK)
+			{
+				OSFreeMemNoStats(psProcessStats);
+				return;
+			}
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+			psProcessStats->psMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_MEMORY_STATS));
+			if (psProcessStats->psMemoryStats == NULL)
+			{
+				OSLockDestroyNoStats(psProcessStats->hLock);
+				OSFreeMemNoStats(psProcessStats);
+				return;
+			}
+			psProcessStats->psMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_MEMORY;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+			psProcessStats->psRIMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_RI_MEMORY_STATS));
+			if (psProcessStats->psRIMemoryStats == NULL)
+			{
+				OSFreeMemNoStats(psProcessStats->psMemoryStats);
+				OSLockDestroyNoStats(psProcessStats->hLock);
+				OSFreeMemNoStats(psProcessStats);
+				return;
+			}
+			psProcessStats->psRIMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_RIMEMORY;
+			psProcessStats->psRIMemoryStats->pid            = currentPid;
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+			psProcessStats->psCacheOpStats = OSAllocZMemNoStats(sizeof(PVRSRV_CACHEOP_STATS));
+			if (psProcessStats->psCacheOpStats == NULL)
+			{
+				/* Only free the sub-allocations that exist in this configuration */
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+				OSFreeMemNoStats(psProcessStats->psMemoryStats);
+#endif
+#if defined(PVR_RI_DEBUG)
+				OSFreeMemNoStats(psProcessStats->psRIMemoryStats);
+#endif
+				OSLockDestroyNoStats(psProcessStats->hLock);
+				OSFreeMemNoStats(psProcessStats);
+				return;
+			}
+			psProcessStats->psCacheOpStats->eStructureType = PVRSRV_STAT_STRUCTURE_CACHEOP;
+#endif
+
+			/* Add it to the live list... */
+			_AddProcessStatsToFrontOfLiveList(psProcessStats);
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+			/* Create the process stat in the OS... */
+			OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+					"%d_%s", currentPid, acFolderName);
+
+			_CreatePIDOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+#endif
+		}
+#else
+		OSLockRelease(g_psLinkedListLock);
+#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+
+	}
+
+	if (psProcessStats != NULL)
+	{
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		/* Release the list lock as soon as we acquire the process lock;
+		 * this ensures that if the process is in the dead list its entry
+		 * cannot be deleted or modified. */
+		OSLockRelease(g_psLinkedListLock);
+		/* Update the memory watermarks... */
+		switch (eAllocType)
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+			case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+#else
+			case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			break;
+#endif
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+				psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+			break;
+
+			default:
+			{
+				PVR_ASSERT(0);
+			}
+			break;
+		}
+		OSLockRelease(psProcessStats->hLock);
+
+		if (bResurrectProcess)
+		{
+			/* Move process from dead list to live list */
+			OSLockAcquire(g_psLinkedListLock);
+			_MoveProcessToLiveList(psProcessStats);
+			OSLockRelease(g_psLinkedListLock);
+#if defined(ENABLE_DEBUGFS_PIDS)
+			_MoveProcessToLiveListDebugFS(psProcessStats);
+#endif
+		}
+	}
+}
+
+static void
+_DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                       PVRSRV_PROCESS_STATS* psProcessStats,
+                       IMG_UINT32 uiBytes)
+{
+	switch (eAllocType)
+	{
+	#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+	#else
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+		break;
+	#endif
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+		{
+			DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+			if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0)
+			{
+				psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+			}
+		}
+		break;
+
+		default:
+		{
+			PVR_ASSERT(0);
+		}
+		break;
+	}
+
+}
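+/*
+ * Note on the ui32StatAllocFlags scheme used above: each memory-allocation
+ * watermark stat owns one bit, indexed relative to
+ * PVRSRV_PROCESS_STAT_TYPE_KMALLOC (KMALLOC itself is bit 0). The increment
+ * paths set the bit when a stat becomes non-zero and the code above clears
+ * it when the stat returns to zero, so the free paths can test a single
+ * word (ui32StatAllocFlags == 0) to decide whether the whole process entry
+ * can be retired to the dead list.
+ */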
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+void RawProcessStatsPrintElements(void *pvFile,
+                                  void *pvStatPtr,
+                                  OS_STATS_PRINTF_FUNC *pfnOSStatsPrintf)
+{
+	PVRSRV_PROCESS_STATS *psProcessStats;
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: pfnOSStatsPrintf not set", __func__));
+		return;
+	}
+
+	pfnOSStatsPrintf(pvFile, "%s,%s,%s,%s,%s,%s\n",
+	                 "PID",
+	                 "MemoryUsageKMalloc",           // PVRSRV_PROCESS_STAT_TYPE_KMALLOC
+	                 "MemoryUsageAllocPTMemoryUMA",  // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA
+	                 "MemoryUsageAllocPTMemoryLMA",  // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA
+	                 "MemoryUsageAllocGPUMemLMA",    // PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES
+	                 "MemoryUsageAllocGPUMemUMA"     // PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES
+	                 );
+
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = g_psLiveList;
+
+	while (psProcessStats != NULL)
+	{
+		pfnOSStatsPrintf(pvFile, "%d,%d,%d,%d,%d,%d\n",
+		                 psProcessStats->pid,
+		                 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC],
+		                 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA],
+		                 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA],
+		                 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES],
+		                 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES]
+		                 );
+
+		psProcessStats = psProcessStats->psNext;
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+} /* RawProcessStatsPrintElements */
+#endif
+
+void
+PVRSRVStatsDecrMemKAllocStat(size_t uiBytes,
+                             IMG_PID decrPID)
+{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+	PVRSRV_PROCESS_STATS*  psProcessStats;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	_decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, uiBytes);
+
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(decrPID);
+
+	if (psProcessStats != NULL)
+	{
+		/* Decrement the kmalloc memory stat... */
+		DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes);
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+#endif
+}
+
+static void
+_StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry,
+                        PVRSRV_MEM_ALLOC_TYPE eAllocType)
+{
+	PVRSRV_PROCESS_STATS*  psProcessStats;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	_decrease_global_stat(eAllocType, psTrackingHashEntry->uiSizeInBytes);
+
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(psTrackingHashEntry->uiPid);
+
+	if (psProcessStats != NULL)
+	{
+		/* Decrement the memory stat... */
+		_DecreaseProcStatValue(eAllocType,
+		                       psProcessStats,
+		                       psTrackingHashEntry->uiSizeInBytes);
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+}
+
+void
+PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+									  IMG_UINT64 uiCpuVAddr)
+{
+	_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry = NULL;
+
+	if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL) )
+	{
+		return;
+	}
+
+	OSLockAcquire(gpsSizeTrackingHashTableLock);
+	psTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)HASH_Remove(gpsSizeTrackingHashTable, uiCpuVAddr);
+	OSLockRelease(gpsSizeTrackingHashTableLock);
+	if (psTrackingHashEntry)
+	{
+		_StatsDecrMemTrackedStat(psTrackingHashEntry, eAllocType);
+		OSFreeMemNoStats(psTrackingHashEntry);
+	}
+}
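+/*
+ * Illustrative sketch (not part of the driver) of how the track/untrack pair
+ * above is meant to be used by an allocation wrapper; pvAddr and uiBytes are
+ * hypothetical locals. The hash table keyed on CPU virtual address is what
+ * lets the free path recover the allocation size:
+ *
+ *   void *pvAddr = OSAllocMem(uiBytes);
+ *   if (pvAddr != NULL)
+ *   {
+ *       PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ *                                           uiBytes,
+ *                                           (IMG_UINT64)(uintptr_t)pvAddr);
+ *   }
+ *   ...
+ *   PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ *                                         (IMG_UINT64)(uintptr_t)pvAddr);
+ *   OSFreeMem(pvAddr);
+ */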
+
+void
+PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+							size_t uiBytes)
+{
+	IMG_PID				   currentPid = OSGetCurrentClientProcessIDKM();
+	IMG_PID				   currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+	PVRSRV_DATA* 		   psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_PROCESS_STATS*  psProcessStats = NULL;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	_decrease_global_stat(eAllocType, uiBytes);
+
+	OSLockAcquire(g_psLinkedListLock);
+	if (psPVRSRVData)
+	{
+		if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+			 (currentCleanupPid != 0))
+		{
+			psProcessStats = _FindProcessStats(currentCleanupPid);
+		}
+		else
+		{
+			psProcessStats = _FindProcessStats(currentPid);
+		}
+	}
+	else
+	{
+		psProcessStats = _FindProcessStats(currentPid);
+	}
+
+	if (psProcessStats != NULL)
+	{
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		/* Release the list lock as soon as we acquire the process lock;
+		 * this ensures that if the process is in the dead list its entry
+		 * cannot be deleted or modified. */
+		OSLockRelease(g_psLinkedListLock);
+		/* Update the memory watermarks... */
+		_DecreaseProcStatValue(eAllocType,
+		                       psProcessStats,
+		                       uiBytes);
+		OSLockRelease(psProcessStats->hLock);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+		/* If all stats are now zero, remove the entry for this thread */
+		if (psProcessStats->ui32StatAllocFlags == 0)
+		{
+			OSLockAcquire(g_psLinkedListLock);
+			_MoveProcessToDeadList(psProcessStats);
+			OSLockRelease(g_psLinkedListLock);
+#if defined(ENABLE_DEBUGFS_PIDS)
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockAcquire(psProcessStats->hLock);
+#endif
+			_MoveProcessToDeadListDebugFS(psProcessStats);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psProcessStats->hLock);
+#endif
+#endif
+
+			/* Check if the dead list needs to be reduced */
+			_CompressMemoryUsage();
+		}
+#endif
+	}
+	else
+	{
+		OSLockRelease(g_psLinkedListLock);
+	}
+}
+
+/* For now we do not want to expose the global stats API
+ * so we wrap it into this specific function for pooled pages.
+ * As soon as we need to modify the global stats directly somewhere else
+ * we want to replace these functions with more general ones.
+ */
+void
+PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes)
+{
+	_increase_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
+}
+
+void
+PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes)
+{
+	_decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
+}
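+/*
+ * Illustrative use of the pool wrappers above (hypothetical call site, for
+ * documentation only): a UMA page-pool grow/shrink path would account the
+ * pooled bytes symmetrically, e.g.
+ *
+ *   PVRSRVStatsIncrMemAllocPoolStat((size_t)uiNumPages << OSGetPageShift());
+ *   ...
+ *   PVRSRVStatsDecrMemAllocPoolStat((size_t)uiNumPages << OSGetPageShift());
+ */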
+
+void
+PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+									IMG_UINT32 ui32TotalNumOutOfMemory,
+									IMG_UINT32 ui32NumTAStores,
+									IMG_UINT32 ui32Num3DStores,
+									IMG_UINT32 ui32NumSHStores,
+									IMG_UINT32 ui32NumCDMStores,
+									IMG_PID pidOwner)
+{
+	IMG_PID	pidCurrent = pidOwner;
+
+	PVRSRV_PROCESS_STATS*  psProcessStats;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	/* Lock while we find the correct process and update the record... */
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(pidCurrent);
+	if (psProcessStats != NULL)
+	{
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_PRS]       += ui32TotalNumPartialRenders;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_OOMS]      += ui32TotalNumOutOfMemory;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES] += ui32NumTAStores;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES] += ui32NumSHStores;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores;
+		OSLockRelease(psProcessStats->hLock);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateRenderContextStats: Null process. Pid=%d", pidCurrent));
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateRenderContextStats */
+
+void
+PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+							   IMG_UINT32 ui32NumReqByFW,
+							   IMG_PID owner)
+{
+	IMG_PID				   currentPid = (owner == 0) ? OSGetCurrentClientProcessIDKM() : owner;
+	PVRSRV_PROCESS_STATS*  psProcessStats;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	/* Lock while we find the correct process and update the record... */
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(currentPid);
+	if (psProcessStats != NULL)
+	{
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP] += ui32NumReqByApp;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW]  += ui32NumReqByFW;
+		OSLockRelease(psProcessStats->hLock);
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateZSBufferStats */
+
+void
+PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+							   IMG_UINT32 ui32NumGrowReqByFW,
+							   IMG_UINT32 ui32InitFLPages,
+							   IMG_UINT32 ui32NumHighPages,
+							   IMG_PID ownerPid)
+{
+	IMG_PID				   currentPid = (ownerPid != 0) ? ownerPid : OSGetCurrentClientProcessIDKM();
+	PVRSRV_PROCESS_STATS*  psProcessStats;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	/* Lock while we find the correct process and update the record... */
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(currentPid);
+
+	if (psProcessStats != NULL)
+	{
+		/* Avoid signed / unsigned mismatch which is flagged by some compilers */
+		IMG_INT32 a, b;
+
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP] += ui32NumGrowReqByApp;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW]  += ui32NumGrowReqByFW;
+
+		a = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT];
+		b = (IMG_INT32)ui32InitFLPages;
+		UPDATE_MAX_VALUE(a, b);
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT] = a;
+		ui32InitFLPages = (IMG_UINT32)b;
+
+		a = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES];
+		b = (IMG_INT32)ui32NumHighPages;
+		UPDATE_MAX_VALUE(a, b);
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES] = a;
+		ui32NumHighPages = (IMG_UINT32)b;
+		OSLockRelease(psProcessStats->hLock);
+
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateFreelistStats */
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+/*************************************************************************/ /*!
+@Function       ProcessStatsPrintElements
+@Description    Prints all elements for this process statistic record.
+@Input          pvStatPtr         Pointer to statistics structure.
+@Input          pfnOSStatsPrintf  Printf function to use for output.
+*/ /**************************************************************************/
+void
+ProcessStatsPrintElements(void *pvFile,
+						  void *pvStatPtr,
+						  OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVRSRV_STAT_STRUCTURE_TYPE*  peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+	PVRSRV_PROCESS_STATS*	     psProcessStats  = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+	IMG_UINT32					 ui32StatNumber = 0;
+
+	if (peStructureType == NULL  ||  *peStructureType != PVRSRV_STAT_STRUCTURE_PROCESS)
+	{
+		PVR_ASSERT(peStructureType != NULL  &&  *peStructureType == PVRSRV_STAT_STRUCTURE_PROCESS);
+		return;
+	}
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		return;
+	}
+
+	/* Loop through all the values and print them... */
+	while (ui32StatNumber < PVRSRV_PROCESS_STAT_TYPE_COUNT)
+	{
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		if (OSAtomicRead(&psProcessStats->iMemRefCount) > 0)
+#else
+		if (psProcessStats->ui32MemRefCount > 0)
+#endif
+		{
+			pfnOSStatsPrintf(pvFile, pszProcessStatFmt[ui32StatNumber], psProcessStats->i32StatValue[ui32StatNumber]);
+		}
+		else
+		{
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			PVR_DPF((PVR_DBG_ERROR, "%s: Called with psProcessStats->iMemRefCount=%d", __FUNCTION__, OSAtomicRead(&psProcessStats->iMemRefCount)));
+#else
+			PVR_DPF((PVR_DBG_ERROR, "%s: Called with psProcessStats->ui32MemRefCount=%d", __FUNCTION__, psProcessStats->ui32MemRefCount));
+#endif
+		}
+		ui32StatNumber++;
+	}
+} /* ProcessStatsPrintElements */
+#endif
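+/*
+ * Sketch of a minimal OS_STATS_PRINTF_FUNC implementation (hypothetical,
+ * assuming the typedef takes a file cookie, a format string and varargs, as
+ * the calls above suggest). On Linux the pvFile cookie is typically a
+ * seq_file, so an adapter might look like:
+ *
+ *   static void StatsSeqPrintf(void *pvFile, const IMG_CHAR *pszFmt, ...)
+ *   {
+ *       va_list args;
+ *
+ *       va_start(args, pszFmt);
+ *       seq_vprintf((struct seq_file *)pvFile, pszFmt, args);
+ *       va_end(args);
+ *   }
+ *
+ *   ProcessStatsPrintElements(psSeqFile, psProcessStats, StatsSeqPrintf);
+ */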
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+void
+PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp,
+							IMG_UINT32 ui32OpSeqNum,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEV_PHYADDR sDevPAddr,
+							IMG_UINT32 eFenceOpType,
+#endif
+							IMG_DEVMEM_SIZE_T uiOffset,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_UINT64 ui64ExecuteTime,
+							IMG_BOOL bRangeBasedFlush,
+							IMG_BOOL bUserModeFlush,
+							IMG_BOOL bIsFence,
+							IMG_PID ownerPid)
+{
+	IMG_PID				   currentPid = (ownerPid != 0) ? ownerPid : OSGetCurrentClientProcessIDKM();
+	PVRSRV_PROCESS_STATS*  psProcessStats;
+
+	/* Don't do anything if we are not initialised or we are shutting down! */
+	if (!bProcessStatsInitialised)
+	{
+		return;
+	}
+
+	/* Lock while we find the correct process and update the record... */
+	OSLockAcquire(g_psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(currentPid);
+
+	if (psProcessStats != NULL)
+	{
+		IMG_INT32 Idx;
+
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+		/* Look-up next buffer write index */
+		Idx = psProcessStats->uiCacheOpWriteIndex;
+		psProcessStats->uiCacheOpWriteIndex = INCREMENT_CACHEOP_STAT_IDX_WRAP(Idx);
+
+		/* Store all CacheOp meta-data */
+		psProcessStats->asCacheOp[Idx].uiCacheOp = uiCacheOp;
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+		psProcessStats->asCacheOp[Idx].sDevVAddr = sDevVAddr;
+		psProcessStats->asCacheOp[Idx].sDevPAddr = sDevPAddr;
+		psProcessStats->asCacheOp[Idx].eFenceOpType = eFenceOpType;
+#endif
+		psProcessStats->asCacheOp[Idx].uiOffset = uiOffset;
+		psProcessStats->asCacheOp[Idx].uiSize = uiSize;
+		psProcessStats->asCacheOp[Idx].bRangeBasedFlush = bRangeBasedFlush;
+		psProcessStats->asCacheOp[Idx].bUserModeFlush = bUserModeFlush;
+		psProcessStats->asCacheOp[Idx].ui64ExecuteTime = ui64ExecuteTime;
+		psProcessStats->asCacheOp[Idx].ui32OpSeqNum = ui32OpSeqNum;
+		psProcessStats->asCacheOp[Idx].bIsFence = bIsFence;
+
+		OSLockRelease(psProcessStats->hLock);
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateCacheOpStats */
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+/*************************************************************************/ /*!
+@Function       CacheOpStatsPrintElements
+@Description    Prints all elements for this process statistic CacheOp record.
+@Input          pvStatPtr         Pointer to statistics structure.
+@Input          pfnOSStatsPrintf  Printf function to use for output.
+*/ /**************************************************************************/
+void
+CacheOpStatsPrintElements(void *pvFile,
+						  void *pvStatPtr,
+						  OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVRSRV_STAT_STRUCTURE_TYPE*  peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+	PVRSRV_PROCESS_STATS*		 psProcessStats  = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+	IMG_CHAR					 *pszCacheOpType, *pszFlushType, *pszFlushMode;
+	IMG_INT32 					 i32WriteIdx, i32ReadIdx;
+
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+	#define CACHEOP_RI_PRINTF_HEADER \
+		"%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12s %-12s\n"
+	#define CACHEOP_RI_PRINTF_FENCE	 \
+		"%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12llu 0x%-10x\n"
+	#define CACHEOP_RI_PRINTF		\
+		"%-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-12llu 0x%-10x\n"
+#else
+	#define CACHEOP_PRINTF_HEADER	\
+		"%-10s %-10s %-5s %-10s %-10s %-12s %-12s\n"
+	#define CACHEOP_PRINTF_FENCE	 \
+		"%-10s %-10s %-5s %-10s %-10s %-12llu 0x%-10x\n"
+	#define CACHEOP_PRINTF		 	\
+		"%-10s %-10s %-5s 0x%-8llx 0x%-8llx %-12llu 0x%-10x\n"
+#endif
+
+	if (peStructureType == NULL  ||
+		*peStructureType != PVRSRV_STAT_STRUCTURE_PROCESS ||
+		psProcessStats->psCacheOpStats->eStructureType != PVRSRV_STAT_STRUCTURE_CACHEOP)
+	{
+		PVR_ASSERT(peStructureType != NULL);
+		PVR_ASSERT(*peStructureType == PVRSRV_STAT_STRUCTURE_PROCESS);
+		PVR_ASSERT(psProcessStats->psCacheOpStats->eStructureType == PVRSRV_STAT_STRUCTURE_CACHEOP);
+		return;
+	}
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		return;
+	}
+
+	/* File header info */
+	pfnOSStatsPrintf(pvFile,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+					CACHEOP_RI_PRINTF_HEADER,
+#else
+					CACHEOP_PRINTF_HEADER,
+#endif
+					"CacheOp",
+					"Type",
+					"Mode",
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+					"DevVAddr",
+					"DevPAddr",
+#endif
+					"Offset",
+					"Size",
+					"Time (us)",
+					"SeqNo");
+
+	/* Take a snapshot of write index, read backwards in buffer 
+	   and wrap round at boundary */
+	i32WriteIdx = psProcessStats->uiCacheOpWriteIndex;
+	for (i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32WriteIdx);
+		 i32ReadIdx != i32WriteIdx;
+		 i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32ReadIdx))
+	{
+		IMG_UINT64 ui64ExecuteTime;
+
+		if (! psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum)
+		{
+			break;
+		}
+
+		ui64ExecuteTime = psProcessStats->asCacheOp[i32ReadIdx].ui64ExecuteTime;
+
+		if (psProcessStats->asCacheOp[i32ReadIdx].bIsFence)
+		{
+			IMG_CHAR *pszFenceType = "";
+			pszCacheOpType = "Fence";
+
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+			switch (psProcessStats->asCacheOp[i32ReadIdx].eFenceOpType)
+			{
+				case RGXFWIF_DM_GP:
+					pszFenceType = "GP";
+					break;
+
+				case RGXFWIF_DM_TDM:
+					/* Also case RGXFWIF_DM_2D: */
+					pszFenceType = "TDM/2D";
+					break;
+	
+				case RGXFWIF_DM_TA:
+					pszFenceType = "TA";
+					break;
+
+				case RGXFWIF_DM_3D:
+					pszFenceType = "3D";
+					break;
+
+				case RGXFWIF_DM_CDM:
+					pszFenceType = "CDM";
+					break;
+
+				case RGXFWIF_DM_RTU:
+					pszFenceType = "RTU";
+					break;
+	
+				case RGXFWIF_DM_SHG:
+					pszFenceType = "SHG";
+					break;
+
+				default:
+					PVR_ASSERT(0);
+					break;
+			}
+#endif
+
+			pfnOSStatsPrintf(pvFile,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+							CACHEOP_RI_PRINTF_FENCE,
+#else
+							CACHEOP_PRINTF_FENCE,
+#endif
+							pszCacheOpType,
+							pszFenceType,
+							"",
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+							"",
+							"",
+#endif
+							"",
+							"",
+							ui64ExecuteTime,
+							psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum);
+		}
+		else
+		{
+			if (psProcessStats->asCacheOp[i32ReadIdx].bRangeBasedFlush)
+			{
+				IMG_DEVMEM_SIZE_T ui64NumOfPages;
+	
+				ui64NumOfPages = psProcessStats->asCacheOp[i32ReadIdx].uiSize >> OSGetPageShift();
+				if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC)
+				{
+					pszFlushType = "RBF.Fast";
+				}
+				else
+				{
+					pszFlushType = "RBF.Slow";
+				}
+			}
+			else
+			{
+				pszFlushType = "GF";
+			}
+
+			if (psProcessStats->asCacheOp[i32ReadIdx].bUserModeFlush)
+			{
+				pszFlushMode = "UM";
+			}
+			else
+			{
+				pszFlushMode = "KM";
+			}
+
+			switch (psProcessStats->asCacheOp[i32ReadIdx].uiCacheOp)
+			{
+				case PVRSRV_CACHE_OP_NONE:
+					pszCacheOpType = "None";
+					break;
+				case PVRSRV_CACHE_OP_CLEAN:
+					pszCacheOpType = "Clean";
+					break;
+				case PVRSRV_CACHE_OP_INVALIDATE:
+					pszCacheOpType = "Invalidate";
+					break;
+				case PVRSRV_CACHE_OP_FLUSH:
+					pszCacheOpType = "Flush";
+					break;
+				default:
+					pszCacheOpType = "Unknown";
+					break;
+			}
+
+			pfnOSStatsPrintf(pvFile,
+#if defined(PVR_RI_DEBUG)  && defined(DEBUG)
+							CACHEOP_RI_PRINTF,
+#else
+							CACHEOP_PRINTF,
+#endif
+							pszCacheOpType,
+							pszFlushType,
+							pszFlushMode,
+#if defined(PVR_RI_DEBUG)  && defined(DEBUG)
+							psProcessStats->asCacheOp[i32ReadIdx].sDevVAddr.uiAddr,
+							psProcessStats->asCacheOp[i32ReadIdx].sDevPAddr.uiAddr,
+#endif
+							psProcessStats->asCacheOp[i32ReadIdx].uiOffset,
+							psProcessStats->asCacheOp[i32ReadIdx].uiSize,
+							ui64ExecuteTime,
+							psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum);
+		}
+	}
+} /* CacheOpStatsPrintElements */
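+/*
+ * Worked example of the ring traversal above, assuming the WRAP macros
+ * implement modulo-N index arithmetic over asCacheOp[]: with N = 4 and a
+ * write index of 2, entries are visited in the order 1, 0, 3 (newest first)
+ * and the walk stops either when it arrives back at index 2 or when it meets
+ * an entry whose ui32OpSeqNum is still 0, i.e. a slot never yet written.
+ */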
+#endif
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS) && defined(ENABLE_DEBUGFS_PIDS)
+/*************************************************************************/ /*!
+@Function       MemStatsPrintElements
+@Description    Prints all elements for the memory statistic record.
+@Input          pvStatPtr         Pointer to statistics structure.
+@Input          pfnOSStatsPrintf  Printf function to use for output.
+*/ /**************************************************************************/
+void
+MemStatsPrintElements(void *pvFile,
+					  void *pvStatPtr,
+					  OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVRSRV_STAT_STRUCTURE_TYPE*  peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+	PVRSRV_MEMORY_STATS*         psMemoryStats   = (PVRSRV_MEMORY_STATS*) pvStatPtr;
+	IMG_UINT32	ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32);
+	IMG_UINT32	ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32);
+	PVRSRV_MEM_ALLOC_REC  *psRecord;
+	IMG_UINT32  ui32ItemNumber;
+
+	if (peStructureType == NULL  ||  *peStructureType != PVRSRV_STAT_STRUCTURE_MEMORY)
+	{
+		PVR_ASSERT(peStructureType != NULL  &&  *peStructureType == PVRSRV_STAT_STRUCTURE_MEMORY);
+		return;
+	}
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		return;
+	}
+
+	/* Write the header... */
+	pfnOSStatsPrintf(pvFile, "Type                VAddress");
+	for (ui32ItemNumber = 1;  ui32ItemNumber < ui32VAddrFields;  ui32ItemNumber++)
+	{
+		pfnOSStatsPrintf(pvFile, "        ");
+	}
+
+	pfnOSStatsPrintf(pvFile, "  PAddress");
+	for (ui32ItemNumber = 1;  ui32ItemNumber < ui32PAddrFields;  ui32ItemNumber++)
+	{
+        pfnOSStatsPrintf(pvFile, "        ");
+	}
+
+    pfnOSStatsPrintf(pvFile, "  Size(bytes)\n");
+
+	/* The lock has to be held whilst moving through the memory list... */
+	OSLockAcquire(g_psLinkedListLock);
+	psRecord = psMemoryStats->psMemoryRecords;
+
+	while (psRecord != NULL)
+	{
+		IMG_BOOL bPrintStat = IMG_TRUE;
+
+		switch (psRecord->eAllocType)
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:      		pfnOSStatsPrintf(pvFile, "KMALLOC             "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:      		pfnOSStatsPrintf(pvFile, "VMALLOC             "); break;
+#else
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+														bPrintStat = IMG_FALSE; break;
+#endif
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:  pfnOSStatsPrintf(pvFile, "ALLOC_PAGES_PT_LMA  "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:  pfnOSStatsPrintf(pvFile, "ALLOC_PAGES_PT_UMA  "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:		pfnOSStatsPrintf(pvFile, "IOREMAP_PT_LMA      "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:			pfnOSStatsPrintf(pvFile, "VMAP_PT_UMA         "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: 	pfnOSStatsPrintf(pvFile, "ALLOC_LMA_PAGES     "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: 	pfnOSStatsPrintf(pvFile, "ALLOC_UMA_PAGES     "); break;
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: 	pfnOSStatsPrintf(pvFile, "MAP_UMA_LMA_PAGES   "); break;
+		default:										pfnOSStatsPrintf(pvFile, "INVALID             "); break;
+		}
+
+		if (bPrintStat)
+		{
+			for (ui32ItemNumber = 0;  ui32ItemNumber < ui32VAddrFields;  ui32ItemNumber++)
+			{
+				pfnOSStatsPrintf(pvFile, "%08x", *(((IMG_UINT32*) &psRecord->pvCpuVAddr) + ui32VAddrFields - ui32ItemNumber - 1));
+			}
+			pfnOSStatsPrintf(pvFile, "  ");
+
+			for (ui32ItemNumber = 0;  ui32ItemNumber < ui32PAddrFields;  ui32ItemNumber++)
+			{
+				pfnOSStatsPrintf(pvFile, "%08x", *(((IMG_UINT32*) &psRecord->sCpuPAddr.uiAddr) + ui32PAddrFields - ui32ItemNumber - 1));
+			}
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+			pfnOSStatsPrintf(pvFile, "  %u", psRecord->uiBytes);
+
+			pfnOSStatsPrintf(pvFile, "  %s", (IMG_CHAR*)psRecord->pvAllocdFromFile);
+
+			pfnOSStatsPrintf(pvFile, "  %d\n", psRecord->ui32AllocdFromLine);
+#else
+			pfnOSStatsPrintf(pvFile, "  %u\n", psRecord->uiBytes);
+#endif
+		}
+		/* Move to next record... */
+		psRecord = psRecord->psNext;
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+} /* MemStatsPrintElements */
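+/*
+ * Note on the word-by-word address printing above: the loop emits the
+ * IMG_UINT32 words of each address from the highest-indexed word down to
+ * word 0. On a little-endian 64-bit build (ui32VAddrFields == 2) word 1
+ * holds the upper 32 bits, so a pvCpuVAddr of e.g. 0x0000007fdeadbeef is
+ * printed as "0000007f" then "deadbeef", i.e. the value in natural hex
+ * order. The example value is illustrative only.
+ */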
+#endif
+
+#if defined(PVR_RI_DEBUG) && defined(ENABLE_DEBUGFS_PIDS)
+/*************************************************************************/ /*!
+@Function       RIMemStatsPrintElements
+@Description    Prints all elements for the RI Memory record.
+@Input          pvStatPtr         Pointer to statistics structure.
+@Input          pfnOSStatsPrintf  Printf function to use for output.
+*/ /**************************************************************************/
+void
+RIMemStatsPrintElements(void *pvFile,
+						void *pvStatPtr,
+						OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVRSRV_STAT_STRUCTURE_TYPE  *peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+	PVRSRV_RI_MEMORY_STATS		*psRIMemoryStats = (PVRSRV_RI_MEMORY_STATS*) pvStatPtr;
+	IMG_CHAR					*pszStatFmtText  = NULL;
+	IMG_HANDLE					*pRIHandle       = NULL;
+
+	if (peStructureType == NULL  ||  *peStructureType != PVRSRV_STAT_STRUCTURE_RIMEMORY)
+	{
+		PVR_ASSERT(peStructureType != NULL  &&  *peStructureType == PVRSRV_STAT_STRUCTURE_RIMEMORY);
+		return;
+	}
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		return;
+	}
+
+	/*
+	 *  Loop through the RI system to get each line of text.
+	 */
+	while (RIGetListEntryKM(psRIMemoryStats->pid,
+							&pRIHandle,
+							&pszStatFmtText))
+	{
+		pfnOSStatsPrintf(pvFile, "%s", pszStatFmtText);
+	}
+} /* RIMemStatsPrintElements */
+#endif
+
+static IMG_UINT32	ui32FirmwareStartTimestamp = 0;
+static IMG_UINT64	ui64FirmwareIdleDuration = 0;
+
+void SetFirmwareStartTime(IMG_UINT32 ui32Time)
+{
+	ui32FirmwareStartTimestamp = UPDATE_TIME(ui32FirmwareStartTimestamp, ui32Time);
+}
+
+void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration)
+{
+	ui64FirmwareIdleDuration = UPDATE_TIME(ui64FirmwareIdleDuration, ui64Duration);
+}
+
+static INLINE void PowerStatsPrintGroup(IMG_UINT32 *pui32Stats,
+                                        void *pvFile,
+                                        OS_STATS_PRINTF_FUNC *pfnPrintf,
+                                        PVRSRV_POWER_STAT_TYPE eForced,
+                                        PVRSRV_POWER_STAT_TYPE ePowerOn)
+{
+	IMG_UINT32 ui32Index;
+
+	ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, DEVICE);
+	pfnPrintf(pvFile, "  Pre-Device:  %9u\n", pui32Stats[ui32Index]);
+
+	ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, SYSTEM);
+	pfnPrintf(pvFile, "  Pre-System:  %9u\n", pui32Stats[ui32Index]);
+
+	ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, SYSTEM);
+	pfnPrintf(pvFile, "  Post-System: %9u\n", pui32Stats[ui32Index]);
+
+	ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, DEVICE);
+	pfnPrintf(pvFile, "  Post-Device: %9u\n", pui32Stats[ui32Index]);
+}
+
+void PowerStatsPrintElements(void *pvFile,
+							 void *pvStatPtr,
+							 OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	IMG_UINT32 *pui32Stats = &aui32PowerTimingStats[0];
+	IMG_UINT32 ui32Idx;
+
+	PVR_UNREFERENCED_PARAMETER(pvStatPtr);
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		return;
+	}
+
+	pfnOSStatsPrintf(pvFile, "Forced Power-on Transition (nanoseconds):\n");
+	PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, FORCED, POWER_ON);
+	pfnOSStatsPrintf(pvFile, "\n");
+
+	pfnOSStatsPrintf(pvFile, "Forced Power-off Transition (nanoseconds):\n");
+	PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, FORCED, POWER_OFF);
+	pfnOSStatsPrintf(pvFile, "\n");
+
+	pfnOSStatsPrintf(pvFile, "Not Forced Power-on Transition (nanoseconds):\n");
+	PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, NOT_FORCED, POWER_ON);
+	pfnOSStatsPrintf(pvFile, "\n");
+
+	pfnOSStatsPrintf(pvFile, "Not Forced Power-off Transition (nanoseconds):\n");
+	PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, NOT_FORCED, POWER_OFF);
+	pfnOSStatsPrintf(pvFile, "\n");
+
+	pfnOSStatsPrintf(pvFile, "FW bootup time (timer ticks): %u\n", ui32FirmwareStartTimestamp);
+	pfnOSStatsPrintf(pvFile, "Host Acknowledge Time for FW Idle Signal (timer ticks): %u\n", (IMG_UINT32)(ui64FirmwareIdleDuration));
+	pfnOSStatsPrintf(pvFile, "\n");
+
+	pfnOSStatsPrintf(pvFile, "Last %d Clock Speed Change Timers (nanoseconds):\n", NUM_EXTRA_POWER_STATS);
+	pfnOSStatsPrintf(pvFile, "Prepare DVFS\tDVFS Change\tPost DVFS\n");
+
+	for (ui32Idx = ui32ClockSpeedIndexStart; ui32Idx != ui32ClockSpeedIndexEnd; ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS)
+	{
+		pfnOSStatsPrintf(pvFile, "%12llu\t%11llu\t%9llu\n", asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration,
+						 asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration,
+						 asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration);
+	}
+
+} /* PowerStatsPrintElements */
+
+void GlobalStatsPrintElements(void *pvFile,
+							  void *pvStatPtr,
+							  OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf)
+{
+	PVR_UNREFERENCED_PARAMETER(pvStatPtr);
+
+	if (pfnOSGetStatsPrintf != NULL)
+	{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageKMalloc                %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageKMallocMax             %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageVMalloc                %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageVMallocMax             %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC_MAX));
+#endif
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocPTMemoryUMA       %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocPTMemoryUMAMax    %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA_MAX));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageVMapPTUMA              %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageVMapPTUMAMax           %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA_MAX));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocPTMemoryLMA       %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocPTMemoryLMAMax    %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA_MAX));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageIORemapPTLMA           %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageIORemapPTLMAMax        %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA_MAX));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemLMA         %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemLMAMax      %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA_MAX));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemUMA         %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemUMAMax      %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_MAX));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemUMAPool     %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemUMAPoolMax  %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL_MAX));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageMappedGPUMemUMA/LMA    %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA));
+		pfnOSGetStatsPrintf(pvFile, "MemoryUsageMappedGPUMemUMA/LMAMax %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA_MAX));
+	}
+}
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+static void StripBadChars( IMG_CHAR *psStr)
+{
+	IMG_INT	cc;
+
+	/* Remove any '/' chars that may be in the ProcName (kernel thread could contain these) */
+	for (cc = 0; cc < 30; cc++)
+	{
+		if (*psStr == '/')
+		{
+			*psStr = '-';
+		}
+		psStr++;
+	}
+}
+#endif
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVFindProcessMemStats
+@Description    Using the provided PID find memory stats for that process.
+                Memstats will be provided for live/connected processes only.
+                Memstat values provided by this API relate only to the physical
+                memory allocated by the process and does not relate to any of
+                the mapped or imported memory.
+@Input          pid                 Process to search for.
+@Input          ui32ArrSize         Size of the array where memstat
+                                    records will be stored.
+@Input          bAllProcessStats    Flag to denote whether stats are
+                                    requested for an individual process
+                                    or for all processes.
+@Input          ui32MemoryStats     Handle to the memory where memstats
+                                    are stored.
+@Output         Memory statistics records for the requested pid.
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *ui32MemoryStats)
+{
+	IMG_INT i;
+	PVRSRV_PROCESS_STATS* psProcessStats;
+
+	/* Validate that the caller's array is large enough for what is requested */
+	if (ui32MemoryStats == NULL ||
+	    ui32ArrSize < (bAllProcessStats ? (IMG_UINT32)PVRSRV_DRIVER_STAT_TYPE_COUNT
+	                                    : (IMG_UINT32)PVRSRV_PROCESS_STAT_TYPE_COUNT))
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (bAllProcessStats)
+	{
+		for (i = 0; i < PVRSRV_DRIVER_STAT_TYPE_COUNT; i++)
+		{
+			ui32MemoryStats[i] = GET_GLOBAL_STAT_VALUE(i);
+		}
+		return PVRSRV_OK;
+	}
+
+	/* Hold the list lock while we search and read from the Live List */
+	OSLockAcquire(g_psLinkedListLock);
+
+	/* Search for the given PID in the Live List */
+	psProcessStats = _FindProcessStatsInLiveList(pid);
+
+	if (psProcessStats == NULL)
+	{
+		OSLockRelease(g_psLinkedListLock);
+		PVR_DPF((PVR_DBG_ERROR, "Process %d not found. This process may not be live anymore.", (IMG_INT)pid));
+		return PVRSRV_ERROR_PROCESS_NOT_FOUND;
+	}
+
+	for (i = 0; i < PVRSRV_PROCESS_STAT_TYPE_COUNT; i++)
+	{
+		ui32MemoryStats[i] = psProcessStats->i32StatValue[i];
+	}
+
+	OSLockRelease(g_psLinkedListLock);
+
+	return PVRSRV_OK;
+
+} /* PVRSRVFindProcessMemStats */
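+/*
+ * Illustrative caller sketch (hypothetical, not part of the driver): to read
+ * the per-process stats for a single PID the array must have room for
+ * PVRSRV_PROCESS_STAT_TYPE_COUNT entries:
+ *
+ *   IMG_UINT32 aui32Stats[PVRSRV_PROCESS_STAT_TYPE_COUNT];
+ *   PVRSRV_ERROR eErr = PVRSRVFindProcessMemStats(pid,
+ *                                                 PVRSRV_PROCESS_STAT_TYPE_COUNT,
+ *                                                 IMG_FALSE,
+ *                                                 aui32Stats);
+ *   if (eErr == PVRSRV_OK)
+ *   {
+ *       ... aui32Stats[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] etc. are valid ...
+ *   }
+ */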
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pvr_notifier.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pvr_notifier.c
new file mode 100644
index 0000000..fa207d8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pvr_notifier.c
@@ -0,0 +1,479 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR notifier interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h"
+#include "device.h"
+#include "dllist.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrversion.h"
+
+
+/*************************************************************************/ /*!
+Command Complete Notifier Interface
+*/ /**************************************************************************/
+
+typedef struct PVRSRV_CMDCOMP_NOTIFY_TAG
+{
+	PVRSRV_CMDCOMP_HANDLE	hCmdCompHandle;
+	PFN_CMDCOMP_NOTIFY		pfnCmdCompleteNotify;
+	DLLIST_NODE				sListNode;
+} PVRSRV_CMDCOMP_NOTIFY;
+
+/* Head of the list of callbacks called when command complete happens */
+static DLLIST_NODE g_sCmdCompNotifyHead;
+static POSWR_LOCK g_hCmdCompNotifyLock;
+
+PVRSRV_ERROR
+PVRSRVCmdCompleteInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	eError = OSWRLockCreate(&g_hCmdCompNotifyLock);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	dllist_init(&g_sCmdCompNotifyHead);
+
+	return PVRSRV_OK;
+}
+
+void
+PVRSRVCmdCompleteDeinit(void)
+{
+	/* Check that all notify functions have been unregistered */
+	if (!dllist_is_empty(&g_sCmdCompNotifyHead))
+	{
+		PDLLIST_NODE psNode;
+
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Command complete notify list is not empty!", __func__));
+
+		/* Clean up any stragglers */
+		psNode = dllist_get_next_node(&g_sCmdCompNotifyHead);
+		while (psNode)
+		{
+			PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+			dllist_remove_node(psNode);
+			
+			psNotify = IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
+			OSFreeMem(psNotify);
+
+			psNode = dllist_get_next_node(&g_sCmdCompNotifyHead);
+		}
+	}
+
+	if (g_hCmdCompNotifyLock)
+	{
+		OSWRLockDestroy(g_hCmdCompNotifyLock);
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
+								PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
+								PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+	PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+	if (!phNotify || !pfnCmdCompleteNotify || !hCmdCompHandle)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p, %p, %p)",
+				 __func__, phNotify, pfnCmdCompleteNotify, hCmdCompHandle));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psNotify = OSAllocMem(sizeof(*psNotify));
+	if (!psNotify)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Not enough memory to allocate CmdCompleteNotify function",
+				 __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;		
+	}
+
+	/* Set-up the notify data */
+	psNotify->hCmdCompHandle = hCmdCompHandle;
+	psNotify->pfnCmdCompleteNotify = pfnCmdCompleteNotify;
+
+	/* Add it to the list of Notify functions */
+	OSWRLockAcquireWrite(g_hCmdCompNotifyLock);
+	dllist_add_to_tail(&g_sCmdCompNotifyHead, &psNotify->sListNode);
+	OSWRLockReleaseWrite(g_hCmdCompNotifyLock);
+
+	*phNotify = psNotify;
+
+	return PVRSRV_OK;
+}
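+/*
+ * Illustrative registration sketch (hypothetical device code, for
+ * documentation only): a component that blocks waiting for GPU progress
+ * registers a callback once, and PVRSRVCheckStatus() below invokes it
+ * whenever another requester signals command completion:
+ *
+ *   static void DevCmdCompleteCB(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+ *   {
+ *       ... re-check blocked work items for this handle ...
+ *   }
+ *
+ *   IMG_HANDLE hNotify;
+ *   eError = PVRSRVRegisterCmdCompleteNotify(&hNotify, DevCmdCompleteCB,
+ *                                            (PVRSRV_CMDCOMP_HANDLE)psDevNode);
+ *   ...
+ *   PVRSRVUnregisterCmdCompleteNotify(hNotify);
+ */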
+
+PVRSRV_ERROR
+PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify)
+{
+	PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+	psNotify = (PVRSRV_CMDCOMP_NOTIFY *) hNotify;
+	if (!psNotify)
+	{
+		PVR_DPF((PVR_DBG_ERROR," %s: Bad arguments (%p)", __func__, hNotify));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	OSWRLockAcquireWrite(g_hCmdCompNotifyLock);
+	dllist_remove_node(&psNotify->sListNode);
+	OSWRLockReleaseWrite(g_hCmdCompNotifyLock);
+
+	OSFreeMem(psNotify);
+
+	return PVRSRV_OK;
+}
+
+void
+PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+#if !defined(NO_HARDWARE)
+	DLLIST_NODE *psNode, *psNext;
+#endif
+
+	/* Call notify callbacks to check if blocked work items can now proceed */
+#if !defined(NO_HARDWARE)
+	OSWRLockAcquireRead(g_hCmdCompNotifyLock);
+	dllist_foreach_node(&g_sCmdCompNotifyHead, psNode, psNext)
+	{
+		PVRSRV_CMDCOMP_NOTIFY *psNotify =
+			IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
+
+		if (hCmdCompCallerHandle != psNotify->hCmdCompHandle)
+		{
+			psNotify->pfnCmdCompleteNotify(psNotify->hCmdCompHandle);
+		}
+	}
+	OSWRLockReleaseRead(g_hCmdCompNotifyLock);
+#endif
+
+	if (psPVRSRVData->hGlobalEventObject)
+	{
+		OSEventObjectSignal(psPVRSRVData->hGlobalEventObject);
+	}
+}
+
+/*************************************************************************/ /*!
+Debug Notifier Interface
+*/ /**************************************************************************/
+
+typedef struct DEBUG_REQUEST_ENTRY_TAG
+{
+	IMG_UINT32		ui32RequesterID;
+	DLLIST_NODE		sListHead;
+} DEBUG_REQUEST_ENTRY;
+
+typedef struct DEBUG_REQUEST_TABLE_TAG
+{
+	POSWR_LOCK				hLock;
+	IMG_UINT32				ui32RequestCount;
+	DEBUG_REQUEST_ENTRY		asEntry[1];
+} DEBUG_REQUEST_TABLE;
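+/*
+ * Note: asEntry[1] is the pre-C99 trailing-array idiom. The table is
+ * allocated in PVRSRVRegisterDbgTable() below with
+ * sizeof(DEBUG_REQUEST_TABLE) + (ui32Length - 1) * sizeof(DEBUG_REQUEST_ENTRY)
+ * bytes so that asEntry[] really holds ui32Length entries.
+ */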
+
+typedef struct DEBUG_REQUEST_NOTIFY_TAG
+{
+	PVRSRV_DEVICE_NODE		*psDevNode;
+	PVRSRV_DBGREQ_HANDLE	hDbgRequestHandle;
+	PFN_DBGREQ_NOTIFY		pfnDbgRequestNotify;
+	IMG_UINT32				ui32RequesterID;
+	DLLIST_NODE				sListNode;
+} DEBUG_REQUEST_NOTIFY;
+
+
+PVRSRV_ERROR
+PVRSRVRegisterDbgTable(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 *paui32Table,
+					   IMG_UINT32 ui32Length)
+{
+	DEBUG_REQUEST_TABLE *psDebugTable;
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError;
+
+	if (psDevNode->hDebugTable)
+	{
+		return PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED;
+	}
+
+	psDebugTable = OSAllocMem(sizeof(DEBUG_REQUEST_TABLE) +
+							  (sizeof(DEBUG_REQUEST_ENTRY) * (ui32Length-1)));
+	if (!psDebugTable)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	eError = OSWRLockCreate(&psDebugTable->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorFreeDebugTable;
+	}
+
+	psDebugTable->ui32RequestCount = ui32Length;
+
+	/* Init the list heads */
+	for (i = 0; i < ui32Length; i++)
+	{
+		psDebugTable->asEntry[i].ui32RequesterID = paui32Table[i];
+		dllist_init(&psDebugTable->asEntry[i].sListHead);
+	}
+
+	psDevNode->hDebugTable = (IMG_HANDLE *) psDebugTable;
+
+	return PVRSRV_OK;
+
+ErrorFreeDebugTable:
+	OSFreeMem(psDebugTable);
+	psDebugTable = NULL;
+
+	return eError;
+}
+
+void
+PVRSRVUnregisterDbgTable(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	DEBUG_REQUEST_TABLE *psDebugTable;
+	IMG_UINT32 i;
+
+	PVR_ASSERT(psDevNode->hDebugTable);
+	psDebugTable = (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+	psDevNode->hDebugTable = NULL;
+
+	for (i = 0; i < psDebugTable->ui32RequestCount; i++)
+	{
+		if (!dllist_is_empty(&psDebugTable->asEntry[i].sListHead))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Found registered callback(s) on %d",
+					 __func__, i));
+		}
+	}
+
+	OSWRLockDestroy(psDebugTable->hLock);
+	psDebugTable->hLock = NULL;
+
+	OSFreeMem(psDebugTable);
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify,
+							   PVRSRV_DEVICE_NODE *psDevNode,
+							   PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+							   IMG_UINT32 ui32RequesterID,
+							   PVRSRV_DBGREQ_HANDLE hDbgRequestHandle)
+{
+	DEBUG_REQUEST_TABLE *psDebugTable;
+	DEBUG_REQUEST_NOTIFY *psNotify;
+	PDLLIST_NODE psHead = NULL;
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError;
+
+	if (!phNotify || !psDevNode || !pfnDbgRequestNotify)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p, %p, %p)",
+				 __func__, phNotify, psDevNode, pfnDbgRequestNotify));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDebugTable = (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+
+	PVR_ASSERT(psDebugTable);
+
+	psNotify = OSAllocMem(sizeof(*psNotify));
+	if (!psNotify)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Not enough memory to allocate DbgRequestNotify structure",
+				 __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Set-up the notify data */
+	psNotify->psDevNode = psDevNode;
+	psNotify->hDbgRequestHandle = hDbgRequestHandle;
+	psNotify->pfnDbgRequestNotify = pfnDbgRequestNotify;
+	psNotify->ui32RequesterID = ui32RequesterID;
+
+	/* Lock down all the lists */
+	OSWRLockAcquireWrite(psDebugTable->hLock);
+
+	/* Find which list to add it to */
+	for (i = 0; i < psDebugTable->ui32RequestCount; i++)
+	{
+		if (psDebugTable->asEntry[i].ui32RequesterID == ui32RequesterID)
+		{
+			psHead = &psDebugTable->asEntry[i].sListHead;
+		}
+	}
+
+	if (!psHead)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to find debug requester", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ErrorReleaseLock;
+	}
+
+	/* Add it to the list of Notify functions */
+	dllist_add_to_tail(psHead, &psNotify->sListNode);
+
+	/* Unlock the lists */
+	OSWRLockReleaseWrite(psDebugTable->hLock);
+
+	*phNotify = psNotify;
+
+	return PVRSRV_OK;
+
+ErrorReleaseLock:
+	OSWRLockReleaseWrite(psDebugTable->hLock);
+	OSFreeMem(psNotify);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify)
+{
+	DEBUG_REQUEST_NOTIFY *psNotify = (DEBUG_REQUEST_NOTIFY *) hNotify;
+	DEBUG_REQUEST_TABLE *psDebugTable;
+
+	if (!psNotify)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p)", __func__, hNotify));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDebugTable = (DEBUG_REQUEST_TABLE *) psNotify->psDevNode->hDebugTable;
+
+	OSWRLockAcquireWrite(psDebugTable->hLock);
+	dllist_remove_node(&psNotify->sListNode);
+	OSWRLockReleaseWrite(psDebugTable->hLock);
+
+	OSFreeMem(psNotify);
+
+	return PVRSRV_OK;
+}
+
+void
+PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode,
+				   IMG_UINT32 ui32VerbLevel,
+				   DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				   void *pvDumpDebugFile)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	DEBUG_REQUEST_TABLE *psDebugTable =
+		(DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+	static const IMG_CHAR *apszVerbosityTable[] = { "Low", "Medium", "High" };
+	const IMG_CHAR *szVerbosityLevel;
+	IMG_UINT32 i;
+	IMG_UINT32 j;
+
+	static_assert(IMG_ARR_NUM_ELEMS(apszVerbosityTable) == DEBUG_REQUEST_VERBOSITY_MAX+1,
+	              "Incorrect number of verbosity levels");
+
+	PVR_ASSERT(psDebugTable);
+
+	OSWRLockAcquireRead(psDebugTable->hLock);
+
+	if (ui32VerbLevel < IMG_ARR_NUM_ELEMS(apszVerbosityTable))
+	{
+		szVerbosityLevel = apszVerbosityTable[ui32VerbLevel];
+	}
+	else
+	{
+		szVerbosityLevel = "unknown";
+		PVR_ASSERT(!"Invalid verbosity level received");
+	}
+
+	PVR_DUMPDEBUG_LOG("------------[ PVR DBG: START (%s) ]------------",
+			szVerbosityLevel);
+
+	PVR_DUMPDEBUG_LOG("DDK info: %s (%s) %s",
+					   PVRVERSION_STRING, PVR_BUILD_TYPE, PVR_BUILD_DIR);
+	PVR_DUMPDEBUG_LOG("Time now: %015" IMG_UINT64_FMTSPECx, OSClockus64());
+
+	switch (psPVRSRVData->eServicesState)
+	{
+		case PVRSRV_SERVICES_STATE_OK:
+			PVR_DUMPDEBUG_LOG("Services State: OK");
+			break;
+		case PVRSRV_SERVICES_STATE_BAD:
+			PVR_DUMPDEBUG_LOG("Services State: BAD");
+			break;
+		default:
+			PVR_DUMPDEBUG_LOG("Services State: UNKNOWN (%d)",
+							   psPVRSRVData->eServicesState);
+			break;
+	}
+
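+	/* Notifiers are invoked once per level from 0 up to the requested
+	 * verbosity, so each requester can emit progressively more detail. */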
+	/* For each verbosity level */
+	for (j = 0; j <= ui32VerbLevel; j++)
+	{
+		/* For each requester */
+		for (i = 0; i < psDebugTable->ui32RequestCount; i++)
+		{
+			DLLIST_NODE *psNode;
+			DLLIST_NODE *psNext;
+
+			dllist_foreach_node(&psDebugTable->asEntry[i].sListHead, psNode, psNext)
+			{
+				DEBUG_REQUEST_NOTIFY *psNotify =
+					IMG_CONTAINER_OF(psNode, DEBUG_REQUEST_NOTIFY, sListNode);
+				psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, j,
+								pfnDumpDebugPrintf, pvDumpDebugFile);
+			}
+		}
+	}
+
+	PVR_DUMPDEBUG_LOG("------------[ PVR DBG: END ]------------");
+	OSWRLockReleaseRead(psDebugTable->hLock);
+
+	if (!pfnDumpDebugPrintf)
+	{
+		/* Only notify the OS of an issue if the debug dump went to the OS log */
+		OSWarnOn(IMG_TRUE);
+	}
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pvrsrv.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pvrsrv.c
new file mode 100644
index 0000000..b1627b5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pvrsrv.c
@@ -0,0 +1,3506 @@
+/*************************************************************************/ /*!
+@File
+@Title          core services functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for core services functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxdebug.h"
+#include "handle.h"
+#include "connection_server.h"
+#include "pdump_km.h"
+#include "ra.h"
+#include "allocmem.h"
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "srvcore.h"
+#include "services_km.h"
+#include "pvrsrv_device.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "sync.h"
+#include "sync_server.h"
+#include "sync_checkpoint.h"
+#include "sync_fallback_server.h"
+#include "sync_checkpoint_init.h"
+#include "devicemem.h"
+#include "cache_km.h"
+#include "pvrsrv_pool.h"
+#include "info_page.h"
+
+#include "log2.h"
+
+#include "lists.h"
+#include "dllist.h"
+#include "syscommon.h"
+#include "sysvalidation.h"
+
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+#include "physmem_hostmem.h"
+
+#include "tlintern.h"
+#include "htbserver.h"
+
+#if defined(SUPPORT_RGX)
+#include "rgxinit.h"
+#include "rgxhwperf.h"
+#include "rgxfwutils.h"
+#endif
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	#if !defined(GPUVIRT_SIZEOF_ARENA0)
+		#define GPUVIRT_SIZEOF_ARENA0	(64 * 1024 * 1024) /* Give 64 MB of LMA memory to arena 0 for firmware and other allocations */
+	#endif
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "devicemem_history_server.h"
+#endif
+
+#if defined(PVR_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+
+#include "rgx_options.h"
+#include "srvinit.h"
+#include "rgxutils.h"
+
+#include "oskm_apphint.h"
+#include "pvrsrv_apphint.h"
+
+#include "rgx_bvnc_defs_km.h"
+
+#include "pvrsrv_tlstreams.h"
+#include "tlstream.h"
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+#include "pvr_gputrace.h"
+#endif
+
+/*! Wait 100ms before retrying deferred clean-up again */
+#define CLEANUP_THREAD_WAIT_RETRY_TIMEOUT 100000ULL
+
+/*! Wait 8hrs when no deferred clean-up required. Allows a poll several times
+ * a day to check for any missed clean-up. */
+#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT 28800000000ULL
+
+/*! When unloading try a few times to free everything remaining on the list */
+#define CLEANUP_THREAD_UNLOAD_RETRY 4
+
+#define PVRSRV_PROC_HANDLE_BASE_INIT 10
+
+#define PVRSRV_TL_CTLR_STREAM_SIZE 4096
+
+#define PVRSRV_MAX_POOLED_BRIDGE_BUFFERS 16 /*!< Max number of pooled bridge buffers */
+
+static PVRSRV_DATA	*gpsPVRSRVData = NULL;
+static IMG_UINT32 g_ui32InitFlags;
+
+/* mark which parts of Services were initialised */
+#define		INIT_DATA_ENABLE_PDUMPINIT	0x1U
+
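+/* The order of this table determines the order in which requesters are dumped by PVRSRVDebugRequest() */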
+static IMG_UINT32 g_aui32DebugOrderTable[] = {
+	DEBUG_REQUEST_SYS,
+	DEBUG_REQUEST_APPHINT,
+	DEBUG_REQUEST_HTB,
+	DEBUG_REQUEST_DC,
+	DEBUG_REQUEST_SYNCCHECKPOINT,
+	DEBUG_REQUEST_SERVERSYNC,
+	DEBUG_REQUEST_ANDROIDSYNC,
+	DEBUG_REQUEST_FALLBACKSYNC,
+	DEBUG_REQUEST_LINUXFENCE
+};
+
+/* Add work to the cleanup thread work list.
+ * The work item will be executed by the cleanup thread
+ */
+void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData)
+{
+	PVRSRV_DATA *psPVRSRVData;
+	PVRSRV_ERROR eError;
+
+	psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	PVR_ASSERT(psData != NULL);
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK || psPVRSRVData->bUnload)
+#else
+	if (psPVRSRVData->bUnload)
+#endif
+	{
+		CLEANUP_THREAD_FN pfnFree = psData->pfnFree;
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Cleanup thread has already quit: doing work immediately"));
+
+		eError = pfnFree(psData->pvData);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to free resource "
+						"(callback " IMG_PFN_FMTSPEC "). "
+						"Immediate free will not be retried.",
+						pfnFree));
+		}
+	}
+	else
+	{
+		/* add this work item to the list */
+		OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+		dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, &psData->sNode);
+		OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+		/* signal the cleanup thread to ensure this item gets processed */
+		eError = OSEventObjectSignal(psPVRSRVData->hCleanupEventObject);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+	}
+}
+
+/* Pop an item from the head of the cleanup thread work list */
+static INLINE DLLIST_NODE *_CleanupThreadWorkListPop(PVRSRV_DATA *psPVRSRVData)
+{
+	DLLIST_NODE *psNode;
+
+	OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+	psNode = dllist_get_next_node(&psPVRSRVData->sCleanupThreadWorkList);
+	if (psNode != NULL)
+	{
+		dllist_remove_node(psNode);
+	}
+	OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+	return psNode;
+}
+
+/* Process the cleanup thread work list */
+static IMG_BOOL _CleanupThreadProcessWorkList(PVRSRV_DATA *psPVRSRVData,
+                                              IMG_BOOL *pbUseGlobalEO)
+{
+	DLLIST_NODE *psNodeIter, *psNodeLast;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bNeedRetry = IMG_FALSE;
+
+	/* Any callback function which returns an error is moved to the back of
+	 * the list. Additional items can be added to the list at any time, so we
+	 * only iterate from the head of the list to the current tail (since the
+	 * tail may keep changing).
+	 */
+
+	OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+	psNodeLast = psPVRSRVData->sCleanupThreadWorkList.psPrevNode;
+	OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+	do
+	{
+		PVRSRV_CLEANUP_THREAD_WORK *psData;
+
+		psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData);
+
+		if (psNodeIter != NULL)
+		{
+			CLEANUP_THREAD_FN pfnFree;
+
+			psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode);
+
+			/* get the function pointer address here so we have access to it
+			 * in order to report the error in case of failure, without having
+			 * to depend on psData not having been freed
+			 */
+			pfnFree = psData->pfnFree;
+
+			*pbUseGlobalEO = psData->bDependsOnHW;
+			eError = pfnFree(psData->pvData);
+
+			if (eError != PVRSRV_OK)
+			{
+				/* move to back of the list, if this item's
+				 * retry count hasn't hit zero.
+				 */
+				if (psData->ui32RetryCount-- > 0)
+				{
+					OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+					dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, psNodeIter);
+					OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+					bNeedRetry = IMG_TRUE;
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "Failed to free resource "
+								"(callback " IMG_PFN_FMTSPEC "). "
+								"Retry limit reached",
+								pfnFree));
+				}
+			}
+		}
+	} while ((psNodeIter != NULL) && (psNodeIter != psNodeLast));
+
+	return bNeedRetry;
+}
+
+// #define CLEANUP_DPFL PVR_DBG_WARNING
+#define CLEANUP_DPFL    PVR_DBG_MESSAGE
+
+/* Create/initialise data required by the cleanup thread,
+ * before the cleanup thread is started
+ */
+static PVRSRV_ERROR _CleanupThreadPrepare(PVRSRV_DATA *psPVRSRVData)
+{
+	PVRSRV_ERROR eError;
+
+	/* Create the clean up event object */
+
+	eError = OSEventObjectCreate("PVRSRV_CLEANUP_EVENTOBJECT", &gpsPVRSRVData->hCleanupEventObject);
+	PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", Exit);
+
+	/* initialise the mutex and linked list required for the cleanup thread work list */
+
+	eError = OSLockCreate(&psPVRSRVData->hCleanupThreadWorkListLock, LOCK_TYPE_PASSIVE);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", Exit);
+
+	dllist_init(&psPVRSRVData->sCleanupThreadWorkList);
+
+Exit:
+	return eError;
+}
+
+static void CleanupThread(void *pvData)
+{
+	PVRSRV_DATA *psPVRSRVData = pvData;
+	IMG_BOOL     bRetryWorkList = IMG_FALSE;
+	IMG_HANDLE	 hGlobalEvent;
+	IMG_HANDLE	 hOSEvent;
+	PVRSRV_ERROR eRc;
+	IMG_BOOL bUseGlobalEO = IMG_FALSE;
+	IMG_UINT32 uiUnloadRetry = 0;
+
+	/* Store the process id (pid) of the clean-up thread */
+	psPVRSRVData->cleanupThreadPid = OSGetCurrentProcessID();
+
+	PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread starting... "));
+
+	/* Open an event on the clean up event object so we can listen on it,
+	 * abort the clean up thread and driver if this fails.
+	 */
+	eRc = OSEventObjectOpen(psPVRSRVData->hCleanupEventObject, &hOSEvent);
+	PVR_ASSERT(eRc == PVRSRV_OK);
+
+	eRc = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hGlobalEvent);
+	PVR_ASSERT(eRc == PVRSRV_OK);
+
+	/* While the driver is in a good state and is not being unloaded
+	PVR_DPF((PVR_DBG_MESSAGE, "\n(GPU Virtualization Validation): Calling RA_Add with base %u and size %u\n", 0, GPUVIRT_SIZEOF_ARENA0));
+	 */
+	while (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
+	{
+		IMG_HANDLE hEvent;
+
+		if (psPVRSRVData->bUnload)
+		{
+	if (!RA_Add(psDeviceNode->psOSidSubArena[0], 0, GPUVIRT_SIZEOF_ARENA0, 0, NULL))
+					uiUnloadRetry > CLEANUP_THREAD_UNLOAD_RETRY)
+			{
+				break;
+			}
+			uiUnloadRetry++;
+		}
+
+		/* Wait until signalled for deferred clean up OR wait for a
+		 * short period if the previous deferred clean up was not able
+		 * to release all the resources before trying again.
+		 * Bridge lock re-acquired on our behalf before the wait call returns.
+		 */
+
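+		/* Work that depends on HW progress retries on the global event
+		 * object, so a command-complete signal can wake us early. */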
+		if (bRetryWorkList && bUseGlobalEO)
+		{
+			hEvent = hGlobalEvent;
+	/* Since sub-arena[0] has already been populated, we now populate the rest, starting from 1 */
+		else
+		{
+			hEvent = hOSEvent;
+		PVR_DPF((PVR_DBG_MESSAGE, "\n[GPU Virtualization Validation]: Calling RA_Add with base %u and size %u\n", aui32OSidMin[0][uiCounter], aui32OSidMax[0][uiCounter]-aui32OSidMin[0][uiCounter]+1));
+
+		eRc = OSEventObjectWaitTimeout(hEvent,
+				bRetryWorkList ?
+				CLEANUP_THREAD_WAIT_RETRY_TIMEOUT :
+				CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT);
+		if (eRc == PVRSRV_ERROR_TIMEOUT)
+		{
+			PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait timeout"));
+		}
+		else if (eRc == PVRSRV_OK)
+		{
+			PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait OK, signal received"));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "CleanupThread: wait error %d", eRc));
+		}
+
+		bRetryWorkList = _CleanupThreadProcessWorkList(psPVRSRVData, &bUseGlobalEO);
+	}
+
+	OSLockDestroy(psPVRSRVData->hCleanupThreadWorkListLock);
+
+	eRc = OSEventObjectClose(hOSEvent);
+	PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose");
+
+	eRc = OSEventObjectClose(hGlobalEvent);
+	PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose");
+
+	PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread ending... "));
+}
+
+static IMG_BOOL DevicesWatchdogThread_Powered_Any(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_DEV_POWER_STATE ePowerState = PVRSRV_DEV_POWER_STATE_ON;
+	PVRSRV_ERROR eError;
+
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_RETRY)
+		{
+			/* Power lock cannot be acquired at this time (sys power is off) */
+			return IMG_FALSE;
+		}
+
+		/* Any other error is unexpected so we assume the device is on */
+		PVR_DPF((PVR_DBG_ERROR,
+				 "DevicesWatchdogThread: Failed to acquire power lock for device %p (%s)",
+				 psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+		return IMG_TRUE;
+	}
+
+	(void) PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	return (ePowerState == PVRSRV_DEV_POWER_STATE_ON) ? IMG_TRUE : IMG_FALSE;
+}
+
+static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode,
+											  va_list va)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	PVRSRV_DEVICE_HEALTH_STATUS *pePreviousHealthStatus, eHealthStatus;
+	PVRSRV_ERROR eError;
+
+	pePreviousHealthStatus = va_arg(va, PVRSRV_DEVICE_HEALTH_STATUS *);
+
+	if (psDeviceNode->pfnUpdateHealthStatus != NULL)
+	{
+		eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_TRUE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "DevicesWatchdogThread: "
+					 "Could not check for fatal error (%d)!",
+					 eError));
+		}
+	}
+	eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus);
+
+	if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_OK)
+	{
+		if (eHealthStatus != *pePreviousHealthStatus)
+		{
+			if (!(psDevInfo->ui32DeviceFlags &
+				  RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN))
+			{
+				PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: "
+						 "Device not responding!!!"));
+				PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
+								   NULL, NULL);
+			}
+		}
+	}
+
+	*pePreviousHealthStatus = eHealthStatus;
+}
+
+static void DevicesWatchdogThread(void *pvData)
+{
+	PVRSRV_DATA *psPVRSRVData = pvData;
+	PVRSRV_DEVICE_HEALTH_STATUS ePreviousHealthStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR  eError;
+	IMG_UINT32 ui32Timeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power off sleep time: %d.",
+			DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT));
+
+	/* Open an event on the devices watchdog event object so we can listen on it
+	   and abort the devices watchdog thread. */
+	eError = OSEventObjectOpen(psPVRSRVData->hDevicesWatchdogEvObj, &hOSEvent);
+	PVR_LOGRN_IF_ERROR(eError, "OSEventObjectOpen");
+
+	/* Loop continuously checking the device status every few seconds. */
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) &&
+			!psPVRSRVData->bUnload)
+#else
+	while (!psPVRSRVData->bUnload)
+#endif
+	{
+		IMG_BOOL bPwrIsOn = IMG_FALSE;
+
+		/* Wait time between polls (done at the start of the loop to allow devices
+		   to initialise) or for the event signal (shutdown or power on). */
+		eError = OSEventObjectWaitTimeout(hOSEvent, (IMG_UINT64)ui32Timeout * 1000);
+
+#ifdef PVR_TESTING_UTILS
+		psPVRSRVData->ui32DevicesWdWakeupCounter++;
+#endif
+		if (eError == PVRSRV_OK)
+		{
+			if (psPVRSRVData->bUnload)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event received."));
+				break;
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state change event received."));
+			}
+		}
+		else if (eError != PVRSRV_ERROR_TIMEOUT)
+		{
+			/* If timeout do nothing otherwise print warning message. */
+			PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: "
+					"Error (%d) when waiting for event!", eError));
+		}
+
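+		/* Poll at the shorter interval while any device is powered on or a
+		 * power transition occurred since the last pass; otherwise back off. */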
+		bPwrIsOn = List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList,
+														DevicesWatchdogThread_Powered_Any);
+		if (bPwrIsOn || psPVRSRVData->ui32DevicesWatchdogPwrTrans)
+		{
+			psPVRSRVData->ui32DevicesWatchdogPwrTrans = 0;
+			ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+		}
+		else
+		{
+			ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT;
+		}
+
+		List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+										   DevicesWatchdogThread_ForEachVaCb,
+										   &ePreviousHealthStatus);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+		SysPrintAndResetFaultStatusRegister();
+#endif
+	}
+
+	eError = OSEventObjectClose(hOSEvent);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+
+
+PVRSRV_DATA *PVRSRVGetPVRSRVData(void)
+{
+	return gpsPVRSRVData;
+}
+
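+/* Create the pseudo device node that backs host (CPU) memory allocations.
+ * Only a single UMA physical heap configuration is expected here (enforced
+ * by the asserts below). */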
+static PVRSRV_ERROR _HostMemDeviceCreate(void)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	PVRSRV_DEVICE_CONFIG *psDevConfig = HostMemGetDeviceConfig();
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* Assert ensures HostMemory device isn't already created and
+	 * that data is initialized */
+	PVR_ASSERT(psPVRSRVData->psHostMemDeviceNode == NULL);
+
+	/* for now, we only know a single heap (UMA) config for host device */
+	PVR_ASSERT(psDevConfig->ui32PhysHeapCount == 1 &&
+				psDevConfig->pasPhysHeaps[0].eType == PHYS_HEAP_TYPE_UMA);
+
+	/* N.B. in case of any failure in this function we simply return the error
+	   to the caller, as clean-up is taken care of by _HostMemDeviceDestroy() */
+
+	psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode));
+	PVR_LOGR_IF_NOMEM(psDeviceNode, "OSAllocZMem");
+
+	/* Save the return pointer early to aid clean-up */
+	psPVRSRVData->psHostMemDeviceNode = psDeviceNode;
+
+	psDeviceNode->psDevConfig = psDevConfig;
+	psDeviceNode->papsRegisteredPhysHeaps =
+		OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) *
+					psDevConfig->ui32PhysHeapCount);
+	PVR_LOGR_IF_NOMEM(psDeviceNode->papsRegisteredPhysHeaps, "OSAllocZMem");
+
+	eError = PhysHeapRegister(&psDevConfig->pasPhysHeaps[0],
+								  &psDeviceNode->papsRegisteredPhysHeaps[0]);
+	PVR_LOGR_IF_ERROR(eError, "PhysHeapRegister");
+	psDeviceNode->ui32RegisteredPhysHeaps = 1;
+
+	/* Only CPU local heap is valid on host-mem DevNode, so enable minimal callbacks */
+	eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL],
+							 &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]);
+	PVR_LOGR_IF_ERROR(eError, "PhysHeapAcquire");
+
+	psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+
+	return PVRSRV_OK;
+}
+
+static void _HostMemDeviceDestroy(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psHostMemDeviceNode;
+
+	if (!psDeviceNode)
+	{
+		return;
+	}
+
+	psPVRSRVData->psHostMemDeviceNode = NULL;
+	if (psDeviceNode->papsRegisteredPhysHeaps)
+	{
+		if (psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL])
+		{
+			PhysHeapRelease(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]);
+		}
+
+		if (psDeviceNode->papsRegisteredPhysHeaps[0])
+		{
+			/* the clean-up function, too, is only aware of a single heap */
+			PVR_ASSERT(psDeviceNode->ui32RegisteredPhysHeaps == 1);
+			PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[0]);
+		}
+
+		OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+	}
+	OSFreeMem(psDeviceNode);
+}
+
+PVRSRV_ERROR PVRSRVSuspendDriver(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hEvent;
+
+	eError = OSEventObjectOpen(psPVRSRVData->hDriverThreadEventObject, &hEvent);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__));
+		return eError;
+	}
+
+	OSLockAcquire(psPVRSRVData->hDriverThreadLock);
+
+	if (psPVRSRVData->bDriverSuspended)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Driver is already suspended", __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		OSLockRelease(psPVRSRVData->hDriverThreadLock);
+		goto out_put;
+	}
+	/* set to TRUE, so any new threads calling into the Server wait */
+	psPVRSRVData->bDriverSuspended = IMG_TRUE;
+	OSLockRelease(psPVRSRVData->hDriverThreadLock);
+
+	/* now wait for any threads currently in the server to exit */
+	while (OSAtomicRead(&psPVRSRVData->iNumActiveDriverThreads) != 0)
+	{
+		OSEventObjectWait(hEvent);
+	}
+
+out_put:
+	OSEventObjectClose(hEvent);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVUnsuspendDriver(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError;
+
+	if (!psPVRSRVData->bDriverSuspended)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Driver is not suspended", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* unsuspend the driver and then signal so any waiting threads
+	 * wake up
+	 */
+	OSLockAcquire(psPVRSRVData->hDriverThreadLock);
+	psPVRSRVData->bDriverSuspended = IMG_FALSE;
+	OSLockRelease(psPVRSRVData->hDriverThreadLock);
+	eError = OSEventObjectSignal(psPVRSRVData->hDriverThreadEventObject);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSEventObjectSignal failed: %s",
+								__func__,
+								PVRSRVGetErrorStringKM(eError)));
+	}
+
+	return eError;
+}
+
+/* Atomically wait for the driver to be unsuspended and increment
+ * the active thread count
+ */
+static PVRSRV_ERROR _WaitForDriverUnsuspend(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hEvent;
+	IMG_BOOL bReady = IMG_FALSE;
+
+	eError = OSEventObjectOpen(psPVRSRVData->hDriverThreadEventObject, &hEvent);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__));
+		return eError;
+	}
+
+	while (!bReady)
+	{
+		OSLockAcquire(psPVRSRVData->hDriverThreadLock);
+
+		bReady = !psPVRSRVData->bDriverSuspended;
+
+		if (bReady)
+		{
+			OSAtomicIncrement(&psPVRSRVData->iNumActiveDriverThreads);
+			OSLockRelease(psPVRSRVData->hDriverThreadLock);
+		}
+		else
+		{
+			OSLockRelease(psPVRSRVData->hDriverThreadLock);
+			OSEventObjectWait(hEvent);
+		}
+	}
+
+	OSEventObjectClose(hEvent);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVDriverThreadEnter(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	OSLockAcquire(psPVRSRVData->hDriverThreadLock);
+
+	if (!psPVRSRVData->bDriverSuspended)
+	{
+		OSAtomicIncrement(&psPVRSRVData->iNumActiveDriverThreads);
+		OSLockRelease(psPVRSRVData->hDriverThreadLock);
+	}
+	else
+	{
+		OSLockRelease(psPVRSRVData->hDriverThreadLock);
+		eError = _WaitForDriverUnsuspend();
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to wait for driver unsuspend: %s",
+										__func__,
+										PVRSRVGetErrorStringKM(eError)));
+		}
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVDriverThreadExit(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	IMG_BOOL bNeedSignal;
+
+	/* If the driver is being suspended then we need to signal the
+	 * event object, as the thread suspending the driver is waiting
+	 * for active threads to exit.
+	 */
+	OSLockAcquire(psPVRSRVData->hDriverThreadLock);
+
+	OSAtomicDecrement(&psPVRSRVData->iNumActiveDriverThreads);
+	bNeedSignal = psPVRSRVData->bDriverSuspended;
+
+	OSLockRelease(psPVRSRVData->hDriverThreadLock);
+
+	if (bNeedSignal)
+	{
+		eError = OSEventObjectSignal(psPVRSRVData->hDriverThreadEventObject);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal driver thread event object: %s",
+											__func__,
+											PVRSRVGetErrorStringKM(eError)));
+		}
+	}
+
+	return eError;
+}
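+
+/* Illustrative pairing sketch (not part of the original source): callers
+ * servicing bridge work are expected to bracket it with these calls so that
+ * PVRSRVSuspendDriver() can drain in-flight threads:
+ *
+ *     if (PVRSRVDriverThreadEnter() == PVRSRV_OK)
+ *     {
+ *         ...service the call...
+ *         PVRSRVDriverThreadExit();
+ *     }
+ */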
+
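+/* Pool callbacks for bridge message buffers: each pooled buffer is sized to
+ * hold both the input and output messages of a single bridge call. */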
+static PVRSRV_ERROR _BridgeBufferAlloc(void *pvPrivData, void **pvOut)
+{
+	PVR_UNREFERENCED_PARAMETER(pvPrivData);
+
+	*pvOut = OSAllocZMem(PVRSRV_MAX_BRIDGE_IN_SIZE +
+						PVRSRV_MAX_BRIDGE_OUT_SIZE);
+
+	if (*pvOut == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	return PVRSRV_OK;
+}
+
+static void _BridgeBufferFree(void *pvPrivData, void *pvFreeData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvPrivData);
+
+	OSFreeMem(pvFreeData);
+}
+
+PVRSRV_ERROR IMG_CALLCONV
+PVRSRVDriverInit(void)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DATA	*psPVRSRVData = NULL;
+
+	IMG_UINT32 ui32AppHintCleanupThreadPriority;
+	IMG_UINT32 ui32AppHintCleanupThreadWeight;
+	IMG_UINT32 ui32AppHintWatchdogThreadPriority;
+	IMG_UINT32 ui32AppHintWatchdogThreadWeight;
+
+	void *pvAppHintState = NULL;
+	IMG_UINT32 ui32AppHintDefault;
+
+	/*
+	 * As this function performs one time driver initialisation, use the
+	 * Services global device-independent data to determine whether or not
+	 * this function has already been called.
+	 */
+	if (gpsPVRSRVData)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Driver already initialised", __func__));
+		return PVRSRV_ERROR_ALREADY_EXISTS;
+	}
+
+	eError = PhysHeapInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/*
+	 * Allocate the device-independent data
+	 */
+	psPVRSRVData = OSAllocZMem(sizeof(*gpsPVRSRVData));
+	if (psPVRSRVData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto Error;
+	}
+
+	/* Now it is set up, point gpsPVRSRVData to the actual data */
+	gpsPVRSRVData = psPVRSRVData;
+
+	OSAtomicWrite(&psPVRSRVData->iNumActiveDriverThreads, 0);
+
+	eError = OSLockCreate(&psPVRSRVData->hDriverThreadLock, LOCK_TYPE_NONE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create driver thread event lock: %s",
+										__func__,
+										PVRSRVGetErrorStringKM(eError)));
+		goto Error;
+	}
+
+	eError = OSEventObjectCreate("Global driver thread event object",
+							&psPVRSRVData->hDriverThreadEventObject);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create global driver thread event object: %s",
+										__func__,
+										PVRSRVGetErrorStringKM(eError)));
+		goto Error;
+	}
+
+	eError = PVRSRVPoolCreate(_BridgeBufferAlloc,
+							_BridgeBufferFree,
+							PVRSRV_MAX_POOLED_BRIDGE_BUFFERS,
+							"Bridge buffer pool",
+							NULL,
+							&psPVRSRVData->psBridgeBufferPool);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create bridge buffer pool: %s",
+										__func__,
+										PVRSRVGetErrorStringKM(eError)));
+		goto Error;
+	}
+
+	/* Initialise any OS-specific data */
+	eError = OSInitEnvData();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/* Early init. server cache maintenance */
+	eError = CacheOpInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+#if defined(PVR_RI_DEBUG)
+	RIInitKM();
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	eError = DevicememHistoryInitKM();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: DevicememHistoryInitKM failed", __func__));
+		goto Error;
+	}
+#endif
+
+	eError = BridgeInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise bridge",
+				 __func__));
+		goto Error;
+	}
+
+	eError = PMRInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+	eError = DCInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+#endif
+
+	/* Initialise overall system state */
+	gpsPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_OK;
+
+	/* Create an event object */
+	eError = OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", &gpsPVRSRVData->hGlobalEventObject);
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+	gpsPVRSRVData->ui32GEOConsecutiveTimeouts = 0;
+
+	eError = PVRSRVCmdCompleteInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/* Initialise pdump */
+	eError = PDUMPINIT();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT;
+
+	eError = PVRSRVHandleInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	eError = _CleanupThreadPrepare(gpsPVRSRVData);
+	PVR_LOGG_IF_ERROR(eError, "_CleanupThreadPrepare", Error);
+
+	/* Create a thread which is used to do the deferred cleanup */
+	eError = OSThreadCreatePriority(&gpsPVRSRVData->hCleanupThread,
+							"pvr_defer_free",
+							CleanupThread,
+							gpsPVRSRVData,
+							OS_THREAD_LOWEST_PRIORITY);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create deferred cleanup thread",
+				 __func__));
+		goto Error;
+	}
+
+	OSCreateKMAppHintState(&pvAppHintState);
+	ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADPRIORITY;
+	OSGetKMAppHintUINT32(pvAppHintState, CleanupThreadPriority,
+	                     &ui32AppHintDefault, &ui32AppHintCleanupThreadPriority);
+	ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADWEIGHT;
+	OSGetKMAppHintUINT32(pvAppHintState, CleanupThreadWeight,
+	                     &ui32AppHintDefault, &ui32AppHintCleanupThreadWeight);
+	ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY;
+	OSGetKMAppHintUINT32(pvAppHintState, WatchdogThreadPriority,
+	                     &ui32AppHintDefault, &ui32AppHintWatchdogThreadPriority);
+	ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADWEIGHT;
+	OSGetKMAppHintUINT32(pvAppHintState, WatchdogThreadWeight,
+	                     &ui32AppHintDefault, &ui32AppHintWatchdogThreadWeight);
+	OSFreeKMAppHintState(pvAppHintState);
+	pvAppHintState = NULL;
+
+	eError = OSSetThreadPriority(gpsPVRSRVData->hCleanupThread,
+								 ui32AppHintCleanupThreadPriority,
+								 ui32AppHintCleanupThreadWeight);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set thread priority of deferred cleanup thread.",
+				 __func__));
+		goto Error;
+	}
+
+	/* Create the devices watchdog event object */
+	eError = OSEventObjectCreate("PVRSRV_DEVICESWATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hDevicesWatchdogEvObj);
+	PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", Error);
+
+	/* Create a thread which is used to detect fatal errors */
+	eError = OSThreadCreate(&gpsPVRSRVData->hDevicesWatchdogThread,
+							"pvr_device_wdg",
+							DevicesWatchdogThread,
+							gpsPVRSRVData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create devices watchdog thread",
+				 __func__));
+		goto Error;
+	}
+
+	eError = OSSetThreadPriority(gpsPVRSRVData->hDevicesWatchdogThread,
+								 ui32AppHintWatchdogThreadPriority,
+								 ui32AppHintWatchdogThreadWeight);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set thread priority of the watchdog thread.",
+				 __func__));
+		goto Error;
+	}
+
+	gpsPVRSRVData->psProcessHandleBase_Table = HASH_Create(PVRSRV_PROC_HANDLE_BASE_INIT);
+
+	if (gpsPVRSRVData->psProcessHandleBase_Table == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to create hash table for process handle base.",
+				__func__));
+		eError = PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE;
+		goto Error;
+	}
+
+	eError = OSLockCreate(&gpsPVRSRVData->hProcessHandleBase_Lock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to create lock for process handle base.",
+				__func__));
+		goto Error;
+	}
+
+	eError = _HostMemDeviceCreate();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	eError = InfoPageCreate(psPVRSRVData);
+	PVR_LOGG_IF_ERROR(eError, "InfoPageCreate", Error);
+
+	/* Initialise the Transport Layer */
+	eError = TLInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/* Initialise TL control stream */
+	eError = TLStreamCreate(&psPVRSRVData->hTLCtrlStream,
+	                        psPVRSRVData->psHostMemDeviceNode,
+	                        PVRSRV_TL_CTLR_STREAM, PVRSRV_TL_CTLR_STREAM_SIZE,
+	                        TL_OPMODE_DROP_OLDEST, NULL, NULL, NULL,
+	                        NULL);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to create TL control plane stream"
+		        " (%d).", eError));
+		psPVRSRVData->hTLCtrlStream = NULL;
+	}
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	eError = PVRGpuTraceSupportInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+#endif
+
+	RGXHWPerfClientInitAppHintCallbacks();
+
+	/* Late init. client cache maintenance via info. page */
+	eError = CacheOpInit2();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed to initialise the CacheOp framework (%d)",
+				__func__, eError));
+		goto Error;
+	}
+
+	eError = ServerSyncInitOnce(psPVRSRVData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to initialise sync server",
+				__func__));
+		goto Error;
+	}
+
+	return PVRSRV_OK;
+
+Error:
+	PVRSRVDriverDeInit();
+	return eError;
+}
+
+void IMG_CALLCONV
+PVRSRVDriverDeInit(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (gpsPVRSRVData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: missing device-independent data",
+				 __func__));
+		return;
+	}
+
+	gpsPVRSRVData->bUnload = IMG_TRUE;
+
+	if (gpsPVRSRVData->hProcessHandleBase_Lock)
+	{
+		OSLockDestroy(gpsPVRSRVData->hProcessHandleBase_Lock);
+		gpsPVRSRVData->hProcessHandleBase_Lock = NULL;
+	}
+
+	if (gpsPVRSRVData->psProcessHandleBase_Table)
+	{
+		HASH_Delete(gpsPVRSRVData->psProcessHandleBase_Table);
+		gpsPVRSRVData->psProcessHandleBase_Table = NULL;
+	}
+
+	if (gpsPVRSRVData->hGlobalEventObject)
+	{
+		OSEventObjectSignal(gpsPVRSRVData->hGlobalEventObject);
+	}
+
+	/* Stop and cleanup the devices watchdog thread */
+	if (gpsPVRSRVData->hDevicesWatchdogThread)
+	{
+		if (gpsPVRSRVData->hDevicesWatchdogEvObj)
+		{
+			eError = OSEventObjectSignal(gpsPVRSRVData->hDevicesWatchdogEvObj);
+			PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+		}
+		LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+		{
+			eError = OSThreadDestroy(gpsPVRSRVData->hDevicesWatchdogThread);
+			if (PVRSRV_OK == eError)
+			{
+				gpsPVRSRVData->hDevicesWatchdogThread = NULL;
+				break;
+			}
+			OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+		PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+	}
+
+	if (gpsPVRSRVData->hDevicesWatchdogEvObj)
+	{
+		eError = OSEventObjectDestroy(gpsPVRSRVData->hDevicesWatchdogEvObj);
+		gpsPVRSRVData->hDevicesWatchdogEvObj = NULL;
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+	}
+
+	/* Stop and cleanup the deferred clean up thread, event object and
+	 * deferred context list.
+	 */
+	if (gpsPVRSRVData->hCleanupThread)
+	{
+		if (gpsPVRSRVData->hCleanupEventObject)
+		{
+			eError = OSEventObjectSignal(gpsPVRSRVData->hCleanupEventObject);
+			PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+		}
+		LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+		{
+			eError = OSThreadDestroy(gpsPVRSRVData->hCleanupThread);
+			if (PVRSRV_OK == eError)
+			{
+				gpsPVRSRVData->hCleanupThread = NULL;
+				break;
+			}
+			OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+		PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+	}
+
+	if (gpsPVRSRVData->hCleanupEventObject)
+	{
+		eError = OSEventObjectDestroy(gpsPVRSRVData->hCleanupEventObject);
+		gpsPVRSRVData->hCleanupEventObject = NULL;
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+	}
+
+	/* Tear down the HTB before PVRSRVHandleDeInit() removes its TL handle.
+	 * (HTB de-init currently happens in device de-registration.) */
+	eError = HTBDeInit();
+	PVR_LOG_IF_ERROR(eError, "HTBDeInit");
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	PVRGpuTraceSupportDeInit();
+#endif
+
+	/* Tear down CacheOp framework information page first */
+	CacheOpDeInit2();
+
+	ServerSyncDeinitOnce(gpsPVRSRVData);
+
+	/* Close the TL control plane stream. */
+	TLStreamClose(gpsPVRSRVData->hTLCtrlStream);
+
+	/* Clean up Transport Layer resources that remain */
+	TLDeInit();
+
+	/* Clean up information page */
+	InfoPageDestroy(gpsPVRSRVData);
+
+	_HostMemDeviceDestroy();
+
+	eError = PVRSRVHandleDeInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVHandleDeInit failed", __func__));
+	}
+
+	/* deinitialise pdump */
+	if ((g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0)
+	{
+		PDUMPDEINIT();
+	}
+
+	/* destroy event object */
+	if (gpsPVRSRVData->hGlobalEventObject)
+	{
+		OSEventObjectDestroy(gpsPVRSRVData->hGlobalEventObject);
+		gpsPVRSRVData->hGlobalEventObject = NULL;
+	}
+
+	PVRSRVCmdCompleteDeinit();
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+	eError = DCDeInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: DCDeInit failed", __func__));
+	}
+#endif
+
+	eError = PMRDeInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PMRDeInit failed", __func__));
+	}
+
+	BridgeDeinit();
+
+#if defined(PVR_RI_DEBUG)
+	RIDeInitKM();
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	DevicememHistoryDeInitKM();
+#endif
+
+	CacheOpDeInit();
+
+	PVRSRVPoolDestroy(gpsPVRSRVData->psBridgeBufferPool);
+
+	eError = OSEventObjectDestroy(gpsPVRSRVData->hDriverThreadEventObject);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to destroy global driver thread event object",
+											__func__));
+	}
+
+	OSDeInitEnvData();
+
+	eError = PhysHeapDeinit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PhysHeapDeinit failed", __func__));
+	}
+
+	OSFreeMem(gpsPVRSRVData);
+	gpsPVRSRVData = NULL;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+static PVRSRV_ERROR CreateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	IMG_UINT	uiCounter = 0;
+
+	for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+	{
+		psDeviceNode->psOSidSubArena[uiCounter] =
+			RA_Create(psDeviceNode->apszRANames[0],
+					  OSGetPageShift(),			/* Use host page size, keeps things simple */
+					  RA_LOCKCLASS_0,			/* This arena doesn't use any other arenas. */
+					  NULL,					/* No Import */
+					  NULL,					/* No free import */
+					  NULL,					/* No import handle */
+					  IMG_FALSE);
+
+		if (psDeviceNode->psOSidSubArena[uiCounter] == NULL)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE,"\n(GPU Virtualization Validation): Calling RA_Add with base %u and size %u \n",0, GPUVIRT_SIZEOF_ARENA0));
+
+	/* Arena creation takes place before the client side reads the apphints and transfers them over the bridge. Since we don't yet
+	 * know how the memory is going to be partitioned, and since we already need some memory for the initial allocations that take place,
+	 * we populate the first sub-arena (0) with a span of 64 megabytes. This has been shown to be enough even for cases where EWS is allocated
+	 * memory in this sub-arena and a multi-app example is then executed. This pre-allocation also means that consistency must be maintained
+	 * between the apphints and reality: the OSid0 region in the apphints must start at 0 and end at 0x3FFFFFF. */
+
+	if (!RA_Add(psDeviceNode->psOSidSubArena[0], 0, GPUVIRT_SIZEOF_ARENA0, 0 , NULL))
+	{
+		RA_Delete(psDeviceNode->psOSidSubArena[0]);
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psDeviceNode->apsLocalDevMemArenas[0] = psDeviceNode->psOSidSubArena[0];
+
+	return PVRSRV_OK;
+}
+
+void PopulateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode,
+						  IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+						  IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS])
+{
+	IMG_UINT	uiCounter;
+
+	/* Since Sub Arena[0] has been populated already, now we populate the rest starting from 1*/
+
+	for (uiCounter = 1; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,"\n[GPU Virtualization Validation]: Calling RA_Add with base %u and size %u \n",aui32OSidMin[0][uiCounter], aui32OSidMax[0][uiCounter]-aui32OSidMin[0][uiCounter]+1));
+
+		if (!RA_Add(psDeviceNode->psOSidSubArena[uiCounter], aui32OSidMin[0][uiCounter], aui32OSidMax[0][uiCounter]-aui32OSidMin[0][uiCounter]+1, 0, NULL))
+		{
+			goto error;
+		}
+	}
+
+	#if defined(EMULATOR)
+	{
+		SysSetOSidRegisters(aui32OSidMin, aui32OSidMax);
+	}
+	#endif
+
+	return;
+
+error:
+	for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+	{
+		RA_Delete(psDeviceNode->psOSidSubArena[uiCounter]);
+	}
+
+	return;
+}
+
+#endif
+
+static void _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+					IMG_UINT32 ui32VerbLevel,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	/* Only dump info once */
+	if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_LOW)
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode =
+			(PVRSRV_DEVICE_NODE *) hDebugRequestHandle;
+
+		switch (psDeviceNode->eCurrentSysPowerState)
+		{
+			case PVRSRV_SYS_POWER_STATE_OFF:
+				PVR_DUMPDEBUG_LOG("Device System Power State: OFF");
+				break;
+			case PVRSRV_SYS_POWER_STATE_ON:
+				PVR_DUMPDEBUG_LOG("Device System Power State: ON");
+				break;
+			default:
+				PVR_DUMPDEBUG_LOG("Device System Power State: UNKNOWN (%d)",
+								   psDeviceNode->eCurrentSysPowerState);
+				break;
+		}
+
+		SysDebugInfo(psDeviceNode->psDevConfig, pfnDumpDebugPrintf, pvDumpDebugFile);
+	}
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceCreate(void *pvOSDevice,
+											 IMG_INT32 i32UMIdentifier,
+											 PVRSRV_DEVICE_NODE **ppsDeviceNode)
+{
+	PVRSRV_DATA				*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR			eError;
+	PVRSRV_DEVICE_CONFIG	*psDevConfig;
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+	PVRSRV_RGXDEV_INFO		*psDevInfo;
+	PVRSRV_DEVICE_PHYS_HEAP	physHeapIndex;
+	IMG_UINT32				i;
+	IMG_UINT32				ui32AppHintDefault;
+	IMG_UINT32				ui32AppHintDriverMode;
+	void *pvAppHintState    = NULL;
+
+	psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode));
+	if (!psDeviceNode)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate device node",
+				 __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psDeviceNode->sDevId.i32UMIdentifier = i32UMIdentifier;
+
+	eError = SysDevInit(pvOSDevice, &psDevConfig);
+	if (eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get device config (%s)",
+				 __func__, PVRSRVGetErrorStringKM(eError)));
+		goto ErrorFreeDeviceNode;
+	}
+
+	PVR_ASSERT(psDevConfig);
+	PVR_ASSERT(psDevConfig->pvOSDevice == pvOSDevice);
+	PVR_ASSERT(!psDevConfig->psDevNode);
+
+	/* Store the device node in the device config for the system layer to use */
+	psDevConfig->psDevNode = psDeviceNode;
+
+	psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT;
+	psDeviceNode->psDevConfig = psDevConfig;
+	psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON;
+	psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+	/* Read driver mode (i.e. native, host or guest) AppHint */
+	ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE;
+	OSCreateKMAppHintState(&pvAppHintState);
+	OSGetKMAppHintUINT32(pvAppHintState, DriverMode,
+						 &ui32AppHintDefault, &ui32AppHintDriverMode);
+	OSFreeKMAppHintState(pvAppHintState);
+	pvAppHintState = NULL;
+
+	/*
+	 * Driver mode AppHint comes in override and (default) non-override
+	 * values. Override values always take priority; otherwise, if the system
+	 * layer provides a callback, use it. If both of these are absent, use
+	 * the supplied (or default) non-override value.
+	 */
+	if (PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(ui32AppHintDriverMode))
+	{
+		psPVRSRVData->eDriverMode = PVRSRV_VZ_APPHINT_MODE(ui32AppHintDriverMode);
+	}
+	else if (psDeviceNode->psDevConfig->pfnSysDriverMode)
+	{
+		psPVRSRVData->eDriverMode = psDeviceNode->psDevConfig->pfnSysDriverMode();
+	}
+	else
+	{
+		psPVRSRVData->eDriverMode = PVRSRV_VZ_APPHINT_MODE(ui32AppHintDriverMode);
+	}
+
+	/*
+	 * Ensure that the supplied driver execution mode is consistent with the number
+	 * of OSIDs the firmware can support. Any failure here is (should be) fatal as
+	 * the requested driver mode cannot be supported by the firmware.
+	 */
+	switch (psPVRSRVData->eDriverMode)
+	{
+		case DRIVER_MODE_NATIVE:
+		/* Always supported mode */
+			break;
+
+		case DRIVER_MODE_HOST:
+		case DRIVER_MODE_GUEST:
+#if (RGXFW_NUM_OS == 1)
+			PVR_DPF((PVR_DBG_ERROR, "The number of firmware supported OSID(s) is 1"));
+			PVR_DPF((PVR_DBG_ERROR,	"Halting initialisation, cannot transition to %s mode",
+					psPVRSRVData->eDriverMode == DRIVER_MODE_HOST ? "host" : "guest"));
+			eError = PVRSRV_ERROR_NOT_SUPPORTED;
+			goto ErrorFreeDeviceNode;
+#endif
+			break;
+
+		default:
+			if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK)
+			{
+				/* Running on VZ capable BVNC, invalid driver mode enumeration integer value */
+				PVR_DPF((PVR_DBG_ERROR, "Halting initialisation due to invalid driver mode %d",
+						(IMG_INT32)psPVRSRVData->eDriverMode));
+				eError = PVRSRV_ERROR_NOT_SUPPORTED;
+				goto ErrorFreeDeviceNode;
+			}
+			else if ((IMG_INT32)psPVRSRVData->eDriverMode <  (IMG_INT32)DRIVER_MODE_NATIVE ||
+					 (IMG_INT32)psPVRSRVData->eDriverMode >= (IMG_INT32)RGXFW_NUM_OS)
+			{
+				/* Running on non-VZ capable BVNC so simulating OSID using eDriverMode but
+				   value is outside of permitted range */
+				PVR_DPF((PVR_DBG_ERROR,
+						"Halting initialisation, OSID %d is outside of range [0:%d] supported",
+						(IMG_INT)psPVRSRVData->eDriverMode, RGXFW_NUM_OS-1));
+				eError = PVRSRV_ERROR_NOT_SUPPORTED;
+				goto ErrorFreeDeviceNode;
+			}
+			break;
+	}
+
+	/* Perform additional VZ system initialisation */
+	eError = SysVzDevInit(psDevConfig);
+	if (eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed system virtualization initialisation (%s)",
+				 __func__, PVRSRVGetErrorStringKM(eError)));
+		goto ErrorFreeDeviceNode;
+	}
+
+	eError = PVRSRVRegisterDbgTable(psDeviceNode,
+									g_aui32DebugOrderTable,
+									IMG_ARR_NUM_ELEMS(g_aui32DebugOrderTable));
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorSysDevDeInit;
+	}
+
+	eError = OSLockCreate(&psDeviceNode->hPowerLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorUnregisterDbgTable;
+	}
+
+	/* Register the physical memory heaps */
+	psDeviceNode->papsRegisteredPhysHeaps =
+		OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) *
+					psDevConfig->ui32PhysHeapCount);
+	if (!psDeviceNode->papsRegisteredPhysHeaps)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorPowerLockDestroy;
+	}
+
+	for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++)
+	{
+		/* No real device should register a heap with ID same as host device's heap ID */
+		PVR_ASSERT(psDevConfig->pasPhysHeaps[i].ui32PhysHeapID != PHYS_HEAP_ID_HOSTMEM);
+
+		eError = PhysHeapRegister(&psDevConfig->pasPhysHeaps[i],
+								  &psDeviceNode->papsRegisteredPhysHeaps[i]);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to register physical heap %d (%s)",
+					 __func__, psDevConfig->pasPhysHeaps[i].ui32PhysHeapID,
+					 PVRSRVGetErrorStringKM(eError)));
+			goto ErrorPhysHeapsUnregister;
+		}
+
+		psDeviceNode->ui32RegisteredPhysHeaps++;
+	}
+
+	/*
+	 * The physical backing storage for the following physical heaps
+	 * [CPU,GPU,FW] may or may not come from the same underlying source
+	 */
+	eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+							 &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL physical memory heap",
+				 __func__));
+		goto ErrorPhysHeapsUnregister;
+	}
+
+	eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL],
+							 &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL physical memory heap",
+				 __func__));
+		goto ErrorPhysHeapsRelease;
+	}
+
+	eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL],
+							 &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL physical memory heap",
+				 __func__));
+		goto ErrorPhysHeapsRelease;
+	}
+
+	eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL],
+							 &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL physical memory heap",
+				 __func__));
+		goto ErrorPhysHeapsRelease;
+	}
+
+	/* Do we have card memory? If so create RAs to manage it */
+	if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+	{
+		RA_BASE_T uBase;
+		RA_LENGTH_T uSize;
+		IMG_UINT64 ui64Size;
+		IMG_CPU_PHYADDR sCpuPAddr;
+		IMG_DEV_PHYADDR sDevPAddr;
+
+		IMG_UINT32 ui32NumOfLMARegions;
+		IMG_UINT32 ui32RegionId;
+		PHYS_HEAP *psLMAHeap;
+
+		psLMAHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+		ui32NumOfLMARegions = PhysHeapNumberOfRegions(psLMAHeap);
+
+		if (ui32NumOfLMARegions == 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: LMA heap has no memory regions defined.", __func__));
+			eError = PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP;
+			goto ErrorPhysHeapsRelease;
+		}
+
+		/* Allocate memory for RA pointers and name strings */
+		psDeviceNode->apsLocalDevMemArenas = OSAllocMem(sizeof(RA_ARENA*) * ui32NumOfLMARegions);
+		psDeviceNode->ui32NumOfLocalMemArenas = ui32NumOfLMARegions;
+		psDeviceNode->apszRANames = OSAllocMem(ui32NumOfLMARegions * sizeof(IMG_PCHAR));
+
+		for (ui32RegionId = 0; ui32RegionId < ui32NumOfLMARegions; ui32RegionId++)
+		{
+			eError = PhysHeapRegionGetSize(psLMAHeap, ui32RegionId, &ui64Size);
+			if (eError != PVRSRV_OK)
+			{
+				/* We can only get here if there is a bug in this module */
+				PVR_ASSERT(IMG_FALSE);
+				return eError;
+			}
+
+			eError = PhysHeapRegionGetCpuPAddr(psLMAHeap, ui32RegionId, &sCpuPAddr);
+			if (eError != PVRSRV_OK)
+			{
+				/* We can only get here if there is a bug in this module */
+				PVR_ASSERT(IMG_FALSE);
+				return eError;
+			}
+
+			eError = PhysHeapRegionGetDevPAddr(psLMAHeap, ui32RegionId, &sDevPAddr);
+			if (eError != PVRSRV_OK)
+			{
+				/* We can only get here if there is a bug in this module */
+				PVR_ASSERT(IMG_FALSE);
+				return eError;
+			}
+
+			PVR_DPF((PVR_DBG_MESSAGE,
+					"Creating RA for card memory - region %d - 0x%016"
+					IMG_UINT64_FMTSPECx"-0x%016" IMG_UINT64_FMTSPECx,
+					 ui32RegionId, (IMG_UINT64) sCpuPAddr.uiAddr,
+					 sCpuPAddr.uiAddr + ui64Size));
+
+			psDeviceNode->apszRANames[ui32RegionId] =
+				OSAllocMem(PVRSRV_MAX_RA_NAME_LENGTH);
+			OSSNPrintf(psDeviceNode->apszRANames[ui32RegionId],
+					   PVRSRV_MAX_RA_NAME_LENGTH,
+					   "%s card mem",
+					   psDevConfig->pszName);
+
+			uBase = sDevPAddr.uiAddr;
+			uSize = (RA_LENGTH_T) ui64Size;
+			PVR_ASSERT(uSize == ui64Size);
+
+			/* Use host page size, keeps things simple */
+			psDeviceNode->apsLocalDevMemArenas[ui32RegionId] =
+				RA_Create(psDeviceNode->apszRANames[ui32RegionId],
+						  OSGetPageShift(), RA_LOCKCLASS_0, NULL, NULL, NULL,
+						  IMG_FALSE);
+
+			if (psDeviceNode->apsLocalDevMemArenas[ui32RegionId] == NULL)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create LMA memory arena",
+						 __func__));
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto ErrorRAsDelete;
+			}
+
+			if (!RA_Add(psDeviceNode->apsLocalDevMemArenas[ui32RegionId],
+						uBase, uSize, 0, NULL))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to add memory to LMA memory arena",
+						 __func__));
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto ErrorRAsDelete;
+			}
+		}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+		eError = CreateLMASubArenas(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to create LMA memory sub-arenas", __func__));
+			goto ErrorRAsDelete;
+		}
+#endif
+
+		/* If additional psDeviceNode->pfnDevPx* callbacks are added,
+		   update the corresponding virtualization-specific override
+		   in pvrsrv_vz.c:PVRSRVVzDeviceCreate() */
+		psDeviceNode->pfnDevPxAlloc = LMA_PhyContigPagesAlloc;
+		psDeviceNode->pfnDevPxFree = LMA_PhyContigPagesFree;
+		psDeviceNode->pfnDevPxMap = LMA_PhyContigPagesMap;
+		psDeviceNode->pfnDevPxUnMap = LMA_PhyContigPagesUnmap;
+		psDeviceNode->pfnDevPxClean = LMA_PhyContigPagesClean;
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory only, no local card memory"));
+
+		/* else we only have OS system memory */
+		psDeviceNode->pfnDevPxAlloc = OSPhyContigPagesAlloc;
+		psDeviceNode->pfnDevPxFree = OSPhyContigPagesFree;
+		psDeviceNode->pfnDevPxMap = OSPhyContigPagesMap;
+		psDeviceNode->pfnDevPxUnMap = OSPhyContigPagesUnmap;
+		psDeviceNode->pfnDevPxClean = OSPhyContigPagesClean;
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+	}
+
+	if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== Local card memory only, no OS system memory"));
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory, 2nd phys heap"));
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+	}
+
+	if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== Local card memory only, no OS system memory"));
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewLocalRamBackedPMR;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory, 3rd phys heap"));
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewOSRamBackedPMR;
+	}
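+
+	/* Summary (explanatory note, not from the original source): each of the
+	 * three device phys heaps above is independently given either the LMA
+	 * PMR factory (PhysmemNewLocalRamBackedPMR) or the OS/UMA one
+	 * (PhysmemNewOSRamBackedPMR) based on its PhysHeapGetType() result, so
+	 * mixed LMA/UMA systems are handled per heap.
+	 */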
+
+	psDeviceNode->uiMMUPxLog2AllocGran = OSGetPageShift();
+
+	eError = ServerSyncInit(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorRAsDelete;
+	}
+
+	eError = SyncCheckpointInit(psDeviceNode);
+	PVR_LOG_IF_ERROR(eError, "SyncCheckpointInit");
+
+	/* Perform additional vz initialization */
+	eError = PVRSRVVzDeviceCreate(psDeviceNode);
+	PVR_LOG_IF_ERROR(eError, "PVRSRVVzDeviceCreate");
+
+	/*
+	 * This is registered before doing device specific initialisation to ensure
+	 * generic device information is dumped first during a debug request.
+	 */
+	eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hDbgReqNotify,
+											psDeviceNode,
+											_SysDebugRequestNotify,
+											DEBUG_REQUEST_SYS,
+											psDeviceNode);
+	PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+	eError = HTBDeviceCreate(psDeviceNode);
+	PVR_LOG_IF_ERROR(eError, "HTBDeviceCreate");
+
+	psPVRSRVData->ui32RegisteredDevices++;
+
+#if defined(SUPPORT_RGX)
+	eError = RGXRegisterDevice(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register device", __func__));
+		eError = PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
+		goto ErrorDecrementDeviceCount;
+	}
+#endif
+
+#if defined(PVR_DVFS)
+	eError = InitDVFS(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to start DVFS", __func__));
+#if defined(SUPPORT_RGX)
+		DevDeInitRGX(psDeviceNode);
+#endif
+		goto ErrorDecrementDeviceCount;
+	}
+#endif
+
+	OSAtomicWrite(&psDeviceNode->iNumClockSpeedChanges, 0);
+
+#if defined(PVR_TESTING_UTILS)
+	TUtilsInit(psDeviceNode);
+#endif
+
+	dllist_init(&psDeviceNode->sMemoryContextPageFaultNotifyListHead);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Registered device %p", psDeviceNode));
+	PVR_DPF((PVR_DBG_MESSAGE, "Register bank address = 0x%08lx",
+			 (unsigned long)psDevConfig->sRegsCpuPBase.uiAddr));
+	PVR_DPF((PVR_DBG_MESSAGE, "IRQ = %d", psDevConfig->ui32IRQ));
+
+	/* Finally insert the device into the dev-list and set it as active */
+	List_PVRSRV_DEVICE_NODE_InsertTail(&psPVRSRVData->psDeviceNodeList,
+									   psDeviceNode);
+
+	*ppsDeviceNode = psDeviceNode;
+
+	return PVRSRV_OK;
+
+#if defined(SUPPORT_RGX) || defined(PVR_DVFS)
+ErrorDecrementDeviceCount:
+	psPVRSRVData->ui32RegisteredDevices--;
+
+	if (psDeviceNode->hDbgReqNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify);
+	}
+
+	/* Perform vz deinitialization */
+	PVRSRVVzDeviceDestroy(psDeviceNode);
+
+	ServerSyncDeinit(psDeviceNode);
+#endif
+ErrorRAsDelete:
+	{
+		IMG_UINT32 ui32RegionId;
+
+		for (ui32RegionId = 0;
+			 ui32RegionId < psDeviceNode->ui32NumOfLocalMemArenas;
+			 ui32RegionId++)
+		{
+			if (psDeviceNode->apsLocalDevMemArenas[ui32RegionId])
+			{
+				RA_Delete(psDeviceNode->apsLocalDevMemArenas[ui32RegionId]);
+			}
+		}
+	}
+
+ErrorPhysHeapsRelease:
+	for (physHeapIndex = 0;
+		 physHeapIndex < IMG_ARR_NUM_ELEMS(psDeviceNode->apsPhysHeap);
+		 physHeapIndex++)
+	{
+		if (psDeviceNode->apsPhysHeap[physHeapIndex])
+		{
+			PhysHeapRelease(psDeviceNode->apsPhysHeap[physHeapIndex]);
+		}
+	}
+ErrorPhysHeapsUnregister:
+	for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++)
+	{
+		PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[i]);
+	}
+
+	OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+ErrorPowerLockDestroy:
+	OSLockDestroy(psDeviceNode->hPowerLock);
+ErrorUnregisterDbgTable:
+	PVRSRVUnregisterDbgTable(psDeviceNode);
+ErrorSysDevDeInit:
+	psDevConfig->psDevNode = NULL;
+	SysVzDevDeInit(psDevConfig);
+	SysDevDeInit(psDevConfig);
+ErrorFreeDeviceNode:
+	OSFreeMem(psDeviceNode);
+	return eError;
+}
+
+static PVRSRV_ERROR _SetDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice,
+                                  const void *psPrivate, IMG_BOOL bValue)
+{
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+	IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+
+	if (!ui32Flag)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eResult = RGXSetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+	                            ui32Flag, bValue);
+
+	return eResult;
+}
+
+static PVRSRV_ERROR _ReadDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice,
+                                   const void *psPrivate, IMG_BOOL *pbValue)
+{
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+	IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+	IMG_UINT32 ui32State;
+
+	if (!ui32Flag)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eResult = RGXGetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+	                            &ui32State);
+
+	if (PVRSRV_OK == eResult)
+	{
+		*pbValue = (ui32State & ui32Flag) ? IMG_TRUE : IMG_FALSE;
+	}
+
+	return eResult;
+}
+
+static PVRSRV_ERROR _SetStateFlag(const PVRSRV_DEVICE_NODE *psDevice,
+                                  const void *psPrivate, IMG_BOOL bValue)
+{
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+	IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+
+	if (!ui32Flag)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* EnableHWR is a special case
+	 * only possible to disable after FW is running
+	 */
+	if (bValue && RGXFWIF_INICFG_HWR_EN == ui32Flag)
+	{
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+	}
+
+	eResult = RGXStateFlagCtrl((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+	                           ui32Flag, NULL, bValue);
+
+	return eResult;
+}
+
+static PVRSRV_ERROR _ReadStateFlag(const PVRSRV_DEVICE_NODE *psDevice,
+                                   const void *psPrivate, IMG_BOOL *pbValue)
+{
+	IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+	IMG_UINT32 ui32State;
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevice->pvDevice;
+
+	if (!ui32Flag)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	ui32State = psDevInfo->psFWIfOSConfig->ui32ConfigFlags;
+
+	if (pbValue)
+	{
+		*pbValue = (ui32State & ui32Flag) ? IMG_TRUE : IMG_FALSE;
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	IMG_BOOL bInitSuccessful = IMG_FALSE;
+	PVRSRV_ERROR eError;
+
+	if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_INIT)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Device already initialised", __func__));
+		return PVRSRV_ERROR_INIT_FAILURE;
+	}
+
+#if defined(SUPPORT_RGX)
+	eError = RGXInit(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Initialisation of Rogue device failed (%s)",
+				 __func__, PVRSRVGetErrorStringKM(eError)));
+		goto Exit;
+	}
+#endif
+
+	bInitSuccessful = IMG_TRUE;
+
+#if defined(SUPPORT_RGX)
+Exit:
+#endif
+	eError = PVRSRVDeviceFinalise(psDeviceNode, bInitSuccessful);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Services failed to finalise the device (%s)",
+				 __func__, PVRSRVGetErrorStringKM(eError)));
+	}
+
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableClockGating,
+	                                  _ReadStateFlag, _SetStateFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_CLKGATING_EN));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableDMOverlap,
+	                                  _ReadStateFlag, _SetStateFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_DM_OVERLAP));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOnHWRTrigger,
+	                                  _ReadStateFlag, _SetStateFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOutOfMemory,
+	                                  _ReadStateFlag, _SetStateFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_CheckMList,
+	                                  _ReadStateFlag, _SetStateFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXFWIF_INICFG_CHECK_MLIST_EN));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableHWR,
+	                                  _ReadStateFlag, _SetStateFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXFWIF_INICFG_HWR_EN));
+
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableFEDLogging,
+	                                  _ReadDeviceFlag, _SetDeviceFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_ZeroFreelist,
+	                                  _ReadDeviceFlag, _SetDeviceFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXKMIF_DEVICE_STATE_ZERO_FREELIST));
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DustRequestInject,
+	                                  _ReadDeviceFlag, _SetDeviceFlag,
+	                                  psDeviceNode,
+	                                  (void*)((uintptr_t)RGXKMIF_DEVICE_STATE_DUST_REQUEST_INJECT_EN));
+
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisablePDumpPanic,
+	                                  RGXQueryPdumpPanicEnable, RGXSetPdumpPanicEnable,
+	                                  psDeviceNode,
+	                                  NULL);
+	return eError;
+}
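+
+/* Explanatory note (not from the original source): each AppHint registered
+ * above packs the RGXFWIF_INICFG_* or RGXKMIF_DEVICE_STATE_* flag it
+ * controls into the opaque private pointer via a uintptr_t cast, e.g.
+ *
+ *     (void *)((uintptr_t)RGXFWIF_INICFG_HWR_EN)
+ *
+ * which _ReadStateFlag/_SetStateFlag (and the device-flag variants) decode
+ * back with (IMG_UINT32)((uintptr_t)psPrivate); a zero flag is rejected as
+ * PVRSRV_ERROR_INVALID_PARAMS.
+ */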
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_DATA				*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx;
+	IMG_UINT32 				ui32RegionIdx;
+	IMG_UINT32				i;
+	PVRSRV_ERROR			eError;
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	IMG_BOOL				bForceUnload = IMG_FALSE;
+
+	if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		bForceUnload = IMG_TRUE;
+	}
+#endif
+
+	psPVRSRVData->ui32RegisteredDevices--;
+
+	psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_DEINIT;
+
+#if defined(PVR_TESTING_UTILS)
+	TUtilsDeinit(psDeviceNode);
+#endif
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	SyncFbDeregisterDevice(psDeviceNode);
+#endif
+	/* Counter part to what gets done in PVRSRVDeviceFinalise */
+	if (psDeviceNode->hSyncCheckpointContext)
+	{
+		SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext);
+		psDeviceNode->hSyncCheckpointContext = NULL;
+	}
+	if (psDeviceNode->hSyncPrimContext)
+	{
+		if (psDeviceNode->psSyncPrim)
+		{
+			/* Free general purpose sync primitive */
+			SyncPrimFree(psDeviceNode->psSyncPrim);
+			psDeviceNode->psSyncPrim = NULL;
+		}
+
+		if (psDeviceNode->psMMUCacheSyncPrim)
+		{
+			PVRSRV_CLIENT_SYNC_PRIM *psSync = psDeviceNode->psMMUCacheSyncPrim;
+
+			/* Important to set the device node pointer to NULL
+			 * before we free the sync-prim to make sure we don't
+			 * defer the freeing of the sync-prim's page tables itself.
+			 * The sync is used to defer the MMU page table
+			 * freeing. */
+			psDeviceNode->psMMUCacheSyncPrim = NULL;
+
+			/* Free general purpose sync primitive */
+			SyncPrimFree(psSync);
+
+		}
+
+		SyncPrimContextDestroy(psDeviceNode->hSyncPrimContext);
+		psDeviceNode->hSyncPrimContext = NULL;
+	}
+
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire power lock", __func__));
+		return eError;
+	}
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+		if (bForceUnload)
+		{
+			/*
+			 * Firmware probably not responding but we still want to unload the
+			 * driver.
+			 */
+			break;
+		}
+#endif
+		/* Force idle device */
+		eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
+		if (eError == PVRSRV_OK)
+		{
+			break;
+		}
+		else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+		{
+			PVRSRV_ERROR eError2;
+
+			PVRSRVPowerUnlock(psDeviceNode);
+
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+
+			eError2 = PVRSRVPowerLock(psDeviceNode);
+			if (eError2 != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire power lock",
+						 __func__));
+				return eError2;
+			}
+		}
+		else
+		{
+			PVRSRVPowerUnlock(psDeviceNode);
+			return eError;
+		}
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle DENIED", __func__));
+		PVRSRVPowerUnlock(psDeviceNode);
+		return eError;
+	}
+
+	/* Power down the device if necessary */
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+										 PVRSRV_DEV_POWER_STATE_OFF,
+										 IMG_TRUE);
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed PVRSRVSetDevicePowerStateKM call (%s). Dump debug.",
+				 __func__, PVRSRVGetErrorStringKM(eError)));
+
+		PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+
+		/*
+		 * If the driver is okay then return the error, otherwise we can ignore
+		 * this error.
+		 */
+		if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+		{
+			return eError;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+					 "%s: Will continue to unregister as driver status is not OK",
+					 __func__));
+		}
+	}
+
+#if defined(SUPPORT_RGX)
+	DevDeInitRGX(psDeviceNode);
+#endif
+
+	HTBDeviceDestroy(psDeviceNode);
+
+	if (psDeviceNode->hDbgReqNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify);
+	}
+
+	SyncCheckpointDeinit(psDeviceNode);
+
+	ServerSyncDeinit(psDeviceNode);
+
+	/* Remove RAs and RA names for local card memory */
+	for (ui32RegionIdx = 0;
+		 ui32RegionIdx < psDeviceNode->ui32NumOfLocalMemArenas;
+		 ui32RegionIdx++)
+	{
+		if (psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx])
+		{
+			RA_Delete(psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx]);
+			psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx] = NULL;
+		}
+
+		if (psDeviceNode->apszRANames[ui32RegionIdx])
+		{
+			OSFreeMem(psDeviceNode->apszRANames[ui32RegionIdx]);
+			psDeviceNode->apszRANames[ui32RegionIdx] = NULL;
+		}
+	}
+
+	if (psDeviceNode->apsLocalDevMemArenas)
+	{
+		OSFreeMem(psDeviceNode->apsLocalDevMemArenas);
+		psDeviceNode->apsLocalDevMemArenas = NULL;
+	}
+	if (psDeviceNode->apszRANames)
+	{
+		OSFreeMem(psDeviceNode->apszRANames);
+		psDeviceNode->apszRANames = NULL;
+	}
+
+	/* Perform vz deinitialization */
+	PVRSRVVzDeviceDestroy(psDeviceNode);
+
+	List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
+
+	for (ePhysHeapIdx = 0;
+		 ePhysHeapIdx < IMG_ARR_NUM_ELEMS(psDeviceNode->apsPhysHeap);
+		 ePhysHeapIdx++)
+	{
+		if (psDeviceNode->apsPhysHeap[ePhysHeapIdx])
+		{
+			PhysHeapRelease(psDeviceNode->apsPhysHeap[ePhysHeapIdx]);
+		}
+	}
+
+	for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++)
+	{
+		PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[i]);
+	}
+
+	OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+
+#if defined(PVR_DVFS)
+	DeinitDVFS(psDeviceNode);
+#endif
+
+	OSLockDestroy(psDeviceNode->hPowerLock);
+
+	PVRSRVUnregisterDbgTable(psDeviceNode);
+
+	psDeviceNode->psDevConfig->psDevNode = NULL;
+	SysVzDevDeInit(psDeviceNode->psDevConfig);
+	SysDevDeInit(psDeviceNode->psDevConfig);
+
+	OSFreeMem(psDeviceNode);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR LMA_PhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+							PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr)
+{
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	IMG_UINT32  ui32OSid = 0;
+#endif
+	RA_BASE_T uiCardAddr;
+	RA_LENGTH_T uiActualSize;
+	PVRSRV_ERROR eError;
+
+	RA_ARENA *pArena = psDevNode->apsLocalDevMemArenas[0];
+	IMG_UINT32 ui32Log2NumPages = 0;
+
+	PVR_ASSERT(uiSize != 0);
+	ui32Log2NumPages = OSGetOrder(uiSize);
+	uiSize = (1 << ui32Log2NumPages) * OSGetPageSize();
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	IMG_UINT32  ui32OSidReg = 0;
+	IMG_BOOL    bOSidAxiProt;
+
+	IMG_PID     pId = OSGetCurrentClientProcessIDKM();
+
+	RetrieveOSidsfromPidList(pId, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+	pArena = psDevNode->psOSidSubArena[ui32OSid];
+}
+#endif
+
+	eError = RA_Alloc(pArena,
+	                  uiSize,
+	                  RA_NO_IMPORT_MULTIPLIER,
+	                  0,                         /* No flags */
+	                  uiSize,
+	                  "LMA_PhyContigPagesAlloc",
+	                  &uiCardAddr,
+	                  &uiActualSize,
+	                  NULL);                     /* No private handle */
+
+	PVR_ASSERT(uiSize == uiActualSize);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	PVR_DPF((PVR_DBG_MESSAGE, "(GPU Virtualization Validation): LMA_PhyContigPagesAlloc: Address:%llu, size:%llu", uiCardAddr, uiActualSize));
+}
+#endif
+
+	psMemHandle->u.ui64Handle = uiCardAddr;
+	psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr;
+
+	if (PVRSRV_OK == eError)
+	{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	    PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+	                                        uiSize,
+	                                        (IMG_UINT64)(uintptr_t) psMemHandle);
+#else
+		IMG_CPU_PHYADDR sCpuPAddr;
+		sCpuPAddr.uiAddr = psDevPAddr->uiAddr;
+
+		PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+		                             NULL,
+		                             sCpuPAddr,
+		                             uiSize,
+		                             NULL);
+#endif
+#endif
+		psMemHandle->ui32Order = ui32Log2NumPages;
+	}
+
+	return eError;
+}
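+
+/* Worked example (illustrative, assuming 4 KiB pages): a request of
+ * uiSize = 12288 bytes (3 pages) yields ui32Log2NumPages = 2, so the arena
+ * allocation is rounded up to 4 pages (16384 bytes) and
+ * psMemHandle->ui32Order records 2 for the matching map/unmap/free calls.
+ */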
+
+void LMA_PhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle)
+{
+	RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+	                                      (IMG_UINT64)(uintptr_t) psMemHandle);
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT64)uiCardAddr);
+#endif
+#endif
+	RA_Free(psDevNode->apsLocalDevMemArenas[0], uiCardAddr);
+	psMemHandle->ui32Order = 0;
+}
+
+PVRSRV_ERROR LMA_PhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+							size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+							void **pvPtr)
+{
+	IMG_CPU_PHYADDR sCpuPAddr;
+	IMG_UINT32 ui32NumPages = (1 << psMemHandle->ui32Order);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+
+	PhysHeapDevPAddrToCpuPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], 1, &sCpuPAddr, psDevPAddr);
+	*pvPtr = OSMapPhysToLin(sCpuPAddr,
+							ui32NumPages * OSGetPageSize(),
+							PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE);
+	if (*pvPtr == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	else
+	{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+		PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, ui32NumPages * OSGetPageSize());
+#else
+		{
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+										 *pvPtr,
+										 sCpuPAddr,
+										 ui32NumPages * OSGetPageSize(),
+										 NULL);
+		}
+#endif
+#endif
+		return PVRSRV_OK;
+	}
+}
+
+void LMA_PhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+						void *pvPtr)
+{
+	IMG_UINT32 ui32NumPages = (1 << psMemHandle->ui32Order);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, ui32NumPages * OSGetPageSize());
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, (IMG_UINT64)(uintptr_t)pvPtr);
+#endif
+#endif
+
+	OSUnMapPhysToLin(pvPtr, ui32NumPages * OSGetPageSize(),
+					 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+}
+
+PVRSRV_ERROR LMA_PhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+                                     PG_HANDLE *psMemHandle,
+                                     IMG_UINT32 uiOffset,
+                                     IMG_UINT32 uiLength)
+{
+	/* No need to flush because we map as uncached */
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(psMemHandle);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiLength);
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceFinalise
+@Description  Performs the final parts of device initialisation.
+@Input        psDeviceNode            Device node of the device to finish
+                                      initialising
+@Input        bInitSuccessful         Whether or not device specific
+                                      initialisation was successful
+@Return       PVRSRV_ERROR     PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode,
+											   IMG_BOOL bInitSuccessful)
+{
+	PVRSRV_ERROR eError;
+
+	if (bInitSuccessful)
+	{
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		eError = SyncCheckpointContextCreate(psDeviceNode,
+											 &psDeviceNode->hSyncCheckpointContext);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to create sync checkpoint context (%s)",
+					 __func__, PVRSRVGetErrorStringKM(eError)));
+
+			goto ErrorExit;
+		}
+#endif
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+		eError = SyncFbRegisterDevice(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			goto ErrorExit;
+		}
+#endif
+		eError = SyncPrimContextCreate(psDeviceNode,
+									   &psDeviceNode->hSyncPrimContext);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to create sync prim context (%s)",
+					 __func__, PVRSRVGetErrorStringKM(eError)));
+			SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext);
+			goto ErrorExit;
+		}
+
+		/* Allocate general purpose sync primitive */
+		eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+							   &psDeviceNode->psSyncPrim,
+							   "pvrsrv dev general");
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to allocate sync primitive with error (%s)",
+					 __func__, PVRSRVGetErrorStringKM(eError)));
+			goto ErrorExit;
+		}
+
+		/* Allocate MMU cache invalidate sync */
+		eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+							   &psDeviceNode->psMMUCacheSyncPrim,
+							   "pvrsrv dev MMU cache");
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to allocate sync primitive with error (%s)",
+					 __func__, PVRSRVGetErrorStringKM(eError)));
+			goto ErrorExit;
+		}
+
+		/* Next update value will be 1 since sync prim starts with 0 */
+		psDeviceNode->ui16NextMMUInvalidateUpdate = 1;
+
+		eError = PVRSRVPowerLock(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire power lock (%s)",
+					 __func__, PVRSRVGetErrorStringKM(eError)));
+			goto ErrorExit;
+		}
+
+		/*
+		 * Always ensure a single power on command appears in the pdump. This
+		 * should be the only power related call outside of PDUMPPOWCMDSTART
+		 * and PDUMPPOWCMDEND.
+		 */
+		eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+											 PVRSRV_DEV_POWER_STATE_ON, IMG_TRUE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to set device %p power state to 'on' (%s)",
+					 __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+			PVRSRVPowerUnlock(psDeviceNode);
+			goto ErrorExit;
+		}
+
+		/* Verify firmware compatibility for device */
+		eError = PVRSRVDevInitCompatCheck(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed compatibility check for device %p (%s)",
+					 __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+			PVRSRVPowerUnlock(psDeviceNode);
+			PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+			goto ErrorExit;
+		}
+
+		PDUMPPOWCMDSTART();
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			/* Force the device to idle if its default power state is off */
+			eError = PVRSRVDeviceIdleRequestKM(psDeviceNode,
+											   &PVRSRVDeviceIsDefaultStateOFF,
+											   IMG_TRUE);
+			if (eError == PVRSRV_OK)
+			{
+				break;
+			}
+			else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+			{
+				PVRSRVPowerUnlock(psDeviceNode);
+				OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+
+				eError = PVRSRVPowerLock(psDeviceNode);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s: Failed to acquire power lock (%s)",
+							 __func__, PVRSRVGetErrorStringKM(eError)));
+					goto ErrorExit;
+				}
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to idle device %p (%s)",
+						 __func__, psDeviceNode,
+						 PVRSRVGetErrorStringKM(eError)));
+				PVRSRVPowerUnlock(psDeviceNode);
+				goto ErrorExit;
+			}
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle DENIED", __func__));
+			PVRSRVPowerUnlock(psDeviceNode);
+			goto ErrorExit;
+		}
+
+		/* Place device into its default power state. */
+		eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+											 PVRSRV_DEV_POWER_STATE_DEFAULT,
+											 IMG_TRUE);
+		PDUMPPOWCMDEND();
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to set device %p into its default power state (%s)",
+					 __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+
+			PVRSRVPowerUnlock(psDeviceNode);
+			goto ErrorExit;
+		}
+
+		PVRSRVPowerUnlock(psDeviceNode);
+
+		/*
+		 * If PDUMP is enabled and RGX device is supported, then initialise the
+		 * performance counters that can be further modified in PDUMP. Then,
+		 * before ending the init phase of the pdump, drain the commands put in
+		 * the kCCB during the init phase.
+		 */
+#if defined(SUPPORT_RGX) && defined(PDUMP)
+		{
+			PVRSRV_RGXDEV_INFO *psDevInfo =
+				(PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice);
+
+			eError = PVRSRVRGXInitHWPerfCountersKM(psDeviceNode);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to init hwperf counters (%s)",
+						 __func__, PVRSRVGetErrorStringKM(eError)));
+				goto ErrorExit;
+			}
+
+			eError = RGXPdumpDrainKCCB(psDevInfo,
+									   psDevInfo->psKernelCCBCtl->ui32WriteOffset);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Problem draining kCCB (%s)",
+						 __func__, PVRSRVGetErrorStringKM(eError)));
+				goto ErrorExit;
+			}
+		}
+#endif
+
+		/* Now that the device(s) are fully initialised set them as active */
+		psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_ACTIVE;
+		eError = PVRSRV_OK;
+
+#if defined(SUPPORT_RGX)
+		if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+		{
+			eError = RGXFWOSConfig((PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice));
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Cannot kick initialization configuration to the Device (%s)",
+						 __func__, PVRSRVGetErrorStringKM(eError)));
+
+				goto ErrorExit;
+			}
+		}
+#endif
+	}
+	else
+	{
+		/* Initialisation failed so set the device(s) into a bad state */
+		psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD;
+		eError = PVRSRV_ERROR_NOT_INITIALISED;
+	}
+
+	/* Give PDump control a chance to end the init phase, depends on OS */
+	PDumpStopInitPhase(IMG_FALSE, IMG_TRUE);
+
+	return eError;
+
+ErrorExit:
+	/* Initialisation failed so set the device(s) into a bad state */
+	psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD;
+
+	return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	/* Only check devices which specify a compatibility check callback */
+	if (psDeviceNode->pfnInitDeviceCompatCheck)
+		return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode);
+	else
+		return PVRSRV_OK;
+}
+
+/*
+	PollForValueKM
+*/
+static
+PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32*	pui32LinMemAddr,
+										  IMG_UINT32			ui32Value,
+										  IMG_UINT32			ui32Mask,
+										  IMG_UINT32			ui32Timeoutus,
+										  IMG_UINT32			ui32PollPeriodus,
+										  IMG_BOOL				bAllowPreemption)
+{
+#if defined(NO_HARDWARE)
+	PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(ui32Timeoutus);
+	PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus);
+	PVR_UNREFERENCED_PARAMETER(bAllowPreemption);
+	return PVRSRV_OK;
+#else
+	IMG_UINT32	ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */
+
+	if (bAllowPreemption)
+	{
+		PVR_ASSERT(ui32PollPeriodus >= 1000);
+	}
+
+	LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
+	{
+		ui32ActualValue = OSReadHWReg32((void *)pui32LinMemAddr, 0) & ui32Mask;
+
+		if (ui32ActualValue == ui32Value)
+		{
+			return PVRSRV_OK;
+		}
+
+		if (gpsPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+		{
+			return PVRSRV_ERROR_TIMEOUT;
+		}
+
+		if (bAllowPreemption)
+		{
+			OSSleepms(ui32PollPeriodus / 1000);
+		}
+		else
+		{
+			OSWaitus(ui32PollPeriodus);
+		}
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	PVR_DPF((PVR_DBG_ERROR, "PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).",
+			ui32Value, ui32ActualValue, ui32Mask));
+
+	return PVRSRV_ERROR_TIMEOUT;
+#endif /* NO_HARDWARE */
+}
+
+
+/*
+	PVRSRVPollForValueKM
+*/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPollForValueKM (volatile IMG_UINT32	*pui32LinMemAddr,
+												IMG_UINT32			ui32Value,
+												IMG_UINT32			ui32Mask)
+{
+	return PollForValueKM(pui32LinMemAddr, ui32Value, ui32Mask,
+						  MAX_HW_TIME_US,
+						  MAX_HW_TIME_US/WAIT_TRY_COUNT,
+						  IMG_FALSE);
+}
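+
+/* Illustrative usage (hypothetical caller, not from the original source):
+ * busy-poll an uncached, CPU-visible status word until bit 0 is set, using
+ * the default hardware timeout:
+ *
+ *     eError = PVRSRVPollForValueKM(&psFwStatus->ui32Flags, 0x1, 0x1);
+ *
+ * psFwStatus is a hypothetical structure; any volatile IMG_UINT32 location
+ * can be polled this way.
+ */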
+
+static
+PVRSRV_ERROR IMG_CALLCONV WaitForValueKM(volatile IMG_UINT32  *pui32LinMemAddr,
+                                         IMG_UINT32           ui32Value,
+                                         IMG_UINT32           ui32Mask,
+                                         IMG_BOOL             bHoldBridgeLock)
+{
+#if defined(NO_HARDWARE)
+	PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	return PVRSRV_OK;
+#else
+
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eErrorWait;
+	IMG_UINT32 ui32ActualValue;
+
+	eError = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hOSEvent);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVWaitForValueKM: Failed to set up EventObject with error (%d)", eError));
+		goto EventObjectOpenError;
+	}
+
+	eError = PVRSRV_ERROR_TIMEOUT;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		ui32ActualValue = (*pui32LinMemAddr & ui32Mask);
+
+		if (ui32ActualValue == ui32Value)
+		{
+			/* Expected value has been found */
+			eError = PVRSRV_OK;
+			break;
+		}
+		else if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+		{
+			/* Services in bad state, don't wait any more */
+			eError = PVRSRV_ERROR_NOT_READY;
+			break;
+		}
+		else
+		{
+			/* wait for event and retry */
+			eErrorWait = bHoldBridgeLock ? OSEventObjectWaitAndHoldBridgeLock(hOSEvent) : OSEventObjectWait(hOSEvent);
+			if (eErrorWait != PVRSRV_OK  &&  eErrorWait != PVRSRV_ERROR_TIMEOUT)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "PVRSRVWaitForValueKM: Waiting for value failed with error %d. Expected 0x%x but found 0x%x (Mask 0x%08x). Retrying",
+							eErrorWait,
+							ui32Value,
+							ui32ActualValue,
+							ui32Mask));
+			}
+		}
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	OSEventObjectClose(hOSEvent);
+
+	/* One last check in case the object wait ended after the loop timeout... */
+	if (eError != PVRSRV_OK  &&  (*pui32LinMemAddr & ui32Mask) == ui32Value)
+	{
+		eError = PVRSRV_OK;
+	}
+
+	/* Provide event timeout information to aid the Device Watchdog Thread... */
+	if (eError == PVRSRV_OK)
+	{
+		psPVRSRVData->ui32GEOConsecutiveTimeouts = 0;
+	}
+	else if (eError == PVRSRV_ERROR_TIMEOUT)
+	{
+		psPVRSRVData->ui32GEOConsecutiveTimeouts++;
+	}
+
+EventObjectOpenError:
+
+	return eError;
+
+#endif /* NO_HARDWARE */
+}
+
+/*
+	PVRSRVWaitForValueKM
+*/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKM (volatile IMG_UINT32	*pui32LinMemAddr,
+												IMG_UINT32			ui32Value,
+												IMG_UINT32			ui32Mask)
+{
+	/* In this case we are NOT retaining the bridge lock while
+	   waiting for the value. */
+	return WaitForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, IMG_FALSE);
+}
+
+/*
+	PVRSRVWaitForValueKMAndHoldBridgeLock
+*/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKMAndHoldBridgeLockKM(volatile IMG_UINT32 *pui32LinMemAddr,
+                                                                  IMG_UINT32          ui32Value,
+                                                                  IMG_UINT32          ui32Mask)
+{
+	return WaitForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, IMG_TRUE);
+}
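+
+/* Design note (explanatory): PollForValueKM spins with OSWaitus/OSSleepms
+ * and so suits short hardware delays, while WaitForValueKM blocks on the
+ * global event object and relies on an interrupt/update path to signal it,
+ * making it the cheaper choice for potentially long waits.
+ */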
+
+int PVRSRVGetDriverStatus(void)
+{
+	return PVRSRVGetPVRSRVData()->eServicesState;
+}
+
+/*!
+ ******************************************************************************
+
+ @Function		PVRSRVGetErrorStringKM
+
+ @Description	Returns a text string relating to the PVRSRV_ERROR enum.
+
+ @Note		case statement used rather than an indexed array to ensure text is
+ 			synchronised with the correct enum
+
+ @Input		eError : PVRSRV_ERROR enum
+
+ @Return	const IMG_CHAR * : Text string
+
+ @Note		Must be kept in sync with servicesext.h
+
+******************************************************************************/
+
+IMG_EXPORT
+const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError)
+{
+	switch(eError)
+	{
+		case PVRSRV_OK:
+			return "PVRSRV_OK";
+#define PVRE(x) \
+		case x: \
+			return #x;
+#include "pvrsrv_errors.h"
+#undef PVRE
+		default:
+			return "Unknown PVRSRV error number";
+	}
+}
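+
+/* The PVRE define above is an X-macro: pvrsrv_errors.h is expected to hold
+ * one PVRE(...) invocation per error code, along the lines of (illustrative)
+ *
+ *     PVRE(PVRSRV_ERROR_OUT_OF_MEMORY)
+ *     PVRE(PVRSRV_ERROR_TIMEOUT)
+ *
+ * so every entry expands to a "case x: return #x;" statement and the
+ * strings cannot drift out of sync with the enum definition.
+ */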
+
+/*
+	PVRSRVSystemHasCacheSnooping
+*/
+IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	if ((psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_NONE) &&
+		(psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_EMULATED))
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	if (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_EMULATED)
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CPU_ONLY) ||
+		(psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS))
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_DEVICE_ONLY) ||
+		(psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS))
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	return psDevConfig->bHasNonMappableLocalMemory;
+}
+
+/*
+	PVRSRVSystemWaitCycles
+*/
+void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles)
+{
+	/* Delay in us */
+	IMG_UINT32 ui32Delayus = 1;
+
+	/* obtain the device freq */
+	if (psDevConfig->pfnClockFreqGet != NULL)
+	{
+		IMG_UINT32 ui32DeviceFreq;
+
+		ui32DeviceFreq = psDevConfig->pfnClockFreqGet(psDevConfig->hSysData);
+
+		ui32Delayus = (ui32Cycles * 1000000) / ui32DeviceFreq;
+
+		if (ui32Delayus == 0)
+		{
+			ui32Delayus = 1;
+		}
+	}
+
+	OSWaitus(ui32Delayus);
+}
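+
+/* Worked example (illustrative numbers): with a device clock of 400 MHz,
+ * ui32Cycles = 1000 gives (1000 * 1000000) / 400000000 = 2 us; a result of
+ * zero is bumped to 1 us so small cycle counts still delay, and 1 us is
+ * also the fallback when no pfnClockFreqGet callback is supplied.
+ */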
+
+static void *
+PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode,
+											va_list va)
+{
+	void *pvOSDevice = va_arg(va, void *);
+
+	if (psDeviceNode->psDevConfig->pvOSDevice == pvOSDevice)
+	{
+		return psDeviceNode;
+	}
+
+	return NULL;
+}
+
+PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice,
+										   IMG_UINT32 ui32IRQ,
+										   const IMG_CHAR *pszName,
+										   PFN_LISR pfnLISR,
+										   void *pvData,
+										   IMG_HANDLE *phLISRData)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+
+	psDeviceNode =
+		List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+									   &PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb,
+									   pvOSDevice);
+	if (!psDeviceNode)
+	{
+		/* Device can't be found in the list so it isn't in the system */
+		PVR_DPF((PVR_DBG_ERROR, "%s: device %p with irq %d is not present",
+				 __func__, pvOSDevice, ui32IRQ));
+		return PVRSRV_ERROR_INVALID_DEVICE;
+	}
+
+	return SysInstallDeviceLISR(psDeviceNode->psDevConfig->hSysData, ui32IRQ,
+								pszName, pfnLISR, pvData, phLISRData);
+}
+
+PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+	return SysUninstallDeviceLISR(hLISRData);
+}
+
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingHeapGetXStride(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									IMG_UINT32 uiHeapNum,
+									IMG_UINT32 *puiXStride)
+{
+	PVR_ASSERT(puiXStride != NULL);
+
+	if (uiHeapNum < 1 || uiHeapNum > psDevConfig->ui32BIFTilingHeapCount)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*puiXStride = psDevConfig->pui32BIFTilingHeapConfigs[uiHeapNum - 1];
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingGetConfig(PVRSRV_DEVICE_CONFIG  *psDevConfig,
+                               RGXFWIF_BIFTILINGMODE *peBifTilingMode,
+                               IMG_UINT32            *puiNumHeaps)
+{
+	*peBifTilingMode = psDevConfig->eBIFTilingMode;
+	*puiNumHeaps = psDevConfig->ui32BIFTilingHeapCount;
+	return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState)
+{
+	SysSetAxiProtOSid(ui32OSid, bState);
+}
+
+void SetTrustedDeviceAceEnabled(void)
+{
+	SysSetTrustedDeviceAceEnabled();
+}
+#endif
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVVzDeviceCreate(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	RA_BASE_T uBase;
+	RA_LENGTH_T uSize;
+	IMG_UINT ui32OSID;
+	IMG_UINT64 ui64Size;
+	PVRSRV_ERROR eError;
+	PHYS_HEAP *psPhysHeap;
+	IMG_CPU_PHYADDR sCpuPAddr;
+	IMG_DEV_PHYADDR sDevPAddr;
+	PHYS_HEAP_TYPE eHeapType;
+	IMG_UINT32 ui32NumOfHeapRegions;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+	/* First, register device GPU physical heap based on physheap config */
+	psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+	ui32NumOfHeapRegions = PhysHeapNumberOfRegions(psPhysHeap);
+	eHeapType = PhysHeapGetType(psPhysHeap);
+
+	/* Normally, for GPU UMA physheap, use OS services but here we override this
+	   if said physheap is DMA/UMA carve-out; for this create an RA to manage it */
+	if (eHeapType == PHYS_HEAP_TYPE_UMA || eHeapType == PHYS_HEAP_TYPE_DMA)
+	{
+		if (ui32NumOfHeapRegions)
+		{
+			eError = PhysHeapRegionGetCpuPAddr(psPhysHeap, 0, &sCpuPAddr);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_ASSERT(IMG_FALSE);
+				goto e0;
+			}
+
+			eError = PhysHeapRegionGetSize(psPhysHeap, 0, &ui64Size);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_ASSERT(IMG_FALSE);
+				goto e0;
+			}
+
+			eError = PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sDevPAddr);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_ASSERT(IMG_FALSE);
+				goto e0;
+			}
+		}
+		else
+		{
+			sDevPAddr.uiAddr = (IMG_UINT64)0;
+			sCpuPAddr.uiAddr = (IMG_UINT64)0;
+			ui64Size = (IMG_UINT64)0;
+		}
+
+		if (sCpuPAddr.uiAddr && sDevPAddr.uiAddr && ui64Size)
+		{
+			psDeviceNode->ui32NumOfLocalMemArenas = ui32NumOfHeapRegions;
+			PVR_ASSERT(ui32NumOfHeapRegions == 1);
+
+			PVR_DPF((PVR_DBG_MESSAGE, "===== UMA (carve-out) memory, 1st phys heap (gpu)"));
+
+			PVR_DPF((PVR_DBG_MESSAGE, "Creating RA for gpu memory 0x%016"IMG_UINT64_FMTSPECX"-0x%016"IMG_UINT64_FMTSPECX,
+			 		(IMG_UINT64) sCpuPAddr.uiAddr, sCpuPAddr.uiAddr + ui64Size - 1));
+
+			uBase = sDevPAddr.uiAddr;
+			uSize = (RA_LENGTH_T) ui64Size;
+			PVR_ASSERT(uSize == ui64Size);
+
+			psDeviceNode->apsLocalDevMemArenas = OSAllocMem(sizeof(RA_ARENA*));
+			PVR_ASSERT(psDeviceNode->apsLocalDevMemArenas);
+			psDeviceNode->apszRANames = OSAllocMem(sizeof(IMG_PCHAR));
+			PVR_ASSERT(psDeviceNode->apszRANames);
+			psDeviceNode->apszRANames[0] = OSAllocMem(PVRSRV_MAX_RA_NAME_LENGTH);
+			PVR_ASSERT(psDeviceNode->apszRANames[0]);
+
+			OSSNPrintf(psDeviceNode->apszRANames[0], PVRSRV_MAX_RA_NAME_LENGTH,
+						"%s gpu mem", psDeviceNode->psDevConfig->pszName);
+
+			psDeviceNode->apsLocalDevMemArenas[0] =
+				RA_Create(psDeviceNode->apszRANames[0],
+							OSGetPageShift(),	/* Use OS page size, keeps things simple */
+							RA_LOCKCLASS_0,		/* This arena doesn't use any other arenas. */
+							NULL,				/* No Import */
+							NULL,				/* No free import */
+							NULL,				/* No import handle */
+							IMG_FALSE);
+			if (psDeviceNode->apsLocalDevMemArenas[0] == NULL)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e0;
+			}
+
+			if (!RA_Add(psDeviceNode->apsLocalDevMemArenas[0], uBase, uSize, 0, NULL))
+			{
+				RA_Delete(psDeviceNode->apsLocalDevMemArenas[0]);
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e0;
+			}
+
+			/* Replace the UMA allocator with LMA allocator */
+			psDeviceNode->pfnDevPxAlloc = LMA_PhyContigPagesAlloc;
+			psDeviceNode->pfnDevPxFree = LMA_PhyContigPagesFree;
+			psDeviceNode->pfnDevPxMap = LMA_PhyContigPagesMap;
+			psDeviceNode->pfnDevPxUnMap = LMA_PhyContigPagesUnmap;
+			psDeviceNode->pfnDevPxClean = LMA_PhyContigPagesClean;
+			psDeviceNode->uiMMUPxLog2AllocGran = OSGetPageShift();
+			psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+		}
+	}
+	else
+	{
+		/* LMA heap sanity check */
+		PVR_ASSERT(ui32NumOfHeapRegions);
+	}
+
+	/* Next, register device firmware physical heap based on heap config */
+	psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	ui32NumOfHeapRegions = PhysHeapNumberOfRegions(psPhysHeap);
+	eHeapType = PhysHeapGetType(psPhysHeap);
+	PVR_ASSERT(eHeapType != PHYS_HEAP_TYPE_UNKNOWN);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "===== LMA/DMA/UMA (carve-out) memory, 2nd phys heap (fw)"));
+
+	if (ui32NumOfHeapRegions)
+	{
+		eError = PhysHeapRegionGetCpuPAddr(psPhysHeap, 0, &sCpuPAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_ASSERT(IMG_FALSE);
+			goto e0;
+		}
+
+		eError = PhysHeapRegionGetSize(psPhysHeap, 0, &ui64Size);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_ASSERT(IMG_FALSE);
+			goto e0;
+		}
+
+		eError = PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sDevPAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_ASSERT(IMG_FALSE);
+			goto e0;
+		}
+	}
+	else
+	{
+		sDevPAddr.uiAddr = (IMG_UINT64)0;
+		sCpuPAddr.uiAddr = (IMG_UINT64)0;
+		ui64Size = (IMG_UINT64)0;
+	}
+
+	if (ui32NumOfHeapRegions)
+	{
+		PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+
+		SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig,
+							   PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+							   &eHeapOrigin);
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Creating RA for fw memory 0x%016"IMG_UINT64_FMTSPECX"-0x%016"IMG_UINT64_FMTSPECX,
+				(IMG_UINT64) sCpuPAddr.uiAddr, sCpuPAddr.uiAddr + ui64Size - 1));
+
+		/* Now we construct RA to manage FW heap */
+		uBase = sDevPAddr.uiAddr;
+		uSize = (RA_LENGTH_T) ui64Size;
+		PVR_ASSERT(sCpuPAddr.uiAddr && ui64Size && uSize == ui64Size);
+		if (eHeapType != PHYS_HEAP_TYPE_LMA)
+		{
+			/* Non-LMA heaps must have a non-zero fw base; on some LMA
+			   configs the fw base legitimately starts at zero, so LMA
+			   heaps are not checked here */
+			PVR_ASSERT(sDevPAddr.uiAddr);
+		}
+
+		/* All vz drivers go through this motion; for guest driver(s) the loop
+		   terminates after one iteration since RGXFW_NUM_OS is one there */
+		for (ui32OSID = 0; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+		{
+			RA_BASE_T uOSIDBase = uBase + (ui32OSID * ui64Size);
+
+			OSSNPrintf(psDeviceNode->szKernelFwRAName[ui32OSID], sizeof(psDeviceNode->szKernelFwRAName[ui32OSID]),
+						"%s fw mem", psDeviceNode->psDevConfig->pszName);
+
+			psDeviceNode->psKernelFwMemArena[ui32OSID] =
+				RA_Create(psDeviceNode->szKernelFwRAName[ui32OSID],
+							OSGetPageShift(),	/* Use OS page size, keeps things simple */
+							RA_LOCKCLASS_0,		/* This arena doesn't use any other arenas. */
+							NULL,				/* No Import */
+							NULL,				/* No free import */
+							NULL,				/* No import handle */
+							IMG_FALSE);
+			if (psDeviceNode->psKernelFwMemArena[ui32OSID] == NULL)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e1;
+			}
+
+			if (!RA_Add(psDeviceNode->psKernelFwMemArena[ui32OSID], uOSIDBase, uSize, 0, NULL))
+			{
+				RA_Delete(psDeviceNode->psKernelFwMemArena[ui32OSID]);
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e1;
+			}
+
+			if (eHeapOrigin != PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+			{
+				break;
+			}
+		}
+
+		/* Fw physheap is always managed by LMA PMR factory */
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewLocalRamBackedPMR;
+	}
+
+	return PVRSRV_OK;
+e1:
+	PVRSRVVzDeviceDestroy(psDeviceNode);
+e0:
+	return eError;
+}
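+
+/* Layout sketch (hypothetical addresses, not from any real platform config):
+ * in host mode the per-OSID fw sub-heaps are carved consecutively from one
+ * region as uOSIDBase = uBase + ui32OSID * ui64Size, so a 2 MiB fw heap
+ * based at device address 0x80000000 places OSID 0 at 0x80000000, OSID 1 at
+ * 0x80200000, and so on.
+ */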
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVVzDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	IMG_UINT ui32OSID;
+	IMG_UINT64 ui64Size;
+	PHYS_HEAP *psPhysHeap;
+	IMG_CPU_PHYADDR sCpuPAddr;
+	IMG_DEV_PHYADDR sDevPAddr;
+	PHYS_HEAP_TYPE eHeapType;
+	IMG_UINT32 ui32NumOfHeapRegions;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+	/* First, unregister device firmware physical heap based on heap config */
+	psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	ui32NumOfHeapRegions = PhysHeapNumberOfRegions(psPhysHeap);
+
+	if (ui32NumOfHeapRegions)
+	{
+		for (ui32OSID = 0; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+		{
+			if (psDeviceNode->psKernelFwMemArena[ui32OSID])
+			{
+				RA_Delete(psDeviceNode->psKernelFwMemArena[ui32OSID]);
+				psDeviceNode->psKernelFwMemArena[ui32OSID] = NULL;
+			}
+		}
+	}
+
+	/* Next, unregister device GPU physical heap based on heap config */
+	psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+	ui32NumOfHeapRegions = PhysHeapNumberOfRegions(psPhysHeap);
+	eHeapType = PhysHeapGetType(psPhysHeap);
+
+	if (eHeapType == PHYS_HEAP_TYPE_UMA || eHeapType == PHYS_HEAP_TYPE_DMA)
+	{
+		if (ui32NumOfHeapRegions)
+		{
+			eError = PhysHeapRegionGetCpuPAddr(psPhysHeap, 0, &sCpuPAddr);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_ASSERT(IMG_FALSE);
+				return eError;
+			}
+
+			eError = PhysHeapRegionGetSize(psPhysHeap, 0, &ui64Size);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_ASSERT(IMG_FALSE);
+				return eError;
+			}
+
+			eError = PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sDevPAddr);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_ASSERT(IMG_FALSE);
+				return eError;
+			}
+		}
+		else
+		{
+			sDevPAddr.uiAddr = (IMG_UINT64)0;
+			sCpuPAddr.uiAddr = (IMG_UINT64)0;
+			ui64Size = (IMG_UINT64)0;
+		}
+
+		if (sCpuPAddr.uiAddr && sDevPAddr.uiAddr && ui64Size)
+		{
+			if (psDeviceNode->apsLocalDevMemArenas && psDeviceNode->apsLocalDevMemArenas[0])
+			{
+				RA_Delete(psDeviceNode->apsLocalDevMemArenas[0]);
+				psDeviceNode->apsLocalDevMemArenas[0] = NULL;
+				OSFreeMem(psDeviceNode->apsLocalDevMemArenas);
+				psDeviceNode->apsLocalDevMemArenas = NULL;
+			}
+			if (psDeviceNode->apszRANames)
+			{
+				OSFreeMem(psDeviceNode->apszRANames[0]);
+				psDeviceNode->apszRANames[0] = NULL;
+				OSFreeMem(psDeviceNode->apszRANames);
+				psDeviceNode->apszRANames = NULL;
+			}
+		}
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVVzRegisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+															IMG_DEV_PHYADDR sDevPAddr,
+															IMG_UINT64 ui64DevPSize,
+															IMG_UINT32 uiOSID)
+{
+	RA_BASE_T uBase;
+	RA_LENGTH_T uSize;
+	IMG_UINT64 ui64Size;
+	PHYS_HEAP *psPhysHeap;
+	PVRSRV_ERROR eError;
+
+	/*
+	   This is called by the host driver only; it creates an RA to manage the
+	   given guest's firmware physheap, so the call fails if an invalid guest
+	   OSID is supplied.
+	*/
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_ERROR_INTERNAL_ERROR);
+	PVR_DPF((PVR_DBG_MESSAGE, "===== Registering OSID: %d fw physheap memory", uiOSID));
+	PVR_LOGR_IF_FALSE(((uiOSID > 0) && (uiOSID < RGXFW_NUM_OS)), "Invalid guest OSID", PVRSRV_ERROR_INVALID_PARAMS);
+
+	/* Verify guest size with host size  (support only same sized FW heaps) */
+	psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	ui64Size = RGX_FIRMWARE_HEAP_SIZE;
+
+	if (ui64DevPSize != ui64Size)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				"OSID: %d fw physheap size 0x%"IMG_UINT64_FMTSPECX" differs from host fw physheap size 0x%"IMG_UINT64_FMTSPECX,
+				uiOSID,
+				ui64DevPSize,
+				ui64Size));
+
+		PVR_DPF((PVR_DBG_WARNING,
+				"Truncating OSID: %d requested fw physheap to: 0x%"IMG_UINT64_FMTSPECX"\n",
+				uiOSID,
+				ui64Size));
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Creating RA for fw 0x%016"IMG_UINT64_FMTSPECX"-0x%016"IMG_UINT64_FMTSPECX" [DEV/PA]",
+			(IMG_UINT64) sDevPAddr.uiAddr, sDevPAddr.uiAddr + ui64Size - 1));
+
+	/* Now we construct RA to manage FW heap */
+	uBase = sDevPAddr.uiAddr;
+	uSize = (RA_LENGTH_T) ui64Size;
+	PVR_ASSERT(uSize == ui64Size);
+
+	OSSNPrintf(psDeviceNode->szKernelFwRAName[uiOSID],
+			   sizeof(psDeviceNode->szKernelFwRAName[uiOSID]),
+			   "[OSID: %d]: fw mem", uiOSID);
+
+	psDeviceNode->psKernelFwMemArena[uiOSID] =
+		RA_Create(psDeviceNode->szKernelFwRAName[uiOSID],
+					OSGetPageShift(),	/* Use host page size, keeps things simple */
+					RA_LOCKCLASS_0,		/* This arena doesn't use any other arenas */
+					NULL,				/* No Import */
+					NULL,				/* No free import */
+					NULL,				/* No import handle */
+					IMG_FALSE);
+	if (psDeviceNode->psKernelFwMemArena[uiOSID] == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	if (!RA_Add(psDeviceNode->psKernelFwMemArena[uiOSID], uBase, uSize, 0, NULL))
+	{
+		RA_Delete(psDeviceNode->psKernelFwMemArena[uiOSID]);
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	psDeviceNode->ui64RABase[uiOSID] = uBase;
+	return PVRSRV_OK;
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVVzUnregisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+																IMG_UINT32 uiOSID)
+{
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_ERROR_INTERNAL_ERROR);
+	PVR_DPF((PVR_DBG_MESSAGE, "===== Deregistering OSID: %d fw physheap memory", uiOSID));
+	PVR_LOGR_IF_FALSE(((uiOSID > 0) && (uiOSID < RGXFW_NUM_OS)), "Invalid guest OSID", PVRSRV_ERROR_INVALID_PARAMS);
+
+	if (psDeviceNode->psKernelFwMemArena[uiOSID])
+	{
+		RA_Free(psDeviceNode->psKernelFwMemArena[uiOSID], psDeviceNode->ui64RABase[uiOSID]);
+		RA_Delete(psDeviceNode->psKernelFwMemArena[uiOSID]);
+		psDeviceNode->psKernelFwMemArena[uiOSID] = NULL;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ End of file (pvrsrv.c)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pvrsrv_pool.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pvrsrv_pool.c
new file mode 100644
index 0000000..5a1c7e2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/pvrsrv_pool.c
@@ -0,0 +1,274 @@
+/**************************************************************************/ /*!
+@File
+@Title          Services pool implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides a generic pool implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "lock.h"
+#include "dllist.h"
+#include "allocmem.h"
+
+struct _PVRSRV_POOL_
+{
+	POS_LOCK hLock;
+	/* total max number of permitted entries in the pool */
+	IMG_UINT uiMaxEntries;
+	/* number of entries currently in use, i.e. handed out via
+	 * PVRSRVPoolGet() and not yet returned
+	 */
+	IMG_UINT uiNumBusy;
+	/* number of not-in-use entries currently free in the pool */
+	IMG_UINT uiNumFree;
+
+	DLLIST_NODE sFreeList;
+
+	const IMG_CHAR *pszName;
+
+	PVRSRV_POOL_ALLOC_FUNC *pfnAlloc;
+	PVRSRV_POOL_FREE_FUNC *pfnFree;
+	void *pvPrivData;
+};
+
+typedef struct _PVRSRV_POOL_ENTRY_
+{
+	DLLIST_NODE sNode;
+	void *pvData;
+} PVRSRV_POOL_ENTRY;
+
+PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc,
+					PVRSRV_POOL_FREE_FUNC *pfnFree,
+					IMG_UINT32 ui32MaxEntries,
+					const IMG_CHAR *pszName,
+					void *pvPrivData,
+					PVRSRV_POOL **ppsPool)
+{
+	PVRSRV_POOL *psPool;
+	PVRSRV_ERROR eError;
+
+	psPool = OSAllocMem(sizeof(PVRSRV_POOL));
+
+	if (psPool == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_alloc;
+	}
+
+	eError = OSLockCreate(&psPool->hLock, LOCK_TYPE_NONE);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto err_lock_create;
+	}
+
+	psPool->uiMaxEntries = ui32MaxEntries;
+	psPool->uiNumBusy = 0;
+	psPool->uiNumFree = 0;
+	psPool->pfnAlloc = pfnAlloc;
+	psPool->pfnFree = pfnFree;
+	psPool->pvPrivData = pvPrivData;
+	psPool->pszName = pszName;
+
+	dllist_init(&psPool->sFreeList);
+
+	*ppsPool = psPool;
+
+	return PVRSRV_OK;
+
+err_lock_create:
+	OSFreeMem(psPool);
+err_alloc:
+	return eError;
+}
+
+static PVRSRV_ERROR _DestroyPoolEntry(PVRSRV_POOL *psPool,
+					PVRSRV_POOL_ENTRY *psEntry)
+{
+	psPool->pfnFree(psPool->pvPrivData, psEntry->pvData);
+	OSFreeMem(psEntry);
+
+	return PVRSRV_OK;
+}
+
+void PVRSRVPoolDestroy(PVRSRV_POOL *psPool)
+{
+	if (psPool->uiNumBusy != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to destroy pool %s "
+						"with %u entries still in use",
+						__func__,
+						psPool->pszName,
+						psPool->uiNumBusy));
+		return;
+	}
+
+	OSLockDestroy(psPool->hLock);
+
+	while (psPool->uiNumFree)
+	{
+		PVRSRV_POOL_ENTRY *psEntry;
+		DLLIST_NODE *psChosenNode;
+
+		psChosenNode = dllist_get_next_node(&psPool->sFreeList);
+		dllist_remove_node(psChosenNode);
+
+		psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode);
+
+		_DestroyPoolEntry(psPool, psEntry);
+
+		psPool->uiNumFree--;
+	}
+
+	OSFreeMem(psPool);
+}
+
+static PVRSRV_ERROR _CreateNewPoolEntry(PVRSRV_POOL *psPool,
+					PVRSRV_POOL_ENTRY **ppsEntry)
+{
+	PVRSRV_POOL_ENTRY *psNewEntry;
+	PVRSRV_ERROR eError;
+
+	psNewEntry = OSAllocMem(sizeof(PVRSRV_POOL_ENTRY));
+
+	if (psNewEntry == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_allocmem;
+	}
+
+	dllist_init(&psNewEntry->sNode);
+
+	eError = psPool->pfnAlloc(psPool->pvPrivData, &psNewEntry->pvData);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto err_pfn_alloc;
+	}
+
+	*ppsEntry = psNewEntry;
+
+	return PVRSRV_OK;
+
+err_pfn_alloc:
+	OSFreeMem(psNewEntry);
+err_allocmem:
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool,
+					PVRSRV_POOL_TOKEN *hToken,
+					void **ppvDataOut)
+{
+	PVRSRV_POOL_ENTRY *psEntry;
+	DLLIST_NODE *psChosenNode;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	OSLockAcquire(psPool->hLock);
+
+	/* check if we already have a free element ready */
+	if (psPool->uiNumFree)
+	{
+		psChosenNode = dllist_get_next_node(&psPool->sFreeList);
+		dllist_remove_node(psChosenNode);
+
+		psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode);
+
+		psPool->uiNumFree--;
+		psPool->uiNumBusy++;
+		*hToken = psEntry;
+		*ppvDataOut = psEntry->pvData;
+		goto out_unlock;
+	}
+	else
+	{
+		/* no available elements in the pool, try to create one. Note: the
+		 * function-scope eError is used here so that a creation failure is
+		 * reported to the caller (a local declaration would shadow it and
+		 * mask the error).
+		 */
+		eError = _CreateNewPoolEntry(psPool, &psEntry);
+
+		if (eError != PVRSRV_OK)
+		{
+			goto out_unlock;
+		}
+
+		*hToken = psEntry;
+		psPool->uiNumBusy++;
+		*ppvDataOut = psEntry->pvData;
+	}
+
+out_unlock:
+	OSLockRelease(psPool->hLock);
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool, PVRSRV_POOL_TOKEN hToken)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_POOL_ENTRY *psEntry = hToken;
+
+	PVR_ASSERT(psPool->uiNumBusy > 0);
+
+	OSLockAcquire(psPool->hLock);
+
+	/* put this entry in the pool if the pool has space,
+	 * otherwise free it
+	 */
+	if (psPool->uiNumFree < psPool->uiMaxEntries)
+	{
+		dllist_add_to_tail(&psPool->sFreeList, &psEntry->sNode);
+		psPool->uiNumFree++;
+	}
+	else
+	{
+		eError = _DestroyPoolEntry(psPool, psEntry);
+	}
+
+	psPool->uiNumBusy--;
+
+	OSLockRelease(psPool->hLock);
+
+	return eError;
+}
+
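+/* Illustrative usage sketch of the pool API above (kept out of the build
+ * with #if 0). The callback signatures are inferred from how pfnAlloc and
+ * pfnFree are invoked in this file; the example names, the 1024-byte entry
+ * size and the cap of 8 cached entries are hypothetical.
+ */
+#if 0
+static PVRSRV_ERROR _ExamplePoolAlloc(void *pvPrivData, void **ppvOut)
+{
+	PVR_UNREFERENCED_PARAMETER(pvPrivData);
+	*ppvOut = OSAllocMem(1024);
+	return (*ppvOut != NULL) ? PVRSRV_OK : PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+static void _ExamplePoolFree(void *pvPrivData, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvPrivData);
+	OSFreeMem(pvData);
+}
+
+static void _ExamplePoolUse(void)
+{
+	PVRSRV_POOL *psPool;
+	PVRSRV_POOL_TOKEN hToken;
+	void *pvBuf;
+
+	/* Entries are created on demand; at most 8 idle entries are cached. */
+	if (PVRSRVPoolCreate(_ExamplePoolAlloc, _ExamplePoolFree, 8,
+	                     "example_pool", NULL, &psPool) != PVRSRV_OK)
+	{
+		return;
+	}
+
+	if (PVRSRVPoolGet(psPool, &hToken, &pvBuf) == PVRSRV_OK)
+	{
+		/* ... use pvBuf ... */
+		PVRSRVPoolPut(psPool, hToken);
+	}
+
+	/* All tokens must have been returned before the pool is destroyed. */
+	PVRSRVPoolDestroy(psPool);
+}
+#endif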
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/ri_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/ri_server.c
new file mode 100644
index 0000000..9208855
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/ri_server.c
@@ -0,0 +1,1874 @@
+/*************************************************************************/ /*!
+@File			ri_server.c
+@Title          Resource Information (RI) server implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Resource Information (RI) server functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#include <stdarg.h>
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "srvkm.h"
+#include "lock.h"
+/* services/server/include/ */
+#include "ri_server.h"
+
+/* services/include/shared/ */
+#include "hash.h"
+/* services/shared/include/ */
+#include "dllist.h"
+
+#include "pmr.h"
+
+#if defined(PVR_RI_DEBUG)
+
+#define USE_RI_LOCK 	1
+
+/*
+ * Initial size used for the Hash table
+ * (which indexes the RI list entries).
+ */
+#define _RI_INITIAL_HASH_TABLE_SIZE	64
+
+/*
+ * Values written to the 'valid' field of RI structures when they are
+ * created, and cleared prior to their destruction. The code checks this
+ * value before treating a provided pointer as a valid RI structure.
+ */
+#define _VALID_RI_LIST_ENTRY 	0x66bccb66
+#define _VALID_RI_SUBLIST_ENTRY	0x77cddc77
+#define _INVALID				0x00000000
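+/*
+ * e.g. a handle passed back into this module is only dereferenced after a
+ * check such as:
+ *     if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+ *         return PVRSRV_ERROR_INVALID_PARAMS;
+ */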
+
+/*
+ * If this define is set to 1, details of the linked lists (addresses,
+ * prev/next ptrs, etc.) are also output when RIDumpListKM() is called.
+ */
+#define _DUMP_LINKEDLIST_INFO		0
+
+
+typedef IMG_UINT64 _RI_BASE_T;
+
+/*
+ *  Length of string used for process name
+ */
+#define TASK_COMM_LEN 				16
+/*
+ *  Length of string used for process ID
+ */
+#define TASK_PID_LEN 				11
+/*
+ *  Length of string used for "[{PID}:_{process_name}]"
+ */
+#define RI_PROC_TAG_CHAR_LEN 		(1+TASK_PID_LEN+2+TASK_COMM_LEN+1)
+
+/*
+ *  Length of string used for address
+ */
+#define RI_ADDR_CHAR_LEN			12
+/*
+ *  Length of string used for size
+ */
+#define RI_SIZE_CHAR_LEN			12
+/*
+ *  Length of string used for "{Imported from PID nnnnnnnnnn}"
+ */
+#define RI_IMPORT_TAG_CHAR_LEN 		32
+/*
+ *  Total length of string returned to debugfs
+ *  {0xaddr}_{annotation_text}_{0xsize}_{import_tag}
+ */
+#define RI_MAX_DEBUGFS_ENTRY_LEN	(RI_ADDR_CHAR_LEN+1+RI_MAX_TEXT_LEN+1+RI_SIZE_CHAR_LEN+1+RI_IMPORT_TAG_CHAR_LEN+1)
+/*
+ *  Total length of string output to _RIOutput()
+ *  for MEMDESC RI sub-list entries
+ *  {0xaddr}_{annotation_text}_[{PID}:_{process_name}]_{0xsize}_bytes_{import_tag}
+ */
+#define RI_MAX_MEMDESC_RI_ENTRY_LEN	(RI_ADDR_CHAR_LEN+1+RI_MAX_TEXT_LEN+1+RI_PROC_TAG_CHAR_LEN+1+RI_SIZE_CHAR_LEN+7+RI_IMPORT_TAG_CHAR_LEN+1)
+/*
+ *  Total length of string output to _RIOutput()
+ *  for PMR RI list entries
+ *  {annotation_text}_{pmr_handle}_suballocs:{num_suballocs}_{0xsize}
+ */
+#define RI_MAX_PMR_RI_ENTRY_LEN		(RI_MAX_TEXT_LEN+1+RI_ADDR_CHAR_LEN+11+10+1+RI_SIZE_CHAR_LEN)
+
+
+/*
+ * Structure used to make linked sublist of
+ * memory allocations (MEMDESC)
+ */
+struct _RI_SUBLIST_ENTRY_
+{
+	DLLIST_NODE				sListNode;
+	struct _RI_LIST_ENTRY_	*psRI;
+	IMG_UINT32 				valid;
+	IMG_BOOL				bIsImport;
+	IMG_BOOL				bIsExportable;
+	IMG_BOOL				bIsPinned;
+	IMG_PID					pid;
+	IMG_CHAR				ai8ProcName[TASK_COMM_LEN];
+	IMG_DEV_VIRTADDR 		sVAddr;
+	IMG_UINT64				ui64Offset;
+	IMG_UINT64				ui64Size;
+	IMG_UINT64				ui64BackedSize;
+	IMG_CHAR				ai8TextB[RI_MAX_TEXT_LEN+1];
+	DLLIST_NODE				sProcListNode;
+};
+
+/*
+ * Structure used to make linked list of
+ * PMRs. Sublists of allocations (MEMDESCs) made
+ * from these PMRs are chained off these entries.
+ */
+struct _RI_LIST_ENTRY_
+{
+	DLLIST_NODE				sListNode;
+	DLLIST_NODE				sSubListFirst;
+	IMG_UINT32 				valid;
+	PMR						*psPMR;
+	IMG_UINT64 				ui64LogicalSize;
+	IMG_UINT64 				ui64BackedSize;
+	IMG_PID					pid;
+	IMG_CHAR				ai8ProcName[TASK_COMM_LEN];
+	IMG_CHAR				ai8TextA[RI_MAX_TEXT_LEN+1];
+	IMG_UINT16 				ui16SubListCount;
+	IMG_UINT16 				ui16MaxSubListCount;
+	IMG_UINT32				ui32Flags; /* Flags used to indicate if PMR appears in ri debugfs output */
+};
+
+typedef struct _RI_LIST_ENTRY_ RI_LIST_ENTRY;
+typedef struct _RI_SUBLIST_ENTRY_ RI_SUBLIST_ENTRY;
+
+static IMG_UINT16 	g_ui16RICount = 0;
+static HASH_TABLE 	*g_pRIHashTable = NULL;
+static IMG_UINT16 	g_ui16ProcCount = 0;
+static HASH_TABLE 	*g_pProcHashTable = NULL;
+
+static POS_LOCK		g_hRILock;
+/*
+ * Flag used to indicate if RILock should be destroyed when final PMR entry
+ * is deleted, i.e. if RIDeInitKM() has already been called before that point
+ * but the handle manager has deferred deletion of RI entries.
+ */
+static IMG_BOOL 	bRIDeInitDeferred = IMG_FALSE;
+
+/*
+ *  Used as head of linked-list of PMR RI entries -
+ *  this is useful when we wish to iterate all PMR
+ *  list entries (when we don't have a PMR ref)
+ */
+static DLLIST_NODE	sListFirst;
+
+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */
+static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString);
+/* Function used to produce string containing info for PMR RI entries (used for both debugfs and kernel log output) */
+static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString);
+
+static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v);
+static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v);
+static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v);
+static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid);
+#define _RIOutput(x) PVR_LOG(x)
+
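+/*
+ * Hash/compare callbacks for the per-process table: the IMG_PID key is run
+ * through shift/add/xor avalanche mixing so that numerically close pids
+ * still spread across the hash buckets.
+ */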
+IMG_INTERNAL IMG_UINT32
+_ProcHashFunc (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+IMG_INTERNAL IMG_UINT32
+_ProcHashFunc (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+	IMG_UINT32 *p = (IMG_UINT32 *)pKey;
+	IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+	IMG_UINT32 ui;
+	IMG_UINT32 uHashKey = 0;
+
+	PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+	for (ui = 0; ui < uKeyLen; ui++)
+	{
+		IMG_UINT32 uHashPart = *p++;
+
+		uHashPart += (uHashPart << 12);
+		uHashPart ^= (uHashPart >> 22);
+		uHashPart += (uHashPart << 4);
+		uHashPart ^= (uHashPart >> 9);
+		uHashPart += (uHashPart << 10);
+		uHashPart ^= (uHashPart >> 2);
+		uHashPart += (uHashPart << 7);
+		uHashPart ^= (uHashPart >> 12);
+
+		uHashKey += uHashPart;
+	}
+
+	return uHashKey;
+}
+IMG_INTERNAL IMG_BOOL
+_ProcHashComp (size_t uKeySize, void *pKey1, void *pKey2);
+IMG_INTERNAL IMG_BOOL
+_ProcHashComp (size_t uKeySize, void *pKey1, void *pKey2)
+{
+	IMG_UINT32 *p1 = (IMG_UINT32 *)pKey1;
+	IMG_UINT32 *p2 = (IMG_UINT32 *)pKey2;
+	IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+	IMG_UINT32 ui;
+
+	for (ui = 0; ui < uKeyLen; ui++)
+	{
+		if (*p1++ != *p2++)
+			return IMG_FALSE;
+	}
+
+	return IMG_TRUE;
+}
+
+static void _RILock(void)
+{
+#if (USE_RI_LOCK == 1)
+	OSLockAcquire(g_hRILock);
+#endif
+}
+
+static void _RIUnlock(void)
+{
+#if (USE_RI_LOCK == 1)
+	OSLockRelease(g_hRILock);
+#endif
+}
+
+PVRSRV_ERROR RIInitKM(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	bRIDeInitDeferred = IMG_FALSE;
+#if (USE_RI_LOCK == 1)
+	eError = OSLockCreate(&g_hRILock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSLockCreate failed (returned %d)", __func__, eError));
+	}
+#endif
+	return eError;
+}
+void RIDeInitKM(void)
+{
+#if (USE_RI_LOCK == 1)
+	if (g_ui16RICount > 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: called with %d entries remaining - deferring OSLockDestroy()", __func__, g_ui16RICount));
+		bRIDeInitDeferred = IMG_TRUE;
+	}
+	else
+	{
+		OSLockDestroy(g_hRILock);
+	}
+#endif
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIPMRPhysicalBackingKM
+
+ @Description
+            Set whether or not the PMR has physical backing.
+
+ @input     psPMR         Reference (handle) to the PMR to be updated
+
+ @input     bHasBacking  IMG_TRUE if PMR has physical backing
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIPMRPhysicalBackingKM(PMR *psPMR, IMG_BOOL bHasBacking)
+{
+	uintptr_t hashData = 0;
+	PMR		  *pPMRHashKey = psPMR;
+	RI_LIST_ENTRY *psRIEntry = NULL;
+
+	if (!psPMR || !g_pRIHashTable)
+	{
+		/* NULL handle provided, or no RI entries exist yet */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	else
+	{
+		/* Acquire RI Lock */
+		_RILock();
+
+		/* look-up psPMR in Hash Table */
+		hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+		psRIEntry = (RI_LIST_ENTRY *)hashData;
+		if (!psRIEntry)
+		{
+			/* Release RI Lock */
+			_RIUnlock();
+			/* Error - no entry found for the specified PMR */
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+		psRIEntry->ui64BackedSize = bHasBacking ? psRIEntry->ui64LogicalSize : 0;
+
+		/* Release RI Lock */
+		_RIUnlock();
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIWritePMREntryKM
+
+ @Description
+            Writes a new Resource Information list entry.
+            The new entry will be inserted at the head of the list of
+            PMR RI entries and assigned the values provided.
+
+ @input     psPMR - Reference (handle) to the PMR to which this reference relates
+ @input     ui32TextASize - Length of the annotation string
+ @input     psz8TextA - String describing this PMR (may be null)
+ @input     ui64LogicalSize - Logical size of the PMR
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR,
+					   	       IMG_UINT32 ui32TextASize,
+					   	       const IMG_CHAR *psz8TextA,
+					   	       IMG_UINT64 ui64LogicalSize)
+{
+	uintptr_t hashData = 0;
+	PMR			*pPMRHashKey = psPMR;
+	IMG_PCHAR pszText = (IMG_PCHAR)psz8TextA;
+	RI_LIST_ENTRY *psRIEntry = NULL;
+
+
+	/* if Hash table has not been created, create it now */
+	if (!g_pRIHashTable)
+	{
+		g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default);
+		g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp);
+	}
+	if (!g_pRIHashTable || !g_pProcHashTable)
+	{
+		/* Error - no memory to allocate for Hash table(s) */
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	if (!psPMR)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	else
+	{
+		/* Acquire RI Lock */
+		_RILock();
+
+		/* look-up psPMR in Hash Table */
+		hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+		psRIEntry = (RI_LIST_ENTRY *)hashData;
+		if (!psRIEntry)
+		{
+			/*
+			 * If failed to find a matching existing entry, create a new one
+			 */
+			psRIEntry = (RI_LIST_ENTRY *)OSAllocZMem(sizeof(RI_LIST_ENTRY));
+			if (!psRIEntry)
+			{
+				/* Release RI Lock */
+				_RIUnlock();
+				/* Error - no memory to allocate for new RI entry */
+				return PVRSRV_ERROR_OUT_OF_MEMORY;
+			}
+			else
+			{
+				/*
+				 * Add new RI Entry
+				 */
+				if (g_ui16RICount == 0)
+				{
+					/* Initialise PMR entry linked-list head */
+					dllist_init(&sListFirst);
+				}
+				g_ui16RICount++;
+
+				dllist_init (&(psRIEntry->sSubListFirst));
+				psRIEntry->ui16SubListCount = 0;
+				psRIEntry->ui16MaxSubListCount = 0;
+				psRIEntry->valid = _VALID_RI_LIST_ENTRY;
+				psRIEntry->pid = OSGetCurrentClientProcessIDKM();
+				OSSNPrintf((IMG_CHAR *)psRIEntry->ai8ProcName, TASK_COMM_LEN, "%s", OSGetCurrentClientProcessNameKM());
+				/* Add PMR entry to linked-list of PMR entries */
+				dllist_init (&(psRIEntry->sListNode));
+				dllist_add_to_tail(&sListFirst,(PDLLIST_NODE)&(psRIEntry->sListNode));
+			}
+
+			if (pszText)
+			{
+				if (ui32TextASize > RI_MAX_TEXT_LEN)
+					ui32TextASize = RI_MAX_TEXT_LEN;
+
+				/* copy ai8TextA field data */
+				OSSNPrintf((IMG_CHAR *)psRIEntry->ai8TextA, ui32TextASize+1, "%s", pszText);
+
+				/* ensure string is NUL-terminated */
+				psRIEntry->ai8TextA[ui32TextASize] = '\0';
+			}
+			else
+			{
+				/* ensure string is NUL-terminated */
+				psRIEntry->ai8TextA[0] = '\0';
+			}
+			psRIEntry->psPMR = psPMR;
+			psRIEntry->ui64LogicalSize = ui64LogicalSize;
+			psRIEntry->ui64BackedSize = 0;
+			psRIEntry->ui32Flags = 0;
+
+			/* Create index entry in Hash Table */
+			HASH_Insert_Extended (g_pRIHashTable, (void *)&pPMRHashKey, (uintptr_t)psRIEntry);
+
+			/* Store the RI entry handle in the PMR structure, so the associated RI entry can be deleted when the PMR is destroyed */
+			PMRStoreRIHandle(psPMR, psRIEntry);
+		}
+		/* Release RI Lock */
+		_RIUnlock();
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIWriteMEMDESCEntryKM
+
+ @Description
+            Writes a new Resource Information sublist entry.
+            The new entry will be inserted at the head of the sublist of
+            the indicated PMR list entry, and assigned the values provided.
+
+ @input     psPMR - Reference (handle) to the PMR to which this MEMDESC RI entry relates
+ @input     ui32TextBSize - Length of the annotation string
+ @input     psz8TextB - String describing this secondary reference (may be null)
+ @input     ui64Offset - Offset from the start of the PMR at which this allocation begins
+ @input     ui64Size - Size of this allocation
+ @input     ui64BackedSize - How much of ui64Size is actually physically backed
+ @input     bIsImport - Flag indicating if this is an allocation or an import
+ @input     bIsExportable - Flag indicating if this allocation is exportable
+ @output    phRIHandle - Handle to the created RI entry
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR,
+					   	   	   	   IMG_UINT32 ui32TextBSize,
+					   	   	   	   const IMG_CHAR *psz8TextB,
+					   	   	   	   IMG_UINT64 ui64Offset,
+					   	   	   	   IMG_UINT64 ui64Size,
+					   	   	   	   IMG_UINT64 ui64BackedSize,
+					   	   	   	   IMG_BOOL bIsImport,
+					   	   	   	   IMG_BOOL bIsExportable,
+					   	   	   	   RI_HANDLE *phRIHandle)
+{
+	uintptr_t hashData = 0;
+	PMR 		*pPMRHashKey = psPMR;
+	IMG_PID		pid;
+	IMG_PCHAR pszText = (IMG_PCHAR)psz8TextB;
+	RI_LIST_ENTRY *psRIEntry = NULL;
+	RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+
+	/* check Hash tables have been created (meaning at least one PMR has been defined) */
+	if (!g_pRIHashTable || !g_pProcHashTable)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	if (!psPMR || !phRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	else
+	{
+		/* Acquire RI Lock */
+		_RILock();
+
+		*phRIHandle = NULL;
+
+		/* look-up psPMR in Hash Table */
+		hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+		psRIEntry = (RI_LIST_ENTRY *)hashData;
+		if (!psRIEntry)
+		{
+			/* Release RI Lock */
+			_RIUnlock();
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMem(sizeof(RI_SUBLIST_ENTRY));
+		if (!psRISubEntry)
+		{
+			/* Release RI Lock */
+			_RIUnlock();
+			/* Error - no memory to allocate for new RI sublist entry */
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		else
+		{
+			/*
+			 * Insert new entry in sublist
+			 */
+			PDLLIST_NODE currentNode = dllist_get_next_node(&(psRIEntry->sSubListFirst));
+
+			/*
+			 * Insert new entry before currentNode
+			 */
+			if (!currentNode)
+			{
+				currentNode = &(psRIEntry->sSubListFirst);
+			}
+			dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sListNode));
+
+			psRISubEntry->psRI = psRIEntry;
+
+			/* Increment number of entries in sublist */
+			psRIEntry->ui16SubListCount++;
+			if (psRIEntry->ui16SubListCount > psRIEntry->ui16MaxSubListCount)
+			{
+				psRIEntry->ui16MaxSubListCount = psRIEntry->ui16SubListCount;
+			}
+			psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY;
+		}
+
+		psRISubEntry->pid = OSGetCurrentClientProcessIDKM();
+
+		if (ui32TextBSize > RI_MAX_TEXT_LEN)
+			ui32TextBSize = RI_MAX_TEXT_LEN;
+		/* copy ai8TextB field data */
+		OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, ui32TextBSize+1, "%s", pszText);
+		/* ensure string is NUL-terminated */
+		psRISubEntry->ai8TextB[ui32TextBSize] = '\0';
+
+		psRISubEntry->ui64Offset = ui64Offset;
+		psRISubEntry->ui64Size = ui64Size;
+		psRISubEntry->ui64BackedSize = ui64BackedSize;
+		psRISubEntry->bIsImport = bIsImport;
+		psRISubEntry->bIsExportable = bIsExportable;
+		psRISubEntry->bIsPinned = IMG_TRUE;
+		OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, TASK_COMM_LEN, "%s", OSGetCurrentClientProcessNameKM());
+		dllist_init (&(psRISubEntry->sProcListNode));
+
+		/*
+		 *	Now insert this MEMDESC into the proc list
+		 */
+		/* look-up pid in Hash Table */
+		pid = psRISubEntry->pid;
+		hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+		if (!hashData)
+		{
+			/*
+			 * No allocations for this pid yet
+			 */
+			HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode));
+			/* Increment number of entries in proc hash table */
+			g_ui16ProcCount++;
+		}
+		else
+		{
+			/*
+			 * Insert allocation into pid allocations linked list
+			 */
+			PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData;
+
+			/*
+			 * Insert new entry
+			 */
+			dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode));
+		}
+		*phRIHandle = (RI_HANDLE)psRISubEntry;
+		/* Release RI Lock */
+		_RIUnlock();
+	}
+	return PVRSRV_OK;
+}
+
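+/* Illustrative lifecycle sketch for the two functions above (kept out of
+ * the build with #if 0). The annotation strings, sizes and offsets are
+ * hypothetical; RIDeleteMEMDESCEntryKM()/RIDeletePMREntryKM() are defined
+ * further down in this file.
+ */
+#if 0
+static void _ExampleTrackSuballoc(PMR *psPMR)
+{
+	RI_HANDLE hRI = NULL;
+
+	/* One list entry per PMR; its handle is stashed in the PMR itself via
+	 * PMRStoreRIHandle() so the entry can be deleted when the PMR is
+	 * destroyed (see RIDeletePMREntryKM()). */
+	RIWritePMREntryKM(psPMR, 7, "EXAMPLE", 0x1000);
+
+	/* One sublist entry per suballocation carved out of the PMR; entries
+	 * with no parent PMR go through RIWriteProcListEntryKM() below. */
+	if (RIWriteMEMDESCEntryKM(psPMR, 11, "EXAMPLE.sub",
+	                          0 /* offset */, 0x100 /* size */,
+	                          0x100 /* backed size */,
+	                          IMG_FALSE /* not an import */,
+	                          IMG_TRUE /* exportable */,
+	                          &hRI) == PVRSRV_OK)
+	{
+		/* ... and when the suballocation is freed: */
+		RIDeleteMEMDESCEntryKM(hRI);
+	}
+}
+#endif
+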
+/*!
+******************************************************************************
+
+ @Function	RIWriteProcListEntryKM
+
+ @Description
+            Write a new entry in the process list directly. We have to do this
+            because there might be no, multiple or changing PMR handles.
+
+            In the common case we have a PMR that will be added to the PMR list
+            and one or several MemDescs that are associated to it in a sub-list.
+            Additionally these MemDescs will be inserted in the per-process list.
+
+            There might be special descriptors from e.g. new user APIs that
+            are associated with no PMR, or with multiple PMRs, rather than
+            just one. These can now be added to the per-process list (as
+            RI_SUBLIST_ENTRY) directly with this function; they won't be
+            listed in the PMR list (RIEntry) because there might be no PMR.
+
+            To remove entries from the per-process list, just use
+            RIDeleteMEMDESCEntryKM().
+
+ @input     ui32TextBSize - Length of the annotation string
+ @input     psz8TextB - String describing this secondary reference (may be null)
+ @input     ui64Size - Size of this allocation
+ @input     ui64BackedSize - How much of ui64Size is actually physically backed
+ @input     ui64DevVAddr - Device virtual address of this entry
+ @output    phRIHandle - Handle to the created RI entry
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize,
+                                    const IMG_CHAR *psz8TextB,
+                                    IMG_UINT64 ui64Size,
+                                    IMG_UINT64 ui64BackedSize,
+                                    IMG_UINT64 ui64DevVAddr,
+                                    RI_HANDLE *phRIHandle)
+{
+	uintptr_t hashData = 0;
+	IMG_PID		pid;
+	IMG_PCHAR pszText = (IMG_PCHAR)psz8TextB;
+	RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+	if (!g_pRIHashTable)
+	{
+		g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default);
+		g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp);
+
+		if (!g_pRIHashTable || !g_pProcHashTable)
+		{
+			/* Error - no memory to allocate for Hash table(s) */
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+	}
+
+	/* Acquire RI Lock */
+	_RILock();
+
+	*phRIHandle = NULL;
+
+	psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMem(sizeof(RI_SUBLIST_ENTRY));
+	if (!psRISubEntry)
+	{
+		/* Release RI Lock */
+		_RIUnlock();
+		/* Error - no memory to allocate for new RI sublist entry */
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY;
+
+	psRISubEntry->pid = OSGetCurrentClientProcessIDKM();
+
+	if (ui32TextBSize > RI_MAX_TEXT_LEN)
+		ui32TextBSize = RI_MAX_TEXT_LEN;
+	/* copy ai8TextB field data */
+	OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, ui32TextBSize+1, "%s", pszText);
+	/* ensure string is NUL-terminated */
+	psRISubEntry->ai8TextB[ui32TextBSize] = '\0';
+
+	psRISubEntry->ui64Offset = 0;
+	psRISubEntry->ui64Size = ui64Size;
+	psRISubEntry->ui64BackedSize = ui64BackedSize;
+	psRISubEntry->sVAddr.uiAddr = ui64DevVAddr;
+	psRISubEntry->bIsImport = IMG_FALSE;
+	psRISubEntry->bIsExportable = IMG_FALSE;
+	psRISubEntry->bIsPinned = IMG_TRUE;
+	OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, TASK_COMM_LEN, "%s", OSGetCurrentClientProcessNameKM());
+	dllist_init (&(psRISubEntry->sProcListNode));
+
+	/*
+	 *	Now insert this MEMDESC into the proc list
+	 */
+	/* look-up pid in Hash Table */
+	pid = psRISubEntry->pid;
+	hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+	if (!hashData)
+	{
+		/*
+		 * No allocations for this pid yet
+		 */
+		HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode));
+		/* Increment number of entries in proc hash table */
+		g_ui16ProcCount++;
+	}
+	else
+	{
+		/*
+		 * Insert allocation into pid allocations linked list
+		 */
+		PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData;
+
+		/*
+		 * Insert new entry
+		 */
+		dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode));
+	}
+	*phRIHandle = (RI_HANDLE)psRISubEntry;
+	/* Release RI Lock */
+	_RIUnlock();
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIUpdateMEMDESCAddrKM
+
+ @Description
+            Update a Resource Information entry.
+
+ @input     hRIHandle - Handle of object whose reference info is to be updated
+ @input     sVAddr - New device virtual address for the RI entry
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle,
+								   IMG_DEV_VIRTADDR sVAddr)
+{
+	RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+	if (!hRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+	if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+	{
+		/* Pointer does not point to valid structure */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Acquire RI lock */
+	_RILock();
+
+	psRISubEntry->sVAddr.uiAddr = sVAddr.uiAddr;
+
+	/* Release RI lock */
+	_RIUnlock();
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIUpdateMEMDESCPinningKM
+
+ @Description
+            Update a Resource Information entry.
+
+ @input     hRIHandle - Handle of object whose reference info is to be updated
+ @input     bIsPinned - The new pinning state
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIUpdateMEMDESCPinningKM(RI_HANDLE hRIHandle,
+								   IMG_BOOL bIsPinned)
+{
+	RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+	if (!hRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+	if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+	{
+		/* Pointer does not point to valid structure */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Acquire RI lock */
+	_RILock();
+
+	psRISubEntry->bIsPinned = bIsPinned;
+
+	/* Release RI lock */
+	_RIUnlock();
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIUpdateMEMDESCBackingKM
+
+ @Description
+            Update a Resource Information entry.
+
+ @input     hRIHandle       Handle of object whose reference info is to be updated
+ @input     iSizeAdjustment The change of backed physical memory for this
+                            allocation in bytes.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIUpdateMEMDESCBackingKM(RI_HANDLE hRIHandle,
+                                      IMG_INT32 iSizeAdjustment)
+{
+	RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+	if (!hRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+	if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+	{
+		/* Pointer does not point to valid structure */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Acquire RI lock */
+	_RILock();
+
+	psRISubEntry->ui64BackedSize += iSizeAdjustment;
+
+	/* Release RI lock */
+	_RIUnlock();
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDeletePMREntryKM
+
+ @Description
+            Delete a Resource Information entry.
+
+ @input     hRIHandle - Handle of object whose reference info is to be deleted
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle)
+{
+	RI_LIST_ENTRY *psRIEntry = NULL;
+	PMR			*pPMRHashKey;
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+
+
+	if (!hRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	else
+	{
+		psRIEntry = (RI_LIST_ENTRY *)hRIHandle;
+
+		if (psRIEntry->valid != _VALID_RI_LIST_ENTRY)
+		{
+			/* Pointer does not point to valid structure */
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		if (psRIEntry->ui16SubListCount == 0)
+		{
+			/* Acquire RI lock */
+			_RILock();
+
+			/* Remove the HASH table index entry */
+			pPMRHashKey = psRIEntry->psPMR;
+			HASH_Remove_Extended(g_pRIHashTable, (void *)&pPMRHashKey);
+
+			psRIEntry->valid = _INVALID;
+
+			/* Remove PMR entry from linked-list of PMR entries */
+			dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sListNode));
+
+			/* Now, free the memory used to store the RI entry */
+			OSFreeMem(psRIEntry);
+			psRIEntry = NULL;
+
+			/*
+			 * Decrement number of RI entries - if this is now zero,
+			 * we can delete the RI hash table
+			 */
+			if (--g_ui16RICount == 0)
+			{
+				HASH_Delete(g_pRIHashTable);
+				g_pRIHashTable = NULL;
+
+				_RIUnlock();
+
+				/* If deInit has been deferred, we can now destroy the RI Lock */
+				if (bRIDeInitDeferred)
+				{
+					OSLockDestroy(g_hRILock);
+				}
+			}
+			else
+			{
+				/* Release RI lock */
+				_RIUnlock();
+			}
+			/*
+			 * NULL the (local) handle; the caller's handle is invalid
+			 * once the PMR RI entry has been deleted
+			 */
+			hRIHandle = NULL;
+		}
+		else
+		{
+			eResult = PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+		}
+	}
+
+	return eResult;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDeleteMEMDESCEntryKM
+
+ @Description
+            Delete a Resource Information entry.
+            Entry can be from RIEntry list or ProcList.
+
+ @input     hRIHandle - Handle of object whose reference info is to be deleted
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle)
+{
+	RI_LIST_ENTRY *psRIEntry = NULL;
+	RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+	uintptr_t hashData = 0;
+	IMG_PID     pid;
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+
+
+	if (!hRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+	if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+	{
+		/* Pointer does not point to valid structure */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Acquire RI lock */
+	_RILock();
+
+	/* For entries which do have a parent PMR remove the node from the sublist */
+	if (psRISubEntry->psRI)
+	{
+		psRIEntry = (RI_LIST_ENTRY *)psRISubEntry->psRI;
+
+		/* Now, remove entry from the sublist */
+		dllist_remove_node(&(psRISubEntry->sListNode));
+	}
+
+	psRISubEntry->valid = _INVALID;
+
+	/* Remove the entry from the proc allocations linked list */
+	pid = psRISubEntry->pid;
+	/* If this is the only allocation for this pid, just remove it from the hash table */
+	if (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL)
+	{
+		HASH_Remove_Extended(g_pProcHashTable, (void *)&pid);
+		/* Decrement number of entries in proc hash table, and delete the hash table if there are now none */
+		if (--g_ui16ProcCount == 0)
+		{
+			HASH_Delete(g_pProcHashTable);
+			g_pProcHashTable = NULL;
+		}
+	}
+	else
+	{
+		hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+		if ((PDLLIST_NODE)hashData == &(psRISubEntry->sProcListNode))
+		{
+			HASH_Remove_Extended(g_pProcHashTable, (void *)&pid);
+			HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)dllist_get_next_node(&(psRISubEntry->sProcListNode)));
+		}
+	}
+	dllist_remove_node(&(psRISubEntry->sProcListNode));
+
+	/* Now, free the memory used to store the sublist entry */
+	OSFreeMem(psRISubEntry);
+	psRISubEntry = NULL;
+
+	/*
+	 * Decrement number of entries in sublist if this MemDesc had a parent entry.
+	 */
+	if (psRIEntry)
+	{
+		psRIEntry->ui16SubListCount--;
+	}
+
+	/* Release RI lock */
+	_RIUnlock();
+
+	/*
+	 * NULL the (local) handle; the caller's handle is invalid
+	 * once the MEMDESC RI entry has been deleted
+	 */
+	hRIHandle = NULL;
+
+	return eResult;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDeleteListKM
+
+ @Description
+            Delete all Resource Information entries and free associated
+            memory.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeleteListKM(void)
+{
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+
+	_RILock();
+
+	if (g_pRIHashTable)
+	{
+		eResult = HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DeleteAllEntries);
+		if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+		{
+			/*
+			 * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when
+			 * the hash table gets deleted as a result of deleting the final PMR entry,
+			 * so this is not a real error condition...
+			 */
+			eResult = PVRSRV_OK;
+		}
+	}
+
+	/* After the run through the RIHashTable that holds the PMR entries there might be
+	 * still entries left in the per-process hash table because they were added with
+	 * RIWriteProcListEntryKM() and have no PMR parent associated.
+	 */
+	if (g_pProcHashTable)
+	{
+		eResult = HASH_Iterate(g_pProcHashTable, (HASH_pfnCallback) _DeleteAllProcEntries);
+		if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+		{
+			/*
+			 * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when
+			 * the hash table gets deleted as a result of deleting the final PMR entry,
+			 * so this is not a real error condition...
+			 */
+			eResult = PVRSRV_OK;
+		}
+	}
+
+	_RIUnlock();
+
+	return eResult;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDumpListKM
+
+ @Description
+            Dumps out the contents of the RI List entry for the
+            specified PMR, and all MEMDESC allocation entries
+            in the associated sub linked list.
+            At present, output is directed to Kernel log
+            via PVR_DPF.
+
+ @input     psPMR - PMR for which RI entry details are to be output
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpListKM(PMR *psPMR)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Acquire RI lock */
+	_RILock();
+
+	eError = _DumpList(psPMR, 0);
+
+	/* Release RI lock */
+	_RIUnlock();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIGetListEntryKM
+
+ @Description
+            Returns pointer to a formatted string with details of the specified
+            list entry. If no entry exists (e.g. it may have been deleted
+            since the previous call), NULL is returned.
+
+ @input     pid - pid for which RI entry details are to be output
+ @input     ppHandle - handle to the entry; if it points to NULL, the first
+                     entry will be returned
+ @output    ppszEntryString - string to be output for the entry
+ @output    ppHandle - on return, points to the next entry
+                     (or NULL if there is no next entry)
+
+ @Return	IMG_BOOL - IMG_TRUE while entries remain to be output,
+                     IMG_FALSE once iteration is complete
+
+******************************************************************************/
+IMG_BOOL RIGetListEntryKM(IMG_PID pid,
+						  IMG_HANDLE **ppHandle,
+						  IMG_CHAR **ppszEntryString)
+{
+	RI_SUBLIST_ENTRY  *psRISubEntry = NULL;
+	RI_LIST_ENTRY  *psRIEntry = NULL;
+	uintptr_t     hashData = 0;
+	IMG_PID       hashKey  = pid;
+
+	static IMG_CHAR	  ai8DebugfsSummaryString[RI_MAX_DEBUGFS_ENTRY_LEN+1];
+	static IMG_UINT64 ui64TotalMemdescAlloc = 0;
+	static IMG_UINT64 ui64TotalImport = 0;
+	static IMG_UINT64 ui64TotalPMRAlloc = 0;
+	static IMG_UINT64 ui64TotalPMRBacked = 0;
+	static IMG_BOOL bDisplayMemdescSummary = IMG_FALSE;
+	static IMG_BOOL bDisplayPMRSummary = IMG_FALSE;
+	static IMG_BOOL bTerminateNextCall = IMG_FALSE;
+	static IMG_BOOL bIteratePMRs = IMG_FALSE;
+	static DLLIST_NODE *psNode = NULL;
+	static IMG_BOOL bStoredProcName = IMG_FALSE;
+	static IMG_CHAR szProcName[RI_PROC_TAG_CHAR_LEN];
+
+	ai8DebugfsSummaryString[0] = '\0';
+
+	if (bDisplayMemdescSummary)
+	{
+		OSSNPrintf((IMG_CHAR *)&ai8DebugfsSummaryString[0],
+		            RI_MAX_TEXT_LEN,
+		            "MEMDESCs Alloc'd:0x%llx + Imported:0x%llx = Total:0x%llx\n",
+		            (unsigned long long) ui64TotalMemdescAlloc,
+		            (unsigned long long) ui64TotalImport,
+		            (unsigned long long) (ui64TotalMemdescAlloc + ui64TotalImport));
+
+		*ppszEntryString = &ai8DebugfsSummaryString[0];
+		ui64TotalMemdescAlloc = 0;
+		ui64TotalImport = 0;
+		bIteratePMRs = IMG_TRUE;
+		bDisplayMemdescSummary = IMG_FALSE;
+		return IMG_TRUE;
+	}
+	if (bDisplayPMRSummary)
+	{
+		OSSNPrintf((IMG_CHAR *)&ai8DebugfsSummaryString[0],
+		            RI_MAX_TEXT_LEN,
+		            "PID %d%s PMRs Alloc'd:0x%llx [Physical: 0x%llx]\n",
+		            pid,
+		            (char *)szProcName,
+		            (unsigned long long) ui64TotalPMRAlloc,
+		            (unsigned long long) ui64TotalPMRBacked);
+
+		*ppszEntryString = &ai8DebugfsSummaryString[0];
+		ui64TotalPMRAlloc = 0;
+		ui64TotalPMRBacked = 0;
+		bDisplayPMRSummary = IMG_FALSE;
+		bStoredProcName = IMG_FALSE;
+		bTerminateNextCall = IMG_TRUE;
+		return IMG_TRUE;
+	}
+
+	if (bTerminateNextCall)
+	{
+		*ppszEntryString = NULL;
+		*ppHandle        = NULL;
+		psNode = NULL;
+		bTerminateNextCall = IMG_FALSE;
+		bIteratePMRs = IMG_FALSE;
+		szProcName[0] = '\0';
+		return IMG_FALSE;
+	}
+
+	/* Acquire RI lock */
+	_RILock();
+
+	if (!bIteratePMRs)
+	{
+		/* look-up pid in Hash Table, to obtain first entry for pid */
+		hashData = HASH_Retrieve_Extended(g_pProcHashTable, (void *)&hashKey);
+		if (hashData)
+		{
+			if (*ppHandle)
+			{
+				psRISubEntry = (RI_SUBLIST_ENTRY *)*ppHandle;
+				if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+				{
+					psRISubEntry = NULL;
+				}
+			}
+			else
+			{
+				psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+				if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+				{
+					psRISubEntry = NULL;
+				}
+			}
+		}
+
+		if (psRISubEntry)
+		{
+			PDLLIST_NODE  psNextProcListNode = dllist_get_next_node(&psRISubEntry->sProcListNode);
+
+			if (psNextProcListNode == NULL  ||
+				psNextProcListNode == (PDLLIST_NODE)hashData)
+			{
+				bDisplayMemdescSummary = IMG_TRUE;
+			}
+
+			if (psRISubEntry->bIsImport)
+			{
+				/* Imports are totalled separately from allocations so
+				 * the same memory is not accounted for twice */
+				ui64TotalImport += psRISubEntry->ui64Size;
+			}
+			else
+			{
+				ui64TotalMemdescAlloc += psRISubEntry->ui64Size;
+			}
+
+			_GenerateMEMDESCEntryString(psRISubEntry,
+										IMG_TRUE,
+										RI_MAX_DEBUGFS_ENTRY_LEN,
+										(IMG_CHAR *)&ai8DebugfsSummaryString);
+
+			/* If not an imported PMR, flag 'parent' PMR as having been listed in MEMDESCs */
+			if (!psRISubEntry->bIsImport)
+			{
+				psRISubEntry->psRI->ui32Flags |= 1;
+			}
+
+			if (!bStoredProcName)
+			{
+				OSSNPrintf( (IMG_CHAR *)&szProcName,
+							RI_PROC_TAG_CHAR_LEN,
+							" %s",
+							(IMG_CHAR *)psRISubEntry->ai8ProcName);
+				bStoredProcName = IMG_TRUE;
+			}
+
+			ai8DebugfsSummaryString[RI_MAX_DEBUGFS_ENTRY_LEN] = '\0';
+
+			*ppszEntryString = (IMG_CHAR *)&ai8DebugfsSummaryString;
+			*ppHandle        = (IMG_HANDLE)IMG_CONTAINER_OF(psNextProcListNode, RI_SUBLIST_ENTRY, sProcListNode);
+
+		}
+		else
+		{
+			bDisplayMemdescSummary = IMG_TRUE;
+			if (ui64TotalMemdescAlloc == 0)
+			{
+				ai8DebugfsSummaryString[0] = '\0';
+				*ppszEntryString = (IMG_CHAR *)&ai8DebugfsSummaryString;
+			}
+		}
+	}
+	else
+	{
+		IMG_BOOL bPMRToDisplay = IMG_FALSE;
+
+		/* Iterate through the 'touched' PMRs and display details */
+		if (!psNode)
+		{
+			psNode = dllist_get_next_node(&sListFirst);
+		}
+		else
+		{
+			psNode = dllist_get_next_node(psNode);
+		}
+
+		while (!bDisplayPMRSummary && !bPMRToDisplay)
+		{
+			if (!psNode || (psNode == &sListFirst))
+			{
+				bDisplayPMRSummary = IMG_TRUE;
+			}
+			else
+			{
+				psRIEntry =	IMG_CONTAINER_OF(psNode, RI_LIST_ENTRY, sListNode);
+				if (psRIEntry->ui32Flags & 1)
+				{
+					/* This PMR was 'touched', so display details and unflag it*/
+					_GeneratePMREntryString(psRIEntry,
+											IMG_TRUE,
+											RI_MAX_DEBUGFS_ENTRY_LEN,
+											(IMG_CHAR *)&ai8DebugfsSummaryString);
+					psRIEntry->ui32Flags &= ~1;
+					ui64TotalPMRAlloc += psRIEntry->ui64LogicalSize;
+					ui64TotalPMRBacked += psRIEntry->ui64BackedSize;
+					bPMRToDisplay = IMG_TRUE;
+				}
+				else
+				{
+					psNode = dllist_get_next_node(psNode);
+				}
+			}
+		}
+	}
+
+	ai8DebugfsSummaryString[RI_MAX_DEBUGFS_ENTRY_LEN] = '\0';
+
+	/* Release RI lock */
+	_RIUnlock();
+
+	return IMG_TRUE;
+}
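+
+/* Illustrative usage sketch for RIGetListEntryKM() (kept out of the build
+ * with #if 0). The function is a resumable iterator (note the static state
+ * above) intended to be driven from a debugfs read handler; it must be
+ * called until it returns IMG_FALSE so that its static state is reset.
+ */
+#if 0
+static void _ExampleDumpPid(IMG_PID pid)
+{
+	IMG_HANDLE *phEntry = NULL;
+	IMG_CHAR *pszLine = NULL;
+
+	while (RIGetListEntryKM(pid, &phEntry, &pszLine))
+	{
+		if (pszLine != NULL && pszLine[0] != '\0')
+		{
+			PVR_LOG(("%s", pszLine));
+		}
+	}
+}
+#endif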
+
+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */
+static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry,
+                                            IMG_BOOL bDebugFs,
+                                            IMG_UINT16 ui16MaxStrLen,
+                                            IMG_CHAR *pszEntryString)
+{
+	IMG_CHAR 	szProc[RI_PROC_TAG_CHAR_LEN];
+	IMG_CHAR 	szImport[RI_IMPORT_TAG_CHAR_LEN];
+	IMG_PCHAR   pszAnnotationText = NULL;
+
+	if (!bDebugFs)
+	{
+		/* we don't include process ID info for debugfs output */
+		OSSNPrintf( (IMG_CHAR *)&szProc,
+		            RI_PROC_TAG_CHAR_LEN,
+		            "[%d: %s]",
+		            psRISubEntry->pid,
+		            (IMG_CHAR *)psRISubEntry->ai8ProcName);
+	}
+	if (psRISubEntry->bIsImport)
+	{
+		OSSNPrintf( (IMG_CHAR *)&szImport,
+		            RI_IMPORT_TAG_CHAR_LEN,
+		            "{Import from PID %d}",
+		            psRISubEntry->psRI->pid);
+		/* Set pszAnnotationText to that of the 'parent' PMR RI entry */
+		pszAnnotationText = (IMG_PCHAR)psRISubEntry->psRI->ai8TextA;
+	}
+	else
+	{
+		if (psRISubEntry->bIsExportable)
+		{
+			/* Set pszAnnotationText to that of the 'parent' PMR RI entry */
+			pszAnnotationText = (IMG_PCHAR)psRISubEntry->psRI->ai8TextA;
+		}
+		else
+		{
+			/* Set pszAnnotationText to that of the MEMDESC RI entry */
+			pszAnnotationText = (IMG_PCHAR)psRISubEntry->ai8TextB;
+		}
+	}
+
+
+	/* Don't print memdescs if they are local imports
+	 * (ie imported PMRs allocated by this process)
+	 */
+	if (psRISubEntry->bIsImport && (psRISubEntry->pid == psRISubEntry->psRI->pid))
+	{
+		/* Don't print this entry */
+		pszEntryString[0] = '\0';
+	}
+	else
+	{
+		OSSNPrintf(pszEntryString,
+				   ui16MaxStrLen,
+				   "%s0x%010llx\t%-80s %s\t0x%010llx <%p> %s%s%c",
+				   (bDebugFs ? "" : "   "),
+				   (unsigned long long) (psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset),
+				   pszAnnotationText,
+		           (bDebugFs ? "" : (char *)szProc),
+				   (unsigned long long) psRISubEntry->ui64Size,
+				   psRISubEntry->psRI->psPMR,
+				   (psRISubEntry->bIsImport ? (char *)&szImport : ""),
+				   (psRISubEntry->bIsPinned ? "" : "{Unpinned}"),
+				   (bDebugFs ? '\n' : ' '));
+	}
+}
+
+/* Function used to produce string containing info for PMR RI entries (used for debugfs and kernel log output) */
+static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry,
+                                            IMG_BOOL bDebugFs,
+                                            IMG_UINT16 ui16MaxStrLen,
+                                            IMG_CHAR *pszEntryString)
+{
+	IMG_PCHAR   pszAnnotationText = NULL;
+
+	/* Set pszAnnotationText to that of the PMR RI entry */
+	pszAnnotationText = (IMG_PCHAR)psRIEntry->ai8TextA;
+
+	OSSNPrintf(pszEntryString,
+	           ui16MaxStrLen,
+	           "%s<%p> %-80s \t0x%010llx [0x%010llx] %c",
+	           (bDebugFs ? "" : "   "),
+	           psRIEntry->psPMR,
+	           pszAnnotationText,
+	           (unsigned long long) psRIEntry->ui64LogicalSize,
+	           (unsigned long long) psRIEntry->ui64BackedSize,
+	           (bDebugFs ? '\n' : ' '));
+}
+
+/*!
+******************************************************************************
+
+ @Function	_DumpList
+ @Description
+            Dumps out RI List entries according to parameters passed.
+
+ @input     psPMR - If not NULL, function will output the RI entries for
+                   the specified PMR only
+ @input     pid - If non-zero, the function will only output MEMDESC RI
+                  entries made by the process with ID pid.
+                  If zero, all MEMDESC RI entries will be output.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid)
+{
+	RI_LIST_ENTRY *psRIEntry = NULL;
+	RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+	IMG_UINT16 ui16SubEntriesParsed = 0;
+	uintptr_t hashData = 0;
+	IMG_PID		  hashKey;
+	PMR			*pPMRHashKey = psPMR;
+	IMG_BOOL 	bDisplayedThisPMR = IMG_FALSE;
+
+
+	if (!psPMR)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	if (g_pRIHashTable && g_pProcHashTable)
+	{
+		if (pid != 0)
+		{
+			/* look-up pid in Hash Table */
+			hashKey = pid;
+			hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey);
+			if (hashData)
+			{
+				psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+				if (psRISubEntry)
+				{
+					psRIEntry = psRISubEntry->psRI;
+				}
+			}
+		}
+		else
+		{
+			/* look-up psPMR in Hash Table */
+			hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+			psRIEntry = (RI_LIST_ENTRY *)hashData;
+		}
+		if (!psRIEntry)
+		{
+			/* No entry found in hash table */
+			return PVRSRV_ERROR_NOT_FOUND;
+		}
+		while (psRIEntry)
+		{
+			bDisplayedThisPMR = IMG_FALSE;
+			/* Output details for RI entry */
+			if (!pid)
+			{
+				_RIOutput (("%s (0x%p) suballocs:%d size:0x%llx\n",
+				            psRIEntry->ai8TextA,
+				            psRIEntry->psPMR,
+				            (IMG_UINT)psRIEntry->ui16SubListCount,
+				            (unsigned long long)psRIEntry->ui64LogicalSize));
+				bDisplayedThisPMR = IMG_TRUE;
+			}
+			ui16SubEntriesParsed = 0;
+			if (psRIEntry->ui16SubListCount)
+			{
+#if _DUMP_LINKEDLIST_INFO
+				_RIOutput (("RI LIST: {sSubListFirst.psNextNode:0x%x}\n",
+				            (IMG_UINT)psRIEntry->sSubListFirst.psNextNode));
+#endif /* _DUMP_LINKEDLIST_INFO */
+				if (!pid)
+				{
+					psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)),
+					                                RI_SUBLIST_ENTRY, sListNode);
+				}
+				/* Traverse RI sublist and output details for each entry */
+				while (psRISubEntry && (ui16SubEntriesParsed < psRIEntry->ui16SubListCount))
+				{
+					if (!bDisplayedThisPMR)
+					{
+						_RIOutput (("%s (0x%p) suballocs:%d size:0x%llx\n",
+						            psRIEntry->ai8TextA,
+						            psRIEntry->psPMR,
+						            (IMG_UINT)psRIEntry->ui16SubListCount,
+						            (unsigned long long)psRIEntry->ui64LogicalSize));
+						bDisplayedThisPMR = IMG_TRUE;
+					}
+#if _DUMP_LINKEDLIST_INFO
+					_RIOutput (("RI LIST:    [this subentry:0x%x]\n",(IMG_UINT)psRISubEntry));
+					_RIOutput (("RI LIST:     psRI:0x%x\n",(IMG_UINT32)psRISubEntry->psRI));
+#endif /* _DUMP_LINKEDLIST_INFO */
+
+					{
+						IMG_CHAR szEntryString[RI_MAX_MEMDESC_RI_ENTRY_LEN];
+
+						_GenerateMEMDESCEntryString(psRISubEntry,
+						                            IMG_FALSE,
+						                            RI_MAX_MEMDESC_RI_ENTRY_LEN,
+						                            (IMG_CHAR *)&szEntryString);
+						szEntryString[RI_MAX_MEMDESC_RI_ENTRY_LEN-1] = '\0';
+						_RIOutput (("%s\n",(IMG_CHAR *)&szEntryString));
+					}
+
+					if (pid)
+					{
+						if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) ||
+						   (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData))
+						{
+							psRISubEntry = NULL;
+						}
+						else
+						{
+							psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)),
+							                                RI_SUBLIST_ENTRY, sProcListNode);
+							if (psRISubEntry)
+							{
+								if (psRIEntry != psRISubEntry->psRI)
+								{
+									/*
+									 * The next MEMDESC in the process linked list is in a different PMR
+									 */
+									psRIEntry = psRISubEntry->psRI;
+									bDisplayedThisPMR = IMG_FALSE;
+								}
+							}
+						}
+					}
+					else
+					{
+						ui16SubEntriesParsed++;
+						psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
+						                                RI_SUBLIST_ENTRY, sListNode);
+					}
+				}
+			}
+			if (!pid)
+			{
+				if (ui16SubEntriesParsed != psRIEntry->ui16SubListCount)
+				{
+					/*
+					 * Output error message as sublist does not contain the
+					 * number of entries indicated by sublist count
+					 */
+					_RIOutput (("RI ERROR: RI sublist contains %d entries, not %d entries\n",
+					            ui16SubEntriesParsed, psRIEntry->ui16SubListCount));
+				}
+				else if (psRIEntry->ui16SubListCount && !dllist_get_next_node(&(psRIEntry->sSubListFirst)))
+				{
+					/*
+					 * Output error message as sublist is empty but sublist count
+					 * is not zero
+					 */
+					_RIOutput (("RI ERROR: ui16SubListCount=%d for empty RI sublist\n",
+					            psRIEntry->ui16SubListCount));
+				}
+			}
+			psRIEntry = NULL;
+		}
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDumpAllKM
+
+ @Description
+            Dumps out the contents of all RI List entries (i.e. for all
+            MEMDESC allocations for each PMR).
+            At present, output is directed to Kernel log
+            via PVR_DPF.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpAllKM(void)
+{
+	if (g_pRIHashTable)
+	{
+		return HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DumpAllEntries);
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDumpProcessKM
+
+ @Description
+            Dumps out the contents of all MEMDESC RI List entries (for every
+            PMR) which have been allocated by the specified process only.
+            At present, output is directed to Kernel log
+            via PVR_DPF.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32  dummyPMR;
+
+	if (g_pProcHashTable)
+	{
+		/* Acquire RI lock */
+		_RILock();
+
+		/* _DumpList() only NULL-checks psPMR when a pid is given, so a
+		 * dummy non-NULL pointer is sufficient here */
+		eError = _DumpList((PMR *)&dummyPMR, pid);
+
+		/* Release RI lock */
+		_RIUnlock();
+	}
+	return eError;
+}
+
+#if defined(DEBUG)
+/*!
+******************************************************************************
+
+ @Function	_DumpProcessList
+ @Description
+            Searches the MEMDESC RI sublist entries of the given PMR for an
+            entry belonging to the specified process that covers the supplied
+            offset, and returns that entry's device virtual address.
+
+ @input     psPMR - PMR whose RI sublist entries are to be searched
+ @input     pid - Only entries made by the process with ID pid are matched
+ @input     ui64Offset - Offset within the PMR to look up
+ @output    psDevVAddr - Device virtual address of the matching entry
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR _DumpProcessList(PMR *psPMR,
+									 IMG_PID pid,
+									 IMG_UINT64 ui64Offset,
+									 IMG_DEV_VIRTADDR *psDevVAddr)
+{
+	RI_LIST_ENTRY *psRIEntry = NULL;
+	RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+	IMG_UINT16 ui16SubEntriesParsed = 0;
+	uintptr_t hashData = 0;
+	PMR *pPMRHashKey = psPMR;
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+	psDevVAddr->uiAddr = 0;
+
+	if (!psPMR)
+	{
+		/* NULL handle provided */
+		return eError;
+	}
+
+	if (g_pRIHashTable && g_pProcHashTable)
+	{
+		PVR_ASSERT(psPMR && pid);
+
+		/* look-up psPMR in Hash Table */
+		hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+		psRIEntry = (RI_LIST_ENTRY *)hashData;
+
+		if (!psRIEntry)
+		{
+			/* No entry found in hash table */
+			return PVRSRV_ERROR_NOT_FOUND;
+		}
+
+		if (psRIEntry->ui16SubListCount)
+		{
+			psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)),
+											RI_SUBLIST_ENTRY, sListNode);
+
+			/* Traverse RI sublist and output details for each entry */
+			while (psRISubEntry && (ui16SubEntriesParsed < psRIEntry->ui16SubListCount))
+			{
+				if (pid == psRISubEntry->pid)
+				{
+					IMG_UINT64 ui64StartOffset = psRISubEntry->ui64Offset;
+					IMG_UINT64 ui64EndOffset = psRISubEntry->ui64Offset + psRISubEntry->ui64Size;
+
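+					/* Return the device virtual address when the queried
+					 * offset falls within this MEMDESC's [start, end) range */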
+					if (ui64Offset >= ui64StartOffset && ui64Offset < ui64EndOffset)
+					{
+						psDevVAddr->uiAddr = psRISubEntry->sVAddr.uiAddr;
+						return PVRSRV_OK;
+					}
+				}
+
+				ui16SubEntriesParsed++;
+				psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
+												RI_SUBLIST_ENTRY, sListNode);
+			}
+		}
+	}
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDumpProcessListKM
+
+ @Description
+            Dumps out selected contents of all MEMDESC RI List entries (for a
+            PMR) which have been allocated by the specified process only.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR,
+								 IMG_PID pid,
+								 IMG_UINT64 ui64Offset,
+								 IMG_DEV_VIRTADDR *psDevVAddr)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (g_pProcHashTable)
+	{
+		/* Acquire RI lock*/
+		_RILock();
+
+		eError = _DumpProcessList(psPMR,
+								  pid,
+								  ui64Offset,
+								  psDevVAddr);
+
+		/* Release RI lock*/
+		_RIUnlock();
+	}
+
+	return eError;
+}
+#endif
+
+static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v)
+{
+	RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v;
+
+	PVR_UNREFERENCED_PARAMETER (k);
+
+	return RIDumpListKM(psRIEntry->psPMR);
+}
+
+static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v)
+{
+	RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v;
+	RI_SUBLIST_ENTRY *psRISubEntry;
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER (k);
+
+	while ((eResult == PVRSRV_OK) && (psRIEntry->ui16SubListCount > 0))
+	{
+		psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), RI_SUBLIST_ENTRY, sListNode);
+		eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE)psRISubEntry);
+	}
+	if (eResult == PVRSRV_OK)
+	{
+		eResult = RIDeletePMREntryKM((RI_HANDLE)psRIEntry);
+		/*
+		 * If we've deleted the Hash table, return
+		 * an error to stop the iterator...
+		 */
+		if (!g_pRIHashTable)
+		{
+			eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+		}
+	}
+	return eResult;
+}
+
+static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v)
+{
+	RI_SUBLIST_ENTRY *psRISubEntry = (RI_SUBLIST_ENTRY *)v;
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER (k);
+
+	eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE) psRISubEntry);
+	if (eResult == PVRSRV_OK && !g_pProcHashTable)
+	{
+		/*
+		 * If we've deleted the Hash table, return
+		 * an error to stop the iterator...
+		 */
+		eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+	}
+
+	return eResult;
+}
+
+#endif /* if defined(PVR_RI_DEBUG) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/srvcore.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/srvcore.c
new file mode 100644
index 0000000..43065ad
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/srvcore.c
@@ -0,0 +1,1274 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Common Bridge Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements core PVRSRV API, server side
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "ra.h"
+#include "pvr_bridge.h"
+#include "connection_server.h"
+#include "device.h"
+#include "htbuffer.h"
+
+#include "pdump_km.h"
+
+#include "srvkm.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "log2.h"
+
+#include "srvcore.h"
+#include "rgxinit.h"
+#include "pvrsrv.h"
+#include "power.h"
+#include "lists.h"
+#include "rgxdevice.h"
+
+#include "rgx_options.h"
+#include "pvrversion.h"
+#include "lock.h"
+#include "osfunc.h"
+#include "device_connection.h"
+#include "process_stats.h"
+#include "pvrsrv_pool.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "physmem_lma.h"
+#include "services_km.h"
+#endif
+
+#include "pvrsrv_tlstreams.h"
+#include "tlstream.h"
+
+/* For the purpose of maintainability, it is intended that this file should not
+ * contain any OS specific #ifdefs. Please find a way to add e.g.
+ * an osfunc.c abstraction or override the entire function in question within
+ * env/*/pvr_bridge_k.c
+ */
+
+PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT] = { {.pfFunction = DummyBW,} ,};
+
+#define		PVR_DISPATCH_OFFSET_FIRST_FUNC 			0
+#define 	PVR_DISPATCH_OFFSET_LAST_FUNC 			1
+#define		PVR_DISPATCH_OFFSET_ARRAY_MAX 			2
+
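+/* Default size of each connection's private TL stream buffer (128 KiB) */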
+#define PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT 131072
+
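+/* Each bridge group occupies a contiguous range of g_BridgeDispatchTable.
+ * This table records, per group, the absolute index of the group's first
+ * and last function, so a (group, function) pair can be mapped to a flat
+ * dispatch table index with a single addition (see BridgedDispatchKM). */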
+static IMG_UINT16 g_BridgeDispatchTableStartOffsets[BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT][PVR_DISPATCH_OFFSET_ARRAY_MAX];
+
+#if defined(DEBUG_BRIDGE_KM)
+/* a lock used for protecting bridge call timing calculations
+ * for calls which do not acquire a lock
+ */
+POS_LOCK g_hStatsLock;
+PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+#endif
+
+void BridgeDispatchTableStartOffsetsInit(void)
+{
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCEXPORT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCEXPORT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCSEXPORT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCSEXPORT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCFALLBACK][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCFALLBACK][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST;
+#if defined(SUPPORT_RGX)
+	/* Need a gap here to start next entry at element 128 */
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_BREAKPOINT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_BREAKPOINT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEBUGMISC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEBUGMISC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC]= PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC]= PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXRAY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXRAY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_REGCONFIG][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_REGCONFIG][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TIMERQUERY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TIMERQUERY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXSIGNALS][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXSIGNALS][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST;
+	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST;
+#endif
+}
+
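+/* In DEBUG_BRIDGE_KM builds the copy wrappers below account the per-entry
+ * and global user-copy byte totals before delegating to the OS copy
+ * routines; in other builds they collapse to straight pass-through calls. */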
+#if defined(DEBUG_BRIDGE_KM)
+PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+					IMG_UINT32 ui32DispatchTableEntry,
+					void *pvDest,
+					void *pvSrc,
+					IMG_UINT32 ui32Size)
+{
+	g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyFromUserTotalBytes+=ui32Size;
+	g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
+	return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+				  IMG_UINT32 ui32DispatchTableEntry,
+				  void *pvDest,
+				  void *pvSrc,
+				  IMG_UINT32 ui32Size)
+{
+	g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyToUserTotalBytes+=ui32Size;
+	g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
+	return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+#else
+INLINE PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+					IMG_UINT32 ui32DispatchTableEntry,
+					void *pvDest,
+					void *pvSrc,
+					IMG_UINT32 ui32Size)
+{
+	PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry);
+	return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+INLINE PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+				  IMG_UINT32 ui32DispatchTableEntry,
+				  void *pvDest,
+				  void *pvSrc,
+				  IMG_UINT32 ui32Size)
+{
+	PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry);
+	return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+#endif
+
+PVRSRV_ERROR
+PVRSRVConnectKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE * psDeviceNode,
+				IMG_UINT32 ui32Flags,
+				IMG_UINT32 ui32ClientBuildOptions,
+				IMG_UINT32 ui32ClientDDKVersion,
+				IMG_UINT32 ui32ClientDDKBuild,
+				IMG_UINT8  *pui8KernelArch,
+				IMG_UINT32 *pui32CapabilityFlags,
+				IMG_UINT32 *ui32PVRBridges,
+				IMG_UINT32 *ui32RGXBridges)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	IMG_UINT32			ui32BuildOptions, ui32BuildOptionsMismatch;
+	IMG_UINT32			ui32DDKVersion, ui32DDKBuild;
+	PVRSRV_DATA			*psSRVData = NULL;
+	IMG_UINT64			ui64ProcessVASpaceSize = OSGetCurrentProcessVASpaceSize();
+	static IMG_BOOL		bIsFirstConnection=IMG_FALSE;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	/* Clear the flags */
+	*pui32CapabilityFlags = 0;
+	psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+	psSRVData = PVRSRVGetPVRSRVData();
+
+	psConnection->ui32ClientFlags = ui32Flags;
+
+	/* output the available bridges */
+	*ui32PVRBridges = gui32PVRBridges;
+	*ui32RGXBridges = gui32RGXBridges;
+
+	/* Is the system snooping of caches emulated in software? */
+	if (PVRSRVSystemSnoopingIsEmulated(psDeviceNode->psDevConfig))
+	{
+		*pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_EMULATE_FLAG;
+	}
+	else
+	{
+		/*Set flags to pass back to the client showing which cache coherency is available.*/
+		/*Is the system CPU cache coherent?*/
+		if (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig))
+		{
+			*pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_DEVICE_FLAG;
+		}
+		/*Is the system device cache coherent?*/
+		if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig))
+		{
+			*pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_CPU_FLAG;
+		}
+	}
+
+	/* Does the system have device non-mappable local memory? */
+	if (PVRSRVSystemHasNonMappableLocalMemory(psDeviceNode->psDevConfig))
+	{
+		*pui32CapabilityFlags |= PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG;
+	}
+
+	/* Set flags to indicate shared-virtual-memory (SVM) allocation availability */
+	if (! psDeviceNode->ui64GeneralSVMHeapTopVA || ! ui64ProcessVASpaceSize)
+	{
+		*pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED;
+	}
+	else
+	{
+		if (ui64ProcessVASpaceSize <= psDeviceNode->ui64GeneralSVMHeapTopVA)
+		{
+			*pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED;
+		}
+		else
+		{
+			/* This can happen when processor has more virtual address bits
+			   than device (i.e. alloc is not always guaranteed to succeed) */
+			*pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL;
+		}
+	}
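+	/* Hypothetical example: a process VA space of 2^47 bytes with a device
+	 * SVM heap topping out at 2^40 takes the CANFAIL path above, since a
+	 * CPU allocation may land above what the device can map. */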
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	IMG_UINT32	ui32OSid = 0, ui32OSidReg = 0;
+    IMG_BOOL    bOSidAxiProtReg = IMG_FALSE;
+
+	IMG_PID pIDCurrent = OSGetCurrentClientProcessIDKM();
+
+    ui32OSid    = (ui32Flags & SRV_VIRTVAL_FLAG_OSID_MASK)    >> (VIRTVAL_FLAG_OSID_SHIFT);
+    ui32OSidReg = (ui32Flags & SRV_VIRTVAL_FLAG_OSIDREG_MASK) >> (VIRTVAL_FLAG_OSIDREG_SHIFT);
+
+#if defined(EMULATOR)
+
+    if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK)
+    {
+    	IMG_UINT32 ui32OSidAxiProtReg = 0, ui32OSidAxiProtTD = 0;
+
+    	ui32OSidAxiProtReg = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPREG_MASK) >> (VIRTVAL_FLAG_AXIPREG_SHIFT);
+    	ui32OSidAxiProtTD  = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPTD_MASK)  >> (VIRTVAL_FLAG_AXIPTD_SHIFT);
+
+    	PVR_DPF((PVR_DBG_MESSAGE,
+    			"[AxiProt & Virt]: Setting bOSidAxiProt of Emulator's Trusted Device for Catbase %d to %s",
+				ui32OSidReg,
+				(ui32OSidAxiProtTD == 1)?"TRUE":"FALSE"));
+
+    	bOSidAxiProtReg = ui32OSidAxiProtReg == 1;
+    	PVR_DPF((PVR_DBG_MESSAGE,
+    			"[AxiProt & Virt]: Setting bOSidAxiProt of FW's Register for Catbase %d to %s",
+				ui32OSidReg,
+				bOSidAxiProtReg?"TRUE":"FALSE"));
+
+    	SetAxiProtOSid(ui32OSidReg, ui32OSidAxiProtTD);
+    }
+
+#endif
+
+    InsertPidOSidsCoupling(pIDCurrent, ui32OSid, ui32OSidReg, bOSidAxiProtReg);
+
+    PVR_DPF((PVR_DBG_MESSAGE,"[GPU Virtualization Validation]: OSIDs: %d, %d\n",ui32OSid, ui32OSidReg));
+}
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	/* Only enabled if enabled in the UM */
+	if(!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_WORKLOAD_ESTIMATION_MASK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVConnectKM: Workload Estimation disabled. Not enabled in UM."));
+	}
+#endif
+
+#if defined(SUPPORT_PDVFS)
+	/* Only enabled if enabled in the UM */
+	if(!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_PDVFS_MASK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVConnectKM: Proactive DVFS disabled. Not enabled in UM."));
+	}
+#endif
+
+	ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+	ui32DDKBuild = PVRVERSION_BUILD;
+
+	if(ui32Flags & SRV_FLAGS_CLIENT_64BIT_COMPAT)
+	{
+		psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_64BIT;
+	}
+	else
+	{
+		psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_32BIT;
+	}
+
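+	/* Record build/version info here until the first connection completes
+	 * successfully (bIsFirstConnection is only set to IMG_TRUE at the end
+	 * of a successful connect). */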
+	if(IMG_FALSE == bIsFirstConnection)
+	{
+		psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+		psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions = ui32ClientBuildOptions;
+
+		psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildVersion = ui32DDKVersion;
+		psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildVersion = ui32ClientDDKVersion;
+
+		psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildRevision = ui32DDKBuild;
+		psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision = ui32ClientDDKBuild;
+
+		psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType = ((RGX_BUILD_OPTIONS_KM) & OPTIONS_DEBUG_MASK)? \
+																	BUILD_TYPE_DEBUG:BUILD_TYPE_RELEASE;
+		psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType = (ui32ClientBuildOptions & OPTIONS_DEBUG_MASK)? \
+																	BUILD_TYPE_DEBUG:BUILD_TYPE_RELEASE;
+
+		if (sizeof(void *) == POINTER_SIZE_64BIT)
+		{
+			psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_64BIT;
+		}
+		else
+		{
+			psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_32BIT;
+		}
+
+	}
+
+	/* Masking out every option that is not kernel specific*/
+	ui32ClientBuildOptions &= RGX_BUILD_OPTIONS_MASK_KM;
+
+	/*
+	 * Validate the build options
+	 */
+	ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+	if (ui32BuildOptions != ui32ClientBuildOptions)
+	{
+		ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32ClientBuildOptions;
+#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
+		/* Mask the debug flag option out as we do support combinations of debug vs release in UM & KM */
+		ui32BuildOptionsMismatch &= OPTIONS_STRICT;
+#endif
+		if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
+				"extra options present in client-side driver: (0x%x). Please check rgx_options.h",
+				__FUNCTION__,
+				ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+			eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+			goto chk_exit;
+		}
+
+		if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
+				"extra options present in KM driver: (0x%x). Please check rgx_options.h",
+				__FUNCTION__,
+				ui32BuildOptions & ui32BuildOptionsMismatch ));
+			eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+			goto chk_exit;
+		}
+		if(IMG_FALSE == bIsFirstConnection)
+		{
+			PVR_LOG(("%s: COMPAT_TEST: Client-side (0x%04x) (%s) and KM driver (0x%04x) (%s) build options differ.",
+																			__FUNCTION__,
+																			ui32ClientBuildOptions,
+																			(psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType)?"release":"debug",
+																			ui32BuildOptions,
+																			(psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType)?"release":"debug"));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_WARNING, "%s: COMPAT_TEST: Client-side (0x%04x) and KM driver (0x%04x) build options differ.",
+																		__FUNCTION__,
+																		ui32ClientBuildOptions,
+																		ui32BuildOptions));
+
+		}
+		if(!psSRVData->sDriverInfo.bIsNoMatch)
+			psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: Client-side and KM driver build options match. [ OK ]", __FUNCTION__));
+	}
+
+	/*
+	 * Validate DDK version
+	 */
+	if (ui32ClientDDKVersion != ui32DDKVersion)
+	{
+		if(!psSRVData->sDriverInfo.bIsNoMatch)
+			psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+		PVR_LOG(("(FAIL) %s: Incompatible driver DDK version (%u.%u) / client DDK version (%u.%u).",
+				__FUNCTION__,
+				PVRVERSION_MAJ, PVRVERSION_MIN,
+				PVRVERSION_UNPACK_MAJ(ui32ClientDDKVersion),
+				PVRVERSION_UNPACK_MIN(ui32ClientDDKVersion)));
+		eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+		PVR_DBG_BREAK;
+		goto chk_exit;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK version (%u.%u) and client DDK version (%u.%u) match. [ OK ]",
+				__FUNCTION__,
+				PVRVERSION_MAJ, PVRVERSION_MIN, PVRVERSION_MAJ, PVRVERSION_MIN));
+	}
+
+	/* Create stream for every connection except for the special clients
+	 * that don't need it, e.g. recipients of HWPerf data. */
+	if (!(psConnection->ui32ClientFlags & SRV_NO_HWPERF_CLIENT_STREAM))
+	{
+		IMG_CHAR acStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+		OSSNPrintf(acStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE,
+		           PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC,
+		           psDeviceNode->sDevId.i32UMIdentifier,
+		           psConnection->pid);
+
+		eError = TLStreamCreate(&psConnection->hClientTLStream, psDeviceNode,
+		                        acStreamName,
+		                        PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT,
+		                        TL_OPMODE_DROP_NEWER |
+		                        TL_FLAG_ALLOCATE_ON_FIRST_OPEN,
+		                        NULL, NULL, NULL, NULL);
+		if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_ALREADY_EXISTS)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Could not create private TL stream (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+			psConnection->hClientTLStream = NULL;
+		}
+		else if (eError == PVRSRV_OK)
+		{
+			/* Set "tlctrl" stream as a notification channel. This channel is
+			 * is used to notify recipients about stream open/close (by writer)
+			 * actions (and possibly other actions in the future). */
+			eError = TLStreamSetNotifStream(psConnection->hClientTLStream,
+			                                psSRVData->hTLCtrlStream);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Failed to set notification stream"));
+				TLStreamClose(psConnection->hClientTLStream);
+				psConnection->hClientTLStream = NULL;
+			}
+		}
+
+		/* Reset error status. Don't want to propagate any errors from here */
+		eError = PVRSRV_OK;
+		PVR_DPF((PVR_DBG_MESSAGE, "Created stream \"%s\".", acStreamName));
+	}
+
+	/*
+	 * Validate DDK build
+	 */
+	if (ui32ClientDDKBuild != ui32DDKBuild)
+	{
+		if(!psSRVData->sDriverInfo.bIsNoMatch)
+			psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+		PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch in driver DDK revision (%d) / client DDK revision (%d).",
+				__FUNCTION__, ui32DDKBuild, ui32ClientDDKBuild));
+#if defined(PVRSRV_STRICT_COMPAT_CHECK)
+		eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH;
+		PVR_DBG_BREAK;
+		goto chk_exit;
+#endif
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK revision (%d) and client DDK revision (%d) match. [ OK ]",
+				__FUNCTION__, ui32DDKBuild, ui32ClientDDKBuild));
+	}
+
+	/* Success so far, so check whether it is the PDump client that is connecting */
+	if (ui32Flags & SRV_FLAGS_PDUMPCTRL)
+	{
+		PDumpConnectionNotify();
+	}
+
+	PVR_ASSERT(pui8KernelArch != NULL);
+
+	if (psSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT)
+	{
+		*pui8KernelArch = 64;
+	}
+	else
+	{
+		*pui8KernelArch = 32;
+	}
+
+	bIsFirstConnection = IMG_TRUE;
+
+#if defined(DEBUG_BRIDGE_KM)
+	{
+		int ii;
+
+		/* dump dispatch table offset lookup table */
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: g_BridgeDispatchTableStartOffsets[0-%lu] entries:", __FUNCTION__, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT - 1));
+		for (ii=0; ii < BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT; ii++)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "g_BridgeDispatchTableStartOffsets[%d]: %u", ii, g_BridgeDispatchTableStartOffsets[ii][PVR_DISPATCH_OFFSET_FIRST_FUNC]));
+		}
+	}
+#endif
+
+chk_exit:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVDisconnectKM(void)
+{
+	/* just return OK, per-process data is cleaned up by resmgr */
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAcquireGlobalEventObjectKM
+@Description    Acquire the global event object.
+@Output         phGlobalEventObject    On success, points to the global event
+                                       object handle
+@Return         PVRSRV_ERROR           PVRSRV_OK on success or an error
+                                       otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	*phGlobalEventObject = psPVRSRVData->hGlobalEventObject;
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function       PVRSRVReleaseGlobalEventObjectKM
+@Description    Release the global event object.
+@Input          hGlobalEventObject    Global event object handle
+@Return         PVRSRV_ERROR          PVRSRV_OK on success or an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	PVR_ASSERT(psPVRSRVData->hGlobalEventObject == hGlobalEventObject);
+
+	return PVRSRV_OK;
+}
+
+/*
+	PVRSRVDumpDebugInfoKM
+*/
+PVRSRV_ERROR
+PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection,
+					  PVRSRV_DEVICE_NODE *psDeviceNode,
+					  IMG_UINT32 ui32VerbLevel)
+{
+	if (ui32VerbLevel > DEBUG_REQUEST_VERBOSITY_MAX)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	PVR_LOG(("User requested PVR debug info"));
+
+	PVRSRVDebugRequest(psDeviceNode, ui32VerbLevel, NULL, NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+	PVRSRVGetDevClockSpeedKM
+*/
+PVRSRV_ERROR
+PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE *psDeviceNode,
+						 IMG_PUINT32  pui32RGXClockSpeed)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVR_ASSERT(psDeviceNode->pfnDeviceClockSpeed != NULL);
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	eError = psDeviceNode->pfnDeviceClockSpeed(psDeviceNode, pui32RGXClockSpeed);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDevClockSpeedKM: "
+				"Could not get device clock speed (%d)!",
+				eError));
+	}
+
+	return eError;
+}
+
+
+/*
+	PVRSRVHWOpTimeoutKM
+*/
+PVRSRV_ERROR
+PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection,
+					PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
+	PVR_LOG(("User requested OS reset"));
+	OSPanic();
+#endif
+	PVR_LOG(("HW operation timeout, dump server info"));
+	PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MEDIUM, NULL, NULL);
+	return PVRSRV_OK;
+}
+
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32DispatchTableEntry,
+		void *psBridgeIn,
+		void *psBridgeOut,
+		CONNECTION_DATA *psConnection)
+{
+	PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+	PVR_UNREFERENCED_PARAMETER(psBridgeOut);
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if defined(DEBUG_BRIDGE_KM)
+	PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u (%s) mapped to "
+			 "Dummy Wrapper (probably not what you want!)",
+			 __FUNCTION__, ui32DispatchTableEntry, g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName));
+#else
+	PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u mapped to "
+			 "Dummy Wrapper (probably not what you want!)",
+			 __FUNCTION__, ui32DispatchTableEntry));
+#endif
+	return PVRSRV_ERROR_BRIDGE_ENOTTY;
+}
+
+PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection,
+                                    PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    IMG_UINT32 ui32AlignChecksSize,
+                                    IMG_UINT32 aui32AlignChecks[])
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(NO_HARDWARE) && defined(RGXFW_ALIGNCHECKS)
+
+	PVR_ASSERT(psDeviceNode->pfnAlignmentCheck != NULL);
+	return psDeviceNode->pfnAlignmentCheck(psDeviceNode, ui32AlignChecksSize,
+	                                       aui32AlignChecks);
+
+#else
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(ui32AlignChecksSize);
+	PVR_UNREFERENCED_PARAMETER(aui32AlignChecks);
+
+	return PVRSRV_OK;
+
+#endif /* !defined(NO_HARDWARE) && defined(RGXFW_ALIGNCHECKS) */
+
+}
+
+PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_UINT32 *pui32DeviceStatus)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* First try to update the status. */
+	if (psDeviceNode->pfnUpdateHealthStatus != NULL)
+	{
+		PVRSRV_ERROR eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode,
+		                                                          IMG_FALSE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDeviceStatusKM: Failed to"
+					 "check for device status (%d)", eError));
+
+			/* Return unknown status and error because we don't know what
+			 * happened and if the status is valid. */
+			*pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN;
+			return eError;
+		}
+	}
+
+	switch (OSAtomicRead(&psDeviceNode->eHealthStatus))
+	{
+		case PVRSRV_DEVICE_HEALTH_STATUS_OK:
+			*pui32DeviceStatus = PVRSRV_DEVICE_STATUS_OK;
+			return PVRSRV_OK;
+		case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:
+			*pui32DeviceStatus = PVRSRV_DEVICE_STATUS_NOT_RESPONDING;
+			return PVRSRV_OK;
+		case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:
+			*pui32DeviceStatus = PVRSRV_DEVICE_STATUS_DEVICE_ERROR;
+			return PVRSRV_OK;
+		default:
+			*pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN;
+			return PVRSRV_ERROR_INTERNAL_ERROR;
+	}
+}
+
+/*!
+ * *****************************************************************************
+ * @brief A wrapper for filling in the g_BridgeDispatchTable array that does
+ * 		  error checking.
+ *
+ * @param ui32BridgeGroup
+ * @param ui32Index
+ * @param pszIOCName
+ * @param pfFunction
+ * @param pszFunctionName
+ * @param hBridgeLock
+ * @param pszBridgeLockName
+ * @param bUseLock
+ *
+ * @return   void
+ ********************************************************************************/
+void
+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+					   IMG_UINT32 ui32Index,
+					   const IMG_CHAR *pszIOCName,
+					   BridgeWrapperFunction pfFunction,
+					   const IMG_CHAR *pszFunctionName,
+					   POS_LOCK hBridgeLock,
+					   const IMG_CHAR *pszBridgeLockName,
+					   IMG_BOOL bUseLock)
+{
+	static IMG_UINT32 ui32PrevIndex = IMG_UINT32_MAX;		/* -1 */
+
+#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
+	PVR_UNREFERENCED_PARAMETER(pszFunctionName);
+	PVR_UNREFERENCED_PARAMETER(pszBridgeLockName);
+#endif
+
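+	/* ui32Index arrives relative to its bridge group; adding the group's
+	 * first-function offset converts it to an absolute index into
+	 * g_BridgeDispatchTable. */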
+	ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+	/* Enable this to dump out the dispatch table entries */
+	PVR_DPF((PVR_DBG_WARNING, "%s: g_BridgeDispatchTableStartOffsets[%d]=%d", __FUNCTION__, ui32BridgeGroup, g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]));
+	PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName, pszBridgeLockName));
+#endif
+
+	/* Any gaps are sub-optimal in terms of memory usage, but we are mainly
+	 * interested in spotting any large gap of wasted memory that could be
+	 * accidentally introduced.
+	 *
+	 * This will currently flag up any gaps > 5 entries.
+	 *
+	 * NOTE: This shouldn't be debug only since switching from debug->release
+	 * etc is likely to modify the available ioctls and thus be a point where
+	 * mistakes are exposed. This isn't run at a performance critical time.
+	 */
+	if((ui32PrevIndex != IMG_UINT32_MAX) &&
+	   ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
+		(ui32Index <= ui32PrevIndex)))
+	{
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)",
+				 __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
+				 ui32Index, pszIOCName));
+#else
+		PVR_DPF((PVR_DBG_MESSAGE,
+				 "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
+				 __FUNCTION__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName));
+#endif
+	}
+
+	if (ui32Index >= BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Index %u (%s) out of range",
+				 __FUNCTION__, (IMG_UINT)ui32Index, pszIOCName));
+
+#if defined(DEBUG_BRIDGE_KM)
+		PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE_DISPATCH_TABLE_ENTRY_COUNT = %lu",
+				 __FUNCTION__, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST = %lu\n",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST));
+
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGX_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGX_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST));
+#endif
+
+		OSPanic();
+	}
+
+	/* Panic if the previous entry has been overwritten as this is not allowed!
+	 * NOTE: This shouldn't be debug only since switching from debug->release
+	 * etc is likely to modify the available ioctls and thus be a point where
+	 * mistakes are exposed. This isn't run at a performance critical time.
+	 */
+	if(g_BridgeDispatchTable[ui32Index].pfFunction)
+	{
+		if(g_BridgeDispatchTable[ui32Index].pfFunction != pfFunction)
+		{
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+			PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Adding dispatch table entry for %s clobbers an existing entry for %s (current pfn=<%p>, new pfn=<%p>)",
+				 __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName,
+				 (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction));
+#else
+			PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Adding dispatch table entry for %s clobbers an existing entry (index=%u). (current pfn=<%p>, new pfn=<%p>)",
+				 __FUNCTION__, pszIOCName, ui32Index,
+				 (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction));
+			PVR_DPF((PVR_DBG_WARNING, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue."));
+#endif
+			OSPanic();
+		}
+	}
+	else
+	{
+		g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
+		g_BridgeDispatchTable[ui32Index].hBridgeLock = hBridgeLock;
+		g_BridgeDispatchTable[ui32Index].bUseLock = bUseLock;
+#if defined(DEBUG_BRIDGE_KM)
+		g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
+		g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
+		g_BridgeDispatchTable[ui32Index].pszBridgeLockName = pszBridgeLockName;
+		g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
+		g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
+		g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0;
+		g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0;
+#endif
+	}
+
+	ui32PrevIndex = ui32Index;
+}
+
+
+PVRSRV_ERROR BridgeInit(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(DEBUG_BRIDGE_KM)
+	eError = OSLockCreate(&g_hStatsLock, LOCK_TYPE_PASSIVE);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to create bridge stats lock"));
+		return eError;
+	}
+#endif
+
+	return eError;
+}
+
+void BridgeDeinit(void)
+{
+#if defined(DEBUG_BRIDGE_KM)
+	if(g_hStatsLock)
+	{
+		OSLockDestroy(g_hStatsLock);
+		g_hStatsLock = NULL;
+	}
+#endif
+}
+
+
+PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection,
+                          PVRSRV_BRIDGE_PACKAGE   * psBridgePackageKM)
+{
+
+	void       * psBridgeIn=NULL;
+	void       * psBridgeOut=NULL;
+	BridgeWrapperFunction pfBridgeHandler;
+	IMG_UINT32   ui32DispatchTableEntry, ui32GroupBoundary;
+	PVRSRV_ERROR err = PVRSRV_OK;
+	PVRSRV_POOL_TOKEN hBridgeBufferPoolToken;
+	IMG_UINT32 ui32Timestamp = OSClockus();
+#if defined(DEBUG_BRIDGE_KM)
+	IMG_UINT64	ui64TimeStart;
+	IMG_UINT64	ui64TimeEnd;
+	IMG_UINT64	ui64TimeDiff;
+#endif
+#if !defined(INTEGRITY_OS)
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+#endif
+
+#if defined(DEBUG_BRIDGE_KM_STOP_AT_DISPATCH)
+	PVR_DBG_BREAK;
+#endif
+
+	err = PVRSRVDriverThreadEnter();
+
+	if(err != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s",
+									__func__,
+									PVRSRVGetErrorStringKM(err)));
+		return err;
+	}
+
+	if(BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT <= psBridgePackageKM->ui32BridgeID)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Out of range dispatch table group ID: %d",
+		        __FUNCTION__, psBridgePackageKM->ui32BridgeID));
+		err = PVRSRV_ERROR_BRIDGE_EINVAL;
+		goto return_error;
+	}
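+	/* Resolve the (module, function) pair in two steps: fetch the module's
+	 * absolute start index and last valid index here, then add the function
+	 * ID and range-check the result below. */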
+	ui32DispatchTableEntry = g_BridgeDispatchTableStartOffsets[psBridgePackageKM->ui32BridgeID][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+	ui32GroupBoundary = g_BridgeDispatchTableStartOffsets[psBridgePackageKM->ui32BridgeID][PVR_DISPATCH_OFFSET_LAST_FUNC];
+
+	/* bridge function is not implemented in this build */
+	if(0 == ui32DispatchTableEntry)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)",
+					__FUNCTION__,
+					ui32DispatchTableEntry,ui32GroupBoundary, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+		/* this points to DummyBW() which returns PVRSRV_ERROR_BRIDGE_ENOTTY */
+		err = g_BridgeDispatchTable[ui32DispatchTableEntry].pfFunction(ui32DispatchTableEntry,
+				  psBridgeIn,
+				  psBridgeOut,
+				  psConnection);
+		goto return_error;
+	}
+	else
+	{
+		ui32DispatchTableEntry +=  psBridgePackageKM->ui32FunctionID;
+	}
+	if(ui32DispatchTableEntry > ui32GroupBoundary)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)",
+					__FUNCTION__,
+					ui32DispatchTableEntry,ui32GroupBoundary, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+		err = PVRSRV_ERROR_BRIDGE_EINVAL;
+		goto return_error;
+	}
+	if(BRIDGE_DISPATCH_TABLE_ENTRY_COUNT <= ui32DispatchTableEntry)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, entry count = %lu,"
+		        " (bridge module %d, function %d)", __FUNCTION__,
+		        ui32DispatchTableEntry, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT,
+		        psBridgePackageKM->ui32BridgeID,
+		        psBridgePackageKM->ui32FunctionID));
+		err = PVRSRV_ERROR_BRIDGE_EINVAL;
+		goto return_error;
+	}
+#if defined(DEBUG_BRIDGE_KM)
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch table entry=%d, (bridge module %d, function %d)",
+			__FUNCTION__,
+			ui32DispatchTableEntry, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: %s",
+			 __FUNCTION__,
+			 g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName));
+	g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CallCount++;
+	g_BridgeGlobalStats.ui32IOCTLCount++;
+#endif
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	if (g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock == NULL &&
+	    g_BridgeDispatchTable[ui32DispatchTableEntry].bUseLock)
+	{
+		/* Acquire default global bridge lock if calling module has no independent lock */
+		OSAcquireBridgeLock();
+
+		/* Request for global bridge buffers */
+		OSGetGlobalBridgeBuffers(&psBridgeIn,
+		                         &psBridgeOut);
+	}
+	else
+#endif /* PVRSRV_USE_BRIDGE_LOCK */
+	{
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		if (g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock != NULL &&
+		    g_BridgeDispatchTable[ui32DispatchTableEntry].bUseLock)
+		{
+			OSLockAcquire(g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock);
+		}
+#endif
+#if !defined(INTEGRITY_OS)
+		/* try to acquire a bridge buffer from the pool */
+
+		err = PVRSRVPoolGet(psPVRSRVData->psBridgeBufferPool,
+								&hBridgeBufferPoolToken,
+								&psBridgeIn);
+
+		if(err != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to get bridge buffer from global pool"));
+			goto unlock_and_return_error;
+		}
+
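+		/* A single pooled buffer backs both directions: the first
+		 * PVRSRV_MAX_BRIDGE_IN_SIZE bytes hold the input parameters and the
+		 * output parameters are written immediately after them. */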
+		psBridgeOut = ((IMG_BYTE *) psBridgeIn) + PVRSRV_MAX_BRIDGE_IN_SIZE;
+#endif
+	}
+
+#if defined(DEBUG_BRIDGE_KM)
+	ui64TimeStart = OSClockns64();
+#endif
+
+	if (psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bridge input buffer too small "
+		        "(data size %u, buffer size %u)!", __FUNCTION__,
+		        psBridgePackageKM->ui32InBufferSize, PVRSRV_MAX_BRIDGE_IN_SIZE));
+		err = PVRSRV_ERROR_BRIDGE_ERANGE;
+		goto unlock_and_return_error;
+	}
+
+#if !defined(INTEGRITY_OS)
+	if (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bridge output buffer too small "
+		        "(data size %u, buffer size %u)!", __FUNCTION__,
+		        psBridgePackageKM->ui32OutBufferSize, PVRSRV_MAX_BRIDGE_OUT_SIZE));
+		err = PVRSRV_ERROR_BRIDGE_ERANGE;
+		goto unlock_and_return_error;
+	}
+
+	if((CopyFromUserWrapper (psConnection,
+					ui32DispatchTableEntry,
+					psBridgeIn,
+					psBridgePackageKM->pvParamIn,
+					psBridgePackageKM->ui32InBufferSize) != PVRSRV_OK)
+#if defined __QNXNTO__
+/* For Neutrino, the output bridge buffer acts as an input as well */
+					|| (CopyFromUserWrapper(psConnection,
+											ui32DispatchTableEntry,
+											psBridgeOut,
+											(void *)((uintptr_t)psBridgePackageKM->pvParamIn + psBridgePackageKM->ui32InBufferSize),
+											psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK)
+#endif
+		) /* end of if-condition */
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: CopyFromUserWrapper returned an error!", __FUNCTION__));
+		err = PVRSRV_ERROR_BRIDGE_EFAULT;
+		goto unlock_and_return_error;
+	}
+#else
+	psBridgeIn = psBridgePackageKM->pvParamIn;
+	psBridgeOut = psBridgePackageKM->pvParamOut;
+#endif
+
+	pfBridgeHandler =
+		(BridgeWrapperFunction)g_BridgeDispatchTable[ui32DispatchTableEntry].pfFunction;
+
+	if (pfBridgeHandler == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: ui32DispatchTableEntry = %d is not a registered function!",
+				 __FUNCTION__, ui32DispatchTableEntry));
+		err = PVRSRV_ERROR_BRIDGE_EFAULT;
+		goto unlock_and_return_error;
+	}
+
+	/* pfBridgeHandler functions are expected not to fail and to return an
+	 * IMG_INT of 0 (i.e. PVRSRV_OK). In the event this changes an error may
+	 * be +ve or -ve, so treat any non-zero return as a failure and report
+	 * something consistent here.
+	 */
+	if (0 != pfBridgeHandler(ui32DispatchTableEntry,
+						  psBridgeIn,
+						  psBridgeOut,
+						  psConnection)
+		)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: pfBridgeHandler returned an error", __FUNCTION__));
+		err = PVRSRV_ERROR_BRIDGE_EPERM;
+		goto unlock_and_return_error;
+	}
+
+	/*
+	   This should always be true as, at the moment, all bridge calls have
+	   to return an error message, but this could change so we do this
+	   check to be safe.
+	*/
+	if (psBridgePackageKM->ui32OutBufferSize > 0)
+	{
+#if !defined(INTEGRITY_OS)
+		if (CopyToUserWrapper (psConnection,
+						ui32DispatchTableEntry,
+						psBridgePackageKM->pvParamOut,
+						psBridgeOut,
+						psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK)
+		{
+			err = PVRSRV_ERROR_BRIDGE_EFAULT;
+			goto unlock_and_return_error;
+		}
+#endif
+	}
+
+#if defined(DEBUG_BRIDGE_KM)
+	ui64TimeEnd = OSClockns64();
+
+	ui64TimeDiff = ui64TimeEnd - ui64TimeStart;
+
+	/* if there is no lock held then acquire the stats lock to
+	 * ensure the calculations are done safely
+	 */
+	if(!g_BridgeDispatchTable[ui32DispatchTableEntry].bUseLock)
+	{
+		OSLockAcquire(g_hStatsLock);
+	}
+
+	g_BridgeDispatchTable[ui32DispatchTableEntry].ui64TotalTimeNS += ui64TimeDiff;
+
+	if(ui64TimeDiff > g_BridgeDispatchTable[ui32DispatchTableEntry].ui64MaxTimeNS)
+	{
+		g_BridgeDispatchTable[ui32DispatchTableEntry].ui64MaxTimeNS = ui64TimeDiff;
+	}
+
+	if(!g_BridgeDispatchTable[ui32DispatchTableEntry].bUseLock)
+	{
+		OSLockRelease(g_hStatsLock);
+	}
+#endif
+
+unlock_and_return_error:
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	if (g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock == NULL &&
+	    g_BridgeDispatchTable[ui32DispatchTableEntry].bUseLock)
+	{
+		OSReleaseBridgeLock();
+	}
+	else
+#endif
+	{
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		if (g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock != NULL &&
+		    g_BridgeDispatchTable[ui32DispatchTableEntry].bUseLock)
+		{
+			OSLockRelease(g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock);
+		}
+#endif
+
+#if !defined(INTEGRITY_OS)
+		err = PVRSRVPoolPut(psPVRSRVData->psBridgeBufferPool,
+								hBridgeBufferPoolToken);
+
+		if(err != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to return bridge buffer to global pool"));
+		}
+#endif
+	}
+
+return_error:
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: returning (err = %d)", __FUNCTION__, err));
+	}
+	/* ignore transport layer bridge to avoid HTB flooding */
+	if (psBridgePackageKM->ui32BridgeID != PVRSRV_BRIDGE_PVRTL)
+	{
+		if (err)
+		{
+			HTBLOGK(HTB_SF_BRG_BRIDGE_CALL_ERR, ui32Timestamp,
+			        psBridgePackageKM->ui32BridgeID,
+			        psBridgePackageKM->ui32FunctionID, err);
+		}
+		else
+		{
+			HTBLOGK(HTB_SF_BRG_BRIDGE_CALL, ui32Timestamp,
+			        psBridgePackageKM->ui32BridgeID,
+			        psBridgePackageKM->ui32FunctionID);
+		}
+	}
+
+	PVRSRVDriverThreadExit();
+	return err;
+}
+
+PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemStatArray)
+{
+#if !defined(__QNXNTO__)
+	return PVRSRVFindProcessMemStats(pid,
+					ui32ArrSize,
+					bAllProcessStats,
+					pui32MemStatArray);
+#else
+	PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform"));
+
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/sync_checkpoint.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/sync_checkpoint.c
new file mode 100644
index 0000000..0436132
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/sync_checkpoint.c
@@ -0,0 +1,2529 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services synchronisation checkpoint interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements server side code for services synchronisation
+	            interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "osfunc.h"
+#include "dllist.h"
+#include "sync.h"
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+#include "sync_checkpoint_init.h"
+#include "lock.h"
+#include "log2.h"
+#include "pvrsrv.h"
+#include "pdump_km.h"
+
+#include "pvrsrv_sync_km.h"
+#include "rgxhwperf.h"
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/* Enable this to turn on debug relating to the creation and
+   resolution of contexts */
+#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0
+
+/* Enable this to turn on debug relating to the creation and
+   resolution of fences */
+#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0
+
+/* Enable this to turn on debug relating to the sync checkpoint
+   allocation and freeing */
+#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0
+
+/* Enable this to turn on debug relating to the sync checkpoint
+   enqueuing and signalling */
+#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0
+
+/* Enable this to turn on debug relating to the sync checkpoint pool */
+#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0
+
+/* Enable this to turn on debug relating to sync checkpoint UFO
+   lookup */
+#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0
+
+/* Enable this to turn on sync checkpoint deferred cleanup debug
+ * (for syncs we have been told to free but which have some
+ * outstanding FW operations remaining (enqueued in CCBs))
+ */
+#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0
+
+#else
+
+#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0
+
+#endif
+
+/* We may need to temporarily disable the posting of HWPERF Host events,
+ * as the host could be holding a spin lock, and RGX_HWPERF_HOST_UFO
+ * calls a function which takes a mutex
+ */
+#define ENABLE_SYNC_CHECKPOINT_HWPERF_HOST_EVENTS 0
+
+/* Set the size of the sync checkpoint pool (not used if 0).
+ * A pool will be maintained for each sync checkpoint context.
+ */
+#define SYNC_CHECKPOINT_POOL_SIZE	128
+
+#define SYNC_CHECKPOINT_BLOCK_LIST_CHUNK_SIZE  10
+#define LOCAL_SYNC_CHECKPOINT_RESET_VALUE      PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED
+
+/*
+	This defines the maximum amount of synchronisation memory
+	that can be allocated per sync checkpoint context.
+	In reality this number is meaningless as we would run out
+	of synchronisation memory before we reach this limit, but
+	we need to provide a size to the span RA.
+*/
+#define MAX_SYNC_CHECKPOINT_MEM  (4 * 1024 * 1024)
+
+typedef struct _SYNC_CHECKPOINT_BLOCK_LIST_
+{
+	IMG_UINT32            ui32BlockCount;            /*!< Number of blocks in the list */
+	IMG_UINT32            ui32BlockListSize;         /*!< Size of the block array */
+	SYNC_CHECKPOINT_BLOCK **papsSyncCheckpointBlock; /*!< Array of sync checkpoint blocks */
+} SYNC_CHECKPOINT_BLOCK_LIST;
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_
+{
+	SHARED_DEV_CONNECTION					psDeviceNode;
+	PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN	pfnFenceResolve;
+	PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN		pfnFenceCreate;
+	/*
+	 *  Used as the head of a linked list of sync checkpoints for which
+	 *  SyncCheckpointFree() has been called but which still have
+	 *  outstanding FW operations (enqueued in CCBs).
+	 *  This list is checked whenever SyncCheckpointFree() is
+	 *  called, and when SyncCheckpointContextDestroy() is called.
+	 */
+	DLLIST_NODE								sDeferredCleanupListHead;
+	/* Lock to protect the deferred cleanup list */
+	POS_LOCK								hDeferredCleanupListLock;
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+	_SYNC_CHECKPOINT						*psSyncCheckpointPool[SYNC_CHECKPOINT_POOL_SIZE];
+	IMG_BOOL								bSyncCheckpointPoolFull;
+	IMG_BOOL								bSyncCheckpointPoolValid;
+	IMG_UINT32								ui32SyncCheckpointPoolCount;
+	IMG_UINT32								ui32SyncCheckpointPoolWp;
+	IMG_UINT32								ui32SyncCheckpointPoolRp;
+	POS_LOCK								hSyncCheckpointPoolLock;
+#endif
+} _SYNC_CHECKPOINT_CONTEXT_CTL;
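+
+/* A note on the pool fields above (the pool helpers _GetCheckpointFromPool,
+ * _PutCheckpointInPool and _CleanCheckpointPool are defined later in this
+ * file): the Wp/Rp members suggest the pool is run as a fixed-size ring
+ * buffer, with ui32SyncCheckpointPoolWp and ui32SyncCheckpointPoolRp
+ * wrapping at SYNC_CHECKPOINT_POOL_SIZE and ui32SyncCheckpointPoolCount
+ * tracking occupancy, all serialised by hSyncCheckpointPoolLock.
+ */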
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+
+/* This is the max number of sync checkpoint records we will search or dump
+ * at any time.
+ */
+#define SYNC_CHECKPOINT_RECORD_LIMIT 20000
+
+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1))
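+/* e.g. DECREMENT_WITH_WRAP(0, 8) yields 7, i.e. it steps backwards through
+ * a ring of 8 entries. */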
+
+struct SYNC_CHECKPOINT_RECORD
+{
+	PVRSRV_DEVICE_NODE		*psDevNode;
+	SYNC_CHECKPOINT_BLOCK	*psSyncCheckpointBlock;	/*!< handle to SYNC_CHECKPOINT_BLOCK */
+	IMG_UINT32				ui32SyncOffset; 		/*!< offset to sync in block */
+	IMG_UINT32				ui32FwBlockAddr;
+	IMG_PID					uiPID;
+	IMG_UINT32				ui32UID;
+	IMG_UINT64				ui64OSTime;
+	DLLIST_NODE				sNode;
+	IMG_CHAR				szClassName[SYNC_MAX_CLASS_NAME_LEN];
+};
+#endif /* defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+static IMG_BOOL gbSyncCheckpointInit = IMG_FALSE;
+static PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN g_pfnFenceResolve = NULL;
+static PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN g_pfnFenceCreate = NULL;
+static PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN g_pfnFenceDataRollback = NULL;
+static PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN g_pfnFenceFinalise = NULL;
+static PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN g_pfnNoHWUpdateTimelines = NULL;
+static PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN g_pfnFreeChkptListMem = NULL;
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+static _SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext);
+static IMG_BOOL _PutCheckpointInPool(_SYNC_CHECKPOINT *psSyncCheckpoint);
+static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext);
+#endif
+
+/* Defined values used to mark the lifecycle state of a sync checkpoint;
+ * the current value is held in the ui32ValidationCheck field of the
+ * structure so that stale or corrupt checkpoints can be detected */
+#define SYNC_CHECKPOINT_PATTERN_IN_USE 0x1a1aa
+#define SYNC_CHECKPOINT_PATTERN_IN_POOL 0x2b2bb
+#define SYNC_CHECKPOINT_PATTERN_FREED 0x3c3cc
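+
+/* Lifecycle, as used in this file: SyncCheckpointAlloc() stamps a freshly
+ * allocated checkpoint IN_USE, _FreeSyncCheckpoint() stamps it FREED, and
+ * checkpoints parked in the context pool are presumably stamped IN_POOL by
+ * the pool helpers (defined elsewhere in this file). ui32ValidationCheck
+ * is checked against IN_USE before a checkpoint's FW address is handed out.
+ */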
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+PVRSRV_ERROR
+SyncCheckpointRecordAdd(PSYNC_CHECKPOINT_RECORD_HANDLE *phRecord,
+	                    SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock,
+	                    IMG_UINT32 ui32FwBlockAddr,
+	                    IMG_UINT32 ui32SyncOffset,
+	                    IMG_UINT32 ui32UID,
+	                    IMG_UINT32 ui32ClassNameSize,
+	                    const IMG_CHAR *pszClassName);
+PVRSRV_ERROR
+SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord);
+static void _SyncCheckpointState(PDLLIST_NODE psNode,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile);
+static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+					IMG_UINT32 ui32VerbLevel,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile);
+static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode);
+static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode);
+#endif
+
+PVRSRV_ERROR SyncCheckpointSignalPDump(_SYNC_CHECKPOINT *psSyncCheckpoint);
+PVRSRV_ERROR SyncCheckpointErrorPDump(_SYNC_CHECKPOINT *psSyncCheckpoint);
+
+/* Unique incremental ID assigned to sync checkpoints when allocated */
+static IMG_UINT32 g_SyncCheckpointUID = 0;
+
+static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext);
+
+/*
+	Internal interfaces for management of _SYNC_CHECKPOINT_CONTEXT
+*/
+static void
+_SyncCheckpointContextUnref(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+	if (!OSAtomicRead(&psContext->hRefCount))
+	{
+		PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT,
+		              "_SyncCheckpointContextUnref context already freed");
+	}
+	else if (0 == OSAtomicDecrement(&psContext->hRefCount))
+	{
+		/* Actually destroy the context (deferred from
+		 * SyncCheckpointContextDestroy()) now that it is no longer referenced */
+		OSLockDestroy(psContext->psContextCtl->hDeferredCleanupListLock);
+		psContext->psContextCtl->hDeferredCleanupListLock = NULL;
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+		if (psContext->psContextCtl->ui32SyncCheckpointPoolCount)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s called for context<%p> with %d sync checkpoints still in the pool",
+					__FUNCTION__,
+					(void*)psContext,
+					psContext->psContextCtl->ui32SyncCheckpointPoolCount));
+		}
+		psContext->psContextCtl->bSyncCheckpointPoolValid = IMG_FALSE;
+		OSLockDestroy(psContext->psContextCtl->hSyncCheckpointPoolLock);
+		psContext->psContextCtl->hSyncCheckpointPoolLock = NULL;
+#endif
+		OSFreeMem(psContext->psContextCtl);
+		RA_Delete(psContext->psSpanRA);
+		RA_Delete(psContext->psSubAllocRA);
+		OSLockDestroy(psContext->hLock);
+		psContext->hLock = NULL;
+		OSFreeMem(psContext);
+	}
+}
+
+static void
+_SyncCheckpointContextRef(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+	if (!OSAtomicRead(&psContext->hRefCount))
+	{
+		PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT,
+		              "_SyncCheckpointContextRef context use after free");
+	}
+	else
+	{
+		OSAtomicIncrement(&psContext->hRefCount);
+	}
+}
+
+/*
+	Internal interfaces for management of synchronisation block memory
+*/
+static PVRSRV_ERROR
+_AllocSyncCheckpointBlock(_SYNC_CHECKPOINT_CONTEXT *psContext,
+						  SYNC_CHECKPOINT_BLOCK    **ppsSyncBlock)
+{
+	PVRSRV_DEVICE_NODE *psDevNode;
+	SYNC_CHECKPOINT_BLOCK *psSyncBlk;
+	PVRSRV_ERROR eError;
+
+	psSyncBlk = OSAllocMem(sizeof(*psSyncBlk));
+	PVR_LOGG_IF_NOMEM(psSyncBlk, "OSAllocMem", eError, fail_alloc);
+
+	psSyncBlk->psContext = psContext;
+
+	/* Allocate sync checkpoint block */
+	psDevNode = psContext->psDevNode;
+	if (!psDevNode)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_LOG_ERROR(eError, "context device node invalid");
+		goto fail_alloc_ufo_block;
+	}
+	psSyncBlk->psDevNode = psDevNode;
+
+	eError = psDevNode->pfnAllocUFOBlock(psDevNode,
+											 &psSyncBlk->hMemDesc,
+											 &psSyncBlk->ui32FirmwareAddr,
+											 &psSyncBlk->ui32SyncBlockSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_ERROR(eError, "failed to allocate ufo block");
+		goto fail_alloc_ufo_block;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc,
+									  (void **) &psSyncBlk->pui32LinAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_ERROR(eError, "DevmemAcquireCpuVirtAddr");
+		goto fail_devmem_acquire;
+	}
+
+	OSAtomicWrite(&psSyncBlk->hRefCount, 1);
+
+	OSLockCreate(&psSyncBlk->hLock, LOCK_TYPE_NONE);
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+						  "Allocated Sync Checkpoint UFO block (FirmwareVAddr = 0x%08x)",
+						  psSyncBlk->ui32FirmwareAddr);
+
+	*ppsSyncBlock = psSyncBlk;
+	return PVRSRV_OK;
+
+fail_devmem_acquire:
+	psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc);
+fail_alloc_ufo_block:
+	OSFreeMem(psSyncBlk);
+fail_alloc:
+	return eError;
+}
+
+static void
+_FreeSyncCheckpointBlock(SYNC_CHECKPOINT_BLOCK *psSyncBlk)
+{
+	OSLockAcquire(psSyncBlk->hLock);
+	if (0 == OSAtomicDecrement(&psSyncBlk->hRefCount))
+	{
+		PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+
+		DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc);
+		psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc);
+		OSLockRelease(psSyncBlk->hLock);
+		OSLockDestroy(psSyncBlk->hLock);
+		psSyncBlk->hLock = NULL;
+		OSFreeMem(psSyncBlk);
+	}
+	else
+	{
+		OSLockRelease(psSyncBlk->hLock);
+	}
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointBlockImport(RA_PERARENA_HANDLE hArena,
+                           RA_LENGTH_T uSize,
+                           RA_FLAGS_T uFlags,
+                           const IMG_CHAR *pszAnnotation,
+                           RA_BASE_T *puiBase,
+                           RA_LENGTH_T *puiActualSize,
+                           RA_PERISPAN_HANDLE *phImport)
+{
+	_SYNC_CHECKPOINT_CONTEXT *psContext = hArena;
+	SYNC_CHECKPOINT_BLOCK *psSyncBlock = NULL;
+	RA_LENGTH_T uiSpanSize;
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(uFlags);
+
+	PVR_LOG_IF_FALSE((hArena != NULL), "hArena is NULL");
+
+	/* Check we've not been called with an unexpected size */
+	PVR_LOG_IF_FALSE((uSize == sizeof(_SYNC_CHECKPOINT_FW_OBJ)),
+	                 "uSize is not the size of _SYNC_CHECKPOINT_FW_OBJ");
+
+	/*
+		Ensure the sync checkpoint context doesn't go away while we have sync blocks
+		attached to it
+	*/
+	_SyncCheckpointContextRef(psContext);
+
+	/* Allocate the block of memory */
+	eError = _AllocSyncCheckpointBlock(psContext, &psSyncBlock);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_syncblockalloc;
+	}
+
+	/* Allocate a span for it */
+	eError = RA_Alloc(psContext->psSpanRA,
+					psSyncBlock->ui32SyncBlockSize,
+					RA_NO_IMPORT_MULTIPLIER,
+					0,
+					psSyncBlock->ui32SyncBlockSize,
+					pszAnnotation,
+					&psSyncBlock->uiSpanBase,
+					&uiSpanSize,
+					NULL);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_spanalloc;
+	}
+
+	/*
+		There is no reason the span RA should return an allocation larger
+		than we requested
+	*/
+	PVR_LOG_IF_FALSE((uiSpanSize == psSyncBlock->ui32SyncBlockSize),
+	                 "uiSpanSize invalid");
+
+	*puiBase = psSyncBlock->uiSpanBase;
+	*puiActualSize = psSyncBlock->ui32SyncBlockSize;
+	*phImport = psSyncBlock;
+	return PVRSRV_OK;
+
+fail_spanalloc:
+	_FreeSyncCheckpointBlock(psSyncBlock);
+fail_syncblockalloc:
+	_SyncCheckpointContextUnref(psContext);
+
+	return eError;
+}
+
+static void
+_SyncCheckpointBlockUnimport(RA_PERARENA_HANDLE hArena,
+                             RA_BASE_T uiBase,
+                             RA_PERISPAN_HANDLE hImport)
+{
+	_SYNC_CHECKPOINT_CONTEXT *psContext = hArena;
+	SYNC_CHECKPOINT_BLOCK   *psSyncBlock = hImport;
+
+	PVR_LOG_IF_FALSE((psContext != NULL), "hArena invalid");
+	PVR_LOG_IF_FALSE((psSyncBlock != NULL), "hImport invalid");
+	PVR_LOG_IF_FALSE((uiBase == psSyncBlock->uiSpanBase), "uiBase invalid");
+
+	/* Free the span this import is using */
+	RA_Free(psContext->psSpanRA, uiBase);
+
+	/* Free the sync checkpoint block */
+	_FreeSyncCheckpointBlock(psSyncBlock);
+
+	/* Drop our reference to the sync checkpoint context */
+	_SyncCheckpointContextUnref(psContext);
+}
+
+static INLINE IMG_UINT32 _SyncCheckpointGetOffset(_SYNC_CHECKPOINT *psSyncInt)
+{
+	IMG_UINT64 ui64Temp;
+
+	ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncCheckpointBlock->uiSpanBase;
+	PVR_ASSERT(ui64Temp < IMG_UINT32_MAX);
+	return (IMG_UINT32)ui64Temp;
+}
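+
+/* The offset always fits in 32 bits: blocks are carved from a span RA sized
+ * MAX_SYNC_CHECKPOINT_MEM (4 MiB), so the assert above should only trip on
+ * corruption.
+ */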
+
+/* Used by SyncCheckpointContextCreate() below */
+static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align)
+{
+	PVR_ASSERT(IsPower2(ui32Align));
+	return ExactLog2(ui32Align);
+}
+
+/*
+	External interfaces
+*/
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve,
+                                PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate,
+                                PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback,
+                                PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise,
+                                PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines,
+                                PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	g_pfnFenceResolve = pfnFenceResolve;
+	g_pfnFenceCreate = pfnFenceCreate;
+	g_pfnFenceDataRollback = pfnFenceDataRollback;
+	g_pfnFenceFinalise = pfnFenceFinalise;
+	g_pfnNoHWUpdateTimelines = pfnNoHWUpdateTimelines;
+	g_pfnFreeChkptListMem = pfnFreeCheckpointListMem;
+
+	return eError;
+}
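+
+/* These hooks are registered once at initialisation, presumably by the
+ * OS-specific native fence implementation; until then the SyncCheckpoint*
+ * wrappers below fail with PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED
+ * (SyncCheckpointFinaliseFence downgrades the missing hook to a warning,
+ * and SyncCheckpointFreeCheckpointListMem silently no-ops).
+ */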
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                           PVRSRV_FENCE hFence, IMG_UINT32 *pui32NumSyncCheckpoints,
+                           PSYNC_CHECKPOINT **papsSyncCheckpoints,
+                           IMG_UINT32 *pui32FenceUID)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!g_pfnFenceResolve)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+				__FUNCTION__));
+		eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+		PVR_LOG_ERROR(eError, "g_pfnFenceResolve is NULL");
+	}
+	else
+	{
+		if (papsSyncCheckpoints)
+		{
+			eError = g_pfnFenceResolve(psSyncCheckpointContext,
+			                           hFence,
+			                           pui32NumSyncCheckpoints,
+			                           papsSyncCheckpoints,
+			                           pui32FenceUID);
+		}
+		else
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+	if (eError == PVRSRV_OK)
+	{
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+		IMG_UINT32 ii;
+
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: g_pfnFenceResolve() for fence %d returned the following %d checkpoints:",
+				__FUNCTION__,
+				hFence,
+				*pui32NumSyncCheckpoints));
+
+		for (ii=0; ii<*pui32NumSyncCheckpoints; ii++)
+		{
+			PSYNC_CHECKPOINT psNextCheckpoint = *(*papsSyncCheckpoints + ii);
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s:   *papsSyncCheckpoints[%d]:<%p>",
+					__FUNCTION__,
+					ii,
+					(void*)psNextCheckpoint));
+		}
+#endif
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: ERROR g_pfnFenceResolve() returned %s",
+				__FUNCTION__,
+				PVRSRVGetErrorStringKM(eError)));
+	}
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointCreateFence(PVRSRV_DEVICE_NODE *psDevNode,
+                          const IMG_CHAR *pszFenceName,
+                          PVRSRV_TIMELINE hTimeline,
+                          PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                          PVRSRV_FENCE *phNewFence,
+                          IMG_UINT32 *puiUpdateFenceUID,
+                          void **ppvFenceFinaliseData,
+                          PSYNC_CHECKPOINT *psNewSyncCheckpoint,
+                          void **ppvTimelineUpdateSyncPrim,
+                          IMG_UINT32 *pui32TimelineUpdateValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+	if (!g_pfnFenceCreate)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+				__FUNCTION__));
+		eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+		PVR_LOG_ERROR(eError, "g_pfnFenceCreate is NULL");
+	}
+	else
+	{
+		eError = g_pfnFenceCreate(pszFenceName,
+		                          hTimeline,
+		                          psSyncCheckpointContext,
+		                          phNewFence,
+		                          puiUpdateFenceUID,
+		                          ppvFenceFinaliseData,
+		                          psNewSyncCheckpoint,
+		                          ppvTimelineUpdateSyncPrim,
+		                          pui32TimelineUpdateValue);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s failed to create new fence<%p> for timeline<%d> using "
+					"sync checkpoint context<%p>, psNewSyncCheckpoint=<%p>, eError=%s",
+			         __FUNCTION__,
+			         (void*)phNewFence,
+			         hTimeline,
+			         (void*)psSyncCheckpointContext,
+			         (void*)psNewSyncCheckpoint,
+			         PVRSRVGetErrorStringKM(eError)));
+		}
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s created new fence<%d> for timeline<%d> using "
+					"sync checkpoint context<%p>, new sync_checkpoint=<%p>",
+			         __FUNCTION__,
+			         *phNewFence,
+			         hTimeline,
+			         (void*)psSyncCheckpointContext,
+			         (void*)*psNewSyncCheckpoint));
+		}
+#endif
+	}
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!g_pfnFenceDataRollback)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+				__FUNCTION__));
+		eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+		PVR_LOG_ERROR(eError, "g_pfnFenceDataRollback is NULL");
+	}
+	else
+	{
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: called to rollback fence data <%p>",
+				__FUNCTION__,
+				pvFinaliseData));
+#endif
+		eError = g_pfnFenceDataRollback(hFence, pvFinaliseData);
+		PVR_LOG_IF_ERROR(eError, "g_pfnFenceDataRollback returned error");
+	}
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointFinaliseFence(PVRSRV_FENCE hFence, void *pvFinaliseData)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!g_pfnFenceFinalise)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				"%s: Warning (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) (this is permitted)",
+				__FUNCTION__));
+		eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+	}
+	else
+	{
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: called to finalise fence <%d>",
+				__FUNCTION__,
+				hFence));
+#endif
+		eError = g_pfnFenceFinalise(hFence, pvFinaliseData);
+		PVR_LOG_IF_ERROR(eError, "g_pfnFenceFinalise returned error");
+	}
+	return eError;
+}
+
+IMG_INTERNAL void
+SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem)
+{
+	if (g_pfnFreeChkptListMem)
+	{
+		g_pfnFreeChkptListMem(pvCheckpointListMem);
+	}
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!g_pfnNoHWUpdateTimelines)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+				__FUNCTION__));
+		eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+		PVR_LOG_ERROR(eError, "g_pfnNoHWUpdateTimelines is NULL");
+	}
+	else
+	{
+		g_pfnNoHWUpdateTimelines(pvPrivateData);
+	}
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode,
+							PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext)
+{
+	_SYNC_CHECKPOINT_CONTEXT *psContext = NULL;
+	_SYNC_CHECKPOINT_CONTEXT_CTL *psContextCtl = NULL;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_LOGR_IF_FALSE((ppsSyncCheckpointContext != NULL),
+	                  "ppsSyncCheckpointContext invalid",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+
+	psContext = OSAllocMem(sizeof(*psContext));
+	PVR_LOGG_IF_NOMEM(psContext, "OSAllocMem", eError, fail_alloc); /* Sets OOM error code */
+
+	psContextCtl = OSAllocMem(sizeof(*psContextCtl));
+	PVR_LOGG_IF_NOMEM(psContextCtl, "OSAllocMem", eError, fail_alloc2); /* Sets OOM error code */
+
+	eError = OSLockCreate(&psContext->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call "
+				"to OSLockCreate(context lock) failed");
+		goto fail_create_context_lock;
+	}
+
+	eError = OSLockCreate(&psContextCtl->hDeferredCleanupListLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call "
+				"to OSLockCreate(deferred cleanup list lock) failed");
+		goto fail_create_deferred_cleanup_lock;
+	}
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+	eError = OSLockCreate(&psContextCtl->hSyncCheckpointPoolLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call "
+				"to OSLockCreate(sync checkpoint pool lock) failed");
+		goto fail_create_pool_lock;
+	}
+#endif
+
+	dllist_init(&psContextCtl->sDeferredCleanupListHead);
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+	psContextCtl->ui32SyncCheckpointPoolCount = 0;
+	psContextCtl->ui32SyncCheckpointPoolWp = 0;
+	psContextCtl->ui32SyncCheckpointPoolRp = 0;
+	psContextCtl->bSyncCheckpointPoolFull = IMG_FALSE;
+	psContextCtl->bSyncCheckpointPoolValid = IMG_TRUE;
+#endif
+	psContext->psDevNode = psDevNode;
+
+	OSSNPrintf(psContext->azName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim RA-%p", psContext);
+	OSSNPrintf(psContext->azSpanName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim span RA-%p", psContext);
+
+	/*
+		Create the RA for sub-allocations of the sync checkpoints
+
+		Note:
+		The import size doesn't matter here as the server will pass
+		back the blocksize when it does the import which overrides
+		what we specify here.
+	*/
+	psContext->psSubAllocRA = RA_Create(psContext->azName,
+										/* Params for imports */
+										_Log2(sizeof(IMG_UINT32)),
+										RA_LOCKCLASS_2,
+										_SyncCheckpointBlockImport,
+										_SyncCheckpointBlockUnimport,
+										psContext,
+										IMG_FALSE);
+	if (psContext->psSubAllocRA == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Create(subAlloc) failed");
+		goto fail_suballoc;
+	}
+
+	/*
+		Create the span-management RA
+
+		The RA requires that we work with linear spans. For our use
+		here we don't require this behaviour as we're always working
+		within offsets of blocks (imports). However, we need to keep
+		the RA happy so we create the "span" management RA which
+		ensures that all our imports are added to the RA in a linear
+		fashion.
+	*/
+	psContext->psSpanRA = RA_Create(psContext->azSpanName,
+									/* Params for imports */
+									0,
+									RA_LOCKCLASS_1,
+									NULL,
+									NULL,
+									NULL,
+									IMG_FALSE);
+	if (psContext->psSpanRA == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Create(span) failed");
+		goto fail_span;
+	}
+
+	if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_CHECKPOINT_MEM, 0, NULL))
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Add(span) failed");
+		goto fail_span_add;
+	}
+
+	OSAtomicWrite(&psContext->hRefCount, 1);
+	OSAtomicWrite(&psContext->hCheckpointCount, 0);
+
+	psContext->psContextCtl = psContextCtl;
+
+	*ppsSyncCheckpointContext = (PSYNC_CHECKPOINT_CONTEXT)psContext;
+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s: created psSyncCheckpointContext=<%p>",
+			__FUNCTION__,
+			(void*)*ppsSyncCheckpointContext));
+#endif
+	return PVRSRV_OK;
+
+fail_span_add:
+	RA_Delete(psContext->psSpanRA);
+fail_span:
+	RA_Delete(psContext->psSubAllocRA);
+fail_suballoc:
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+	OSLockDestroy(psContextCtl->hSyncCheckpointPoolLock);
+	psContextCtl->hSyncCheckpointPoolLock = NULL;
+fail_create_pool_lock:
+#endif
+	OSLockDestroy(psContextCtl->hDeferredCleanupListLock);
+	psContextCtl->hDeferredCleanupListLock = NULL;
+fail_create_deferred_cleanup_lock:
+	OSLockDestroy(psContext->hLock);
+	psContext->hLock = NULL;
+fail_create_context_lock:
+	OSFreeMem(psContextCtl);
+fail_alloc2:
+	OSFreeMem(psContext);
+fail_alloc:
+	return eError;
+}
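+
+/* A minimal usage sketch (illustrative only; error handling and the
+ * fence/timeline plumbing are omitted):
+ *
+ *     PSYNC_CHECKPOINT_CONTEXT psCtx;
+ *     PSYNC_CHECKPOINT psChkpt;
+ *
+ *     SyncCheckpointContextCreate(psDevNode, &psCtx);
+ *     SyncCheckpointAlloc(psCtx, hTimeline, "example", &psChkpt);
+ *     ...                          (enqueue FW work referencing psChkpt)
+ *     SyncCheckpointSignal(psChkpt);
+ *     SyncCheckpointFree(psChkpt);
+ *     SyncCheckpointContextDestroy(psCtx);
+ */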
+
+/* Poisons and frees the checkpoint and lock.
+ * Decrements context refcount. */
+static void _FreeSyncCheckpoint(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext;
+
+	psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = 0;
+	psSyncCheckpoint->psSyncCheckpointFwObj = NULL;
+	psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED;
+
+	RA_Free(psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA,
+			psSyncCheckpoint->uiSpanAddr);
+	psSyncCheckpoint->psSyncCheckpointBlock = NULL;
+
+	OSLockDestroy(psSyncCheckpoint->hLock);
+	OSFreeMem(psSyncCheckpoint);
+
+	OSAtomicDecrement(&psContext->hCheckpointCount);
+}
+
+static IMG_BOOL _FreeSyncCheckpointIfPossible(_SYNC_CHECKPOINT *psSyncCheckpoint, _SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+	IMG_BOOL bFreed = IMG_FALSE;
+
+	/* Attempt to free the sync checkpoint if not still referenced by client */
+	if (OSAtomicRead(&psSyncCheckpoint->hRefCount) == 0)
+	{
+		/* Free the sync checkpoint if not still referenced by FW */
+		if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount ==
+				(IMG_UINT32)(OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount)))
+		{
+			dllist_remove_node(&psSyncCheckpoint->sListNode);
+		#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+					__FUNCTION__,
+					psSyncCheckpoint->ui32UID,
+					(void*)psSyncCheckpoint,
+					(void*)psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA,
+					psSyncCheckpoint->uiSpanAddr));
+		#endif
+			_FreeSyncCheckpoint(psSyncCheckpoint);
+			bFreed = IMG_TRUE;
+		}
+	}
+	return bFreed;
+}
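+
+/* The reclaim condition above is the central invariant of this file: a
+ * checkpoint may only be freed once no client holds a reference
+ * (hRefCount == 0) and the FW has consumed every CCB command referencing
+ * it, i.e. ui32FwRefCount has caught up with hEnqueuedCCBCount.
+ */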
+
+IMG_INTERNAL PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	_SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointContext;
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode;
+	IMG_INT iRf = 0;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpointContext != NULL), "psSyncCheckpointContext invalid");
+
+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s: destroying psSyncCheckpointContext=<%p>",
+			__FUNCTION__,
+			(void*)psSyncCheckpointContext));
+#endif
+
+	_CheckDeferredCleanupList(psContext);
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+	if (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0)
+	{
+		IMG_UINT32 ui32NumFreedFromPool = _CleanCheckpointPool(psContext);
+
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s freed %d sync checkpoints that were still in the pool for context<%p>",
+				__FUNCTION__,
+				ui32NumFreedFromPool,
+				(void*)psContext));
+#else
+		PVR_UNREFERENCED_PARAMETER(ui32NumFreedFromPool);
+#endif
+	}
+#endif
+
+	iRf = OSAtomicRead(&psContext->hCheckpointCount);
+
+	if (iRf != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s <%p> attempted with active references (iRf=%d), "
+				"may be the result of a race",
+				__FUNCTION__,
+				(void*)psContext,
+				iRf));
+
+		OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+		{
+			DLLIST_NODE *psNode, *psNext;
+
+			dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+			{
+				_SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+
+				/* Line below avoids build error in release builds (where PVR_DPF is not defined) */
+				PVR_UNREFERENCED_PARAMETER(psSyncCheckpoint);
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s syncCheckpoint<%p> ID=%d, %s, state=%s, enqCount:%d,FWCount:%d ",
+						__FUNCTION__,
+						(void*)psSyncCheckpoint,
+						psSyncCheckpoint->ui32UID,
+						psSyncCheckpoint->azName,
+				         psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED ?
+				        		 "PVRSRV_SYNC_CHECKPOINT_SIGNALLED" :
+				        			 psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED ?
+				        					 "PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED" : "PVRSRV_SYNC_CHECKPOINT_ERRORED",
+				         OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount),
+				         psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount));
+
+				if (!_FreeSyncCheckpointIfPossible(psSyncCheckpoint, psContext))
+				{
+					eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT;
+				}
+			}
+		}
+		OSLockRelease(psDevNode->hSyncCheckpointListLock);
+	}
+	else
+	{
+		/* No sync checkpoints outstanding; drop the context's initial reference */
+		_SyncCheckpointContextUnref(psContext);
+	}
+
+	PVR_LOG_IF_ERROR(eError, "SyncCheckpointContextDestroy returning error");
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR
+SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
+	                PVRSRV_TIMELINE hTimeline,
+	                const IMG_CHAR *pszCheckpointName,
+	                PSYNC_CHECKPOINT *ppsSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL;
+	_SYNC_CHECKPOINT_CONTEXT *psSyncContextInt = (_SYNC_CHECKPOINT_CONTEXT*)psSyncContext;
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PVRSRV_ERROR eError;
+
+	PVR_LOGR_IF_FALSE((psSyncContext != NULL), "psSyncContext invalid", PVRSRV_ERROR_INVALID_PARAMS);
+	PVR_LOGR_IF_FALSE((ppsSyncCheckpoint != NULL), "ppsSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+	psDevNode = (PVRSRV_DEVICE_NODE *)psSyncContextInt->psDevNode;
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+	PVR_DPF((PVR_DBG_ERROR, "%s Entry, Getting checkpoint from pool", __FUNCTION__));
+#endif
+	psNewSyncCheckpoint = _GetCheckpointFromPool(psSyncContextInt);
+	if (!psNewSyncCheckpoint)
+	{
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+		PVR_DPF((PVR_DBG_ERROR, "%s     checkpoint pool empty - will have to allocate", __FUNCTION__));
+#endif
+	}
+#endif
+	/* If pool is empty (or not defined) alloc the new sync checkpoint */
+	if (!psNewSyncCheckpoint)
+	{
+		psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint));
+		PVR_LOGG_IF_NOMEM(psNewSyncCheckpoint, "OSAllocMem", eError, fail_alloc); /* Sets OOM error code */
+
+		eError = OSLockCreate(&psNewSyncCheckpoint->hLock, LOCK_TYPE_NONE);
+
+		PVR_LOGG_IF_ERROR(eError, "OSLockCreate", fail_create_checkpoint_lock);
+
+		eError = RA_Alloc(psSyncContextInt->psSubAllocRA,
+		                  sizeof(*psNewSyncCheckpoint->psSyncCheckpointFwObj),
+		                  RA_NO_IMPORT_MULTIPLIER,
+		                  0,
+		                  sizeof(IMG_UINT32),
+		                  (IMG_CHAR*)pszCheckpointName,
+		                  &psNewSyncCheckpoint->uiSpanAddr,
+		                  NULL,
+		                  (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock);
+		PVR_LOGG_IF_ERROR(eError, "RA_Alloc", fail_raalloc);
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s CALLED RA_Alloc(), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+				__FUNCTION__,
+				(void*)psSyncContextInt->psSubAllocRA,
+				psNewSyncCheckpoint->uiSpanAddr));
+#endif
+		psNewSyncCheckpoint->hTimeline = hTimeline;
+		psNewSyncCheckpoint->psSyncCheckpointFwObj =
+				(volatile _SYNC_CHECKPOINT_FW_OBJ*)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr +
+						(_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32)));
+		OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount);
+		psNewSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE;
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s called to allocate new sync checkpoint<%p> for context<%p>", __FUNCTION__, (void*)psNewSyncCheckpoint, (void*)psSyncContext));
+		PVR_DPF((PVR_DBG_ERROR, "%s                    psSyncCheckpointFwObj<%p>", __FUNCTION__, (void*)psNewSyncCheckpoint->psSyncCheckpointFwObj));
+		PVR_DPF((PVR_DBG_ERROR, "%s                    psSyncCheckpoint FwAddr=0x%x", __FUNCTION__, SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint)));
+		PVR_DPF((PVR_DBG_ERROR, "%s                    pszCheckpointName = %s", __FUNCTION__, pszCheckpointName));
+		PVR_DPF((PVR_DBG_ERROR, "%s                    psSyncCheckpoint Timeline=%d", __FUNCTION__, psNewSyncCheckpoint->hTimeline));
+#endif
+	}
+
+	OSAtomicWrite(&psNewSyncCheckpoint->hRefCount, 1);
+	OSAtomicWrite(&psNewSyncCheckpoint->hEnqueuedCCBCount, 0);
+	psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount = 0;
+	psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED;
+
+	if(pszCheckpointName)
+	{
+		/* Copy over the checkpoint name annotation */
+		OSStringNCopy(psNewSyncCheckpoint->azName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH);
+		psNewSyncCheckpoint->azName[PVRSRV_SYNC_NAME_LENGTH-1] = 0;
+	}
+	else
+	{
+		/* No sync checkpoint name annotation */
+		psNewSyncCheckpoint->azName[0] = '\0';
+	}
+
+	/* Store sync checkpoint FW address in PRGXFWIF_UFO_ADDR struct */
+	psNewSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint);
+
+	/* Assign unique ID to this sync checkpoint */
+	psNewSyncCheckpoint->ui32UID = g_SyncCheckpointUID++;
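+	/* Note: the increment above is not atomic, so concurrent allocations
+	 * could in principle observe duplicate UIDs; the UID appears to be a
+	 * debug/trace aid rather than a functional key.
+	 */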
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+						  "Allocated Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
+						  psNewSyncCheckpoint->azName,
+						  psNewSyncCheckpoint->ui32UID, psNewSyncCheckpoint->hTimeline,
+						  psNewSyncCheckpoint->sCheckpointUFOAddr.ui32Addr);
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	{
+		IMG_CHAR szChkptName[PVRSRV_SYNC_NAME_LENGTH];
+
+		if(pszCheckpointName)
+		{
+			/* Copy the checkpoint name annotation into a fixed-size array */
+			OSStringNCopy(szChkptName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH - 1);
+			szChkptName[PVRSRV_SYNC_NAME_LENGTH - 1] = 0;
+		}
+		else
+		{
+			/* No checkpoint name annotation */
+			szChkptName[0] = 0;
+		}
+		/* record this sync */
+		eError = SyncCheckpointRecordAdd(&psNewSyncCheckpoint->hRecord,
+		                                 psNewSyncCheckpoint->psSyncCheckpointBlock,
+		                                 psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr,
+		                                 _SyncCheckpointGetOffset(psNewSyncCheckpoint),
+		                                 psNewSyncCheckpoint->ui32UID,
+		                                 OSStringNLength(szChkptName, PVRSRV_SYNC_NAME_LENGTH),
+		                                 szChkptName);
+		if(eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\" (%s)",
+												__func__,
+												szChkptName,
+												PVRSRVGetErrorStringKM(eError)));
+			psNewSyncCheckpoint->hRecord = NULL;
+			/* note the error but continue without affecting driver operation */
+		}
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(pszCheckpointName);
+#endif /* if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+	/* Add the sync checkpoint to the device list */
+	OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+	dllist_add_to_head(&psDevNode->sSyncCheckpointSyncsList,
+	                   &psNewSyncCheckpoint->sListNode);
+	OSLockRelease(psDevNode->hSyncCheckpointListLock);
+
+	*ppsSyncCheckpoint = (PSYNC_CHECKPOINT)psNewSyncCheckpoint;
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s Exit(Ok), psNewSyncCheckpoint->ui32UID=%d <%p>",
+			__FUNCTION__,
+			psNewSyncCheckpoint->ui32UID,
+			(void*)psNewSyncCheckpoint));
+#endif
+	return PVRSRV_OK;
+
+fail_raalloc:
+	OSLockDestroy(psNewSyncCheckpoint->hLock);
+	psNewSyncCheckpoint->hLock = NULL;
+fail_create_checkpoint_lock:
+	OSFreeMem(psNewSyncCheckpoint);
+fail_alloc:
+	return eError;
+}
+
+static void SyncCheckpointUnref(_SYNC_CHECKPOINT *psSyncCheckpointInt)
+{
+	_SYNC_CHECKPOINT_CONTEXT *psContext;
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	psContext = psSyncCheckpointInt->psSyncCheckpointBlock->psContext;
+	psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode;
+
+	/*
+	 * Without this reference, the context may be destroyed as soon
+	 * as _FreeSyncCheckpoint is called, but the context is still
+	 * needed when _CheckDeferredCleanupList is called at the end
+	 * of this function.
+	 */
+	_SyncCheckpointContextRef(psContext);
+
+	PVR_ASSERT(psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE);
+	if (!OSAtomicRead(&psSyncCheckpointInt->hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncCheckpointUnref sync checkpoint already freed"));
+	}
+	else if (0 == OSAtomicDecrement(&psSyncCheckpointInt->hRefCount))
+	{
+		/* If the firmware has serviced all enqueued references to the sync checkpoint, free it */
+		if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount ==
+				(IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)))
+		{
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s No outstanding FW ops and hRef is zero, deleting SyncCheckpoint..",
+					__FUNCTION__));
+#endif
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+			if(psSyncCheckpointInt->hRecord)
+			{
+				PVRSRV_ERROR eError;
+				/* remove this sync record */
+				eError = SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord);
+				PVR_LOG_IF_ERROR(eError, "SyncCheckpointRecordRemove");
+			}
+#endif
+			/* Remove the sync checkpoint from the global list */
+			OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+			dllist_remove_node(&psSyncCheckpointInt->sListNode);
+			OSLockRelease(psDevNode->hSyncCheckpointListLock);
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s attempting to return sync checkpoint to the pool",
+					__FUNCTION__));
+#endif
+			if (!_PutCheckpointInPool(psSyncCheckpointInt))
+#endif
+			{
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s pool is full, so just free it",
+						__FUNCTION__));
+#endif
+#endif
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+						__FUNCTION__,
+						psSyncCheckpointInt->ui32UID,
+						(void*)psSyncCheckpointInt,
+						(void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA,
+						psSyncCheckpointInt->uiSpanAddr));
+#endif
+				_FreeSyncCheckpoint(psSyncCheckpointInt);
+			}
+		}
+		else
+		{
+#if ((ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s Outstanding FW ops hEnqueuedCCBCount=%d != FwObj->ui32FwRefCount=%d "
+					"- DEFERRING CLEANUP psSyncCheckpoint(ID:%d)<%p>",
+					__FUNCTION__,
+					OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+					psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount,
+					psSyncCheckpointInt->ui32UID,
+					(void*)psSyncCheckpointInt));
+#endif
+			/* Add the sync checkpoint to the deferred free list */
+			OSLockAcquire(psContext->psContextCtl->hDeferredCleanupListLock);
+			dllist_add_to_tail(&psContext->psContextCtl->sDeferredCleanupListHead,
+			                   &psSyncCheckpointInt->sDeferredFreeListNode);
+			OSLockRelease(psContext->psContextCtl->hDeferredCleanupListLock);
+		}
+	}
+	else
+	{
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s psSyncCheckpoint(ID:%d)<%p>, hRefCount decremented to %d",
+				__FUNCTION__,
+				psSyncCheckpointInt->ui32UID,
+				(void*)psSyncCheckpointInt,
+				(IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount))));
+#endif
+	}
+
+	/* See if any sync checkpoints in the deferred cleanup list can be freed */
+	_CheckDeferredCleanupList(psContext);
+
+	_SyncCheckpointContextUnref(psContext);
+}
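+
+/* SyncCheckpointUnref() therefore has three outcomes: with the last client
+ * reference gone and all FW references consumed, the checkpoint is returned
+ * to the pool (or freed outright if the pool is full or disabled); with FW
+ * references still outstanding it is parked on the deferred cleanup list;
+ * otherwise only the refcount is decremented.
+ */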
+
+IMG_INTERNAL void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s Entry,  psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d, psSyncCheckpoint->ui32ValidationCheck=0x%x",
+			__FUNCTION__,
+			psSyncCheckpointInt->ui32UID,
+			(void*)psSyncCheckpoint,
+			(IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)),
+			psSyncCheckpointInt->ui32ValidationCheck));
+#endif
+	SyncCheckpointUnref(psSyncCheckpointInt);
+}
+
+IMG_INTERNAL void
+SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+	if(psSyncCheckpointInt)
+	{
+		PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED),
+		                 "psSyncCheckpoint already signalled");
+
+		if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED)
+		{
+			psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+#if (ENABLE_SYNC_CHECKPOINT_HWPERF_HOST_EVENTS == 1)
+			{
+				RGX_HWPERF_UFO_DATA_ELEMENT asSyncData[1];
+
+				asSyncData[0].sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint);
+				asSyncData[0].sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+				asSyncData[0].sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+
+				/* We may need to temporarily disable the posting of HWPERF Host events, as the caller can be
+				 * in interrupt context and RGX_HWPERF_HOST_UFO calls a function which takes a mutex
+				 */
+				RGX_HWPERF_HOST_UFO(RGX_HWPERF_UFO_EV_UPDATE, asSyncData, 1);
+			}
+#endif
+#if defined(PDUMP)
+			/* We may need to temporarily disable the posting of PDump events here, as the caller can be
+			 * in interrupt context and PDUMPCOMMENTWITHFLAGS takes the PDUMP_LOCK mutex
+			 */
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+								  "Signalled Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
+								  psSyncCheckpointInt->azName,
+								  psSyncCheckpointInt->ui32UID, psSyncCheckpointInt->hTimeline,
+								  (psSyncCheckpointInt->psSyncCheckpointBlock->ui32FirmwareAddr +
+										  _SyncCheckpointGetOffset(psSyncCheckpointInt)));
+			SyncCheckpointSignalPDump(psSyncCheckpointInt);
+#endif
+		}
+		else
+		{
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), "
+					"when value is already %d",
+					__FUNCTION__,
+					PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+					psSyncCheckpointInt->ui32UID,
+					psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State));
+#endif
+		}
+	}
+}
+
+IMG_INTERNAL void
+SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+	if(psSyncCheckpointInt)
+	{
+		PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED),
+		                 "psSyncCheckpoint already signalled");
+
+		if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED)
+		{
+			psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+#if (ENABLE_SYNC_CHECKPOINT_HWPERF_HOST_EVENTS == 1)
+			{
+				RGX_HWPERF_UFO_DATA_ELEMENT asSyncData[1];
+
+				asSyncData[0].sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint);
+				asSyncData[0].sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+				asSyncData[0].sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+
+				/* We may need to temporarily disable the posting of HWPERF Host events, as the host could be
+				 * holding a spin lock, and RGX_HWPERF_HOST_UFO calls a function which takes a mutex
+				 */
+				RGX_HWPERF_HOST_UFO(RGX_HWPERF_UFO_EV_UPDATE, asSyncData, 1);
+			}
+#endif
+		}
+		else
+		{
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), "
+					"when value is already %d",
+					__FUNCTION__,
+					PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+					psSyncCheckpointInt->ui32UID,
+					psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State));
+#endif
+		}
+	}
+}
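+
+/* SyncCheckpointSignalNoHW() mirrors SyncCheckpointSignal() but omits the
+ * PDump emission; it appears to be intended for NO_HARDWARE builds, where
+ * the driver retires checkpoints itself and then kicks the native fence
+ * layer via SyncCheckpointNoHWUpdateTimelines().
+ */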
+
+IMG_INTERNAL void
+SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+	if(psSyncCheckpointInt)
+	{
+		PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED),
+		                 "psSyncCheckpoint already signalled");
+
+		if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED)
+		{
+			psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+#if (ENABLE_SYNC_CHECKPOINT_HWPERF_HOST_EVENTS == 1)
+			{
+				RGX_HWPERF_UFO_DATA_ELEMENT asSyncData[1];
+
+				asSyncData[0].sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint);
+				asSyncData[0].sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+				asSyncData[0].sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+
+				/* We may need to temporarily disable the posting of HWPERF Host events, as the caller can be
+				 * in interrupt context and RGX_HWPERF_HOST_UFO calls a function which takes a mutex
+				 */
+				RGX_HWPERF_HOST_UFO(RGX_HWPERF_UFO_EV_UPDATE, asSyncData, 1);
+			}
+#endif
+#if defined(PDUMP)
+			/* We may need to temporarily disable the posting of PDump events here, as the caller can be
+			 * in interrupt context and PDUMPCOMMENTWITHFLAGS takes the PDUMP_LOCK mutex
+			 */
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+								  "Errored Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
+								  psSyncCheckpointInt->azName,
+								  psSyncCheckpointInt->ui32UID, psSyncCheckpointInt->hTimeline,
+								  (psSyncCheckpointInt->psSyncCheckpointBlock->ui32FirmwareAddr +
+										  _SyncCheckpointGetOffset(psSyncCheckpointInt)));
+			SyncCheckpointErrorPDump(psSyncCheckpointInt);
+#endif
+		}
+	}
+}
+
+IMG_BOOL SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	IMG_BOOL bRet = IMG_FALSE;
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+	if (psSyncCheckpointInt)
+	{
+		bRet = ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ||
+		        (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED));
+
+		/* We may need to temporarily disable the posting of HWPERF Host events, as the host could be
+		 * holding a spin lock, and RGX_HWPERF_HOST_UFO calls a function which takes a mutex
+		 */
+#if (ENABLE_SYNC_CHECKPOINT_HWPERF_HOST_EVENTS == 1)
+		{
+			RGX_HWPERF_UFO_DATA_ELEMENT asSyncData[1];
+			RGX_HWPERF_UFO_EV eEV;
+
+			if (bRet)
+			{
+				asSyncData[0].sCheckSuccess.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint);
+				asSyncData[0].sCheckSuccess.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+
+				eEV = RGX_HWPERF_UFO_EV_CHECK_SUCCESS;
+			}
+			else
+			{
+				asSyncData[0].sCheckFail.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint);
+				asSyncData[0].sCheckFail.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+				asSyncData[0].sCheckFail.ui32Required = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+
+				eEV = RGX_HWPERF_UFO_EV_CHECK_FAIL;
+			}
+
+			RGX_HWPERF_HOST_UFO(eEV, asSyncData, 1);
+		}
+#endif
+
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s called for psSyncCheckpoint<%p>, returning %d",
+				__FUNCTION__,
+				(void*)psSyncCheckpoint,
+				bRet));
+#endif
+	}
+	return bRet;
+}
+
+IMG_INTERNAL IMG_BOOL
+SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	IMG_BOOL bRet = IMG_FALSE;
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+	if (psSyncCheckpointInt)
+	{
+		bRet = (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED);
+
+		/* Temporarily remove the posting of HWPERF Host events, as the host could be holding
+		 * a spin lock, and RGX_HWPERF_HOST_UFO calls a function which takes a mutex
+		 */
+#if (ENABLE_SYNC_CHECKPOINT_HWPERF_HOST_EVENTS == 1)
+		{
+			RGX_HWPERF_UFO_DATA_ELEMENT asSyncData[1];
+			RGX_HWPERF_UFO_EV eEV;
+
+			if (bRet)
+			{
+				asSyncData[0].sCheckSuccess.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint);
+				asSyncData[0].sCheckSuccess.ui32Value = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+
+				eEV = RGX_HWPERF_UFO_EV_CHECK_SUCCESS;
+			}
+			else
+			{
+				asSyncData[0].sCheckFail.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint);
+				asSyncData[0].sCheckFail.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+				asSyncData[0].sCheckFail.ui32Required = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+
+				eEV = RGX_HWPERF_UFO_EV_CHECK_FAIL;
+			}
+
+			RGX_HWPERF_HOST_UFO(eEV, asSyncData, 1);
+		}
+#endif
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s called for psSyncCheckpoint<%p>, returning %d",
+				__FUNCTION__,
+				(void*)psSyncCheckpoint,
+				bRet));
+#endif
+	}
+	return bRet;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	PVRSRV_ERROR eRet = PVRSRV_OK;
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOGR_IF_FALSE((psSyncCheckpoint != NULL),
+	                  "psSyncCheckpoint invalid",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+			__func__,
+			psSyncCheckpointInt,
+			OSAtomicRead(&psSyncCheckpointInt->hRefCount),
+			OSAtomicRead(&psSyncCheckpointInt->hRefCount)+1,
+			psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+#endif
+	OSAtomicIncrement(&psSyncCheckpointInt->hRefCount);
+
+	return eRet;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	PVRSRV_ERROR eRet = PVRSRV_OK;
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOGR_IF_FALSE((psSyncCheckpoint != NULL),
+	                  "psSyncCheckpoint invalid",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+			__func__,
+			psSyncCheckpointInt,
+			OSAtomicRead(&psSyncCheckpointInt->hRefCount),
+			OSAtomicRead(&psSyncCheckpointInt->hRefCount)-1,
+			psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+#endif
+	SyncCheckpointUnref(psSyncCheckpointInt);
+
+	return eRet;
+}
+
+IMG_INTERNAL void
+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+	if (psSyncCheckpointInt)
+	{
+#if !defined(NO_HARDWARE)
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+				__FUNCTION__,
+		         (void*)psSyncCheckpoint,
+		         OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+		         OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)+1,
+		         psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+#endif
+		OSAtomicIncrement(&psSyncCheckpointInt->hEnqueuedCCBCount);
+#endif
+	}
+}
+
+IMG_INTERNAL PRGXFWIF_UFO_ADDR*
+SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+	if (psSyncCheckpointInt)
+	{
+		if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
+		{
+			return &psSyncCheckpointInt->sCheckpointUFOAddr;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x",
+					__FUNCTION__,
+			         (void*)psSyncCheckpoint,
+			         psSyncCheckpointInt->ui32ValidationCheck));
+		}
+	}
+
+invalid_chkpt:
+	return NULL;
+}
+
+IMG_INTERNAL IMG_UINT32
+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+	SYNC_CHECKPOINT_BLOCK *psSyncBlock;
+	IMG_UINT32 ui32Ret = 0;
+
+	PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+	if (psSyncCheckpointInt)
+	{
+		if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
+		{
+			psSyncBlock = psSyncCheckpointInt->psSyncCheckpointBlock;
+			/* add 1 to addr to indicate this FW addr is a sync checkpoint (not a sync prim) */
+			ui32Ret = psSyncBlock->ui32FirmwareAddr + _SyncCheckpointGetOffset(psSyncCheckpointInt) + 1;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x",
+					__FUNCTION__,
+			         (void*)psSyncCheckpoint,
+			         psSyncCheckpointInt->ui32ValidationCheck));
+		}
+	}
+	return ui32Ret;
+
+invalid_chkpt:
+	return 0;
+}
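+
+/*
+ * Note on the "+ 1" above: sync checkpoint firmware addresses are tagged by
+ * setting the least significant bit, distinguishing them from sync prim
+ * firmware addresses (which are always 4-byte aligned). A UFO address can
+ * therefore be classified with a test of the form:
+ *
+ *     if (ui32FwAddr & 0x1)
+ *         ... address refers to a sync checkpoint, not a sync prim ...
+ *
+ * (SyncAddrListRollbackCheckpoints() in sync_server.c relies on exactly
+ * this test.)
+ */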
+
+IMG_INTERNAL IMG_UINT32
+SyncCheckpointGetFirmwareAddrFromList(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*) psSyncCheckpoint;
+	SYNC_CHECKPOINT_BLOCK *psSyncBlock;
+	IMG_UINT32 ui32Ret = 0;
+
+	if (psSyncCheckpointInt)
+	{
+		if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
+		{
+			psSyncBlock = psSyncCheckpointInt->psSyncCheckpointBlock;
+			/* add 1 to addr to indicate this FW addr is a sync checkpoint (not a sync prim) */
+			ui32Ret = psSyncBlock->ui32FirmwareAddr + _SyncCheckpointGetOffset(psSyncCheckpointInt) + 1;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s called for psSyncCheckpointInt<%p>, but ui32ValidationCheck=0x%x",
+					__FUNCTION__,
+			         (void*)psSyncCheckpointInt,
+			         psSyncCheckpointInt->ui32ValidationCheck));
+		}
+	}
+
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s returning %u (0x%x) as UFO addr for psSyncCheckpointInt<%p>",
+			__FUNCTION__,
+			ui32Ret,
+			ui32Ret,
+			(void*)psSyncCheckpointInt));
+#endif
+	return ui32Ret;
+}
+
+IMG_INTERNAL IMG_UINT32
+SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+	IMG_UINT32 ui32Ret = 0;
+
+	PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+	if (psSyncCheckpointInt)
+	{
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s returning ID for sync checkpoint<%p>",
+				__FUNCTION__,
+				(void*)psSyncCheckpointInt));
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s (validationCheck=0x%x)",
+				__FUNCTION__,
+				psSyncCheckpointInt->ui32ValidationCheck));
+#endif
+		ui32Ret = psSyncCheckpointInt->ui32UID;
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s (ui32UID=0x%x)",
+				__FUNCTION__,
+				psSyncCheckpointInt->ui32UID));
+#endif
+	}
+	return ui32Ret;
+
+invalid_chkpt:
+	return 0;
+}
+
+PVRSRV_TIMELINE
+SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+	PVRSRV_TIMELINE i32Ret = -1;
+
+	PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL),
+	                  "psSyncCheckpoint invalid",
+	                  invalid_chkpt);
+
+	if (psSyncCheckpointInt)
+	{
+		i32Ret = psSyncCheckpointInt->hTimeline;
+	}
+	return i32Ret;
+
+invalid_chkpt:
+	return -1; /* match the invalid PVRSRV_TIMELINE value used above */
+}
+
+
+IMG_INTERNAL IMG_UINT32
+SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+	IMG_UINT32 ui32Ret = 0;
+
+	PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL),
+	                  "psSyncCheckpoint invalid",
+	                  invalid_chkpt);
+
+	if (psSyncCheckpointInt)
+	{
+		ui32Ret = OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount);
+	}
+	return ui32Ret;
+
+invalid_chkpt:
+	return 0;
+}
+
+void SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode,
+								IMG_UINT32 ui32FwAddr)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt;
+	PDLLIST_NODE psNode, psNext;
+
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s called to error UFO with ui32FWAddr=0x%x",
+			__FUNCTION__,
+			ui32FwAddr));
+#endif
+
+	OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+	dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+	{
+		psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+		if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt))
+		{
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s calling SyncCheckpointError for sync checkpoint <%p>",
+					__FUNCTION__,
+					(void*)psSyncCheckpointInt));
+#endif
+			/* Mark as errored */
+			SyncCheckpointError((PSYNC_CHECKPOINT)psSyncCheckpointInt);
+			break;
+		}
+	}
+	OSLockRelease(psDevNode->hSyncCheckpointListLock);
+}
+
+void SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt;
+	PDLLIST_NODE psNode, psNext;
+
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s called to rollback UFO with ui32FWAddr=0x%x",
+			__FUNCTION__,
+			ui32FwAddr));
+#endif
+#if !defined(NO_HARDWARE)
+	OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+	dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+	{
+		psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+		if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt))
+		{
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s called for psSyncCheckpointInt<%p> %d->%d",
+					__FUNCTION__,
+					(void*)psSyncCheckpointInt,
+					OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+					OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)-1));
+#endif
+			OSAtomicDecrement(&psSyncCheckpointInt->hEnqueuedCCBCount);
+			break;
+		}
+	}
+	OSLockRelease(psDevNode->hSyncCheckpointListLock);
+#else
+	PVR_UNREFERENCED_PARAMETER(psNode);
+	PVR_UNREFERENCED_PARAMETER(psNext);
+	PVR_UNREFERENCED_PARAMETER(psSyncCheckpointInt);
+#endif
+}
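+
+/*
+ * Rolling back undoes the SyncCheckpointCCBEnqueued() increment for the
+ * matching checkpoint. _CheckDeferredCleanupList() only frees a checkpoint
+ * once its firmware reference count equals its enqueued count, so a failed
+ * kick must be rolled back here or the checkpoint would never be considered
+ * idle and would remain on the deferred cleanup list.
+ */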
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncCheckpointInit(PPVRSRV_DEVICE_NODE psDevNode)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!gbSyncCheckpointInit)
+	{
+		eError = OSLockCreate(&psDevNode->hSyncCheckpointListLock, LOCK_TYPE_NONE);
+		if (eError == PVRSRV_OK)
+		{
+			dllist_init(&psDevNode->sSyncCheckpointSyncsList);
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+			eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncCheckpointNotify,
+													psDevNode,
+													_SyncCheckpointDebugRequest,
+													DEBUG_REQUEST_SYNCCHECKPOINT,
+													(PVRSRV_DBGREQ_HANDLE)psDevNode);
+#endif
+			if (eError == PVRSRV_OK)
+			{
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+				_SyncCheckpointRecordListInit(psDevNode);
+#endif
+				gbSyncCheckpointInit = IMG_TRUE;
+			}
+			else
+			{
+				/* free the created lock */
+				OSLockDestroy(psDevNode->hSyncCheckpointListLock);
+				psDevNode->hSyncCheckpointListLock = NULL;
+			}
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s FAILED to create psDevNode->hSyncCheckpointListLock",
+					__FUNCTION__));
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s called but already initialised", __FUNCTION__));
+	}
+	return eError;
+}
+
+void SyncCheckpointDeinit(PPVRSRV_DEVICE_NODE psDevNode)
+{
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncCheckpointNotify);
+#endif
+	psDevNode->hSyncCheckpointNotify = NULL;
+	OSLockDestroy(psDevNode->hSyncCheckpointListLock);
+	psDevNode->hSyncCheckpointListLock = NULL;
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	_SyncCheckpointRecordListDeinit(psDevNode);
+#endif
+	gbSyncCheckpointInit = IMG_FALSE;
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+static void _SyncCheckpointState(PDLLIST_NODE psNode,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+
+	if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED)
+	{
+		PVR_DUMPDEBUG_LOG("\tPending sync checkpoint(ID = %d, FWAddr = 0x%08x): (%s)",
+		                   psSyncCheckpoint->ui32UID,
+		                   psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr +
+							   _SyncCheckpointGetOffset(psSyncCheckpoint),
+		                   psSyncCheckpoint->azName);
+	}
+}
+
+static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+					IMG_UINT32 ui32VerbLevel,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+	DLLIST_NODE *psNode, *psNext;
+
+	if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_MEDIUM)
+	{
+		PVR_DUMPDEBUG_LOG("Dumping all pending sync checkpoints");
+		OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+		dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+		{
+			_SyncCheckpointState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+		OSLockRelease(psDevNode->hSyncCheckpointListLock);
+	}
+}
+
+PVRSRV_ERROR
+SyncCheckpointRecordAdd(
+			PSYNC_CHECKPOINT_RECORD_HANDLE * phRecord,
+			SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock,
+			IMG_UINT32 ui32FwBlockAddr,
+			IMG_UINT32 ui32SyncOffset,
+			IMG_UINT32 ui32UID,
+			IMG_UINT32 ui32ClassNameSize,
+			const IMG_CHAR *pszClassName)
+{
+	struct SYNC_CHECKPOINT_RECORD * psSyncRec;
+	_SYNC_CHECKPOINT_CONTEXT *psContext = hSyncCheckpointBlock->psContext;
+	PVRSRV_DEVICE_NODE *psDevNode = psContext->psDevNode;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!phRecord)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*phRecord = NULL;
+
+	psSyncRec = OSAllocMem(sizeof(*psSyncRec));
+	PVR_LOGG_IF_NOMEM(psSyncRec, "OSAllocMem", eError, fail_alloc); /* Sets OOM error code */
+
+	psSyncRec->psDevNode = psDevNode;
+	psSyncRec->psSyncCheckpointBlock = hSyncCheckpointBlock;
+	psSyncRec->ui32SyncOffset = ui32SyncOffset;
+	psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
+	psSyncRec->ui64OSTime = OSClockns64();
+	psSyncRec->uiPID = OSGetCurrentProcessID();
+	psSyncRec->ui32UID = ui32UID;
+	if(pszClassName)
+	{
+		if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+			ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN - 1;
+		/* Copy over the class name annotation */
+		OSStringNCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize);
+		psSyncRec->szClassName[ui32ClassNameSize] = 0;
+	}
+	else
+	{
+		/* No class name annotation */
+		psSyncRec->szClassName[0] = 0;
+	}
+
+	OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+	if(psDevNode->ui32SyncCheckpointRecordCount < SYNC_CHECKPOINT_RECORD_LIMIT)
+	{
+		dllist_add_to_head(&psDevNode->sSyncCheckpointRecordList, &psSyncRec->sNode);
+		psDevNode->ui32SyncCheckpointRecordCount++;
+
+		if(psDevNode->ui32SyncCheckpointRecordCount > psDevNode->ui32SyncCheckpointRecordCountHighWatermark)
+		{
+			psDevNode->ui32SyncCheckpointRecordCountHighWatermark = psDevNode->ui32SyncCheckpointRecordCount;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\". %u records already exist.",
+											__func__,
+											pszClassName,
+											psDevNode->ui32SyncCheckpointRecordCount));
+		OSFreeMem(psSyncRec);
+		psSyncRec = NULL;
+		eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+	}
+	OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+	*phRecord = (PSYNC_CHECKPOINT_RECORD_HANDLE)psSyncRec;
+
+fail_alloc:
+	return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord)
+{
+	struct SYNC_CHECKPOINT_RECORD **ppFreedSync;
+	struct SYNC_CHECKPOINT_RECORD *pSync = (struct SYNC_CHECKPOINT_RECORD*)hRecord;
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	if (!hRecord)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevNode = pSync->psDevNode;
+
+	OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+
+	dllist_remove_node(&pSync->sNode);
+
+	if (psDevNode->uiSyncCheckpointRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: psDevNode->uiSyncCheckpointRecordFreeIdx out of range",
+				__FUNCTION__));
+		psDevNode->uiSyncCheckpointRecordFreeIdx = 0;
+	}
+	ppFreedSync = &psDevNode->apsSyncCheckpointRecordsFreed[psDevNode->uiSyncCheckpointRecordFreeIdx];
+	psDevNode->uiSyncCheckpointRecordFreeIdx =
+			(psDevNode->uiSyncCheckpointRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN;
+
+	if (*ppFreedSync)
+	{
+		OSFreeMem(*ppFreedSync);
+	}
+	pSync->psSyncCheckpointBlock = NULL;
+	pSync->ui64OSTime = OSClockns64();
+	*ppFreedSync = pSync;
+
+	psDevNode->ui32SyncCheckpointRecordCount--;
+
+	OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+	return PVRSRV_OK;
+}
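+
+/*
+ * Note that removal does not free the record immediately: the most recently
+ * freed records are parked in the apsSyncCheckpointRecordsFreed[] ring
+ * (PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN entries, with the oldest occupant
+ * freed on wrap) so that _SyncCheckpointRecordRequest() can still include
+ * recently freed checkpoints in its debug dump.
+ */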
+
+#define NS_IN_S (1000000000UL)
+static void _SyncCheckpointRecordPrint(struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec,
+					IMG_UINT64 ui64TimeNow,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock;
+	IMG_UINT64 ui64DeltaS;
+	IMG_UINT32 ui32DeltaF;
+	IMG_UINT64 ui64Delta = ui64TimeNow - psSyncCheckpointRec->ui64OSTime;
+	ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF);
+
+	if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr)
+	{
+		void *pSyncCheckpointAddr;
+		pSyncCheckpointAddr = (void*)( ((IMG_BYTE*) psSyncCheckpointBlock->pui32LinAddr) + psSyncCheckpointRec->ui32SyncOffset);
+
+		PVR_DUMPDEBUG_LOG("\t%05u %05llu.%09u %010u FWAddr=0x%08x State=%s (%s)",
+		                  psSyncCheckpointRec->uiPID,
+		                  ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID,
+		                  (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset),
+		                  (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ?
+		                      "SIGNALLED" :
+		                      ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ?
+		                          "ERRORED" : "NOT_SIGNALLED"),
+		                  psSyncCheckpointRec->szClassName);
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG("\t%05u %05llu.%09u %010u FWAddr=0x%08x State=<null_ptr> (%s)",
+			psSyncCheckpointRec->uiPID,
+			ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID,
+			(psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset),
+			psSyncCheckpointRec->szClassName
+			);
+	}
+}
+
+static void _SyncCheckpointRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+						IMG_UINT32 ui32VerbLevel,
+						DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+	IMG_UINT64 ui64TimeNowS;
+	IMG_UINT32 ui32TimeNowF;
+	IMG_UINT64 ui64TimeNow = OSClockns64();
+	DLLIST_NODE *psNode, *psNext;
+
+	ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF);
+
+	if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_MEDIUM)
+	{
+		IMG_UINT32 i;
+
+		OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+
+		PVR_DUMPDEBUG_LOG("Dumping allocated sync checkpoints. Allocated: %u High watermark: %u (time ref %05llu.%09u)",
+		                  psDevNode->ui32SyncCheckpointRecordCount,
+		                  psDevNode->ui32SyncCheckpointRecordCountHighWatermark,
+		                  ui64TimeNowS,
+		                  ui32TimeNowF);
+		if(psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT)
+		{
+			PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.",
+																SYNC_CHECKPOINT_RECORD_LIMIT);
+		}
+		PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)",
+					"PID", "Time Delta (s)", "UID", "Address", "State", "Annotation");
+
+		dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext)
+		{
+			struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec =
+				IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
+			_SyncCheckpointRecordPrint(psSyncCheckpointRec, ui64TimeNow,
+							pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+
+		PVR_DUMPDEBUG_LOG("Dumping all recently freed sync checkpoints @ %05llu.%09u",
+		                  ui64TimeNowS,
+		                  ui32TimeNowF);
+		PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)",
+					"PID", "Time Delta (s)", "UID", "Address", "State", "Annotation");
+		for(i = DECREMENT_WITH_WRAP(psDevNode->uiSyncCheckpointRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN);
+				i != psDevNode->uiSyncCheckpointRecordFreeIdx;
+				i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN))
+		{
+			if (psDevNode->apsSyncCheckpointRecordsFreed[i])
+			{
+				_SyncCheckpointRecordPrint(psDevNode->apsSyncCheckpointRecordsFreed[i],
+										   ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+			}
+			else
+			{
+				break;
+			}
+		}
+		OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+	}
+}
+#undef NS_IN_S
+static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRV_ERROR eError;
+
+	eError = OSLockCreate(&psDevNode->hSyncCheckpointRecordLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock_create;
+	}
+	dllist_init(&psDevNode->sSyncCheckpointRecordList);
+
+	psDevNode->ui32SyncCheckpointRecordCount = 0;
+	psDevNode->ui32SyncCheckpointRecordCountHighWatermark = 0;
+
+	eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncCheckpointRecordNotify,
+											psDevNode,
+											_SyncCheckpointRecordRequest,
+											DEBUG_REQUEST_SYNCCHECKPOINT,
+											(PVRSRV_DBGREQ_HANDLE)psDevNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_dbg_register;
+	}
+
+	return PVRSRV_OK;
+
+fail_dbg_register:
+	OSLockDestroy(psDevNode->hSyncCheckpointRecordLock);
+fail_lock_create:
+	return eError;
+}
+
+static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	DLLIST_NODE *psNode, *psNext;
+	int i;
+
+	OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+	dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext)
+	{
+		struct SYNC_CHECKPOINT_RECORD *pSyncCheckpointRec =
+			IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
+
+		dllist_remove_node(psNode);
+		OSFreeMem(pSyncCheckpointRec);
+	}
+
+	for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++)
+	{
+		if (psDevNode->apsSyncCheckpointRecordsFreed[i])
+		{
+			OSFreeMem(psDevNode->apsSyncCheckpointRecordsFreed[i]);
+			psDevNode->apsSyncCheckpointRecordsFreed[i] = NULL;
+		}
+	}
+	OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+	if (psDevNode->hSyncCheckpointRecordNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncCheckpointRecordNotify);
+	}
+	OSLockDestroy(psDevNode->hSyncCheckpointRecordLock);
+}
+#endif /* defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+PVRSRV_ERROR
+SyncCheckpointPDumpPol(PSYNC_CHECKPOINT psSyncCheckpoint, PDUMP_FLAGS_T ui32PDumpFlags)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+	DevmemPDumpDevmemPol32(psSyncCheckpointInt->psSyncCheckpointBlock->hMemDesc,
+						   _SyncCheckpointGetOffset(psSyncCheckpointInt),
+						   PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+						   0xFFFFFFFF,
+						   PDUMP_POLL_OPERATOR_EQUAL,
+						   ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+SyncCheckpointSignalPDump(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+	/*
+		We might be asked to PDump sync state outside of capture range
+		(e.g. texture uploads) so make this continuous.
+	*/
+	DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc,
+					   _SyncCheckpointGetOffset(psSyncCheckpoint),
+					   PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+SyncCheckpointErrorPDump(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+	/*
+		We might be asked to PDump sync state outside of capture range
+		(e.g. texture uploads) so make this continuous.
+	*/
+	DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc,
+					   _SyncCheckpointGetOffset(psSyncCheckpoint),
+					   PVRSRV_SYNC_CHECKPOINT_ERRORED,
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	return PVRSRV_OK;
+}
+#endif
+
+static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+	DLLIST_NODE *psNode, *psNext;
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)psContext->psDevNode;
+
+	/* Check the deferred cleanup list and free any sync checkpoints we can */
+	//PVR_DPF((PVR_DBG_ERROR, "%s acquiring hDeferredCleanupListLock for context<%p>, context->psContextCtl<%p>", __FUNCTION__, (void*)psContext, (void*)psContext->psContextCtl));
+	OSLockAcquire(psContext->psContextCtl->hDeferredCleanupListLock);
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s called", __FUNCTION__));
+#endif
+
+	if (dllist_is_empty(&psContext->psContextCtl->sDeferredCleanupListHead))
+	{
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: Defer free list is empty", __FUNCTION__));
+#endif
+	}
+
+	dllist_foreach_node(&psContext->psContextCtl->sDeferredCleanupListHead, psNode, psNext)
+	{
+		_SYNC_CHECKPOINT *psSyncCheckpointInt =
+			IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sDeferredFreeListNode);
+
+		if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount ==
+				(IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)))
+		{
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+			if(psSyncCheckpointInt->hRecord)
+			{
+				PVRSRV_ERROR eError;
+				/* remove this sync record */
+				eError = SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord);
+				PVR_LOG_IF_ERROR(eError, "SyncCheckpointRecordRemove");
+			}
+#endif
+
+			/* Remove the sync checkpoint from the deferred free list */
+			dllist_remove_node(&psSyncCheckpointInt->sDeferredFreeListNode);
+
+			/* Remove the sync checkpoint from the global list */
+			OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+			dllist_remove_node(&psSyncCheckpointInt->sListNode);
+			OSLockRelease(psDevNode->hSyncCheckpointListLock);
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s attempting to return sync(ID:%d)<%p> to pool",
+					__FUNCTION__,
+					psSyncCheckpointInt->ui32UID,
+					(void*)psSyncCheckpointInt));
+#endif
+			if (!_PutCheckpointInPool(psSyncCheckpointInt))
+#endif
+			{
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+				PVR_DPF((PVR_DBG_ERROR, "%s pool is full, so just free it", __FUNCTION__));
+#endif
+#endif
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), "
+						"psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+						__FUNCTION__,
+						psSyncCheckpointInt->ui32UID,
+						(void*)psSyncCheckpointInt,
+						(void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA,
+						psSyncCheckpointInt->uiSpanAddr));
+#endif
+				_FreeSyncCheckpoint(psSyncCheckpointInt);
+			}
+		}
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s psSyncCheckpoint '%s' (ID:%d)<%p> still pending (enq=%d, FWRef=%d)",
+					__FUNCTION__,
+					psSyncCheckpointInt->azName,
+					psSyncCheckpointInt->ui32UID,
+					(void*)psSyncCheckpointInt,
+					(IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)),
+					psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+		}
+#endif
+	}
+	OSLockRelease(psContext->psContextCtl->hDeferredCleanupListLock);
+}
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+static _SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpoint = NULL;
+
+	/* Acquire sync checkpoint pool lock */
+	OSLockAcquire(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+	/* Check if the pool has anything in it (when the pool is full,
+	 * Wp == Rp, so the full flag must also be consulted) */
+	if (psContext->psContextCtl->bSyncCheckpointPoolValid && (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) &&
+			((psContext->psContextCtl->ui32SyncCheckpointPoolWp != psContext->psContextCtl->ui32SyncCheckpointPoolRp) ||
+			 psContext->psContextCtl->bSyncCheckpointPoolFull))
+	{
+		/* Get the next sync checkpoint from the pool */
+		psSyncCheckpoint = psContext->psContextCtl->psSyncCheckpointPool[psContext->psContextCtl->ui32SyncCheckpointPoolRp++];
+		if (psContext->psContextCtl->ui32SyncCheckpointPoolRp == SYNC_CHECKPOINT_POOL_SIZE)
+		{
+			psContext->psContextCtl->ui32SyncCheckpointPoolRp = 0;
+		}
+		psContext->psContextCtl->ui32SyncCheckpointPoolCount--;
+		psContext->psContextCtl->bSyncCheckpointPoolFull = IMG_FALSE;
+		psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE;
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s checkpoint(old ID:%d)<-POOL(%d/%d), psContext=<%p>, poolRp=%d, poolWp=%d",
+				__FUNCTION__,
+				psSyncCheckpoint->ui32UID,
+				psContext->psContextCtl->ui32SyncCheckpointPoolCount,
+				SYNC_CHECKPOINT_POOL_SIZE,
+				(void*)psContext, psContext->psContextCtl->ui32SyncCheckpointPoolRp, psContext->psContextCtl->ui32SyncCheckpointPoolWp));
+#endif
+	}
+	/* Release sync checkpoint pool lock */
+	OSLockRelease(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+	return psSyncCheckpoint;
+}
+
+static IMG_BOOL _PutCheckpointInPool(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+	IMG_BOOL bReturnedToPool = IMG_FALSE;
+	_SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext;
+
+	/* Acquire sync checkpoint pool lock */
+	OSLockAcquire(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+	/* Check if pool has space */
+	if (psContext->psContextCtl->bSyncCheckpointPoolValid &&
+			!psContext->psContextCtl->bSyncCheckpointPoolFull)
+	{
+		/* Put the sync checkpoint into the next write slot in the pool */
+		psContext->psContextCtl->psSyncCheckpointPool[psContext->psContextCtl->ui32SyncCheckpointPoolWp++] = psSyncCheckpoint;
+		if (psContext->psContextCtl->ui32SyncCheckpointPoolWp == SYNC_CHECKPOINT_POOL_SIZE)
+		{
+			psContext->psContextCtl->ui32SyncCheckpointPoolWp = 0;
+		}
+		psContext->psContextCtl->ui32SyncCheckpointPoolCount++;
+		psContext->psContextCtl->bSyncCheckpointPoolFull =
+		        ((psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) &&
+		         (psContext->psContextCtl->ui32SyncCheckpointPoolWp == psContext->psContextCtl->ui32SyncCheckpointPoolRp));
+		bReturnedToPool = IMG_TRUE;
+		psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_POOL;
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s checkpoint(ID:%d)->POOL(%d/%d), poolRp=%d, poolWp=%d",
+				__FUNCTION__,
+				psSyncCheckpoint->ui32UID,
+				psContext->psContextCtl->ui32SyncCheckpointPoolCount,
+				SYNC_CHECKPOINT_POOL_SIZE, psContext->psContextCtl->ui32SyncCheckpointPoolRp, psContext->psContextCtl->ui32SyncCheckpointPoolWp));
+#endif
+	}
+	/* Release sync checkpoint pool lock */
+	OSLockRelease(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+	return bReturnedToPool;
+}
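+
+/*
+ * Pool empty/full convention: Rp == Wp with ui32SyncCheckpointPoolCount == 0
+ * means the pool is empty, while Rp == Wp with a non-zero count means it is
+ * full (tracked in bSyncCheckpointPoolFull). Keeping the explicit count and
+ * full flag distinguishes the two otherwise-ambiguous Rp == Wp states, which
+ * is why _GetCheckpointFromPool() consults the full flag as well.
+ */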
+
+static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+	_SYNC_CHECKPOINT *psSyncCheckpointInt = NULL;
+	IMG_UINT32 ui32ItemsFreed = 0;
+
+	/* Acquire sync checkpoint pool lock */
+	OSLockAcquire(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s psContext=<%p>, bSyncCheckpointPoolValid=%d, ui32SyncCheckpointPoolCount=%d",
+			__FUNCTION__,
+			(void*)psContext,
+			psContext->psContextCtl->bSyncCheckpointPoolValid,
+			psContext->psContextCtl->ui32SyncCheckpointPoolCount));
+#endif
+	/* While the pool still contains sync checkpoints, free them */
+	while (psContext->psContextCtl->bSyncCheckpointPoolValid &&
+			(psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0))
+	{
+		/* Get the sync checkpoint from the next read slot in the pool */
+		psSyncCheckpointInt = psContext->psContextCtl->psSyncCheckpointPool[psContext->psContextCtl->ui32SyncCheckpointPoolRp++];
+		if (psContext->psContextCtl->ui32SyncCheckpointPoolRp == SYNC_CHECKPOINT_POOL_SIZE)
+		{
+			psContext->psContextCtl->ui32SyncCheckpointPoolRp = 0;
+		}
+		psContext->psContextCtl->ui32SyncCheckpointPoolCount--;
+		psContext->psContextCtl->bSyncCheckpointPoolFull =
+		         ((psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) &&
+		         (psContext->psContextCtl->ui32SyncCheckpointPoolWp == psContext->psContextCtl->ui32SyncCheckpointPoolRp));
+
+		if (psSyncCheckpointInt)
+		{
+			if (psSyncCheckpointInt->ui32ValidationCheck != SYNC_CHECKPOINT_PATTERN_IN_POOL)
+			{
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s pool contains invalid entry (ui32ValidationCheck=0x%x)",
+						__FUNCTION__,
+						psSyncCheckpointInt->ui32ValidationCheck));
+#endif
+			}
+
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+			PVR_DPF((PVR_DBG_ERROR, "%s psSyncCheckpoint(ID:%d)", __FUNCTION__, psSyncCheckpointInt->ui32UID));
+			PVR_DPF((PVR_DBG_ERROR, "%s psSyncCheckpoint->ui32ValidationCheck=0x%x", __FUNCTION__, psSyncCheckpointInt->ui32ValidationCheck));
+			PVR_DPF((PVR_DBG_ERROR, "%s psSyncCheckpoint->uiSpanAddr=0x%llx", __FUNCTION__, psSyncCheckpointInt->uiSpanAddr));
+			PVR_DPF((PVR_DBG_ERROR, "%s psSyncCheckpoint->psSyncCheckpointBlock=<%p>", __FUNCTION__, (void*)psSyncCheckpointInt->psSyncCheckpointBlock));
+			PVR_DPF((PVR_DBG_ERROR, "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext=<%p>", __FUNCTION__, (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext));
+			PVR_DPF((PVR_DBG_ERROR, "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA=<%p>", __FUNCTION__, (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA));
+#endif
+			OSAtomicDecrement(&psContext->hCheckpointCount);
+			psSyncCheckpointInt->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED;
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+					__FUNCTION__,
+					psSyncCheckpointInt->ui32UID,
+					(void*)psSyncCheckpointInt,
+					(void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA,
+					psSyncCheckpointInt->uiSpanAddr));
+#endif
+			RA_Free(psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA,
+			        psSyncCheckpointInt->uiSpanAddr);
+			ui32ItemsFreed++;
+		}
+		else
+		{
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+			PVR_DPF((PVR_DBG_ERROR, "%s pool contains NULL entry", __FUNCTION__));
+#endif
+		}
+	}
+	/* Release sync checkpoint pool lock */
+	OSLockRelease(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+	return ui32ItemsFreed;
+}
+#endif /* (SYNC_CHECKPOINT_POOL_SIZE > 0) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/sync_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/sync_server.c
new file mode 100644
index 0000000..ae7e2cd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/sync_server.c
@@ -0,0 +1,2600 @@
+/*************************************************************************/ /*!
+@File           sync_server.c
+@Title          Server side synchronisation functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side functions used for synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_types.h"
+#include "sync_server.h"
+#include "allocmem.h"
+#include "device.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pdump.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "pdump_km.h"
+#include "sync.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+#include "htbuffer.h"
+#include "rgxhwperf.h"
+
+#include "sync_checkpoint_internal.h"
+#include "sync_checkpoint.h"
+
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "ossecure_export.h"
+#endif
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+#include "rgxdebug.h"
+#endif
+
+/* Set this to enable debug relating to the construction and maintenance of the sync address list */
+#define SYNC_ADDR_LIST_DEBUG 0
+
+/* Max number of syncs allowed in a sync prim op */
+#define SYNC_PRIM_OP_MAX_SYNCS 1024
+
+struct _SYNC_PRIMITIVE_BLOCK_
+{
+	PVRSRV_DEVICE_NODE	*psDevNode;
+	DEVMEM_MEMDESC		*psMemDesc;
+	IMG_UINT32			*pui32LinAddr;
+	IMG_UINT32			ui32BlockSize;		/*!< Size of the Sync Primitive Block */
+	IMG_UINT32			ui32RefCount;
+	POS_LOCK			hLock;
+	DLLIST_NODE			sConnectionNode;
+	SYNC_CONNECTION_DATA *psSyncConnectionData;	/*!< Link back to the sync connection data if there is one */
+	PRGXFWIF_UFO_ADDR		uiFWAddr;	/*!< The firmware address of the sync prim block */
+};
+
+struct _SERVER_SYNC_PRIMITIVE_
+{
+	PVRSRV_DEVICE_NODE		*psDevNode;
+	PVRSRV_CLIENT_SYNC_PRIM *psSync;
+	IMG_UINT32				ui32NextOp;
+	IMG_UINT32				ui32RefCount;
+	IMG_UINT32				ui32UID;
+	IMG_UINT32				ui32LastSyncRequesterID;
+	DLLIST_NODE				sNode;
+	/* PDump only data */
+	IMG_BOOL				bSWOperation;
+	IMG_BOOL				bSWOpStartedInCaptRange;
+	IMG_UINT32				ui32LastHWUpdate;
+	IMG_BOOL				bPDumped;
+	POS_LOCK				hLock;
+	IMG_CHAR				szClassName[SYNC_MAX_CLASS_NAME_LEN];
+};
+
+struct _SERVER_SYNC_EXPORT_
+{
+	SERVER_SYNC_PRIMITIVE *psSync;
+};
+
+struct _SERVER_OP_COOKIE_
+{
+	IMG_BOOL				bActive;
+	/*
+		Client syncblock(s) info.
+		If this changes update the calculation of ui32BlockAllocSize
+	*/
+	IMG_UINT32				ui32SyncBlockCount;
+	SYNC_PRIMITIVE_BLOCK	**papsSyncPrimBlock;
+
+	/*
+		Client sync(s) info.
+		If this changes update the calculation of ui32ClientAllocSize
+	*/
+	IMG_UINT32				ui32ClientSyncCount;
+	IMG_UINT32				*paui32SyncBlockIndex;
+	IMG_UINT32				*paui32Index;
+	IMG_UINT32				*paui32Flags;
+	IMG_UINT32				*paui32FenceValue;
+	IMG_UINT32				*paui32UpdateValue;
+
+	/*
+		Server sync(s) info
+		If this changes update the calculation of ui32ServerAllocSize
+	*/
+	IMG_UINT32				ui32ServerSyncCount;
+	SERVER_SYNC_PRIMITIVE	**papsServerSync;
+	IMG_UINT32				*paui32ServerFenceValue;
+	IMG_UINT32				*paui32ServerUpdateValue;
+
+};
+
+struct _SYNC_CONNECTION_DATA_
+{
+	DLLIST_NODE	sListHead;
+	IMG_UINT32	ui32RefCount;
+	POS_LOCK	hLock;
+};
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1))
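+/* Example: with sz == 4 the macro steps 2 -> 1 -> 0 -> 3 -> 2 ..., i.e. it
+ * walks an index backwards around a ring of sz entries; this is how the
+ * freed-record history is traversed from newest to oldest when dumped.
+ */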
+
+/* this is the max number of syncs we will search or dump
+ * at any time.
+ */
+#define SYNC_RECORD_LIMIT 20000
+
+enum SYNC_RECORD_TYPE
+{
+	SYNC_RECORD_TYPE_UNKNOWN = 0,
+	SYNC_RECORD_TYPE_CLIENT,
+	SYNC_RECORD_TYPE_SERVER,
+};
+
+struct SYNC_RECORD
+{
+	PVRSRV_DEVICE_NODE		*psDevNode;
+	SYNC_PRIMITIVE_BLOCK	*psServerSyncPrimBlock;	/*!< handle to _SYNC_PRIMITIVE_BLOCK_ */
+	IMG_UINT32				ui32SyncOffset; 		/*!< offset to sync in block */
+	IMG_UINT32				ui32FwBlockAddr;
+	IMG_PID					uiPID;
+	IMG_UINT64				ui64OSTime;
+	enum SYNC_RECORD_TYPE	eRecordType;
+	DLLIST_NODE				sNode;
+	IMG_CHAR				szClassName[SYNC_MAX_CLASS_NAME_LEN];
+};
+#endif /* #if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+static IMG_UINT32 g_ServerSyncUID = 0;
+
+#define SYNC_REQUESTOR_UNKNOWN 0
+static IMG_UINT32 g_ui32NextSyncRequestorID = 1;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+static POS_LOCK ghServerSyncLock = NULL;
+#endif
+
+#if defined(SYNC_DEBUG) || defined(REFCOUNT_DEBUG)
+#define SYNC_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define SYNC_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+#if defined(SYNC_DEBUG)
+#define SYNC_UPDATES_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define SYNC_UPDATES_PRINT(fmt, ...)
+#endif
+
+/*!
+*****************************************************************************
+ @Function      : SyncPrimitiveBlockToFWAddr
+
+ @Description   : Given a pointer to a sync primitive block and an offset,
+                  returns the firmware address of the sync.
+
+ @Input           psSyncPrimBlock : Sync primitive block which contains the sync
+ @Input           ui32Offset      : Offset of sync within the sync primitive block
+ @Output          psAddrOut       : Absolute FW address of the sync is written out through
+                                    this pointer
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock,
+							IMG_UINT32 ui32Offset,
+						PRGXFWIF_UFO_ADDR *psAddrOut)
+{
+	/* check offset is legal */
+	if((ui32Offset >= psSyncPrimBlock->ui32BlockSize) ||
+						(ui32Offset % sizeof(IMG_UINT32)))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: parameters check failed", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psAddrOut->ui32Addr = psSyncPrimBlock->uiFWAddr.ui32Addr + ui32Offset;
+	return PVRSRV_OK;
+}
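+
+/*
+ * Minimal usage sketch (values illustrative only): for a block whose
+ * firmware base address is 0x1000, the sync at byte offset 8 resolves to
+ * FW address 0x1008:
+ *
+ *     PRGXFWIF_UFO_ADDR sAddr;
+ *     if (SyncPrimitiveBlockToFWAddr(psBlock, 8, &sAddr) == PVRSRV_OK)
+ *         ... sAddr.ui32Addr == psBlock->uiFWAddr.ui32Addr + 8 ...
+ */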
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListGrow
+
+ @Description   : Grow the SYNC_ADDR_LIST so it can accommodate the given
+                  number of syncs, up to a maximum of PVRSRV_MAX_SYNC_PRIMS.
+
+ @Input           psList       : The SYNC_ADDR_LIST to grow
+ @Input           ui32NumSyncs : The number of sync addresses to be able to hold
+ @Return :        PVRSRV_OK on success
+*****************************************************************************/
+
+static PVRSRV_ERROR SyncAddrListGrow(SYNC_ADDR_LIST *psList, IMG_UINT32 ui32NumSyncs)
+{
+	if (ui32NumSyncs > PVRSRV_MAX_SYNC_PRIMS)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: ui32NumSyncs=%u > PVRSRV_MAX_SYNC_PRIMS=%u", __FUNCTION__, ui32NumSyncs, PVRSRV_MAX_SYNC_PRIMS));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s:     Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+	if(ui32NumSyncs > psList->ui32NumSyncs)
+	{
+		if(psList->pasFWAddrs == NULL)
+		{
+			psList->pasFWAddrs = OSAllocMem(sizeof(PRGXFWIF_UFO_ADDR) * PVRSRV_MAX_SYNC_PRIMS);
+			if(psList->pasFWAddrs == NULL)
+			{
+				return PVRSRV_ERROR_OUT_OF_MEMORY;
+			}
+		}
+
+		psList->ui32NumSyncs = ui32NumSyncs;
+	}
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s:     Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+	return PVRSRV_OK;
+}
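+
+/*
+ * Note that the backing array is allocated once at its maximum size
+ * (PVRSRV_MAX_SYNC_PRIMS entries) on first use; subsequent "grows" only
+ * raise ui32NumSyncs, so the array is never reallocated or moved.
+ */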
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListInit
+
+ @Description   : Initialise a SYNC_ADDR_LIST structure ready for use
+
+ @Input           psList        : The SYNC_ADDR_LIST structure to initialise
+ @Return        : None
+*****************************************************************************/
+
+void
+SyncAddrListInit(SYNC_ADDR_LIST *psList)
+{
+	psList->ui32NumSyncs = 0;
+	psList->pasFWAddrs   = NULL;
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListDeinit
+
+ @Description   : Frees any resources associated with the given SYNC_ADDR_LIST
+
+ @Input           psList        : The SYNC_ADDR_LIST structure to deinitialise
+ @Return        : None
+*****************************************************************************/
+
+void
+SyncAddrListDeinit(SYNC_ADDR_LIST *psList)
+{
+	if(psList->pasFWAddrs != NULL)
+	{
+		OSFreeMem(psList->pasFWAddrs);
+	}
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListPopulate
+
+ @Description   : Populate the given SYNC_ADDR_LIST with the FW addresses
+                  of the syncs given by the SYNC_PRIMITIVE_BLOCKs and sync offsets
+
+ @Input           ui32NumSyncs    : The number of syncs being passed in
+ @Input           apsSyncPrimBlock: Array of pointers to SYNC_PRIMITIVE_BLOCK structures
+                                    in which the syncs are based
+ @Input           paui32SyncOffset: Array of offsets within each of the sync primitive blocks
+                                    where the syncs are located
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncAddrListPopulate(SYNC_ADDR_LIST *psList,
+						IMG_UINT32 ui32NumSyncs,
+						SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock,
+						IMG_UINT32 *paui32SyncOffset)
+{
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+	if(ui32NumSyncs > psList->ui32NumSyncs)
+	{
+		eError = SyncAddrListGrow(psList, ui32NumSyncs);
+
+		if(eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	psList->ui32NumSyncs = ui32NumSyncs;
+
+	for(i = 0; i < ui32NumSyncs; i++)
+	{
+		eError = SyncPrimitiveBlockToFWAddr(apsSyncPrimBlock[i],
+								paui32SyncOffset[i],
+								&psList->pasFWAddrs[i]);
+
+		if(eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST          *psList,
+						   PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32FwAddr = 0;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d", __FUNCTION__, (void*)psList, psList->ui32NumSyncs));
+#endif
+	/* Ensure there's room in psList for the additional sync prim update */
+	eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + 1);
+	if(eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	SyncPrimGetFirmwareAddr(psSyncPrim, &ui32FwAddr);
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Appending sync prim <%p> UFO addr (0x%x) to psList->pasFWAddrs[%d]", __FUNCTION__, (void*)psSyncPrim, ui32FwAddr, psList->ui32NumSyncs-1));
+#endif
+	psList->pasFWAddrs[psList->ui32NumSyncs-1].ui32Addr = ui32FwAddr;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	{
+		IMG_UINT32 iii;
+
+		PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __FUNCTION__, psList->ui32NumSyncs));
+		for (iii=0; iii<psList->ui32NumSyncs; iii++)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: psList->pasFWAddrs[%d].ui32Addr=0x%x", __FUNCTION__, iii, psList->pasFWAddrs[iii].ui32Addr));
+		}
+	}
+#endif
+e0:
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d", __FUNCTION__, (void*)psList, psList->ui32NumSyncs));
+#endif
+	return eError;
+}
+
+
+static PVRSRV_ERROR
+_AppendCheckpoints(SYNC_ADDR_LIST *psList,
+				   IMG_UINT32 ui32NumCheckpoints,
+				   PSYNC_CHECKPOINT *apsSyncCheckpoint,
+				   IMG_BOOL bDeRefCheckpoints)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32SyncCheckpointIndex;
+	IMG_UINT32 ui32RollbackSize = psList->ui32NumSyncs;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
+#endif
+	/* Ensure there's room in psList for the sync checkpoints */
+	eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + ui32NumCheckpoints);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: * * * * ERROR * * * * Trying to SyncAddrListGrow(psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
+		goto e0;
+	}
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: (ui32NumCheckpoints=%d) (psList->ui32NumSyncs is now %d) array already contains %d FWAddrs:", __FUNCTION__, ui32NumCheckpoints, psList->ui32NumSyncs, ui32RollbackSize));
+	if (ui32RollbackSize > 0)
+	{
+		{
+			IMG_UINT32 kk;
+			for (kk=0; kk<ui32RollbackSize; kk++)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:    <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __FUNCTION__,
+						 (void*)&psList->pasFWAddrs[kk], kk,
+						 psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr));
+			}
+		}
+	}
+	PVR_DPF((PVR_DBG_ERROR, "%s: apsSyncCheckpoint=<%p>, apsSyncCheckpoint[0] = <%p>", __FUNCTION__, (void*)apsSyncCheckpoint, (void*)apsSyncCheckpoint[0]));
+#endif
+	for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
+	{
+		psList->pasFWAddrs[ui32RollbackSize + ui32SyncCheckpointIndex].ui32Addr = SyncCheckpointGetFirmwareAddrFromList(apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s:  SyncCheckpointCCBEnqueued(<%p>)", __FUNCTION__, (void*)apsSyncCheckpoint[ui32SyncCheckpointIndex]));
+		PVR_DPF((PVR_DBG_ERROR, "%s:                           ID:%d", __FUNCTION__, SyncCheckpointGetId((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex])));
+#endif
+		SyncCheckpointCCBEnqueued((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+		if (bDeRefCheckpoints)
+		{
+			/* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
+			SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+		}
+	}
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	if (psList->ui32NumSyncs > 0)
+	{
+		IMG_UINT32 kk;
+		for (kk=0; kk<psList->ui32NumSyncs; kk++)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s:    <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __FUNCTION__,
+			         (void*)&psList->pasFWAddrs[kk], kk,
+			         psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr));
+		}
+	}
+#endif
+	return eError;
+
+e0:
+	for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
+	{
+		if (bDeRefCheckpoints)
+		{
+			/* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
+			SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+		}
+	}
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
+#endif
+	return eError;
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListAppendCheckpoints
+
+ @Description   : Append the FW addresses of the sync checkpoints given in
+                  the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST
+
+ @Input           ui32NumSyncCheckpoints : The number of sync checkpoints
+                                           being passed in
+ @Input           apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details
+                                      are to be appended to the SYNC_ADDR_LIST
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+PVRSRV_ERROR
+SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList,
+								IMG_UINT32 ui32NumCheckpoints,
+								PSYNC_CHECKPOINT *apsSyncCheckpoint)
+{
+	return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_FALSE);
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListAppendAndDeRefCheckpoints
+
+ @Description   : Append the FW addresses of the sync checkpoints given in
+                  the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST.
+                  A reference is dropped for each of the checkpoints.
+
+ @Input           ui32NumSyncCheckpoints : The number of sync checkpoints
+                                           being passed in
+ @Input           apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details
+                                      are to be appended to the SYNC_ADDR_LIST
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+PVRSRV_ERROR
+SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList,
+									  IMG_UINT32 ui32NumCheckpoints,
+									  PSYNC_CHECKPOINT *apsSyncCheckpoint)
+{
+	return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_TRUE);
+}
+
+void
+SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints,
+							 PSYNC_CHECKPOINT *apsSyncCheckpoint)
+{
+	IMG_UINT32 ui32SyncCheckpointIndex;
+
+	for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
+	{
+		/* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
+		SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+	}
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListRollbackCheckpoints
+
+ @Description   : Rollback the enqueued count of each sync checkpoint in
+                  the given SYNC_ADDR_LIST. This needs to be done in the
+                  event of the kick call failing, so that the reference
+                  taken on each sync checkpoint on the firmware's behalf
+                  is dropped.
+
+ @Input           psList        : The SYNC_ADDR_LIST structure containing
+                                  sync checkpoints to be rolled back
+
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32SyncIndex;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: called (psList=<%p>)", __FUNCTION__, (void*)psList));
+#endif
+	if (psList)
+	{
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __FUNCTION__, psList->ui32NumSyncs));
+#endif
+		for (ui32SyncIndex=0; ui32SyncIndex<psList->ui32NumSyncs; ui32SyncIndex++)
+		{
+			if (psList->pasFWAddrs[ui32SyncIndex].ui32Addr & 0x1)
+			{
+				SyncCheckpointRollbackFromUFO(psDevNode, psList->pasFWAddrs[ui32SyncIndex].ui32Addr);
+			}
+		}
+	}
+	return eError;
+}
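+
+/*
+ * The (ui32Addr & 0x1) test above works because sync checkpoint firmware
+ * addresses are appended with bit 0 set (see
+ * SyncCheckpointGetFirmwareAddrFromList()), so plain sync prim addresses in
+ * the same list are left untouched by the rollback.
+ */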
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+					  PVRSRV_DEVICE_NODE *psDevNode,
+					  SYNC_RECORD_HANDLE *phRecord,
+					  SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+					  IMG_UINT32 ui32FwBlockAddr,
+					  IMG_UINT32 ui32SyncOffset,
+					  IMG_BOOL bServerSync,
+					  IMG_UINT32 ui32ClassNameSize,
+					  const IMG_CHAR *pszClassName)
+{
+	struct SYNC_RECORD * psSyncRec;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	RGX_HWPERF_HOST_ALLOC(psDevNode, SYNC,
+	                      ui32FwBlockAddr + ui32SyncOffset,
+	                      pszClassName,
+	                      ui32ClassNameSize);
+
+	if (!phRecord)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*phRecord = NULL;
+
+	psSyncRec = OSAllocMem(sizeof(*psSyncRec));
+	if (!psSyncRec)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	psSyncRec->psDevNode = psDevNode;
+	psSyncRec->psServerSyncPrimBlock = hServerSyncPrimBlock;
+	psSyncRec->ui32SyncOffset = ui32SyncOffset;
+	psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
+	psSyncRec->ui64OSTime = OSClockns64();
+	psSyncRec->uiPID = OSGetCurrentProcessID();
+	psSyncRec->eRecordType = bServerSync? SYNC_RECORD_TYPE_SERVER: SYNC_RECORD_TYPE_CLIENT;
+
+	if(pszClassName)
+	{
+		if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+			ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN - 1;
+		/* Copy over the class name annotation */
+		OSStringNCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize);
+		psSyncRec->szClassName[ui32ClassNameSize] = 0;
+	}
+	else
+	{
+		/* No class name annotation */
+		psSyncRec->szClassName[0] = 0;
+	}
+
+	OSLockAcquire(psDevNode->hSyncServerRecordLock);
+	if(psDevNode->ui32SyncServerRecordCount < SYNC_RECORD_LIMIT)
+	{
+		dllist_add_to_head(&psDevNode->sSyncServerRecordList, &psSyncRec->sNode);
+		psDevNode->ui32SyncServerRecordCount++;
+
+		if(psDevNode->ui32SyncServerRecordCount > psDevNode->ui32SyncServerRecordCountHighWatermark)
+		{
+			psDevNode->ui32SyncServerRecordCountHighWatermark = psDevNode->ui32SyncServerRecordCount;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync record \"%s\". %u records already exist.",
+											__func__,
+											pszClassName,
+											psDevNode->ui32SyncServerRecordCount));
+		OSFreeMem(psSyncRec);
+		psSyncRec = NULL;
+		eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+	}
+	OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+	*phRecord = (SYNC_RECORD_HANDLE)psSyncRec;
+
+fail_alloc:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+			SYNC_RECORD_HANDLE hRecord)
+{
+	struct SYNC_RECORD **ppFreedSync;
+	struct SYNC_RECORD *pSync = (struct SYNC_RECORD*)hRecord;
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	if (!hRecord)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevNode = pSync->psDevNode;
+
+	OSLockAcquire(psDevNode->hSyncServerRecordLock);
+
+	RGX_HWPERF_HOST_FREE(psDevNode, SYNC, pSync->ui32FwBlockAddr + pSync->ui32SyncOffset);
+
+	dllist_remove_node(&pSync->sNode);
+
+	if (psDevNode->uiSyncServerRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: freed sync record index out of range",
+				 __func__));
+		psDevNode->uiSyncServerRecordFreeIdx = 0;
+	}
+	ppFreedSync = &psDevNode->apsSyncServerRecordsFreed[psDevNode->uiSyncServerRecordFreeIdx];
+	psDevNode->uiSyncServerRecordFreeIdx =
+		(psDevNode->uiSyncServerRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN;
+
+	if (*ppFreedSync)
+	{
+		OSFreeMem(*ppFreedSync);
+	}
+	pSync->psServerSyncPrimBlock = NULL;
+	pSync->ui64OSTime = OSClockns64();
+	*ppFreedSync = pSync;
+
+	psDevNode->ui32SyncServerRecordCount--;
+
+	OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+	return PVRSRV_OK;
+}
+#else
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+					  PVRSRV_DEVICE_NODE *psDevNode,
+					  SYNC_RECORD_HANDLE *phRecord,
+					  SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+					  IMG_UINT32 ui32FwBlockAddr,
+					  IMG_UINT32 ui32SyncOffset,
+					  IMG_BOOL bServerSync,
+					  IMG_UINT32 ui32ClassNameSize,
+					  const IMG_CHAR *pszClassName)
+{
+	*phRecord = NULL;
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(hServerSyncPrimBlock);
+	PVR_UNREFERENCED_PARAMETER(ui32FwBlockAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32SyncOffset);
+	PVR_UNREFERENCED_PARAMETER(bServerSync);
+	PVR_UNREFERENCED_PARAMETER(ui32ClassNameSize);
+	PVR_UNREFERENCED_PARAMETER(pszClassName);
+	return PVRSRV_OK;
+}
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+			SYNC_RECORD_HANDLE hRecord)
+{
+	PVR_UNREFERENCED_PARAMETER(hRecord);
+	return PVRSRV_OK;
+}
+#endif /* #if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+PVRSRV_ERROR
+PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection,
+			PVRSRV_DEVICE_NODE *psDevNode,
+			IMG_BOOL bServerSync,
+			IMG_UINT32 ui32FWAddr,
+			IMG_UINT32 ui32ClassNameSize,
+			const IMG_CHAR *pszClassName)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(bServerSync);
+	RGX_HWPERF_HOST_ALLOC(psDevNode, SYNC, ui32FWAddr, pszClassName, ui32ClassNameSize);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection,
+			PVRSRV_DEVICE_NODE *psDevNode,
+			IMG_UINT32 ui32FWAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	RGX_HWPERF_HOST_FREE(psDevNode, SYNC, ui32FWAddr);
+
+	return PVRSRV_OK;
+}
+
+static
+void _SyncConnectionRef(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psSyncConnectionData->hLock);
+	ui32RefCount = ++psSyncConnectionData->ui32RefCount;
+	OSLockRelease(psSyncConnectionData->hLock);
+
+	SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+						__FUNCTION__, psSyncConnectionData, ui32RefCount);
+}
+
+static
+void _SyncConnectionUnref(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psSyncConnectionData->hLock);
+	ui32RefCount = --psSyncConnectionData->ui32RefCount;
+	OSLockRelease(psSyncConnectionData->hLock);
+
+	if (ui32RefCount == 0)
+	{
+		SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+							__FUNCTION__, psSyncConnectionData, ui32RefCount);
+
+		PVR_ASSERT(dllist_is_empty(&psSyncConnectionData->sListHead));
+		OSLockDestroy(psSyncConnectionData->hLock);
+		OSFreeMem(psSyncConnectionData);
+	}
+	else
+	{
+		SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+							__FUNCTION__, psSyncConnectionData, ui32RefCount);
+	}
+}
+
+static
+void _SyncConnectionAddBlock(CONNECTION_DATA *psConnection, SYNC_PRIMITIVE_BLOCK *psBlock)
+{
+	if (psConnection)
+	{
+		SYNC_CONNECTION_DATA *psSyncConnectionData = psConnection->psSyncConnectionData;
+
+		/*
+			Make sure the connection doesn't go away. It doesn't matter that we will release
+			the lock between as the refcount and list don't have to be atomic w.r.t. to each other
+		*/
+		_SyncConnectionRef(psSyncConnectionData);
+
+		OSLockAcquire(psSyncConnectionData->hLock);
+		/* psConnection was already checked above; no need to re-test it here */
+		dllist_add_to_head(&psSyncConnectionData->sListHead, &psBlock->sConnectionNode);
+		OSLockRelease(psSyncConnectionData->hLock);
+		psBlock->psSyncConnectionData = psSyncConnectionData;
+	}
+	else
+	{
+		psBlock->psSyncConnectionData = NULL;
+	}
+}
+
+static
+void _SyncConnectionRemoveBlock(SYNC_PRIMITIVE_BLOCK *psBlock)
+{
+	SYNC_CONNECTION_DATA *psSyncConnectionData = psBlock->psSyncConnectionData;
+
+	if (psBlock->psSyncConnectionData)
+	{
+		OSLockAcquire(psSyncConnectionData->hLock);
+		dllist_remove_node(&psBlock->sConnectionNode);
+		OSLockRelease(psSyncConnectionData->hLock);
+
+		_SyncConnectionUnref(psBlock->psSyncConnectionData);
+	}
+}
+
+static
+void _SyncPrimitiveBlockRef(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psSyncBlk->hLock);
+	ui32RefCount = ++psSyncBlk->ui32RefCount;
+	OSLockRelease(psSyncBlk->hLock);
+
+	SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
+						__FUNCTION__, psSyncBlk, ui32RefCount);
+}
+
+static
+void _SyncPrimitiveBlockUnref(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psSyncBlk->hLock);
+	ui32RefCount = --psSyncBlk->ui32RefCount;
+	OSLockRelease(psSyncBlk->hLock);
+
+	if (ui32RefCount == 0)
+	{
+		PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+
+		SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (remove)",
+							__FUNCTION__, psSyncBlk, ui32RefCount);
+
+		_SyncConnectionRemoveBlock(psSyncBlk);
+		OSLockDestroy(psSyncBlk->hLock);
+		DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc);
+		psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc);
+		OSFreeMem(psSyncBlk);
+	}
+	else
+	{
+		SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
+							__FUNCTION__, psSyncBlk, ui32RefCount);
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
+                                PVRSRV_DEVICE_NODE * psDevNode,
+								SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
+								IMG_UINT32 *puiSyncPrimVAddr,
+								IMG_UINT32 *puiSyncPrimBlockSize,
+								PMR        **ppsSyncPMR)
+{
+	SYNC_PRIMITIVE_BLOCK *psNewSyncBlk;
+	PVRSRV_ERROR eError;
+
+	psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK));
+	if (psNewSyncBlk == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+	psNewSyncBlk->psDevNode = psDevNode;
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block");
+
+	eError = psDevNode->pfnAllocUFOBlock(psDevNode,
+										 &psNewSyncBlk->psMemDesc,
+										 &psNewSyncBlk->uiFWAddr.ui32Addr,
+										 &psNewSyncBlk->ui32BlockSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	*puiSyncPrimVAddr = psNewSyncBlk->uiFWAddr.ui32Addr;
+
+	eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc,
+									  (void **) &psNewSyncBlk->pui32LinAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	eError = DevmemLocalGetImportHandle(psNewSyncBlk->psMemDesc, (void **) ppsSyncPMR);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e3;
+	}
+
+	eError = OSLockCreate(&psNewSyncBlk->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e3;
+	}
+
+	psNewSyncBlk->ui32RefCount = 1;
+
+	/* If there is a connection pointer then add the new block onto its list */
+	_SyncConnectionAddBlock(psConnection, psNewSyncBlk);
+
+	*ppsSyncBlk = psNewSyncBlk;
+	*puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize;
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+						  "Allocated UFO block (FirmwareVAddr = 0x%08x)",
+						  *puiSyncPrimVAddr);
+
+	return PVRSRV_OK;
+
+e3:
+	DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc);
+e2:
+	psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc);
+e1:
+	OSFreeMem(psNewSyncBlk);
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+	_SyncPrimitiveBlockUnref(psSyncBlk);
+
+	return PVRSRV_OK;
+}
+
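+/* ui32Index counts IMG_UINT32-sized sync primitives within the block, so it
+ * is scaled to bytes before being compared with the block size */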
+static INLINE IMG_BOOL _CheckSyncIndex(SYNC_PRIMITIVE_BLOCK *psSyncBlk,
+							IMG_UINT32 ui32Index)
+{
+	return ((ui32Index * sizeof(IMG_UINT32)) < psSyncBlk->ui32BlockSize);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
+					IMG_UINT32 ui32Value)
+{
+	if (_CheckSyncIndex(psSyncBlk, ui32Index))
+	{
+		psSyncBlk->pui32LinAddr[ui32Index] = ui32Value;
+		return PVRSRV_OK;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncPrimSetKM: Index %u out of range for "
+							"0x%08X byte sync block (value 0x%08X)",
+							ui32Index,
+							psSyncBlk->ui32BlockSize,
+							ui32Value));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncPrimSetKM(SERVER_SYNC_PRIMITIVE *psServerSync, IMG_UINT32 ui32Value)
+{
+	*psServerSync->psSync->pui32LinAddr = ui32Value;
+
+	return PVRSRV_OK;
+}
+
+static void
+_ServerSyncRef(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psSync->hLock);
+	ui32RefCount = ++psSync->ui32RefCount;
+	OSLockRelease(psSync->hLock);
+
+	SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+						__FUNCTION__, psSync, ui32RefCount);
+}
+
+static void
+_ServerSyncUnref(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = psSync->psDevNode;
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psSync->hLock);
+	ui32RefCount = --psSync->ui32RefCount;
+	OSLockRelease(psSync->hLock);
+
+	if (ui32RefCount == 0)
+	{
+		IMG_UINT32 ui32SyncAddr;
+
+		(void)SyncPrimGetFirmwareAddr(psSync->psSync, &ui32SyncAddr);
+		SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+			__FUNCTION__, psSync, ui32RefCount);
+		HTBLOGK(HTB_SF_SYNC_SERVER_UNREF, ui32SyncAddr);
+
+		/* Remove the sync from the global list */
+		OSLockAcquire(psDevNode->hSyncServerListLock);
+		dllist_remove_node(&psSync->sNode);
+		OSLockRelease(psDevNode->hSyncServerListLock);
+
+		OSLockDestroy(psSync->hLock);
+		/* safe to ignore return value as an error indicates
+		 * the sync is either already freed or not a sync
+		 */
+		(void)SyncPrimFree(psSync->psSync);
+		OSFreeMem(psSync);
+	}
+	else
+	{
+		SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+							__FUNCTION__, psSync, ui32RefCount);
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncAllocKM(CONNECTION_DATA * psConnection,
+			PVRSRV_DEVICE_NODE *psDevNode,
+			SERVER_SYNC_PRIMITIVE **ppsSync,
+			IMG_UINT32 *pui32SyncPrimVAddr,
+			IMG_UINT32 ui32ClassNameSize,
+			const IMG_CHAR *pszClassName)
+{
+	SERVER_SYNC_PRIMITIVE *psNewSync;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psNewSync = OSAllocMem(sizeof(SERVER_SYNC_PRIMITIVE));
+	if (psNewSync == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* szClassName must be set up now and used for the SyncPrimAlloc call because
+	 * pszClassName, which is allocated in the bridge code, is not NUL terminated.
+	 */
+	if (pszClassName)
+	{
+		if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+			ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN - 1;
+		/* Copy over the class name annotation */
+		OSStringNCopy(psNewSync->szClassName, pszClassName, ui32ClassNameSize);
+		psNewSync->szClassName[ui32ClassNameSize] = 0;
+	}
+	else
+	{
+		/* No class name annotation */
+		psNewSync->szClassName[0] = 0;
+	}
+
+	eError = SyncPrimAllocForServerSync(psDevNode->hSyncPrimContext,
+						   &psNewSync->psSync,
+						   psNewSync->szClassName);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_sync_alloc;
+	}
+
+	eError = OSLockCreate(&psNewSync->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock_create;
+	}
+
+	eError = SyncPrimSet(psNewSync->psSync, 0);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_sync_op;
+	}
+
+	psNewSync->psDevNode = psDevNode;
+	psNewSync->ui32NextOp = 0;
+	psNewSync->ui32RefCount = 1;
+	psNewSync->ui32UID = g_ServerSyncUID++;
+	psNewSync->ui32LastSyncRequesterID = SYNC_REQUESTOR_UNKNOWN;
+	psNewSync->bSWOperation = IMG_FALSE;
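+	/* 0x0bad592c looks like a deliberate poison value so an unset "last HW
+	 * update" stands out in dumps (assumption; the magic is not documented) */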
+	psNewSync->ui32LastHWUpdate = 0x0bad592c;
+	psNewSync->bPDumped = IMG_FALSE;
+
+	eError = SyncPrimGetFirmwareAddr(psNewSync->psSync, pui32SyncPrimVAddr);
+	if (PVRSRV_OK != eError)
+	{
+		goto fail_sync_op;
+	}
+
+	/* Add the sync to the global list */
+	OSLockAcquire(psDevNode->hSyncServerListLock);
+	dllist_add_to_head(&psDevNode->sSyncServerSyncsList, &psNewSync->sNode);
+	OSLockRelease(psDevNode->hSyncServerListLock);
+
+	HTBLOGK(HTB_SF_SYNC_SERVER_ALLOC, *pui32SyncPrimVAddr);
+	SYNC_UPDATES_PRINT("%s: sync: %p, fwaddr: %8.8X", __FUNCTION__, psNewSync, *pui32SyncPrimVAddr);
+	*ppsSync = psNewSync;
+	return PVRSRV_OK;
+
+fail_sync_op:
+	OSLockDestroy(psNewSync->hLock);
+
+fail_lock_create:
+	SyncPrimFree(psNewSync->psSync);
+
+fail_sync_alloc:
+	OSFreeMem(psNewSync);
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncFreeKM(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	_ServerSyncUnref(psSync);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncGetStatusKM(IMG_UINT32 ui32SyncCount,
+			SERVER_SYNC_PRIMITIVE **papsSyncs,
+			IMG_UINT32 *pui32UID,
+			IMG_UINT32 *pui32FWAddr,
+			IMG_UINT32 *pui32CurrentOp,
+			IMG_UINT32 *pui32NextOp)
+{
+	IMG_UINT32 i, ui32SyncAddr;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_ERROR eReturn = PVRSRV_OK;
+
+	for (i = 0; i < ui32SyncCount; i++)
+	{
+		PVRSRV_CLIENT_SYNC_PRIM *psClientSync = papsSyncs[i]->psSync;
+
+		eError = SyncPrimGetFirmwareAddr(psClientSync, &ui32SyncAddr);
+		if (PVRSRV_OK != eError)
+		{
+			pui32FWAddr[i] = 0;
+			pui32CurrentOp[i] = 0;
+			eReturn = eError;
+		}
+		else
+		{
+			pui32FWAddr[i] = ui32SyncAddr;
+			pui32CurrentOp[i] = *psClientSync->pui32LinAddr;
+		}
+		pui32NextOp[i] = papsSyncs[i]->ui32NextOp;
+		pui32UID[i] = papsSyncs[i]->ui32UID;
+	}
+	return eReturn;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT) || defined(SUPPORT_SECURE_EXPORT)
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+							  SERVER_SYNC_EXPORT **ppsExport)
+{
+	SERVER_SYNC_EXPORT *psNewExport;
+	PVRSRV_ERROR eError;
+
+	psNewExport = OSAllocMem(sizeof(SERVER_SYNC_EXPORT));
+	if (!psNewExport)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	_ServerSyncRef(psSync);
+
+	psNewExport->psSync = psSync;
+	*ppsExport = psNewExport;
+
+	return PVRSRV_OK;
+e0:
+	return eError;
+}
+
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+	_ServerSyncUnref(psExport->psSync);
+
+	OSFreeMem(psExport);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerImportKM(PVRSRV_DEVICE_NODE *psDevNode,
+							  SERVER_SYNC_EXPORT *psExport,
+							  SERVER_SYNC_PRIMITIVE **ppsSync,
+							  IMG_UINT32 *pui32SyncPrimVAddr)
+{
+	SERVER_SYNC_PRIMITIVE *psSync = psExport->psSync;
+	PVRSRV_ERROR eError;
+
+	if (psSync->psDevNode != psDevNode)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: server sync invalid for this device\n",
+				 __func__));
+		return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+	}
+
+	_ServerSyncRef(psSync);
+
+	*ppsSync = psSync;
+	eError = SyncPrimGetFirmwareAddr(psSync->psSync,
+			pui32SyncPrimVAddr);
+	return eError;
+}
+#endif /* defined(SUPPORT_INSECURE_EXPORT) || defined(SUPPORT_SECURE_EXPORT) */
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+				SERVER_SYNC_EXPORT **ppsExport)
+{
+	return _PVRSRVSyncPrimServerExportKM(psSync, ppsExport);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+	return _PVRSRVSyncPrimServerUnexportKM(psExport);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerImportKM(CONNECTION_DATA *psConnection,
+							 PVRSRV_DEVICE_NODE *psDevNode,
+							 SERVER_SYNC_EXPORT *psExport,
+							 SERVER_SYNC_PRIMITIVE **ppsSync,
+							 IMG_UINT32 *pui32SyncPrimVAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	return _PVRSRVSyncPrimServerImportKM(psDevNode, psExport, ppsSync,
+										 pui32SyncPrimVAddr);
+}
+#endif /* defined(SUPPORT_INSECURE_EXPORT) */
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureExportKM(CONNECTION_DATA *psConnection,
+                                   PVRSRV_DEVICE_NODE * psDevNode,
+								   SERVER_SYNC_PRIMITIVE *psSync,
+								   IMG_SECURE_TYPE *phSecure,
+								   SERVER_SYNC_EXPORT **ppsExport,
+								   CONNECTION_DATA **ppsSecureConnection)
+{
+	SERVER_SYNC_EXPORT *psNewExport;
+	PVRSRV_ERROR eError;
+
+	/* Create an export server sync */
+	eError = _PVRSRVSyncPrimServerExportKM(psSync,
+										   &psNewExport);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	/* Transform it into a secure export */
+	eError = OSSecureExport(psConnection,
+							(void *) psNewExport,
+							phSecure,
+							ppsSecureConnection);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	*ppsExport = psNewExport;
+	return PVRSRV_OK;
+e1:
+	_PVRSRVSyncPrimServerUnexportKM(psNewExport);
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+	_PVRSRVSyncPrimServerUnexportKM(psExport);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureImportKM(CONNECTION_DATA *psConnection,
+								   PVRSRV_DEVICE_NODE *psDevNode,
+								   IMG_SECURE_TYPE hSecure,
+								   SERVER_SYNC_PRIMITIVE **ppsSync,
+								   IMG_UINT32 *pui32SyncPrimVAddr)
+{
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_EXPORT *psImport;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* Retrieve the data from the secure import */
+	eError = OSSecureImport(hSecure, (void **) &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	eError = _PVRSRVSyncPrimServerImportKM(psDevNode, psImport, ppsSync,
+										   pui32SyncPrimVAddr);
+e0:
+	return eError;
+}
+#endif /* defined(SUPPORT_SECURE_EXPORT) */
+
+IMG_UINT32 PVRSRVServerSyncRequesterRegisterKM(IMG_UINT32 *pui32SyncRequesterID)
+{
+	*pui32SyncRequesterID = g_ui32NextSyncRequestorID++;
+
+	return PVRSRV_OK;
+}
+
+void PVRSRVServerSyncRequesterUnregisterKM(IMG_UINT32 ui32SyncRequesterID)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32SyncRequesterID);
+}
+
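+/*
+ * Reserve the next operation on a server sync. The returned fence value is
+ * what the operation must wait for and the update value is what it writes on
+ * completion; when no update is requested both values are equal, so the
+ * operation only checks the sync.
+ */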
+static void
+_ServerSyncTakeOperation(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_BOOL bUpdate,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue)
+{
+	IMG_BOOL bInCaptureRange;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVR_ASSERT(OSLockIsLocked(ghServerSyncLock));
+#endif
+
+	/* Only advance the pending if an update is required */
+	if (bUpdate)
+	{
+		*pui32FenceValue = psSync->ui32NextOp++;
+	}
+	else
+	{
+		*pui32FenceValue = psSync->ui32NextOp;
+	}
+
+	*pui32UpdateValue = psSync->ui32NextOp;
+
+	PDumpIsCaptureFrameKM(&bInCaptureRange);
+	/*
+		If this is the 1st operation (in this capture range) then PDump
+		this sync
+	*/
+	if (!psSync->bPDumped && bInCaptureRange)
+	{
+#if defined(PDUMP)
+		{
+			IMG_UINT32 ui32SyncAddr;
+			(void)SyncPrimGetFirmwareAddr(psSync->psSync, &ui32SyncAddr);
+			PDumpCommentWithFlags(0,
+				"Dump initial sync state (0x%p, FW VAddr = 0x%08x) = 0x%08x\n",
+				psSync,
+				ui32SyncAddr,
+				*psSync->psSync->pui32LinAddr);
+		}
+#endif
+
+		SyncPrimPDump(psSync->psSync);
+		psSync->bPDumped = IMG_TRUE;
+	}
+
+	/*
+		When exiting capture range clear down bPDumped as we might re-enter
+		capture range and thus need to PDump this sync again
+	*/
+	if (!bInCaptureRange)
+	{
+		psSync->bPDumped = IMG_FALSE;
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue,
+						  IMG_UINT32 ui32SyncRequesterID,
+						  IMG_BOOL bUpdate,
+						  IMG_BOOL *pbFenceRequired)
+{
+	PVRSRV_ERROR eError;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVLockServerSync();
+#endif
+
+	eError = PVRSRVServerSyncQueueSWOpKM_NoGlobalLock(psSync,
+								pui32FenceValue,
+								pui32UpdateValue,
+								ui32SyncRequesterID,
+								bUpdate,
+								pbFenceRequired);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVUnlockServerSync();
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM_NoGlobalLock(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue,
+						  IMG_UINT32 ui32SyncRequesterID,
+						  IMG_BOOL bUpdate,
+						  IMG_BOOL *pbFenceRequired)
+{
+
+	_ServerSyncRef(psSync);
+
+	/*
+		_ServerSyncRef will acquire and release the lock but we need to
+		reacquire here to ensure the state that we're modifying below
+		will be consistent with itself. But it doesn't matter if another
+		thread acquires the lock in between as we've ensured the sync
+		won't go away
+	*/
+	OSLockAcquire(psSync->hLock);
+	_ServerSyncTakeOperation(psSync,
+							 bUpdate,
+							 pui32FenceValue,
+							 pui32UpdateValue);
+
+	/*
+		The caller wants to know if a fence command is required,
+		i.e. whether the last operation on this sync was done by
+		the same sync requester
+	*/
+	if (pbFenceRequired)
+	{
+		*pbFenceRequired = (ui32SyncRequesterID == psSync->ui32LastSyncRequesterID) ?
+								IMG_FALSE : IMG_TRUE;
+	}
+	/*
+		If we're transitioning from a HW operation to a SW operation we
+		need to save the last update the HW will do so that when we PDump
+		we can issue a POL for it before the next HW operation and then
+		LDB in the last SW fence update
+	*/
+	if (psSync->bSWOperation == IMG_FALSE)
+	{
+		psSync->bSWOperation = IMG_TRUE;
+		psSync->ui32LastHWUpdate = *pui32FenceValue;
+		PDumpIsCaptureFrameKM(&psSync->bSWOpStartedInCaptRange);
+	}
+
+	if (pbFenceRequired && *pbFenceRequired)
+	{
+		SYNC_UPDATES_PRINT("%s: sync: %p, fence: %d, value: %d", __FUNCTION__, psSync, *pui32FenceValue, *pui32UpdateValue);
+	}
+
+	/* Only update the last requester id if we are making changes to this
+	 * sync object. */
+	if (bUpdate)
+		psSync->ui32LastSyncRequesterID = ui32SyncRequesterID;
+
+	OSLockRelease(psSync->hLock);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+						       IMG_BOOL bUpdate,
+						       IMG_UINT32 *pui32FenceValue,
+						       IMG_UINT32 *pui32UpdateValue)
+{
+	PVRSRV_ERROR eError;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVLockServerSync();
+#endif
+
+	eError = PVRSRVServerSyncQueueHWOpKM_NoGlobalLock(psSync,
+							bUpdate,
+							pui32FenceValue,
+							pui32UpdateValue);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVUnlockServerSync();
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM_NoGlobalLock(SERVER_SYNC_PRIMITIVE *psSync,
+						       IMG_BOOL bUpdate,
+						       IMG_UINT32 *pui32FenceValue,
+						       IMG_UINT32 *pui32UpdateValue)
+{
+	/*
+		For HW operations the client is required to ensure the
+		operation has completed before freeing the sync, as we have
+		no way of dropping the refcount if we were to acquire it
+		here.
+
+		Take the lock to ensure the state that we're modifying below
+		will be consistent with itself.
+	*/
+	OSLockAcquire(psSync->hLock);
+	_ServerSyncTakeOperation(psSync,
+							 bUpdate,
+							 pui32FenceValue,
+							 pui32UpdateValue);
+
+	/*
+		Note:
+
+		We might want to consider optimising the fences that we write for
+		HW operations but for now just clear it back to unknown
+	*/
+	psSync->ui32LastSyncRequesterID = SYNC_REQUESTOR_UNKNOWN;
+
+	if (psSync->bSWOperation)
+	{
+#if defined(PDUMP)
+		{
+			IMG_UINT32 ui32SyncAddr;
+			(void)SyncPrimGetFirmwareAddr(psSync->psSync, &ui32SyncAddr);
+			PDumpCommentWithFlags(0,
+				"Wait for HW ops and dummy update for SW ops (0x%p, FW VAddr = 0x%08x, value = 0x%08x)\n",
+				psSync,
+				ui32SyncAddr,
+				*pui32FenceValue);
+		}
+#endif
+
+		if (psSync->bSWOpStartedInCaptRange)
+		{
+			/* Dump a POL for the previous HW operation */
+			SyncPrimPDumpPol(psSync->psSync,
+								psSync->ui32LastHWUpdate,
+								0xffffffff,
+								PDUMP_POLL_OPERATOR_EQUAL,
+								0);
+		}
+
+		/* Dump the expected value (i.e. the value after all the SW operations) */
+		SyncPrimPDumpValue(psSync->psSync, *pui32FenceValue);
+
+		/* Reset the state as we've just done a HW operation */
+		psSync->bSWOperation = IMG_FALSE;
+	}
+	OSLockRelease(psSync->hLock);
+
+	SYNC_UPDATES_PRINT("%s: sync: %p, fence: %d, value: %d", __FUNCTION__, psSync, *pui32FenceValue, *pui32UpdateValue);
+
+	return PVRSRV_OK;
+}
+
+IMG_BOOL ServerSyncFenceIsMet(SERVER_SYNC_PRIMITIVE *psSync,
+							   IMG_UINT32 ui32FenceValue)
+{
+	SYNC_UPDATES_PRINT("%s: sync: %p, value(%d) == fence(%d)?", __FUNCTION__, psSync, *psSync->psSync->pui32LinAddr, ui32FenceValue);
+	return (*psSync->psSync->pui32LinAddr == ui32FenceValue);
+}
+
+void
+ServerSyncCompleteOp(SERVER_SYNC_PRIMITIVE *psSync,
+					 IMG_BOOL bDoUpdate,
+					 IMG_UINT32 ui32UpdateValue)
+{
+	if (bDoUpdate)
+	{
+		SYNC_UPDATES_PRINT("%s: sync: %p (%d) = %d", __FUNCTION__, psSync, *psSync->psSync->pui32LinAddr, ui32UpdateValue);
+
+		*psSync->psSync->pui32LinAddr = ui32UpdateValue;
+	}
+
+	_ServerSyncUnref(psSync);
+}
+
+IMG_UINT32 ServerSyncGetId(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	return psSync->ui32UID;
+}
+
+PVRSRV_ERROR
+ServerSyncGetFWAddr(SERVER_SYNC_PRIMITIVE *psSync, IMG_UINT32 *pui32SyncAddr)
+{
+	return SyncPrimGetFirmwareAddr(psSync->psSync, pui32SyncAddr);
+}
+
+IMG_UINT32 ServerSyncGetValue(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	return *psSync->psSync->pui32LinAddr;
+}
+
+IMG_UINT32 ServerSyncGetNextValue(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	return psSync->ui32NextOp;
+}
+
+PVRSRV_DEVICE_NODE* ServerSyncGetDeviceNode(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	return psSync->psDevNode;
+}
+
+static void _ServerSyncState(PDLLIST_NODE psNode,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	SERVER_SYNC_PRIMITIVE *psSync = IMG_CONTAINER_OF(psNode, SERVER_SYNC_PRIMITIVE, sNode);
+
+	if (*psSync->psSync->pui32LinAddr != psSync->ui32NextOp)
+	{
+		IMG_UINT32 ui32SyncAddr;
+
+		(void)ServerSyncGetFWAddr(psSync, &ui32SyncAddr);
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+		PVR_DUMPDEBUG_LOG("\tPending server sync (ID = %d, FWAddr = 0x%08x): Current = 0x%08x, NextOp = 0x%08x (%s)",
+				psSync->ui32UID,
+				ui32SyncAddr,
+		                ServerSyncGetValue(psSync),
+		                psSync->ui32NextOp,
+		                psSync->szClassName);
+#else
+		PVR_DUMPDEBUG_LOG("\tPending server sync (ID = %d, FWAddr = 0x%08x): Value (Host) = 0x%08x, Value (FW) = 0x%08x, NextOp = 0x%08x (%s)",
+		                   psSync->ui32UID,
+				   ui32SyncAddr,
+		                   ServerSyncGetValue(psSync),
+		                   RGXReadWithSP(ui32SyncAddr),
+		                   psSync->ui32NextOp,
+		                   psSync->szClassName);
+#endif
+	}
+}
+
+static void _ServerSyncDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+					IMG_UINT32 ui32VerbLevel,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+	DLLIST_NODE *psNode, *psNext;
+
+	if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_MEDIUM)
+	{
+		PVR_DUMPDEBUG_LOG("------[ Pending Server Syncs ]------");
+		OSLockAcquire(psDevNode->hSyncServerListLock);
+		dllist_foreach_node(&psDevNode->sSyncServerSyncsList, psNode, psNext)
+		{
+			_ServerSyncState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+		OSLockRelease(psDevNode->hSyncServerListLock);
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCreateKM(IMG_UINT32 ui32SyncBlockCount,
+						 SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock,
+						 IMG_UINT32 ui32ClientSyncCount,
+						 IMG_UINT32 *paui32SyncBlockIndex,
+						 IMG_UINT32 *paui32Index,
+						 IMG_UINT32 ui32ServerSyncCount,
+						 SERVER_SYNC_PRIMITIVE **papsServerSync,
+						 SERVER_OP_COOKIE **ppsServerCookie)
+{
+	SERVER_OP_COOKIE *psNewCookie;
+	IMG_UINT32 ui32BlockAllocSize;
+	IMG_UINT32 ui32ServerAllocSize;
+	IMG_UINT32 ui32ClientAllocSize;
+	IMG_UINT32 ui32TotalAllocSize;
+	IMG_UINT32 i;
+	IMG_CHAR *pcPtr;
+	PVRSRV_ERROR eError;
+
+	if ((ui32ClientSyncCount + ui32ServerSyncCount) > SYNC_PRIM_OP_MAX_SYNCS)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Too many syncs specified", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Allocate space for the sync block pointer list */
+	ui32BlockAllocSize = ui32SyncBlockCount * (sizeof(SYNC_PRIMITIVE_BLOCK *));
+
+	/* Allocate space for all the per-client-sync elements */
+	ui32ClientAllocSize = ui32ClientSyncCount * (5 * sizeof(IMG_UINT32));
+
+	/* Allocate space for all the per-server-sync elements */
+	ui32ServerAllocSize = ui32ServerSyncCount * (sizeof(SERVER_SYNC_PRIMITIVE *)
+							+ (2 * sizeof(IMG_UINT32)));
+
+	ui32TotalAllocSize = sizeof(SERVER_OP_COOKIE) +
+							 ui32BlockAllocSize +
+							 ui32ServerAllocSize +
+							 ui32ClientAllocSize;
+
+	psNewCookie = OSAllocZMem(ui32TotalAllocSize);
+	if (!psNewCookie)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+	pcPtr = (IMG_CHAR *) psNewCookie;
+
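+	/*
+	 * The cookie and all of its arrays live in a single allocation, laid out
+	 * as: SERVER_OP_COOKIE | sync block ptrs | sync block indices |
+	 * sync indices | flags | fence values | update values |
+	 * server sync ptrs | server fence values | server update values
+	 */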
+	/* Setup the pointers */
+	pcPtr += sizeof(SERVER_OP_COOKIE);
+	psNewCookie->papsSyncPrimBlock = (SYNC_PRIMITIVE_BLOCK **) pcPtr;
+
+	pcPtr += sizeof(SYNC_PRIMITIVE_BLOCK *) * ui32SyncBlockCount;
+	psNewCookie->paui32SyncBlockIndex = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32Index = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32Flags = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32FenceValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32UpdateValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->papsServerSync = (SERVER_SYNC_PRIMITIVE **) pcPtr;
+
+	pcPtr += sizeof(SERVER_SYNC_PRIMITIVE *) * ui32ServerSyncCount;
+	psNewCookie->paui32ServerFenceValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+	psNewCookie->paui32ServerUpdateValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+
+	/* Check the pointer setup went ok */
+	PVR_ASSERT(pcPtr == (((IMG_CHAR *) psNewCookie) + ui32TotalAllocSize));
+
+	psNewCookie->ui32SyncBlockCount = ui32SyncBlockCount;
+	psNewCookie->ui32ServerSyncCount = ui32ServerSyncCount;
+	psNewCookie->ui32ClientSyncCount = ui32ClientSyncCount;
+	psNewCookie->bActive = IMG_FALSE;
+	HTBLOGK(HTB_SF_SYNC_PRIM_OP_CREATE, psNewCookie, ui32SyncBlockCount,
+			ui32ServerSyncCount, ui32ClientSyncCount);
+
+	/* Copy all the data into our server cookie */
+	OSCachedMemCopy(psNewCookie->papsSyncPrimBlock,
+			  papsSyncPrimBlock,
+			  sizeof(SYNC_PRIMITIVE_BLOCK *) * ui32SyncBlockCount);
+
+	/* Copy the sync block and sync indices.
+	 *
+	 * Each index must be verified:
+	 * Each Sync Block index must be within the range of the number of sync block
+	 * pointers received. All those pointers are valid, as verified by the bridge.
+	 * And each Sync index must be valid for the Sync Block it relates to.
+	 */
+	for (i = 0; i < ui32ClientSyncCount; i++)
+	{
+		SYNC_PRIMITIVE_BLOCK *psSyncBlock;
+
+		/* first copy the sync block index and ensure it is in range */
+
+		if (paui32SyncBlockIndex[i] >= ui32SyncBlockCount)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Sync block index %u is out of range",
+										__func__,
+										paui32SyncBlockIndex[i]));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto err_range;
+		}
+
+		psNewCookie->paui32SyncBlockIndex[i] = paui32SyncBlockIndex[i];
+
+		/* now copy the sync index and ensure it is a valid index within
+		 * the corresponding sync block (note the sync block index was
+		 * verified above)
+		 */
+
+		psSyncBlock = psNewCookie->papsSyncPrimBlock[paui32SyncBlockIndex[i]];
+
+		if (_CheckSyncIndex(psSyncBlock, paui32Index[i]) == IMG_FALSE)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Sync index %u is out of range",
+										__func__,
+										paui32Index[i]));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto err_range;
+		}
+
+		psNewCookie->paui32Index[i] = paui32Index[i];
+	}
+
+	OSCachedMemCopy(psNewCookie->papsServerSync,
+			  papsServerSync,
+			  sizeof(SERVER_SYNC_PRIMITIVE *) * ui32ServerSyncCount);
+
+	/*
+		Take a reference on all the sync blocks and server syncs so they can't
+		be freed while we're using them
+	*/
+	for (i = 0; i < ui32SyncBlockCount; i++)
+	{
+		_SyncPrimitiveBlockRef(psNewCookie->papsSyncPrimBlock[i]);
+	}
+
+	for (i = 0; i < ui32ServerSyncCount; i++)
+	{
+		_ServerSyncRef(psNewCookie->papsServerSync[i]);
+	}
+
+	*ppsServerCookie = psNewCookie;
+	return PVRSRV_OK;
+
+err_range:
+	OSFreeMem(psNewCookie);
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpTakeKM(SERVER_OP_COOKIE *psServerCookie,
+					       IMG_UINT32 ui32ClientSyncCount,
+					       IMG_UINT32 *paui32Flags,
+					       IMG_UINT32 *paui32FenceValue,
+					       IMG_UINT32 *paui32UpdateValue,
+					       IMG_UINT32 ui32ServerSyncCount,
+						   IMG_UINT32 *paui32ServerFlags)
+{
+	IMG_UINT32 i;
+
+	if ((ui32ClientSyncCount != psServerCookie->ui32ClientSyncCount) ||
+		(ui32ServerSyncCount != psServerCookie->ui32ServerSyncCount))
+	{
+		/* The bridge layer should have stopped us getting here but check in case */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync counts", __FUNCTION__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	for (i = 0; i < ui32ServerSyncCount; i++)
+	{
+		/* Server syncs must fence */
+		if ((paui32ServerFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK) == 0)
+		{
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+
+	/*
+		For client syncs all we need to do is save the values
+		that we've been passed
+	*/
+	OSCachedMemCopy(psServerCookie->paui32Flags,
+			  paui32Flags,
+			  sizeof(IMG_UINT32) * ui32ClientSyncCount);
+	OSCachedMemCopy(psServerCookie->paui32FenceValue,
+			  paui32FenceValue,
+			  sizeof(IMG_UINT32) * ui32ClientSyncCount);
+	OSCachedMemCopy(psServerCookie->paui32UpdateValue,
+			  paui32UpdateValue,
+			  sizeof(IMG_UINT32) * ui32ClientSyncCount);
+
+	/*
+		For server syncs we just take an operation
+	*/
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVLockServerSync();
+#endif
+	for (i = 0; i < ui32ServerSyncCount; i++)
+	{
+		/*
+			Take op can only take one operation at a time so we can't
+			optimise away fences so just report the requester as unknown
+		*/
+		PVRSRVServerSyncQueueSWOpKM_NoGlobalLock(psServerCookie->papsServerSync[i],
+								  &psServerCookie->paui32ServerFenceValue[i],
+								  &psServerCookie->paui32ServerUpdateValue[i],
+								  SYNC_REQUESTOR_UNKNOWN,
+								  (paui32ServerFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE) ? IMG_TRUE:IMG_FALSE,
+								  NULL);
+	}
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVUnlockServerSync();
+#endif
+
+	HTBLOGK(HTB_SF_SYNC_PRIM_OP_TAKE, psServerCookie,
+			ui32ServerSyncCount, ui32ClientSyncCount);
+	psServerCookie->bActive = IMG_TRUE;
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpReadyKM(SERVER_OP_COOKIE *psServerCookie,
+						IMG_BOOL *pbReady)
+{
+	IMG_UINT32 i;
+	IMG_BOOL bReady = IMG_TRUE;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!psServerCookie->bActive)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Operation cookie not active (no take operation performed)", __FUNCTION__));
+
+		bReady = IMG_FALSE;
+		eError = PVRSRV_ERROR_BAD_SYNC_STATE;
+		goto e0;
+	}
+
+	/* Check the client syncs */
+	for (i = 0; i < psServerCookie->ui32ClientSyncCount; i++)
+	{
+		if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+		{
+			IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+			IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+			SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+			if (psSyncBlock->pui32LinAddr[ui32Index] !=
+					psServerCookie->paui32FenceValue[i])
+			{
+				bReady = IMG_FALSE;
+				goto e0;
+			}
+		}
+	}
+
+	for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+	{
+		bReady = ServerSyncFenceIsMet(psServerCookie->papsServerSync[i],
+									  psServerCookie->paui32ServerFenceValue[i]);
+		if (!bReady)
+		{
+			break;
+		}
+	}
+
+e0:
+	*pbReady = bReady;
+	return eError;
+}
+
+static
+IMG_BOOL _SyncPrimOpComplete(SERVER_OP_COOKIE *psServerCookie)
+{
+	RGX_HWPERF_UFO_DATA_ELEMENT sUFOData[1];
+	IMG_BOOL bDidUpdates = IMG_FALSE;
+	IMG_UINT32 i;
+
+	for (i = 0; i < psServerCookie->ui32ClientSyncCount; i++)
+	{
+		if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+		{
+			IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+			IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+			SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+			sUFOData[0].sUpdate.ui32FWAddr = psSyncBlock->uiFWAddr.ui32Addr + ui32Index * sizeof(IMG_UINT32);
+			sUFOData[0].sUpdate.ui32OldValue = psSyncBlock->pui32LinAddr[ui32Index];
+			sUFOData[0].sUpdate.ui32NewValue = psServerCookie->paui32UpdateValue[i];
+
+			psSyncBlock->pui32LinAddr[ui32Index] = psServerCookie->paui32UpdateValue[i];
+			RGX_HWPERF_HOST_UFO(psSyncBlock->psDevNode->pvDevice,
+								RGX_HWPERF_UFO_EV_UPDATE, sUFOData, 1);
+			bDidUpdates = IMG_TRUE;
+		}
+	}
+
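+	/* A server sync was taken with an update if and only if its fence and
+	 * update values differ (see _ServerSyncTakeOperation), so only those
+	 * emit an UFO update event */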
+	for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+	{
+		IMG_BOOL bUpdate = psServerCookie->paui32ServerFenceValue[i] != psServerCookie->paui32ServerUpdateValue[i];
+
+		if (bUpdate)
+		{
+			IMG_UINT32 ui32SyncAddr;
+
+			(void)ServerSyncGetFWAddr(psServerCookie->papsServerSync[i], &ui32SyncAddr);
+			sUFOData[0].sUpdate.ui32FWAddr = ui32SyncAddr;
+			sUFOData[0].sUpdate.ui32OldValue = ServerSyncGetValue(psServerCookie->papsServerSync[i]);
+			sUFOData[0].sUpdate.ui32NewValue = psServerCookie->paui32ServerUpdateValue[i];
+			RGX_HWPERF_HOST_UFO(psServerCookie->papsServerSync[i]->psDevNode->pvDevice,
+								RGX_HWPERF_UFO_EV_UPDATE, sUFOData, 1);
+			bDidUpdates = IMG_TRUE;
+		}
+
+		ServerSyncCompleteOp(psServerCookie->papsServerSync[i],
+							 bUpdate,
+							 psServerCookie->paui32ServerUpdateValue[i]);
+	}
+
+	psServerCookie->bActive = IMG_FALSE;
+
+	return bDidUpdates;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCompleteKM(SERVER_OP_COOKIE *psServerCookie)
+{
+	IMG_BOOL bReady;
+
+	/* Any error is reflected in bReady, which is checked below */
+	(void) PVRSRVSyncPrimOpReadyKM(psServerCookie, &bReady);
+
+	/* Check the client is playing ball */
+	if (!bReady)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: sync op still not ready", __FUNCTION__));
+
+		return PVRSRV_ERROR_BAD_SYNC_STATE;
+	}
+
+	HTBLOGK(HTB_SF_SYNC_PRIM_OP_COMPLETE, psServerCookie);
+
+	if (_SyncPrimOpComplete(psServerCookie))
+	{
+		PVRSRVCheckStatus(NULL);
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpDestroyKM(SERVER_OP_COOKIE *psServerCookie)
+{
+	IMG_UINT32 i;
+
+	/* If the operation is still active then check if it's finished yet */
+	if (psServerCookie->bActive)
+	{
+		if (PVRSRVSyncPrimOpCompleteKM(psServerCookie) == PVRSRV_ERROR_BAD_SYNC_STATE)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Not ready, ask for retry", __FUNCTION__));
+			return PVRSRV_ERROR_RETRY;
+		}
+	}
+
+	/* Drop our references on the sync blocks and server syncs*/
+	for (i = 0; i < psServerCookie->ui32SyncBlockCount; i++)
+	{
+		_SyncPrimitiveBlockUnref(psServerCookie->papsSyncPrimBlock[i]);
+	}
+
+	for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+	{
+		_ServerSyncUnref(psServerCookie->papsServerSync[i]);
+	}
+
+	HTBLOGK(HTB_SF_SYNC_PRIM_OP_DESTROY, psServerCookie);
+	OSFreeMem(psServerCookie);
+	return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
+{
+	/*
+		We might be asked to PDump sync state outside of capture range
+		(e.g. texture uploads) so make this continuous.
+	*/
+	DevmemPDumpLoadMemValue32(psSyncBlk->psMemDesc,
+					   ui32Offset,
+					   ui32Value,
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
+{
+	/*
+		We might be asked to PDump sync state outside of capture range
+		(e.g. texture uploads) so make this continuous.
+	*/
+	DevmemPDumpLoadMem(psSyncBlk->psMemDesc,
+					   ui32Offset,
+					   sizeof(IMG_UINT32),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+						 IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T ui32PDumpFlags)
+{
+	DevmemPDumpDevmemPol32(psSyncBlk->psMemDesc,
+						   ui32Offset,
+						   ui32Value,
+						   ui32Mask,
+						   eOperator,
+						   ui32PDumpFlags);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T ui32PDumpFlags)
+{
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!psServerCookie->bActive)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Operation cookie not active (no take operation performed)", __FUNCTION__));
+
+		eError = PVRSRV_ERROR_BAD_SYNC_STATE;
+		goto e0;
+	}
+
+	/* PDump POL on the client syncs */
+	for (i = 0; i < psServerCookie->ui32ClientSyncCount; i++)
+	{
+		if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+		{
+			IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+			IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+			SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+			PVRSRVSyncPrimPDumpPolKM(psSyncBlock,
+									ui32Index * sizeof(IMG_UINT32),
+									psServerCookie->paui32FenceValue[i],
+									0xFFFFFFFFU,
+									eOperator,
+									ui32PDumpFlags);
+		}
+	}
+
+	/* PDump POL on the server syncs */
+	for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+	{
+		SERVER_SYNC_PRIMITIVE *psServerSync = psServerCookie->papsServerSync[i];
+		IMG_UINT32 ui32FenceValue = psServerCookie->paui32ServerFenceValue[i];
+
+		SyncPrimPDumpPol(psServerSync->psSync,
+						ui32FenceValue,
+						0xFFFFFFFFU,
+						PDUMP_POLL_OPERATOR_EQUAL,
+						ui32PDumpFlags);
+	}
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+						 IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+						 IMG_UINT64 uiBufferSize)
+{
+	DevmemPDumpCBP(psSyncBlk->psMemDesc,
+				   ui32Offset,
+				   uiWriteOffset,
+				   uiPacketSize,
+				   uiBufferSize);
+	return PVRSRV_OK;
+}
+#endif
+
+/* SyncRegisterConnection */
+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData)
+{
+	SYNC_CONNECTION_DATA *psSyncConnectionData;
+	PVRSRV_ERROR eError;
+
+	psSyncConnectionData = OSAllocMem(sizeof(SYNC_CONNECTION_DATA));
+	if (psSyncConnectionData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	eError = OSLockCreate(&psSyncConnectionData->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lockcreate;
+	}
+	dllist_init(&psSyncConnectionData->sListHead);
+	psSyncConnectionData->ui32RefCount = 1;
+
+	*ppsSyncConnectionData = psSyncConnectionData;
+	return PVRSRV_OK;
+
+fail_lockcreate:
+	OSFreeMem(psSyncConnectionData);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/* SyncUnregisterConnection */
+void SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+	_SyncConnectionUnref(psSyncConnectionData);
+}
+
+void SyncConnectionPDumpSyncBlocks(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+	DLLIST_NODE *psNode, *psNext;
+
+	OSLockAcquire(psSyncConnectionData->hLock);
+
+	PDUMPCOMMENT("Dump client Sync Prim state");
+	dllist_foreach_node(&psSyncConnectionData->sListHead, psNode, psNext)
+	{
+		SYNC_PRIMITIVE_BLOCK *psSyncBlock =
+			IMG_CONTAINER_OF(psNode, SYNC_PRIMITIVE_BLOCK, sConnectionNode);
+
+		DevmemPDumpLoadMem(psSyncBlock->psMemDesc,
+						   0,
+						   psSyncBlock->ui32BlockSize,
+						   PDUMP_FLAGS_CONTINUOUS);
+	}
+
+	OSLockRelease(psSyncConnectionData->hLock);
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
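+/* Resolve a firmware sync address to a short human-readable description of
+ * the live record that owns it, for inclusion in debug output */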
+void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr,
+					  IMG_CHAR * pszSyncInfo, size_t len)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_INT iEnd;
+	IMG_BOOL bFound = IMG_FALSE;
+
+	if (!pszSyncInfo)
+	{
+		return;
+	}
+
+	OSLockAcquire(psDevNode->hSyncServerRecordLock);
+	pszSyncInfo[0] = '\0';
+
+	dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+	{
+		struct SYNC_RECORD *psSyncRec =
+			IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+		if ((psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset) == ui32FwAddr
+			&& SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType
+			&& psSyncRec->psServerSyncPrimBlock
+			&& psSyncRec->psServerSyncPrimBlock->pui32LinAddr
+			)
+		{
+			IMG_UINT32 *pui32SyncAddr;
+			pui32SyncAddr = psSyncRec->psServerSyncPrimBlock->pui32LinAddr
+				+ (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32));
+			iEnd = OSSNPrintf(pszSyncInfo, len, "Cur=0x%08x %s:%05u (%s)",
+				*pui32SyncAddr,
+				((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+				psSyncRec->uiPID,
+				psSyncRec->szClassName
+				);
+			if (iEnd >= 0 && (size_t)iEnd < len)
+			{
+				pszSyncInfo[iEnd] = '\0';
+			}
+			bFound = IMG_TRUE;
+			break;
+		}
+	}
+
+	OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+	if (!bFound && (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT))
+	{
+		OSSNPrintf(pszSyncInfo, len, "(Record may be lost)");
+	}
+}
+
+#define NS_IN_S (1000000000UL)
+static void _SyncRecordPrint(struct SYNC_RECORD *psSyncRec,
+					IMG_UINT64 ui64TimeNow,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	SYNC_PRIMITIVE_BLOCK *psSyncBlock = psSyncRec->psServerSyncPrimBlock;
+
+	if (SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType)
+	{
+		IMG_UINT64 ui64DeltaS;
+		IMG_UINT32 ui32DeltaF;
+		IMG_UINT64 ui64Delta = ui64TimeNow - psSyncRec->ui64OSTime;
+		ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF);
+
+		if (psSyncBlock && psSyncBlock->pui32LinAddr)
+		{
+			IMG_UINT32 *pui32SyncAddr;
+			pui32SyncAddr = psSyncBlock->pui32LinAddr
+				+ (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32));
+
+			PVR_DUMPDEBUG_LOG("\t%s %05u %05llu.%09u FWAddr=0x%08x Val=0x%08x (%s)",
+				((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+				psSyncRec->uiPID,
+				ui64DeltaS, ui32DeltaF,
+				(psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset),
+				*pui32SyncAddr,
+				psSyncRec->szClassName
+				);
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG("\t%s %05u %05llu.%09u FWAddr=0x%08x Val=<null_ptr> (%s)",
+				((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+				psSyncRec->uiPID,
+				ui64DeltaS, ui32DeltaF,
+				(psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset),
+				psSyncRec->szClassName
+				);
+		}
+	}
+}
+
+static void _SyncRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+					IMG_UINT32 ui32VerbLevel,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+	IMG_UINT64 ui64TimeNowS;
+	IMG_UINT32 ui32TimeNowF;
+	IMG_UINT64 ui64TimeNow = OSClockns64();
+	DLLIST_NODE *psNode, *psNext;
+
+	ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF);
+
+	if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_MEDIUM)
+	{
+		IMG_UINT32 i;
+		OSLockAcquire(psDevNode->hSyncServerRecordLock);
+
+		PVR_DUMPDEBUG_LOG("Dumping all allocated syncs. Allocated: %u High watermark: %u @ %05llu.%09u",
+										psDevNode->ui32SyncServerRecordCount,
+										psDevNode->ui32SyncServerRecordCountHighWatermark,
+										ui64TimeNowS,
+										ui32TimeNowF);
+		if (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT)
+		{
+			PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.",
+							  SYNC_RECORD_LIMIT);
+		}
+
+		PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)",
+					"Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation");
+
+		dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+		{
+			struct SYNC_RECORD *psSyncRec =
+				IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+			_SyncRecordPrint(psSyncRec, ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+
+		PVR_DUMPDEBUG_LOG("Dumping all recently freed syncs @ %05llu.%09u", ui64TimeNowS, ui32TimeNowF);
+		PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)",
+					"Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation");
+		for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncServerRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN);
+			 i != psDevNode->uiSyncServerRecordFreeIdx;
+			 i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN))
+		{
+			if (psDevNode->apsSyncServerRecordsFreed[i])
+			{
+				_SyncRecordPrint(psDevNode->apsSyncServerRecordsFreed[i],
+								 ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+			}
+			else
+			{
+				break;
+			}
+		}
+
+		OSLockRelease(psDevNode->hSyncServerRecordLock);
+	}
+}
+#undef NS_IN_S
+
+static PVRSRV_ERROR SyncRecordListInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRV_ERROR eError;
+
+	psDevNode->ui32SyncServerRecordCount = 0;
+	psDevNode->ui32SyncServerRecordCountHighWatermark = 0;
+
+	eError = OSLockCreate(&psDevNode->hSyncServerRecordLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock_create;
+	}
+	dllist_init(&psDevNode->sSyncServerRecordList);
+
+	eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncServerRecordNotify,
+											psDevNode,
+											_SyncRecordRequest,
+											DEBUG_REQUEST_SERVERSYNC,
+											psDevNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_dbg_register;
+	}
+
+	return PVRSRV_OK;
+
+fail_dbg_register:
+	OSLockDestroy(psDevNode->hSyncServerRecordLock);
+fail_lock_create:
+	return eError;
+}
+
+static void SyncRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	DLLIST_NODE *psNode, *psNext;
+	int i;
+
+	OSLockAcquire(psDevNode->hSyncServerRecordLock);
+	dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+	{
+		struct SYNC_RECORD *pSyncRec =
+			IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+
+		dllist_remove_node(psNode);
+		OSFreeMem(pSyncRec);
+	}
+
+	for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++)
+	{
+		if (psDevNode->apsSyncServerRecordsFreed[i])
+		{
+			OSFreeMem(psDevNode->apsSyncServerRecordsFreed[i]);
+			psDevNode->apsSyncServerRecordsFreed[i] = NULL;
+		}
+	}
+	OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+	if (psDevNode->hSyncServerRecordNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerRecordNotify);
+	}
+	OSLockDestroy(psDevNode->hSyncServerRecordLock);
+}
+#endif /* #if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+PVRSRV_ERROR ServerSyncInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRV_ERROR eError;
+
+	eError = OSLockCreate(&psDevNode->hSyncServerListLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock_create;
+	}
+	dllist_init(&psDevNode->sSyncServerSyncsList);
+
+	eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncServerNotify,
+											psDevNode,
+											_ServerSyncDebugRequest,
+											DEBUG_REQUEST_SERVERSYNC,
+											psDevNode);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_dbg_register;
+	}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	eError = SyncRecordListInit(psDevNode);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_record_list;
+	}
+#endif
+
+	return PVRSRV_OK;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+fail_record_list:
+	PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerNotify);
+#endif
+fail_dbg_register:
+	OSLockDestroy(psDevNode->hSyncServerListLock);
+fail_lock_create:
+	return eError;
+}
+
+void ServerSyncDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerNotify);
+	psDevNode->hSyncServerNotify = NULL;
+
+	OSLockDestroy(psDevNode->hSyncServerListLock);
+	psDevNode->hSyncServerListLock = NULL;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	SyncRecordListDeinit(psDevNode);
+#endif
+}
+
+PVRSRV_ERROR ServerSyncInitOnce(PVRSRV_DATA *psPVRSRVData)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&ghServerSyncLock, LOCK_TYPE_NONE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create server sync lock", __func__));
+		goto err;
+	}
+err:
+#endif
+	return eError;
+}
+
+void ServerSyncDeinitOnce(PVRSRV_DATA *psPVRSRVData)
+{
+	PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(ghServerSyncLock);
+#endif
+}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+void PVRSRVLockServerSync(void)
+{
+	OSLockAcquire(ghServerSyncLock);
+}
+
+void PVRSRVUnlockServerSync(void)
+{
+	OSLockRelease(ghServerSyncLock);
+}
+#endif
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/tlintern.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/tlintern.c
new file mode 100644
index 0000000..b9b18d5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/tlintern.c
@@ -0,0 +1,436 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer kernel side API implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport Layer functions available to driver components in
+                the driver.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "allocmem.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "devicemem.h"
+
+#include "pvrsrv_tlcommon.h"
+#include "tlintern.h"
+
+/*
+ * Make functions
+ */
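+/* Allocate and initialise a stream descriptor: f1 is the owning stream node,
+ * f2 the descriptor flags and f3 the read event handle. */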
+PTL_STREAM_DESC
+TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3)
+{
+	PTL_STREAM_DESC ps = OSAllocZMem(sizeof(TL_STREAM_DESC));
+	if (ps == NULL)
+	{
+		return NULL;
+	}
+	ps->psNode = f1;
+	ps->ui32Flags = f2;
+	ps->hReadEvent = f3;
+	ps->uiRefCount = 1;
+	return ps;
+}
+
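+/* Allocate and initialise a stream node and wire it to its stream: f2 is the
+ * read event object, f3 the stream and f4 the stream's read descriptor. */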
+PTL_SNODE
+TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4)
+{
+	PTL_SNODE ps = OSAllocZMem(sizeof(TL_SNODE));
+	if (ps == NULL)
+	{
+		return NULL;
+	}
+	ps->hReadEventObj = f2;
+	ps->psStream = f3;
+	ps->psRDesc = f4;
+	f3->psNode = ps;
+	return ps;
+}
+
+/*
+ * Transport Layer Global top variables and functions
+ */
+static TL_GLOBAL_DATA  sTLGlobalData = { 0 };
+
+TL_GLOBAL_DATA *TLGGD(void)	// TLGetGlobalData()
+{
+	return &sTLGlobalData;
+}
+
+/* TLInit must only be called once at driver initialisation.
+ * An assert is provided to check this condition on debug builds.
+ */
+PVRSRV_ERROR
+TLInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT (sTLGlobalData.hTLGDLock == NULL && sTLGlobalData.hTLEventObj == NULL);
+
+	/* Allocate a lock for TL global data, to be used while updating the TL data.
+	 * This is for making TL global data multi-thread safe */
+	eError = OSLockCreate (&sTLGlobalData.hTLGDLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	/* Allocate the event object used to signal global TL events such as
+	 * new stream created */
+	eError = OSEventObjectCreate("TLGlobalEventObj", &sTLGlobalData.hTLEventObj);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	PVR_DPF_RETURN_OK;
+
+/* Don't allow the driver to start up on error */
+e1:
+	OSLockDestroy (sTLGlobalData.hTLGDLock);
+	sTLGlobalData.hTLGDLock = NULL;
+e0:
+	PVR_DPF_RETURN_RC (eError);
+}
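+
+/* Expected pairing (an illustrative sketch, assuming the usual services
+ * start-up ordering rather than documenting it):
+ *
+ *   if (TLInit() != PVRSRV_OK)
+ *   {
+ *       // abort driver initialisation
+ *   }
+ *   ...
+ *   TLDeInit();  // on driver unload, after all clients have disconnected
+ */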
+
+static void RemoveAndFreeStreamNode(PTL_SNODE psRemove)
+{
+	TL_GLOBAL_DATA*  psGD = TLGGD();
+	PTL_SNODE* 		 last;
+	PTL_SNODE 		 psn;
+	PVRSRV_ERROR     eError;
+
+	PVR_DPF_ENTERED;
+
+	/* Unlink the stream node from the master list */
+	PVR_ASSERT(psGD->psHead);
+	last = &psGD->psHead;
+	for (psn = psGD->psHead; psn; psn=psn->psNext)
+	{
+		if (psn == psRemove)
+		{
+			/* Other calling code may have freed and zeroed the pointers */
+			if (psn->psRDesc)
+			{
+				OSFreeMem(psn->psRDesc);
+				psn->psRDesc = NULL;
+			}
+			if (psn->psStream)
+			{
+				OSFreeMem(psn->psStream);
+				psn->psStream = NULL;
+			}
+			*last = psn->psNext;
+			break;
+		}
+		last = &psn->psNext;
+	}
+
+	/* Release the event list object owned by the stream node */
+	if (psRemove->hReadEventObj)
+	{
+		eError = OSEventObjectDestroy(psRemove->hReadEventObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+
+		psRemove->hReadEventObj = NULL;
+	}
+
+	/* Release the memory of the stream node */
+	OSFreeMem(psRemove);
+
+	PVR_DPF_RETURN;
+}
+
+void
+TLDeInit(void)
+{
+	PVR_DPF_ENTERED;
+
+	if (sTLGlobalData.uiClientCnt)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLDeInit: %d client stream(s) still connected", sTLGlobalData.uiClientCnt));
+		sTLGlobalData.uiClientCnt = 0;
+	}
+
+	/* Clean up the SNODE list; RemoveAndFreeStreamNode() unlinks the head
+	 * each time round, so psHead is NULL on loop exit */
+	while (sTLGlobalData.psHead)
+	{
+		RemoveAndFreeStreamNode(sTLGlobalData.psHead);
+	}
+
+	/* Clean up the TL global event object */
+	if (sTLGlobalData.hTLEventObj)
+	{
+		OSEventObjectDestroy(sTLGlobalData.hTLEventObj);
+		sTLGlobalData.hTLEventObj = NULL;
+	}
+
+	/* Destroy the TL global data lock */
+	if (sTLGlobalData.hTLGDLock)
+	{
+		OSLockDestroy (sTLGlobalData.hTLGDLock);
+		sTLGlobalData.hTLGDLock = NULL;
+	}
+
+	PVR_DPF_RETURN;
+}
+
+void TLAddStreamNode(PTL_SNODE psAdd)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psAdd);
+	psAdd->psNext = TLGGD()->psHead;
+	TLGGD()->psHead = psAdd;
+
+	PVR_DPF_RETURN;
+}
+
+PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName)
+{
+	TL_GLOBAL_DATA*  psGD = TLGGD();
+	PTL_SNODE 		 psn;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(pszName);
+
+	for (psn = psGD->psHead; psn; psn=psn->psNext)
+	{
+		if (psn->psStream && OSStringCompare(psn->psStream->szName, pszName)==0)
+		{
+			PVR_DPF_RETURN_VAL(psn);
+		}
+	}
+
+	PVR_DPF_RETURN_VAL(NULL);
+}
+
+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc)
+{
+	TL_GLOBAL_DATA*  psGD = TLGGD();
+	PTL_SNODE 		 psn;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDesc);
+
+	for (psn = psGD->psHead; psn; psn=psn->psNext)
+	{
+		if (psn->psRDesc == psDesc || psn->psWDesc == psDesc)
+		{
+			PVR_DPF_RETURN_VAL(psn);
+		}
+	}
+	PVR_DPF_RETURN_VAL(NULL);
+}
+
+static inline IMG_BOOL IsDigit(IMG_CHAR c)
+{
+	return c >= '0' && c <= '9';
+}
+
+static inline IMG_BOOL ReadNumber(const IMG_CHAR *pszBuffer,
+                                  IMG_UINT32 *pui32Number)
+{
+	IMG_CHAR acTmp[11] = {0}; // max 10 digits
+	IMG_UINT32 ui32Result;
+	IMG_UINT i;
+
+	for (i = 0; i < sizeof(acTmp) - 1; i++)
+	{
+		if (!IsDigit(*pszBuffer))
+			break;
+		acTmp[i] = *pszBuffer++;
+	}
+
+	/* if there are no digits or there is something after the number */
+	if (i == 0 || *pszBuffer != '\0')
+		return IMG_FALSE;
+
+	if (OSStringToUINT32(acTmp, 10, &ui32Result) != PVRSRV_OK)
+		return IMG_FALSE;
+
+	*pui32Number = ui32Result;
+
+	return IMG_TRUE;
+}
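+
+/* Illustrative behaviour of ReadNumber() (a sketch, not an exhaustive
+ * specification):
+ *   "123"         -> IMG_TRUE,  *pui32Number = 123
+ *   "123x"        -> IMG_FALSE  (trailing characters after the digits)
+ *   ""            -> IMG_FALSE  (no digits at all)
+ *   "99999999999" -> IMG_FALSE  (more than 10 digits are never consumed)
+ */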
+
+IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern,
+                          IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+                          IMG_UINT32 ui32Max)
+{
+	TL_GLOBAL_DATA *psGD = TLGGD();
+	PTL_SNODE psn;
+	IMG_UINT32 ui32Count = 0;
+	size_t uiLen;
+
+	PVR_ASSERT(pszNamePattern);
+
+	if ((uiLen = OSStringLength(pszNamePattern)) == 0)
+		return 0;
+
+	for (psn = psGD->psHead; psn; psn = psn->psNext)
+	{
+		if (OSStringNCompare(pszNamePattern, psn->psStream->szName, uiLen) != 0)
+			continue;
+
+		/* If aaszStreams is NULL we only count how many streams match
+		 * the given pattern. If it is a valid pointer we also return
+		 * the names. */
+		if (aaszStreams != NULL)
+		{
+			if (ui32Count >= ui32Max)
+				break;
+
+			/* All names are shorter than the maximum size and NUL-terminated */
+			OSStringNCopy(aaszStreams[ui32Count], psn->psStream->szName,
+			              PRVSRVTL_MAX_STREAM_NAME_SIZE);
+		}
+
+		ui32Count++;
+	}
+
+	return ui32Count;
+}
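+
+/* Note: TLDiscoverStreamNodes() performs a prefix match via
+ * OSStringNCompare(), so a pattern of "hwperf" would match both "hwperf"
+ * and "hwperf_gpu0" (stream names here are purely illustrative). Passing
+ * aaszStreams == NULL turns the call into a pure count of matching streams.
+ */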
+
+PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc)
+{
+	PTL_SNODE psn;
+
+	PVR_DPF_ENTERED;
+
+	psn = TLFindStreamNodeByDesc(psDesc);
+	if (psn == NULL)
+		PVR_DPF_RETURN_VAL(NULL);
+
+	PVR_ASSERT(psDesc == psn->psWDesc);
+
+	psn->uiWRefCount++;
+	psDesc->uiRefCount++;
+
+	PVR_DPF_RETURN_VAL(psn);
+}
+
+void TLReturnStreamNode(PTL_SNODE psNode)
+{
+	psNode->uiWRefCount--;
+	psNode->psWDesc->uiRefCount--;
+
+	PVR_ASSERT(psNode->uiWRefCount > 0);
+	PVR_ASSERT(psNode->psWDesc->uiRefCount > 0);
+}
+
+IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psRemove);
+
+	/* If there is a client connected to this stream, defer the stream's deletion */
+	if (psRemove->psRDesc != NULL || psRemove->psWDesc != NULL)
+	{
+		PVR_DPF_RETURN_VAL (IMG_FALSE);
+	}
+
+	/* Remove stream from TL_GLOBAL_DATA's list and free stream node */
+	psRemove->psStream = NULL;
+	RemoveAndFreeStreamNode(psRemove);
+
+	PVR_DPF_RETURN_VAL (IMG_TRUE);
+}
+
+IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psNodeToRemove,
+                                          PTL_STREAM_DESC psSD)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psNodeToRemove);
+	PVR_ASSERT(psSD);
+
+	/* Decrement the reference count. For a descriptor obtained by a reader it
+	 * must reach 0 (only a single reader is allowed) and for descriptors
+	 * obtained by writers it must stay greater than or equal to 0 (multiple
+	 * writers model). */
+	psSD->uiRefCount--;
+
+	if (psSD == psNodeToRemove->psRDesc)
+	{
+		PVR_ASSERT(0 == psSD->uiRefCount);
+		/* Remove stream descriptor (i.e. stream reader context) */
+		psNodeToRemove->psRDesc = NULL;
+	}
+	else if (psSD == psNodeToRemove->psWDesc)
+	{
+		PVR_ASSERT(0 <= psSD->uiRefCount);
+
+		psNodeToRemove->uiWRefCount--;
+
+		/* Remove stream descriptor if reference == 0 */
+		if (0 == psSD->uiRefCount)
+		{
+			psNodeToRemove->psWDesc = NULL;
+		}
+	}
+
+	/* Do not free the stream node if there is still a write reference (a
+	 * producer context) to the stream */
+	if (NULL != psNodeToRemove->psRDesc || NULL != psNodeToRemove->psWDesc ||
+	    0 != psNodeToRemove->uiWRefCount)
+	{
+		PVR_DPF_RETURN_VAL (IMG_FALSE);
+	}
+
+	/* Set the stream pointer to NULL to prevent it from being destroyed in
+	 * RemoveAndFreeStreamNode(); cleanup of the stream itself is left to the
+	 * calling context */
+	psNodeToRemove->psStream = NULL;
+	RemoveAndFreeStreamNode(psNodeToRemove);
+
+	PVR_DPF_RETURN_VAL (IMG_TRUE);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/tlserver.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/tlserver.c
new file mode 100644
index 0000000..4d7b5b7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/tlserver.c
@@ -0,0 +1,687 @@
+/*************************************************************************/ /*!
+@File
+@Title          KM server Transport Layer implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main bridge APIs for Transport Layer client functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <stddef.h>
+
+#include "img_defs.h"
+
+/*#define PVR_DPF_FUNCTION_TRACE_ON 1*/
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "connection_server.h"
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "tlintern.h"
+#include "tlstream.h"
+#include "tlserver.h"
+
+#include "pvrsrv_tlstreams.h"
+
+#define NO_STREAM_WAIT_PERIOD 2000000ULL
+#define NO_DATA_WAIT_PERIOD   1000000ULL
+#define NO_ACQUIRE            0xffffffffU
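+
+/* The two wait periods above are passed to OSEventObjectWaitTimeout() and
+ * so, assuming that API's usual microsecond contract in this driver, they
+ * correspond to 2 s and 1 s respectively. */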
+
+/*
+ * Transport Layer Client API Kernel-Mode bridge implementation
+ */
+PVRSRV_ERROR
+TLServerOpenStreamKM(const IMG_CHAR*  	   pszName,
+				     IMG_UINT32 		   ui32Mode,
+			   	     PTL_STREAM_DESC* 	   ppsSD,
+				     PMR** 				   ppsTLPMR)
+{
+	PVRSRV_ERROR 	eError = PVRSRV_OK;
+	PVRSRV_ERROR 	eErrorEO = PVRSRV_OK;
+	PTL_SNODE		psNode = 0;
+	PTL_STREAM		psStream = 0;
+	TL_STREAM_DESC* psNewSD = 0;
+	IMG_HANDLE 		hEvent;
+	IMG_BOOL		bIsWriteOnly = ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ?
+	                               IMG_TRUE : IMG_FALSE;
+	IMG_BOOL		bResetOnOpen = ui32Mode & PVRSRV_STREAM_FLAG_RESET_ON_OPEN ?
+	                               IMG_TRUE : IMG_FALSE;
+	PTL_GLOBAL_DATA psGD = TLGGD();
+
+#if defined(PVR_DPF_FUNCTION_TRACE_ON)
+    PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered (%s, %x)", __func__, __LINE__, pszName, ui32Mode));
+#endif
+
+	PVR_ASSERT(pszName);
+
+	/* Acquire the TL_GLOBAL_DATA lock here: if the TLFindStreamNodeByName
+	 * below returns a non-NULL PTL_SNODE we go on to update the global data
+	 * client count and the node's psRDesc, and we must be sure the TL_SNODE
+	 * stays valid (e.g. has not been deleted) while we update it
+	 */
+	OSLockAcquire (psGD->hTLGDLock);
+
+	psNode = TLFindStreamNodeByName(pszName);
+	if ((psNode == NULL) && (ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT))
+	{	/* Blocking code to wait for stream to be created if it does not exist */
+		eError = OSEventObjectOpen(psGD->hTLEventObj, &hEvent);
+		PVR_LOGG_IF_ERROR (eError, "OSEventObjectOpen", e0);
+
+		do
+		{
+			if ((psNode = TLFindStreamNodeByName(pszName)) == NULL)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "Stream %s does not exist, waiting...", pszName));
+
+				/* Release TL_GLOBAL_DATA lock before sleeping */
+				OSLockRelease (psGD->hTLGDLock);
+
+				/* Will exit OK or with timeout, both cases safe to ignore */
+				eErrorEO = OSEventObjectWaitTimeout(hEvent, NO_STREAM_WAIT_PERIOD);
+
+				/* Acquire lock after waking up */
+				OSLockAcquire (psGD->hTLGDLock);
+			}
+		}
+		while ((psNode == NULL) && (eErrorEO == PVRSRV_OK));
+
+		eError = OSEventObjectClose(hEvent);
+		PVR_LOGG_IF_ERROR (eError, "OSEventObjectClose", e0);
+	}
+
+	/* Make sure we have found a stream node after wait/search */
+	if (psNode == NULL)
+	{
+		/* If the wait ended with a timeout, inform the caller */
+		if (eErrorEO == PVRSRV_ERROR_TIMEOUT)
+		{
+			eError = eErrorEO;
+		}
+		else
+		{
+			eError = PVRSRV_ERROR_NOT_FOUND;
+			PVR_DPF((PVR_DBG_ERROR, "Stream \"%s\" does not exist", pszName));
+		}
+		goto e0;
+	}
+
+	psStream = psNode->psStream;
+
+	/* Allocate the shared memory for the stream if not already allocated;
+	 * this happens on the first open. */
+	eError = TLAllocSharedMemIfNull(psStream);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to allocate memory for stream"
+				" \"%s\"", pszName));
+		goto e0;
+	}
+
+	if (bIsWriteOnly)
+	{
+		/* If psWDesc == NULL this is the first attempt to open the stream
+		 * for write, so create the descriptor; otherwise reuse it and
+		 * increment its reference count. */
+		if (psNode->psWDesc == NULL)
+		{
+			psNewSD = TLMakeStreamDesc(psNode, ui32Mode, NULL);
+			psNode->psWDesc = psNewSD;
+		}
+		else
+		{
+			psNewSD = psNode->psWDesc;
+			psNode->psWDesc->uiRefCount++;
+		}
+
+		if (!psNewSD)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream"
+			        " writer descriptor"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		psNode->uiWRefCount++;
+	}
+	else
+	{
+		/* Only one reader per stream supported */
+		if (psNode->psRDesc != NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Cannot open \"%s\" stream, stream already"
+			        " opened", pszName));
+			eError = PVRSRV_ERROR_ALREADY_OPEN;
+			goto e0;
+		}
+
+		/* Create an event handle for this client to wait on when no data in
+		 * stream buffer. */
+		eError = OSEventObjectOpen(psNode->hReadEventObj, &hEvent);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Not possible to open node's event object"));
+			eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT;
+			goto e0;
+		}
+
+		psNewSD = TLMakeStreamDesc(psNode, ui32Mode, hEvent);
+		psNode->psRDesc = psNewSD;
+
+		if (!psNewSD)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream descriptor"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e1;
+		}
+
+		PVR_DPF((PVR_DBG_VERBOSE,
+		        "TLServerOpenStreamKM evList=%p, evObj=%p",
+		        psNode->hReadEventObj,
+		        psNode->psRDesc->hReadEvent));
+	}
+
+	/* Copy the import handle back to the user-mode API to enable access to
+	 * the stream buffer from the user-mode process. */
+	eError = DevmemLocalGetImportHandle(TLStreamGetBufferPointer(psStream),
+	                                    (void**) ppsTLPMR);
+	PVR_LOGG_IF_ERROR(eError, "DevmemLocalGetImportHandle", e2);
+
+	psGD->uiClientCnt++;
+
+	/* Global data updated. Now release global lock */
+	OSLockRelease (psGD->hTLGDLock);
+
+	*ppsSD = psNewSD;
+
+	if (bResetOnOpen)
+	{
+		TLStreamReset(psStream);
+	}
+
+	/* This callback is executed only on reader open. Some actions performed
+	 * on reader open don't make much sense for writers, e.g. injecting a
+	 * time-synchronisation packet into the stream. */
+	if (!bIsWriteOnly && psStream->pfOnReaderOpenCallback != NULL)
+	{
+		psStream->pfOnReaderOpenCallback(psStream->pvOnReaderOpenUserData);
+	}
+
+	/* psNode->uiWRefCount is set to '1' on stream create so the first open
+	 * is '2'. */
+	if (bIsWriteOnly && psStream->psNotifStream != NULL &&
+	    psNode->uiWRefCount == 2)
+	{
+		TLStreamMarkStreamOpen(psStream);
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Stream %s opened for %s", __func__, pszName,
+	        ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? "write" : "read"));
+
+	PVR_DPF_RETURN_OK;
+
+e2:
+	OSFreeMem(psNewSD);
+e1:
+	if (!bIsWriteOnly)
+		OSEventObjectClose(hEvent);
+e0:
+	OSLockRelease (psGD->hTLGDLock);
+	PVR_DPF_RETURN_RC (eError);
+}
+
+PVRSRV_ERROR
+TLServerCloseStreamKM(PTL_STREAM_DESC psSD)
+{
+	PVRSRV_ERROR    eError = PVRSRV_OK;
+	PTL_GLOBAL_DATA psGD = TLGGD();
+	PTL_SNODE		psNode = 0;
+	PTL_STREAM	psStream;
+	IMG_BOOL	bDestroyStream;
+	IMG_BOOL	bIsWriteOnly = psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO ?
+	                           IMG_TRUE : IMG_FALSE;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psSD);
+
+	/* Sanity check, quick exit if there are no streams */
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Check stream still valid */
+	psNode = TLFindStreamNodeByDesc(psSD);
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Since the descriptor is valid, the stream should not have been made NULL */
+	PVR_ASSERT (psNode->psStream);
+
+	/* Save the stream's reference in case its destruction is required after
+	 * this client is removed */
+	psStream = psNode->psStream;
+
+	/* Acquire the TL_GLOBAL_DATA lock as the TLUnrefDescAndTryFreeStreamNode
+	 * call below will update the TL_SNODE's descriptor value */
+	OSLockAcquire (psGD->hTLGDLock);
+
+	/* Close event handle because event object list might be destroyed in
+	 * TLUnrefDescAndTryFreeStreamNode(). */
+	if (!bIsWriteOnly)
+	{
+		/* Close and free the event handle resource used by this descriptor */
+		eError = OSEventObjectClose(psSD->hReadEvent);
+		if (eError != PVRSRV_OK)
+		{
+			/* Log error but continue as it seems best */
+			PVR_DPF((PVR_DBG_ERROR, "OSEventObjectClose() failed error %d",
+			        eError));
+			eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+		}
+	}
+	else if (psNode->uiWRefCount == 2 && psStream->psNotifStream != NULL)
+	{
+		/* psNode->uiWRefCount is set to '1' on stream create so the last close
+		 * before destruction is '2'. */
+		TLStreamMarkStreamClose(psStream);
+	}
+
+	/* Remove descriptor from stream object/list */
+	bDestroyStream = TLUnrefDescAndTryFreeStreamNode (psNode, psSD);
+
+	/* Assert the counter is sane now that the input data has been validated. */
+	PVR_ASSERT(psGD->uiClientCnt > 0);
+	psGD->uiClientCnt--;
+
+	OSLockRelease (psGD->hTLGDLock);
+
+	/* Destroy the stream if its TL_SNODE was removed from TL_GLOBAL_DATA */
+	if (bDestroyStream)
+	{
+		TLStreamDestroy (psStream);
+		psStream = NULL;
+	}
+
+	PVR_DPF((PVR_DBG_VERBOSE, "%s: Stream closed", __func__));
+
+	/* Free the descriptor if ref count reaches 0. */
+	if (psSD->uiRefCount == 0)
+	{
+		/* Free the stream descriptor object */
+		OSFreeMem(psSD);
+	}
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerReserveStreamKM(PTL_STREAM_DESC psSD,
+                        IMG_UINT32* ui32BufferOffset,
+                        IMG_UINT32 ui32Size,
+                        IMG_UINT32 ui32SizeMin,
+                        IMG_UINT32* pui32Available)
+{
+	TL_GLOBAL_DATA* psGD = TLGGD();
+	PTL_SNODE psNode = 0;
+	IMG_UINT8* pui8Buffer = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psSD);
+
+	if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Sanity check, quick exit if there are no streams */
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	/* Acquire the global lock. We have to be sure that no one modifies
+	 * the list while we are looking for our stream. */
+	OSLockAcquire(psGD->hTLGDLock);
+	/* Check stream still valid */
+	psNode = TLFindAndGetStreamNodeByDesc(psSD);
+	OSLockRelease(psGD->hTLGDLock);
+
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Since we have a valid stream descriptor, the stream should not have been
+	 * made NULL by any producer context. */
+	PVR_ASSERT (psNode->psStream);
+
+	eError = TLStreamReserve2(psNode->psStream, &pui8Buffer, ui32Size,
+	                          ui32SizeMin, pui32Available);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "Failed to reserve %u (%u, %u) bytes in the stream, error %s.", \
+				ui32Size, ui32SizeMin, *pui32Available, PVRSRVGETERRORSTRING(eError)));
+	}
+	else if (pui8Buffer == NULL)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "Not enough space in the stream."));
+		eError = PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG;
+	}
+	else
+	{
+		*ui32BufferOffset = pui8Buffer - psNode->psStream->pbyBuffer;
+		PVR_ASSERT(*ui32BufferOffset < psNode->psStream->ui32Size);
+	}
+
+	OSLockAcquire(psGD->hTLGDLock);
+	TLReturnStreamNode(psNode);
+	OSLockRelease(psGD->hTLGDLock);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerCommitStreamKM(PTL_STREAM_DESC psSD,
+                       IMG_UINT32 ui32Size)
+{
+	TL_GLOBAL_DATA*	psGD = TLGGD();
+	PTL_SNODE psNode = 0;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psSD);
+
+	if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Sanity check, quick exit if there are no streams */
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	/* Acquire the global lock. We have to be sure that no one modifies
+	 * the list while we are looking for our stream. */
+	OSLockAcquire(psGD->hTLGDLock);
+	/* Check stream still valid */
+	psNode = TLFindAndGetStreamNodeByDesc(psSD);
+	OSLockRelease(psGD->hTLGDLock);
+
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Since we have a valid stream descriptor, the stream should not have been
+	 * made NULL by any producer context. */
+	PVR_ASSERT (psNode->psStream);
+
+	eError = TLStreamCommit(psNode->psStream, ui32Size);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to commit data into stream."));
+	}
+
+	OSLockAcquire(psGD->hTLGDLock);
+	TLReturnStreamNode(psNode);
+	OSLockRelease(psGD->hTLGDLock);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern,
+                          IMG_UINT32 ui32Size,
+                          IMG_CHAR *pszStreams,
+                          IMG_UINT32 *pui32NumFound)
+{
+	PTL_SNODE psNode = NULL;
+	IMG_CHAR (*paszStreams)[PRVSRVTL_MAX_STREAM_NAME_SIZE] =
+			(IMG_CHAR (*)[PRVSRVTL_MAX_STREAM_NAME_SIZE]) pszStreams;
+
+	if (*pszNamePattern == '\0')
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	if (ui32Size % PRVSRVTL_MAX_STREAM_NAME_SIZE != 0)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	/* Sanity check, quick exit if there are no streams */
+	if (TLGGD()->psHead == NULL)
+	{
+		*pui32NumFound = 0;
+		return PVRSRV_OK;
+	}
+
+	OSLockAcquire(TLGGD()->hTLGDLock);
+
+	*pui32NumFound = TLDiscoverStreamNodes(pszNamePattern, paszStreams,
+	                                  ui32Size / PRVSRVTL_MAX_STREAM_NAME_SIZE);
+
+	/* Find "tlctrl" stream and reset it */
+	psNode = TLFindStreamNodeByName(PVRSRV_TL_CTLR_STREAM);
+	if (psNode != NULL)
+		TLStreamReset(psNode->psStream);
+
+	OSLockRelease(TLGGD()->hTLGDLock);
+
+	return PVRSRV_OK;
+}
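+
+/* Caller-side sizing sketch (illustrative only): to receive up to 8 names
+ * the caller would pass a buffer of 8 * PRVSRVTL_MAX_STREAM_NAME_SIZE bytes;
+ * ui32Size must be an exact multiple of the name size or the call fails
+ * with PVRSRV_ERROR_INVALID_PARAMS. */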
+
+PVRSRV_ERROR
+TLServerAcquireDataKM(PTL_STREAM_DESC psSD,
+		   	   		  IMG_UINT32*	  puiReadOffset,
+		   	   		  IMG_UINT32* 	  puiReadLen)
+{
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	TL_GLOBAL_DATA*		psGD = TLGGD();
+	IMG_UINT32		    uiTmpOffset = NO_ACQUIRE;
+	IMG_UINT32  		uiTmpLen = 0;
+	PTL_SNODE			psNode = 0;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psSD);
+
+	/* Sanity check, quick exit if there are no streams */
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	/* Check stream still valid */
+	psNode = TLFindStreamNodeByDesc(psSD);
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* If we are here, the stream will never be made NULL until this context
+	 * itself calls TLUnrefDescAndTryFreeStreamNode(). This is because the
+	 * producer will fail to make the stream NULL (by calling
+	 * TLTryRemoveStreamAndFreeStreamNode) while a valid stream descriptor is
+	 * present (i.e. a client is connected). Hence, no non-NULL checks on the
+	 * stream are required after this. */
+	PVR_ASSERT (psNode->psStream);
+
+	//PVR_DPF((PVR_DBG_VERBOSE, "TLServerAcquireDataKM evList=%p, evObj=%p", psSD->psNode->hReadEventObj, psSD->hReadEvent));
+
+	/* Check for data in the associated stream buffer, sleep/wait if none */
+	while (((uiTmpLen = TLStreamAcquireReadPos(psNode->psStream,
+			 psSD->ui32Flags & PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK,
+			 &uiTmpOffset)) == 0) &&
+	       (!(psSD->ui32Flags&PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) )
+	{
+		PVR_DPF((PVR_DBG_VERBOSE, "TLAcquireDataKM sleeping..."));
+
+		/* Loop around if the stream is at EOS (nothing to read) and the wait
+		 * times out; exit the loop once data is ready for the client */
+		while (TLStreamEOS(psNode->psStream))
+		{
+			eError = OSEventObjectWaitTimeout(psSD->hReadEvent, NO_DATA_WAIT_PERIOD);
+			if (eError != PVRSRV_OK)
+			{
+				/* Return the timeout or other error condition to the caller,
+				 * who can choose to call again if desired. We don't block
+				 * indefinitely as we want the user-mode application to have a
+				 * chance to break out and end if it needs to, so we return the
+				 * timeout error code. */
+				PVR_DPF((PVR_DBG_VERBOSE, "TL Server timed out"));
+				PVR_DPF_RETURN_RC(eError);
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_VERBOSE, "TL Server signalled"));
+			}
+		}
+	}
+
+	/* If we reach here in blocking mode, data is available now; in
+	 * non-blocking mode we take the values as-is, which might be all zeros. */
+	*puiReadOffset = uiTmpOffset;
+	*puiReadLen = uiTmpLen;
+
+	PVR_DPF((PVR_DBG_VERBOSE, "TLAcquireDataKM return offset=%d, len=%d bytes", *puiReadOffset, *puiReadLen));
+
+	PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLServerReleaseDataKM(PTL_STREAM_DESC psSD,
+		 	 		  IMG_UINT32  	  uiReadOffset,
+		 	 		  IMG_UINT32  	  uiReadLen)
+{
+	TL_GLOBAL_DATA*		psGD = TLGGD();
+	PTL_SNODE			psNode = 0;
+
+	PVR_DPF_ENTERED;
+
+	/* Unreferenced in release builds */
+	PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+
+	PVR_ASSERT(psSD);
+
+	/* Sanity check, quick exit if there are no streams */
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	/* Check stream still valid */
+	psNode = TLFindStreamNodeByDesc(psSD);
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Since we have a valid stream descriptor, the stream should not have been
+	 * made NULL by any producer context. */
+	PVR_ASSERT (psNode->psStream);
+
+	PVR_DPF((PVR_DBG_VERBOSE, "TLReleaseDataKM uiReadOffset=%d, uiReadLen=%d", uiReadOffset, uiReadLen));
+
+	/* Move read position on to free up space in stream buffer */
+	TLStreamAdvanceReadPos(psNode->psStream, uiReadLen);
+
+	PVR_DPF_RETURN_OK;
+}
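+
+/* A minimal reader sketch built on the two calls above (illustrative only;
+ * stream opening/closing and error handling are omitted):
+ *
+ *   IMG_UINT32 uiOffset, uiLen;
+ *   if (TLServerAcquireDataKM(psSD, &uiOffset, &uiLen) == PVRSRV_OK
+ *       && uiLen > 0)
+ *   {
+ *       // consume uiLen bytes at uiOffset in the shared stream buffer
+ *       TLServerReleaseDataKM(psSD, uiOffset, uiLen);
+ *   }
+ */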
+
+PVRSRV_ERROR
+TLServerWriteDataKM(PTL_STREAM_DESC psSD,
+                    IMG_UINT32 ui32Size,
+                    IMG_BYTE* pui8Data)
+{
+	TL_GLOBAL_DATA* psGD = TLGGD();
+	PTL_SNODE psNode = 0;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psSD);
+
+	if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Sanity check, quick exit if there are no streams */
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	OSLockAcquire(psGD->hTLGDLock);
+	/* Check stream still valid */
+	psNode = TLFindAndGetStreamNodeByDesc(psSD);
+	OSLockRelease(psGD->hTLGDLock);
+
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Since we have a valid stream descriptor, the stream should not have been
+	 * made NULL by any producer context. */
+	PVR_ASSERT (psNode->psStream);
+
+	eError = TLStreamWrite(psNode->psStream, pui8Data, ui32Size);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to write data to the stream (%d).",
+		        eError));
+	}
+
+	OSLockAcquire(psGD->hTLGDLock);
+	TLReturnStreamNode(psNode);
+	OSLockRelease(psGD->hTLGDLock);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+/*****************************************************************************
+ End of file (tlserver.c)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/tlstream.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/tlstream.c
new file mode 100644
index 0000000..9dba8aa
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/common/tlstream.c
@@ -0,0 +1,1278 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer kernel side API implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport Layer API implementation.
+                These functions are provided to driver components.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "allocmem.h"
+#include "devicemem.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "log2.h"
+
+#include "tlintern.h"
+#include "tlstream.h"
+
+#include "pvrsrv.h"
+
+#define EVENT_OBJECT_TIMEOUT_US 1000000ULL
+#define READ_PENDING_TIMEOUT_US 100000ULL
+
+/*! Compute the maximum TL packet size for this stream. The maximum packet
+ * size is the minimum of PVRSRVTL_MAX_PACKET_SIZE and (BufferSize / 2.5).
+ * This computation avoids a corner case observed when the TL buffer is
+ * smaller than twice the TL maximum packet size and the read and write
+ * indices are positioned in such a way that a TL packet (write packet +
+ * padding packet) may be bigger than the buffer itself.
+ */
+#define GET_TL_MAX_PACKET_SIZE( bufSize ) PVRSRVTL_ALIGN( MIN( PVRSRVTL_MAX_PACKET_SIZE, ( 2 * bufSize ) / 5 ) )
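+
+/* Worked example (a sketch, assuming PVRSRVTL_MAX_PACKET_SIZE is larger
+ * than the computed bound): for a 16 KiB buffer,
+ *   (2 * 16384) / 5 = 6553 bytes before PVRSRVTL_ALIGN rounding,
+ * i.e. roughly BufferSize / 2.5, so a packet plus its padding packet can
+ * never exceed the buffer itself. */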
+
+/* Given the state of the buffer it returns a number of bytes that the client
+ * can use for a successful allocation. */
+static INLINE IMG_UINT32 suggestAllocSize(IMG_UINT32 ui32LRead,
+                                          IMG_UINT32 ui32LWrite,
+                                          IMG_UINT32 ui32CBSize,
+                                          IMG_UINT32 ui32ReqSizeMin,
+                                          IMG_UINT32 ui32MaxPacketSize)
+{
+	IMG_UINT32 ui32AvSpace = 0;
+
+	/* This could be written in fewer lines using the ? operator but it
+		would not be kind to potential readers of this source at all. */
+	if ( ui32LRead > ui32LWrite )                          /* Buffer WRAPPED */
+	{
+		if ( (ui32LRead - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE) )
+		{
+			ui32AvSpace =  ui32LRead - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+		}
+	}
+	else                                                  /* Normal, no wrap */
+	{
+		if ( (ui32CBSize - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE) )
+		{
+			ui32AvSpace =  ui32CBSize - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+		}
+		else if ( (ui32LRead - 0) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE) )
+		{
+			ui32AvSpace =  ui32LRead - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+		}
+	}
+	/* The maximum size of a TL packet currently fits in a UINT16; clamp accordingly */
+	return MIN(ui32AvSpace, ui32MaxPacketSize);
+}
+
+/* Returns the number of bytes left in the buffer; negative if there are
+ * none. Two 4-byte-aligned values are reserved: one for the "write failed"
+ * buffer flag and one to distinguish the buffer-full state from the
+ * buffer-empty state.
+ * Always returns free space minus 8, even when the "write failed" packet may
+ * already be in the stream before this write. */
+static INLINE IMG_INT
+cbSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size)
+{
+	/* We need to reserve 4 bytes (one packet) in the buffer to be able to
+	 * tell empty buffers from full buffers, and one more for the write-fail
+	 * packet */
+	if ( ui32Read > ui32Write )
+	{
+		return (IMG_INT)ui32Read - (IMG_INT)ui32Write - (IMG_INT)BUFFER_RESERVED_SPACE;
+	}
+	else
+	{
+		return (IMG_INT)ui32size - ((IMG_INT)ui32Write - (IMG_INT)ui32Read) - (IMG_INT)BUFFER_RESERVED_SPACE;
+	}
+}
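+
+/* Worked example for cbSpaceLeft() (illustrative values, assuming
+ * BUFFER_RESERVED_SPACE is 8, per the two reserved 4-byte values described
+ * above): with ui32Read = 100, ui32Write = 300 and ui32size = 1024 the
+ * function returns 1024 - (300 - 100) - 8 = 816 bytes that a writer may
+ * still claim before the buffer counts as full. */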
+
+PVRSRV_ERROR TLAllocSharedMemIfNull(IMG_HANDLE hStream)
+{
+	PTL_STREAM psStream = (PTL_STREAM) hStream;
+	PVRSRV_ERROR eError;
+
+	/* CPU Local memory used as these buffers are not accessed by the device.
+	 * CPU Uncached write combine memory used to improve write performance,
+	 * memory barrier added in TLStreamCommit to ensure data written to memory
+	 * before CB write point is updated before consumption by the reader.
+	 */
+	IMG_CHAR pszBufferLabel[PRVSRVTL_MAX_STREAM_NAME_SIZE + 20];
+	DEVMEM_FLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+	                            PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+	                            PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+	                            PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+	                            PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+	                            PVRSRV_MEMALLOCFLAG_CPU_LOCAL;  // TL for now is only used by host driver, so cpulocal mem suffices
+
+	/* Exit if memory has already been allocated. */
+	if (psStream->pbyBuffer != NULL)
+		return PVRSRV_OK;
+
+	OSSNPrintf(pszBufferLabel, sizeof(pszBufferLabel), "TLStreamBuf-%s",
+	           psStream->szName);
+
+	/* Use HostMemDeviceNode instead of psStream->psDevNode to benefit from faster
+	 * accesses to CPU local memory. When the framework to access CPU_LOCAL device
+	 * memory from GPU is fixed, we'll switch back to use psStream->psDevNode for
+	 * TL buffers */
+	eError = DevmemAllocateExportable((IMG_HANDLE)PVRSRVGetPVRSRVData()->psHostMemDeviceNode,
+	                                  (IMG_DEVMEM_SIZE_T) psStream->ui32Size,
+	                                  (IMG_DEVMEM_ALIGN_T) OSGetPageSize(),
+	                                  ExactLog2(OSGetPageSize()),
+	                                  uiMemFlags,
+	                                  pszBufferLabel,
+	                                  &psStream->psStreamMemDesc);
+	PVR_LOGG_IF_ERROR(eError, "DevmemAllocateExportable", e0);
+
+	eError = DevmemAcquireCpuVirtAddr(psStream->psStreamMemDesc,
+	                                  (void**) &psStream->pbyBuffer);
+	PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e1);
+
+	return PVRSRV_OK;
+
+e1:
+	DevmemFree(psStream->psStreamMemDesc);
+e0:
+	return eError;
+}
+
+void TLFreeSharedMem(IMG_HANDLE hStream)
+{
+	PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+	if (psStream->pbyBuffer != NULL)
+	{
+		DevmemReleaseCpuVirtAddr(psStream->psStreamMemDesc);
+		psStream->pbyBuffer = NULL;
+	}
+	if (psStream->psStreamMemDesc != NULL)
+	{
+		DevmemFree(psStream->psStreamMemDesc);
+		psStream->psStreamMemDesc = NULL;
+	}
+}
+
+/*******************************************************************************
+ * TL Server public API implementation.
+ ******************************************************************************/
+PVRSRV_ERROR
+TLStreamCreate(IMG_HANDLE *phStream,
+			   PVRSRV_DEVICE_NODE *psDevNode,
+			   IMG_CHAR *szStreamName,
+			   IMG_UINT32 ui32Size,
+			   IMG_UINT32 ui32StreamFlags,
+               TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB,
+               void *pvOnReaderOpenUD,
+               TL_STREAM_SOURCECB pfProducerCB,
+               void *pvProducerUD)
+{
+	PTL_STREAM     psTmp;
+	PVRSRV_ERROR   eError;
+	IMG_HANDLE     hEventList;
+	PTL_SNODE      psn = 0;
+	TL_OPMODE      eOpMode;
+
+	PVR_DPF_ENTERED;
+	/* Sanity checks: */
+	/* a non-NULL handle pointer is required */
+	if ( NULL == phStream )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	if (szStreamName == NULL || *szStreamName == '\0' ||
+	    OSStringLength(szStreamName) >= PRVSRVTL_MAX_STREAM_NAME_SIZE)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	if ( NULL == psDevNode )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	eOpMode = ui32StreamFlags & TL_OPMODE_MASK;
+	if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST ))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid"));
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Acquire TL_GLOBAL_DATA lock here because, if the following TLFindStreamNodeByName()
+	 * returns NULL, a new TL_SNODE will be added to TL_GLOBAL_DATA's TL_SNODE list */
+	OSLockAcquire (TLGGD()->hTLGDLock);
+
+	/* Check if there already exists a stream with this name. */
+	psn = TLFindStreamNodeByName( szStreamName );
+	if ( NULL != psn )
+	{
+		eError = PVRSRV_ERROR_ALREADY_EXISTS;
+		goto e0;
+	}
+
+	/* Allocate stream structure container (stream struct) for the new stream */
+	psTmp = OSAllocZMem(sizeof(TL_STREAM));
+	if ( NULL == psTmp )
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	OSStringCopy(psTmp->szName, szStreamName);
+
+	if ( ui32StreamFlags & TL_FLAG_FORCE_FLUSH )
+	{
+		psTmp->bWaitForEmptyOnDestroy = IMG_TRUE;
+	}
+
+	psTmp->bNoSignalOnCommit = (ui32StreamFlags&TL_FLAG_NO_SIGNAL_ON_COMMIT) ?  IMG_TRUE : IMG_FALSE;
+
+	psTmp->eOpMode = eOpMode;
+
+	eError = OSEventObjectCreate(NULL, &psTmp->hProducerEventObj);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+	/* Create an event handle for this kind of stream */
+	eError = OSEventObjectOpen(psTmp->hProducerEventObj, &psTmp->hProducerEvent);
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	psTmp->pfOnReaderOpenCallback = pfOnReaderOpenCB;
+	psTmp->pvOnReaderOpenUserData = pvOnReaderOpenUD;
+	/* Remember producer supplied CB and data for later */
+	psTmp->pfProducerCallback = (void(*)(void))pfProducerCB;
+	psTmp->pvProducerUserData = pvProducerUD;
+
+	psTmp->psNotifStream = NULL;
+
+	/* Round the requested bytes to a multiple of array elements' size, eg round 3 to 4 */
+	psTmp->ui32Size = PVRSRVTL_ALIGN(ui32Size);
+	psTmp->ui32MaxPacketSize = GET_TL_MAX_PACKET_SIZE(psTmp->ui32Size);
+	psTmp->ui32Read = 0;
+	psTmp->ui32Write = 0;
+	psTmp->ui32Pending = NOTHING_PENDING;
+	psTmp->psDevNode = psDevNode;
+	psTmp->bReadPending = IMG_FALSE;
+	/* Memory will be allocated on first connect to the stream */
+	if (!(ui32StreamFlags & TL_FLAG_ALLOCATE_ON_FIRST_OPEN))
+	{
+		/* Allocate memory for the circular buffer and export it to user space. */
+		eError = TLAllocSharedMemIfNull(psTmp);
+		PVR_LOGG_IF_ERROR(eError, "TLAllocSharedMem", e3);
+	}
+
+	/* Synchronisation object to synchronise with user side data transfers. */
+	eError = OSEventObjectCreate(psTmp->szName, &hEventList);
+	if (eError != PVRSRV_OK)
+	{
+		goto e4;
+	}
+
+	eError = OSLockCreate (&psTmp->hStreamWLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e5;
+	}
+
+	eError = OSLockCreate (&psTmp->hReadLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e6;
+	}
+
+	/* Now remember the stream in the global TL structures */
+	psn = TLMakeSNode(hEventList, (TL_STREAM *)psTmp, 0);
+	if (psn == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e7;
+	}
+
+	/* Stream node created, now reset the write reference count to 1
+	 * (i.e. this context's reference) */
+	psn->uiWRefCount = 1;
+
+	TLAddStreamNode(psn);
+
+	/* Release TL_GLOBAL_DATA lock as the new TL_SNODE is now added to the list */
+	OSLockRelease (TLGGD()->hTLGDLock);
+
+	/* Best-effort signal: if it fails, the client's wait timeout will
+	 * ultimately let it find the new stream; accepting this avoids clean-up,
+	 * which is tricky at this point */
+	(void) OSEventObjectSignal(TLGGD()->hTLEventObj);
+
+	/* Pass the newly created stream handle back to caller */
+	*phStream = (IMG_HANDLE)psTmp;
+	PVR_DPF_RETURN_OK;
+
+e7:
+	OSLockDestroy(psTmp->hReadLock);
+e6:
+	OSLockDestroy(psTmp->hStreamWLock);
+e5:
+	OSEventObjectDestroy(hEventList);
+e4:
+	TLFreeSharedMem(psTmp);
+e3:
+	OSEventObjectClose(psTmp->hProducerEvent);
+e2:
+	OSEventObjectDestroy(psTmp->hProducerEventObj);
+e1:
+	OSFreeMem(psTmp);
+e0:
+	OSLockRelease (TLGGD()->hTLGDLock);
+
+	PVR_DPF_RETURN_RC(eError);
+}
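+
+/* A minimal creation sketch (illustrative only; the name, size, mode and
+ * NULL callbacks below are assumptions, not recommendations):
+ *
+ *   IMG_HANDLE hStream;
+ *   eError = TLStreamCreate(&hStream, psDevNode, "example",
+ *                           OSGetPageSize(), TL_OPMODE_DROP_NEWER,
+ *                           NULL, NULL,   // no on-reader-open callback
+ *                           NULL, NULL);  // no producer callback
+ */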
+
+void TLStreamReset(IMG_HANDLE hStream)
+{
+	PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+	PVR_ASSERT(psStream != NULL);
+
+	OSLockAcquire(psStream->hStreamWLock);
+
+	while (psStream->ui32Pending != NOTHING_PENDING)
+	{
+		PVRSRV_ERROR eError;
+
+		/* We're in the middle of a write so we cannot reset the stream.
+		 * We are going to wait until the data is committed. Release lock while
+		 * we're here. */
+		OSLockRelease(psStream->hStreamWLock);
+
+		/* Even when psStream->bNoSignalOnCommit is set we can still use
+		 * the timeout capability of the event object API (time in us). */
+		eError = OSEventObjectWaitTimeout(psStream->psNode->hReadEventObj, 100);
+		if (eError != PVRSRV_ERROR_TIMEOUT && eError != PVRSRV_OK)
+		{
+			PVR_LOGRN_IF_ERROR(eError, "OSEventObjectWaitTimeout");
+		}
+
+		OSLockAcquire(psStream->hStreamWLock);
+
+		/* Either timeout occurred or the stream has been signalled.
+		 * If former we have to check if the data was committed and if latter
+		 * if the stream hasn't been re-reserved. Either way we have to go
+		 * back to the condition.
+		 * If the stream has been released we'll exit with the lock held so
+		 * we can finally go and reset the stream. */
+	}
+
+	psStream->ui32Read = 0;
+	psStream->ui32Write = 0;
+	/* we know that ui32Pending already has correct value (no need to set) */
+
+	OSLockRelease(psStream->hStreamWLock);
+}
+
+PVRSRV_ERROR
+TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream)
+{
+	PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+	if (hStream == NULL || hNotifStream == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	psStream->psNotifStream = (PTL_STREAM) hNotifStream;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+TLStreamReconfigure(
+		IMG_HANDLE hStream,
+		IMG_UINT32 ui32StreamFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PTL_STREAM psTmp;
+	TL_OPMODE eOpMode;
+
+	PVR_DPF_ENTERED;
+
+	if ( NULL == hStream )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	eOpMode = ui32StreamFlags & TL_OPMODE_MASK;
+	if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST ))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid"));
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	psTmp = (PTL_STREAM)hStream;
+
+	/* Prevent the TL Stream buffer from being written to
+	 * while its mode is being reconfigured
+	 */
+	OSLockAcquire (psTmp->hStreamWLock);
+	if ( NOTHING_PENDING != psTmp->ui32Pending )
+	{
+		OSLockRelease (psTmp->hStreamWLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY);
+	}
+	psTmp->ui32Pending = 0;
+	OSLockRelease (psTmp->hStreamWLock);
+
+	psTmp->eOpMode = eOpMode;
+
+	OSLockAcquire (psTmp->hStreamWLock);
+	psTmp->ui32Pending = NOTHING_PENDING;
+	OSLockRelease (psTmp->hStreamWLock);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLStreamOpen(IMG_HANDLE *phStream,
+             IMG_CHAR   *szStreamName)
+{
+	PTL_SNODE  psTmpSNode;
+
+	PVR_DPF_ENTERED;
+
+	if ( NULL == phStream || NULL == szStreamName )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Acquire the TL_GLOBAL_DATA lock first to ensure that the TL_STREAM,
+	 * while it is returned and being modified, is not deleted by some other
+	 * context */
+	OSLockAcquire (TLGGD()->hTLGDLock);
+
+	/* Search for a stream node with a matching stream name */
+	psTmpSNode = TLFindStreamNodeByName(szStreamName);
+
+	if ( NULL == psTmpSNode )
+	{
+		OSLockRelease (TLGGD()->hTLGDLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_FOUND);
+	}
+
+	if (psTmpSNode->psStream->psNotifStream != NULL &&
+	    psTmpSNode->uiWRefCount == 1)
+	{
+		TLStreamMarkStreamOpen(psTmpSNode->psStream);
+	}
+
+	/* The TL_SNODE->uiWRefCount governs the presence of this node in the
+	 * TL_GLOBAL_DATA list, i.e. when uiWRefCount falls to zero we try to
+	 * remove this node from the list. Hence, it is protected by the
+	 * TL_GLOBAL_DATA lock and not the TL_STREAM lock */
+	psTmpSNode->uiWRefCount++;
+
+	OSLockRelease (TLGGD()->hTLGDLock);
+
+	/* Return the stream handle to the caller */
+	*phStream = (IMG_HANDLE)psTmpSNode->psStream;
+
+	PVR_DPF_RETURN_VAL(PVRSRV_OK);
+}
+
+void
+TLStreamClose(IMG_HANDLE hStream)
+{
+	PTL_STREAM	psTmp;
+	IMG_BOOL	bDestroyStream;
+
+	PVR_DPF_ENTERED;
+
+	if ( NULL == hStream )
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				 "TLStreamClose failed as a NULL stream handle was passed, nothing done."));
+		PVR_DPF_RETURN;
+	}
+
+	psTmp = (PTL_STREAM)hStream;
+
+	/* Acquire the TL_GLOBAL_DATA lock to update the reference count, as this
+	 * will be required in case this TL_STREAM node is to be deleted */
+	OSLockAcquire (TLGGD()->hTLGDLock);
+
+	/* Decrement write reference counter of the stream */
+	psTmp->psNode->uiWRefCount--;
+
+	if ( 0 != psTmp->psNode->uiWRefCount )
+	{
+		/* The stream is still being used in other context(s); do not destroy
+		 * anything */
+
+		/* uiWRefCount == 1 means that the stream was closed for write. The
+		 * next close pairs with TLStreamCreate(). Send a notification to
+		 * indicate that no writers are connected to the stream any more. */
+		if (psTmp->psNotifStream != NULL && psTmp->psNode->uiWRefCount == 1)
+		{
+			TLStreamMarkStreamClose(psTmp);
+		}
+
+		OSLockRelease (TLGGD()->hTLGDLock);
+		PVR_DPF_RETURN;
+	}
+	else
+	{
+		/* Now we try removing this TL_STREAM from TL_GLOBAL_DATA */
+
+		if ( psTmp->bWaitForEmptyOnDestroy == IMG_TRUE )
+		{
+			/* We won't require the TL_STREAM lock to be acquired here for accessing its read
+			 * and write offsets. REASON: We are here because there is no producer context
+			 * referencing this TL_STREAM, hence its ui32Write offset won't be changed now.
+			 * Also, the update of ui32Read offset is not protected by locks */
+			while (psTmp->ui32Read != psTmp->ui32Write)
+			{
+				/* Release lock before sleeping */
+				OSLockRelease (TLGGD()->hTLGDLock);
+
+				OSEventObjectWaitTimeout(psTmp->hProducerEvent, EVENT_OBJECT_TIMEOUT_US);
+
+				OSLockAcquire (TLGGD()->hTLGDLock);
+
+				/* Ensure destruction of stream is still required */
+				if (0 != psTmp->psNode->uiWRefCount)
+				{
+					OSLockRelease (TLGGD()->hTLGDLock);
+					PVR_DPF_RETURN;
+				}
+			}
+		}
+
+		/* Try removing the stream from TL_GLOBAL_DATA */
+		bDestroyStream = TLTryRemoveStreamAndFreeStreamNode (psTmp->psNode);
+
+		OSLockRelease (TLGGD()->hTLGDLock);
+
+		if (bDestroyStream)
+		{
+			/* Destroy the stream if it was removed from TL_GLOBAL_DATA */
+			TLStreamDestroy (psTmp);
+			psTmp = NULL;
+		}
+		PVR_DPF_RETURN;
+	}
+}
+
+static PVRSRV_ERROR
+DoTLStreamReserve(IMG_HANDLE hStream,
+				IMG_UINT8 **ppui8Data,
+				IMG_UINT32 ui32ReqSize,
+                IMG_UINT32 ui32ReqSizeMin,
+				PVRSRVTL_PACKETTYPE ePacketType,
+				IMG_UINT32* pui32AvSpace)
+{
+	PTL_STREAM psTmp;
+	IMG_UINT32 *pui32Buf, ui32LRead, ui32LWrite, ui32LPending, lReqSizeAligned, lReqSizeActual, ui32CreateFreeSpace;
+	IMG_INT pad, iFreeSpace;
+	IMG_UINT8 *pui8IncrRead = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+	if (pui32AvSpace) *pui32AvSpace = 0;
+
+	if ( NULL == hStream )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	psTmp = (PTL_STREAM)hStream;
+
+	/* Assert used as the packet type parameter is currently only provided
+	 * by the TL APIs, not the calling client */
+	PVR_ASSERT((PVRSRVTL_PACKETTYPE_UNDEF < ePacketType) && (PVRSRVTL_PACKETTYPE_LAST >= ePacketType));
+
+	/* The buffer is only used in "rounded" (aligned) chunks */
+	lReqSizeAligned = PVRSRVTL_ALIGN(ui32ReqSize);
+
+	/* Lock the stream before reading its pending value: if pending is set to
+	 * NOTHING_PENDING we update it, so that subsequent calls to this function
+	 * from other context(s) fail with PVRSRV_ERROR_NOT_READY */
+	OSLockAcquire (psTmp->hStreamWLock);
+
+	/* Get a local copy of the stream buffer parameters */
+	ui32LRead  = psTmp->ui32Read;
+	ui32LWrite = psTmp->ui32Write;
+	ui32LPending = psTmp->ui32Pending;
+
+	/*  Multiple pending reserves are not supported. */
+	if ( NOTHING_PENDING != ui32LPending )
+	{
+		OSLockRelease (psTmp->hStreamWLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY);
+	}
+
+	if ( psTmp->ui32MaxPacketSize < lReqSizeAligned )
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "Requested Size : %u > Max Packet size allowed : %u \n", lReqSizeAligned, psTmp->ui32MaxPacketSize));
+		psTmp->ui32Pending = NOTHING_PENDING;
+		if (pui32AvSpace)
+		{
+			*pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize);
+			if (*pui32AvSpace == 0 && psTmp->eOpMode == TL_OPMODE_DROP_OLDEST)
+			{
+				*pui32AvSpace = psTmp->ui32MaxPacketSize;
+				PVR_DPF((PVR_DBG_MESSAGE, "Opmode is Drop_Oldest, so Available Space changed to : %u\n", *pui32AvSpace));
+			}
+		}
+		OSLockRelease (psTmp->hStreamWLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG);
+	}
+
+	/* Prevent other threads from entering this region before we are done
+	 * updating the pending value and the write offset (in case of padding).
+	 * This is not exactly a lock but a signal to other contexts that a
+	 * TLStreamCommit operation is pending on this stream */
+	psTmp->ui32Pending = 0;
+
+	OSLockRelease (psTmp->hStreamWLock);
+
+	/* If there is not enough contiguous space following the current write
+	 * position for the packet, pad to the end of the buffer; otherwise no
+	 * padding is required */
+	if (  psTmp->ui32Size
+		< ui32LWrite + lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) )
+	{
+		pad = psTmp->ui32Size - ui32LWrite;
+	}
+	else
+	{
+		pad = 0;
+	}
+
+	lReqSizeActual = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) + pad;
+
+#if defined(DEBUG)
+	/* Sanity check that the user is not trying to add more data than the
+	 * buffer size. Conditionally compiled out to ensure this check has
+	 * no impact on release performance */
+	if ( lReqSizeAligned+sizeof(PVRSRVTL_PACKETHDR) > psTmp->ui32Size )
+	{
+		OSLockAcquire (psTmp->hStreamWLock);
+		psTmp->ui32Pending = NOTHING_PENDING;
+		OSLockRelease (psTmp->hStreamWLock);
+
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+	}
+#endif
+	iFreeSpace = cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size);
+
+	if (iFreeSpace < (IMG_INT) lReqSizeActual)
+	{
+		/* If this is a blocking reserve and there is not enough space then wait. */
+		if (psTmp->eOpMode == TL_OPMODE_BLOCK)
+		{
+			while ( ( cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)
+				 <(IMG_INT) lReqSizeActual ) )
+			{
+				/* The TL bridge is lockless now, so changing to OSEventObjectWait() */
+				OSEventObjectWait(psTmp->hProducerEvent);
+				// update local copies.
+				ui32LRead  = psTmp->ui32Read;
+				ui32LWrite = psTmp->ui32Write;
+			}
+		}
+		/* Data overwriting, also insert PACKETS_DROPPED flag into existing packet */
+		else if (psTmp->eOpMode == TL_OPMODE_DROP_OLDEST)
+		{
+			OSLockAcquire(psTmp->hReadLock);
+
+			while(psTmp->bReadPending)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "Waiting for the pending read operation to complete."));
+				OSLockRelease(psTmp->hReadLock);
+#if defined(TL_BUFFER_STATS)
+				psTmp->ui32CntWriteWaits++;
+#endif
+				eError = OSEventObjectWaitTimeout(psTmp->hProducerEvent, READ_PENDING_TIMEOUT_US);
+				OSLockAcquire(psTmp->hReadLock);
+			}
+
+#if defined(TL_BUFFER_STATS)
+			psTmp->ui32CntWriteSuccesses++;
+#endif
+			ui32LRead = psTmp->ui32Read;
+
+			if (cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)
+			     < (IMG_INT) lReqSizeActual)
+			{
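+				/* Free at least 5% of the buffer, or the full request size if
+				 * that is larger */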
+				ui32CreateFreeSpace = 5 * (psTmp->ui32Size / 100);
+				if (ui32CreateFreeSpace < lReqSizeActual)
+				{
+					ui32CreateFreeSpace = lReqSizeActual;
+				}
+
+				while(ui32CreateFreeSpace > (IMG_UINT32)cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size))
+				{
+					pui8IncrRead = &psTmp->pbyBuffer[ui32LRead];
+					ui32LRead += (sizeof(PVRSRVTL_PACKETHDR) + PVRSRVTL_ALIGN( GET_PACKET_DATA_LEN(pui8IncrRead) ));
+
+					/* Check if buffer needs to wrap */
+					if (ui32LRead >= psTmp->ui32Size)
+					{
+						ui32LRead = 0;
+					}
+				}
+				psTmp->ui32Read = ui32LRead;
+				pui8IncrRead = &psTmp->pbyBuffer[psTmp->ui32Read];
+
+				GET_PACKET_HDR(pui8IncrRead)->uiTypeSize = SET_PACKETS_DROPPED( GET_PACKET_HDR(pui8IncrRead) );
+			}
+			/* else fall through as there is enough space now to write the data */
+
+			OSLockRelease(psTmp->hReadLock);
+		}
+		/* No data overwriting, insert write_failed flag and return */
+		else if (psTmp->eOpMode == TL_OPMODE_DROP_NEWER)
+		{
+			/* The caller should not try to use the reserve pointer on this
+			 * failure; NULL it out to give the user a chance of avoiding
+			 * memory corruption */
+			*ppui8Data = NULL;
+
+			/* This flag should not be inserted two consecutive times, so
+			 * check the last ui32 in case it was a packet drop packet. */
+			pui32Buf = ui32LWrite
+			    ? (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite - sizeof(PVRSRVTL_PACKETHDR)]
+			    /* Previous four bytes are not guaranteed to be a packet header... */
+			    : (IMG_UINT32*)&psTmp->pbyBuffer[psTmp->ui32Size - PVRSRVTL_PACKET_ALIGNMENT];
+
+			if ( PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED
+				 !=
+				 GET_PACKET_TYPE( (PVRSRVTL_PACKETHDR*)pui32Buf ) )
+			{
+				/* Insert size-stamped packet header */
+				pui32Buf = (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite];
+				*pui32Buf = PVRSRVTL_SET_PACKET_WRITE_FAILED;
+				ui32LWrite += sizeof(PVRSRVTL_PACKETHDR);
+				ui32LWrite %= psTmp->ui32Size;
+				iFreeSpace -= sizeof(PVRSRVTL_PACKETHDR);
+			}
+
+			OSLockAcquire (psTmp->hStreamWLock);
+			psTmp->ui32Write = ui32LWrite;
+			psTmp->ui32Pending = NOTHING_PENDING;
+			OSLockRelease (psTmp->hStreamWLock);
+
+			if (pui32AvSpace)
+			{
+				*pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize);
+			}
+			PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG);
+		}
+	}
+
+	/* The easy case: buffer has enough space to hold the requested packet (data + header) */
+	if ( (cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size))
+		>= (IMG_INT) lReqSizeActual )
+	{
+		if ( pad )
+		{
+			/* Inserting padding packet. */
+			pui32Buf = (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite];
+			*pui32Buf = PVRSRVTL_SET_PACKET_PADDING(pad-sizeof(PVRSRVTL_PACKETHDR));
+
+			/* CAUTION: the used pad value should always result in a properly
+			 *          aligned ui32LWrite pointer, which in this case is 0 */
+			ui32LWrite = (ui32LWrite + pad) % psTmp->ui32Size;
+			/* Detect unaligned pad value */
+			PVR_ASSERT( ui32LWrite == 0);
+		}
+		/* Insert size-stamped packet header */
+		pui32Buf = (IMG_UINT32*) &psTmp->pbyBuffer[ui32LWrite];
+
+		*pui32Buf = PVRSRVTL_SET_PACKET_HDR(ui32ReqSize, ePacketType);
+
+		/* Return the next position in the buffer to the user */
+		*ppui8Data = &psTmp->pbyBuffer[ui32LWrite + sizeof(PVRSRVTL_PACKETHDR)];
+
+		/* Update the pending offset: size stamp + data */
+		ui32LPending = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR);
+	}
+	else
+	{
+		OSLockAcquire (psTmp->hStreamWLock);
+		psTmp->ui32Pending = NOTHING_PENDING;
+		OSLockRelease (psTmp->hStreamWLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	/* Acquire stream lock for updating stream parameters */
+	OSLockAcquire (psTmp->hStreamWLock);
+	psTmp->ui32Write = ui32LWrite;
+	psTmp->ui32Pending = ui32LPending;
+	OSLockRelease (psTmp->hStreamWLock);
+
+#if defined(TL_BUFFER_STATS)
+	psTmp->ui32CntNumWriteSuccess++;
+#endif
+
+	PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLStreamReserve(IMG_HANDLE hStream,
+				IMG_UINT8 **ppui8Data,
+				IMG_UINT32 ui32Size)
+{
+	return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL);
+}
+
+PVRSRV_ERROR
+TLStreamReserve2(IMG_HANDLE hStream,
+                IMG_UINT8  **ppui8Data,
+                IMG_UINT32 ui32Size,
+                IMG_UINT32 ui32SizeMin,
+                IMG_UINT32* pui32Available)
+{
+	return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32SizeMin, PVRSRVTL_PACKETTYPE_DATA, pui32Available);
+}
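+
+/* Illustrative producer-side usage of the reserve/commit protocol (a minimal
+ * sketch; the names are hypothetical and error handling is elided):
+ *
+ *   IMG_UINT8 *pui8Dest;
+ *   PVRSRV_ERROR eErr = TLStreamReserve(hMyStream, &pui8Dest, ui32Len);
+ *   if ((eErr == PVRSRV_OK) && (pui8Dest != NULL))
+ *   {
+ *       OSDeviceMemCopy(pui8Dest, pvMyData, ui32Len);  (fill the packet)
+ *       eErr = TLStreamCommit(hMyStream, ui32Len);     (publish it)
+ *   }
+ *
+ * TLStreamWrite() below wraps exactly this sequence. */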
+
+PVRSRV_ERROR
+TLStreamCommit(IMG_HANDLE hStream, IMG_UINT32 ui32ReqSize)
+{
+	PTL_STREAM psTmp;
+	IMG_UINT32 ui32LRead, ui32OldWrite, ui32LWrite, ui32LPending;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	if ( NULL == hStream )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	psTmp = (PTL_STREAM)hStream;
+
+	/* Get a local copy of the stream buffer parameters */
+	ui32LRead = psTmp->ui32Read;
+	ui32LWrite = psTmp->ui32Write;
+	ui32LPending = psTmp->ui32Pending;
+
+	ui32OldWrite = ui32LWrite;
+
+	/* Round the request up to the packet alignment and add the packet
+	 * header, matching the space that was reserved. */
+	ui32ReqSize = PVRSRVTL_ALIGN(ui32ReqSize) + sizeof(PVRSRVTL_PACKETHDR);
+
+	/* Check that a reserve is pending and that ReqSize plus the packet
+	 * header does not exceed the reserved amount. */
+	if ((ui32LPending == NOTHING_PENDING) || (ui32ReqSize > ui32LPending))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+	}
+
+	/* Update pointer to written data. */
+	ui32LWrite = (ui32LWrite + ui32ReqSize) % psTmp->ui32Size;
+
+	/* and reset LPending since the data is now submitted */
+	ui32LPending = NOTHING_PENDING;
+
+	/* Calculate high water mark for debug purposes */
+#if defined(TL_BUFFER_STATS)
+	{
+		IMG_UINT32 tmp = 0;
+		if (ui32LWrite > ui32LRead)
+		{
+			tmp = (ui32LWrite-ui32LRead);
+		}
+		else if (ui32LWrite < ui32LRead)
+		{
+			tmp = (psTmp->ui32Size-ui32LRead+ui32LWrite);
+		} /* else equal, ignore */
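+		/* Worked example (illustrative): with ui32Size = 100, ui32LRead = 90
+		 * and ui32LWrite = 10, the buffer has wrapped and the bytes in use
+		 * are 100 - 90 + 10 = 20. */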
+
+		if (tmp > psTmp->ui32BufferUt)
+		{
+			psTmp->ui32BufferUt = tmp;
+		}
+	}
+#endif
+
+	/* Memory barrier required to ensure prior data written by writer is
+	 * flushed from WC buffer to main memory. */
+	OSWriteMemoryBarrier();
+
+	/* Acquire stream lock to ensure other context(s) (if any)
+	 * wait on the lock (in DoTLStreamReserve) for consistent values
+	 * of write offset and pending value */
+	OSLockAcquire (psTmp->hStreamWLock);
+
+	/* Update stream buffer parameters to match local copies */
+	psTmp->ui32Write = ui32LWrite;
+	psTmp->ui32Pending = ui32LPending;
+
+	OSLockRelease (psTmp->hStreamWLock);
+
+	/* If we have transitioned from an empty buffer to a non-empty buffer,
+	 * signal any consumers that may be waiting */
+	if (ui32OldWrite == ui32LRead && !psTmp->bNoSignalOnCommit)
+	{
+		/* Signal consumers that may be waiting */
+		eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj);
+		if ( eError != PVRSRV_OK)
+		{
+			PVR_DPF_RETURN_RC(eError);
+		}
+	}
+	PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLStreamWrite(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size)
+{
+	IMG_BYTE *pbyDest = NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	if ( NULL == hStream )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	eError = TLStreamReserve(hStream, &pbyDest, ui32Size);
+	if ( PVRSRV_OK != eError )
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+	else if ( pbyDest )
+	{
+		OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size);
+		eError = TLStreamCommit(hStream, ui32Size);
+		if ( PVRSRV_OK != eError )
+		{
+			PVR_DPF_RETURN_RC(eError);
+		}
+	}
+	else
+	{
+		/* A NULL ptr returned from TLStreamReserve indicates the TL buffer is full */
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG);
+	}
+	PVR_DPF_RETURN_OK;
+}
+
+void TLStreamInfo(PTL_STREAM_INFO psInfo)
+{
+	IMG_DEVMEM_SIZE_T actual_req_size = 2; /* Low dummy value so the real value can be obtained */
+	IMG_DEVMEM_ALIGN_T align = 4;          /* Low dummy value so the real value can be obtained */
+
+	/* DevmemExportalignAdjustSizeAndAlign rounds both dummy values up to
+	 * the actual allocation page size and alignment. */
+	DevmemExportalignAdjustSizeAndAlign(OSGetPageShift(), &actual_req_size, &align);
+
+	psInfo->headerSize = sizeof(PVRSRVTL_PACKETHDR);
+	psInfo->minReservationSize = sizeof(IMG_UINT32);
+	psInfo->pageSize = (IMG_UINT32)(actual_req_size);
+	psInfo->pageAlign = (IMG_UINT32)(align);
+}
+
+PVRSRV_ERROR
+TLStreamMarkEOS(IMG_HANDLE psStream)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT8* pData;
+
+	PVR_DPF_ENTERED;
+
+	if ( NULL == psStream )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS, NULL);
+	if (PVRSRV_OK != eError)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	PVR_DPF_RETURN_RC(TLStreamCommit(psStream, 0));
+}
+
+
+static PVRSRV_ERROR
+_TLStreamMarkOC(IMG_HANDLE hStream, PVRSRVTL_PACKETTYPE ePacketType)
+{
+	PVRSRV_ERROR eError;
+	PTL_STREAM psStream = hStream;
+	IMG_UINT32 ui32Size;
+	IMG_UINT8 *pData;
+
+	PVR_DPF_ENTERED;
+
+	if (NULL == psStream)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	if (NULL == psStream->psNotifStream)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_NOTIF_STREAM);
+	}
+
+	ui32Size = OSStringLength(psStream->szName) + 1;
+
+	eError = DoTLStreamReserve(psStream->psNotifStream, &pData, ui32Size,
+	                           ui32Size, ePacketType, NULL);
+	if ( PVRSRV_OK != eError)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	OSDeviceMemCopy(pData, psStream->szName, ui32Size);
+
+	PVR_DPF_RETURN_RC(TLStreamCommit(psStream->psNotifStream, ui32Size));
+}
+
+PVRSRV_ERROR
+TLStreamMarkStreamOpen(IMG_HANDLE psStream)
+{
+	return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE);
+}
+
+PVRSRV_ERROR
+TLStreamMarkStreamClose(IMG_HANDLE psStream)
+{
+	return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE);
+}
+
+PVRSRV_ERROR
+TLStreamSync(IMG_HANDLE psStream)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PTL_STREAM   psTmp;
+
+	PVR_DPF_ENTERED;
+
+	if ( NULL == psStream )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	psTmp = (PTL_STREAM)psStream;
+
+	/* If read client exists and has opened stream in blocking mode,
+	 * signal when data is available to read. */
+	if (psTmp->psNode->psRDesc &&
+		 (!(psTmp->psNode->psRDesc->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) &&
+			psTmp->ui32Read != psTmp->ui32Write)
+	{
+		eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj);
+	}
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+/*
+ * Internal stream APIs to server part of Transport Layer, declared in
+ * header tlintern.h. Direct pointers to stream objects are used here as
+ * these functions are internal.
+ */
+IMG_UINT32
+TLStreamAcquireReadPos(PTL_STREAM psStream,
+                       IMG_BOOL bDisableCallback,
+                       IMG_UINT32* puiReadOffset)
+{
+	IMG_UINT32 uiReadLen = 0;
+	IMG_UINT32 ui32LRead, ui32LWrite;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psStream);
+	PVR_ASSERT(puiReadOffset);
+
+	if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+	{
+		if (!OSTryLockAcquire(psStream->hReadLock))
+		{
+			PVR_DPF((PVR_DBG_WARNING, "Read lock on the stream is acquired by some writer, "
+						  "hence reader failed to acquire read lock."));
+#if defined(TL_BUFFER_STATS)
+			psStream->ui32CntReadFails++;
+#endif
+			PVR_DPF_RETURN_VAL(0);
+		}
+	}
+
+#if defined(TL_BUFFER_STATS)
+	psStream->ui32CntReadSuccesses++;
+#endif
+
+	/* Grab a local copy */
+	ui32LRead = psStream->ui32Read;
+	ui32LWrite = psStream->ui32Write;
+
+	if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+	{
+		psStream->bReadPending = IMG_TRUE;
+		OSLockRelease(psStream->hReadLock);
+	}
+
+	/* No data available and CB defined - try and get data */
+	if ((ui32LRead == ui32LWrite) && psStream->pfProducerCallback && !bDisableCallback)
+	{
+		PVRSRV_ERROR eRc;
+		IMG_UINT32   ui32Resp = 0;
+
+		eRc = ((TL_STREAM_SOURCECB)psStream->pfProducerCallback)(psStream, TL_SOURCECB_OP_CLIENT_EOS,
+				&ui32Resp, psStream->pvProducerUserData);
+		PVR_LOG_IF_ERROR(eRc, "TLStream->pfProducerCallback");
+
+		ui32LWrite = psStream->ui32Write;
+	}
+
+	/* No data available... */
+	if (ui32LRead == ui32LWrite)
+	{
+		if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+		{
+			psStream->bReadPending = IMG_FALSE;
+		}
+		PVR_DPF_RETURN_VAL(0);
+	}
+
+	/* Data is available to read... */
+	*puiReadOffset = ui32LRead;
+
+	/*PVR_DPF((PVR_DBG_VERBOSE,
+	 *		"TLStreamAcquireReadPos Start before: Write:%d, Read:%d, size:%d",
+	 *		ui32LWrite, ui32LRead, psStream->ui32Size));
+	 */
+
+	if (ui32LRead > ui32LWrite)
+	{	/* The circular buffer has wrapped around.
+		 * Return the first contiguous piece of memory, i.e. [ReadLen, EndOfBuffer],
+		 * and let a subsequent AcquireReadPos read the rest of the buffer. */
+		/*PVR_DPF((PVR_DBG_VERBOSE, "TLStreamAcquireReadPos buffer has wrapped"));*/
+		uiReadLen = psStream->ui32Size - ui32LRead;
+	}
+	else
+	{	/* The circular buffer has not wrapped */
+		uiReadLen = ui32LWrite - ui32LRead;
+	}
+
+	PVR_DPF_RETURN_VAL(uiReadLen);
+}
+
+void
+TLStreamAdvanceReadPos(PTL_STREAM psStream, IMG_UINT32 uiReadLen)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psStream);
+
+	/*
+	 * This API does not use Read lock as 'bReadPending' is sufficient
+	 * to keep Read index safe by preventing a write from updating the
+	 * index and 'bReadPending' itself is safe as it can only be modified
+	 * by readers and there can be only one reader in action at a time.
+	 */
+
+	/* Update the read offset by the length provided, in a circular manner.
+	 * The update is assumed to be atomic, so locks are avoided. */
+	psStream->ui32Read = (psStream->ui32Read + uiReadLen) % psStream->ui32Size;
+
+	if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+	{
+		psStream->bReadPending = IMG_FALSE;
+	}
+
+	/* notify reserves that may be pending */
+	/* The producer event object is used to signal the StreamReserve if the TL
+	 * Buffer is in blocking mode and is full.
+	 * Previously this event was only signalled if the buffer was created in
+	 * blocking mode. Since the buffer mode can now change dynamically the event
+	 * is signalled every time to avoid any potential race where the signal is
+	 * required, but not produced.
+	 */
+	{
+		PVRSRV_ERROR eError;
+		eError = OSEventObjectSignal(psStream->hProducerEventObj);
+		if ( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					 "Error in TLStreamAdvanceReadPos: OSEventObjectSignal returned:%u",
+					 eError));
+		}
+	}
+
+	PVR_DPF((PVR_DBG_VERBOSE,
+			 "TLStreamAdvanceReadPos Read now at: %d",
+			psStream->ui32Read));
+	PVR_DPF_RETURN;
+}
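+
+/* Illustrative consumer-side usage of the internal read APIs (a minimal
+ * sketch; the names are hypothetical):
+ *
+ *   IMG_UINT32 uiOffset = 0;
+ *   IMG_UINT32 uiLen = TLStreamAcquireReadPos(psMyStream, IMG_FALSE, &uiOffset);
+ *   if (uiLen > 0)
+ *   {
+ *       (copy uiLen bytes starting at psMyStream->pbyBuffer[uiOffset])
+ *       TLStreamAdvanceReadPos(psMyStream, uiLen);
+ *   }
+ *
+ * When the buffer has wrapped, Acquire returns only the first contiguous
+ * region, so a second Acquire/Advance pair may be needed. */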
+
+void
+TLStreamDestroy (PTL_STREAM psStream)
+{
+	PVR_ASSERT (psStream);
+
+	OSLockDestroy (psStream->hStreamWLock);
+	OSLockDestroy (psStream->hReadLock);
+
+	OSEventObjectClose(psStream->hProducerEvent);
+	OSEventObjectDestroy(psStream->hProducerEventObj);
+
+	TLFreeSharedMem(psStream);
+	OSFreeMem(psStream);
+}
+
+DEVMEM_MEMDESC*
+TLStreamGetBufferPointer(PTL_STREAM psStream)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psStream);
+
+	PVR_DPF_RETURN_VAL(psStream->psStreamMemDesc);
+}
+
+IMG_BOOL
+TLStreamEOS(PTL_STREAM psStream)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psStream);
+
+	/* If both pointers are equal then the buffer is empty */
+	PVR_DPF_RETURN_VAL( psStream->ui32Read == psStream->ui32Write );
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/debugmisc_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/debugmisc_server.c
new file mode 100644
index 0000000..972ac0a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/debugmisc_server.c
@@ -0,0 +1,268 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debugging and miscellaneous functions server implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel services functions for debugging and other
+                miscellaneous functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "debugmisc_server.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "pdump_km.h"
+#include "mmu_common.h"
+#include "devicemem_server.h"
+#include "osfunc.h"
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDebugMiscSLCSetBypassStateKM(
+	CONNECTION_DATA * psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  uiFlags,
+	IMG_BOOL bSetBypassed)
+{
+	RGXFWIF_KCCB_CMD  sSLCBPCtlCmd;
+	PVRSRV_ERROR  eError = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	sSLCBPCtlCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCBPCTL;
+	sSLCBPCtlCmd.uCmdData.sSLCBPCtlData.bSetBypassed = bSetBypassed;
+	sSLCBPCtlCmd.uCmdData.sSLCBPCtlData.uiFlags = uiFlags;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+	                            RGXFWIF_DM_GP,
+	                            &sSLCBPCtlCmd,
+	                            sizeof(sSLCBPCtlCmd),
+	                            0,
+	                            PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscSLCSetBypassStateKM: RGXScheduleCommand failed. Error:%u", eError));
+	}
+	else
+	{
+		/* Wait for the SLC flush to complete */
+		eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVDebugMiscSLCSetEnableStateKM: Waiting for value aborted with error (%u)", eError));
+		}
+	}
+
+	return eError;
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscQueryFWLogKM(
+	const CONNECTION_DATA *psConnection,
+	const PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32 *pui32RGXFWLogType)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	if (!psDeviceNode || !pui32RGXFWLogType)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	if (!psDevInfo || !psDevInfo->psRGXFWIfTraceBuf)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pui32RGXFWLogType = psDevInfo->psRGXFWIfTraceBuf->ui32LogType;
+	return PVRSRV_OK;
+}
+
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetFWLogKM(
+	const CONNECTION_DATA * psConnection,
+	const PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32RGXFWLogType)
+{
+	RGXFWIF_KCCB_CMD sLogTypeUpdateCmd;
+	PVRSRV_DEV_POWER_STATE ePowerState;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* check log type is valid */
+	if (ui32RGXFWLogType & ~RGXFWIF_LOG_TYPE_MASK)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* set the new log type and ensure the new log type is written to memory
+	 * before requesting the FW to read it
+	 */
+	psDevInfo->psRGXFWIfTraceBuf->ui32LogType = ui32RGXFWLogType;
+	OSMemoryBarrier();
+
+	/* Allocate firmware trace buffer resource(s) if not already done */
+	if (RGXTraceBufferIsInitRequired(psDevInfo))
+	{
+		RGXTraceBufferInitOnDemandResources(psDevInfo);
+	}
+
+	eError = PVRSRVPowerLock((const PPVRSRV_DEVICE_NODE) psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire power lock (%u)", __func__, eError));
+		return eError;
+	}
+
+	eError = PVRSRVGetDevicePowerState((const PPVRSRV_DEVICE_NODE) psDeviceNode, &ePowerState);
+
+	if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+	{
+		/* Ask the FW to update its cached version of logType value */
+		sLogTypeUpdateCmd.eCmdType = RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE;
+
+		eError = RGXSendCommand(psDevInfo,
+		                        RGXFWIF_DM_GP,
+		                        &sLogTypeUpdateCmd,
+		                        sizeof(sLogTypeUpdateCmd),
+		                        PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: RGXSendCommand failed. Error:%u", __func__, eError));
+		}
+		else
+		{
+			/* Give up the power lock as it is acquired in RGXWaitForFWOp */
+			PVRSRVPowerUnlock((const PPVRSRV_DEVICE_NODE) psDeviceNode);
+
+			/* Wait for the LogType value to be updated */
+			eError = RGXWaitForFWOp(psDevInfo, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"%s: Waiting for value aborted with error (%u)", __func__, eError));
+			}
+			return eError;
+		}
+	}
+
+	PVRSRVPowerUnlock((const PPVRSRV_DEVICE_NODE) psDeviceNode);
+	return eError;
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetHCSDeadlineKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32HCSDeadlineMS)
+{
+	PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	return RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadlineMS);
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetOSidPriorityKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32OSid,
+	IMG_UINT32  ui32OSidPriority)
+{
+	PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	return RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32OSidPriority);
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetOSNewOnlineStateKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32OSid,
+	IMG_UINT32  ui32OSNewState)
+{
+	PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (ui32OSNewState)
+	{
+		return RGXFWSetVMOnlineState(psDevInfo, ui32OSid, RGXFWIF_OS_ONLINE);
+	}
+
+	return RGXFWSetVMOnlineState(psDevInfo, ui32OSid, RGXFWIF_OS_OFFLINE);
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscDumpFreelistPageListKM(
+	CONNECTION_DATA * psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+	DLLIST_NODE *psNode, *psNext;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	if (dllist_is_empty(&psDevInfo->sFreeListHead))
+	{
+		return PVRSRV_OK;
+	}
+
+	PVR_LOG(("---------------[ Begin Freelist Page List Dump ]------------------"));
+
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+	{
+		RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+		RGXDumpFreeListPageList(psFreeList);
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	PVR_LOG(("----------------[ End Freelist Page List Dump ]-------------------"));
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/debugmisc_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/debugmisc_server.h
new file mode 100644
index 0000000..af20f3f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/debugmisc_server.h
@@ -0,0 +1,108 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debugging and miscellaneous functions server interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel services functions for debugging and other
+                miscellaneous functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(DEBUGMISC_SERVER_H)
+#define DEBUGMISC_SERVER_H
+
+#include <img_defs.h>
+#include <pvrsrv_error.h>
+#include <device.h>
+#include <pmr.h>
+
+#include "connection_server.h"
+
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDebugMiscSLCSetBypassStateKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  uiFlags,
+	IMG_BOOL  bSetBypassed);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDebugMiscInitFWImageKM(
+	PMR *psFWImgDestPMR,
+	PMR *psFWImgSrcPMR,
+	IMG_UINT64 ui64FWImgLen,
+	PMR *psFWImgSigPMR,
+	IMG_UINT64 ui64FWSigLen);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscQueryFWLogKM(
+	const CONNECTION_DATA *psConnection,
+	const PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32 *pui32RGXFWLogType);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetFWLogKM(
+	const CONNECTION_DATA *psConnection,
+	const PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32RGXFWLogType);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetHCSDeadlineKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32HCSDeadlineMS);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetOSidPriorityKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32OSid,
+	IMG_UINT32  ui32OSidPriority);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetOSNewOnlineStateKM(
+	CONNECTION_DATA *psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32OSid,
+	IMG_UINT32  ui32OSNewState);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscDumpFreelistPageListKM(
+	CONNECTION_DATA * psConnection,
+	PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/env/linux/km/rgxfwload.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/env/linux/km/rgxfwload.c
new file mode 100644
index 0000000..bd38231
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/env/linux/km/rgxfwload.c
@@ -0,0 +1,317 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services firmware load and access routines for Linux
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/firmware.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "device.h"
+#include "module_common.h"
+#include "rgxfwload.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+
+struct RGXFW
+{
+	const struct firmware sFW;
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) && defined(RGX_FW_SIGNED)
+
+/* The Linux kernel does not support the RSA PSS padding mode. It only
+ * supports the legacy PKCS#1 padding mode.
+ */
+#if defined(RGX_FW_PKCS1_PSS_PADDING)
+#error Linux does not support verification of RSA PSS padded signatures
+#endif
+
+#include <crypto/public_key.h>
+#include <crypto/hash_info.h>
+#include <crypto/hash.h>
+
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+
+#include "signfw.h"
+
+static bool VerifyFirmware(const struct firmware *psFW)
+{
+	struct FirmwareSignatureHeader *psHeader;
+	struct public_key_signature *psPKS;
+	unsigned char *szKeyID, *pcKeyID;
+	size_t uDigestSize, uDescSize;
+	void *pvSignature, *pvSigner;
+	struct crypto_shash *psTFM;
+	struct shash_desc *psDesc;
+	uint32_t ui32SignatureLen;
+	bool bVerified = false;
+	key_ref_t hKey;
+	uint8_t i;
+	int res;
+
+	if (psFW->size < FW_SIGN_BACKWARDS_OFFSET)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Firmware is too small (%zu bytes)",
+								__func__, psFW->size));
+		goto err_release_firmware;
+	}
+
+	psHeader = (struct FirmwareSignatureHeader *)
+					(psFW->data + (psFW->size - FW_SIGN_BACKWARDS_OFFSET));
+
+	/* All derived from u8 so can't be exploited to flow out of this page */
+	pvSigner    = (u8 *)psHeader + sizeof(struct FirmwareSignatureHeader);
+	pcKeyID     = (unsigned char *)((u8 *)pvSigner + psHeader->ui8SignerLen);
+	pvSignature = (u8 *)pcKeyID + psHeader->ui8KeyIDLen;
+
+	/* We cannot update KERNEL_RO in-place, so we must copy the len */
+	ui32SignatureLen = ntohl(psHeader->ui32SignatureLen);
+
+	if (psHeader->ui8Algo >= PKEY_ALGO__LAST)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Public key algorithm %u is not supported",
+								__func__, psHeader->ui8Algo));
+		goto err_release_firmware;
+	}
+
+	if (psHeader->ui8HashAlgo >= PKEY_HASH__LAST)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Hash algorithm %u is not supported",
+								__func__, psHeader->ui8HashAlgo));
+		goto err_release_firmware;
+	}
+
+	if (psHeader->ui8IDType != PKEY_ID_X509)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Only asymmetric X.509 PKI certificates "
+								"are supported", __func__));
+		goto err_release_firmware;
+	}
+
+	/* Generate a hash of the fw data (including the padding) */
+
+	psTFM = crypto_alloc_shash(hash_algo_name[psHeader->ui8HashAlgo], 0, 0);
+	if (IS_ERR(psTFM))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: crypto_alloc_shash() failed (%ld)",
+								__func__, PTR_ERR(psTFM)));
+		goto err_release_firmware;
+	}
+
+	uDescSize = crypto_shash_descsize(psTFM) + sizeof(*psDesc);
+	uDigestSize = crypto_shash_digestsize(psTFM);
+
+	psPKS = kzalloc(sizeof(*psPKS) + uDescSize + uDigestSize, GFP_KERNEL);
+	if (!psPKS)
+		goto err_free_crypto_shash;
+
+	psDesc = (struct shash_desc *)((u8 *)psPKS + sizeof(*psPKS));
+	psDesc->tfm = psTFM;
+	psDesc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	psPKS->pkey_algo = psHeader->ui8Algo;
+	psPKS->pkey_hash_algo = psHeader->ui8HashAlgo;
+
+	psPKS->digest = (u8 *)psPKS + sizeof(*psPKS) + uDescSize;
+	psPKS->digest_size = uDigestSize;
+
+	res = crypto_shash_init(psDesc);
+	if (res < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: crypto_shash_init() failed (%d)",
+								__func__, res));
+		goto err_free_pks;
+	}
+
+	res = crypto_shash_finup(psDesc, psFW->data, psFW->size - FW_SIGN_BACKWARDS_OFFSET,
+							 psPKS->digest);
+	if (res < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: crypto_shash_finup() failed (%d)",
+								__func__, res));
+		goto err_free_pks;
+	}
+
+	/* Populate the MPI with the signature payload */
+
+	psPKS->nr_mpi = 1;
+	psPKS->rsa.s = mpi_read_raw_data(pvSignature, ui32SignatureLen);
+	if (!psPKS->rsa.s)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: mpi_read_raw_data() failed", __func__));
+		goto err_free_pks;
+	}
+
+	/* Look up the key we'll use to verify this signature */
+
+	szKeyID = kmalloc(psHeader->ui8SignerLen + 2 +
+					  psHeader->ui8KeyIDLen * 2 + 1, GFP_KERNEL);
+	if (!szKeyID)
+		goto err_free_mpi;
+
+	memcpy(szKeyID, pvSigner, psHeader->ui8SignerLen);
+
+	szKeyID[psHeader->ui8SignerLen + 0] = ':';
+	szKeyID[psHeader->ui8SignerLen + 1] = ' ';
+
+	for (i = 0; i < psHeader->ui8KeyIDLen; i++)
+		sprintf(&szKeyID[psHeader->ui8SignerLen + 2 + i * 2],
+				"%02x", pcKeyID[i]);
+
+	szKeyID[psHeader->ui8SignerLen + 2 + psHeader->ui8KeyIDLen * 2] = 0;
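+
+	/* Illustrative result (hypothetical values): for signer "Example Vendor"
+	 * and key ID bytes DE AD BE EF, szKeyID is "Example Vendor: deadbeef". */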
+
+	hKey = keyring_search(make_key_ref(system_trusted_keyring, 1),
+						  &key_type_asymmetric, szKeyID);
+	if (IS_ERR(hKey))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Request for unknown key '%s' (%ld)",
+								szKeyID, PTR_ERR(hKey)));
+		goto err_free_keyid_string;
+	}
+
+	res = verify_signature(key_ref_to_ptr(hKey), psPKS);
+	if (res)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Firmware digital signature verification "
+								"failed (%d)", __func__, res));
+		goto err_put_key;
+	}
+
+	PVR_LOG(("Digital signature for '%s' verified successfully.",
+			 RGX_FW_FILENAME));
+	bVerified = true;
+err_put_key:
+	key_put(key_ref_to_ptr(hKey));
+err_free_keyid_string:
+	kfree(szKeyID);
+err_free_mpi:
+	mpi_free(psPKS->rsa.s);
+err_free_pks:
+	kfree(psPKS);
+err_free_crypto_shash:
+	crypto_free_shash(psTFM);
+err_release_firmware:
+	return bVerified;
+}
+
+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) && defined(RGX_FW_SIGNED) */
+
+static inline bool VerifyFirmware(const struct firmware *psFW)
+{
+	return true;
+}
+
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) && defined(RGX_FW_SIGNED) */
+
+IMG_INTERNAL struct RGXFW *
+RGXLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR *pszBVNCString, const IMG_CHAR *pszBVpNCString)
+{
+	const struct firmware *psFW;
+	int res;
+
+	if (pszBVNCString != NULL)
+	{
+		res = request_firmware(&psFW, pszBVNCString, psDeviceNode->psDevConfig->pvOSDevice);
+		if (res != 0)
+		{
+			if (pszBVpNCString != NULL)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') failed (%d), trying '%s'",
+										__func__, pszBVNCString, res, pszBVpNCString));
+				res = request_firmware(&psFW, pszBVpNCString, psDeviceNode->psDevConfig->pvOSDevice);
+			}
+			if (res != 0)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') failed (%d), trying '%s'",
+										__func__, pszBVpNCString, res, RGX_FW_FILENAME));
+				res = request_firmware(&psFW, RGX_FW_FILENAME, psDeviceNode->psDevConfig->pvOSDevice);
+			}
+		}
+	}
+	else
+	{
+		res = request_firmware(&psFW, RGX_FW_FILENAME, psDeviceNode->psDevConfig->pvOSDevice);
+	}
+	if (res != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: request_firmware('%s') failed (%d)",
+								__func__, RGX_FW_FILENAME, res));
+		return NULL;
+	}
+
+	if (!VerifyFirmware(psFW))
+	{
+		release_firmware(psFW);
+		return NULL;
+	}
+
+	return (struct RGXFW *)psFW;
+}
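+
+/* Illustrative fallback order (hypothetical file names): a caller passing
+ * pszBVNCString = "rgx.fw.4.40.2.51" and pszBVpNCString = "rgx.fw.4.40.51"
+ * causes request_firmware() to try each name in turn, ending with the
+ * generic RGX_FW_FILENAME if neither BVNC-specific image is present. */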
+
+IMG_INTERNAL void
+RGXUnloadFirmware(struct RGXFW *psRGXFW)
+{
+	const struct firmware *psFW = &psRGXFW->sFW;
+
+	release_firmware(psFW);
+}
+
+IMG_INTERNAL size_t
+RGXFirmwareSize(struct RGXFW *psRGXFW)
+{
+	const struct firmware *psFW = &psRGXFW->sFW;
+	return psFW->size;
+}
+
+IMG_INTERNAL const void *
+RGXFirmwareData(struct RGXFW *psRGXFW)
+{
+	const struct firmware *psFW = &psRGXFW->sFW;
+
+	return psFW->data;
+}
+
+/******************************************************************************
+ End of file (rgxfwload.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxbreakpoint.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxbreakpoint.c
new file mode 100644
index 0000000..7183c314
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxbreakpoint.c
@@ -0,0 +1,290 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Breakpoint routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Breakpoint routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxbreakpoint.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxmem.h"
+#include "device.h"
+#include "sync_internal.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA    * psConnection,
+                                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                                      IMG_HANDLE           hMemCtxPrivData,
+                                      RGXFWIF_DM           eFWDataMaster,
+                                      IMG_UINT32           ui32BPAddr,
+                                      IMG_UINT32           ui32HandlerAddr,
+                                      IMG_UINT32           ui32DataMaster)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC		*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	if (psDevInfo->bBPSet == IMG_TRUE)
+		return PVRSRV_ERROR_BP_ALREADY_SET;
+	
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.ui32BPAddr = ui32BPAddr;
+	sBPCmd.uCmdData.sBPData.ui32HandlerAddr = ui32HandlerAddr;
+	sBPCmd.uCmdData.sBPData.ui32BPDM = ui32DataMaster;
+	sBPCmd.uCmdData.sBPData.bEnable = IMG_TRUE;
+	sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_WRITE;
+
+	RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+				psFWMemContextMemDesc,
+				0,
+				RFW_FWADDR_NOREF_FLAG);
+		
+	eError = RGXScheduleCommand(psDevInfo,
+				eFWDataMaster,
+				&sBPCmd,
+				sizeof(sBPCmd),
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDevInfo, eFWDataMaster, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXSetBreakpointKM: Wait for completion aborted with error (%u)", eError));
+		return eError;
+	}
+
+	psDevInfo->eBPDM = eFWDataMaster;
+	psDevInfo->bBPSet = IMG_TRUE;
+	
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA    * psConnection,
+                                        PVRSRV_DEVICE_NODE * psDeviceNode,
+                                        IMG_HANDLE           hMemCtxPrivData)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC		*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.ui32BPAddr = 0;
+	sBPCmd.uCmdData.sBPData.ui32HandlerAddr = 0;
+	sBPCmd.uCmdData.sBPData.bEnable = IMG_FALSE;
+	sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_CTL;
+	
+	RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+				psFWMemContextMemDesc,
+				0,
+				RFW_FWADDR_NOREF_FLAG);
+
+	eError = RGXScheduleCommand(psDevInfo,
+				psDevInfo->eBPDM,
+				&sBPCmd,
+				sizeof(sBPCmd),
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDevInfo, psDevInfo->eBPDM, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXClearBreakpointKM: Wait for completion aborted with error (%u)", eError));
+		return eError;
+	}
+
+	psDevInfo->bBPSet = IMG_FALSE;
+	
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA    * psConnection,
+                                         PVRSRV_DEVICE_NODE * psDeviceNode,
+                                         IMG_HANDLE           hMemCtxPrivData)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC		*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (psDevInfo->bBPSet == IMG_FALSE)
+		return PVRSRV_ERROR_BP_NOT_SET;
+	
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.bEnable = IMG_TRUE;
+	sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_CTL;
+	
+	RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+				psFWMemContextMemDesc,
+				0,
+				RFW_FWADDR_NOREF_FLAG);
+
+	eError = RGXScheduleCommand(psDevInfo,
+				psDevInfo->eBPDM,
+				&sBPCmd,
+				sizeof(sBPCmd),
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDevInfo, psDevInfo->eBPDM, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXEnableBreakpointKM: Wait for completion aborted with error (%u)", eError));
+		return eError;
+	}
+	
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA    * psConnection,
+                                          PVRSRV_DEVICE_NODE * psDeviceNode,
+                                          IMG_HANDLE           hMemCtxPrivData)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC		*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	if (psDevInfo->bBPSet == IMG_FALSE)
+		return PVRSRV_ERROR_BP_NOT_SET;
+	
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.bEnable = IMG_FALSE;
+	sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_CTL;
+	
+	RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+				psFWMemContextMemDesc,
+				0,
+				RFW_FWADDR_NOREF_FLAG);
+	
+	eError = RGXScheduleCommand(psDevInfo,
+				psDevInfo->eBPDM,
+				&sBPCmd,
+				sizeof(sBPCmd),
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDevInfo, psDevInfo->eBPDM, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXDisableBreakpointKM: Wait for completion aborted with error (%u)", eError));
+		return eError;
+	}
+				
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA    * psConnection,
+                                                PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                IMG_UINT32           ui32TempRegs,
+                                                IMG_UINT32           ui32SharedRegs)
+{
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_REGS;
+	sBPCmd.uCmdData.sBPData.ui32TempRegs = ui32TempRegs;
+	sBPCmd.uCmdData.sBPData.ui32SharedRegs = ui32SharedRegs;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sBPCmd,
+				sizeof(sBPCmd),
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXOverallocateBPRegistersKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXOverallocateBPRegistersKM: Wait for completion aborted with error (%u)", eError));
+		return eError;
+	}
+
+	return eError;
+}
+
+
+/******************************************************************************
+ End of file (rgxbreakpoint.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxbreakpoint.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxbreakpoint.h
new file mode 100644
index 0000000..fc66568
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxbreakpoint.h
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX breakpoint functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX breakpoint functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXBREAKPOINT_H__)
+#define __RGXBREAKPOINT_H__
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXSetBreakpointKM
+
+ @Description
+	Server-side implementation of RGXSetBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+ @Input eFWDataMaster - Firmware Data Master to schedule the command for
+ @Input ui32BPAddr - Address of the breakpoint
+ @Input ui32HandlerAddr - Address of the breakpoint handler
+ @Input ui32DataMaster - Data Master the breakpoint applies to
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA    * psConnection,
+                                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                                      IMG_HANDLE           hMemCtxPrivData,
+                                      RGXFWIF_DM           eFWDataMaster,
+                                      IMG_UINT32           ui32BPAddr,
+                                      IMG_UINT32           ui32HandlerAddr,
+                                      IMG_UINT32           ui32DataMaster);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXClearBreakpointKM
+
+ @Description
+	Server-side implementation of RGXClearBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA    * psConnection,
+                                        PVRSRV_DEVICE_NODE * psDeviceNode,
+                                        IMG_HANDLE           hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXEnableBreakpointKM
+
+ @Description
+	Server-side implementation of RGXEnableBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA    * psConnection,
+                                         PVRSRV_DEVICE_NODE * psDeviceNode,
+                                         IMG_HANDLE           hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXDisableBreakpointKM
+
+ @Description
+	Server-side implementation of RGXDisableBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA    * psConnection,
+                                          PVRSRV_DEVICE_NODE * psDeviceNode,
+                                          IMG_HANDLE           hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXOverallocateBPRegistersKM
+
+ @Description
+	Server-side implementation of RGXOverallocateBPRegisters
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui32TempRegs - Number of temporary registers to overallocate
+ @Input ui32SharedRegs - Number of shared registers to overallocate
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA    * psConnection,
+                                                PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                IMG_UINT32           ui32TempRegs,
+                                                IMG_UINT32           ui32SharedRegs);
+#endif /* __RGXBREAKPOINT_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxccb.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxccb.c
new file mode 100644
index 0000000..a4066e1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxccb.c
@@ -0,0 +1,2042 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX CCB routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX CCB routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "rgxdevice.h"
+#include "pdump_km.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "rgxfwutils.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgx_memallocflags.h"
+#include "devicemem_pdump.h"
+#include "dllist.h"
+#include "rgx_fwif_shared.h"
+#include "rgxtimerquery.h"
+#if defined(LINUX)
+#include "trace_events.h"
+#endif
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "rgxutils.h"
+
+/*
+*  Defines the number of fence updates to record so that future fences in the CCB
+*  can be checked to see if they are already known to be satisfied.
+*/
+#define RGX_CCCB_FENCE_UPDATE_LIST_SIZE  (32)
+
+#define RGX_UFO_PTR_ADDR(ufoptr)			(((ufoptr)->puiAddrUFO.ui32Addr) & 0xFFFFFFFC)
+#define RGX_UFO_IS_SYNC_CHECKPOINT(ufoptr)	(((ufoptr)->puiAddrUFO.ui32Addr) & 0x1)
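+/* Illustrative sketch (not part of this driver): UFO addresses are 4-byte
+ * aligned, so bit 0 is free to tag an entry as a sync checkpoint, and masking
+ * with 0xFFFFFFFC recovers the real address. A stand-alone model of the two
+ * macros above:
+ *
+ *   #include <assert.h>
+ *   #include <stdint.h>
+ *
+ *   int main(void)
+ *   {
+ *       uint32_t addr   = 0x00401000;              // aligned UFO address
+ *       uint32_t tagged = addr | 0x1;              // mark as sync checkpoint
+ *       assert(tagged & 0x1);                      // IS_SYNC_CHECKPOINT test
+ *       assert((tagged & 0xFFFFFFFC) == addr);     // PTR_ADDR recovery
+ *       return 0;
+ *   }
+ */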
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+
+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD 0x1
+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED 0x2
+
+typedef struct _RGX_CLIENT_CCB_UTILISATION_
+{
+	/* The threshold in bytes.
+	 * When the CCB utilisation hits this threshold we will print
+	 * a warning message.
+	 */
+	IMG_UINT32 ui32ThresholdBytes;
+	/* Maximum cCCB usage at some point in time */
+	IMG_UINT32 ui32HighWaterMark;
+	/* Keep track of the warnings already printed:
+	 * a bit mask of PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_xyz values.
+	 */
+	IMG_UINT32 ui32Warnings;
+} RGX_CLIENT_CCB_UTILISATION;
+
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+
+struct _RGX_CLIENT_CCB_ {
+	volatile RGXFWIF_CCCB_CTL	*psClientCCBCtrl;				/*!< CPU mapping of the CCB control structure used by the fw */
+	IMG_UINT8					*pui8ClientCCB;					/*!< CPU mapping of the CCB */
+	DEVMEM_MEMDESC 				*psClientCCBMemDesc;			/*!< MemDesc for the CCB */
+	DEVMEM_MEMDESC 				*psClientCCBCtrlMemDesc;		/*!< MemDesc for the CCB control */
+	IMG_UINT32					ui32HostWriteOffset;			/*!< CCB write offset from the driver side */
+	IMG_UINT32					ui32LastPDumpWriteOffset;		/*!< CCB write offset from the last time we submitted a command in capture range */
+	IMG_UINT32					ui32LastROff;					/*!< Last CCB Read offset to help detect any CCB wedge */
+	IMG_UINT32					ui32LastWOff;					/*!< Last CCB Write offset to help detect any CCB wedge */
+	IMG_UINT32					ui32ByteCount;					/*!< Count of the number of bytes written to CCCB */
+	IMG_UINT32					ui32LastByteCount;				/*!< Last value of ui32ByteCount to help detect any CCB wedge */
+	IMG_UINT32					ui32Size;						/*!< Size of the CCB */
+	DLLIST_NODE					sNode;							/*!< Node used to store this CCB on the per connection list */
+	PDUMP_CONNECTION_DATA		*psPDumpConnectionData;			/*!< Pointer to the per connection data in which we reside */
+	void						*hTransition;					/*!< Handle for Transition callback */
+	IMG_CHAR					szName[MAX_CLIENT_CCB_NAME];	/*!< Name of this client CCB */
+	RGX_SERVER_COMMON_CONTEXT   *psServerCommonContext;     	/*!< Parent server common context that this CCB belongs to */
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+	RGX_CCB_REQUESTOR_TYPE				eRGXCCBRequestor;
+	RGX_CLIENT_CCB_UTILISATION		sUtilisation;				/*!< CCB utilisation data */
+#endif
+#if defined(DEBUG)
+	IMG_UINT32					ui32UpdateEntries;				/*!< Number of Fence Updates in asFenceUpdateList */
+	RGXFWIF_UFO					asFenceUpdateList[RGX_CCCB_FENCE_UPDATE_LIST_SIZE];  /*!< List of recent updates written in this CCB */
+#endif
+};
+
+
+/* Forms a table, with an array of strings for each requestor type (listed in the RGX_CCB_REQUESTORS X macro), to be used for
+   DevMemAllocation comments and PDump comments. Each tuple in the table consists of 3 strings:
+	{ "FwClientCCB:" <requestor_name>, "FwClientCCBControl:" <requestor_name>, <requestor_name> },
+   The first string is used as the comment when allocating the ClientCCB for the given requestor, the second for the CCBControl
+   structure, and the third for use in PDUMP comments. The number of tuples in the table must adhere to the following
+   build assert. */
+IMG_CHAR *const aszCCBRequestors[][3] =
+{
+#define REQUESTOR_STRING(prefix,req) #prefix ":" #req
+#define FORM_REQUESTOR_TUPLE(req) { REQUESTOR_STRING(FwClientCCB,req), REQUESTOR_STRING(FwClientCCBControl,req), #req },
+	RGX_CCB_REQUESTORS(FORM_REQUESTOR_TUPLE)
+#undef FORM_REQUESTOR_TUPLE
+};
+/* The number of tuples in the above table must always equal the number of entries in the RGX_CCB_REQUESTORS X macro list.
+   In the event that the value of DPX_MAX_RAY_CONTEXTS changes to, say, 'n', the appropriate entry/entries up to FC[n-1] must be added to
+   the RGX_CCB_REQUESTORS list. */
+static_assert((sizeof(aszCCBRequestors)/(3*sizeof(aszCCBRequestors[0][0]))) == (REQ_TYPE_FIXED_COUNT + DPX_MAX_RAY_CONTEXTS + 1),
+			  "Mismatch between aszCCBRequestors table and DPX_MAX_RAY_CONTEXTS");
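+/* Illustrative sketch (not part of this driver): the table above is built
+ * with the X-macro idiom. Assuming a hypothetical two-entry requestor list,
+ * the expansion works like this:
+ *
+ *   #define MY_REQUESTORS(X) X(TA) X(3D)
+ *   #define REQ_STRING(prefix,req) #prefix ":" #req
+ *   #define FORM_TUPLE(req) { REQ_STRING(FwClientCCB,req), \
+ *                             REQ_STRING(FwClientCCBControl,req), #req },
+ *   const char *const my_table[][3] = { MY_REQUESTORS(FORM_TUPLE) };
+ *   // expands to:
+ *   //   { "FwClientCCB:TA", "FwClientCCBControl:TA", "TA" },
+ *   //   { "FwClientCCB:3D", "FwClientCCBControl:3D", "3D" },
+ */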
+
+IMG_EXPORT PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB,
+						IMG_UINT32 ui32PDumpFlags)
+{
+
+	PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+						  "cCCB(%s@%p): Draining CCB rgxfw_roff == woff (%d)",
+						  psClientCCB->szName,
+						  psClientCCB,
+						  psClientCCB->ui32LastPDumpWriteOffset);
+
+	return DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc,
+									offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+									psClientCCB->ui32LastPDumpWriteOffset,
+									0xffffffff,
+									PDUMP_POLL_OPERATOR_EQUAL,
+									ui32PDumpFlags);
+}
+
+static PVRSRV_ERROR _RGXCCBPDumpTransition(void **pvData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags)
+{
+	RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData;
+	
+	/*
+		We're about to Transition into capture range and we've submitted
+		new commands since the last time we entered capture range so drain
+		the CCB as required
+	*/
+	if (bInto)
+	{
+		volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl;
+		PVRSRV_ERROR eError;
+
+		/*
+			Wait for the FW to catch up (the retry will get pushed back out to the
+			services client, where we wait on the event object and try again later)
+		*/
+		if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset)
+		{
+			return PVRSRV_ERROR_RETRY;
+		}
+
+		/*
+			We drain whenever capture range is entered. Even if no commands
+			have been issued while we were out of capture range we have to wait for
+			operations that we might have issued in the last capture range
+			to finish, so the sync prim update that will happen after all the
+			PDumpTransition callbacks have been called doesn't clobber syncs
+			which the FW is currently working on.
+			Although this is suboptimal (while out of capture range we serialise
+			the PDump script processing and the FW for every persistent
+			operation), there is no easy solution:
+			not all modules that work on syncs register a PDumpTransition and
+			thus we have no way of knowing whether we can skip the drain and the
+			sync prim dump or not.
+		*/
+
+		eError = RGXCCBPDumpDrainCCB(psClientCCB, ui32PDumpFlags);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "_RGXCCBPDumpTransition: problem pdumping POL for cCCBCtl (%d)", eError));
+		}
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		/*
+			If new command(s) have been written out of capture range then we
+			need to fast forward past uncaptured operations.
+		*/
+		if (psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset)
+		{
+			/*
+				There are commands that were not captured, so after the
+				simulation drain (above) we also need to fast-forward past
+				those commands so the FW can start with the 1st command
+				which is in the new capture range
+			 */
+			psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
+			psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
+			psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset;
+	
+			PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+								  "cCCB(%s@%p): Fast-forward from %d to %d",
+								  psClientCCB->szName,
+								  psClientCCB,
+								  psClientCCB->ui32LastPDumpWriteOffset,
+								  psClientCCB->ui32HostWriteOffset);
+	
+			DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+							   0,
+							   sizeof(RGXFWIF_CCCB_CTL),
+							   ui32PDumpFlags);
+			
+			/*
+				Although we've entered capture range we might not do any work
+				on this CCB, so update ui32LastPDumpWriteOffset to reflect
+				where we got to, so that next time we start the drain from
+				where we got to last time
+			*/
+			psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
+		}
+	}
+	return PVRSRV_OK;
+}
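+/* Illustrative sketch (not part of this driver): the transition logic above
+ * reduces to "drain, then fast-forward anything that was not captured". In
+ * outline, using the field names from the structures above:
+ *
+ *   if (ctl->ui32ReadOffset != ccb->ui32HostWriteOffset)
+ *       return PVRSRV_ERROR_RETRY;            // FW not idle yet, retry later
+ *   drain();                                  // PDump poll: roff == woff
+ *   if (ccb->ui32LastPDumpWriteOffset != ccb->ui32HostWriteOffset) {
+ *       // skip commands that were written out of capture range
+ *       ctl->ui32ReadOffset  = ccb->ui32HostWriteOffset;
+ *       ctl->ui32DepOffset   = ccb->ui32HostWriteOffset;
+ *       ctl->ui32WriteOffset = ccb->ui32HostWriteOffset;
+ *       ccb->ui32LastPDumpWriteOffset = ccb->ui32HostWriteOffset;
+ *   }
+ */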
+
+#if defined (PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+
+static INLINE void _RGXInitCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+	psClientCCB->sUtilisation.ui32HighWaterMark = 0; /* initialize ui32HighWaterMark level to zero */
+	psClientCCB->sUtilisation.ui32ThresholdBytes = (psClientCCB->ui32Size *
+							PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD)	/ 100;
+	psClientCCB->sUtilisation.ui32Warnings = 0;
+}
+
+static INLINE void _RGXPrintCCBUtilisationWarning(RGX_CLIENT_CCB *psClientCCB,
+									IMG_UINT32 ui32WarningType,
+									IMG_UINT32 ui32CmdSize)
+{
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE)
+	if(ui32WarningType == PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED)
+	{
+		PVR_LOG(("Failed to acquire CCB space for %u byte command:", ui32CmdSize));
+	}
+
+	PVR_LOG(("%s: Client CCB (%s) watermark (%u) hit %d%% of its allocation size (%u)",
+								__FUNCTION__,
+								psClientCCB->szName,
+								psClientCCB->sUtilisation.ui32HighWaterMark,
+								psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size,
+								psClientCCB->ui32Size));
+#else
+	PVR_UNREFERENCED_PARAMETER(ui32WarningType);
+	PVR_UNREFERENCED_PARAMETER(ui32CmdSize);
+
+	PVR_LOG(("GPU %s command buffer usage high (%u). This is not an error but the application may not run optimally.",
+							aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+							psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size));
+#endif
+}
+
+static INLINE void _RGXCCBUtilisationEvent(RGX_CLIENT_CCB *psClientCCB,
+						IMG_UINT32 ui32WarningType,
+						IMG_UINT32 ui32CmdSize)
+{
+	/* In VERBOSE mode we will print a message for each different
+	 * event type as it happens,
+	 * but by default we will only issue one message.
+	 */
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE)
+	if(!(psClientCCB->sUtilisation.ui32Warnings & ui32WarningType))
+#else
+	if(!psClientCCB->sUtilisation.ui32Warnings)
+#endif
+	{
+		_RGXPrintCCBUtilisationWarning(psClientCCB,
+						ui32WarningType,
+						ui32CmdSize);
+		/* record that we have issued a warning of this type */
+		psClientCCB->sUtilisation.ui32Warnings |= ui32WarningType;
+	}
+}
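+/* Illustrative sketch (not part of this driver): the filter above is the
+ * plain "warn once" bitmask latch. A stand-alone model of the idiom, with
+ * hypothetical names:
+ *
+ *   #include <stdint.h>
+ *   #include <stdio.h>
+ *
+ *   static uint32_t ui32Warned;                // one bit per warning type
+ *
+ *   static void warn_once(uint32_t ui32Type, const char *pszMsg)
+ *   {
+ *       if (!(ui32Warned & ui32Type)) {        // first occurrence only
+ *           puts(pszMsg);
+ *           ui32Warned |= ui32Type;            // latch so we never repeat
+ *       }
+ *   }
+ */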
+
+/* Check the current CCB utilisation. Print a one-time warning message if it is above the
+ * specified threshold
+ */
+static INLINE void _RGXCheckCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+	/* Print a warning message if the cCCB watermark is above the threshold value */
+	if(psClientCCB->sUtilisation.ui32HighWaterMark >= psClientCCB->sUtilisation.ui32ThresholdBytes)
+	{
+		_RGXCCBUtilisationEvent(psClientCCB,
+					PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD,
+					0);
+	}
+}
+
+/* Update the cCCB high watermark level if necessary */
+static void _RGXUpdateCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+	IMG_UINT32 ui32FreeSpace, ui32MemCurrentUsage;
+
+	ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+									  psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+									  psClientCCB->ui32Size);
+	ui32MemCurrentUsage = psClientCCB->ui32Size - ui32FreeSpace;
+
+	if (ui32MemCurrentUsage > psClientCCB->sUtilisation.ui32HighWaterMark)
+	{
+		psClientCCB->sUtilisation.ui32HighWaterMark = ui32MemCurrentUsage;
+
+		/* The high water mark has increased. Check if it is above the
+		 * threshold so we can print a warning if necessary.
+		 */
+		_RGXCheckCCBUtilisation(psClientCCB);
+	}
+}
+
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
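+/* Illustrative sketch (not part of this driver): GET_CCB_SPACE is defined in
+ * a header not shown in this patch; the sketch below assumes the usual
+ * power-of-two ring formula, (read - write - 1) modulo size, which keeps one
+ * byte unused so that "full" and "empty" remain distinguishable:
+ *
+ *   #include <assert.h>
+ *   #include <stdint.h>
+ *
+ *   static uint32_t ccb_space(uint32_t woff, uint32_t roff, uint32_t size)
+ *   {
+ *       return (roff - woff + (size - 1)) & (size - 1);   // size is 2^n
+ *   }
+ *
+ *   int main(void)
+ *   {
+ *       assert(ccb_space(0, 0, 4096) == 4095);      // empty ring
+ *       assert(ccb_space(4000, 100, 4096) == 195);  // wrapped producer
+ *       return 0;
+ *   }
+ */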
+
+PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO	*psDevInfo,
+						  IMG_UINT32			ui32CCBSizeLog2,
+						  CONNECTION_DATA		*psConnectionData,
+						  RGX_CCB_REQUESTOR_TYPE		eRGXCCBRequestor,
+						  RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+						  RGX_CLIENT_CCB		**ppsClientCCB,
+						  DEVMEM_MEMDESC 		**ppsClientCCBMemDesc,
+						  DEVMEM_MEMDESC 		**ppsClientCCBCtrlMemDesc)
+{
+	PVRSRV_ERROR	eError;
+	DEVMEM_FLAGS_T	uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags;
+	IMG_UINT32		ui32AllocSize = (1U << ui32CCBSizeLog2);
+	RGX_CLIENT_CCB	*psClientCCB;
+
+	/* All client CCBs should be at least the "minimum" size declared by the API */
+	PVR_ASSERT (ui32CCBSizeLog2 >= MIN_SAFE_CCB_SIZE_LOG2);
+
+	psClientCCB = OSAllocMem(sizeof(*psClientCCB));
+	if (psClientCCB == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+	psClientCCB->psServerCommonContext = psServerCommonContext;
+
+	uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+								PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+								PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+								PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+								PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+								PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+								PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+	uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+								PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+								PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+								PVRSRV_MEMALLOCFLAG_UNCACHED |
+								PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+								PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+	PDUMPCOMMENT("Allocate RGXFW cCCB");
+	eError = DevmemFwAllocate(psDevInfo,
+										ui32AllocSize,
+										uiClientCCBMemAllocFlags,
+										aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING],
+										&psClientCCB->psClientCCBMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB (%s)",
+				PVRSRVGetErrorStringKM(eError)));
+		goto fail_alloc_ccb;
+	}
+
+
+	eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc,
+									  (void **) &psClientCCB->pui8ClientCCB);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB (%s)",
+				PVRSRVGetErrorStringKM(eError)));
+		goto fail_map_ccb;
+	}
+
+	PDUMPCOMMENT("Allocate RGXFW cCCB control");
+	eError = DevmemFwAllocate(psDevInfo,
+										sizeof(RGXFWIF_CCCB_CTL),
+										uiClientCCBCtlMemAllocFlags,
+										aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING],
+										&psClientCCB->psClientCCBCtrlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB control (%s)",
+				PVRSRVGetErrorStringKM(eError)));
+		goto fail_alloc_ccbctrl;
+	}
+
+
+	eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc,
+									  (void **) &psClientCCB->psClientCCBCtrl);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB (%s)",
+				PVRSRVGetErrorStringKM(eError)));
+		goto fail_map_ccbctrl;
+	}
+
+	psClientCCB->psClientCCBCtrl->ui32WriteOffset = 0;
+	psClientCCB->psClientCCBCtrl->ui32ReadOffset = 0;
+	psClientCCB->psClientCCBCtrl->ui32DepOffset = 0;
+	psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1;
+	OSSNPrintf(psClientCCB->szName, MAX_CLIENT_CCB_NAME, "%s-P%lu-T%lu-%s",
+									aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+									(unsigned long) OSGetCurrentClientProcessIDKM(),
+									(unsigned long) OSGetCurrentClientThreadIDKM(),
+									OSGetCurrentClientProcessNameKM());
+
+	PDUMPCOMMENT("cCCB control");
+	DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+					   0,
+					   sizeof(RGXFWIF_CCCB_CTL),
+					   PDUMP_FLAGS_CONTINUOUS);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	psClientCCB->ui32HostWriteOffset = 0;
+	psClientCCB->ui32LastPDumpWriteOffset = 0;
+	psClientCCB->ui32Size = ui32AllocSize;
+	psClientCCB->ui32LastROff = ui32AllocSize - 1;
+	psClientCCB->ui32ByteCount = 0;
+	psClientCCB->ui32LastByteCount = 0;
+
+#if defined(DEBUG)
+	psClientCCB->ui32UpdateEntries = 0;
+#endif
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+	_RGXInitCCBUtilisation(psClientCCB);
+	psClientCCB->eRGXCCBRequestor = eRGXCCBRequestor;
+#endif
+	eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData,
+											  _RGXCCBPDumpTransition,
+											  psClientCCB,
+											  &psClientCCB->hTransition);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_pdumpreg;
+	}
+
+	/*
+	 * Note:
+	 * Save the PDump specific structure, which is ref counted unlike
+	 * the connection data, to ensure it's not freed too early
+	 */
+	psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData;
+	PDUMPCOMMENT("New RGXFW cCCB(%s@%p) created",
+				 psClientCCB->szName,
+				 psClientCCB);
+
+	*ppsClientCCB = psClientCCB;
+	*ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc;
+	*ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc;
+	return PVRSRV_OK;
+
+fail_pdumpreg:
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
+fail_map_ccbctrl:
+	DevmemFwFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc);
+fail_alloc_ccbctrl:
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+fail_map_ccb:
+	DevmemFwFree(psDevInfo, psClientCCB->psClientCCBMemDesc);
+fail_alloc_ccb:
+	OSFreeMem(psClientCCB);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
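+/* Illustrative sketch (not part of this driver): RGXCreateCCB above uses the
+ * classic kernel "goto unwind" idiom - each failure label undoes exactly the
+ * steps that succeeded before it, in reverse order. A minimal model with
+ * hypothetical alloc_a()/alloc_b()/free_a() helpers:
+ *
+ *   int create(void)
+ *   {
+ *       int err = alloc_a(&a);
+ *       if (err) goto fail_a;
+ *       err = alloc_b(&b);
+ *       if (err) goto fail_b;
+ *       return 0;                // success: keep both allocations
+ *
+ *   fail_b:
+ *       free_a(a);               // unwind only what succeeded
+ *   fail_a:
+ *       return err;
+ *   }
+ */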
+
+void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB)
+{
+	PDumpUnregisterTransitionCallback(psClientCCB->hTransition);
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
+	DevmemFwFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc);
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+	DevmemFwFree(psDevInfo, psClientCCB->psClientCCBMemDesc);
+	OSFreeMem(psClientCCB);
+}
+
+
+/******************************************************************************
+ FUNCTION	: RGXAcquireCCB
+
+ PURPOSE	: Obtains access to write some commands to a CCB
+
+ PARAMETERS	: psClientCCB		- The client CCB
+			  ui32CmdSize		- How much space is required
+			  ppvBufferSpace	- Pointer to space in the buffer
+			  ui32PDumpFlags - Should this be PDump continuous?
+
+ RETURNS	: PVRSRV_ERROR
+******************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
+										IMG_UINT32		ui32CmdSize,
+										void			**ppvBufferSpace,
+										IMG_UINT32		ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_BOOL	bInCaptureRange;
+	IMG_BOOL	bPdumpEnabled;
+
+	PDumpIsCaptureFrameKM(&bInCaptureRange);
+	bPdumpEnabled = (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags));
+
+	/*
+		PDumpSetFrame will detect the Transition into capture range for
+		frame-based data, but if we are PDumping continuous data then we
+		need to inform the PDump layer ourselves
+	*/
+	if (PDUMP_IS_CONTINUOUS(ui32PDumpFlags) && !bInCaptureRange)
+	{
+		eError = PDumpTransition(psClientCCB->psPDumpConnectionData, IMG_TRUE, PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	/* Check that the CCB can hold this command + padding */
+	if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)",
+								ui32CmdSize, psClientCCB->ui32Size));
+		return PVRSRV_ERROR_CMD_TOO_BIG;
+	}
+
+	/*
+		Check we don't overflow the end of the buffer and make sure we have
+		enough space for the padding command. If we don't have enough space
+		(including the minimum amount for the padding command) we need to
+		insert a padding command now and wrap before adding the main command.
+	*/
+	if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size)
+	{
+		/*
+			The command can fit without wrapping...
+		*/
+		IMG_UINT32 ui32FreeSpace;
+
+#if defined(PDUMP)
+		/* Wait for sufficient CCB space to become available */
+		PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
+								ui32CmdSize, psClientCCB->ui32HostWriteOffset,
+								psClientCCB->szName);
+		DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+					   offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+					   psClientCCB->ui32HostWriteOffset,
+					   ui32CmdSize,
+					   psClientCCB->ui32Size);
+#endif
+
+		ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+									  psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+									  psClientCCB->ui32Size);
+
+		/* Don't allow all the space to be used */
+		if (ui32FreeSpace > ui32CmdSize)
+		{
+			*ppvBufferSpace = (void *) (psClientCCB->pui8ClientCCB +
+										psClientCCB->ui32HostWriteOffset);
+			return PVRSRV_OK;
+		}
+
+		goto e_retry;
+	}
+	else
+	{
+		/*
+			We're at the end of the buffer without enough contiguous space.
+			The command cannot fit without wrapping, we need to insert a
+			padding command and wrap. We need to do this in one go otherwise
+			we would be leaving unflushed commands and forcing the client to
+			deal with flushing the padding command but not the command they
+			wanted to write. Therefore we either do all or nothing.
+		*/
+		RGXFWIF_CCB_CMD_HEADER *psHeader;
+		IMG_UINT32 ui32FreeSpace;
+		IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
+
+#if defined(PDUMP)
+		/* Wait for sufficient CCB space to become available */
+		PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
+								ui32Remain, psClientCCB->ui32HostWriteOffset,
+								psClientCCB->szName);
+		DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+					   offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+					   psClientCCB->ui32HostWriteOffset,
+					   ui32Remain,
+					   psClientCCB->ui32Size);
+		PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
+								ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */,
+								psClientCCB->szName);
+		DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+					   offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+					   0 /*ui32HostWriteOffset after wrap */,
+					   ui32CmdSize,
+					   psClientCCB->ui32Size);
+#endif
+
+		ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+									  psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+									  psClientCCB->ui32Size);
+
+		/* Don't allow all the space to be used */
+		if (ui32FreeSpace > ui32Remain + ui32CmdSize)
+		{
+			psHeader = (void *) (psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset);
+			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING;
+			psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+			PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize);
+			if (bPdumpEnabled)
+			{
+				DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+								   psClientCCB->ui32HostWriteOffset,
+								   ui32Remain,
+								   ui32PDumpFlags);
+			}
+			
+			*ppvBufferSpace = (void *) (psClientCCB->pui8ClientCCB +
+										0 /*ui32HostWriteOffset after wrap */);
+			return PVRSRV_OK;
+		}
+
+		goto e_retry;
+	}
+e_retry:
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+	_RGXCCBUtilisationEvent(psClientCCB,
+				PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED,
+				ui32CmdSize);
+#endif  /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+	return PVRSRV_ERROR_RETRY;
+}
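+/* Illustrative sketch (not part of this driver): the acquire path above has
+ * only two cases - either the command (plus worst-case padding) fits before
+ * the buffer end, or a padding command consumes the tail and the allocation
+ * restarts at offset 0. In outline, as pseudocode with hypothetical names:
+ *
+ *   if (woff + cmd_size + PADDING_COMMAND_SIZE <= ccb_size) {
+ *       if (free_space > cmd_size)            // never fill the ring entirely
+ *           return base + woff;
+ *   } else {
+ *       remain = ccb_size - woff;
+ *       if (free_space > remain + cmd_size) { // room for padding AND command
+ *           write_padding_header(base + woff, remain);
+ *           return base;                      // command starts after the wrap
+ *       }
+ *   }
+ *   return PVRSRV_ERROR_RETRY;                // caller waits and retries
+ */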
+
+/******************************************************************************
+ FUNCTION	: RGXReleaseCCB
+
+ PURPOSE	: Release a CCB that we have been writing to.
+
+ PARAMETERS	: psDevData			- device data
+  			  psCCB				- the CCB
+
+ RETURNS	: None
+******************************************************************************/
+IMG_INTERNAL void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
+								IMG_UINT32		ui32CmdSize,
+								IMG_UINT32		ui32PDumpFlags)
+{
+	IMG_BOOL	bInCaptureRange;
+	IMG_BOOL	bPdumpEnabled;
+
+	PDumpIsCaptureFrameKM(&bInCaptureRange);
+	bPdumpEnabled = (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags));
+	
+	/*
+	 *  If a padding command was needed then we should now move ui32HostWriteOffset
+	 *  forward. The command has already been dumped (if bPdumpEnabled).
+	 */
+	if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) > psClientCCB->ui32Size)
+	{
+		IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
+
+		UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
+						  ui32Remain,
+						  psClientCCB->ui32Size);
+		psClientCCB->ui32ByteCount += ui32Remain;
+	}
+
+	/* Dump the CCB data */
+	if (bPdumpEnabled)
+	{
+		DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+						   psClientCCB->ui32HostWriteOffset,
+						   ui32CmdSize,
+						   ui32PDumpFlags);
+	}
+	
+	/*
+	 *  Check if there are any fences being written that will already be
+	 *  satisfied by the last written update command in this CCB. At the
+	 *  same time we can ASSERT that all sync addresses are not NULL.
+	 */
+#if defined(DEBUG)
+	{
+		IMG_UINT8  *pui8BufferStart = (void *)((uintptr_t)psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset);
+		IMG_UINT8  *pui8BufferEnd   = (void *)((uintptr_t)psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset + ui32CmdSize);
+		IMG_BOOL   bMessagePrinted  = IMG_FALSE;
+
+		/* Walk through the commands in this section of CCB being released... */
+		while (pui8BufferStart < pui8BufferEnd)
+		{
+			RGXFWIF_CCB_CMD_HEADER  *psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8BufferStart;
+
+			if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
+			{
+				/* If an UPDATE then record the values in case an adjacent fence uses them. */
+				IMG_UINT32   ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+				RGXFWIF_UFO  *psUFOPtr   = (RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER));
+				
+				psClientCCB->ui32UpdateEntries = 0;
+				while (ui32NumUFOs-- > 0)
+				{
+					PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+					if (psClientCCB->ui32UpdateEntries < RGX_CCCB_FENCE_UPDATE_LIST_SIZE)
+					{
+						psClientCCB->asFenceUpdateList[psClientCCB->ui32UpdateEntries++] = *psUFOPtr++;
+					}
+				}
+			}
+			else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE)
+			{
+				/* If a FENCE then check the values against the last UPDATE issued. */
+				IMG_UINT32   ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+				RGXFWIF_UFO  *psUFOPtr   = (RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER));
+				
+				while (ui32NumUFOs-- > 0)
+				{
+					PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+
+					if (bMessagePrinted == IMG_FALSE)
+					{
+						RGXFWIF_UFO  *psUpdatePtr = psClientCCB->asFenceUpdateList;
+						IMG_UINT32  ui32UpdateIndex;
+
+						for (ui32UpdateIndex = 0;  ui32UpdateIndex < psClientCCB->ui32UpdateEntries;  ui32UpdateIndex++)
+						{
+							if (RGX_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+							{
+								if (RGX_UFO_PTR_ADDR(psUFOPtr) == RGX_UFO_PTR_ADDR(psUpdatePtr))
+								{
+									PVR_DPF((PVR_DBG_MESSAGE, "Redundant sync checkpoint check found in cCCB(%p) - 0x%x -> 0x%x",
+											psClientCCB, RGX_UFO_PTR_ADDR(psUFOPtr), psUFOPtr->ui32Value));
+									bMessagePrinted = IMG_TRUE;
+									break;
+								}
+							}
+							else
+							{
+								if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr  &&
+									psUFOPtr->ui32Value == psUpdatePtr->ui32Value)
+								{
+									PVR_DPF((PVR_DBG_MESSAGE, "Redundant fence check found in cCCB(%p) - 0x%x -> 0x%x",
+											psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+									bMessagePrinted = IMG_TRUE;
+									break;
+								}
+							}
+							psUpdatePtr++;
+						}
+					}
+
+					psUFOPtr++;
+				}
+			}
+			else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR  ||
+					 psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE)
+			{
+				/* For all other UFO ops check the UFO address is not NULL. */
+				IMG_UINT32   ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+				RGXFWIF_UFO  *psUFOPtr   = (RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+				while (ui32NumUFOs-- > 0)
+				{
+					PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+					psUFOPtr++;
+				}
+			}
+
+			/* Move to the next command in this section of CCB being released... */
+			pui8BufferStart += sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHeader->ui32CmdSize;
+		}
+	}
+#endif /* DEBUG */
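+/* Illustrative sketch (not part of this driver): the DEBUG walk above relies
+ * on every CCB entry being a fixed header followed by ui32CmdSize bytes of
+ * payload, so a region can be scanned with no other framing:
+ *
+ *   uint8_t *p = start;
+ *   while (p < end) {
+ *       RGXFWIF_CCB_CMD_HEADER *hdr = (RGXFWIF_CCB_CMD_HEADER *) p;
+ *       // inspect hdr->eCmdType and the payload at p + sizeof(*hdr)
+ *       p += sizeof(*hdr) + hdr->ui32CmdSize;
+ *   }
+ */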
+
+	/*
+	 * Update the CCB write offset.
+	 */
+	UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
+					  ui32CmdSize,
+					  psClientCCB->ui32Size);
+	psClientCCB->ui32ByteCount += ui32CmdSize;
+
+#if defined (PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+	_RGXUpdateCCBUtilisation(psClientCCB);
+#endif
+	/*
+		PDumpSetFrame will detect the Transition out of capture range for
+		frame-based data, but if we are PDumping continuous data then we
+		need to inform the PDump layer ourselves
+	*/
+	if (PDUMP_IS_CONTINUOUS(ui32PDumpFlags) && !bInCaptureRange)
+	{
+		PVRSRV_ERROR eError;
+
+		/* Only Transitioning into capture range can cause an error */
+		eError = PDumpTransition(psClientCCB->psPDumpConnectionData, IMG_FALSE, PDUMP_FLAGS_CONTINUOUS);
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+
+	if (bPdumpEnabled)
+	{
+		/* Update the PDump write offset to show we PDumped this command */
+		psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
+	}
+
+#if defined(NO_HARDWARE)
+	/*
+		The firmware is not running, so it cannot update these; we do it here instead.
+	*/
+	psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
+	psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
+#endif
+}
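+/* Illustrative sketch (not part of this driver): UPDATE_CCB_OFFSET is defined
+ * in a header not shown in this patch; the advance is assumed to be the usual
+ * masked add for a power-of-two ring, so the padding skip and the command
+ * advance above share one operation:
+ *
+ *   #define UPDATE_CCB_OFFSET(off, bytes, size) \
+ *       ((off) = ((off) + (bytes)) & ((size) - 1))
+ *
+ *   // e.g. off = 4088, bytes = 16, size = 4096  ->  off = 8 (wrapped)
+ */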
+
+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB)
+{
+	return psClientCCB->ui32HostWriteOffset;
+}
+
+#define SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL PVR_DBG_ERROR
+#define CHECK_COMMAND(cmd, fenceupdate) \
+				case RGXFWIF_CCB_CMD_TYPE_##cmd: \
+						PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, #cmd " command (%d bytes)", psHeader->ui32CmdSize)); \
+						bFenceUpdate = fenceupdate; \
+						break
+
+static void _RGXClientCCBDumpCommands(RGX_CLIENT_CCB *psClientCCB,
+									  IMG_UINT32 ui32Offset,
+									  IMG_UINT32 ui32ByteCount)
+{
+#if defined(SUPPORT_DUMP_CLIENT_CCB_COMMANDS)
+	IMG_UINT8 *pui8Ptr = psClientCCB->pui8ClientCCB + ui32Offset;
+	IMG_UINT32 ui32ConsumeSize = ui32ByteCount;
+
+	while (ui32ConsumeSize)
+	{
+		RGXFWIF_CCB_CMD_HEADER *psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8Ptr;
+		IMG_BOOL bFenceUpdate = IMG_FALSE;
+
+		PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "@offset 0x%08lx", pui8Ptr - psClientCCB->pui8ClientCCB));
+		switch(psHeader->eCmdType)
+		{
+			CHECK_COMMAND(TA, IMG_FALSE);
+			CHECK_COMMAND(3D, IMG_FALSE);
+			CHECK_COMMAND(CDM, IMG_FALSE);
+			CHECK_COMMAND(TQ_3D, IMG_FALSE);
+			CHECK_COMMAND(TQ_2D, IMG_FALSE);
+			CHECK_COMMAND(3D_PR, IMG_FALSE);
+			CHECK_COMMAND(NULL, IMG_FALSE);
+			CHECK_COMMAND(SHG, IMG_FALSE);
+			CHECK_COMMAND(RTU, IMG_FALSE);
+			CHECK_COMMAND(RTU_FC, IMG_FALSE);
+			CHECK_COMMAND(PRE_TIMESTAMP, IMG_FALSE);
+			CHECK_COMMAND(POST_TIMESTAMP, IMG_FALSE);
+			CHECK_COMMAND(FENCE, IMG_TRUE);
+			CHECK_COMMAND(UPDATE, IMG_TRUE);
+			CHECK_COMMAND(UNFENCED_UPDATE, IMG_FALSE);
+			CHECK_COMMAND(RMW_UPDATE, IMG_TRUE);
+			CHECK_COMMAND(FENCE_PR, IMG_TRUE);
+			CHECK_COMMAND(UNFENCED_RMW_UPDATE, IMG_FALSE);
+			CHECK_COMMAND(PADDING, IMG_FALSE);
+			CHECK_COMMAND(TQ_TDM, IMG_FALSE);
+			default:
+				PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Unknown command!"));
+				break;
+		}
+		pui8Ptr += sizeof(*psHeader);
+		if (bFenceUpdate)
+		{
+			IMG_UINT32 j;
+			RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8Ptr;
+			for (j=0;j<psHeader->ui32CmdSize/sizeof(RGXFWIF_UFO);j++)
+			{
+				PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Addr = 0x%08x, value = 0x%08x",
+							psUFOPtr[j].puiAddrUFO.ui32Addr, psUFOPtr[j].ui32Value));
+			}
+		}
+		else
+		{
+			IMG_UINT32 *pui32Ptr = (IMG_UINT32 *) pui8Ptr;
+			IMG_UINT32 ui32Remain = psHeader->ui32CmdSize/sizeof(IMG_UINT32);
+			while(ui32Remain)
+			{
+				if (ui32Remain >= 4)
+				{
+					PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x 0x%08x",
+							pui32Ptr[0], pui32Ptr[1], pui32Ptr[2], pui32Ptr[3]));
+					pui32Ptr += 4;
+					ui32Remain -= 4;
+				}
+				if (ui32Remain == 3)
+				{
+					PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x",
+							pui32Ptr[0], pui32Ptr[1], pui32Ptr[2]));
+					pui32Ptr += 3;
+					ui32Remain -= 3;
+				}
+				if (ui32Remain == 2)
+				{
+					PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x",
+							pui32Ptr[0], pui32Ptr[1]));
+					pui32Ptr += 2;
+					ui32Remain -= 2;
+				}
+				if (ui32Remain == 1)
+				{
+					PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x",
+							pui32Ptr[0]));
+					pui32Ptr += 1;
+					ui32Remain -= 1;
+				}
+			}
+		}
+		pui8Ptr += psHeader->ui32CmdSize;
+		ui32ConsumeSize -= sizeof(*psHeader) + psHeader->ui32CmdSize;
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(psClientCCB);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32ByteCount);
+#endif
+}
+
+/*
+	Work out how much space this command will require
+*/
+PVRSRV_ERROR RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB            *psClientCCB,
+                                    IMG_UINT32                ui32ClientFenceCount,
+                                    PRGXFWIF_UFO_ADDR         *pauiFenceUFOAddress,
+                                    IMG_UINT32                *paui32FenceValue,
+                                    IMG_UINT32                ui32ClientUpdateCount,
+                                    PRGXFWIF_UFO_ADDR         *pauiUpdateUFOAddress,
+                                    IMG_UINT32                *paui32UpdateValue,
+                                    IMG_UINT32                ui32ServerSyncCount,
+                                    IMG_UINT32                *paui32ServerSyncFlags,
+                                    IMG_UINT32                ui32ServerSyncFlagMask,
+                                    SERVER_SYNC_PRIMITIVE     **papsServerSyncs,
+                                    IMG_UINT32                ui32CmdSize,
+                                    IMG_PBYTE                 pui8DMCmd,
+                                    PRGXFWIF_TIMESTAMP_ADDR   *ppPreAddr,
+                                    PRGXFWIF_TIMESTAMP_ADDR   *ppPostAddr,
+                                    PRGXFWIF_UFO_ADDR         *ppRMWUFOAddr,
+                                    RGXFWIF_CCB_CMD_TYPE      eType,
+                                    IMG_UINT32                ui32ExtJobRef,
+                                    IMG_UINT32                ui32IntJobRef,
+                                    IMG_UINT32                ui32PDumpFlags,
+                                    RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
+                                    IMG_CHAR                  *pszCommandName,
+                                    RGX_CCB_CMD_HELPER_DATA   *psCmdHelperData,
+									IMG_DEV_VIRTADDR		  sRobustnessResetReason)
+{
+	IMG_UINT32 ui32FenceCount;
+	IMG_UINT32 ui32UpdateCount;
+	IMG_UINT32 i;
+
+	/* Job reference values */
+	psCmdHelperData->ui32ExtJobRef = ui32ExtJobRef;
+	psCmdHelperData->ui32IntJobRef = ui32IntJobRef;
+
+	/* Save the data we require in the submit call */
+	psCmdHelperData->psClientCCB = psClientCCB;
+	psCmdHelperData->ui32PDumpFlags = ui32PDumpFlags;
+	psCmdHelperData->pszCommandName = pszCommandName;
+
+	/* Client sync data */
+	psCmdHelperData->ui32ClientFenceCount = ui32ClientFenceCount;
+	psCmdHelperData->pauiFenceUFOAddress = pauiFenceUFOAddress;
+	psCmdHelperData->paui32FenceValue = paui32FenceValue;
+	psCmdHelperData->ui32ClientUpdateCount = ui32ClientUpdateCount;
+	psCmdHelperData->pauiUpdateUFOAddress = pauiUpdateUFOAddress;
+	psCmdHelperData->paui32UpdateValue = paui32UpdateValue;
+
+	/* Server sync data */
+	psCmdHelperData->ui32ServerSyncCount = ui32ServerSyncCount;
+	psCmdHelperData->paui32ServerSyncFlags = paui32ServerSyncFlags;
+	psCmdHelperData->ui32ServerSyncFlagMask = ui32ServerSyncFlagMask;
+	psCmdHelperData->papsServerSyncs = papsServerSyncs;
+
+	/* Command data */
+	psCmdHelperData->ui32CmdSize = ui32CmdSize;
+	psCmdHelperData->pui8DMCmd = pui8DMCmd;
+	psCmdHelperData->eType = eType;
+
+	/* Robustness reset reason address */
+	psCmdHelperData->sRobustnessResetReason = sRobustnessResetReason;
+
+	PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+			"%s Command Server Init on FWCtx %08x", pszCommandName,
+			FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr);
+
+	/* Init the generated data members */
+	psCmdHelperData->ui32ServerFenceCount = 0;
+	psCmdHelperData->ui32ServerUpdateCount = 0;
+	psCmdHelperData->ui32ServerUnfencedUpdateCount = 0;
+	psCmdHelperData->ui32PreTimeStampCmdSize = 0;
+	psCmdHelperData->ui32PostTimeStampCmdSize = 0;
+	psCmdHelperData->ui32RMWUFOCmdSize = 0;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	/* Workload Data added */
+	psCmdHelperData->psWorkEstKickData = psWorkEstKickData;
+#endif
+
+	if (ppPreAddr && (ppPreAddr->ui32Addr != 0))
+	{
+
+		psCmdHelperData->pPreTimestampAddr = *ppPreAddr;
+		psCmdHelperData->ui32PreTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
+			+ ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN  - 1));
+	}
+
+	if (ppPostAddr && (ppPostAddr->ui32Addr != 0))
+	{
+		psCmdHelperData->pPostTimestampAddr = *ppPostAddr;
+		psCmdHelperData->ui32PostTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
+			+ ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN  - 1));
+	}
+
+	if (ppRMWUFOAddr && (ppRMWUFOAddr->ui32Addr != 0))
+	{
+		psCmdHelperData->pRMWUFOAddr       = *ppRMWUFOAddr;
+		psCmdHelperData->ui32RMWUFOCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_UFO);
+	}
+
+
+	/* Workout how many fences and updates this command will have */
+	for (i = 0; i < ui32ServerSyncCount; i++)
+	{
+		IMG_UINT32 ui32Flag = paui32ServerSyncFlags[i] & ui32ServerSyncFlagMask;
+
+		if (ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+		{
+			/* Server syncs must fence */
+			psCmdHelperData->ui32ServerFenceCount++;
+		}
+
+		/* If it is an update */
+		if (ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+		{
+			/* Is it a fenced update or a progress update (a.k.a. an unfenced update)? */
+			if ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE) == PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE)
+			{
+				/* it is a progress update */
+				psCmdHelperData->ui32ServerUnfencedUpdateCount++;
+			}
+			else
+			{
+				/* it is a fenced update */
+				psCmdHelperData->ui32ServerUpdateCount++;
+			}
+		}
+	}
+
+
+	/* Total fence command size (header plus command data) */
+	ui32FenceCount = ui32ClientFenceCount + psCmdHelperData->ui32ServerFenceCount;
+	if (ui32FenceCount)
+	{
+		psCmdHelperData->ui32FenceCmdSize = RGX_CCB_FWALLOC_ALIGN((ui32FenceCount * sizeof(RGXFWIF_UFO)) +
+																  sizeof(RGXFWIF_CCB_CMD_HEADER));
+	}
+	else
+	{
+		psCmdHelperData->ui32FenceCmdSize = 0;
+	}
+
+	/* Total DM command size (header plus command data) */
+	psCmdHelperData->ui32DMCmdSize = RGX_CCB_FWALLOC_ALIGN(ui32CmdSize +
+														   sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+	/* Total update command size (header plus command data) */
+	ui32UpdateCount = ui32ClientUpdateCount + psCmdHelperData->ui32ServerUpdateCount;
+	if (ui32UpdateCount)
+	{
+		psCmdHelperData->ui32UpdateCmdSize = RGX_CCB_FWALLOC_ALIGN((ui32UpdateCount * sizeof(RGXFWIF_UFO)) +
+																   sizeof(RGXFWIF_CCB_CMD_HEADER));
+	}
+	else
+	{
+		psCmdHelperData->ui32UpdateCmdSize = 0;
+	}
+
+	/* Total unfenced update command size (header plus command data) */
+	if (psCmdHelperData->ui32ServerUnfencedUpdateCount != 0)
+	{
+		psCmdHelperData->ui32UnfencedUpdateCmdSize = RGX_CCB_FWALLOC_ALIGN((psCmdHelperData->ui32ServerUnfencedUpdateCount * sizeof(RGXFWIF_UFO)) +
+																		   sizeof(RGXFWIF_CCB_CMD_HEADER));
+	}
+	else
+	{
+		psCmdHelperData->ui32UnfencedUpdateCmdSize = 0;
+	}
+
+	return PVRSRV_OK;
+}
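+/* Illustrative sketch (not part of this driver): each per-section size above
+ * is rounded up with the standard power-of-two formula already visible in the
+ * timestamp cases, (x + align - 1) & ~(align - 1). For example, with
+ * align = 16: x = 20 -> 32, x = 32 -> 32, x = 33 -> 48.
+ */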
+
+
+/*
+	Reserve space in the CCB and fill in the command and client sync data
+*/
+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
+									   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
+{
+	IMG_UINT32 ui32AllocSize = 0;
+	IMG_UINT32 i;
+	IMG_UINT8 *pui8StartPtr;
+	PVRSRV_ERROR eError;
+
+	/*
+		Work out how much space we need for all the command(s)
+	*/
+	ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
+
+
+	for (i = 0; i < ui32CmdCount; i++)
+	{
+		if ((asCmdHelperData[0].ui32PDumpFlags ^ asCmdHelperData[i].ui32PDumpFlags) & PDUMP_FLAGS_CONTINUOUS)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: PDump continuous is not consistent (%s != %s) for command %d",
+					 __FUNCTION__,
+					 PDUMP_IS_CONTINUOUS(asCmdHelperData[0].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE",
+					 PDUMP_IS_CONTINUOUS(asCmdHelperData[i].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE",
+					 ui32CmdCount));
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+
+	/*
+		Acquire space in the CCB for all the command(s).
+	*/
+	eError = RGXAcquireCCB(asCmdHelperData[0].psClientCCB,
+						   ui32AllocSize,
+						   (void **)&pui8StartPtr,
+						   asCmdHelperData[0].ui32PDumpFlags);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/*
+		For each command fill in the fence, DM, and update command
+
+		Note:
+		We only fill in the client fences here; the server fences (and updates)
+		will be filled in together at the end. This is because we might fail the
+		kernel CCB alloc and would then have to roll back the server syncs if
+		we performed the operation here
+	*/
+	for (i = 0; i < ui32CmdCount; i++)
+	{
+		RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = & asCmdHelperData[i];
+		IMG_UINT8 *pui8CmdPtr;
+		IMG_UINT8 *pui8ServerFenceStart = 0;
+		IMG_UINT8 *pui8ServerUpdateStart = 0;
+#if defined(PDUMP)
+		IMG_UINT32 ui32CtxAddr = FWCommonContextGetFWAddress(asCmdHelperData->psClientCCB->psServerCommonContext).ui32Addr;
+		IMG_UINT32 ui32CcbWoff = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(asCmdHelperData->psClientCCB->psServerCommonContext));
+#endif
+
+		if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
+		{
+			PDUMPCOMMENT("Start of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+					psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+		}
+
+
+
+		/*
+			Create the fence command.
+		*/
+		if (psCmdHelperData->ui32FenceCmdSize)
+		{
+			RGXFWIF_CCB_CMD_HEADER *psHeader;
+			IMG_UINT k, uiNextValueIndex;
+
+			/* Fences are at the start of the command */
+			pui8CmdPtr = pui8StartPtr;
+
+			psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FENCE;
+			psHeader->ui32CmdSize = psCmdHelperData->ui32FenceCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+			psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+			psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+			psHeader->sWorkloadDataFWAddr.ui32Addr = 0;
+			psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+			psHeader->sWorkEstKickData.ui64DeadlineInus = 0;
+			psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+
+			pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+			/* Fill in the client fences */
+			uiNextValueIndex = 0;
+			for (k = 0; k < psCmdHelperData->ui32ClientFenceCount; k++)
+			{
+				RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8CmdPtr;
+	
+				psUFOPtr->puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k];
+
+				if (RGX_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+				{
+					psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+				}
+				else
+				{
+					/* Only increment uiNextValueIndex for non sync checkpoints
+					 * (as paui32FenceValue only contains values for sync prims)
+					 */
+					psUFOPtr->ui32Value = psCmdHelperData->paui32FenceValue[uiNextValueIndex++];
+				}
+				pui8CmdPtr += sizeof(RGXFWIF_UFO);
+
+#if defined SYNC_COMMAND_DEBUG
+				PVR_DPF((PVR_DBG_ERROR, "%s client sync fence - 0x%x -> 0x%x",
+						psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+#endif
+				PDUMPCOMMENT(".. %s client sync fence - 0x%x -> 0x%x",
+						psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
+
+
+			}
+			pui8ServerFenceStart = pui8CmdPtr;
+		}
+
+		/* jump over the Server fences */
+		pui8CmdPtr = pui8StartPtr + psCmdHelperData->ui32FenceCmdSize;
+
+
+		/*
+		  Create the pre-DM timestamp commands. Pre and post timestamp commands are supposed to
+		  sandwich the DM cmd. The padding code used on a CCB wrap upsets the FW if we don't have
+		  the task type bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types.
+		*/
+		if (psCmdHelperData->ui32PreTimeStampCmdSize != 0)
+		{
+			RGXWriteTimestampCommand(& pui8CmdPtr,
+			                         RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP,
+			                         psCmdHelperData->pPreTimestampAddr);
+		}
+
+		/*
+			Create the DM command
+		*/
+		if (psCmdHelperData->ui32DMCmdSize)
+		{
+			RGXFWIF_CCB_CMD_HEADER *psHeader;
+
+			psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+			psHeader->eCmdType = psCmdHelperData->eType;
+			psHeader->ui32CmdSize = psCmdHelperData->ui32DMCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+			psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+			psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+			psHeader->sWorkloadDataFWAddr.ui32Addr = 0;
+
+			if(psCmdHelperData->psWorkEstKickData != NULL)
+			{
+				PVR_ASSERT(psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_TA ||
+				           psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_3D);
+				psHeader->sWorkEstKickData = *psCmdHelperData->psWorkEstKickData;
+			}
+			else
+			{
+				psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+				psHeader->sWorkEstKickData.ui64DeadlineInus = 0;
+				psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+			}
+#endif
+
+			psHeader->sRobustnessResetReason = psCmdHelperData->sRobustnessResetReason;
+
+			pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+			/* The buffer is write-combine, so no special device memory treatment required. */
+			OSCachedMemCopy(pui8CmdPtr, psCmdHelperData->pui8DMCmd, psCmdHelperData->ui32CmdSize);
+			pui8CmdPtr += psCmdHelperData->ui32CmdSize;
+		}
+
+		if (psCmdHelperData->ui32PostTimeStampCmdSize != 0)
+		{
+			RGXWriteTimestampCommand(& pui8CmdPtr,
+			                         RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP,
+			                         psCmdHelperData->pPostTimestampAddr);
+		}
+
+
+		if (psCmdHelperData->ui32RMWUFOCmdSize != 0)
+		{
+			RGXFWIF_CCB_CMD_HEADER * psHeader;
+			RGXFWIF_UFO            * psUFO;
+
+			psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE;
+			psHeader->ui32CmdSize = psCmdHelperData->ui32RMWUFOCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+			psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+			psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+
+			psHeader->sWorkloadDataFWAddr.ui32Addr = 0;
+			psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+			psHeader->sWorkEstKickData.ui64DeadlineInus = 0;
+			psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+			pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+			psUFO = (RGXFWIF_UFO *) pui8CmdPtr;
+			psUFO->puiAddrUFO = psCmdHelperData->pRMWUFOAddr;
+			
+			pui8CmdPtr += sizeof(RGXFWIF_UFO);
+		}
+	
+
+		/*
+			Create the update command.
+			
+			Note:
+			We only fill in the client updates here, the server updates (and fences)
+			will be filled in together at the end
+		*/
+		if (psCmdHelperData->ui32UpdateCmdSize)
+		{
+			RGXFWIF_CCB_CMD_HEADER *psHeader;
+			IMG_UINT k, uiNextValueIndex;
+
+			psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UPDATE;
+			psHeader->ui32CmdSize = psCmdHelperData->ui32UpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+			psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+			psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+			psHeader->sWorkloadDataFWAddr.ui32Addr = 0;
+			psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+			psHeader->sWorkEstKickData.ui64DeadlineInus = 0;
+			psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+			pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+			/* Fill in the client updates */
+			uiNextValueIndex = 0;
+			for (k = 0; k < psCmdHelperData->ui32ClientUpdateCount; k++)
+			{
+				RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8CmdPtr;
+	
+				psUFOPtr->puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k];
+				if (RGX_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+				{
+					psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+				}
+				else
+				{
+					/* Only increment uiNextValueIndex for non sync checkpoints
+					 * (as paui32UpdateValue only contains values for sync prims)
+					 */
+					psUFOPtr->ui32Value = psCmdHelperData->paui32UpdateValue[uiNextValueIndex++];
+				}
+				pui8CmdPtr += sizeof(RGXFWIF_UFO);
+
+#if defined SYNC_COMMAND_DEBUG
+				PVR_DPF((PVR_DBG_ERROR, "%s client sync update - 0x%x -> 0x%x",
+						psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+#endif
+				PDUMPCOMMENT(".. %s client sync update - 0x%x -> 0x%x",
+						psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
+
+			}
+			pui8ServerUpdateStart = pui8CmdPtr;
+		}
+	
+		/* Save the server sync fence & update offsets for submit time */
+		psCmdHelperData->pui8ServerFenceStart  = pui8ServerFenceStart;
+		psCmdHelperData->pui8ServerUpdateStart = pui8ServerUpdateStart;
+
+		/* jump over the fenced update */
+		if (psCmdHelperData->ui32UnfencedUpdateCmdSize != 0)
+		{
+			RGXFWIF_CCB_CMD_HEADER * const psHeader = (RGXFWIF_CCB_CMD_HEADER * ) psCmdHelperData->pui8ServerUpdateStart + psCmdHelperData->ui32UpdateCmdSize;
+			/* Set up the header for unfenced updates */
+			PVR_ASSERT(psHeader); /* Could be NULL if ui32UpdateCmdSize is 0, which is never expected */
+			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE;
+			psHeader->ui32CmdSize = psCmdHelperData->ui32UnfencedUpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+			psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+			psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+			psHeader->sWorkloadDataFWAddr.ui32Addr = 0;
+			psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+			psHeader->sWorkEstKickData.ui64DeadlineInus = 0;
+			psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+
+			/* jump over the header */
+			psCmdHelperData->pui8ServerUnfencedUpdateStart = ((IMG_UINT8*) psHeader) + sizeof(RGXFWIF_CCB_CMD_HEADER);
+		}
+		else
+		{
+			psCmdHelperData->pui8ServerUnfencedUpdateStart = NULL;
+		}
+		
+		/* Save start for sanity checking at submit time */
+		psCmdHelperData->pui8StartPtr = pui8StartPtr;
+
+		/* Set the start pointer for the next iteration around the loop */
+		pui8StartPtr +=
+			psCmdHelperData->ui32FenceCmdSize         +
+			psCmdHelperData->ui32PreTimeStampCmdSize  +
+			psCmdHelperData->ui32DMCmdSize            +
+			psCmdHelperData->ui32PostTimeStampCmdSize +
+			psCmdHelperData->ui32RMWUFOCmdSize        +
+			psCmdHelperData->ui32UpdateCmdSize        +
+			psCmdHelperData->ui32UnfencedUpdateCmdSize;
+
+		if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
+		{
+			PDUMPCOMMENT("End of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+					psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+		}
+		else
+		{
+			PDUMPCOMMENT("No %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+					psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+		}
+	}
+
+	return PVRSRV_OK;
+}
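+/* Illustrative sketch (not part of this driver): within the space acquired
+ * for one helper command, the sections are laid out back to back in the order
+ * the pui8StartPtr advance above encodes:
+ *
+ *   | fence | pre-timestamp | DM cmd | post-timestamp | RMW UFO |
+ *   | update (client UFOs, then server UFOs) | unfenced update  |
+ *
+ * Any section with size 0 simply collapses, and the next command's start
+ * pointer begins immediately after the last section of this one.
+ */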
+
+/*
+	Fill in the server syncs data and release the CCB space
+*/
+void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
+							   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+							   const IMG_CHAR *pcszDMName,
+							   IMG_UINT32 ui32CtxAddr)
+{
+	IMG_UINT32 ui32AllocSize = 0;
+	IMG_UINT32 i;
+#if defined(LINUX)
+	IMG_BOOL bTraceChecks = trace_rogue_are_fence_checks_traced();
+	IMG_BOOL bTraceUpdates = trace_rogue_are_fence_updates_traced();
+#endif
+
+	/*
+		Work out how much space we need for all the command(s)
+	*/
+	ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVLockServerSync();
+#endif
+
+	/*
+		For each command fill in the server sync info
+	*/
+	for (i=0;i<ui32CmdCount;i++)
+	{
+		RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i];
+		IMG_UINT8 *pui8ServerFenceStart = psCmdHelperData->pui8ServerFenceStart;
+		IMG_UINT8 *pui8ServerUpdateStart = psCmdHelperData->pui8ServerUpdateStart;
+		IMG_UINT8 *pui8ServerUnfencedUpdateStart = psCmdHelperData->pui8ServerUnfencedUpdateStart;
+		IMG_UINT32 j;
+
+		/* Now fill in the server fence and updates together */
+		for (j = 0; j < psCmdHelperData->ui32ServerSyncCount; j++)
+		{
+			RGXFWIF_UFO *psUFOPtr;
+			IMG_UINT32 ui32UpdateValue;
+			IMG_UINT32 ui32FenceValue;
+			IMG_UINT32 ui32SyncAddr;
+			PVRSRV_ERROR eError;
+			IMG_UINT32 ui32Flag = psCmdHelperData->paui32ServerSyncFlags[j] & psCmdHelperData->ui32ServerSyncFlagMask;
+			IMG_BOOL bFence = ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)!=0)?IMG_TRUE:IMG_FALSE;
+			IMG_BOOL bUpdate = ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)!=0)?IMG_TRUE:IMG_FALSE;
+			const IMG_BOOL bUnfencedUpdate = ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE) == PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE)
+				? IMG_TRUE
+				: IMG_FALSE;
+
+			eError = PVRSRVServerSyncQueueHWOpKM_NoGlobalLock(psCmdHelperData->papsServerSyncs[j],
+												 bUpdate,
+												 &ui32FenceValue,
+												 &ui32UpdateValue);
+			/* This function can't fail */
+			PVR_ASSERT(eError == PVRSRV_OK);
+	
+			/*
+				As server syncs always fence (we have a check in RGXCmdHelperInitCmdCCB
+				which ensures the client is playing ball) the filling in of the fence
+				is unconditional.
+			*/
+			eError = ServerSyncGetFWAddr(psCmdHelperData->papsServerSyncs[j], &ui32SyncAddr);
+			if (PVRSRV_OK != eError)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to read Server Sync FW address (%d)",
+					__FUNCTION__, eError));
+				PVR_ASSERT(eError == PVRSRV_OK);
+			}
+			if (bFence)
+			{
+				PVR_ASSERT(pui8ServerFenceStart != 0);
+
+				psUFOPtr = (RGXFWIF_UFO *) pui8ServerFenceStart;
+				psUFOPtr->puiAddrUFO.ui32Addr = ui32SyncAddr;
+				psUFOPtr->ui32Value = ui32FenceValue;
+				pui8ServerFenceStart += sizeof(RGXFWIF_UFO);
+
+#if defined(LINUX)
+				if (bTraceChecks)
+				{
+					trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
+											 pcszDMName,
+											 ui32CtxAddr,
+											 psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+											 1,
+											 &psUFOPtr->puiAddrUFO,
+											 &psUFOPtr->ui32Value);
+				}
+#endif
+			}
+	
+			/* If there is an update then fill that in as well */
+			if (bUpdate)
+			{
+				if (bUnfencedUpdate)
+				{
+					PVR_ASSERT(pui8ServerUnfencedUpdateStart != 0);
+
+					psUFOPtr = (RGXFWIF_UFO *) pui8ServerUnfencedUpdateStart;
+					psUFOPtr->puiAddrUFO.ui32Addr = ui32SyncAddr;
+					psUFOPtr->ui32Value = ui32UpdateValue;
+					pui8ServerUnfencedUpdateStart += sizeof(RGXFWIF_UFO);
+				}
+				else
+				{
+					/* fenced update */
+					PVR_ASSERT(pui8ServerUpdateStart != 0);
+
+					psUFOPtr = (RGXFWIF_UFO *) pui8ServerUpdateStart;
+					psUFOPtr->puiAddrUFO.ui32Addr = ui32SyncAddr;
+					psUFOPtr->ui32Value = ui32UpdateValue;
+					pui8ServerUpdateStart += sizeof(RGXFWIF_UFO);
+				}
+#if defined(LINUX)
+				if (bTraceUpdates)
+				{
+					trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
+											  pcszDMName,
+											  ui32CtxAddr,
+											  psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+											  1,
+											  &psUFOPtr->puiAddrUFO,
+											  &psUFOPtr->ui32Value);
+				}
+#endif
+				
+#if defined(NO_HARDWARE)
+				/*
+				  There is no FW, so the host has to do any sync updates
+				  (client sync updates are done in the client)
+				*/
+				PVRSRVServerSyncPrimSetKM(psCmdHelperData->papsServerSyncs[j], ui32UpdateValue);
+#endif
+			}
+		}
+
+#if defined(LINUX)
+		if (bTraceChecks)
+		{
+			trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
+									 pcszDMName,
+									 ui32CtxAddr,
+									 psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+									 psCmdHelperData->ui32ClientFenceCount,
+									 psCmdHelperData->pauiFenceUFOAddress,
+									 psCmdHelperData->paui32FenceValue);
+		}
+		if (bTraceUpdates)
+		{
+			trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
+									  pcszDMName,
+									  ui32CtxAddr,
+									  psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+									  psCmdHelperData->ui32ClientUpdateCount,
+									  psCmdHelperData->pauiUpdateUFOAddress,
+									  psCmdHelperData->paui32UpdateValue);
+		}
+#endif
+
+		if (psCmdHelperData->ui32ServerSyncCount)
+		{
+			/*
+				Do some sanity checks to ensure we did the pointer math right
+			*/
+			if (pui8ServerFenceStart != 0)
+			{
+				PVR_ASSERT(pui8ServerFenceStart ==
+						   (psCmdHelperData->pui8StartPtr +
+						   psCmdHelperData->ui32FenceCmdSize));
+			}
+
+			if (pui8ServerUpdateStart != 0)
+			{
+				PVR_ASSERT(pui8ServerUpdateStart ==
+				           psCmdHelperData->pui8StartPtr             +
+				           psCmdHelperData->ui32FenceCmdSize         +
+				           psCmdHelperData->ui32PreTimeStampCmdSize  +
+				           psCmdHelperData->ui32DMCmdSize            +
+				           psCmdHelperData->ui32RMWUFOCmdSize        +
+				           psCmdHelperData->ui32PostTimeStampCmdSize +
+				           psCmdHelperData->ui32UpdateCmdSize);
+			}
+
+			if (pui8ServerUnfencedUpdateStart != 0)
+			{
+				PVR_ASSERT(pui8ServerUnfencedUpdateStart ==
+				           psCmdHelperData->pui8StartPtr             +
+				           psCmdHelperData->ui32FenceCmdSize         +
+				           psCmdHelperData->ui32PreTimeStampCmdSize  +
+				           psCmdHelperData->ui32DMCmdSize            +
+				           psCmdHelperData->ui32RMWUFOCmdSize        +
+				           psCmdHelperData->ui32PostTimeStampCmdSize +
+				           psCmdHelperData->ui32UpdateCmdSize        +
+				           psCmdHelperData->ui32UnfencedUpdateCmdSize);
+			}
+		}
+	
+		/*
+			All the commands have been filled in so release the CCB space.
+			The FW still won't run this command until we kick it
+		*/
+		PDUMPCOMMENTWITHFLAGS(psCmdHelperData->ui32PDumpFlags,
+				"%s Command Server Release on FWCtx %08x",
+				psCmdHelperData->pszCommandName, ui32CtxAddr);
+	}
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	PVRSRVUnlockServerSync();
+#endif
+
+	_RGXClientCCBDumpCommands(asCmdHelperData[0].psClientCCB,
+							  asCmdHelperData[0].psClientCCB->ui32HostWriteOffset,
+							  ui32AllocSize);
+
+	RGXReleaseCCB(asCmdHelperData[0].psClientCCB,
+				  ui32AllocSize,
+				  asCmdHelperData[0].ui32PDumpFlags);
+}
+
+
+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32              ui32CmdCount,
+                                      RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
+{
+	IMG_UINT32 ui32AllocSize = 0;
+	IMG_UINT32 i;
+
+	/*
+		Work out how much space we need for all the command(s)
+	*/
+	for (i = 0; i < ui32CmdCount; i++)
+	{
+		ui32AllocSize +=
+			asCmdHelperData[i].ui32FenceCmdSize          +
+			asCmdHelperData[i].ui32DMCmdSize             +
+			asCmdHelperData[i].ui32UpdateCmdSize         +
+			asCmdHelperData[i].ui32UnfencedUpdateCmdSize +
+			asCmdHelperData[i].ui32PreTimeStampCmdSize   +
+			asCmdHelperData[i].ui32PostTimeStampCmdSize  +
+			asCmdHelperData[i].ui32RMWUFOCmdSize;
+	}
+
+	return ui32AllocSize;
+}
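+
+/*
+   A usage sketch (hypothetical caller; the helper array is assumed to have
+   been initialised via RGXCmdHelperInitCmdCCB):
+
+     RGX_CCB_CMD_HELPER_DATA asHelpers[2];
+     IMG_UINT32 ui32Total;
+     // ... RGXCmdHelperInitCmdCCB() called for each element ...
+     ui32Total = RGXCmdHelperGetCommandSize(2, asHelpers);
+     // ui32Total is the number of client CCB bytes the two commands occupy.
+*/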
+
+/* Work out how much of an offset there is to a specific command. */
+IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+                                        IMG_UINT32              ui32Cmdindex)
+{
+	IMG_UINT32 ui32Offset = 0;
+	IMG_UINT32 i;
+
+	for (i = 0; i < ui32Cmdindex; i++)
+	{
+		ui32Offset +=
+			asCmdHelperData[i].ui32FenceCmdSize          +
+			asCmdHelperData[i].ui32DMCmdSize             +
+			asCmdHelperData[i].ui32UpdateCmdSize         +
+			asCmdHelperData[i].ui32UnfencedUpdateCmdSize +
+			asCmdHelperData[i].ui32PreTimeStampCmdSize   +
+			asCmdHelperData[i].ui32PostTimeStampCmdSize  +
+			asCmdHelperData[i].ui32RMWUFOCmdSize;
+	}
+
+	return ui32Offset;
+}
+
+/* Returns the offset of the data master (DM) command header from a command's write offset */
+IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData)
+{
+	return psCmdHelperData->ui32FenceCmdSize + psCmdHelperData->ui32PreTimeStampCmdSize;
+}
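+
+/*
+   This is consistent with the command layout used throughout this file: only
+   the (optional) fence and pre-timestamp commands precede the DM command
+   header within a command's CCB allocation.
+*/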
+
+
+static const char *_CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType)
+{
+	switch (cmdType)
+	{
+		case RGXFWIF_CCB_CMD_TYPE_TA: return "TA";
+		case RGXFWIF_CCB_CMD_TYPE_3D: return "3D";
+		case RGXFWIF_CCB_CMD_TYPE_CDM: return "CDM";
+		case RGXFWIF_CCB_CMD_TYPE_TQ_3D: return "TQ_3D";
+		case RGXFWIF_CCB_CMD_TYPE_TQ_2D: return "TQ_2D";
+		case RGXFWIF_CCB_CMD_TYPE_3D_PR: return "3D_PR";
+		case RGXFWIF_CCB_CMD_TYPE_NULL: return "NULL";
+		case RGXFWIF_CCB_CMD_TYPE_SHG: return "SHG";
+		case RGXFWIF_CCB_CMD_TYPE_RTU: return "RTU";
+		case RGXFWIF_CCB_CMD_TYPE_RTU_FC: return "RTU_FC";
+		case RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP: return "PRE_TIMESTAMP";
+		case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: return "TQ_TDM";
+
+		case RGXFWIF_CCB_CMD_TYPE_FENCE: return "FENCE";
+		case RGXFWIF_CCB_CMD_TYPE_UPDATE: return "UPDATE";
+		case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE: return "RMW_UPDATE";
+		case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: return "FENCE_PR";
+		case RGXFWIF_CCB_CMD_TYPE_PRIORITY: return "PRIORITY";
+
+		case RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP: return "POST_TIMESTAMP";
+		case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: return "UNFENCED_UPDATE";
+		case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE: return "UNFENCED_RMW_UPDATE";
+
+		case RGXFWIF_CCB_CMD_TYPE_PADDING: return "PADDING";
+
+		default:
+			PVR_ASSERT(IMG_FALSE);
+		break;
+	}
+	
+	return "INVALID";
+}
+
+PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB  *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM)
+{
+	volatile RGXFWIF_CCCB_CTL	*psClientCCBCtrl;
+	IMG_UINT32 					ui32SampledRdOff, ui32SampledWrOff;
+	PVRSRV_ERROR				eError = PVRSRV_OK;
+
+	if (psCurrentClientCCB == NULL)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB is NULL"));
+		return  PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	
+	psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+	ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
+	ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
+
+	if (ui32SampledRdOff > psClientCCBCtrl->ui32WrapMask  ||
+		ui32SampledWrOff > psClientCCBCtrl->ui32WrapMask)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has invalid offset (ROFF=%d WOFF=%d)",
+				ui32SampledRdOff, ui32SampledWrOff));
+		return  PVRSRV_ERROR_INVALID_OFFSET;
+	}
+
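+	/* Heuristic stall check: work is pending now (read != write), work was
+	 * also pending at the last sample (LastROff != LastWOff), the read offset
+	 * has not advanced since then, and less than a whole CCB's worth of data
+	 * has been consumed in between - together these suggest the consumer is
+	 * stuck rather than merely fast. */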
+	if (ui32SampledRdOff != ui32SampledWrOff &&
+				psCurrentClientCCB->ui32LastROff != psCurrentClientCCB->ui32LastWOff &&
+				ui32SampledRdOff == psCurrentClientCCB->ui32LastROff &&
+				(psCurrentClientCCB->ui32ByteCount - psCurrentClientCCB->ui32LastByteCount) < psCurrentClientCCB->ui32Size)
+	{
+		//RGXFWIF_DEV_VIRTADDR v = {0};
+		//DumpStalledCCBCommand(v,psCurrentClientCCB,NULL);
+
+		/* Don't log this by default unless debugging, since a higher-level
+		 * function will log the stalled condition. This helps avoid
+		 * duplicate messages in the log.
+		 */
+		PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has not progressed (ROFF=%d WOFF=%d) for DM: %s",
+				ui32SampledRdOff, ui32SampledWrOff, RGXStringifyKickTypeDM(eKickTypeDM)));
+		eError =  PVRSRV_ERROR_CCCB_STALLED;
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		{
+			/* Un-wedge the stalled CCB (if waiting on a PSYNC_CHECKPOINT) */
+			RGXFWIF_CCB_CMD_HEADER *psHeader = (RGXFWIF_CCB_CMD_HEADER *) (psCurrentClientCCB->pui8ClientCCB + ui32SampledRdOff);
+			RGXFWIF_UFO *psUFOPtr;
+			IMG_UINT8 *pui8Ptr = (IMG_UINT8*)psHeader;
+			IMG_UINT32 j, jMax;
+
+			PVR_DPF((PVR_DBG_ERROR, "CheckForStalledCCB: psHeader->eCmdType=%d",psHeader->eCmdType));
+			if ((psHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (psHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR))
+			{
+				pui8Ptr += sizeof(*psHeader);
+				psUFOPtr = (RGXFWIF_UFO *) pui8Ptr;
+				/* Find the fence we are stuck on, and see if it is a PVRSRV_SYNC_CHECKPOINT */
+				jMax = psHeader->ui32CmdSize/sizeof(RGXFWIF_UFO);
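+				/* A FENCE command's payload is a packed array of RGXFWIF_UFO
+				   entries placed immediately after the command header, so the
+				   header's ui32CmdSize divided by sizeof(RGXFWIF_UFO) yields
+				   the number of fence entries to scan. */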
+				for (j=0;j<jMax;j++)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "CheckForStalledCCB: Checking UFO(%d/%d)(FwAddr=0x%x) to see if it is a sync checkpoint", j+1, jMax, psUFOPtr[j].puiAddrUFO.ui32Addr));
+					if (RGX_UFO_IS_SYNC_CHECKPOINT(&psUFOPtr[j]))
+					{
+						PVR_DPF((PVR_DBG_ERROR, "Calling SyncCheckpointErrorFromUFO(), puiAddrUFOPtr[%d]=<%p>, psUFOPtr[%d].puiAddrUFO,ui32Addr=0x%x",
+								j, (void*)&psUFOPtr[j], j, psUFOPtr[j].puiAddrUFO.ui32Addr));
+						SyncCheckpointErrorFromUFO(psDevNode, psUFOPtr[j].puiAddrUFO.ui32Addr);
+					}
+				}
+
+			}
+
+		}
+#endif
+	}
+
+	psCurrentClientCCB->ui32LastROff = ui32SampledRdOff;
+	psCurrentClientCCB->ui32LastWOff = ui32SampledWrOff;
+	psCurrentClientCCB->ui32LastByteCount = psCurrentClientCCB->ui32ByteCount;
+
+	return eError;
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP)
+void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+			PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+			RGX_CLIENT_CCB  *psCurrentClientCCB,
+			DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile)
+{
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+#endif
+	volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+	IMG_UINT8 *pui8ClientCCBBuff = psCurrentClientCCB->pui8ClientCCB;
+	IMG_UINT32 ui32Offset = psClientCCBCtrl->ui32ReadOffset;
+	IMG_UINT32 ui32DepOffset = psClientCCBCtrl->ui32DepOffset;
+	IMG_UINT32 ui32EndOffset = psCurrentClientCCB->ui32HostWriteOffset;
+	IMG_UINT32 ui32WrapMask = psClientCCBCtrl->ui32WrapMask;
+	IMG_CHAR * pszState = "Ready";
+
+	PVR_DUMPDEBUG_LOG("FWCtx 0x%08X (%s)", sFWCommonContext.ui32Addr,
+		(IMG_PCHAR)&psCurrentClientCCB->szName);
+	if (ui32Offset == ui32EndOffset)
+	{
+		PVR_DUMPDEBUG_LOG("  `--<Empty>");
+	}
+
+	while (ui32Offset != ui32EndOffset)
+	{
+		RGXFWIF_CCB_CMD_HEADER *psCmdHeader = (RGXFWIF_CCB_CMD_HEADER*)(pui8ClientCCBBuff + ui32Offset);
+		IMG_UINT32 ui32NextOffset = (ui32Offset + psCmdHeader->ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)) & ui32WrapMask;
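+		/* The CCB size is a power of two (it is created from a log2 size
+		   parameter), so ANDing with ui32WrapMask (size - 1) wraps the
+		   offset: e.g. with a 64KB CCB, mask 0xFFFF, offset 0xFFF8 plus a
+		   16-byte command wraps to 0x0008. */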
+		IMG_BOOL bLastCommand = (ui32NextOffset == ui32EndOffset)? IMG_TRUE: IMG_FALSE;
+		IMG_BOOL bLastUFO;
+		#define CCB_SYNC_INFO_LEN 80
+		IMG_CHAR pszSyncInfo[CCB_SYNC_INFO_LEN];
+		IMG_UINT32 ui32NoOfUpdates, i;
+		RGXFWIF_UFO *psUFOPtr;
+
+		ui32NoOfUpdates = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+		psUFOPtr = (RGXFWIF_UFO*)(pui8ClientCCBBuff + ui32Offset + sizeof(RGXFWIF_CCB_CMD_HEADER));
+		pszSyncInfo[0] = '\0';
+
+		if (ui32Offset == ui32DepOffset)
+		{
+			pszState = "Waiting";
+		}
+
+		PVR_DUMPDEBUG_LOG("  %s--%s %s @ %u Int=%u Ext=%u",
+			bLastCommand? "`": "|",
+			pszState, _CCBCmdTypename(psCmdHeader->eCmdType),
+			ui32Offset, psCmdHeader->ui32IntJobRef, psCmdHeader->ui32ExtJobRef
+			);
+
+		/* switch on type and write checks and updates */
+		switch (psCmdHeader->eCmdType)
+		{
+			case RGXFWIF_CCB_CMD_TYPE_UPDATE:
+			case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE:
+			case RGXFWIF_CCB_CMD_TYPE_FENCE:
+			case RGXFWIF_CCB_CMD_TYPE_FENCE_PR:
+			{
+				for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++)
+				{
+					bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE;
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+					SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+									 pszSyncInfo, CCB_SYNC_INFO_LEN);
+#endif
+					PVR_DUMPDEBUG_LOG("  %s  %s--Addr:0x%08x Val=0x%08x %s",
+						bLastCommand? " ": "|",
+						bLastUFO? "`": "|",
+						psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value,
+						pszSyncInfo
+						);
+				}
+				break;
+			}
+
+			case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE:
+			case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE:
+			{
+				for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++)
+				{
+					bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE;
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+					SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+									 pszSyncInfo, CCB_SYNC_INFO_LEN);
+#endif
+					PVR_DUMPDEBUG_LOG("  %s  %s--Addr:0x%08x Val++ %s",
+						bLastCommand? " ": "|",
+						bLastUFO? "`": "|",
+						psUFOPtr->puiAddrUFO.ui32Addr,
+						pszSyncInfo
+						);
+				}
+				break;
+			}
+
+			default:
+				break;
+		}
+		ui32Offset = ui32NextOffset;
+	}
+
+}
+#endif /* defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP) */
+
+void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+				RGX_CLIENT_CCB *psCurrentClientCCB,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	volatile RGXFWIF_CCCB_CTL	  *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+	IMG_UINT8					  *pui8ClientCCBBuff = psCurrentClientCCB->pui8ClientCCB;
+	volatile IMG_UINT8		   	  *pui8Ptr;
+	IMG_UINT32 					  ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
+	IMG_UINT32 					  ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset;
+	IMG_UINT32 					  ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
+
+	pui8Ptr = pui8ClientCCBBuff + ui32SampledRdOff;
+
+	if ((ui32SampledRdOff == ui32SampledDepOff) &&
+		(ui32SampledRdOff != ui32SampledWrOff))
+	{
+		volatile RGXFWIF_CCB_CMD_HEADER *psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)(pui8ClientCCBBuff + ui32SampledRdOff);
+		RGXFWIF_CCB_CMD_TYPE 	eCommandType = psCommandHeader->eCmdType;
+
+		/* CCB is stalled on a fence... */
+		if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR))
+		{
+			RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *)(pui8Ptr + sizeof(*psCommandHeader));
+			IMG_UINT32 jj;
+
+			/* Display details of the fence object on which the context is pending */
+			PVR_DUMPDEBUG_LOG("FWCtx 0x%08X @ %d (%s) pending on %s:",
+							   sFWCommonContext.ui32Addr,
+							   ui32SampledRdOff,
+							   (IMG_PCHAR)&psCurrentClientCCB->szName,
+							   _CCBCmdTypename(eCommandType));
+			for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+			{
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+				PVR_DUMPDEBUG_LOG("  Addr:0x%08x  Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value);
+#else
+				PVR_DUMPDEBUG_LOG("  Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x",
+				                   psUFOPtr[jj].puiAddrUFO.ui32Addr,
+				                   psUFOPtr[jj].ui32Value,
+				                   RGXReadWithSP(psUFOPtr[jj].puiAddrUFO.ui32Addr));
+#endif
+			}
+
+			/* Advance psCommandHeader past the FENCE to the next command header (this will be the TA/3D command that is fenced) */
+			pui8Ptr = (IMG_UINT8 *)psUFOPtr + psCommandHeader->ui32CmdSize;
+			psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
+			if( (uintptr_t)psCommandHeader != ((uintptr_t)pui8ClientCCBBuff + ui32SampledWrOff))
+			{
+				PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X fenced command is of type %s",sFWCommonContext.ui32Addr, _CCBCmdTypename(psCommandHeader->eCmdType));
+				/* Advance psCommandHeader past the TA/3D to the next command header (this will possibly be an UPDATE) */
+				pui8Ptr += sizeof(*psCommandHeader) + psCommandHeader->ui32CmdSize;
+				psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
+				/* If the next command is an update, display details of that so we can see what would then become unblocked */
+				if( (uintptr_t)psCommandHeader != ((uintptr_t)pui8ClientCCBBuff + ui32SampledWrOff))
+				{
+					eCommandType = psCommandHeader->eCmdType;
+
+					if (eCommandType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
+					{
+						psUFOPtr = (RGXFWIF_UFO *)((IMG_UINT8 *)psCommandHeader + sizeof(*psCommandHeader));
+						PVR_DUMPDEBUG_LOG(" preventing %s:",_CCBCmdTypename(eCommandType));
+						for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+						{
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+							PVR_DUMPDEBUG_LOG("  Addr:0x%08x  Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value);
+#else
+							PVR_DUMPDEBUG_LOG("  Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x",
+							                   psUFOPtr[jj].puiAddrUFO.ui32Addr,
+							                   psUFOPtr[jj].ui32Value,
+							                   RGXReadWithSP(psUFOPtr[jj].puiAddrUFO.ui32Addr));
+#endif
+						}
+					}
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr);
+				}
+			}
+			else
+			{
+				PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr);
+			}
+		}
+	}
+}
+
+/******************************************************************************
+ End of file (rgxccb.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxccb.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxccb.h
new file mode 100644
index 0000000..c3883a5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxccb.h
@@ -0,0 +1,258 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Circular Command Buffer functionality.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Circular Command Buffer functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXCCB_H__)
+#define __RGXCCB_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgx_fwif_shared.h"
+#include "rgxdebug.h"
+#include "rgxdefs_km.h"
+#include "pvr_notifier.h"
+
+#define MAX_CLIENT_CCB_NAME	30
+#define SYNC_FLAG_MASK_ALL  IMG_UINT32_MAX
+
+typedef struct _RGX_CLIENT_CCB_ RGX_CLIENT_CCB;
+
+/*
+	This structure is declared here as it's allocated on the heap by
+	the callers
+*/
+
+typedef struct _RGX_CCB_CMD_HELPER_DATA_ {
+	/* Data setup at command init time */
+	RGX_CLIENT_CCB  			*psClientCCB;
+	IMG_CHAR 					*pszCommandName;
+	IMG_UINT32 					ui32PDumpFlags;
+	
+	IMG_UINT32					ui32ClientFenceCount;
+	PRGXFWIF_UFO_ADDR			*pauiFenceUFOAddress;
+	IMG_UINT32					*paui32FenceValue;
+	IMG_UINT32					ui32ClientUpdateCount;
+	PRGXFWIF_UFO_ADDR			*pauiUpdateUFOAddress;
+	IMG_UINT32					*paui32UpdateValue;
+
+	IMG_UINT32					ui32ServerSyncCount;
+	IMG_UINT32					*paui32ServerSyncFlags;
+	IMG_UINT32					ui32ServerSyncFlagMask;
+	SERVER_SYNC_PRIMITIVE		**papsServerSyncs;
+	
+	RGXFWIF_CCB_CMD_TYPE		eType;
+	IMG_UINT32					ui32CmdSize;
+	IMG_UINT8					*pui8DMCmd;
+	IMG_UINT32					ui32FenceCmdSize;
+	IMG_UINT32					ui32DMCmdSize;
+	IMG_UINT32					ui32UpdateCmdSize;
+	IMG_UINT32					ui32UnfencedUpdateCmdSize;
+
+	/* timestamp commands */
+	PRGXFWIF_TIMESTAMP_ADDR     pPreTimestampAddr;
+	IMG_UINT32                  ui32PreTimeStampCmdSize;
+	PRGXFWIF_TIMESTAMP_ADDR     pPostTimestampAddr;
+	IMG_UINT32                  ui32PostTimeStampCmdSize;
+	PRGXFWIF_UFO_ADDR           pRMWUFOAddr;
+	IMG_UINT32                  ui32RMWUFOCmdSize;
+
+	/* Data setup at command acquire time */
+	IMG_UINT8					*pui8StartPtr;
+	IMG_UINT8					*pui8ServerUpdateStart;
+	IMG_UINT8					*pui8ServerUnfencedUpdateStart;
+	IMG_UINT8					*pui8ServerFenceStart;
+	IMG_UINT32					ui32ServerFenceCount;
+	IMG_UINT32					ui32ServerUpdateCount;
+	IMG_UINT32					ui32ServerUnfencedUpdateCount;
+
+	/* Job reference fields */
+	IMG_UINT32					ui32ExtJobRef;
+	IMG_UINT32					ui32IntJobRef;
+
+	/* Workload kick information */
+	RGXFWIF_WORKEST_KICK_DATA	*psWorkEstKickData;
+
+	/* Robustness reset reason address */
+	IMG_DEV_VIRTADDR			sRobustnessResetReason;
+} RGX_CCB_CMD_HELPER_DATA;
+
+#define PADDING_COMMAND_SIZE	(sizeof(RGXFWIF_CCB_CMD_HEADER))
+
+
+#define RGX_CCB_REQUESTORS(TYPE) \
+	/* for debugging purposes */ TYPE(UNDEF)	\
+	TYPE(TA)	\
+	TYPE(3D)	\
+	TYPE(CDM)	\
+	TYPE(SH)	\
+	TYPE(RS)	\
+	TYPE(TQ_3D)	\
+	TYPE(TQ_2D)	\
+	TYPE(TQ_TDM)    \
+	TYPE(KICKSYNC)	\
+	/* Only used for validating the number of entries in this list */ TYPE(FIXED_COUNT)	\
+	TYPE(FC0)	\
+	TYPE(FC1)	\
+	TYPE(FC2)	\
+	TYPE(FC3)	\
+
+/* Forms an enum constant for each type present in the RGX_CCB_REQUESTORS list. The enum is mainly used as
+   an index into the aszCCBRequestors table defined in rgxccb.c. The total number of enumerators must satisfy
+   the build assert that follows the enum definition.
+*/
+typedef enum _RGX_CCB_REQUESTOR_TYPE_
+{
+#define CONSTRUCT_ENUM(req) REQ_TYPE_##req,
+	RGX_CCB_REQUESTORS (CONSTRUCT_ENUM)
+#undef CONSTRUCT_ENUM
+	
+	/* should always be at the end */
+	REQ_TYPE_TOTAL_COUNT,
+} RGX_CCB_REQUESTOR_TYPE;
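+
+/*
+   A sketch of the X-macro expansion above: each TYPE(x) entry in
+   RGX_CCB_REQUESTORS becomes an enumerator REQ_TYPE_x, so the enum expands
+   to:
+
+     typedef enum _RGX_CCB_REQUESTOR_TYPE_
+     {
+         REQ_TYPE_UNDEF, REQ_TYPE_TA, REQ_TYPE_3D, REQ_TYPE_CDM,
+         REQ_TYPE_SH, REQ_TYPE_RS, REQ_TYPE_TQ_3D, REQ_TYPE_TQ_2D,
+         REQ_TYPE_TQ_TDM, REQ_TYPE_KICKSYNC, REQ_TYPE_FIXED_COUNT,
+         REQ_TYPE_FC0, REQ_TYPE_FC1, REQ_TYPE_FC2, REQ_TYPE_FC3,
+         REQ_TYPE_TOTAL_COUNT,
+     } RGX_CCB_REQUESTOR_TYPE;
+*/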
+
+/*	The number of enum constants in the above enum always equals the number of entries in the RGX_CCB_REQUESTORS X-macro list.
+	If the value of DPX_MAX_RAY_CONTEXTS changes to, say, 'n', the corresponding entry/entries up to FC[n-1] must be added to
+	the RGX_CCB_REQUESTORS list.
+*/
+static_assert(REQ_TYPE_TOTAL_COUNT == REQ_TYPE_FIXED_COUNT + DPX_MAX_RAY_CONTEXTS + 1,
+			  "Mismatch between DPX_MAX_RAY_CONTEXTS and RGX_CCB_REQUESTOR_TYPE enum");
+
+/* Tuple describing the columns of the following table */
+typedef enum _RGX_CCB_REQUESTOR_TUPLE_
+{
+	REQ_RGX_FW_CLIENT_CCB_STRING,          /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCB for this requestor */
+	REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING,  /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCBControl for this requestor */
+	REQ_PDUMP_COMMENT,                     /* Index to comment to be dumped in PDUMPs */
+
+	/* should always be at the end */
+	REQ_TUPLE_CARDINALITY,
+} RGX_CCB_REQUESTOR_TUPLE;
+
+/*	Table containing an array of strings for each requestor type in the RGX_CCB_REQUESTORS list. In addition to its use in
+	this module (rgxccb.c), the table is also used to access the strings dumped in PDUMP comments, hence it is marked
+	extern for use in other modules.
+*/
+extern IMG_CHAR *const aszCCBRequestors[][REQ_TUPLE_CARDINALITY];
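+
+/*
+   An illustrative lookup (a sketch; the table itself lives in rgxccb.c):
+
+     const IMG_CHAR *pszPDumpComment =
+         aszCCBRequestors[REQ_TYPE_CDM][REQ_PDUMP_COMMENT];
+*/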
+
+PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB,
+					IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO	*psDevInfo,
+						  IMG_UINT32			ui32CCBSizeLog2,
+						  CONNECTION_DATA		*psConnectionData,
+						  RGX_CCB_REQUESTOR_TYPE	eCCBRequestor,
+						  RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+						  RGX_CLIENT_CCB		**ppsClientCCB,
+						  DEVMEM_MEMDESC 		**ppsClientCCBMemDesc,
+						  DEVMEM_MEMDESC 		**ppsClientCCBCtlMemDesc);
+
+void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB);
+
+PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
+										IMG_UINT32		ui32CmdSize,
+										void			**ppvBufferSpace,
+										IMG_UINT32		ui32PDumpFlags);
+
+IMG_INTERNAL void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
+								IMG_UINT32		ui32CmdSize,
+								IMG_UINT32		ui32PDumpFlags);
+
+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB);
+
+PVRSRV_ERROR RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB            *psClientCCB,
+                                    IMG_UINT32                ui32ClientFenceCount,
+                                    PRGXFWIF_UFO_ADDR         *pauiFenceUFOAddress,
+                                    IMG_UINT32                *paui32FenceValue,
+                                    IMG_UINT32                ui32ClientUpdateCount,
+                                    PRGXFWIF_UFO_ADDR         *pauiUpdateUFOAddress,
+                                    IMG_UINT32                *paui32UpdateValue,
+                                    IMG_UINT32                ui32ServerSyncCount,
+                                    IMG_UINT32                *paui32ServerSyncFlags,
+                                    IMG_UINT32                ui32ServerSyncFlagMask,
+                                    SERVER_SYNC_PRIMITIVE     **papsServerSyncs,
+                                    IMG_UINT32                ui32CmdSize,
+                                    IMG_PBYTE                 pui8DMCmd,
+                                    PRGXFWIF_TIMESTAMP_ADDR   *ppPreAddr,
+                                    PRGXFWIF_TIMESTAMP_ADDR   *ppPostAddr,
+                                    PRGXFWIF_UFO_ADDR         *ppRMWUFOAddr,
+                                    RGXFWIF_CCB_CMD_TYPE      eType,
+                                    IMG_UINT32                ui32ExtJobRef,
+                                    IMG_UINT32                ui32IntJobRef,
+                                    IMG_UINT32                ui32PDumpFlags,
+                                    RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
+                                    IMG_CHAR                  *pszCommandName,
+                                    RGX_CCB_CMD_HELPER_DATA   *psCmdHelperData,
+									IMG_DEV_VIRTADDR		  sRobustnessResetReason);
+
+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
+									   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData);
+
+void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
+							   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+							   const IMG_CHAR *pcszDMName,
+							   IMG_UINT32 ui32CtxAddr);
+
+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount,
+								   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData);
+
+IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+                                        IMG_UINT32              ui32Cmdindex);
+
+IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData);
+
+void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+				RGX_CLIENT_CCB  *psCurrentClientCCB,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile);
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP)
+void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+			PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+			RGX_CLIENT_CCB *psCurrentClientCCB,
+			DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile);
+#endif
+
+PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB  *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM);
+#endif /* __RGXCCB_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxcompute.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxcompute.c
new file mode 100644
index 0000000..0a1b77e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxcompute.c
@@ -0,0 +1,1103 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Compute routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Compute routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "srvkm.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxcompute.h"
+#include "rgx_bvnc_defs_km.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "rgxtimerquery.h"
+#include "htbuffer.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_memallocflags.h"
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+#endif/* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_CMP_UFO_DUMP	0
+
+//#define CMP_CHECKPOINT_DEBUG 1
+
+#if defined(CMP_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
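+
+/* The double parentheses let CHKPT_DBG forward a complete PVR_DPF argument
+   list through a single macro parameter, e.g.
+     CHKPT_DBG((PVR_DBG_ERROR, "count=%u", ui32Count));
+   expands to PVR_DPF((PVR_DBG_ERROR, "count=%u", ui32Count)) when
+   CMP_CHECKPOINT_DEBUG is defined, and to nothing otherwise. */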
+
+struct _RGX_SERVER_COMPUTE_CONTEXT_ {
+	PVRSRV_DEVICE_NODE			*psDeviceNode;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	DEVMEM_MEMDESC				*psFWFrameworkMemDesc;
+	DEVMEM_MEMDESC				*psFWComputeContextStateMemDesc;
+	PVRSRV_CLIENT_SYNC_PRIM		*psSync;
+	DLLIST_NODE					sListNode;
+	SYNC_ADDR_LIST				sSyncAddrListFence;
+	SYNC_ADDR_LIST				sSyncAddrListUpdate;
+	ATOMIC_T					hJobId;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POS_LOCK                    		 hLock;
+#endif
+};
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA			*psConnection,
+											 PVRSRV_DEVICE_NODE			*psDeviceNode,
+											 IMG_UINT32					ui32Priority,
+											 IMG_UINT32					ui32FrameworkCommandSize,
+											 IMG_PBYTE					pbyFrameworkCommand,
+											 IMG_HANDLE					hMemCtxPrivData,
+											 IMG_DEV_VIRTADDR			sServicesSignalAddr,
+											 RGX_SERVER_COMPUTE_CONTEXT	**ppsComputeContext)
+{
+	PVRSRV_RGXDEV_INFO 			*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC				*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_SERVER_COMPUTE_CONTEXT	*psComputeContext;
+	RGX_COMMON_CONTEXT_INFO		sInfo;
+	PVRSRV_ERROR				eError = PVRSRV_OK;
+
+	/* Prepare cleanup struct */
+	*ppsComputeContext = NULL;
+	psComputeContext = OSAllocZMem(sizeof(*psComputeContext));
+	if (psComputeContext == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psComputeContext->hLock, LOCK_TYPE_NONE);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+									__func__,
+									PVRSRVGetErrorStringKM(eError)));
+		goto fail_createlock;
+	}
+#endif
+
+	psComputeContext->psDeviceNode = psDeviceNode;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psComputeContext->psSync,
+						   "compute cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto fail_syncalloc;
+	}
+
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	*/
+	PDUMPCOMMENT("Allocate RGX firmware compute context suspend state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							  sizeof(RGXFWIF_COMPUTECTX_STATE),
+							  RGX_FWCOMCTX_ALLOCFLAGS,
+							  "FwComputeContextState",
+							  &psComputeContext->psFWComputeContextStateMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+				eError));
+		goto fail_contextsuspendalloc;
+	}
+
+	/*
+	 * Create the FW framework buffer
+	 */
+	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+										&psComputeContext->psFWFrameworkMemDesc,
+										ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate firmware GPU framework state (%u)",
+				eError));
+		goto fail_frameworkcreate;
+	}
+
+	/* Copy the Framework client data into the framework buffer */
+	eError = PVRSRVRGXFrameworkCopyCommand(psComputeContext->psFWFrameworkMemDesc,
+										   pbyFrameworkCommand,
+										   ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to populate the framework buffer (%u)",
+				eError));
+		goto fail_frameworkcopy;
+	}
+	
+	sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc;
+
+	if((psDevInfo->sDevFeatureCfg.ui32CtrlStreamFormat == 2) && \
+			(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK))
+	{
+		sInfo.psResumeSignalAddr = &sServicesSignalAddr;
+	}else
+	{
+		PVR_UNREFERENCED_PARAMETER(sServicesSignalAddr);
+	}
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 REQ_TYPE_CDM,
+									 RGXFWIF_DM_CDM,
+									 NULL,
+									 0,
+									 psFWMemContextMemDesc,
+									 psComputeContext->psFWComputeContextStateMemDesc,
+									 RGX_CDM_CCB_SIZE_LOG2,
+									 ui32Priority,
+									 &sInfo,
+									 &psComputeContext->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+	SyncAddrListInit(&psComputeContext->sSyncAddrListFence);
+	SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate);
+
+	OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+
+	*ppsComputeContext = psComputeContext;
+	return PVRSRV_OK;
+
+fail_contextalloc:
+fail_frameworkcopy:
+	DevmemFwFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+	DevmemFwFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc);
+fail_contextsuspendalloc:
+	SyncPrimFree(psComputeContext->psSync);
+fail_syncalloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psComputeContext->hLock);
+fail_createlock:
+#endif
+	OSFreeMem(psComputeContext);
+	return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+	PVRSRV_ERROR				eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode,
+											  psComputeContext->psServerCommonContext,
+											  psComputeContext->psSync,
+											  RGXFWIF_DM_CDM,
+											  PDUMP_FLAGS_NONE);
+
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				__FUNCTION__,
+				PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* ... it has so we can free its resources */
+
+	OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+	dllist_remove_node(&(psComputeContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+
+	FWCommonContextFree(psComputeContext->psServerCommonContext);
+	DevmemFwFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+	DevmemFwFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc);
+	SyncPrimFree(psComputeContext->psSync);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psComputeContext->hLock);
+#endif
+	OSFreeMem(psComputeContext);
+
+	return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT	*psComputeContext,
+								IMG_UINT32					ui32ClientCacheOpSeqNum,
+								IMG_UINT32					ui32ClientFenceCount,
+								SYNC_PRIMITIVE_BLOCK		**pauiClientFenceUFOSyncPrimBlock,
+								IMG_UINT32					*paui32ClientFenceSyncOffset,
+								IMG_UINT32					*paui32ClientFenceValue,
+								IMG_UINT32					ui32ClientUpdateCount,
+								SYNC_PRIMITIVE_BLOCK		**pauiClientUpdateUFOSyncPrimBlock,
+								IMG_UINT32					*paui32ClientUpdateSyncOffset,
+								IMG_UINT32					*paui32ClientUpdateValue,
+								IMG_UINT32					ui32ServerSyncPrims,
+								IMG_UINT32					*paui32ServerSyncFlags,
+								SERVER_SYNC_PRIMITIVE		**pasServerSyncs,
+								PVRSRV_FENCE				iCheckFence,
+								PVRSRV_TIMELINE				iUpdateTimeline,
+								PVRSRV_FENCE				*piUpdateFence,
+								IMG_CHAR					pszUpdateFenceName[32],
+								IMG_UINT32					ui32CmdSize,
+								IMG_PBYTE					pui8DMCmd,
+								IMG_UINT32					ui32PDumpFlags,
+								IMG_UINT32					ui32ExtJobRef,
+								IMG_DEV_VIRTADDR			sRobustnessResetReason)
+{
+	RGXFWIF_KCCB_CMD		sCmpKCCBCmd;
+	RGX_CCB_CMD_HELPER_DATA	asCmdHelperData[1];
+	PVRSRV_ERROR			eError;
+	PVRSRV_ERROR			eError2;
+	IMG_UINT32				i;
+	IMG_UINT32				ui32CDMCmdOffset = 0;
+	IMG_UINT32				ui32JobId;
+	IMG_UINT32				ui32FWCtx;
+
+	PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+	PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+	PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+
+	IMG_UINT32 ui32IntClientFenceCount = 0;
+	PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+	IMG_UINT32 *paui32IntFenceValue = NULL;
+	IMG_UINT32 ui32IntClientUpdateCount = 0;
+	PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+	IMG_UINT32 *paui32IntUpdateValue = NULL;
+	PVRSRV_FENCE  iUpdateFence = PVRSRV_FENCE_INVALID;
+	IMG_UINT32               uiCheckFenceUID = 0;
+	IMG_UINT32               uiUpdateFenceUID = 0;
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/* Android fd sync update info */
+	struct pvr_sync_append_data *psFDFenceData = NULL;
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+	PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+	IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+	void *pvUpdateFenceFinaliseData = NULL;
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	if (iUpdateTimeline >= 0 && !piUpdateFence)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#else /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+	if (iUpdateTimeline >= 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: Providing update timeline (%d) in non-supporting driver",
+			__func__, iUpdateTimeline));
+	}
+	if (iCheckFence >= 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: Providing check fence (%d) in non-supporting driver",
+			__func__, iCheckFence));
+	}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
+	/* Ensure the string is null-terminated (Required for safety) */
+	pszUpdateFenceName[31] = '\0';
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psComputeContext->hLock);
+#endif
+
+	ui32JobId = OSAtomicIncrement(&psComputeContext->hJobId);
+
+	ui32IntClientFenceCount = ui32ClientFenceCount;
+	eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence,
+									ui32ClientFenceCount,
+									pauiClientFenceUFOSyncPrimBlock,
+									paui32ClientFenceSyncOffset);
+	if(eError != PVRSRV_OK)
+	{
+		goto err_populate_sync_addr_list;
+	}
+	if (ui32IntClientFenceCount && !pauiIntFenceUFOAddress)
+	{
+		pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+	}
+	paui32IntFenceValue = paui32ClientFenceValue;
+
+	ui32IntClientUpdateCount = ui32ClientUpdateCount;
+
+	eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate,
+									ui32ClientUpdateCount,
+									pauiClientUpdateUFOSyncPrimBlock,
+									paui32ClientUpdateSyncOffset);
+	if(eError != PVRSRV_OK)
+	{
+		goto err_populate_sync_addr_list;
+	}
+	if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress)
+	{
+		pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+	}
+	paui32IntUpdateValue = paui32ClientUpdateValue;
+
+	/* Sanity check the server fences */
+	for (i=0;i<ui32ServerSyncPrims;i++)
+	{
+		if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on CDM) must fence", __FUNCTION__));
+			eError = PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+			goto err_populate_sync_addr_list;
+		}
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	if (iCheckFence >= 0 || iUpdateTimeline >= 0)
+	{
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		eError =
+		  pvr_sync_append_fences(pszUpdateFenceName,
+		                         iCheckFence,
+		                         iUpdateTimeline,
+		                         ui32IntClientUpdateCount,
+		                         pauiIntUpdateUFOAddress,
+		                         paui32IntUpdateValue,
+		                         ui32IntClientFenceCount,
+		                         pauiIntFenceUFOAddress,
+		                         paui32IntFenceValue,
+		                         &psFDFenceData);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_syncinit;
+		}
+		pvr_sync_get_updates(psFDFenceData, &ui32IntClientUpdateCount,
+			&pauiIntUpdateUFOAddress, &paui32IntUpdateValue);
+		pvr_sync_get_checks(psFDFenceData, &ui32IntClientFenceCount,
+			&pauiIntFenceUFOAddress, &paui32IntFenceValue);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __FUNCTION__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+		/* Resolve the sync checkpoints that make up the input fence */
+		eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext,
+											iCheckFence,
+											&ui32FenceSyncCheckpointCount,
+											&apsFenceSyncCheckpoints,
+		                                    &uiCheckFenceUID);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __FUNCTION__, eError));
+			goto fail_resolve_input_fence;
+		}
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __FUNCTION__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(CMP_CHECKPOINT_DEBUG)
+		if (ui32FenceSyncCheckpointCount > 0)
+		{
+			IMG_UINT32 ii;
+			for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+			{
+				PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints +  ii);
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __FUNCTION__, ii, (void*)psNextCheckpoint));
+			}
+		}
+#endif
+		/* Create the output fence (if required) */
+		if (piUpdateFence)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d,  psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __FUNCTION__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+			eError = SyncCheckpointCreateFence(psComputeContext->psDeviceNode,
+			                                   pszUpdateFenceName,
+											   iUpdateTimeline,
+											   psComputeContext->psDeviceNode->hSyncCheckpointContext,
+											   &iUpdateFence,
+											   &uiUpdateFenceUID,
+											   &pvUpdateFenceFinaliseData,
+											   &psUpdateSyncCheckpoint,
+											   (void*)&psFenceTimelineUpdateSync,
+											   &ui32FenceTimelineUpdateValue);
+			if (eError != PVRSRV_OK)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __FUNCTION__, eError));
+				goto fail_create_output_fence;
+			}
+
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __FUNCTION__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __FUNCTION__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync));
+			/* Append the sync prim update for the timeline (if required) */
+			if (psFenceTimelineUpdateSync)
+			{
+				IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+				/* Allocate memory to hold the list of update values (including our timeline update) */
+				pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+				if (!pui32IntAllocatedUpdateValues)
+				{
+					/* Failed to allocate memory */
+					eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto fail_alloc_update_values_mem;
+				}
+				OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+				/* Copy the update values into the new memory, then append our timeline update value */
+				OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+#if defined(CMP_CHECKPOINT_DEBUG)
+				if (ui32IntClientUpdateCount > 0)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __FUNCTION__, ui32IntClientUpdateCount));
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Now set the additional update value */
+				pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+				*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+				ui32IntClientUpdateCount++;
+				/* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
+				paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __FUNCTION__,  (void*)psFenceTimelineUpdateSync));
+				/* Now append the timeline sync prim addr to the compute context update list */
+				SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate,
+				                           psFenceTimelineUpdateSync);
+#if defined(CMP_CHECKPOINT_DEBUG)
+				if (ui32IntClientUpdateCount > 0)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __FUNCTION__, ui32IntClientUpdateCount));
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+				paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
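+				/* At this point the update list holds the caller's updates
+				   followed by one extra entry for the timeline sync prim, and
+				   paui32IntUpdateValue points at the matching value array
+				   (ui32IntClientUpdateCount now counts both). */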
+			}
+		}
+
+		/* Append the checks (from input fence) */
+		if (ui32FenceSyncCheckpointCount > 0)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __FUNCTION__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence));
+#if defined(CMP_CHECKPOINT_DEBUG)
+			if (ui32IntClientUpdateCount > 0)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+			SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence,
+										  ui32FenceSyncCheckpointCount,
+										  apsFenceSyncCheckpoints);
+			if (!pauiIntFenceUFOAddress)
+			{
+				pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+			}
+			ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+		}
+#if defined(CMP_CHECKPOINT_DEBUG)
+		if (ui32IntClientUpdateCount > 0)
+		{
+			IMG_UINT32 iii;
+			IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue;
+
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Dumping %d update values (paui32IntUpdateValue=<%p>)...", __FUNCTION__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue));
+			for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: paui32IntUpdateValue[%d] = <%p>", __FUNCTION__, iii, (void*)pui32Tmp));
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __FUNCTION__, iii, *pui32Tmp));
+				pui32Tmp++;
+			}
+		}
+#endif
+
+		if (psUpdateSyncCheckpoint)
+		{
+			/* Append the update (from output fence) */
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __FUNCTION__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint));
+			SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
+										  1,
+										  &psUpdateSyncCheckpoint);
+			if (!pauiIntUpdateUFOAddress)
+			{
+				pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+			}
+			ui32IntClientUpdateCount++;
+#if defined(CMP_CHECKPOINT_DEBUG)
+			if (ui32IntClientUpdateCount > 0)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __FUNCTION__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount));
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __FUNCTION__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
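+
+	/* From this point, the fence list holds the caller's fences plus any
+	   checkpoints resolved from iCheckFence, and the update list holds the
+	   caller's updates plus the output fence checkpoint (and the timeline
+	   sync prim update, when one was created). */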
+
+#if (ENABLE_CMP_UFO_DUMP == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: dumping Compute (CDM) fence/updates syncs...", __FUNCTION__));
+		{
+			IMG_UINT32 ii;
+			PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+			IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+			PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+			IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+			/* Dump Fence syncs and Update syncs */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) fence syncs (&psComputeContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __FUNCTION__, ui32IntClientFenceCount, (void*)&psComputeContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+			for (ii=0; ii<ui32IntClientFenceCount; ii++)
+			{
+				if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+					pui32TmpIntFenceValue++;
+				}
+				psTmpIntFenceUFOAddress++;
+			}
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) update syncs (&psComputeContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __FUNCTION__, ui32IntClientUpdateCount, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+			for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+			{
+				if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __FUNCTION__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+					pui32TmpIntUpdateValue++;
+				}
+				psTmpIntUpdateUFOAddress++;
+			}
+		}
+#endif
+
+	RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psComputeContext->psDeviceNode->pvDevice,
+	                          & pPreAddr,
+	                          & pPostAddr,
+	                          & pRMWUFOAddr);
+
+	eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext),
+	                                ui32IntClientFenceCount,
+	                                pauiIntFenceUFOAddress,
+	                                paui32IntFenceValue,
+	                                ui32IntClientUpdateCount,
+	                                pauiIntUpdateUFOAddress,
+	                                paui32IntUpdateValue,
+	                                ui32ServerSyncPrims,
+	                                paui32ServerSyncFlags,
+	                                SYNC_FLAG_MASK_ALL,
+	                                pasServerSyncs,
+	                                ui32CmdSize,
+	                                pui8DMCmd,
+	                                & pPreAddr,
+	                                & pPostAddr,
+	                                & pRMWUFOAddr,
+	                                RGXFWIF_CCB_CMD_TYPE_CDM,
+	                                ui32ExtJobRef,
+	                                ui32JobId,
+	                                ui32PDumpFlags,
+	                                NULL,
+	                                "Compute",
+	                                asCmdHelperData,
+	                                sRobustnessResetReason);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdinit;
+	}
+
+	eError = RGXCmdHelperAcquireCmdCCB(IMG_ARR_NUM_ELEMS(asCmdHelperData),
+	                                   asCmdHelperData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdacquire;
+	}
+
+
+	/*
+		We should reserve space in the kernel CCB here and fill in the command
+		directly.
+		That way, if there isn't space in the kernel CCB we can return a retry
+		to the services client before committing to any operations.
+	*/
+
+	/*
+		We might only be kicking to flush out a padding packet, so only submit
+		the command if the command creation was successful.
+	*/
+	if (eError == PVRSRV_OK)
+	{
+		/*
+			All the required resources are ready at this point; we cannot fail
+			from here, so take the required server sync operations and commit
+			all the resources.
+		*/
+
+		ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
+		RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr);
+	}
+
+	/* Construct the kernel compute CCB command. */
+	sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+	sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
+	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+	ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr;
+
+	HTBLOGK(HTB_SF_MAIN_KICK_CDM,
+			sCmpKCCBCmd.uCmdData.sCmdKickData.psContext,
+			ui32CDMCmdOffset
+			);
+	RGX_HWPERF_HOST_ENQ(psComputeContext,
+	                    OSGetCurrentClientProcessIDKM(),
+	                    ui32FWCtx,
+	                    ui32ExtJobRef,
+	                    ui32JobId,
+	                    RGX_HWPERF_KICK_TYPE_CDM,
+	                    uiCheckFenceUID,
+	                    uiUpdateFenceUID,
+	                    NO_DEADLINE,
+	                    NO_CYCEST);
+
+	/*
+	 * Submit the compute command to the firmware.
+	 */
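+	/* RGXScheduleCommand returns PVRSRV_ERROR_RETRY when the command cannot
+	 * yet be queued (e.g. the kernel CCB is full); back off briefly and retry
+	 * until MAX_HW_TIME_US has elapsed. */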
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+									RGXFWIF_DM_CDM,
+									&sCmpKCCBCmd,
+									sizeof(sCmpKCCBCmd),
+									ui32ClientCacheOpSeqNum,
+									ui32PDumpFlags);
+		if (eError2 != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError2 != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickCDMKM failed to schedule kernel CCB command. (0x%x)", eError2));
+	}
+	else
+	{
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+		RGXHWPerfFTraceGPUEnqueueEvent(psComputeContext->psDeviceNode->pvDevice,
+				ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_CDM);
+#endif
+	}
+	/*
+	 * Now check eError (which may hold an error returned by our earlier call
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command
+	 * first, so we check it now...
+	 */
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdacquire;
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (iUpdateTimeline >= 0)
+	{
+		/* If we get here, this should never fail. If it does, it likely
+		 * implies a coding error above. */
+		iUpdateFence = pvr_sync_get_update_fd(psFDFenceData);
+		if (iUpdateFence < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get/install update sync fd",
+				__FUNCTION__));
+			/* If we fail here, we cannot roll back the syncs, as the HW already
+			 * holds references to resources they may be protecting in the kick,
+			 * so fall through. */
+
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_free_append_data;
+		}
+	}
+#if defined(NO_HARDWARE)
+	pvr_sync_nohw_complete_fences(psFDFenceData);
+#endif
+	/*
+		Free the merged sync memory if required
+	*/
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if defined(NO_HARDWARE)
+	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+	if (psUpdateSyncCheckpoint)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+	}
+	if (psFenceTimelineUpdateSync)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Updating NOHW sync prim<%p> to %d", __FUNCTION__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+	}
+	SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
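+	/* Hand the output update fence (or PVRSRV_FENCE_INVALID) back to the caller. */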
+	*piUpdateFence = iUpdateFence;
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_FENCE_INVALID))
+	{
+		SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psComputeContext->hLock);
+#endif
+
+	return PVRSRV_OK;
+
+fail_cmdinit:
+fail_cmdacquire:
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence);
+	SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+	if (iUpdateFence != PVRSRV_FENCE_INVALID)
+	{
+		SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+fail_create_output_fence:
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+fail_resolve_input_fence:
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+fail_syncinit:
+	pvr_sync_rollback_append_fences(psFDFenceData);
+fail_free_append_data:
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+err_populate_sync_addr_list:
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psComputeContext->hLock);
+#endif
+	return eError;
+}
+
+IMG_EXPORT PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+	RGXFWIF_KCCB_CMD sFlushCmd;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush");
+#endif
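+	/* Build an SLC flush (without invalidate) request scoped to this
+	 * context's CDM data master. */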
+	sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+	sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE;
+	sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE;
+	sFlushCmd.uCmdData.sSLCFlushInvalData.eDM = RGXFWIF_DM_CDM;
+	sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psComputeContext->hLock);
+#endif
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+									RGXFWIF_DM_CDM,
+									&sFlushCmd,
+									sizeof(sFlushCmd),
+									0,
+									PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Failed to schedule SLC flush command with error (%u)", eError));
+	}
+	else
+	{
+		/* Wait for the SLC flush to complete */
+		eError = RGXWaitForFWOp(psComputeContext->psDeviceNode->pvDevice,
+								RGXFWIF_DM_CDM,
+								psComputeContext->psSync,
+								PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Compute flush aborted with error (%u)", eError));
+		}
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psComputeContext->hLock);
+#endif
+	return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT  *psComputeContext)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
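+	/* Write offset update notification is only supported with control stream
+	 * format 2; reject the request otherwise. */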
+	if (2 == psDevInfo->sDevFeatureCfg.ui32CtrlStreamFormat)
+	{
+		RGXFWIF_KCCB_CMD  sKCCBCmd;
+		PVRSRV_ERROR      eError;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockAcquire(psComputeContext->hLock);
+#endif
+
+		/* Schedule the firmware command */
+		sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+		sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+										RGXFWIF_DM_CDM,
+										&sKCCBCmd,
+										sizeof(sKCCBCmd),
+										0,
+										PDUMP_FLAGS_NONE);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXNotifyComputeWriteOffsetUpdateKM: Failed to schedule the FW command %d (%s)",
+					eError, PVRSRVGETERRORSTRING(eError)));
+		}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psComputeContext->hLock);
+#endif
+		return eError;
+	}
+	else
+	{
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+	}
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                  PVRSRV_DEVICE_NODE * psDeviceNode,
+												  RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+												  IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psComputeContext->hLock);
+#endif
+
+	eError = ContextSetPriority(psComputeContext->psServerCommonContext,
+								psConnection,
+								psComputeContext->psDeviceNode->pvDevice,
+								ui32Priority,
+								RGXFWIF_DM_CDM);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psComputeContext->hLock);
+#endif
+	return eError;
+}
+
+/*
+ * PVRSRVRGXGetLastComputeContextResetReasonKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastComputeContextResetReasonKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+                                                         IMG_UINT32 *peLastResetReason,
+														 IMG_UINT32 *pui32LastResetJobRef)
+{
+	PVR_ASSERT(psComputeContext != NULL);
+	PVR_ASSERT(peLastResetReason != NULL);
+	PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+	*peLastResetReason = FWCommonContextGetLastResetReason(psComputeContext->psServerCommonContext,
+	                                                       pui32LastResetJobRef);
+
+	return PVRSRV_OK;
+}
+
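+/* Walk the device's compute context list under the read lock and dump debug
+ * state for each context's firmware common context. */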
+void CheckForStalledComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	DLLIST_NODE *psNode, *psNext;
+	OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+	dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+		DumpStalledFWCommonContext(psCurrentServerComputeCtx->psServerCommonContext,
+								   pfnDumpDebugPrintf, pvDumpDebugFile);
+	}
+	OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+}
+
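+/* Returns a bitmask with RGX_KICK_TYPE_DM_CDM set if any client compute CCB
+ * appears stalled. */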
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_UINT32 ui32ContextBitMask = 0;
+	DLLIST_NODE *psNode, *psNext;
+	OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+	dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+
+		if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->psServerCommonContext, RGX_KICK_TYPE_DM_CDM)
+			== PVRSRV_ERROR_CCCB_STALLED)
+		{
+			ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM;
+		}
+	}
+	OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+	return ui32ContextBitMask;
+}
+
+/******************************************************************************
+ End of file (rgxcompute.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxcompute.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxcompute.h
new file mode 100644
index 0000000..5befde5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxcompute.h
@@ -0,0 +1,183 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX compute functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX compute functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXCOMPUTE_H__)
+#define __RGXCOMPUTE_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+
+
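+/* Opaque handle to a server-side compute context; the full definition is
+ * private to the implementation. */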
+typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT;
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXCreateComputeContextKM
+
+ @Description
+	Server-side implementation of RGXCreateComputeContext
+
+ @Input psConnection - Connection data
+ @Input psDeviceNode - RGX device node
+ @Input ui32Priority - Priority for the new context
+ @Input ui32FrameworkRegisterSize - Size of the framework register state
+ @Input pbyFrameworkRegisters - Framework register state
+ @Input hMemCtxPrivData - Memory context private data
+ @Output ppsComputeContext - Created compute context
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA			*psConnection,
+											 PVRSRV_DEVICE_NODE			*psDeviceNode,
+											 IMG_UINT32					ui32Priority,
+											 IMG_UINT32					ui32FrameworkRegisterSize,
+											 IMG_PBYTE					pbyFrameworkRegisters,
+											 IMG_HANDLE					hMemCtxPrivData,
+											 IMG_DEV_VIRTADDR			sServicesSignalAddr,
+											 RGX_SERVER_COMPUTE_CONTEXT	**ppsComputeContext);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXDestroyComputeContextKM
+
+ @Description
+	Server-side implementation of RGXDestroyComputeContext
+
+ @Input psComputeContext - Compute context to destroy
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXKickCDMKM
+
+ @Description
+	Server-side implementation of RGXKickCDM
+
+ @Input psComputeContext - Compute context to kick the CDM command on
+ @Input iCheckFence - Fence to resolve and wait on before the kick
+ @Output piUpdateFence - Fence created for the kick's update syncs
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT	*psComputeContext,
+								IMG_UINT32					ui32ClientCacheOpSeqNum,
+								IMG_UINT32					ui32ClientFenceCount,
+								SYNC_PRIMITIVE_BLOCK		**pauiClientFenceUFOSyncPrimBlock,
+								IMG_UINT32					*paui32ClientFenceSyncOffset,
+								IMG_UINT32					*paui32ClientFenceValue,
+								IMG_UINT32					ui32ClientUpdateCount,
+								SYNC_PRIMITIVE_BLOCK		**pauiClientUpdateUFOSyncPrimBlock,
+								IMG_UINT32					*paui32ClientUpdateSyncOffset,
+								IMG_UINT32					*paui32ClientUpdateValue,
+								IMG_UINT32					ui32ServerSyncPrims,
+								IMG_UINT32					*paui32ServerSyncFlags,
+								SERVER_SYNC_PRIMITIVE		**pasServerSyncs,
+								PVRSRV_FENCE				iCheckFence,
+								PVRSRV_TIMELINE				iUpdateTimeline,
+								PVRSRV_FENCE				*piUpdateFence,
+								IMG_CHAR					pcszUpdateFenceName[32],
+								IMG_UINT32					ui32CmdSize,
+								IMG_PBYTE					pui8DMCmd,
+								IMG_UINT32					ui32PDumpFlags,
+								IMG_UINT32					ui32ExtJobRef,
+								IMG_DEV_VIRTADDR			sRobustnessResetReason);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXFlushComputeDataKM
+
+ @Description
+	Server-side implementation of RGXFlushComputeData
+
+ @Input psComputeContext - Compute context to flush
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+/*!
+*******************************************************************************
+
+ @Function	    PVRSRVRGXNotifyComputeWriteOffsetUpdateKM
+ @Description   Server-side implementation of RGXNotifyComputeWriteOffsetUpdate
+
+ @Input         psComputeContext - Compute context to notify
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+												  PVRSRV_DEVICE_NODE *psDeviceNode,
+												  RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+												  IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXGetLastComputeContextResetReasonKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+                                                         IMG_UINT32 *peLastResetReason,
+                                                         IMG_UINT32 *pui32LastResetJobRef);
+
+/* Debug - check if compute context is waiting on a fence */
+void CheckForStalledComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client compute contexts are stalled */
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXCOMPUTE_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxdebug.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxdebug.c
new file mode 100644
index 0000000..a057cbe
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxdebug.c
@@ -0,0 +1,4176 @@
+/*************************************************************************/ /*!
+@File
+@Title          Rgx debug information
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX debugging functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "rgxdefs_km.h"
+#include "rgxdevice.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "cache_km.h"
+#include "osfunc.h"
+
+#include "lists.h"
+
+#include "rgxdebug.h"
+#include "pvrversion.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "rgxutils.h"
+#include "tlstream.h"
+#include "rgxfwutils.h"
+#include "pvrsrv.h"
+#include "services_km.h"
+
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_utils.h"
+#include "rgx_fwif.h"
+#include "rgx_fwif_sf.h"
+#include "rgxfw_log_helper.h"
+
+#include "rgxta3d.h"
+#include "rgxcompute.h"
+#include "rgxtransfer.h"
+#include "rgxtdmtransfer.h"
+#include "rgxray.h"
+#include "rgxtimecorr.h"
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "devicemem_history_server.h"
+#endif
+#include "rgx_bvnc_defs_km.h"
+#define PVR_DUMP_DRIVER_INFO(x, y)										\
+	PVR_DUMPDEBUG_LOG("%s info: "										\
+					   "BuildOptions: 0x%08x "							\
+					   "BuildVersion: %d.%d "							\
+					   "BuildRevision: %8d "							\
+					   "BuildType: %s",									\
+					   (x),												\
+					   (y).ui32BuildOptions,							\
+					   PVRVERSION_UNPACK_MAJ((y).ui32BuildVersion),		\
+					   PVRVERSION_UNPACK_MIN((y).ui32BuildVersion),		\
+					   (y).ui32BuildRevision,							\
+					   (BUILD_TYPE_DEBUG == (y).ui32BuildType) ? "debug" : "release")
+
+
+#define RGX_DEBUG_STR_SIZE		(150)
+#define MAX_FW_DESCRIPTION_LENGTH	(500u)
+
+#define RGX_CR_BIF_CAT_BASE0                              (0x1200U)
+#define RGX_CR_BIF_CAT_BASE1                              (0x1208U)
+
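+/* Register offset of the Nth BIF page catalogue base; the bases are laid out
+ * at a fixed stride from BASE0. */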
+#define RGX_CR_BIF_CAT_BASEN(n) \
+	RGX_CR_BIF_CAT_BASE0 + \
+	((RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0) * n)
+
+
+#define RGXDBG_BIF_IDS \
+	X(BIF0)\
+	X(BIF1)\
+	X(TEXAS_BIF)\
+	X(DPX_BIF)
+
+#define RGXDBG_SIDEBAND_TYPES \
+	X(META)\
+	X(TLA)\
+	X(DMA)\
+	X(VDMM)\
+	X(CDM)\
+	X(IPP)\
+	X(PM)\
+	X(TILING)\
+	X(MCU)\
+	X(PDS)\
+	X(PBE)\
+	X(VDMS)\
+	X(IPF)\
+	X(ISP)\
+	X(TPF)\
+	X(USCS)\
+	X(PPP)\
+	X(VCE)\
+	X(TPF_CPF)\
+	X(IPF_CPF)\
+	X(FBCDC)
+
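+/* The X-macro lists above are expanded into matching enums and name tables
+ * below, keeping the IDs and their printable names in step. */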
+typedef enum
+{
+#define X(NAME) RGXDBG_##NAME,
+	RGXDBG_BIF_IDS
+#undef X
+} RGXDBG_BIF_ID;
+
+typedef enum
+{
+#define X(NAME) RGXDBG_##NAME,
+	RGXDBG_SIDEBAND_TYPES
+#undef X
+} RGXDBG_SIDEBAND_TYPE;
+
+static const IMG_CHAR *const pszPowStateName[] =
+{
+#define X(NAME)	#NAME,
+	RGXFWIF_POW_STATES
+#undef X
+};
+
+static const IMG_CHAR *const pszBIFNames[] =
+{
+#define X(NAME)	#NAME,
+	RGXDBG_BIF_IDS
+#undef X
+};
+
+typedef struct _IMG_FLAGS2DESC_
+{
+	IMG_UINT32	uiFlag;
+	IMG_CHAR	*pszLabel;
+} IMG_FLAGS2DESC;
+
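+/* Tables mapping configuration/state flag bits to short labels for the
+ * human-readable summaries printed in debug dumps. */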
+const IMG_FLAGS2DESC asCSW2Description[] =
+{
+	{RGXFWIF_INICFG_CTXSWITCH_TA_EN, "TA; "},
+	{RGXFWIF_INICFG_CTXSWITCH_3D_EN, "3D; "},
+	{RGXFWIF_INICFG_CTXSWITCH_CDM_EN, "CDM; "},
+	{RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, "Random; "},
+	{RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, "SoftReset; "},
+	{RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX, "VDM CS INDEX mode; "},
+	{RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE, "VDM CS INSTANCE mode; "},
+	{RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST, "VDM CS LIST mode; "},
+	{RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, "Fast CSW profile; "},
+	{RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, "Medium CSW profile; "},
+	{RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, "Slow CSW profile; "},
+	{RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, "No Delay CSW profile; "}
+};
+
+const IMG_FLAGS2DESC asMisc2Description[] =
+{
+	{RGXFWIF_INICFG_USE_EXTENDED, "Use extended; "},
+	{RGXFWIF_INICFG_POW_RASCALDUST, "Power Rascal/Dust; "},
+	{RGXFWIF_INICFG_HWPERF_EN, "HwPerf EN; "},
+	{RGXFWIF_INICFG_HWR_EN, "HWR EN; "},
+	{RGXFWIF_INICFG_CHECK_MLIST_EN, "Check MList; "},
+	{RGXFWIF_INICFG_DISABLE_CLKGATING_EN, "ClockGating Off; "},
+	{RGXFWIF_INICFG_POLL_COUNTERS_EN, "Poll Counters; "},
+	{RGXFWIF_INICFG_SHG_BYPASS_EN, "SHG Bypass; "},
+	{RGXFWIF_INICFG_RTU_BYPASS_EN, "RTU Bypass; "},
+	{RGXFWIF_INICFG_REGCONFIG_EN, "Register Config; "},
+	{RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, "Assert on OOM; "},
+	{RGXFWIF_INICFG_HWP_DISABLE_FILTER, "HWP Filter Off; "},
+	{RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN, "Custom PerfTimer; "},
+	{RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN, "CDM Random kill; "},
+	{RGXFWIF_INICFG_DISABLE_DM_OVERLAP, "DM Overlap Off; "},
+	{RGXFWIF_INICFG_METAT1_MAIN, "Main; "},
+	{RGXFWIF_INICFG_METAT1_DUMMY, "Dummy; "},
+	{RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, "Assert on HWR; "},
+	{RGXFWIF_INICFG_WORKEST_V1, "Workload Estim v1; "},
+	{RGXFWIF_INICFG_WORKEST_V2, "Workload Estim v2; "},
+	{RGXFWIF_INICFG_PDVFS_V1, "PDVFS v1; "},
+	{RGXFWIF_INICFG_PDVFS_V2, "PDVFS v2; "}
+};
+
+const IMG_FLAGS2DESC asHwrState2Description[] =
+{
+	{RGXFWIF_HWR_HARDWARE_OK, "HWR OK; "},
+	{RGXFWIF_HWR_ANALYSIS_DONE, "Analysis done; "},
+	{RGXFWIF_HWR_GENERAL_LOCKUP, "General lockup; "},
+	{RGXFWIF_HWR_DM_RUNNING_OK, "DM running ok; "},
+	{RGXFWIF_HWR_DM_STALLING, "DM stalling; "},
+};
+
+const IMG_FLAGS2DESC asDmState2Description[] =
+{
+	{RGXFWIF_DM_STATE_WORKING, "working; "},
+	{RGXFWIF_DM_STATE_READY_FOR_HWR, "ready for hwr; "},
+	{RGXFWIF_DM_STATE_NEEDS_SKIP, "needs skip; "},
+	{RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, "needs PR cleanup; "},
+	{RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, "needs trace clear; "},
+	{RGXFWIF_DM_STATE_GUILTY_LOCKUP, "guilty lockup; "},
+	{RGXFWIF_DM_STATE_INNOCENT_LOCKUP, "innocent lockup; "},
+	{RGXFWIF_DM_STATE_GUILTY_OVERRUNING, "guilty overrunning; "},
+	{RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, "innocent overrunning; "},
+};
+
+#if !defined(NO_HARDWARE)
+/* Translation of MIPS exception encoding */
+static const IMG_CHAR * const apszMIPSExcCodes[32] =
+{
+	"Interrupt",
+	"TLB modified exception",
+	"TLB exception (load/instruction fetch)",
+	"TLB exception (store)",
+	"Address error exception (load/instruction fetch)",
+	"Address error exception (store)",
+	"Bus error exception (instruction fetch)",
+	"Bus error exception (load/store)",
+	"Syscall exception",
+	"Breakpoint exception",
+	"Reserved instruction exception",
+	"Coprocessor Unusable exception",
+	"Arithmetic Overflow exception",
+	"Trap exception",
+	NULL,
+	NULL,
+	"Implementation-Specific Exception 1 (COP2)",
+	"CorExtend Unusable",
+	"Coprocessor 2 exceptions",
+	"TLB Read-Inhibit",
+	"TLB Execute-Inhibit",
+	NULL,
+	NULL,
+	"Reference to WatchHi/WatchLo address",
+	"Machine check",
+	NULL,
+	"DSP Module State Disabled exception",
+	NULL,
+	NULL,
+	NULL,
+	/* Can only happen in MIPS debug mode */
+	"Parity error",
+	NULL
+};
+#endif
+
+typedef struct _RGXMIPSFW_C0_DEBUG_TBL_ENTRY_
+{
+    IMG_UINT32 ui32Mask;
+    const IMG_CHAR * pszExplanation;
+} RGXMIPSFW_C0_DEBUG_TBL_ENTRY;
+
+#if !defined(NO_HARDWARE)
+static const RGXMIPSFW_C0_DEBUG_TBL_ENTRY sMIPS_C0_DebugTable[] =
+{
+    { RGXMIPSFW_C0_DEBUG_DSS,      "Debug single-step exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DBP,      "Debug software breakpoint exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DDBL,     "Debug data break exception occurred on a load" },
+    { RGXMIPSFW_C0_DEBUG_DDBS,     "Debug data break exception occurred on a store" },
+    { RGXMIPSFW_C0_DEBUG_DIB,      "Debug instruction break exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DINT,     "Debug interrupt exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DIBIMPR,  "Imprecise debug instruction break exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DDBLIMPR, "Imprecise debug data break load exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DDBSIMPR, "Imprecise debug data break store exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_IEXI,     "Imprecise error exception inhibit controls exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DBUSEP,   "Data access Bus Error exception pending" },
+    { RGXMIPSFW_C0_DEBUG_CACHEEP,  "Imprecise Cache Error pending" },
+    { RGXMIPSFW_C0_DEBUG_MCHECKP,  "Imprecise Machine Check exception pending" },
+    { RGXMIPSFW_C0_DEBUG_IBUSEP,   "Instruction fetch Bus Error exception pending" },
+    { RGXMIPSFW_C0_DEBUG_DBD,      "Debug exception occurred in branch delay slot" }
+};
+#endif
+
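+/* Read/write a firmware address through the META slave port, using the first
+ * device in the global device list. */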
+IMG_UINT32 RGXReadWithSP(IMG_UINT32 ui32FWAddr)
+{
+	PVRSRV_DATA        *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	PVRSRV_RGXDEV_INFO *psDevInfo    = psDeviceNode->pvDevice;
+	IMG_UINT32         ui32Value     = 0;
+	PVRSRV_ERROR       eError;
+
+	eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32Value);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXReadWithSP error: %s", PVRSRVGetErrorStringKM(eError)));
+	}
+
+	return ui32Value;
+}
+
+void RGXWriteWithSP(IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value)
+{
+	PVRSRV_DATA        *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	PVRSRV_RGXDEV_INFO *psDevInfo    = psDeviceNode->pvDevice;
+	PVRSRV_ERROR       eError;
+
+	eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXWriteMETAAddr error: %s", PVRSRVGetErrorStringKM(eError)));
+	}
+}
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
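+/* Compare the host CPU's view of a FW image allocation, word by word, with
+ * what the META core reads back through the slave port. */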
+static PVRSRV_ERROR _ValidateFWImageWithSP(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                           DEVMEM_MEMDESC *psMemDesc,
+                                           RGXFWIF_DEV_VIRTADDR *psFWAddr,
+                                           const IMG_CHAR *pszDesc)
+{
+	PMR *psFWImagePMR;
+	IMG_UINT32 *pui32HostCodeAddr;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32FWCodeAddr, ui32FWImageLen, ui32Value, i;
+	IMG_HANDLE hFWImage;
+
+	eError = DevmemServerGetImportHandle(psMemDesc,
+	                                     (void **)&psFWImagePMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "ValidateFWImageWithSP: Error getting %s PMR (%u)",
+		         pszDesc,
+		         eError));
+		return eError;
+	}
+
+	/* Get a pointer to the FW code and the allocation size */
+	eError = PMRAcquireKernelMappingData(psFWImagePMR,
+	                                     0,
+	                                     0, /* Map whole PMR */
+	                                     (void**)&pui32HostCodeAddr,
+	                                     (size_t*)&ui32FWImageLen,
+	                                     &hFWImage);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "ValidateFWImageWithSP: Acquire mapping for %s failed (%u)",
+		         pszDesc,
+		         eError));
+		return eError;
+	}
+
+	ui32FWCodeAddr = psFWAddr->ui32Addr;
+	ui32FWImageLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */
+
+	for (i = 0; i < ui32FWImageLen; i++)
+	{
+		eError = RGXReadMETAAddr(psDevInfo, ui32FWCodeAddr, &ui32Value);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "ValidateFWImageWithSP error: %s",
+			         PVRSRVGetErrorStringKM(eError)));
+			goto validatefwimage_release;
+		}
+
+		PVR_DPF((PVR_DBG_VERBOSE,
+		         "0x%x: CPU 0x%08x, FW 0x%08x",
+		         i * 4, pui32HostCodeAddr[i], ui32Value));
+
+		if (pui32HostCodeAddr[i] != ui32Value)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "ValidateFWImageWithSP: Mismatch while validating %s at offset 0x%x: CPU 0x%08x, FW 0x%08x",
+			         pszDesc,
+			         i * 4, pui32HostCodeAddr[i], ui32Value));
+			eError = PVRSRV_ERROR_FW_IMAGE_MISMATCH;
+			goto validatefwimage_release;
+		}
+
+		ui32FWCodeAddr += 4;
+	}
+
+	PVR_DPF((PVR_DBG_ERROR,
+	         "ValidateFWImageWithSP: Match between Host and Meta views of the %s",
+	         pszDesc));
+
+validatefwimage_release:
+	PMRReleaseKernelMappingData(psFWImagePMR, hFWImage);
+
+	return eError;
+}
+#endif
+
+PVRSRV_ERROR ValidateFWImageWithSP(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
+	RGXFWIF_DEV_VIRTADDR sFWAddr;
+	PVRSRV_ERROR eError;
+
+#define VALIDATEFWIMAGEWITHSP_NUM_CHECKS    (1U)
+	static IMG_UINT32 ui32NumChecks = 0;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	if (ui32NumChecks == VALIDATEFWIMAGEWITHSP_NUM_CHECKS)
+	{
+		return PVRSRV_OK;
+	}
+	ui32NumChecks++;
+
+	if (psDevInfo->pvRegsBaseKM == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "ValidateFWImageWithSP: RGX registers not mapped yet!"));
+		return PVRSRV_ERROR_BAD_MAPPING;
+	}
+
+	sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR;
+	eError = _ValidateFWImageWithSP(psDevInfo,
+	                                psDevInfo->psRGXFWCodeMemDesc,
+	                                &sFWAddr,
+	                                "FW code");
+	if (eError != PVRSRV_OK) return eError;
+
+	if (0 != psDevInfo->sDevFeatureCfg.ui32MCMS)
+	{
+		RGXSetFirmwareAddress(&sFWAddr,
+							  psDevInfo->psRGXFWCorememMemDesc,
+							  0, RFW_FWADDR_NOREF_FLAG);
+
+		eError = _ValidateFWImageWithSP(psDevInfo,
+		                                psDevInfo->psRGXFWCorememMemDesc,
+		                                &sFWAddr,
+		                                "FW coremem code");
+		if (eError != PVRSRV_OK) return eError;
+	}
+
+#else
+	PVR_UNREFERENCED_PARAMETER(psDevInfo);
+#endif
+
+	return PVRSRV_OK;
+}
+#endif /* defined(SUPPORT_EXTRA_METASP_DEBUG) */
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDecodePMPC
+
+ @Description
+
+ Return the name for the PM managed Page Catalogues
+
+ @Input ui32PC	 - Page Catalogue number
+
+ @Return   void
+
+******************************************************************************/
+static IMG_CHAR* _RGXDecodePMPC(IMG_UINT32 ui32PC)
+{
+	IMG_CHAR* pszPMPC = " (-)";
+
+	switch (ui32PC)
+	{
+		case 0x8: pszPMPC = " (PM-VCE0)"; break;
+		case 0x9: pszPMPC = " (PM-TE0)"; break;
+		case 0xA: pszPMPC = " (PM-ZLS0)"; break;
+		case 0xB: pszPMPC = " (PM-ALIST0)"; break;
+		case 0xC: pszPMPC = " (PM-VCE1)"; break;
+		case 0xD: pszPMPC = " (PM-TE1)"; break;
+		case 0xE: pszPMPC = " (PM-ZLS1)"; break;
+		case 0xF: pszPMPC = " (PM-ALIST1)"; break;
+	}
+
+	return pszPMPC;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_DPXDecodeBIFReqTags
+
+ @Description
+
+ Decode the BIF Tag ID and sideband data fields from DPX_CR_BIF_FAULT_BANK_REQ_STATUS regs
+
+ @Input eBankID	 			- BIF identifier
+ @Input ui32TagID           - Tag ID value
+ @Input ui32TagSB           - Tag Sideband data
+ @Output ppszTagID          - Decoded string from the Tag ID
+ @Output ppszTagSB          - Decoded string from the Tag SB
+ @Output pszScratchBuf      - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize  - Size of the provided buffer
+
+ @Return   void
+
+******************************************************************************/
+static void _DPXDecodeBIFReqTags(RGXDBG_BIF_ID	eBankID,
+								 IMG_UINT32		ui32TagID,
+								 IMG_UINT32		ui32TagSB,
+								 IMG_CHAR		**ppszTagID,
+								 IMG_CHAR		**ppszTagSB,
+								 IMG_CHAR		*pszScratchBuf,
+								 IMG_UINT32		ui32ScratchBufSize)
+{
+	/* default to unknown */
+	IMG_CHAR *pszTagID = "-";
+	IMG_CHAR *pszTagSB = "-";
+
+	PVR_ASSERT(eBankID == RGXDBG_DPX_BIF);
+	PVR_ASSERT(ppszTagID != NULL);
+
+	PVR_UNREFERENCED_PARAMETER(ui32TagSB);
+	PVR_UNREFERENCED_PARAMETER(pszScratchBuf);
+	PVR_UNREFERENCED_PARAMETER(ui32ScratchBufSize);
+
+	switch (ui32TagID)
+	{
+		case 0x0:
+		{
+			pszTagID = "MMU";
+			break;
+		}
+		case 0x1:
+		{
+			pszTagID = "RS_READ";
+			break;
+		}
+		case 0x2:
+		{
+			pszTagID = "RS_WRITE";
+			break;
+		}
+		case 0x3:
+		{
+			pszTagID = "RQ";
+			break;
+		}
+		case 0x4:
+		{
+			pszTagID = "PU";
+			break;
+		}
+	} /* switch(TagID) */
+
+	*ppszTagID = pszTagID;
+	*ppszTagSB = pszTagSB;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDecodeBIFReqTags
+
+ @Description
+
+ Decode the BIF Tag ID and sideband data fields from BIF_FAULT_BANK_REQ_STATUS regs
+
+ @Input eBankID	 			- BIF identifier
+ @Input ui32TagID           - Tag ID value
+ @Input ui32TagSB           - Tag Sideband data
+ @Output ppszTagID          - Decoded string from the Tag ID
+ @Output ppszTagSB          - Decoded string from the Tag SB
+ @Output pszScratchBuf      - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize  - Size of the provided buffer
+
+ @Return   void
+
+******************************************************************************/
+static void _RGXDecodeBIFReqTags(PVRSRV_RGXDEV_INFO	*psDevInfo,
+								 RGXDBG_BIF_ID	eBankID,
+								 IMG_UINT32		ui32TagID,
+								 IMG_UINT32		ui32TagSB,
+								 IMG_CHAR		**ppszTagID,
+								 IMG_CHAR		**ppszTagSB,
+								 IMG_CHAR		*pszScratchBuf,
+								 IMG_UINT32		ui32ScratchBufSize)
+{
+	/* default to unknown */
+	IMG_CHAR *pszTagID = "-";
+	IMG_CHAR *pszTagSB = "-";
+
+	PVR_ASSERT(ppszTagID != NULL);
+	PVR_ASSERT(ppszTagSB != NULL);
+
+	if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK) && (eBankID == RGXDBG_DPX_BIF))
+	{
+		_DPXDecodeBIFReqTags(eBankID, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB, pszScratchBuf, ui32ScratchBufSize);
+		return;
+	}
+
+	switch (ui32TagID)
+	{
+		case 0x0:
+		{
+			if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+			{
+				if (eBankID == RGXDBG_BIF0)
+				{
+					pszTagID = "VRDM";
+					switch (ui32TagSB)
+					{
+					case 0x0: pszTagSB = "Control Stream"; break;
+					case 0x1: pszTagSB = "SHF State"; break;
+					case 0x2: pszTagSB = "Index Data"; break;
+					case 0x4: pszTagSB = "Call Stack"; break;
+					case 0x8: pszTagSB = "Context State"; break;
+					}
+				}
+				else
+				{
+					pszTagID = "MMU";
+					switch (ui32TagSB)
+					{
+						case 0x0: pszTagSB = "Table"; break;
+						case 0x1: pszTagSB = "Directory"; break;
+						case 0x2: pszTagSB = "Catalogue"; break;
+					}
+				}
+			}
+			else
+			{
+				pszTagID = "MMU";
+				switch (ui32TagSB)
+				{
+					case 0x0: pszTagSB = "Table"; break;
+					case 0x1: pszTagSB = "Directory"; break;
+					case 0x2: pszTagSB = "Catalogue"; break;
+				}
+			}
+			break;
+		}
+		case 0x1:
+		{
+			pszTagID = "TLA";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Pixel data"; break;
+				case 0x1: pszTagSB = "Command stream data"; break;
+				case 0x2: pszTagSB = "Fence or flush"; break;
+			}
+			break;
+		}
+		case 0x2:
+		{
+			if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK) && (eBankID == RGXDBG_BIF0))
+			{
+				pszTagID = "SHF";
+			}
+			else
+			{
+				pszTagID = "HOST";
+			}
+			break;
+		}
+		case 0x3:
+		{
+			if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+			{
+				if (eBankID == RGXDBG_BIF0)
+				{
+					pszTagID = "SHG";
+				}
+			}
+			else if (0 == (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK))
+			{
+					pszTagID = "META";
+					switch (ui32TagSB)
+					{
+						case 0x0: pszTagSB = "DCache - Thread 0"; break;
+						case 0x1: pszTagSB = "ICache - Thread 0"; break;
+						case 0x2: pszTagSB = "JTag - Thread 0"; break;
+						case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+						case 0x4: pszTagSB = "DCache - Thread 1"; break;
+						case 0x5: pszTagSB = "ICache - Thread 1"; break;
+						case 0x6: pszTagSB = "JTag - Thread 1"; break;
+						case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+					}
+			}
+			else if (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_57596_BIT_MASK)
+			{
+				pszTagID = "TCU";
+			}
+			else
+			{
+				/* Unreachable code */
+				PVR_ASSERT(IMG_FALSE);
+			}
+			break;
+		}
+		case 0x4:
+		{
+			pszTagID = "USC";
+			OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+			           "Cache line %d", (ui32TagSB & 0x3f));
+			pszTagSB = pszScratchBuf;
+			break;
+		}
+		case 0x5:
+		{
+			if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK)
+			{
+				if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+				{
+					if (eBankID == RGXDBG_TEXAS_BIF)
+					{
+						pszTagID = "PBE";
+					}
+					else
+					{
+						pszTagID = "RPM";
+					}
+				}
+				else
+				{
+					pszTagID = "PBE";
+				}
+			}
+			else
+			{
+				pszTagID = "PBE";
+				break;
+			}
+			break;
+		}
+		case 0x6:
+		{
+			if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK)
+			{
+				if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+				{
+					if (eBankID == RGXDBG_TEXAS_BIF)
+					{
+						pszTagID = "ISP";
+						switch (ui32TagSB)
+						{
+							case 0x00: pszTagSB = "ZLS"; break;
+							case 0x20: pszTagSB = "Occlusion Query"; break;
+						}
+					}
+					else
+					{
+						pszTagID = "FBA";
+					}
+				}
+				else
+				{
+					pszTagID = "ISP";
+					switch (ui32TagSB)
+					{
+						case 0x00: pszTagSB = "ZLS"; break;
+						case 0x20: pszTagSB = "Occlusion Query"; break;
+					}
+				}
+			}
+			else
+			{
+				pszTagID = "ISP";
+				switch (ui32TagSB)
+				{
+					case 0x00: pszTagSB = "ZLS"; break;
+					case 0x20: pszTagSB = "Occlusion Query"; break;
+				}
+			}
+			break;
+		}
+		case 0x7:
+		{
+			if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK)
+			{
+				if (eBankID == RGXDBG_TEXAS_BIF)
+				{
+					pszTagID = "IPF";
+					switch (ui32TagSB)
+					{
+						case 0x0: pszTagSB = "CPF"; break;
+						case 0x1: pszTagSB = "DBSC"; break;
+						case 0x2:
+						case 0x4:
+						case 0x6:
+						case 0x8: pszTagSB = "Control Stream"; break;
+						case 0x3:
+						case 0x5:
+						case 0x7:
+						case 0x9: pszTagSB = "Primitive Block"; break;
+					}
+				}
+				else
+				{
+					pszTagID = "IPP";
+					switch (ui32TagSB)
+					{
+						case 0x0: pszTagSB = "Macrotile Header"; break;
+						case 0x1: pszTagSB = "Region Header"; break;
+					}
+				}
+			}
+			else if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PBE2_IN_XE_BIT_MASK/*RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_BIT_MASK*/)
+			{
+				pszTagID = "IPF";
+				switch (ui32TagSB)
+				{
+					case 0x0: pszTagSB = "Region Header"; break;
+					case 0x1: pszTagSB = "DBSC"; break;
+					case 0x2: pszTagSB = "CPF"; break;
+					case 0x3: pszTagSB = "Control Stream"; break;
+					case 0x4: pszTagSB = "Primitive Block"; break;
+				}
+			}
+			else
+			{
+				pszTagID = "IPF";
+				switch (ui32TagSB)
+				{
+					case 0x0: pszTagSB = "Macrotile Header"; break;
+					case 0x1: pszTagSB = "Region Header"; break;
+					case 0x2: pszTagSB = "DBSC"; break;
+					case 0x3: pszTagSB = "CPF"; break;
+					case 0x4:
+					case 0x6:
+					case 0x8: pszTagSB = "Control Stream"; break;
+					case 0x5:
+					case 0x7:
+					case 0x9: pszTagSB = "Primitive Block"; break;
+				}
+			}
+			break;
+		}
+		case 0x8:
+		{
+			pszTagID = "CDM";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Control Stream"; break;
+				case 0x1: pszTagSB = "Indirect Data"; break;
+				case 0x2: pszTagSB = "Event Write"; break;
+				case 0x3: pszTagSB = "Context State"; break;
+			}
+			break;
+		}
+		case 0x9:
+		{
+			pszTagID = "VDM";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Control Stream"; break;
+				case 0x1: pszTagSB = "PPP State"; break;
+				case 0x2: pszTagSB = "Index Data"; break;
+				case 0x4: pszTagSB = "Call Stack"; break;
+				case 0x8: pszTagSB = "Context State"; break;
+			}
+			break;
+		}
+		case 0xA:
+		{
+			pszTagID = "PM";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "PMA_TAFSTACK"; break;
+				case 0x1: pszTagSB = "PMA_TAMLIST"; break;
+				case 0x2: pszTagSB = "PMA_3DFSTACK"; break;
+				case 0x3: pszTagSB = "PMA_3DMLIST"; break;
+				case 0x4: pszTagSB = "PMA_PMCTX0"; break;
+				case 0x5: pszTagSB = "PMA_PMCTX1"; break;
+				case 0x6: pszTagSB = "PMA_MAVP"; break;
+				case 0x7: pszTagSB = "PMA_UFSTACK"; break;
+				case 0x8: pszTagSB = "PMD_TAFSTACK"; break;
+				case 0x9: pszTagSB = "PMD_TAMLIST"; break;
+				case 0xA: pszTagSB = "PMD_3DFSTACK"; break;
+				case 0xB: pszTagSB = "PMD_3DMLIST"; break;
+				case 0xC: pszTagSB = "PMD_PMCTX0"; break;
+				case 0xD: pszTagSB = "PMD_PMCTX1"; break;
+				case 0xF: pszTagSB = "PMD_UFSTACK"; break;
+				case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break;
+				case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break;
+				case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break;
+				case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break;
+				case 0x14: pszTagSB = "PMA_TAUFSTACK"; break;
+				case 0x15: pszTagSB = "PMA_3DUFSTACK"; break;
+				case 0x16: pszTagSB = "PMD_TAUFSTACK"; break;
+				case 0x17: pszTagSB = "PMD_3DUFSTACK"; break;
+				case 0x18: pszTagSB = "PMA_TAVFP"; break;
+				case 0x19: pszTagSB = "PMD_3DVFP"; break;
+				case 0x1A: pszTagSB = "PMD_TAVFP"; break;
+			}
+			break;
+		}
+		case 0xB:
+		{
+			pszTagID = "TA";
+			switch (ui32TagSB)
+			{
+				case 0x1: pszTagSB = "VCE"; break;
+				case 0x2: pszTagSB = "TPC"; break;
+				case 0x3: pszTagSB = "TE Control Stream"; break;
+				case 0x4: pszTagSB = "TE Region Header"; break;
+				case 0x5: pszTagSB = "TE Render Target Cache"; break;
+				case 0x6: pszTagSB = "TEAC Render Target Cache"; break;
+				case 0x7: pszTagSB = "VCE Render Target Cache"; break;
+				case 0x8: pszTagSB = "PPP Context State"; break;
+			}
+			break;
+		}
+		case 0xC:
+		{
+			pszTagID = "TPF";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "TPF0: Primitive Block"; break;
+				case 0x1: pszTagSB = "TPF0: Depth Bias"; break;
+				case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break;
+				case 0x3: pszTagSB = "CPF - Tables"; break;
+				case 0x4: pszTagSB = "TPF1: Primitive Block"; break;
+				case 0x5: pszTagSB = "TPF1: Depth Bias"; break;
+				case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break;
+				case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break;
+				case 0x8: pszTagSB = "TPF2: Primitive Block"; break;
+				case 0x9: pszTagSB = "TPF2: Depth Bias"; break;
+				case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break;
+				case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break;
+				case 0xC: pszTagSB = "TPF3: Primitive Block"; break;
+				case 0xD: pszTagSB = "TPF3: Depth Bias"; break;
+				case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break;
+				case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break;
+			}
+			break;
+		}
+		case 0xD:
+		{
+			pszTagID = "PDS";
+			break;
+		}
+		case 0xE:
+		{
+			pszTagID = "MCU";
+			{
+				IMG_UINT32 ui32Burst = (ui32TagSB >> 5) & 0x7;
+				IMG_UINT32 ui32GroupEnc = (ui32TagSB >> 2) & 0x7;
+				IMG_UINT32 ui32Group = ui32TagSB & 0x3;
+
+				IMG_CHAR* pszBurst = "";
+				IMG_CHAR* pszGroupEnc = "";
+				IMG_CHAR* pszGroup = "";
+
+				switch (ui32Burst)
+				{
+					case 0x0:
+					case 0x1: pszBurst = "128bit word within the Lower 256bits"; break;
+					case 0x2:
+					case 0x3: pszBurst = "128bit word within the Upper 256bits"; break;
+					case 0x4: pszBurst = "Lower 256bits"; break;
+					case 0x5: pszBurst = "Upper 256bits"; break;
+					case 0x6: pszBurst = "512 bits"; break;
+				}
+				switch (ui32GroupEnc)
+				{
+					case 0x0: pszGroupEnc = "TPUA_USC"; break;
+					case 0x1: pszGroupEnc = "TPUB_USC"; break;
+					case 0x2: pszGroupEnc = "USCA_USC"; break;
+					case 0x3: pszGroupEnc = "USCB_USC"; break;
+					case 0x4: pszGroupEnc = "PDS_USC"; break;
+					case 0x5:
+						if (6 > psDevInfo->sDevFeatureCfg.ui32NumClusters)
+						{
+							pszGroupEnc = "PDSRW"; break;
+						}
+						else if (6 == psDevInfo->sDevFeatureCfg.ui32NumClusters)
+						{
+							pszGroupEnc = "UPUC_USC"; break;
+						}
+					case 0x6:
+						if (6 == psDevInfo->sDevFeatureCfg.ui32NumClusters)
+						{
+							pszGroupEnc = "TPUC_USC"; break;
+						}
+					case 0x7:
+						if (6 == psDevInfo->sDevFeatureCfg.ui32NumClusters)
+						{
+							pszGroupEnc = "PDSRW"; break;
+						}
+				}
+				switch (ui32Group)
+				{
+					case 0x0: pszGroup = "Banks 0-3"; break;
+					case 0x1: pszGroup = "Banks 4-7"; break;
+					case 0x2: pszGroup = "Banks 8-11"; break;
+					case 0x3: pszGroup = "Banks 12-15"; break;
+				}
+
+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+								"%s, %s, %s", pszBurst, pszGroupEnc, pszGroup);
+				pszTagSB = pszScratchBuf;
+			}
+			break;
+		}
+		case 0xF:
+		{
+			pszTagID = "FB_CDC";
+
+			if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK)
+			{
+				IMG_UINT32 ui32Req   = (ui32TagSB >> 0) & 0xf;
+				IMG_UINT32 ui32MCUSB = (ui32TagSB >> 4) & 0x3;
+				IMG_CHAR* pszReqOrig = "";
+
+				switch (ui32Req)
+				{
+					case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break;
+					case 0x1: pszReqOrig = "FBC Request, originator PBE"; break;
+					case 0x2: pszReqOrig = "FBC Request, originator Host"; break;
+					case 0x3: pszReqOrig = "FBC Request, originator TLA"; break;
+					case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break;
+					case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break;
+					case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+					case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break;
+					case 0x8: pszReqOrig = "FBC Request, originator ZLS Requester Fence"; break;
+					case 0x9: pszReqOrig = "FBC Request, originator PBE Requester Fence"; break;
+					case 0xa: pszReqOrig = "FBC Request, originator Host Requester Fence"; break;
+					case 0xb: pszReqOrig = "FBC Request, originator TLA Requester Fence"; break;
+					case 0xc: pszReqOrig = "Reserved"; break;
+					case 0xd: pszReqOrig = "Reserved"; break;
+					case 0xe: pszReqOrig = "FBDC Request, originator FBCDC(Host) Memory Fence"; break;
+					case 0xf: pszReqOrig = "FBDC Request, originator FBCDC(TLA) Memory Fence"; break;
+				}
+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+				           "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+				pszTagSB = pszScratchBuf;
+			}
+			else
+			{
+				IMG_UINT32 ui32Req   = (ui32TagSB >> 2) & 0x7;
+				IMG_UINT32 ui32MCUSB = (ui32TagSB >> 0) & 0x3;
+				IMG_CHAR* pszReqOrig = "";
+
+				switch (ui32Req)
+				{
+					case 0x0: pszReqOrig = "FBC Request, originator ZLS";   break;
+					case 0x1: pszReqOrig = "FBC Request, originator PBE";   break;
+					case 0x2: pszReqOrig = "FBC Request, originator Host";  break;
+					case 0x3: pszReqOrig = "FBC Request, originator TLA";   break;
+					case 0x4: pszReqOrig = "FBDC Request, originator ZLS";  break;
+					case 0x5: pszReqOrig = "FBDC Request, originator MCU";  break;
+					case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+					case 0x7: pszReqOrig = "FBDC Request, originator TLA";  break;
+				}
+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+				           "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+				pszTagSB = pszScratchBuf;
+			}
+			break;
+		}
+	} /* switch(TagID) */
+
+	*ppszTagID = pszTagID;
+	*ppszTagSB = pszTagSB;
+}
+
+
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDecodeMMULevel
+
+ @Description
+
+ Return the name for the MMU level that faulted.
+
+ @Input ui32MMULevel	 - MMU level
+
+ @Return   IMG_CHAR* to the string describing the MMU level that faulted.
+
+******************************************************************************/
+static IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel)
+{
+	IMG_CHAR* pszMMULevel = "";
+
+	switch (ui32MMULevel)
+	{
+		case 0x0: pszMMULevel = " (Page Table)"; break;
+		case 0x1: pszMMULevel = " (Page Directory)"; break;
+		case 0x2: pszMMULevel = " (Page Catalog)"; break;
+		case 0x3: pszMMULevel = " (Cat Base)"; break;
+	}
+
+	return pszMMULevel;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDecodeMMUReqTags
+
+ @Description
+
+ Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and
+ RGX_CR_MMU_FAULT_STATUS regs.
+
+ @Input ui32TagID           - Tag ID value
+ @Input ui32TagSB           - Tag Sideband data
+ @Input bRead               - Read flag
+ @Output ppszTagID          - Decoded string from the Tag ID
+ @Output ppszTagSB          - Decoded string from the Tag SB
+ @Output pszScratchBuf      - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize  - Size of the provided buffer
+
+ @Return   void
+
+******************************************************************************/
+static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO    *psDevInfo,
+								 IMG_UINT32  ui32TagID,
+								 IMG_UINT32  ui32TagSB,
+								 IMG_BOOL    bRead,
+								 IMG_CHAR    **ppszTagID,
+								 IMG_CHAR    **ppszTagSB,
+								 IMG_CHAR    *pszScratchBuf,
+								 IMG_UINT32  ui32ScratchBufSize)
+{
+	IMG_INT32  i32SideBandType = -1;
+	IMG_CHAR   *pszTagID = "-";
+	IMG_CHAR   *pszTagSB = "-";
+
+	PVR_ASSERT(ppszTagID != NULL);
+	PVR_ASSERT(ppszTagSB != NULL);
+
+	switch (ui32TagID)
+	{
+		case  0: pszTagID = "META (Jones)"; i32SideBandType = RGXDBG_META; break;
+		case  1: pszTagID = "TLA (Jones)"; i32SideBandType = RGXDBG_TLA; break;
+		case  2: pszTagID = "DMA (Jones)"; i32SideBandType = RGXDBG_DMA; break;
+		case  3: pszTagID = "VDMM (Jones)"; i32SideBandType = RGXDBG_VDMM; break;
+		case  4: pszTagID = "CDM (Jones)"; i32SideBandType = RGXDBG_CDM; break;
+		case  5: pszTagID = "IPP (Jones)"; i32SideBandType = RGXDBG_IPP; break;
+		case  6: pszTagID = "PM (Jones)"; i32SideBandType = RGXDBG_PM; break;
+		case  7: pszTagID = "Tiling (Jones)"; i32SideBandType = RGXDBG_TILING; break;
+		case  8: pszTagID = "MCU (Texas 0)"; i32SideBandType = RGXDBG_MCU; break;
+		case 12: pszTagID = "VDMS (Black Pearl 0)"; i32SideBandType = RGXDBG_VDMS; break;
+		case 13: pszTagID = "IPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF; break;
+		case 14: pszTagID = "ISP (Black Pearl 0)"; i32SideBandType = RGXDBG_ISP; break;
+		case 15: pszTagID = "TPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF; break;
+		case 16: pszTagID = "USCS (Black Pearl 0)"; i32SideBandType = RGXDBG_USCS; break;
+		case 17: pszTagID = "PPP (Black Pearl 0)"; i32SideBandType = RGXDBG_PPP; break;
+		case 20: pszTagID = "MCU (Texas 1)"; i32SideBandType = RGXDBG_MCU; break;
+		case 24: pszTagID = "MCU (Texas 2)"; i32SideBandType = RGXDBG_MCU; break;
+		case 28: pszTagID = "VDMS (Black Pearl 1)"; i32SideBandType = RGXDBG_VDMS; break;
+		case 29: pszTagID = "IPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF; break;
+		case 30: pszTagID = "ISP (Black Pearl 1)"; i32SideBandType = RGXDBG_ISP; break;
+		case 31: pszTagID = "TPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF; break;
+		case 32: pszTagID = "USCS (Black Pearl 1)"; i32SideBandType = RGXDBG_USCS; break;
+		case 33: pszTagID = "PPP (Black Pearl 1)"; i32SideBandType = RGXDBG_PPP; break;
+		case 36: pszTagID = "MCU (Texas 3)"; i32SideBandType = RGXDBG_MCU; break;
+		case 40: pszTagID = "MCU (Texas 4)"; i32SideBandType = RGXDBG_MCU; break;
+		case 44: pszTagID = "VDMS (Black Pearl 2)"; i32SideBandType = RGXDBG_VDMS; break;
+		case 45: pszTagID = "IPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF; break;
+		case 46: pszTagID = "ISP (Black Pearl 2)"; i32SideBandType = RGXDBG_ISP; break;
+		case 47: pszTagID = "TPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF; break;
+		case 48: pszTagID = "USCS (Black Pearl 2)"; i32SideBandType = RGXDBG_USCS; break;
+		case 49: pszTagID = "PPP (Black Pearl 2)"; i32SideBandType = RGXDBG_PPP; break;
+		case 52: pszTagID = "MCU (Texas 5)"; i32SideBandType = RGXDBG_MCU; break;
+		case 56: pszTagID = "MCU (Texas 6)"; i32SideBandType = RGXDBG_MCU; break;
+		case 60: pszTagID = "VDMS (Black Pearl 3)"; i32SideBandType = RGXDBG_VDMS; break;
+		case 61: pszTagID = "IPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF; break;
+		case 62: pszTagID = "ISP (Black Pearl 3)"; i32SideBandType = RGXDBG_ISP; break;
+		case 63: pszTagID = "TPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF; break;
+		case 64: pszTagID = "USCS (Black Pearl 3)"; i32SideBandType = RGXDBG_USCS; break;
+		case 65: pszTagID = "PPP (Black Pearl 3)"; i32SideBandType = RGXDBG_PPP; break;
+		case 68: pszTagID = "MCU (Texas 7)"; i32SideBandType = RGXDBG_MCU; break;
+	}
+	if (('-' == pszTagID[0]) && ('\0' == pszTagID[1]))
+	{
+		if ((psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_50539_BIT_MASK) ||
+				(psDevInfo->sDevFeatureCfg.ui32FBCDCArch >= 3))
+		{
+			switch(ui32TagID)
+			{
+			case 18: pszTagID = "TPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+			case 19: pszTagID = "IPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+			case 34: pszTagID = "TPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+			case 35: pszTagID = "IPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+			case 50: pszTagID = "TPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+			case 51: pszTagID = "IPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+			case 66: pszTagID = "TPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+			case 67: pszTagID = "IPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+			}
+
+			if(psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_50539_BIT_MASK)
+			{
+				switch(ui32TagID)
+				{
+				case 9:	pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+				case 10: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+				case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 21: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+				case 22: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+				case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 25: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+				case 26: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+				case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 37: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+				case 38: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+				case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 41: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+				case 42: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+				case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 53: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+				case 54: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+				case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 57: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+				case 58: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+				case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 69: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+				case 70: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+				case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
+				}
+			}
+			else
+			{
+				switch(ui32TagID)
+				{
+				case 9:	pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+				case 10: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+				case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+				case 22: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+				case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+				case 26: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+				case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+				case 38: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+				case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+				case 42: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+				case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+				case 54: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+				case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+				case 58: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+				case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
+				case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+				case 70: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+				case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
+				}
+			}
+		}
+		else
+		{
+			switch(ui32TagID)
+			{
+			case 9:	pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+			case 10: pszTagID = "PBE0 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+			case 11: pszTagID = "PBE1 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+			case 18: pszTagID = "VCE (Black Pearl 0)"; i32SideBandType = RGXDBG_VCE; break;
+			case 19: pszTagID = "FBCDC (Black Pearl 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+			case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+			case 22: pszTagID = "PBE0 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+			case 23: pszTagID = "PBE1 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+			case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+			case 26: pszTagID = "PBE0 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+			case 27: pszTagID = "PBE1 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+			case 34: pszTagID = "VCE (Black Pearl 1)"; i32SideBandType = RGXDBG_VCE; break;
+			case 35: pszTagID = "FBCDC (Black Pearl 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+			case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+			case 38: pszTagID = "PBE0 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+			case 39: pszTagID = "PBE1 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+			case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+			case 42: pszTagID = "PBE0 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+			case 43: pszTagID = "PBE1 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+			case 50: pszTagID = "VCE (Black Pearl 2)"; i32SideBandType = RGXDBG_VCE; break;
+			case 51: pszTagID = "FBCDC (Black Pearl 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+			case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+			case 54: pszTagID = "PBE0 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+			case 55: pszTagID = "PBE1 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+			case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+			case 58: pszTagID = "PBE0 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+			case 59: pszTagID = "PBE1 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+			case 66: pszTagID = "VCE (Black Pearl 3)"; i32SideBandType = RGXDBG_VCE; break;
+			case 67: pszTagID = "FBCDC (Black Pearl 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+			case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+			case 70: pszTagID = "PBE0 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+			case 71: pszTagID = "PBE1 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+			}
+		}
+
+	}
+
+	switch (i32SideBandType)
+	{
+		case RGXDBG_META:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "DCache - Thread 0"; break;
+				case 0x1: pszTagSB = "ICache - Thread 0"; break;
+				case 0x2: pszTagSB = "JTag - Thread 0"; break;
+				case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+				case 0x4: pszTagSB = "DCache - Thread 1"; break;
+				case 0x5: pszTagSB = "ICache - Thread 1"; break;
+				case 0x6: pszTagSB = "JTag - Thread 1"; break;
+				case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_TLA:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Pixel data"; break;
+				case 0x1: pszTagSB = "Command stream data"; break;
+				case 0x2: pszTagSB = "Fence or flush"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_VDMM:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Control Stream - Read Only"; break;
+				case 0x1: pszTagSB = "PPP State - Read Only"; break;
+				case 0x2: pszTagSB = "Indices - Read Only"; break;
+				case 0x4: pszTagSB = "Call Stack - Read/Write"; break;
+				case 0x6: pszTagSB = "DrawIndirect - Read Only"; break;
+				case 0xA: pszTagSB = "Context State - Write Only"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_CDM:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Control Stream"; break;
+				case 0x1: pszTagSB = "Indirect Data"; break;
+				case 0x2: pszTagSB = "Event Write"; break;
+				case 0x3: pszTagSB = "Context State"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_IPP:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Macrotile Header"; break;
+				case 0x1: pszTagSB = "Region Header"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_PM:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "PMA_TAFSTACK"; break;
+				case 0x1: pszTagSB = "PMA_TAMLIST"; break;
+				case 0x2: pszTagSB = "PMA_3DFSTACK"; break;
+				case 0x3: pszTagSB = "PMA_3DMLIST"; break;
+				case 0x4: pszTagSB = "PMA_PMCTX0"; break;
+				case 0x5: pszTagSB = "PMA_PMCTX1"; break;
+				case 0x6: pszTagSB = "PMA_MAVP"; break;
+				case 0x7: pszTagSB = "PMA_UFSTACK"; break;
+				case 0x8: pszTagSB = "PMD_TAFSTACK"; break;
+				case 0x9: pszTagSB = "PMD_TAMLIST"; break;
+				case 0xA: pszTagSB = "PMD_3DFSTACK"; break;
+				case 0xB: pszTagSB = "PMD_3DMLIST"; break;
+				case 0xC: pszTagSB = "PMD_PMCTX0"; break;
+				case 0xD: pszTagSB = "PMD_PMCTX1"; break;
+				case 0xF: pszTagSB = "PMD_UFSTACK"; break;
+				case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break;
+				case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break;
+				case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break;
+				case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break;
+				case 0x14: pszTagSB = "PMA_TAUFSTACK"; break;
+				case 0x15: pszTagSB = "PMA_3DUFSTACK"; break;
+				case 0x16: pszTagSB = "PMD_TAUFSTACK"; break;
+				case 0x17: pszTagSB = "PMD_3DUFSTACK"; break;
+				case 0x18: pszTagSB = "PMA_TAVFP"; break;
+				case 0x19: pszTagSB = "PMD_3DVFP"; break;
+				case 0x1A: pszTagSB = "PMD_TAVFP"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_TILING:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "PSG Control Stream TP0"; break;
+				case 0x1: pszTagSB = "TPC TP0"; break;
+				case 0x2: pszTagSB = "VCE0"; break;
+				case 0x3: pszTagSB = "VCE1"; break;
+				case 0x4: pszTagSB = "PSG Control Stream TP1"; break;
+				case 0x5: pszTagSB = "TPC TP1"; break;
+				case 0x8: pszTagSB = "PSG Region Header TP0"; break;
+				case 0xC: pszTagSB = "PSG Region Header TP1"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_VDMS:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Context State - Write Only"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_IPF:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x00:
+				case 0x20: pszTagSB = "CPF"; break;
+				case 0x01: pszTagSB = "DBSC"; break;
+				case 0x02:
+				case 0x04:
+				case 0x06:
+				case 0x08:
+				case 0x0A:
+				case 0x0C:
+				case 0x0E:
+				case 0x10: pszTagSB = "Control Stream"; break;
+				case 0x03:
+				case 0x05:
+				case 0x07:
+				case 0x09:
+				case 0x0B:
+				case 0x0D:
+				case 0x0F:
+				case 0x11: pszTagSB = "Primitive Block"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_ISP:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x00: pszTagSB = "ZLS read/write"; break;
+				case 0x20: pszTagSB = "Occlusion query read/write"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_TPF:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "TPF0: Primitive Block"; break;
+				case 0x1: pszTagSB = "TPF0: Depth Bias"; break;
+				case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break;
+				case 0x3: pszTagSB = "CPF - Tables"; break;
+				case 0x4: pszTagSB = "TPF1: Primitive Block"; break;
+				case 0x5: pszTagSB = "TPF1: Depth Bias"; break;
+				case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break;
+				case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break;
+				case 0x8: pszTagSB = "TPF2: Primitive Block"; break;
+				case 0x9: pszTagSB = "TPF2: Depth Bias"; break;
+				case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break;
+				case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break;
+				case 0xC: pszTagSB = "TPF3: Primitive Block"; break;
+				case 0xD: pszTagSB = "TPF3: Depth Bias"; break;
+				case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break;
+				case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_FBCDC:
+		{
+			/*
+			 * FBC faults on a 4-cluster phantom does not always set SB
+			 * bit 5, but since FBC is write-only and FBDC is read-only,
+			 * we can set bit 5 if this is a write fault, before decoding.
+			 */
+			if (bRead == IMG_FALSE)
+			{
+				ui32TagSB |= 0x20;
+			}
+
+			switch (ui32TagSB)
+			{
+				case 0x00: pszTagSB = "FBDC Request, originator ZLS"; break;
+				case 0x02: pszTagSB = "FBDC Request, originator MCU Dust 0"; break;
+				case 0x03: pszTagSB = "FBDC Request, originator MCU Dust 1"; break;
+				case 0x20: pszTagSB = "FBC Request, originator ZLS"; break;
+				case 0x22: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0"; break;
+				case 0x23: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1"; break;
+				case 0x24: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0"; break;
+				case 0x25: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1"; break;
+				case 0x28: pszTagSB = "FBC Request, originator ZLS Fence"; break;
+				case 0x2a: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0, Fence"; break;
+				case 0x2b: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1, Fence"; break;
+				case 0x2c: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0, Fence"; break;
+				case 0x2d: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1, Fence"; break;
+			}
+			break;
+		}
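+
+		/*
+		 * Example (illustration only): a write fault (bRead == IMG_FALSE)
+		 * reported with ui32TagSB == 0x02 is promoted to 0x22 by the code
+		 * above and therefore decodes as
+		 * "FBC Request, originator PBE Dust 0, Cluster 0".
+		 */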
+
+		case RGXDBG_MCU:
+		{
+			IMG_UINT32 ui32SetNumber = (ui32TagSB >> 5) & 0x7;
+			IMG_UINT32 ui32WayNumber = (ui32TagSB >> 2) & 0x7;
+			IMG_UINT32 ui32Group     = ui32TagSB & 0x3;
+
+			IMG_CHAR* pszGroup = "";
+
+			switch (ui32Group)
+			{
+				case 0x0: pszGroup = "Banks 0-1"; break;
+				case 0x1: pszGroup = "Banks 2-3"; break;
+				case 0x2: pszGroup = "Banks 4-5"; break;
+				case 0x3: pszGroup = "Banks 6-7"; break;
+			}
+
+			OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+			           "Set=%d, Way=%d, %s", ui32SetNumber, ui32WayNumber, pszGroup);
+			pszTagSB = pszScratchBuf;
+			break;
+		}
+
+		default:
+		{
+			OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "SB=0x%02x", ui32TagSB);
+			pszTagSB = pszScratchBuf;
+			break;
+		}
+	}
+
+	*ppszTagID = pszTagID;
+	*ppszTagSB = pszTagSB;
+}
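+
+/*
+ * Worked example (derived from the tag tables above, illustration only):
+ * a fault with ui32TagID == 13 decodes to "IPF (Black Pearl 0)" and selects
+ * the RGXDBG_IPF sideband table, where ui32TagSB == 0x01 decodes to "DBSC";
+ * the log line then reports the pair as "IPF (Black Pearl 0) (DBSC)".
+ */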
+
+static void ConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer,
+							IMG_UINT64 *pui64Seconds,
+							IMG_UINT64 *pui64Nanoseconds)
+{
+	IMG_UINT32 ui32Remainder;
+
+	*pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder);
+	*pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL);
+}
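+
+/*
+ * Example (illustration only): ui64OSTimer == 1234567890123 ns yields
+ * *pui64Seconds == 1234 and *pui64Nanoseconds == 567890123, which the
+ * callers below format as "1234.567890123".
+ */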
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+
+typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_
+{
+	DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING,
+	DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED,
+	DEVICEMEM_HISTORY_QUERY_INDEX_NEXT,
+	DEVICEMEM_HISTORY_QUERY_INDEX_COUNT,
+} DEVICEMEM_HISTORY_QUERY_INDEX;
+
+/*!
+*******************************************************************************
+
+ @Function	_PrintDevicememHistoryQueryResult
+
+ @Description
+
+ Print details of a single result from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf       - Debug printf function
+ @Input pvDumpDebugFile          - Optional file identifier to be passed to the
+                                   'printf' function if required
+ @Input psFaultProcessInfo       - The process info derived from the page fault
+ @Input psResult                 - The DevicememHistory result to be printed
+ @Input ui32Index                - The index of the result
+
+ @Return   void
+
+******************************************************************************/
+static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile,
+						RGXMEM_PROCESS_INFO *psFaultProcessInfo,
+						DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult,
+						IMG_UINT32 ui32Index)
+{
+	IMG_UINT32 ui32Remainder;
+	IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+	ConvertOSTimestampToSAndNS(psResult->ui64When,
+							&ui64Seconds,
+							&ui64Nanoseconds);
+
+	if(psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE)
+	{
+		PVR_DUMPDEBUG_LOG("  [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+					" Size: " IMG_DEVMEM_SIZE_FMTSPEC
+					" Operation: %s Modified: %llu us ago (OS time %llu.%09llu s)",
+										ui32Index,
+										psResult->szString,
+						(unsigned long long) psResult->sBaseDevVAddr.uiAddr,
+						(unsigned long long) psResult->uiSize,
+						psResult->bMap ? "Map": "Unmap",
+						(unsigned long long) OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+						(unsigned long long) ui64Seconds,
+						(unsigned long long) ui64Nanoseconds);
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG("  [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+					" Size: " IMG_DEVMEM_SIZE_FMTSPEC
+					" Operation: %s Modified: %llu us ago (OS time %llu.%09llu s) PID: %u (%s)",
+										ui32Index,
+										psResult->szString,
+						(unsigned long long) psResult->sBaseDevVAddr.uiAddr,
+						(unsigned long long) psResult->uiSize,
+						psResult->bMap ? "Map": "Unmap",
+						(unsigned long long) OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+						(unsigned long long) ui64Seconds,
+						(unsigned long long) ui64Nanoseconds,
+						(unsigned int) psResult->sProcessInfo.uiPID,
+						psResult->sProcessInfo.szProcessName);
+	}
+
+	if(!psResult->bRange)
+	{
+		PVR_DUMPDEBUG_LOG("      Whole allocation was %s", psResult->bMap ? "mapped": "unmapped");
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG("      Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s",
+										psResult->ui32StartPage,
+										psResult->ui32StartPage + psResult->ui32PageCount - 1,
+										psResult->sMapStartAddr.uiAddr,
+										psResult->sMapEndAddr.uiAddr,
+										psResult->bAll ? "(whole allocation) " : "",
+										psResult->bMap ? "mapped": "unmapped");
+	}
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_PrintDevicememHistoryQueryOut
+
+ @Description
+
+ Print details of all the results from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf       - Debug printf function
+ @Input pvDumpDebugFile          - Optional file identifier to be passed to the
+                                   'printf' function if required
+ @Input psFaultProcessInfo       - The process info derived from the page fault
+ @Input psQueryOut               - Storage for the query results
+
+ @Return   void
+
+******************************************************************************/
+static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile,
+						RGXMEM_PROCESS_INFO *psFaultProcessInfo,
+						DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut)
+{
+	IMG_UINT32 i;
+
+	if(psQueryOut->ui32NumResults == 0)
+	{
+		PVR_DUMPDEBUG_LOG("  No results");
+	}
+	else
+	{
+		for(i = 0; i < psQueryOut->ui32NumResults; i++)
+		{
+			_PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile,
+									psFaultProcessInfo,
+									&psQueryOut->sResults[i],
+									i);
+		}
+	}
+}
+
+/* table of HW page size values and the equivalent size in bytes */
+static const unsigned int aui32HWPageSizeTable[][2] =
+{
+	{ 0, PVRSRV_4K_PAGE_SIZE },
+	{ 1, PVRSRV_16K_PAGE_SIZE },
+	{ 2, PVRSRV_64K_PAGE_SIZE },
+	{ 3, PVRSRV_256K_PAGE_SIZE },
+	{ 4, PVRSRV_1M_PAGE_SIZE },
+	{ 5, PVRSRV_2M_PAGE_SIZE }
+};
+
+/*!
+*******************************************************************************
+
+ @Function	_PageSizeHWToBytes
+
+ @Description
+
+ Convert a HW page size value to its size in bytes
+
+ @Input ui32PageSizeHW     - The HW page size value
+
+ @Return   IMG_UINT32      The page size in bytes
+
+******************************************************************************/
+static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW)
+{
+	if (ui32PageSizeHW > 5)
+	{
+		/* This is invalid, so return a default value as we cannot ASSERT in this code! */
+		return PVRSRV_4K_PAGE_SIZE;
+	}
+
+	return aui32HWPageSizeTable[ui32PageSizeHW][1];
+}
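+
+/*
+ * Example (illustration only): ui32PageSizeHW == 2 maps to
+ * PVRSRV_64K_PAGE_SIZE, while an out-of-range value such as 7 falls back
+ * to the PVRSRV_4K_PAGE_SIZE default.
+ */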
+
+/*!
+*******************************************************************************
+
+ @Function	_GetDevicememHistoryData
+
+ @Description
+
+ Get the DevicememHistory results for the given PID and faulting device virtual address.
+ The function will query DevicememHistory for information about the faulting page, as well
+ as the page before and after.
+
+ @Input uiPID              - The process ID to search for allocations belonging to
+ @Input sFaultDevVAddr     - The device address to search for allocations at/before/after
+ @Input asQueryOut         - Storage for the query results
+ @Input ui32PageSizeBytes  - Faulted page size in bytes
+
+ @Return IMG_BOOL          - IMG_TRUE if any results were found for this page fault
+
+******************************************************************************/
+static IMG_BOOL _GetDevicememHistoryData(IMG_PID uiPID, IMG_DEV_VIRTADDR sFaultDevVAddr,
+							DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT],
+							IMG_UINT32 ui32PageSizeBytes)
+{
+	IMG_UINT32 i;
+	DEVICEMEM_HISTORY_QUERY_IN sQueryIn;
+	IMG_BOOL bAnyHits = IMG_FALSE;
+
+	/* if the page fault originated in the firmware then the allocation may
+	 * appear to belong to any PID, because FW allocations are attributed
+	 * to the client process creating the allocation, so instruct the
+	 * devicemem_history query to search all available PIDs
+	 */
+	if(uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+	{
+		sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY;
+	}
+	else
+	{
+		sQueryIn.uiPID = uiPID;
+	}
+
+	/* query the DevicememHistory about the preceding / faulting / next page */
+
+	for(i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+	{
+		IMG_BOOL bHits;
+
+		switch(i)
+		{
+			case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING:
+				sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - 1;
+				break;
+			case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED:
+				sQueryIn.sDevVAddr = sFaultDevVAddr;
+				break;
+			case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT:
+				sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes;
+				break;
+		}
+
+		/* First try matching any record at the exact address... */
+		bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_FALSE);
+		if (!bHits)
+		{
+			/* If not matched then try matching any record in the same page... */
+			bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_TRUE);
+		}
+
+		if(bHits)
+		{
+			bAnyHits = IMG_TRUE;
+		}
+	}
+
+	return bAnyHits;
+}
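+
+/*
+ * Worked example of the three query addresses above (illustration only,
+ * assuming a 4K page and a fault at 0x12345678):
+ *   PRECEDING queries 0x12344FFF (last byte of the previous page),
+ *   FAULTED   queries 0x12345678 (the faulting address itself),
+ *   NEXT      queries 0x12346000 (first byte of the following page).
+ */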
+
+/* stored data about one page fault */
+typedef struct _FAULT_INFO_
+{
+	/* the process info of the memory context that page faulted */
+	RGXMEM_PROCESS_INFO sProcessInfo;
+	IMG_DEV_VIRTADDR sFaultDevVAddr;
+	DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT];
+	/* the CR timer value at the time of the fault, recorded by the FW.
+	 * Used to differentiate between page faults
+	 */
+	IMG_UINT64 ui64CRTimer;
+	/* time when this FAULT_INFO entry was added. Used for timing
+	 * reference against the map/unmap information
+	 */
+	IMG_UINT64 ui64When;
+} FAULT_INFO;
+
+/* history list of page faults.
+ * Keeps the first `n` page faults and the last `n` page faults, like the FW
+ * HWR log
+ */
+typedef struct _FAULT_INFO_LOG_
+{
+	IMG_UINT32 ui32Head;
+	IMG_UINT32 ui32NumWrites;
+	/* the number of faults in this log need not correspond exactly to
+	 * the HWINFO number of the FW, as the FW HWINFO log may contain
+	 * non-page fault HWRs
+	 */
+	FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX];
+} FAULT_INFO_LOG;
+
+static FAULT_INFO_LOG gsFaultInfoLog = { 0 };
+
+/*!
+*******************************************************************************
+
+ @Function	_QueryFaultInfo
+
+ @Description
+
+ Searches the local list of previously analysed page faults to see if the given
+ fault has already been analysed and if so, returns a pointer to the analysis
+ object (FAULT_INFO *), otherwise returns NULL.
+
+ @Input pfnDumpDebugPrintf       - The debug printf function
+ @Input pvDumpDebugFile          - Optional file identifier to be passed to the
+                                   'printf' function if required
+ @Input sFaultDevVAddr           - The faulting device virtual address
+ @Input ui64CRTimer              - The CR timer value recorded by the FW at the time of the fault
+
+ @Return   FAULT_INFO* Pointer to an existing fault analysis structure if found, otherwise NULL
+
+******************************************************************************/
+static FAULT_INFO *_QueryFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					IMG_DEV_VIRTADDR sFaultDevVAddr,
+					IMG_UINT64 ui64CRTimer)
+{
+	IMG_UINT32 i;
+
+	for(i = 0; i < MIN(gsFaultInfoLog.ui32NumWrites, RGXFWIF_HWINFO_MAX); i++)
+	{
+		if ((gsFaultInfoLog.asFaults[i].ui64CRTimer == ui64CRTimer) &&
+			(gsFaultInfoLog.asFaults[i].sFaultDevVAddr.uiAddr == sFaultDevVAddr.uiAddr))
+		{
+			return &gsFaultInfoLog.asFaults[i];
+		}
+	}
+
+	return NULL;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_AcquireNextFaultInfoElement
+
+ @Description
+
+ Gets a pointer to the next element in the fault info log
+ (requires the fault info lock to be held)
+
+ @Return   FAULT_INFO* Pointer to the next record for writing
+
+******************************************************************************/
+
+static FAULT_INFO *_AcquireNextFaultInfoElement(void)
+{
+	IMG_UINT32 ui32Head = gsFaultInfoLog.ui32Head;
+	FAULT_INFO *psInfo = &gsFaultInfoLog.asFaults[ui32Head];
+
+	return psInfo;
+}
+
+static void _CommitFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+							FAULT_INFO *psInfo,
+							RGXMEM_PROCESS_INFO *psProcessInfo,
+							IMG_DEV_VIRTADDR sFaultDevVAddr,
+							IMG_UINT64 ui64CRTimer)
+{
+	IMG_UINT32 i, j;
+
+	/* commit the page fault details */
+
+	psInfo->sProcessInfo = *psProcessInfo;
+	psInfo->sFaultDevVAddr = sFaultDevVAddr;
+	psInfo->ui64CRTimer = ui64CRTimer;
+	psInfo->ui64When = OSClockns64();
+
+	/* if the page fault was caused by the firmware then get information about
+	 * which client application created the related allocations.
+	 *
+	 * Fill in the process info data for each query result.
+	 */
+
+	if(psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+	{
+		for(i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+		{
+			for(j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++)
+			{
+				IMG_BOOL bFound;
+
+				RGXMEM_PROCESS_INFO *psProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo;
+				bFound = RGXPCPIDToProcessInfo(psDevInfo,
+									psProcInfo->uiPID,
+									psProcInfo);
+				if(!bFound)
+				{
+					OSStringNCopy(psProcInfo->szProcessName,
+									"(unknown)",
+									sizeof(psProcInfo->szProcessName) - 1);
+					psProcInfo->szProcessName[sizeof(psProcInfo->szProcessName) - 1] = '\0';
+				}
+			}
+		}
+	}
+
+	/* assert the faults circular buffer head hasn't moved under us and
+	 * then move it along
+	 */
+
+	PVR_ASSERT(psInfo == &gsFaultInfoLog.asFaults[gsFaultInfoLog.ui32Head]);
+
+	if(gsFaultInfoLog.ui32Head < RGXFWIF_HWINFO_MAX - 1)
+	{
+		gsFaultInfoLog.ui32Head++;
+	}
+	else
+	{
+		/* wrap back to the first of the 'LAST' entries */
+		gsFaultInfoLog.ui32Head = RGXFWIF_HWINFO_MAX_FIRST;
+	}
+
+	gsFaultInfoLog.ui32NumWrites++;
+}
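+
+/*
+ * Example of the head progression above (illustration only, assuming
+ * RGXFWIF_HWINFO_MAX_FIRST == 2 and RGXFWIF_HWINFO_MAX == 6): the head
+ * visits slots 0,1,2,3,4,5 and then wraps back to 2, so slots 0..1 keep
+ * the first faults while slots 2..5 cycle through the most recent ones,
+ * mirroring the FW HWR log behaviour.
+ */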
+
+/*!
+*******************************************************************************
+
+ @Function	_PrintFaultInfo
+
+ @Description
+
+ Print all the details of a page fault from a FAULT_INFO structure
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psInfo               - The page fault occurrence to print
+ @Input pui32Index           - (optional) index value to include in the print output
+
+ @Return   void
+
+******************************************************************************/
+static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					FAULT_INFO *psInfo,
+					const IMG_UINT32 *pui32Index)
+{
+	IMG_UINT32 i;
+	IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+	IMG_PID uiPID;
+
+	uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE) ? 0 : psInfo->sProcessInfo.uiPID;
+
+	ConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds);
+
+	if(pui32Index)
+	{
+		PVR_DUMPDEBUG_LOG("(%u) Device memory history for page fault address 0x%010llX, CRTimer: 0x%016llX, "
+							"PID: %u (%s, unregistered: %u) OS time: %llu.%09llu",
+					*pui32Index,
+					(unsigned long long) psInfo->sFaultDevVAddr.uiAddr,
+					(unsigned long long) psInfo->ui64CRTimer,
+					(unsigned int) uiPID,
+					psInfo->sProcessInfo.szProcessName,
+					psInfo->sProcessInfo.bUnregistered,
+					(unsigned long long) ui64Seconds,
+					(unsigned long long) ui64Nanoseconds);
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG("Device memory history for page fault address 0x%010llX, PID: %u "
+							"(%s, unregistered: %u) OS time: %llu.%09llu",
+					(unsigned long long) psInfo->sFaultDevVAddr.uiAddr,
+					(unsigned int) uiPID,
+					psInfo->sProcessInfo.szProcessName,
+					psInfo->sProcessInfo.bUnregistered,
+					(unsigned long long) ui64Seconds,
+					(unsigned long long) ui64Nanoseconds);
+	}
+
+	for(i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+	{
+		const IMG_CHAR *pszWhich;
+
+		switch(i)
+		{
+			case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING:
+				pszWhich = "Preceding page";
+				break;
+			case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED:
+				pszWhich = "Faulted page";
+				break;
+			case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT:
+				pszWhich = "Next page";
+				break;
+		}
+
+		PVR_DUMPDEBUG_LOG("%s:", pszWhich);
+		_PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile,
+							&psInfo->sProcessInfo,
+							&psInfo->asQueryOut[i]);
+	}
+}
+
+#endif
+
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpRGXBIFBank
+
+ @Description
+
+ Dump BIF Bank state in human readable form.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psDevInfo            - RGX device info
+ @Input eBankID              - BIF identifier
+ @Input ui64MMUStatus        - MMU Status register value
+ @Input ui64ReqStatus        - BIF request Status register value
+ @Input ui64PCAddress        - Page catalogue base address of faulting access
+ @Input ui64CRTimer          - RGX CR timer value at time of page fault
+ @Input bSummary             - Flag indicating whether the function is called
+                                as a part of the debug dump summary or
+                                as a part of a HWR log
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpRGXBIFBank(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					PVRSRV_RGXDEV_INFO *psDevInfo,
+					RGXDBG_BIF_ID eBankID,
+					IMG_UINT64 ui64MMUStatus,
+					IMG_UINT64 ui64ReqStatus,
+					IMG_UINT64 ui64PCAddress,
+					IMG_UINT64 ui64CRTimer,
+					IMG_BOOL bSummary)
+{
+	IMG_CHAR  *pszIndent = (bSummary ? "" : "    ");
+
+	if (ui64MMUStatus == 0x0)
+	{
+		PVR_DUMPDEBUG_LOG("%s - OK", pszBIFNames[eBankID]);
+	}
+	else
+	{
+		IMG_DEV_VIRTADDR sFaultDevVAddr;
+		IMG_DEV_PHYADDR sPCDevPAddr = { 0 };
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+		IMG_BOOL bFound = IMG_FALSE;
+		RGXMEM_PROCESS_INFO sProcessInfo;
+		IMG_UINT32 ui32PageSizeBytes;
+		FAULT_INFO *psInfo;
+#endif
+		/* Bank 0 & 1 share the same fields */
+		PVR_DUMPDEBUG_LOG("%s%s - FAULT:",
+						  pszIndent,
+						  pszBIFNames[eBankID]);
+
+		/* MMU Status */
+		{
+			IMG_UINT32 ui32PC =
+				(ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+
+			IMG_UINT32 ui32PageSize =
+				(ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT;
+
+			IMG_UINT32 ui32MMUDataType =
+				(ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT;
+
+			IMG_BOOL bROFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN) != 0;
+			IMG_BOOL bProtFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN) != 0;
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+			ui32PageSizeBytes = _PageSizeHWToBytes(ui32PageSize);
+#endif
+
+			PVR_DUMPDEBUG_LOG("%s  * MMU status (0x%016" IMG_UINT64_FMTSPECX "): PC = %d%s, Page Size = %d, MMU data type = %d%s%s.",
+			                  pszIndent,
+							  ui64MMUStatus,
+			                  ui32PC,
+			                  (ui32PC < 0x8)?"":_RGXDecodePMPC(ui32PC),
+			                  ui32PageSize,
+			                  ui32MMUDataType,
+			                  (bROFault)?", Read Only fault":"",
+			                  (bProtFault)?", PM/META protection fault":"");
+		}
+
+		/* Req Status */
+		{
+			IMG_CHAR *pszTagID;
+			IMG_CHAR *pszTagSB;
+			IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+
+			IMG_BOOL bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN) != 0;
+			IMG_UINT32 ui32TagSB =
+				(ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT;
+			IMG_UINT32 ui32TagID =
+				(ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK) >>
+							RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT;
+			IMG_UINT64 ui64Addr = ((ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK) >>
+							RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT) <<
+							RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT;
+
+			/* RNW bit offset is different. The TAG_SB, TAG_ID and address fields are the same. */
+			if( (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK) && (eBankID == RGXDBG_DPX_BIF))
+			{
+				bRead = (ui64ReqStatus & DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN) != 0;
+			}
+
+			_RGXDecodeBIFReqTags(psDevInfo, eBankID, ui32TagID, ui32TagSB, &pszTagID, &pszTagSB, &aszScratch[0], RGX_DEBUG_STR_SIZE);
+
+			PVR_DUMPDEBUG_LOG("%s  * Request (0x%016" IMG_UINT64_FMTSPECX
+						"): %s (%s), %s " IMG_DEV_VIRTADDR_FMTSPEC ".",
+							  pszIndent,
+							  ui64ReqStatus,
+			                  pszTagID,
+			                  pszTagSB,
+			                  (bRead)?"Reading from":"Writing to",
+			                  ui64Addr);
+		}
+
+		/* Check if the host thinks this fault is valid */
+
+		sFaultDevVAddr.uiAddr = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK);
+
+		if (bSummary)
+		{
+			IMG_UINT32 ui32PC =
+				(ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+
+			/* Only the first 8 cat bases are application memory contexts which we can validate... */
+			if (ui32PC < 8)
+			{
+				sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32PC));
+				PVR_DUMPDEBUG_LOG("%sAcquired live PC address: 0x%016"
+					IMG_UINT64_FMTSPECX, pszIndent, sPCDevPAddr.uiAddr);
+			}
+			else
+			{
+				sPCDevPAddr.uiAddr = RGXFWIF_INVALID_PC_PHYADDR;
+			}
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG("%sFW logged fault using PC Address: 0x%016"
+				IMG_UINT64_FMTSPECX, pszIndent, ui64PCAddress);
+			sPCDevPAddr.uiAddr = ui64PCAddress;
+		}
+
+		if (bSummary)
+		{
+			PVR_DUMPDEBUG_LOG("%sChecking faulting address "IMG_DEV_VIRTADDR_FMTSPEC,
+				pszIndent, sFaultDevVAddr.uiAddr);
+			RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr,
+								 pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+
+		 /* look to see if we have already processed this fault.
+		  * if so then use the previously acquired information.
+		  */
+		OSLockAcquire(psDevInfo->hDebugFaultInfoLock);
+		psInfo = _QueryFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, sFaultDevVAddr, ui64CRTimer);
+
+		if(psInfo == NULL)
+		{
+			if(sPCDevPAddr.uiAddr != RGXFWIF_INVALID_PC_PHYADDR)
+			{
+				/* look up the process details for the faulting page catalogue */
+				bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo);
+
+				if(bFound)
+				{
+					IMG_BOOL bHits;
+
+					psInfo = _AcquireNextFaultInfoElement();
+
+					/* get any DevicememHistory data for the faulting address */
+					bHits = _GetDevicememHistoryData(sProcessInfo.uiPID,
+										sFaultDevVAddr,
+										psInfo->asQueryOut,
+										ui32PageSizeBytes);
+
+					if(bHits)
+					{
+						_CommitFaultInfo(psDevInfo,
+									psInfo,
+									&sProcessInfo,
+									sFaultDevVAddr,
+									ui64CRTimer);
+					}
+					else
+					{
+						/* no hits, so no data to present */
+						PVR_DUMPDEBUG_LOG("%sNo matching Devmem History for fault address", pszIndent);
+						psInfo = NULL;
+					}
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG("%sCould not find PID for PC 0x%016llX", pszIndent, sPCDevPAddr.uiAddr);
+				}
+			}
+			else
+			{
+				PVR_DUMPDEBUG_LOG("%sPage fault not applicable to Devmem History", pszIndent);
+			}
+		}
+
+		if(psInfo != NULL)
+		{
+			_PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, NULL);
+		}
+
+		OSLockRelease(psDevInfo->hDebugFaultInfoLock);
+#endif
+
+	}
+
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpRGXMMUFaultStatus
+
+ @Description
+
+ Dump MMU Fault status in human readable form.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psDevInfo            - RGX device info
+ @Input ui64MMUStatus        - MMU Status register value
+ @Input ui64PCAddress        - Page catalogue base address of faulting access
+ @Input ui64CRTimer          - RGX CR timer value at time of page fault
+ @Input bIsMetaMMUStatus     - Is the status from MMU_FAULT_STATUS or MMU_FAULT_STATUS_META.
+ @Input bSummary             - Flag indicating whether the function is called
+                                as a part of the debug dump summary or
+                                as a part of a HWR log
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					PVRSRV_RGXDEV_INFO *psDevInfo,
+					IMG_UINT64 ui64MMUStatus,
+					IMG_UINT64 ui64PCAddress,
+					IMG_UINT64 ui64CRTimer,
+					IMG_BOOL bIsMetaMMUStatus,
+					IMG_BOOL bSummary)
+{
+	IMG_CHAR  *pszMetaOrCore = (bIsMetaMMUStatus ? "Meta" : "Core");
+	IMG_CHAR  *pszIndent     = (bSummary ? "" : "    ");
+
+	if (ui64MMUStatus == 0x0)
+	{
+		PVR_DUMPDEBUG_LOG("%sMMU (%s) - OK", pszIndent, pszMetaOrCore);
+	}
+	else
+	{
+		IMG_UINT32 ui32PC        = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT;
+		IMG_UINT64 ui64Addr      = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT) <<  4; /* align shift */
+		IMG_UINT32 ui32Requester = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT;
+		IMG_UINT32 ui32SideBand  = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT;
+		IMG_UINT32 ui32MMULevel  = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT;
+		IMG_BOOL bRead           = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_RNW_EN) != 0;
+		IMG_BOOL bFault          = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_FAULT_EN) != 0;
+		IMG_BOOL bROFault        = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
+		                            RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x2;
+		IMG_BOOL bProtFault      = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
+		                            RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x3;
+		IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+		IMG_CHAR *pszTagID;
+		IMG_CHAR *pszTagSB;
+		IMG_DEV_VIRTADDR sFaultDevVAddr;
+		IMG_DEV_PHYADDR sPCDevPAddr = { 0 };
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+		IMG_BOOL bFound = IMG_FALSE;
+		RGXMEM_PROCESS_INFO sProcessInfo;
+		IMG_UINT32 ui32PageSizeBytes = _PageSizeHWToBytes(0);
+		FAULT_INFO *psInfo;
+#endif
+
+		_RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32SideBand, bRead, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE);
+
+		PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore);
+		PVR_DUMPDEBUG_LOG("%s  * MMU status (0x%016" IMG_UINT64_FMTSPECX "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECX ", %s (%s)%s%s%s%s.",
+						  pszIndent,
+						  ui64MMUStatus,
+						  ui32PC,
+		                  (bRead)?"Reading from":"Writing to",
+						  ui64Addr,
+						  pszTagID,
+						  pszTagSB,
+						  (bFault)?", Fault":"",
+						  (bROFault)?", Read Only fault":"",
+						  (bProtFault)?", PM/META protection fault":"",
+						  _RGXDecodeMMULevel(ui32MMULevel));
+		/* Check if the host thinks this fault is valid */
+
+		sFaultDevVAddr.uiAddr = ui64Addr;
+
+		if (bSummary)
+		{
+			/*
+			 *  The first 7 or 8 cat bases are memory contexts used for PM
+			 *  or firmware. The rest are application contexts.
+			 *
+			 *  It is not possible for the host to obtain the cat base
+			 *  address while the FW is running (since the cat bases are
+			 *  indirectly accessed), but in the case of the 'live' PC
+			 *  we can see if the FW has already logged it in the HWR log.
+			 */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+			 if (ui32PC > 7)
+#else
+			 if (ui32PC > 6)
+#endif
+			 {
+				IMG_UINT32  ui32LatestHWRNumber = 0;
+				IMG_UINT64	ui64LatestMMUStatus = 0;
+				IMG_UINT64	ui64LatestPCAddress = 0;
+				IMG_UINT32  ui32HWRIndex;
+
+				for (ui32HWRIndex = 0 ;  ui32HWRIndex < RGXFWIF_HWINFO_MAX ;  ui32HWRIndex++)
+				{
+					RGX_HWRINFO  *psHWRInfo = &psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex];
+
+					if (psHWRInfo->ui32HWRNumber > ui32LatestHWRNumber  &&
+					    psHWRInfo->eHWRType == RGX_HWRTYPE_MMUFAULT)
+					{
+						ui32LatestHWRNumber = psHWRInfo->ui32HWRNumber;
+						ui64LatestMMUStatus = psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus;
+						ui64LatestPCAddress = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress;
+					}
+				}
+
+				if (ui64LatestMMUStatus == ui64MMUStatus  &&  ui64LatestPCAddress != 0)
+				{
+					sPCDevPAddr.uiAddr = ui64LatestPCAddress;
+					PVR_DUMPDEBUG_LOG("%sLocated PC address: 0x%016" IMG_UINT64_FMTSPECX, pszIndent, sPCDevPAddr.uiAddr);
+				}
+			}
+			else
+			{
+				sPCDevPAddr.uiAddr = RGXFWIF_INVALID_PC_PHYADDR;
+			}
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG("%sFW logged fault using PC Address: 0x%016" IMG_UINT64_FMTSPECX,
+			                  pszIndent, ui64PCAddress);
+			sPCDevPAddr.uiAddr = ui64PCAddress;
+		}
+
+		if (bSummary  &&  sPCDevPAddr.uiAddr != 0)
+		{
+			PVR_DUMPDEBUG_LOG("%sChecking faulting address " IMG_DEV_VIRTADDR_FMTSPEC,
+			                  pszIndent, sFaultDevVAddr.uiAddr);
+			RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr,
+								 pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+		 /* look to see if we have already processed this fault.
+		  * if so then use the previously acquired information.
+		  */
+		OSLockAcquire(psDevInfo->hDebugFaultInfoLock);
+		psInfo = _QueryFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, sFaultDevVAddr, ui64CRTimer);
+
+		if(psInfo == NULL)
+		{
+			if(sPCDevPAddr.uiAddr != RGXFWIF_INVALID_PC_PHYADDR)
+			{
+				/* look up the process details for the faulting page catalogue */
+				bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo);
+
+				if(bFound)
+				{
+					IMG_BOOL bHits;
+
+					psInfo = _AcquireNextFaultInfoElement();
+
+					/* get any DevicememHistory data for the faulting address */
+					bHits = _GetDevicememHistoryData(sProcessInfo.uiPID,
+										sFaultDevVAddr,
+										psInfo->asQueryOut,
+										ui32PageSizeBytes);
+
+					if(bHits)
+					{
+						_CommitFaultInfo(psDevInfo,
+									psInfo,
+									&sProcessInfo,
+									sFaultDevVAddr,
+									ui64CRTimer);
+					}
+					else
+					{
+						/* no hits, so no data to present */
+						PVR_DUMPDEBUG_LOG("%sNo matching Devmem History for fault address", pszIndent);
+						psInfo = NULL;
+					}
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG("%sCould not find PID for PC 0x%016llX",
+					                  pszIndent, sPCDevPAddr.uiAddr);
+				}
+			}
+			else
+			{
+				PVR_DUMPDEBUG_LOG("%sPage fault not applicable to Devmem History",
+				                  pszIndent);
+			}
+		}
+
+		if(psInfo != NULL)
+		{
+			_PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, NULL);
+		}
+
+		OSLockRelease(psDevInfo->hDebugFaultInfoLock);
+#endif
+	}
+}
+static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT),
+			  "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+
+
+
+#if !defined(NO_HARDWARE)
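+/*
+ * Summary of the NMI handshake implemented below: clear the sync flag in
+ * the shared boot/NMI page, enable and trigger an NMI in the MIPS wrapper,
+ * wait for NMI_TAKEN to assert, set the sync flag to let the FW NMI routine
+ * run, wait for ERL to clear, then copy the saved RGX_MIPS_STATE out of
+ * the shared page and disable NMI issuing again.
+ */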
+static PVRSRV_ERROR _RGXMipsExtraDebug(PVRSRV_RGXDEV_INFO *psDevInfo, PVRSRV_DEVICE_CONFIG *psDevConfig, RGX_MIPS_STATE *psMIPSState)
+{
+	void *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+	IMG_UINT32 ui32RegRead;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	/* This pointer contains a kernel mapping of a particular memory area shared
+	   between the driver and the firmware. This area is used for exchanging info
+	   about the internal state of the MIPS. */
+	IMG_UINT32 *pui32NMIMemoryPointer;
+	IMG_UINT32 *pui32NMIPageBasePointer;
+	IMG_BOOL bValid;
+	IMG_CPU_PHYADDR sCPUPhyAddrStart;
+	IMG_CPU_PHYADDR sCPUPhyAddrEnd;
+	PMR *psPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+
+	/* Map the FW data area into the kernel */
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+									 (void **)&pui32NMIMemoryPointer);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"_RGXMipsExtraDebug: Failed to acquire NMI shared memory area (%u)", eError));
+		goto map_error_fail;
+	}
+
+	eError = PMR_CpuPhysAddr(psPMR,
+							 RGXMIPSFW_LOG2_PAGE_SIZE,
+							 1,
+							 RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE,
+							 &sCPUPhyAddrStart,
+							 &bValid);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"_RGXMipsExtraDebug: PMR_CpuPhysAddr failed (%u)",
+				eError));
+		/* release the mapping acquired above before bailing out */
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+		goto map_error_fail;
+	}
+
+	sCPUPhyAddrEnd.uiAddr = sCPUPhyAddrStart.uiAddr + RGXMIPSFW_PAGE_SIZE;
+
+	/* Jump to the boot/NMI data page */
+	pui32NMIMemoryPointer += RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE);
+	pui32NMIPageBasePointer = pui32NMIMemoryPointer;
+
+	/* Jump to the NMI shared data area within the page above */
+	pui32NMIMemoryPointer += RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_NMI_SHARED_DATA_BASE);
+
+	/* Acquire the NMI operations lock */
+	OSLockAcquire(psDevInfo->hNMILock);
+
+	/* Make sure the synchronization flag is set to 0 */
+	pui32NMIMemoryPointer[RGXMIPSFW_NMI_SYNC_FLAG_OFFSET] = 0;
+
+	/* Flush out the dirty locations of the NMI page */
+	CacheOpExecKM(PMR_DeviceNode(psPMR),
+				  pui32NMIPageBasePointer,
+				  pui32NMIPageBasePointer + RGXMIPSFW_PAGE_SIZE/(sizeof(IMG_UINT32)),
+				  sCPUPhyAddrStart,
+				  sCPUPhyAddrEnd,
+				  PVRSRV_CACHE_OP_FLUSH);
+
+	/* Enable NMI issuing in the MIPS wrapper */
+	OSWriteHWReg64(pvRegsBaseKM,
+				   RGX_CR_MIPS_WRAPPER_NMI_ENABLE,
+				   RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN);
+
+	/* Check the MIPS is not in error state already (e.g. it is booting or an NMI has already been requested) */
+	ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+				   RGX_CR_MIPS_EXCEPTION_STATUS);
+	if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) || (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN))
+	{
+		eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+		goto fail;
+	}
+	ui32RegRead = 0;
+
+	/* Issue NMI */
+	OSWriteHWReg32(pvRegsBaseKM,
+				   RGX_CR_MIPS_WRAPPER_NMI_EVENT,
+				   RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN);
+
+	/* Wait for NMI Taken to be asserted */
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+									RGX_CR_MIPS_EXCEPTION_STATUS);
+		if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) == 0)
+	{
+		eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+		goto fail;
+	}
+	ui32RegRead = 0;
+
+	/* Allow the firmware to proceed */
+	pui32NMIMemoryPointer[RGXMIPSFW_NMI_SYNC_FLAG_OFFSET] = 1;
+
+	/* Flush out the dirty locations of the NMI page */
+	CacheOpExecKM(PMR_DeviceNode(psPMR),
+				  pui32NMIPageBasePointer,
+				  pui32NMIPageBasePointer + RGXMIPSFW_PAGE_SIZE/(sizeof(IMG_UINT32)),
+				  sCPUPhyAddrStart,
+				  sCPUPhyAddrEnd,
+				  PVRSRV_CACHE_OP_FLUSH);
+
+	/* Wait for the FW to have finished the NMI routine */
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+									RGX_CR_MIPS_EXCEPTION_STATUS);
+		if (!(ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN))
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+	if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN)
+	{
+		eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+		goto fail;
+	}
+	ui32RegRead = 0;
+
+	/* Copy state */
+	OSDeviceMemCopy(psMIPSState, pui32NMIMemoryPointer + RGXMIPSFW_NMI_STATE_OFFSET, sizeof(*psMIPSState));
+
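+	/*
+	 * Adjust the PC values copied out of the shared page. The raw
+	 * EPC/ErrorEPC values stored by the FW are assumed to be off by one
+	 * with respect to the address of interest, hence the decrements below.
+	 */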
+	--(psMIPSState->ui32ErrorEPC);
+	--(psMIPSState->ui32EPC);
+
+	/* Disable NMI issuing in the MIPS wrapper */
+	OSWriteHWReg32(pvRegsBaseKM,
+				   RGX_CR_MIPS_WRAPPER_NMI_ENABLE,
+				   0);
+
+fail:
+	/* Release the NMI operations lock */
+	OSLockRelease(psDevInfo->hNMILock);
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+map_error_fail:
+	return eError;
+}
+
+/* Print decoded information from cause register */
+static void _RGXMipsDumpCauseDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile, IMG_UINT32 ui32Cause)
+{
+#define INDENT "    "
+	const IMG_UINT32 ui32ExcCode = RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause);
+	const IMG_CHAR * const pszException = apszMIPSExcCodes[ui32ExcCode];
+
+	if (pszException != NULL)
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "Cause exception: %s", pszException);
+	}
+
+	/* IP Bits */
+	{
+		IMG_UINT32  ui32HWIRQStatus = RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(ui32Cause);
+		IMG_UINT32 i;
+
+		for (i = 0; i < RGXMIPSFW_C0_NBHWIRQ; ++i)
+		{
+			if (ui32HWIRQStatus & (1 << i))
+			{
+				PVR_DUMPDEBUG_LOG(INDENT "Hardware interrupt %d pending", i);
+				/* More than one HW IRQ can be pending at a time, so report them all */
+			}
+		}
+	}
+
+	if (ui32Cause & RGXMIPSFW_C0_CAUSE_FDCIPENDING)
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "FDC interrupt pending");
+	}
+
+	if (ui32Cause & RGXMIPSFW_C0_CAUSE_IV)
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "Interrupt uses special interrupt vector");
+	}
+
+	if (ui32Cause & RGXMIPSFW_C0_CAUSE_PCIPENDING)
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "Performance Counter Interrupt pending");
+	}
+
+	/* Unusable Coproc exception */
+	if (ui32ExcCode == 11)
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "Unusable Coprocessor: %d", RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(ui32Cause));
+	}
+
+	if (ui32Cause & RGXMIPSFW_C0_CAUSE_TIPENDING)
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "Timer Interrupt pending");
+	}
+
+#undef INDENT
+}
+
+static void _RGXMipsDumpDebugDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile, IMG_UINT32 ui32Debug, IMG_UINT32 ui32DEPC)
+{
+	const IMG_CHAR *pszDException = NULL;
+	IMG_UINT32 i;
+#define INDENT "    "
+
+	if (!(ui32Debug & RGXMIPSFW_C0_DEBUG_DM))
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "Debug Mode is OFF");
+		return;
+	}
+
+	pszDException = apszMIPSExcCodes[RGXMIPSFW_C0_DEBUG_EXCCODE(ui32Debug)];
+
+	if (pszDException != NULL)
+	{
+		PVR_DUMPDEBUG_LOG(INDENT "Debug exception: %s", pszDException);
+	}
+
+	for (i = 0; i < IMG_ARR_NUM_ELEMS(sMIPS_C0_DebugTable); ++i)
+	{
+		const RGXMIPSFW_C0_DEBUG_TBL_ENTRY * const psDebugEntry = &sMIPS_C0_DebugTable[i];
+
+		if (ui32Debug & psDebugEntry->ui32Mask)
+		{
+			PVR_DUMPDEBUG_LOG(INDENT "%s", psDebugEntry->pszExplanation);
+		}
+	}
+#undef INDENT
+	PVR_DUMPDEBUG_LOG("DEPC                    :0x%08X", ui32DEPC);
+}
+
+static inline void _RGXMipsDumpTLBEntry(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile, const RGX_MIPS_TLB_ENTRY *psEntry, IMG_UINT32 ui32Index)
+{
+#define INDENT "    "
+#define DUMP_TLB_LO(ENTRY_LO, ENTRY_NUM)                                          \
+	PVR_DUMPDEBUG_LOG(INDENT "EntryLo" #ENTRY_NUM                                 \
+					  ":%s PFN = 0x%05X, %s%s",                                   \
+					  apszPermissionInhibit[RGXMIPSFW_TLB_GET_INHIBIT(ENTRY_LO)], \
+					  RGXMIPSFW_TLB_GET_PFN(ENTRY_LO),                            \
+					  apszCoherencyTBL[RGXMIPSFW_TLB_GET_COHERENCY(ENTRY_LO)],    \
+					  apszDirtyGlobalValid[RGXMIPSFW_TLB_GET_DGV(ENTRY_LO)])
+
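+	/*
+	 * Decode tables for the MIPS TLB EntryLo fields: the RI/XI permission
+	 * inhibit bits, the 3-bit cache coherency attribute and the
+	 * Dirty/Global/Valid bits.
+	 */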
+	static const IMG_CHAR * const apszPermissionInhibit[4] =
+	{
+		"",
+		" XI,",
+		" RI,",
+		" RI/XI,"
+	};
+
+	static const IMG_CHAR * const apszCoherencyTBL[8] =
+	{
+		"Cacheable",
+		"Cacheable",
+		"Uncached",
+		"Cacheable",
+		"Cacheable",
+		"Cacheable",
+		"Cacheable",
+		"Uncached"
+	};
+
+	static const IMG_CHAR * const apszDirtyGlobalValid[8] =
+	{
+		"",
+		", G",
+		", V",
+		", GV",
+		", D",
+		", DG",
+		", DV",
+		", DGV"
+	};
+
+	PVR_DUMPDEBUG_LOG("Entry %u, Page Mask: 0x%04X, EntryHi: VPN2 = 0x%05X", ui32Index, RGXMIPSFW_TLB_GET_MASK(psEntry->ui32TLBPageMask),
+					  RGXMIPSFW_TLB_GET_VPN2(psEntry->ui32TLBHi));
+
+	DUMP_TLB_LO(psEntry->ui32TLBLo0, 0);
+
+	DUMP_TLB_LO(psEntry->ui32TLBLo1, 1);
+
+#undef DUMP_TLB_LO
+}
+
+#endif /* defined(RGX_FEATURE_MIPS) && !defined(NO_HARDWARE) */
+
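+/*
+ * Append the label of every flag set in ui32Flags to psDesc. Note that this
+ * appends with strcat, so callers must pass a zero-terminated buffer large
+ * enough for all matching labels (the callers below size it with
+ * RGX_DEBUG_STR_SIZE or MAX_FW_DESCRIPTION_LENGTH).
+ */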
+static void _Flags2Description(IMG_CHAR *psDesc, const IMG_FLAGS2DESC *psConvTable, IMG_UINT32 ui32TableSize, IMG_UINT32 ui32Flags)
+{
+	IMG_UINT32 ui32Idx;
+
+	for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++)
+	{
+		if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag)
+			{
+				strcat(psDesc, psConvTable[ui32Idx].pszLabel);
+			}
+	}
+}
+
+static void _GetFwFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32RawFlags)
+{
+	const IMG_CHAR *pszCswLabel = "Ctx switch: ";
+	strcat(psDesc, pszCswLabel);
+	_Flags2Description(psDesc, asCSW2Description, IMG_ARR_NUM_ELEMS(asCSW2Description), ui32RawFlags);
+	_Flags2Description(psDesc, asMisc2Description, IMG_ARR_NUM_ELEMS(asMisc2Description), ui32RawFlags);
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpFWAssert
+
+ @Description
+
+ Dump FW assert strings when a thread asserts.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer
+
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+	IMG_CHAR    *pszTraceAssertPath;
+	IMG_CHAR    *pszTraceAssertInfo;
+	IMG_UINT32  ui32TraceAssertLine;
+	IMG_UINT32  i;
+
+	for (i = 0; i < RGXFW_THREAD_NUM; i++)
+	{
+		pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath;
+		pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo;
+		ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum;
+
+		/* Print non-null assert strings */
+		if (*pszTraceAssertInfo)
+		{
+			PVR_DUMPDEBUG_LOG("FW-T%u Assert: %s (%s:%u)",
+			                  i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine);
+		}
+	}
+}
+
+static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+	IMG_UINT32 i;
+	for (i = 0; i < RGXFW_THREAD_NUM; i++)
+	{
+		if (psRGXFWIfTraceBufCtl->aui32CrPollAddr[i])
+		{
+			PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)",
+			                  i,
+			                  ((psRGXFWIfTraceBufCtl->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+			                  psRGXFWIfTraceBufCtl->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET,
+			                  psRGXFWIfTraceBufCtl->aui32CrPollMask[i]);
+		}
+	}
+
+}
+
+static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_BOOL        	bAnyLocked = IMG_FALSE;
+	IMG_UINT32      	dm, i;
+	IMG_UINT32      	ui32LineSize;
+	IMG_CHAR	    	*pszLine, *pszTemp;
+	const IMG_CHAR 		*apszDmNames[] = { "GP(", "TDM(", "TA(", "3D(", "CDM(",
+								 "RTU(", "SHG(", NULL };
+
+	const IMG_CHAR 		*pszMsgHeader = "Number of HWR: ";
+	IMG_CHAR 			*pszLockupType = "";
+	RGXFWIF_HWRINFOBUF 	*psHWInfoBuf = psDevInfo->psRGXFWIfHWRInfoBuf;
+	RGX_HWRINFO 		*psHWRInfo;
+	IMG_UINT32      	ui32MsgHeaderSize = OSStringLength(pszMsgHeader);
+	IMG_UINT32			ui32HWRRecoveryFlags;
+	IMG_UINT32			ui32ReadIndex;
+
+	if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+	{
+		apszDmNames[RGXFWIF_DM_TDM] = "2D(";
+	}
+
+	for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+	{
+		if (psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[dm]  ||
+		    psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[dm])
+		{
+			bAnyLocked = IMG_TRUE;
+			break;
+		}
+	}
+
+	if (!bAnyLocked && (psRGXFWIfTraceBufCtl->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK))
+	{
+		/* No HWR situation, print nothing */
+		return;
+	}
+
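+	/* Worked example: for a 7-DM configuration this budgets 15 ("Number of
+	 * HWR: ") + 7*39 (per-DM "name(recovered/lockedup+overran), ") +
+	 * 7 ("FALSE()") + 7*11 (false-detect count + comma per DM) + 1 = 373
+	 * bytes. */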
+	ui32LineSize = sizeof(IMG_CHAR) * (	ui32MsgHeaderSize +
+			(psDevInfo->sDevFeatureCfg.ui32MAXDMCount*(	4/*DM name + left parenthesis*/ +
+								10/*UINT32 max num of digits*/ +
+								1/*slash*/ +
+								10/*UINT32 max num of digits*/ +
+								1/*plus*/ +
+								10/*UINT32 max num of digits*/ +
+								3/*right parenthesis + comma + space*/)) +
+			7 + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*11)/* "FALSE()" + (UINT32 max num of digits + comma) per DM */ +
+			1/* \0 */);
+
+	pszLine = OSAllocMem(ui32LineSize);
+	if (pszLine == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"_RGXDumpRGXDebugSummary: Out of mem allocating line string (size: %d)", ui32LineSize));
+		return;
+	}
+
+	OSStringCopy(pszLine, pszMsgHeader);
+	pszTemp = pszLine + ui32MsgHeaderSize;
+
+	for (dm = 0; (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount) && (apszDmNames[dm] != NULL); dm++)
+	{
+		OSStringCopy(pszTemp, apszDmNames[dm]);
+		pszTemp += OSStringLength(apszDmNames[dm]);
+		pszTemp += OSSNPrintf(pszTemp,
+				10 + 1 + 10 + 1 + 10 + 1 + 1 + 1 + 1 /* UINT32 + slash + UINT32 + plus + UINT32 + right parenthesis + comma + space + \0 */,
+				"%u/%u+%u), ",
+				psRGXFWIfTraceBufCtl->aui32HwrDmRecoveredCount[dm],
+				psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[dm],
+				psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[dm]);
+	}
+
+	OSStringCopy(pszTemp, "FALSE(");
+	pszTemp += 6;
+
+	for (dm = 0; (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount) && (apszDmNames[dm] != NULL); dm++)
+	{
+		pszTemp += OSSNPrintf(pszTemp,
+				10 + 1 + 1 /* UINT32 max num + comma + \0 */,
+				(dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1 ? "%u," : "%u)"),
+				psRGXFWIfTraceBufCtl->aui32HwrDmFalseDetectCount[dm]);
+	}
+
+	PVR_DUMPDEBUG_LOG("%s", pszLine);
+
+	OSFreeMem(pszLine);
+
+	/* Print out per HWR info */
+	for (dm = 0; (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount) && (apszDmNames[dm] != NULL); dm++)
+	{
+		if (dm == RGXFWIF_DM_GP)
+		{
+			PVR_DUMPDEBUG_LOG("DM %d (GP)", dm);
+		}
+		else
+		{
+			IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE] = "";
+
+			_Flags2Description(sPerDmHwrDescription, asDmState2Description, IMG_ARR_NUM_ELEMS(asDmState2Description), psRGXFWIfTraceBufCtl->aui32HWRRecoveryFlags[dm]);
+			PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x: %s)", dm, psRGXFWIfTraceBufCtl->aui32HWRRecoveryFlags[dm], sPerDmHwrDescription);
+		}
+
+		ui32ReadIndex = 0;
+		for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+		{
+			psHWRInfo = &psHWInfoBuf->sHWRInfo[ui32ReadIndex];
+
+			if((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0))
+			{
+				IMG_CHAR  aui8RecoveryNum[10+10+1];
+				IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+				/* Split OS timestamp in seconds and nanoseconds */
+				ConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds);
+
+				ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags;
+				if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; }
+				else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; }
+				else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; }
+				else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; }
+
+				OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber);
+				PVR_DUMPDEBUG_LOG("  %s PID = %d, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
+				                   aui8RecoveryNum,
+				                   psHWRInfo->ui32PID,
+				                   psHWRInfo->ui32FrameNum,
+				                   psHWRInfo->ui32ActiveHWRTData,
+				                   psHWRInfo->ui32EventStatus,
+				                   pszLockupType);
+				pszTemp = &aui8RecoveryNum[0];
+				while (*pszTemp != '\0')
+				{
+					*pszTemp++ = ' ';
+				}
+				PVR_DUMPDEBUG_LOG("  %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd,
+				                   aui8RecoveryNum,
+				                   psHWRInfo->ui64CRTimer,
+				                   ui64Seconds,
+				                   ui64Nanoseconds,
+				                   (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256);
+				if (psHWRInfo->ui64CRTimeHWResetFinish != 0)
+				{
+					if (psHWRInfo->ui64CRTimeFreelistReady != 0)
+					{
+						PVR_DUMPDEBUG_LOG("  %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd,
+										   aui8RecoveryNum,
+										   (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+										   (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+										   (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256,
+										   (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256);
+					}
+					else
+					{
+						PVR_DUMPDEBUG_LOG("  %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd,
+										   aui8RecoveryNum,
+										   (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+										   (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+										   (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256);
+					}
+				}
+
+				switch(psHWRInfo->eHWRType)
+				{
+					case RGX_HWRTYPE_BIF0FAULT:
+					case RGX_HWRTYPE_BIF1FAULT:
+					{
+						if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+						{
+							_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXFWIF_HWRTYPE_BIF_BANK_GET(psHWRInfo->eHWRType),
+											psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+											psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+											psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress,
+											psHWRInfo->ui64CRTimer,
+											IMG_FALSE);
+						}
+					}
+					break;
+					case RGX_HWRTYPE_TEXASBIF0FAULT:
+					{
+						if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+						{
+							if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK)
+							{
+								_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF,
+											psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+											psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+											psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress,
+											psHWRInfo->ui64CRTimer,
+											IMG_FALSE);
+							}
+						}
+					}
+					break;
+					case RGX_HWRTYPE_DPXMMUFAULT:
+					{
+						if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+						{
+							if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+							{
+									_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_DPX_BIF,
+													psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+													psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+													psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress,
+													psHWRInfo->ui64CRTimer,
+													IMG_FALSE);
+							}
+						}
+					}
+					break;
+					case RGX_HWRTYPE_MMUFAULT:
+					{
+						if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+						{
+							_RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+													  psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus,
+													  psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress,
+													  psHWRInfo->ui64CRTimer,
+													  IMG_FALSE,
+													  IMG_FALSE);
+						}
+					}
+					break;
+
+					case RGX_HWRTYPE_MMUMETAFAULT:
+					{
+						if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+						{
+
+							_RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+						                          psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus,
+						                          psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress,
+						                          psHWRInfo->ui64CRTimer,
+						                          IMG_TRUE,
+						                          IMG_FALSE);
+						}
+					}
+					break;
+
+
+					case RGX_HWRTYPE_POLLFAILURE:
+					{
+						PVR_DUMPDEBUG_LOG("    T%u polling %s (reg:0x%08X mask:0x%08X last:0x%08X)",
+										  psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum,
+										  ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+										  psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET,
+										  psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask,
+										  psHWRInfo->uHWRData.sPollInfo.ui32CrPollLastValue);
+					}
+					break;
+
+					case RGX_HWRTYPE_OVERRUN:
+					case RGX_HWRTYPE_UNKNOWNFAILURE:
+					{
+						/* Nothing to dump */
+					}
+					break;
+
+					default:
+					{
+						PVR_ASSERT(IMG_FALSE);
+					}
+					break;
+				}
+			}
+
+			if (ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1)
+			{
+				ui32ReadIndex = psHWInfoBuf->ui32WriteIndex;
+			}
+			else
+			{
+				ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST;
+			}
+		}
+	}
+}
+
+#if !defined(NO_HARDWARE)
+
+/*!
+*******************************************************************************
+
+ @Function	_CheckForPendingPage
+
+ @Description
+
+ Check if the MMU indicates it is blocked on a pending page
+
+ @Input psDevInfo	 - RGX device info
+
+ @Return   IMG_BOOL      - IMG_TRUE if there is a pending page
+
+******************************************************************************/
+static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_UINT32 ui32BIFMMUEntry;
+
+	ui32BIFMMUEntry = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY);
+
+	return (ui32BIFMMUEntry & RGX_CR_BIF_MMU_ENTRY_PENDING_EN) ? IMG_TRUE : IMG_FALSE;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_GetPendingPageInfo
+
+ @Description
+
+ Get information about the pending page from the MMU status registers
+
+ @Input psDevInfo	 - RGX device info
+ @Output psDevVAddr      - The device virtual address of the pending MMU address translation
+ @Output pui32CatBase    - The page catalog base
+ @Output pui32DataType   - The MMU entry data type
+
+ @Return   void
+
+******************************************************************************/
+static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr,
+									IMG_UINT32 *pui32CatBase,
+									IMG_UINT32 *pui32DataType)
+{
+	IMG_UINT64 ui64BIFMMUEntryStatus;
+
+	ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY_STATUS);
+
+	psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK);
+
+	*pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK) >>
+								RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT;
+
+	*pui32DataType = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK) >>
+								RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT;
+}
+
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpRGXDebugSummary
+
+ @Description
+
+ Dump a summary in human readable form with the RGX state
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psDevInfo	     - RGX device info
+ @Input bRGXPoweredON        - IMG_TRUE if RGX device is on
+
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile,
+					PVRSRV_RGXDEV_INFO *psDevInfo,
+					IMG_BOOL bRGXPoweredON)
+{
+	IMG_CHAR *pszState, *pszReason;
+	RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	IMG_UINT32 ui32OSid;
+	IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE] = "";
+	RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+	/* space for the current clock speed and 3 previous */
+	RGXFWIF_TIME_CORR asTimeCorrs[4];
+	IMG_UINT32 ui32NumClockSpeedChanges;
+
+#if defined(NO_HARDWARE)
+	PVR_UNREFERENCED_PARAMETER(bRGXPoweredON);
+#else
+	if (bRGXPoweredON)
+	{
+		if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+		{
+
+			IMG_UINT64	ui64RegValMMUStatus;
+
+			ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS);
+			_RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, 0, 0, IMG_FALSE, IMG_TRUE);
+
+			ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META);
+			_RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, 0, 0, IMG_TRUE, IMG_TRUE);
+		}
+		else
+		{
+			IMG_UINT64	ui64RegValMMUStatus, ui64RegValREQStatus;
+
+			ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_MMU_STATUS);
+			ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_REQ_STATUS);
+
+			_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF0, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+
+			if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SINGLE_BIF_BIT_MASK))
+			{
+				ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_MMU_STATUS);
+				ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_REQ_STATUS);
+				_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF1, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+			}
+
+			if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK)
+			{
+				IMG_UINT32  ui32PhantomCnt = RGX_REQ_NUM_PHANTOMS(psDevInfo->sDevFeatureCfg.ui32NumClusters);
+
+				if(ui32PhantomCnt > 1)
+				{
+					IMG_UINT32  ui32Phantom;
+					for (ui32Phantom = 0;  ui32Phantom < ui32PhantomCnt;  ui32Phantom++)
+					{
+						/* This can't be done as it may interfere with the FW... */
+						/*OSWriteHWReg64(RGX_CR_TEXAS_INDIRECT, ui32Phantom);*/
+
+						ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS);
+						ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS);
+
+						_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+					}
+				}
+				else
+				{
+					ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS);
+					ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS);
+
+					_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+				}
+			}
+
+			if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+			{
+				ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, DPX_CR_BIF_FAULT_BANK_MMU_STATUS);
+				ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, DPX_CR_BIF_FAULT_BANK_REQ_STATUS);
+				_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_DPX_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+			}
+
+		}
+
+		if(_CheckForPendingPage(psDevInfo))
+		{
+			IMG_UINT32 ui32CatBase;
+			IMG_UINT32 ui32DataType;
+			IMG_DEV_VIRTADDR sDevVAddr;
+
+			PVR_DUMPDEBUG_LOG("MMU Pending page: Yes");
+
+			_GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase, &ui32DataType);
+
+			if(ui32CatBase >= 8)
+			{
+				PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase);
+			}
+			else
+			{
+				IMG_DEV_PHYADDR sPCDevPAddr;
+
+				sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32CatBase));
+
+				PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC
+							" on cat base %u. PC Addr = 0x%" IMG_UINT64_FMTSPECX,
+								sDevVAddr.uiAddr,
+								ui32CatBase,
+								sPCDevPAddr.uiAddr);
+				RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr,
+							pfnDumpDebugPrintf, pvDumpDebugFile);
+			}
+		}
+	}
+#endif /* NO_HARDWARE */
+
+	/* Firmware state */
+	switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus))
+	{
+		case PVRSRV_DEVICE_HEALTH_STATUS_OK:  pszState = "OK";  break;
+		case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:  pszState = "NOT RESPONDING";  break;
+		case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:  pszState = "DEAD";  break;
+		default:  pszState = "UNKNOWN";  break;
+	}
+
+	switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason))
+	{
+		case PVRSRV_DEVICE_HEALTH_REASON_NONE:  pszReason = "";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED:  pszReason = " - FW Assert";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING:  pszReason = " - Poll failure";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS:  pszReason = " - Global Event Object timeouts rising";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT:  pszReason = " - KCCB offset invalid";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED:  pszReason = " - KCCB stalled";  break;
+		default:  pszReason = " - Unknown reason";  break;
+	}
+
+	if (psRGXFWIfTraceBuf == NULL)
+	{
+		PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason);
+
+		/* can't dump any more information */
+		return;
+	}
+
+	_Flags2Description(sHwrStateDescription, asHwrState2Description, IMG_ARR_NUM_ELEMS(asHwrState2Description), psRGXFWIfTraceBuf->ui32HWRStateFlags);
+	PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x: %s)", pszState, pszReason, psRGXFWIfTraceBuf->ui32HWRStateFlags, sHwrStateDescription);
+	PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d other, %d total. Latency: %u ms)",
+	                  pszPowStateName[psRGXFWIfTraceBuf->ePowState],
+	                  (psDevInfo->pvAPMISRData)?"enabled":"disabled",
+	                  psDevInfo->ui32ActivePMReqOk,
+	                  psDevInfo->ui32ActivePMReqDenied,
+	                  psDevInfo->ui32ActivePMReqTotal - psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqDenied,
+	                  psDevInfo->ui32ActivePMReqTotal,
+	                  psRuntimeCfg->ui32ActivePMLatencyms);
+
+	ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges);
+	RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, IMG_ARR_NUM_ELEMS(asTimeCorrs));
+
+	PVR_DUMPDEBUG_LOG("RGX DVFS: %u frequency changes. Current frequency: %u MHz (sampled at %llx)",
+											ui32NumClockSpeedChanges,
+											asTimeCorrs[0].ui32CoreClockSpeed / 1000000,
+											(unsigned long long) asTimeCorrs[0].ui64OSTimeStamp);
+	if(ui32NumClockSpeedChanges > 0)
+	{
+		PVR_DUMPDEBUG_LOG("          Previous frequencies: %u, %u, %u MHz (Sampled at %llx, %llx, %llx)",
+												asTimeCorrs[1].ui32CoreClockSpeed / 1000000,
+												asTimeCorrs[2].ui32CoreClockSpeed / 1000000,
+												asTimeCorrs[3].ui32CoreClockSpeed / 1000000,
+												(unsigned long long) asTimeCorrs[1].ui64OSTimeStamp,
+												(unsigned long long) asTimeCorrs[2].ui64OSTimeStamp,
+												(unsigned long long) asTimeCorrs[3].ui64OSTimeStamp);
+	}
+
+	for (ui32OSid = 0; ui32OSid < RGXFW_NUM_OS; ui32OSid++)
+	{
+		IMG_UINT32 ui32OSStateFlags = psRGXFWIfTraceBuf->ui32OSStateFlags[ui32OSid];
+
+		PVR_DUMPDEBUG_LOG("RGX FW OS %u State: 0x%08x (Active: %s%s, Freelists: %s, Grow Request Pending: %s)", ui32OSid, ui32OSStateFlags,
+						   ((ui32OSStateFlags & RGXFW_OS_STATE_ACTIVE_OS) != 0)?"Yes":"No",
+						   ((ui32OSStateFlags & RGXFW_OS_STATE_OFFLOADING) != 0)?"- offloading":"",
+						   ((ui32OSStateFlags & RGXFW_OS_STATE_FREELIST_OK) != 0)?"Ok":"Not Ok",
+						   ((ui32OSStateFlags & RGXFW_OS_STATE_GROW_REQUEST_PENDING) != 0)?"Yes":"No"
+						   );
+	}
+	_RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf);
+
+	_RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf);
+
+	_RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf, psDevInfo);
+}
+
+static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile,
+						PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+/* List of extra META Slave Port debug registers */
+#define RGX_META_SP_EXTRA_DEBUG \
+			X(RGX_CR_META_SP_MSLVCTRL0) \
+			X(RGX_CR_META_SP_MSLVCTRL1) \
+			X(RGX_CR_META_SP_MSLVDATAX) \
+			X(RGX_CR_META_SP_MSLVIRQSTATUS) \
+			X(RGX_CR_META_SP_MSLVIRQENABLE) \
+			X(RGX_CR_META_SP_MSLVIRQLEVEL)
+
+	IMG_UINT32 ui32Idx, ui32RegIdx;
+	IMG_UINT32 ui32RegVal;
+	IMG_UINT32 ui32RegAddr;
+
+	const IMG_UINT32 aui32DebugRegAddr [] = {
+#define X(A) A,
+		RGX_META_SP_EXTRA_DEBUG
+#undef X
+		};
+
+	const IMG_CHAR* apszDebugRegName [] = {
+#define X(A) #A,
+	RGX_META_SP_EXTRA_DEBUG
+#undef X
+	};
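+	/*
+	 * The X-macro above expands into two parallel arrays: e.g.
+	 * aui32DebugRegAddr[0] holds RGX_CR_META_SP_MSLVCTRL0 and
+	 * apszDebugRegName[0] holds the string "RGX_CR_META_SP_MSLVCTRL0".
+	 */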
+
+	const IMG_UINT32 aui32Debug2RegAddr [] = {0x0A28, 0x0A30, 0x0A38};
+
+	PVR_DUMPDEBUG_LOG("META Slave Port extra debug:");
+
+	/* dump first set of Slave Port debug registers */
+	for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++)
+	{
+		const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx];
+
+		ui32RegAddr = aui32DebugRegAddr[ui32Idx];
+		ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+		PVR_DUMPDEBUG_LOG("  * %s: 0x%8.8X", pszRegName, ui32RegVal);
+	}
+
+	/* dump second set of Slave Port debug registers */
+	for (ui32Idx = 0; ui32Idx < 4; ui32Idx++)
+	{
+		OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx);
+		ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20);
+		PVR_DUMPDEBUG_LOG("  * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal);
+	}
+
+	for (ui32RegIdx = 0; ui32RegIdx < sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++)
+	{
+		ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx];
+		for (ui32Idx = 0; ui32Idx < 2; ui32Idx++)
+		{
+			OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx);
+			ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+			PVR_DUMPDEBUG_LOG("  * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal);
+		}
+	}
+
+}
+
+void RGXDumpDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile,
+			PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_UINT32 i;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	for (i = 0; i <= DEBUG_REQUEST_VERBOSITY_MAX; i++)
+	{
+		RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile,
+					psDevInfo, i);
+	}
+}
+
+/*
+ *  Array of all the Firmware Trace log IDs used to convert the trace data.
+ */
+typedef struct _TRACEBUF_LOG_ {
+	RGXFW_LOG_SFids	 eSFId;
+	IMG_CHAR		 *pszName;
+	IMG_CHAR		 *pszFmt;
+	IMG_UINT32		 ui32ArgNum;
+} TRACEBUF_LOG;
+
+static TRACEBUF_LOG aLogDefinitions[] =
+{
+#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e},
+	RGXFW_LOG_SFIDLIST
+#undef X
+};
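+/*
+ * Each X(a, b, c, d, e) entry of RGXFW_LOG_SFIDLIST expands to an initialiser
+ * of the form { RGXFW_LOG_CREATESFID(id, group, nargs), "name", "format", nargs },
+ * i.e. the argument count is encoded both in the SF id and stored separately
+ * for the checks below.
+ */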
+
+/* Mask used to compare log IDs while ignoring the argument-count field */
+#define NARGS_MASK (~(0xF<<16))
+static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile)
+{
+	TRACEBUF_LOG  *psLogDef    = &aLogDefinitions[0];
+	IMG_BOOL      bIntegrityOk = IMG_TRUE;
+
+	/*
+	 *  For every log ID, check the format string and number of arguments is valid.
+	 */
+	while (psLogDef->eSFId != RGXFW_SF_LAST)
+	{
+		IMG_UINT32    ui32Count;
+		IMG_CHAR      *pszString;
+		TRACEBUF_LOG  *psLogDef2;
+
+		/*
+		 * Check the number of arguments matches the number of '%' in the string and
+		 * check that no string uses %s which is not supported as it requires a
+		 * pointer to memory that is not going to be valid.
+		 */
+		pszString = psLogDef->pszFmt;
+		ui32Count = 0;
+
+		while (*pszString != '\0')
+		{
+			if (*pszString++ == '%')
+			{
+				ui32Count++;
+				if (*pszString == 's')
+				{
+					bIntegrityOk = IMG_FALSE;
+					PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s uses an unsupported format specifier (fmt: %%%c). Please fix.",
+									  psLogDef->pszName, *pszString);
+				}
+				else if (*pszString == '%')
+				{
+					/* Double % is a printable % sign and not a format string... */
+					ui32Count--;
+				}
+			}
+		}
+
+		if (ui32Count != psLogDef->ui32ArgNum)
+		{
+			bIntegrityOk = IMG_FALSE;
+			PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but only %d are specified. Please fix.",
+			                  psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum);
+		}
+
+		/* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */
+		if (ui32Count > 20)
+		{
+			bIntegrityOk = IMG_FALSE;
+			PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.",
+			                  psLogDef->pszName, ui32Count);
+		}
+
+		/* Check the id number is unique (don't take into account the number of arguments) */
+		ui32Count = 0;
+		psLogDef2 = &aLogDefinitions[0];
+
+		while (psLogDef2->eSFId != RGXFW_SF_LAST)
+		{
+			if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK))
+			{
+				ui32Count++;
+			}
+			psLogDef2++;
+		}
+
+		if (ui32Count != 1)
+		{
+			bIntegrityOk = IMG_FALSE;
+			PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.",
+			                  psLogDef->pszName, psLogDef->eSFId, ui32Count - 1);
+		}
+
+		/* Move to the next log ID... */
+		psLogDef++;
+	}
+
+	return bIntegrityOk;
+}
+
+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile,
+				PVRSRV_RGXDEV_INFO  *psDevInfo)
+{
+	RGXFWIF_TRACEBUF  *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+	static IMG_BOOL   bIntegrityCheckPassed = IMG_FALSE;
+
+	/* Check that the firmware trace is correctly defined... */
+	if (!bIntegrityCheckPassed)
+	{
+		bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile);
+		if (!bIntegrityCheckPassed)
+		{
+			return;
+		}
+	}
+
+	/* Dump FW trace information... */
+	if (psRGXFWIfTraceBufCtl != NULL)
+	{
+		IMG_UINT32  tid;
+
+		/* Print the log type settings... */
+		if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+		{
+			PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+							  ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+							  RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+							  );
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG("Debug log type: none");
+		}
+
+		/* Print the decoded log for each thread... */
+		for (tid = 0;  tid < RGXFW_THREAD_NUM;  tid++)
+		{
+			IMG_UINT32  *pui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
+			IMG_UINT32  ui32TracePtr  = psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer;
+			IMG_UINT32  ui32Count     = 0;
+
+			if (pui32TraceBuf == NULL)
+			{
+				/* trace buffer not yet allocated */
+				continue;
+			}
+
+			while (ui32Count < RGXFW_TRACE_BUFFER_SIZE)
+			{
+				IMG_UINT32  ui32Data, ui32DataToId;
+
+				/* Find the first valid log ID, skipping any invalid or stale data... */
+				do
+				{
+					ui32Data     = pui32TraceBuf[ui32TracePtr];
+					ui32DataToId = idToStringID(ui32Data);
+
+					/* If an unrecognised ID is found, check whether it is still a valid FW log ID; if so, the host-side decode table is out of date and needs updating. */
+					if (ui32DataToId == RGXFW_SF_LAST  &&  RGXFW_LOG_VALIDID(ui32Data))
+					{
+						PVR_DUMPDEBUG_LOG("ERROR: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data);
+						return;
+					}
+
+					/* Update the trace pointer... */
+					ui32TracePtr = (ui32TracePtr + 1) % RGXFW_TRACE_BUFFER_SIZE;
+					ui32Count++;
+				} while ((RGXFW_SF_LAST == ui32DataToId  ||  ui32DataToId >= RGXFW_SF_FIRST)  &&
+				         ui32Count < RGXFW_TRACE_BUFFER_SIZE);
+
+				if (ui32Count < RGXFW_TRACE_BUFFER_SIZE)
+				{
+					IMG_CHAR    szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%llu:T%u-%s> ";
+					IMG_UINT64  ui64Timestamp;
+					IMG_UINT    uiLen;
+
+					/* If we hit the ASSERT message then this is the end of the log... */
+					if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED)
+					{
+						PVR_DUMPDEBUG_LOG("ASSERTION %s failed at %s:%u",
+										  psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo,
+										  psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath,
+										  psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum);
+						break;
+					}
+
+					/*
+					 *  Print the trace string and provide up to 20 arguments which
+					 *  printf function will be able to use. We have already checked
+					 *  that no string uses more than this.
+					 */
+					OSStringCopy(&szBuffer[OSStringLength(szBuffer)], SFs[ui32DataToId].name);
+					uiLen = OSStringLength(szBuffer);
+					szBuffer[uiLen ? uiLen - 1 : 0] = '\0';
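+					/*
+					 * At this point ui32TracePtr has advanced past the log ID
+					 * word: the next two words hold the 64-bit timestamp
+					 * (high word first) and the words after that hold the
+					 * parameters, hence the +2 offsets below.
+					 */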
+					ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr +  0) % RGXFW_TRACE_BUFFER_SIZE]) << 32 |
+					               (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr +  1) % RGXFW_TRACE_BUFFER_SIZE]);
+					PVR_DUMPDEBUG_LOG(szBuffer, ui64Timestamp, tid, groups[RGXFW_SF_GID(ui32Data)],
+									  pui32TraceBuf[(ui32TracePtr +  2) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr +  3) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr +  4) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr +  5) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr +  6) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr +  7) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr +  8) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr +  9) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr + 10) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr + 11) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr + 12) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr + 13) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr + 14) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr + 15) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr + 16) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr + 17) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr + 18) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr + 19) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr + 20) % RGXFW_TRACE_BUFFER_SIZE],
+									  pui32TraceBuf[(ui32TracePtr + 21) % RGXFW_TRACE_BUFFER_SIZE]);
+
+					/* Update the trace pointer... */
+					ui32TracePtr = (ui32TracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data)) % RGXFW_TRACE_BUFFER_SIZE;
+					ui32Count    = (ui32Count    + 2 + RGXFW_SF_PARAMNUM(ui32Data));
+				}
+			}
+		}
+	}
+}
+
+static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState)
+{
+	switch (eDevState)
+	{
+		case PVRSRV_DEVICE_STATE_INIT:
+			return "Initialising";
+		case PVRSRV_DEVICE_STATE_ACTIVE:
+			return "Active";
+		case PVRSRV_DEVICE_STATE_DEINIT:
+			return "De-initialising";
+		case PVRSRV_DEVICE_STATE_UNDEFINED:
+			PVR_ASSERT(!"Device has undefined state");
+			/* fallthrough */
+		default:
+			return "Unknown";
+	}
+}
+
+static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState)
+{
+	switch (ePowerState)
+	{
+		case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT";
+		case PVRSRV_DEV_POWER_STATE_OFF: return "OFF";
+		case PVRSRV_DEV_POWER_STATE_ON: return "ON";
+		default: return "UNKNOWN";
+	}
+}
+
+void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile,
+				PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 ui32VerbLevel)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDebugRequestProcess: failed to acquire power lock (error 0x%x)", eError));
+		return;
+	}
+
+	switch (ui32VerbLevel)
+	{
+		case DEBUG_REQUEST_VERBOSITY_LOW :
+		{
+			PVRSRV_DEV_POWER_STATE  ePowerState;
+			IMG_BOOL                bRGXPoweredON;
+			const IMG_CHAR 			*Bit32 = "32 Bit", *Bit64 = "64 Bit";
+
+			eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXDebugRequestProcess: Error retrieving RGX power state. No debug info dumped."));
+				goto Exit;
+			}
+
+			bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+			if(psPVRSRVData->sDriverInfo.bIsNoMatch)
+			{
+				PVR_DUMPDEBUG_LOG("------[ Driver Info ]------");
+				PVR_DUMP_DRIVER_INFO("UM", psPVRSRVData->sDriverInfo.sUMBuildInfo);
+				PVR_DUMP_DRIVER_INFO("KM", psPVRSRVData->sDriverInfo.sKMBuildInfo);
+			}
+
+			PVR_DUMPDEBUG_LOG("KM Arch: %s", (psPVRSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT) ?
+									Bit64 : Bit32);
+
+			if(psPVRSRVData->sDriverInfo.ui8UMSupportedArch)
+			{
+				if ((psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_BOTH) ==
+						BUILD_ARCH_BOTH)
+				{
+					PVR_DUMPDEBUG_LOG("UM Connected Clients Arch: %s and %s", Bit64, Bit32);
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG("UM Connected Clients: %s", (psPVRSRVData->sDriverInfo.ui8UMSupportedArch &
+							BUILD_ARCH_64BIT) ? Bit64 : Bit32);
+				}
+			}
+
+			PVR_DUMPDEBUG_LOG("------[ RGX summary ]------");
+			PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d", psDevInfo->sDevFeatureCfg.ui32B,
+													   psDevInfo->sDevFeatureCfg.ui32V,
+													   psDevInfo->sDevFeatureCfg.ui32N,
+													   psDevInfo->sDevFeatureCfg.ui32C);
+			PVR_DUMPDEBUG_LOG("RGX Device State: %s", _RGXGetDebugDevStateString(psDeviceNode->eDevState));
+			PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState));
+
+			_RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON);
+
+			if (bRGXPoweredON)
+			{
+				PVR_DUMPDEBUG_LOG("------[ RGX registers ]------");
+				PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear):   0x%p", psDevInfo->pvRegsBaseKM);
+				PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr);
+
+				if(psDevInfo->sDevFeatureCfg.ui32META)
+				{
+					/* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */
+					OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0);
+				}
+
+				eError = RGXRunScript(psDevInfo, psDevInfo->psScripts->asDbgCommands, RGX_MAX_DEBUG_COMMANDS, PDUMP_FLAGS_CONTINUOUS, pfnDumpDebugPrintf, pvDumpDebugFile);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: RGXRunScript failed (%d)", eError));
+					if(psDevInfo->sDevFeatureCfg.ui32META)
+					{
+						PVR_DPF((PVR_DBG_ERROR,"Dump Slave Port debug information"));
+						_RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+					}
+				}
+#if !defined(NO_HARDWARE)
+				if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+				{
+					RGX_MIPS_STATE sMIPSState;
+					PVRSRV_ERROR eError;
+					OSCachedMemSet((void *)&sMIPSState, 0x00, sizeof(RGX_MIPS_STATE));
+					eError = _RGXMipsExtraDebug(psDevInfo, psDeviceNode->psDevConfig, &sMIPSState);
+					PVR_DUMPDEBUG_LOG("---- [ MIPS internal state ] ----");
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DUMPDEBUG_LOG("MIPS extra debug not available");
+					}
+					else
+					{
+						PVR_DUMPDEBUG_LOG("PC                      :0x%08X", sMIPSState.ui32ErrorEPC);
+						PVR_DUMPDEBUG_LOG("STATUS_REGISTER         :0x%08X", sMIPSState.ui32StatusRegister);
+						PVR_DUMPDEBUG_LOG("CAUSE_REGISTER          :0x%08X", sMIPSState.ui32CauseRegister);
+						_RGXMipsDumpCauseDecode(pfnDumpDebugPrintf, pvDumpDebugFile, sMIPSState.ui32CauseRegister);
+						PVR_DUMPDEBUG_LOG("BAD_REGISTER            :0x%08X", sMIPSState.ui32BadRegister);
+						PVR_DUMPDEBUG_LOG("EPC                     :0x%08X", sMIPSState.ui32EPC);
+						PVR_DUMPDEBUG_LOG("SP                      :0x%08X", sMIPSState.ui32SP);
+						PVR_DUMPDEBUG_LOG("BAD_INSTRUCTION         :0x%08X", sMIPSState.ui32BadInstr);
+						PVR_DUMPDEBUG_LOG("DEBUG                   :");
+						_RGXMipsDumpDebugDecode(pfnDumpDebugPrintf, pvDumpDebugFile, sMIPSState.ui32Debug, sMIPSState.ui32DEPC);
+
+						{
+							IMG_UINT32 ui32Idx;
+
+							PVR_DUMPDEBUG_LOG("TLB                     :");
+							for (ui32Idx = 0;
+								 ui32Idx < IMG_ARR_NUM_ELEMS(sMIPSState.asTLB);
+								 ++ui32Idx)
+							{
+								_RGXMipsDumpTLBEntry(pfnDumpDebugPrintf, pvDumpDebugFile, &sMIPSState.asTLB[ui32Idx], ui32Idx);
+							}
+						}
+					}
+					PVR_DUMPDEBUG_LOG("--------------------------------");
+				}
+#endif
+			}
+			else
+			{
+				PVR_DUMPDEBUG_LOG(" (!) RGX power is down. No registers dumped");
+			}
+
+			/* Dump out the kernel CCB. */
+			{
+				RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+				if (psKCCBCtl != NULL)
+				{
+					PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X",
+					                  psKCCBCtl->ui32WriteOffset,
+					                  psKCCBCtl->ui32ReadOffset);
+				}
+			}
+
+			/* Dump out the firmware CCB. */
+			{
+				RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl;
+
+				if (psFCCBCtl != NULL)
+				{
+					PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X",
+					                   psFCCBCtl->ui32WriteOffset,
+					                   psFCCBCtl->ui32ReadOffset);
+				}
+			}
+
+			/* Dump the KCCB commands executed */
+			{
+				PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d",
+				                  psDevInfo->psRGXFWIfTraceBuf->ui32KCCBCmdsExecuted);
+			}
+
+			/* Dump the IRQ info for threads*/
+			{
+				IMG_UINT32 ui32TID;
+
+				for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+				{
+					PVR_DUMPDEBUG_LOG("RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u",
+									  ui32TID,
+									  psDevInfo->psRGXFWIfTraceBuf->aui32InterruptCount[ui32TID],
+									  psDevInfo->aui32SampleIRQCount[ui32TID]);
+				}
+			}
+
+			/* Dump the FW config flags */
+			{
+				RGXFWIF_OS_CONFIG   *psOSConfig = psDevInfo->psFWIfOSConfig;
+				IMG_CHAR sFwFlagsDescription[MAX_FW_DESCRIPTION_LENGTH] = "";
+
+				if (!psOSConfig)
+				{
+					PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: OS Config is not mapped into CPU space"));
+					goto Exit;
+				}
+
+				_GetFwFlagsDescription(sFwFlagsDescription, psOSConfig->ui32ConfigFlags);
+				PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%X (%s)", psOSConfig->ui32ConfigFlags, sFwFlagsDescription);
+			}
+
+			break;
+
+		}
+		case DEBUG_REQUEST_VERBOSITY_MEDIUM :
+		{
+			IMG_INT tid;
+			/* Dump FW trace information */
+			if (psDevInfo->psRGXFWIfTraceBuf != NULL)
+			{
+				RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+				for (tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++)
+				{
+					IMG_UINT32	i;
+					IMG_BOOL	bPrevLineWasZero = IMG_FALSE;
+					IMG_BOOL	bLineIsAllZeros = IMG_FALSE;
+					IMG_UINT32	ui32CountLines = 0;
+					IMG_UINT32	*pui32TraceBuffer;
+					IMG_CHAR	*pszLine;
+
+					if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+					{
+						PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+						                  ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+						                  RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+						                  );
+					}
+					else
+					{
+						PVR_DUMPDEBUG_LOG("Debug log type: none");
+					}
+
+					pui32TraceBuffer = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
+
+					/* Skip if trace buffer is not allocated */
+					if (pui32TraceBuffer == NULL)
+					{
+						PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid);
+						continue;
+					}
+
+					/* each element in the line is 8 characters plus a space.  The '+1' is because of the final trailing '\0'. */
+					pszLine = OSAllocMem(9*RGXFW_TRACE_BUFFER_LINESIZE+1);
+					if (pszLine == NULL)
+					{
+						PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: Out of mem allocating line string (size: %d)", 9*RGXFW_TRACE_BUFFER_LINESIZE));
+						goto Exit;
+					}
+
+					PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid);
+					PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer);
+					PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", RGXFW_TRACE_BUFFER_SIZE);
+
+					for (i = 0; i < RGXFW_TRACE_BUFFER_SIZE; i += RGXFW_TRACE_BUFFER_LINESIZE)
+					{
+						IMG_UINT32 k = 0;
+						IMG_UINT32 ui32Line = 0x0;
+						IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32);
+						IMG_CHAR   *pszBuf = pszLine;
+
+						for (k = 0; k < RGXFW_TRACE_BUFFER_LINESIZE; k++)
+						{
+							ui32Line |= pui32TraceBuffer[i + k];
+
+							/* prepare the line to print it. The '+1' is because of the trailing '\0' added */
+							OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]);
+							pszBuf += 9; /* write over the '\0' */
+						}
+
+						bLineIsAllZeros = (ui32Line == 0x0);
+
+						if (bLineIsAllZeros)
+						{
+							if (bPrevLineWasZero)
+							{
+								ui32CountLines++;
+							}
+							else
+							{
+								bPrevLineWasZero = IMG_TRUE;
+								ui32CountLines = 1;
+								PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 00000000", ui32LineOffset);
+							}
+						}
+						else
+						{
+							if (bPrevLineWasZero  &&  ui32CountLines > 1)
+							{
+								PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines);
+							}
+							bPrevLineWasZero = IMG_FALSE;
+
+							PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine);
+						}
+
+					}
+					if (bPrevLineWasZero)
+					{
+						PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines);
+					}
+
+					PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid);
+
+					OSFreeMem(pszLine);
+				}
+
+				if(psDevInfo->sDevFeatureCfg.ui32META)
+				{
+					RGXFWIF_OS_CONFIG *psOSConfig = psDevInfo->psFWIfOSConfig;
+
+					if (!psOSConfig)
+					{
+						PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: OS Config is not mapped into CPU space"));
+						goto Exit;
+					}
+
+					if ((psOSConfig->ui32ConfigFlags & RGXFWIF_INICFG_METAT1_DUMMY) != 0)
+					{
+						RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+						IMG_UINT32 *pui32T1PCX = &psRGXFWIfTraceBufCtl->ui32T1PCX[0];
+						IMG_UINT32 ui32T1PCXWOff = psRGXFWIfTraceBufCtl->ui32T1PCXWOff;
+						IMG_UINT32 i = ui32T1PCXWOff;
+
+						PVR_DUMPDEBUG_LOG("------[ FW Thread 1 PCX list (most recent first) ]------");
+						do
+						{
+							PVR_DUMPDEBUG_LOG("  0x%08x", pui32T1PCX[i]);
+							i = (i == 0) ? (RGXFWIF_MAX_PCX - 1) : (i - 1);
+
+						} while (i != ui32T1PCXWOff);
+
+						PVR_DUMPDEBUG_LOG("------[ FW Thread 1 PCX list [END] ]------");
+					}
+
+				}
+			}
+
+			{
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP)
+				PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------");
+#else
+				PVR_DUMPDEBUG_LOG("------[ Stalled FWCtxs ]------");
+#endif
+				CheckForStalledTransferCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+				CheckForStalledRenderCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+				if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+				{
+					CheckForStalledComputeCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+				}
+				if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+				{
+					CheckForStalledTDMTransferCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+				}
+				if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+				{
+					CheckForStalledRayCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+				}
+			}
+			break;
+		}
+		case DEBUG_REQUEST_VERBOSITY_HIGH:
+		{
+			PVRSRV_ERROR            eError;
+			PVRSRV_DEV_POWER_STATE  ePowerState;
+			IMG_BOOL                bRGXPoweredON;
+
+			eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXDebugRequestProcess: Error retrieving RGX power state. No debug info dumped."));
+				/* Must not return here: the power lock acquired above has to be released */
+				goto Exit;
+			}
+
+			bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+
+			PVR_DUMPDEBUG_LOG("------[ Debug summary ]------");
+
+			_RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON);
+			break;
+		}
+		default:
+			break;
+	}
+
+Exit:
+	PVRSRVPowerUnlock(psDeviceNode);
+}
+
+/*
+	RGXPanic
+*/
+void RGXPanic(PVRSRV_RGXDEV_INFO	*psDevInfo)
+{
+	PVR_LOG(("RGX panic"));
+	PVRSRVDebugRequest(psDevInfo->psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
+					   NULL, NULL);
+	OSPanic();
+}
+
+
+/******************************************************************************
+ End of file (rgxdebug.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxdebug.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxdebug.h
new file mode 100644
index 0000000..cdfd984
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxdebug.h
@@ -0,0 +1,208 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX debug header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX debugging functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXDEBUG_H__)
+#define __RGXDEBUG_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+
+
+/**
+ * Debug utility macro for printing the FW IRQ count and the last IRQ count
+ * sampled in the LISR for each RGX FW thread.
+ * The macro takes a pointer to PVRSRV_RGXDEV_INFO as input.
+ */
+#define RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo) \
+	do \
+	{ \
+		IMG_UINT32 ui32TID; \
+		for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) \
+		{ \
+			PVR_DPF((DBGPRIV_VERBOSE, \
+					"RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u", \
+					ui32TID, \
+					(psRgxDevInfo)->psRGXFWIfTraceBuf->aui32InterruptCount[ui32TID], \
+					(psRgxDevInfo)->aui32SampleIRQCount[ui32TID])); \
+		} \
+	} while(0)
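+
+/* Illustrative use only (not part of this patch): the macro is typically
+ * invoked from LISR/MISR debug paths once psRGXFWIfTraceBuf has been mapped,
+ * e.g.
+ *
+ *     RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo);
+ */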
+
+/*!
+*******************************************************************************
+
+ @Function	RGXPanic
+
+ @Description
+
+ Called when an unrecoverable situation is detected. Dumps RGX debug
+ information and tells the OS to panic.
+
+ @Input psDevInfo - RGX device info
+
+ @Return void
+
+******************************************************************************/
+void RGXPanic(PVRSRV_RGXDEV_INFO	*psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDumpDebugInfo
+
+ @Description
+
+ Dump useful debugging info. Dumps less information than PVRSRVDebugRequest
+ and does not dump debugging information for all requester types (e.g. SysDebug,
+ ServerSync info).
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+
+ @Return   void
+
+******************************************************************************/
+void RGXDumpDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile,
+			PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDebugRequestProcess
+
+ @Description
+
+ This function will print out the debug information for the specified level of
+ verbosity.
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+ @Input ui32VerbLevel       - Verbosity level
+
+ @Return   void
+
+******************************************************************************/
+void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile,
+				PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 ui32VerbLevel);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDumpFirmwareTrace
+
+ @Description Dumps the decoded version of the firmware trace buffer.
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+
+ @Return   void
+
+******************************************************************************/
+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile,
+				PVRSRV_RGXDEV_INFO  *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXReadWithSP
+
+ @Description
+
+ Reads data from a memory location (FW memory map) using the META Slave Port
+
+ @Input  ui32FWAddr - 32 bit FW address
+
+ @Return IMG_UINT32
+******************************************************************************/
+IMG_UINT32 RGXReadWithSP(IMG_UINT32 ui32FWAddr);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXWriteWithSP
+
+ @Description
+
+ Writes data to a memory location (FW memory map) using the META Slave Port
+
+ @Input  ui32FWAddr - 32 bit FW address
+
+ @Input  ui32Value  - 32 bit Value to write
+
+ @Return void
+******************************************************************************/
+void RGXWriteWithSP(IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value);
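+
+/* Illustrative pairing only (not part of this patch): a value written through
+ * the META Slave Port can be read back with the counterpart call, e.g.
+ *
+ *     RGXWriteWithSP(ui32FWAddr, ui32Value);
+ *     ui32ReadBack = RGXReadWithSP(ui32FWAddr);
+ */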
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+/*!
+*******************************************************************************
+
+ @Function     ValidateFWImageWithSP
+
+ @Description  Compare the Firmware image as seen from the CPU point of view
+               against the same memory area as seen from the META point of view
+
+ @Input        psDevInfo - Device Info
+
+ @Return       PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR ValidateFWImageWithSP(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif /* defined(SUPPORT_EXTRA_METASP_DEBUG) */
+
+#endif /* __RGXDEBUG_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxdevice.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxdevice.h
new file mode 100644
index 0000000..0b60ef4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxdevice.h
@@ -0,0 +1,563 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX device node header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX device node
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXDEVICE_H__)
+#define __RGXDEVICE_H__
+
+#include "img_types.h"
+#include "pvrsrv_device_types.h"
+#include "mmu_common.h"
+#include "rgx_fwif_km.h"
+#include "rgx_fwif.h"
+#include "rgxscript.h"
+#include "cache_ops.h"
+#include "device.h"
+#include "osfunc.h"
+#include "rgxlayer_impl.h"
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "hash.h"
+#endif
+typedef struct _RGX_SERVER_COMMON_CONTEXT_ RGX_SERVER_COMMON_CONTEXT;
+
+typedef struct {
+	DEVMEM_MEMDESC		*psFWFrameworkMemDesc;
+	IMG_DEV_VIRTADDR	*psResumeSignalAddr;
+} RGX_COMMON_CONTEXT_INFO;
+
+
+/*!
+ ******************************************************************************
+ * Device state flags
+ *****************************************************************************/
+#define RGXKM_DEVICE_STATE_ZERO_FREELIST			(0x1 << 0)		/*!< Zeroing the physical pages of reconstructed free lists */
+#define RGXKM_DEVICE_STATE_FTRACE_EN				(0x1 << 1)		/*!< Used to enable device FTrace thread to consume HWPerf data */
+#define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN 	(0x1 << 2)		/*!< Used to disable the Devices Watchdog logging */
+#define RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN	(0x1 << 3)		/*!< Used for validation to inject dust requests every TA/3D kick */
+
+/*!
+ ******************************************************************************
+ * GPU DVFS Table
+ *****************************************************************************/
+
+#define RGX_GPU_DVFS_TABLE_SIZE            100                      /* DVFS Table size */
+#define RGX_GPU_DVFS_GET_INDEX(clockfreq)  ((clockfreq) / 10000000) /* Assuming different GPU clocks are separated by at least 10MHz
+                                                                     * WARNING: this macro must be used only with nominal values of
+                                                                     * the GPU clock speed (the ones provided by the customer code) */
+#define RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US       25000          /* Time required to calibrate a clock frequency the first time */
+#define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US  150000         /* Time required for a recalibration after a DVFS transition */
+#define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US    10000000       /* Time before the next periodic calibration and correlation */
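+
+/* Worked example (illustrative only): with RGX_GPU_DVFS_GET_INDEX a nominal
+ * core clock of 400 MHz maps to entry 400000000 / 10000000 = 40, so any two
+ * nominal frequencies closer than 10 MHz would share a table entry. */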
+
+typedef struct _RGX_GPU_DVFS_TABLE_
+{
+	IMG_UINT64 ui64CalibrationCRTimestamp;              /*!< CR timestamp used to calibrate GPU frequencies (beginning of a calibration period) */
+	IMG_UINT64 ui64CalibrationOSTimestamp;              /*!< OS timestamp used to calibrate GPU frequencies (beginning of a calibration period) */
+	IMG_UINT64 ui64CalibrationCRTimediff;               /*!< CR timediff used to calibrate GPU frequencies (calibration period) */
+	IMG_UINT64 ui64CalibrationOSTimediff;               /*!< OS timediff used to calibrate GPU frequencies (calibration period) */
+	IMG_UINT32 ui32CalibrationPeriod;                   /*!< Threshold used to determine whether the current GPU frequency should be calibrated */
+	IMG_UINT32 ui32CurrentDVFSId;                       /*!< Current table entry index */
+	IMG_BOOL   bAccumulatePeriod;                       /*!< Accumulate many consecutive periods to get a better calibration at the end */
+	IMG_UINT32 aui32DVFSClock[RGX_GPU_DVFS_TABLE_SIZE]; /*!< DVFS clocks table (clocks in Hz) */
+} RGX_GPU_DVFS_TABLE;
+
+
+/*!
+ ******************************************************************************
+ * GPU utilisation statistics
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_GPU_UTIL_STATS_
+{
+	IMG_BOOL   bValid;                /* If TRUE, statistics are valid.
+	                                     FALSE if the driver couldn't get reliable stats. */
+	IMG_UINT64 ui64GpuStatActiveHigh; /* GPU active high statistic */
+	IMG_UINT64 ui64GpuStatActiveLow;  /* GPU active low (i.e. TLA active only) statistic */
+	IMG_UINT64 ui64GpuStatBlocked;    /* GPU blocked statistic */
+	IMG_UINT64 ui64GpuStatIdle;       /* GPU idle statistic */
+	IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */
+} RGXFWIF_GPU_UTIL_STATS;
+
+
+typedef struct _RGX_REG_CONFIG_
+{
+	IMG_BOOL               bEnabled;
+	RGXFWIF_REG_CFG_TYPE   eRegCfgTypeToPush;
+	IMG_UINT32             ui32NumRegRecords;
+} RGX_REG_CONFIG;
+
+typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
+
+typedef struct
+{
+	IMG_UINT32			ui32DustCount1;
+	IMG_UINT32			ui32DustCount2;
+	IMG_BOOL			bToggle;
+} RGX_DUST_STATE;
+
+typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_
+{
+	IMG_UINT64 ui64ErnsBrns;
+	IMG_UINT64 ui64Features;
+	IMG_UINT32 ui32B;
+	IMG_UINT32 ui32V;
+	IMG_UINT32 ui32N;
+	IMG_UINT32 ui32C;
+	IMG_UINT32 ui32NumClusters;
+	IMG_UINT32 ui32CtrlStreamFormat;
+	IMG_UINT32 ui32FBCDCArch;
+	IMG_UINT32 ui32META;
+	IMG_UINT32 ui32MCMB;
+	IMG_UINT32 ui32MCMS;
+	IMG_UINT32 ui32MDMACount;
+	IMG_UINT32 ui32NIIP;
+	IMG_UINT32 ui32PBW;
+	IMG_UINT32 ui32STEArch;
+	IMG_UINT32 ui32SVCE;
+	IMG_UINT32 ui32SLCBanks;
+	IMG_UINT32 ui32CacheLineSize;
+	IMG_UINT32 ui32SLCSize;
+	IMG_UINT32 ui32VASB;
+	IMG_UINT32 ui32MAXDMCount;
+	IMG_UINT32 ui32MAXDMMTSCount;
+	IMG_UINT32 ui32MAXDustCount;
+#define MAX_BVNC_STRING_LEN		(50)
+	IMG_PCHAR  pszBVNCString;
+} PVRSRV_DEVICE_FEATURE_CONFIG;
+
+/* there is a corresponding define in rgxapi.h */
+#define RGX_MAX_TIMER_QUERIES 16
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+/* For the workload estimation return data array */
+/* The max amount of commands the MTS can have is 255, therefore 512 (LOG2 = 9)
+ * is large enough to account for all corner cases.
+ */
+#define RETURN_DATA_ARRAY_SIZE_LOG2 (9)
+#define RETURN_DATA_ARRAY_SIZE      ((1UL) << RETURN_DATA_ARRAY_SIZE_LOG2)
+#define RETURN_DATA_ARRAY_WRAP_MASK (RETURN_DATA_ARRAY_SIZE - 1)
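+
+/* Because RETURN_DATA_ARRAY_SIZE is a power of two, the wrap mask turns a
+ * free-running write offset into a ring-buffer index without a modulo,
+ * e.g. ui32Index = ui32ReturnDataWO & RETURN_DATA_ARRAY_WRAP_MASK
+ * (illustrative only). */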
+
+#define WORKLOAD_HASH_SIZE 64
+
+typedef struct _WORKEST_HOST_DATA_ WORKEST_HOST_DATA;
+
+typedef struct _RGX_WORKLOAD_TA3D_
+{
+	IMG_UINT32				ui32RenderTargetSize;
+	IMG_UINT32				ui32NumberOfDrawCalls;
+	IMG_UINT32				ui32NumberOfIndices;
+	IMG_UINT32				ui32NumberOfMRTs;
+} RGX_WORKLOAD_TA3D;
+
+typedef struct _WORKLOAD_MATCHING_DATA_
+{
+	HASH_TABLE                  *psWorkloadDataHash;
+	RGX_WORKLOAD_TA3D           asWorkloadHashKeys[WORKLOAD_HASH_SIZE];
+	IMG_UINT64                  aui64HashCycleData[WORKLOAD_HASH_SIZE];
+	IMG_UINT32                  ui32HashArrayWO;
+	POS_LOCK                    psWorkEstHashLock;
+} WORKLOAD_MATCHING_DATA;
+
+struct _WORKEST_HOST_DATA_ {
+	WORKLOAD_MATCHING_DATA      sWorkloadMatchingDataTA;
+	WORKLOAD_MATCHING_DATA      sWorkloadMatchingData3D;
+	IMG_UINT32                  ui32WorkEstCCBReceived;
+};
+typedef struct _WORKEST_RETURN_DATA_ {
+	WORKEST_HOST_DATA           *psWorkEstHostData;
+	WORKLOAD_MATCHING_DATA      *psWorkloadMatchingData;
+	RGX_WORKLOAD_TA3D           sWorkloadCharacteristics;
+} WORKEST_RETURN_DATA;
+#endif
+
+
+typedef struct
+{
+#if defined(PDUMP)
+	IMG_HANDLE      hPdumpPages;
+#endif
+	PG_HANDLE       sPages;
+	IMG_DEV_PHYADDR sPhysAddr;
+} RGX_MIPS_ADDRESS_TRAMPOLINE;
+
+
+/*!
+ ******************************************************************************
+ * RGX Device info
+ *****************************************************************************/
+
+typedef struct _PVRSRV_RGXDEV_INFO_
+{
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+
+	PVRSRV_DEVICE_FEATURE_CONFIG	sDevFeatureCfg;
+
+	/* FIXME: This is a workaround due to having 2 inits but only 1 deinit */
+	IMG_BOOL				bDevInit2Done;
+
+	IMG_BOOL                bFirmwareInitialised;
+	IMG_BOOL				bPDPEnabled;
+
+	IMG_HANDLE				hDbgReqNotify;
+
+	/* Kernel mode linear address of device registers */
+	void					*pvRegsBaseKM;
+
+	/* FIXME: The alloc for this should go through OSAllocMem in future */
+	IMG_HANDLE				hRegMapping;
+
+	/* System physical address of device registers*/
+	IMG_CPU_PHYADDR			sRegsPhysBase;
+	/*  Register region size in bytes */
+	IMG_UINT32				ui32RegSize;
+
+	PVRSRV_STUB_PBDESC		*psStubPBDescListKM;
+
+	/* Firmware memory context info */
+	DEVMEM_CONTEXT			*psKernelDevmemCtx;
+	DEVMEM_HEAP				*psFirmwareHeap;
+	MMU_CONTEXT				*psKernelMMUCtx;
+
+	void					*pvDeviceMemoryHeap;
+
+	/* Kernel CCB */
+	DEVMEM_MEMDESC			*psKernelCCBCtlMemDesc;    /*!< memdesc for Kernel CCB control */
+	RGXFWIF_CCB_CTL			*psKernelCCBCtl;           /*!< kernel mapping for Kernel CCB control */
+	DEVMEM_MEMDESC			*psKernelCCBMemDesc;       /*!< memdesc for Kernel CCB */
+	IMG_UINT8				*psKernelCCB;              /*!< kernel mapping for Kernel CCB */
+
+	/* Firmware CCB */
+	DEVMEM_MEMDESC			*psFirmwareCCBCtlMemDesc;   /*!< memdesc for Firmware CCB control */
+	RGXFWIF_CCB_CTL			*psFirmwareCCBCtl;          /*!< kernel mapping for Firmware CCB control */
+	DEVMEM_MEMDESC			*psFirmwareCCBMemDesc;      /*!< memdesc for Firmware CCB */
+	IMG_UINT8				*psFirmwareCCB;             /*!< kernel mapping for Firmware CCB */
+
+	/* Workload Estimation Firmware CCB */
+	DEVMEM_MEMDESC			*psWorkEstFirmwareCCBCtlMemDesc;   /*!< memdesc for Workload Estimation Firmware CCB control */
+	RGXFWIF_CCB_CTL			*psWorkEstFirmwareCCBCtl;          /*!< kernel mapping for Workload Estimation Firmware CCB control */
+	DEVMEM_MEMDESC			*psWorkEstFirmwareCCBMemDesc;      /*!< memdesc for Workload Estimation Firmware CCB */
+	IMG_UINT8				*psWorkEstFirmwareCCB;             /*!< kernel mapping for Workload Estimation Firmware CCB */
+
+	IMG_BOOL				bEnableFWPoisonOnFree;             /*!< Enable poisoning of FW allocations when freed */
+	IMG_BYTE				ubFWPoisonOnFreeValue;             /*!< Byte value used when poisoning FW allocations */
+
+	/*
+		if we don't preallocate the pagetables we must
+		insert newly allocated page tables dynamically
+	*/
+	void					*pvMMUContextList;
+
+	IMG_UINT32				ui32ClkGateStatusReg;
+	IMG_UINT32				ui32ClkGateStatusMask;
+	RGX_SCRIPTS				*psScripts;
+
+	DEVMEM_MEMDESC			*psRGXFWCodeMemDesc;
+	IMG_DEV_VIRTADDR		sFWCodeDevVAddrBase;
+	DEVMEM_MEMDESC			*psRGXFWDataMemDesc;
+	IMG_DEV_VIRTADDR		sFWDataDevVAddrBase;
+	RGX_MIPS_ADDRESS_TRAMPOLINE	sTrampoline;
+
+	DEVMEM_MEMDESC			*psRGXFWCorememMemDesc;
+	IMG_DEV_VIRTADDR		sFWCorememCodeDevVAddrBase;
+	RGXFWIF_DEV_VIRTADDR	sFWCorememCodeFWAddr;
+
+#if defined(RGXFW_ALIGNCHECKS)
+	DEVMEM_MEMDESC			*psRGXFWAlignChecksMemDesc;
+#endif
+
+	DEVMEM_MEMDESC			*psRGXFWSigTAChecksMemDesc;
+	IMG_UINT32				ui32SigTAChecksSize;
+
+	DEVMEM_MEMDESC			*psRGXFWSig3DChecksMemDesc;
+	IMG_UINT32				ui32Sig3DChecksSize;
+
+	DEVMEM_MEMDESC			*psRGXFWSigRTChecksMemDesc;
+	IMG_UINT32				ui32SigRTChecksSize;
+
+	DEVMEM_MEMDESC			*psRGXFWSigSHChecksMemDesc;
+	IMG_UINT32				ui32SigSHChecksSize;
+
+#if defined (PDUMP)
+	IMG_BOOL				bDumpedKCCBCtlAlready;
+#endif
+
+	DEVMEM_MEMDESC			*psRGXFWIfTraceBufCtlMemDesc;	/*!< memdesc of trace buffer control structure */
+	DEVMEM_MEMDESC			*psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM];	/*!< memdesc of actual FW trace (log) buffer(s) */
+	RGXFWIF_TRACEBUF		*psRGXFWIfTraceBuf;		/* structure containing trace control data and actual trace buffer */
+
+	DEVMEM_MEMDESC			*psRGXFWIfHWRInfoBufCtlMemDesc;
+	RGXFWIF_HWRINFOBUF		*psRGXFWIfHWRInfoBuf;
+
+	DEVMEM_MEMDESC			*psRGXFWIfGpuUtilFWCbCtlMemDesc;
+	RGXFWIF_GPU_UTIL_FWCB	*psRGXFWIfGpuUtilFWCb;
+
+	DEVMEM_MEMDESC			*psRGXFWIfHWPerfBufMemDesc;
+	IMG_BYTE				*psRGXFWIfHWPerfBuf;
+	IMG_UINT32				ui32RGXFWIfHWPerfBufSize; /* in bytes */
+
+	DEVMEM_MEMDESC			*psRGXFWIfCorememDataStoreMemDesc;
+
+	DEVMEM_MEMDESC			*psRGXFWIfRegCfgMemDesc;
+
+	DEVMEM_MEMDESC			*psRGXFWIfHWPerfCountersMemDesc;
+	DEVMEM_MEMDESC			*psRGXFWIfInitMemDesc;
+	DEVMEM_MEMDESC			*psRGXFWIfOSConfigDesc;
+	RGXFWIF_OS_CONFIG		*psFWIfOSConfig;
+	RGXFWIF_DEV_VIRTADDR	sFWInitFWAddr;
+
+	DEVMEM_MEMDESC			*psRGXFWIfRuntimeCfgMemDesc;
+	RGXFWIF_RUNTIME_CFG		*psRGXFWIfRuntimeCfg;
+
+	/* Additional guest firmware memory context info */
+	DEVMEM_HEAP				*psGuestFirmwareHeap[RGXFW_NUM_OS];
+	DEVMEM_MEMDESC			*psGuestFirmwareMemDesc[RGXFW_NUM_OS];
+
+	DEVMEM_MEMDESC			*psMETAT1StackMemDesc;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	/* Array to store data needed for workload estimation when a workload
+	 * has finished and its cycle time is returned to the host.
+	 */
+	WORKEST_RETURN_DATA		asReturnData[RETURN_DATA_ARRAY_SIZE];
+	IMG_UINT32				ui32ReturnDataWO;
+#endif
+
+#if defined (SUPPORT_PDVFS)
+	/**
+	 * Host memdesc and pointer to memory containing core clock rate in Hz.
+	 * Firmware (PDVFS) updates the memory on changing the core clock rate over
+	 * GPIO.
+	 * Note: Shared memory needs atomic access from Host driver and firmware,
+	 * hence size should not be greater than memory transaction granularity.
+	 * Currently it is chosen to be 32 bits.
+	 */
+	DEVMEM_MEMDESC			*psRGXFWIFCoreClkRateMemDesc;
+	volatile IMG_UINT32		*pui32RGXFWIFCoreClkRate;
+	/**
+	 * Last sampled core clk rate.
+	 */
+	volatile IMG_UINT32		ui32CoreClkRateSnapshot;
+#endif
+	/*
+	   HWPerf data for the RGX device
+	 */
+
+	POS_LOCK    hHWPerfLock;  /*! Critical section lock that protects HWPerf code
+	                           *  from multiple thread duplicate init/deinit
+	                           *  and loss/freeing of FW & Host resources while in
+	                           *  use in another thread, e.g. the MISR. */
+
+	IMG_UINT64  ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */
+	IMG_HANDLE  hHWPerfStream;    /*! TL Stream buffer (L2) for firmware event stream */
+
+	IMG_UINT32  ui32HWPerfHostFilter;      /*! Event filter for HWPerfHost stream (settable by AppHint) */
+	POS_LOCK    hLockHWPerfHostStream;     /*! Lock guarding access to HWPerfHost stream from multiple threads */
+	IMG_HANDLE  hHWPerfHostStream;         /*! TL Stream buffer for host only event stream */
+	IMG_UINT32  ui32HWPerfHostBufSize;     /*! Host side buffer size in bytes */
+	IMG_UINT32  ui32HWPerfHostNextOrdinal; /*! Ordinal number for HWPerfHost events */
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	void        *pvGpuFtraceData;
+#endif
+
+	/* Poll data for detecting firmware fatal errors */
+	IMG_UINT32				aui32CrLastPollAddr[RGXFW_THREAD_NUM];
+	IMG_UINT32				ui32KCCBCmdsExecutedLastTime;
+	IMG_BOOL				bKCCBCmdsWaitingLastTime;
+	IMG_UINT32				ui32GEOTimeoutsLastTime;
+
+	/* Client stall detection */
+	IMG_UINT32				ui32StalledClientMask;
+
+	IMG_BOOL				bWorkEstEnabled;
+	IMG_BOOL				bPDVFSEnabled;
+
+	void					*pvLISRData;
+	void					*pvMISRData;
+	void					*pvAPMISRData;
+	RGX_ACTIVEPM_CONF		eActivePMConf;
+
+	volatile IMG_UINT32		aui32SampleIRQCount[RGXFW_THREAD_NUM];
+
+	DEVMEM_MEMDESC			*psRGXFaultAddressMemDesc;
+
+	DEVMEM_MEMDESC			*psRGXFWHWBRN37200MemDesc;
+
+	DEVMEM_MEMDESC			*psSLC3FenceMemDesc;
+
+	/* If we do 10 deferred memory allocations per second, then the ID would wrap around after 13 years */
+	IMG_UINT32				ui32ZSBufferCurrID;	/*!< ID assigned to the next deferred devmem allocation */
+	IMG_UINT32				ui32FreelistCurrID;	/*!< ID assigned to the next freelist */
+	IMG_UINT32				ui32RPMFreelistCurrID;	/*!< ID assigned to the next RPM freelist */
+
+	POS_LOCK 				hLockZSBuffer;		/*!< Lock to protect simultaneous access to ZSBuffers */
+	DLLIST_NODE				sZSBufferHead;		/*!< List of on-demand ZSBuffers */
+	POS_LOCK 				hLockFreeList;		/*!< Lock to protect simultaneous access to Freelists */
+	DLLIST_NODE				sFreeListHead;		/*!< List of growable Freelists */
+	POS_LOCK 				hLockRPMFreeList;	/*!< Lock to protect simultaneous access to RPM Freelists */
+	DLLIST_NODE				sRPMFreeListHead;	/*!< List of growable RPM Freelists */
+	POS_LOCK				hLockRPMContext;	/*!< Lock to protect simultaneous access to RPM contexts */
+	PSYNC_PRIM_CONTEXT		hSyncPrimContext;
+	PVRSRV_CLIENT_SYNC_PRIM *psPowSyncPrim;
+
+	IMG_UINT32				ui32ActivePMReqOk;
+	IMG_UINT32				ui32ActivePMReqDenied;
+	IMG_UINT32				ui32ActivePMReqTotal;
+
+	IMG_HANDLE				hProcessQueuesMISR;
+
+	IMG_UINT32 				ui32DeviceFlags;		/*!< Flags to track general device state */
+
+	/* Timer Queries */
+	IMG_UINT32				ui32ActiveQueryId;		/*!< ID of the active timer query */
+	IMG_BOOL				bSaveStart;				/*!< save the start time of the next kick on the device */
+	IMG_BOOL				bSaveEnd;				/*!< save the end time of the next kick on the device */
+
+	DEVMEM_MEMDESC			*psStartTimeMemDesc;    /*!< memdesc for Start Times */
+	IMG_UINT64				*pui64StartTimeById;    /*!< CPU mapping of the above */
+
+	DEVMEM_MEMDESC			*psEndTimeMemDesc;      /*!< memdesc for End Times */
+	IMG_UINT64				*pui64EndTimeById;      /*!< CPU mapping of the above */
+
+	IMG_UINT32				aui32ScheduledOnId[RGX_MAX_TIMER_QUERIES];	/*!< kicks Scheduled on QueryId */
+	DEVMEM_MEMDESC			*psCompletedMemDesc;	/*!< kicks Completed on QueryId */
+	IMG_UINT32				*pui32CompletedById;	/*!< CPU mapping of the above */
+
+	/* GPU DVFS Table */
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable;
+
+	/* Pointer to function returning the GPU utilisation statistics since the last
+	 * time the function was called. Supports different users at the same time.
+	 *
+	 * psReturnStats [out]: GPU utilisation statistics (active high/active low/idle/blocked)
+	 *                      in microseconds since the last time the function was called
+	 *                      by a specific user (identified by hGpuUtilUser)
+	 *
+	 * Returns PVRSRV_OK in case the call completed without errors,
+	 * some other value otherwise.
+	 */
+	PVRSRV_ERROR (*pfnGetGpuUtilStats) (PVRSRV_DEVICE_NODE *psDeviceNode,
+	                                    IMG_HANDLE hGpuUtilUser,
+	                                    RGXFWIF_GPU_UTIL_STATS *psReturnStats);
+
+	POS_LOCK				hGPUUtilLock;
+
+	/* Register configuration */
+	RGX_REG_CONFIG			sRegCongfig;
+
+	IMG_BOOL				bRGXPowered;
+	DLLIST_NODE				sMemoryContextList;
+
+	POSWR_LOCK				hRenderCtxListLock;
+	POSWR_LOCK				hComputeCtxListLock;
+	POSWR_LOCK				hTransferCtxListLock;
+	POSWR_LOCK				hTDMCtxListLock;
+	POSWR_LOCK				hRaytraceCtxListLock;
+	POSWR_LOCK				hMemoryCtxListLock;
+	POSWR_LOCK				hKickSyncCtxListLock;
+
+	/* Linked list of deferred KCCB commands due to a full KCCB */
+	DLLIST_NODE				sKCCBDeferredCommandsListHead;
+
+	/* Linked lists of contexts on this device */
+	DLLIST_NODE				sRenderCtxtListHead;
+	DLLIST_NODE				sComputeCtxtListHead;
+	DLLIST_NODE				sTransferCtxtListHead;
+	DLLIST_NODE				sTDMCtxtListHead;
+	DLLIST_NODE				sRaytraceCtxtListHead;
+	DLLIST_NODE				sKickSyncCtxtListHead;
+
+	DLLIST_NODE 			sCommonCtxtListHead;
+	POSWR_LOCK			hCommonCtxtListLock;
+	IMG_UINT32				ui32CommonCtxtCurrentID;	/*!< ID assigned to the next common context */
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	POS_LOCK 				hDebugFaultInfoLock;	/*!< Lock to protect the debug fault info list */
+	POS_LOCK 				hMMUCtxUnregLock;		/*!< Lock to protect list of unregistered MMU contexts */
+#endif
+
+	POS_LOCK				hNMILock; /*!< Lock to protect NMI operations */
+
+	RGX_DUST_STATE			sDustReqState;
+
+	RGX_LAYER_PARAMS		sLayerParams;
+
+	RGXFWIF_DM				eBPDM;					/*!< Current breakpoint data master */
+	IMG_BOOL				bBPSet;					/*!< A Breakpoint has been set */
+
+	IMG_UINT32				ui32CoherencyTestsDone;
+} PVRSRV_RGXDEV_INFO;
+
+
+
+typedef struct _RGX_TIMING_INFORMATION_
+{
+	/*! GPU default core clock speed in Hz */
+	IMG_UINT32			ui32CoreClockSpeed;
+
+	/*! Active Power Management: GPU actively requests the host driver to be powered off */
+	IMG_BOOL			bEnableActivePM;
+
+	/*! Enable the GPU to power off internal Power Islands independently from the host driver */
+	IMG_BOOL			bEnableRDPowIsland;
+
+	/*! Active Power Management: Delay between the GPU idle and the request to the host */
+	IMG_UINT32			ui32ActivePMLatencyms;
+
+} RGX_TIMING_INFORMATION;
+
+typedef struct _RGX_DATA_
+{
+	/*! Timing information */
+	RGX_TIMING_INFORMATION	*psRGXTimingInfo;
+	IMG_BOOL bHasTDFWCodePhysHeap;
+	IMG_UINT32 uiTDFWCodePhysHeapID;
+	IMG_BOOL bHasTDSecureBufPhysHeap;
+	IMG_UINT32 uiTDSecureBufPhysHeapID;
+} RGX_DATA;
+
+
+/*
+	RGX PDUMP register bank name (prefix)
+*/
+#define RGX_PDUMPREG_NAME		"RGXREG"
+
+#endif /* __RGXDEVICE_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwimageutils.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwimageutils.c
new file mode 100644
index 0000000..8f56195
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwimageutils.c
@@ -0,0 +1,956 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Firmware image utilities used at init time
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services Firmware image utilities used at init time
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* The routines implemented here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when trusted device is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxfwimageutils.h"
+
+
+/************************************************************************
+* FW Segments configuration
+************************************************************************/
+typedef struct _RGX_FW_SEGMENT_
+{
+	IMG_UINT32 ui32SegId;        /*!< Segment Id */
+	IMG_UINT32 ui32SegStartAddr; /*!< Segment Start Addr */
+	IMG_UINT32 ui32SegAllocSize; /*!< Amount of memory to allocate for that segment */
+	IMG_UINT32 ui32FWMemOffset;  /*!< Offset of this segment in the collated FW mem allocation */
+	const IMG_CHAR *pszSegName;  /*!< Segment name */
+} RGX_FW_SEGMENT;
+
+typedef struct _RGX_FW_SEGMENT_LIST_
+{
+	RGX_FW_SEGMENT *psRGXFWCodeSeg;
+	RGX_FW_SEGMENT *psRGXFWDataSeg;
+	IMG_UINT32 ui32CodeSegCount;
+	IMG_UINT32 ui32DataSegCount;
+} RGX_FW_SEGMENT_LIST;
+
+
+static RGX_FW_SEGMENT asRGXMetaFWCodeSegments[] = {
+/* Seg ID                 Seg Start Addr           Alloc size   FWMem offset  Name */
+{RGXFW_SEGMMU_TEXT_ID,    RGXFW_BOOTLDR_META_ADDR, 0x31000,      0,           "Bootldr and Code"}, /* Has to be the first one to get the proper DevV addr */
+};
+static RGX_FW_SEGMENT asRGXMetaFWDataSegments[] = {
+/* Seg ID                 Seg Start Addr           Alloc size   FWMem offset  Name */
+{RGXFW_SEGMMU_DATA_ID,    0x38880000,              0x17000,      0,           "Local Shared and Data"},
+};
+#define RGXFW_META_NUM_CODE_SEGMENTS  (sizeof(asRGXMetaFWCodeSegments)/sizeof(asRGXMetaFWCodeSegments[0]))
+#define RGXFW_META_NUM_DATA_SEGMENTS  (sizeof(asRGXMetaFWDataSegments)/sizeof(asRGXMetaFWDataSegments[0]))
+
+static RGX_FW_SEGMENT asRGXMipsFWCodeSegments[] = {
+/* Seg ID   Seg Start Addr                         Alloc size                         FWMem offset                         Name */
+{    0,     RGXMIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE,  RGXMIPSFW_BOOT_NMI_CODE_SIZE,      RGXMIPSFW_BOOT_NMI_CODE_OFFSET,      "Bootldr and NMI code"},
+{    1,     RGXMIPSFW_EXCEPTIONS_VIRTUAL_BASE,     RGXMIPSFW_EXCEPTIONSVECTORS_SIZE,  RGXMIPSFW_EXCEPTIONSVECTORS_OFFSET,  "Exception vectors"},
+{    2,     RGXMIPSFW_CODE_VIRTUAL_BASE,           RGXMIPSFW_CODE_SIZE,               RGXMIPSFW_CODE_OFFSET,               "Text"},
+};
+static RGX_FW_SEGMENT asRGXMipsFWDataSegments[] = {
+/* Seg ID   Seg Start Addr                         Alloc size                         FWMem offset                         Name */
+{    3,     RGXMIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE,  RGXMIPSFW_BOOT_NMI_DATA_SIZE,      RGXMIPSFW_BOOT_NMI_DATA_OFFSET,      "Bootldr and NMI data"},
+{    4,     RGXMIPSFW_DATA_VIRTUAL_BASE,           RGXMIPSFW_DATA_SIZE,               RGXMIPSFW_DATA_OFFSET,               "Local Data"},
+{    5,     RGXMIPSFW_STACK_VIRTUAL_BASE,          RGXMIPSFW_STACK_SIZE,              RGXMIPSFW_DATA_SIZE,                 "Stack"},
+};
+
+#define RGXFW_MIPS_NUM_CODE_SEGMENTS  (sizeof(asRGXMipsFWCodeSegments)/sizeof(asRGXMipsFWCodeSegments[0]))
+#define RGXFW_MIPS_NUM_DATA_SEGMENTS  (sizeof(asRGXMipsFWDataSegments)/sizeof(asRGXMipsFWDataSegments[0]))
+
+/*!
+*******************************************************************************
+
+ @Function      FindMMUSegment
+
+ @Description   Given a 32 bit FW address, attempt to find the corresponding
+                pointer into the FW allocation
+
+ @Input         ui32OffsetIn      : 32 bit FW address
+ @Input         pvHostFWCodeAddr  : Pointer to FW code
+ @Input         pvHostFWDataAddr  : Pointer to FW data
+ @Output        uiHostAddrOut     : CPU pointer equivalent to ui32OffsetIn
+ @Input         psRGXFWSegList    : List of FW code/data segments to search
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR FindMMUSegment(IMG_UINT32 ui32OffsetIn,
+                                   void *pvHostFWCodeAddr,
+                                   void *pvHostFWDataAddr,
+                                   void **uiHostAddrOut,
+                                   RGX_FW_SEGMENT_LIST *psRGXFWSegList)
+{
+	RGX_FW_SEGMENT *psSegArr;
+	IMG_UINT32 i;
+
+	psSegArr = psRGXFWSegList->psRGXFWCodeSeg;
+	for (i = 0; i < psRGXFWSegList->ui32CodeSegCount; i++)
+	{
+		if ((ui32OffsetIn >= psSegArr[i].ui32SegStartAddr) &&
+		    (ui32OffsetIn < (psSegArr[i].ui32SegStartAddr + psSegArr[i].ui32SegAllocSize)))
+		{
+			*uiHostAddrOut = pvHostFWCodeAddr;
+			goto found;
+		}
+	}
+
+	psSegArr = psRGXFWSegList->psRGXFWDataSeg;
+	for (i = 0; i < psRGXFWSegList->ui32DataSegCount; i++)
+	{
+		if ((ui32OffsetIn >= psSegArr[i].ui32SegStartAddr) &&
+		   (ui32OffsetIn < (psSegArr[i].ui32SegStartAddr + psSegArr[i].ui32SegAllocSize)))
+		{
+			*uiHostAddrOut = pvHostFWDataAddr;
+			goto found;
+		}
+	}
+
+	return PVRSRV_ERROR_INIT_FAILURE;
+
+found:
+	/* Direct Mem write to mapped memory */
+	ui32OffsetIn -= psSegArr[i].ui32SegStartAddr;
+	ui32OffsetIn += psSegArr[i].ui32FWMemOffset;
+
+	/* Add offset to pointer to FW allocation only if
+	 * that allocation is available
+	 */
+	if (*uiHostAddrOut)
+	{
+		*(IMG_UINT8 **)uiHostAddrOut += ui32OffsetIn;
+	}
+
+	return PVRSRV_OK;
+}
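+
+/* Worked example (illustrative, not from the original sources): for the META
+ * data segment starting at 0x38880000 with FWMemOffset 0, an input FW address
+ * of 0x38880010 resolves to pvHostFWDataAddr + 0x10 in the host mapping of
+ * the FW data allocation.
+ */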
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXFWConfigureSegID
+
+ @Description   Configures a single segment of the Segment MMU
+                (base, limit and out_addr)
+
+ @Input         hPrivate        : Implementation specific data
+ @Input         ui64SegOutAddr  : Segment output base address (40 bit devVaddr)
+ @Input         ui32SegBase     : Segment input base address (32 bit FW address)
+ @Input         ui32SegLimit    : Segment size
+ @Input         ui32SegID       : Segment ID
+ @Input         pszName         : Segment name
+ @Input         ppui32BootConf  : Pointer to bootloader data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXFWConfigureSegID(const void *hPrivate,
+                                IMG_UINT64 ui64SegOutAddr,
+                                IMG_UINT32 ui32SegBase,
+                                IMG_UINT32 ui32SegLimit,
+                                IMG_UINT32 ui32SegID,
+                                const IMG_CHAR *pszName,
+                                IMG_UINT32 **ppui32BootConf)
+{
+	IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+	IMG_UINT32 ui32SegOutAddr0  = ui64SegOutAddr & 0x00000000FFFFFFFFUL;
+	IMG_UINT32 ui32SegOutAddr1  = (ui64SegOutAddr >> 32) & 0x00000000FFFFFFFFUL;
+
+	/* META segments have a minimum size */
+	IMG_UINT32 ui32LimitOff = (ui32SegLimit < RGXFW_SEGMMU_ALIGN) ?
+	                          RGXFW_SEGMMU_ALIGN : ui32SegLimit;
+	/* the limit is an offset, therefore off = size - 1 */
+	ui32LimitOff -= 1;
+
+	RGXCommentLog(hPrivate,
+	              "* FW %s - seg%d: meta_addr = 0x%08x, devv_addr = 0x%llx, limit = 0x%x",
+	              pszName, ui32SegID,
+	              ui32SegBase, (unsigned long long)ui64SegOutAddr,
+	              ui32LimitOff);
+
+	ui32SegBase |= RGXFW_SEGMMU_ALLTHRS_WRITEABLE;
+
+	*pui32BootConf++ = META_CR_MMCU_SEGMENTn_BASE(ui32SegID);
+	*pui32BootConf++ = ui32SegBase;
+
+	*pui32BootConf++ = META_CR_MMCU_SEGMENTn_LIMIT(ui32SegID);
+	*pui32BootConf++ = ui32LimitOff;
+
+	*pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA0(ui32SegID);
+	*pui32BootConf++ = ui32SegOutAddr0;
+
+	*pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA1(ui32SegID);
+	*pui32BootConf++ = ui32SegOutAddr1;
+
+	*ppui32BootConf = pui32BootConf;
+}
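+
+/* Note (descriptive only): the boot configuration stream written above is a
+ * flat sequence of (register, value) pairs; each segment contributes four
+ * pairs (BASE, LIMIT, OUTA0, OUTA1) for the META bootloader to apply at boot.
+ */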
+
+/*!
+*******************************************************************************
+
+ @Function      RGXFWConfigureSegMMU
+
+ @Description   Configures META's Segment MMU
+
+ @Input         hPrivate             : Implementation specific data
+ @Input         psFWCodeDevVAddrBase : FW code base device virtual address
+ @Input         psFWDataDevVAddrBase : FW data base device virtual address
+ @Input         ppui32BootConf       : Pointer to bootloader data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXFWConfigureSegMMU(const void       *hPrivate,
+                                 IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+                                 IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+                                 IMG_UINT32       **ppui32BootConf)
+{
+	IMG_UINT64 ui64SegOutAddrTop;
+	IMG_UINT32 i;
+
+	PVR_UNREFERENCED_PARAMETER(psFWCodeDevVAddrBase);
+
+	/* Configure Segment MMU */
+	RGXCommentLog(hPrivate, "********** FW configure Segment MMU **********");
+
+	if (RGXDeviceHasErnBrn(hPrivate, HW_ERN_49144_BIT_MASK))
+	{
+		ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_ERN_49144(META_MMU_CONTEXT_MAPPING, RGXFW_SEGMMU_META_DM_ID);
+	}
+	else if (RGXDeviceHasErnBrn(hPrivate, HW_ERN_45914_BIT_MASK))
+	{
+		ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_ERN_45914(META_MMU_CONTEXT_MAPPING, RGXFW_SEGMMU_META_DM_ID);
+	}
+	else
+	{
+		ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_PRE_S7(META_MMU_CONTEXT_MAPPING, RGXFW_SEGMMU_META_DM_ID);
+	}
+
+	for (i = 0; i < RGXFW_META_NUM_DATA_SEGMENTS ; i++)
+	{
+		IMG_UINT64 ui64SegOutAddr;
+
+		ui64SegOutAddr = (psFWDataDevVAddrBase->uiAddr | ui64SegOutAddrTop) +
+		                  asRGXMetaFWDataSegments[i].ui32FWMemOffset;
+
+		RGXFWConfigureSegID(hPrivate,
+		                    ui64SegOutAddr,
+		                    asRGXMetaFWDataSegments[i].ui32SegStartAddr,
+		                    asRGXMetaFWDataSegments[i].ui32SegAllocSize,
+		                    asRGXMetaFWDataSegments[i].ui32SegId,
+		                    asRGXMetaFWDataSegments[i].pszSegName,
+		                    ppui32BootConf); /* write the sequence to the bootloader */
+	}
+}
+
+/*!
+*******************************************************************************
+
+ @Function      RGXFWConfigureMetaCaches
+
+ @Description   Configure and enable the Meta instruction and data caches
+
+ @Input         hPrivate          : Implementation specific data
+ @Input         ui32NumThreads    : Number of FW threads in use
+ @Input         ui32MainThreadID  : ID of the FW thread in use
+                                    (only meaningful if ui32NumThreads == 1)
+ @Input         ppui32BootConf    : Pointer to bootloader data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXFWConfigureMetaCaches(const void *hPrivate,
+                                     IMG_UINT32 ui32NumThreads,
+                                     IMG_UINT32 ui32MainThreadID,
+                                     IMG_UINT32 **ppui32BootConf)
+{
+	IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+	IMG_UINT32 ui32DCacheT0, ui32ICacheT0;
+	IMG_UINT32 ui32DCacheT1, ui32ICacheT1;
+	IMG_UINT32 ui32DCacheT2, ui32ICacheT2;
+	IMG_UINT32 ui32DCacheT3, ui32ICacheT3;
+
+#define META_CR_MMCU_LOCAL_EBCTRL                        (0x04830600)
+#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN                  (0x3 << 14)
+#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN                  (0x3 << 6)
+#define META_CR_SYSC_DCPART(n)                           (0x04830200 + (n)*0x8)
+#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE         (0x1 << 31)
+#define META_CR_SYSC_ICPART(n)                           (0x04830220 + (n)*0x8)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF  (0x8 << 16)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE       (0xF)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE       (0x7)
+#define META_CR_MMCU_DCACHE_CTRL                         (0x04830018)
+#define META_CR_MMCU_ICACHE_CTRL                         (0x04830020)
+#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN           (0x1)
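+
+	/* Summary (descriptive only): a single-thread build gives the active
+	 * thread the full local cache partition; a two-thread build gives each
+	 * thread half, with thread 1 offset to the top half (set up below). */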
+
+	RGXCommentLog(hPrivate, "********** Meta caches configuration *********");
+
+	/* Initialise I/Dcache settings */
+	ui32DCacheT0 = ui32DCacheT1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+	ui32DCacheT2 = ui32DCacheT3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+	ui32ICacheT0 = ui32ICacheT1 = ui32ICacheT2 = ui32ICacheT3 = 0;
+
+	if (ui32NumThreads == 1)
+	{
+		if (ui32MainThreadID == 0)
+		{
+			ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+			ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+		}
+		else
+		{
+			ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+			ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+		}
+	}
+	else
+	{
+		ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE;
+		ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE;
+
+		ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE |
+		                META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF;
+		ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE |
+		                META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF;
+	}
+
+	/* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */
+	*pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL;
+	*pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL_ICWIN |
+	                   META_CR_MMCU_LOCAL_EBCTRL_DCWIN;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_MMCU_LOCAL_EBCTRL,
+	              META_CR_MMCU_LOCAL_EBCTRL_ICWIN | META_CR_MMCU_LOCAL_EBCTRL_DCWIN);
+
+	/* Data cache partitioning thread 0 to 3 */
+	*pui32BootConf++ = META_CR_SYSC_DCPART(0);
+	*pui32BootConf++ = ui32DCacheT0;
+	*pui32BootConf++ = META_CR_SYSC_DCPART(1);
+	*pui32BootConf++ = ui32DCacheT1;
+	*pui32BootConf++ = META_CR_SYSC_DCPART(2);
+	*pui32BootConf++ = ui32DCacheT2;
+	*pui32BootConf++ = META_CR_SYSC_DCPART(3);
+	*pui32BootConf++ = ui32DCacheT3;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_DCPART(0), ui32DCacheT0);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_DCPART(1), ui32DCacheT1);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_DCPART(2), ui32DCacheT2);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_DCPART(3), ui32DCacheT3);
+
+	/* Enable data cache hits */
+	*pui32BootConf++ = META_CR_MMCU_DCACHE_CTRL;
+	*pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_MMCU_DCACHE_CTRL,
+	              META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+	/* Instruction cache partitioning thread 0 to 3 */
+	*pui32BootConf++ = META_CR_SYSC_ICPART(0);
+	*pui32BootConf++ = ui32ICacheT0;
+	*pui32BootConf++ = META_CR_SYSC_ICPART(1);
+	*pui32BootConf++ = ui32ICacheT1;
+	*pui32BootConf++ = META_CR_SYSC_ICPART(2);
+	*pui32BootConf++ = ui32ICacheT2;
+	*pui32BootConf++ = META_CR_SYSC_ICPART(3);
+	*pui32BootConf++ = ui32ICacheT3;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_ICPART(0), ui32ICacheT0);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_ICPART(1), ui32ICacheT1);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_ICPART(2), ui32ICacheT2);
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_SYSC_ICPART(3), ui32ICacheT3);
+
+	/* Enable instruction cache hits */
+	*pui32BootConf++ = META_CR_MMCU_ICACHE_CTRL;
+	*pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+	              META_CR_MMCU_ICACHE_CTRL,
+	              META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+	*pui32BootConf++ = 0x040000C0;
+	*pui32BootConf++ = 0;
+
+	RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", 0x040000C0, 0);
+
+	*ppui32BootConf = pui32BootConf;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      ProcessLDRCommandStream
+
+ @Description   Process the output of the Meta toolchain in the .LDR format
+                copying code and data sections into their final location and
+                passing some information to the Meta bootloader
+
+ @Input         hPrivate             : Implementation specific data
+ @Input         pbLDR                : Pointer to FW blob
+ @Input         pvHostFWCodeAddr     : Pointer to FW code
+ @Input         pvHostFWDataAddr     : Pointer to FW data
+ @Input         pvHostFWCorememAddr  : Pointer to FW coremem code
+ @Input         ppui32BootConf       : Pointer to bootloader data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate,
+                                            const IMG_BYTE* pbLDR,
+                                            void* pvHostFWCodeAddr,
+                                            void* pvHostFWDataAddr,
+                                            void* pvHostFWCorememAddr,
+                                            IMG_UINT32 **ppui32BootConf)
+{
+	RGX_META_LDR_BLOCK_HDR *psHeader = (RGX_META_LDR_BLOCK_HDR *) pbLDR;
+	RGX_META_LDR_L1_DATA_BLK *psL1Data =
+	    (RGX_META_LDR_L1_DATA_BLK*) ((IMG_UINT8 *) pbLDR + psHeader->ui32SLData);
+
+	IMG_UINT32 *pui32BootConf  = *ppui32BootConf;
+	IMG_UINT32 ui32CorememSize = RGXGetFWCorememSize(hPrivate);
+	IMG_UINT32 ui32CorememCodeStartAddr = 0xFFFFFFFF;
+
+	RGXCommentLog(hPrivate, "**********************************************");
+	RGXCommentLog(hPrivate, "************** Begin LDR Parsing *************");
+	RGXCommentLog(hPrivate, "**********************************************");
+
+	while (psL1Data != NULL)
+	{
+		RGX_FW_SEGMENT_LIST sRGXFWSegList;
+		sRGXFWSegList.psRGXFWCodeSeg = asRGXMetaFWCodeSegments;
+		sRGXFWSegList.psRGXFWDataSeg = asRGXMetaFWDataSegments;
+		sRGXFWSegList.ui32CodeSegCount = RGXFW_META_NUM_CODE_SEGMENTS;
+		sRGXFWSegList.ui32DataSegCount = RGXFW_META_NUM_DATA_SEGMENTS;
+
+		if (RGX_META_LDR_BLK_IS_COMMENT(psL1Data->ui16Cmd))
+		{
+			/* Don't process comment blocks */
+			goto NextBlock;
+		}
+
+		switch (psL1Data->ui16Cmd & RGX_META_LDR_CMD_MASK)
+		{
+			case RGX_META_LDR_CMD_LOADMEM:
+			{
+				RGX_META_LDR_L2_DATA_BLK *psL2Block =
+				    (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[1]);
+				IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+				IMG_UINT32 ui32DataSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+				void *pvWriteAddr;
+				PVRSRV_ERROR eError;
+
+				if (RGX_META_IS_COREMEM_CODE(ui32Offset, ui32CorememSize))
+				{
+					if (ui32Offset < ui32CorememCodeStartAddr)
+					{
+						if (ui32CorememCodeStartAddr == 0xFFFFFFFF)
+						{
+							/* Take the first coremem code address as the coremem code start address */
+							ui32CorememCodeStartAddr = ui32Offset;
+
+							/* Also check that there is a valid allocation for the coremem code */
+							if (pvHostFWCorememAddr == NULL)
+							{
+								RGXErrorLog(hPrivate,
+								            "ProcessLDRCommandStream: Coremem code found "
+								            "but no coremem allocation available!");
+
+								return PVRSRV_ERROR_INIT_FAILURE;
+							}
+						}
+						else
+						{
+							/* The coremem addresses should be ordered in the LDR command stream */
+							return PVRSRV_ERROR_INIT_FAILURE;
+						}
+					}
+
+					/* Copy coremem data to buffer. The FW copies it to the actual coremem */
+					ui32Offset -= ui32CorememCodeStartAddr;
+
+					RGXMemCopy(hPrivate,
+					           (void*)((IMG_UINT8 *)pvHostFWCorememAddr + ui32Offset),
+					           psL2Block->aui32BlockData,
+					           ui32DataSize);
+				}
+				else
+				{
+					/* Global range is aliased to local range */
+					ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+					eError = FindMMUSegment(ui32Offset,
+					                        pvHostFWCodeAddr,
+					                        pvHostFWDataAddr,
+					                        &pvWriteAddr,
+					                        &sRGXFWSegList);
+
+					if (eError != PVRSRV_OK)
+					{
+						RGXErrorLog(hPrivate,
+						            "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
+						            ui32Offset, ui32DataSize);
+						return eError;
+					}
+
+					/* Write to FW allocation only if available */
+					if (pvWriteAddr)
+					{
+						RGXMemCopy(hPrivate,
+						           pvWriteAddr,
+						           psL2Block->aui32BlockData,
+						           ui32DataSize);
+					}
+				}
+
+				break;
+			}
+			case RGX_META_LDR_CMD_LOADCORE:
+			case RGX_META_LDR_CMD_LOADMMREG:
+			{
+				return PVRSRV_ERROR_INIT_FAILURE;
+			}
+			case RGX_META_LDR_CMD_START_THREADS:
+			{
+				/* Don't process this block */
+				break;
+			}
+			case RGX_META_LDR_CMD_ZEROMEM:
+			{
+				IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+				IMG_UINT32 ui32ByteCount = psL1Data->aui32CmdData[1];
+				void *pvWriteAddr;
+				PVRSRV_ERROR  eError;
+
+				if (RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
+				{
+					/* cannot zero coremem directly */
+					break;
+				}
+
+				/* Global range is aliased to local range */
+				ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+				eError = FindMMUSegment(ui32Offset,
+				                        pvHostFWCodeAddr,
+				                        pvHostFWDataAddr,
+				                        &pvWriteAddr,
+				                        &sRGXFWSegList);
+
+				if (eError != PVRSRV_OK)
+				{
+					RGXErrorLog(hPrivate,
+					            "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
+					            ui32Offset, ui32ByteCount);
+					return eError;
+				}
+
+				/* Write to FW allocation only if available */
+				if (pvWriteAddr)
+				{
+					RGXMemSet(hPrivate, pvWriteAddr, 0, ui32ByteCount);
+				}
+
+				break;
+			}
+			case RGX_META_LDR_CMD_CONFIG:
+			{
+				RGX_META_LDR_L2_DATA_BLK *psL2Block =
+				    (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[0]);
+				RGX_META_LDR_CFG_BLK *psConfigCommand = (RGX_META_LDR_CFG_BLK*) psL2Block->aui32BlockData;
+				IMG_UINT32 ui32L2BlockSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+				IMG_UINT32 ui32CurrBlockSize = 0;
+
+				while (ui32L2BlockSize)
+				{
+					switch (psConfigCommand->ui32Type)
+					{
+						case RGX_META_LDR_CFG_PAUSE:
+						case RGX_META_LDR_CFG_READ:
+						{
+							ui32CurrBlockSize = 8;
+							return PVRSRV_ERROR_INIT_FAILURE;
+						}
+						case RGX_META_LDR_CFG_WRITE:
+						{
+							IMG_UINT32 ui32RegisterOffset = psConfigCommand->aui32BlockData[0];
+							IMG_UINT32 ui32RegisterValue  = psConfigCommand->aui32BlockData[1];
+
+							/* Only write to bootloader if we got a valid
+							 * pointer to the FW code allocation
+							 */
+							if (pui32BootConf)
+							{
+								/* Do register write */
+								*pui32BootConf++ = ui32RegisterOffset;
+								*pui32BootConf++ = ui32RegisterValue;
+							}
+
+							RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+							              ui32RegisterOffset, ui32RegisterValue);
+
+							ui32CurrBlockSize = 12;
+							break;
+						}
+						case RGX_META_LDR_CFG_MEMSET:
+						case RGX_META_LDR_CFG_MEMCHECK:
+						{
+							ui32CurrBlockSize = 20;
+							return PVRSRV_ERROR_INIT_FAILURE;
+						}
+						default:
+						{
+							return PVRSRV_ERROR_INIT_FAILURE;
+						}
+					}
+					ui32L2BlockSize -= ui32CurrBlockSize;
+					psConfigCommand = (RGX_META_LDR_CFG_BLK*) (((IMG_UINT8*) psConfigCommand) + ui32CurrBlockSize);
+				}
+
+				break;
+			}
+			default:
+			{
+				return PVRSRV_ERROR_INIT_FAILURE;
+			}
+		}
+
+NextBlock:
+
+		if (psL1Data->ui32Next == 0xFFFFFFFF)
+		{
+			psL1Data = NULL;
+		}
+		else
+		{
+			psL1Data = (RGX_META_LDR_L1_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->ui32Next);
+		}
+	}
+
+	*ppui32BootConf = pui32BootConf;
+
+	RGXCommentLog(hPrivate, "**********************************************");
+	RGXCommentLog(hPrivate, "************** End Loader Parsing ************");
+	RGXCommentLog(hPrivate, "**********************************************");
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      ProcessELFCommandStream
+
+ @Description   Process the output of the Mips toolchain in the .ELF format
+                copying code and data sections into their final location
+
+ @Input         hPrivate          : Implementation specific data
+ @Input         pbELF             : Pointer to FW blob
+ @Input         pvHostFWCodeAddr  : Pointer to FW code
+ @Input         pvHostFWDataAddr  : Pointer to FW data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate,
+                                            const IMG_BYTE *pbELF,
+                                            void *pvHostFWCodeAddr,
+                                            void *pvHostFWDataAddr)
+{
+	IMG_UINT32 ui32Entry;
+	RGX_MIPS_ELF_HDR *psHeader = (RGX_MIPS_ELF_HDR *)pbELF;
+	RGX_MIPS_ELF_PROGRAM_HDR *psProgramHeader =
+	    (RGX_MIPS_ELF_PROGRAM_HDR *)(pbELF + psHeader->ui32Ephoff);
+	PVRSRV_ERROR eError;
+
+	for (ui32Entry = 0; ui32Entry < psHeader->ui32Ephnum; ui32Entry++, psProgramHeader++)
+	{
+		void *pvWriteAddr;
+		RGX_FW_SEGMENT_LIST sRGXFWSegList;
+		sRGXFWSegList.psRGXFWCodeSeg = asRGXMipsFWCodeSegments;
+		sRGXFWSegList.psRGXFWDataSeg = asRGXMipsFWDataSegments;
+		sRGXFWSegList.ui32CodeSegCount = RGXFW_MIPS_NUM_CODE_SEGMENTS;
+		sRGXFWSegList.ui32DataSegCount = RGXFW_MIPS_NUM_DATA_SEGMENTS;
+
+		/* Only consider loadable entries in the ELF segment table */
+		if (psProgramHeader->ui32Ptype != ELF_PT_LOAD) continue;
+
+		eError = FindMMUSegment(psProgramHeader->ui32Pvaddr,
+		                        pvHostFWCodeAddr,
+		                        pvHostFWDataAddr,
+		                        &pvWriteAddr,
+		                        &sRGXFWSegList);
+
+		if (eError != PVRSRV_OK)
+		{
+			RGXErrorLog(hPrivate,
+			            "%s: Addr 0x%x (size: %d) not found in any segment", __func__,
+			            psProgramHeader->ui32Pvaddr,
+			            psProgramHeader->ui32Pfilesz);
+			return eError;
+		}
+
+		/* Write to FW allocation only if available */
+		if (pvWriteAddr)
+		{
+			RGXMemCopy(hPrivate,
+			           pvWriteAddr,
+			           (IMG_PBYTE)(pbELF + psProgramHeader->ui32Poffset),
+			           psProgramHeader->ui32Pfilesz);
+
+			RGXMemSet(hPrivate,
+			          (IMG_PBYTE)pvWriteAddr + psProgramHeader->ui32Pfilesz,
+			          0,
+			          psProgramHeader->ui32Pmemsz - psProgramHeader->ui32Pfilesz);
+		}
+	}
+
+	return PVRSRV_OK;
+}
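+
+/* For reference: ProcessELFCommandStream above walks the standard ELF32
+ * program header table. The RGX_MIPS_ELF_PROGRAM_HDR fields it reads
+ * (ui32Ptype, ui32Poffset, ui32Pvaddr, ui32Pfilesz, ui32Pmemsz) are assumed
+ * to mirror the ELF specification layout sketched below:
+ *
+ *     typedef struct {
+ *         Elf32_Word p_type;    // segment type; only PT_LOAD is copied
+ *         Elf32_Off  p_offset;  // offset of the segment within the FW blob
+ *         Elf32_Addr p_vaddr;   // virtual address the segment is loaded at
+ *         Elf32_Addr p_paddr;   // physical address (unused here)
+ *         Elf32_Word p_filesz;  // bytes to copy from the blob
+ *         Elf32_Word p_memsz;   // total size in memory; the excess over
+ *                               // p_filesz is zero-filled
+ *         Elf32_Word p_flags;   // segment flags
+ *         Elf32_Word p_align;   // segment alignment
+ *     } Elf32_Phdr;
+ */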
+
+
+PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate,
+                                    IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWDataAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCorememAllocSize)
+{
+	IMG_UINT32 i, ui32NumCodeSegments = 0, ui32NumDataSegments = 0;
+	RGX_FW_SEGMENT *pasRGXFWCodeSegments = NULL, *pasRGXFWDataSegments = NULL;
+
+	IMG_BOOL bMIPS = RGXDeviceHasFeature(hPrivate, RGX_FEATURE_MIPS_BIT_MASK);
+
+	if (!bMIPS)
+	{
+		pasRGXFWCodeSegments = asRGXMetaFWCodeSegments;
+		pasRGXFWDataSegments = asRGXMetaFWDataSegments;
+		ui32NumCodeSegments = RGXFW_META_NUM_CODE_SEGMENTS;
+		ui32NumDataSegments = RGXFW_META_NUM_DATA_SEGMENTS;
+	}
+	else
+	{
+		pasRGXFWCodeSegments = asRGXMipsFWCodeSegments;
+		pasRGXFWDataSegments = asRGXMipsFWDataSegments;
+		ui32NumCodeSegments = RGXFW_MIPS_NUM_CODE_SEGMENTS;
+		ui32NumDataSegments = RGXFW_MIPS_NUM_DATA_SEGMENTS;
+	}
+
+	*puiFWCodeAllocSize = 0;
+	*puiFWDataAllocSize = 0;
+	*puiFWCorememAllocSize = 0;
+
+	/* Calculate how much memory the FW needs for its code and data segments */
+
+	for (i = 0; i < ui32NumCodeSegments; i++)
+	{
+		*puiFWCodeAllocSize += pasRGXFWCodeSegments[i].ui32SegAllocSize;
+	}
+
+	for (i = 0; i < ui32NumDataSegments; i++)
+	{
+		*puiFWDataAllocSize += pasRGXFWDataSegments[i].ui32SegAllocSize;
+	}
+
+	*puiFWCorememAllocSize = RGXGetFWCorememSize(hPrivate);
+
+	if (*puiFWCorememAllocSize != 0)
+	{
+		*puiFWCorememAllocSize -= RGX_META_COREMEM_DATA_SIZE;
+	}
+
+	if (bMIPS)
+	{
+		if ((*puiFWCodeAllocSize % RGXMIPSFW_PAGE_SIZE) != 0)
+		{
+			RGXErrorLog(hPrivate,
+			            "%s: The MIPS FW code allocation is not"
+			            " a multiple of the page size!", __func__);
+			return PVRSRV_ERROR_INIT_FAILURE;
+		}
+
+		if ((*puiFWDataAllocSize % RGXMIPSFW_PAGE_SIZE) != 0)
+		{
+			RGXErrorLog(hPrivate,
+			            "%s: The MIPS FW data allocation is not"
+			            " a multiple of the page size!", __func__);
+			return PVRSRV_ERROR_INIT_FAILURE;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXProcessFWImage(const void           *hPrivate,
+                               const IMG_BYTE       *pbRGXFirmware,
+                               void                 *pvFWCode,
+                               void                 *pvFWData,
+                               void                 *pvFWCorememCode,
+                               IMG_DEV_VIRTADDR     *psFWCodeDevVAddrBase,
+                               IMG_DEV_VIRTADDR     *psFWDataDevVAddrBase,
+                               IMG_DEV_VIRTADDR     *psFWCorememDevVAddrBase,
+                               RGXFWIF_DEV_VIRTADDR *psFWCorememFWAddr,
+                               RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+                               IMG_UINT32           ui32NumThreads,
+                               IMG_UINT32           ui32MainThreadID)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bMIPS = RGXDeviceHasFeature(hPrivate, RGX_FEATURE_MIPS_BIT_MASK);
+
+	if (!bMIPS)
+	{
+		IMG_UINT32 *pui32BootConf = NULL;
+		/* Skip bootloader configuration if a pointer to the FW code
+		 * allocation is not available
+		 */
+		if (pvFWCode)
+		{
+			/* This variable points to the bootloader configuration, which is
+			 * mostly a sequence of <register address, register value> pairs
+			 */
+			pui32BootConf = ((IMG_UINT32*) pvFWCode) + RGXFW_BOOTLDR_CONF_OFFSET;
+
+			/* Slave port and JTAG accesses are privileged */
+			*pui32BootConf++ = META_CR_SYSC_JTAG_THREAD;
+			*pui32BootConf++ = META_CR_SYSC_JTAG_THREAD_PRIV_EN;
+
+			RGXFWConfigureSegMMU(hPrivate,
+			                     psFWCodeDevVAddrBase,
+			                     psFWDataDevVAddrBase,
+			                     &pui32BootConf);
+		}
+
+		/* Process FW image data stream */
+		eError = ProcessLDRCommandStream(hPrivate,
+		                                 pbRGXFirmware,
+		                                 pvFWCode,
+		                                 pvFWData,
+		                                 pvFWCorememCode,
+		                                 &pui32BootConf);
+		if (eError != PVRSRV_OK)
+		{
+			RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
+			return eError;
+		}
+
+		/* Skip bootloader configuration if a pointer to the FW code
+		 * allocation is not available
+		 */
+		if (pvFWCode)
+		{
+			if ((ui32NumThreads == 0) || (ui32NumThreads > 2) || (ui32MainThreadID >= 2))
+			{
+				RGXErrorLog(hPrivate,
+				            "RGXProcessFWImage: Wrong Meta threads configuration, falling back to one thread");
+
+				ui32NumThreads = 1;
+				ui32MainThreadID = 0;
+			}
+
+			RGXFWConfigureMetaCaches(hPrivate,
+			                         ui32NumThreads,
+			                         ui32MainThreadID,
+			                         &pui32BootConf);
+
+			/* Signal the end of the conf sequence */
+			*pui32BootConf++ = 0x0;
+			*pui32BootConf++ = 0x0;
+
+			/* The FW main argv arguments start here */
+			*pui32BootConf++ = psRGXFwInit->ui32Addr;
+
+			if ((RGXGetFWCorememSize(hPrivate) != 0) && (psFWCorememFWAddr != NULL))
+			{
+				*pui32BootConf++ = psFWCorememFWAddr->ui32Addr;
+			}
+			else
+			{
+				*pui32BootConf++ = 0;
+			}
+
+			if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_META_DMA_BIT_MASK))
+			{
+				*pui32BootConf++ = (IMG_UINT32) (psFWCorememDevVAddrBase->uiAddr >> 32);
+				*pui32BootConf++ = (IMG_UINT32) psFWCorememDevVAddrBase->uiAddr;
+			}
+			else
+			{
+				*pui32BootConf++ = 0;
+				*pui32BootConf++ = 0;
+			}
+
+		}
+	}
+
+	else
+	{
+		/* Process FW image data stream */
+		eError = ProcessELFCommandStream(hPrivate,
+		                                 pbRGXFirmware,
+		                                 pvFWCode,
+		                                 pvFWData);
+		if (eError != PVRSRV_OK)
+		{
+			RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
+			return eError;
+		}
+
+		PVR_UNREFERENCED_PARAMETER(pvFWData); /* No need to touch the data segment in MIPS */
+		PVR_UNREFERENCED_PARAMETER(pvFWCorememCode); /* Coremem N/A in MIPS */
+		PVR_UNREFERENCED_PARAMETER(psFWCodeDevVAddrBase);
+		PVR_UNREFERENCED_PARAMETER(psFWDataDevVAddrBase);
+		PVR_UNREFERENCED_PARAMETER(psFWCorememDevVAddrBase);
+		PVR_UNREFERENCED_PARAMETER(psFWCorememFWAddr);
+		PVR_UNREFERENCED_PARAMETER(psRGXFwInit);
+		PVR_UNREFERENCED_PARAMETER(ui32NumThreads);
+		PVR_UNREFERENCED_PARAMETER(ui32MainThreadID);
+	}
+
+	return eError;
+}
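+
+/* Illustrative summary of the Meta bootloader configuration written above
+ * (a sketch derived from RGXProcessFWImage; not a normative layout):
+ *
+ *     <JTAG thread reg, priv value>       written before LDR parsing
+ *     <SegMMU reg/value pairs>            RGXFWConfigureSegMMU
+ *     <LDR config write reg/value pairs>  ProcessLDRCommandStream
+ *     <Meta cache reg/value pairs>        RGXFWConfigureMetaCaches
+ *     0x0, 0x0                            end-of-configuration marker
+ *     <RGXFwInit FW address>              start of the FW main arguments
+ *     <coremem code FW address, or 0>
+ *     <coremem device VA hi, lo>          0,0 without the META DMA feature
+ */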
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwimageutils.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwimageutils.h
new file mode 100644
index 0000000..56b7d08
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwimageutils.h
@@ -0,0 +1,120 @@
+/*************************************************************************/ /*!
+@File
+@Title          Header for Services Firmware image utilities used at init time
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for Services Firmware image utilities used at init time
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXFWIMAGEUTILS_H__
+#define __RGXFWIMAGEUTILS_H__
+
+/* The routines declared here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when DRM security is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxlayer.h"
+
+
+/*!
+*******************************************************************************
+
+ @Function     RGXGetFWImageAllocSize
+
+ @Description  Return size of Firmware code/data/coremem code allocations
+
+ @Input        hPrivate : Implementation specific data
+ @Output       puiFWCodeAllocSize : Returned code size
+ @Output       puiFWDataAllocSize : Returned data size
+ @Output       puiFWCorememCodeAllocSize : Returned coremem code size (0 if N/A)
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate,
+                                    IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWDataAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXProcessFWImage
+
+ @Description  Process the Firmware binary blob copying code and data
+               sections into their final location and passing some
+               information to the Firmware bootloader.
+               If a pointer to the final memory location for FW code or data
+               is not valid (NULL) then the relative section will not be
+               processed.
+
+ @Input        hPrivate        : Implementation specific data
+ @Input        pbRGXFirmware   : Pointer to FW blob
+ @Input        pvFWCode        : Pointer to FW code
+ @Input        pvFWData        : Pointer to FW data
+ @Input        pvFWCorememCode : Pointer to FW coremem code
+ @Input        psFWCodeDevVAddrBase    : FW code base device virtual address
+ @Input        psFWDataDevVAddrBase    : FW data base device virtual address
+ @Input        psFWCorememDevVAddrBase : FW coremem code base device virtual address
+ @Input        psFWCorememFWAddr    : FW coremem code allocation 32 bit (FW) address
+ @Input        psRGXFwInit          : FW init structure 32 bit (FW) address
+ @Input        ui32NumThreads       : Number of FW threads in use
+ @Input        ui32MainThreadID     : ID of the FW thread in use
+                                      (only meaningful if ui32NumThreads == 1)
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXProcessFWImage(const void           *hPrivate,
+                               const IMG_BYTE       *pbRGXFirmware,
+                               void                 *pvFWCode,
+                               void                 *pvFWData,
+                               void                 *pvFWCorememCode,
+                               IMG_DEV_VIRTADDR     *psFWCodeDevVAddrBase,
+                               IMG_DEV_VIRTADDR     *psFWDataDevVAddrBase,
+                               IMG_DEV_VIRTADDR     *psFWCorememDevVAddrBase,
+                               RGXFWIF_DEV_VIRTADDR *psFWCorememFWAddr,
+                               RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+                               IMG_UINT32           ui32NumThreads,
+                               IMG_UINT32           ui32MainThreadID);
+
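+/* A minimal usage sketch (illustrative only; names such as pbFW, pvCode and
+ * sCodeBase are placeholders, and error handling is omitted). At init time
+ * the caller is expected to size the allocations first, then process the
+ * image:
+ *
+ *     IMG_DEVMEM_SIZE_T uiCodeSize, uiDataSize, uiCorememSize;
+ *
+ *     RGXGetFWImageAllocSize(hPrivate, &uiCodeSize, &uiDataSize, &uiCorememSize);
+ *     // ... allocate the FW code/data/coremem buffers with those sizes ...
+ *     RGXProcessFWImage(hPrivate, pbFW, pvCode, pvData, pvCoremem,
+ *                       &sCodeBase, &sDataBase, &sCorememBase,
+ *                       &sCorememFWAddr, &sFwInitFWAddr,
+ *                       ui32NumThreads, ui32MainThreadID);
+ */
+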
+#endif /* __RGXFWIMAGEUTILS_H__ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwload.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwload.h
new file mode 100644
index 0000000..3098963
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwload.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services firmware load and access routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXFWLOAD_H__
+#define __RGXFWLOAD_H__
+
+#include "img_defs.h"
+#include "rgxdefs_km.h"
+#include "device_connection.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+struct RGXFW;
+
+IMG_INTERNAL struct RGXFW *RGXLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR *pszBVNCString, const IMG_CHAR *pszBVpNCString);
+
+IMG_INTERNAL void RGXUnloadFirmware(struct RGXFW *psRGXFW);
+
+IMG_INTERNAL const void *RGXFirmwareData(struct RGXFW *psRGXFW);
+IMG_INTERNAL size_t RGXFirmwareSize(struct RGXFW *psRGXFW);
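+
+/* A minimal lifecycle sketch (illustrative only; it assumes a NULL return
+ * from RGXLoadFirmware indicates failure):
+ *
+ *     struct RGXFW *psFW = RGXLoadFirmware(psDeviceNode, pszBVNC, pszBVpNC);
+ *     if (psFW != NULL)
+ *     {
+ *         const void *pvData = RGXFirmwareData(psFW);
+ *         size_t uiSize = RGXFirmwareSize(psFW);
+ *         // ... e.g. hand pvData/uiSize to the FW image processing code ...
+ *         RGXUnloadFirmware(psFW);
+ *     }
+ */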
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __RGXFWLOAD_H__ */
+
+/*****************************************************************************
+ End of file (rgxfwload.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwutils.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwutils.c
new file mode 100644
index 0000000..267dfe0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwutils.c
@@ -0,0 +1,5654 @@
+ /*************************************************************************/ /*!
+@File
+@Title          Rogue firmware utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Rogue firmware utility routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "lists.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "oskm_apphint.h"
+#include "cache_km.h"
+#include "allocmem.h"
+#include "physheap.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_server.h"
+
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "rgxfwutils.h"
+#include "rgx_options.h"
+#include "rgx_fwif.h"
+#include "rgx_fwif_alignchecks.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_pdump_panics.h"
+#include "rgxheapconfig.h"
+#include "pvrsrv.h"
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#include "rgxccb.h"
+#include "rgxcompute.h"
+#include "rgxtransfer.h"
+#include "rgxpower.h"
+#include "rgxtdmtransfer.h"
+#include "rgxray.h"
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+#include "rgxmem.h"
+#include "rgxta3d.h"
+#include "rgxutils.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "tlstream.h"
+#include "devicemem_server_utils.h"
+#include "htbuffer.h"
+#include "rgx_bvnc_defs_km.h"
+
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+
+#ifdef __linux__
+#include <linux/kernel.h>	/* sprintf */
+#include <linux/string.h>	/* strncpy, strlen */
+#include "rogue_trace_events.h"
+#else
+#include <stdio.h>
+#include <string.h>
+#endif
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#include "vz_support.h"
+#include "vz_physheap.h"
+
+/* Kernel CCB length */
+/* Reducing the size of the KCCB in an attempt to avoid flooding and overflowing the FW kick queue
+ * in the case of multiple OSes */
+#define RGXFWIF_KCCB_NUMCMDS_LOG2_GPUVIRT_WITHOUT_FEATURE  (6)
+#define RGXFWIF_KCCB_NUMCMDS_LOG2_DEFAULT                  (7)
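+/* e.g. the default log2 of 7 gives a 128-entry kernel CCB, while the
+ * virtualised setting of 6 halves it to 64 entries */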
+
+
+/* Firmware CCB length */
+#if defined(SUPPORT_PDVFS)
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (8)
+#else
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (5)
+#endif
+
+/* Workload Estimation Firmware CCB length */
+#define RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2   (7)
+
+typedef struct
+{
+	RGXFWIF_KCCB_CMD        sKCCBcmd;
+	DLLIST_NODE             sListNode;
+	PDUMP_FLAGS_T           uiPdumpFlags;
+	PVRSRV_RGXDEV_INFO      *psDevInfo;
+	RGXFWIF_DM              eDM;
+} RGX_DEFERRED_KCCB_CMD;
+
+#if defined(PDUMP)
+/* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the
+ * PID filter example entries
+ */
+static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32),
+		"FW PID filtering assumes the IMG_PID type is 32-bits wide as it "
+		"generates WRW commands for loading the PID values");
+#endif
+
+static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_INIT* psRGXFWInit)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc;
+	IMG_UINT32	ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(psDevInfo->sDevFeatureCfg.ui32CacheLineSize);
+
+	PVR_DPF_ENTERED;
+
+	eError = DevmemAllocate(psDevInfo->psFirmwareHeap,
+							1,
+							ui32CacheLineSize,
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_UNCACHED |
+							PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+							"SLC3 Fence WA",
+							ppsSLC3FenceMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	/*
+		We need to map it so the heap for this allocation
+		is set
+	*/
+	eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc,
+							   psDevInfo->psFirmwareHeap,
+							   &psRGXFWInit->sSLC3FenceDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		DevmemFwFree(psDevInfo, *ppsSLC3FenceMemDesc);
+		*ppsSLC3FenceMemDesc = NULL;
+	}
+
+	PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc);
+}
+
+static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+	DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc;
+
+	if (psSLC3FenceMemDesc)
+	{
+		DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc);
+		DevmemFree(psSLC3FenceMemDesc);
+	}
+}
+
+static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value)
+{
+	/* ensure memory is flushed before kicking MTS */
+	OSWriteMemoryBarrier();
+
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value);
+
+	/* ensure the MTS kick goes through before continuing */
+	OSMemoryBarrier();
+}
+
+
+/*!
+*******************************************************************************
+ @Function		RGXFWSetupSignatureChecks
+ @Description	Allocate a signature check buffer and point the given
+				signature buffer control structure at it for FW access
+ @Input			psDevInfo
+
+ @Return		PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupSignatureChecks(PVRSRV_RGXDEV_INFO* psDevInfo,
+                                              DEVMEM_MEMDESC**    ppsSigChecksMemDesc,
+                                              IMG_UINT32          ui32SigChecksBufSize,
+                                              RGXFWIF_SIGBUF_CTL* psSigBufCtl,
+                                              const IMG_CHAR*     pszBufferName)
+{
+	PVRSRV_ERROR	eError;
+	DEVMEM_FLAGS_T	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+									  PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+					                  PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+									  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+									  PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+									  PVRSRV_MEMALLOCFLAG_UNCACHED |
+									  PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Allocate memory for the checks */
+	PDUMPCOMMENT("Allocate memory for %s signature checks", pszBufferName);
+	eError = DevmemFwAllocate(psDevInfo,
+							ui32SigChecksBufSize,
+							uiMemAllocFlags,
+							"FwSignatureChecks",
+							ppsSigChecksMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for signature checks (%u)",
+					ui32SigChecksBufSize,
+					eError));
+		return eError;
+	}
+
+	/* Prepare the pointer for the fw to access that memory */
+	RGXSetFirmwareAddress(&psSigBufCtl->sBuffer,
+						  *ppsSigChecksMemDesc,
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	DevmemPDumpLoadMem(	*ppsSigChecksMemDesc,
+						0,
+						ui32SigChecksBufSize,
+						PDUMP_FLAGS_CONTINUOUS);
+
+	/* Express the buffer size in 32-bit register units for the firmware */
+	psSigBufCtl->ui32LeftSizeInRegs = ui32SigChecksBufSize / sizeof(IMG_UINT32);
+
+	return PVRSRV_OK;
+}
+
+#if defined(RGXFW_ALIGNCHECKS)
+/*!
+*******************************************************************************
+ @Function		RGXFWSetupAlignChecks
+ @Description   This function allocates and fills the memory needed for the
+                alignment checks of the UM and KM structures shared with the
+                firmware. The data in memory is laid out as follows:
+                    <number of elements in the KM array>
+                    <array of KM structures' sizes and members' offsets>
+                    <number of elements in the UM array>
+                    <array of UM structures' sizes and members' offsets>
+                The UM array is passed from the user side. The firmware is
+                responsible for filling in this part of the memory; when it
+                does, the host driver checks the UM structures on client
+                connect. If the macro is not defined, the client driver fills
+                the memory and the firmware checks the alignment of all
+                structures.
+ @Input			psDevInfo
+
+ @Return		PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo,
+								RGXFWIF_DEV_VIRTADDR	*psAlignChecksDevFW,
+								IMG_UINT32				*pui32RGXFWAlignChecks,
+								IMG_UINT32				ui32RGXFWAlignChecksArrLength)
+{
+	IMG_UINT32		aui32RGXFWAlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM };
+	IMG_UINT32		ui32RGXFWAlignChecksTotal;
+	IMG_UINT32*		paui32AlignChecks;
+	PVRSRV_ERROR	eError;
+
+	/* At this point we don't know the number of elements in the UM array,
+	 * so we assume the maximum, RGXFW_ALIGN_CHECKS_UM_MAX. */
+	PVR_ASSERT(ui32RGXFWAlignChecksArrLength == 0);
+	ui32RGXFWAlignChecksTotal = sizeof(aui32RGXFWAlignChecksKM)
+	        + RGXFW_ALIGN_CHECKS_UM_MAX * sizeof(IMG_UINT32)
+	        + 2 * sizeof(IMG_UINT32);
+
+	/* Allocate memory for the checks */
+	PDUMPCOMMENT("Allocate memory for alignment checks");
+	eError = DevmemFwAllocate(psDevInfo,
+							ui32RGXFWAlignChecksTotal,
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | PVRSRV_MEMALLOCFLAG_UNCACHED,
+							"FwAlignmentChecks",
+							&psDevInfo->psRGXFWAlignChecksMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for alignment checks (%u)",
+					ui32RGXFWAlignChecksTotal,
+					eError));
+		goto failAlloc;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc,
+									(void **)&paui32AlignChecks);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel addr for alignment checks (%u)",
+					eError));
+		goto failAqCpuAddr;
+	}
+
+	/* Copy the values */
+	*paui32AlignChecks++ = sizeof(aui32RGXFWAlignChecksKM)/sizeof(IMG_UINT32);
+	OSDeviceMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0], sizeof(aui32RGXFWAlignChecksKM));
+	paui32AlignChecks += sizeof(aui32RGXFWAlignChecksKM)/sizeof(IMG_UINT32);
+
+	*paui32AlignChecks = 0;
+
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWAlignChecksMemDesc,
+						0,
+						ui32RGXFWAlignChecksTotal,
+						PDUMP_FLAGS_CONTINUOUS);
+
+	/* Prepare the pointer for the fw to access that memory */
+	RGXSetFirmwareAddress(psAlignChecksDevFW,
+						  psDevInfo->psRGXFWAlignChecksMemDesc,
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	return PVRSRV_OK;
+
+failAqCpuAddr:
+	DevmemFwFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc);
+	psDevInfo->psRGXFWAlignChecksMemDesc = NULL;
+failAlloc:
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
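+
+/* A worked sketch of the buffer filled by RGXFWSetupAlignChecks above,
+ * assuming the KM array holds N words (illustrative only; the UM portion
+ * is filled in later as described in the function header):
+ *
+ *     word 0      : N                  KM element count
+ *     words 1..N  : KM sizes/offsets   copied from aui32RGXFWAlignChecksKM
+ *     word N+1    : 0                  UM element count, filled in later
+ *     words N+2.. : space for up to RGXFW_ALIGN_CHECKS_UM_MAX UM words
+ */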
+
+static void RGXFWFreeAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+	if (psDevInfo->psRGXFWAlignChecksMemDesc != NULL)
+	{
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc);
+		psDevInfo->psRGXFWAlignChecksMemDesc = NULL;
+	}
+}
+#endif
+
+static PVRSRV_ERROR
+RGXVzDevMemAllocateGuestFwHeap(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32OSID)
+{
+	IMG_CHAR szHeapName[32];
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32CacheLineSize = 0;
+	IMG_BOOL bFwLocalIsUMA = IMG_FALSE;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_DEVICE_PHYS_HEAP ePhysHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	FN_CREATERAMBACKEDPMR *ppfnCreateRamBackedPMR =
+							&psDeviceNode->pfnCreateRamBackedPMR[ePhysHeap];
+
+	/*
+	 * This is called by the host driver only; it pre-allocates and maps
+	 * into the firmware kernel memory context all guest firmware physheaps,
+	 * so we fail the call if an invalid OSID (i.e. either the host OSID or
+	 * an OSID outside the valid range) is supplied, as this would have
+	 * been due to an internal error.
+	 */
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_ERROR_INTERNAL_ERROR);
+	if (!ui32OSID || ui32OSID >= RGXFW_NUM_OS)
+	{
+		/* Guest OSID(s) range [1 up to (RGXFW_NUM_OS-1)] */
+		PVR_DPF((PVR_DBG_ERROR,
+				"Allocating guest fw heap with invalid OSID:%u, MAX:%u",
+				ui32OSID, RGXFW_NUM_OS - 1));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PDUMPCOMMENT("Mapping firmware physheap for OSID: [%d]", ui32OSID);
+	OSSNPrintf(szHeapName, sizeof(szHeapName), "GuestFirmware%d", ui32OSID);
+
+	if (*ppfnCreateRamBackedPMR != PhysmemNewLocalRamBackedPMR)
+	{
+		/* This needs a more generic framework for allocating from a guest
+		   physheap in the host driver; for now we override the function
+		   momentarily, for the duration of the guest physheap allocation,
+		   as these need to be allocated using the guest FW/RAs. This
+		   happens when the host driver uses firmware UMA physheaps */
+		*ppfnCreateRamBackedPMR = PhysmemNewLocalRamBackedPMR;
+		bFwLocalIsUMA = IMG_TRUE;
+	}
+
+	/* Target OSID physheap for allocation */
+	psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+
+	ui32CacheLineSize = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, \
+				RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK);
+
+	/* This allocates all available memory in the guest physheap */
+	eError = DevmemAllocate(psDevInfo->psGuestFirmwareHeap[ui32OSID],
+							RGX_FIRMWARE_HEAP_SIZE,
+							(GET_ROGUE_CACHE_LINE_SIZE(ui32CacheLineSize)),
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							/* PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | */
+							PVRSRV_MEMALLOCFLAG_UNCACHED |
+							PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+							szHeapName,
+							&psDevInfo->psGuestFirmwareMemDesc[ui32OSID]);
+	if (bFwLocalIsUMA)
+	{
+		/* If we have overridden this then set it back */
+		*ppfnCreateRamBackedPMR = PhysmemNewOSRamBackedPMR;
+	}
+	if (eError == PVRSRV_OK)
+	{
+		/* If allocation is successful, permanently map this into device */
+		eError = DevmemMapToDevice(psDevInfo->psGuestFirmwareMemDesc[ui32OSID],
+								   psDevInfo->psGuestFirmwareHeap[ui32OSID],
+								   &sTmpDevVAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,	"DevmemMapToDevice() failed (%u)", eError));
+		}
+	}
+
+	return eError;
+}
+
+static void
+RGXVzDevMemFreeGuestFwHeap(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32OSID)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_VZ_RETN_IF_NOT_MODE(DRIVER_MODE_HOST);
+
+	if (!ui32OSID || ui32OSID >= RGXFW_NUM_OS)
+	{
+		/* Guest OSID(s) range [1 up to (RGXFW_NUM_OS-1)] */
+		PVR_DPF((PVR_DBG_ERROR,
+				"Deallocating guest fw heap with invalid OSID:%u, MAX:%u",
+				ui32OSID, RGXFW_NUM_OS - 1));
+		return;
+	}
+
+	if (psDevInfo->psGuestFirmwareMemDesc[ui32OSID])
+	{
+		DevmemReleaseDevVirtAddr(psDevInfo->psGuestFirmwareMemDesc[ui32OSID]);
+		/* Target OSID physheap for deallocation */
+		psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+		DevmemFree(psDevInfo->psGuestFirmwareMemDesc[ui32OSID]);
+		psDevInfo->psGuestFirmwareMemDesc[ui32OSID] = NULL;
+	}
+}
+
+static PVRSRV_ERROR
+RGXVzSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	IMG_UINT32 ui32OSID;
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_OK);
+
+	eError = SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig, eHeapType, &eHeapOrigin);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+	{
+		/* Guest OSID(s) in range [1 up to (RGXFW_NUM_OS-1)] */
+		for (ui32OSID = 1; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+		{
+			eError = RGXVzDevMemAllocateGuestFwHeap(psDeviceNode, ui32OSID);
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+	}
+
+	return eError;
+}
+
+static void
+RGXVzFreeFirmware(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	IMG_UINT32 ui32OSID;
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	PVRSRV_VZ_RETN_IF_NOT_MODE(DRIVER_MODE_HOST);
+
+	eError = SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig, eHeapType, &eHeapOrigin);
+	if (eError != PVRSRV_OK)
+	{
+		return;
+	}
+
+	if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+	{
+		/* Guest OSID(s) in range [1 up to (RGXFW_NUM_OS-1)] */
+		for (ui32OSID = 1; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+		{
+			RGXVzDevMemFreeGuestFwHeap(psDeviceNode, ui32OSID);
+		}
+	}
+}
+
+void RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR	*ppDest,
+						   DEVMEM_MEMDESC		*psSrc,
+						   IMG_UINT32			uiExtraOffset,
+						   IMG_UINT32			ui32Flags)
+{
+	PVRSRV_ERROR		eError;
+	IMG_DEV_VIRTADDR	psDevVirtAddr;
+	PVRSRV_DEVICE_NODE	*psDeviceNode;
+	IMG_UINT64			ui64ErnsBrns = 0;
+	PVRSRV_RGXDEV_INFO	*psDevInfo;
+
+	psDeviceNode = (PVRSRV_DEVICE_NODE *) DevmemGetConnection(psSrc);
+	psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+	ui64ErnsBrns = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+
+	if (psDevInfo->sDevFeatureCfg.ui32META)
+	{
+		IMG_UINT32	    ui32Offset;
+		IMG_BOOL            bCachedInMETA;
+		DEVMEM_FLAGS_T      uiDevFlags;
+		IMG_UINT32          uiGPUCacheMode;
+
+		eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		/* Convert to an address in META memmap */
+		ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_HEAP_BASE;
+
+		/* Check in the devmem flags whether this memory is cached/uncached */
+		DevmemGetFlags(psSrc, &uiDevFlags);
+
+		/* Honour the META cache flags */
+		bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0;
+
+		/* Honour the SLC cache flags */
+		eError = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags, &uiGPUCacheMode);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		ui32Offset += RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+		if (bCachedInMETA)
+		{
+			ui32Offset |= RGXFW_SEGMMU_DATA_META_CACHED;
+		}
+		else
+		{
+			ui32Offset |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+		}
+
+		if (PVRSRV_CHECK_GPU_CACHED(uiGPUCacheMode))
+		{
+			ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED;
+		}
+		else
+		{
+			ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED;
+		}
+		ppDest->ui32Addr = ui32Offset;
+	}
+	else
+	{
+		eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+		PVR_ASSERT(eError == PVRSRV_OK);
+		ppDest->ui32Addr = (IMG_UINT32)((psDevVirtAddr.uiAddr + uiExtraOffset) & 0xFFFFFFFF);
+	}
+
+	if (ui32Flags & RFW_FWADDR_NOREF_FLAG)
+	{
+		DevmemReleaseDevVirtAddr(psSrc);
+	}
+}
+
+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR		*psDest,
+						  DEVMEM_MEMDESC		*psSrcMemDesc,
+						  RGXFWIF_DEV_VIRTADDR	*psSrcFWDevVAddr,
+						  IMG_UINT32			uiOffset)
+{
+	PVRSRV_ERROR		eError;
+	IMG_DEV_VIRTADDR	sDevVirtAddr;
+
+	eError = DevmemAcquireDevVirtAddr(psSrcMemDesc, &sDevVirtAddr);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	psDest->psDevVirtAddr.uiAddr = sDevVirtAddr.uiAddr;
+	psDest->psDevVirtAddr.uiAddr += uiOffset;
+	psDest->pbyFWAddr.ui32Addr = psSrcFWDevVAddr->ui32Addr;
+
+	DevmemReleaseDevVirtAddr(psSrcMemDesc);
+}
+
+
+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc)
+{
+	DevmemReleaseDevVirtAddr(psSrc);
+}
+
+struct _RGX_SERVER_COMMON_CONTEXT_ {
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	DEVMEM_MEMDESC *psFWCommonContextMemDesc;
+	PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr;
+	DEVMEM_MEMDESC *psFWMemContextMemDesc;
+	DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+	DEVMEM_MEMDESC *psContextStateMemDesc;
+	RGX_CLIENT_CCB *psClientCCB;
+	DEVMEM_MEMDESC *psClientCCBMemDesc;
+	DEVMEM_MEMDESC *psClientCCBCtrlMemDesc;
+	IMG_BOOL bCommonContextMemProvided;
+	IMG_UINT32 ui32ContextID;
+	DLLIST_NODE sListNode;
+	RGXFWIF_CONTEXT_RESET_REASON eLastResetReason;
+	IMG_UINT32 ui32LastResetJobRef;
+};
+
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+									 PVRSRV_DEVICE_NODE *psDeviceNode,
+									 RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+									 RGXFWIF_DM eDM,
+									 DEVMEM_MEMDESC *psAllocatedMemDesc,
+									 IMG_UINT32 ui32AllocatedOffset,
+									 DEVMEM_MEMDESC *psFWMemContextMemDesc,
+									 DEVMEM_MEMDESC *psContextStateMemDesc,
+									 IMG_UINT32 ui32CCBAllocSize,
+									 IMG_UINT32 ui32Priority,
+									 RGX_COMMON_CONTEXT_INFO *psInfo,
+									 RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+	RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext;
+	IMG_UINT32 ui32FWCommonContextOffset;
+	IMG_UINT8 *pui8Ptr;
+	PVRSRV_ERROR eError;
+
+	/*
+	 * Allocate all the resources that are required
+	 */
+	psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext));
+	if (psServerCommonContext == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	psServerCommonContext->psDevInfo = psDevInfo;
+
+	if (psAllocatedMemDesc)
+	{
+		PDUMPCOMMENT("Using existing MemDesc for Rogue firmware %s context (offset = %d)",
+					 aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+					 ui32AllocatedOffset);
+		ui32FWCommonContextOffset = ui32AllocatedOffset;
+		psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc;
+		psServerCommonContext->bCommonContextMemProvided = IMG_TRUE;
+	}
+	else
+	{
+		/* Allocate device memory for the firmware context */
+		PDUMPCOMMENT("Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
+		eError = DevmemFwAllocate(psDevInfo,
+								sizeof(*psFWCommonContext),
+								RGX_FWCOMCTX_ALLOCFLAGS,
+								"FwContext",
+								&psServerCommonContext->psFWCommonContextMemDesc);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate firmware %s context (%s)",
+									__FUNCTION__,
+									aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+									PVRSRVGetErrorStringKM(eError)));
+			goto fail_contextalloc;
+		}
+		ui32FWCommonContextOffset = 0;
+		psServerCommonContext->bCommonContextMemProvided = IMG_FALSE;
+	}
+
+	/* Record this context so we can refer to it if the FW needs to tell us it was reset. */
+	psServerCommonContext->eLastResetReason    = RGXFWIF_CONTEXT_RESET_REASON_NONE;
+	psServerCommonContext->ui32LastResetJobRef = 0;
+	psServerCommonContext->ui32ContextID       = psDevInfo->ui32CommonCtxtCurrentID++;
+
+	/* Allocate the client CCB */
+	eError = RGXCreateCCB(psDevInfo,
+						  ui32CCBAllocSize,
+						  psConnection,
+						  eRGXCCBRequestor,
+						  psServerCommonContext,
+						  &psServerCommonContext->psClientCCB,
+						  &psServerCommonContext->psClientCCBMemDesc,
+						  &psServerCommonContext->psClientCCBCtrlMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create CCB for %s context (%s)",
+								__FUNCTION__,
+								aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+								PVRSRVGetErrorStringKM(eError)));
+		goto fail_allocateccb;
+	}
+
+	/*
+	 * Temporarily map the firmware context to the kernel and init it
+	 */
+	eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc,
+                                      (void **)&pui8Ptr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware %s context (%s) to CPU",
+								__FUNCTION__,
+								aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+								PVRSRVGetErrorStringKM(eError)));
+		goto fail_cpuvirtacquire;
+	}
+
+	psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset);
+	psFWCommonContext->eDM = eDM;
+
+	/* Set the firmware CCB device addresses in the firmware common context */
+	RGXSetFirmwareAddress(&psFWCommonContext->psCCB,
+						  psServerCommonContext->psClientCCBMemDesc,
+						  0, RFW_FWADDR_FLAG_NONE);
+	RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl,
+						  psServerCommonContext->psClientCCBCtrlMemDesc,
+						  0, RFW_FWADDR_FLAG_NONE);
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_META_DMA_BIT_MASK)
+	{
+		RGXSetMetaDMAAddress(&psFWCommonContext->sCCBMetaDMAAddr,
+							 psServerCommonContext->psClientCCBMemDesc,
+							 &psFWCommonContext->psCCB,
+							 0);
+	}
+
+	/* Set the memory context device address */
+	psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+	RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext,
+						  psFWMemContextMemDesc,
+						  0, RFW_FWADDR_FLAG_NONE);
+
+	/* Set the framework register updates address */
+	psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc;
+	if (psInfo->psFWFrameworkMemDesc != NULL)
+	{
+		RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd,
+							  psInfo->psFWFrameworkMemDesc,
+							  0, RFW_FWADDR_FLAG_NONE);
+	}
+	else
+	{
+		/* This should never be touched in contexts without a framework
+		 * memdesc, but ensure it is zero so we see crashes if it is.
+		 */
+		psFWCommonContext->psRFCmd.ui32Addr = 0;
+	}
+
+	psFWCommonContext->ui32Priority = ui32Priority;
+	psFWCommonContext->ui32PrioritySeqNum = 0;
+
+	if ((psDevInfo->sDevFeatureCfg.ui32CtrlStreamFormat == 2) && \
+				(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK))
+	{
+		if (eDM == RGXFWIF_DM_CDM)
+		{
+			if (psInfo->psResumeSignalAddr != NULL)
+			{
+				psFWCommonContext->ui64ResumeSignalAddr = psInfo->psResumeSignalAddr->uiAddr;
+			}
+		}
+	}
+
+	/* Store references to the Server Common Context and PID for notifications back from the FW. */
+	psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID;
+	psFWCommonContext->ui32PID                   = OSGetCurrentClientProcessIDKM();
+
+	/* Set the firmware GPU context state buffer */
+	psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc;
+	if (psContextStateMemDesc)
+	{
+		RGXSetFirmwareAddress(&psFWCommonContext->psContextState,
+							  psContextStateMemDesc,
+							  0,
+							  RFW_FWADDR_FLAG_NONE);
+	}
+
+	/*
+	 * Dump the created context
+	 */
+	PDUMPCOMMENT("Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
+	DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc,
+					   ui32FWCommonContextOffset,
+					   sizeof(*psFWCommonContext),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	/* We've finished the setup so release the CPU mapping */
+	DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+
+	/* Map this allocation into the FW */
+	RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr,
+						  psServerCommonContext->psFWCommonContextMemDesc,
+						  ui32FWCommonContextOffset,
+						  RFW_FWADDR_FLAG_NONE);
+
+#if defined(LINUX)
+	{
+		IMG_UINT32 ui32FWAddr;
+		switch (eDM) {
+			case RGXFWIF_DM_TA:
+				ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
+						psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext));
+				break;
+			case RGXFWIF_DM_3D:
+				ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
+						psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext));
+				break;
+			default:
+				ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr;
+				break;
+		}
+
+		trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(),
+									  aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+									  ui32FWAddr);
+	}
+#endif
+	/* Add the node to the list when finalised */
+	OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock);
+	dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock);
+
+	*ppsServerCommonContext = psServerCommonContext;
+	return PVRSRV_OK;
+
+fail_allocateccb:
+	DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+fail_cpuvirtacquire:
+	RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
+	if (!psServerCommonContext->bCommonContextMemProvided)
+	{
+		DevmemFwFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc);
+		psServerCommonContext->psFWCommonContextMemDesc = NULL;
+	}
+fail_contextalloc:
+	OSFreeMem(psServerCommonContext);
+fail_alloc:
+	return eError;
+}
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+
+	OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
+	/* Remove the context from the list of all contexts. */
+	dllist_remove_node(&psServerCommonContext->sListNode);
+	OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
+
+	/*
+		Unmap the context itself and then all its resources
+	*/
+
+	/* Unmap the FW common context */
+	RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
+	/* Unmap the context state buffer (if there was one) */
+	if (psServerCommonContext->psContextStateMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc);
+	}
+	/* Unmap the framework buffer */
+	if (psServerCommonContext->psFWFrameworkMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc);
+	}
+	/* Unmap client CCB and CCB control */
+	RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
+	RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
+	/* Unmap the memory context */
+	RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc);
+
+	/* Destroy the client CCB */
+	RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB);
+
+
+	/* Free the FW common context (if there was one) */
+	if (!psServerCommonContext->bCommonContextMemProvided)
+	{
+		DevmemFwFree(psServerCommonContext->psDevInfo,
+						psServerCommonContext->psFWCommonContextMemDesc);
+		psServerCommonContext->psFWCommonContextMemDesc = NULL;
+	}
+	/* Free the hosts representation of the common context */
+	OSFreeMem(psServerCommonContext);
+}
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+	return psServerCommonContext->sFWCommonContextFWAddr;
+}
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+	return psServerCommonContext->psClientCCB;
+}
+
+RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                                               IMG_UINT32 *pui32LastResetJobRef)
+{
+	RGXFWIF_CONTEXT_RESET_REASON eLastResetReason;
+
+	PVR_ASSERT(psServerCommonContext != NULL);
+	PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+	/* Take the most recent reason & job ref and reset for next time... */
+	eLastResetReason      = psServerCommonContext->eLastResetReason;
+	*pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef;
+	psServerCommonContext->eLastResetReason = RGXFWIF_CONTEXT_RESET_REASON_NONE;
+	psServerCommonContext->ui32LastResetJobRef = 0;
+
+	return eLastResetReason;
+}
+
+/*!
+*******************************************************************************
+ @Function		RGXFreeKernelCCB
+ @Description	Free the kernel CCB
+ @Input			psDevInfo
+
+ @Return		void
+******************************************************************************/
+static void RGXFreeKernelCCB(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	if (psDevInfo->psKernelCCBMemDesc != NULL)
+	{
+		if (psDevInfo->psKernelCCB != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psKernelCCBMemDesc);
+			psDevInfo->psKernelCCB = NULL;
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psKernelCCBMemDesc);
+		psDevInfo->psKernelCCBMemDesc = NULL;
+	}
+	if (psDevInfo->psKernelCCBCtlMemDesc != NULL)
+	{
+		if (psDevInfo->psKernelCCBCtl != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psKernelCCBCtlMemDesc);
+			psDevInfo->psKernelCCBCtl = NULL;
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psKernelCCBCtlMemDesc);
+		psDevInfo->psKernelCCBCtlMemDesc = NULL;
+	}
+}
+
+/*!
+*******************************************************************************
+ @Function		RGXSetupKernelCCB
+ @Description	Allocate and initialise the kernel CCB
+ @Input			psDevInfo
+ @Input			psRGXFWInit
+ @Input			ui32NumCmdsLog2
+ @Input			ui32CmdSize
+
+ @Return		PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupKernelCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                      RGXFWIF_INIT       *psRGXFWInit,
+                                      IMG_UINT32         ui32NumCmdsLog2,
+                                      IMG_UINT32         ui32CmdSize)
+{
+	PVRSRV_ERROR		eError;
+	RGXFWIF_CCB_CTL		*psKCCBCtl;
+	DEVMEM_FLAGS_T		uiCCBCtlMemAllocFlags, uiCCBMemAllocFlags;
+	IMG_UINT32			ui32kCCBSize = (1U << ui32NumCmdsLog2);
+
+
+	/*
+	 * FIXME: the write offset need not be writeable by the firmware, indeed may
+	 * not even be needed for reading. Consider moving it to its own data
+	 * structure.
+	 */
+	uiCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+							PVRSRV_MEMALLOCFLAG_UNCACHED |
+							 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Allocation flags for Kernel CCB */
+	uiCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						 PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+						 PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						 PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+						 PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						 PVRSRV_MEMALLOCFLAG_UNCACHED |
+						 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/*
+	 * Allocate memory for the kernel CCB control.
+	 */
+	PDUMPCOMMENT("Allocate memory for kernel CCB control");
+	eError = DevmemFwAllocate(psDevInfo,
+	                          sizeof(RGXFWIF_CCB_CTL),
+	                          uiCCBCtlMemAllocFlags,
+	                          "FwKernelCCBControl",
+	                          &psDevInfo->psKernelCCBCtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupKernelCCB: Failed to allocate kernel CCB ctl (%u)", eError));
+		goto fail;
+	}
+
+	/*
+	 * Allocate memory for the kernel CCB.
+	 * (this will reference further command data in non-shared CCBs)
+	 */
+	PDUMPCOMMENT("Allocate memory for kernel CCB");
+	eError = DevmemFwAllocate(psDevInfo,
+	                          ui32kCCBSize * ui32CmdSize,
+	                          uiCCBMemAllocFlags,
+	                          "FwKernelCCB",
+	                          &psDevInfo->psKernelCCBMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupKernelCCB: Failed to allocate kernel CCB (%u)", eError));
+		goto fail;
+	}
+
+	/*
+	 * Map the kernel CCB control to the kernel.
+	 */
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psKernelCCBCtlMemDesc,
+                                      (void **)&psDevInfo->psKernelCCBCtl);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupKernelCCB: Failed to acquire cpu kernel CCB Ctl (%u)", eError));
+		goto fail;
+	}
+
+	/*
+	 * Map the kernel CCB to the kernel.
+	 */
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psKernelCCBMemDesc,
+                                      (void **)&psDevInfo->psKernelCCB);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupKernelCCB: Failed to acquire cpu kernel CCB (%u)", eError));
+		goto fail;
+	}
+
+	/*
+	 * Initialise the kernel CCB control.
+	 */
+	psKCCBCtl = psDevInfo->psKernelCCBCtl;
+	psKCCBCtl->ui32WriteOffset = 0;
+	psKCCBCtl->ui32ReadOffset = 0;
+	psKCCBCtl->ui32WrapMask = ui32kCCBSize - 1;
+	psKCCBCtl->ui32CmdSize = ui32CmdSize;
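+	/* With the default ui32NumCmdsLog2 of 7, this gives 128 slots and a wrap
+	 * mask of 0x7F; read/write offsets are presumably advanced ring-buffer
+	 * style, i.e. ui32Off = (ui32Off + 1) & ui32WrapMask */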
+
+	/*
+	 * Set-up RGXFWIfCtl pointers to access the kCCB
+	 */
+	RGXSetFirmwareAddress(&psRGXFWInit->psKernelCCBCtl,
+	                      psDevInfo->psKernelCCBCtlMemDesc,
+	                      0, RFW_FWADDR_NOREF_FLAG);
+
+	RGXSetFirmwareAddress(&psRGXFWInit->psKernelCCB,
+	                      psDevInfo->psKernelCCBMemDesc,
+	                      0, RFW_FWADDR_NOREF_FLAG);
+
+	/*
+	 * Pdump the kernel CCB control.
+	 */
+	PDUMPCOMMENT("Initialise kernel CCB ctl");
+	DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, 0, sizeof(RGXFWIF_CCB_CTL), 0);
+
+	return PVRSRV_OK;
+
+fail:
+	RGXFreeKernelCCB(psDevInfo);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function		RGXFreeFirmwareCCB
+ @Description	Free the firmware CCB
+ @Input			psDevInfo
+ @Input			ppsFirmwareCCBCtl
+ @Input			ppsFirmwareCCBCtlMemDesc
+ @Input			ppui8FirmwareCCB
+ @Input			ppsFirmwareCCBMemDesc
+
+ @Return		void
+******************************************************************************/
+static void RGXFreeFirmwareCCB(PVRSRV_RGXDEV_INFO	*psDevInfo,
+							   RGXFWIF_CCB_CTL		**ppsFirmwareCCBCtl,
+							   DEVMEM_MEMDESC		**ppsFirmwareCCBCtlMemDesc,
+							   IMG_UINT8			**ppui8FirmwareCCB,
+							   DEVMEM_MEMDESC		**ppsFirmwareCCBMemDesc)
+{
+	if (*ppsFirmwareCCBMemDesc != NULL)
+	{
+		if (*ppui8FirmwareCCB != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(*ppsFirmwareCCBMemDesc);
+			*ppui8FirmwareCCB = NULL;
+		}
+		DevmemFwFree(psDevInfo, *ppsFirmwareCCBMemDesc);
+		*ppsFirmwareCCBMemDesc = NULL;
+	}
+	if (*ppsFirmwareCCBCtlMemDesc != NULL)
+	{
+		if (*ppsFirmwareCCBCtl != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(*ppsFirmwareCCBCtlMemDesc);
+			*ppsFirmwareCCBCtl = NULL;
+		}
+		DevmemFwFree(psDevInfo, *ppsFirmwareCCBCtlMemDesc);
+		*ppsFirmwareCCBCtlMemDesc = NULL;
+	}
+}
+
+#define INPUT_STR_SIZE_MAX 13
+#define APPEND_STR_SIZE 7
+#define COMBINED_STR_LEN_MAX (INPUT_STR_SIZE_MAX + APPEND_STR_SIZE + 1)
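+
+/*
+ * Worked example for the sizing above: INPUT_STR_SIZE_MAX (13) caters for the
+ * longest caller-supplied CCB name, APPEND_STR_SIZE (7) is strlen("Control"),
+ * and the +1 in COMBINED_STR_LEN_MAX (13 + 7 + 1 = 21) reserves room for the
+ * NUL terminator that strncat always appends.
+ */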
+
+/*!
+*******************************************************************************
+ @Function		RGXSetupFirmwareCCB
+ @Description	Allocate and initialise a Firmware CCB
+ @Input			psDevInfo
+ @Input			ppsFirmwareCCBCtl
+ @Input			ppsFirmwareCCBCtlMemDesc
+ @Input			ppui8FirmwareCCB
+ @Input			ppsFirmwareCCBMemDesc
+ @Input			psFirmwareCCBCtlFWAddr
+ @Input			psFirmwareCCBFWAddr
+ @Input			ui32NumCmdsLog2
+ @Input			ui32CmdSize
+ @Input			pszName                   Name; its length must not exceed
+                                          INPUT_STR_SIZE_MAX
+ @Return		PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupFirmwareCCB(PVRSRV_RGXDEV_INFO		*psDevInfo,
+										RGXFWIF_CCB_CTL			**ppsFirmwareCCBCtl,
+										DEVMEM_MEMDESC			**ppsFirmwareCCBCtlMemDesc,
+										IMG_UINT8				**ppui8FirmwareCCB,
+										DEVMEM_MEMDESC			**ppsFirmwareCCBMemDesc,
+										PRGXFWIF_CCB_CTL		*psFirmwareCCBCtlFWAddr,
+										PRGXFWIF_CCB			*psFirmwareCCBFWAddr,
+										IMG_UINT32				ui32NumCmdsLog2,
+										IMG_UINT32				ui32CmdSize,
+										IMG_PCHAR				pszName)
+{
+	PVRSRV_ERROR		eError;
+	RGXFWIF_CCB_CTL		*psFWCCBCtl;
+	DEVMEM_FLAGS_T		uiCCBCtlMemAllocFlags, uiCCBMemAllocFlags;
+	IMG_UINT32			ui32FWCCBSize = (1U << ui32NumCmdsLog2);
+	IMG_CHAR			sCCBCtlName[COMBINED_STR_LEN_MAX] = "";
+	IMG_CHAR			sAppend[] = "Control";
+
+	/*
+	 * FIXME: the write offset need not be writeable by the host, indeed may
+	 * not even be needed for reading. Consider moving it to its own data
+	 * structure.
+	 */
+	uiCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+							PVRSRV_MEMALLOCFLAG_UNCACHED |
+							PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Allocation flags for Firmware CCB */
+	uiCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						 PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						 PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						 PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						 PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						 PVRSRV_MEMALLOCFLAG_UNCACHED |
+						 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PVR_ASSERT(strlen(sCCBCtlName) == 0);
+	PVR_ASSERT(strlen(sAppend) == APPEND_STR_SIZE);
+	PVR_ASSERT(strlen(pszName) <= INPUT_STR_SIZE_MAX);
+
+	/* Append "Control" to the name for the control struct. */
+	strncat(sCCBCtlName, pszName, INPUT_STR_SIZE_MAX);
+	strncat(sCCBCtlName, sAppend, APPEND_STR_SIZE);
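+	/* e.g. the "FwCCB" and "FwWEstCCB" callers below yield control structures
+	 * named "FwCCBControl" and "FwWEstCCBControl" respectively. */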
+
+	/*
+		Allocate memory for the Firmware CCB control.
+	*/
+	PDUMPCOMMENT("Allocate memory for %s", sCCBCtlName);
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_CCB_CTL),
+							uiCCBCtlMemAllocFlags,
+							sCCBCtlName,
+                            ppsFirmwareCCBCtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmwareCCB: Failed to allocate %s (%u)", sCCBCtlName, eError));
+		goto fail;
+	}
+
+	/*
+		Allocate memory for the Firmware CCB.
+		(this will reference further command data in non-shared CCBs)
+	*/
+	PDUMPCOMMENT("Allocate memory for %s", pszName);
+	eError = DevmemFwAllocate(psDevInfo,
+							ui32FWCCBSize * ui32CmdSize,
+							uiCCBMemAllocFlags,
+							pszName,
+                            ppsFirmwareCCBMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmwareCCB: Failed to allocate %s (%u)", pszName, eError));
+		goto fail;
+	}
+
+	/*
+		Map the Firmware CCB control to the kernel.
+	*/
+	eError = DevmemAcquireCpuVirtAddr(*ppsFirmwareCCBCtlMemDesc,
+                                      (void **)ppsFirmwareCCBCtl);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmwareCCB: Failed to acquire cpu %s (%u)", sCCBCtlName, eError));
+		goto fail;
+	}
+
+	/*
+		Map the firmware CCB to the kernel.
+	*/
+	eError = DevmemAcquireCpuVirtAddr(*ppsFirmwareCCBMemDesc,
+                                      (void **)ppui8FirmwareCCB);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmwareCCB: Failed to acquire cpu %s (%u)", pszName, eError));
+		goto fail;
+	}
+
+	/*
+	 * Initialise the firmware CCB control.
+	 */
+	psFWCCBCtl = *ppsFirmwareCCBCtl;
+	psFWCCBCtl->ui32WriteOffset = 0;
+	psFWCCBCtl->ui32ReadOffset = 0;
+	psFWCCBCtl->ui32WrapMask = ui32FWCCBSize - 1;
+	psFWCCBCtl->ui32CmdSize = ui32CmdSize;
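+
+	/*
+	 * Note the direction of travel: the kernel CCB set up earlier carries
+	 * host-to-firmware commands, whereas this firmware CCB carries
+	 * firmware-to-host messages; both share the same power-of-two ring layout
+	 * and RGXFWIF_CCB_CTL control structure.
+	 */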
+
+	/*
+	 * Set up RGXFWIfCtl pointers to access the Firmware CCB
+	 */
+	RGXSetFirmwareAddress(psFirmwareCCBCtlFWAddr,
+						  *ppsFirmwareCCBCtlMemDesc,
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	RGXSetFirmwareAddress(psFirmwareCCBFWAddr,
+						  *ppsFirmwareCCBMemDesc,
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	/*
+	 * PDump the Firmware CCB control.
+	 */
+	PDUMPCOMMENT("Initialise %s", sCCBCtlName);
+	DevmemPDumpLoadMem(*ppsFirmwareCCBCtlMemDesc,
+					   0,
+					   sizeof(RGXFWIF_CCB_CTL),
+					   0);
+
+	return PVRSRV_OK;
+
+fail:
+	RGXFreeFirmwareCCB(psDevInfo,
+					   ppsFirmwareCCBCtl,
+					   ppsFirmwareCCBCtlMemDesc,
+					   ppui8FirmwareCCB,
+					   ppsFirmwareCCBMemDesc);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PMR *psPMR;
+
+	if (psDevInfo->psRGXFaultAddressMemDesc)
+	{
+		if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc,(void **)&psPMR) == PVRSRV_OK)
+		{
+			PMRUnlockSysPhysAddresses(psPMR);
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
+		psDevInfo->psRGXFaultAddressMemDesc = NULL;
+	}
+}
+
+static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE	*psDeviceNode, RGXFWIF_INIT *psRGXFWInit)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	IMG_UINT32			*pui32MemoryVirtAddr;
+	IMG_UINT32			i;
+	size_t				ui32PageSize = OSGetPageSize();
+	DEVMEM_FLAGS_T		uiMemAllocFlags;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	PMR					*psPMR;
+
+	/* Allocate page of memory to use for page faults on non-blocking memory transactions */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED;
+
+	psDevInfo->psRGXFaultAddressMemDesc = NULL;
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+										ui32PageSize,
+										ui32PageSize,
+										uiMemAllocFlags,
+										"FwExFaultAddress",
+										&psDevInfo->psRGXFaultAddressMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to allocate mem for fault address (%u)",
+				eError));
+		goto failFaultAddressDescAlloc;
+	}
+
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc,
+									  (void **)&pui32MemoryVirtAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire mem for fault address (%u)",
+				eError));
+		goto failFaultAddressDescAqCpuVirt;
+	}
+
+	for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++)
+	{
+		*(pui32MemoryVirtAddr + i) = 0xDEADBEEF;
+	}
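+
+	/*
+	 * The page is poisoned with a recognisable pattern so that any device read
+	 * redirected here after a fault on a non-blocking transaction returns
+	 * 0xDEADBEEF rather than stale data, making such accesses easy to spot in
+	 * debug dumps.
+	 */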
+
+	eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc,(void **)&psPMR);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Error getting PMR for fault address (%u)",
+				eError));
+
+		goto failFaultAddressDescGetPMR;
+	}
+	else
+	{
+		IMG_BOOL bValid;
+		IMG_UINT32 ui32Log2PageSize = OSGetPageShift();
+
+		eError = PMRLockSysPhysAddresses(psPMR);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Error locking physical address for fault address MemDesc (%u)",
+					eError));
+
+			goto failFaultAddressDescLockPhys;
+		}
+
+		eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize,1,0,&(psRGXFWInit->sFaultPhysAddr),&bValid);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Error getting physical address for fault address MemDesc (%u)",
+					eError));
+
+			goto failFaultAddressDescGetPhys;
+		}
+
+		if (!bValid)
+		{
+			psRGXFWInit->sFaultPhysAddr.uiAddr = 0;
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed getting physical address for fault address MemDesc - invalid page (0x%" IMG_UINT64_FMTSPECX ")",
+					psRGXFWInit->sFaultPhysAddr.uiAddr));
+
+			goto failFaultAddressDescGetPhys;
+		}
+	}
+
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);
+
+	return PVRSRV_OK;
+
+failFaultAddressDescGetPhys:
+	PMRUnlockSysPhysAddresses(psPMR);
+
+failFaultAddressDescLockPhys:
+
+failFaultAddressDescGetPMR:
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);
+
+failFaultAddressDescAqCpuVirt:
+	DevmemFwFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
+	psDevInfo->psRGXFaultAddressMemDesc = NULL;
+
+failFaultAddressDescAlloc:
+
+	return eError;
+}
+
+static PVRSRV_ERROR RGXHwBrn37200(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+	IMG_UINT64	ui64ErnsBrns = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+	IMG_UINT32	ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(psDevInfo->sDevFeatureCfg.ui32CacheLineSize);
+
+	if (ui64ErnsBrns & FIX_HW_BRN_37200_BIT_MASK)
+	{
+		struct _DEVMEM_HEAP_	*psBRNHeap;
+		DEVMEM_FLAGS_T			uiFlags;
+		IMG_DEV_VIRTADDR		sTmpDevVAddr;
+		size_t				uiPageSize;
+
+		uiPageSize = OSGetPageSize();
+
+		uiFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+					PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+					PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+					PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+					PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+					PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+					PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+					PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+					PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+		eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx,
+								  "HWBRN37200", /* FIXME: We need to create an IDENT macro for this string.
+												 Make sure the IDENT macro is not accessible to userland */
+								  &psBRNHeap);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXHwBrn37200: HWBRN37200 Failed DevmemFindHeapByName (%u)", eError));
+			goto failFWHWBRN37200FindHeapByName;
+		}
+
+		psDevInfo->psRGXFWHWBRN37200MemDesc = NULL;
+		eError = DevmemAllocate(psBRNHeap,
+							uiPageSize,
+							ui32CacheLineSize,
+							uiFlags,
+							"HWBRN37200",
+							&psDevInfo->psRGXFWHWBRN37200MemDesc);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXHwBrn37200: Failed to allocate %u bytes for HWBRN37200 (%u)",
+					(IMG_UINT32)uiPageSize,
+					eError));
+			goto failFWHWBRN37200MemDescAlloc;
+		}
+
+		/*
+			We need to map it so the heap for this allocation
+			is set
+		*/
+		eError = DevmemMapToDevice(psDevInfo->psRGXFWHWBRN37200MemDesc,
+							   psBRNHeap,
+							   &sTmpDevVAddr);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXHwBrn37200: Failed to allocate %u bytes for HWBRN37200 (%u)",
+					(IMG_UINT32)uiPageSize,
+					eError));
+			goto failFWHWBRN37200DevmemMapToDevice;
+		}
+
+
+
+		return PVRSRV_OK;
+
+	failFWHWBRN37200DevmemMapToDevice:
+
+	failFWHWBRN37200MemDescAlloc:
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWHWBRN37200MemDesc);
+		psDevInfo->psRGXFWHWBRN37200MemDesc = NULL;
+
+	failFWHWBRN37200FindHeapByName:;
+	}
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferIsInitRequired
+
+@Description    Returns true if the firmware trace buffer is not yet allocated
+		and might soon be required by the firmware. The trace buffer is
+		allocated on demand to reduce the RAM footprint on systems that
+		do not need firmware trace.
+
+@Input          psDevInfo	 RGX device info
+
+@Return		IMG_BOOL	Whether any on-demand allocation is needed
+*/ /**************************************************************************/
+INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+	/* The firmware expects a trace buffer only when:
+	 *	- Logtype is "trace" AND
+	 *	- at least one LogGroup is configured
+	 */
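+	/* For example (assuming the usual log-group bit names), a host-side
+	 * setting of
+	 *   ui32LogType = RGXFWIF_LOG_TYPE_TRACE | RGXFWIF_LOG_TYPE_GROUP_MAIN;
+	 * satisfies both conditions, so this returns IMG_TRUE while the buffer is
+	 * still unallocated. */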
+	if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL)
+		 && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)
+		 && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK))
+	{
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferInitOnDemandResources
+
+@Description    Allocates the firmware trace buffer required for dumping trace
+		info from the firmware.
+
+@Input          psDevInfo	 RGX device info
+
+@Return		PVRSRV_OK	If all went well, PVRSRV_ERROR otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+	DEVMEM_FLAGS_T     uiMemAllocFlags;
+	PVRSRV_ERROR       eError = PVRSRV_OK;
+	IMG_UINT32         ui32FwThreadNum;
+
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+				PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+				PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+				PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+				PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+				PVRSRV_MEMALLOCFLAG_UNCACHED |
+				PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++)
+	{
+		/* Ensure allocation API is only called when not already allocated */
+		PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum] == NULL);
+
+		PDUMPCOMMENT("Allocate rgxfw trace buffer(%u)", ui32FwThreadNum);
+		eError = DevmemFwAllocate(psDevInfo,
+						RGXFW_TRACE_BUFFER_SIZE * sizeof(*(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer)),
+						uiMemAllocFlags,
+						"FwTraceBuffer",
+						&psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum]);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate %zu bytes for fw trace buffer %u (Error code:%u)",
+					__FUNCTION__,
+					RGXFW_TRACE_BUFFER_SIZE * sizeof(*(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer)),
+					ui32FwThreadNum,
+					eError));
+			goto fail;
+		}
+
+		/* The firmware address should not already be set */
+		PVR_ASSERT(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer.ui32Addr == 0x0);
+
+		/* for the FW to use this address when dumping in log (trace) buffer */
+		RGXSetFirmwareAddress(&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer,
+						psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum],
+						0, RFW_FWADDR_NOREF_FLAG);
+		/* Set an address for the host to be able to read fw trace buffer */
+		eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum],
+								(void **)&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel tracebuf (%u) ctl (Error code: %u)",
+					__FUNCTION__, ui32FwThreadNum, eError));
+			goto fail;
+		}
+	}
+
+/* Just return the error in case of failure; clean-up is handled by the DeInit function */
+fail:
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferDeinit
+
+@Description    Deinitialises all the allocations and references that are made
+		for the FW trace buffer(s)
+
+@Input          psDevInfo	 RGX device info
+@Return		void
+*/ /**************************************************************************/
+static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+	IMG_UINT32 i;
+
+	for (i = 0; i < RGXFW_THREAD_NUM; i++)
+	{
+		if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i])
+		{
+			if (psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer != NULL)
+			{
+				DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+				psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer = NULL;
+			}
+
+			DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+			psDevInfo->psRGXFWIfTraceBufferMemDesc[i] = NULL;
+		}
+	}
+}
+
+static PVRSRV_ERROR RGXSetupOSConfig(PVRSRV_RGXDEV_INFO *psDevInfo,
+									 RGXFWIF_INIT *psRGXFWInit,
+									 IMG_UINT32 ui32ConfigFlags,
+									 IMG_UINT32 ui32ConfigFlagsExt)
+{
+	PVRSRV_ERROR       eError = PVRSRV_OK;
+	DEVMEM_FLAGS_T     uiMemAllocFlags;
+	RGXFWIF_OS_CONFIG *psOSConfig;
+
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+
+	PDUMPCOMMENT("Allocate RGXFW_OS_CONFIG structure");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_OS_CONFIG),
+							uiMemAllocFlags,
+							"FwOSConfigStructure",
+							&psDevInfo->psRGXFWIfOSConfigDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupOSConfig: Failed to allocate %u bytes for OS Config (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_OS_CONFIG),
+				eError));
+		goto fail1;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInit->sOSConfig,
+							psDevInfo->psRGXFWIfOSConfigDesc,
+							0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfOSConfigDesc,
+									  (void **)&psOSConfig);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupOSConfig: Failed to acquire OS Config (%u)",
+				eError));
+		goto fail2;
+	}
+
+	psOSConfig->ui32ConfigFlags = ui32ConfigFlags & RGXFWIF_INICFG_ALL;
+
+	eError = SyncPrimGetFirmwareAddr(psDevInfo->psPowSyncPrim, &psOSConfig->sPowerSync.ui32Addr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed to get Sync Prim FW address with error (%u)",
+			__FUNCTION__, eError));
+		goto fail2;
+	}
+
+	psDevInfo->psFWIfOSConfig = psOSConfig;
+
+	if (ui32ConfigFlagsExt > 0)
+	{
+		/* Inform the FW that there is also an extended field */
+		psOSConfig->ui32ConfigFlags |= RGXFWIF_INICFG_USE_EXTENDED;
+	}
+
+	/* Set the extended flags into the OS Config field */
+	psOSConfig->ui32ConfigFlagsExt = ui32ConfigFlagsExt;
+
+	return PVRSRV_OK;
+
+fail2:
+	DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfOSConfigDesc);
+fail1:
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXSetupFirmware
+
+ @Description
+
+ Sets up all the firmware-related data
+
+ @Input psDevInfo
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                              IMG_BOOL                 bEnableSignatureChecks,
+                              IMG_UINT32               ui32SignatureChecksBufSize,
+                              IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                              IMG_UINT64               ui64HWPerfFilter,
+                              IMG_UINT32               ui32RGXFWAlignChecksArrLength,
+                              IMG_UINT32               *pui32RGXFWAlignChecks,
+                              IMG_UINT32               ui32ConfigFlags,
+                              IMG_UINT32               ui32LogType,
+                              RGXFWIF_BIFTILINGMODE    eBifTilingMode,
+                              IMG_UINT32               ui32NumTilingCfgs,
+                              IMG_UINT32               *pui32BIFTilingXStrides,
+                              IMG_UINT32               ui32FilterFlags,
+                              IMG_UINT32               ui32JonesDisableMask,
+                              IMG_UINT32               ui32HWRDebugDumpLimit,
+                              IMG_UINT32               ui32HWPerfCountersDataSize,
+                              PMR                      **ppsHWPerfPMR,
+                              RGXFWIF_DEV_VIRTADDR     *psRGXFWInitFWAddr,
+                              RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+                              FW_PERF_CONF             eFirmwarePerf,
+                              IMG_UINT32               ui32ConfigFlagsExt)
+
+{
+	PVRSRV_ERROR		eError;
+	DEVMEM_FLAGS_T		uiMemAllocFlags;
+	RGXFWIF_INIT		*psRGXFWInitScratch = NULL;
+	RGXFWIF_INIT		*psRGXFWInitActual = NULL;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	IMG_UINT32			dm, ui32Temp = 0;
+	IMG_UINT64			ui64ErnsBrns;
+#if defined (SUPPORT_PDVFS)
+	RGXFWIF_PDVFS_OPP   *psPDVFSOPPInfo;
+	IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg;
+#endif
+	ui64ErnsBrns = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+
+	/* Fw init data */
+
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+						/* FIXME: Change to Cached */
+
+
+	PDUMPCOMMENT("Allocate RGXFWIF_INIT structure");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_INIT),
+							uiMemAllocFlags,
+							"FwInitStructure",
+							&psDevInfo->psRGXFWIfInitMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw if ctl (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_INIT),
+				eError));
+		goto fail;
+	}
+
+	psRGXFWInitScratch = OSAllocZMem(sizeof(*psRGXFWInitScratch));
+
+	if (psRGXFWInitScratch == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSetupFirmware: Failed to allocate RGXFWInit scratch structure"));
+		/* OSAllocZMem does not touch eError, which would otherwise still hold
+		 * PVRSRV_OK here, so report the failure explicitly */
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psDevInfo->sFWInitFWAddr,
+	                      psDevInfo->psRGXFWIfInitMemDesc,
+	                      0, RFW_FWADDR_NOREF_FLAG);
+	*psRGXFWInitFWAddr = psDevInfo->sFWInitFWAddr;
+
+	/* FW coremem data */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	if ((0 != psDevInfo->sDevFeatureCfg.ui32MCMS) &&
+			(0 == (ui64ErnsBrns & FIX_HW_BRN_50767_BIT_MASK)))
+	{
+		IMG_BOOL bMetaDMA = psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_META_DMA_BIT_MASK;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+		if (bMetaDMA)
+		{
+			IMG_UINT64 ui64SecBufHandle;
+
+			PDUMPCOMMENT("Import secure buffer to store FW coremem data");
+			eError = DevmemImportTDSecureBuf(psDeviceNode,
+			                                 RGX_META_COREMEM_BSS_SIZE,
+			                                 OSGetPageShift(),
+			                                 uiMemAllocFlags,
+			                                 &psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+			                                 &ui64SecBufHandle);
+		}
+		else
+#endif
+		{
+			PDUMPCOMMENT("Allocate buffer to store FW coremem data");
+			eError = DevmemFwAllocate(psDevInfo,
+									  RGX_META_COREMEM_BSS_SIZE,
+									  uiMemAllocFlags,
+									  "FwCorememDataStore",
+									  &psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+		}
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXSetupFirmware: Failed to allocate coremem data store (%u)",
+					 eError));
+			goto fail;
+		}
+
+		RGXSetFirmwareAddress(&psRGXFWInitScratch->sCorememDataStore.pbyFWAddr,
+							  psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+							  0, RFW_FWADDR_NOREF_FLAG);
+
+		if (bMetaDMA)
+		{
+			RGXSetMetaDMAAddress(&psRGXFWInitScratch->sCorememDataStore,
+								 psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+								 &psRGXFWInitScratch->sCorememDataStore.pbyFWAddr,
+								 0);
+		}
+	}
+
+	/* init HW frame info */
+	PDUMPCOMMENT("Allocate rgxfw HW info buffer");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_HWRINFOBUF),
+							uiMemAllocFlags,
+							"FwHWInfoBuffer",
+							&psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for HW info (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_HWRINFOBUF),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sRGXFWIfHWRInfoBufCtl,
+						psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+						0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+									  (void **)&psDevInfo->psRGXFWIfHWRInfoBuf);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire HW recovery info buffer (%u)",
+				eError));
+		goto fail;
+	}
+
+	/* Might be uncached. Be conservative and use a DeviceMemSet */
+	OSDeviceMemSet(psDevInfo->psRGXFWIfHWRInfoBuf, 0, sizeof(RGXFWIF_HWRINFOBUF));
+
+	/* Allocate a sync for power management */
+	eError = SyncPrimContextCreate(psDevInfo->psDeviceNode,
+	                               &psDevInfo->hSyncPrimContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate sync primitive context with error (%u)", eError));
+		goto fail;
+	}
+
+	eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psDevInfo->psPowSyncPrim, "fw power ack");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate sync primitive with error (%u)", eError));
+		goto fail;
+	}
+
+	/* Setup Fault read register */
+	eError = RGXSetupFaultReadRegister(psDeviceNode, psRGXFWInitScratch);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup fault read register"));
+		goto fail;
+	}
+
+	/* Apply FIX_HW_BRN_37200 */
+	eError = RGXHwBrn37200(psDevInfo);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to apply HWBRN37200"));
+		goto fail;
+	}
+
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) &&
+	    !(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK))
+	{
+		ui32Temp = RGXFWIF_KCCB_NUMCMDS_LOG2_GPUVIRT_WITHOUT_FEATURE;
+	}
+	else
+	{
+		ui32Temp = RGXFWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
+	}
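+	/* A larger kernel CCB is presumably needed when virtualisation is active
+	 * without hardware GPU virtualisation support, since commands for all
+	 * OSIDs are then funnelled through this one software ring. */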
+	/*
+	 * Set up kernel CCB.
+	 */
+	eError = RGXSetupKernelCCB(psDevInfo,
+	                           psRGXFWInitScratch,
+	                           ui32Temp,
+	                           sizeof(RGXFWIF_KCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Kernel CCB"));
+		goto fail;
+	}
+
+	/*
+	 * Set up firmware CCB.
+	 */
+	eError = RGXSetupFirmwareCCB(psDevInfo,
+								 &psDevInfo->psFirmwareCCBCtl,
+								 &psDevInfo->psFirmwareCCBCtlMemDesc,
+								 &psDevInfo->psFirmwareCCB,
+								 &psDevInfo->psFirmwareCCBMemDesc,
+								 &psRGXFWInitScratch->psFirmwareCCBCtl,
+								 &psRGXFWInitScratch->psFirmwareCCB,
+								 RGXFWIF_FWCCB_NUMCMDS_LOG2,
+								 sizeof(RGXFWIF_FWCCB_CMD),
+								 "FwCCB");
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Firmware CCB"));
+		goto fail;
+	}
+	/* RD Power Island */
+	{
+		RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+		IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland;
+		IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) ||
+						(eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON);
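+
+		/* i.e. islanding is enabled when forced on, or when left at the
+		 * default and the system timing info enables it; any other setting
+		 * (e.g. a force-off value) leaves the flag clear. */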
+
+		ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0;
+	}
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST_V2;
+
+#if defined(SUPPORT_PDVFS)
+	/* Proactive DVFS depends on Workload Estimation */
+	psPDVFSOPPInfo = &(psRGXFWInitScratch->sPDVFSOPPInfo);
+	psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+
+	if (psDVFSDeviceCfg->pasOPPTable != NULL)
+	{
+		if (psDVFSDeviceCfg->ui32OPPTableSize >
+		    sizeof(psPDVFSOPPInfo->asOPPValues)/sizeof(psPDVFSOPPInfo->asOPPValues[0]))
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "RGXSetupFirmware: OPP Table too large:"
+			         " Size = %u, Maximum size = %lu",
+			         psDVFSDeviceCfg->ui32OPPTableSize,
+			         (unsigned long)(sizeof(psPDVFSOPPInfo->asOPPValues)/sizeof(psPDVFSOPPInfo->asOPPValues[0]))));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail;
+		}
+
+		OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues,
+						psDVFSDeviceCfg->pasOPPTable,
+						sizeof(psPDVFSOPPInfo->asOPPValues));
+		psPDVFSOPPInfo->ui32MaxOPPPoint =
+			(psDVFSDeviceCfg->ui32OPPTableSize) - 1;
+
+		ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS_V2;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Missing OPP Table"));
+	}
+#endif
+#endif
+
+
+
+	eError = RGXSetupOSConfig(psDevInfo, psRGXFWInitScratch, ui32ConfigFlags, ui32ConfigFlagsExt);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to set up the per OS configuration"));
+		goto fail;
+	}
+
+	psRGXFWInitScratch->eGPIOValidationMode = RGXFWIF_GPIO_VAL_OFF;
+#if defined(SUPPORT_VALIDATION)
+	{
+		IMG_INT32 ui32AppHintDefault;
+		IMG_INT32 ui32GPIOValidationMode;
+		void      *pvAppHintState = NULL;
+
+		/* Check AppHint for GPIO validation mode */
+		OSCreateKMAppHintState(&pvAppHintState);
+		ui32AppHintDefault = PVRSRV_APPHINT_GPIOVALIDATIONMODE;
+		OSGetKMAppHintUINT32(pvAppHintState,
+		                     GPIOValidationMode,
+		                     &ui32AppHintDefault,
+		                     &ui32GPIOValidationMode);
+		OSFreeKMAppHintState(pvAppHintState);
+		pvAppHintState = NULL;
+
+		if (ui32GPIOValidationMode >= RGXFWIF_GPIO_VAL_LAST)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Invalid GPIO validation mode: %d, only valid if smaller than %d. Disabling GPIO validation.",
+			         __func__,
+			         ui32GPIOValidationMode,
+			         RGXFWIF_GPIO_VAL_LAST));
+		}
+		else
+		{
+			psRGXFWInitScratch->eGPIOValidationMode = (RGXFWIF_GPIO_VAL_MODE) ui32GPIOValidationMode;
+		}
+	}
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	/*
+	 * Set up Workload Estimation firmware CCB.
+	 */
+	eError = RGXSetupFirmwareCCB(psDevInfo,
+								 &psDevInfo->psWorkEstFirmwareCCBCtl,
+								 &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+								 &psDevInfo->psWorkEstFirmwareCCB,
+								 &psDevInfo->psWorkEstFirmwareCCBMemDesc,
+								 &psRGXFWInitScratch->psWorkEstFirmwareCCBCtl,
+								 &psRGXFWInitScratch->psWorkEstFirmwareCCB,
+								 RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2,
+								 sizeof(RGXFWIF_WORKEST_FWCCB_CMD),
+								 "FwWEstCCB");
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Workload Estimation Firmware CCB"));
+		goto fail;
+	}
+#endif
+
+	/* Require a minimum amount of memory for the signature buffers */
+	if (ui32SignatureChecksBufSize < RGXFW_SIG_BUFFER_SIZE_MIN)
+	{
+		ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN;
+	}
+
+	/* Setup Signature and Checksum Buffers for TA and 3D */
+	eError = RGXFWSetupSignatureChecks(psDevInfo,
+	                                   &psDevInfo->psRGXFWSigTAChecksMemDesc,
+	                                   ui32SignatureChecksBufSize,
+	                                   &psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_TA],
+	                                   "TA");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup TA signature checks"));
+		goto fail;
+	}
+	psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize;
+
+	eError = RGXFWSetupSignatureChecks(psDevInfo,
+	                                   &psDevInfo->psRGXFWSig3DChecksMemDesc,
+	                                   ui32SignatureChecksBufSize,
+	                                   &psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_3D],
+	                                   "3D");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup 3D signature checks"));
+		goto fail;
+	}
+	psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize;
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+	{
+		eError = RGXFWSetupSignatureChecks(psDevInfo,
+										   &psDevInfo->psRGXFWSigRTChecksMemDesc,
+										   ui32SignatureChecksBufSize,
+										   &psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_RTU],
+										   "RTU");
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup RTU signature checks"));
+			goto fail;
+		}
+		psDevInfo->ui32SigRTChecksSize = ui32SignatureChecksBufSize;
+
+		eError = RGXFWSetupSignatureChecks(psDevInfo,
+										   &psDevInfo->psRGXFWSigSHChecksMemDesc,
+										   ui32SignatureChecksBufSize,
+										   &psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_SHG],
+										   "SHG");
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup SHG signature checks"));
+			goto fail;
+		}
+		psDevInfo->ui32SigSHChecksSize = ui32SignatureChecksBufSize;
+	}
+
+#if defined(RGXFW_ALIGNCHECKS)
+	eError = RGXFWSetupAlignChecks(psDevInfo,
+								&psRGXFWInitScratch->sAlignChecks,
+								pui32RGXFWAlignChecks,
+								ui32RGXFWAlignChecksArrLength);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup alignment checks"));
+		goto fail;
+	}
+#endif
+
+	psRGXFWInitScratch->ui32FilterFlags = ui32FilterFlags;
+
+
+	if (ui64ErnsBrns & FIX_HW_BRN_65273_BIT_MASK)
+	{
+		/* Fill in the remaining bits of the FW init data */
+		psRGXFWInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_BRN_65273_HEAP_BASE;
+		psRGXFWInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_BRN_65273_HEAP_BASE;
+	}
+	else if (ui64ErnsBrns & FIX_HW_BRN_52402_BIT_MASK)
+	{
+		/* Fill in the remaining bits of the FW init data */
+		psRGXFWInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_BRN_52402_HEAP_BASE;
+		psRGXFWInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_BRN_52402_HEAP_BASE;
+	}
+	else
+	{
+		/* Fill in the remaining bits of the FW init data */
+		psRGXFWInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE;
+		psRGXFWInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE;
+	}
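+	/*
+	 * Note the precedence above: a core affected by both BRN_65273 and
+	 * BRN_52402 gets the 65273 heap layout; only cores affected by neither
+	 * use the standard PDS/USC code heap bases.
+	 */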
+
+	psRGXFWInitScratch->sDPXControlStreamBase.uiAddr = RGX_DOPPLER_HEAP_BASE;
+	psRGXFWInitScratch->sResultDumpBase.uiAddr = RGX_DOPPLER_OVERFLOW_HEAP_BASE;
+	psRGXFWInitScratch->sRTUHeapBase.uiAddr = RGX_DOPPLER_HEAP_BASE;
+	psRGXFWInitScratch->sTDMTPUYUVCeoffsHeapBase.uiAddr = RGX_TDM_TPU_YUV_COEFFS_HEAP_BASE;
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+	{
+		psRGXFWInitScratch->ui32JonesDisableMask = ui32JonesDisableMask;
+	}
+	psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_SRVCFG_DISABLE_PDP_EN)
+			? IMG_FALSE : IMG_TRUE;
+	psRGXFWInitScratch->ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit;
+
+	psRGXFWInitScratch->eFirmwarePerf = eFirmwarePerf;
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SLC_VIVT_BIT_MASK)
+	{
+		eError = _AllocateSLC3Fence(psDevInfo, psRGXFWInitScratch);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate memory for SLC3Fence"));
+			goto fail;
+		}
+	}
+
+
+	if ((psDevInfo->sDevFeatureCfg.ui32META) &&
+			((ui32ConfigFlags & RGXFWIF_INICFG_METAT1_ENABLED) != 0))
+	{
+		/* Allocate a page for T1 stack */
+		eError = DevmemFwAllocate(psDevInfo,
+		                          RGX_META_STACK_SIZE,
+		                          RGX_FWCOMCTX_ALLOCFLAGS,
+		                          "FwMETAT1Stack",
+		                          & psDevInfo->psMETAT1StackMemDesc);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate T1 Stack"));
+			goto fail;
+		}
+
+		RGXSetFirmwareAddress(&psRGXFWInitScratch->sT1Stack,
+		                      psDevInfo->psMETAT1StackMemDesc,
+		                      0, RFW_FWADDR_NOREF_FLAG);
+
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXSetupFirmware: T1 Stack Frame allocated at %x",
+				 psRGXFWInitScratch->sT1Stack.ui32Addr));
+	}
+
+#if defined(SUPPORT_PDVFS)
+		/* Core clock rate */
+		uiMemAllocFlags =
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_UNCACHED |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+		eError = DevmemFwAllocate(psDevInfo,
+								  sizeof(IMG_UINT32),
+								  uiMemAllocFlags,
+								  "FwCoreClkRate",
+								  &psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate PDVFS core clock rate"));
+			goto fail;
+		}
+
+		RGXSetFirmwareAddress(&psRGXFWInitScratch->sCoreClockRate,
+							  psDevInfo->psRGXFWIFCoreClkRateMemDesc,
+							  0, RFW_FWADDR_NOREF_FLAG);
+
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXSetupFirmware: PDVFS core clock rate allocated at %x",
+				 psRGXFWInitScratch->sCoreClockRate.ui32Addr));
+
+		eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc,
+										  (void **)&psDevInfo->pui32RGXFWIFCoreClkRate);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire core clk rate (%u)",
+					eError));
+			goto fail;
+		}
+#endif
+
+	/* Timestamps */
+	uiMemAllocFlags =
+		PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+		PVRSRV_MEMALLOCFLAG_GPU_READABLE | /* XXX ?? */
+		PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+		PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+		PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+		PVRSRV_MEMALLOCFLAG_UNCACHED |
+		PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/*
+	  the timer query arrays
+	*/
+	PDUMPCOMMENT("Allocate timer query arrays (FW)");
+	eError = DevmemFwAllocate(psDevInfo,
+	                          sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES,
+	                          uiMemAllocFlags,
+	                          "FwStartTimesArray",
+	                          & psDevInfo->psStartTimeMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate start times array"));
+		goto fail;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psStartTimeMemDesc,
+	                                  (void **)& psDevInfo->pui64StartTimeById);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map start times array"));
+		goto fail;
+	}
+
+	eError = DevmemFwAllocate(psDevInfo,
+	                          sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES,
+	                          uiMemAllocFlags,
+	                          "FwEndTimesArray",
+	                          & psDevInfo->psEndTimeMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate end times array"));
+		goto fail;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psEndTimeMemDesc,
+	                                  (void **)& psDevInfo->pui64EndTimeById);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map end times array"));
+		goto fail;
+	}
+
+	eError = DevmemFwAllocate(psDevInfo,
+	                          sizeof(IMG_UINT32) * RGX_MAX_TIMER_QUERIES,
+	                          uiMemAllocFlags,
+	                          "FwCompletedOpsArray",
+	                          & psDevInfo->psCompletedMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate completed ops array"));
+		goto fail;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCompletedMemDesc,
+	                                  (void **)& psDevInfo->pui32CompletedById);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map completed ops array"));
+		goto fail;
+	}
+
+	/* FW trace control structure */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate rgxfw trace control structure");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_TRACEBUF),
+							uiMemAllocFlags,
+							"FwTraceCtlStruct",
+							&psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw trace (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_TRACEBUF),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sTraceBufCtl,
+						psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+						0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+									  (void **)&psDevInfo->psRGXFWIfTraceBuf);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel tracebuf ctl (%u)",
+				eError));
+		goto fail;
+	}
+
+	/* Set initial firmware log type/group(s) */
+	if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Invalid initial log type (0x%X)",ui32LogType));
+		goto fail;
+	}
+	psDevInfo->psRGXFWIfTraceBuf->ui32LogType = ui32LogType;
+
+#if defined (PDUMP)
+	/* When PDUMP is enabled, ALWAYS allocate on-demand trace buffer resource
+	 * (irrespective of loggroup(s) enabled), given that logtype/loggroups can
+	 * be set during PDump playback in logconfig, at any point of time */
+	eError = RGXTraceBufferInitOnDemandResources(psDevInfo);
+#else
+	/* Otherwise, allocate only if required */
+	if (RGXTraceBufferIsInitRequired(psDevInfo))
+	{
+		eError = RGXTraceBufferInitOnDemandResources(psDevInfo);
+	}
+	else
+	{
+		eError = PVRSRV_OK;
+	}
+#endif
+	PVR_LOGG_IF_ERROR(eError, "RGXTraceBufferInitOnDemandResources", fail);
+
+	/* Allocate shared buffer for GPU utilisation */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate shared buffer for GPU utilisation");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_GPU_UTIL_FWCB),
+							uiMemAllocFlags,
+							"FwGPUUtilisationBuffer",
+							&psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for GPU utilisation buffer ctl (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_GPU_UTIL_FWCB),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sGpuUtilFWCbCtl,
+						psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc,
+						0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc,
+									  (void **)&psDevInfo->psRGXFWIfGpuUtilFWCb);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel GPU utilisation buffer ctl (%u)",
+				eError));
+		goto fail;
+	}
+
+	/* Initialise GPU utilisation buffer */
+	psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord =
+	    RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(),RGXFWIF_GPU_UTIL_STATE_IDLE);
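+
+	/*
+	 * RGXFWIF_GPU_UTIL_MAKE_WORD packs the timestamp and the state into one
+	 * 64-bit word (the state presumably in the low-order bits) so the last
+	 * transition can be recorded with a single write.
+	 */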
+
+
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate rgxfw FW runtime configuration (FW)");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_RUNTIME_CFG),
+							uiMemAllocFlags,
+							"FwRuntimeCfg",
+							&psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for FW runtime configuration (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_RUNTIME_CFG),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sRuntimeCfg,
+						psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+						0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+									(void **)&psDevInfo->psRGXFWIfRuntimeCfg);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel FW runtime configuration (%u)",
+				eError));
+		goto fail;
+	}
+	/* HWPerf: Determine the size of the FW buffer */
+	if (ui32HWPerfFWBufSizeKB == 0 ||
+			ui32HWPerfFWBufSizeKB == RGXFW_HWPERF_L1_SIZE_DEFAULT)
+	{
+		/* Under pvrsrvctl, a size of 0 means the AppHint was not set (or was
+		 * set to zero); fall back to the default size from the driver
+		 * constant, without logging.
+		 */
+		psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_DEFAULT<<10;
+	}
+	else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MAX))
+	{
+		/* Size specified as an AppHint but it is too big */
+		PVR_DPF((PVR_DBG_WARNING,"RGXSetupFirmware: HWPerfFWBufSizeInKB value (%u) too big, using maximum (%u)",
+				ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MAX));
+		psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_MAX<<10;
+	}
+	else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MIN))
+	{
+		/* Size specified as in AppHint HWPerfFWBufSizeInKB */
+		PVR_DPF((PVR_DBG_WARNING,"RGXSetupFirmware: Using HWPerf FW buffer size of %u KB",
+				ui32HWPerfFWBufSizeKB));
+		psDevInfo->ui32RGXFWIfHWPerfBufSize = ui32HWPerfFWBufSizeKB<<10;
+	}
+	else
+	{
+		/* Size specified as an AppHint but it is too small */
+		PVR_DPF((PVR_DBG_WARNING,"RGXSetupFirmware: HWPerfFWBufSizeInKB value (%u) too small, using minimum (%u)",
+				ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN));
+		psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_MIN<<10;
+	}
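+
+	/*
+	 * Worked example of the clamping above: an in-range AppHint of, say,
+	 * 2048 KB yields 2048 << 10 bytes (2 MiB); values above the maximum or
+	 * at/below the minimum are clamped to RGXFW_HWPERF_L1_SIZE_MAX and
+	 * RGXFW_HWPERF_L1_SIZE_MIN respectively, with << 10 converting KB to
+	 * bytes in every case.
+	 */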
+
+	/* init HWPERF data */
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfRIdx = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfWIdx = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfWrapCount = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+	psRGXFWInitScratch->bDisableFilterHWPerfCustomCounter = (ui32ConfigFlags & RGXFWIF_INICFG_HWP_DISABLE_FILTER) ? IMG_TRUE : IMG_FALSE;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfUt = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfDropCount = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32FirstDropOrdinal = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32LastDropOrdinal = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32PowMonEnergy = 0;
+
+	/* Second-stage initialisation of HWPerf; hHWPerfLock was created in the
+	 * first stage. See the RGXRegisterDevice() call to RGXHWPerfInit(). */
+	if (psDevInfo->ui64HWPerfFilter == 0)
+	{
+		psDevInfo->ui64HWPerfFilter = ui64HWPerfFilter;
+		psRGXFWInitScratch->ui64HWPerfFilter = ui64HWPerfFilter;
+	}
+	else
+	{
+		/* The filter has already been modified. This can happen if pvr/gpu_tracing_on
+		 * was enabled. */
+		psRGXFWInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFilter;
+	}
+
+#if defined (PDUMP)
+	/* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources
+	 * (irrespective of HWPerf enabled or not), given that HWPerf can be
+	 * enabled during PDump playback via RTCONF at any point of time. */
+	eError = RGXHWPerfInitOnDemandResources(psDevInfo);
+#else
+	/* Otherwise, only allocate if HWPerf is enabled via apphint */
+	if (ui32ConfigFlags & RGXFWIF_INICFG_HWPERF_EN)
+	{
+		eError = RGXHWPerfInitOnDemandResources(psDevInfo);
+	}
+#endif
+	PVR_LOGG_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail);
+
+	RGXHWPerfInitAppHintCallbacks(psDeviceNode);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PDUMPCOMMENT("Allocate rgxfw register configuration structure");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_REG_CFG),
+							uiMemAllocFlags | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+							"FwRegisterConfigStructure",
+							&psDevInfo->psRGXFWIfRegCfgMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw register configurations (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_REG_CFG),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sRegCfg,
+						psDevInfo->psRGXFWIfRegCfgMemDesc,
+						0, RFW_FWADDR_NOREF_FLAG);
+#endif
+
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate rgxfw hwperfctl structure");
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+							ui32HWPerfCountersDataSize,
+							OSGetPageSize(),
+							uiMemAllocFlags,
+							"FwExHWPerfControlStructure",
+							&psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXInitHWPerfCounters: Failed to allocate %u bytes for fw hwperf control (%u)",
+				ui32HWPerfCountersDataSize,
+				eError));
+		goto fail;
+	}
+
+	eError = DevmemLocalGetImportHandle(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, (void**) ppsHWPerfPMR);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DevmemLocalGetImportHandle failed (%u)", eError));
+		goto fail;
+	}
+
+
+	RGXSetFirmwareAddress(&psRGXFWInitScratch->sHWPerfCtl,
+						psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+						0, 0);
+
+	/* Required info by FW to calculate the ActivePM idle timer latency */
+	{
+		RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+		RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+
+		psRGXFWInitScratch->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+		psRGXFWInitScratch->ui32ActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms;
+
+		/* Initialise variable runtime configuration to the system defaults */
+		psRuntimeCfg->ui32CoreClockSpeed = psRGXFWInitScratch->ui32InitialCoreClockSpeed;
+		psRuntimeCfg->ui32ActivePMLatencyms = psRGXFWInitScratch->ui32ActivePMLatencyms;
+		psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE;
+
+		/* Initialize the DefaultDustsNumInit Field to Max Dusts */
+		psRuntimeCfg->ui32DefaultDustsNumInit = MAX(1, (psDevInfo->sDevFeatureCfg.ui32NumClusters/2));
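+		/* e.g. a 4-cluster core yields 2 ("dusts" on Rogue being pairs of
+		 * clusters); the MAX() guards single-cluster configurations, where
+		 * ui32NumClusters/2 would otherwise round down to zero dusts. */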
+	}
+#if defined(PDUMP)
+	PDUMPCOMMENT("Dump initial state of FW runtime configuration");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+						0,
+						sizeof(RGXFWIF_RUNTIME_CFG),
+						PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	/* Initialize FW started flag */
+	psRGXFWInitScratch->bFirmwareStarted = IMG_FALSE;
+	psRGXFWInitScratch->ui32MarkerVal = 1;
+
+	/* Initialise the compatibility check data */
+	RGXFWIF_COMPCHECKS_BVNC_INIT(psRGXFWInitScratch->sRGXCompChecks.sFWBVNC);
+	RGXFWIF_COMPCHECKS_BVNC_INIT(psRGXFWInitScratch->sRGXCompChecks.sHWBVNC);
+
+	PDUMPCOMMENT("Dump RGXFW Init data");
+	if (!bEnableSignatureChecks)
+	{
+#if defined(PDUMP)
+		PDUMPCOMMENT("(to enable rgxfw signatures place the following line after the RTCONF line)");
+		DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfInitMemDesc,
+							offsetof(RGXFWIF_INIT, asSigBufCtl),
+							sizeof(RGXFWIF_SIGBUF_CTL)*(psDevInfo->sDevFeatureCfg.ui32MAXDMCount),
+							PDUMP_FLAGS_CONTINUOUS);
+#endif
+		psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0;
+		psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_TA].sBuffer.ui32Addr = 0x0;
+	}
+
+	for (dm = 0; dm < (psDevInfo->sDevFeatureCfg.ui32MAXDMCount); dm++)
+	{
+		psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmLockedUpCount[dm] = 0;
+		psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmOverranCount[dm] = 0;
+		psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmRecoveredCount[dm] = 0;
+		psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmFalseDetectCount[dm] = 0;
+	}
+
+	/*
+	 * BIF Tiling configuration
+	 */
+
+	psRGXFWInitScratch->eBifTilingMode = eBifTilingMode;
+
+	psRGXFWInitScratch->sBifTilingCfg[0].uiBase = RGX_BIF_TILING_HEAP_1_BASE;
+	psRGXFWInitScratch->sBifTilingCfg[0].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+	psRGXFWInitScratch->sBifTilingCfg[0].uiXStride = pui32BIFTilingXStrides[0];
+	psRGXFWInitScratch->sBifTilingCfg[1].uiBase = RGX_BIF_TILING_HEAP_2_BASE;
+	psRGXFWInitScratch->sBifTilingCfg[1].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+	psRGXFWInitScratch->sBifTilingCfg[1].uiXStride = pui32BIFTilingXStrides[1];
+	psRGXFWInitScratch->sBifTilingCfg[2].uiBase = RGX_BIF_TILING_HEAP_3_BASE;
+	psRGXFWInitScratch->sBifTilingCfg[2].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+	psRGXFWInitScratch->sBifTilingCfg[2].uiXStride = pui32BIFTilingXStrides[2];
+	psRGXFWInitScratch->sBifTilingCfg[3].uiBase = RGX_BIF_TILING_HEAP_4_BASE;
+	psRGXFWInitScratch->sBifTilingCfg[3].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+	psRGXFWInitScratch->sBifTilingCfg[3].uiXStride = pui32BIFTilingXStrides[3];
+
+	/* update the FW structure proper */
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+									  (void **)&psRGXFWInitActual);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel fw if ctl (%u)",
+				eError));
+		goto fail;
+	}
+
+	OSDeviceMemCopy(psRGXFWInitActual, psRGXFWInitScratch, sizeof(*psRGXFWInitActual));
+
+	/* We don't need access to the fw init data structure anymore */
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+	psRGXFWInitActual = NULL;
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Dump rgxfw hwperfctl structure");
+	DevmemPDumpLoadZeroMem(psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+	                       0,
+	                       ui32HWPerfCountersDataSize,
+	                       PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Dump rgxfw trace control structure");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+						0,
+						sizeof(RGXFWIF_TRACEBUF),
+						PDUMP_FLAGS_CONTINUOUS);
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PDUMPCOMMENT("Dump rgxfw register configuration buffer");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfRegCfgMemDesc,
+						0,
+						sizeof(RGXFWIF_REG_CFG),
+						PDUMP_FLAGS_CONTINUOUS);
+#endif
+	PDUMPCOMMENT("Dump rgxfw init structure");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfInitMemDesc,
+						0,
+						sizeof(RGXFWIF_INIT),
+						PDUMP_FLAGS_CONTINUOUS);
+	if ((0 != psDevInfo->sDevFeatureCfg.ui32MCMS) &&
+				(0 == (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & FIX_HW_BRN_50767_BIT_MASK)))
+	{
+		PDUMPCOMMENT("Dump rgxfw coremem data store");
+		DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+							0,
+							RGX_META_COREMEM_BSS_SIZE,
+							PDUMP_FLAGS_CONTINUOUS);
+	}
+
+	PDUMPCOMMENT("RTCONF: run-time configuration");
+
+
+	/* Dump the config options so they can be edited.
+	 *
+	 * FIXME: Need new DevmemPDumpWRW API which writes a WRW to load ui32ConfigFlags
+	 */
+	PDUMPCOMMENT("(Set the FW config options here)");
+	PDUMPCOMMENT("( Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_TA_EN);
+	PDUMPCOMMENT("( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_3D_EN);
+	PDUMPCOMMENT("( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_CDM_EN);
+	PDUMPCOMMENT("( Ctx Switch Rand mode: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_MODE_RAND);
+	PDUMPCOMMENT("( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN);
+	PDUMPCOMMENT("( Use Extended FW Config flags: 0x%08x)", RGXFWIF_INICFG_USE_EXTENDED);
+	PDUMPCOMMENT("( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST);
+	PDUMPCOMMENT("( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN);
+	PDUMPCOMMENT("( Enable HWR: 0x%08x)", RGXFWIF_INICFG_HWR_EN);
+	PDUMPCOMMENT("( Check MList: 0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN);
+	PDUMPCOMMENT("( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN);
+	PDUMPCOMMENT("( Enable HWPerf Polling Perf Counter: 0x%08x)", RGXFWIF_INICFG_POLL_COUNTERS_EN);
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK)
+	{
+		PDUMPCOMMENT("( Ctx Switch Object mode Index: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX);
+		PDUMPCOMMENT("( Ctx Switch Object mode Instance: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE);
+		PDUMPCOMMENT("( Ctx Switch Object mode List: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST);
+	}
+
+	PDUMPCOMMENT("( Enable SHG Bypass mode: 0x%08x)", RGXFWIF_INICFG_SHG_BYPASS_EN);
+	PDUMPCOMMENT("( Enable RTU Bypass mode: 0x%08x)", RGXFWIF_INICFG_RTU_BYPASS_EN);
+	PDUMPCOMMENT("( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN);
+	PDUMPCOMMENT("( Assert on TA Out-of-Memory: 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY);
+	PDUMPCOMMENT("( Disable HWPerf custom counter filter: 0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER);
+	PDUMPCOMMENT("( Enable HWPerf custom performance timer: 0x%08x)", RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN);
+	PDUMPCOMMENT("( Enable CDM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN);
+	PDUMPCOMMENT("( Enable Ctx Switch profile mode: 0x%08x (none=b'000, fast=b'001, medium=b'010, slow=b'011, nodelay=b'100))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK);
+	PDUMPCOMMENT("( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP);
+	PDUMPCOMMENT("( Enable Meta T1 running main code: 0x%08x)", RGXFWIF_INICFG_METAT1_MAIN);
+	PDUMPCOMMENT("( Enable Meta T1 running dummy code: 0x%08x)", RGXFWIF_INICFG_METAT1_DUMMY);
+	PDUMPCOMMENT("( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER);
+
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfOSConfigDesc,
+							offsetof(RGXFWIF_OS_CONFIG, ui32ConfigFlags),
+							ui32ConfigFlags,
+							PDUMP_FLAGS_CONTINUOUS);
+
+	if (ui32ConfigFlagsExt > 0)
+	{
+		PDUMPCOMMENT("( Extended FW config options start here )");
+		PDUMPCOMMENT("( Lower Priority Ctx Switch  2D Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TDM);
+		PDUMPCOMMENT("( Lower Priority Ctx Switch  TA Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TA);
+		PDUMPCOMMENT("( Lower Priority Ctx Switch  3D Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_3D);
+		PDUMPCOMMENT("( Lower Priority Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_CDM);
+		PDUMPCOMMENT("( Lower Priority Ctx Switch SHG Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_SHG);
+
+		DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfOSConfigDesc,
+							offsetof(RGXFWIF_OS_CONFIG, ui32ConfigFlagsExt),
+							ui32ConfigFlagsExt,
+							PDUMP_FLAGS_CONTINUOUS);
+	}
+
+	/* default: no filter */
+	psRGXFWInitScratch->sPIDFilter.eMode = RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT;
+	psRGXFWInitScratch->sPIDFilter.asItems[0].uiPID = 0;
+
+	PDUMPCOMMENT("( PID filter type: %X=INCLUDE_ALL_EXCEPT, %X=EXCLUDE_ALL_EXCEPT)",
+							RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+							RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT);
+
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+							offsetof(RGXFWIF_INIT, sPIDFilter.eMode),
+							psRGXFWInitScratch->sPIDFilter.eMode,
+							PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))",
+									RGXFWIF_PID_FILTER_MAX_NUM_PIDS);
+	{
+		IMG_UINT32 i;
+
+		/* generate a few WRWs in the pdump stream as an example */
+		for (i = 0; i < MIN(RGXFWIF_PID_FILTER_MAX_NUM_PIDS, 8); i++)
+		{
+			/*
+			 * Some compilers cannot cope with offsetof() here because the array
+			 * index is a variable rather than a constant expression; typical
+			 * compiler output is "expression must have a constant value". The
+			 * offsets are therefore computed by hand below.
+			 */
+			const IMG_DEVMEM_OFFSET_T uiPIDOff
+			= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_INIT *)0)->sPIDFilter.asItems[i].uiPID);
+
+			const IMG_DEVMEM_OFFSET_T uiOSIDOff
+			= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_INIT *)0)->sPIDFilter.asItems[i].ui32OSID);
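+			/* Each of the two expressions above is equivalent to
+			 * offsetof(RGXFWIF_INIT, sPIDFilter.asItems[i].uiPID) (resp.
+			 * ui32OSID), expanded manually so that i may be a runtime value.
+			 */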
+
+			PDUMPCOMMENT("(PID and OSID pair %u)", i);
+
+			PDUMPCOMMENT("(PID)");
+			DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+						uiPIDOff,
+						0,
+						PDUMP_FLAGS_CONTINUOUS);
+
+			PDUMPCOMMENT("(OSID)");
+			DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+						uiOSIDOff,
+						0,
+						PDUMP_FLAGS_CONTINUOUS);
+		}
+	}
+
+	/*
+	 * Dump the log config so it can be edited.
+	 */
+	PDUMPCOMMENT("(Set the log config here)");
+	PDUMPCOMMENT("( Log Type: set bit 0 for TRACE, reset for TBI)");
+	PDUMPCOMMENT("( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN);
+	PDUMPCOMMENT("( MTS Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MTS);
+	PDUMPCOMMENT("( CLEANUP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CLEANUP);
+	PDUMPCOMMENT("( CSW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CSW);
+	PDUMPCOMMENT("( BIF Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_BIF);
+	PDUMPCOMMENT("( PM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_PM);
+	PDUMPCOMMENT("( RTD Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RTD);
+	PDUMPCOMMENT("( SPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SPM);
+	PDUMPCOMMENT("( POW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_POW);
+	PDUMPCOMMENT("( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR);
+	PDUMPCOMMENT("( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP);
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+	{
+		PDUMPCOMMENT("( RPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RPM);
+	}
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_META_DMA_BIT_MASK)
+	{
+		PDUMPCOMMENT("( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA);
+	}
+	PDUMPCOMMENT("( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG);
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+							offsetof(RGXFWIF_TRACEBUF, ui32LogType),
+							psDevInfo->psRGXFWIfTraceBuf->ui32LogType,
+							PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Set the HWPerf Filter config here");
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfInitMemDesc,
+						offsetof(RGXFWIF_INIT, ui64HWPerfFilter),
+						psRGXFWInitScratch->ui64HWPerfFilter,
+						PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PDUMPCOMMENT("(Number of registers configurations for types(byte index) : pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), tla(%d), TDM(%d))",\
+					RGXFWIF_REG_CFG_TYPE_PWR_ON,\
+					RGXFWIF_REG_CFG_TYPE_DUST_CHANGE,\
+					RGXFWIF_REG_CFG_TYPE_TA,\
+					RGXFWIF_REG_CFG_TYPE_3D,\
+					RGXFWIF_REG_CFG_TYPE_CDM,\
+					RGXFWIF_REG_CFG_TYPE_TLA,\
+					RGXFWIF_REG_CFG_TYPE_TDM);
+
+	{
+		IMG_UINT32 i;
+
+		/*
+		 * Write 32 bits in each iteration, as required by the PDUMP WRW command.
+		 */
+		for (i = 0; i < RGXFWIF_REG_CFG_TYPE_ALL; i += sizeof(IMG_UINT32))
+		{
+			DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+									offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[i]),
+									0,
+									PDUMP_FLAGS_CONTINUOUS);
+		}
+	}
+
+	PDUMPCOMMENT("(Set registers here: address, mask, value)");
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+							offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Addr),
+							0,
+							PDUMP_FLAGS_CONTINUOUS);
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+							offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Mask),
+							0,
+							PDUMP_FLAGS_CONTINUOUS);
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+							offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Value),
+							0,
+							PDUMP_FLAGS_CONTINUOUS);
+#endif /* SUPPORT_USER_REGISTER_CONFIGURATION */
+#endif /* PDUMP */
+
+	/* Perform additional virtualisation initialisation */
+	eError = RGXVzSetupFirmware(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed RGXVzSetupFirmware"));
+		goto fail;
+	}
+
+	OSFreeMem(psRGXFWInitScratch);
+
+	psDevInfo->bFirmwareInitialised = IMG_TRUE;
+
+	return PVRSRV_OK;
+
+fail:
+	if (psDevInfo->psRGXFWIfInitMemDesc != NULL && psRGXFWInitActual != NULL)
+	{
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+	}
+
+	if (psRGXFWInitScratch)
+	{
+		OSFreeMem(psRGXFWInitScratch);
+	}
+
+	RGXFreeFirmware(psDevInfo);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXFreeFirmware
+
+ @Description
+
+ Frees all the firmware-related allocations
+
+ @Input psDevInfo
+
+ @Return void
+
+******************************************************************************/
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO	*psDevInfo)
+{
+	IMG_UINT64	ui64ErnsBrns = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+
+	psDevInfo->bFirmwareInitialised = IMG_FALSE;
+
+	RGXVzFreeFirmware(psDevInfo->psDeviceNode);
+
+	RGXFreeKernelCCB(psDevInfo);
+
+	RGXFreeFirmwareCCB(psDevInfo,
+					   &psDevInfo->psFirmwareCCBCtl,
+					   &psDevInfo->psFirmwareCCBCtlMemDesc,
+					   &psDevInfo->psFirmwareCCB,
+					   &psDevInfo->psFirmwareCCBMemDesc);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	RGXFreeFirmwareCCB(psDevInfo,
+					   &psDevInfo->psWorkEstFirmwareCCBCtl,
+					   &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+					   &psDevInfo->psWorkEstFirmwareCCB,
+					   &psDevInfo->psWorkEstFirmwareCCBMemDesc);
+#endif
+
+#if defined(RGXFW_ALIGNCHECKS)
+	if (psDevInfo->psRGXFWAlignChecksMemDesc)
+	{
+		RGXFWFreeAlignChecks(psDevInfo);
+	}
+#endif
+
+	if (psDevInfo->psRGXFWIfOSConfigDesc)
+	{
+		if (psDevInfo->psFWIfOSConfig)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfOSConfigDesc);
+			psDevInfo->psFWIfOSConfig = NULL;
+		}
+
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfOSConfigDesc);
+		psDevInfo->psRGXFWIfOSConfigDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWSigTAChecksMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSigTAChecksMemDesc);
+		psDevInfo->psRGXFWSigTAChecksMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWSig3DChecksMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSig3DChecksMemDesc);
+		psDevInfo->psRGXFWSig3DChecksMemDesc = NULL;
+	}
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+	{
+		if (psDevInfo->psRGXFWSigRTChecksMemDesc)
+		{
+			DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSigRTChecksMemDesc);
+			psDevInfo->psRGXFWSigRTChecksMemDesc = NULL;
+		}
+
+		if (psDevInfo->psRGXFWSigSHChecksMemDesc)
+		{
+			DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSigSHChecksMemDesc);
+			psDevInfo->psRGXFWSigSHChecksMemDesc = NULL;
+		}
+	}
+
+	if (ui64ErnsBrns & FIX_HW_BRN_37200_BIT_MASK)
+	{
+		if (psDevInfo->psRGXFWHWBRN37200MemDesc)
+		{
+			DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWHWBRN37200MemDesc);
+			DevmemFree(psDevInfo->psRGXFWHWBRN37200MemDesc);
+			psDevInfo->psRGXFWHWBRN37200MemDesc = NULL;
+		}
+	}
+
+	RGXSetupFaultReadRegisterRollback(psDevInfo);
+
+	if (psDevInfo->psPowSyncPrim != NULL)
+	{
+		SyncPrimFree(psDevInfo->psPowSyncPrim);
+		psDevInfo->psPowSyncPrim = NULL;
+	}
+
+	if (psDevInfo->hSyncPrimContext != 0)
+	{
+		SyncPrimContextDestroy(psDevInfo->hSyncPrimContext);
+		psDevInfo->hSyncPrimContext = 0;
+	}
+
+	if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+			psDevInfo->psRGXFWIfGpuUtilFWCb = NULL;
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+		psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL;
+	}
+
+	RGXHWPerfDeinit(psDevInfo);
+
+	if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfRuntimeCfg != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+			psDevInfo->psRGXFWIfRuntimeCfg = NULL;
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+		psDevInfo->psRGXFWIfRuntimeCfgMemDesc = NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfHWRInfoBuf != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+			psDevInfo->psRGXFWIfHWRInfoBuf = NULL;
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+		psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc = NULL;
+	}
+
+	if ((0 != psDevInfo->sDevFeatureCfg.ui32MCMS) &&
+	    (0 == (ui64ErnsBrns & FIX_HW_BRN_50767_BIT_MASK)))
+	{
+		if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+		{
+			DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+			psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+		}
+	}
+
+	if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfTraceBuf != NULL)
+		{
+			/* first deinit/free the tracebuffer allocation */
+			RGXTraceBufferDeinit(psDevInfo);
+
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+			psDevInfo->psRGXFWIfTraceBuf = NULL;
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+		psDevInfo->psRGXFWIfTraceBufCtlMemDesc = NULL;
+	}
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	if (psDevInfo->psRGXFWIfRegCfgMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfRegCfgMemDesc);
+		psDevInfo->psRGXFWIfRegCfgMemDesc = NULL;
+	}
+#endif
+	if (psDevInfo->psRGXFWIfHWPerfCountersMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+		psDevInfo->psRGXFWIfHWPerfCountersMemDesc = NULL;
+	}
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SLC_VIVT_BIT_MASK)
+	{
+		_FreeSLC3Fence(psDevInfo);
+	}
+
+	if ((psDevInfo->sDevFeatureCfg.ui32META) && (psDevInfo->psMETAT1StackMemDesc))
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psMETAT1StackMemDesc);
+		psDevInfo->psMETAT1StackMemDesc = NULL;
+	}
+
+#if defined(SUPPORT_PDVFS)
+	if (psDevInfo->psRGXFWIFCoreClkRateMemDesc)
+	{
+		if (psDevInfo->pui32RGXFWIFCoreClkRate != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+			psDevInfo->pui32RGXFWIFCoreClkRate = NULL;
+		}
+
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+		psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL;
+	}
+#endif
+
+	if (psDevInfo->psRGXFWIfInitMemDesc)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfInitMemDesc);
+		psDevInfo->psRGXFWIfInitMemDesc = NULL;
+	}
+
+	if (psDevInfo->psCompletedMemDesc)
+	{
+		if (psDevInfo->pui32CompletedById)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psCompletedMemDesc);
+			psDevInfo->pui32CompletedById = NULL;
+		}
+		DevmemFwFree(psDevInfo, psDevInfo->psCompletedMemDesc);
+		psDevInfo->psCompletedMemDesc = NULL;
+	}
+	if (psDevInfo->psEndTimeMemDesc)
+	{
+		if (psDevInfo->pui64EndTimeById)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psEndTimeMemDesc);
+			psDevInfo->pui64EndTimeById = NULL;
+		}
+
+		DevmemFwFree(psDevInfo, psDevInfo->psEndTimeMemDesc);
+		psDevInfo->psEndTimeMemDesc = NULL;
+	}
+	if (psDevInfo->psStartTimeMemDesc)
+	{
+		if (psDevInfo->pui64StartTimeById)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psStartTimeMemDesc);
+			psDevInfo->pui64StartTimeById = NULL;
+		}
+
+		DevmemFwFree(psDevInfo, psDevInfo->psStartTimeMemDesc);
+		psDevInfo->psStartTimeMemDesc = NULL;
+	}
+}
+
+
+/******************************************************************************
+ FUNCTION	: RGXAcquireKernelCCBSlot
+
+ PURPOSE	: Attempts to obtain a slot in the Kernel CCB
+
+ PARAMETERS	: psKCCBCtrlMemDesc - memdesc for the kernel CCB control
+			: psKCCBCtl - kernel CCB control structure
+			: pui32Offset - receives the write offset of the acquired slot
+
+ RETURNS	: PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXAcquireKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc,
+											RGXFWIF_CCB_CTL	*psKCCBCtl,
+											IMG_UINT32			*pui32Offset)
+{
+	IMG_UINT32	ui32OldWriteOffset, ui32NextWriteOffset;
+
+	ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+	ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;
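+	/* ui32WrapMask is presumably the CCB size minus one (a power of two),
+	 * so the AND implements wrap-around of the write offset.
+	 */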
+
+	/* Note: The MTS can queue up to 255 kicks (254 pending kicks and 1 executing kick),
+	 * hence the kernel CCB should not queue more than 254 commands.
+	 */
+	PVR_ASSERT(psKCCBCtl->ui32WrapMask < 255);
+
+#if defined(PDUMP)
+	/* Wait for sufficient CCB space to become available */
+	PDUMPCOMMENTWITHFLAGS(0, "Wait for kCCB woff=%u", ui32NextWriteOffset);
+	DevmemPDumpCBP(psKCCBCtrlMemDesc,
+	               offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+	               ui32NextWriteOffset,
+	               1,
+	               (psKCCBCtl->ui32WrapMask + 1));
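+	/* The CBP entry makes a pdump replay wait here until the recorded read
+	 * offset has advanced enough to free a slot, mirroring the live polling
+	 * loop below.
+	 */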
+#endif
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset)
+		{
+			*pui32Offset = ui32NextWriteOffset;
+			return PVRSRV_OK;
+		}
+		/*
+		 * The following sanity check doesn't impact performance,
+		 * since the CPU has to wait for the GPU anyway (full kernel CCB).
+		 */
+		if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+		{
+			return PVRSRV_ERROR_KERNEL_CCB_FULL;
+		}
+
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	/* Time out on waiting for CCB space */
+	return PVRSRV_ERROR_KERNEL_CCB_FULL;
+}
+
+
+PVRSRV_ERROR RGXSendCommandWithPowLock(PVRSRV_RGXDEV_INFO	*psDevInfo,
+										 RGXFWIF_DM			eKCCBType,
+										 RGXFWIF_KCCB_CMD	*psKCCBCmd,
+										 IMG_UINT32			ui32CmdSize,
+										 IMG_UINT32			ui32PDumpFlags)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+
+	/* Ensure Rogue is powered up before kicking MTS */
+	eError = PVRSRVPowerLock(psDeviceNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandWithPowLock: failed to acquire powerlock (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+
+		goto _PVRSRVPowerLock_Exit;
+	}
+
+	PDUMPPOWCMDSTART();
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+										 PVRSRV_DEV_POWER_STATE_ON,
+										 IMG_FALSE);
+	PDUMPPOWCMDEND();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandWithPowLock: failed to transition Rogue to ON (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+
+		goto _PVRSRVSetDevicePowerStateKM_Exit;
+	}
+
+	eError = RGXSendCommand(psDevInfo, eKCCBType,  psKCCBCmd, ui32CmdSize, ui32PDumpFlags);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSendCommandWithPowLock: failed to schedule command (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+#if defined(DEBUG)
+		/* PVRSRVDebugRequest must be called without powerlock */
+		PVRSRVPowerUnlock(psDeviceNode);
+		PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+		goto _PVRSRVPowerLock_Exit;
+#endif
+	}
+
+_PVRSRVSetDevicePowerStateKM_Exit:
+	PVRSRVPowerUnlock(psDeviceNode);
+
+_PVRSRVPowerLock_Exit:
+	return eError;
+}
+
+static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO	*psDevInfo,
+								 RGXFWIF_DM			eKCCBType,
+								 RGXFWIF_KCCB_CMD	*psKCCBCmd,
+								 IMG_UINT32			ui32CmdSize,
+								 IMG_UINT32             uiPdumpFlags)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_DEVICE_NODE	*psDeviceNode = psDevInfo->psDeviceNode;
+	RGXFWIF_CCB_CTL		*psKCCBCtl = psDevInfo->psKernelCCBCtl;
+	IMG_UINT8			*pui8KCCB = psDevInfo->psKernelCCB;
+	IMG_UINT32			ui32NewWriteOffset;
+	IMG_UINT32			ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+
+#if !defined(PDUMP)
+	PVR_UNREFERENCED_PARAMETER(uiPdumpFlags);
+#else
+	IMG_BOOL bIsInCaptureRange;
+	IMG_BOOL bPdumpEnabled;
+	IMG_BOOL bPDumpPowTrans = PDUMPPOWCMDINTRANS();
+
+	PDumpIsCaptureFrameKM(&bIsInCaptureRange);
+	bPdumpEnabled = (bIsInCaptureRange || PDUMP_IS_CONTINUOUS(uiPdumpFlags)) && !bPDumpPowTrans;
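+	/* Only emit pdump data when inside a capture frame or when the caller
+	 * requested continuous capture, and never while a pdump power transition
+	 * is in progress.
+	 */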
+
+	/* in capture range */
+	if (bPdumpEnabled)
+	{
+		if (!psDevInfo->bDumpedKCCBCtlAlready)
+		{
+			/* entering capture range */
+			psDevInfo->bDumpedKCCBCtlAlready = IMG_TRUE;
+
+			/* wait for firmware to catch up */
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXSendCommandRaw: waiting on fw to catch-up, roff: %d, woff: %d",
+						psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset));
+			PVRSRVPollForValueKM(&psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset, 0xFFFFFFFF);
+
+			/* Dump Init state of Kernel CCB control (read and write offset) */
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Initial state of kernel CCB Control, roff: %d, woff: %d",
+						psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset);
+
+			DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
+					0,
+					sizeof(RGXFWIF_CCB_CTL),
+					PDUMP_FLAGS_CONTINUOUS);
+		}
+	}
+#endif
+
+	psKCCBCmd->eDM = eKCCBType;
+
+	PVR_ASSERT(ui32CmdSize == psKCCBCtl->ui32CmdSize);
+	if (!OSLockIsLocked(psDeviceNode->hPowerLock))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSendCommandRaw called without power lock held!"));
+		PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock));
+	}
+
+	/*
+	 * Acquire a slot in the CCB.
+	 */
+	eError = RGXAcquireKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, psKCCBCtl, &ui32NewWriteOffset);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSendCommandRaw failed to acquire CCB slot. Type:%u Error:%u",
+				eKCCBType, eError));
+		goto _RGXSendCommandRaw_Exit;
+	}
+
+	/*
+	 * Copy the command into the CCB.
+	 */
+	OSDeviceMemCopy(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize],
+			  psKCCBCmd, psKCCBCtl->ui32CmdSize);
+
+	/* ensure kCCB data is written before the offsets */
+	OSWriteMemoryBarrier();
+
+	/* Move past the current command */
+	psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset;
+	/* Force a read-back to memory to avoid posted writes on certain buses */
+	(void) psKCCBCtl->ui32WriteOffset;
+
+
+#if defined(PDUMP)
+	/* in capture range */
+	if (bPdumpEnabled)
+	{
+		/* Dump new Kernel CCB content */
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump kCCB cmd for DM %d, woff = %d",
+		                      eKCCBType,
+		                      ui32OldWriteOffset);
+		DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc,
+				ui32OldWriteOffset * psKCCBCtl->ui32CmdSize,
+				psKCCBCtl->ui32CmdSize,
+				PDUMP_FLAGS_CONTINUOUS);
+
+		/* Dump new kernel CCB write offset */
+		PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "Dump kCCBCtl woff (added new cmd for DM %d): %d",
+		                      eKCCBType,
+		                      ui32NewWriteOffset);
+		DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
+							   offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
+							   sizeof(IMG_UINT32),
+							   uiPdumpFlags);
+
+		/* mimic the read-back of the write from above */
+		DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc,
+		                       offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
+		                       ui32NewWriteOffset,
+		                       0xFFFFFFFF,
+		                       PDUMP_POLL_OPERATOR_EQUAL,
+		                       uiPdumpFlags);
+
+	}
+
+	/* out of capture range */
+	if (!bPdumpEnabled)
+	{
+		eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandRaw: problem draining kCCB (%d)", eError));
+			goto _RGXSendCommandRaw_Exit;
+		}
+	}
+#endif
+
+
+	PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "MTS kick for kernel CCB");
+	/*
+	 * Kick the MTS to schedule the firmware.
+	 */
+	{
+		IMG_UINT32 ui32MTSRegVal;
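+		/* When virtualisation is provided by the driver rather than by the
+		 * GPU, the target OSID is folded into the DM field of the MTS
+		 * schedule value, presumably so the kick is attributed to this
+		 * driver's OSID.
+		 */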
+
+		if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) &&
+		    !(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK))
+		{
+			ui32MTSRegVal = ((RGXFWIF_DM_GP + PVRSRV_VZ_DRIVER_OSID) & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_COUNTED;
+		}
+		else
+		{
+			ui32MTSRegVal = (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_COUNTED;
+		}
+
+
+		__MTSScheduleWrite(psDevInfo, ui32MTSRegVal);
+
+		PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_MTS_SCHEDULE, ui32MTSRegVal, uiPdumpFlags);
+	}
+
+#if defined (NO_HARDWARE)
+	/* keep the roff updated because fw isn't there to update it */
+	psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset;
+#endif
+
+_RGXSendCommandRaw_Exit:
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXSendCommand(PVRSRV_RGXDEV_INFO	*psDevInfo,
+                            RGXFWIF_DM			eKCCBType,
+                            RGXFWIF_KCCB_CMD	*psKCCBCmd,
+                            IMG_UINT32		ui32CmdSize,
+                            IMG_UINT32		uiPdumpFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	DLLIST_NODE *psNode, *psNext;
+	RGX_DEFERRED_KCCB_CMD *psTempDeferredKCCBCmd;
+
+	/* Check if there is any deferred KCCB command before sending the command passed as argument */
+	dllist_foreach_node(&psDevInfo->sKCCBDeferredCommandsListHead, psNode, psNext)
+	{
+		psTempDeferredKCCBCmd = IMG_CONTAINER_OF(psNode, RGX_DEFERRED_KCCB_CMD, sListNode);
+		/* For every deferred KCCB command, try to send it */
+		eError = RGXSendCommandRaw(psTempDeferredKCCBCmd->psDevInfo,
+                                           psTempDeferredKCCBCmd->eDM,
+                                           &(psTempDeferredKCCBCmd->sKCCBcmd),
+                                           sizeof(psTempDeferredKCCBCmd->sKCCBcmd),
+                                           psTempDeferredKCCBCmd->uiPdumpFlags);
+		if (eError != PVRSRV_OK)
+		{
+			goto _exit;
+		}
+		/* Remove from the deferred list the sent deferred KCCB command */
+		dllist_remove_node(psNode);
+		OSFreeMem(psTempDeferredKCCBCmd);
+	}
+
+	eError = RGXSendCommandRaw(psDevInfo,
+                                   eKCCBType,
+                                   psKCCBCmd,
+                                   ui32CmdSize,
+                                   uiPdumpFlags);
+
+
+_exit:
+	/*
+	 * If the KCCB was full and we failed to enqueue either one of the deferred
+	 * commands or the command passed as argument, insert the latter into the
+	 * deferred commands list.
+	 * The deferred commands will also be flushed eventually by:
+	 *  - one more KCCB command sent for any DM
+	 *  - the watchdog thread
+	 *  - the power off sequence
+	 */
+	if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)
+	{
+		RGX_DEFERRED_KCCB_CMD *psDeferredCommand;
+
+		psDeferredCommand = OSAllocMem(sizeof(*psDeferredCommand));
+
+		if (!psDeferredCommand)
+		{
+			PVR_DPF((PVR_DBG_WARNING,"Deferring a KCCB command failed: allocation failure: requesting retry "));
+			eError = PVRSRV_ERROR_RETRY;
+		}
+		else
+		{
+			psDeferredCommand->sKCCBcmd = *psKCCBCmd;
+			psDeferredCommand->eDM = eKCCBType;
+			psDeferredCommand->uiPdumpFlags = uiPdumpFlags;
+			psDeferredCommand->psDevInfo = psDevInfo;
+
+			PVR_DPF((PVR_DBG_WARNING,"Deferring a KCCB command for DM %d" ,eKCCBType));
+			dllist_add_to_tail(&(psDevInfo->sKCCBDeferredCommandsListHead), &(psDeferredCommand->sListNode));
+
+			eError = PVRSRV_OK;
+		}
+	}
+
+	return eError;
+
+}
+
+
+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hCmdCompHandle;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	OSScheduleMISR(psDevInfo->hProcessQueuesMISR);
+}
+
+/*!
+******************************************************************************
+
+ @Function	_RGXScheduleProcessQueuesMISR
+
+ @Description - Sends an uncounted kick to all the DMs (the FW will process
+				the queues of all the DMs)
+******************************************************************************/
+static void _RGXScheduleProcessQueuesMISR(void *pvData)
+{
+	PVRSRV_DEVICE_NODE     *psDeviceNode = pvData;
+	PVRSRV_RGXDEV_INFO     *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR           eError;
+	PVRSRV_DEV_POWER_STATE ePowerState;
+
+	/* We don't need to acquire the BridgeLock as this power transition won't
+	   send a command to the FW */
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXScheduleProcessQueuesKM: failed to acquire powerlock (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+
+		return;
+	}
+
+	/* Check whether it's worth waking up the GPU */
+	eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) &&
+		(eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF))
+	{
+		/* For now, guest drivers will always wake-up the GPU */
+		RGXFWIF_GPU_UTIL_FWCB  *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+		IMG_BOOL               bGPUHasWorkWaiting;
+
+		bGPUHasWorkWaiting =
+		    (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED);
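+		/* BLOCKED presumably means the FW went idle with work still queued,
+		 * i.e. powering the GPU back up would let it make progress.
+		 */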
+
+		if (!bGPUHasWorkWaiting)
+		{
+			/* all queues are empty, don't wake up the GPU */
+			PVRSRVPowerUnlock(psDeviceNode);
+			return;
+		}
+	}
+
+	PDUMPPOWCMDSTART();
+	/* wake up the GPU */
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+										 PVRSRV_DEV_POWER_STATE_ON,
+										 IMG_FALSE);
+	PDUMPPOWCMDEND();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXScheduleProcessQueuesKM: failed to transition Rogue to ON (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+
+		PVRSRVPowerUnlock(psDeviceNode);
+		return;
+	}
+
+	/* uncounted kick to the FW */
+	{
+		IMG_UINT32 ui32MTSRegVal;
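+		/* A non-counted kick asks the FW to scan all DM queues; presumably it
+		 * does not consume one of the up-to-255 counted MTS kicks (see the
+		 * note in RGXAcquireKernelCCBSlot), since no new kernel CCB command
+		 * was queued here.
+		 */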
+
+		if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) &&
+			!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK))
+		{
+			ui32MTSRegVal = ((RGXFWIF_DM_GP + PVRSRV_VZ_DRIVER_OSID) & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED;
+		}
+		else
+		{
+			ui32MTSRegVal = (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED;
+		}
+
+		HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED);
+		__MTSScheduleWrite(psDevInfo, ui32MTSRegVal);
+	}
+
+	PVRSRVPowerUnlock(psDeviceNode);
+}
+
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	return OSInstallMISR(phMISR,
+	                     _RGXScheduleProcessQueuesMISR,
+	                     psDeviceNode);
+}
+
+/*!
+******************************************************************************
+
+ @Function	RGXScheduleCommand
+
+ @Description - Submits a CCB command and kicks the firmware, but first schedules
+                any commands which have to happen beforehand (e.g. cache operations)
+
+ @Input psDevInfo		 - pointer to device info
+ @Input eKCCBType		 - see RGXFWIF_CMD_*
+ @Input psKCCBCmd		 - kernel CCB command
+ @Input ui32CmdSize		 - size of the kernel CCB command
+ @Input ui32CacheOpFence - CPU dcache operation fence
+ @Input ui32PDumpFlags - PDUMP_FLAGS_CONTINUOUS bit set if the pdump flags should be continuous
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXScheduleCommand(PVRSRV_RGXDEV_INFO	*psDevInfo,
+								RGXFWIF_DM			eKCCBType,
+								RGXFWIF_KCCB_CMD	*psKCCBCmd,
+								IMG_UINT32			ui32CmdSize,
+								IMG_UINT32			ui32CacheOpFence,
+								IMG_UINT32			ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT16 uiMMUSyncUpdate;
+
+	eError = CacheOpFence(eKCCBType, ui32CacheOpFence);
+	if (eError != PVRSRV_OK) goto RGXScheduleCommand_exit;
+
+#if defined (SUPPORT_VALIDATION)
+	/* For validation, force the core to different dust count states with each kick */
+	if ((eKCCBType == RGXFWIF_DM_TA) || (eKCCBType == RGXFWIF_DM_CDM))
+	{
+		if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN)
+		{
+			IMG_UINT32 ui32NumDusts = RGXGetNextDustCount(&psDevInfo->sDustReqState, psDevInfo->sDevFeatureCfg.ui32MAXDustCount);
+			PVRSRVDeviceDustCountChange(psDevInfo->psDeviceNode, ui32NumDusts);
+		}
+	}
+#endif
+
+	eError = RGXPreKickCacheCommand(psDevInfo, eKCCBType, &uiMMUSyncUpdate, IMG_FALSE);
+	if (eError != PVRSRV_OK) goto RGXScheduleCommand_exit;
+
+	eError = RGXSendCommandWithPowLock(psDevInfo, eKCCBType, psKCCBCmd, ui32CmdSize, ui32PDumpFlags);
+	if (eError != PVRSRV_OK) goto RGXScheduleCommand_exit;
+
+RGXScheduleCommand_exit:
+	return eError;
+}
+
+/*
+ * RGXCheckFirmwareCCB
+ */
+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_FWCCB_CMD *psFwCCBCmd;
+
+	RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl;
+	IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB;
+
+	while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset)
+	{
+		/* Point to the next command */
+		psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset;
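+		/* Firmware CCB commands are fixed-size RGXFWIF_FWCCB_CMD entries, so
+		 * the read offset can be used directly as an array index.
+		 */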
+
+		HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType);
+		switch(psFwCCBCmd->eCmdType)
+		{
+			case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING:
+			{
+				if (psDevInfo->bPDPEnabled)
+				{
+					PDUMP_PANIC(ZSBUFFER_BACKING, "Request to add backing to ZSBuffer");
+				}
+				RGXProcessRequestZSBufferBacking(psDevInfo,
+				        psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+				break;
+			}
+
+			case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING:
+			{
+				if (psDevInfo->bPDPEnabled)
+				{
+					PDUMP_PANIC(ZSBUFFER_UNBACKING, "Request to remove backing from ZSBuffer");
+				}
+				RGXProcessRequestZSBufferUnbacking(psDevInfo,
+				        psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+				break;
+			}
+
+			case RGXFWIF_FWCCB_CMD_FREELIST_GROW:
+			{
+				if (psDevInfo->bPDPEnabled)
+				{
+					PDUMP_PANIC(FREELIST_GROW, "Request to grow the free list");
+				}
+				RGXProcessRequestGrow(psDevInfo,
+				        psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID);
+				break;
+			}
+
+			case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
+			{
+				if (psDevInfo->bPDPEnabled)
+				{
+					PDUMP_PANIC(FREELISTS_RECONSTRUCTION, "Request to reconstruct free lists");
+				}
+
+				if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+				{
+					PVR_DPF((PVR_DBG_MESSAGE, "RGXCheckFirmwareCCBs: Freelist reconstruction request (%d) for %d freelists",
+							psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+							psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_MESSAGE, "RGXCheckFirmwareCCBs: Freelist reconstruction request (%d/%d) for %d freelists",
+							psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+							psDevInfo->psRGXFWIfTraceBuf->ui32HwrCounter+1,
+							psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+				}
+
+				RGXProcessRequestFreelistsReconstruction(psDevInfo,
+				        psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount,
+				        psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs);
+				break;
+			}
+
+			case RGXFWIF_FWCCB_CMD_DOPPLER_MEMORY_GROW:
+			{
+				if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+				{
+					if (psDevInfo->bPDPEnabled)
+					{
+						PDUMP_PANIC(FREELIST_GROW, "Request to grow the RPM free list");
+					}
+					RGXProcessRequestRPMGrow(psDevInfo,
+							psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID);
+				}
+				break;
+			}
+
+			case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION:
+			{
+				DLLIST_NODE *psNode, *psNext;
+				RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification =
+					&psFwCCBCmd->uCmdData.sCmdContextResetNotification;
+				IMG_UINT32 ui32ServerCommonContextID =
+					psCmdContextResetNotification->ui32ServerCommonContextID;
+				RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL;
+
+				OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock);
+				dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext)
+				{
+					RGX_SERVER_COMMON_CONTEXT *psThisContext =
+						IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+					if (psThisContext->ui32ContextID == ui32ServerCommonContextID)
+					{
+						psServerCommonContext = psThisContext;
+						break;
+					}
+				}
+
+				PVR_DPF((PVR_DBG_MESSAGE, "RGXCheckFirmwareCCBs: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)",
+				        psServerCommonContext,
+				        psCmdContextResetNotification->ui32ServerCommonContextID,
+				        (IMG_UINT32)(psCmdContextResetNotification->eResetReason),
+				        psCmdContextResetNotification->ui32ResetJobRef));
+
+				if (psServerCommonContext != NULL)
+				{
+					psServerCommonContext->eLastResetReason    = psCmdContextResetNotification->eResetReason;
+					psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef;
+				}
+				OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock);
+
+				if (psCmdContextResetNotification->bPageFault)
+				{
+					DevmemIntPFNotify(psDevInfo->psDeviceNode,
+					                  psCmdContextResetNotification->ui64PCAddress);
+				}
+				break;
+			}
+
+			case RGXFWIF_FWCCB_CMD_DEBUG_DUMP:
+			{
+				RGXDumpDebugInfo(NULL,NULL,psDevInfo);
+				/* Notify the OS of an issue that triggered a debug dump */
+				OSWarnOn(IMG_TRUE);
+				break;
+			}
+
+			case RGXFWIF_FWCCB_CMD_UPDATE_STATS:
+			{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+				IMG_PID pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner;
+				IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue;
+
+				switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate)
+				{
+					case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS:
+					{
+						PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue,0,0,0,0,0,pidTmp);
+						break;
+					}
+					case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY:
+					{
+						PVRSRVStatsUpdateRenderContextStats(0,i32AdjustmentValue,0,0,0,0,pidTmp);
+						break;
+					}
+					case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES:
+					{
+						PVRSRVStatsUpdateRenderContextStats(0,0,i32AdjustmentValue,0,0,0,pidTmp);
+						break;
+					}
+					case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES:
+					{
+						PVRSRVStatsUpdateRenderContextStats(0,0,0,i32AdjustmentValue,0,0,pidTmp);
+						break;
+					}
+					case RGXFWIF_FWCCB_CMD_UPDATE_NUM_SH_STORES:
+					{
+						PVRSRVStatsUpdateRenderContextStats(0,0,0,0,i32AdjustmentValue,0,pidTmp);
+						break;
+					}
+					case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES:
+					{
+						PVRSRVStatsUpdateRenderContextStats(0,0,0,0,0,i32AdjustmentValue,pidTmp);
+						break;
+					}
+				}
+#endif
+				break;
+			}
+			case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE:
+			{
+#if defined(SUPPORT_PDVFS)
+				PDVFSProcessCoreClkRateChange(psDevInfo,
+											  psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate);
+#endif
+				break;
+			}
+			default:
+			{
+				PVR_ASSERT(IMG_FALSE);
+			}
+		}
+
+		/* Update read offset */
+		psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask;
+	}
+}
+
+/*
+ * PVRSRVRGXFrameworkCopyCommand
+ */
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC	*psFWFrameworkMemDesc,
+										   IMG_PBYTE		pbyGPUFRegisterList,
+										   IMG_UINT32		ui32FrameworkRegisterSize)
+{
+	PVRSRV_ERROR	eError;
+	RGXFWIF_RF_REGISTERS	*psRFReg;
+
+	eError = DevmemAcquireCpuVirtAddr(psFWFrameworkMemDesc,
+                                      (void **)&psRFReg);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFrameworkCopyCommand: Failed to map firmware render context state (%u)",
+				eError));
+		return eError;
+	}
+
+	OSDeviceMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize);
+
+	/* Release the CPU mapping */
+	DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc);
+
+	/*
+	 * Dump the FW framework buffer
+	 */
+	PDUMPCOMMENT("Dump FWFramework buffer");
+	DevmemPDumpLoadMem(psFWFrameworkMemDesc, 0, ui32FrameworkRegisterSize, PDUMP_FLAGS_CONTINUOUS);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXFrameworkCreateKM
+ */
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+										DEVMEM_MEMDESC		**ppsFWFrameworkMemDesc,
+										IMG_UINT32			ui32FrameworkCommandSize)
+{
+	PVRSRV_ERROR			eError;
+	PVRSRV_RGXDEV_INFO		*psDevInfo = psDeviceNode->pvDevice;
+
+	/*
+		Allocate device memory for the firmware GPU framework state.
+		Sufficient info to kick one or more DMs should be contained in this buffer
+	*/
+	PDUMPCOMMENT("Allocate Rogue firmware framework state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							  ui32FrameworkCommandSize,
+							  RGX_FWCOMCTX_ALLOCFLAGS,
+							  "FwGPUFrameworkState",
+							  ppsFWFrameworkMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFrameworkContextKM: Failed to allocate firmware framework state (%u)",
+				eError));
+		return eError;
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXWaitForFWOp(PVRSRV_RGXDEV_INFO	*psDevInfo,
+			    RGXFWIF_DM eDM,
+			    PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+			    IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+	RGXFWIF_KCCB_CMD	sCmdSyncPrim;
+
+	/* Ensure Rogue is powered up before kicking MTS */
+	eError = PVRSRVPowerLock(psDeviceNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire powerlock (%s)",
+					__FUNCTION__,
+					PVRSRVGetErrorStringKM(eError)));
+
+		goto _PVRSRVPowerLock_Exit;
+	}
+
+	PDUMPPOWCMDSTART();
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+					     PVRSRV_DEV_POWER_STATE_ON,
+					     IMG_FALSE);
+	PDUMPPOWCMDEND();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: failed to transition Rogue to ON (%s)",
+					__FUNCTION__,
+					PVRSRVGetErrorStringKM(eError)));
+
+		goto _PVRSRVSetDevicePowerStateKM_Exit;
+	}
+
+	/* Setup sync primitive */
+	eError = SyncPrimSet(psSyncPrim, 0);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set SyncPrim (%u)",
+			__FUNCTION__, eError));
+		goto _SyncPrimSet_Exit;
+	}
+
+	/* prepare a sync command */
+	eError = SyncPrimGetFirmwareAddr(psSyncPrim,
+			&sCmdSyncPrim.uCmdData.sSyncData.sSyncObjDevVAddr.ui32Addr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to get SyncPrim FW address(%u)",
+			__FUNCTION__, eError));
+		goto _SyncPrimGetFirmwareAddr_Exit;
+	}
+	sCmdSyncPrim.eCmdType = RGXFWIF_KCCB_CMD_SYNC;
+	sCmdSyncPrim.uCmdData.sSyncData.uiUpdateVal = 1;
+
+	PDUMPCOMMENT("RGXWaitForFWOp: Submit Kernel SyncPrim [0x%08x] to DM %d ",
+		sCmdSyncPrim.uCmdData.sSyncData.sSyncObjDevVAddr.ui32Addr, eDM);
+
+	/* submit the sync primitive to the kernel CCB */
+	eError = RGXSendCommand(psDevInfo,
+				eDM,
+				&sCmdSyncPrim,
+				sizeof(RGXFWIF_KCCB_CMD),
+				ui32PDumpFlags);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to schedule Kernel SyncPrim with error (%u)",
+					__FUNCTION__,
+					eError));
+		goto _RGXSendCommandRaw_Exit;
+	}
+
+	/* Wait for sync primitive to be updated */
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXScheduleCommandAndWait: Poll for Kernel SyncPrim [0x%08x] on DM %d ",
+		sCmdSyncPrim.uCmdData.sSyncData.sSyncObjDevVAddr.ui32Addr, eDM);
+
+	SyncPrimPDumpPol(psSyncPrim,
+			 1,
+			 0xffffffff,
+			 PDUMP_POLL_OPERATOR_EQUAL,
+			 ui32PDumpFlags);
+#endif
+
+	{
+		RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+		IMG_UINT32 ui32CurrentQueueLength =
+				(psKCCBCtl->ui32WrapMask+1 +
+				psKCCBCtl->ui32WriteOffset -
+				psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
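+		/* Number of commands currently queued; adding ui32WrapMask+1 before
+		 * masking keeps the result correct when the write offset has wrapped
+		 * past the read offset.
+		 */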
+		IMG_UINT32 ui32MaxRetries;
+
+		for (ui32MaxRetries = (ui32CurrentQueueLength + 1) * 3;
+			 ui32MaxRetries > 0;
+			 ui32MaxRetries--)
+		{
+			eError = PVRSRVWaitForValueKMAndHoldBridgeLockKM(psSyncPrim->pui32LinAddr, 1, 0xffffffff);
+
+			if (eError != PVRSRV_ERROR_TIMEOUT)
+			{
+				break;
+			}
+		}
+
+		if (eError == PVRSRV_ERROR_TIMEOUT)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: PVRSRVWaitForValueKMAndHoldBridgeLock timed out. Dump debug information.",
+					__FUNCTION__));
+			PVRSRVPowerUnlock(psDeviceNode);
+
+			PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+			PVR_ASSERT(eError != PVRSRV_ERROR_TIMEOUT);
+			goto _PVRSRVDebugRequest_Exit;
+		}
+	}
+
+_RGXSendCommandRaw_Exit:
+_SyncPrimGetFirmwareAddr_Exit:
+_SyncPrimSet_Exit:
+_PVRSRVSetDevicePowerStateKM_Exit:
+
+	PVRSRVPowerUnlock(psDeviceNode);
+
+_PVRSRVDebugRequest_Exit:
+_PVRSRVPowerLock_Exit:
+	return eError;
+}
+
+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 ui32Config,
+				IMG_UINT32 *pui32ConfigState,
+				IMG_BOOL bSetNotClear)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEV_POWER_STATE ePowerState;
+	RGXFWIF_KCCB_CMD sStateFlagCmd;
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	RGXFWIF_OS_CONFIG *psOSConfig;
+
+	if (!psDevInfo)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psDeviceNode = psDevInfo->psDeviceNode;
+	psOSConfig = psDevInfo->psFWIfOSConfig;
+
+	if (NULL == psOSConfig)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: OS Config is not mapped into CPU space", __func__));
+		return PVRSRV_ERROR_INVALID_CPU_ADDR;
+	}
+
+	/* apply change and ensure the new data is written to memory
+	 * before requesting the FW to read it
+	 */
+	ui32Config = ui32Config & RGXFWIF_INICFG_ALL;
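+	/* Mask off anything that is not a recognised config flag so that stray
+	 * bits never reach the firmware. */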
+	if (bSetNotClear)
+	{
+		psOSConfig->ui32ConfigFlags |= ui32Config;
+	}
+	else
+	{
+		psOSConfig->ui32ConfigFlags &= ~ui32Config;
+	}
+
+	/* return current/new value to caller */
+	if (pui32ConfigState)
+	{
+		*pui32ConfigState = psOSConfig->ui32ConfigFlags;
+	}
+
+	OSMemoryBarrier();
+
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire power lock (%u)", __func__, eError));
+		goto error_lock;
+	}
+
+	/* notify FW to update setting */
+	eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+	if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+	{
+		/* Ask the FW to update its cached version of the value */
+		sStateFlagCmd.eCmdType = RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL;
+
+		eError = RGXSendCommand(psDevInfo,
+		                        RGXFWIF_DM_GP,
+		                        &sStateFlagCmd,
+		                        sizeof(sStateFlagCmd),
+		                        PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: RGXSendCommand failed. Error:%u", __func__, eError));
+			goto error_cmd;
+		}
+		else
+		{
+			/* Give up the power lock as it is re-acquired in RGXWaitForFWOp */
+			PVRSRVPowerUnlock(psDeviceNode);
+
+			/* Wait for the value to be updated: the FW validates
+			 * the parameters and modifies ui32ConfigFlags
+			 * accordingly
+			 * (for completeness, as registered callbacks should
+			 *  also not permit invalid transitions).
+			 */
+			eError = RGXWaitForFWOp(psDevInfo, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"%s: Waiting for value aborted with error (%u)", __func__, eError));
+			}
+			goto error_lock;
+		}
+	}
+
+error_cmd:
+	PVRSRVPowerUnlock(psDeviceNode);
+error_lock:
+	return eError;
+}
+
+static
+PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO	*psDevInfo,
+									   RGXFWIF_DM			eDM,
+									   RGXFWIF_KCCB_CMD		*psKCCBCmd,
+									   IMG_UINT32			ui32CmdSize,
+									   RGXFWIF_CLEANUP_TYPE	eCleanupType,
+									   PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+									   IMG_UINT32				ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	psKCCBCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP;
+
+	psKCCBCmd->uCmdData.sCleanupData.eCleanupType = eCleanupType;
+	eError = SyncPrimGetFirmwareAddr(psSyncPrim, &psKCCBCmd->uCmdData.sCleanupData.sSyncObjDevVAddr.ui32Addr);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_command;
+	}
+
+	eError = SyncPrimSet(psSyncPrim, 0);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_command;
+	}
+
+	/*
+		Send the cleanup request to the firmware. If the resource is still busy
+		the firmware will tell us and we'll drop out with a retry.
+	*/
+	eError = RGXScheduleCommand(psDevInfo,
+								eDM,
+								psKCCBCmd,
+								ui32CmdSize,
+								0,
+								ui32PDumpFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_command;
+	}
+
+	/* Wait for sync primitive to be updated */
+#if defined(PDUMP)
+	PDUMPCOMMENT("Wait for the firmware to reply to the cleanup command");
+	SyncPrimPDumpPol(psSyncPrim,
+					RGXFWIF_CLEANUP_RUN,
+					RGXFWIF_CLEANUP_RUN,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					ui32PDumpFlags);
+
+	/*
+	 * The cleanup request to the firmware will tell us if a given resource is busy or not.
+	 * If the RGXFWIF_CLEANUP_BUSY flag is set, this means that the resource is still in use.
+	 * In this case we return a PVRSRV_ERROR_RETRY error to the client drivers and they will
+	 * re-issue the cleanup request until it succeeds.
+	 *
+	 * Since this retry mechanism doesn't work for pdumps, client drivers should ensure
+	 * that cleanup requests are only submitted if the resource is unused.
+	 * If this is not the case, the following poll will block infinitely, making sure
+	 * the issue doesn't go unnoticed.
+	 */
+	PDUMPCOMMENT("Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps",
+					eDM,
+					psKCCBCmd->uCmdData.sCleanupData.eCleanupType,
+					psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr);
+	SyncPrimPDumpPol(psSyncPrim,
+					0,
+					RGXFWIF_CLEANUP_BUSY,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					ui32PDumpFlags);
+#endif
+
+	{
+		RGXFWIF_CCB_CTL  *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+		IMG_UINT32       ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 +
+		                                           psKCCBCtl->ui32WriteOffset -
+		                                           psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
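+		/* Same wrap-safe queue-length computation as in RGXWaitForFWOp; it
+		 * bounds the retries by the number of commands ahead of ours.
+		 */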
+		IMG_UINT32       ui32MaxRetries;
+
+		for (ui32MaxRetries = ui32CurrentQueueLength + 1;
+			 ui32MaxRetries > 0;
+			 ui32MaxRetries--)
+		{
+			eError = PVRSRVWaitForValueKMAndHoldBridgeLockKM(psSyncPrim->pui32LinAddr, RGXFWIF_CLEANUP_RUN, RGXFWIF_CLEANUP_RUN);
+
+			if (eError != PVRSRV_ERROR_TIMEOUT)
+			{
+				break;
+			}
+		}
+
+		/*
+			If the firmware hasn't got back to us in a timely manner
+			then bail and let the caller retry the command.
+		*/
+		if (eError == PVRSRV_ERROR_TIMEOUT)
+		{
+			PVR_DPF((PVR_DBG_WARNING,"RGXScheduleCleanupCommand: PVRSRVWaitForValueKMAndHoldBridgeLock timed out. Dump debug information."));
+
+			eError = PVRSRV_ERROR_RETRY;
+#if defined(DEBUG)
+			PVRSRVDebugRequest(psDevInfo->psDeviceNode,
+							   DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+#endif
+			goto fail_poll;
+		}
+		else if (eError != PVRSRV_OK)
+		{
+			goto fail_poll;
+		}
+	}
+
+	/*
+		If the command was run but a resource was busy, then the request
+		will need to be retried.
+	*/
+	if (*psSyncPrim->pui32LinAddr & RGXFWIF_CLEANUP_BUSY)
+	{
+		eError = PVRSRV_ERROR_RETRY;
+		goto fail_requestbusy;
+	}
+
+	return PVRSRV_OK;
+
+fail_requestbusy:
+fail_poll:
+fail_command:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+/*
+	RGXFWRequestCommonContextCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+											  RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+											  PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+											  RGXFWIF_DM eDM,
+											  IMG_UINT32 ui32PDumpFlags)
+{
+	RGXFWIF_KCCB_CMD			sRCCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+	PRGXFWIF_FWCOMMONCONTEXT	psFWCommonContextFWAddr;
+
+	psFWCommonContextFWAddr = FWCommonContextGetFWAddress(psServerCommonContext);
+
+	PDUMPCOMMENT("Common ctx cleanup Request DM%d [context = 0x%08x]",
+					eDM, psFWCommonContextFWAddr.ui32Addr);
+	PDUMPCOMMENT("Wait for CCB to be empty before common ctx cleanup");
+
+	RGXCCBPDumpDrainCCB(FWCommonContextGetClientCCB(psServerCommonContext), ui32PDumpFlags);
+
+	/* Setup our command data, the cleanup call will fill in the rest */
+	sRCCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psContext = psFWCommonContextFWAddr;
+
+	/* Request cleanup of the firmware resource */
+	eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+									   eDM,
+									   &sRCCleanUpCmd,
+									   sizeof(RGXFWIF_KCCB_CMD),
+									   RGXFWIF_CLEANUP_FWCOMMONCONTEXT,
+									   psSyncPrim,
+									   ui32PDumpFlags);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXRequestCommonContextCleanUp: Failed to schedule a memory context cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+/*
+ * RGXFWRequestHWRTDataCleanUp
+ */
+
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+										 PRGXFWIF_HWRTDATA psHWRTData,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync,
+										 RGXFWIF_DM eDM)
+{
+	RGXFWIF_KCCB_CMD			sHWRTDataCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+
+	PDUMPCOMMENT("HW RTData cleanup Request DM%d [HWRTData = 0x%08x]", eDM, psHWRTData.ui32Addr);
+
+	sHWRTDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWRTData = psHWRTData;
+
+	eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+									   eDM,
+									   &sHWRTDataCleanUpCmd,
+									   sizeof(sHWRTDataCleanUpCmd),
+									   RGXFWIF_CLEANUP_HWRTDATA,
+									   psSync,
+									   PDUMP_FLAGS_NONE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXRequestHWRTDataCleanUp: Failed to schedule a HWRTData cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+/*
+	RGXFWRequestFreeListCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+										 PRGXFWIF_FREELIST psFWFreeList,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	RGXFWIF_KCCB_CMD			sFLCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+
+	PDUMPCOMMENT("Free list cleanup Request [FreeList = 0x%08x]", psFWFreeList.ui32Addr);
+
+	/* Setup our command data, the cleanup call will fill in the rest */
+	sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList;
+
+	/* Request cleanup of the firmware resource */
+	eError = RGXScheduleCleanupCommand(psDevInfo,
+									   RGXFWIF_DM_GP,
+									   &sFLCleanUpCmd,
+									   sizeof(RGXFWIF_KCCB_CMD),
+									   RGXFWIF_CLEANUP_FREELIST,
+									   psSync,
+									   PDUMP_FLAGS_NONE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestFreeListCleanUp: Failed to schedule a memory context cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+/*
+	RGXFWRequestZSBufferCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+										 PRGXFWIF_ZSBUFFER psFWZSBuffer,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	RGXFWIF_KCCB_CMD			sZSBufferCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+
+	PDUMPCOMMENT("ZS Buffer cleanup Request [ZS Buffer = 0x%08x]", psFWZSBuffer.ui32Addr);
+
+	/* Setup our command data, the cleanup call will fill in the rest */
+	sZSBufferCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psZSBuffer = psFWZSBuffer;
+
+	/* Request cleanup of the firmware resource */
+	eError = RGXScheduleCleanupCommand(psDevInfo,
+									   RGXFWIF_DM_3D,
+									   &sZSBufferCleanUpCmd,
+									   sizeof(RGXFWIF_KCCB_CMD),
+									   RGXFWIF_CLEANUP_ZSBUFFER,
+									   psSync,
+									   PDUMP_FLAGS_NONE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestZSBufferCleanUp: Failed to schedule a memory context cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXFWRequestRayFrameDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+											 PRGXFWIF_RAY_FRAME_DATA psHWFrameData,
+											 PVRSRV_CLIENT_SYNC_PRIM *psSync,
+											 RGXFWIF_DM eDM)
+{
+	RGXFWIF_KCCB_CMD			sHWFrameDataCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+
+	PDUMPCOMMENT("HW FrameData cleanup Request DM%d [HWFrameData = 0x%08x]", eDM, psHWFrameData.ui32Addr);
+
+	sHWFrameDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWFrameData = psHWFrameData;
+
+	eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+									   eDM,
+									   &sHWFrameDataCleanUpCmd,
+									   sizeof(sHWFrameDataCleanUpCmd),
+									   RGXFWIF_CLEANUP_HWFRAMEDATA,
+									   psSync,
+									   PDUMP_FLAGS_NONE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestRayFrameDataCleanUp: Failed to schedule a HWFrameData cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+/*
+	RGXFWRequestRPMFreeListCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestRPMFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+											PRGXFWIF_RPM_FREELIST psFWRPMFreeList,
+											PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	RGXFWIF_KCCB_CMD			sFLCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+
+	PDUMPCOMMENT("RPM Free list cleanup Request [RPM FreeList = 0x%08x]", psFWRPMFreeList.ui32Addr);
+
+	/* Setup our command data, the cleanup call will fill in the rest */
+	sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psRPMFreelist = psFWRPMFreeList;
+
+	/* Request cleanup of the firmware resource */
+	eError = RGXScheduleCleanupCommand(psDevInfo,
+									   RGXFWIF_DM_GP,
+									   &sFLCleanUpCmd,
+									   sizeof(RGXFWIF_KCCB_CMD),
+									   RGXFWIF_CLEANUP_RPM_FREELIST,
+									   psSync,
+									   PDUMP_FLAGS_NONE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestRPMFreeListCleanUp: Failed to schedule a memory context cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32HCSDeadlineMs)
+{
+	PVRSRV_ERROR eError;
+	RGXFWIF_KCCB_CMD	sSetHCSDeadline;
+
+	sSetHCSDeadline.eCmdType                            = RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE;
+	sSetHCSDeadline.eDM                                 = RGXFWIF_DM_GP;
+	sSetHCSDeadline.uCmdData.sHCSCtrl.ui32HCSDeadlineMS = ui32HCSDeadlineMs;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+									RGXFWIF_DM_GP,
+									&sSetHCSDeadline,
+									sizeof(sSetHCSDeadline),
+									0,
+									PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	return eError;
+}
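+
+/*
+ * The LOOP_UNTIL_TIMEOUT retry idiom above is repeated by several of the
+ * functions below. A minimal sketch of how it could be factored into a
+ * single helper is shown for illustration only (the helper name is
+ * hypothetical and the block is not compiled into the driver):
+ */
+#if 0
+static PVRSRV_ERROR _RGXScheduleCommandWithRetry(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                 RGXFWIF_KCCB_CMD *psKCCBCmd,
+                                                 IMG_UINT32 ui32CmdSize)
+{
+	PVRSRV_ERROR eError;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		/* GP DM, no cache-op fence, continuous PDump capture */
+		eError = RGXScheduleCommand(psDevInfo,
+									RGXFWIF_DM_GP,
+									psKCCBCmd,
+									ui32CmdSize,
+									0,
+									PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			/* Either success or a non-retryable failure */
+			break;
+		}
+		/* KCCB full: back off before trying again */
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	return eError;
+}
+#endif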
+
+PVRSRV_ERROR RGXFWOSConfig(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PVRSRV_ERROR eError;
+	RGXFWIF_KCCB_CMD   sOSConfigCmdData;
+	PRGXFWIF_OS_CONFIG sOSConfigFWAddr;
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_INTERNAL_ERROR);
+
+	RGXSetFirmwareAddress(&sOSConfigFWAddr, psDevInfo->psRGXFWIfOSConfigDesc, 0, RFW_FWADDR_NOREF_FLAG);
+
+	sOSConfigCmdData.eCmdType                            = RGXFWIF_KCCB_CMD_OS_CFG_INIT;
+	sOSConfigCmdData.eDM                                 = RGXFWIF_DM_GP;
+	sOSConfigCmdData.uCmdData.sCmdOSConfigData.sOSConfig = sOSConfigFWAddr;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+									RGXFWIF_DM_GP,
+									&sOSConfigCmdData,
+									sizeof(sOSConfigCmdData),
+									0,
+									PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	return eError;
+}
+
+PVRSRV_ERROR RGXFWSetOSIsolationThreshold(PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32IsolationPriorityThreshold)
+{
+	PVRSRV_ERROR eError;
+	RGXFWIF_KCCB_CMD	sOSidIsoConfCmd;
+
+	sOSidIsoConfCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE;
+	sOSidIsoConfCmd.uCmdData.sCmdOSidIsolationData.ui32IsolationPriorityThreshold = ui32IsolationPriorityThreshold;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+									RGXFWIF_DM_GP,
+									&sOSidIsoConfCmd,
+									sizeof(sOSidIsoConfCmd),
+									0,
+									PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	return eError;
+}
+
+PVRSRV_ERROR RGXFWSetVMOnlineState(PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32OSid,
+								RGXFWIF_OS_STATE_CHANGE eOSOnlineState)
+{
+	PVRSRV_ERROR         eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD     sOSOnlineStateCmd;
+	RGXFWIF_TRACEBUF    *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	volatile IMG_UINT32 *pui32OSStateFlags;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE;
+	sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid;
+	sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState;
+
+	if (eOSOnlineState == RGXFWIF_OS_ONLINE)
+	{
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psDevInfo,
+										RGXFWIF_DM_GP,
+										&sOSOnlineStateCmd,
+										sizeof(sOSOnlineStateCmd),
+										0,
+										PDUMP_FLAGS_CONTINUOUS);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		return eError;
+	}
+
+	if (psRGXFWIfTraceBuf == NULL)
+	{
+		return PVRSRV_ERROR_NOT_INITIALISED;
+	}
+	pui32OSStateFlags = (volatile IMG_UINT32*) &psRGXFWIfTraceBuf->ui32OSStateFlags[ui32OSid];
+
+	/* Attempt several times until the FW manages to offload the OS */
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		IMG_UINT32 ui32OSStateFlags;
+
+		/* Send request */
+		eError = RGXScheduleCommand(psDevInfo,
+									RGXFWIF_DM_GP,
+									&sOSOnlineStateCmd,
+									sizeof(sOSOnlineStateCmd),
+									0,
+									PDUMP_FLAGS_CONTINUOUS);
+		if (unlikely(eError == PVRSRV_ERROR_RETRY))
+		{
+			continue;
+		}
+		PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", return_);
+
+		/* Wait for FW to process the cmd */
+		eError = RGXWaitForFWOp(psDevInfo,
+								RGXFWIF_DM_GP,
+								psDevInfo->psDeviceNode->psSyncPrim,
+								PDUMP_FLAGS_CONTINUOUS);
+		PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", return_);
+
+		/* read the OS state */
+		OSMemoryBarrier();
+		ui32OSStateFlags = *pui32OSStateFlags;
+
+		if ((ui32OSStateFlags & RGXFW_OS_STATE_ACTIVE_OS) == 0)
+		{
+			/* FW finished offloading the OSID */
+			eError = PVRSRV_OK;
+			break;
+		}
+		else
+		{
+			eError = PVRSRV_ERROR_TIMEOUT;
+		}
+
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+
+	} END_LOOP_UNTIL_TIMEOUT();
+
+return_:
+	return eError;
+}
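+
+/*
+ * Note on the offline path above: the FW clears RGXFW_OS_STATE_ACTIVE_OS in
+ * the per-OSID state flags once it has finished offloading the OS, so after
+ * each round trip the flags are re-read (behind a memory barrier) and the
+ * loop only returns PVRSRV_OK once the bit is observed clear; otherwise it
+ * keeps PVRSRV_ERROR_TIMEOUT until LOOP_UNTIL_TIMEOUT expires.
+ */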
+
+PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32OSid,
+								IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+	RGXFWIF_KCCB_CMD	sOSidPriorityCmd;
+
+	sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE;
+	sOSidPriorityCmd.uCmdData.sCmdOSidPriorityData.ui32OSidNum = ui32OSid;
+	sOSidPriorityCmd.uCmdData.sCmdOSidPriorityData.ui32Priority = ui32Priority;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+									RGXFWIF_DM_GP,
+									&sOSidPriorityCmd,
+									sizeof(sOSidPriorityCmd),
+									0,
+									PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	return eError;
+}
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+								CONNECTION_DATA *psConnection,
+								PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32Priority,
+								RGXFWIF_DM eDM)
+{
+	IMG_UINT32				ui32CmdSize;
+	IMG_UINT8				*pui8CmdPtr;
+	RGXFWIF_KCCB_CMD		sPriorityCmd;
+	RGXFWIF_CCB_CMD_HEADER	*psCmdHeader;
+	RGXFWIF_CMD_PRIORITY	*psCmd;
+	PVRSRV_ERROR			eError;
+
+	/*
+		Get space for command
+	*/
+	ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY));
+
+	eError = RGXAcquireCCB(FWCommonContextGetClientCCB(psContext),
+						   ui32CmdSize,
+						   (void **) &pui8CmdPtr,
+						   PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __FUNCTION__));
+		}
+		goto fail_ccbacquire;
+	}
+
+	/*
+		Write the command header and command
+	*/
+	psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+	psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY;
+	psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY));
+	pui8CmdPtr += sizeof(*psCmdHeader);
+
+	psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr;
+	psCmd->ui32Priority = ui32Priority;
+	pui8CmdPtr += sizeof(*psCmd);
+
+	/*
+		We should reserve space in the kernel CCB here and fill in the command
+		directly.
+		That way, if there is no space in the kernel CCB, we can return with
+		a retry to the services client before taking any action.
+	*/
+
+	/*
+		Submit the command
+	*/
+	RGXReleaseCCB(FWCommonContextGetClientCCB(psContext),
+				  ui32CmdSize,
+				  PDUMP_FLAGS_CONTINUOUS);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __FUNCTION__));
+		return eError;
+	}
+
+	/* Construct the priority command. */
+	sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+	sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext);
+	sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psContext));
+	sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+	sPriorityCmd.uCmdData.sCmdKickData.sWorkloadDataFWAddress.ui32Addr = 0;
+	sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+									eDM,
+									&sPriorityCmd,
+									sizeof(sPriorityCmd),
+									0,
+									PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"ContextSetPriority: Failed to submit set priority command with error (%u)", eError));
+	}
+
+	return PVRSRV_OK;
+
+fail_ccbacquire:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+	RGXReadMETAAddr
+*/
+PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO	*psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value)
+{
+	IMG_UINT8 *pui8RegBase = (IMG_UINT8*)psDevInfo->pvRegsBaseKM;
+	IMG_UINT32 ui32Value;
+
+	/* Wait for Slave Port to be Ready */
+	if (PVRSRVPollForValueKM(
+	        (IMG_UINT32*) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+	        RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	        RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+	{
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	/* Issue the Read */
+	OSWriteHWReg32(
+	    psDevInfo->pvRegsBaseKM,
+	    RGX_CR_META_SP_MSLVCTRL0,
+	    ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+
+	/* Wait for Slave Port to be Ready: read complete */
+	if (PVRSRVPollForValueKM(
+	        (IMG_UINT32*) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+	        RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	        RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+	{
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	/* Read the value */
+	ui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAX);
+
+	*pui32Value = ui32Value;
+
+	return PVRSRV_OK;
+}
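+
+/*
+ * Usage sketch for the slave-port accessor above (illustrative only; the
+ * META address and word count are hypothetical, and the block is not
+ * compiled into the driver):
+ */
+#if 0
+{
+	IMG_UINT32 aui32Words[4];
+	IMG_UINT32 i;
+
+	for (i = 0; i < 4; i++)
+	{
+		/* Each call polls the slave port before issuing the read and again
+		 * before fetching the data from RGX_CR_META_SP_MSLVDATAX */
+		if (RGXReadMETAAddr(psDevInfo, 0x40000 + (i * 4), &aui32Words[i]) != PVRSRV_OK)
+		{
+			break;
+		}
+	}
+}
+#endif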
+
+/*
+	RGXWriteMETAAddr
+*/
+PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value)
+{
+	IMG_UINT8 *pui8RegBase = (IMG_UINT8*)psDevInfo->pvRegsBaseKM;
+
+	/* Wait for Slave Port to be Ready */
+	if (PVRSRVPollForValueKM(
+		(IMG_UINT32*) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+		RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+		RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+	{
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	/* Issue the Write */
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr);
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value);
+
+	return PVRSRV_OK;
+}
+
+/*
+	RGXUpdateHealthStatus
+*/
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+                                   IMG_BOOL bCheckAfterTimePassed)
+{
+	PVRSRV_DATA*                 psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_HEALTH_STATUS  eNewStatus   = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+	PVRSRV_DEVICE_HEALTH_REASON  eNewReason   = PVRSRV_DEVICE_HEALTH_REASON_NONE;
+	PVRSRV_RGXDEV_INFO*  psDevInfo;
+	RGXFWIF_TRACEBUF*  psRGXFWIfTraceBufCtl;
+	RGXFWIF_CCB_CTL *psKCCBCtl;
+	IMG_UINT32  ui32ThreadCount;
+	IMG_BOOL  bKCCBCmdsWaiting;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	PVR_ASSERT(psDevNode != NULL);
+	psDevInfo = psDevNode->pvDevice;
+	psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+	/* If the firmware is not initialised, there is not much point continuing! */
+	if (!psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL ||
+	    psDevInfo->psDeviceNode == NULL)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* If this is a quick update, then include the last current value... */
+	if (!bCheckAfterTimePassed)
+	{
+		eNewStatus = OSAtomicRead(&psDevNode->eHealthStatus);
+		eNewReason = OSAtomicRead(&psDevNode->eHealthReason);
+	}
+
+	/* If Rogue is not powered on, just skip ahead and check for stalled client CCBs */
+	if (PVRSRVIsDevicePowered(psDevNode))
+	{
+		/*
+		   Firmware thread checks...
+		*/
+		for (ui32ThreadCount = 0;  ui32ThreadCount < RGXFW_THREAD_NUM;  ui32ThreadCount++)
+		{
+			if (psRGXFWIfTraceBufCtl != NULL)
+			{
+				IMG_CHAR* pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo;
+
+				/*
+				Check if the FW has hit an assert...
+				*/
+				if (*pszTraceAssertInfo != '\0')
+				{
+					PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: Firmware thread %d has asserted: %s (%s:%d)",
+							ui32ThreadCount, pszTraceAssertInfo,
+							psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath,
+							psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum));
+					eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+					eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED;
+					goto _RGXUpdateHealthStatus_Exit;
+				}
+
+				/*
+				   Check the threads to see if they are in the same poll locations as last time...
+				*/
+				if (bCheckAfterTimePassed)
+				{
+					if (psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] != 0  &&
+						psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] == psDevInfo->aui32CrLastPollAddr[ui32ThreadCount])
+					{
+						PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)",
+								ui32ThreadCount,
+								((psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+								psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET,
+								psRGXFWIfTraceBufCtl->aui32CrPollMask[ui32ThreadCount]));
+						eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+						eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING;
+						goto _RGXUpdateHealthStatus_Exit;
+					}
+					psDevInfo->aui32CrLastPollAddr[ui32ThreadCount] = psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount];
+				}
+			}
+		}
+
+		/*
+		   Event Object Timeouts check...
+		*/
+		if (!bCheckAfterTimePassed)
+		{
+			if (psDevInfo->ui32GEOTimeoutsLastTime > 1 && psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: Global Event Object Timeouts have risen (from %d to %d)",
+						psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts));
+				eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+				eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS;
+			}
+			psDevInfo->ui32GEOTimeoutsLastTime = psPVRSRVData->ui32GEOConsecutiveTimeouts;
+		}
+
+		/*
+		   Check the Kernel CCB pointer is valid. If any commands were waiting last time, then check
+		   that some have executed since then.
+		*/
+		bKCCBCmdsWaiting = IMG_FALSE;
+		psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+		if (psKCCBCtl != NULL)
+		{
+			if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask  ||
+				psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: KCCB has invalid offset (ROFF=%d WOFF=%d)",
+						psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset));
+				eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+				eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT;
+			}
+
+			if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset)
+			{
+				bKCCBCmdsWaiting = IMG_TRUE;
+			}
+		}
+
+		if (bCheckAfterTimePassed && psDevInfo->psRGXFWIfTraceBuf != NULL)
+		{
+			IMG_UINT32 ui32KCCBCmdsExecuted = psDevInfo->psRGXFWIfTraceBuf->ui32KCCBCmdsExecuted;
+
+			if (psDevInfo->ui32KCCBCmdsExecutedLastTime == ui32KCCBCmdsExecuted)
+			{
+				/*
+				   If something was waiting last time then the Firmware has stopped processing commands.
+				*/
+				if (psDevInfo->bKCCBCmdsWaitingLastTime)
+				{
+					PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: No KCCB commands executed since check!"));
+					eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+					eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED;
+				}
+
+				/*
+				   If no commands are currently pending and nothing happened since the last poll, then
+				   schedule a dummy command to ping the firmware so we know it is alive and processing.
+				*/
+				if (!bKCCBCmdsWaiting)
+				{
+					RGXFWIF_KCCB_CMD  sCmpKCCBCmd;
+					PVRSRV_ERROR      eError;
+
+					sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;
+
+					/* Protect the PDumpLoadMem. RGXScheduleCommand() cannot take the
+					 * PMR lock itself, because some bridge functions will take the PMR lock
+					 * before calling RGXScheduleCommand
+					 */
+					eError = RGXScheduleCommand(psDevNode->pvDevice,
+												RGXFWIF_DM_GP,
+												&sCmpKCCBCmd,
+												sizeof(sCmpKCCBCmd),
+												0,
+												PDUMP_FLAGS_CONTINUOUS);
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: Cannot schedule Health Check command! (0x%x)", eError));
+					}
+					else
+					{
+						bKCCBCmdsWaiting = IMG_TRUE;
+					}
+				}
+			}
+
+			psDevInfo->bKCCBCmdsWaitingLastTime     = bKCCBCmdsWaiting;
+			psDevInfo->ui32KCCBCmdsExecutedLastTime = ui32KCCBCmdsExecuted;
+		}
+	}
+
+	if (bCheckAfterTimePassed && (PVRSRV_DEVICE_HEALTH_STATUS_OK==eNewStatus))
+	{
+		/* Attempt to detect and deal with any stalled client contexts.
+		 * Currently, ui32StalledClientMask is not a reliable method of detecting a stalled
+		 * application, as the app could just be busy with a long-running task
+		 * or lots of smaller workloads. Also, the definition of stalled
+		 * effectively depends on the frequency of the timer calling this function
+		 * (which is a platform config value with no guarantee it is correctly tuned).
+		 */
+
+		IMG_UINT32 ui32StalledClientMask = 0;
+
+		ui32StalledClientMask |= CheckForStalledClientTransferCtxt(psDevInfo);
+
+		ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo);
+
+#if !defined(UNDER_WDDM)
+		if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+		{
+			ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo);
+		}
+#endif
+
+		if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+		{
+			ui32StalledClientMask |= CheckForStalledClientTDMTransferCtxt(psDevInfo);
+		}
+
+		if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+		{
+			ui32StalledClientMask |= CheckForStalledClientRayCtxt(psDevInfo);
+		}
+
+		/* If at least one DM stalled bit is different than before */
+		if (psDevInfo->ui32StalledClientMask ^ ui32StalledClientMask)
+		{
+			/* Print all the stalled DMs */
+			PVR_LOG(("RGXGetDeviceHealthStatus: Possible stalled client contexts detected: %s%s%s%s%s%s%s%s%s",
+			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GP),
+			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TDM_2D),
+			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TA),
+			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_3D),
+			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_CDM),
+			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_RTU),
+			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_SHG),
+			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ2D),
+			         RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ3D)));
+		}
+		psDevInfo->ui32StalledClientMask = ui32StalledClientMask;
+	}
+
+	/*
+	   Finished, save the new status...
+	*/
+_RGXUpdateHealthStatus_Exit:
+	OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus);
+	OSAtomicWrite(&psDevNode->eHealthReason, eNewReason);
+
+	/*
+	 * Attempt to service the HWPerf buffer to regularly transport idle/periodic
+	 * packets to host buffer.
+	 */
+	if (psDevNode->pfnServiceHWPerf != NULL)
+	{
+		PVRSRV_ERROR eError = psDevNode->pfnServiceHWPerf(psDevNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "DevicesWatchdogThread: "
+					 "Error occurred when servicing HWPerf buffer (%d)",
+					 eError));
+		}
+	}
+
+	return PVRSRV_OK;
+} /* RGXUpdateHealthStatus */
+
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM)
+{
+	RGX_CLIENT_CCB	*psCurrentClientCCB = psCurrentServerCommonContext->psClientCCB;
+
+	return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode, psCurrentClientCCB, eKickTypeDM);
+}
+
+void DumpStalledFWCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	RGX_CLIENT_CCB	*psCurrentClientCCB = psCurrentServerCommonContext->psClientCCB;
+	PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext = psCurrentServerCommonContext->sFWCommonContextFWAddr;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP)
+	DumpCCB(psCurrentServerCommonContext->psDevInfo, sFWCommonContext,
+			psCurrentClientCCB, pfnDumpDebugPrintf, pvDumpDebugFile);
+#else
+	DumpStalledCCBCommand(sFWCommonContext, psCurrentClientCCB, pfnDumpDebugPrintf, pvDumpDebugFile);
+#endif
+}
+
+void AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+									IMG_UINT32 *pui32NumCleanupCtl,
+									RGXFWIF_DM eDM,
+									IMG_BOOL bKick,
+									RGX_RTDATA_CLEANUP_DATA        *psRTDataCleanup,
+									RGX_ZSBUFFER_DATA              *psZBuffer,
+									RGX_ZSBUFFER_DATA              *psSBuffer,
+									RGX_ZSBUFFER_DATA              *psMSAAScratchBuffer)
+{
+	PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl;
+
+	PVR_ASSERT((eDM == RGXFWIF_DM_TA) || (eDM == RGXFWIF_DM_3D));
+
+	if (bKick)
+	{
+		if (eDM == RGXFWIF_DM_TA)
+		{
+			if (psRTDataCleanup)
+			{
+				PRGXFWIF_CLEANUP_CTL psCleanupCtl;
+
+				RGXSetFirmwareAddress(&psCleanupCtl, psRTDataCleanup->psFWHWRTDataMemDesc,
+									offsetof(RGXFWIF_HWRTDATA, sTACleanupState),
+									RFW_FWADDR_NOREF_FLAG);
+
+				*(psCleanupCtlWrite++) = psCleanupCtl;
+			}
+		}
+		else
+		{
+			if (psRTDataCleanup)
+			{
+				PRGXFWIF_CLEANUP_CTL psCleanupCtl;
+
+				RGXSetFirmwareAddress(&psCleanupCtl, psRTDataCleanup->psFWHWRTDataMemDesc,
+									offsetof(RGXFWIF_HWRTDATA, s3DCleanupState),
+									RFW_FWADDR_NOREF_FLAG);
+
+				*(psCleanupCtlWrite++) = psCleanupCtl;
+			}
+
+			if (psZBuffer)
+			{
+				(psCleanupCtlWrite++)->ui32Addr = psZBuffer->sZSBufferFWDevVAddr.ui32Addr +
+								offsetof(RGXFWIF_FWZSBUFFER, sCleanupState);
+			}
+
+			if (psSBuffer)
+			{
+				(psCleanupCtlWrite++)->ui32Addr = psSBuffer->sZSBufferFWDevVAddr.ui32Addr +
+								offsetof(RGXFWIF_FWZSBUFFER, sCleanupState);
+			}
+
+			if (psMSAAScratchBuffer)
+			{
+				(psCleanupCtlWrite++)->ui32Addr = psMSAAScratchBuffer->sZSBufferFWDevVAddr.ui32Addr +
+								offsetof(RGXFWIF_FWZSBUFFER, sCleanupState);
+			}
+		}
+	}
+
+	*pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl;
+
+	PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS);
+}
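+
+/*
+ * Usage sketch for the helper above (illustrative only; the cleanup-data and
+ * buffer pointers are hypothetical and may be NULL, and the block is not
+ * compiled into the driver):
+ */
+#if 0
+{
+	PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS];
+	IMG_UINT32 ui32NumCleanupCtl;
+
+	/* For a 3D kick this may attach the RT data 3D cleanup state plus the
+	 * Z, S and MSAA scratch buffer cleanup states, in that order */
+	AttachKickResourcesCleanupCtls(apsCleanupCtl,
+								   &ui32NumCleanupCtl,
+								   RGXFWIF_DM_3D,
+								   IMG_TRUE, /* bKick */
+								   psRTDataCleanup,
+								   psZBuffer,
+								   psSBuffer,
+								   psMSAAScratchBuffer);
+}
+#endif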
+
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo;
+	RGXFWIF_HWRINFOBUF	*psHWRInfoBuf;
+	RGXFWIF_TRACEBUF	*psRGXFWIfTraceBufCtl;
+	IMG_UINT32			i;
+
+	if (psDevNode->pvDevice == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_DEVINFO;
+	}
+	psDevInfo = psDevNode->pvDevice;
+
+	psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBuf;
+	psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+	for (i = 0 ; i < psDevInfo->sDevFeatureCfg.ui32MAXDMCount ; i++)
+	{
+		/* Reset the HWR numbers */
+		psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[i] = 0;
+		psRGXFWIfTraceBufCtl->aui32HwrDmFalseDetectCount[i] = 0;
+		psRGXFWIfTraceBufCtl->aui32HwrDmRecoveredCount[i] = 0;
+		psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[i] = 0;
+	}
+
+	for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+	{
+		psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0;
+	}
+
+	for (i = 0 ; i < RGXFW_THREAD_NUM ; i++)
+	{
+		psHWRInfoBuf->ui32FirstCrPollAddr[i] = 0;
+		psHWRInfoBuf->ui32FirstCrPollMask[i] = 0;
+		psHWRInfoBuf->ui32FirstCrPollLastValue[i] = 0;
+	}
+
+	psHWRInfoBuf->ui32WriteIndex = 0;
+	psHWRInfoBuf->ui32DDReqCount = 0;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+						   IMG_DEV_PHYADDR *psPhyAddr,
+						   IMG_UINT32 ui32LogicalOffset,
+						   IMG_UINT32 ui32Log2PageSize,
+						   IMG_UINT32 ui32NumOfPages,
+						   IMG_BOOL *bValid)
+{
+	PVRSRV_ERROR eError;
+
+	eError = PMRLockSysPhysAddresses(psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXGetPhyAddr: PMRLockSysPhysAddresses failed (%u)",
+				eError));
+		return eError;
+	}
+
+	eError = PMR_DevPhysAddr(psPMR,
+								 ui32Log2PageSize,
+								 ui32NumOfPages,
+								 ui32LogicalOffset,
+								 psPhyAddr,
+								 bValid);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXGetPhyAddr: PMR_DevPhysAddr failed (%u)",
+				eError));
+		return eError;
+	}
+
+	eError = PMRUnlockSysPhysAddresses(psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXGetPhyAddr: PMRUnLockSysPhysAddresses failed (%u)",
+				eError));
+		return eError;
+	}
+
+	return eError;
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset)
+{
+	RGXFWIF_CCB_CTL	*psKCCBCtl = psDevInfo->psKernelCCBCtl;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (psDevInfo->bDumpedKCCBCtlAlready)
+	{
+		/* exiting capture range */
+		psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+
+		/* make sure previous cmd is drained in pdump in case we will 'jump' over some future cmds */
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER,
+				      "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)",
+                                      psKCCBCtl,
+                                      ui32WriteOffset,
+                                      ui32WriteOffset);
+		eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc,
+                                                offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+                                                ui32WriteOffset,
+                                                0xffffffff,
+                                                PDUMP_POLL_OPERATOR_EQUAL,
+                                                PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXPdumpDrainKCCB: problem pdumping POL for kCCBCtl (%d)", eError));
+		}
+	}
+
+	return eError;
+
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function	RGXClientConnectCompatCheck_ClientAgainstFW
+
+ @Description
+
+ Check compatibility of client and firmware (build options)
+ at the connection time.
+
+ @Input psDeviceNode - device node
+ @Input ui32ClientBuildOptions - build options for the client
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions)
+{
+	PVRSRV_ERROR		eError;
+#if !defined(NO_HARDWARE) || defined(PDUMP)
+#if !defined(NO_HARDWARE)
+	RGXFWIF_INIT	*psRGXFWInit = NULL;
+	IMG_UINT32		ui32BuildOptionsMismatch;
+	IMG_UINT32		ui32BuildOptionsFW;
+#endif
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+#endif
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+#if !defined(NO_HARDWARE)
+	if (psDevInfo == NULL || psDevInfo->psRGXFWIfInitMemDesc == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Cannot acquire kernel fw compatibility check info, RGXFWIF_INIT structure not allocated.",
+				__FUNCTION__));
+		return PVRSRV_ERROR_NOT_INITIALISED;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+												(void **)&psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel fw compatibility check info (%u)",
+				__FUNCTION__, eError));
+		return eError;
+	}
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		if (*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+		{
+			/* No need to wait if the FW has already updated the values */
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+#endif
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Compatibility check: client and FW build options");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+												offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions),
+												ui32ClientBuildOptions,
+												0xffffffff,
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	ui32BuildOptionsFW = psRGXFWInit->sRGXCompChecks.ui32BuildOptions;
+	ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW;
+
+	if (ui32BuildOptionsMismatch != 0)
+	{
+		if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+			"extra options present in client: (0x%x). Please check rgx_options.h",
+			ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+		}
+
+		if ( (ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+			"extra options present in Firmware: (0x%x). Please check rgx_options.h",
+			ui32BuildOptionsFW & ui32BuildOptionsMismatch ));
+		}
+		eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+		goto chk_exit;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and client build options match. [ OK ]"));
+	}
+#endif
+
+	eError = PVRSRV_OK;
+#if !defined(NO_HARDWARE)
+chk_exit:
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+#endif
+
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXVzRegisterFirmwarePhysHeap
+
+ @Description Register firmware heap for the specified guest OSID
+
+ @Input psDeviceNode - device node
+ @Input ui32OSID     - Guest OSID
+ @Input sDevPAddr    - Heap address
+ @Input ui64DevPSize - Heap size
+
+ @Return   PVRSRV_ERROR - PVRSRV_OK if heap setup was successful.
+
+******************************************************************************/
+PVRSRV_ERROR RGXVzRegisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+										   IMG_UINT32 ui32OSID,
+										   IMG_DEV_PHYADDR sDevPAddr,
+										   IMG_UINT64 ui64DevPSize)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_OK);
+
+	if (!ui64DevPSize ||
+		!sDevPAddr.uiAddr ||
+		!ui32OSID || ui32OSID >= RGXFW_NUM_OS)
+	{
+		/* Guest OSID(s) range [1 up to (RGXFW_NUM_OS-1)] */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Registration creates an internal RA to manage the guest(s) firmware heap */
+	eError = PVRSRVVzRegisterFirmwarePhysHeap (psDeviceNode, sDevPAddr, ui64DevPSize, ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Registering guest %d fw physheap failed\n", ui32OSID));
+		return eError;
+	}
+
+	/* Map guest DMA fw physheap into the fw kernel memory context */
+	eError = RGXVzDevMemAllocateGuestFwHeap(psDeviceNode, ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Mapping guest %d fw physheap failed\n", ui32OSID));
+		return eError;
+	}
+
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXVzUnregisterFirmwarePhysHeap
+
+ @Description Unregister firmware heap for the specified guest OSID
+
+ @Input psDeviceNode - device node
+ @Input ui32OSID     - Guest OSID
+
+ @Return   PVRSRV_ERROR - PVRSRV_OK if heap setup was successful.
+
+******************************************************************************/
+PVRSRV_ERROR RGXVzUnregisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+											 IMG_UINT32 ui32OSID)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_OK);
+
+	if (!ui32OSID || ui32OSID >= RGXFW_NUM_OS)
+	{
+		/* Guest OSID(s) range [1 up to (RGXFW_NUM_OS-1)] */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Free guest fw physheap from fw kernel memory context */
+	RGXVzDevMemFreeGuestFwHeap(psDeviceNode, ui32OSID);
+
+	/* Unregistration deletes state required to maintain heap */
+	eError = PVRSRVVzUnregisterFirmwarePhysHeap (psDeviceNode, ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Unregistering guest %d fw physheap failed\n", ui32OSID));
+		return eError;
+	}
+
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXVzCreateFWKernelMemoryContext
+
+ @Description Setup additional firmware state specific to VZ
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR - PVRSRV_OK if successful.
+
+******************************************************************************/
+PVRSRV_ERROR RGXVzCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	IMG_CHAR szHeapName[32];
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		eError = SysVzRegisterFwPhysHeap(psDeviceNode->psDevConfig);
+	}
+	else
+	{
+		/* Initialise each guest OSID's firmware physheap; note that the guest
+		   OSID(s) range is [1 up to (RGXFW_NUM_OS-1)] */
+		IMG_UINT32 ui32OSID;
+
+		for (ui32OSID = 1; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+		{
+			OSSNPrintf(szHeapName, sizeof(szHeapName), "GuestFirmware%d", ui32OSID);
+
+			eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName,
+										  &psDevInfo->psGuestFirmwareHeap[ui32OSID]);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "DevmemFindHeapByName() for guest %d failed\n", ui32OSID));
+			}
+		}
+	}
+
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXVzDestroyFWKernelMemoryContext
+
+ @Description Destroy additional firmware state specific to VZ
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR - PVRSRV_OK if successful.
+
+******************************************************************************/
+PVRSRV_ERROR RGXVzDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		return SysVzUnregisterFwPhysHeap(psDeviceNode->psDevConfig);
+	}
+	return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxfwutils.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwutils.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwutils.h
new file mode 100644
index 0000000..d0deba9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxfwutils.h
@@ -0,0 +1,1130 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware utility routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXFWUTILS_H__)
+#define __RGXFWUTILS_H__
+
+#include "log2.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "devicemem.h"
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "rgxta3d.h"
+#include "devicemem_utils.h"
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#include "physmem_tdfwcode.h"
+#include "physmem_tdsecbuf.h"
+#endif
+
+
+/*
+ * Firmware-only allocations (which are initialised by the host) must be aligned to the SLC cache line size.
+ * This is because firmware-only allocations are GPU_CACHE_INCOHERENT and this causes problems
+ * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't
+ * make it into the SLC cache because the line has already been loaded when accessing the content of the first allocation.
+ */
+static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo,
+											IMG_DEVMEM_SIZE_T uiSize,
+											DEVMEM_FLAGS_T uiFlags,
+						                    IMG_PCHAR pszText,
+											DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	/* Ensure all RI labels begin 'Fw' for the FW heap. */
+	PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w'));
+
+	eError = DevmemAllocate(psDevInfo->psFirmwareHeap,
+							uiSize,
+							GET_ROGUE_CACHE_LINE_SIZE(psDevInfo->sDevFeatureCfg.ui32CacheLineSize),
+							uiFlags | PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+							pszText,
+							ppsMemDescPtr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	/*
+		We need to map it so the heap for this allocation
+		is set
+	*/
+	eError = DevmemMapToDevice(*ppsMemDescPtr,
+							   psDevInfo->psFirmwareHeap,
+							   &sTmpDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+
+	PVR_DPF_RETURN_RC(eError);
+}
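+
+/*
+ * Usage sketch for DevmemFwAllocate (illustrative only; the size, flags and
+ * RI label are hypothetical, and the block is not compiled). Note the label
+ * must begin "Fw" to satisfy the assert above:
+ */
+#if 0
+{
+	DEVMEM_MEMDESC *psExampleMemDesc;
+	PVRSRV_ERROR eErr;
+
+	eErr = DevmemFwAllocate(psDevInfo,
+							sizeof(IMG_UINT32) * 256,
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+							"FwExampleBuffer",
+							&psExampleMemDesc);
+	if (eErr == PVRSRV_OK)
+	{
+		/* ... use the allocation, then release it with DevmemFwFree() ... */
+	}
+}
+#endif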
+
+static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode,
+													  IMG_DEVMEM_SIZE_T uiSize,
+													  IMG_DEVMEM_ALIGN_T uiAlign,
+													  DEVMEM_FLAGS_T uiFlags,
+									                  IMG_PCHAR pszText,
+													  DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT((pszText != NULL) &&
+			(pszText[0] == 'F') && (pszText[1] == 'w') &&
+			(pszText[2] == 'E') && (pszText[3] == 'x'));
+
+	eError = DevmemAllocateExportable(psDeviceNode,
+									  uiSize,
+									  uiAlign,
+									  psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK ?
+										ExactLog2(uiAlign) :
+										DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareHeap),
+									  uiFlags | PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+									  pszText,
+									  ppsMemDescPtr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"FW DevmemAllocateExportable failed (%u)", eError));
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	/*
+		We need to map it so the heap for this allocation
+		is set
+	*/
+	eError = DevmemMapToDevice(*ppsMemDescPtr,
+							   psDevInfo->psFirmwareHeap,
+							   &sTmpDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"FW DevmemMapToDevice failed (%u)", eError));
+	}
+
+	PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr);
+}
+
+static INLINE void DevmemFWPoison(DEVMEM_MEMDESC *psMemDesc, IMG_BYTE ubPoisonValue)
+{
+	void *pvLinAddr;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvLinAddr);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire FW allocation mapping "
+					"to poison: %s",
+							__func__,
+							PVRSRVGETERRORSTRING(eError)));
+		return;
+	}
+
+	OSDeviceMemSet(pvLinAddr, ubPoisonValue, psMemDesc->uiAllocSize);
+
+	DevmemReleaseCpuVirtAddr(psMemDesc);
+}
+
+static INLINE void DevmemFwFree(PVRSRV_RGXDEV_INFO *psDevInfo,
+								DEVMEM_MEMDESC *psMemDesc)
+{
+	PVR_DPF_ENTERED1(psMemDesc);
+
+	if(psDevInfo->bEnableFWPoisonOnFree)
+	{
+		DevmemFWPoison(psMemDesc, psDevInfo->ubFWPoisonOnFreeValue);
+	}
+
+	DevmemReleaseDevVirtAddr(psMemDesc);
+	DevmemFree(psMemDesc);
+
+	PVR_DPF_RETURN;
+}
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+static INLINE
+PVRSRV_ERROR DevmemImportTDFWCode(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  IMG_DEVMEM_SIZE_T uiSize,
+                                  PMR_LOG2ALIGN_T uiLog2Align,
+                                  IMG_UINT32 uiMemAllocFlags,
+                                  IMG_BOOL bFWCorememCode,
+                                  DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	PMR *psTDFWCodePMR;
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	IMG_DEVMEM_SIZE_T uiMemDescSize;
+	IMG_DEVMEM_ALIGN_T uiAlign = 1 << uiLog2Align;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ppsMemDescPtr);
+
+	DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareHeap),
+	                                    &uiSize,
+	                                    &uiAlign);
+
+	eError = PhysmemNewTDFWCodePMR(psDeviceNode,
+	                               uiSize,
+	                               uiLog2Align,
+	                               uiMemAllocFlags,
+	                               bFWCorememCode,
+	                               &psTDFWCodePMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDFWCodePMR failed (%u)", eError));
+		goto PMRCreateError;
+	}
+
+	/* NB: TDFWCodePMR refcount: 1 -> 2 */
+	eError = DevmemLocalImport(psDeviceNode,
+	                           psTDFWCodePMR,
+	                           uiMemAllocFlags,
+	                           ppsMemDescPtr,
+	                           &uiMemDescSize,
+	                           "TDFWCode");
+	if(eError != PVRSRV_OK)
+	{
+		goto ImportError;
+	}
+
+	eError = DevmemMapToDevice(*ppsMemDescPtr,
+	                           psDevInfo->psFirmwareHeap,
+	                           &sTmpDevVAddr);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to map TD META code PMR (%u)", eError));
+		goto MapError;
+	}
+
+	/* NB: TDFWCodePMR refcount: 2 -> 1
+	 * The PMR will be unreferenced again (and destroyed) when
+	 * the memdesc tracking it is cleaned up
+	 */
+	PMRUnrefPMR(psTDFWCodePMR);
+
+	return PVRSRV_OK;
+
+MapError:
+	DevmemFree(*ppsMemDescPtr);
+	*ppsMemDescPtr = NULL;
+ImportError:
+	/* Unref and destroy the PMR */
+	PMRUnrefPMR(psTDFWCodePMR);
+PMRCreateError:
+
+	return eError;
+}
+
+static INLINE
+PVRSRV_ERROR DevmemImportTDSecureBuf(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_DEVMEM_SIZE_T uiSize,
+                                     PMR_LOG2ALIGN_T uiLog2Align,
+                                     IMG_UINT32 uiMemAllocFlags,
+                                     DEVMEM_MEMDESC **ppsMemDescPtr,
+                                     IMG_UINT64 *pui64SecBufHandle)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	PMR *psTDSecureBufPMR;
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	IMG_DEVMEM_SIZE_T uiMemDescSize;
+	IMG_DEVMEM_ALIGN_T uiAlign = 1 << uiLog2Align;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ppsMemDescPtr);
+
+	DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareHeap),
+	                                    &uiSize,
+	                                    &uiAlign);
+
+	eError = PhysmemNewTDSecureBufPMR(NULL,
+	                                  psDeviceNode,
+	                                  uiSize,
+	                                  uiLog2Align,
+	                                  uiMemAllocFlags,
+	                                  &psTDSecureBufPMR,
+	                                  pui64SecBufHandle);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR failed (%u)", eError));
+		goto PMRCreateError;
+	}
+
+	/* NB: psTDSecureBufPMR refcount: 1 -> 2 */
+	eError = DevmemLocalImport(psDeviceNode,
+	                           psTDSecureBufPMR,
+	                           uiMemAllocFlags,
+	                           ppsMemDescPtr,
+	                           &uiMemDescSize,
+	                           "TDSecureBuffer");
+	if(eError != PVRSRV_OK)
+	{
+		goto ImportError;
+	}
+
+	eError = DevmemMapToDevice(*ppsMemDescPtr,
+	                           psDevInfo->psFirmwareHeap,
+	                           &sTmpDevVAddr);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to map TD secure buffer PMR (%u)", eError));
+		goto MapError;
+	}
+
+	/* NB: psTDSecureBufPMR refcount: 2 -> 1
+	 * The PMR will be unreferenced again (and destroyed) when
+	 * the memdesc tracking it is cleaned up
+	 */
+	PMRUnrefPMR(psTDSecureBufPMR);
+
+	return PVRSRV_OK;
+
+MapError:
+	DevmemFree(*ppsMemDescPtr);
+	*ppsMemDescPtr = NULL;
+ImportError:
+	/* Unref and destroy the PMR */
+	PMRUnrefPMR(psTDSecureBufPMR);
+PMRCreateError:
+
+	return eError;
+}
+#endif
+
+
+/*
+ * This function returns the value of the hardware register RGX_CR_TIMER
+ * which is a timer counting in ticks.
+ */
+
+static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+    IMG_UINT64  ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+
+    /*
+     *  In order to avoid having to issue three 32-bit reads to detect the
+     *  lower 32-bits wrapping, the MSB of the low 32-bit word is duplicated
+     *  in the MSB of the high 32-bit word. If the wrap happens, we just read
+     *  the register again (it will not wrap again so soon).
+     */
+    if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK)
+    {
+        ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+    }
+
+    return ((ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK)	>> RGX_CR_TIMER_VALUE_SHIFT);
+}
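+
+/*
+ * Worked example of the wrap check above (hypothetical values): suppose the
+ * low word wraps from 0xFFFFFFFF to 0x00000000 between the hardware latching
+ * the two halves of RGX_CR_TIMER. The mirrored MSB in bit 63 still holds the
+ * pre-wrap value (1) while bit 31 of the low word now reads 0; shifting the
+ * low word up by 32 and XORing therefore leaves bit 63 set, the mask
+ * ~RGX_CR_TIMER_BIT31_CLRMSK isolates that bit, and the register is simply
+ * read again.
+ */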
+
+/*
+ * This FW Common Context is only mapped into kernel for initialisation and cleanup purposes.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency,
+ * and write-combine suffices on the CPU side (the WC buffer will be flushed at the first kick)
+ */
+#define RGX_FWCOMCTX_ALLOCFLAGS	(PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+								 PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)| \
+								 PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+								 PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+								 PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \
+								 PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+								 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+								 PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | \
+								 PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+								 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+
+/******************************************************************************
+ * RGXSetFirmwareAddress Flags
+ *****************************************************************************/
+#define RFW_FWADDR_FLAG_NONE		(0)			/*!< Void flag */
+#define RFW_FWADDR_NOREF_FLAG		(1U << 0)	/*!< It is safe to immediately release the reference to the pointer;
+												  otherwise RGXUnsetFirmwareAddress() must be called when finished. */
+
+IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                              IMG_BOOL                 bEnableSignatureChecks,
+                              IMG_UINT32               ui32SignatureChecksBufSize,
+                              IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                              IMG_UINT64               ui64HWPerfFilter,
+                              IMG_UINT32               ui32RGXFWAlignChecksArrLength,
+                              IMG_UINT32               *pui32RGXFWAlignChecks,
+                              IMG_UINT32               ui32ConfigFlags,
+                              IMG_UINT32               ui32LogType,
+                              RGXFWIF_BIFTILINGMODE    eBifTilingMode,
+                              IMG_UINT32               ui32NumTilingCfgs,
+                              IMG_UINT32               *pui32BIFTilingXStrides,
+                              IMG_UINT32               ui32FilterFlags,
+                              IMG_UINT32               ui32JonesDisableMask,
+                              IMG_UINT32               ui32HWRDebugDumpLimit,
+                              IMG_UINT32               ui32HWPerfCountersDataSize,
+                              PMR                      **ppsHWPerfPMR,
+                              RGXFWIF_DEV_VIRTADDR     *psRGXFWInitFWAddr,
+                              RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+                              FW_PERF_CONF             eFirmwarePerf,
+                              IMG_UINT32               ui32ConfigFlagsExt);
+
+
+
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO 	*psDevInfo);
+
+/*************************************************************************/ /*!
+@Function       RGXSetFirmwareAddress
+
+@Description    Sets a pointer in a firmware data structure.
+
+@Input          ppDest		 Address of the pointer to set
+@Input          psSrc		 MemDesc describing the pointer
+@Input          uiOffset	 Offset into the MemDesc
+@Input          ui32Flags	 Any combination of RFW_FWADDR_*_FLAG
+
+@Return			void
+*/ /**************************************************************************/
+void RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR	*ppDest,
+						   DEVMEM_MEMDESC		*psSrc,
+						   IMG_UINT32			uiOffset,
+						   IMG_UINT32			ui32Flags);
+
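+/*
+ * Usage sketch (illustrative only; psSomeMemDesc is hypothetical and the
+ * block is not compiled): without RFW_FWADDR_NOREF_FLAG the reference must
+ * be dropped explicitly once the firmware no longer needs the pointer.
+ */
+#if 0
+{
+	RGXFWIF_DEV_VIRTADDR sFWAddr;
+
+	RGXSetFirmwareAddress(&sFWAddr, psSomeMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+	/* ... hand sFWAddr to the firmware ... */
+	RGXUnsetFirmwareAddress(psSomeMemDesc);
+}
+#endif
+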
+
+/*************************************************************************/ /*!
+@Function       RGXSetMetaDMAAddress
+
+@Description    Fills a Firmware structure used to set up the Meta DMA with two
+                pointers to the same data, one 40-bit and one 32-bit (a
+                pointer in the FW memory space).
+
+@Input          psDest			Address of the structure to set
+@Input          psSrcMemDesc	MemDesc describing the pointer
+@Input          psSrcFWDevVAddr	Firmware memory space pointer
+@Input          uiOffset		Offset into the MemDesc
+
+@Return			void
+*/ /**************************************************************************/
+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR		*psDest,
+						  DEVMEM_MEMDESC		*psSrcMemDesc,
+						  RGXFWIF_DEV_VIRTADDR	*psSrcFWDevVAddr,
+						  IMG_UINT32			uiOffset);
+
+
+/*************************************************************************/ /*!
+@Function       RGXUnsetFirmwareAddress
+
+@Description    Unsets a pointer in a firmware data structure
+
+@Input          psSrc		 MemDesc describing the pointer
+
+@Return			void
+*/ /**************************************************************************/
+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC			*psSrc);
+
+/*************************************************************************/ /*!
+@Function       FWCommonContextAllocate
+
+@Description    Allocate a FW common context. This allocates the HW memory
+                for the context, the CCB and wires it all together.
+
+@Input          psConnection            Connection this context is being created on
+@Input          psDeviceNode		    Device node to create the FW context on
+                                        (must be RGX device node)
+@Input          eRGXCCBRequestor        RGX_CCB_REQUESTOR_TYPE enum constant
+                                        which represents the requestor of this FWCC
+@Input          eDM                     Data Master type
+@Input          psAllocatedMemDesc      Pointer to pre-allocated MemDesc to use
+                                        as the FW context or NULL if this function
+                                        should allocate it
+@Input          ui32AllocatedOffset     Offset into the pre-allocated MemDesc to use
+                                        as the FW context. If psAllocatedMemDesc
+                                        is NULL then this parameter is ignored
+@Input          psFWMemContextMemDesc   MemDesc of the FW memory context this
+                                        common context resides on
+@Input          psContextStateMemDesc   FW context state (context switch) MemDesc
+@Input          ui32CCBAllocSize        Size of the CCB for this context
+@Input          ui32Priority            Priority of the context
+@Input          psInfo                  Structure that contains extra info
+                                        required for the creation of the context
+                                        (elements might change from core to core)
+@Return			PVRSRV_OK if the context was successfully created
+*/ /**************************************************************************/
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+									 PVRSRV_DEVICE_NODE *psDeviceNode,
+									 RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+									 RGXFWIF_DM eDM,
+									 DEVMEM_MEMDESC *psAllocatedMemDesc,
+									 IMG_UINT32 ui32AllocatedOffset,
+									 DEVMEM_MEMDESC *psFWMemContextMemDesc,
+									 DEVMEM_MEMDESC *psContextStateMemDesc,
+									 IMG_UINT32 ui32CCBAllocSize,
+									 IMG_UINT32 ui32Priority,
+									 RGX_COMMON_CONTEXT_INFO *psInfo,
+									 RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext);
+
+
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                                               IMG_UINT32 *pui32LastResetJobRef);
+
+/*!
+******************************************************************************
+
+ @Function	RGXScheduleProcessQueuesKM
+
+ @Description - Software command complete handler
+				(sends uncounted kicks for all the DMs through the MISR)
+
+ @Input hCmdCompHandle - RGX device node
+
+******************************************************************************/
+IMG_IMPORT
+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+
+/*!
+******************************************************************************
+
+ @Function	RGXInstallProcessQueuesMISR
+
+ @Description - Installs the MISR to handle Process Queues operations
+
+ @Input phMISR - Pointer to the MISR handler
+
+ @Input psDeviceNode - RGX Device node
+
+******************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*************************************************************************/ /*!
+@Function       RGXSendCommandWithPowLock
+
+@Description    Sends a command to a particular DM without honouring
+				pending cache operations but taking the power lock.
+
+@Input          psDevInfo			Device Info
+@Input          eKCCBType			To which DM the cmd is sent.
+@Input          psKCCBCmd			The cmd to send.
+@Input          ui32CmdSize			The cmd size.
+@Input          ui32PDumpFlags			Pdump flags
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommandWithPowLock(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+										RGXFWIF_DM			eKCCBType,
+									 	RGXFWIF_KCCB_CMD	*psKCCBCmd,
+									 	IMG_UINT32			ui32CmdSize,
+									 	IMG_UINT32			ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       RGXSendCommand
+
+@Description    Sends a command to a particular DM without honouring
+				pending cache operations or the power lock. 
+                The function flushes any deferred KCCB commands first.
+
+@Input          psDevInfo			Device Info
+@Input          eKCCBType			To which DM the cmd is sent.
+@Input          psKCCBCmd			The cmd to send.
+@Input          ui32CmdSize			The cmd size.
+@Input          uiPdumpFlags			PDump flags.
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommand(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+								 RGXFWIF_DM		eKCCBType,
+								 RGXFWIF_KCCB_CMD	*psKCCBCmd,
+								 IMG_UINT32		ui32CmdSize,
+								 PDUMP_FLAGS_T		uiPdumpFlags);
+
+
+/*************************************************************************/ /*!
+@Function       RGXScheduleCommand
+
+@Description    Sends a command to a particular DM
+
+@Input          psDevInfo			Device Info
+@Input          eKCCBType			To which DM the cmd is sent.
+@Input          psKCCBCmd			The cmd to send.
+@Input          ui32CmdSize			The cmd size.
+@Input          ui32CacheOpFence		Pending cache op. fence value.
+@Input          ui32PDumpFlags			PDump flags
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXScheduleCommand(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+								RGXFWIF_DM		eKCCBType,
+								RGXFWIF_KCCB_CMD	*psKCCBCmd,
+								IMG_UINT32		ui32CmdSize,
+								IMG_UINT32		ui32CacheOpFence,
+								IMG_UINT32 		ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       RGXScheduleCommandAndWait
+
+@Description    Schedules the command with RGXScheduleCommand and then waits
+				for the FW to update a sync. The sync must be piggy-backed on
+				the cmd, either by passing a sync cmd or a cmd that contains
+				the sync which the FW will eventually update. The sync is
+				created in the function, therefore the function provides a
+				FWAddr and UpdateValue for that cmd.
+
+@Input          psDevInfo			Device Info
+@Input          eDM				To which DM the cmd is sent.
+@Input          psKCCBCmd			The cmd to send.
+@Input          ui32CmdSize			The cmd size.
+@Input          puiSyncObjDevVAddr	Pointer to the location with the FWAddr of
+									the sync.
+@Input          puiUpdateValue		Pointer to the location with the update
+									value of the sync.
+@Input          psSyncPrim			The client sync primitive the FW will
+									update.
+@Input          ui32PDumpFlags		PDump flags
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXScheduleCommandAndWait(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+									   RGXFWIF_DM			eDM,
+									   RGXFWIF_KCCB_CMD		*psKCCBCmd,
+									   IMG_UINT32			ui32CmdSize,
+									   IMG_UINT32			*puiSyncObjDevVAddr,
+									   IMG_UINT32			*puiUpdateValue,
+									   PVRSRV_CLIENT_SYNC_PRIM 	*psSyncPrim,
+									   IMG_UINT32			ui32PDumpFlags);
+
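+/* Summary of the command-submission helpers above (from their descriptions):
+ *   RGXSendCommand            - no power lock, pending cache ops not honoured
+ *   RGXSendCommandWithPowLock - takes the power lock, cache ops not honoured
+ *   RGXScheduleCommand        - honours a pending cache-op fence
+ *   RGXScheduleCommandAndWait - schedules, then waits on the piggy-backed sync
+ *
+ * Illustrative sketch (guarded by #if 0, not compiled): psDevInfo, psSyncPrim,
+ * eError and the command type are assumed to be in scope; RGXFWIF_DM_GP and
+ * PDUMP_FLAGS_CONTINUOUS are assumed from the wider DDK headers.
+ */
+#if 0
+	RGXFWIF_KCCB_CMD sCmd = { 0 };
+	IMG_UINT32 ui32SyncFWAddr = 0, ui32UpdateVal = 0;
+
+	sCmd.eCmdType = eSomeCmdType; /* hypothetical command type */
+	eError = RGXScheduleCommandAndWait(psDevInfo, RGXFWIF_DM_GP, &sCmd,
+									   sizeof(sCmd), &ui32SyncFWAddr,
+									   &ui32UpdateVal, psSyncPrim,
+									   PDUMP_FLAGS_CONTINUOUS);
+#endif
+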
+PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+
+/*! ***********************************************************************//**
+@brief          Copy framework command into FW addressable buffer
+
+@param          psFWFrameworkMemDesc
+@param          pbyGPUFRegisterList
+@param          ui32FrameworkRegisterSize
+
+@returns        PVRSRV_ERROR 
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC	*psFWFrameworkMemDesc,
+										   IMG_PBYTE		pbyGPUFRegisterList,
+										   IMG_UINT32		ui32FrameworkRegisterSize);
+
+
+/*! ***********************************************************************//**
+@brief          Create FW addressable buffer for framework
+
+@param          psDeviceNode
+@param          ppsFWFrameworkMemDesc
+@param          ui32FrameworkRegisterSize
+
+@returns        PVRSRV_ERROR 
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE * psDeviceNode,
+										DEVMEM_MEMDESC     ** ppsFWFrameworkMemDesc,
+										IMG_UINT32         ui32FrameworkRegisterSize);
+
+/*************************************************************************/ /*!
+@Function       RGXWaitForFWOp
+
+@Description    Send a sync command and wait to be signalled.
+
+@Input          psDevInfo			Device Info
+@Input          eDM				To which DM the cmd is sent.
+@Input          psSyncPrim			The client sync primitive to wait on.
+@Input          ui32PDumpFlags			PDump flags
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXWaitForFWOp(PVRSRV_RGXDEV_INFO	*psDevInfo,
+									RGXFWIF_DM	eDM,
+									PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+									IMG_UINT32	ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       RGXStateFlagCtrl
+
+@Description    Set and return FW internal state flags.
+
+@Input          psDevInfo       Device Info
+@Input          ui32Config      AppHint config flags
+@Output         pui32State      Current AppHint state flag configuration
+@Input          bSetNotClear    Set or clear the provided config flags
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 ui32Config,
+				IMG_UINT32 *pui32State,
+				IMG_BOOL bSetNotClear);
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestCommonContextCleanUp
+
+ @Description Schedules a FW common context cleanup. The firmware does not
+              block waiting for the resource to become idle but rather
+              notifies the host that the resource is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psServerCommonContext - the server common context to be cleaned up
+
+ @Input psSyncPrim - Sync object associated with cleanup
+
+ @Input eDM - Data master to which the cleanup command should be sent
+
+ @Input ui32PDumpFlags - PDump continuous flag
+
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+											  RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+											  PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+											  RGXFWIF_DM eDM,
+											  IMG_UINT32 ui32PDumpFlags);
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestHWRTDataCleanUp
+
+ @Description Schedules a FW HWRTData memory cleanup. The firmware does not
+              block waiting for the resource to become idle but rather
+              notifies the host that the resource is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psHWRTData - firmware address of the HWRTData to be cleaned up
+
+ @Input psSync - Sync object associated with cleanup
+
+ @Input eDM - Data master to which the cleanup command should be sent
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+										 PRGXFWIF_HWRTDATA psHWRTData,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync,
+										 RGXFWIF_DM eDM);
+
+PVRSRV_ERROR RGXFWRequestRayFrameDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+											 PRGXFWIF_RAY_FRAME_DATA psHWFrameData,
+											 PVRSRV_CLIENT_SYNC_PRIM *psSync,
+											 RGXFWIF_DM eDM);
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestRPMFreeListCleanUp
+
+ @Description Schedules a FW RPM FreeList cleanup. The firmware does not block
+              waiting for the resource to become idle but rather notifies the
+              host that the resource is busy.
+
+ @Input psDevInfo - pointer to device info
+
+ @Input psFWRPMFreeList - firmware address of the RPM freelist to be cleaned up
+
+ @Input psSync - Sync object associated with cleanup
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestRPMFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+											PRGXFWIF_RPM_FREELIST psFWRPMFreeList,
+											PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestFreeListCleanUp
+
+ @Description Schedules a FW FreeList cleanup. The firmware does not block
+              waiting for the resource to become idle but rather notifies the
+              host that the resource is busy.
+
+ @Input psDevInfo - pointer to device info
+
+ @Input psFWFreeList - firmware address of the FreeList to be cleaned up
+
+ @Input psSync - Sync object associated with cleanup
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+										 PRGXFWIF_FREELIST psFWFreeList,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestZSBufferCleanUp
+
+ @Description Schedules a FW ZS Buffer cleanup. The firmware does not block
+              waiting for the resource to become idle but rather notifies the
+              host that the resource is busy.
+
+ @Input psDevInfo - pointer to device info
+
+ @Input psFWZSBuffer - firmware address of the ZS Buffer to be cleaned up
+
+ @Input psSync - Sync object associated with cleanup
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+										 PRGXFWIF_ZSBUFFER psFWZSBuffer,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+								CONNECTION_DATA *psConnection,
+								PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32Priority,
+								RGXFWIF_DM eDM);
+
+/*!
+******************************************************************************
+
+ @Function				RGXFWSetHCSDeadline
+
+ @Description			Requests the Firmware to set a new Hard Context
+						Switch timeout deadline. Context switches that
+						surpass that deadline cause the system to kill
+						the currently running workloads.
+
+ @Input psDeviceNode	pointer to device node
+
+ @Input ui32HCSDeadlineMs	The deadline in milliseconds.
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32HCSDeadlineMs);
+
+/*!
+******************************************************************************
+
+ @Function				RGXFWChangeOSidPriority
+
+ @Description			Requests the Firmware to change the priority of an
+						operating system. Higher priority number equals
+						higher priority on the scheduling system.
+
+ @Input psDeviceNode	pointer to device node
+
+ @Input ui32OSid		The OSid whose priority is to be altered
+
+ @Input ui32Priority	The new priority number for the specified OSid
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+									 IMG_UINT32 ui32OSid,
+									 IMG_UINT32 ui32Priority);
+
+/*!
+****************************************************************************
+
+ @Function				RGXFWSetOSIsolationThreshold
+
+ @Description			Requests the Firmware to change the priority
+						threshold of the OS Isolation group. Any OS with a
+						priority higher than or equal to the threshold is
+						considered to belong to the isolation group.
+
+ @Input psDeviceNode	pointer to device node
+
+ @Input ui32IsolationPriorityThreshold	The new priority threshold
+ ***************************************************************************/
+PVRSRV_ERROR RGXFWSetOSIsolationThreshold(PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32IsolationPriorityThreshold);
+
+/*!
+****************************************************************************
+
+ @Function              RGXFWOSConfig
+
+ @Description           Sends the OS Config structure to the FW to complete
+                        the initialization process. The FW will then set all
+                        the OS specific parameters for that DDK
+
+ @Input psDeviceNode    pointer to device node
+ ***************************************************************************/
+PVRSRV_ERROR RGXFWOSConfig(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+****************************************************************************
+
+ @Function				RGXFWSetVMOnlineState
+
+ @Description			Requests the Firmware to change a guest OS online
+						state. This should be initiated by the VMM when a
+						guest VM comes online or goes offline. If offline,
+						the FW offloads any current resources from that OSID.
+						The request is repeated until the FW has had time to
+						free all the resources or has waited for workloads
+						to finish.
+
+ @Input psDeviceNode	pointer to device node
+
+ @Input ui32OSid		The Guest OSid whose state is being altered
+
+ @Input eOSOnlineState	The new state (Online or Offline)
+ ***************************************************************************/
+PVRSRV_ERROR RGXFWSetVMOnlineState(PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32OSid,
+								RGXFWIF_OS_STATE_CHANGE eOSOnlineState);
+/*!
+******************************************************************************
+
+ @Function	RGXReadMETAAddr
+
+ @Description Reads a value at a given address in META memory space
+              (it can be either a memory location or a META register)
+
+ @Input psDevInfo - pointer to device info
+
+ @Input ui32METAAddr - address in META memory space
+
+ @Output pui32Value - value
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO	*psDevInfo,
+                             IMG_UINT32 ui32METAAddr,
+                             IMG_UINT32 *pui32Value);
+
+/*!
+******************************************************************************
+
+ @Function	RGXWriteMETAAddr
+
+ @Description Writes a value to the given address in META memory space
+              (it can be either a memory location or a META register)
+
+ @Input psDevInfo - pointer to device info
+
+ @Input ui32METAAddr - address in META memory space
+
+ @Input ui32Value    - Value to write to address in META memory space
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              IMG_UINT32 ui32METAAddr,
+                              IMG_UINT32 ui32Value);
+
+/*!
+******************************************************************************
+
+ @Function	RGXCheckFirmwareCCB
+
+ @Description Processes all commands that are found in the Firmware CCB.
+
+ @Input psDevInfo - pointer to device info
+
+ ******************************************************************************/
+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+******************************************************************************
+
+ @Function	   RGXUpdateHealthStatus
+
+ @Description  Tests a number of conditions which might indicate a fatal error
+               has occurred in the firmware. The result is stored in the
+               device node's eHealthStatus.
+
+ @Input        psDevNode              Pointer to device node structure.
+ @Input        bCheckAfterTimePassed  When TRUE, the function will also test for
+                                      firmware queues and polls not changing
+                                      since the previous test.
+
+                                      Note: if not enough time has passed since
+                                      the last call, false positives may occur.
+
+ @returns      PVRSRV_ERROR 
+ ******************************************************************************/
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+                                   IMG_BOOL bCheckAfterTimePassed);
+
+
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM);
+
+void DumpStalledFWCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile);
+
+/*!
+******************************************************************************
+
+ @Function	   AttachKickResourcesCleanupCtls
+
+ @Description  Attaches the cleanup structures to a kick command so that
+               submission reference counting can be performed when the
+               firmware processes the command
+
+ @Output        apsCleanupCtl          Array of CleanupCtl structure pointers to populate.
+ @Output        pui32NumCleanupCtl     Number of CleanupCtl structure pointers written out.
+ @Input         eDM                    Which data master is the subject of the command.
+ @Input         bKick                  TRUE if the client originally wanted to kick this DM.
+ @Input         psRTDataCleanup        Optional RTData cleanup associated with the command.
+ @Input         psZBuffer              Optional ZBuffer associated with the command.
+ @Input         psSBuffer              Optional SBuffer associated with the command.
+ ******************************************************************************/
+void AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+									IMG_UINT32 *pui32NumCleanupCtl,
+									RGXFWIF_DM eDM,
+									IMG_BOOL bKick,
+									RGX_RTDATA_CLEANUP_DATA        *psRTDataCleanup,
+									RGX_ZSBUFFER_DATA              *psZBuffer,
+									RGX_ZSBUFFER_DATA              *psSBuffer,
+									RGX_ZSBUFFER_DATA              *psMSAAScratchBuffer);
+
+/*!
+******************************************************************************
+
+ @Function			RGXResetHWRLogs
+
+ @Description 		Resets the HWR Logs buffer (the hardware recovery count is not reset)
+
+ @Input 			psDevNode	Pointer to the device node
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                	error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode);
+
+
+/*!
+******************************************************************************
+
+ @Function			RGXGetPhyAddr
+
+ @Description 		Gets the physical address of a PMR at a given offset
+					within it
+
+ @Input 			psPMR	    PMR of the allocation
+
+ @Input 			ui32LogicalOffset	    Logical offset
+
+ @Input 			ui32Log2PageSize	    Log2 of the page size
+
+ @Input 			ui32NumOfPages	    Number of pages to query
+
+ @Output			psPhyAddr	    Physical address of the allocation
+
+ @Output			bValid	    Whether a valid backing page exists at the
+								offset
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+						   IMG_DEV_PHYADDR *psPhyAddr,
+						   IMG_UINT32 ui32LogicalOffset,
+						   IMG_UINT32 ui32Log2PageSize,
+						   IMG_UINT32 ui32NumOfPages,
+						   IMG_BOOL *bValid);
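+
+/* Worked example (illustrative only): with ui32Log2PageSize = 12 (4 KiB
+ * pages), a ui32LogicalOffset of 0x3000 addresses the fourth page of the
+ * PMR, and ui32NumOfPages = 1 requests the physical address of just that
+ * page; *bValid then reports whether a backing page exists at that offset. */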
+
+#if defined(PDUMP)
+/*!
+******************************************************************************
+
+ @Function                      RGXPdumpDrainKCCB
+
+ @Description                   Wait for the firmware to execute all the commands in the kCCB
+
+ @Input                         psDevInfo	Pointer to the device
+
+ @Input                         ui32WriteOffset	  Write offset which the PDump POL waits for the read offset to reach
+
+ @Return                        PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset);
+#endif /* PDUMP */
+
+/*!
+******************************************************************************
+
+ @Function			RGXVzCreateFWKernelMemoryContext
+
+ @Description 		Performs additional firmware memory context creation
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function			RGXVzDestroyFWKernelMemoryContext
+
+ @Description 		Performs additional firmware memory context destruction
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function			RGXVzRegisterFirmwarePhysHeap
+
+ @Description 		Registers and maps a guest firmware physheap to the
+					device
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ *****************************************************************************/
+PVRSRV_ERROR RGXVzRegisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+										   IMG_UINT32 ui32OSID,
+										   IMG_DEV_PHYADDR sDevPAddr,
+										   IMG_UINT64 ui64DevPSize);
+
+/*!
+******************************************************************************
+
+ @Function			RGXVzUnregisterFirmwarePhysHeap
+
+ @Description 		Unregisters and unmaps a guest firmware physheap from
+					the device
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ *****************************************************************************/
+PVRSRV_ERROR RGXVzUnregisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+											 IMG_UINT32 ui32OSID);
+
+#endif /* __RGXFWUTILS_H__ */
+/******************************************************************************
+ End of file (rgxfwutils.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxheapconfig.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxheapconfig.h
new file mode 100644
index 0000000..93f8dd1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxheapconfig.h
@@ -0,0 +1,186 @@
+/*************************************************************************/ /*!
+@File
+@Title          device configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Memory heaps device specific configuration
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXHEAPCONFIG_H__
+#define __RGXHEAPCONFIG_H__
+
+#include "rgxdefs_km.h"
+
+/*
+	RGX Device Virtual Address Space Definitions
+	NOTES:
+		Base addresses have to be a multiple of 4MiB
+
+		RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed,
+		on a global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_*
+		respectively. Therefore if clients use multiple configs they must still
+		be consistent with their definitions for these heaps.
+
+		Shared virtual memory (GENERAL_SVM) support requires half of the address
+		space be reserved for SVM allocations unless BRN fixes are required in
+		which case the SVM heap is disabled. This is reflected in the device
+		connection capability bits returned to userspace.
+
+		Variable page-size heap (GENERAL_NON4K) support reserves 64GiB from the
+		available 4K page-size heap (GENERAL) space. The actual heap page-size
+		defaults to 16K; the AppHint PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE
+		can be used to force it to one of these values: 4K, 64K, 256K, 1M, 2M.
+*/
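+
+/*
+	Illustrative sanity check (not part of this file): the note above says
+	base addresses must be a multiple of 4MiB, i.e. their low 22 bits are
+	zero. A build could assert this with a hypothetical macro such as:
+
+	#define RGX_HEAP_BASE_IS_4MIB_ALIGNED(base) \
+		(((base) & ((IMG_UINT64_C(1) << 22) - 1)) == 0)
+
+	e.g. RGX_HEAP_BASE_IS_4MIB_ALIGNED(RGX_GENERAL_HEAP_BASE) evaluates to 1,
+	because 0x8000000000 has its low 22 bits clear.
+*/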
+
+	/* Start at 4 MiB. Size of 512 GiB less 4 MiB (managed by OS/Services). */
+	#define RGX_GENERAL_SVM_HEAP_BASE			IMG_UINT64_C(0x0000400000)
+	#define RGX_GENERAL_SVM_HEAP_SIZE			IMG_UINT64_C(0x7FFFC00000)
+
+	/* Start at 512GiB. Size of 256 GiB */
+	#define RGX_GENERAL_HEAP_BASE				IMG_UINT64_C(0x8000000000)
+	#define RGX_GENERAL_HEAP_SIZE				IMG_UINT64_C(0x4000000000)
+
+	/* HWBRN65273 workaround requires General Heap to use a unique single 1GB PCE entry. */
+	#define RGX_GENERAL_BRN_65273_HEAP_BASE		IMG_UINT64_C(0x65C0000000)
+	#define RGX_GENERAL_BRN_65273_HEAP_SIZE		IMG_UINT64_C(0x0080000000)
+
+	/* Start at 768GiB. Size of 64 GiB */
+	#define RGX_GENERAL_NON4K_HEAP_BASE			IMG_UINT64_C(0xC000000000)
+	#define RGX_GENERAL_NON4K_HEAP_SIZE			IMG_UINT64_C(0x1000000000)
+
+	/* HWBRN65273 workaround requires Non4K memory to use a unique single 1GB PCE entry. */
+	#define RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE	IMG_UINT64_C(0x73C0000000)
+	#define RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE	IMG_UINT64_C(0x0080000000)
+
+	/* Start at 832 GiB. Size of 32 GiB */
+	#define RGX_BIF_TILING_NUM_HEAPS			4
+	#define RGX_BIF_TILING_HEAP_SIZE			IMG_UINT64_C(0x0200000000)
+	#define RGX_BIF_TILING_HEAP_1_BASE			IMG_UINT64_C(0xD000000000)
+	#define RGX_BIF_TILING_HEAP_2_BASE			(RGX_BIF_TILING_HEAP_1_BASE + RGX_BIF_TILING_HEAP_SIZE)
+	#define RGX_BIF_TILING_HEAP_3_BASE			(RGX_BIF_TILING_HEAP_2_BASE + RGX_BIF_TILING_HEAP_SIZE)
+	#define RGX_BIF_TILING_HEAP_4_BASE			(RGX_BIF_TILING_HEAP_3_BASE + RGX_BIF_TILING_HEAP_SIZE)
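+
+	/* With the 8 GiB heap size above, the derived bases evaluate to
+	   (illustrative): HEAP_2 = 0xD000000000 + 0x0200000000 = 0xD200000000
+	   (840 GiB), HEAP_3 = 0xD400000000 (848 GiB), HEAP_4 = 0xD600000000
+	   (856 GiB). */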
+
+	/* HWBRN52402 workaround requires PDS memory to be below 16GB. Start at 8GB. Size of 4GB. */
+	#define RGX_PDSCODEDATA_BRN_52402_HEAP_BASE	IMG_UINT64_C(0x0200000000)
+	#define RGX_PDSCODEDATA_BRN_52402_HEAP_SIZE	IMG_UINT64_C(0x0100000000)
+
+	/* Start at 872 GiB. Size of 4 GiB */
+	#define RGX_PDSCODEDATA_HEAP_BASE			IMG_UINT64_C(0xDA00000000)
+	#define RGX_PDSCODEDATA_HEAP_SIZE			IMG_UINT64_C(0x0100000000)
+
+	/* HWBRN65273 workaround requires PDS memory to use a unique single 1GB PCE entry. */
+	#define RGX_PDSCODEDATA_BRN_65273_HEAP_BASE	IMG_UINT64_C(0xA800000000)
+	#define RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE	IMG_UINT64_C(0x0040000000)
+
+	/* HWBRN63142 workaround requires Region Header memory to be at the top
+	   of a 16GB aligned range. This is so when masked with 0x03FFFFFFFF the
+	   address will avoid aliasing PB addresses. Start at 879.75GB. Size of 256MB. */
+	#define RGX_RGNHDR_BRN_63142_HEAP_BASE		IMG_UINT64_C(0xDBF0000000)
+	#define RGX_RGNHDR_BRN_63142_HEAP_SIZE		IMG_UINT64_C(0x0010000000)
+
+	/* Start at 880 GiB, Size of 1 MiB */
+	#define RGX_VISTEST_HEAP_BASE				IMG_UINT64_C(0xDC00000000)
+	#define RGX_VISTEST_HEAP_SIZE				IMG_UINT64_C(0x0000100000)
+
+	/* HWBRN65273 workaround requires VisTest memory to use a unique single 1GB PCE entry. */
+	#define RGX_VISTEST_BRN_65273_HEAP_BASE		IMG_UINT64_C(0xE400000000)
+	#define RGX_VISTEST_BRN_65273_HEAP_SIZE		IMG_UINT64_C(0x0000100000)
+
+	/* HWBRN52402 workaround requires PDS memory to be below 16GB. Start at 12GB. Size of 4GB. */
+	#define RGX_USCCODE_BRN_52402_HEAP_BASE		IMG_UINT64_C(0x0300000000)
+	#define RGX_USCCODE_BRN_52402_HEAP_SIZE		IMG_UINT64_C(0x0100000000)
+
+	/* Start at 896 GiB. Size of 4 GiB */
+	#define RGX_USCCODE_HEAP_BASE				IMG_UINT64_C(0xE000000000)
+	#define RGX_USCCODE_HEAP_SIZE				IMG_UINT64_C(0x0100000000)
+
+	/* HWBRN65273 workaround requires USC memory to use a unique single 1GB PCE entry. */
+	#define RGX_USCCODE_BRN_65273_HEAP_BASE		IMG_UINT64_C(0xBA00000000)
+	#define RGX_USCCODE_BRN_65273_HEAP_SIZE		IMG_UINT64_C(0x0040000000)
+
+	/* Start at 903GiB. Size of 32MB per OSID (defined in rgxdefs_km.h)
+	   #define RGX_FIRMWARE_HEAP_BASE			IMG_UINT64_C(0xE1C0000000)
+	   #define RGX_FIRMWARE_HEAP_SIZE			(1<<RGX_FW_HEAP_SHIFT) 
+	   #define RGX_FIRMWARE_HEAP_SHIFT			RGX_FW_HEAP_SHIFT */
+
+	/* HWBRN52402 & HWBRN55091 workarounds require TQ memory to be below 16GB and 16GB aligned. Start at 0GB. Size of 8GB. */
+	#define RGX_TQ3DPARAMETERS_BRN_52402_55091_HEAP_BASE		IMG_UINT64_C(0x0000000000)
+	#define RGX_TQ3DPARAMETERS_BRN_52402_55091_HEAP_SIZE		IMG_UINT64_C(0x0200000000)
+
+	/* HWBRN65273 workaround requires TQ memory to start at 0GB and use a unique single 1GB PCE entry. */
+	#define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE		IMG_UINT64_C(0x0000000000)
+	#define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE		IMG_UINT64_C(0x0040000000)
+
+	/* Start at 912GiB. Size of 16 GiB. 16GB aligned to match RGX_CR_ISP_PIXEL_BASE */
+	#define RGX_TQ3DPARAMETERS_HEAP_BASE		IMG_UINT64_C(0xE400000000)
+	#define RGX_TQ3DPARAMETERS_HEAP_SIZE		IMG_UINT64_C(0x0400000000)
+
+	/* Size of 16 * 4 KiB pages (to accommodate large-page systems) */
+	#define RGX_HWBRN37200_HEAP_BASE			IMG_UINT64_C(0xFFFFF00000)
+	#define RGX_HWBRN37200_HEAP_SIZE			IMG_UINT64_C(0x0000100000)
+
+	/* Start at 928GiB. Size of 4 GiB */
+	#define RGX_DOPPLER_HEAP_BASE				IMG_UINT64_C(0xE800000000)
+	#define RGX_DOPPLER_HEAP_SIZE				IMG_UINT64_C(0x0100000000)
+
+	/* Start at 932GiB. Size of 4 GiB */
+	#define RGX_DOPPLER_OVERFLOW_HEAP_BASE		IMG_UINT64_C(0xE900000000)
+	#define RGX_DOPPLER_OVERFLOW_HEAP_SIZE		IMG_UINT64_C(0x0100000000)
+
+	/* Start at 936GiB. Two groups of 128 KBytes that must follow each other in this order. */
+	#define RGX_SERVICES_SIGNALS_HEAP_BASE		IMG_UINT64_C(0xEA00000000)
+	#define RGX_SERVICES_SIGNALS_HEAP_SIZE		IMG_UINT64_C(0x0000020000)
+
+	#define RGX_SIGNALS_HEAP_BASE				IMG_UINT64_C(0xEA00020000)
+	#define RGX_SIGNALS_HEAP_SIZE				IMG_UINT64_C(0x0000020000)
+
+	/* TDM TPU YUV coeffs - can be reduced to a single page */
+	#define RGX_TDM_TPU_YUV_COEFFS_HEAP_BASE	IMG_UINT64_C(0xEA00080000)
+	#define RGX_TDM_TPU_YUV_COEFFS_HEAP_SIZE	IMG_UINT64_C(0x0000040000)
+
+	/* HWBRN65273 workaround requires two Region Header buffers 4GB apart. */
+	#define RGX_MMU_INIA_BRN_65273_HEAP_BASE	IMG_UINT64_C(0xF800000000)
+	#define RGX_MMU_INIA_BRN_65273_HEAP_SIZE	IMG_UINT64_C(0x0040000000)
+	#define RGX_MMU_INIB_BRN_65273_HEAP_BASE	IMG_UINT64_C(0xF900000000)
+	#define RGX_MMU_INIB_BRN_65273_HEAP_SIZE	IMG_UINT64_C(0x0040000000)
+
+#endif /* __RGXHEAPCONFIG_H__ */
+
+/*****************************************************************************
+ End of file (rgxheapconfig.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxhwperf.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxhwperf.c
new file mode 100644
index 0000000..30025d3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxhwperf.c
@@ -0,0 +1,3744 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX HW Performance implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "pvr_debug.h"
+#include "rgxdevice.h"
+#include "pvrsrv_error.h"
+#include "pvr_notifier.h"
+#include "osfunc.h"
+#include "allocmem.h"
+
+#include "pvrsrv.h"
+#include "pvrsrv_tlstreams.h"
+#include "pvrsrv_tlcommon.h"
+#include "tlclient.h"
+#include "tlstream.h"
+
+#include "rgxhwperf.h"
+#include "rgxapi_km.h"
+#include "rgxfwutils.h"
+#include "rgxtimecorr.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pdump_km.h"
+#include "pvrsrv_apphint.h"
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+#include "pvr_gputrace.h"
+#endif
+
+/* This is defined by default to enable producer callbacks.
+ * Clients of the TL interface can disable the use of the callback
+ * with PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK. */
+#define SUPPORT_TL_PRODUCER_CALLBACK 1
+
+/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */
+#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT)
+
+/* Defines size of buffers returned from acquire/release calls */
+#define FW_STREAM_BUFFER_SIZE (0x80000)
+#define HOST_STREAM_BUFFER_SIZE (0x20000)
+
+/* Must be at least as large as two TL packets of maximum size */
+static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1),
+			  "HOST_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)");
+static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1),
+			  "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)");
+
+
+/*
+	RGXHWPerfCopyDataL1toL2
+*/
+static IMG_UINT32 RGXHWPerfCopyDataL1toL2(IMG_HANDLE hHWPerfStream,
+										  IMG_BYTE   *pbFwBuffer,
+										  IMG_UINT32 ui32BytesExp)
+{
+	IMG_BYTE	 *pbL2Buffer;
+	IMG_UINT32   ui32L2BufFree;
+	IMG_UINT32   ui32BytesCopied = 0;
+	IMG_UINT32   ui32BytesExpMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbFwBuffer));
+	PVRSRV_ERROR eError;
+
+/* HWPERF_MISR_FUNC_DEBUG enables debug code for investigating HWPerf issues */
+#ifdef HWPERF_MISR_FUNC_DEBUG
+	static IMG_UINT32 gui32Ordinal = IMG_UINT32_MAX;
+#endif
+
+	PVR_DPF_ENTERED;
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+	PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d",
+							  pbFwBuffer, ui32BytesExp));
+#endif
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+	{
+		/* Check the incoming buffer of data has not lost any packets */
+		IMG_BYTE *pbFwBufferIter = pbFwBuffer;
+		IMG_BYTE *pbFwBufferEnd = pbFwBuffer+ui32BytesExp;
+		do
+		{
+			RGX_HWPERF_V2_PACKET_HDR *asCurPos = RGX_HWPERF_GET_PACKET(pbFwBufferIter);
+			IMG_UINT32 ui32CurOrdinal = asCurPos->ui32Ordinal;
+			if (gui32Ordinal != IMG_UINT32_MAX)
+			{
+				if ((gui32Ordinal+1) != ui32CurOrdinal)
+				{
+					if (gui32Ordinal < ui32CurOrdinal)
+					{
+						PVR_DPF((PVR_DBG_WARNING,
+								 "HWPerf [%p] packets lost (%u packets) between ordinal %u...%u",
+								 pbFwBufferIter,
+								 ui32CurOrdinal - gui32Ordinal - 1,
+								 gui32Ordinal,
+								 ui32CurOrdinal));
+					}
+					else
+					{
+						PVR_DPF((PVR_DBG_WARNING,
+								 "HWPerf [%p] packet ordinal out of sequence last: %u, current: %u",
+								  pbFwBufferIter,
+								  gui32Ordinal,
+								  ui32CurOrdinal));
+					}
+				}
+			}
+			gui32Ordinal = asCurPos->ui32Ordinal;
+			pbFwBufferIter += RGX_HWPERF_GET_SIZE(asCurPos);
+		} while( pbFwBufferIter < pbFwBufferEnd );
+	}
+#endif
+
+	/* Try submitting all data in one TL packet. */
+	eError = TLStreamReserve2( hHWPerfStream,
+							   &pbL2Buffer,
+							   (size_t)ui32BytesExp, ui32BytesExpMin,
+							   &ui32L2BufFree);
+	if ( eError == PVRSRV_OK )
+	{
+		OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)ui32BytesExp );
+		eError = TLStreamCommit(hHWPerfStream, (size_t)ui32BytesExp);
+		if ( eError != PVRSRV_OK )
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
+					 eError, __func__));
+			goto e0;
+		}
+		/* Data were successfully written */
+		ui32BytesCopied = ui32BytesExp;
+	}
+	else if (eError == PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG)
+	{
+		/* There was not enough space for all data, copy as much as possible */
+		IMG_UINT32                sizeSum  = 0;
+		RGX_PHWPERF_V2_PACKET_HDR psCurPkt = RGX_HWPERF_GET_PACKET(pbFwBuffer);
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, remaining free space: %d", ui32BytesExp, ui32L2BufFree));
+
+		/* Traverse the array to find how many packets will fit in the available space. */
+		while ( sizeSum < ui32BytesExp  &&
+				sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32L2BufFree )
+		{
+			sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt);
+			psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt);
+		}
+
+		if ( 0 != sizeSum )
+		{
+			eError = TLStreamReserve( hHWPerfStream, &pbL2Buffer, (size_t)sizeSum);
+
+			if ( eError == PVRSRV_OK )
+			{
+				OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)sizeSum );
+				eError = TLStreamCommit(hHWPerfStream, (size_t)sizeSum);
+				if ( eError != PVRSRV_OK )
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
+							 eError, __func__));
+					goto e0;
+				}
+				/* sizeSum bytes of hwperf packets have been successfully written */
+				ui32BytesCopied = sizeSum;
+			}
+			else if ( PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG == eError )
+			{
+				PVR_DPF((PVR_DBG_WARNING, "Cannot write HWPerf packet into host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
+			}
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "Cannot find space in host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
+		}
+	}
+	if ( PVRSRV_OK != eError && /* Some other error occurred */
+	     PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG != eError ) /* Full condition is handled by the caller; we return the copied byte count */
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "HWPerf enabled: Unexpected Error ( %d ) while copying FW buffer to TL buffer.",
+				 eError));
+	}
+
+e0:
+	/* Return the remaining packets left to be transported. */
+	PVR_DPF_RETURN_VAL(ui32BytesCopied);
+}
+
+
+static INLINE IMG_UINT32 RGXHWPerfAdvanceRIdx(
+		const IMG_UINT32 ui32BufSize,
+		const IMG_UINT32 ui32Pos,
+		const IMG_UINT32 ui32Size)
+{
+	return ( ui32Pos + ui32Size < ui32BufSize ? ui32Pos + ui32Size : 0 );
+}
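+
+/* Worked example (illustrative): with ui32BufSize = 0x1000, advancing from
+ * ui32Pos = 0xF80 by ui32Size = 0x80 gives 0x1000, which is not < 0x1000,
+ * so the read index wraps to 0; advancing from 0xE00 by 0x80 returns 0xE80. */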
+
+
+/*
+	RGXHWPerfDataStore
+*/
+static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO	*psDevInfo)
+{
+	RGXFWIF_TRACEBUF	*psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+	IMG_BYTE*			psHwPerfInfo = psDevInfo->psRGXFWIfHWPerfBuf;
+	IMG_UINT32			ui32SrcRIdx, ui32SrcWIdx, ui32SrcWrapCount;
+	IMG_UINT32			ui32BytesExp = 0, ui32BytesCopied = 0, ui32BytesCopiedSum = 0;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+	IMG_UINT32			ui32BytesExpSum = 0;
+#endif
+
+	PVR_DPF_ENTERED;
+
+	/* Caller should check this member is valid before calling */
+	PVR_ASSERT(psDevInfo->hHWPerfStream);
+
+	/* Get a copy of the current
+	 *   read (first packet to read)
+	 *   write (empty location for the next write to be inserted)
+	 *   WrapCount (size in bytes of the buffer at or past end)
+	 * indexes of the FW buffer */
+	ui32SrcRIdx = psRGXFWIfTraceBufCtl->ui32HWPerfRIdx;
+	ui32SrcWIdx = psRGXFWIfTraceBufCtl->ui32HWPerfWIdx;
+	OSMemoryBarrier();
+	ui32SrcWrapCount = psRGXFWIfTraceBufCtl->ui32HWPerfWrapCount;
+
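+	/* Worked example (illustrative): with a 0x1000-byte buffer,
+	 *   RIdx=0x100, WIdx=0x400              -> not wrapped, 0x300 bytes to copy;
+	 *   RIdx=0xE00, WIdx=0x200, Wrap=0x1000 -> copy 0x200 bytes from 0xE00 to
+	 *     the end, then (once RIdx has wrapped to 0) copy the 0x200 bytes at
+	 *     the start of the buffer. */
+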
+	/* Is there any data in the buffer not yet retrieved? */
+	if ( ui32SrcRIdx != ui32SrcWIdx )
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStore EVENTS found srcRIdx:%d srcWIdx: %d ", ui32SrcRIdx, ui32SrcWIdx));
+
+		/* Is the write position higher than the read position? */
+		if ( ui32SrcWIdx > ui32SrcRIdx )
+		{
+			/* Yes, buffer has not wrapped */
+			ui32BytesExp = ui32SrcWIdx - ui32SrcRIdx;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+			ui32BytesExpSum += ui32BytesExp;
+#endif
+			ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo->hHWPerfStream,
+													  psHwPerfInfo + ui32SrcRIdx,
+													  ui32BytesExp);
+
+			/* Advance the read index and the free bytes counter by the number
+			 * of bytes transported. Items will be left in buffer if not all data
+			 * could be transported. Exit to allow buffer to drain. */
+			psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx(
+					psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+					ui32BytesCopied);
+
+			ui32BytesCopiedSum += ui32BytesCopied;
+		}
+		/* No, buffer has wrapped and write position is behind read position */
+		else
+		{
+			/* Byte count equal to
+			 *     number of bytes from read position to the end of the buffer,
+			 *   + data in the extra space in the end of the buffer. */
+			ui32BytesExp = ui32SrcWrapCount - ui32SrcRIdx;
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+			ui32BytesExpSum += ui32BytesExp;
+#endif
+			/* Attempt to transfer the packets to the TL stream buffer */
+			ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo->hHWPerfStream,
+													  psHwPerfInfo + ui32SrcRIdx,
+													  ui32BytesExp);
+
+			/* Advance read index as before and Update the local copy of the
+			 * read index as it might be used in the last if branch*/
+			ui32SrcRIdx = RGXHWPerfAdvanceRIdx(
+					psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+					ui32BytesCopied);
+
+			/* Update Wrap Count */
+			if ( ui32SrcRIdx == 0)
+			{
+				psRGXFWIfTraceBufCtl->ui32HWPerfWrapCount = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+			}
+			psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = ui32SrcRIdx;
+
+			ui32BytesCopiedSum += ui32BytesCopied;
+
+			/* If all the data in the end of the array was copied, try copying
+			 * wrapped data in the beginning of the array, assuming there is
+			 * any and the RIdx was wrapped. */
+			if (   (ui32BytesCopied == ui32BytesExp)
+			    && (ui32SrcWIdx > 0)
+				&& (ui32SrcRIdx == 0) )
+			{
+				ui32BytesExp = ui32SrcWIdx;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+				ui32BytesExpSum += ui32BytesExp;
+#endif
+				ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo->hHWPerfStream,
+														  psHwPerfInfo,
+														  ui32BytesExp);
+				/* Advance the FW buffer read position. */
+				psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx(
+						psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+						ui32BytesCopied);
+
+				ui32BytesCopiedSum += ui32BytesCopied;
+			}
+		}
+#ifdef HWPERF_MISR_FUNC_DEBUG
+		if (ui32BytesCopiedSum != ui32BytesExpSum)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psRGXFWIfTraceBufCtl->ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum));
+		}
+#endif
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfDataStore NO EVENTS to transport"));
+	}
+
+	PVR_DPF_RETURN_VAL(ui32BytesCopiedSum);
+}
+
+
+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDevInfo)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+	IMG_UINT32          ui32BytesCopied;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDevInfo);
+	psRgxDevInfo = psDevInfo->pvDevice;
+
+	/* Keep HWPerf resource init check and use of
+	 * resources atomic, they may not be freed during use
+	 */
+	OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+
+	if (psRgxDevInfo->hHWPerfStream != 0)
+	{
+		ui32BytesCopied = RGXHWPerfDataStore(psRgxDevInfo);
+		if ( ui32BytesCopied )
+		{	/* Signal consumers that packets may be available to read when
+			 * running from a HW kick, not when called by client APP thread
+			 * via the transport layer CB as this can lead to stream
+			 * corruption.*/
+			eError = TLStreamSync(psRgxDevInfo->hHWPerfStream);
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStoreCB: Zero bytes copied"));
+			RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo);
+		}
+	}
+
+	OSLockRelease(psRgxDevInfo->hHWPerfLock);
+
+	PVR_DPF_RETURN_OK;
+}
+
+
+/* Currently supported by default */
+#if defined(SUPPORT_TL_PRODUCER_CALLBACK)
+static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream,
+		IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)pvUser;
+
+	PVR_UNREFERENCED_PARAMETER(hStream);
+	PVR_UNREFERENCED_PARAMETER(ui32Resp);
+
+	PVR_ASSERT(psRgxDevInfo);
+
+	switch (ui32ReqOp)
+	{
+	case TL_SOURCECB_OP_CLIENT_EOS:
+		/* Keep HWPerf resource init check and use of
+		 * resources atomic, they may not be freed during use
+		 */
+
+		/* This solution is for avoiding a deadlock situation where -
+		 * in DoTLStreamReserve(), writer has acquired HWPerfLock and
+		 * ReadLock and is waiting on ReadPending (which will be reset
+		 * by reader), And
+		 * the reader after setting ReadPending in TLStreamAcquireReadPos(),
+		 * is waiting for HWPerfLock in RGXHWPerfTLCB().
+		 * So here in RGXHWPerfTLCB(), if HWPerfLock is already acquired we
+		 * will return to the reader without waiting to acquire HWPerfLock.
+		 */
+		if( !OSTryLockAcquire(psRgxDevInfo->hHWPerfLock))
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "hHWPerfLock is already acquired, a write "
+						  "operation might already be in process"));
+			return PVRSRV_OK;
+		}
+
+		if (psRgxDevInfo->hHWPerfStream != 0)
+		{
+			(void) RGXHWPerfDataStore(psRgxDevInfo);
+		}
+		OSLockRelease(psRgxDevInfo->hHWPerfLock);
+		break;
+
+	default:
+		break;
+	}
+
+	return eError;
+}
+#endif
+
+
+static void RGXHWPerfL1BufferDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc)
+	{
+		if (psRgxDevInfo->psRGXFWIfHWPerfBuf != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+			psRgxDevInfo->psRGXFWIfHWPerfBuf = NULL;
+		}
+		DevmemFwFree(psRgxDevInfo, psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+		psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL;
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfInit
+
+@Description    Called during driver init to initialise the HWPerf module in
+				the Rogue device driver. This function allocates only the
+				minimal resources required for the HWPerf server module to
+				function.
+
+@Input          psRgxDevInfo	RGX Device Info
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	PVRSRV_ERROR eError;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	PVR_DPF_ENTERED;
+
+	/* expecting a valid device info */
+	PVR_ASSERT(psRgxDevInfo);
+
+	/* Create a lock for HWPerf server module used for serializing, L1 to L2
+	 * copy calls (e.g. in case of TL producer callback) and L1, L2 resource
+	 * allocation */
+	eError = OSLockCreate(&psRgxDevInfo->hHWPerfLock, LOCK_TYPE_PASSIVE);
+	PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+	/* avoid uninitialised data */
+	psRgxDevInfo->hHWPerfStream = 0;
+	psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL;
+
+	PVR_DPF_RETURN_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfIsInitRequired
+
+@Description    Returns true if the HWPerf firmware buffer (L1 buffer) and
+                host driver TL buffer (L2 buffer) are not already allocated.
+                The caller must hold the hHWPerfLock lock before calling this
+                function so the state tested is not inconsistent.
+
+@Input          psRgxDevInfo RGX Device Info, on which init requirement is
+                checked.
+
+@Return         IMG_BOOL	Whether initialization (allocation) is required
+*/ /**************************************************************************/
+static INLINE IMG_BOOL RGXHWPerfIsInitRequired(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hHWPerfLock));
+
+#if !defined (NO_HARDWARE)
+	/* Both L1 and L2 buffers are required (for HWPerf functioning) on driver
+	 * built for actual hardware (TC, EMU, etc.)
+	 */
+	if (psRgxDevInfo->hHWPerfStream == 0)
+	{
+		/* The allocation API (RGXHWPerfInitOnDemandResources) allocates
+		 * device memory for both L1 and L2 without any checks. Hence,
+		 * either both should be allocated or both be NULL.
+		 *
+		 * In-case this changes in future (for e.g. a situation where one
+		 * of the 2 buffers is already allocated and other is required),
+		 * add required checks before allocation calls to avoid memory leaks.
+		 */
+		PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL);
+		return IMG_TRUE;
+	}
+	PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc != NULL);
+#else
+	/* On a NO-HW driver L2 is not allocated. So, no point in checking its
+	 * allocation */
+	if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL)
+	{
+		return IMG_TRUE;
+	}
+#endif
+	return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfInitOnDemandResources
+
+@Description    This function allocates the HWPerf firmware buffer (L1 buffer)
+                and host driver TL buffer (L2 buffer) if HWPerf is enabled at
+                driver load time. Otherwise, these buffers are allocated on
+                demand when first required. The caller must hold the
+                hHWPerfLock lock before calling this function so the state
+                tested is not inconsistent if called outside of driver
+                initialisation.
+
+@Input          psRgxDevInfo RGX Device Info, on which init is done
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32L2BufferSize;
+	DEVMEM_FLAGS_T uiMemAllocFlags;
+	IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; /* 5 seems reasonable as it can hold
+																			  names up to "hwperf_9999", which is enough */
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	PVR_DPF_ENTERED;
+
+	/* Create the L1 HWPerf buffer on demand */
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT)
+				| PVRSRV_MEMALLOCFLAG_GPU_READABLE
+				| PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE
+				| PVRSRV_MEMALLOCFLAG_CPU_READABLE
+				| PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE
+				| PVRSRV_MEMALLOCFLAG_UNCACHED
+				#if defined(PDUMP)
+				| PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
+				#endif
+				;
+
+	/* Allocate HWPerf FW L1 buffer */
+	eError = DevmemFwAllocate(psRgxDevInfo,
+							  psRgxDevInfo->ui32RGXFWIfHWPerfBufSize+RGXFW_HWPERF_L1_PADDING_DEFAULT,
+							  uiMemAllocFlags,
+							  "FwHWPerfBuffer",
+							  &psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate kernel fw hwperf buffer (%u)",
+					__FUNCTION__, eError));
+		goto e0;
+	}
+
+	/* Expecting the RuntimeCfg structure is mapped into CPU virtual memory.
+	 * Also, make sure the FW address is not already set */
+	PVR_ASSERT(psRgxDevInfo->psRGXFWIfRuntimeCfg && psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr == 0x0);
+
+	/* Meta cached flag removed from this allocation as it was found
+	 * FW performance was better without it. */
+	RGXSetFirmwareAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf,
+						  psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc,
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc,
+									  (void**)&psRgxDevInfo->psRGXFWIfHWPerfBuf);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire kernel hwperf buffer (%u)",
+					 __FUNCTION__, eError));
+		goto e0;
+	}
+
+	/* On NO-HW driver, there is no MISR installed to copy data from L1 to L2. Hence,
+	 * L2 buffer is not allocated */
+#if !defined(NO_HARDWARE)
+	/* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer
+	 * accessed by the FW. The MISR may try to write one packet the size of the L1
+	 * buffer in some scenarios. When logging is enabled in the MISR, it can be seen
+	 * if the L2 buffer hits a full condition. The closer in size the L2 and L1 buffers
+	 * are the more chance of this happening.
+	 * Size chosen to allow MISR to write an L1 sized packet and for the client
+	 * application/daemon to drain a L1 sized packet e.g. ~ 1.5*L1.
+	 */
+	ui32L2BufferSize = psRgxDevInfo->ui32RGXFWIfHWPerfBufSize +
+	                       (psRgxDevInfo->ui32RGXFWIfHWPerfBufSize>>1);
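+
+	/* e.g. (illustrative) an L1 buffer of 64 KiB yields an L2 buffer of
+	 * 64 KiB + 32 KiB = 96 KiB, i.e. 1.5 * L1 as described above. */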
+
+	/* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */
+	if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d",
+				   PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+				   psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf stream name for device %d",
+		                        __FUNCTION__,
+								psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = TLStreamCreate(&psRgxDevInfo->hHWPerfStream,
+					psRgxDevInfo->psDeviceNode,
+					pszHWPerfStreamName,
+					ui32L2BufferSize,
+					TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT,
+					NULL, NULL,
+#if !defined(SUPPORT_TL_PRODUCER_CALLBACK)
+					NULL, NULL
+#else
+					/* Not enabled by default */
+					RGXHWPerfTLCB, psRgxDevInfo
+#endif
+					);
+	PVR_LOGG_IF_ERROR(eError, "TLStreamCreate", e1);
+
+	eError = TLStreamSetNotifStream(psRgxDevInfo->hHWPerfStream,
+	                                PVRSRVGetPVRSRVData()->hTLCtrlStream);
+	/* the stream can still be discovered without the notification stream, so just log the error and carry on */
+	PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream");
+
+	/* send the event here because host stream is implicitly opened for write
+	 * in TLStreamCreate and TLStreamOpen is never called (so the event is
+	 * never emitted) */
+	TLStreamMarkStreamOpen(psRgxDevInfo->hHWPerfStream);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d  L2: %d",
+			psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, ui32L2BufferSize));
+
+#else /* defined (NO_HARDWARE) */
+	PVR_UNREFERENCED_PARAMETER(ui32L2BufferSize);
+	PVR_UNREFERENCED_PARAMETER(RGXHWPerfTLCB);
+	PVR_UNREFERENCED_PARAMETER(pszHWPerfStreamName);
+	ui32L2BufferSize = 0;
+#endif
+
+	PVR_DPF_RETURN_OK;
+
+#if !defined(NO_HARDWARE)
+e1: /* L2 buffer initialisation failures */
+	psRgxDevInfo->hHWPerfStream = NULL;
+#endif
+e0: /* L1 buffer initialisation failures */
+	RGXHWPerfL1BufferDeinit(psRgxDevInfo);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+
+void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psRgxDevInfo);
+
+	/* Clean up the L2 buffer stream object if allocated */
+	if (psRgxDevInfo->hHWPerfStream)
+	{
+		/* send the event here because host stream is implicitly opened for
+		 * write in TLStreamCreate and TLStreamClose is never called (so the
+		 * event is never emitted) */
+		TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfStream);
+		TLStreamClose(psRgxDevInfo->hHWPerfStream);
+		psRgxDevInfo->hHWPerfStream = NULL;
+	}
+
+	/* Cleanup L1 buffer resources */
+	RGXHWPerfL1BufferDeinit(psRgxDevInfo);
+
+	/* Cleanup the HWPerf server module lock resource */
+	if (psRgxDevInfo->hHWPerfLock)
+	{
+		OSLockDestroy(psRgxDevInfo->hHWPerfLock);
+		psRgxDevInfo->hHWPerfLock = NULL;
+	}
+
+	PVR_DPF_RETURN;
+}
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Server API(s)
+ *****************************************************************************/
+
+static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                          IMG_BOOL bToggle,
+                                          IMG_UINT64 ui64Mask)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice;
+	RGXFWIF_KCCB_CMD sKccbCmd;
+
+	/* Whether this method is being used to enable or disable, the HWPerf
+	 * buffers (host and FW) are likely to be needed eventually, so create
+	 * them now; this also helps unit testing. Buffers are allocated on
+	 * demand to reduce the RAM footprint on systems that do not need
+	 * HWPerf resources.
+	 * Obtain the lock first, then test and initialise if required. */
+	OSLockAcquire(psDevice->hHWPerfLock);
+
+	if (!psDevice->bFirmwareInitialised)
+	{
+		psDevice->ui64HWPerfFilter = ui64Mask; // at least set filter
+		eError = PVRSRV_ERROR_NOT_INITIALISED;
+
+		PVR_DPF((PVR_DBG_ERROR, "HWPerf has NOT been initialised yet."
+		        " Mask has been SET to (%llx)", (long long) ui64Mask));
+
+		goto unlock_and_return;
+	}
+
+	if (RGXHWPerfIsInitRequired(psDevice))
+	{
+		eError = RGXHWPerfInitOnDemandResources(psDevice);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW "
+			        "resources failed", __func__));
+			goto unlock_and_return;
+		}
+	}
+
+	/* Unlock here as no further HWPerf resources are used below that would be
+	 * affected if freed by another thread */
+	OSLockRelease(psDevice->hHWPerfLock);
+
+	/* Return if the filter is the same */
+	if (!bToggle && psDevice->ui64HWPerfFilter == ui64Mask)
+		goto return_;
+
+	/* Prepare command parameters ... */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG;
+	sKccbCmd.uCmdData.sHWPerfCtrl.bToggle = bToggle;
+	sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64Mask;
+
+	/* Ask the FW to carry out the HWPerf configuration command */
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,	RGXFWIF_DM_GP,
+								&sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set new HWPerfFW filter in "
+				"firmware (error = %d)", __func__, eError));
+		goto return_;
+	}
+
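+	/* Toggle XORs the requested bits into the current filter; a plain set
+	 * replaces the filter with the new mask */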
+	psDevice->ui64HWPerfFilter = bToggle ?
+	        psDevice->ui64HWPerfFilter ^ ui64Mask : ui64Mask;
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP,
+	                        psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", return_);
+
+#if defined(DEBUG)
+	if (bToggle)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfFW events (%" IMG_UINT64_FMTSPECx ") have been TOGGLED",
+		        ui64Mask));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")",
+		        ui64Mask));
+	}
+#endif
+
+	return PVRSRV_OK;
+
+unlock_and_return:
+	OSLockRelease(psDevice->hHWPerfLock);
+
+return_:
+	return eError;
+}
+
+static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                            IMG_BOOL bToggle,
+                                            IMG_UINT32 ui32Mask)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice;
+
+	OSLockAcquire(psDevice->hLockHWPerfHostStream);
+	if (psDevice->hHWPerfHostStream == NULL)
+	{
+		eError = RGXHWPerfHostInitOnDemandResources(psDevice);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Initialization of on-demand HWPerfHost"
+			        " resources failed", __FUNCTION__));
+			OSLockRelease(psDevice->hLockHWPerfHostStream);
+			return eError;
+		}
+	}
+
+	psDevice->ui32HWPerfHostFilter = bToggle ?
+	        psDevice->ui32HWPerfHostFilter ^ ui32Mask : ui32Mask;
+	OSLockRelease(psDevice->hLockHWPerfHostStream);
+
+#if defined(DEBUG)
+	if (bToggle)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfHost events (%x) have been TOGGLED",
+		        ui32Mask));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfHost mask has been SET to (%x)",
+		        ui32Mask));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXHWPerfCtrlClientBuffer(IMG_BOOL bToggle,
+                                              IMG_UINT32 ui32InfoPageIdx,
+                                              IMG_UINT32 ui32Mask)
+{
+	PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+
+	PVR_LOGR_IF_FALSE(ui32InfoPageIdx >= HWPERF_INFO_IDX_START &&
+	                  ui32InfoPageIdx < HWPERF_INFO_IDX_END, "invalid info"
+	                  " page index", PVRSRV_ERROR_INVALID_PARAMS);
+
+	OSLockAcquire(psData->hInfoPageLock);
+	psData->pui32InfoPage[ui32InfoPageIdx] = bToggle ?
+	        psData->pui32InfoPage[ui32InfoPageIdx] ^ ui32Mask : ui32Mask;
+	OSLockRelease(psData->hInfoPageLock);
+
+#if defined(DEBUG)
+	if (bToggle)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) events (%x) have been TOGGLED",
+		        ui32InfoPageIdx, ui32Mask));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) mask has been SET to (%x)",
+		        ui32InfoPageIdx, ui32Mask));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*
+	PVRSRVRGXCtrlHWPerfKM
+*/
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
+	CONNECTION_DATA         *psConnection,
+	PVRSRV_DEVICE_NODE      *psDeviceNode,
+	RGX_HWPERF_STREAM_ID     eStreamId,
+	IMG_BOOL                 bToggle,
+	IMG_UINT64               ui64Mask)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	PVR_DPF_ENTERED;
+	PVR_ASSERT(psDeviceNode);
+
+	if (eStreamId == RGX_HWPERF_STREAM_ID0_FW)
+	{
+		return RGXHWPerfCtrlFwBuffer(psDeviceNode, bToggle, ui64Mask);
+	}
+	else if (eStreamId == RGX_HWPERF_STREAM_ID1_HOST)
+	{
+		return RGXHWPerfCtrlHostBuffer(psDeviceNode, bToggle, (IMG_UINT32) ui64Mask);
+	}
+	else if (eStreamId == RGX_HWPERF_STREAM_ID2_CLIENT)
+	{
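+		/* For the client stream the 64-bit value packs the info page index
+		 * in the upper 32 bits and the event mask in the lower 32 bits */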
+		IMG_UINT32 ui32Index = (IMG_UINT32) (ui64Mask >> 32);
+		IMG_UINT32 ui32Mask = (IMG_UINT32) ui64Mask;
+
+		return RGXHWPerfCtrlClientBuffer(bToggle, ui32Index, ui32Mask);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCtrlHWPerfKM: Unknown stream id."));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PVR_DPF_RETURN_OK;
+}
+
+/*
+	AppHint interfaces
+*/
+static
+PVRSRV_ERROR RGXHWPerfSetFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  const void *psPrivate,
+                                  IMG_UINT64 ui64Value)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	psDevNode = psPVRSRVData->psDeviceNodeList;
+	/* Control HWPerf on all the devices */
+	while (psDevNode)
+	{
+		eError = RGXHWPerfCtrlFwBuffer(psDevNode, IMG_FALSE, ui64Value);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to set HWPerf firmware filter for device (%d)", psDevNode->sDevId.i32UMIdentifier));
+			return eError;
+		}
+		psDevNode = psDevNode->psNext;
+	}
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfReadFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   const void *psPrivate,
+                                   IMG_UINT64 *pui64Value)
+{
+	PVRSRV_RGXDEV_INFO *psDevice;
+
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	if (!psDeviceNode || !psDeviceNode->pvDevice)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* The configuration command is applied to all devices, so the filter
+	 * value should be the same for all of them */
+	psDevice = psDeviceNode->pvDevice;
+	*pui64Value = psDevice->ui64HWPerfFilter;
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfSetHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    const void *psPrivate,
+                                    IMG_UINT32 ui32Value)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	psDevNode = psPVRSRVData->psDeviceNodeList;
+	/* Control HWPerf on all the devices */
+	while (psDevNode)
+	{
+		eError = RGXHWPerfCtrlHostBuffer(psDevNode, IMG_FALSE, ui32Value);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to set HWPerf firmware filter for device (%d)", psDevNode->sDevId.i32UMIdentifier));
+			return eError;
+		}
+		psDevNode = psDevNode->psNext;
+	}
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfReadHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     const void *psPrivate,
+                                     IMG_UINT32 *pui32Value)
+{
+	PVRSRV_RGXDEV_INFO *psDevice;
+
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	if (!psDeviceNode || !psDeviceNode->pvDevice)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevice = psDeviceNode->pvDevice;
+	*pui32Value = psDevice->ui32HWPerfHostFilter;
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _ReadClientFilter(const PVRSRV_DEVICE_NODE *psDevice,
+                                      const void *psPrivData,
+                                      IMG_UINT32 *pui32Value)
+{
+	PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+	IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData;
+	PVR_UNREFERENCED_PARAMETER(psDevice);
+
+	OSLockAcquire(psData->hInfoPageLock);
+	*pui32Value = psData->pui32InfoPage[ui32Idx];
+	OSLockRelease(psData->hInfoPageLock);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _WriteClientFilter(const PVRSRV_DEVICE_NODE *psDevice,
+                                       const void *psPrivData,
+                                       IMG_UINT32 ui32Value)
+{
+	IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData;
+	PVR_UNREFERENCED_PARAMETER(psDevice);
+
+	return RGXHWPerfCtrlClientBuffer(IMG_FALSE, ui32Idx, ui32Value);
+}
+
+void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRVAppHintRegisterHandlersUINT64(APPHINT_ID_HWPerfFWFilter,
+	                                    RGXHWPerfReadFwFilter,
+	                                    RGXHWPerfSetFwFilter,
+	                                    psDeviceNode,
+	                                    NULL);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfHostFilter,
+	                                    RGXHWPerfReadHostFilter,
+	                                    RGXHWPerfSetHostFilter,
+	                                    psDeviceNode,
+	                                    NULL);
+}
+
+void RGXHWPerfClientInitAppHintCallbacks(void)
+{
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Services,
+	                                    _ReadClientFilter,
+	                                    _WriteClientFilter,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    (void *) HWPERF_FILTER_SERVICES_IDX);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_EGL,
+	                                    _ReadClientFilter,
+	                                    _WriteClientFilter,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    (void *) HWPERF_FILTER_EGL_IDX);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGLES,
+	                                    _ReadClientFilter,
+	                                    _WriteClientFilter,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    (void *) HWPERF_FILTER_OPENGLES_IDX);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenCL,
+	                                    _ReadClientFilter,
+	                                    _WriteClientFilter,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    (void *) HWPERF_FILTER_OPENCL_IDX);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenRL,
+	                                    _ReadClientFilter,
+	                                    _WriteClientFilter,
+	                                    APPHINT_OF_DRIVER_NO_DEVICE,
+	                                    (void *) HWPERF_FILTER_OPENRL_IDX);
+}
+
+/*
+	PVRSRVRGXConfigEnableHWPerfCountersKM
+*/
+PVRSRV_ERROR PVRSRVRGXConfigEnableHWPerfCountersKM(
+	CONNECTION_DATA          * psConnection,
+	PVRSRV_DEVICE_NODE       * psDeviceNode,
+	IMG_UINT32                 ui32ArrayLen,
+	RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs)
+{
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sKccbCmd;
+	DEVMEM_MEMDESC*		psFwBlkConfigsMemDesc;
+	RGX_HWPERF_CONFIG_CNTBLK* psFwArray;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDeviceNode);
+	PVR_ASSERT(ui32ArrayLen>0);
+	PVR_ASSERT(psBlockConfigs);
+
+	/* Fill in the command structure with the parameters needed
+	 */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS;
+	sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = ui32ArrayLen;
+
+	eError = DevmemFwAllocate(psDeviceNode->pvDevice,
+			sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+									  PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+									  PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+									  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+									  PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+									  PVRSRV_MEMALLOCFLAG_UNCACHED |
+									  PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+			"FwHWPerfCountersConfigBlock",
+			&psFwBlkConfigsMemDesc);
+	if (eError != PVRSRV_OK)
+		PVR_LOGR_IF_ERROR(eError, "DevmemFwAllocate");
+
+	RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.sBlockConfigs,
+			psFwBlkConfigsMemDesc, 0, 0);
+
+	eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail1);
+	}
+
+	OSDeviceMemCopy(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen);
+	DevmemPDumpLoadMem(psFwBlkConfigsMemDesc,
+						0,
+						sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
+						0);
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM parameters set, calling FW")); */
+
+	/* Ask the FW to carry out the HWPerf configuration command
+	 */
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+			RGXFWIF_DM_GP, &sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", fail2);
+	}
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM command scheduled for FW")); */
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", fail2);
+	}
+
+	/* Release temporary memory used for block configuration
+	 */
+	RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+	DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+	DevmemFwFree(psDeviceNode->pvDevice, psFwBlkConfigsMemDesc);
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM firmware completed")); */
+
+	PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen));
+
+	PVR_DPF_RETURN_OK;
+
+fail2:
+	DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+fail1:
+	RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+	DevmemFwFree(psDeviceNode->pvDevice, psFwBlkConfigsMemDesc);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+
+/*
+	PVRSRVRGXConfigCustomCountersKM
+ */
+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
+	CONNECTION_DATA             * psConnection,
+	PVRSRV_DEVICE_NODE          * psDeviceNode,
+	IMG_UINT16                    ui16CustomBlockID,
+	IMG_UINT16                    ui16NumCustomCounters,
+	IMG_UINT32                  * pui32CustomCounterIDs)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD	sKccbCmd;
+	DEVMEM_MEMDESC*		psFwSelectCntrsMemDesc = NULL;
+	IMG_UINT32*			psFwArray;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDeviceNode);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVRGXSelectCustomCountersKM: configure block %u to read %u counters", ui16CustomBlockID, ui16NumCustomCounters));
+
+	/* Fill in the command structure with the parameters needed */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS;
+	sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16NumCounters = ui16NumCustomCounters;
+	sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16CustomBlock = ui16CustomBlockID;
+
+	if (ui16NumCustomCounters > 0)
+	{
+		PVR_ASSERT(pui32CustomCounterIDs);
+
+		eError = DevmemFwAllocate(psDeviceNode->pvDevice,
+				sizeof(IMG_UINT32) * ui16NumCustomCounters,
+				PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+				PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+				PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+				PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+				PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+				PVRSRV_MEMALLOCFLAG_UNCACHED |
+				PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+				"FwHWPerfConfigCustomCounters",
+				&psFwSelectCntrsMemDesc);
+		if (eError != PVRSRV_OK)
+			PVR_LOGR_IF_ERROR(eError, "DevmemFwAllocate");
+
+		RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.sCustomCounterIDs,
+				psFwSelectCntrsMemDesc, 0, 0);
+
+		eError = DevmemAcquireCpuVirtAddr(psFwSelectCntrsMemDesc, (void **)&psFwArray);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail1);
+		}
+
+		OSDeviceMemCopy(psFwArray, pui32CustomCounterIDs, sizeof(IMG_UINT32) * ui16NumCustomCounters);
+		DevmemPDumpLoadMem(psFwSelectCntrsMemDesc,
+				0,
+				sizeof(IMG_UINT32) * ui16NumCustomCounters,
+				0);
+	}
+
+	/* Push in the KCCB the command to configure the custom counters block */
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+			RGXFWIF_DM_GP, &sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", fail2);
+	}
+	PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: Command scheduled"));
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", fail2);
+	}
+	PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: FW operation completed"));
+
+	if (ui16NumCustomCounters > 0)
+	{
+		/* Release temporary memory used for block configuration */
+		RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
+		DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
+		DevmemFwFree(psDeviceNode->pvDevice, psFwSelectCntrsMemDesc);
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "HWPerf custom counters %u reading will be sent with the next HW events", ui16NumCustomCounters));
+
+	PVR_DPF_RETURN_OK;
+
+fail2:
+	if (psFwSelectCntrsMemDesc)
+	{
+		DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
+	}
+
+fail1:
+	if (psFwSelectCntrsMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
+		DevmemFwFree(psDeviceNode->pvDevice, psFwSelectCntrsMemDesc);
+	}
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+/*
+	PVRSRVRGXCtrlHWPerfCountersKM
+*/
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfCountersKM(
+	CONNECTION_DATA             * psConnection,
+	PVRSRV_DEVICE_NODE          * psDeviceNode,
+	IMG_BOOL                      bEnable,
+	IMG_UINT32                    ui32ArrayLen,
+	IMG_UINT16                  * psBlockIDs)
+{
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sKccbCmd;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDeviceNode);
+	PVR_ASSERT(ui32ArrayLen>0);
+	PVR_ASSERT(ui32ArrayLen<=RGXFWIF_HWPERF_CTRL_BLKS_MAX);
+	PVR_ASSERT(psBlockIDs);
+
+	/* Fill in the command structure with the parameters needed
+	 */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS;
+	sKccbCmd.uCmdData.sHWPerfCtrlBlks.bEnable = bEnable;
+	sKccbCmd.uCmdData.sHWPerfCtrlBlks.ui32NumBlocks = ui32ArrayLen;
+	OSDeviceMemCopy(sKccbCmd.uCmdData.sHWPerfCtrlBlks.aeBlockIDs, psBlockIDs, sizeof(IMG_UINT16)*ui32ArrayLen);
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM parameters set, calling FW")); */
+
+	/* Ask the FW to carry out the HWPerf configuration command
+	 */
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+			RGXFWIF_DM_GP, &sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+		PVR_LOGR_IF_ERROR(eError, "RGXScheduleCommand");
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM command scheduled for FW")); */
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+		PVR_LOGR_IF_ERROR(eError, "RGXWaitForFWOp");
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM firmware completed")); */
+
+#if defined(DEBUG)
+	if (bEnable)
+		PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been ENABLED", ui32ArrayLen));
+	else
+		PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been DISABLED", ui32ArrayLen));
+#endif
+
+	PVR_DPF_RETURN_OK;
+}
+
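+/* Clamp an AppHint-supplied HWPerf Host buffer size (in KB) into the
+ * supported range and convert it to bytes; zero selects the driver default.
+ * For illustration only (assuming hypothetical MIN=32 and MAX=2048):
+ * 0 -> default, 16 -> 32 KB, 256 -> 256 KB, 4096 -> 2048 KB. */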
+static INLINE IMG_UINT32 _RGXHWPerfFixBufferSize(IMG_UINT32 ui32BufSizeKB)
+{
+	if (ui32BufSizeKB > HWPERF_HOST_TL_STREAM_SIZE_MAX)
+	{
+		/* Size specified as an AppHint but it is too big */
+		PVR_DPF((PVR_DBG_WARNING,"RGXHWPerfHostInit: HWPerf Host buffer size "
+				"value (%u) too big, using maximum (%u)", ui32BufSizeKB,
+		        HWPERF_HOST_TL_STREAM_SIZE_MAX));
+		return HWPERF_HOST_TL_STREAM_SIZE_MAX<<10;
+	}
+	else if (ui32BufSizeKB >= HWPERF_HOST_TL_STREAM_SIZE_MIN)
+	{
+		return ui32BufSizeKB<<10;
+	}
+	else if (ui32BufSizeKB > 0)
+	{
+		/* Size specified as an AppHint but it is too small */
+		PVR_DPF((PVR_DBG_WARNING,"RGXHWPerfHostInit: HWPerf Host buffer size "
+		        "value (%u) too small, using minimum (%u)", ui32BufSizeKB,
+		        HWPERF_HOST_TL_STREAM_SIZE_MIN));
+		return HWPERF_HOST_TL_STREAM_SIZE_MIN<<10;
+	}
+	else
+	{
+		/* 0 size implies AppHint not set or is set to zero,
+		 * use default size from driver constant. */
+		return HWPERF_HOST_TL_STREAM_SIZE_DEFAULT<<10;
+	}
+}
+
+/******************************************************************************
+ * RGX HW Performance Host Stream API
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfHostInit
+
+@Description    Called during driver init to initialise the HWPerfHost
+                stream in the Rogue device driver. Only the minimal
+                resources required for the HWPerf server module to
+                function are allocated here.
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB)
+{
+	PVRSRV_ERROR eError;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	PVR_ASSERT(psRgxDevInfo != NULL);
+
+	eError = OSLockCreate(&psRgxDevInfo->hLockHWPerfHostStream, LOCK_TYPE_PASSIVE);
+	PVR_LOGG_IF_ERROR(eError, "OSLockCreate", error);
+
+	psRgxDevInfo->hHWPerfHostStream = NULL;
+	psRgxDevInfo->ui32HWPerfHostFilter = 0; /* disable all events */
+	psRgxDevInfo->ui32HWPerfHostNextOrdinal = 0;
+	psRgxDevInfo->ui32HWPerfHostBufSize = _RGXHWPerfFixBufferSize(ui32BufSizeKB);
+
+error:
+	return eError;
+}
+
+static void _HWPerfHostOnConnectCB(void *pvArg)
+{
+	RGX_HWPERF_HOST_CLK_SYNC(pvArg);
+}
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfHostInitOnDemandResources
+
+@Description    Allocates the HWPerfHost buffer and stream. Called at
+                driver load time if HWPerf is enabled then; otherwise
+                called on demand when the resources are first required.
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; /* +5 leaves room for a numeric suffix, up to "hwperf_host_9999" */
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Form the per-DevNode HWPerf host stream name so that it can be identified in the UM */
+	if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d",
+				   PVRSRV_TL_HWPERF_HOST_SERVER_STREAM,
+				   psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf host stream name for device %d",
+		                        __FUNCTION__,
+								psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = TLStreamCreate(&psRgxDevInfo->hHWPerfHostStream,
+							psRgxDevInfo->psDeviceNode,
+					        pszHWPerfHostStreamName, psRgxDevInfo->ui32HWPerfHostBufSize,
+					        TL_OPMODE_DROP_NEWER,
+							_HWPerfHostOnConnectCB, psRgxDevInfo,
+							NULL, NULL);
+	PVR_LOGG_IF_ERROR(eError, "TLStreamCreate", error_stream_create);
+
+	eError = TLStreamSetNotifStream(psRgxDevInfo->hHWPerfHostStream,
+	                                PVRSRVGetPVRSRVData()->hTLCtrlStream);
+	/* the stream can still be discovered without the notification stream, so just log the error and carry on */
+	PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream");
+
+	/* send the event here because host stream is implicitly opened for write
+	 * in TLStreamCreate and TLStreamOpen is never called (so the event is
+	 * never emitted) */
+	TLStreamMarkStreamOpen(psRgxDevInfo->hHWPerfHostStream);
+
+	PVR_DPF((DBGPRIV_MESSAGE, "HWPerf Host buffer size is %uKB",
+	        psRgxDevInfo->ui32HWPerfHostBufSize));
+
+	return PVRSRV_OK;
+
+error_stream_create:
+	OSLockDestroy(psRgxDevInfo->hLockHWPerfHostStream);
+	psRgxDevInfo->hLockHWPerfHostStream = NULL;
+
+	return eError;
+}
+
+void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	PVR_ASSERT (psRgxDevInfo);
+	if (psRgxDevInfo->hHWPerfHostStream)
+	{
+		/* send the event here because host stream is implicitly opened for
+		 * write in TLStreamCreate and TLStreamClose is never called (so the
+		 * event is never emitted) */
+		TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream);
+		TLStreamClose(psRgxDevInfo->hHWPerfHostStream);
+		psRgxDevInfo->hHWPerfHostStream = NULL;
+	}
+
+	if (psRgxDevInfo->hLockHWPerfHostStream)
+	{
+		OSLockDestroy(psRgxDevInfo->hLockHWPerfHostStream);
+		psRgxDevInfo->hLockHWPerfHostStream = NULL;
+	}
+}
+
+inline void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Filter)
+{
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+	psRgxDevInfo->ui32HWPerfHostFilter = ui32Filter;
+}
+
+inline IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent)
+{
+	PVR_ASSERT(psRgxDevInfo);
+	return (psRgxDevInfo->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(eEvent)) ? IMG_TRUE : IMG_FALSE;
+}
+
+static inline void _PostFunctionPrologue(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	PVR_ASSERT(psRgxDevInfo->hLockHWPerfHostStream != NULL);
+	PVR_ASSERT(psRgxDevInfo->hHWPerfHostStream != NULL);
+
+	OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+
+	/* Increment the ordinal beforehand so that it still advances if the packet is dropped. */
+	psRgxDevInfo->ui32HWPerfHostNextOrdinal++;
+}
+
+static inline void _PostFunctionEpilogue(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+}
+
+static inline IMG_UINT8 *_ReserveHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size)
+{
+	IMG_UINT8 *pui8Dest;
+
+	PVRSRV_ERROR eError = TLStreamReserve(psRgxDevInfo->hHWPerfHostStream,
+	                         &pui8Dest, ui32Size);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not reserve space in %s buffer"
+		        " (%d). Dropping packet.",
+		        __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+		return NULL;
+	}
+	PVR_ASSERT(pui8Dest != NULL);
+
+	return pui8Dest;
+}
+
+static inline void _CommitHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size)
+{
+	PVRSRV_ERROR eError = TLStreamCommit(psRgxDevInfo->hHWPerfHostStream,
+	                                     ui32Size);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not commit data to %s"
+	            " (%d)", __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+	}
+}
+
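+/* Fill in the common HWPerf V2 packet header: host ordinal, timestamp,
+ * signature, stream/event type and total packet size */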
+static inline void _SetupHostPacketHeader(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                          IMG_UINT8 *pui8Dest,
+                                          RGX_HWPERF_HOST_EVENT_TYPE eEvType,
+                                          IMG_UINT32 ui32Size)
+{
+	RGX_HWPERF_V2_PACKET_HDR *psHeader = (RGX_HWPERF_V2_PACKET_HDR *) pui8Dest;
+
+	PVR_ASSERT(ui32Size<=RGX_HWPERF_MAX_PACKET_SIZE);
+
+	psHeader->ui32Ordinal = psRgxDevInfo->ui32HWPerfHostNextOrdinal;
+	psHeader->ui64Timestamp = RGXGPUFreqCalibrateClockus64();
+	psHeader->ui32Sig = HWPERF_PACKET_V2B_SIG;
+	psHeader->eTypeId = RGX_HWPERF_MAKE_TYPEID(RGX_HWPERF_STREAM_ID1_HOST,
+	        eEvType, 0, 0);
+	psHeader->ui32Size = ui32Size;
+}
+
+static inline void _SetupHostEnqPacketData(IMG_UINT8 *pui8Dest,
+                                           RGX_HWPERF_KICK_TYPE eEnqType,
+                                           IMG_UINT32 ui32Pid,
+                                           IMG_UINT32 ui32FWDMContext,
+                                           IMG_UINT32 ui32ExtJobRef,
+                                           IMG_UINT32 ui32IntJobRef,
+                                           IMG_UINT32 ui32CheckFenceUID,
+                                           IMG_UINT32 ui32UpdateFenceUID,
+                                           IMG_UINT64 ui64DeadlineInus,
+                                           IMG_UINT64 ui64CycleEstimate)
+{
+	RGX_HWPERF_HOST_ENQ_DATA *psData = (RGX_HWPERF_HOST_ENQ_DATA *)
+	        (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+	psData->ui32EnqType = eEnqType;
+	psData->ui32PID = ui32Pid;
+	psData->ui32ExtJobRef = ui32ExtJobRef;
+	psData->ui32IntJobRef = ui32IntJobRef;
+	psData->ui32DMContext = ui32FWDMContext;
+	psData->ui32Padding = 0;       /* Set to zero for future compatibility */
+	psData->ui32CheckFence_UID = ui32CheckFenceUID;
+	psData->ui32UpdateFence_UID = ui32UpdateFenceUID;
+	psData->ui64DeadlineInus = ui64DeadlineInus;
+	psData->ui64CycleEstimate = ui64CycleEstimate;
+}
+
+void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                               RGX_HWPERF_KICK_TYPE eEnqType,
+                               IMG_UINT32 ui32Pid,
+                               IMG_UINT32 ui32FWDMContext,
+                               IMG_UINT32 ui32ExtJobRef,
+                               IMG_UINT32 ui32IntJobRef,
+                               IMG_UINT32 ui32CheckFenceUID,
+                               IMG_UINT32 ui32UpdateFenceUID,
+                               IMG_UINT64 ui64DeadlineInus,
+                               IMG_UINT64 ui64CycleEstimate )
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_ENQ_DATA);
+
+	_PostFunctionPrologue(psRgxDevInfo);
+
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ENQ, ui32Size);
+	_SetupHostEnqPacketData(pui8Dest,
+	                        eEnqType,
+	                        ui32Pid,
+	                        ui32FWDMContext,
+	                        ui32ExtJobRef,
+	                        ui32IntJobRef,
+	                        ui32CheckFenceUID,
+	                        ui32UpdateFenceUID,
+	                        ui64DeadlineInus,
+	                        ui64CycleEstimate);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo);
+}
+
+static inline IMG_UINT32 _CalculateHostUfoPacketSize(RGX_HWPERF_UFO_EV eUfoType,
+                                                     IMG_UINT uiNoOfUFOs)
+{
+	IMG_UINT32 ui32Size =
+		(IMG_UINT32) offsetof(RGX_HWPERF_UFO_DATA, aui32StreamData);
+	RGX_HWPERF_UFO_DATA_ELEMENT *puData;
+
+	switch (eUfoType)
+	{
+		case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+		case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+			ui32Size += uiNoOfUFOs * sizeof(puData->sCheckSuccess);
+			break;
+		case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+		case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+			ui32Size += uiNoOfUFOs * sizeof(puData->sCheckFail);
+			break;
+		case RGX_HWPERF_UFO_EV_UPDATE:
+			ui32Size += uiNoOfUFOs * sizeof(puData->sUpdate);
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO"
+			        " event type"));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+
+	return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostUfoPacketData(IMG_UINT8 *pui8Dest,
+                                        RGX_HWPERF_UFO_EV eUfoType,
+                                        RGX_HWPERF_UFO_DATA_ELEMENT psUFOData[],
+                                        IMG_UINT uiNoOfUFOs)
+{
+	IMG_UINT uiUFOIdx;
+	RGX_HWPERF_HOST_UFO_DATA *psData = (RGX_HWPERF_HOST_UFO_DATA *)
+	        (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+	RGX_HWPERF_UFO_DATA_ELEMENT *puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+	         psData->aui32StreamData;
+
+	psData->eEvType = eUfoType;
+	psData->ui32StreamInfo = RGX_HWPERF_MAKE_UFOPKTINFO(uiNoOfUFOs,
+	        offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData));
+
+	switch (eUfoType)
+	{
+		case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+		case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+			for (uiUFOIdx = 0; uiUFOIdx < uiNoOfUFOs; uiUFOIdx++)
+			{
+				puData->sCheckSuccess.ui32FWAddr =
+				        psUFOData[uiUFOIdx].sCheckSuccess.ui32FWAddr;
+				puData->sCheckSuccess.ui32Value =
+				        psUFOData[uiUFOIdx].sCheckSuccess.ui32Value;
+
+				puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+				        (((IMG_BYTE *) puData) + sizeof(puData->sCheckSuccess));
+			}
+			break;
+		case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+		case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+			for (uiUFOIdx = 0; uiUFOIdx < uiNoOfUFOs; uiUFOIdx++)
+			{
+				puData->sCheckFail.ui32FWAddr =
+				        psUFOData[uiUFOIdx].sCheckFail.ui32FWAddr;
+				puData->sCheckFail.ui32Value =
+				        psUFOData[uiUFOIdx].sCheckFail.ui32Value;
+				puData->sCheckFail.ui32Required =
+				        psUFOData[uiUFOIdx].sCheckFail.ui32Required;
+
+				puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+				        (((IMG_BYTE *) puData) + sizeof(puData->sCheckFail));
+			}
+			break;
+		case RGX_HWPERF_UFO_EV_UPDATE:
+			for (uiUFOIdx = 0; uiUFOIdx < uiNoOfUFOs; uiUFOIdx++)
+			{
+				puData->sUpdate.ui32FWAddr =
+				        psUFOData[uiUFOIdx].sUpdate.ui32FWAddr;
+				puData->sUpdate.ui32OldValue =
+				        psUFOData[uiUFOIdx].sUpdate.ui32OldValue;
+				puData->sUpdate.ui32NewValue =
+				        psUFOData[uiUFOIdx].sUpdate.ui32NewValue;
+
+				puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+				        (((IMG_BYTE *) puData) + sizeof(puData->sUpdate));
+			}
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO"
+			         " event type"));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+}
+
+void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                               RGX_HWPERF_UFO_EV eUfoType,
+                               RGX_HWPERF_UFO_DATA_ELEMENT psUFOData[],
+                               IMG_UINT uiNoOfUFOs)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size = _CalculateHostUfoPacketSize(eUfoType, uiNoOfUFOs);
+
+	_PostFunctionPrologue(psRgxDevInfo);
+
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_UFO, ui32Size);
+	_SetupHostUfoPacketData(pui8Dest, eUfoType, psUFOData, uiNoOfUFOs);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo);
+}
+
+#define UNKNOWN_SYNC_NAME "UnknownSync"
+
+static inline IMG_UINT32 _FixNameAndCalculateHostAllocPacketSize(
+                                       RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+                                       const IMG_CHAR **ppsName,
+                                       IMG_UINT32 *ui32NameSize)
+{
+	RGX_HWPERF_HOST_ALLOC_DATA *psData;
+	RGX_HWPERF_HOST_ALLOC_DETAIL *puData;
+	IMG_UINT32 ui32Size = sizeof(psData->ui32AllocType);
+
+	if (*ppsName != NULL && *ui32NameSize > 0)
+	{
+		/* first strip the terminator */
+		if ((*ppsName)[*ui32NameSize - 1] == '\0')
+			*ui32NameSize -= 1;
+		/* if string longer than maximum cut it (leave space for '\0') */
+		if (*ui32NameSize >= SYNC_MAX_CLASS_NAME_LEN)
+			*ui32NameSize = SYNC_MAX_CLASS_NAME_LEN - 1;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostAllocEvent: Invalid"
+		        " resource name given."));
+		*ppsName = UNKNOWN_SYNC_NAME;
+		*ui32NameSize = sizeof(UNKNOWN_SYNC_NAME) - 1;
+	}
+
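+	/* Each detail struct embeds a SYNC_MAX_CLASS_NAME_LEN-byte name array;
+	 * the packet carries only the actual (truncated) name plus its '\0'
+	 * terminator, hence the subtraction below */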
+	switch (eAllocType)
+	{
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+			ui32Size += sizeof(puData->sSyncAlloc) - SYNC_MAX_CLASS_NAME_LEN +
+			        *ui32NameSize + 1; /* +1 for '\0' */
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE:
+			ui32Size += sizeof(puData->sTimelineAlloc) - SYNC_MAX_CLASS_NAME_LEN +
+			        *ui32NameSize + 1; /* +1 for '\0' */
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+			ui32Size += sizeof(puData->sFenceAlloc) - SYNC_MAX_CLASS_NAME_LEN +
+			        *ui32NameSize + 1; /* +1 for '\0' */
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR,
+			         "RGXHWPerfHostPostAllocEvent: Invalid alloc event type"));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+
+	return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostAllocPacketData(IMG_UINT8 *pui8Dest,
+                                       RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+                                       IMG_UINT32 ui32UID,
+                                       IMG_UINT32 ui32PID,
+                                       IMG_UINT32 ui32FWAddr,
+                                       const IMG_CHAR *psName,
+                                       IMG_UINT32 ui32NameSize)
+{
+	RGX_HWPERF_HOST_ALLOC_DATA *psData = (RGX_HWPERF_HOST_ALLOC_DATA *)
+	        (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+	IMG_CHAR *acName = NULL;
+
+	psData->ui32AllocType = eAllocType;
+
+	switch (eAllocType)
+	{
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+			psData->uAllocDetail.sSyncAlloc.ui32FWAddr = ui32FWAddr;
+			acName = psData->uAllocDetail.sSyncAlloc.acName;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE:
+			psData->uAllocDetail.sTimelineAlloc.ui32Timeline_UID1 = ui32UID;
+			psData->uAllocDetail.sTimelineAlloc.uiPid = ui32PID;
+			acName = psData->uAllocDetail.sTimelineAlloc.acName;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+			psData->uAllocDetail.sFenceAlloc.ui32Fence_UID = ui32UID;
+			psData->uAllocDetail.sFenceAlloc.ui32CheckPt_FWAddr = ui32FWAddr;
+			acName = psData->uAllocDetail.sFenceAlloc.acName;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNCCP:
+			psData->uAllocDetail.sSyncCheckPointAlloc.ui32Timeline_UID = ui32UID;
+			psData->uAllocDetail.sSyncCheckPointAlloc.ui32CheckPt_FWAddr = ui32FWAddr;
+			acName = psData->uAllocDetail.sSyncCheckPointAlloc.acName;
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR,
+			         "RGXHWPerfHostPostAllocEvent: Invalid alloc event type"));
+			PVR_ASSERT(IMG_FALSE);
+	}
+
+
+	if (ui32NameSize)
+	{
+		OSStringNCopy(acName, psName, ui32NameSize);
+		/* we know here that string is not null terminated and that we have
+		 * enough space for the terminator */
+		acName[ui32NameSize] = '\0';
+	}
+	else
+	{
+		/* In case no name was given make sure we don't access random memory */
+		acName[0] = '\0';
+	}
+}
+
+void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO* psRgxDevInfo,
+                                 RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+                                 IMG_UINT32 ui32UID,
+                                 IMG_UINT32 ui32PID,
+                                 IMG_UINT32 ui32FWAddr,
+                                 const IMG_CHAR *psName,
+                                 IMG_UINT32 ui32NameSize)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size = _FixNameAndCalculateHostAllocPacketSize(eAllocType,
+	                                                             &psName,
+	                                                             &ui32NameSize);
+
+	_PostFunctionPrologue(psRgxDevInfo);
+
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ALLOC, ui32Size);
+
+	_SetupHostAllocPacketData(pui8Dest,
+	                          eAllocType,
+	                          ui32UID,
+	                          ui32PID,
+	                          ui32FWAddr,
+	                          psName,
+	                          ui32NameSize);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo);
+}
+
+static inline void _SetupHostFreePacketData(IMG_UINT8 *pui8Dest,
+                                          RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+                                          IMG_UINT32 ui32UID,
+                                          IMG_UINT32 ui32PID,
+                                          IMG_UINT32 ui32FWAddr)
+{
+	RGX_HWPERF_HOST_FREE_DATA *psData = (RGX_HWPERF_HOST_FREE_DATA *)
+	        (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+	psData->ui32FreeType = eFreeType;
+
+	switch (eFreeType)
+	{
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+			psData->uFreeDetail.sSyncFree.ui32FWAddr = ui32FWAddr;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE:
+			psData->uFreeDetail.sTimelineDestroy.ui32Timeline_UID1 = ui32UID;
+			psData->uFreeDetail.sTimelineDestroy.uiPid = ui32PID;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+			psData->uFreeDetail.sFenceDestroy.ui32Fence_UID = ui32UID;
+			break;
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNCCP:
+			psData->uFreeDetail.sSyncCheckPointFree.ui32CheckPt_FWAddr = ui32FWAddr;
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR,
+			         "RGXHWPerfHostPostFreeEvent: Invalid free event type"));
+			PVR_ASSERT(IMG_FALSE);
+	}
+}
+
+void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+                                IMG_UINT32 ui32UID,
+                                IMG_UINT32 ui32PID,
+                                IMG_UINT32 ui32FWAddr)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_FREE_DATA);
+
+	_PostFunctionPrologue(psRgxDevInfo);
+
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_FREE, ui32Size);
+	_SetupHostFreePacketData(pui8Dest,
+	                         eFreeType,
+                             ui32UID,
+                             ui32PID,
+                             ui32FWAddr);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo);
+}
+
+static inline IMG_UINT32 _FixNameAndCalculateHostModifyPacketSize(
+                                      RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+                                      const IMG_CHAR **ppsName,
+                                      IMG_UINT32 *ui32NameSize)
+{
+	RGX_HWPERF_HOST_MODIFY_DATA *psData;
+	RGX_HWPERF_HOST_MODIFY_DETAIL *puData;
+	IMG_UINT32 ui32Size = sizeof(psData->ui32ModifyType);
+
+	if (*ppsName != NULL && *ui32NameSize > 0)
+	{
+		/* first strip the terminator */
+		if ((*ppsName)[*ui32NameSize - 1] == '\0')
+			*ui32NameSize -= 1;
+		/* if string longer than maximum cut it (leave space for '\0') */
+		if (*ui32NameSize >= SYNC_MAX_CLASS_NAME_LEN)
+			*ui32NameSize = SYNC_MAX_CLASS_NAME_LEN - 1;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostModifyEvent: Invalid"
+		        " resource name given."));
+		*ppsName = UNKNOWN_SYNC_NAME;
+		*ui32NameSize = sizeof(UNKNOWN_SYNC_NAME) - 1;
+	}
+
+	switch (eModifyType)
+	{
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+			ui32Size += sizeof(puData->sFenceMerge) - SYNC_MAX_CLASS_NAME_LEN +
+			        *ui32NameSize + 1; /* +1 for '\0' */
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR,
+			         "RGXHWPerfHostPostModifyEvent: Invalid modify event type"));
+			PVR_ASSERT(IMG_FALSE);
+			break;
+	}
+
+	return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostModifyPacketData(IMG_UINT8 *pui8Dest,
+                                              RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+                                              IMG_UINT32 ui32NewUID,
+                                              IMG_UINT32 ui32UID1,
+                                              IMG_UINT32 ui32UID2,
+                                              const IMG_CHAR *psName,
+                                              IMG_UINT32 ui32NameSize)
+{
+	RGX_HWPERF_HOST_MODIFY_DATA *psData = (RGX_HWPERF_HOST_MODIFY_DATA *)
+	        (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+	IMG_CHAR *acName = NULL;
+
+	psData->ui32ModifyType = eModifyType;
+
+	switch (eModifyType)
+	{
+		case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+			psData->uModifyDetail.sFenceMerge.ui32NewFence_UID = ui32NewUID;
+			psData->uModifyDetail.sFenceMerge.ui32InFence1_UID = ui32UID1;
+			psData->uModifyDetail.sFenceMerge.ui32InFence2_UID = ui32UID2;
+			acName = psData->uModifyDetail.sFenceMerge.acName;
+			break;
+		default:
+			// unknown type - this should never happen
+			PVR_DPF((PVR_DBG_ERROR,
+			         "RGXHWPerfHostPostModifyEvent: Invalid modify event type"));
+			PVR_ASSERT(IMG_FALSE);
+	}
+
+	if (ui32NameSize)
+	{
+		OSStringNCopy(acName, psName, ui32NameSize);
+		/* we know here that string is not null terminated and that we have
+		 * enough space for the terminator */
+		acName[ui32NameSize] = '\0';
+	}
+	else
+	{
+		/* In case no name was given make sure we don't access random memory */
+		acName[0] = '\0';
+	}
+}
+
+void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                  RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+                                  IMG_UINT32 ui32NewUID,
+                                  IMG_UINT32 ui32UID1,
+                                  IMG_UINT32 ui32UID2,
+                                  const IMG_CHAR *psName,
+                                  IMG_UINT32 ui32NameSize)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size = _FixNameAndCalculateHostModifyPacketSize(eModifyType,
+			                                                     &psName,
+			                                                     &ui32NameSize);
+
+	_PostFunctionPrologue(psRgxDevInfo);
+
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_MODIFY, ui32Size);
+	_SetupHostModifyPacketData(pui8Dest,
+	                           eModifyType,
+	                           ui32NewUID,
+	                           ui32UID1,
+	                           ui32UID2,
+	                           psName,
+	                           ui32NameSize);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo);
+}
+
+static inline void _SetupHostClkSyncPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT8 *pui8Dest)
+{
+	RGX_HWPERF_HOST_CLK_SYNC_DATA *psData = (RGX_HWPERF_HOST_CLK_SYNC_DATA *)
+	        (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psRgxDevInfo->psRGXFWIfGpuUtilFWCb;
+	IMG_UINT32 ui32CurrIdx =
+	        RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount);
+	RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32CurrIdx];
+
+	psData->ui64CRTimestamp = psTimeCorr->ui64CRTimeStamp;
+	psData->ui64OSTimestamp = psTimeCorr->ui64OSTimeStamp;
+	psData->ui32ClockSpeed = psTimeCorr->ui32CoreClockSpeed;
+}
+
+void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	IMG_UINT8 *pui8Dest;
+	IMG_UINT32 ui32Size =
+	        RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_CLK_SYNC_DATA);
+
+	_PostFunctionPrologue(psRgxDevInfo);
+
+	if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+	{
+		goto cleanup;
+	}
+
+	_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLK_SYNC, ui32Size);
+	_SetupHostClkSyncPacketData(psRgxDevInfo, pui8Dest);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+	_PostFunctionEpilogue(psRgxDevInfo);
+}
+
+/******************************************************************************
+ * SUPPORT_GPUTRACE_EVENTS
+ *
+ * Currently only implemented on Linux and Android. Feature can be enabled on
+ * Android builds but can also be enabled on Linux builds for testing
+ * but requires the gpu.h FTrace event header file to be present.
+ *****************************************************************************/
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+/* Saved value of the clock source before the trace was enabled. We're keeping
+ * it here so that we know which clock should be selected after we disable the
+ * gpu ftrace. */
+RGXTIMECORR_CLOCK_TYPE  geLastTimeCorrClock = PVRSRV_APPHINT_TIMECORRCLOCK;
+
+/* This lock ensures that the reference counting operation on the FTrace UFO
+ * events and enable/disable operation on firmware event are performed as
+ * one atomic operation. This should ensure that there are no race conditions
+ * between reference counting and firmware event state change.
+ * See below comment for guiUfoEventRef.
+ */
+POS_LOCK    ghLockFTraceEventLock;
+
+/* Multiple FTrace UFO events are reflected in the firmware as a single event. When
+ * we enable an FTrace UFO event we also want to enable it in the firmware at
+ * the same time. Since there is a many-to-one relation between those events
+ * we count how many FTrace UFO events are enabled. If at least one event is
+ * enabled we enable the firmware event; when all FTrace UFO events are
+ * disabled we disable the firmware event. */
+IMG_UINT    guiUfoEventRef;
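+
+/* Illustrative enable/disable pattern implied by the above (a sketch,
+ * not a verbatim excerpt of this driver):
+ *
+ *   OSLockAcquire(ghLockFTraceEventLock);
+ *   if (guiUfoEventRef++ == 0)
+ *       (enable the FW UFO event);   // first FTrace UFO event enabled
+ *   ...
+ *   if (--guiUfoEventRef == 0)
+ *       (disable the FW UFO event);  // last FTrace UFO event disabled
+ *   OSLockRelease(ghLockFTraceEventLock);
+ */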
+
+static void RGXHWPerfFTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE);
+
+typedef struct RGX_HWPERF_FTRACE_DATA {
+	/* This lock ensures the HWPerf TL stream reading resources are not destroyed
+	 * by one thread disabling it while another is reading from it. Keeps the
+	 * state and resource create/destroy atomic and consistent. */
+	POS_LOCK    hFTraceResourceLock;
+
+	IMG_HANDLE  hGPUTraceCmdCompleteHandle;
+	IMG_HANDLE  hGPUTraceTLStream;
+	IMG_UINT64  ui64LastSampledTimeCorrOSTimeStamp;
+	IMG_UINT32  ui32FTraceLastOrdinal;
+} RGX_HWPERF_FTRACE_DATA;
+
+/* Caller must hold hFTraceResourceLock before calling this method.
+ */
+static PVRSRV_ERROR RGXHWPerfFTraceGPUEnable(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	RGX_HWPERF_FTRACE_DATA *psFtraceData;
+	PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode;
+	IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5];
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psRgxDevInfo);
+
+	psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+
+	PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock));
+
+	/* return if already enabled */
+	if (psFtraceData->hGPUTraceTLStream)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Signal FW to enable event generation */
+	if (psRgxDevInfo->bFirmwareInitialised)
+	{
+		IMG_UINT64 ui64UFOFilter = psRgxDevInfo->ui64HWPerfFilter &
+		        (RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO);
+
+		eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode,
+		                               RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
+		                               RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |
+		                               ui64UFOFilter);
+		PVR_LOGG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM", err_out);
+	}
+	else
+	{
+		/* only set filter and exit */
+		psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |
+		        ((RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO) &
+		        psRgxDevInfo->ui64HWPerfFilter);
+
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%llx)",
+		        (long long) psRgxDevInfo->ui64HWPerfFilter));
+
+		return PVRSRV_OK;
+	}
+
+	/* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */
+	if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d",
+					PVRSRV_TL_HWPERF_RGX_FW_STREAM, psRgxDevNode->sDevId.i32UMIdentifier) < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf stream name for device %d",
+		                        __FUNCTION__,
+								psRgxDevNode->sDevId.i32UMIdentifier));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Open the TL Stream for HWPerf data consumption */
+	eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+								pszHWPerfStreamName,
+								PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+								&psFtraceData->hGPUTraceTLStream);
+	PVR_LOGG_IF_ERROR(eError, "TLClientOpenStream", err_out);
+
+	if (RGXGPUFreqCalibrateGetClockSource() != RGXTIMECORR_CLOCK_SCHED)
+	{
+		/* Set clock source for timer correlation data to sched_clock */
+		geLastTimeCorrClock = RGXGPUFreqCalibrateGetClockSource();
+		RGXGPUFreqCalibrateSetClockSource(psRgxDevNode, RGXTIMECORR_CLOCK_SCHED);
+	}
+
+	/* Reset the OS timestamp coming from the timer correlation data
+	 * associated with the latest HWPerf event we processed.
+	 */
+	psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = 0;
+
+	/* Register a notifier to collect HWPerf data whenever the HW completes
+	 * an operation.
+	 */
+	eError = PVRSRVRegisterCmdCompleteNotify(
+		&psFtraceData->hGPUTraceCmdCompleteHandle,
+		&RGXHWPerfFTraceCmdCompleteNotify,
+		psRgxDevInfo);
+	PVR_LOGG_IF_ERROR(eError, "PVRSRVRegisterCmdCompleteNotify", err_close_stream);
+
+err_out:
+	PVR_DPF_RETURN_RC(eError);
+
+err_close_stream:
+	TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+						psFtraceData->hGPUTraceTLStream);
+	psFtraceData->hGPUTraceTLStream = NULL;
+	goto err_out;
+}
+
+/* Caller must hold hFTraceResourceLock before calling this method.
+ */
+static PVRSRV_ERROR RGXHWPerfFTraceGPUDisable(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL bDeInit)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	RGX_HWPERF_FTRACE_DATA *psFtraceData;
+	PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psRgxDevInfo);
+
+	psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+
+	PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock));
+
+	/* if FW is not yet initialised, just set filter and exit */
+	if (!psRgxDevInfo->bFirmwareInitialised)
+	{
+		psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_NONE;
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%llx)",
+		        (long long) psRgxDevInfo->ui64HWPerfFilter));
+
+		return PVRSRV_OK;
+	}
+
+	if (NULL == psFtraceData->hGPUTraceTLStream)
+	{
+		/* Tracing already disabled, just return */
+		return PVRSRV_OK;
+	}
+
+	if (!bDeInit)
+	{
+		eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode,
+		                               RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
+		                               (RGX_HWPERF_EVENT_MASK_NONE));
+		PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
+	}
+
+	if (psFtraceData->hGPUTraceCmdCompleteHandle)
+	{
+		/* Tracing is being turned off. Unregister the notifier. */
+		eError = PVRSRVUnregisterCmdCompleteNotify(
+				psFtraceData->hGPUTraceCmdCompleteHandle);
+		PVR_LOG_IF_ERROR(eError, "PVRSRVUnregisterCmdCompleteNotify");
+		psFtraceData->hGPUTraceCmdCompleteHandle = NULL;
+	}
+
+	if (psFtraceData->hGPUTraceTLStream)
+	{
+		IMG_PBYTE pbTmp = NULL;
+		IMG_UINT32 ui32Tmp = 0;
+
+		/* We have to flush both the L1 (FW) and L2 (Host) buffers in case there
+		 * are some events left unprocessed in this FTrace/systrace "session"
+		 * (note that even if we have just disabled HWPerf on the FW some packets
+		 * could have been generated and already copied to L2 by the MISR handler).
+		 *
+		 * With the following calls we will both copy new data to the Host buffer
+		 * (done by the producer callback in TLClientAcquireData) and advance
+		 * the read offset in the buffer to catch up with the latest events.
+		 */
+		eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+		                             psFtraceData->hGPUTraceTLStream,
+		                             &pbTmp, &ui32Tmp);
+		PVR_LOG_IF_ERROR(eError, "TLClientAcquireData");
+
+		/* Let close stream perform the release data on the outstanding acquired data */
+		eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+		                             psFtraceData->hGPUTraceTLStream);
+		PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+
+		psFtraceData->hGPUTraceTLStream = NULL;
+	}
+
+	if (geLastTimeCorrClock != RGXTIMECORR_CLOCK_SCHED)
+	{
+		RGXGPUFreqCalibrateSetClockSource(psRgxDevNode, geLastTimeCorrClock);
+	}
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUEventsEnabledSet(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL bNewValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	RGX_HWPERF_FTRACE_DATA *psFtraceData;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psRgxDevInfo);
+	psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+
+	/* About to create/destroy FTrace resources, lock critical section
+	 * to avoid HWPerf MISR thread contention.
+	 */
+	OSLockAcquire(psFtraceData->hFTraceResourceLock);
+
+	eError = (bNewValue ? RGXHWPerfFTraceGPUEnable(psRgxDevInfo)
+					   : RGXHWPerfFTraceGPUDisable(psRgxDevInfo, IMG_FALSE));
+
+	OSLockRelease(psFtraceData->hFTraceResourceLock);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR PVRGpuTraceEnabledSet(IMG_BOOL bNewValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+
+	/* This entry point from DebugFS must take the global
+	 * bridge lock at this outer level of the stack before calling
+	 * into the RGX part of the driver which can lead to RGX
+	 * device data changes and communication with the FW which
+	 * all requires the bridge lock.
+	 */
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#endif
+	psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	/* enable/disable GPU trace on all devices */
+	while (psDeviceNode)
+	{
+		eError = RGXHWPerfFTraceGPUEventsEnabledSet(psDeviceNode->pvDevice, bNewValue);
+		if (eError != PVRSRV_OK)
+		{
+			break;
+		}
+		psDeviceNode = psDeviceNode->psNext;
+	}
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR PVRGpuTraceEnabledSetNoBridgeLock(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               IMG_BOOL bNewValue)
+{
+	return RGXHWPerfFTraceGPUEventsEnabledSet(psDeviceNode->pvDevice, bNewValue);
+}
+
+/* Calculate the OS timestamp given an RGX timestamp in the HWPerf event. */
+static uint64_t
+CalculateEventTimestamp(PVRSRV_RGXDEV_INFO *psDevInfo,
+						uint32_t ui32TimeCorrIndex,
+						uint64_t ui64EventTimestamp)
+{
+	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData;
+	RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32TimeCorrIndex];
+	uint64_t ui64CRTimeStamp = psTimeCorr->ui64CRTimeStamp;
+	uint64_t ui64OSTimeStamp = psTimeCorr->ui64OSTimeStamp;
+	uint64_t ui64CRDeltaToOSDeltaKNs = psTimeCorr->ui64CRDeltaToOSDeltaKNs;
+	uint64_t ui64EventOSTimestamp, deltaRgxTimer, delta_ns;
+
+	if (psFtraceData->ui64LastSampledTimeCorrOSTimeStamp > ui64OSTimeStamp)
+	{
+		/* The previous packet had a time reference (time correlation data) more
+		 * recent than the one in the current packet, it means the timer
+		 * correlation array wrapped too quickly (buffer too small) and in the
+		 * previous call to RGXHWPerfFTraceGPUUfoEvent we read one of the
+		 * newest timer correlations rather than one of the oldest ones.
+		 */
+		PVR_DPF((PVR_DBG_ERROR, "%s: The timestamps computed so far could be "
+				 "wrong! The time correlation array size should be increased "
+				 "to avoid this.", __func__));
+	}
+
+	psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = ui64OSTimeStamp;
+
+	/* RGX CR timer ticks delta */
+	deltaRgxTimer = ui64EventTimestamp - ui64CRTimeStamp;
+	/* RGX time delta in nanoseconds */
+	delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs);
+	/* Calculate OS time of HWPerf event */
+	ui64EventOSTimestamp = ui64OSTimeStamp + delta_ns;
+
+	PVR_DPF((PVR_DBG_VERBOSE, "%s: psCurrentDvfs RGX %llu, OS %llu, DVFSCLK %u",
+			 __func__, ui64CRTimeStamp, ui64OSTimeStamp,
+			 psTimeCorr->ui32CoreClockSpeed));
+
+	return ui64EventOSTimestamp;
+}
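+
+/* Worked example of the correlation above (hypothetical numbers, purely
+ * illustrative): with ui64CRTimeStamp = 1000 ticks, ui64OSTimeStamp =
+ * 5000000 ns and ui64EventTimestamp = 1256 ticks, the tick delta is 256;
+ * RGXFWIF_GET_DELTA_OSTIME_NS scales that delta to nanoseconds using the
+ * ui64CRDeltaToOSDeltaKNs factor, and the event OS time is ui64OSTimeStamp
+ * plus the scaled delta.
+ */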
+
+void RGXHWPerfFTraceGPUEnqueueEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32CtxId, IMG_UINT32 ui32JobId,
+		RGX_HWPERF_KICK_TYPE eKickType)
+{
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	PVR_DPF_ENTERED;
+
+	PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUEnqueueEvent: ui32CtxId %u, "
+	        "ui32JobId %u", ui32CtxId, ui32JobId));
+
+	PVRGpuTraceClientWork(psDevInfo->psDeviceNode, ui32CtxId, ui32JobId,
+	    RGXHWPerfKickTypeToStr(eKickType));
+
+	PVR_DPF_RETURN;
+}
+
+
+static void RGXHWPerfFTraceGPUSwitchEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+		RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+	IMG_UINT64 ui64Timestamp;
+	RGX_HWPERF_HW_DATA* psHWPerfPktData;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psHWPerfPkt);
+	PVR_ASSERT(pszWorkName);
+
+	psHWPerfPktData = (RGX_HWPERF_HW_DATA*) RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+	ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+											psHWPerfPkt->ui64Timestamp);
+
+	PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUSwitchEvent: %s ui32DMContext=%d, ui32IntJobRef=%d, eSwType=%d",
+			pszWorkName, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32IntJobRef, eSwType));
+
+	PVRGpuTraceWorkSwitch(ui64Timestamp, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32CtxPriority,
+	                      psHWPerfPktData->ui32IntJobRef, pszWorkName, eSwType);
+
+	PVR_DPF_RETURN;
+}
+
+static void RGXHWPerfFTraceGPUUfoEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                       RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt)
+{
+	IMG_UINT64 ui64Timestamp;
+	RGX_HWPERF_UFO_DATA *psHWPerfPktData;
+	IMG_UINT32 ui32UFOCount;
+	RGX_HWPERF_UFO_DATA_ELEMENT *puData;
+
+	psHWPerfPktData = (RGX_HWPERF_UFO_DATA *)
+	        RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+	ui32UFOCount = RGX_HWPERF_GET_UFO_STREAMSIZE(psHWPerfPktData->ui32StreamInfo);
+	puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) psHWPerfPktData)
+	        + RGX_HWPERF_GET_UFO_STREAMOFFSET(psHWPerfPktData->ui32StreamInfo));
+
+	ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+											psHWPerfPkt->ui64Timestamp);
+
+	PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUUfoEvent: ui32ExtJobRef=%d, "
+	        "ui32IntJobRef=%d", psHWPerfPktData->ui32ExtJobRef,
+	        psHWPerfPktData->ui32IntJobRef));
+
+	PVRGpuTraceUfo(ui64Timestamp, psHWPerfPktData->eEvType,
+	        psHWPerfPktData->ui32ExtJobRef, psHWPerfPktData->ui32DMContext,
+	        psHWPerfPktData->ui32IntJobRef, ui32UFOCount, puData);
+}
+
+static void RGXHWPerfFTraceGPUFirmwareEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+		RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+	uint64_t ui64Timestamp;
+	RGX_HWPERF_FW_DATA *psHWPerfPktData = (RGX_HWPERF_FW_DATA *)
+		RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+	ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+											psHWPerfPkt->ui64Timestamp);
+
+	PVRGpuTraceFirmware(ui64Timestamp, pszWorkName, eSwType);
+}
+
+static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+		RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt)
+{
+	RGX_HWPERF_EVENT_TYPE eType;
+	RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData;
+	IMG_UINT32 ui32HwEventTypeIndex;
+	static const struct {
+		IMG_CHAR* pszName;
+		PVR_GPUTRACE_SWITCH_TYPE eSwType;
+	} aszHwEventTypeMap[] = {
+			{ /* RGX_HWPERF_FW_BGSTART */      "BG",     PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_FW_BGEND */        "BG",     PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_FW_IRQSTART */     "IRQ",     PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_FW_IRQEND */       "IRQ",     PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_FW_DBGSTART */     "DBG",     PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_FW_DBGEND */       "DBG",     PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_PMOOM_TAPAUSE */	"PMOOM_TAPAUSE",  PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_TAKICK */       "TA",     PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_TAFINISHED */   "TA",     PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_3DTQKICK */     "TQ3D",   PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_3DKICK */       "3D",     PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_3DFINISHED */   "3D",     PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_CDMKICK */      "CDM",    PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_CDMFINISHED */  "CDM",    PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_TLAKICK */      "TQ2D",   PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_TLAFINISHED */  "TQ2D",   PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_3DSPMKICK */    "3DSPM",  PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_PERIODIC */     NULL, 0 }, /* PERIODIC not supported */
+			{ /* RGX_HWPERF_HW_RTUKICK */      "RTU",    PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_RTUFINISHED */  "RTU",    PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_SHGKICK */      "SHG",    PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_SHGFINISHED */  "SHG",    PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_3DTQFINISHED */ "TQ3D",   PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_3DSPMFINISHED */ "3DSPM", PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_PMOOM_TARESUME */	"PMOOM_TARESUME",  PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_TDMKICK */      "TDM",   PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_TDMFINISHED */  "TDM",   PVR_GPUTRACE_SWITCH_TYPE_END },
+	};
+	static_assert(RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE == RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE + 1,
+				  "FW and HW events are not contiguous in RGX_HWPERF_EVENT_TYPE");
+
+	PVR_ASSERT(psHWPerfPkt);
+	eType = RGX_HWPERF_GET_TYPE(psHWPerfPkt);
+
+	if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1)
+	{
+		RGX_HWPERF_STREAM_ID eStreamId = RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt);
+		PVRGpuTraceEventsLost(eStreamId,
+		                      psFtraceData->ui32FTraceLastOrdinal,
+		                      psHWPerfPkt->ui32Ordinal);
+		PVR_DPF((PVR_DBG_ERROR, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)",
+		         eStreamId, psFtraceData->ui32FTraceLastOrdinal, psHWPerfPkt->ui32Ordinal));
+	}
+
+	psFtraceData->ui32FTraceLastOrdinal = psHWPerfPkt->ui32Ordinal;
+
+	/* Process UFO packets */
+	if (eType == RGX_HWPERF_UFO)
+	{
+		RGXHWPerfFTraceGPUUfoEvent(psDevInfo, psHWPerfPkt);
+		return IMG_TRUE;
+	}
+
+	if (eType <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE)
+	{
+		/* this ID belongs to range 0, so index directly in range 0 */
+		ui32HwEventTypeIndex = eType - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+	}
+	else
+	{
+		/* this ID belongs to range 1, so first index in range 1 and skip number of slots used up for range 0 */
+		ui32HwEventTypeIndex = (eType - RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE) +
+		                       (RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE + 1);
+	}
+
+	if (ui32HwEventTypeIndex >= IMG_ARR_NUM_ELEMS(aszHwEventTypeMap))
+		goto err_unsupported;
+
+	if (aszHwEventTypeMap[ui32HwEventTypeIndex].pszName == NULL)
+	{
+		/* Not supported map entry, ignore event */
+		goto err_unsupported;
+	}
+
+	if (HWPERF_PACKET_IS_HW_TYPE(eType))
+	{
+		RGXHWPerfFTraceGPUSwitchEvent(psDevInfo, psHWPerfPkt,
+									  aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+									  aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType);
+	}
+	else if (HWPERF_PACKET_IS_FW_TYPE(eType))
+	{
+		RGXHWPerfFTraceGPUFirmwareEvent(psDevInfo, psHWPerfPkt,
+										aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+										aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType);
+	}
+	else
+	{
+		goto err_unsupported;
+	}
+
+	return IMG_TRUE;
+
+err_unsupported:
+	PVR_DPF((PVR_DBG_VERBOSE, "%s: Unsupported event type %d", __func__, eType));
+	return IMG_FALSE;
+}
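+
+/* Index mapping sketch for the table above (a restatement of the code, not
+ * new behaviour): a range-0 event indexes the table at
+ * (eType - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE); a range-1 event indexes
+ * at its offset within range 1 plus the slot count consumed by the FW
+ * events and range 0. This is what lets both contiguous enum ranges share
+ * the single aszHwEventTypeMap table.
+ */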
+
+
+static void RGXHWPerfFTraceGPUProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_PBYTE pBuffer, IMG_UINT32 ui32ReadLen)
+{
+	IMG_UINT32			ui32TlPackets = 0;
+	IMG_UINT32			ui32HWPerfPackets = 0;
+	IMG_UINT32			ui32HWPerfPacketsSent = 0;
+	IMG_PBYTE			pBufferEnd;
+	PVRSRVTL_PPACKETHDR psHDRptr;
+	PVRSRVTL_PACKETTYPE ui16TlType;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDevInfo);
+	PVR_ASSERT(pBuffer);
+	PVR_ASSERT(ui32ReadLen);
+
+	/* Process the TL Packets
+	 */
+	pBufferEnd = pBuffer+ui32ReadLen;
+	psHDRptr = GET_PACKET_HDR(pBuffer);
+	while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd )
+	{
+		ui16TlType = GET_PACKET_TYPE(psHDRptr);
+		if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
+		{
+			IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
+			if (0 == ui16DataLen)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfFTraceGPUProcessPackets: ZERO Data in TL data packet: %p", psHDRptr));
+			}
+			else
+			{
+				RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt;
+				RGX_HWPERF_V2_PACKET_HDR* psHWPerfEnd;
+
+				/* Check for lost hwperf data packets */
+				psHWPerfEnd = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)+ui16DataLen);
+				psHWPerfPkt = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr));
+				do
+				{
+					if (ValidAndEmitFTraceEvent(psDevInfo, psHWPerfPkt))
+					{
+						ui32HWPerfPacketsSent++;
+					}
+					ui32HWPerfPackets++;
+					psHWPerfPkt = RGX_HWPERF_GET_NEXT_PACKET(psHWPerfPkt);
+				}
+				while (psHWPerfPkt < psHWPerfEnd);
+			}
+		}
+		else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfFTraceGPUProcessPackets: Indication that the transport buffer was full"));
+		}
+		else
+		{
+			/* else Ignore padding packet type and others */
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfFTraceGPUProcessPackets: Ignoring TL packet, type %d", ui16TlType ));
+		}
+
+		psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
+		ui32TlPackets++;
+	}
+
+	PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUProcessPackets: TL "
+	 		"Packets processed %03d, HWPerf packets %03d, sent %03d",
+	 		ui32TlPackets, ui32HWPerfPackets, ui32HWPerfPacketsSent));
+
+	PVR_DPF_RETURN;
+}
+
+
+static
+void RGXHWPerfFTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+	PVRSRV_RGXDEV_INFO* psDeviceInfo = hCmdCompHandle;
+	RGX_HWPERF_FTRACE_DATA* psFtraceData;
+	PVRSRV_ERROR		eError;
+	IMG_PBYTE			pBuffer;
+	IMG_UINT32			ui32ReadLen;
+
+	PVR_DPF_ENTERED;
+
+	/* Exit if no HWPerf-enabled device exists */
+	PVR_ASSERT(psDeviceInfo != NULL);
+
+	psFtraceData = psDeviceInfo->pvGpuFtraceData;
+
+	/* Command-complete notifiers can run concurrently. If this is
+	 * happening, just bail out and let the previous call finish.
+	 * This is ok because we can process the queued packets on the next call.
+	 */
+	if (!OSTryLockAcquire(psFtraceData->hFTraceResourceLock))
+	{
+		PVR_DPF_RETURN;
+	}
+
+	/* If this notifier is called, the TL resources will be valid at least
+	 * until the end of this call, since the DeInit function waits on
+	 * hFTraceResourceLock to clean up the TL resources and unregister the
+	 * notifier, so just assert here.
+	 */
+	PVR_ASSERT(psFtraceData->hGPUTraceTLStream);
+
+	/* If we have a valid stream attempt to acquire some data */
+	eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream, &pBuffer, &ui32ReadLen);
+	if (eError == PVRSRV_OK)
+	{
+		/* Process the HWPerf packets and release the data */
+		if (ui32ReadLen > 0)
+		{
+			PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUThread: DATA AVAILABLE offset=%p, length=%d", pBuffer, ui32ReadLen));
+
+			/* Process the transport layer data for HWPerf packets... */
+			RGXHWPerfFTraceGPUProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen);
+
+			eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_LOG_ERROR(eError, "TLClientReleaseData");
+
+				/* Serious error, disable FTrace GPU events */
+
+				/* Release TraceLock so we always have the locking
+				 * order BridgeLock->TraceLock to prevent AB-BA deadlocks. */
+				OSLockRelease(psFtraceData->hFTraceResourceLock);
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+				OSAcquireBridgeLock();
+#endif
+				OSLockAcquire(psFtraceData->hFTraceResourceLock);
+				RGXHWPerfFTraceGPUDisable(psDeviceInfo, IMG_FALSE);
+				OSLockRelease(psFtraceData->hFTraceResourceLock);
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+				OSReleaseBridgeLock();
+#endif
+				goto out;
+
+			}
+		} /* else no data, ignore */
+	}
+	else if (eError != PVRSRV_ERROR_TIMEOUT)
+	{
+		PVR_LOG_ERROR(eError, "TLClientAcquireData");
+	}
+
+	OSLockRelease(psFtraceData->hFTraceResourceLock);
+out:
+	PVR_DPF_RETURN;
+}
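+
+/* Lock-ordering note for the error path above (a sketch of the invariant,
+ * not new behaviour): whenever both locks are needed, BridgeLock must be
+ * taken before hFTraceResourceLock. Since this notifier already holds
+ * hFTraceResourceLock when TLClientReleaseData fails, it drops that lock
+ * first and then re-acquires BridgeLock->TraceLock in the canonical order
+ * before disabling tracing, avoiding an AB-BA deadlock with callers such as
+ * PVRGpuTraceEnabledSet.
+ */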
+
+inline PVRSRV_ERROR RGXHWPerfFTraceGPUInitSupport(void)
+{
+	PVRSRV_ERROR eError;
+
+	if (ghLockFTraceEventLock != NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FTrace Support is already initialized"));
+		return PVRSRV_OK;
+	}
+
+	/* common module params initialization */
+	eError = OSLockCreate(&ghLockFTraceEventLock, LOCK_TYPE_PASSIVE);
+	PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+	return PVRSRV_OK;
+}
+
+inline void RGXHWPerfFTraceGPUDeInitSupport(void)
+{
+	if (ghLockFTraceEventLock)
+	{
+		OSLockDestroy(ghLockFTraceEventLock);
+		ghLockFTraceEventLock = NULL;
+	}
+}
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+	RGX_HWPERF_FTRACE_DATA *psData;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	psData = OSAllocZMem(sizeof(RGX_HWPERF_FTRACE_DATA));
+	psDevInfo->pvGpuFtraceData = psData;
+	if (psData == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* We initialise it only once because we want to track if any
+	 * packets were dropped. */
+	psData->ui32FTraceLastOrdinal = IMG_UINT32_MAX - 1;
+
+	eError = OSLockCreate(&psData->hFTraceResourceLock, LOCK_TYPE_DISPATCH);
+	PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+	return PVRSRV_OK;
+}
+
+void RGXHWPerfFTraceGPUDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGX_HWPERF_FTRACE_DATA *psData = psDevInfo->pvGpuFtraceData;
+
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+	if (psData)
+	{
+		/* first disable the tracing, to free up TL resources */
+		if (psData->hFTraceResourceLock)
+		{
+			OSLockAcquire(psData->hFTraceResourceLock);
+			RGXHWPerfFTraceGPUDisable(psDeviceNode->pvDevice, IMG_TRUE);
+			OSLockRelease(psData->hFTraceResourceLock);
+
+			/* now free all the FTrace resources */
+			OSLockDestroy(psData->hFTraceResourceLock);
+		}
+		OSFreeMem(psData);
+		psDevInfo->pvGpuFtraceData = NULL;
+	}
+}
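+
+/* Lifecycle sketch (illustrative only; the exact call sites live in the
+ * driver init/deinit paths): RGXHWPerfFTraceGPUInitSupport() creates the
+ * global event lock once, RGXHWPerfFTraceGPUInitDevice() allocates the
+ * per-device FTrace data, and the matching DeInit functions run in reverse
+ * order on driver unload:
+ *
+ *     RGXHWPerfFTraceGPUInitSupport();
+ *     RGXHWPerfFTraceGPUInitDevice(psDeviceNode);
+ *     ...
+ *     RGXHWPerfFTraceGPUDeInitDevice(psDeviceNode);
+ *     RGXHWPerfFTraceGPUDeInitSupport();
+ */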
+
+void PVRGpuTraceEnableUfoCallback(void)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList;
+	PVRSRV_RGXDEV_INFO *psRgxDevInfo;
+
+	/* Lock down the event state for a consistent value of guiUfoEventRef */
+	OSLockAcquire(ghLockFTraceEventLock);
+	if (guiUfoEventRef++ == 0)
+	{
+		/* make sure UFO events are enabled on all rogue devices */
+		while (psDeviceNode)
+		{
+			IMG_UINT64 ui64Filter;
+
+			psRgxDevInfo = psDeviceNode->pvDevice;
+			ui64Filter = RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO) |
+							psRgxDevInfo->ui64HWPerfFilter;
+			/* There is a small chance that ui64HWPerfFilter changes here, in
+			 * which case the newest filter value is overwritten with the old
+			 * one plus the UFO event. This is not a critical problem. */
+			eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+											IMG_FALSE, ui64Filter);
+			if (eError == PVRSRV_ERROR_NOT_INITIALISED)
+			{
+				/* If we land here that means that the FW is not initialised yet.
+				 * We stored the filter and it will be passed to the firmware
+				 * during its initialisation phase. So ignore. */
+			}
+			else if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf events on device %d", psDeviceNode->sDevId.i32UMIdentifier));
+			}
+
+			psDeviceNode = psDeviceNode->psNext;
+		}
+	}
+	OSLockRelease(ghLockFTraceEventLock);
+}
+
+void PVRGpuTraceDisableUfoCallback(void)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+
+	/* We have to check if the lock is valid because on driver unload
+	 * RGXHWPerfFTraceGPUDeInit is called before the kernel disables the
+	 * ftrace events. This means the lock will be destroyed before this
+	 * callback is called.
+	 * We can safely return in that situation because the driver is being
+	 * unloaded, so we no longer care about the HWPerf state. */
+	if (ghLockFTraceEventLock == NULL)
+		return;
+
+	psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList;
+
+	/* Lock down the event state for a consistent value of guiUfoEventRef */
+	OSLockAcquire(ghLockFTraceEventLock);
+	if (--guiUfoEventRef == 0)
+	{
+		/* make sure UFO events are disabled on all rogue devices */
+		while (psDeviceNode)
+		{
+			IMG_UINT64 ui64Filter;
+			PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice;
+
+			ui64Filter = ~(RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) &
+					psRgxDevInfo->ui64HWPerfFilter;
+			/* There is a small chance that ui64HWPerfFilter changes here, in
+			 * which case the newest filter value is overwritten with the old
+			 * one minus the UFO event. This is not a critical problem. */
+			eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+			                               IMG_FALSE, ui64Filter);
+			if (eError == PVRSRV_ERROR_NOT_INITIALISED)
+			{
+				/* If we land here that means that the FW is not initialised yet.
+				 * We stored the filter and it will be passed to the firmware
+				 * during its initialisation phase. So ignore. */
+			}
+			else if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Could not disable UFO HWPerf events on device %d",
+				        psDeviceNode->sDevId.i32UMIdentifier));
+			}
+			psDeviceNode = psDeviceNode->psNext;
+		}
+	}
+	OSLockRelease(ghLockFTraceEventLock);
+}
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList;
+	PVRSRV_RGXDEV_INFO *psRgxDevInfo;
+	uint64_t ui64Filter, ui64FWEventsFilter = 0;
+	int i;
+
+	for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+		 i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++)
+	{
+		ui64FWEventsFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i);
+	}
+
+	OSLockAcquire(ghLockFTraceEventLock);
+	/* Enable all FW events on all the devices */
+	while (psDeviceNode)
+	{
+		PVRSRV_ERROR eError;
+		psRgxDevInfo = psDeviceNode->pvDevice;
+		ui64Filter = psRgxDevInfo->ui64HWPerfFilter | ui64FWEventsFilter;
+
+		eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+		                               IMG_FALSE, ui64Filter);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Could not enable HWPerf event for firmware"
+			        " task timings (%s).", PVRSRVGetErrorStringKM(eError)));
+		}
+		psDeviceNode = psDeviceNode->psNext;
+	}
+	OSLockRelease(ghLockFTraceEventLock);
+}
+
+void PVRGpuTraceDisableFirmwareActivityCallback(void)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	IMG_UINT64 ui64FWEventsFilter = ~0;
+	int i;
+
+	/* We have to check if the lock is valid because on driver unload
+	 * RGXHWPerfFTraceGPUDeInit is called before the kernel disables the
+	 * ftrace events. This means the lock will be destroyed before this
+	 * callback is called.
+	 * We can safely return in that situation because the driver is being
+	 * unloaded, so we no longer care about the HWPerf state. */
+	if (ghLockFTraceEventLock == NULL)
+		return;
+
+	psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList;
+
+	for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+		 i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++)
+	{
+		ui64FWEventsFilter &= ~RGX_HWPERF_EVENT_MASK_VALUE(i);
+	}
+
+	OSLockAcquire(ghLockFTraceEventLock);
+
+	/* Disable all FW events on all the devices */
+	while (psDeviceNode)
+	{
+		PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice;
+		IMG_UINT64 ui64Filter = psRgxDevInfo->ui64HWPerfFilter & ui64FWEventsFilter;
+
+		if (PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+		                          IMG_FALSE, ui64Filter) != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Could not disable HWPerf event for firmware task timings."));
+		}
+		psDeviceNode = psDeviceNode->psNext;
+	}
+
+	OSLockRelease(ghLockFTraceEventLock);
+}
+
+#endif /* SUPPORT_GPUTRACE_EVENTS */
+
+/******************************************************************************
+ * Currently only implemented on Linux. This feature can be enabled to provide
+ * an interface to 3rd-party kernel modules that wish to access the
+ * HWPerf data. The API is documented in the rgxapi_km.h header and
+ * the rgx_hwperf* headers.
+ *****************************************************************************/
+
+/* Internal HWPerf kernel connection/device data object to track the state
+ * of a client session.
+ */
+typedef struct
+{
+	PVRSRV_DEVICE_NODE* psRgxDevNode;
+	PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+
+	/* TL Open/close state */
+	IMG_HANDLE          hSD[RGX_HWPERF_MAX_STREAM_ID];
+
+	/* TL Acquire/release state */
+	IMG_PBYTE			pHwpBuf[RGX_HWPERF_MAX_STREAM_ID];			/*!< buffer returned to user in acquire call */
+	IMG_PBYTE			pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID];		/*!< pointer to end of HwpBuf */
+	IMG_PBYTE			pTlBuf[RGX_HWPERF_MAX_STREAM_ID];			/*!< buffer obtained via TlAcquireData */
+	IMG_PBYTE			pTlBufPos[RGX_HWPERF_MAX_STREAM_ID];		/*!< initial position in TlBuf to acquire packets */
+	IMG_PBYTE			pTlBufRead[RGX_HWPERF_MAX_STREAM_ID];		/*!< pointer to the last packet read */
+	IMG_UINT32			ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID];	/*!< length of acquired TlBuf */
+	IMG_BOOL			bRelease[RGX_HWPERF_MAX_STREAM_ID];		/*!< used to determine whether or not to release currently held TlBuf */
+
+} RGX_KM_HWPERF_DEVDATA;
+
+PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	RGX_KM_HWPERF_DEVDATA *psDevData;
+	RGX_HWPERF_DEVICE *psNewHWPerfDevice;
+	RGX_HWPERF_CONNECTION* psHWPerfConnection;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* avoid uninitialised data */
+	PVR_ASSERT(*ppsHWPerfConnection == NULL);
+	PVR_ASSERT(psPVRSRVData);
+
+	/* Allocate connection object */
+	psHWPerfConnection = OSAllocZMem(sizeof(*psHWPerfConnection));
+	if (!psHWPerfConnection)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	/* Save the return pointer early to aid clean-up if a failure occurs */
+	*ppsHWPerfConnection = psHWPerfConnection;
+
+	psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	while (psDeviceNode)
+	{
+		/* Create a list node to be attached to connection object's list */
+		psNewHWPerfDevice = OSAllocMem(sizeof(*psNewHWPerfDevice));
+		if (!psNewHWPerfDevice)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		/* Insert node at head of the list */
+		psNewHWPerfDevice->psNext = psHWPerfConnection->psHWPerfDevList;
+		psHWPerfConnection->psHWPerfDevList = psNewHWPerfDevice;
+
+		/* create a device data object for kernel server */
+		psDevData = OSAllocZMem(sizeof(*psDevData));
+		psNewHWPerfDevice->hDevData = (IMG_HANDLE)psDevData;
+		if (!psDevData)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		if (OSSNPrintf(psNewHWPerfDevice->pszName, sizeof(psNewHWPerfDevice->pszName),
+		               "hwperf_device_%d", psDeviceNode->sDevId.i32UMIdentifier) < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf device name for device %d",
+		    	                    __FUNCTION__,
+									psDeviceNode->sDevId.i32UMIdentifier));
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		psDevData->psRgxDevNode = psDeviceNode;
+		psDevData->psRgxDevInfo = psDeviceNode->pvDevice;
+
+		psDeviceNode = psDeviceNode->psNext;
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection)
+{
+	RGX_KM_HWPERF_DEVDATA *psDevData;
+	RGX_HWPERF_DEVICE *psHWPerfDev;
+	PVRSRV_RGXDEV_INFO *psRgxDevInfo;
+	PVRSRV_ERROR eError;
+	IMG_CHAR pszHWPerfFwStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5];
+	IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5];
+	IMG_UINT32 ui32BufSize;
+
+	/* Disable producer callback by default for the Kernel API. */
+	IMG_UINT32 ui32StreamFlags = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING |
+			                     PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input argument values supplied by the caller */
+	if (!psHWPerfConnection)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+	while (psHWPerfDev)
+	{
+		psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+		psRgxDevInfo = psDevData->psRgxDevInfo;
+
+		/* In the case where the AppHint has not been set we need to
+		 * initialise the HWPerf resources here. They are allocated on demand
+		 * to reduce the RAM footprint on systems not needing HWPerf.
+		 */
+		OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+		if (RGXHWPerfIsInitRequired(psRgxDevInfo))
+		{
+			eError = RGXHWPerfInitOnDemandResources(psRgxDevInfo);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Initialization of on-demand HWPerfFW"
+			        " resources failed", __FUNCTION__));
+				OSLockRelease(psRgxDevInfo->hHWPerfLock);
+				return eError;
+			}
+		}
+		OSLockRelease(psRgxDevInfo->hHWPerfLock);
+
+		OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+		if (psRgxDevInfo->hHWPerfHostStream == NULL)
+		{
+			eError = RGXHWPerfHostInitOnDemandResources(psRgxDevInfo);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Initialization of on-demand HWPerfHost"
+						" resources failed", __FUNCTION__));
+				OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+				return eError;
+			}
+		}
+		OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+
+		/* Form the HWPerf stream name corresponding to this DevNode so that it is meaningful in the UM */
+		if (OSSNPrintf(pszHWPerfFwStreamName, sizeof(pszHWPerfFwStreamName), "%s%d",
+						PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+						psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf stream name for device %d",
+		    	                    __FUNCTION__,
+									psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+		/* Open the RGX TL stream for reading in this session */
+		eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+									pszHWPerfFwStreamName,
+									ui32StreamFlags,
+									&psDevData->hSD[RGX_HWPERF_STREAM_ID0_FW]);
+		PVR_LOGR_IF_ERROR(eError, "TLClientOpenStream(RGX_HWPerf)");
+
+		/* Form the HWPerf host stream name corresponding to this DevNode so that it is meaningful in the UM */
+		if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d",
+					PVRSRV_TL_HWPERF_HOST_SERVER_STREAM,
+					psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf host stream name for device %d",
+		    	                    __FUNCTION__,
+									psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		/* Open the host TL stream for reading in this session */
+		eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+									pszHWPerfHostStreamName,
+									PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+									&psDevData->hSD[RGX_HWPERF_STREAM_ID1_HOST]);
+		PVR_LOGR_IF_ERROR(eError, "TLClientOpenStream(Host_HWPerf)");
+
+		/* Allocate a buffer large enough for use during the entire session to
+		 * avoid the need to resize in the Acquire call, as this might be in
+		 * an ISR. Choose a size that can contain at least one packet.
+		 */
+		/* Allocate buffer for FW Stream */
+		ui32BufSize = FW_STREAM_BUFFER_SIZE;
+		psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] = OSAllocMem(ui32BufSize);
+		if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] == NULL)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID0_FW] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]+ui32BufSize;
+
+		/* Allocate buffer for Host Stream */
+		ui32BufSize = HOST_STREAM_BUFFER_SIZE;
+		psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] = OSAllocMem(ui32BufSize);
+		if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] == NULL)
+		{
+			OSFreeMem(psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]);
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID1_HOST] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST]+ui32BufSize;
+
+		psHWPerfDev = psHWPerfDev->psNext;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+	PVRSRV_ERROR eError;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	eError = RGXHWPerfLazyConnect(ppsHWPerfConnection);
+	PVR_LOGG_IF_ERROR(eError, "RGXHWPerfLazyConnect", e0);
+
+	eError = RGXHWPerfOpen(*ppsHWPerfConnection);
+	PVR_LOGG_IF_ERROR(eError, "RGXHWPerfOpen", e1);
+
+	return PVRSRV_OK;
+
+e1: /* HWPerfOpen might have opened some streams and then failed */
+	RGXHWPerfClose(*ppsHWPerfConnection);
+e0: /* LazyConnect might have allocated some resources and then failed,
+	 * make sure they are cleaned up */
+	RGXHWPerfFreeConnection(ppsHWPerfConnection);
+	return eError;
+}
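+
+/* Usage sketch for a 3rd-party kernel module driving this API (illustrative
+ * only; error handling elided, and ui64Mask is a placeholder filter chosen
+ * by the caller):
+ *
+ *     RGX_HWPERF_CONNECTION *psConn = NULL;
+ *     IMG_PBYTE pBuf;
+ *     IMG_UINT32 ui32Len;
+ *
+ *     RGXHWPerfConnect(&psConn);
+ *     RGXHWPerfControl(psConn, RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE, ui64Mask);
+ *     RGXHWPerfAcquireEvents(psConn->psHWPerfDevList->hDevData,
+ *                            RGX_HWPERF_STREAM_ID0_FW, &pBuf, &ui32Len);
+ *     ... process ui32Len bytes of HWPerf packets in pBuf ...
+ *     RGXHWPerfReleaseEvents(psConn->psHWPerfDevList->hDevData,
+ *                            RGX_HWPERF_STREAM_ID0_FW);
+ *     RGXHWPerfDisconnect(&psConn);
+ */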
+
+
+PVRSRV_ERROR RGXHWPerfControl(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		RGX_HWPERF_STREAM_ID eStreamId,
+		IMG_BOOL             bToggle,
+		IMG_UINT64           ui64Mask)
+{
+	PVRSRV_ERROR           eError;
+	RGX_KM_HWPERF_DEVDATA* psDevData;
+	RGX_HWPERF_DEVICE* psHWPerfDev;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input argument values supplied by the caller */
+	if (!psHWPerfConnection)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+	while (psHWPerfDev)
+	{
+		psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+		/* Call the internal server API */
+		eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDevData->psRgxDevNode, eStreamId, bToggle, ui64Mask);
+		PVR_LOGR_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
+
+		psHWPerfDev = psHWPerfDev->psNext;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfConfigureAndEnableCounters(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		IMG_UINT32					ui32NumBlocks,
+		RGX_HWPERF_CONFIG_CNTBLK*	asBlockConfigs)
+{
+	PVRSRV_ERROR           eError = PVRSRV_OK;
+	RGX_KM_HWPERF_DEVDATA* psDevData;
+	RGX_HWPERF_DEVICE *psHWPerfDev;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input argument values supplied by the caller */
+	if (!psHWPerfConnection || ui32NumBlocks==0 || !asBlockConfigs)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+	while (psHWPerfDev)
+	{
+		psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+		/* Call the internal server API */
+		eError = PVRSRVRGXConfigEnableHWPerfCountersKM(NULL,
+				psDevData->psRgxDevNode, ui32NumBlocks, asBlockConfigs);
+		PVR_LOGR_IF_ERROR(eError, "PVRSRVRGXConfigEnableHWPerfCountersKM");
+
+		psHWPerfDev = psHWPerfDev->psNext;
+	}
+
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfDisableCounters(
+		RGX_HWPERF_CONNECTION *psHWPerfConnection,
+		IMG_UINT32   ui32NumBlocks,
+		IMG_UINT16*   aeBlockIDs)
+{
+	PVRSRV_ERROR           eError = PVRSRV_OK;
+	RGX_KM_HWPERF_DEVDATA* psDevData;
+	RGX_HWPERF_DEVICE* psHWPerfDev;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input argument values supplied by the caller */
+	if (!psHWPerfConnection || ui32NumBlocks==0 || !aeBlockIDs)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+	while (psHWPerfDev)
+	{
+		psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+		/* Call the internal server API */
+		eError = PVRSRVRGXCtrlHWPerfCountersKM(NULL,
+					psDevData->psRgxDevNode, IMG_FALSE, ui32NumBlocks, aeBlockIDs);
+		PVR_LOGR_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfCountersKM");
+
+		psHWPerfDev = psHWPerfDev->psNext;
+	}
+
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfAcquireEvents(
+		IMG_HANDLE  hDevData,
+		RGX_HWPERF_STREAM_ID eStreamId,
+		IMG_PBYTE*  ppBuf,
+		IMG_UINT32* pui32BufLen)
+{
+	PVRSRV_ERROR			eError;
+	RGX_KM_HWPERF_DEVDATA*	psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+	IMG_PBYTE				pDataDest;
+	IMG_UINT32			ui32TlPackets = 0;
+	IMG_PBYTE			pBufferEnd;
+	PVRSRVTL_PPACKETHDR psHDRptr;
+	PVRSRVTL_PACKETTYPE ui16TlType;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Reset the output arguments in case we discover an error */
+	*ppBuf = NULL;
+	*pui32BufLen = 0;
+
+	/* Validate input argument values supplied by the caller */
+	if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (psDevData->pTlBuf[eStreamId] == NULL)
+	{
+		/* Acquire some data to read from the HWPerf TL stream */
+		eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+								 	 psDevData->hSD[eStreamId],
+									 &psDevData->pTlBuf[eStreamId],
+									 &psDevData->ui32AcqDataLen[eStreamId]);
+		PVR_LOGR_IF_ERROR(eError, "TLClientAcquireData");
+
+		psDevData->pTlBufPos[eStreamId] = psDevData->pTlBuf[eStreamId];
+	}
+
+	/* TL indicates no data exists so return OK and zero. */
+	if ((psDevData->pTlBufPos[eStreamId] == NULL) || (psDevData->ui32AcqDataLen[eStreamId] == 0))
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Process each TL packet in the data buffer we have acquired */
+	pBufferEnd = psDevData->pTlBuf[eStreamId]+psDevData->ui32AcqDataLen[eStreamId];
+	pDataDest = psDevData->pHwpBuf[eStreamId];
+	psHDRptr = GET_PACKET_HDR(psDevData->pTlBufPos[eStreamId]);
+	psDevData->pTlBufRead[eStreamId] = psDevData->pTlBufPos[eStreamId];
+	while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd )
+	{
+		ui16TlType = GET_PACKET_TYPE(psHDRptr);
+		if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
+		{
+			IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
+			if (0 == ui16DataLen)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfAcquireEvents: ZERO Data in TL data packet: %p", psHDRptr));
+			}
+			else
+			{
+				/* Check the next packet does not overflow the client buffer */
+				if (pDataDest + ui16DataLen > psDevData->pHwpBufEnd[eStreamId])
+				{
+					break;
+				}
+
+				/* For valid data copy it into the client buffer and move
+				 * the write position on */
+				OSDeviceMemCopy(pDataDest, GET_PACKET_DATA_PTR(psHDRptr), ui16DataLen);
+				pDataDest += ui16DataLen;
+			}
+		}
+		else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Indication that the transport buffer was full"));
+		}
+		else
+		{
+			/* else Ignore padding packet type and others */
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Ignoring TL packet, type %d", ui16TlType ));
+		}
+
+		/* Update loop variable to the next packet and increment counts */
+		psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
+		/* Update pTlBufRead to keep track of the next packet to be read. */
+		psDevData->pTlBufRead[eStreamId] = (IMG_PBYTE) psHDRptr;
+		ui32TlPackets++;
+	}
+
+	PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfAcquireEvents: TL Packets processed %03d", ui32TlPackets));
+
+	psDevData->bRelease[eStreamId] = IMG_FALSE;
+	if (psHDRptr >= (PVRSRVTL_PPACKETHDR) pBufferEnd)
+	{
+		psDevData->bRelease[eStreamId] = IMG_TRUE;
+	}
+
+	/* Update output arguments with client buffer details and true length */
+	*ppBuf = psDevData->pHwpBuf[eStreamId];
+	*pui32BufLen = pDataDest - psDevData->pHwpBuf[eStreamId];
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfReleaseEvents(
+		IMG_HANDLE hDevData,
+		RGX_HWPERF_STREAM_ID eStreamId)
+{
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+	RGX_KM_HWPERF_DEVDATA*	psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input argument values supplied by the caller */
+	if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (psDevData->bRelease[eStreamId])
+	{
+		/* Inform the TL that we are done with reading the data. */
+		eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[eStreamId]);
+		psDevData->ui32AcqDataLen[eStreamId] = 0;
+		psDevData->pTlBuf[eStreamId] = NULL;
+	}
+	else
+	{
+		psDevData->pTlBufPos[eStreamId] = psDevData->pTlBufRead[eStreamId];
+	}
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfGetFilter(
+		IMG_HANDLE  hDevData,
+		RGX_HWPERF_STREAM_ID eStreamId,
+		IMG_UINT64 *ui64Filter)
+{
+	PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate input argument values supplied by the caller */
+	psRgxDevInfo = hDevData ? ((RGX_KM_HWPERF_DEVDATA*) hDevData)->psRgxDevInfo : NULL;
+	if (!psRgxDevInfo)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pointer to the RGX device",
+		        __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* No need to take hHWPerfLock here since we are only reading from
+	 * integers that always exist, to return to debugfs; such a read is an
+	 * atomic operation.
+	 */
+	switch (eStreamId) {
+		case RGX_HWPERF_STREAM_ID0_FW:
+			*ui64Filter = psRgxDevInfo->ui64HWPerfFilter;
+			break;
+		case RGX_HWPERF_STREAM_ID1_HOST:
+			*ui64Filter = psRgxDevInfo->ui32HWPerfHostFilter;
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "%s: Invalid stream ID",
+			        __func__));
+			return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+	RGX_HWPERF_DEVICE *psHWPerfDev, *psHWPerfNextDev;
+	RGX_HWPERF_CONNECTION *psHWPerfConnection = *ppsHWPerfConnection;
+
+	/* if connection object itself is NULL, nothing to free */
+	if (psHWPerfConnection == NULL)
+	{
+		return PVRSRV_OK;
+	}
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	psHWPerfNextDev = psHWPerfConnection->psHWPerfDevList;
+	while (psHWPerfNextDev)
+	{
+		psHWPerfDev = psHWPerfNextDev;
+		psHWPerfNextDev = psHWPerfNextDev->psNext;
+
+		/* Free the session memory */
+		if (psHWPerfDev->hDevData)
+			OSFreeMem(psHWPerfDev->hDevData);
+		OSFreeMem(psHWPerfDev);
+	}
+	OSFreeMem(psHWPerfConnection);
+	*ppsHWPerfConnection = NULL;
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection)
+{
+	RGX_HWPERF_DEVICE *psHWPerfDev;
+	RGX_KM_HWPERF_DEVDATA* psDevData;
+	IMG_UINT uiStreamId;
+	PVRSRV_ERROR eError;
+
+	/* Check the session connection is not NULL */
+	if (!psHWPerfConnection)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+	while (psHWPerfDev)
+	{
+		psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+		for (uiStreamId = 0; uiStreamId < RGX_HWPERF_MAX_STREAM_ID; uiStreamId++)
+		{
+			/* If the TL buffer exists, the client did not call ReleaseData
+			 * before disconnecting, so clean it up */
+			if (psDevData->pTlBuf[uiStreamId])
+			{
+				/* Call TLClientReleaseData and null out the buffer fields
+				 * and length */
+				eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[uiStreamId]);
+				psDevData->ui32AcqDataLen[uiStreamId] = 0;
+				psDevData->pTlBuf[uiStreamId] = NULL;
+				PVR_LOG_IF_ERROR(eError, "TLClientReleaseData");
+				/* Packets may be lost if release was not required */
+				if (!psDevData->bRelease[uiStreamId])
+				{
+					PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfClose: Events in buffer waiting to be read, remaining events may be lost."));
+				}
+			}
+
+			/* Close the TL stream, ignore the error if it occurs as we
+			 * are disconnecting */
+			if (psDevData->hSD[uiStreamId])
+			{
+				eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+											 psDevData->hSD[uiStreamId]);
+				PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+				psDevData->hSD[uiStreamId] = NULL;
+			}
+
+			/* Free the client buffer used in session */
+			if (psDevData->pHwpBuf[uiStreamId])
+			{
+				OSFreeMem(psDevData->pHwpBuf[uiStreamId]);
+				psDevData->pHwpBuf[uiStreamId] = NULL;
+			}
+		}
+		psHWPerfDev = psHWPerfDev->psNext;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	eError = RGXHWPerfClose(*ppsHWPerfConnection);
+	PVR_LOG_IF_ERROR(eError, "RGXHWPerfClose");
+
+	eError = RGXHWPerfFreeConnection(ppsHWPerfConnection);
+	PVR_LOG_IF_ERROR(eError, "RGXHWPerfFreeConnection");
+
+	return eError;
+}
+
+
+const IMG_CHAR *RGXHWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType)
+{
+	static const IMG_CHAR *aszKickType[RGX_HWPERF_KICK_TYPE_LAST+1] = {
+		"TA3D", "TQ2D", "TQ3D", "CDM", "RS", "VRDM", "TQTDM", "SYNC", "LAST"
+	};
+
+	/* cast in case of negative value */
+	if (((IMG_UINT32) eKickType) >= RGX_HWPERF_KICK_TYPE_LAST)
+	{
+		return "<UNKNOWN>";
+	}
+
+	return aszKickType[eKickType];
+}
+
+
+IMG_UINT64 RGXHWPerfConvertCRTimeStamp(
+		IMG_UINT32 ui32ClkSpeed,
+		IMG_UINT64 ui64CorrCRTimeStamp,
+		IMG_UINT64 ui64CorrOSTimeStamp,
+		IMG_UINT64 ui64CRTimeStamp)
+{
+	IMG_UINT32 ui32Remainder;
+	IMG_UINT64 ui64CRDeltaToOSDeltaKNs;
+	IMG_UINT64 ui64EventOSTimestamp, deltaRgxTimer, delta_ns;
+
+	if (!(ui64CRTimeStamp) || !(ui32ClkSpeed) || !(ui64CorrCRTimeStamp) || !(ui64CorrOSTimeStamp))
+	{
+		return 0;
+	}
+
+	ui64CRDeltaToOSDeltaKNs = RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS(ui32ClkSpeed,
+																  ui32Remainder);
+
+	/* RGX CR timer ticks delta */
+	deltaRgxTimer = ui64CRTimeStamp - ui64CorrCRTimeStamp;
+	/* RGX time delta in nanoseconds */
+	delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs);
+	/* Calculate OS time of HWPerf event */
+	ui64EventOSTimestamp = ui64CorrOSTimeStamp + delta_ns;
+
+	return ui64EventOSTimestamp;
+}
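+
+/* In equation form (a restatement of the code above, with the tick-to-ns
+ * scaling left to the RGXFWIF_GET_DELTA_OSTIME_NS macro):
+ *
+ *   OSTime(event) = ui64CorrOSTimeStamp
+ *                   + ns(ui64CRTimeStamp - ui64CorrCRTimeStamp)
+ *
+ * where ns() converts a CR timer tick delta to nanoseconds via the K factor
+ * derived from ui32ClkSpeed.
+ */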
+
+/******************************************************************************
+ End of file (rgxhwperf.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxhwperf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxhwperf.h
new file mode 100644
index 0000000..9c4ddef
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxhwperf.h
@@ -0,0 +1,360 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX HWPerf functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHWPERF_H_
+#define RGXHWPERF_H_
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#include "device.h"
+#include "connection_server.h"
+#include "rgxdevice.h"
+#include "rgx_hwperf.h"
+
+/* HWPerf host buffer size constraints in KBs */
+#define HWPERF_HOST_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB
+#define HWPERF_HOST_TL_STREAM_SIZE_MIN     (32U)
+#define HWPERF_HOST_TL_STREAM_SIZE_MAX     (1024U)
+
+/******************************************************************************
+ * RGX HW Performance Data Transport Routines
+ *****************************************************************************/
+
+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE* psDevInfo);
+
+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo);
+void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
+void RGXHWPerfClientInitAppHintCallbacks(void);
+
+/******************************************************************************
+ * RGX HW Performance Profiling API(s)
+ *****************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
+	CONNECTION_DATA      * psConnection,
+	PVRSRV_DEVICE_NODE   * psDeviceNode,
+	RGX_HWPERF_STREAM_ID   eStreamId,
+	IMG_BOOL               bToggle,
+	IMG_UINT64             ui64Mask);
+
+
+PVRSRV_ERROR PVRSRVRGXConfigEnableHWPerfCountersKM(
+	CONNECTION_DATA    * psConnection,
+	PVRSRV_DEVICE_NODE * psDeviceNode,
+	IMG_UINT32         ui32ArrayLen,
+	RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs);
+
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfCountersKM(
+	CONNECTION_DATA    * psConnection,
+	PVRSRV_DEVICE_NODE * psDeviceNode,
+	IMG_BOOL           bEnable,
+	IMG_UINT32         ui32ArrayLen,
+	IMG_UINT16         * psBlockIDs);
+
+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
+	CONNECTION_DATA    * psConnection,
+	PVRSRV_DEVICE_NODE * psDeviceNode,
+	IMG_UINT16           ui16CustomBlockID,
+	IMG_UINT16           ui16NumCustomCounters,
+	IMG_UINT32         * pui32CustomCounterIDs);
+
+/******************************************************************************
+ * RGX HW Performance Host Stream API
+ *****************************************************************************/
+
+PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB);
+PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo);
+void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO	*psRgxDevInfo);
+
+void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                 IMG_UINT32 ui32Filter);
+
+void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                               RGX_HWPERF_KICK_TYPE eEnqType,
+                               IMG_UINT32 ui32Pid,
+                               IMG_UINT32 ui32FWDMContext,
+                               IMG_UINT32 ui32ExtJobRef,
+                               IMG_UINT32 ui32IntJobRef,
+                               IMG_UINT32 ui32CheckFenceUID,
+                               IMG_UINT32 ui32UpdateFenceUID,
+                               IMG_UINT64 ui64DeadlineInus,
+                               IMG_UINT64 ui64CycleEstimate);
+
+void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                 RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+                                 IMG_UINT32 ui32UID,
+                                 IMG_UINT32 ui32PID,
+                                 IMG_UINT32 ui32FWAddr,
+                                 const IMG_CHAR *psName,
+                                 IMG_UINT32 ui32NameSize);
+
+void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+                                IMG_UINT32 ui32UID,
+                                IMG_UINT32 ui32PID,
+                                IMG_UINT32 ui32FWAddr);
+
+void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                  RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+                                  IMG_UINT32 ui32NewUID,
+                                  IMG_UINT32 ui32UID1,
+                                  IMG_UINT32 ui32UID2,
+                                  const IMG_CHAR *psName,
+                                  IMG_UINT32 ui32NameSize);
+
+void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                               RGX_HWPERF_UFO_EV eUfoType,
+                               RGX_HWPERF_UFO_DATA_ELEMENT psUFOData[],
+                               IMG_UINT uiNoOfUFOs);
+
+void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+
+IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent);
+
+#define _RGX_HWPERF_HOST_FILTER(CTX, EV) \
+		(((PVRSRV_RGXDEV_INFO *)(CTX)->psDeviceNode->pvDevice)->ui32HWPerfHostFilter \
+		& RGX_HWPERF_EVENT_MASK_VALUE(EV))
+
+#define _RGX_DEVICE_INFO_FROM_CTX(CTX) \
+		((PVRSRV_RGXDEV_INFO *)(CTX)->psDeviceNode->pvDevice)
+
+#define _RGX_DEVICE_INFO_FROM_NODE(DEVNODE) \
+		((PVRSRV_RGXDEV_INFO *)(DEVNODE)->pvDevice)
+
+/* Deadline and cycle estimate are not supported for all ENQ events */
+#define NO_DEADLINE 0
+#define NO_CYCEST   0
+
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if so,
+ * posts the event to the HWPerfHost stream.
+ *
+ * @param C      Kick context
+ * @param P      Pid of kicking process
+ * @param X      Related FW context
+ * @param E      External job reference
+ * @param I      Internal job reference
+ * @param K      Kick type
+ * @param CHKUID Check fence UID
+ * @param UPDUID Update fence UID
+ * @param D      Deadline
+ * @param CE     Cycle estimate
+ */
+#define RGX_HWPERF_HOST_ENQ(C, P, X, E, I, K, CHKUID, UPDUID, D, CE) \
+		do { \
+			if (_RGX_HWPERF_HOST_FILTER(C, RGX_HWPERF_HOST_ENQ)) \
+			{ \
+				RGXHWPerfHostPostEnqEvent(_RGX_DEVICE_INFO_FROM_CTX(C), \
+				                          (K), (P), (X), (E), (I), \
+				                          (CHKUID), (UPDUID), (D), (CE)); \
+			} \
+		} while (0)
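+
+/* Usage sketch (hypothetical call site, for illustration only; the variable
+ * names and the kick-type value are assumptions): a kick path with kick
+ * context psContext might post an ENQ event as follows, passing
+ * NO_DEADLINE/NO_CYCEST where the kick type does not carry those values:
+ *
+ *   RGX_HWPERF_HOST_ENQ(psContext, ui32PID, ui32FWCtxAddr,
+ *                       ui32ExtJobRef, ui32IntJobRef,
+ *                       RGX_HWPERF_KICK_TYPE_TA3D,
+ *                       ui32CheckFenceUID, ui32UpdateFenceUID,
+ *                       NO_DEADLINE, NO_CYCEST);
+ */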
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if so,
+ * posts the event to the HWPerfHost stream.
+ *
+ * @param I Device Info pointer
+ * @param T Host UFO event type
+ * @param D UFO data array
+ * @param N number of syncs in data array
+ */
+#define RGX_HWPERF_HOST_UFO(I, T, D, N) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_UFO)) \
+			{ \
+				RGXHWPerfHostPostUfoEvent((I), (T), (D), (N)); \
+			} \
+		} while (0)
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if so,
+ * posts the event to the HWPerfHost stream.
+ *
+ * @param D Device node pointer
+ * @param T Host ALLOC event type
+ * @param FWADDR sync firmware address
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGX_HWPERF_HOST_ALLOC(D, T, FWADDR, N, Z) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \
+			{ \
+				RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+				                            RGX_HWPERF_HOST_RESOURCE_TYPE_##T, 0, 0, \
+				                            (FWADDR), (N), (Z)); \
+			} \
+		} while (0)
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if so,
+ * posts the event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T Host ALLOC event type
+ * @param UID ID of input object
+ * @param PID ID of allocating process
+ * @param FWADDR sync firmware address
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGX_HWPERF_HOST_ALLOC_FENCE_SYNC(D, T, UID, PID, FWADDR, N, Z)  \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \
+			{ \
+				RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+				                            RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+				                            (UID), (PID), (FWADDR), (N), (Z)); \
+			} \
+		} while (0)
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if so,
+ * posts the event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T Host FREE event type
+ * @param FWADDR sync firmware address
+ */
+#define RGX_HWPERF_HOST_FREE(D, T, FWADDR) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \
+			{ \
+				RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+				                           RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+				                           (0), (0), (FWADDR)); \
+			} \
+		} while (0)
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if so,
+ * posts the event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T Host FREE event type
+ * @param UID ID of input object
+ * @param PID ID of allocating process
+ * @param FWADDR sync firmware address
+ */
+#define RGX_HWPERF_HOST_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \
+			{ \
+				RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+				                           RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+				                           (UID), (PID), (FWADDR)); \
+			} \
+		} while (0)
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if so,
+ * posts the event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T Host MODIFY event type
+ * @param NEWUID ID of output object
+ * @param UID1 ID of first input object
+ * @param UID2 ID of second input object
+ * @param N string containing new object's name
+ * @param Z string size including null terminating character
+ */
+#define RGX_HWPERF_HOST_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_MODIFY)) \
+			{ \
+				RGXHWPerfHostPostModifyEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+				                             RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+				                             (NEWUID), (UID1), (UID2), N, Z); \
+			} \
+		} while (0)
+
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if so,
+ * posts the event to the HWPerfHost stream.
+ *
+ * @param I Device info pointer
+ */
+#define RGX_HWPERF_HOST_CLK_SYNC(I) \
+		do { \
+			if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_CLK_SYNC)) \
+			{ \
+				RGXHWPerfHostPostClkSyncEvent((I)); \
+			} \
+		} while (0)
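+
+/* Note: each of the macros above checks that the event is enabled (via the
+ * host event filter) before calling the corresponding RGXHWPerfHostPost*
+ * function, so no event-marshalling work is done while the event is
+ * filtered out.
+ */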
+
+/******************************************************************************
+ * RGX HW Performance To FTrace Profiling API(s)
+ *****************************************************************************/
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUInitSupport(void);
+void RGXHWPerfFTraceGPUDeInitSupport(void);
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+void RGXHWPerfFTraceGPUDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+void RGXHWPerfFTraceGPUEnqueueEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32ExternalJobRef, IMG_UINT32 ui32InternalJobRef,
+		RGX_HWPERF_KICK_TYPE eKickType);
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUEventsEnabledSet(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bNewValue);
+
+void RGXHWPerfFTraceGPUThread(void *pvData);
+
+#endif
+
+/******************************************************************************
+ * RGX HW utils functions
+ *****************************************************************************/
+
+const IMG_CHAR *RGXHWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType);
+
+#endif /* RGXHWPERF_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxinit.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxinit.c
new file mode 100644
index 0000000..31fd68f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxinit.c
@@ -0,0 +1,4931 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "img_defs.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "syscommon.h"
+#include "rgx_heaps.h"
+#include "rgxheapconfig.h"
+#include "rgxpower.h"
+#include "tlstream.h"
+#include "pvrsrv_tlstreams.h"
+
+#include "rgxinit.h"
+
+#include "pdump_km.h"
+#include "handle.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "rgxmem.h"
+#include "sync_internal.h"
+#include "pvrsrv_apphint.h"
+#include "oskm_apphint.h"
+#include "debugmisc_server.h"
+
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+#include "rgxmmuinit.h"
+#include "rgxmipsmmuinit.h"
+#include "physmem.h"
+#include "devicemem_utils.h"
+#include "devicemem_server.h"
+#include "physmem_osmem.h"
+
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+#include "pvr_gputrace.h"
+#endif
+#include "htbserver.h"
+
+#include "rgx_options.h"
+#include "pvrversion.h"
+
+#include "rgx_compat_bvnc.h"
+
+#include "rgx_heaps.h"
+
+#include "rgxta3d.h"
+#include "rgxtimecorr.h"
+
+#include "rgx_bvnc_table_km.h"
+#include "rgx_bvnc_defs_km.h"
+#if defined(PDUMP)
+#include "rgxstartstop.h"
+#endif
+
+#include "rgx_fwif_alignchecks.h"
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString);
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32  pui32RGXClockSpeed);
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64  ui64ResetValue1, IMG_UINT64  ui64ResetValue2);
+
+#define RGX_MMU_LOG2_PAGE_SIZE_4KB   (12)
+#define RGX_MMU_LOG2_PAGE_SIZE_16KB  (14)
+#define RGX_MMU_LOG2_PAGE_SIZE_64KB  (16)
+#define RGX_MMU_LOG2_PAGE_SIZE_256KB (18)
+#define RGX_MMU_LOG2_PAGE_SIZE_1MB   (20)
+#define RGX_MMU_LOG2_PAGE_SIZE_2MB   (21)
+
+#define RGX_MMU_PAGE_SIZE_4KB   (   4 * 1024)
+#define RGX_MMU_PAGE_SIZE_16KB  (  16 * 1024)
+#define RGX_MMU_PAGE_SIZE_64KB  (  64 * 1024)
+#define RGX_MMU_PAGE_SIZE_256KB ( 256 * 1024)
+#define RGX_MMU_PAGE_SIZE_1MB   (1024 * 1024)
+#define RGX_MMU_PAGE_SIZE_2MB   (2048 * 1024)
+#define RGX_MMU_PAGE_SIZE_MIN RGX_MMU_PAGE_SIZE_4KB
+#define RGX_MMU_PAGE_SIZE_MAX RGX_MMU_PAGE_SIZE_2MB
+
+#define VAR(x) #x
+
+#define MAX_BVNC_LEN (12)
+#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(MAX_BVNC_LEN))+1)
+
+/* List of BVNC strings given as a module parameter, and their count */
+IMG_PCHAR gazRGXBVNCList[PVRSRV_MAX_DEVICES];
+IMG_UINT32 gui32RGXLoadTimeDevCount;
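+/* Each entry is expected to be a BVNC identifier of the form "B.V.N.C"
+ * (hence MAX_BVNC_LEN), for example "4.40.2.51" for a hypothetical core;
+ * one entry per device.
+ */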
+
+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo);
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+
+/* bits used by the LISR to provide a trace of its last execution */
+#define RGX_LISR_DEVICE_NOT_POWERED	(1 << 0)
+#define RGX_LISR_FWIF_POW_OFF		(1 << 1)
+#define RGX_LISR_EVENT_EN		(1 << 2)
+#define RGX_LISR_COUNTS_EQUAL		(1 << 3)
+#define RGX_LISR_PROCESSED		(1 << 4)
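+/* The bits above are OR-ed into the ui32State trace word as the LISR
+ * executes; e.g. a final value of 0x14 (RGX_LISR_EVENT_EN |
+ * RGX_LISR_PROCESSED) means the event was enabled and the interrupt was
+ * fully processed.
+ */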
+
+typedef struct _LISR_EXECUTION_INFO_
+{
+	/* bit mask showing execution flow of last LISR invocation */
+	IMG_UINT32 ui32State;
+	/* snapshot from the last LISR invocation, regardless of
+	 * whether an interrupt was handled
+	 */
+	IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_THREAD_NUM];
+	/* time of the last LISR invocation */
+	IMG_UINT64 ui64Clockns;
+} LISR_EXECUTION_INFO;
+
+/* information about the last execution of the LISR */
+static LISR_EXECUTION_INFO g_sLISRExecutionInfo;
+
+#endif
+
+#if !defined(NO_HARDWARE)
+/*************************************************************************/ /*!
+@Function       SampleIRQCount
+@Description    Utility function taking snapshots of RGX FW interrupt count.
+@Input          paui32Input  A pointer to RGX FW IRQ count array.
+                             Size of the array should be equal to RGX FW thread
+                             count.
+@Input          paui32Output A pointer to array containing sampled RGX FW
+                             IRQ counts
+@Return         IMG_BOOL     Returns IMG_TRUE if the RGX FW IRQ count is not
+                             equal to the sampled count for any RGX FW thread.
+*/ /**************************************************************************/
+static INLINE IMG_BOOL SampleIRQCount(volatile IMG_UINT32 *paui32Input,
+									  volatile IMG_UINT32 *paui32Output)
+{
+	IMG_UINT32 ui32TID;
+	IMG_BOOL bReturnVal = IMG_FALSE;
+
+	for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+	{
+		if (paui32Output[ui32TID] != paui32Input[ui32TID])
+		{
+			/* We are handling any unhandled interrupts here, so align the
+			 * host count with the FW count: sample the current count from
+			 * the FW _after_ the interrupt has been cleared.
+			 */
+			paui32Output[ui32TID] = paui32Input[ui32TID];
+			bReturnVal = IMG_TRUE;
+		}
+	}
+
+	return bReturnVal;
+}
+
+static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	IMG_BOOL bScheduleMISR = IMG_FALSE;
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+	IMG_UINT32 ui32TID;
+#endif
+
+	RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo);
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+	PVR_DPF((PVR_DBG_ERROR, "Last RGX_LISRHandler State: 0x%08X Clock: %llu",
+							g_sLISRExecutionInfo.ui32State,
+							g_sLISRExecutionInfo.ui64Clockns));
+
+	for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+	{
+		PVR_DPF((PVR_DBG_ERROR, \
+				"RGX FW thread %u: InterruptCountSnapshot: 0x%X", \
+				ui32TID, g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32TID]));
+	}
+#else
+	PVR_DPF((PVR_DBG_ERROR, "No further information available. Please enable PVRSRV_DEBUG_LISR_EXECUTION"));
+#endif
+
+
+	if(psRGXFWIfTraceBuf->ePowState != RGXFWIF_POW_OFF)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_WaitForInterruptsTimeout: FW pow state is not OFF (is %u)",
+						(unsigned int) psRGXFWIfTraceBuf->ePowState));
+	}
+
+	bScheduleMISR = SampleIRQCount(psRGXFWIfTraceBuf->aui32InterruptCount,
+								   psDevInfo->aui32SampleIRQCount);
+	return bScheduleMISR;
+}
+
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_BOOL bScheduleMISR;
+
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		bScheduleMISR = IMG_TRUE;
+	}
+	else
+	{
+		bScheduleMISR = _WaitForInterruptsTimeoutCheck(psDevInfo);
+	}
+
+	if (bScheduleMISR)
+	{
+		OSScheduleMISR(psDevInfo->pvMISRData);
+
+		if(psDevInfo->pvAPMISRData != NULL)
+		{
+			OSScheduleMISR(psDevInfo->pvAPMISRData);
+		}
+	}
+}
+
+/*
+	RGX LISR Handler: checks the device power state, reads and clears the
+	IRQ status register (MIPS wrapper or META, depending on the core) and,
+	if the FW interrupt count has advanced, schedules the MISR(s).
+*/
+static IMG_BOOL RGX_LISRHandler (void *pvData)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	IMG_BOOL bInterruptProcessed;
+	RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf;
+	IMG_UINT32 ui32IRQStatus, ui32IRQStatusReg, ui32IRQStatusEventMsk, ui32IRQClearReg, ui32IRQClearMask;
+
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		if (! psDevInfo->bRGXPowered)
+		{
+			return IMG_FALSE;
+		}
+
+		OSScheduleMISR(psDevInfo->pvMISRData);
+		return IMG_TRUE;
+	}
+	else
+	{
+		bInterruptProcessed = IMG_FALSE;
+		psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	}
+
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+	{
+		ui32IRQStatusReg = RGX_CR_MIPS_WRAPPER_IRQ_STATUS;
+		ui32IRQStatusEventMsk = RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN;
+		ui32IRQClearReg = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR;
+		ui32IRQClearMask = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN;
+	}
+	else
+	{
+		ui32IRQStatusReg = RGX_CR_META_SP_MSLVIRQSTATUS;
+		ui32IRQStatusEventMsk = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN;
+		ui32IRQClearReg = RGX_CR_META_SP_MSLVIRQSTATUS;
+		ui32IRQClearMask = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK;
+	}
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+	IMG_UINT32 ui32TID;
+
+	for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+	{
+		g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32TID] =
+			psRGXFWIfTraceBuf->aui32InterruptCount[ui32TID];
+	}
+	g_sLISRExecutionInfo.ui32State = 0;
+	g_sLISRExecutionInfo.ui64Clockns = OSClockns64();
+#endif
+
+	if (psDevInfo->bRGXPowered == IMG_FALSE)
+	{
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+		g_sLISRExecutionInfo.ui32State |= RGX_LISR_DEVICE_NOT_POWERED;
+#endif
+		if (psRGXFWIfTraceBuf->ePowState == RGXFWIF_POW_OFF)
+		{
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+			g_sLISRExecutionInfo.ui32State |= RGX_LISR_FWIF_POW_OFF;
+#endif
+			return bInterruptProcessed;
+		}
+	}
+
+	ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQStatusReg);
+	if (ui32IRQStatus & ui32IRQStatusEventMsk)
+	{
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+		g_sLISRExecutionInfo.ui32State |= RGX_LISR_EVENT_EN;
+#endif
+
+		OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQClearReg, ui32IRQClearMask);
+
+#if defined(RGX_FEATURE_OCPBUS)
+		OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OCP_IRQSTATUS_2, RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN);
+#endif
+
+		bInterruptProcessed = SampleIRQCount(psRGXFWIfTraceBuf->aui32InterruptCount,
+											 psDevInfo->aui32SampleIRQCount);
+
+		if (!bInterruptProcessed)
+		{
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+			g_sLISRExecutionInfo.ui32State |= RGX_LISR_COUNTS_EQUAL;
+#endif
+			return bInterruptProcessed;
+		}
+
+		bInterruptProcessed = IMG_TRUE;
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+		g_sLISRExecutionInfo.ui32State |= RGX_LISR_PROCESSED;
+#endif
+
+		OSScheduleMISR(psDevInfo->pvMISRData);
+
+		if (psDevInfo->pvAPMISRData != NULL)
+		{
+			OSScheduleMISR(psDevInfo->pvAPMISRData);
+		}
+	}
+
+	return bInterruptProcessed;
+}
+
+static void RGXCheckFWActivePowerState(void *psDevice)
+{
+	PVRSRV_DEVICE_NODE	*psDeviceNode = psDevice;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_IDLE)
+	{
+		/* The FW is IDLE and therefore could be shut down */
+		eError = RGXActivePowerRequest(psDeviceNode);
+
+		if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED))
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					 "%s: Failed RGXActivePowerRequest call (device: %p) with %s",
+					 __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+
+			PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+		}
+	}
+
+}
+
+/* Shorter defines to keep the code a bit shorter */
+#define GPU_ACTIVE_LOW   RGXFWIF_GPU_UTIL_STATE_ACTIVE_LOW
+#define GPU_IDLE         RGXFWIF_GPU_UTIL_STATE_IDLE
+#define GPU_ACTIVE_HIGH  RGXFWIF_GPU_UTIL_STATE_ACTIVE_HIGH
+#define GPU_BLOCKED      RGXFWIF_GPU_UTIL_STATE_BLOCKED
+#define MAX_ITERATIONS   64
+
+static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_HANDLE hGpuUtilUser,
+                                       RGXFWIF_GPU_UTIL_STATS *psReturnStats)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+	IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0};
+	IMG_UINT64 ui64TimeNow;
+	IMG_UINT64 ui64LastPeriod;
+	IMG_UINT64 ui64LastWord = 0, ui64LastState = 0, ui64LastTime = 0;
+	IMG_UINT32 i = 0;
+
+
+	/***** (1) Initialise return stats *****/
+
+	psReturnStats->bValid = IMG_FALSE;
+	psReturnStats->ui64GpuStatActiveLow  = 0;
+	psReturnStats->ui64GpuStatIdle       = 0;
+	psReturnStats->ui64GpuStatActiveHigh = 0;
+	psReturnStats->ui64GpuStatBlocked    = 0;
+	psReturnStats->ui64GpuStatCumulative = 0;
+
+	if (hGpuUtilUser == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psAggregateStats = hGpuUtilUser;
+
+
+	/***** (2) Get latest data from shared area *****/
+
+	OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+	/* Read the timer before reading the latest stats from the shared
+	 * area, discard it later in case of state updates after this point.
+	 */
+	ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(OSClockns64());
+	OSMemoryBarrier();
+
+	/* Keep reading the counters until the values stabilise as the FW
+	 * might be updating them at the same time.
+	 */
+	while(((ui64LastWord != psUtilFWCb->ui64LastWord) ||
+	       (aui64TmpCounters[ui64LastState] !=
+	        psUtilFWCb->aui64StatsCounters[ui64LastState])) &&
+	      (i < MAX_ITERATIONS))
+	{
+		ui64LastWord  = psUtilFWCb->ui64LastWord;
+		ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord);
+		aui64TmpCounters[GPU_ACTIVE_LOW]  = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE_LOW];
+		aui64TmpCounters[GPU_IDLE]        = psUtilFWCb->aui64StatsCounters[GPU_IDLE];
+		aui64TmpCounters[GPU_ACTIVE_HIGH] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE_HIGH];
+		aui64TmpCounters[GPU_BLOCKED]     = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED];
+		i++;
+	}
+
+	OSLockRelease(psDevInfo->hGPUUtilLock);
+
+	if (i == MAX_ITERATIONS)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data within a short time."));
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+
+	/***** (3) Compute return stats and update aggregate stats *****/
+
+	/* Update temp counters to account for the time since the last update to the shared ones */
+	ui64LastTime   = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord);
+	ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+	aui64TmpCounters[ui64LastState] += ui64LastPeriod;
+
+	/* Get statistics for a user since its last request */
+	psReturnStats->ui64GpuStatActiveLow = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE_LOW],
+	                                                                  psAggregateStats->ui64GpuStatActiveLow);
+	psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE],
+	                                                             psAggregateStats->ui64GpuStatIdle);
+	psReturnStats->ui64GpuStatActiveHigh = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE_HIGH],
+	                                                                   psAggregateStats->ui64GpuStatActiveHigh);
+	psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED],
+	                                                                psAggregateStats->ui64GpuStatBlocked);
+	psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatActiveLow + psReturnStats->ui64GpuStatIdle +
+	                                       psReturnStats->ui64GpuStatActiveHigh + psReturnStats->ui64GpuStatBlocked;
+
+	/* Update aggregate stats for the current user */
+	psAggregateStats->ui64GpuStatActiveLow  += psReturnStats->ui64GpuStatActiveLow;
+	psAggregateStats->ui64GpuStatIdle       += psReturnStats->ui64GpuStatIdle;
+	psAggregateStats->ui64GpuStatActiveHigh += psReturnStats->ui64GpuStatActiveHigh;
+	psAggregateStats->ui64GpuStatBlocked    += psReturnStats->ui64GpuStatBlocked;
+
+
+	/***** (4) Convert return stats to microseconds *****/
+
+	psReturnStats->ui64GpuStatActiveLow  = OSDivide64(psReturnStats->ui64GpuStatActiveLow, 1000, &i);
+	psReturnStats->ui64GpuStatIdle       = OSDivide64(psReturnStats->ui64GpuStatIdle, 1000, &i);
+	psReturnStats->ui64GpuStatActiveHigh = OSDivide64(psReturnStats->ui64GpuStatActiveHigh, 1000, &i);
+	psReturnStats->ui64GpuStatBlocked    = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &i);
+	psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &i);
+
+	/* Check that the return stats make sense */
+	if(psReturnStats->ui64GpuStatCumulative == 0)
+	{
+		/* We can enter here only if all the RGXFWIF_GPU_UTIL_GET_PERIOD
+		 * returned 0. This could happen if the GPU frequency value
+		 * is not well calibrated and the FW is updating the GPU state
+		 * while the Host is reading it.
+		 * When such an event happens frequently, timers or the aggregate
+		 * stats might not be accurate...
+		 */
+		PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data."));
+		return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+	}
+
+	psReturnStats->bValid = IMG_TRUE;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXRegisterGpuUtilStats(IMG_HANDLE *phGpuUtilUser)
+{
+	RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+	psAggregateStats = OSAllocMem(sizeof(RGXFWIF_GPU_UTIL_STATS));
+	if(psAggregateStats == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psAggregateStats->ui64GpuStatActiveLow  = 0;
+	psAggregateStats->ui64GpuStatIdle       = 0;
+	psAggregateStats->ui64GpuStatActiveHigh = 0;
+	psAggregateStats->ui64GpuStatBlocked    = 0;
+
+	/* Not used */
+	psAggregateStats->bValid = IMG_FALSE;
+	psAggregateStats->ui64GpuStatCumulative = 0;
+
+	*phGpuUtilUser = psAggregateStats;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXUnregisterGpuUtilStats(IMG_HANDLE hGpuUtilUser)
+{
+	RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+	if(hGpuUtilUser == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psAggregateStats = hGpuUtilUser;
+	OSFreeMem(psAggregateStats);
+
+	return PVRSRV_OK;
+}
+
+/*
+	RGX MISR Handler
+*/
+static void RGX_MISRHandler (void *pvData)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+
+	/* Give the HWPerf service a chance to transfer some data from the FW
+	 * buffer to the host driver transport layer buffer.
+	 */
+	RGXHWPerfDataStoreCB(psDeviceNode);
+
+	/* Inform the other services-managed devices that we have finished an operation */
+	PVRSRVCheckStatus(psDeviceNode);
+
+#if defined(SUPPORT_PDVFS) && defined(RGXFW_META_SUPPORT_2ND_THREAD)
+	/*
+	 * The Firmware CCB only exists for the primary FW thread. The only case
+	 * where a non-primary FW thread needs to communicate with the host
+	 * driver is PDVFS running on a non-primary FW thread; that case is
+	 * handled directly by the call below.
+	 */
+	RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice);
+#endif
+
+	/* Process the Firmware CCB for pending commands */
+	RGXCheckFirmwareCCB(psDeviceNode->pvDevice);
+
+	/* Calibrate the GPU frequency and recorrelate Host and FW timers (done every few seconds) */
+	RGXGPUFreqCalibrateCorrelatePeriodic(psDeviceNode);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	/* Process Workload Estimation Specific commands from the FW */
+	WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice);
+#endif
+}
+#endif /* !defined(NO_HARDWARE) */
+
+
+/* This function writes the parameters needed for the initial boot into the firmware image */
+static PVRSRV_ERROR RGXBootldrDataInit(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       void *pvFWImage)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice;
+	IMG_UINT64 *pui64BootConfig;
+	IMG_DEV_PHYADDR sPhyAddr;
+	IMG_BOOL bValid;
+
+	/* To get a pointer to the bootloader configuration data start from a pointer to the FW image... */
+	pui64BootConfig = (IMG_UINT64 *) pvFWImage;
+
+	/* ... jump to the boot/NMI data page... */
+	pui64BootConfig += RGXMIPSFW_GET_OFFSET_IN_QWORDS(RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE);
+
+	/* ... and then jump to the bootloader data offset within the page */
+	pui64BootConfig += RGXMIPSFW_GET_OFFSET_IN_QWORDS(RGXMIPSFW_BOOTLDR_CONF_OFFSET);
+
+
+	/* Rogue Registers physical address */
+	PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+							   1, &sPhyAddr, &(psDeviceNode->psDevConfig->sRegsCpuPBase));
+	pui64BootConfig[RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET] = sPhyAddr.uiAddr;
+
+	/* MIPS Page Table physical address. There are 16 pages for a firmware heap of 32 MB */
+	MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPhyAddr);
+	pui64BootConfig[RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET] = sPhyAddr.uiAddr;
+
+	/* MIPS Stack Pointer Physical Address */
+	eError = RGXGetPhyAddr(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR,
+						   &sPhyAddr,
+						   RGXMIPSFW_STACK_OFFSET,
+						   RGXMIPSFW_LOG2_PAGE_SIZE,
+						   1,
+						   &bValid);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXBootldrDataInit: RGXGetPhyAddr failed (%u)",
+				eError));
+		return eError;
+	}
+	pui64BootConfig[RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET] = sPhyAddr.uiAddr;
+
+	/* Reserved for future use */
+	pui64BootConfig[RGXMIPSFW_RESERVED_FUTURE_OFFSET] = 0;
+
+	/* FW Init Data Structure Virtual Address */
+	pui64BootConfig[RGXMIPSFW_FWINIT_VIRTADDR_OFFSET] = psDevInfo->psRGXFWIfInitMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr;
+
+	return PVRSRV_OK;
+}
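+
+/* For reference, the bootloader configuration written above is an array of
+ * 64-bit words within the boot/NMI data page, indexed by the
+ * RGXMIPSFW_*_OFFSET constants: the Rogue register base physical address,
+ * the MIPS page table base, the FW stack pointer physical address, a
+ * reserved word and the FW init structure virtual address.
+ */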
+
+#if defined(PDUMP)
+static PVRSRV_ERROR RGXPDumpBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                        PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PMR *psFWDataPMR;
+	IMG_DEV_PHYADDR sTmpAddr;
+	IMG_UINT32 ui32BootConfOffset, ui32ParamOffset;
+	PVRSRV_ERROR eError;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+	ui32BootConfOffset = (RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE);
+	ui32BootConfOffset += RGXMIPSFW_BOOTLDR_CONF_OFFSET;
+
+	/* The physical addresses used by a pdump player will be different from
+	 * the ones we have put in the MIPS bootloader configuration data.
+	 * We have to tell the pdump player to replace the original values with
+	 * the real ones.
+	 */
+	PDUMPCOMMENT("Pass new boot parameters to the FW");
+
+	/* Rogue Registers physical address */
+	ui32ParamOffset = ui32BootConfOffset + (RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET * sizeof(IMG_UINT64));
+
+	eError = PDumpRegLabelToMem64(RGX_PDUMPREG_NAME,
+	                              0x0,
+	                              psFWDataPMR,
+	                              ui32ParamOffset,
+	                              PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of Rogue registers phy address failed (%u)", eError));
+		return eError;
+	}
+
+	/* Page Table physical Address */
+	ui32ParamOffset = ui32BootConfOffset + (RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET * sizeof(IMG_UINT64));
+
+	MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sTmpAddr);
+
+	eError = PDumpPTBaseObjectToMem64(psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+	                                  psFWDataPMR,
+	                                  0,
+	                                  ui32ParamOffset,
+	                                  PDUMP_FLAGS_CONTINUOUS,
+	                                  MMU_LEVEL_1,
+	                                  sTmpAddr.uiAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of page tables phy address failed (%u)", eError));
+		return eError;
+	}
+
+	/* Stack physical address */
+	ui32ParamOffset = ui32BootConfOffset + (RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET * sizeof(IMG_UINT64));
+
+	eError = PDumpMemLabelToMem64(psFWDataPMR,
+	                              psFWDataPMR,
+	                              RGXMIPSFW_STACK_OFFSET,
+	                              ui32ParamOffset,
+	                              PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of stack phy address failed (%u)", eError));
+		return eError;
+	}
+
+	return eError;
+}
+#endif /* PDUMP */
+
+
+PVRSRV_ERROR PVRSRVGPUVIRTPopulateLMASubArenasKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+												 IMG_UINT32          aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+												 IMG_UINT32          aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+                                                 IMG_BOOL            bEnableTrustedDeviceAceConfig)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	IMG_UINT32	ui32OS, ui32Region;
+
+	for (ui32OS = 0; ui32OS < GPUVIRT_VALIDATION_NUM_OS; ui32OS++)
+	{
+		for (ui32Region = 0; ui32Region < GPUVIRT_VALIDATION_NUM_REGIONS; ui32Region++)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "OS=%u, Region=%u, Min=%u, Max=%u", ui32OS, ui32Region, aui32OSidMin[ui32Region][ui32OS], aui32OSidMax[ui32Region][ui32OS]));
+		}
+	}
+
+	PopulateLMASubArenas(psDeviceNode, aui32OSidMin, aui32OSidMax);
+
+	#if defined(EMULATOR)
+	if ((bEnableTrustedDeviceAceConfig) && (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK))
+	{
+		SetTrustedDeviceAceEnabled();
+	}
+	#else
+	{
+		PVR_UNREFERENCED_PARAMETER(bEnableTrustedDeviceAceConfig);
+	}
+	#endif
+}
+#else
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(aui32OSidMin);
+	PVR_UNREFERENCED_PARAMETER(aui32OSidMax);
+	PVR_UNREFERENCED_PARAMETER(bEnableTrustedDeviceAceConfig);
+}
+#endif
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO   *psDevInfo,
+                                      PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+
+	/* Save information used on power transitions for later
+	 * (when RGXStart and RGXStop are executed)
+	 */
+	psDevInfo->sLayerParams.psDevInfo = psDevInfo;
+	psDevInfo->sLayerParams.psDevConfig = psDevConfig;
+#if defined(PDUMP)
+	psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+	if(psDevInfo->sDevFeatureCfg.ui32META)
+	{
+		IMG_DEV_PHYADDR sKernelMMUCtxPCAddr;
+
+		eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx,
+		                             &sKernelMMUCtxPCAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog"));
+			return eError;
+		}
+
+		psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr;
+	}
+	else
+	{
+		PMR *psFWCodePMR = (PMR *)(psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR);
+		PMR *psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+		IMG_DEV_PHYADDR sPhyAddr;
+		IMG_BOOL bValid;
+
+		/* The physical address of the GPU registers needs to be translated
+		 * in case we are in a LMA scenario
+		 */
+		PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+		                           1,
+		                           &sPhyAddr,
+		                           &(psDevConfig->sRegsCpuPBase));
+
+		psDevInfo->sLayerParams.sGPURegAddr = sPhyAddr;
+
+		eError = RGXGetPhyAddr(psFWCodePMR,
+		                       &sPhyAddr,
+		                       RGXMIPSFW_BOOT_NMI_CODE_BASE_PAGE * RGXMIPSFW_PAGE_SIZE,
+		                       RGXMIPSFW_LOG2_PAGE_SIZE,
+		                       1,
+		                       &bValid);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI code address"));
+			return eError;
+		}
+
+		psDevInfo->sLayerParams.sBootRemapAddr = sPhyAddr;
+
+		eError = RGXGetPhyAddr(psFWDataPMR,
+		                       &sPhyAddr,
+		                       RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE,
+		                       RGXMIPSFW_LOG2_PAGE_SIZE,
+		                       1,
+		                       &bValid);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI data address"));
+			return eError;
+		}
+
+		psDevInfo->sLayerParams.sDataRemapAddr = sPhyAddr;
+
+		eError = RGXGetPhyAddr(psFWCodePMR,
+		                       &sPhyAddr,
+		                       RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE * RGXMIPSFW_PAGE_SIZE,
+		                       RGXMIPSFW_LOG2_PAGE_SIZE,
+		                       1,
+		                       &bValid);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW exceptions address"));
+			return eError;
+		}
+
+		psDevInfo->sLayerParams.sCodeRemapAddr = sPhyAddr;
+
+		psDevInfo->sLayerParams.sTrampolineRemapAddr.uiAddr = psDevInfo->sTrampoline.sPhysAddr.uiAddr;
+	}
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	/* Send information used on power transitions to the trusted device as
+	 * in this setup the driver cannot start/stop the GPU and perform resets
+	 */
+	if (psDevConfig->pfnTDSetPowerParams)
+	{
+		PVRSRV_TD_POWER_PARAMS sTDPowerParams;
+
+		if(psDevInfo->sDevFeatureCfg.ui32META)
+		{
+			sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr;
+		}
+		else
+		{
+			sTDPowerParams.sGPURegAddr    = psDevInfo->sLayerParams.sGPURegAddr;
+			sTDPowerParams.sBootRemapAddr = psDevInfo->sLayerParams.sBootRemapAddr;
+			sTDPowerParams.sCodeRemapAddr = psDevInfo->sLayerParams.sCodeRemapAddr;
+			sTDPowerParams.sDataRemapAddr = psDevInfo->sLayerParams.sDataRemapAddr;
+		}
+		eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData,
+												  &sTDPowerParams);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: TDSetPowerParams not implemented!"));
+		eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+#endif
+
+	return eError;
+}
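+
+/* Summary of the two paths above: META cores only need the kernel MMU
+ * context's page catalogue physical address for power transitions, while
+ * MIPS cores need the GPU register base plus the boot, data and code remap
+ * physical addresses and the trampoline remap address.
+ */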
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitReleaseFWInitResourcesKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+												   PMR *psFWCodePMR,
+												   PMR *psFWDataPMR,
+												   PMR *psFWCorePMR,
+												   PMR *psHWPerfPMR)
+{
+	/* provide a stub interface for the direct bridge */
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(psFWCodePMR);
+	PVR_UNREFERENCED_PARAMETER(psFWDataPMR);
+	PVR_UNREFERENCED_PARAMETER(psFWCorePMR);
+	PVR_UNREFERENCED_PARAMETER(psHWPerfPMR);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXInitDevPart2KM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitDevPart2KM (PVRSRV_DEVICE_NODE	*psDeviceNode,
+									  RGX_INIT_COMMAND		*psDbgScript,
+									  IMG_UINT32			ui32DeviceFlags,
+									  IMG_UINT32			ui32HWPerfHostBufSizeKB,
+									  IMG_UINT32			ui32HWPerfHostFilter,
+									  RGX_ACTIVEPM_CONF		eActivePMConf)
+{
+	PVRSRV_ERROR			eError;
+	PVRSRV_RGXDEV_INFO		*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_DEV_POWER_STATE	eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+	PVRSRV_DEVICE_CONFIG	*psDevConfig = psDeviceNode->psDevConfig;
+
+#if defined(PDUMP)
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+	{
+		RGXPDumpBootldrData(psDeviceNode, psDevInfo);
+	}
+#endif
+#if defined(TIMING) || defined(DEBUG)
+	OSUserModeAccessToPerfCountersEn();
+#endif
+
+	PDUMPCOMMENT("RGX Initialisation Part 2");
+
+	psDevInfo->ui32RegSize = psDevConfig->ui32RegsSize;
+	psDevInfo->sRegsPhysBase = psDevConfig->sRegsCpuPBase;
+
+	/* Initialise Device Flags */
+	psDevInfo->ui32DeviceFlags = 0;
+	RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE);
+
+	/* Allocate DVFS Table (needs to be allocated before SUPPORT_GPUTRACE_EVENTS
+	 * is initialised because there is a dependency between them) */
+	psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable)));
+	if (psDevInfo->psGpuDVFSTable == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitDevPart2KM: failed to allocate gpu dvfs table storage"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Reset DVFS Table */
+	psDevInfo->psGpuDVFSTable->ui32CurrentDVFSId = 0;
+	psDevInfo->psGpuDVFSTable->aui32DVFSClock[0] = 0;
+
+	/* Initialise HWPerfHost buffer. */
+	if (RGXHWPerfHostInit(psDevInfo, ui32HWPerfHostBufSizeKB) == PVRSRV_OK)
+	{
+		/* If HWPerf enabled allocate all resources for the host side buffer. */
+		if (ui32DeviceFlags & RGXKMIF_DEVICE_STATE_HWPERF_HOST_EN)
+		{
+			if (RGXHWPerfHostInitOnDemandResources(psDevInfo) == PVRSRV_OK)
+			{
+				RGXHWPerfHostSetEventFilter(psDevInfo, ui32HWPerfHostFilter);
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer on demand"
+				        " initialisation failed."));
+			}
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer initialisation failed."));
+	}
+
+	/* Initialise lists of ZSBuffers */
+	eError = OSLockCreate(&psDevInfo->hLockZSBuffer,LOCK_TYPE_PASSIVE);
+	PVR_ASSERT(eError == PVRSRV_OK);
+	dllist_init(&psDevInfo->sZSBufferHead);
+	psDevInfo->ui32ZSBufferCurrID = 1;
+
+	/* Initialise lists of growable Freelists */
+	eError = OSLockCreate(&psDevInfo->hLockFreeList,LOCK_TYPE_PASSIVE);
+	PVR_ASSERT(eError == PVRSRV_OK);
+	dllist_init(&psDevInfo->sFreeListHead);
+	psDevInfo->ui32FreelistCurrID = 1;
+
+#if 1 /* defined(SUPPORT_RAY_TRACING) */
+	eError = OSLockCreate(&psDevInfo->hLockRPMFreeList,LOCK_TYPE_PASSIVE);
+	PVR_ASSERT(eError == PVRSRV_OK);
+	dllist_init(&psDevInfo->sRPMFreeListHead);
+	psDevInfo->ui32RPMFreelistCurrID = 1;
+	eError = OSLockCreate(&psDevInfo->hLockRPMContext,LOCK_TYPE_PASSIVE);
+	PVR_ASSERT(eError == PVRSRV_OK);
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	eError = OSLockCreate(&psDevInfo->hDebugFaultInfoLock, LOCK_TYPE_PASSIVE);
+
+	if(eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = OSLockCreate(&psDevInfo->hMMUCtxUnregLock, LOCK_TYPE_PASSIVE);
+
+	if(eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+	{
+		eError = OSLockCreate(&psDevInfo->hNMILock, LOCK_TYPE_DISPATCH);
+
+		if(eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	/* Setup GPU utilisation stats update callback */
+#if !defined(NO_HARDWARE)
+	psDevInfo->pfnGetGpuUtilStats = RGXGetGpuUtilStats;
+#endif
+
+	eError = OSLockCreate(&psDevInfo->hGPUUtilLock, LOCK_TYPE_PASSIVE);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+	psDevInfo->eActivePMConf = eActivePMConf;
+
+	/* set-up the Active Power Mgmt callback */
+#if !defined(NO_HARDWARE)
+	{
+		RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+		IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM;
+		IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) ||
+							   (eActivePMConf == RGX_ACTIVEPM_FORCE_ON);
+		/* Disable APM if in VZ mode */
+		bEnableAPM = bEnableAPM && PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE);
+
+		if (bEnableAPM)
+		{
+			eError = OSInstallMISR(&psDevInfo->pvAPMISRData, RGXCheckFWActivePowerState, psDeviceNode);
+			if (eError != PVRSRV_OK)
+			{
+				return eError;
+			}
+
+			/* Prevent the device being woken up before there is something to do. */
+			eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
+		}
+	}
+#endif
+
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM,
+	                                    RGXQueryAPMState,
+	                                    RGXSetAPMState,
+	                                    psDeviceNode,
+	                                    NULL);
+
+	RGXGPUFreqCalibrationInitAppHintCallbacks(psDeviceNode);
+
+	/*
+		Register the device with the power manager.
+			Native/hypervisor drivers: support power management
+			Guest drivers: do not currently support power management
+	*/
+	eError = PVRSRVRegisterPowerDevice(psDeviceNode,
+									   &RGXPrePowerState, &RGXPostPowerState,
+									   psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState,
+									   &RGXPreClockSpeedChange, &RGXPostClockSpeedChange,
+									   &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest,
+									   &RGXDustCountChange,
+									   (IMG_HANDLE)psDeviceNode,
+									   PVRSRV_DEV_POWER_STATE_OFF,
+									   eDefaultPowerState);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitDevPart2KM: failed to register device with power manager"));
+		return eError;
+	}
+
+	eError = RGXSetPowerParams(psDevInfo, psDevConfig);
+	if (eError != PVRSRV_OK) return eError;
+
+	/*
+	 * Copy scripts
+	 */
+	OSCachedMemCopy(psDevInfo->psScripts->asDbgCommands, psDbgScript,
+			  RGX_MAX_DEBUG_COMMANDS * sizeof(*psDbgScript));
+
+#if defined(PDUMP)
+	/* Run RGXStop with the correct PDump flags to feed the last-frame deinit buffer */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_DEINIT, "RGX deinitialisation commands");
+
+	psDevInfo->sLayerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW;
+
+	if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		eError = RGXStop(&psDevInfo->sLayerParams);
+		if (eError != PVRSRV_OK) return eError;
+	}
+
+	psDevInfo->sLayerParams.ui32PdumpFlags &= ~(PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW);
+#endif
+
+#if !defined(NO_HARDWARE)
+	eError = RGXInstallProcessQueuesMISR(&psDevInfo->hProcessQueuesMISR, psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		if (psDevInfo->pvAPMISRData != NULL)
+		{
+			(void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+		}
+		return eError;
+	}
+
+	/* Register the interrupt handlers */
+	eError = OSInstallMISR(&psDevInfo->pvMISRData,
+									RGX_MISRHandler, psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		if (psDevInfo->pvAPMISRData != NULL)
+		{
+			(void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+		}
+		(void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+		return eError;
+	}
+
+	eError = SysInstallDeviceLISR(psDevConfig->hSysData,
+								  psDevConfig->ui32IRQ,
+								  PVRSRV_MODNAME,
+								  RGX_LISRHandler,
+								  psDeviceNode,
+								  &psDevInfo->pvLISRData);
+	if (eError != PVRSRV_OK)
+	{
+		if (psDevInfo->pvAPMISRData != NULL)
+		{
+			(void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+		}
+		(void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+		(void) OSUninstallMISR(psDevInfo->pvMISRData);
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_PDVFS) && !defined(RGXFW_META_SUPPORT_2ND_THREAD)
+	psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer =
+		OSAddTimer((PFN_TIMER_FUNC)PDVFSRequestReactiveUpdate,
+		           psDevInfo,
+		           PDVFS_REACTIVE_INTERVAL_MS);
+
+	OSEnableTimer(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer);
+#endif
+
+#if defined(PDUMP)
+	if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK))
+	{
+		if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) &&
+			!PVRSRVSystemSnoopingOfDeviceCache(psDevConfig))
+		{
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has NO cache snooping");
+		}
+		else
+		{
+			if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig))
+			{
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has CPU cache snooping");
+			}
+			if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig))
+			{
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has DEVICE cache snooping");
+			}
+		}
+	}
+#endif
+
+	psDevInfo->bDevInit2Done = IMG_TRUE;
+
+	return PVRSRV_OK;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitHWPerfCountersKM(PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+	PVRSRV_ERROR		eError;
+	RGXFWIF_KCCB_CMD	sKccbCmd;
+
+	/* Fill in the command structure with the parameters needed */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT;
+
+	eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+									   RGXFWIF_DM_GP,
+									   &sKccbCmd,
+									   sizeof(sKccbCmd),
+									   PDUMP_FLAGS_CONTINUOUS);
+
+	return eError;
+}
+
+static PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+	/* set up fw memory contexts */
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR        eError;
+
+	/* Register callbacks for creation of device memory contexts */
+	psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+	psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+	/* Create the memory context for the firmware. */
+	eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_META,
+								 &psDevInfo->psKernelDevmemCtx);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXInitCreateFWKernelMemoryContext: Failed DevmemCreateContext (%u)", eError));
+		goto failed_to_create_ctx;
+	}
+
+	eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx,
+								  "Firmware", /* FIXME: We need to create an IDENT macro for this string.
+								                 Make sure the IDENT macro is not accessible to userland */
+								  &psDevInfo->psFirmwareHeap);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXInitCreateFWKernelMemoryContext: Failed DevmemFindHeapByName (%u)", eError));
+		goto failed_to_find_heap;
+	}
+
+	/* Perform additional vz specific initialization */
+	eError = RGXVzInitCreateFWKernelMemoryContext(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "RGXInitCreateFWKernelMemoryContext: Failed RGXVzInitCreateFWKernelMemoryContext (%u)",
+				 eError));
+		goto failed_to_find_heap;
+	}
+
+	return eError;
+
+failed_to_find_heap:
+	/*
+	 * Clear the mem context create callbacks before destroying the RGX firmware
+	 * context to avoid a spurious callback.
+	 */
+	psDeviceNode->pfnRegisterMemoryContext = NULL;
+	psDeviceNode->pfnUnregisterMemoryContext = NULL;
+	DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+	psDevInfo->psKernelDevmemCtx = NULL;
+failed_to_create_ctx:
+	return eError;
+}
+
+static void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR        eError;
+
+	RGXVzDeInitDestroyFWKernelMemoryContext(psDeviceNode);
+
+	/*
+	 * Clear the mem context create callbacks before destroying the RGX firmware
+	 * context to avoid a spurious callback.
+	 */
+	psDeviceNode->pfnRegisterMemoryContext = NULL;
+	psDeviceNode->pfnUnregisterMemoryContext = NULL;
+
+	if (psDevInfo->psKernelDevmemCtx)
+	{
+		eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+		/* FIXME - this should return void */
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+}
+
+#if defined(RGXFW_ALIGNCHECKS)
+static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode,
+                                      IMG_UINT32 ui32AlignChecksSize,
+                                      IMG_UINT32 aui32AlignChecks[])
+{
+	static IMG_UINT32 aui32AlignChecksKM[] = {RGXFW_ALIGN_CHECKS_INIT_KM};
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+	IMG_UINT32 i, *paui32FWAlignChecks;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Skip the alignment check if the driver is guest
+	   since there is no firmware to check against */
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, eError);
+
+	if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAlignmentCheckKM: FW Alignment Check"
+		        " Mem Descriptor is NULL"));
+		return PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc,
+	                                  (void **) &paui32FWAlignChecks);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVAlignmentCheckKM: Failed to acquire"
+		        " kernel address for alignment checks (%u)", eError));
+		return eError;
+	}
+
+	paui32FWAlignChecks += IMG_ARR_NUM_ELEMS(aui32AlignChecksKM) + 1;
+	if (*paui32FWAlignChecks++ != ui32AlignChecksSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAlignmentCheckKM: Mismatch"
+					" in number of structures to check."));
+		eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+		goto return_;
+	}
+
+	for (i = 0; i < ui32AlignChecksSize; i++)
+	{
+		if (aui32AlignChecks[i] != paui32FWAlignChecks[i])
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PVRSRVAlignmentCheckKM: Check for"
+					" structured alignment failed."));
+			eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+			goto return_;
+		}
+	}
+
+return_:
+
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+
+	return eError;
+}
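+
+/* As consumed by RGXAlignmentCheck() above, the FW buffer appears to hold
+ * the KM check values and a count word first, followed by the UM check
+ * count and then the UM check values, which are compared entry by entry
+ * against the caller-supplied array.
+ */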
+#endif
+
+static
+PVRSRV_ERROR RGXAllocateFWCodeRegion(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_DEVMEM_SIZE_T ui32FWCodeAllocSize,
+                                     IMG_UINT32 uiMemAllocFlags,
+                                     IMG_BOOL bFWCorememCode,
+                                     const IMG_PCHAR pszText,
+                                     DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_ERROR eError;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift();
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+	{
+		uiLog2Align = RGXMIPSFW_LOG2_PAGE_SIZE_64K;
+	}
+#endif
+
+#if !defined(SUPPORT_TRUSTED_DEVICE)
+	uiMemAllocFlags |= PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+	                   PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PVR_UNREFERENCED_PARAMETER(bFWCorememCode);
+
+	PDUMPCOMMENT("Allocate and export FW %s memory",
+	             bFWCorememCode? "coremem code" : "code");
+
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+	                                    ui32FWCodeAllocSize,
+	                                    1 << uiLog2Align,
+	                                    uiMemAllocFlags,
+	                                    pszText,
+	                                    ppsMemDescPtr);
+	return eError;
+#else
+	PDUMPCOMMENT("Import secure FW %s memory",
+	             bFWCorememCode? "coremem code" : "code");
+
+	eError = DevmemImportTDFWCode(psDeviceNode,
+	                              ui32FWCodeAllocSize,
+	                              uiLog2Align,
+	                              uiMemAllocFlags,
+	                              bFWCorememCode,
+	                              ppsMemDescPtr);
+	return eError;
+#endif
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver
+
+ @Description
+
+ Validate the FW build options against the KM driver build options
+ (KM build options only).
+
+ The following check is redundant, because the next check covers the same
+ bits: if client and server are build-compatible, and client and firmware
+ are build-compatible, then server and firmware are build-compatible too.
+
+ It is kept so that any incompatibility is reported with a clear error message.
+
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_INIT *psRGXFWInit)
+{
+#if !defined(NO_HARDWARE)
+	IMG_UINT32			ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch;
+
+	if (psRGXFWInit == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+
+	ui32BuildOptionsFWKMPart = psRGXFWInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_KM;
+
+	if (ui32BuildOptions != ui32BuildOptionsFWKMPart)
+	{
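+		/* XOR leaves only the option bits that differ between the KM
+		 * driver and the firmware. */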
+		ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart;
+#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
+		/* Mask out the debug flag option, since debug/release mixes between UM and KM are supported. */
+		ui32BuildOptionsMismatch &= OPTIONS_STRICT;
+#endif
+		if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; "
+				"extra options present in the KM driver: (0x%x). Please check rgx_options.h",
+				ui32BuildOptions & ui32BuildOptionsMismatch ));
+			return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+		}
+
+		if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; "
+				"extra options present in Firmware: (0x%x). Please check rgx_options.h",
+				ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch ));
+			return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+		}
+		PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ."));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]"));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK version against driver DDK version
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+																			RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+	IMG_UINT32			ui32DDKVersion;
+	PVRSRV_ERROR		eError;
+
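+	/* Pack MAJ.MIN into the single 32-bit value the FW reports back. */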
+	ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+#endif
+
+#if defined(PDUMP)
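+	/* On PDump builds the check is captured as a POL: at playback the
+	 * pdump player polls the FW init structure for the expected value
+	 * instead of the driver comparing it at run time. */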
+	PDUMPCOMMENT("Compatibility check: KM driver and FW DDK version");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+												offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion),
+												ui32DDKVersion,
+												0xffffffff,
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	if (psRGXFWInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion)
+	{
+		PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware DDK revision (%u.%u).",
+				PVRVERSION_MAJ, PVRVERSION_MIN,
+				PVRVERSION_UNPACK_MAJ(psRGXFWInit->sRGXCompChecks.ui32DDKVersion),
+				PVRVERSION_UNPACK_MIN(psRGXFWInit->sRGXCompChecks.ui32DDKVersion)));
+		eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+		PVR_DBG_BREAK;
+		return eError;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK revision (%u.%u) match. [ OK ]",
+				PVRVERSION_MAJ, PVRVERSION_MIN,
+				PVRVERSION_MAJ, PVRVERSION_MIN));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK build against driver DDK build
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+																			RGXFWIF_INIT *psRGXFWInit)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+	IMG_UINT32			ui32DDKBuild;
+
+	ui32DDKBuild = PVRVERSION_BUILD;
+#endif
+
+#if defined(PDUMP) && defined(PVRSRV_STRICT_COMPAT_CHECK)
+	PDUMPCOMMENT("Compatibility check: KM driver and FW DDK build");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+												offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild),
+												ui32DDKBuild,
+												0xffffffff,
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	if (psRGXFWInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild)
+	{
+		PVR_LOG(("(WARN) RGXDevInitCompatCheck: Incompatible driver DDK build version (%d) / Firmware DDK build version (%d).",
+				ui32DDKBuild, psRGXFWInit->sRGXCompChecks.ui32DDKBuild));
+#if defined(PVRSRV_STRICT_COMPAT_CHECK)
+		eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH;
+		PVR_DBG_BREAK;
+		return eError;
+#endif
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. [ OK ]",
+				ui32DDKBuild, psRGXFWInit->sRGXCompChecks.ui32DDKBuild));
+	}
+#endif
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_BVNC_FWAgainstDriver
+
+ @Description
+
+ Validate FW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+																			RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)
+	IMG_UINT32					i;
+#endif
+#if !defined(NO_HARDWARE)
+	IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV;
+#endif
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+	IMG_UINT32					ui32B, ui32V, ui32N, ui32C;
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+	PVRSRV_ERROR				eError;
+	IMG_CHAR	szV[8];
+
+	ui32B = psDevInfo->sDevFeatureCfg.ui32B;
+	ui32V = psDevInfo->sDevFeatureCfg.ui32V;
+	ui32N = psDevInfo->sDevFeatureCfg.ui32N;
+	ui32C = psDevInfo->sDevFeatureCfg.ui32C;
+
+	OSSNPrintf(szV, sizeof(szV),"%d",ui32V);
+
+	rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax, ui32B, szV, ui32N, ui32C);
+#endif
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (struct version)");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+											offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+											offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+											offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+											sBVNC.ui32LayoutVersion,
+											0xffffffff,
+											PDUMP_POLL_OPERATOR_EQUAL,
+											PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+	}
+
+	PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (maxlen)");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+											offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+											offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+											offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32VLenMax),
+											sBVNC.ui32VLenMax,
+											0xffffffff,
+											PDUMP_POLL_OPERATOR_EQUAL,
+											PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+	}
+
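+	/* The packed BNC value is 64 bits wide, so it is polled as two
+	 * 32-bit halves. */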
+	PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BNC part - lower 32 bits)");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+											offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+											offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+											offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BNC),
+											(IMG_UINT32)sBVNC.ui64BNC,
+											0xffffffff,
+											PDUMP_POLL_OPERATOR_EQUAL,
+											PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+	}
+
+	PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BNC part - Higher 32 bits)");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+											offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+											offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+											offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BNC) +
+											sizeof(IMG_UINT32),
+											(IMG_UINT32)(sBVNC.ui64BNC >> 32),
+											0xffffffff,
+											PDUMP_POLL_OPERATOR_EQUAL,
+											PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+	}
+
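+	/* V is carried as a string; poll it one 32-bit word at a time. */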
+	for (i = 0; i < sBVNC.ui32VLenMax; i += sizeof(IMG_UINT32))
+	{
+		PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (V part)");
+		eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+												offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+												offsetof(RGXFWIF_COMPCHECKS_BVNC, aszV) +
+												i,
+												*((IMG_UINT32 *)(sBVNC.aszV + i)),
+												0xffffffff,
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		}
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	RGX_BVNC_EQUAL(sBVNC, psRGXFWInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+
+	if (!bCompatibleAll)
+	{
+		if (!bCompatibleVersion)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%d) and firmware (%d).",
+					__FUNCTION__,
+					sBVNC.ui32LayoutVersion,
+					psRGXFWInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+
+		if (!bCompatibleLenMax)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible V maxlen of driver (%d) and firmware (%d).",
+					__FUNCTION__,
+					sBVNC.ui32VLenMax,
+					psRGXFWInit->sRGXCompChecks.sFWBVNC.ui32VLenMax));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+
+		if (!bCompatibleBNC)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BNC (%d._.%d.%d) and Firmware BNC (%d._.%d.%d)",
+					RGX_BVNC_PACKED_EXTR_B(sBVNC),
+					RGX_BVNC_PACKED_EXTR_N(sBVNC),
+					RGX_BVNC_PACKED_EXTR_C(sBVNC),
+					RGX_BVNC_PACKED_EXTR_B(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+					RGX_BVNC_PACKED_EXTR_N(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+					RGX_BVNC_PACKED_EXTR_C(psRGXFWInit->sRGXCompChecks.sFWBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+
+		if (!bCompatibleV)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%d.%s.%d.%d) and Firmware BVNC (%d.%s.%d.%d)",
+					RGX_BVNC_PACKED_EXTR_B(sBVNC),
+					RGX_BVNC_PACKED_EXTR_V(sBVNC),
+					RGX_BVNC_PACKED_EXTR_N(sBVNC),
+					RGX_BVNC_PACKED_EXTR_C(sBVNC),
+					RGX_BVNC_PACKED_EXTR_B(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+					RGX_BVNC_PACKED_EXTR_V(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+					RGX_BVNC_PACKED_EXTR_N(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+					RGX_BVNC_PACKED_EXTR_C(psRGXFWInit->sRGXCompChecks.sFWBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware BVNC and KM driver BVNC match. [ OK ]"));
+	}
+#endif
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_BVNC_HWAgainstDriver
+
+ @Description
+
+ Validate HW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+#if ((!defined(NO_HARDWARE))&&(!defined(EMULATOR)))
+#define TARGET_SILICON  /* definition for everything that is not emu and not nohw configuration */
+#endif
+
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+																	RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP) || defined(TARGET_SILICON)
+	IMG_UINT64 ui64MaskBNC = RGX_BVNC_PACK_MASK_B |
+								RGX_BVNC_PACK_MASK_N |
+								RGX_BVNC_PACK_MASK_C;
+
+	IMG_BOOL bMaskV = IMG_FALSE;
+
+	PVRSRV_ERROR				eError;
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC);
+#endif
+
+#if defined(TARGET_SILICON)
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC);
+	IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV;
+#endif
+
+#if defined(PDUMP) || defined(TARGET_SILICON)
+	IMG_UINT32 ui32B, ui32V, ui32N, ui32C;
+	IMG_CHAR szV[8];
+
+	/*if(psDevInfo->sDevFeatureCfg.ui64ErnsBrns & FIX_HW_BRN_38835_BIT_MASK)
+	{
+		ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_B;
+		bMaskV = IMG_TRUE;
+	}*/
+#if defined(COMPAT_BVNC_MASK_N)
+	ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_N;
+#endif
+#if defined(COMPAT_BVNC_MASK_C)
+	ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_C;
+#endif
+	ui32B = psDevInfo->sDevFeatureCfg.ui32B;
+	ui32V = psDevInfo->sDevFeatureCfg.ui32V;
+	ui32N = psDevInfo->sDevFeatureCfg.ui32N;
+	ui32C = psDevInfo->sDevFeatureCfg.ui32C;
+
+	OSSNPrintf(szV, sizeof(szV),"%d",ui32V);
+	rgx_bvnc_packed(&sSWBVNC.ui64BNC, sSWBVNC.aszV, sSWBVNC.ui32VLenMax,  ui32B, szV, ui32N, ui32C);
+
+	if((psDevInfo->sDevFeatureCfg.ui64ErnsBrns & FIX_HW_BRN_38344_BIT_MASK) && (ui32C >= 10))
+	{
+		ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_C;
+	}
+
+	if ((ui64MaskBNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) || bMaskV)
+	{
+		PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.",
+				((!(ui64MaskBNC & RGX_BVNC_PACK_MASK_B))?("B"):("")),
+				((bMaskV)?("V"):("")),
+				((!(ui64MaskBNC & RGX_BVNC_PACK_MASK_N))?("N"):("")),
+				((!(ui64MaskBNC & RGX_BVNC_PACK_MASK_C))?("C"):(""))));
+	}
+#endif
+
+#if defined(EMULATOR)
+	PVR_LOG(("Compatibility checks for emu target: Ignoring HW BVNC checks."));
+#endif
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Compatibility check: Layout version of compchecks struct");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+											offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+											offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+											offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+											sSWBVNC.ui32LayoutVersion,
+											0xffffffff,
+											PDUMP_POLL_OPERATOR_EQUAL,
+											PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+
+	PDUMPCOMMENT("Compatibility check: HW V max len and FW V max len");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+											offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+											offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+											offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32VLenMax),
+											sSWBVNC.ui32VLenMax,
+											0xffffffff,
+											PDUMP_POLL_OPERATOR_EQUAL,
+											PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+
+	if (ui64MaskBNC != 0)
+	{
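+		/* Wrap the poll in a pdump conditional so that playback can
+		 * skip it when DISABLE_HWBNC_CHECK is set. */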
+		PDUMPIF("DISABLE_HWBNC_CHECK");
+		PDUMPELSE("DISABLE_HWBNC_CHECK");
+		PDUMPCOMMENT("Compatibility check: HW BNC and FW BNC (Lower 32 bits)");
+		eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+												offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+												offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BNC),
+												(IMG_UINT32)sSWBVNC.ui64BNC ,
+												(IMG_UINT32)ui64MaskBNC,
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+			return eError;
+		}
+
+		PDUMPCOMMENT("Compatibility check: HW BNC and FW BNC (Higher 32 bits)");
+		eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+												offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+												offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BNC) +
+												sizeof(IMG_UINT32),
+												(IMG_UINT32)(sSWBVNC.ui64BNC >> 32),
+												(IMG_UINT32)(ui64MaskBNC >> 32),
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+			return eError;
+		}
+
+		PDUMPFI("DISABLE_HWBNC_CHECK");
+	}
+	if (!bMaskV)
+	{
+		IMG_UINT32 i;
+		PDUMPIF("DISABLE_HWV_CHECK");
+		PDUMPELSE("DISABLE_HWV_CHECK");
+		for (i = 0; i < sSWBVNC.ui32VLenMax; i += sizeof(IMG_UINT32))
+		{
+			PDUMPCOMMENT("Compatibility check: HW V and FW V");
+			eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+												offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+												offsetof(RGXFWIF_COMPCHECKS_BVNC, aszV) +
+												i,
+												*((IMG_UINT32 *)(sSWBVNC.aszV + i)),
+												0xffffffff,
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+				return eError;
+			}
+		}
+		PDUMPFI("DISABLE_HWV_CHECK");
+	}
+#endif
+
+#if defined(TARGET_SILICON)
+	if (psRGXFWInit == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	sHWBVNC = psRGXFWInit->sRGXCompChecks.sHWBVNC;
+
+	sHWBVNC.ui64BNC &= ui64MaskBNC;
+	sSWBVNC.ui64BNC &= ui64MaskBNC;
+
+	if (bMaskV)
+	{
+		sHWBVNC.aszV[0] = '\0';
+		sSWBVNC.aszV[0] = '\0';
+	}
+
+	RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+
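+	/* BRN 42480 workaround: a 1.76.4.6 software build is explicitly
+	 * allowed to run on 1.69.4.4 hardware, so report that pairing as
+	 * compatible. */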
+	if(psDevInfo->sDevFeatureCfg.ui64ErnsBrns & FIX_HW_BRN_42480_BIT_MASK)
+	{
+		if (!bCompatibleAll && bCompatibleVersion)
+		{
+			if ((RGX_BVNC_PACKED_EXTR_B(sSWBVNC) == 1) &&
+				!(OSStringCompare(RGX_BVNC_PACKED_EXTR_V(sSWBVNC),"76")) &&
+				(RGX_BVNC_PACKED_EXTR_N(sSWBVNC) == 4) &&
+				(RGX_BVNC_PACKED_EXTR_C(sSWBVNC) == 6))
+			{
+				if ((RGX_BVNC_PACKED_EXTR_B(sHWBVNC) == 1) &&
+					!(OSStringCompare(RGX_BVNC_PACKED_EXTR_V(sHWBVNC),"69")) &&
+					(RGX_BVNC_PACKED_EXTR_N(sHWBVNC) == 4) &&
+					(RGX_BVNC_PACKED_EXTR_C(sHWBVNC) == 4))
+				{
+					bCompatibleBNC = IMG_TRUE;
+					bCompatibleLenMax = IMG_TRUE;
+					bCompatibleV = IMG_TRUE;
+					bCompatibleAll = IMG_TRUE;
+				}
+			}
+		}
+	}
+
+	if (!bCompatibleAll)
+	{
+		if (!bCompatibleVersion)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).",
+					__FUNCTION__,
+					sHWBVNC.ui32LayoutVersion,
+					sSWBVNC.ui32LayoutVersion));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+
+		if (!bCompatibleLenMax)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible V maxlen of HW (%d) and FW (%d).",
+					__FUNCTION__,
+					sHWBVNC.ui32VLenMax,
+					sSWBVNC.ui32VLenMax));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+
+		if (!bCompatibleBNC)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BNC (%d._.%d.%d) and FW BNC (%d._.%d.%d).",
+					RGX_BVNC_PACKED_EXTR_B(sHWBVNC),
+					RGX_BVNC_PACKED_EXTR_N(sHWBVNC),
+					RGX_BVNC_PACKED_EXTR_C(sHWBVNC),
+					RGX_BVNC_PACKED_EXTR_B(sSWBVNC),
+					RGX_BVNC_PACKED_EXTR_N(sSWBVNC),
+					RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+
+		if (!bCompatibleV)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%s.%d.%d) and FW BVNC (%d.%s.%d.%d).",
+					RGX_BVNC_PACKED_EXTR_B(sHWBVNC),
+					RGX_BVNC_PACKED_EXTR_V(sHWBVNC),
+					RGX_BVNC_PACKED_EXTR_N(sHWBVNC),
+					RGX_BVNC_PACKED_EXTR_C(sHWBVNC),
+					RGX_BVNC_PACKED_EXTR_B(sSWBVNC),
+					RGX_BVNC_PACKED_EXTR_V(sSWBVNC),
+					RGX_BVNC_PACKED_EXTR_N(sSWBVNC),
+					RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%s.%d.%d) and FW BVNC (%d.%s.%d.%d) match. [ OK ]",
+				RGX_BVNC_PACKED_EXTR_B(sHWBVNC),
+				RGX_BVNC_PACKED_EXTR_V(sHWBVNC),
+				RGX_BVNC_PACKED_EXTR_N(sHWBVNC),
+				RGX_BVNC_PACKED_EXTR_C(sHWBVNC),
+				RGX_BVNC_PACKED_EXTR_B(sSWBVNC),
+				RGX_BVNC_PACKED_EXTR_V(sSWBVNC),
+				RGX_BVNC_PACKED_EXTR_N(sSWBVNC),
+				RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver
+
+ @Description
+
+ Validate the HW FW processor (META or MIPS) version against the version
+ expected by the driver
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+									RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+	PVRSRV_ERROR		eError;
+#endif
+
+	IMG_UINT32	ui32FWCoreIDValue = 0;
+	IMG_CHAR *pcRGXFW_PROCESSOR = NULL;
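+	/* Select the core ID the driver expects for whichever processor
+	 * (MIPS or META variant) runs the firmware. */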
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+	{
+		ui32FWCoreIDValue = RGXMIPSFW_CORE_ID_VALUE;
+		pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS;
+	}
+	else if (psDevInfo->sDevFeatureCfg.ui32META)
+	{
+		switch(psDevInfo->sDevFeatureCfg.ui32META)
+		{
+		case MTP218: ui32FWCoreIDValue = RGX_CR_META_MTP218_CORE_ID_VALUE; break;
+		case MTP219: ui32FWCoreIDValue = RGX_CR_META_MTP219_CORE_ID_VALUE; break;
+		case LTP218: ui32FWCoreIDValue = RGX_CR_META_LTP218_CORE_ID_VALUE; break;
+		case LTP217: ui32FWCoreIDValue = RGX_CR_META_LTP217_CORE_ID_VALUE; break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,"%s: Undefined FW_CORE_ID_VALUE", __func__));
+			PVR_ASSERT(0);
+		}
+		pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Undefined FW_CORE_ID_VALUE", __func__));
+		PVR_ASSERT(0);
+	}
+
+#if defined(PDUMP)
+	PDUMPIF("DISABLE_HWMETA_CHECK");
+	PDUMPELSE("DISABLE_HWMETA_CHECK");
+	PDUMPCOMMENT("Compatibility check: KM driver and HW FW Processor version");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+					offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+					offsetof(RGXFWIF_COMPCHECKS, ui32FWProcessorVersion),
+					ui32FWCoreIDValue,
+					0xffffffff,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+	PDUMPFI("DISABLE_HWMETA_CHECK");
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	if (psRGXFWInit->sRGXCompChecks.ui32FWProcessorVersion != ui32FWCoreIDValue)
+	{
+		PVR_LOG(("RGXDevInitCompatCheck: Incompatible driver %s version (%d) / HW %s version (%d).",
+				pcRGXFW_PROCESSOR,
+				 ui32FWCoreIDValue,
+				 pcRGXFW_PROCESSOR,
+				 psRGXFWInit->sRGXCompChecks.ui32FWProcessorVersion));
+		eError = PVRSRV_ERROR_FWPROCESSOR_MISMATCH;
+		PVR_DBG_BREAK;
+		return eError;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Compatible driver %s version (%d) / HW %s version (%d) [OK].",
+				 pcRGXFW_PROCESSOR,
+				 ui32FWCoreIDValue,
+				 pcRGXFW_PROCESSOR,
+				 psRGXFWInit->sRGXCompChecks.ui32FWProcessorVersion));
+	}
+#endif
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck
+
+ @Description
+
+ Check compatibility of host driver and firmware (DDK and build options)
+ for RGX devices at services/device initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_INIT		*psRGXFWInit = NULL;
+#if !defined(NO_HARDWARE)
+	IMG_UINT32			ui32RegValue;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	/* Retrieve the FW information */
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+												(void **)&psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel fw compatibility check info (%u)",
+				__FUNCTION__, eError));
+		return eError;
+	}
+
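+	/* Wait (with timeout) for the FW to publish its compatibility data. */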
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		if(*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+		{
+			/* No need to wait if the FW has already updated the values */
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	ui32RegValue = 0;
+
+	if(psDevInfo->sDevFeatureCfg.ui32META)
+	{
+		eError = RGXReadMETAAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? (%u)",
+					__FUNCTION__, eError));
+			goto chk_exit;
+		}
+
+		if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT))
+		{
+			eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED;
+			PVR_DPF((PVR_DBG_ERROR,"%s: RGX META is not running. Is the GPU correctly powered up? %d (%u)",
+					__FUNCTION__, psRGXFWInit->sRGXCompChecks.bUpdated, eError));
+			goto chk_exit;
+		}
+	}
+
+	if (!*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+	{
+		eError = PVRSRV_ERROR_TIMEOUT;
+		PVR_DPF((PVR_DBG_ERROR,"%s: Missing compatibility info from FW (%u)",
+				__FUNCTION__, eError));
+		goto chk_exit;
+	}
+#endif /* !defined(NO_HARDWARE) */
+
+	eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+	eError = RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = PVRSRV_OK;
+chk_exit:
+#if !defined(NO_HARDWARE)
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+#endif
+	return eError;
+}
+
+/**************************************************************************/ /*!
+@Function       RGXSoftReset
+@Description    Resets some modules of the RGX device
+@Input          psDeviceNode		Device node
+@Input          ui64ResetValue1 A mask for which each bit set corresponds
+                                to a module to reset (via the SOFT_RESET
+                                register).
+@Input          ui64ResetValue2 A mask for which each bit set corresponds
+                                to a module to reset (via the SOFT_RESET2
+                                register).
+@Return         PVRSRV_ERROR
+*/ /***************************************************************************/
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 IMG_UINT64  ui64ResetValue1,
+                                 IMG_UINT64  ui64ResetValue2)
+{
+	PVRSRV_RGXDEV_INFO        *psDevInfo;
+	IMG_BOOL	bSoftReset = IMG_FALSE;
+	IMG_UINT64	ui64SoftResetMask = 0;
+
+	PVR_ASSERT(psDeviceNode != NULL);
+	PVR_ASSERT(psDeviceNode->pvDevice != NULL);
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	/* the device info */
+	psDevInfo = psDeviceNode->pvDevice;
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PBE2_IN_XE_BIT_MASK)
+	{
+		ui64SoftResetMask = RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL;
+	}
+	else
+	{
+		ui64SoftResetMask = RGX_CR_SOFT_RESET_MASKFULL;
+	}
+
+	if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) &&
+		((ui64ResetValue2 & RGX_CR_SOFT_RESET2_MASKFULL) != ui64ResetValue2))
+	{
+		bSoftReset = IMG_TRUE;
+	}
+
+	if (((ui64ResetValue1 & ui64SoftResetMask) != ui64ResetValue1) || bSoftReset)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Set in soft-reset */
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue1);
+
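+	/* Cores with the S7 top infrastructure have a second soft-reset
+	 * register for the additional modules. */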
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+	{
+		OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, ui64ResetValue2);
+	}
+
+	/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+	(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+	{
+		(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
+	}
+
+	/* Take the modules out of reset... */
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, 0);
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+	{
+		OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, 0);
+	}
+
+	/* ...and fence again */
+	(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+	{
+		(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RGXDebugRequestNotify
+
+ @Description Dump the debug data for RGX
+
+******************************************************************************/
+static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle,
+					IMG_UINT32 ui32VerbLevel,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = hDbgRequestHandle;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	/* Only action the request if we've fully init'ed */
+	if (psDevInfo->bDevInit2Done)
+	{
+		RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel);
+	}
+}
+
+static const RGX_MIPS_ADDRESS_TRAMPOLINE sNullTrampoline =
+{
+#if defined(PDUMP)
+	.hPdumpPages = 0,
+#endif
+	.sPages = {{0}},
+	.sPhysAddr = {0}
+};
+
+static void RGXFreeTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	DevPhysMemFree(psDeviceNode,
+#if defined(PDUMP)
+	               psDevInfo->sTrampoline.hPdumpPages,
+#endif
+	               &psDevInfo->sTrampoline.sPages);
+	psDevInfo->sTrampoline = sNullTrampoline;
+}
+
+static PVRSRV_ERROR RGXAllocTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+	IMG_INT32 i, j;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGX_MIPS_ADDRESS_TRAMPOLINE asTrampoline[RGXMIPSFW_TRAMPOLINE_NUMPAGES];
+
+	PDUMPCOMMENT("Allocate pages for trampoline");
+
+	/* Retry the allocation of the trampoline, retaining any allocations
+	 * overlapping with the target range until we get an allocation that
+	 * doesn't overlap with the target range. Any allocation like this
+	 * needs at most RGXMIPSFW_TRAMPOLINE_NUMPAGES tries.
+	 * Free the unused allocations only after the desired range is obtained
+	 * to prevent the alloc function from returning the same bad range
+	 * repeatedly.
+	 */
+	#define RANGES_OVERLAP(x,y,size) (x < (y+size) && y < (x+size))
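+	/* RANGES_OVERLAP is true when the equally-sized ranges starting at
+	 * x and y intersect. */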
+	for (i = 0; i < RGXMIPSFW_TRAMPOLINE_NUMPAGES; i++)
+	{
+		eError = DevPhysMemAlloc(psDeviceNode,
+					 RGXMIPSFW_TRAMPOLINE_SIZE,
+					 RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE,
+					 0,         // (init) u8Value
+					 IMG_FALSE, // bInitPage,
+#if defined(PDUMP)
+					 psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+					 "TrampolineRegion",
+					 &asTrampoline[i].hPdumpPages,
+#endif
+					 &asTrampoline[i].sPages,
+					 &asTrampoline[i].sPhysAddr);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s failed (%u)",
+				 __func__, eError));
+			goto fail;
+		}
+
+		if (!RANGES_OVERLAP(asTrampoline[i].sPhysAddr.uiAddr,
+		                    RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR,
+		                    RGXMIPSFW_TRAMPOLINE_SIZE))
+		{
+			break;
+		}
+	}
+	if (RGXMIPSFW_TRAMPOLINE_NUMPAGES == i)
+	{
+		eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s failed to allocate non-overlapping pages (%u)",
+			 __func__, eError));
+		goto fail;
+	}
+	#undef RANGES_OVERLAP
+
+	psDevInfo->sTrampoline = asTrampoline[i];
+
+fail:
+	/* free all unused allocations */
+	for (j = 0; j < i; j++)
+	{
+		DevPhysMemFree(psDeviceNode,
+#if defined(PDUMP)
+		               asTrampoline[j].hPdumpPages,
+#endif
+		               &asTrampoline[j].sPages);
+	}
+
+	return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitAllocFWImgMemKM(PVRSRV_DEVICE_NODE   *psDeviceNode,
+                                          IMG_DEVMEM_SIZE_T    uiFWCodeLen,
+                                          IMG_DEVMEM_SIZE_T    uiFWDataLen,
+                                          IMG_DEVMEM_SIZE_T    uiFWCorememLen,
+                                          PMR                  **ppsFWCodePMR,
+                                          IMG_DEV_VIRTADDR     *psFWCodeDevVAddrBase,
+                                          PMR                  **ppsFWDataPMR,
+                                          IMG_DEV_VIRTADDR     *psFWDataDevVAddrBase,
+                                          PMR                  **ppsFWCorememPMR,
+                                          IMG_DEV_VIRTADDR     *psFWCorememDevVAddrBase,
+                                          RGXFWIF_DEV_VIRTADDR *psFWCorememMetaVAddrBase)
+{
+	DEVMEM_FLAGS_T		uiMemAllocFlags;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR        eError;
+
+	eError = RGXInitCreateFWKernelMemoryContext(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitAllocFWImgMemKM: Failed RGXInitCreateFWKernelMemoryContext (%u)", eError));
+		goto failFWMemoryContextAlloc;
+	}
+
+	/*
+	 * Set up Allocation for FW code section
+	 */
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+	                  PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+	                  PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+	                  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+	                  PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+	                  PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+	                  PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+	                  PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+	eError = RGXAllocateFWCodeRegion(psDeviceNode,
+	                                 uiFWCodeLen,
+	                                 uiMemAllocFlags,
+	                                 IMG_FALSE,
+	                                 "FwExCodeRegion",
+	                                 &psDevInfo->psRGXFWCodeMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw code mem (%u)",
+				eError));
+		goto failFWCodeMemDescAlloc;
+	}
+
+	eError = DevmemLocalGetImportHandle(psDevInfo->psRGXFWCodeMemDesc, (void**) ppsFWCodePMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DevmemLocalGetImportHandle failed (%u)", eError));
+		goto failFWCodeMemDescAqDevVirt;
+	}
+
+	eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc,
+	                                  &psDevInfo->sFWCodeDevVAddrBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw code mem (%u)",
+				eError));
+		goto failFWCodeMemDescAqDevVirt;
+	}
+	*psFWCodeDevVAddrBase = psDevInfo->sFWCodeDevVAddrBase;
+
+	if (0 == (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK))
+	{
+		/*
+		 * The FW code must be the first allocation in the firmware heap, otherwise
+		 * the bootloader will not work (META will not be able to find the bootloader).
+		 */
+		PVR_ASSERT(psFWCodeDevVAddrBase->uiAddr == RGX_FIRMWARE_HEAP_BASE);
+	}
+
+	/*
+	 * Set up Allocation for FW data section
+	 */
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+	                  PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+	                  PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+	                  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+	                  PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+	                  PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+	                  PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+	                  PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+	                  PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+	                  PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate and export data memory for fw");
+
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+										uiFWDataLen,
+										OSGetPageSize(),
+										uiMemAllocFlags,
+										"FwExDataRegion",
+	                                    &psDevInfo->psRGXFWDataMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw data mem (%u)",
+				eError));
+		goto failFWDataMemDescAlloc;
+	}
+
+	eError = DevmemLocalGetImportHandle(psDevInfo->psRGXFWDataMemDesc, (void **) ppsFWDataPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DevmemLocalGetImportHandle failed (%u)", eError));
+		goto failFWDataMemDescAqDevVirt;
+	}
+
+	eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+	                                  &psDevInfo->sFWDataDevVAddrBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw data mem (%u)",
+				eError));
+		goto failFWDataMemDescAqDevVirt;
+	}
+	*psFWDataDevVAddrBase = psDevInfo->sFWDataDevVAddrBase;
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+	{
+		eError = RGXAllocTrampoline(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "Failed to allocate trampoline region (%u)",
+					 eError));
+			goto failTrampolineMemDescAlloc;
+		}
+	}
+
+	if (uiFWCorememLen != 0)
+	{
+		/*
+		 * Set up Allocation for FW coremem section
+		 */
+		uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+		eError = RGXAllocateFWCodeRegion(psDeviceNode,
+		                                 uiFWCorememLen,
+		                                 uiMemAllocFlags,
+		                                 IMG_TRUE,
+		                                 "FwExCorememRegion",
+		                                 &psDevInfo->psRGXFWCorememMemDesc);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw coremem mem, size: %"  IMG_INT64_FMTSPECd ", flags: %x (%u)",
+						uiFWCorememLen, uiMemAllocFlags, eError));
+			goto failFWCorememMemDescAlloc;
+		}
+
+		eError = DevmemLocalGetImportHandle(psDevInfo->psRGXFWCorememMemDesc, (void**) ppsFWCorememPMR);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"DevmemLocalGetImportHandle failed (%u)", eError));
+			goto failFWCorememMemDescAqDevVirt;
+		}
+
+		eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememMemDesc,
+		                                  &psDevInfo->sFWCorememCodeDevVAddrBase);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw coremem mem (%u)",
+						eError));
+			goto failFWCorememMemDescAqDevVirt;
+		}
+
+		RGXSetFirmwareAddress(&psDevInfo->sFWCorememCodeFWAddr,
+		                      psDevInfo->psRGXFWCorememMemDesc,
+		                      0, RFW_FWADDR_NOREF_FLAG);
+	}
+	else
+	{
+		*ppsFWCorememPMR = NULL;
+		psDevInfo->sFWCorememCodeDevVAddrBase.uiAddr = 0;
+		psDevInfo->sFWCorememCodeFWAddr.ui32Addr = 0;
+	}
+
+	*psFWCorememDevVAddrBase = psDevInfo->sFWCorememCodeDevVAddrBase;
+	*psFWCorememMetaVAddrBase = psDevInfo->sFWCorememCodeFWAddr;
+
+	return PVRSRV_OK;
+
+failFWCorememMemDescAqDevVirt:
+	if (uiFWCorememLen != 0)
+	{
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCorememMemDesc);
+		psDevInfo->psRGXFWCorememMemDesc = NULL;
+	}
+failFWCorememMemDescAlloc:
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+	{
+		RGXFreeTrampoline(psDeviceNode);
+	}
+failTrampolineMemDescAlloc:
+	DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+failFWDataMemDescAqDevVirt:
+	DevmemFwFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
+	psDevInfo->psRGXFWDataMemDesc = NULL;
+failFWDataMemDescAlloc:
+	DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+failFWCodeMemDescAqDevVirt:
+	DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
+	psDevInfo->psRGXFWCodeMemDesc = NULL;
+failFWCodeMemDescAlloc:
+failFWMemoryContextAlloc:
+	return eError;
+}
+
+/*
+	AppHint parameter interface
+*/
+static
+PVRSRV_ERROR RGXFWTraceQueryFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   const void *psPrivate,
+                                   IMG_UINT32 *pui32Value)
+{
+	PVRSRV_ERROR eResult;
+
+	eResult = PVRSRVRGXDebugMiscQueryFWLogKM(NULL, psDeviceNode, pui32Value);
+	*pui32Value &= RGXFWIF_LOG_TYPE_GROUP_MASK;
+	return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceQueryLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   const void *psPrivate,
+                                   IMG_UINT32 *pui32Value)
+{
+	PVRSRV_ERROR eResult;
+
+	eResult = PVRSRVRGXDebugMiscQueryFWLogKM(NULL, psDeviceNode, pui32Value);
+	if (PVRSRV_OK == eResult)
+	{
+		if (*pui32Value & RGXFWIF_LOG_TYPE_TRACE)
+		{
+			*pui32Value = 2; /* Trace */
+		}
+		else if (*pui32Value & RGXFWIF_LOG_TYPE_GROUP_MASK)
+		{
+			*pui32Value = 1; /* TBI */
+		}
+		else
+		{
+			*pui32Value = 0; /* None */
+		}
+	}
+	return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceSetFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  const void *psPrivate,
+                                  IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eResult;
+	IMG_UINT32 ui32RGXFWLogType;
+
+	eResult = RGXFWTraceQueryLogType(psDeviceNode, NULL, &ui32RGXFWLogType);
+	if (PVRSRV_OK == eResult)
+	{
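+		/* Setting any group bits implies trace output, unless the
+		 * current log type is TBI (1). */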
+		if (ui32Value && 1 != ui32RGXFWLogType)
+		{
+			ui32Value |= RGXFWIF_LOG_TYPE_TRACE;
+		}
+		eResult = PVRSRVRGXDebugMiscSetFWLogKM(NULL, psDeviceNode, ui32Value);
+	}
+	return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceSetLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    const void *psPrivate,
+                                    IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eResult;
+	IMG_UINT32 ui32RGXFWLogType = ui32Value;
+
+	/* 0 - none, 1 - tbi, 2 - trace */
+	if (ui32Value)
+	{
+		eResult = RGXFWTraceQueryFilter(psDeviceNode, NULL, &ui32RGXFWLogType);
+		if (PVRSRV_OK != eResult)
+		{
+			return eResult;
+		}
+		if (!ui32RGXFWLogType)
+		{
+			ui32RGXFWLogType = RGXFWIF_LOG_TYPE_GROUP_MAIN;
+		}
+		if (2 == ui32Value)
+		{
+			ui32RGXFWLogType |= RGXFWIF_LOG_TYPE_TRACE;
+		}
+	}
+
+	eResult = PVRSRVRGXDebugMiscSetFWLogKM(NULL, psDeviceNode, ui32RGXFWLogType);
+	return eResult;
+}
+
+static
+PVRSRV_ERROR RGXQueryFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode,
+									const void *psPrivate,
+									IMG_BOOL *pbValue)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+	*pbValue = psDevInfo->bEnableFWPoisonOnFree;
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode,
+									const void *psPrivate,
+									IMG_BOOL bValue)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+	psDevInfo->bEnableFWPoisonOnFree = bValue;
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXQueryFWPoisonOnFreeValue(const PVRSRV_DEVICE_NODE *psDeviceNode,
+									const void *psPrivate,
+									IMG_UINT32 *pui32Value)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	*pui32Value = psDevInfo->ubFWPoisonOnFreeValue;
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXSetFWPoisonOnFreeValue(const PVRSRV_DEVICE_NODE *psDeviceNode,
+									const void *psPrivate,
+									IMG_UINT32 ui32Value)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	psDevInfo->ubFWPoisonOnFreeValue = (IMG_BYTE) ui32Value;
+	return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXInitFirmwareKM
+ */
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXInitFirmwareKM(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                        RGXFWIF_DEV_VIRTADDR     *psRGXFwInit,
+                        IMG_BOOL                 bEnableSignatureChecks,
+                        IMG_UINT32               ui32SignatureChecksBufSize,
+                        IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                        IMG_UINT64               ui64HWPerfFilter,
+                        IMG_UINT32               ui32RGXFWAlignChecksArrLength,
+                        IMG_UINT32               *pui32RGXFWAlignChecks,
+                        IMG_UINT32               ui32ConfigFlags,
+                        IMG_UINT32               ui32LogType,
+                        IMG_UINT32               ui32FilterFlags,
+                        IMG_UINT32               ui32JonesDisableMask,
+                        IMG_UINT32               ui32HWRDebugDumpLimit,
+                        RGXFWIF_COMPCHECKS_BVNC  *psClientBVNC,
+                        RGXFWIF_COMPCHECKS_BVNC  *psFirmwareBVNC,
+                        IMG_UINT32               ui32HWPerfCountersDataSize,
+                        PMR                      **ppsHWPerfPMR,
+                        RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+                        FW_PERF_CONF             eFirmwarePerf,
+                        IMG_UINT32               ui32ConfigFlagsExt)
+{
+	PVRSRV_ERROR eError;
+	void *pvAppHintState = NULL;
+	IMG_UINT32 ui32AppHintDefault;
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+	IMG_BOOL bCompatibleAll=IMG_TRUE, bCompatibleVersion=IMG_TRUE, bCompatibleLenMax=IMG_TRUE, bCompatibleBNC=IMG_TRUE, bCompatibleV=IMG_TRUE;
+	IMG_UINT32 ui32NumBIFTilingConfigs, *pui32BIFTilingXStrides, i, ui32B, ui32V, ui32N, ui32C;
+	RGXFWIF_BIFTILINGMODE eBIFTilingMode;
+	IMG_CHAR szV[8];
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+	OSSNPrintf(szV, sizeof(szV),"%d",psDevInfo->sDevFeatureCfg.ui32V);
+	rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax, psDevInfo->sDevFeatureCfg.ui32B, szV, psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C);
+
+	/* Check if BVNC numbers of firmware and driver are compatible */
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		bCompatibleAll = IMG_TRUE;
+	}
+	else
+	{
+		RGX_BVNC_EQUAL(sBVNC, *psFirmwareBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+	}
+
+	if (!bCompatibleAll)
+	{
+		if (!bCompatibleVersion)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%d) and firmware (%d).",
+			         __FUNCTION__,
+			         sBVNC.ui32LayoutVersion,
+			         psFirmwareBVNC->ui32LayoutVersion));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			PVR_DBG_BREAK;
+			goto failed_to_pass_compatibility_check;
+		}
+
+		if (!bCompatibleLenMax)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible V maxlen of driver (%d) and firmware (%d).",
+			         __FUNCTION__,
+			         sBVNC.ui32VLenMax,
+			         psFirmwareBVNC->ui32VLenMax));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			PVR_DBG_BREAK;
+			goto failed_to_pass_compatibility_check;
+		}
+
+		if (!bCompatibleBNC)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible driver BNC (%d._.%d.%d) / firmware BNC (%d._.%d.%d).",
+			         __FUNCTION__,
+			         RGX_BVNC_PACKED_EXTR_B(sBVNC),
+			         RGX_BVNC_PACKED_EXTR_N(sBVNC),
+			         RGX_BVNC_PACKED_EXTR_C(sBVNC),
+			         RGX_BVNC_PACKED_EXTR_B(*psFirmwareBVNC),
+			         RGX_BVNC_PACKED_EXTR_N(*psFirmwareBVNC),
+			         RGX_BVNC_PACKED_EXTR_C(*psFirmwareBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			PVR_DBG_BREAK;
+			goto failed_to_pass_compatibility_check;
+		}
+
+		if (!bCompatibleV)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible driver BVNC (%d.%s.%d.%d) / firmware BVNC (%d.%s.%d.%d).",
+			         __FUNCTION__,
+			         RGX_BVNC_PACKED_EXTR_B(sBVNC),
+			         RGX_BVNC_PACKED_EXTR_V(sBVNC),
+			         RGX_BVNC_PACKED_EXTR_N(sBVNC),
+			         RGX_BVNC_PACKED_EXTR_C(sBVNC),
+			         RGX_BVNC_PACKED_EXTR_B(*psFirmwareBVNC),
+			         RGX_BVNC_PACKED_EXTR_V(*psFirmwareBVNC),
+			         RGX_BVNC_PACKED_EXTR_N(*psFirmwareBVNC),
+			         RGX_BVNC_PACKED_EXTR_C(*psFirmwareBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			PVR_DBG_BREAK;
+			goto failed_to_pass_compatibility_check;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver BVNC (%d.%s.%d.%d) and firmware BVNC (%d.%s.%d.%d) match. [ OK ]",
+		         __FUNCTION__,
+		         RGX_BVNC_PACKED_EXTR_B(sBVNC),
+		         RGX_BVNC_PACKED_EXTR_V(sBVNC),
+		         RGX_BVNC_PACKED_EXTR_N(sBVNC),
+		         RGX_BVNC_PACKED_EXTR_C(sBVNC),
+		         RGX_BVNC_PACKED_EXTR_B(*psFirmwareBVNC),
+		         RGX_BVNC_PACKED_EXTR_V(*psFirmwareBVNC),
+		         RGX_BVNC_PACKED_EXTR_N(*psFirmwareBVNC),
+		         RGX_BVNC_PACKED_EXTR_C(*psFirmwareBVNC)));
+	}
+
+	ui32B = psDevInfo->sDevFeatureCfg.ui32B;
+	ui32V = psDevInfo->sDevFeatureCfg.ui32V;
+	ui32N = psDevInfo->sDevFeatureCfg.ui32N;
+	ui32C = psDevInfo->sDevFeatureCfg.ui32C;
+
+	OSSNPrintf(szV, sizeof(szV),"%d",ui32V);
+
+	/* Check if BVNC numbers of client and driver are compatible */
+	rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax,  ui32B, szV, ui32N, ui32C);
+
+	RGX_BVNC_EQUAL(sBVNC, *psClientBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+
+	if (!bCompatibleAll)
+	{
+		if (!bCompatibleVersion)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%d) and client (%d).",
+					__FUNCTION__,
+					sBVNC.ui32LayoutVersion,
+					psClientBVNC->ui32LayoutVersion));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			PVR_DBG_BREAK;
+			goto failed_to_pass_compatibility_check;
+		}
+
+		if (!bCompatibleLenMax)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible V maxlen of driver (%d) and client (%d).",
+					__FUNCTION__,
+					sBVNC.ui32VLenMax,
+					psClientBVNC->ui32VLenMax));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			PVR_DBG_BREAK;
+			goto failed_to_pass_compatibility_check;
+		}
+
+		if (!bCompatibleBNC)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible driver BNC (%d._.%d.%d) / client BNC (%d._.%d.%d).",
+					__FUNCTION__,
+					RGX_BVNC_PACKED_EXTR_B(sBVNC),
+					RGX_BVNC_PACKED_EXTR_N(sBVNC),
+					RGX_BVNC_PACKED_EXTR_C(sBVNC),
+					RGX_BVNC_PACKED_EXTR_B(*psClientBVNC),
+					RGX_BVNC_PACKED_EXTR_N(*psClientBVNC),
+					RGX_BVNC_PACKED_EXTR_C(*psClientBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			PVR_DBG_BREAK;
+			goto failed_to_pass_compatibility_check;
+		}
+
+		if (!bCompatibleV)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible driver BVNC (%d.%s.%d.%d) / client BVNC (%d.%s.%d.%d).",
+					__FUNCTION__,
+					RGX_BVNC_PACKED_EXTR_B(sBVNC),
+					RGX_BVNC_PACKED_EXTR_V(sBVNC),
+					RGX_BVNC_PACKED_EXTR_N(sBVNC),
+					RGX_BVNC_PACKED_EXTR_C(sBVNC),
+					RGX_BVNC_PACKED_EXTR_B(*psClientBVNC),
+					RGX_BVNC_PACKED_EXTR_V(*psClientBVNC),
+					RGX_BVNC_PACKED_EXTR_N(*psClientBVNC),
+					RGX_BVNC_PACKED_EXTR_C(*psClientBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			PVR_DBG_BREAK;
+			goto failed_to_pass_compatibility_check;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver BVNC (%d.%s.%d.%d) and client BVNC (%d.%s.%d.%d) match. [ OK ]",
+				__FUNCTION__,
+				RGX_BVNC_PACKED_EXTR_B(sBVNC),
+				RGX_BVNC_PACKED_EXTR_V(sBVNC),
+				RGX_BVNC_PACKED_EXTR_N(sBVNC),
+				RGX_BVNC_PACKED_EXTR_C(sBVNC),
+				RGX_BVNC_PACKED_EXTR_B(*psClientBVNC),
+				RGX_BVNC_PACKED_EXTR_V(*psClientBVNC),
+				RGX_BVNC_PACKED_EXTR_N(*psClientBVNC),
+				RGX_BVNC_PACKED_EXTR_C(*psClientBVNC)));
+	}
+
+	PVRSRVSystemBIFTilingGetConfig(psDeviceNode->psDevConfig,
+	                               &eBIFTilingMode,
+	                               &ui32NumBIFTilingConfigs);
+	pui32BIFTilingXStrides = OSAllocMem(sizeof(IMG_UINT32) * ui32NumBIFTilingConfigs);
+	if(pui32BIFTilingXStrides == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitFirmwareKM: OSAllocMem failed (%u)", eError));
+		goto failed_BIF_tiling_alloc;
+	}
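+	/* Fetch the configured X stride for each BIF tiling heap (heaps
+	 * are numbered from 1). */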
+	for(i = 0; i < ui32NumBIFTilingConfigs; i++)
+	{
+		eError = PVRSRVSystemBIFTilingHeapGetXStride(psDeviceNode->psDevConfig,
+		                                             i+1,
+		                                             &pui32BIFTilingXStrides[i]);
+		if(eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Failed to get BIF tiling X stride for heap %u (%u)",
+			          __func__, i + 1, eError));
+			goto failed_BIF_heap_init;
+		}
+	}
+
+	eError = RGXSetupFirmware(psDeviceNode,
+	                          bEnableSignatureChecks,
+	                          ui32SignatureChecksBufSize,
+	                          ui32HWPerfFWBufSizeKB,
+	                          ui64HWPerfFilter,
+	                          ui32RGXFWAlignChecksArrLength,
+	                          pui32RGXFWAlignChecks,
+	                          ui32ConfigFlags,
+	                          ui32LogType,
+	                          eBIFTilingMode,
+	                          ui32NumBIFTilingConfigs,
+	                          pui32BIFTilingXStrides,
+	                          ui32FilterFlags,
+	                          ui32JonesDisableMask,
+	                          ui32HWRDebugDumpLimit,
+	                          ui32HWPerfCountersDataSize,
+	                          ppsHWPerfPMR,
+	                          psRGXFwInit,
+	                          eRGXRDPowerIslandingConf,
+	                          eFirmwarePerf,
+	                          ui32ConfigFlagsExt);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitFirmwareKM: RGXSetupFirmware failed (%u)", eError));
+		goto failed_init_firmware;
+	}
+
+	OSFreeMem(pui32BIFTilingXStrides);
+
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup,
+	                                    RGXFWTraceQueryFilter,
+	                                    RGXFWTraceSetFilter,
+	                                    psDeviceNode,
+	                                    NULL);
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FirmwareLogType,
+	                                    RGXFWTraceQueryLogType,
+	                                    RGXFWTraceSetLogType,
+	                                    psDeviceNode,
+	                                    NULL);
+
+	/* FW Poison values are not passed through from the init code
+	 * so grab them here */
+	OSCreateKMAppHintState(&pvAppHintState);
+
+	ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFWPOISONONFREE;
+	OSGetKMAppHintBOOL(pvAppHintState,
+	                   EnableFWPoisonOnFree,
+	                   &ui32AppHintDefault,
+	                   &psDevInfo->bEnableFWPoisonOnFree);
+
+	ui32AppHintDefault = PVRSRV_APPHINT_FWPOISONONFREEVALUE;
+	/* Read into a 32-bit temporary: the stored field is only one byte. */
+	{
+		IMG_UINT32 ui32PoisonOnFreeValue;
+		OSGetKMAppHintUINT32(pvAppHintState,
+		                     FWPoisonOnFreeValue,
+		                     &ui32AppHintDefault,
+		                     &ui32PoisonOnFreeValue);
+		psDevInfo->ubFWPoisonOnFreeValue = (IMG_BYTE) ui32PoisonOnFreeValue;
+	}
+
+	OSFreeKMAppHintState(pvAppHintState);
+
+	PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree,
+	                                  RGXQueryFWPoisonOnFree,
+	                                  RGXSetFWPoisonOnFree,
+	                                  psDeviceNode,
+	                                  NULL);
+
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FWPoisonOnFreeValue,
+	                                    RGXQueryFWPoisonOnFreeValue,
+	                                    RGXSetFWPoisonOnFreeValue,
+	                                    psDeviceNode,
+	                                    NULL);
+
+	return PVRSRV_OK;
+
+failed_init_firmware:
+failed_BIF_heap_init:
+	OSFreeMem(pui32BIFTilingXStrides);
+failed_BIF_tiling_alloc:
+failed_to_pass_compatibility_check:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/* See device.h for function declaration */
+static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+									 DEVMEM_MEMDESC **psMemDesc,
+									 IMG_UINT32 *puiSyncPrimVAddr,
+									 IMG_UINT32 *puiSyncPrimBlockSize)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	PVRSRV_ERROR eError;
+	RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+	IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32);
+	IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32);
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	/* Size and align are 'expanded' because we request an Exportalign allocation */
+	DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareHeap),
+										&uiUFOBlockSize,
+										&ui32UFOBlockAlign);
+
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+										uiUFOBlockSize,
+										ui32UFOBlockAlign,
+										PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+										PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+										PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+										PVRSRV_MEMALLOCFLAG_CACHE_COHERENT |
+										PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+										PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+										"FwExUFOBlock",
+										psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+	*puiSyncPrimVAddr = pFirmwareAddr.ui32Addr;
+	*puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize);
+
+	return PVRSRV_OK;
+
+e0:
+	return eError;
+}
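+/* Editor's note (illustrative, not from the original source): the UFO block
+ * starts out as a single 32-bit sync prim, i.e.
+ *     IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32);   /- 4 bytes -/
+ * and DevmemExportalignAdjustSizeAndAlign() then grows the size/alignment to
+ * whatever an exportable allocation on the firmware heap requires (presumably
+ * a multiple of the heap page size, given that the heap's log2 page size is
+ * passed in). This is why the caller is told the adjusted size via
+ * *puiSyncPrimBlockSize rather than sizeof(IMG_UINT32).
+ */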
+
+/* See device.h for function declaration */
+static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+							DEVMEM_MEMDESC *psMemDesc)
+{
+	/*
+		If the system has snooping of the device cache then the UFO block
+		might be in the cache, so we need to flush it out before freeing
+		the memory.
+
+		When the device is being shut down/destroyed we no longer care, as
+		several of the data structures needed to issue a flush have already
+		been destroyed.
+	*/
+	if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) &&
+		psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT)
+	{
+		RGXFWIF_KCCB_CMD sFlushInvalCmd;
+		PVRSRV_ERROR eError;
+
+		/* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
+#endif
+		sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
+
+		eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+											RGXFWIF_DM_GP,
+											&sFlushInvalCmd,
+											sizeof(sFlushInvalCmd),
+											PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXFreeUFOBlock: Failed to schedule SLC flush command with error (%u)", eError));
+		}
+		else
+		{
+			/* Wait for the SLC flush to complete */
+			eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXFreeUFOBlock: SLC flush and invalidate aborted with error (%u)", eError));
+			}
+		}
+	}
+
+	RGXUnsetFirmwareAddress(psMemDesc);
+	DevmemFwFree(psDeviceNode->pvDevice, psMemDesc);
+}
+
+/*
+	DevDeInitRGX
+*/
+PVRSRV_ERROR DevDeInitRGX (PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO		*psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+	PVRSRV_ERROR			eError;
+	DEVICE_MEMORY_INFO		*psDevMemoryInfo;
+	IMG_UINT32				ui32Temp = 0;
+
+	if (!psDevInfo)
+	{
+		/* Can happen if DevInitRGX failed */
+		PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Null DevInfo"));
+		return PVRSRV_OK;
+	}
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0);
+		PVR_UNREFERENCED_PARAMETER(ui32Temp);
+	}
+	else
+#endif
+	{
+		/* Delete the dummy page related info */
+		ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter);
+		if (0 != ui32Temp)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Dummy page reference counter is non-zero (%u)",
+					__func__,
+					ui32Temp));
+			PVR_ASSERT(0);
+		}
+	}
+#if defined(PDUMP)
+	if (NULL != psDeviceNode->sDummyPage.hPdumpDummyPg)
+	{
+		PDUMPCOMMENT("Error: dummy page handle is still active");
+	}
+#endif
+
+#if defined(SUPPORT_PDVFS) && !defined(RGXFW_META_SUPPORT_2ND_THREAD)
+	if(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer)
+	{
+		OSDisableTimer(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer);
+		OSRemoveTimer(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer);
+	}
+#endif
+
+	/* The lock type needs to be dispatch type here because it can be acquired from the MISR (Z-buffer) path */
+	OSLockDestroy(psDeviceNode->sDummyPage.psDummyPgLock);
+
+	/* Unregister debug request notifiers first as they could depend on anything. */
+	if (psDevInfo->hDbgReqNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(psDevInfo->hDbgReqNotify);
+	}
+
+	/* Cancel notifications to this device */
+	PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify);
+	psDeviceNode->hCmdCompNotify = NULL;
+
+	/*
+	 *  De-initialise in reverse order, so stage 2 init is undone first.
+	 */
+	if (psDevInfo->bDevInit2Done)
+	{
+		psDevInfo->bDevInit2Done = IMG_FALSE;
+
+#if !defined(NO_HARDWARE)
+		(void) SysUninstallDeviceLISR(psDevInfo->pvLISRData);
+		(void) OSUninstallMISR(psDevInfo->pvMISRData);
+		(void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+		if (psDevInfo->pvAPMISRData != NULL)
+		{
+			(void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+		}
+#endif /* !NO_HARDWARE */
+
+		/* Remove the device from the power manager */
+		eError = PVRSRVRemovePowerDevice(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+
+		OSLockDestroy(psDevInfo->hGPUUtilLock);
+
+		/* Free DVFS Table */
+		if (psDevInfo->psGpuDVFSTable != NULL)
+		{
+			OSFreeMem(psDevInfo->psGpuDVFSTable);
+			psDevInfo->psGpuDVFSTable = NULL;
+		}
+
+		/* De-init Freelists/ZBuffers... */
+		OSLockDestroy(psDevInfo->hLockFreeList);
+		OSLockDestroy(psDevInfo->hLockZSBuffer);
+
+		/* Unregister MMU related stuff */
+		eError = RGXMMUInit_Unregister(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed RGXMMUInit_Unregister (0x%x)", eError));
+			return eError;
+		}
+
+
+		if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+		{
+			/* Unregister MMU related stuff */
+			eError = RGXMipsMMUInit_Unregister(psDeviceNode);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed RGXMipsMMUInit_Unregister (0x%x)", eError));
+				return eError;
+			}
+		}
+	}
+
+	/* UnMap Regs */
+	if (psDevInfo->pvRegsBaseKM != NULL)
+	{
+#if !defined(NO_HARDWARE)
+		OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
+						 psDevInfo->ui32RegSize,
+						 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+#endif /* !NO_HARDWARE */
+		psDevInfo->pvRegsBaseKM = NULL;
+	}
+
+#if 0 /* not required at this time */
+	if (psDevInfo->hTimer)
+	{
+		eError = OSRemoveTimer(psDevInfo->hTimer);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed to remove timer"));
+			return eError;
+		}
+		psDevInfo->hTimer = NULL;
+	}
+#endif
+
+	psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+
+	RGXDeInitHeaps(psDevMemoryInfo);
+
+	if (psDevInfo->psRGXFWCodeMemDesc)
+	{
+		/* Free fw code */
+		PDUMPCOMMENT("Freeing FW code memory");
+		DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
+		psDevInfo->psRGXFWCodeMemDesc = NULL;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING,"No firmware code memory to free"));
+	}
+
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+	{
+		if (psDevInfo->sTrampoline.sPages.u.pvHandle)
+		{
+			/* Free trampoline region */
+			PDUMPCOMMENT("Freeing trampoline memory");
+			RGXFreeTrampoline(psDeviceNode);
+		}
+	}
+
+	if (psDevInfo->psRGXFWDataMemDesc)
+	{
+		/* Free fw data */
+		PDUMPCOMMENT("Freeing FW data memory");
+		DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
+		psDevInfo->psRGXFWDataMemDesc = NULL;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING,"No firmware data memory to free"));
+	}
+
+	if (psDevInfo->psRGXFWCorememMemDesc)
+	{
+		/* Free fw coremem */
+		PDUMPCOMMENT("Freeing FW coremem memory");
+		DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememMemDesc);
+		DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCorememMemDesc);
+		psDevInfo->psRGXFWCorememMemDesc = NULL;
+	}
+
+	/*
+	   Free the firmware allocations.
+	 */
+	RGXFreeFirmware(psDevInfo);
+	RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode);
+
+	/* De-initialise non-device specific (TL) users of RGX device memory */
+	RGXHWPerfHostDeInit(psDevInfo);
+	eError = HTBDeInit();
+	PVR_LOG_IF_ERROR(eError, "HTBDeInit");
+
+	/* destroy the context list locks */
+	OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+	OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+	OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+	OSWRLockDestroy(psDevInfo->hTDMCtxListLock);
+	OSWRLockDestroy(psDevInfo->hRaytraceCtxListLock);
+	OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock);
+	OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+	OSWRLockDestroy(psDevInfo->hCommonCtxtListLock);
+
+
+	if ((psDevInfo->hNMILock != NULL) && (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK))
+	{
+		OSLockDestroy(psDevInfo->hNMILock);
+	}
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	if (psDevInfo->hDebugFaultInfoLock != NULL)
+	{
+		OSLockDestroy(psDevInfo->hDebugFaultInfoLock);
+	}
+	if (psDevInfo->hMMUCtxUnregLock != NULL)
+	{
+		OSLockDestroy(psDevInfo->hMMUCtxUnregLock);
+	}
+#endif
+
+	if (psDevInfo->psScripts != NULL)
+	{
+		/* Free the init scripts. */
+		OSFreeMem(psDevInfo->psScripts);
+	}
+
+	/* Free device BVNC string */
+	if(NULL != psDevInfo->sDevFeatureCfg.pszBVNCString)
+	{
+		OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString);
+	}
+
+	/* DeAllocate devinfo */
+	OSFreeMem(psDevInfo);
+
+	psDeviceNode->pvDevice = NULL;
+
+	return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+static
+PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice);
+
+	psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+
+	return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+static INLINE DEVMEM_HEAP_BLUEPRINT _blueprint_init(IMG_CHAR *name,
+	IMG_UINT64 heap_base,
+	IMG_DEVMEM_SIZE_T heap_length,
+	IMG_UINT32 log2_import_alignment,
+	IMG_UINT32 tiling_mode)
+{
+	DEVMEM_HEAP_BLUEPRINT b = {
+		.pszName = name,
+		.sHeapBaseAddr.uiAddr = heap_base,
+		.uiHeapLength = heap_length,
+		.uiLog2DataPageSize = RGXHeapDerivePageSize(OSGetPageShift()),
+		.uiLog2ImportAlignment = log2_import_alignment,
+		.uiLog2TilingStrideFactor = (RGX_BIF_TILING_HEAP_LOG2_ALIGN_TO_STRIDE_BASE - tiling_mode)
+	};
+	void *pvAppHintState = NULL;
+	IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE;
+	IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+
+	if (!OSStringCompare(name, RGX_GENERAL_NON4K_HEAP_IDENT))
+	{
+		OSCreateKMAppHintState(&pvAppHintState);
+		OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize,
+				&ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize);
+		switch (ui32GeneralNon4KHeapPageSize)
+		{
+			case (1<<RGX_HEAP_4KB_PAGE_SHIFT):
+				b.uiLog2DataPageSize = RGX_HEAP_4KB_PAGE_SHIFT;
+				break;
+			case (1<<RGX_HEAP_16KB_PAGE_SHIFT):
+				b.uiLog2DataPageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+				break;
+			case (1<<RGX_HEAP_64KB_PAGE_SHIFT):
+				b.uiLog2DataPageSize = RGX_HEAP_64KB_PAGE_SHIFT;
+				break;
+			case (1<<RGX_HEAP_256KB_PAGE_SHIFT):
+				b.uiLog2DataPageSize = RGX_HEAP_256KB_PAGE_SHIFT;
+				break;
+			case (1<<RGX_HEAP_1MB_PAGE_SHIFT):
+				b.uiLog2DataPageSize = RGX_HEAP_1MB_PAGE_SHIFT;
+				break;
+			case (1<<RGX_HEAP_2MB_PAGE_SHIFT):
+				b.uiLog2DataPageSize = RGX_HEAP_2MB_PAGE_SHIFT;
+				break;
+			default:
+				b.uiLog2DataPageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+
+				PVR_DPF((PVR_DBG_ERROR, "Invalid AppHint GeneralNon4KHeapPageSize [%d] value, using 16KB",
+						ui32GeneralNon4KHeapPageSize));
+				break;
+		}
+		OSFreeKMAppHintState(pvAppHintState);
+	}
+
+	return b;
+}
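+/* Editor's note (illustrative, not from the original source): for the
+ * RGX_GENERAL_NON4K_HEAP_IDENT heap the data page size is driven by the
+ * GeneralNon4KHeapPageSize apphint, e.g. (hypothetical value):
+ *     GeneralNon4KHeapPageSize = 16384  (== 1 << RGX_HEAP_16KB_PAGE_SHIFT)
+ *         => b.uiLog2DataPageSize = RGX_HEAP_16KB_PAGE_SHIFT
+ * Any value that is not one of the listed powers of two falls back to the
+ * 16KB shift with an error message.
+ */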
+
+#define INIT_HEAP(NAME) \
+do { \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+			RGX_ ## NAME ## _HEAP_IDENT, \
+			RGX_ ## NAME ## _HEAP_BASE, \
+			RGX_ ## NAME ## _HEAP_SIZE, \
+			0, 0); \
+	psDeviceMemoryHeapCursor++; \
+} while (0)
+
+#define INIT_HEAP_NAME(STR, NAME) \
+do { \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+			STR, \
+			RGX_ ## NAME ## _HEAP_BASE, \
+			RGX_ ## NAME ## _HEAP_SIZE, \
+			0, 0); \
+	psDeviceMemoryHeapCursor++; \
+} while (0)
+
+#define INIT_TILING_HEAP(D, N, M)		\
+do { \
+	IMG_UINT32 xstride; \
+	PVRSRVSystemBIFTilingHeapGetXStride((D)->psDeviceNode->psDevConfig, N, &xstride); \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+			RGX_BIF_TILING_HEAP_ ## N ## _IDENT, \
+			RGX_BIF_TILING_HEAP_ ## N ## _BASE, \
+			RGX_BIF_TILING_HEAP_SIZE, \
+			RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(xstride), \
+			(IMG_UINT32)M); \
+	psDeviceMemoryHeapCursor++; \
+} while (0)
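+/* Editor's note (illustrative, not from the original source): each of the
+ * macros above fills one DEVMEM_HEAP_BLUEPRINT slot and advances the cursor,
+ * e.g.
+ *     INIT_HEAP(GENERAL);
+ * expands to
+ *     *psDeviceMemoryHeapCursor = _blueprint_init(RGX_GENERAL_HEAP_IDENT,
+ *                                                 RGX_GENERAL_HEAP_BASE,
+ *                                                 RGX_GENERAL_HEAP_SIZE,
+ *                                                 0, 0);
+ *     psDeviceMemoryHeapCursor++;
+ * which is why the heap count below can be computed from the final cursor
+ * position.
+ */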
+
+static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo,
+								 DEVICE_MEMORY_INFO *psNewMemoryInfo,
+								 IMG_UINT32 *pui32Log2DummyPgSize)
+{
+	IMG_UINT64	ui64ErnsBrns;
+	DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor;
+	RGXFWIF_BIFTILINGMODE eBIFTilingMode;
+	IMG_UINT32 uiNumHeaps;
+	void *pvAppHintState = NULL;
+	IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE;
+	IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+
+	ui64ErnsBrns = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+
+	/* FIXME - consider whether this ought not to be on the device node itself */
+	psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * RGX_MAX_HEAP_ID);
+	if (psNewMemoryInfo->psDeviceMemoryHeap == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc memory for DEVMEM_HEAP_BLUEPRINT", __func__));
+		goto e0;
+	}
+
+	PVRSRVSystemBIFTilingGetConfig(psDevInfo->psDeviceNode->psDevConfig, &eBIFTilingMode, &uiNumHeaps);
+
+
+	/* Get the page size for the dummy page from the NON4K heap apphint */
+	OSCreateKMAppHintState(&pvAppHintState);
+	OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize,
+			&ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize);
+	*pui32Log2DummyPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize);
+	OSFreeKMAppHintState(pvAppHintState);
+
+	/* Initialise the heaps */
+	psDeviceMemoryHeapCursor = psNewMemoryInfo->psDeviceMemoryHeap;
+
+	INIT_HEAP(GENERAL_SVM);
+
+	if (ui64ErnsBrns & FIX_HW_BRN_65273_BIT_MASK)
+	{
+		INIT_HEAP_NAME(RGX_GENERAL_HEAP_IDENT, GENERAL_BRN_65273);
+	}
+	else
+	{
+		INIT_HEAP(GENERAL);
+	}
+
+	if (ui64ErnsBrns & FIX_HW_BRN_63142_BIT_MASK)
+	{
+		/* BRN63142 heap must be at the top of an aligned 16GB range. */
+		INIT_HEAP(RGNHDR_BRN_63142);
+		PVR_ASSERT((RGX_RGNHDR_BRN_63142_HEAP_BASE & IMG_UINT64_C(0x3FFFFFFFF)) +
+		           RGX_RGNHDR_BRN_63142_HEAP_SIZE == IMG_UINT64_C(0x400000000));
+	}
+
+	if (ui64ErnsBrns & FIX_HW_BRN_65273_BIT_MASK)
+	{
+		INIT_HEAP_NAME(RGX_GENERAL_NON4K_HEAP_IDENT, GENERAL_NON4K_BRN_65273);
+		INIT_HEAP_NAME(RGX_VISTEST_HEAP_IDENT, VISTEST_BRN_65273);
+
+		/* HWBRN65273 workaround also requires two Region Header buffers 4GB apart. */
+		INIT_HEAP(MMU_INIA_BRN_65273);
+		INIT_HEAP(MMU_INIB_BRN_65273);
+	}
+	else
+	{
+		INIT_HEAP(GENERAL_NON4K);
+		INIT_HEAP(VISTEST);
+	}
+
+	if (ui64ErnsBrns & FIX_HW_BRN_65273_BIT_MASK)
+	{
+		INIT_HEAP_NAME(RGX_PDSCODEDATA_HEAP_IDENT, PDSCODEDATA_BRN_65273);
+		INIT_HEAP_NAME(RGX_USCCODE_HEAP_IDENT, USCCODE_BRN_65273);
+	}
+	else if (ui64ErnsBrns & FIX_HW_BRN_52402_BIT_MASK)
+	{
+		INIT_HEAP_NAME(RGX_PDSCODEDATA_HEAP_IDENT, PDSCODEDATA_BRN_52402);
+		INIT_HEAP_NAME(RGX_USCCODE_HEAP_IDENT, USCCODE_BRN_52402);
+	}
+	else
+	{
+		INIT_HEAP(PDSCODEDATA);
+		INIT_HEAP(USCCODE);
+	}
+
+	if (ui64ErnsBrns & FIX_HW_BRN_65273_BIT_MASK)
+	{
+		INIT_HEAP_NAME(RGX_TQ3DPARAMETERS_HEAP_IDENT, TQ3DPARAMETERS_BRN_65273);
+	}
+	else if (ui64ErnsBrns & (FIX_HW_BRN_52402_BIT_MASK | FIX_HW_BRN_55091_BIT_MASK))
+	{
+		INIT_HEAP_NAME(RGX_TQ3DPARAMETERS_HEAP_IDENT, TQ3DPARAMETERS_BRN_52402_55091);
+	}
+	else
+	{
+		INIT_HEAP(TQ3DPARAMETERS);
+	}
+
+	INIT_TILING_HEAP(psDevInfo, 1, eBIFTilingMode);
+	INIT_TILING_HEAP(psDevInfo, 2, eBIFTilingMode);
+	INIT_TILING_HEAP(psDevInfo, 3, eBIFTilingMode);
+	INIT_TILING_HEAP(psDevInfo, 4, eBIFTilingMode);
+	INIT_HEAP(DOPPLER);
+	INIT_HEAP(DOPPLER_OVERFLOW);
+	INIT_HEAP(TDM_TPU_YUV_COEFFS);
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)
+	{
+		INIT_HEAP(SERVICES_SIGNALS);
+		INIT_HEAP(SIGNALS);
+	}
+	INIT_HEAP_NAME("HWBRN37200", HWBRN37200);
+	INIT_HEAP_NAME("Firmware", FIRMWARE);
+
+	/* set the heap count */
+	psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap);
+
+	PVR_ASSERT(psNewMemoryInfo->ui32HeapCount <= RGX_MAX_HEAP_ID);
+
+	/*
+	   In the new heap setup we initialise 2 configurations:
+		1 - One will be for the firmware only (index 1 in array)
+			a. This primarily has the firmware heap in it.
+			b. It also has additional guest OSID firmware heap(s)
+				- Only if the number of supported firmware OSIDs > 1
+		2 - Others shall be for clients only (index 0 in array)
+			a. This has all the other client heaps in it.
+	*/
+	psNewMemoryInfo->uiNumHeapConfigs = 2;
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray = OSAllocMem(sizeof(DEVMEM_HEAP_CONFIG) * psNewMemoryInfo->uiNumHeapConfigs);
+	if (psNewMemoryInfo->psDeviceMemoryHeapConfigArray == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXRegisterDevice : Failed to alloc memory for DEVMEM_HEAP_CONFIG"));
+		goto e1;
+	}
+
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].pszName = "Default Heap Configuration";
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].uiNumHeaps = psNewMemoryInfo->ui32HeapCount-1;
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].psHeapBlueprintArray = psNewMemoryInfo->psDeviceMemoryHeap;
+
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].pszName = "Firmware Heap Configuration";
+	if(ui64ErnsBrns & FIX_HW_BRN_37200_BIT_MASK)
+	{
+		psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = 2;
+		psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor-2;
+	}
+	else
+	{
+		psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = 1;
+		psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor-1;
+	}
+
+	/* Perform additional virtualization initialization */
+	if (RGXVzInitHeaps(psNewMemoryInfo, psDeviceMemoryHeapCursor) != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	return PVRSRV_OK;
+e1:
+	OSFreeMem(psNewMemoryInfo->psDeviceMemoryHeap);
+e0:
+	return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+#undef INIT_HEAP
+#undef INIT_HEAP_NAME
+#undef INIT_TILING_HEAP
+
+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo)
+{
+	RGXVzDeInitHeaps(psDevMemoryInfo);
+	OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray);
+	OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap);
+}
+
+/* Binary-searches the given table for a search value. The table must be
+ * sorted in ascending order on the first IMG_UINT64 of each row. */
+static void *RGXSearchTable(IMG_UINT64 *pui64Array,
+							IMG_UINT uiEnd,
+							IMG_UINT64 ui64SearchValue,
+							IMG_UINT uiRowCount)
+{
+	IMG_UINT uiStart = 0, index;
+	IMG_UINT64 value, *pui64Ptr = NULL;
+
+	while (uiStart < uiEnd)
+	{
+		index = (uiStart + uiEnd) / 2;
+		pui64Ptr = pui64Array + (index * uiRowCount);
+		value = *pui64Ptr;
+
+		if (value == ui64SearchValue)
+		{
+			return (void *)pui64Ptr;
+		}
+
+		if (value > ui64SearchValue)
+		{
+			uiEnd = index;
+		}
+		else
+		{
+			uiStart = index + 1;
+		}
+	}
+	return NULL;
+}
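+/* Editor's note (illustrative, not from the original source): with
+ * uiRowCount == 3 the table is laid out as consecutive rows of three
+ * IMG_UINT64s, keyed on the first one, and a lookup such as
+ *     pui64Cfg = (IMG_UINT64 *)RGXSearchTable((IMG_UINT64 *)gaFeatures,
+ *                    sizeof(gaFeatures)/sizeof(gaFeatures[0]),
+ *                    ui64BVNC,
+ *                    sizeof(gaFeatures[0])/sizeof(IMG_UINT64));
+ * returns a pointer to the start of the matching row (or NULL), so that
+ * pui64Cfg[1], pui64Cfg[2] are the row's remaining columns. This is how
+ * RGXGetBVNCConfig() below consumes it.
+ */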
+
+#if defined(DEBUG)
+static void RGXDumpParsedBVNCConfig(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+	IMG_UINT64 ui64Temp = 0, ui64Temp2 = 1;
+
+	PVR_LOG(( "NC: 		%d", psDevInfo->sDevFeatureCfg.ui32NumClusters));
+	PVR_LOG(( "CSF: 		%d", psDevInfo->sDevFeatureCfg.ui32CtrlStreamFormat));
+	PVR_LOG(( "FBCDCA:	%d", psDevInfo->sDevFeatureCfg.ui32FBCDCArch));
+	PVR_LOG(( "MCMB: 		%d", psDevInfo->sDevFeatureCfg.ui32MCMB));
+	PVR_LOG(( "MCMS: 		%d", psDevInfo->sDevFeatureCfg.ui32MCMS));
+	PVR_LOG(( "MDMACnt: 	%d", psDevInfo->sDevFeatureCfg.ui32MDMACount));
+	PVR_LOG(( "NIIP: 		%d", psDevInfo->sDevFeatureCfg.ui32NIIP));
+	PVR_LOG(( "PBW: 		%d", psDevInfo->sDevFeatureCfg.ui32PBW));
+	PVR_LOG(( "STEArch: 	%d", psDevInfo->sDevFeatureCfg.ui32STEArch));
+	PVR_LOG(( "SVCEA: 	%d", psDevInfo->sDevFeatureCfg.ui32SVCE));
+	PVR_LOG(( "SLCBanks: 	%d", psDevInfo->sDevFeatureCfg.ui32SLCBanks));
+	PVR_LOG(( "SLCCLS: 	%d", psDevInfo->sDevFeatureCfg.ui32CacheLineSize));
+	PVR_LOG(( "SLCSize: 	%d", psDevInfo->sDevFeatureCfg.ui32SLCSize));
+	PVR_LOG(( "VASB:	 	%d", psDevInfo->sDevFeatureCfg.ui32VASB));
+	PVR_LOG(( "META:	 	%d", psDevInfo->sDevFeatureCfg.ui32META));
+
+	/* Dump the features with no values */
+	ui64Temp = psDevInfo->sDevFeatureCfg.ui64Features;
+	while(ui64Temp)
+	{
+		if(ui64Temp & 0x01)
+		{
+			IMG_PCHAR psString = "Unknown feature, debug list should be updated....";
+			switch(ui64Temp2)
+			{
+			case RGX_FEATURE_AXI_ACELITE_BIT_MASK: psString = "RGX_FEATURE_AXI_ACELITE";break;
+			case RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK:
+				psString = "RGX_FEATURE_CLUSTER_GROUPING";break;
+			case RGX_FEATURE_COMPUTE_BIT_MASK:
+				psString = "RGX_FEATURE_COMPUTE";break;
+			case RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK:
+				psString = "RGX_FEATURE_COMPUTE_MORTON_CAPABLE";break;
+			case RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK:
+				psString = "RGX_FEATURE_COMPUTE_OVERLAP";break;
+			case RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK:
+				psString = "RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS"; break;
+			case RGX_FEATURE_DYNAMIC_DUST_POWER_BIT_MASK:
+				psString = "RGX_FEATURE_DYNAMIC_DUST_POWER";break;
+			case RGX_FEATURE_FASTRENDER_DM_BIT_MASK:
+				psString = "RGX_FEATURE_FASTRENDER_DM";break;
+			case RGX_FEATURE_GPU_CPU_COHERENCY_BIT_MASK:
+				psString = "RGX_FEATURE_GPU_CPU_COHERENCY";break;
+			case RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK:
+				psString = "RGX_FEATURE_GPU_VIRTUALISATION";break;
+			case RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK:
+				psString = "RGX_FEATURE_GS_RTA_SUPPORT";break;
+			case RGX_FEATURE_META_DMA_BIT_MASK:
+				psString = "RGX_FEATURE_META_DMA";break;
+			case RGX_FEATURE_MIPS_BIT_MASK:
+				psString = "RGX_FEATURE_MIPS";break;
+			case RGX_FEATURE_PBE2_IN_XE_BIT_MASK:
+				psString = "RGX_FEATURE_PBE2_IN_XE";break;
+			case RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK:
+				psString = "RGX_FEATURE_PBVNC_COREID_REG";break;
+			case RGX_FEATURE_PDS_PER_DUST_BIT_MASK:
+				psString = "RGX_FEATURE_PDS_PER_DUST";break;
+			case RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK:
+				psString = "RGX_FEATURE_PDS_TEMPSIZE8";break;
+			case RGX_FEATURE_PERFBUS_BIT_MASK:
+				psString = "RGX_FEATURE_PERFBUS";break;
+			case RGX_FEATURE_RAY_TRACING_BIT_MASK:
+				psString = "RGX_FEATURE_RAY_TRACING";break;
+			case RGX_FEATURE_ROGUEXE_BIT_MASK:
+				psString = "RGX_FEATURE_ROGUEXE";break;
+			case RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK:
+				psString = "RGX_FEATURE_S7_CACHE_HIERARCHY";break;
+			case RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK:
+				psString = "RGX_FEATURE_S7_TOP_INFRASTRUCTURE";break;
+			case RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK:
+				psString = "RGX_FEATURE_SCALABLE_VDM_GPP";break;
+			case RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK:
+				psString = "RGX_FEATURE_SIGNAL_SNOOPING";break;
+			case RGX_FEATURE_SINGLE_BIF_BIT_MASK:
+				psString = "RGX_FEATURE_SINGLE_BIF";break;
+			case RGX_FEATURE_SLCSIZE8_BIT_MASK:
+				psString = "RGX_FEATURE_SLCSIZE8";break;
+			case RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_BIT_MASK:
+				psString = "RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128"; break;
+			case RGX_FEATURE_SLC_VIVT_BIT_MASK:
+				psString = "RGX_FEATURE_SLC_VIVT";break;
+			case RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK:
+				psString = "RGX_FEATURE_SYS_BUS_SECURE_RESET"; break;
+			case RGX_FEATURE_TESSELLATION_BIT_MASK:
+				psString = "RGX_FEATURE_TESSELLATION";break;
+			case RGX_FEATURE_TLA_BIT_MASK:
+				psString = "RGX_FEATURE_TLA";break;
+			case RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK:
+				psString = "RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS";break;
+			case RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK:
+				psString = "RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS";break;
+			case RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK:
+				psString = "RGX_FEATURE_TPU_FILTERING_MODE_CONTROL";break;
+			case RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK:
+				psString = "RGX_FEATURE_VDM_DRAWINDIRECT";break;
+			case RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK:
+				psString = "RGX_FEATURE_VDM_OBJECT_LEVEL_LLS";break;
+			case RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK:
+				psString = "RGX_FEATURE_XT_TOP_INFRASTRUCTURE";break;
+
+
+			default:
+				PVR_DPF((PVR_DBG_WARNING, "Feature with mask does not exist: 0x%016" IMG_UINT64_FMTSPECx, ui64Temp2));
+				break;
+			}
+			PVR_LOG(("%s", psString));
+		}
+		ui64Temp >>= 1;
+		ui64Temp2 <<= 1;
+	}
+
+	/*Dump the ERN and BRN flags for this core */
+	ui64Temp = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+	ui64Temp2 = 1;
+
+	while(ui64Temp)
+	{
+		if(ui64Temp & 0x1)
+		{
+			IMG_UINT32	ui32ErnBrnId = 0;
+			switch(ui64Temp2)
+			{
+			case HW_ERN_36400_BIT_MASK: ui32ErnBrnId = 36400; break;
+			case FIX_HW_BRN_37200_BIT_MASK: ui32ErnBrnId = 37200; break;
+			case FIX_HW_BRN_37918_BIT_MASK: ui32ErnBrnId = 37918; break;
+			case FIX_HW_BRN_38344_BIT_MASK: ui32ErnBrnId = 38344; break;
+			case HW_ERN_41805_BIT_MASK: ui32ErnBrnId = 41805; break;
+			case HW_ERN_42290_BIT_MASK: ui32ErnBrnId = 42290; break;
+			case FIX_HW_BRN_42321_BIT_MASK: ui32ErnBrnId = 42321; break;
+			case FIX_HW_BRN_42480_BIT_MASK: ui32ErnBrnId = 42480; break;
+			case HW_ERN_42606_BIT_MASK: ui32ErnBrnId = 42606; break;
+			case FIX_HW_BRN_43276_BIT_MASK: ui32ErnBrnId = 43276; break;
+			case FIX_HW_BRN_44455_BIT_MASK: ui32ErnBrnId = 44455; break;
+			case FIX_HW_BRN_44871_BIT_MASK: ui32ErnBrnId = 44871; break;
+			case HW_ERN_44885_BIT_MASK: ui32ErnBrnId = 44885; break;
+			case HW_ERN_45914_BIT_MASK: ui32ErnBrnId = 45914; break;
+			case HW_ERN_46066_BIT_MASK: ui32ErnBrnId = 46066; break;
+			case HW_ERN_47025_BIT_MASK: ui32ErnBrnId = 47025; break;
+			case HW_ERN_49144_BIT_MASK: ui32ErnBrnId = 49144; break;
+			case HW_ERN_50539_BIT_MASK: ui32ErnBrnId = 50539; break;
+			case FIX_HW_BRN_50767_BIT_MASK: ui32ErnBrnId = 50767; break;
+			case FIX_HW_BRN_51281_BIT_MASK: ui32ErnBrnId = 51281; break;
+			case HW_ERN_51468_BIT_MASK: ui32ErnBrnId = 51468; break;
+			case FIX_HW_BRN_52402_BIT_MASK: ui32ErnBrnId = 52402; break;
+			case FIX_HW_BRN_52563_BIT_MASK: ui32ErnBrnId = 52563; break;
+			case FIX_HW_BRN_54141_BIT_MASK: ui32ErnBrnId = 54141; break;
+			case FIX_HW_BRN_54441_BIT_MASK: ui32ErnBrnId = 54441; break;
+			case FIX_HW_BRN_55091_BIT_MASK: ui32ErnBrnId = 55091; break;
+			case FIX_HW_BRN_57193_BIT_MASK: ui32ErnBrnId = 57193; break;
+			case FIX_HW_BRN_57289_BIT_MASK: ui32ErnBrnId = 57289; break;
+			case HW_ERN_57596_BIT_MASK: ui32ErnBrnId = 57596; break;
+			case FIX_HW_BRN_60084_BIT_MASK: ui32ErnBrnId = 60084; break;
+			case HW_ERN_61389_BIT_MASK: ui32ErnBrnId = 61389; break;
+			case FIX_HW_BRN_61450_BIT_MASK: ui32ErnBrnId = 61450; break;
+			case FIX_HW_BRN_62204_BIT_MASK: ui32ErnBrnId = 62204; break;
+			case FIX_HW_BRN_63027_BIT_MASK: ui32ErnBrnId = 63027; break;
+			case FIX_HW_BRN_63142_BIT_MASK: ui32ErnBrnId = 63142; break;
+			case FIX_HW_BRN_65273_BIT_MASK: ui32ErnBrnId = 65273; break;
+
+			default:
+				PVR_LOG(("Unknown ErnBrn bit: 0x%016" IMG_UINT64_FMTSPECx, ui64Temp2));
+				break;
+			}
+			PVR_LOG(("ERN/BRN : %d",ui32ErnBrnId));
+		}
+		ui64Temp >>= 1;
+		ui64Temp2 <<= 1;
+	}
+
+}
+#endif
+
+static void RGXConfigFeaturesWithValues(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+	psDevInfo->sDevFeatureCfg.ui32MAXDMCount =	RGXFWIF_DM_MIN_CNT;
+	psDevInfo->sDevFeatureCfg.ui32MAXDMMTSCount =	RGXFWIF_DM_MIN_MTS_CNT;
+
+	/* ui64Features must be already initialized */
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+	{
+		psDevInfo->sDevFeatureCfg.ui32MAXDMCount += RGXFWIF_RAY_TRACING_DM_CNT;
+		psDevInfo->sDevFeatureCfg.ui32MAXDMMTSCount += RGXFWIF_RAY_TRACING_DM_MTS_CNT;
+	}
+
+	/* Get the max number of dusts in the core */
+	if(0 != psDevInfo->sDevFeatureCfg.ui32NumClusters)
+	{
+		psDevInfo->sDevFeatureCfg.ui32MAXDustCount = MAX(1, (psDevInfo->sDevFeatureCfg.ui32NumClusters / 2));
+	}
+}
+
+static inline
+IMG_UINT32 GetFeatureValue(IMG_UINT64 ui64CfgInfo,
+							IMG_PCHAR pcFeature,
+							IMG_PUINT32 pui32FeatureValList,
+							IMG_UINT64 ui64FeatureMask,
+							IMG_UINT32 ui32FeaturePos,
+							IMG_UINT32 ui32FeatureMaxValue)
+{
+	IMG_UINT64	ui64Indx;
+	IMG_UINT32	uiValue = 0;
+
+	ui64Indx = (ui64CfgInfo & ui64FeatureMask) >> ui32FeaturePos;
+	if (ui64Indx < ui32FeatureMaxValue)
+	{
+		uiValue = pui32FeatureValList[ui64Indx];
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Array out-of-bounds access attempted for %s", pcFeature));
+	}
+	return uiValue;
+}
+
+#define	GET_FEAT_VALUE(CfgInfo, Feature) 	\
+			GetFeatureValue(CfgInfo, #Feature, (IMG_PUINT32)aui32_##Feature##_values, \
+							Feature##_BIT_MASK, Feature##_POS, Feature##_MAX_VALUE_IDX)
+
+#define PVR_UNREFERENCED_FEATURE_VALUES(Feature) \
+			PVR_UNREFERENCED_PARAMETER(aui32_##Feature##_values)
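+/* Editor's note (illustrative, not from the original source): e.g.
+ *     GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_NUM_CLUSTERS)
+ * expands to
+ *     GetFeatureValue(ui64CfgInfo, "RGX_FEATURE_NUM_CLUSTERS",
+ *                     (IMG_PUINT32)aui32_RGX_FEATURE_NUM_CLUSTERS_values,
+ *                     RGX_FEATURE_NUM_CLUSTERS_BIT_MASK,
+ *                     RGX_FEATURE_NUM_CLUSTERS_POS,
+ *                     RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX)
+ * i.e. the packed feature field is extracted from ui64CfgInfo and used as an
+ * index into the per-feature value table.
+ */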
+
+static void RGXParseBVNCFeatures(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64CfgInfo)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+	PVR_UNREFERENCED_FEATURE_VALUES(RGX_FEATURE_FBCDC_ALGORITHM);
+	PVR_UNREFERENCED_FEATURE_VALUES(RGX_FEATURE_SLC_SIZE_IN_KILOBYTES);
+	PVR_UNREFERENCED_FEATURE_VALUES(RGX_FEATURE_NUM_RASTER_PIPES);
+	PVR_UNREFERENCED_FEATURE_VALUES(RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION);
+
+	/* Get the SLC size info, converted to bytes */
+	psDevInfo->sDevFeatureCfg.ui32SLCSize = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_SLC_SIZE_IN_BYTES) * 1024;
+
+	/*Get the control stream format architecture info */
+	psDevInfo->sDevFeatureCfg.ui32CtrlStreamFormat = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT);
+
+	psDevInfo->sDevFeatureCfg.ui32FBCDCArch = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_FBCDC_ARCHITECTURE);
+
+	psDevInfo->sDevFeatureCfg.ui32MCMB = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_META_COREMEM_BANKS);
+
+	psDevInfo->sDevFeatureCfg.ui32MCMS = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_META_COREMEM_SIZE) * 1024;
+
+	psDevInfo->sDevFeatureCfg.ui32MDMACount = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_META_DMA_CHANNEL_COUNT);
+
+	if (!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK))
+	{
+		psDevInfo->sDevFeatureCfg.ui32META = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_META);
+	}
+	else
+	{
+		psDevInfo->sDevFeatureCfg.ui32META = 0;
+	}
+
+	psDevInfo->sDevFeatureCfg.ui32NumClusters = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_NUM_CLUSTERS);
+
+	psDevInfo->sDevFeatureCfg.ui32NIIP = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_NUM_ISP_IPP_PIPES);
+
+	psDevInfo->sDevFeatureCfg.ui32PBW = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_PHYS_BUS_WIDTH);
+
+	psDevInfo->sDevFeatureCfg.ui32SLCBanks = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_SLC_BANKS);
+
+	psDevInfo->sDevFeatureCfg.ui32CacheLineSize = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS);
+
+	psDevInfo->sDevFeatureCfg.ui32STEArch = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_SCALABLE_TE_ARCH);
+
+	psDevInfo->sDevFeatureCfg.ui32SVCE = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_SCALABLE_VCE);
+
+	psDevInfo->sDevFeatureCfg.ui32VASB = GET_FEAT_VALUE(ui64CfgInfo, RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS);
+
+	RGXConfigFeaturesWithValues(psDeviceNode);
+}
+
+#undef PVR_UNREFERENCED_FEATURE_VALUES
+#undef GET_FEAT_VALUE
+
+static void RGXAcquireBVNCAppHint(IMG_CHAR *pszBVNCAppHint,
+								  IMG_CHAR **apszRGXBVNCList,
+								  IMG_UINT32 ui32BVNCListCount,
+								  IMG_UINT32 *pui32BVNCCount)
+{
+	IMG_CHAR *pszAppHintDefault = NULL;
+	void *pvAppHintState = NULL;
+	IMG_UINT32 ui32BVNCIndex = 0;
+	IMG_BOOL bRet;
+
+	OSCreateKMAppHintState(&pvAppHintState);
+	pszAppHintDefault = PVRSRV_APPHINT_RGXBVNC;
+
+	bRet = (IMG_BOOL)OSGetKMAppHintSTRING(pvAppHintState,
+						 RGXBVNC,
+						 &pszAppHintDefault,
+						 pszBVNCAppHint,
+						 RGXBVNC_BUFFER_SIZE);
+
+	OSFreeKMAppHintState(pvAppHintState);
+
+	if (!bRet)
+	{
+		*pui32BVNCCount = 0;
+		return;
+	}
+
+	while (*pszBVNCAppHint != '\0')
+	{
+		if (ui32BVNCIndex >= ui32BVNCListCount)
+		{
+			break;
+		}
+		apszRGXBVNCList[ui32BVNCIndex++] = pszBVNCAppHint;
+		while (1)
+		{
+			if (*pszBVNCAppHint == ',')
+			{
+				pszBVNCAppHint[0] = '\0';
+				pszBVNCAppHint++;
+				break;
+			} else if (*pszBVNCAppHint == '\0')
+			{
+				break;
+			}
+			pszBVNCAppHint++;
+		}
+	}
+	*pui32BVNCCount = ui32BVNCIndex;
+}
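+/* Editor's note (illustrative, not from the original source): the apphint
+ * string is split in place on ',' by overwriting each comma with '\0', e.g.
+ * (hypothetical values)
+ *     pszBVNCAppHint = "4.40.2.51,22.102.54.38"
+ * yields
+ *     apszRGXBVNCList[0] = "4.40.2.51"
+ *     apszRGXBVNCList[1] = "22.102.54.38"
+ *     *pui32BVNCCount    = 2
+ * Note the list entries are pointers into the supplied buffer, not copies.
+ */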
+
+/* Parses the BVNC list passed as a module parameter */
+static PVRSRV_ERROR RGXParseBVNCList(IMG_UINT64 *pB,
+									 IMG_UINT64 *pV,
+									 IMG_UINT64 *pN,
+									 IMG_UINT64 *pC,
+									 const IMG_UINT32 ui32RGXDevCount)
+{
+	unsigned int ui32ScanCount = 0;
+	IMG_CHAR *pszBVNCString = NULL;
+
+	if (ui32RGXDevCount == 0) {
+		IMG_CHAR pszBVNCAppHint[RGXBVNC_BUFFER_SIZE];
+		pszBVNCAppHint[0] = '\0';
+		RGXAcquireBVNCAppHint(pszBVNCAppHint, gazRGXBVNCList, PVRSRV_MAX_DEVICES, &gui32RGXLoadTimeDevCount);
+	}
+
+	/* The 4 components of a BVNC string are B, V, N & C */
+#define RGX_BVNC_INFO_PARAMS (4)
+
+	/* If only one BVNC parameter is specified, it is applied to all RGX
+	 * devices detected */
+	if (1 == gui32RGXLoadTimeDevCount)
+	{
+		pszBVNCString = gazRGXBVNCList[0];
+	}
+	else
+	{
+
+#if defined(DEBUG)
+		int i = 0;
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: No. of BVNC module params: %u", __func__, gui32RGXLoadTimeDevCount));
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC module param list ...", __func__));
+		for (i = 0; i < gui32RGXLoadTimeDevCount; i++)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "%s, ", gazRGXBVNCList[i]));
+		}
+#endif
+
+		if (gui32RGXLoadTimeDevCount == 0)
+			return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+
+		/* The index of the RGX device being initialised must always be
+		 * less than the gazRGXBVNCList entry count */
+		if (ui32RGXDevCount < gui32RGXLoadTimeDevCount)
+		{
+			pszBVNCString = gazRGXBVNCList[ui32RGXDevCount];
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Given module parameter list is shorter than "
+					"the number of actual devices", __func__));
+			return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+		}
+	}
+
+	if(NULL == pszBVNCString)
+	{
+		return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+	}
+
+	/* Parse the given RGX_BVNC string */
+	ui32ScanCount = OSVSScanf(pszBVNCString, "%llu.%llu.%llu.%llu", pB, pV, pN, pC);
+	if (RGX_BVNC_INFO_PARAMS != ui32ScanCount)
+	{
+		ui32ScanCount = OSVSScanf(pszBVNCString, "%llu.%llup.%llu.%llu", pB, pV, pN, pC);
+	}
+	if (RGX_BVNC_INFO_PARAMS == ui32ScanCount)
+	{
+		PVR_LOG(("BVNC module parameter honoured: %s", pszBVNCString));
+	}
+	else
+	{
+		return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+	}
+
+	return PVRSRV_OK;
+}
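+/* Editor's note (illustrative, not from the original source): the two
+ * OSVSScanf() formats above accept both plain and provisional ('p') version
+ * fields, e.g. (hypothetical values)
+ *     "4.40.2.51"    parsed by "%llu.%llu.%llu.%llu"
+ *     "4.40p.2.51"   parsed by "%llu.%llup.%llu.%llu"
+ * Anything else leaves ui32ScanCount != RGX_BVNC_INFO_PARAMS and the function
+ * fails with PVRSRV_ERROR_INVALID_BVNC_PARAMS.
+ */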
+
+/* This function detects the Rogue variant and configures the essential
+ * config info associated with such a device.
+ * The config info includes features, errata, etc. */
+static PVRSRV_ERROR RGXGetBVNCConfig(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	static IMG_UINT32 ui32RGXDevCnt = 0;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bDetectBVNC = IMG_TRUE;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	IMG_UINT64	ui64BVNC, *pui64Cfg, B=0, V=0, N=0, C=0;
+	/*
+	 * Order of BVNC rules
+	 *	1. RGX_BVNC Module parameter
+	 *	2. Detected BVNC (Hardware) / Compiled BVNC (No Hardware)
+	 *	3. If none of above report failure
+	 */
+	IMG_UINT32 uiOrderOfBVNC = 0;
+#if !defined(NO_HARDWARE) && defined(SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION)
+	if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		uiOrderOfBVNC = 1;
+	}
+#endif
+#if defined(RGX_BVNC_KM_B) && defined(RGX_BVNC_KM_V) && defined(RGX_BVNC_KM_N) && defined(RGX_BVNC_KM_C)
+	uiOrderOfBVNC = !uiOrderOfBVNC ? 2 : uiOrderOfBVNC;
+#else
+	uiOrderOfBVNC = !uiOrderOfBVNC ? 3 : uiOrderOfBVNC;
+#endif
+
+	/* Check for load time RGX BVNC config */
+	eError = RGXParseBVNCList(&B,&V,&N,&C, ui32RGXDevCnt);
+	if(PVRSRV_OK == eError)
+	{
+		bDetectBVNC = IMG_FALSE;
+	}
+
+	/* If BVNC is not specified as a module parameter, or the specified BVNC
+	 * list is insufficient, try to detect the device */
+	if (IMG_TRUE == bDetectBVNC)
+	{
+		if (uiOrderOfBVNC == 1)
+		{
+			IMG_UINT64 ui64ID;
+			IMG_HANDLE hSysData;
+
+			hSysData = psDeviceNode->psDevConfig->hSysData;
+
+			/* Power-up the device as required to read the registers */
+			if(psDeviceNode->psDevConfig->pfnPrePowerState)
+			{
+				eError = psDeviceNode->psDevConfig->pfnPrePowerState(hSysData, PVRSRV_DEV_POWER_STATE_ON,
+						PVRSRV_DEV_POWER_STATE_OFF, IMG_FALSE);
+				if (PVRSRV_OK != eError)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: System Pre-Power up failed", __func__));
+					return eError;
+				}
+			}
+
+			if(psDeviceNode->psDevConfig->pfnPostPowerState)
+			{
+				eError = psDeviceNode->psDevConfig->pfnPostPowerState(hSysData, PVRSRV_DEV_POWER_STATE_ON,
+						PVRSRV_DEV_POWER_STATE_OFF, IMG_FALSE);
+				if (PVRSRV_OK != eError)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: System Post Power up failed", __func__));
+					return eError;
+				}
+			}
+
+			ui64ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID__PBVNC);
+
+			if (GET_B(ui64ID))
+			{
+				B = (ui64ID & ~RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK) >>
+														RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT;
+				V = (ui64ID & ~RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK) >>
+														RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT;
+				N = (ui64ID & ~RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK) >>
+														RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT;
+				C = (ui64ID & ~RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK) >>
+														RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT;
+
+			}
+			else
+			{
+				IMG_UINT64 ui64CoreID, ui64CoreRev;
+				ui64CoreRev = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_REVISION);
+				ui64CoreID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID);
+				B = (ui64CoreRev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK) >>
+														RGX_CR_CORE_REVISION_MAJOR_SHIFT;
+				V = (ui64CoreRev & ~RGX_CR_CORE_REVISION_MINOR_CLRMSK) >>
+														RGX_CR_CORE_REVISION_MINOR_SHIFT;
+				N = (ui64CoreID & ~RGX_CR_CORE_ID_CONFIG_N_CLRMSK) >>
+														RGX_CR_CORE_ID_CONFIG_N_SHIFT;
+				C = (ui64CoreID & ~RGX_CR_CORE_ID_CONFIG_C_CLRMSK) >>
+														RGX_CR_CORE_ID_CONFIG_C_SHIFT;
+			}
+			PVR_LOG(("%s: Read BVNC %" IMG_UINT64_FMTSPEC ".%" IMG_UINT64_FMTSPEC ".%" IMG_UINT64_FMTSPEC ".%" IMG_UINT64_FMTSPEC " from device registers", __func__, B, V, N, C));
+
+			/* Power-down the device */
+			if(psDeviceNode->psDevConfig->pfnPrePowerState)
+			{
+				eError = psDeviceNode->psDevConfig->pfnPrePowerState(hSysData, PVRSRV_DEV_POWER_STATE_OFF,
+						PVRSRV_DEV_POWER_STATE_ON, IMG_FALSE);
+				if (PVRSRV_OK != eError)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: System Pre-Power down failed", __func__));
+					return eError;
+				}
+			}
+
+			if(psDeviceNode->psDevConfig->pfnPostPowerState)
+			{
+				eError = psDeviceNode->psDevConfig->pfnPostPowerState(hSysData, PVRSRV_DEV_POWER_STATE_OFF,
+						PVRSRV_DEV_POWER_STATE_ON, IMG_FALSE);
+				if (PVRSRV_OK != eError)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: System Post Power down failed", __func__));
+					return eError;
+				}
+			}
+		}
+		else if (uiOrderOfBVNC == 2)
+		{
+			B = RGX_BVNC_KM_B;
+			N = RGX_BVNC_KM_N;
+			C = RGX_BVNC_KM_C;
+			{
+				IMG_UINT32	ui32ScanCount = 0;
+				ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%llu", &V);
+				if(1 != ui32ScanCount)
+				{
+					ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%llup", &V);
+					if(1 != ui32ScanCount)
+					{
+						V = 0;
+					}
+				}
+			}
+			PVR_LOG(("%s: Reverting to compile time BVNC %s", __func__, RGX_BVNC_KM));
+		}
+		else
+		{
+			PVR_LOG(("%s: Unable to determine the BVNC", __func__));
+		}
+	}
+	ui64BVNC = BVNC_PACK(B,0,N,C);
+
+	/* Get the BVNC feature configuration (the feature table is keyed on B.N.C; V is zeroed in the packed key) */
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Detected BVNC INFO: 0x%016" IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx " 0x%016"
+		 IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx "\n",__func__,
+							B,
+							V,
+							N,
+							C,
+							ui64BVNC));
+
+	/*Extract the information from the BVNC & ERN/BRN Table */
+	pui64Cfg = (IMG_UINT64 *)RGXSearchTable((IMG_UINT64 *)gaFeatures, sizeof(gaFeatures)/sizeof(gaFeatures[0]),
+														ui64BVNC,
+														sizeof(gaFeatures[0])/sizeof(IMG_UINT64));
+	if(pui64Cfg)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC Feature Cfg: 0x%016" IMG_UINT64_FMTSPECx 
+				" 0x%016" IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx "\n",__func__,
+												pui64Cfg[0], pui64Cfg[1], pui64Cfg[2]));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: BVNC Feature Lookup failed. Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx,
+											__func__, ui64BVNC));
+		return PVRSRV_ERROR_BVNC_UNSUPPORTED;
+	}
+
+
+	psDevInfo->sDevFeatureCfg.ui64Features = pui64Cfg[1];
+	/* Parsing the feature config depends on the features available on the
+	 * core, hence this parsing must always follow the feature assignment above */
+	RGXParseBVNCFeatures(psDeviceNode, pui64Cfg[2]);
+
+	/* Get the ERN and BRN configuration */
+	ui64BVNC = BVNC_PACK(B,V,N,C);
+
+	pui64Cfg = (IMG_UINT64 *)RGXSearchTable((IMG_UINT64 *)gaErnsBrns, sizeof(gaErnsBrns)/sizeof(gaErnsBrns[0]),
+				ui64BVNC,
+				sizeof(gaErnsBrns[0])/sizeof(IMG_UINT64));
+	if(pui64Cfg)
+	{
+		psDevInfo->sDevFeatureCfg.ui64ErnsBrns = pui64Cfg[1];
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC ERN/BRN Cfg: 0x%016" IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx " \n",
+				__func__, *pui64Cfg, psDevInfo->sDevFeatureCfg.ui64ErnsBrns));
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: BVNC ERN/BRN Lookup failed. Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx,
+				__func__, ui64BVNC));
+		psDevInfo->sDevFeatureCfg.ui64ErnsBrns = 0;
+		return PVRSRV_ERROR_BVNC_UNSUPPORTED;
+	}
+
+	psDevInfo->sDevFeatureCfg.ui32B = (IMG_UINT32)B;
+	psDevInfo->sDevFeatureCfg.ui32V = (IMG_UINT32)V;
+	psDevInfo->sDevFeatureCfg.ui32N = (IMG_UINT32)N;
+	psDevInfo->sDevFeatureCfg.ui32C = (IMG_UINT32)C;
+
+	ui32RGXDevCnt++;
+#if defined(DEBUG)
+	RGXDumpParsedBVNCConfig(psDeviceNode);
+#endif
+	return PVRSRV_OK;
+}
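+/* Editor's note (illustrative, not from the original source):
+ * RGXGetBVNCConfig() performs two table lookups with differently packed keys:
+ *     BVNC_PACK(B, 0, N, C)  ->  gaFeatures  (features; V not significant)
+ *     BVNC_PACK(B, V, N, C)  ->  gaErnsBrns  (ERN/BRN workarounds; V matters,
+ *                                             since errata are revision-specific)
+ * Both tables are searched with the binary search in RGXSearchTable(), so
+ * both must be sorted in ascending key order.
+ */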
+
+/* This function checks if a particular feature is available on the given RGX device */
+static IMG_BOOL RGXCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT64 ui64FeatureMask)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDevNode->pvDevice;
+	/* FIXME: need to implement a bounds check for passed feature mask */
+	if(psDevInfo->sDevFeatureCfg.ui64Features & ui64FeatureMask)
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+/* This function returns the value of a feature on the given RGX device */
+static IMG_INT32 RGXGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT64 ui64FeatureMask)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDevNode->pvDevice;
+	/*FIXME: need to implement a bounds check for passed feature mask */
+
+	switch(ui64FeatureMask)
+	{
+	case RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK:
+		return psDevInfo->sDevFeatureCfg.ui32PBW;
+	case RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK:
+		return psDevInfo->sDevFeatureCfg.ui32CacheLineSize;
+	default:
+		return -1;
+	}
+}
+
+/*
+	RGXRegisterDevice
+*/
+PVRSRV_ERROR RGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+	DEVICE_MEMORY_INFO *psDevMemoryInfo;
+	PVRSRV_RGXDEV_INFO	*psDevInfo;
+
+	PDUMPCOMMENT("Device Name: %s", psDeviceNode->psDevConfig->pszName);
+
+	if (psDeviceNode->psDevConfig->pszVersion)
+	{
+		PDUMPCOMMENT("Device Version: %s", psDeviceNode->psDevConfig->pszVersion);
+	}
+
+	#if defined(RGX_FEATURE_SYSTEM_CACHE)
+	PDUMPCOMMENT("RGX System Level Cache is present");
+	#endif /* RGX_FEATURE_SYSTEM_CACHE */
+
+	PDUMPCOMMENT("RGX Initialisation (Part 1)");
+
+	/*********************
+	 * Device node setup *
+	 *********************/
+	/* Setup static data and callbacks on the device agnostic device node */
+#if defined(PDUMP)
+	psDeviceNode->sDevId.pszPDumpRegName	= RGX_PDUMPREG_NAME;
+	/*
+		FIXME: This should not be required as PMR's should give the memspace
+		name. However, due to limitations within PDump we need a memspace name
+		when pdumping with MMU context with virtual address in which case we
+		don't have a PMR to get the name from.
+
+		There is also the issue of obtaining a namespace name for the catbase,
+		which is required when we PDump the write of the physical catbase into
+		the FW structure.
+	*/
+	psDeviceNode->sDevId.pszPDumpDevName	= PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+	psDeviceNode->pfnPDumpInitDevice = &RGXResetPDump;
+#endif /* PDUMP */
+
+	OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK);
+	OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE);
+
+	/* Configure MMU specific stuff */
+	RGXMMUInit_Register(psDeviceNode);
+
+	psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate;
+
+	psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick;
+
+	/* Register RGX to receive notifies when other devices complete some work */
+	PVRSRVRegisterCmdCompleteNotify(&psDeviceNode->hCmdCompNotify, &RGXScheduleProcessQueuesKM, psDeviceNode);
+
+	psDeviceNode->pfnInitDeviceCompatCheck	= &RGXDevInitCompatCheck;
+
+	/* Register callbacks for creation of device memory contexts */
+	psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+	psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+	/* Register callbacks for Unified Fence Objects */
+	psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock;
+	psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock;
+
+	/* Register callback for checking the device's health */
+	psDeviceNode->pfnUpdateHealthStatus = RGXUpdateHealthStatus;
+
+	/* Register method to service the FW HWPerf buffer */
+	psDeviceNode->pfnServiceHWPerf = RGXHWPerfDataStoreCB;
+
+	/* Register callback for getting the device version information string */
+	psDeviceNode->pfnDeviceVersionString = RGXDevVersionString;
+
+	/* Register callback for getting the device clock speed */
+	psDeviceNode->pfnDeviceClockSpeed = RGXDevClockSpeed;
+
+	/* Register callback for soft resetting some device modules */
+	psDeviceNode->pfnSoftReset = RGXSoftReset;
+
+	/* Register callback for resetting the HWR logs */
+	psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs;
+
+#if defined(RGXFW_ALIGNCHECKS)
+	/* Register callback for checking alignment of UM structures */
+	psDeviceNode->pfnAlignmentCheck = RGXAlignmentCheck;
+#endif
+
+	/*Register callback for checking the supported features and getting the
+	 * corresponding values */
+	psDeviceNode->pfnCheckDeviceFeature = RGXCheckFeatureSupported;
+	psDeviceNode->pfnGetDeviceFeatureValue = RGXGetSupportedFeatureValue;
+
+	/*Set up required support for dummy page */
+	OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0);
+
+	/*Set the order to 0 */
+	psDeviceNode->sDummyPage.sDummyPageHandle.ui32Order = 0;
+
+	/*Set the size of the Dummy page to zero */
+	psDeviceNode->sDummyPage.ui32Log2DummyPgSize = 0;
+
+	/*Set the Dummy page phys addr */
+	psDeviceNode->sDummyPage.ui64DummyPgPhysAddr = MMU_BAD_PHYS_ADDR;
+
+	/* The lock type needs to be dispatch type here because it can be acquired from the MISR (Z-buffer) path */
+	eError = OSLockCreate(&psDeviceNode->sDummyPage.psDummyPgLock, LOCK_TYPE_DISPATCH);
+	if(PVRSRV_OK != eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__));
+		return eError;
+	}
+#if defined(PDUMP)
+	psDeviceNode->sDummyPage.hPdumpDummyPg = NULL;
+#endif
+
+	/*********************
+	 * Device info setup *
+	 *********************/
+	/* Allocate device control block */
+	psDevInfo = OSAllocZMem(sizeof(*psDevInfo));
+	if (psDevInfo == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc memory for DevInfo", __func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Create locks for the context lists stored in the DevInfo structure.
+	 * These lists are modified on context create/destroy and read by the
+	 * watchdog thread.
+	 */
+
+	eError = OSWRLockCreate(&(psDevInfo->hRenderCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create render context list lock", __func__));
+		goto e0;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hComputeCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create compute context list lock", __func__));
+		goto e1;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hTransferCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create transfer context list lock", __func__));
+		goto e2;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hTDMCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create TDM context list lock", __func__));
+		goto e3;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hRaytraceCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create raytrace context list lock", __func__));
+		goto e4;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hKickSyncCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create kick sync context list lock", __func__));
+		goto e5;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hMemoryCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create memory context list lock", __func__));
+		goto e6;
+	}
+
+	dllist_init(&(psDevInfo->sKCCBDeferredCommandsListHead));
+
+	dllist_init(&(psDevInfo->sRenderCtxtListHead));
+	dllist_init(&(psDevInfo->sComputeCtxtListHead));
+	dllist_init(&(psDevInfo->sTransferCtxtListHead));
+	dllist_init(&(psDevInfo->sTDMCtxtListHead));
+	dllist_init(&(psDevInfo->sRaytraceCtxtListHead));
+	dllist_init(&(psDevInfo->sKickSyncCtxtListHead));
+
+	dllist_init(&(psDevInfo->sCommonCtxtListHead));
+	psDevInfo->ui32CommonCtxtCurrentID = 1;
+
+	eError = OSWRLockCreate(&psDevInfo->hCommonCtxtListLock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create common context list lock", __func__));
+		goto e7;
+	}
+
+	dllist_init(&psDevInfo->sMemoryContextList);
+
+	/* Allocate space for scripts. */
+	psDevInfo->psScripts = OSAllocMem(sizeof(*psDevInfo->psScripts));
+	if (!psDevInfo->psScripts)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate memory for scripts", __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e8;
+	}
+
+	/* Setup static data and callbacks on the device specific device info */
+	psDevInfo->psDeviceNode		= psDeviceNode;
+
+	psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+	psDevInfo->pvDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+	/*
+	 * Map RGX Registers
+	 */
+#if !defined(NO_HARDWARE)
+	psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase,
+											 psDeviceNode->psDevConfig->ui32RegsSize,
+											 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+
+	if (psDevInfo->pvRegsBaseKM == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to create RGX register mapping", __func__));
+		eError = PVRSRV_ERROR_BAD_MAPPING;
+		goto e9;
+	}
+#endif
+
+	psDeviceNode->pvDevice = psDevInfo;
+
+	eError = RGXGetBVNCConfig(psDeviceNode);
+	if(PVRSRV_OK != eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Unsupported Device detected by driver", __func__));
+		goto e10;
+	}
+
+	/* pdump info about the core */
+	PDUMPCOMMENT("RGX Version Information (KM): %d.%d.%d.%d",
+	             psDevInfo->sDevFeatureCfg.ui32B,
+	             psDevInfo->sDevFeatureCfg.ui32V,
+	             psDevInfo->sDevFeatureCfg.ui32N,
+	             psDevInfo->sDevFeatureCfg.ui32C);
+
+	eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo,
+						  &psDeviceNode->sDummyPage.ui32Log2DummyPgSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e10;
+	}
+
+	eError = RGXHWPerfInit(psDevInfo);
+	PVR_LOGG_IF_ERROR(eError, "RGXHWPerfInit", e10);
+
+	/* Register callback for dumping debug info */
+	eError = PVRSRVRegisterDbgRequestNotify(&psDevInfo->hDbgReqNotify,
+											psDeviceNode,
+											RGXDebugRequestNotify,
+											DEBUG_REQUEST_SYS,
+											psDevInfo);
+	PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+	{
+		RGXMipsMMUInit_Register(psDeviceNode);
+	}
+
+	/* The device shared-virtual-memory heap address-space size is stored here for faster
+	   look-up without having to walk the device heap configuration structures during
+	   client device connection  (i.e. this size is relative to a zero-based offset) */
+	if(psDevInfo->sDevFeatureCfg.ui64ErnsBrns & (FIX_HW_BRN_52402_BIT_MASK | FIX_HW_BRN_55091_BIT_MASK | FIX_HW_BRN_65273_BIT_MASK))
+	{
+		psDeviceNode->ui64GeneralSVMHeapTopVA = 0;
+	}
+	else
+	{
+		psDeviceNode->ui64GeneralSVMHeapTopVA = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE;
+	}
+
+	if (NULL != psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit)
+	{
+		psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit(psDeviceNode->psDevConfig,
+				psDevInfo->sDevFeatureCfg.ui64Features);
+	}
+
+	return PVRSRV_OK;
+
+e10:
+#if !defined(NO_HARDWARE)
+	OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
+							 psDevInfo->ui32RegSize,
+							 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+e9:
+#endif /* !NO_HARDWARE */
+	OSFreeMem(psDevInfo->psScripts);
+e8:
+	OSWRLockDestroy(psDevInfo->hCommonCtxtListLock);
+e7:
+	OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+e6:
+	OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock);
+e5:
+	OSWRLockDestroy(psDevInfo->hRaytraceCtxListLock);
+e4:
+	OSWRLockDestroy(psDevInfo->hTDMCtxListLock);
+e3:
+	OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+e2:
+	OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+e1:
+	OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+e0:
+	OSFreeMem(psDevInfo);
+
+	/*Destroy the dummy page lock created above */
+	OSLockDestroy(psDeviceNode->sDummyPage.psDummyPgLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
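+/* Editor's note (illustrative, not from the original source): the e0..e10
+ * error labels in RGXRegisterDevice() unwind strictly in reverse order of the
+ * corresponding init steps, each label releasing the resource acquired just
+ * before the failing step; the dummy page lock, created before the DevInfo
+ * allocation, is destroyed last. New init steps should keep this mirror-image
+ * pairing.
+ */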
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXInitFinaliseFWImageKM(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+	{
+		void *pvFWImage;
+		PVRSRV_ERROR eError;
+
+		eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWImage);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVRGXInitFinaliseFWImageKM: Acquire mapping for FW data failed (%u)",
+					 eError));
+			return eError;
+		}
+
+		eError = RGXBootldrDataInit(psDeviceNode, pvFWImage);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVRGXInitFinaliseFWImageKM: ELF parameters injection failed (%u)",
+					 eError));
+			return eError;
+		}
+
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+
+	}
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDevVersionString
+@Description    Gets the version string for the given device node and returns
+                a pointer to it in ppszVersionString. It is then the
+                responsibility of the caller to free this memory.
+@Input          psDeviceNode            Device node from which to obtain the
+                                        version string
+@Output	        ppszVersionString	Contains the version string upon return
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode,
+					IMG_CHAR **ppszVersionString)
+{
+#if defined(NO_HARDWARE) || defined(EMULATOR)
+	IMG_PCHAR pszFormatString = "Rogue Version: %s (SW)";
+#else
+	IMG_PCHAR pszFormatString = "Rogue Version: %s (HW)";
+#endif
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	size_t uiStringLength;
+
+	if (psDeviceNode == NULL || ppszVersionString == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+	if (NULL == psDevInfo->sDevFeatureCfg.pszBVNCString)
+	{
+		IMG_CHAR pszBVNCInfo[MAX_BVNC_STRING_LEN];
+		size_t uiBVNCStringSize;
+
+		OSSNPrintf(pszBVNCInfo, MAX_BVNC_STRING_LEN, "%d.%d.%d.%d",
+				psDevInfo->sDevFeatureCfg.ui32B,
+				psDevInfo->sDevFeatureCfg.ui32V,
+				psDevInfo->sDevFeatureCfg.ui32N,
+				psDevInfo->sDevFeatureCfg.ui32C);
+
+		uiBVNCStringSize = (OSStringLength(pszBVNCInfo) + 1) * sizeof(IMG_CHAR);
+		psDevInfo->sDevFeatureCfg.pszBVNCString = OSAllocMem(uiBVNCStringSize);
+		if (NULL == psDevInfo->sDevFeatureCfg.pszBVNCString)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+					 "%s: Allocating memory for BVNC Info string failed",
+					 __func__));
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+		OSCachedMemCopy(psDevInfo->sDevFeatureCfg.pszBVNCString, pszBVNCInfo, uiBVNCStringSize);
+	}
+
+	uiStringLength = OSStringLength(psDevInfo->sDevFeatureCfg.pszBVNCString) +
+	                 OSStringLength(pszFormatString);
+	*ppszVersionString = OSAllocZMem(uiStringLength);
+	if (*ppszVersionString == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	OSSNPrintf(*ppszVersionString, uiStringLength, pszFormatString,
+			psDevInfo->sDevFeatureCfg.pszBVNCString);
+
+	return PVRSRV_OK;
+}
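+
+/* Illustrative use (a sketch, not part of the driver): the returned string
+ * is owned by the caller and must be released with OSFreeMem once consumed.
+ *
+ *     IMG_CHAR *pszVersion = NULL;
+ *     if (RGXDevVersionString(psDeviceNode, &pszVersion) == PVRSRV_OK)
+ *     {
+ *         PVR_LOG(("%s", pszVersion));
+ *         OSFreeMem(pszVersion);
+ *     }
+ */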
+
+/**************************************************************************/ /*!
+@Function       RGXDevClockSpeed
+@Description    Gets the clock speed for the given device node and returns
+                it in pui32RGXClockSpeed.
+@Input          psDeviceNode		Device node
+@Output         pui32RGXClockSpeed  Variable for storing the clock speed
+@Return         PVRSRV_ERROR
+*/ /***************************************************************************/
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode,
+					IMG_PUINT32  pui32RGXClockSpeed)
+{
+	RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+
+	/* get clock speed */
+	*pui32RGXClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXVzInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+	return RGXVzCreateFWKernelMemoryContext(psDeviceNode);
+}
+
+void RGXVzDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_NATIVE);
+	RGXVzDestroyFWKernelMemoryContext(psDeviceNode);
+}
+
+PVRSRV_ERROR RGXVzInitHeaps(DEVICE_MEMORY_INFO *psNewMemoryInfo,
+							DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor)
+{
+	IMG_UINT32 uiIdx;
+	IMG_UINT32 uiStringLength = 32;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+	/* Create additional guest OSID firmware heap */
+	for (uiIdx=1; uiIdx < RGXFW_NUM_OS; uiIdx++)
+	{
+		psDeviceMemoryHeapCursor->pszName = OSAllocZMem(uiStringLength * sizeof(IMG_CHAR));
+		if (psDeviceMemoryHeapCursor->pszName == NULL)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+		OSSNPrintf((IMG_CHAR *)psDeviceMemoryHeapCursor->pszName, uiStringLength, "GuestFirmware%d", uiIdx);
+
+		psDeviceMemoryHeapCursor->uiHeapLength = RGX_FIRMWARE_HEAP_SIZE;
+		psDeviceMemoryHeapCursor->uiLog2DataPageSize = RGXHeapDerivePageSize(OSGetPageShift());
+		psDeviceMemoryHeapCursor->sHeapBaseAddr.uiAddr = RGX_FIRMWARE_HEAP_BASE + (uiIdx * RGX_FIRMWARE_HEAP_SIZE);
+
+		/* Append additional guest(s) firmware heap to host driver firmware context heap configuration */
+		psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps += 1;
+
+		/* advance to the next heap */
+		psDeviceMemoryHeapCursor++;
+	}
+
+	return PVRSRV_OK;
+}
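+
+/* Resulting firmware heap layout (a sketch, assuming heap 0 at
+ * RGX_FIRMWARE_HEAP_BASE is the host firmware heap and each heap spans
+ * RGX_FIRMWARE_HEAP_SIZE bytes):
+ *
+ *     host FW heap   : [BASE,            BASE + 1 * SIZE)
+ *     GuestFirmware1 : [BASE + 1 * SIZE, BASE + 2 * SIZE)
+ *     GuestFirmware2 : [BASE + 2 * SIZE, BASE + 3 * SIZE)
+ *     ...one additional heap per guest OSID, up to RGXFW_NUM_OS - 1.
+ */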
+
+void RGXVzDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevMemoryInfo);
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_NATIVE);
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+}
+
+/******************************************************************************
+ End of file (rgxinit.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxinit.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxinit.h
new file mode 100644
index 0000000..36b1e41
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxinit.h
@@ -0,0 +1,319 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX initialisation header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXINIT_H__)
+#define __RGXINIT_H__
+
+#include "connection_server.h"
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "rgxscript.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_bridge.h"
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXInitDevPart2KM
+
+ @Description
+
+ Second part of server-side RGX initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRGXInitDevPart2KM (PVRSRV_DEVICE_NODE	*psDeviceNode,
+									  RGX_INIT_COMMAND		*psDbgScript,
+									  IMG_UINT32			ui32DeviceFlags,
+									  IMG_UINT32			ui32HWPerfHostBufSizeKB,
+									  IMG_UINT32			ui32HWPerfHostFilter,
+									  RGX_ACTIVEPM_CONF		eActivePMConf);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitAllocFWImgMemKM(PVRSRV_DEVICE_NODE   *psDeviceNode,
+                                          IMG_DEVMEM_SIZE_T    ui32FWCodeLen,
+                                          IMG_DEVMEM_SIZE_T    ui32FWDataLen,
+                                          IMG_DEVMEM_SIZE_T    uiFWCorememLen,
+                                          PMR                  **ppsFWCodePMR,
+                                          IMG_DEV_VIRTADDR     *psFWCodeDevVAddrBase,
+                                          PMR                  **ppsFWDataPMR,
+                                          IMG_DEV_VIRTADDR     *psFWDataDevVAddrBase,
+                                          PMR                  **ppsFWCorememPMR,
+                                          IMG_DEV_VIRTADDR     *psFWCorememDevVAddrBase,
+                                          RGXFWIF_DEV_VIRTADDR *psFWCorememMetaVAddrBase);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitMipsWrapperRegistersKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+												 IMG_UINT32 ui32Remap1Config1Offset,
+												 IMG_UINT32 ui32Remap1Config2Offset,
+												 IMG_UINT32 ui32WrapperConfigOffset,
+												 IMG_UINT32 ui32BootCodeOffset);
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXPdumpBootldrDataInitKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+												 IMG_UINT32 ui32BootConfOffset,
+												 IMG_UINT32 ui32ExceptionVectorsBaseAddress);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXInitFirmwareKM
+
+ @Description
+
+ Server-side RGX firmware initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVRGXInitFirmwareKM(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                        RGXFWIF_DEV_VIRTADDR     *psRGXFwInit,
+                        IMG_BOOL                 bEnableSignatureChecks,
+                        IMG_UINT32               ui32SignatureChecksBufSize,
+                        IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                        IMG_UINT64               ui64HWPerfFilter,
+                        IMG_UINT32               ui32RGXFWAlignChecksArrLength,
+                        IMG_UINT32               *pui32RGXFWAlignChecks,
+                        IMG_UINT32               ui32ConfigFlags,
+                        IMG_UINT32               ui32LogType,
+                        IMG_UINT32               ui32FilterFlags,
+                        IMG_UINT32               ui32JonesDisableMask,
+                        IMG_UINT32               ui32HWRDebugDumpLimit,
+                        RGXFWIF_COMPCHECKS_BVNC  *psClientBVNC,
+                        RGXFWIF_COMPCHECKS_BVNC  *psFirmwareBVNC,
+                        IMG_UINT32               ui32HWPerfCountersDataSize,
+                        PMR                      **ppsHWPerfPMR,
+                        RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+                        FW_PERF_CONF             eFirmwarePerf,
+                        IMG_UINT32               ui32ConfigFlagsExt);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitReleaseFWInitResourcesKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+												   PMR *psFWCodePMR,
+												   PMR *psFWDataPMR,
+												   PMR *psFWCorePMR,
+												   PMR *psHWPerfPMR);
+
+/*!
+*******************************************************************************
+
+ @Function  PVRSRVRGXInitFinaliseFWImageKM
+
+ @Description
+
+ Perform final steps of FW code setup when necessary
+
+ @Input psDeviceNode - Device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXInitFinaliseFWImageKM(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXInitHWPerfCountersKM
+
+ @Description
+
+ Initialisation of the performance counters
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRGXInitHWPerfCountersKM (PVRSRV_DEVICE_NODE	*psDeviceNode);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXRegisterDevice
+
+ @Description
+
+ Registers the device with the system
+
+ @Input 	psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+/*!
+*******************************************************************************
+
+ @Function	DevDeInitRGX
+
+ @Description
+
+ Reset and deinitialise Chip
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#if !defined(NO_HARDWARE)
+
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXRegisterGpuUtilStats
+
+ @Description  Initialise data used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument). This function must be called only once for each
+               different user/handle.
+
+ @Input        phGpuUtilUser - Pointer to handle used to identify a user of
+                               RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXRegisterGpuUtilStats(IMG_HANDLE *phGpuUtilUser);
+
+
+/*!
+*******************************************************************************
+
+ @Function     RGXUnregisterGpuUtilStats
+
+ @Description  Free data previously used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument).
+
+ @Input        hGpuUtilUser - Handle used to identify a user of
+                              RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXUnregisterGpuUtilStats(IMG_HANDLE hGpuUtilUser);
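+
+/* Typical pairing (an illustrative sketch only):
+ *
+ *     IMG_HANDLE hGpuUtilUser;
+ *     if (RGXRegisterGpuUtilStats(&hGpuUtilUser) == PVRSRV_OK)
+ *     {
+ *         ...calls to RGXGetGpuUtilStats(..., hGpuUtilUser, ...)...
+ *         RGXUnregisterGpuUtilStats(hGpuUtilUser);
+ *     }
+ */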
+#endif /* !defined(NO_HARDWARE) */
+
+
+/*!
+*******************************************************************************
+
+ @Function		PVRSRVGPUVIRTPopulateLMASubArenasKM
+
+ @Description	Populates the LMA arenas based on the min max values passed by
+				the client during initialization. GPU Virtualisation Validation
+				only.
+
+ @Input			psDeviceNode	: Pointer to a device node structure.
+				aui32OSidMin	: Per-region, per-OSID lower bounds passed
+								  by the client
+				aui32OSidMax	: Per-region, per-OSID upper bounds passed
+								  by the client
+				bEnableTrustedDeviceAceConfig : Whether the trusted device
+								  ACE configuration should be enabled
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVGPUVIRTPopulateLMASubArenasKM(PVRSRV_DEVICE_NODE	* psDeviceNode,
+                                                 IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+                                                 IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+                                                 IMG_BOOL bEnableTrustedDeviceAceConfig);
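+
+/* Shape of the bounds arrays (an illustrative sketch; the region and OS
+ * counts come from GPUVIRT_VALIDATION_NUM_REGIONS and
+ * GPUVIRT_VALIDATION_NUM_OS):
+ *
+ *     aui32OSidMin[r][os] .. aui32OSidMax[r][os]
+ *
+ * bounds the LMA sub-arena assigned to OSID 'os' within region 'r'. */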
+
+/*!
+*******************************************************************************
+
+ @Function		RGXVzInitCreateFWKernelMemoryContext
+
+ @Description	Called to perform additional initialisation during firmware
+ 	 	 	 	kernel context creation.
+******************************************************************************/
+PVRSRV_ERROR RGXVzInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+
+ @Function		RGXVzDeInitDestroyFWKernelMemoryContext
+
+ @Description	Called to perform additional deinitialisation during firmware
+ 	 	 	 	kernel context destruction.
+******************************************************************************/
+void RGXVzDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+
+ @Function		RGXVzInitHeaps
+
+ @Description	Called to perform additional initialisation
+******************************************************************************/
+PVRSRV_ERROR RGXVzInitHeaps(DEVICE_MEMORY_INFO *psNewMemoryInfo,
+							DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor);
+
+/*!
+*******************************************************************************
+
+ @Function		RGXVzDeInitHeaps
+
+ @Description	Called to perform additional deinitialisation
+******************************************************************************/
+void RGXVzDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo);
+
+#endif /* __RGXINIT_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxkicksync.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxkicksync.c
new file mode 100644
index 0000000..0d37d56
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxkicksync.c
@@ -0,0 +1,770 @@
+/*************************************************************************/ /*!
+@File           rgxkicksync.c
+@Title          Server side of the sync only kick API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxkicksync.h"
+
+#include "rgxdevice.h"
+#include "rgxmem.h"
+#include "rgxfwutils.h"
+#include "allocmem.h"
+#include "sync.h"
+#include "rgxhwperf.h"
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_KICKSYNC_UFO_DUMP	0
+
+//#define KICKSYNC_CHECKPOINT_DEBUG 1
+
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+struct _RGX_SERVER_KICKSYNC_CONTEXT_
+{
+	PVRSRV_DEVICE_NODE        * psDeviceNode;
+	RGX_SERVER_COMMON_CONTEXT * psServerCommonContext;
+	PVRSRV_CLIENT_SYNC_PRIM   * psSync;
+	DLLIST_NODE                 sListNode;
+	SYNC_ADDR_LIST              sSyncAddrListFence;
+	SYNC_ADDR_LIST              sSyncAddrListUpdate;
+	ATOMIC_T                    hJobId;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POS_LOCK                     hLock;
+#endif
+};
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA             * psConnection,
+                                              PVRSRV_DEVICE_NODE          * psDeviceNode,
+                                              IMG_HANDLE					hMemCtxPrivData,
+                                              RGX_SERVER_KICKSYNC_CONTEXT ** ppsKickSyncContext)
+{
+	PVRSRV_RGXDEV_INFO          * psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC              * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext;
+	RGX_COMMON_CONTEXT_INFO      sInfo;
+	PVRSRV_ERROR                 eError = PVRSRV_OK;
+
+	/* Prepare cleanup struct */
+	* ppsKickSyncContext = NULL;
+	psKickSyncContext = OSAllocZMem(sizeof(*psKickSyncContext));
+	if (psKickSyncContext == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psKickSyncContext->hLock, LOCK_TYPE_NONE);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+									__func__,
+									PVRSRVGetErrorStringKM(eError)));
+		goto err_lockcreate;
+	}
+#endif
+
+	psKickSyncContext->psDeviceNode = psDeviceNode;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+	                       & psKickSyncContext->psSync,
+	                       "kick sync cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "PVRSRVRGXCreateKickSyncContextKM: Failed to allocate cleanup sync (0x%x)",
+		         eError));
+		goto fail_syncalloc;
+	}
+
+	sInfo.psFWFrameworkMemDesc = NULL;
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 REQ_TYPE_KICKSYNC,
+									 RGXFWIF_DM_GP,
+									 NULL,
+									 0,
+									 psFWMemContextMemDesc,
+									 NULL,
+									 RGX_KICKSYNC_CCB_SIZE_LOG2,
+	                                 0, /* priority */
+									 & sInfo,
+									 & psKickSyncContext->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+	OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sKickSyncCtxtListHead), &(psKickSyncContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock);
+
+	SyncAddrListInit(&psKickSyncContext->sSyncAddrListFence);
+	SyncAddrListInit(&psKickSyncContext->sSyncAddrListUpdate);
+
+	* ppsKickSyncContext = psKickSyncContext;
+	return PVRSRV_OK;
+
+fail_contextalloc:
+fail_syncalloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psKickSyncContext->hLock);
+err_lockcreate:
+#endif
+	OSFreeMem(psKickSyncContext);
+	return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext)
+{
+	PVRSRV_ERROR         eError    = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO * psDevInfo = psKickSyncContext->psDeviceNode->pvDevice;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psKickSyncContext->psDeviceNode,
+	                                          psKickSyncContext->psServerCommonContext,
+	                                          psKickSyncContext->psSync,
+	                                          RGXFWIF_DM_3D,
+	                                          PDUMP_FLAGS_NONE);
+
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				__FUNCTION__,
+				PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* ... it has so we can free its resources */
+
+	OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock);
+	dllist_remove_node(&(psKickSyncContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock);
+
+	FWCommonContextFree(psKickSyncContext->psServerCommonContext);
+	SyncPrimFree(psKickSyncContext->psSync);
+
+	SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListFence);
+	SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListUpdate);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psKickSyncContext->hLock);
+#endif
+
+	OSFreeMem(psKickSyncContext);
+
+	return PVRSRV_OK;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext,
+
+                                 IMG_UINT32                    ui32ClientCacheOpSeqNum,
+
+                                 IMG_UINT32                    ui32ClientFenceCount,
+                                 SYNC_PRIMITIVE_BLOCK           ** pauiClientFenceUFOSyncPrimBlock,
+                                 IMG_UINT32                  * paui32ClientFenceOffset,
+                                 IMG_UINT32                  * paui32ClientFenceValue,
+
+                                 IMG_UINT32                    ui32ClientUpdateCount,
+                                 SYNC_PRIMITIVE_BLOCK           ** pauiClientUpdateUFOSyncPrimBlock,
+                                 IMG_UINT32                  * paui32ClientUpdateOffset,
+                                 IMG_UINT32                  * paui32ClientUpdateValue,
+
+                                 IMG_UINT32                    ui32ServerSyncPrims,
+                                 IMG_UINT32                  * paui32ServerSyncFlags,
+                                 SERVER_SYNC_PRIMITIVE      ** pasServerSyncs,
+
+                                 PVRSRV_FENCE                  iCheckFence,
+                                 PVRSRV_TIMELINE               iUpdateTimeline,
+                                 PVRSRV_FENCE                * piUpdateFence,
+                                 IMG_CHAR                      szUpdateFenceName[32],
+
+                                 IMG_UINT32                    ui32ExtJobRef)
+{
+	RGXFWIF_KCCB_CMD         sKickSyncKCCBCmd;
+	RGX_CCB_CMD_HELPER_DATA  asCmdHelperData[1];
+	PVRSRV_ERROR             eError;
+	PVRSRV_ERROR             eError2;
+	IMG_UINT32               i;
+	PRGXFWIF_UFO_ADDR        *pauiClientFenceUFOAddress = NULL;
+	PRGXFWIF_UFO_ADDR        *pauiClientUpdateUFOAddress = NULL;
+	PVRSRV_FENCE             iUpdateFence = PVRSRV_FENCE_INVALID;
+	IMG_UINT32               ui32JobId;
+	IMG_UINT32               ui32FWCtx = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr;
+	IMG_UINT32               uiCheckFenceUID = 0;
+	IMG_UINT32               uiUpdateFenceUID = 0;
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/* Android fd sync update info */
+	struct pvr_sync_append_data *psFDFenceData = NULL;
+#endif
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+	PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+	IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+	void *pvUpdateFenceFinaliseData = NULL;
+#endif
+	IMG_DEV_VIRTADDR sRobustnessResetReason = {0};
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psKickSyncContext->hLock);
+#endif
+
+	ui32JobId = OSAtomicIncrement(&psKickSyncContext->hJobId);
+
+	eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListFence,
+							ui32ClientFenceCount,
+							pauiClientFenceUFOSyncPrimBlock,
+							paui32ClientFenceOffset);
+
+	if(eError != PVRSRV_OK)
+	{
+		goto fail_syncaddrlist;
+	}
+
+	if (ui32ClientFenceCount > 0)
+	{
+		pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs;
+	}
+
+	eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListUpdate,
+							ui32ClientUpdateCount,
+							pauiClientUpdateUFOSyncPrimBlock,
+							paui32ClientUpdateOffset);
+
+	if(eError != PVRSRV_OK)
+	{
+		goto fail_syncaddrlist;
+	}
+
+	if (ui32ClientUpdateCount > 0)
+	{
+		pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs;
+	}
+
+	/* Sanity check the server fences */
+	for (i = 0; i < ui32ServerSyncPrims; i++)
+	{
+		if (0 == (paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on Kick Sync) must have the check flag set", __FUNCTION__));
+			eError = PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+			goto out_unlock;
+		}
+	}
+
+	/* Ensure the string is null-terminated (Required for safety) */
+	szUpdateFenceName[31] = '\0';
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined (SUPPORT_FALLBACK_FENCE_SYNC)
+	/* An update timeline is only valid if there is somewhere to return the update fence */
+	if (iUpdateTimeline >= 0 && !piUpdateFence)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto out_unlock;
+	}
+
+	if (iCheckFence >= 0 || iUpdateTimeline >= 0)
+	{
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		eError =
+		  pvr_sync_append_fences(szUpdateFenceName,
+								 iCheckFence,
+								 iUpdateTimeline,
+								 ui32ClientUpdateCount,
+								 pauiClientUpdateUFOAddress,
+								 paui32ClientUpdateValue,
+								 ui32ClientFenceCount,
+								 pauiClientFenceUFOAddress,
+								 paui32ClientFenceValue,
+								 &psFDFenceData);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_fdsync;
+		}
+		pvr_sync_get_updates(psFDFenceData, &ui32ClientUpdateCount,
+			&pauiClientUpdateUFOAddress, &paui32ClientUpdateValue);
+		pvr_sync_get_checks(psFDFenceData, &ui32ClientFenceCount,
+			&pauiClientFenceUFOAddress, &paui32ClientFenceValue);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psKickSyncContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __FUNCTION__, iCheckFence, (void*)psKickSyncContext->psDeviceNode->hSyncCheckpointContext));
+			/* Resolve the sync checkpoints that make up the input fence */
+			eError = SyncCheckpointResolveFence(psKickSyncContext->psDeviceNode->hSyncCheckpointContext,
+			                                    iCheckFence,
+			                                    &ui32FenceSyncCheckpointCount,
+			                                    &apsFenceSyncCheckpoints,
+			                                    &uiCheckFenceUID);
+			if (eError != PVRSRV_OK)
+			{
+				goto fail_resolve_fence;
+			}
+
+			/* Create the output fence (if required) */
+			if (piUpdateFence)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateTimeline=%d)...", __FUNCTION__, iUpdateTimeline));
+				eError = SyncCheckpointCreateFence(psKickSyncContext->psDeviceNode,
+				                                   szUpdateFenceName,
+				                                   iUpdateTimeline,
+				                                   psKickSyncContext->psDeviceNode->hSyncCheckpointContext,
+				                                   &iUpdateFence,
+				                                   &uiUpdateFenceUID,
+				                                   &pvUpdateFenceFinaliseData,
+				                                   &psUpdateSyncCheckpoint,
+				                                   (void*)&psFenceTimelineUpdateSync,
+				                                   &ui32FenceTimelineUpdateValue);
+				if (eError != PVRSRV_OK)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __FUNCTION__, eError));
+					goto fail_create_output_fence;
+				}
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __FUNCTION__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+
+				/* Append the sync prim update for the timeline (if required) */
+				if (psFenceTimelineUpdateSync)
+				{
+					IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+					/* Allocate memory to hold the list of update values (including our timeline update) */
+					pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*paui32ClientUpdateValue) * (ui32ClientUpdateCount+1));
+					if (!pui32IntAllocatedUpdateValues)
+					{
+						/* Failed to allocate memory */
+						eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+						goto fail_alloc_update_values_mem;
+					}
+					OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateCount+1));
+					/* Copy the update values into the new memory, then append our timeline update value */
+					OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32ClientUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32ClientUpdateCount);
+					/* Now set the additional update value */
+					pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32ClientUpdateCount;
+					*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+					ui32ClientUpdateCount++;
+					/* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
+					paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+					{
+						IMG_UINT32 iii;
+						IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+						for (iii=0; iii<ui32ClientUpdateCount; iii++)
+						{
+							CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+							pui32Tmp++;
+						}
+					}
+#endif
+					/* Now append the timeline sync prim addr to the kicksync context update list */
+					SyncAddrListAppendSyncPrim(&psKickSyncContext->sSyncAddrListUpdate,
+					                           psFenceTimelineUpdateSync);
+				}
+
+				if (ui32FenceSyncCheckpointCount > 0)
+				{
+					/* Append the checks (from input fence) */
+					CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to KickSync Fence (&psKickSyncContext->sSyncAddrListFence=<%p>)...", __FUNCTION__, ui32FenceSyncCheckpointCount, (void*)&psKickSyncContext->sSyncAddrListFence));
+					SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListFence,
+												  ui32FenceSyncCheckpointCount,
+												  apsFenceSyncCheckpoints);
+					if (!pauiClientFenceUFOAddress)
+					{
+						pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs;
+					}
+					ui32ClientFenceCount += ui32FenceSyncCheckpointCount;
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+					{
+						IMG_UINT32 iii;
+						IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientFenceUFOAddress;
+
+						for (iii=0; iii<ui32ClientFenceCount; iii++)
+						{
+							CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientFenceUFOAddress[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+							pui32Tmp++;
+						}
+					}
+#endif
+				}
+
+				if (psUpdateSyncCheckpoint)
+				{
+					PVRSRV_ERROR eErr;
+
+					/* Append the update (from output fence) */
+					CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to KickSync Update (&psKickSyncContext->sSyncAddrListUpdate=<%p>)...", __FUNCTION__, (void*)&psKickSyncContext->sSyncAddrListUpdate));
+					eErr = SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListUpdate,
+														 1,
+														 &psUpdateSyncCheckpoint);
+					if (eErr != PVRSRV_OK)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s:  ...done. SyncAddrListAppendCheckpoints() returned error (%d)", __FUNCTION__, eErr));
+					}
+					else
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s:  ...done.", __FUNCTION__));
+					}
+					if (!pauiClientUpdateUFOAddress)
+					{
+						pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs;
+					}
+					ui32ClientUpdateCount++;
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+					{
+						IMG_UINT32 iii;
+						IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientUpdateUFOAddress;
+
+						for (iii=0; iii<ui32ClientUpdateCount; iii++)
+						{
+							CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientUpdateUFOAddress[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+							pui32Tmp++;
+						}
+					}
+#endif
+				}
+			}
+		}
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined (SUPPORT_FALLBACK_FENCE_SYNC) */
+
+#if (ENABLE_KICKSYNC_UFO_DUMP == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: dumping KICKSYNC fence/updates syncs...", __FUNCTION__));
+		{
+			IMG_UINT32 ii;
+			PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiClientFenceUFOAddress;
+			IMG_UINT32 *pui32TmpIntFenceValue = paui32ClientFenceValue;
+			PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiClientUpdateUFOAddress;
+			IMG_UINT32 *pui32TmpIntUpdateValue = paui32ClientUpdateValue;
+
+			/* Dump Fence syncs and Update syncs */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d KickSync fence syncs (&psKickSyncContext->sSyncAddrListFence=<%p>, pauiClientFenceUFOAddress=<%p>):", __FUNCTION__, ui32ClientFenceCount, (void*)&psKickSyncContext->sSyncAddrListFence, (void*)pauiClientFenceUFOAddress));
+			for (ii=0; ii<ui32ClientFenceCount; ii++)
+			{
+				if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32ClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32ClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+					pui32TmpIntFenceValue++;
+				}
+				psTmpIntFenceUFOAddress++;
+			}
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d KickSync update syncs (&psKickSyncContext->sSyncAddrListUpdate=<%p>, pauiClientUpdateUFOAddress=<%p>):", __FUNCTION__, ui32ClientUpdateCount, (void*)&psKickSyncContext->sSyncAddrListUpdate, (void*)pauiClientUpdateUFOAddress));
+			for (ii=0; ii<ui32ClientUpdateCount; ii++)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:  Line %d, psTmpIntUpdateUFOAddress=<%p>", __FUNCTION__, __LINE__, (void*)psTmpIntUpdateUFOAddress));
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:  Line %d, pui32TmpIntUpdateValue=<%p>", __FUNCTION__, __LINE__, (void*)pui32TmpIntUpdateValue));
+				if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32ClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __FUNCTION__, ii+1, ui32ClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+					pui32TmpIntUpdateValue++;
+				}
+				psTmpIntUpdateUFOAddress++;
+			}
+		}
+#endif
+
+	eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext),
+	                                ui32ClientFenceCount,
+	                                pauiClientFenceUFOAddress,
+	                                paui32ClientFenceValue,
+	                                ui32ClientUpdateCount,
+	                                pauiClientUpdateUFOAddress,
+	                                paui32ClientUpdateValue,
+	                                ui32ServerSyncPrims,
+	                                paui32ServerSyncFlags,
+	                                SYNC_FLAG_MASK_ALL,
+	                                pasServerSyncs,
+	                                0,
+	                                NULL,
+	                                NULL,
+	                                NULL,
+	                                NULL,
+	                                RGXFWIF_CCB_CMD_TYPE_NULL,
+	                                ui32ExtJobRef,
+	                                ui32JobId,
+	                                PDUMP_FLAGS_NONE,
+	                                NULL,
+	                                "KickSync",
+	                                asCmdHelperData,
+	                                sRobustnessResetReason);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdinit;
+	}
+
+	eError = RGXCmdHelperAcquireCmdCCB(IMG_ARR_NUM_ELEMS(asCmdHelperData), asCmdHelperData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdacquire;
+	}
+
+	/*
+	 *  We should reserve space in the kernel CCB here and fill in the command
+	 *  directly.
+	 *  This is so that if there isn't space in the kernel CCB we can return
+	 *  with retry back to the services client before we commit any operations.
+	 */
+
+	/*
+	 * We might only be kicking to flush out a padding packet, so only submit
+	 * the command if the command-helper acquire was successful.
+	 */
+	if (eError == PVRSRV_OK)
+	{
+		/*
+		 * All the required resources are ready at this point, we can't fail so
+		 * take the required server sync operations and commit all the resources
+		 */
+		RGXCmdHelperReleaseCmdCCB(1,
+		                          asCmdHelperData,
+		                          "KickSync",
+		                          FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr);
+	}
+
+	/* Construct the kernel kicksync CCB command. */
+	sKickSyncKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+	sKickSyncKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext);
+	sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext));
+	sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+	sKickSyncKCCBCmd.uCmdData.sCmdKickData.sWorkloadDataFWAddress.ui32Addr = 0;
+	sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+	
+	/*
+	 * Submit the kicksync command to the firmware.
+	 */
+	RGX_HWPERF_HOST_ENQ(psKickSyncContext,
+	                    OSGetCurrentClientProcessIDKM(),
+	                    ui32FWCtx,
+	                    ui32ExtJobRef,
+	                    ui32JobId,
+	                    RGX_HWPERF_KICK_TYPE_SYNC,
+	                    uiCheckFenceUID,
+	                    uiUpdateFenceUID,
+	                    NO_DEADLINE,
+	                    NO_CYCEST);
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError2 = RGXScheduleCommand(psKickSyncContext->psDeviceNode->pvDevice,
+		                             RGXFWIF_DM_3D,
+		                             & sKickSyncKCCBCmd,
+		                             sizeof(sKickSyncKCCBCmd),
+		                             ui32ClientCacheOpSeqNum,
+		                             PDUMP_FLAGS_NONE);
+		if (eError2 != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+		RGXHWPerfFTraceGPUEnqueueEvent(psKickSyncContext->psDeviceNode->pvDevice,
+					ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_SYNC);
+#endif
+
+	if (eError2 != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "PVRSRVRGXKickSync failed to schedule kernel CCB command. (0x%x)",
+		         eError2));
+	}
+	
+	/*
+	 * Now check eError (which may have returned an error from our earlier call
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+	 * so we check it now...
+	 */
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdacquire;
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined (SUPPORT_FALLBACK_FENCE_SYNC)
+	if(iUpdateFence != PVRSRV_FENCE_INVALID)
+	{
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		/* If we get here, this should never fail. A failure here likely
+		 * implies a code error above */
+		iUpdateFence = pvr_sync_get_update_fd(psFDFenceData);
+		if (iUpdateFence < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get the update sync fd",
+				__FUNCTION__));
+			/* If we fail here, we cannot roll back the syncs as the hw
+			 * already holds references to resources they may be protecting
+			 * in the kick, so fall through */
+
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_free_append_data;
+		}
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	}
+
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+#if defined(NO_HARDWARE)
+	pvr_sync_nohw_complete_fences(psFDFenceData);
+#endif
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if defined(NO_HARDWARE)
+	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+	if (psUpdateSyncCheckpoint)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+	}
+	if (psFenceTimelineUpdateSync)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Updating NOHW sync prim<%p> to %d", __FUNCTION__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+	}
+	SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined (SUPPORT_FALLBACK_FENCE_SYNC) */
+
+	*piUpdateFence = iUpdateFence;
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_FENCE_INVALID))
+	{
+		SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+#endif
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psKickSyncContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+fail_cmdacquire:
+fail_cmdinit:
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	pvr_sync_rollback_append_fences(psFDFenceData);
+fail_free_append_data:
+	pvr_sync_free_append_fences_data(psFDFenceData);
+fail_fdsync:
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListUpdate);
+	if(iUpdateFence != PVRSRV_FENCE_INVALID)
+	{
+		SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+
+	/* Free memory allocated to hold update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+	}
+fail_alloc_update_values_mem:
+fail_create_output_fence:
+	/* Free memory allocated to hold the resolved fence's checkpoints */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+fail_resolve_fence:
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+fail_syncaddrlist:
+out_unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psKickSyncContext->hLock);
+#endif
+	return eError;
+}
+
+
+/**************************************************************************//**
+ End of file (rgxkicksync.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxkicksync.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxkicksync.h
new file mode 100644
index 0000000..b6c307f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxkicksync.h
@@ -0,0 +1,111 @@
+/*************************************************************************/ /*!
+@File           rgxkicksync.h
+@Title          Server side of the sync only kick API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXKICKSYNC_H__)
+#define __RGXKICKSYNC_H__
+
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+#include "sync_server.h"
+
+
+typedef struct _RGX_SERVER_KICKSYNC_CONTEXT_ RGX_SERVER_KICKSYNC_CONTEXT;
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXCreateKickSyncContextKM
+@Description    Server-side implementation of RGXCreateKicksyncContext
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA             * psConnection,
+                                              PVRSRV_DEVICE_NODE          * psDeviceNode,
+                                              IMG_HANDLE					hMemCtxPrivData,
+                                              RGX_SERVER_KICKSYNC_CONTEXT ** ppsKicksyncContext);
+
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXDestroyKickSyncContextKM
+@Description    Server-side implementation of RGXDestroyKicksyncContext
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXKickSyncKM
+@Description    Kicks a sync only command
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext,
+
+                                 IMG_UINT32                    ui32ClientCacheOpSeqNum,
+
+                                 IMG_UINT32                    ui32ClientFenceCount,
+                                 SYNC_PRIMITIVE_BLOCK           ** pauiClientFenceUFOSyncPrimBlock,
+                                 IMG_UINT32                  * paui32ClientFenceSyncOffset,
+                                 IMG_UINT32                  * paui32ClientFenceValue,
+
+                                 IMG_UINT32                    ui32ClientUpdateCount,
+                                 SYNC_PRIMITIVE_BLOCK           ** pauiClientUpdateUFOSyncPrimBlock,
+                                 IMG_UINT32                  * paui32ClientUpdateSyncOffset,
+                                 IMG_UINT32                  * paui32ClientUpdateValue,
+
+                                 IMG_UINT32                    ui32ServerSyncPrims,
+                                 IMG_UINT32                  * paui32ServerSyncFlags,
+                                 SERVER_SYNC_PRIMITIVE      ** pasServerSyncs,
+
+                                 PVRSRV_FENCE                  iCheckFence,
+                                 PVRSRV_TIMELINE               iUpdateTimeline,
+                                 PVRSRV_FENCE                * piUpdateFence,
+                                 IMG_CHAR                      szUpdateFenceName[32],
+
+                                 IMG_UINT32                    ui32ExtJobRef);
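+
+/* Minimal fence-only kick (an illustrative sketch; the context, input fence
+ * and timeline handles are assumed to have been obtained through the usual
+ * bridge paths):
+ *
+ *     PVRSRV_FENCE iOutFence = PVRSRV_FENCE_INVALID;
+ *     eError = PVRSRVRGXKickSyncKM(psKicksyncContext, 0,
+ *                                  0, NULL, NULL, NULL,  // no client fences
+ *                                  0, NULL, NULL, NULL,  // no client updates
+ *                                  0, NULL, NULL,        // no server syncs
+ *                                  iCheckFence, iUpdateTimeline, &iOutFence,
+ *                                  "example-fence", 0);
+ */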
+
+#endif /* __RGXKICKSYNC_H__ */
+
+/**************************************************************************//**
+ End of file (rgxkicksync.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxlayer.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxlayer.h
new file mode 100644
index 0000000..80aee63
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxlayer.h
@@ -0,0 +1,697 @@
+/*************************************************************************/ /*!
+@File
+@Title          Header for Services abstraction layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declaration of an interface layer used to abstract code that
+                can be compiled outside of the DDK, potentially in a
+                completely different OS.
+                All the headers included by this file must also be copied to
+                the alternative source tree.
+                All the functions declared here must have a DDK implementation
+                inside the DDK source tree (e.g. rgxlayer_impl.h/.c) and
+                another different implementation in case they are used outside
+                of the DDK.
+                All of the functions accept as a first parameter a
+                "const void *hPrivate" argument. It should be used to pass
+                around any implementation specific data required.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGXLAYER_H__)
+#define __RGXLAYER_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */
+#include "rgx_bvnc_defs_km.h"
+
+#include "rgx_firmware_processor.h"
+/* includes:
+ * rgx_meta.h and rgx_mips.h,
+ * rgxdefs_km.h,
+ * rgx_cr_defs_km.h,
+ * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h),
+ * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h)
+ */
+
+#include "rgx_fwif_shared.h"
+/* FIXME: required because of RGXFWIF_DEV_VIRTADDR, but this header
+ * includes a lot of other headers. RGXFWIF_DEV_VIRTADDR must be moved
+ * somewhere else (either img_types.h or a new header) */
+
+
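+/*
+ * Illustrative sketch only (not part of the DDK sources): an out-of-DDK
+ * build supplies its own definitions of the functions declared below,
+ * threading whatever state it needs through the "hPrivate" handle. The
+ * MY_LAYER_CTX type and its field are hypothetical names:
+ *
+ *   #include <stdio.h>
+ *   #include <stdarg.h>
+ *
+ *   typedef struct { FILE *psLog; } MY_LAYER_CTX;
+ *
+ *   void RGXErrorLog(const void *hPrivate, const IMG_CHAR *pszString, ...)
+ *   {
+ *       const MY_LAYER_CTX *psCtx = hPrivate;
+ *       va_list argList;
+ *       va_start(argList, pszString);
+ *       vfprintf(psCtx->psLog, pszString, argList);
+ *       va_end(argList);
+ *   }
+ */
+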
+/*!
+*******************************************************************************
+
+ @Function       RGXMemCopy
+
+ @Description    MemCopy implementation
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pvDst      : Pointer to the destination
+ @Input          pvSrc      : Pointer to the source location
+ @Input          uiSize     : The amount of memory to copy in bytes
+
+ @Return         void
+
+******************************************************************************/
+IMG_INTERNAL
+void RGXMemCopy(const void *hPrivate,
+                void *pvDst,
+                void *pvSrc,
+                size_t uiSize);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXMemSet
+
+ @Description    MemSet implementation
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pvDst      : Pointer to the start of the memory region
+ @Input          ui8Value   : The value to be written
+ @Input          uiSize     : The number of bytes to be set to ui8Value
+
+ @Return         void
+
+******************************************************************************/
+IMG_INTERNAL
+void RGXMemSet(const void *hPrivate,
+               void *pvDst,
+               IMG_UINT8 ui8Value,
+               size_t uiSize);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXCommentLog
+
+ @Description    Generic log function used for debugging or other purposes
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pszString  : Message to be printed
+ @Input          ...        : Variadic arguments
+
+ @Return         void
+
+******************************************************************************/
+IMG_INTERNAL
+void RGXCommentLog(const void *hPrivate,
+                   const IMG_CHAR *pszString,
+                   ...) __printf(2, 3);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXErrorLog
+
+ @Description    Generic error log function used for debugging or other purposes
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pszString  : Message to be printed
+ @Input          ...        : Variadic arguments
+
+ @Return         void
+
+******************************************************************************/
+IMG_INTERNAL
+void RGXErrorLog(const void *hPrivate,
+                 const IMG_CHAR *pszString,
+                 ...) __printf(2, 3);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXDeviceHasFeature
+
+ @Description    Checks if a device has a particular feature
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui64Feature  : Feature to check
+
+ @Return         IMG_TRUE if the given feature is available, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature);
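+
+/* Typical usage (as in rgxlayer_impl.c):
+ *
+ *   bFeatureS7 = RGXDeviceHasFeature(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK);
+ */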
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetFWCorememSize
+
+ @Description    Get the FW coremem size
+
+ @Input          hPrivate   : Implementation specific data
+
+ @Return         FW coremem size
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function      RGXWriteReg32/64
+
+ @Description   Write a value to a 32/64 bit RGX register
+
+ @Input         hPrivate         : Implementation specific data
+ @Input         ui32RegAddr      : Register offset inside the register bank
+ @Input         ui32/64RegValue  : New register value
+
+ @Return        void
+
+******************************************************************************/
+void RGXWriteReg32(const void *hPrivate,
+                   IMG_UINT32 ui32RegAddr,
+                   IMG_UINT32 ui32RegValue);
+
+void RGXWriteReg64(const void *hPrivate,
+                   IMG_UINT32 ui32RegAddr,
+                   IMG_UINT64 ui64RegValue);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXReadReg32/64
+
+ @Description    Read a 32/64 bit RGX register
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui32RegAddr  : Register offset inside the register bank
+
+ @Return         Register value
+
+******************************************************************************/
+IMG_UINT32 RGXReadReg32(const void *hPrivate,
+                        IMG_UINT32 ui32RegAddr);
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate,
+                        IMG_UINT32 ui32RegAddr);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXPollReg32/64
+
+ @Description    Poll on a 32/64 bit RGX register until some bits are set/unset
+
+ @Input          hPrivate         : Implementation specific data
+ @Input          ui32RegAddr      : Register offset inside the register bank
+ @Input          ui32/64RegValue  : Value expected from the register
+ @Input          ui32/64RegMask   : Only the bits set in this mask will be
+                                    checked against ui32/64RegValue
+
+ @Return         PVRSRV_OK if the poll succeeds,
+                 PVRSRV_ERROR_TIMEOUT if the poll takes too long
+
+******************************************************************************/
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT32 ui32RegValue,
+                          IMG_UINT32 ui32RegMask);
+
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT64 ui64RegValue,
+                          IMG_UINT64 ui64RegMask);
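+
+/* Typical usage (mirroring the META slave-port helper in rgxlayer_impl.c),
+ * polling until both the READY and GBLPORT_IDLE bits are set:
+ *
+ *   eError = RGXPollReg32(hPrivate,
+ *                         RGX_CR_META_SP_MSLVCTRL1,
+ *                         RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ *                         RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+ */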
+
+/*!
+*******************************************************************************
+
+ @Function       RGXWaitCycles
+
+ @Description    Wait for a number of GPU cycles and/or microseconds
+
+ @Input          hPrivate    : Implementation specific data
+ @Input          ui32Cycles  : Number of GPU cycles to wait for in pdumps;
+                               it can also be used when running driver-live
+                               if desired (ignoring the next parameter)
+ @Input          ui32WaitUs  : Number of microseconds to wait for when running
+                               driver-live
+
+ @Return         void
+
+******************************************************************************/
+void RGXWaitCycles(const void *hPrivate,
+                   IMG_UINT32 ui32Cycles,
+                   IMG_UINT32 ui32WaitUs);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireKernelMMUPC
+
+ @Description     Acquire the Kernel MMU Page Catalogue device physical address
+
+ @Input           hPrivate  : Implementation specific data
+ @Output          psPCAddr  : Returned page catalogue address
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXWriteKernelMMUPC32/64
+
+ @Description     Write the Kernel MMU Page Catalogue to the 32/64 bit
+                  RGX register passed as argument.
+                  In a driver-live scenario without PDump these functions
+                  are the same as RGXWriteReg32/64 and they don't need
+                  to be reimplemented.
+
+ @Input           hPrivate        : Implementation specific data
+ @Input           ui32PCReg       : Register offset inside the register bank
+ @Input           ui32AlignShift  : PC register alignshift
+ @Input           ui32Shift       : PC register shift
+ @Input           ui32/64PCVal    : Page catalog value (aligned and shifted)
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXWriteKernelMMUPC64(const void *hPrivate,
+                           IMG_UINT32 ui32PCReg,
+                           IMG_UINT32 ui32PCRegAlignShift,
+                           IMG_UINT32 ui32PCRegShift,
+                           IMG_UINT64 ui64PCVal);
+
+void RGXWriteKernelMMUPC32(const void *hPrivate,
+                           IMG_UINT32 ui32PCReg,
+                           IMG_UINT32 ui32PCRegAlignShift,
+                           IMG_UINT32 ui32PCRegShift,
+                           IMG_UINT32 ui32PCVal);
+#else  /* defined(PDUMP) */
+
+#define RGXWriteKernelMMUPC64(priv, pcreg, alignshift, shift, pcval) \
+	RGXWriteReg64(priv, pcreg, pcval)
+
+#define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \
+	RGXWriteReg32(priv, pcreg, pcval)
+
+#endif /* defined(PDUMP) */
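+
+/* Note: in the non-PDump build the align/shift arguments are unused and the
+ * page catalogue write is just a plain register write, so full function
+ * implementations are only needed for PDump builds. */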
+
+
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireGPURegsAddr
+
+ @Description     Acquire the GPU registers base device physical address
+
+ @Input           hPrivate       : Implementation specific data
+ @Output          psGPURegsAddr  : Returned GPU registers base address
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXMIPSWrapperConfig
+
+ @Description     Write GPU register bank transaction ID and MIPS boot mode
+                  to the MIPS wrapper config register (passed as argument).
+                  In a driver-live scenario without PDump this is the same as
+                  RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input           hPrivate          : Implementation specific data
+ @Input           ui32RegAddr       : Register offset inside the register bank
+ @Input           ui64GPURegsAddr   : GPU registers base address
+ @Input           ui32GPURegsAlign  : Register bank transactions alignment
+ @Input           ui32BootMode      : MIPS boot ISA mode
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXMIPSWrapperConfig(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT64 ui64GPURegsAddr,
+                          IMG_UINT32 ui32GPURegsAlign,
+                          IMG_UINT32 ui32BootMode);
+#else
+#define RGXMIPSWrapperConfig(priv, regaddr, gpuregsaddr, gpuregsalign, bootmode) \
+	RGXWriteReg64(priv, regaddr, ((gpuregsaddr) >> (gpuregsalign)) | (bootmode))
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireBootRemapAddr
+
+ @Description     Acquire the device physical address of the MIPS bootloader
+                  accessed through remap region
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psBootRemapAddr  : Base address of the remapped bootloader
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXBootRemapConfig
+
+ @Description     Configure the bootloader remap registers passed as arguments.
+                  In a driver-live scenario without PDump this is the same as
+                  two RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input           hPrivate             : Implementation specific data
+ @Input           ui32Config1RegAddr   : Remap config1 register offset
+ @Input           ui64Config1RegValue  : Remap config1 register value
+ @Input           ui32Config2RegAddr   : Remap config2 register offset
+ @Input           ui64Config2PhyAddr   : Output remapped aligned physical address
+ @Input           ui64Config2PhyMask   : Mask for the output physical address
+ @Input           ui64Config2Settings  : Extra settings for this remap region
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXBootRemapConfig(const void *hPrivate,
+                        IMG_UINT32 ui32Config1RegAddr,
+                        IMG_UINT64 ui64Config1RegValue,
+                        IMG_UINT32 ui32Config2RegAddr,
+                        IMG_UINT64 ui64Config2PhyAddr,
+                        IMG_UINT64 ui64Config2PhyMask,
+                        IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXBootRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+		RGXWriteReg64(priv, c1reg, (c1val)); \
+		RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+	} while (0)
+#endif
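+
+/* Note: the non-PDump fallback above performs the same two 64-bit register
+ * writes that the PDump implementation replays via an internal PDump
+ * variable (see RGXWriteRemapConfig2Reg in rgxlayer_impl.c). */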
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireCodeRemapAddr
+
+ @Description     Acquire the device physical address of the MIPS code
+                  accessed through remap region
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psCodeRemapAddr  : Base address of the remapped code
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXCodeRemapConfig
+
+ @Description     Configure the code remap registers passed as arguments.
+                  In a driver-live scenario without PDump this is the same as
+                  two RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input           hPrivate             : Implementation specific data
+ @Input           ui32Config1RegAddr   : Remap config1 register offset
+ @Input           ui64Config1RegValue  : Remap config1 register value
+ @Input           ui32Config2RegAddr   : Remap config2 register offset
+ @Input           ui64Config2PhyAddr   : Output remapped aligned physical address
+ @Input           ui64Config2PhyMask   : Mask for the output physical address
+ @Input           ui64Config2Settings  : Extra settings for this remap region
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXCodeRemapConfig(const void *hPrivate,
+                        IMG_UINT32 ui32Config1RegAddr,
+                        IMG_UINT64 ui64Config1RegValue,
+                        IMG_UINT32 ui32Config2RegAddr,
+                        IMG_UINT64 ui64Config2PhyAddr,
+                        IMG_UINT64 ui64Config2PhyMask,
+                        IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXCodeRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+		RGXWriteReg64(priv, c1reg, (c1val)); \
+		RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+	} while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireDataRemapAddr
+
+ @Description     Acquire the device physical address of the MIPS data
+                  accessed through remap region
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psDataRemapAddr  : Base address of the remapped data
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXDataRemapConfig
+
+ @Description     Configure the data remap registers passed as arguments.
+                  In a driver-live scenario without PDump this is the same as
+                  two RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input           hPrivate             : Implementation specific data
+ @Input           ui32Config1RegAddr   : Remap config1 register offset
+ @Input           ui64Config1RegValue  : Remap config1 register value
+ @Input           ui32Config2RegAddr   : Remap config2 register offset
+ @Input           ui64Config2PhyAddr   : Output remapped aligned physical address
+ @Input           ui64Config2PhyMask   : Mask for the output physical address
+ @Input           ui64Config2Settings  : Extra settings for this remap region
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXDataRemapConfig(const void *hPrivate,
+                        IMG_UINT32 ui32Config1RegAddr,
+                        IMG_UINT64 ui64Config1RegValue,
+                        IMG_UINT32 ui32Config2RegAddr,
+                        IMG_UINT64 ui64Config2PhyAddr,
+                        IMG_UINT64 ui64Config2PhyMask,
+                        IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXDataRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+		RGXWriteReg64(priv, c1reg, (c1val)); \
+		RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+	} while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireTrampolineRemapAddr
+
+ @Description     Acquire the device physical address of the MIPS trampoline
+                  accessed through remap region
+
+ @Input           hPrivate             : Implementation specific data
+ @Output          psTrampolineRemapAddr: Base address of the remapped trampoline
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXTrampolineRemapConfig
+
+ @Description     Configure the trampoline remap registers passed as arguments.
+                  In a driver-live scenario without PDump this is the same as
+                  two RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input           hPrivate             : Implementation specific data
+ @Input           ui32Config1RegAddr   : Remap config1 register offset
+ @Input           ui64Config1RegValue  : Remap config1 register value
+ @Input           ui32Config2RegAddr   : Remap config2 register offset
+ @Input           ui64Config2PhyAddr   : Output remapped aligned physical address
+ @Input           ui64Config2PhyMask   : Mask for the output physical address
+ @Input           ui64Config2Settings  : Extra settings for this remap region
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXTrampolineRemapConfig(const void *hPrivate,
+                              IMG_UINT32 ui32Config1RegAddr,
+                              IMG_UINT64 ui64Config1RegValue,
+                              IMG_UINT32 ui32Config2RegAddr,
+                              IMG_UINT64 ui64Config2PhyAddr,
+                              IMG_UINT64 ui64Config2PhyMask,
+                              IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXTrampolineRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+		RGXWriteReg64(priv, c1reg, (c1val)); \
+		RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+	} while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXDoFWSlaveBoot
+
+ @Description     Returns whether or not a FW Slave Boot is required
+                  while powering on
+
+ @Input           hPrivate       : Implementation specific data
+
+ @Return          IMG_TRUE if a FW Slave Boot is required, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXFabricCoherencyTest
+
+ @Description    Performs a coherency test
+
+ @Input          hPrivate         : Implementation specific data
+
+ @Return         PVRSRV_OK if the test succeeds,
+                 PVRSRV_ERROR_INIT_FAILURE if the test fails at some point
+
+******************************************************************************/
+PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXDeviceHasErnBrn
+
+ @Description    Checks if a device is affected by particular errata/enhancements
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui64ErnsBrns : ERN/BRN flags to check
+
+ @Return         IMG_TRUE if the given errata/enhancements apply, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDeviceSLCBanks
+
+ @Description    Returns the number of SLC banks used by the device
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         Number of SLC banks
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDeviceSLCSize
+
+ @Description    Returns the device SLC size
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         SLC size
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceSLCSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDeviceCacheLineSize
+
+ @Description    Returns the device cache line size
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         Cache line size
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDevicePhysBusWidth
+
+ @Description    Returns the device physical bus width
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         Physical bus width
+
+******************************************************************************/
+IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* !defined (__RGXLAYER_H__) */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxlayer_impl.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxlayer_impl.c
new file mode 100644
index 0000000..be161f6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxlayer_impl.c
@@ -0,0 +1,1115 @@
+/*************************************************************************/ /*!
+@File
+@Title          DDK implementation of the Services abstraction layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    DDK implementation of the Services abstraction layer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxlayer_impl.h"
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "rgxfwutils.h"
+#include "devicemem.h"
+#include "cache_km.h"
+#include "pmr.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#endif
+
+void RGXMemCopy(const void *hPrivate,
+                void *pvDst,
+                void *pvSrc,
+                size_t uiSize)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+	OSDeviceMemCopy(pvDst, pvSrc, uiSize);
+}
+
+void RGXMemSet(const void *hPrivate,
+               void *pvDst,
+               IMG_UINT8 ui8Value,
+               size_t uiSize)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+	OSDeviceMemSet(pvDst, ui8Value, uiSize);
+}
+
+void RGXCommentLog(const void *hPrivate,
+                   const IMG_CHAR *pszString,
+                   ...)
+{
+#if defined(PDUMP)
+	va_list argList;
+	va_start(argList, pszString);
+	PDumpCommentWithFlagsVA(PDUMP_FLAGS_CONTINUOUS, pszString, argList);
+	va_end(argList);
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+#else
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+	PVR_UNREFERENCED_PARAMETER(pszString);
+#endif
+}
+
+void RGXErrorLog(const void *hPrivate,
+                 const IMG_CHAR *pszString,
+                 ...)
+{
+	IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+	va_list argList;
+
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+
+	va_start(argList, pszString);
+	vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList);
+	va_end(argList);
+
+	PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer));
+}
+
+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	return psDevInfo->sDevFeatureCfg.ui32MCMS;
+}
+
+void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void *pvRegsBase;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+	if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+	{
+		OSWriteHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue);
+	}
+
+	PDUMPREG32(RGX_PDUMPREG_NAME, ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags);
+}
+
+void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void *pvRegsBase;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+	if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+	{
+		OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue);
+	}
+
+	PDUMPREG64(RGX_PDUMPREG_NAME, ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags);
+}
+
+IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void *pvRegsBase;
+	IMG_UINT32 ui32RegValue;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+	if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
+	{
+		ui32RegValue = IMG_UINT32_MAX;
+	}
+	else
+#endif
+	{
+		ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr);
+	}
+
+	PDUMPREGREAD32(RGX_PDUMPREG_NAME, ui32RegAddr, psParams->ui32PdumpFlags);
+
+	return ui32RegValue;
+}
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void *pvRegsBase;
+	IMG_UINT64 ui64RegValue;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+	if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
+	{
+		ui64RegValue = IMG_UINT64_MAX;
+	}
+	else
+#endif
+	{
+		ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr);
+	}
+
+	PDUMPREGREAD64(RGX_PDUMPREG_NAME, ui32RegAddr, PDUMP_FLAGS_CONTINUOUS);
+
+	return ui64RegValue;
+}
+
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT32 ui32RegValue,
+                          IMG_UINT32 ui32RegMask)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void *pvRegsBase;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+	if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+	{
+		if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)pvRegsBase + ui32RegAddr),
+		                         ui32RegValue,
+		                         ui32RegMask) != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) failed", ui32RegAddr));
+			return PVRSRV_ERROR_TIMEOUT;
+		}
+	}
+
+	PDUMPREGPOL(RGX_PDUMPREG_NAME,
+	            ui32RegAddr,
+	            ui32RegValue,
+	            ui32RegMask,
+	            psParams->ui32PdumpFlags,
+	            PDUMP_POLL_OPERATOR_EQUAL);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT64 ui64RegValue,
+                          IMG_UINT64 ui64RegMask)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void *pvRegsBase;
+
+	/* Split lower and upper words */
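+	/* (PVRSRVPollForValueKM operates on 32-bit words, so the 64-bit poll is
+	 * issued as two 32-bit polls: upper word first, then lower word) */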
+	IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32);
+	IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue);
+	IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32);
+	IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask);
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+	pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+	if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+	{
+		if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)pvRegsBase + ui32RegAddr + 4),
+		                         ui32UpperValue,
+		                         ui32UpperMask) != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr));
+			return PVRSRV_ERROR_TIMEOUT;
+		}
+
+		if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)pvRegsBase + ui32RegAddr),
+		                         ui32LowerValue,
+		                         ui32LowerMask) != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr));
+			return PVRSRV_ERROR_TIMEOUT;
+		}
+	}
+
+	PDUMPREGPOL(RGX_PDUMPREG_NAME,
+	            ui32RegAddr + 4,
+	            ui32UpperValue,
+	            ui32UpperMask,
+	            psParams->ui32PdumpFlags,
+	            PDUMP_POLL_OPERATOR_EQUAL);
+
+
+	PDUMPREGPOL(RGX_PDUMPREG_NAME,
+	            ui32RegAddr,
+	            ui32LowerValue,
+	            ui32LowerMask,
+	            psParams->ui32PdumpFlags,
+	            PDUMP_POLL_OPERATOR_EQUAL);
+
+	return PVRSRV_OK;
+}
+
+void RGXWaitCycles(const void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32WaitUs)
+{
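+	/* Driver-live waits use the microsecond count below; the GPU cycle count
+	 * is only recorded in the PDump stream via PDUMPIDLWITHFLAGS */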
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+	OSWaitus(ui32WaitUs);
+	PDUMPIDLWITHFLAGS(ui32Cycles, PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr)
+{
+	PVR_ASSERT(hPrivate != NULL);
+	*psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr;
+}
+
+#if defined(PDUMP)
+void RGXWriteKernelMMUPC64(const void *hPrivate,
+                           IMG_UINT32 ui32PCReg,
+                           IMG_UINT32 ui32PCRegAlignShift,
+                           IMG_UINT32 ui32PCRegShift,
+                           IMG_UINT64 ui64PCVal)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	/* Write the cat-base address */
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ui32PCReg, ui64PCVal);
+
+	/* Pdump catbase address */
+	MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
+	                          RGX_PDUMPREG_NAME,
+	                          ui32PCReg,
+	                          8,
+	                          ui32PCRegAlignShift,
+	                          ui32PCRegShift,
+	                          PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXWriteKernelMMUPC32(const void *hPrivate,
+                           IMG_UINT32 ui32PCReg,
+                           IMG_UINT32 ui32PCRegAlignShift,
+                           IMG_UINT32 ui32PCRegShift,
+                           IMG_UINT32 ui32PCVal)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	/* Write the cat-base address */
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal);
+
+	/* Pdump catbase address */
+	MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
+	                          RGX_PDUMPREG_NAME,
+	                          ui32PCReg,
+	                          4,
+	                          ui32PCRegAlignShift,
+	                          ui32PCRegShift,
+	                          PDUMP_FLAGS_CONTINUOUS);
+}
+#endif /* defined(PDUMP) */
+
+void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr)
+{
+	PVR_ASSERT(hPrivate != NULL);
+	*psGPURegsAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sGPURegAddr;
+}
+
+#if defined(PDUMP)
+void RGXMIPSWrapperConfig(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT64 ui64GPURegsAddr,
+                          IMG_UINT32 ui32GPURegsAlign,
+                          IMG_UINT32 ui32BootMode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM,
+	               ui32RegAddr,
+	               (ui64GPURegsAddr >> ui32GPURegsAlign) | ui32BootMode);
+
+	/* Store register offset to temp PDump variable */
+	PDumpRegLabelToInternalVar(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", PDUMP_FLAGS_CONTINUOUS);
+
+	/* Align register transactions identifier */
+	PDumpWriteVarSHRValueOp(":SYSMEM:$1", ui32GPURegsAlign, PDUMP_FLAGS_CONTINUOUS);
+
+	/* Enable micromips instruction encoding */
+	PDumpWriteVarORValueOp(":SYSMEM:$1", ui32BootMode, PDUMP_FLAGS_CONTINUOUS);
+
+	/* Do the actual register write */
+	PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", 0);
+}
+#endif
+
+void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr)
+{
+	PVR_ASSERT(hPrivate != NULL);
+	*psBootRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sBootRemapAddr;
+}
+
+void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr)
+{
+	PVR_ASSERT(hPrivate != NULL);
+	*psCodeRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sCodeRemapAddr;
+}
+
+void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr)
+{
+	PVR_ASSERT(hPrivate != NULL);
+	*psDataRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sDataRemapAddr;
+}
+
+void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr)
+{
+	PVR_ASSERT(hPrivate != NULL);
+	*psTrampolineRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sTrampolineRemapAddr;
+}
+
+#if defined(PDUMP)
+static inline
+void RGXWriteRemapConfig2Reg(void *pvRegs,
+                             PMR *psPMR,
+                             IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                             IMG_UINT32 ui32RegAddr,
+                             IMG_UINT64 ui64PhyAddr,
+                             IMG_UINT64 ui64PhyMask,
+                             IMG_UINT64 ui64Settings)
+{
+	OSWriteHWReg64(pvRegs, ui32RegAddr, (ui64PhyAddr & ui64PhyMask) | ui64Settings);
+
+	/* Store memory offset to temp PDump variable */
+	PDumpMemLabelToInternalVar64(":SYSMEM:$1", psPMR, uiLogicalOffset, PDUMP_FLAGS_CONTINUOUS);
+
+	/* Keep only the relevant bits of the output physical address */
+	PDumpWriteVarANDValueOp(":SYSMEM:$1", ui64PhyMask, PDUMP_FLAGS_CONTINUOUS);
+
+	/* Extra settings for this remapped region */
+	PDumpWriteVarORValueOp(":SYSMEM:$1", ui64Settings, PDUMP_FLAGS_CONTINUOUS);
+
+	/* Do the actual register write */
+	PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXBootRemapConfig(const void *hPrivate,
+                        IMG_UINT32 ui32Config1RegAddr,
+                        IMG_UINT64 ui64Config1RegValue,
+                        IMG_UINT32 ui32Config2RegAddr,
+                        IMG_UINT64 ui64Config2PhyAddr,
+                        IMG_UINT64 ui64Config2PhyMask,
+                        IMG_UINT64 ui64Config2Settings)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	IMG_UINT32 ui32BootRemapMemOffset = RGXMIPSFW_BOOT_NMI_CODE_BASE_PAGE * (IMG_UINT32)RGXMIPSFW_PAGE_SIZE;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	/* Write remap config1 register */
+	RGXWriteReg64(hPrivate,
+	              ui32Config1RegAddr,
+	              ui64Config1RegValue);
+
+	/* Write remap config2 register */
+	RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+	                        psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR,
+	                        psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32BootRemapMemOffset,
+	                        ui32Config2RegAddr,
+	                        ui64Config2PhyAddr,
+	                        ui64Config2PhyMask,
+	                        ui64Config2Settings);
+}
+
+void RGXCodeRemapConfig(const void *hPrivate,
+                        IMG_UINT32 ui32Config1RegAddr,
+                        IMG_UINT64 ui64Config1RegValue,
+                        IMG_UINT32 ui32Config2RegAddr,
+                        IMG_UINT64 ui64Config2PhyAddr,
+                        IMG_UINT64 ui64Config2PhyMask,
+                        IMG_UINT64 ui64Config2Settings)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	IMG_UINT32 ui32CodeRemapMemOffset = RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE * (IMG_UINT32)RGXMIPSFW_PAGE_SIZE;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	/* Write remap config1 register */
+	RGXWriteReg64(hPrivate,
+	              ui32Config1RegAddr,
+	              ui64Config1RegValue);
+
+	/* Write remap config2 register */
+	RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+	                        psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR,
+	                        psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32CodeRemapMemOffset,
+	                        ui32Config2RegAddr,
+	                        ui64Config2PhyAddr,
+	                        ui64Config2PhyMask,
+	                        ui64Config2Settings);
+}
+
+void RGXDataRemapConfig(const void *hPrivate,
+                        IMG_UINT32 ui32Config1RegAddr,
+                        IMG_UINT64 ui64Config1RegValue,
+                        IMG_UINT32 ui32Config2RegAddr,
+                        IMG_UINT64 ui64Config2PhyAddr,
+                        IMG_UINT64 ui64Config2PhyMask,
+                        IMG_UINT64 ui64Config2Settings)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	IMG_UINT32 ui32DataRemapMemOffset = RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * (IMG_UINT32)RGXMIPSFW_PAGE_SIZE;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	/* Write remap config1 register */
+	RGXWriteReg64(hPrivate,
+	              ui32Config1RegAddr,
+	              ui64Config1RegValue);
+
+	/* Write remap config2 register */
+	RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+	                        psDevInfo->psRGXFWDataMemDesc->psImport->hPMR,
+	                        psDevInfo->psRGXFWDataMemDesc->uiOffset + ui32DataRemapMemOffset,
+	                        ui32Config2RegAddr,
+	                        ui64Config2PhyAddr,
+	                        ui64Config2PhyMask,
+	                        ui64Config2Settings);
+}
+
+void RGXTrampolineRemapConfig(const void *hPrivate,
+                              IMG_UINT32 ui32Config1RegAddr,
+                              IMG_UINT64 ui64Config1RegValue,
+                              IMG_UINT32 ui32Config2RegAddr,
+                              IMG_UINT64 ui64Config2PhyAddr,
+                              IMG_UINT64 ui64Config2PhyMask,
+                              IMG_UINT64 ui64Config2Settings)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	/* write the register for real, without PDump */
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM,
+	               ui32Config1RegAddr,
+	               ui64Config1RegValue);
+
+	/* Store the memory address in a PDump variable */
+	PDumpPhysHandleToInternalVar64(":SYSMEM:$1",
+	                               psDevInfo->sTrampoline.hPdumpPages,
+	                               PDUMP_FLAGS_CONTINUOUS);
+
+	/* Keep only the relevant bits of the input physical address */
+	PDumpWriteVarANDValueOp(":SYSMEM:$1",
+                                ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK,
+	                        PDUMP_FLAGS_CONTINUOUS);
+
+	/* Enable bit */
+	PDumpWriteVarORValueOp(":SYSMEM:$1",
+	                       RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN,
+	                       PDUMP_FLAGS_CONTINUOUS);
+
+	/* Do the PDump register write */
+	PDumpInternalVarToReg64(RGX_PDUMPREG_NAME,
+	                        ui32Config1RegAddr,
+	                        ":SYSMEM:$1",
+	                        PDUMP_FLAGS_CONTINUOUS);
+
+	/* this can be written directly */
+	RGXWriteReg64(hPrivate,
+	              ui32Config2RegAddr,
+	              (ui64Config2PhyAddr & ui64Config2PhyMask) | ui64Config2Settings);
+}
+#endif
+
+#define MAX_NUM_COHERENCY_TESTS  (10)
+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	if (psDevInfo->ui32CoherencyTestsDone >= MAX_NUM_COHERENCY_TESTS)
+	{
+		return IMG_FALSE;
+	}
+
+	psDevConfig = ((RGX_LAYER_PARAMS*)hPrivate)->psDevConfig;
+
+	return PVRSRVSystemSnoopingOfCPUCache(psDevConfig);
+}
+
+static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Wait for Slave Port to be Ready */
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_META_SP_MSLVCTRL1,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+	if (eError != PVRSRV_OK) return eError;
+
+	/* Issue a Write */
+	RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr);
+	RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue);
+
+	return eError;
+}
+
+/*
+ * The fabric coherency test is performed when the platform supports fabric
+ * coherency (either ACE-Lite or full ACE). This test is done quite early
+ * with the firmware processor quiescent and makes exclusive use of the slave
+ * port interface for reading/writing through the device memory hierarchy. The
+ * rationale for the test is to ensure that what the CPU writes to its dcache
+ * is visible to the GPU via coherency snoop miss/hit and vice-versa without
+ * any intervening cache maintenance by the writing agent.
+ */
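+/*
+ * Four sub-tests are run (see the DEBUG logging below), each in two passes
+ * with a different seed value:
+ *   0: CPU write / GPU read, snoop miss
+ *   1: GPU write / CPU read, snoop miss
+ *   2: CPU write / GPU read, snoop hit
+ *   3: GPU write / CPU read, snoop hit
+ */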
+PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	IMG_UINT32 *pui32FabricCohTestBufferCpuVA;
+	DEVMEM_MEMDESC *psFabricCohTestBufferMemDesc;
+	RGXFWIF_DEV_VIRTADDR sFabricCohTestBufferDevVA;
+	IMG_DEVMEM_SIZE_T uiFabricCohTestBlockSize = sizeof(IMG_UINT64);
+	IMG_DEVMEM_ALIGN_T uiFabricCohTestBlockAlign = sizeof(IMG_UINT64);
+	IMG_UINT64 ui64SegOutAddrTopCached = 0;
+	IMG_UINT64 ui64SegOutAddrTopUncached = 0;
+	IMG_UINT32 ui32SLCCTRL = 0;
+	IMG_UINT32 ui32OddEven;
+	IMG_BOOL   bFeatureS7;
+	IMG_UINT32 ui32TestType;
+	IMG_UINT32 ui32OddEvenSeed = 1;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bFullTestPassed = IMG_TRUE;
+	IMG_BOOL bSubTestPassed = IMG_FALSE;
+	IMG_BOOL bExit = IMG_FALSE;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+	PVR_LOG(("Starting fabric coherency test ....."));
+
+	bFeatureS7 = RGXDeviceHasFeature(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK);
+
+	if (bFeatureS7)
+	{
+		if (RGXDeviceHasErnBrn(hPrivate, HW_ERN_49144_BIT_MASK))
+		{
+			ui64SegOutAddrTopCached   = RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED_ERN_49144(META_MMU_CONTEXT_MAPPING);
+			ui64SegOutAddrTopUncached = RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED_ERN_49144(META_MMU_CONTEXT_MAPPING);
+		}
+		else if (RGXDeviceHasErnBrn(hPrivate, HW_ERN_45914_BIT_MASK))
+		{
+			ui64SegOutAddrTopCached   = RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED_ERN_45914(META_MMU_CONTEXT_MAPPING);
+			ui64SegOutAddrTopUncached = RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED_ERN_45914(META_MMU_CONTEXT_MAPPING);
+		}
+
+		/* Configure META to use SLC force-linefill for the bootloader segment */
+		RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6),
+		                         (ui64SegOutAddrTopUncached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32);
+	}
+	else
+	{
+		/* Bypass the SLC when IO coherency is enabled */
+		ui32SLCCTRL = RGXReadReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS);
+		RGXWriteReg32(hPrivate,
+		              RGX_CR_SLC_CTRL_BYPASS,
+		              ui32SLCCTRL | RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN);
+	}
+
+	/* Size and align are 'expanded' because we request an export align allocation */
+	DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareHeap),
+										&uiFabricCohTestBlockSize,
+										&uiFabricCohTestBlockAlign);
+
+	/* Allocate, acquire cpu address and set firmware address */
+	eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode,
+										uiFabricCohTestBlockSize,
+										uiFabricCohTestBlockAlign,
+										PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+										PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+										PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+										PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT |
+										PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
+										PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+										PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+										"FwExFabricCoherencyTestBuffer",
+										&psFabricCohTestBufferMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"DevmemFwAllocateExportable() error: %s, exiting",
+				PVRSRVGetErrorStringKM(eError)));
+		goto e0;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psFabricCohTestBufferMemDesc, (void **) &pui32FabricCohTestBufferCpuVA);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"DevmemAcquireCpuVirtAddr() error: %s, exiting",
+				PVRSRVGetErrorStringKM(eError)));
+		goto e0;
+	}
+
+	/* Create a FW address which is uncached in the Meta DCache and in the SLC
+	 * using the Meta bootloader segment.
+	 * This segment is the only one configured correctly out of reset
+	 * (when this test is meant to be executed).
+	 */
+	RGXSetFirmwareAddress(&sFabricCohTestBufferDevVA,
+						  psFabricCohTestBufferMemDesc,
+						  0,
+						  RFW_FWADDR_FLAG_NONE);
+
+	/* Undo most of the FW mappings done by RGXSetFirmwareAddress */
+	sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK;
+	sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK;
+	sFabricCohTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+	/* Map the buffer in the bootloader segment as uncached */
+	sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR;
+	sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+
+	for (ui32TestType = 0; ui32TestType < 4 && bExit == IMG_FALSE; ui32TestType++)
+	{
+		IMG_CPU_PHYADDR sCpuPhyAddr;
+		IMG_BOOL bValid;
+		PMR *psPMR;
+
+		/* Acquire underlying PMR CpuPA in preparation for cache maintenance */
+		(void) DevmemLocalGetImportHandle(psFabricCohTestBufferMemDesc, (void**)&psPMR);
+		eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid);
+		if (eError != PVRSRV_OK || bValid != IMG_TRUE)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"PMR_CpuPhysAddr error: %s, exiting",
+					 PVRSRVGetErrorStringKM(eError)));
+			bExit = IMG_TRUE;
+			continue;
+		}
+
+		/* Here we do two passes [runs] mostly to account for the effects of using
+		   the different seed (i.e. ui32OddEvenSeed) value to read and write */
+		for (ui32OddEven = 1; ui32OddEven < 3 && bExit == IMG_FALSE; ui32OddEven++)
+		{
+			IMG_UINT32 i;
+
+#if defined(DEBUG)
+			switch(ui32TestType)
+			{
+			case 0:
+				PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: starting  [run #%u]", ui32OddEven));
+				break;
+			case 1:
+				PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: starting  [run #%u]", ui32OddEven));
+				break;
+			case 2:
+				PVR_LOG(("CPU:Write/GPU:Read Snoop Hit  Test: starting  [run #%u]", ui32OddEven));
+				break;
+			case 3:
+				PVR_LOG(("GPU:Write/CPU:Read Snoop Hit  Test: starting  [run #%u]", ui32OddEven));
+				break;
+			default:
+				PVR_LOG(("Internal error, exiting test"));
+				eError = PVRSRV_ERROR_INIT_FAILURE;
+				bExit = IMG_TRUE;
+				continue;
+				break;
+			}
+#endif
+
+			for (i = 0; i < 2 && bExit == IMG_FALSE; i++)
+			{
+				IMG_UINT32 ui32FWAddr;
+				IMG_UINT32 ui32FWValue;
+				IMG_UINT32 ui32FWValue2;
+				IMG_CPU_PHYADDR sCpuPhyAddrStart;
+				IMG_CPU_PHYADDR sCpuPhyAddrEnd;
+				IMG_UINT32 ui32LastFWValue = ~0;
+				IMG_UINT32 ui32Offset = i * sizeof(IMG_UINT32);
+
+				/* Calculate next address and seed value to write/read from slave-port */
+				ui32FWAddr = sFabricCohTestBufferDevVA.ui32Addr + ui32Offset;
+				sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + ui32Offset;
+				sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr;
+				ui32OddEvenSeed += 1;
+
+				if (ui32TestType & 0x1)
+				{
+					ui32FWValue = i + ui32OddEvenSeed;
+
+					switch(ui32TestType)
+					{
+					case 1:
+					case 3:
+						/* Clean dcache to ensure there is no stale data in dcache that might over-write
+						   what we are about to write via slave-port here because if it drains from the CPU
+						   dcache before we read it, it would corrupt what we are going to read back via
+						   the CPU */
+						sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+						CacheOpExecKM(psDevInfo->psDeviceNode,
+									  pui32FabricCohTestBufferCpuVA + ui32Offset,
+									  pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+									  sCpuPhyAddrStart,
+									  sCpuPhyAddrEnd,
+									  PVRSRV_CACHE_OP_CLEAN);
+						break;
+					}
+
+					/* Write the value using the RGX slave-port interface */
+					eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32FWValue);
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR,
+								"RGXWriteMETAAddr error: %s, exiting",
+								 PVRSRVGetErrorStringKM(eError)));
+						bExit = IMG_TRUE;
+						continue;
+					}
+
+					/* Read back value using RGX slave-port interface, this is used
+					   as a sort of memory barrier for the above write */
+					eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue2);
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR,
+								"RGXReadMETAAddr error: %s, exiting",
+								 PVRSRVGetErrorStringKM(eError)));
+						bExit = IMG_TRUE;
+						continue;
+					}
+					else if (ui32FWValue != ui32FWValue2)
+					{
+						/* Fatal error, we should abort */
+						PVR_DPF((PVR_DBG_ERROR,
+								"At Offset: %d, RAW via SlavePort failed: expected: %x, got: %x",
+								i,
+								ui32FWValue,
+								ui32FWValue2));
+						eError = PVRSRV_ERROR_INIT_FAILURE;
+						bExit = IMG_TRUE;
+						continue;
+					}
+
+					if (! PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig))
+					{
+						/* Invalidate the dcache so any data the CPU may have prefetched
+						   from this region is discarded and the next CPU read must miss
+						   in the cache. When the device cache is snooped, CPU prefetches
+						   are coherent and already observe the GPU's latest write, so no
+						   CPU d-cache maintenance is needed */
+						sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr + sizeof(IMG_UINT32);
+						CacheOpExecKM(psDevInfo->psDeviceNode,
+									  (IMG_UINT8 *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+									  (IMG_UINT8 *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+									  sCpuPhyAddrStart,
+									  sCpuPhyAddrEnd,
+									  PVRSRV_CACHE_OP_INVALIDATE);
+					}
+				}
+				else
+				{
+					IMG_UINT32 ui32RAWCpuValue;
+
+					/* Ensures line is in dcache */
+					ui32FWValue = pui32FabricCohTestBufferCpuVA[i];
+					ui32FWValue = ~0;
+
+					/* Dirty allocation in dcache */
+					ui32RAWCpuValue = i + ui32OddEvenSeed;
+					pui32FabricCohTestBufferCpuVA[i] = i + ui32OddEvenSeed;
+
+					/* Flush possible cpu store-buffer(ing) on LMA */
+					OSWriteMemoryBarrier();
+
+					switch(ui32TestType)
+					{
+					case 0:
+						/* Flush dcache to force subsequent incoming CPU-bound snoop to miss so
+						   memory is coherent before the SlavePort reads */
+						sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr + sizeof(IMG_UINT32);
+						CacheOpExecKM(psDevInfo->psDeviceNode,
+									  (IMG_UINT8 *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+									  (IMG_UINT8 *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+									  sCpuPhyAddrStart,
+									  sCpuPhyAddrEnd,
+									  PVRSRV_CACHE_OP_FLUSH);
+						break;
+					}
+
+					/* Read back value using RGX slave-port interface */
+					eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue);
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR,
+								"RGXReadWithSP error: %s, exiting",
+								PVRSRVGetErrorStringKM(eError)));
+						bExit = IMG_TRUE;
+						continue;
+					}
+
+					/* Defensive flush: make sure the CPU read-after-write check
+					   below observes memory rather than a stale cache line */
+					sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr + sizeof(IMG_UINT32);
+					CacheOpExecKM(psDevInfo->psDeviceNode,
+								  (IMG_UINT8 *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+								  (IMG_UINT8 *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+								  sCpuPhyAddrStart,
+								  sCpuPhyAddrEnd,
+								  PVRSRV_CACHE_OP_FLUSH);
+					if (pui32FabricCohTestBufferCpuVA[i] != ui32RAWCpuValue)
+					{
+						/* Fatal error, we should abort */
+						PVR_DPF((PVR_DBG_ERROR,
+								"At Offset: %d, RAW by CPU failed: expected: %x, got: %x",
+								i,
+								ui32RAWCpuValue,
+								pui32FabricCohTestBufferCpuVA[i]));
+						eError = PVRSRV_ERROR_INIT_FAILURE;
+						bExit = IMG_TRUE;
+						continue;
+					}
+				}
+
+				/* Compare to see if sub-test passed */
+				if (pui32FabricCohTestBufferCpuVA[i] == ui32FWValue)
+				{
+					bSubTestPassed = IMG_TRUE;
+				}
+				else
+				{
+					bSubTestPassed = IMG_FALSE;
+					bFullTestPassed = IMG_FALSE;
+					eError = PVRSRV_ERROR_INIT_FAILURE;
+					if (ui32LastFWValue != ui32FWValue)
+					{
+#if defined(DEBUG)
+						PVR_LOG(("At Offset: %d, Expected: %x, Got: %x",
+								 i,
+								 (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i],
+								 (ui32TestType & 0x1) ? pui32FabricCohTestBufferCpuVA[i] : ui32FWValue));
+#endif
+					}
+					else
+					{
+						PVR_DPF((PVR_DBG_ERROR,
+								"test encountered unexpected error, exiting"));
+						eError = PVRSRV_ERROR_INIT_FAILURE;
+						bExit = IMG_TRUE;
+						continue;
+					}
+				}
+
+				ui32LastFWValue = (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i];
+			}
+
+#if defined(DEBUG)
+			if (bExit)
+			{
+				continue;
+			}
+
+			switch(ui32TestType)
+			{
+			case 0:
+				PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+				break;
+			case 1:
+				PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+				break;
+			case 2:
+				PVR_LOG(("CPU:Write/GPU:Read Snoop Hit  Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+				break;
+			case 3:
+				PVR_LOG(("GPU:Write/CPU:Read Snoop Hit  Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+				break;
+			default:
+				PVR_LOG(("Internal error, exiting test"));
+				bExit = IMG_TRUE;
+				continue;
+			}
+#endif
+		}
+	}
+
+	RGXUnsetFirmwareAddress(psFabricCohTestBufferMemDesc);
+	DevmemReleaseCpuVirtAddr(psFabricCohTestBufferMemDesc);
+	DevmemFwFree(psDevInfo, psFabricCohTestBufferMemDesc);
+
+e0:
+	if (bFeatureS7)
+	{
+		/* Restore bootloader segment settings */
+		RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6),
+		                         (ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32);
+	}
+	else
+	{
+		/* Restore SLC bypass settings */
+		RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS, ui32SLCCTRL);
+	}
+
+	bFullTestPassed = bExit ? IMG_FALSE : bFullTestPassed;
+	if (bFullTestPassed)
+	{
+		PVR_LOG(("fabric coherency test: PASSED"));
+		psDevInfo->ui32CoherencyTestsDone = MAX_NUM_COHERENCY_TESTS + 1;
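+		/* Advance the counter past MAX_NUM_COHERENCY_TESTS so the test is
+		   not attempted again */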
+	}
+	else
+	{
+		PVR_LOG(("fabric coherency test: FAILED"));
+		psDevInfo->ui32CoherencyTestsDone++;
+	}
+
+	return eError;
+}
+
+IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0;
+}
+
+IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0;
+}
+
+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	return psDevInfo->sDevFeatureCfg.ui32SLCBanks;
+}
+
+IMG_UINT32 RGXGetDeviceSLCSize(const void *hPrivate)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	return psDevInfo->sDevFeatureCfg.ui32SLCSize;
+}
+
+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	return psDevInfo->sDevFeatureCfg.ui32CacheLineSize;
+}
+
+IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate)
+{
+	RGX_LAYER_PARAMS *psParams;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_ASSERT(hPrivate != NULL);
+	psParams = (RGX_LAYER_PARAMS*)hPrivate;
+	psDevInfo = psParams->psDevInfo;
+
+	return psDevInfo->sDevFeatureCfg.ui32PBW;
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxlayer_impl.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxlayer_impl.h
new file mode 100644
index 0000000..9fb398e7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxlayer_impl.h
@@ -0,0 +1,67 @@
+/*************************************************************************/ /*!
+@File
+@Title          Header for DDK implementation of the Services abstraction layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for DDK implementation of the Services abstraction layer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGXLAYER_IMPL_H__)
+#define __RGXLAYER_IMPL_H__
+
+#include "rgxlayer.h"
+#include "device_connection.h"
+
+typedef struct _RGX_LAYER_PARAMS_
+{
+	void *psDevInfo;
+	void *psDevConfig;
+#if defined(PDUMP)
+	IMG_UINT32 ui32PdumpFlags;
+#endif
+
+	IMG_DEV_PHYADDR sPCAddr;
+	IMG_DEV_PHYADDR sGPURegAddr;
+	IMG_DEV_PHYADDR sBootRemapAddr;
+	IMG_DEV_PHYADDR sCodeRemapAddr;
+	IMG_DEV_PHYADDR sDataRemapAddr;
+	IMG_DEV_PHYADDR sTrampolineRemapAddr;
+} RGX_LAYER_PARAMS;
+
+#endif /* !defined (__RGXLAYER_IMPL_H__) */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmem.c
new file mode 100644
index 0000000..b0f92a7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmem.c
@@ -0,0 +1,731 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX memory context management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX memory context management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_server_utils.h"
+#include "devicemem_pdump.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_km.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "sync_internal.h"
+#include "rgx_memallocflags.h"
+#include "rgx_bvnc_defs_km.h"
+/*
+	FIXME:
+	For now just get global state, but what we really want is to do
+	this per memory context
+*/
+static IMG_UINT32 gui32CacheOpps = 0;
+/* FIXME: End */
+
+typedef struct _SERVER_MMU_CONTEXT_ {
+	DEVMEM_MEMDESC *psFWMemContextMemDesc;
+	MMU_CONTEXT *psMMUContext;
+	IMG_PID uiPID;
+	IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+	DLLIST_NODE sNode;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+} SERVER_MMU_CONTEXT;
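+
+/* One SERVER_MMU_CONTEXT is kept per registered device memory context; it
+ * records the owning process (PID and name) for fault attribution and is
+ * linked into the device's sMemoryContextList.
+ */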
+
+
+
+void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode,
+						   IMG_HANDLE hDeviceData,
+						   MMU_LEVEL eMMULevel,
+						   IMG_BOOL bUnmap)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+	PVR_UNREFERENCED_PARAMETER(bUnmap);
+
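+	/* Invalidation requests are only accumulated here; the resulting flags
+	   are sent to the firmware on the next kick via RGXPreKickCacheCommand() */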
+	switch (eMMULevel)
+	{
+		case MMU_LEVEL_3:	gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PC;
+							break;
+		case MMU_LEVEL_2:	gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PD;
+							break;
+		case MMU_LEVEL_1:	gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PT;
+							if(!(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SLC_VIVT_BIT_MASK))
+							{
+								gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_TLB;
+							}
+							break;
+		default:
+							PVR_ASSERT(0);
+							break;
+	}
+}
+
+PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_UINT16 *pui16MMUInvalidateUpdate,
+                                       IMG_BOOL bInterrupt)
+{
+	PVRSRV_ERROR eError;
+
+	eError = RGXPreKickCacheCommand(psDeviceNode->pvDevice,
+	                                RGXFWIF_DM_GP,
+	                                pui16MMUInvalidateUpdate,
+	                                bInterrupt);
+
+	return eError;
+}
+
+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                    RGXFWIF_DM eDM,
+                                    IMG_UINT16 *pui16MMUInvalidateUpdate,
+                                    IMG_BOOL bInterrupt)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+	RGXFWIF_KCCB_CMD sFlushCmd;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!gui32CacheOpps)
+	{
+		goto _PVRSRVPowerLock_Exit;
+	}
+
+	/* PVRSRVPowerLock serialises command submission and keeps the global
+	 * flag state consistent when several applications allocate resources
+	 * concurrently. */
+	eError = PVRSRVPowerLock(psDeviceNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXPreKickCacheCommand: failed to acquire powerlock (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+		goto _PVRSRVPowerLock_Exit;
+	}
+
+	*pui16MMUInvalidateUpdate = psDeviceNode->ui16NextMMUInvalidateUpdate;
+
+	/* Setup cmd and add the device nodes sync object */
+	sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_MMUCACHE;
+	sFlushCmd.uCmdData.sMMUCacheData.ui16MMUCacheSyncUpdateValue = psDeviceNode->ui16NextMMUInvalidateUpdate;
+	SyncPrimGetFirmwareAddr(psDeviceNode->psMMUCacheSyncPrim,
+	                        &sFlushCmd.uCmdData.sMMUCacheData.sMMUCacheSync.ui32Addr);
+
+	/* Set the update value for the next kick */
+	psDeviceNode->ui16NextMMUInvalidateUpdate++;
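+	/* When the command completes, the firmware writes the update value above
+	   to the sync prim, so callers can wait on the value returned in
+	   *pui16MMUInvalidateUpdate */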
+
+	/* Set which memory context this command is for (all ctxs for now) */
+	if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SLC_VIVT_BIT_MASK)
+	{
+		gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL;
+	}
+	/* Indicate the firmware should signal command completion to the host */
+	if(bInterrupt)
+	{
+		gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT;
+	}
+#if 0
+	sFlushCmd.uCmdData.sMMUCacheData.psMemoryContext = ???
+#endif
+
+	PDUMPPOWCMDSTART();
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+										 PVRSRV_DEV_POWER_STATE_ON,
+										 IMG_FALSE);
+	PDUMPPOWCMDEND();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXPreKickCacheCommand: failed to transition RGX to ON (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+
+		goto _PVRSRVSetDevicePowerStateKM_Exit;
+	}
+
+	sFlushCmd.uCmdData.sMMUCacheData.ui32Flags = gui32CacheOpps;
+
+#if defined(PDUMP)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+	                      "Submit MMU flush and invalidate (flags = 0x%08x)",
+	                      gui32CacheOpps);
+#endif
+
+	gui32CacheOpps = 0;
+
+	/* Schedule MMU cache command */
+	eError = RGXSendCommand(psDevInfo,
+	                           eDM,
+	                           &sFlushCmd,
+	                           sizeof(RGXFWIF_KCCB_CMD),
+	                           PDUMP_FLAGS_CONTINUOUS);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXPreKickCacheCommand: Failed to schedule MMU "
+		                       "cache command to DM=%d with error (%u)", eDM, eError));
+	}
+
+_PVRSRVSetDevicePowerStateKM_Exit:
+	PVRSRVPowerUnlock(psDeviceNode);
+
+_PVRSRVPowerLock_Exit:
+	return eError;
+}
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+/* Page fault debug is currently the only use case that needs to find process
+ * info after that process's device memory context has been destroyed
+ */
+
+typedef struct _UNREGISTERED_MEMORY_CONTEXT_
+{
+	IMG_PID uiPID;
+	IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+	IMG_DEV_PHYADDR sPCDevPAddr;
+} UNREGISTERED_MEMORY_CONTEXT;
+
+/* must be a power of two */
+#define UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE (1 << 3)
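+/* A power-of-two size lets the head index wrap with a bitwise AND rather than
+ * a modulo, e.g. with a size of 8: (7 + 1) & (8 - 1) == 0
+ */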
+
+static UNREGISTERED_MEMORY_CONTEXT gasUnregisteredMemCtxs[UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE];
+static IMG_UINT32 gui32UnregisteredMemCtxsHead = 0;
+
+/* record a device memory context being unregistered.
+ * the list of unregistered contexts can be used to find the PID and process name
+ * belonging to a memory context which has been destroyed
+ */
+static void _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERVER_MMU_CONTEXT *psServerMMUContext)
+{
+	UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
+	OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+	psRecord = &gasUnregisteredMemCtxs[gui32UnregisteredMemCtxsHead];
+
+	gui32UnregisteredMemCtxsHead = (gui32UnregisteredMemCtxsHead + 1)
+					& (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1);
+
+	psRecord->uiPID = psServerMMUContext->uiPID;
+	if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &psRecord->sPCDevPAddr) != PVRSRV_OK)
+	{
+		PVR_LOG(("_RecordUnregisteredMemoryContext: Failed to get PC address for memory context"));
+	}
+	OSStringNCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName));
+	psRecord->szProcessName[sizeof(psRecord->szProcessName) - 1] = '\0';
+
+	/* Only drop the lock once the record is fully populated, so readers can
+	   never observe a partially written entry */
+	OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+}
+
+#endif
+
+void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData)
+{
+	SERVER_MMU_CONTEXT *psServerMMUContext = hPrivData;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psServerMMUContext->psDevInfo;
+
+	OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
+	dllist_remove_node(&psServerMMUContext->sNode);
+	OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	_RecordUnregisteredMemoryContext(psDevInfo, psServerMMUContext);
+#endif
+
+	/*
+	 * Release the page catalogue address acquired in RGXRegisterMemoryContext().
+	 */
+	MMU_ReleaseBaseAddr(NULL /* FIXME */);
+	
+	/*
+	 * Free the firmware memory context.
+	 */
+	DevmemFwFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc);
+
+	OSFreeMem(psServerMMUContext);
+}
+
+
+/*
+ * RGXRegisterMemoryContext
+ */ 
+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE	*psDeviceNode,
+									  MMU_CONTEXT			*psMMUContext,
+									  IMG_HANDLE			*hPrivData)
+{
+	PVRSRV_ERROR			eError;
+	PVRSRV_RGXDEV_INFO 		*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_FLAGS_T			uiFWMemContextMemAllocFlags;
+	RGXFWIF_FWMEMCONTEXT	*psFWMemContext;
+	DEVMEM_MEMDESC			*psFWMemContextMemDesc;
+	SERVER_MMU_CONTEXT *psServerMMUContext;
+
+	if (psDevInfo->psKernelMMUCtx == NULL)
+	{
+		/*
+		 * This must be the creation of the Kernel memory context. Take a copy
+		 * of the MMU context for use when programming the BIF.
+		 */ 
+		psDevInfo->psKernelMMUCtx = psMMUContext;
+	}
+	else
+	{
+		psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext));
+		if (psServerMMUContext == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto fail_alloc_server_ctx;
+		}
+
+		psServerMMUContext->psDevInfo = psDevInfo;
+
+		/*
+		 * This FW MemContext is only mapped into the kernel for initialisation purposes.
+		 * Otherwise this allocation is only used by the FW.
+		 * Therefore the GPU cache doesn't need coherency, and write-combine
+		 * suffices on the CPU side (the WC buffer is flushed at any kick)
+		 */
+		uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+										PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+										PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+										PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+										PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+										PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+										PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+		/*
+			Allocate device memory for the firmware memory context for the new
+			application.
+		*/
+		PDUMPCOMMENT("Allocate RGX firmware memory context");
+		/* FIXME: why cache-consistent? */
+		eError = DevmemFwAllocate(psDevInfo,
+								sizeof(*psFWMemContext),
+								uiFWMemContextMemAllocFlags,
+								"FwMemoryContext",
+								&psFWMemContextMemDesc);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to allocate firmware memory context (%u)",
+					eError));
+			goto fail_alloc_fw_ctx;
+		}
+		
+		/*
+			Temporarily map the firmware memory context to the kernel.
+		*/
+		eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc,
+										  (void **)&psFWMemContext);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to map firmware memory context (%u)",
+					eError));
+			goto fail_acquire_cpu_addr;
+		}
+		
+		/*
+		 * Write the new memory context's page catalogue into the firmware memory
+		 * context for the client.
+		 */
+		eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
+					eError));
+			DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+			goto fail_acquire_base_addr;
+		}
+
+		/*
+		 * Set default values for the rest of the structure.
+		 */
+		psFWMemContext->uiPageCatBaseRegID = -1;
+		psFWMemContext->uiBreakpointAddr = 0;
+		psFWMemContext->uiBPHandlerAddr = 0;
+		psFWMemContext->uiBreakpointCtl = 0;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+		{
+			IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
+			IMG_BOOL   bOSidAxiProt;
+
+			MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+			psFWMemContext->ui32OSid     = ui32OSidReg;
+			psFWMemContext->bOSidAxiProt = bOSidAxiProt;
+		}
+#endif
+
+#if defined(PDUMP)
+		{
+			IMG_CHAR			aszName[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+			IMG_DEVMEM_OFFSET_T uiOffset = 0;
+
+			/*
+			 * Dump the Mem context allocation
+			 */
+			DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS);
+			
+
+			/*
+			 * Obtain a symbolic addr of the mem context structure
+			 */
+			eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc, 
+												   &uiOffset, 
+												   aszName, 
+												   PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to generate a Dump Page Catalogue address (%u)",
+						eError));
+				DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+				goto fail_pdump_cat_base_addr;
+			}
+
+			/*
+			 * Dump the Page Cat tag in the mem context (symbolic address)
+			 */
+			eError = MMU_PDumpWritePageCatBase(psMMUContext,
+												aszName,
+												uiOffset,
+												8, /* 64-bit register write */
+												0,
+												0,
+												0);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
+						eError));
+				DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+				goto fail_pdump_cat_base;
+			}
+		}
+#endif
+
+		/*
+		 * Release kernel address acquired above.
+		 */
+		DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+
+		/*
+		 * Store the process information for this device memory context
+		 * for use with the host page-fault analysis.
+		 */
+		psServerMMUContext->uiPID = OSGetCurrentClientProcessIDKM();
+		psServerMMUContext->psMMUContext = psMMUContext;
+		psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+		if (OSSNPrintf(psServerMMUContext->szProcessName,
+						RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME,
+						"%s",
+						OSGetCurrentClientProcessNameKM()) == RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME)
+		{
+			psServerMMUContext->szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME-1] = '\0';
+		}
+
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "New memory context: Process Name: %s PID: %u (0x%08X)",
+										psServerMMUContext->szProcessName,
+										psServerMMUContext->uiPID,
+										psServerMMUContext->uiPID);
+
+		OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
+		dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode);
+		OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);
+
+		MMU_SetDeviceData(psMMUContext, psFWMemContextMemDesc);
+		*hPrivData = psServerMMUContext;
+	}
+			
+	return PVRSRV_OK;
+
+#if defined(PDUMP)
+fail_pdump_cat_base:
+fail_pdump_cat_base_addr:
+	MMU_ReleaseBaseAddr(NULL);
+#endif
+fail_acquire_base_addr:
+	/* The CPU mapping was already released before jumping here, so there is nothing to release at this point */
+fail_acquire_cpu_addr:
+	DevmemFwFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc);
+fail_alloc_fw_ctx:
+	OSFreeMem(psServerMMUContext);
+fail_alloc_server_ctx:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv)
+{
+	SERVER_MMU_CONTEXT *psMMUContext = (SERVER_MMU_CONTEXT *) hPriv;
+
+	return psMMUContext->psFWMemContextMemDesc;
+}
+
+void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_DEV_VIRTADDR *psDevVAddr,
+				IMG_DEV_PHYADDR *psDevPAddr,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	IMG_DEV_PHYADDR sPCDevPAddr;
+	DLLIST_NODE *psNode, *psNext;
+
+	OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+	{
+		SERVER_MMU_CONTEXT *psServerMMUContext =
+			IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+
+		if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
+		{
+			PVR_LOG(("Failed to get PC address for memory context"));
+			continue;
+		}
+
+		if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
+		{
+			PVR_DUMPDEBUG_LOG("Found memory context (PID = %d, %s)",
+							   psServerMMUContext->uiPID,
+							   psServerMMUContext->szProcessName);
+
+			MMU_CheckFaultAddress(psServerMMUContext->psMMUContext, psDevVAddr,
+						pfnDumpDebugPrintf, pvDumpDebugFile);
+			goto out_unlock;
+		}
+	}
+
+	/* Lastly check for a fault in the kernel allocated memory; only compare
+	   the PC address if it was successfully acquired */
+	if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK)
+	{
+		PVR_LOG(("Failed to get PC address for kernel memory context"));
+	}
+	else if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
+	{
+		MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr,
+					pfnDumpDebugPrintf, pvDumpDebugFile);
+	}
+
+out_unlock:
+	OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock);
+}
+
+/* Given the physical address of a page catalogue, search for a corresponding
+ * MMU context and, if one is found, provide the caller with details of the
+ * owning process. Returns IMG_TRUE if a process is found.
+ */
+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
+								RGXMEM_PROCESS_INFO *psInfo)
+{
+	IMG_BOOL bRet = IMG_FALSE;
+	DLLIST_NODE *psNode, *psNext;
+	SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
+
+	/* check if the input PC addr corresponds to an active memory context */
+	dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+	{
+		SERVER_MMU_CONTEXT *psThisMMUContext =
+			IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+		IMG_DEV_PHYADDR sPCDevPAddr;
+
+		if (MMU_AcquireBaseAddr(psThisMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
+		{
+			PVR_LOG(("Failed to get PC address for memory context"));
+			continue;
+		}
+
+		if (sPCAddress.uiAddr == sPCDevPAddr.uiAddr)
+		{
+			psServerMMUContext = psThisMMUContext;
+			break;
+		}
+	}
+
+	if(psServerMMUContext != NULL)
+	{
+		psInfo->uiPID = psServerMMUContext->uiPID;
+		OSStringNCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
+		psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+		psInfo->bUnregistered = IMG_FALSE;
+		bRet = IMG_TRUE;
+	}
+	/* else check if the input PC addr corresponds to the firmware */
+	else
+	{
+		IMG_DEV_PHYADDR sKernelPCDevPAddr;
+		PVRSRV_ERROR eError;
+
+		eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sKernelPCDevPAddr);
+
+		if(eError != PVRSRV_OK)
+		{
+			PVR_LOG(("Failed to get PC address for kernel memory context"));
+		}
+		else
+		{
+			if(sPCAddress.uiAddr == sKernelPCDevPAddr.uiAddr)
+			{
+				psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE;
+				OSStringNCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName));
+				psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+				psInfo->bUnregistered = IMG_FALSE;
+				bRet = IMG_TRUE;
+			}
+		}
+	}
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	if(bRet == IMG_FALSE)
+	{
+		/* no active memory context found with the given PC address.
+		 * Check the list of most recently freed memory contexts.
+		 */
+		IMG_UINT32 i;
+
+		OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+		/* iterate through the list of unregistered memory contexts
+		 * from newest (one before the head) to the oldest (the current head)
+		 */
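+		/* e.g. with a history size of 8 and head == 3, the indices visited
+		 * are 2, 1, 0, 7, 6, 5, 4 and finally 3
+		 */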
+		i = gui32UnregisteredMemCtxsHead;
+
+		do
+		{
+			UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
+			i ? i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1));
+
+			psRecord = &gasUnregisteredMemCtxs[i];
+
+			if(psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr)
+			{
+				psInfo->uiPID = psRecord->uiPID;
+				OSStringNCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)-1);
+				psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+				psInfo->bUnregistered = IMG_TRUE;
+				bRet = IMG_TRUE;
+				break;
+			}
+		} while(i != gui32UnregisteredMemCtxsHead);
+
+		OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+
+	}
+#endif
+	return bRet;
+}
+
+IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID,
+								RGXMEM_PROCESS_INFO *psInfo)
+{
+	IMG_BOOL bRet = IMG_FALSE;
+	DLLIST_NODE *psNode, *psNext;
+	SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
+
+	/* check if the input PID corresponds to an active memory context */
+	dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+	{
+		SERVER_MMU_CONTEXT *psThisMMUContext =
+			IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+
+		if (psThisMMUContext->uiPID == uiPID)
+		{
+			psServerMMUContext = psThisMMUContext;
+			break;
+		}
+	}
+
+	if(psServerMMUContext != NULL)
+	{
+		psInfo->uiPID = psServerMMUContext->uiPID;
+		OSStringNCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
+		psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+		psInfo->bUnregistered = IMG_FALSE;
+		bRet = IMG_TRUE;
+	}
+	/* else check if the input PID corresponds to the firmware */
+	else if(uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+	{
+		psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE;
+		OSStringNCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName));
+		psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+		psInfo->bUnregistered = IMG_FALSE;
+		bRet = IMG_TRUE;
+	}
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	/* if the PID didn't correspond to an active context or the
+	 * FW address then see if it matches a recently unregistered context
+	 */
+	if(bRet == IMG_FALSE)
+	{
+		IMG_UINT32 i;
+
+		OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+		/* iterate from the newest record (one before the head) back to the
+		 * oldest (the current head), wrapping at the start of the array
+		 */
+		i = gui32UnregisteredMemCtxsHead;
+
+		do
+		{
+			UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
+			i ? i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1));
+
+			psRecord = &gasUnregisteredMemCtxs[i];
+
+			if(psRecord->uiPID == uiPID)
+			{
+				psInfo->uiPID = psRecord->uiPID;
+				OSStringNCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)-1);
+				psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+				psInfo->bUnregistered = IMG_TRUE;
+				bRet = IMG_TRUE;
+				break;
+			}
+		} while(i != gui32UnregisteredMemCtxsHead);
+
+		OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+
+	}
+#endif
+	return bRet;
+}
+
+/******************************************************************************
+ End of file (rgxmem.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmem.h
new file mode 100644
index 0000000..ee89633
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmem.h
@@ -0,0 +1,103 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX memory context management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for RGX memory context management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXMEM_H__)
+#define __RGXMEM_H__
+
+#include "pvrsrv_error.h"
+#include "device.h"
+#include "mmu_common.h"
+#include "rgxdevice.h"
+
+#define RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME 40
+
+/* this PID denotes the firmware */
+#define RGXMEM_SERVER_PID_FIRMWARE 0xFFFFFFFF
+
+typedef struct _RGXMEM_PROCESS_INFO_
+{
+	IMG_PID uiPID;
+	IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+	IMG_BOOL bUnregistered;
+} RGXMEM_PROCESS_INFO;
+
+IMG_DEV_PHYADDR GetPC(MMU_CONTEXT * psContext);
+	
+/* FIXME: SyncPrim should be stored on the memory context */
+void RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDeviceNode);
+void RGXMMUSyncPrimFree(void);
+
+void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode,
+						   IMG_HANDLE hDeviceData,
+						   MMU_LEVEL eMMULevel,
+						   IMG_BOOL bUnmap);
+
+PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_UINT16 *pui16MMUInvalidateUpdate,
+                                       IMG_BOOL bInterrupt);
+
+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                    RGXFWIF_DM eDM,
+                                    IMG_UINT16 *pui16MMUInvalidateUpdate,
+                                    IMG_BOOL bInterrupt);
+
+void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData);
+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE	*psDeviceNode,
+									  MMU_CONTEXT			*psMMUContext,
+									  IMG_HANDLE			*hPrivData);
+
+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv);
+
+void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_DEV_VIRTADDR *psDevVAddr,
+				IMG_DEV_PHYADDR *psDevPAddr,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile);
+
+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
+								RGXMEM_PROCESS_INFO *psInfo);
+
+IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID,
+                                                                RGXMEM_PROCESS_INFO *psInfo);
+
+#endif /* __RGXMEM_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmipsmmuinit.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmipsmmuinit.c
new file mode 100644
index 0000000..4e547d9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmipsmmuinit.c
@@ -0,0 +1,931 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxmipsmmuinit.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "pdump_mmu.h"
+#include "rgxheapconfig.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "rgx_memallocflags.h"
+#include "pdump_km.h"
+#include "rgxdevice.h"
+
+/*
+ * Bits of PT, PD and PC not involving addresses
+ */
+
+/* Currently there is no page directory for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PDE_PROTMASK        0
+/* Currently there is no page catalog for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PCE_PROTMASK	     0
+
+
+static MMU_PxE_CONFIG sRGXMMUPCEConfig;
+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;
+
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 1MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB;
+
+
+/*
+ *
+ *  Configuration for heaps with 2MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig2MB;
+
+
+/* Forward declaration of protection bits derivation functions, for
+   the following structure */
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
+
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+										   const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+										   const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+										   const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+										   IMG_HANDLE *phPriv);
+
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
+
+/* Cached policy */
+static IMG_UINT32 gui32CachedPolicy;
+
+PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	IMG_BOOL bPhysBusAbove32Bit = psDevInfo->sDevFeatureCfg.ui32PBW > 32;
+
+	sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName =
+		PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]);
+
+	/*
+	 * Setup sRGXMMUPCEConfig, no PC in MIPS MMU currently
+	 */
+	sRGXMMUPCEConfig.uiBytesPerEntry = 0; /* No PC entries in the MIPS MMU */
+	sRGXMMUPCEConfig.uiAddrMask = 0; /* Mask to get significant address bits of PC entry */
+
+	sRGXMMUPCEConfig.uiAddrShift = 0; /* Shift this many bits to get PD address in PC entry */
+	sRGXMMUPCEConfig.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE; /* Alignment of PD AND PC */
+
+	sRGXMMUPCEConfig.uiProtMask = RGX_MIPS_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits of the PC */
+	sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to have status bits starting with bit 0 */
+
+	sRGXMMUPCEConfig.uiValidEnMask = RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */
+	sRGXMMUPCEConfig.uiValidEnShift = RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to have entry valid bit starting with bit 0 */
+
+	/*
+	 *  Setup sRGXMMUTopLevelDevVAddrConfig
+	 */
+	sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = 0; /* Get the PC address bits from a 40 bit virt. address (in a 64bit UINT) */
+	sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = 0;
+	sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = 0;
+
+	sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = 0; /* Get the PD address bits from a 40 bit virt. address (in a 64bit UINT) */
+	sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = 0;
+	sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = 0;
+
+	sRGXMMUTopLevelDevVAddrConfig.uiPTIndexMask = IMG_UINT64_C(0xfffffff000); /* Get the PT address bits from a 40 bit virt. address (in a 64bit UINT) */
+	sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE;
+	sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = RGX_FIRMWARE_HEAP_SIZE >> sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift;
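+	/* i.e. one PT entry per (1 << RGXMIPSFW_LOG2_PAGE_SIZE)-byte page, enough
+	   to map the entire firmware heap */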
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_4KBDP. No PD in MIPS MMU currently
+	 */
+	sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 0;
+
+	/* No PD used for MIPS */
+	sRGXMMUPDEConfig_4KBDP.uiAddrMask = 0;
+	sRGXMMUPDEConfig_4KBDP.uiAddrShift = 0;
+	sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE;
+
+	sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x0);
+	sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 0;
+
+	sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MIPS_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_4KBDP.
+	 */
+	sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+
+
+	if (bPhysBusAbove32Bit)
+	{
+		sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT;
+		gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT;
+	}
+	else
+	{
+		sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK;
+		gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY;
+	}
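+	/* A physical bus wider than 32 bits changes the EntryLo PFN field layout,
+	   hence the different address mask and cache policy selected above */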
+
+	sRGXMMUPTEConfig_4KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT;
+	sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE;
+
+	sRGXMMUPTEConfig_4KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK;
+	sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN;
+	sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_4KBDP
+	 */
+	sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = 0;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = 0;
+	sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = 0;
+
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = 0;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = 0;
+	sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = 0;
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = RGX_FIRMWARE_HEAP_SIZE >> sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift;
+
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
+	sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = RGX_FIRMWARE_HEAP_BASE & IMG_UINT64_C(0x00ffffffff);
+
+	/*
+	 * Setup gsPageSizeConfig4KB
+	 */
+	gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
+	gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
+	gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
+	gsPageSizeConfig4KB.uiRefCount = 0;
+	gsPageSizeConfig4KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_16KBDP
+	 */
+	sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPDEConfig_16KBDP.uiAddrMask = 0;
+	sRGXMMUPDEConfig_16KBDP.uiAddrShift = 0; /* These are for a page directory ENTRY, meaning the address of a PT cropped to suit the PD */
+	sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the page tables NOT directories */
+
+	sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = 0;
+	sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 0;
+
+	sRGXMMUPDEConfig_16KBDP.uiProtMask = 0;
+	sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_16KBDP.uiValidEnMask = 0;
+	sRGXMMUPDEConfig_16KBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_16KBDP. Not supported yet
+	 */
+	sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPTEConfig_16KBDP.uiAddrMask = 0;
+	sRGXMMUPTEConfig_16KBDP.uiAddrShift = 0; /* These are for a page table ENTRY, meaning the address of a PAGE cropped to suit the PD */
+	sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the pages NOT tables */
+
+	sRGXMMUPTEConfig_16KBDP.uiProtMask = 0;
+	sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_16KBDP.uiValidEnMask = 0;
+	sRGXMMUPTEConfig_16KBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_16KBDP
+	 */
+	sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = 0;
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD= 0;
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = 0;
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig16KB
+	 */
+	gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
+	gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
+	gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
+	gsPageSizeConfig16KB.uiRefCount = 0;
+	gsPageSizeConfig16KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size. Not supported yet
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_64KBDP
+	 */
+	sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPDEConfig_64KBDP.uiAddrMask = 0;
+	sRGXMMUPDEConfig_64KBDP.uiAddrShift = 0;
+	sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = 0;
+	sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 0;
+
+	sRGXMMUPDEConfig_64KBDP.uiProtMask = 0;
+	sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_64KBDP.uiValidEnMask = 0;
+	sRGXMMUPDEConfig_64KBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_64KBDP.
+	 *
+	 */
+	sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPTEConfig_64KBDP.uiAddrMask = 0;
+	sRGXMMUPTEConfig_64KBDP.uiAddrShift = 0;
+	sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPTEConfig_64KBDP.uiProtMask = 0;
+	sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_64KBDP.uiValidEnMask = 0;
+	sRGXMMUPTEConfig_64KBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_64KBDP.
+	 */
+	sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = 0;
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = 0;
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = 0;
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig64KB.
+	 */
+	gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
+	gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
+	gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
+	gsPageSizeConfig64KB.uiRefCount = 0;
+	gsPageSizeConfig64KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size. Not supported yet
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_256KBDP
+	 */
+	sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPDEConfig_256KBDP.uiAddrMask = 0;
+	sRGXMMUPDEConfig_256KBDP.uiAddrShift = 0;
+	sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = 0;
+	sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 0;
+
+	sRGXMMUPDEConfig_256KBDP.uiProtMask = 0;
+	sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_256KBDP.uiValidEnMask = 0;
+	sRGXMMUPDEConfig_256KBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP
+	 */
+	sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPTEConfig_256KBDP.uiAddrMask = 0;
+	sRGXMMUPTEConfig_256KBDP.uiAddrShift = 0;
+	sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPTEConfig_256KBDP.uiProtMask = 0;
+	sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_256KBDP.uiValidEnMask = 0;
+	sRGXMMUPTEConfig_256KBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_256KBDP
+	 */
+	sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = 0;
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = 0;
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = 0;
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig256KB
+	 */
+	gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
+	gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
+	gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
+	gsPageSizeConfig256KB.uiRefCount = 0;
+	gsPageSizeConfig256KB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUPDEConfig_1MBDP. Not supported yet
+	 */
+	sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPDEConfig_1MBDP.uiAddrMask = 0;
+	sRGXMMUPDEConfig_1MBDP.uiAddrShift = 0;
+	sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = 0;
+	sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 0;
+
+	sRGXMMUPDEConfig_1MBDP.uiProtMask = 0;
+	sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_1MBDP.uiValidEnMask = 0;
+	sRGXMMUPDEConfig_1MBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_1MBDP
+	 */
+	sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_1MBDP.uiAddrMask = 0;
+	sRGXMMUPTEConfig_1MBDP.uiAddrShift = 0;
+	sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPTEConfig_1MBDP.uiProtMask = 0;
+	sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_1MBDP.uiValidEnMask = 0;
+	sRGXMMUPTEConfig_1MBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_1MBDP
+	 */
+	sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = 0;
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = 0;
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = 0;
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig1MB
+	 */
+	gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
+	gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
+	gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
+	gsPageSizeConfig1MB.uiRefCount = 0;
+	gsPageSizeConfig1MB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUPDEConfig_2MBDP. Not supported yet
+	 */
+	sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPDEConfig_2MBDP.uiAddrMask = 0;
+	sRGXMMUPDEConfig_2MBDP.uiAddrShift = 0;
+	sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = 0;
+	sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 0;
+
+	sRGXMMUPDEConfig_2MBDP.uiProtMask = 0;
+	sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_2MBDP.uiValidEnMask = 0;
+	sRGXMMUPDEConfig_2MBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_2MBDP
+	 */
+	sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 0;
+
+	sRGXMMUPTEConfig_2MBDP.uiAddrMask = 0;
+	sRGXMMUPTEConfig_2MBDP.uiAddrShift = 0;
+	sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 0;
+
+	sRGXMMUPTEConfig_2MBDP.uiProtMask = 0;
+	sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_2MBDP.uiValidEnMask = 0;
+	sRGXMMUPTEConfig_2MBDP.uiValidEnShift = 0;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_2MBDP
+	 */
+	sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = 0;
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = 0;
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = 0;
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig2MB
+	 */
+	gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
+	gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
+	gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
+	gsPageSizeConfig2MB.uiRefCount = 0;
+	gsPageSizeConfig2MB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUDeviceAttributes
+	 */
+	sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_MIPS_MICROAPTIV;
+	sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_1;
+	/* The whole page table fits in a single physical page that is as large as the page table itself */
+	sRGXMMUDeviceAttributes.ui32BaseAlign = RGXMIPSFW_LOG2_PAGETABLE_PAGE_SIZE;
+	/* The base configuration is set to 4kB pages */
+	sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPTEConfig_4KBDP;
+	sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+	/* Functions for deriving page table/dir/cat protection bits */
+	sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+	sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+	sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+	/* Functions for establishing configurations for PDE/PTE/DEVVADDR
+	   on a per-heap basis */
+	sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+	sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+	sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
+	sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
+
+	psDeviceNode->psFirmwareMMUDevAttrs = &sRGXMMUDeviceAttributes;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+
+	eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+	psDeviceNode->pfnMMUGetContextID = NULL;
+#endif
+
+	psDeviceNode->psFirmwareMMUDevAttrs = NULL;
+
+#if defined(DEBUG)
+	PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
+			 gsPageSizeConfig4KB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
+			 gsPageSizeConfig4KB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
+			 gsPageSizeConfig16KB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
+			 gsPageSizeConfig16KB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
+			 gsPageSizeConfig64KB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
+			 gsPageSizeConfig64KB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
+			 gsPageSizeConfig256KB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
+			 gsPageSizeConfig256KB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
+			 gsPageSizeConfig1MB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
+			 gsPageSizeConfig1MB.uiRefCount));
+	PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
+			 gsPageSizeConfig2MB.uiMaxRefCount));
+	PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
+			 gsPageSizeConfig2MB.uiRefCount));
+#endif
+	if (gsPageSizeConfig4KB.uiRefCount > 0 ||
+		gsPageSizeConfig16KB.uiRefCount > 0 ||
+		gsPageSizeConfig64KB.uiRefCount > 0 ||
+		gsPageSizeConfig256KB.uiRefCount > 0 ||
+		gsPageSizeConfig1MB.uiRefCount > 0 ||
+		gsPageSizeConfig2MB.uiRefCount > 0
+		)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXMipsMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
+	}
+
+	return eError;
+}
+
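+/* A minimal usage sketch (illustrative only, not part of this file): the
+ * register/unregister pair is expected to bracket the lifetime of the
+ * firmware MMU attributes on a device node. "psExampleNode" is a
+ * hypothetical, already-initialised device node.
+ */
+#if 0
+	PVRSRV_ERROR eErr = RGXMipsMMUInit_Register(psExampleNode);
+	if (eErr == PVRSRV_OK)
+	{
+		/* ... psExampleNode->psFirmwareMMUDevAttrs is now valid ... */
+		eErr = RGXMipsMMUInit_Unregister(psExampleNode);
+	}
+#endif
+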
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt4
+@Description    Calculate the PCE protection flags based on a 4-byte entry
+@Return         IMG_UINT32 The PCE protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt8
+@Description    Calculate the PCE protection flags based on an 8-byte entry
+@Return         IMG_UINT64 The PCE protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+	PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt4
+@Description    Derive the PDE protection flags based on a 4-byte entry
+@Return         IMG_UINT32 The PDE protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt8
+@Description    derive the PDE protection flags based on an 8 byte entry
+
+@Input          uiLog2DataPageSize The log2 of the required page size.
+				E.g., for 4KiB pages, this parameter must be 12.
+				For 2MiB pages, it must be set to 21.
+
+@Return         IMG_UINT64 The PDE protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+	PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt4
+@Description    Calculate the PTE protection flags based on a 4-byte entry
+@Return         IMG_UINT32 The PTE protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
+{
+	IMG_UINT32 ui32MMUFlags = 0;
+
+	if (((MMU_PROTFLAGS_READABLE | MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE | MMU_PROTFLAGS_WRITEABLE))
+	{
+		/* read/write */
+		ui32MMUFlags |= RGXMIPSFW_ENTRYLO_DIRTY_EN;
+	}
+	else if (MMU_PROTFLAGS_READABLE & uiProtFlags)
+	{
+		/* read only */
+	}
+	else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
+	{
+		/* write only */
+		ui32MMUFlags |= RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN;
+	}
+	else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: neither read nor write specified..."));
+	}
+
+	/* cache coherency */
+	if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: cache coherency not supported for MIPS caches"));
+	}
+
+	/* cache setup */
+	if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
+	{
+		ui32MMUFlags |= RGXMIPSFW_ENTRYLO_UNCACHED;
+	}
+	else
+	{
+		ui32MMUFlags |= gui32CachedPolicy <<
+		                RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT;
+	}
+
+	if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
+	{
+		ui32MMUFlags |= RGXMIPSFW_ENTRYLO_VALID_EN;
+		ui32MMUFlags |= RGXMIPSFW_ENTRYLO_GLOBAL_EN;
+	}
+
+	if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
+	{
+		/* PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt4: PMMETA Protect does not exist on MIPS, option discarded")); */
+	}
+
+	return ui32MMUFlags;
+}
+
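+/* Worked example (derived from the code above, for illustration): a
+ * readable+writeable, cached, valid request yields
+ *   RGXMIPSFW_ENTRYLO_DIRTY_EN
+ * | (gui32CachedPolicy << RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT)
+ * | RGXMIPSFW_ENTRYLO_VALID_EN | RGXMIPSFW_ENTRYLO_GLOBAL_EN
+ * i.e. a writeable (dirty), cached, valid and global MIPS EntryLo value.
+ */
+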
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt8
+@Description    Calculate the PTE protection flags based on an 8-byte entry
+@Return         IMG_UINT64 The PTE protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+	PVR_DPF((PVR_DBG_ERROR, "8-byte PTE not supported on this device"));
+
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXGetPageSizeConfigCB
+@Description    Set up configuration for variable sized data pages.
+				RGXPutPageSizeConfigCB has to be called to ensure correct
+				refcounting.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+										   const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+										   const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+										   const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+										   IMG_HANDLE *phPriv)
+{
+	MMU_PAGESIZECONFIG *psPageSizeConfig;
+
+	switch (uiLog2DataPageSize)
+	{
+	case RGXMIPSFW_LOG2_PAGE_SIZE:
+		psPageSizeConfig = &gsPageSizeConfig4KB;
+		break;
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+				 "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+				 uiLog2DataPageSize));
+		return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+	}
+
+	/* Refer caller's pointers to the data */
+	*ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
+	*ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
+	*ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
+
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+	/* Increment ref-count - not that we're allocating anything here
+	   (I'm using static structs), but one day we might, so we want
+	   the Get/Put code to be balanced properly */
+	psPageSizeConfig->uiRefCount++;
+
+	/* This is purely for debug statistics */
+	psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
+										  psPageSizeConfig->uiRefCount);
+#endif
+
+	*phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
+	PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
+
+	return PVRSRV_OK;
+}
+
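+/* Usage sketch (illustrative only): every successful call to
+ * RGXGetPageSizeConfigCB should be balanced by a RGXPutPageSizeConfigCB
+ * so the optional refcounting stays correct. The locals below are
+ * hypothetical.
+ */
+#if 0
+	const MMU_PxE_CONFIG *psPDECfg, *psPTECfg;
+	const MMU_DEVVADDR_CONFIG *psVACfg;
+	IMG_HANDLE hPriv;
+
+	if (RGXGetPageSizeConfigCB(RGXMIPSFW_LOG2_PAGE_SIZE, &psPDECfg,
+	                           &psPTECfg, &psVACfg, &hPriv) == PVRSRV_OK)
+	{
+		/* ... use the configurations ... */
+		RGXPutPageSizeConfigCB(hPriv);
+	}
+#endif
+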
+/*************************************************************************/ /*!
+@Function       RGXPutPageSizeConfigCB
+@Description    Tells this code that the MMU module is done with the
+				configurations set in RGXGetPageSizeConfigCB. This can
+				be a no-op.
+				Called after RGXGetPageSizeConfigCB.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
+{
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+	MMU_PAGESIZECONFIG *psPageSizeConfig;
+	IMG_UINT32 uiLog2DataPageSize;
+
+	uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;
+
+	switch (uiLog2DataPageSize)
+	{
+	case RGXMIPSFW_LOG2_PAGE_SIZE:
+		psPageSizeConfig = &gsPageSizeConfig4KB;
+		break;
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+				 "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+				 uiLog2DataPageSize));
+		return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+	}
+
+	/* Ref-count here is not especially useful, but it's an extra
+	   check that the API is being used correctly */
+	psPageSizeConfig->uiRefCount--;
+#else
+	PVR_UNREFERENCED_PARAMETER(hPriv);
+#endif
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32PDE);
+	PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+	PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS"));
+	return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(ui64PDE);
+	PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+	PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS"));
+	return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmipsmmuinit.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmipsmmuinit.h
new file mode 100644
index 0000000..e0f630f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmipsmmuinit.h
@@ -0,0 +1,94 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation for the MIPS firmware
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* NB: this file is not to be included arbitrarily.  It exists solely
+   for the linkage between rgxinit.c and rgxmipsmmuinit.c, the former
+   being otherwise cluttered by the contents of the latter */
+
+#ifndef _SRVKM_RGXMIPSMMUINIT_H_
+#define _SRVKM_RGXMIPSMMUINIT_H_
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "img_defs.h"
+#include "rgx_mips.h"
+
+/*
+ * Labelling of fields within the virtual address. No PD and PC are
+ * currently used for the MIPS MMU.
+ */
+
+/*
+ * Page Table entry #
+ */
+#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT        (12U)
+#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK       (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+
+
+/* PC entries related definitions */
+/* No PC is currently used for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN            (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT         (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_CLRMSK        (0U)
+
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_SHIFT     (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_CLRMSK    (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_EN        (0U)
+
+/* PD entries related definitions */
+/* No PD is currently used for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN            (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT         (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_CLRMSK        (0U)
+
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_SHIFT     (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_CLRMSK    (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_EN        (0U)
+
+
+IMG_EXPORT PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
+IMG_EXPORT PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* #ifndef _SRVKM_RGXMIPSMMUINIT_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmmuinit.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmmuinit.c
new file mode 100644
index 0000000..bed532c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmmuinit.c
@@ -0,0 +1,1076 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxmmuinit.h"
+#include "rgxmmudefs_km.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "pdump_mmu.h"
+
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "rgx_memallocflags.h"
+#include "rgx_heaps.h"
+#include "pdump_km.h"
+
+
+/* useful macros */
+/* units represented in a bitfield */
+#define UNITS_IN_BITFIELD(Mask, Shift)	(((Mask) >> (Shift)) + 1)
+
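+/* Worked example (illustrative): for the 16kB-page PT index field used
+ * later in this file (Mask = 0x00001fc000, Shift = 14), the macro yields
+ * (0x00001fc000 >> 14) + 1 = 0x7f + 1 = 128 page-table entries. */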
+
+/*
+ * Bits of PT, PD and PC not involving addresses 
+ */
+
+#define RGX_MMUCTRL_PTE_PROTMASK	(RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \
+									 RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \
+									 RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \
+									 RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN | \
+									 RGX_MMUCTRL_PT_DATA_CC_EN | \
+									 RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \
+									 RGX_MMUCTRL_PT_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PDE_PROTMASK	(RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \
+									 ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \
+									 RGX_MMUCTRL_PD_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PCE_PROTMASK	(RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \
+									 RGX_MMUCTRL_PC_DATA_VALID_EN)
+
+
+
+static MMU_PxE_CONFIG sRGXMMUPCEConfig;
+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;
+
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 1MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB;
+
+
+/*
+ *
+ *  Configuration for heaps with 2MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig2MB;
+
+
+/* Forward declaration of protection bits derivation functions, for
+   the following structure */
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
+
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+                                           const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+                                           const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+                                           const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+                                           IMG_HANDLE *phPriv);
+
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
+
+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	/* Setup of Px Entries:
+	 * 
+	 * 
+	 * PAGE TABLE (8 Byte):
+	 * 
+	 * | 62              | 61...40         | 39...12 (varies) | 11...6          | 5             | 4      | 3               | 2               | 1         | 0     |
+	 * | PM/Meta protect | VP Page (39:18) | Physical Page    | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid |
+	 * 
+	 * 
+	 * PAGE DIRECTORY (8 Byte):
+	 * 
+	 *  | 40            | 39...5  (varies)        | 4          | 3...1     | 0     |
+	 *  | Entry Pending | Page Table base address | (reserved) | Page Size | Valid | 
+	 * 
+	 * 
+	 * PAGE CATALOGUE (4 Byte):
+	 * 
+	 *  | 31...4                      | 3...2      | 1             | 0     | 
+	 *  | Page Directory base address | (reserved) | Entry Pending | Valid | 
+	 * 
+	 */
+
+	/* Example of how to get the PD address from a PC entry.
+	 * The procedure is the same for PD and PT entries to retrieve PT and page addresses:
+	 *
+	 * 1) sRGXMMUPCEConfig.uiAddrMask applied to the PC entry with '&':
+	 *  | 31...4   | 3...2      | 1             | 0     |
+	 *  | PD Addr  | 0          | 0             | 0     |
+	 *
+	 * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>':
+	 *  | 27...0   |
+	 *  | PD Addr  |
+	 *
+	 * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<':
+	 *  | 39...0   |
+	 *  | PD Addr  |
+	 */
+
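+	/* A minimal sketch of the three steps above (illustrative only;
+	 * ui32PCEntry is a hypothetical raw PC entry value, and the
+	 * sRGXMMUPCEConfig fields are initialised just below):
+	 */
+#if 0
+	{
+		IMG_UINT32 ui32PCEntry = 0;                              /* hypothetical */
+		IMG_UINT64 ui64PDAddr;
+
+		ui64PDAddr = ui32PCEntry & sRGXMMUPCEConfig.uiAddrMask;  /* 1) mask       */
+		ui64PDAddr >>= sRGXMMUPCEConfig.uiAddrShift;             /* 2) shift down */
+		ui64PDAddr <<= sRGXMMUPCEConfig.uiAddrLog2Align;         /* 3) realign    */
+	}
+#endif
+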
+	sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName =
+		PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+
+	/*
+	 * Setup sRGXMMUPCEConfig
+	 */
+	sRGXMMUPCEConfig.uiBytesPerEntry = 4;     /* 32 bit entries */
+	sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */
+
+	sRGXMMUPCEConfig.uiAddrShift = 4;         /* Shift this many bits to get PD address */
+	sRGXMMUPCEConfig.uiAddrLog2Align = 12;    /* Alignment of PD physical addresses. */
+
+	sRGXMMUPCEConfig.uiProtMask = RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits (pending | valid)*/
+	sRGXMMUPCEConfig.uiProtShift = 0;                       /* Shift this many bits to get the status bits */
+
+	sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN;     /* Mask to get entry valid bit of the PC */
+	sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */
+
+	/*
+	 *  Setup sRGXMMUTopLevelDevVAddrConfig
+	 */
+	sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. device address */
+	sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;  /* Shift a 40 bit virt. device address by this amount to get the PC index */
+	sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask,
+																							   sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift));
+
+	sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */
+	sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;  /* Shift a 40 bit virt. device address by this amount to get the PD index */
+	sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask,
+																							   sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift));
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_4KBDP
+	 */
+	sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12;
+	sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12;
+
+	sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_4KBDP
+	 */
+	sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000);
+	sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12; 
+	sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the data pages, not the PTs */
+
+	sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_4KBDP
+	 */
+	sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask,
+																							   sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift));
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask,
+																							   sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift));
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask,
+																							   sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift));
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
+	sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0;
+
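+	/* Worked example (illustrative): with the 4kB configuration above, a
+	 * 40-bit device virtual address VAddr decomposes as
+	 *   PC index = (VAddr & uiPCIndexMask) >> uiPCIndexShift
+	 *   PD index = (VAddr & uiPDIndexMask) >> uiPDIndexShift
+	 *   PT index = (VAddr & uiPTIndexMask) >> uiPTIndexShift
+	 *   offset   =  VAddr & 0x0000000fff
+	 * so the byte offset within a 4kB data page is simply the low 12 bits.
+	 */
+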
+	/*
+	 * Setup gsPageSizeConfig4KB
+	 */
+	gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
+	gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
+	gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
+	gsPageSizeConfig4KB.uiRefCount = 0;
+	gsPageSizeConfig4KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_16KBDP
+	 */
+	sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10; 
+	sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10; 
+
+	sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_16KBDP
+	 */
+	sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000);
+	sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14;
+	sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14;
+
+	sRGXMMUPTEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_16KBDP
+	 */
+	sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask,
+																							   sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask,
+																							   sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000);
+	sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14;
+	sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask,
+																							   sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift));
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff);
+	sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig16KB
+	 */
+	gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
+	gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
+	gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
+	gsPageSizeConfig16KB.uiRefCount = 0;
+	gsPageSizeConfig16KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_64KBDP
+	 */
+	sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8;
+	sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8;
+
+	sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_64KBDP
+	 */
+	sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000);
+	sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16;
+	sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16;
+
+	sRGXMMUPTEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_64KBDP
+	 */
+	sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask,
+																							   sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask,
+																							   sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000);
+	sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16;
+	sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask,
+																							   sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff);
+	sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig64KB
+	 */
+	gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
+	gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
+	gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
+	gsPageSizeConfig64KB.uiRefCount = 0;
+	gsPageSizeConfig64KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_256KBDP
+	 */
+	sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6;
+	sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6;
+
+	sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP
+	 */
+	sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000);
+	sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18;
+	sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18;
+
+	sRGXMMUPTEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_256KBDP
+	 */
+	sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask,
+																							   sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask,
+																							   sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001c0000);
+	sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18;
+	sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask,
+																							   sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff);
+	sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig256KB
+	 */
+	gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
+	gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
+	gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
+	gsPageSizeConfig256KB.uiRefCount = 0;
+	gsPageSizeConfig256KB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUPDEConfig_1MBDP
+	 */
+	sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	/*
+	 * The hardware requires that page tables be 64-byte (1<<6) aligned,
+	 * even if they contain fewer entries.
+	 */
+	sRGXMMUPDEConfig_1MBDP.uiAddrShift = 6;
+	sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 6;
+
+	sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_1MBDP
+	 */
+	sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000);
+	sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20;
+	sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20;
+
+	sRGXMMUPTEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_1MBDP
+	 */
+	sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask,
+																							   sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask,
+																							   sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000);
+	sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20;
+	sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask,
+																							   sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff);
+	sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig1MB
+	 */
+	gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
+	gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
+	gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
+	gsPageSizeConfig1MB.uiRefCount = 0;
+	gsPageSizeConfig1MB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUPDEConfig_2MBDP
+	 */
+	sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	/*
+	 * The hardware requires that page tables be 64-byte (1<<6) aligned,
+	 * even if they contain fewer entries.
+	 */
+	sRGXMMUPDEConfig_2MBDP.uiAddrShift = 6;
+	sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 6;
+
+	sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_2MBDP
+	 */
+	sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000);
+	sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21;
+	sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21;
+
+	sRGXMMUPTEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_2MBDP
+	 */
+	sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask,
+																							   sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+	sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask,
+																							   sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000);
+	sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 21;
+	sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask,
+																							   sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift));
+
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff);
+	sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
+	sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0;
+
+	/*
+	 * Setup gsPageSizeConfig2MB
+	 */
+	gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
+	gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
+	gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
+	gsPageSizeConfig2MB.uiRefCount = 0;
+	gsPageSizeConfig2MB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUDeviceAttributes
+	 */
+	sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT;
+	sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3;
+	sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+	sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig;
+	sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+	/* Functions for deriving page table/dir/cat protection bits */
+	sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+	sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+	sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+	/* Functions for establishing configurations for PDE/PTE/DEVVADDR
+	   on a per-heap basis */
+	sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+	sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+	sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
+	sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
+
+	psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+    PVRSRV_ERROR eError;
+
+    eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+    psDeviceNode->pfnMMUGetContextID = NULL;
+#endif
+
+    psDeviceNode->psMMUDevAttrs = NULL;
+
+#if defined(DEBUG)
+    PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
+    PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
+             gsPageSizeConfig4KB.uiMaxRefCount));
+    PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
+             gsPageSizeConfig4KB.uiRefCount));
+    PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
+             gsPageSizeConfig16KB.uiMaxRefCount));
+    PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
+             gsPageSizeConfig16KB.uiRefCount));
+    PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
+             gsPageSizeConfig64KB.uiMaxRefCount));
+    PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
+             gsPageSizeConfig64KB.uiRefCount));
+    PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
+             gsPageSizeConfig256KB.uiMaxRefCount));
+    PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
+             gsPageSizeConfig256KB.uiRefCount));
+    PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
+             gsPageSizeConfig1MB.uiMaxRefCount));
+    PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
+             gsPageSizeConfig1MB.uiRefCount));
+    PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
+             gsPageSizeConfig2MB.uiMaxRefCount));
+    PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
+             gsPageSizeConfig2MB.uiRefCount));
+#endif
+    if (gsPageSizeConfig4KB.uiRefCount > 0 ||
+        gsPageSizeConfig16KB.uiRefCount > 0 ||
+        gsPageSizeConfig64KB.uiRefCount > 0 ||
+        gsPageSizeConfig256KB.uiRefCount > 0 ||
+        gsPageSizeConfig1MB.uiRefCount > 0 ||
+        gsPageSizeConfig2MB.uiRefCount > 0
+        )
+    {
+        PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
+    }
+
+    return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt4
+@Description    Calculate the PCE protection flags based on a 4-byte entry
+@Return         IMG_UINT32 The PCE protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
+{
+	return (uiProtFlags & MMU_PROTFLAGS_INVALID) ? 0 : RGX_MMUCTRL_PC_DATA_VALID_EN;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt8
+@Description    Calculate the PCE protection flags based on an 8-byte entry
+@Return         IMG_UINT64 The PCE protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+	PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device"));
+	return 0;	
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt4
+@Description    Derive the PDE protection flags based on a 4-byte entry
+@Return         IMG_UINT32 The PDE protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt8
+@Description    derive the PDE protection flags based on an 8 byte entry
+
+@Input          uiLog2DataPageSize The log2 of the required page size.
+                E.g., for 4KiB pages, this parameter must be 12.
+                For 2MiB pages, it must be set to 21.
+
+@Return         IMG_UINT64 The PDE protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_UINT64 ret_value = 0; /* 0 means invalid */
+
+	if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0) /* if not invalid */
+	{
+		switch (uiLog2DataPageSize)
+		{
+			case RGX_HEAP_4KB_PAGE_SHIFT:
+				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB;
+				break;
+			case RGX_HEAP_16KB_PAGE_SHIFT:
+				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB;
+				break;
+			case RGX_HEAP_64KB_PAGE_SHIFT:
+				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB;
+				break;
+			case RGX_HEAP_256KB_PAGE_SHIFT:
+				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB;
+				break;
+			case RGX_HEAP_1MB_PAGE_SHIFT:
+				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB;
+				break;
+			case RGX_HEAP_2MB_PAGE_SHIFT:
+				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB;
+				break;
+			default:
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]",
+						 __FILE__, __LINE__, __FUNCTION__, uiLog2DataPageSize));
+		}
+	}
+	return ret_value;
+}
+
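+/* Worked example (derived from the switch above, for illustration): a
+ * valid 64kB data-page heap (uiLog2DataPageSize == 16) produces
+ *   RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB
+ * whereas any request carrying MMU_PROTFLAGS_INVALID returns 0.
+ */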
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt4
+@Description    Calculate the PTE protection flags based on a 4-byte entry
+@Return         IMG_UINT32 The PTE protection flags
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device"));
+
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt8
+@Description    Calculate the PTE protection flags based on an 8-byte entry
+@Return         IMG_UINT64 The PTE protection flags
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_UINT64 ui64MMUFlags = 0;
+
+	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+	if (((MMU_PROTFLAGS_READABLE | MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE | MMU_PROTFLAGS_WRITEABLE))
+	{
+		/* read/write */
+	}
+	else if (MMU_PROTFLAGS_READABLE & uiProtFlags)
+	{
+		/* read only */
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN;
+	}
+	else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
+	{
+		/* write only */
+		PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: write-only is not possible on this device"));
+	}
+	else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified..."));
+	}
+
+	/* cache coherency */
+	if(MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
+	{
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN;
+	}
+
+	/* cache setup */
+	if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
+	{
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN;
+	}
+
+    if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
+    {
+        ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN;
+    }
+
+	if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
+	{
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN;
+	}
+
+	return ui64MMUFlags;
+}
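+
+/* Illustrative note (not part of the original source): a cached, coherent,
+ * read/write, valid mapping, i.e. uiProtFlags =
+ *     MMU_PROTFLAGS_READABLE | MMU_PROTFLAGS_WRITEABLE |
+ *     MMU_PROTFLAGS_CACHED | MMU_PROTFLAGS_CACHE_COHERENT
+ * yields RGX_MMUCTRL_PT_DATA_CC_EN | RGX_MMUCTRL_PT_DATA_VALID_EN: the
+ * READ_ONLY bit is clear because the entry is writeable, and SLC_BYPASS is
+ * clear because the entry is cached. */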
+
+
+/*************************************************************************/ /*!
+@Function       RGXGetPageSizeConfigCB
+@Description    Set up configuration for variable sized data pages.
+                RGXPutPageSizeConfigCB has to be called to ensure correct
+                refcounting.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+                                           const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+                                           const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+                                           const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+                                           IMG_HANDLE *phPriv)
+{
+	MMU_PAGESIZECONFIG *psPageSizeConfig;
+
+	switch (uiLog2DataPageSize)
+	{
+	case RGX_HEAP_4KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig4KB;
+		break;
+	case RGX_HEAP_16KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig16KB;
+		break;
+	case RGX_HEAP_64KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig64KB;
+		break;
+	case RGX_HEAP_256KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig256KB;
+		break;
+	case RGX_HEAP_1MB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig1MB;
+		break;
+	case RGX_HEAP_2MB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig2MB;
+		break;
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+		         "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+		         uiLog2DataPageSize));
+		return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+	}
+
+	/* Refer caller's pointers to the data */
+	*ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
+	*ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
+	*ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
+
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+	/* Increment ref-count - not that we're allocating anything here
+	   (I'm using static structs), but one day we might, so we want
+	   the Get/Put code to be balanced properly */
+	psPageSizeConfig->uiRefCount++;
+
+	/* This is purely for debug statistics */
+	psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
+	                                      psPageSizeConfig->uiRefCount);
+#endif
+
+	*phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
+	PVR_ASSERT(uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
+
+	return PVRSRV_OK;
+}
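+
+/* Illustrative usage sketch (hypothetical caller, not part of the original
+ * source): callers are expected to pair the callbacks, e.g.
+ *
+ *     RGXGetPageSizeConfigCB(RGX_HEAP_4KB_PAGE_SHIFT,
+ *                            &psPDE, &psPTE, &psVA, &hPriv);
+ *     ...use the configs...
+ *     RGXPutPageSizeConfigCB(hPriv);
+ *
+ * hPriv merely carries the log2 page size back to the Put callback. */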
+
+/*************************************************************************/ /*!
+@Function       RGXPutPageSizeConfigCB
+@Description    Tells this code that the mmu module is done with the
+                configurations set in RGXGetPageSizeConfigCB.  This can
+                be a no-op.
+                Called after RGXGetPageSizeConfigCB.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
+{
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+	MMU_PAGESIZECONFIG *psPageSizeConfig;
+	IMG_UINT32 uiLog2DataPageSize;
+
+	uiLog2DataPageSize = (IMG_UINT32)(uintptr_t)hPriv;
+
+	switch (uiLog2DataPageSize)
+	{
+	case RGX_HEAP_4KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig4KB;
+		break;
+	case RGX_HEAP_16KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig16KB;
+		break;
+	case RGX_HEAP_64KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig64KB;
+		break;
+	case RGX_HEAP_256KB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig256KB;
+		break;
+	case RGX_HEAP_1MB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig1MB;
+		break;
+	case RGX_HEAP_2MB_PAGE_SHIFT:
+		psPageSizeConfig = &gsPageSizeConfig2MB;
+		break;
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+		         "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+		         uiLog2DataPageSize));
+		return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+	}
+
+	/* Ref-count here is not especially useful, but it's an extra
+	   check that the API is being used correctly */
+	psPageSizeConfig->uiRefCount--;
+#else
+	PVR_UNREFERENCED_PARAMETER(hPriv);
+#endif
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32PDE);
+	PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+	PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+	return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+	switch (ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK))
+	{
+		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB:
+			*pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT;
+			break;
+		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB:
+			*pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+			break;
+		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB:
+			*pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT;
+			break;
+		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB:
+			*pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT;
+			break;
+		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB:
+			*pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT;
+			break;
+		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB:
+			*pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT;
+			break;
+		default:
+			return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+	}
+	return PVRSRV_OK;
+}
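+
+/* Illustrative note (not part of the original source): RGXGetPageSizeFromPDE8
+ * inverts the page-size encoding applied by RGXDerivePDEProt8; e.g. a PDE
+ * carrying RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB decodes back to
+ * RGX_HEAP_2MB_PAGE_SHIFT (21), and any unknown encoding is reported as
+ * PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE. */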
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmmuinit.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmmuinit.h
new file mode 100644
index 0000000..c79a72e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxmmuinit.h
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* NB: this file is not to be included arbitrarily.  It exists solely
+   for the linkage between rgxinit.c and rgxmmuinit.c, the former
+   being otherwise cluttered by the contents of the latter */
+
+#ifndef _SRVKM_RGXMMUINIT_H_
+#define _SRVKM_RGXMMUINIT_H_
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "img_defs.h"
+
+IMG_EXPORT PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
+IMG_EXPORT PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* #ifndef _SRVKM_RGXMMUINIT_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxpdump.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxpdump.c
new file mode 100644
index 0000000..84cb6c1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxpdump.c
@@ -0,0 +1,338 @@
+/*************************************************************************/ /*!
+@File			rgxpdump.c
+@Title          Device specific pdump routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific pdump functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(PDUMP)
+#include "pvrsrv.h"
+#include "devicemem_pdump.h"
+#include "rgxpdump.h"
+#include "rgx_bvnc_defs_km.h"
+
+/*
+ * There are two different sets of functions, one for META and one for MIPS,
+ * because the PDump player does not implement support for the MIPS MMU yet.
+ * So for MIPS builds we cannot use DevmemPDumpSaveToFileVirtual; we have to
+ * use DevmemPDumpSaveToFile instead.
+ */
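+
+/* Illustrative sketch (not part of the original source): for a hypothetical
+ * buffer psMemDesc of ui32Size bytes the two paths differ only in how the
+ * source is addressed:
+ *
+ *     // META: the PDump player can resolve device virtual addresses
+ *     DevmemPDumpSaveToFileVirtual(psMemDesc, 0, ui32Size,
+ *                                  "out.bin", 0, ui32PDumpFlags);
+ *
+ *     // MIPS: no player-side MMU support, so dump by descriptor offset only
+ *     DevmemPDumpSaveToFile(psMemDesc, 0, ui32Size, "out.bin", 0);
+ */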
+static PVRSRV_ERROR _MetaDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+                                               PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	/* TA signatures */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TA signatures and checksums Buffer");
+	DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTAChecksMemDesc,
+								 0,
+								 psDevInfo->ui32SigTAChecksSize,
+								 "out.tasig",
+								 0,
+								 ui32PDumpFlags);
+
+	/* 3D signatures */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer");
+	DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSig3DChecksMemDesc,
+								 0,
+								 psDevInfo->ui32Sig3DChecksSize,
+								 "out.3dsig",
+								 0,
+								 ui32PDumpFlags);
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+	{
+		/* RT signatures */
+		PDumpCommentWithFlags(ui32PDumpFlags, "** Dump RTU signatures and checksums Buffer");
+		DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigRTChecksMemDesc,
+									 0,
+									 psDevInfo->ui32SigRTChecksSize,
+									 "out.rtsig",
+									 0,
+									 ui32PDumpFlags);
+		/* SH signatures */
+		PDumpCommentWithFlags(ui32PDumpFlags, "** Dump SHG signatures and checksums Buffer");
+		DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigSHChecksMemDesc,
+									 0,
+									 psDevInfo->ui32SigSHChecksSize,
+									 "out.shsig",
+									 0,
+									 ui32PDumpFlags);
+	}
+
+	return PVRSRV_OK;
+}
+static PVRSRV_ERROR _MetaDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+									  PVRSRV_DEVICE_NODE	*psDeviceNode,
+									  IMG_UINT32			ui32PDumpFlags)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	IMG_UINT32 		ui32ThreadNum, ui32Size, ui32OutFileOffset;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	/* Dump trace buffers */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump trace buffers");
+	for (ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++)
+	{
+		/*
+		 * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of
+		 * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is
+		 * "expression must have a constant value".
+		 */
+		const IMG_DEVMEM_OFFSET_T uiTraceBufThreadNumOff
+		= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]);
+
+		/* the thread's ui32TracePointer value */
+		ui32Size = sizeof(IMG_UINT32);
+		DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+								uiTraceBufThreadNumOff,
+								ui32Size,
+								"out.trace",
+								ui32OutFileOffset,
+								ui32PDumpFlags);
+		ui32OutFileOffset += ui32Size;
+
+		/* trace buffer */
+		ui32Size = RGXFW_TRACE_BUFFER_SIZE * sizeof(IMG_UINT32);
+		PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]);
+		DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum],
+								0, /* 0 offset in the trace buffer mem desc */
+								ui32Size,
+								"out.trace",
+								ui32OutFileOffset,
+								ui32PDumpFlags);
+		ui32OutFileOffset += ui32Size;
+
+		/* assert info buffer */
+		ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+				+ RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+				+ sizeof(IMG_UINT32);
+		DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+								offsetof(RGXFWIF_TRACEBUF, sTraceBuf) /* move to first element of sTraceBuf */
+									+ ui32ThreadNum * sizeof(RGXFWIF_TRACEBUF_SPACE) /* skip required number of sTraceBuf elements */
+									+ offsetof(RGXFWIF_TRACEBUF_SPACE, sAssertBuf), /* offset into its sAssertBuf, to be pdumped */
+								ui32Size,
+								"out.trace",
+								ui32OutFileOffset,
+								ui32PDumpFlags);
+		ui32OutFileOffset += ui32Size;
+	}
+
+	/* FW HWPerf buffer is always allocated when PDUMP is defined, irrespective of HWPerf events being enabled/disabled */
+	PVR_ASSERT(psDevInfo->psRGXFWIfHWPerfBufMemDesc);
+
+	/* Dump hwperf buffer */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump HWPerf Buffer");
+	DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+								 0,
+								 psDevInfo->ui32RGXFWIfHWPerfBufSize,
+								 "out.hwperf",
+								 0,
+								 ui32PDumpFlags);
+
+	return PVRSRV_OK;
+
+}
+
+
+static PVRSRV_ERROR _MipsDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+                                               PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* TA signatures */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TA signatures and checksums Buffer");
+
+	DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigTAChecksMemDesc,
+								 0,
+								 psDevInfo->ui32SigTAChecksSize,
+								 "out.tasig",
+								 0);
+
+	/* 3D signatures */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer");
+	DevmemPDumpSaveToFile(psDevInfo->psRGXFWSig3DChecksMemDesc,
+								 0,
+								 psDevInfo->ui32Sig3DChecksSize,
+								 "out.3dsig",
+								 0);
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+	{
+		/* RT signatures */
+		PDumpCommentWithFlags(ui32PDumpFlags, "** Dump RTU signatures and checksums Buffer");
+		DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigRTChecksMemDesc,
+									 0,
+									 psDevInfo->ui32SigRTChecksSize,
+									 "out.rtsig",
+									 0);
+
+		/* SH signatures */
+		PDumpCommentWithFlags(ui32PDumpFlags, "** Dump SHG signatures and checksums Buffer");
+		DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigSHChecksMemDesc,
+									 0,
+									 psDevInfo->ui32SigSHChecksSize,
+									 "out.shsig",
+									 0);
+	}
+
+	return PVRSRV_OK;
+
+}
+
+static PVRSRV_ERROR _MipsDumpTraceBufferKM(CONNECTION_DATA *psConnection,
+                                           PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           IMG_UINT32 ui32PDumpFlags)
+{
+	IMG_UINT32 		ui32ThreadNum, ui32Size, ui32OutFileOffset;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	/* Dump trace buffers */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump trace buffers");
+	for (ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++)
+	{
+		/*
+		 * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of
+		 * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is
+		 * "expression must have a constant value".
+		 */
+		const IMG_DEVMEM_OFFSET_T uiTraceBufOff
+		= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]);
+
+		/* Same again... */
+		const IMG_DEVMEM_OFFSET_T uiTraceBufSpaceAssertBufOff
+		= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF_SPACE *)0)->sAssertBuf);
+
+		/* the thread's ui32TracePointer value */
+		ui32Size = sizeof(IMG_UINT32);
+		DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+								uiTraceBufOff,
+								ui32Size,
+								"out.trace",
+								ui32OutFileOffset);
+		ui32OutFileOffset += ui32Size;
+
+		/* trace buffer */
+		ui32Size = RGXFW_TRACE_BUFFER_SIZE * sizeof(IMG_UINT32);
+		PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]);
+		DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum],
+								0, /* 0 offset in the trace buffer mem desc */
+								ui32Size,
+								"out.trace",
+								ui32OutFileOffset);
+		ui32OutFileOffset += ui32Size;
+
+		/* assert info buffer */
+		ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+				+ RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+				+ sizeof(IMG_UINT32);
+		DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+								uiTraceBufOff + uiTraceBufSpaceAssertBufOff,
+								ui32Size,
+								"out.trace",
+								ui32OutFileOffset);
+		ui32OutFileOffset += ui32Size;
+	}
+
+	/* Dump hwperf buffer */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump HWPerf Buffer");
+	DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+								 0,
+								 psDevInfo->ui32RGXFWIfHWPerfBufSize,
+								 "out.hwperf",
+								 0);
+
+	return PVRSRV_OK;
+
+}
+
+
+/*
+ * PVRSRVPDumpSignatureBufferKM
+ */
+PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+                                          PVRSRV_DEVICE_NODE	*psDeviceNode,
+                                          IMG_UINT32			ui32PDumpFlags)
+{
+	if ((psDeviceNode->pfnCheckDeviceFeature) &&
+	    psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_MIPS_BIT_MASK))
+	{
+		return _MipsDumpSignatureBufferKM(psConnection,
+		                                  psDeviceNode,
+		                                  ui32PDumpFlags);
+	}
+	else
+	{
+		return _MetaDumpSignatureBufferKM(psConnection,
+		                                  psDeviceNode,
+		                                  ui32PDumpFlags);
+	}
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDeviceNode,
+                                      IMG_UINT32 ui32PDumpFlags)
+{
+	if ((psDeviceNode->pfnCheckDeviceFeature) &&
+	    psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_MIPS_BIT_MASK))
+	{
+		return _MipsDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags);
+	}
+	else
+	{
+		return _MetaDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags);
+	}
+}
+
+#endif /* PDUMP */
+
+/******************************************************************************
+ End of file (rgxpdump.c)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxpdump.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxpdump.h
new file mode 100644
index 0000000..72b8371
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxpdump.h
@@ -0,0 +1,118 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX pdump Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX pdump functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "rgxdevice.h"
+#include "device.h"
+#include "devicemem.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+
+#if defined(PDUMP)
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPDumpSignatureBufferKM
+
+ @Description
+
+ Dumps TA and 3D signature and checksum buffers
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+                                          PVRSRV_DEVICE_NODE * psDeviceNode,
+                                          IMG_UINT32 ui32PDumpFlags);
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPDumpTraceBufferKM
+
+ @Description
+
+ Dumps the firmware trace buffers and the HWPerf buffer
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+                                      PVRSRV_DEVICE_NODE *psDeviceNode,
+                                      IMG_UINT32 ui32PDumpFlags);
+#else  	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpSignatureBufferKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+                             PVRSRV_DEVICE_NODE	*psDeviceNode,
+							 IMG_UINT32			ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpTraceBufferKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE	*psDeviceNode,
+						 IMG_UINT32			ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+#endif	/* PDUMP */
+/******************************************************************************
+ End of file (rgxpdump.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxpower.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxpower.c
new file mode 100644
index 0000000..e763d3d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxpower.c
@@ -0,0 +1,973 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific power routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include "rgxpower.h"
+#include "rgxinit.h"
+#include "rgx_fwif_km.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "rgxdebug.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "rgxtimecorr.h"
+#include "devicemem_utils.h"
+#include "htbserver.h"
+#include "rgxstartstop.h"
+#include "sync.h"
+#include "lists.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#if defined(PVR_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+
+static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PVRSRV_ERROR     eError;
+	RGXFWIF_KCCB_CMD sCmd;
+
+	/* Send the Timeout notification to the FW */
+	sCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+	sCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+	sCmd.uCmdData.sPowData.uPoweReqData.ePowRequestType = RGXFWIF_POWER_HOST_TIMEOUT;
+
+	eError = RGXSendCommand(psDevInfo,
+	                        RGXFWIF_DM_GP,
+	                        &sCmd,
+	                        sizeof(sCmd),
+	                        PDUMP_FLAGS_NONE);
+
+	return eError;
+}
+
+static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb;
+	IMG_UINT64 *paui64StatsCounters;
+	IMG_UINT64 ui64LastPeriod;
+	IMG_UINT64 ui64LastState;
+	IMG_UINT64 ui64LastTime;
+	IMG_UINT64 ui64TimeNow;
+
+	psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0];
+
+	OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+	ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(OSClockns64());
+
+	/* Update counters to account for the time since the last update */
+	ui64LastState  = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord);
+	ui64LastTime   = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord);
+	ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+	paui64StatsCounters[ui64LastState] += ui64LastPeriod;
+
+	/* Update state and time of the latest update */
+	psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+
+	OSLockRelease(psDevInfo->hGPUUtilLock);
+}
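+
+/* Illustrative note (an assumption about the macros, not part of the original
+ * source): RGXFWIF_GPU_UTIL_MAKE_WORD appears to pack a timestamp and a small
+ * state code into one 64-bit word so state and time are snapshotted together.
+ * The update above is then equivalent to:
+ *
+ *     stats[last_state] += now - last_time;    // credit the elapsed period
+ *     last_word = make_word(now, last_state);  // start a new period
+ */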
+
+static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	if (psDevConfig->pfnTDRGXStop == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!"));
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData);
+#else
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	eError = RGXStop(&psDevInfo->sLayerParams);
+#endif
+
+	return eError;
+}
+
+/*
+	RGXPrePowerState
+*/
+PVRSRV_ERROR RGXPrePowerState (IMG_HANDLE				hDevHandle,
+							   PVRSRV_DEV_POWER_STATE	eNewPowerState,
+							   PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+							   IMG_BOOL					bForced)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	if ((eNewPowerState != eCurrentPowerState) &&
+		(eNewPowerState != PVRSRV_DEV_POWER_STATE_ON))
+	{
+		PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+		PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+		RGXFWIF_KCCB_CMD	sPowCmd;
+		RGXFWIF_TRACEBUF	*psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+		/* Send the Power off request to the FW */
+		sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+		sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ;
+		sPowCmd.uCmdData.sPowData.uPoweReqData.bForced = bForced;
+
+		eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set Power sync prim",
+				__FUNCTION__));
+			return eError;
+		}
+
+		eError = RGXSendCommand(psDevInfo,
+		                        RGXFWIF_DM_GP,
+		                        &sPowCmd,
+		                        sizeof(sPowCmd),
+		                        PDUMP_FLAGS_NONE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Failed to send Power off request"));
+			return eError;
+		}
+
+		/* Wait for the firmware to complete processing. It cannot use PVRSRVWaitForValueKM as it relies
+		   on the EventObject which is signalled in this MISR */
+		eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 0x1, 0xFFFFFFFF);
+
+		/* Check the Power state after the answer */
+		if (eError == PVRSRV_OK)
+		{
+			/* Finally, de-initialise some registers. */
+			if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+			{
+#if !defined(NO_HARDWARE)
+				IMG_UINT32 ui32TID;
+				for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+				{
+					/* Wait for the pending META/MIPS-to-host interrupts to come back. */
+					eError = PVRSRVPollForValueKM(&psDevInfo->aui32SampleIRQCount[ui32TID],
+										          psFWTraceBuf->aui32InterruptCount[ui32TID],
+										          0xffffffff);
+
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, \
+								"RGXPrePowerState: Wait for pending interrupts failed. Thread %u: Host:%u, FW: %u", \
+								ui32TID, \
+								psDevInfo->aui32SampleIRQCount[ui32TID], \
+								psFWTraceBuf->aui32InterruptCount[ui32TID]));
+
+						RGX_WaitForInterruptsTimeout(psDevInfo);
+						break;
+					}
+				}
+#endif /* NO_HARDWARE */
+
+				/* Update GPU frequency and timer correlation related data */
+				RGXGPUFreqCalibratePrePowerOff(psDeviceNode);
+
+				/* Update GPU state counters */
+				_RGXUpdateGPUUtilStats(psDevInfo);
+
+#if defined(PVR_DVFS)
+				eError = SuspendDVFS();
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Failed to suspend DVFS"));
+					return eError;
+				}
+#endif
+
+				psDevInfo->bRGXPowered = IMG_FALSE;
+
+				eError = RGXDoStop(psDeviceNode);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: RGXDoStop failed (%s)",
+					         PVRSRVGetErrorStringKM(eError)));
+					eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+				}
+			}
+			else
+			{
+				/* the sync was updated but the pow state isn't off -> the FW denied the transition */
+				eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+
+				if (bForced)
+				{	/* It is an error for a forced request to be denied */
+					PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Failure to power off during a forced power off. FW: %d", psFWTraceBuf->ePowState));
+				}
+			}
+		}
+		else if (eError == PVRSRV_ERROR_TIMEOUT)
+		{
+			/* timeout waiting for the FW to ack the request: return timeout */
+			PVR_DPF((PVR_DBG_WARNING,"RGXPrePowerState: Timeout waiting for powoff ack from the FW"));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Error waiting for powoff ack from the FW (%s)", PVRSRVGetErrorStringKM(eError)));
+			eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+		}
+
+	}
+
+	return eError;
+}
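+
+/* Illustrative summary (not part of the original source) of the power-off
+ * handshake implemented above:
+ *
+ *     SyncPrimSet(psPowSyncPrim, 0);            // arm the FW ack
+ *     RGXSendCommand(RGXFWIF_POW_OFF_REQ);      // request power-off
+ *     PVRSRVPollForValueKM(..., 0x1, ...);      // wait for the FW ack
+ *     check psFWTraceBuf->ePowState             // OFF: proceed, else denied
+ *
+ * which lets a denied transition be distinguished from a timed-out one. */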
+
+static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+
+	if (psDevConfig->pfnTDRGXStart == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: TDRGXStart not implemented!"));
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData);
+#else
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	eError = RGXStart(&psDevInfo->sLayerParams);
+#endif
+
+	return eError;
+}
+
+/*
+	RGXPostPowerState
+*/
+PVRSRV_ERROR RGXPostPowerState (IMG_HANDLE				hDevHandle,
+								PVRSRV_DEV_POWER_STATE	eNewPowerState,
+								PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+								IMG_BOOL				bForced)
+{
+	if ((eNewPowerState != eCurrentPowerState) &&
+		(eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON))
+	{
+		PVRSRV_ERROR		 eError;
+		PVRSRV_DEVICE_NODE	 *psDeviceNode = hDevHandle;
+		PVRSRV_RGXDEV_INFO	 *psDevInfo = psDeviceNode->pvDevice;
+		RGXFWIF_INIT *psRGXFWInit;
+
+		if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+		{
+			psDevInfo->bRGXPowered = IMG_TRUE;
+			return PVRSRV_OK;
+		}
+
+		if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+		{
+			/* Update GPU frequency and timer correlation related data */
+			RGXGPUFreqCalibratePostPowerOn(psDeviceNode);
+
+			/* Update GPU state counters */
+			_RGXUpdateGPUUtilStats(psDevInfo);
+
+			eError = RGXDoStart(psDeviceNode);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXPostPowerState: RGXDoStart failed"));
+				return eError;
+			}
+
+			OSMemoryBarrier();
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+			eError = ValidateFWImageWithSP(psDevInfo);
+			if (eError != PVRSRV_OK) return eError;
+#endif
+
+			eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+			                                  (void **)&psRGXFWInit);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				         "RGXPostPowerState: Failed to acquire kernel fw if ctl (%u)",
+				         eError));
+				return eError;
+			}
+
+			/*
+			 * Check whether the FW has started by polling on bFirmwareStarted flag
+			 */
+			if (PVRSRVPollForValueKM((IMG_UINT32 *)&psRGXFWInit->bFirmwareStarted,
+			                         IMG_TRUE,
+			                         0xFFFFFFFF) != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed."));
+				eError = PVRSRV_ERROR_TIMEOUT;
+
+				/*
+				 * When bFirmwareStarted fails some info may be gained by doing the following
+				 * debug dump but unfortunately it could lockup some cores or cause other power
+				 * lock issues. The code is placed here to provide a possible example approach
+				 * when all other ideas have been tried.
+				 */
+				/*{
+					PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
+				
+					if (psPowerDev)
+					{
+						PVRSRV_DEV_POWER_STATE  eOldPowerState = psPowerDev->eCurrentPowerState;
+
+						PVRSRVPowerUnlock(psDeviceNode);
+						psPowerDev->eCurrentPowerState = PVRSRV_DEV_POWER_STATE_ON;
+						RGXDumpDebugInfo(NULL, psDeviceNode->pvDevice);
+						psPowerDev->eCurrentPowerState = eOldPowerState;
+						PVRSRVPowerLock(psDeviceNode);
+					}
+				}*/
+
+				DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+				return eError;
+			}
+
+#if defined(PDUMP)
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start.");
+			eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+			                                offsetof(RGXFWIF_INIT, bFirmwareStarted),
+			                                IMG_TRUE,
+			                                0xFFFFFFFFU,
+			                                PDUMP_POLL_OPERATOR_EQUAL,
+			                                PDUMP_FLAGS_CONTINUOUS);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				         "RGXPostPowerState: problem pdumping POL for psRGXFWIfInitMemDesc (%d)",
+				         eError));
+				DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+				return eError;
+			}
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+			SetFirmwareStartTime(psRGXFWInit->ui32FirmwareStartedTimeStamp);
+#endif
+
+			HTBSyncPartitionMarker(psRGXFWInit->ui32MarkerVal);
+
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+
+			psDevInfo->bRGXPowered = IMG_TRUE;
+
+#if defined(PVR_DVFS)
+			eError = ResumeDVFS();
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXPostPowerState: Failed to resume DVFS"));
+				return eError;
+			}
+#endif
+		}
+	}
+
+	PDUMPCOMMENT("RGXPostPowerState: Current state: %d, New state: %d", eCurrentPowerState, eNewPowerState);
+
+	return PVRSRV_OK;
+}
+
+/*
+	RGXPreClockSpeedChange
+*/
+PVRSRV_ERROR RGXPreClockSpeedChange (IMG_HANDLE				hDevHandle,
+									 PVRSRV_DEV_POWER_STATE	eCurrentPowerState)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_DATA			*psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+	RGXFWIF_TRACEBUF	*psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+	PVR_UNREFERENCED_PARAMETER(psRGXData);
+
+	PVR_DPF((PVR_DBG_MESSAGE,"RGXPreClockSpeedChange: RGX clock speed was %uHz",
+			psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+
+	if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF)
+		&& (psFWTraceBuf->ePowState != RGXFWIF_POW_OFF))
+	{
+		/* Update GPU frequency and timer correlation related data */
+		RGXGPUFreqCalibratePreClockSpeedChange(psDeviceNode);
+	}
+
+	return eError;
+}
+
+/*
+	RGXPostClockSpeedChange
+*/
+PVRSRV_ERROR RGXPostClockSpeedChange (IMG_HANDLE				hDevHandle,
+									  PVRSRV_DEV_POWER_STATE	eCurrentPowerState)
+{
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_DATA			*psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	RGXFWIF_TRACEBUF	*psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	IMG_UINT32 		ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	/* Update runtime configuration with the new value */
+	psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed = ui32NewClockSpeed;
+
+	if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF)
+		&& (psFWTraceBuf->ePowState != RGXFWIF_POW_OFF))
+	{
+		RGXFWIF_KCCB_CMD	sCOREClkSpeedChangeCmd;
+
+		RGXGPUFreqCalibratePostClockSpeedChange(psDeviceNode, ui32NewClockSpeed);
+
+		sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE;
+		sCOREClkSpeedChangeCmd.uCmdData.sCORECLKSPEEDCHANGEData.ui32NewClockSpeed = ui32NewClockSpeed;
+
+		/* Ensure the new clock speed is written to memory before requesting the FW to read it */
+		OSMemoryBarrier();
+
+		PDUMPCOMMENT("Scheduling CORE clock speed change command");
+
+		PDUMPPOWCMDSTART();
+		eError = RGXSendCommand(psDeviceNode->pvDevice,
+		                           RGXFWIF_DM_GP,
+		                           &sCOREClkSpeedChangeCmd,
+		                           sizeof(sCOREClkSpeedChangeCmd),
+		                           PDUMP_FLAGS_NONE);
+		PDUMPPOWCMDEND();
+
+		if (eError != PVRSRV_OK)
+		{
+			PDUMPCOMMENT("Scheduling CORE clock speed change command failed");
+			PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError));
+			return eError;
+		}
+
+		PVR_DPF((PVR_DBG_MESSAGE,"RGXPostClockSpeedChange: RGX clock speed changed to %uHz",
+				psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+	}
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RGXDustCountChange
+
+ @Description
+
+	Changes the number of DUSTs
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   ui32NumberOfDusts : Number of DUSTs to make transition to
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE				hDevHandle,
+								IMG_UINT32				ui32NumberOfDusts)
+{
+
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR		eError;
+	RGXFWIF_KCCB_CMD 	sDustCountChange;
+	IMG_UINT32			ui32MaxAvailableDusts = MAX(1, (psDevInfo->sDevFeatureCfg.ui32NumClusters/2));
+	RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	if (ui32NumberOfDusts > ui32MaxAvailableDusts)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR,
+				"RGXDustCountChange: Invalid number of DUSTs (%u) while expecting value within <0,%u>. Error:%u",
+				ui32NumberOfDusts,
+				ui32MaxAvailableDusts,
+				eError));
+		return eError;
+	}
+
+	#if defined(FIX_HW_BRN_59042)
+	if (ui32NumberOfDusts < ui32MaxAvailableDusts && (ui32NumberOfDusts & 0x1))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"RGXDustCountChange: Invalid number of DUSTs (%u) due to HW restriction. Allowed values are :-",
+				ui32NumberOfDusts));
+		switch (ui32MaxAvailableDusts)
+		{
+			case 2:	PVR_DPF((PVR_DBG_ERROR, "0, 2")); break;
+			case 3:	PVR_DPF((PVR_DBG_ERROR, "0, 2, 3")); break;
+			case 4:	PVR_DPF((PVR_DBG_ERROR, "0, 2, 4")); break;
+			case 5:	PVR_DPF((PVR_DBG_ERROR, "0, 2, 4, 5")); break;
+			case 6:	PVR_DPF((PVR_DBG_ERROR, "0, 2, 4, 6")); break;
+			default: break;
+		}
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	#endif
+
+	psRuntimeCfg->ui32DefaultDustsNumInit = ui32NumberOfDusts;
+
+	#if !defined(NO_HARDWARE)
+	{
+		RGXFWIF_TRACEBUF 	*psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+		if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+		{
+			return PVRSRV_OK;
+		}
+
+		if (psFWTraceBuf->ePowState != RGXFWIF_POW_FORCED_IDLE)
+		{
+			eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+			PVR_DPF((PVR_DBG_ERROR,"RGXDustCountChange: Attempt to change dust count when not IDLE"));
+			return eError;
+		}
+	}
+	#endif
+
+	eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set Power sync prim",
+			__FUNCTION__));
+		return eError;
+	}
+
+	sDustCountChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+	sDustCountChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUMDUST_CHANGE;
+	sDustCountChange.uCmdData.sPowData.uPoweReqData.ui32NumOfDusts = ui32NumberOfDusts;
+
+	PDUMPCOMMENT("Scheduling command to change Dust Count to %u", ui32NumberOfDusts);
+	eError = RGXSendCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sDustCountChange,
+				sizeof(sDustCountChange),
+				PDUMP_FLAGS_NONE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PDUMPCOMMENT("Scheduling command to change Dust Count failed. Error:%u", eError);
+		PVR_DPF((PVR_DBG_ERROR, "RGXDustCountChange: Scheduling KCCB to change Dust Count failed. Error:%u", eError));
+		return eError;
+	}
+
+	/* Wait for the firmware to answer. */
+	eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 0x1, 0xFFFFFFFF);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXDustCountChange: Timeout waiting for idle request"));
+		return eError;
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXDustCountChange: Poll for Kernel SyncPrim [0x%p] on DM %d ", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+	SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+					1,
+					0xffffffff,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					0);
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*
+ @Function	RGXAPMLatencyChange
+*/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE				hDevHandle,
+				IMG_UINT32				ui32ActivePMLatencyms,
+				IMG_BOOL				bActivePMLatencyPersistant)
+{
+
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR		eError;
+	RGXFWIF_RUNTIME_CFG	*psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+	PVRSRV_DEV_POWER_STATE	ePowerState;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXAPMLatencyChange: Failed to acquire power lock"));
+		return eError;
+	}
+
+	/* Update runtime configuration with the new values and ensure the
+	 * new APM latency is written to memory before requesting the FW to
+	 * read it
+	 */
+	psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms;
+	psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant;
+	OSMemoryBarrier();
+
+	eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+	if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+	{
+		RGXFWIF_KCCB_CMD	sActivePMLatencyChange;
+		sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+		sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE;
+		sActivePMLatencyChange.uCmdData.sPowData.uPoweReqData.ui32ActivePMLatencyms = ui32ActivePMLatencyms;
+
+		PDUMPCOMMENT("Scheduling command to change APM latency to %u", ui32ActivePMLatencyms);
+		eError = RGXSendCommand(psDeviceNode->pvDevice,
+					RGXFWIF_DM_GP,
+					&sActivePMLatencyChange,
+					sizeof(sActivePMLatencyChange),
+					PDUMP_FLAGS_NONE);
+
+		if (eError != PVRSRV_OK)
+		{
+			PDUMPCOMMENT("Scheduling command to change APM latency failed. Error:%u", eError);
+			PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError));
+			goto ErrorExit;
+		}
+	}
+
+ErrorExit:
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	return eError;
+}
+
+/*
+	RGXActivePowerRequest
+*/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#endif
+	/* NOTE: If this function were to wait for an event object, an attempt
+	   should be made to prevent releasing the bridge lock during the sleep:
+	   the bridge lock should be held for its duration. */
+
+	/* Powerlock to avoid further requests from racing with the FW hand-shake from now on
+	   (previous kicks to this point are detected by the FW) */
+	eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to acquire PowerLock (device: %p, error: %s)",
+				 __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+		goto _RGXActivePowerRequest_PowerLock_failed;
+	}
+
+	/* Check again for IDLE once we have the power lock */
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_IDLE)
+	{
+
+		psDevInfo->ui32ActivePMReqTotal++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+		SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo) - psFWTraceBuf->ui64StartIdleTime);
+#endif
+
+		PDUMPPOWCMDSTART();
+		eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+											 PVRSRV_DEV_POWER_STATE_OFF,
+											 IMG_FALSE); /* forced */
+		PDUMPPOWCMDEND();
+
+		if (eError == PVRSRV_OK)
+		{
+			psDevInfo->ui32ActivePMReqOk++;
+		}
+		else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+		{
+			psDevInfo->ui32ActivePMReqDenied++;
+		}
+
+	}
+
+	PVRSRVPowerUnlock(psDeviceNode);
+
+_RGXActivePowerRequest_PowerLock_failed:
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+
+	return eError;
+
+}
+/*
+	RGXForcedIdleRequest
+*/
+
+#define RGX_FORCED_IDLE_RETRY_COUNT 10
+
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted)
+{
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_KCCB_CMD	sPowCmd;
+	PVRSRV_ERROR		eError;
+	IMG_UINT32			ui32RetryCount = 0;
+#if !defined(NO_HARDWARE)
+	RGXFWIF_TRACEBUF	*psFWTraceBuf;
+#endif
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+#if !defined(NO_HARDWARE)
+	psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+	/* Firmware already forced idle */
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_FORCED_IDLE)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Firmware is not powered. Sometimes this is permitted, for instance when we are forcing idle in order to power down. */
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+	{
+		return (bDeviceOffPermitted) ? PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+	}
+#endif
+
+	eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set Power sync prim",
+			__FUNCTION__));
+		return eError;
+	}
+	sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+	sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+	sPowCmd.uCmdData.sPowData.uPoweReqData.ePowRequestType = RGXFWIF_POWER_FORCE_IDLE;
+
+	PDUMPCOMMENT("RGXForcedIdleRequest: Sending forced idle command");
+
+	/* Send one forced IDLE command to GP */
+	eError = RGXSendCommand(psDevInfo,
+			RGXFWIF_DM_GP,
+			&sPowCmd,
+			sizeof(sPowCmd),
+			PDUMP_FLAGS_NONE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXForcedIdleRequest: Failed to send idle request"));
+		return eError;
+	}
+
+	/* Wait for GPU to finish current workload */
+	do {
+		eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 0x1, 0xFFFFFFFF);
+		if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT))
+		{
+			break;
+		}
+		ui32RetryCount++;
+		PVR_DPF((PVR_DBG_WARNING,"RGXForcedIdleRequest: Request timeout. Retry %d of %d", ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT));
+	} while (IMG_TRUE);
+
+	if (eError != PVRSRV_OK)
+	{
+		RGXFWNotifyHostTimeout(psDevInfo);
+		PVR_DPF((PVR_DBG_ERROR,"RGXForcedIdleRequest: Idle request failed. Firmware potentially left in forced idle state"));
+		return eError;
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d ", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+	SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+					1,
+					0xffffffff,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					0);
+#endif
+
+#if !defined(NO_HARDWARE)
+	/* Check the firmware state for idleness */
+	if (psFWTraceBuf->ePowState != RGXFWIF_POW_FORCED_IDLE)
+	{
+		return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*
+	RGXCancelForcedIdleRequest
+*/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_KCCB_CMD	sPowCmd;
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+	eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set Power sync prim",
+			__FUNCTION__));
+		goto ErrorExit;
+	}
+
+	/* Send the IDLE request to the FW */
+	sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+	sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+	sPowCmd.uCmdData.sPowData.uPoweReqData.ePowRequestType = RGXFWIF_POWER_CANCEL_FORCED_IDLE;
+
+	PDUMPCOMMENT("RGXForcedIdleRequest: Sending cancel forced idle command");
+
+	/* Send cancel forced IDLE command to GP */
+	eError = RGXSendCommand(psDevInfo,
+			RGXFWIF_DM_GP,
+			&sPowCmd,
+			sizeof(sPowCmd),
+			PDUMP_FLAGS_NONE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PDUMPCOMMENT("RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d", RGXFWIF_DM_GP);
+		goto ErrorExit;
+	}
+
+	/* Wait for the firmware to answer. */
+	eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 1, 0xFFFFFFFF);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCancelForcedIdleRequest: Timeout waiting for cancel idle request"));
+		goto ErrorExit;
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d ", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+	SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+					1,
+					0xffffffff,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					0);
+#endif
+
+	return eError;
+
+ErrorExit:
+	PVR_DPF((PVR_DBG_ERROR,"RGXCancelForcedIdleRequest: Firmware potentially left in forced idle state"));
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RGXGetNextDustCount
+
+ @Description
+
+	Calculate a sequence of dust counts to achieve full transition coverage.
+	We increment two dust counts and switch up and down between them.
+	The sequence contains a few redundant transitions. If two dusts exist,
+	the output transitions should be as follows.
+
+	0->1, 0<-1, 0->2, 0<-2, (0->1)
+	1->1, 1->2, 1<-2, (1->2)
+	2->2, (2->0),
+	0->0. Repeat.
+
+	Redundant transitions are shown in brackets.
+
+ @Input		psDustReqState : Counter state used to calculate next dust count
+ @Input		ui32DustCount : Number of dusts in the core
+
+ @Return	IMG_UINT32 : Next dust count in the sequence
+
+******************************************************************************/
+IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustReqState, IMG_UINT32 ui32DustCount)
+{
+	if (psDustReqState->bToggle)
+	{
+		psDustReqState->ui32DustCount2++;
+	}
+
+	if (psDustReqState->ui32DustCount2 > ui32DustCount)
+	{
+		psDustReqState->ui32DustCount1++;
+		psDustReqState->ui32DustCount2 = psDustReqState->ui32DustCount1;
+	}
+
+	if (psDustReqState->ui32DustCount1 > ui32DustCount)
+	{
+		psDustReqState->ui32DustCount1 = 0;
+		psDustReqState->ui32DustCount2 = 0;
+	}
+
+	psDustReqState->bToggle = !psDustReqState->bToggle;
+
+	return (psDustReqState->bToggle) ? psDustReqState->ui32DustCount1 : psDustReqState->ui32DustCount2;
+}
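+
+/*
+	Illustrative sketch (not part of the driver): how a test loop might walk
+	the transition sequence documented above. The iteration count of 16 is a
+	made-up example covering one full cycle for a two-dust core.
+
+	RGX_DUST_STATE sState = {0};
+	IMG_UINT32 i;
+	for (i = 0; i < 16; i++)
+	{
+		RGXDustCountChange(hDevHandle, RGXGetNextDustCount(&sState, 2));
+	}
+*/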
+
+/******************************************************************************
+ End of file (rgxpower.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxpower.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxpower.h
new file mode 100644
index 0000000..073dd47
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxpower.h
@@ -0,0 +1,245 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX power header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX power
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXPOWER_H__)
+#define __RGXPOWER_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "servicesext.h"
+#include "rgxdevice.h"
+
+
+/*!
+******************************************************************************
+
+ @Function	RGXPrePowerState
+
+ @Description
+
+ Does the necessary preparation before a power state transition.
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   eNewPowerState : New power state
+ @Input	   eCurrentPowerState : Current power state
+ @Input	   bForced : Whether the power transition is being forced
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE				hDevHandle, 
+							  PVRSRV_DEV_POWER_STATE	eNewPowerState, 
+							  PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+							  IMG_BOOL					bForced);
+
+/*!
+******************************************************************************
+
+ @Function	RGXPostPowerState
+
+ @Description
+
+ Does the necessary processing after a power state transition.
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   eNewPowerState : New power state
+ @Input	   eCurrentPowerState : Current power state
+ @Input	   bForced : Whether the power transition is being forced
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE				hDevHandle, 
+							   PVRSRV_DEV_POWER_STATE	eNewPowerState, 
+							   PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+							   IMG_BOOL					bForced);
+
+
+/*!
+******************************************************************************
+
+ @Function	RGXPreClockSpeedChange
+
+ @Description
+
+	Does processing required before an RGX clock speed change.
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   eCurrentPowerState : Power state of the device
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE				hDevHandle,
+									PVRSRV_DEV_POWER_STATE	eCurrentPowerState);
+
+/*!
+******************************************************************************
+
+ @Function	RGXPostClockSpeedChange
+
+ @Description
+
+	Does processing required after an RGX clock speed change.
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   eCurrentPowerState : Power state of the device
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE				hDevHandle,
+									 PVRSRV_DEV_POWER_STATE	eCurrentPowerState);
+
+
+/*!
+******************************************************************************
+
+ @Function	RGXDustCountChange
+
+ @Description Change of number of DUSTs
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   ui32NumberOfDusts : Number of DUSTs to make transition to
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE				hDevHandle,
+								IMG_UINT32				ui32NumberOfDusts);
+
+/*!
+******************************************************************************
+
+ @Function	RGXAPMLatencyChange
+
+ @Description
+
+	Changes the wait duration used before firmware indicates IDLE.
+	Reducing this value will cause the firmware to shut off faster and
+	more often but may increase bubbles in GPU scheduling due to the added
+	power management activity. If bActivePMLatencyPersistant is NOT set, the
+	APM latency will revert to the system default on power up.
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   ui32ActivePMLatencyms : Number of milliseconds to wait
+ @Input	   bActivePMLatencyPersistant : Set to ensure the new value is not reset
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE				hDevHandle,
+								 IMG_UINT32				ui32ActivePMLatencyms,
+								 IMG_BOOL				bActivePMLatencyPersistant);
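+
+/*
+	Illustrative sketch (not part of the driver): lowering the APM latency to
+	a made-up 5 ms without persistence, so the value reverts to the system
+	default on the next power up.
+
+	eError = RGXAPMLatencyChange(hDevHandle, 5, IMG_FALSE);
+*/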
+
+/*!
+******************************************************************************
+
+ @Function	RGXActivePowerRequest
+
+ @Description Initiate a handshake with the FW to power off the GPU
+
+ @Input	   hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function	RGXForcedIdleRequest
+
+ @Description Initiate a handshake with the FW to idle the GPU
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   bDeviceOffPermitted : Whether a powered-off device is an acceptable state
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted);
+
+/*!
+******************************************************************************
+
+ @Function	RGXCancelForcedIdleRequest
+
+ @Description Send a request to cancel idle to the firmware.
+
+ @Input	   hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function	RGXGetNextDustCount
+
+ @Description
+
+	Calculate a sequence of dust counts to achieve full transition coverage.
+	We increment two dust counts and switch up and down between them.
+	The sequence contains a few redundant transitions. If two dusts exist,
+	the output transitions should be as follows.
+
+	0->1, 0<-1, 0->2, 0<-2, (0->1)
+	1->1, 1->2, 1<-2, (1->2)
+	2->2, (2->0),
+	0->0. Repeat.
+
+	Redundant transitions are shown in brackets.
+
+ @Input		psDustReqState : Counter state used to calculate next dust count
+ @Input		ui32DustCount : Number of dusts in the core
+
+ @Return	IMG_UINT32 : Next dust count in the sequence
+
+******************************************************************************/
+IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustReqState, IMG_UINT32 ui32DustCount);
+
+
+#endif /* __RGXPOWER_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxray.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxray.c
new file mode 100644
index 0000000..60f3541
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxray.c
@@ -0,0 +1,3872 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX ray tracing routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX ray tracing routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* for the offsetof macro */
+#include <stddef.h>
+#if defined(INTEGRITY_OS)
+#include <string.h>
+#endif
+
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxray.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_server.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "rgxtimerquery.h"
+#include "htbuffer.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "physmem.h"
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "process_stats.h"
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+#endif/* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_RAY_UFO_DUMP	0
+
+//#define RAY_CHECKPOINT_DEBUG 1
+
+#if defined(RAY_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+/*
+ * FIXME: Defs copied from "rgxrpmdefs.h"
+ */
+
+typedef struct _RGX_RPM_DATA_RTU_FREE_PAGE_LIST {
+     IMG_UINT32 u32_0;
+} RGX_RPM_DATA_RTU_FREE_PAGE_LIST;
+
+/*
+Page table index.
+                                                        The field is a pointer to a free page
+*/
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_PTI_WOFF          (0U)
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_PTI_SHIFT         (0U)
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_PTI_CLRMSK        (0XFFC00000U)
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_SET_PTI(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_RPM_DATA_RTU_FREE_PAGE_LIST_PTI_CLRMSK ) | (((_x_) & (0x003fffff))  <<  0)))
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_GET_PTI(_ft_)     (((_ft_).u32_0  >>  (0)) & 0x003fffff)
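+
+/*
+ * Illustrative sketch (not part of the driver): packing and unpacking a page
+ * table index with the accessors above. PTI occupies bits [21:0]; the index
+ * value 0x1234 is a made-up example.
+ *
+ *	RGX_RPM_DATA_RTU_FREE_PAGE_LIST sEntry = { 0 };
+ *	RGX_RPM_DATA_RTU_FREE_PAGE_LIST_SET_PTI(sEntry, 0x1234);
+ *	PVR_ASSERT(RGX_RPM_DATA_RTU_FREE_PAGE_LIST_GET_PTI(sEntry) == 0x1234);
+ */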
+
+typedef struct _RGX_RPM_DATA_RTU_PAGE_TABLE {
+     IMG_UINT32 u32_0;
+} RGX_RPM_DATA_RTU_PAGE_TABLE;
+
+/*
+ Page Table State
+                                                        00: Empty Block
+                                                        01: Full Block
+                                                        10: Fragmented Block: Partially full page
+*/
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_PTS_WOFF              (0U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_PTS_SHIFT             (30U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_PTS_CLRMSK            (0X3FFFFFFFU)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_SET_PTS(_ft_,_x_)     ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_RPM_DATA_RTU_PAGE_TABLE_PTS_CLRMSK ) | (((_x_) & (0x00000003))  <<  30)))
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_GET_PTS(_ft_)         (((_ft_).u32_0  >>  (30)) & 0x00000003)
+/*
+ Primitives in Page.
+                                                        Number of unique primitives stored in this page.
+                                                        The memory manager will re-use this page when the RCNT drops to zero.
+*/
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_RCNT_WOFF             (0U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_RCNT_SHIFT            (22U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_RCNT_CLRMSK           (0XC03FFFFFU)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_SET_RCNT(_ft_,_x_)    ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_RPM_DATA_RTU_PAGE_TABLE_RCNT_CLRMSK ) | (((_x_) & (0x000000ff))  <<  22)))
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_GET_RCNT(_ft_)        (((_ft_).u32_0  >>  (22)) & 0x000000ff)
+/*
+Next page table index.
+                                                        The field is a pointer to the next page for this primitive.
+*/
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_NPTI_WOFF             (0U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_NPTI_SHIFT            (0U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_NPTI_CLRMSK           (0XFFC00000U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_SET_NPTI(_ft_,_x_)    ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_RPM_DATA_RTU_PAGE_TABLE_NPTI_CLRMSK ) | (((_x_) & (0x003fffff))  <<  0)))
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_GET_NPTI(_ft_)        (((_ft_).u32_0  >>  (0)) & 0x003fffff)
+
+
+#define RGX_CR_RPM_PAGE_TABLE_BASE_VALUE_ALIGNSHIFT		(2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT				(2U)
+
+
+typedef struct {
+	DEVMEM_MEMDESC				*psContextStateMemDesc;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+#if 0
+	/* FIXME - multiple frame contexts? */
+	RGX_RPM_FREELIST				*psSHFFreeList;
+	RGX_RPM_FREELIST				*psSHGFreeList;
+#endif
+} RGX_SERVER_RAY_SH_DATA;
+
+
+typedef enum {
+	NODE_EMPTY = 0,
+	NODE_SCENE_HIERARCHY,
+	NODE_RPM_PAGE_TABLE,
+	NODE_RPM_FREE_PAGE_LIST
+} RGX_DEVMEM_NODE_TYPE;
+
+typedef struct _RGX_DEVMEM_NODE_ {
+	RGX_DEVMEM_NODE_TYPE	eNodeType;			/*!< Alloc type */
+	PMR						*psPMR; 			/*!< Scene hierarchy/page table/free page list phys pages */
+	DEVMEMINT_HEAP			*psDevMemHeap;		/*!< Heap where the virtual mapping is made */
+	IMG_DEV_VIRTADDR		sAddr;				/*!< GPU virtual address where the phys pages are mapped into */
+	IMG_UINT32				ui32NumPhysPages;	/*!< Number of physical pages mapped in for this node */
+	IMG_UINT32				ui32StartOfMappingIndex;	/*!< Start of mapping index (i.e. OS page offset from virtual base) */
+	IMG_BOOL				bInternal;
+} RGX_DEVMEM_NODE;
+
+typedef struct _RGX_RPM_DEVMEM_DESC_ {
+	DLLIST_NODE			sMemoryDescBlock;		/*!< the hierarchy scene memory block  */
+	RGX_RPM_FREELIST	*psFreeList;			/*!< Free list this allocation is associated with */
+	IMG_UINT32			ui32NumPages;			/*!< Number of RPM pages added */
+	RGX_DEVMEM_NODE		sSceneHierarchyNode;	/*!< scene hierarchy block descriptor */
+	RGX_DEVMEM_NODE		sRPMPageListNode;		/*!< RPM page list block descriptor */
+	RGX_DEVMEM_NODE		sRPMFreeListNode;		/*!< RPM free list block descriptor */
+} RGX_RPM_DEVMEM_DESC;
+
+typedef struct _DEVMEM_RPM_FREELIST_LOOKUP_
+{
+	IMG_UINT32 ui32FreeListID;
+	RGX_RPM_FREELIST *psFreeList;
+} DEVMEM_RPM_FREELIST_LOOKUP;
+
+typedef struct {
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+	RGX_CLIENT_CCB *psFCClientCCB[DPX_MAX_RAY_CONTEXTS];
+	DEVMEM_MEMDESC *psFCClientCCBMemDesc[DPX_MAX_RAY_CONTEXTS];
+	DEVMEM_MEMDESC *psFCClientCCBCtrlMemDesc[DPX_MAX_RAY_CONTEXTS];
+} RGX_SERVER_RAY_RS_DATA;
+
+
+struct _RGX_SERVER_RAY_CONTEXT_ {
+	PVRSRV_DEVICE_NODE			*psDeviceNode;
+	DEVMEM_MEMDESC				*psFWRayContextMemDesc;
+	DEVMEM_MEMDESC				*psFWFrameworkMemDesc;
+	RGX_SERVER_RAY_SH_DATA		sSHData;
+	RGX_SERVER_RAY_RS_DATA		sRSData;
+	IMG_UINT32					ui32CleanupStatus;
+#define RAY_CLEANUP_SH_COMPLETE		(1 << 0)
+#define RAY_CLEANUP_RS_COMPLETE		(1 << 1)
+	PVRSRV_CLIENT_SYNC_PRIM		*psCleanupSync;
+	DLLIST_NODE					sListNode;
+	SYNC_ADDR_LIST				sSyncAddrListFence;
+	SYNC_ADDR_LIST				sSyncAddrListUpdate;
+	ATOMIC_T					hJobId;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POS_LOCK                     hLock;
+#endif
+};
+
+
+#if 0
+static
+#ifdef __GNUC__
+	__attribute__((noreturn))
+#endif
+void sleep_for_ever(void)
+{
+#if defined(__KLOCWORK__) // Klocwork would report an infinite loop because of while(1).
+	PVR_ASSERT(0);
+#else
+	while(1)
+	{
+		OSSleepms(~0); // sleep the maximum amount of time possible
+	}
+#endif
+}
+#endif
+
+static
+PVRSRV_ERROR _RGXCreateRPMSparsePMR(CONNECTION_DATA *psConnection,
+									PVRSRV_DEVICE_NODE	 *psDeviceNode,
+									RGX_DEVMEM_NODE_TYPE eBlockType,
+									IMG_UINT32		ui32NumPages,
+									IMG_UINT32		uiLog2DopplerPageSize,
+									PMR				**ppsPMR);
+
+static PVRSRV_ERROR _RGXMapRPMPBBlock(RGX_DEVMEM_NODE	*psDevMemNode,
+					RGX_RPM_FREELIST *psFreeList,
+					RGX_DEVMEM_NODE_TYPE eBlockType,
+					DEVMEMINT_HEAP *psDevmemHeap,
+					IMG_UINT32 ui32NumPages,
+					IMG_DEV_VIRTADDR sDevVAddrBase);
+
+static
+PVRSRV_ERROR _RGXUnmapRPMPBBlock(RGX_DEVMEM_NODE	*psDevMemNode,
+					RGX_RPM_FREELIST *psFreeList,
+					IMG_DEV_VIRTADDR sDevVAddrBase);
+
+static
+PVRSRV_ERROR _CreateSHContext(CONNECTION_DATA *psConnection,
+							  PVRSRV_DEVICE_NODE *psDeviceNode,
+							  DEVMEM_MEMDESC *psAllocatedMemDesc,
+							  IMG_UINT32 ui32AllocatedOffset,
+							  DEVMEM_MEMDESC *psFWMemContextMemDesc,
+							  IMG_DEV_VIRTADDR sVRMCallStackAddr,
+							  IMG_UINT32 ui32Priority,
+							  RGX_COMMON_CONTEXT_INFO *psInfo,
+							  RGX_SERVER_RAY_SH_DATA *psSHData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_VRDMCTX_STATE *psContextState;
+	PVRSRV_ERROR eError;
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	*/
+	PDUMPCOMMENT("Allocate RGX firmware SHG context suspend state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							  sizeof(RGXFWIF_VRDMCTX_STATE),
+							  RGX_FWCOMCTX_ALLOCFLAGS,
+							  "FwRaySHGContextSuspendState",
+							  &psSHData->psContextStateMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+				eError));
+		goto fail_shcontextsuspendalloc;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psSHData->psContextStateMemDesc,
+                                      (void **)&psContextState);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to map firmware render context state (%u)",
+				eError));
+		goto fail_suspendcpuvirtacquire;
+	}
+	psContextState->uVRDMReg_VRM_CALL_STACK_POINTER = sVRMCallStackAddr.uiAddr;
+	DevmemReleaseCpuVirtAddr(psSHData->psContextStateMemDesc);
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 REQ_TYPE_SH,
+									 RGXFWIF_DM_SHG,
+									 psAllocatedMemDesc,
+									 ui32AllocatedOffset,
+									 psFWMemContextMemDesc,
+									 psSHData->psContextStateMemDesc,
+									 RGX_RTU_CCB_SIZE_LOG2,
+									 ui32Priority,
+									 psInfo,
+									 &psSHData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to init TA fw common context (%u)",
+				eError));
+		goto fail_shcommoncontext;
+	}
+	
+	/*
+	 * Dump the FW SH context suspend state buffer
+	 */
+	PDUMPCOMMENT("Dump the SH context suspend state buffer");
+	DevmemPDumpLoadMem(psSHData->psContextStateMemDesc,
+					   0,
+					   sizeof(RGXFWIF_VRDMCTX_STATE),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	psSHData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_shcommoncontext:
+fail_suspendcpuvirtacquire:
+	DevmemFwFree(psDevInfo, psSHData->psContextStateMemDesc);
+fail_shcontextsuspendalloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+static
+PVRSRV_ERROR _CreateRSContext(CONNECTION_DATA *psConnection,
+							  PVRSRV_DEVICE_NODE *psDeviceNode,
+							  DEVMEM_MEMDESC *psAllocatedMemDesc,
+							  IMG_UINT32 ui32AllocatedOffset,
+							  DEVMEM_MEMDESC *psFWMemContextMemDesc,
+							  IMG_UINT32 ui32Priority,
+							  RGX_COMMON_CONTEXT_INFO *psInfo,
+							  RGX_SERVER_RAY_RS_DATA *psRSData)
+{
+	PVRSRV_ERROR eError;
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 REQ_TYPE_RS,
+									 RGXFWIF_DM_RTU,
+									 psAllocatedMemDesc,
+									 ui32AllocatedOffset,
+									 psFWMemContextMemDesc,
+                                     NULL,
+									 RGX_RTU_CCB_SIZE_LOG2,
+									 ui32Priority,
+									 psInfo,
+									 &psRSData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to init 3D fw common context (%u)",
+				eError));
+		goto fail_rscommoncontext;
+	}
+
+	psRSData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_rscommoncontext:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+
+/*
+	Static functions used by ray context code
+*/
+
+static
+PVRSRV_ERROR _DestroySHContext(RGX_SERVER_RAY_SH_DATA *psSHData,
+							   PVRSRV_DEVICE_NODE *psDeviceNode,
+							   PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+											  psSHData->psServerCommonContext,
+											  psCleanupSync,
+											  RGXFWIF_DM_SHG,
+											  PDUMP_FLAGS_NONE);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				__FUNCTION__,
+				PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* ... it has so we can free its resources */
+	FWCommonContextFree(psSHData->psServerCommonContext);
+	DevmemFwFree(psDeviceNode->pvDevice, psSHData->psContextStateMemDesc);
+	psSHData->psContextStateMemDesc = NULL;
+	psSHData->psServerCommonContext = NULL;
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _DestroyRSContext(RGX_SERVER_RAY_RS_DATA *psRSData,
+							   PVRSRV_DEVICE_NODE *psDeviceNode,
+							   PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+											  psRSData->psServerCommonContext,
+											  psCleanupSync,
+											  RGXFWIF_DM_RTU,
+											  PDUMP_FLAGS_NONE);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				 __FUNCTION__,
+				 PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* ... it has so we can free its resources */
+
+
+	FWCommonContextFree(psRSData->psServerCommonContext);
+	psRSData->psServerCommonContext = NULL;
+	return PVRSRV_OK;
+}
+
+
+/*
+ * RPM driver management rev 2
+ * 
+ * The RPM freelists are opaque to the client driver. Scene Hierarchy pages
+ * are managed in Blocks (analogous to PB blocks) which are alloc'd in KM
+ * and mapped into the client MMU context.
+ * 
+ * Page tables are set up for each existing Scene Memory Block.
+ * 
+ * Freelist entries are also updated according to the list of Scene Memory Blocks.
+ * 
+ * NOTES:
+ * 
+ * (1) Scene Hierarchy shrink is not expected to be used.
+ * (2) The RPM FreeLists are circular buffers and must be contiguous in virtual space.
+ * (3) Each PMR is created with no phys backing pages. Pages are mapped in on demand
+ *     via RGXGrowRPMFreeList.
+ * 
+ */
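+
+/*
+ * Illustrative sketch (not part of the driver): the on-demand grow path from
+ * note (3), as the out-of-memory handler below exercises it.
+ *
+ *	eError = RGXGrowRPMFreeList(psFreeList,
+ *								psFreeList->ui32GrowFLPages,
+ *								&psFreeList->sMemoryBlockHead);
+ */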
+#if defined(DEBUG)
+static PVRSRV_ERROR _ReadRPMFreePageList(PMR		 *psPMR,
+										 IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+										 IMG_UINT32  ui32PageCount)
+{
+	PVRSRV_ERROR	eError;
+	IMG_UINT32		uiIdx, j;
+	size_t			uNumBytesCopied;
+	RGX_RPM_DATA_RTU_FREE_PAGE_LIST		*psFreeListBuffer;
+	IMG_UINT32		ui32PTI[4];
+
+	/* Allocate scratch area for setting up Page table indices */
+	psFreeListBuffer = OSAllocMem(ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST));
+    if (psFreeListBuffer == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_WriteRPMPageList: failed to allocate scratch page table"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	
+	/* Read scratch buffer from PMR (FPL entries must be contiguous) */
+	eError = PMR_ReadBytes(psPMR,
+				 uiLogicalOffset,
+				 (IMG_UINT8 *) psFreeListBuffer,
+				 ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST),
+				 &uNumBytesCopied);
+
+	if (eError == PVRSRV_OK)
+	{
+		/* NOTE: reads four entries per iteration, so ui32PageCount is assumed to be a multiple of 4 */
+		for (uiIdx = 0; uiIdx < ui32PageCount; uiIdx += 4)
+		{
+			for (j=0; j<4; j++)
+			{
+				ui32PTI[j] = RGX_RPM_DATA_RTU_FREE_PAGE_LIST_GET_PTI(psFreeListBuffer[uiIdx + j]);
+			}
+			PVR_DPF((PVR_DBG_MESSAGE, "%4d:  %7d %7d %7d %7d", uiIdx,
+					ui32PTI[0], ui32PTI[1], ui32PTI[2], ui32PTI[3]));
+		}
+	}
+
+	/* Free scratch buffer */
+	OSFreeMem(psFreeListBuffer);
+
+	return eError;
+}
+
+static IMG_BOOL RGXDumpRPMFreeListPageList(RGX_RPM_FREELIST *psFreeList)
+{
+	PVR_LOG(("RPM Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx,
+				psFreeList->sFreeListFWDevVAddr.ui32Addr,
+				psFreeList->ui32FreelistID,
+				psFreeList->ui64FreelistChecksum));
+
+	/* Dump FreeList page list */
+	_ReadRPMFreePageList(psFreeList->psFreeListPMR, 0, psFreeList->ui32CurrentFLPages);
+
+	return IMG_TRUE;
+}
+#endif
+
+static PVRSRV_ERROR _UpdateFwRPMFreelistSize(RGX_RPM_FREELIST *psFreeList,
+											 IMG_BOOL bGrow,
+											 IMG_BOOL bRestartRPM,
+											 IMG_UINT32 ui32DeltaSize)
+{
+	PVRSRV_ERROR			eError;
+	RGXFWIF_KCCB_CMD		sGPCCBCmd;
+
+	if(!bGrow)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_UpdateFwRPMFreelistSize: RPM freelist shrink not supported."));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* send feedback */
+	sGPCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_DOPPLER_MEMORY_GROW;
+	sGPCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+	sGPCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32DeltaSize;
+	sGPCCBCmd.uCmdData.sFreeListGSData.ui32NewPages =
+		((bRestartRPM) ? RGX_FREELIST_GSDATA_RPM_RESTART_EN : 0) |
+		psFreeList->ui32CurrentFLPages;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Send FW update: RPM freelist [FWAddr=0x%08x] has 0x%08x pages",
+								psFreeList->sFreeListFWDevVAddr.ui32Addr,
+								psFreeList->ui32CurrentFLPages));
+
+	/* Submit command to the firmware.  */
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psFreeList->psDevInfo,
+									RGXFWIF_DM_GP,
+									&sGPCCBCmd,
+									sizeof(sGPCCBCmd),
+									0,
+									PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_UpdateFwRPMFreelistSize: failed to update FW freelist size. (error = %u)", eError));
+		return eError;
+	}
+
+	return PVRSRV_OK;
+}
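+
+/*
+ * Illustrative note (not part of the driver): ui32NewPages above is a packed
+ * field. For example, a restart request with 0x200 current pages (a made-up
+ * value) would be encoded as:
+ *
+ *	sGPCCBCmd.uCmdData.sFreeListGSData.ui32NewPages =
+ *		RGX_FREELIST_GSDATA_RPM_RESTART_EN | 0x200;
+ */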
+
+#if 0
+static void _CheckRPMFreelist(RGX_RPM_FREELIST *psFreeList,
+                   	   	   	   IMG_UINT32 ui32NumOfPagesToCheck,
+                   	   	   	   IMG_UINT64 ui64ExpectedCheckSum,
+                   	   	   	   IMG_UINT64 *pui64CalculatedCheckSum)
+{
+#if defined(NO_HARDWARE)
+	/* No checksum needed as we have all information in the pdumps */
+	PVR_UNREFERENCED_PARAMETER(psFreeList);
+	PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck);
+	PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum);
+	*pui64CalculatedCheckSum = 0;
+#else
+	PVRSRV_ERROR eError;
+	size_t uiNumBytes;
+    IMG_UINT8* pui8Buffer;
+    IMG_UINT32* pui32Buffer;
+    IMG_UINT32 ui32CheckSumAdd = 0;
+    IMG_UINT32 ui32CheckSumXor = 0;
+    IMG_UINT32 ui32Entry;
+    IMG_UINT32 ui32Entry2;
+    IMG_BOOL  bFreelistBad = IMG_FALSE;
+
+	*pui64CalculatedCheckSum = 0;
+
+	/* Allocate Buffer of the size of the freelist */
+	pui8Buffer = OSAllocMem(psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32));
+    if (pui8Buffer == NULL)
+    {
+		PVR_LOG(("_CheckRPMFreelist: Failed to allocate buffer to check freelist %p!", psFreeList));
+		sleep_for_ever();
+		//PVR_ASSERT(0);
+        return;
+    }
+
+    /* Copy freelist content into Buffer */
+    eError = PMR_ReadBytes(psFreeList->psFreeListPMR,
+    				psFreeList->uiFreeListPMROffset + (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) * sizeof(IMG_UINT32),
+    				pui8Buffer,
+    				psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32),
+            		&uiNumBytes);
+    if (eError != PVRSRV_OK)
+    {
+		OSFreeMem(pui8Buffer);
+		PVR_LOG(("_CheckRPMFreelist: Failed to get freelist data for RPM freelist %p!", psFreeList));
+		sleep_for_ever();
+		//PVR_ASSERT(0);
+        return;
+    }
+
+    PVR_ASSERT(uiNumBytes == psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32));
+    PVR_ASSERT(ui32NumOfPagesToCheck <= psFreeList->ui32CurrentFLPages);
+
+    /* Generate checksum */
+    pui32Buffer = (IMG_UINT32 *)pui8Buffer;
+    for(ui32Entry = 0; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++)
+    {
+    	ui32CheckSumAdd += pui32Buffer[ui32Entry];
+    	ui32CheckSumXor ^= pui32Buffer[ui32Entry];
+
+    	/* Check for double entries */
+    	for (ui32Entry2 = 0; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++)
+    	{
+			if ((ui32Entry != ui32Entry2) &&
+				(pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2]))
+			{
+				PVR_LOG(("_CheckRPMFreelist: RPM Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d",
+											psFreeList->sFreeListFWDevVAddr.ui32Addr,
+											pui32Buffer[ui32Entry2],
+											ui32Entry,
+											ui32Entry2,
+											psFreeList->ui32CurrentFLPages));
+				bFreelistBad = IMG_TRUE;
+			}
+    	}
+    }
+
+    OSFreeMem(pui8Buffer);
+
+	/* Check the calculated checksum against the expected checksum... */
+	*pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;
+
+	if (ui64ExpectedCheckSum != 0  &&  ui64ExpectedCheckSum != *pui64CalculatedCheckSum)
+	{
+		PVR_LOG(("_CheckRPMFreelist: Checksum mismatch for RPM freelist %p!  Expected 0x%016llx calculated 0x%016llx",
+		        psFreeList, ui64ExpectedCheckSum, *pui64CalculatedCheckSum));
+		bFreelistBad = IMG_TRUE;
+	}
+
+	if (bFreelistBad)
+	{
+		PVR_LOG(("_CheckRPMFreelist: Sleeping for ever!"));
+		sleep_for_ever();
+//		PVR_ASSERT(!bFreelistBad);
+	}
+#endif
+}
+#endif
+
+static PVRSRV_ERROR _WriteRPMFreePageList(PMR		 *psPMR,
+										  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+										  IMG_UINT32  ui32NextPageIndex,
+										  IMG_UINT32  ui32PageCount)
+{
+	PVRSRV_ERROR	eError;
+	IMG_UINT32		uiIdx;
+	size_t		uNumBytesCopied;
+	RGX_RPM_DATA_RTU_FREE_PAGE_LIST		*psFreeListBuffer;
+
+	/* Allocate scratch area for setting up Page table indices */
+	psFreeListBuffer = OSAllocMem(ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST));
+    if (psFreeListBuffer == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_WriteRPMPageList: failed to allocate scratch page table"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	
+	for (uiIdx = 0; uiIdx < ui32PageCount; uiIdx ++, ui32NextPageIndex ++)
+	{
+		psFreeListBuffer[uiIdx].u32_0 = 0;
+		RGX_RPM_DATA_RTU_FREE_PAGE_LIST_SET_PTI(psFreeListBuffer[uiIdx], ui32NextPageIndex);
+	}
+	
+	/* Copy scratch buffer to PMR */
+	eError = PMR_WriteBytes(psPMR,
+				 uiLogicalOffset,
+				 (IMG_UINT8 *) psFreeListBuffer,
+				 ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST),
+				 &uNumBytesCopied);
+	
+	/* Free scratch buffer */
+	OSFreeMem(psFreeListBuffer);
+
+#if defined(PDUMP)
+	/* Pdump the Page tables */
+	PDUMPCOMMENT("Dump %u RPM free page list entries.", ui32PageCount);
+	PMRPDumpLoadMem(psPMR,
+					uiLogicalOffset,
+					ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST),
+					PDUMP_FLAGS_CONTINUOUS,
+					IMG_FALSE);
+#endif
+	return eError;
+}
+
+
+static RGX_RPM_FREELIST* FindRPMFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID)
+{
+	DLLIST_NODE *psNode, *psNext;
+	RGX_RPM_FREELIST *psFreeList = NULL;
+
+	OSLockAcquire(psDevInfo->hLockRPMFreeList);
+	dllist_foreach_node(&psDevInfo->sRPMFreeListHead, psNode, psNext)
+	{
+		RGX_RPM_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_RPM_FREELIST, sNode);
+
+		if (psThisFreeList->ui32FreelistID == ui32FreelistID)
+		{
+			psFreeList = psThisFreeList;
+			break;
+		}
+	}
+	OSLockRelease(psDevInfo->hLockRPMFreeList);
+	
+	return psFreeList;
+}
+
+void RGXProcessRequestRPMGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+							  IMG_UINT32 ui32FreelistID)
+{
+	RGX_RPM_FREELIST *psFreeList = NULL;
+	RGXFWIF_KCCB_CMD sVRDMCCBCmd;
+	IMG_UINT32 ui32GrowValue;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bRestartRPM = IMG_TRUE; /* FIXME */
+
+	PVR_ASSERT(psDevInfo);
+
+	/* find the freelist with the corresponding ID */
+	psFreeList = FindRPMFreeList(psDevInfo, ui32FreelistID);
+
+	if (psFreeList)
+	{
+		/* Try to grow the freelist */
+		eError = RGXGrowRPMFreeList(psFreeList,
+									psFreeList->ui32GrowFLPages,
+									&psFreeList->sMemoryBlockHead);
+		if (eError == PVRSRV_OK)
+		{
+			/* Grow successful, return size of grow size */
+			ui32GrowValue = psFreeList->ui32GrowFLPages;
+
+			psFreeList->ui32NumGrowReqByFW++;
+
+ #if defined(PVRSRV_ENABLE_PROCESS_STATS)
+			/* Update Stats */
+			PVRSRVStatsUpdateFreelistStats(0,
+	                               1, /* Add 1 to the appropriate counter (Requests by FW) */
+	                               psFreeList->ui32InitFLPages,
+	                               psFreeList->ui32NumHighPages,
+	                               psFreeList->ownerPid);
+
+ #endif
+
+		}
+		else
+		{
+			/* Grow failed */
+			ui32GrowValue = 0;
+			PVR_DPF((PVR_DBG_ERROR,"Grow for FreeList %p [ID %d] failed (error %u)",
+									psFreeList,
+									psFreeList->ui32FreelistID,
+									eError));
+		}
+
+		/* send feedback */
+		sVRDMCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_DOPPLER_MEMORY_GROW;
+		sVRDMCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+		sVRDMCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue;
+		sVRDMCCBCmd.uCmdData.sFreeListGSData.ui32NewPages =
+			((bRestartRPM) ? RGX_FREELIST_GSDATA_RPM_RESTART_EN : 0) |
+			(psFreeList->ui32CurrentFLPages);
+
+		PVR_DPF((PVR_DBG_ERROR,"Send feedback to RPM after grow on freelist [ID %d]", ui32FreelistID));
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psDevInfo,
+										RGXFWIF_DM_SHG,
+										&sVRDMCCBCmd,
+										sizeof(sVRDMCCBCmd),
+										0,
+										PDUMP_FLAGS_NONE);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+		/* Kernel CCB should never fill up, as the FW is processing them right away  */
+
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+	else
+	{
+		/* Should never happen */
+		PVR_DPF((PVR_DBG_ERROR,"FreeList Lookup for FreeList ID 0x%08x failed (Populate)", ui32FreelistID));
+		PVR_ASSERT(IMG_FALSE);
+	}
+}
+
+
+/*!
+ * RGXGrowRPMFreeList
+ *
+ * Allocate and map physical backing pages for RPM buffers.
+ *
+ * @param	psFreeList - RPM freelist descriptor
+ * @param	ui32RequestNumPages - number of RPM pages to add to the Doppler
+ * 				scene hierarchy
+ * @param	pListHeader - linked list of RGX_RPM_DEVMEM_DESC blocks; each block
+ * 				describes a new Scene memory block and its associated RPM page
+ * 				table and free page list entries
+ *
+ */
+PVRSRV_ERROR RGXGrowRPMFreeList(RGX_RPM_FREELIST *psFreeList,
+								IMG_UINT32 ui32RequestNumPages,
+								PDLLIST_NODE pListHeader)
+{
+	PVRSRV_ERROR			eError;
+	RGX_SERVER_RPM_CONTEXT	*psRPMContext = psFreeList->psParentCtx;
+	RGX_RPM_DEVMEM_DESC		*psRPMDevMemDesc;
+	IMG_DEVMEM_OFFSET_T		uiPMROffset;
+	IMG_UINT32				ui32NextPageIndex;
+
+	/* Are we allowed to grow ? */
+	if (ui32RequestNumPages > psFreeList->psParentCtx->ui32UnallocatedPages)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: Scene Hierarchy buffer exceeded (0x%x pages required, 0x%x pages available).",
+				ui32RequestNumPages, psFreeList->psParentCtx->ui32UnallocatedPages));
+		return PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX;
+	}
+
+	/* Allocate descriptor */
+	psRPMDevMemDesc = OSAllocZMem(sizeof(*psRPMDevMemDesc));
+    if (psRPMDevMemDesc == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: failed to allocate host data structure"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/*
+	 * Lock protects simultaneous manipulation of:
+	 * - the memory block list
+	 * - the freelist's ui32CurrentFLPages
+	 * - the context's ui32UnallocatedPages
+	 */
+	OSLockAcquire(psFreeList->psDevInfo->hLockRPMFreeList);
+	OSLockAcquire(psFreeList->psDevInfo->hLockRPMContext);
+
+	/* Update the sparse PMRs */
+	psRPMDevMemDesc->psFreeList = psFreeList;
+	psRPMDevMemDesc->ui32NumPages = ui32RequestNumPages;
+	psRPMDevMemDesc->sSceneHierarchyNode.psPMR = psRPMContext->psSceneHierarchyPMR;
+	psRPMDevMemDesc->sRPMPageListNode.psPMR = psRPMContext->psRPMPageTablePMR;
+	psRPMDevMemDesc->sRPMFreeListNode.psPMR = psFreeList->psFreeListPMR;
+
+
+	PVR_DPF((PVR_DBG_MESSAGE, "RGXGrowRPMFreeList: mapping %d pages for Doppler scene memory to VA 0x%" IMG_UINT64_FMTSPECx " with heap ID %p",
+			ui32RequestNumPages, psRPMContext->sSceneMemoryBaseAddr.uiAddr, psRPMContext->psSceneHeap));
+
+	/* 
+	 * 1. Doppler scene hierarchy
+	 */
+	PDUMPCOMMENT("Allocate %d pages with mapping index %d for Doppler scene memory.",
+				 ui32RequestNumPages,
+				 psRPMContext->ui32SceneMemorySparseMappingIndex);
+	eError = _RGXMapRPMPBBlock(&psRPMDevMemDesc->sSceneHierarchyNode,
+					psFreeList,
+					NODE_SCENE_HIERARCHY,
+					psRPMContext->psSceneHeap,
+					ui32RequestNumPages,
+					psRPMContext->sSceneMemoryBaseAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: Unable to map RPM scene hierarchy block (status %d)", eError));
+		goto ErrorSceneBlock;
+	}
+
+	/* 
+	 * 2. RPM page list
+	 */
+	if (ui32RequestNumPages > psRPMContext->ui32RPMEntriesInPage)
+	{
+		/* we need to map in phys pages for RPM page table */
+		PDUMPCOMMENT("Allocate %d (%d requested) page table entries with mapping index %d for RPM page table.",
+					 ui32RequestNumPages - psRPMContext->ui32RPMEntriesInPage,
+					 ui32RequestNumPages,
+					 psRPMContext->ui32RPMPageTableSparseMappingIndex);
+		eError = _RGXMapRPMPBBlock(&psRPMDevMemDesc->sRPMPageListNode,
+						psFreeList,
+						NODE_RPM_PAGE_TABLE,
+						psRPMContext->psRPMPageTableHeap,
+						ui32RequestNumPages - psRPMContext->ui32RPMEntriesInPage,
+						psRPMContext->sRPMPageTableBaseAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: Unable to map RPM page table block (status %d)", eError));
+			goto ErrorPageTableBlock;
+		}
+	}
+
+	/*
+	 * 3. Free page list (FPL)
+	 */
+	if (ui32RequestNumPages > psFreeList->ui32EntriesInPage)
+	{
+		/* we need to map in phys pages for RPM free page list */
+		PDUMPCOMMENT("Allocate %d (%d requested) FPL entries with mapping index %d for RPM free page list.",
+					 ui32RequestNumPages - psFreeList->ui32EntriesInPage,
+					 ui32RequestNumPages,
+					 psFreeList->ui32RPMFreeListSparseMappingIndex);
+		eError = _RGXMapRPMPBBlock(&psRPMDevMemDesc->sRPMFreeListNode,
+						psFreeList,
+						NODE_RPM_FREE_PAGE_LIST,
+						psRPMContext->psRPMPageTableHeap,
+						ui32RequestNumPages - psFreeList->ui32EntriesInPage,
+						psFreeList->sBaseDevVAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: Unable to map RPM free page list (status %d)", eError));
+			goto ErrorFreeListBlock;
+		}
+	}
+
+	/*
+	 * Update FPL entries
+	 */
+
+	/* Calculate doppler page index from base of Doppler heap */
+	ui32NextPageIndex = (psRPMDevMemDesc->sSceneHierarchyNode.sAddr.uiAddr -
+		psRPMContext->sDopplerHeapBaseAddr.uiAddr) >> psFreeList->uiLog2DopplerPageSize;
+
+	/* Calculate write offset into FPL PMR assuming pages are mapped in order with no gaps */
+	uiPMROffset = (size_t)psFreeList->ui32CurrentFLPages * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST);
+
+	eError = _WriteRPMFreePageList(psFreeList->psFreeListPMR, uiPMROffset, ui32NextPageIndex, ui32RequestNumPages);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: error writing RPM free list entries (%d)", eError));
+		goto ErrorFreeListWriteEntries;
+	}
+
+	{
+		/* 
+		 * Update the entries remaining in the last mapped RPM and FPL pages.
+		 * 
+		 * psRPMDevMemDesc->sRPMPageListNode.ui32NumPhysPages * ui32PTEntriesPerChunk entries are added (can be zero)
+		 * ui32RequestNumPages entries are committed
+		 * 
+		 * The number of entries remaining should always be less than a full page.
+		 */
+		IMG_UINT32	ui32PTEntriesPerChunk = OSGetPageSize() / sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST);
+		IMG_UINT32	ui32PTEntriesPerChunkClearMask = ~(ui32PTEntriesPerChunk - 1);
+
+		psRPMContext->ui32RPMEntriesInPage = psRPMContext->ui32RPMEntriesInPage +
+			(psRPMDevMemDesc->sRPMPageListNode.ui32NumPhysPages * ui32PTEntriesPerChunk) - ui32RequestNumPages;
+		PVR_ASSERT((psRPMContext->ui32RPMEntriesInPage & ui32PTEntriesPerChunkClearMask) == 0);
+
+		psFreeList->ui32EntriesInPage = psFreeList->ui32EntriesInPage +
+			(psRPMDevMemDesc->sRPMFreeListNode.ui32NumPhysPages * ui32PTEntriesPerChunk) - ui32RequestNumPages;
+		PVR_ASSERT((psFreeList->ui32EntriesInPage & ui32PTEntriesPerChunkClearMask) == 0);
+	}
+
+	/* Add node to link list */
+	dllist_add_to_head(pListHeader, &psRPMDevMemDesc->sMemoryDescBlock);
+
+	/* Update number of available pages */
+	psFreeList->ui32CurrentFLPages += ui32RequestNumPages;
+	psRPMContext->ui32UnallocatedPages -= ui32RequestNumPages;
+
+#if defined(DEBUG)
+	RGXDumpRPMFreeListPageList(psFreeList);
+#endif
+
+	OSLockRelease(psFreeList->psDevInfo->hLockRPMContext);
+	OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+
+	PVR_DPF((PVR_DBG_MESSAGE,"RPM Freelist [%p, ID %d]: grow by %u pages (current pages %u/%u, unallocated pages %u)",
+			psFreeList,
+			psFreeList->ui32FreelistID,
+			ui32RequestNumPages,
+			psFreeList->ui32CurrentFLPages,
+			psRPMContext->ui32TotalRPMPages,
+			psRPMContext->ui32UnallocatedPages));
+
+	return PVRSRV_OK;
+
+	/* Error handling */
+ErrorFreeListWriteEntries:
+	/* TODO: unmap sparse block for RPM FPL */
+ErrorFreeListBlock:
+	/* TODO: unmap sparse block for RPM page table */
+ErrorPageTableBlock:
+	/* TODO: unmap sparse block for scene hierarchy */
+
+ErrorSceneBlock:
+	OSLockRelease(psFreeList->psDevInfo->hLockRPMContext);
+	OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+	OSFreeMem(psRPMDevMemDesc);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static PVRSRV_ERROR RGXShrinkRPMFreeList(PDLLIST_NODE pListHeader,
+										 RGX_RPM_FREELIST *psFreeList)
+{
+	DLLIST_NODE *psNode;
+	RGX_RPM_DEVMEM_DESC	*psRPMDevMemNode;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32OldValue;
+
+	/*
+	 * Lock protects simultaneous manipulation of:
+	 * - the memory block list
+	 * - the freelist's ui32CurrentFLPages value
+	 */
+	PVR_ASSERT(pListHeader);
+	PVR_ASSERT(psFreeList);
+	PVR_ASSERT(psFreeList->psDevInfo);
+	PVR_ASSERT(psFreeList->psDevInfo->hLockRPMFreeList);
+
+	OSLockAcquire(psFreeList->psDevInfo->hLockRPMFreeList);
+
+	/********************************************************************
+	 * All scene memory blocks must be freed together as non-contiguous
+	 * virtual mappings are not yet supported.
+	 ********************************************************************/
+
+	/* Get node from head of list and remove it */
+	psNode = dllist_get_next_node(pListHeader);
+	PVR_DPF((PVR_DBG_MESSAGE, "Found node %p", psNode));
+	if (psNode)
+	{
+		dllist_remove_node(psNode);
+
+		psRPMDevMemNode = IMG_CONTAINER_OF(psNode, RGX_RPM_DEVMEM_DESC, sMemoryDescBlock);
+		PVR_ASSERT(psRPMDevMemNode);
+		PVR_ASSERT(psRPMDevMemNode->psFreeList);
+		PVR_ASSERT(psRPMDevMemNode->sSceneHierarchyNode.psPMR);
+
+		/* remove scene hierarchy block */
+		PVR_DPF((PVR_DBG_MESSAGE, "Removing scene hierarchy node"));
+		eError = _RGXUnmapRPMPBBlock(&psRPMDevMemNode->sSceneHierarchyNode,
+									 psRPMDevMemNode->psFreeList,
+									 psFreeList->psParentCtx->sSceneMemoryBaseAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXShrinkRPMFreeList: Failed to unmap %d pages with mapping index %d (status %d)",
+					psRPMDevMemNode->sSceneHierarchyNode.ui32NumPhysPages,
+					psRPMDevMemNode->sSceneHierarchyNode.ui32StartOfMappingIndex,
+					eError));
+			goto UnMapError;
+		}
+
+		/* 
+		 * If the grow size is sub OS page size then the page lists may not need updating
+		 */
+		if (psRPMDevMemNode->sRPMPageListNode.eNodeType != NODE_EMPTY)
+		{
+			/* unmap the RPM page table backing pages */
+			PVR_DPF((PVR_DBG_MESSAGE, "Removing RPM page list node"));
+			PVR_ASSERT(psRPMDevMemNode->sRPMPageListNode.psPMR);
+			eError = _RGXUnmapRPMPBBlock(&psRPMDevMemNode->sRPMPageListNode,
+										 psRPMDevMemNode->psFreeList,
+										 psFreeList->psParentCtx->sRPMPageTableBaseAddr);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXShrinkRPMFreeList: Failed to unmap %d pages with mapping index %d (status %d)",
+						psRPMDevMemNode->sRPMPageListNode.ui32NumPhysPages,
+						psRPMDevMemNode->sRPMPageListNode.ui32StartOfMappingIndex,
+						eError));
+				goto UnMapError;
+			}
+		}
+
+		if (psRPMDevMemNode->sRPMFreeListNode.eNodeType != NODE_EMPTY)
+		{
+			/* unmap the RPM free page list backing pages */
+			PVR_DPF((PVR_DBG_MESSAGE, "Removing RPM free list node"));
+			PVR_ASSERT(psRPMDevMemNode->sRPMFreeListNode.psPMR);
+			eError = _RGXUnmapRPMPBBlock(&psRPMDevMemNode->sRPMFreeListNode,
+										 psRPMDevMemNode->psFreeList,
+										 psFreeList->sBaseDevVAddr);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXShrinkRPMFreeList: Failed to unmap %d pages with mapping index %d (status %d)",
+						psRPMDevMemNode->sRPMFreeListNode.ui32NumPhysPages,
+						psRPMDevMemNode->sRPMFreeListNode.ui32StartOfMappingIndex,
+						eError));
+				goto UnMapError;
+			}
+		}
+
+		/* update available RPM pages in freelist (NOTE: may be different from phys page count) */
+		ui32OldValue = psFreeList->ui32CurrentFLPages;
+		psFreeList->ui32CurrentFLPages -= psRPMDevMemNode->ui32NumPages;
+
+		/* check underflow */
+		PVR_ASSERT(ui32OldValue > psFreeList->ui32CurrentFLPages);
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p, ID %d]: shrink by %u pages (current pages %u/%u)",
+								psFreeList,
+								psFreeList->ui32FreelistID,
+								psRPMDevMemNode->ui32NumPages,
+								psFreeList->ui32CurrentFLPages,
+								psFreeList->psParentCtx->ui32UnallocatedPages));
+
+		OSFreeMem(psRPMDevMemNode);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING,"Freelist [0x%p]: shrink denied. PB already at zero PB size (%u pages)",
+								psFreeList,
+								psFreeList->ui32CurrentFLPages));
+		eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN;
+	}
+
+	OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+	return eError;
+
+UnMapError:
+	OSFreeMem(psRPMDevMemNode);
+	OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*!
+ *	_RGXCreateRPMSparsePMR
+ * 
+ * Creates a PMR container with no phys backing pages initially. Phys pages are
+ * allocated and mapped later, when requested by the client or by a HW RPM
+ * Out-of-Memory event.
+ * The sparse PMR is associated with either the RPM context or the RPM freelist(s):
+ * 
+ * RGX_SERVER_RPM_CONTEXT - Scene hierarchy, page table
+ * RGX_RPM_FREELIST - free page list PMR
+ * 
+ * @param	eBlockType - whether block is for scene hierarchy pages or page
+ * 				tables. This parameter is used to calculate size.
+ * @param	ui32NumPages - total number of pages
+ * @param	uiLog2DopplerPageSize - log2 Doppler/RPM page size
+ * @param	ppsPMR - (Output) new PMR container.
+ * 
+ * See the documentation for more details.
+ */
+static
+PVRSRV_ERROR _RGXCreateRPMSparsePMR(CONNECTION_DATA *psConnection,
+									PVRSRV_DEVICE_NODE	 *psDeviceNode,
+									RGX_DEVMEM_NODE_TYPE eBlockType,
+									IMG_UINT32		ui32NumPages,
+									IMG_UINT32		uiLog2DopplerPageSize,
+									PMR				**ppsPMR)
+{
+	PVRSRV_ERROR		eError;
+	IMG_DEVMEM_SIZE_T	uiMaxSize = 0;
+	IMG_UINT32			ui32NumVirtPages = 0; /*!< number of virtual pages to cover virtual range */
+	IMG_UINT32			ui32Log2OSPageSize = OSGetPageShift();
+	IMG_UINT32			ui32ChunkSize = OSGetPageSize();
+	PVRSRV_MEMALLOCFLAGS_T uiCustomFlags = 0;
+
+	/* Work out the allocation logical size = virtual size */
+	switch(eBlockType)
+	{
+		case NODE_EMPTY:
+			PVR_ASSERT(IMG_FALSE);
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		case NODE_SCENE_HIERARCHY:
+			PDUMPCOMMENT("Allocate Scene Hierarchy PMR (Pages %08X)", ui32NumPages);
+			uiMaxSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * (1 << uiLog2DopplerPageSize);
+			break;
+		case NODE_RPM_PAGE_TABLE:
+			PDUMPCOMMENT("Allocate RPM Page Table PMR (Page entries %08X)", ui32NumPages);
+			uiMaxSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * sizeof(RGX_RPM_DATA_RTU_PAGE_TABLE);
+			break;
+		case NODE_RPM_FREE_PAGE_LIST:
+			/* 
+			 * Each RPM free page list (FPL) supports the maximum range.
+			 * In practice the maximum range is divided between the allocations in each FPL.
+			 */
+			PDUMPCOMMENT("Allocate RPM Free Page List PMR (Page entries %08X)", ui32NumPages);
+			uiMaxSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST);
+
+            /* Needed to write page indices into the freelist */
+	        uiCustomFlags |= PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE;
+
+			break;
+		/* no default case because the build should error out if a case is unhandled */
+	}
+
+	uiMaxSize = (uiMaxSize + ui32ChunkSize - 1) & ~(ui32ChunkSize - 1);
+	ui32NumVirtPages = uiMaxSize >> ui32Log2OSPageSize;
+
+	eError = PhysmemNewRamBackedPMR(psConnection,
+									psDeviceNode,
+									uiMaxSize, /* the maximum size which should match num virtual pages * page size */
+									ui32ChunkSize,
+									0,
+									ui32NumVirtPages,
+									NULL,
+									ui32Log2OSPageSize,
+									(PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | uiCustomFlags),
+									strlen("RPM Buffer") + 1,
+									"RPM Buffer",
+									ppsPMR);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "_RGXCreateRPMSparsePMR: Failed to allocate sparse PMR of size: 0x%016" IMG_UINT64_FMTSPECX,
+				 (IMG_UINT64)uiMaxSize));
+	}
+	
+	return eError;
+}
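+
+/*
+ * Worked example (illustrative only): NODE_SCENE_HIERARCHY with a made-up
+ * ui32NumPages of 0x400 and uiLog2DopplerPageSize of 12 gives
+ * uiMaxSize = 0x400 * 4 KiB = 4 MiB; with 4 KiB OS pages this is
+ * ui32NumVirtPages = 0x400, and there is no physical backing until
+ * RGXGrowRPMFreeList maps chunks in via _RGXMapRPMPBBlock.
+ */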
+
+/*!
+ *	_RGXMapRPMPBBlock
+ * 
+ * Maps in a block of phys pages for one of the following:
+ * 
+ * NODE_SCENE_HIERARCHY - scene hierarchy
+ * NODE_RPM_PAGE_TABLE - RPM page table entries
+ * NODE_RPM_FREE_PAGE_LIST - RPM free page list entries
+ * 
+ * @param	psDevMemNode - device mem block descriptor (allocated by caller)
+ * @param	psFreeList - free list descriptor
+ * @param	eBlockType - block type: scene memory, RPM page table or RPM page free list
+ * @param	psDevmemHeap - heap for GPU virtual mapping
+ * @param	ui32NumPages - number of pages for scene memory, OR
+ * 							number of PT entries for RPM page table or page free list
+ * @param	sDevVAddrBase - GPU virtual base address i.e. base address at start of sparse allocation
+ * 
+ * @return	PVRSRV_OK if no error occurred
+ */
+static
+PVRSRV_ERROR _RGXMapRPMPBBlock(RGX_DEVMEM_NODE	*psDevMemNode,
+					RGX_RPM_FREELIST *psFreeList,
+					RGX_DEVMEM_NODE_TYPE eBlockType,
+					DEVMEMINT_HEAP *psDevmemHeap,
+					IMG_UINT32 ui32NumPages,
+					IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+	PVRSRV_ERROR	eError;
+    IMG_UINT64 		sCpuVAddrNULL = 0; 			/* no CPU mapping needed */
+	IMG_UINT32		*paui32AllocPageIndices;	/* table of virtual indices for sparse mapping */
+	IMG_PUINT32 	pui32MappingIndex = NULL;	/* virtual index where next physical chunk is mapped */
+	IMG_UINT32		i;
+	size_t			uiSize = 0;
+	IMG_UINT32		ui32Log2OSPageSize = OSGetPageShift();
+	IMG_UINT32		ui32ChunkSize = OSGetPageSize();
+	IMG_UINT32		ui32NumPhysPages = 0; /*!< number of physical pages for data pages or RPM PTs */
+	PVRSRV_MEMALLOCFLAGS_T uiCustomFlags = 0;
+
+
+	/* Allocate Memory Block for scene hierarchy */
+	switch(eBlockType)
+	{
+		case NODE_EMPTY:
+			PVR_ASSERT(IMG_FALSE);
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		case NODE_SCENE_HIERARCHY:
+			PDUMPCOMMENT("Allocate Scene Hierarchy Block (Pages %08X)", ui32NumPages);
+			uiSize = (size_t)ui32NumPages * (1 << psFreeList->psParentCtx->uiLog2DopplerPageSize);
+			pui32MappingIndex = &psFreeList->psParentCtx->ui32SceneMemorySparseMappingIndex;
+			break;
+		case NODE_RPM_PAGE_TABLE:
+			PDUMPCOMMENT("Allocate RPM Page Table Block (Page entries %08X)", ui32NumPages);
+			uiSize = (size_t)ui32NumPages * sizeof(RGX_RPM_DATA_RTU_PAGE_TABLE);
+			pui32MappingIndex = &psFreeList->psParentCtx->ui32RPMPageTableSparseMappingIndex;
+			break;
+		case NODE_RPM_FREE_PAGE_LIST:
+			PDUMPCOMMENT("Allocate RPM Free Page List Block (Page entries %08X)", ui32NumPages);
+			uiSize = (size_t)ui32NumPages * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST);
+			pui32MappingIndex = &psFreeList->ui32RPMFreeListSparseMappingIndex;
+
+			/* Needed to write page indices into the freelist */
+			uiCustomFlags |= PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE;
+
+			break;
+		/* no default case because the build should error out if a case is unhandled */
+	}
+
+	/* 
+	 * Round size up to multiple of the sparse chunk size = OS page size.
+	 */
+	uiSize = (uiSize + ui32ChunkSize - 1) & ~(ui32ChunkSize - 1);
+	ui32NumPhysPages = uiSize >> ui32Log2OSPageSize;
+
+	paui32AllocPageIndices = OSAllocMem(ui32NumPhysPages * sizeof(IMG_UINT32));
+	if (paui32AllocPageIndices == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_RGXMapRPMPBBlock: failed to allocate sparse mapping index list"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocHost;
+	}
+	for(i=0; i<ui32NumPhysPages; i++)
+	{
+		paui32AllocPageIndices[i] = *pui32MappingIndex + i;
+	}
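+	/*
+	 * The index table is contiguous from the current sparse mapping cursor:
+	 * e.g. with *pui32MappingIndex == 8 and ui32NumPhysPages == 4 the
+	 * entries are {8, 9, 10, 11}, so each new physical chunk extends the
+	 * virtually contiguous mapping.
+	 */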
+
+	/* Set up some state */
+	psDevMemNode->eNodeType = eBlockType;
+	psDevMemNode->psDevMemHeap = psDevmemHeap;
+	if (eBlockType == NODE_SCENE_HIERARCHY)
+	{
+		/* the mapped-in scene hierarchy device address will be used to set up the FPL entries */
+		psDevMemNode->sAddr.uiAddr = sDevVAddrBase.uiAddr + (*pui32MappingIndex * ui32ChunkSize);
+	}
+	psDevMemNode->ui32NumPhysPages = ui32NumPhysPages;
+	psDevMemNode->ui32StartOfMappingIndex = *pui32MappingIndex;
+
+	{
+		if ((eBlockType == NODE_SCENE_HIERARCHY) &&
+			(ui32NumPhysPages > psFreeList->psParentCtx->ui32UnallocatedPages))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "_RGXMapRPMPBBlock: virtual address space exceeded (0x%x pages required, 0x%x pages available).",
+					ui32NumPhysPages, psFreeList->psParentCtx->ui32UnallocatedPages));
+			OSFreeMem(paui32AllocPageIndices);
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		eError = PMRLockSysPhysAddresses(psDevMemNode->psPMR);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "_RGXMapRPMPBBlock: unable to lock PMR physical pages (status %d)", eError));
+			goto ErrorLockPhys;
+		}
+
+		eError = DevmemIntChangeSparse(psDevmemHeap,
+						psDevMemNode->psPMR,
+						ui32NumPhysPages,
+						paui32AllocPageIndices,
+						0,
+						NULL,
+						SPARSE_RESIZE_ALLOC,
+						(PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | uiCustomFlags),
+						sDevVAddrBase,
+						sCpuVAddrNULL);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "_RGXMapRPMPBBlock: change sparse mapping failed with %d pages starting at %d (status %d)",
+					ui32NumPhysPages, *pui32MappingIndex, eError));
+			goto ErrorSparseMapping;
+		}
+
+		/* FIXME: leave locked until destroy */
+		PMRUnlockSysPhysAddresses(psDevMemNode->psPMR);
+	}
+
+	/* 
+	 * Update the mapping index for the next allocation.
+	 * The virtual pages should be contiguous.
+	 */
+	*pui32MappingIndex += ui32NumPhysPages;
+
+	OSFreeMem(paui32AllocPageIndices);
+
+	return PVRSRV_OK;
+
+ErrorSparseMapping:
+	PMRUnlockSysPhysAddresses(psDevMemNode->psPMR);
+
+ErrorLockPhys:
+	OSFreeMem(paui32AllocPageIndices);
+
+ErrorAllocHost:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*!
+ * _RGXUnmapRPMPBBlock
+ * 
+ * NOTE: because the SHF and SHG requests for memory are interleaved, the
+ * page mapping offset cannot be updated (non-contiguous virtual mapping
+ * is not supported).
+ * 
+ * So either
+ *  (i) the allocated virtual address range is unusable after unmap, or
+ * (ii) all of the scene memory must be freed.
+ * 
+ * @param	psDevMemNode - block to free
+ * @param	psFreeList - RPM free list
+ * @param	sDevVAddrBase - the virtual base address (i.e. where page 1 of the PMR is mapped)
+ */
+static
+PVRSRV_ERROR _RGXUnmapRPMPBBlock(RGX_DEVMEM_NODE	*psDevMemNode,
+					RGX_RPM_FREELIST *psFreeList,
+					IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+	PVRSRV_ERROR	eError;
+	IMG_UINT64 		sCpuVAddrNULL = 0; 			/* no CPU mapping needed */
+	IMG_UINT32		*paui32FreePageIndices;		/* table of virtual indices for sparse unmapping */
+	IMG_UINT32		i;
+	IMG_UINT32		ui32NumPhysPages = psDevMemNode->ui32NumPhysPages; /*!< number of physical pages for data pages or RPM PTs */
+
+#if defined(PDUMP)
+	/* Free Memory Block for scene hierarchy */
+	switch(psDevMemNode->eNodeType)
+	{
+		case NODE_EMPTY:
+			PVR_ASSERT(IMG_FALSE);
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		case NODE_SCENE_HIERARCHY:
+			PDUMPCOMMENT("Free Scene Hierarchy Block (Pages %08X)", ui32NumPhysPages);
+			break;
+		case NODE_RPM_PAGE_TABLE:
+			PDUMPCOMMENT("Free RPM Page Table Block (Page entries %08X)", ui32NumPhysPages);
+			break;
+		case NODE_RPM_FREE_PAGE_LIST:
+			PDUMPCOMMENT("Free RPM Free Page List Block (Page entries %08X)", ui32NumPhysPages);
+			break;
+		/* no default case because the build should error out if a case is unhandled */
+	}
+#endif
+
+	paui32FreePageIndices = OSAllocMem(ui32NumPhysPages * sizeof(IMG_UINT32));
+	if (paui32FreePageIndices == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_RGXUnmapRPMPBBlock: failed to allocate sparse mapping index list"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocHost;
+	}
+	for(i=0; i<ui32NumPhysPages; i++)
+	{
+		paui32FreePageIndices[i] = psDevMemNode->ui32StartOfMappingIndex + i;
+	}
+
+	{
+		eError = PMRLockSysPhysAddresses(psDevMemNode->psPMR);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "_RGXUnmapRPMPBBlock: unable to lock PMR physical pages (status %d)", eError));
+			goto ErrorLockPhys;
+		}
+
+		eError = DevmemIntChangeSparse(psDevMemNode->psDevMemHeap,
+						psDevMemNode->psPMR,
+						0, /* no pages are mapped here */
+						NULL,
+						ui32NumPhysPages,
+						paui32FreePageIndices,
+						SPARSE_RESIZE_FREE,
+						(PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE),
+						sDevVAddrBase,
+						sCpuVAddrNULL);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "_RGXUnmapRPMPBBlock: free sparse mapping failed with %d pages starting at %d (status %d)",
+					ui32NumPhysPages, psDevMemNode->ui32StartOfMappingIndex, eError));
+			goto ErrorSparseMapping;
+		}
+
+		PMRUnlockSysPhysAddresses(psDevMemNode->psPMR);
+	}
+
+	OSFreeMem(paui32FreePageIndices);
+
+	return PVRSRV_OK;
+
+ErrorSparseMapping:
+	PMRUnlockSysPhysAddresses(psDevMemNode->psPMR);
+
+ErrorLockPhys:
+	OSFreeMem(paui32FreePageIndices);
+
+ErrorAllocHost:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*!
+ *	RGXCreateRPMFreeList
+ * 
+ * @param	ui32InitFLPages - initial allocation of mapped-in physical pages
+ * @param	ui32GrowFLPages - physical pages to add to scene hierarchy if RPM OOM occurs
+ * @param	sFreeListDevVAddr - virtual base address of free list
+ * @param	ppsFreeList - returns an RPM freelist handle to the client
+ * @param	puiHWFreeList - 'handle' to FW freelist, passed in VRDM kick (FIXME)
+ * @param	bIsExternal - flag which marks the freelist as external
+ * 
+ * Note: the former sRPMPageListDevVAddr and ui32FLSyncAddr parameters are
+ * deprecated and no longer part of the signature; the RPM page list address
+ * is cached in the RPM context.
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRPMFreeList(CONNECTION_DATA *psConnection,
+							   PVRSRV_DEVICE_NODE	 *psDeviceNode,
+							   RGX_SERVER_RPM_CONTEXT	*psRPMContext,
+							   IMG_UINT32			ui32InitFLPages,
+							   IMG_UINT32			ui32GrowFLPages,
+							   IMG_DEV_VIRTADDR		sFreeListDevVAddr,
+							   RGX_RPM_FREELIST	  **ppsFreeList,
+							   IMG_UINT32		   *puiHWFreeList,
+							   IMG_BOOL				bIsExternal)
+{
+	PVRSRV_ERROR				eError;
+	RGXFWIF_RPM_FREELIST		*psFWRPMFreeList;
+	DEVMEM_MEMDESC				*psFWRPMFreelistMemDesc;
+	RGX_RPM_FREELIST			*psFreeList;
+	PVRSRV_RGXDEV_INFO			*psDevInfo = psDeviceNode->pvDevice;
+
+	/* Allocate kernel freelist struct */
+	psFreeList = OSAllocZMem(sizeof(*psFreeList));
+	if (psFreeList == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMFreeList: failed to allocate host data structure"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocHost;
+	}
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psFreeList->psCleanupSync,
+						   "RPM free list cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateRPMFreeList: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto ErrorSyncAlloc;
+	}
+
+	/*
+	 * This FW FreeList context is only mapped into kernel for initialisation.
+	 * Otherwise this allocation is only used by the FW.
+	 * Therefore the GPU cache doesn't need coherency,
+	 * and write-combine is sufficient on the CPU side (the WC buffer will be flushed at the first TA kick)
+	 * 
+	 * TODO - RPM freelist will be modified after creation, but only from host-side.
+	 */
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(*psFWRPMFreeList),
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+							PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+							"FwRPMFreeList",
+							&psFWRPMFreelistMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMFreeList: DevmemFwAllocate for RGXFWIF_RPM_FREELIST failed"));
+		goto ErrorFWFreeListAlloc;
+	}
+
+	/* Initialise host data structures */
+	psFreeList->psConnection = psConnection;
+	psFreeList->psDevInfo = psDevInfo;
+	psFreeList->psParentCtx = psRPMContext;
+	psFreeList->psFWFreelistMemDesc = psFWRPMFreelistMemDesc;
+	psFreeList->sBaseDevVAddr = sFreeListDevVAddr;
+	RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWRPMFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+	psFreeList->ui32FreelistID = psDevInfo->ui32RPMFreelistCurrID++;
+	//psFreeList->ui32MaxFLPages = ui32MaxFLPages;
+	/* TODO: is it really needed? */
+	if(bIsExternal == IMG_FALSE)
+	{
+		psFreeList->ui32InitFLPages = ui32InitFLPages;
+		psFreeList->ui32GrowFLPages = ui32GrowFLPages;
+	}
+	//psFreeList->ui32CurrentFLPages = ui32InitFLPages;
+	psFreeList->ui32RefCount = 0;
+	dllist_init(&psFreeList->sMemoryBlockHead);
+
+	/* Wizard2 -- support per-freelist Doppler virtual page size */
+	psFreeList->uiLog2DopplerPageSize = psRPMContext->uiLog2DopplerPageSize;
+
+	/* Initialise FW data structure */
+	eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWRPMFreeList);
+	PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", ErrorFWFreeListCpuMap);
+
+	/*
+	 * FIXME - the max pages are shared with the other freelists so this
+	 * over-estimates the number of free pages. The full check is
+	 * implemented in RGXGrowRPMFreeList.
+	 */
+	if(bIsExternal == IMG_TRUE)
+	{
+		/* An external RPM FreeList will never grow */
+		psFWRPMFreeList->ui32MaxPages = ui32InitFLPages;
+	}
+	else
+	{
+		psFWRPMFreeList->ui32MaxPages = psFreeList->psParentCtx->ui32TotalRPMPages;
+	}
+	psFWRPMFreeList->ui32CurrentPages = ui32InitFLPages;
+	psFWRPMFreeList->ui32GrowPages = ui32GrowFLPages;
+	psFWRPMFreeList->ui32ReadOffset = 0;
+	psFWRPMFreeList->ui32WriteOffset = RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN; /* FL is full */
+	psFWRPMFreeList->bReadToggle = IMG_FALSE;
+	psFWRPMFreeList->bWriteToggle = IMG_TRUE;
+	psFWRPMFreeList->sFreeListDevVAddr.uiAddr = sFreeListDevVAddr.uiAddr;
+	psFWRPMFreeList->ui32FreeListID = psFreeList->ui32FreelistID;
+	psFWRPMFreeList->bGrowPending = IMG_FALSE;
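+	/*
+	 * Initial state: equal read/write offsets with opposite toggle bits,
+	 * which by the usual circular-buffer convention (a wrap bit
+	 * disambiguates full from empty when offsets match) marks the freshly
+	 * initialised free page list as completely full.
+	 */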
+
+	PVR_DPF((PVR_DBG_MESSAGE, "RPM Freelist %p created: FW freelist: %p, Init pages 0x%08x, Max FL base address " IMG_DEVMEM_SIZE_FMTSPEC ", Init FL base address " IMG_DEVMEM_SIZE_FMTSPEC,
+			psFreeList,
+			psFWRPMFreeList,
+			ui32InitFLPages,
+			sFreeListDevVAddr.uiAddr,
+			psFWRPMFreeList->sFreeListDevVAddr.uiAddr));
+
+	PVR_DPF((PVR_DBG_MESSAGE,"RPM FW Freelist %p created: sync FW addr 0x%08x", psFWRPMFreeList, psFWRPMFreeList->sSyncAddr));
+
+	PDUMPCOMMENT("Dump FW RPM FreeList");
+	DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWRPMFreeList), PDUMP_FLAGS_CONTINUOUS);
+
+	/*
+	 * Separate dump of the Freelist's number of Pages and stack pointer.
+	 * This allows the PB size to be modified easily in the out2.txt files.
+	 */
+	PDUMPCOMMENT("RPM FreeList TotalPages");
+	DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+							offsetof(RGXFWIF_RPM_FREELIST, ui32CurrentPages),
+							psFWRPMFreeList->ui32CurrentPages,
+							PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("RPM FreeList device virtual base address");
+	DevmemPDumpLoadMemValue64(psFreeList->psFWFreelistMemDesc,
+							offsetof(RGXFWIF_RPM_FREELIST, sFreeListDevVAddr),
+							psFWRPMFreeList->sFreeListDevVAddr.uiAddr,
+							PDUMP_FLAGS_CONTINUOUS);
+
+	DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+	if (bIsExternal == IMG_TRUE)
+	{
+		/* Mark the freelist as external */
+		psFreeList->bIsExternal = IMG_TRUE;
+
+		/* For an external RPM FreeList there is no need to:
+		 * 		- create a sparse PMR
+		 * 		- allocate physical memory for the freelist
+		 * 		- add it to the list of freelists
+		 */
+
+		/* return values */
+		*puiHWFreeList = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+		*ppsFreeList = psFreeList;
+
+		return PVRSRV_OK;
+	}
+
+	psFreeList->bIsExternal = IMG_FALSE;
+
+	/*
+	 * Create the sparse PMR for the RPM free page list
+	 */
+	eError = _RGXCreateRPMSparsePMR(psConnection, psDeviceNode,
+									NODE_RPM_FREE_PAGE_LIST,
+									psRPMContext->ui32TotalRPMPages,
+									psRPMContext->uiLog2DopplerPageSize,
+									&psFreeList->psFreeListPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMFreeList: failed to allocate PMR for RPM Free page list (%d)", eError));
+		goto ErrorSparsePMR;
+	}
+
+	/*
+	 * Lock protects simultaneous manipulation of:
+	 * - the memory block list
+	 * - the freelist's ui32CurrentFLPages
+	 */
+	/* Add to list of freelists */
+	OSLockAcquire(psDevInfo->hLockRPMFreeList);
+	psFreeList->psParentCtx->uiFLRefCount++;
+	dllist_add_to_tail(&psDevInfo->sRPMFreeListHead, &psFreeList->sNode);
+	OSLockRelease(psDevInfo->hLockRPMFreeList);
+
+	/*
+	 * Add initial scene hierarchy block
+	 * Allocate phys memory for scene hierarchy, free page list and RPM page-in-use list
+	 */
+	eError = RGXGrowRPMFreeList(psFreeList, ui32InitFLPages, &psFreeList->sMemoryBlockHead);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMFreeList: error during phys memory allocation and mapping (%d)", eError));
+		goto ErrorGrowFreeList;
+	}
+
+	/* return values */
+	*puiHWFreeList = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+	*ppsFreeList = psFreeList;
+
+	return PVRSRV_OK;
+
+	/* Error handling: unwind in reverse order of acquisition */
+ErrorGrowFreeList:
+	/* Remove freelist from list */
+	OSLockAcquire(psDevInfo->hLockRPMFreeList);
+	dllist_remove_node(&psFreeList->sNode);
+	psFreeList->psParentCtx->uiFLRefCount--;
+	OSLockRelease(psDevInfo->hLockRPMFreeList);
+
+	PMRUnrefPMR(psFreeList->psFreeListPMR);
+
+ErrorSparsePMR:
+ErrorFWFreeListCpuMap:
+	RGXUnsetFirmwareAddress(psFWRPMFreelistMemDesc);
+	DevmemFwFree(psDevInfo, psFWRPMFreelistMemDesc);
+
+ErrorFWFreeListAlloc:
+	SyncPrimFree(psFreeList->psCleanupSync);
+
+ErrorSyncAlloc:
+	OSFreeMem(psFreeList);
+
+ErrorAllocHost:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+ *	RGXDestroyRPMFreeList
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRPMFreeList(RGX_RPM_FREELIST *psFreeList)
+{
+	PVRSRV_ERROR eError;
+	//IMG_UINT64 ui64CheckSum;
+
+	PVR_ASSERT(psFreeList);
+
+	if(psFreeList->ui32RefCount != 0 && psFreeList->bIsExternal == IMG_FALSE)
+	{
+		/* Freelist still busy */
+		PVR_DPF((PVR_DBG_WARNING, "Freelist %p is busy", psFreeList));
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	/* Freelist is not in use => start firmware cleanup */
+	eError = RGXFWRequestRPMFreeListCleanUp(psFreeList->psDevInfo,
+											psFreeList->sFreeListFWDevVAddr,
+											psFreeList->psCleanupSync);
+	if(eError != PVRSRV_OK)
+	{
+		/* This can happen if the firmware took too long to handle the cleanup request,
+		 * or if SLC flushes didn't go through (due to some GPU lockup) */
+		return eError;
+	}
+
+	/* update the statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	PVRSRVStatsUpdateFreelistStats(psFreeList->ui32NumGrowReqByApp,
+	                               psFreeList->ui32NumGrowReqByFW,
+	                               psFreeList->ui32InitFLPages,
+	                               psFreeList->ui32NumHighPages,
+	                               0); /* FIXME - owner PID */
+#endif
+
+	/* Destroy FW structures */
+	RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc);
+	DevmemFwFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc);
+
+	if(psFreeList->bIsExternal == IMG_FALSE)
+	{
+		/* Free the phys mem block descriptors. */
+		PVR_DPF((PVR_DBG_WARNING, "Cleaning RPM freelist index %d", psFreeList->ui32FreelistID));
+		while (!dllist_is_empty(&psFreeList->sMemoryBlockHead))
+		{
+			eError = RGXShrinkRPMFreeList(&psFreeList->sMemoryBlockHead, psFreeList);
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+		psFreeList->psParentCtx->uiFLRefCount--;
+
+		/* consistency checks */
+		PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockHead));
+		PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0);
+
+		/* Free RPM Free page list PMR */
+		eError = PMRUnrefPMR(psFreeList->psFreeListPMR);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "RGXDestroyRPMFreeList: Failed to free RPM free page list PMR %p (error %u)",
+					 psFreeList->psFreeListPMR,
+					 eError));
+			PVR_ASSERT(IMG_FALSE);
+		}
+
+		/* Remove RPM FreeList from list */
+		OSLockAcquire(psFreeList->psDevInfo->hLockRPMFreeList);
+		dllist_remove_node(&psFreeList->sNode);
+		OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+	}
+
+	SyncPrimFree(psFreeList->psCleanupSync);
+
+	/* free Freelist */
+	OSFreeMem(psFreeList);
+
+	return eError;
+}
+
+
+/*!
+ *	RGXAddBlockToRPMFreeListKM
+ * 
+ * NOTE: This API isn't used but it's provided for symmetry with the parameter
+ * management API.
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXAddBlockToRPMFreeListKM(RGX_RPM_FREELIST *psFreeList,
+										IMG_UINT32 ui32NumPages)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if we have reference to freelist's PMR */
+	if (psFreeList->psFreeListPMR == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,	"RPM Freelist is not configured for grow"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psFreeList->psParentCtx->hLock);
+#endif
+	/* grow freelist */
+	eError = RGXGrowRPMFreeList(psFreeList,
+								ui32NumPages,
+								&psFreeList->sMemoryBlockHead);
+	if(eError == PVRSRV_OK)
+	{
+		/* update freelist data in firmware */
+		_UpdateFwRPMFreelistSize(psFreeList, IMG_TRUE, IMG_TRUE, ui32NumPages);
+
+		psFreeList->ui32NumGrowReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+		/* Update Stats: add 1 to the "requests by app" counter */
+		PVRSRVStatsUpdateFreelistStats(1,
+		                               0,
+		                               psFreeList->ui32InitFLPages,
+		                               psFreeList->ui32NumHighPages,
+		                               psFreeList->ownerPid);
+#endif
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psFreeList->psParentCtx->hLock);
+#endif
+	return eError;
+}
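+
+/*
+ * Illustrative call only (hypothetical caller, not part of this driver):
+ * after an RPM out-of-memory report the server side could top up a freelist
+ * by its grow unit:
+ *
+ *	eError = RGXAddBlockToRPMFreeListKM(psFreeList, psFreeList->ui32GrowFLPages);
+ */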
+
+
+/*
+ * RGXCreateRPMContext
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRPMContext(CONNECTION_DATA * psConnection,
+								 PVRSRV_DEVICE_NODE	 *psDeviceNode,
+								 RGX_SERVER_RPM_CONTEXT	**ppsRPMContext,
+								 IMG_UINT32			ui32TotalRPMPages,
+								 IMG_UINT32			uiLog2DopplerPageSize,
+								 IMG_DEV_VIRTADDR	sSceneMemoryBaseAddr,
+								 IMG_DEV_VIRTADDR	sDopplerHeapBaseAddr,
+								 DEVMEMINT_HEAP		*psSceneHeap,
+								 IMG_DEV_VIRTADDR	sRPMPageTableBaseAddr,
+								 DEVMEMINT_HEAP		*psRPMPageTableHeap,
+								 DEVMEM_MEMDESC		**ppsMemDesc,
+							     IMG_UINT32		     *puiHWFrameData)
+{
+	PVRSRV_ERROR					eError;
+	PVRSRV_RGXDEV_INFO 				*psDevInfo = psDeviceNode->pvDevice;
+	//DEVMEM_MEMDESC				*psFWRPMContextMemDesc;
+	RGX_SERVER_RPM_CONTEXT			*psRPMContext;
+	RGXFWIF_RAY_FRAME_DATA			*psFrameData;
+	RGXFWIF_DEV_VIRTADDR 			 sFirmwareAddr;
+
+	/* Allocate kernel RPM context */
+	psRPMContext = OSAllocZMem(sizeof(*psRPMContext));
+	if (psRPMContext == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: failed to allocate host data structure"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocHost;
+	}
+
+	*ppsRPMContext = psRPMContext;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psRPMContext->hLock, LOCK_TYPE_NONE);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+									__func__,
+									PVRSRVGetErrorStringKM(eError)));
+		goto ErrorCreateLock;
+	}
+#endif
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psRPMContext->psCleanupSync,
+						   "RPM context cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateRPMContext: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto ErrorSyncAlloc;
+	}
+
+	/*
+	 * 1. Create the sparse PMR for scene hierarchy
+	 */
+	eError = _RGXCreateRPMSparsePMR(psConnection, psDeviceNode,
+									NODE_SCENE_HIERARCHY,
+									ui32TotalRPMPages,
+									uiLog2DopplerPageSize,
+									&psRPMContext->psSceneHierarchyPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: failed to allocate PMR for Scene hierarchy (%d)", eError));
+		goto ErrorSparsePMR1;
+	}
+
+	/*
+	 * 2. Create the sparse PMR for the RPM page list
+	 */
+	eError = _RGXCreateRPMSparsePMR(psConnection, psDeviceNode,
+									NODE_RPM_PAGE_TABLE,
+									ui32TotalRPMPages,
+									uiLog2DopplerPageSize,
+									&psRPMContext->psRPMPageTablePMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: failed to allocate PMR for RPM Page list (%d)", eError));
+		goto ErrorSparsePMR2;
+	}
+
+	/* Allocate FW structure and return FW address to client */
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(*psFrameData),
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+							"FwRPMContext",
+							ppsMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: DevmemFwAllocate for RGXFWIF_RAY_FRAME_DATA failed"));
+		goto ErrorFWRPMContextAlloc;
+	}
+
+	/* Update the unallocated pages, which are shared between the RPM freelists */
+	psRPMContext->ui32UnallocatedPages = psRPMContext->ui32TotalRPMPages = ui32TotalRPMPages;
+	psRPMContext->psDeviceNode = psDeviceNode;
+	psRPMContext->psFWRPMContextMemDesc = *ppsMemDesc;
+	psRPMContext->uiLog2DopplerPageSize = uiLog2DopplerPageSize;
+
+	/* Cache the virtual alloc state for future phys page mapping */
+	psRPMContext->sDopplerHeapBaseAddr = sDopplerHeapBaseAddr;
+	psRPMContext->sSceneMemoryBaseAddr = sSceneMemoryBaseAddr;
+	psRPMContext->psSceneHeap = psSceneHeap;
+	psRPMContext->sRPMPageTableBaseAddr = sRPMPageTableBaseAddr;
+	psRPMContext->psRPMPageTableHeap = psRPMPageTableHeap;
+
+	/*
+	 * TODO - implement RPM abort control using HW frame data to track
+	 * abort status in RTU.
+	 */
+	RGXSetFirmwareAddress(&sFirmwareAddr, *ppsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+	*puiHWFrameData = sFirmwareAddr.ui32Addr;
+
+	//eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, (void **)&psFrameData);
+	//PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", ErrorFrameDataCpuMap);
+
+	/*
+	 * TODO: pdumping
+	 */
+
+
+	return PVRSRV_OK;
+
+	/* Error handling */
+
+	/* Unreachable while the DevmemAcquireCpuVirtAddr call above remains commented out */
+	//DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+
+ErrorFWRPMContextAlloc:
+	PMRUnrefPMR(psRPMContext->psRPMPageTablePMR);
+
+ErrorSparsePMR2:
+	PMRUnrefPMR(psRPMContext->psSceneHierarchyPMR);
+
+ErrorSparsePMR1:
+	SyncPrimFree(psRPMContext->psCleanupSync);
+
+ErrorSyncAlloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psRPMContext->hLock);
+
+ErrorCreateLock:
+#endif
+	OSFreeMem(psRPMContext);
+
+ErrorAllocHost:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+ * RGXDestroyRPMContext
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRPMContext(RGX_SERVER_RPM_CONTEXT *psCleanupData)
+{
+	PVRSRV_ERROR				 eError;
+	PVRSRV_RGXDEV_INFO 			*psDevInfo;
+	PRGXFWIF_RAY_FRAME_DATA		 psFrameData;
+
+	/* Wait for FW to process all commands */
+
+	PVR_ASSERT(psCleanupData);
+
+	RGXSetFirmwareAddress(&psFrameData, psCleanupData->psFWRPMContextMemDesc, 0, RFW_FWADDR_NOREF_FLAG);
+
+	/* Cleanup frame data in SHG */
+	eError = RGXFWRequestRayFrameDataCleanUp(psCleanupData->psDeviceNode,
+										  psFrameData,
+										  psCleanupData->psCleanupSync,
+										  RGXFWIF_DM_SHG);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "FrameData busy in SHG"));
+		return eError;
+	}
+
+	psDevInfo = psCleanupData->psDeviceNode->pvDevice;
+
+	/* Cleanup frame data in RTU */
+	eError = RGXFWRequestRayFrameDataCleanUp(psCleanupData->psDeviceNode,
+										  psFrameData,
+										  psCleanupData->psCleanupSync,
+										  RGXFWIF_DM_RTU);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "FrameData busy in RTU"));
+		return eError;
+	}
+
+	/* Free Scene hierarchy PMR (We should be the only one that holds a ref on the PMR) */
+	eError = PMRUnrefPMR(psCleanupData->psSceneHierarchyPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "RGXDestroyRPMContext: Failed to free scene hierarchy PMR %p (error %u)",
+				 psCleanupData->psSceneHierarchyPMR,
+				 eError));
+		PVR_ASSERT(IMG_FALSE);
+	}
+
+	/* Free RPM Page list PMR */
+	eError = PMRUnrefPMR(psCleanupData->psRPMPageTablePMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "RGXDestroyRPMContext: Failed to free RPM page list PMR %p (error %u)",
+				 psCleanupData->psRPMPageTablePMR,
+				 eError));
+		PVR_ASSERT(IMG_FALSE);
+	}
+
+	if (psCleanupData->uiFLRefCount > 0)
+	{
+		/* Kernel RPM freelists hold reference to RPM context */
+		PVR_DPF((PVR_DBG_WARNING, "RGXDestroyRPMContext: Free list ref count non-zero."));
+		return PVRSRV_ERROR_NONZERO_REFCOUNT;
+	}
+
+	/* If we got here then SHG and RTU operations on this FrameData have finished */
+	SyncPrimFree(psCleanupData->psCleanupSync);
+
+	/* Free the FW RPM descriptor */
+	RGXUnsetFirmwareAddress(psCleanupData->psFWRPMContextMemDesc);
+	DevmemFwFree(psDevInfo, psCleanupData->psFWRPMContextMemDesc);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psCleanupData->hLock);
+#endif
+
+	OSFreeMem(psCleanupData);
+
+	return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVRGXCreateRayContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA				*psConnection,
+											PVRSRV_DEVICE_NODE			*psDeviceNode,
+											IMG_UINT32					ui32Priority,
+											IMG_DEV_VIRTADDR			sVRMCallStackAddr,
+											IMG_UINT32					ui32FrameworkRegisterSize,
+											IMG_PBYTE					pabyFrameworkRegisters,
+											IMG_HANDLE					hMemCtxPrivData,
+											RGX_SERVER_RAY_CONTEXT	**ppsRayContext)
+{
+	PVRSRV_ERROR				eError;
+	PVRSRV_RGXDEV_INFO 			*psDevInfo = psDeviceNode->pvDevice;
+	RGX_SERVER_RAY_CONTEXT		*psRayContext;
+	DEVMEM_MEMDESC				*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_COMMON_CONTEXT_INFO		sInfo;
+	RGXFWIF_FWRAYCONTEXT		*pFWRayContext;
+	IMG_UINT32 i;
+
+	/* Prepare cleanup structure */
+	*ppsRayContext = NULL;
+	psRayContext = OSAllocZMem(sizeof(*psRayContext));
+	if (psRayContext == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psRayContext->hLock, LOCK_TYPE_NONE);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+									__func__,
+									PVRSRVGetErrorStringKM(eError)));
+		goto fail_createlock;
+	}
+#endif
+
+	psRayContext->psDeviceNode = psDeviceNode;
+
+	/*
+		Allocate device memory for the firmware ray context.
+	*/
+	PDUMPCOMMENT("Allocate RGX firmware ray context");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_FWRAYCONTEXT),
+							RGX_FWCOMCTX_ALLOCFLAGS,
+							"FwRayContext",
+							&psRayContext->psFWRayContextMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to allocate firmware ray context (%u)",
+				eError));
+		goto fail_fwraycontext;
+	}
+	
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psRayContext->psCleanupSync,
+						   "Ray context cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto fail_syncalloc;
+	}
+	
+	/* 
+	 * Create the FW framework buffer
+	 */
+	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, &psRayContext->psFWFrameworkMemDesc, ui32FrameworkRegisterSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to allocate firmware GPU framework state (%u)",
+				eError));
+		goto fail_frameworkcreate;
+	}
+	
+	/* Copy the Framework client data into the framework buffer */
+	eError = PVRSRVRGXFrameworkCopyCommand(psRayContext->psFWFrameworkMemDesc, pabyFrameworkRegisters, ui32FrameworkRegisterSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to populate the framework buffer (%u)",
+				eError));
+		goto fail_frameworkcopy;
+	}
+
+	sInfo.psFWFrameworkMemDesc = psRayContext->psFWFrameworkMemDesc;
+	
+	eError = _CreateSHContext(psConnection,
+							  psDeviceNode,
+							  psRayContext->psFWRayContextMemDesc,
+							  offsetof(RGXFWIF_FWRAYCONTEXT, sSHGContext),
+							  psFWMemContextMemDesc,
+							  sVRMCallStackAddr,
+							  ui32Priority,
+							  &sInfo,
+							  &psRayContext->sSHData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_shcontext;
+	}
+
+	eError = _CreateRSContext(psConnection,
+							  psDeviceNode,
+							  psRayContext->psFWRayContextMemDesc,
+							  offsetof(RGXFWIF_FWRAYCONTEXT, sRTUContext),
+							  psFWMemContextMemDesc,
+							  ui32Priority,
+							  &sInfo,
+							  &psRayContext->sRSData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_rscontext;
+	}
+	
+	/*
+		Temporarily map the firmware context to the kernel and init it
+	*/
+	eError = DevmemAcquireCpuVirtAddr(psRayContext->psFWRayContextMemDesc,
+									  (void **)&pFWRayContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware ray context to CPU (%s)",
+								__FUNCTION__,
+								PVRSRVGetErrorStringKM(eError)));
+		goto fail_rscontext;
+	}
+
+	
+	for (i = 0; i < DPX_MAX_RAY_CONTEXTS; i++)
+	{
+		/* Allocate the frame context client CCB */
+		eError = RGXCreateCCB(psDevInfo,
+							  RGX_RTU_CCB_SIZE_LOG2,
+							  psConnection,
+							  REQ_TYPE_FC0 + i,
+							  psRayContext->sRSData.psServerCommonContext,
+							  &psRayContext->sRSData.psFCClientCCB[i],
+							  &psRayContext->sRSData.psFCClientCCBMemDesc[i],
+							  &psRayContext->sRSData.psFCClientCCBCtrlMemDesc[i]);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: failed to create CCB for frame context %u (%s)",
+									__FUNCTION__,
+									i,
+									PVRSRVGetErrorStringKM(eError)));
+			goto fail_rscontext;
+		}
+
+		/* Set the firmware CCB device addresses in the firmware common context */
+		RGXSetFirmwareAddress(&pFWRayContext->psCCB[i],
+							  psRayContext->sRSData.psFCClientCCBMemDesc[i],
+							  0, RFW_FWADDR_FLAG_NONE);
+		RGXSetFirmwareAddress(&pFWRayContext->psCCBCtl[i],
+							  psRayContext->sRSData.psFCClientCCBCtrlMemDesc[i],
+							  0, RFW_FWADDR_FLAG_NONE);
+	}
+	
+	pFWRayContext->ui32ActiveFCMask = 0;
+	pFWRayContext->ui32NextFC = RGXFWIF_INVALID_FRAME_CONTEXT;
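+	/*
+	 * No frame context is active yet: the active mask is cleared and
+	 * ui32NextFC stays invalid until the first kick selects one.
+	 */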
+
+	/* We've finished the setup so release the CPU mapping */
+	DevmemReleaseCpuVirtAddr(psRayContext->psFWRayContextMemDesc);
+	
+	/*
+		As the common context alloc will dump the SH and RS common contexts
+		after they've been set up, we skip over the two common contexts and
+		dump the rest of the structure
+	*/
+	PDUMPCOMMENT("Dump shared part of ray context");
+	DevmemPDumpLoadMem(psRayContext->psFWRayContextMemDesc,
+					   (sizeof(RGXFWIF_FWCOMMONCONTEXT) * 2),
+					   sizeof(RGXFWIF_FWRAYCONTEXT) - (sizeof(RGXFWIF_FWCOMMONCONTEXT) * 2),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	OSWRLockAcquireWrite(psDevInfo->hRaytraceCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sRaytraceCtxtListHead), &(psRayContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hRaytraceCtxListLock);
+
+	*ppsRayContext = psRayContext;
+	return PVRSRV_OK;
+
+fail_rscontext:
+	_DestroySHContext(&psRayContext->sSHData,
+					  psDeviceNode,
+					  psRayContext->psCleanupSync);
+fail_shcontext:
+fail_frameworkcopy:
+	DevmemFwFree(psDevInfo, psRayContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+	SyncPrimFree(psRayContext->psCleanupSync);
+fail_syncalloc:
+	DevmemFwFree(psDevInfo, psRayContext->psFWRayContextMemDesc);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psRayContext->hLock);
+fail_createlock:
+#endif
+fail_fwraycontext:
+	OSFreeMem(psRayContext);
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+
+/*
+ * PVRSRVRGXDestroyRayContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext)
+{
+	PVRSRV_ERROR				eError;
+	IMG_UINT32 i;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psRayContext->psDeviceNode->pvDevice;
+
+	/* remove node from list before calling destroy - as destroy, if successful
+	 * will invalidate the node
+	 * must be re-added if destroy fails
+	 */
+	OSWRLockAcquireWrite(psDevInfo->hRaytraceCtxListLock);
+	dllist_remove_node(&(psRayContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hRaytraceCtxListLock);
+
+	/* Cleanup the SH context if we haven't already */
+	if ((psRayContext->ui32CleanupStatus & RAY_CLEANUP_SH_COMPLETE) == 0)
+	{
+		eError = _DestroySHContext(&psRayContext->sSHData,
+								   psRayContext->psDeviceNode,
+								   psRayContext->psCleanupSync);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			psRayContext->ui32CleanupStatus |= RAY_CLEANUP_SH_COMPLETE;
+		}
+		else
+		{
+			goto e0;
+		}
+	}
+
+	/* Cleanup the RS if we haven't already */
+	if ((psRayContext->ui32CleanupStatus & RAY_CLEANUP_RS_COMPLETE) == 0)
+	{
+		eError = _DestroyRSContext(&psRayContext->sRSData,
+								   psRayContext->psDeviceNode,
+								   psRayContext->psCleanupSync);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			psRayContext->ui32CleanupStatus |= RAY_CLEANUP_RS_COMPLETE;
+		}
+		else
+		{
+			goto e0;
+		}
+	}
+
+#if 0
+	/*
+	 * 	FIXME - De-allocate RPM freelists (should be called from UM)
+	 */
+	RGXDestroyRPMFreeList(psRayContext->sSHData.psSHFFreeList);
+	RGXDestroyRPMFreeList(psRayContext->sSHData.psSHGFreeList);
+#endif
+	
+	for (i = 0; i < DPX_MAX_RAY_CONTEXTS; i++)
+	{
+		RGXUnsetFirmwareAddress(psRayContext->sRSData.psFCClientCCBMemDesc[i]);
+		RGXUnsetFirmwareAddress(psRayContext->sRSData.psFCClientCCBCtrlMemDesc[i]);
+		RGXDestroyCCB(psDevInfo, psRayContext->sRSData.psFCClientCCB[i]);
+	}
+
+	/*
+		Only if both SH and RS contexts have been cleaned up can we
+		free the shared resources
+	*/
+	if (psRayContext->ui32CleanupStatus == (RAY_CLEANUP_RS_COMPLETE | RAY_CLEANUP_SH_COMPLETE))
+	{
+		/* Free the framework buffer */
+		DevmemFwFree(psDevInfo, psRayContext->psFWFrameworkMemDesc);
+	
+		/* Free the firmware ray context */
+		DevmemFwFree(psDevInfo, psRayContext->psFWRayContextMemDesc);
+
+		/* Free the cleanup sync */
+		SyncPrimFree(psRayContext->psCleanupSync);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockDestroy(psRayContext->hLock);
+#endif
+
+		OSFreeMem(psRayContext);
+	}
+
+	return PVRSRV_OK;
+
+e0:
+	OSWRLockAcquireWrite(psDevInfo->hRaytraceCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sRaytraceCtxtListHead), &(psRayContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hRaytraceCtxListLock);
+	return eError;
+}
+
+/*
+ * PVRSRVRGXKickRSKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickRSKM(RGX_SERVER_RAY_CONTEXT		*psRayContext,
+								IMG_UINT32					ui32ClientCacheOpSeqNum,
+								IMG_UINT32					ui32ClientFenceCount,
+								SYNC_PRIMITIVE_BLOCK		**pauiClientFenceUFOSyncPrimBlock,
+								IMG_UINT32					*paui32ClientFenceSyncOffset,
+								IMG_UINT32					*paui32ClientFenceValue,
+								IMG_UINT32					ui32ClientUpdateCount,
+								SYNC_PRIMITIVE_BLOCK		**pauiClientUpdateUFOSyncPrimBlock,
+								IMG_UINT32					*paui32ClientUpdateSyncOffset,
+								IMG_UINT32					*paui32ClientUpdateValue,
+								IMG_UINT32					ui32ServerSyncPrims,
+								IMG_UINT32					*paui32ServerSyncFlags,
+								SERVER_SYNC_PRIMITIVE 		**pasServerSyncs,
+								PVRSRV_FENCE				iCheckFence,
+								PVRSRV_TIMELINE				iUpdateTimeline,
+								PVRSRV_FENCE				*piUpdateFence,
+								IMG_CHAR					szUpdateFenceName[32],
+								IMG_UINT32					ui32CmdSize,
+								IMG_PBYTE					pui8DMCmd,
+								IMG_UINT32					ui32FCCmdSize,
+								IMG_PBYTE					pui8FCDMCmd,
+								IMG_UINT32					ui32FrameContextID,
+								IMG_UINT32					ui32PDumpFlags,
+								IMG_UINT32					ui32ExtJobRef,
+								IMG_DEV_VIRTADDR			sRobustnessResetReason)
+{
+	RGXFWIF_KCCB_CMD		sRSKCCBCmd;
+	RGX_CCB_CMD_HELPER_DATA	asRSCmdHelperData[1] = {{0}};
+	RGX_CCB_CMD_HELPER_DATA asFCCmdHelperData[1] = {{0}};
+	PVRSRV_ERROR			eError;
+	PVRSRV_ERROR			eError1;
+	PVRSRV_ERROR			eError2;
+	RGX_SERVER_RAY_RS_DATA *psRSData = &psRayContext->sRSData;
+	IMG_UINT32				i;
+	IMG_UINT32				ui32FCWoff;
+	IMG_UINT32				ui32RTUCmdOffset = 0;
+	IMG_UINT32				ui32JobId;
+	IMG_UINT32				ui32FWCtx;
+	PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+	PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+	PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+
+	IMG_UINT32 ui32IntClientFenceCount = 0;
+	PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+	IMG_UINT32 *paui32IntFenceValue = NULL;
+	IMG_UINT32 ui32IntClientUpdateCount = 0;
+	PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+	IMG_UINT32 *paui32IntUpdateValue = NULL;
+	PVRSRV_FENCE iUpdateFence = PVRSRV_FENCE_INVALID;
+	IMG_UINT32               uiCheckFenceUID = 0;
+	IMG_UINT32               uiUpdateFenceUID = 0;
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	struct pvr_sync_append_data *psFDFenceData = NULL;
+#endif
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+	PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+	IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+	void *pvUpdateFenceFinaliseData = NULL;
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	if (iUpdateTimeline >= 0 && !piUpdateFence)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#if !defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	if (iUpdateTimeline >= 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Providing update timeline (%d) in non-supporting driver",
+			__func__, iUpdateTimeline));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	if (iCheckFence >= 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Providing check fence (%d) in non-supporting driver",
+			__func__, iCheckFence));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#endif /* !defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
+	/* Sanity check the server fences */
+	for (i=0;i<ui32ServerSyncPrims;i++)
+	{
+		if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on RS) must fence", __FUNCTION__));
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+
+
+	/* Ensure the string is null-terminated (Required for safety) */
+	szUpdateFenceName[31] = '\0';
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psRayContext->hLock);
+#endif
+
+	ui32JobId = OSAtomicIncrement(&psRayContext->hJobId);
+
+	ui32IntClientFenceCount  = ui32ClientFenceCount;
+	eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListFence,
+							ui32ClientFenceCount,
+							pauiClientFenceUFOSyncPrimBlock,
+							paui32ClientFenceSyncOffset);
+	if(eError != PVRSRV_OK)
+	{
+		goto err_populate_sync_addr_list;
+	}
+	if (ui32IntClientFenceCount && !pauiIntFenceUFOAddress)
+	{
+		pauiIntFenceUFOAddress = psRayContext->sSyncAddrListFence.pasFWAddrs;
+	}
+
+	paui32IntFenceValue      = paui32ClientFenceValue;
+	ui32IntClientUpdateCount = ui32ClientUpdateCount;
+	eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListUpdate,
+							ui32ClientUpdateCount,
+							pauiClientUpdateUFOSyncPrimBlock,
+							paui32ClientUpdateSyncOffset);
+	if(eError != PVRSRV_OK)
+	{
+		goto err_populate_sync_addr_list;
+	}
+	if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress)
+	{
+		pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs;
+	}
+	paui32IntUpdateValue = paui32ClientUpdateValue;
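+	/*
+	 * The "Int" (internal) fence/update lists start out as the client-supplied
+	 * arrays; the sync-checkpoint code below may extend them with resolved
+	 * checkpoints from the input fence and a timeline update value before the
+	 * CCB commands are built.
+	 */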
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (iCheckFence >= 0 || iUpdateTimeline >= 0)
+	{
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		eError =
+		  pvr_sync_append_fences(szUpdateFenceName,
+		                         iCheckFence,
+		                         iUpdateTimeline,
+		                         ui32IntClientUpdateCount,
+		                         pauiIntUpdateUFOAddress,
+		                         paui32IntUpdateValue,
+		                         ui32IntClientFenceCount,
+		                         pauiIntFenceUFOAddress,
+		                         paui32IntFenceValue,
+		                         &psFDFenceData);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_syncinit;
+		}
+		pvr_sync_get_updates(psFDFenceData, &ui32IntClientUpdateCount,
+			&pauiIntUpdateUFOAddress, &paui32IntUpdateValue);
+		pvr_sync_get_checks(psFDFenceData, &ui32IntClientFenceCount,
+			&pauiIntFenceUFOAddress, &paui32IntFenceValue);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psRayContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __FUNCTION__, iCheckFence, (void*)psRayContext->psDeviceNode->hSyncCheckpointContext));
+		/* Resolve the sync checkpoints that make up the input fence */
+		eError = SyncCheckpointResolveFence(psRayContext->psDeviceNode->hSyncCheckpointContext,
+											iCheckFence,
+											&ui32FenceSyncCheckpointCount,
+											&apsFenceSyncCheckpoints,
+		                                    &uiCheckFenceUID);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __FUNCTION__, eError));
+			goto fail_resolve_input_fence;
+		}
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __FUNCTION__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(RAY_CHECKPOINT_DEBUG)
+		if (ui32FenceSyncCheckpointCount > 0)
+		{
+			IMG_UINT32 ii;
+			for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+			{
+				PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __FUNCTION__, ii, (void*)psNextCheckpoint));
+			}
+		}
+#endif
+		/* Create the output fence (if required) */
+		if (piUpdateFence)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d,  psRayContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __FUNCTION__, iUpdateFence, iUpdateTimeline, (void*)psRayContext->psDeviceNode->hSyncCheckpointContext));
+			eError = SyncCheckpointCreateFence(psRayContext->psDeviceNode,
+			                                   szUpdateFenceName,
+											   iUpdateTimeline,
+											   psRayContext->psDeviceNode->hSyncCheckpointContext,
+											   &iUpdateFence,
+											   &uiUpdateFenceUID,
+											   &pvUpdateFenceFinaliseData,
+											   &psUpdateSyncCheckpoint,
+											   (void*)&psFenceTimelineUpdateSync,
+											   &ui32FenceTimelineUpdateValue);
+			if (eError != PVRSRV_OK)
+			{
+				goto fail_create_output_fence;
+			}
+
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (iUpdateFence=%d)", __FUNCTION__, iUpdateFence));
+
+			/* Append the sync prim update for the timeline (if required) */
+			if (psFenceTimelineUpdateSync)
+			{
+				IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+				/* Allocate memory to hold the list of update values (including our timeline update) */
+				pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+				if (!pui32IntAllocatedUpdateValues)
+				{
+					/* Failed to allocate memory */
+					eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto fail_alloc_update_values_mem;
+				}
+				OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
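+				/* The 0xbb fill is presumably a debug aid: any slot still reading
+				 * 0xbbbbbbbb later was never given a real update value. */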
+				/* Copy the update values into the new memory, then append our timeline update value */
+				OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+				/* Now set the additional update value */
+				pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+				*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+				ui32IntClientUpdateCount++;
+#if defined(RAY_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Now append the timeline sync prim addr to the ray context update list */
+				SyncAddrListAppendSyncPrim(&psRayContext->sSyncAddrListUpdate,
+				                           psFenceTimelineUpdateSync);
+#if defined(RAY_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+				paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+			}
+		}
+
+		if (ui32FenceSyncCheckpointCount)
+		{
+			/* Append the checks (from input fence) */
+			if (ui32FenceSyncCheckpointCount > 0)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to Ray RS Fence (&psRayContext->sSyncAddrListFence=<%p>)...", __FUNCTION__, ui32FenceSyncCheckpointCount, (void*)&psRayContext->sSyncAddrListFence));
+#if defined(RAY_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+					for (iii=0; iii<ui32IntClientFenceCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntFenceUFOAddress[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				SyncAddrListAppendCheckpoints(&psRayContext->sSyncAddrListFence,
+											  ui32FenceSyncCheckpointCount,
+											  apsFenceSyncCheckpoints);
+				if (!pauiIntFenceUFOAddress)
+				{
+					pauiIntFenceUFOAddress = psRayContext->sSyncAddrListFence.pasFWAddrs;
+				}
+				ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+			}
+#if defined(RAY_CHECKPOINT_DEBUG)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+		if (psUpdateSyncCheckpoint)
+		{
+			/* Append the update (from output fence) */
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to Ray RS Update (&psRayContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __FUNCTION__, (void*)&psRayContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+			SyncAddrListAppendCheckpoints(&psRayContext->sSyncAddrListUpdate,
+										  1,
+										  &psUpdateSyncCheckpoint);
+			if (!pauiIntUpdateUFOAddress)
+			{
+				pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs;
+			}
+			ui32IntClientUpdateCount++;
+#if defined(RAY_CHECKPOINT_DEBUG)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __FUNCTION__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+
+#if (ENABLE_RAY_UFO_DUMP == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: dumping Ray (RS) fence/updates syncs...", __FUNCTION__));
+		{
+			IMG_UINT32 ii;
+			PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+			IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+			PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+			IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+			/* Dump Fence syncs and Update syncs */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Ray (RS) fence syncs (&psRayContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __FUNCTION__, ui32IntClientFenceCount, (void*)&psRayContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+			for (ii=0; ii<ui32IntClientFenceCount; ii++)
+			{
+				if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+					pui32TmpIntFenceValue++;
+				}
+				psTmpIntFenceUFOAddress++;
+			}
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Ray (RS) update syncs (&psRayContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __FUNCTION__, ui32IntClientUpdateCount, (void*)&psRayContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+			for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+			{
+				if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __FUNCTION__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+					pui32TmpIntUpdateValue++;
+				}
+				psTmpIntUpdateUFOAddress++;
+			}
+		}
+#endif
+
+	RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRayContext->psDeviceNode->pvDevice,
+	                          & pPreAddr,
+	                          & pPostAddr,
+	                          & pRMWUFOAddr);
+
+
+	/* If no DM command is supplied, submit a NULL (padding) command instead */
+	eError = RGXCmdHelperInitCmdCCB(psRSData->psFCClientCCB[ui32FrameContextID],
+	                                0,
+	                                NULL,
+	                                NULL,
+	                                ui32IntClientUpdateCount,
+	                                pauiIntUpdateUFOAddress,
+	                                paui32IntUpdateValue,
+	                                ui32ServerSyncPrims,
+	                                paui32ServerSyncFlags,
+	                                SYNC_FLAG_MASK_ALL,
+	                                pasServerSyncs,
+	                                ui32CmdSize,
+	                                pui8DMCmd,
+	                                & pPreAddr,
+	                                & pPostAddr,
+	                                & pRMWUFOAddr,
+	                                (pui8DMCmd != NULL) ? RGXFWIF_CCB_CMD_TYPE_RTU : RGXFWIF_CCB_CMD_TYPE_NULL,
+	                                ui32ExtJobRef,
+	                                ui32JobId,
+	                                ui32PDumpFlags,
+	                                NULL,
+	                                "FC",
+	                                asFCCmdHelperData,
+	                                sRobustnessResetReason);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_initcmd;
+	}
+
+	eError = RGXCmdHelperAcquireCmdCCB(IMG_ARR_NUM_ELEMS(asFCCmdHelperData),
+	                                   asFCCmdHelperData);
+	if (eError != PVRSRV_OK)
+	{
+		goto PVRSRVRGXKickRSKM_Exit;
+	}
+	
+	ui32FCWoff = RGXCmdHelperGetCommandSize(IMG_ARR_NUM_ELEMS(asFCCmdHelperData),
+	                                        asFCCmdHelperData);
+	
+	*(IMG_UINT32*)pui8FCDMCmd = RGXGetHostWriteOffsetCCB(psRSData->psFCClientCCB[ui32FrameContextID]) + ui32FCWoff;
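+	/*
+	 * The FC DM command payload is patched with the FC CCB write offset as it
+	 * will be once the commands acquired above are inserted (current host
+	 * write offset plus their combined size), presumably so the firmware
+	 * knows how far the frame context CCB is valid.
+	 */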
+
+	/*
+		We should reserve space in the kernel CCB here and fill in the command
+		directly.
+		This is so if there isn't space in the kernel CCB we can return with
+		retry back to services client before we take any operations
+	*/
+
+	/*
+		We might only be kicking to flush out a padding packet, so only submit
+		the command if the create was successful
+	*/
+	eError1 = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psRSData->psServerCommonContext),
+	                                 ui32ClientFenceCount,
+	                                 pauiIntFenceUFOAddress,
+	                                 paui32IntFenceValue,
+	                                 0,
+                                     NULL,
+                                     NULL,
+	                                 ui32ServerSyncPrims,
+	                                 paui32ServerSyncFlags,
+	                                 SYNC_FLAG_MASK_ALL,
+	                                 pasServerSyncs,
+	                                 ui32FCCmdSize,
+	                                 pui8FCDMCmd,
+                                     NULL,
+	                                 & pPostAddr,
+	                                 & pRMWUFOAddr,
+	                                 RGXFWIF_CCB_CMD_TYPE_RTU_FC,
+	                                 ui32ExtJobRef,
+	                                 ui32JobId,
+	                                 ui32PDumpFlags,
+	                                 NULL,
+	                                 "RS",
+	                                 asRSCmdHelperData,
+									 sRobustnessResetReason);
+	if (eError1 != PVRSRV_OK)
+	{
+		goto PVRSRVRGXKickRSKM_Exit;
+	}
+
+	eError1 = RGXCmdHelperAcquireCmdCCB(IMG_ARR_NUM_ELEMS(asRSCmdHelperData),
+	                                    asRSCmdHelperData);
+	if (eError1 != PVRSRV_OK)
+	{
+		goto PVRSRVRGXKickRSKM_Exit;
+	}
+	
+	
+	/*
+		We should reserve space in the kernel CCB here and fill in the command
+		directly.
+		That way, if there isn't space in the kernel CCB, we can return a retry
+		to the services client before taking any sync operations.
+	*/
+
+	/*
+		We might only be kicking to flush out a padding packet, so only submit
+		the command if the create was successful.
+	*/
+	if (eError == PVRSRV_OK)
+	{
+		/*
+			All the required resources are ready at this point, we can't fail so
+			take the required server sync operations and commit all the resources
+		*/
+		RGXCmdHelperReleaseCmdCCB(IMG_ARR_NUM_ELEMS(asFCCmdHelperData),
+		                          asFCCmdHelperData, "FC", 0);
+	}
+	
+	if (eError1 == PVRSRV_OK)
+	{
+		/*
+			All the required resources are ready at this point, we can't fail so
+			take the required server sync operations and commit all the resources
+		*/
+		ui32RTUCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRSData->psServerCommonContext));
+		RGXCmdHelperReleaseCmdCCB(IMG_ARR_NUM_ELEMS(asRSCmdHelperData),
+		                          asRSCmdHelperData, "RS",
+		                          FWCommonContextGetFWAddress(psRSData->psServerCommonContext).ui32Addr);
+	}
+	
+	/*
+	 * Construct the kernel RTU CCB command.
+	 * (Safe to release reference to ray context virtual address because
+	 * ray context destruction must flush the firmware).
+	 */
+	sRSKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+	sRSKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRSData->psServerCommonContext);
+	sRSKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRSData->psServerCommonContext));
+	sRSKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+	ui32FWCtx = FWCommonContextGetFWAddress(psRSData->psServerCommonContext).ui32Addr;
+
+	HTBLOGK(HTB_SF_MAIN_KICK_RTU,
+			sRSKCCBCmd.uCmdData.sCmdKickData.psContext,
+			ui32RTUCmdOffset
+			);
+	RGX_HWPERF_HOST_ENQ(psRayContext,
+	                    OSGetCurrentClientProcessIDKM(),
+	                    ui32FWCtx,
+	                    ui32ExtJobRef,
+	                    ui32JobId,
+	                    RGX_HWPERF_KICK_TYPE_RS,
+	                    uiCheckFenceUID,
+	                    uiUpdateFenceUID,
+	                    NO_DEADLINE,
+	                    NO_CYCEST);
+
+	/*
+	 * Submit the RTU command to the firmware.
+	 */
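+	/* RGXScheduleCommand may transiently return PVRSRV_ERROR_RETRY while the
+	 * kernel CCB is full, so retry with a short wait until the command is
+	 * accepted or the timeout expires. */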
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError2 = RGXScheduleCommand(psRayContext->psDeviceNode->pvDevice,
+									RGXFWIF_DM_RTU,
+									&sRSKCCBCmd,
+									sizeof(sRSKCCBCmd),
+									ui32ClientCacheOpSeqNum,
+									ui32PDumpFlags);
+		if (eError2 != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError2 != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickRSKM failed to schedule kernel RTU command. Error:%u", eError2));
+		if (eError == PVRSRV_OK)
+		{
+			eError = eError2;
+		}
+		goto PVRSRVRGXKickRSKM_Exit;
+	}
+	else
+	{
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+		RGXHWPerfFTraceGPUEnqueueEvent(psRayContext->psDeviceNode->pvDevice,
+				ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_RS);
+#endif
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (iUpdateTimeline >= 0)
+	{
+		/* If we get here, this should never fail. Hitting that likely implies
+		 * a code error above */
+		iUpdateFence = pvr_sync_get_update_fd(psFDFenceData);
+		if (iUpdateFence < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get update sync fd",
+				__FUNCTION__));
+			/* If we fail here, we cannot rollback the syncs as the hw already
+			 * has references to resources they may be protecting in the kick
+			 * so fallthrough */
+
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_free_append_data;
+		}
+	}
+#if defined(NO_HARDWARE)
+	pvr_sync_nohw_complete_fences(psFDFenceData);
+#endif
+	/*
+		Free the merged sync memory if required
+	*/
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if defined(NO_HARDWARE)
+	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+	if (psUpdateSyncCheckpoint)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+	}
+	if (psFenceTimelineUpdateSync)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Updating NOHW sync prim<%p> to %d", __FUNCTION__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+	}
+	SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
+	*piUpdateFence = iUpdateFence;
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_FENCE_INVALID))
+	{
+		SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRayContext->hLock);
+#endif
+	return eError;
+
+fail_initcmd:
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListFence);
+	SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+	if(iUpdateFence != PVRSRV_FENCE_INVALID)
+	{
+		SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+fail_create_output_fence:
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+fail_resolve_input_fence:
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+fail_syncinit:
+	pvr_sync_rollback_append_fences(psFDFenceData);
+fail_free_append_data:
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+PVRSRVRGXKickRSKM_Exit:
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+err_populate_sync_addr_list:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRayContext->hLock);
+#endif
+	return eError;
+}
+
+/*
+ * PVRSRVRGXKickVRDMKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickVRDMKM(RGX_SERVER_RAY_CONTEXT		*psRayContext,
+								 IMG_UINT32					ui32ClientCacheOpSeqNum,
+								 IMG_UINT32					ui32ClientFenceCount,
+								 SYNC_PRIMITIVE_BLOCK		**pauiClientFenceUFOSyncPrimBlock,
+								 IMG_UINT32					*paui32ClientFenceSyncOffset,
+								 IMG_UINT32					*paui32ClientFenceValue,
+								 IMG_UINT32					ui32ClientUpdateCount,
+								 SYNC_PRIMITIVE_BLOCK		**pauiClientUpdateUFOSyncPrimBlock,
+								 IMG_UINT32					*paui32ClientUpdateSyncOffset,
+								 IMG_UINT32					*paui32ClientUpdateValue,
+								 IMG_UINT32					ui32ServerSyncPrims,
+								 IMG_UINT32					*paui32ServerSyncFlags,
+								 SERVER_SYNC_PRIMITIVE 		**pasServerSyncs,
+								 PVRSRV_FENCE				iCheckFence,
+								 PVRSRV_TIMELINE			iUpdateTimeline,
+								 PVRSRV_FENCE				*piUpdateFence,
+								 IMG_CHAR					szUpdateFenceName[32],
+								 IMG_UINT32					ui32CmdSize,
+								 IMG_PBYTE					pui8DMCmd,
+								 IMG_UINT32					ui32PDumpFlags,
+								 IMG_UINT32					ui32ExtJobRef,
+								 IMG_DEV_VIRTADDR			sRobustnessResetReason)
+{
+	RGXFWIF_KCCB_CMD		sSHKCCBCmd;
+	RGX_CCB_CMD_HELPER_DATA	sCmdHelperData;
+	PVRSRV_ERROR			eError;
+	PVRSRV_ERROR			eError2;
+	RGX_SERVER_RAY_SH_DATA *psSHData = &psRayContext->sSHData;
+	IMG_UINT32				i;
+	IMG_UINT32				ui32SHGCmdOffset = 0;
+	IMG_UINT32				ui32JobId;
+	IMG_UINT32				ui32FWCtx;
+
+	PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+	PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+	PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+
+	IMG_UINT32 ui32IntClientFenceCount = 0;
+	PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+	IMG_UINT32 *paui32IntFenceValue = NULL;
+	IMG_UINT32 ui32IntClientUpdateCount = 0;
+	PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+	IMG_UINT32 *paui32IntUpdateValue = NULL;
+	PVRSRV_FENCE iUpdateFence = PVRSRV_FENCE_INVALID;
+	IMG_UINT32               uiCheckFenceUID = 0;
+	IMG_UINT32               uiUpdateFenceUID = 0;
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	struct pvr_sync_append_data *psFDFenceData = NULL;
+#endif
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+	PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+	IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+	void *pvUpdateFenceFinaliseData = NULL;
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	if (iUpdateTimeline >= 0 && !piUpdateFence)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#if !defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	if (iUpdateTimeline >= 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Providing update timeline (%d) in non-supporting driver",
+			__func__, iUpdateTimeline));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	if (iCheckFence >= 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Providing check fence (%d) in non-supporting driver",
+			__func__, iCheckFence));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#endif /* !defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
+	/* Sanity check the server fences */
+	for (i = 0; i < ui32ServerSyncPrims; i++)
+	{
+		if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on SH) must fence", __FUNCTION__));
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+
+	/* Ensure the string is null-terminated (Required for safety) */
+	szUpdateFenceName[31] = '\0';
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psRayContext->hLock);
+#endif
+
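+	/* Take a unique job ID for this kick; it is used to correlate the HWPerf
+	 * and trace events emitted below. */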
+	ui32JobId = OSAtomicIncrement(&psRayContext->hJobId);
+
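+	/* Flatten the client fence/update sync prim blocks and offsets into
+	 * firmware addresses held in the per-context sync address lists. */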
+	ui32IntClientFenceCount = ui32ClientFenceCount;
+	eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListFence,
+							ui32ClientFenceCount,
+							pauiClientFenceUFOSyncPrimBlock,
+							paui32ClientFenceSyncOffset);
+	if(eError != PVRSRV_OK)
+	{
+		goto err_populate_sync_addr_list;
+	}
+	if (ui32IntClientFenceCount && !pauiIntFenceUFOAddress)
+	{
+		pauiIntFenceUFOAddress = psRayContext->sSyncAddrListFence.pasFWAddrs;
+	}
+	paui32IntFenceValue      = paui32ClientFenceValue;
+
+	ui32IntClientUpdateCount = ui32ClientUpdateCount;
+	eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListUpdate,
+							ui32ClientUpdateCount,
+							pauiClientUpdateUFOSyncPrimBlock,
+							paui32ClientUpdateSyncOffset);
+	if(eError != PVRSRV_OK)
+	{
+		goto err_populate_sync_addr_list;
+	}
+	if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress)
+	{
+		pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs;
+	}
+	paui32IntUpdateValue = paui32ClientUpdateValue;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	if (iCheckFence >= 0 || iUpdateTimeline >= 0)
+	{
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		eError =
+		  pvr_sync_append_fences(szUpdateFenceName,
+		                         iCheckFence,
+		                         iUpdateTimeline,
+		                         ui32IntClientUpdateCount,
+		                         pauiIntUpdateUFOAddress,
+		                         paui32IntUpdateValue,
+		                         ui32IntClientFenceCount,
+		                         pauiIntFenceUFOAddress,
+		                         paui32IntFenceValue,
+		                         &psFDFenceData);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_syncinit;
+		}
+		pvr_sync_get_updates(psFDFenceData, &ui32IntClientUpdateCount,
+			&pauiIntUpdateUFOAddress, &paui32IntUpdateValue);
+		pvr_sync_get_checks(psFDFenceData, &ui32IntClientFenceCount,
+			&pauiIntFenceUFOAddress, &paui32IntFenceValue);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psRayContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __FUNCTION__, iCheckFence, (void*)psRayContext->psDeviceNode->hSyncCheckpointContext));
+		/* Resolve the sync checkpoints that make up the input fence */
+		eError = SyncCheckpointResolveFence(psRayContext->psDeviceNode->hSyncCheckpointContext,
+											iCheckFence,
+											&ui32FenceSyncCheckpointCount,
+											&apsFenceSyncCheckpoints,
+		                                    &uiCheckFenceUID);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __FUNCTION__, eError));
+			goto fail_resolve_input_fence;
+		}
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __FUNCTION__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(RAY_CHECKPOINT_DEBUG)
+		{
+			IMG_UINT32 ii;
+			for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+			{
+				PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __FUNCTION__, ii, (void*)psNextCheckpoint));
+			}
+		}
+#endif
+		/* Create the output fence (if required) */
+		if (piUpdateFence)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d,  psRayContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __FUNCTION__, iUpdateFence, iUpdateTimeline, (void*)psRayContext->psDeviceNode->hSyncCheckpointContext));
+			eError = SyncCheckpointCreateFence(psRayContext->psDeviceNode,
+			                                   szUpdateFenceName,
+											   (PVRSRV_TIMELINE)iUpdateTimeline,
+											   psRayContext->psDeviceNode->hSyncCheckpointContext,
+											   (PVRSRV_FENCE*)&iUpdateFence,
+											   &uiUpdateFenceUID,
+											   &pvUpdateFenceFinaliseData,
+											   &psUpdateSyncCheckpoint,
+											   (void*)&psFenceTimelineUpdateSync,
+											   &ui32FenceTimelineUpdateValue);
+			if (eError != PVRSRV_OK)
+			{
+				goto fail_create_output_fence;
+			}
+
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (iUpdateFence=%d)", __FUNCTION__, iUpdateFence));
+
+			/* Append the sync prim update for the timeline (if required) */
+			if (psFenceTimelineUpdateSync)
+			{
+				IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+				/* Allocate memory to hold the list of update values (including our timeline update) */
+				pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+				if (!pui32IntAllocatedUpdateValues)
+				{
+					/* Failed to allocate memory */
+					eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto fail_alloc_update_values_mem;
+				}
+				OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+				/* Copy the update values into the new memory, then append our timeline update value */
+				OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+				/* Now set the additional update value */
+				pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+				*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+				ui32IntClientUpdateCount++;
+#if defined(RAY_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Now append the timeline sync prim addr to the ray context update list */
+				SyncAddrListAppendSyncPrim(&psRayContext->sSyncAddrListUpdate,
+				                           psFenceTimelineUpdateSync);
+#if defined(RAY_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+				paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+			}
+		}
+
+		if (ui32FenceSyncCheckpointCount)
+		{
+			/* Append the checks (from input fence) */
+			if (ui32FenceSyncCheckpointCount > 0)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to Ray VRDM Fence (&psRayContext->sSyncAddrListFence=<%p>)...", __FUNCTION__, ui32FenceSyncCheckpointCount, (void*)&psRayContext->sSyncAddrListFence));
+#if defined(RAY_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+					for (iii=0; iii<ui32IntClientFenceCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntFenceUFOAddress[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				SyncAddrListAppendCheckpoints(&psRayContext->sSyncAddrListFence,
+											  ui32FenceSyncCheckpointCount,
+											  apsFenceSyncCheckpoints);
+				if (!pauiIntFenceUFOAddress)
+				{
+					pauiIntFenceUFOAddress = psRayContext->sSyncAddrListFence.pasFWAddrs;
+				}
+				ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+			}
+#if defined(RAY_CHECKPOINT_DEBUG)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+		if (psUpdateSyncCheckpoint)
+		{
+			/* Append the update (from output fence) */
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to Ray VRDM Update (&psRayContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __FUNCTION__, (void*)&psRayContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+			SyncAddrListAppendCheckpoints(&psRayContext->sSyncAddrListUpdate,
+										  1,
+										  &psUpdateSyncCheckpoint);
+			if (!pauiIntUpdateUFOAddress)
+			{
+				pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs;
+			}
+			ui32IntClientUpdateCount++;
+#if defined(RAY_CHECKPOINT_DEBUG)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __FUNCTION__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
+#if (ENABLE_RAY_UFO_DUMP == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: dumping Ray (VRDM) fence/updates syncs...", __FUNCTION__));
+		{
+			IMG_UINT32 ii;
+			PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+			IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+			PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+			IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+			/* Dump Fence syncs and Update syncs */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Ray (VRDM) fence syncs (&psRayContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __FUNCTION__, ui32IntClientFenceCount, (void*)&psRayContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+			for (ii=0; ii<ui32IntClientFenceCount; ii++)
+			{
+				if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+					pui32TmpIntFenceValue++;
+				}
+				psTmpIntFenceUFOAddress++;
+			}
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Ray (VRDM) update syncs (&psRayContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __FUNCTION__, ui32IntClientUpdateCount, (void*)&psRayContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+			for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+			{
+				if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __FUNCTION__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+					pui32TmpIntUpdateValue++;
+				}
+				psTmpIntUpdateUFOAddress++;
+			}
+		}
+#endif
+
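+	/* Fetch the pre/post timestamp and RMW UFO addresses to embed in the
+	 * command so the firmware can record timing for this workload. */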
+	RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRayContext->psDeviceNode->pvDevice,
+	                          & pPreAddr,
+	                          & pPostAddr,
+	                          & pRMWUFOAddr);
+
+	eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psSHData->psServerCommonContext),
+	                                ui32IntClientFenceCount,
+	                                pauiIntFenceUFOAddress,
+	                                paui32IntFenceValue,
+	                                ui32IntClientUpdateCount,
+	                                pauiIntUpdateUFOAddress,
+	                                paui32IntUpdateValue,
+	                                ui32ServerSyncPrims,
+	                                paui32ServerSyncFlags,
+	                                SYNC_FLAG_MASK_ALL,
+	                                pasServerSyncs,
+	                                ui32CmdSize,
+	                                pui8DMCmd,
+	                                & pPreAddr,
+	                                & pPostAddr,
+	                                & pRMWUFOAddr,
+	                                RGXFWIF_CCB_CMD_TYPE_SHG,
+	                                ui32ExtJobRef,
+	                                ui32JobId,
+	                                ui32PDumpFlags,
+	                                NULL,
+	                                "SH",
+	                                &sCmdHelperData,
+									sRobustnessResetReason);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_initcmd;
+	}
+
+	eError = RGXCmdHelperAcquireCmdCCB(1, &sCmdHelperData);
+	if (eError != PVRSRV_OK)
+	{
+		goto PVRSRVRGXKickSHKM_Exit;
+	}
+	
+	
+	/*
+		We should reserve space in the kernel CCB here and fill in the command
+		directly.
+		That way, if there isn't space in the kernel CCB, we can return a retry
+		to the services client before taking any sync operations.
+	*/
+
+	/*
+		We might only be kicking to flush out a padding packet, so only submit
+		the command if the create was successful.
+	*/
+	if (eError == PVRSRV_OK)
+	{
+		/*
+			All the required resources are ready at this point, we can't fail so
+			take the required server sync operations and commit all the resources
+		*/
+		ui32SHGCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psSHData->psServerCommonContext));
+		RGXCmdHelperReleaseCmdCCB(1, &sCmdHelperData, "SH", FWCommonContextGetFWAddress(psSHData->psServerCommonContext).ui32Addr);
+	}
+	
+	/*
+	 * Construct the kernel SHG CCB command.
+	 * (Safe to release reference to ray context virtual address because
+	 * ray context destruction must flush the firmware).
+	 */
+	sSHKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+	sSHKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psSHData->psServerCommonContext);
+	sSHKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psSHData->psServerCommonContext));
+	sSHKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+	ui32FWCtx = FWCommonContextGetFWAddress(psSHData->psServerCommonContext).ui32Addr;
+
+	HTBLOGK(HTB_SF_MAIN_KICK_SHG,
+			sSHKCCBCmd.uCmdData.sCmdKickData.psContext,
+			ui32SHGCmdOffset
+			);
+	RGX_HWPERF_HOST_ENQ(psRayContext,
+	                    OSGetCurrentClientProcessIDKM(),
+	                    ui32FWCtx,
+	                    ui32ExtJobRef,
+	                    ui32JobId,
+	                    RGX_HWPERF_KICK_TYPE_VRDM,
+	                    uiCheckFenceUID,
+	                    uiUpdateFenceUID,
+	                    NO_DEADLINE,
+	                    NO_CYCEST);
+
+	/*
+	 * Submit the SHG command to the firmware.
+	 */
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError2 = RGXScheduleCommand(psRayContext->psDeviceNode->pvDevice,
+									RGXFWIF_DM_SHG,
+									&sSHKCCBCmd,
+									sizeof(sSHKCCBCmd),
+									ui32ClientCacheOpSeqNum,
+									ui32PDumpFlags);
+		if (eError2 != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError2 != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickVRDMKM failed to schedule kernel SHG command. Error:%u", eError2));
+		if (eError == PVRSRV_OK)
+		{
+			eError = eError2;
+		}
+		goto PVRSRVRGXKickSHKM_Exit;
+	}
+	else
+	{
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+		RGXHWPerfFTraceGPUEnqueueEvent(psRayContext->psDeviceNode->pvDevice,
+				ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_VRDM);
+#endif
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (iUpdateTimeline >= 0)
+	{
+		/* If we get here, this should never fail. Hitting that likely implies
+		 * a code error above */
+		iUpdateFence = pvr_sync_get_update_fd(psFDFenceData);
+		if (iUpdateFence < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get update sync fd",
+				__FUNCTION__));
+			/* If we fail here, we cannot rollback the syncs as the hw already
+			 * has references to resources they may be protecting in the kick
+			 * so fallthrough */
+
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_free_append_data;
+		}
+	}
+#if defined(NO_HARDWARE)
+	pvr_sync_nohw_complete_fences(psFDFenceData);
+#endif
+	/*
+		Free the merged sync memory if required
+	*/
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if defined(NO_HARDWARE)
+	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+	if (psUpdateSyncCheckpoint)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+	}
+	if (psFenceTimelineUpdateSync)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Updating NOHW sync prim<%p> to %d", __FUNCTION__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+	}
+	SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
+	*piUpdateFence = iUpdateFence;
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_FENCE_INVALID))
+	{
+		SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRayContext->hLock);
+#endif
+	return eError;
+
+fail_initcmd:
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListFence);
+	SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+	if(iUpdateFence != PVRSRV_FENCE_INVALID)
+	{
+		SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+fail_create_output_fence:
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+fail_resolve_input_fence:
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+fail_syncinit:
+	pvr_sync_rollback_append_fences(psFDFenceData);
+fail_free_append_data:
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+PVRSRVRGXKickSHKM_Exit:
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+err_populate_sync_addr_list:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRayContext->hLock);
+#endif
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetRayContextPriorityKM(CONNECTION_DATA *psConnection,
+                                              PVRSRV_DEVICE_NODE * psDeviceNode,
+												 RGX_SERVER_RAY_CONTEXT *psRayContext,
+												 IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psRayContext->hLock);
+#endif
+
+	if (psRayContext->sSHData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psRayContext->sSHData.psServerCommonContext,
+									psConnection,
+									psRayContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_SHG);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the SH part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+			goto fail_shcontext;
+		}
+
+		psRayContext->sSHData.ui32Priority = ui32Priority;
+	}
+
+	if (psRayContext->sRSData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psRayContext->sRSData.psServerCommonContext,
+									psConnection,
+									psRayContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_RTU);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the RS part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+			goto fail_rscontext;
+		}
+
+		psRayContext->sRSData.ui32Priority = ui32Priority;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRayContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+fail_rscontext:
+fail_shcontext:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRayContext->hLock);
+#endif
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+void CheckForStalledRayCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	DLLIST_NODE *psNode, *psNext;
+	OSWRLockAcquireRead(psDevInfo->hRaytraceCtxListLock);
+	dllist_foreach_node(&psDevInfo->sRaytraceCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_RAY_CONTEXT *psCurrentServerRayCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_RAY_CONTEXT, sListNode);
+
+		DumpStalledFWCommonContext(psCurrentServerRayCtx->sSHData.psServerCommonContext,
+								   pfnDumpDebugPrintf, pvDumpDebugFile);
+		DumpStalledFWCommonContext(psCurrentServerRayCtx->sRSData.psServerCommonContext,
+								   pfnDumpDebugPrintf, pvDumpDebugFile);
+	}
+	OSWRLockReleaseRead(psDevInfo->hRaytraceCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientRayCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_UINT32 ui32ContextBitMask = 0;
+
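+	/* Walk every ray context on the device and accumulate a bitmask of the
+	 * data masters whose client CCBs report as stalled. */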
+	OSWRLockAcquireRead(psDevInfo->hRaytraceCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sRaytraceCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_RAY_CONTEXT *psCurrentServerRayCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_RAY_CONTEXT, sListNode);
+		if(NULL != psCurrentServerRayCtx->sSHData.psServerCommonContext)
+		{
+			if (CheckStalledClientCommonContext(psCurrentServerRayCtx->sSHData.psServerCommonContext, RGX_KICK_TYPE_DM_SHG) == PVRSRV_ERROR_CCCB_STALLED)
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_SHG;
+			}
+		}
+
+		if(NULL != psCurrentServerRayCtx->sRSData.psServerCommonContext)
+		{
+			if (CheckStalledClientCommonContext(psCurrentServerRayCtx->sRSData.psServerCommonContext, RGX_KICK_TYPE_DM_RTU) == PVRSRV_ERROR_CCCB_STALLED)
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_RTU;
+			}
+		}
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hRaytraceCtxListLock);
+	return ui32ContextBitMask;
+}
+
+/******************************************************************************
+ End of file (rgxray.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxray.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxray.h
new file mode 100644
index 0000000..d1536a9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxray.h
@@ -0,0 +1,378 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX ray tracing functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX ray tracing functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXRAY_H__)
+#define __RGXRAY_H__
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_shared.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxfwutils.h"
+#include "pvr_notifier.h"
+
+typedef struct _RGX_SERVER_RAY_CONTEXT_ RGX_SERVER_RAY_CONTEXT;
+typedef struct _RGX_SERVER_RPM_CONTEXT_ RGX_SERVER_RPM_CONTEXT;
+typedef struct _RGX_RPM_FREELIST_ RGX_RPM_FREELIST;
+
+
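+/*
+ * Server-side RPM context. Owns the Doppler scene and RPM page table heaps,
+ * the sparse physical backing for the scene hierarchy, and is reference
+ * counted by the RPM free lists created against it.
+ */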
+struct _RGX_SERVER_RPM_CONTEXT_
+{
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+	DEVMEM_MEMDESC			*psFWRPMContextMemDesc;
+	//DEVMEM_MEMDESC		*psRTACtlMemDesc;
+	//DEVMEM_MEMDESC		*psRTArrayMemDesc;
+	PVRSRV_CLIENT_SYNC_PRIM	*psCleanupSync;
+	IMG_UINT32				uiFLRefCount;		/*!< increments each time a free list references this parent context */
+
+	DEVMEMINT_HEAP	*psSceneHeap;
+	DEVMEMINT_HEAP	*psRPMPageTableHeap;
+	DEVMEMINT_HEAP	*psRPMFreeListHeap;
+
+	IMG_DEV_VIRTADDR	sSceneMemoryBaseAddr;
+	IMG_DEV_VIRTADDR	sDopplerHeapBaseAddr;	/*!< Base address of the virtual heap where Doppler scene is mapped */
+	IMG_DEV_VIRTADDR	sRPMPageTableBaseAddr;
+
+	IMG_UINT32		ui32TotalRPMPages;			/*!< Total virtual pages available */
+	IMG_UINT32		uiLog2DopplerPageSize;		/*!< Doppler virtual page size, may be sub-4KB */
+	IMG_UINT32		ui32UnallocatedPages;		/*!< Unmapped pages which may be mapped and added to a RPM free list */
+	IMG_UINT32		ui32RPMEntriesInPage;		/*!< Number of remaining RPM page entries (dwords) in current mapped pages */
+
+	/* Sparse mappings */
+	PMR 		*psSceneHierarchyPMR;	/*!< Scene hierarchy phys page resource */
+	PMR 		*psRPMPageTablePMR;		/*!< RPM pages in use by scene hierarchy phys page resource */
+
+	/* Current page offset at the end of the physical allocation (PMR)
+	 * for the scene memory and RPM page tables. This is where new phys pages
+	 * will be mapped when the grow occurs (using sparse dev mem API). */
+	IMG_UINT32				ui32SceneMemorySparseMappingIndex;
+	IMG_UINT32				ui32RPMPageTableSparseMappingIndex;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POS_LOCK                     hLock;
+#endif
+};
+
+/*
+ * RPM host freelist (analogous to PM host freelist)
+ */
+struct _RGX_RPM_FREELIST_ {
+    PVRSRV_RGXDEV_INFO 		*psDevInfo;
+    CONNECTION_DATA   		*psConnection;
+    RGX_SERVER_RPM_CONTEXT	*psParentCtx;
+
+	/* Free list PMR. Used for grow */
+	PMR						*psFreeListPMR;
+	IMG_DEVMEM_OFFSET_T		uiFreeListPMROffset;
+
+	IMG_DEV_VIRTADDR		sBaseDevVAddr;
+
+	/* Current page offset at the end of the physical allocation (PMR)
+	 * for the scene memory and RPM page tables. This is where new phys pages
+	 * will be mapped when the grow occurs (using sparse dev mem API). */
+	IMG_UINT32				ui32RPMFreeListSparseMappingIndex;
+
+	IMG_UINT32				ui32ReadOffset;			/*!< FPL circular buffer read offset */
+	IMG_UINT32				ui32WriteOffset;		/*!< FPL circular buffer write offset */
+
+	/* Freelist config */
+	IMG_UINT32				ui32MaxFLPages;
+	IMG_UINT32				ui32InitFLPages;
+	IMG_UINT32				ui32CurrentFLPages;
+	IMG_UINT32				ui32GrowFLPages;
+	IMG_UINT32				ui32FreelistID;
+	IMG_UINT64				ui64FreelistChecksum;	/* checksum over freelist content */
+	IMG_BOOL				bCheckFreelist;			/* freelist check enabled */
+	IMG_UINT32				ui32RefCount;			/* freelist reference counting */
+	IMG_UINT32				uiLog2DopplerPageSize;	/*!< Doppler virtual page size, may be sub-4KB */
+	IMG_UINT32				ui32EntriesInPage;		/*!< Number of remaining FPL page entries (dwords) in current mapped pages */
+
+	IMG_UINT32				ui32NumGrowReqByApp;	/* Total number of grow requests by Application*/
+	IMG_UINT32				ui32NumGrowReqByFW;		/* Total Number of grow requests by Firmware */
+	IMG_UINT32				ui32NumHighPages;		/* High Mark of pages in the freelist */
+
+	IMG_PID					ownerPid;			/* Pid of the owner of the list */
+
+	/* 
+	 * External freelists don't use common RPM memory and are not added to global list of freelists.
+	 * They're created and destroyed on demand, e.g. when loading offline hierarchies.
+	 */
+	IMG_BOOL				bIsExternal;		/* Mark if the freelist is external */
+
+	/* Memory Blocks */
+	DLLIST_NODE				sMemoryBlockHead;		/* head of list of RGX_RPM_DEVMEM_DESC block descriptors */
+	DLLIST_NODE				sNode;					/* node used to reference list of freelists on device */
+
+	/* FW data structures */
+	DEVMEM_MEMDESC			*psFWFreelistMemDesc;
+	RGXFWIF_DEV_VIRTADDR	sFreeListFWDevVAddr;
+
+	PVRSRV_CLIENT_SYNC_PRIM	*psCleanupSync;
+};
+
+
+/*!
+ *	RGXCreateRPMFreeList
+ *
+ * @param	psConnection
+ * @param	psDeviceNode
+ * @param	psRPMContext
+ * @param	ui32InitFLPages
+ * @param	ui32GrowFLPages
+ * @param	sFreeListDevVAddr
+ * @param	ppsFreeList
+ * @param	puiHWFreeList
+ * @param	bIsExternal
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRPMFreeList(CONNECTION_DATA *psConnection,
+							   PVRSRV_DEVICE_NODE	 *psDeviceNode, 
+							   RGX_SERVER_RPM_CONTEXT	*psRPMContext,
+							   IMG_UINT32			ui32InitFLPages,
+							   IMG_UINT32			ui32GrowFLPages,
+							   IMG_DEV_VIRTADDR		sFreeListDevVAddr,
+							   RGX_RPM_FREELIST	  **ppsFreeList,
+							   IMG_UINT32		   *puiHWFreeList,
+							   IMG_BOOL				bIsExternal);
+
+/*!
+ *	RGXGrowRPMFreeList
+ */
+PVRSRV_ERROR RGXGrowRPMFreeList(RGX_RPM_FREELIST *psFreeList,
+								IMG_UINT32 ui32RequestNumPages,
+								PDLLIST_NODE pListHeader);
+
+/*!
+ *	RGXDestroyRPMFreeList
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRPMFreeList(RGX_RPM_FREELIST *psFreeList);
+
+/*!
+ * RGXCreateRPMContext
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRPMContext(CONNECTION_DATA *psConnection,
+								 PVRSRV_DEVICE_NODE	 *psDeviceNode, 
+								 RGX_SERVER_RPM_CONTEXT	**ppsRPMContext,
+								 IMG_UINT32			ui32TotalRPMPages,
+								 IMG_UINT32			uiLog2DopplerPageSize,
+								 IMG_DEV_VIRTADDR	sSceneMemoryBaseAddr,
+								 IMG_DEV_VIRTADDR	sDopplerHeapBaseAddr,
+								 DEVMEMINT_HEAP		*psSceneHeap,
+								 IMG_DEV_VIRTADDR	sRPMPageTableBaseAddr,
+								 DEVMEMINT_HEAP		*psRPMPageTableHeap,
+								 DEVMEM_MEMDESC		**ppsMemDesc,
+							     IMG_UINT32		     *puiHWFrameData);
+
+/*!
+ * RGXDestroyRPMContext
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRPMContext(RGX_SERVER_RPM_CONTEXT *psCleanupData);
+
+/*!
+	RGXProcessRequestRPMGrow
+*/
+IMG_EXPORT
+void RGXProcessRequestRPMGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+							  IMG_UINT32 ui32FreelistID);
+
+
+/*! 
+	RGXAddBlockToRPMFreeListKM
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXAddBlockToRPMFreeListKM(RGX_RPM_FREELIST *psFreeList,
+										IMG_UINT32 ui32NumPages);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXCreateRayContextKM
+
+ @Description
+	Server-side implementation of RGXCreateRayContext
+
+ @Input psConnection - connection data
+ @Input psDeviceNode - device node
+ @Input ui32Priority - context priority
+ @Input sVRMCallStackAddr - VRM call stack device virtual address
+ @Input ui32FrameworkCommandSize - framework command size
+ @Input pabyFrameworkCommand - ptr to framework command
+ @Input hMemCtxPrivData - memory context private data
+ @Output ppsRayContext - server-side ray context
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA				*psConnection,
+											PVRSRV_DEVICE_NODE			*psDeviceNode,
+											IMG_UINT32					ui32Priority,
+											IMG_DEV_VIRTADDR			sVRMCallStackAddr,
+											IMG_UINT32					ui32FrameworkCommandSize,
+											IMG_PBYTE					pabyFrameworkCommand,
+											IMG_HANDLE					hMemCtxPrivData,
+											RGX_SERVER_RAY_CONTEXT	**ppsRayContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXDestroyRayContextKM
+
+ @Description
+	Server-side implementation of RGXDestroyRayContext
+
+ @Input psRayContext - Ray context
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXKickRSKM
+
+ @Description
+	Server-side implementation of RGXKickRS
+
+ @Input pvDeviceNode - device node
+ @Input psFWRayContextMemDesc - memdesc for the firmware ray context
+ @Input ui32RTUcCCBWoffUpdate - New fw Woff for the client RTU CCB
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickRSKM(RGX_SERVER_RAY_CONTEXT		*psRayContext,
+								IMG_UINT32					ui32ClientCacheOpSeqNum,
+								IMG_UINT32					ui32ClientFenceCount,
+								SYNC_PRIMITIVE_BLOCK		**pauiClientFenceUFOSyncPrimBlock,
+								IMG_UINT32					*paui32ClientFenceOffset,
+								IMG_UINT32					*paui32ClientFenceValue,
+								IMG_UINT32					ui32ClientUpdateCount,
+								SYNC_PRIMITIVE_BLOCK		**pauiClientUpdateUFOSyncPrimBlock,
+								IMG_UINT32					*paui32ClientUpdateOffset,
+								IMG_UINT32					*paui32ClientUpdateValue,
+								IMG_UINT32					ui32ServerSyncPrims,
+								IMG_UINT32					*paui32ServerSyncFlags,
+								SERVER_SYNC_PRIMITIVE 		**pasServerSyncs,
+								PVRSRV_FENCE				iCheckFence,
+								PVRSRV_TIMELINE				iUpdateTimeline,
+								PVRSRV_FENCE				*piUpdateFence,
+								IMG_CHAR					szUpdateFenceName[32],
+								IMG_UINT32					ui32CmdSize,
+								IMG_PBYTE					pui8DMCmd,
+								IMG_UINT32					ui32FCCmdSize,
+								IMG_PBYTE					pui8FCDMCmd,
+								IMG_UINT32					ui32FrameContextID,
+								IMG_UINT32					ui32PDumpFlags,
+								IMG_UINT32					ui32ExtJobRef,
+								IMG_DEV_VIRTADDR			sRobustnessResetReason);
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXKickVRDMKM
+
+ @Description
+	Server-side implementation of RGXKickVRDM
+
+ @Input pvDeviceNode - device node
+ @Input psFWRayContextMemDesc - memdesc for the firmware ray context
+ @Input ui32SHGcCCBWoffUpdate - New fw Woff for the client SHG CCB
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickVRDMKM(RGX_SERVER_RAY_CONTEXT		*psRayContext,
+								 IMG_UINT32					ui32ClientCacheOpSeqNum,
+								 IMG_UINT32					ui32ClientFenceCount,
+								 SYNC_PRIMITIVE_BLOCK		**pauiClientFenceUFOSyncPrimBlock,
+								 IMG_UINT32					*paui32ClientFenceOffset,
+								 IMG_UINT32					*paui32ClientFenceValue,
+								 IMG_UINT32					ui32ClientUpdateCount,
+								 SYNC_PRIMITIVE_BLOCK		**pauiClientUpdateUFOSyncPrimBlock,
+								 IMG_UINT32					*paui32ClientUpdateOffset,
+								 IMG_UINT32					*paui32ClientUpdateValue,
+								 IMG_UINT32					ui32ServerSyncPrims,
+								 IMG_UINT32					*paui32ServerSyncFlags,
+								 SERVER_SYNC_PRIMITIVE 		**pasServerSyncs,
+								 PVRSRV_FENCE				iCheckFence,
+								 PVRSRV_TIMELINE			iUpdateTimeline,
+								 PVRSRV_FENCE				*piUpdateFence,
+								 IMG_CHAR					szUpdateFenceName[32],
+								 IMG_UINT32					ui32CmdSize,
+								 IMG_PBYTE					pui8DMCmd,
+								 IMG_UINT32					ui32PDumpFlags,
+								 IMG_UINT32					ui32ExtJobRef,
+								 IMG_DEV_VIRTADDR			sRobustnessResetReason);
+
+PVRSRV_ERROR PVRSRVRGXSetRayContextPriorityKM(CONNECTION_DATA *psConnection,
+                                              PVRSRV_DEVICE_NODE *psDevNode,
+												 RGX_SERVER_RAY_CONTEXT *psRayContext,
+												 IMG_UINT32 ui32Priority);
+
+/* Debug - check if ray context is waiting on a fence */
+void CheckForStalledRayCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client ray contexts are stalled */
+IMG_UINT32 CheckForStalledClientRayCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXRAY_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxregconfig.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxregconfig.c
new file mode 100644
index 0000000..04c3923
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxregconfig.c
@@ -0,0 +1,262 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Register configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Regconfig routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxregconfig.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "device.h"
+#include "sync_internal.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection,
+                                         PVRSRV_DEVICE_NODE	 *psDeviceNode,
+                                         IMG_UINT8           ui8RegCfgType)
+{	
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR          eError       = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO 	  *psDevInfo   = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG        *psRegCfg    = &psDevInfo->sRegCongfig;
+	RGXFWIF_REG_CFG_TYPE  eRegCfgType  = (RGXFWIF_REG_CFG_TYPE) ui8RegCfgType;
+
+	PVR_UNREFERENCED_PARAMETER(psDevConnection);
+
+	if (eRegCfgType < psRegCfg->eRegCfgTypeToPush)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "PVRSRVRGXSetRegConfigTypeKM: Requested register configuration type (%d) is invalid: it must be at least %d,"
+		         " since configurations of different types must be pushed in order",
+		         eRegCfgType,
+		         psRegCfg->eRegCfgTypeToPush));
+		return PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE;
+	}
+
+	psRegCfg->eRegCfgTypeToPush = eRegCfgType;
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(psDevConnection);
+		
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigTypeKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection,
+                                     PVRSRV_DEVICE_NODE	*psDeviceNode,
+                                     IMG_UINT32		ui32RegAddr,
+                                     IMG_UINT64		ui64RegValue,
+                                     IMG_UINT64		ui64RegMask)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sRegCfgCmd;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	if (psRegCfg->bEnabled)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Cannot add record whilst register configuration active."));
+		return PVRSRV_ERROR_REG_CONFIG_ENABLED;
+	}
+	if (psRegCfg->ui32NumRegRecords == RGXFWIF_REG_CFG_MAX_SIZE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Register configuration full."));
+		return PVRSRV_ERROR_REG_CONFIG_FULL;
+	}
+
+	sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+	sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Addr = (IMG_UINT64) ui32RegAddr;
+	sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Value = ui64RegValue;
+	sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Mask = ui64RegMask;
+	sRegCfgCmd.uCmdData.sRegConfigData.eRegConfigType = psRegCfg->eRegCfgTypeToPush;
+	sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ADD;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sRegCfgCmd,
+				sizeof(sRegCfgCmd),
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	psRegCfg->ui32NumRegRecords++;
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection,
+                                       PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sRegCfgCmd;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	if (psRegCfg->bEnabled)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Attempt to clear register configuration whilst active."));
+		return PVRSRV_ERROR_REG_CONFIG_ENABLED;
+	}
+
+	sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+	sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_CLEAR;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sRegCfgCmd,
+				sizeof(sRegCfgCmd),
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	psRegCfg->ui32NumRegRecords = 0;
+	psRegCfg->eRegCfgTypeToPush = RGXFWIF_REG_CFG_TYPE_PWR_ON;
+
+	return eError;
+#else
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection,
+                                        PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sRegCfgCmd;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+	sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ENABLE;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sRegCfgCmd,
+				sizeof(sRegCfgCmd),
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	psRegCfg->bEnabled = IMG_TRUE;
+
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection,
+                                         PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sRegCfgCmd;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+	
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	
+	sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+	sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_DISABLE;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sRegCfgCmd,
+				sizeof(sRegCfgCmd),
+				0,
+				PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	psRegCfg->bEnabled = IMG_FALSE;
+
+	return eError;
+#else
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+		
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
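+
+/* Usage sketch (illustrative only; the client-side entry points that reach
+ * these bridge functions are assumed, not shown here): a typical
+ * register-configuration session is
+ *
+ *     PVRSRVRGXSetRegConfigTypeKM(psConn, psDevNode, ui8Type);
+ *     PVRSRVRGXAddRegConfigKM(psConn, psDevNode, ui32Addr, ui64Val, ui64Mask);
+ *     PVRSRVRGXEnableRegConfigKM(psConn, psDevNode);
+ *     ... GPU work runs with the configuration applied ...
+ *     PVRSRVRGXDisableRegConfigKM(psConn, psDevNode);
+ *     PVRSRVRGXClearRegConfigKM(psConn, psDevNode);
+ *
+ * Add and Clear are rejected while the configuration is enabled, and Clear
+ * resets the type cursor back to RGXFWIF_REG_CFG_TYPE_PWR_ON.
+ */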
+
+
+/******************************************************************************
+ End of file (rgxregconfig.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxregconfig.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxregconfig.h
new file mode 100644
index 0000000..5edb2b9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxregconfig.h
@@ -0,0 +1,130 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX register configuration functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX register configuration functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXREGCONFIG_H__)
+#define __RGXREGCONFIG_H__
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXSetRegConfigTypeKM
+
+ @Description
+	Server-side implementation of RGXSetRegConfig
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui8RegCfgType - Register configuration type
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection,
+                                         PVRSRV_DEVICE_NODE	*psDeviceNode,
+                                         IMG_UINT8 ui8RegCfgType);
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXAddRegConfigKM
+
+ @Description
+	Server-side implementation of RGXAddRegConfig
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui32RegAddr - Register address
+ @Input ui64RegValue - Reg value
+ @Input ui64RegMask - Reg mask
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection,
+                                     PVRSRV_DEVICE_NODE	*psDeviceNode,
+                                     IMG_UINT32	ui32RegAddr,
+                                     IMG_UINT64	ui64RegValue,
+                                     IMG_UINT64	ui64RegMask);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXClearRegConfigKM
+
+ @Description
+	Server-side implementation of RGXClearRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection,
+                                       PVRSRV_DEVICE_NODE	*psDeviceNode);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXEnableRegConfigKM
+
+ @Description
+	Server-side implementation of RGXEnableRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection,
+                                        PVRSRV_DEVICE_NODE	*psDeviceNode);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXDisableRegConfigKM
+
+ @Description
+	Server-side implementation of RGXDisableRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection,
+                                         PVRSRV_DEVICE_NODE	*psDeviceNode);
+
+#endif /* __RGXREGCONFIG_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsignals.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsignals.c
new file mode 100644
index 0000000..ca3d1eb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsignals.c
@@ -0,0 +1,96 @@
+/*************************************************************************/ /*!
+@File           rgxsignals.c
+@Title          RGX Signals routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Signals routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxsignals.h"
+
+#include "rgxmem.h"
+#include "rgx_fwif_km.h"
+#include "mmu_common.h"
+#include "devicemem.h"
+#include "rgxfwutils.h"
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection,
+	                                   PVRSRV_DEVICE_NODE	*psDeviceNode,
+	                                   IMG_HANDLE hMemCtxPrivData,
+	                                   IMG_DEV_VIRTADDR sDevSignalAddress)
+{
+	DEVMEM_MEMDESC *psFWMemContextMemDesc;
+	RGXFWIF_KCCB_CMD sKCCBCmd;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+
+	/* Schedule the firmware command */
+	sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE;
+	sKCCBCmd.uCmdData.sSignalUpdateData.sDevSignalAddress = sDevSignalAddress;
+	RGXSetFirmwareAddress(&sKCCBCmd.uCmdData.sSignalUpdateData.psFWMemContext,
+	                      psFWMemContextMemDesc,
+	                      0, RFW_FWADDR_NOREF_FLAG);
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand((PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice,
+		                            RGXFWIF_DM_GP,
+		                            &sKCCBCmd,
+		                            sizeof(sKCCBCmd),
+		                            0,
+		                            PDUMP_FLAGS_NONE);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXNotifySignalUpdateKM: Failed to schedule the FW command %d (%s)",
+				eError, PVRSRVGETERRORSTRING(eError)));
+	}
+
+	return eError;
+}
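+
+/* Note (illustrative): the loop above is the usual Services pattern for
+ * kicking the firmware when the kernel CCB may be full. RGXScheduleCommand
+ * returns PVRSRV_ERROR_RETRY while there is no CCB space; the caller backs
+ * off for MAX_HW_TIME_US/WAIT_TRY_COUNT microseconds between attempts and
+ * gives up once MAX_HW_TIME_US has elapsed, returning the last error.
+ */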
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsignals.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsignals.h
new file mode 100644
index 0000000..e9c68ae
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsignals.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@File           rgxsignals.h
+@Title          RGX Signals routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Signals routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_RGX_SIGNALS_H)
+#define _RGX_SIGNALS_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+#include "device.h"
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXNotifySignalUpdateKM
+
+ @Description   Server-side implementation of RGXNotifySignalUpdate
+
+ @Input hMemCtxPrivData - memory context private data
+ @Input sDevSignalAddress - device virtual address of the updated signal
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection,
+                                           PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           IMG_HANDLE hMemCtxPrivData,
+                                           IMG_DEV_VIRTADDR sDevSignalAddress);
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsrvinit.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsrvinit.c
new file mode 100644
index 0000000..0349da3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsrvinit.c
@@ -0,0 +1,2094 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "srvinit.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "km_apphint_defs.h"
+#include "htbuffer_types.h"
+#include "htbuffer_init.h"
+
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+
+#include "rgx_fwif.h"
+#include "pdump_km.h"
+
+#include "rgx_fwif_sig.h"
+#include "rgxinit.h"
+
+#include "rgx_compat_bvnc.h"
+
+#include "osfunc.h"
+
+#include "rgxdefs_km.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#include "rgx_fwif_hwperf.h"
+#include "rgx_hwperf_table.h"
+
+#include "rgxsrvinit_script.h"
+
+#include "rgxfwload.h"
+#include "rgxlayer_impl.h"
+#include "rgxfwimageutils.h"
+
+#include "rgx_hwperf.h"
+#include "rgx_bvnc_defs_km.h"
+
+#include "rgxdevice.h"
+
+#include "pvrsrv.h"
+
+static RGX_INIT_COMMAND asDbgCommands[RGX_MAX_DEBUG_COMMANDS];
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#include "rgxdevice.h"
+#include "pvrsrv_device.h"
+#endif
+
+#define DRIVER_MODE_HOST               0          /* AppHint value for host driver mode */
+
+#define	HW_PERF_FILTER_DEFAULT         0x00000000 /* Default to no HWPerf */
+#define HW_PERF_FILTER_DEFAULT_ALL_ON  0xFFFFFFFF /* All events */
+
+
+#if defined(SUPPORT_VALIDATION)
+#include "pvrsrv_apphint.h"
+#endif
+
+#if defined(LINUX)
+#include "km_apphint.h"
+#include "os_srvinit_param.h"
+#else
+#include "srvinit_param.h"
+/*!
+*******************************************************************************
+ * AppHint mnemonic data type helper tables
+******************************************************************************/
+/* apphint map of name vs. enable flag */
+static SRV_INIT_PARAM_UINT32_LOOKUP htb_loggroup_tbl[] = {
+#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) },
+	HTB_LOG_SFGROUPLIST
+#undef X
+};
+/* apphint map of arg vs. OpMode */
+static SRV_INIT_PARAM_UINT32_LOOKUP htb_opmode_tbl[] = {
+	{ "droplatest", HTB_OPMODE_DROPLATEST},
+	{ "dropoldest", HTB_OPMODE_DROPOLDEST},
+	/* HTB should never be started in HTB_OPMODE_BLOCK
+	 * as this can lead to deadlocks
+	 */
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP fwt_logtype_tbl[] = {
+	{ "trace", 2},
+	{ "tbi", 1},
+	{ "none", 0}
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP timecorr_clk_tbl[] = {
+	{ "mono", 0 },
+	{ "mono_raw", 1 },
+	{ "sched", 2 }
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP fwt_loggroup_tbl[] = { RGXFWIF_LOG_GROUP_NAME_VALUE_MAP };
+
+/*
+ * Services AppHints initialisation
+ */
+#define X(a, b, c, d, e) SrvInitParamInit ## b( a, d, e )
+APPHINT_LIST_ALL
+#undef X
+#endif /* LINUX */
+
+/*
+ * Container for all the apphints used by this module
+ */
+typedef struct _RGX_SRVINIT_APPHINTS_
+{
+	IMG_UINT32 ui32DriverMode;
+	IMG_BOOL   bDustRequestInject;
+	IMG_BOOL   bEnableSignatureChecks;
+	IMG_UINT32 ui32SignatureChecksBufSize;
+
+#if defined(DEBUG)
+	IMG_BOOL   bAssertOnOutOfMem;
+#endif
+	IMG_BOOL   bAssertOnHWRTrigger;
+	IMG_BOOL   bCheckMlist;
+	IMG_BOOL   bDisableClockGating;
+	IMG_BOOL   bDisableDMOverlap;
+	IMG_BOOL   bDisableFEDLogging;
+	IMG_BOOL   bDisablePDP;
+	IMG_BOOL   bEnableCDMKillRand;
+	IMG_BOOL   bEnableFTrace;
+	IMG_BOOL   bEnableHWPerf;
+	IMG_BOOL   bEnableHWPerfHost;
+	IMG_BOOL   bEnableHWR;
+	IMG_BOOL   bEnableRTUBypass;
+	IMG_BOOL   bFilteringMode;
+	IMG_BOOL   bHWPerfDisableCustomCounterFilter;
+	IMG_BOOL   bZeroFreelist;
+	IMG_UINT32 ui32EnableFWContextSwitch;
+	IMG_UINT32 ui32FWContextSwitchProfile;
+	IMG_UINT32 ui32VDMContextSwitchMode;
+	IMG_UINT32 ui32HWPerfFWBufSize;
+	IMG_UINT32 ui32HWPerfHostBufSize;
+	IMG_UINT32 ui32HWPerfFilter0;
+	IMG_UINT32 ui32HWPerfFilter1;
+	IMG_UINT32 ui32HWPerfHostFilter;
+	IMG_UINT32 ui32TimeCorrClock;
+	IMG_UINT32 ui32HWRDebugDumpLimit;
+	IMG_UINT32 ui32JonesDisableMask;
+	IMG_UINT32 ui32LogType;
+	IMG_UINT32 ui32TruncateMode;
+	IMG_UINT32 ui32GPIOValidationMode;
+	FW_PERF_CONF eFirmwarePerf;
+	RGX_ACTIVEPM_CONF eRGXActivePMConf;
+	RGX_META_T1_CONF eUseMETAT1;
+	RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS];
+	IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS];
+#endif
+	IMG_BOOL   bEnableTrustedDeviceAceConfig;
+	IMG_UINT32 ui32FWContextSwitchCrossDM;
+} RGX_SRVINIT_APPHINTS;
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+ * Parses the delimiter-separated OSID regions in a string and stores the integer
+ * results in an array. Numbers can be decimal or hex (with a 0x prefix) and must be
+ * separated by '.', ',' or ' ' (example: 1.2.3.4.5.6.7.8).
+ */
+static void _ParseOSidRegionString(IMG_CHAR *apszBuffer, IMG_UINT32 *pui32ApphintArray)
+{
+	IMG_UINT32 ui32OSid;
+	IMG_CHAR *pui8StringParsingBase=apszBuffer;
+	IMG_UINT32 ui32StringLength = OSStringLength(apszBuffer);
+
+	/* Initialize all apphints to 0 */
+	for (ui32OSid = 0; ui32OSid < GPUVIRT_VALIDATION_NUM_OS; ui32OSid++)
+	{
+		pui32ApphintArray[ui32OSid] = 0;
+	}
+
+	/* Parse the string. Even if it fails, apphints will have been initialized */
+	for (ui32OSid = 0; ui32OSid < GPUVIRT_VALIDATION_NUM_OS; ui32OSid++)
+	{
+		IMG_UINT32 ui32Base=10;
+		IMG_CHAR *pui8StringParsingNextDelimiter;
+
+		/* Find the next character in the string that's not a ',' '.' or ' ' */
+		while ((*pui8StringParsingBase == '.' ||
+			    *pui8StringParsingBase == ',' ||
+			    *pui8StringParsingBase == ' ') &&
+			   pui8StringParsingBase - apszBuffer <= ui32StringLength)
+		{
+			pui8StringParsingBase++;
+		}
+
+		if (pui8StringParsingBase - apszBuffer > ui32StringLength)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Reached the end of the apphint string while trying to parse it.\nBuffer: %s, OSid: %d", pui8StringParsingBase, ui32OSid));
+			return;
+		}
+
+		/* If the substring begins with "0x" move the pointer 2 bytes forward and set the base to 16 */
+		if (*pui8StringParsingBase == '0' && *(pui8StringParsingBase+1) =='x')
+		{
+			ui32Base=16;
+			pui8StringParsingBase+=2;
+		}
+
+		/* Find the next delimiter in the string or the end of the string itself if we're parsing the final number */
+		pui8StringParsingNextDelimiter = pui8StringParsingBase;
+
+		while(*pui8StringParsingNextDelimiter!='.' &&
+			  *pui8StringParsingNextDelimiter!=',' &&
+			  *pui8StringParsingNextDelimiter!=' ' &&
+			  *pui8StringParsingNextDelimiter!='\0' &&
+			  (pui8StringParsingNextDelimiter - apszBuffer <= ui32StringLength))
+		{
+			pui8StringParsingNextDelimiter++;
+		}
+
+		/*
+		 * Each number is followed by a delimiter except for the last one. If a string
+		 * terminator is found where one is not expected, the function returns.
+		 */
+
+		if (*pui8StringParsingNextDelimiter=='\0' && ui32OSid < GPUVIRT_VALIDATION_NUM_OS - 1)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "There was an error parsing the OSid Region Apphint Strings"));
+			return;
+		}
+
+		/* Replace the delimiter with a string terminator so the number can be parsed as an integer */
+		*pui8StringParsingNextDelimiter = '\0';
+
+		/* Parse the number. The fact that it is followed by '\0' means that the string parsing utility
+		 * will finish there and not try to parse the rest */
+
+		OSStringToUINT32(pui8StringParsingBase, ui32Base, &pui32ApphintArray[ui32OSid]);
+
+		pui8StringParsingBase = pui8StringParsingNextDelimiter + 1;
+	}
+}
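+
+/* Worked example (illustrative): with GPUVIRT_VALIDATION_NUM_OS == 8 and
+ * the apphint string "0x1000.0x2000.3.4.5.6.7.8", the array is filled as
+ *
+ *     pui32ApphintArray[0] = 0x1000
+ *     pui32ApphintArray[1] = 0x2000
+ *     pui32ApphintArray[2] = 3
+ *     ...
+ *     pui32ApphintArray[7] = 8
+ *
+ * A string that ends early leaves the remaining entries at their
+ * initialised value of 0.
+ */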
+
+#endif
+/*!
+*******************************************************************************
+
+ @Function      GetApphints
+
+ @Description   Read init time apphints and initialise internal variables
+
+ @Input         psHints : Pointer to apphints container
+
+ @Return        void
+
+******************************************************************************/
+static INLINE void GetApphints(RGX_SRVINIT_APPHINTS *psHints, IMG_UINT64 ui64ErnsBrns, IMG_UINT64 ui64Features)
+{
+	void *pvParamState = SrvInitParamOpen();
+	IMG_UINT32 ui32ParamTemp;
+	IMG_BOOL bS7TopInfra = IMG_FALSE, bE42290 = IMG_FALSE, bTPUFiltermodeCtrl = IMG_FALSE,
+			bE41805 = IMG_FALSE, bE42606 = IMG_FALSE, bAXIACELite = IMG_FALSE;
+
+	if(ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+	{
+		bS7TopInfra = IMG_TRUE;
+	}
+
+	if(ui64Features & RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK)
+	{
+		bTPUFiltermodeCtrl = IMG_TRUE;
+	}
+
+	if(ui64ErnsBrns & HW_ERN_42290_BIT_MASK)
+	{
+		bE42290 = IMG_TRUE;
+	}
+
+	if(ui64ErnsBrns & HW_ERN_41805_BIT_MASK)
+	{
+		bE41805 = IMG_TRUE;
+	}
+
+	if(ui64ErnsBrns & HW_ERN_42606_BIT_MASK)
+	{
+		bE42606 = IMG_TRUE;
+	}
+
+	if(ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK)
+	{
+		bAXIACELite = IMG_TRUE;
+	}
+
+	/*
+	 * NB AppHints initialised to a default value via SrvInitParamInit* macros above
+	 */
+	SrvInitParamGetUINT32(pvParamState,   DriverMode, psHints->ui32DriverMode);
+	SrvInitParamGetBOOL(pvParamState,     DustRequestInject, psHints->bDustRequestInject);
+	SrvInitParamGetBOOL(pvParamState,     EnableSignatureChecks, psHints->bEnableSignatureChecks);
+	SrvInitParamGetUINT32(pvParamState,   SignatureChecksBufSize, psHints->ui32SignatureChecksBufSize);
+
+#if defined(DEBUG)
+	SrvInitParamGetBOOL(pvParamState,    AssertOutOfMemory, psHints->bAssertOnOutOfMem);
+#endif
+	SrvInitParamGetBOOL(pvParamState,    AssertOnHWRTrigger, psHints->bAssertOnHWRTrigger);
+	SrvInitParamGetBOOL(pvParamState,    CheckMList, psHints->bCheckMlist);
+	SrvInitParamGetBOOL(pvParamState,    DisableClockGating, psHints->bDisableClockGating);
+	SrvInitParamGetBOOL(pvParamState,    DisableDMOverlap, psHints->bDisableDMOverlap);
+	SrvInitParamGetBOOL(pvParamState,    DisableFEDLogging, psHints->bDisableFEDLogging);
+	SrvInitParamGetUINT32(pvParamState,  EnableAPM, ui32ParamTemp);
+	psHints->eRGXActivePMConf = ui32ParamTemp;
+	SrvInitParamGetBOOL(pvParamState,    EnableCDMKillingRandMode, psHints->bEnableCDMKillRand);
+	SrvInitParamGetUINT32(pvParamState,  EnableFWContextSwitch, psHints->ui32EnableFWContextSwitch);
+	SrvInitParamGetUINT32(pvParamState,  VDMContextSwitchMode, psHints->ui32VDMContextSwitchMode);
+	SrvInitParamGetBOOL(pvParamState,    EnableHWR, psHints->bEnableHWR);
+	SrvInitParamGetUINT32(pvParamState,  EnableRDPowerIsland, ui32ParamTemp);
+	psHints->eRGXRDPowerIslandConf = ui32ParamTemp;
+	SrvInitParamGetUINT32(pvParamState,  FirmwarePerf, ui32ParamTemp);
+	psHints->eFirmwarePerf = ui32ParamTemp;
+	SrvInitParamGetUINT32(pvParamState,  FWContextSwitchProfile, psHints->ui32FWContextSwitchProfile);
+	SrvInitParamGetBOOL(pvParamState,    HWPerfDisableCustomCounterFilter, psHints->bHWPerfDisableCustomCounterFilter);
+	SrvInitParamGetUINT32(pvParamState,  HWPerfHostBufSizeInKB, psHints->ui32HWPerfHostBufSize);
+	SrvInitParamGetUINT32(pvParamState,  HWPerfFWBufSizeInKB, psHints->ui32HWPerfFWBufSize);
+	SrvInitParamGetUINT32(pvParamState,  GPIOValidationMode, psHints->ui32GPIOValidationMode);
+#if defined(LINUX)
+	/* name changes */
+	{
+		IMG_UINT64 ui64Tmp;
+		SrvInitParamGetBOOL(pvParamState,    DisablePDumpPanic, psHints->bDisablePDP);
+		SrvInitParamGetUINT64(pvParamState,  HWPerfFWFilter, ui64Tmp);
+		psHints->ui32HWPerfFilter0 = (IMG_UINT32)(ui64Tmp & 0xffffffffllu);
+		psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu);
+	}
+#else
+	SrvInitParamUnreferenced(DisablePDumpPanic);
+	SrvInitParamUnreferenced(HWPerfFWFilter);
+	SrvInitParamUnreferenced(RGXBVNC);
+#endif
+	SrvInitParamGetUINT32(pvParamState,  HWPerfHostFilter, psHints->ui32HWPerfHostFilter);
+	SrvInitParamGetUINT32List(pvParamState,  TimeCorrClock, psHints->ui32TimeCorrClock);
+	SrvInitParamGetUINT32(pvParamState,  HWRDebugDumpLimit, ui32ParamTemp);
+	psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL);
+
+	if(bS7TopInfra)
+	{
+	#define RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK	(0XFFFFFFCFU)
+	#define RGX_CR_JONES_FIX_MT_ORDER_ISP_EN	(0X00000020U)
+	#define RGX_CR_JONES_FIX_MT_ORDER_TE_EN		(0X00000010U)
+
+		SrvInitParamGetUINT32(pvParamState,  JonesDisableMask, ui32ParamTemp);
+		if (((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_ISP_EN) ||
+			((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_TE_EN))
+		{
+			ui32ParamTemp |= (RGX_CR_JONES_FIX_MT_ORDER_TE_EN |
+							  RGX_CR_JONES_FIX_MT_ORDER_ISP_EN);
+			PVR_DPF((PVR_DBG_WARNING, "Tile reordering mode requires both TE and ISP enabled. Forcing JonesDisableMask = %d",
+					ui32ParamTemp));
+		}
+		psHints->ui32JonesDisableMask = ui32ParamTemp;
+	}
+
+	if ( (bE42290) && (bTPUFiltermodeCtrl))
+	{
+		SrvInitParamGetBOOL(pvParamState,    NewFilteringMode, psHints->bFilteringMode);
+	}
+
+	if(bE41805 || bE42606)
+	{
+		SrvInitParamGetUINT32(pvParamState,  TruncateMode, psHints->ui32TruncateMode);
+	}
+#if defined(EMULATOR)
+	if(bAXIACELite)
+	{
+		SrvInitParamGetBOOL(pvParamState, EnableTrustedDeviceAceConfig, psHints->bEnableTrustedDeviceAceConfig);
+	}
+#endif
+
+	SrvInitParamGetUINT32(pvParamState,  UseMETAT1, ui32ParamTemp);
+	psHints->eUseMETAT1 = ui32ParamTemp & RGXFWIF_INICFG_METAT1_MASK;
+
+	SrvInitParamGetBOOL(pvParamState,    ZeroFreelist, psHints->bZeroFreelist);
+
+#if defined(LINUX)
+	SrvInitParamGetUINT32(pvParamState, FWContextSwitchCrossDM, psHints->ui32FWContextSwitchCrossDM);
+#else
+	SrvInitParamUnreferenced(FWContextSwitchCrossDM);
+#endif
+
+	/*
+	 * HWPerf filter apphints setup
+	 */
+	if (psHints->bEnableHWPerf)
+	{
+		if (psHints->ui32HWPerfFilter0 == 0 && psHints->ui32HWPerfFilter1 == 0)
+		{
+			psHints->ui32HWPerfFilter0 = HW_PERF_FILTER_DEFAULT_ALL_ON;
+			psHints->ui32HWPerfFilter1 = HW_PERF_FILTER_DEFAULT_ALL_ON;
+		}
+	}
+	else
+	{
+		if (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0)
+		{
+			psHints->bEnableHWPerf = IMG_TRUE;
+		}
+	}
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	if (psHints->bEnableFTrace)
+	{
+		/* If the EnableHWPerf AppHint has not been set, request only the
+		 * events FTrace needs, i.e. the Kick/Finish HW events */
+		if (!psHints->bEnableHWPerf)
+		{
+			psHints->ui32HWPerfFilter0 = (IMG_UINT32) (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH & 0xFFFFFFFF);
+			psHints->ui32HWPerfFilter1 = (IMG_UINT32) ((RGX_HWPERF_EVENT_MASK_HW_KICKFINISH & 0xFFFFFFFF00000000) >> 32);
+		}
+		else
+		{
+			psHints->ui32HWPerfFilter0 = HW_PERF_FILTER_DEFAULT_ALL_ON;
+			psHints->ui32HWPerfFilter1 = HW_PERF_FILTER_DEFAULT_ALL_ON;
+		}
+
+	}
+#endif
+	
+	if (psHints->bEnableHWPerfHost)
+	{
+		if (psHints->ui32HWPerfHostFilter == 0)
+		{
+			psHints->ui32HWPerfHostFilter = HW_PERF_FILTER_DEFAULT_ALL_ON;
+		}
+	}
+	else
+	{
+		if (psHints->ui32HWPerfHostFilter != 0)
+		{
+			psHints->bEnableHWPerfHost = IMG_TRUE;
+		}
+	}
+
+	/*
+	 * FW logs apphints
+	 */
+	{
+		IMG_UINT32 ui32LogType;
+		IMG_BOOL bAnyLogGroupConfigured;
+
+		SrvInitParamGetUINT32BitField(pvParamState, EnableLogGroup, ui32LogType);
+		bAnyLogGroupConfigured = ui32LogType ? IMG_TRUE : IMG_FALSE;
+		SrvInitParamGetUINT32List(pvParamState, FirmwareLogType, ui32ParamTemp);
+
+		/* Defaulting to TRACE */
+		ui32LogType |= RGXFWIF_LOG_TYPE_TRACE;
+
+		if (ui32ParamTemp == 2 /* TRACE */)
+		{
+			if (!bAnyLogGroupConfigured)
+			{
+				/* No groups configured - defaulting to MAIN group */
+				ui32LogType |= RGXFWIF_LOG_TYPE_GROUP_MAIN;
+			}
+		}
+		else if (ui32ParamTemp == 1 /* TBI */)
+		{
+			if (!bAnyLogGroupConfigured)
+			{
+				/* No groups configured - defaulting to MAIN group */
+				ui32LogType |= RGXFWIF_LOG_TYPE_GROUP_MAIN;
+			}
+			ui32LogType &= ~RGXFWIF_LOG_TYPE_TRACE;
+		}
+		else if (ui32ParamTemp == 0 /* NONE */)
+		{
+			ui32LogType = RGXFWIF_LOG_TYPE_NONE;
+		}
+
+		psHints->ui32LogType = ui32LogType;
+	}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	/*
+	 * GPU virtualisation validation apphints
+	 */
+	{
+		IMG_CHAR pszOSidRegionBuffer[GPUVIRT_VALIDATION_MAX_STRING_LENGTH];
+
+		SrvInitParamGetSTRING(pvParamState, OSidRegion0Min, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH);
+		_ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMin[0]);
+
+		SrvInitParamGetSTRING(pvParamState, OSidRegion0Max, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH);
+		_ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMax[0]);
+
+		SrvInitParamGetSTRING(pvParamState, OSidRegion1Min, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH);
+		_ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMin[1]);
+
+		SrvInitParamGetSTRING(pvParamState, OSidRegion1Max, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH);
+		_ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMax[1]);
+	}
+#else
+#if !defined(LINUX)
+	SrvInitParamUnreferenced(OSidRegion0Min);
+	SrvInitParamUnreferenced(OSidRegion0Max);
+	SrvInitParamUnreferenced(OSidRegion1Min);
+	SrvInitParamUnreferenced(OSidRegion1Max);
+#endif /* !defined(LINUX) */
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+
+	SrvInitParamClose(pvParamState);
+}
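+
+/* Note (illustrative): on Linux the single 64-bit HWPerfFWFilter apphint is
+ * split into the two 32-bit words the firmware interface expects, e.g. a
+ * value of 0x0000000300000001 yields ui32HWPerfFilter0 = 0x00000001 and
+ * ui32HWPerfFilter1 = 0x00000003. Setting either word non-zero implicitly
+ * enables HWPerf, while enabling HWPerf with both words zero selects
+ * HW_PERF_FILTER_DEFAULT_ALL_ON.
+ */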
+
+
+/*!
+*******************************************************************************
+
+ @Function      GetFWConfigFlags
+
+ @Description   Initialise and return FW config flags
+
+ @Input         psHints            : Apphints container
+ @Input         pui32FWConfigFlags : Pointer to config flags
+
+ @Return        void
+
+******************************************************************************/
+static INLINE void GetFWConfigFlags(RGX_SRVINIT_APPHINTS *psHints,
+                                    IMG_UINT32 *pui32FWConfigFlags,
+                                    IMG_UINT32 *pui32FWConfigFlagsExt)
+{
+	IMG_UINT32 ui32FWConfigFlags = 0;
+
+#if defined(DEBUG)
+	ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0;
+#endif
+	ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0;
+	ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0;
+	ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0;
+	ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0;
+	ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_SRVCFG_DISABLE_PDP_EN : 0;
+	ui32FWConfigFlags |= psHints->bEnableCDMKillRand ? RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN : 0;
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	/* Since FTrace GPU events depend on HWPerf, ensure it is enabled here */
+	ui32FWConfigFlags |= psHints->bEnableFTrace ? RGXFWIF_INICFG_HWPERF_EN : 0;
+#endif
+	ui32FWConfigFlags |= psHints->bEnableHWPerf ? RGXFWIF_INICFG_HWPERF_EN : 0;
+#if !defined(NO_HARDWARE)
+	ui32FWConfigFlags |= psHints->bEnableHWR ? RGXFWIF_INICFG_HWR_EN : 0;
+#endif
+	ui32FWConfigFlags |= psHints->bEnableRTUBypass ? RGXFWIF_INICFG_RTU_BYPASS_EN : 0;
+	ui32FWConfigFlags |= psHints->bHWPerfDisableCustomCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0;
+	ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_CUSTOM_TIMER) ? RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN : 0;
+	ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_POLLS) ? RGXFWIF_INICFG_POLL_COUNTERS_EN : 0;
+	ui32FWConfigFlags |= psHints->eUseMETAT1 << RGXFWIF_INICFG_METAT1_SHIFT;
+	ui32FWConfigFlags |= psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_CTXSWITCH_CLRMSK;
+	ui32FWConfigFlags |= (psHints->ui32VDMContextSwitchMode << RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT) & ~RGXFWIF_INICFG_VDM_CTX_STORE_MODE_CLRMSK;
+
+	ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK;
+
+	*pui32FWConfigFlags = ui32FWConfigFlags;
+	*pui32FWConfigFlagsExt = psHints->ui32FWContextSwitchCrossDM;
+}
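+
+/* Example (illustrative): with AssertOnHWRTrigger and EnableHWPerf set, the
+ * returned flags would include RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER |
+ * RGXFWIF_INICFG_HWPERF_EN, on top of whatever the remaining apphints
+ * (METAT1, context-switch mode/profile, etc.) contribute.
+ */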
+
+
+/*!
+*******************************************************************************
+
+ @Function      GetFilterFlags
+
+ @Description   Initialise and return filter flags
+
+ @Input         psHints : Apphints container
+
+ @Return        Filter flags
+
+******************************************************************************/
+static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints)
+{
+	IMG_UINT32 ui32FilterFlags = 0;
+
+	ui32FilterFlags |= psHints->bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0;
+	if (psHints->ui32TruncateMode == 2)
+	{
+		ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT;
+	}
+	else if (psHints->ui32TruncateMode == 3)
+	{
+		ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF;
+	}
+
+	return ui32FilterFlags;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      GetDeviceFlags
+
+ @Description   Initialise and return device flags
+
+ @Input         psHints          : Apphints container
+ @Input         pui32DeviceFlags : Pointer to device flags
+
+ @Return        void
+
+******************************************************************************/
+static INLINE void GetDeviceFlags(RGX_SRVINIT_APPHINTS *psHints,
+                                  IMG_UINT32 *pui32DeviceFlags)
+{
+	IMG_UINT32 ui32DeviceFlags = 0;
+
+	ui32DeviceFlags |= psHints->bDustRequestInject? RGXKMIF_DEVICE_STATE_DUST_REQUEST_INJECT_EN : 0;
+
+	ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKMIF_DEVICE_STATE_ZERO_FREELIST : 0;
+	ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0;
+	ui32DeviceFlags |= psHints->bEnableHWPerfHost ? RGXKMIF_DEVICE_STATE_HWPERF_HOST_EN : 0;
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	ui32DeviceFlags |= psHints->bEnableFTrace ? RGXKMIF_DEVICE_STATE_FTRACE_EN : 0;
+#endif
+
+	*pui32DeviceFlags = ui32DeviceFlags;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function		PrepareDebugScript
+
+ @Description	Generates a script to dump debug info
+
+ @Input			psDbgInitScript
+ @Input			bFirmwarePerf
+ @Input			pvDeviceInfo
+
+ @Return		IMG_BOOL - IMG_TRUE on success, IMG_FALSE if the script
+				builder runs out of commands
+
+******************************************************************************/
+static IMG_BOOL PrepareDebugScript(RGX_SCRIPT_BUILD* psDbgInitScript,
+					IMG_BOOL bFirmwarePerf,
+					void *pvDeviceInfo)
+{
+#define DBG_READ(T, R, S)		if (!ScriptDBGReadRGXReg(psDbgInitScript, T, R, S)) return IMG_FALSE;
+#define DBG_MSP_READ(R, S)		if (!ScriptDBGReadMetaRegThroughSP(psDbgInitScript, R, S)) return IMG_FALSE;
+#define DBG_MCR_READ(R, S)		if (!ScriptDBGReadMetaCoreReg(psDbgInitScript, R, S)) return IMG_FALSE;
+#define DBG_CALC(R, S, T, U, V)	if (!ScriptDBGCalc(psDbgInitScript, R, S, T, U, V)) return IMG_FALSE;
+#define DBG_STRING(S)			if (!ScriptDBGString(psDbgInitScript, S)) return IMG_FALSE;
+#define DBG_READ32(R, S)				DBG_READ(RGX_INIT_OP_DBG_READ32_HW_REG, R, S)
+#define DBG_READ64(R, S)				DBG_READ(RGX_INIT_OP_DBG_READ64_HW_REG, R, S)
+#define DBG_CALC_TA_AND_3D(R, S, T, U)	DBG_CALC(RGX_INIT_OP_DBG_CALC, R, S, T, U)
+	IMG_BOOL	bS7Infra, bXTInfra, e44871, bRayTracing, e47025, bVIVTSlc, bMIPS, bPBVNC;
+	IMG_UINT32	ui32SLCBanks = 0, ui32Meta = 0;
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDeviceInfo;
+	bS7Infra = bXTInfra = e44871 = bRayTracing = e47025 = bVIVTSlc = bMIPS = bPBVNC = IMG_FALSE;
+
+
+	do {
+		if (psDevInfo == NULL)
+		{
+			break;
+		}
+
+		if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK)
+		{
+			bPBVNC = IMG_TRUE;
+		}
+
+		if(psDevInfo->sDevFeatureCfg.ui32META)
+		{
+			ui32Meta = psDevInfo->sDevFeatureCfg.ui32META;
+		}
+
+		if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+		{
+			bS7Infra = IMG_TRUE;
+		}
+
+		if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK)
+		{
+			bXTInfra = IMG_TRUE;
+		}
+
+		if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+		{
+			bRayTracing = IMG_TRUE;
+		}
+
+		if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SLC_VIVT_BIT_MASK)
+		{
+			bVIVTSlc = IMG_TRUE;
+		}
+
+		if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+		{
+			bMIPS = IMG_TRUE;
+		}
+
+
+		if(psDevInfo->sDevFeatureCfg.ui32SLCBanks)
+		{
+			ui32SLCBanks = psDevInfo->sDevFeatureCfg.ui32SLCBanks;
+		}
+
+		if(psDevInfo->sDevFeatureCfg.ui64ErnsBrns & FIX_HW_BRN_44871_BIT_MASK)
+		{
+			e44871 = IMG_TRUE;
+		}
+
+		if(psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_47025_BIT_MASK)
+		{
+			e47025 = IMG_TRUE;
+		}
+
+	} while (0);
+
+	if(bPBVNC)
+	{
+		DBG_READ64(RGX_CR_CORE_ID,							"CORE_ID                         ");
+	}
+	else
+	{
+		DBG_READ32(RGX_CR_CORE_ID,							"CORE_ID                         ");
+	}
+
+	DBG_READ32(RGX_CR_CORE_REVISION,					"CORE_REVISION                   ");
+	DBG_READ32(RGX_CR_DESIGNER_REV_FIELD1,				"DESIGNER_REV_FIELD1             ");
+	DBG_READ32(RGX_CR_DESIGNER_REV_FIELD2,				"DESIGNER_REV_FIELD2             ");
+	DBG_READ64(RGX_CR_CHANGESET_NUMBER,					"CHANGESET_NUMBER                ");
+	if(ui32Meta)
+	{
+		DBG_READ32(RGX_CR_META_SP_MSLVIRQSTATUS,			"META_SP_MSLVIRQSTATUS           ");
+	}
+	DBG_READ64(RGX_CR_CLK_CTRL,							"CLK_CTRL                        ");
+	DBG_READ64(RGX_CR_CLK_STATUS,						"CLK_STATUS                      ");
+	DBG_READ64(RGX_CR_CLK_CTRL2,						"CLK_CTRL2                       ");
+	DBG_READ64(RGX_CR_CLK_STATUS2,						"CLK_STATUS2                     ");
+	if (bS7Infra)
+	{
+		DBG_READ64(RGX_CR_CLK_XTPLUS_CTRL,					"CLK_XTPLUS_CTRL                 ");
+		DBG_READ64(RGX_CR_CLK_XTPLUS_STATUS,				"CLK_XTPLUS_STATUS               ");
+	}
+	DBG_READ32(RGX_CR_EVENT_STATUS,						"EVENT_STATUS                    ");
+	DBG_READ64(RGX_CR_TIMER,							"TIMER                           ");
+	if (bS7Infra)
+	{
+		DBG_READ64(RGX_CR_MMU_FAULT_STATUS,					"MMU_FAULT_STATUS                ");
+		DBG_READ64(RGX_CR_MMU_FAULT_STATUS_META,			"MMU_FAULT_STATUS_META           ");
+	}
+	else
+	{
+		DBG_READ32(RGX_CR_BIF_FAULT_BANK0_MMU_STATUS,		"BIF_FAULT_BANK0_MMU_STATUS      ");
+		DBG_READ64(RGX_CR_BIF_FAULT_BANK0_REQ_STATUS,		"BIF_FAULT_BANK0_REQ_STATUS      ");
+		DBG_READ32(RGX_CR_BIF_FAULT_BANK1_MMU_STATUS,		"BIF_FAULT_BANK1_MMU_STATUS      ");
+		DBG_READ64(RGX_CR_BIF_FAULT_BANK1_REQ_STATUS,		"BIF_FAULT_BANK1_REQ_STATUS      ");
+	}
+
+	DBG_READ32(RGX_CR_BIF_MMU_STATUS,					"BIF_MMU_STATUS                  ");
+	DBG_READ32(RGX_CR_BIF_MMU_ENTRY,					"BIF_MMU_ENTRY                   ");
+	DBG_READ64(RGX_CR_BIF_MMU_ENTRY_STATUS,				"BIF_MMU_ENTRY_STATUS            ");
+	if (bS7Infra)
+	{
+		DBG_READ32(RGX_CR_BIF_JONES_OUTSTANDING_READ,		"BIF_JONES_OUTSTANDING_READ      ");
+		DBG_READ32(RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ,	"BIF_BLACKPEARL_OUTSTANDING_READ ");
+		DBG_READ32(RGX_CR_BIF_DUST_OUTSTANDING_READ,		"BIF_DUST_OUTSTANDING_READ       ");
+	}
+	else
+	{
+
+		if (!bXTInfra)
+		{
+			DBG_READ32(RGX_CR_BIF_STATUS_MMU,					"BIF_STATUS_MMU                  ");
+			DBG_READ32(RGX_CR_BIF_READS_EXT_STATUS,				"BIF_READS_EXT_STATUS            ");
+			DBG_READ32(RGX_CR_BIF_READS_INT_STATUS,				"BIF_READS_INT_STATUS            ");
+		}
+		DBG_READ32(RGX_CR_BIFPM_STATUS_MMU,					"BIFPM_STATUS_MMU                ");
+		DBG_READ32(RGX_CR_BIFPM_READS_EXT_STATUS,			"BIFPM_READS_EXT_STATUS          ");
+		DBG_READ32(RGX_CR_BIFPM_READS_INT_STATUS,			"BIFPM_READS_INT_STATUS          ");
+	}
+
+	if(e44871)
+	{
+		DBG_STRING("Warning: BRN44871 is present");
+	}
+
+	if(e47025)
+	{
+		DBG_READ64(RGX_CR_CDM_CONTEXT_LOAD_PDS0,			"CDM_CONTEXT_LOAD_PDS0           ");
+		DBG_READ64(RGX_CR_CDM_CONTEXT_LOAD_PDS1,			"CDM_CONTEXT_LOAD_PDS1           ");
+	}
+
+	if (bVIVTSlc)
+	{
+		DBG_READ64(RGX_CR_CONTEXT_MAPPING0,					"CONTEXT_MAPPING0                ");
+		DBG_READ64(RGX_CR_CONTEXT_MAPPING1,					"CONTEXT_MAPPING1                ");
+		DBG_READ64(RGX_CR_CONTEXT_MAPPING2,					"CONTEXT_MAPPING2                ");
+		DBG_READ64(RGX_CR_CONTEXT_MAPPING3,					"CONTEXT_MAPPING3                ");
+		DBG_READ64(RGX_CR_CONTEXT_MAPPING4,					"CONTEXT_MAPPING4                ");
+	}
+	else
+	{
+		DBG_READ64(RGX_CR_BIF_CAT_BASE_INDEX,				"BIF_CAT_BASE_INDEX              ");
+		DBG_READ64(RGX_CR_BIF_CAT_BASE0,					"BIF_CAT_BASE0                   ");
+		DBG_READ64(RGX_CR_BIF_CAT_BASE1,					"BIF_CAT_BASE1                   ");
+		DBG_READ64(RGX_CR_BIF_CAT_BASE2,					"BIF_CAT_BASE2                   ");
+		DBG_READ64(RGX_CR_BIF_CAT_BASE3,					"BIF_CAT_BASE3                   ");
+		DBG_READ64(RGX_CR_BIF_CAT_BASE4,					"BIF_CAT_BASE4                   ");
+		DBG_READ64(RGX_CR_BIF_CAT_BASE5,					"BIF_CAT_BASE5                   ");
+		DBG_READ64(RGX_CR_BIF_CAT_BASE6,					"BIF_CAT_BASE6                   ");
+		DBG_READ64(RGX_CR_BIF_CAT_BASE7,					"BIF_CAT_BASE7                   ");
+	}
+
+	DBG_READ32(RGX_CR_BIF_CTRL_INVAL,					"BIF_CTRL_INVAL                  ");
+	DBG_READ32(RGX_CR_BIF_CTRL,							"BIF_CTRL                        ");
+
+	DBG_READ64(RGX_CR_BIF_PM_CAT_BASE_VCE0,				"BIF_PM_CAT_BASE_VCE0            ");
+	DBG_READ64(RGX_CR_BIF_PM_CAT_BASE_TE0,				"BIF_PM_CAT_BASE_TE0             ");
+	DBG_READ64(RGX_CR_BIF_PM_CAT_BASE_ALIST0,			"BIF_PM_CAT_BASE_ALIST0          ");
+	DBG_READ64(RGX_CR_BIF_PM_CAT_BASE_VCE1,				"BIF_PM_CAT_BASE_VCE1            ");
+	DBG_READ64(RGX_CR_BIF_PM_CAT_BASE_TE1,				"BIF_PM_CAT_BASE_TE1             ");
+	DBG_READ64(RGX_CR_BIF_PM_CAT_BASE_ALIST1,			"BIF_PM_CAT_BASE_ALIST1          ");
+	
+	DBG_READ32(RGX_CR_PERF_TA_PHASE,					"PERF_TA_PHASE                   ");
+	DBG_READ32(RGX_CR_PERF_TA_CYCLE,					"PERF_TA_CYCLE                   ");
+	DBG_READ32(RGX_CR_PERF_3D_PHASE,					"PERF_3D_PHASE                   ");
+	DBG_READ32(RGX_CR_PERF_3D_CYCLE,					"PERF_3D_CYCLE                   ");
+
+	DBG_READ32(RGX_CR_PERF_TA_OR_3D_CYCLE,				"PERF_TA_OR_3D_CYCLE             ");
+	DBG_CALC_TA_AND_3D(RGX_CR_PERF_TA_CYCLE, RGX_CR_PERF_3D_CYCLE, RGX_CR_PERF_TA_OR_3D_CYCLE,
+														"PERF_TA_AND_3D_CYCLE            ");
+
+	DBG_READ32(RGX_CR_PERF_COMPUTE_PHASE,				"PERF_COMPUTE_PHASE              ");
+	DBG_READ32(RGX_CR_PERF_COMPUTE_CYCLE,				"PERF_COMPUTE_CYCLE              ");
+
+	DBG_READ32(RGX_CR_PM_PARTIAL_RENDER_ENABLE,			"PARTIAL_RENDER_ENABLE           ");
+
+	DBG_READ32(RGX_CR_ISP_RENDER,						"ISP_RENDER                      ");
+	DBG_READ64(RGX_CR_TLA_STATUS,						"TLA_STATUS                      ");
+	DBG_READ64(RGX_CR_MCU_FENCE,						"MCU_FENCE                       ");
+
+	DBG_READ32(RGX_CR_VDM_CONTEXT_STORE_STATUS,			"VDM_CONTEXT_STORE_STATUS        ");
+	DBG_READ64(RGX_CR_VDM_CONTEXT_STORE_TASK0,			"VDM_CONTEXT_STORE_TASK0         ");
+	DBG_READ64(RGX_CR_VDM_CONTEXT_STORE_TASK1,			"VDM_CONTEXT_STORE_TASK1         ");
+	DBG_READ64(RGX_CR_VDM_CONTEXT_STORE_TASK2,			"VDM_CONTEXT_STORE_TASK2         ");
+	DBG_READ64(RGX_CR_VDM_CONTEXT_RESUME_TASK0,			"VDM_CONTEXT_RESUME_TASK0        ");
+	DBG_READ64(RGX_CR_VDM_CONTEXT_RESUME_TASK1,			"VDM_CONTEXT_RESUME_TASK1        ");
+	DBG_READ64(RGX_CR_VDM_CONTEXT_RESUME_TASK2,			"VDM_CONTEXT_RESUME_TASK2        ");
+
+	DBG_READ32(RGX_CR_ISP_CTL,							"ISP_CTL                         ");
+	DBG_READ32(RGX_CR_ISP_STATUS,						"ISP_STATUS                      ");
+	DBG_READ32(RGX_CR_MTS_INTCTX,						"MTS_INTCTX                      ");
+	DBG_READ32(RGX_CR_MTS_BGCTX,						"MTS_BGCTX                       ");
+	DBG_READ32(RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE,		"MTS_BGCTX_COUNTED_SCHEDULE      ");
+	DBG_READ32(RGX_CR_MTS_SCHEDULE,						"MTS_SCHEDULE                    ");
+	DBG_READ32(RGX_CR_MTS_GPU_INT_STATUS,				"MTS_GPU_INT_STATUS              ");
+
+	DBG_READ32(RGX_CR_CDM_CONTEXT_STORE_STATUS,			"CDM_CONTEXT_STORE_STATUS        ");
+	DBG_READ64(RGX_CR_CDM_CONTEXT_PDS0,					"CDM_CONTEXT_PDS0                ");
+	DBG_READ64(RGX_CR_CDM_CONTEXT_PDS1,					"CDM_CONTEXT_PDS1                ");
+	DBG_READ64(RGX_CR_CDM_TERMINATE_PDS,				"CDM_TERMINATE_PDS               ");
+	DBG_READ64(RGX_CR_CDM_TERMINATE_PDS1,				"CDM_TERMINATE_PDS1              ");
+
+	if(e47025)
+	{
+		DBG_READ64(RGX_CR_CDM_CONTEXT_LOAD_PDS0,			"CDM_CONTEXT_LOAD_PDS0           ");
+		DBG_READ64(RGX_CR_CDM_CONTEXT_LOAD_PDS1,			"CDM_CONTEXT_LOAD_PDS1           ");
+	}
+
+	if(bRayTracing)
+	{
+		DBG_READ32(DPX_CR_BIF_MMU_STATUS,					"DPX_CR_BIF_MMU_STATUS           ");
+		DBG_READ64(DPX_CR_BIF_FAULT_BANK_MMU_STATUS,		"DPX_CR_BIF_FAULT_BANK_MMU_STATUS");
+		DBG_READ64(DPX_CR_BIF_FAULT_BANK_REQ_STATUS,		"DPX_CR_BIF_FAULT_BANK_REQ_STATUS");
+
+		DBG_READ64(RGX_CR_RPM_SHF_FPL,						"RGX_CR_RPM_SHF_FPL	             ");
+		DBG_READ32(RGX_CR_RPM_SHF_FPL_READ,					"RGX_CR_RPM_SHF_FPL_READ         ");
+		DBG_READ32(RGX_CR_RPM_SHF_FPL_WRITE,				"RGX_CR_RPM_SHF_FPL_WRITE        ");
+		DBG_READ64(RGX_CR_RPM_SHG_FPL,   					"RGX_CR_RPM_SHG_FPL	             ");
+		DBG_READ32(RGX_CR_RPM_SHG_FPL_READ,					"RGX_CR_RPM_SHG_FPL_READ         ");
+		DBG_READ32(RGX_CR_RPM_SHG_FPL_WRITE,				"RGX_CR_RPM_SHG_FPL_WRITE        ");
+	}
+
+	if (bS7Infra)
+	{
+		DBG_READ32(RGX_CR_JONES_IDLE,						"JONES_IDLE                      ");
+	}
+
+	DBG_READ32(RGX_CR_SIDEKICK_IDLE,					"SIDEKICK_IDLE                   ");
+
+	if (!bS7Infra)
+	{
+		DBG_READ32(RGX_CR_SLC_IDLE,							"SLC_IDLE                        ");
+		DBG_READ32(RGX_CR_SLC_STATUS0,						"SLC_STATUS0                     ");
+		DBG_READ64(RGX_CR_SLC_STATUS1,						"SLC_STATUS1                     ");
+
+		if (ui32SLCBanks)
+		{
+			DBG_READ64(RGX_CR_SLC_STATUS2,					"SLC_STATUS2                     ");
+		}
+
+		DBG_READ32(RGX_CR_SLC_CTRL_BYPASS,					"SLC_CTRL_BYPASS                 ");
+		DBG_READ64(RGX_CR_SLC_CTRL_MISC,					"SLC_CTRL_MISC                   ");
+	}
+	else
+	{
+		DBG_READ32(RGX_CR_SLC3_IDLE,						"SLC3_IDLE                       ");
+		DBG_READ64(RGX_CR_SLC3_STATUS,						"SLC3_STATUS                     ");
+		DBG_READ32(RGX_CR_SLC3_FAULT_STOP_STATUS,			"SLC3_FAULT_STOP_STATUS          ");
+	}
+
+	if (ui32Meta)
+	{
+		DBG_MSP_READ(META_CR_T0ENABLE_OFFSET,				"T0 TXENABLE                     ");
+		DBG_MSP_READ(META_CR_T0STATUS_OFFSET,				"T0 TXSTATUS                     ");
+		DBG_MSP_READ(META_CR_T0DEFR_OFFSET,					"T0 TXDEFR                       ");
+		DBG_MCR_READ(META_CR_THR0_PC,						"T0 PC                           ");
+		DBG_MCR_READ(META_CR_THR0_PCX,						"T0 PCX                          ");
+		DBG_MCR_READ(META_CR_THR0_SP,						"T0 SP                           ");
+	}
+
+	if ((ui32Meta == MTP218) || (ui32Meta == MTP219))
+	{
+		DBG_MSP_READ(META_CR_T1ENABLE_OFFSET,				"T1 TXENABLE                     ");
+		DBG_MSP_READ(META_CR_T1STATUS_OFFSET,				"T1 TXSTATUS                     ");
+		DBG_MSP_READ(META_CR_T1DEFR_OFFSET,					"T1 TXDEFR                       ");
+		DBG_MCR_READ(META_CR_THR1_PC,						"T1 PC                           ");
+		DBG_MCR_READ(META_CR_THR1_PCX,						"T1 PCX                          ");
+		DBG_MCR_READ(META_CR_THR1_SP,						"T1 SP                           ");
+	}
+
+	if (ui32Meta && bFirmwarePerf)
+	{
+		DBG_MSP_READ(META_CR_PERF_COUNT0,				"PERF_COUNT0                     ");
+		DBG_MSP_READ(META_CR_PERF_COUNT1,				"PERF_COUNT1                     ");
+	}
+
+	if (bMIPS)
+	{
+		DBG_READ32(RGX_CR_MIPS_EXCEPTION_STATUS,            "MIPS_EXCEPTION_STATUS           ");
+	}
+
+	return IMG_TRUE;
+}
+
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+/*************************************************************************/ /*!
+ @Function       RGXTDProcessFWImage
+
+ @Description    Fetch and send data used by the trusted device to complete
+                 the FW image setup
+
+ @Input          psDeviceNode - Device node
+ @Input          psRGXFW      - Firmware blob
+
+ @Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                        struct RGXFW *psRGXFW)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_TD_FW_PARAMS sTDFWParams;
+	PVRSRV_ERROR eError;
+
+	if (psDevConfig->pfnTDSendFWImage == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXTDProcessFWImage: pfnTDSendFWImage not implemented!"));
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	sTDFWParams.pvFirmware = RGXFirmwareData(psRGXFW);
+	sTDFWParams.ui32FirmwareSize = RGXFirmwareSize(psRGXFW);
+	sTDFWParams.sFWCodeDevVAddrBase = psDevInfo->sFWCodeDevVAddrBase;
+	sTDFWParams.sFWDataDevVAddrBase = psDevInfo->sFWDataDevVAddrBase;
+	sTDFWParams.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr;
+	sTDFWParams.sFWInitFWAddr = psDevInfo->sFWInitFWAddr;
+
+	eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams);
+
+	return eError;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     AcquireHostData
+
+ @Description  Acquire Device MemDesc and CPU pointer for a given PMR
+
+ @Input        psDeviceNode   : Device Node
+ @Input        pPMR           : PMR
+ @Output       ppsHostMemDesc : Returned MemDesc
+ @Output       ppvHostAddr    : Returned CPU pointer
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+static INLINE
+PVRSRV_ERROR AcquireHostData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                             PMR* pPMR,
+                             DEVMEM_MEMDESC **ppsHostMemDesc,
+                             void **ppvHostAddr)
+{
+	IMG_HANDLE hImportHandle;
+	IMG_DEVMEM_SIZE_T uiImportSize;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemMakeLocalImportHandle(psDeviceNode,
+	                                     pPMR,
+	                                     &hImportHandle);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "AcquireHostData: DevmemMakeLocalImportHandle failed (%d)", eError));
+		goto acquire_failmakehandle;
+	}
+
+	eError = DevmemLocalImport(psDeviceNode,
+	                           hImportHandle,
+	                           PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+	                           PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+	                           PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+	                           PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE,
+	                           ppsHostMemDesc,
+	                           &uiImportSize,
+	                           "AcquireHostData");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "AcquireHostData: DevmemLocalImport failed (%d)", eError));
+		goto acquire_failimport;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(*ppsHostMemDesc,
+	                                  ppvHostAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "AcquireHostData: DevmemAcquireCpuVirtAddr failed (%d)", eError));
+		goto acquire_failcpuaddr;
+	}
+
+	/* We don't need the import handle anymore */
+	DevmemUnmakeLocalImportHandle(psDeviceNode, hImportHandle);
+
+	return PVRSRV_OK;
+
+
+acquire_failcpuaddr:
+	DevmemFree(*ppsHostMemDesc);
+
+acquire_failimport:
+	DevmemUnmakeLocalImportHandle(psDeviceNode, hImportHandle);
+
+acquire_failmakehandle:
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     ReleaseHostData
+
+ @Description  Releases resources associated with a Device MemDesc
+
+ @Input        psHostMemDesc : MemDesc to free
+
+ @Return       void
+
+******************************************************************************/
+static INLINE void ReleaseHostData(DEVMEM_MEMDESC *psHostMemDesc)
+{
+	DevmemReleaseCpuVirtAddr(psHostMemDesc);
+	DevmemFree(psHostMemDesc);
+}
+
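+/*
+ * Usage sketch for the pair above (editorial; error handling elided and
+ * psPMR is a hypothetical PMR handle):
+ *
+ *   DEVMEM_MEMDESC *psMemDesc;
+ *   void *pvAddr;
+ *
+ *   if (AcquireHostData(psDeviceNode, psPMR, &psMemDesc, &pvAddr) == PVRSRV_OK)
+ *   {
+ *       ... read/write through pvAddr ...
+ *       ReleaseHostData(psMemDesc);
+ *   }
+ *
+ * On failure AcquireHostData releases anything it had acquired, so the
+ * caller has nothing to clean up.
+ */
+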
+/*!
+*******************************************************************************
+
+ @Function     GetFirmwareBVNC
+
+ @Description  Retrieves FW BVNC information from binary data
+
+ @Input        psRGXFW : Firmware binary handle to get BVNC from
+
+ @Output       psFWBVNC : Structure in which to store the BVNC info
+
+ @Return       IMG_TRUE upon success, IMG_FALSE otherwise
+
+******************************************************************************/
+static INLINE IMG_BOOL GetFirmwareBVNC(struct RGXFW *psRGXFW,
+                                       RGXFWIF_COMPCHECKS_BVNC *psFWBVNC)
+{
+#if defined(LINUX)
+	const size_t FWSize = RGXFirmwareSize(psRGXFW);
+	const RGXFWIF_COMPCHECKS_BVNC * psBinBVNC;
+#endif
+
+#if !defined(LINUX)
+	/* This check is not available on non-Linux OSes: just fill in the struct and return IMG_TRUE */
+	psFWBVNC->ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION;
+	psFWBVNC->ui32VLenMax = RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX;
+
+	rgx_bvnc_packed(&psFWBVNC->ui64BNC, psFWBVNC->aszV, psFWBVNC->ui32VLenMax,
+	                RGX_BNC_KM_B, RGX_BVNC_KM_V_ST, RGX_BNC_KM_N, RGX_BNC_KM_C);
+
+#else
+
+	if (FWSize < FW_BVNC_BACKWARDS_OFFSET)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Firmware is too small (%zu bytes)",
+		         __func__, FWSize));
+		return IMG_FALSE;
+	}
+
+	psBinBVNC = (RGXFWIF_COMPCHECKS_BVNC *) ((IMG_UINT8 *) (RGXFirmwareData(psRGXFW)) +
+	                                         (FWSize - FW_BVNC_BACKWARDS_OFFSET));
+
+	psFWBVNC->ui32LayoutVersion = RGX_INT32_FROM_BE(psBinBVNC->ui32LayoutVersion);
+
+	psFWBVNC->ui32VLenMax = RGX_INT32_FROM_BE(psBinBVNC->ui32VLenMax);
+
+	psFWBVNC->ui64BNC = RGX_INT64_FROM_BE(psBinBVNC->ui64BNC);
+
+	/* Copy with truncation and make sure the string is always NUL-terminated */
+	strncpy(psFWBVNC->aszV, psBinBVNC->aszV, sizeof(psFWBVNC->aszV) - 1);
+	psFWBVNC->aszV[sizeof(psFWBVNC->aszV) - 1] = '\0';
+#endif /* defined(LINUX) */
+
+	return IMG_TRUE;
+}
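+
+/*
+ * Editorial sketch of the layout GetFirmwareBVNC assumes on Linux: the BVNC
+ * block sits at a fixed distance from the END of the firmware image, hence
+ * the backwards offset:
+ *
+ *   |<--------------- RGXFirmwareSize() --------------->|
+ *   [ FW code / data ...    ][ RGXFWIF_COMPCHECKS_BVNC  ]
+ *                            ^ FW_BVNC_BACKWARDS_OFFSET from the end
+ *
+ * The fields are stored big-endian in the binary and converted with the
+ * RGX_INT*_FROM_BE() helpers above.
+ */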
+
+/*!
+*******************************************************************************
+
+ @Function     InitFirmware
+
+ @Description  Allocate, initialise and pdump Firmware code and data memory
+
+ @Input        psDeviceNode    : Device Node
+ @Input        psHints         : Apphints
+ @Input        psBVNC          : Compatibility checks
+ @Output       phFWCodePMR     : FW code PMR handle
+ @Output       phFWDataPMR     : FW data PMR handle
+ @Output       phFWCorememPMR  : FW coremem code PMR handle
+ @Output       phHWPerfDataPMR : HWPerf control PMR handle
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 RGX_SRVINIT_APPHINTS *psHints,
+                                 RGXFWIF_COMPCHECKS_BVNC *psBVNC,
+                                 PMR **phFWCodePMR,
+                                 PMR **phFWDataPMR,
+                                 PMR **phFWCorememPMR,
+                                 PMR **phHWPerfDataPMR)
+{
+	IMG_INT32         i32DriverMode;
+	struct RGXFW      *psRGXFW = NULL;
+	const IMG_BYTE    *pbRGXFirmware = NULL;
+	RGXFWIF_COMPCHECKS_BVNC sFWBVNC;
+
+	/* FW code memory */
+	IMG_DEVMEM_SIZE_T uiFWCodeAllocSize;
+	IMG_DEV_VIRTADDR  sFWCodeDevVAddrBase;
+	DEVMEM_MEMDESC    *psFWCodeHostMemDesc;
+	void              *pvFWCodeHostAddr;
+
+	/* FW data memory */
+	IMG_DEVMEM_SIZE_T uiFWDataAllocSize;
+	IMG_DEV_VIRTADDR  sFWDataDevVAddrBase;
+	DEVMEM_MEMDESC    *psFWDataHostMemDesc;
+	void              *pvFWDataHostAddr;
+
+	/* FW coremem code memory */
+	IMG_DEVMEM_SIZE_T uiFWCorememCodeAllocSize;
+	IMG_DEV_VIRTADDR  sFWCorememDevVAddrBase;
+
+	/*
+	 * psFWCorememHostMemDesc is declared unconditionally but only used where
+	 * needed (PVR_UNREFERENCED_PARAMETER doesn't help for local variables
+	 * with certain compilers)
+	 */
+	DEVMEM_MEMDESC    *psFWCorememHostMemDesc;
+	void              *pvFWCorememHostAddr = NULL;
+
+	RGXFWIF_DEV_VIRTADDR sFWCorememFWAddr; /* FW coremem data */
+	RGXFWIF_DEV_VIRTADDR sRGXFwInit;       /* FW init struct */
+	RGX_LAYER_PARAMS sLayerParams;
+	IMG_UINT32 ui32FWConfigFlags, ui32FWConfigFlagsExt;
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST)) 
+	{
+		IMG_CHAR *pszFWFilename = NULL;
+		IMG_CHAR *pszFWpFilename = NULL;
+
+		IMG_CHAR aszFWFilenameStr[OSStringLength(RGX_FW_FILENAME)+MAX_BVNC_STRING_LEN+2];
+		IMG_CHAR aszFWpFilenameStr[OSStringLength(RGX_FW_FILENAME)+MAX_BVNC_STRING_LEN+3];
+
+		pszFWFilename = &aszFWFilenameStr[0];
+		OSSNPrintf(pszFWFilename, OSStringLength(RGX_FW_FILENAME)+MAX_BVNC_STRING_LEN+2, "%s.%d.%d.%d.%d%s", RGX_FW_FILENAME,
+		           psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+		           psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) ? "" : ".vz");
+		pszFWpFilename = &aszFWpFilenameStr[0];
+		OSSNPrintf(pszFWpFilename, OSStringLength(RGX_FW_FILENAME)+MAX_BVNC_STRING_LEN+3, "%s.%d.%dp.%d.%d%s", RGX_FW_FILENAME,
+		           psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+		           psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) ? "" : ".vz");
+
+		/*
+		 * Get pointer to Firmware image
+		 */
+		psRGXFW = RGXLoadFirmware(psDeviceNode, pszFWFilename, pszFWpFilename);
+		if (psRGXFW == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXLoadFirmware failed"));
+			eError = PVRSRV_ERROR_INIT_FAILURE;
+			goto cleanup_initfw;
+		}
+		pbRGXFirmware = RGXFirmwareData(psRGXFW);
+
+		if (!GetFirmwareBVNC(psRGXFW, &sFWBVNC))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "InitFirmware: GetFirmwareBVNC failed to get the Firmware BVNC"));
+			eError = PVRSRV_ERROR_INIT_FAILURE;
+			goto cleanup_initfw;
+		}
+
+	}
+	sLayerParams.psDevInfo = psDevInfo;
+
+	/*
+	 * Allocate Firmware memory
+	 */
+
+	eError = RGXGetFWImageAllocSize(&sLayerParams,
+	                                &uiFWCodeAllocSize,
+	                                &uiFWDataAllocSize,
+	                                &uiFWCorememCodeAllocSize);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXGetFWImageAllocSize failed"));
+		goto cleanup_initfw;
+	}
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	/* Disable META core memory allocation unless the META DMA is available */
+	if (!RGXDeviceHasFeature(&sLayerParams, RGX_FEATURE_META_DMA_BIT_MASK))
+	{
+		uiFWCorememCodeAllocSize = 0;
+	}
+#endif
+	eError = PVRSRVRGXInitAllocFWImgMemKM(psDeviceNode,
+	                                      uiFWCodeAllocSize,
+	                                      uiFWDataAllocSize,
+	                                      uiFWCorememCodeAllocSize,
+	                                      phFWCodePMR,
+	                                      &sFWCodeDevVAddrBase,
+	                                      phFWDataPMR,
+	                                      &sFWDataDevVAddrBase,
+	                                      phFWCorememPMR,
+	                                      &sFWCorememDevVAddrBase,
+	                                      &sFWCorememFWAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "InitFirmware: PVRSRVRGXInitAllocFWImgMem failed (%d)", eError));
+		goto cleanup_initfw;
+	}
+
+
+	/*
+	 * Setup Firmware initialisation data
+	 */
+
+	GetFWConfigFlags(psHints, &ui32FWConfigFlags, &ui32FWConfigFlagsExt);
+
+	eError = PVRSRVRGXInitFirmwareKM(psDeviceNode,
+	                                 &sRGXFwInit,
+	                                 psHints->bEnableSignatureChecks,
+	                                 psHints->ui32SignatureChecksBufSize,
+	                                 psHints->ui32HWPerfFWBufSize,
+	                                 (IMG_UINT64)psHints->ui32HWPerfFilter0 |
+	                                 ((IMG_UINT64)psHints->ui32HWPerfFilter1 << 32),
+	                                 0,
+	                                 NULL,
+	                                 ui32FWConfigFlags,
+	                                 psHints->ui32LogType,
+	                                 GetFilterFlags(psHints),
+	                                 psHints->ui32JonesDisableMask,
+	                                 psHints->ui32HWRDebugDumpLimit,
+	                                 psBVNC,
+	                                 &sFWBVNC,
+	                                 sizeof(RGXFWIF_HWPERF_CTL),
+	                                 phHWPerfDataPMR,
+	                                 psHints->eRGXRDPowerIslandConf,
+	                                 psHints->eFirmwarePerf,
+	                                 ui32FWConfigFlagsExt);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "InitFirmware: PVRSRVRGXInitFirmware failed (%d)", eError));
+		goto cleanup_initfw;
+	}
+
+	/*
+	 * Acquire pointers to Firmware allocations
+	 */
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+	eError = AcquireHostData(psDeviceNode,
+	                         *phFWCodePMR,
+	                         &psFWCodeHostMemDesc,
+	                         &pvFWCodeHostAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "InitFirmware: AcquireHostData for FW code failed (%d)", eError));
+		goto release_code;
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(psFWCodeHostMemDesc);
+
+	/* We can't get a pointer to a secure FW allocation from within the DDK */
+	pvFWCodeHostAddr = NULL;
+#endif
+
+	eError = AcquireHostData(psDeviceNode,
+	                         *phFWDataPMR,
+	                         &psFWDataHostMemDesc,
+	                         &pvFWDataHostAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "InitFirmware: AcquireHostData for FW data failed (%d)", eError));
+		goto release_data;
+	}
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+	if (uiFWCorememCodeAllocSize)
+	{
+		eError = AcquireHostData(psDeviceNode,
+								 *phFWCorememPMR,
+								 &psFWCorememHostMemDesc,
+								 &pvFWCorememHostAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "InitFirmware: AcquireHostData for FW coremem code failed (%d)", eError));
+			goto release_corememcode;
+		}
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(psFWCorememHostMemDesc);
+
+	/* We can't get a pointer to a secure FW allocation from within the DDK */
+	pvFWCorememHostAddr = NULL;
+#endif
+
+	/* The driver execution mode AppHint can be either an override or
+	   non-override 32-bit value. An override value has the MSB set and a
+	   non-override value has it cleared. Excluding this MSB and treating
+	   the remaining 31-bit value as a signed integer, the mode values are
+	   -1 native mode, 0 host mode and +1 guest mode respectively */
+	i32DriverMode = psHints->ui32DriverMode & 0x7FFFFFFF;
+	i32DriverMode |= (psHints->ui32DriverMode & (1<<30)) ? (1<<31) : 0;
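+
+	/*
+	 * Worked example (editorial, values for illustration only): an override
+	 * AppHint of 0x80000001 has the MSB set; masking with 0x7FFFFFFF leaves
+	 * 0x00000001 and, as bit 30 is clear, the result is +1 (guest). A value
+	 * of 0x7FFFFFFF has bit 30 set, so bit 31 is replicated and the result
+	 * reads back as 0xFFFFFFFF, i.e. -1 (native), when interpreted as a
+	 * signed integer.
+	 */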
+	if (i32DriverMode <= (IMG_INT32)DRIVER_MODE_HOST)
+	{
+		/*
+		 * Process the Firmware image and setup code and data segments.
+		 *
+		 * When the trusted device is enabled and the FW code lives
+		 * in secure memory we will only setup the data segments here,
+		 * while the code segments will be loaded to secure memory
+		 * by the trusted device.
+		 */
+		eError = RGXProcessFWImage(&sLayerParams,
+								   pbRGXFirmware,
+								   pvFWCodeHostAddr,
+								   pvFWDataHostAddr,
+								   pvFWCorememHostAddr,
+								   &sFWCodeDevVAddrBase,
+								   &sFWDataDevVAddrBase,
+								   &sFWCorememDevVAddrBase,
+								   &sFWCorememFWAddr,
+								   &sRGXFwInit,
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+								   2,
+#else
+								   psHints->eUseMETAT1 == RGX_META_T1_OFF ? 1 : 2,
+#endif
+								   psHints->eUseMETAT1 == RGX_META_T1_MAIN ? 1 : 0);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXProcessFWImage failed (%d)", eError));
+			goto release_fw_allocations;
+		}
+	}
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+	eError = RGXTDProcessFWImage(psDeviceNode, psRGXFW);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXTDProcessFWImage failed (%d)", eError));
+		goto release_fw_allocations;
+	}
+#endif
+
+	/*
+	 * Perform final steps (if any) on the kernel
+	 * before pdumping the Firmware allocations
+	 */
+	eError = PVRSRVRGXInitFinaliseFWImageKM(psDeviceNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "InitFirmware: PVRSRVRGXInitFinaliseFWImageKM failed (%d)", eError));
+		goto release_fw_allocations;
+	}
+
+	/*
+	 * PDump Firmware allocations
+	 */
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware code image");
+	DevmemPDumpLoadMem(psFWCodeHostMemDesc,
+	                   0,
+	                   uiFWCodeAllocSize,
+	                   PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware data image");
+	DevmemPDumpLoadMem(psFWDataHostMemDesc,
+	                   0,
+	                   uiFWDataAllocSize,
+	                   PDUMP_FLAGS_CONTINUOUS);
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+	if (uiFWCorememCodeAllocSize)
+	{
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware coremem image");
+		DevmemPDumpLoadMem(psFWCorememHostMemDesc,
+						   0,
+						   uiFWCorememCodeAllocSize,
+						   PDUMP_FLAGS_CONTINUOUS);
+	}
+#endif
+
+
+	/*
+	 * Release Firmware allocations and clean up
+	 */
+
+release_fw_allocations:
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+release_corememcode:
+	if (uiFWCorememCodeAllocSize)
+	{
+		ReleaseHostData(psFWCorememHostMemDesc);
+	}
+#endif
+
+release_data:
+	ReleaseHostData(psFWDataHostMemDesc);
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+release_code:
+	ReleaseHostData(psFWCodeHostMemDesc);
+#endif
+cleanup_initfw:
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) && psRGXFW != NULL)
+	{
+		RGXUnloadFirmware(psRGXFW);
+	}
+
+	return eError;
+}
+
+
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+
+ @Function     InitialiseHWPerfCounters
+
+ @Description  Initialises the hardware performance counters and dumps them
+               out to pdump so that they can be modified at a later point.
+
+ @Input        psDeviceNode
+ @Input        psHWPerfDataMemDesc
+ @Input        psHWPerfInitDataInt
+
+ @Return       void
+
+******************************************************************************/
+
+static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode, DEVMEM_MEMDESC *psHWPerfDataMemDesc, RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt)
+{
+	RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData;
+	IMG_UINT32 ui32CntBlkModelLen;
+	const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel;
+	const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc;
+	IMG_UINT32 ui32BlockID, ui32BlkCfgIdx, ui32CounterIdx;
+	RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo;
+	void *pvDev = psDeviceNode->pvDevice;
+
+	ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel);
+	for(ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++)
+	{
+		/* Exit early if this core does not have any of these counter blocks
+		 * due to core type/BVNC features. */
+		psBlkTypeDesc = &asCntBlkTypeModel[ui32BlkCfgIdx];
+		if (psBlkTypeDesc->pfnIsBlkPresent(psBlkTypeDesc, pvDev, &sCntBlkRtInfo) == IMG_FALSE)
+		{
+			continue;
+		}
+
+		/* Program all counters in one block so those already on may
+		 * be configured off and vice versa. */
+		for (ui32BlockID = psBlkTypeDesc->uiCntBlkIdBase;
+					 ui32BlockID < psBlkTypeDesc->uiCntBlkIdBase+sCntBlkRtInfo.uiNumUnits;
+					 ui32BlockID++)
+		{
+
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Unit %d Block : %s", ui32BlockID-psBlkTypeDesc->uiCntBlkIdBase, psBlkTypeDesc->pszBlockNameComment);
+			/* Get the block configure store to update from the global store of
+			 * block configuration. This is used to remember the configuration
+			 * between configurations and core power on in APM */
+			psHWPerfInitBlkData = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfInitDataInt);
+			/* Assert to check for HWPerf block mis-configuration */
+			PVR_ASSERT(psHWPerfInitBlkData);
+
+			psHWPerfInitBlkData->bValid = IMG_TRUE;	
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "bValid: This specifies if the layout block is valid for the given BVNC.");
+			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+							(size_t)&(psHWPerfInitBlkData->bValid) - (size_t)(psHWPerfInitDataInt),
+							psHWPerfInitBlkData->bValid,
+							PDUMP_FLAGS_CONTINUOUS);
+
+			psHWPerfInitBlkData->bEnabled = IMG_FALSE;
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "bEnabled: Set to 0x1 if the block needs to be enabled during playback. ");
+			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+							(size_t)&(psHWPerfInitBlkData->bEnabled) - (size_t)(psHWPerfInitDataInt),
+							psHWPerfInitBlkData->bEnabled,
+							PDUMP_FLAGS_CONTINUOUS);
+
+			psHWPerfInitBlkData->eBlockID = ui32BlockID;
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "eBlockID: The Block ID for the layout block. See RGX_HWPERF_CNTBLK_ID for further information.");
+			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+							(size_t)&(psHWPerfInitBlkData->eBlockID) - (size_t)(psHWPerfInitDataInt),
+							psHWPerfInitBlkData->eBlockID,
+							PDUMP_FLAGS_CONTINUOUS);
+
+			psHWPerfInitBlkData->uiCounterMask = 0x00;
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "uiCounterMask: Bitmask for selecting the counters that need to be configured. (Bit 0 - counter0, bit 1 - counter1, and so on.)");
+			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+							(size_t)&(psHWPerfInitBlkData->uiCounterMask) - (size_t)(psHWPerfInitDataInt),
+							psHWPerfInitBlkData->uiCounterMask,
+							PDUMP_FLAGS_CONTINUOUS);
+
+			for(ui32CounterIdx = RGX_CNTBLK_COUNTER0_ID; ui32CounterIdx < psBlkTypeDesc->uiNumCounters; ui32CounterIdx++)
+			{
+				psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx] = IMG_UINT64_C(0x0000000000000000);
+
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "%s_COUNTER_%d", psBlkTypeDesc->pszBlockNameComment,ui32CounterIdx);
+				DevmemPDumpLoadMemValue64(psHWPerfDataMemDesc,
+							(size_t)&(psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt),
+							psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx],
+							PDUMP_FLAGS_CONTINUOUS);
+
+			}
+		}
+	}
+}
+
+/*!
+*******************************************************************************
+
+ @Function     InitialiseCustomCounters
+
+ @Description  Initialises the custom counters and dumps them out to pdump
+               so that they can be modified at a later point.
+
+ @Input        psDeviceNode
+ @Input        psHWPerfDataMemDesc
+
+ @Return       void
+
+******************************************************************************/
+
+static void InitialiseCustomCounters(PVRSRV_DEVICE_NODE *psDeviceNode, DEVMEM_MEMDESC *psHWPerfDataMemDesc)
+{
+	IMG_UINT32 ui32CustomBlock, ui32CounterID;
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "ui32SelectedCountersBlockMask - The Bitmask of the custom counters that are to be selected");
+	DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+						offsetof(RGXFWIF_HWPERF_CTL, ui32SelectedCountersBlockMask),
+						0,
+						PDUMP_FLAGS_CONTINUOUS);
+
+	for( ui32CustomBlock = 0; ui32CustomBlock < RGX_HWPERF_MAX_CUSTOM_BLKS; ui32CustomBlock++ )
+	{
+		/*
+		 * Some compilers cannot cope with the use of offsetof() below - the
+		 * specific problem is the use of a non-const variable in the
+		 * expression, which needs to be a compile-time constant. The typical
+		 * compiler error produced is "expression must have a constant value".
+		 */
+		const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounters
+		= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].ui32NumSelectedCounters);
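+		/*
+		 * Editorial note: this computes the same value that
+		 * offsetof(RGXFWIF_HWPERF_CTL, SelCntr[N].ui32NumSelectedCounters)
+		 * would give for a constant N, by taking the address of the member
+		 * within a NULL-based struct and casting it to an offset.
+		 */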
+
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "ui32NumSelectedCounters - The Number of counters selected for this Custom Block: %d",ui32CustomBlock );
+		DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+					uiOffsetOfCustomBlockSelectedCounters,
+					0,
+					PDUMP_FLAGS_CONTINUOUS);
+
+		for(ui32CounterID = 0; ui32CounterID < RGX_HWPERF_MAX_CUSTOM_CNTRS; ui32CounterID++ )
+		{
+			const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounterIDs
+			= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].aui32SelectedCountersIDs[ui32CounterID]);
+
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "CUSTOMBLK_%d_COUNTERID_%d",ui32CustomBlock, ui32CounterID);
+			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+					uiOffsetOfCustomBlockSelectedCounterIDs,
+					0,
+					PDUMP_FLAGS_CONTINUOUS);
+		}
+	}
+}
+
+/*!
+*******************************************************************************
+
+ @Function     InitialiseAllCounters
+
+ @Description  Initialise HWPerf and custom counters
+
+ @Input        psDeviceNode   : Device Node
+ @Input        psHWPerfDataPMR : HWPerf control PMR handle
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                          PMR *psHWPerfDataPMR)
+{
+	RGXFWIF_HWPERF_CTL *psHWPerfInitData;
+	DEVMEM_MEMDESC *psHWPerfDataMemDesc;
+	PVRSRV_ERROR eError;
+
+	eError = AcquireHostData(psDeviceNode,
+	                         psHWPerfDataPMR,
+	                         &psHWPerfDataMemDesc,
+	                         (void **)&psHWPerfInitData);
+	if (eError != PVRSRV_OK)
+	{
+		/* AcquireHostData cleans up after itself on failure,
+		 * so there is nothing to release here */
+		PVR_DPF((PVR_DBG_ERROR, "InitialiseAllCounters: AcquireHostData failed (%d)", eError));
+		return eError;
+	}
+
+	InitialiseHWPerfCounters(psDeviceNode, psHWPerfDataMemDesc, psHWPerfInitData);
+	InitialiseCustomCounters(psDeviceNode, psHWPerfDataMemDesc);
+
+	ReleaseHostData(psHWPerfDataMemDesc);
+
+	return eError;
+}
+#endif /* PDUMP */
+
+static void
+_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+	void * pvParamState = NULL;
+	IMG_UINT32 ui32LogType;
+	IMG_BOOL bAnyLogGroupConfigured;
+
+	IMG_CHAR * szBufferName = "PVRHTBuffer";
+	IMG_UINT32 ui32BufferSize;
+	IMG_UINT32 ui32OpMode;
+
+	/* Services initialisation parameters */
+	pvParamState = SrvInitParamOpen();
+
+	SrvInitParamGetUINT32BitField(pvParamState, EnableHTBLogGroup, ui32LogType);
+	bAnyLogGroupConfigured = ui32LogType ? IMG_TRUE : IMG_FALSE;
+	SrvInitParamGetUINT32List(pvParamState, HTBOperationMode, ui32OpMode);
+	SrvInitParamGetUINT32(pvParamState, HTBufferSize, ui32BufferSize);
+
+	eError = HTBConfigure(psDeviceNode, szBufferName, ui32BufferSize);
+	PVR_LOGG_IF_ERROR(eError, "HTBConfigure", cleanup);
+
+	if (bAnyLogGroupConfigured)
+	{
+		eError = HTBControl(psDeviceNode, 1, &ui32LogType, 0, 0, HTB_LOGMODE_ALLPID, (HTB_OPMODE_CTRL)ui32OpMode);
+		PVR_LOGG_IF_ERROR(eError, "HTBControl", cleanup);
+	}
+
+cleanup:
+	SrvInitParamClose(pvParamState);
+}
+
+#if defined(PDUMP) && defined(__KERNEL__)
+static void RGXInitFWSigRegisters(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_UINT32	ui32PhantomCnt = 0;
+
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK)
+	{
+		ui32PhantomCnt = RGX_REQ_NUM_PHANTOMS(psDevInfo->sDevFeatureCfg.ui32NumClusters) - 1;
+	}
+
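+	/*
+	 * Editorial note: each RGXFW_REGISTER_LIST initialiser below is, in
+	 * field order, {RegNum, IndirectRegNum, IndirectStartVal,
+	 * IndirectEndVal}; an IndirectRegNum of 0 means the register is read
+	 * directly (see how these lists are dumped out in RGXInit below).
+	 */
+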
+	/* Initialise the TA-related signature registers */
+	if(0 == gui32TASigRegCount)
+	{
+		if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK)
+		{
+			asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_USC_UVB_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT,0, ui32PhantomCnt};
+		}
+		else
+		{
+			asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_USC_UVS0_CHECKSUM, 0, 0, 0};
+			asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_USC_UVS1_CHECKSUM, 0, 0, 0};
+			asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_USC_UVS2_CHECKSUM, 0, 0, 0};
+			asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_USC_UVS3_CHECKSUM, 0, 0, 0};
+			asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_USC_UVS4_CHECKSUM, 0, 0, 0};
+			asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_USC_UVS5_CHECKSUM, 0, 0, 0};
+		}
+
+		if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK)
+		{
+			if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK)
+			{
+				asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_PPP_CLIP_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT,0, ui32PhantomCnt};
+			}
+			else
+			{
+				asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_PPP, 0, 0, 0};
+			}
+			asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_TE_CHECKSUM,0, 0, 0};
+		}
+		else
+		{
+			asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_PPP_SIGNATURE, 0, 0, 0};
+			asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_TE_SIGNATURE, 0, 0, 0};
+		}
+
+		asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_VCE_CHECKSUM, 0, 0, 0};
+
+		if(0 == (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PDS_PER_DUST_BIT_MASK) ||
+		   0 == (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & FIX_HW_BRN_62204_BIT_MASK))
+		{
+			asTASigRegList[gui32TASigRegCount++] = 	(RGXFW_REGISTER_LIST){RGX_CR_PDS_DOUTM_STM_SIGNATURE,0, 0, 0};
+		}
+	}
+
+	if(0 == gui323DSigRegCount)
+	{
+		/* List of 3D signature and checksum register addresses */
+		if(0 == (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+		{
+			as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_PDS_CHECKSUM,			0,							0, 0};
+			as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_TPF_CHECKSUM,			0,							0, 0};
+			as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE0_CHECKSUM,		0,							0, 0};
+			as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE1_CHECKSUM,		0,							0, 0};
+			as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_IFPU_ISP_CHECKSUM,			0,							0, 0};
+
+			if (0 != (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PBE2_IN_XE_BIT_MASK) &&
+			    psDevInfo->sDevFeatureCfg.ui32NumClusters > 1)
+			{
+				as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PBE_CHECKSUM,				0,							0, 0};
+			}
+			else
+			{
+				as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PBE_CHECKSUM,				RGX_CR_PBE_INDIRECT,		0, psDevInfo->sDevFeatureCfg.ui32NumClusters-1};
+			}
+		}
+		else
+		{
+			if ((0 != (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK)) ||
+			    (0 != (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_ROGUEXE_BIT_MASK)))
+			{
+				const IMG_UINT32 ui32RasterModCnt = RGX_GET_NUM_RASTERISATION_MODULES(psDevInfo->sDevFeatureCfg) - 1;
+
+				as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_PDS_CHECKSUM,			RGX_CR_RASTERISATION_INDIRECT,	0, ui32RasterModCnt};
+				as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_TPF_CHECKSUM,			RGX_CR_RASTERISATION_INDIRECT,	0, ui32RasterModCnt};
+				as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE0_CHECKSUM,		RGX_CR_RASTERISATION_INDIRECT,	0, ui32RasterModCnt};
+				as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE1_CHECKSUM,		RGX_CR_RASTERISATION_INDIRECT,	0, ui32RasterModCnt};
+				as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_IFPU_ISP_CHECKSUM,			RGX_CR_RASTERISATION_INDIRECT,	0, ui32RasterModCnt};
+			}
+			else
+			{
+				as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_PDS_CHECKSUM,			RGX_CR_BLACKPEARL_INDIRECT,	0, ui32PhantomCnt};
+				as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_TPF_CHECKSUM,			RGX_CR_BLACKPEARL_INDIRECT,	0, ui32PhantomCnt};
+				as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE0_CHECKSUM,		RGX_CR_BLACKPEARL_INDIRECT,	0, ui32PhantomCnt};
+				as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE1_CHECKSUM,		RGX_CR_BLACKPEARL_INDIRECT,	0, ui32PhantomCnt};
+				as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_IFPU_ISP_CHECKSUM,			RGX_CR_BLACKPEARL_INDIRECT,	0, ui32PhantomCnt};
+			}
+
+			as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PBE_CHECKSUM,				RGX_CR_PBE_INDIRECT,		0, psDevInfo->sDevFeatureCfg.ui32NumClusters-1};
+
+		}
+
+	}
+
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     RGXInit
+
+ @Description  RGX initialisation
+
+ @Input        psDeviceNode
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+
+	/* Services initialisation parameters */
+	RGX_SRVINIT_APPHINTS sApphints = {0};
+	IMG_UINT32 ui32DeviceFlags;
+	IMG_UINT64	ui64ErnsBrns = 0, ui64Features = 0;
+
+	void *pvDevInfo = NULL;
+	/* Server scripts */
+	RGX_SCRIPT_BUILD sDbgInitScript = {RGX_MAX_DEBUG_COMMANDS,  0, IMG_FALSE, asDbgCommands};
+
+	/* FW allocations handles */
+	PMR *psFWCodePMR;
+	PMR *psFWDataPMR;
+	PMR *psFWCorememPMR;
+
+	/* HWPerf Ctl allocation handle */
+	PMR *psHWPerfDataPMR;
+
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+	IMG_CHAR sV[RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX];
+
+	OSSNPrintf(sV, sizeof(sV), "%d", psDevInfo->sDevFeatureCfg.ui32V);
+	/*
+	 * FIXME:
+	 * Is this check redundant for the kernel mode version of srvinit?
+	 * How do we check the user mode BVNC in this case?
+	 */
+	rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax,
+	                psDevInfo->sDevFeatureCfg.ui32B, sV,
+	                psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C);
+
+
+	ui64ErnsBrns = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+	ui64Features = psDevInfo->sDevFeatureCfg.ui64Features;
+
+	pvDevInfo = (void *)psDevInfo;
+
+	/* Services initialisation parameters */
+	_ParseHTBAppHints(psDeviceNode);
+	GetApphints(&sApphints, ui64ErnsBrns, ui64Features);
+	GetDeviceFlags(&sApphints, &ui32DeviceFlags);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	PVRSRVGPUVIRTPopulateLMASubArenasKM(psDeviceNode, sApphints.aui32OSidMin, sApphints.aui32OSidMax, sApphints.bEnableTrustedDeviceAceConfig);
+}
+#endif
+
+
+	eError = InitFirmware(psDeviceNode,
+	                      &sApphints,
+	                      &sBVNC,
+	                      &psFWCodePMR,
+	                      &psFWDataPMR,
+	                      &psFWCorememPMR,
+	                      &psHWPerfDataPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXInit: InitFirmware failed (%d)", eError));
+		goto cleanup;
+	}
+
+	/*
+	 * Build Debug info script
+	 */
+	sDbgInitScript.psCommands = asDbgCommands;
+
+	if (!PrepareDebugScript(&sDbgInitScript, sApphints.eFirmwarePerf != FW_PERF_CONF_NONE, pvDevInfo))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXInit: Ran out of memory for the debug commands"));
+	}
+
+	/* Finish the script */
+	if (!ScriptHalt(&sDbgInitScript))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXInit: Ran out of memory for the terminating debug command"));
+	}
+
+#if defined(PDUMP)
+	eError = InitialiseAllCounters(psDeviceNode, psHWPerfDataPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXInit: InitialiseAllCounters failed (%d)", eError));
+		goto cleanup;
+	}
+#endif
+
+	/* Done using PMR handles, now release them */
+	eError = PVRSRVRGXInitReleaseFWInitResourcesKM(psDeviceNode,
+	                                               psFWCodePMR,
+	                                               psFWDataPMR,
+	                                               psFWCorememPMR,
+	                                               psHWPerfDataPMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXInit: BridgeRGXInitReleaseFWInitResources failed (%d)", eError));
+		goto cleanup;
+	}
+
+	/*
+	 * Perform second stage of RGX initialisation
+	 */
+	eError = PVRSRVRGXInitDevPart2KM(psDeviceNode,
+	                                 sDbgInitScript.psCommands,
+	                                 ui32DeviceFlags,
+	                                 sApphints.ui32HWPerfHostBufSize,
+	                                 sApphints.ui32HWPerfHostFilter,
+	                                 sApphints.eRGXActivePMConf);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXInit: PVRSRVRGXInitDevPart2KM failed (%d)", eError));
+		goto cleanup;
+	}
+
+#if defined(SUPPORT_VALIDATION)
+	PVRSRVAppHintDumpState();
+#endif
+
+#if defined(PDUMP)
+	/*
+	 * Dump the list of signature registers
+	 */
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 ui32TASigRegCount = 0, ui323DSigRegCount = 0;
+		IMG_BOOL	bRayTracing = IMG_FALSE;
+
+#if defined(__KERNEL__)
+		RGXInitFWSigRegisters(psDevInfo);
+		ui32TASigRegCount = gui32TASigRegCount;
+		ui323DSigRegCount = gui323DSigRegCount;
+		if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+		{
+			bRayTracing = IMG_TRUE;
+		}
+#if defined(DEBUG)
+		if (gui32TASigRegCount > SIG_REG_TA_MAX_COUNT)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: TA signature registers max count exceeded",__func__));
+			PVR_ASSERT(0);
+		}
+		if (gui323DSigRegCount > SIG_REG_3D_MAX_COUNT)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: 3D signature registers max count exceeded",__func__));
+			PVR_ASSERT(0);
+		}
+#endif
+#else
+		ui32TASigRegCount = sizeof(asTASigRegList)/sizeof(RGXFW_REGISTER_LIST);
+		ui323DSigRegCount = sizeof(as3DSigRegList)/sizeof(RGXFW_REGISTER_LIST);
+#if defined(RGX_FEATURE_RAY_TRACING)
+		bRayTracing = IMG_TRUE;
+#endif
+#endif
+
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Signature TA registers: ");
+		for (i = 0; i < ui32TASigRegCount; i++)
+		{
+			if (asTASigRegList[i].ui16IndirectRegNum != 0)
+			{
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X (indirect via 0x%8.8X %d to %d)",
+				              asTASigRegList[i].ui16RegNum, asTASigRegList[i].ui16IndirectRegNum,
+				              asTASigRegList[i].ui16IndirectStartVal, asTASigRegList[i].ui16IndirectEndVal);
+			}
+			else
+			{
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X", asTASigRegList[i].ui16RegNum);
+			}
+		}
+
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Signature 3D registers: ");
+		for (i = 0; i < ui323DSigRegCount; i++)
+		{
+			if (as3DSigRegList[i].ui16IndirectRegNum != 0)
+			{
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X (indirect via 0x%8.8X %d to %d)",
+				              as3DSigRegList[i].ui16RegNum, as3DSigRegList[i].ui16IndirectRegNum,
+				              as3DSigRegList[i].ui16IndirectStartVal, as3DSigRegList[i].ui16IndirectEndVal);
+			}
+			else
+			{
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X", as3DSigRegList[i].ui16RegNum);
+			}
+		}
+
+		if(bRayTracing)
+		{
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Signature RTU registers: ");
+			for (i = 0; i < sizeof(asRTUSigRegList)/sizeof(RGXFW_REGISTER_LIST); i++)
+			{
+				if (asRTUSigRegList[i].ui16IndirectRegNum != 0)
+				{
+					PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X (indirect via 0x%8.8X %d to %d)",
+								  asRTUSigRegList[i].ui16RegNum, asRTUSigRegList[i].ui16IndirectRegNum,
+								  asRTUSigRegList[i].ui16IndirectStartVal, asRTUSigRegList[i].ui16IndirectEndVal);
+				}
+				else
+				{
+					PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X", asRTUSigRegList[i].ui16RegNum);
+				}
+			}
+
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Signature SHG registers: ");
+			for (i = 0; i < sizeof(asSHGSigRegList)/sizeof(RGXFW_REGISTER_LIST); i++)
+			{
+				if (asSHGSigRegList[i].ui16IndirectRegNum != 0)
+				{
+					PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X (indirect via 0x%8.8X %d to %d)",
+								  asSHGSigRegList[i].ui16RegNum, asSHGSigRegList[i].ui16IndirectRegNum,
+								  asSHGSigRegList[i].ui16IndirectStartVal, asSHGSigRegList[i].ui16IndirectEndVal);
+				}
+				else
+				{
+					PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X", asSHGSigRegList[i].ui16RegNum);
+				}
+			}
+		}
+
+	}
+#endif	/* defined(PDUMP) */
+
+	eError = PVRSRV_OK;
+
+cleanup:
+	return eError;
+}
+
+/******************************************************************************
+ End of file (rgxsrvinit.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsrvinit_script.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsrvinit_script.c
new file mode 100644
index 0000000..040d300
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsrvinit_script.c
@@ -0,0 +1,546 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services script routines used at initialisation time
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxsrvinit_script.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+/*!
+*******************************************************************************
+
+ @Function     OutOfScriptSpace
+
+ @Description  Checks for script space failure
+
+ @Input        psScript
+
+ @Return       IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL OutOfScriptSpace(RGX_SCRIPT_BUILD *psScript)
+{
+	if (psScript->ui32CurrComm >= psScript->ui32MaxLen)
+	{
+		psScript->bOutOfSpace = IMG_TRUE;
+	}
+
+	return psScript->bOutOfSpace;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function     NextScriptCommand
+
+ @Description  Gets next script command to populate
+
+ @Input        psScript
+
+ @Return       RGX_INIT_COMMAND* (NULL when out of script space)
+
+******************************************************************************/
+static RGX_INIT_COMMAND* NextScriptCommand(RGX_SCRIPT_BUILD *psScript)
+{
+	if (OutOfScriptSpace(psScript))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "NextScriptCommand: Out of space for commands (%d)",
+		         psScript->ui32MaxLen));
+		return NULL;
+	}
+
+	return &psScript->psCommands[psScript->ui32CurrComm++];
+}
+
+
+IMG_BOOL ScriptWriteRGXReg(RGX_SCRIPT_BUILD *psScript,
+                           IMG_UINT32 ui32Offset,
+                           IMG_UINT32 ui32Value)
+{
+	RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+	if (psComm != NULL)
+	{
+		psComm->sWriteHWReg.eOp = RGX_INIT_OP_WRITE_HW_REG;
+		psComm->sWriteHWReg.ui32Offset = ui32Offset;
+		psComm->sWriteHWReg.ui32Value = ui32Value;
+
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptPoll64RGXReg(RGX_SCRIPT_BUILD *psScript,
+                            IMG_UINT32 ui32Offset,
+                            IMG_UINT64 ui64Value,
+                            IMG_UINT64 ui64PollMask)
+{
+	RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+	if (psComm != NULL)
+	{
+		psComm->sPoll64HWReg.eOp = RGX_INIT_OP_POLL_64_HW_REG;
+		psComm->sPoll64HWReg.ui32Offset = ui32Offset;
+		psComm->sPoll64HWReg.ui64Value = ui64Value;
+		psComm->sPoll64HWReg.ui64Mask = ui64PollMask;
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptPollRGXReg(RGX_SCRIPT_BUILD *psScript,
+                          IMG_UINT32 ui32Offset,
+                          IMG_UINT32 ui32Value,
+                          IMG_UINT32 ui32PollMask)
+{
+	RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+	if (psComm != NULL)
+	{
+		psComm->sPollHWReg.eOp = RGX_INIT_OP_POLL_HW_REG;
+		psComm->sPollHWReg.ui32Offset = ui32Offset;
+		psComm->sPollHWReg.ui32Value = ui32Value;
+		psComm->sPollHWReg.ui32Mask = ui32PollMask;
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptDBGReadRGXReg(RGX_SCRIPT_BUILD *psScript,
+                             RGX_INIT_OPERATION eOp,
+                             IMG_UINT32 ui32Offset,
+                             IMG_CHAR *pszName)
+{
+	RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+	PVR_ASSERT(strlen(pszName) < RGX_DBG_CMD_NAME_SIZE);
+
+	if (psComm != NULL)
+	{
+		PVR_ASSERT((eOp == RGX_INIT_OP_DBG_READ32_HW_REG) ||
+		           (eOp == RGX_INIT_OP_DBG_READ64_HW_REG));
+
+		psComm->sDBGReadHWReg.eOp = eOp;
+		psComm->sDBGReadHWReg.ui32Offset = ui32Offset;
+
+		strcpy(&psComm->sDBGReadHWReg.aszName[0], pszName);
+
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptDBGCalc(RGX_SCRIPT_BUILD *psScript,
+                       RGX_INIT_OPERATION eOp,
+                       IMG_UINT32 ui32Offset1,
+                       IMG_UINT32 ui32Offset2,
+                       IMG_UINT32 ui32Offset3,
+                       IMG_CHAR *pszName)
+{
+	RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+	PVR_ASSERT(strlen(pszName) < RGX_DBG_CMD_NAME_SIZE);
+
+	if (psComm != NULL)
+	{
+		PVR_ASSERT(eOp == RGX_INIT_OP_DBG_CALC);
+
+		psComm->sDBGCalc.eOp = eOp;
+		psComm->sDBGCalc.ui32Offset1 = ui32Offset1;
+		psComm->sDBGCalc.ui32Offset2 = ui32Offset2;
+		psComm->sDBGCalc.ui32Offset3 = ui32Offset3;
+		strcpy(&psComm->sDBGCalc.aszName[0], pszName);
+
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptWriteRGXRegPDUMPOnly(RGX_SCRIPT_BUILD *psScript,
+                                    IMG_UINT32 ui32Offset,
+                                    IMG_UINT32 ui32Value)
+{
+	RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+	if (psComm != NULL)
+	{
+		psComm->sPDumpHWReg.eOp = RGX_INIT_OP_PDUMP_HW_REG;
+		psComm->sPDumpHWReg.ui32Offset = ui32Offset;
+		psComm->sPDumpHWReg.ui32Value = ui32Value;
+
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptPrepareReadMetaRegThroughSP
+
+ @Description   Add script entries for reading a reg through Meta slave port
+
+ @Input         psScript
+ @Input         ui32RegAddr
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL ScriptPrepareReadMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+                                                  IMG_UINT32 ui32RegAddr)
+{
+	IMG_BOOL bCmdAdded = IMG_FALSE;
+
+	/* Wait for Slave Port to be Ready */
+	bCmdAdded = ScriptPollRGXReg(psScript,
+	                             RGX_CR_META_SP_MSLVCTRL1,
+	                             RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+	                             RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	                             RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+	                             RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+	if (!bCmdAdded) return IMG_FALSE;
+	
+	/* Issue a Read */
+	bCmdAdded = ScriptWriteRGXReg(psScript,
+	                              RGX_CR_META_SP_MSLVCTRL0,
+	                              ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+	if (!bCmdAdded) return IMG_FALSE;
+
+	/* Wait for Slave Port to be Ready: read complete */
+	bCmdAdded = ScriptPollRGXReg(psScript,
+	                             RGX_CR_META_SP_MSLVCTRL1,
+	                             RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+	                             RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	                             RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+	                             RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+
+	return bCmdAdded;
+}
+
+
+IMG_BOOL ScriptDBGReadMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+                                       IMG_UINT32 ui32RegAddr,
+                                       IMG_CHAR *pszName)
+{
+	IMG_BOOL bCmdsAdded = IMG_FALSE;
+
+	/* Issue a Read */
+	bCmdsAdded = ScriptPrepareReadMetaRegThroughSP(psScript, ui32RegAddr);
+	if (!bCmdsAdded) return IMG_FALSE;
+
+	/* Read the value */
+	bCmdsAdded = ScriptDBGReadRGXReg(psScript,
+	                                 RGX_INIT_OP_DBG_READ32_HW_REG,
+	                                 RGX_CR_META_SP_MSLVDATAX,
+	                                 pszName);
+
+	return bCmdsAdded;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptCondPollRGXReg
+
+ @Description   Sets up a script entry for a conditional register poll
+
+ @Input         psScript
+ @Input         ui32CondOffset
+ @Input         ui32CondValue
+ @Input         ui32CondPollMask
+ @Input         ui32Offset
+ @Input         ui32Value
+ @Input         ui32PollMask
+
+ @return        IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL ScriptCondPollRGXReg(RGX_SCRIPT_BUILD *psScript,
+                                     IMG_UINT32 ui32CondOffset,
+                                     IMG_UINT32 ui32CondValue,
+                                     IMG_UINT32 ui32CondPollMask,
+                                     IMG_UINT32 ui32Offset,
+                                     IMG_UINT32 ui32Value,
+                                     IMG_UINT32 ui32PollMask)
+{
+	RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+	if (psComm != NULL)
+	{
+		psComm->sCondPollHWReg.eOp = RGX_INIT_OP_COND_POLL_HW_REG;
+		psComm->sCondPollHWReg.ui32CondOffset = ui32CondOffset;
+		psComm->sCondPollHWReg.ui32CondValue = ui32CondValue;
+		psComm->sCondPollHWReg.ui32CondMask = ui32CondPollMask;
+		psComm->sCondPollHWReg.ui32Offset = ui32Offset;
+		psComm->sCondPollHWReg.ui32Value = ui32Value;
+		psComm->sCondPollHWReg.ui32Mask = ui32PollMask;
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptMetaRegCondPollRGXReg(RGX_SCRIPT_BUILD *psScript,
+                                     IMG_UINT32 ui32MetaRegAddr,
+                                     IMG_UINT32 ui32MetaRegValue,
+                                     IMG_UINT32 ui32MetaRegMask,
+                                     IMG_UINT32 ui32RegAddr,
+                                     IMG_UINT32 ui32RegValue,
+                                     IMG_UINT32 ui32RegMask)
+{
+	IMG_BOOL bCmdsAdded = IMG_FALSE;
+
+	/* Issue a Read */
+	bCmdsAdded = ScriptPrepareReadMetaRegThroughSP(psScript, ui32MetaRegAddr);
+	if (!bCmdsAdded) return IMG_FALSE;
+
+	/* Read the value */
+	bCmdsAdded = ScriptCondPollRGXReg(psScript,
+	                                  RGX_CR_META_SP_MSLVDATAX,
+	                                  ui32MetaRegValue,
+	                                  ui32MetaRegMask,
+	                                  ui32RegAddr,
+	                                  ui32RegValue,
+	                                  ui32RegMask);
+
+	return bCmdsAdded;
+}
+
+
+IMG_BOOL ScriptWriteMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+                                     IMG_UINT32 ui32RegAddr,
+                                     IMG_UINT32 ui32RegValue)
+{
+	IMG_BOOL bCmdAdded = IMG_FALSE;
+
+	/* Wait for Slave Port to be Ready */
+	bCmdAdded = ScriptPollRGXReg(psScript,
+	                             RGX_CR_META_SP_MSLVCTRL1,
+	                             RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+	                             RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	                             RGX_CR_META_SP_MSLVCTRL1_READY_EN |
+	                             RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+	if (!bCmdAdded) return IMG_FALSE;
+
+	/* Issue a Write */
+	bCmdAdded = ScriptWriteRGXReg(psScript,
+	                              RGX_CR_META_SP_MSLVCTRL0,
+	                              ui32RegAddr);
+	if (!bCmdAdded) return IMG_FALSE;
+
+	bCmdAdded = ScriptWriteRGXReg(psScript,
+	                              RGX_CR_META_SP_MSLVDATAT,
+	                              ui32RegValue);
+
+	/* Completion is confirmed by the slave-port-ready poll at the start of the next read/write */
+
+	return bCmdAdded;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptInsertLoopPoint
+
+ @Description   Inserts a loop point in the startup script
+
+ @Input         psScript
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL ScriptInsertLoopPoint(RGX_SCRIPT_BUILD *psScript)
+{
+	RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+	if (psComm != NULL)
+	{
+		psComm->eOp = RGX_INIT_OP_LOOP_POINT;
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptConditionalBranchOnReg
+
+ @Description   Conditionally branches back to the last loop point in the script.
+                Condition is satisfied by the contents of a register
+
+ @Input         psScript
+ @Input         ui32Offset
+ @Input         ui32Value
+ @Input         ui32Mask
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL ScriptConditionalBranchOnReg(RGX_SCRIPT_BUILD *psScript,
+                                             IMG_UINT32 ui32Offset,
+                                             IMG_UINT32 ui32Value,
+                                             IMG_UINT32 ui32Mask)
+{
+	RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+	if (psComm != NULL)
+	{
+		psComm->eOp = RGX_INIT_OP_COND_BRANCH;
+		psComm->sConditionalBranchPoint.ui32Offset = ui32Offset;
+		psComm->sConditionalBranchPoint.ui32Value = ui32Value;
+		psComm->sConditionalBranchPoint.ui32Mask = ui32Mask;
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptPollMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+                                    IMG_UINT32 ui32Offset,
+                                    IMG_UINT32 ui32PollValue,
+                                    IMG_UINT32 ui32PollMask)
+{
+	IMG_BOOL bCmdsAdded = IMG_FALSE;
+
+	bCmdsAdded = ScriptInsertLoopPoint(psScript);
+	if (!bCmdsAdded) return IMG_FALSE;
+
+	bCmdsAdded = ScriptPrepareReadMetaRegThroughSP(psScript, ui32Offset);
+	if (!bCmdsAdded) return IMG_FALSE;
+
+	bCmdsAdded = ScriptConditionalBranchOnReg(psScript,
+	                                          RGX_CR_META_SP_MSLVDATAX,
+	                                          ui32PollValue,
+	                                          ui32PollMask);
+	return bCmdsAdded;
+}
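+
+/* Note: ScriptPollMetaRegThroughSP expands to three script entries: a
+ * LOOP_POINT, a slave-port read that leaves the Meta register value in
+ * RGX_CR_META_SP_MSLVDATAX, and a COND_BRANCH back to the loop point comparing
+ * (MSLVDATAX & ui32PollMask) against ui32PollValue. */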
+
+
+IMG_BOOL ScriptDBGReadMetaCoreReg(RGX_SCRIPT_BUILD *psScript,
+                                  IMG_UINT32 ui32RegAddr,
+                                  IMG_CHAR *pszName)
+{
+	IMG_BOOL bCmdsAdded = IMG_FALSE;
+
+	/* Core Read Ready? */
+	bCmdsAdded = ScriptPollMetaRegThroughSP(psScript,
+	                                        META_CR_TXUXXRXRQ_OFFSET,
+	                                        META_CR_TXUXXRXRQ_DREADY_BIT,
+	                                        META_CR_TXUXXRXRQ_DREADY_BIT);
+	if (!bCmdsAdded) return IMG_FALSE;
+
+	/* Set the reg we are interested in reading */
+	bCmdsAdded = ScriptWriteMetaRegThroughSP(psScript,
+	                                         META_CR_TXUXXRXRQ_OFFSET,
+	                                         ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT);
+	if (!bCmdsAdded) return IMG_FALSE;
+
+	/* Core Read Done? */
+	bCmdsAdded = ScriptPollMetaRegThroughSP(psScript,
+	                                        META_CR_TXUXXRXRQ_OFFSET,
+	                                        META_CR_TXUXXRXRQ_DREADY_BIT,
+	                                        META_CR_TXUXXRXRQ_DREADY_BIT);
+	if (!bCmdsAdded) return IMG_FALSE;
+
+	/* Read the value */
+	bCmdsAdded = ScriptDBGReadMetaRegThroughSP(psScript, META_CR_TXUXXRXDT_OFFSET, pszName);
+
+	return bCmdsAdded;
+}
+
+
+IMG_BOOL ScriptDBGString(RGX_SCRIPT_BUILD *psScript,
+                         const IMG_CHAR *aszString)
+{
+	RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+	if (psComm != NULL)
+	{
+		psComm->sDBGString.eOp = RGX_INIT_OP_DBG_STRING;
+		/* Bounded copy: the incoming string may exceed the command buffer */
+		strncpy(psComm->sDBGString.aszString, aszString,
+		        sizeof(psComm->sDBGString.aszString) - 1);
+		psComm->sDBGString.aszString[sizeof(psComm->sDBGString.aszString) - 1] = '\0';
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+
+IMG_BOOL ScriptHalt(RGX_SCRIPT_BUILD *psScript)
+{
+	RGX_INIT_COMMAND *psComm = NextScriptCommand(psScript);
+
+	if (psComm != NULL)
+	{
+		psComm->eOp = RGX_INIT_OP_HALT;
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+
+/******************************************************************************
+ End of file (rgxsrvinit_script.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsrvinit_script.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsrvinit_script.h
new file mode 100644
index 0000000..8a07863
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxsrvinit_script.h
@@ -0,0 +1,331 @@
+/*************************************************************************/ /*!
+@File
+@Title          Header for Services script routines used at initialisation time
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the connections between the various parts of the
+                initialisation server.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __RGXSRVINIT_SCRIPT_H__
+#define __RGXSRVINIT_SCRIPT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "rgxscript.h"
+#include "rgx_firmware_processor.h"
+#include "rgxdefs_km.h"
+
+
+typedef struct _RGX_SCRIPT_BUILD
+{
+	IMG_UINT32 ui32MaxLen;
+	IMG_UINT32 ui32CurrComm;
+	IMG_BOOL bOutOfSpace;
+	RGX_INIT_COMMAND *psCommands;
+} RGX_SCRIPT_BUILD;
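+
+/* Illustrative usage sketch only (not part of the driver): callers wrap a
+ * pre-allocated command array in an RGX_SCRIPT_BUILD and chain the Script*
+ * helpers below, checking each IMG_BOOL result. RGX_MAX_INIT_COMMANDS is a
+ * hypothetical bound, not a define from this DDK.
+ *
+ *    RGX_INIT_COMMAND asCommands[RGX_MAX_INIT_COMMANDS];
+ *    RGX_SCRIPT_BUILD sScript = {
+ *        RGX_MAX_INIT_COMMANDS, // ui32MaxLen
+ *        0,                     // ui32CurrComm
+ *        IMG_FALSE,             // bOutOfSpace
+ *        asCommands             // psCommands
+ *    };
+ *
+ *    if (!ScriptWriteRGXReg(&sScript, ui32Offset, ui32Value) ||
+ *        !ScriptHalt(&sScript))
+ *    {
+ *        // out of space: the helpers return IMG_FALSE
+ *    }
+ */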
+
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptWriteRGXReg
+
+ @Description   Sets up a script entry for register write
+
+ @Input         psScript
+ @Input         ui32Offset
+ @Input         ui32Value
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptWriteRGXReg(RGX_SCRIPT_BUILD *psScript,
+                           IMG_UINT32 ui32Offset,
+                           IMG_UINT32 ui32Value);
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptPoll64RGXReg
+
+ @Description   Sets up a script entry for register poll
+
+ @Input         psScript
+ @Input         ui32Offset
+ @Input         ui64Value
+ @Input         ui64PollMask
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptPoll64RGXReg(RGX_SCRIPT_BUILD *psScript,
+                            IMG_UINT32 ui32Offset,
+                            IMG_UINT64 ui64Value,
+                            IMG_UINT64 ui64PollMask);
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptPollRGXReg
+
+ @Description   Sets up a script entry for register poll
+
+ @Input         psScript
+ @Input         ui32Offset
+ @Input         ui32Value
+ @Input         ui32PollMask
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptPollRGXReg(RGX_SCRIPT_BUILD *psScript,
+                          IMG_UINT32 ui32Offset,
+                          IMG_UINT32 ui32Value,
+                          IMG_UINT32 ui32PollMask);
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptDBGReadRGXReg
+
+ @Description   Sets up a script entry for a debug register read
+
+ @Input         psScript
+ @Input         eOp
+ @Input         ui32Offset
+ @Input         pszName
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptDBGReadRGXReg(RGX_SCRIPT_BUILD *psScript,
+                             RGX_INIT_OPERATION eOp,
+                             IMG_UINT32 ui32Offset,
+                             IMG_CHAR *pszName);
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptDBGCalc
+
+ @Description   Sets up a script entry for a calculation
+
+ @Input         psScript
+ @Input         eOp
+ @Input         ui32Offset1
+ @Input         ui32Offset2
+ @Input         ui32Offset3
+ @Input         pszName
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptDBGCalc(RGX_SCRIPT_BUILD *psScript,
+                       RGX_INIT_OPERATION eOp,
+                       IMG_UINT32 ui32Offset1,
+                       IMG_UINT32 ui32Offset2,
+                       IMG_UINT32 ui32Offset3,
+                       IMG_CHAR *pszName);
+
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptWriteRGXRegPDUMPOnly
+
+ @Description   Sets up a script entry for a register write (PDUMP only)
+
+ @Input         psScript
+ @Input         ui32Offset
+ @Input         ui32Value
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptWriteRGXRegPDUMPOnly(RGX_SCRIPT_BUILD *psScript,
+                                    IMG_UINT32 ui32Offset,
+                                    IMG_UINT32 ui32Value);
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptDBGReadMetaRegThroughSP
+
+ @Description   Add script entries for reading a reg through Meta slave port
+
+ @Input         psScript
+ @Input         ui32RegAddr
+ @Input         pszName
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptDBGReadMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+                                       IMG_UINT32 ui32RegAddr,
+                                       IMG_CHAR *pszName);
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptMetaRegCondPollRGXReg
+
+ @Description   Adds script entries that poll an RGX register conditionally
+                on the value of a Meta register read through the slave port
+
+ @Input         psScript
+ @Input         ui32MetaRegAddr
+ @Input         ui32MetaRegValue
+ @Input         ui32MetaRegMask
+ @Input         ui32RegAddr
+ @Input         ui32RegValue
+ @Input         ui32RegMask
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptMetaRegCondPollRGXReg(RGX_SCRIPT_BUILD *psScript,
+                                     IMG_UINT32 ui32MetaRegAddr,
+                                     IMG_UINT32 ui32MetaRegValue,
+                                     IMG_UINT32 ui32MetaRegMask,
+                                     IMG_UINT32 ui32RegAddr,
+                                     IMG_UINT32 ui32RegValue,
+                                     IMG_UINT32 ui32RegMask);
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptWriteMetaRegThroughSP
+
+ @Description   Add script entries for writing a reg through Meta slave port
+
+ @Input         psScript
+ @Input         ui32RegAddr
+ @Input         ui32RegValue
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptWriteMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+                                     IMG_UINT32 ui32RegAddr,
+                                     IMG_UINT32 ui32RegValue);
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptPollMetaRegThroughSP
+
+ @Description   Polls a Core Garten register through the slave port
+
+ @Input         psScript
+ @Input         ui32Offset
+ @Input         ui32PollValue
+ @Input         ui32PollMask
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptPollMetaRegThroughSP(RGX_SCRIPT_BUILD *psScript,
+                                    IMG_UINT32 ui32Offset,
+                                    IMG_UINT32 ui32PollValue,
+                                    IMG_UINT32 ui32PollMask);
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptDBGReadMetaCoreReg
+
+ @Description   Adds script entries for reading a Meta core register
+
+ @Input         psScript
+ @Input         ui32RegAddr
+ @Input         pszName
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptDBGReadMetaCoreReg(RGX_SCRIPT_BUILD *psScript,
+                                  IMG_UINT32 ui32RegAddr,
+                                  IMG_CHAR *pszName);
+
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptDBGString
+
+ @Description   Adds a debug print to the script
+
+ @Input         psScript
+ @Input         aszString
+
+ @Return        IMG_BOOL
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptDBGString(RGX_SCRIPT_BUILD *psScript,
+                         const IMG_CHAR *aszString);
+
+
+/*!
+*******************************************************************************
+
+ @Function      ScriptHalt
+
+ @Description   Add a cmd to finish the script
+
+ @Input         psScript
+
+ @Return        IMG_BOOL  IMG_TRUE if the halt command was added, IMG_FALSE
+                if the script ran out of commands while being built
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_BOOL ScriptHalt(RGX_SCRIPT_BUILD *psScript);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __RGXSRVINIT_SCRIPT_H__ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxstartstop.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxstartstop.c
new file mode 100644
index 0000000..10afd49
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxstartstop.c
@@ -0,0 +1,1141 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific start/stop routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific start/stop routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* The routines implemented here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when trusted device is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
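+/* The layer primitives used in this file (RGXWriteReg32/64, RGXReadReg32/64,
+ * RGXPollReg32/64, RGXCommentLog, RGXWaitCycles) all take the opaque hPrivate
+ * handle and are expected to be declared in rgxlayer.h. */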
+#include "rgxstartstop.h"
+
+#if defined(SUPPORT_SHARED_SLC)
+#include "rgxapi_km.h"
+#include "rgxdevice.h"
+#endif
+
+#define SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING
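+/* Assumption documented here: with this define, the SoC is taken to provide
+ * strict same-address write ordering, so __RGXInitSLC enables the PSG hazard
+ * check for BRN 60084 only when ERN 61389 is also present. */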
+
+
+#if !defined(FIX_HW_BRN_37453)
+/*!
+*******************************************************************************
+
+ @Function      RGXEnableClocks
+
+ @Description   Enable RGX Clocks
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXEnableClocks(const void *hPrivate)
+{
+	RGXCommentLog(hPrivate, "RGX clock: use default (automatic clock gating)");
+}
+#endif
+
+
+static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Wait for Slave Port to be Ready */
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_META_SP_MSLVCTRL1,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+	if (eError != PVRSRV_OK) return eError;
+
+	/* Issue a Write */
+	RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr);
+	RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue);
+
+	return eError;
+}
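+
+/* Note: the MSLVDATAT write above is posted; completion is only confirmed by
+ * the MSLVCTRL1 ready poll at the start of the next slave-port access. */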
+
+static PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate,
+                                            IMG_UINT32 ui32RegAddr,
+                                            IMG_UINT32* ui32RegValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Wait for Slave Port to be Ready */
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_META_SP_MSLVCTRL1,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+	if (eError != PVRSRV_OK) return eError;
+
+	/* Issue a Read */
+	RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+
+	/* Wait for Slave Port to be Ready */
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_META_SP_MSLVCTRL1,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	                      RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+	if (eError != PVRSRV_OK) return eError;
+
+#if !defined(NO_HARDWARE)
+	*ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX);
+#else
+	*ui32RegValue = 0xFFFFFFFF;
+#endif
+
+	return eError;
+}
+
+static PVRSRV_ERROR RGXWriteMetaCoreRegThroughSP(const void *hPrivate,
+                                                IMG_UINT32 ui32CoreReg,
+                                                IMG_UINT32 ui32Value)
+{
+	IMG_UINT32 i = 0;
+
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXDT_OFFSET, ui32Value);
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, ui32CoreReg & ~META_CR_TXUXXRXRQ_RDnWR_BIT);
+
+	do
+	{
+		RGXReadMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, &ui32Value);
+	} while (((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT) && (i++ < 1000));
+
+	/* After a timed-out loop the post-increment leaves i at 1001 */
+	if (i > 1000)
+	{
+		RGXCommentLog(hPrivate, "RGXWriteMetaCoreRegThroughSP: Timeout");
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	return PVRSRV_OK;
+}
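+
+/* The write above follows the Meta TXUXXRX handshake: the data word goes to
+ * TXUXXRXDT, the request (RDnWR cleared for a write) goes to TXUXXRXRQ, and
+ * DREADY in TXUXXRXRQ is polled until the unit has consumed the request. */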
+
+static PVRSRV_ERROR RGXStartFirmware(const void *hPrivate)
+{
+	PVRSRV_ERROR eError;
+
+	/* Give privilege to debug and slave port */
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN);
+
+	/* Point Meta to the bootloader address, global (uncached) range */
+	eError = RGXWriteMetaCoreRegThroughSP(hPrivate,
+	                                     PC_ACCESS(0),
+	                                     RGXFW_BOOTLDR_META_ADDR | META_MEM_GLOBAL_RANGE_BIT);
+
+	if (eError != PVRSRV_OK)
+	{
+		RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start failed!");
+		return eError;
+	}
+
+	/* Enable minim encoding */
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXPRIVEXT, META_CR_TXPRIVEXT_MINIM_EN);
+
+	/* Enable Meta thread */
+	RGXWriteMetaRegThroughSP(hPrivate, META_CR_T0ENABLE_OFFSET, META_CR_TXENABLE_ENABLE_BIT);
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInitMetaProcWrapper
+
+ @Description   Configures the hardware wrapper of the META processor
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitMetaProcWrapper(const void *hPrivate)
+{
+	IMG_UINT64 ui64GartenConfig;
+
+	/* Set Garten IDLE to META idle and set the Garten Wrapper BIF Fence address */
+
+	/* Garten IDLE bit controlled by META */
+	ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META;
+
+	/* The fence address is set during the FW init sequence */
+
+	if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+	{
+		/* Set PC = 0 for fences */
+		ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK;
+		ui64GartenConfig |= (IMG_UINT64)META_MMU_CONTEXT_MAPPING
+		                    << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT;
+
+		if (!RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_51281_BIT_MASK))
+		{
+			/* Ensure the META fences go all the way to external memory */
+			ui64GartenConfig |= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_SLC_COHERENT_EN;    /* SLC Coherent 1 */
+			ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PERSISTENCE_CLRMSK; /* SLC Persistence 0 */
+		}
+	}
+	else
+	{
+		/* Set PC = 0 for fences */
+		ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK;
+		ui64GartenConfig |= (IMG_UINT64)META_MMU_CONTEXT_MAPPING
+		                    << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT;
+
+		/* Set SLC DM=META */
+		ui64GartenConfig |= ((IMG_UINT64) RGXFW_SEGMMU_META_DM_ID) << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT;
+	}
+
+	RGXCommentLog(hPrivate, "RGXStart: Configure META wrapper");
+	RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInitMipsProcWrapper
+
+ @Description   Configures the hardware wrapper of the MIPS processor
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitMipsProcWrapper(const void *hPrivate)
+{
+	IMG_DEV_PHYADDR sPhyAddr;
+	IMG_UINT64 ui64RemapSettings = RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE; /* Same for all remap registers */
+
+	RGXCommentLog(hPrivate, "RGXStart: Configure MIPS wrapper");
+
+	/*
+	 * MIPS wrapper (registers transaction ID and ISA mode) setup
+	 */
+
+	RGXCommentLog(hPrivate, "RGXStart: Write wrapper config register");
+
+	if (RGXGetDevicePhysBusWidth(hPrivate) > 32)
+	{
+		RGXWriteReg32(hPrivate,
+		              RGX_CR_MIPS_WRAPPER_CONFIG,
+		              (RGXMIPSFW_REGISTERS_VIRTUAL_BASE >>
+		              RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN) |
+		              RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS);
+	}
+	else
+	{
+		RGXAcquireGPURegsAddr(hPrivate, &sPhyAddr);
+
+		RGXMIPSWrapperConfig(hPrivate,
+		                     RGX_CR_MIPS_WRAPPER_CONFIG,
+		                     sPhyAddr.uiAddr,
+		                     RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN,
+		                     RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS);
+	}
+
+	/*
+	 * Boot remap setup
+	 */
+
+	RGXAcquireBootRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	/* Do not mark accesses to a FW code remap region as DRM accesses */
+	ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+	RGXCommentLog(hPrivate, "RGXStart: Write boot remap registers");
+	RGXBootRemapConfig(hPrivate,
+	                   RGX_CR_MIPS_ADDR_REMAP1_CONFIG1,
+	                   RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN,
+	                   RGX_CR_MIPS_ADDR_REMAP1_CONFIG2,
+	                   sPhyAddr.uiAddr,
+	                   ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK,
+	                   ui64RemapSettings);
+
+	/*
+	 * Data remap setup
+	 */
+
+	RGXAcquireDataRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	/* Remapped data in non-secure memory */
+	ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+	RGXCommentLog(hPrivate, "RGXStart: Write data remap registers");
+	RGXDataRemapConfig(hPrivate,
+	                   RGX_CR_MIPS_ADDR_REMAP2_CONFIG1,
+	                   RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN,
+	                   RGX_CR_MIPS_ADDR_REMAP2_CONFIG2,
+	                   sPhyAddr.uiAddr,
+	                   ~RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK,
+	                   ui64RemapSettings);
+
+	/*
+	 * Code remap setup
+	 */
+
+	RGXAcquireCodeRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	/* Do not mark accesses to a FW code remap region as DRM accesses */
+	ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+	RGXCommentLog(hPrivate, "RGXStart: Write exceptions remap registers");
+	RGXCodeRemapConfig(hPrivate,
+	                   RGX_CR_MIPS_ADDR_REMAP3_CONFIG1,
+	                   RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN,
+	                   RGX_CR_MIPS_ADDR_REMAP3_CONFIG2,
+	                   sPhyAddr.uiAddr,
+	                   ~RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK,
+	                   ui64RemapSettings);
+
+	/*
+	 * Trampoline remap setup
+	 */
+
+	RGXAcquireTrampolineRemapAddr(hPrivate, &sPhyAddr);
+	ui64RemapSettings = RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	/* Remapped data in non-secure memory */
+	ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+	RGXCommentLog(hPrivate, "RGXStart: Write trampoline remap registers");
+	RGXTrampolineRemapConfig(hPrivate,
+	                   RGX_CR_MIPS_ADDR_REMAP4_CONFIG1,
+	                   sPhyAddr.uiAddr | RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN,
+	                   RGX_CR_MIPS_ADDR_REMAP4_CONFIG2,
+	                   RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR,
+	                   ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK,
+	                   ui64RemapSettings);
+
+	/* Garten IDLE bit controlled by MIPS */
+	RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to MIPS");
+	RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+
+	/* Turn on the EJTAG probe (only useful when the driver is live) */
+	RGXWriteReg32(hPrivate, RGX_CR_MIPS_DEBUG_CONFIG, 0);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      __RGXInitSLC
+
+ @Description   Initialise RGX SLC
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void __RGXInitSLC(const void *hPrivate)
+{
+	if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK))
+	{
+		IMG_UINT32 ui32Reg;
+		IMG_UINT32 ui32RegVal;
+
+		if (RGXDeviceHasErnBrn(hPrivate, HW_ERN_51468_BIT_MASK))
+		{
+			/*
+			 * SLC control
+			 */
+			ui32Reg = RGX_CR_SLC3_CTRL_MISC;
+			ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH |
+			             RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN;
+			RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+		}
+		else
+		{
+			/*
+			 * SLC control
+			 */
+			ui32Reg = RGX_CR_SLC3_CTRL_MISC;
+			ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH |
+			             RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN;
+			RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+
+			/*
+			 * SLC scramble bits
+			 */
+			{
+				IMG_UINT32 i;
+				IMG_UINT32 ui32Count=0;
+				IMG_UINT32 ui32SLCBanks = RGXGetDeviceSLCBanks(hPrivate);
+				IMG_UINT64 aui64ScrambleValues[4];
+				IMG_UINT32 aui32ScrambleRegs[] = {
+					RGX_CR_SLC3_SCRAMBLE,
+					RGX_CR_SLC3_SCRAMBLE2,
+					RGX_CR_SLC3_SCRAMBLE3,
+					RGX_CR_SLC3_SCRAMBLE4
+				};
+
+				if (2 == ui32SLCBanks)
+				{
+					aui64ScrambleValues[0] = IMG_UINT64_C(0x6965a99a55696a6a);
+					aui64ScrambleValues[1] = IMG_UINT64_C(0x6aa9aa66959aaa9a);
+					aui64ScrambleValues[2] = IMG_UINT64_C(0x9a5665965a99a566);
+					aui64ScrambleValues[3] = IMG_UINT64_C(0x5aa69596aa66669a);
+					ui32Count = 4;
+				}
+				else if (4 == ui32SLCBanks)
+				{
+					aui64ScrambleValues[0] = IMG_UINT64_C(0xc6788d722dd29ce4);
+					aui64ScrambleValues[1] = IMG_UINT64_C(0x7272e4e11b279372);
+					aui64ScrambleValues[2] = IMG_UINT64_C(0x87d872d26c6c4be1);
+					aui64ScrambleValues[3] = IMG_UINT64_C(0xe1b4878d4b36e478);
+					ui32Count = 4;
+				}
+				else if (8 == ui32SLCBanks)
+				{
+					aui64ScrambleValues[0] = IMG_UINT64_C(0x859d6569e8fac688);
+					aui64ScrambleValues[1] = IMG_UINT64_C(0xf285e1eae4299d33);
+					aui64ScrambleValues[2] = IMG_UINT64_C(0x1e1af2be3c0aa447);
+					ui32Count = 3;
+				}
+
+				for (i = 0; i < ui32Count; i++)
+				{
+					IMG_UINT32 ui32Reg = aui32ScrambleRegs[i];
+					IMG_UINT64 ui64Value = aui64ScrambleValues[i];
+					RGXWriteReg64(hPrivate, ui32Reg, ui64Value);
+				}
+			}
+		}
+
+		if (RGXDeviceHasErnBrn(hPrivate, HW_ERN_45914_BIT_MASK))
+		{
+			/* Disable the forced SLC coherency which the hardware enables for compatibility with older pdumps */
+			RGXCommentLog(hPrivate, "Disable forced SLC coherency");
+			RGXWriteReg64(hPrivate, RGX_CR_GARTEN_SLC, 0);
+		}
+	}
+	else
+	{
+		IMG_UINT32 ui32Reg;
+		IMG_UINT32 ui32RegVal;
+
+#if defined(FIX_HW_BRN_36492)
+		/* The workaround for this BRN forbids using SLC reset, so invalidate the SLC instead */
+		RGXCommentLog(hPrivate, "Invalidate the SLC");
+		RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_FLUSH_INVAL, RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN);
+
+		/* Poll for completion */
+		RGXPollReg32(hPrivate, RGX_CR_SLC_STATUS0, 0x0, RGX_CR_SLC_STATUS0_MASKFULL);
+#endif
+
+		/*
+		 * SLC Bypass control
+		 */
+		ui32Reg = RGX_CR_SLC_CTRL_BYPASS;
+		ui32RegVal = 0;
+
+		if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_SLCSIZE8_BIT_MASK)  ||
+		    RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_61450_BIT_MASK))
+		{
+			RGXCommentLog(hPrivate, "Bypass SLC for IPF_OBJ and IPF_CPF");
+			ui32RegVal |= RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN | RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN;
+		}
+
+		if (RGXGetDeviceSLCSize(hPrivate) < (128*1024))
+		{
+			/* Bypass SLC for textures if the SLC size is less than 128kB */
+			RGXCommentLog(hPrivate, "Bypass SLC for TPU");
+			ui32RegVal |= RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN;
+		}
+
+		if (ui32RegVal != 0)
+		{
+			RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+		}
+
+		/*
+		 * SLC Misc control.
+		 *
+		 * Note: This is a 64bit register and we set only the lower 32bits leaving the top
+		 *       32bits (RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS) unchanged from the HW default.
+		 */
+		ui32Reg = RGX_CR_SLC_CTRL_MISC;
+		ui32RegVal = (RGXReadReg32(hPrivate, ui32Reg) & RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN) |
+		             RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1;
+
+		if (RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_60084_BIT_MASK))
+		{
+#if !defined(SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING)
+			ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN;
+#else
+			if (RGXDeviceHasErnBrn(hPrivate, HW_ERN_61389_BIT_MASK))
+			{
+				ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN;
+			}
+#endif
+		}
+		/* Bypass burst combiner if SLC line size is smaller than 1024 bits */
+		if (RGXGetDeviceCacheLineSize(hPrivate) < 1024)
+		{
+			ui32RegVal |= RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN;
+		}
+
+		RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+	}
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInitBIF
+
+ @Description   Initialise RGX BIF
+
+ @Input         hPrivate : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitBIF(const void *hPrivate)
+{
+	if (!RGXDeviceHasFeature(hPrivate, RGX_FEATURE_MIPS_BIT_MASK))
+	{
+		IMG_DEV_PHYADDR sPCAddr;
+
+		/*
+		 * Acquire the address of the Kernel Page Catalogue.
+		 */
+		RGXAcquireKernelMMUPC(hPrivate, &sPCAddr);
+
+		/*
+		 * Write the kernel catalogue base.
+		 */
+		RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue");
+
+		if (!RGXDeviceHasFeature(hPrivate, RGX_FEATURE_SLC_VIVT_BIT_MASK))
+		{
+			/* Write the cat-base address */
+			RGXWriteKernelMMUPC64(hPrivate,
+			                      RGX_CR_BIF_CAT_BASE0,
+			                      RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT,
+			                      RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT,
+			                      ((sPCAddr.uiAddr
+			                      >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
+			                      << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT)
+			                      & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK);
+			/*
+			 * Trusted Firmware boot
+			 */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+			RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled");
+			RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN);
+#endif
+		}
+		else
+		{
+			IMG_UINT32 uiPCAddr;
+			uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT)
+			             << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT)
+			            & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK);
+			/* Set the mapping context */
+			RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, 0);
+
+			/* Write the cat-base address */
+			RGXWriteKernelMMUPC32(hPrivate,
+			                      RGX_CR_MMU_CBASE_MAPPING,
+			                      RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+			                      RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+			                      uiPCAddr);
+#if defined(SUPPORT_TRUSTED_DEVICE)
+			/* Set-up MMU ID 1 mapping to the same PC used by MMU ID 0 */
+			RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, 1);
+			RGXWriteKernelMMUPC32(hPrivate,
+			                      RGX_CR_MMU_CBASE_MAPPING,
+			                      RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+			                      RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+			                      uiPCAddr);
+#endif /* SUPPORT_TRUSTED_DEVICE */
+		}
+	}
+	else
+	{
+		/*
+		 * Trusted Firmware boot
+		 */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+		RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled");
+		RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN);
+#endif
+	}
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXAXIACELiteInit
+
+ @Description   Initialise AXI-ACE Lite interface
+
+ @Input         hPrivate : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXAXIACELiteInit(const void *hPrivate)
+{
+	IMG_UINT32 ui32RegAddr;
+	IMG_UINT64 ui64RegVal;
+
+	ui32RegAddr = RGX_CR_AXI_ACE_LITE_CONFIGURATION;
+
+	/* Setup AXI-ACE config. Set everything to outer cache */
+	ui64RegVal = (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) |
+	             (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) |
+	             (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT)  |
+	             (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) |
+	             (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) |
+	             (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) |
+	             (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) |
+	             (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT);
+
+	if (RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_42321_BIT_MASK))
+	{
+		ui64RegVal |= (((IMG_UINT64) 1) << RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT);
+	}
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_SLC_VIVT_BIT_MASK))
+	{
+		RGXCommentLog(hPrivate, "OSID 0 and 1 are trusted");
+		ui64RegVal |= IMG_UINT64_C(0xFC)
+	              << RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT;
+	}
+#endif
+
+	RGXCommentLog(hPrivate, "Init AXI-ACE interface");
+	RGXWriteReg64(hPrivate, ui32RegAddr, ui64RegVal);
+}
+
+
+PVRSRV_ERROR RGXStart(const void *hPrivate)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bDoFWSlaveBoot;
+	IMG_CHAR *pcRGXFW_PROCESSOR;
+	IMG_BOOL bMetaFW;
+
+	if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_MIPS_BIT_MASK))
+	{
+		pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS;
+		bMetaFW = IMG_FALSE;
+		bDoFWSlaveBoot = IMG_FALSE;
+	}
+	else
+	{
+		pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
+		bMetaFW = IMG_TRUE;
+		bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate);
+	}
+
+	if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK))
+	{
+		/* Disable the default sys_bus_secure protection to perform minimal setup */
+		RGXCommentLog(hPrivate, "RGXStart: Disable sys_bus_secure");
+		RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0);
+		(void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */
+	}
+
+#if defined(FIX_HW_BRN_37453)
+	/* Force all clocks on */
+	RGXCommentLog(hPrivate, "RGXStart: force all clocks on");
+	RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL, RGX_CR_CLK_CTRL_ALL_ON);
+#endif
+
+#if defined(SUPPORT_SHARED_SLC) && !defined(FIX_HW_BRN_36492)
+	/* When the SLC is shared, the SLC reset is performed by the System layer when calling
+	 * RGXInitSLC (before any device uses it), therefore mask out the SLC bit to avoid
+	 * soft_resetting it here. If HW_BRN_36492, the bit is already masked out.
+	 */
+#define RGX_CR_SOFT_RESET_ALL  (RGX_CR_SOFT_RESET_MASKFULL ^ RGX_CR_SOFT_RESET_SLC_EN)
+	RGXCommentLog(hPrivate, "RGXStart: Shared SLC (don't reset SLC as part of RGX reset)");
+#else
+#define RGX_CR_SOFT_RESET_ALL  (RGX_CR_SOFT_RESET_MASKFULL)
+#endif
+
+	if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+	{
+		/* Set RGX in soft-reset */
+		RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1");
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS);
+
+		RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 2");
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_JONES_ALL | RGX_S7_SOFT_RESET_DUSTS);
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, RGX_S7_SOFT_RESET2);
+
+		/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+		(void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+		/* Take everything out of reset but META/MIPS */
+		RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 1 excluding %s", pcRGXFW_PROCESSOR);
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS | RGX_CR_SOFT_RESET_GARTEN_EN);
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, 0x0);
+
+		(void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+		RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 2 excluding %s", pcRGXFW_PROCESSOR);
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+
+		(void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+	}
+	else
+	{
+		/* Set RGX in soft-reset */
+		RGXCommentLog(hPrivate, "RGXStart: soft reset everything");
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL);
+
+		/* Take Rascal and Dust out of reset */
+		RGXCommentLog(hPrivate, "RGXStart: Rascal and Dust out of reset");
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL ^ RGX_CR_SOFT_RESET_RASCALDUSTS_EN);
+
+		/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+		(void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+		/* Take everything out of reset but META/MIPS */
+		RGXCommentLog(hPrivate, "RGXStart: Take everything out of reset but %s", pcRGXFW_PROCESSOR);
+		RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+	}
+
+
+#if !defined(FIX_HW_BRN_37453)
+	/* Enable clocks */
+	RGXEnableClocks(hPrivate);
+#endif
+
+	/*
+	 * Initialise SLC.
+	 */
+#if !defined(SUPPORT_SHARED_SLC)
+	__RGXInitSLC(hPrivate);
+#endif
+
+	if (bMetaFW)
+	{
+		if (bDoFWSlaveBoot)
+		{
+			/* Configure META to Slave boot */
+			RGXCommentLog(hPrivate, "RGXStart: META Slave boot");
+			RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0);
+
+		}
+		else
+		{
+			/* Configure META to Master boot */
+			RGXCommentLog(hPrivate, "RGXStart: META Master boot");
+			RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN);
+		}
+	}
+
+	/*
+	 * Initialise Firmware wrapper
+	 */
+	if (bMetaFW)
+	{
+		RGXInitMetaProcWrapper(hPrivate);
+	}
+	else
+	{
+		RGXInitMipsProcWrapper(hPrivate);
+	}
+
+	if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_AXI_ACELITE_BIT_MASK))
+	{
+		/* We must init the AXI-ACE interface before 1st BIF transaction */
+		RGXAXIACELiteInit(hPrivate);
+	}
+
+	/*
+	 * Initialise BIF.
+	 */
+	RGXInitBIF(hPrivate);
+
+	RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR);
+
+	/* Need to wait for at least 16 cycles before taking META/MIPS out of reset ... */
+	RGXWaitCycles(hPrivate, 32, 3);
+
+	RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0);
+	(void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+	/* ... and afterwards */
+	RGXWaitCycles(hPrivate, 32, 3);
+
+#if defined(FIX_HW_BRN_37453)
+	/* We rely on the 32 clk sleep from above */
+
+	/* Switch clocks back to auto */
+	RGXCommentLog(hPrivate, "RGXStart: set clocks back to auto");
+	RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL, RGX_CR_CLK_CTRL_ALL_AUTO);
+#endif
+
+	if (bMetaFW && bDoFWSlaveBoot)
+	{
+		eError = RGXFabricCoherencyTest(hPrivate);
+		if (eError != PVRSRV_OK) return eError;
+
+		RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start");
+		eError = RGXStartFirmware(hPrivate);
+		if (eError != PVRSRV_OK) return eError;
+	}
+	else
+	{
+		RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start");
+	}
+
+	/* Enable Sys Bus security */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	RGXCommentLog(hPrivate, "RGXStart: Enable sys_bus_secure");
+	RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN);
+	(void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */
+#endif
+
+	return eError;
+}
+
+static INLINE void ClearIRQStatusRegister(const void *hPrivate, IMG_BOOL bMetaFW)
+{
+	IMG_UINT32 ui32IRQClearReg;
+	IMG_UINT32 ui32IRQClearMask;
+
+	if (bMetaFW)
+	{
+		ui32IRQClearReg = RGX_CR_META_SP_MSLVIRQSTATUS;
+		ui32IRQClearMask = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK;
+	}
+	else
+	{
+		ui32IRQClearReg = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR;
+		ui32IRQClearMask = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN;
+	}
+
+	RGXWriteReg32(hPrivate, ui32IRQClearReg, ui32IRQClearMask);
+
+#if defined(RGX_FEATURE_OCPBUS)
+	RGXWriteReg32(hPrivate, RGX_CR_OCP_IRQSTATUS_2, RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN);
+#endif
+}
+
+PVRSRV_ERROR RGXStop(const void *hPrivate)
+{
+	IMG_BOOL bMetaFW = !RGXDeviceHasFeature(hPrivate, RGX_FEATURE_MIPS_BIT_MASK);
+	PVRSRV_ERROR eError;
+
+	ClearIRQStatusRegister(hPrivate, bMetaFW);
+
+	/* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */
+	if (!RGXDeviceHasFeature(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SIDEKICK_IDLE,
+		                      RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN),
+		                      RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN));
+	}
+	else
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_JONES_IDLE,
+		                      RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN),
+		                      RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN));
+	}
+
+	if (eError != PVRSRV_OK) return eError;
+
+
+#if !defined(SUPPORT_SHARED_SLC)
+	/* Wait for SLC to signal IDLE */
+	if (!RGXDeviceHasFeature(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SLC_IDLE,
+		                      RGX_CR_SLC_IDLE_MASKFULL,
+		                      RGX_CR_SLC_IDLE_MASKFULL);
+	}
+	else
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SLC3_IDLE,
+		                      RGX_CR_SLC3_IDLE_MASKFULL,
+		                      RGX_CR_SLC3_IDLE_MASKFULL);
+	}
+#endif /* SUPPORT_SHARED_SLC */
+	if (eError != PVRSRV_OK) return eError;
+
+
+	/* Unset MTS DM association with threads */
+	RGXWriteReg32(hPrivate,
+	              RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC,
+	              RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
+	              & RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL);
+	RGXWriteReg32(hPrivate,
+	              RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC,
+	              RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
+	              & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL);
+	RGXWriteReg32(hPrivate,
+	              RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC,
+	              RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
+	              & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL);
+	RGXWriteReg32(hPrivate,
+	              RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC,
+	              RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
+	              & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL);
+
+
+#if defined(PDUMP)
+	if (bMetaFW)
+	{
+		/* Disabling threads is only required for pdumps to stop the fw gracefully */
+
+		/* Disable thread 0 */
+		eError = RGXWriteMetaRegThroughSP(hPrivate,
+		                                  META_CR_T0ENABLE_OFFSET,
+		                                  ~META_CR_TXENABLE_ENABLE_BIT);
+		if (eError != PVRSRV_OK) return eError;
+
+		/* Disable thread 1 */
+		eError = RGXWriteMetaRegThroughSP(hPrivate,
+		                                  META_CR_T1ENABLE_OFFSET,
+		                                  ~META_CR_TXENABLE_ENABLE_BIT);
+		if (eError != PVRSRV_OK) return eError;
+
+		/* Clear down any irq raised by META (done after disabling the FW
+		 * threads to avoid a race condition).
+		 * This is only really needed for PDumps, but we do it driver-live anyway.
+		 */
+		RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0);
+
+		/* Wait for the Slave Port to finish all the transactions */
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_META_SP_MSLVCTRL1,
+		                      RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+		                      RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+		if (eError != PVRSRV_OK) return eError;
+	}
+#endif
+
+
+	/* Extra Idle checks */
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_BIF_STATUS_MMU,
+	                      0,
+	                      RGX_CR_BIF_STATUS_MMU_MASKFULL);
+	if (eError != PVRSRV_OK) return eError;
+
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_BIFPM_STATUS_MMU,
+	                      0,
+	                      RGX_CR_BIFPM_STATUS_MMU_MASKFULL);
+	if (eError != PVRSRV_OK) return eError;
+
+	if (!RGXDeviceHasFeature(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) &&
+	    !RGXDeviceHasFeature(hPrivate, RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK))
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_BIF_READS_EXT_STATUS,
+		                      0,
+		                      RGX_CR_BIF_READS_EXT_STATUS_MASKFULL);
+		if (eError != PVRSRV_OK) return eError;
+	}
+
+
+	eError = RGXPollReg32(hPrivate,
+	                      RGX_CR_BIFPM_READS_EXT_STATUS,
+	                      0,
+	                      RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL);
+	if (eError != PVRSRV_OK) return eError;
+
+	{
+		IMG_UINT64 ui64SLCMask = RGX_CR_SLC_STATUS1_MASKFULL;
+		eError = RGXPollReg64(hPrivate,
+		                      RGX_CR_SLC_STATUS1,
+		                      0,
+		                      ui64SLCMask);
+		if (eError != PVRSRV_OK) return eError;
+	}
+
+	if (4 == RGXGetDeviceSLCBanks(hPrivate))
+	{
+		eError = RGXPollReg64(hPrivate,
+		                      RGX_CR_SLC_STATUS2,
+		                      0,
+		                      RGX_CR_SLC_STATUS2_MASKFULL);
+		if (eError != PVRSRV_OK) return eError;
+	}
+
+#if !defined(SUPPORT_SHARED_SLC)
+	/* Wait for SLC to signal IDLE */
+	if (!RGXDeviceHasFeature(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SLC_IDLE,
+		                      RGX_CR_SLC_IDLE_MASKFULL,
+		                      RGX_CR_SLC_IDLE_MASKFULL);
+	}
+	else
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SLC3_IDLE,
+		                      RGX_CR_SLC3_IDLE_MASKFULL,
+		                      RGX_CR_SLC3_IDLE_MASKFULL);
+	}
+#endif /* SUPPORT_SHARED_SLC */
+	if (eError != PVRSRV_OK) return eError;
+
+
+	/* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */
+	if (!RGXDeviceHasFeature(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SIDEKICK_IDLE,
+		                      RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN),
+		                      RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN));
+	}
+	else
+	{
+		if (!RGXDeviceHasFeature(hPrivate, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+		{
+			eError = RGXPollReg32(hPrivate,
+			                      RGX_CR_JONES_IDLE,
+			                      RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN),
+			                      RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN));
+		}
+	}
+
+	if (eError != PVRSRV_OK) return eError;
+
+
+	if (bMetaFW)
+	{
+		IMG_UINT32 ui32RegValue;
+
+		eError = RGXReadMetaRegThroughSP(hPrivate,
+		                                 META_CR_TxVECINT_BHALT,
+		                                 &ui32RegValue);
+		if (eError != PVRSRV_OK) return eError;
+
+		if (ui32RegValue == 0x0)
+		{
+			/* Wait for Sidekick/Jones to signal IDLE including
+			 * the Garten Wrapper if there is no debugger attached
+			 * (TxVECINT_BHALT = 0x0) */
+			if (!RGXDeviceHasFeature(hPrivate, RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK))
+			{
+				eError = RGXPollReg32(hPrivate,
+				                      RGX_CR_SIDEKICK_IDLE,
+				                      RGX_CR_SIDEKICK_IDLE_GARTEN_EN,
+				                      RGX_CR_SIDEKICK_IDLE_GARTEN_EN);
+				if (eError != PVRSRV_OK) return eError;
+			}
+			else
+			{
+				eError = RGXPollReg32(hPrivate,
+				                      RGX_CR_JONES_IDLE,
+				                      RGX_CR_JONES_IDLE_GARTEN_EN,
+				                      RGX_CR_JONES_IDLE_GARTEN_EN);
+				if (eError != PVRSRV_OK) return eError;
+			}
+		}
+	}
+	else
+	{
+		eError = RGXPollReg32(hPrivate,
+		                      RGX_CR_SIDEKICK_IDLE,
+		                      RGX_CR_SIDEKICK_IDLE_GARTEN_EN,
+		                      RGX_CR_SIDEKICK_IDLE_GARTEN_EN);
+		if (eError != PVRSRV_OK) return eError;
+	}
+
+	return eError;
+}
+
+
+/*
+ * RGXInitSLC
+ */
+#if defined(SUPPORT_SHARED_SLC)
+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	void *pvPowerParams;
+
+	if (psDeviceNode == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psDevInfo = psDeviceNode->pvDevice;
+	pvPowerParams = &psDevInfo->sLayerParams;
+
+#if !defined(FIX_HW_BRN_36492)
+	/* reset the SLC */
+	RGXCommentLog(pvPowerParams, "RGXInitSLC: soft reset SLC");
+	RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_SLC_EN);
+
+	/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+	(void) RGXReadReg64(pvPowerParams, RGX_CR_SOFT_RESET);
+
+	/* Take everything out of reset */
+	RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, 0x0);
+#endif
+
+	__RGXInitSLC(pvPowerParams);
+
+	return PVRSRV_OK;
+}
+#endif
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxstartstop.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxstartstop.h
new file mode 100644
index 0000000..ac14118
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxstartstop.h
@@ -0,0 +1,84 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX start/stop header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX start/stop functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXSTARTSTOP_H__)
+#define __RGXSTARTSTOP_H__
+
+/* The routines declared here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when DRM security is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxlayer.h"
+
+/*!
+*******************************************************************************
+
+ @Function      RGXStart
+
+ @Description   Perform GPU reset and initialisation
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXStart(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function      RGXStop
+
+ @Description   Stop Rogue in preparation for power down
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXStop(const void *hPrivate);
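+
+/* Usage sketch (illustrative only, not a contract of this header): a power
+ * management callback would typically bracket GPU work with these calls.
+ * "hImplData" is a hypothetical handle standing in for whatever
+ * implementation-specific data the caller's layer provides:
+ *
+ *     PVRSRV_ERROR eError = RGXStart(hImplData);
+ *     if (eError != PVRSRV_OK)
+ *         return eError;            // GPU failed to come out of reset
+ *     // ... GPU initialised: submit work ...
+ *     eError = RGXStop(hImplData);  // quiesce before powering down
+ */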
+
+#endif /* __RGXSTARTSTOP_H__ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxta3d.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxta3d.c
new file mode 100644
index 0000000..082c4cc
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxta3d.c
@@ -0,0 +1,4781 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX TA/3D routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX TA/3D routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* for the offsetof macro */
+#include <stddef.h>
+
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "ri_server.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "rgx_memallocflags.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "rgxtimerquery.h"
+#include "htbuffer.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "physmem.h"
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "process_stats.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#include "pvrsrv_sync_server.h"
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "hash.h"
+#include "rgxworkest.h"
+
+#define HASH_CLEAN_LIMIT 6
+#endif
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TA3D_UFO_DUMP	0
+
+//#define TA3D_CHECKPOINT_DEBUG
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+/* define the number of commands required to be set up by the CCB helper */
+/* 1 command for the TA */
+#define CCB_CMD_HELPER_NUM_TA_COMMANDS 1
+/* Up to 3 commands for the 3D (partial render fence, partial render, and render) */
+#define CCB_CMD_HELPER_NUM_3D_COMMANDS 3
+
+typedef struct _DEVMEM_REF_LOOKUP_
+{
+	IMG_UINT32 ui32ZSBufferID;
+	RGX_ZSBUFFER_DATA *psZSBuffer;
+} DEVMEM_REF_LOOKUP;
+
+typedef struct _DEVMEM_FREELIST_LOOKUP_
+{
+	IMG_UINT32 ui32FreeListID;
+	RGX_FREELIST *psFreeList;
+} DEVMEM_FREELIST_LOOKUP;
+
+typedef struct {
+	DEVMEM_MEMDESC				*psContextStateMemDesc;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+} RGX_SERVER_RC_TA_DATA;
+
+typedef struct {
+	DEVMEM_MEMDESC				*psContextStateMemDesc;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+} RGX_SERVER_RC_3D_DATA;
+
+struct _RGX_SERVER_RENDER_CONTEXT_ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	/* this lock protects usage of the render context.
+	 * it ensures only one kick is being prepared and/or submitted on
+	 * this render context at any time
+	 */
+	POS_LOCK				hLock;
+	RGX_CCB_CMD_HELPER_DATA asTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS];
+	RGX_CCB_CMD_HELPER_DATA as3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS];
+#endif
+	PVRSRV_DEVICE_NODE			*psDeviceNode;
+	DEVMEM_MEMDESC				*psFWRenderContextMemDesc;
+	DEVMEM_MEMDESC				*psFWFrameworkMemDesc;
+	RGX_SERVER_RC_TA_DATA		sTAData;
+	RGX_SERVER_RC_3D_DATA		s3DData;
+	IMG_UINT32					ui32CleanupStatus;
+#define RC_CLEANUP_TA_COMPLETE		(1 << 0)
+#define RC_CLEANUP_3D_COMPLETE		(1 << 1)
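+/* Presumably both bits above must be set before the context is finally freed */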
+	PVRSRV_CLIENT_SYNC_PRIM		*psCleanupSync;
+	DLLIST_NODE					sListNode;
+	SYNC_ADDR_LIST				sSyncAddrListTAFence;
+	SYNC_ADDR_LIST				sSyncAddrListTAUpdate;
+	SYNC_ADDR_LIST				sSyncAddrList3DFence;
+	SYNC_ADDR_LIST				sSyncAddrList3DUpdate;
+	ATOMIC_T					hJobId;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	WORKEST_HOST_DATA			sWorkEstData;
+#endif
+};
+
+
+/*
+	Static functions used by render context code
+*/
+
+static
+PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData,
+							   PVRSRV_DEVICE_NODE *psDeviceNode,
+							   PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+											  psTAData->psServerCommonContext,
+											  psCleanupSync,
+											  RGXFWIF_DM_TA,
+											  PDUMP_FLAGS_NONE);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				__FUNCTION__,
+				PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* ... it has, so we can free its resources */
+#if defined(DEBUG)
+	/* Log the number of TA context stores which occurred */
+	{
+		RGXFWIF_TACTX_STATE	*psFWTAState;
+
+		eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc,
+										  (void**)&psFWTAState);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware render context state (%u)",
+					__FUNCTION__, eError));
+		}
+		else
+		{
+			/* Release the CPU virt addr */
+			DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc);
+		}
+	}
+#endif
+	FWCommonContextFree(psTAData->psServerCommonContext);
+	DevmemFwFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc);
+	psTAData->psServerCommonContext = NULL;
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData,
+							   PVRSRV_DEVICE_NODE *psDeviceNode,
+							   PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+											  ps3DData->psServerCommonContext,
+											  psCleanupSync,
+											  RGXFWIF_DM_3D,
+											  PDUMP_FLAGS_NONE);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				 __FUNCTION__,
+				 PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* ... it has, so we can free its resources */
+#if defined(DEBUG)
+	/* Log the number of 3D context stores which occurred */
+	{
+		RGXFWIF_3DCTX_STATE	*psFW3DState;
+
+		eError = DevmemAcquireCpuVirtAddr(ps3DData->psContextStateMemDesc,
+										  (void**)&psFW3DState);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware render context state (%u)",
+					__FUNCTION__, eError));
+		}
+		else
+		{
+			/* Release the CPU virt addr */
+			DevmemReleaseCpuVirtAddr(ps3DData->psContextStateMemDesc);
+		}
+	}
+#endif
+
+	FWCommonContextFree(ps3DData->psServerCommonContext);
+	DevmemFwFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc);
+	ps3DData->psServerCommonContext = NULL;
+	return PVRSRV_OK;
+}
+
+static void _RGXDumpPMRPageList(DLLIST_NODE *psNode)
+{
+	RGX_PMR_NODE *psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+	PVRSRV_ERROR			eError;
+
+	eError = PMRDumpPageList(psPMRNode->psPMR,
+							RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Error (%u) printing pmr %p", eError, psPMRNode->psPMR));
+	}
+}
+
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList)
+{
+	DLLIST_NODE *psNode, *psNext;
+
+	PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx,
+				psFreeList->sFreeListFWDevVAddr.ui32Addr,
+				psFreeList->ui32FreelistID,
+				psFreeList->ui64FreelistChecksum));
+
+	/* Dump Init FreeList page list */
+	PVR_LOG(("  Initial Memory block"));
+	dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext)
+	{
+		_RGXDumpPMRPageList(psNode);
+	}
+
+	/* Dump Grow FreeList page list */
+	PVR_LOG(("  Grow Memory blocks"));
+	dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
+	{
+		_RGXDumpPMRPageList(psNode);
+	}
+
+	return IMG_TRUE;
+}
+
+static PVRSRV_ERROR _UpdateFwFreelistSize(RGX_FREELIST *psFreeList,
+										IMG_BOOL bGrow,
+										IMG_UINT32 ui32DeltaSize)
+{
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD		sGPCCBCmd;
+
+	sGPCCBCmd.eCmdType = (bGrow) ? RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE : RGXFWIF_KCCB_CMD_FREELIST_SHRINK_UPDATE;
+	sGPCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+	sGPCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32DeltaSize;
+	sGPCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Send FW update: freelist [FWAddr=0x%08x] has 0x%08x pages",
+								psFreeList->sFreeListFWDevVAddr.ui32Addr,
+								psFreeList->ui32CurrentFLPages));
+
+	/* Submit command to the firmware. */
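+	/* RGXScheduleCommand returns PVRSRV_ERROR_RETRY while the command cannot
+	 * yet be queued (typically because the kernel CCB has no space); back off
+	 * briefly between attempts until the MAX_HW_TIME_US timeout expires. */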
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psFreeList->psDevInfo,
+									RGXFWIF_DM_GP,
+									&sGPCCBCmd,
+									sizeof(sGPCCBCmd),
+									0,
+									PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_UpdateFwFreelistSize: failed to update FW freelist size. (error = %u)", eError));
+		return eError;
+	}
+
+	return eError;
+}
+
+static void _CheckFreelist(RGX_FREELIST *psFreeList,
+						   IMG_UINT32 ui32NumOfPagesToCheck,
+						   IMG_UINT64 ui64ExpectedCheckSum,
+						   IMG_UINT64 *pui64CalculatedCheckSum)
+{
+#if defined(NO_HARDWARE)
+	/* No checksum needed as we have all information in the pdumps */
+	PVR_UNREFERENCED_PARAMETER(psFreeList);
+	PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck);
+	PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum);
+	*pui64CalculatedCheckSum = 0;
+#else
+	PVRSRV_ERROR eError;
+	size_t uiNumBytes;
+	IMG_UINT8* pui8Buffer;
+	IMG_UINT32* pui32Buffer;
+	IMG_UINT32 ui32CheckSumAdd = 0;
+	IMG_UINT32 ui32CheckSumXor = 0;
+	IMG_UINT32 ui32Entry;
+	IMG_UINT32 ui32Entry2;
+	IMG_BOOL bFreelistBad = IMG_FALSE;
+
+	*pui64CalculatedCheckSum = 0;
+
+	PVR_ASSERT(ui32NumOfPagesToCheck <= (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages));
+
+	/* Allocate Buffer of the size of the freelist */
+	pui8Buffer = OSAllocMem(ui32NumOfPagesToCheck * sizeof(IMG_UINT32));
+	if (pui8Buffer == NULL)
+	{
+		PVR_LOG(("_CheckFreelist: Failed to allocate buffer to check freelist %p!", psFreeList));
+		PVR_ASSERT(0);
+		return;
+	}
+
+	/* Copy freelist content into Buffer */
+	eError = PMR_ReadBytes(psFreeList->psFreeListPMR,
+					psFreeList->uiFreeListPMROffset + (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psFreeList->ui32ReadyFLPages) * sizeof(IMG_UINT32),
+					pui8Buffer,
+					ui32NumOfPagesToCheck * sizeof(IMG_UINT32),
+					&uiNumBytes);
+	if (eError != PVRSRV_OK)
+	{
+		OSFreeMem(pui8Buffer);
+		PVR_LOG(("_CheckFreelist: Failed to get freelist data for freelist %p!", psFreeList));
+		PVR_ASSERT(0);
+		return;
+	}
+
+	PVR_ASSERT(uiNumBytes == ui32NumOfPagesToCheck * sizeof(IMG_UINT32));
+
+	/* Generate checksum */
+	pui32Buffer = (IMG_UINT32 *)pui8Buffer;
+	for(ui32Entry = 0; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++)
+	{
+		ui32CheckSumAdd += pui32Buffer[ui32Entry];
+		ui32CheckSumXor ^= pui32Buffer[ui32Entry];
+
+		/* Check for double entries */
+		for (ui32Entry2 = ui32Entry+1; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++)
+		{
+			if (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2])
+			{
+				PVR_LOG(("_CheckFreelist: Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d",
+											psFreeList->sFreeListFWDevVAddr.ui32Addr,
+											pui32Buffer[ui32Entry2],
+											ui32Entry,
+											ui32Entry2,
+											psFreeList->ui32CurrentFLPages));
+				bFreelistBad = IMG_TRUE;
+				break;
+			}
+		}
+	}
+
+	OSFreeMem(pui8Buffer);
+
+	/* Check the calculated checksum against the expected checksum... */
+	*pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;
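+	/* i.e. the XOR of all entries lives in the high 32 bits and the 32-bit
+	 * wrapping sum of all entries in the low 32 bits. */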
+
+	if (ui64ExpectedCheckSum != 0  &&  ui64ExpectedCheckSum != *pui64CalculatedCheckSum)
+	{
+		PVR_LOG(("_CheckFreelist: Checksum mismatch for freelist %p!  Expected 0x%016" IMG_UINT64_FMTSPECx " calculated 0x%016" IMG_UINT64_FMTSPECx,
+		        psFreeList, ui64ExpectedCheckSum, *pui64CalculatedCheckSum));
+		bFreelistBad = IMG_TRUE;
+	}
+
+	if (bFreelistBad)
+	{
+		PVR_LOG(("_CheckFreelist: Sleeping for ever!"));
+		PVR_ASSERT(!bFreelistBad);
+	}
+#endif
+}
+
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+                             IMG_UINT32 ui32NumPages,
+                             PDLLIST_NODE pListHeader,
+                             IMG_BOOL bForCreate)
+{
+	RGX_PMR_NODE	*psPMRNode;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32  ui32MappingTable = 0;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiLength;
+	IMG_DEVMEM_SIZE_T uistartPage;
+	PVRSRV_ERROR eError;
+	const IMG_CHAR * pszAllocName = "Free List";
+	IMG_UINT32 ui32CurrentFLPages = psFreeList->ui32CurrentFLPages;
+	IMG_UINT32 ui32ReadyFLPages = psFreeList->ui32ReadyFLPages;
+
+	/* Are we allowed to grow ? */
+	if (psFreeList->ui32MaxFLPages - (ui32CurrentFLPages + ui32ReadyFLPages) < ui32NumPages)
+	{
+		PVR_DPF((PVR_DBG_WARNING,"Freelist [0x%p]: grow by %u pages denied. Max PB size reached (current pages %u/%u)",
+				psFreeList,
+				ui32NumPages,
+				psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages,
+				psFreeList->ui32MaxFLPages));
+		return PVRSRV_ERROR_PBSIZE_ALREADY_MAX;
+	}
+
+	/* Allocate kernel memory block structure */
+	psPMRNode = OSAllocMem(sizeof(*psPMRNode));
+	if (psPMRNode == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXGrowFreeList: failed to allocate host data structure"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocHost;
+	}
+
+	/*
+	 * Lock protects simultaneous manipulation of:
+	 * - the memory block list
+	 * - the freelist's ui32CurrentFLPages
+	 */
+	OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+
+	psPMRNode->ui32NumPages = ui32NumPages;
+	psPMRNode->psFreeList = psFreeList;
+
+	/* Allocate Memory Block */
+	PDUMPCOMMENT("Allocate PB Block (Pages %08X)", ui32NumPages);
+	uiSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE;
+	eError = PhysmemNewRamBackedPMR(NULL,
+	                                psFreeList->psDevInfo->psDeviceNode,
+									uiSize,
+									uiSize,
+									1,
+									1,
+									&ui32MappingTable,
+									RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+									PVRSRV_MEMALLOCFLAG_GPU_READABLE,
+									OSStringLength(pszAllocName) + 1,
+									pszAllocName,
+									&psPMRNode->psPMR);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "RGXGrowFreeList: Failed to allocate PB block of size: 0x%016" IMG_UINT64_FMTSPECX,
+				 (IMG_UINT64)uiSize));
+		goto ErrorBlockAlloc;
+	}
+
+	/* Zeroing physical pages pointed by the PMR */
+	if (psFreeList->psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+	{
+		eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXGrowFreeList: Failed to zero PMR %p of freelist %p with Error %d",
+									psPMRNode->psPMR,
+									psFreeList,
+									eError));
+			PVR_ASSERT(0);
+		}
+	}
+
+	uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+	uistartPage = (psFreeList->ui32MaxFLPages - ui32CurrentFLPages - psPMRNode->ui32NumPages);
+	uiOffset = psFreeList->uiFreeListPMROffset + (uistartPage * sizeof(IMG_UINT32));
+
+#if defined(PVR_RI_DEBUG)
+
+	eError = RIWritePMREntryKM(psPMRNode->psPMR,
+	                           OSStringNLength(pszAllocName, RI_MAX_TEXT_LEN),
+	                           pszAllocName,
+	                           uiSize);
+	if( eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: call to RIWritePMREntryKM failed (eError=%d)",
+				__func__,
+				eError));
+	}
+
+	 /* Attach RI information */
+	eError = RIWriteMEMDESCEntryKM(psPMRNode->psPMR,
+	                               OSStringNLength(pszAllocName, RI_MAX_TEXT_LEN),
+	                               pszAllocName,
+	                               0,
+	                               uiSize,
+	                               uiSize,
+	                               IMG_FALSE,
+	                               IMG_FALSE,
+	                               &psPMRNode->hRIHandle);
+	if( eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: call to RIWriteMEMDESCEntryKM failed (eError=%d)",
+				__func__,
+				eError));
+	}
+
+#endif /* if defined(PVR_RI_DEBUG) */
+
+	/* write Freelist with Memory Block physical addresses */
+	eError = PMRWritePMPageList(
+						/* Target PMR, offset, and length */
+						psFreeList->psFreeListPMR,
+						uiOffset,
+						uiLength,
+						/* Referenced PMR, and "page" granularity */
+						psPMRNode->psPMR,
+						RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+						&psPMRNode->psPageList);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "RGXGrowFreeList: Failed to write pages of Node %p",
+				 psPMRNode));
+		goto ErrorPopulateFreelist;
+	}
+
+#if defined(SUPPORT_SHADOW_FREELISTS)
+	/* Copy freelist memory to shadow freelist */
+	{
+		const IMG_UINT32 ui32FLMaxSize = psFreeList->ui32MaxFLPages * sizeof (IMG_UINT32);
+		const IMG_UINT32 ui32MapSize = ui32FLMaxSize * 2;
+		const IMG_UINT32 ui32CopyOffset = uiOffset - psFreeList->uiFreeListPMROffset;
+		IMG_BYTE *pFLMapAddr;
+		size_t uiNumBytes;
+		PVRSRV_ERROR res;
+		IMG_HANDLE hMapHandle;
+
+		/* Map both the FL and the shadow FL */
+		res = PMRAcquireKernelMappingData(psFreeList->psFreeListPMR, psFreeList->uiFreeListPMROffset, ui32MapSize,
+		                                  (void**) &pFLMapAddr, &uiNumBytes, &hMapHandle);
+		if (res != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "RGXGrowFreeList: Failed to map freelist (ID=%d)",
+			         psFreeList->ui32FreelistID));
+			goto ErrorPopulateFreelist;
+		}
+
+		/* Copy only the newly added memory */
+		memcpy(pFLMapAddr + ui32FLMaxSize + ui32CopyOffset, pFLMapAddr + ui32CopyOffset, uiLength);
+
+#if defined(PDUMP)
+		PDUMPCOMMENT("Initialize shadow freelist");
+
+		/* Translate memcpy to pdump */
+		{
+			IMG_DEVMEM_OFFSET_T uiCurrOffset;
+
+			for (uiCurrOffset = uiOffset; (uiCurrOffset - uiOffset) < uiLength; uiCurrOffset += sizeof (IMG_UINT32))
+			{
+				PMRPDumpCopyMem32(psFreeList->psFreeListPMR,
+				                  uiCurrOffset + ui32FLMaxSize,
+				                  psFreeList->psFreeListPMR,
+				                  uiCurrOffset,
+				                  ":SYSMEM:$1",
+				                  PDUMP_FLAGS_CONTINUOUS);
+			}
+		}
+#endif
+
+
+		res = PMRReleaseKernelMappingData(psFreeList->psFreeListPMR, hMapHandle);
+
+		if (res != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "RGXGrowFreeList: Failed to release freelist mapping (ID=%d)",
+			         psFreeList->ui32FreelistID));
+			goto ErrorPopulateFreelist;
+		}
+	}
+#endif
+
+	/* It must be added to the tail, otherwise the freelist population won't work */
+	dllist_add_to_head(pListHeader, &psPMRNode->sMemoryBlock);
+
+	/* Update number of available pages */
+	if (bForCreate)
+	{
+		/* Only growable freelists should reserve pages for OOM threshold */
+		if (psFreeList->ui32GrowFLPages != 0)
+		{
+			const IMG_UINT32 ui32ReadyPages = RGX_COMPUTE_FL_READY_PAGES(ui32NumPages, psFreeList->ui32GrowThreshold);
+
+			ui32CurrentFLPages += ui32NumPages - ui32ReadyPages;
+			ui32ReadyFLPages += ui32ReadyPages;
+		}
+		else
+		{
+			ui32CurrentFLPages += ui32NumPages;
+		}
+	}
+	else
+	{
+		ui32ReadyFLPages += ui32NumPages;
+	}
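+
+	/* Illustrative accounting, assuming RGX_COMPUTE_FL_READY_PAGES(n, t)
+	 * returns the portion of n pages held back as "ready" for threshold t:
+	 * at creation a growable freelist splits the new block between the
+	 * current and ready banks, while a later grow parks the whole block in
+	 * the ready bank until the FW consumes it on OOM. */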
+
+	/* Update freelist fields in case they were modified */
+	psFreeList->ui32CurrentFLPages = ui32CurrentFLPages;
+	psFreeList->ui32ReadyFLPages = ui32ReadyFLPages;
+
+	/* Update statistics */
+	if (psFreeList->ui32NumHighPages < psFreeList->ui32CurrentFLPages)
+	{
+		psFreeList->ui32NumHighPages = psFreeList->ui32CurrentFLPages;
+	}
+
+	if (psFreeList->bCheckFreelist)
+	{
+		/*
+		 *  We can only calculate the freelist checksum when the list is full
+		 *  (e.g. at initial creation time). At other times the checksum cannot
+		 *  be calculated and has to be disabled for this freelist.
+		 */
+		if ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages)
+		{
+			_CheckFreelist(psFreeList, ui32NumPages, 0, &psFreeList->ui64FreelistChecksum);
+		}
+		else
+		{
+			psFreeList->ui64FreelistChecksum = 0;
+		}
+	}
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+	PVR_DPF((PVR_DBG_MESSAGE,"Freelist [%p]: %s %u pages (pages=%u/%u checksum=0x%016" IMG_UINT64_FMTSPECx ")",
+			psFreeList,
+	        ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages ? "Create initial" : "Grow by"),
+			ui32NumPages,
+			psFreeList->ui32CurrentFLPages,
+			psFreeList->ui32MaxFLPages,
+			psFreeList->ui64FreelistChecksum));
+
+	return PVRSRV_OK;
+
+	/* Error handling */
+ErrorPopulateFreelist:
+	PMRUnrefPMR(psPMRNode->psPMR);
+
+ErrorBlockAlloc:
+	OSFreeMem(psPMRNode);
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ErrorAllocHost:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+
+}
+
+static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader,
+										RGX_FREELIST *psFreeList)
+{
+	DLLIST_NODE *psNode;
+	RGX_PMR_NODE *psPMRNode;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32OldValue;
+
+	/*
+	 * Lock protects simultaneous manipulation of:
+	 * - the memory block list
+	 * - the freelist's ui32CurrentFLPages value
+	 */
+	PVR_ASSERT(pListHeader);
+	PVR_ASSERT(psFreeList);
+	PVR_ASSERT(psFreeList->psDevInfo);
+	PVR_ASSERT(psFreeList->psDevInfo->hLockFreeList);
+
+	OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+	/* Get node from head of list and remove it */
+	psNode = dllist_get_next_node(pListHeader);
+	if (psNode)
+	{
+		dllist_remove_node(psNode);
+
+		psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+		PVR_ASSERT(psPMRNode);
+		PVR_ASSERT(psPMRNode->psPMR);
+		PVR_ASSERT(psPMRNode->psFreeList);
+
+		/* remove block from freelist list */
+
+		/* Unwrite Freelist with Memory Block physical addresses */
+		eError = PMRUnwritePMPageList(psPMRNode->psPageList);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "RGXRemoveBlockFromFreeListKM: Failed to unwrite pages of Node %p",
+					 psPMRNode));
+			PVR_ASSERT(IMG_FALSE);
+		}
+
+#if defined(PVR_RI_DEBUG)
+
+		if (psPMRNode->hRIHandle)
+		{
+		    PVRSRV_ERROR eError;
+
+		    eError = RIDeleteMEMDESCEntryKM(psPMRNode->hRIHandle);
+			if( eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: call to RIDeleteMEMDESCEntryKM failed (eError=%d)", __func__, eError));
+			}
+		}
+
+#endif  /* if defined(PVR_RI_DEBUG) */
+
+		/* Free PMR (We should be the only one that holds a ref on the PMR) */
+		eError = PMRUnrefPMR(psPMRNode->psPMR);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "RGXRemoveBlockFromFreeListKM: Failed to free PB block %p (error %u)",
+					 psPMRNode->psPMR,
+					 eError));
+			PVR_ASSERT(IMG_FALSE);
+		}
+
+		/* update available pages in freelist */
+		ui32OldValue = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages;
+
+		/*
+		 * Deallocated pages should first be deducted from ReadyPages bank, once
+		 * there are no more left, start deducting them from CurrentPage bank.
+		 */
+		if (psPMRNode->ui32NumPages > psFreeList->ui32ReadyFLPages)
+		{
+			psFreeList->ui32CurrentFLPages -= psPMRNode->ui32NumPages - psFreeList->ui32ReadyFLPages;
+			psFreeList->ui32ReadyFLPages = 0;
+		}
+		else
+		{
+			psFreeList->ui32ReadyFLPages -= psPMRNode->ui32NumPages;
+		}
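+		/* e.g. freeing a 10-page block while 4 pages are "ready" empties the
+		 * ready bank and deducts the remaining 6 pages from the current bank. */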
+
+		/* check underflow */
+		PVR_ASSERT(ui32OldValue > (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages));
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)",
+								psFreeList,
+								psPMRNode->ui32NumPages,
+								psFreeList->ui32CurrentFLPages,
+								psFreeList->ui32MaxFLPages));
+
+		OSFreeMem(psPMRNode);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING,"Freelist [0x%p]: shrink denied. PB already at initial PB size (%u pages)",
+								psFreeList,
+								psFreeList->ui32InitFLPages));
+		eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN;
+	}
+
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+	return eError;
+}
+
+static RGX_FREELIST *FindFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID)
+{
+	DLLIST_NODE *psNode, *psNext;
+	RGX_FREELIST *psFreeList = NULL;
+
+	OSLockAcquire(psDevInfo->hLockFreeList);
+
+	dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+	{
+		RGX_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+
+		if (psThisFreeList->ui32FreelistID == ui32FreelistID)
+		{
+			psFreeList = psThisFreeList;
+			break;
+		}
+	}
+
+	OSLockRelease(psDevInfo->hLockFreeList);
+	return psFreeList;
+}
+
+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+						   IMG_UINT32 ui32FreelistID)
+{
+	RGX_FREELIST *psFreeList = NULL;
+	RGXFWIF_KCCB_CMD s3DCCBCmd;
+	IMG_UINT32 ui32GrowValue;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psDevInfo);
+
+	psFreeList = FindFreeList(psDevInfo, ui32FreelistID);
+
+	if (psFreeList)
+	{
+		/* Since the FW made the request, it has already consumed the ready pages, update the host struct */
+		psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages;
+		psFreeList->ui32ReadyFLPages = 0;
+
+		/* Try to grow the freelist */
+		eError = RGXGrowFreeList(psFreeList,
+		                         psFreeList->ui32GrowFLPages,
+		                         &psFreeList->sMemoryBlockHead,
+		                         IMG_FALSE);
+
+		if (eError == PVRSRV_OK)
+		{
+			/* Grow successful, return size of grow size */
+			ui32GrowValue = psFreeList->ui32GrowFLPages;
+
+			psFreeList->ui32NumGrowReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+			/* Update Stats */
+			PVRSRVStatsUpdateFreelistStats(0,
+			                               1, /* Add 1 to the appropriate counter (Requests by FW) */
+			                               psFreeList->ui32InitFLPages,
+			                               psFreeList->ui32NumHighPages,
+			                               psFreeList->ownerPid);
+#endif
+
+		}
+		else
+		{
+			/* Grow failed */
+			ui32GrowValue = 0;
+			PVR_DPF((PVR_DBG_ERROR,"Grow for FreeList %p failed (error %u)",
+									psFreeList,
+									eError));
+		}
+
+		/* send feedback */
+		s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE;
+		s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+		s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue;
+		s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages;
+		s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages;
+		s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyOOMLimit =
+			RGX_COMPUTE_FL_READY_PAGES(psFreeList->ui32ReadyFLPages, psFreeList->ui32GrowThreshold);
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psDevInfo,
+										RGXFWIF_DM_3D,
+										&s3DCCBCmd,
+										sizeof(s3DCCBCmd),
+										0,
+										PDUMP_FLAGS_NONE);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+		/* The kernel CCB should never fill up, as the FW is processing commands right away */
+
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+	else
+	{
+		/* Should never happen */
+		PVR_DPF((PVR_DBG_ERROR,"FreeList Lookup for FreeList ID 0x%08x failed (Populate)", ui32FreelistID));
+		PVR_ASSERT(IMG_FALSE);
+	}
+}
+
+static void _RGXCheckFreeListReconstruction(PDLLIST_NODE psNode)
+{
+
+	PVRSRV_RGXDEV_INFO 		*psDevInfo;
+	RGX_FREELIST			*psFreeList;
+	RGX_PMR_NODE			*psPMRNode;
+	PVRSRV_ERROR			eError;
+	IMG_DEVMEM_OFFSET_T		uiOffset;
+	IMG_DEVMEM_SIZE_T		uiLength;
+	IMG_UINT32				ui32StartPage;
+
+	psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+	psFreeList = psPMRNode->psFreeList;
+	PVR_ASSERT(psFreeList);
+	psDevInfo = psFreeList->psDevInfo;
+	PVR_ASSERT(psDevInfo);
+
+	uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+	ui32StartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+	uiOffset = psFreeList->uiFreeListPMROffset + (ui32StartPage * sizeof(IMG_UINT32));
+
+	PMRUnwritePMPageList(psPMRNode->psPageList);
+	psPMRNode->psPageList = NULL;
+	eError = PMRWritePMPageList(
+						/* Target PMR, offset, and length */
+						psFreeList->psFreeListPMR,
+						uiOffset,
+						uiLength,
+						/* Referenced PMR, and "page" granularity */
+						psPMRNode->psPMR,
+						RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+						&psPMRNode->psPageList);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Error (%u) writing FL 0x%08x", eError, (IMG_UINT32)psFreeList->ui32FreelistID));
+	}
+
+	/* Zeroing physical pages pointed by the reconstructed freelist */
+	if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+	{
+		eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"_RGXCheckFreeListReconstruction: Failed to zero PMR %p of freelist %p with Error %d",
+									psPMRNode->psPMR,
+									psFreeList,
+									eError));
+			PVR_ASSERT(0);
+		}
+	}
+
+
+	psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages;
+}
+
+
+static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList)
+{
+	IMG_UINT32        ui32OriginalFLPages;
+	DLLIST_NODE       *psNode, *psNext;
+	RGXFWIF_FREELIST  *psFWFreeList;
+	PVRSRV_ERROR      eError;
+
+	//PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: Reconstructing freelist %p (ID=%u)", psFreeList, psFreeList->ui32FreelistID));
+
+	/* Do the FreeList Reconstruction */
+	ui32OriginalFLPages            = psFreeList->ui32CurrentFLPages;
+	psFreeList->ui32CurrentFLPages = 0;
+
+	/* Reconstructing Init FreeList pages */
+	dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext)
+	{
+		_RGXCheckFreeListReconstruction(psNode);
+	}
+
+	/* Reconstructing Grow FreeList pages */
+	dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
+	{
+		_RGXCheckFreeListReconstruction(psNode);
+	}
+
+	/* Ready pages are allocated but kept hidden until OOM occurs. */
+	psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages;
+	if (psFreeList->ui32CurrentFLPages != ui32OriginalFLPages)
+	{
+		PVR_ASSERT(psFreeList->ui32CurrentFLPages == ui32OriginalFLPages);
+		return PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED;
+	}
+
+	/* Reset the firmware freelist structure */
+	eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	psFWFreeList->ui32CurrentStackTop       = psFWFreeList->ui32CurrentPages - 1;
+	psFWFreeList->ui32AllocatedPageCount    = 0;
+	psFWFreeList->ui32AllocatedMMUPageCount = 0;
+	psFWFreeList->ui32HWRCounter++;
+
+	DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+	/* Check the Freelist checksum if required (as the list is fully populated) */
+	if (psFreeList->bCheckFreelist)
+	{
+		IMG_UINT64  ui64CheckSum;
+
+		_CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
+	}
+
+	return eError;
+}
+
+
+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                              IMG_UINT32 ui32FreelistsCount,
+                                              IMG_UINT32 *paui32Freelists)
+{
+	PVRSRV_ERROR      eError = PVRSRV_OK;
+	DLLIST_NODE       *psNode, *psNext;
+	IMG_UINT32        ui32Loop;
+	RGXFWIF_KCCB_CMD  sTACCBCmd;
+
+	PVR_ASSERT(psDevInfo != NULL);
+	PVR_ASSERT(ui32FreelistsCount <= (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS));
+
+	//PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: %u freelist(s) requested for reconstruction", ui32FreelistsCount));
+
+	/*
+	 *  Initialise the response command (in case we don't find a freelist ID)...
+	 */
+	sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE;
+	sTACCBCmd.uCmdData.sFreeListsReconstructionData.ui32FreelistsCount = ui32FreelistsCount;
+
+	for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+	{
+		sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] |
+		                                                                             RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+	}
+
+	/*
+	 *  The list of freelists we have been given for reconstruction will
+	 *  consist of local and global freelists (maybe MMU as well). Any
+	 *  local freelists will have their global list specified as well.
+	 *  However there may be other local freelists, not listed, whose
+	 *  global freelist is going to be reconstructed; those must be found
+	 *  too, which means iterating the entire list of freelists to
+	 *  determine which must be reconstructed.
+	 */
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+	{
+		RGX_FREELIST  *psFreeList  = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+		IMG_BOOL      bReconstruct = IMG_FALSE;
+
+		/*
+		 *  Check if this freelist needs to be reconstructed (was it requested
+		 *  or was its global freelist requested)...
+		 */
+		for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+		{
+			if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID  ||
+			    paui32Freelists[ui32Loop] == psFreeList->ui32FreelistGlobalID)
+			{
+				bReconstruct = IMG_TRUE;
+				break;
+			}
+		}
+
+		if (bReconstruct)
+		{
+			eError = RGXReconstructFreeList(psFreeList);
+			if (eError == PVRSRV_OK)
+			{
+				for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+				{
+					if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID)
+					{
+						/* Reconstruction of this requested freelist was successful... */
+						sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &= ~RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+						break;
+					}
+				}
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Reconstructing of FreeList %p failed (error %u)",
+						 psFreeList,
+						 eError));
+			}
+		}
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	/* Check that all freelists were found and reconstructed... */
+	for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+	{
+		PVR_ASSERT((sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &
+		            RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG) == 0);
+	}
+
+	/* send feedback */
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+		                            RGXFWIF_DM_TA,
+		                            &sTACCBCmd,
+		                            sizeof(sTACCBCmd),
+		                            0,
+		                            PDUMP_FLAGS_NONE);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	/* The kernel CCB should never fill up, as the FW is processing commands right away */
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+/* Create HWRTDataSet */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateHWRTData(CONNECTION_DATA      *psConnection,
+                               PVRSRV_DEVICE_NODE	*psDeviceNode,
+							   IMG_UINT32			psRenderTarget, /* FIXME this should not be IMG_UINT32 */
+							   IMG_DEV_VIRTADDR		psPMMListDevVAddr,
+							   RGX_FREELIST			*apsFreeLists[RGXFW_MAX_FREELISTS],
+							   RGX_RTDATA_CLEANUP_DATA	**ppsCleanupData,
+							   DEVMEM_MEMDESC		**ppsRTACtlMemDesc,
+							   IMG_UINT32           ui32PPPScreen,
+							   IMG_UINT32           ui32PPPGridOffset,
+							   IMG_UINT64           ui64PPPMultiSampleCtl,
+							   IMG_UINT32           ui32TPCStride,
+							   IMG_DEV_VIRTADDR		sTailPtrsDevVAddr,
+							   IMG_UINT32           ui32TPCSize,
+							   IMG_UINT32           ui32TEScreen,
+							   IMG_UINT32           ui32TEAA,
+							   IMG_UINT32           ui32TEMTILE1,
+							   IMG_UINT32           ui32TEMTILE2,
+							   IMG_UINT32           ui32MTileStride,
+							   IMG_UINT32                 ui32ISPMergeLowerX,
+							   IMG_UINT32                 ui32ISPMergeLowerY,
+							   IMG_UINT32                 ui32ISPMergeUpperX,
+							   IMG_UINT32                 ui32ISPMergeUpperY,
+							   IMG_UINT32                 ui32ISPMergeScaleX,
+							   IMG_UINT32                 ui32ISPMergeScaleY,
+							   IMG_UINT16			ui16MaxRTs,
+							   DEVMEM_MEMDESC		**ppsMemDesc,
+							   IMG_UINT32			*puiHWRTData)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+	RGXFWIF_HWRTDATA *psHWRTData;
+	RGXFWIF_RTA_CTL *psRTACtl;
+	IMG_UINT32 ui32Loop;
+	RGX_RTDATA_CLEANUP_DATA *psTmpCleanup;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* Prepare cleanup struct */
+	psTmpCleanup = OSAllocZMem(sizeof(*psTmpCleanup));
+	if (psTmpCleanup == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto AllocError;
+	}
+
+	*ppsCleanupData = psTmpCleanup;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psTmpCleanup->psCleanupSync,
+						   "HWRTData cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto SyncAlloc;
+	}
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	/*
+	 * This FW RT-Data is only mapped into the kernel for initialisation.
+	 * Otherwise this allocation is only used by the FW.
+	 * Therefore the GPU cache doesn't need coherency, and write-combine is
+	 * sufficient on the CPU side (the WC buffer will be flushed at the first TA kick).
+	 */
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_HWRTDATA),
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+							PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+							"FwHWRTData",
+							ppsMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateHWRTData: DevmemAllocate for RGX_FWIF_HWRTDATA failed"));
+		goto FWRTDataAllocateError;
+	}
+
+	psTmpCleanup->psDeviceNode = psDeviceNode;
+	psTmpCleanup->psFWHWRTDataMemDesc = *ppsMemDesc;
+
+	RGXSetFirmwareAddress(&pFirmwareAddr, *ppsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+
+	*puiHWRTData = pFirmwareAddr.ui32Addr;
+
+	eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, (void **)&psHWRTData);
+	PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError);
+
+	/* FIXME: MList is something that the PM writes physical addresses to,
+	 * so ideally it is best allocated in the kernel */
+	psHWRTData->psPMMListDevVAddr = psPMMListDevVAddr;
+	psHWRTData->psParentRenderTarget.ui32Addr = psRenderTarget;
+
+	psHWRTData->ui32PPPScreen         = ui32PPPScreen;
+	psHWRTData->ui32PPPGridOffset     = ui32PPPGridOffset;
+	psHWRTData->ui64PPPMultiSampleCtl = ui64PPPMultiSampleCtl;
+	psHWRTData->ui32TPCStride         = ui32TPCStride;
+	psHWRTData->sTailPtrsDevVAddr     = sTailPtrsDevVAddr;
+	psHWRTData->ui32TPCSize           = ui32TPCSize;
+	psHWRTData->ui32TEScreen          = ui32TEScreen;
+	psHWRTData->ui32TEAA              = ui32TEAA;
+	psHWRTData->ui32TEMTILE1          = ui32TEMTILE1;
+	psHWRTData->ui32TEMTILE2          = ui32TEMTILE2;
+	psHWRTData->ui32MTileStride       = ui32MTileStride;
+	psHWRTData->ui32ISPMergeLowerX = ui32ISPMergeLowerX;
+	psHWRTData->ui32ISPMergeLowerY = ui32ISPMergeLowerY;
+	psHWRTData->ui32ISPMergeUpperX = ui32ISPMergeUpperX;
+	psHWRTData->ui32ISPMergeUpperY = ui32ISPMergeUpperY;
+	psHWRTData->ui32ISPMergeScaleX = ui32ISPMergeScaleX;
+	psHWRTData->ui32ISPMergeScaleY = ui32ISPMergeScaleY;
+
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+	{
+		psTmpCleanup->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop];
+		psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount++;
+		psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psTmpCleanup->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr;
+		/* Invalid initial snapshot value; the snapshot is always taken during the
+		 * first kick and hence the value gets replaced then anyway, so it is safe
+		 * to set it to 0.
+		 */
+		psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0;
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	PDUMPCOMMENT("Allocate RGXFW RTA control");
+	eError = DevmemFwAllocate(psDevInfo,
+										sizeof(RGXFWIF_RTA_CTL),
+										PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+										PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+										PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+										PVRSRV_MEMALLOCFLAG_UNCACHED |
+										PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+										"FwRTAControl",
+										ppsRTACtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate RGX RTA control (%u)",
+				eError));
+		goto FWRTAAllocateError;
+	}
+	psTmpCleanup->psRTACtlMemDesc = *ppsRTACtlMemDesc;
+	RGXSetFirmwareAddress(&psHWRTData->psRTACtl,
+								   *ppsRTACtlMemDesc,
+								   0, RFW_FWADDR_FLAG_NONE);
+
+	eError = DevmemAcquireCpuVirtAddr(*ppsRTACtlMemDesc, (void **)&psRTACtl);
+	PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTACpuMapError);
+	psRTACtl->ui32RenderTargetIndex = 0;
+	psRTACtl->ui32ActiveRenderTargets = 0;
+
+	if (ui16MaxRTs > 1)
+	{
+		/* Allocate memory for the checks */
+		PDUMPCOMMENT("Allocate memory for shadow render target cache");
+		eError = DevmemFwAllocate(psDevInfo,
+								ui16MaxRTs * sizeof(IMG_UINT32),
+								PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+								PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+								PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+								PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+								PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+								PVRSRV_MEMALLOCFLAG_UNCACHED|
+								PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+								"FwShadowRTCache",
+								&psTmpCleanup->psRTArrayMemDesc);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate %d bytes for render target array (%u)",
+				ui16MaxRTs, eError));
+			goto FWAllocateRTArryError;
+		}
+
+		RGXSetFirmwareAddress(&psRTACtl->sValidRenderTargets,
+										psTmpCleanup->psRTArrayMemDesc,
+										0, RFW_FWADDR_FLAG_NONE);
+
+		/* Allocate memory for the checks */
+		PDUMPCOMMENT("Allocate memory for tracking renders accumulation");
+		eError = DevmemFwAllocate(psDevInfo,
+								ui16MaxRTs * sizeof(IMG_UINT32),
+								PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+								PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+								PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+								PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+								PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+								PVRSRV_MEMALLOCFLAG_UNCACHED |
+								PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+								"FwRendersAccumulation",
+								&psTmpCleanup->psRendersAccArrayMemDesc);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate %d bytes for render target array (%u) (renders accumulation)",
+						  ui16MaxRTs, eError));
+			goto FWAllocateRTAccArryError;
+		}
+
+		RGXSetFirmwareAddress(&psRTACtl->sNumRenders,
+										psTmpCleanup->psRendersAccArrayMemDesc,
+										0, RFW_FWADDR_FLAG_NONE);
+		psRTACtl->ui16MaxRTs = ui16MaxRTs;
+	}
+	else
+	{
+		psRTACtl->sValidRenderTargets.ui32Addr = 0;
+		psRTACtl->sNumRenders.ui32Addr = 0;
+		psRTACtl->ui16MaxRTs = 1;
+	}
+
+	PDUMPCOMMENT("Dump HWRTData 0x%08X", *puiHWRTData);
+	DevmemPDumpLoadMem(*ppsMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS);
+	PDUMPCOMMENT("Dump RTACtl");
+	DevmemPDumpLoadMem(*ppsRTACtlMemDesc, 0, sizeof(*psRTACtl), PDUMP_FLAGS_CONTINUOUS);
+
+	DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+	DevmemReleaseCpuVirtAddr(*ppsRTACtlMemDesc);
+	return PVRSRV_OK;
+
+FWAllocateRTAccArryError:
+	DevmemFwFree(psDevInfo, psTmpCleanup->psRTArrayMemDesc);
+FWAllocateRTArryError:
+	DevmemReleaseCpuVirtAddr(*ppsRTACtlMemDesc);
+FWRTACpuMapError:
+	RGXUnsetFirmwareAddress(*ppsRTACtlMemDesc);
+	DevmemFwFree(psDevInfo, *ppsRTACtlMemDesc);
+FWRTAAllocateError:
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+	{
+		PVR_ASSERT(psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+		psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount--;
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+	DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+FWRTDataCpuMapError:
+	RGXUnsetFirmwareAddress(*ppsMemDesc);
+	DevmemFwFree(psDevInfo, *ppsMemDesc);
+FWRTDataAllocateError:
+	SyncPrimFree(psTmpCleanup->psCleanupSync);
+SyncAlloc:
+	*ppsCleanupData = NULL;
+	OSFreeMem(psTmpCleanup);
+
+AllocError:
+	return eError;
+}
+
+/* Destroy HWRTDataSet */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyHWRTData(RGX_RTDATA_CLEANUP_DATA *psCleanupData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	PVRSRV_ERROR eError;
+	PRGXFWIF_HWRTDATA psHWRTData;
+	IMG_UINT32 ui32Loop;
+
+	PVR_ASSERT(psCleanupData);
+
+	RGXSetFirmwareAddress(&psHWRTData, psCleanupData->psFWHWRTDataMemDesc, 0, RFW_FWADDR_NOREF_FLAG);
+
+	/* Cleanup HWRTData in TA */
+	eError = RGXFWRequestHWRTDataCleanUp(psCleanupData->psDeviceNode,
+										 psHWRTData,
+										 psCleanupData->psCleanupSync,
+										 RGXFWIF_DM_TA);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+
+	psDevInfo = psCleanupData->psDeviceNode->pvDevice;
+
+	/* Cleanup HWRTData in 3D */
+	eError = RGXFWRequestHWRTDataCleanUp(psCleanupData->psDeviceNode,
+										 psHWRTData,
+										 psCleanupData->psCleanupSync,
+										 RGXFWIF_DM_3D);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+
+	/* If we got here then TA and 3D operations on this RTData have finished */
+	if(psCleanupData->psRTACtlMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psCleanupData->psRTACtlMemDesc);
+		DevmemFwFree(psDevInfo, psCleanupData->psRTACtlMemDesc);
+	}
+
+	RGXUnsetFirmwareAddress(psCleanupData->psFWHWRTDataMemDesc);
+	DevmemFwFree(psDevInfo, psCleanupData->psFWHWRTDataMemDesc);
+
+	if(psCleanupData->psRTArrayMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psCleanupData->psRTArrayMemDesc);
+		DevmemFwFree(psDevInfo, psCleanupData->psRTArrayMemDesc);
+	}
+
+	if(psCleanupData->psRendersAccArrayMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psCleanupData->psRendersAccArrayMemDesc);
+		DevmemFwFree(psDevInfo, psCleanupData->psRendersAccArrayMemDesc);
+	}
+
+	SyncPrimFree(psCleanupData->psCleanupSync);
+
+	/* decrease freelist refcount */
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+	{
+		PVR_ASSERT(psCleanupData->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+		psCleanupData->apsFreeLists[ui32Loop]->ui32RefCount--;
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	OSFreeMem(psCleanupData);
+
+	return PVRSRV_OK;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA      *psConnection,
+                               PVRSRV_DEVICE_NODE	*psDeviceNode,
+							   IMG_UINT32			ui32MaxFLPages,
+							   IMG_UINT32			ui32InitFLPages,
+							   IMG_UINT32			ui32GrowFLPages,
+                               IMG_UINT32           ui32GrowParamThreshold,
+							   RGX_FREELIST			*psGlobalFreeList,
+							   IMG_BOOL				bCheckFreelist,
+							   IMG_DEV_VIRTADDR		sFreeListDevVAddr,
+							   PMR					*psFreeListPMR,
+							   IMG_DEVMEM_OFFSET_T	uiFreeListPMROffset,
+							   RGX_FREELIST			**ppsFreeList)
+{
+	PVRSRV_ERROR				eError;
+	RGXFWIF_FREELIST			*psFWFreeList;
+	DEVMEM_MEMDESC				*psFWFreelistMemDesc;
+	RGX_FREELIST				*psFreeList;
+	PVRSRV_RGXDEV_INFO			*psDevInfo = psDeviceNode->pvDevice;
+
+	/* Allocate kernel freelist struct */
+	psFreeList = OSAllocZMem(sizeof(*psFreeList));
+	if (psFreeList == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateFreeList: failed to allocate host data structure"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocHost;
+	}
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psFreeList->psCleanupSync,
+						   "ta3d free list cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateFreeList: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto SyncAlloc;
+	}
+
+	/*
+	 * This FW FreeList context is only mapped into the kernel for initialisation
+	 * and reconstruction (at other times it is not mapped and is only used by
+	 * the FW). Therefore the GPU cache doesn't need coherency, and write-combine
+	 * is sufficient on the CPU side (the WC buffer will be flushed at the first TA kick).
+	 */
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(*psFWFreeList),
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+							"FwFreeList",
+							&psFWFreelistMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateFreeList: DevmemAllocate for RGXFWIF_FREELIST failed"));
+		goto FWFreeListAlloc;
+	}
+
+	/* Initialise host data structures */
+	psFreeList->psDevInfo = psDevInfo;
+	psFreeList->psFreeListPMR = psFreeListPMR;
+	psFreeList->uiFreeListPMROffset = uiFreeListPMROffset;
+	psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc;
+	RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+	psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++;
+	psFreeList->ui32FreelistGlobalID = (psGlobalFreeList ? psGlobalFreeList->ui32FreelistID : 0);
+	psFreeList->ui32MaxFLPages = ui32MaxFLPages;
+	psFreeList->ui32InitFLPages = ui32InitFLPages;
+	psFreeList->ui32GrowFLPages = ui32GrowFLPages;
+	psFreeList->ui32CurrentFLPages = 0;
+	psFreeList->ui32ReadyFLPages = 0;
+	psFreeList->ui32GrowThreshold = ui32GrowParamThreshold;
+	psFreeList->ui64FreelistChecksum = 0;
+	psFreeList->ui32RefCount = 0;
+	psFreeList->bCheckFreelist = bCheckFreelist;
+	dllist_init(&psFreeList->sMemoryBlockHead);
+	dllist_init(&psFreeList->sMemoryBlockInitHead);
+
+
+	/* Add to list of freelists */
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode);
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+
+	/* Initialise FW data structure */
+	eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+	PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWFreeListCpuMap);
+
+	{
+		/* Have ready pages only for FLs that do not reference a global FL */
+		const IMG_UINT32 ui32ReadyPages =
+			(ui32GrowFLPages != 0 ? RGX_COMPUTE_FL_READY_PAGES(ui32InitFLPages, psFreeList->ui32GrowThreshold) : 0);
+
+		psFWFreeList->ui32MaxPages = ui32MaxFLPages;
+		psFWFreeList->ui32CurrentPages = ui32InitFLPages - ui32ReadyPages;
+		psFWFreeList->ui32GrowPages = ui32GrowFLPages;
+		psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1;
+		psFWFreeList->psFreeListDevVAddr = sFreeListDevVAddr;
+		psFWFreeList->ui64CurrentDevVAddr = sFreeListDevVAddr.uiAddr +
+			((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32));
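+		/* i.e. the in-use entries occupy the top of the MaxPages-sized table,
+		 * so the first valid entry sits (Max - Current) 32-bit words past the
+		 * freelist base address. */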
+		psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID;
+		psFWFreeList->bGrowPending = IMG_FALSE;
+		psFWFreeList->ui32ReadyPages = ui32ReadyPages;
+		/* At start of day the firmware shouldn't attempt to take more pages until it hits OOM */
+		psFWFreeList->ui32OOMReadyLimit = ui32ReadyPages;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE,"Freelist %p created: Max pages 0x%08x, Init pages 0x%08x, Max FL base address 0x%016" IMG_UINT64_FMTSPECx ", Init FL base address 0x%016" IMG_UINT64_FMTSPECx,
+			psFreeList,
+			ui32MaxFLPages,
+			ui32InitFLPages,
+			sFreeListDevVAddr.uiAddr,
+			psFWFreeList->psFreeListDevVAddr.uiAddr));
+
+	PDUMPCOMMENT("Dump FW FreeList");
+	DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS);
+
+	/*
+	 * Separate dump of the Freelist's number of Pages and stack pointer.
+	 * This makes it easy to modify the PB size in the out2.txt files.
+	 */
+	PDUMPCOMMENT("FreeList TotalPages");
+	DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+							offsetof(RGXFWIF_FREELIST, ui32CurrentPages),
+							psFWFreeList->ui32CurrentPages,
+							PDUMP_FLAGS_CONTINUOUS);
+	PDUMPCOMMENT("FreeList StackPointer");
+	DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+							offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop),
+							psFWFreeList->ui32CurrentStackTop,
+							PDUMP_FLAGS_CONTINUOUS);
+
+	DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+
+	/* Add initial PB block */
+	eError = RGXGrowFreeList(psFreeList,
+	                         ui32InitFLPages,
+	                         &psFreeList->sMemoryBlockInitHead,
+	                         IMG_TRUE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"RGXCreateFreeList: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (error = %u)",
+				sFreeListDevVAddr.uiAddr,
+				eError));
+		goto FWFreeListCpuMap;
+	}
+	/* Record the owning process before it is used for stats accounting */
+	psFreeList->ownerPid = OSGetCurrentClientProcessIDKM();
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	/* Update Stats */
+	PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App) */
+	                               0,
+	                               psFreeList->ui32InitFLPages,
+	                               psFreeList->ui32NumHighPages,
+	                               psFreeList->ownerPid);
+#endif
+
+	/* return values */
+	*ppsFreeList = psFreeList;
+
+	return PVRSRV_OK;
+
+	/* Error handling */
+
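+	/* The labels below fall through into one another in reverse order of
+	 * construction, so each failure point releases exactly the resources
+	 * acquired before it. */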
+FWFreeListCpuMap:
+	/* Remove freelists from list  */
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	dllist_remove_node(&psFreeList->sNode);
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	RGXUnsetFirmwareAddress(psFWFreelistMemDesc);
+	DevmemFwFree(psDevInfo, psFWFreelistMemDesc);
+
+FWFreeListAlloc:
+	SyncPrimFree(psFreeList->psCleanupSync);
+
+SyncAlloc:
+	OSFreeMem(psFreeList);
+
+ErrorAllocHost:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+	RGXDestroyFreeList
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32RefCount;
+
+	PVR_ASSERT(psFreeList);
+
+	OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+	ui32RefCount = psFreeList->ui32RefCount;
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
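+	/* The refcount is only a snapshot taken under the lock; a non-zero value
+	 * means the FW or a render target still references this freelist and the
+	 * caller is expected to retry later. */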
+	if (ui32RefCount != 0)
+	{
+		/* Freelist still busy */
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	/* Freelist is not in use => start firmware cleanup */
+	eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo,
+										 psFreeList->sFreeListFWDevVAddr,
+										 psFreeList->psCleanupSync);
+	if (eError != PVRSRV_OK)
+	{
+		/* Can happen if the firmware took too long to handle the cleanup request,
+		 * or if SLC flushes didn't go through (due to some GPU lockup) */
+		return eError;
+	}
+
+	/* Remove FreeList from linked list before we destroy it... */
+	OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+	dllist_remove_node(&psFreeList->sNode);
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+	if (psFreeList->bCheckFreelist)
+	{
+		RGXFWIF_FREELIST  *psFWFreeList;
+		IMG_UINT32        ui32CurrentStackTop;
+		IMG_UINT64        ui64CheckSum;
+
+		/* Get the current stack pointer for this free list */
+		DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+		ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop;
+		DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+		if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages - 1)
+		{
+			/* Do consistency tests (as the list is fully populated) */
+			_CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
+		}
+		else
+		{
+			/* Check for duplicate pages, but don't check the checksum as the list is not fully populated */
+			_CheckFreelist(psFreeList, ui32CurrentStackTop+1, 0, &ui64CheckSum);
+		}
+	}
+
+	/* Destroy FW structures */
+	RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc);
+	DevmemFwFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc);
+
+	/* Remove grow shrink blocks */
+	while (!dllist_is_empty(&psFreeList->sMemoryBlockHead))
+	{
+		eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead, psFreeList);
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+
+	/* Remove initial PB block */
+	eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockInitHead, psFreeList);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* consistency checks */
+	PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead));
+	PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0);
+
+	SyncPrimFree(psFreeList->psCleanupSync);
+
+	/* free Freelist */
+	OSFreeMem(psFreeList);
+
+	return eError;
+}
+
+
+
+/*
+	RGXAddBlockToFreeListKM
+*/
+
+IMG_EXPORT
+PVRSRV_ERROR RGXAddBlockToFreeListKM(RGX_FREELIST *psFreeList,
+										IMG_UINT32 ui32NumPages)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if we have reference to freelist's PMR */
+	if (psFreeList->psFreeListPMR == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Freelist is not configured for grow"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* grow freelist */
+	eError = RGXGrowFreeList(psFreeList,
+	                         ui32NumPages,
+	                         &psFreeList->sMemoryBlockHead,
+	                         IMG_FALSE);
+
+	if (eError == PVRSRV_OK)
+	{
+		/* update freelist data in firmware */
+		_UpdateFwFreelistSize(psFreeList, IMG_TRUE, ui32NumPages);
+
+		psFreeList->ui32NumGrowReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+		/* Update Stats */
+		PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App) */
+		                               0,
+		                               psFreeList->ui32InitFLPages,
+		                               psFreeList->ui32NumHighPages,
+		                               psFreeList->ownerPid);
+#endif
+	}
+
+	return eError;
+}
+
+/*
+	RGXRemoveBlockFromFreeListKM
+*/
+
+IMG_EXPORT
+PVRSRV_ERROR RGXRemoveBlockFromFreeListKM(RGX_FREELIST *psFreeList)
+{
+	PVRSRV_ERROR eError;
+
+	/*
+	 * Make sure the pages part of the memory block are not in use anymore.
+	 * Instruct the firmware to update the freelist pointers accordingly.
+	 */
+
+	eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead,
+								psFreeList);
+
+	return eError;
+}
+
+
+/*
+	RGXCreateRenderTarget
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRenderTarget(CONNECTION_DATA      *psConnection,
+                                   PVRSRV_DEVICE_NODE	*psDeviceNode,
+								   IMG_DEV_VIRTADDR		psVHeapTableDevVAddr,
+								   RGX_RT_CLEANUP_DATA 	**ppsCleanupData,
+								   IMG_UINT32			*sRenderTargetFWDevVAddr)
+{
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+	RGXFWIF_RENDER_TARGET	*psRenderTarget;
+	RGXFWIF_DEV_VIRTADDR	pFirmwareAddr;
+	PVRSRV_RGXDEV_INFO 		*psDevInfo = psDeviceNode->pvDevice;
+	RGX_RT_CLEANUP_DATA		*psCleanupData;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psCleanupData = OSAllocZMem(sizeof(*psCleanupData));
+	if (psCleanupData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_out;
+	}
+
+	psCleanupData->psDeviceNode = psDeviceNode;
+	/*
+	 * This FW render target context is only mapped into the kernel for initialisation.
+	 * Otherwise this allocation is only used by the FW.
+	 * Therefore the GPU cache doesn't need coherency, and write-combine
+	 * suffices on the CPU side (the WC buffer is flushed at the first TA kick).
+	 */
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(*psRenderTarget),
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+							"FwRenderTarget",
+							&psCleanupData->psRenderTargetMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRenderTarget: DevmemAllocate for Render Target failed"));
+		goto err_free;
+	}
+	RGXSetFirmwareAddress(&pFirmwareAddr, psCleanupData->psRenderTargetMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+	*sRenderTargetFWDevVAddr = pFirmwareAddr.ui32Addr;
+
+	eError = DevmemAcquireCpuVirtAddr(psCleanupData->psRenderTargetMemDesc, (void **)&psRenderTarget);
+	PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", err_fwalloc);
+
+	psRenderTarget->psVHeapTableDevVAddr = psVHeapTableDevVAddr;
+	psRenderTarget->bTACachesNeedZeroing = IMG_FALSE;
+	PDUMPCOMMENT("Dump RenderTarget");
+	DevmemPDumpLoadMem(psCleanupData->psRenderTargetMemDesc, 0, sizeof(*psRenderTarget), PDUMP_FLAGS_CONTINUOUS);
+	DevmemReleaseCpuVirtAddr(psCleanupData->psRenderTargetMemDesc);
+
+	*ppsCleanupData = psCleanupData;
+
+err_out:
+	return eError;
+
+err_free:
+	OSFreeMem(psCleanupData);
+	goto err_out;
+
+err_fwalloc:
+	/* The FW address was assigned before the CPU map attempt; release it too */
+	RGXUnsetFirmwareAddress(psCleanupData->psRenderTargetMemDesc);
+	DevmemFwFree(psDevInfo, psCleanupData->psRenderTargetMemDesc);
+	goto err_free;
+
+}
+
+
+/*
+	RGXDestroyRenderTarget
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRenderTarget(RGX_RT_CLEANUP_DATA *psCleanupData)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = psCleanupData->psDeviceNode;
+
+	RGXUnsetFirmwareAddress(psCleanupData->psRenderTargetMemDesc);
+
+	/*
+		Note:
+		Once RT cleanup is available in the FW, call that instead
+	*/
+	/* Flush the SLC before freeing */
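+	/* The FW may still hold dirty SLC lines for this allocation; flushing and
+	 * invalidating ensures nothing is written back after the memory is freed. */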
+	{
+		RGXFWIF_KCCB_CMD sFlushInvalCmd;
+		PVRSRV_ERROR eError;
+
+		/* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
+#endif
+		sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
+
+		eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+											RGXFWIF_DM_GP,
+											&sFlushInvalCmd,
+											sizeof(sFlushInvalCmd),
+											PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXDestroyRenderTarget: Failed to schedule SLC flush command with error (%u)", eError));
+		}
+		else
+		{
+			/* Wait for the SLC flush to complete */
+			eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXDestroyRenderTarget: SLC flush and invalidate aborted with error (%u)", eError));
+			}
+		}
+	}
+
+	DevmemFwFree(psDeviceNode->pvDevice, psCleanupData->psRenderTargetMemDesc);
+	OSFreeMem(psCleanupData);
+	return PVRSRV_OK;
+}
+
+/*
+	RGXCreateZSBuffer
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
+                                 PVRSRV_DEVICE_NODE	*psDeviceNode,
+                                 DEVMEMINT_RESERVATION 	*psReservation,
+                                 PMR 					*psPMR,
+                                 PVRSRV_MEMALLOCFLAGS_T 	uiMapFlags,
+                                 RGX_ZSBUFFER_DATA **ppsZSBuffer,
+                                 IMG_UINT32 *pui32ZSBufferFWDevVAddr)
+{
+	PVRSRV_ERROR				eError;
+	PVRSRV_RGXDEV_INFO 			*psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_FWZSBUFFER			*psFWZSBuffer;
+	RGX_ZSBUFFER_DATA			*psZSBuffer;
+	DEVMEM_MEMDESC				*psFWZSBufferMemDesc;
+	IMG_BOOL					bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? IMG_TRUE : IMG_FALSE;
+
+	/* Allocate host data structure */
+	psZSBuffer = OSAllocZMem(sizeof(*psZSBuffer));
+	if (psZSBuffer == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to allocate cleanup data structure for ZS-Buffer"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocCleanup;
+	}
+
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psZSBuffer->psCleanupSync,
+						   "ta3d zs buffer cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto ErrorSyncAlloc;
+	}
+
+	/* Populate Host data */
+	psZSBuffer->psDevInfo = psDevInfo;
+	psZSBuffer->psReservation = psReservation;
+	psZSBuffer->psPMR = psPMR;
+	psZSBuffer->uiMapFlags = uiMapFlags;
+	psZSBuffer->ui32RefCount = 0;
+	psZSBuffer->bOnDemand = bOnDemand;
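+	/* On-demand ZS-Buffers are tracked in a device-wide list so that later
+	 * FW backing/unbacking requests can look them up by ID (see FindZSBuffer). */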
+	if (bOnDemand)
+	{
+		psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++;
+		psZSBuffer->psMapping = NULL;
+
+		OSLockAcquire(psDevInfo->hLockZSBuffer);
+		dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode);
+		OSLockRelease(psDevInfo->hLockZSBuffer);
+	}
+
+	/* Allocate firmware memory for ZS-Buffer. */
+	PDUMPCOMMENT("Allocate firmware ZS-Buffer data structure");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(*psFWZSBuffer),
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+							"FwZSBuffer",
+							&psFWZSBufferMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to allocate firmware ZS-Buffer (%u)", eError));
+		goto ErrorAllocFWZSBuffer;
+	}
+	psZSBuffer->psZSBufferMemDesc = psFWZSBufferMemDesc;
+
+	/* Temporarily map the firmware render context to the kernel. */
+	eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc,
+                                      (void **)&psFWZSBuffer);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to map firmware ZS-Buffer (%u)", eError));
+		goto ErrorAcquireFWZSBuffer;
+	}
+
+	/* Populate FW ZS-Buffer data structure */
+	psFWZSBuffer->bOnDemand = bOnDemand;
+	psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_ZSBUFFER_UNBACKED : RGXFWIF_ZSBUFFER_BACKED;
+	psFWZSBuffer->ui32ZSBufferID = psZSBuffer->ui32ZSBufferID;
+
+	/* Get firmware address of ZS-Buffer. */
+	RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+
+	/* Dump the ZS-Buffer and the memory content */
+	PDUMPCOMMENT("Dump firmware ZS-Buffer");
+	DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS);
+
+	/* Release address acquired above. */
+	DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);
+
+
+	/* define return value */
+	*ppsZSBuffer = psZSBuffer;
+	*pui32ZSBufferFWDevVAddr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)",
+							psZSBuffer,
+							(bOnDemand) ? "On-Demand": "Up-front"));
+
+	psZSBuffer->owner = OSGetCurrentClientProcessIDKM();
+
+	return PVRSRV_OK;
+
+	/* error handling */
+
+ErrorAcquireFWZSBuffer:
+	DevmemFwFree(psDevInfo, psFWZSBufferMemDesc);
+
+ErrorAllocFWZSBuffer:
+	if (bOnDemand)
+	{
+		/* Undo the earlier insertion into the deferred-allocation list */
+		OSLockAcquire(psDevInfo->hLockZSBuffer);
+		dllist_remove_node(&psZSBuffer->sNode);
+		OSLockRelease(psDevInfo->hLockZSBuffer);
+	}
+	SyncPrimFree(psZSBuffer->psCleanupSync);
+
+ErrorSyncAlloc:
+	OSFreeMem(psZSBuffer);
+
+ErrorAllocCleanup:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+	RGXDestroyZSBuffer
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+	POS_LOCK hLockZSBuffer;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psZSBuffer);
+	hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+	/* Request ZS Buffer cleanup */
+	eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo,
+										psZSBuffer->sZSBufferFWDevVAddr,
+										psZSBuffer->psCleanupSync);
+	if (eError != PVRSRV_ERROR_RETRY)
+	{
+		/* Free the firmware render context. */
+		RGXUnsetFirmwareAddress(psZSBuffer->psZSBufferMemDesc);
+		DevmemFwFree(psZSBuffer->psDevInfo, psZSBuffer->psZSBufferMemDesc);
+
+		/* Remove Deferred Allocation from list */
+		if (psZSBuffer->bOnDemand)
+		{
+			OSLockAcquire(hLockZSBuffer);
+			PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode));
+			dllist_remove_node(&psZSBuffer->sNode);
+			OSLockRelease(hLockZSBuffer);
+		}
+
+		SyncPrimFree(psZSBuffer->psCleanupSync);
+
+		PVR_ASSERT(psZSBuffer->ui32RefCount == 0);
+
+		PVR_DPF((PVR_DBG_MESSAGE,"ZS-Buffer [%p] destroyed",psZSBuffer));
+
+		/* Free ZS-Buffer host data structure */
+		OSFreeMem(psZSBuffer);
+
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+	POS_LOCK hLockZSBuffer;
+	PVRSRV_ERROR eError;
+
+	if (!psZSBuffer)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (!psZSBuffer->bOnDemand)
+	{
+		/* Only deferred allocations can be populated */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE,"ZS Buffer [%p, ID=0x%08x]: Physical backing requested",
+								psZSBuffer,
+								psZSBuffer->ui32ZSBufferID));
+	hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
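+	/* Physical backing is created only on the 0 -> 1 refcount transition;
+	 * subsequent callers just take another reference on the existing mapping. */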
+	OSLockAcquire(hLockZSBuffer);
+
+	if (psZSBuffer->ui32RefCount == 0)
+	{
+		if (psZSBuffer->bOnDemand)
+		{
+			IMG_HANDLE hDevmemHeap;
+
+			PVR_ASSERT(psZSBuffer->psMapping == NULL);
+
+			/* Get Heap */
+			eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap);
+			PVR_ASSERT(eError == PVRSRV_OK);
+
+			eError = DevmemIntMapPMR(hDevmemHeap,
+									psZSBuffer->psReservation,
+									psZSBuffer->psPMR,
+									psZSBuffer->uiMapFlags,
+									&psZSBuffer->psMapping);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Unable to populate ZS Buffer [%p, ID=0x%08x] with error %u",
+										psZSBuffer,
+										psZSBuffer->ui32ZSBufferID,
+										eError));
+				OSLockRelease(hLockZSBuffer);
+				return eError;
+			}
+			PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired",
+										psZSBuffer,
+										psZSBuffer->ui32ZSBufferID));
+		}
+	}
+
+	/* Increase refcount */
+	psZSBuffer->ui32RefCount++;
+
+	OSLockRelease(hLockZSBuffer);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+					RGX_POPULATION **ppsPopulation)
+{
+	RGX_POPULATION *psPopulation;
+	PVRSRV_ERROR eError;
+
+	psZSBuffer->ui32NumReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	PVRSRVStatsUpdateZSBufferStats(1, 0, psZSBuffer->owner);
+#endif
+
+	/* Do the backing */
+	eError = RGXBackingZSBuffer(psZSBuffer);
+	if (eError != PVRSRV_OK)
+	{
+		goto OnErrorBacking;
+	}
+
+	/* Create the handle to the backing */
+	psPopulation = OSAllocMem(sizeof(*psPopulation));
+	if (psPopulation == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto OnErrorAlloc;
+	}
+
+	psPopulation->psZSBuffer = psZSBuffer;
+
+	/* return value */
+	*ppsPopulation = psPopulation;
+
+	return PVRSRV_OK;
+
+OnErrorAlloc:
+	RGXUnbackingZSBuffer(psZSBuffer);
+
+OnErrorBacking:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+	POS_LOCK hLockZSBuffer;
+	PVRSRV_ERROR eError;
+
+	if (!psZSBuffer)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PVR_ASSERT(psZSBuffer->ui32RefCount);
+
+	PVR_DPF((PVR_DBG_MESSAGE,"ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested",
+								psZSBuffer,
+								psZSBuffer->ui32ZSBufferID));
+
+	hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+	OSLockAcquire(hLockZSBuffer);
+
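+	/* The PMR mapping is only torn down on the 1 -> 0 refcount transition. */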
+	if (psZSBuffer->bOnDemand)
+	{
+		if (psZSBuffer->ui32RefCount == 1)
+		{
+			PVR_ASSERT(psZSBuffer->psMapping);
+
+			eError = DevmemIntUnmapPMR(psZSBuffer->psMapping);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Unable to unpopulate ZS Buffer [%p, ID=0x%08x] with error %u",
+										psZSBuffer,
+										psZSBuffer->ui32ZSBufferID,
+										eError));
+				OSLockRelease(hLockZSBuffer);
+				return eError;
+			}
+
+			PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed",
+										psZSBuffer,
+										psZSBuffer->ui32ZSBufferID));
+		}
+	}
+
+	/* Decrease refcount */
+	psZSBuffer->ui32RefCount--;
+
+	OSLockRelease(hLockZSBuffer);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation)
+{
+	PVRSRV_ERROR eError;
+
+	if (!psPopulation)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	OSFreeMem(psPopulation);
+
+	return PVRSRV_OK;
+}
+
+static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID)
+{
+	DLLIST_NODE *psNode, *psNext;
+	RGX_ZSBUFFER_DATA *psZSBuffer = NULL;
+
+	OSLockAcquire(psDevInfo->hLockZSBuffer);
+
+	dllist_foreach_node(&psDevInfo->sZSBufferHead, psNode, psNext)
+	{
+		RGX_ZSBUFFER_DATA *psThisZSBuffer = IMG_CONTAINER_OF(psNode, RGX_ZSBUFFER_DATA, sNode);
+
+		if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID)
+		{
+			psZSBuffer = psThisZSBuffer;
+			break;
+		}
+	}
+
+	OSLockRelease(psDevInfo->hLockZSBuffer);
+	return psZSBuffer;
+}
+
+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+									  IMG_UINT32 ui32ZSBufferID)
+{
+	RGX_ZSBUFFER_DATA *psZSBuffer;
+	RGXFWIF_KCCB_CMD sTACCBCmd;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psDevInfo);
+
+	/* scan all deferred allocations */
+	psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);
+
+	if (psZSBuffer)
+	{
+		IMG_BOOL bBackingDone = IMG_TRUE;
+
+		/* Populate ZLS */
+		eError = RGXBackingZSBuffer(psZSBuffer);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Populating ZS-Buffer failed with error %u (ID = 0x%08x)", eError, ui32ZSBufferID));
+			bBackingDone = IMG_FALSE;
+		}
+
+		/* send confirmation */
+		sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE;
+		sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+		sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone;
+
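+		/* RGXScheduleCommand returns PVRSRV_ERROR_RETRY while the command
+		 * cannot be queued (typically a full kernel CCB); back off briefly
+		 * and retry until the timeout expires. */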
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psDevInfo,
+										RGXFWIF_DM_TA,
+										&sTACCBCmd,
+										sizeof(sTACCBCmd),
+										0,
+										PDUMP_FLAGS_NONE);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		/* The kernel CCB should never fill up, as the FW processes commands right away */
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		psZSBuffer->ui32NumReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+		PVRSRVStatsUpdateZSBufferStats(0, 1, psZSBuffer->owner);
+#endif
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)", ui32ZSBufferID));
+	}
+}
+
+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+											IMG_UINT32 ui32ZSBufferID)
+{
+	RGX_ZSBUFFER_DATA *psZSBuffer;
+	RGXFWIF_KCCB_CMD sTACCBCmd;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psDevInfo);
+
+	/* scan all deferred allocations */
+	psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);
+
+	if (psZSBuffer)
+	{
+		/* Unpopulate ZLS */
+		eError = RGXUnbackingZSBuffer(psZSBuffer);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"UnPopulating ZS-Buffer failed failed with error %u (ID = 0x%08x)", eError, ui32ZSBufferID));
+			PVR_ASSERT(IMG_FALSE);
+		}
+
+		/* send confirmation */
+		sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE;
+		sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+		sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE;
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psDevInfo,
+										RGXFWIF_DM_TA,
+										&sTACCBCmd,
+										sizeof(sTACCBCmd),
+										0,
+										PDUMP_FLAGS_NONE);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		/* The kernel CCB should never fill up, as the FW processes commands right away */
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)", ui32ZSBufferID));
+	}
+}
+
+static
+PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection,
+							  PVRSRV_DEVICE_NODE *psDeviceNode,
+							  DEVMEM_MEMDESC *psAllocatedMemDesc,
+							  IMG_UINT32 ui32AllocatedOffset,
+							  DEVMEM_MEMDESC *psFWMemContextMemDesc,
+							  IMG_DEV_VIRTADDR sVDMCallStackAddr,
+							  IMG_UINT32 ui32Priority,
+							  RGX_COMMON_CONTEXT_INFO *psInfo,
+							  RGX_SERVER_RC_TA_DATA *psTAData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_TACTX_STATE *psContextState;
+	PVRSRV_ERROR eError;
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	*/
+	PDUMPCOMMENT("Allocate RGX firmware TA context suspend state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							  sizeof(RGXFWIF_TACTX_STATE),
+							  RGX_FWCOMCTX_ALLOCFLAGS,
+							  "FwTAContextState",
+							  &psTAData->psContextStateMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+				eError));
+		goto fail_tacontextsuspendalloc;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc,
+                                      (void **)&psContextState);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to map firmware render context state (%u)",
+				eError));
+		goto fail_suspendcpuvirtacquire;
+	}
+	psContextState->uTAReg_VDM_CALL_STACK_POINTER_Init = sVDMCallStackAddr.uiAddr;
+	DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc);
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 REQ_TYPE_TA,
+									 RGXFWIF_DM_TA,
+									 psAllocatedMemDesc,
+									 ui32AllocatedOffset,
+									 psFWMemContextMemDesc,
+									 psTAData->psContextStateMemDesc,
+									 RGX_TA_CCB_SIZE_LOG2,
+									 ui32Priority,
+									 psInfo,
+									 &psTAData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to init TA fw common context (%u)",
+				eError));
+		goto fail_tacommoncontext;
+	}
+
+	/*
+	 * Dump the FW TA context suspend state buffer
+	 */
+	PDUMPCOMMENT("Dump the TA context suspend state buffer");
+	DevmemPDumpLoadMem(psTAData->psContextStateMemDesc,
+					   0,
+					   sizeof(RGXFWIF_TACTX_STATE),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	psTAData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_tacommoncontext:
+fail_suspendcpuvirtacquire:
+	DevmemFwFree(psDevInfo, psTAData->psContextStateMemDesc);
+fail_tacontextsuspendalloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+static
+PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection,
+							  PVRSRV_DEVICE_NODE *psDeviceNode,
+							  DEVMEM_MEMDESC *psAllocatedMemDesc,
+							  IMG_UINT32 ui32AllocatedOffset,
+							  DEVMEM_MEMDESC *psFWMemContextMemDesc,
+							  IMG_UINT32 ui32Priority,
+							  RGX_COMMON_CONTEXT_INFO *psInfo,
+							  RGX_SERVER_RC_3D_DATA *ps3DData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR eError;
+
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	*/
+	PDUMPCOMMENT("Allocate RGX firmware 3D context suspend state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							  sizeof(RGXFWIF_3DCTX_STATE),
+							  RGX_FWCOMCTX_ALLOCFLAGS,
+							  "Fw3DContextState",
+							  &ps3DData->psContextStateMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+				eError));
+		goto fail_3dcontextsuspendalloc;
+	}
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 REQ_TYPE_3D,
+									 RGXFWIF_DM_3D,
+									 psAllocatedMemDesc,
+									 ui32AllocatedOffset,
+									 psFWMemContextMemDesc,
+									 ps3DData->psContextStateMemDesc,
+									 RGX_3D_CCB_SIZE_LOG2,
+									 ui32Priority,
+									 psInfo,
+									 &ps3DData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to init 3D fw common context (%u)",
+				eError));
+		goto fail_3dcommoncontext;
+	}
+
+	/*
+	 * Dump the FW 3D context suspend state buffer
+	 */
+	PDUMPCOMMENT("Dump the 3D context suspend state buffer");
+	DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc,
+					   0,
+					   sizeof(RGXFWIF_3DCTX_STATE),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	ps3DData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_3dcommoncontext:
+	DevmemFwFree(psDevInfo, ps3DData->psContextStateMemDesc);
+fail_3dcontextsuspendalloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+
+/*
+ * PVRSRVRGXCreateRenderContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA				*psConnection,
+											PVRSRV_DEVICE_NODE			*psDeviceNode,
+											IMG_UINT32					ui32Priority,
+											IMG_DEV_VIRTADDR			sVDMCallStackAddr,
+											IMG_UINT32					ui32FrameworkRegisterSize,
+											IMG_PBYTE					pabyFrameworkRegisters,
+											IMG_HANDLE					hMemCtxPrivData,
+											RGX_SERVER_RENDER_CONTEXT	**ppsRenderContext)
+{
+	PVRSRV_ERROR				eError;
+	PVRSRV_RGXDEV_INFO 			*psDevInfo = psDeviceNode->pvDevice;
+	RGX_SERVER_RENDER_CONTEXT	*psRenderContext;
+	DEVMEM_MEMDESC				*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_COMMON_CONTEXT_INFO		sInfo;
+
+	/* Prepare cleanup structure */
+	*ppsRenderContext = NULL;
+	psRenderContext = OSAllocZMem(sizeof(*psRenderContext));
+	if (psRenderContext == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psRenderContext->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock;
+	}
+#endif
+
+	psRenderContext->psDeviceNode = psDeviceNode;
+
+	/*
+		Create the FW render context, this has the TA and 3D FW common
+		contexts embedded within it
+	*/
+	eError = DevmemFwAllocate(psDevInfo,
+							  sizeof(RGXFWIF_FWRENDERCONTEXT),
+							  RGX_FWCOMCTX_ALLOCFLAGS,
+							  "FwRenderContext",
+							  &psRenderContext->psFWRenderContextMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_fwrendercontext;
+	}
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	WorkEstRCInit(&(psRenderContext->sWorkEstData));
+#endif
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psRenderContext->psCleanupSync,
+						   "ta3d render context cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto fail_syncalloc;
+	}
+
+	/*
+	 * Create the FW framework buffer
+	 */
+	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+										&psRenderContext->psFWFrameworkMemDesc,
+										ui32FrameworkRegisterSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate firmware GPU framework state (%u)",
+				eError));
+		goto fail_frameworkcreate;
+	}
+
+	/* Copy the Framework client data into the framework buffer */
+	eError = PVRSRVRGXFrameworkCopyCommand(psRenderContext->psFWFrameworkMemDesc,
+										   pabyFrameworkRegisters,
+										   ui32FrameworkRegisterSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to populate the framework buffer (%u)",
+				eError));
+		goto fail_frameworkcopy;
+	}
+
+	sInfo.psFWFrameworkMemDesc = psRenderContext->psFWFrameworkMemDesc;
+
+	eError = _CreateTAContext(psConnection,
+							  psDeviceNode,
+							  psRenderContext->psFWRenderContextMemDesc,
+							  offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),
+							  psFWMemContextMemDesc,
+							  sVDMCallStackAddr,
+							  ui32Priority,
+							  &sInfo,
+							  &psRenderContext->sTAData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_tacontext;
+	}
+
+	eError = _Create3DContext(psConnection,
+							  psDeviceNode,
+							  psRenderContext->psFWRenderContextMemDesc,
+							  offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),
+							  psFWMemContextMemDesc,
+							  ui32Priority,
+							  &sInfo,
+							  &psRenderContext->s3DData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_3dcontext;
+	}
+
+	SyncAddrListInit(&psRenderContext->sSyncAddrListTAFence);
+	SyncAddrListInit(&psRenderContext->sSyncAddrListTAUpdate);
+	SyncAddrListInit(&psRenderContext->sSyncAddrList3DFence);
+	SyncAddrListInit(&psRenderContext->sSyncAddrList3DUpdate);
+
+	{
+		PVRSRV_RGXDEV_INFO			*psDevInfo = psDeviceNode->pvDevice;
+
+		OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+		dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+		OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+	}
+
+	*ppsRenderContext = psRenderContext;
+	return PVRSRV_OK;
+
+fail_3dcontext:
+	_DestroyTAContext(&psRenderContext->sTAData,
+					  psDeviceNode,
+					  psRenderContext->psCleanupSync);
+fail_tacontext:
+fail_frameworkcopy:
+	DevmemFwFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+	SyncPrimFree(psRenderContext->psCleanupSync);
+fail_syncalloc:
+	DevmemFwFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
+fail_fwrendercontext:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psRenderContext->hLock);
+fail_lock:
+#endif
+	OSFreeMem(psRenderContext);
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+/*
+ * PVRSRVRGXDestroyRenderContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
+{
+	PVRSRV_ERROR				eError;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psRenderContext->psDeviceNode->pvDevice;
+	RGXFWIF_FWRENDERCONTEXT	*psFWRenderContext;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	IMG_UINT32 ui32WorkEstCCBSubmitted;
+#endif
+
+	/* Remove the node from the list before calling destroy: a successful
+	 * destroy invalidates the node, so it must only be re-added if destroy
+	 * fails.
+	 */
+	OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+	dllist_remove_node(&(psRenderContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+
+	/* Cleanup the TA if we haven't already */
+	if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0)
+	{
+		eError = _DestroyTAContext(&psRenderContext->sTAData,
+								   psRenderContext->psDeviceNode,
+								   psRenderContext->psCleanupSync);
+		if (eError == PVRSRV_OK)
+		{
+			psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE;
+		}
+		else
+		{
+			goto e0;
+		}
+	}
+
+	/* Cleanup the 3D if we haven't already */
+	if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0)
+	{
+		eError = _Destroy3DContext(&psRenderContext->s3DData,
+								   psRenderContext->psDeviceNode,
+								   psRenderContext->psCleanupSync);
+		if (eError == PVRSRV_OK)
+		{
+			psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE;
+		}
+		else
+		{
+			goto e0;
+		}
+	}
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+	                                  (void **)&psFWRenderContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXDestroyRenderContextKM: Failed to map firmware render context (%u)",
+		         eError));
+		goto e0;
+	}
+
+	ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;
+
+	DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+
+	/* Check that all of the workload estimation CCB commands for this
+	 * context have been read by the FW
+	 */
+	if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
+	{
+		eError = PVRSRV_ERROR_RETRY;
+		goto e0;
+	}
+#endif
+
+	/*
+		Only if both TA and 3D contexts have been cleaned up can we
+		free the shared resources
+	*/
+	if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE))
+	{
+		/* Update SPM statistics */
+		eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+	                                      (void **)&psFWRenderContext);
+		if (eError == PVRSRV_OK)
+		{
+			DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXDestroyRenderContextKM: Failed to map firmware render context (%u)",
+					eError));
+		}
+
+		/* Free the framework buffer */
+		DevmemFwFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
+
+		/* Free the firmware render context */
+		DevmemFwFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
+
+		/* Free the cleanup sync */
+		SyncPrimFree(psRenderContext->psCleanupSync);
+
+		SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAFence);
+		SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAUpdate);
+		SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DFence);
+		SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		WorkEstRCDeInit(&(psRenderContext->sWorkEstData),
+                        psDevInfo);
+#endif
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockDestroy(psRenderContext->hLock);
+#endif
+
+		OSFreeMem(psRenderContext);
+	}
+
+	return PVRSRV_OK;
+
+e0:
+	OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+	return eError;
+}
+
+
+/* TODO: these used to be local on the stack, and we managed to blow the
+ * kernel stack. This 46-argument function needs to be sorted out.
+ */
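+/* With the global bridge lock held for every kick, submissions are fully
+ * serialised, so a single static instance of these helper arrays cannot be
+ * used concurrently. */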
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+static RGX_CCB_CMD_HELPER_DATA gasTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS];
+static RGX_CCB_CMD_HELPER_DATA gas3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS];
+#endif
+
+/*
+ * PVRSRVRGXKickTA3DKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT	*psRenderContext,
+								 IMG_UINT32					ui32ClientCacheOpSeqNum,
+								 IMG_UINT32					ui32ClientTAFenceCount,
+								 SYNC_PRIMITIVE_BLOCK		**apsClientTAFenceSyncPrimBlock,
+								 IMG_UINT32					*paui32ClientTAFenceSyncOffset,
+								 IMG_UINT32					*paui32ClientTAFenceValue,
+								 IMG_UINT32					ui32ClientTAUpdateCount,
+								 SYNC_PRIMITIVE_BLOCK		**apsClientTAUpdateSyncPrimBlock,
+								 IMG_UINT32					*paui32ClientTAUpdateSyncOffset,
+								 IMG_UINT32					*paui32ClientTAUpdateValue,
+								 IMG_UINT32					ui32ServerTASyncPrims,
+								 IMG_UINT32					*paui32ServerTASyncFlags,
+								 SERVER_SYNC_PRIMITIVE 		**pasServerTASyncs,
+								 IMG_UINT32					ui32Client3DFenceCount,
+								 SYNC_PRIMITIVE_BLOCK		**apsClient3DFenceSyncPrimBlock,
+								 IMG_UINT32					*paui32Client3DFenceSyncOffset,
+								 IMG_UINT32					*paui32Client3DFenceValue,
+								 IMG_UINT32					ui32Client3DUpdateCount,
+								 SYNC_PRIMITIVE_BLOCK		**apsClient3DUpdateSyncPrimBlock,
+								 IMG_UINT32					*paui32Client3DUpdateSyncOffset,
+								 IMG_UINT32					*paui32Client3DUpdateValue,
+								 IMG_UINT32					ui32Server3DSyncPrims,
+								 IMG_UINT32					*paui32Server3DSyncFlags,
+								 SERVER_SYNC_PRIMITIVE 		**pasServer3DSyncs,
+								 SYNC_PRIMITIVE_BLOCK		*psPRFenceSyncPrimBlock,
+								 IMG_UINT32					ui32PRFenceSyncOffset,
+								 IMG_UINT32					ui32PRFenceValue,
+								 PVRSRV_FENCE				iCheckFence,
+								 PVRSRV_TIMELINE			iUpdateTimeline,
+								 PVRSRV_FENCE				*piUpdateFence,
+								 IMG_CHAR					szFenceName[32],
+								 IMG_UINT32					ui32TACmdSize,
+								 IMG_PBYTE					pui8TADMCmd,
+								 IMG_UINT32					ui323DPRCmdSize,
+								 IMG_PBYTE					pui83DPRDMCmd,
+								 IMG_UINT32					ui323DCmdSize,
+								 IMG_PBYTE					pui83DDMCmd,
+								 IMG_UINT32					ui32ExtJobRef,
+								 IMG_BOOL					bLastTAInScene,
+								 IMG_BOOL					bKickTA,
+								 IMG_BOOL					bKickPR,
+								 IMG_BOOL					bKick3D,
+								 IMG_BOOL					bAbort,
+								 IMG_UINT32					ui32PDumpFlags,
+								 RGX_RTDATA_CLEANUP_DATA	*psRTDataCleanup,
+								 RGX_ZSBUFFER_DATA		*psZBuffer,
+								 RGX_ZSBUFFER_DATA		*psSBuffer,
+								 RGX_ZSBUFFER_DATA		*psMSAAScratchBuffer,
+								 IMG_BOOL			bCommitRefCountsTA,
+								 IMG_BOOL			bCommitRefCounts3D,
+								 IMG_BOOL			*pbCommittedRefCountsTA,
+								 IMG_BOOL			*pbCommittedRefCounts3D,
+								 IMG_UINT32			ui32SyncPMRCount,
+								 IMG_UINT32			*paui32SyncPMRFlags,
+								 PMR				**ppsSyncPMRs,
+								 IMG_UINT32			ui32RenderTargetSize,
+								 IMG_UINT32			ui32NumberOfDrawCalls,
+								 IMG_UINT32			ui32NumberOfIndices,
+								 IMG_UINT32			ui32NumberOfMRTs,
+								 IMG_UINT64			ui64DeadlineInus,
+								 IMG_DEV_VIRTADDR	sRobustnessResetReason)
+{
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	/* if the bridge lock is present then we use the singular/global helper structures */
+	RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = gasTACmdHelperData;
+	RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = gas3DCmdHelperData;
+#else
+	/* if there is no bridge lock then we use the per-context helper structures */
+	RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = psRenderContext->asTACmdHelperData;
+	RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = psRenderContext->as3DCmdHelperData;
+#endif
+
+	IMG_UINT32				ui32TACmdCount=0;
+	IMG_UINT32				ui323DCmdCount=0;
+	IMG_UINT32				ui32TACmdOffset=0;
+	IMG_UINT32				ui323DCmdOffset=0;
+	RGXFWIF_UFO				sPRUFO;
+	IMG_UINT32				i;
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+	PVRSRV_ERROR			eError2;
+	PVRSRV_FENCE			iUpdateFence = PVRSRV_FENCE_INVALID;
+	IMG_UINT32				ui32JobId;
+
+	IMG_UINT32				ui32ClientPRUpdateCount = 0;
+	PRGXFWIF_UFO_ADDR		*pauiClientPRUpdateUFOAddress = NULL;
+	IMG_UINT32				*paui32ClientPRUpdateValue = NULL;
+
+	PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+	PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+	PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+
+	PRGXFWIF_UFO_ADDR			*pauiClientTAFenceUFOAddress = NULL;
+	PRGXFWIF_UFO_ADDR			*pauiClientTAUpdateUFOAddress = NULL;
+	PRGXFWIF_UFO_ADDR			*pauiClient3DFenceUFOAddress = NULL;
+	PRGXFWIF_UFO_ADDR			*pauiClient3DUpdateUFOAddress = NULL;
+	PRGXFWIF_UFO_ADDR			uiPRFenceUFOAddress;
+
+	IMG_UINT32               uiCheckFenceUID = 0;
+	IMG_UINT32               uiUpdateFenceUID = 0;
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+	/* Count of the number of TA and 3D update values (may differ from number of
+	 * TA and 3D updates later, as sync checkpoints do not need to specify a value)
+	 */
+	IMG_UINT32 ui32ClientPRUpdateValueCount = 0;
+	IMG_UINT32 ui32Client3DUpdateValueCount = ui32Client3DUpdateCount;
+	PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+	PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+	void *pvUpdateFenceFinaliseData = NULL;
+	IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE;
+	IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE;
+#if defined(SUPPORT_BUFFER_SYNC)
+	PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+	PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTA;
+	RGXFWIF_WORKEST_KICK_DATA sWorkloadKickData3D;
+	IMG_UINT32 ui32TACommandOffset = 0;
+	IMG_UINT32 ui323DCommandOffset = 0;
+	IMG_UINT32 ui32TACmdHeaderOffset = 0;
+	IMG_UINT32 ui323DCmdHeaderOffset = 0;
+	IMG_UINT32 ui323DFullRenderCommandOffset = 0;
+	IMG_UINT32 ui32TACmdOffsetWrapCheck = 0;
+	IMG_UINT32 ui323DCmdOffsetWrapCheck = 0;
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	PVRSRV_DEVICE_NODE *psDeviceNode = psRenderContext->psDeviceNode;
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	struct pvr_buffer_sync_append_data *psAppendData = NULL;
+#else
+	struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/* Android fd sync update info */
+	struct pvr_sync_append_data *psFDData = NULL;
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	if (iUpdateTimeline >= 0 && !piUpdateFence)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#else /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+	if (iUpdateTimeline >= 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: Providing update timeline (%d) in non-supporting driver",
+			__func__, iUpdateTimeline));
+	}
+	if (iCheckFence >= 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: Providing check fence (%d) in non-supporting driver",
+			__func__, iCheckFence));
+	}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	sWorkloadKickDataTA.ui64ReturnDataIndex = 0;
+	sWorkloadKickDataTA.ui64CyclesPrediction = 0;
+	sWorkloadKickData3D.ui64ReturnDataIndex = 0;
+	sWorkloadKickData3D.ui64CyclesPrediction = 0;
+#endif
+
+	ui32JobId = OSAtomicIncrement(&psRenderContext->hJobId);
+
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d", __FUNCTION__,
+	         ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ui32Client3DFenceCount, ui32Client3DUpdateCount));
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32ServerTASyncPrims=%d, ui32Server3DSyncPrims=%d", __FUNCTION__, ui32ServerTASyncPrims, ui32Server3DSyncPrims));
+
+	/* Ensure the string is null-terminated (Required for safety) */
+	szFenceName[31] = '\0';
+	*pbCommittedRefCountsTA = IMG_FALSE;
+	*pbCommittedRefCounts3D = IMG_FALSE;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psRenderContext->hLock);
+#endif
+
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAFence, %d fences)...", __func__, ui32ClientTAFenceCount));
+	eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAFence,
+										ui32ClientTAFenceCount,
+										apsClientTAFenceSyncPrimBlock,
+										paui32ClientTAFenceSyncOffset);
+	if (eError != PVRSRV_OK)
+	{
+		goto err_populate_sync_addr_list_ta_fence;
+	}
+
+	if (ui32ClientTAFenceCount)
+	{
+		pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+	}
+
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientTAFenceUFOAddress=<%p> ", __func__, (void*)pauiClientTAFenceUFOAddress));
+
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAUpdate, %d updates)...", __func__, ui32ClientTAUpdateCount));
+	eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAUpdate,
+										ui32ClientTAUpdateCount,
+										apsClientTAUpdateSyncPrimBlock,
+										paui32ClientTAUpdateSyncOffset);
+	if (eError != PVRSRV_OK)
+	{
+		goto err_populate_sync_addr_list_ta_update;
+	}
+
+	if (ui32ClientTAUpdateCount)
+	{
+		pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs;
+	}
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientTAUpdateUFOAddress=<%p> ", __func__, (void*)pauiClientTAUpdateUFOAddress));
+
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DFence, %d fences)...", __func__, ui32Client3DFenceCount));
+	eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DFence,
+										ui32Client3DFenceCount,
+										apsClient3DFenceSyncPrimBlock,
+										paui32Client3DFenceSyncOffset);
+	if (eError != PVRSRV_OK)
+	{
+		goto err_populate_sync_addr_list_3d_fence;
+	}
+
+	if (ui32Client3DFenceCount)
+	{
+		pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+	}
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DFenceUFOAddress=<%p> ", __func__, (void*)pauiClient3DFenceUFOAddress));
+
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DUpdate, %d updates)...", __func__, ui32Client3DUpdateCount));
+	eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DUpdate,
+										ui32Client3DUpdateCount,
+										apsClient3DUpdateSyncPrimBlock,
+										paui32Client3DUpdateSyncOffset);
+	if (eError != PVRSRV_OK)
+	{
+		goto err_populate_sync_addr_list_3d_update;
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	if (ui32Client3DUpdateCount || (PVRSRVIsTimelineValidKM(iUpdateTimeline) && piUpdateFence && bKick3D))
+#else
+	if (ui32Client3DUpdateCount)
+#endif
+	{
+		pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+	}
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DUpdateUFOAddress=<%p> ", __func__, (void*)pauiClient3DUpdateUFOAddress));
+
+	eError = SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock,
+									ui32PRFenceSyncOffset,
+									&uiPRFenceUFOAddress);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto err_pr_fence_address;
+	}
+
+#if (ENABLE_TA3D_UFO_DUMP == 1)
+	{
+		IMG_UINT32 ii;
+		PRGXFWIF_UFO_ADDR *psTmpClientTAFenceUFOAddress = pauiClientTAFenceUFOAddress;
+		IMG_UINT32 *pui32TmpClientTAFenceValue = paui32ClientTAFenceValue;
+		PRGXFWIF_UFO_ADDR *psTmpClientTAUpdateUFOAddress = pauiClientTAUpdateUFOAddress;
+		IMG_UINT32 *pui32TmpClientTAUpdateValue = paui32ClientTAUpdateValue;
+		PRGXFWIF_UFO_ADDR *psTmpClient3DFenceUFOAddress = pauiClient3DFenceUFOAddress;
+		IMG_UINT32 *pui32TmpClient3DFenceValue = paui32Client3DFenceValue;
+		PRGXFWIF_UFO_ADDR *psTmpClient3DUpdateUFOAddress = pauiClient3DUpdateUFOAddress;
+		IMG_UINT32 *pui32TmpClient3DUpdateValue = paui32Client3DUpdateValue;
+
+		PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After populating sync prims ~~~", __FUNCTION__));
+
+		/* Dump Fence syncs, Update syncs and PR Update syncs */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:", __FUNCTION__, ui32ClientTAFenceCount));
+		for (ii=0; ii<ui32ClientTAFenceCount; ii++)
+		{
+			if (psTmpClientTAFenceUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32ClientTAFenceCount, (void*)psTmpClientTAFenceUFOAddress, psTmpClientTAFenceUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32ClientTAFenceCount, (void*)psTmpClientTAFenceUFOAddress, psTmpClientTAFenceUFOAddress->ui32Addr, *pui32TmpClientTAFenceValue, *pui32TmpClientTAFenceValue));
+				pui32TmpClientTAFenceValue++;
+			}
+			psTmpClientTAFenceUFOAddress++;
+		}
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:", __FUNCTION__, ui32ClientTAUpdateCount));
+		for (ii=0; ii<ui32ClientTAUpdateCount; ii++)
+		{
+			if (psTmpClientTAUpdateUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32ClientTAUpdateCount, (void*)psTmpClientTAUpdateUFOAddress, psTmpClientTAUpdateUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __FUNCTION__, ii+1, ui32ClientTAUpdateCount, (void*)psTmpClientTAUpdateUFOAddress, psTmpClientTAUpdateUFOAddress->ui32Addr, *pui32TmpClientTAUpdateValue));
+				pui32TmpClientTAUpdateValue++;
+			}
+			psTmpClientTAUpdateUFOAddress++;
+		}
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:", __FUNCTION__, ui32Client3DFenceCount));
+		for (ii=0; ii<ui32Client3DFenceCount; ii++)
+		{
+			if (psTmpClient3DFenceUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32Client3DFenceCount, (void*)psTmpClient3DFenceUFOAddress, psTmpClient3DFenceUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d", __FUNCTION__, ii+1, ui32Client3DFenceCount, (void*)psTmpClient3DFenceUFOAddress, psTmpClient3DFenceUFOAddress->ui32Addr, *pui32TmpClient3DFenceValue));
+				pui32TmpClient3DFenceValue++;
+			}
+			psTmpClient3DFenceUFOAddress++;
+		}
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:", __FUNCTION__, ui32Client3DUpdateCount));
+		for (ii=0; ii<ui32Client3DUpdateCount; ii++)
+		{
+			if (psTmpClient3DUpdateUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32Client3DUpdateCount, (void*)psTmpClient3DUpdateUFOAddress, psTmpClient3DUpdateUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __FUNCTION__, ii+1, ui32Client3DUpdateCount, (void*)psTmpClient3DUpdateUFOAddress, psTmpClient3DUpdateUFOAddress->ui32Addr, *pui32TmpClient3DUpdateValue));
+				pui32TmpClient3DUpdateValue++;
+			}
+			psTmpClient3DUpdateUFOAddress++;
+		}
+	}
+#endif
+
+	/* Sanity check the server fences */
+	for (i=0;i<ui32ServerTASyncPrims;i++)
+	{
+		if (!(paui32ServerTASyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on TA) must fence", __FUNCTION__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psRenderContext->hLock);
+#endif
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+
+	for (i=0;i<ui32Server3DSyncPrims;i++)
+	{
+		if (!(paui32Server3DSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on 3D) must fence", __FUNCTION__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psRenderContext->hLock);
+#endif
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+
+	RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRenderContext->psDeviceNode->pvDevice,
+	                          & pPreAddr,
+	                          & pPostAddr,
+	                          & pRMWUFOAddr);
+
+	/*
+		Sanity check we have a PR kick if there are client or server fences
+	*/
+	if (!bKickPR && ((ui32Client3DFenceCount != 0) || (ui32Server3DSyncPrims != 0)))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: 3D fence (client or server) passed without a PR kick", __FUNCTION__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psRenderContext->hLock);
+#endif
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32SyncPMRCount)
+	{
+#if defined(SUPPORT_BUFFER_SYNC)
+		int err;
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		IMG_UINT32 ui32ClientIntUpdateCount = 0;
+		PRGXFWIF_UFO_ADDR *pauiClientIntUpdateUFOAddress = NULL;
+		IMG_UINT32 *paui32ClientIntUpdateValue = NULL;
+
+		if (!bKickTA)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync only supported for kicks including a TA",
+					 __FUNCTION__));
+			PVR_DPF((PVR_DBG_ERROR, "%s:   <--EXIT(%d)", __FUNCTION__, PVRSRV_ERROR_INVALID_PARAMS));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psRenderContext->hLock);
+#endif
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		if (!bKickPR)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync only supported for kicks including a PR",
+					 __FUNCTION__));
+			PVR_DPF((PVR_DBG_ERROR, "%s:   <--EXIT(%d)", __FUNCTION__, PVRSRV_ERROR_INVALID_PARAMS));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psRenderContext->hLock);
+#endif
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
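+		/* Buffer-sync updates are attached to whichever stage completes last:
+		 * the 3D when one is kicked, otherwise the PR. */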
+		if (bKick3D)
+		{
+			ui32ClientIntUpdateCount = ui32Client3DUpdateCount;
+			pauiClientIntUpdateUFOAddress = pauiClient3DUpdateUFOAddress;
+			paui32ClientIntUpdateValue = paui32Client3DUpdateValue;
+		}
+		else
+		{
+			ui32ClientIntUpdateCount = ui32ClientPRUpdateCount;
+			pauiClientIntUpdateUFOAddress = pauiClientPRUpdateUFOAddress;
+			paui32ClientIntUpdateValue = paui32ClientPRUpdateValue;
+		}
+
+		err = pvr_buffer_sync_append_start(psDeviceNode->psBufferSyncContext,
+										   ui32SyncPMRCount,
+										   ppsSyncPMRs,
+										   paui32SyncPMRFlags,
+										   ui32ClientTAFenceCount,
+										   pauiClientTAFenceUFOAddress,
+										   paui32ClientTAFenceValue,
+										   ui32ClientIntUpdateCount,
+										   pauiClientIntUpdateUFOAddress,
+										   paui32ClientIntUpdateValue,
+										   &psAppendData);
+		if (err)
+		{
+			eError = (err == -ENOMEM) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_DPF((PVR_DBG_ERROR, "%s:   pvr_buffer_sync_append_start failed (%d)", __FUNCTION__, eError));
+			goto fail_sync_append;
+		}
+
+		pvr_buffer_sync_append_checks_get(psAppendData,
+										  &ui32ClientTAFenceCount,
+										  &pauiClientTAFenceUFOAddress,
+										  &paui32ClientTAFenceValue);
+		if (bKick3D)
+		{
+			pvr_buffer_sync_append_updates_get(psAppendData,
+											   &ui32Client3DUpdateCount,
+											   &pauiClient3DUpdateUFOAddress,
+											   &paui32Client3DUpdateValue);
+		}
+		else
+		{
+			pvr_buffer_sync_append_updates_get(psAppendData,
+											   &ui32ClientPRUpdateCount,
+											   &pauiClientPRUpdateUFOAddress,
+											   &paui32ClientPRUpdateValue);
+		}
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+		if (!bKickTA)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync only supported for kicks including a TA",
+					 __FUNCTION__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psRenderContext->hLock);
+#endif
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		if (!bKickPR)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync only supported for kicks including a PR",
+					 __FUNCTION__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psRenderContext->hLock);
+#endif
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Calling pvr_buffer_sync_resolve_and_create_fences", __FUNCTION__));
+		err = pvr_buffer_sync_resolve_and_create_fences(psDeviceNode->psBufferSyncContext,
+														ui32SyncPMRCount,
+														ppsSyncPMRs,
+														paui32SyncPMRFlags,
+														&ui32BufferFenceSyncCheckpointCount,
+														&apsBufferFenceSyncCheckpoints,
+														&psBufferUpdateSyncCheckpoint,
+														&psBufferSyncData);
+		if (err)
+		{
+			eError = (err == -ENOMEM) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_DPF((PVR_DBG_ERROR, "%s:   pvr_buffer_sync_resolve_and_create_fences failed (%d)", __FUNCTION__, eError));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psRenderContext->hLock);
+#endif
+			return eError;
+		}
+
+		/* Append buffer sync fences to TA fences */
+		if (ui32BufferFenceSyncCheckpointCount > 0)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d buffer sync checkpoints to TA Fence (&psRenderContext->sSyncAddrListTAFence=<%p>, pauiClientTAFenceUFOAddress=<%p>)...", __FUNCTION__, ui32BufferFenceSyncCheckpointCount, (void*)&psRenderContext->sSyncAddrListTAFence , (void*)pauiClientTAFenceUFOAddress));
+			SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+			                                      ui32BufferFenceSyncCheckpointCount,
+			                                      apsBufferFenceSyncCheckpoints);
+			if (!pauiClientTAFenceUFOAddress)
+			{
+				pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+			}
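+			/* If there were no client TA fence values before, the TA fence
+			 * list now consists solely of sync checkpoints (which carry no
+			 * separate value array); record that for the later asserts.
+			 */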
+			if (ui32ClientTAFenceCount == 0)
+			{
+				bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+			}
+			ui32ClientTAFenceCount += ui32BufferFenceSyncCheckpointCount;
+		}
+
+		if (psBufferUpdateSyncCheckpoint)
+		{
+			/* If we have a 3D kick append update to the 3D updates else append to the PR update */
+			if (bKick3D)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 buffer sync checkpoint<%p> to 3D Update (&psRenderContext->sSyncAddrList3DUpdate=<%p>, pauiClient3DUpdateUFOAddress=<%p>)...", __FUNCTION__, (void*)psBufferUpdateSyncCheckpoint, (void*)&psRenderContext->sSyncAddrList3DUpdate , (void*)pauiClient3DUpdateUFOAddress));
+				/* Append buffer sync update to 3D updates */
+				SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+											  1,
+											  &psBufferUpdateSyncCheckpoint);
+				if (!pauiClient3DUpdateUFOAddress)
+				{
+					pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+				}
+				ui32Client3DUpdateCount++;
+			}
+			else
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 buffer sync checkpoint<%p> to TA Update (&psRenderContext->sSyncAddrListTAUpdate=<%p>, pauiClient3DUpdateUFOAddress=<%p>)...", __FUNCTION__, (void*)psBufferUpdateSyncCheckpoint, (void*)&psRenderContext->sSyncAddrListTAUpdate , (void*)pauiClientTAUpdateUFOAddress));
+				/* Append buffer sync update to TA updates */
+				SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAUpdate,
+											  1,
+											  &psBufferUpdateSyncCheckpoint);
+				if (!pauiClientTAUpdateUFOAddress)
+				{
+					pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs;
+				}
+				ui32ClientTAUpdateCount++;
+			}
+		}
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after buffer_sync) ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", __FUNCTION__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ui32Client3DFenceCount, ui32Client3DUpdateCount, ui32ClientPRUpdateCount));
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#else
+		PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __FUNCTION__, ui32SyncPMRCount));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSLockRelease(psRenderContext->hLock);
+#endif
+		return PVRSRV_ERROR_INVALID_PARAMS;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined (SUPPORT_FALLBACK_FENCE_SYNC)
+	/*
+	 * The hardware requires a PR to be submitted if there is a TA (otherwise
+	 * it can wedge if we run out of PB space with no PR to run)
+	 *
+	 * If we only have a TA, attach native checks to the TA and updates to the PR
+	 * If we have a TA and 3D, attach checks to TA, updates to 3D
+	 * If we only have a 3D, attach checks and updates to the 3D
+	 *
+	 * Note that 'updates' includes the cleanup syncs for 'check' fence FDs, in
+	 * addition to the update fence FD (if supplied)
+	 *
+	 * Currently, the client driver never kicks only the 3D, so 3D-only kicks
+	 * are not supported for the time being.
+	 */
+	if (iCheckFence >= 0 || iUpdateTimeline >= 0)
+	{
+		PRGXFWIF_UFO_ADDR	*pauiClientIntUpdateUFOAddress = NULL;
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		IMG_UINT32			*paui32ClientIntUpdateValue = NULL;
+		IMG_UINT32			ui32ClientIntUpdateCount = 0;
+#endif
+		if (!bKickTA)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Native syncs only supported for kicks including a TA",
+				__FUNCTION__));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_fdsync;
+		}
+		if (!bKickPR)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Native syncs require a PR for all kicks",
+				__FUNCTION__));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_fdsync;
+		}
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		/* If we have a 3D, attach updates to that. Otherwise, we attach it to a PR */
+		if (bKick3D)
+		{
+			ui32ClientIntUpdateCount = ui32Client3DUpdateCount;
+			pauiClientIntUpdateUFOAddress = pauiClient3DUpdateUFOAddress;
+			paui32ClientIntUpdateValue = paui32Client3DUpdateValue;
+		}
+		else
+		{
+			ui32ClientIntUpdateCount = ui32ClientPRUpdateCount;
+			pauiClientIntUpdateUFOAddress = pauiClientPRUpdateUFOAddress;
+			paui32ClientIntUpdateValue = paui32ClientPRUpdateValue;
+		}
+
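+		/* Merge the checks from the input fence FD into the TA fence list,
+		 * and the update for the output timeline into the selected internal
+		 * update list; the merged arrays are fetched back just below.
+		 */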
+		eError =
+			pvr_sync_append_fences(szFenceName,
+			                       iCheckFence,
+			                       iUpdateTimeline,
+			                       ui32ClientIntUpdateCount,
+			                       pauiClientIntUpdateUFOAddress,
+			                       paui32ClientIntUpdateValue,
+			                       ui32ClientTAFenceCount,
+			                       pauiClientTAFenceUFOAddress,
+			                       paui32ClientTAFenceValue,
+			                       &psFDData);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s:   pvr_sync_append_fences failed (%d)", __FUNCTION__, eError));
+			goto fail_fdsync;
+		}
+		/* If we have a 3D, attach updates to that. Otherwise, we attach it to a PR */
+		if (bKick3D)
+		{
+			pvr_sync_get_updates(psFDData, &ui32Client3DUpdateCount,
+				&pauiClient3DUpdateUFOAddress, &paui32Client3DUpdateValue);
+		}
+		else
+		{
+			pvr_sync_get_updates(psFDData, &ui32ClientPRUpdateCount,
+				&pauiClientPRUpdateUFOAddress, &paui32ClientPRUpdateValue);
+		}
+		pvr_sync_get_checks(psFDData, &ui32ClientTAFenceCount,
+			&pauiClientTAFenceUFOAddress, &paui32ClientTAFenceValue);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __FUNCTION__, iCheckFence, (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+			/* Resolve the sync checkpoints that make up the input fence */
+			eError = SyncCheckpointResolveFence(psRenderContext->psDeviceNode->hSyncCheckpointContext,
+			                                    iCheckFence,
+			                                    &ui32FenceSyncCheckpointCount,
+			                                    &apsFenceSyncCheckpoints,
+			                                    &uiCheckFenceUID);
+			if (eError != PVRSRV_OK)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __FUNCTION__, eError));
+				goto fail_resolve_input_fence;
+			}
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __FUNCTION__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(TA3D_CHECKPOINT_DEBUG)
+			if (apsFenceSyncCheckpoints)
+			{
+				IMG_UINT32 ii;
+				for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+				{
+					PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints +  ii);
+					CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __FUNCTION__, ii, (void*)psNextCheckpoint));
+				}
+			}
+#endif
+			/* Create the output fence (if required) */
+			if (piUpdateFence)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d,  psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __FUNCTION__, iUpdateFence, iUpdateTimeline, (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+				eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode,
+				                                   szFenceName,
+				                                   iUpdateTimeline,
+				                                   psRenderContext->psDeviceNode->hSyncCheckpointContext,
+				                                   &iUpdateFence,
+				                                   &uiUpdateFenceUID,
+				                                   &pvUpdateFenceFinaliseData,
+				                                   &psUpdateSyncCheckpoint,
+				                                   (void*)&psFenceTimelineUpdateSync,
+				                                   &ui32FenceTimelineUpdateValue);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   SyncCheckpointCreateFence failed (%d)", __FUNCTION__, eError));
+					goto fail_create_output_fence;
+				}
+
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=0x%x)", __FUNCTION__, iUpdateFence, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+
+				/* Store the FW address of the update sync checkpoint in pauiClientIntUpdateUFOAddress */
+				pauiClientIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateSyncCheckpoint);
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientIntUpdateUFOAddress->ui32Addr=0x%x", __FUNCTION__, pauiClientIntUpdateUFOAddress->ui32Addr));
+			}
+			/* Append the sync prim update for the timeline (if required) */
+			if (psFenceTimelineUpdateSync)
+			{
+				IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: About to allocate memory to hold updates in pui32IntAllocatedUpdateValues(currently <%p>)", __FUNCTION__, (void*)pui32IntAllocatedUpdateValues));
+				if (bKick3D)
+				{
+					/* Allocate memory to hold the list of update values (including our timeline update) */
+					pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32Client3DUpdateValueCount+1));
+					if (!pui32IntAllocatedUpdateValues)
+					{
+						/* Failed to allocate memory */
+						eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+						goto fail_alloc_update_values_mem;
+					}
+					OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xcc, sizeof(*pui32IntAllocatedUpdateValues) * (ui32Client3DUpdateValueCount+1));
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: Copying %d 3D update values into pui32IntAllocatedUpdateValues(<%p>)", __FUNCTION__, ui32Client3DUpdateCount, (void*)pui32IntAllocatedUpdateValues));
+					/* Copy the update values into the new memory, then append our timeline update value */
+					OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32Client3DUpdateValue, ui32Client3DUpdateValueCount * sizeof(*paui32Client3DUpdateValue));
+				}
+				else
+				{
+					/* Allocate memory to hold our timeline update value (for PR update) */
+					pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientPRUpdateValueCount+1));
+					if (!pui32IntAllocatedUpdateValues)
+					{
+						/* Failed to allocate memory */
+						eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+						goto fail_alloc_update_values_mem;
+					}
+					OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xcc, sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientPRUpdateValueCount+1));
+				}
+#if defined(TA3D_CHECKPOINT_DEBUG)
+				if (bKick3D)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32Client3DUpdateValueCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Now set the additional update value and append the timeline sync prim addr to either the
+				 * render context 3D (or TA) update list
+				 */
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value (0x%x) to psRenderContext->sSyncAddrList%sUpdate...", __FUNCTION__, ui32FenceTimelineUpdateValue, bKick3D ? "3D" : "TA"));
+				if (bKick3D)
+				{
+					pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32Client3DUpdateValueCount;
+					*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+					ui32Client3DUpdateValueCount++;
+					ui32Client3DUpdateCount++;
+					SyncAddrListAppendSyncPrim(&psRenderContext->sSyncAddrList3DUpdate,
+												   psFenceTimelineUpdateSync);
+					if (!pauiClient3DUpdateUFOAddress)
+					{
+						pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+					}
+					/* Update paui32Client3DUpdateValue to point to our new list of update values */
+					paui32Client3DUpdateValue = pui32IntAllocatedUpdateValues;
+				}
+				else
+				{
+					/* Use the sSyncAddrList3DUpdate for PR (as it doesn't have one of its own) */
+					pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues;
+					*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+					ui32ClientPRUpdateValueCount++;
+					ui32ClientPRUpdateCount++;
+					SyncAddrListAppendSyncPrim(&psRenderContext->sSyncAddrList3DUpdate,
+											   psFenceTimelineUpdateSync);
+					if (!pauiClientPRUpdateUFOAddress)
+					{
+						pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+					}
+					/* Update paui32ClientPRUpdateValue to point to our new list of update values */
+					paui32ClientPRUpdateValue = pui32IntAllocatedUpdateValues;
+				}
+#if defined(TA3D_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+					IMG_UINT32 ui32NumValues = ui32Client3DUpdateValueCount;
+
+					for (iii=0; iii<ui32NumValues; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+			}
+
+			/*
+			 * The hardware requires a PR to be submitted if there is a TA (otherwise
+			 * it can wedge if we run out of PB space with no PR to run)
+			 *
+			 * If we only have a TA, attach native checks to the TA and updates to the PR
+			 * If we have a TA and 3D, attach checks to TA, updates to 3D
+			 * If we only have a 3D, attach checks and updates to the 3D
+			 *
+			 * Note that 'updates' includes the cleanup syncs for 'check' fence FDs, in
+			 * addition to the update fence FD (if supplied)
+			 *
+			 * Currently, the client driver never kicks only the 3D, so 3D-only
+			 * kicks are not supported for the time being.
+			 */
+
+			if (bKick3D)
+			{
+				if (bKickTA)
+				{
+					/* we have a TA and 3D, attach checks to TA, updates to 3D */
+					/* Checks (from input fence) */
+					if (ui32FenceSyncCheckpointCount > 0)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to TA Fence (apsFenceSyncCheckpoints=<%p>)...", __FUNCTION__, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+						SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+													  ui32FenceSyncCheckpointCount,
+													  apsFenceSyncCheckpoints);
+						if (!pauiClientTAFenceUFOAddress)
+						{
+							pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+						}
+						CHKPT_DBG((PVR_DBG_ERROR, "%s:   {ui32ClientTAFenceCount was %d, now %d}", __FUNCTION__, ui32ClientTAFenceCount, ui32ClientTAFenceCount+ui32FenceSyncCheckpointCount));
+						if (ui32ClientTAFenceCount == 0)
+						{
+							bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+						}
+						ui32ClientTAFenceCount += ui32FenceSyncCheckpointCount;
+					}
+					CHKPT_DBG((PVR_DBG_ERROR, "%s:   {ui32ClientTAFenceCount now %d}", __FUNCTION__, ui32ClientTAFenceCount));
+
+					/* Update (from output fence) */
+					CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint)));
+					SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+												  1,
+												  &psUpdateSyncCheckpoint);
+					if (!pauiClient3DUpdateUFOAddress)
+					{
+						pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+					}
+					ui32Client3DUpdateCount++;
+				}
+				else
+				{
+					/* we only have 3D, attach checks and updates to the 3D */
+					/* Checks (from input fence) */
+					if (ui32FenceSyncCheckpointCount > 0)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to 3D Fence...", __FUNCTION__, ui32FenceSyncCheckpointCount));
+						SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence,
+													  ui32FenceSyncCheckpointCount,
+													  apsFenceSyncCheckpoints);
+						if (!pauiClient3DFenceUFOAddress)
+						{
+							pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+						}
+						CHKPT_DBG((PVR_DBG_ERROR, "%s:   {ui32Client3DFenceCount was %d, now %d}", __FUNCTION__, ui32Client3DFenceCount, ui32Client3DFenceCount+ui32FenceSyncCheckpointCount));
+						if (ui32Client3DFenceCount == 0)
+						{
+							b3DFenceOnSyncCheckpointsOnly = IMG_TRUE;
+						}
+						ui32Client3DFenceCount += ui32FenceSyncCheckpointCount;
+					}
+					CHKPT_DBG((PVR_DBG_ERROR, "%s:   {ui32Client3DFenceCount was %d}", __FUNCTION__, ui32Client3DFenceCount));
+
+					/* Update (from output fence) */
+					CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint)));
+					SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+												  1,
+												  &psUpdateSyncCheckpoint);
+					if (!pauiClient3DUpdateUFOAddress)
+					{
+						pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+					}
+					ui32Client3DUpdateCount++;
+				}
+			}
+			else
+			{
+				/* we only have TA, attach checks to the TA */
+				/* Checks (from input fence) */
+				if (ui32FenceSyncCheckpointCount > 0)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to TA Fence...", __FUNCTION__, ui32FenceSyncCheckpointCount));
+					SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+												  ui32FenceSyncCheckpointCount,
+												  apsFenceSyncCheckpoints);
+					if (!pauiClientTAFenceUFOAddress)
+					{
+						pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+					}
+					CHKPT_DBG((PVR_DBG_ERROR, "%s:   {ui32ClientTAFenceCount was %d, now %d}", __FUNCTION__, ui32ClientTAFenceCount, ui32ClientTAFenceCount+ui32FenceSyncCheckpointCount));
+					if (ui32ClientTAFenceCount == 0)
+					{
+						bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+					}
+					ui32ClientTAFenceCount += ui32FenceSyncCheckpointCount;
+				}
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   {ui32ClientTAFenceCount now %d}", __FUNCTION__, ui32ClientTAFenceCount));
+				/* Attach update to the 3D (used for PR) Updates */
+				SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+				                              1,
+				                              &psUpdateSyncCheckpoint);
+				if (!pauiClientPRUpdateUFOAddress)
+				{
+					pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+				}
+				ui32ClientPRUpdateCount++;
+			}
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after pvr_sync) ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", __FUNCTION__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ui32Client3DFenceCount, ui32Client3DUpdateCount, ui32ClientPRUpdateCount));
+		}
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+
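+		/* Sanity-check that every non-empty sync list has an address array,
+		 * and a value array where one is expected (checkpoint-only fence
+		 * lists carry their values in the checkpoints themselves).
+		 */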
+		if (ui32ClientTAFenceCount)
+		{
+			PVR_ASSERT(pauiClientTAFenceUFOAddress);
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+			if (!bTAFenceOnSyncCheckpointsOnly)
+#endif
+			{
+				PVR_ASSERT(paui32ClientTAFenceValue);
+			}
+		}
+		if (ui32ClientTAUpdateCount)
+		{
+			PVR_ASSERT(pauiClientTAUpdateUFOAddress);
+			/* We don't have TA updates from fences, so there should always be a value
+			 * (fence updates are attached to the PR)
+			 */
+			PVR_ASSERT(paui32ClientTAUpdateValue);
+		}
+		if (ui32Client3DFenceCount)
+		{
+			PVR_ASSERT(pauiClient3DFenceUFOAddress);
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+			if (!b3DFenceOnSyncCheckpointsOnly)
+#endif
+			{
+				PVR_ASSERT(paui32Client3DFenceValue);
+			}
+		}
+		if (ui32Client3DUpdateCount)
+		{
+			PVR_ASSERT(pauiClient3DUpdateUFOAddress);
+			if (ui32Client3DUpdateValueCount > 0)
+			{
+				PVR_ASSERT(paui32Client3DUpdateValue);
+			}
+		}
+		if (ui32ClientPRUpdateCount)
+		{
+			PVR_ASSERT(pauiClientPRUpdateUFOAddress);
+			if (ui32ClientPRUpdateValueCount > 0)
+			{
+				PVR_ASSERT(paui32ClientPRUpdateValue);
+			}
+		}
+
+	}
+#endif /* SUPPORT_NATIVE_FENCE_SYNC || defined (SUPPORT_FALLBACK_FENCE_SYNC) */
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32ClientTAFenceCount=%d, pauiClientTAFenceUFOAddress=<%p> Line ", __func__, ui32ClientTAFenceCount, (void*)paui32ClientTAFenceValue));
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32ClientTAUpdateCount=%d, pauiClientTAUpdateUFOAddress=<%p> Line ", __func__, ui32ClientTAUpdateCount, (void*)pauiClientTAUpdateUFOAddress));
+#if (ENABLE_TA3D_UFO_DUMP == 1)
+	{
+		IMG_UINT32 ii;
+		PRGXFWIF_UFO_ADDR *psTmpClientTAFenceUFOAddress = pauiClientTAFenceUFOAddress;
+		IMG_UINT32 *pui32TmpClientTAFenceValue = paui32ClientTAFenceValue;
+		PRGXFWIF_UFO_ADDR *psTmpClientTAUpdateUFOAddress = pauiClientTAUpdateUFOAddress;
+		IMG_UINT32 *pui32TmpClientTAUpdateValue = paui32ClientTAUpdateValue;
+		PRGXFWIF_UFO_ADDR *psTmpClient3DFenceUFOAddress = pauiClient3DFenceUFOAddress;
+		IMG_UINT32 *pui32TmpClient3DFenceValue = paui32Client3DFenceValue;
+		PRGXFWIF_UFO_ADDR *psTmpClient3DUpdateUFOAddress = pauiClient3DUpdateUFOAddress;
+		IMG_UINT32 *pui32TmpClient3DUpdateValue = paui32Client3DUpdateValue;
+
+		PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After appending sync checkpoints ", __FUNCTION__));
+
+		/* Dump Fence syncs, Update syncs and PR Update syncs */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:", __FUNCTION__, ui32ClientTAFenceCount));
+		for (ii=0; ii<ui32ClientTAFenceCount; ii++)
+		{
+			if (psTmpClientTAFenceUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32ClientTAFenceCount, (void*)psTmpClientTAFenceUFOAddress, psTmpClientTAFenceUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32ClientTAFenceCount, (void*)psTmpClientTAFenceUFOAddress, psTmpClientTAFenceUFOAddress->ui32Addr, *pui32TmpClientTAFenceValue, *pui32TmpClientTAFenceValue));
+				pui32TmpClientTAFenceValue++;
+			}
+			psTmpClientTAFenceUFOAddress++;
+		}
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:", __FUNCTION__, ui32ClientTAUpdateCount));
+		for (ii=0; ii<ui32ClientTAUpdateCount; ii++)
+		{
+			if (psTmpClientTAUpdateUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32ClientTAUpdateCount, (void*)psTmpClientTAUpdateUFOAddress, psTmpClientTAUpdateUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)", __FUNCTION__, ii+1, ui32ClientTAUpdateCount, (void*)psTmpClientTAUpdateUFOAddress, psTmpClientTAUpdateUFOAddress->ui32Addr, *pui32TmpClientTAUpdateValue, *pui32TmpClientTAUpdateValue));
+				pui32TmpClientTAUpdateValue++;
+			}
+			psTmpClientTAUpdateUFOAddress++;
+		}
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:", __FUNCTION__, ui32Client3DFenceCount));
+		for (ii=0; ii<ui32Client3DFenceCount; ii++)
+		{
+			if (psTmpClient3DFenceUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32Client3DFenceCount, (void*)psTmpClient3DFenceUFOAddress, psTmpClient3DFenceUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32Client3DFenceCount, (void*)psTmpClient3DFenceUFOAddress, psTmpClient3DFenceUFOAddress->ui32Addr, *pui32TmpClient3DFenceValue, *pui32TmpClient3DFenceValue));
+				pui32TmpClient3DFenceValue++;
+			}
+			psTmpClient3DFenceUFOAddress++;
+		}
+		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:", __FUNCTION__, ui32Client3DUpdateCount));
+		for (ii=0; ii<ui32Client3DUpdateCount; ii++)
+		{
+			if (psTmpClient3DUpdateUFOAddress->ui32Addr & 0x1)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32Client3DUpdateCount, (void*)psTmpClient3DUpdateUFOAddress, psTmpClient3DUpdateUFOAddress->ui32Addr));
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)", __FUNCTION__, ii+1, ui32Client3DUpdateCount, (void*)psTmpClient3DUpdateUFOAddress, psTmpClient3DUpdateUFOAddress->ui32Addr, *pui32TmpClient3DUpdateValue, *pui32TmpClient3DUpdateValue));
+				pui32TmpClient3DUpdateValue++;
+			}
+			psTmpClient3DUpdateUFOAddress++;
+		}
+	}
+#endif
+
+	/* Init, and acquire space for, the TA command if required */
+	if (bKickTA)
+	{
+		RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* Prepare workload estimation */
+		WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+		               &(psRenderContext->sWorkEstData),
+		               &(psRenderContext->sWorkEstData.sWorkloadMatchingDataTA),
+		               ui32RenderTargetSize,
+		               ui32NumberOfDrawCalls,
+		               ui32NumberOfIndices,
+		               ui32NumberOfMRTs,
+		               ui64DeadlineInus,
+		               &sWorkloadKickDataTA);
+#endif
+
+		/* Init the TA command helper */
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   calling RGXCmdHelperInitCmdCCB(), ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", __FUNCTION__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount));
+		eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psTAData->psServerCommonContext),
+		                                ui32ClientTAFenceCount,
+		                                pauiClientTAFenceUFOAddress,
+		                                paui32ClientTAFenceValue,
+		                                ui32ClientTAUpdateCount,
+		                                pauiClientTAUpdateUFOAddress,
+		                                paui32ClientTAUpdateValue,
+		                                ui32ServerTASyncPrims,
+		                                paui32ServerTASyncFlags,
+		                                SYNC_FLAG_MASK_ALL,
+		                                pasServerTASyncs,
+		                                ui32TACmdSize,
+		                                pui8TADMCmd,
+		                                & pPreAddr,
+		                                (bKick3D ? NULL : & pPostAddr),
+		                                (bKick3D ? NULL : & pRMWUFOAddr),
+		                                RGXFWIF_CCB_CMD_TYPE_TA,
+		                                ui32ExtJobRef,
+		                                ui32JobId,
+		                                ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		                                &sWorkloadKickDataTA,
+#else
+										NULL,
+#endif
+		                                "TA",
+		                                pasTACmdHelperData,
+										sRobustnessResetReason);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __FUNCTION__, eError));
+			goto fail_tacmdinit;
+		}
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* The following is used to determine the offset of the command header
+		 * containing the workload estimation data, so that it can be accessed
+		 * when the KCCB is read.
+		 */
+		ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData);
+#endif
+
+		eError = RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS,
+		                                   pasTACmdHelperData);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __FUNCTION__, eError));
+			goto fail_taacquirecmd;
+		}
+		else
+		{
+			ui32TACmdCount++;
+		}
+	}
+
+	/* Init the PR fence and PR commands (they run on the 3D CCB) if required */
+	if (bKickPR)
+	{
+		RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+
+		/*
+			The command helper doesn't know about the PR fence, so create
+			the command with all the fences against it, and later create
+			the PR command itself, which _must_ come after the PR fence.
+		*/
+		sPRUFO.puiAddrUFO = uiPRFenceUFOAddress;
+		sPRUFO.ui32Value = ui32PRFenceValue;
+
+		/* Init the PR fence command helper */
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   calling RGXCmdHelperInitCmdCCB(), ui32Client3DFenceCount=%d", __FUNCTION__, ui32Client3DFenceCount));
+		eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+										ui32Client3DFenceCount,
+										pauiClient3DFenceUFOAddress,
+										paui32Client3DFenceValue,
+										0,
+										NULL,
+										NULL,
+										(bKick3D ? ui32Server3DSyncPrims : 0),
+										paui32Server3DSyncFlags,
+										PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK,
+										pasServer3DSyncs,
+										sizeof(sPRUFO),
+										(IMG_UINT8*) &sPRUFO,
+										NULL,
+										NULL,
+										NULL,
+										RGXFWIF_CCB_CMD_TYPE_FENCE_PR,
+										ui32ExtJobRef,
+										ui32JobId,
+										ui32PDumpFlags,
+										NULL,
+										"3D-PR-Fence",
+										&pas3DCmdHelperData[ui323DCmdCount++],
+										sRobustnessResetReason);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __FUNCTION__, eError));
+			goto fail_prfencecmdinit;
+		}
+
+		/* Init the 3D PR command helper */
+		/*
+			Updates for Android (fence sync and Timeline sync prim) are provided in the PR-update
+			if no 3D is present. This is so the timeline update cannot happen out of order with any
+			other 3D already in flight for the same timeline (PR-updates are done in the 3D cCCB).
+			This out of order timeline sync prim update could happen if we attach it to the TA update.
+		*/
+		if (ui32ClientPRUpdateCount)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Line %d, ui32ClientPRUpdateCount=%d, pauiClientPRUpdateUFOAddress=%d, paui32ClientPRUpdateValue=%d", __FUNCTION__, __LINE__,
+					 ui32ClientPRUpdateCount, pauiClientPRUpdateUFOAddress->ui32Addr, *paui32ClientPRUpdateValue));
+		}
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   calling RGXCmdHelperInitCmdCCB(), ui32ClientPRUpdateCount=%d", __FUNCTION__, ui32ClientPRUpdateCount));
+		eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+										0,
+										NULL,
+										NULL,
+										ui32ClientPRUpdateCount,
+										pauiClientPRUpdateUFOAddress,
+										paui32ClientPRUpdateValue,
+										0,
+										NULL,
+										SYNC_FLAG_MASK_ALL,
+										NULL,
+										ui323DPRCmdSize,
+										pui83DPRDMCmd,
+										NULL,
+										NULL,
+										NULL,
+										RGXFWIF_CCB_CMD_TYPE_3D_PR,
+										ui32ExtJobRef,
+										ui32JobId,
+										ui32PDumpFlags,
+										NULL,
+										"3D-PR",
+										&pas3DCmdHelperData[ui323DCmdCount++],
+										sRobustnessResetReason);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __FUNCTION__, eError));
+			goto fail_prcmdinit;
+		}
+	}
+
+	if (bKick3D || bAbort)
+	{
+		RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* Prepare workload estimation */
+		WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+		               &(psRenderContext->sWorkEstData),
+		               &(psRenderContext->sWorkEstData.sWorkloadMatchingData3D),
+		               ui32RenderTargetSize,
+		               ui32NumberOfDrawCalls,
+		               ui32NumberOfIndices,
+		               ui32NumberOfMRTs,
+		               ui64DeadlineInus,
+		               &sWorkloadKickData3D);
+#endif
+		/* Init the 3D command helper */
+		eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+										0,
+										NULL,
+										NULL,
+										ui32Client3DUpdateCount,
+										pauiClient3DUpdateUFOAddress,
+										paui32Client3DUpdateValue,
+										ui32Server3DSyncPrims,
+										paui32Server3DSyncFlags,
+										PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE,
+										pasServer3DSyncs,
+										ui323DCmdSize,
+										pui83DDMCmd,
+										(bKickTA ? NULL : & pPreAddr),
+										& pPostAddr,
+										& pRMWUFOAddr,
+										RGXFWIF_CCB_CMD_TYPE_3D,
+										ui32ExtJobRef,
+										ui32JobId,
+										ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+										&sWorkloadKickData3D,
+#else
+										NULL,
+#endif
+										"3D",
+										&pas3DCmdHelperData[ui323DCmdCount++],
+										sRobustnessResetReason);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __FUNCTION__, eError));
+			goto fail_3dcmdinit;
+		}
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* The following are used to determine the offset of the command header
+		 * containing the workload estimation data, so that it can be accessed
+		 * when the KCCB is read.
+		 */
+		ui323DCmdHeaderOffset =
+			RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]);
+		ui323DFullRenderCommandOffset =
+			RGXCmdHelperGetCommandOffset(pas3DCmdHelperData,
+			                             ui323DCmdCount - 1);
+#endif
+	}
+
+	/* Protect against array overflow in RGXCmdHelperAcquireCmdCCB() */
+	if (ui323DCmdCount > CCB_CMD_HELPER_NUM_3D_COMMANDS)
+	{
+		/* Ensure the error path reports a failure (eError may still be PVRSRV_OK here) */
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line %d", __FUNCTION__, eError, __LINE__));
+		goto fail_3dcmdinit;
+	}
+
+	if (ui323DCmdCount)
+	{
+		PVR_ASSERT(bKickPR || bKick3D);
+
+		/* Acquire space for all the 3D command(s) */
+		eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount,
+										   pas3DCmdHelperData);
+		if (eError != PVRSRV_OK)
+		{
+			/* If RGXCmdHelperAcquireCmdCCB fails, we skip scheduling a new
+			 * TA command with the same write offset in the kernel CCB.
+			 */
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __FUNCTION__, eError));
+			goto fail_3dacquirecmd;
+		}
+	}
+
+	/*
+		We should acquire the kernel CCB space here, because after this point
+		we release the commands, which perform operations on server syncs
+		that cannot be undone.
+	*/
+
+	/*
+		Everything is ready to go now, release the commands
+	*/
+	if (ui32TACmdCount)
+	{
+		ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+		RGXCmdHelperReleaseCmdCCB(ui32TACmdCount,
+								  pasTACmdHelperData,
+								  "TA",
+								  FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		ui32TACmdOffsetWrapCheck =
+			RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+		/* This checks if the command would wrap around at the end of the CCB
+		 * and therefore would start at an offset of 0 rather than the current
+		 * command offset.
+		 */
+		if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck)
+		{
+			ui32TACommandOffset = ui32TACmdOffset;
+		}
+		else
+		{
+			ui32TACommandOffset = 0;
+		}
+#endif
+	}
+
+	if (ui323DCmdCount)
+	{
+		ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+		RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+								  pas3DCmdHelperData,
+								  "3D",
+								  FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr);
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
+		if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck)
+		{
+			ui323DCommandOffset = ui323DCmdOffset;
+		}
+		else
+		{
+			ui323DCommandOffset = 0;
+		}
+#endif
+	}
+
+	if (ui32TACmdCount)
+	{
+		RGXFWIF_KCCB_CMD sTAKCCBCmd;
+		IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr;
+
+		/* Construct the kernel TA CCB command. */
+		sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		sTAKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext);
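+		/* Tell the FW how far the TA client CCB has been written (the offset
+		 * covering the commands just released above).
+		 */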
+		sTAKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+		/* Add the Workload data into the KCCB kick */
+		sTAKCCBCmd.uCmdData.sCmdKickData.sWorkloadDataFWAddress.ui32Addr = 0;
+		sTAKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* Store the offset to the CCCB command header so that it can be
+		 * referenced when the KCCB command reaches the FW
+		 */
+		sTAKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset =
+			ui32TACommandOffset + ui32TACmdHeaderOffset;
+#endif
+
+		if (bCommitRefCountsTA)
+		{
+			AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTAKCCBCmd.uCmdData.sCmdKickData.apsCleanupCtl,
+										&sTAKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl,
+										RGXFWIF_DM_TA,
+										bKickTA,
+										psRTDataCleanup,
+										psZBuffer,
+										psSBuffer,
+										psMSAAScratchBuffer);
+			*pbCommittedRefCountsTA = IMG_TRUE;
+		}
+		else
+		{
+			sTAKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+		}
+
+		HTBLOGK(HTB_SF_MAIN_KICK_TA,
+				sTAKCCBCmd.uCmdData.sCmdKickData.psContext,
+				ui32TACmdOffset
+				);
+
+		RGX_HWPERF_HOST_ENQ(psRenderContext,
+		                    OSGetCurrentClientProcessIDKM(),
+		                    ui32FWCtx,
+		                    ui32ExtJobRef,
+		                    ui32JobId,
+		                    RGX_HWPERF_KICK_TYPE_TA,
+		                    uiCheckFenceUID,
+		                    uiUpdateFenceUID,
+		                    ui64DeadlineInus,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		                    sWorkloadKickDataTA.ui64CyclesPrediction
+#else
+		                    NO_CYCEST
+#endif
+		                    );
+
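+		/* Submit the TA kick, retrying for up to MAX_HW_TIME_US while
+		 * RGXScheduleCommand asks us to retry (e.g. if the kernel CCB is
+		 * temporarily full).
+		 */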
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+										RGXFWIF_DM_TA,
+										&sTAKCCBCmd,
+										sizeof(sTAKCCBCmd),
+										ui32ClientCacheOpSeqNum,
+										ui32PDumpFlags);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+		RGXHWPerfFTraceGPUEnqueueEvent(psRenderContext->psDeviceNode->pvDevice,
+					ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_TA3D);
+#endif
+	}
+
+	if (ui323DCmdCount)
+	{
+		RGXFWIF_KCCB_CMD s3DKCCBCmd;
+		IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr;
+
+		/* Construct the kernel 3D CCB command. */
+		s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext);
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
+		/* Add the Workload data into the KCCB kick */
+		s3DKCCBCmd.uCmdData.sCmdKickData.sWorkloadDataFWAddress.ui32Addr = 0;
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* Store the offset to the CCCB command header so that it can be
+		 * referenced when the KCCB command reaches the FW
+		 */
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset;
+#endif
+
+		if (bCommitRefCounts3D)
+		{
+			AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DKCCBCmd.uCmdData.sCmdKickData.apsCleanupCtl,
+											&s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl,
+											RGXFWIF_DM_3D,
+											bKick3D,
+											psRTDataCleanup,
+											psZBuffer,
+											psSBuffer,
+											psMSAAScratchBuffer);
+			*pbCommittedRefCounts3D = IMG_TRUE;
+		}
+		else
+		{
+			s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+		}
+
+		HTBLOGK(HTB_SF_MAIN_KICK_3D,
+				s3DKCCBCmd.uCmdData.sCmdKickData.psContext,
+				ui323DCmdOffset);
+
+		RGX_HWPERF_HOST_ENQ(psRenderContext,
+		                    OSGetCurrentClientProcessIDKM(),
+		                    ui32FWCtx,
+		                    ui32ExtJobRef,
+		                    ui32JobId,
+		                    RGX_HWPERF_KICK_TYPE_3D,
+		                    uiCheckFenceUID,
+		                    uiUpdateFenceUID,
+		                    ui64DeadlineInus,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		                    sWorkloadKickData3D.ui64CyclesPrediction
+#else
+		                    NO_CYCEST
+#endif
+		                    );
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+										RGXFWIF_DM_3D,
+										&s3DKCCBCmd,
+										sizeof(s3DKCCBCmd),
+										ui32ClientCacheOpSeqNum,
+										ui32PDumpFlags);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+	}
+
+	/*
+	 * Now check eError (which may hold an error from our earlier calls to
+	 * RGXCmdHelperAcquireCmdCCB); we needed to process any flush command
+	 * first, so we check it now...
+	 */
+	if (eError != PVRSRV_OK)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __FUNCTION__, eError));
+		goto fail_3dacquirecmd;
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined (SUPPORT_FALLBACK_FENCE_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (iUpdateTimeline >= 0)
+	{
+		/* If we get here, this should never fail. Hitting this path likely
+		 * implies a coding error above */
+		iUpdateFence = pvr_sync_get_update_fd(psFDData);
+		if (iUpdateFence < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get install update sync fd",
+				__FUNCTION__));
+			/* If we fail here, we cannot roll back the syncs, as the hw
+			 * already holds references to resources they may be protecting
+			 * in the kick, so fall through */
+
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_3dacquirecmd;
+		}
+	}
+#if defined(NO_HARDWARE)
+	pvr_sync_nohw_complete_fences(psFDData);
+#endif
+	pvr_sync_free_append_fences_data(psFDData);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if defined(NO_HARDWARE)
+	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+	if (psUpdateSyncCheckpoint)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+	}
+	if (psFenceTimelineUpdateSync)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   Updating NOHW sync prim<%p> to %d", __FUNCTION__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+	}
+	SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined(NO_HARDWARE) */
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined (SUPPORT_FALLBACK_FENCE_SYNC) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (psAppendData)
+	{
+		pvr_buffer_sync_append_finish(psAppendData);
+	}
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	if (psBufferSyncData)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   calling pvr_buffer_sync_kick_succeeded(psBufferSyncData=<%p>)...", __FUNCTION__, (void*)psBufferSyncData));
+		pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+	}
+	if (apsBufferFenceSyncCheckpoints)
+	{
+		kfree(apsBufferFenceSyncCheckpoints);
+	}
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+	*piUpdateFence = iUpdateFence;
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_FENCE_INVALID))
+	{
+		SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+	}
+#endif
+#endif
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRenderContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+fail_3dacquirecmd:
+fail_3dcmdinit:
+fail_prcmdinit:
+fail_prfencecmdinit:
+fail_taacquirecmd:
+fail_tacmdinit:
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAFence);
+	SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAUpdate);
+	SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DFence);
+	SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DUpdate);
+	if (pauiClientPRUpdateUFOAddress)
+	{
+		SyncCheckpointRollbackFromUFO(psRenderContext->psDeviceNode, pauiClientPRUpdateUFOAddress->ui32Addr);
+	}
+#endif
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+fail_alloc_update_values_mem:
+	if (iUpdateFence != PVRSRV_FENCE_INVALID)
+	{
+		SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+fail_create_output_fence:
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+fail_resolve_input_fence:
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined (SUPPORT_FALLBACK_FENCE_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	pvr_sync_rollback_append_fences(psFDData);
+	pvr_sync_free_append_fences_data(psFDData);
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+fail_fdsync:
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined (SUPPORT_FALLBACK_FENCE_SYNC) */
+#if defined(SUPPORT_BUFFER_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	pvr_buffer_sync_append_abort(psAppendData);
+fail_sync_append:
+#else
+	if (psBufferSyncData)
+	{
+		pvr_buffer_sync_kick_failed(psBufferSyncData);
+	}
+	if (apsBufferFenceSyncCheckpoints)
+	{
+		kfree(apsBufferFenceSyncCheckpoints);
+	}
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+err_pr_fence_address:
+err_populate_sync_addr_list_3d_update:
+err_populate_sync_addr_list_3d_fence:
+err_populate_sync_addr_list_ta_update:
+err_populate_sync_addr_list_ta_fence:
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+	}
+#endif
+#endif
+	PVR_ASSERT(eError != PVRSRV_OK);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRenderContext->hLock);
+#endif
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                 PVRSRV_DEVICE_NODE * psDeviceNode,
+												 RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+												 IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psRenderContext->hLock);
+#endif
+
+	if (psRenderContext->sTAData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext,
+									psConnection,
+									psRenderContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_TA);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the TA part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+			goto fail_tacontext;
+		}
+		psRenderContext->sTAData.ui32Priority = ui32Priority;
+	}
+
+	if (psRenderContext->s3DData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext,
+									psConnection,
+									psRenderContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_3D);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+			goto fail_3dcontext;
+		}
+		psRenderContext->s3DData.ui32Priority = ui32Priority;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRenderContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+fail_3dcontext:
+fail_tacontext:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psRenderContext->hLock);
+#endif
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+ * PVRSRVRGXGetLastRenderContextResetReasonKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+                                                        IMG_UINT32 *peLastResetReason,
+                                                        IMG_UINT32 *pui32LastResetJobRef)
+{
+	RGX_SERVER_RC_TA_DATA         *psRenderCtxTAData;
+	RGX_SERVER_RC_3D_DATA         *psRenderCtx3DData;
+	RGX_SERVER_COMMON_CONTEXT     *psCurrentServerTACommonCtx, *psCurrentServer3DCommonCtx;
+	RGXFWIF_CONTEXT_RESET_REASON  eLastTAResetReason, eLast3DResetReason;
+	IMG_UINT32                    ui32LastTAResetJobRef, ui32Last3DResetJobRef;
+
+	PVR_ASSERT(psRenderContext != NULL);
+	PVR_ASSERT(peLastResetReason != NULL);
+	PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+	psRenderCtxTAData          = &(psRenderContext->sTAData);
+	psCurrentServerTACommonCtx = psRenderCtxTAData->psServerCommonContext;
+	psRenderCtx3DData          = &(psRenderContext->s3DData);
+	psCurrentServer3DCommonCtx = psRenderCtx3DData->psServerCommonContext;
+
+	/* Get (and thereby clear) the last reset reasons from both the TA and 3D... */
+	eLastTAResetReason = FWCommonContextGetLastResetReason(psCurrentServerTACommonCtx, &ui32LastTAResetJobRef);
+	eLast3DResetReason = FWCommonContextGetLastResetReason(psCurrentServer3DCommonCtx, &ui32Last3DResetJobRef);
+
+	/* Combine the TA and 3D reset reasons into one: default to the 3D reason,
+	 * but prefer the TA reason when the 3D has none, or when the 3D was only
+	 * an innocent victim while the TA was the guilty context... */
+	*peLastResetReason    = (IMG_UINT32) eLast3DResetReason;
+	*pui32LastResetJobRef = ui32Last3DResetJobRef;
+	if (eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_NONE  ||
+	    ((eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP  ||
+	      eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING)  &&
+	     (eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP  ||
+	      eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING)))
+	{
+		*peLastResetReason    = eLastTAResetReason;
+		*pui32LastResetJobRef = ui32LastTAResetJobRef;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVRGXGetPartialRenderCountKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetPartialRenderCountKM(DEVMEM_MEMDESC *psHWRTDataMemDesc,
+											  IMG_UINT32 *pui32NumPartialRenders)
+{
+	RGXFWIF_HWRTDATA *psHWRTData;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemAcquireCpuVirtAddr(psHWRTDataMemDesc, (void **)&psHWRTData);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXGetPartialRenderCountKM: Failed to map Firmware Render Target Data (%u)", eError));
+		return eError;
+	}
+
+	*pui32NumPartialRenders = psHWRTData->ui32NumPartialRenders;
+
+	DevmemReleaseCpuVirtAddr(psHWRTDataMemDesc);
+
+	return PVRSRV_OK;
+}
+
+void CheckForStalledRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	DLLIST_NODE *psNode, *psNext;
+	OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+	dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+
+		DumpStalledFWCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext,
+								   pfnDumpDebugPrintf, pvDumpDebugFile);
+		DumpStalledFWCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext,
+								   pfnDumpDebugPrintf, pvDumpDebugFile);
+	}
+	OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_UINT32 ui32ContextBitMask = 0;
+
+	OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+		if (NULL != psCurrentServerRenderCtx->sTAData.psServerCommonContext)
+		{
+			if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext, RGX_KICK_TYPE_DM_TA) == PVRSRV_ERROR_CCCB_STALLED)
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_TA;
+			}
+		}
+
+		if (NULL != psCurrentServerRenderCtx->s3DData.psServerCommonContext)
+		{
+			if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_3D) == PVRSRV_ERROR_CCCB_STALLED)
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_3D;
+			}
+		}
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+	return ui32ContextBitMask;
+}
+
+/******************************************************************************
+ End of file (rgxta3d.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxta3d.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxta3d.h
new file mode 100644
index 0000000..cfc5253
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxta3d.h
@@ -0,0 +1,480 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX TA and 3D Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX TA and 3D Functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTA3D_H__)
+#define __RGXTA3D_H__
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_shared.h"
+#include "rgx_fwif_resetframework.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#define RGX_COMPUTE_FL_READY_PAGES(FL_PAGES, THRESHOLD) (((FL_PAGES) * THRESHOLD) / 100)
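+/* For example, with FL_PAGES = 1024 and THRESHOLD = 80 (percent), the macro
+ * evaluates to (1024 * 80) / 100 = 819 ready pages; the integer division
+ * rounds down. */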
+
+typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT;
+typedef struct _RGX_FREELIST_ RGX_FREELIST;
+typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE;
+
+typedef struct {
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+	DEVMEM_MEMDESC			*psFWHWRTDataMemDesc;
+	DEVMEM_MEMDESC			*psRTACtlMemDesc;
+	DEVMEM_MEMDESC			*psRTArrayMemDesc;
+	DEVMEM_MEMDESC          	*psRendersAccArrayMemDesc;
+	RGX_FREELIST 			*apsFreeLists[RGXFW_MAX_FREELISTS];
+	PVRSRV_CLIENT_SYNC_PRIM	*psCleanupSync;
+} RGX_RTDATA_CLEANUP_DATA;
+
+struct _RGX_FREELIST_ {
+	PVRSRV_RGXDEV_INFO 		*psDevInfo;
+
+	/* Free list PMR */
+	PMR						*psFreeListPMR;
+	IMG_DEVMEM_OFFSET_T		uiFreeListPMROffset;
+
+	/* Freelist config */
+	IMG_UINT32				ui32MaxFLPages;
+	IMG_UINT32				ui32InitFLPages;
+	IMG_UINT32				ui32CurrentFLPages;
+	IMG_UINT32				ui32GrowFLPages;
+	IMG_UINT32              ui32ReadyFLPages;
+	IMG_UINT32              ui32GrowThreshold;      /* Percentage of FL memory used that should trigger a new grow request */
+	IMG_UINT32				ui32FreelistID;
+	IMG_UINT32				ui32FreelistGlobalID;	/* related global freelist for this freelist */
+	IMG_UINT64				ui64FreelistChecksum;	/* checksum over freelist content */
+	IMG_BOOL				bCheckFreelist;			/* freelist check enabled */
+	IMG_UINT32				ui32RefCount;			/* freelist reference counting */
+
+	IMG_UINT32				ui32NumGrowReqByApp;	/* Total number of grow requests by the application */
+	IMG_UINT32				ui32NumGrowReqByFW;		/* Total number of grow requests by the firmware */
+	IMG_UINT32				ui32NumHighPages;		/* High-water mark of pages in the freelist */
+
+	IMG_PID					ownerPid;			/* Pid of the owner of the list */
+
+	/* Memory Blocks */
+	DLLIST_NODE				sMemoryBlockHead;
+	DLLIST_NODE				sMemoryBlockInitHead;
+	DLLIST_NODE				sNode;
+
+	/* FW data structures */
+	DEVMEM_MEMDESC			*psFWFreelistMemDesc;
+	RGXFWIF_DEV_VIRTADDR	sFreeListFWDevVAddr;
+
+	PVRSRV_CLIENT_SYNC_PRIM	*psCleanupSync;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	HASH_TABLE*				psWorkloadHashTable;
+#endif
+};
+
+struct _RGX_PMR_NODE_ {
+	RGX_FREELIST			*psFreeList;
+	PMR						*psPMR;
+	PMR_PAGELIST 			*psPageList;
+	DLLIST_NODE				sMemoryBlock;
+	IMG_UINT32				ui32NumPages;
+	IMG_BOOL				bInternal;
+#if defined(PVR_RI_DEBUG)
+	RI_HANDLE				hRIHandle;
+#endif
+};
+
+typedef struct {
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+	DEVMEM_MEMDESC			*psRenderTargetMemDesc;
+} RGX_RT_CLEANUP_DATA;
+
+typedef struct {
+	PVRSRV_RGXDEV_INFO		*psDevInfo;
+	DEVMEM_MEMDESC			*psZSBufferMemDesc;
+	RGXFWIF_DEV_VIRTADDR	sZSBufferFWDevVAddr;
+
+	DEVMEMINT_RESERVATION 	*psReservation;
+	PMR 					*psPMR;
+	DEVMEMINT_MAPPING 		*psMapping;
+	PVRSRV_MEMALLOCFLAGS_T 	uiMapFlags;
+	IMG_UINT32 				ui32ZSBufferID;
+	IMG_UINT32 				ui32RefCount;
+	IMG_BOOL				bOnDemand;
+
+	IMG_UINT32				ui32NumReqByApp;		/* Number of backing requests from the application */
+	IMG_UINT32				ui32NumReqByFW;			/* Number of backing requests from the firmware */
+
+	IMG_PID					owner;
+
+	DLLIST_NODE	sNode;
+
+	PVRSRV_CLIENT_SYNC_PRIM	*psCleanupSync;
+} RGX_ZSBUFFER_DATA;
+
+typedef struct {
+	RGX_ZSBUFFER_DATA		*psZSBuffer;
+} RGX_POPULATION;
+
+/* Dump the physical pages of a freelist */
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList);
+
+
+/* Create HWRTDataSet */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateHWRTData(CONNECTION_DATA      *psConnection,
+                               PVRSRV_DEVICE_NODE	*psDeviceNode, 
+							   IMG_UINT32			psRenderTarget,
+							   IMG_DEV_VIRTADDR		psPMMListDevVAddr,
+							   RGX_FREELIST			*apsFreeLists[RGXFW_MAX_FREELISTS],
+							   RGX_RTDATA_CLEANUP_DATA	**ppsCleanupData,
+							   DEVMEM_MEMDESC			**ppsRTACtlMemDesc,
+							   IMG_UINT32           ui32PPPScreen,
+							   IMG_UINT32           ui32PPPGridOffset,
+							   IMG_UINT64           ui64PPPMultiSampleCtl,
+							   IMG_UINT32           ui32TPCStride,
+							   IMG_DEV_VIRTADDR		sTailPtrsDevVAddr,
+							   IMG_UINT32           ui32TPCSize,
+							   IMG_UINT32           ui32TEScreen,
+							   IMG_UINT32           ui32TEAA,
+							   IMG_UINT32           ui32TEMTILE1,
+							   IMG_UINT32           ui32TEMTILE2,
+							   IMG_UINT32           ui32MTileStride,
+							   IMG_UINT32                 ui32ISPMergeLowerX,
+							   IMG_UINT32                 ui32ISPMergeLowerY,
+							   IMG_UINT32                 ui32ISPMergeUpperX,
+							   IMG_UINT32                 ui32ISPMergeUpperY,
+							   IMG_UINT32                 ui32ISPMergeScaleX,
+							   IMG_UINT32                 ui32ISPMergeScaleY,
+							   IMG_UINT16			ui16MaxRTs,
+							   DEVMEM_MEMDESC		**psMemDesc,
+							   IMG_UINT32			*puiHWRTData);
+
+/* Destroy HWRTData */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyHWRTData(RGX_RTDATA_CLEANUP_DATA *psCleanupData);
+
+/* Create Render Target */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRenderTarget(CONNECTION_DATA      *psConnection,
+                                   PVRSRV_DEVICE_NODE	*psDeviceNode,
+								   IMG_DEV_VIRTADDR		psVHeapTableDevVAddr,
+								   RGX_RT_CLEANUP_DATA	**ppsCleanupData,
+								   IMG_UINT32			*sRenderTargetFWDevVAddr);
+
+/* Destroy render target */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRenderTarget(RGX_RT_CLEANUP_DATA *psCleanupData);
+
+
+/*
+	RGXCreateZSBuffer
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
+                                 PVRSRV_DEVICE_NODE	* psDeviceNode,
+                                 DEVMEMINT_RESERVATION 	*psReservation,
+                                 PMR 					*psPMR,
+                                 PVRSRV_MEMALLOCFLAGS_T 	uiMapFlags,
+                                 RGX_ZSBUFFER_DATA		 	**ppsZSBuffer,
+                                 IMG_UINT32					*sRenderTargetFWDevVAddr);
+
+/*
+	RGXDestroyZSBuffer
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+
+/*
+ * RGXBackingZSBuffer()
+ *
+ * Backs ZS-Buffer with physical pages
+ */
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXPopulateZSBufferKM()
+ *
+ * Backs ZS-Buffer with physical pages (called by Bridge calls)
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+									RGX_POPULATION **ppsPopulation);
+
+/*
+ * RGXUnbackingZSBuffer()
+ *
+ * Frees ZS-Buffer's physical pages
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXUnpopulateZSBufferKM()
+ *
+ * Frees ZS-Buffer's physical pages (called by Bridge calls )
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation);
+
+/*
+	RGXProcessRequestZSBufferBacking
+*/
+IMG_EXPORT
+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+									  IMG_UINT32 ui32ZSBufferID);
+
+/*
+	RGXProcessRequestZSBufferUnbacking
+*/
+IMG_EXPORT
+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+										IMG_UINT32 ui32ZSBufferID);
+
+/*
+	RGXGrowFreeList
+*/
+IMG_INTERNAL
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+                             IMG_UINT32 ui32NumPages,
+                             PDLLIST_NODE pListHeader,
+                             IMG_BOOL bForCreate);
+
+/* Create free list */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA      *psConnection,
+                               PVRSRV_DEVICE_NODE	*psDeviceNode, 
+							   IMG_UINT32			ui32MaxFLPages,
+							   IMG_UINT32			ui32InitFLPages,
+							   IMG_UINT32			ui32GrowFLPages,
+                               IMG_UINT32           ui32GrowParamThreshold,
+							   RGX_FREELIST			*psGlobalFreeList,
+							   IMG_BOOL				bCheckFreelist,
+							   IMG_DEV_VIRTADDR		sFreeListDevVAddr,
+							   PMR					*psFreeListPMR,
+							   IMG_DEVMEM_OFFSET_T	uiFreeListPMROffset,
+							   RGX_FREELIST			**ppsFreeList);
+
+/* Destroy free list */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList);
+
+/*
+	RGXProcessRequestGrow
+*/
+IMG_EXPORT
+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+						   IMG_UINT32 ui32FreelistID);
+
+
+/* Grow free list */
+IMG_EXPORT
+PVRSRV_ERROR RGXAddBlockToFreeListKM(RGX_FREELIST *psFreeList,
+                                     IMG_UINT32 ui32NumPages);
+
+/* Shrink free list */
+IMG_EXPORT
+PVRSRV_ERROR RGXRemoveBlockFromFreeListKM(RGX_FREELIST *psFreeList);
+
+
+/* Reconstruct free list after Hardware Recovery */
+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+											  IMG_UINT32 ui32FreelistsCount,
+											  IMG_UINT32 *paui32Freelists);
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXCreateRenderContextKM
+
+ @Description
+	Server-side implementation of RGXCreateRenderContext
+
+ @Input pvDeviceNode - device node
+ @Input psTACCBMemDesc - TA CCB Memory descriptor
+ @Input psTACCBCtlMemDesc - TA CCB Ctrl Memory descriptor
+ @Input ps3DCCBMemDesc - 3D CCB Memory descriptor
+ @Input ps3DCCBCtlMemDesc - 3D CCB Ctrl Memory descriptor
+ @Input ui32Priority - context priority
+ @Input psVDMStackPointer - VDM call stack device virtual address
+ @Input ui32FrameworkRegisterSize - framework register size
+ @Input pbyFrameworkRegisters - ptr to framework register
+ @Input hMemCtxPrivData - memory context private data
+ @Output ppsCleanupData - clean up data
+ @Output ppsFWRenderContextMemDesc - firmware render context memory descriptor
+ @Output ppsFWContextStateMemDesc - firmware context state memory descriptor
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA				*psConnection,
+											PVRSRV_DEVICE_NODE			*psDeviceNode,
+											IMG_UINT32					ui32Priority,
+											IMG_DEV_VIRTADDR			sVDMCallStackAddr,
+											IMG_UINT32					ui32FrameworkCommandSize,
+											IMG_PBYTE					pabyFrameworkCommand,
+											IMG_HANDLE					hMemCtxPrivData,
+											RGX_SERVER_RENDER_CONTEXT	**ppsRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXDestroyRenderContextKM
+
+ @Description
+	Server-side implementation of RGXDestroyRenderContext
+
+ @Input psCleanupData - clean up data
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXKickTA3DKM
+
+ @Description
+	Server-side implementation of RGXKickTA3D
+
+ @Input psRTDataCleanup - RT data associated with the kick (or NULL)
+ @Input psZBuffer - Z-buffer associated with the kick (or NULL)
+ @Input psSBuffer - S-buffer associated with the kick (or NULL)
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT	*psRenderContext,
+								 IMG_UINT32					ui32ClientCacheOpSeqNum,
+								 IMG_UINT32					ui32ClientTAFenceCount,
+								 SYNC_PRIMITIVE_BLOCK				**apsClientTAFenceSyncPrimBlock,
+								 IMG_UINT32					*paui32ClientTAFenceSyncOffset,
+								 IMG_UINT32					*paui32ClientTAFenceValue,
+								 IMG_UINT32					ui32ClientTAUpdateCount,
+								 SYNC_PRIMITIVE_BLOCK				**apsClientUpdateSyncPrimBlock,
+								 IMG_UINT32					*paui32ClientUpdateSyncOffset,
+								 IMG_UINT32					*paui32ClientTAUpdateValue,
+								 IMG_UINT32					ui32ServerTASyncPrims,
+								 IMG_UINT32					*paui32ServerTASyncFlags,
+								 SERVER_SYNC_PRIMITIVE 		**pasServerTASyncs,
+								 IMG_UINT32					ui32Client3DFenceCount,
+								 SYNC_PRIMITIVE_BLOCK				**apsClient3DFenceSyncPrimBlock,
+								 IMG_UINT32					*pauiClient3DFenceSyncOffset,
+								 IMG_UINT32					*paui32Client3DFenceValue,
+								 IMG_UINT32					ui32Client3DUpdateCount,
+								 SYNC_PRIMITIVE_BLOCK				**apsClient3DUpdateSyncPrimBlock,
+								 IMG_UINT32					*paui32Client3DUpdateSyncOffset,
+								 IMG_UINT32					*paui32Client3DUpdateValue,
+								 IMG_UINT32					ui32Server3DSyncPrims,
+								 IMG_UINT32					*paui32Server3DSyncFlags,
+								 SERVER_SYNC_PRIMITIVE 		**pasServer3DSyncs,
+								 SYNC_PRIMITIVE_BLOCK				*psPRSyncPrimBlock,
+								 IMG_UINT32					ui32PRSyncOffset,
+								 IMG_UINT32					ui32PRFenceValue,
+								 PVRSRV_FENCE				iCheckFence,
+								 PVRSRV_TIMELINE			iUpdateTimeline,
+								 PVRSRV_FENCE				*piUpdateFence,
+								 IMG_CHAR					szFenceName[32],
+								 IMG_UINT32					ui32TACmdSize,
+								 IMG_PBYTE					pui8TADMCmd,
+								 IMG_UINT32					ui323DPRCmdSize,
+								 IMG_PBYTE					pui83DPRDMCmd,
+								 IMG_UINT32					ui323DCmdSize,
+								 IMG_PBYTE					pui83DDMCmd,
+								 IMG_UINT32					ui32ExtJobRef,
+								 IMG_BOOL					bLastTAInScene,
+								 IMG_BOOL					bKickTA,
+								 IMG_BOOL					bKickPR,
+								 IMG_BOOL					bKick3D,
+								 IMG_BOOL					bAbort,
+								 IMG_UINT32					ui32PDumpFlags,
+								 RGX_RTDATA_CLEANUP_DATA	*psRTDataCleanup,
+								 RGX_ZSBUFFER_DATA			*psZBuffer,
+								 RGX_ZSBUFFER_DATA			*psSBuffer,
+								 RGX_ZSBUFFER_DATA			*psMSAAScratchBuffer,
+								 IMG_BOOL					bCommitRefCountsTA,
+								 IMG_BOOL					bCommitRefCounts3D,
+								 IMG_BOOL					*pbCommittedRefCountsTA,
+								 IMG_BOOL					*pbCommittedRefCounts3D,
+								 IMG_UINT32					ui32SyncPMRCount,
+								 IMG_UINT32					*paui32SyncPMRFlags,
+								 PMR						**ppsSyncPMRs,
+								 IMG_UINT32					ui32RenderTargetSize,
+								 IMG_UINT32					ui32NumberOfDrawCalls,
+								 IMG_UINT32					ui32NumberOfIndices,
+								 IMG_UINT32					ui32NumberOfMRTs,
+								 IMG_UINT64					ui64DeadlineInus,
+								 IMG_DEV_VIRTADDR			sRobustnessResetReason);
+
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                 PVRSRV_DEVICE_NODE * psDevNode,
+												 RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+												 IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+                                                        IMG_UINT32 *peLastResetReason,
+                                                        IMG_UINT32 *pui32LastResetJobRef);
+
+PVRSRV_ERROR PVRSRVRGXGetPartialRenderCountKM(DEVMEM_MEMDESC *psHWRTDataMemDesc,
+											  IMG_UINT32 *pui32NumPartialRenders);
+
+/* Debug - check if render context is waiting on a fence */
+void CheckForStalledRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client contexts are stalled */
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXTA3D_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtdmtransfer.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtdmtransfer.c
new file mode 100644
index 0000000..cbeabbd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtdmtransfer.c
@@ -0,0 +1,1227 @@
+/*************************************************************************/ /*!
+@File           rgxtdmtransfer.c
+@Title          Device specific TDM transfer queue routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtdmtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_memallocflags.h"
+#include "rgxtimerquery.h"
+#include "rgxhwperf.h"
+#include "htbuffer.h"
+
+#include "pdump_km.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TDM_UFO_DUMP	0
+
+//#define TDM_CHECKPOINT_DEBUG 1
+
+#if defined(TDM_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+typedef struct {
+	RGX_SERVER_COMMON_CONTEXT * psServerCommonContext;
+	IMG_UINT32                  ui32Priority;
+} RGX_SERVER_TQ_TDM_DATA;
+
+
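+/* Server-side state for a TDM transfer queue context. When the driver is
+ * built without the global bridge lock (PVRSRV_USE_BRIDGE_LOCK undefined),
+ * hLock serialises the submit, write-offset-notify and set-priority paths
+ * on this context. */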
+struct _RGX_SERVER_TQ_TDM_CONTEXT_ {
+	PVRSRV_DEVICE_NODE      *psDeviceNode;
+	DEVMEM_MEMDESC          *psFWFrameworkMemDesc;
+	IMG_UINT32              ui32Flags;
+	RGX_SERVER_TQ_TDM_DATA  sTDMData;
+	PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+	DLLIST_NODE             sListNode;
+	SYNC_ADDR_LIST          sSyncAddrListFence;
+	SYNC_ADDR_LIST          sSyncAddrListUpdate;
+	ATOMIC_T                hJobId;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POS_LOCK		hLock;
+#endif
+};
+
+static PVRSRV_ERROR _CreateTDMTransferContext(
+	CONNECTION_DATA         * psConnection,
+	PVRSRV_DEVICE_NODE      * psDeviceNode,
+	DEVMEM_MEMDESC          * psFWMemContextMemDesc,
+	IMG_UINT32                ui32Priority,
+	RGX_COMMON_CONTEXT_INFO * psInfo,
+	RGX_SERVER_TQ_TDM_DATA  * psTDMData)
+{
+	PVRSRV_ERROR eError;
+
+	eError = FWCommonContextAllocate(
+		psConnection,
+		psDeviceNode,
+		REQ_TYPE_TQ_TDM,
+		RGXFWIF_DM_TDM,
+		NULL,
+		0,
+		psFWMemContextMemDesc,
+		NULL,
+		RGX_TQ2D_CCB_SIZE_LOG2,
+		ui32Priority,
+		psInfo,
+		&psTDMData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+	psTDMData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_contextalloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+static PVRSRV_ERROR _DestroyTDMTransferContext(
+	RGX_SERVER_TQ_TDM_DATA  * psTDMData,
+	PVRSRV_DEVICE_NODE      * psDeviceNode,
+	PVRSRV_CLIENT_SYNC_PRIM * psCleanupSync)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(
+		psDeviceNode,
+		psTDMData->psServerCommonContext,
+		psCleanupSync,
+		RGXFWIF_DM_TDM,
+		PDUMP_FLAGS_NONE);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				 __func__,
+				 PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* ... it has, so we can free its resources */
+	FWCommonContextFree(psTDMData->psServerCommonContext);
+	return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXTDMCreateTransferContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
+	CONNECTION_DATA            * psConnection,
+	PVRSRV_DEVICE_NODE         * psDeviceNode,
+	IMG_UINT32                   ui32Priority,
+	IMG_UINT32                   ui32FrameworkCommandSize,
+	IMG_PBYTE                    pabyFrameworkCommand,
+	IMG_HANDLE                   hMemCtxPrivData,
+	RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext)
+{
+	RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext;
+
+	DEVMEM_MEMDESC          * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_COMMON_CONTEXT_INFO   sInfo;
+	PVRSRV_ERROR              eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO        *psDevInfo = psDeviceNode->pvDevice;
+
+	/* Allocate the server side structure */
+	*ppsTransferContext = NULL;
+	psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
+	if (psTransferContext == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psTransferContext->hLock, LOCK_TYPE_NONE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+				__func__,
+				PVRSRVGetErrorStringKM(eError)));
+		goto fail_lockcreate;
+	}
+#endif
+
+	psTransferContext->psDeviceNode = psDeviceNode;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psTransferContext->psCleanupSync,
+						   "transfer context cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto fail_syncalloc;
+	}
+
+	/* 
+	 * Create the FW framework buffer
+	 */
+	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+										&psTransferContext->psFWFrameworkMemDesc,
+										ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to allocate firmware GPU framework state (%u)",
+				eError));
+		goto fail_frameworkcreate;
+	}
+
+	/* Copy the Framework client data into the framework buffer */
+	eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc,
+										   pabyFrameworkCommand,
+										   ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to populate the framework buffer (%u)",
+				eError));
+		goto fail_frameworkcopy;
+	}
+
+	sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+
+	eError = _CreateTDMTransferContext(psConnection,
+									  psDeviceNode,
+									  psFWMemContextMemDesc,
+									  ui32Priority,
+									  &sInfo,
+									  &psTransferContext->sTDMData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_tdmtransfercontext;
+	}
+
+	SyncAddrListInit(&psTransferContext->sSyncAddrListFence);
+	SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate);
+
+	OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+
+	*ppsTransferContext = psTransferContext;
+
+	return PVRSRV_OK;
+
+fail_tdmtransfercontext:
+fail_frameworkcopy:
+	DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+	SyncPrimFree(psTransferContext->psCleanupSync);
+fail_syncalloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psTransferContext->hLock);
+fail_lockcreate:
+#endif
+	OSFreeMem(psTransferContext);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	*ppsTransferContext = NULL;
+	return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+
+	/* remove node from list before calling destroy - as destroy, if successful
+	 * will invalidate the node
+	 * must be re-added if destroy fails
+	 */
+	OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+	dllist_remove_node(&(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+
+
+	eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData,
+	                                    psTransferContext->psDeviceNode,
+	                                    psTransferContext->psCleanupSync);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_destroyTDM;
+	}
+
+	DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+	SyncPrimFree(psTransferContext->psCleanupSync);
+
+	SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence);
+	SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psTransferContext->hLock);
+#endif
+
+	OSFreeMem(psTransferContext);
+
+	return PVRSRV_OK;
+
+  fail_destroyTDM:
+
+	OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+ * PVRSRVRGXTDMSubmitTransferKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
+	RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
+	IMG_UINT32                  ui32PDumpFlags,
+	IMG_UINT32                  ui32ClientCacheOpSeqNum,
+	IMG_UINT32                  ui32ClientFenceCount,
+	SYNC_PRIMITIVE_BLOCK     ** pauiClientFenceUFOSyncPrimBlock,
+	IMG_UINT32                * paui32ClientFenceSyncOffset,
+	IMG_UINT32                * paui32ClientFenceValue,
+	IMG_UINT32                  ui32ClientUpdateCount,
+	SYNC_PRIMITIVE_BLOCK     ** pauiClientUpdateUFOSyncPrimBlock,
+	IMG_UINT32                * paui32ClientUpdateSyncOffset,
+	IMG_UINT32                * paui32ClientUpdateValue,
+	IMG_UINT32                  ui32ServerSyncCount,
+	IMG_UINT32                * paui32ServerSyncFlags,
+	SERVER_SYNC_PRIMITIVE    ** papsServerSyncs,
+	PVRSRV_FENCE                iCheckFence,
+	PVRSRV_TIMELINE             iUpdateTimeline,
+	PVRSRV_FENCE              * piUpdateFence,
+	IMG_CHAR                    szUpdateFenceName[32],
+	IMG_UINT32                  ui32FWCommandSize,
+	IMG_UINT8                 * pui8FWCommand,
+	IMG_UINT32                  ui32ExtJobRef,
+	IMG_UINT32                  ui32SyncPMRCount,
+	IMG_UINT32                * paui32SyncPMRFlags,
+	PMR                      ** ppsSyncPMRs)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+	RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
+	PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress   = NULL;
+	PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress  = NULL;
+	IMG_UINT32        * paui32IntFenceValue      = paui32ClientFenceValue;
+	IMG_UINT32          ui32IntClientFenceCount  = ui32ClientFenceCount;
+	IMG_UINT32        * paui32IntUpdateValue     = paui32ClientUpdateValue;
+	IMG_UINT32          ui32IntClientUpdateCount = ui32ClientUpdateCount;
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eError2;
+	PVRSRV_FENCE iUpdateFence = PVRSRV_FENCE_INVALID;
+	IMG_UINT32 ui32JobId;
+
+	IMG_UINT32 ui32CmdOffset = 0;
+
+	PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+	PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+	PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+	IMG_UINT32               uiCheckFenceUID = 0;
+	IMG_UINT32               uiUpdateFenceUID = 0;
+#if defined(SUPPORT_BUFFER_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	struct pvr_buffer_sync_append_data *psAppendData = NULL;
+#else
+	struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+	PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+	PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+#endif
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	struct pvr_sync_append_data *psFDFenceData = NULL;
+#endif
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+	PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+	IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+	void *pvUpdateFenceFinaliseData = NULL;
+#endif
+	IMG_DEV_VIRTADDR sRobustnessResetReason = {0};
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	if (iUpdateTimeline >= 0 && !piUpdateFence)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#else
+	if (iUpdateTimeline >= 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Providing update timeline (%d) in non-supporting driver",
+			__func__, iUpdateTimeline));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	if (iCheckFence >= 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Providing check fence (%d) in non-supporting driver",
+			__func__, iCheckFence));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#endif
+
+	/* Ensure the string is null-terminated (Required for safety) */
+	szUpdateFenceName[31] = '\0';
+
+	if (ui32SyncPMRCount != 0)
+	{
+		if (!ppsSyncPMRs)
+		{
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psTransferContext->hLock);
+#endif
+
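+	/* Each submission gets a unique job ID, which is passed to the CCB
+	 * helper and the HWPerf/FTrace enqueue events below for correlation. */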
+	ui32JobId = OSAtomicIncrement(&psTransferContext->hJobId);
+
+	/* We can't allocate the required amount of stack space on all consumer architectures */
+	psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA));
+	if (psCmdHelper == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_allochelper;
+	}
+
+
+	/*
+		Init the command helper commands for all the prepares
+	*/
+	{
+		RGX_CLIENT_CCB *psClientCCB;
+		RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx;
+		IMG_CHAR *pszCommandName;
+		RGXFWIF_CCB_CMD_TYPE eType;
+
+		psServerCommonCtx = psTransferContext->sTDMData.psServerCommonContext;
+		psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+		pszCommandName = "TQ-TDM";
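+		/* With no FW command payload, submit a NULL command type so the CCB
+		 * kick (and its fence/update processing) still goes through. */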
+		eType = (ui32FWCommandSize == 0) ? RGXFWIF_CCB_CMD_TYPE_NULL : RGXFWIF_CCB_CMD_TYPE_TQ_TDM;
+
+		eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence,
+		                              ui32ClientFenceCount,
+		                              pauiClientFenceUFOSyncPrimBlock,
+		                              paui32ClientFenceSyncOffset);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_populate_sync_addr_list;
+		}
+		paui32IntFenceValue      = paui32ClientFenceValue;
+		pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+
+		eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate,
+										ui32ClientUpdateCount,
+										pauiClientUpdateUFOSyncPrimBlock,
+										paui32ClientUpdateSyncOffset);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_populate_sync_addr_list;
+		}
+		paui32IntUpdateValue     = paui32ClientUpdateValue;
+		pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+
+
+		if (ui32SyncPMRCount)
+		{
+#if defined(SUPPORT_BUFFER_SYNC)
+			int err;
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+
+			err = pvr_buffer_sync_append_start(psDeviceNode->psBufferSyncContext,
+											   ui32SyncPMRCount,
+											   ppsSyncPMRs,
+											   paui32SyncPMRFlags,
+											   ui32IntClientFenceCount,
+											   pauiIntFenceUFOAddress,
+											   paui32IntFenceValue,
+											   ui32IntClientUpdateCount,
+											   pauiIntUpdateUFOAddress,
+											   paui32IntUpdateValue,
+											   &psAppendData);
+			if (err)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to append buffer syncs (%s)", __func__, PVRSRVGetErrorStringKM(err)));
+				eError = (err == -ENOMEM) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_PARAMS;
+				goto fail_sync_append;
+			}
+
+			pvr_buffer_sync_append_checks_get(psAppendData,
+											  &ui32IntClientFenceCount,
+											  &pauiIntFenceUFOAddress,
+											  &paui32IntFenceValue);
+
+			pvr_buffer_sync_append_updates_get(psAppendData,
+											   &ui32IntClientUpdateCount,
+											   &pauiIntUpdateUFOAddress,
+											   &paui32IntUpdateValue);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Calling pvr_buffer_sync_resolve_and_create_fences", __func__));
+			err = pvr_buffer_sync_resolve_and_create_fences(psDeviceNode->psBufferSyncContext,
+			                                                ui32SyncPMRCount,
+			                                                ppsSyncPMRs,
+			                                                paui32SyncPMRFlags,
+			                                                &ui32BufferFenceSyncCheckpointCount,
+			                                                &apsBufferFenceSyncCheckpoints,
+			                                                &psBufferUpdateSyncCheckpoint,
+			                                                &psBufferSyncData);
+			if (err)
+			{
+				eError = (err == -ENOMEM) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_PARAMS;
+				PVR_DPF((PVR_DBG_ERROR, "%s:   pvr_buffer_sync_resolve_and_create_fences failed (%d)", __func__, eError));
+				goto fail_resolve_input_fence;
+			}
+
+			/* Append buffer sync fences */
+			if (ui32BufferFenceSyncCheckpointCount > 0)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d buffer sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence , (void*)pauiIntFenceUFOAddress));
+				SyncAddrListAppendAndDeRefCheckpoints(&psTransferContext->sSyncAddrListFence,
+													  ui32BufferFenceSyncCheckpointCount,
+													  apsBufferFenceSyncCheckpoints);
+				if (!pauiIntFenceUFOAddress)
+				{
+					pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+				}
+				ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+			}
+
+			if (psBufferUpdateSyncCheckpoint)
+			{
+				/* Append the update (from output fence) */
+				SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+											  1,
+											  &psBufferUpdateSyncCheckpoint);
+				if (!pauiIntUpdateUFOAddress)
+				{
+					pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+				}
+				ui32IntClientUpdateCount++;
+			}
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_populate_sync_addr_list;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	if (iCheckFence >= 0 || iUpdateTimeline >= 0)
+	{
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		eError =
+		  pvr_sync_append_fences(szUpdateFenceName,
+		                               iCheckFence,
+		                               iUpdateTimeline,
+		                               ui32IntClientUpdateCount,
+		                               pauiIntUpdateUFOAddress,
+		                               paui32IntUpdateValue,
+		                               ui32IntClientFenceCount,
+		                               pauiIntFenceUFOAddress,
+		                               paui32IntFenceValue,
+		                               &psFDFenceData);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_syncinit;
+		}
+		pvr_sync_get_updates(psFDFenceData, &ui32IntClientUpdateCount,
+			&pauiIntUpdateUFOAddress, &paui32IntUpdateValue);
+		pvr_sync_get_checks(psFDFenceData, &ui32IntClientFenceCount,
+			&pauiIntFenceUFOAddress, &paui32IntFenceValue);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+		/* Resolve the sync checkpoints that make up the input fence */
+		eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext,
+											iCheckFence,
+											&ui32FenceSyncCheckpointCount,
+											&apsFenceSyncCheckpoints,
+		                                    &uiCheckFenceUID);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_resolve_input_fence;
+		}
+#if defined(TDM_CHECKPOINT_DEBUG)
+		{
+			IMG_UINT32 ii;
+			for (ii=0; ii<32; ii++)
+			{
+				PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints +  ii);
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint)); //psFenceSyncCheckpoints[ii]));
+			}
+		}
+#endif
+		/* Create the output fence (if required) */
+		if (piUpdateFence)
+		{
+			eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode,
+			                                   szUpdateFenceName,
+											   iUpdateTimeline,
+											   psTransferContext->psDeviceNode->hSyncCheckpointContext,
+											   &iUpdateFence,
+											   &uiUpdateFenceUID,
+											   &pvUpdateFenceFinaliseData,
+											   &psUpdateSyncCheckpoint,
+											   (void*)&psFenceTimelineUpdateSync,
+											   &ui32FenceTimelineUpdateValue);
+			if (eError != PVRSRV_OK)
+			{
+				goto fail_create_output_fence;
+			}
+
+			/* Append the sync prim update for the timeline (if required) */
+			if (psFenceTimelineUpdateSync)
+			{
+				IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+				/* Allocate memory to hold the list of update values (including our timeline update) */
+				pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+				if (!pui32IntAllocatedUpdateValues)
+				{
+					/* Failed to allocate memory */
+					eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto fail_alloc_update_values_mem;
+				}
+				OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+				/* Copy the update values into the new memory, then append our timeline update value */
+				OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+				/* Now set the additional update value */
+				pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+				*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+				ui32IntClientUpdateCount++;
+#if defined(TDM_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Now append the timeline sync prim addr to the transfer context update list */
+				SyncAddrListAppendSyncPrim(&psTransferContext->sSyncAddrListUpdate,
+				                           psFenceTimelineUpdateSync);
+#if defined(TDM_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+				paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+			}
+		}
+
+		if (ui32FenceSyncCheckpointCount)
+		{
+			/* Append the checks (from input fence) */
+			if (ui32FenceSyncCheckpointCount > 0)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence));
+#if defined(TDM_CHECKPOINT_DEBUG)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListFence,
+											  ui32FenceSyncCheckpointCount,
+											  apsFenceSyncCheckpoints);
+				if (!pauiIntFenceUFOAddress)
+				{
+					pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+				}
+				ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+			}
+#if defined(TDM_CHECKPOINT_DEBUG)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+		if (psUpdateSyncCheckpoint)
+		{
+			/* Append the update (from output fence) */
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+			SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+										  1,
+										  &psUpdateSyncCheckpoint);
+			if (!pauiIntUpdateUFOAddress)
+			{
+				pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+			}
+			ui32IntClientUpdateCount++;
+#if defined(TDM_CHECKPOINT_DEBUG)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
+#if (ENABLE_TDM_UFO_DUMP == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__));
+		{
+			IMG_UINT32 ii;
+			PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+			IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+			PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+			IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+			/* Dump Fence syncs and Update syncs */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM fence syncs (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+			for (ii=0; ii<ui32IntClientFenceCount; ii++)
+			{
+				if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+					pui32TmpIntFenceValue++;
+				}
+				psTmpIntFenceUFOAddress++;
+			}
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM update syncs (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+			for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+			{
+				if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+					pui32TmpIntUpdateValue++;
+				}
+				psTmpIntUpdateUFOAddress++;
+			}
+		}
+#endif
+
+		RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice,
+		                          & pPreAddr,
+		                          & pPostAddr,
+		                          & pRMWUFOAddr);
+
+		/*
+			Create the command helper data for this command
+		*/
+		eError = RGXCmdHelperInitCmdCCB(psClientCCB,
+		                                ui32IntClientFenceCount,
+		                                pauiIntFenceUFOAddress,
+		                                paui32IntFenceValue,
+		                                ui32IntClientUpdateCount,
+		                                pauiIntUpdateUFOAddress,
+		                                paui32IntUpdateValue,
+		                                ui32ServerSyncCount,
+		                                paui32ServerSyncFlags,
+		                                SYNC_FLAG_MASK_ALL,
+		                                papsServerSyncs,
+		                                ui32FWCommandSize,
+		                                pui8FWCommand,
+		                                & pPreAddr,
+		                                & pPostAddr,
+		                                & pRMWUFOAddr,
+		                                eType,
+		                                ui32ExtJobRef,
+		                                ui32JobId,
+		                                ui32PDumpFlags,
+		                                NULL,
+		                                pszCommandName,
+		                                psCmdHelper,
+										sRobustnessResetReason);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_initcmd;
+		}
+	}
+
+	/*
+		Acquire space for all the commands in one go
+	*/
+	
+	eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_3dcmdacquire;
+	}
+
+
+	/*
+		We should acquire the kernel CCB(s) space here as the schedule could fail
+		and we would have to roll back all the syncs
+	*/
+
+	/*
+		Only do the command helper release (which takes the server sync
+		operations) if the acquire succeeded.
+	*/
+	ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+	RGXCmdHelperReleaseCmdCCB(1,
+	                          psCmdHelper,
+	                          "TQ_TDM",
+	                          FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr);
+
+
+	/*
+		Even if we failed to acquire the client CCB space we might still need
+		to kick the HW to process a padding packet to release space for us next
+		time round
+	*/
+	{
+		RGXFWIF_KCCB_CMD sTDMKCCBCmd;
+
+		/* Construct the kernel 3D CCB command. */
+		sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+		sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+		sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+		/* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */
+		/* 		sTDMKCCBCmd.uCmdData.sCmdKickData.psContext, */
+		/* 		ui32CmdOffset); */
+		RGX_HWPERF_HOST_ENQ(psTransferContext,
+	                        OSGetCurrentClientProcessIDKM(),
+		                    FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr,
+		                    ui32ExtJobRef,
+		                    ui32JobId,
+	                        RGX_HWPERF_KICK_TYPE_TQTDM,
+		                    uiCheckFenceUID,
+		                    uiUpdateFenceUID,
+		                    NO_DEADLINE,
+		                    NO_CYCEST);
+
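+		/* RGXScheduleCommand may transiently return PVRSRV_ERROR_RETRY (for
+		 * example while the kernel CCB has no free space), so retry with a
+		 * short wait until the overall hardware timeout expires. */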
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+										RGXFWIF_DM_TDM,
+			                            & sTDMKCCBCmd,
+										sizeof(sTDMKCCBCmd),
+										ui32ClientCacheOpSeqNum,
+										ui32PDumpFlags);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+		RGXHWPerfFTraceGPUEnqueueEvent(psDeviceNode->pvDevice,
+ 			FWCommonContextGetFWAddress(psTransferContext->
+ 				sTDMData.psServerCommonContext).ui32Addr,
+			ui32JobId, RGX_HWPERF_KICK_TYPE_TQTDM);
+#endif
+	}
+
+	/*
+	 * Now check eError (which may have returned an error from our earlier calls
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+	 * so we check it now...
+	 */
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_2dcmdacquire;
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (iUpdateTimeline >= 0)
+	{
+		/* If we get here, this should never fail; hitting it likely implies
+		 * a programming error above. */
+		iUpdateFence = pvr_sync_get_update_fd(psFDFenceData);
+		if (iUpdateFence < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get install update sync fd",
+				__func__));
+			/* If we fail here, we cannot rollback the syncs as the hw already
+			 * has references to resources they may be protecting in the kick
+			 * so fallthrough */
+
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_free_append_data;
+		}
+	}
+#if defined(NO_HARDWARE)
+	pvr_sync_nohw_complete_fences(psFDFenceData);
+#endif
+	/*
+		Free the merged sync memory if required
+	*/
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#else /* defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if defined(NO_HARDWARE)
+	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+	if (psUpdateSyncCheckpoint)
+	{
+		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+	}
+	if (psFenceTimelineUpdateSync)
+	{
+		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+	}
+	SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (psAppendData)
+	{
+		pvr_buffer_sync_append_finish(psAppendData);
+	}
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	if (psBufferSyncData)
+	{
+		pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+	}
+	if (apsBufferFenceSyncCheckpoints)
+	{
+		kfree(apsBufferFenceSyncCheckpoints);
+	}
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+	if (piUpdateFence)
+	{
+		*piUpdateFence = iUpdateFence;
+	}
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_FENCE_INVALID))
+	{
+		SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+#endif
+
+	OSFreeMem(psCmdHelper);
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+/*
+	No resources are created in this function so there is nothing to free
+	unless we had to merge syncs.
+	If we fail after the client CCB acquire there is still nothing to do
+	as only the client CCB release will modify the client CCB
+*/
+fail_2dcmdacquire:
+fail_3dcmdacquire:
+
+fail_initcmd:
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListFence);
+	SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+#endif
+
+/* fail_pdumpcheck: */
+/* fail_cmdtype: */
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) &&!defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+fail_syncinit:
+	/* Cleanup is relocated here because the loop could fail after the first
+	 * iteration at the goto tags above, by which point the psFDFenceData
+	 * memory would already have been allocated.
+	 */
+	pvr_sync_rollback_append_fences(psFDFenceData);
+fail_free_append_data:
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#else /* defined(SUPPORT_NATIVE_FENCE_SYNC) &&!defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	if (iUpdateFence != PVRSRV_FENCE_INVALID)
+	{
+		SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+fail_create_output_fence:
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+fail_resolve_input_fence:
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+#if defined(SUPPORT_BUFFER_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	pvr_buffer_sync_append_abort(psAppendData);
+fail_sync_append:
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	if (psBufferSyncData)
+	{
+		pvr_buffer_sync_kick_failed(psBufferSyncData);
+	}
+	if (apsBufferFenceSyncCheckpoints)
+	{
+		kfree(apsBufferFenceSyncCheckpoints);
+	}
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+fail_populate_sync_addr_list:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	OSFreeMem(psCmdHelper);
+fail_allochelper:
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC) || (defined(SUPPORT_NATIVE_FENCE_SYNC) && defined(PVRSRV_USE_SYNC_CHECKPOINTS))
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+#endif
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+	IMG_UINT32                 ui32PDumpFlags)
+{
+	RGXFWIF_KCCB_CMD  sKCCBCmd;
+	PVRSRV_ERROR      eError;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psTransferContext->hLock);
+#endif
+
+	/* Schedule the firmware command */
+	sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+	sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice,
+		                            RGXFWIF_DM_TDM,
+		                            &sKCCBCmd,
+		                            sizeof(sKCCBCmd),
+		                            0,
+		                            ui32PDumpFlags);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXTDMNotifyWriteOffsetUpdateKM: Failed to schedule the FW command %d (%s)",
+				eError, PVRSRVGETERRORSTRING(eError)));
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                      PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                      RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+                                                      IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psTransferContext->hLock);
+#endif
+
+	if (psTransferContext->sTDMData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext,
+									psConnection,
+									psTransferContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_TDM);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __func__, PVRSRVGetErrorStringKM(eError)));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			/* Don't leak the context lock on the error path */
+			OSLockRelease(psTransferContext->hLock);
+#endif
+			return eError;
+		}
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return PVRSRV_OK;
+}
+
+void CheckForStalledTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	DLLIST_NODE *psNode, *psNext;
+
+	OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+		DumpStalledFWCommonContext(psCurrentServerTransferCtx->sTDMData.psServerCommonContext,
+		                           pfnDumpDebugPrintf, pvDumpDebugFile);
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+}
+
+
+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_UINT32 ui32ContextBitMask = 0;
+
+	OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+		if (CheckStalledClientCommonContext(
+		        psCurrentServerTransferCtx->sTDMData.psServerCommonContext,
+		        RGX_KICK_TYPE_DM_TDM_2D) == PVRSRV_ERROR_CCCB_STALLED)
+		{
+			ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D;
+		}
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+	return ui32ContextBitMask;
+}
+
+/**************************************************************************//**
+ End of file (rgxtdmtransfer.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtdmtransfer.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtdmtransfer.h
new file mode 100644
index 0000000..85711b7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtdmtransfer.h
@@ -0,0 +1,122 @@
+/*************************************************************************/ /*!
+@File           rgxtdmtransfer.h
+@Title          RGX Transfer queue 2 Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Transfer queue Functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTDMTRANSFER_H__)
+#define __RGXTDMTRANSFER_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "connection_server.h"
+
+typedef struct _RGX_SERVER_TQ_TDM_CONTEXT_ RGX_SERVER_TQ_TDM_CONTEXT;
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
+	CONNECTION_DATA           * psConnection,
+	PVRSRV_DEVICE_NODE        * psDeviceNode,
+	IMG_UINT32                  ui32Priority,
+	IMG_UINT32                  ui32FrameworkCommandSize,
+	IMG_PBYTE                   pabyFrameworkCommand,
+	IMG_HANDLE                  hMemCtxPrivData,
+	RGX_SERVER_TQ_TDM_CONTEXT **ppsTransferContext);
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext);
+
+
+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
+	RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
+	IMG_UINT32                  ui32PDumpFlags,
+	IMG_UINT32                  ui32ClientCacheOpSeqNum,
+	IMG_UINT32                  ui32ClientFenceCount,
+	SYNC_PRIMITIVE_BLOCK     ** pauiClientFenceUFOSyncPrimBlock,
+	IMG_UINT32                * paui32ClientFenceSyncOffset,
+	IMG_UINT32                * paui32ClientFenceValue,
+	IMG_UINT32                  ui32ClientUpdateCount,
+	SYNC_PRIMITIVE_BLOCK     ** pauiClientUpdateUFOSyncPrimBlock,
+	IMG_UINT32                * paui32ClientUpdateSyncOffset,
+	IMG_UINT32                * paui32ClientUpdateValue,
+	IMG_UINT32                  ui32ServerSyncCount,
+	IMG_UINT32                * paui32ServerSyncFlags,
+	SERVER_SYNC_PRIMITIVE    ** papsServerSyncs,
+	PVRSRV_FENCE                iCheckFence,
+	PVRSRV_TIMELINE             iUpdateTimeline,
+	PVRSRV_FENCE              * piUpdateFence,
+	IMG_CHAR                    szUpdateFenceName[32],
+	IMG_UINT32                  ui32FWCommandSize,
+	IMG_UINT8                 * pui8FWCommand,
+	IMG_UINT32                  ui32ExtJobRef,
+	IMG_UINT32                  ui32SyncPMRCount,
+	IMG_UINT32                * pui32SyncPMRFlags,
+	PMR                      ** ppsSyncPMRs);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+	IMG_UINT32                 ui32PDumpFlags);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                      PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                      RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+                                                      IMG_UINT32 ui32Priority);
+
+/* Debug - check if transfer context is waiting on a fence */
+void CheckForStalledTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client transfer contexts are stalled */
+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+
+#endif /* __RGXTDMTRANSFER_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtimecorr.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtimecorr.c
new file mode 100644
index 0000000..77e23fd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtimecorr.c
@@ -0,0 +1,501 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific time correlation and calibration routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific time correlation and calibration routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxtimecorr.h"
+#include "rgxfwutils.h"
+#include "htbserver.h"
+#include "pvrsrv_apphint.h"
+
+/******************************************************************************
+ *
+ * - A calibration period is started on power-on and after a DVFS transition,
+ *   and it's closed before a power-off and before a DVFS transition
+ *   (so power-on -> dvfs -> dvfs -> power-off, power-on -> dvfs -> dvfs...,
+ *   where each arrow is a calibration period)
+ *
+ * - The timers on the Host and on the FW are correlated at the beginning of
+ *   each period together with the (possibly calibrated) current GPU frequency
+ *
+ * - If the frequency has not changed since the last power-off/on sequence or
+ *   across a DVFS transition (i.e. the transition didn't really happen),
+ *   then multiple consecutive periods are merged (the longer the merged
+ *   period, the better the accuracy of the computed clock speed)
+ *
+ * - Correlation and calibration are also done more or less periodically
+ *   (using a best effort approach)
+ *
+ *****************************************************************************/
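+
+/* A minimal sketch (illustrative only, not part of the driver) of the
+ * arithmetic behind the calibration: the clock speed is the ratio of the
+ * CR timer delta to the OS timer delta accumulated over a period. Assuming
+ * the CR timer ticked at the GPU clock rate and an OS delta in microseconds,
+ * a plain-C equivalent would be:
+ *
+ *   static IMG_UINT32 ExampleCalibratedHz(IMG_UINT64 ui64CRDeltaTicks,
+ *                                         IMG_UINT64 ui64OSDeltaUs)
+ *   {
+ *       // Hz = ticks per second; multiply first to keep integer precision
+ *       return (IMG_UINT32)((ui64CRDeltaTicks * 1000000ULL) / ui64OSDeltaUs);
+ *   }
+ *
+ * The driver itself uses RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ, which
+ * additionally accounts for the CR timer's tick-to-clock scaling.
+ */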
+
+static IMG_UINT32 g_ui32ClockSource = PVRSRV_APPHINT_TIMECORRCLOCK;
+
+/*
+	AppHint interfaces
+*/
+
+/* Forward declarations */
+static void _RGXGPUFreqCalibratePreClockSourceChange(IMG_HANDLE hDevHandle);
+static void _RGXGPUFreqCalibratePostClockSourceChange(IMG_HANDLE hDevHandle);
+
+
+static PVRSRV_ERROR _SetClock(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const void *psPrivate,
+                              IMG_UINT32 ui32Value)
+{
+	static const IMG_CHAR *apszClocks[] = {
+		"mono", "mono_raw", "sched"
+	};
+
+	if (ui32Value >= RGXTIMECORR_CLOCK_LAST)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Invalid clock source type (%u)", ui32Value));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	_RGXGPUFreqCalibratePreClockSourceChange((PVRSRV_DEVICE_NODE *) psDeviceNode);
+
+	PVR_DPF((PVR_DBG_WARNING, "Setting time correlation clock from \"%s\" to \"%s\"",
+			apszClocks[g_ui32ClockSource],
+			apszClocks[ui32Value]));
+
+	g_ui32ClockSource = ui32Value;
+
+	_RGXGPUFreqCalibratePostClockSourceChange((PVRSRV_DEVICE_NODE *) psDeviceNode);
+
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+	PVR_UNREFERENCED_PARAMETER(apszClocks);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _GetClock(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const void *psPrivate,
+                              IMG_UINT32 *pui32Value)
+{
+	*pui32Value = g_ui32ClockSource;
+
+	PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+	return PVRSRV_OK;
+}
+
+void RGXGPUFreqCalibrationInitAppHintCallbacks(
+                                         const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_TimeCorrClock, _GetClock,
+	                                    _SetClock, psDeviceNode, NULL);
+}
+
+/*
+	End of AppHint interface
+*/
+
+IMG_UINT64 RGXGPUFreqCalibrateClockns64(void)
+{
+	IMG_UINT64 ui64Clock;
+
+	switch (g_ui32ClockSource) {
+		case RGXTIMECORR_CLOCK_MONO:
+			return ((void) OSClockMonotonicns64(&ui64Clock), ui64Clock);
+		case RGXTIMECORR_CLOCK_MONO_RAW:
+			return OSClockMonotonicRawns64();
+		case RGXTIMECORR_CLOCK_SCHED:
+			return OSClockns64();
+		default:
+			PVR_ASSERT(IMG_FALSE);
+			return 0;
+	}
+}
+
+IMG_UINT64 RGXGPUFreqCalibrateClockus64(void)
+{
+	IMG_UINT32 rem;
+	return OSDivide64r64(RGXGPUFreqCalibrateClockns64(), 1000, &rem);
+}
+
+void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+							RGXFWIF_TIME_CORR *psTimeCorrs,
+							IMG_UINT32 ui32NumOut)
+{
+	PVRSRV_RGXDEV_INFO    *psDevInfo     = psDeviceNode->pvDevice;
+	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	IMG_UINT32 ui32CurrentIndex = psGpuUtilFWCB->ui32TimeCorrSeqCount;
+
+	while (ui32NumOut--)
+	{
+		*(psTimeCorrs++) = psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32CurrentIndex)];
+		ui32CurrentIndex--;
+	}
+}
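+
+/* Usage sketch (hypothetical caller, not taken from this driver): fetch the
+ * two most recent correlation points, newest first, e.g. to bracket a
+ * firmware timestamp:
+ *
+ *   RGXFWIF_TIME_CORR asCorr[2];
+ *   RGXGetTimeCorrData(psDeviceNode, asCorr, 2);
+ *   // asCorr[0] is the current entry, asCorr[1] the one before it
+ */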
+
+static void _RGXMakeTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_BOOL bLogToHTB)
+{
+	PVRSRV_RGXDEV_INFO    *psDevInfo     = psDeviceNode->pvDevice;
+	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	RGX_GPU_DVFS_TABLE    *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+	RGXFWIF_TIME_CORR     *psTimeCorr;
+	IMG_UINT32            ui32NewSeqCount;
+	IMG_UINT32            ui32CoreClockSpeed;
+	IMG_UINT32            ui32Remainder;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	IMG_UINT64            ui64OSMonoTime = 0;
+#endif
+
+	ui32CoreClockSpeed = psGpuDVFSTable->aui32DVFSClock[psGpuDVFSTable->ui32CurrentDVFSId];
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	{
+		PVRSRV_ERROR eError;
+		eError = OSClockMonotonicns64(&ui64OSMonoTime);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"_RGXMakeTimeCorrData: System Monotonic Clock not available."));
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+	}
+#endif
+
+	ui32NewSeqCount = psGpuUtilFWCB->ui32TimeCorrSeqCount + 1;
+	psTimeCorr = &psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)];
+
+	psTimeCorr->ui64CRTimeStamp     = RGXReadHWTimerReg(psDevInfo);
+	psTimeCorr->ui64OSTimeStamp     = RGXGPUFreqCalibrateClockns64();
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	psTimeCorr->ui64OSMonoTimeStamp = ui64OSMonoTime;
+#endif
+	psTimeCorr->ui32CoreClockSpeed  = ui32CoreClockSpeed;
+	psTimeCorr->ui64CRDeltaToOSDeltaKNs =
+	    RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS(ui32CoreClockSpeed, ui32Remainder);
+
+	/* Make sure the values are written to memory before updating the index of the current entry */
+	OSWriteMemoryBarrier();
+
+	/* Update the index of the current entry in the timer correlation array */
+	psGpuUtilFWCB->ui32TimeCorrSeqCount = ui32NewSeqCount;
+
+	PVR_DPF((PVR_DBG_MESSAGE,"RGXMakeTimeCorrData: Correlated OS timestamp %" IMG_UINT64_FMTSPEC " (ns) with CR timestamp %" IMG_UINT64_FMTSPEC ", GPU clock speed %uHz",
+	         psTimeCorr->ui64OSTimeStamp, psTimeCorr->ui64CRTimeStamp, psTimeCorr->ui32CoreClockSpeed));
+
+	HTBSyncScale(
+		bLogToHTB,
+		psTimeCorr->ui64OSTimeStamp,
+		psTimeCorr->ui64CRTimeStamp,
+		psTimeCorr->ui32CoreClockSpeed);
+}
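+
+/* Sketch of how a consumer could apply the entry written above to translate
+ * a raw CR timestamp into OS time. Assumption: ui64CRDeltaToOSDeltaKNs is a
+ * fixed-point ns-per-CR-tick factor, as the "KNs" suffix and the accuracy
+ * shift in the FW interface macros suggest:
+ *
+ *   IMG_UINT64 ExampleCRToOSTimeNs(const RGXFWIF_TIME_CORR *psCorr,
+ *                                  IMG_UINT64 ui64CRTime)
+ *   {
+ *       IMG_UINT64 ui64Delta = ui64CRTime - psCorr->ui64CRTimeStamp;
+ *       return psCorr->ui64OSTimeStamp +
+ *           ((ui64Delta * psCorr->ui64CRDeltaToOSDeltaKNs)
+ *               >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT);
+ *   }
+ */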
+
+
+static void _RGXGPUFreqCalibrationPeriodStart(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo         = psDeviceNode->pvDevice;
+	RGX_DATA           *psRGXData         = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+	IMG_UINT32         ui32CoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+	IMG_UINT32         ui32Index          = RGX_GPU_DVFS_GET_INDEX(ui32CoreClockSpeed);
+
+	IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo);
+	IMG_UINT64 ui64OSTimestamp = RGXGPUFreqCalibrateClockus64();
+
+	psGpuDVFSTable->ui64CalibrationCRTimestamp = ui64CRTimestamp;
+	psGpuDVFSTable->ui64CalibrationOSTimestamp = ui64OSTimestamp;
+
+	/* Set the time needed to (re)calibrate the GPU frequency */
+	if ((psGpuDVFSTable->aui32DVFSClock[ui32Index] == 0) ||                /* We have never seen this frequency */
+	    (psGpuDVFSTable->aui32DVFSClock[ui32Index] == ui32CoreClockSpeed)) /* We could not calibrate this frequency previously */
+	{
+		psGpuDVFSTable->aui32DVFSClock[ui32Index] = ui32CoreClockSpeed;
+		psGpuDVFSTable->ui32CalibrationPeriod     = RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US;
+
+		PVR_DPF((PVR_DBG_MESSAGE, "_RGXGPUFreqCalibrationPeriodStart: using uncalibrated GPU frequency %u", ui32CoreClockSpeed));
+	}
+	else if (psGpuDVFSTable->ui32CalibrationPeriod == RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US)
+	{
+		psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US;
+	}
+	else
+	{
+		psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US;
+	}
+
+	/* Update the index to the DVFS table */
+	psGpuDVFSTable->ui32CurrentDVFSId = ui32Index;
+}
+
+
+static void _RGXGPUFreqCalibrationPeriodStop(PVRSRV_DEVICE_NODE *psDeviceNode,
+											 RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo);
+	IMG_UINT64 ui64OSTimestamp = RGXGPUFreqCalibrateClockus64();
+
+	if (!psGpuDVFSTable->bAccumulatePeriod)
+	{
+		psGpuDVFSTable->ui64CalibrationCRTimediff = 0;
+		psGpuDVFSTable->ui64CalibrationOSTimediff = 0;
+	}
+
+	psGpuDVFSTable->ui64CalibrationCRTimediff +=
+	    ui64CRTimestamp - psGpuDVFSTable->ui64CalibrationCRTimestamp;
+	psGpuDVFSTable->ui64CalibrationOSTimediff +=
+	    ui64OSTimestamp - psGpuDVFSTable->ui64CalibrationOSTimestamp;
+}
+
+
+static IMG_UINT32 _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                  RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+#if !defined(NO_HARDWARE)
+	IMG_UINT32 ui32CalibratedClockSpeed;
+	IMG_UINT32 ui32Remainder;
+
+	ui32CalibratedClockSpeed =
+	    RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(psGpuDVFSTable->ui64CalibrationCRTimediff,
+	                                       psGpuDVFSTable->ui64CalibrationOSTimediff,
+	                                       ui32Remainder);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "GPU frequency calibration: %u -> %u done over %" IMG_UINT64_FMTSPEC " us",
+	         psGpuDVFSTable->aui32DVFSClock[psGpuDVFSTable->ui32CurrentDVFSId],
+	         ui32CalibratedClockSpeed,
+	         psGpuDVFSTable->ui64CalibrationOSTimediff));
+
+	psGpuDVFSTable->aui32DVFSClock[psGpuDVFSTable->ui32CurrentDVFSId] = ui32CalibratedClockSpeed;
+
+	/* Reset time deltas to avoid recalibrating the same frequency over and over again */
+	psGpuDVFSTable->ui64CalibrationCRTimediff = 0;
+	psGpuDVFSTable->ui64CalibrationOSTimediff = 0;
+
+	return ui32CalibratedClockSpeed;
+#else
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+	return psGpuDVFSTable->aui32DVFSClock[psGpuDVFSTable->ui32CurrentDVFSId];
+#endif
+}
+
+
+static void _RGXGPUFreqCalibratePreClockSourceChange(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE  *psDeviceNode   = hDevHandle;
+	PVRSRV_RGXDEV_INFO  *psDevInfo      = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	_RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+
+	if (psGpuDVFSTable->ui64CalibrationOSTimediff >= psGpuDVFSTable->ui32CalibrationPeriod)
+	{
+		_RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable);
+	}
+}
+
+
+static void _RGXGPUFreqCalibratePostClockSourceChange(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE  *psDeviceNode      = hDevHandle;
+	PVRSRV_RGXDEV_INFO  *psDevInfo         = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable    = psDevInfo->psGpuDVFSTable;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	/* Frequency has not changed, accumulate the time diffs to get a better result */
+	psGpuDVFSTable->bAccumulatePeriod = IMG_TRUE;
+
+	_RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+
+	/* Update the timer correlation data */
+	_RGXMakeTimeCorrData(psDeviceNode, IMG_TRUE);
+}
+
+
+/*
+	RGXGPUFreqCalibratePrePowerOff
+*/
+void RGXGPUFreqCalibratePrePowerOff(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE  *psDeviceNode   = hDevHandle;
+	PVRSRV_RGXDEV_INFO  *psDevInfo      = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	_RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+
+	if (psGpuDVFSTable->ui64CalibrationOSTimediff >= psGpuDVFSTable->ui32CalibrationPeriod)
+	{
+		_RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable);
+	}
+}
+
+
+/*
+	RGXGPUFreqCalibratePostPowerOn
+*/
+void RGXGPUFreqCalibratePostPowerOn(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE  *psDeviceNode      = hDevHandle;
+	PVRSRV_RGXDEV_INFO  *psDevInfo         = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable    = psDevInfo->psGpuDVFSTable;
+	RGX_DATA            *psRGXData         = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+	IMG_UINT32          ui32CoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	/* If the frequency hasn't changed then accumulate the time diffs to get a better result */
+	psGpuDVFSTable->bAccumulatePeriod =
+	    (RGX_GPU_DVFS_GET_INDEX(ui32CoreClockSpeed) == psGpuDVFSTable->ui32CurrentDVFSId);
+
+	_RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+
+	/* Update the timer correlation data */
+	/* Don't log timing data to the HTB log post power transition.
+	 * Otherwise this will be logged before the HTB partition marker, breaking
+	 * the log sync grammar. This data will be automatically repeated when the
+	 * partition marker is written
+	 */
+	_RGXMakeTimeCorrData(psDeviceNode, IMG_FALSE);
+}
+
+
+/*
+	RGXGPUFreqCalibratePreClockSpeedChange
+*/
+void RGXGPUFreqCalibratePreClockSpeedChange(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE  *psDeviceNode   = hDevHandle;
+	PVRSRV_RGXDEV_INFO  *psDevInfo      = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	_RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+
+	/* Defer any action to RGXGPUFreqCalibratePostClockSpeedChange(), as the
+	 * GPU frequency may be left unchanged (in that case we delay
+	 * calibration/correlation to get a better result later) */
+}
+
+
+/*
+	RGXGPUFreqCalibratePostClockSpeedChange
+*/
+IMG_UINT32 RGXGPUFreqCalibratePostClockSpeedChange(IMG_HANDLE hDevHandle, IMG_UINT32 ui32NewClockSpeed)
+{
+	PVRSRV_DEVICE_NODE  *psDeviceNode          = hDevHandle;
+	PVRSRV_RGXDEV_INFO  *psDevInfo             = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable        = psDevInfo->psGpuDVFSTable;
+	IMG_UINT32          ui32ReturnedClockSpeed = ui32NewClockSpeed;
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, ui32NewClockSpeed);
+
+	if (RGX_GPU_DVFS_GET_INDEX(ui32NewClockSpeed) != psGpuDVFSTable->ui32CurrentDVFSId)
+	{
+		/* Only calibrate if the last period was long enough */
+		if (psGpuDVFSTable->ui64CalibrationOSTimediff >= RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US)
+		{
+			ui32ReturnedClockSpeed = _RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable);
+		}
+
+		_RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+
+		/* Update the timer correlation data */
+		_RGXMakeTimeCorrData(psDeviceNode, IMG_TRUE);
+		psGpuDVFSTable->bAccumulatePeriod = IMG_FALSE;
+	}
+	else
+	{
+		psGpuDVFSTable->bAccumulatePeriod = IMG_TRUE;
+	}
+
+	return ui32ReturnedClockSpeed;
+}
+
+
+/*
+	RGXGPUFreqCalibrateCorrelatePeriodic
+*/
+void RGXGPUFreqCalibrateCorrelatePeriodic(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE     *psDeviceNode   = hDevHandle;
+	PVRSRV_RGXDEV_INFO     *psDevInfo      = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE     *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+	IMG_UINT64             ui64TimeNow     = RGXGPUFreqCalibrateClockus64();
+	PVRSRV_DEV_POWER_STATE ePowerState;
+	PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+	/* Check if it's the right time to recalibrate the GPU clock frequency */
+	if ((ui64TimeNow - psGpuDVFSTable->ui64CalibrationOSTimestamp) < psGpuDVFSTable->ui32CalibrationPeriod)
+	{
+		return;
+	}
+
+	/* Try to acquire the power lock; if it's unavailable, don't wait */
+	if (!OSTryLockAcquire(psDeviceNode->hPowerLock))
+	{
+		return;
+	}
+
+	/* If the GPU is off then we can't do anything */
+	PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+	if (ePowerState != PVRSRV_DEV_POWER_STATE_ON)
+	{
+		PVRSRVPowerUnlock(psDeviceNode);
+		return;
+	}
+
+	/* All checks passed, we can calibrate and correlate */
+	_RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+	_RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable);
+	_RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+	_RGXMakeTimeCorrData(psDeviceNode, IMG_TRUE);
+
+	PVRSRVPowerUnlock(psDeviceNode);
+}
+
+/*
+	RGXGPUFreqCalibrateGetClockSource
+*/
+RGXTIMECORR_CLOCK_TYPE RGXGPUFreqCalibrateGetClockSource(void)
+{
+	return g_ui32ClockSource;
+}
+
+/*
+	RGXGPUFreqCalibrateSetClockSource
+*/
+PVRSRV_ERROR RGXGPUFreqCalibrateSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               RGXTIMECORR_CLOCK_TYPE eClockType)
+{
+	return _SetClock(psDeviceNode, NULL, eClockType);
+}
+
+
+/******************************************************************************
+ End of file (rgxtimecorr.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtimecorr.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtimecorr.h
new file mode 100644
index 0000000..eaf7015
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtimecorr.h
@@ -0,0 +1,209 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX time correlation and calibration header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX time correlation and calibration routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTIMECORR_H__)
+#define __RGXTIMECORR_H__
+
+#include "img_types.h"
+#include "device.h"
+
+typedef enum {
+    RGXTIMECORR_CLOCK_MONO,
+    RGXTIMECORR_CLOCK_MONO_RAW,
+    RGXTIMECORR_CLOCK_SCHED,
+
+    RGXTIMECORR_CLOCK_LAST
+} RGXTIMECORR_CLOCK_TYPE;
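+
+/* These enum values double as the TimeCorrClock AppHint values: 0 selects
+ * the monotonic clock, 1 the raw monotonic clock and 2 the scheduler clock,
+ * matching the apszClocks[] names ("mono", "mono_raw", "sched") used by
+ * _SetClock() in rgxtimecorr.c. */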
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibratePrePowerOff
+
+ @Description Manage GPU frequency and timer correlation data
+              before a power off.
+
+ @Input       hDevHandle : RGX Device Node
+
+ @Return      void
+
+******************************************************************************/
+void RGXGPUFreqCalibratePrePowerOff(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibratePostPowerOn
+
+ @Description Manage GPU frequency and timer correlation data
+              after a power on.
+
+ @Input       hDevHandle : RGX Device Node
+
+ @Return      void
+
+******************************************************************************/
+void RGXGPUFreqCalibratePostPowerOn(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibratePreClockSpeedChange
+
+ @Description Manage GPU frequency and timer correlation data
+              before a DVFS transition.
+
+ @Input       hDevHandle : RGX Device Node
+
+ @Return      void
+
+******************************************************************************/
+void RGXGPUFreqCalibratePreClockSpeedChange(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibratePostClockSpeedChange
+
+ @Description Manage GPU frequency and timer correlation data
+              after a DVFS transition.
+
+ @Input       hDevHandle        : RGX Device Node
+ @Input       ui32NewClockSpeed : GPU clock speed after the DVFS transition
+
+ @Return      IMG_UINT32 : Calibrated GPU clock speed after the DVFS transition
+
+******************************************************************************/
+IMG_UINT32 RGXGPUFreqCalibratePostClockSpeedChange(IMG_HANDLE hDevHandle, IMG_UINT32 ui32NewClockSpeed);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibrateCorrelatePeriodic
+
+ @Description Calibrate the GPU clock speed and correlate the timers
+              at regular intervals.
+
+ @Input       hDevHandle : RGX Device Node
+
+ @Return      void
+
+******************************************************************************/
+void RGXGPUFreqCalibrateCorrelatePeriodic(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibrateClockns64
+
+ @Description Returns value of currently selected clock (in ns).
+
+ @Return      clock value from currently selected clock source
+
+******************************************************************************/
+IMG_UINT64 RGXGPUFreqCalibrateClockns64(void);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibrateClockus64
+
+ @Description Returns value of currently selected clock (in us).
+
+ @Return      clock value from currently selected clock source
+
+******************************************************************************/
+IMG_UINT64 RGXGPUFreqCalibrateClockus64(void);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibrateClockSource
+
+ @Description Returns currently selected clock source
+
+ @Return      clock source type
+
+******************************************************************************/
+RGXTIMECORR_CLOCK_TYPE RGXGPUFreqCalibrateGetClockSource(void);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibrateSetClockSource
+
+ @Description Sets clock source for correlation data.
+
+ @Input       psDeviceNode : RGX Device Node
+ @Input       eClockType : clock source type
+
+ @Return      error code
+
+******************************************************************************/
+PVRSRV_ERROR RGXGPUFreqCalibrateSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               RGXTIMECORR_CLOCK_TYPE eClockType);
+
+void RGXGPUFreqCalibrationInitAppHintCallbacks(
+                                        const PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+/*!
+******************************************************************************
+
+ @Function    RGXGetTimeCorrData
+
+ @Description Get a number of the most recent time correlation data points
+
+ @Input       psDeviceNode : RGX Device Node
+ @Output      psTimeCorrs  : Output array of RGXFWIF_TIME_CORR elements
+                             for data to be written to
+ @Input       ui32NumOut   : Number of elements to be written out
+
+ @Return      void
+
+******************************************************************************/
+void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+							RGXFWIF_TIME_CORR *psTimeCorrs,
+							IMG_UINT32 ui32NumOut);
+
+#endif /* __RGXTIMECORR_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtimerquery.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtimerquery.c
new file mode 100644
index 0000000..bb4f4a5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtimerquery.c
@@ -0,0 +1,229 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Timer queries
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Timer queries
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxtimerquery.h"
+#include "rgxdevice.h"
+#include "rgxtimecorr.h"
+
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+
+PVRSRV_ERROR
+PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA    * psConnection,
+                           PVRSRV_DEVICE_NODE * psDeviceNode,
+                           IMG_UINT32         ui32QueryId)
+{
+	PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (ui32QueryId >= RGX_MAX_TIMER_QUERIES)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo->bSaveStart = IMG_TRUE;
+	psDevInfo->bSaveEnd   = IMG_TRUE;
+
+	/* clear the stamps, in case there is no Kick */
+	psDevInfo->pui64StartTimeById[ui32QueryId] = 0UL;
+	psDevInfo->pui64EndTimeById[ui32QueryId]   = 0UL;
+
+	/* save the active query index */
+	psDevInfo->ui32ActiveQueryId = ui32QueryId;
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA    * psConnection,
+                         PVRSRV_DEVICE_NODE * psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/* Clear the flags set by Begin(). Note that bSaveStart has probably
+	 * already been cleared by Kick().
+	 */
+	psDevInfo->bSaveStart = IMG_FALSE;
+	psDevInfo->bSaveEnd   = IMG_FALSE;
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXQueryTimerKM(CONNECTION_DATA    * psConnection,
+                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                      IMG_UINT32         ui32QueryId,
+                      IMG_UINT64         * pui64StartTime,
+                      IMG_UINT64         * pui64EndTime)
+{
+	PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+	IMG_UINT32         ui32Scheduled;
+	IMG_UINT32         ui32Completed;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (ui32QueryId >= RGX_MAX_TIMER_QUERIES)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	ui32Scheduled = psDevInfo->aui32ScheduledOnId[ui32QueryId];
+	ui32Completed = psDevInfo->pui32CompletedById[ui32QueryId];
+
+	/* If there has been no kick since Begin() on this ID we return zeros, as
+	 * Begin() cleared the stamps. If Begin() was never called the returned
+	 * data is undefined, but still safe from the services' point of view.
+	 */
+	if (ui32Completed >= ui32Scheduled)
+	{
+		*pui64StartTime = psDevInfo->pui64StartTimeById[ui32QueryId];
+		*pui64EndTime   = psDevInfo->pui64EndTimeById[ui32QueryId];
+
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+	}
+}
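+
+/* Usage sketch (hypothetical, not from this driver): a caller can poll until
+ * the completed counter catches up with the scheduled counter for the query:
+ *
+ *   IMG_UINT64 ui64Start, ui64End;
+ *   while (PVRSRVRGXQueryTimerKM(psConnection, psDeviceNode, 0,
+ *                                &ui64Start, &ui64End)
+ *          == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+ *   {
+ *       OSSleepms(1); // back off while the GPU retires outstanding kicks
+ *   }
+ */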
+
+
+PVRSRV_ERROR
+PVRSRVRGXCurrentTime(CONNECTION_DATA    * psConnection,
+                     PVRSRV_DEVICE_NODE * psDeviceNode,
+                     IMG_UINT64         * pui64Time)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+	*pui64Time = RGXGPUFreqCalibrateClockns64();
+
+	return PVRSRV_OK;
+}
+
+
+
+/******************************************************************************
+ NOT BRIDGED/EXPORTED FUNCS
+******************************************************************************/
+/* writes a time stamp command in the client CCB */
+void
+RGXWriteTimestampCommand(IMG_PBYTE               * ppbyPtr,
+                         RGXFWIF_CCB_CMD_TYPE    eCmdType,
+                         PRGXFWIF_TIMESTAMP_ADDR pAddr)
+{
+	RGXFWIF_CCB_CMD_HEADER * psHeader;
+
+	psHeader = (RGXFWIF_CCB_CMD_HEADER *) (*ppbyPtr);
+
+	PVR_ASSERT(eCmdType == RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP
+	           || eCmdType == RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP);
+
+	psHeader->eCmdType    = eCmdType;
+	psHeader->ui32CmdSize = (sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1);
+
+	(*ppbyPtr) += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+	(*(PRGXFWIF_TIMESTAMP_ADDR*)*ppbyPtr) = pAddr;
+
+	(*ppbyPtr) += psHeader->ui32CmdSize;
+}
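+
+/* The ui32CmdSize rounding above is the standard power-of-two align-up
+ * idiom. For example, if RGXFWIF_FWALLOC_ALIGN were 16 and the payload
+ * 4 bytes: (4 + 16 - 1) & ~(16 - 1) == 19 & ~15 == 16, so the CCB write
+ * pointer always advances to an aligned boundary.
+ */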
+
+
+void
+RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO      * psDevInfo,
+                          PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr,
+                          PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr,
+                          PRGXFWIF_UFO_ADDR       * ppUpdate)
+{
+	if (ppPreAddr != NULL)
+	{
+		if (psDevInfo->bSaveStart)
+		{
+			/* drop the SaveStart on the first Kick */
+			psDevInfo->bSaveStart = IMG_FALSE;
+
+			RGXSetFirmwareAddress(ppPreAddr,
+			                      psDevInfo->psStartTimeMemDesc,
+			                      sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId,
+			                      RFW_FWADDR_NOREF_FLAG);
+		}
+		else
+		{
+			ppPreAddr->ui32Addr = 0;
+		}
+	}
+
+	if (ppPostAddr != NULL && ppUpdate != NULL)
+	{
+		if (psDevInfo->bSaveEnd)
+		{
+			RGXSetFirmwareAddress(ppPostAddr,
+			                      psDevInfo->psEndTimeMemDesc,
+			                      sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId,
+			                      RFW_FWADDR_NOREF_FLAG);
+
+			psDevInfo->aui32ScheduledOnId[psDevInfo->ui32ActiveQueryId]++;
+
+			RGXSetFirmwareAddress(ppUpdate,
+			                      psDevInfo->psCompletedMemDesc,
+			                      sizeof(IMG_UINT32) * psDevInfo->ui32ActiveQueryId,
+			                      RFW_FWADDR_NOREF_FLAG);
+		}
+		else
+		{
+			ppUpdate->ui32Addr   = 0;
+			ppPostAddr->ui32Addr = 0;
+		}
+	}
+}
+
+
+/******************************************************************************
+ End of file (rgxtimerquery.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtimerquery.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtimerquery.h
new file mode 100644
index 0000000..fe65f5f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtimerquery.h
@@ -0,0 +1,135 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Timer queries
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Timer queries functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_RGX_TIMERQUERIES_H_)
+#define _RGX_TIMERQUERIES_H_
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "rgxdevice.h"
+
+#include "connection_server.h"
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXBeginTimerQuery
+@Description    Opens a new timer query.
+
+@Input          ui32QueryId an identifier in the range [0, RGX_MAX_TIMER_QUERIES - 1]
+@Return         PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA    * psConnection,
+                           PVRSRV_DEVICE_NODE * psDeviceNode,
+                           IMG_UINT32         ui32QueryId);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXEndTimerQuery
+@Description    Closes a timer query
+
+                The absence of a ui32QueryId argument reflects the fact that
+                overlapping queries cannot be open.
+@Return         PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA    * psConnection,
+                         PVRSRV_DEVICE_NODE * psDeviceNode);
+
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXQueryTimer
+@Description    Queries the state of the specified timer
+
+@Input          ui32QueryId an identifier in the range [0, RGX_MAX_TIMER_QUERIES - 1]
+@Out            pui64StartTime
+@Out            pui64EndTime
+@Return         PVRSRV_OK                         on success.
+                PVRSRV_ERROR_RESOURCE_UNAVAILABLE if the device is still busy with
+                                                  operations from the queried period
+                other error code                  otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXQueryTimerKM(CONNECTION_DATA    * psConnection,
+                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                      IMG_UINT32         ui32QueryId,
+                      IMG_UINT64         * pui64StartTime,
+                      IMG_UINT64         * pui64EndTime);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXCurrentTime
+@Description    Returns the current state of the timer used in timer queries
+@Input          psDeviceNode  Device node.
+@Out            pui64Time
+@Return         PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXCurrentTime(CONNECTION_DATA    * psConnection,
+                     PVRSRV_DEVICE_NODE * psDeviceNode,
+                     IMG_UINT64         * pui64Time);
+
+
+/******************************************************************************
+ NON BRIDGED/EXPORTED interface
+******************************************************************************/
+
+/* write the timestamp cmd from the helper */
+void
+RGXWriteTimestampCommand(IMG_PBYTE               * ppui8CmdPtr,
+                         RGXFWIF_CCB_CMD_TYPE    eCmdType,
+                         PRGXFWIF_TIMESTAMP_ADDR pAddr);
+
+/* get the relevant data from the Kick to the helper */
+void
+RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO      * psDevInfo,
+                          PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr,
+                          PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr,
+                          PRGXFWIF_UFO_ADDR       * ppUpdate);
+
+#endif /* _RGX_TIMERQUERIES_H_ */
+
+/******************************************************************************
+ End of file (rgxtimerquery.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtransfer.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtransfer.c
new file mode 100644
index 0000000..24c2ffa
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtransfer.c
@@ -0,0 +1,1633 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific transfer queue routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_memallocflags.h"
+#include "rgxtimerquery.h"
+#include "rgxhwperf.h"
+#include "htbuffer.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_bvnc_defs_km.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+#endif
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TQ_UFO_DUMP	0
+
+//#define TRANSFER_CHECKPOINT_DEBUG 1
+
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
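+
+/* CHKPT_DBG() expands to PVR_DPF() only when TRANSFER_CHECKPOINT_DEBUG is
+ * defined above; otherwise the calls compile away, so the verbose checkpoint
+ * traces below cost nothing in normal builds.
+ */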
+
+typedef struct {
+	DEVMEM_MEMDESC				*psFWContextStateMemDesc;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+} RGX_SERVER_TQ_3D_DATA;
+
+
+typedef struct {
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+} RGX_SERVER_TQ_2D_DATA;
+
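+/* A transfer context bundles an optional TQ/2D (TLA) server common context
+ * and a TQ/3D one; the RGX_SERVER_TQ_CONTEXT_FLAGS_* bits below record which
+ * of the two were actually created for this device.
+ */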
+struct _RGX_SERVER_TQ_CONTEXT_ {
+	PVRSRV_DEVICE_NODE			*psDeviceNode;
+	DEVMEM_MEMDESC				*psFWFrameworkMemDesc;
+	IMG_UINT32					ui32Flags;
+#define RGX_SERVER_TQ_CONTEXT_FLAGS_2D		(1<<0)
+#define RGX_SERVER_TQ_CONTEXT_FLAGS_3D		(1<<1)
+	RGX_SERVER_TQ_3D_DATA		s3DData;
+	RGX_SERVER_TQ_2D_DATA		s2DData;
+	PVRSRV_CLIENT_SYNC_PRIM		*psCleanupSync;
+	DLLIST_NODE					sListNode;
+	ATOMIC_T			hJobId;
+	IMG_UINT32			ui32PDumpFlags;
+	/* per-prepare sync address lists */
+	SYNC_ADDR_LIST			asSyncAddrListFence[TQ_MAX_PREPARES_PER_SUBMIT];
+	SYNC_ADDR_LIST			asSyncAddrListUpdate[TQ_MAX_PREPARES_PER_SUBMIT];
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	POS_LOCK				hLock;
+#endif
+};
+
+/*
+	Static functions used by transfer context code
+*/
+static PVRSRV_ERROR _Create3DTransferContext(CONNECTION_DATA *psConnection,
+											 PVRSRV_DEVICE_NODE *psDeviceNode,
+											 DEVMEM_MEMDESC *psFWMemContextMemDesc,
+											 IMG_UINT32 ui32Priority,
+											 RGX_COMMON_CONTEXT_INFO *psInfo,
+											 RGX_SERVER_TQ_3D_DATA *ps3DData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR eError;
+
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	*/
+	PDUMPCOMMENT("Allocate RGX firmware TQ/3D context suspend state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_3DCTX_STATE),
+							RGX_FWCOMCTX_ALLOCFLAGS,
+							"FwTQ3DContext",
+							&ps3DData->psFWContextStateMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextswitchstate;
+	}
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 REQ_TYPE_TQ_3D,
+									 RGXFWIF_DM_3D,
+									 NULL,
+									 0,
+									 psFWMemContextMemDesc,
+									 ps3DData->psFWContextStateMemDesc,
+									 RGX_TQ3D_CCB_SIZE_LOG2,
+									 ui32Priority,
+									 psInfo,
+									 &ps3DData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+
+	PDUMPCOMMENT("Dump 3D context suspend state buffer");
+	DevmemPDumpLoadMem(ps3DData->psFWContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS);
+
+	ps3DData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_contextalloc:
+	DevmemFwFree(psDevInfo, ps3DData->psFWContextStateMemDesc);
+fail_contextswitchstate:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+static PVRSRV_ERROR _Create2DTransferContext(CONNECTION_DATA *psConnection,
+											 PVRSRV_DEVICE_NODE *psDeviceNode,
+											 DEVMEM_MEMDESC *psFWMemContextMemDesc,
+											 IMG_UINT32 ui32Priority,
+											 RGX_COMMON_CONTEXT_INFO *psInfo,
+											 RGX_SERVER_TQ_2D_DATA *ps2DData)
+{
+	PVRSRV_ERROR eError;
+
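+	/* Unlike the 3D path, no FW context suspend-state buffer is allocated
+	 * for TQ/2D, hence NULL is passed for the context state below.
+	 */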
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 REQ_TYPE_TQ_2D,
+									 RGXFWIF_DM_2D,
+									 NULL,
+									 0,
+									 psFWMemContextMemDesc,
+									 NULL,
+									 RGX_TQ2D_CCB_SIZE_LOG2,
+									 ui32Priority,
+									 psInfo,
+									 &ps2DData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+	ps2DData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_contextalloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+static PVRSRV_ERROR _Destroy2DTransferContext(RGX_SERVER_TQ_2D_DATA *ps2DData,
+											  PVRSRV_DEVICE_NODE *psDeviceNode,
+											  PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync,
+											  IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+											  ps2DData->psServerCommonContext,
+											  psCleanupSync,
+											  RGXFWIF_DM_2D,
+											  ui32PDumpFlags);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
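+		/* RETRY means the FW is still using the context; the caller is
+		 * expected to try the destroy again later.
+		 */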
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				 __func__,
+				 PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* ... it has so we can free its resources */
+	FWCommonContextFree(ps2DData->psServerCommonContext);
+	ps2DData->psServerCommonContext = NULL;
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData,
+											  PVRSRV_DEVICE_NODE *psDeviceNode,
+											  PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync,
+											  IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+											  ps3DData->psServerCommonContext,
+											  psCleanupSync,
+											  RGXFWIF_DM_3D,
+											  ui32PDumpFlags);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				 __func__,
+				 PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* ... it has so we can free its resources */
+	DevmemFwFree(psDeviceNode->pvDevice, ps3DData->psFWContextStateMemDesc);
+	FWCommonContextFree(ps3DData->psServerCommonContext);
+	ps3DData->psServerCommonContext = NULL;
+	return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVCreateTransferContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA		*psConnection,
+										   PVRSRV_DEVICE_NODE		*psDeviceNode,
+										   IMG_UINT32				ui32Priority,
+										   IMG_UINT32				ui32FrameworkCommandSize,
+										   IMG_PBYTE				pabyFrameworkCommand,
+										   IMG_HANDLE				hMemCtxPrivData,
+										   RGX_SERVER_TQ_CONTEXT	**ppsTransferContext)
+{
+	RGX_SERVER_TQ_CONTEXT	*psTransferContext;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC			*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_COMMON_CONTEXT_INFO	sInfo;
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+
+	/* Allocate the server side structure */
+	*ppsTransferContext = NULL;
+	psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
+	if (psTransferContext == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	eError = OSLockCreate(&psTransferContext->hLock, LOCK_TYPE_NONE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+									__func__,
+									PVRSRVGetErrorStringKM(eError)));
+		goto fail_createlock;
+	}
+#endif
+
+	psTransferContext->psDeviceNode = psDeviceNode;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psTransferContext->psCleanupSync,
+						   "transfer context cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto fail_syncalloc;
+	}
+
+	/*
+	 * Create the FW framework buffer
+	 */
+	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+										&psTransferContext->psFWFrameworkMemDesc,
+										ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to allocate firmware GPU framework state (%u)",
+				eError));
+		goto fail_frameworkcreate;
+	}
+
+	/* Copy the Framework client data into the framework buffer */
+	eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc,
+										   pabyFrameworkCommand,
+										   ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to populate the framework buffer (%u)",
+				eError));
+		goto fail_frameworkcopy;
+	}
+
+	sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+
+	eError = _Create3DTransferContext(psConnection,
+									  psDeviceNode,
+									  psFWMemContextMemDesc,
+									  ui32Priority,
+									  &sInfo,
+									  &psTransferContext->s3DData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_3dtransfercontext;
+	}
+	psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_3D;
+
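+	/* TQ/2D requires the TLA hardware, which is an optional RGX feature;
+	 * only create the 2D context when the feature bit is present.
+	 */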
+	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK)
+	{
+		eError = _Create2DTransferContext(psConnection,
+										  psDeviceNode,
+										  psFWMemContextMemDesc,
+										  ui32Priority,
+										  &sInfo,
+										  &psTransferContext->s2DData);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_2dtransfercontext;
+		}
+		psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_2D;
+	}
+
+	OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+
+	*ppsTransferContext = psTransferContext;
+
+	return PVRSRV_OK;
+
+
+fail_2dtransfercontext:
+	/* This label is only reachable when the TLA feature is present and the
+	 * 3D context has already been created, so tear the 3D context down
+	 * unconditionally.
+	 */
+	_Destroy3DTransferContext(&psTransferContext->s3DData,
+							  psTransferContext->psDeviceNode,
+							  psTransferContext->psCleanupSync,
+							  psTransferContext->ui32PDumpFlags);
+
+fail_3dtransfercontext:
+fail_frameworkcopy:
+	DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+	SyncPrimFree(psTransferContext->psCleanupSync);
+fail_syncalloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psTransferContext->hLock);
+fail_createlock:
+#endif
+	OSFreeMem(psTransferContext);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	*ppsTransferContext = NULL;
+	return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+	IMG_UINT32 i;
+
+	/* remove node from list before calling destroy - as destroy, if successful
+	 * will invalidate the node
+	 * must be re-added if destroy fails
+	 */
+	OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+	dllist_remove_node(&(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+
+	if ((psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && \
+			(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+	{
+		eError = _Destroy2DTransferContext(&psTransferContext->s2DData,
+										   psTransferContext->psDeviceNode,
+										   psTransferContext->psCleanupSync,
+										   PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_destroy2d;
+		}
+		/* We've freed the 2D context, don't try to free it again */
+		psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_2D;
+	}
+
+	if (psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D)
+	{
+		eError = _Destroy3DTransferContext(&psTransferContext->s3DData,
+										   psTransferContext->psDeviceNode,
+										   psTransferContext->psCleanupSync,
+										   PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_destroy3d;
+		}
+		/* We've freed the 3D context, don't try to free it again */
+		psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_3D;
+	}
+
+	/* free any resources within the per-prepare UFO address stores */
+	for (i = 0; i < TQ_MAX_PREPARES_PER_SUBMIT; i++)
+	{
+		SyncAddrListDeinit(&psTransferContext->asSyncAddrListFence[i]);
+		SyncAddrListDeinit(&psTransferContext->asSyncAddrListUpdate[i]);
+	}
+
+	DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+	SyncPrimFree(psTransferContext->psCleanupSync);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockDestroy(psTransferContext->hLock);
+#endif
+
+	OSFreeMem(psTransferContext);
+
+	return PVRSRV_OK;
+
+fail_destroy3d:
+
+fail_destroy2d:
+	OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+ * PVRSRVSubmitTQ3DKickKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT	*psTransferContext,
+									   IMG_UINT32				ui32ClientCacheOpSeqNum,
+									   IMG_UINT32				ui32PrepareCount,
+									   IMG_UINT32				*paui32ClientFenceCount,
+									   SYNC_PRIMITIVE_BLOCK		***papauiClientFenceUFOSyncPrimBlock,
+									   IMG_UINT32				**papaui32ClientFenceSyncOffset,
+									   IMG_UINT32				**papaui32ClientFenceValue,
+									   IMG_UINT32				*paui32ClientUpdateCount,
+									   SYNC_PRIMITIVE_BLOCK		***papauiClientUpdateUFOSyncPrimBlock,
+									   IMG_UINT32				**papaui32ClientUpdateSyncOffset,
+									   IMG_UINT32				**papaui32ClientUpdateValue,
+									   IMG_UINT32				*paui32ServerSyncCount,
+									   IMG_UINT32				**papaui32ServerSyncFlags,
+									   SERVER_SYNC_PRIMITIVE	***papapsServerSyncs,
+									   PVRSRV_FENCE				iCheckFence,
+									   PVRSRV_TIMELINE			iUpdateTimeline,
+									   PVRSRV_FENCE				*piUpdateFence,
+									   IMG_CHAR					szFenceName[32],
+									   IMG_UINT32				*paui32FWCommandSize,
+									   IMG_UINT8				**papaui8FWCommand,
+									   IMG_UINT32				*pui32TQPrepareFlags,
+									   IMG_UINT32				ui32ExtJobRef,
+									   IMG_UINT32				ui32SyncPMRCount,
+									   IMG_UINT32				*paui32SyncPMRFlags,
+									   PMR						**ppsSyncPMRs)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelper;
+	RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper;
+	IMG_UINT32 ui323DCmdCount = 0;
+	IMG_UINT32 ui322DCmdCount = 0;
+	IMG_UINT32 ui323DCmdOffset = 0;
+	IMG_UINT32 ui322DCmdOffset = 0;
+	IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_NONE;
+	IMG_UINT32 i;
+	IMG_UINT32 ui32IntClientFenceCount = 0;
+	IMG_UINT32 *paui32IntFenceValue = NULL;
+	IMG_UINT32 ui32IntClientUpdateCount = 0;
+	IMG_UINT32 *paui32IntUpdateValue = NULL;
+	SYNC_ADDR_LIST *psSyncAddrListFence;
+	SYNC_ADDR_LIST *psSyncAddrListUpdate;
+	IMG_UINT32               uiCheckFenceUID = 0;
+	IMG_UINT32               uiUpdateFenceUID = 0;
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	struct pvr_sync_append_data *psFDFenceData = NULL;
+#endif
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+	PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+	IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+	PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+	IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+	PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+	void *pvUpdateFenceFinaliseData = NULL;
+#if defined(SUPPORT_BUFFER_SYNC)
+	struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+	PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+	IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eError2;
+	PVRSRV_FENCE iUpdateFence = PVRSRV_FENCE_INVALID;
+	IMG_UINT32 ui32JobId;
+
+	PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+	PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+	PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+
+#if defined(SUPPORT_BUFFER_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	struct pvr_buffer_sync_append_data *psAppendData = NULL;
+#endif
+
+	IMG_DEV_VIRTADDR sRobustnessResetReason = {0};
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	if (iUpdateTimeline >= 0 && !piUpdateFence)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#else /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+	if (iUpdateTimeline >= 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Providing update timeline (%d) in non-supporting driver",
+			__func__, iUpdateTimeline));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	if (iCheckFence >= 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Providing check fence (%d) in non-supporting driver",
+			__func__, iCheckFence));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
+	/* Ensure the string is null-terminated (Required for safety) */
+	szFenceName[31] = '\0';
+
+	if ((ui32PrepareCount == 0) || (ui32PrepareCount > TQ_MAX_PREPARES_PER_SUBMIT))
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32SyncPMRCount != 0)
+	{
+		if (!ppsSyncPMRs)
+		{
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+#if defined(SUPPORT_BUFFER_SYNC)
+		/* PMR syncs are only valid when there is no batching; without
+		 * SUPPORT_BUFFER_SYNC the braced block below is unconditional and
+		 * any PMR sync request is rejected.
+		 */
+		if (ui32PrepareCount != 1)
+#endif
+		{
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+
+	if (iCheckFence >= 0 || iUpdateTimeline >= 0)
+	{
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+		/* Fence FDs are only valid in the 3D case with no batching */
+		if ((ui32PrepareCount != 1) && (!TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[0], 3D)))
+		{
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+#else /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+		/* Timelines/Fences are unsupported */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psTransferContext->hLock);
+#endif
+
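+	/* The job id is unique per kick on this context and is passed through to
+	 * HWPerf/FTrace so host and firmware events can be correlated.
+	 */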
+	ui32JobId = OSAtomicIncrement(&psTransferContext->hJobId);
+
+	/* We can't allocate the required amount of stack space on all consumer architectures */
+	pas3DCmdHelper = OSAllocMem(sizeof(*pas3DCmdHelper) * ui32PrepareCount);
+	if (pas3DCmdHelper == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc3dhelper;
+	}
+	pas2DCmdHelper = OSAllocMem(sizeof(*pas2DCmdHelper) * ui32PrepareCount);
+	if (pas2DCmdHelper == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc2dhelper;
+	}
+
+	/*
+		Ensure we do the right thing for server syncs which cross call boundaries
+	*/
+	for (i=0;i<ui32PrepareCount;i++)
+	{
+		IMG_BOOL bHaveStartPrepare = pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_START;
+		IMG_BOOL bHaveEndPrepare = IMG_FALSE;
+
+		if (bHaveStartPrepare)
+		{
+			IMG_UINT32 k;
+			/*
+				We're at the start of a transfer operation (which might be made
+				up of multiple HW operations) so check whether we also have the
+				end of the transfer operation in this batch
+			*/
+			for (k=i;k<ui32PrepareCount;k++)
+			{
+				if (pui32TQPrepareFlags[k] & TQ_PREP_FLAGS_END)
+				{
+					bHaveEndPrepare = IMG_TRUE;
+					break;
+				}
+			}
+
+			if (!bHaveEndPrepare)
+			{
+				/*
+					We don't have the complete command passed in this call
+					so drop the update request. When we get called again with
+					the last HW command in this transfer operation we'll do
+					the update at that point.
+				*/
+				for (k=0;k<paui32ServerSyncCount[i];k++)
+				{
+					papaui32ServerSyncFlags[i][k] &= ~PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE;
+				}
+			}
+		}
+	}
+
+
+	/*
+		Init the command helper commands for all the prepares
+	*/
+	for (i=0;i<ui32PrepareCount;i++)
+	{
+		RGX_CLIENT_CCB *psClientCCB;
+		RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx;
+		IMG_CHAR *pszCommandName;
+		RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
+		RGXFWIF_CCB_CMD_TYPE eType;
+		PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+		PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+
+		if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D))
+		{
+			psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext;
+			psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+			pszCommandName = "TQ-3D";
+			psCmdHelper = &pas3DCmdHelper[ui323DCmdCount++];
+			eType = RGXFWIF_CCB_CMD_TYPE_TQ_3D;
+		}
+		else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D) && \
+				(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+		{
+			psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext;
+			psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+			pszCommandName = "TQ-2D";
+			psCmdHelper = &pas2DCmdHelper[ui322DCmdCount++];
+			eType = RGXFWIF_CCB_CMD_TYPE_TQ_2D;
+		}
+		else
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_cmdtype;
+		}
+
+		if (i == 0)
+		{
+			ui32PDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE;
+			PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+					"%s Command Server Submit on FWCtx %08x", pszCommandName, FWCommonContextGetFWAddress(psServerCommonCtx).ui32Addr);
+			psTransferContext->ui32PDumpFlags |= ui32PDumpFlags;
+		}
+		else
+		{
+			IMG_UINT32 ui32NewPDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE;
+			if (ui32NewPDumpFlags != ui32PDumpFlags)
+			{
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				PVR_DPF((PVR_DBG_ERROR, "%s: Mixing of continuous and non-continuous command in a batch is not permitted", __func__));
+				goto fail_pdumpcheck;
+			}
+		}
+
+		psSyncAddrListFence = &psTransferContext->asSyncAddrListFence[i];
+		ui32IntClientFenceCount  = paui32ClientFenceCount[i];
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->sSyncAddrListFence, %d fences)", __func__, ui32IntClientFenceCount));
+		eError = SyncAddrListPopulate(psSyncAddrListFence,
+										ui32IntClientFenceCount,
+										papauiClientFenceUFOSyncPrimBlock[i],
+										papaui32ClientFenceSyncOffset[i]);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_populate_sync_addr_list_fence;
+		}
+		if (!pauiIntFenceUFOAddress)
+		{
+			pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
+		}
+
+		paui32IntFenceValue      = papaui32ClientFenceValue[i];
+		psSyncAddrListUpdate = &psTransferContext->asSyncAddrListUpdate[i];
+		ui32IntClientUpdateCount = paui32ClientUpdateCount[i];
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->asSyncAddrListUpdate[], %d updates)", __func__, ui32IntClientUpdateCount));
+		eError = SyncAddrListPopulate(psSyncAddrListUpdate,
+										ui32IntClientUpdateCount,
+										papauiClientUpdateUFOSyncPrimBlock[i],
+										papaui32ClientUpdateSyncOffset[i]);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_populate_sync_addr_list_update;
+		}
+		if (!pauiIntUpdateUFOAddress)
+		{
+			pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+		}
+		paui32IntUpdateValue     = papaui32ClientUpdateValue[i];
+
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after sync prims) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+		if (ui32SyncPMRCount)
+		{
+#if defined(SUPPORT_BUFFER_SYNC)
+			int err;
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+
+			err = pvr_buffer_sync_append_start(psDeviceNode->psBufferSyncContext,
+											   ui32SyncPMRCount,
+											   ppsSyncPMRs,
+											   paui32SyncPMRFlags,
+											   ui32IntClientFenceCount,
+											   pauiIntFenceUFOAddress,
+											   paui32IntFenceValue,
+											   ui32IntClientUpdateCount,
+											   pauiIntUpdateUFOAddress,
+											   paui32IntUpdateValue,
+											   &psAppendData);
+			if (err)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to append buffer syncs (errno=%d)", __func__, err));
+				eError = (err == -ENOMEM) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_PARAMS;
+				goto fail_sync_append;
+			}
+
+			pvr_buffer_sync_append_checks_get(psAppendData,
+											  &ui32IntClientFenceCount,
+											  &pauiIntFenceUFOAddress,
+											  &paui32IntFenceValue);
+
+			pvr_buffer_sync_append_updates_get(psAppendData,
+											   &ui32IntClientUpdateCount,
+											   &pauiIntUpdateUFOAddress,
+											   &paui32IntUpdateValue);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Calling pvr_buffer_sync_resolve_and_create_fences", __func__));
+			err = pvr_buffer_sync_resolve_and_create_fences(psDeviceNode->psBufferSyncContext,
+			                                                ui32SyncPMRCount,
+			                                                ppsSyncPMRs,
+			                                                paui32SyncPMRFlags,
+			                                                &ui32BufferFenceSyncCheckpointCount,
+			                                                &apsBufferFenceSyncCheckpoints,
+			                                                &psBufferUpdateSyncCheckpoint,
+			                                                &psBufferSyncData);
+			if (err)
+			{
+				eError = (err == -ENOMEM) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_PARAMS;
+				PVR_DPF((PVR_DBG_ERROR, "%s:   pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorStringKM(eError)));
+				goto fail_resolve_input_fence;
+			}
+
+			/* Append buffer sync fences */
+			if (ui32BufferFenceSyncCheckpointCount > 0)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d buffer sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)psSyncAddrListFence , (void*)pauiIntFenceUFOAddress));
+				SyncAddrListAppendAndDeRefCheckpoints(psSyncAddrListFence,
+													  ui32BufferFenceSyncCheckpointCount,
+													  apsBufferFenceSyncCheckpoints);
+				if (!pauiIntFenceUFOAddress)
+				{
+					pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
+				}
+				ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+			}
+
+			if (psBufferUpdateSyncCheckpoint)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 buffer sync checkpoint<%p> to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)psBufferUpdateSyncCheckpoint, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+				/* Append the update (from output fence) */
+				SyncAddrListAppendCheckpoints(psSyncAddrListUpdate,
+											  1,
+											  &psBufferUpdateSyncCheckpoint);
+				if (!pauiIntUpdateUFOAddress)
+				{
+					pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+				}
+				ui32IntClientUpdateCount++;
+			}
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after buffer_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount));
+			PVR_DPF((PVR_DBG_ERROR, "%s:   <--EXIT(%d)", __func__, PVRSRV_ERROR_INVALID_PARAMS));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+			OSLockRelease(psTransferContext->hLock);
+#endif
+			return PVRSRV_ERROR_INVALID_PARAMS;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+		}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	if (iCheckFence >= 0 || iUpdateTimeline >= 0)
+	{
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		eError =
+		  pvr_sync_append_fences(szFenceName,
+		                               iCheckFence,
+		                               iUpdateTimeline,
+		                               ui32IntClientUpdateCount,
+		                               pauiIntUpdateUFOAddress,
+		                               paui32IntUpdateValue,
+		                               ui32IntClientFenceCount,
+		                               pauiIntFenceUFOAddress,
+		                               paui32IntFenceValue,
+		                               &psFDFenceData);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_syncinit;
+		}
+		pvr_sync_get_updates(psFDFenceData, &ui32IntClientUpdateCount,
+			&pauiIntUpdateUFOAddress, &paui32IntUpdateValue);
+		pvr_sync_get_checks(psFDFenceData, &ui32IntClientFenceCount,
+			&pauiIntFenceUFOAddress, &paui32IntFenceValue);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psTransferContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psTransferContext->psDeviceNode->hSyncCheckpointContext));
+		/* Resolve the sync checkpoints that make up the input fence */
+		eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext,
+											iCheckFence,
+											&ui32FenceSyncCheckpointCount,
+											&apsFenceSyncCheckpoints,
+		                                    &uiCheckFenceUID);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError));
+			goto fail_resolve_input_fence;
+		}
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+		if (ui32FenceSyncCheckpointCount > 0)
+		{
+			IMG_UINT32 ii;
+			for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+			{
+				PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints +  ii);
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+			}
+		}
+#endif
+		/* Create the output fence (if required) */
+		if (piUpdateFence)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d,  psTranserContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __func__, iUpdateFence, iUpdateTimeline, (void*)psTransferContext->psDeviceNode->hSyncCheckpointContext));
+			eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode,
+			                                   szFenceName,
+											   iUpdateTimeline,
+											   psTransferContext->psDeviceNode->hSyncCheckpointContext,
+											   &iUpdateFence,
+											   &uiUpdateFenceUID,
+											   &pvUpdateFenceFinaliseData,
+											   &psUpdateSyncCheckpoint,
+											   (void*)&psFenceTimelineUpdateSync,
+											   &ui32FenceTimelineUpdateValue);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s:   SyncCheckpointCreateFence failed (%d)", __func__, eError));
+				goto fail_create_output_fence;
+			}
+
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (iUpdateFence=%d)", __func__, iUpdateFence));
+
+			/* Append the sync prim update for the timeline (if required) */
+			if (psFenceTimelineUpdateSync)
+			{
+				IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+				/* Allocate memory to hold the list of update values (including our timeline update) */
+				pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+				if (!pui32IntAllocatedUpdateValues)
+				{
+					/* Failed to allocate memory */
+					eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto fail_alloc_update_values_mem;
+				}
+				OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+				if (psBufferUpdateSyncCheckpoint)
+				{
+					/* Copy the update values into the new memory, then append our timeline update value */
+					OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount-1));
+					pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + (ui32IntClientUpdateCount-1);
+				}
+				else
+				{
+					/* Copy the update values into the new memory, then append our timeline update value */
+					OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+					pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+				}
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value 0x%x)", __func__, ui32FenceTimelineUpdateValue));
+				/* Now set the additional update value */
+				*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+				if (ui32IntClientUpdateCount > 0)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Now append the timeline sync prim addr to the transfer context update list */
+				SyncAddrListAppendSyncPrim(psSyncAddrListUpdate,
+				                           psFenceTimelineUpdateSync);
+				ui32IntClientUpdateCount++;
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+				if (ui32IntClientUpdateCount > 0)
+				{
+					IMG_UINT32 iii;
+					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+					{
+						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+						pui32Tmp++;
+					}
+				}
+#endif
+				/* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: set paui32IntUpdateValue<%p> to point to pui32IntAllocatedUpdateValues<%p>", __func__, (void*)paui32IntUpdateValue, (void*)pui32IntAllocatedUpdateValues));
+				paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+			}
+		}
+
+		if (ui32FenceSyncCheckpointCount)
+		{
+			/* Append the checks (from input fence) */
+			if (ui32FenceSyncCheckpointCount > 0)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)psSyncAddrListFence));
+				SyncAddrListAppendCheckpoints(psSyncAddrListFence,
+											  ui32FenceSyncCheckpointCount,
+											  apsFenceSyncCheckpoints);
+				if (!pauiIntFenceUFOAddress)
+				{
+					pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
+				}
+				ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+			}
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+			if (ui32IntClientFenceCount > 0)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+				for (iii=0; iii<ui32IntClientFenceCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: psSyncAddrListFence->pasFWAddrs[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+		if (psUpdateSyncCheckpoint)
+		{
+			/* Append the update (from output fence) */
+			CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to TQ Update (psSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->asSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+			SyncAddrListAppendCheckpoints(psSyncAddrListUpdate,
+										  1,
+										  &psUpdateSyncCheckpoint);
+			if (!pauiIntUpdateUFOAddress)
+			{
+				pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+			}
+			ui32IntClientUpdateCount++;
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+			{
+				IMG_UINT32 iii;
+				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+					pui32Tmp++;
+				}
+			}
+#endif
+		}
+		CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+		}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
+#if (ENABLE_TQ_UFO_DUMP == 1)
+		PVR_DPF((PVR_DBG_ERROR, "%s: dumping TQ fence/updates syncs...", __func__));
+		{
+			IMG_UINT32 ii;
+			PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+			IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+			PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+			IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+			/* Dump Fence syncs and Update syncs */
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ fence syncs (&psTransferContext->asSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->asSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+			for (ii=0; ii<ui32IntClientFenceCount; ii++)
+			{
+				if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+					pui32TmpIntFenceValue++;
+				}
+				psTmpIntFenceUFOAddress++;
+			}
+			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ update syncs (&psTransferContext->asSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->asSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+			for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+			{
+				if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+					pui32TmpIntUpdateValue++;
+				}
+				psTmpIntUpdateUFOAddress++;
+			}
+		}
+#endif
+
+		RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice,
+		                          & pPreAddr,
+		                          & pPostAddr,
+		                          & pRMWUFOAddr);
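+
+		/* The addresses obtained above are handed to RGXCmdHelperInitCmdCCB
+		 * below so the FW can timestamp the workload (pre/post) and perform
+		 * the RMW UFO update.
+		 */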
+
+		/*
+			Create the command helper data for this command
+		*/
+		eError = RGXCmdHelperInitCmdCCB(psClientCCB,
+		                                ui32IntClientFenceCount,
+		                                pauiIntFenceUFOAddress,
+		                                paui32IntFenceValue,
+		                                ui32IntClientUpdateCount,
+		                                pauiIntUpdateUFOAddress,
+		                                paui32IntUpdateValue,
+		                                paui32ServerSyncCount[i],
+		                                papaui32ServerSyncFlags[i],
+		                                SYNC_FLAG_MASK_ALL,
+		                                papapsServerSyncs[i],
+		                                paui32FWCommandSize[i],
+		                                papaui8FWCommand[i],
+		                                & pPreAddr,
+		                                & pPostAddr,
+		                                & pRMWUFOAddr,
+		                                eType,
+		                                ui32ExtJobRef,
+		                                ui32JobId,
+		                                ui32PDumpFlags,
+		                                NULL,
+		                                pszCommandName,
+		                                psCmdHelper,
+										sRobustnessResetReason);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_initcmd;
+		}
+	}
+
+	/*
+		Acquire space for all the commands in one go
+	*/
+	if (ui323DCmdCount)
+	{
+		eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount,
+										   &pas3DCmdHelper[0]);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_3dcmdacquire;
+		}
+	}
+
+	if (ui322DCmdCount)
+	{
+		eError = RGXCmdHelperAcquireCmdCCB(ui322DCmdCount,
+										   &pas2DCmdHelper[0]);
+		if (eError != PVRSRV_OK)
+		{
+			if (ui323DCmdCount)
+			{
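+				/* 3D helper space was already acquired: zero both counts so
+				 * that no helper release or kick happens below; the eError
+				 * check after the kick section then routes to cleanup.
+				 */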
+				ui323DCmdCount = 0;
+				ui322DCmdCount = 0;
+			}
+			else
+			{
+				goto fail_2dcmdacquire;
+			}
+		}
+	}
+
+	/*
+		We should acquire the kernel CCB(s) space here as the schedule could fail
+		and we would have to roll back all the syncs
+	*/
+
+	/*
+		Only do the command helper release (which takes the server sync
+		operations) if the acquire succeeded
+	*/
+	if (ui323DCmdCount)
+	{
+		ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext));
+		RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+								  &pas3DCmdHelper[0],
+								  "TQ_3D",
+								  FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr);
+	}
+
+	if ((ui322DCmdCount) && (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+	{
+		ui322DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext));
+		RGXCmdHelperReleaseCmdCCB(ui322DCmdCount,
+								  &pas2DCmdHelper[0],
+								  "TQ_2D",
+								  FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr);
+	}
+
+	/*
+		Even if we failed to acquire the client CCB space we might still need
+		to kick the HW to process a padding packet to release space for us next
+		time round
+	*/
+	if (ui323DCmdCount)
+	{
+		RGXFWIF_KCCB_CMD s3DKCCBCmd;
+		IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr;
+
+		/* Construct the kernel 3D CCB command. */
+		s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext);
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext));
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+		s3DKCCBCmd.uCmdData.sCmdKickData.sWorkloadDataFWAddress.ui32Addr = 0;
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+		HTBLOGK(HTB_SF_MAIN_KICK_3D,
+				s3DKCCBCmd.uCmdData.sCmdKickData.psContext,
+				ui323DCmdOffset);
+		RGX_HWPERF_HOST_ENQ(psTransferContext,
+		                    OSGetCurrentClientProcessIDKM(),
+		                    ui32FWCtx,
+		                    ui32ExtJobRef,
+		                    ui32JobId,
+		                    RGX_HWPERF_KICK_TYPE_TQ3D,
+		                    uiCheckFenceUID,
+		                    uiUpdateFenceUID,
+		                    NO_DEADLINE,
+		                    NO_CYCEST);
+
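+		/* RGXScheduleCommand() may transiently return PVRSRV_ERROR_RETRY;
+		 * keep re-issuing the kick, waiting between attempts, until it is
+		 * accepted or the MAX_HW_TIME_US budget is exhausted.
+		 */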
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+										RGXFWIF_DM_3D,
+										&s3DKCCBCmd,
+										sizeof(s3DKCCBCmd),
+										ui32ClientCacheOpSeqNum,
+										ui32PDumpFlags);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+		RGXHWPerfFTraceGPUEnqueueEvent(psDeviceNode->pvDevice,
+				ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_TQ3D);
+#endif
+	}
+
+	if ((ui322DCmdCount) && (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+	{
+		RGXFWIF_KCCB_CMD s2DKCCBCmd;
+		IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr;
+
+		/* Construct the kernel 2D CCB command. */
+		s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		s2DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext);
+		s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext));
+		s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+		HTBLOGK(HTB_SF_MAIN_KICK_2D,
+				s2DKCCBCmd.uCmdData.sCmdKickData.psContext,
+				ui322DCmdOffset);
+		RGX_HWPERF_HOST_ENQ(psTransferContext,
+		                    OSGetCurrentClientProcessIDKM(),
+		                    ui32FWCtx,
+		                    ui32ExtJobRef,
+		                    ui32JobId,
+		                    RGX_HWPERF_KICK_TYPE_TQ2D,
+		                    uiCheckFenceUID,
+		                    uiUpdateFenceUID,
+		                    NO_DEADLINE,
+		                    NO_CYCEST);
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+										RGXFWIF_DM_2D,
+										&s2DKCCBCmd,
+										sizeof(s2DKCCBCmd),
+										ui32ClientCacheOpSeqNum,
+										ui32PDumpFlags);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+		RGXHWPerfFTraceGPUEnqueueEvent(psDeviceNode->pvDevice,
+				ui32FWCtx, ui32JobId, RGX_HWPERF_KICK_TYPE_TQ2D);
+#endif
+	}
+
+	/*
+	 * Now check eError (which may have returned an error from our earlier calls
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+	 * so we check it now...
+	 */
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_2dcmdacquire;
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (iUpdateTimeline >= 0)
+	{
+		/* If we get here this should never fail; hitting it likely implies
+		 * a coding error above */
+		iUpdateFence = pvr_sync_get_update_fd(psFDFenceData);
+		if (iUpdateFence < 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get update sync fd",
+				__func__));
+			/* If we fail here, we cannot rollback the syncs as the hw already
+			 * has references to resources they may be protecting in the kick
+			 * so fallthrough */
+
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_free_append_data;
+		}
+	}
+#if defined(NO_HARDWARE)
+	pvr_sync_nohw_complete_fences(psFDFenceData);
+#endif
+	/*
+		Free the merged sync memory if required
+	*/
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if defined(NO_HARDWARE)
+	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+	if (psUpdateSyncCheckpoint)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s:   Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+	}
+	if (psFenceTimelineUpdateSync)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s:   Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+	}
+	SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (psAppendData)
+	{
+		pvr_buffer_sync_append_finish(psAppendData);
+	}
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	if (psBufferSyncData)
+	{
+		pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+	}
+	if (apsBufferFenceSyncCheckpoints)
+	{
+		kfree(apsBufferFenceSyncCheckpoints);
+	}
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+	if (piUpdateFence)
+	{
+		*piUpdateFence = iUpdateFence;
+	}
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_FENCE_INVALID))
+	{
+		SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+#endif
+
+	OSFreeMem(pas2DCmdHelper);
+	OSFreeMem(pas3DCmdHelper);
+
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+/*
+	No resources are created in this function so there is nothing to free
+	unless we had to merge syncs.
+	If we fail after the client CCB acquire there is still nothing to do
+	as only the client CCB release will modify the client CCB
+*/
+fail_2dcmdacquire:
+fail_3dcmdacquire:
+fail_initcmd:
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, psSyncAddrListFence);
+	SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, psSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+#endif
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	if (iUpdateFence != PVRSRV_FENCE_INVALID)
+	{
+		SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+	}
+fail_create_output_fence:
+	/* Drop the references taken on the sync checkpoints in the
+	 * resolved input fence */
+	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+								 apsFenceSyncCheckpoints);
+fail_resolve_input_fence:
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) */
+fail_pdumpcheck:
+fail_cmdtype:
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+fail_syncinit:
+	/* Cleanup is placed here because the loop could fail after the first
+	 * iteration at the goto tags above, at which point the psFDFenceData
+	 * memory would already have been allocated.
+	 */
+	pvr_sync_rollback_append_fences(psFDFenceData);
+fail_free_append_data:
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if defined(SUPPORT_BUFFER_SYNC)
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	pvr_buffer_sync_append_abort(psAppendData);
+fail_sync_append:
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	if (psBufferSyncData)
+	{
+		pvr_buffer_sync_kick_failed(psBufferSyncData);
+	}
+	if (apsBufferFenceSyncCheckpoints)
+	{
+		kfree(apsBufferFenceSyncCheckpoints);
+	}
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+fail_populate_sync_addr_list_update:
+fail_populate_sync_addr_list_fence:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	OSFreeMem(pas2DCmdHelper);
+fail_alloc2dhelper:
+	OSFreeMem(pas3DCmdHelper);
+fail_alloc3dhelper:
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+	if (apsFenceSyncCheckpoints)
+	{
+		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+	}
+	/* Free memory allocated to hold the internal list of update values */
+	if (pui32IntAllocatedUpdateValues)
+	{
+		OSFreeMem(pui32IntAllocatedUpdateValues);
+		pui32IntAllocatedUpdateValues = NULL;
+	}
+#endif /* defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                   PVRSRV_DEVICE_NODE *psDevNode,
+												   RGX_SERVER_TQ_CONTEXT *psTransferContext,
+												   IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockAcquire(psTransferContext->hLock);
+#endif
+
+	if ((psTransferContext->s2DData.ui32Priority != ui32Priority)  && \
+			(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+	{
+		eError = ContextSetPriority(psTransferContext->s2DData.psServerCommonContext,
+									psConnection,
+									psTransferContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_2D);
+		if (eError != PVRSRV_OK)
+		{
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 2D part of the transfer context (%s)", __func__, PVRSRVGetErrorStringKM(eError)));
+			}
+			goto fail_2dcontext;
+		}
+		psTransferContext->s2DData.ui32Priority = ui32Priority;
+	}
+
+	if (psTransferContext->s3DData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psTransferContext->s3DData.psServerCommonContext,
+									psConnection,
+									psTransferContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_3D);
+		if (eError != PVRSRV_OK)
+		{
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the transfer context (%s)", __func__, PVRSRVGetErrorStringKM(eError)));
+			}
+			goto fail_3dcontext;
+		}
+		psTransferContext->s3DData.ui32Priority = ui32Priority;
+	}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	return PVRSRV_OK;
+
+fail_3dcontext:
+
+fail_2dcontext:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSLockRelease(psTransferContext->hLock);
+#endif
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+void CheckForStalledTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile)
+{
+	DLLIST_NODE *psNode, *psNext;
+
+	OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode);
+
+		if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && \
+				(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+		{
+			DumpStalledFWCommonContext(psCurrentServerTransferCtx->s2DData.psServerCommonContext,
+									   pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+
+		if (psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D)
+		{
+			DumpStalledFWCommonContext(psCurrentServerTransferCtx->s3DData.psServerCommonContext,
+									   pfnDumpDebugPrintf, pvDumpDebugFile);
+		}
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_UINT32 ui32ContextBitMask = 0;
+
+	OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode);
+
+		if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && \
+				(NULL != psCurrentServerTransferCtx->s2DData.psServerCommonContext) && \
+				(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_TLA_BIT_MASK))
+		{
+			if (CheckStalledClientCommonContext(psCurrentServerTransferCtx->s2DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ2D) == PVRSRV_ERROR_CCCB_STALLED)
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ2D;
+			}
+		}
+
+		if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) && (NULL != psCurrentServerTransferCtx->s3DData.psServerCommonContext))
+		{
+			if ((CheckStalledClientCommonContext(psCurrentServerTransferCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ3D) == PVRSRV_ERROR_CCCB_STALLED))
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ3D;
+			}
+		}
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock);
+	return ui32ContextBitMask;
+}
+
+/**************************************************************************//**
+ End of file (rgxtransfer.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtransfer.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtransfer.h
new file mode 100644
index 0000000..a8c31dc
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxtransfer.h
@@ -0,0 +1,154 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Transfer queue Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Transfer queue Functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTRANSFER_H__)
+#define __RGXTRANSFER_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "connection_server.h"
+
+typedef struct _RGX_SERVER_TQ_CONTEXT_ RGX_SERVER_TQ_CONTEXT;
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXCreateTransferContextKM
+
+ @Description
+	Server-side implementation of RGXCreateTransferContext
+
+ @Input psConnection - connection data
+
+ @Input psDeviceNode - device node
+
+ @Input ui32Priority - priority of the context
+
+ @Input ui32FrameworkCommandSize - size of the framework command, in bytes
+
+ @Input pabyFrameworkCommand - framework (reset) command for the context
+
+ @Input hMemCtxPrivData - private data handle of the memory context
+
+ @Output ppsTransferContext - created transfer context
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA		*psConnection,
+										   PVRSRV_DEVICE_NODE		*psDeviceNode,
+										   IMG_UINT32				ui32Priority,
+										   IMG_UINT32				ui32FrameworkCommandSize,
+										   IMG_PBYTE				pabyFrameworkCommand,
+										   IMG_HANDLE				hMemCtxPrivData,
+										   RGX_SERVER_TQ_CONTEXT	**ppsTransferContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXDestroyTransferContextKM
+
+ @Description
+	Server-side implementation of RGXDestroyTransferContext
+
+ @Input psTransferContext - Transfer context
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext);
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXSubmitTransferKM
+
+ @Description
+	Schedules one or more 2D or 3D HW commands on the firmware
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT	*psTransferContext,
+									IMG_UINT32				ui32ClientCacheOpSeqNum,
+									IMG_UINT32				ui32PrepareCount,
+									IMG_UINT32				*paui32ClientFenceCount,
+									SYNC_PRIMITIVE_BLOCK		***papauiClientFenceUFOSyncPrimBlock,
+									IMG_UINT32				**papaui32ClientFenceSyncOffset,
+									IMG_UINT32				**papaui32ClientFenceValue,
+									IMG_UINT32				*paui32ClientUpdateCount,
+									SYNC_PRIMITIVE_BLOCK		***papauiClientUpdateUFOSyncPrimBlock,
+									IMG_UINT32				**papaui32ClientUpdateSyncOffset,
+									IMG_UINT32				**papaui32ClientUpdateValue,
+									IMG_UINT32				*paui32ServerSyncCount,
+									IMG_UINT32				**papaui32ServerSyncFlags,
+									SERVER_SYNC_PRIMITIVE	***papapsServerSyncs,
+									PVRSRV_FENCE			iCheckFence,
+									PVRSRV_TIMELINE			iUpdateTimeline,
+									PVRSRV_FENCE			*piUpdateFence,
+									IMG_CHAR				szFenceName[32],
+									IMG_UINT32				*paui32FWCommandSize,
+									IMG_UINT8				**papaui8FWCommand,
+									IMG_UINT32				*pui32TQPrepareFlags,
+									IMG_UINT32				ui32ExtJobRef,
+									IMG_UINT32				ui32SyncPMRCount,
+									IMG_UINT32				*paui32SyncPMRFlags,
+									PMR						**ppsSyncPMRs);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                   PVRSRV_DEVICE_NODE * psDevNode,
+												   RGX_SERVER_TQ_CONTEXT *psTransferContext,
+												   IMG_UINT32 ui32Priority);
+
+/* Debug - check if transfer context is waiting on a fence */
+void CheckForStalledTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client transfer contexts are stalled */
+IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXTRANSFER_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxutils.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxutils.c
new file mode 100644
index 0000000..2b99f49
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxutils.c
@@ -0,0 +1,480 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "power.h"
+#include "pvrsrv.h"
+#include "sync_internal.h"
+#include "rgxfwutils.h"
+
+
+PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_UINT32 *pui32State)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+	if (!psDeviceNode)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	psDevInfo = psDeviceNode->pvDevice;
+	*pui32State = psDevInfo->eActivePMConf;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_UINT32 ui32State)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+	if (!psDeviceNode || !psDeviceNode->pvDevice)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	if (RGX_ACTIVEPM_FORCE_OFF != ui32State
+		|| !psDevInfo->pvAPMISRData)
+	{
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+	}
+
+#if !defined(NO_HARDWARE)
+	eError = OSUninstallMISR(psDevInfo->pvAPMISRData);
+	if (PVRSRV_OK == eError)
+	{
+		psDevInfo->eActivePMConf = RGX_ACTIVEPM_FORCE_OFF;
+		psDevInfo->pvAPMISRData = NULL;
+		eError = PVRSRVSetDeviceDefaultPowerState((const PPVRSRV_DEVICE_NODE)psDeviceNode,
+		                                          PVRSRV_DEV_POWER_STATE_ON);
+	}
+#endif
+
+	return eError;
+}
+
+PVRSRV_ERROR RGXQueryPdumpPanicEnable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_BOOL *pbEnabled)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+	if (!psDeviceNode || !psDeviceNode->pvDevice)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	*pbEnabled = psDevInfo->bPDPEnabled;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetPdumpPanicEnable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_BOOL bEnable)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+	if (!psDeviceNode || !psDeviceNode->pvDevice)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	psDevInfo->bPDPEnabled = bEnable;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 *pui32DeviceFlags)
+{
+	if (!pui32DeviceFlags || !psDevInfo)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pui32DeviceFlags = psDevInfo->ui32DeviceFlags;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 ui32Config,
+				IMG_BOOL bSetNotClear)
+{
+	IMG_UINT32 ui32DeviceFlags = 0;
+
+	if (!psDevInfo)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32Config & RGXKMIF_DEVICE_STATE_ZERO_FREELIST)
+	{
+		ui32DeviceFlags |= RGXKM_DEVICE_STATE_ZERO_FREELIST;
+	}
+
+	if (ui32Config & RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN)
+	{
+		ui32DeviceFlags |= RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN;
+	}
+
+	if (ui32Config & RGXKMIF_DEVICE_STATE_DUST_REQUEST_INJECT_EN)
+	{
+		ui32DeviceFlags |= RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN;
+	}
+
+	if (bSetNotClear)
+	{
+		psDevInfo->ui32DeviceFlags |= ui32DeviceFlags;
+	}
+	else
+	{
+		psDevInfo->ui32DeviceFlags &= ~ui32DeviceFlags;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*
+ * RGXRunScript
+ */
+PVRSRV_ERROR RGXRunScript(PVRSRV_RGXDEV_INFO *psDevInfo,
+				RGX_INIT_COMMAND *psScript,
+				IMG_UINT32 ui32NumCommands,
+				IMG_UINT32 ui32PdumpFlags,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	IMG_UINT32 ui32PC;
+#if !defined(NO_HARDWARE)
+	IMG_UINT32 ui32LastLoopPoint = 0xFFFFFFFF;
+#endif /* NO_HARDWARE */
+
+	for (ui32PC = 0;  ui32PC < ui32NumCommands;  ui32PC++)
+	{
+		RGX_INIT_COMMAND *psComm = &psScript[ui32PC];
+
+		switch (psComm->eOp)
+		{
+			case RGX_INIT_OP_DBG_READ32_HW_REG:
+			{
+				IMG_UINT32	ui32RegVal;
+				ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM,  psComm->sDBGReadHWReg.ui32Offset);
+				PVR_DUMPDEBUG_LOG("%s: 0x%08X", psComm->sDBGReadHWReg.aszName, ui32RegVal);
+				break;
+			}
+			case RGX_INIT_OP_DBG_READ64_HW_REG:
+			{
+				IMG_UINT64	ui64RegVal;
+				ui64RegVal = OSReadHWReg64(psDevInfo->pvRegsBaseKM, psComm->sDBGReadHWReg.ui32Offset);
+				PVR_DUMPDEBUG_LOG("%s: 0x%016" IMG_UINT64_FMTSPECX , psComm->sDBGReadHWReg.aszName, ui64RegVal);
+				break;
+			}
+			case RGX_INIT_OP_WRITE_HW_REG:
+			{
+				if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+				{
+					OSWriteHWReg32(psDevInfo->pvRegsBaseKM, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
+				}
+				PDUMPCOMMENT("RGXRunScript: Write HW reg operation");
+				PDUMPREG32(RGX_PDUMPREG_NAME,
+						psComm->sWriteHWReg.ui32Offset,
+						psComm->sWriteHWReg.ui32Value,
+						ui32PdumpFlags);
+				break;
+			}
+			case RGX_INIT_OP_PDUMP_HW_REG:
+			{
+				PDUMPCOMMENT("RGXRunScript: Dump HW reg operation");
+				PDUMPREG32(RGX_PDUMPREG_NAME, psComm->sPDumpHWReg.ui32Offset,
+						psComm->sPDumpHWReg.ui32Value, ui32PdumpFlags);
+				break;
+			}
+			case RGX_INIT_OP_COND_POLL_HW_REG:
+			{
+#if !defined(NO_HARDWARE)
+				IMG_UINT32	ui32RegVal;
+
+				if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+				{
+					/* read the register used as condition */
+					ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM,  psComm->sCondPollHWReg.ui32CondOffset);
+
+					/* if the condition succeeds, poll the register */
+					if ((ui32RegVal & psComm->sCondPollHWReg.ui32CondMask) == psComm->sCondPollHWReg.ui32CondValue)
+					{
+						if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + psComm->sCondPollHWReg.ui32Offset),
+								psComm->sCondPollHWReg.ui32Value,
+								psComm->sCondPollHWReg.ui32Mask) != PVRSRV_OK)
+						{
+							PVR_DPF((PVR_DBG_ERROR, "RGXRunScript: Cond Poll for Reg (0x%x) failed -> Cancel script.", psComm->sCondPollHWReg.ui32Offset));
+							return PVRSRV_ERROR_TIMEOUT;
+						}
+
+					}
+					else
+					{
+						PVR_DPF((PVR_DBG_WARNING,
+								"RGXRunScript: Skipping Poll for Reg (0x%x) because the condition is not met (Reg 0x%x ANDed with mask 0x%x should equal 0x%x, but value 0x%x was found instead).",
+								psComm->sCondPollHWReg.ui32Offset,
+								psComm->sCondPollHWReg.ui32CondOffset,
+								psComm->sCondPollHWReg.ui32CondMask,
+								psComm->sCondPollHWReg.ui32CondValue,
+								ui32RegVal));
+					}
+				}
+#endif
+				break;
+			}
+			case RGX_INIT_OP_POLL_64_HW_REG:
+			{
+				/* Split lower and upper words */
+				IMG_UINT32 ui32UpperValue = (IMG_UINT32) (psComm->sPoll64HWReg.ui64Value >> 32);
+				IMG_UINT32 ui32LowerValue = (IMG_UINT32) (psComm->sPoll64HWReg.ui64Value);
+
+				IMG_UINT32 ui32UpperMask = (IMG_UINT32) (psComm->sPoll64HWReg.ui64Mask >> 32);
+				IMG_UINT32 ui32LowerMask = (IMG_UINT32) (psComm->sPoll64HWReg.ui64Mask);
+
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXRunScript: 64 bit HW offset: %x", psComm->sPoll64HWReg.ui32Offset);
+
+				if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+				{
+					if (PVRSRVPollForValueKM((IMG_UINT32 *)(((IMG_UINT8*)psDevInfo->pvRegsBaseKM) + psComm->sPoll64HWReg.ui32Offset + 4),
+										 ui32UpperValue,
+										 ui32UpperMask) != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "RGXRunScript: Poll for upper part of Reg (0x%x) failed -> Cancel script.", psComm->sPoll64HWReg.ui32Offset));
+						return PVRSRV_ERROR_TIMEOUT;
+					}
+				}
+				PDUMPREGPOL(RGX_PDUMPREG_NAME,
+							psComm->sPoll64HWReg.ui32Offset + 4,
+							ui32UpperValue,
+							ui32UpperMask,
+							ui32PdumpFlags,
+							PDUMP_POLL_OPERATOR_EQUAL);
+
+				if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+				{
+					if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + psComm->sPoll64HWReg.ui32Offset),
+										 ui32LowerValue,
+										 ui32LowerMask) != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "RGXRunScript: Poll for lower part of Reg (0x%x) failed -> Cancel script.", psComm->sPoll64HWReg.ui32Offset));
+						return PVRSRV_ERROR_TIMEOUT;
+					}
+				}
+				PDUMPREGPOL(RGX_PDUMPREG_NAME,
+							psComm->sPoll64HWReg.ui32Offset,
+							ui32LowerValue,
+							ui32LowerMask,
+							ui32PdumpFlags,
+							PDUMP_POLL_OPERATOR_EQUAL);
+
+				break;
+			}
+			case RGX_INIT_OP_POLL_HW_REG:
+			{
+				if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+				{
+					if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + psComm->sPollHWReg.ui32Offset),
+										 psComm->sPollHWReg.ui32Value,
+										 psComm->sPollHWReg.ui32Mask) != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "RGXRunScript: Poll for Reg (0x%x) failed -> Cancel script.", psComm->sPollHWReg.ui32Offset));
+						return PVRSRV_ERROR_TIMEOUT;
+					}
+				}
+				PDUMPREGPOL(RGX_PDUMPREG_NAME,
+							psComm->sPollHWReg.ui32Offset,
+							psComm->sPollHWReg.ui32Value,
+							psComm->sPollHWReg.ui32Mask,
+							ui32PdumpFlags,
+							PDUMP_POLL_OPERATOR_EQUAL);
+
+				break;
+			}
+
+			case RGX_INIT_OP_LOOP_POINT:
+			{
+#if !defined(NO_HARDWARE)
+				ui32LastLoopPoint = ui32PC;
+#endif /* NO_HARDWARE */
+				break;
+			}
+
+			case RGX_INIT_OP_COND_BRANCH:
+			{
+#if !defined(NO_HARDWARE)
+				IMG_UINT32 ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM,
+													  psComm->sConditionalBranchPoint.ui32Offset);
+
+				if((ui32RegVal & psComm->sConditionalBranchPoint.ui32Mask) != psComm->sConditionalBranchPoint.ui32Value)
+				{
+					ui32PC = ui32LastLoopPoint - 1;
+				}
+#endif /* NO_HARDWARE */
+
+				PDUMPIDLWITHFLAGS(30, ui32PdumpFlags);
+				break;
+			}
+			case RGX_INIT_OP_DBG_CALC:
+			{
+				IMG_UINT32 ui32RegVal1;
+				IMG_UINT32 ui32RegVal2;
+				IMG_UINT32 ui32RegVal3;
+				ui32RegVal1 = OSReadHWReg32(psDevInfo->pvRegsBaseKM,  psComm->sDBGCalc.ui32Offset1);
+				ui32RegVal2 = OSReadHWReg32(psDevInfo->pvRegsBaseKM,  psComm->sDBGCalc.ui32Offset2);
+				ui32RegVal3 = OSReadHWReg32(psDevInfo->pvRegsBaseKM,  psComm->sDBGCalc.ui32Offset3);
+				if (ui32RegVal1 + ui32RegVal2 > ui32RegVal3)
+				{
+					PVR_DUMPDEBUG_LOG("%s: 0x%08X", psComm->sDBGCalc.aszName, ui32RegVal1 + ui32RegVal2 - ui32RegVal3);
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG("%s: 0x%08X", psComm->sDBGCalc.aszName, 0);
+				}
+				break;
+			}
+			case RGX_INIT_OP_DBG_WAIT:
+			{
+				OSWaitus(psComm->sDBGWait.ui32WaitInUs);
+				break;
+			}
+			case RGX_INIT_OP_DBG_STRING:
+			{
+				PVR_DUMPDEBUG_LOG("%s", psComm->sDBGString.aszString);
+				break;
+			}
+			case RGX_INIT_OP_HALT:
+			{
+				return PVRSRV_OK;
+			}
+			case RGX_INIT_OP_ILLEGAL:
+			/* FALLTHROUGH */
+			default:
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXRunScript: PC %d: Illegal command: %d", ui32PC, psComm->eOp));
+				return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION;
+			}
+		}
+
+	}
+
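+	/* The script ran off the end without an RGX_INIT_OP_HALT terminator */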
+	return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION;
+}
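+
+/* Illustrative script shape (offsets/values are hypothetical) as consumed by
+ * the interpreter above: write a register, poll for its acknowledgement, halt.
+ *
+ *   [0] RGX_INIT_OP_WRITE_HW_REG  { ui32Offset = 0x0000, ui32Value = 0x1 }
+ *   [1] RGX_INIT_OP_POLL_HW_REG   { ui32Offset = 0x0004, ui32Value = 0x1, ui32Mask = 0x1 }
+ *   [2] RGX_INIT_OP_HALT
+ */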
+
+inline const char * RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM)
+{
+	/*
+	 *  This is based on the currently defined DMs.
+	 *  If you need to modify the enum in include/rgx_common.h
+	 *  please keep this function up-to-date too.
+	 *
+	 *       typedef enum _RGXFWIF_DM_
+	 *       {
+	 *           RGXFWIF_DM_GP        = 0,
+	 *           RGXFWIF_DM_2D        = 1,
+	 *           RGXFWIF_DM_TDM       = 1,
+	 *           RGXFWIF_DM_TA        = 2,
+	 *           RGXFWIF_DM_3D        = 3,
+	 *           RGXFWIF_DM_CDM       = 4,
+	 *           RGXFWIF_DM_RTU       = 5,
+	 *           RGXFWIF_DM_SHG       = 6,
+	 *           RGXFWIF_DM_LAST,
+	 *           RGXFWIF_DM_FORCE_I32 = 0x7fffffff
+	 *       } RGXFWIF_DM;
+	 */
+	PVR_ASSERT(eKickTypeDM < RGX_KICK_TYPE_DM_LAST);
+
+	switch(eKickTypeDM) {
+		case RGX_KICK_TYPE_DM_GP:
+			return "GP ";
+		case RGX_KICK_TYPE_DM_TDM_2D:
+			return "TDM/2D ";
+		case RGX_KICK_TYPE_DM_TA:
+			return "TA ";
+		case RGX_KICK_TYPE_DM_3D:
+			return "3D ";
+		case RGX_KICK_TYPE_DM_CDM:
+			return "CDM ";
+		case RGX_KICK_TYPE_DM_RTU:
+			return "RTU ";
+		case RGX_KICK_TYPE_DM_SHG:
+			return "SHG ";
+		case RGX_KICK_TYPE_DM_TQ2D:
+			return "TQ2D ";
+		case RGX_KICK_TYPE_DM_TQ3D:
+			return "TQ3D ";
+		default:
+			return "Invalid DM ";
+	}
+}
+
+/******************************************************************************
+ End of file (rgxutils.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxutils.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxutils.h
new file mode 100644
index 0000000..693563d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/devices/rgx/rgxutils.h
@@ -0,0 +1,204 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific utility routines declarations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Inline functions/structures specific to RGX
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXUTILS_H__)
+#define __RGXUTILS_H__
+
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+
+/*!
+******************************************************************************
+
+ @Function      RGXQueryAPMState
+
+ @Description   Query the state of the APM configuration
+
+ @Input         psDeviceNode : The device node
+
+ @Input         pvPrivateData: Unused (required for AppHint callback)
+
+ @Output        pui32State   : The APM configuration state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_UINT32 *pui32State);
+
+/*!
+******************************************************************************
+
+ @Function      RGXSetAPMState
+
+ @Description   Set the APM configuration state. Currently only 'OFF' is
+                supported
+
+ @Input         psDeviceNode : The device node
+
+ @Input         pvPrivateData: Unused (required for AppHint callback)
+
+ @Input         ui32State    : The requested APM configuration state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_UINT32 ui32State);
+
+/*!
+******************************************************************************
+
+ @Function      RGXQueryPdumpPanicEnable
+
+ @Description   Get the PDump Panic Enable configuration state.
+
+ @Input         psDeviceNode : The device node
+
+ @Input         pvPrivateData: Unused (required for AppHint callback)
+
+ @Output        pbEnabled    : IMG_TRUE if PDump Panic is enabled
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXQueryPdumpPanicEnable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_BOOL *pbEnabled);
+
+/*!
+******************************************************************************
+
+ @Function      RGXSetPdumpPanicEnable
+
+ @Description   Set the PDump Panic Enable flag
+
+ @Input         psDeviceNode : The device node
+
+ @Input         pvPrivateData: Unused (required for AppHint callback)
+
+ @Input         bEnable      : The requested configuration state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetPdumpPanicEnable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+	const void *pvPrivateData,
+	IMG_BOOL bEnable);
+
+/*!
+******************************************************************************
+
+ @Function      RGXGetDeviceFlags
+
+ @Description   Get the device flags for a given device
+
+ @Input         psDevInfo        : The device descriptor to query
+
+ @Output        pui32DeviceFlags : The current state of the device flags
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 *pui32DeviceFlags);
+
+/*!
+******************************************************************************
+
+ @Function      RGXSetDeviceFlags
+
+ @Description   Set the device flags for a given device
+
+ @Input         psDevInfo : The device descriptor to modify
+
+ @Input         ui32Config : The device flags to modify
+
+ @Input         bSetNotClear : Set or clear the specified flags
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+				IMG_UINT32 ui32Config,
+				IMG_BOOL bSetNotClear);
+
+/*!
+******************************************************************************
+
+ @Function	RGXRunScript
+
+ @Description Execute the commands in the script
+
+ @Input         psDevInfo       : The RGX device info
+
+ @Input         psScript        : Array of commands to execute
+
+ @Input         ui32NumCommands : Number of commands in the script
+
+ @Input         ui32PdumpFlags  : PDump flags applied to the script operations
+
+ @Input         pfnDumpDebugPrintf : Optional debug printf callback
+
+ @Input         pvDumpDebugFile    : Opaque file handle passed to the callback
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXRunScript(PVRSRV_RGXDEV_INFO *psDevInfo,
+				RGX_INIT_COMMAND *psScript,
+				IMG_UINT32 ui32NumCommands,
+				IMG_UINT32 ui32PdumpFlags,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile);
+
+/*!
+******************************************************************************
+
+ @Function    RGXStringifyKickTypeDM
+
+ @Description Returns the given kick type DM name as a string
+
+ @Input       eKickTypeDM : Kick type DM
+
+ @Return      String containing the kick type DM name
+
+******************************************************************************/
+const char* RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM);
+
+#define RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(bitmask, eKickTypeDM) (((bitmask) & (eKickTypeDM)) ? RGXStringifyKickTypeDM(eKickTypeDM) : "")
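+/* Illustrative use (ui32KickMask is a hypothetical bitmask of RGX_KICK_TYPE_DM_* values):
+ *   PVR_DPF((PVR_DBG_WARNING, "Kicked: %s",
+ *            RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32KickMask, RGX_KICK_TYPE_DM_TA)));
+ */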
+
+#endif /* __RGXUTILS_H__ */
+
+/******************************************************************************
+ End of file (rgxutils.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/Kbuild.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/Kbuild.mk
new file mode 100644
index 0000000..5a0b5c8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/Kbuild.mk
@@ -0,0 +1,488 @@
+########################################################################### ###
+#@File
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+# 
+# The contents of this file are subject to the MIT license as set out below.
+# 
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+# 
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# 
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+# 
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+# 
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+# 
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+
+# Compatibility BVNC
+ccflags-y += -I$(TOP)/services/shared/devices/rgx
+
+# Errata files
+ccflags-y += -I$(TOP)/hwdefs
+
+# Linux-specific headers
+ccflags-y += \
+ -I$(TOP)/services/include/env/linux \
+ -I$(TOP)/kernel/drivers/staging/imgtec
+
+$(PVRSRV_MODNAME)-y += \
+ services/server/env/linux/event.o \
+ services/server/env/linux/km_apphint.o \
+ services/server/env/linux/module_common.o \
+ services/server/env/linux/osmmap_stub.o \
+ services/server/env/linux/osfunc.o \
+ services/server/env/linux/allocmem.o \
+ services/server/env/linux/osconnection_server.o \
+ services/server/env/linux/pdump.o \
+ services/server/env/linux/physmem_osmem_linux.o \
+ services/server/env/linux/pmr_os.o \
+ services/server/env/linux/pvr_debugfs.o \
+ services/server/env/linux/pvr_bridge_k.o \
+ services/server/env/linux/pvr_debug.o \
+ services/server/env/linux/physmem_dmabuf.o \
+ services/server/common/devicemem_heapcfg.o \
+ services/shared/common/devicemem.o \
+ services/shared/common/devicemem_utils.o \
+ services/shared/common/hash.o \
+ services/shared/common/ra.o \
+ services/shared/common/sync.o \
+ services/shared/common/mem_utils.o \
+ services/server/common/devicemem_server.o \
+ services/server/common/handle.o \
+ services/server/common/lists.o \
+ services/server/common/mmu_common.o \
+ services/server/common/connection_server.o \
+ services/server/common/physheap.o \
+ services/server/common/physmem.o \
+ services/server/common/physmem_lma.o \
+ services/server/common/physmem_hostmem.o \
+ services/server/common/physmem_tdsecbuf.o \
+ services/server/common/pmr.o \
+ services/server/common/power.o \
+ services/server/common/process_stats.o \
+ services/server/common/pvr_notifier.o \
+ services/server/common/pvrsrv.o \
+ services/server/common/srvcore.o \
+ services/server/common/sync_checkpoint.o \
+ services/server/common/sync_server.o \
+ services/shared/common/htbuffer.o \
+ services/server/common/htbserver.o \
+ services/server/common/tlintern.o \
+ services/shared/common/tlclient.o \
+ services/server/common/tlserver.o \
+ services/server/common/tlstream.o \
+ services/server/common/cache_km.o \
+ services/shared/common/uniq_key_splay_tree.o \
+ services/server/common/pvrsrv_pool.o \
+ services/server/common/info_page_km.o
+
+ifeq ($(SUPPORT_FALLBACK_FENCE_SYNC),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/common/sync_fallback_server.o
+endif
+
+# Wrap ExtMem support
+ifeq ($(SUPPORT_WRAP_EXTMEM),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/env/linux/physmem_extmem_linux.o \
+ services/server/common/physmem_extmem.o 
+endif
+
+ifeq ($(SUPPORT_TRUSTED_DEVICE),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/common/physmem_tdfwcode.o
+endif
+
+ifeq ($(SUPPORT_PDVFS),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/devices/rgx/rgxpdvfs.o
+
+ ifeq ($(SUPPORT_WORKLOAD_ESTIMATION),1)
+  $(PVRSRV_MODNAME)-y += \
+  services/server/devices/rgx/rgxworkest.o
+ endif
+endif
+
+ifeq ($(SUPPORT_DRM_EXT),)
+ ifneq ($(PVR_LOADER),)
+  ifeq ($(KERNEL_DRIVER_DIR),)
+   $(PVRSRV_MODNAME)-y += kernel/drivers/staging/imgtec/$(PVR_LOADER).o
+  else
+   ifneq ($(wildcard $(KERNELDIR)/$(KERNEL_DRIVER_DIR)/$(PVR_SYSTEM)/$(PVR_LOADER).c),)
+     $(PVRSRV_MODNAME)-y += external/$(KERNEL_DRIVER_DIR)/$(PVR_SYSTEM)/$(PVR_LOADER).o
+   else
+    ifneq ($(wildcard $(KERNELDIR)/$(KERNEL_DRIVER_DIR)/$(PVR_LOADER).c),)
+      $(PVRSRV_MODNAME)-y += external/$(KERNEL_DRIVER_DIR)/$(PVR_LOADER).o
+    else
+      $(PVRSRV_MODNAME)-y += kernel/drivers/staging/imgtec/$(PVR_LOADER).o
+    endif
+   endif
+  endif
+ else
+  $(PVRSRV_MODNAME)-y += kernel/drivers/staging/imgtec/pvr_platform_drv.o
+ endif
+endif
+
+
+$(PVRSRV_MODNAME)-y += \
+ services/server/devices/rgx/debugmisc_server.o \
+ services/server/devices/rgx/rgxbreakpoint.o \
+ services/server/devices/rgx/rgxccb.o \
+ services/server/devices/rgx/rgxdebug.o \
+ services/server/devices/rgx/rgxfwutils.o \
+ services/server/devices/rgx/rgxinit.o \
+ services/server/devices/rgx/rgxkicksync.o \
+ services/server/devices/rgx/rgxlayer_impl.o \
+ services/server/devices/rgx/rgxmem.o \
+ services/server/devices/rgx/rgxmmuinit.o \
+ services/server/devices/rgx/rgxregconfig.o \
+ services/server/devices/rgx/rgxta3d.o \
+ services/server/devices/rgx/rgxtimerquery.o \
+ services/server/devices/rgx/rgxtransfer.o \
+ services/server/devices/rgx/rgxtdmtransfer.o \
+ services/server/devices/rgx/rgxutils.o \
+ services/shared/devices/rgx/rgx_compat_bvnc.o \
+ services/server/devices/rgx/rgxmipsmmuinit.o \
+ services/server/devices/rgx/rgxhwperf.o \
+ services/server/devices/rgx/rgxpower.o \
+ services/server/devices/rgx/rgxstartstop.o \
+ services/server/devices/rgx/rgxtimecorr.o
+
+ifeq ($(SUPPORT_DISPLAY_CLASS),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/common/dc_server.o \
+ services/server/common/scp.o
+endif
+
+ifeq ($(SUPPORT_SECURE_EXPORT),1)
+$(PVRSRV_MODNAME)-y += services/server/env/linux/ossecure_export.o
+endif
+
+ifeq ($(PDUMP),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/common/pdump_common.o \
+ services/server/common/pdump_mmu.o \
+ services/server/common/pdump_physmem.o \
+ services/shared/common/devicemem_pdump.o \
+ services/shared/common/devicememx_pdump.o \
+ services/server/devices/rgx/rgxpdump.o
+endif
+
+$(PVRSRV_MODNAME)-y += services/server/devices/rgx/rgxcompute.o
+
+$(PVRSRV_MODNAME)-y += services/server/devices/rgx/rgxray.o
+
+ifeq ($(PVR_RI_DEBUG),1)
+$(PVRSRV_MODNAME)-y += services/server/common/ri_server.o
+endif
+
+ifeq ($(PVR_TESTING_UTILS),1)
+$(PVRSRV_MODNAME)-y += services/server/common/tutils.o
+endif
+
+ifeq ($(SUPPORT_PAGE_FAULT_DEBUG),1)
+$(PVRSRV_MODNAME)-y += services/server/common/devicemem_history_server.o
+endif
+
+$(PVRSRV_MODNAME)-y += services/server/devices/rgx/rgxsignals.o
+
+ifeq ($(PVR_HANDLE_BACKEND),generic)
+$(PVRSRV_MODNAME)-y += services/server/common/handle_generic.o
+else
+ifeq ($(PVR_HANDLE_BACKEND),idr)
+$(PVRSRV_MODNAME)-y += services/server/env/linux/handle_idr.o
+endif
+endif
+
+ifeq ($(SUPPORT_GPUTRACE_EVENTS),1)
+$(PVRSRV_MODNAME)-y += services/server/env/linux/pvr_gputrace.o
+endif
+
+ifeq ($(SUPPORT_BUFFER_SYNC),1)
+$(PVRSRV_MODNAME)-y += \
+ kernel/drivers/staging/imgtec/pvr_buffer_sync.o \
+ kernel/drivers/staging/imgtec/pvr_fence.o
+endif
+
+ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),1)
+ifeq ($(PVRSRV_USE_SYNC_CHECKPOINTS),1)
+ifeq ($(SUPPORT_DMA_FENCE),1)
+$(PVRSRV_MODNAME)-y += \
+ kernel/drivers/staging/imgtec/pvr_sync_dma_fence.o \
+ kernel/drivers/staging/imgtec/pvr_counting_timeline.o \
+ kernel/drivers/staging/imgtec/pvr_sw_fence.o \
+ kernel/drivers/staging/imgtec/pvr_fence.o \
+ services/server/env/linux/dma_fence_sync_native_server.o
+else
+$(PVRSRV_MODNAME)-y += services/server/env/linux/sync_native_server.o
+$(PVRSRV_MODNAME)-y += kernel/drivers/staging/imgtec/pvr_sync2.o
+endif
+else
+$(PVRSRV_MODNAME)-y += services/server/env/linux/sync_native_server.o
+$(PVRSRV_MODNAME)-y += kernel/drivers/staging/imgtec/pvr_sync.o
+endif
+else
+ifeq ($(SUPPORT_FALLBACK_FENCE_SYNC),1)
+$(PVRSRV_MODNAME)-y += services/server/common/sync_fallback_server.o
+endif
+endif
+
+ifeq ($(PVR_DVFS),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/env/linux/pvr_dvfs_device.o
+endif
+
+$(PVRSRV_MODNAME)-$(CONFIG_X86) += services/server/env/linux/osfunc_x86.o
+$(PVRSRV_MODNAME)-$(CONFIG_ARM) += services/server/env/linux/osfunc_arm.o
+$(PVRSRV_MODNAME)-$(CONFIG_ARM64) += services/server/env/linux/osfunc_arm64.o
+$(PVRSRV_MODNAME)-$(CONFIG_METAG) += services/server/env/linux/osfunc_metag.o
+$(PVRSRV_MODNAME)-$(CONFIG_MIPS) += services/server/env/linux/osfunc_mips.o
+
+$(PVRSRV_MODNAME)-$(CONFIG_EVENT_TRACING) += services/server/env/linux/trace_events.o
+
+ifneq ($(SUPPORT_DRM_EXT),1)
+ccflags-y += \
+ -Iinclude/drm \
+ -I$(TOP)/include/drm \
+ -I$(TOP)/services/include/env/linux
+
+$(PVRSRV_MODNAME)-y += \
+ kernel/drivers/staging/imgtec/pvr_drm.o
+endif # SUPPORT_DRM_EXT
+
+ccflags-y += -I$(OUT)/target_neutral/intermediates/firmware
+
+# Srvinit headers and source files
+
+$(PVRSRV_MODNAME)-y += \
+ services/server/devices/rgx/rgxsrvinit.o \
+ services/server/devices/rgx/rgxsrvinit_script.o \
+ services/server/devices/rgx/rgxfwimageutils.o \
+ services/shared/devices/rgx/rgx_compat_bvnc.o \
+ services/shared/devices/rgx/rgx_hwperf_table.o \
+ services/server/devices/rgx/env/linux/km/rgxfwload.o
+
+ccflags-y += \
+ -Iinclude \
+ -Ihwdefs \
+ -Ihwdefs/km \
+ -Iservices/include \
+ -Iservices/include/shared \
+ -Iservices/server/include \
+ -Iservices/server/devices/rgx \
+ -Iservices/shared/include \
+ -Iservices/shared/devices/rgx
+
+# Bridge headers and source files
+
+# Keep in sync with:
+# build/linux/common/bridges.mk AND
+# services/bridge/Linux.mk
+
+ccflags-y += \
+ -I$(bridge_base)/mm_bridge \
+ -I$(bridge_base)/cmm_bridge \
+ -I$(bridge_base)/rgxtq_bridge \
+ -I$(bridge_base)/rgxtq2_bridge \
+ -I$(bridge_base)/rgxta3d_bridge \
+ -I$(bridge_base)/srvcore_bridge \
+ -I$(bridge_base)/sync_bridge \
+ -I$(bridge_base)/synctracking_bridge \
+ -I$(bridge_base)/breakpoint_bridge \
+ -I$(bridge_base)/debugmisc_bridge \
+ -I$(bridge_base)/htbuffer_bridge \
+ -I$(bridge_base)/pvrtl_bridge \
+ -I$(bridge_base)/rgxhwperf_bridge \
+ -I$(bridge_base)/regconfig_bridge \
+ -I$(bridge_base)/timerquery_bridge \
+ -I$(bridge_base)/rgxkicksync_bridge \
+ -I$(bridge_base)/cache_bridge \
+ -I$(bridge_base)/dmabuf_bridge
+
+
+$(PVRSRV_MODNAME)-y += \
+ generated/mm_bridge/server_mm_bridge.o \
+ generated/cmm_bridge/server_cmm_bridge.o \
+ generated/rgxtq_bridge/server_rgxtq_bridge.o \
+ generated/rgxtq2_bridge/server_rgxtq2_bridge.o \
+ generated/rgxta3d_bridge/server_rgxta3d_bridge.o \
+ generated/srvcore_bridge/server_srvcore_bridge.o \
+ generated/sync_bridge/server_sync_bridge.o \
+ generated/breakpoint_bridge/server_breakpoint_bridge.o \
+ generated/debugmisc_bridge/server_debugmisc_bridge.o \
+ generated/htbuffer_bridge/server_htbuffer_bridge.o \
+ generated/pvrtl_bridge/server_pvrtl_bridge.o \
+ generated/rgxhwperf_bridge/server_rgxhwperf_bridge.o \
+ generated/regconfig_bridge/server_regconfig_bridge.o \
+ generated/timerquery_bridge/server_timerquery_bridge.o \
+ generated/rgxkicksync_bridge/server_rgxkicksync_bridge.o \
+ generated/cache_bridge/server_cache_bridge.o \
+ generated/dmabuf_bridge/server_dmabuf_bridge.o
+
+
+ifeq ($(SUPPORT_WRAP_EXTMEM),1)
+ccflags-y += -I$(bridge_base)/mmextmem_bridge
+$(PVRSRV_MODNAME)-y += generated/mmextmem_bridge/server_mmextmem_bridge.o 
+endif
+
+ifeq ($(SUPPORT_DISPLAY_CLASS),1)
+ccflags-y += -I$(bridge_base)/dc_bridge
+$(PVRSRV_MODNAME)-y += generated/dc_bridge/server_dc_bridge.o
+endif
+
+ifeq ($(SUPPORT_SECURE_EXPORT),1)
+ccflags-y += -I$(bridge_base)/smm_bridge
+$(PVRSRV_MODNAME)-y += generated/smm_bridge/server_smm_bridge.o
+endif
+
+ifeq ($(SUPPORT_SERVER_SYNC),1)
+ifeq ($(SUPPORT_SECURE_EXPORT),1)
+ccflags-y += -I$(bridge_base)/syncsexport_bridge
+$(PVRSRV_MODNAME)-y += generated/syncsexport_bridge/server_syncsexport_bridge.o
+endif
+ifeq ($(SUPPORT_INSECURE_EXPORT),1)
+ccflags-y += \
+ -I$(bridge_base)/syncexport_bridge
+$(PVRSRV_MODNAME)-y += generated/syncexport_bridge/server_syncexport_bridge.o
+endif
+endif
+
+ifeq ($(PDUMP),1)
+ccflags-y += \
+ -I$(bridge_base)/pdump_bridge \
+ -I$(bridge_base)/pdumpctrl_bridge \
+ -I$(bridge_base)/pdumpmm_bridge \
+ -I$(bridge_base)/rgxpdump_bridge
+$(PVRSRV_MODNAME)-y += \
+ generated/pdump_bridge/server_pdump_bridge.o \
+ generated/pdumpctrl_bridge/server_pdumpctrl_bridge.o \
+ generated/pdumpmm_bridge/server_pdumpmm_bridge.o \
+ generated/rgxpdump_bridge/server_rgxpdump_bridge.o
+endif
+
+ccflags-y += -I$(bridge_base)/rgxcmp_bridge
+$(PVRSRV_MODNAME)-y += generated/rgxcmp_bridge/server_rgxcmp_bridge.o
+
+ccflags-y += -I$(bridge_base)/rgxray_bridge
+$(PVRSRV_MODNAME)-y += generated/rgxray_bridge/server_rgxray_bridge.o
+
+ifeq ($(PVR_RI_DEBUG),1)
+ccflags-y += -I$(bridge_base)/ri_bridge
+$(PVRSRV_MODNAME)-y += generated/ri_bridge/server_ri_bridge.o
+endif
+
+ifeq ($(SUPPORT_VALIDATION),1)
+ccflags-y += -I$(bridge_base)/validation_bridge
+$(PVRSRV_MODNAME)-y += generated/validation_bridge/server_validation_bridge.o
+$(PVRSRV_MODNAME)-y += services/server/common/validation.o
+endif
+
+ifeq ($(PVR_TESTING_UTILS),1)
+ccflags-y += -I$(bridge_base)/tutils_bridge
+$(PVRSRV_MODNAME)-y += generated/tutils_bridge/server_tutils_bridge.o
+endif
+
+ifeq ($(SUPPORT_PAGE_FAULT_DEBUG),1)
+ccflags-y += -I$(bridge_base)/devicememhistory_bridge
+$(PVRSRV_MODNAME)-y += \
+ generated/devicememhistory_bridge/server_devicememhistory_bridge.o
+endif
+
+ifeq ($(SUPPORT_SYNCTRACKING_BRIDGE),1)
+ccflags-y += -I$(bridge_base)/synctracking_bridge
+$(PVRSRV_MODNAME)-y += \
+ generated/synctracking_bridge/server_synctracking_bridge.o
+endif
+
+#ifeq ($(SUPPORT_SIGNAL_FILTER),1)
+ccflags-y += -I$(bridge_base)/rgxsignals_bridge
+$(PVRSRV_MODNAME)-y += generated/rgxsignals_bridge/server_rgxsignals_bridge.o
+#endif
+
+ifeq ($(SUPPORT_FALLBACK_FENCE_SYNC),1)
+ccflags-y += \
+ -I$(bridge_base)/syncfallback_bridge
+$(PVRSRV_MODNAME)-y += generated/syncfallback_bridge/server_syncfallback_bridge.o
+endif
+
+
+
+
+# Direct bridges
+
+$(PVRSRV_MODNAME)-y += \
+ generated/mm_bridge/client_mm_direct_bridge.o \
+ generated/sync_bridge/client_sync_direct_bridge.o \
+ generated/htbuffer_bridge/client_htbuffer_direct_bridge.o \
+ generated/cache_bridge/client_cache_direct_bridge.o \
+ generated/pvrtl_bridge/client_pvrtl_direct_bridge.o
+
+ifeq ($(PDUMP),1)
+$(PVRSRV_MODNAME)-y += generated/pdumpmm_bridge/client_pdumpmm_direct_bridge.o
+endif
+
+ifeq ($(PVR_RI_DEBUG),1)
+$(PVRSRV_MODNAME)-y += generated/ri_bridge/client_ri_direct_bridge.o
+endif
+
+ifeq ($(PDUMP),1)
+ $(PVRSRV_MODNAME)-y += \
+  generated/pdump_bridge/client_pdump_direct_bridge.o \
+  generated/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.o \
+  generated/rgxpdump_bridge/client_rgxpdump_direct_bridge.o
+endif
+
+ifeq ($(SUPPORT_PAGE_FAULT_DEBUG),1)
+$(PVRSRV_MODNAME)-y += \
+ generated/devicememhistory_bridge/client_devicememhistory_direct_bridge.o
+endif
+
+ifeq ($(SUPPORT_SYNCTRACKING_BRIDGE),1)
+$(PVRSRV_MODNAME)-y += \
+ generated/synctracking_bridge/client_synctracking_direct_bridge.o
+endif
+
+# Enable -Werror for all built object files (suppress for Fiasco.OC/L4Linux)
+ifeq ($(CONFIG_L4),)
+ifneq ($(W),1)
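+# For example, for pvrsrv.o this expands to "CFLAGS_pvrsrv.o := -Werror".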
+$(foreach _o,$(addprefix CFLAGS_,$(notdir $($(PVRSRV_MODNAME)-y))),$(eval $(_o) := -Werror))
+endif
+endif
+
+# With certain build configurations (e.g. ARM with -Werror) we get a build
+# failure in the ftrace Linux kernel headers, so disable the relevant check.
+CFLAGS_trace_events.o := -Wno-missing-prototypes
+
+# Make sure the mem_utils are built in 'free standing' mode, so the compiler
+# is not encouraged to call out to C library functions
+CFLAGS_mem_utils.o := -ffreestanding
+
+# Chrome OS kernel adds some issues
+ccflags-y += -Wno-ignored-qualifiers
+
+include $(TOP)/services/system/$(PVR_SYSTEM)/Kbuild.mk
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/Linux.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/Linux.mk
new file mode 100644
index 0000000..6e28f48
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/Linux.mk
@@ -0,0 +1,46 @@
+########################################################################### ###
+#@File
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+# 
+# The contents of this file are subject to the MIT license as set out below.
+# 
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+# 
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# 
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+# 
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+# 
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+# 
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+
+modules := srvkm
+
+srvkm_type := kernel_module
+srvkm_target := $(PVRSRV_MODNAME).ko
+srvkm_makefile := $(THIS_DIR)/Kbuild.mk
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/allocmem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/allocmem.c
new file mode 100644
index 0000000..ffabcf5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/allocmem.c
@@ -0,0 +1,446 @@
+/*************************************************************************/ /*!
+@File
+@Title          Host memory management implementation for Linux
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "img_defs.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#include "osfunc.h"
+
+#if defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#define ALLOCMEM_MEMSTATS_PADDING 0
+#else
+#define ALLOCMEM_MEMSTATS_PADDING sizeof(IMG_UINT32)
+#endif
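+
+/* In some memstats builds further below, kmalloc-backed allocations are padded
+ * by ALLOCMEM_MEMSTATS_PADDING bytes and the allocating PID is stashed at the
+ * very end of the block, i.e. at (pv + ksize(pv) - ALLOCMEM_MEMSTATS_PADDING).
+ */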
+
+/* Used to poison memory to trip up use-after-free in kernel-side code.
+ * The poison value is deliberately not divisible by 4, so a poisoned
+ * pointer is also misaligned.
+ */
+#define OS_MEM_POISON_VALUE (0x6b)
+
+static inline void _pvr_vfree(const void* pvAddr)
+{
+#if defined(DEBUG)
+	/* The size is harder to come by for vmalloc, and since vmalloc allocates
+	 * a whole number of pages, poison the minimum size known to have
+	 * been allocated.
+	 */
+	OSCachedMemSet((void*)pvAddr, OS_MEM_POISON_VALUE, PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD);
+#endif
+	vfree(pvAddr);
+}
+
+static inline void _pvr_kfree(const void* pvAddr)
+{
+#if defined(DEBUG)
+	/* Poison whole memory block */
+	OSCachedMemSet((void*)pvAddr, OS_MEM_POISON_VALUE, ksize(pvAddr));
+#endif
+	kfree(pvAddr);
+}
+
+#if !defined(PVRSRV_ENABLE_PROCESS_STATS)
+IMG_INTERNAL void *OSAllocMem(IMG_UINT32 ui32Size)
+{
+	void *pvRet = NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vmalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		pvRet = kmalloc(ui32Size, GFP_KERNEL);
+	}
+
+	return pvRet;
+}
+
+IMG_INTERNAL void *OSAllocZMem(IMG_UINT32 ui32Size)
+{
+	void *pvRet = NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vzalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		pvRet = kzalloc(ui32Size, GFP_KERNEL);
+	}
+
+	return pvRet;
+}
+
+/*
+ * The parentheses around OSFreeMem prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+IMG_INTERNAL void (OSFreeMem)(void *pvMem)
+{
+	if (pvMem != NULL)
+	{
+		if (!is_vmalloc_addr(pvMem))
+		{
+			_pvr_kfree(pvMem);
+		}
+		else
+		{
+			_pvr_vfree(pvMem);
+		}
+	}
+}
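+
+/* Illustrative call pattern (MY_OBJ is a hypothetical caller-side type):
+ *
+ *   MY_OBJ *psObj = OSAllocZMem(sizeof(*psObj));
+ *   if (psObj == NULL)
+ *   {
+ *       return PVRSRV_ERROR_OUT_OF_MEMORY;
+ *   }
+ *   ...use psObj...
+ *   OSFreeMem(psObj);
+ */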
+#else
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG) && defined(PVRSRV_ENABLE_MEMORY_STATS)
+IMG_INTERNAL void *_OSAllocMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine)
+{
+	void *pvRet = NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vmalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		pvRet = kmalloc(ui32Size, GFP_KERNEL);
+	}
+
+	if (pvRet != NULL)
+	{
+
+		if (!is_vmalloc_addr(pvRet))
+		{
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			_PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+										  pvRet,
+										  sCpuPAddr,
+										  ksize(pvRet),
+										  NULL,
+										  pvAllocFromFile,
+										  ui32AllocFromLine);
+		}
+		else
+		{
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			_PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+										  pvRet,
+										  sCpuPAddr,
+										  ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+										  NULL,
+										  pvAllocFromFile,
+										  ui32AllocFromLine);
+		}
+	}
+	return pvRet;
+}
+
+IMG_INTERNAL void *_OSAllocZMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine)
+{
+	void *pvRet = NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vzalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		pvRet = kzalloc(ui32Size, GFP_KERNEL);
+	}
+
+	if (pvRet != NULL)
+	{
+		if (!is_vmalloc_addr(pvRet))
+		{
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			_PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+										  pvRet,
+										  sCpuPAddr,
+										  ksize(pvRet),
+										  NULL,
+										  pvAllocFromFile,
+										  ui32AllocFromLine);
+		}
+		else
+		{
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			_PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+										  pvRet,
+										  sCpuPAddr,
+										  ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+										  NULL,
+										  pvAllocFromFile,
+										  ui32AllocFromLine);
+		}
+	}
+	return pvRet;
+}
+#else
+IMG_INTERNAL void *OSAllocMem(IMG_UINT32 ui32Size)
+{
+	void *pvRet = NULL;
+
+	if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vmalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		/* Allocate an additional 4 bytes to store the PID of the allocating process */
+		pvRet = kmalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL);
+	}
+
+	if (pvRet != NULL)
+	{
+		if (!is_vmalloc_addr(pvRet))
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			{
+				/* Store the PID in the final additional 4 bytes allocated */
+				IMG_UINT32 *puiTemp = (IMG_UINT32*) (((IMG_BYTE*)pvRet) + (ksize(pvRet) - ALLOCMEM_MEMSTATS_PADDING));
+				*puiTemp = OSGetCurrentProcessID();
+			}
+			PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvRet));
+#else
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+										 pvRet,
+										 sCpuPAddr,
+										 ksize(pvRet),
+										 NULL);
+#endif
+#endif
+		}
+		else
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+											    ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+											    (IMG_UINT64)(uintptr_t) pvRet);
+#else
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+										 pvRet,
+										 sCpuPAddr,
+										 ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+										 NULL);
+#endif
+#endif
+		}
+	}
+	return pvRet;
+}
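+
+/* Layout of a kmalloc'd block in this configuration (sketch):
+ *
+ *   | user data (ui32Size) | slab slack | PID (ALLOCMEM_MEMSTATS_PADDING) |
+ *   ^ pvRet                              ^ pvRet + ksize(pvRet) - PADDING
+ *
+ * ksize() reports the usable size of the slab object, which may exceed the
+ * requested size, so the PID always occupies the final padding bytes.
+ */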
+
+IMG_INTERNAL void *OSAllocZMem(IMG_UINT32 ui32Size)
+{
+	void *pvRet = NULL;
+
+	if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vzalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		/* Allocate an additional 4 bytes to store the PID of the allocating process */
+		pvRet = kzalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL);
+	}
+
+	if (pvRet != NULL)
+	{
+		if (!is_vmalloc_addr(pvRet))
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			{
+				/* Store the PID in the final additional 4 bytes allocated */
+				IMG_UINT32 *puiTemp = (IMG_UINT32*) (((IMG_BYTE*)pvRet) + (ksize(pvRet) - ALLOCMEM_MEMSTATS_PADDING));
+				*puiTemp = OSGetCurrentProcessID();
+			}
+			PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvRet));
+#else
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+										 pvRet,
+										 sCpuPAddr,
+										 ksize(pvRet),
+										 NULL);
+#endif
+#endif
+		}
+		else
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+											    ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+											    (IMG_UINT64)(uintptr_t) pvRet);
+#else
+			IMG_CPU_PHYADDR sCpuPAddr;
+			sCpuPAddr.uiAddr = 0;
+
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+										 pvRet,
+										 sCpuPAddr,
+										 ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+										 NULL);
+#endif
+#endif
+		}
+	}
+	return pvRet;
+}
+#endif
+
+/*
+ * The parentheses around OSFreeMem prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+IMG_INTERNAL void (OSFreeMem)(void *pvMem)
+{
+	if (pvMem != NULL)
+	{
+		if (!is_vmalloc_addr(pvMem))
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			{
+				IMG_UINT32 *puiTemp = (IMG_UINT32*) (((IMG_BYTE*)pvMem) + (ksize(pvMem) - ALLOCMEM_MEMSTATS_PADDING));
+				PVRSRVStatsDecrMemKAllocStat(ksize(pvMem), *puiTemp);
+			}
+#else
+			PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+			                                (IMG_UINT64)(uintptr_t) pvMem);
+#endif
+#endif
+			_pvr_kfree(pvMem);
+		}
+		else
+		{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+			                                      (IMG_UINT64)(uintptr_t) pvMem);
+#else
+			PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+			                                (IMG_UINT64)(uintptr_t) pvMem);
+#endif
+#endif
+			_pvr_vfree(pvMem);
+		}
+	}
+}
+#endif
+
+
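+/* The *NoStats variants below bypass all PVRSRVStats* accounting; judging by
+ * the naming (their call sites are not part of this diff), they exist for
+ * paths where recording the allocation would recurse into the stats code.
+ */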
+IMG_INTERNAL void *OSAllocMemNoStats(IMG_UINT32 ui32Size)
+{
+	void *pvRet = NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vmalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		pvRet = kmalloc(ui32Size, GFP_KERNEL);
+	}
+
+	return pvRet;
+}
+
+IMG_INTERNAL void *OSAllocZMemNoStats(IMG_UINT32 ui32Size)
+{
+	void *pvRet = NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vzalloc(ui32Size);
+	}
+	if (pvRet == NULL)
+	{
+		pvRet = kzalloc(ui32Size, GFP_KERNEL);
+	}
+
+	return pvRet;
+}
+
+/*
+ * The parentheses around OSFreeMemNoStats prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+IMG_INTERNAL void (OSFreeMemNoStats)(void *pvMem)
+{
+	if (pvMem != NULL)
+	{
+		if ( !is_vmalloc_addr(pvMem) )
+		{
+			_pvr_kfree(pvMem);
+		}
+		else
+		{
+			_pvr_vfree(pvMem);
+		}
+	}
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/env_connection.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/env_connection.h
new file mode 100644
index 0000000..46b98e2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/env_connection.h
@@ -0,0 +1,118 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Linux specific server side connection management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_ENV_CONNECTION_H_)
+#define _ENV_CONNECTION_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include "handle.h"
+#include "pvr_debug.h"
+#include "device.h"
+
+#if defined(SUPPORT_ION)
+#include PVR_ANDROID_ION_HEADER
+#include "ion_sys.h"
+#include "allocmem.h"
+#endif
+
+typedef struct _ENV_CONNECTION_PRIVATE_DATA_
+{
+	struct file *psFile;
+	PVRSRV_DEVICE_NODE *psDevNode;
+} ENV_CONNECTION_PRIVATE_DATA;
+
+#if defined(SUPPORT_ION)
+#define ION_CLIENT_NAME_SIZE	50
+
+typedef struct _ENV_ION_CONNECTION_DATA_
+{
+	IMG_CHAR azIonClientName[ION_CLIENT_NAME_SIZE];
+	struct ion_device *psIonDev;
+	struct ion_client *psIonClient;
+	IMG_UINT32 ui32IonClientRefCount;
+} ENV_ION_CONNECTION_DATA;
+#endif
+
+typedef struct _ENV_CONNECTION_DATA_
+{
+	pid_t owner;
+
+	struct file *psFile;
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+#if defined(SUPPORT_ION)
+	ENV_ION_CONNECTION_DATA *psIonData;
+#endif
+#if defined(SUPPORT_DRM_EXT)
+	void *pPriv;
+#endif
+} ENV_CONNECTION_DATA;
+
+#if defined(SUPPORT_ION)
+static inline struct ion_client *EnvDataIonClientAcquire(ENV_CONNECTION_DATA *psEnvData)
+{
+	PVR_ASSERT(psEnvData->psIonData != NULL);
+	PVR_ASSERT(psEnvData->psIonData->psIonClient != NULL);
+	PVR_ASSERT(psEnvData->psIonData->ui32IonClientRefCount > 0);
+	psEnvData->psIonData->ui32IonClientRefCount++;
+	return psEnvData->psIonData->psIonClient;
+}
+
+static inline void EnvDataIonClientRelease(ENV_ION_CONNECTION_DATA *psIonData)
+{
+	PVR_ASSERT(psIonData != NULL);
+	PVR_ASSERT(psIonData->psIonClient != NULL);
+	PVR_ASSERT(psIonData->ui32IonClientRefCount > 0);
+	if (--psIonData->ui32IonClientRefCount == 0)
+	{
+		ion_client_destroy(psIonData->psIonClient);
+		IonDevRelease(psIonData->psIonDev);
+		OSFreeMem(psIonData);
+		psIonData = NULL; /* local copy only; has no effect on the caller's pointer */
+	}
+}
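+
+/* Illustrative pairing (the caller shown is hypothetical): every acquire must
+ * be balanced by a release, and the final release destroys the ion client:
+ *
+ *     struct ion_client *psClient = EnvDataIonClientAcquire(psEnvData);
+ *     ...
+ *     EnvDataIonClientRelease(psEnvData->psIonData);
+ */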
+#endif /* defined(SUPPORT_ION) */
+
+#endif /* !defined(_ENV_CONNECTION_H_) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/event.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/event.c
new file mode 100644
index 0000000..4c66c7a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/event.c
@@ -0,0 +1,372 @@
+/*************************************************************************/ /*!
+@File
+@Title          Event Object
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <asm/io.h>
+#include <asm/page.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <asm/hardirq.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <asm/uaccess.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "event.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+
+#include "osfunc.h"
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+/* Returns pointer to task_struct that belongs to thread which acquired
+ * bridge lock. */
+extern struct task_struct *BridgeLockGetOwner(void);
+extern IMG_BOOL BridgeLockIsLocked(void);
+#endif
+
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
+{
+	rwlock_t sLock;
+	struct list_head sList;
+
+} PVRSRV_LINUX_EVENT_OBJECT_LIST;
+
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
+{
+	atomic_t sTimeStamp;
+	IMG_UINT32 ui32TimeStampPrevious;
+#if defined(DEBUG)
+	IMG_UINT ui32Stats;
+#endif
+	wait_queue_head_t sWait;
+	struct list_head sList;
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
+} PVRSRV_LINUX_EVENT_OBJECT;
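+
+/* How signalling works (summary of the functions below):
+ * LinuxEventObjectSignal() increments sTimeStamp and wakes sWait;
+ * LinuxEventObjectWait() sleeps until the observed timestamp differs from
+ * ui32TimeStampPrevious or the timeout expires. prepare_to_wait() is called
+ * before the timestamp is sampled, so a signal arriving between the check
+ * and the deschedule is not lost.
+ */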
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectListCreate
+
+ @Description
+
+ Linux wait object list creation
+
+ @Output    hOSEventKM : Pointer to the event object list handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
+{
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList;
+
+	psEvenObjectList = OSAllocMem(sizeof(*psEvenObjectList));
+	if (psEvenObjectList == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListCreate: failed to allocate memory for event list"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	INIT_LIST_HEAD(&psEvenObjectList->sList);
+
+	rwlock_init(&psEvenObjectList->sLock);
+
+	*phEventObjectList = (IMG_HANDLE) psEvenObjectList;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectListDestroy
+
+ @Description
+
+ Linux wait object list destruction
+
+ @Input    hOSEventKM : Event object list handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
+{
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList;
+
+	if (psEvenObjectList)
+	{
+		if (!list_empty(&psEvenObjectList->sList))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty"));
+			return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+		}
+		OSFreeMem(psEvenObjectList);
+		/* not nulling pointer, copy on stack */
+	}
+	return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectDelete
+
+ @Description
+
+ Linux wait object removal
+
+ @Input    hOSEventObject : Event object handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject)
+{
+	if (hOSEventObject)
+	{
+		PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
+		PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
+
+		write_lock_bh(&psLinuxEventObjectList->sLock);
+		list_del(&psLinuxEventObject->sList);
+		write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+#if defined(DEBUG)
+//		PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDelete: Event object waits: %u", psLinuxEventObject->ui32Stats));
+#endif
+
+		OSFreeMem(psLinuxEventObject);
+		/* not nulling pointer, copy on stack */
+
+		return PVRSRV_OK;
+	}
+	return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+}
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectAdd
+
+ @Description
+
+ Linux wait object addition
+
+ @Input    hOSEventObjectList : Event object list handle
+ @Output   phOSEventObject : Pointer to the event object handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
+{
+	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+
+	/* allocate completion variable */
+	psLinuxEventObject = OSAllocMem(sizeof(*psLinuxEventObject));
+	if (psLinuxEventObject == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	INIT_LIST_HEAD(&psLinuxEventObject->sList);
+
+	atomic_set(&psLinuxEventObject->sTimeStamp, 0);
+	psLinuxEventObject->ui32TimeStampPrevious = 0;
+
+#if defined(DEBUG)
+	psLinuxEventObject->ui32Stats = 0;
+#endif
+	init_waitqueue_head(&psLinuxEventObject->sWait);
+
+	psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
+
+	write_lock_bh(&psLinuxEventObjectList->sLock);
+	list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
+	write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+	*phOSEventObject = psLinuxEventObject;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectSignal
+
+ @Description
+
+ Linux wait object signaling function
+
+ @Input    hOSEventObjectList : Event object list handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
+{
+	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+	struct list_head *psListEntry, *psListEntryTemp, *psList;
+	psList = &psLinuxEventObjectList->sList;
+
+	read_lock_bh(&psLinuxEventObjectList->sLock);
+	list_for_each_safe(psListEntry, psListEntryTemp, psList)
+	{
+		psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);
+
+		atomic_inc(&psLinuxEventObject->sTimeStamp);
+		wake_up_interruptible(&psLinuxEventObject->sWait);
+	}
+	read_unlock_bh(&psLinuxEventObjectList->sLock);
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectWait
+
+ @Description
+
+ Linux wait object routine
+
+ @Input    hOSEventObject : Event object handle
+
+ @Input    ui64Timeoutus : Timeout value in usec
+
+ @Input    bHoldBridgeLock : If IMG_TRUE, keep the bridge lock held while waiting
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT64 ui64Timeoutus, IMG_BOOL bHoldBridgeLock)
+{
+	IMG_UINT32 ui32TimeStamp;
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	IMG_BOOL bReleasePVRLock;
+#endif
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	IMG_UINT32 ui32Remainder;
+	long timeOutJiffies;
+	DEFINE_WAIT(sWait);
+
+	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
+
+	/* Check if the driver is in good shape */
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	/* usecs_to_jiffies only takes an unsigned int. So if our timeout is
+	 * bigger than an unsigned int, use the msec version. With such a long
+	 * timeout we really don't need the high resolution of usecs. */
+	if (ui64Timeoutus > 0xffffffffULL)
+		timeOutJiffies = msecs_to_jiffies(OSDivide64(ui64Timeoutus, 1000, &ui32Remainder));
+	else
+		timeOutJiffies = usecs_to_jiffies(ui64Timeoutus);
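+	/* Example: a 2 s timeout (2000000 us) fits in a uint and takes the
+	 * usecs_to_jiffies() path; a 5000 s timeout (5000000000 us) exceeds
+	 * 0xffffffff and is converted via msecs_to_jiffies() instead. */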
+
+	do
+	{
+		prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
+		ui32TimeStamp = (IMG_UINT32)atomic_read(&psLinuxEventObject->sTimeStamp);
+
+		if(psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp)
+		{
+			break;
+		}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		/* Check whether this thread holds the current PVR/bridge lock
+		 * before obeying the 'release before deschedule' behaviour.
+		 * Some threads choose not to hold the bridge lock in their
+		 * implementation.
+		 */
+		bReleasePVRLock = (!bHoldBridgeLock && BridgeLockIsLocked() && current == BridgeLockGetOwner());
+		if (bReleasePVRLock == IMG_TRUE)
+		{
+			OSReleaseBridgeLock();
+		}
+#else
+		PVR_UNREFERENCED_PARAMETER(bHoldBridgeLock);
+#endif
+
+		timeOutJiffies = schedule_timeout(timeOutJiffies);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		if (bReleasePVRLock == IMG_TRUE)
+		{
+			OSAcquireBridgeLock();
+		}
+#endif
+#if defined(DEBUG)
+		psLinuxEventObject->ui32Stats++;
+#endif
+
+	} while (timeOutJiffies);
+
+	finish_wait(&psLinuxEventObject->sWait, &sWait);
+
+	psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp;
+
+	return timeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/event.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/event.h
new file mode 100644
index 0000000..5b14cec
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/event.h
@@ -0,0 +1,48 @@
+/*************************************************************************/ /*!
+@File
+@Title          Event Object 
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject);
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT64 ui64Timeoutus, IMG_BOOL bHoldBridgeLock);
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/handle_idr.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/handle_idr.c
new file mode 100644
index 0000000..a203ebd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/handle_idr.c
@@ -0,0 +1,439 @@
+/*************************************************************************/ /*!
+@File
+@Title		Resource Handle Manager - IDR Back-end
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Provide IDR based resource handle management back-end
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+
+#include "handle_impl.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#define ID_VALUE_MIN	1
+#define ID_VALUE_MAX	INT_MAX
+
+#define	ID_TO_HANDLE(i) ((IMG_HANDLE)(uintptr_t)(i))
+#define	HANDLE_TO_ID(h) ((IMG_INT)(uintptr_t)(h))
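+
+/* A handle is just an IDR id cast to pointer width; starting ids at
+ * ID_VALUE_MIN (1) keeps the NULL handle permanently invalid. */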
+
+struct _HANDLE_IMPL_BASE_
+{
+	struct idr sIdr;
+
+	IMG_UINT32 ui32MaxHandleValue;
+
+	IMG_UINT32 ui32TotalHandCount;
+};
+
+typedef struct _HANDLE_ITER_DATA_WRAPPER_
+{
+	PFN_HANDLE_ITER pfnHandleIter;
+	void *pvHandleIterData;
+} HANDLE_ITER_DATA_WRAPPER;
+
+
+static int HandleIterFuncWrapper(int id, void *data, void *iter_data)
+{
+	HANDLE_ITER_DATA_WRAPPER *psIterData = (HANDLE_ITER_DATA_WRAPPER *)iter_data;
+
+	PVR_UNREFERENCED_PARAMETER(data);
+
+	return (int)psIterData->pfnHandleIter(ID_TO_HANDLE(id), psIterData->pvHandleIterData);
+}
+
+/*!
+******************************************************************************
+
+ @Function	AcquireHandle
+
+ @Description	Acquire a new handle
+
+ @Input		psBase - Pointer to handle base structure
+		phHandle - Points to a handle pointer
+		pvData - Pointer to resource to be associated with the handle
+
+ @Output	phHandle - Points to a handle pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase, 
+				  IMG_HANDLE *phHandle, 
+				  void *pvData)
+{
+	int id;
+	int result;
+
+	PVR_ASSERT(psBase != NULL);
+	PVR_ASSERT(phHandle != NULL);
+	PVR_ASSERT(pvData != NULL);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+	idr_preload(GFP_KERNEL);
+	id = idr_alloc(&psBase->sIdr, pvData, ID_VALUE_MIN, psBase->ui32MaxHandleValue + 1, 0);
+	idr_preload_end();
+
+	result = id;
+#else
+	do
+	{
+		if (idr_pre_get(&psBase->sIdr, GFP_KERNEL) == 0)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+		result = idr_get_new_above(&psBase->sIdr, pvData, ID_VALUE_MIN, &id);
+	} while (result == -EAGAIN);
+
+	if ((IMG_UINT32)id > psBase->ui32MaxHandleValue)
+	{
+		idr_remove(&psBase->sIdr, id);
+		result = -ENOSPC;
+	}
+#endif
+
+	if (result < 0)
+	{
+		if (result == -ENOSPC)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Limit of %u handles reached", 
+				 __FUNCTION__, psBase->ui32MaxHandleValue));
+
+			return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+		}
+
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psBase->ui32TotalHandCount++;
+
+	*phHandle = ID_TO_HANDLE(id);
+
+	return PVRSRV_OK;
+}
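+
+/* Note: idr_alloc() treats its `end` argument as exclusive, hence the +1 in
+ * the >= 3.9 path above; the pre-3.9 path gets the same upper bound from the
+ * explicit range check because idr_get_new_above() takes no maximum. */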
+
+/*!
+******************************************************************************
+
+ @Function	ReleaseHandle
+
+ @Description	Release a handle that is no longer needed.
+
+ @Input		psBase - Pointer to handle base structure
+		hHandle - Handle to release
+		ppvData - Points to a void data pointer
+
+ @Output	ppvData - Points to a void data pointer
+
+ @Return	PVRSRV_OK or PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR ReleaseHandle(HANDLE_IMPL_BASE *psBase, 
+				  IMG_HANDLE hHandle, 
+				  void **ppvData)
+{
+	int id = HANDLE_TO_ID(hHandle);
+	void *pvData;
+
+	PVR_ASSERT(psBase);
+
+	/* Get the data associated with the handle. If we get back NULL then
+	 * it's an invalid handle. */
+
+	pvData = idr_find(&psBase->sIdr, id);
+	if (pvData)
+	{
+		idr_remove(&psBase->sIdr, id);
+		psBase->ui32TotalHandCount--;
+	}
+
+	if (pvData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Handle out of range (%u > %u)", 
+			 __FUNCTION__, id, psBase->ui32TotalHandCount));
+		return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+	}
+
+	if (ppvData)
+	{
+		*ppvData = pvData;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	GetHandleData
+
+ @Description	Get the data associated with the given handle
+
+ @Input		psBase - Pointer to handle base structure
+		hHandle - Handle from which data should be retrieved
+                ppvData - Points to a void data pointer
+
+ @Output	ppvData - Points to a void data pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR GetHandleData(HANDLE_IMPL_BASE *psBase, 
+				  IMG_HANDLE hHandle, 
+				  void **ppvData)
+{
+	int id = HANDLE_TO_ID(hHandle);
+	void *pvData;
+
+	PVR_ASSERT(psBase);
+	PVR_ASSERT(ppvData);
+
+	pvData = idr_find(&psBase->sIdr, id);
+	if (pvData)
+	{
+		*ppvData = pvData;
+
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+	}
+}
+
+/*!
+******************************************************************************
+
+ @Function	SetHandleData
+
+ @Description	Set the data associated with the given handle
+
+ @Input		psBase - Pointer to handle base structure
+		hHandle - Handle for which data should be changed
+		pvData - Pointer to new data to be associated with the handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR SetHandleData(HANDLE_IMPL_BASE *psBase, 
+				  IMG_HANDLE hHandle, 
+				  void *pvData)
+{
+	int id = HANDLE_TO_ID(hHandle);
+	void *pvOldData;
+
+	PVR_ASSERT(psBase);
+
+	pvOldData = idr_replace(&psBase->sIdr, pvData, id);
+	if (IS_ERR(pvOldData))
+	{
+		if (PTR_ERR(pvOldData) == -ENOENT)
+		{
+			return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED;
+		}
+		else
+		{
+			return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR IterateOverHandles(HANDLE_IMPL_BASE *psBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData)
+{
+	HANDLE_ITER_DATA_WRAPPER sIterData;
+
+	PVR_ASSERT(psBase);
+	PVR_ASSERT(pfnHandleIter);
+
+	sIterData.pfnHandleIter = pfnHandleIter;
+	sIterData.pvHandleIterData = pvHandleIterData;
+
+	return (PVRSRV_ERROR)idr_for_each(&psBase->sIdr, HandleIterFuncWrapper, &sIterData);
+}
+
+/*!
+******************************************************************************
+
+ @Function	EnableHandlePurging
+
+ @Description	Enable purging for a given handle base
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR EnableHandlePurging(HANDLE_IMPL_BASE *psBase)
+{
+	PVR_UNREFERENCED_PARAMETER(psBase);
+	PVR_ASSERT(psBase);
+
+	/* Purging is not needed by the IDR back-end, so this is a no-op */
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PurgeHandles
+
+ @Description	Purge handles for a given handle base
+
+ @Input 	psBase - Pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR PurgeHandles(HANDLE_IMPL_BASE *psBase)
+{
+	PVR_UNREFERENCED_PARAMETER(psBase);
+	PVR_ASSERT(psBase);
+
+	/* Nothing to purge for the IDR back-end */
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	CreateHandleBase
+
+ @Description	Create a handle base structure
+
+ @Input 	ppsBase - pointer to handle base structure pointer
+
+ @Output	ppsBase - points to handle base structure pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR CreateHandleBase(HANDLE_IMPL_BASE **ppsBase)
+{
+	HANDLE_IMPL_BASE *psBase;
+
+	PVR_ASSERT(ppsBase);
+
+	psBase = OSAllocZMem(sizeof(*psBase));
+	if (psBase == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate generic handle base", __FUNCTION__));
+
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	idr_init(&psBase->sIdr);
+
+	psBase->ui32MaxHandleValue = ID_VALUE_MAX;
+	psBase->ui32TotalHandCount = 0;
+
+	*ppsBase = psBase;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	DestroyHandleBase
+
+ @Description	Destroy a handle base structure
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase)
+{
+	PVR_ASSERT(psBase);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+	idr_remove_all(&psBase->sIdr);
+#endif
+
+	/* Finally destroy the idr */
+	idr_destroy(&psBase->sIdr);
+
+	OSFreeMem(psBase);
+
+	return PVRSRV_OK;
+}
+
+
+static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab = 
+{
+	.pfnAcquireHandle = AcquireHandle,
+	.pfnReleaseHandle = ReleaseHandle,
+	.pfnGetHandleData = GetHandleData,
+	.pfnSetHandleData = SetHandleData,
+	.pfnIterateOverHandles = IterateOverHandles,
+	.pfnEnableHandlePurging = EnableHandlePurging,
+	.pfnPurgeHandles = PurgeHandles,
+	.pfnCreateHandleBase = CreateHandleBase,
+	.pfnDestroyHandleBase = DestroyHandleBase
+};
+
+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs)
+{
+	static IMG_BOOL bAcquired = IMG_FALSE;
+
+	if (bAcquired)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Function table already acquired", 
+			 __FUNCTION__));
+		return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+	}
+
+	if (ppsFuncs == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*ppsFuncs = &g_sHandleFuncTab;
+
+	bAcquired = IMG_TRUE;
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/kernel_compatibility.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/kernel_compatibility.h
new file mode 100644
index 0000000..7a2d821
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/kernel_compatibility.h
@@ -0,0 +1,217 @@
+/*************************************************************************/ /*!
+@File           services/server/env/linux/kernel_compatibility.h
+@Title          Kernel versions compatibility macros
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Per-version macros to allow code to seamlessly use older kernel
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KERNEL_COMPATIBILITY_H__
+#define __KERNEL_COMPATIBILITY_H__
+
+#include <linux/version.h>
+
+/*
+ * Stop supporting an old kernel? Remove the top block.
+ * New incompatible kernel?       Append a new block at the bottom.
+ *
+ * Please write your version test as `VERSION < X.Y`, and use the earliest
+ * possible version :)
+ */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+
+/* Linux 3.7 split VM_RESERVED into VM_DONTDUMP and VM_DONTEXPAND */
+#define VM_DONTDUMP VM_RESERVED
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) */
+
+/*
+ * Note: this fix had to be written backwards because get_unused_fd_flags
+ * was already defined but not exported on kernels < 3.7
+ *
+ * When removing support for kernels < 3.7, this block should be removed
+ * and all `get_unused_fd()` should be manually replaced with
+ * `get_unused_fd_flags(0)`
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+
+/* Linux 3.19 removed get_unused_fd() */
+/* get_unused_fd_flags was introduced in 3.7 */
+#define get_unused_fd() get_unused_fd_flags(0)
+
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+
+/*
+ * Headers shouldn't normally be included by this file but this is a special
+ * case as it's not obvious from the name that devfreq_add_device needs this
+ * include.
+ */
+#include <linux/string.h>
+
+#define devfreq_add_device(dev, profile, name, data) \
+	({ \
+		struct devfreq *__devfreq; \
+		if (name && !strcmp(name, "simple_ondemand")) \
+			__devfreq = devfreq_add_device(dev, profile, \
+							   &devfreq_simple_ondemand, data); \
+		else \
+			__devfreq = ERR_PTR(-EINVAL); \
+		__devfreq; \
+	})
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+
+/* Linux 3.12 introduced a new shrinker API */
+#define SHRINK_STOP (~0UL)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+
+#define dev_pm_opp_get_opp_count(dev) opp_get_opp_count(dev)
+#define dev_pm_opp_get_freq(opp) opp_get_freq(opp)
+#define dev_pm_opp_get_voltage(opp) opp_get_voltage(opp)
+#define dev_pm_opp_add(dev, freq, u_volt) opp_add(dev, freq, u_volt)
+#define dev_pm_opp_find_freq_ceil(dev, freq) opp_find_freq_ceil(dev, freq)
+
+#if defined(CONFIG_ARM)
+/* Linux 3.13 renamed ioremap_cached to ioremap_cache */
+#define ioremap_cache(cookie,size) ioremap_cached(cookie,size)
+#endif /* defined(CONFIG_ARM) */
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+
+/* Linux 3.17 changed the 3rd argument from a `struct page ***pages` to
+ * `struct page **pages` */
+#define map_vm_area(area, prot, pages) map_vm_area(area, prot, &pages)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
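+
+/*
+ * Note on the pattern used throughout this header: a function-like macro
+ * whose expansion contains its own name (e.g. map_vm_area above) is safe
+ * because the preprocessor never re-expands a macro recursively, so the
+ * inner name resolves to the real kernel function.
+ */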
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
+
+/*
+ * Linux 4.7 removed this function but its replacement was available since 3.19.
+ */
+#define drm_crtc_send_vblank_event(crtc, e) drm_send_vblank_event((crtc)->dev, drm_crtc_index(crtc), e)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+
+/* Linux 4.4 renamed __GFP_WAIT to __GFP_RECLAIM */
+#define __GFP_RECLAIM __GFP_WAIT
+
+#if !defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+#define dev_pm_opp_of_add_table(dev) of_init_opp_table(dev)
+#define dev_pm_opp_of_remove_table(dev) of_free_opp_table(dev)
+#endif
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \
+	(!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+
+/* Linux 4.5 added a new printf-style parameter for debug messages */
+
+#define drm_encoder_init(dev, encoder, funcs, encoder_type, name, ...) \
+        drm_encoder_init(dev, encoder, funcs, encoder_type)
+
+#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type, name, ...) \
+        drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type)
+
+#define drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs, name, ...) \
+        drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
+	(!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+
+/*
+ * Linux 4.6 removed the start and end arguments as it now always maps
+ * the entire DMA-BUF.
+ * Additionally, dma_buf_end_cpu_access() now returns an int error.
+ */
+#define dma_buf_begin_cpu_access(DMABUF, DIRECTION) dma_buf_begin_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION)
+#define dma_buf_end_cpu_access(DMABUF, DIRECTION) ({ dma_buf_end_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION); 0; })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
+		  (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
+
+/* Linux 4.7 removed the first argument as it was never used */
+#define drm_gem_object_lookup(filp, handle) drm_gem_object_lookup((filp)->minor->dev, filp, handle)
+
+/* Linux 4.7 replaced nla_put_u64 with nla_put_u64_64bit */
+#define nla_put_u64_64bit(skb, attrtype, value, padattr) nla_put_u64(skb, attrtype, value)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
+
+/* Linux 4.9 changed the second argument to a drm_file pointer */
+#define drm_vma_node_is_allowed(node, file_priv) drm_vma_node_is_allowed(node, (file_priv)->filp)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+static inline unsigned int refcount_read(const atomic_t *r)
+{
+	return atomic_read(r);
+}
+#define drm_mm_insert_node(mm, node, size) drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT)
+
+#define drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd) drm_helper_mode_fill_fb_struct(fb, mode_cmd)
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) */
+
+#endif /* __KERNEL_COMPATIBILITY_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/km_apphint.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/km_apphint.c
new file mode 100644
index 0000000..40bb9be
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/km_apphint.c
@@ -0,0 +1,1433 @@
+/*************************************************************************/ /*!
+@File           km_apphint.c
+@Title          Apphint routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debugfs.h"
+#include "pvr_uaccess.h"
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <stdbool.h>
+
+/* for action device access */
+#include "pvrsrv.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgxhwperf.h"
+#include "debugmisc_server.h"
+#include "htbserver.h"
+#include "rgxutils.h"
+#include "rgxapi_km.h"
+
+#include "img_defs.h"
+
+/* defines for default values */
+#include "rgx_fwif.h"
+#include "htbuffer_types.h"
+
+#include "pvr_notifier.h"
+
+#include "km_apphint_defs.h"
+#include "km_apphint.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#include "pdump_km.h"
+#endif
+
+/* Size of temporary buffers used to read and write AppHint data.
+ * Must be large enough to contain any strings read/written,
+ * but no larger than 4096, which is the buffer size for the
+ * kernel_param_ops .get function, and less than 1024 to keep
+ * the stack frame size within bounds.
+ */
+#define APPHINT_BUFFER_SIZE 512
+
+#define APPHINT_DEVICES_MAX 16
+
+/*
+*******************************************************************************
+ * AppHint mnemonic data type helper tables
+******************************************************************************/
+struct apphint_lookup {
+	char *name;
+	int value;
+};
+
+static const struct apphint_lookup fwt_logtype_tbl[] = {
+	{ "trace", 2},
+	{ "tbi", 1},
+	{ "none", 0}
+};
+
+static const struct apphint_lookup fwt_loggroup_tbl[] = {
+	RGXFWIF_LOG_GROUP_NAME_VALUE_MAP
+};
+
+static const struct apphint_lookup htb_loggroup_tbl[] = {
+#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) },
+	HTB_LOG_SFGROUPLIST
+#undef X
+};
+
+static const struct apphint_lookup htb_opmode_tbl[] = {
+	{ "droplatest", HTB_OPMODE_DROPLATEST},
+	{ "dropoldest", HTB_OPMODE_DROPOLDEST},
+	{ "block", HTB_OPMODE_BLOCK}
+};
+
+__maybe_unused
+static const struct apphint_lookup htb_logmode_tbl[] = {
+	{ "all", HTB_LOGMODE_ALLPID},
+	{ "restricted", HTB_LOGMODE_RESTRICTEDPID}
+};
+
+static const struct apphint_lookup timecorr_clk_tbl[] = {
+	{ "mono", 0 },
+	{ "mono_raw", 1 },
+	{ "sched", 2 }
+};
+
+/*
+*******************************************************************************
+ Data types
+******************************************************************************/
+union apphint_value {
+	IMG_UINT64 UINT64;
+	IMG_UINT32 UINT32;
+	IMG_BOOL BOOL;
+	IMG_CHAR *STRING;
+};
+
+struct apphint_action {
+	union {
+		PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value);
+		PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value);
+		PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value);
+		PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value);
+	} query;
+	union {
+		PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value);
+		PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value);
+		PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value);
+		PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value);
+	} set;
+	const PVRSRV_DEVICE_NODE *device;
+	const void *private_data;
+	union apphint_value stored;
+	bool free;
+};
+
+struct apphint_param {
+	IMG_UINT32 id;
+	APPHINT_DATA_TYPE data_type;
+	const void *data_type_helper;
+	IMG_UINT32 helper_size;
+};
+
+struct apphint_init_data {
+	IMG_UINT32 id;			/* index into AppHint Table */
+	APPHINT_CLASS class;
+	IMG_CHAR *name;
+	union apphint_value default_value;
+};
+
+struct apphint_class_state {
+	APPHINT_CLASS class;
+	IMG_BOOL enabled;
+};
+
+struct apphint_work {
+	struct work_struct work;
+	union apphint_value new_value;
+	struct apphint_action *action;
+};
+
+/*
+*******************************************************************************
+ Initialization / configuration table data
+******************************************************************************/
+#define UINT32Bitfield UINT32
+#define UINT32List UINT32
+
+static const struct apphint_init_data init_data_buildvar[] = {
+#define X(a, b, c, d, e) \
+	{APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+	APPHINT_LIST_BUILDVAR
+#undef X
+};
+
+static const struct apphint_init_data init_data_modparam[] = {
+#define X(a, b, c, d, e) \
+	{APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+	APPHINT_LIST_MODPARAM
+#undef X
+};
+
+static const struct apphint_init_data init_data_debugfs[] = {
+#define X(a, b, c, d, e) \
+	{APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+	APPHINT_LIST_DEBUGFS
+#undef X
+};
+
+static const struct apphint_init_data init_data_debugfs_device[] = {
+#define X(a, b, c, d, e) \
+	{APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+	APPHINT_LIST_DEBUGFS_DEVICE
+#undef X
+};
+
+#undef UINT32Bitfield
+#undef UINT32List
+
+/* Don't use the kernel ARRAY_SIZE macro here because it checks
+ * __must_be_array() and we need to be able to use this safely on a NULL ptr.
+ * This will return an undefined size for a NULL ptr - so should only be
+ * used here.
+ */
+#define APPHINT_HELP_ARRAY_SIZE(a) (sizeof((a))/(sizeof((a[0]))))
+
+static const struct apphint_param param_lookup[] = {
+#define X(a, b, c, d, e) \
+	{APPHINT_ID_ ## a, APPHINT_DATA_TYPE_ ## b, e, APPHINT_HELP_ARRAY_SIZE(e) },
+	APPHINT_LIST_ALL
+#undef X
+};
+
+#undef APPHINT_HELP_ARRAY_SIZE
+
+static const struct apphint_class_state class_state[] = {
+#define X(a) {APPHINT_CLASS_ ## a, APPHINT_ENABLED_CLASS_ ## a},
+	APPHINT_CLASS_LIST
+#undef X
+};
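+
+/* The X() lists above stamp out one table entry per AppHint: each
+ * APPHINT_LIST_* entry supplies X(id, type, class, default, helper), and the
+ * temporary UINT32Bitfield/UINT32List #defines collapse those types onto the
+ * UINT32 member of union apphint_value. */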
+
+/*
+*******************************************************************************
+ Global state
+******************************************************************************/
+/* If the union apphint_value becomes such that it is not possible to read
+ * and write atomically, a mutex may be desirable to prevent a read returning
+ * a partially written state.
+ * This would require a statically initialized mutex outside of the
+ * struct apphint_state to prevent use of an uninitialized mutex when
+ * module_params are provided on the command line.
+ *     static DEFINE_MUTEX(apphint_mutex);
+ */
+static struct apphint_state
+{
+	struct workqueue_struct *workqueue;
+	PPVR_DEBUGFS_DIR_DATA debugfs_device_rootdir[APPHINT_DEVICES_MAX];
+	PPVR_DEBUGFS_ENTRY_DATA debugfs_device_entry[APPHINT_DEVICES_MAX][APPHINT_DEBUGFS_DEVICE_ID_MAX];
+	PPVR_DEBUGFS_DIR_DATA debugfs_rootdir;
+	PPVR_DEBUGFS_ENTRY_DATA debugfs_entry[APPHINT_DEBUGFS_ID_MAX];
+	PPVR_DEBUGFS_DIR_DATA buildvar_rootdir;
+	PPVR_DEBUGFS_ENTRY_DATA buildvar_entry[APPHINT_BUILDVAR_ID_MAX];
+
+	int num_devices;
+	PVRSRV_DEVICE_NODE *devices[APPHINT_DEVICES_MAX];
+	int initialized;
+
+	/* Array contains value space for 1 copy of all apphint values defined
+	 * (for device 1) and N copies of device specific apphint values for
+	 * multi-device platforms.
+	 */
+	struct apphint_action val[APPHINT_ID_MAX + ((APPHINT_DEVICES_MAX-1)*APPHINT_DEBUGFS_DEVICE_ID_MAX)];
+
+} apphint = {
+/* statically initialise default values to ensure that any module_params
+ * provided on the command line are not overwritten by defaults.
+ */
+	.val = {
+#define UINT32Bitfield UINT32
+#define UINT32List UINT32
+#define X(a, b, c, d, e) \
+	{ {NULL}, {NULL}, NULL, NULL, {.b=d}, false },
+	APPHINT_LIST_ALL
+#undef X
+#undef UINT32Bitfield
+#undef UINT32List
+	},
+	.initialized = 0,
+	.num_devices = 0
+};
+
+#define APPHINT_DEBUGFS_DEVICE_ID_OFFSET (APPHINT_ID_MAX-APPHINT_DEBUGFS_DEVICE_ID_MAX)
+
+static inline void
+get_apphint_id_from_action_addr(const struct apphint_action * const addr,
+                                APPHINT_ID * const id)
+{
+	*id = (APPHINT_ID)(addr - apphint.val);
+	if (*id >= APPHINT_ID_MAX) {
+		*id -= APPHINT_DEBUGFS_DEVICE_ID_OFFSET;
+		*id %= APPHINT_DEBUGFS_DEVICE_ID_MAX;
+		*id += APPHINT_DEBUGFS_DEVICE_ID_OFFSET;
+	}
+}
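+
+/* Worked example with made-up sizes (the real values come from the APPHINT
+ * enums): if APPHINT_ID_MAX were 40 and APPHINT_DEBUGFS_DEVICE_ID_MAX were
+ * 10, then APPHINT_DEBUGFS_DEVICE_ID_OFFSET is 30, and an action at
+ * apphint.val[45] (a device-specific copy) maps back as 45 - 30 = 15,
+ * 15 % 10 = 5, 5 + 30 = 35: the same ID that device 0 holds at val[35].
+ */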
+
+static inline void
+get_value_offset_from_device(const PVRSRV_DEVICE_NODE * const device,
+                             int * const offset)
+{
+	int i;
+
+	/* No device offset if not a device specific apphint */
+	if (APPHINT_OF_DRIVER_NO_DEVICE == device) {
+		*offset = 0;
+		return;
+	}
+
+	for (i = 0; device && i < APPHINT_DEVICES_MAX; i++) {
+		if (apphint.devices[i] == device)
+			break;
+	}
+	if (APPHINT_DEVICES_MAX == i) {
+		PVR_DPF((PVR_DBG_WARNING, "%s: Unregistered device", __func__));
+		i = 0;
+	}
+	*offset = i * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+}
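+
+/* Sketch: the third registered device (apphint.devices[2]) yields an offset
+ * of 2 * APPHINT_DEBUGFS_DEVICE_ID_MAX, selecting its private block of
+ * values at the tail of apphint.val[]; an unregistered device falls back to
+ * device 0's block after logging a warning.
+ */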
+
+/**
+ * apphint_action_worker - perform an action after an AppHint update has been
+ *                         requested by a UM process, and update the record
+ *                         of the current active value
+ */
+static void apphint_action_worker(struct work_struct *work)
+{
+	struct apphint_work *work_pkt = container_of(work,
+	                                             struct apphint_work,
+	                                             work);
+	struct apphint_action *a = work_pkt->action;
+	union apphint_value value = work_pkt->new_value;
+	APPHINT_ID id;
+	PVRSRV_ERROR result = PVRSRV_OK;
+
+	get_apphint_id_from_action_addr(a, &id);
+
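+	/* Testing one union member is enough here: all of the set callbacks
+	 * share storage in the union, so a non-NULL UINT64 member means a
+	 * handler of the appropriate type was registered.
+	 */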
+	if (a->set.UINT64) {
+		switch (param_lookup[id].data_type) {
+		case APPHINT_DATA_TYPE_UINT64:
+			result = a->set.UINT64(a->device,
+			                       a->private_data,
+			                       value.UINT64);
+			break;
+
+		case APPHINT_DATA_TYPE_UINT32:
+		case APPHINT_DATA_TYPE_UINT32Bitfield:
+		case APPHINT_DATA_TYPE_UINT32List:
+			result = a->set.UINT32(a->device,
+			                       a->private_data,
+			                       value.UINT32);
+			break;
+
+		case APPHINT_DATA_TYPE_BOOL:
+			result = a->set.BOOL(a->device,
+			                     a->private_data,
+			                     value.BOOL);
+			break;
+
+		case APPHINT_DATA_TYPE_STRING:
+			result = a->set.STRING(a->device,
+			                       a->private_data,
+			                       value.STRING);
+			kfree(value.STRING);
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: unrecognised data type (%d), index (%d)",
+			         __func__, param_lookup[id].data_type, id));
+		}
+
+		if (PVRSRV_OK != result) {
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: failed (%s)",
+			         __func__, PVRSRVGetErrorStringKM(result)));
+		}
+	} else {
+		if (a->free) {
+			kfree(a->stored.STRING);
+		}
+		a->stored = value;
+		if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) {
+			a->free = true;
+		}
+		PVR_DPF((PVR_DBG_MESSAGE,
+		         "%s: AppHint value updated before handler is registered, ID(%d)",
+		         __func__, id));
+	}
+	kfree((void *)work_pkt);
+}
+
+static void apphint_action(union apphint_value new_value,
+                           struct apphint_action *action)
+{
+	struct apphint_work *work_pkt = kmalloc(sizeof(*work_pkt), GFP_KERNEL);
+
+	/* queue apphint update on a serialized workqueue to avoid races */
+	if (work_pkt) {
+		work_pkt->new_value = new_value;
+		work_pkt->action = action;
+		INIT_WORK(&work_pkt->work, apphint_action_worker);
+		if (0 == queue_work(apphint.workqueue, &work_pkt->work)) {
+			PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed to queue apphint change request",
+				__func__));
+			goto err_exit;
+		}
+	} else {
+		PVR_DPF((PVR_DBG_ERROR,
+			"%s: failed to alloc memory for apphint change request",
+			__func__));
+		goto err_exit;
+	}
+	return;
+err_exit:
+	kfree(new_value.STRING);
+}
+
+/**
+ * apphint_read - parse an AppHint value of any supported data type from a
+ *                text buffer
+ * Returns -errno on failure, or count on success
+ */
+static int apphint_read(char *buffer, size_t count, APPHINT_ID ue,
+			 union apphint_value *value)
+{
+	APPHINT_DATA_TYPE data_type = param_lookup[ue].data_type;
+	int result = 0;
+
+	switch (data_type) {
+	case APPHINT_DATA_TYPE_UINT64:
+		if (kstrtou64(buffer, 0, &value->UINT64) < 0) {
+			PVR_DPF((PVR_DBG_ERROR,
+				"%s: Invalid UINT64 input data for id %d: %s",
+				__func__, ue, buffer));
+			result = -EINVAL;
+			goto err_exit;
+		}
+		break;
+	case APPHINT_DATA_TYPE_UINT32:
+		if (kstrtou32(buffer, 0, &value->UINT32) < 0) {
+			PVR_DPF((PVR_DBG_ERROR,
+				"%s: Invalid UINT32 input data for id %d: %s",
+				__func__, ue, buffer));
+			result = -EINVAL;
+			goto err_exit;
+		}
+		break;
+	case APPHINT_DATA_TYPE_BOOL:
+		switch (buffer[0]) {
+		case '0':
+		case 'n':
+		case 'N':
+		case 'f':
+		case 'F':
+			value->BOOL = IMG_FALSE;
+			break;
+		case '1':
+		case 'y':
+		case 'Y':
+		case 't':
+		case 'T':
+			value->BOOL = IMG_TRUE;
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+				"%s: Invalid BOOL input data for id %d: %s",
+				__func__, ue, buffer));
+			result = -EINVAL;
+			goto err_exit;
+		}
+		break;
+	case APPHINT_DATA_TYPE_UINT32List:
+	{
+		int i;
+		struct apphint_lookup *lookup =
+			(struct apphint_lookup *)
+			param_lookup[ue].data_type_helper;
+		int size = param_lookup[ue].helper_size;
+		/* buffer may include '\n', remove it */
+		char *arg = strsep(&buffer, "\n");
+
+		if (!lookup) {
+			result = -EINVAL;
+			goto err_exit;
+		}
+
+		for (i = 0; i < size; i++) {
+			if (strcasecmp(lookup[i].name, arg) == 0) {
+				value->UINT32 = lookup[i].value;
+				break;
+			}
+		}
+		if (i == size) {
+			if (strlen(arg) == 0) {
+				PVR_DPF((PVR_DBG_ERROR,
+					"%s: No value set for AppHint",
+					__func__));
+			} else {
+				PVR_DPF((PVR_DBG_ERROR,
+					"%s: Unrecognised AppHint value (%s)",
+					__func__, arg));
+			}
+			result = -EINVAL;
+		}
+		break;
+	}
+	case APPHINT_DATA_TYPE_UINT32Bitfield:
+	{
+		int i;
+		struct apphint_lookup *lookup =
+			(struct apphint_lookup *)
+			param_lookup[ue].data_type_helper;
+		int size = param_lookup[ue].helper_size;
+		/* buffer may include '\n', remove it */
+		char *string = strsep(&buffer, "\n");
+		char *token = strsep(&string, ",");
+
+		if (!lookup) {
+			result = -EINVAL;
+			goto err_exit;
+		}
+
+		value->UINT32 = 0;
+		/* empty string is valid to clear the bitfield */
+		while (token && *token) {
+			for (i = 0; i < size; i++) {
+				if (strcasecmp(lookup[i].name, token) == 0) {
+					value->UINT32 |= lookup[i].value;
+					break;
+				}
+			}
+			if (i == size) {
+				PVR_DPF((PVR_DBG_ERROR,
+					"%s: Unrecognised AppHint value (%s)",
+					__func__, token));
+				result = -EINVAL;
+				goto err_exit;
+			}
+			token = strsep(&string, ",");
+		}
+		break;
+	}
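+	/* Sketch of the bitfield parse above, assuming hypothetical lookup
+	 * entries {"featurea", 0x1} and {"featureb", 0x2}: an input of
+	 * "featurea,featureb\n" yields value->UINT32 == 0x3, while an empty
+	 * string leaves the bitfield cleared to 0.
+	 */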
+	case APPHINT_DATA_TYPE_STRING:
+	{
+		/* buffer may include '\n', remove it */
+		char *string = strsep(&buffer, "\n");
+		size_t len = strlen(string);
+
+		if (!len) {
+			result = -EINVAL;
+			goto err_exit;
+		}
+
+		++len;
+
+		value->STRING = kmalloc(len, GFP_KERNEL);
+		if (!value->STRING) {
+			result = -ENOMEM;
+			goto err_exit;
+		}
+
+		strlcpy(value->STRING, string, len);
+		break;
+	}
+	default:
+		result = -EINVAL;
+		goto err_exit;
+	}
+
+err_exit:
+	return (result < 0) ? result : count;
+}
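+
+/* Input sketch for apphint_read(): numeric strings such as "0x10" or "16"
+ * are parsed by kstrtou32()/kstrtou64(), a leading '1'/'y'/'t' (or
+ * '0'/'n'/'f') selects a BOOL value, and List/Bitfield tokens are matched
+ * case-insensitively against the per-hint lookup table.
+ */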
+
+/**
+ * apphint_write - write the current AppHint data to a buffer
+ *
+ * Returns length written or -errno
+ */
+static int apphint_write(char *buffer, const size_t size,
+                         const struct apphint_action *a)
+{
+	const struct apphint_param *hint;
+	int result = 0;
+	APPHINT_ID id;
+	union apphint_value value;
+
+	get_apphint_id_from_action_addr(a, &id);
+	hint = &param_lookup[id];
+
+	if (a->query.UINT64) {
+		switch (hint->data_type) {
+		case APPHINT_DATA_TYPE_UINT64:
+			result = a->query.UINT64(a->device,
+			                         a->private_data,
+			                         &value.UINT64);
+			break;
+
+		case APPHINT_DATA_TYPE_UINT32:
+		case APPHINT_DATA_TYPE_UINT32Bitfield:
+		case APPHINT_DATA_TYPE_UINT32List:
+			result = a->query.UINT32(a->device,
+			                         a->private_data,
+			                         &value.UINT32);
+			break;
+
+		case APPHINT_DATA_TYPE_BOOL:
+			result = a->query.BOOL(a->device,
+			                       a->private_data,
+			                       &value.BOOL);
+			break;
+
+		case APPHINT_DATA_TYPE_STRING:
+			result = a->query.STRING(a->device,
+			                         a->private_data,
+			                         &value.STRING);
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: unrecognised data type (%d), index (%d)",
+			         __func__, hint->data_type, id));
+		}
+
+		if (PVRSRV_OK != result) {
+			PVR_DPF((PVR_DBG_ERROR, "%s: failed (%d), index (%d)",
+			         __func__, result, id));
+		}
+	} else {
+		value = a->stored;
+	}
+
+	switch (hint->data_type) {
+	case APPHINT_DATA_TYPE_UINT64:
+		result += snprintf(buffer + result, size - result,
+				"0x%016llx",
+				value.UINT64);
+		break;
+	case APPHINT_DATA_TYPE_UINT32:
+		result += snprintf(buffer + result, size - result,
+				"0x%08x",
+				value.UINT32);
+		break;
+	case APPHINT_DATA_TYPE_BOOL:
+		result += snprintf(buffer + result, size - result,
+			"%s",
+			value.BOOL ? "Y" : "N");
+		break;
+	case APPHINT_DATA_TYPE_STRING:
+		if (value.STRING) {
+			result += snprintf(buffer + result, size - result,
+				"%s",
+				*value.STRING ? value.STRING : "(none)");
+		} else {
+			result += snprintf(buffer + result, size - result,
+			"(none)");
+		}
+		break;
+	case APPHINT_DATA_TYPE_UINT32List:
+	{
+		struct apphint_lookup *lookup =
+			(struct apphint_lookup *) hint->data_type_helper;
+		IMG_UINT32 i;
+
+		if (!lookup) {
+			result = -EINVAL;
+			goto err_exit;
+		}
+
+		for (i = 0; i < hint->helper_size; i++) {
+			if (lookup[i].value == value.UINT32) {
+				result += snprintf(buffer + result,
+						size - result,
+						"%s",
+						lookup[i].name);
+				break;
+			}
+		}
+		break;
+	}
+	case APPHINT_DATA_TYPE_UINT32Bitfield:
+	{
+		struct apphint_lookup *lookup =
+			(struct apphint_lookup *) hint->data_type_helper;
+		IMG_UINT32 i;
+
+		if (!lookup) {
+			result = -EINVAL;
+			goto err_exit;
+		}
+
+		for (i = 0; i < hint->helper_size; i++) {
+			if (lookup[i].value & value.UINT32) {
+				result += snprintf(buffer + result,
+						size - result,
+						"%s,",
+						lookup[i].name);
+			}
+		}
+		if (result) {
+			/* remove any trailing ',' */
+			--result;
+			*(buffer + result) = '\0';
+		} else {
+			result += snprintf(buffer + result,
+					size - result, "none");
+		}
+		break;
+	}
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: unrecognised data type (%d), index (%d)",
+			 __func__, hint->data_type, id));
+		result = -EINVAL;
+	}
+
+err_exit:
+	return result;
+}
+
+/*
+*******************************************************************************
+ Module parameters initialization - different from debugfs
+******************************************************************************/
+/**
+ * apphint_kparam_set - Handle an update of a module parameter
+ *
+ * Returns 0, or -errno.  arg is in kp->arg.
+ */
+static int apphint_kparam_set(const char *val, const struct kernel_param *kp)
+{
+	char val_copy[APPHINT_BUFFER_SIZE];
+	APPHINT_ID id;
+	union apphint_value value;
+	int result;
+
+	/* take a writable copy: apphint_read() may modify the buffer (strsep) */
+	result = strlcpy(val_copy, val, APPHINT_BUFFER_SIZE);
+
+	get_apphint_id_from_action_addr(kp->arg, &id);
+	if (result < APPHINT_BUFFER_SIZE) {
+		result = apphint_read(val_copy, result, id, &value);
+		if (result >= 0) {
+			((struct apphint_action *)kp->arg)->stored = value;
+			if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) {
+				((struct apphint_action *)kp->arg)->free = true;
+			}
+		}
+	} else {
+		PVR_DPF((PVR_DBG_ERROR, "%s: String too long", __func__));
+	}
+	return (result > 0) ? 0 : result;
+}
+
+/**
+ * apphint_kparam_get - handle a read of a module parameter
+ *
+ * Returns length written or -errno.  Buffer is 4k (i.e. be short!)
+ */
+static int apphint_kparam_get(char *buffer, const struct kernel_param *kp)
+{
+	return apphint_write(buffer, PAGE_SIZE, kp->arg);
+}
+
+__maybe_unused
+static const struct kernel_param_ops apphint_kparam_fops = {
+	.set = apphint_kparam_set,
+	.get = apphint_kparam_get,
+};
+
+/*
+ * Call module_param_cb() for all AppHints listed in APPHINT_LIST_MODPARAM.
+ * apphint_modparam_class_ ## <class> resolves to apphint_modparam_enable(),
+ * except for AppHint classes that have been disabled.
+ */
+
+#define apphint_modparam_enable(name, number, perm) \
+	module_param_cb(name, &apphint_kparam_fops, &apphint.val[number], perm);
+
+#define X(a, b, c, d, e) \
+	apphint_modparam_class_ ##c(a, APPHINT_ID_ ## a, (S_IRUSR|S_IRGRP|S_IROTH))
+	APPHINT_LIST_MODPARAM
+#undef X
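+
+/* For illustration only (hypothetical AppHint name): for a modparam AppHint
+ * EnableHypotheticalFeature in an enabled class, the X() above expands to
+ *     module_param_cb(EnableHypotheticalFeature, &apphint_kparam_fops,
+ *                     &apphint.val[APPHINT_ID_EnableHypotheticalFeature],
+ *                     (S_IRUSR|S_IRGRP|S_IROTH));
+ * so the value can be given on the module command line and read back via
+ * /sys/module/<module>/parameters/EnableHypotheticalFeature.
+ */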
+
+/*
+*******************************************************************************
+ Debugfs get (seq file) operations - supporting functions
+******************************************************************************/
+static void *apphint_seq_start(struct seq_file *s, loff_t *pos)
+{
+	if (*pos == 0) {
+		/* We want only one entry in the sequence, one call to show() */
+		return (void *) 1;
+	}
+
+	PVR_UNREFERENCED_PARAMETER(s);
+
+	return NULL;
+}
+
+static void apphint_seq_stop(struct seq_file *s, void *v)
+{
+	PVR_UNREFERENCED_PARAMETER(s);
+	PVR_UNREFERENCED_PARAMETER(v);
+}
+
+static void *apphint_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	PVR_UNREFERENCED_PARAMETER(s);
+	PVR_UNREFERENCED_PARAMETER(v);
+	PVR_UNREFERENCED_PARAMETER(pos);
+	return NULL;
+}
+
+static int apphint_seq_show(struct seq_file *s, void *v)
+{
+	IMG_CHAR km_buffer[APPHINT_BUFFER_SIZE];
+	int result;
+
+	PVR_UNREFERENCED_PARAMETER(v);
+
+	result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, s->private);
+	if (result < 0) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: failure", __func__));
+	} else {
+		/* debugfs requires a trailing \n, module_params don't */
+		result += snprintf(km_buffer + result,
+				APPHINT_BUFFER_SIZE - result,
+				"\n");
+		seq_puts(s, km_buffer);
+	}
+
+	/* have to return 0 to see output */
+	return (result < 0) ? result : 0;
+}
+
+static const struct seq_operations apphint_seq_fops = {
+	.start = apphint_seq_start,
+	.stop  = apphint_seq_stop,
+	.next  = apphint_seq_next,
+	.show  = apphint_seq_show,
+};
+
+/*
+*******************************************************************************
+ Debugfs supporting functions
+******************************************************************************/
+/**
+ * apphint_set - Handle a debugfs value update
+ */
+static ssize_t apphint_set(const char __user *buffer,
+			    size_t count,
+			    loff_t *ppos,
+			    void *data)
+{
+	APPHINT_ID id;
+	union apphint_value value;
+	struct apphint_action *action = data;
+	char km_buffer[APPHINT_BUFFER_SIZE];
+	int result = 0;
+
+	if (ppos == NULL)
+		return -EIO;
+
+	if (count >= APPHINT_BUFFER_SIZE) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: String too long (%zd)",
+			__func__, count));
+		result = -EINVAL;
+		goto err_exit;
+	}
+
+	if (pvr_copy_from_user(km_buffer, buffer, count)) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Copy of user data failed",
+			__func__));
+		result = -EFAULT;
+		goto err_exit;
+	}
+	km_buffer[count] = '\0';
+
+	get_apphint_id_from_action_addr(action, &id);
+	result = apphint_read(km_buffer, count, id, &value);
+	if (result >= 0)
+		apphint_action(value, action);
+
+	*ppos += count;
+err_exit:
+	return result;
+}
+
+/**
+ * apphint_debugfs_init - Create the specified debugfs entries
+ */
+static int apphint_debugfs_init(char *sub_dir,
+		int device_num,
+		unsigned init_data_size,
+		const struct apphint_init_data *init_data,
+		PPVR_DEBUGFS_DIR_DATA parentdir,
+		PPVR_DEBUGFS_DIR_DATA *rootdir, PPVR_DEBUGFS_ENTRY_DATA *entry)
+{
+	int result = 0;
+	unsigned i;
+	int device_value_offset = device_num * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+
+	if (*rootdir) {
+		PVR_DPF((PVR_DBG_WARNING,
+			"AppHint DebugFS already created, skipping"));
+		result = -EEXIST;
+		goto err_exit;
+	}
+
+	result = PVRDebugFSCreateEntryDir(sub_dir, parentdir,
+					  rootdir);
+	if (result < 0) {
+		PVR_DPF((PVR_DBG_WARNING,
+			"Failed to create \"%s\" DebugFS directory.", sub_dir));
+		goto err_exit;
+	}
+
+	for (i = 0; i < init_data_size; i++) {
+		if (!class_state[init_data[i].class].enabled)
+			continue;
+
+		result = PVRDebugFSCreateEntry(init_data[i].name,
+				*rootdir,
+				&apphint_seq_fops,
+				apphint_set,
+				NULL,
+				NULL,
+				(void *) &apphint.val[init_data[i].id + device_value_offset],
+				&entry[i]);
+		if (result < 0) {
+			PVR_DPF((PVR_DBG_WARNING,
+				"Failed to create \"%s/%s\" DebugFS entry.",
+				sub_dir, init_data[i].name));
+		}
+	}
+
+err_exit:
+	return result;
+}
+
+/**
+ * apphint_debugfs_deinit - destroy the debugfs entries
+ */
+static void apphint_debugfs_deinit(unsigned num_entries,
+		PPVR_DEBUGFS_DIR_DATA *rootdir, PPVR_DEBUGFS_ENTRY_DATA *entry)
+{
+	unsigned i;
+
+	for (i = 0; i < num_entries; i++) {
+		if (entry[i]) {
+			PVRDebugFSRemoveEntry(&entry[i]);
+			entry[i] = NULL;
+		}
+	}
+
+	if (*rootdir) {
+		PVRDebugFSRemoveEntryDir(rootdir);
+		*rootdir = NULL;
+	}
+}
+
+/*
+*******************************************************************************
+ AppHint status dump implementation
+******************************************************************************/
+#if defined(PDUMP)
+static void apphint_pdump_values(void *flags, const IMG_CHAR *format, ...)
+{
+	char km_buffer[APPHINT_BUFFER_SIZE];
+	IMG_UINT32 ui32Flags = *(IMG_UINT32 *)flags;
+	va_list ap;
+
+	va_start(ap, format);
+	(void)vsnprintf(km_buffer, APPHINT_BUFFER_SIZE, format, ap);
+	va_end(ap);
+
+	PDumpCommentKM(km_buffer, ui32Flags);
+}
+#endif
+
+static void apphint_dump_values(char *group_name,
+			int device_num,
+			const struct apphint_init_data *group_data,
+			int group_size,
+			DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile)
+{
+	int i, result;
+	int device_value_offset = device_num * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+	char km_buffer[APPHINT_BUFFER_SIZE];
+
+	PVR_DUMPDEBUG_LOG("  %s", group_name);
+	for (i = 0; i < group_size; i++) {
+		result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE,
+				&apphint.val[group_data[i].id + device_value_offset]);
+
+		if (result <= 0) {
+			PVR_DUMPDEBUG_LOG("    %s: <Error>",
+				group_data[i].name);
+		} else {
+			PVR_DUMPDEBUG_LOG("    %s: %s",
+				group_data[i].name, km_buffer);
+		}
+	}
+}
+
+/**
+ * Callback for debug dump
+ */
+static void apphint_dump_state(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+			IMG_UINT32 ui32VerbLevel,
+			DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+			void *pvDumpDebugFile)
+{
+	int i, result;
+	char km_buffer[APPHINT_BUFFER_SIZE];
+	PVRSRV_DEVICE_NODE *device = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+
+	if (DEBUG_REQUEST_VERBOSITY_HIGH == ui32VerbLevel) {
+		PVR_DUMPDEBUG_LOG("------[ AppHint Settings ]------");
+
+		apphint_dump_values("Build Vars", 0,
+			init_data_buildvar, ARRAY_SIZE(init_data_buildvar),
+			pfnDumpDebugPrintf, pvDumpDebugFile);
+
+		apphint_dump_values("Module Params", 0,
+			init_data_modparam, ARRAY_SIZE(init_data_modparam),
+			pfnDumpDebugPrintf, pvDumpDebugFile);
+
+		apphint_dump_values("Debugfs Params", 0,
+			init_data_debugfs, ARRAY_SIZE(init_data_debugfs),
+			pfnDumpDebugPrintf, pvDumpDebugFile);
+
+		for (i = 0; i < APPHINT_DEVICES_MAX; i++) {
+			if (!apphint.devices[i]
+			    || (device && device != apphint.devices[i]))
+				continue;
+
+			result = snprintf(km_buffer,
+					  APPHINT_BUFFER_SIZE,
+					  "Debugfs Params Device ID: %d",
+					  i);
+			if (0 > result)
+				continue;
+
+			apphint_dump_values(km_buffer, i,
+					    init_data_debugfs_device,
+					    ARRAY_SIZE(init_data_debugfs_device),
+					    pfnDumpDebugPrintf,
+					    pvDumpDebugFile);
+		}
+	}
+}
+
+/*
+*******************************************************************************
+ Public interface
+******************************************************************************/
+int pvr_apphint_init(void)
+{
+	int result, i;
+
+	if (apphint.initialized) {
+		result = -EEXIST;
+		goto err_out;
+	}
+
+	for (i = 0; i < APPHINT_DEVICES_MAX; i++)
+		apphint.devices[i] = NULL;
+
+	/* create workqueue with strict execution ordering to ensure no
+	 * race conditions when setting/updating apphints from different
+	 * contexts
+	 */
+	apphint.workqueue = alloc_workqueue("apphint_workqueue", WQ_UNBOUND, 1);
+	if (!apphint.workqueue) {
+		result = -ENOMEM;
+		goto err_out;
+	}
+
+	result = apphint_debugfs_init("apphint", 0,
+		ARRAY_SIZE(init_data_debugfs), init_data_debugfs,
+		NULL,
+		&apphint.debugfs_rootdir, apphint.debugfs_entry);
+	if (0 != result)
+		goto err_out;
+
+	result = apphint_debugfs_init("buildvar", 0,
+		ARRAY_SIZE(init_data_buildvar), init_data_buildvar,
+		NULL,
+		&apphint.buildvar_rootdir, apphint.buildvar_entry);
+
+	apphint.initialized = 1;
+
+err_out:
+	return result;
+}
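+
+/* Resulting debugfs layout sketch (the actual root depends on where
+ * PVRDebugFSCreateEntryDir() parents these directories):
+ *     .../apphint/<HintName>      driver-wide apphint entries
+ *     .../apphint/<N>/<HintName>  per-device entries, created by
+ *                                 pvr_apphint_device_register() below
+ *     .../buildvar/<Name>         build-time configuration values
+ */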
+
+int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device)
+{
+	int result, i;
+	char device_num[APPHINT_BUFFER_SIZE];
+	int device_value_offset;
+
+	if (!apphint.initialized) {
+		result = -EAGAIN;
+		goto err_out;
+	}
+
+	if (apphint.num_devices >= APPHINT_DEVICES_MAX) {
+		result = -EMFILE;
+		goto err_out;
+	}
+
+	result = snprintf(device_num, APPHINT_BUFFER_SIZE, "%d", apphint.num_devices);
+	if (result < 0) {
+		PVR_DPF((PVR_DBG_WARNING,
+			"snprintf failed (%d)", result));
+		result = -EINVAL;
+		goto err_out;
+	}
+
+	/* Set the default values for the new device */
+	device_value_offset = apphint.num_devices * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+	for (i = 0; i < APPHINT_DEBUGFS_DEVICE_ID_MAX; i++) {
+		apphint.val[init_data_debugfs_device[i].id + device_value_offset].stored
+			= init_data_debugfs_device[i].default_value;
+	}
+
+	result = apphint_debugfs_init(device_num, apphint.num_devices,
+	                              ARRAY_SIZE(init_data_debugfs_device),
+	                              init_data_debugfs_device,
+	                              apphint.debugfs_rootdir,
+	                              &apphint.debugfs_device_rootdir[apphint.num_devices],
+	                              apphint.debugfs_device_entry[apphint.num_devices]);
+	if (0 != result)
+		goto err_out;
+
+	apphint.devices[apphint.num_devices] = device;
+	apphint.num_devices++;
+
+	(void)PVRSRVRegisterDbgRequestNotify(
+			&device->hAppHintDbgReqNotify,
+			device,
+			apphint_dump_state,
+			DEBUG_REQUEST_APPHINT,
+			device);
+
+err_out:
+	return result;
+}
+
+void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device)
+{
+	int i;
+
+	if (!apphint.initialized)
+		return;
+
+	/* find the device */
+	for (i = 0; i < APPHINT_DEVICES_MAX; i++) {
+		if (apphint.devices[i] == device)
+			break;
+	}
+
+	if (APPHINT_DEVICES_MAX == i)
+		return;
+
+	if (device->hAppHintDbgReqNotify) {
+		(void)PVRSRVUnregisterDbgRequestNotify(
+			device->hAppHintDbgReqNotify);
+		device->hAppHintDbgReqNotify = NULL;
+	}
+
+	apphint_debugfs_deinit(APPHINT_DEBUGFS_DEVICE_ID_MAX,
+	                       &apphint.debugfs_device_rootdir[i],
+	                       apphint.debugfs_device_entry[i]);
+
+	apphint.devices[i] = NULL;
+	apphint.num_devices--;
+}
+
+void pvr_apphint_deinit(void)
+{
+	int i;
+
+	if (!apphint.initialized)
+		return;
+
+	/* remove any remaining device data */
+	for (i = 0; apphint.num_devices && i < APPHINT_DEVICES_MAX; i++) {
+		if (apphint.devices[i])
+			pvr_apphint_device_unregister(apphint.devices[i]);
+	}
+
+	/* free all alloc'd string apphints and set to NULL */
+	for (i = 0; i < ARRAY_SIZE(apphint.val); i++) {
+		if (apphint.val[i].free && apphint.val[i].stored.STRING) {
+			kfree(apphint.val[i].stored.STRING);
+			apphint.val[i].stored.STRING = NULL;
+			apphint.val[i].free = false;
+		}
+	}
+
+	apphint_debugfs_deinit(APPHINT_DEBUGFS_ID_MAX,
+			&apphint.debugfs_rootdir, apphint.debugfs_entry);
+	apphint_debugfs_deinit(APPHINT_BUILDVAR_ID_MAX,
+			&apphint.buildvar_rootdir, apphint.buildvar_entry);
+
+	destroy_workqueue(apphint.workqueue);
+
+	apphint.initialized = 0;
+}
+
+void pvr_apphint_dump_state(void)
+{
+#if defined(PDUMP)
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+	apphint_dump_state(NULL, DEBUG_REQUEST_VERBOSITY_HIGH,
+	                   apphint_pdump_values, (void *)&ui32Flags);
+#endif
+	apphint_dump_state(NULL, DEBUG_REQUEST_VERBOSITY_HIGH,
+	                   NULL, NULL);
+}
+
+int pvr_apphint_get_uint64(APPHINT_ID ue, IMG_UINT64 *pVal)
+{
+	int error = -ERANGE;
+
+	if (ue < APPHINT_ID_MAX) {
+		*pVal = apphint.val[ue].stored.UINT64;
+		error = 0;
+	}
+	return error;
+}
+
+int pvr_apphint_get_uint32(APPHINT_ID ue, IMG_UINT32 *pVal)
+{
+	int error = -ERANGE;
+
+	if (ue < APPHINT_ID_MAX) {
+		*pVal = apphint.val[ue].stored.UINT32;
+		error = 0;
+	}
+	return error;
+}
+
+int pvr_apphint_get_bool(APPHINT_ID ue, IMG_BOOL *pVal)
+{
+	int error = -ERANGE;
+
+	if (ue < APPHINT_ID_MAX) {
+		error = 0;
+		*pVal = apphint.val[ue].stored.BOOL;
+	}
+	return error;
+}
+
+int pvr_apphint_get_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size)
+{
+	int error = -ERANGE;
+
+	if (ue < APPHINT_ID_MAX && apphint.val[ue].stored.STRING) {
+		if (strlcpy(pBuffer, apphint.val[ue].stored.STRING, size) < size) {
+			error = 0;
+		}
+	}
+	return error;
+}
+
+void pvr_apphint_register_handlers_uint64(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data)
+{
+	int device_value_offset;
+
+	if (id >= APPHINT_ID_MAX) {
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: AppHint ID (%d) is out of range, max (%d)",
+		         __func__, id, APPHINT_ID_MAX-1));
+		return;
+	}
+
+	get_value_offset_from_device(device, &device_value_offset);
+
+	switch (param_lookup[id].data_type) {
+	case APPHINT_DATA_TYPE_UINT64:
+		break;
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Does not match AppHint data type for ID (%d)",
+		         __func__, id));
+		return;
+	}
+
+	apphint.val[id + device_value_offset] = (struct apphint_action){
+		.query.UINT64 = query,
+		.set.UINT64 = set,
+		.device = device,
+		.private_data = private_data,
+		.stored = apphint.val[id + device_value_offset].stored
+	};
+}
+
+void pvr_apphint_register_handlers_uint32(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data)
+{
+	int device_value_offset;
+
+	if (id >= APPHINT_ID_MAX) {
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: AppHint ID (%d) is out of range, max (%d)",
+		         __func__, id, APPHINT_ID_MAX-1));
+		return;
+	}
+
+	get_value_offset_from_device(device, &device_value_offset);
+
+	switch (param_lookup[id].data_type) {
+	case APPHINT_DATA_TYPE_UINT32:
+	case APPHINT_DATA_TYPE_UINT32Bitfield:
+	case APPHINT_DATA_TYPE_UINT32List:
+		break;
+
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Does not match AppHint data type for ID (%d)",
+		         __func__, id));
+		return;
+	}
+
+	apphint.val[id + device_value_offset] = (struct apphint_action){
+		.query.UINT32 = query,
+		.set.UINT32 = set,
+		.device = device,
+		.private_data = private_data,
+		.stored = apphint.val[id + device_value_offset].stored
+	};
+}
+
+void pvr_apphint_register_handlers_bool(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data)
+{
+	int device_value_offset;
+
+	if (id >= APPHINT_ID_MAX) {
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: AppHint ID (%d) is out of range, max (%d)",
+		         __func__, id, APPHINT_ID_MAX-1));
+		return;
+	}
+
+	get_value_offset_from_device(device, &device_value_offset);
+
+	switch (param_lookup[id].data_type) {
+	case APPHINT_DATA_TYPE_BOOL:
+		break;
+
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Does not match AppHint data type for ID (%d)",
+		         __func__, id));
+		return;
+	}
+
+	apphint.val[id + device_value_offset] = (struct apphint_action){
+		.query.BOOL = query,
+		.set.BOOL = set,
+		.device = device,
+		.private_data = private_data,
+		.stored = apphint.val[id + device_value_offset].stored
+	};
+}
+
+void pvr_apphint_register_handlers_string(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data)
+{
+	int device_value_offset;
+
+	if (id >= APPHINT_ID_MAX) {
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: AppHint ID (%d) is out of range, max (%d)",
+		         __func__, id, APPHINT_ID_MAX-1));
+		return;
+	}
+
+	get_value_offset_from_device(device, &device_value_offset);
+
+	switch (param_lookup[id].data_type) {
+	case APPHINT_DATA_TYPE_STRING:
+		break;
+
+	default:
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Does not match AppHint data type for ID (%d)",
+		         __func__, id));
+		return;
+	}
+
+	apphint.val[id + device_value_offset] = (struct apphint_action){
+		.query.STRING = query,
+		.set.STRING = set,
+		.device = device,
+		.private_data = private_data,
+		.stored = apphint.val[id + device_value_offset].stored
+	};
+}
+
+/* EOF */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/km_apphint.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/km_apphint.h
new file mode 100644
index 0000000..4746c23
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/km_apphint.h
@@ -0,0 +1,100 @@
+/*************************************************************************/ /*!
+@File           km_apphint.h
+@Title          Apphint internal header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Linux kernel AppHint control
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KM_APPHINT_H__
+#define __KM_APPHINT_H__
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "km_apphint_defs.h"
+#include "device.h"
+
+int pvr_apphint_init(void);
+void pvr_apphint_deinit(void);
+int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device);
+void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device);
+void pvr_apphint_dump_state(void);
+
+int pvr_apphint_get_uint64(APPHINT_ID ue, IMG_UINT64 *pVal);
+int pvr_apphint_get_uint32(APPHINT_ID ue, IMG_UINT32 *pVal);
+int pvr_apphint_get_bool(APPHINT_ID ue, IMG_BOOL *pVal);
+int pvr_apphint_get_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size);
+
+/* Supplied to pvr_apphint_register_handlers_*() functions when the apphint
+ * is a global driver apphint, i.e. apphints not present in
+ * APPHINT_DEBUGFS_DEVICE_ID
+ */
+#define APPHINT_OF_DRIVER_NO_DEVICE ((void*)-1U)
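+
+/* Usage sketch (hypothetical ID and callbacks) for a driver-global apphint:
+ *
+ *     pvr_apphint_register_handlers_uint32(APPHINT_ID_HypotheticalHint,
+ *                                          my_query_cb, my_set_cb,
+ *                                          APPHINT_OF_DRIVER_NO_DEVICE,
+ *                                          NULL);
+ */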
+
+void pvr_apphint_register_handlers_uint64(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data);
+void pvr_apphint_register_handlers_uint32(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data);
+void pvr_apphint_register_handlers_bool(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data);
+void pvr_apphint_register_handlers_string(APPHINT_ID id,
+	PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value),
+	PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value),
+	const PVRSRV_DEVICE_NODE *device,
+	const void *private_data);
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* __KM_APPHINT_H__ */
+
+/******************************************************************************
+ End of file (km_apphint.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/linkage.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/linkage.h
new file mode 100644
index 0000000..2cfe060
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/linkage.h
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux specific Services code internal interfaces
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Interfaces between various parts of the Linux specific
+                Services code, that don't have any other obvious
+                header file to go into.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__LINKAGE_H__)
+#define __LINKAGE_H__
+
+/*
+ * FIXME: These declarations live here to save creating a new header; they
+ * should be removed soon anyway, as bridge gen should provide this interface.
+ */
+PVRSRV_ERROR DeviceDepBridgeInit(IMG_UINT64 ui64Features);
+PVRSRV_ERROR DeviceDepBridgeDeInit(IMG_UINT64 ui64Features);
+PVRSRV_ERROR LinuxBridgeInit(void);
+PVRSRV_ERROR LinuxBridgeDeInit(void);
+
+PVRSRV_ERROR PVROSFuncInit(void);
+void PVROSFuncDeInit(void);
+
+int PVRDebugCreateDebugFSEntries(void);
+void PVRDebugRemoveDebugFSEntries(void);
+
+#endif /* !defined(__LINKAGE_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/module_common.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/module_common.c
new file mode 100644
index 0000000..47bbf556
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/module_common.c
@@ -0,0 +1,581 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common linux module setup
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+
+#include "pvr_debugfs.h"
+#include "private_data.h"
+#include "linkage.h"
+#include "lists.h"
+#include "power.h"
+#include "env_connection.h"
+#include "process_stats.h"
+#include "module_common.h"
+#include "pvrsrv.h"
+#include "srvcore.h"
+#include "rgxdevice.h"
+#include "pvrsrv_error.h"
+#include "pvr_drv.h"
+#include <linux/moduleparam.h>
+
+#include <pvr_fence.h>
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+#include "pvr_gputrace.h"
+#endif
+
+#include "km_apphint.h"
+#include "srvinit.h"
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+extern IMG_UINT32 gPVRDebugLevel;
+module_param(gPVRDebugLevel, uint, 0644);
+MODULE_PARM_DESC(gPVRDebugLevel,
+				 "Sets the level of debug output (default 0x7)");
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+#if defined(DEBUG)
+extern IMG_UINT32 gPMRAllocFail;
+module_param(gPMRAllocFail, uint, 0644);
+MODULE_PARM_DESC(gPMRAllocFail, "When the number of PMR allocs reaches"
+				 " this value, it will fail (default value is 0, which"
+				 " means that the alloc function will behave normally).");
+#endif /* defined(DEBUG) */
+
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+/* Display class interface */
+#include "kerneldisplay.h"
+EXPORT_SYMBOL(DCRegisterDevice);
+EXPORT_SYMBOL(DCUnregisterDevice);
+EXPORT_SYMBOL(DCDisplayConfigurationRetired);
+EXPORT_SYMBOL(DCDisplayHasPendingCommand);
+EXPORT_SYMBOL(DCImportBufferAcquire);
+EXPORT_SYMBOL(DCImportBufferRelease);
+
+/* Physmem interface (required by LMA DC drivers) */
+#include "physheap.h"
+EXPORT_SYMBOL(PhysHeapAcquire);
+EXPORT_SYMBOL(PhysHeapRelease);
+EXPORT_SYMBOL(PhysHeapGetType);
+EXPORT_SYMBOL(PhysHeapRegionGetCpuPAddr);
+EXPORT_SYMBOL(PhysHeapRegionGetSize);
+EXPORT_SYMBOL(PhysHeapCpuPAddrToDevPAddr);
+
+EXPORT_SYMBOL(PVRSRVGetDriverStatus);
+EXPORT_SYMBOL(PVRSRVSystemInstallDeviceLISR);
+EXPORT_SYMBOL(PVRSRVSystemUninstallDeviceLISR);
+
+#include "pvr_notifier.h"
+EXPORT_SYMBOL(PVRSRVCheckStatus);
+
+#include "pvr_debug.h"
+EXPORT_SYMBOL(PVRSRVGetErrorStringKM);
+#endif /* defined(SUPPORT_DISPLAY_CLASS) */
+
+#include "rgxapi_km.h"
+#if defined(SUPPORT_SHARED_SLC)
+EXPORT_SYMBOL(RGXInitSLC);
+#endif
+
+EXPORT_SYMBOL(RGXHWPerfConnect);
+EXPORT_SYMBOL(RGXHWPerfDisconnect);
+EXPORT_SYMBOL(RGXHWPerfControl);
+EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCounters);
+EXPORT_SYMBOL(RGXHWPerfDisableCounters);
+EXPORT_SYMBOL(RGXHWPerfAcquireEvents);
+EXPORT_SYMBOL(RGXHWPerfReleaseEvents);
+EXPORT_SYMBOL(RGXHWPerfConvertCRTimeStamp);
+#if defined(SUPPORT_KERNEL_HWPERF_TEST)
+EXPORT_SYMBOL(OSAddTimer);
+EXPORT_SYMBOL(OSEnableTimer);
+EXPORT_SYMBOL(OSDisableTimer);
+EXPORT_SYMBOL(OSRemoveTimer);
+#endif
+
+#include "mtk_mfgsys.h"
+
+CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile)
+{
+	if (pFile)
+	{
+		struct drm_file *psDRMFile = pFile->private_data;
+
+		return psDRMFile->driver_priv;
+	}
+
+	return NULL;
+}
+
+struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection)
+{
+	ENV_CONNECTION_DATA *psEnvConnection;
+
+	psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+	PVR_ASSERT(psEnvConnection != NULL);
+
+	return psEnvConnection->psFile;
+}
+
+void MTKCommonDisablePowerDomain(void)
+{
+	MTKDisablePowerDomain();
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDriverInit
+@Description  Common one time driver initialisation
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDriverInit(void)
+{
+	PVRSRV_ERROR pvrerr;
+	int error = 0;
+
+#if defined(PDUMP)
+	error = dbgdrv_init();
+	if (error != 0)
+	{
+		return error;
+	}
+#endif
+
+	error = PVRDebugFSInit();
+	if (error != 0)
+	{
+		return error;
+	}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	if (PVRSRVStatsInitialise() != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+#endif
+
+	if (PVROSFuncInit() != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+
+	LinuxBridgeInit();
+
+	error = pvr_apphint_init();
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: failed AppHint setup(%d)",
+			 __func__, error));
+	}
+
+	pvrerr = PVRSRVDriverInit();
+	if (pvrerr != PVRSRV_OK)
+	{
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDriverDeinit
+@Description  Common one time driver de-initialisation
+@Return       void
+*/ /***************************************************************************/
+void PVRSRVCommonDriverDeinit(void)
+{
+	PVRSRVDriverDeInit();
+
+	pvr_apphint_deinit();
+
+	LinuxBridgeDeInit();
+
+	PVROSFuncDeInit();
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	PVRSRVStatsDestroy();
+#endif
+	PVRDebugFSDeInit();
+
+#if defined(PDUMP)
+	dbgdrv_cleanup();
+#endif
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceInit
+@Description  Common device related initialisation.
+@Input        psDeviceNode  The device node for which initialisation should be
+                            performed
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	int error = 0;
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	{
+		PVRSRV_ERROR eError = pvr_sync_init(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: unable to create sync (%d)",
+					 __func__, eError));
+			return -EBUSY;
+		}
+	}
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	psDeviceNode->psBufferSyncContext =
+		pvr_buffer_sync_context_create(psDeviceNode);
+	if (IS_ERR(psDeviceNode->psBufferSyncContext))
+	{
+		error = PTR_ERR(psDeviceNode->psBufferSyncContext);
+		psDeviceNode->psBufferSyncContext = NULL;
+
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: unable to initialise buffer_sync support (%d)",
+				 __func__, error));
+		return error;
+	}
+#endif
+
+	error = PVRDebugCreateDebugFSEntries();
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: failed to create default debugfs entries (%d)",
+			 __func__, error));
+	}
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	error = PVRGpuTraceInitDevice(psDeviceNode);
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: failed to initialise PVR GPU Tracing on device%d (%d)",
+			 __func__, psDeviceNode->sDevId.i32UMIdentifier, error));
+	}
+#endif
+
+	/* register the AppHint device control before device initialisation
+	 * so individual AppHints can be configured during the init phase
+	 */
+	error = pvr_apphint_device_register(psDeviceNode);
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: failed to initialise device AppHints (%d)",
+			 __func__, error));
+	}
+
+	/* Initialise the device dependent bridges */
+	error = DeviceDepBridgeInit(psDevInfo->sDevFeatureCfg.ui64Features);
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: Device dependent bridge initialization failed (%d)",
+			 __func__, error));
+	}
+
+	return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceDeinit
+@Description  Common device related de-initialisation.
+@Input        psDeviceNode  The device node for which de-initialisation should
+                            be performed
+@Return       void
+*/ /***************************************************************************/
+void PVRSRVCommonDeviceDeinit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	int error = 0;
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+	pvr_apphint_device_unregister(psDeviceNode);
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	PVRGpuTraceDeInitDevice(psDeviceNode);
+#endif
+
+	PVRDebugRemoveDebugFSEntries();
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	pvr_buffer_sync_context_destroy(psDeviceNode->psBufferSyncContext);
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	pvr_sync_deinit();
+#endif
+
+	pvr_fence_cleanup();
+
+	error = DeviceDepBridgeDeInit(psDevInfo->sDevFeatureCfg.ui64Features);
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: Device dependent bridge deinitialization failed (%d)",
+			 __func__, error));
+	}
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceShutdown
+@Description  Common device shutdown.
+@Input        psDeviceNode  The device node representing the device that should
+                            be shutdown
+@Return       void
+*/ /***************************************************************************/
+
+void PVRSRVCommonDeviceShutdown(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+
+	/*
+	 * Disable the bridge to stop processes trying to use the driver
+	 * after it has been shut down.
+	 */
+	eError = PVRSRVSuspendDriver();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed to suspend driver (%d)",
+			__func__, eError));
+		return;
+	}
+
+	(void) PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+										   PVRSRV_SYS_POWER_STATE_OFF);
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceSuspend
+@Description  Common device suspend.
+@Input        psDeviceNode  The device node representing the device that should
+                            be suspended
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceSuspend(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	/*
+	 * PVRSRVSuspendDriver prevents processes from using the driver while it's
+	 * suspended (this is needed for Android). Acquire the bridge lock first to
+	 * ensure the driver isn't currently in use.
+	 */
+
+	PVRSRVSuspendDriver();
+
+	if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+										PVRSRV_SYS_POWER_STATE_OFF) != PVRSRV_OK)
+	{
+		PVRSRVUnsuspendDriver();
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceResume
+@Description  Common device resume.
+@Input        psDeviceNode  The device node representing the device that should
+                            be resumed
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceResume(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+										PVRSRV_SYS_POWER_STATE_ON) != PVRSRV_OK)
+	{
+		return -EINVAL;
+	}
+
+	PVRSRVUnsuspendDriver();
+
+	/*
+	 * Reprocess the device queues in case commands were blocked during
+	 * suspend.
+	 */
+	if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE)
+	{
+		PVRSRVCheckStatus(NULL);
+	}
+
+	return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceOpen
+@Description  Common device open.
+@Input        psDeviceNode  The device node representing the device being
+                            opened by a user mode process
+@Input        psDRMFile     The DRM file data that backs the file handle
+                            returned to the user mode process
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceOpen(PVRSRV_DEVICE_NODE *psDeviceNode,
+						   struct drm_file *psDRMFile)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	ENV_CONNECTION_PRIVATE_DATA sPrivData;
+	void *pvConnectionData;
+	PVRSRV_ERROR eError;
+	int iErr = 0;
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#endif
+
+	if (!psPVRSRVData)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: No device data", __func__));
+		iErr = -ENODEV;
+		goto e1;
+	}
+
+	/*
+	 * If the first attempt already set the state to bad,
+	 * there is no point in trying a second time, so get out.
+	 */
+	if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_BAD)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Driver already in bad state. Device open failed.",
+				 __func__));
+		iErr = -ENODEV;
+		goto e1;
+	}
+
+	if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
+	{
+		eError = PVRSRVDeviceInitialise(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise device (%s)",
+					 __func__, PVRSRVGetErrorStringKM(eError)));
+			iErr = -ENODEV;
+			goto e1;
+		}
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+		if (PVRGpuTraceEnabled())
+		{
+			PVRSRV_ERROR eError = PVRGpuTraceEnabledSetNoBridgeLock(psDeviceNode,
+			                                                        IMG_TRUE);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Failed to initialise GPU event tracing"
+						" (%s)", PVRSRVGetErrorStringKM(eError)));
+			}
+		}
+#endif
+	}
+
+	sPrivData.psDevNode = psDeviceNode;
+	sPrivData.psFile = psDRMFile->filp;
+
+	/*
+	 * Here we pass the file pointer, which will be passed through to our
+	 * OSConnectionPrivateDataInit function, where we save it so that
+	 * we can back-reference the file structure from its connection.
+	 */
+	eError = PVRSRVConnectionConnect(&pvConnectionData, (void *) &sPrivData);
+	if (eError != PVRSRV_OK)
+	{
+		iErr = -ENOMEM;
+		goto e1;
+	}
+
+	psDRMFile->driver_priv = pvConnectionData;
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+
+out:
+	return iErr;
+e1:
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+	goto out;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceRelease
+@Description  Common device release.
+@Input        psDeviceNode  The device node for the device that the given file
+                            represents
+@Input        psDRMFile     The DRM file data that's being released
+@Return       void
+*/ /***************************************************************************/
+void PVRSRVCommonDeviceRelease(PVRSRV_DEVICE_NODE *psDeviceNode,
+							   struct drm_file *psDRMFile)
+{
+	void *pvConnectionData;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#endif
+
+	pvConnectionData = psDRMFile->driver_priv;
+	if (pvConnectionData)
+	{
+		PVRSRVConnectionDisconnect(pvConnectionData);
+		psDRMFile->driver_priv = NULL;
+	}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/module_common.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/module_common.h
new file mode 100644
index 0000000..bd35290
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/module_common.h
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@File           module_common.h
+@Title          Common linux module setup header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _MODULE_COMMON_H_
+#define _MODULE_COMMON_H_
+
+/* DRVNAME is the name we use to register our driver. */
+#define DRVNAME PVR_LDM_DRIVER_REGISTRATION_NAME
+
+struct _PVRSRV_DEVICE_NODE_;
+struct drm_file;
+
+int PVRSRVCommonDriverInit(void);
+void PVRSRVCommonDriverDeinit(void);
+
+void MTKCommonDisablePowerDomain(void);
+
+int PVRSRVCommonDeviceInit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+void PVRSRVCommonDeviceDeinit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+void PVRSRVCommonDeviceShutdown(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+int PVRSRVCommonDeviceSuspend(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+int PVRSRVCommonDeviceResume(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+int PVRSRVCommonDeviceOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+						   struct drm_file *psDRMFile);
+void PVRSRVCommonDeviceRelease(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+							   struct drm_file *psDRMFile);
+
+#endif /* _MODULE_COMMON_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osconnection_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osconnection_server.c
new file mode 100644
index 0000000..fd464df
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osconnection_server.c
@@ -0,0 +1,148 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux specific per process data functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "connection_server.h"
+#include "osconnection_server.h"
+
+#include "env_connection.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+#include <linux/sched.h>
+
+#if defined (SUPPORT_ION)
+#include <linux/err.h>
+#include PVR_ANDROID_ION_HEADER
+
+/*
+	The ion device (the base object for all requests)
+	is created by the system; we acquire it via
+	Linux-specific functions provided by the system layer
+*/
+#include "ion_sys.h"
+#endif
+
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData)
+{
+	ENV_CONNECTION_PRIVATE_DATA *psPrivData = pvOSData;
+	ENV_CONNECTION_DATA *psEnvConnection;
+#if defined(SUPPORT_ION)
+	ENV_ION_CONNECTION_DATA *psIonConnection;
+#endif
+
+	*phOsPrivateData = OSAllocZMem(sizeof(ENV_CONNECTION_DATA));
+
+	if (*phOsPrivateData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __FUNCTION__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psEnvConnection = (ENV_CONNECTION_DATA *)*phOsPrivateData;
+
+	psEnvConnection->owner = current->tgid;
+
+	/* Save the pointer to our struct file */
+	psEnvConnection->psFile = psPrivData->psFile;
+	psEnvConnection->psDevNode = psPrivData->psDevNode;
+
+#if defined(SUPPORT_ION)
+	psIonConnection = (ENV_ION_CONNECTION_DATA *)OSAllocZMem(sizeof(ENV_ION_CONNECTION_DATA));
+	if (psIonConnection == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __FUNCTION__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psEnvConnection->psIonData = psIonConnection;
+	/*
+		We can have more than one connection per process, so we need more than
+		the PID to form a unique name
+	*/
+	psEnvConnection->psIonData->psIonDev = IonDevAcquire();
+	OSSNPrintf(psEnvConnection->psIonData->azIonClientName, ION_CLIENT_NAME_SIZE, "pvr_ion_client-%p-%d", *phOsPrivateData, OSGetCurrentClientProcessIDKM());
+	psEnvConnection->psIonData->psIonClient =
+		ion_client_create(psEnvConnection->psIonData->psIonDev,
+						  psEnvConnection->psIonData->azIonClientName);
+
+	if (IS_ERR_OR_NULL(psEnvConnection->psIonData->psIonClient))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSConnectionPrivateDataInit: Couldn't create "
+								"ion client for per connection data"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	psEnvConnection->psIonData->ui32IonClientRefCount = 1;
+#endif /* SUPPORT_ION */
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+	ENV_CONNECTION_DATA *psEnvConnection;
+
+	if (hOsPrivateData == NULL)
+	{
+		return PVRSRV_OK;
+	}
+
+	psEnvConnection = hOsPrivateData;
+
+#if defined(SUPPORT_ION)
+	EnvDataIonClientRelease(psEnvConnection->psIonData);
+#endif
+
+	OSFreeMem(hOsPrivateData);
+	/* Not NULLing the handle here; the caller's copy is on its stack */
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_DEVICE_NODE *OSGetDevData(CONNECTION_DATA *psConnection)
+{
+	ENV_CONNECTION_DATA *psEnvConnection;
+
+	psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+	PVR_ASSERT(psEnvConnection);
+
+	return psEnvConnection->psDevNode;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc.c
new file mode 100644
index 0000000..96383c5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc.c
@@ -0,0 +1,1886 @@
+/*************************************************************************/ /*!
+@File
+@Title          Environment related functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/div64.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/hugetlb.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+#include <linux/string.h>
+#include <asm/hardirq.h>
+#include <asm/tlbflush.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <asm/uaccess.h>
+#include <linux/spinlock.h>
+#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || \
+	defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) || \
+	defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || \
+	defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) || \
+	defined(PVR_LINUX_USING_WORKQUEUES)
+#include <linux/workqueue.h>
+#endif
+#include <linux/kthread.h>
+#include <asm/atomic.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+#include <linux/pfn_t.h>
+#include <linux/pfn.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+#include <linux/sched/clock.h>
+#include <linux/sched/signal.h>
+#else
+#include <linux/sched.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */
+
+#include "log2.h"
+#include "osfunc.h"
+#include "cache_km.h"
+#include "img_types.h"
+#include "allocmem.h"
+#include "devicemem_server_utils.h"
+#include "pvr_debugfs.h"
+#include "event.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+#include "pvr_debug.h"
+#include "pvrsrv_memallocflags.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#include "physmem_osmem_linux.h"
+#include "dma_support.h"
+#include "kernel_compatibility.h"
+
+#if defined(VIRTUAL_PLATFORM)
+#define EVENT_OBJECT_TIMEOUT_US		(120000000ULL)
+#else
+#if defined(EMULATOR) || defined(TC_APOLLO_TCF5)
+#define EVENT_OBJECT_TIMEOUT_US		(2000000ULL)
+#else
+#define EVENT_OBJECT_TIMEOUT_US		(100000ULL)
+#endif /* EMULATOR */
+#endif
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+/*
+ * Main driver lock, used to ensure driver code is single threaded. There are
+ * some places where this lock must not be taken, such as in the mmap related
+ * driver entry points.
+ */
+static DEFINE_MUTEX(gPVRSRVLock);
+
+static void *g_pvBridgeBuffers = NULL;
+
+struct task_struct *BridgeLockGetOwner(void);
+IMG_BOOL BridgeLockIsLocked(void);
+#endif
+
+
+PVRSRV_ERROR OSPhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+							PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr)
+{
+	struct device *psDev = psDevNode->psDevConfig->pvOSDevice;
+	IMG_CPU_PHYADDR sCpuPAddr;
+	struct page *psPage;
+	IMG_UINT32	ui32Order=0;
+	unsigned int gfp_flags;
+
+	PVR_ASSERT(uiSize != 0);
+	/*Align the size to the page granularity */
+	uiSize = PAGE_ALIGN(uiSize);
+
+	/*Get the order to be used with the allocation */
+	ui32Order = get_order(uiSize);
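+	/* Illustrative note (not from the original source): on a system with
+	 * 4 KB pages, a 6 KB request is page-aligned to 8 KB, so get_order()
+	 * returns 1 and (1 << 1) * PAGE_SIZE = 8 KB is actually allocated below.
+	 */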
+
+	gfp_flags = GFP_KERNEL;
+
+#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM)
+	if (psDev && *psDev->dma_mask == DMA_BIT_MASK(32))
+	{
+		/* Limit to 32 bit.
+		 * Achieved by setting __GFP_DMA32 for 64 bit systems */
+		gfp_flags |= __GFP_DMA32;
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(psDev);
+#endif
+
+	/*allocate the pages */
+	psPage = alloc_pages(gfp_flags, ui32Order);
+	if (psPage == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	uiSize = (1 << ui32Order) * PAGE_SIZE;
+
+	psMemHandle->u.pvHandle = psPage;
+	psMemHandle->ui32Order = ui32Order;
+	sCpuPAddr.uiAddr =  IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(psPage));
+
+	/*
+	 * Even when multiple pages are allocated for the base MMU object, a single
+	 * physical address is sufficient because they are physically contiguous.
+	 */
+	PhysHeapCpuPAddrToDevPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL], 1, psDevPAddr, &sCpuPAddr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+	                                    uiSize,
+	                                    (IMG_UINT64)(uintptr_t) psPage);
+#else
+	PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+	                             psPage,
+	                             sCpuPAddr,
+	                             uiSize,
+	                             NULL);
+#endif
+#endif
+
+	return PVRSRV_OK;
+}
+
+void OSPhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle)
+{
+	struct page *psPage = (struct page*) psMemHandle->u.pvHandle;
+	IMG_UINT32	uiSize, uiPageCount=0;
+
+	uiPageCount = (1 << psMemHandle->ui32Order);
+	uiSize = (uiPageCount * PAGE_SIZE);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+	                                      (IMG_UINT64)(uintptr_t) psPage);
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT64)(uintptr_t) psPage);
+#endif
+#endif
+
+	__free_pages(psPage, psMemHandle->ui32Order);
+	psMemHandle->ui32Order = 0;
+}
+
+PVRSRV_ERROR OSPhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+						size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+						void **pvPtr)
+{
+	size_t actualSize = 1 << (PAGE_SHIFT + psMemHandle->ui32Order);
+	*pvPtr = kmap((struct page*)psMemHandle->u.pvHandle);
+
+	PVR_UNREFERENCED_PARAMETER(psDevPAddr);
+
+	PVR_UNREFERENCED_PARAMETER(actualSize); /* If we don't take an #ifdef path */
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, actualSize);
+#else
+	{
+		IMG_CPU_PHYADDR sCpuPAddr;
+		sCpuPAddr.uiAddr = 0;
+
+		PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
+									 *pvPtr,
+									 sCpuPAddr,
+									 actualSize,
+									 NULL);
+	}
+#endif
+#endif
+
+	return PVRSRV_OK;
+}
+
+void OSPhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, void *pvPtr)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	/* Mapping is done a page at a time */
+	PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, (1 << (PAGE_SHIFT + psMemHandle->ui32Order)));
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, (IMG_UINT64)(uintptr_t)pvPtr);
+#endif
+#endif
+
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(pvPtr);
+
+	kunmap((struct page*) psMemHandle->u.pvHandle);
+}
+
+PVRSRV_ERROR OSPhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+                                   PG_HANDLE *psMemHandle,
+                                   IMG_UINT32 uiOffset,
+                                   IMG_UINT32 uiLength)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	struct page* psPage = (struct page*) psMemHandle->u.pvHandle;
+
+	void* pvVirtAddrStart = kmap(psPage) + uiOffset;
+	IMG_CPU_PHYADDR sPhysStart, sPhysEnd;
+
+	if (uiLength == 0)
+	{
+		goto e0;
+	}
+
+	if ((uiOffset + uiLength) > ((1 << psMemHandle->ui32Order) * PAGE_SIZE))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Invalid size params, uiOffset %u, uiLength %u",
+				__FUNCTION__,
+				uiOffset,
+				uiLength));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	sPhysStart.uiAddr = page_to_phys(psPage) + uiOffset;
+	sPhysEnd.uiAddr = sPhysStart.uiAddr + uiLength;
+
+	CacheOpExecKM(psDevNode,
+				pvVirtAddrStart,
+				pvVirtAddrStart + uiLength,
+				sPhysStart,
+				sPhysEnd,
+				PVRSRV_CACHE_OP_CLEAN);
+
+e0:
+	kunmap(psPage);
+
+	return eError;
+}
+
+#if defined(__GNUC__)
+#define PVRSRV_MEM_ALIGN __attribute__ ((aligned (0x8)))
+#define PVRSRV_MEM_ALIGN_MASK (0x7)
+#else
+#error "PVRSRV Alignment macros need to be defined for this compiler"
+#endif
+
+IMG_UINT32 OSCPUCacheAttributeSize(IMG_DCACHE_ATTRIBUTE eCacheAttribute)
+{
+	IMG_UINT32 uiSize = 0;
+
+	switch(eCacheAttribute)
+	{
+		case PVR_DCACHE_LINE_SIZE:
+			uiSize = cache_line_size();
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache attribute type %d",
+					__FUNCTION__, (IMG_UINT32)eCacheAttribute));
+			PVR_ASSERT(0);
+			break;
+	}
+
+	return uiSize;
+}
+
+IMG_UINT32 OSVSScanf(IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...)
+{
+	va_list argList;
+	IMG_INT32 iCount = 0;
+
+	va_start(argList, pszFormat);
+	iCount = vsscanf(pStr, pszFormat, argList);
+	va_end(argList);
+
+	return iCount;
+}
+
+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen)
+{
+	return (IMG_INT) memcmp(pvBufA, pvBufB, uiLen);
+}
+
+IMG_CHAR *OSStringNCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uSize)
+{
+	return strncpy(pszDest, pszSrc, uSize);
+}
+
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...)
+{
+	va_list argList;
+	IMG_INT32 iCount;
+
+	va_start(argList, pszFormat);
+	iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
+	va_end(argList);
+
+	return iCount;
+}
+
+size_t OSStringLength(const IMG_CHAR *pStr)
+{
+	return strlen(pStr);
+}
+
+size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount)
+{
+	return strnlen(pStr, uiCount);
+}
+
+IMG_INT32 OSStringCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2)
+{
+	return strcmp(pStr1, pStr2);
+}
+
+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
+                          size_t uiSize)
+{
+	return strncmp(pStr1, pStr2, uiSize);
+}
+
+PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base,
+                              IMG_UINT32 *ui32Result)
+{
+	if (kstrtou32(pStr, ui32Base, ui32Result) != 0)
+		return PVRSRV_ERROR_CONVERSION_FAILED;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSInitEnvData(void)
+{
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	/* allocate memory for the bridge buffers to be used during an ioctl */
+	g_pvBridgeBuffers = OSAllocMem(PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE);
+	if (g_pvBridgeBuffers == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+#endif
+
+	LinuxInitPhysmem();
+
+	return PVRSRV_OK;
+}
+
+
+void OSDeInitEnvData(void)
+{
+
+	LinuxDeinitPhysmem();
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	if (g_pvBridgeBuffers)
+	{
+		/* free-up the memory allocated for bridge buffers */
+		OSFreeMem(g_pvBridgeBuffers);
+		g_pvBridgeBuffers = NULL;
+	}
+#endif
+}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+PVRSRV_ERROR OSGetGlobalBridgeBuffers(void **ppvBridgeInBuffer,
+									  void **ppvBridgeOutBuffer)
+{
+	PVR_ASSERT (ppvBridgeInBuffer && ppvBridgeOutBuffer);
+
+	*ppvBridgeInBuffer = g_pvBridgeBuffers;
+	*ppvBridgeOutBuffer = *ppvBridgeInBuffer + PVRSRV_MAX_BRIDGE_IN_SIZE;
+
+	return PVRSRV_OK;
+}
+#endif
+
+void OSReleaseThreadQuanta(void)
+{
+	schedule();
+}
+
+/* Not matching/aligning this API to the Clockus() API above to avoid
+ * unnecessary multiplication/division operations in calling code.
+ */
+static inline IMG_UINT64 Clockns64(void)
+{
+	IMG_UINT64 timenow;
+
+	/* Kernel thread preempt protection. Some architecture implementations
+	 * of sched_clock (e.g. ARM) are not preempt-safe when the kernel is
+	 * configured with CONFIG_PREEMPT or similar options.
+	 */
+	preempt_disable();
+
+	/* Using sched_clock instead of ktime_get since we need a time stamp that
+	 * correlates with that shown in kernel logs and trace data not one that
+	 * is a bit behind. */
+	timenow = sched_clock();
+
+	preempt_enable();
+
+	return timenow;
+}
+
+IMG_UINT64 OSClockns64(void)
+{
+	return Clockns64();
+}
+
+IMG_UINT64 OSClockus64(void)
+{
+	IMG_UINT64 timenow = Clockns64();
+	IMG_UINT32 remainder;
+
+	return OSDivide64r64(timenow, 1000, &remainder);
+}
+
+IMG_UINT32 OSClockus(void)
+{
+	return (IMG_UINT32) OSClockus64();
+}
+
+IMG_UINT32 OSClockms(void)
+{
+	IMG_UINT64 timenow = Clockns64();
+	IMG_UINT32 remainder;
+
+	return OSDivide64(timenow, 1000000, &remainder);
+}
+
+static inline IMG_UINT64 KClockns64(void)
+{
+	ktime_t sTime = ktime_get();
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	return sTime;
+#else
+	return sTime.tv64;
+#endif
+}
+
+PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time)
+{
+	*pui64Time = KClockns64();
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time)
+{
+	IMG_UINT64 timenow = KClockns64();
+	IMG_UINT32 remainder;
+
+	*pui64Time = OSDivide64r64(timenow, 1000, &remainder);
+	return PVRSRV_OK;
+}
+
+IMG_UINT64 OSClockMonotonicRawns64(void)
+{
+	struct timespec ts;
+
+	getrawmonotonic(&ts);
+	return (IMG_UINT64) ts.tv_sec * 1000000000 + ts.tv_nsec;
+}
+
+IMG_UINT64 OSClockMonotonicRawus64(void)
+{
+	IMG_UINT32 rem;
+	return OSDivide64r64(OSClockMonotonicRawns64(), 1000, &rem);
+}
+
+/*
+	OSWaitus
+*/
+void OSWaitus(IMG_UINT32 ui32Timeus)
+{
+	udelay(ui32Timeus);
+}
+
+
+/*
+	OSSleepms
+*/
+void OSSleepms(IMG_UINT32 ui32Timems)
+{
+	msleep(ui32Timems);
+}
+
+
+INLINE IMG_UINT64 OSGetCurrentProcessVASpaceSize(void)
+{
+	return (IMG_UINT64)TASK_SIZE;
+}
+
+INLINE IMG_PID OSGetCurrentProcessID(void)
+{
+	if (in_interrupt())
+	{
+		return KERNEL_ID;
+	}
+
+	return (IMG_PID)task_tgid_nr(current);
+}
+
+INLINE IMG_CHAR *OSGetCurrentProcessName(void)
+{
+	return current->comm;
+}
+
+INLINE uintptr_t OSGetCurrentThreadID(void)
+{
+	if (in_interrupt())
+	{
+		return KERNEL_ID;
+	}
+
+	return current->pid;
+}
+
+IMG_PID OSGetCurrentClientProcessIDKM(void)
+{
+	return OSGetCurrentProcessID();
+}
+
+IMG_CHAR *OSGetCurrentClientProcessNameKM(void)
+{
+	return OSGetCurrentProcessName();
+}
+
+uintptr_t OSGetCurrentClientThreadIDKM(void)
+{
+	return OSGetCurrentThreadID();
+}
+
+size_t OSGetPageSize(void)
+{
+	return PAGE_SIZE;
+}
+
+size_t OSGetPageShift(void)
+{
+	return PAGE_SHIFT;
+}
+
+size_t OSGetPageMask(void)
+{
+	return (OSGetPageSize()-1);
+}
+
+size_t OSGetOrder(size_t uSize)
+{
+	return get_order(PAGE_ALIGN(uSize));
+}
+
+IMG_UINT64 OSGetRAMSize(void)
+{
+	struct sysinfo SI;
+	si_meminfo(&SI);
+
+	return (PAGE_SIZE * SI.totalram);
+}
+
+typedef struct
+{
+	int os_error;
+	PVRSRV_ERROR pvr_error;
+} error_map_t;
+
+/* return -ve versions of POSIX errors as they are used in this form */
+static const error_map_t asErrorMap[] =
+{
+	{-EFAULT, PVRSRV_ERROR_BRIDGE_EFAULT},
+	{-EINVAL, PVRSRV_ERROR_BRIDGE_EINVAL},
+	{-ENOMEM, PVRSRV_ERROR_BRIDGE_ENOMEM},
+	{-ERANGE, PVRSRV_ERROR_BRIDGE_ERANGE},
+	{-EPERM,  PVRSRV_ERROR_BRIDGE_EPERM},
+	{-ENOTTY, PVRSRV_ERROR_BRIDGE_ENOTTY},
+	{-ENOTTY, PVRSRV_ERROR_BRIDGE_CALL_FAILED},
+	{-ERANGE, PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL},
+	{-ENOMEM, PVRSRV_ERROR_OUT_OF_MEMORY},
+	{-EINVAL, PVRSRV_ERROR_INVALID_PARAMS},
+
+	{0,       PVRSRV_OK}
+};
+
+#define num_rows(a) (sizeof(a)/sizeof(a[0]))
+
+int PVRSRVToNativeError(PVRSRV_ERROR e)
+{
+	int os_error = -EFAULT;
+	int i;
+	for (i = 0; i < num_rows(asErrorMap); i++)
+	{
+		if (e == asErrorMap[i].pvr_error)
+		{
+			os_error = asErrorMap[i].os_error;
+			break;
+		}
+	}
+	return os_error;
+}
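+
+/*
+ * Usage sketch (illustrative): PVRSRVToNativeError(PVRSRV_ERROR_OUT_OF_MEMORY)
+ * returns -ENOMEM, while any PVRSRV_ERROR without an entry in asErrorMap falls
+ * back to the default of -EFAULT.
+ */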
+
+#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
+typedef struct  _MISR_DATA_ {
+	struct workqueue_struct *psWorkQueue;
+	struct work_struct sMISRWork;
+	PFN_MISR pfnMISR;
+	void *hData;
+} MISR_DATA;
+
+/*
+	MISRWrapper
+*/
+static void MISRWrapper(struct work_struct *data)
+{
+	MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork);
+
+	psMISRData->pfnMISR(psMISRData->hData);
+}
+
+/*
+	OSInstallMISR
+*/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR,
+							void *hData)
+{
+	MISR_DATA *psMISRData;
+
+	psMISRData = OSAllocMem(sizeof(*psMISRData));
+	if (psMISRData == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psMISRData->hData = hData;
+	psMISRData->pfnMISR = pfnMISR;
+
+	PVR_TRACE(("Installing MISR with cookie %p", psMISRData));
+
+	psMISRData->psWorkQueue = create_singlethread_workqueue("pvr_workqueue");
+
+	if (psMISRData->psWorkQueue == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethreaded_workqueue failed"));
+		OSFreeMem(psMISRData);
+		return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+	}
+
+	INIT_WORK(&psMISRData->sMISRWork, MISRWrapper);
+
+	*hMISRData = (IMG_HANDLE) psMISRData;
+
+	return PVRSRV_OK;
+}
+
+/*
+	OSUninstallMISR
+*/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+	PVR_TRACE(("Uninstalling MISR with cookie %p", psMISRData));
+
+	destroy_workqueue(psMISRData->psWorkQueue);
+	OSFreeMem(psMISRData);
+
+	return PVRSRV_OK;
+}
+
+/*
+	OSScheduleMISR
+*/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+	/*
+		Note:
+
+		In the case of NO_HARDWARE we want the driver to be synchronous so
+		that we don't have to worry about waiting for previous operations
+		to complete
+	*/
+#if defined(NO_HARDWARE)
+	psMISRData->pfnMISR(psMISRData->hData);
+	return PVRSRV_OK;
+#else
+	{
+		bool rc = queue_work(psMISRData->psWorkQueue, &psMISRData->sMISRWork);
+		return (rc ? PVRSRV_OK : PVRSRV_ERROR_ALREADY_EXISTS);
+	}
+#endif
+}
+#else	/* defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) */
+#if defined(PVR_LINUX_MISR_USING_WORKQUEUE)
+typedef struct  _MISR_DATA_ {
+	struct work_struct sMISRWork;
+	PFN_MISR pfnMISR;
+	void *hData;
+} MISR_DATA;
+
+/*
+	MISRWrapper
+*/
+static void MISRWrapper(struct work_struct *data)
+{
+	MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork);
+
+	psMISRData->pfnMISR(psMISRData->hData);
+}
+
+/*
+	OSInstallMISR
+*/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR, void *hData)
+{
+	MISR_DATA *psMISRData;
+
+	psMISRData = OSAllocMem(sizeof(*psMISRData));
+	if (psMISRData == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psMISRData->hData = hData;
+	psMISRData->pfnMISR = pfnMISR;
+
+	PVR_TRACE(("Installing MISR with cookie %p", psMISRData));
+
+	INIT_WORK(&psMISRData->sMISRWork, MISRWrapper);
+
+	*hMISRData = (IMG_HANDLE) psMISRData;
+
+	return PVRSRV_OK;
+}
+
+
+/*
+	OSUninstallMISR
+*/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+	PVR_TRACE(("Uninstalling MISR with cookie %p", psMISRData));
+
+	flush_scheduled_work();
+
+	OSFreeMem(psMISRData);
+
+	return PVRSRV_OK;
+}
+
+/*
+	OSScheduleMISR
+*/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = hMISRData;
+#if defined(NO_HARDWARE)
+	psMISRData->pfnMISR(psMISRData->hData);
+#else
+	schedule_work(&psMISRData->sMISRWork);
+#endif
+	return PVRSRV_OK;
+}
+
+#else	/* #if defined(PVR_LINUX_MISR_USING_WORKQUEUE) */
+typedef struct _MISR_DATA_ {
+	struct tasklet_struct sMISRTasklet;
+	PFN_MISR pfnMISR;
+	void *hData;
+} MISR_DATA;
+
+/*
+	MISRWrapper
+*/
+static void MISRWrapper(unsigned long data)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) data;
+
+	psMISRData->pfnMISR(psMISRData->hData);
+}
+
+/*
+	OSInstallMISR
+*/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR, void *hData)
+{
+	MISR_DATA *psMISRData;
+
+	psMISRData = OSAllocMem(sizeof(*psMISRData));
+	if (psMISRData == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psMISRData->hData = hData;
+	psMISRData->pfnMISR = pfnMISR;
+
+	PVR_TRACE(("Installing MISR with cookie %p", psMISRData));
+
+	tasklet_init(&psMISRData->sMISRTasklet, MISRWrapper, (unsigned long)psMISRData);
+
+	*hMISRData = (IMG_HANDLE) psMISRData;
+
+	return PVRSRV_OK;
+}
+
+/*
+	OSUninstallMISR
+*/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+	PVR_TRACE(("Uninstalling MISR with cookie %p", psMISRData));
+
+	tasklet_kill(&psMISRData->sMISRTasklet);
+
+	OSFreeMem(psMISRData);
+
+	return PVRSRV_OK;
+}
+
+/*
+	OSScheduleMISR
+*/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+#if defined(NO_HARDWARE)
+	psMISRData->pfnMISR(psMISRData->hData);
+#else
+	tasklet_schedule(&psMISRData->sMISRTasklet);
+#endif
+	return PVRSRV_OK;
+}
+
+#endif /* #if defined(PVR_LINUX_MISR_USING_WORKQUEUE) */
+#endif /* #if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) */
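+
+/*
+ * Typical MISR usage, common to all three variants above (a hedged sketch;
+ * pfnMyMISR and pvMyData are hypothetical names):
+ *
+ *   IMG_HANDLE hMISR;
+ *   if (OSInstallMISR(&hMISR, pfnMyMISR, pvMyData) == PVRSRV_OK)
+ *   {
+ *       OSScheduleMISR(hMISR);   (typically called from the LISR)
+ *       ...
+ *       OSUninstallMISR(hMISR);  (once no further scheduling can occur)
+ *   }
+ */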
+
+/* OS specific values for thread priority */
+static const IMG_INT32 ai32OSPriorityValues[OS_THREAD_LAST_PRIORITY] =
+{
+	-20, /* OS_THREAD_HIGHEST_PRIORITY */
+	-10, /* OS_THREAD_HIGH_PRIORITY */
+	  0, /* OS_THREAD_NORMAL_PRIORITY */
+	  9, /* OS_THREAD_LOW_PRIORITY */
+	 19, /* OS_THREAD_LOWEST_PRIORITY */
+	-22, /* OS_THREAD_NOSET_PRIORITY */
+};
+
+typedef struct {
+	struct task_struct *kthread;
+	PFN_THREAD pfnThread;
+	void *hData;
+	OS_THREAD_LEVEL eThreadPriority;
+} OSThreadData;
+
+static int OSThreadRun(void *data)
+{
+	OSThreadData *psOSThreadData = data;
+
+	/* If the requested priority is valid, set the nice value for the new thread */
+	if (psOSThreadData->eThreadPriority != OS_THREAD_NOSET_PRIORITY &&
+	         psOSThreadData->eThreadPriority < OS_THREAD_LAST_PRIORITY)
+		set_user_nice(current, ai32OSPriorityValues[psOSThreadData->eThreadPriority]);
+
+	/* Call the client's kernel thread with the client's data pointer */
+	psOSThreadData->pfnThread(psOSThreadData->hData);
+
+	/* Wait for OSThreadDestroy() to call kthread_stop() */
+	while (!kthread_should_stop())
+	{
+		schedule();
+	}
+
+	return 0;
+}
+
+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
+							IMG_CHAR *pszThreadName,
+							PFN_THREAD pfnThread,
+							void *hData)
+{
+	return OSThreadCreatePriority(phThread, pszThreadName, pfnThread, hData, OS_THREAD_NOSET_PRIORITY);
+}
+
+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
+									IMG_CHAR *pszThreadName,
+									PFN_THREAD pfnThread,
+									void *hData,
+									OS_THREAD_LEVEL eThreadPriority)
+{
+	OSThreadData *psOSThreadData;
+	PVRSRV_ERROR eError;
+
+	psOSThreadData = OSAllocMem(sizeof(*psOSThreadData));
+	if (psOSThreadData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	psOSThreadData->pfnThread = pfnThread;
+	psOSThreadData->hData = hData;
+	psOSThreadData->eThreadPriority = eThreadPriority;
+	psOSThreadData->kthread = kthread_run(OSThreadRun, psOSThreadData, "%s", pszThreadName);
+
+	if (IS_ERR(psOSThreadData->kthread))
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_kthread;
+	}
+
+	*phThread = psOSThreadData;
+
+	return PVRSRV_OK;
+
+fail_kthread:
+	OSFreeMem(psOSThreadData);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread)
+{
+	OSThreadData *psOSThreadData = hThread;
+	int ret;
+
+	/* Let the thread know we are ready for it to end and wait for it. */
+	ret = kthread_stop(psOSThreadData->kthread);
+	if (0 != ret)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "kthread_stop failed(%d)", ret));
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	OSFreeMem(psOSThreadData);
+
+	return PVRSRV_OK;
+}
+
+void OSPanic(void)
+{
+	BUG();
+
+#if defined(__KLOCWORK__)
+	/* Klocworks does not understand that BUG is terminal... */
+	abort();
+#endif
+}
+
+PVRSRV_ERROR OSSetThreadPriority(IMG_HANDLE hThread,
+								 IMG_UINT32  nThreadPriority,
+								 IMG_UINT32  nThreadWeight)
+{
+	PVR_UNREFERENCED_PARAMETER(hThread);
+	PVR_UNREFERENCED_PARAMETER(nThreadPriority);
+	PVR_UNREFERENCED_PARAMETER(nThreadWeight);
+	/* Default priorities used on this platform */
+
+	return PVRSRV_OK;
+}
+
+void *
+OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
+			   size_t ui32Bytes,
+			   IMG_UINT32 ui32MappingFlags)
+{
+	void *pvLinAddr;
+
+	if (ui32MappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK))
+	{
+		PVR_ASSERT(!"Found non-cpu cache mode flag when mapping to the cpu");
+		return NULL;
+	}
+
+	if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE))
+	{
+		/*
+		  This is required to support DMA physheaps for GPU virtualization.
+		  Unfortunately, if a region of kernel managed memory is turned into
+		  a DMA buffer, conflicting mappings can come about easily on Linux
+		  as the original memory is mapped by the kernel as normal cached
+		  memory whilst DMA buffers are mapped mostly as uncached device or
+		  cache-coherent device memory. In both cases the system will have
+		  two conflicting mappings for the same memory region and will have
+		  "undefined behaviour" for most processors notably ARMv6 onwards
+		  and some x86 micro-architectures. As a result, perform ioremapping
+		  manually for DMA physheap allocations by translating from CPU/VA 
+		  to BUS/PA thereby preventing the creation of conflicting mappings.
+		*/
+		pvLinAddr = SysDmaDevPAddrToCpuVAddr(BasePAddr.uiAddr, ui32Bytes);
+		if (pvLinAddr != NULL)
+		{
+			return pvLinAddr;
+		}
+	}
+
+	switch (ui32MappingFlags)
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+			pvLinAddr = (void *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
+			break;
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+#if defined(CONFIG_X86) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+			pvLinAddr = (void *)ioremap_wc(BasePAddr.uiAddr, ui32Bytes);
+#else
+			pvLinAddr = (void *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
+#endif
+			break;
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+#if defined(CONFIG_X86) || defined(CONFIG_ARM)
+			pvLinAddr = (void *)ioremap_cache(BasePAddr.uiAddr, ui32Bytes);
+#else
+			pvLinAddr = (void *)ioremap(BasePAddr.uiAddr, ui32Bytes);
+#endif
+			break;
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT:
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT:
+			PVR_ASSERT(!"Unexpected cpu cache mode");
+			pvLinAddr = NULL;
+			break;
+		default:
+			PVR_ASSERT(!"Unsupported cpu cache mode");
+			pvLinAddr = NULL;
+			break;
+	}
+
+	return pvLinAddr;
+}
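+
+/*
+ * Illustrative call (a sketch; sRegPA and uiRegSize are hypothetical names):
+ * map a register bank uncached, then release it with the same flags:
+ *
+ *   void *pvRegs = OSMapPhysToLin(sRegPA, uiRegSize,
+ *                                 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+ *   if (pvRegs != NULL)
+ *   {
+ *       ...
+ *       OSUnMapPhysToLin(pvRegs, uiRegSize, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+ *   }
+ */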
+
+
+IMG_BOOL
+OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes, IMG_UINT32 ui32MappingFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+
+	if (ui32MappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK))
+	{
+		PVR_ASSERT(!"Found non-cpu cache mode flag when unmapping from the cpu");
+		return IMG_FALSE;
+	}
+
+	if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE))
+	{
+		if (SysDmaCpuVAddrToDevPAddr(pvLinAddr))
+		{
+			return IMG_TRUE;
+		}
+	}
+
+	iounmap(pvLinAddr);
+
+	return IMG_TRUE;
+}
+
+#define	OS_MAX_TIMERS	8
+
+/* Timer callback structure used by OSAddTimer */
+typedef struct TIMER_CALLBACK_DATA_TAG
+{
+	IMG_BOOL			bInUse;
+	PFN_TIMER_FUNC		pfnTimerFunc;
+	void				*pvData;
+	struct timer_list	sTimer;
+	IMG_UINT32			ui32Delay;
+	IMG_BOOL			bActive;
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	struct work_struct	sWork;
+#endif
+} TIMER_CALLBACK_DATA;
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+static struct workqueue_struct	*psTimerWorkQueue;
+#endif
+
+static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+static DEFINE_MUTEX(sTimerStructLock);
+#else
+/* The lock is used to control access to sTimers */
+static DEFINE_SPINLOCK(sTimerStructLock);
+#endif
+
+static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData)
+{
+	if (!psTimerCBData->bActive)
+		return;
+
+	/* call timer callback */
+	psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
+
+	/* reset timer */
+	mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSTimerCallbackWrapper
+@Description    OS specific timer callback wrapper function
+@Input          uData    Timer callback data
+*/ /**************************************************************************/
+static void OSTimerCallbackWrapper(struct timer_list *t)
+{
+	TIMER_CALLBACK_DATA	*psTimerCBData = (TIMER_CALLBACK_DATA*)from_timer(psTimerCBData, t, sTimer);
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	int res;
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+	res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
+#else
+	res = schedule_work(&psTimerCBData->sWork);
+#endif
+	if (res == 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued"));
+	}
+#else
+	OSTimerCallbackBody(psTimerCBData);
+#endif
+}
+
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork);
+
+	OSTimerCallbackBody(psTimerCBData);
+}
+#endif
+
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout)
+{
+	TIMER_CALLBACK_DATA	*psTimerCBData;
+	IMG_UINT32		ui32i;
+#if !(defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE))
+	unsigned long		ulLockFlags;
+#endif
+
+	/* check callback */
+	if(!pfnTimerFunc)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));
+		return NULL;
+	}
+
+	/* Allocate timer callback data structure */
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	mutex_lock(&sTimerStructLock);
+#else
+	spin_lock_irqsave(&sTimerStructLock, ulLockFlags);
+#endif
+	for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+	{
+		psTimerCBData = &sTimers[ui32i];
+		if (!psTimerCBData->bInUse)
+		{
+			psTimerCBData->bInUse = IMG_TRUE;
+			break;
+		}
+	}
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	mutex_unlock(&sTimerStructLock);
+#else
+	spin_unlock_irqrestore(&sTimerStructLock, ulLockFlags);
+#endif
+	if (ui32i >= OS_MAX_TIMERS)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use"));
+		return NULL;
+	}
+
+	psTimerCBData->pfnTimerFunc = pfnTimerFunc;
+	psTimerCBData->pvData = pvData;
+	psTimerCBData->bActive = IMG_FALSE;
+
+	/*
+		HZ = ticks per second
+		ui32MsTimeout = required ms delay
+		ticks = (Hz * ui32MsTimeout) / 1000
+	*/
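+	/* Worked example (illustrative): with HZ = 250 and ui32MsTimeout = 100,
+	 * the delay below is (250 * 100) / 1000 = 25 jiffies; products smaller
+	 * than 1000 are clamped to a single jiffy.
+	 */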
+	psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
+								?	1
+								:	((HZ * ui32MsTimeout) / 1000);
+	/* initialise object */
+	timer_setup(&psTimerCBData->sTimer, OSTimerCallbackWrapper, 0);
+
+	return (IMG_HANDLE)(uintptr_t)(ui32i + 1);
+}
+
+
+static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer)
+{
+	IMG_UINT32 ui32i = (IMG_UINT32)((uintptr_t)hTimer) - 1;
+
+	PVR_ASSERT(ui32i < OS_MAX_TIMERS);
+
+	return &sTimers[ui32i];
+}
+
+PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+	PVR_ASSERT(psTimerCBData->bInUse);
+	PVR_ASSERT(!psTimerCBData->bActive);
+
+	/* free timer callback data struct */
+	psTimerCBData->bInUse = IMG_FALSE;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+	PVR_ASSERT(psTimerCBData->bInUse);
+	PVR_ASSERT(!psTimerCBData->bActive);
+
+	/* Start timer arming */
+	psTimerCBData->bActive = IMG_TRUE;
+
+	/* set the expire time */
+	psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
+
+	/* Add the timer to the list */
+	add_timer(&psTimerCBData->sTimer);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+	PVR_ASSERT(psTimerCBData->bInUse);
+	PVR_ASSERT(psTimerCBData->bActive);
+
+	/* Stop timer from arming */
+	psTimerCBData->bActive = IMG_FALSE;
+	smp_mb();
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+	flush_workqueue(psTimerWorkQueue);
+#endif
+#if defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	flush_scheduled_work();
+#endif
+
+	/* remove timer */
+	del_timer_sync(&psTimerCBData->sTimer);
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+	/*
+	 * This second flush is to catch the case where the timer ran
+	 * before we managed to delete it, in which case, it will have
+	 * queued more work for the workqueue. Since the bActive flag
+	 * has been cleared, this second flush won't result in the
+	 * timer being rearmed.
+	 */
+	flush_workqueue(psTimerWorkQueue);
+#endif
+#if defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	flush_scheduled_work();
+#endif
+
+	return PVRSRV_OK;
+}
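+
+/*
+ * Expected timer lifecycle (a sketch; MyTimerFunc and pvMyData are
+ * hypothetical names):
+ *
+ *   IMG_HANDLE hTimer = OSAddTimer(MyTimerFunc, pvMyData, 500);
+ *   OSEnableTimer(hTimer);    (callback fires roughly every 500 ms)
+ *   ...
+ *   OSDisableTimer(hTimer);   (must precede removal, see the asserts above)
+ *   OSRemoveTimer(hTimer);
+ */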
+
+
+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, IMG_HANDLE *hEventObject)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVR_UNREFERENCED_PARAMETER(pszName);
+
+	if(hEventObject)
+	{
+		if(LinuxEventObjectListCreate(hEventObject) != PVRSRV_OK)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: hEventObject is not a valid pointer"));
+		eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT;
+	}
+
+	return eError;
+}
+
+
+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if(hEventObject)
+	{
+		LinuxEventObjectListDestroy(hEventObject);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hEventObject is not a valid pointer"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+/*
+ * EventObjectWaitTimeout()
+ */
+static PVRSRV_ERROR EventObjectWaitTimeout(IMG_HANDLE hOSEventKM,
+										   IMG_UINT64 uiTimeoutus,
+										   IMG_BOOL bHoldBridgeLock)
+{
+	PVRSRV_ERROR eError;
+
+	if(hOSEventKM && uiTimeoutus > 0)
+	{
+		eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, bHoldBridgeLock);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: invalid arguments %p, %lld", hOSEventKM, uiTimeoutus));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus)
+{
+	return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus, IMG_FALSE);
+}
+
+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM)
+{
+	return OSEventObjectWaitTimeout(hOSEventKM, EVENT_OBJECT_TIMEOUT_US);
+}
+
+PVRSRV_ERROR OSEventObjectWaitTimeoutAndHoldBridgeLock(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus)
+{
+	return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus, IMG_TRUE);
+}
+
+PVRSRV_ERROR OSEventObjectWaitAndHoldBridgeLock(IMG_HANDLE hOSEventKM)
+{
+	return OSEventObjectWaitTimeoutAndHoldBridgeLock(hOSEventKM, EVENT_OBJECT_TIMEOUT_US);
+}
+
+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject,
+											IMG_HANDLE *phOSEvent)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if(hEventObject)
+	{
+		if(LinuxEventObjectAdd(hEventObject, phOSEvent) != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed"));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectOpen: hEventObject is not a valid pointer"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if(hOSEventKM)
+	{
+		if(LinuxEventObjectDelete(hOSEventKM) != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed"));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hEventObject is not a valid pointer"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject)
+{
+	PVRSRV_ERROR eError;
+
+	if(hEventObject)
+	{
+		eError = LinuxEventObjectSignal(hEventObject);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignal: hOSEventKM is not a valid handle"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR OSCopyToUser(void *pvProcess,
+						  void *pvDest,
+						  const void *pvSrc,
+						  size_t ui32Bytes)
+{
+	PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+	if(pvr_copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
+		return PVRSRV_OK;
+	else
+		return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+PVRSRV_ERROR OSCopyFromUser(void *pvProcess,
+							void *pvDest,
+							const void *pvSrc,
+							size_t ui32Bytes)
+{
+	PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+	if(pvr_copy_from_user(pvDest, pvSrc, ui32Bytes)==0)
+		return PVRSRV_OK;
+	else
+		return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
+{
+	*pui32Remainder = do_div(ui64Divident, ui32Divisor);
+
+	return ui64Divident;
+}
+
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
+{
+	*pui32Remainder = do_div(ui64Divident, ui32Divisor);
+
+	return (IMG_UINT32) ui64Divident;
+}
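+
+/*
+ * Illustrative example: both helpers wrap the kernel's do_div(), which
+ * divides the 64-bit dividend in place and returns the 32-bit remainder,
+ * so OSDivide64(1000001ULL, 1000, &rem) yields 1000 with rem == 1, while
+ * OSDivide64r64() preserves the full 64-bit quotient.
+ */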
+
+/* One time osfunc initialisation */
+PVRSRV_ERROR PVROSFuncInit(void)
+{
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+	{
+		PVR_ASSERT(!psTimerWorkQueue);
+
+		psTimerWorkQueue = create_workqueue("pvr_timer");
+		if (psTimerWorkQueue == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", __FUNCTION__));
+			return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+		}
+	}
+#endif
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	{
+		IMG_UINT32 ui32i;
+
+		for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+		{
+			TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i];
+
+			INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack);
+		}
+	}
+#endif
+	return PVRSRV_OK;
+}
+
+/*
+ * Osfunc deinitialisation.
+ * Note that PVROSFuncInit may not have been called
+ */
+void PVROSFuncDeInit(void)
+{
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+	if (psTimerWorkQueue != NULL)
+	{
+		destroy_workqueue(psTimerWorkQueue);
+		psTimerWorkQueue = NULL;
+	}
+#endif
+}
+
+void OSDumpStack(void)
+{
+	dump_stack();
+}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+
+static struct task_struct *gsOwner;
+
+void OSAcquireBridgeLock(void)
+{
+	mutex_lock(&gPVRSRVLock);
+	gsOwner = current;
+}
+
+void OSReleaseBridgeLock(void)
+{
+	gsOwner = NULL;
+	mutex_unlock(&gPVRSRVLock);
+}
+
+struct task_struct *BridgeLockGetOwner(void)
+{
+	return gsOwner;
+}
+
+IMG_BOOL BridgeLockIsLocked(void)
+{
+	return OSLockIsLocked(&gPVRSRVLock);
+}
+
+#endif
+
+/*************************************************************************/ /*!
+@Function		OSCreateStatisticEntry
+@Description	Create a statistic entry in the specified folder.
+@Input			pszName		   String containing the name for the entry.
+@Input			pvFolder	   Reference from OSCreateStatisticFolder() of the
+							   folder to create the entry in, or NULL for the
+							   root.
+@Input			pfnStatsPrint  Pointer to function that can be used to print the
+							   values of all the statistics.
+@Input			pfnIncMemRefCt Pointer to function that can be used to take a
+							   reference on the memory backing the statistic
+							   entry.
+@Input			pfnDecMemRefCt Pointer to function that can be used to drop a
+							   reference on the memory backing the statistic
+							   entry.
+@Input			pvData		   OS specific reference that can be used by
+							   pfnGetElement.
+@Return			Pointer void reference to the entry created, which can be
+				passed to OSRemoveStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+void *OSCreateStatisticEntry(IMG_CHAR* pszName, void *pvFolder,
+							 OS_STATS_PRINT_FUNC* pfnStatsPrint,
+							 OS_INC_STATS_MEM_REFCOUNT_FUNC* pfnIncMemRefCt,
+							 OS_DEC_STATS_MEM_REFCOUNT_FUNC* pfnDecMemRefCt,
+							 void *pvData)
+{
+	return (void *)PVRDebugFSCreateStatisticEntry(pszName, (PPVR_DEBUGFS_DIR_DATA)pvFolder, pfnStatsPrint, pfnIncMemRefCt, pfnDecMemRefCt, pvData);
+} /* OSCreateStatisticEntry */
+
+
+/*************************************************************************/ /*!
+@Function		OSRemoveStatisticEntry
+@Description	Removes a statistic entry.
+@Input			pvEntry  Pointer void reference to the entry created by
+						 OSCreateStatisticEntry().
+*/ /**************************************************************************/
+void OSRemoveStatisticEntry(void *pvEntry)
+{
+	PVRDebugFSRemoveStatisticEntry((PPVR_DEBUGFS_DRIVER_STAT)pvEntry);
+} /* OSRemoveStatisticEntry */
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+void *OSCreateRawStatisticEntry(const IMG_CHAR *pszFileName, void *pvParentDir,
+                                OS_STATS_PRINT_FUNC *pfStatsPrint)
+{
+	return (void *) PVRDebugFSCreateRawStatisticEntry(pszFileName, pvParentDir,
+	                                                  pfStatsPrint);
+}
+
+void OSRemoveRawStatisticEntry(void *pvEntry)
+{
+	PVRDebugFSRemoveRawStatisticEntry(pvEntry);
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function		OSCreateStatisticFolder
+@Description	Create a statistic folder to hold statistic entries.
+@Input			pszName   String containing the name for the folder.
+@Input			pvFolder  Reference from OSCreateStatisticFolder() of the folder
+						  to create the folder in, or NULL for the root.
+@Return			Pointer void reference to the folder created, which can be
+				passed to OSRemoveStatisticFolder() to remove the folder.
+*/ /**************************************************************************/
+void *OSCreateStatisticFolder(IMG_CHAR *pszName, void *pvFolder)
+{
+	PPVR_DEBUGFS_DIR_DATA psNewStatFolder = NULL;
+	int iResult;
+
+	iResult = PVRDebugFSCreateEntryDir(pszName, (PPVR_DEBUGFS_DIR_DATA)pvFolder, &psNewStatFolder);
+	return (iResult == 0) ? (void *)psNewStatFolder : NULL;
+} /* OSCreateStatisticFolder */
+
+
+/*************************************************************************/ /*!
+@Function		OSRemoveStatisticFolder
+@Description	Removes a statistic folder.
+@Input          ppvFolder  Reference from OSCreateStatisticFolder() of the
+                           folder that should be removed.
+                           This needs to be double pointer because it has to
+                           be NULLed right after memory is freed to avoid
+                           possible races and use-after-free situations.
+*/ /**************************************************************************/
+void OSRemoveStatisticFolder(void **ppvFolder)
+{
+	PVRDebugFSRemoveEntryDir((PPVR_DEBUGFS_DIR_DATA *)ppvFolder);
+} /* OSRemoveStatisticFolder */
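+
+/*
+ * A hedged usage sketch for the statistic helpers above (MyStatsPrint and
+ * pvMyData are hypothetical names):
+ *
+ *   void *pvDir = OSCreateStatisticFolder("my_stats", NULL);
+ *   void *pvEntry = OSCreateStatisticEntry("usage", pvDir, MyStatsPrint,
+ *                                          NULL, NULL, pvMyData);
+ *   ...
+ *   OSRemoveStatisticEntry(pvEntry);
+ *   OSRemoveStatisticFolder(&pvDir);
+ */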
+
+
+PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray,
+                                         IMG_UINT64 sCpuVAddrBase,
+                                         IMG_CPU_PHYADDR sCpuPAHeapBase,
+                                         IMG_UINT32 ui32AllocPageCount,
+                                         IMG_UINT32 *pai32AllocIndices,
+                                         IMG_UINT32 ui32FreePageCount,
+                                         IMG_UINT32 *pai32FreeIndices,
+                                         IMG_BOOL bIsLMA)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+	pfn_t sPFN;
+#else
+	IMG_UINT64 uiPFN;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+
+	PVRSRV_ERROR eError;
+
+	struct mm_struct  *psMM = current->mm;
+	struct vm_area_struct *psVMA = NULL;
+	struct address_space *psMapping = NULL;
+	struct page *psPage = NULL;
+
+	IMG_UINT64 uiCPUVirtAddr = 0;
+	IMG_UINT32 ui32Loop = 0;
+	IMG_UINT32 ui32PageSize = OSGetPageSize();
+	IMG_BOOL bMixedMap = IMG_FALSE;
+
+	/*
+	 * Acquire the lock before manipulating the VMA
+	 * In this case only mmap_sem lock would suffice as the pages associated with this VMA
+	 * are never meant to be swapped out.
+	 *
+	 * In the future, in case the pages are marked as swapped, page_table_lock needs
+	 * to be acquired in conjunction with this to disable page swapping.
+	 */
+
+	/* Find the Virtual Memory Area associated with the user base address */
+	psVMA = find_vma(psMM, (uintptr_t)sCpuVAddrBase);
+	if (NULL == psVMA)
+	{
+		eError = PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND;
+		return eError;
+	}
+
+	/* Acquire the memory sem */
+	down_write(&psMM->mmap_sem);
+
+	psMapping = psVMA->vm_file->f_mapping;
+
+	/* Reset the page offset to its correct value, as it is modified in the MMAP_PMR function */
+	psVMA->vm_pgoff = (psVMA->vm_start >> PAGE_SHIFT);
+
+	/* Delete the entries for the pages that got freed */
+	if (ui32FreePageCount && (pai32FreeIndices != NULL))
+	{
+		for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+		{
+			uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32FreeIndices[ui32Loop] * ui32PageSize));
+
+			unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1);
+
+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+			/*
+			 * Still need to map pages in case remap flag is set.
+			 * That is not done until the remap case succeeds
+			 */
+#endif
+		}
+		eError = PVRSRV_OK;
+	}
+
+	if ((psVMA->vm_flags & VM_MIXEDMAP) || bIsLMA)
+	{
+		psVMA->vm_flags |=  VM_MIXEDMAP;
+		bMixedMap = IMG_TRUE;
+	}
+	else
+	{
+		if (ui32AllocPageCount && (NULL != pai32AllocIndices))
+		{
+			for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++)
+			{
+
+				psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+				sPFN = page_to_pfn_t(psPage);
+
+				if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
+#else
+				uiPFN = page_to_pfn(psPage);
+
+				if (!pfn_valid(uiPFN) || (page_count(pfn_to_page(uiPFN)) == 0))
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+				{
+					bMixedMap = IMG_TRUE;
+					psVMA->vm_flags |= VM_MIXEDMAP;
+					break;
+				}
+			}
+		}
+	}
+
+	/* Map the pages that got allocated */
+	if (ui32AllocPageCount && (NULL != pai32AllocIndices))
+	{
+		for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++)
+		{
+			int err;
+
+			uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32AllocIndices[ui32Loop] * ui32PageSize));
+			unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1);
+
+			if (bIsLMA)
+			{
+				phys_addr_t uiAddr = sCpuPAHeapBase.uiAddr +
+				                     ((IMG_DEV_PHYADDR *)psPageArray)[pai32AllocIndices[ui32Loop]].uiAddr;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+				sPFN = phys_to_pfn_t(uiAddr, 0);
+				psPage = pfn_t_to_page(sPFN);
+#else
+				uiPFN = uiAddr >> PAGE_SHIFT;
+				psPage = pfn_to_page(uiPFN);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+			}
+			else
+			{
+				psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+				sPFN = page_to_pfn_t(psPage);
+#else
+				uiPFN = page_to_pfn(psPage);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+			}
+
+			if (bMixedMap)
+			{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+				err = vm_insert_mixed(psVMA, uiCPUVirtAddr, sPFN);
+#else
+				err = vm_insert_mixed(psVMA, uiCPUVirtAddr, uiPFN);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+			}
+			else
+			{
+				err = vm_insert_page(psVMA, uiCPUVirtAddr, psPage);
+			}
+
+			if (err)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "Remap failure error code: %d", err));
+				eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
+				goto eFailed;
+			}
+		}
+	}
+
+	eError = PVRSRV_OK;
+eFailed:
+	up_write(&psMM->mmap_sem);
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       OSDebugSignalPID
+@Description    Sends a SIGTRAP signal to a specific PID in user mode for
+                debugging purposes. The user mode process can register a handler
+                against this signal.
+                This is necessary to support the Rogue debugger. If the Rogue
+                debugger is not used then this function may be implemented as
+                a stub.
+@Input          ui32PID    The PID for the signal.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID)
+{
+	int err;
+	struct pid *psPID;
+
+	/* find_vpid() takes no reference on the pid, so hold the RCU read lock
+	 * across the lookup and the signal delivery to keep it from being
+	 * freed underneath us. */
+	rcu_read_lock();
+	psPID = find_vpid(ui32PID);
+	if (psPID == NULL)
+	{
+		rcu_read_unlock();
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get PID struct.", __func__));
+		return PVRSRV_ERROR_NOT_FOUND;
+	}
+
+	err = kill_pid(psPID, SIGTRAP, 0);
+	rcu_read_unlock();
+	if (err != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Signal Failure %d", __func__, err));
+		return PVRSRV_ERROR_SIGNAL_FAILED;
+	}
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc_arm.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc_arm.c
new file mode 100644
index 0000000..2152536
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc_arm.c
@@ -0,0 +1,226 @@
+/*************************************************************************/ /*!
+@File
+@Title          arm specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS functions whose implementations are processor specific
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
+ #include <asm/system.h>
+#endif
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+
+
+#if defined(CONFIG_OUTER_CACHE)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0))
+
+	/* Since 3.16 the outer_xxx() functions require IRQs to be disabled and
+	 * that no other cache masters operate on the outer cache. */
+	static DEFINE_SPINLOCK(gsCacheFlushLock);
+
+	#define OUTER_CLEAN_RANGE() { \
+		unsigned long uiLockFlags; \
+		\
+		spin_lock_irqsave(&gsCacheFlushLock, uiLockFlags); \
+		outer_clean_range(0, ULONG_MAX); \
+		spin_unlock_irqrestore(&gsCacheFlushLock, uiLockFlags); \
+	}
+
+	#define OUTER_FLUSH_ALL() { \
+		unsigned long uiLockFlags; \
+		\
+		spin_lock_irqsave(&gsCacheFlushLock, uiLockFlags); \
+		outer_flush_all(); \
+		spin_unlock_irqrestore(&gsCacheFlushLock, uiLockFlags); \
+	}
+
+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
+
+	/* No need to disable IRQs for older kernels */
+	#define OUTER_CLEAN_RANGE() outer_clean_range(0, ULONG_MAX)
+	#define OUTER_FLUSH_ALL()   outer_flush_all()
+#endif /*(LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
+
+#else /* CONFIG_OUTER_CACHE */
+
+	/* Don't do anything if we have no outer cache */
+	#define OUTER_CLEAN_RANGE()
+	#define OUTER_FLUSH_ALL()
+#endif /* CONFIG_OUTER_CACHE */
+
+static void per_cpu_cache_flush(void *arg)
+{
+	PVR_UNREFERENCED_PARAMETER(arg);
+	flush_cache_all();
+}
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	switch(uiCacheOp)
+	{
+		case PVRSRV_CACHE_OP_CLEAN:
+			on_each_cpu(per_cpu_cache_flush, NULL, 1);
+			OUTER_CLEAN_RANGE();
+			break;
+
+		case PVRSRV_CACHE_OP_INVALIDATE:
+		case PVRSRV_CACHE_OP_FLUSH:
+			on_each_cpu(per_cpu_cache_flush, NULL, 1);
+			OUTER_FLUSH_ALL();
+			break;
+
+		case PVRSRV_CACHE_OP_NONE:
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Global cache operation type %d is invalid",
+					__FUNCTION__, uiCacheOp));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_ASSERT(0);
+			break;
+	}
+
+	return eError;
+}
+
+static inline size_t pvr_dmac_range_len(const void *pvStart, const void *pvEnd)
+{
+	return (size_t)((char *)pvEnd - (char *)pvStart);
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+	arm_dma_ops.sync_single_for_device(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+	arm_dma_ops.sync_single_for_cpu(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+#else	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+	/* Inner cache */
+	dmac_flush_range(pvVirtStart, pvVirtEnd);
+
+	/* Outer cache */
+	outer_flush_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+	arm_dma_ops.sync_single_for_device(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+#else	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+	/* Inner cache */
+	dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_TO_DEVICE);
+
+	/* Outer cache */
+	outer_clean_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                 void *pvVirtStart,
+                                 void *pvVirtEnd,
+                                 IMG_CPU_PHYADDR sCPUPhysStart,
+                                 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+	arm_dma_ops.sync_single_for_cpu(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+#else	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+#if defined(PVR_LINUX_DONT_USE_RANGE_BASED_INVALIDATE)
+	OSCPUCacheCleanRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+#else
+	/* Inner cache */
+	dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_FROM_DEVICE);
+
+	/* Outer cache */
+	outer_inv_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif
+#endif	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+	return PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL;
+#else
+	return PVRSRV_CACHE_OP_ADDR_TYPE_BOTH;
+#endif
+}
+
+/* User Enable Register */
+#define PMUSERENR_EN      0x00000001 /* enable user access to the counters */
+
+static void per_cpu_perf_counter_user_access_en(void *data)
+{
+	PVR_UNREFERENCED_PARAMETER(data);
+#if !defined(CONFIG_L4)
+	/* Enable user-mode access to counters. */
+	asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(PMUSERENR_EN));
+#endif
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+	on_each_cpu(per_cpu_perf_counter_user_access_en, NULL, 1);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc_arm64.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc_arm64.c
new file mode 100644
index 0000000..4cb7311
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc_arm64.c
@@ -0,0 +1,284 @@
+/*************************************************************************/ /*!
+@File
+@Title          arm64 specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS functions whose implementations are processor specific
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/cpumask.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#if defined(CONFIG_OUTER_CACHE)
+  /* If you encounter a 64-bit ARM system with an outer cache, you'll need
+   * to add the necessary code to manage that cache. See osfunc_arm.c
+   * for an example of how to do so.
+   */
+	#error "CONFIG_OUTER_CACHE not supported on arm64."
+#endif
+
+static void per_cpu_cache_flush(void *arg)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
+	unsigned long irqflags;
+	signed long Clidr, Csselr, LoC, Assoc, Nway, Nsets, Level, Lsize, Var;
+	static DEFINE_SPINLOCK(spinlock);
+
+	spin_lock_irqsave(&spinlock, irqflags);
+
+	/* Read cache level ID register */
+	asm volatile (
+		"dmb sy\n\t"
+		"mrs %[rc], clidr_el1\n\t"
+		: [rc] "=r" (Clidr));
+
+	/* Exit if there is no cache level of coherency */
+	LoC = (Clidr & (((1UL << 3)-1) << 24)) >> 23;
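+	/* LoC is CLIDR_EL1[26:24]; shifting right by 23 rather than 24 yields
+	 * LoC*2, matching the CSSELR_EL1 level encoding (level << 1) that the
+	 * walk below relies on by stepping Level in increments of 2. */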
+	if (! LoC)
+	{
+		goto e0;
+	}
+
+	/*
+	 * Walk the cache hierarchy down to the LLC/LoC; at each level, skip
+	 * instruction-only caches and determine the attributes of the data
+	 * cache at that level.
+	 */
+	for (Level = 0; LoC > Level; Level += 2)
+	{
+		/* Mask off this CtypeN bit, skip if not unified cache or separate
+		   instruction and data caches */
+		Var = (Clidr >> (Level + (Level >> 1))) & ((1UL << 3) - 1);
+		if (Var < 2)
+		{
+			continue;
+		}
+
+		/* Select this dcache level for query */
+		asm volatile (
+			"msr csselr_el1, %[val]\n\t"
+			"isb\n\t"
+			"mrs %[rc], ccsidr_el1\n\t"
+			: [rc] "=r" (Csselr) : [val] "r" (Level));
+
+		/* Look-up this dcache organisation attributes */
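+		/* CCSIDR_EL1 layout: (NumSets-1) in [27:13], (Associativity-1) in
+		 * [12:3], log2(words per line)-2 in [2:0]; the +4 converts the
+		 * latter into log2(bytes per line). */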
+		Nsets = (Csselr >> 13) & ((1UL << 15) - 1);
+		Assoc = (Csselr >> 3) & ((1UL << 10) - 1);
+		Lsize = (Csselr & ((1UL << 3) - 1)) + 4;
+		Nway = 0;
+
+		/* For performance, do these in assembly; foreach dcache level/set,
+		   foreach dcache set/way, construct the "DC CISW" instruction
+		   argument and issue instruction */
+		asm volatile (
+			"mov x6, %[val0]\n\t"
+			"mov x9, %[rc1]\n\t"
+			"clz w9, w6\n\t"
+			"mov %[rc1], x9\n\t"
+			"lsetloop:\n\t"
+			"mov %[rc5], %[val0]\n\t"
+			"swayloop:\n\t"
+			"lsl x6, %[rc5], %[rc1]\n\t"
+			"orr x9, %[val2], x6\n\t"
+			"lsl x6, %[rc3], %[val4]\n\t"
+			"orr x9, x9, x6\n\t"
+			"dc	cisw, x9\n\t"
+			"subs %[rc5], %[rc5], #1\n\t"
+			"b.ge swayloop\n\t"
+			"subs %[rc3], %[rc3], #1\n\t"
+			"b.ge lsetloop\n\t"
+			: [rc1] "+r" (Nway), [rc3] "+r" (Nsets), [rc5] "+r" (Var)
+			: [val0] "r" (Assoc), [val2] "r" (Level), [val4] "r" (Lsize)
+			: "x6", "x9", "cc");
+	}
+
+e0:
+	/* Re-select L0 d-cache as active level, issue barrier before exit */
+	Var = 0;
+	asm volatile (
+		"msr csselr_el1, %[val]\n\t"
+		"dsb sy\n\t"
+		"isb\n\t"
+		: : [val] "r" (Var));
+
+	spin_unlock_irqrestore(&spinlock, irqflags);
+#else
+	flush_cache_all();
+#endif
+	PVR_UNREFERENCED_PARAMETER(arg);
+}
+
+static inline void FlushRange(void *pvRangeAddrStart,
+							  void *pvRangeAddrEnd,
+							  PVRSRV_CACHE_OP eCacheOp)
+{
+	IMG_UINT32 ui32CacheLineSize = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+	IMG_BYTE *pbStart = pvRangeAddrStart;
+	IMG_BYTE *pbEnd = pvRangeAddrEnd;
+	IMG_BYTE *pbBase;
+
+	/*
+	  On arm64, the TRM states in D5.8.1 (data and unified caches) that if cache
+	  maintenance is performed on a memory location using a VA, the effect of
+	  that cache maintenance is visible to all VA aliases of the physical memory
+	  location. So here it's quicker to issue the machine cache maintenance
+	  instruction directly without going via the Linux kernel DMA framework as
+	  this is sufficient to maintain the CPU d-caches on arm64.
+	 */
+	pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)ui32CacheLineSize);
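+	/* pbStart need not be aligned down: each dc instruction operates on the
+	 * whole cache line containing the given address, so the first line is
+	 * covered in any case. */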
+	for (pbBase = pbStart; pbBase < pbEnd; pbBase += ui32CacheLineSize)
+	{
+		switch (eCacheOp)
+		{
+			case PVRSRV_CACHE_OP_CLEAN:
+				asm volatile ("dc cvac, %0" :: "r" (pbBase));
+				break;
+
+			case PVRSRV_CACHE_OP_INVALIDATE:
+				asm volatile ("dc ivac, %0" :: "r" (pbBase));
+				break;
+
+			case PVRSRV_CACHE_OP_FLUSH:
+				asm volatile ("dc civac, %0" :: "r" (pbBase));
+				break;
+
+			default:
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Cache maintenance operation type %d is invalid",
+						__FUNCTION__, eCacheOp));
+				break;
+		}
+	}
+}
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	switch(uiCacheOp)
+	{
+		case PVRSRV_CACHE_OP_CLEAN:
+		case PVRSRV_CACHE_OP_FLUSH:
+		case PVRSRV_CACHE_OP_INVALIDATE:
+			on_each_cpu(per_cpu_cache_flush, NULL, 1);
+			break;
+
+		case PVRSRV_CACHE_OP_NONE:
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Global cache operation type %d is invalid",
+					__FUNCTION__, uiCacheOp));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_ASSERT(0);
+			break;
+	}
+
+	return eError;
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+							void *pvVirtStart,
+							void *pvVirtEnd,
+							IMG_CPU_PHYADDR sCPUPhysStart,
+							IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	const struct dma_map_ops *dma_ops;
+
+	if (pvVirtStart)
+	{
+		FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_FLUSH);
+		return;
+	}
+
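+	/* No usable virtual range: fall back to the device's DMA ops, which
+	 * operate on the physical address range directly. */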
+	dma_ops = get_dma_ops(psDevNode->psDevConfig->pvOSDevice);
+	dma_ops->sync_single_for_device(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+	dma_ops->sync_single_for_cpu(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+							void *pvVirtStart,
+							void *pvVirtEnd,
+							IMG_CPU_PHYADDR sCPUPhysStart,
+							IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	const struct dma_map_ops *dma_ops;
+
+	if (pvVirtStart)
+	{
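+		/* A full flush (clean+invalidate) subsumes the clean-only operation
+		 * and keeps the VA-based path common to all three range functions. */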
+		FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_FLUSH);
+		return;
+	}
+
+	dma_ops = get_dma_ops(psDevNode->psDevConfig->pvOSDevice);
+	dma_ops->sync_single_for_device(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+								 void *pvVirtStart,
+								 void *pvVirtEnd,
+								 IMG_CPU_PHYADDR sCPUPhysStart,
+								 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	const struct dma_map_ops *dma_ops;
+
+	if (pvVirtStart)
+	{
+		FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_FLUSH);
+		return;
+	}
+
+	dma_ops = get_dma_ops(psDevNode->psDevConfig->pvOSDevice);
+	dma_ops->sync_single_for_cpu(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+	return PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+	/* FIXME: implement similarly to __arm__ */
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc_mips.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc_mips.c
new file mode 100644
index 0000000..0c1cb20
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc_mips.c
@@ -0,0 +1,137 @@
+/*************************************************************************/ /*!
+@File
+@Title          mips specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS functions whose implementations are processor specific
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	switch(uiCacheOp)
+	{
+		case PVRSRV_CACHE_OP_CLEAN:
+		case PVRSRV_CACHE_OP_FLUSH:
+		case PVRSRV_CACHE_OP_INVALIDATE:
+			eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+			break;
+
+		case PVRSRV_CACHE_OP_NONE:
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Global cache operation type %d is invalid",
+					__FUNCTION__, uiCacheOp));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_ASSERT(0);
+			break;
+	}
+
+	return eError;
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	unsigned long len;
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	PVR_ASSERT((uintptr_t) pvVirtEnd >= (uintptr_t) pvVirtStart);
+
+	len = (unsigned long) pvVirtEnd - (unsigned long) pvVirtStart;
+	dma_cache_sync(NULL, (void *)pvVirtStart, len, DMA_BIDIRECTIONAL);
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	unsigned long len;
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	PVR_ASSERT((uintptr_t) pvVirtEnd >= (uintptr_t) pvVirtStart);
+
+	len = (unsigned long) pvVirtEnd - (unsigned long) pvVirtStart;
+	dma_cache_sync(NULL, (void *)pvVirtStart, len, DMA_TO_DEVICE);
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                 void *pvVirtStart,
+                                 void *pvVirtEnd,
+                                 IMG_CPU_PHYADDR sCPUPhysStart,
+                                 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	unsigned long len;
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	PVR_ASSERT((uintptr_t) pvVirtEnd >= (uintptr_t) pvVirtStart);
+
+	len = (unsigned long) pvVirtEnd - (unsigned long) pvVirtStart;
+	dma_cache_sync(NULL, (void *)pvVirtStart, len, DMA_FROM_DEVICE);
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+	return PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+	/* Not applicable to MIPS architecture. */
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc_x86.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc_x86.c
new file mode 100644
index 0000000..09b69d3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osfunc_x86.c
@@ -0,0 +1,154 @@
+/*************************************************************************/ /*!
+@File
+@Title          x86 specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS functions whose implementations are processor specific
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/smp.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+
+static void per_cpu_cache_flush(void *arg)
+{
+	PVR_UNREFERENCED_PARAMETER(arg);
+	wbinvd();
+}
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	switch(uiCacheOp)
+	{
+		case PVRSRV_CACHE_OP_CLEAN:
+		case PVRSRV_CACHE_OP_FLUSH:
+		case PVRSRV_CACHE_OP_INVALIDATE:
+			on_each_cpu(per_cpu_cache_flush, NULL, 1);
+			break;
+
+		case PVRSRV_CACHE_OP_NONE:
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Global cache operation type %d is invalid",
+					__FUNCTION__, uiCacheOp));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_ASSERT(0);
+			break;
+	}
+
+	return eError;
+}
+
+static void x86_flush_cache_range(const void *pvStart, const void *pvEnd)
+{
+	IMG_BYTE *pbStart = (IMG_BYTE *)pvStart;
+	IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd;
+	IMG_BYTE *pbBase;
+
+	pbEnd = (IMG_BYTE *)PVR_ALIGN((uintptr_t)pbEnd,
+	                              (uintptr_t)boot_cpu_data.x86_clflush_size);
+
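+	/* Fence before and after the loop: CLFLUSH is only guaranteed to be
+	 * ordered with respect to other stores by MFENCE. */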
+	mb();
+	for(pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size)
+	{
+		clflush(pbBase);
+	}
+	mb();
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	/* No clean feature on x86 */
+	x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                 void *pvVirtStart,
+                                 void *pvVirtEnd,
+                                 IMG_CPU_PHYADDR sCPUPhysStart,
+                                 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	/* No invalidate-only support */
+	x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+	return PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+	/* Not applicable to x86 architecture. */
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osmmap_stub.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osmmap_stub.c
new file mode 100644
index 0000000..fbddf87
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/osmmap_stub.c
@@ -0,0 +1,132 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS abstraction for the mmap2 interface for mapping PMRs into
+                User Mode memory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* our exported API */
+#include "osmmap.h"
+
+/* include/ */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/include/ */
+
+/* services/include/srvhelper/ */
+#include "ra.h"
+
+#include "pmr.h"
+
+IMG_INTERNAL PVRSRV_ERROR
+OSMMapPMR(IMG_HANDLE hBridge,
+          IMG_HANDLE hPMR,
+          IMG_DEVMEM_SIZE_T uiPMRSize,
+          IMG_UINT32 uiFlags,
+          IMG_HANDLE *phOSMMapPrivDataOut,
+          void **ppvMappingAddressOut,
+          size_t *puiMappingLengthOut)
+{
+    PVRSRV_ERROR eError;
+    PMR *psPMR;
+    void *pvKernelAddress;
+    size_t uiLength;
+    IMG_HANDLE hPriv;
+
+    PVR_UNREFERENCED_PARAMETER(hBridge);
+    PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+    /*
+      Normally this function would mmap a PMR into the memory space of a
+      user process, but in this case we're taking a PMR and mapping it
+      into kernel virtual space.  We keep the same function name for
+      symmetry, as this allows the higher layers of the software stack
+      not to care whether they are in user mode or the kernel.
+    */
+
+    psPMR = hPMR;
+
+    eError = PMRAcquireKernelMappingData(psPMR,
+                                         0,
+                                         0,
+                                         &pvKernelAddress,
+                                         &uiLength,
+                                         &hPriv);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    *phOSMMapPrivDataOut = hPriv;
+    *ppvMappingAddressOut = pvKernelAddress;
+    *puiMappingLengthOut = uiLength;
+
+    PVR_ASSERT(*puiMappingLengthOut == uiPMRSize);
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+IMG_INTERNAL void
+OSMUnmapPMR(IMG_HANDLE hBridge,
+            IMG_HANDLE hPMR,
+            IMG_HANDLE hOSMMapPrivData,
+            void *pvMappingAddress,
+            size_t uiMappingLength)
+{
+    PMR *psPMR;
+
+    PVR_UNREFERENCED_PARAMETER(hBridge);
+    PVR_UNREFERENCED_PARAMETER(pvMappingAddress);
+    PVR_UNREFERENCED_PARAMETER(uiMappingLength);
+
+    psPMR = hPMR;
+    PMRReleaseKernelMappingData(psPMR,
+                                hOSMMapPrivData);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pdump.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pdump.c
new file mode 100644
index 0000000..b49301b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pdump.c
@@ -0,0 +1,462 @@
+/*************************************************************************/ /*!
+@File
+@Title          Parameter dump macro target routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined (PDUMP)
+
+#include <linux/atomic.h>
+#include <stdarg.h>
+
+#include "pvrversion.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+
+#include "dbgdrvif_srv5.h"
+#include "allocmem.h"
+#include "pdump_km.h"
+#include "pdump_osfunc.h"
+#include "services_km.h"
+
+#include <linux/kernel.h> /* sprintf */
+#include <linux/string.h> /* strncpy, strlen */
+#include <linux/mutex.h>
+
+#define PDUMP_DATAMASTER_PIXEL		(1)
+#define PDUMP_DATAMASTER_EDM		(3)
+
+static PDBGKM_SERVICE_TABLE gpfnDbgDrv = NULL;
+
+
+typedef struct PDBG_PDUMP_STATE_TAG
+{
+	PDBG_STREAM psStream[PDUMP_NUM_CHANNELS];
+
+	IMG_CHAR *pszMsg;
+	IMG_CHAR *pszScript;
+	IMG_CHAR *pszFile;
+
+} PDBG_PDUMP_STATE;
+
+static PDBG_PDUMP_STATE gsDBGPdumpState = {{NULL}, NULL, NULL, NULL};
+
+#define SZ_MSG_SIZE_MAX			(PVRSRV_PDUMP_MAX_COMMENT_SIZE-1)
+#define SZ_SCRIPT_SIZE_MAX		(PVRSRV_PDUMP_MAX_COMMENT_SIZE-1)
+#define SZ_FILENAME_SIZE_MAX	(PVRSRV_PDUMP_MAX_COMMENT_SIZE-1)
+
+static struct mutex gsPDumpMutex;
+
+void DBGDrvGetServiceTable(void **fn_table);
+
+
+/*!
+ * \name	PDumpOSGetScriptString
+ */
+PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript,
+									IMG_UINT32 *pui32MaxLen)
+{
+	*phScript = (IMG_HANDLE)gsDBGPdumpState.pszScript;
+	*pui32MaxLen = SZ_SCRIPT_SIZE_MAX;
+	if (!*phScript)
+	{
+		return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+ * \name	PDumpOSGetMessageString
+ */
+PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg,
+									 IMG_UINT32 *pui32MaxLen)
+{
+	*ppszMsg = gsDBGPdumpState.pszMsg;
+	*pui32MaxLen = SZ_MSG_SIZE_MAX;
+	if (!*ppszMsg)
+	{
+		return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+ * \name	PDumpOSGetFilenameString
+ */
+PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile,
+									 IMG_UINT32 *pui32MaxLen)
+{
+	*ppszFile = gsDBGPdumpState.pszFile;
+	*pui32MaxLen = SZ_FILENAME_SIZE_MAX;
+	if (!*ppszFile)
+	{
+		return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+ * \name	PDumpOSBufprintf
+ */
+PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...)
+{
+	IMG_CHAR* pszBuf = hBuf;
+	IMG_INT32 n;
+	va_list	vaArgs;
+
+	va_start(vaArgs, pszFormat);
+
+	n = vsnprintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+	va_end(vaArgs);
+
+	if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1)	/* glibc >= 2.1 or glibc 2.0 */
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+		return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+	}
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+	g_ui32EveryLineCounter++;
+#endif
+
+	/* Put line ending sequence at the end if it isn't already there */
+	PDumpOSVerifyLineEnding(pszBuf, ui32ScriptSizeMax);
+
+	return PVRSRV_OK;
+}
+
+/*!
+ * \name	PDumpOSVSprintf
+ */
+PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, const IMG_CHAR* pszFormat, PDUMP_va_list vaArgs)
+{
+	IMG_INT32 n;
+
+	n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+	if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1)	/* glibc >= 2.1 or glibc 2.0 */
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+		return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+ * \name	PDumpOSDebugPrintf
+ */
+void PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...)
+{
+	PVR_UNREFERENCED_PARAMETER(pszFormat);
+
+	/* FIXME: Implement using services PVR_DBG or otherwise with kprintf */
+}
+
+/*!
+ * \name	PDumpOSSprintf
+ */
+PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...)
+{
+	IMG_INT32 n;
+	va_list	vaArgs;
+
+	va_start(vaArgs, pszFormat);
+
+	n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+	va_end(vaArgs);
+
+	if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1)	/* glibc >= 2.1 or glibc 2.0 */
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+		return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+ * \name	PDumpOSBuflen
+ */
+IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
+{
+	IMG_CHAR* pszBuf = hBuffer;
+	IMG_UINT32 ui32Count = 0;
+
+	while ((ui32Count < ui32BufferSizeMax) && (pszBuf[ui32Count] != 0))
+	{
+		ui32Count++;
+	}
+	return(ui32Count);
+}
+
+/*!
+ * \name	PDumpOSVerifyLineEnding
+ */
+void PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
+{
+	IMG_UINT32 ui32Count;
+	IMG_CHAR* pszBuf = hBuffer;
+
+	/* strlen */
+	ui32Count = PDumpOSBuflen(hBuffer, ui32BufferSizeMax);
+
+	/* Put \n sequence at the end if it isn't already there */
+	if ((ui32Count >= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count+1 < ui32BufferSizeMax))
+	{
+		pszBuf[ui32Count] = '\n';
+		ui32Count++;
+		pszBuf[ui32Count] = '\0';
+	}
+}
+
+
+
+/*!
+ * \name	PDumpOSSetSplitMarker
+ */
+IMG_BOOL PDumpOSSetSplitMarker(IMG_HANDLE hStream, IMG_UINT32 ui32Marker)
+{
+	PDBG_STREAM psStream = (PDBG_STREAM) hStream;
+
+	PVR_ASSERT(gpfnDbgDrv);
+	gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
+	return IMG_TRUE;
+}
+
+/*!
+ *	\name	PDumpOSDebugDriverWrite
+ */
+IMG_UINT32 PDumpOSDebugDriverWrite( IMG_HANDLE psStream,
+									IMG_UINT8 *pui8Data,
+									IMG_UINT32 ui32BCount)
+{
+	PVR_ASSERT(gpfnDbgDrv != NULL);
+
+	return gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount);
+}
+
+/*!
+ *	\name	PDumpOSReleaseExecution
+ */
+void PDumpOSReleaseExecution(void)
+{
+	OSReleaseThreadQuanta();
+}
+
+/**************************************************************************
+ * Function Name  : PDumpOSInit
+ * Outputs        : None
+ * Returns        :
+ * Description    : Reset connection to vldbgdrv
+ *					Then try to connect to PDUMP streams
+**************************************************************************/
+PVRSRV_ERROR PDumpOSInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript,
+		IMG_UINT32* pui32InitCapMode, IMG_CHAR** ppszEnvComment)
+{
+	PVRSRV_ERROR     eError;
+
+	*pui32InitCapMode = DEBUG_CAPMODE_FRAMED;
+	*ppszEnvComment = NULL;
+
+	/* If we tried this earlier we might already have connected to the
+	 * driver, but if pdump.exe was running the stream connect would have
+	 * failed.
+	 */
+	if (!gpfnDbgDrv)
+	{
+		DBGDrvGetServiceTable((void **)&gpfnDbgDrv);
+
+		/* If something failed then there is no point in trying to connect streams */
+		if (gpfnDbgDrv == NULL)
+		{
+			return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+		}
+
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		if(!gsDBGPdumpState.pszFile)
+		{
+			gsDBGPdumpState.pszFile = OSAllocMem(SZ_FILENAME_SIZE_MAX);
+			if (gsDBGPdumpState.pszFile == NULL)
+			{
+				goto init_failed;
+			}
+		}
+
+		if(!gsDBGPdumpState.pszMsg)
+		{
+			gsDBGPdumpState.pszMsg = OSAllocMem(SZ_MSG_SIZE_MAX);
+			if (gsDBGPdumpState.pszMsg == NULL)
+			{
+				goto init_failed;
+			}
+		}
+
+		if(!gsDBGPdumpState.pszScript)
+		{
+			gsDBGPdumpState.pszScript = OSAllocMem(SZ_SCRIPT_SIZE_MAX);
+			if (gsDBGPdumpState.pszScript == NULL)
+			{
+				goto init_failed;
+			}
+		}
+
+		eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+		if (!gpfnDbgDrv->pfnCreateStream(PDUMP_PARAM_CHANNEL_NAME, 0, 10, &psParam->hInit, &psParam->hMain, &psParam->hDeinit))
+		{
+			goto init_failed;
+		}
+		gsDBGPdumpState.psStream[PDUMP_CHANNEL_PARAM] = psParam->hMain;
+
+		if (!gpfnDbgDrv->pfnCreateStream(PDUMP_SCRIPT_CHANNEL_NAME, 0, 10, &psScript->hInit, &psScript->hMain, &psScript->hDeinit))
+		{
+			goto init_failed;
+		}
+		gsDBGPdumpState.psStream[PDUMP_CHANNEL_SCRIPT] = psScript->hMain;
+	}
+
+	return PVRSRV_OK;
+
+init_failed:
+	PDumpOSDeInit(psParam, psScript);
+	return eError;
+}
+
+
+void PDumpOSDeInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript)
+{
+	gpfnDbgDrv->pfnDestroyStream(psScript->hInit, psScript->hMain, psScript->hDeinit);
+	gpfnDbgDrv->pfnDestroyStream(psParam->hInit, psParam->hMain, psParam->hDeinit);
+
+	if(gsDBGPdumpState.pszFile)
+	{
+		OSFreeMem(gsDBGPdumpState.pszFile);
+		gsDBGPdumpState.pszFile = NULL;
+	}
+
+	if(gsDBGPdumpState.pszScript)
+	{
+		OSFreeMem(gsDBGPdumpState.pszScript);
+		gsDBGPdumpState.pszScript = NULL;
+	}
+
+	if(gsDBGPdumpState.pszMsg)
+	{
+		OSFreeMem(gsDBGPdumpState.pszMsg);
+		gsDBGPdumpState.pszMsg = NULL;
+	}
+
+	gpfnDbgDrv = NULL;
+}
+
+PVRSRV_ERROR PDumpOSCreateLock(void)
+{
+	mutex_init(&gsPDumpMutex);
+	return PVRSRV_OK;
+}
+
+void PDumpOSDestroyLock(void)
+{
+	/* no destruction work to be done, just assert
+	 * the lock is not held */
+	PVR_ASSERT(mutex_is_locked(&gsPDumpMutex) == 0);
+}
+
+void PDumpOSLock(void)
+{
+	mutex_lock(&gsPDumpMutex);
+}
+
+void PDumpOSUnlock(void)
+{
+	mutex_unlock(&gsPDumpMutex);
+}
+
+IMG_UINT32 PDumpOSGetCtrlState(IMG_HANDLE hDbgStream,
+		IMG_UINT32 ui32StateID)
+{
+	return (gpfnDbgDrv->pfnGetCtrlState((PDBG_STREAM)hDbgStream, ui32StateID));
+}
+
+void PDumpOSSetFrame(IMG_UINT32 ui32Frame)
+{
+	gpfnDbgDrv->pfnSetFrame(ui32Frame);
+	return;
+}
+
+IMG_BOOL PDumpOSAllowInitPhaseToComplete(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient)
+{
+	PVR_UNREFERENCED_PARAMETER(bPDumpClient);
+	return bInitClient;
+}
+
+#if defined(PVR_TESTING_UTILS)
+void PDumpOSDumpState(void);
+
+void PDumpOSDumpState(void)
+{
+	PVR_LOG(("---- PDUMP LINUX: gpfnDbgDrv( %p )  gpfnDbgDrv.ui32Size( %d )",
+			gpfnDbgDrv, gpfnDbgDrv->ui32Size));
+
+	PVR_LOG(("---- PDUMP LINUX: gsDBGPdumpState( %p )",
+			&gsDBGPdumpState));
+
+	PVR_LOG(("---- PDUMP LINUX: gsDBGPdumpState.psStream[0]( %p )",
+			gsDBGPdumpState.psStream[0]));
+
+	(void) gpfnDbgDrv->pfnGetCtrlState(gsDBGPdumpState.psStream[0], 0xFE);
+
+	PVR_LOG(("---- PDUMP LINUX: gsDBGPdumpState.psStream[1]( %p )",
+			gsDBGPdumpState.psStream[1]));
+
+	(void) gpfnDbgDrv->pfnGetCtrlState(gsDBGPdumpState.psStream[1], 0xFE);
+
+	/* Now dump non-stream specific info */
+	(void) gpfnDbgDrv->pfnGetCtrlState(gsDBGPdumpState.psStream[1], 0xFF);
+}
+#endif
+
+#endif /* #if defined (PDUMP) */
+/*****************************************************************************
+ End of file (PDUMP.C)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/physmem_dmabuf.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/physmem_dmabuf.c
new file mode 100644
index 0000000..4e8e84c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/physmem_dmabuf.c
@@ -0,0 +1,1143 @@
+/*************************************************************************/ /*!
+@File           physmem_dmabuf.c
+@Title          dmabuf memory allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for dmabuf memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include "physmem_dmabuf.h"
+#include "pvrsrv.h"
+#include "pmr.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP)
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pmr_impl.h"
+#if defined(SUPPORT_LINUX_REFCNT_PMR_ON_IMPORT)
+#include "hash.h"
+#endif
+#include "private_data.h"
+#include "module_common.h"
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+/*
+ * dma_buf_ops
+ *
+ * These all return errors if used.
+ * The point is to prevent anyone outside of our driver from importing
+ * and using our dmabuf.
+ */
+
+static int PVRDmaBufOpsAttach(struct dma_buf *psDmaBuf,
+                           struct dma_buf_attachment *psAttachment)
+{
+	return -ENOSYS;
+}
+
+static struct sg_table *PVRDmaBufOpsMap(struct dma_buf_attachment *psAttachment,
+                                      enum dma_data_direction eDirection)
+{
+	/* Attach hasn't been called yet */
+	return ERR_PTR(-EINVAL);
+}
+
+static void PVRDmaBufOpsUnmap(struct dma_buf_attachment *psAttachment,
+                           struct sg_table *psTable,
+                           enum dma_data_direction eDirection)
+{
+}
+
+static void PVRDmaBufOpsRelease(struct dma_buf *psDmaBuf)
+{
+	PMR *psPMR = (PMR *) psDmaBuf->priv;
+
+	PMRUnrefPMR(psPMR);
+}
+
+static int PVRDmaBufOpsKMap(struct dma_buf *psDmaBuf, enum dma_data_direction dir)
+{
+	return -ENOSYS;
+}
+
+static int PVRDmaBufOpsMMap(struct dma_buf *psDmaBuf, struct vm_area_struct *psVMA)
+{
+	return -ENOSYS;
+}
+
+static const struct dma_buf_ops sPVRDmaBufOps =
+{
+	.attach        = PVRDmaBufOpsAttach,
+	.map_dma_buf   = PVRDmaBufOpsMap,
+	.unmap_dma_buf = PVRDmaBufOpsUnmap,
+	.release       = PVRDmaBufOpsRelease,
+	.mmap          = PVRDmaBufOpsMMap,
+};
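+
+/* Only .release does real work here: dropping the last reference to the
+ * exported dma-buf in turn drops the PMR reference held by the dma-buf. */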
+
+/* end of dma_buf_ops */
+
+
+typedef struct _PMR_DMA_BUF_DATA_
+{
+	/* Filled in at PMR create time */
+	PHYS_HEAP *psPhysHeap;
+	struct dma_buf_attachment *psAttachment;
+	PFN_DESTROY_DMABUF_PMR pfnDestroy;
+	IMG_BOOL bPoisonOnFree;
+
+	/* Modified by PMR lock/unlock */
+	struct sg_table *psSgTable;
+	IMG_DEV_PHYADDR *pasDevPhysAddr;
+	IMG_UINT32 ui32PhysPageCount;
+	IMG_UINT32 ui32VirtPageCount;
+} PMR_DMA_BUF_DATA;
+
+/* Start size of the g_psDmaBufHash hash table */
+#define DMA_BUF_HASH_SIZE 20
+
+static DEFINE_MUTEX(g_HashLock);
+
+static HASH_TABLE *g_psDmaBufHash = NULL;
+static IMG_UINT32 g_ui32HashRefCount = 0;
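+
+/* The hash maps imported dma-bufs to the PMRs wrapping them, so a second
+ * import of the same dma-buf can reuse the existing PMR; the refcount
+ * above tears the table down once the last tracked import goes away. */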
+
+#if defined(PVR_ANDROID_ION_USE_SG_LENGTH)
+#define pvr_sg_length(sg) ((sg)->length)
+#else
+#define pvr_sg_length(sg) sg_dma_len(sg)
+#endif
+
+static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
+static const IMG_UINT32 _AllocPoisonSize = 7;
+static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
+static const IMG_UINT32 _FreePoisonSize = 11;
+
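+/* Fill a buffer with a repeating byte pattern so that stale accesses to
+ * freed (or freshly allocated) memory are recognisable in dumps. */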
+static void _Poison(void *pvKernAddr,
+		    IMG_DEVMEM_SIZE_T uiBufferSize,
+		    const IMG_CHAR *pacPoisonData,
+		    size_t uiPoisonSize)
+{
+	IMG_DEVMEM_SIZE_T uiDestByteIndex;
+	IMG_CHAR *pcDest = pvKernAddr;
+	IMG_UINT32 uiSrcByteIndex = 0;
+
+	for (uiDestByteIndex = 0; uiDestByteIndex < uiBufferSize; uiDestByteIndex++)
+	{
+		pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
+		uiSrcByteIndex++;
+		if (uiSrcByteIndex == uiPoisonSize)
+		{
+			uiSrcByteIndex = 0;
+		}
+	}
+}
+
+
+/*****************************************************************************
+ *                       PMR callback functions                              *
+ *****************************************************************************/
+
+static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
+	struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+	struct sg_table *psSgTable = psPrivData->psSgTable;
+	PMR *psPMR;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	mutex_lock(&g_HashLock);
+
+	if (psDmaBuf->ops != &sPVRDmaBufOps)
+	{
+		if (g_psDmaBufHash)
+		{
+			/* We have a hash table so check if we've seen this dmabuf before */
+			psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+
+			if (psPMR)
+			{
+				if (!PMRIsPMRLive(psPMR))
+				{
+					HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+					g_ui32HashRefCount--;
+
+					if (g_ui32HashRefCount == 0)
+					{
+						HASH_Delete(g_psDmaBufHash);
+						g_psDmaBufHash = NULL;
+					}
+				}
+				else
+				{
+					eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+				}
+			}
+		}
+	}
+	else
+	{
+		psPMR = (PMR *) psDmaBuf->priv;
+		if (PMRIsPMRLive(psPMR))
+		{
+			eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+		}
+	}
+
+	if(PVRSRV_OK != eError)
+	{
+		mutex_unlock(&g_HashLock);
+		return eError;
+	}
+
+	psPrivData->ui32PhysPageCount = 0;
+
+	dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL);
+
+	if (psPrivData->bPoisonOnFree)
+	{
+		void *pvKernAddr;
+		int i, err;
+
+		err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE);
+		if (err)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to begin cpu access for free poisoning (err=%d)",
+					 __func__, err));
+			PVR_ASSERT(IMG_FALSE);
+			goto exit;
+		}
+
+		for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
+		{
+			pvKernAddr = dma_buf_kmap(psDmaBuf, i);
+			if (IS_ERR_OR_NULL(pvKernAddr))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to poison allocation before free (err=%ld)",
+						 __func__, pvKernAddr ? PTR_ERR(pvKernAddr) : -ENOMEM));
+				PVR_ASSERT(IMG_FALSE);
+				goto exit_end_access;
+			}
+
+			_Poison(pvKernAddr, PAGE_SIZE, _FreePoison, _FreePoisonSize);
+
+			dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
+		}
+
+exit_end_access:
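+		/* Some exporters can fail dma_buf_end_cpu_access() transiently,
+		 * so retry while it reports -EAGAIN or -EINTR */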
+		do {
+			err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+		} while (err == -EAGAIN || err == -EINTR);
+	}
+
+exit:
+	if (psPrivData->pfnDestroy)
+	{
+		eError = psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment);
+		if (eError != PVRSRV_OK)
+		{
+			mutex_unlock(&g_HashLock);
+			return eError;
+		}
+	}
+
+	mutex_unlock(&g_HashLock);
+	OSFreeMem(psPrivData->pasDevPhysAddr);
+	OSFreeMem(psPrivData);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVR_UNREFERENCED_PARAMETER(pvPriv);
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVR_UNREFERENCED_PARAMETER(pvPriv);
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+					 IMG_UINT32 ui32Log2PageSize,
+					 IMG_UINT32 ui32NumOfPages,
+					 IMG_DEVMEM_OFFSET_T *puiOffset,
+					 IMG_BOOL *pbValid,
+					 IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	IMG_UINT32 ui32PageIndex;
+	IMG_UINT32 idx;
+
+	if (ui32Log2PageSize != PAGE_SHIFT)
+	{
+		return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+	}
+
+	for (idx = 0; idx < ui32NumOfPages; idx++)
+	{
+		if (pbValid[idx])
+		{
+			IMG_UINT32 ui32InPageOffset;
+
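+			/* Split the device-virtual offset into the index of the
+			 * backing page and the byte offset within that page */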
+			ui32PageIndex = puiOffset[idx] >> PAGE_SHIFT;
+			ui32InPageOffset = puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)ui32PageIndex << PAGE_SHIFT);
+
+			PVR_ASSERT(ui32PageIndex < psPrivData->ui32VirtPageCount);
+			PVR_ASSERT(ui32InPageOffset < PAGE_SIZE);
+			psDevPAddr[idx].uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset;
+		}
+	}
+	return PVRSRV_OK;
+}
+
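+/*
+ * Kernel mappings: the dma_buf_begin_cpu_access()/dma_buf_vmap() pair
+ * acquired here is matched by dma_buf_vunmap()/dma_buf_end_cpu_access()
+ * in PMRReleaseKernelMappingDataDmaBuf() below; the vmap address doubles
+ * as the release handle.
+ */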
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+				  size_t uiOffset,
+				  size_t uiSize,
+				  void **ppvKernelAddressOut,
+				  IMG_HANDLE *phHandleOut,
+				  PMR_FLAGS_T ulFlags)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+	void *pvKernAddr;
+	PVRSRV_ERROR eError;
+	int err;
+
+	if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Kernel mappings for sparse DMABufs "
+				"are not allowed!", __func__));
+		eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+		goto fail;
+	}
+
+	err = dma_buf_begin_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+	if (err)
+	{
+		eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+		goto fail;
+	}
+
+	pvKernAddr = dma_buf_vmap(psDmaBuf);
+	if (IS_ERR_OR_NULL(pvKernAddr))
+	{
+		eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+		goto fail_kmap;
+	}
+
+	*ppvKernelAddressOut = pvKernAddr + uiOffset;
+	*phHandleOut = pvKernAddr;
+
+	return PVRSRV_OK;
+
+fail_kmap:
+	do {
+		err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+	} while (err == -EAGAIN || err == -EINTR);
+
+fail:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static void PMRReleaseKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+					      IMG_HANDLE hHandle)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+	void *pvKernAddr = hHandle;
+	int err;
+
+	dma_buf_vunmap(psDmaBuf, pvKernAddr);
+
+	do {
+		err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+	} while (err == -EAGAIN || err == -EINTR);
+}
+
+static PVRSRV_ERROR PMRMMapDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+                                  PMR *psPMR,
+                                  PMR_MMAP_DATA pOSMMapData)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+	struct vm_area_struct *psVma = pOSMMapData;
+	int err;
+
+	if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Not possible to MMAP sparse DMABufs",
+				__func__));
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	err = dma_buf_mmap(psDmaBuf, psVma, 0);
+	if (err)
+	{
+		return (err == -EINVAL) ? PVRSRV_ERROR_NOT_SUPPORTED : PVRSRV_ERROR_BAD_MAPPING;
+	}
+
+	return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab =
+{
+	.pfnLockPhysAddresses		= PMRLockPhysAddressesDmaBuf,
+	.pfnUnlockPhysAddresses		= PMRUnlockPhysAddressesDmaBuf,
+	.pfnDevPhysAddr			= PMRDevPhysAddrDmaBuf,
+	.pfnAcquireKernelMappingData	= PMRAcquireKernelMappingDataDmaBuf,
+	.pfnReleaseKernelMappingData	= PMRReleaseKernelMappingDataDmaBuf,
+	.pfnMMap			= PMRMMapDmaBuf,
+	.pfnFinalize			= PMRFinalizeDmaBuf,
+};
+
+/*****************************************************************************
+ *                       Public facing interface                             *
+ *****************************************************************************/
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                                PHYS_HEAP *psHeap,
+                                struct dma_buf_attachment *psAttachment,
+                                PFN_DESTROY_DMABUF_PMR pfnDestroy,
+                                PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                IMG_DEVMEM_SIZE_T uiChunkSize,
+                                IMG_UINT32 ui32NumPhysChunks,
+                                IMG_UINT32 ui32NumVirtChunks,
+                                IMG_UINT32 *pui32MappingTable,
+                                PMR **ppsPMRPtr)
+{
+	struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+	PMR_DMA_BUF_DATA *psPrivData;
+	PMR_FLAGS_T uiPMRFlags;
+	IMG_BOOL bZeroOnAlloc;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bPoisonOnFree;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i, j;
+	IMG_UINT32 uiPagesPerChunk = uiChunkSize >> PAGE_SHIFT;
+	IMG_UINT32 ui32PageCount = 0;
+	struct scatterlist *sg;
+	struct sg_table *table;
+	IMG_UINT32 uiSglOffset;
+
+	bZeroOnAlloc = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags);
+	bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags);
+	bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags);
+
+	if (bZeroOnAlloc && bPoisonOnAlloc)
+	{
+		/* Zero on Alloc and Poison on Alloc are mutually exclusive */
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errReturn;
+	}
+
+	psPrivData = OSAllocZMem(sizeof(*psPrivData));
+	if (psPrivData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errReturn;
+	}
+
+	psPrivData->psPhysHeap = psHeap;
+	psPrivData->psAttachment = psAttachment;
+	psPrivData->pfnDestroy = pfnDestroy;
+	psPrivData->bPoisonOnFree = bPoisonOnFree;
+	psPrivData->ui32VirtPageCount =
+			(ui32NumVirtChunks * uiChunkSize) >> PAGE_SHIFT;
+
+	psPrivData->pasDevPhysAddr =
+			OSAllocZMem(sizeof(*(psPrivData->pasDevPhysAddr)) *
+			            psPrivData->ui32VirtPageCount);
+	if (!psPrivData->pasDevPhysAddr)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate buffer for physical addresses (oom)",
+				 __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errFreePrivData;
+	}
+
+	if (bZeroOnAlloc || bPoisonOnAlloc)
+	{
+		void *pvKernAddr;
+		int i, err;
+
+		err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE);
+		if (err)
+		{
+			eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+			goto errFreePhysAddr;
+		}
+
+		for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
+		{
+			pvKernAddr = dma_buf_kmap(psDmaBuf, i);
+			if (IS_ERR_OR_NULL(pvKernAddr))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to map page for %s (err=%ld)",
+						 __func__, bZeroOnAlloc ? "zeroing" : "poisoning",
+						 pvKernAddr ? PTR_ERR(pvKernAddr) : -ENOMEM));
+				eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+
+				do {
+					err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+				} while (err == -EAGAIN || err == -EINTR);
+
+				goto errFreePhysAddr;
+			}
+
+			if (bZeroOnAlloc)
+			{
+				memset(pvKernAddr, 0, PAGE_SIZE);
+			}
+			else
+			{
+				_Poison(pvKernAddr, PAGE_SIZE, _AllocPoison, _AllocPoisonSize);
+			}
+
+			dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
+		}
+
+		do {
+			err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+		} while (err == -EAGAIN || err == -EINTR);
+	}
+
+	table = dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(table))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errFreePhysAddr;
+	}
+
+	/*
+	 * We do a two-pass process: first work out how many pages there
+	 * are, and second fill in the data.
+	 */
+	for_each_sg(table->sgl, sg, table->nents, i)
+	{
+		ui32PageCount += PAGE_ALIGN(pvr_sg_length(sg)) / PAGE_SIZE;
+	}
+
+	if (WARN_ON(!ui32PageCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Number of phys. pages must not be zero",
+				 __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errUnmap;
+	}
+
+	if (WARN_ON(ui32PageCount != ui32NumPhysChunks * uiPagesPerChunk))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Requested physical chunks and actual "
+				"number of physical dma buf pages don't match",
+				 __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errUnmap;
+	}
+
+	psPrivData->ui32PhysPageCount = ui32PageCount;
+	psPrivData->psSgTable = table;
+	ui32PageCount = 0;
+	sg = table->sgl;
+	uiSglOffset = 0;
+
+	/* Fill physical address array */
+	for (i = 0; i < ui32NumPhysChunks; i++)
+	{
+		for (j = 0; j < uiPagesPerChunk; j++)
+		{
+			IMG_UINT32 uiIdx = pui32MappingTable[i] * uiPagesPerChunk + j;
+
+			psPrivData->pasDevPhysAddr[uiIdx].uiAddr =
+					sg_dma_address(sg) + uiSglOffset;
+
+			/* Get the next offset for the current sgl or the next sgl */
+			uiSglOffset += PAGE_SIZE;
+			if (uiSglOffset >= pvr_sg_length(sg))
+			{
+				sg = sg_next(sg);
+				uiSglOffset = 0;
+
+				/* Check that we haven't looped */
+				if (WARN_ON(sg == table->sgl))
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: Failed to fill phys. address "
+							"array ",
+							 __func__));
+					eError = PVRSRV_ERROR_INVALID_PARAMS;
+					goto errUnmap;
+				}
+			}
+		}
+	}
+
+	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+	/*
+	 * Check no significant bits were lost in cast due to different
+	 * bit widths for flags
+	 */
+	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+	eError = PMRCreatePMR(psDevNode,
+			      psHeap,
+			      ui32NumVirtChunks * uiChunkSize,
+			      uiChunkSize,
+			      ui32NumPhysChunks,
+			      ui32NumVirtChunks,
+			      pui32MappingTable,
+			      PAGE_SHIFT,
+			      uiPMRFlags,
+			      "IMPORTED_DMABUF",
+			      &_sPMRDmaBufFuncTab,
+			      psPrivData,
+			      PMR_TYPE_DMABUF,
+			      ppsPMRPtr,
+			      PDUMP_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PMR (%s)",
+				 __func__, PVRSRVGetErrorStringKM(eError)));
+		goto errFreePhysAddr;
+	}
+
+	return PVRSRV_OK;
+
+errUnmap:
+	dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL);
+errFreePhysAddr:
+	OSFreeMem(psPrivData->pasDevPhysAddr);
+errFreePrivData:
+	OSFreeMem(psPrivData);
+errReturn:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static PVRSRV_ERROR PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap,
+					 struct dma_buf_attachment *psAttachment)
+{
+	struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+
+	PVR_UNREFERENCED_PARAMETER(psHeap);
+
+	dma_buf_detach(psDmaBuf, psAttachment);
+	dma_buf_put(psDmaBuf);
+
+	return PVRSRV_OK;
+}
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR)
+{
+	PMR_DMA_BUF_DATA *psPrivData;
+
+	psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab);
+	if (psPrivData)
+	{
+		return psPrivData->psAttachment->dmabuf;
+	}
+
+	return NULL;
+}
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    PMR *psPMR,
+                    IMG_INT *piFd)
+{
+	struct dma_buf *psDmaBuf;
+	IMG_DEVMEM_SIZE_T uiPMRSize;
+	PVRSRV_ERROR eError;
+	IMG_INT iFd;
+
+	mutex_lock(&g_HashLock);
+
+	PMRRefPMR(psPMR);
+
+	eError = PMR_LogicalSize(psPMR, &uiPMRSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_pmr_ref;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	{
+		DEFINE_DMA_BUF_EXPORT_INFO(sDmaBufExportInfo);
+
+		sDmaBufExportInfo.priv  = psPMR;
+		sDmaBufExportInfo.ops   = &sPVRDmaBufOps;
+		sDmaBufExportInfo.size  = uiPMRSize;
+		sDmaBufExportInfo.flags = O_RDWR;
+
+		psDmaBuf = dma_buf_export(&sDmaBufExportInfo);
+	}
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
+	psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps,
+	                          uiPMRSize, O_RDWR, NULL);
+#else
+	psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps,
+	                          uiPMRSize, O_RDWR);
+#endif
+
+	if (IS_ERR_OR_NULL(psDmaBuf))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to export buffer (err=%ld)",
+		         __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_pmr_ref;
+	}
+
+	iFd = dma_buf_fd(psDmaBuf, O_RDWR);
+	if (iFd < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf fd (err=%d)",
+		         __func__, iFd));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_dma_buf;
+	}
+
+	mutex_unlock(&g_HashLock);
+	*piFd = iFd;
+	return PVRSRV_OK;
+
+fail_dma_buf:
+	dma_buf_put(psDmaBuf);
+
+fail_pmr_ref:
+	PMRUnrefPMR(psPMR);
+	mutex_unlock(&g_HashLock);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    IMG_INT fd,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    PMR **ppsPMRPtr,
+                    IMG_DEVMEM_SIZE_T *puiSize,
+                    IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32 ui32MappingTable = 0;
+	struct dma_buf *psDmaBuf;
+	PVRSRV_ERROR eError;
+
+	/* Get the buffer handle */
+	psDmaBuf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(psDmaBuf))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)",
+				 __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+		return PVRSRV_ERROR_BAD_MAPPING;
+	}
+
+	uiSize = psDmaBuf->size;
+
+	eError = PhysmemImportSparseDmaBuf(psConnection,
+	                                 psDevNode,
+	                                 fd,
+	                                 uiFlags,
+	                                 uiSize,
+	                                 1,
+	                                 1,
+	                                 &ui32MappingTable,
+	                                 ppsPMRPtr,
+	                                 puiSize,
+	                                 puiAlign);
+
+	dma_buf_put(psDmaBuf);
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_INT fd,
+                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                          IMG_DEVMEM_SIZE_T uiChunkSize,
+                          IMG_UINT32 ui32NumPhysChunks,
+                          IMG_UINT32 ui32NumVirtChunks,
+                          IMG_UINT32 *pui32MappingTable,
+                          PMR **ppsPMRPtr,
+                          IMG_DEVMEM_SIZE_T *puiSize,
+                          IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	PMR *psPMR = NULL;
+	struct dma_buf_attachment *psAttachment;
+	struct dma_buf *psDmaBuf;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bHashTableCreated = IMG_FALSE;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	if (!psDevNode)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errReturn;
+	}
+
+	/* Get the buffer handle */
+	psDmaBuf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(psDmaBuf))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)",
+				 __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+		eError = PVRSRV_ERROR_BAD_MAPPING;
+		goto errReturn;
+	}
+
+	mutex_lock(&g_HashLock);
+	if (psDmaBuf->ops == &sPVRDmaBufOps)
+	{
+		PVRSRV_DEVICE_NODE *psPMRDevNode;
+
+		/* We exported this dma_buf, so we can just get its PMR */
+		psPMR = (PMR *) psDmaBuf->priv;
+
+		if (psPMR)
+		{
+			PMRSetPath(psPMR);
+		}
+
+		/* However, we can't import it if it belongs to a different device */
+		psPMRDevNode = PMR_DeviceNode(psPMR);
+		if (psPMRDevNode != psDevNode)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device\n",
+					 __func__));
+			eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+			goto err;
+		}
+	}
+	else
+	{
+		if (g_psDmaBufHash)
+		{
+			/* We have a hash table so check if we've seen this dmabuf before */
+			psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+		}
+		else
+		{
+			/*
+			 * As different processes may import the same dmabuf we need to
+			 * create a hash table so we don't generate a duplicate PMR but
+			 * rather just take a reference on an existing one.
+			 */
+			g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE);
+			if (!g_psDmaBufHash)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto err;
+			}
+			bHashTableCreated = IMG_TRUE;
+		}
+	}
+
+	if (psPMR)
+	{
+		/* Reuse the PMR we already created */
+		PMRRefPMR(psPMR);
+
+		*ppsPMRPtr = psPMR;
+		PMR_LogicalSize(psPMR, puiSize);
+		*puiAlign = PAGE_SIZE;
+	}
+	/* No errors so far */
+	eError = PVRSRV_OK;
+
+err:
+	if (psPMR || (eError != PVRSRV_OK))
+	{
+		mutex_unlock(&g_HashLock);
+		dma_buf_put(psDmaBuf);
+
+		return eError;
+	}
+
+	/* Do we want this to be a sparse PMR? */
+	if (ui32NumVirtChunks > 1)
+	{
+		IMG_UINT32 i;
+
+		/* Parameter validation */
+		if (psDmaBuf->size != (uiChunkSize * ui32NumPhysChunks) ||
+		    uiChunkSize != PAGE_SIZE ||
+		    ui32NumPhysChunks > ui32NumVirtChunks)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Requesting sparse buffer: "
+					"uiChunkSize ("IMG_DEVMEM_SIZE_FMTSPEC") must be equal to "
+					"OS page size (%lu). uiChunkSize * ui32NumPhysChunks "
+					"("IMG_DEVMEM_SIZE_FMTSPEC") must"
+					" be equal to the buffer size ("IMG_SIZE_FMTSPEC"). "
+					"ui32NumPhysChunks (%u) must be lesser or equal to "
+					"ui32NumVirtChunks (%u)",
+					 __func__,
+					uiChunkSize,
+					PAGE_SIZE,
+					uiChunkSize * ui32NumPhysChunks,
+					psDmaBuf->size,
+					ui32NumPhysChunks,
+					ui32NumVirtChunks));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto errUnlockAndDMAPut;
+		}
+
+		/* Parameter validation - Mapping table entries*/
+		for (i = 0; i < ui32NumPhysChunks; i++)
+		{
+			if (pui32MappingTable[i] >= ui32NumVirtChunks)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Requesting sparse buffer: "
+						"Entry in mapping table (%u) is out of allocation "
+						"bounds (%u)",
+						 __func__,
+						 (IMG_UINT32) pui32MappingTable[i],
+						 (IMG_UINT32) ui32NumVirtChunks));
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto errUnlockAndDMAPut;
+			}
+		}
+	}
+	else
+	{
+		/* Make sure parameters are valid for non-sparse allocations as well */
+		uiChunkSize = psDmaBuf->size;
+		ui32NumPhysChunks = 1;
+		ui32NumVirtChunks = 1;
+		pui32MappingTable[0] = 0;
+	}
+
+	psAttachment = dma_buf_attach(psDmaBuf, psDevNode->psDevConfig->pvOSDevice);
+	if (IS_ERR_OR_NULL(psAttachment))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to attach to dma-buf (err=%ld)",
+				 __func__, psAttachment? PTR_ERR(psAttachment) : -ENOMEM));
+		eError = PVRSRV_ERROR_BAD_MAPPING;
+		goto errUnlockAndDMAPut;
+	}
+
+	/*
+	 * Note:
+	 * Since we have no way to determine the type of the buffer, we just
+	 * assume that all dmabufs are from the same physical heap.
+	 */
+	eError = PhysmemCreateNewDmaBufBackedPMR(psDevNode,
+	                                         psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL],
+	                                         psAttachment,
+	                                         PhysmemDestroyDmaBuf,
+	                                         uiFlags,
+	                                         uiChunkSize,
+	                                         ui32NumPhysChunks,
+	                                         ui32NumVirtChunks,
+	                                         pui32MappingTable,
+	                                         &psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto errDMADetach;
+	}
+
+	/* First time we've seen this dmabuf so store it in the hash table */
+	HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR);
+	g_ui32HashRefCount++;
+
+	mutex_unlock(&g_HashLock);
+
+	*ppsPMRPtr = psPMR;
+	*puiSize = ui32NumVirtChunks * uiChunkSize;
+	*puiAlign = PAGE_SIZE;
+
+	return PVRSRV_OK;
+
+errDMADetach:
+	dma_buf_detach(psDmaBuf, psAttachment);
+
+errUnlockAndDMAPut:
+	if (bHashTableCreated)
+	{
+		HASH_Delete(g_psDmaBufHash);
+		g_psDmaBufHash = NULL;
+	}
+	mutex_unlock(&g_HashLock);
+	dma_buf_put(psDmaBuf);
+
+errReturn:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) */
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                                PHYS_HEAP *psHeap,
+                                struct dma_buf_attachment *psAttachment,
+                                PFN_DESTROY_DMABUF_PMR pfnDestroy,
+                                PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                IMG_DEVMEM_SIZE_T uiChunkSize,
+                                IMG_UINT32 ui32NumPhysChunks,
+                                IMG_UINT32 ui32NumVirtChunks,
+                                IMG_UINT32 *pui32MappingTable,
+                                PMR **ppsPMRPtr)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(psHeap);
+	PVR_UNREFERENCED_PARAMETER(psAttachment);
+	PVR_UNREFERENCED_PARAMETER(pfnDestroy);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+	PVR_UNREFERENCED_PARAMETER(uiChunkSize);
+	PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+	PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+	PVR_UNREFERENCED_PARAMETER(pui32MappingTable);
+	PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+
+	return NULL;
+}
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    PMR *psPMR,
+                    IMG_INT *piFd)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(piFd);
+
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    IMG_INT fd,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    PMR **ppsPMRPtr,
+                    IMG_DEVMEM_SIZE_T *puiSize,
+                    IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(fd);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+	PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+	PVR_UNREFERENCED_PARAMETER(puiSize);
+	PVR_UNREFERENCED_PARAMETER(puiAlign);
+
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_INT fd,
+                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                          IMG_DEVMEM_SIZE_T uiChunkSize,
+                          IMG_UINT32 ui32NumPhysChunks,
+                          IMG_UINT32 ui32NumVirtChunks,
+                          IMG_UINT32 *pui32MappingTable,
+                          PMR **ppsPMRPtr,
+                          IMG_DEVMEM_SIZE_T *puiSize,
+                          IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(fd);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+	PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+	PVR_UNREFERENCED_PARAMETER(puiSize);
+	PVR_UNREFERENCED_PARAMETER(puiAlign);
+	PVR_UNREFERENCED_PARAMETER(uiChunkSize);
+	PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+	PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+	PVR_UNREFERENCED_PARAMETER(pui32MappingTable);
+
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/physmem_osmem_linux.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/physmem_osmem_linux.c
new file mode 100644
index 0000000..4ab965a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/physmem_osmem_linux.c
@@ -0,0 +1,3755 @@
+/*************************************************************************/ /*!
+@File
+@Title          Implementation of PMR functions for OS managed memory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management.  This module is responsible for
+                implementing the function callbacks for physical memory borrowed
+                from that normally managed by the operating system.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/vmalloc.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <asm/io.h>
+#if defined(CONFIG_X86)
+#include <asm/cacheflush.h>
+#endif
+
+/* include/ */
+#include "rgx_heaps.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "rgx_pdump_panics.h"
+/* services/server/include/ */
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#include "cache_km.h"
+#include "devicemem_server_utils.h"
+
+/* ourselves */
+#include "physmem_osmem.h"
+#include "physmem_osmem_linux.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM;
+#else
+/* split_page not available on older kernels */
+#undef PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM
+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0
+static IMG_UINT32 g_uiMaxOrder = 0;
+#endif
+
+/*
+   These correspond to the MMU min/max page sizes and associated PTE
+   alignment that can be used on the device for an allocation: 4KB (min)
+   and 2MB (max) respectively.
+*/
+#define PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ	RGX_HEAP_4KB_PAGE_SHIFT
+#define PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ	RGX_HEAP_2MB_PAGE_SHIFT
+
+/* Defines how many pages should be mapped at once to the kernel */
+#define PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES 1024 /* 4 MB */
+
+/*
+	These are used to get/set/mask lower-order bits in a dma_addr_t
+	to provide side-band information associated with that address.
+	This includes whether the address was obtained via alloc_page
+	or dma_alloc, and whether the address came pre-aligned or an
+	adjustment was made manually to align it.
+*/
+#define DMA_SET_ADJUSTED_ADDR(x)		((x) | ((dma_addr_t)0x02))
+#define DMA_IS_ADDR_ADJUSTED(x)			((x) & ((dma_addr_t)0x02))
+#define DMA_SET_ALLOCPG_ADDR(x)			((x) | ((dma_addr_t)0x01))
+#define DMA_IS_ALLOCPG_ADDR(x)			((x) & ((dma_addr_t)0x01))
+#define DMA_GET_ALIGN_ADJUSTMENT(x)		((x>>2) & ((dma_addr_t)0x3ff))
+#define DMA_SET_ALIGN_ADJUSTMENT(x,y)	((x) | (((dma_addr_t)y)<<0x02))
+#define DMA_GET_ADDR(x)					(((dma_addr_t)x) & ((dma_addr_t)~0xfff))
+#define DMA_VADDR_NOT_IN_USE			0xCAFEF00DDEADBEEF
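+/*
+	Side-band layout within a page-aligned dma_addr_t (see macros above):
+	bit 0     - set if the address came from alloc_page rather than dma_alloc
+	bit 1     - set if the address was manually adjusted for alignment
+	bits 2-11 - the alignment adjustment that was applied
+	bits 12+  - the address itself, recovered with DMA_GET_ADDR()
+*/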
+
+#define INVALID_PAGE_ADDR 0ULL
+
+typedef struct _PMR_OSPAGEARRAY_DATA_ {
+	/* Device for which this allocation has been made */
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	/*
+	 * iNumOSPagesAllocated:
+	 * Number of pages allocated in this PMR so far.
+	 * This allows for up to (2^31 - 1) pages. With 4KB pages, that's 8TB of memory for each PMR.
+	 */
+	IMG_INT32 iNumOSPagesAllocated;
+
+	/*
+	 * uiTotalNumOSPages:
+	 * Total number of pages supported by this PMR (currently fixed due to
+	 * the fixed page table array size), counted in units of "pages"
+	 * (a.k.a. macro pages, compound pages, higher-order pages, etc.).
+	 */
+	IMG_UINT32 uiTotalNumOSPages;
+
+	/*
+	  uiLog2AllocPageSize:
+
+	  log2 of the size of each "page" -- this would normally be the same
+	  as PAGE_SHIFT, but we support the idea that we may allocate pages
+	  in larger chunks for better contiguity, using order>0 in the
+	  call to alloc_pages()
+	*/
+	IMG_UINT32 uiLog2AllocPageSize;
+
+	/*
+	  For non DMA/CMA allocation, pagearray references the pages
+	  thus allocated; one entry per compound page when compound
+	  pages are used. In addition, for DMA/CMA allocations, we
+	  track the returned cpu virtual and device bus address.
+	*/
+	struct page **pagearray;
+	dma_addr_t *dmaphysarray;
+	void **dmavirtarray;
+
+	/*
+	  Record at alloc time whether poisoning will be required when the
+	  PMR is freed.
+	*/
+	IMG_BOOL bZero;
+	IMG_BOOL bPoisonOnFree;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bOnDemand;
+	IMG_BOOL bUnpinned; /* Should be protected by page pool lock */
+	IMG_BOOL bIsCMA; /* Is CMA memory allocated via DMA framework */
+
+	/*
+	  The cache mode of the PMR, additionally carrying the CPU-Cache-Clean
+	  flag which advises us to do cache maintenance on behalf of the caller.
+	*/
+	IMG_UINT32 ui32CPUCacheFlags;
+	/*
+	  Tracks whether we need to revert the cache attributes of the pages
+	  used in this allocation. Depends on OS/architecture.
+	*/
+	IMG_BOOL bUnsetMemoryType;
+} PMR_OSPAGEARRAY_DATA;
+
+/***********************************
+ * Page pooling for uncached pages *
+ ***********************************/
+
+static INLINE void
+_FreeOSPage_CMA(struct device *dev,
+				size_t alloc_size,
+				IMG_UINT32 uiOrder,
+				void *virt_addr,
+				dma_addr_t dev_addr,
+				struct page *psPage);
+
+static void
+_FreeOSPage(IMG_UINT32 uiOrder,
+			IMG_BOOL bUnsetMemoryType,
+			struct page *psPage);
+
+static PVRSRV_ERROR
+_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+			IMG_UINT32 *pai32FreeIndices,
+			IMG_UINT32 ui32FreePageCount);
+
+static PVRSRV_ERROR
+_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
+						   IMG_UINT32 *puiPagesFreed);
+
+static inline void
+_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode,
+					struct page **ppsPage,
+					IMG_UINT32 uiNumPages,
+					IMG_BOOL bFlush);
+
+static inline PVRSRV_ERROR
+_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode,
+					struct page **ppsPage,
+					IMG_UINT32 uiNumPages,
+					IMG_BOOL bFlush,
+					IMG_UINT32 ui32CPUCacheFlags);
+
+static inline unsigned int
+_GetGFPFlags(IMG_BOOL bZero,
+             PVRSRV_DEVICE_NODE *psDevNode);
+
+/* A struct for our page pool holding an array of zeroed (!) pages.
+ * We always put whole page arrays into the pool but are
+ * able to take out individual pages */
+typedef	struct
+{
+	/* Linkage for page pool LRU list */
+	struct list_head sPagePoolItem;
+
+	/* How many items are still in the page array */
+	IMG_UINT32 uiItemsRemaining;
+	/* Array of the actual pages */
+	struct page **ppsPageArray;
+
+} LinuxPagePoolEntry;
+
+/* CleanupThread structure to put allocation in page pool */
+typedef struct
+{
+	PVRSRV_CLEANUP_THREAD_WORK sCleanupWork;
+	IMG_UINT32 ui32CPUCacheMode;
+	LinuxPagePoolEntry *psPoolEntry;
+} LinuxCleanupData;
+
+/* A struct for the unpinned items */
+typedef struct
+{
+	struct list_head sUnpinPoolItem;
+	PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr;
+} LinuxUnpinEntry;
+
+
+/* Caches to hold page pool and page array structures */
+static struct kmem_cache *g_psLinuxPagePoolCache = NULL;
+static struct kmem_cache *g_psLinuxPageArray = NULL;
+
+/* Track what is live, all protected by pool lock.
+ * x86 needs two page pools because we have to change the memory attributes
+ * of the pages which is expensive due to an implicit flush.
+ * See set_pages_array_uc/wc/wb.  */
+static IMG_UINT32 g_ui32UnpinPageCount = 0;
+static IMG_UINT32 g_ui32PagePoolUCCount = 0;
+#if defined(CONFIG_X86)
+static IMG_UINT32 g_ui32PagePoolWCCount = 0;
+#endif
+/* Tracks asynchronous tasks currently accessing the page pool.
+ * It is incremented when a deferred free task is created and decremented
+ * when that task has finished its work.
+ * The atomic prevents piling up of deferred work in case the deferred thread
+ * cannot keep up with the application. */
+static ATOMIC_T g_iPoolCleanTasks;
+/* We don't want too many asynchronous threads trying to access the page pool
+ * at the same time */
+#define PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS 128
+
+/* Defines how many pages the page pool should hold. */
+#if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES)
+static const IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES;
+#else
+static const IMG_UINT32 g_ui32PagePoolMaxEntries = 0;
+#endif
+
+/* 	We double check if we would exceed this limit if we are below MAX_POOL_PAGES
+	and want to add an allocation to the pool.
+	This prevents big allocations being given back to the OS just because they
+	exceed the MAX_POOL_PAGES limit even though the pool is currently empty. */
+#if defined(PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES)
+static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES;
+#else
+static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = 0;
+#endif
+
+#if defined(CONFIG_X86)
+#define PHYSMEM_OSMEM_NUM_OF_POOLS 2
+static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
+	PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+	PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE
+};
+#else
+#define PHYSMEM_OSMEM_NUM_OF_POOLS 1
+static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
+	PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
+};
+#endif
+
+/* Global structures we use to manage the page pool */
+static DEFINE_MUTEX(g_sPagePoolMutex);
+
+/* List holding the page array pointers: */
+static LIST_HEAD(g_sPagePoolList_WC);
+static LIST_HEAD(g_sPagePoolList_UC);
+static LIST_HEAD(g_sUnpinList);
+
+static inline IMG_UINT32
+_PagesInPoolUnlocked(void)
+{
+	IMG_UINT32 uiCnt = g_ui32PagePoolUCCount;
+#if defined(CONFIG_X86)
+	uiCnt += g_ui32PagePoolWCCount;
+#endif
+	return uiCnt;
+}
+
+static inline void
+_PagePoolLock(void)
+{
+	mutex_lock(&g_sPagePoolMutex);
+}
+
+static inline int
+_PagePoolTrylock(void)
+{
+	return mutex_trylock(&g_sPagePoolMutex);
+}
+
+static inline void
+_PagePoolUnlock(void)
+{
+	mutex_unlock(&g_sPagePoolMutex);
+}
+
+static PVRSRV_ERROR
+_AddUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
+{
+	LinuxUnpinEntry *psUnpinEntry;
+
+	psUnpinEntry = OSAllocMem(sizeof(*psUnpinEntry));
+	if (!psUnpinEntry)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: OSAllocMem failed. Cannot add entry to unpin list.",
+				__func__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psUnpinEntry->psPageArrayDataPtr = psOSPageArrayData;
+
+	/* Add to the unpin list so that the shrinker can access it easily */
+	list_add_tail(&psUnpinEntry->sUnpinPoolItem, &g_sUnpinList);
+
+	g_ui32UnpinPageCount += psOSPageArrayData->iNumOSPagesAllocated;
+
+	return PVRSRV_OK;
+}
+
+static void
+_RemoveUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
+{
+	LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
+
+	/* Remove from pool */
+	list_for_each_entry_safe(psUnpinEntry,
+	                         psTempUnpinEntry,
+	                         &g_sUnpinList,
+	                         sUnpinPoolItem)
+	{
+		if (psUnpinEntry->psPageArrayDataPtr == psOSPageArrayData)
+		{
+			list_del(&psUnpinEntry->sUnpinPoolItem);
+			break;
+		}
+	}
+
+	OSFreeMem(psUnpinEntry);
+
+	g_ui32UnpinPageCount -= psOSPageArrayData->iNumOSPagesAllocated;
+}
+
+static inline IMG_BOOL
+_GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags,
+				 struct list_head **ppsPoolHead,
+				 IMG_UINT32 **ppuiCounter)
+{
+	switch(PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+#if defined(CONFIG_X86)
+		/*
+			For x86 we need to keep different lists for uncached
+			and write-combined as we must always honour the PAT
+			setting which cares about this difference.
+		*/
+
+			*ppsPoolHead = &g_sPagePoolList_WC;
+			*ppuiCounter = &g_ui32PagePoolWCCount;
+			break;
+#endif
+
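+		/* On non-x86 builds the write-combine case above falls through
+		   here and shares the UC pool */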
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+			*ppsPoolHead = &g_sPagePoolList_UC;
+			*ppuiCounter = &g_ui32PagePoolUCCount;
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Unknown CPU caching mode. "
+					 "Using default UC pool.",
+					 __func__));
+			*ppsPoolHead = &g_sPagePoolList_UC;
+			*ppuiCounter = &g_ui32PagePoolUCCount;
+			PVR_ASSERT(0);
+			return IMG_FALSE;
+	}
+	return IMG_TRUE;
+}
+
+static struct shrinker g_sShrinker;
+
+/* Returning the number of pages that still reside in the page pool. */
+static unsigned long
+_GetNumberOfPagesInPoolUnlocked(void)
+{
+	return _PagesInPoolUnlocked() + g_ui32UnpinPageCount;
+}
+
+/* Linux shrinker function that informs the OS about how many pages we are
+ * caching and are able to reclaim. */
+static unsigned long
+_CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+	int remain;
+
+	PVR_ASSERT(psShrinker == &g_sShrinker);
+	(void)psShrinker;
+	(void)psShrinkControl;
+
+	/* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
+	if (_PagePoolTrylock() == 0)
+		return 0;
+	remain = _GetNumberOfPagesInPoolUnlocked();
+	_PagePoolUnlock();
+
+	return remain;
+}
+
+/* Linux shrinker function to reclaim the pages from our page pool */
+static unsigned long
+_ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+	unsigned long uNumToScan = psShrinkControl->nr_to_scan;
+	unsigned long uSurplus = 0;
+	LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
+	IMG_UINT32 uiPagesFreed;
+
+	PVR_ASSERT(psShrinker == &g_sShrinker);
+	(void)psShrinker;
+
+	/* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
+	if (_PagePoolTrylock() == 0)
+		return SHRINK_STOP;
+
+	_FreePagesFromPoolUnlocked(uNumToScan,
+							   &uiPagesFreed);
+	uNumToScan -= uiPagesFreed;
+
+	if (uNumToScan == 0)
+	{
+		goto e_exit;
+	}
+
+	/* Free unpinned memory, starting with LRU entries */
+	list_for_each_entry_safe(psUnpinEntry,
+							 psTempUnpinEntry,
+							 &g_sUnpinList,
+							 sUnpinPoolItem)
+	{
+		PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr = psUnpinEntry->psPageArrayDataPtr;
+		IMG_UINT32 uiNumPages = (psPageArrayDataPtr->uiTotalNumOSPages > psPageArrayDataPtr->iNumOSPagesAllocated) ?
+								psPageArrayDataPtr->iNumOSPagesAllocated : psPageArrayDataPtr->uiTotalNumOSPages;
+		PVRSRV_ERROR eError;
+
+		/* Free associated pages */
+		eError = _FreeOSPages(psPageArrayDataPtr,
+							  NULL,
+							  0);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Shrinker is unable to free unpinned pages. Error: %s (%d)",
+					 __FUNCTION__,
+					 PVRSRVGetErrorStringKM(eError),
+					 eError));
+			goto e_exit;
+		}
+
+		/* Remove item from pool */
+		list_del(&psUnpinEntry->sUnpinPoolItem);
+
+		g_ui32UnpinPageCount -= uiNumPages;
+
+		/* Check if there is more to free or if we already surpassed the limit */
+		if (uiNumPages < uNumToScan)
+		{
+			uNumToScan -= uiNumPages;
+
+		}
+		else if (uiNumPages > uNumToScan)
+		{
+			uSurplus += uiNumPages - uNumToScan;
+			uNumToScan = 0;
+			goto e_exit;
+		}
+		else
+		{
+			uNumToScan -= uiNumPages;
+			goto e_exit;
+		}
+	}
+
+e_exit:
+	if (list_empty(&g_sUnpinList))
+	{
+		PVR_ASSERT(g_ui32UnpinPageCount == 0);
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+	{
+		int remain;
+		remain = _GetNumberOfPagesInPoolUnlocked();
+		_PagePoolUnlock();
+		return remain;
+	}
+#else
+	/* Returning the number of pages freed during the scan */
+	_PagePoolUnlock();
+	return psShrinkControl->nr_to_scan - uNumToScan + uSurplus;
+#endif
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+static int
+_ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+	if (psShrinkControl->nr_to_scan != 0)
+	{
+		return _ScanObjectsInPagePool(psShrinker, psShrinkControl);
+	}
+	else
+	{
+		/* No pages are being reclaimed so just return the page count */
+		return _CountObjectsInPagePool(psShrinker, psShrinkControl);
+	}
+}
+
+static struct shrinker g_sShrinker =
+{
+	.shrink = _ShrinkPagePool,
+	.seeks = DEFAULT_SEEKS
+};
+#else
+static struct shrinker g_sShrinker =
+{
+	.count_objects = _CountObjectsInPagePool,
+	.scan_objects = _ScanObjectsInPagePool,
+	.seeks = DEFAULT_SEEKS
+};
+#endif
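+/* Kernels before 3.12 expose a single .shrink callback that both counts and
+ * reclaims objects; newer kernels split this into .count_objects and
+ * .scan_objects, hence the two registrations above */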
+
+/* Register the shrinker so Linux can reclaim cached pages */
+void LinuxInitPhysmem(void)
+{
+	g_psLinuxPageArray = kmem_cache_create("pvr-pa", sizeof(PMR_OSPAGEARRAY_DATA), 0, 0, NULL);
+
+	_PagePoolLock();
+	g_psLinuxPagePoolCache = kmem_cache_create("pvr-pp", sizeof(LinuxPagePoolEntry), 0, 0, NULL);
+	if (g_psLinuxPagePoolCache)
+	{
+		/* Only register the shrinker if we created the cache OK */
+		register_shrinker(&g_sShrinker);
+	}
+	_PagePoolUnlock();
+
+	OSAtomicWrite(&g_iPoolCleanTasks, 0);
+}
+
+/* Unregister the shrinker and remove all pages from the pool that are still left */
+void LinuxDeinitPhysmem(void)
+{
+	IMG_UINT32 uiPagesFreed;
+
+	if (OSAtomicRead(&g_iPoolCleanTasks) > 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "Still deferred cleanup tasks running "
+				"while deinitialising memory subsystem."));
+	}
+
+	_PagePoolLock();
+	if (_FreePagesFromPoolUnlocked(IMG_UINT32_MAX, &uiPagesFreed) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when "
+				"deinitialising memory subsystem."));
+		PVR_ASSERT(0);
+	}
+
+	PVR_ASSERT(_PagesInPoolUnlocked() == 0);
+
+	/* Free the page cache */
+	kmem_cache_destroy(g_psLinuxPagePoolCache);
+
+	unregister_shrinker(&g_sShrinker);
+	_PagePoolUnlock();
+
+	kmem_cache_destroy(g_psLinuxPageArray);
+}
+
+static void EnableOOMKiller(void)
+{
+	current->flags &= ~PF_DUMPCORE;
+}
+
+static void DisableOOMKiller(void)
+{
+	/* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled.
+	 *
+	 * As oom_killer_disable() is an inline, non-exported function, we
+	 * can't use it from a modular driver. Furthermore, the OOM killer
+	 * API doesn't look thread safe, whereas accessing `current' is.
+	 */
+	WARN_ON(current->flags & PF_DUMPCORE);
+	current->flags |= PF_DUMPCORE;
+}
+
+/* Prints out the addresses in a page array for debugging purposes.
+ * Define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY locally to activate: */
+/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY 1 */
+static inline void
+_DumpPageArray(struct page **pagearray, IMG_UINT32 uiPagesToPrint)
+{
+#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY)
+	IMG_UINT32 i;
+	if (pagearray)
+	{
+		printk("Array %p:\n", pagearray);
+		for (i = 0; i < uiPagesToPrint; i++)
+		{
+			printk("%p | ", (pagearray)[i]);
+		}
+		printk("\n");
+	}
+	else
+	{
+		printk("Array is NULL:\n");
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(pagearray);
+	PVR_UNREFERENCED_PARAMETER(uiPagesToPrint);
+#endif
+}
+
+/* Debugging function that dumps out the number of pages for every
+ * page array that is currently in the page pool.
+ * Not defined by default. Define locally to activate feature: */
+/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL 1 */
+static void
+_DumpPoolStructure(void)
+{
+#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL)
+	LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+	struct list_head *psPoolHead = NULL;
+	IMG_UINT32  j;
+	IMG_UINT32 *puiCounter;
+
+	printk("\n");
+	/* Empty all pools */
+	for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
+	{
+
+		printk("pool = %u \n", j);
+
+		/* Get the correct list for this caching mode */
+		if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter))
+		{
+			break;
+		}
+
+		list_for_each_entry_safe(psPagePoolEntry,
+								 psTempPoolEntry,
+								 psPoolHead,
+								 sPagePoolItem)
+		{
+			printk("%u | ", psPagePoolEntry->uiItemsRemaining);
+		}
+		printk("\n");
+	}
+#endif
+}
+
+/* Free a certain number of pages from the page pool.
+ * Mainly used in error paths or at deinitialisation to
+ * empty the whole pool. */
+static PVRSRV_ERROR
+_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
+						   IMG_UINT32 *puiPagesFreed)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+	struct list_head *psPoolHead = NULL;
+	IMG_UINT32 i, j;
+	IMG_UINT32 *puiCounter;
+
+	*puiPagesFreed = uiMaxPagesToFree;
+
+	/* Empty all pools */
+	for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
+	{
+
+		/* Get the correct list for this caching mode */
+		if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter))
+		{
+			break;
+		}
+
+		/* Free the pages and remove page arrays from the pool if they are exhausted */
+		list_for_each_entry_safe(psPagePoolEntry,
+								 psTempPoolEntry,
+								 psPoolHead,
+								 sPagePoolItem)
+		{
+			IMG_UINT32 uiItemsToFree;
+			struct page **ppsPageArray;
+
+			/* Check if we are going to free the whole page array or just parts */
+			if (psPagePoolEntry->uiItemsRemaining <= uiMaxPagesToFree)
+			{
+				uiItemsToFree = psPagePoolEntry->uiItemsRemaining;
+				ppsPageArray = psPagePoolEntry->ppsPageArray;
+			}
+			else
+			{
+				uiItemsToFree = uiMaxPagesToFree;
+				ppsPageArray = &(psPagePoolEntry->ppsPageArray[psPagePoolEntry->uiItemsRemaining - uiItemsToFree]);
+			}
+
+#if defined(CONFIG_X86)
+			/* Set the correct page caching attributes on x86 */
+			if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[j]))
+			{
+				int ret;
+				ret = set_pages_array_wb(ppsPageArray, uiItemsToFree);
+				if (ret)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
+					eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES;
+					goto e_exit;
+				}
+			}
+#endif
+
+			/* Free the actual pages */
+			for (i = 0; i < uiItemsToFree; i++)
+			{
+				__free_pages(ppsPageArray[i], 0);
+				ppsPageArray[i] = NULL;
+			}
+
+			/* Reduce counters */
+			uiMaxPagesToFree -= uiItemsToFree;
+			*puiCounter -= uiItemsToFree;
+			psPagePoolEntry->uiItemsRemaining -= uiItemsToFree;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+			/* MemStats usually relies on having the bridge lock held, however
+			 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+			 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+			 * the page pool lock is used to ensure these calls are mutually
+			 * exclusive
+			 */
+			PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * uiItemsToFree);
+#endif
+
+			/* If this pool entry is exhausted, delete it */
+			if (psPagePoolEntry->uiItemsRemaining == 0)
+			{
+				OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+				list_del(&psPagePoolEntry->sPagePoolItem);
+				kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+			}
+
+			/* Return if we have all our pages */
+			if (uiMaxPagesToFree == 0)
+			{
+				goto e_exit;
+			}
+		}
+	}
+
+e_exit:
+	*puiPagesFreed -= uiMaxPagesToFree;
+	_DumpPoolStructure();
+	return eError;
+}
+
+/* Get a certain number of pages from the page pool and
+ * copy them directly into a given page array. */
+static void
+_GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags,
+						  IMG_UINT32 uiMaxNumPages,
+						  struct page **ppsPageArray,
+						  IMG_UINT32 *puiNumReceivedPages)
+{
+	LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+	struct list_head *psPoolHead = NULL;
+	IMG_UINT32 i;
+	IMG_UINT32 *puiCounter;
+
+	*puiNumReceivedPages = 0;
+
+	/* Get the correct list for this caching mode */
+	if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter))
+	{
+		return;
+	}
+
+	/* Check if there are actually items in the list */
+	if (list_empty(psPoolHead))
+	{
+		return;
+	}
+
+	PVR_ASSERT(*puiCounter > 0);
+
+	/* Receive pages from the pool */
+	list_for_each_entry_safe(psPagePoolEntry,
+							 psTempPoolEntry,
+							 psPoolHead,
+							 sPagePoolItem)
+	{
+		/* Get the pages from this pool entry */
+		for (i = psPagePoolEntry->uiItemsRemaining; i != 0 && *puiNumReceivedPages < uiMaxNumPages; i--)
+		{
+			ppsPageArray[*puiNumReceivedPages] = psPagePoolEntry->ppsPageArray[i-1];
+			(*puiNumReceivedPages)++;
+			psPagePoolEntry->uiItemsRemaining--;
+		}
+
+		/* If this pool entry is exhausted, delete it */
+		if (psPagePoolEntry->uiItemsRemaining == 0)
+		{
+			OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+			list_del(&psPagePoolEntry->sPagePoolItem);
+			kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+		}
+
+		/* Return if we have all our pages */
+		if (*puiNumReceivedPages == uiMaxNumPages)
+		{
+			goto exit_ok;
+		}
+	}
+
+exit_ok:
+
+	/* Update counters */
+	*puiCounter -= *puiNumReceivedPages;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	/* MemStats usually relies on having the bridge lock held, however
+	 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+	 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+	 * the page pool lock is used to ensure these calls are mutually
+	 * exclusive
+	 */
+	PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * (*puiNumReceivedPages));
+#endif
+
+	_DumpPoolStructure();
+	return;
+}
+
+/* Same as _GetPagesFromPoolUnlocked but handles locking and
+ * checks first whether pages from the pool are a valid option. */
+static inline void
+_GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode,
+						IMG_UINT32 ui32CPUCacheFlags,
+						IMG_UINT32 uiPagesToAlloc,
+						IMG_UINT32 uiOrder,
+						IMG_BOOL bZero,
+						struct page **ppsPageArray,
+						IMG_UINT32 *puiPagesFromPool)
+{
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+	PVR_UNREFERENCED_PARAMETER(bZero);
+#else
+	/* Don't get pages from pool if it doesn't provide zeroed pages */
+	if (bZero)
+	{
+		return;
+	}
+#endif
+
+	/* The page pool stores only order 0 pages. If we need zeroed memory we
+	 * directly allocate from the OS because it is faster than
+	 * doing it within the driver. */
+	if (uiOrder == 0 &&
+	    !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags))
+	{
+
+		_PagePoolLock();
+		_GetPagesFromPoolUnlocked(ui32CPUCacheFlags,
+								  uiPagesToAlloc,
+								  ppsPageArray,
+								  puiPagesFromPool);
+		_PagePoolUnlock();
+	}
+
+	return;
+}
+
+/* Takes a page array and maps it into the kernel to write zeros */
+static PVRSRV_ERROR
+_ZeroPageArray(IMG_UINT32 uiNumToClean,
+               struct page **ppsCleanArray,
+               pgprot_t pgprot)
+{
+	IMG_CPU_VIRTADDR pvAddr;
+	IMG_UINT32 uiMaxPagesToMap = PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES;
+
+	/* Map and fill the pages with zeros.
+	 * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES
+	 * at a time. */
+	while (uiNumToClean != 0)
+	{
+		IMG_UINT32 uiToClean = (uiNumToClean >= uiMaxPagesToMap) ?
+		                        uiMaxPagesToMap :
+		                        uiNumToClean;
+
+		pvAddr = vm_map_ram(ppsCleanArray, uiToClean, -1, pgprot);
+		if (!pvAddr)
+		{
+			if (uiMaxPagesToMap <= 1)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Out of vmalloc memory, "
+						"unable to map pages for zeroing.",
+						__func__));
+				return PVRSRV_ERROR_OUT_OF_MEMORY;
+			}
+			else
+			{
+				/* Halve the pages to map at once and try again. */
+				uiMaxPagesToMap = uiMaxPagesToMap >> 1;
+				continue;
+			}
+		}
+
+		OSDeviceMemSet(pvAddr, 0, PAGE_SIZE * uiToClean);
+		vm_unmap_ram(pvAddr, uiToClean);
+
+		ppsCleanArray = &(ppsCleanArray[uiToClean]);
+		uiNumToClean -= uiToClean;
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_CleanupThread_CleanPages(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	LinuxCleanupData *psCleanupData = (LinuxCleanupData*) pvData;
+	LinuxPagePoolEntry *psPagePoolEntry = psCleanupData->psPoolEntry;
+	struct list_head *psPoolHead = NULL;
+	pgprot_t pgprot;
+	IMG_UINT32 *puiCounter = NULL;
+
+
+	/* Get the correct pool for this caching mode. */
+	_GetPoolListHead(psCleanupData->ui32CPUCacheMode , &psPoolHead, &puiCounter);
+
+	switch(PVRSRV_CPU_CACHE_MODE(psCleanupData->ui32CPUCacheMode))
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+#if defined(CONFIG_X86)
+			/* For x86 we can only map with the same attributes
+			 * as in the PAT settings*/
+			pgprot = pgprot_noncached(PAGE_KERNEL);
+			break;
+#endif
+
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+			pgprot = pgprot_writecombine(PAGE_KERNEL);
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Unknown caching mode to set page protection flags.",
+					__func__));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto eExit;
+	}
+
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+	/* Map and fill the pages with zeros.
+	 * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES
+	 * at a time. */
+	eError = _ZeroPageArray(psPagePoolEntry->uiItemsRemaining,
+	                        psPagePoolEntry->ppsPageArray,
+	                        pgprot);
+	if (eError != PVRSRV_OK)
+	{
+		goto eExit;
+	}
+#endif
+
+
+	/* Lock down pool and add item */
+	_PagePoolLock();
+
+	/* Pool counters were already updated so don't do it here again */
+
+	list_add_tail(&psPagePoolEntry->sPagePoolItem, psPoolHead);
+
+	_DumpPoolStructure();
+	_PagePoolUnlock();
+
+	OSFreeMem(pvData);
+	OSAtomicDecrement(&g_iPoolCleanTasks);
+
+	return PVRSRV_OK;
+
+eExit:
+	/* If this was the last retry, give up and free pages to OS */
+	if ((psCleanupData->sCleanupWork.ui32RetryCount - 1) == 0)
+	{
+		IMG_UINT32 i;
+
+		PVR_DPF((PVR_DBG_ERROR,
+						"%s: Deferred task error, freeing pages to OS.",
+						__func__));
+		_PagePoolLock();
+
+		*puiCounter -= psPagePoolEntry->uiItemsRemaining;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+		/* MemStats usually relies on having the bridge lock held, however
+		 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+		 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+		 * the page pool lock is used to ensure these calls are mutually
+		 * exclusive
+		 */
+		PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * psCleanupData->psPoolEntry->uiItemsRemaining);
+#endif
+
+		_PagePoolUnlock();
+
+		for (i = 0; i < psCleanupData->psPoolEntry->uiItemsRemaining; i++)
+		{
+			_FreeOSPage(0,
+						IMG_TRUE,
+						psPagePoolEntry->ppsPageArray[i]);
+		}
+		OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+		kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+		OSFreeMem(psCleanupData);
+
+		OSAtomicDecrement(&g_iPoolCleanTasks);
+	}
+
+	return eError;
+}
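+
+/* Deferred-clean flow, in short: the cleanup framework (outside this hunk,
+ * assumed to decrement ui32RetryCount and requeue the work item when this
+ * callback fails) retries _CleanupThread_CleanPages() until it succeeds or
+ * the retries are exhausted; on the final attempt (ui32RetryCount == 1) the
+ * entry is torn down and its pages go back to the OS instead of the pool. */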
+
+
+/* Put a page array into the page pool.
+ * Handles locking and checks whether the pages are
+ * suitable to be stored in the pool. */
+static inline IMG_BOOL
+_PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags,
+					  struct page **ppsPageArray,
+					  IMG_BOOL bUnpinned,
+					  IMG_UINT32 uiOrder,
+					  IMG_UINT32 uiNumPages)
+{
+	LinuxCleanupData *psCleanupData;
+	PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn;
+
+	if (uiOrder == 0 &&
+		!bUnpinned &&
+		!PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags))
+	{
+		IMG_UINT32 uiEntries;
+		IMG_UINT32 *puiCounter;
+		struct list_head *psPoolHead;
+
+		_PagePoolLock();
+
+		uiEntries = _PagesInPoolUnlocked();
+
+		/* Check for number of current page pool entries and whether
+		 * we have other asynchronous tasks in-flight */
+		if ((uiEntries < g_ui32PagePoolMaxEntries) &&
+		    ((uiEntries + uiNumPages) <
+		     (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries)))
+		{
+			if (OSAtomicIncrement(&g_iPoolCleanTasks) <=
+					PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS)
+			{
+				psCleanupData = OSAllocMem(sizeof(*psCleanupData));
+
+				if (!psCleanupData)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s: Failed to get memory for deferred page pool cleanup. "
+							 "Trying to free pages immediately",
+							 __func__));
+					goto eDecrement;
+				}
+
+				psCleanupThreadFn = &psCleanupData->sCleanupWork;
+				psCleanupData->ui32CPUCacheMode = ui32CPUCacheFlags;
+				psCleanupData->psPoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL);
+
+				if (!psCleanupData->psPoolEntry)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s: Failed to get memory for deferred page pool cleanup. "
+							 "Trying to free pages immediately",
+							 __func__));
+					goto eFreeCleanupData;
+				}
+
+				if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter))
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "%s: Failed to get correct page pool",
+							 __func__));
+					goto eFreePoolEntry;
+				}
+
+				/* Increase counter here to avoid deferred cleanup tasks piling up */
+				*puiCounter = *puiCounter + uiNumPages;
+
+				psCleanupData->psPoolEntry->ppsPageArray = ppsPageArray;
+				psCleanupData->psPoolEntry->uiItemsRemaining = uiNumPages;
+
+				psCleanupThreadFn->pfnFree = _CleanupThread_CleanPages;
+				psCleanupThreadFn->pvData = psCleanupData;
+				psCleanupThreadFn->ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT;
+				psCleanupThreadFn->bDependsOnHW = IMG_FALSE;
+
+	#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+				/* MemStats usually relies on having the bridge lock held, however
+				 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+				 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+				 * the page pool lock is used to ensure these calls are mutually
+				 * exclusive
+				 */
+				PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * uiNumPages);
+	#endif
+
+				/* We must not hold the pool lock when calling AddWork because
+				 * it might call us back to free pooled pages directly when
+				 * unloading the driver */
+				_PagePoolUnlock();
+
+				PVRSRVCleanupThreadAddWork(psCleanupThreadFn);
+
+			}
+			else
+			{
+				goto eDecrement;
+			}
+
+		}
+		else
+		{
+			goto eUnlock;
+		}
+	}
+	else
+	{
+		goto eExitFalse;
+	}
+
+	return IMG_TRUE;
+
+eFreePoolEntry:
+	OSFreeMem(psCleanupData->psPoolEntry);
+eFreeCleanupData:
+	OSFreeMem(psCleanupData);
+eDecrement:
+	OSAtomicDecrement(&g_iPoolCleanTasks);
+eUnlock:
+	_PagePoolUnlock();
+eExitFalse:
+	return IMG_FALSE;
+}
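+
+/* Pool admission rules implemented above, summarised: only order-0, pinned,
+ * non-CPU-cached page arrays are pooled; the pool must currently be below
+ * g_ui32PagePoolMaxEntries, the insert must stay within the excess
+ * allowance, and no more than PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS clean
+ * tasks may be in flight. Any other case returns IMG_FALSE and the caller
+ * frees the pages directly. */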
+
+/* Get the GFP flags that we pass to the page allocator */
+static inline unsigned int
+_GetGFPFlags(IMG_BOOL bZero,
+             PVRSRV_DEVICE_NODE *psDevNode)
+{
+	struct device *psDev = psDevNode->psDevConfig->pvOSDevice;
+	unsigned int gfp_flags = GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC;
+
+#if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM)
+	/* Force use of HIGHMEM */
+	gfp_flags |= __GFP_HIGHMEM;
+
+	PVR_UNREFERENCED_PARAMETER(psDev);
+#else
+
+	if (psDev && *psDev->dma_mask > DMA_BIT_MASK(32))
+	{
+		/* If our system is able to handle large addresses use highmem */
+		gfp_flags |= __GFP_HIGHMEM;
+	}
+	else if (psDev && *psDev->dma_mask == DMA_BIT_MASK(32))
+	{
+		/* Limit to 32 bit.
+		 * Achieved by NOT setting __GFP_HIGHMEM for 32 bit systems and
+		 * setting __GFP_DMA32 for 64 bit systems */
+		gfp_flags |= __GFP_DMA32;
+	}
+	else if (psDev)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: DMA bit mask has unexpected format %#llx. "
+				 "Expected at least 32 bits. "
+				 "Please check your configuration.",
+				 __func__,
+				 (IMG_UINT64) *psDev->dma_mask));
+	}
+#endif
+
+	if (bZero)
+	{
+		gfp_flags |= __GFP_ZERO;
+	}
+
+	return gfp_flags;
+}
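+
+/* Example flag combinations produced above (when
+ * PVR_LINUX_PHYSMEM_USE_HIGHMEM is not defined): a device with a 32-bit DMA
+ * mask gets GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_DMA32, a
+ * device with a wider mask gets __GFP_HIGHMEM instead of __GFP_DMA32, and
+ * __GFP_ZERO is OR-ed in whenever bZero is set. */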
+
+/* Poison a page of order uiOrder with string taken from pacPoisonData */
+static void
+_PoisonPages(PVRSRV_DEVICE_NODE *psDevNode,
+             struct page *page,
+			 IMG_UINT32 uiOrder,
+			 const IMG_CHAR *pacPoisonData,
+			 size_t uiPoisonSize)
+{
+	void *kvaddr;
+	IMG_UINT32 uiSrcByteIndex;
+	IMG_UINT32 uiDestByteIndex;
+	IMG_UINT32 uiSubPageIndex;
+	IMG_CHAR *pcDest;
+	IMG_CPU_PHYADDR sCPUPhysAddrStart;
+	IMG_CPU_PHYADDR sCPUPhysAddrEnd;
+
+	uiSrcByteIndex = 0;
+	for (uiSubPageIndex = 0; uiSubPageIndex < (1U << uiOrder); uiSubPageIndex++)
+	{
+		kvaddr = kmap(page + uiSubPageIndex);
+		pcDest = kvaddr;
+
+		for (uiDestByteIndex = 0; uiDestByteIndex < PAGE_SIZE; uiDestByteIndex++)
+		{
+			pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
+			uiSrcByteIndex++;
+			if (uiSrcByteIndex == uiPoisonSize)
+			{
+				uiSrcByteIndex = 0;
+			}
+		}
+
+		/* Flush the sub-page just poisoned; the physical range must track
+		 * the current sub-page, not the base page of the compound page */
+		sCPUPhysAddrStart.uiAddr = page_to_phys(page + uiSubPageIndex);
+		sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE;
+
+		OSCPUCacheFlushRangeKM(psDevNode,
+		                       kvaddr,
+		                       kvaddr + PAGE_SIZE,
+		                       sCPUPhysAddrStart,
+		                       sCPUPhysAddrEnd);
+
+		kunmap(page + uiSubPageIndex);
+	}
+}
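+
+/* The poison string is tiled byte-by-byte across every OS page of the
+ * allocation (wrapping at uiPoisonSize), and each sub-page is flushed so
+ * the pattern actually reaches memory rather than sitting in the CPU
+ * cache. */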
+
+static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
+static const IMG_UINT32 _AllocPoisonSize = 7;
+static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
+static const IMG_UINT32 _FreePoisonSize = 11;
+
+/* Allocate and initialise the structure to hold the metadata of the allocation */
+static PVRSRV_ERROR
+_AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode,
+				  PMR_SIZE_T uiChunkSize,
+				  IMG_UINT32 ui32NumPhysChunks,
+				  IMG_UINT32 ui32NumVirtChunks,
+				  IMG_UINT32 uiLog2AllocPageSize,
+				  IMG_BOOL bZero,
+				  IMG_BOOL bIsCMA,
+				  IMG_BOOL bPoisonOnAlloc,
+				  IMG_BOOL bPoisonOnFree,
+				  IMG_BOOL bOnDemand,
+				  IMG_UINT32 ui32CPUCacheFlags,
+				  PMR_OSPAGEARRAY_DATA **ppsPageArrayDataPtr)
+{
+	PVRSRV_ERROR eError;
+	PMR_SIZE_T uiSize = uiChunkSize * ui32NumVirtChunks;
+	IMG_UINT32 uiNumOSPageSizeVirtPages;
+	IMG_UINT32 uiNumDevPageSizeVirtPages;
+	PMR_OSPAGEARRAY_DATA *psPageArrayData;
+	PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+
+	/* Use of cast below is justified by the assertion that follows to
+	 * prove that no significant bits have been truncated */
+	uiNumOSPageSizeVirtPages = (IMG_UINT32) (((uiSize - 1) >> PAGE_SHIFT) + 1);
+	PVR_ASSERT(((PMR_SIZE_T) uiNumOSPageSizeVirtPages << PAGE_SHIFT) == uiSize);
+
+	uiNumDevPageSizeVirtPages = uiNumOSPageSizeVirtPages >> (uiLog2AllocPageSize - PAGE_SHIFT);
+
+	/* Allocate the struct to hold the metadata */
+	psPageArrayData = kmem_cache_alloc(g_psLinuxPageArray, GFP_KERNEL);
+	if (psPageArrayData == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: OS refused the memory allocation for the private data.",
+				 __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e_freed_none;
+	}
+
+	/*
+	 * Allocate the page array
+	 *
+	 * We avoid tracking this memory because this structure might go into the page pool.
+	 * The OS can drain the pool asynchronously and when doing that we have to avoid
+	 * any potential deadlocks.
+	 *
+	 * In one scenario the process stats vmalloc hash table lock is held and
+	 * then the oom-killer softirq tries to call _ScanObjectsInPagePool();
+	 * it must not try to acquire the vmalloc hash table lock again.
+	 */
+	psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumDevPageSizeVirtPages);
+	if (psPageArrayData->pagearray == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e_free_kmem_cache;
+	}
+	else
+	{
+		if (bIsCMA)
+		{
+			/* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */
+			psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumDevPageSizeVirtPages);
+			if (psPageArrayData->dmavirtarray == NULL)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e_free_pagearray;
+			}
+
+			psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumDevPageSizeVirtPages);
+			if (psPageArrayData->dmaphysarray == NULL)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto e_free_cpuvirtaddrarray;
+			}
+		}
+	}
+
+	/* Init metadata */
+	psPageArrayData->psDevNode = psDevNode;
+	psPageArrayData->iNumOSPagesAllocated = 0;
+	psPageArrayData->uiTotalNumOSPages = uiNumOSPageSizeVirtPages;
+	psPageArrayData->uiLog2AllocPageSize = uiLog2AllocPageSize;
+	psPageArrayData->bZero = bZero;
+	psPageArrayData->bIsCMA = bIsCMA;
+	psPageArrayData->bOnDemand = bOnDemand;
+	psPageArrayData->bUnpinned = IMG_FALSE;
+	psPageArrayData->bPoisonOnFree = bPoisonOnFree;
+	psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
+	psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags;
+
+	/* Indicate whether this is an allocation with default caching attribute (i.e. cached) or not */
+	if (PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags) ||
+		PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags))
+	{
+		psPageArrayData->bUnsetMemoryType = IMG_TRUE;
+	}
+	else
+	{
+		psPageArrayData->bUnsetMemoryType = IMG_FALSE;
+	}
+
+	*ppsPageArrayDataPtr = psPageArrayData;
+	return PVRSRV_OK;
+
+/* Error path */
+e_free_cpuvirtaddrarray:
+	OSFreeMemNoStats(psPageArrayData->dmavirtarray);
+
+e_free_pagearray:
+	OSFreeMemNoStats(psPageArrayData->pagearray);
+
+e_free_kmem_cache:
+	kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
+	PVR_DPF((PVR_DBG_ERROR,
+			 "%s: OS refused the memory allocation for the page pointer table. "
+			 "Did you ask for too much?",
+			 __func__));
+
+e_freed_none:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
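+
+/* Size relationship used above, by example (illustrative numbers): with
+ * 16 KiB device pages (uiLog2AllocPageSize == 14) and 4 KiB OS pages
+ * (PAGE_SHIFT == 12), a 1 MiB allocation gives 256 OS-page-size virtual
+ * pages but only 64 device-page-size entries in the metadata arrays. */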
+
+static inline void
+_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode,
+					   struct page **ppsPage,
+					   IMG_UINT32 uiNumPages,
+					   IMG_BOOL bFlush)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_RETRY;
+	void *pvAddr;
+
+	if ((uiNumPages << PAGE_SHIFT) >= PVR_DIRTY_BYTES_FLUSH_THRESHOLD)
+	{
+		/* May fail so fallback to range-based flush */
+		eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		if (OSCPUCacheOpAddressType() == PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+		{
+			pgprot_t pgprot = PAGE_KERNEL;
+
+			IMG_UINT32 uiNumToClean = uiNumPages;
+			struct page **ppsCleanArray = ppsPage;
+
+			/* Map and flush the pages.
+			 * For large page arrays do it in batches of
+			 * PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES pages at a time. */
+			while (uiNumToClean != 0)
+			{
+				IMG_UINT32 uiToClean = (uiNumToClean >= PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES) ?
+											PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES :
+											uiNumToClean;
+				IMG_CPU_PHYADDR sUnused = {(uintptr_t)0xCAFEF00DDEADBEEF};
+
+				pvAddr = vm_map_ram(ppsCleanArray, uiToClean, -1, pgprot);
+				if (!pvAddr)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							"Unable to flush page cache for new allocation, skipping flush."));
+					return;
+				}
+
+				CacheOpExecKM(psDevNode,
+							  pvAddr,
+							  pvAddr + PAGE_SIZE * uiToClean,
+							  sUnused,
+							  sUnused,
+							  PVRSRV_CACHE_OP_FLUSH);
+
+				vm_unmap_ram(pvAddr, uiToClean);
+
+				ppsCleanArray = &(ppsCleanArray[uiToClean]);
+				uiNumToClean -= uiToClean;
+			}
+		}
+		else
+		{
+			IMG_UINT32 ui32Idx;
+
+			for (ui32Idx = 0; ui32Idx < uiNumPages; ++ui32Idx)
+			{
+				IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd;
+
+				pvAddr = kmap(ppsPage[ui32Idx]);
+				sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]);
+				sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE;
+
+				/* If we're zeroing, we need to make sure the cleared memory is pushed out
+				 * of the cache before the cache lines are invalidated */
+				CacheOpExecKM(psDevNode,
+							  pvAddr,
+							  pvAddr + PAGE_SIZE,
+							  sCPUPhysAddrStart,
+							  sCPUPhysAddrEnd,
+							  PVRSRV_CACHE_OP_FLUSH);
+
+				kunmap(ppsPage[ui32Idx]);
+			}
+		}
+	}
+}
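+
+/* Maintenance strategy above, summarised: once the allocation crosses
+ * PVR_DIRTY_BYTES_FLUSH_THRESHOLD a global cache flush is attempted first;
+ * otherwise, or if that fails, a range-based flush is performed, batched
+ * through vm_map_ram() where cache ops take virtual addresses, or page by
+ * page via kmap() where physical addresses are required. */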
+
+/* Change the caching attribute of pages on x86 systems and take care of
+ * cache maintenance. This function is supposed to be called once for pages that
+ * came from alloc_pages(). It expects an array of OS page sized pages!
+ *
+ * Flush/Invalidate pages in case the allocation is not cached. Necessary to
+ * remove pages from the cache that might be flushed later and corrupt memory. */
+static inline PVRSRV_ERROR
+_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode,
+					   struct page **ppsPage,
+					   IMG_UINT32 uiNumPages,
+					   IMG_BOOL bFlush,
+					   IMG_UINT32 ui32CPUCacheFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bCPUCached = PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags);
+	IMG_BOOL bCPUUncached = PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags);
+	IMG_BOOL bCPUWriteCombine = PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags);
+
+	if (ppsPage != NULL && uiNumPages != 0)
+	{
+#if defined(CONFIG_X86)
+		/* On x86 we have to set page cache attributes for non-cached pages.
+		 * The call is implicitly taking care of all flushing/invalidating
+		 * and therefore we can skip the usual cache maintenance after this. */
+		if (bCPUUncached || bCPUWriteCombine)
+		{
+			/* On x86 if we already have a mapping (e.g. low memory) we need
+			 * to change the mode of the current mapping before we map it
+			 * ourselves */
+			int ret = 0;
+			PVR_UNREFERENCED_PARAMETER(bFlush);
+
+			switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
+			{
+				case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+					ret = set_pages_array_uc(ppsPage, uiNumPages);
+					if (ret)
+					{
+						eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+						PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to UC failed, returned %d", ret));
+					}
+					break;
+
+				case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+					ret = set_pages_array_wc(ppsPage, uiNumPages);
+					if (ret)
+					{
+						eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+						PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to WC failed, returned %d", ret));
+					}
+					break;
+
+				case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+					break;
+
+				default:
+					break;
+			}
+		}
+		else
+#endif
+		{
+			if (bFlush ||
+				bCPUUncached || bCPUWriteCombine ||
+				(bCPUCached && PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags)))
+			{
+				/* We can be given pages which still remain in the cache.
+				 * In order to make sure that the data we write through our
+				 * mappings doesn't get overwritten by later cache evictions
+				 * we invalidate the pages that are given to us.
+				 *
+				 * Note:
+				 * This still seems to be true if we request cold pages, it's
+				 * just less likely to be in the cache. */
+				_ApplyCacheMaintenance(psDevNode,
+									   ppsPage,
+									   uiNumPages,
+									   bFlush);
+			}
+		}
+	}
+
+	return eError;
+}
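+
+/* On x86 the set_pages_array_uc()/set_pages_array_wc() calls above adjust
+ * the attributes of the kernel's existing linear mapping and, as noted,
+ * take care of the flushing themselves; every other architecture falls back
+ * to the explicit maintenance in _ApplyCacheMaintenance(). */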
+
+/* Same as _AllocOSPage except it uses the DMA framework to perform the allocation.
+ * uiPageIndex is expected to be the pagearray index at which to store the higher order page. */
+static PVRSRV_ERROR
+_AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+				unsigned int gfp_flags,
+				IMG_UINT32 ui32AllocOrder,
+				IMG_UINT32 ui32MinOrder,
+				IMG_UINT32 uiPageIndex)
+{
+	void *virt_addr;
+	struct page *page;
+	dma_addr_t bus_addr;
+	IMG_UINT32 uiAllocIsMisaligned;
+	size_t alloc_size = PAGE_SIZE << ui32AllocOrder;
+	PVR_ASSERT(ui32AllocOrder == ui32MinOrder);
+
+	do
+	{
+		DisableOOMKiller();
+#if defined(PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC)
+		virt_addr = NULL;
+#else
+		virt_addr = dma_alloc_coherent(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+									  alloc_size,
+									  &bus_addr,
+									  gfp_flags);
+#endif
+		if (virt_addr == NULL)
+		{
+			/* The idea here is primarily to support some older kernels with
+			   broken or non-functioning DMA/CMA implementations (< Linux-3.4)
+			   and also to handle DMA/CMA allocation failures by attempting a
+			   normal page allocation. We expect dma_alloc_coherent() already
+			   attempts this internally before failing, but it does no harm
+			   to retry the allocation ourselves */
+			page = alloc_pages(gfp_flags, ui32AllocOrder);
+			if (page)
+			{
+				/* Taint bus_addr as alloc_page, needed when freeing;
+				   also acquire the low memory page address only, this
+				   prevents mapping possible high memory pages into
+				   kernel virtual address space which might exhaust
+				   the VMALLOC address space */
+				bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page));
+				virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE;
+			}
+			else
+			{
+				return PVRSRV_ERROR_OUT_OF_MEMORY;
+			}
+		}
+		else
+		{
+			page = pfn_to_page(bus_addr >> PAGE_SHIFT);
+		}
+		EnableOOMKiller();
+
+		/* Physical allocation alignment is handled transparently behind the
+		   scenes: if the allocated buffer address does not meet its alignment
+		   requirement, we over-allocate using the next power-of-two order and
+		   report aligned-adjusted values back to meet the requested alignment
+		   constraint. Evidently we waste memory by doing this, so we only do
+		   so if we do not initially meet the alignment constraint. */
+		uiAllocIsMisaligned = DMA_GET_ADDR(bus_addr) & ((PAGE_SIZE<<ui32MinOrder)-1);
+		if (uiAllocIsMisaligned || ui32AllocOrder > ui32MinOrder)
+		{
+			IMG_BOOL bUsedAllocPages = DMA_IS_ALLOCPG_ADDR(bus_addr);
+			if (ui32AllocOrder == ui32MinOrder)
+			{
+				if (bUsedAllocPages)
+				{
+					__free_pages(page, ui32AllocOrder);
+				}
+				else
+				{
+					dma_free_coherent(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+									  alloc_size,
+									  virt_addr,
+									  bus_addr);
+				}
+
+				ui32AllocOrder = ui32AllocOrder + 1;
+				alloc_size = PAGE_SIZE << ui32AllocOrder;
+
+				PVR_ASSERT(uiAllocIsMisaligned != 0);
+			}
+			else
+			{
+				size_t align_adjust = PAGE_SIZE << ui32MinOrder;
+
+				/* Adjust virtual/bus addresses to meet alignment */
+				bus_addr = bUsedAllocPages ? page_to_phys(page) : bus_addr;
+				align_adjust = PVR_ALIGN((size_t)bus_addr, align_adjust);
+				align_adjust -= (size_t)bus_addr;
+
+				if (align_adjust)
+				{
+					if (bUsedAllocPages)
+					{
+						page += align_adjust >> PAGE_SHIFT;
+						bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page));
+						virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE;
+					}
+					else
+					{
+						bus_addr += align_adjust;
+						virt_addr += align_adjust;
+						page = pfn_to_page(bus_addr >> PAGE_SHIFT);
+					}
+
+					/* Store adjustments in PAGE_SIZE counts */
+					align_adjust = align_adjust >> PAGE_SHIFT;
+					bus_addr = DMA_SET_ALIGN_ADJUSTMENT(bus_addr, align_adjust);
+				}
+
+				/* Taint bus_addr due to over-allocation, allows us to free
+				 * memory correctly */
+				bus_addr = DMA_SET_ADJUSTED_ADDR(bus_addr);
+				uiAllocIsMisaligned = 0;
+			}
+		}
+	} while (uiAllocIsMisaligned);
+
+	/* Convert OSPageSize-based index into DevicePageSize-based index */
+	psPageArrayData->dmavirtarray[uiPageIndex] = virt_addr;
+	psPageArrayData->dmaphysarray[uiPageIndex] = bus_addr;
+	psPageArrayData->pagearray[uiPageIndex] = page;
+
+	return PVRSRV_OK;
+}
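+
+/* Worked example of the alignment fix-up above (illustrative values,
+ * assuming 4 KiB OS pages and a 64 KiB device page, ui32MinOrder == 4):
+ * if the first attempt lands at bus address 0x10004000, the low bits make
+ * it misaligned, so it is freed and retried at order 5 (128 KiB). Within
+ * the larger buffer the next 64 KiB boundary is chosen, the page and bus
+ * address are advanced by align_adjust pages, and the adjustment is encoded
+ * into bus_addr (DMA_SET_ALIGN_ADJUSTMENT/DMA_SET_ADJUSTED_ADDR) so that
+ * _FreeOSPage_CMA() can undo it when freeing. */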
+
+/* Allocate a page of order uiAllocOrder and store it in the page array ppsPage at
+ * position uiPageIndex.
+ *
+ * If the order is higher than 0, the page is split into multiple order-0 pages
+ * which are stored at positions uiPageIndex to uiPageIndex+(1<<uiAllocOrder)-1.
+ *
+ * This function is supposed to be used for uiMinOrder == 0 only! */
+static PVRSRV_ERROR
+_AllocOSPage(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+			unsigned int gfp_flags,
+			IMG_UINT32 uiAllocOrder,
+			IMG_UINT32 uiMinOrder,
+			IMG_UINT32 uiPageIndex)
+{
+	struct page *psPage;
+	IMG_UINT32 ui32Count;
+
+	/* Sanity check. If it fails we write into the wrong places in the array. */
+	PVR_ASSERT(uiMinOrder == 0);
+
+	/* Allocate the page */
+	DisableOOMKiller();
+	psPage = alloc_pages(gfp_flags, uiAllocOrder);
+	EnableOOMKiller();
+
+	if (psPage == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+	/* Split the higher order page if necessary;
+	   splitting this way is only used for order-0 device page allocations,
+	   as higher order device allocations should use DMA/CMA */
+	if (uiAllocOrder != 0)
+	{
+		split_page(psPage, uiAllocOrder);
+	}
+#endif
+
+	/* Store the page (or multiple split pages) in the page array */
+	for (ui32Count = 0; ui32Count < (1 << uiAllocOrder); ui32Count++)
+	{
+		psPageArrayData->pagearray[uiPageIndex + ui32Count] = &(psPage[ui32Count]);
+	}
+
+	return PVRSRV_OK;
+}
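+
+/* Example of the splitting above: one order-2 alloc_pages() call returns
+ * 4 physically contiguous OS pages; split_page() turns them into four
+ * independently refcounted order-0 pages which are stored at
+ * pagearray[uiPageIndex] .. pagearray[uiPageIndex + 3]. */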
+
+/* Allocation of OS pages: We may allocate 2^N order pages at a time for two reasons.
+ *
+ * Firstly to support device pages which are larger than OS pages. By asking the OS for 2^N
+ * order OS pages at a time we guarantee the device page is contiguous.
+ *
+ * Secondly for performance where we may ask for 2^N order pages to reduce the number
+ * of calls to alloc_pages, and thus reduce time for huge allocations.
+ *
+ * Regardless of page order requested, we need to break them down to track _OS pages.
+ * The maximum order requested is increased if all max order allocations were successful.
+ * If any request fails we reduce the max order.
+ */
+static PVRSRV_ERROR
+_AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 uiArrayIndex = 0;
+	IMG_UINT32 ui32Order;
+	IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+	IMG_BOOL bIncreaseMaxOrder = IMG_TRUE;
+
+	IMG_UINT32 ui32NumPageReq;
+	IMG_UINT32 uiPagesToAlloc;
+	IMG_UINT32 uiPagesFromPool = 0;
+
+	unsigned int gfp_flags = _GetGFPFlags(ui32MinOrder ? psPageArrayData->bZero : IMG_FALSE, /* Zero all pages later as batch */
+	                                      psPageArrayData->psDevNode);
+	IMG_UINT32 ui32GfpFlags;
+	IMG_UINT32 ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY);
+
+	struct page **ppsPageArray = psPageArrayData->pagearray;
+	struct page **ppsPageAttributeArray = NULL;
+
+	uiPagesToAlloc = psPageArrayData->uiTotalNumOSPages;
+
+	/* Try to get pages from the pool since it is faster;
+	   the page pool currently only supports zero-order pages
+	   thus currently excludes all DMA/CMA allocated memory */
+	_GetPagesFromPoolLocked(psPageArrayData->psDevNode,
+							psPageArrayData->ui32CPUCacheFlags,
+							uiPagesToAlloc,
+							ui32MinOrder,
+							psPageArrayData->bZero,
+							ppsPageArray,
+							&uiPagesFromPool);
+
+	uiArrayIndex = uiPagesFromPool;
+
+	if ((uiPagesToAlloc - uiPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD)
+	{	/* Small allocations: ask for one device page at a time */
+		ui32Order = ui32MinOrder;
+		bIncreaseMaxOrder = IMG_FALSE;
+	}
+	else
+	{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+		/* Large zero-order or non-zero-order allocations, ask for
+		   MAX(max-order, min-order) order pages at a time; alloc
+		   failures throttle this down to zero-order allocations */
+		ui32Order = MAX(g_uiMaxOrder, ui32MinOrder);
+#else
+		/* Because split_pages() is not available on older kernels
+		   we cannot mix-and-match any-order pages in the PMR;
+		   only same-order pages must be present in page array.
+		   So we unconditionally force it to use ui32MinOrder on
+		   these older kernels */
+		ui32Order = ui32MinOrder;
+#endif
+	}
+
+	/* Only if we are asking for more contiguity than we actually need, let the allocation fail */
+	ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+	ui32NumPageReq = (1 << ui32Order);
+
+	while (uiArrayIndex < uiPagesToAlloc)
+	{
+		IMG_UINT32 ui32PageRemain = uiPagesToAlloc - uiArrayIndex;
+
+		while (ui32NumPageReq > ui32PageRemain)
+		{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+			/* Pages to request is larger than that remaining
+			   so ask for less so never over allocate */
+			ui32Order = MAX(ui32Order >> 1, ui32MinOrder);
+#else
+			/* Pages to request is larger than that remaining so
+			   do nothing thus over allocate as we do not support
+			   mix/match of any-order pages in PMR page-array in
+			   older kernels (simplifies page free logic) */
+			PVR_ASSERT(ui32Order == ui32MinOrder);
+#endif
+			ui32NumPageReq = (1 << ui32Order);
+			ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+		}
+
+		if (psPageArrayData->bIsCMA)
+		{
+			/* As the DMA/CMA framework rounds-up request to the
+			   next power-of-two, we request multiple uiMinOrder
+			   pages to satisfy allocation request in order to
+			   minimise wasting memory */
+			eError = _AllocOSPage_CMA(psPageArrayData,
+									   ui32GfpFlags,
+									   ui32Order,
+									   ui32MinOrder,
+									   uiArrayIndex >> ui32MinOrder);
+		}
+		else
+		{
+			/* Allocate 2^ui32Order OS pages at uiArrayIndex */
+			eError = _AllocOSPage(psPageArrayData,
+								  ui32GfpFlags,
+								  ui32Order,
+								  ui32MinOrder,
+								  uiArrayIndex);
+		}
+
+		if (eError == PVRSRV_OK)
+		{
+			/* Successful request. Move onto next. */
+			uiArrayIndex += ui32NumPageReq;
+		}
+		else
+		{
+			if (ui32Order > ui32MinOrder)
+			{
+				/* Last request failed. Let's ask for less next time */
+				ui32Order = MAX(ui32Order >> 1, ui32MinOrder);
+				bIncreaseMaxOrder = IMG_FALSE;
+				ui32NumPageReq = (1 << ui32Order);
+				ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+				g_uiMaxOrder = ui32Order;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
+				/* We should not trigger this code path in older kernels,
+				   this is enforced by ensuring ui32Order == ui32MinOrder */
+				PVR_ASSERT(ui32Order == ui32MinOrder);
+#endif
+			}
+			else
+			{
+				/* Failed to alloc pages at required contiguity. Failed allocation */
+				PVR_DPF((PVR_DBG_ERROR, "%s: %s failed to honour request at %u of %u, flags = %x, order = %u (%s)",
+								__func__,
+								psPageArrayData->bIsCMA ? "dma_alloc_coherent" : "alloc_pages",
+								uiArrayIndex,
+								uiPagesToAlloc,
+								ui32GfpFlags,
+								ui32Order,
+								PVRSRVGetErrorStringKM(eError)));
+				eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+				goto e_free_pages;
+			}
+		}
+	}
+
+	if (bIncreaseMaxOrder && (g_uiMaxOrder < PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM))
+	{	/* All successful allocations on max order. Let's ask for more next time */
+		g_uiMaxOrder++;
+	}
+
+	/* Construct table of page pointers to apply attributes */
+	ppsPageAttributeArray = &ppsPageArray[uiPagesFromPool];
+	if (psPageArrayData->bIsCMA)
+	{
+		IMG_UINT32 uiIdx, uiIdy, uiIdz;
+
+		ppsPageAttributeArray = OSAllocMem(sizeof(struct page *) * uiPagesToAlloc);
+		if (ppsPageAttributeArray == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed OSAllocMem() for page attributes table"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e_free_pages;
+		}
+
+		for (uiIdx = 0; uiIdx < uiPagesToAlloc; uiIdx += ui32NumPageReq)
+		{
+			uiIdy = uiIdx >> ui32Order;
+			for (uiIdz = 0; uiIdz < ui32NumPageReq; uiIdz++)
+			{
+				ppsPageAttributeArray[uiIdx+uiIdz] = psPageArrayData->pagearray[uiIdy];
+				ppsPageAttributeArray[uiIdx+uiIdz] += uiIdz;
+			}
+		}
+	}
+
+	if (psPageArrayData->bZero && ui32MinOrder == 0)
+	{
+		eError = _ZeroPageArray(uiPagesToAlloc - uiPagesFromPool,
+					   ppsPageAttributeArray,
+					   PAGE_KERNEL);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (fast)"));
+			goto e_free_pages;
+		}
+	}
+
+	/* Do the cache management as required */
+	eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
+									ppsPageAttributeArray,
+									uiPagesToAlloc - uiPagesFromPool,
+									psPageArrayData->bZero,
+									psPageArrayData->ui32CPUCacheFlags);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
+		goto e_free_pages;
+	}
+	else
+	{
+		if (psPageArrayData->bIsCMA)
+		{
+			OSFreeMem(ppsPageAttributeArray);
+		}
+	}
+
+	/* Update metadata */
+	psPageArrayData->iNumOSPagesAllocated = psPageArrayData->uiTotalNumOSPages;
+	return PVRSRV_OK;
+
+/* Error path */
+e_free_pages:
+	{
+		IMG_UINT32 ui32PageToFree;
+
+		if (psPageArrayData->bIsCMA)
+		{
+			IMG_UINT32 uiDevArrayIndex = uiArrayIndex >> ui32Order;
+			IMG_UINT32 uiDevPageSize = PAGE_SIZE << ui32Order;
+			PVR_ASSERT(ui32Order == ui32MinOrder);
+
+			if (ppsPageAttributeArray)
+			{
+				OSFreeMem(ppsPageAttributeArray);
+			}
+
+			for (ui32PageToFree = 0; ui32PageToFree < uiDevArrayIndex; ui32PageToFree++)
+			{
+				_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+								uiDevPageSize,
+								ui32MinOrder,
+								psPageArrayData->dmavirtarray[ui32PageToFree],
+								psPageArrayData->dmaphysarray[ui32PageToFree],
+								ppsPageArray[ui32PageToFree]);
+				psPageArrayData->dmaphysarray[ui32PageToFree] = (dma_addr_t)0;
+				psPageArrayData->dmavirtarray[ui32PageToFree] = NULL;
+				ppsPageArray[ui32PageToFree] = INVALID_PAGE_ADDR;
+			}
+		}
+		else
+		{
+			/* Free the pages we got from the pool */
+			for (ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
+			{
+				_FreeOSPage(ui32MinOrder,
+							psPageArrayData->bUnsetMemoryType,
+							ppsPageArray[ui32PageToFree]);
+				ppsPageArray[ui32PageToFree] = INVALID_PAGE_ADDR;
+			}
+
+			for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < uiArrayIndex; ui32PageToFree++)
+			{
+				_FreeOSPage(ui32MinOrder, IMG_FALSE, ppsPageArray[ui32PageToFree]);
+				ppsPageArray[ui32PageToFree] = INVALID_PAGE_ADDR;
+			}
+		}
+
+		return eError;
+	}
+}
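+
+/* g_uiMaxOrder gives this path a simple adaptive behaviour: a fully
+ * successful pass at the current maximum order raises it by one (capped at
+ * PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM), while any high-order failure
+ * halves the order towards ui32MinOrder, so the request size tracks how
+ * fragmented system memory currently is. */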
+
+/* Allocation of OS pages: This function is used for sparse allocations.
+ *
+ * Sparse allocations provide physical backing for only a portion of the
+ * total virtual range. */
+static PVRSRV_ERROR
+_AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+					 IMG_UINT32 *puiAllocIndices,
+					 IMG_UINT32 uiPagesToAlloc)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+	struct page **ppsPageArray = psPageArrayData->pagearray;
+	IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+	IMG_UINT32 uiPagesFromPool = 0;
+	IMG_UINT32 uiNumOSPagesToAlloc = uiPagesToAlloc * (1 << uiOrder);
+	IMG_UINT32 uiTotalNumAllocPages = psPageArrayData->uiTotalNumOSPages >> uiOrder;
+	unsigned int ui32GfpFlags = _GetGFPFlags(uiOrder ? psPageArrayData->bZero :
+	                                         IMG_FALSE, /* Zero pages later as batch */
+	                                         psPageArrayData->psDevNode);
+
+	/* We use this page array to receive pages from the pool and then reuse it
+	 * afterwards to store pages that need their cache attribute changed on x86 */
+	struct page **ppsTempPageArray;
+	IMG_UINT32 uiTempPageArrayIndex = 0;
+
+	/* Allocate the temporary page array that we need here to receive pages
+	 * from the pool and to store pages that need their caching attributes changed.
+	 * Allocate number of OS pages to be able to use the attribute function later. */
+	ppsTempPageArray = OSAllocMem(sizeof(struct page*) * uiNumOSPagesToAlloc);
+	if (ppsTempPageArray == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed metadata allocation", __FUNCTION__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e_exit;
+	}
+
+	/* Check that the requested number of pages fits in the page array */
+	if (uiTotalNumAllocPages <
+			((psPageArrayData->iNumOSPagesAllocated >> uiOrder) + uiPagesToAlloc))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Trying to allocate more pages (Order %u) than this buffer can handle; "
+				 "Request + Allocated must not exceed Max. Request %u, Allocated %u, Max %u.",
+				 __func__,
+				 uiOrder,
+				 uiPagesToAlloc,
+				 psPageArrayData->iNumOSPagesAllocated >> uiOrder,
+				 uiTotalNumAllocPages));
+		eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+		goto e_free_temp_array;
+	}
+
+	/* Try to get pages from the pool since it is faster */
+	_GetPagesFromPoolLocked(psPageArrayData->psDevNode,
+							psPageArrayData->ui32CPUCacheFlags,
+							uiPagesToAlloc,
+							uiOrder,
+							psPageArrayData->bZero,
+							ppsTempPageArray,
+							&uiPagesFromPool);
+
+	/* Allocate pages from the OS or move the pages that we got from the pool
+	 * to the page array */
+	for (i = 0; i < uiPagesToAlloc; i++)
+	{
+		/* Check if the indices we are allocating are in range */
+		if (puiAllocIndices[i] >= uiTotalNumAllocPages)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Alloc index %u at position %u is larger than the page array size %u.",
+					 __func__,
+					 puiAllocIndices[i],
+					 i,
+					 uiTotalNumAllocPages));
+			eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+			goto e_free_pages;
+		}
+
+		/* Check if there is not already a page allocated at this position */
+		if (INVALID_PAGE_ADDR != ppsPageArray[puiAllocIndices[i]])
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Mapping number %u at page array index %u already exists. "
+					 "Page struct %p",
+					 __func__,
+					 i,
+					 puiAllocIndices[i],
+					 ppsPageArray[puiAllocIndices[i]]));
+			eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
+			goto e_free_pages;
+		}
+
+		/* Finally assign a page to the array.
+		 * Either from the pool or allocate a new one. */
+		if (uiPagesFromPool != 0)
+		{
+			uiPagesFromPool--;
+			ppsPageArray[puiAllocIndices[i]] = ppsTempPageArray[uiPagesFromPool];
+		}
+		else
+		{
+			if (psPageArrayData->bIsCMA)
+			{
+				/* As the DMA/CMA framework rounds-up request to the
+				   next power-of-two, we request multiple uiMinOrder
+				   pages to satisfy allocation request in order to
+				   minimise wasting memory */
+				eError = _AllocOSPage_CMA(psPageArrayData,
+										   ui32GfpFlags,
+										   uiOrder,
+										   uiOrder,
+										   puiAllocIndices[i]);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "Failed to alloc CMA pages"));
+					goto e_free_pages;
+				}
+			}
+			else
+			{
+				DisableOOMKiller();
+				ppsPageArray[puiAllocIndices[i]] = alloc_pages(ui32GfpFlags, uiOrder);
+				EnableOOMKiller();
+			}
+
+			if (ppsPageArray[puiAllocIndices[i]] != NULL)
+			{
+				/* Reuse the temp page array now that it holds no more pool pages */
+
+				if (psPageArrayData->bIsCMA)
+				{
+					IMG_UINT32 idx;
+					struct page* psPageAddr;
+
+					psPageAddr = ppsPageArray[puiAllocIndices[i]];
+
+					for (idx = 0; idx < (1 << uiOrder); idx++)
+					{
+						ppsTempPageArray[uiTempPageArrayIndex + idx] = psPageAddr;
+						psPageAddr++;
+					}
+					uiTempPageArrayIndex += (1 << uiOrder);
+				}
+				else
+				{
+					ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[puiAllocIndices[i]];
+					uiTempPageArrayIndex++;
+				}
+			}
+			else
+			{
+				/* Failed to alloc pages at required contiguity. Failed allocation */
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u",
+						 __func__,
+						 i,
+						 uiPagesToAlloc,
+						 ui32GfpFlags,
+						 uiOrder));
+				eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+				goto e_free_pages;
+			}
+		}
+	}
+
+	if (psPageArrayData->bZero && uiOrder == 0)
+	{
+		eError = _ZeroPageArray(uiTempPageArrayIndex,
+		                        ppsTempPageArray,
+		                        PAGE_KERNEL);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (sparse)"));
+			goto e_free_pages;
+		}
+	}
+
+	/* Do the cache management as required */
+	eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
+									ppsTempPageArray,
+									uiTempPageArrayIndex,
+									psPageArrayData->bZero,
+									psPageArrayData->ui32CPUCacheFlags);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
+		goto e_free_pages;
+	}
+
+	/* Update metadata */
+	psPageArrayData->iNumOSPagesAllocated += uiNumOSPagesToAlloc;
+
+	/* Free temporary page array */
+	OSFreeMem(ppsTempPageArray);
+	return PVRSRV_OK;
+
+/* Error path */
+e_free_pages:
+	{
+		IMG_UINT32 ui32PageToFree;
+
+		if (psPageArrayData->bIsCMA)
+		{
+			IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;
+
+			for (ui32PageToFree = 0; ui32PageToFree < i; ui32PageToFree++)
+			{
+				_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+								uiDevPageSize,
+								uiOrder,
+								psPageArrayData->dmavirtarray[puiAllocIndices[ui32PageToFree]],
+								psPageArrayData->dmaphysarray[puiAllocIndices[ui32PageToFree]],
+								ppsPageArray[puiAllocIndices[ui32PageToFree]]);
+				psPageArrayData->dmaphysarray[puiAllocIndices[ui32PageToFree]] = (dma_addr_t)0;
+				psPageArrayData->dmavirtarray[puiAllocIndices[ui32PageToFree]] = NULL;
+				ppsPageArray[puiAllocIndices[ui32PageToFree]] = (struct page *) INVALID_PAGE_ADDR;
+			}
+		}
+		else
+		{
+			/* Free the pages we got from the pool */
+			for (ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
+			{
+				_FreeOSPage(0,
+							psPageArrayData->bUnsetMemoryType,
+							ppsTempPageArray[ui32PageToFree]);
+			}
+
+			/* Free the pages we just allocated from the OS */
+			for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < i; ui32PageToFree++)
+			{
+				_FreeOSPage(0,
+							IMG_FALSE,
+							ppsPageArray[puiAllocIndices[ui32PageToFree]]);
+			}
+
+			/* Reset all page array entries that have been set so far */
+			for (ui32PageToFree = 0; ui32PageToFree < i; ui32PageToFree++)
+			{
+				ppsPageArray[puiAllocIndices[ui32PageToFree]] = (struct page *) INVALID_PAGE_ADDR;
+			}
+		}
+	}
+
+e_free_temp_array:
+	OSFreeMem(ppsTempPageArray);
+
+e_exit:
+	return eError;
+}
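+
+/* Contrast with the fast path: _AllocOSPages_Fast() fills the page array
+ * densely from index 0, whereas the sparse path above only populates the
+ * entries named in puiAllocIndices and therefore has to validate each index
+ * and reject slots that are already backed. */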
+
+/* Allocate pages for a given page array.
+ *
+ * The allocation path executed depends on whether an array with allocation
+ * indices has been passed or not */
+static PVRSRV_ERROR
+_AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+			  IMG_UINT32 *puiAllocIndices,
+			  IMG_UINT32 uiPagesToAlloc)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+	struct page **ppsPageArray;
+
+	/* Sanity checks */
+	PVR_ASSERT(NULL != psPageArrayData);
+	if (psPageArrayData->bIsCMA)
+	{
+		PVR_ASSERT(psPageArrayData->dmaphysarray != NULL);
+		PVR_ASSERT(psPageArrayData->dmavirtarray != NULL);
+	}
+	PVR_ASSERT(psPageArrayData->pagearray != NULL);
+	PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated);
+
+	ppsPageArray = psPageArrayData->pagearray;
+
+	/* Take the sparse alloc path if we have an array with alloc indices. */
+	if (puiAllocIndices != NULL)
+	{
+		eError = _AllocOSPages_Sparse(psPageArrayData,
+									  puiAllocIndices,
+									  uiPagesToAlloc);
+	}
+	else
+	{
+		eError = _AllocOSPages_Fast(psPageArrayData);
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e_exit;
+	}
+
+	if (psPageArrayData->bPoisonOnAlloc)
+	{
+		for (i = 0; i < uiPagesToAlloc; i++)
+		{
+			IMG_UINT32 uiIdx = puiAllocIndices ? puiAllocIndices[i] : i;
+			_PoisonPages(psPageArrayData->psDevNode,
+			             ppsPageArray[uiIdx],
+			             psPageArrayData->uiLog2AllocPageSize,
+			             _AllocPoison,
+			             _AllocPoisonSize);
+		}
+	}
+
+	_DumpPageArray(ppsPageArray,
+	               psPageArrayData->uiTotalNumOSPages >>
+					   (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) );
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	{
+		for (i = 0; i < uiPagesToAlloc; i++)
+		{
+			IMG_CPU_PHYADDR sCPUPhysAddr;
+			IMG_UINT32 uiIdx = puiAllocIndices ? puiAllocIndices[i] : i;
+
+			sCPUPhysAddr.uiAddr = page_to_phys(ppsPageArray[uiIdx]);
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+										 NULL,
+										 sCPUPhysAddr,
+										 1 << psPageArrayData->uiLog2AllocPageSize,
+										 NULL);
+		}
+	}
+#else
+	PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+	                            uiPagesToAlloc * (1<<psPageArrayData->uiLog2AllocPageSize));
+#endif
+#endif
+
+	PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData));
+	return PVRSRV_OK;
+
+e_exit:
+	return eError;
+}
+
+/* Same as _FreeOSPage except free memory using DMA framework */
+static INLINE void
+_FreeOSPage_CMA(struct device *dev,
+				size_t alloc_size,
+				IMG_UINT32 uiOrder,
+				void *virt_addr,
+				dma_addr_t dev_addr,
+				struct page *psPage)
+{
+	if (DMA_IS_ALLOCPG_ADDR(dev_addr))
+	{
+#if defined(CONFIG_X86)
+		void *pvPageVAddr = page_address(psPage);
+		if (pvPageVAddr)
+		{
+			int ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
+			if (ret)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Failed to reset page attribute",
+						__func__));
+			}
+		}
+#endif
+
+		if (DMA_IS_ADDR_ADJUSTED(dev_addr))
+		{
+			psPage -= DMA_GET_ALIGN_ADJUSTMENT(dev_addr);
+			uiOrder += 1;
+		}
+
+		__free_pages(psPage, uiOrder);
+	}
+	else
+	{
+		if (DMA_IS_ADDR_ADJUSTED(dev_addr))
+		{
+			size_t align_adjust;
+
+			align_adjust = DMA_GET_ALIGN_ADJUSTMENT(dev_addr);
+			alloc_size = alloc_size << 1;
+
+			dev_addr = DMA_GET_ADDR(dev_addr);
+			dev_addr -= align_adjust << PAGE_SHIFT;
+			virt_addr -= align_adjust << PAGE_SHIFT;
+		}
+
+		dma_free_coherent(dev, alloc_size, virt_addr, DMA_GET_ADDR(dev_addr));
+	}
+}
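+
+/* dev_addr doubles as a small tagged value: DMA_IS_ALLOCPG_ADDR()
+ * distinguishes the alloc_pages() fallback from a genuine
+ * dma_alloc_coherent() buffer, and DMA_IS_ADDR_ADJUSTED() together with
+ * DMA_GET_ALIGN_ADJUSTMENT() recovers any over-allocation made for
+ * alignment, so the exact original allocation is released on either
+ * path. */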
+
+/* Free a single page back to the OS.
+ * Make sure the cache type is set back to the default value.
+ *
+ * Note:
+ * We must _only_ check bUnsetMemoryType in the case where we need to free
+ * the page back to the OS since we may have to revert the cache properties
+ * of the page to the default as given by the OS when it was allocated. */
+static void
+_FreeOSPage(IMG_UINT32 uiOrder,
+			IMG_BOOL bUnsetMemoryType,
+			struct page *psPage)
+{
+
+#if defined(CONFIG_X86)
+	void *pvPageVAddr;
+	pvPageVAddr = page_address(psPage);
+
+	if (pvPageVAddr && bUnsetMemoryType == IMG_TRUE)
+	{
+		int ret;
+
+		ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
+		if (ret)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute", __func__));
+		}
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType);
+#endif
+	__free_pages(psPage, uiOrder);
+}
+
+/* Free the struct holding the metadata */
+static PVRSRV_ERROR
+_FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+	PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData));
+
+	/* Check if the page array actually still exists.
+	 * It might be the case that it has been moved to the page pool */
+	if (psPageArrayData->pagearray != NULL)
+	{
+		OSFreeMemNoStats(psPageArrayData->pagearray);
+	}
+
+	kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
+
+	return PVRSRV_OK;
+}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+/* _FreeOSPages_MemStats: Depends on the bridge lock already being held */
+static void
+_FreeOSPages_MemStats(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+					IMG_UINT32 *pai32FreeIndices,
+					IMG_UINT32 ui32NumPages)
+{
+	struct page **ppsPageArray;
+	#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	IMG_UINT32 ui32PageIndex;
+	#endif
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+			"%s: psPageArrayData %p, ui32NumPages %u",
+			__func__,
+			psPageArrayData,
+			ui32NumPages));
+	PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0);
+
+	ppsPageArray = psPageArrayData->pagearray;
+
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+	                            ui32NumPages * (1<<psPageArrayData->uiLog2AllocPageSize) );
+#else
+	for (ui32PageIndex = 0; ui32PageIndex < ui32NumPages; ui32PageIndex++)
+	{
+		IMG_CPU_PHYADDR sCPUPhysAddr;
+		IMG_UINT32 uiArrayIndex = (pai32FreeIndices) ?
+				pai32FreeIndices[ui32PageIndex] : ui32PageIndex;
+
+		sCPUPhysAddr.uiAddr = page_to_phys(ppsPageArray[uiArrayIndex]);
+		PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+		                                sCPUPhysAddr.uiAddr);
+	}
+#endif
+}
+#endif /* PVRSRV_ENABLE_PROCESS_STATS */
+
+/* Free all or some pages from a sparse page array */
+static PVRSRV_ERROR
+_FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+					IMG_UINT32 *pai32FreeIndices,
+					IMG_UINT32 ui32FreePageCount)
+{
+	IMG_BOOL bSuccess;
+	IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+	IMG_UINT32 uiPageIndex, i, j, uiTempIdx = 0;
+	struct page **ppsPageArray = psPageArrayData->pagearray;
+	IMG_UINT32 uiNumPages;
+
+	struct page **ppsTempPageArray;
+	IMG_UINT32 uiTempArraySize;
+
+	/* We really should have something to free before we call this */
+	PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0);
+
+	if (pai32FreeIndices == NULL)
+	{
+		uiNumPages = psPageArrayData->uiTotalNumOSPages >> uiOrder;
+		uiTempArraySize = psPageArrayData->iNumOSPagesAllocated;
+	}
+	else
+	{
+		uiNumPages = ui32FreePageCount;
+		uiTempArraySize = ui32FreePageCount << uiOrder;
+	}
+
+	/* Poison if necessary */
+	if (psPageArrayData->bPoisonOnFree)
+	{
+		for (i = 0; i < uiNumPages; i++)
+		{
+			IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
+			if (INVALID_PAGE_ADDR != ppsPageArray[idx])
+			{
+				_PoisonPages(psPageArrayData->psDevNode,
+				             ppsPageArray[idx],
+				             uiOrder,
+				             _FreePoison,
+				             _FreePoisonSize);
+			}
+		}
+	}
+
+	if (psPageArrayData->bIsCMA)
+	{
+		IMG_UINT32 uiDevNumPages = uiNumPages;
+		IMG_UINT32 uiDevPageSize = 1<<psPageArrayData->uiLog2AllocPageSize;
+
+		for (i = 0; i < uiDevNumPages; i++)
+		{
+			IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
+			if (INVALID_PAGE_ADDR != ppsPageArray[idx])
+			{
+				_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+								uiDevPageSize,
+								uiOrder,
+								psPageArrayData->dmavirtarray[idx],
+								psPageArrayData->dmaphysarray[idx],
+								ppsPageArray[idx]);
+				psPageArrayData->dmaphysarray[idx] = (dma_addr_t)0;
+				psPageArrayData->dmavirtarray[idx] = NULL;
+				ppsPageArray[idx] = INVALID_PAGE_ADDR;
+				uiTempIdx++;
+			}
+		}
+	}
+	else
+	{
+		/* OSAllocMemNoStats required because this code may be run without the bridge lock held */
+		ppsTempPageArray = OSAllocMemNoStats(sizeof(struct page*) * uiTempArraySize);
+		if (ppsTempPageArray == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __func__));
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+		/* Put pages in a contiguous array so further processing is easier */
+		for (i = 0; i < uiNumPages; i++)
+		{
+			uiPageIndex = pai32FreeIndices ? pai32FreeIndices[i] : i;
+			if (INVALID_PAGE_ADDR != ppsPageArray[uiPageIndex])
+			{
+				struct page *psPage = ppsPageArray[uiPageIndex];
+
+				for (j = 0; j < (1<<uiOrder); j++)
+				{
+					ppsTempPageArray[uiTempIdx] = psPage;
+					uiTempIdx++;
+					psPage++;
+				}
+
+				ppsPageArray[uiPageIndex] = (struct page *) INVALID_PAGE_ADDR;
+			}
+		}
+
+		/* Try to move the temp page array to the pool */
+		bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
+										 ppsTempPageArray,
+										 psPageArrayData->bUnpinned,
+										 uiOrder,
+										 uiTempIdx);
+		if (bSuccess)
+		{
+			goto exit_ok;
+		}
+
+		/* Free pages and reset page caching attributes on x86 */
+#if defined(CONFIG_X86)
+		if (uiTempIdx != 0 && psPageArrayData->bUnsetMemoryType == IMG_TRUE)
+		{
+			int iError;
+			iError = set_pages_array_wb(ppsTempPageArray, uiTempIdx);
+
+			if (iError)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __func__));
+			}
+		}
+#endif
+
+		/* Free the pages */
+		for (i = 0; i < uiTempIdx; i++)
+		{
+			__free_pages(ppsTempPageArray[i], uiOrder);
+		}
+
+		/* Free the temp page array here if it did not move to the pool */
+		OSFreeMemNoStats(ppsTempPageArray);
+	}
+
+exit_ok:
+	/* Update metadata */
+	psPageArrayData->iNumOSPagesAllocated -= (uiTempIdx << uiOrder);
+	PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated);
+	return PVRSRV_OK;
+}
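+
+/* The temporary array above flattens the sparse set of entries into a
+ * dense list of struct page pointers, which is the shape that both the
+ * page pool insert and the x86 set_pages_array_wb() attribute reset expect
+ * to consume. */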
+
+/* Free all the pages in a page array */
+static PVRSRV_ERROR
+_FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+	IMG_BOOL bSuccess;
+	IMG_UINT32 uiOrder;
+	IMG_UINT32 i = 0;
+	IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumOSPages;
+
+	struct page **ppsPageArray = psPageArrayData->pagearray;
+	uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+
+	/* We really should have something to free before we call this */
+	PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0);
+
+	/* Try to move the page array to the pool */
+	bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
+									 ppsPageArray,
+									 psPageArrayData->bUnpinned,
+									 uiOrder,
+									 uiNumPages);
+	if (bSuccess)
+	{
+		psPageArrayData->pagearray = NULL;
+		goto exit_ok;
+	}
+
+	/* Poison pages if necessary */
+	if (psPageArrayData->bPoisonOnFree)
+	{
+		for (i = 0; i < uiNumPages; i++)
+		{
+			_PoisonPages(psPageArrayData->psDevNode,
+			             ppsPageArray[i],
+			             psPageArrayData->uiLog2AllocPageSize,
+			             _FreePoison,
+			             _FreePoisonSize);
+		}
+	}
+
+	if (psPageArrayData->bIsCMA)
+	{
+		IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder;
+		IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;
+
+		for (i = 0; i < uiDevNumPages; i++)
+		{
+			_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+							uiDevPageSize,
+							uiOrder,
+							psPageArrayData->dmavirtarray[i],
+							psPageArrayData->dmaphysarray[i],
+							ppsPageArray[i]);
+			psPageArrayData->dmaphysarray[i] = (dma_addr_t)0;
+			psPageArrayData->dmavirtarray[i] = NULL;
+			ppsPageArray[i] = INVALID_PAGE_ADDR;
+		}
+	}
+	else
+	{
+#if defined(CONFIG_X86)
+		if (psPageArrayData->bUnsetMemoryType == IMG_TRUE)
+		{
+			int ret;
+
+			ret = set_pages_array_wb(ppsPageArray, uiNumPages);
+			if (ret)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __func__));
+			}
+		}
+#endif
+
+		for (i = 0; i < uiNumPages; i++)
+		{
+			_FreeOSPage(uiOrder, IMG_FALSE, ppsPageArray[i]);
+			ppsPageArray[i] = INVALID_PAGE_ADDR;
+		}
+	}
+
+exit_ok:
+	/* Update metadata */
+	psPageArrayData->iNumOSPagesAllocated = 0;
+	return PVRSRV_OK;
+}
+
+/* Free pages from a page array.
+ * Takes care of mem stats and chooses correct free path depending on parameters. */
+static PVRSRV_ERROR
+_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+			 IMG_UINT32 *pai32FreeIndices,
+			 IMG_UINT32 ui32FreePageCount)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 uiNumPages;
+
+	/* Check how many pages we have to free */
+	if (pai32FreeIndices == NULL)
+	{
+		uiNumPages = psPageArrayData->iNumOSPagesAllocated >>
+						(psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT);
+	}
+	else
+	{
+		uiNumPages = ui32FreePageCount;
+	}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	_FreeOSPages_MemStats(psPageArrayData, pai32FreeIndices, uiNumPages);
+#endif
+
+	/* Go the sparse or non-sparse path */
+	if (psPageArrayData->iNumOSPagesAllocated != psPageArrayData->uiTotalNumOSPages
+		|| pai32FreeIndices != NULL)
+	{
+		eError = _FreeOSPages_Sparse(psPageArrayData,
+									 pai32FreeIndices,
+									 uiNumPages);
+	}
+	else
+	{
+		eError = _FreeOSPages_Fast(psPageArrayData);
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to free pages", __func__));
+	}
+
+	_DumpPageArray(psPageArrayData->pagearray,
+	               psPageArrayData->uiTotalNumOSPages >>
+					   (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) );
+
+	return eError;
+}
+
+/*
+ *
+ * Implementation of callback functions
+ *
+ */
+
+/* Destructor func is called after last reference disappears, but
+ * before PMR itself is freed. */
+static PVRSRV_ERROR
+PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVRSRV_ERROR eError;
+	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+	/* We can't free pages until now. */
+	if (psOSPageArrayData->iNumOSPagesAllocated != 0)
+	{
+		_PagePoolLock();
+		if (psOSPageArrayData->bUnpinned == IMG_TRUE)
+		{
+			_RemoveUnpinListEntryUnlocked(psOSPageArrayData);
+		}
+		_PagePoolUnlock();
+
+		eError = _FreeOSPages(psOSPageArrayData,
+							  NULL,
+							  0);
+		PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+	}
+
+	eError = _FreeOSPagesArray(psOSPageArrayData);
+	PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+	return PVRSRV_OK;
+}
+
+/* Callback function for locking the system physical page addresses.
+ * This function must be called before the lookup address func. */
+static PVRSRV_ERROR
+PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PVRSRV_ERROR eError;
+	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+	if (psOSPageArrayData->bOnDemand)
+	{
+		/* Allocate Memory for deferred allocation */
+		eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+	/* Just drops the refcount. */
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+	if (psOSPageArrayData->bOnDemand)
+	{
+		/* Free Memory for deferred allocation */
+		eError = _FreeOSPages(psOSPageArrayData,
+							  NULL,
+							  0);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	PVR_ASSERT (eError == PVRSRV_OK);
+	return eError;
+}
+
+/* N.B. It is assumed that PMRLockSysPhysAddressesOSMem() is called _before_ this function! */
+static PVRSRV_ERROR
+PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv,
+					IMG_UINT32 ui32Log2PageSize,
+					IMG_UINT32 ui32NumOfPages,
+					IMG_DEVMEM_OFFSET_T *puiOffset,
+					IMG_BOOL *pbValid,
+					IMG_DEV_PHYADDR *psDevPAddr)
+{
+	const PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+	IMG_UINT32 uiPageSize = 1U << psOSPageArrayData->uiLog2AllocPageSize;
+	IMG_UINT32 uiInPageOffset;
+	IMG_UINT32 uiPageIndex;
+	IMG_UINT32 uiIdx;
+
+	if (psOSPageArrayData->uiLog2AllocPageSize < ui32Log2PageSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Requested physical addresses from PMR "
+		         "for incompatible contiguity %u!",
+		         __func__,
+		         ui32Log2PageSize));
+		return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+	}
+
+	for (uiIdx = 0; uiIdx < ui32NumOfPages; uiIdx++)
+	{
+		if (pbValid[uiIdx])
+		{
+			uiPageIndex = puiOffset[uiIdx] >> psOSPageArrayData->uiLog2AllocPageSize;
+			uiInPageOffset = puiOffset[uiIdx] - ((IMG_DEVMEM_OFFSET_T)uiPageIndex << psOSPageArrayData->uiLog2AllocPageSize);
+
+			PVR_ASSERT(uiPageIndex < psOSPageArrayData->uiTotalNumOSPages);
+			PVR_ASSERT(uiInPageOffset < uiPageSize);
+
+			psDevPAddr[uiIdx].uiAddr = page_to_phys(psOSPageArrayData->pagearray[uiPageIndex]);
+			psDevPAddr[uiIdx].uiAddr += uiInPageOffset;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+typedef struct _PMR_OSPAGEARRAY_KERNMAP_DATA_ {
+	void *pvBase;
+	IMG_UINT32 ui32PageCount;
+} PMR_OSPAGEARRAY_KERNMAP_DATA;
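+
+/* The handle returned by PMRAcquireKernelMappingDataOSMem() wraps the
+ * mapping base address and page count, presumably so the matching release
+ * callback (not part of this hunk) can pass the original base and size
+ * back to vunmap()/vm_unmap_ram(). */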
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
+								 size_t uiOffset,
+								 size_t uiSize,
+								 void **ppvKernelAddressOut,
+								 IMG_HANDLE *phHandleOut,
+								 PMR_FLAGS_T ulFlags)
+{
+	PVRSRV_ERROR eError;
+	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+	void *pvAddress;
+	pgprot_t prot = PAGE_KERNEL;
+	IMG_UINT32 ui32PageOffset = 0;
+	size_t uiMapOffset = 0;
+	IMG_UINT32 ui32PageCount = 0;
+	IMG_UINT32 uiLog2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize;
+	IMG_UINT32 uiOSPageShift = OSGetPageShift();
+	IMG_UINT32 uiMapPageCount = 0;
+	IMG_INT32 uiPageSizeDiff = 0;
+	struct page **pagearray;
+	PMR_OSPAGEARRAY_KERNMAP_DATA *psData;
+
+	/* For cases where the device page size is greater than the OS page size,
+	 * multiple physically contiguous OS pages constitute one device page.
+	 * However, only the first page address of such an ensemble is stored
+	 * as part of the mapping table. Hence, when mapping the PMR in part or
+	 * in full, all OS pages that constitute each device page must also be
+	 * mapped into the kernel.
+	 *
+	 * For the case where the device page size is less than the OS page size,
+	 * treat it as if the page sizes were equal */
+	if (uiLog2AllocPageSize > uiOSPageShift)
+	{
+		uiPageSizeDiff = uiLog2AllocPageSize - uiOSPageShift;
+	}
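+	/* Illustrative example: 16 KB device pages (uiLog2AllocPageSize == 14) on
+	 * a 4 KB-page OS (uiOSPageShift == 12) give uiPageSizeDiff == 2, i.e. each
+	 * device page is backed by 1 << 2 == 4 physically contiguous OS pages. */
+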
+	/*
+		A zero offset and size have a special meaning: map in the whole
+		of the PMR. This is because the callers of this callback might
+		not be able to determine the physical size.
+	*/
+	if ((uiOffset == 0) && (uiSize == 0))
+	{
+		ui32PageOffset = 0;
+		uiMapOffset = 0;
+		ui32PageCount = psOSPageArrayData->iNumOSPagesAllocated;
+	}
+	else
+	{
+		size_t uiEndoffset;
+
+		ui32PageOffset = uiOffset >> uiLog2AllocPageSize;
+		uiMapOffset = uiOffset - (ui32PageOffset << uiLog2AllocPageSize);
+		uiEndoffset = uiOffset + uiSize - 1;
+		/* Add one as we want the count, not the offset */
+		ui32PageCount = (uiEndoffset >> uiLog2AllocPageSize) + 1;
+		ui32PageCount -= ui32PageOffset;
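+
+		/* Illustrative example: with 4 KB pages, uiOffset == 0x3000 and
+		 * uiSize == 0x2000 give ui32PageOffset == 3, uiMapOffset == 0 and
+		 * ui32PageCount == ((0x4FFF >> 12) + 1) - 3 == 2. */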
+	}
+
+	/* The page count to be mapped might be different if the
+	 * OS page size is smaller than the device page size */
+	uiMapPageCount = ui32PageCount << uiPageSizeDiff;
+
+	switch (PVRSRV_CPU_CACHE_MODE(psOSPageArrayData->ui32CPUCacheFlags))
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+				prot = pgprot_noncached(prot);
+				break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+				prot = pgprot_writecombine(prot);
+				break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+				break;
+
+		default:
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto e0;
+	}
+
+	if (uiPageSizeDiff)
+	{
+		/* Map all the individual OS pages that make up each device page. */
+		IMG_UINT32 ui32Temp = 0;
+		struct page **psPage = &psOSPageArrayData->pagearray[ui32PageOffset];
+
+		/* Allocate enough memory for the page pointers for this mapping */
+		pagearray = OSAllocMem(uiMapPageCount * sizeof(pagearray[0]));
+
+		if (pagearray == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		/* Construct the array that holds the page pointers that constitute
+		 * the requested mapping */
+		while (ui32Temp < ui32PageCount)
+		{
+			IMG_UINT32 ui32SubPage = 0, ui32SubPageCount = (1 << uiPageSizeDiff);
+
+			/* Fill in the page pointers for the sub pages that constitute a single
+			 * device page */
+			for (; ui32SubPage < ui32SubPageCount; ui32SubPage++)
+			{
+				pagearray[(ui32Temp * ui32SubPageCount) + ui32SubPage] = psPage[ui32Temp] + ui32SubPage;
+			}
+			ui32Temp++;
+		}
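+
+		/* pagearray[] now holds uiMapPageCount OS page pointers, laid out
+		 * device page by device page, ready for vmap()/vm_map_ram() below. */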
+	}
+	else
+	{
+		pagearray = &psOSPageArrayData->pagearray[ui32PageOffset];
+	}
+
+	psData = OSAllocMem(sizeof(*psData));
+	if (psData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+
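+	/* Note (assumption): vm_map_ram() is typically faster than vmap() but can
+	 * fragment the scarce vmalloc space on 32-bit kernels, which would explain
+	 * the slower vmap() default selected by the build flags below. */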
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+
+	pvAddress = vmap(pagearray,
+						uiMapPageCount,
+					 VM_READ | VM_WRITE,
+					 prot);
+#else
+
+	pvAddress = vm_map_ram(pagearray,
+							uiMapPageCount,
+						   -1,
+						   prot);
+#endif
+	if (pvAddress == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e2;
+	}
+
+	*ppvKernelAddressOut = pvAddress + uiMapOffset;
+	psData->pvBase = pvAddress;
+	psData->ui32PageCount = uiMapPageCount;
+	*phHandleOut = psData;
+
+	if (uiPageSizeDiff)
+	{
+		OSFreeMem(pagearray);
+	}
+
+	return PVRSRV_OK;
+
+	/*
+	  error exit paths follow
+	*/
+ e2:
+	OSFreeMem(psData);
+ e1:
+	if (uiPageSizeDiff)
+	{
+		OSFreeMem(pagearray);
+	}
+ e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static void PMRReleaseKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
+											 IMG_HANDLE hHandle)
+{
+	PMR_OSPAGEARRAY_KERNMAP_DATA *psData = hHandle;
+	PVR_UNREFERENCED_PARAMETER(pvPriv);
+
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+	vunmap(psData->pvBase);
+#else
+	vm_unmap_ram(psData->pvBase, psData->ui32PageCount);
+#endif
+	OSFreeMem(psData);
+}
+
+static
+PVRSRV_ERROR PMRUnpinOSMem(PMR_IMPL_PRIVDATA pPriv)
+{
+	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Lock down the pool and add the array to the unpin list */
+	_PagePoolLock();
+
+	/* Sanity check */
+	PVR_ASSERT(psOSPageArrayData->bUnpinned == IMG_FALSE);
+	PVR_ASSERT(psOSPageArrayData->bOnDemand == IMG_FALSE);
+
+	eError = _AddUnpinListEntryUnlocked(psOSPageArrayData);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Not able to add allocation to unpinned list (%d).",
+		         __FUNCTION__,
+		         eError));
+
+		goto e_exit;
+	}
+
+	psOSPageArrayData->bUnpinned = IMG_TRUE;
+
+e_exit:
+	_PagePoolUnlock();
+	return eError;
+}
+
+static
+PVRSRV_ERROR PMRPinOSMem(PMR_IMPL_PRIVDATA pPriv,
+						PMR_MAPPING_TABLE *psMappingTable)
+{
+	PVRSRV_ERROR eError;
+	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
+	IMG_UINT32 *pui32MapTable = NULL;
+	IMG_UINT32 i, j = 0, ui32Temp = 0;
+
+	_PagePoolLock();
+
+	/* Sanity check */
+	PVR_ASSERT(psOSPageArrayData->bUnpinned == IMG_TRUE);
+
+	psOSPageArrayData->bUnpinned = IMG_FALSE;
+
+	/* If there are still pages in the array remove entries from the pool */
+	if (psOSPageArrayData->iNumOSPagesAllocated != 0)
+	{
+		_RemoveUnpinListEntryUnlocked(psOSPageArrayData);
+		_PagePoolUnlock();
+
+		eError = PVRSRV_OK;
+		goto e_exit_mapalloc_failure;
+	}
+	_PagePoolUnlock();
+
+	/* If pages were reclaimed we allocate new ones and
+	 * return PVRSRV_ERROR_PMR_NEW_MEMORY  */
+	if (psMappingTable->ui32NumVirtChunks == 1)
+	{
+		eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages);
+	}
+	else
+	{
+		pui32MapTable = (IMG_UINT32 *)OSAllocMem(sizeof(*pui32MapTable) * psMappingTable->ui32NumPhysChunks);
+		if (NULL == pui32MapTable)
+		{
+			eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Not able to Alloc Map Table.",
+					 __FUNCTION__));
+			goto e_exit_mapalloc_failure;
+		}
+
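+		/* Illustrative example: a translation table of {0, INVALID, 2}
+		 * compacts to pui32MapTable == {0, 2}, i.e. two physical chunks. */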
+		for (i = 0, j = 0; i < psMappingTable->ui32NumVirtChunks; i++)
+		{
+			ui32Temp = psMappingTable->aui32Translation[i];
+			if (TRANSLATION_INVALID != ui32Temp)
+			{
+				pui32MapTable[j++] = ui32Temp;
+			}
+		}
+		eError = _AllocOSPages(psOSPageArrayData, pui32MapTable, psMappingTable->ui32NumPhysChunks);
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Not able to get new pages for unpinned allocation.",
+				 __FUNCTION__));
+
+		eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+		goto e_exit;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+			 "%s: Allocating new pages for unpinned allocation. "
+			 "Old content is lost!",
+			 __FUNCTION__));
+
+	eError = PVRSRV_ERROR_PMR_NEW_MEMORY;
+
+e_exit:
+	OSFreeMem(pui32MapTable);
+e_exit_mapalloc_failure:
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       PMRChangeSparseMemOSMem
+@Description    Changes the sparse mapping by allocating and freeing pages.
+                It also updates the GPU and CPU mappings accordingly.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv,
+						const PMR *psPMR,
+						IMG_UINT32 ui32AllocPageCount,
+						IMG_UINT32 *pai32AllocIndices,
+						IMG_UINT32 ui32FreePageCount,
+						IMG_UINT32 *pai32FreeIndices,
+						IMG_UINT32 uiFlags)
+{
+	PVRSRV_ERROR eError;
+
+	PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR);
+	PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
+	struct page **psPageArray = psPMRPageArrayData->pagearray;
+	void **psDMAVirtArray = psPMRPageArrayData->dmavirtarray;
+	dma_addr_t *psDMAPhysArray = psPMRPageArrayData->dmaphysarray;
+
+	struct page *psPage;
+	dma_addr_t psDMAPAddr;
+	void *pvDMAVAddr;
+
+	IMG_UINT32 ui32AdtnlAllocPages = 0; /*!< Number of pages to alloc from the OS */
+	IMG_UINT32 ui32AdtnlFreePages = 0; /*!< Number of pages to free back to the OS */
+	IMG_UINT32 ui32CommonRequestCount = 0; /*!< Number of pages to move position in the page array */
+	IMG_UINT32 ui32Loop = 0;
+	IMG_UINT32 ui32Index = 0;
+	IMG_UINT32 uiAllocpgidx;
+	IMG_UINT32 uiFreepgidx;
+	IMG_UINT32 uiOrder = psPMRPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+	IMG_BOOL bCMA = psPMRPageArrayData->bIsCMA;
+
+
+	/* Check SPARSE flags and calculate pages to allocate and free */
+	if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
+	{
+		ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ?
+				ui32FreePageCount : ui32AllocPageCount;
+
+		PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
+	}
+
+	if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
+	{
+		ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount;
+	}
+	else
+	{
+		ui32AllocPageCount = 0;
+	}
+
+	if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
+	{
+		ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount;
+	}
+	else
+	{
+		ui32FreePageCount = 0;
+	}
+
+	if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Missing parameters for number of pages to alloc/free",
+		         __FUNCTION__));
+		return eError;
+	}
+
+	/* The incoming request is classified into two operations independent of
+	 * each other: alloc & free pages.
+	 * These operations can be combined with two mapping operations as well
+	 * which are GPU & CPU space mappings.
+	 *
+	 * From the alloc and free page requests, the net amount of pages to be
+	 * allocated or freed is computed. Pages that were requested to be freed
+	 * will be reused to fulfil alloc requests.
+	 *
+	 * The order of operations is:
+	 * 1. Allocate new pages from the OS
+	 * 2. Move the free pages from free request to alloc positions.
+	 * 3. Free the rest of the pages not used for alloc
+	 *
+	 * Alloc parameters are validated at the time of allocation
+	 * and any error will be handled then. */
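+
+	/* Illustrative example: a SPARSE_RESIZE_BOTH request with
+	 * ui32AllocPageCount == 3 and ui32FreePageCount == 2 gives
+	 * ui32CommonRequestCount == 2, ui32AdtnlAllocPages == 1 and
+	 * ui32AdtnlFreePages == 0: one fresh OS page is allocated and the two
+	 * freed pages are reused at the remaining alloc positions. */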
+
+	/* Validate the free indices */
+	if (ui32FreePageCount)
+	{
+		if (NULL != pai32FreeIndices)
+		{
+			for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+			{
+				uiFreepgidx = pai32FreeIndices[ui32Loop];
+
+				if (uiFreepgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder))
+				{
+					eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+					goto e0;
+				}
+
+				if (INVALID_PAGE_ADDR == psPageArray[uiFreepgidx])
+				{
+					eError = PVRSRV_ERROR_INVALID_PARAMS;
+					PVR_DPF((PVR_DBG_ERROR,
+					         "%s: Trying to free non-allocated page",
+					         __FUNCTION__));
+					goto e0;
+				}
+			}
+		}
+		else
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: Given non-zero free count but missing indices array",
+			         __FUNCTION__));
+			return eError;
+		}
+	}
+
+	/* Validate the alloc indices */
+	for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
+	{
+		uiAllocpgidx = pai32AllocIndices[ui32Loop];
+
+		if (uiAllocpgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder))
+		{
+			eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+			goto e0;
+		}
+
+		if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+		{
+			if ((INVALID_PAGE_ADDR != psPageArray[uiAllocpgidx]) ||
+			    (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
+			{
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				PVR_DPF((PVR_DBG_ERROR,
+				         "%s: Trying to allocate already allocated page again",
+				         __FUNCTION__));
+				goto e0;
+			}
+		}
+		else
+		{
+			if ((INVALID_PAGE_ADDR == psPageArray[uiAllocpgidx]) ||
+			    (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]) )
+			{
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				PVR_DPF((PVR_DBG_ERROR,
+				         "%s: Unable to remap memory due to missing page",
+				         __FUNCTION__));
+				goto e0;
+			}
+		}
+	}
+
+	ui32Loop = 0;
+
+	/* Allocate new pages from the OS */
+	if (0 != ui32AdtnlAllocPages)
+	{
+		eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+			         "%s: Additional allocation of pages failed",
+			         __FUNCTION__));
+			goto e0;
+		}
+
+		psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages;
+		/* Mark the corresponding pages of the translation table as valid */
+		for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
+		{
+			psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
+		}
+	}
+
+
+	ui32Index = ui32Loop;
+
+	/* Move the corresponding free pages to alloc request */
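+	/* Illustrative example: with pai32AllocIndices == {4, 5, 6},
+	 * pai32FreeIndices == {1, 2} and ui32AdtnlAllocPages == 1, index 4
+	 * received a fresh page above, and the pages at indices 1 and 2 are now
+	 * moved into indices 5 and 6. */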
+	for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++)
+	{
+		uiAllocpgidx = pai32AllocIndices[ui32Index];
+		uiFreepgidx =  pai32FreeIndices[ui32Loop];
+
+		psPage = psPageArray[uiAllocpgidx];
+		psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
+
+		if (bCMA)
+		{
+			pvDMAVAddr = psDMAVirtArray[uiAllocpgidx];
+			psDMAPAddr = psDMAPhysArray[uiAllocpgidx];
+			psDMAVirtArray[uiAllocpgidx] = psDMAVirtArray[uiFreepgidx];
+			psDMAPhysArray[uiAllocpgidx] = psDMAPhysArray[uiFreepgidx];
+		}
+
+		/* Is remap mem used in a real-world scenario? Should it be turned
+		 * into a debug feature? The condition check needs to be moved out of
+		 * the loop; this will be done later, after some analysis. */
+		if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+		{
+			psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
+			psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+			psPageArray[uiFreepgidx] = (struct page *)INVALID_PAGE_ADDR;
+			if (bCMA)
+			{
+				psDMAVirtArray[uiFreepgidx] = NULL;
+				psDMAPhysArray[uiFreepgidx] = (dma_addr_t)0;
+			}
+		}
+		else
+		{
+			psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
+			psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+			psPageArray[uiFreepgidx] = psPage;
+			if (bCMA)
+			{
+				psDMAVirtArray[uiFreepgidx] = pvDMAVAddr;
+				psDMAPhysArray[uiFreepgidx] = psDMAPAddr;
+			}
+		}
+	}
+
+	/* Free the additional free pages */
+	if (0 != ui32AdtnlFreePages)
+	{
+		eError = _FreeOSPages(psPMRPageArrayData,
+		                      &pai32FreeIndices[ui32Loop],
+		                      ui32AdtnlFreePages);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+		psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages;
+		while (ui32Loop < ui32FreePageCount)
+		{
+			psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Loop]] = TRANSLATION_INVALID;
+			ui32Loop++;
+		}
+	}
+
+	eError = PVRSRV_OK;
+
+e0:
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       PMRChangeSparseMemCPUMapOSMem
+@Description    Updates the CPU mappings of the sparse allocation accordingly
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR PMRChangeSparseMemCPUMapOSMem(PMR_IMPL_PRIVDATA pPriv,
+                                           const PMR *psPMR,
+                                           IMG_UINT64 sCpuVAddrBase,
+                                           IMG_UINT32 ui32AllocPageCount,
+                                           IMG_UINT32 *pai32AllocIndices,
+                                           IMG_UINT32 ui32FreePageCount,
+                                           IMG_UINT32 *pai32FreeIndices)
+{
+	struct page **psPageArray;
+	PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
+	IMG_CPU_PHYADDR sCPUPAddr;
+
+	sCPUPAddr.uiAddr = 0;
+	psPageArray = psPMRPageArrayData->pagearray;
+
+	return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
+	                                   sCpuVAddrBase,
+	                                   sCPUPAddr,
+	                                   ui32AllocPageCount,
+	                                   pai32AllocIndices,
+	                                   ui32FreePageCount,
+	                                   pai32FreeIndices,
+	                                   IMG_FALSE);
+}
+
+static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = {
+	.pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem,
+	.pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem,
+	.pfnDevPhysAddr = &PMRSysPhysAddrOSMem,
+	.pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataOSMem,
+	.pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem,
+	.pfnReadBytes = NULL,
+	.pfnWriteBytes = NULL,
+	.pfnUnpinMem = &PMRUnpinOSMem,
+	.pfnPinMem = &PMRPinOSMem,
+	.pfnChangeSparseMem = &PMRChangeSparseMemOSMem,
+	.pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem,
+	.pfnFinalize = &PMRFinalizeOSMem,
+};
+
+PVRSRV_ERROR
+PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+						 IMG_DEVMEM_SIZE_T uiSize,
+						 IMG_DEVMEM_SIZE_T uiChunkSize,
+						 IMG_UINT32 ui32NumPhysChunks,
+						 IMG_UINT32 ui32NumVirtChunks,
+						 IMG_UINT32 *puiAllocIndices,
+						 IMG_UINT32 uiLog2AllocPageSize,
+						 PVRSRV_MEMALLOCFLAGS_T uiFlags,
+						 const IMG_CHAR *pszAnnotation,
+						 PMR **ppsPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eError2;
+	PMR *psPMR;
+	struct _PMR_OSPAGEARRAY_DATA_ *psPrivData;
+	PMR_FLAGS_T uiPMRFlags;
+	PHYS_HEAP *psPhysHeap;
+	IMG_UINT32 ui32CPUCacheFlags;
+	IMG_BOOL bZero;
+	IMG_BOOL bIsCMA;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bPoisonOnFree;
+	IMG_BOOL bOnDemand;
+	IMG_BOOL bCpuLocal;
+	IMG_BOOL bFwLocal;
+
+	/*
+	 * The host driver (but not guest) can still use this factory for firmware
+	 * allocations
+	 */
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) && PVRSRV_CHECK_FW_LOCAL(uiFlags))
+	{
+		PVR_ASSERT(0);
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errorOnParam;
+	}
+
+	/* Select correct caching mode */
+	eError = DevmemCPUCacheMode(psDevNode, uiFlags, &ui32CPUCacheFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnParam;
+	}
+
+	if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags))
+	{
+		ui32CPUCacheFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN;
+	}
+
+	/*
+	 * Use the CMA framework if the allocation page size is greater than the
+	 * OS page size; please note that OSMMapPMRGeneric() has the same
+	 * expectation as well.
+	 */
+	bIsCMA = uiLog2AllocPageSize > PAGE_SHIFT ? IMG_TRUE : IMG_FALSE;
+	bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bCpuLocal = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bFwLocal = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+	bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE;
+
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+	/* Overwrite flags and always zero pages that could go back to UM */
+	bZero = IMG_TRUE;
+	bPoisonOnAlloc = IMG_FALSE;
+#endif
+
+	/* Physical allocation alignment is generally not supported except under
+	   very restrictive conditions; there is also a maximum alignment value,
+	   which must not exceed the largest device page size. If these conditions
+	   are not met, fail the alignment-requesting allocation */
+	if (bIsCMA)
+	{
+		IMG_UINT32 uiAlign = 1 << uiLog2AllocPageSize;
+		if (uiAlign > uiSize || uiAlign > (1 << PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ))
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Invalid PA alignment: size 0x%llx, align 0x%x",
+					__FUNCTION__, uiSize, uiAlign));
+			eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+			goto errorOnParam;
+		}
+		PVR_ASSERT(uiLog2AllocPageSize > PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ);
+	}
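+
+	/* Illustrative example: a CMA-backed allocation with 64 KB device pages
+	 * (uiLog2AllocPageSize == 16) implies uiAlign == 0x10000 and is rejected
+	 * above unless uiSize is at least 64 KB and 16 does not exceed
+	 * PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ. */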
+
+	/* Create the array structure that holds the physical pages */
+	eError = _AllocOSPageArray(psDevNode,
+							   uiChunkSize,
+							   ui32NumPhysChunks,
+							   ui32NumVirtChunks,
+							   uiLog2AllocPageSize,
+							   bZero,
+							   bIsCMA,
+							   bPoisonOnAlloc,
+							   bPoisonOnFree,
+							   bOnDemand,
+							   ui32CPUCacheFlags,
+							   &psPrivData);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnAllocPageArray;
+	}
+
+	if (!bOnDemand)
+	{
+		/* Do we fill the whole page array or just parts (sparse)? */
+		if (ui32NumPhysChunks == ui32NumVirtChunks)
+		{
+			/* Allocate the physical pages */
+			eError = _AllocOSPages(psPrivData,
+			                       NULL,
+			                       psPrivData->uiTotalNumOSPages >>
+			                       (uiLog2AllocPageSize - PAGE_SHIFT));
+		}
+		else
+		{
+			if (ui32NumPhysChunks != 0)
+			{
+				/* Calculate the number of pages we want to allocate */
+				IMG_UINT32 uiPagesToAlloc =
+					(IMG_UINT32) ((((ui32NumPhysChunks * uiChunkSize) - 1) >> uiLog2AllocPageSize) + 1);
+
+				/* Make sure calculation is correct */
+				PVR_ASSERT(((PMR_SIZE_T) uiPagesToAlloc << uiLog2AllocPageSize) ==
+						   (ui32NumPhysChunks * uiChunkSize) );
+
+				/* Allocate the physical pages */
+				eError = _AllocOSPages(psPrivData, puiAllocIndices,
+									   uiPagesToAlloc);
+			}
+		}
+
+		if (eError != PVRSRV_OK)
+		{
+			goto errorOnAllocPages;
+		}
+	}
+
+	/*
+	 * In this instance, we simply pass flags straight through.
+	 *
+	 * Generically, uiFlags can include things that control the PMR factory, but
+	 * we don't need any such thing (at the time of writing!), and our caller
+	 * specifies all PMR flags so we don't need to meddle with what was given to
+	 * us.
+	 */
+	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+	/*
+	 * Check no significant bits were lost in cast due to different bit widths
+	 * for flags
+	 */
+	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+	if (bOnDemand)
+	{
+		PDUMPCOMMENT("Deferred Allocation PMR (UMA)");
+	}
+
+	if (bFwLocal)
+	{
+		PDUMPCOMMENT("FW_LOCAL allocation requested");
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+	}
+	else if (bCpuLocal)
+	{
+		PDUMPCOMMENT("CPU_LOCAL allocation requested");
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL];
+	}
+	else
+	{
+		psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+	}
+
+	eError = PMRCreatePMR(psDevNode,
+						  psPhysHeap,
+						  uiSize,
+						  uiChunkSize,
+						  ui32NumPhysChunks,
+						  ui32NumVirtChunks,
+						  puiAllocIndices,
+						  uiLog2AllocPageSize,
+						  uiPMRFlags,
+						  pszAnnotation,
+						  &_sPMROSPFuncTab,
+						  psPrivData,
+						  PMR_TYPE_OSMEM,
+						  &psPMR,
+						  PDUMP_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnCreate;
+	}
+
+	*ppsPMRPtr = psPMR;
+
+	return PVRSRV_OK;
+
+errorOnCreate:
+	if (!bOnDemand)
+	{
+		eError2 = _FreeOSPages(psPrivData, NULL, 0);
+		PVR_ASSERT(eError2 == PVRSRV_OK);
+	}
+
+errorOnAllocPages:
+	eError2 = _FreeOSPagesArray(psPrivData);
+	PVR_ASSERT(eError2 == PVRSRV_OK);
+
+errorOnAllocPageArray:
+errorOnParam:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/physmem_osmem_linux.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/physmem_osmem_linux.h
new file mode 100644
index 0000000..3fac82d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/physmem_osmem_linux.h
@@ -0,0 +1,49 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux OS physmem implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PHYSMEM_OSMEM_LINUX_H__
+#define __PHYSMEM_OSMEM_LINUX_H__
+
+void LinuxInitPhysmem(void);
+void LinuxDeinitPhysmem(void);
+
+#endif /* __PHYSMEM_OSMEM_LINUX_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pmr_os.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pmr_os.c
new file mode 100644
index 0000000..812ce9e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pmr_os.c
@@ -0,0 +1,571 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux OS PMR functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <asm/io.h>
+#include <asm/page.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#if defined(CONFIG_L4)
+#include <asm/api-l4env/api.h>
+#endif
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+#include <linux/pfn_t.h>
+#include <linux/pfn.h>
+#endif
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "devicemem_server_utils.h"
+#include "pmr.h"
+#include "pmr_os.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+/*
+ * x86_32:
+ * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM
+ * pages with default memory attributes; these HIGHMEM pages are skipped in
+ * set_pages_array_[uc,wc] during allocation; see reserve_pfn_range().
+ * Also vm_insert_page is faster.
+ *
+ * x86_64:
+ * Use vm_insert_page because it is faster.
+ *
+ * Other platforms:
+ * Use remap_pfn_range by default because it does not issue a cache flush.
+ * It is known that ARM32 benefits from this. When other platforms become
+ * available, it should be investigated whether this assumption holds for them as well.
+ *
+ * Since vm_insert_page does more precise memory accounting we have the build
+ * flag PVR_MMAP_USE_VM_INSERT that forces its use. This is useful as a debug
+ * feature.
+ *
+ */
+#if defined(CONFIG_X86) || defined(PVR_MMAP_USE_VM_INSERT)
+#define PMR_OS_USE_VM_INSERT_PAGE 1
+#endif
+
+static void MMapPMROpen(struct vm_area_struct *ps_vma)
+{
+	PMR *psPMR = ps_vma->vm_private_data;
+
+	/* Our VM flags should ensure this function never gets called */
+	PVR_DPF((PVR_DBG_WARNING,
+			 "%s: Unexpected mmap open call, this is probably an application bug.",
+			 __func__));
+	PVR_DPF((PVR_DBG_WARNING,
+			 "%s: vma struct: 0x%p, vAddr: %#lX, length: %#lX, PMR pointer: 0x%p",
+			 __func__,
+			 ps_vma,
+			 ps_vma->vm_start,
+			 ps_vma->vm_end - ps_vma->vm_start,
+			 psPMR));
+
+	/* In case we get called anyway let's do things right by increasing the refcount and
+	 * locking down the physical addresses. */
+	PMRRefPMR(psPMR);
+
+	if (PMRLockSysPhysAddresses(psPMR) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__));
+		PMRUnrefPMR(psPMR);
+	}
+}
+
+static void MMapPMRClose(struct vm_area_struct *ps_vma)
+{
+	PMR *psPMR = ps_vma->vm_private_data;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if	defined(PVRSRV_ENABLE_MEMORY_STATS)
+	{
+		uintptr_t vAddr = ps_vma->vm_start;
+
+		while (vAddr < ps_vma->vm_end)
+		{
+			/* USER MAPPING */
+			PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT64)vAddr);
+			vAddr += PAGE_SIZE;
+		}
+	}
+#else
+	PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, ps_vma->vm_end - ps_vma->vm_start);
+#endif
+#endif
+
+	PMRUnlockSysPhysAddresses(psPMR);
+	PMRUnrefPMR(psPMR);
+}
+
+/*
+ * This vma operation is used to read data from mmap regions. It is called
+ * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace
+ * requests and reads from /proc/<pid>/mem.
+ */
+static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
+		       void *buf, int len, int write)
+{
+	PMR *psPMR = ps_vma->vm_private_data;
+	unsigned long ulOffset = addr - ps_vma->vm_start;
+	size_t uiBytesCopied;
+	PVRSRV_ERROR eError;
+	int iRetVal = -EINVAL;
+
+	if (write)
+	{
+		eError = PMR_WriteBytes(psPMR,
+					(IMG_DEVMEM_OFFSET_T) ulOffset,
+					buf,
+					len,
+					&uiBytesCopied);
+	}
+	else
+	{
+		eError = PMR_ReadBytes(psPMR,
+				       (IMG_DEVMEM_OFFSET_T) ulOffset,
+				       buf,
+				       len,
+				       &uiBytesCopied);
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)",
+			 __func__,
+			 write ? "PMR_WriteBytes" : "PMR_ReadBytes",
+			 eError));
+	}
+	else
+	{
+		iRetVal = uiBytesCopied;
+	}
+
+	return iRetVal;
+}
+
+static const struct vm_operations_struct gsMMapOps =
+{
+	.open = &MMapPMROpen,
+	.close = &MMapPMRClose,
+	.access = MMapVAccess,
+};
+
+static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode,
+							struct vm_area_struct *ps_vma,
+							IMG_DEVMEM_OFFSET_T uiOffset,
+							IMG_CPU_PHYADDR *psCpuPAddr,
+							IMG_UINT32 uiLog2PageSize,
+							IMG_BOOL bUseVMInsertPage,
+							IMG_BOOL bUseMixedMap)
+{
+	IMG_INT32 iStatus;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+	pfn_t sPFN;
+#else
+	unsigned long uiPFN;
+#endif
+
+#if defined(CONFIG_L4)
+	IMG_CPU_VIRTADDR pvCpuVAddr;
+
+	/* Use L4LINUX function, removes per-arch code-path */
+	pvCpuVAddr = l4x_phys_to_virt(psCpuPAddr->uiAddr);
+	if (pvCpuVAddr == NULL)
+	{
+		return -1;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+	sPFN = phys_to_pfn_t((uintptr_t)pvCpuVAddr, 0);
+#else
+	uiPFN = ((uintptr_t) pvCpuVAddr) >> PAGE_SHIFT;
+#endif
+#else /* defined(CONFIG_L4) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+	sPFN = phys_to_pfn_t(psCpuPAddr->uiAddr, 0);
+#else
+	uiPFN = psCpuPAddr->uiAddr >> PAGE_SHIFT;
+	PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr->uiAddr);
+#endif
+#endif
+
+	/*
+	 * vm_insert_page() allows insertion of individual pages into user
+	 * VMA space _only_ if page is a order-zero allocated page
+	 */
+	if (bUseVMInsertPage)
+	{
+		if (bUseMixedMap)
+		{
+			/*
+			 * This path is just for debugging. It should be
+			 * equivalent to the remap_pfn_range() path.
+			 */
+			iStatus = vm_insert_mixed(ps_vma,
+									  ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+									  sPFN);
+#else
+									  uiPFN);
+#endif
+		}
+		else
+		{
+			/* Since kernel 3.7 this sets VM_MIXEDMAP internally */
+			iStatus = vm_insert_page(ps_vma,
+									 ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+									 pfn_t_to_page(sPFN));
+#else
+									 pfn_to_page(uiPFN));
+#endif
+		}
+	}
+	else
+	{
+		/*
+		   NOTE: Regarding absence of dma_mmap_coherent() in _OSMMapPMR()
+
+		   The current services mmap model maps a PMR's full-length size
+		   into the user VMA & applies any user-specified offset to the
+		   zero-offset based VA returned by the kernel to the services
+		   client; this essentially means the services server ignores
+		   ps_vma->vm_pgoff (which houses hPMR) during an mmap call.
+
+		   Furthermore, during a DMA/CMA memory allocation, multiple order-n
+		   pages are used to satisfy an allocation request, because the
+		   DMA/CMA framework rounds the allocation size up to the next
+		   power of two, which can lead to wasted memory (so we don't
+		   allocate using a single call).
+
+		   The combination of the above two issues means that we cannot use
+		   dma_mmap_coherent(), for the reasons outlined below:
+
+		     - The services mmap semantics do not fit dma_mmap_coherent(),
+		       which requires a proper ps_vma->vm_pgoff; since this houses an
+		       hPMR handle value, calls into dma_mmap_coherent() fail. This
+		       could be avoided by forcing ps_vma->vm_pgoff to zero, but
+		       ps_vma->vm_pgoff is applied to the DMA bus address PFN, not to
+		       the user VMA, which is always mapped at ps_vma->vm_start.
+
+		     - As multiple order-n pages are used for DMA/CMA allocations, a
+		       single dma_mmap_coherent() call with vma->vm_pgoff set to zero
+		       cannot be used, because there is no guarantee that all of the
+		       multiple order-n pages in the PMR are physically contiguous
+		       from the first entry to the last. Whilst this is highly likely
+		       to be the case, it is not guaranteed, so we cannot depend on it.
+
+		   The solution is to manually mmap the DMA/CMA pages into the user
+		   VMA using remap_pfn_range() directly. Furthermore, accounting is
+		   always compromised for DMA/CMA allocations.
+		*/
+		size_t uiNumContiguousBytes = 1ULL << uiLog2PageSize;
+
+		iStatus = remap_pfn_range(ps_vma,
+								  ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+								  pfn_t_to_pfn(sPFN),
+#else
+								  uiPFN,
+#endif
+								  uiNumContiguousBytes,
+								  ps_vma->vm_page_prot);
+	}
+
+	return iStatus;
+}
+
+PVRSRV_ERROR
+OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
+{
+	struct vm_area_struct *ps_vma = pOSMMapData;
+	PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR);
+	PVRSRV_ERROR eError;
+	size_t uiLength;
+	IMG_INT32 iStatus;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_UINT32 ui32CPUCacheFlags;
+	pgprot_t sPageProt;
+	IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_UINT32 uiOffsetIdx;
+	IMG_UINT32 uiNumOfPFNs;
+	IMG_UINT32 uiLog2PageSize;
+	IMG_CPU_PHYADDR *psCpuPAddr;
+	IMG_BOOL *pbValid;
+	IMG_BOOL bUseMixedMap = IMG_FALSE;
+	IMG_BOOL bUseVMInsertPage = IMG_FALSE;
+
+	eError = PMRLockSysPhysAddresses(psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
+		((ps_vma->vm_flags & VM_SHARED) == 0))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e1;
+	}
+
+	sPageProt = vm_get_page_prot(ps_vma->vm_flags);
+
+	eError = DevmemCPUCacheMode(psDevNode,
+	                            PMR_Flags(psPMR),
+	                            &ui32CPUCacheFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	switch (ui32CPUCacheFlags)
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+				sPageProt = pgprot_noncached(sPageProt);
+				break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+				sPageProt = pgprot_writecombine(sPageProt);
+				break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+		{
+/* Do not set to write-combine for plato */
+#if !defined(PLATO_MEMORY_CONFIG)
+				PHYS_HEAP *psPhysHeap = PMR_PhysHeap(psPMR);
+
+				if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA)
+					sPageProt = pgprot_writecombine(sPageProt);
+#endif
+				break;
+		}
+
+		default:
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto e1;
+	}
+	ps_vma->vm_page_prot = sPageProt;
+
+	ps_vma->vm_flags |= VM_IO;
+
+	/* Don't include the mapping in core dumps */
+	ps_vma->vm_flags |= VM_DONTDUMP;
+
+	/*
+	 * Disable mremap because our nopage handler assumes all
+	 * page requests have already been validated.
+	 */
+	ps_vma->vm_flags |= VM_DONTEXPAND;
+
+	/* Don't allow mapping to be inherited across a process fork */
+	ps_vma->vm_flags |= VM_DONTCOPY;
+
+	uiLength = ps_vma->vm_end - ps_vma->vm_start;
+
+	/* Is this mmap targeting non order-zero pages or does it use pfn mappings?
+	 * If yes, don't use vm_insert_page */
+	uiLog2PageSize = PMR_GetLog2Contiguity(psPMR);
+#if defined(PMR_OS_USE_VM_INSERT_PAGE)
+	bUseVMInsertPage = (uiLog2PageSize == PAGE_SHIFT) && (PMR_GetType(psPMR) != PMR_TYPE_EXTMEM);
+#endif
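+
+	/* Illustrative example: a PMR built from 16 KB device pages
+	 * (uiLog2PageSize == 14) on a 4 KB-page kernel takes the remap_pfn_range()
+	 * path below even when PMR_OS_USE_VM_INSERT_PAGE is defined, because
+	 * vm_insert_page() only accepts order-zero pages. */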
+
+	/* Can we use stack allocations? */
+	uiNumOfPFNs = uiLength >> uiLog2PageSize;
+	if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(*psCpuPAddr));
+		if (psCpuPAddr == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e1;
+		}
+
+		/* Should allocation fail, clean-up here before exiting */
+		pbValid = OSAllocMem(uiNumOfPFNs * sizeof(*pbValid));
+		if (pbValid == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			OSFreeMem(psCpuPAddr);
+			goto e1;
+		}
+	}
+	else
+	{
+		psCpuPAddr = asCpuPAddr;
+		pbValid = abValid;
+	}
+
+	/* Obtain map range pfns */
+	eError = PMR_CpuPhysAddr(psPMR,
+				 uiLog2PageSize,
+				 uiNumOfPFNs,
+				 0,
+				 psCpuPAddr,
+				 pbValid);
+	if (eError != PVRSRV_OK)
+	{
+		goto e3;
+	}
+
+	/*
+	 * Scan the map range for pfns without struct page* handling. If
+	 * we find one, this is a mixed map, and we can't use vm_insert_page()
+	 * NOTE: vm_insert_page() allows insertion of individual pages into user
+	 * VMA space _only_ if said page is an order-zero allocated page.
+	 */
+	if (bUseVMInsertPage)
+	{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+		pfn_t sPFN;
+#else
+		unsigned long uiPFN;
+#endif
+
+		for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx)
+		{
+			if (pbValid[uiOffsetIdx])
+			{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+				sPFN = phys_to_pfn_t(psCpuPAddr[uiOffsetIdx].uiAddr, 0);
+
+				if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
+#else
+				uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
+				PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);
+
+				if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+				{
+					bUseMixedMap = IMG_TRUE;
+					break;
+				}
+			}
+		}
+
+		if (bUseMixedMap)
+		{
+			ps_vma->vm_flags |= VM_MIXEDMAP;
+		}
+	}
+	else
+	{
+		ps_vma->vm_flags |= VM_PFNMAP;
+	}
+
+	/* For each PMR page-size contiguous bytes, map page(s) into user VMA */
+	for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<uiLog2PageSize)
+	{
+		uiOffsetIdx = uiOffset >> uiLog2PageSize;
+		/*
+		 * Only map in pages that are valid, any that aren't will be
+		 * picked up by the nopage handler which will return a zeroed
+		 * page for us.
+		 */
+		if (pbValid[uiOffsetIdx])
+		{
+			iStatus = _OSMMapPMR(psDevNode,
+								 ps_vma,
+								 uiOffset,
+								 &psCpuPAddr[uiOffsetIdx],
+								 uiLog2PageSize,
+								 bUseVMInsertPage,
+								 bUseMixedMap);
+			if (iStatus)
+			{
+				/* Failure error code doesn't get propagated */
+				eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
+				PVR_ASSERT(0);
+				goto e3;
+			}
+		}
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS)
+		PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
+									(void*)(uintptr_t)(ps_vma->vm_start + uiOffset),
+									psCpuPAddr[uiOffsetIdx],
+									1<<uiLog2PageSize,
+									NULL);
+#endif
+	}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, uiNumOfPFNs * PAGE_SIZE);
+#endif
+
+	if (psCpuPAddr != asCpuPAddr)
+	{
+		OSFreeMem(psCpuPAddr);
+		OSFreeMem(pbValid);
+	}
+
+	/* Remember the PMR so that we can unlock its physical addresses later */
+	ps_vma->vm_private_data = psPMR;
+
+	/* Install open and close handlers for ref-counting */
+	ps_vma->vm_ops = &gsMMapOps;
+
+	/*
+	 * Take a reference on the PMR so that it can't be freed while mapped
+	 * into the user process.
+	 */
+	PMRRefPMR(psPMR);
+
+	return PVRSRV_OK;
+
+	/* Error exit paths follow */
+ e3:
+	if (psCpuPAddr != asCpuPAddr)
+	{
+		OSFreeMem(psCpuPAddr);
+		OSFreeMem(pbValid);
+	}
+ e1:
+	PMRUnlockSysPhysAddresses(psPMR);
+ e0:
+	return eError;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/private_data.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/private_data.h
new file mode 100644
index 0000000..6d63f15
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/private_data.h
@@ -0,0 +1,53 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux private data structure
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__INCLUDED_PRIVATE_DATA_H_)
+#define __INCLUDED_PRIVATE_DATA_H_
+
+#include <linux/fs.h>
+
+#include "connection_server.h"
+
+CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile);
+struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection);
+
+#endif /* !defined(__INCLUDED_PRIVATE_DATA_H_) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_bridge_k.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_bridge_k.c
new file mode 100644
index 0000000..428eff4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_bridge_k.c
@@ -0,0 +1,1043 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Bridge Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Receives calls from the user portion of services and
+                despatches them to functions in the kernel portion.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/mm_types.h>
+
+#include "img_defs.h"
+#include "pvr_bridge.h"
+#include "connection_server.h"
+#include "syscommon.h"
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "private_data.h"
+#include "linkage.h"
+#include "pmr.h"
+#include "rgx_bvnc_defs_km.h"
+
+#include <drm/drmP.h>
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+
+#include "env_connection.h"
+#include <linux/sched.h>
+
+/* RGX: */
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+
+#include "srvcore.h"
+#include "common_srvcore_bridge.h"
+
+#if defined(SUPPORT_DRM_EXT)
+#define CAST_BRIDGE_CMD_PTR_TO_PTR(ptr) (ptr)
+#else
+#define CAST_BRIDGE_CMD_PTR_TO_PTR(ptr) (void *)(uintptr_t)(ptr)
+#endif
+
+#if defined(MODULE_TEST)
+/************************************************************************/
+// additional includes for services testing
+/************************************************************************/
+#include "pvr_test_bridge.h"
+#include "kern_test.h"
+/************************************************************************/
+// end of additional includes
+/************************************************************************/
+#endif
+
+/* WARNING!
+ * The mmap code has its own mutex, to prevent a possible deadlock
+ * when using gPVRSRVLock.
+ * The Linux kernel takes mm->mmap_sem before calling the mmap
+ * entry points (PVRMMap, MMapVOpen, MMapVClose), but the ioctl
+ * entry point may take mm->mmap_sem during fault handling, or
+ * before calling get_user_pages. If gPVRSRVLock were used in the
+ * mmap entry points, a deadlock could result, due to the ioctl
+ * and mmap code taking the two locks in different orders.
+ * As a corollary, the mmap entry points must not call
+ * any driver code that relies on gPVRSRVLock being held.
+ */
+static DEFINE_MUTEX(g_sMMapMutex);
+
+#if defined(DEBUG_BRIDGE_KM)
+static PPVR_DEBUGFS_ENTRY_DATA gpsPVRDebugFSBridgeStatsEntry = NULL;
+static struct seq_operations gsBridgeStatsReadOps;
+static ssize_t BridgeStatsWrite(const char __user *pszBuffer,
+								size_t uiCount,
+								loff_t *puiPosition,
+								void *pvData);
+#endif
+
+/* These will go when full bridge gen comes in */
+#if defined(PDUMP)
+PVRSRV_ERROR InitPDUMPCTRLBridge(void);
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(void);
+PVRSRV_ERROR InitPDUMPBridge(void);
+PVRSRV_ERROR DeinitPDUMPBridge(void);
+PVRSRV_ERROR InitRGXPDUMPBridge(void);
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void);
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS)
+PVRSRV_ERROR InitDCBridge(void);
+PVRSRV_ERROR DeinitDCBridge(void);
+#endif
+PVRSRV_ERROR InitMMBridge(void);
+PVRSRV_ERROR DeinitMMBridge(void);
+#if !defined(EXCLUDE_CMM_BRIDGE)
+PVRSRV_ERROR InitCMMBridge(void);
+PVRSRV_ERROR DeinitCMMBridge(void);
+#endif
+PVRSRV_ERROR InitPDUMPMMBridge(void);
+PVRSRV_ERROR DeinitPDUMPMMBridge(void);
+PVRSRV_ERROR InitSRVCOREBridge(void);
+PVRSRV_ERROR DeinitSRVCOREBridge(void);
+PVRSRV_ERROR InitSYNCBridge(void);
+PVRSRV_ERROR DeinitSYNCBridge(void);
+
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR InitSYNCEXPORTBridge(void);
+PVRSRV_ERROR DeinitSYNCEXPORTBridge(void);
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR InitSYNCSEXPORTBridge(void);
+PVRSRV_ERROR DeinitSYNCSEXPORTBridge(void);
+#endif
+#endif /* defined(SUPPORT_SERVER_SYNC) */
+
+#if defined (SUPPORT_RGX)
+PVRSRV_ERROR InitRGXTA3DBridge(void);
+PVRSRV_ERROR DeinitRGXTA3DBridge(void);
+PVRSRV_ERROR InitRGXTQBridge(void);
+PVRSRV_ERROR DeinitRGXTQBridge(void);
+PVRSRV_ERROR InitRGXTQ2Bridge(void);
+PVRSRV_ERROR DeinitRGXTQ2Bridge(void);
+PVRSRV_ERROR InitRGXCMPBridge(void);
+PVRSRV_ERROR DeinitRGXCMPBridge(void);
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+PVRSRV_ERROR InitBREAKPOINTBridge(void);
+PVRSRV_ERROR DeinitBREAKPOINTBridge(void);
+#endif
+PVRSRV_ERROR InitDEBUGMISCBridge(void);
+PVRSRV_ERROR DeinitDEBUGMISCBridge(void);
+PVRSRV_ERROR InitRGXHWPERFBridge(void);
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void);
+PVRSRV_ERROR InitRGXRAYBridge(void);
+PVRSRV_ERROR DeinitRGXRAYBridge(void);
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+PVRSRV_ERROR InitREGCONFIGBridge(void);
+PVRSRV_ERROR DeinitREGCONFIGBridge(void);
+#endif
+PVRSRV_ERROR InitTIMERQUERYBridge(void);
+PVRSRV_ERROR DeinitTIMERQUERYBridge(void);
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
+PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void);
+PVRSRV_ERROR InitRGXSIGNALSBridge(void);
+PVRSRV_ERROR DeinitRGXSIGNALSBridge(void);
+#endif /* SUPPORT_RGX */
+PVRSRV_ERROR InitCACHEBridge(void);
+PVRSRV_ERROR DeinitCACHEBridge(void);
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR InitSMMBridge(void);
+PVRSRV_ERROR DeinitSMMBridge(void);
+#endif
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+PVRSRV_ERROR InitHTBUFFERBridge(void);
+PVRSRV_ERROR DeinitHTBUFFERBridge(void);
+#endif
+PVRSRV_ERROR InitPVRTLBridge(void);
+PVRSRV_ERROR DeinitPVRTLBridge(void);
+#if defined(PVR_RI_DEBUG)
+PVRSRV_ERROR InitRIBridge(void);
+PVRSRV_ERROR DeinitRIBridge(void);
+#endif
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void);
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void);
+#endif
+PVRSRV_ERROR InitDMABUFBridge(void);
+PVRSRV_ERROR DeinitDMABUFBridge(void);
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+PVRSRV_ERROR InitVALIDATIONBridge(void);
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+PVRSRV_ERROR InitTUTILSBridge(void);
+PVRSRV_ERROR DeinitTUTILSBridge(void);
+#endif
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void);
+PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void);
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+PVRSRV_ERROR InitMMEXTMEMBridge(void);
+PVRSRV_ERROR DeinitMMEXTMEMBridge(void);
+#endif
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+PVRSRV_ERROR InitSYNCFALLBACKBridge(void);
+PVRSRV_ERROR DeinitSYNCFALLBACKBridge(void);
+#endif
+
+PVRSRV_ERROR
+DeviceDepBridgeInit(IMG_UINT64 ui64Features)
+{
+	PVRSRV_ERROR eError;
+
+	if (ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+	{
+		eError = InitRGXCMPBridge();
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+
+	if (ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)
+	{
+		eError = InitRGXSIGNALSBridge();
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	if (ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+	{
+		eError = InitRGXRAYBridge();
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	if (ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+	{
+		eError = InitRGXTQ2Bridge();
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DeviceDepBridgeDeInit(IMG_UINT64 ui64Features)
+{
+	PVRSRV_ERROR eError;
+
+	if (ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+	{
+		eError = DeinitRGXCMPBridge();
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+
+	if (ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)
+	{
+		eError = DeinitRGXSIGNALSBridge();
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	if (ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK)
+	{
+		eError = DeinitRGXRAYBridge();
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	if (ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+	{
+		eError = DeinitRGXTQ2Bridge();
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+
+
+PVRSRV_ERROR
+LinuxBridgeInit(void)
+{
+	PVRSRV_ERROR eError;
+#if defined(DEBUG_BRIDGE_KM)
+	IMG_INT iResult;
+
+	iResult = PVRDebugFSCreateEntry("bridge_stats",
+					NULL,
+					&gsBridgeStatsReadOps,
+					BridgeStatsWrite,
+					NULL,
+					NULL,
+					&g_BridgeDispatchTable[0],
+					&gpsPVRDebugFSBridgeStatsEntry);
+	if (iResult != 0)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+#endif
+
+	BridgeDispatchTableStartOffsetsInit();
+
+	eError = InitSRVCOREBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = InitSYNCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+	eError = InitSYNCEXPORTBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+	eError = InitSYNCSEXPORTBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+#endif /* defined(SUPPORT_SERVER_SYNC) */
+
+#if defined(PDUMP)
+	eError = InitPDUMPCTRLBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = InitMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#if !defined(EXCLUDE_CMM_BRIDGE)
+	eError = InitCMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(PDUMP)
+	eError = InitPDUMPMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	eError = InitPDUMPBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = InitDMABUFBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+	eError = InitDCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = InitCACHEBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(SUPPORT_SECURE_EXPORT)
+	eError = InitSMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+	eError = InitHTBUFFERBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = InitPVRTLBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(PVR_RI_DEBUG)
+	eError = InitRIBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+	eError = InitVALIDATIONBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+	eError = InitTUTILSBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	eError = InitDEVICEMEMHISTORYBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	eError = InitSYNCTRACKINGBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_RGX)
+
+	eError = InitRGXTQBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = InitRGXTA3DBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+	eError = InitBREAKPOINTBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = InitDEBUGMISCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(PDUMP)
+	eError = InitRGXPDUMPBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = InitRGXHWPERFBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+	eError = InitREGCONFIGBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = InitTIMERQUERYBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = InitRGXKICKSYNCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#endif /* SUPPORT_RGX */
+
+#if defined(SUPPORT_WRAP_EXTMEM)
+	eError = InitMMEXTMEMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	eError = InitSYNCFALLBACKBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+LinuxBridgeDeInit(void)
+{
+	PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+	eError = DeinitSYNCFALLBACKBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_WRAP_EXTMEM)
+	eError = DeinitMMEXTMEMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(DEBUG_BRIDGE_KM)
+	if (gpsPVRDebugFSBridgeStatsEntry != NULL)
+	{
+		PVRDebugFSRemoveEntry(&gpsPVRDebugFSBridgeStatsEntry);
+	}
+#endif
+
+	eError = DeinitSRVCOREBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = DeinitSYNCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+	eError = DeinitSYNCEXPORTBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+	eError = DeinitSYNCSEXPORTBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+#endif /* defined(SUPPORT_SERVER_SYNC) */
+
+#if defined(PDUMP)
+	eError = DeinitPDUMPCTRLBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = DeinitMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#if !defined(EXCLUDE_CMM_BRIDGE)
+	eError = DeinitCMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(PDUMP)
+	eError = DeinitPDUMPMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	eError = DeinitPDUMPBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = DeinitDMABUFBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(PVR_TESTING_UTILS)
+	eError = DeinitTUTILSBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+	eError = DeinitDCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = DeinitCACHEBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(SUPPORT_SECURE_EXPORT)
+	eError = DeinitSMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+	eError = DeinitHTBUFFERBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = DeinitPVRTLBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(PVR_RI_DEBUG)
+	eError = DeinitRIBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	eError = DeinitDEVICEMEMHISTORYBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	eError = DeinitSYNCTRACKINGBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_RGX)
+
+	eError = DeinitRGXTQBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = DeinitRGXTA3DBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+	eError = DeinitBREAKPOINTBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = DeinitDEBUGMISCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(PDUMP)
+	eError = DeinitRGXPDUMPBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = DeinitRGXHWPERFBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+	eError = DeinitREGCONFIGBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = DeinitTIMERQUERYBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = DeinitRGXKICKSYNCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#endif /* SUPPORT_RGX */
+
+	return PVRSRV_OK;
+}
+
+#if defined(DEBUG_BRIDGE_KM)
+static void *BridgeStatsSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)psSeqFile->private;
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#endif
+
+	if (psDispatchTable == NULL || (*puiPosition) > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+	{
+		return NULL;
+	}
+
+	if ((*puiPosition) == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return &(psDispatchTable[(*puiPosition) - 1]);
+}
+
+static void BridgeStatsSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+}
+
+static void *BridgeStatsSeqNext(struct seq_file *psSeqFile,
+			       void *pvData,
+			       loff_t *puiPosition)
+{
+	PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)psSeqFile->private;
+	loff_t uiItemAskedFor = *puiPosition; /* puiPosition on entry is the index to return */
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	/* Is the item asked for (starts at 0) a valid table index? */
+	if (uiItemAskedFor < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+	{
+		(*puiPosition)++; /* on exit it is the next seq index to ask for */
+		return &(psDispatchTable[uiItemAskedFor]);
+	}
+
+	/* Now past the end of the table, so indicate stop */
+	return NULL;
+}
+
+static int BridgeStatsSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData == SEQ_START_TOKEN)
+	{
+		seq_printf(psSeqFile,
+			   "Total ioctl call count = %u\n"
+			   "Total number of bytes copied via copy_from_user = %u\n"
+			   "Total number of bytes copied via copy_to_user = %u\n"
+			   "Total number of bytes copied via copy_*_user = %u\n\n"
+			   "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s \n",
+			   g_BridgeGlobalStats.ui32IOCTLCount,
+			   g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
+			   g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+			   g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+			   "#",
+			   "Bridge Name",
+			   "Wrapper Function",
+			   "Call Count",
+			   "copy_from_user (B)",
+			   "copy_to_user (B)",
+			   "Total Time (us)",
+			   "Max Time (us)");
+	}
+	else if (pvData != NULL)
+	{
+		PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)pvData;
+		IMG_UINT32 ui32Remainder;
+
+		seq_printf(psSeqFile,
+			   "%3d: %-60s   %-48s   %-10u   %-20u   %-20u   %-20llu   %-20llu\n",
+			   (IMG_UINT32)(((size_t)psEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)),
+			   psEntry->pszIOCName,
+			   (psEntry->pfFunction != NULL) ? psEntry->pszFunctionName : "(null)",
+			   psEntry->ui32CallCount,
+			   psEntry->ui32CopyFromUserTotalBytes,
+			   psEntry->ui32CopyToUserTotalBytes,
+			   (unsigned long long) OSDivide64r64(psEntry->ui64TotalTimeNS, 1000, &ui32Remainder),
+			   (unsigned long long) OSDivide64r64(psEntry->ui64MaxTimeNS, 1000, &ui32Remainder));
+	}
+
+	return 0;
+}
+
+static struct seq_operations gsBridgeStatsReadOps =
+{
+	.start = BridgeStatsSeqStart,
+	.stop = BridgeStatsSeqStop,
+	.next = BridgeStatsSeqNext,
+	.show = BridgeStatsSeqShow,
+};
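+
+/* The seq_file core drives the ops above as start -> show -> next -> show
+ * ... -> stop for every read() on the DebugFS file, and guarantees that
+ * stop is called after a successful start. The bridge lock taken in
+ * BridgeStatsSeqStart is therefore always paired with the release in
+ * BridgeStatsSeqStop, even if the table walk ends early.
+ */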
+
+static ssize_t BridgeStatsWrite(const char __user *pszBuffer,
+								size_t uiCount,
+								loff_t *puiPosition,
+								void *pvData)
+{
+	IMG_UINT32 i;
+	/* We only care if a '0' is written to the file, if so we reset results. */
+	char buf[1];
+	ssize_t iResult = simple_write_to_buffer(&buf[0], sizeof(buf), puiPosition, pszBuffer, uiCount);
+
+	if (iResult < 0)
+	{
+		return iResult;
+	}
+
+	if (iResult == 0 || buf[0] != '0')
+	{
+		return -EINVAL;
+	}
+
+	/* Reset stats. */
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSAcquireBridgeLock();
+#endif
+
+	g_BridgeGlobalStats.ui32IOCTLCount = 0;
+	g_BridgeGlobalStats.ui32TotalCopyFromUserBytes = 0;
+	g_BridgeGlobalStats.ui32TotalCopyToUserBytes = 0;
+
+	for (i = 0; i < IMG_ARR_NUM_ELEMS(g_BridgeDispatchTable); i++)
+	{
+		g_BridgeDispatchTable[i].ui32CallCount = 0;
+		g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
+		g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
+		g_BridgeDispatchTable[i].ui64TotalTimeNS = 0;
+		g_BridgeDispatchTable[i].ui64MaxTimeNS = 0;
+	}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+	OSReleaseBridgeLock();
+#endif
+
+	return uiCount;
+}
+
+#endif /* defined(DEBUG_BRIDGE_KM) */
+
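+/* Entry point for the PVR Services ioctl on the DRM device: user space
+ * passes a struct drm_pvr_srvkm_cmd naming the bridge and function to
+ * call, plus user pointers to the in/out parameter blocks, and the
+ * packaged request is handed to BridgedDispatchKM for validation and
+ * dispatch via g_BridgeDispatchTable.
+ */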
+int
+PVRSRV_BridgeDispatchKM(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *pDRMFile)
+{
+	struct drm_pvr_srvkm_cmd *psSrvkmCmd = (struct drm_pvr_srvkm_cmd *) arg;
+	PVRSRV_BRIDGE_PACKAGE sBridgePackageKM = { 0 };
+	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pDRMFile->filp);
+	PVRSRV_ERROR error;
+
+	if (psConnection == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Connection is closed", __func__));
+		return -EFAULT;
+	}
+
+	PVR_ASSERT(psSrvkmCmd != NULL);
+
+	DRM_DEBUG("tgid=%d, tgid_connection=%d, bridge_id=%d, func_id=%d",
+			  task_tgid_nr(current),
+			  ((ENV_CONNECTION_DATA *)PVRSRVConnectionPrivateData(psConnection))->owner,
+			  psSrvkmCmd->bridge_id,
+			  psSrvkmCmd->bridge_func_id);
+
+	sBridgePackageKM.ui32BridgeID = psSrvkmCmd->bridge_id;
+	sBridgePackageKM.ui32FunctionID = psSrvkmCmd->bridge_func_id;
+	sBridgePackageKM.ui32Size = sizeof(sBridgePackageKM);
+	sBridgePackageKM.pvParamIn = CAST_BRIDGE_CMD_PTR_TO_PTR(psSrvkmCmd->in_data_ptr);
+	sBridgePackageKM.ui32InBufferSize = psSrvkmCmd->in_data_size;
+	sBridgePackageKM.pvParamOut = CAST_BRIDGE_CMD_PTR_TO_PTR(psSrvkmCmd->out_data_ptr);
+	sBridgePackageKM.ui32OutBufferSize = psSrvkmCmd->out_data_size;
+
+	error = BridgedDispatchKM(psConnection, &sBridgePackageKM);
+	return OSPVRSRVToNativeError(error);
+}
+
+int
+PVRSRV_MMap(struct file *pFile, struct vm_area_struct *ps_vma)
+{
+	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
+	IMG_HANDLE hSecurePMRHandle = (IMG_HANDLE)((uintptr_t)ps_vma->vm_pgoff);
+	PMR *psPMR;
+	PVRSRV_ERROR eError;
+
+	if (psConnection == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Invalid connection data"));
+		return -ENOENT;
+	}
+
+	/*
+	 * The bridge lock used here to protect PVRSRVLookupHandle is replaced
+	 * by a specific lock considering that the handle functions have now
+	 * their own lock. This change was necessary to solve the lockdep issues
+	 * related with the PVRSRV_MMap.
+	 */
+	mutex_lock(&g_sMMapMutex);
+
+	eError = PVRSRVLookupHandle(psConnection->psHandleBase,
+								(void **)&psPMR,
+								hSecurePMRHandle,
+								PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+								IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	/* Note: PMRMMapPMR will take a reference on the PMR.
+	 * Unref the handle immediately, because we have now done
+	 * the required operation on the PMR (whether it succeeded or not)
+	 */
+	eError = PMRMMapPMR(psPMR, ps_vma);
+	PVRSRVReleaseHandle(psConnection->psHandleBase, hSecurePMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: PMRMMapPMR failed (%s)",
+				__func__, PVRSRVGetErrorStringKM(eError)));
+		goto e0;
+	}
+
+	mutex_unlock(&g_sMMapMutex);
+
+	return 0;
+
+e0:
+	mutex_unlock(&g_sMMapMutex);
+
+	PVR_DPF((PVR_DBG_ERROR, "Unable to translate error %d", eError));
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return -ENOENT; /* No exact errno mapping exists here; -ENOENT is a best-effort translation */
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_debug.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_debug.c
new file mode 100644
index 0000000..b727d86
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_debug.c
@@ -0,0 +1,1319 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides kernel-side debug functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <stdarg.h>
+
+#include "allocmem.h"
+#include "pvrversion.h"
+#include "img_types.h"
+#include "servicesext.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "pvr_debugfs.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+#include "rgxdebug.h"
+#include "rgxinit.h"
+#include "lists.h"
+#include "osfunc.h"
+
+/* Handle used by DebugFS to get GPU utilisation stats */
+static IMG_HANDLE ghGpuUtilUserDebugFS = NULL;
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/******** BUFFERED LOG MESSAGES ********/
+
+/* Because we don't want to have to handle CCB wrapping, each buffered
+ * message is rounded up to PVRSRV_DEBUG_CCB_MESG_MAX bytes. This means
+ * there is the same fixed number of messages that can be stored,
+ * regardless of message length.
+ */
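+
+/* For example, with a hypothetical PVRSRV_DEBUG_CCB_MAX of 128 and a
+ * PVR_MAX_DEBUG_MESSAGE_LEN of 512, gsDebugCCB below is a static ring of
+ * 128 slots of just over 512 bytes each; once all slots are in use, each
+ * new message simply overwrites the oldest one.
+ */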
+
+#if defined(PVRSRV_DEBUG_CCB_MAX)
+
+#define PVRSRV_DEBUG_CCB_MESG_MAX	PVR_MAX_DEBUG_MESSAGE_LEN
+
+#include <linux/syscalls.h>
+#include <linux/time.h>
+
+typedef struct
+{
+	const IMG_CHAR *pszFile;
+	IMG_INT iLine;
+	IMG_UINT32 ui32TID;
+	IMG_UINT32 ui32PID;
+	IMG_CHAR pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX];
+	struct timeval sTimeVal;
+}
+PVRSRV_DEBUG_CCB;
+
+static PVRSRV_DEBUG_CCB gsDebugCCB[PVRSRV_DEBUG_CCB_MAX] = { { 0 } };
+
+static IMG_UINT giOffset = 0;
+
+static DEFINE_MUTEX(gsDebugCCBMutex);
+
+static void
+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
+			   const IMG_CHAR *szBuffer)
+{
+	mutex_lock(&gsDebugCCBMutex);
+
+	gsDebugCCB[giOffset].pszFile = pszFileName;
+	gsDebugCCB[giOffset].iLine   = ui32Line;
+	gsDebugCCB[giOffset].ui32TID = current->pid;
+	gsDebugCCB[giOffset].ui32PID = current->tgid;
+
+	do_gettimeofday(&gsDebugCCB[giOffset].sTimeVal);
+
+	strncpy(gsDebugCCB[giOffset].pcMesg, szBuffer, PVRSRV_DEBUG_CCB_MESG_MAX - 1);
+	gsDebugCCB[giOffset].pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX - 1] = 0;
+
+	giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX;
+
+	mutex_unlock(&gsDebugCCBMutex);
+}
+
+IMG_EXPORT void PVRSRVDebugPrintfDumpCCB(void)
+{
+	int i;
+
+	mutex_lock(&gsDebugCCBMutex);
+
+	for (i = 0; i < PVRSRV_DEBUG_CCB_MAX; i++)
+	{
+		PVRSRV_DEBUG_CCB *psDebugCCBEntry =
+			&gsDebugCCB[(giOffset + i) % PVRSRV_DEBUG_CCB_MAX];
+
+		/* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */
+		if (!psDebugCCBEntry->pszFile)
+		{
+			continue;
+		}
+
+		printk(KERN_ERR "%s:%d: (%ld.%ld, tid=%u, pid=%u) %s\n",
+			   psDebugCCBEntry->pszFile,
+			   psDebugCCBEntry->iLine,
+			   (long)psDebugCCBEntry->sTimeVal.tv_sec,
+			   (long)psDebugCCBEntry->sTimeVal.tv_usec,
+			   psDebugCCBEntry->ui32TID,
+			   psDebugCCBEntry->ui32PID,
+			   psDebugCCBEntry->pcMesg);
+
+		/* Clear this entry so it doesn't get printed the next time again. */
+		psDebugCCBEntry->pszFile = NULL;
+	}
+
+	mutex_unlock(&gsDebugCCBMutex);
+}
+
+#else /* defined(PVRSRV_DEBUG_CCB_MAX) */
+static INLINE void
+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
+			   const IMG_CHAR *szBuffer)
+{
+	(void)pszFileName;
+	(void)szBuffer;
+	(void)ui32Line;
+}
+
+IMG_EXPORT void PVRSRVDebugPrintfDumpCCB(void)
+{
+	/* Not available */
+}
+
+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz,
+						 const IMG_CHAR *pszFormat, va_list VArgs)
+						 __printf(3, 0);
+
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+#define PVR_MAX_FILEPATH_LEN 256
+
+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz,
+						const IMG_CHAR *pszFormat, ...)
+						__printf(3, 4);
+
+/* NOTE: Must NOT be static! Used in module.c. */
+IMG_UINT32 gPVRDebugLevel =
+	(
+	 DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING
+
+#if defined(PVRSRV_DEBUG_CCB_MAX)
+	 | DBGPRIV_BUFFERED
+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
+
+#if defined(PVR_DPF_ADHOC_DEBUG_ON)
+	 | DBGPRIV_DEBUG
+#endif /* defined(PVR_DPF_ADHOC_DEBUG_ON) */
+	);
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+#define	PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
+
+/* Message buffer for non-IRQ messages */
+static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1];
+
+/* Message buffer for IRQ messages */
+static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1];
+
+/* The lock is used to control access to gszBufferNonIRQ */
+static DEFINE_MUTEX(gsDebugMutexNonIRQ);
+
+/* The lock is used to control access to gszBufferIRQ */
+static DEFINE_SPINLOCK(gsDebugLockIRQ);
+
+#define	USE_SPIN_LOCK (in_interrupt() || !preemptible())
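+
+/* Sleeping is forbidden in interrupt or other atomic context, so the
+ * IRQ-safe spinlock (and the dedicated IRQ buffer) must be used there;
+ * in ordinary preemptible process context the mutex is used instead, as
+ * it can sleep rather than spin while the buffer is busy.
+ */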
+
+static inline void GetBufferLock(unsigned long *pulLockFlags)
+{
+	if (USE_SPIN_LOCK)
+	{
+		spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags);
+	}
+	else
+	{
+		mutex_lock(&gsDebugMutexNonIRQ);
+	}
+}
+
+static inline void ReleaseBufferLock(unsigned long ulLockFlags)
+{
+	if (USE_SPIN_LOCK)
+	{
+		spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
+	}
+	else
+	{
+		mutex_unlock(&gsDebugMutexNonIRQ);
+	}
+}
+
+static inline void SelectBuffer(IMG_CHAR **ppszBuf, IMG_UINT32 *pui32BufSiz)
+{
+	if (USE_SPIN_LOCK)
+	{
+		*ppszBuf = gszBufferIRQ;
+		*pui32BufSiz = sizeof(gszBufferIRQ);
+	}
+	else
+	{
+		*ppszBuf = gszBufferNonIRQ;
+		*pui32BufSiz = sizeof(gszBufferNonIRQ);
+	}
+}
+
+/*
+ * Append a string to a buffer using formatted conversion.
+ * The function takes a variable number of arguments, pointed
+ * to by the var args list.
+ */
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, va_list VArgs)
+{
+	IMG_UINT32 ui32Used;
+	IMG_UINT32 ui32Space;
+	IMG_INT32 i32Len;
+
+	ui32Used = strlen(pszBuf);
+	BUG_ON(ui32Used >= ui32BufSiz);
+	ui32Space = ui32BufSiz - ui32Used;
+
+	i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
+	pszBuf[ui32BufSiz - 1] = 0;
+
+	/* Return true if string was truncated */
+	return i32Len < 0 || i32Len >= (IMG_INT32)ui32Space;
+}
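+
+/* Note: vsnprintf() returns the length the fully formatted string would
+ * have had, so a result >= the available space (or a defensive < 0
+ * check) means the message did not fit; that is what the IMG_BOOL
+ * returned above reports to the callers.
+ */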
+
+/*************************************************************************/ /*!
+@Function       PVRSRVReleasePrintf
+@Description    To output an important message to the user in release builds
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...)
+{
+	va_list vaArgs;
+	unsigned long ulLockFlags = 0;
+	IMG_CHAR *pszBuf;
+	IMG_UINT32 ui32BufSiz;
+	IMG_INT32  result;
+
+	SelectBuffer(&pszBuf, &ui32BufSiz);
+
+	va_start(vaArgs, pszFormat);
+
+	GetBufferLock(&ulLockFlags);
+
+	result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR_K:  %u: ", current->pid);
+	PVR_ASSERT(result > 0);
+	ui32BufSiz -= result;
+
+	if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+	{
+		printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+	}
+	else
+	{
+		printk(KERN_ERR "%s\n", pszBuf);
+	}
+
+	ReleaseBufferLock(ulLockFlags);
+	va_end(vaArgs);
+}
+
+#if defined(PVRSRV_NEED_PVR_TRACE)
+
+/*************************************************************************/ /*!
+@Function       PVRTrace
+@Description    To output a debug message to the user
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVTrace(const IMG_CHAR *pszFormat, ...)
+{
+	va_list VArgs;
+	unsigned long ulLockFlags = 0;
+	IMG_CHAR *pszBuf;
+	IMG_UINT32 ui32BufSiz;
+	IMG_INT32  result;
+
+	SelectBuffer(&pszBuf, &ui32BufSiz);
+
+	va_start(VArgs, pszFormat);
+
+	GetBufferLock(&ulLockFlags);
+
+	result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR: %u: ", current->pid);
+	PVR_ASSERT(result > 0);
+	ui32BufSiz -= result;
+
+	if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs))
+	{
+		printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+	}
+	else
+	{
+		printk(KERN_ERR "%s\n", pszBuf);
+	}
+
+	ReleaseBufferLock(ulLockFlags);
+
+	va_end(VArgs);
+}
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/*
+ * Append a string to a buffer using formatted conversion.
+ * The function takes a variable number of arguments, calling
+ * VBAppend to do the actual work.
+ */
+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...)
+{
+	va_list VArgs;
+	IMG_BOOL bTrunc;
+
+	va_start(VArgs, pszFormat);
+
+	bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
+
+	va_end(VArgs);
+
+	return bTrunc;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugPrintf
+@Description    To output a debug message to the user
+@Input          uDebugLevel The current debug level
+@Input          pszFile     The source file generating the message
+@Input          uLine       The line of the source file
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+			   const IMG_CHAR *pszFullFileName,
+			   IMG_UINT32 ui32Line,
+			   const IMG_CHAR *pszFormat,
+			   ...)
+{
+	const IMG_CHAR *pszFileName = pszFullFileName;
+	IMG_CHAR *pszLeafName;
+
+	if (gPVRDebugLevel & ui32DebugLevel)
+	{
+		va_list vaArgs;
+		unsigned long ulLockFlags = 0;
+		IMG_CHAR *pszBuf;
+		IMG_UINT32 ui32BufSiz;
+
+		SelectBuffer(&pszBuf, &ui32BufSiz);
+
+		va_start(vaArgs, pszFormat);
+
+		GetBufferLock(&ulLockFlags);
+
+		switch (ui32DebugLevel)
+		{
+			case DBGPRIV_FATAL:
+			{
+				strncpy(pszBuf, "PVR_K:(Fatal): ", (ui32BufSiz - 2));
+				break;
+			}
+			case DBGPRIV_ERROR:
+			{
+				strncpy(pszBuf, "PVR_K:(Error): ", (ui32BufSiz - 2));
+				break;
+			}
+			case DBGPRIV_WARNING:
+			{
+				strncpy(pszBuf, "PVR_K:(Warn):  ", (ui32BufSiz - 2));
+				break;
+			}
+			case DBGPRIV_MESSAGE:
+			{
+				strncpy(pszBuf, "PVR_K:(Mesg):  ", (ui32BufSiz - 2));
+				break;
+			}
+			case DBGPRIV_VERBOSE:
+			{
+				strncpy(pszBuf, "PVR_K:(Verb):  ", (ui32BufSiz - 2));
+				break;
+			}
+			case DBGPRIV_DEBUG:
+			{
+				strncpy(pszBuf, "PVR_K:(Debug): ", (ui32BufSiz - 2));
+				break;
+			}
+			case DBGPRIV_CALLTRACE:
+			case DBGPRIV_ALLOC:
+			case DBGPRIV_BUFFERED:
+			default:
+			{
+				strncpy(pszBuf, "PVR_K: ", (ui32BufSiz - 2));
+				break;
+			}
+		}
+		pszBuf[ui32BufSiz - 1] = '\0';
+
+		if (current->pid == task_tgid_nr(current))
+		{
+			(void) BAppend(pszBuf, ui32BufSiz, "%5u: ", current->pid);
+		}
+		else
+		{
+			(void) BAppend(pszBuf, ui32BufSiz, "%5u-%5u: ", task_tgid_nr(current) /* pid id of group*/, current->pid /* task id */);
+		}
+
+		if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+		{
+			printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+		}
+		else
+		{
+			IMG_BOOL bTruncated = IMG_FALSE;
+
+#if !defined(__sh__)
+			pszLeafName = (IMG_CHAR *)strrchr(pszFileName, '/');
+
+			if (pszLeafName)
+			{
+				pszFileName = pszLeafName+1;
+			}
+#endif /* __sh__ */
+
+#if defined(DEBUG)
+			{
+				static const IMG_CHAR *lastFile = NULL;
+
+				if (lastFile == pszFileName)
+				{
+					bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line);
+				}
+				else
+				{
+					bTruncated = BAppend(pszBuf, ui32BufSiz, " [%s:%u]", pszFileName, ui32Line);
+					lastFile = pszFileName;
+				}
+			}
+#endif
+
+			if (bTruncated)
+			{
+				printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+			}
+			else
+			{
+				if (ui32DebugLevel & DBGPRIV_BUFFERED)
+				{
+					AddToBufferCCB(pszFileName, ui32Line, pszBuf);
+				}
+				else
+				{
+					printk(KERN_ERR "%s\n", pszBuf);
+				}
+			}
+		}
+
+		ReleaseBufferLock(ulLockFlags);
+
+		va_end(vaArgs);
+	}
+}
+
+#endif /* PVRSRV_NEED_PVR_DPF */
+
+
+/*************************************************************************/ /*!
+ Version DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugVersionCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+					  va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
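+
+/* List_PVRSRV_DEVICE_NODE_Any_va() walks the device list, passing each
+ * node to the callback above along with the caller's va_list; the walk
+ * stops at the first non-NULL return, so the callback selects the node
+ * matching the requested seq-file position.
+ */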
+
+static void *_DebugVersionSeqStart(struct seq_file *psSeqFile,
+				   loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	if (*puiPosition == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugVersionCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _DebugVersionSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugVersionSeqNext(struct seq_file *psSeqFile,
+				  void *pvData,
+				  loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugVersionCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static int _DebugVersionSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if (pvData == SEQ_START_TOKEN)
+	{
+		if (psPVRSRVData->sDriverInfo.bIsNoMatch)
+		{
+			seq_printf(psSeqFile, "Driver UM Version: %d (%s) %s\n",
+				   psPVRSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision,
+				   (psPVRSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType) ? "release" : "debug",
+				   PVR_BUILD_DIR);
+			seq_printf(psSeqFile, "Driver KM Version: %d (%s) %s\n",
+				   psPVRSRVData->sDriverInfo.sKMBuildInfo.ui32BuildRevision,
+				   (BUILD_TYPE_RELEASE == psPVRSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType) ? "release" : "debug",
+				   PVR_BUILD_DIR);
+		}
+		else
+		{
+			seq_printf(psSeqFile, "Driver Version: %s (%s) %s\n",
+				   PVRVERSION_STRING,
+				   PVR_BUILD_TYPE, PVR_BUILD_DIR);
+		}
+	}
+	else if (pvData != NULL)
+	{
+		PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)pvData;
+
+		seq_printf(psSeqFile, "\nDevice Name: %s\n", psDevNode->psDevConfig->pszName);
+
+		if (psDevNode->psDevConfig->pszVersion)
+		{
+			seq_printf(psSeqFile, "Device Version: %s\n", psDevNode->psDevConfig->pszVersion);
+		}
+
+		if (psDevNode->pfnDeviceVersionString)
+		{
+			IMG_CHAR *pszDeviceVersionString;
+
+			if (psDevNode->pfnDeviceVersionString(psDevNode, &pszDeviceVersionString) == PVRSRV_OK)
+			{
+				seq_printf(psSeqFile, "%s\n", pszDeviceVersionString);
+
+				OSFreeMem(pszDeviceVersionString);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static struct seq_operations gsDebugVersionReadOps =
+{
+	.start = _DebugVersionSeqStart,
+	.stop = _DebugVersionSeqStop,
+	.next = _DebugVersionSeqNext,
+	.show = _DebugVersionSeqShow,
+};
+
+/*************************************************************************/ /*!
+ Status DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugStatusCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+										 va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugStatusSeqStart(struct seq_file *psSeqFile,
+								  loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	if (*puiPosition == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugStatusCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _DebugStatusSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugStatusSeqNext(struct seq_file *psSeqFile,
+								 void *pvData,
+								 loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugStatusCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static int _DebugStatusSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData == SEQ_START_TOKEN)
+	{
+		PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+
+		if (psPVRSRVData != NULL)
+		{
+			switch (psPVRSRVData->eServicesState)
+			{
+				case PVRSRV_SERVICES_STATE_OK:
+					seq_printf(psSeqFile, "Driver Status:   OK\n");
+					break;
+				case PVRSRV_SERVICES_STATE_BAD:
+					seq_printf(psSeqFile, "Driver Status:   BAD\n");
+					break;
+				default:
+					seq_printf(psSeqFile, "Driver Status:   %d\n", psPVRSRVData->eServicesState);
+					break;
+			}
+		}
+	}
+	else if (pvData != NULL)
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+		IMG_CHAR           *pszStatus = "";
+		IMG_CHAR           *pszReason = "";
+		PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus;
+		PVRSRV_DEVICE_HEALTH_REASON eHealthReason;
+
+		/* Update the health status now if possible... */
+		if (psDeviceNode->pfnUpdateHealthStatus)
+		{
+			psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_FALSE);
+		}
+		eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus);
+		eHealthReason = OSAtomicRead(&psDeviceNode->eHealthReason);
+
+		switch (eHealthStatus)
+		{
+			case PVRSRV_DEVICE_HEALTH_STATUS_OK:  pszStatus = "OK";  break;
+			case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:  pszStatus = "NOT RESPONDING";  break;
+			case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:  pszStatus = "DEAD";  break;
+			default:  pszStatus = "UNKNOWN";  break;
+		}
+
+		switch (eHealthReason)
+		{
+			case PVRSRV_DEVICE_HEALTH_REASON_NONE:  pszReason = "";  break;
+			case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED:  pszReason = " (FW Assert)";  break;
+			case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING:  pszReason = " (Poll failure)";  break;
+			case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS:  pszReason = " (Global Event Object timeouts rising)";  break;
+			case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT:  pszReason = " (KCCB offset invalid)";  break;
+			case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED:  pszReason = " (KCCB stalled)";  break;
+			default:  pszReason = " (Unknown reason)";  break;
+		}
+
+		seq_printf(psSeqFile, "Firmware Status: %s%s\n", pszStatus, pszReason);
+
+		if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+		{
+			/*
+			 * Guest drivers do not support the following functionality:
+			 *	- Perform actual on-chip fw tracing.
+			 *	- Collect actual on-chip GPU utilization stats.
+			 *	- Perform actual on-chip GPU power/dvfs management.
+			 *	- As a result no more information can be provided.
+			 */
+			return 0;
+		}
+
+		/* Write other useful stats to aid the test cycle... */
+		if (psDeviceNode->pvDevice != NULL)
+		{
+			PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+			RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+			/* Calculate the number of HWR events in total across all the DMs... */
+			if (psRGXFWIfTraceBufCtl != NULL)
+			{
+				IMG_UINT32 ui32HWREventCount = 0;
+				IMG_UINT32 ui32CRREventCount = 0;
+				IMG_UINT32 ui32DMIndex;
+
+				for (ui32DMIndex = 0; ui32DMIndex < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; ui32DMIndex++)
+				{
+					ui32HWREventCount += psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[ui32DMIndex];
+					ui32CRREventCount += psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[ui32DMIndex];
+				}
+
+				seq_printf(psSeqFile, "HWR Event Count: %d\n", ui32HWREventCount);
+				seq_printf(psSeqFile, "CRR Event Count: %d\n", ui32CRREventCount);
+			}
+
+			/* Write the number of APM events... */
+			seq_printf(psSeqFile, "APM Event Count: %d\n", psDevInfo->ui32ActivePMReqTotal);
+
+			/* Write the current GPU Utilisation values... */
+			if (psDevInfo->pfnGetGpuUtilStats &&
+				eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK)
+			{
+				RGXFWIF_GPU_UTIL_STATS sGpuUtilStats;
+				PVRSRV_ERROR eError = PVRSRV_OK;
+
+				eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode,
+													   ghGpuUtilUserDebugFS,
+													   &sGpuUtilStats);
+
+				if ((eError == PVRSRV_OK) &&
+					((IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative))
+				{
+					IMG_UINT64 util;
+					IMG_UINT32 rem;
+
+					util = 100 * (sGpuUtilStats.ui64GpuStatActiveHigh +
+								  sGpuUtilStats.ui64GpuStatActiveLow);
+					util = OSDivide64(util, (IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative, &rem);
+
+					seq_printf(psSeqFile, "GPU Utilisation: %u%%\n", (IMG_UINT32)util);
+				}
+				else
+				{
+					seq_printf(psSeqFile, "GPU Utilisation: -\n");
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT DebugStatusSet(const char __user *pcBuffer,
+							  size_t uiCount,
+							  loff_t *puiPosition,
+							  void *pvData)
+{
+	IMG_CHAR acDataBuffer[6];
+
+	if (puiPosition == NULL || *puiPosition != 0)
+	{
+		return -EIO;
+	}
+
+	if (uiCount > (sizeof(acDataBuffer) / sizeof(acDataBuffer[0])))
+	{
+		return -EINVAL;
+	}
+
+	if (pvr_copy_from_user(acDataBuffer, pcBuffer, uiCount))
+	{
+		return -EINVAL;
+	}
+
+	if (acDataBuffer[uiCount - 1] != '\n')
+	{
+		return -EINVAL;
+	}
+
+	if ((acDataBuffer[0] == 'k' || acDataBuffer[0] == 'K') && uiCount == 2)
+	{
+		PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+		psPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_BAD;
+	}
+	else
+	{
+		return -EINVAL;
+	}
+
+	*puiPosition += uiCount;
+	return uiCount;
+}
+
+static struct seq_operations gsDebugStatusReadOps =
+{
+	.start = _DebugStatusSeqStart,
+	.stop = _DebugStatusSeqStop,
+	.next = _DebugStatusSeqNext,
+	.show = _DebugStatusSeqShow,
+};
+
+/*************************************************************************/ /*!
+ Dump Debug DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugDumpDebugCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugDumpDebugSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	if (*puiPosition == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugDumpDebugCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _DebugDumpDebugSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugDumpDebugSeqNext(struct seq_file *psSeqFile,
+									void *pvData,
+									loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugDumpDebugCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _DumpDebugSeqPrintf(void *pvDumpDebugFile,
+				const IMG_CHAR *pszFormat, ...)
+{
+	struct seq_file *psSeqFile = (struct seq_file *)pvDumpDebugFile;
+	IMG_CHAR  szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+	va_list  ArgList;
+
+	va_start(ArgList, pszFormat);
+	vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+	va_end(ArgList);
+	seq_printf(psSeqFile, "%s\n", szBuffer);
+}
+
+static int _DebugDumpDebugSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData != NULL  &&  pvData != SEQ_START_TOKEN)
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+
+		if (psDeviceNode->pvDevice != NULL)
+		{
+			PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
+						_DumpDebugSeqPrintf, psSeqFile);
+		}
+	}
+
+	return 0;
+}
+
+static struct seq_operations gsDumpDebugReadOps =
+{
+	.start = _DebugDumpDebugSeqStart,
+	.stop  = _DebugDumpDebugSeqStop,
+	.next  = _DebugDumpDebugSeqNext,
+	.show  = _DebugDumpDebugSeqShow,
+};
+/*************************************************************************/ /*!
+ Firmware Trace DebugFS entry
+*/ /**************************************************************************/
+static void *_DebugFWTraceCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugFWTraceSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	if (*puiPosition == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugFWTraceCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _DebugFWTraceSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugFWTraceSeqNext(struct seq_file *psSeqFile,
+								  void *pvData,
+								  loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+										  _DebugFWTraceCompare_AnyVaCb,
+										  &uiCurrentPosition,
+										  *puiPosition);
+}
+
+static void _FWTraceSeqPrintf(void *pvDumpDebugFile,
+				const IMG_CHAR *pszFormat, ...)
+{
+	struct seq_file *psSeqFile = (struct seq_file *)pvDumpDebugFile;
+	IMG_CHAR  szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+	va_list  ArgList;
+
+	va_start(ArgList, pszFormat);
+	vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+	va_end(ArgList);
+	seq_printf(psSeqFile, "%s\n", szBuffer);
+}
+
+static int _DebugFWTraceSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData != NULL  &&  pvData != SEQ_START_TOKEN)
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+
+		if (psDeviceNode->pvDevice != NULL)
+		{
+			PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+			RGXDumpFirmwareTrace(_FWTraceSeqPrintf, psSeqFile, psDevInfo);
+		}
+	}
+
+	return 0;
+}
+
+static struct seq_operations gsFWTraceReadOps =
+{
+	.start = _DebugFWTraceSeqStart,
+	.stop  = _DebugFWTraceSeqStop,
+	.next  = _DebugFWTraceSeqNext,
+	.show  = _DebugFWTraceSeqShow,
+};
+
+/*************************************************************************/ /*!
+ Debug level DebugFS entry
+*/ /**************************************************************************/
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+static void *DebugLevelSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	if (*puiPosition == 0)
+	{
+		return psSeqFile->private;
+	}
+
+	return NULL;
+}
+
+static void DebugLevelSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *DebugLevelSeqNext(struct seq_file *psSeqFile,
+							   void *pvData,
+							   loff_t *puiPosition)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+	PVR_UNREFERENCED_PARAMETER(puiPosition);
+
+	return NULL;
+}
+
+static int DebugLevelSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData != NULL)
+	{
+		IMG_UINT32 uiDebugLevel = *((IMG_UINT32 *)pvData);
+
+		seq_printf(psSeqFile, "%u\n", uiDebugLevel);
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static struct seq_operations gsDebugLevelReadOps =
+{
+	.start = DebugLevelSeqStart,
+	.stop = DebugLevelSeqStop,
+	.next = DebugLevelSeqNext,
+	.show = DebugLevelSeqShow,
+};
+
+
+static IMG_INT DebugLevelSet(const char __user *pcBuffer,
+							 size_t uiCount,
+							 loff_t *puiPosition,
+							 void *pvData)
+{
+	IMG_UINT32 *uiDebugLevel = (IMG_UINT32 *)pvData;
+	IMG_CHAR acDataBuffer[6];
+
+	if (puiPosition == NULL || *puiPosition != 0)
+	{
+		return -EIO;
+	}
+
+	if (uiCount > (sizeof(acDataBuffer) / sizeof(acDataBuffer[0])))
+	{
+		return -EINVAL;
+	}
+
+	if (pvr_copy_from_user(acDataBuffer, pcBuffer, uiCount))
+	{
+		return -EINVAL;
+	}
+
+	if (acDataBuffer[uiCount - 1] != '\n')
+	{
+		return -EINVAL;
+	}
+
+	if (sscanf(acDataBuffer, "%u", &gPVRDebugLevel) == 0)
+	{
+		return -EINVAL;
+	}
+
+	/* As this is Linux the next line uses a GCC builtin function */
+	(*uiDebugLevel) &= (1 << __builtin_ffsl(DBGPRIV_LAST)) - 1;
+
+	*puiPosition += uiCount;
+	return uiCount;
+}
+#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */
+
+static PPVR_DEBUGFS_ENTRY_DATA gpsVersionDebugFSEntry;
+
+static PPVR_DEBUGFS_ENTRY_DATA gpsStatusDebugFSEntry;
+static PPVR_DEBUGFS_ENTRY_DATA gpsDumpDebugDebugFSEntry;
+
+static PPVR_DEBUGFS_ENTRY_DATA gpsFWTraceDebugFSEntry;
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+static PPVR_DEBUGFS_ENTRY_DATA gpsDebugLevelDebugFSEntry;
+#endif
+
+int PVRDebugCreateDebugFSEntries(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	int iResult;
+
+	PVR_ASSERT(psPVRSRVData != NULL);
+
+	/*
+	 * The DebugFS entries are designed to work in a single device system but
+	 * this function will be called multiple times in a multi-device system.
+	 * Return an error in this case.
+	 */
+	if (gpsVersionDebugFSEntry)
+	{
+		return -EEXIST;
+	}
+
+#if !defined(NO_HARDWARE)
+	if (RGXRegisterGpuUtilStats(&ghGpuUtilUserDebugFS) != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+#endif
+
+	iResult = PVRDebugFSCreateEntry("version",
+									NULL,
+									&gsDebugVersionReadOps,
+									NULL,
+									NULL,
+									NULL,
+									psPVRSRVData,
+									&gpsVersionDebugFSEntry);
+	if (iResult != 0)
+	{
+		return iResult;
+	}
+
+	iResult = PVRDebugFSCreateEntry("status",
+									NULL,
+									&gsDebugStatusReadOps,
+									(PVRSRV_ENTRY_WRITE_FUNC *)DebugStatusSet,
+									NULL,
+									NULL,
+									psPVRSRVData,
+									&gpsStatusDebugFSEntry);
+	if (iResult != 0)
+	{
+		goto ErrorRemoveVersionEntry;
+	}
+
+	iResult = PVRDebugFSCreateEntry("debug_dump",
+									NULL,
+									&gsDumpDebugReadOps,
+									NULL,
+									NULL,
+									NULL,
+									psPVRSRVData,
+									&gpsDumpDebugDebugFSEntry);
+	if (iResult != 0)
+	{
+		goto ErrorRemoveStatusEntry;
+	}
+
+	if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		iResult = PVRDebugFSCreateEntry("firmware_trace",
+										NULL,
+										&gsFWTraceReadOps,
+										NULL,
+										NULL,
+										NULL,
+										psPVRSRVData,
+										&gpsFWTraceDebugFSEntry);
+		if (iResult != 0)
+		{
+			goto ErrorRemoveDumpDebugEntry;
+		}
+	}
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+	iResult = PVRDebugFSCreateEntry("debug_level",
+									NULL,
+									&gsDebugLevelReadOps,
+									(PVRSRV_ENTRY_WRITE_FUNC *)DebugLevelSet,
+									NULL,
+									NULL,
+									&gPVRDebugLevel,
+									&gpsDebugLevelDebugFSEntry);
+	if (iResult != 0)
+	{
+		goto ErrorRemoveFWTraceLogEntry;
+	}
+#endif
+
+	return 0;
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+ErrorRemoveFWTraceLogEntry:
+	PVRDebugFSRemoveEntry(&gpsFWTraceDebugFSEntry);
+#endif
+ErrorRemoveDumpDebugEntry:
+	if (gpsDumpDebugDebugFSEntry)
+	{
+		PVRDebugFSRemoveEntry(&gpsDumpDebugDebugFSEntry);
+	}
+ErrorRemoveStatusEntry:
+	PVRDebugFSRemoveEntry(&gpsStatusDebugFSEntry);
+ErrorRemoveVersionEntry:
+	PVRDebugFSRemoveEntry(&gpsVersionDebugFSEntry);
+
+	return iResult;
+}
+
+void PVRDebugRemoveDebugFSEntries(void)
+{
+#if !defined(NO_HARDWARE)
+	if (ghGpuUtilUserDebugFS != NULL)
+	{
+		RGXUnregisterGpuUtilStats(ghGpuUtilUserDebugFS);
+		ghGpuUtilUserDebugFS = NULL;
+	}
+#endif
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+	if (gpsDebugLevelDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveEntry(&gpsDebugLevelDebugFSEntry);
+	}
+#endif
+
+	if (gpsFWTraceDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveEntry(&gpsFWTraceDebugFSEntry);
+	}
+
+	if (gpsDumpDebugDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveEntry(&gpsDumpDebugDebugFSEntry);
+	}
+
+	if (gpsStatusDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveEntry(&gpsStatusDebugFSEntry);
+	}
+
+	if (gpsVersionDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveEntry(&gpsVersionDebugFSEntry);
+	}
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_debugfs.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_debugfs.c
new file mode 100644
index 0000000..3c1f612
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_debugfs.c
@@ -0,0 +1,1135 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for creating debugfs directories and entries.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "allocmem.h"
+
+#define PVR_DEBUGFS_DIR_NAME PVR_DRM_NAME
+
+/* Define to set the PVR_DPF debug output level for pvr_debugfs.
+ * Normally, leave this set to PVR_DBGDRIV_MESSAGE, but when debugging
+ * you can temporarily change this to PVR_DBG_ERROR.
+ */
+#if defined(PVRSRV_NEED_PVR_DPF)
+#define PVR_DEBUGFS_PVR_DPF_LEVEL      PVR_DBGDRIV_MESSAGE
+#else
+#define PVR_DEBUGFS_PVR_DPF_LEVEL      0
+#endif
+
+static struct dentry *gpsPVRDebugFSEntryDir = NULL;
+
+/* Lock used when adjusting refCounts and deleting entries */
+static struct mutex gDebugFSLock;
+
+/*************************************************************************/ /*!
+ Statistic entry read functions
+*/ /**************************************************************************/
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+typedef struct _PVR_DEBUGFS_RAW_DRIVER_STAT_
+{
+	OS_STATS_PRINT_FUNC *pfStatsPrint;
+	PPVR_DEBUGFS_ENTRY_DATA pvDebugFsEntry;
+} PVR_DEBUGFS_RAW_DRIVER_STAT;
+#endif
+
+typedef struct _PVR_DEBUGFS_DRIVER_STAT_
+{
+	void				 *pvData;
+	OS_STATS_PRINT_FUNC  *pfnStatsPrint;
+	PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC	*pfnIncStatMemRefCount;
+	PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC	*pfnDecStatMemRefCount;
+	IMG_UINT32				ui32RefCount;
+	PPVR_DEBUGFS_ENTRY_DATA	pvDebugFSEntry;
+} PVR_DEBUGFS_DRIVER_STAT;
+
+typedef struct _PVR_DEBUGFS_DIR_DATA_
+{
+	struct dentry *psDir;
+	PPVR_DEBUGFS_DIR_DATA psParentDir;
+	IMG_UINT32	ui32RefCount;
+} PVR_DEBUGFS_DIR_DATA;
+
+typedef struct _PVR_DEBUGFS_ENTRY_DATA_
+{
+	struct dentry *psEntry;
+	PVR_DEBUGFS_DIR_DATA *psParentDir;
+	IMG_UINT32	ui32RefCount;
+	PVR_DEBUGFS_DRIVER_STAT *psStatData;
+} PVR_DEBUGFS_ENTRY_DATA;
+
+typedef struct _PVR_DEBUGFS_BLOB_ENTRY_DATA_
+{
+	struct dentry *psEntry;
+	PVR_DEBUGFS_DIR_DATA *psParentDir;
+	struct debugfs_blob_wrapper blob;
+} PVR_DEBUGFS_BLOB_ENTRY_DATA;
+
+typedef struct _PVR_DEBUGFS_PRIV_DATA_
+{
+	const struct seq_operations *psReadOps;
+	PVRSRV_ENTRY_WRITE_FUNC	*pfnWrite;
+	void			*pvData;
+	PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN *pfIncPvDataRefCnt;
+	PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN *pfDecPvDataRefCnt;
+	IMG_BOOL		bValid;
+	PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+} PVR_DEBUGFS_PRIV_DATA;
+
+static IMG_BOOL _RefDirEntry(PVR_DEBUGFS_DIR_DATA *psDirEntry);
+static inline void _UnrefAndMaybeDestroyDirEntry(PVR_DEBUGFS_DIR_DATA **ppsDirEntry);
+static void _UnrefAndMaybeDestroyDirEntryWhileLocked(PVR_DEBUGFS_DIR_DATA **ppsDirEntry);
+static IMG_BOOL _RefDebugFSEntryNoLock(PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry);
+static void _UnrefAndMaybeDestroyDebugFSEntry(PVR_DEBUGFS_ENTRY_DATA **ppsDebugFSEntry);
+static IMG_BOOL _RefStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry);
+static IMG_BOOL _UnrefAndMaybeDestroyStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry);
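+
+/* The ref/unref helpers above all adjust their reference counts under
+ * gDebugFSLock; a stat, entry or directory is only destroyed once its
+ * count reaches zero, so an open DebugFS file keeps its backing data
+ * alive even if the entry is removed concurrently.
+ */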
+
+static void _StatsSeqPrintf(void *pvFile, const IMG_CHAR *pszFormat, ...)
+{
+	IMG_CHAR  szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+	va_list  ArgList;
+
+	va_start(ArgList, pszFormat);
+	vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+	seq_printf((struct seq_file *)pvFile, "%s", szBuffer);
+	va_end(ArgList);
+}
+
+static void *_DebugFSStatisticSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)psSeqFile->private;
+
+	if (psStatData)
+	{
+		/* take reference on psStatData (for duration of stat iteration) */
+		if (!_RefStatEntry(psStatData))
+		{
+			PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called for '%s' but failed"
+			        " to take ref on stat entry, returning -EIO(%d)", __func__,
+			        psStatData->pvDebugFSEntry->psEntry->d_iname, -EIO));
+			return NULL;
+		}
+
+		if (*puiPosition == 0)
+		{
+			return psStatData;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL", __FUNCTION__));
+	}
+
+	return NULL;
+}
+
+static void _DebugFSStatisticSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)psSeqFile->private;
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	if (psStatData)
+	{
+		/* drop ref taken on stat memory, and if it is now zero, be sure we don't try to read it again */
+		if (psStatData->ui32RefCount > 0)
+		{
+			/* drop reference on psStatData (held for duration of stat iteration) */
+			_UnrefAndMaybeDestroyStatEntry((void*)psStatData);
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: PVR_DEBUGFS_DRIVER_STAT has zero refcount",
+											__func__));
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL", __FUNCTION__));
+	}
+}
+
+static void *_DebugFSStatisticSeqNext(struct seq_file *psSeqFile,
+				      void *pvData,
+				      loff_t *puiPosition)
+{
+	PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)psSeqFile->private;
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	if (psStatData)
+	{
+		if (psStatData->pvData)
+		{
+			if (puiPosition)
+			{
+				(*puiPosition)++;
+			}
+			else
+			{
+				PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called with puiPosition NULL", __FUNCTION__));
+			}
+		}
+		else
+		{
+			/* psStatData->pvData is NULL. NB: this is valid if the stat has no
+			 * structure associated with it (e.g. driver_stats, which prints
+			 * totals stored in a number of global vars). */
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL", __FUNCTION__));
+	}
+
+	return NULL;
+}
+
+static int _DebugFSStatisticSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)pvData;
+
+	if (psStatData != NULL)
+	{
+		psStatData->pfnStatsPrint((void*)psSeqFile, psStatData->pvData, _StatsSeqPrintf);
+		return 0;
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL, returning -ENODATA(%d)", __FUNCTION__, -ENODATA));
+	}
+
+	return -ENODATA;
+}
+
+static struct seq_operations gsDebugFSStatisticReadOps =
+{
+	.start = _DebugFSStatisticSeqStart,
+	.stop  = _DebugFSStatisticSeqStop,
+	.next  = _DebugFSStatisticSeqNext,
+	.show  = _DebugFSStatisticSeqShow,
+};
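+
+/* Note: _DebugFSStatisticSeqStart() only returns a record at position 0 and
+ * _DebugFSStatisticSeqNext() always returns NULL, so the whole statistic is
+ * emitted by a single show() call rather than by iterating over records. */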
+
+
+/*************************************************************************/ /*!
+ Common internal API
+*/ /**************************************************************************/
+
+static int _DebugFSFileOpen(struct inode *psINode, struct file *psFile)
+{
+	PVR_DEBUGFS_PRIV_DATA *psPrivData;
+	int iResult = -EIO;
+	IMG_BOOL bRefRet = IMG_FALSE;
+	PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry = NULL;
+
+	mutex_lock(&gDebugFSLock);
+
+	PVR_ASSERT(psINode);
+	psPrivData = (PVR_DEBUGFS_PRIV_DATA *)psINode->i_private;
+
+	if (psPrivData)
+	{
+		/* Check that psPrivData is still valid to use */
+		if (psPrivData->bValid)
+		{
+			psDebugFSEntry = psPrivData->psDebugFSEntry;
+
+			/* Take ref on stat entry before opening seq file - this ref will be dropped if we
+			 * fail to open the seq file or when we close it
+			 */
+			if (psDebugFSEntry)
+			{
+				bRefRet = _RefDebugFSEntryNoLock(psDebugFSEntry);
+				mutex_unlock(&gDebugFSLock);
+				if (psPrivData->pfIncPvDataRefCnt)
+				{
+					psPrivData->pfIncPvDataRefCnt(psPrivData->pvData);
+				}
+				if (bRefRet)
+				{
+					iResult = seq_open(psFile, psPrivData->psReadOps);
+					if (iResult == 0)
+					{
+						struct seq_file *psSeqFile = psFile->private_data;
+
+						psSeqFile->private = psPrivData->pvData;
+					}
+					else
+					{
+						if (psPrivData->pfDecPvDataRefCnt)
+						{
+							psPrivData->pfDecPvDataRefCnt(psPrivData->pvData);
+						}
+						/* Drop ref if we failed to open seq file */
+						_UnrefAndMaybeDestroyDebugFSEntry(&psPrivData->psDebugFSEntry);
+						PVR_DPF((PVR_DBG_ERROR, "%s: Failed to seq_open psFile, returning %d", __FUNCTION__, iResult));
+					}
+				}
+			}
+			else
+			{
+				mutex_unlock(&gDebugFSLock);
+			}
+		}
+		else
+		{
+			mutex_unlock(&gDebugFSLock);
+		}
+	}
+	else
+	{
+		mutex_unlock(&gDebugFSLock);
+	}
+
+	return iResult;
+}
+
+static int _DebugFSFileClose(struct inode *psINode, struct file *psFile)
+{
+	int iResult;
+	PVR_DEBUGFS_PRIV_DATA *psPrivData = (PVR_DEBUGFS_PRIV_DATA *)psINode->i_private;
+	PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry = NULL;
+
+	if (psPrivData)
+	{
+		psDebugFSEntry = psPrivData->psDebugFSEntry;
+	}
+	iResult = seq_release(psINode, psFile);
+	if (psDebugFSEntry)
+	{
+		_UnrefAndMaybeDestroyDebugFSEntry(&psPrivData->psDebugFSEntry);
+	}
+	if (psPrivData->pfDecPvDataRefCnt)
+	{
+		psPrivData->pfDecPvDataRefCnt(psPrivData->pvData);
+	}
+	return iResult;
+}
+
+static ssize_t _DebugFSFileWrite(struct file *psFile,
+				 const char __user *pszBuffer,
+				 size_t uiCount,
+				 loff_t *puiPosition)
+{
+	struct inode *psINode = psFile->f_path.dentry->d_inode;
+	PVR_DEBUGFS_PRIV_DATA *psPrivData = (PVR_DEBUGFS_PRIV_DATA *)psINode->i_private;
+
+	if (psPrivData->pfnWrite == NULL)
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called for file '%s', which does not have pfnWrite defined, returning -EIO(%d)", __FUNCTION__, psFile->f_path.dentry->d_iname, -EIO));
+		return -EIO;
+	}
+
+	return psPrivData->pfnWrite(pszBuffer, uiCount, puiPosition, psPrivData->pvData);
+}
+
+static const struct file_operations gsPVRDebugFSFileOps =
+{
+	.owner = THIS_MODULE,
+	.open = _DebugFSFileOpen,
+	.read = seq_read,
+	.write = _DebugFSFileWrite,
+	.llseek = seq_lseek,
+	.release = _DebugFSFileClose,
+};
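+
+/* These file_operations back every entry made by PVRDebugFSCreateEntry():
+ * open() takes references on the entry (and, optionally, on its private
+ * data) which release() drops again, so the entry memory cannot be freed
+ * while a client still holds the file open. */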
+
+
+/*************************************************************************/ /*!
+ Public API
+*/ /**************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSInit
+@Description    Initialise PVR debugfs support. This should be called before
+                using any PVRDebugFS functions.
+@Return         int      On success, returns 0. Otherwise, returns an
+                         error code.
+*/ /**************************************************************************/
+int PVRDebugFSInit(void)
+{
+	PVR_ASSERT(gpsPVRDebugFSEntryDir == NULL);
+
+	mutex_init(&gDebugFSLock);
+
+	gpsPVRDebugFSEntryDir = debugfs_create_dir(PVR_DEBUGFS_DIR_NAME, NULL);
+	if (gpsPVRDebugFSEntryDir == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create '%s' debugfs root directory",
+			 __FUNCTION__, PVR_DEBUGFS_DIR_NAME));
+
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSDeInit
+@Description    Deinitialise PVR debugfs support. This should be called only
+                if PVRDebugFSInit() has already been called. All debugfs
+                directories and entries should be removed otherwise this
+                function will fail.
+@Return         void
+*/ /**************************************************************************/
+void PVRDebugFSDeInit(void)
+{
+	if (gpsPVRDebugFSEntryDir != NULL)
+	{
+		debugfs_remove(gpsPVRDebugFSEntryDir);
+		gpsPVRDebugFSEntryDir = NULL;
+		mutex_destroy(&gDebugFSLock);
+	}
+}
+
+/*************************************************************************/ /*!
+@Function		PVRDebugFSCreateEntryDir
+@Description	Create a directory for debugfs entries that will be located
+				under the root directory, as created by
+				PVRDebugFSInit().
+@Input			pszName		 String containing the name for the directory.
+@Input			psParentDir	 The parent directory in which to create the new
+							 directory. This should either be NULL, meaning it
+							 should be created in the root directory, or a
+							 pointer to a directory as returned by this
+							 function.
+@Output			ppsNewDir	 On success, points to the newly created
+							 directory.
+@Return			int			 On success, returns 0. Otherwise, returns an
+							 error code.
+*/ /**************************************************************************/
+int PVRDebugFSCreateEntryDir(IMG_CHAR *pszName,
+				 PVR_DEBUGFS_DIR_DATA *psParentDir,
+				 PVR_DEBUGFS_DIR_DATA **ppsNewDir)
+{
+	PVR_DEBUGFS_DIR_DATA *psNewDir;
+
+	PVR_ASSERT(gpsPVRDebugFSEntryDir != NULL);
+
+	if (pszName == NULL || ppsNewDir == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid param", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	psNewDir = OSAllocMemNoStats(sizeof(*psNewDir));
+
+	if (psNewDir == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot allocate memory for '%s' pvr_debugfs structure",
+			 __FUNCTION__, pszName));
+		return -ENOMEM;
+	}
+
+	psNewDir->psParentDir = psParentDir;
+	psNewDir->psDir = debugfs_create_dir(pszName, (psNewDir->psParentDir) ? psNewDir->psParentDir->psDir : gpsPVRDebugFSEntryDir);
+
+	if (psNewDir->psDir == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create '%s' debugfs directory",
+			 __FUNCTION__, pszName));
+
+		OSFreeMemNoStats(psNewDir);
+		return -ENOMEM;
+	}
+
+	*ppsNewDir = psNewDir;
+	psNewDir->ui32RefCount = 1;
+
+	/* if parent directory is not gpsPVRDebugFSEntryDir, increment its refCount */
+	if (psNewDir->psParentDir)
+	{
+		/* if we fail to acquire the reference that probably means that
+		 * parent dir was already freed - we have to cleanup in this situation */
+		if (!_RefDirEntry(psNewDir->psParentDir))
+		{
+			_UnrefAndMaybeDestroyDirEntry(ppsNewDir);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
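+
+/* Usage sketch (illustrative only, not driver code): create a sub-directory
+ * under the debugfs root and remove it again. The name "gpu_stats" and the
+ * variable below are hypothetical.
+ *
+ *	PVR_DEBUGFS_DIR_DATA *psStatsDir = NULL;
+ *
+ *	if (PVRDebugFSCreateEntryDir("gpu_stats", NULL, &psStatsDir) == 0)
+ *	{
+ *		... create entries under psStatsDir ...
+ *		PVRDebugFSRemoveEntryDir(&psStatsDir);
+ *	}
+ */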
+
+/*************************************************************************/ /*!
+@Function		PVRDebugFSRemoveEntryDir
+@Description	Remove a directory that was created by
+				PVRDebugFSCreateEntryDir(). Any directories or files created
+				under the directory being removed should be removed first.
+@Input          ppsDir       Pointer representing the directory to be removed.
+                             Has to be double pointer to avoid possible races
+                             and use-after-free situations.
+@Return			void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveEntryDir(PVR_DEBUGFS_DIR_DATA **ppsDir)
+{
+	_UnrefAndMaybeDestroyDirEntry(ppsDir);
+}
+
+/*************************************************************************/ /*!
+@Function		PVRDebugFSCreateEntry
+@Description	Create an entry in the specified directory.
+@Input			pszName			String containing the name for the entry.
+@Input			psParentDir		Pointer from PVRDebugFSCreateEntryDir()
+								representing the directory in which to create
+								the entry or NULL for the root directory.
+@Input			psReadOps		Pointer to structure containing the necessary
+								functions to read from the entry.
+@Input			pfnWrite		Callback function used to write to the entry.
+								This function must update the offset pointer
+								before it returns.
+@Input			pvData			Private data to be passed to the read
+								functions, in the seq_file private member, and
+								the write function callback.
+@Output			ppsNewEntry		On success, points to the newly created entry.
+@Return			int				On success, returns 0. Otherwise, returns an
+								error code.
+*/ /**************************************************************************/
+int PVRDebugFSCreateEntry(const char *pszName,
+			  PVR_DEBUGFS_DIR_DATA *psParentDir,
+			  const struct seq_operations *psReadOps,
+			  PVRSRV_ENTRY_WRITE_FUNC *pfnWrite,
+			  PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN *pfnIncPvDataRefCnt,
+			  PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN *pfnDecPvDataRefCnt,
+			  void *pvData,
+			  PVR_DEBUGFS_ENTRY_DATA **ppsNewEntry)
+{
+	PVR_DEBUGFS_PRIV_DATA *psPrivData;
+	PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+	struct dentry *psEntry;
+	umode_t uiMode;
+
+	PVR_ASSERT(gpsPVRDebugFSEntryDir != NULL);
+	PVR_ASSERT(!((pfnIncPvDataRefCnt != NULL && pfnDecPvDataRefCnt == NULL) ||
+	           (pfnIncPvDataRefCnt == NULL && pfnDecPvDataRefCnt != NULL)));
+
+	psPrivData = OSAllocMemNoStats(sizeof(*psPrivData));
+	if (psPrivData == NULL)
+	{
+		return -ENOMEM;
+	}
+	psDebugFSEntry = OSAllocMemNoStats(sizeof(*psDebugFSEntry));
+	if (psDebugFSEntry == NULL)
+	{
+		OSFreeMemNoStats(psPrivData);
+		return -ENOMEM;
+	}
+
+	psPrivData->psReadOps = psReadOps;
+	psPrivData->pfnWrite = pfnWrite;
+	psPrivData->pvData = (void*)pvData;
+	psPrivData->pfIncPvDataRefCnt = pfnIncPvDataRefCnt;
+	psPrivData->pfDecPvDataRefCnt = pfnDecPvDataRefCnt;
+	psPrivData->bValid = IMG_TRUE;
+	/* Store ptr to debugFSEntry in psPrivData, so a ref can be taken on it
+	 * when the client opens a file */
+	psPrivData->psDebugFSEntry = psDebugFSEntry;
+
+	uiMode = S_IFREG;
+
+	if (psReadOps != NULL)
+	{
+		uiMode |= S_IRUGO;
+	}
+
+	if (pfnWrite != NULL)
+	{
+		uiMode |= S_IWUSR;
+	}
+
+	psDebugFSEntry->psParentDir = psParentDir;
+	psDebugFSEntry->ui32RefCount = 1;
+	psDebugFSEntry->psStatData = (PVR_DEBUGFS_DRIVER_STAT*)pvData;
+
+	if (psDebugFSEntry->psParentDir)
+	{
+		/* increment refCount of parent directory */
+		if (!_RefDirEntry(psDebugFSEntry->psParentDir))
+		{
+			OSFreeMemNoStats(psDebugFSEntry);
+			OSFreeMemNoStats(psPrivData);
+			return -EFAULT;
+		}
+	}
+
+	psEntry = debugfs_create_file(pszName,
+				      uiMode,
+				      (psParentDir != NULL) ? psParentDir->psDir : gpsPVRDebugFSEntryDir,
+				      psPrivData,
+				      &gsPVRDebugFSFileOps);
+	if (IS_ERR_OR_NULL(psEntry))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create debugfs '%s' file",
+			 __FUNCTION__, pszName));
+
+		/* Clean up on failure so the entry, its private data and the
+		 * reference taken on the parent directory are not leaked */
+		if (psDebugFSEntry->psParentDir)
+		{
+			_UnrefAndMaybeDestroyDirEntry(&psDebugFSEntry->psParentDir);
+		}
+		OSFreeMemNoStats(psDebugFSEntry);
+		OSFreeMemNoStats(psPrivData);
+		return psEntry ? PTR_ERR(psEntry) : -ENOMEM;
+	}
+
+	psDebugFSEntry->psEntry = psEntry;
+	*ppsNewEntry = (void*)psDebugFSEntry;
+
+	return 0;
+}
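+
+/* Usage sketch (illustrative only): callers typically pair this with a
+ * seq_operations table for reads and an optional write handler; the names
+ * gsMyReadOps and gpsMyEntry below are hypothetical.
+ *
+ *	static PVR_DEBUGFS_ENTRY_DATA *gpsMyEntry;
+ *
+ *	if (PVRDebugFSCreateEntry("my_entry", NULL, &gsMyReadOps, NULL,
+ *	                          NULL, NULL, NULL, &gpsMyEntry) == 0)
+ *	{
+ *		... later, at teardown: PVRDebugFSRemoveEntry(&gpsMyEntry); ...
+ *	}
+ */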
+
+/*************************************************************************/ /*!
+@Function		PVRDebugFSRemoveEntry
+@Description	Removes an entry that was created by PVRDebugFSCreateEntry().
+@Input          ppsDebugFSEntry  Pointer representing the entry to be removed.
+                Has to be double pointer to avoid possible races
+                and use-after-free situations.
+@Return			void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveEntry(PVR_DEBUGFS_ENTRY_DATA **ppsDebugFSEntry)
+{
+	_UnrefAndMaybeDestroyDebugFSEntry(ppsDebugFSEntry);
+}
+
+/*************************************************************************/ /*!
+@Function		PVRDebugFSCreateStatisticEntry
+@Description	Create a statistic entry in the specified directory.
+@Input			pszName			String containing the name for the entry.
+@Input			psDir			Pointer from PVRDebugFSCreateEntryDir()
+								representing the directory in which to create
+								the entry or NULL for the root directory.
+@Input			pfnStatsPrint	A callback function used to print all the
+								statistics when reading from the statistic
+								entry.
+@Input			pfnIncStatMemRefCount	A callback function used to take a
+										reference on the memory backing the
+										statistic.
+@Input			pfnDecStatMemRefCount	A callback function used to drop a
+										reference on the memory backing the
+										statistic.
+@Input			pvData			Private data to be passed to the provided
+								callback function.
+
+@Return			PVR_DEBUGFS_DRIVER_STAT*   On success, a pointer representing
+										   the newly created statistic entry.
+										   Otherwise, NULL.
+*/ /**************************************************************************/
+PVR_DEBUGFS_DRIVER_STAT *PVRDebugFSCreateStatisticEntry(const char *pszName,
+					 PVR_DEBUGFS_DIR_DATA *psDir,
+					 OS_STATS_PRINT_FUNC *pfnStatsPrint,
+					 PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC *pfnIncStatMemRefCount,
+					 PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC *pfnDecStatMemRefCount,
+					 void *pvData)
+{
+	PVR_DEBUGFS_DRIVER_STAT *psStatData;
+	PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+
+	int iResult;
+
+	if (pszName == NULL || pfnStatsPrint == NULL)
+	{
+		return NULL;
+	}
+	if ((pfnIncStatMemRefCount != NULL || pfnDecStatMemRefCount != NULL) && pvData == NULL)
+	{
+		return NULL;
+	}
+
+	psStatData = OSAllocZMemNoStats(sizeof(*psStatData));
+	if (psStatData == NULL)
+	{
+		return NULL;
+	}
+
+	psStatData->pvData = pvData;
+	psStatData->pfnStatsPrint = pfnStatsPrint;
+	psStatData->pfnIncStatMemRefCount = pfnIncStatMemRefCount;
+	psStatData->pfnDecStatMemRefCount = pfnDecStatMemRefCount;
+	psStatData->ui32RefCount = 1;
+
+	iResult = PVRDebugFSCreateEntry(pszName,
+					psDir,
+					&gsDebugFSStatisticReadOps,
+					NULL,
+					(PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN *) _RefStatEntry,
+					(PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN *) _UnrefAndMaybeDestroyStatEntry,
+					psStatData,
+					&psDebugFSEntry);
+	if (iResult != 0)
+	{
+		OSFreeMemNoStats(psStatData);
+		return NULL;
+	}
+	psStatData->pvDebugFSEntry = (void*)psDebugFSEntry;
+
+	if (pfnIncStatMemRefCount)
+	{
+		/* call function to take reference on the memory holding the stat */
+		psStatData->pfnIncStatMemRefCount((void*)psStatData->pvData);
+	}
+
+	psDebugFSEntry->ui32RefCount = 1;
+
+	return psStatData;
+}
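+
+/* Usage sketch (illustrative only): the print callback is invoked from
+ * _DebugFSStatisticSeqShow() above with the seq_file handle, the caller's
+ * private data and a printf-style helper. MyStatsPrint and guiAllocCount
+ * are hypothetical names.
+ *
+ *	static void MyStatsPrint(void *pvFile, void *pvStatPtr,
+ *	                         OS_STATS_PRINT_FUNC *pfnOSStatsPrintf)
+ *	{
+ *		pfnOSStatsPrintf(pvFile, "Allocations: %u\n", guiAllocCount);
+ *	}
+ *
+ *	psStat = PVRDebugFSCreateStatisticEntry("my_stats", NULL, MyStatsPrint,
+ *	                                        NULL, NULL, NULL);
+ */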
+
+/*************************************************************************/ /*!
+@Function		PVRDebugFSRemoveStatisticEntry
+@Description	Removes a statistic entry that was created by
+				PVRDebugFSCreateStatisticEntry().
+@Input			psStatEntry  Pointer representing the statistic entry to be
+							 removed.
+@Return			void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveStatisticEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry)
+{
+	PVR_ASSERT(psStatEntry != NULL);
+	/* drop reference on pvStatEntry*/
+	_UnrefAndMaybeDestroyStatEntry(psStatEntry);
+}
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+static void *_DebugFSRawStatisticSeqStart(struct seq_file *psSeqFile,
+                                          loff_t *puiPosition)
+{
+	PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData =
+	        (PVR_DEBUGFS_RAW_DRIVER_STAT *) psSeqFile->private;
+
+	if (psStatData)
+	{
+		if (*puiPosition == 0)
+		{
+			return psStatData;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is"
+		        " NULL", __func__));
+	}
+
+	return NULL;
+}
+
+static void _DebugFSRawStatisticSeqStop(struct seq_file *psSeqFile,
+                                        void *pvData)
+{
+	PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData =
+	        (PVR_DEBUGFS_RAW_DRIVER_STAT *) psSeqFile->private;
+
+	if (!psStatData)
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is"
+		        " NULL", __func__));
+	}
+}
+
+static void *_DebugFSRawStatisticSeqNext(struct seq_file *psSeqFile,
+                                         void *pvData,
+                                         loff_t *puiPosition)
+{
+	PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData =
+	        (PVR_DEBUGFS_RAW_DRIVER_STAT *) psSeqFile->private;
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	if (!psStatData)
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is"
+		        " NULL", __func__));
+	}
+
+	return NULL;
+}
+
+static int _DebugFSRawStatisticSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData =
+	        (PVR_DEBUGFS_RAW_DRIVER_STAT *) pvData;
+
+	if (psStatData != NULL)
+	{
+		psStatData->pfStatsPrint((void *) psSeqFile, NULL,
+		                         _StatsSeqPrintf);
+		return 0;
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is"
+		        " NULL, returning -ENODATA(%d)", __FUNCTION__, -ENODATA));
+	}
+
+	return -ENODATA;
+}
+
+static struct seq_operations gsDebugFSRawStatisticReadOps =
+{
+	.start = _DebugFSRawStatisticSeqStart,
+	.stop  = _DebugFSRawStatisticSeqStop,
+	.next  = _DebugFSRawStatisticSeqNext,
+	.show  = _DebugFSRawStatisticSeqShow,
+};
+
+PVR_DEBUGFS_RAW_DRIVER_STAT *PVRDebugFSCreateRawStatisticEntry(
+                                             const IMG_CHAR *pszFileName,
+                                             void *pvParentDir,
+                                             OS_STATS_PRINT_FUNC *pfStatsPrint)
+{
+	PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData;
+	PVR_DEBUGFS_ENTRY_DATA *psDebugFsEntry;
+
+	int iResult;
+
+	if (pszFileName == NULL || pfStatsPrint == NULL)
+	{
+		return NULL;
+	}
+
+	psStatData = OSAllocZMemNoStats(sizeof(*psStatData));
+	if (psStatData == NULL)
+	{
+		return NULL;
+	}
+
+	psStatData->pfStatsPrint = pfStatsPrint;
+
+	PVR_ASSERT(pvParentDir == NULL);
+
+	iResult = PVRDebugFSCreateEntry(pszFileName,
+	                                pvParentDir,
+	                                &gsDebugFSRawStatisticReadOps,
+	                                NULL,
+	                                NULL,
+	                                NULL,
+	                                psStatData,
+	                                &psDebugFsEntry);
+	if (iResult != 0)
+	{
+		OSFreeMemNoStats(psStatData);
+		return NULL;
+	}
+	psStatData->pvDebugFsEntry = (void *) psDebugFsEntry;
+
+	psDebugFsEntry->ui32RefCount = 1;
+
+	return psStatData;
+}
+
+void PVRDebugFSRemoveRawStatisticEntry(PVR_DEBUGFS_RAW_DRIVER_STAT *psStatEntry)
+{
+	PVR_ASSERT(psStatEntry != NULL);
+
+	PVRDebugFSRemoveEntry(&psStatEntry->pvDebugFsEntry);
+	OSFreeMemNoStats(psStatEntry);
+}
+#endif
+
+static IMG_BOOL _RefDirEntry(PVR_DEBUGFS_DIR_DATA *psDirEntry)
+{
+	IMG_BOOL bStatus = IMG_FALSE;
+
+	PVR_ASSERT(psDirEntry != NULL && psDirEntry->psDir != NULL);
+
+	mutex_lock(&gDebugFSLock);
+
+	if (psDirEntry->ui32RefCount > 0)
+	{
+		/* Increment refCount */
+		psDirEntry->ui32RefCount++;
+		bStatus = IMG_TRUE;
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to ref psDirEntry '%s'"
+		        " when ui32RefCount is zero", __FUNCTION__,
+		        psDirEntry->psDir->d_iname));
+	}
+
+	mutex_unlock(&gDebugFSLock);
+
+	return bStatus;
+}
+
+static void _UnrefAndMaybeDestroyDirEntryWhileLocked(PVR_DEBUGFS_DIR_DATA **ppsDirEntry)
+{
+	PVR_DEBUGFS_DIR_DATA *psDirEntry = *ppsDirEntry;
+
+	PVR_ASSERT(psDirEntry != NULL && psDirEntry->psDir != NULL);
+
+	if (psDirEntry->ui32RefCount > 0)
+	{
+		/* Decrement refCount and free if now zero */
+		if (--psDirEntry->ui32RefCount == 0)
+		{
+			/* if parent directory is not gpsPVRDebugFSEntryDir, decrement its refCount */
+			debugfs_remove(psDirEntry->psDir);
+			if (psDirEntry->psParentDir)
+			{
+				_UnrefAndMaybeDestroyDirEntryWhileLocked(&psDirEntry->psParentDir);
+			}
+			OSFreeMemNoStats(psDirEntry);
+			*ppsDirEntry = NULL;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psDirEntry '%s'"
+		        " when ui32RefCount is zero", __FUNCTION__,
+		        psDirEntry->psDir->d_iname));
+	}
+}
+
+static inline void _UnrefAndMaybeDestroyDirEntry(PVR_DEBUGFS_DIR_DATA **ppsDirEntry)
+{
+	mutex_lock(&gDebugFSLock);
+	_UnrefAndMaybeDestroyDirEntryWhileLocked(ppsDirEntry);
+	mutex_unlock(&gDebugFSLock);
+}
+
+static IMG_BOOL _RefDebugFSEntryNoLock(PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry)
+{
+	IMG_BOOL bResult = IMG_FALSE;
+
+	PVR_ASSERT(psDebugFSEntry != NULL);
+
+	bResult = (psDebugFSEntry->ui32RefCount > 0);
+	if (bResult)
+	{
+		/* Increment refCount of psDebugFSEntry */
+		psDebugFSEntry->ui32RefCount++;
+	}
+
+	return bResult;
+}
+
+static void _UnrefAndMaybeDestroyDebugFSEntry(PVR_DEBUGFS_ENTRY_DATA **ppsDebugFSEntry)
+{
+	PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+
+	mutex_lock(&gDebugFSLock);
+	/* Decrement refCount of psDebugFSEntry, and free if now zero */
+	psDebugFSEntry = *ppsDebugFSEntry;
+	PVR_ASSERT(psDebugFSEntry != NULL);
+
+	if (psDebugFSEntry->ui32RefCount > 0)
+	{
+		if (--psDebugFSEntry->ui32RefCount == 0)
+		{
+			struct dentry *psEntry = psDebugFSEntry->psEntry;
+
+			if (psEntry)
+			{
+				/* Free any private data that was provided to debugfs_create_file() */
+				if (psEntry->d_inode->i_private != NULL)
+				{
+					PVR_DEBUGFS_PRIV_DATA *psPrivData = (PVR_DEBUGFS_PRIV_DATA*)psDebugFSEntry->psEntry->d_inode->i_private;
+
+					psPrivData->bValid = IMG_FALSE;
+					psPrivData->psDebugFSEntry = NULL;
+					OSFreeMemNoStats(psEntry->d_inode->i_private);
+					psEntry->d_inode->i_private = NULL;
+				}
+				debugfs_remove(psEntry);
+			}
+			/* decrement refcount of parent directory */
+			if (psDebugFSEntry->psParentDir)
+			{
+				_UnrefAndMaybeDestroyDirEntryWhileLocked(&psDebugFSEntry->psParentDir);
+			}
+
+			/* now free the memory allocated for psDebugFSEntry */
+			OSFreeMemNoStats(psDebugFSEntry);
+			*ppsDebugFSEntry = NULL;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psDebugFSEntry '%s' when ui32RefCount is zero", __FUNCTION__, psDebugFSEntry->psEntry->d_iname));
+	}
+
+	mutex_unlock(&gDebugFSLock);
+}
+
+static IMG_BOOL _RefStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry)
+{
+	IMG_BOOL bResult = IMG_FALSE;
+
+	PVR_ASSERT(psStatEntry != NULL);
+
+	mutex_lock(&gDebugFSLock);
+
+	bResult = (psStatEntry->ui32RefCount > 0);
+	if (bResult)
+	{
+		/* Increment refCount of psStatEntry */
+		psStatEntry->ui32RefCount++;
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to ref psStatEntry '%s' when ui32RefCount is zero", __FUNCTION__, psStatEntry->pvDebugFSEntry->psEntry->d_iname));
+	}
+
+	mutex_unlock(&gDebugFSLock);
+
+	return bResult;
+}
+
+static IMG_BOOL _UnrefAndMaybeDestroyStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry)
+{
+	IMG_BOOL bResult;
+
+	PVR_ASSERT(psStatEntry != NULL);
+
+	mutex_lock(&gDebugFSLock);
+
+	bResult = (psStatEntry->ui32RefCount > 0);
+
+	if (bResult)
+	{
+		/* Decrement refCount of psStatData, and free if now zero */
+		if (--psStatEntry->ui32RefCount == 0)
+		{
+			mutex_unlock(&gDebugFSLock);
+
+			if (psStatEntry->pvDebugFSEntry)
+			{
+				_UnrefAndMaybeDestroyDebugFSEntry((PVR_DEBUGFS_ENTRY_DATA**)&psStatEntry->pvDebugFSEntry);
+			}
+			if (psStatEntry->pfnDecStatMemRefCount)
+			{
+				/* call function to drop reference on the memory holding the stat */
+				psStatEntry->pfnDecStatMemRefCount((void*)psStatEntry->pvData);
+			}
+			OSFreeMemNoStats(psStatEntry);
+		}
+		else
+		{
+			mutex_unlock(&gDebugFSLock);
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psStatEntry '%s' when ui32RefCount is zero", __FUNCTION__, psStatEntry->pvDebugFSEntry->psEntry->d_iname));
+		mutex_unlock(&gDebugFSLock);
+	}
+
+	return bResult;
+}
+
+int PVRDebugFSCreateBlobEntry(const char *pszName,
+			  PVR_DEBUGFS_DIR_DATA *psParentDir,
+			  void *pvData,
+			  unsigned long size,
+			  PVR_DEBUGFS_BLOB_ENTRY_DATA **ppsNewEntry)
+{
+	PVR_DEBUGFS_BLOB_ENTRY_DATA *psDebugFSEntry;
+	struct dentry *psEntry;
+	umode_t uiMode;
+
+	PVR_ASSERT(gpsPVRDebugFSEntryDir != NULL);
+
+	psDebugFSEntry = OSAllocMemNoStats(sizeof(*psDebugFSEntry));
+	if (psDebugFSEntry == NULL)
+	{
+		return -ENOMEM;
+	}
+
+	uiMode = S_IFREG | S_IRUGO;
+
+	psDebugFSEntry->psParentDir = psParentDir;
+	psDebugFSEntry->blob.data = pvData;
+	psDebugFSEntry->blob.size = size;
+
+	if (psDebugFSEntry->psParentDir)
+	{
+		/* increment refCount of parent directory */
+		if (!_RefDirEntry(psDebugFSEntry->psParentDir))
+		{
+			OSFreeMemNoStats(psDebugFSEntry);
+			return -EFAULT;
+		}
+	}
+
+	psEntry = debugfs_create_blob(pszName,
+				      uiMode,
+				      (psParentDir != NULL) ? psParentDir->psDir : gpsPVRDebugFSEntryDir,
+				      &psDebugFSEntry->blob);
+	if (IS_ERR_OR_NULL(psEntry))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create debugfs '%s' blob file",
+			 __FUNCTION__, pszName));
+
+		/* Drop the reference taken on the parent directory above */
+		if (psDebugFSEntry->psParentDir)
+		{
+			_UnrefAndMaybeDestroyDirEntry(&psDebugFSEntry->psParentDir);
+		}
+		OSFreeMemNoStats(psDebugFSEntry);
+		return psEntry ? PTR_ERR(psEntry) : -ENOMEM;
+	}
+
+	psDebugFSEntry->psEntry = psEntry;
+	*ppsNewEntry = (void*)psDebugFSEntry;
+
+	return 0;
+}
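+
+/* Usage sketch (illustrative only): expose a read-only binary buffer. The
+ * buffer must outlive the entry because debugfs serves it in place; the
+ * names below are hypothetical.
+ *
+ *	static IMG_UINT8 gaucFWInfo[16];
+ *	static PVR_DEBUGFS_BLOB_ENTRY_DATA *gpsBlobEntry;
+ *
+ *	if (PVRDebugFSCreateBlobEntry("fw_info", NULL, gaucFWInfo,
+ *	                              sizeof(gaucFWInfo), &gpsBlobEntry) == 0)
+ *	{
+ *		... later: PVRDebugFSRemoveBlobEntry(&gpsBlobEntry); ...
+ *	}
+ */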
+
+void PVRDebugFSRemoveBlobEntry(PVR_DEBUGFS_BLOB_ENTRY_DATA **ppsDebugFSEntry)
+{
+	PVR_DEBUGFS_BLOB_ENTRY_DATA *psDebugFSEntry;
+
+	PVR_ASSERT(ppsDebugFSEntry != NULL);
+
+	psDebugFSEntry = *ppsDebugFSEntry;
+	PVR_ASSERT(psDebugFSEntry != NULL);
+
+	mutex_lock(&gDebugFSLock);
+
+	if (psDebugFSEntry->psEntry)
+	{
+		debugfs_remove(psDebugFSEntry->psEntry);
+	}
+
+	/* decrement refcount of parent directory */
+	if (psDebugFSEntry->psParentDir)
+	{
+		_UnrefAndMaybeDestroyDirEntryWhileLocked(&psDebugFSEntry->psParentDir);
+	}
+
+	/* now free the memory allocated for psDebugFSEntry */
+	OSFreeMemNoStats(psDebugFSEntry);
+	*ppsDebugFSEntry = NULL;
+
+	mutex_unlock(&gDebugFSLock);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_debugfs.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_debugfs.h
new file mode 100644
index 0000000..c5ad960
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_debugfs.h
@@ -0,0 +1,118 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for creating debugfs directories and entries.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DEBUGFS_H__)
+#define __PVR_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "img_types.h"
+#include "osfunc.h"
+
+typedef ssize_t (PVRSRV_ENTRY_WRITE_FUNC)(const char __user *pszBuffer,
+					  size_t uiCount,
+					  loff_t *puiPosition,
+					  void *pvData);
+
+
+typedef IMG_UINT32 (PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+typedef IMG_UINT32 (PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+
+typedef IMG_UINT32 (PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN)(void *pvData);
+typedef IMG_UINT32 (PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN)(void *pvData);
+
+typedef struct _PVR_DEBUGFS_DIR_DATA_ *PPVR_DEBUGFS_DIR_DATA;
+typedef struct _PVR_DEBUGFS_ENTRY_DATA_ *PPVR_DEBUGFS_ENTRY_DATA;
+typedef struct _PVR_DEBUGFS_DRIVER_STAT_ *PPVR_DEBUGFS_DRIVER_STAT;
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+typedef struct _PVR_DEBUGFS_RAW_DRIVER_STAT_ *PPVR_DEBUGFS_RAW_DRIVER_STAT;
+#endif
+typedef struct _PVR_DEBUGFS_BLOB_ENTRY_DATA_ *PPVR_DEBUGFS_BLOB_ENTRY_DATA;
+
+int PVRDebugFSInit(void);
+void PVRDebugFSDeInit(void);
+
+int PVRDebugFSCreateEntryDir(IMG_CHAR *pszName,
+			     PPVR_DEBUGFS_DIR_DATA psParentDir,
+			     PPVR_DEBUGFS_DIR_DATA *ppsNewDir);
+
+void PVRDebugFSRemoveEntryDir(PPVR_DEBUGFS_DIR_DATA *ppsDir);
+
+int PVRDebugFSCreateEntry(const char *pszName,
+			  PPVR_DEBUGFS_DIR_DATA psParentDir,
+			  const struct seq_operations *psReadOps,
+			  PVRSRV_ENTRY_WRITE_FUNC *pfnWrite,
+			  PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN *pfnIncPvDataRefCnt,
+			  PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN *pfnDecPvDataRefCnt,
+			  void *pvData,
+			  PPVR_DEBUGFS_ENTRY_DATA *ppsNewEntry);
+
+void PVRDebugFSRemoveEntry(PPVR_DEBUGFS_ENTRY_DATA *ppsDebugFSEntry);
+
+PPVR_DEBUGFS_DRIVER_STAT PVRDebugFSCreateStatisticEntry(const char *pszName,
+		       PPVR_DEBUGFS_DIR_DATA psDir,
+		       OS_STATS_PRINT_FUNC *pfnStatsPrint,
+		       PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC *pfnIncStatMemRefCount,
+		       PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC *pfnDecStatMemRefCount,
+		       void *pvData);
+
+void PVRDebugFSRemoveStatisticEntry(PPVR_DEBUGFS_DRIVER_STAT psStatEntry);
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+PPVR_DEBUGFS_RAW_DRIVER_STAT PVRDebugFSCreateRawStatisticEntry(
+                                             const IMG_CHAR *pszFileName,
+                                             void *pvParentDir,
+                                             OS_STATS_PRINT_FUNC* pfnStatsPrint);
+
+void PVRDebugFSRemoveRawStatisticEntry(PPVR_DEBUGFS_RAW_DRIVER_STAT psStatEntry);
+#endif
+
+int PVRDebugFSCreateBlobEntry(const char *pszName,
+			  PPVR_DEBUGFS_DIR_DATA psParentDir,
+			  void *pvData,
+			  unsigned long size,
+			  PPVR_DEBUGFS_BLOB_ENTRY_DATA *ppsNewEntry);
+
+void PVRDebugFSRemoveBlobEntry(PPVR_DEBUGFS_BLOB_ENTRY_DATA *ppsDebugFSEntry);
+
+#endif /* !defined(__PVR_DEBUGFS_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_gputrace.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_gputrace.c
new file mode 100644
index 0000000..b08d0a5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_gputrace.c
@@ -0,0 +1,354 @@
+/*************************************************************************/ /*!
+@File           pvr_gputrace.c
+@Title          PVR GPU Trace module Linux implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_error.h"
+#include "srvkm.h"
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "pvr_uaccess.h"
+#include "pvrsrv.h"
+
+#include "device.h"
+#include "pvr_gputrace.h"
+#include "rgxhwperf.h"
+
+#include "trace_events.h"
+#define CREATE_TRACE_POINTS
+#include "rogue_trace_events.h"
+
+
+
+/******************************************************************************
+ Module internal implementation
+******************************************************************************/
+
+/* This lock ensures state change of GPU_TRACING on/off is done atomically */
+static POS_LOCK ghGPUTraceStateLock;
+static IMG_BOOL gbFTraceGPUEventsEnabled = IMG_FALSE;
+
+/* DebugFS entry for the feature's on/off file */
+static PPVR_DEBUGFS_ENTRY_DATA gpsPVRDebugFSGpuTracingOnEntry = NULL;
+
+
+/*
+  If SUPPORT_GPUTRACE_EVENTS is defined the driver is built with support
+  to route RGX HWPerf packets to the Linux FTrace mechanism. To allow
+  this routing feature to be switched on and off at run-time the following
+  debugfs entry is created:
+  	/sys/kernel/debug/pvr/gpu_tracing_on
+  To enable GPU events in the FTrace log, type the following on the target:
+  	echo Y > /sys/kernel/debug/pvr/gpu_tracing_on
+  To disable, type:
+  	echo N > /sys/kernel/debug/pvr/gpu_tracing_on
+
+  It is also possible to enable this feature at driver load by setting the
+  default application hint "EnableFTraceGPU=1" in /etc/powervr.ini.
+*/
+
+static void *GpuTracingSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	if (*puiPosition == 0)
+	{
+		/* We want only one entry in the sequence, one call to show() */
+		return (void*)1;
+	}
+
+	return NULL;
+}
+
+
+static void GpuTracingSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+}
+
+
+static void *GpuTracingSeqNext(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	return NULL;
+}
+
+
+static int GpuTracingSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	const IMG_CHAR *pszInit = "N\n";
+
+	if (gbFTraceGPUEventsEnabled)
+		pszInit = "Y\n";
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	seq_puts(psSeqFile, pszInit);
+	return 0;
+}
+
+
+static struct seq_operations gsGpuTracingReadOps =
+{
+	.start = GpuTracingSeqStart,
+	.stop  = GpuTracingSeqStop,
+	.next  = GpuTracingSeqNext,
+	.show  = GpuTracingSeqShow,
+};
+
+
+static IMG_INT GpuTracingSet(const IMG_CHAR *buffer, size_t count, loff_t *puiPosition, void *data)
+{
+	IMG_CHAR cFirstChar;
+	IMG_BOOL bEnable = IMG_FALSE;
+
+	PVR_UNREFERENCED_PARAMETER(data);
+
+	if (puiPosition == NULL)
+	{
+		return -EIO;
+	}
+
+	if (!count)
+	{
+		return -EINVAL;
+	}
+
+	if (pvr_copy_from_user(&cFirstChar, buffer, 1))
+	{
+		return -EFAULT;
+	}
+
+	switch (cFirstChar)
+	{
+		case '0':
+		case 'n':
+		case 'N':
+			bEnable = IMG_FALSE;
+			break;
+
+		case '1':
+		case 'y':
+		case 'Y':
+			bEnable = IMG_TRUE;
+			break;
+	}
+
+	/* Lock down the state to avoid concurrent writes */
+	OSLockAcquire(ghGPUTraceStateLock);
+
+	if (bEnable != gbFTraceGPUEventsEnabled)
+	{
+		if (PVRGpuTraceEnabledSet(bEnable) == PVRSRV_OK)
+		{
+			PVR_TRACE(("%s GPU FTrace", bEnable ? "ENABLED" : "DISABLED"));
+			gbFTraceGPUEventsEnabled = (bEnable ? IMG_TRUE : IMG_FALSE);
+		}
+		else
+		{
+			PVR_TRACE(("FAILED to %s GPU FTrace", bEnable ? "enable" : "disable"));
+			/* On failure, partial enable/disable might have resulted.
+			 * Try best to restore to previous state. Ignore error */
+			PVRGpuTraceEnabledSet(gbFTraceGPUEventsEnabled);
+		}
+	}
+	else
+	{
+		PVR_TRACE(("GPU FTrace already %s!", bEnable ? "enabled" : "disabled"));
+	}
+
+	OSLockRelease(ghGPUTraceStateLock);
+
+	*puiPosition += count;
+	return count;
+}
+
+
+/******************************************************************************
+ Module In-bound API
+******************************************************************************/
+
+
+void PVRGpuTraceClientWork(
+		PVRSRV_DEVICE_NODE *psDevNode,
+		const IMG_UINT32 ui32CtxId,
+		const IMG_UINT32 ui32JobId,
+		const IMG_CHAR* pszKickType)
+{
+	PVR_ASSERT(pszKickType);
+
+	PVR_DPF((PVR_DBG_VERBOSE, "%s(%s): contextId %u, "
+	        "jobId %u", __func__, pszKickType, ui32CtxId, ui32JobId));
+
+	if (PVRGpuTraceEnabled())
+	{
+//		trace_gpu_job_enqueue(ui32CtxId, ui32JobId, pszKickType);
+	}
+}
+
+
+void PVRGpuTraceWorkSwitch(
+		IMG_UINT64 ui64HWTimestampInOSTime,
+		const IMG_UINT32 ui32CtxId,
+		const IMG_UINT32 ui32CtxPriority,
+		const IMG_UINT32 ui32JobId,
+		const IMG_CHAR* pszWorkType,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+	PVR_ASSERT(pszWorkType);
+
+	/* Invert the priority because this is what systrace expects. Lower values
+	 * convey a higher priority to systrace. */
+//	trace_gpu_sched_switch(pszWorkType, ui64HWTimestampInOSTime,
+//			eSwType == PVR_GPUTRACE_SWITCH_TYPE_END ? 0 : ui32CtxId,
+//			2-ui32CtxPriority, ui32JobId);
+}
+
+void PVRGpuTraceUfo(
+		IMG_UINT64 ui64OSTimestamp,
+		const RGX_HWPERF_UFO_EV eEvType,
+		const IMG_UINT32 ui32ExtJobRef,
+		const IMG_UINT32 ui32CtxId,
+		const IMG_UINT32 ui32JobId,
+		const IMG_UINT32 ui32UFOCount,
+		const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+	switch (eEvType) {
+		case RGX_HWPERF_UFO_EV_UPDATE:
+			trace_rogue_ufo_updates(ui64OSTimestamp, ui32CtxId,
+			        ui32JobId, ui32UFOCount, puData);
+			break;
+		case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+			trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId,
+					ui32JobId, IMG_FALSE, ui32UFOCount, puData);
+			break;
+		case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+			trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId,
+					ui32JobId, IMG_TRUE, ui32UFOCount, puData);
+			break;
+		case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+			trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId,
+					ui32JobId, IMG_FALSE, ui32UFOCount, puData);
+			break;
+		case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+			trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId,
+					ui32JobId, IMG_TRUE, ui32UFOCount, puData);
+			break;
+		default:
+			break;
+	}
+}
+
+void PVRGpuTraceFirmware(
+		IMG_UINT64 ui64HWTimestampInOSTime,
+		const IMG_CHAR* pszWorkType,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+	trace_rogue_firmware_activity(ui64HWTimestampInOSTime, pszWorkType, eSwType);
+}
+
+void PVRGpuTraceEventsLost(
+		const RGX_HWPERF_STREAM_ID eStreamId,
+		const IMG_UINT32 ui32LastOrdinal,
+		const IMG_UINT32 ui32CurrOrdinal)
+{
+	trace_rogue_events_lost(eStreamId, ui32LastOrdinal, ui32CurrOrdinal);
+}
+
+PVRSRV_ERROR PVRGpuTraceSupportInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	eError = RGXHWPerfFTraceGPUInitSupport();
+	PVR_LOGR_IF_ERROR(eError, "RGXHWPerfFTraceGPUInitSupport");
+
+	eError = OSLockCreate(&ghGPUTraceStateLock, LOCK_TYPE_PASSIVE);
+	PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+	eError = PVRDebugFSCreateEntry("gpu_tracing_on", NULL, &gsGpuTracingReadOps,
+	                               (PVRSRV_ENTRY_WRITE_FUNC *)GpuTracingSet,
+	                               NULL, NULL, NULL,
+	                               &gpsPVRDebugFSGpuTracingOnEntry);
+	PVR_LOGR_IF_ERROR(eError, "PVRDebugFSCreateEntry");
+
+	return PVRSRV_OK;
+}
+
+void PVRGpuTraceSupportDeInit(void)
+{
+	/* Can be NULL if driver startup failed */
+	if (gpsPVRDebugFSGpuTracingOnEntry)
+	{
+		PVRDebugFSRemoveEntry(&gpsPVRDebugFSGpuTracingOnEntry);
+	}
+
+	if (ghGPUTraceStateLock)
+	{
+		OSLockDestroy(ghGPUTraceStateLock);
+	}
+
+	RGXHWPerfFTraceGPUDeInitSupport();
+}
+
+PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError;
+
+	eError = RGXHWPerfFTraceGPUInitDevice(psDeviceNode);
+	PVR_LOGG_IF_ERROR(eError, "RGXHWPerfFTraceGPUInitDevice", e0);
+
+	return PVRSRV_OK;
+
+e0:
+	RGXHWPerfFTraceGPUDeInitDevice(psDeviceNode);
+	return eError;
+}
+
+void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	RGXHWPerfFTraceGPUDeInitDevice(psDeviceNode);
+}
+
+IMG_BOOL PVRGpuTraceEnabled(void)
+{
+	return gbFTraceGPUEventsEnabled;
+}
+
+/******************************************************************************
+ End of file (pvr_gputrace.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_gputrace.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_gputrace.h
new file mode 100644
index 0000000..3c73fd9
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_gputrace.h
@@ -0,0 +1,129 @@
+/*************************************************************************/ /*!
+@File           pvr_gputrace.h
+@Title          PVR GPU Trace module common environment interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_GPUTRACE_H_
+#define PVR_GPUTRACE_H_
+
+#include "img_types.h"
+#include "rgx_hwperf.h"
+#include "device.h"
+
+
+/******************************************************************************
+ Module out-bound API
+******************************************************************************/
+
+/*
+  The device layer of the KM driver defines these two APIs to allow a
+  platform module to set and retrieve the feature's on/off state.
+*/
+extern PVRSRV_ERROR PVRGpuTraceEnabledSet(IMG_BOOL bNewValue);
+extern PVRSRV_ERROR PVRGpuTraceEnabledSetNoBridgeLock(
+		PVRSRV_DEVICE_NODE *psDeviceNode,
+		IMG_BOOL bNewValue);
+
+/******************************************************************************
+ Module In-bound API
+******************************************************************************/
+
+typedef enum {
+	PVR_GPUTRACE_SWITCH_TYPE_UNDEF = 0,
+
+	PVR_GPUTRACE_SWITCH_TYPE_BEGIN = 1,
+	PVR_GPUTRACE_SWITCH_TYPE_END = 2
+
+} PVR_GPUTRACE_SWITCH_TYPE;
+
+void PVRGpuTraceClientWork(
+		PVRSRV_DEVICE_NODE *psDevNode,
+		const IMG_UINT32 ui32ExtJobRef,
+		const IMG_UINT32 ui32IntJobRef,
+		const IMG_CHAR* pszKickType);
+
+
+void PVRGpuTraceWorkSwitch(
+		IMG_UINT64 ui64OSTimestamp,
+		const IMG_UINT32 ui32ContextId,
+		const IMG_UINT32 ui32CtxPriority,
+		const IMG_UINT32 ui32JobId,
+		const IMG_CHAR* pszWorkType,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType);
+
+void PVRGpuTraceUfo(
+		IMG_UINT64 ui64OSTimestamp,
+		const RGX_HWPERF_UFO_EV eEvType,
+		const IMG_UINT32 ui32ExtJobRef,
+		const IMG_UINT32 ui32CtxId,
+		const IMG_UINT32 ui32JobId,
+		const IMG_UINT32 ui32UFOCount,
+		const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+void PVRGpuTraceFirmware(
+		IMG_UINT64 ui64HWTimestampInOSTime,
+		const IMG_CHAR* pszWorkType,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType);
+
+void PVRGpuTraceEventsLost(
+		const RGX_HWPERF_STREAM_ID eStreamId,
+		const IMG_UINT32 ui32LastOrdinal,
+		const IMG_UINT32 ui32CurrOrdinal);
+
+/* Early initialisation of GPU Ftrace events logic.
+ * This function creates debugfs entry and initialises some necessary
+ * structures. */
+PVRSRV_ERROR PVRGpuTraceSupportInit(void);
+void PVRGpuTraceSupportDeInit(void);
+
+PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_BOOL PVRGpuTraceEnabled(void);
+
+/* FTrace events callbacks */
+
+void PVRGpuTraceEnableUfoCallback(void);
+void PVRGpuTraceDisableUfoCallback(void);
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void);
+void PVRGpuTraceDisableFirmwareActivityCallback(void);
+
+#endif /* PVR_GPUTRACE_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_uaccess.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_uaccess.h
new file mode 100644
index 0000000..382cca4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/pvr_uaccess.h
@@ -0,0 +1,91 @@
+/*************************************************************************/ /*!
+@File
+@Title          Utility functions for user space access
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __PVR_UACCESS_H__
+#define __PVR_UACCESS_H__
+
+#include <linux/uaccess.h>
+
+static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes)
+{
+    if (access_ok(VERIFY_WRITE, pvTo, ulBytes))
+    {
+		return __copy_to_user(pvTo, pvFrom, ulBytes);
+    }
+
+    return ulBytes;
+}
+
+
+#if defined(__KLOCWORK__)
+	/* this part is only to tell Klocwork not to report false positive because
+	   it doesn't understand that pvr_copy_from_user will initialise the memory
+	   pointed to by pvTo */
+#include <linux/string.h> /* get the memset prototype */
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+	if (pvTo != NULL)
+	{
+		memset(pvTo, 0xAA, ulBytes);
+		return 0;
+	}
+	return 1;
+}
+
+#else /* real implementation */
+
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+    /*
+     * The compile time correctness checking introduced for copy_from_user in
+     * Linux 2.6.33 isn't fully compatible with our usage of the function.
+     */
+    if (access_ok(VERIFY_READ, pvFrom, ulBytes))
+    {
+		return __copy_from_user(pvTo, pvFrom, ulBytes);
+    }
+
+    return ulBytes;
+}
+#endif /* __KLOCWORK__ */
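+
+/* Usage sketch (illustrative only): like the kernel's copy_from_user(), both
+ * helpers return the number of bytes NOT copied, so zero means success.
+ * pszUserBuffer is a hypothetical __user pointer.
+ *
+ *	char acBuf[8];
+ *
+ *	if (pvr_copy_from_user(acBuf, pszUserBuffer, sizeof(acBuf)) != 0)
+ *		return -EFAULT;
+ */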
+
+#endif /* __PVR_UACCESS_H__ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/rogue_trace_events.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/rogue_trace_events.h
new file mode 100644
index 0000000..227621a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/rogue_trace_events.h
@@ -0,0 +1,454 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rogue
+
+#if !defined(_ROGUE_TRACE_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ROGUE_TRACE_EVENTS_H
+
+#include <linux/version.h>
+#include <linux/tracepoint.h>
+#include <linux/time.h>
+
+#define show_secs_from_ns(ns) \
+	({ \
+		u64 t = ns + (NSEC_PER_USEC / 2); \
+		do_div(t, NSEC_PER_SEC); \
+		t; \
+	})
+
+#define show_usecs_from_ns(ns) \
+	({ \
+		u64 t = ns + (NSEC_PER_USEC / 2); \
+		u32 rem; \
+		do_div(t, NSEC_PER_USEC); \
+		rem = do_div(t, USEC_PER_SEC); \
+		rem; \
+	})
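+
+/*
+ * Worked example (values illustrative): for ns = 2500300000 (2.5003 s),
+ * show_secs_from_ns(ns) yields 2 and show_usecs_from_ns(ns) yields 500300,
+ * so TP_printk() formats the timestamp as "ts=2.500300". The added
+ * (NSEC_PER_USEC / 2) rounds to the nearest microsecond.
+ */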
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_update_enabled_callback(void);
+#else
+void trace_fence_update_enabled_callback(void);
+#endif
+void trace_fence_update_disabled_callback(void);
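+
+/* From Linux 4.10 onwards the registration callback supplied to
+ * TRACE_EVENT_FN() must return int (registration is allowed to fail),
+ * hence the version-dependent prototypes here and for the events below.
+ */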
+
+TRACE_EVENT_FN(rogue_fence_update,
+
+	TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset,
+		u32 sync_fwaddr, u32 sync_value),
+
+	TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value),
+
+	TP_STRUCT__entry(
+		__string(       comm,           comm            )
+		__string(       cmd,            cmd             )
+		__string(       dm,             dm              )
+		__field(        u32,            ctx_id          )
+		__field(        u32,            offset          )
+		__field(        u32,            sync_fwaddr     )
+		__field(        u32,            sync_value      )
+	),
+
+	TP_fast_assign(
+		__assign_str(comm, comm);
+		__assign_str(cmd, cmd);
+		__assign_str(dm, dm);
+		__entry->ctx_id = ctx_id;
+		__entry->offset = offset;
+		__entry->sync_fwaddr = sync_fwaddr;
+		__entry->sync_value = sync_value;
+	),
+
+	TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
+		__get_str(comm),
+		__get_str(cmd),
+		__get_str(dm),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->offset,
+		(unsigned long)__entry->sync_fwaddr,
+		(unsigned long)__entry->sync_value),
+
+	trace_fence_update_enabled_callback,
+	trace_fence_update_disabled_callback
+);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_check_enabled_callback(void);
+#else
+void trace_fence_check_enabled_callback(void);
+#endif
+void trace_fence_check_disabled_callback(void);
+
+TRACE_EVENT_FN(rogue_fence_check,
+
+	TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset,
+		u32 sync_fwaddr, u32 sync_value),
+
+	TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value),
+
+	TP_STRUCT__entry(
+		__string(       comm,           comm            )
+		__string(       cmd,            cmd             )
+		__string(       dm,             dm              )
+		__field(        u32,            ctx_id          )
+		__field(        u32,            offset          )
+		__field(        u32,            sync_fwaddr     )
+		__field(        u32,            sync_value      )
+	),
+
+	TP_fast_assign(
+		__assign_str(comm, comm);
+		__assign_str(cmd, cmd);
+		__assign_str(dm, dm);
+		__entry->ctx_id = ctx_id;
+		__entry->offset = offset;
+		__entry->sync_fwaddr = sync_fwaddr;
+		__entry->sync_value = sync_value;
+	),
+
+	TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
+		__get_str(comm),
+		__get_str(cmd),
+		__get_str(dm),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->offset,
+		(unsigned long)__entry->sync_fwaddr,
+		(unsigned long)__entry->sync_value),
+
+	trace_fence_check_enabled_callback,
+	trace_fence_check_disabled_callback
+);
+
+TRACE_EVENT(rogue_create_fw_context,
+
+	TP_PROTO(const char *comm, const char *dm, u32 ctx_id),
+
+	TP_ARGS(comm, dm, ctx_id),
+
+	TP_STRUCT__entry(
+		__string(       comm,           comm            )
+		__string(       dm,             dm              )
+		__field(        u32,            ctx_id          )
+	),
+
+	TP_fast_assign(
+		__assign_str(comm, comm);
+		__assign_str(dm, dm);
+		__entry->ctx_id = ctx_id;
+	),
+
+	TP_printk("comm=%s dm=%s ctx_id=%lu",
+		__get_str(comm),
+		__get_str(dm),
+		(unsigned long)__entry->ctx_id)
+);
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+void PVRGpuTraceEnableUfoCallback(void);
+void PVRGpuTraceDisableUfoCallback(void);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int PVRGpuTraceEnableUfoCallbackWrapper(void);
+#else
+#define PVRGpuTraceEnableUfoCallbackWrapper \
+		PVRGpuTraceEnableUfoCallback
+#endif
+
+TRACE_EVENT_FN(rogue_ufo_update,
+
+	TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr,
+		u32 old_value, u32 new_value),
+
+	TP_ARGS(timestamp, ctx_id, job_id, fwaddr, old_value, new_value),
+
+	TP_STRUCT__entry(
+		__field(        u64,            timestamp   )
+		__field(        u32,            ctx_id      )
+		__field(        u32,            job_id      )
+		__field(        u32,            fwaddr      )
+		__field(        u32,            old_value   )
+		__field(        u32,            new_value   )
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__entry->ctx_id = ctx_id;
+		__entry->job_id = job_id;
+		__entry->fwaddr = fwaddr;
+		__entry->old_value = old_value;
+		__entry->new_value = new_value;
+	),
+
+	TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx "
+		"old_value=%#lx new_value=%#lx",
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->job_id,
+		(unsigned long)__entry->fwaddr,
+		(unsigned long)__entry->old_value,
+		(unsigned long)__entry->new_value),
+	PVRGpuTraceEnableUfoCallbackWrapper,
+	PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_check_fail,
+
+	TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr,
+		u32 value, u32 required),
+
+	TP_ARGS(timestamp, ctx_id, job_id, fwaddr, value, required),
+
+	TP_STRUCT__entry(
+		__field(        u64,            timestamp   )
+		__field(        u32,            ctx_id      )
+		__field(        u32,            job_id      )
+		__field(        u32,            fwaddr      )
+		__field(        u32,            value       )
+		__field(        u32,            required    )
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__entry->ctx_id = ctx_id;
+		__entry->job_id = job_id;
+		__entry->fwaddr = fwaddr;
+		__entry->value = value;
+		__entry->required = required;
+	),
+
+	TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx "
+		"value=%#lx required=%#lx",
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->job_id,
+		(unsigned long)__entry->fwaddr,
+		(unsigned long)__entry->value,
+		(unsigned long)__entry->required),
+	PVRGpuTraceEnableUfoCallbackWrapper,
+	PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_pr_check_fail,
+
+	TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr,
+		u32 value, u32 required),
+
+	TP_ARGS(timestamp, ctx_id, job_id, fwaddr, value, required),
+
+	TP_STRUCT__entry(
+		__field(        u64,            timestamp   )
+		__field(        u32,            ctx_id      )
+		__field(        u32,            job_id      )
+		__field(        u32,            fwaddr      )
+		__field(        u32,            value       )
+		__field(        u32,            required    )
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__entry->ctx_id = ctx_id;
+		__entry->job_id = job_id;
+		__entry->fwaddr = fwaddr;
+		__entry->value = value;
+		__entry->required = required;
+	),
+
+	TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx "
+		"value=%#lx required=%#lx",
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->job_id,
+		(unsigned long)__entry->fwaddr,
+		(unsigned long)__entry->value,
+		(unsigned long)__entry->required),
+	PVRGpuTraceEnableUfoCallbackWrapper,
+	PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_check_success,
+
+	TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr, u32 value),
+
+	TP_ARGS(timestamp, ctx_id, job_id, fwaddr, value),
+
+	TP_STRUCT__entry(
+		__field(        u64,            timestamp   )
+		__field(        u32,            ctx_id      )
+		__field(        u32,            job_id      )
+		__field(        u32,            fwaddr      )
+		__field(        u32,            value       )
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__entry->ctx_id = ctx_id;
+		__entry->job_id = job_id;
+		__entry->fwaddr = fwaddr;
+		__entry->value = value;
+	),
+
+	TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx value=%#lx",
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->job_id,
+		(unsigned long)__entry->fwaddr,
+		(unsigned long)__entry->value),
+	PVRGpuTraceEnableUfoCallbackWrapper,
+	PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_pr_check_success,
+
+	TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr, u32 value),
+
+	TP_ARGS(timestamp, ctx_id, job_id, fwaddr, value),
+
+	TP_STRUCT__entry(
+		__field(        u64,            timestamp   )
+		__field(        u32,            ctx_id      )
+		__field(        u32,            job_id      )
+		__field(        u32,            fwaddr      )
+		__field(        u32,            value       )
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__entry->ctx_id = ctx_id;
+		__entry->job_id = job_id;
+		__entry->fwaddr = fwaddr;
+		__entry->value = value;
+	),
+
+	TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx value=%#lx",
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->job_id,
+		(unsigned long)__entry->fwaddr,
+		(unsigned long)__entry->value),
+	PVRGpuTraceEnableUfoCallbackWrapper,
+	PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT(rogue_events_lost,
+
+	TP_PROTO(u32 event_source, u32 last_ordinal, u32 curr_ordinal),
+
+	TP_ARGS(event_source, last_ordinal, curr_ordinal),
+
+	TP_STRUCT__entry(
+		__field(        u32,            event_source     )
+		__field(        u32,            last_ordinal     )
+		__field(        u32,            curr_ordinal     )
+	),
+
+	TP_fast_assign(
+		__entry->event_source = event_source;
+		__entry->last_ordinal = last_ordinal;
+		__entry->curr_ordinal = curr_ordinal;
+	),
+
+	TP_printk("event_source=%s last_ordinal=%u curr_ordinal=%u",
+		__print_symbolic(__entry->event_source, {0, "GPU"}, {1, "Host"}),
+		__entry->last_ordinal,
+		__entry->curr_ordinal)
+);
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void);
+void PVRGpuTraceDisableFirmwareActivityCallback(void);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void);
+#else
+#define PVRGpuTraceEnableFirmwareActivityCallbackWrapper \
+		PVRGpuTraceEnableFirmwareActivityCallback
+#endif
+
+TRACE_EVENT_FN(rogue_firmware_activity,
+
+	TP_PROTO(u64 timestamp, const char *task, u32 fw_event),
+
+	TP_ARGS(timestamp, task, fw_event),
+
+	TP_STRUCT__entry(
+		__field(        u64,            timestamp       )
+		__string(       task,           task            )
+		__field(        u32,            fw_event        )
+	),
+
+	TP_fast_assign(
+		__entry->timestamp = timestamp;
+		__assign_str(task, task);
+		__entry->fw_event = fw_event;
+	),
+
+	TP_printk("ts=%llu.%06lu task=%s event=%s",
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		__get_str(task),
+		__print_symbolic(__entry->fw_event,
+			/* These values are from pvr_gputrace.h. */
+			{ 1, "begin" },
+			{ 2, "end" })),
+
+	PVRGpuTraceEnableFirmwareActivityCallbackWrapper,
+	PVRGpuTraceDisableFirmwareActivityCallback
+);
+
+#endif /* defined(SUPPORT_GPUTRACE_EVENTS) */
+
+#undef show_secs_from_ns
+#undef show_usecs_from_ns
+
+#endif /* _ROGUE_TRACE_EVENTS_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */
+#define TRACE_INCLUDE_FILE rogue_trace_events
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/trace_events.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/trace_events.c
new file mode 100644
index 0000000..78c26cf
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/trace_events.c
@@ -0,0 +1,243 @@
+/*************************************************************************/ /*!
+@Title          Linux trace event helper functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#include "img_types.h"
+#include "trace_events.h"
+#if !defined(SUPPORT_GPUTRACE_EVENTS)
+#define CREATE_TRACE_POINTS
+#endif
+#include "rogue_trace_events.h"
+#if defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+#include "sync_checkpoint_external.h"
+#endif
+
+static bool fence_update_event_enabled, fence_check_event_enabled;
+
+bool trace_rogue_are_fence_updates_traced(void)
+{
+	return fence_update_event_enabled;
+}
+
+bool trace_rogue_are_fence_checks_traced(void)
+{
+	return fence_check_event_enabled;
+}
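+
+/* Illustrative caller pattern (not from this file): these predicates let
+ * callers skip expensive argument marshalling when the corresponding
+ * event is disabled, e.g.
+ *
+ *     if (trace_rogue_are_fence_updates_traced())
+ *         trace_rogue_fence_updates(cmd, dm, ctx, offset, count, addrs, vals);
+ */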
+
+/*
+ * Callbacks referenced from rogue_trace_events.h. Note that these are not
+ * thread-safe; however, since running trace code when tracing is not enabled
+ * is simply a no-op, there is no harm in it.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_update_enabled_callback(void)
+#else
+void trace_fence_update_enabled_callback(void)
+#endif
+{
+	fence_update_event_enabled = true;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	return 0;
+#endif
+}
+
+void trace_fence_update_disabled_callback(void)
+{
+	fence_update_event_enabled = false;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_check_enabled_callback(void)
+#else
+void trace_fence_check_enabled_callback(void)
+#endif
+{
+	fence_check_event_enabled = true;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	return 0;
+#endif
+}
+
+void trace_fence_check_disabled_callback(void)
+{
+	fence_check_event_enabled = false;
+}
+
+/* This is a helper that calls trace_rogue_fence_update for each fence in an
+ * array.
+ */
+void trace_rogue_fence_updates(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+							   IMG_UINT32 ui32Offset,
+							   IMG_UINT uCount,
+							   PRGXFWIF_UFO_ADDR *pauiAddresses,
+							   IMG_UINT32 *paui32Values)
+{
+	IMG_UINT i;
+	for (i = 0; i < uCount; i++)
+	{
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		trace_rogue_fence_update(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+								 pauiAddresses[i].ui32Addr, paui32Values[i]);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+		trace_rogue_fence_update(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+								 pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED);
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	}
+}
+
+void trace_rogue_fence_checks(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+							  IMG_UINT32 ui32Offset,
+							  IMG_UINT uCount,
+							  PRGXFWIF_UFO_ADDR *pauiAddresses,
+							  IMG_UINT32 *paui32Values)
+{
+	IMG_UINT i;
+	for (i = 0; i < uCount; i++)
+	{
+#if !defined(PVRSRV_USE_SYNC_CHECKPOINTS)
+		trace_rogue_fence_check(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+							  pauiAddresses[i].ui32Addr, paui32Values[i]);
+#else /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+		trace_rogue_fence_check(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+							  pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED);
+#endif /* !defined(PVRSRV_USE_SYNC_CHECKPOINTS) */
+	}
+}
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+							 IMG_UINT32 ui32FWCtx,
+							 IMG_UINT32 ui32JobId,
+							 IMG_UINT32 ui32UFOCount,
+							 const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+	IMG_UINT i;
+	for (i = 0; i < ui32UFOCount; i++)
+	{
+		trace_rogue_ufo_update(ui64OSTimestamp, ui32FWCtx,
+				ui32JobId,
+				puData->sUpdate.ui32FWAddr,
+				puData->sUpdate.ui32OldValue,
+				puData->sUpdate.ui32NewValue);
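+		/* UFO packets carry variable-size union members; advance by the
+		 * size of the member just consumed, not by sizeof(*puData). */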
+		puData = (const RGX_HWPERF_UFO_DATA_ELEMENT *) (((const IMG_BYTE *) puData)
+				+ sizeof(puData->sUpdate));
+	}
+}
+
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+									IMG_UINT32 ui32FWCtx,
+									IMG_UINT32 ui32JobId,
+									IMG_BOOL bPrEvent,
+									IMG_UINT32 ui32UFOCount,
+									const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+	IMG_UINT i;
+	for (i = 0; i < ui32UFOCount; i++)
+	{
+		if (bPrEvent)
+		{
+			trace_rogue_ufo_pr_check_success(ui64OSTimestamp, ui32FWCtx, ui32JobId,
+					puData->sCheckSuccess.ui32FWAddr,
+					puData->sCheckSuccess.ui32Value);
+		}
+		else
+		{
+			trace_rogue_ufo_check_success(ui64OSTimestamp, ui32FWCtx, ui32JobId,
+					puData->sCheckSuccess.ui32FWAddr,
+					puData->sCheckSuccess.ui32Value);
+		}
+		puData = (const RGX_HWPERF_UFO_DATA_ELEMENT *) (((const IMG_BYTE *) puData)
+				+ sizeof(puData->sCheckSuccess));
+	}
+}
+
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+								 IMG_UINT32 ui32FWCtx,
+								 IMG_UINT32 ui32JobId,
+								 IMG_BOOL bPrEvent,
+								 IMG_UINT32 ui32UFOCount,
+								 const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+	IMG_UINT i;
+	for (i = 0; i < ui32UFOCount; i++)
+	{
+		if (bPrEvent)
+		{
+			trace_rogue_ufo_pr_check_fail(ui64OSTimestamp, ui32FWCtx, ui32JobId,
+					puData->sCheckFail.ui32FWAddr,
+					puData->sCheckFail.ui32Value,
+					puData->sCheckFail.ui32Required);
+		}
+		else
+		{
+			trace_rogue_ufo_check_fail(ui64OSTimestamp, ui32FWCtx, ui32JobId,
+					puData->sCheckFail.ui32FWAddr,
+					puData->sCheckFail.ui32Value,
+					puData->sCheckFail.ui32Required);
+		}
+		puData = (const RGX_HWPERF_UFO_DATA_ELEMENT *) (((const IMG_BYTE *) puData)
+				+ sizeof(puData->sCheckFail));
+	}
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+
+int PVRGpuTraceEnableUfoCallbackWrapper(void)
+{
+	PVRGpuTraceEnableUfoCallback();
+
+	return 0;
+}
+
+int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void)
+{
+	PVRGpuTraceEnableFirmwareActivityCallback();
+
+	return 0;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) */
+#endif /* defined(SUPPORT_GPUTRACE_EVENTS) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/trace_events.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/trace_events.h
new file mode 100644
index 0000000..840e93b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/env/linux/trace_events.h
@@ -0,0 +1,154 @@
+/*************************************************************************/ /*!
+@Title          Linux trace events and event helper functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(TRACE_EVENTS_H)
+#define TRACE_EVENTS_H
+
+#include "rgx_fwif_km.h"
+#include "rgx_hwperf.h"
+
+/* We need to make these functions do nothing if CONFIG_EVENT_TRACING isn't
+ * enabled, just like the actual trace event functions that the kernel
+ * defines for us.
+ */
+#ifdef CONFIG_EVENT_TRACING
+bool trace_rogue_are_fence_checks_traced(void);
+
+bool trace_rogue_are_fence_updates_traced(void);
+
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+							   IMG_UINT32 ui32FWContext,
+							   IMG_UINT32 ui32Offset,
+							   IMG_UINT uCount,
+							   PRGXFWIF_UFO_ADDR *pauiAddresses,
+							   IMG_UINT32 *paui32Values);
+
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+							  IMG_UINT32 ui32FWContext,
+							  IMG_UINT32 ui32Offset,
+							  IMG_UINT uCount,
+							  PRGXFWIF_UFO_ADDR *pauiAddresses,
+							  IMG_UINT32 *paui32Values);
+
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+							 IMG_UINT32 ui32FWCtx,
+							 IMG_UINT32 ui32JobId,
+							 IMG_UINT32 ui32UFOCount,
+							 const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+									IMG_UINT32 ui32FWCtx,
+									IMG_UINT32 ui32JobId,
+									IMG_BOOL bPrEvent,
+									IMG_UINT32 ui32UFOCount,
+									const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+								 IMG_UINT32 ui32FWCtx,
+								 IMG_UINT32 ui32JobId,
+								 IMG_BOOL bPrEvent,
+								 IMG_UINT32 ui32UFOCount,
+								 const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+#else  /* CONFIG_EVENT_TRACING */
+static inline
+bool trace_rogue_are_fence_checks_traced(void)
+{
+	return false;
+}
+
+static inline
+bool trace_rogue_are_fence_updates_traced(void)
+{
+	return false;
+}
+
+static inline
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+							   IMG_UINT32 ui32FWContext,
+							   IMG_UINT32 ui32Offset,
+							   IMG_UINT uCount,
+							   PRGXFWIF_UFO_ADDR *pauiAddresses,
+							   IMG_UINT32 *paui32Values)
+{
+}
+
+static inline
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+							  IMG_UINT32 ui32FWContext,
+							  IMG_UINT32 ui32Offset,
+							  IMG_UINT uCount,
+							  PRGXFWIF_UFO_ADDR *pauiAddresses,
+							  IMG_UINT32 *paui32Values)
+{
+}
+
+static inline
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+							 IMG_UINT32 ui32FWCtx,
+							 IMG_UINT32 ui32JobId,
+							 IMG_UINT32 ui32UFOCount,
+							 const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+
+static inline
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+									IMG_UINT32 ui32FWCtx,
+									IMG_UINT32 ui32JobId,
+									IMG_BOOL bPrEvent,
+									IMG_UINT32 ui32UFOCount,
+									const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+
+static inline
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+								 IMG_UINT32 ui32FWCtx,
+								 IMG_UINT32 ui32JobId,
+								 IMG_BOOL bPrEvent,
+								 IMG_UINT32 ui32UFOCount,
+								 const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+#endif /* CONFIG_EVENT_TRACING */
+
+#endif /* TRACE_EVENTS_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/cache_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/cache_km.h
new file mode 100644
index 0000000..fcebfe7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/cache_km.h
@@ -0,0 +1,170 @@
+/*************************************************************************/ /*!
+@File           cache_km.h
+@Title          CPU cache management header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _CACHE_KM_H_
+#define _CACHE_KM_H_
+
+#if defined(LINUX)
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION
+#endif
+
+#include "pvrsrv_error.h"
+#include "os_cpu_cache.h"
+#include "img_types.h"
+#include "cache_ops.h"
+#include "device.h"
+#include "pmr.h"
+
+typedef IMG_UINT32 PVRSRV_CACHE_OP_ADDR_TYPE;	/*!< Type represents address required for CacheOp */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL	0x1	/*!< Operation requires virtual address only */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL	0x2	/*!< Operation requires physical address only */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_BOTH		0x3	/*!< Operation requires both virtual & physical addresses */
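+
+/* Note: PVRSRV_CACHE_OP_ADDR_TYPE_BOTH is the bitwise OR of the VIRTUAL
+   and PHYSICAL address-type values (0x1 | 0x2 == 0x3). */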
+
+/*
+ * CacheOpInit() & CacheOpDeInit()
+ *
+ * This must be called to initialise the KM cache maintenance framework.
+ * This is called early during the driver/module (un)loading phase.
+ */
+PVRSRV_ERROR CacheOpInit(void);
+void CacheOpDeInit(void);
+
+/*
+ * CacheOpInit2() & CacheOpDeInit2()
+ *
+ * This must be called to initialise the UM cache maintenance framework.
+ * This is called when the driver is loaded/unloaded from the kernel.
+ */
+PVRSRV_ERROR CacheOpInit2(void);
+void CacheOpDeInit2(void);
+
+/*
+ * CacheOpAcquireInfoPage() & CacheOpReleaseInfoPage()
+ *
+ * This interface is used for obtaining the global CacheOp information
+ * page, which acts as a repository of metadata for the cache maintenance
+ * framework. Use of this information page outside of services
+ * is _not_ recommended.
+ */
+PVRSRV_ERROR CacheOpAcquireInfoPage (PMR **ppsPMR);
+PVRSRV_ERROR CacheOpReleaseInfoPage (PMR *psPMR);
+
+/*
+ * CacheOpExec()
+ *
+ * This is the primary cache maintenance interface and it is always
+ * guaranteed to be synchronous. uiAddress may be NULL, in which case
+ * a remap is performed internally if required for cache maintenance;
+ * otherwise it must be a valid virtual address for the PMR range that
+ * is to be maintained.
+ */
+PVRSRV_ERROR CacheOpExec (PMR *psPMR,
+						IMG_UINT64 uiAddress,
+						IMG_DEVMEM_OFFSET_T uiOffset,
+						IMG_DEVMEM_SIZE_T uiSize,
+						PVRSRV_CACHE_OP uiCacheOp);
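+
+/* Illustrative use (psPMR/uiPMRSize hypothetical; PVRSRV_CACHE_OP_FLUSH is
+ * assumed to come from cache_ops.h): flush a whole PMR without supplying a
+ * mapping, letting CacheOpExec remap internally as needed:
+ *
+ *     eError = CacheOpExec(psPMR, 0, 0, uiPMRSize, PVRSRV_CACHE_OP_FLUSH);
+ */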
+
+/*
+ * CacheOpExecKM()
+ *
+ * This is the primary cache maintenance interface for kernel-mode
+ * users and it, too, is guaranteed to be synchronous.
+ */
+PVRSRV_ERROR CacheOpExecKM (PPVRSRV_DEVICE_NODE psDevNode,
+							void *pvVirtStart,
+							void *pvVirtEnd,
+							IMG_CPU_PHYADDR sCPUPhysStart,
+							IMG_CPU_PHYADDR sCPUPhysEnd,
+							PVRSRV_CACHE_OP uiCacheOp);
+
+/*
+ * CacheOpQueue()
+ *
+ * This is the secondary cache maintenance interface and it is not
+ * guaranteed to be synchronous: requests may be deferred and executed
+ * asynchronously. This interface is primarily meant as the services
+ * client bridge call handler. Both uiInfoPgGFSeqNum and
+ * ui32[Current,Next]FenceSeqNum implement an internal client/server
+ * queueing protocol, so making use of this interface outside of the
+ * services client is not recommended and should not be done.
+ */
+PVRSRV_ERROR CacheOpQueue (IMG_UINT32 ui32OpCount,
+						PMR **ppsPMR,
+						IMG_UINT64 *puiAddress,
+						IMG_DEVMEM_OFFSET_T *puiOffset,
+						IMG_DEVMEM_SIZE_T *puiSize,
+						PVRSRV_CACHE_OP *puiCacheOp,
+						IMG_UINT32 ui32OpTimeline,
+						IMG_UINT32 uiOpInfoPgGFSeqNum,
+						IMG_UINT32 uiCurrentFenceSeqNum,
+						IMG_UINT32 *puiNextFenceSeqNum);
+
+/*
+ * CacheOpFence()
+ *
+ * This is used to fence any client in-flight cache maintenance
+ * operations that might have been deferred by the use of CacheOpQueue().
+ * It should be called before any subsequent HW device kick to ensure
+ * device memory is coherent with the HW before the kick.
+ */
+PVRSRV_ERROR CacheOpFence (RGXFWIF_DM eOpType, IMG_UINT32 ui32OpSeqNum);
+
+/*
+ * CacheOpLog()
+ *
+ * This is used for logging client cache maintenance operations that
+ * were executed in user-space.
+ */
+PVRSRV_ERROR CacheOpLog (PMR *psPMR,
+						IMG_UINT64 uiAddress,
+						IMG_DEVMEM_OFFSET_T uiOffset,
+						IMG_DEVMEM_SIZE_T uiSize,
+						IMG_UINT64 ui64QueuedTimeMs,
+						IMG_UINT64 ui64ExecuteTimeMs,
+						IMG_UINT32 ui32NumRBF,
+						IMG_BOOL bIsDiscard,
+						PVRSRV_CACHE_OP uiCacheOp);
+
+#endif	/* _CACHE_KM_H_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/connection_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/connection_server.h
new file mode 100644
index 0000000..481a07a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/connection_server.h
@@ -0,0 +1,118 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    API for server side connection management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(_CONNECTION_SERVER_H_)
+#define _CONNECTION_SERVER_H_
+
+
+#include "img_types.h"
+#include "handle.h"
+#include "pvrsrv_cleanup.h"
+
+/* Timeout for the current cleanup time slice */
+extern IMG_UINT64 gui64TimesliceLimit;
+/* Number of handle data entries freed during the current time slice */
+extern IMG_UINT32 gui32HandleDataFreeCounter;
+/* Maximum time for which freeing resources may hold the lock */
+#define CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS (3000 * 1000) /* 3ms */
+
+typedef struct _CONNECTION_DATA_
+{
+	PVRSRV_HANDLE_BASE		*psHandleBase;
+	PROCESS_HANDLE_BASE		*psProcessHandleBase;
+	struct _SYNC_CONNECTION_DATA_	*psSyncConnectionData;
+	struct _PDUMP_CONNECTION_DATA_	*psPDumpConnectionData;
+
+	/* Holds the client flags supplied at connection time */
+	IMG_UINT32			ui32ClientFlags;
+
+	/*
+	 * OS specific data can be stored via this handle.
+	 * See osconnection_server.h for a generic mechanism
+	 * for initialising this field.
+	 */
+	IMG_HANDLE			hOsPrivateData;
+
+	IMG_PID				pid;
+
+	void				*hSecureData;
+
+	IMG_HANDLE			hProcessStats;
+
+	IMG_HANDLE			hClientTLStream;
+
+	/* Structure which is hooked into the cleanup thread work list */
+	PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
+
+	/* List navigation for deferred freeing of connection data */
+	struct _CONNECTION_DATA_	**ppsThis;
+	struct _CONNECTION_DATA_	*psNext;
+} CONNECTION_DATA;
+
+#include "osconnection_server.h"
+
+PVRSRV_ERROR PVRSRVConnectionConnect(void **ppvPrivData, void *pvOSData);
+void PVRSRVConnectionDisconnect(void *pvPrivData);
+
+IMG_PID PVRSRVGetPurgeConnectionPid(void);
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVConnectionPrivateData)
+#endif
+static INLINE
+IMG_HANDLE PVRSRVConnectionPrivateData(CONNECTION_DATA *psConnection)
+{
+	return (psConnection != NULL) ? psConnection->hOsPrivateData : NULL;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVGetDevData)
+#endif
+static INLINE
+PVRSRV_DEVICE_NODE * PVRSRVGetDevData(CONNECTION_DATA *psConnection)
+{
+	return OSGetDevData(psConnection);
+}
+
+#endif /* !defined(_CONNECTION_SERVER_H_) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/device.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/device.h
new file mode 100644
index 0000000..5fdbb52
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/device.h
@@ -0,0 +1,411 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common Device header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device related function templates and defines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICE_H__
+#define __DEVICE_H__
+
+
+#include "devicemem_heapcfg.h"
+#include "mmu_common.h"
+#include "ra.h"  		/* RA_ARENA */
+#include "pvrsrv_device.h"
+#include "sync_checkpoint.h"
+#include "srvkm.h"
+#include "physheap.h"
+#include <powervr/sync_external.h>
+#include "sysinfo.h"
+#include "dllist.h"
+#include "cache_km.h"
+
+#include "lock.h"
+
+#include "power.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+struct pvr_buffer_sync_context;
+#endif
+
+typedef struct _PVRSRV_POWER_DEV_TAG_ *PPVRSRV_POWER_DEV;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+struct SYNC_RECORD;
+#endif
+
+/*********************************************************************/ /*!
+ @Function      AllocUFOCallback
+ @Description   Device specific callback for allocation of an UFO block
+
+ @Input         psDeviceNode          Pointer to device node to allocate
+                                      the UFO for.
+ @Output        ppsMemDesc            Pointer to pointer for the memdesc of
+                                      the allocation
+ @Output        pui32SyncAddr         FW Base address of the UFO block
+ @Output        puiSyncPrimBlockSize  Size of the UFO block
+
+ @Return        PVRSRV_OK if allocation was successful
+ */
+/*********************************************************************/
+typedef PVRSRV_ERROR (*AllocUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+														DEVMEM_MEMDESC **ppsMemDesc,
+														IMG_UINT32 *pui32SyncAddr,
+														IMG_UINT32 *puiSyncPrimBlockSize);
+
+/*********************************************************************/ /*!
+ @Function      FreeUFOCallback
+ @Description   Device specific callback for freeing of an UFO
+
+ @Input         psDeviceNode    Pointer to device node that the UFO block was
+                                allocated from.
+ @Input         psMemDesc       Pointer to pointer for the memdesc of
+                                the UFO block to free.
+ */
+/*********************************************************************/
+typedef void (*FreeUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+									 DEVMEM_MEMDESC *psMemDesc);
+
+typedef struct _PVRSRV_DEVICE_IDENTIFIER_
+{
+	/* Pdump memory and register bank names */
+	IMG_CHAR				*pszPDumpDevName;
+	IMG_CHAR				*pszPDumpRegName;
+
+	/* Under Linux, this is the minor number of RenderNode corresponding to this Device */
+	IMG_INT32				i32UMIdentifier;
+} PVRSRV_DEVICE_IDENTIFIER;
+
+typedef struct _DEVICE_MEMORY_INFO_
+{
+	/* heap count.  Doesn't include additional heaps from PVRSRVCreateDeviceMemHeap */
+	IMG_UINT32				ui32HeapCount;
+
+    /* Blueprints for creating new device memory contexts */
+    IMG_UINT32              uiNumHeapConfigs;
+    DEVMEM_HEAP_CONFIG      *psDeviceMemoryHeapConfigArray;
+    DEVMEM_HEAP_BLUEPRINT   *psDeviceMemoryHeap;
+} DEVICE_MEMORY_INFO;
+
+
+typedef struct _PG_HANDLE_
+{
+	union
+	{
+		void *pvHandle;
+		IMG_UINT64 ui64Handle;
+	} u;
+	/* Order of the corresponding allocation */
+	IMG_UINT32	ui32Order;
+} PG_HANDLE;
+
+#define MMU_BAD_PHYS_ADDR (0xbadbad00badULL)
+typedef struct __DUMMY_PAGE__
+{
+	/* Page handle for the dummy page allocated (UMA/LMA) */
+	PG_HANDLE	sDummyPageHandle;
+	POS_LOCK	psDummyPgLock;
+	ATOMIC_T	atRefCounter;
+	/* Log2 of the dummy page size */
+	IMG_UINT32	ui32Log2DummyPgSize;
+	IMG_UINT64	ui64DummyPgPhysAddr;
+#if defined(PDUMP)
+#define DUMMY_PAGE	("DUMMY_PAGE")
+	IMG_HANDLE hPdumpDummyPg;
+#endif
+} PVRSRV_DUMMY_PAGE;
+
+typedef enum _PVRSRV_DEVICE_STATE_
+{
+	PVRSRV_DEVICE_STATE_UNDEFINED = 0,
+	PVRSRV_DEVICE_STATE_INIT,
+	PVRSRV_DEVICE_STATE_ACTIVE,
+	PVRSRV_DEVICE_STATE_DEINIT,
+	PVRSRV_DEVICE_STATE_BAD,
+} PVRSRV_DEVICE_STATE;
+
+typedef enum _PVRSRV_DEVICE_HEALTH_STATUS_
+{
+	PVRSRV_DEVICE_HEALTH_STATUS_OK = 0,
+	PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING,
+	PVRSRV_DEVICE_HEALTH_STATUS_DEAD
+} PVRSRV_DEVICE_HEALTH_STATUS;
+
+typedef enum _PVRSRV_DEVICE_HEALTH_REASON_
+{
+	PVRSRV_DEVICE_HEALTH_REASON_NONE = 0,
+	PVRSRV_DEVICE_HEALTH_REASON_ASSERTED,
+	PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING,
+	PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS,
+	PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT,
+	PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED
+} PVRSRV_DEVICE_HEALTH_REASON;
+
+typedef PVRSRV_ERROR (*FN_CREATERAMBACKEDPMR)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+										IMG_DEVMEM_SIZE_T uiSize,
+										IMG_DEVMEM_SIZE_T uiChunkSize,
+										IMG_UINT32 ui32NumPhysChunks,
+										IMG_UINT32 ui32NumVirtChunks,
+										IMG_UINT32 *pui32MappingTable,
+										IMG_UINT32 uiLog2PageSize,
+										PVRSRV_MEMALLOCFLAGS_T uiFlags,
+										const IMG_CHAR *pszAnnotation,
+										PMR **ppsPMRPtr);
+
+typedef struct _PVRSRV_DEVICE_NODE_
+{
+	PVRSRV_DEVICE_IDENTIFIER	sDevId;
+
+	PVRSRV_DEVICE_STATE			eDevState;
+	ATOMIC_T					eHealthStatus; /* Holds values from PVRSRV_DEVICE_HEALTH_STATUS */
+	ATOMIC_T					eHealthReason; /* Holds values from PVRSRV_DEVICE_HEALTH_REASON */
+
+	IMG_HANDLE						*hDebugTable;
+
+	/* device specific MMU attributes */
+	MMU_DEVICEATTRIBS      *psMMUDevAttrs;
+	/* device specific MMU firmware attributes, used only by some devices */
+	MMU_DEVICEATTRIBS      *psFirmwareMMUDevAttrs;
+
+	/* lock for power state transitions */
+	POS_LOCK				hPowerLock;
+	/* current system device power state */
+	PVRSRV_SYS_POWER_STATE	eCurrentSysPowerState;
+	PPVRSRV_POWER_DEV	psPowerDev;
+
+	/*
+		callbacks the device must support:
+	*/
+
+    FN_CREATERAMBACKEDPMR pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+    PVRSRV_ERROR (*pfnDevPxAlloc)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, size_t uiSize,
+									PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+    void (*pfnDevPxFree)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, PG_HANDLE *psMemHandle);
+
+	PVRSRV_ERROR (*pfnDevPxMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, PG_HANDLE *pshMemHandle,
+								size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+								void **pvPtr);
+
+	void (*pfnDevPxUnMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+						  PG_HANDLE *psMemHandle, void *pvPtr);
+
+	PVRSRV_ERROR (*pfnDevPxClean)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+								PG_HANDLE *pshMemHandle,
+								IMG_UINT32 uiOffset,
+								IMG_UINT32 uiLength);
+
+	IMG_UINT32 uiMMUPxLog2AllocGran;
+
+	void (*pfnMMUCacheInvalidate)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+								  IMG_HANDLE hDeviceData,
+								  MMU_LEVEL eLevel,
+								  IMG_BOOL bUnmap);
+
+	PVRSRV_ERROR (*pfnMMUCacheInvalidateKick)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+	                                          IMG_UINT16 *pui16NextMMUInvalidateUpdate,
+	                                          IMG_BOOL bInterrupt);
+
+	IMG_UINT32 (*pfnMMUCacheGetInvalidateCounter)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+
+	void (*pfnDumpDebugInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+	PVRSRV_ERROR (*pfnUpdateHealthStatus)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+	                                      IMG_BOOL bIsTimerPoll);
+
+	PVRSRV_ERROR (*pfnResetHWRLogs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+	/* Method to drain device HWPerf packets from firmware buffer to host buffer */
+	PVRSRV_ERROR (*pfnServiceHWPerf)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+	PVRSRV_ERROR (*pfnDeviceVersionString)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_CHAR **ppszVersionString);
+
+	PVRSRV_ERROR (*pfnDeviceClockSpeed)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_PUINT32 pui32RGXClockSpeed);
+
+	PVRSRV_ERROR (*pfnSoftReset)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2);
+
+	PVRSRV_ERROR (*pfnAlignmentCheck)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32FWAlignChecksSize, IMG_UINT32 aui32FWAlignChecks[]);
+	IMG_BOOL	(*pfnCheckDeviceFeature)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64FeatureMask);
+
+	IMG_INT32	(*pfnGetDeviceFeatureValue)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64FeatureMask);
+
+	PVRSRV_DEVICE_CONFIG	*psDevConfig;
+
+	/* device post-finalise compatibility check */
+	PVRSRV_ERROR			(*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*);
+
+	/* information about the device's address space and heaps */
+	DEVICE_MEMORY_INFO		sDevMemoryInfo;
+
+	/* device's shared-virtual-memory heap max virtual address */
+	IMG_UINT64				ui64GeneralSVMHeapTopVA;
+
+	ATOMIC_T				iNumClockSpeedChanges;
+
+	/* private device information */
+	void					*pvDevice;
+
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	RA_ARENA                *psOSidSubArena[GPUVIRT_VALIDATION_NUM_OS];
+#endif
+
+
+#define PVRSRV_MAX_RA_NAME_LENGTH (50)
+	RA_ARENA				**apsLocalDevMemArenas;
+	IMG_CHAR				**apszRANames;
+	IMG_UINT32				ui32NumOfLocalMemArenas;
+
+	IMG_CHAR				szKernelFwRAName[RGXFW_NUM_OS][PVRSRV_MAX_RA_NAME_LENGTH];
+	RA_ARENA				*psKernelFwMemArena[RGXFW_NUM_OS];
+	RA_BASE_T				ui64RABase[RGXFW_NUM_OS];
+	IMG_UINT32				uiKernelFwRAIdx;
+
+	IMG_UINT32				ui32RegisteredPhysHeaps;
+	PHYS_HEAP				**papsRegisteredPhysHeaps;
+
+	/*
+	 * Pointers to the device's physical memory heap(s)
+	 * The first entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]) will be used for allocations
+	 *  where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is not set. Normally this will be an LMA heap
+	 *  (but the device configuration could specify a UMA heap here, if desired)
+	 * The second entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) will be used for allocations
+	 *  where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is set. Normally this will be a UMA heap
+	 *  (but the configuration could specify an LMA heap here, if desired)
+	 * The third entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]) will be used for allocations
+	 *  where the PVRSRV_MEMALLOCFLAG_FW_LOCAL flag is set; this is used when virtualization is enabled
+	 * The device configuration will always specify two physical heap IDs - in the event of the device
+	 *  only using one physical heap, both of these IDs will be the same, and hence both pointers below
+	 *  will also be the same; when virtualization is enabled the device configuration specifies
+	 *  three physical heap IDs, the last being for PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL allocations
+	 */
+	PHYS_HEAP				*apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+	struct _PVRSRV_DEVICE_NODE_	*psNext;
+	struct _PVRSRV_DEVICE_NODE_	**ppsThis;
+
+	/* Functions for notification about memory contexts */
+	PVRSRV_ERROR			(*pfnRegisterMemoryContext)(struct _PVRSRV_DEVICE_NODE_	*psDeviceNode,
+														MMU_CONTEXT					*psMMUContext,
+														IMG_HANDLE					*hPrivData);
+	void					(*pfnUnregisterMemoryContext)(IMG_HANDLE hPrivData);
+
+	/* Functions for allocation/freeing of UFOs */
+	AllocUFOBlockCallback	pfnAllocUFOBlock;	/*!< Callback for allocation of a block of UFO memory */
+	FreeUFOBlockCallback	pfnFreeUFOBlock;	/*!< Callback for freeing of a block of UFO memory */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+	struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+
+	IMG_HANDLE				hSyncServerNotify;
+	POS_LOCK				hSyncServerListLock;
+	DLLIST_NODE				sSyncServerSyncsList;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	IMG_HANDLE				hSyncServerRecordNotify;
+	POS_LOCK				hSyncServerRecordLock;
+	IMG_UINT32				ui32SyncServerRecordCount;
+	IMG_UINT32				ui32SyncServerRecordCountHighWatermark;
+	DLLIST_NODE				sSyncServerRecordList;
+	struct SYNC_RECORD		*apsSyncServerRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN];
+	IMG_UINT32				uiSyncServerRecordFreeIdx;
+
+	IMG_HANDLE				hSyncCheckpointRecordNotify;
+	POS_LOCK				hSyncCheckpointRecordLock;
+	IMG_UINT32				ui32SyncCheckpointRecordCount;
+	IMG_UINT32				ui32SyncCheckpointRecordCountHighWatermark;
+	DLLIST_NODE				sSyncCheckpointRecordList;
+	struct SYNC_CHECKPOINT_RECORD	*apsSyncCheckpointRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN];
+	IMG_UINT32				uiSyncCheckpointRecordFreeIdx;
+#endif
+
+	IMG_HANDLE				hSyncCheckpointNotify;
+	POS_LOCK				hSyncCheckpointListLock;
+	DLLIST_NODE				sSyncCheckpointSyncsList;
+
+	PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext;
+	PSYNC_PRIM_CONTEXT		hSyncPrimContext;
+
+	PVRSRV_CLIENT_SYNC_PRIM	*psSyncPrim;
+	/* With this sync-prim we make sure the MMU cache is flushed
+	 * before we free the page table memory */
+	PVRSRV_CLIENT_SYNC_PRIM	*psMMUCacheSyncPrim;
+	IMG_UINT16				ui16NextMMUInvalidateUpdate;
+
+	IMG_HANDLE				hCmdCompNotify;
+	IMG_HANDLE				hDbgReqNotify;
+	IMG_HANDLE				hHtbDbgReqNotify;
+	IMG_HANDLE				hAppHintDbgReqNotify;
+
+	PVRSRV_DUMMY_PAGE		sDummyPage;
+
+	DLLIST_NODE				sMemoryContextPageFaultNotifyListHead;
+
+#if defined(PDUMP)
+	/* 	device-level callback which is called when pdump.exe starts.
+	 *	Should be implemented in device-specific init code, e.g. rgxinit.c
+	 */
+	PVRSRV_ERROR			(*pfnPDumpInitDevice)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+	/* device-level callback to return pdump ID associated to a memory context */
+	IMG_UINT32				(*pfnMMUGetContextID)(IMG_HANDLE hDevMemContext);
+#endif
+} PVRSRV_DEVICE_NODE;
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode,
+											   IMG_BOOL bInitSuccessful);
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR IMG_CALLCONV RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions);
+
+
+#endif /* __DEVICE_H__ */
+
+/******************************************************************************
+ End of file (device.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/devicemem_heapcfg.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/devicemem_heapcfg.h
new file mode 100644
index 0000000..8933831
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/devicemem_heapcfg.h
@@ -0,0 +1,163 @@
+/**************************************************************************/ /*!
+@File
+@Title          Device memory heap configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICEMEMHEAPCFG_H__
+#define __DEVICEMEMHEAPCFG_H__
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+
+/* FIXME: Find a better way of defining _PVRSRV_DEVICE_NODE_ */
+struct _PVRSRV_DEVICE_NODE_;
+/* FIXME: Find a better way of defining _CONNECTION_DATA_ */
+struct _CONNECTION_DATA_;
+
+
+/*
+  A "heap config" is a blueprint to be used for the initial setting up
+  of heaps when a device memory context is created.
+
+  We define a data structure to describe this, but it's really down to
+  the caller to populate it.  This is all expected to be in-kernel.
+  We provide an API that client code can use to enquire about the
+  blueprint, such that it may do the heap setup during the context
+  creation call on behalf of the user */
+
+/* blueprint for a single heap */
+typedef struct _DEVMEM_HEAP_BLUEPRINT_
+{
+	/* Name of this heap - for debug purposes, and perhaps for
+	lookup by name */
+	const IMG_CHAR *pszName;
+
+	/* Virtual address of the beginning of the heap.  This _must_ be a
+	multiple of the data page size for the heap.  It is
+	_recommended_ that it be coarser than that - especially, it
+	should begin on a boundary appropriate to the MMU for the
+	device.  For Rogue, this is a Page Directory boundary, or 1GB
+	(virtual address a multiple of 0x0040000000). */
+	IMG_DEV_VIRTADDR sHeapBaseAddr;
+
+	/* Length of the heap.  The END address of the heap has a similar
+	restriction to that of the _beginning_ of the heap, i.e. the
+	heap length _must_ be a whole number of data pages.  Again, the
+	recommendation is that it ends on a 1GB boundary.  This is not
+	essential, but we do know that (at the time of writing) the
+	current implementation of mmu_common.c is such that no two heaps
+	may share a page directory, thus the remaining virtual space
+	would be wasted if the length were not a multiple of 1GB */
+	IMG_DEVMEM_SIZE_T uiHeapLength;
+
+	/* Data page size.  This is the page size that is going to get
+	programmed into the MMU, so it needs to be a valid one for the
+	device.  Importantly, the start address and length _must_ be
+	multiples of this page size.  Note that the page size is
+	specified as the log 2 relative to 1 byte (e.g. 12 indicates
+	4kB) */
+	IMG_UINT32 uiLog2DataPageSize;
+
+	/* Import alignment.  Force imports to this heap to be
+	aligned to at least this value */
+	IMG_UINT32 uiLog2ImportAlignment;
+
+	/* Tiled heaps have an optimum byte-stride, this can be derived from
+	the heap alignment and tiling mode. This is abstracted here such that
+	Log2ByteStride = Log2Alignment - Log2TilingStrideFactor */
+	IMG_UINT32 uiLog2TilingStrideFactor;
+} DEVMEM_HEAP_BLUEPRINT;
+
+/* entire named heap config */
+typedef struct _DEVMEM_HEAP_CONFIG_
+{
+	/* Name of this heap config - for debug and maybe lookup */
+	const IMG_CHAR *pszName;
+
+	/* Number of heaps in this config */
+	IMG_UINT32 uiNumHeaps;
+
+	/* Array of individual heap blueprints as defined above */
+	DEVMEM_HEAP_BLUEPRINT *psHeapBlueprintArray;
+} DEVMEM_HEAP_CONFIG;
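+
+/* Illustrative sketch (not part of this driver): how device-specific
+ * init code might populate a heap config.  All names and values below
+ * are hypothetical, and the IMG_DEV_VIRTADDR.uiAddr field and the
+ * kernel ARRAY_SIZE() macro are assumptions.  Note uiLog2DataPageSize
+ * is log 2 of the page size in bytes, so 12 means 4kB pages, and the
+ * base and length sit on the recommended 1GB boundaries.
+ */
+#if 0
+static DEVMEM_HEAP_BLUEPRINT gasExampleBlueprints[] =
+{
+	{
+		.pszName                  = "General",          /* hypothetical */
+		.sHeapBaseAddr.uiAddr     = 0x0040000000ULL,    /* 1GB boundary */
+		.uiHeapLength             = 0x0040000000ULL,    /* 1GB long */
+		.uiLog2DataPageSize       = 12,                 /* 4kB pages */
+		.uiLog2ImportAlignment    = 12,
+		.uiLog2TilingStrideFactor = 0,
+	},
+};
+
+static DEVMEM_HEAP_CONFIG gsExampleHeapConfig =
+{
+	.pszName              = "ExampleConfig",            /* hypothetical */
+	.uiNumHeaps           = ARRAY_SIZE(gasExampleBlueprints),
+	.psHeapBlueprintArray = gasExampleBlueprints,
+};
+#endif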
+
+
+extern PVRSRV_ERROR
+HeapCfgHeapConfigCount(struct _CONNECTION_DATA_ * psConnection,
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 *puiNumHeapConfigsOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapCount(struct _CONNECTION_DATA_ * psConnection,
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 *puiNumHeapsOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapConfigName(struct _CONNECTION_DATA_ * psConnection,
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 uiHeapConfigNameBufSz,
+    IMG_CHAR *pszHeapConfigNameOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapDetails(struct _CONNECTION_DATA_ * psConnection,
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 uiHeapIndex,
+    IMG_UINT32 uiHeapNameBufSz,
+    IMG_CHAR *pszHeapNameOut,
+    IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+    IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+    IMG_UINT32 *puiLog2DataPageSizeOut,
+    IMG_UINT32 *puiLog2ImportAlignmentOut,
+    IMG_UINT32 *puiLog2TilingStrideFactorOut
+);
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/devicemem_history_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/devicemem_history_server.h
new file mode 100644
index 0000000..24e6730
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/devicemem_history_server.h
@@ -0,0 +1,154 @@
+/*************************************************************************/ /*!
+@File           devicemem_history_server.h
+@Title          Device memory history
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Devicemem History functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_HISTORY_SERVER_H_
+#define _DEVICEMEM_HISTORY_SERVER_H_
+
+#include "img_defs.h"
+#include "mm_common.h"
+#include "pvrsrv_error.h"
+#include "rgxmem.h"
+
+extern PVRSRV_ERROR
+DevicememHistoryInitKM(void);
+
+extern void
+DevicememHistoryDeInitKM(void);
+
+PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR,
+							IMG_UINT32 ui32Offset,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+							IMG_UINT32 ui32PageSize,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR,
+							IMG_UINT32 ui32Offset,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+							IMG_UINT32 ui32PageSize,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryMapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+							IMG_UINT32 ui32StartPage,
+							IMG_UINT32 ui32NumPages,
+							IMG_DEVMEM_SIZE_T uiAllocSize,
+							const IMG_CHAR szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *ui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+							IMG_UINT32 ui32StartPage,
+							IMG_UINT32 ui32NumPages,
+							IMG_DEVMEM_SIZE_T uiAllocSize,
+							const IMG_CHAR szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+							IMG_UINT32 ui32Log2PageSize,
+							IMG_UINT32 ui32AllocationIndex,
+							IMG_UINT32 *ui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR,
+							IMG_UINT32 ui32Offset,
+							IMG_DEV_VIRTADDR sDevVAddr,
+							IMG_DEVMEM_SIZE_T uiSize,
+							const char szName[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+							IMG_UINT32 ui32PageSize,
+							IMG_UINT32 ui32AllocPageCount,
+							IMG_UINT32 *paui32AllocPageIndices,
+							IMG_UINT32 ui32FreePageCount,
+							IMG_UINT32 *pauiFreePageIndices,
+							IMG_UINT32 AllocationIndex,
+							IMG_UINT32 *pui32AllocationIndexOut);
+
+/* used when the PID does not matter */
+#define DEVICEMEM_HISTORY_PID_ANY 0xFFFFFFFE
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_IN_
+{
+	IMG_PID uiPID;
+	IMG_DEV_VIRTADDR sDevVAddr;
+} DEVICEMEM_HISTORY_QUERY_IN;
+
+/* Store up to 4 results for a lookup. In the case of the faulting page being
+ * re-mapped between the page fault occurring on HW and the page fault analysis
+ * being done, the second result entry will show the allocation being unmapped.
+ * A further 2 entries are added to cater for multiple buffers in the same page.
+ */
+#define DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS 4
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_RESULT_
+{
+	IMG_CHAR szString[DEVICEMEM_HISTORY_TEXT_BUFSZ];
+	IMG_DEV_VIRTADDR sBaseDevVAddr;
+	size_t uiSize;
+	IMG_BOOL bMap;
+	IMG_BOOL bRange;
+	IMG_BOOL bAll;
+	IMG_UINT64 ui64When;
+	IMG_UINT64 ui64Age;
+	/* info for sparse map/unmap operations (i.e. bRange=IMG_TRUE) */
+	IMG_UINT32 ui32StartPage;
+	IMG_UINT32 ui32PageCount;
+	IMG_DEV_VIRTADDR sMapStartAddr;
+	IMG_DEV_VIRTADDR sMapEndAddr;
+	RGXMEM_PROCESS_INFO sProcessInfo;
+} DEVICEMEM_HISTORY_QUERY_OUT_RESULT;
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_
+{
+	IMG_UINT32 ui32NumResults;
+	/* result 0 is the newest */
+	DEVICEMEM_HISTORY_QUERY_OUT_RESULT sResults[DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS];
+} DEVICEMEM_HISTORY_QUERY_OUT;
+
+extern IMG_BOOL
+DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
+                      DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
+                      IMG_UINT32 ui32PageSizeBytes,
+                      IMG_BOOL bMatchAnyAllocInPage);
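+
+/* Illustrative sketch (not part of this driver): querying the history
+ * for a faulting device virtual address.  Result 0 is the newest entry
+ * and up to DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS are returned; the
+ * 4kB page size below is a hypothetical value.
+ */
+#if 0
+static void ExampleHistoryLookup(IMG_DEV_VIRTADDR sFaultAddr)
+{
+	DEVICEMEM_HISTORY_QUERY_IN  sIn;
+	DEVICEMEM_HISTORY_QUERY_OUT sOut;
+
+	sIn.uiPID = DEVICEMEM_HISTORY_PID_ANY; /* don't filter by process */
+	sIn.sDevVAddr = sFaultAddr;
+
+	if (DevicememHistoryQuery(&sIn, &sOut, 4096, IMG_TRUE))
+	{
+		/* sOut.sResults[0] describes the most recent map/unmap
+		 * operation that covered the faulting page */
+	}
+}
+#endif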
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/devicemem_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/devicemem_server.h
new file mode 100644
index 0000000..1d11f68
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/devicemem_server.h
@@ -0,0 +1,497 @@
+/**************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header file for server side component of device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICEMEM_SERVER_H__
+#define __DEVICEMEM_SERVER_H__
+
+#include "device.h" /* For device node */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "connection_server.h"
+
+#include "pmr.h"
+
+
+typedef struct _DEVMEMINT_CTX_ DEVMEMINT_CTX;
+typedef struct _DEVMEMINT_CTX_EXPORT_ DEVMEMINT_CTX_EXPORT;
+typedef struct _DEVMEMINT_HEAP_ DEVMEMINT_HEAP;
+
+typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION;
+typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING;
+typedef struct _DEVMEMINT_PF_NOTIFY_ DEVMEMINT_PF_NOTIFY;
+
+
+/**************************************************************************/ /*!
+@Function       DevmemIntUnpin
+@Description    This is the counterpart to DevmemPin(). It is meant to be
+                called when the allocation is NOT mapped in the device virtual
+                space.
+
+@Input          psPMR           The physical memory to unpin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
+                                registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntUnpin(PMR *psPMR);
+
+/**************************************************************************/ /*!
+@Function       DevmemIntUnpinInvalidate
+@Description    This is the counterpart to DevmemIntPinValidate(). It is
+                meant to be called for allocations that ARE mapped in the
+                device virtual space, where the mapping has to be invalidated.
+
+@Input          psPMR           The physical memory to unpin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
+                                registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
+
+/**************************************************************************/ /*!
+@Function       DevmemIntPin
+@Description    This is the counterpart to DevmemIntUnpin().
+                It is meant to be called if there is NO device mapping present.
+
+@Input          psPMR           The physical memory to pin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
+                                was successfully restored.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+                                could not be restored and new physical memory
+                                was allocated.
+
+                                A different error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntPin(PMR *psPMR);
+
+/**************************************************************************/ /*!
+@Function       DevmemIntPinValidate
+@Description    This is the counterpart to DevmemIntUnpinInvalidate().
+                It is meant to be called if there IS a device mapping present
+                that needs to be taken care of.
+
+@Input          psDevmemMapping The mapping structure used for the passed PMR.
+
+@Input          psPMR           The physical memory to pin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
+                                was successfully restored.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+                                could not be restored and new physical memory
+                                was allocated.
+
+                                A different error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
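+
+/* Illustrative sketch (not part of this driver): the unpin/pin pairing
+ * described above for a PMR with no device mapping.  A return of
+ * PVRSRV_ERROR_PMR_NEW_MEMORY means the old content is gone and the
+ * caller must treat the allocation as uninitialised.
+ */
+#if 0
+static PVRSRV_ERROR ExamplePinCycle(PMR *psPMR)
+{
+	PVRSRV_ERROR eError;
+
+	eError = DevmemIntUnpin(psPMR);  /* memory may now be reclaimed */
+	if (eError != PVRSRV_OK)
+		return eError;
+
+	eError = DevmemIntPin(psPMR);    /* try to get the pages back */
+	if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+	{
+		/* content lost: fresh physical pages were allocated */
+		eError = PVRSRV_OK;
+	}
+	return eError;
+}
+#endif
+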
+/*
+ * DevmemServerGetImportHandle()
+ *
+ * For a given exportable memory descriptor, returns the PMR handle.
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+                            IMG_HANDLE *phImport);
+
+/*
+ * DevmemServerGetHeapHandle()
+ *
+ * For a given reservation, returns the heap handle.
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
+                          IMG_HANDLE *phHeap);
+
+/*
+ * DevmemIntCtxCreate()
+ *
+ * Create a Server-side Device Memory Context.  This is usually the
+ * counterpart of the client side memory context, and indeed is
+ * usually created at the same time.
+ *
+ * You must have one of these before creating any heaps.
+ *
+ * All heaps must have been destroyed before calling
+ * DevmemIntCtxDestroy()
+ *
+ * If you call DevmemIntCtxCreate() (and it succeeds) you are promising
+ * to later call DevmemIntCtxDestroy()
+ *
+ * Note that this call will cause the device MMU code to do some work
+ * for creating the device memory context, but it does not guarantee
+ * that a page catalogue will have been created, as this may be
+ * deferred until first allocation.
+ *
+ * Caller to provide storage for a pointer to the DEVMEM_CTX object
+ * that will be created by this call.
+ */
+extern PVRSRV_ERROR
+DevmemIntCtxCreate(CONNECTION_DATA *psConnection,
+                   PVRSRV_DEVICE_NODE *psDeviceNode,
+                   /* devnode / perproc etc */
+                   IMG_BOOL bKernelMemoryCtx,
+                   DEVMEMINT_CTX **ppsDevmemCtxPtr,
+                   IMG_HANDLE *hPrivData,
+                   IMG_UINT32 *pui32CPUCacheLineSize);
+/*
+ * DevmemIntCtxDestroy()
+ *
+ * Undoes a prior DevmemIntCtxCreate or DevmemIntCtxImport.
+ */
+extern PVRSRV_ERROR
+DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx);
+
+/*
+ * DevmemIntHeapCreate()
+ *
+ * Creates a new heap in this device memory context.  This will cause
+ * a call into the MMU code to allocate various data structures for
+ * managing this heap.  It will not necessarily cause any page tables
+ * to be set up, as this can be deferred until first allocation.
+ * (i.e. we shouldn't care - it's up to the MMU code)
+ *
+ * Note that the data page size must be specified (as log 2).  The
+ * data page size as specified here will be communicated to the mmu
+ * module, and thus may determine the page size configured in page
+ * directory entries for subsequent allocations from this heap.  It is
+ * essential that the page size here is less than or equal to the
+ * "minimum contiguity guarantee" of any PMR that you subsequently
+ * attempt to map to this heap.
+ *
+ * If you call DevmemIntHeapCreate() (and the call succeeds) you are
+ * promising that you shall subsequently call DevmemIntHeapDestroy()
+ *
+ * Caller to provide storage for a pointer to the DEVMEM_HEAP object
+ * that will be created by this call.
+ */
+extern PVRSRV_ERROR
+DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx,
+                    IMG_DEV_VIRTADDR sHeapBaseAddr,
+                    IMG_DEVMEM_SIZE_T uiHeapLength,
+                    IMG_UINT32 uiLog2DataPageSize,
+                    DEVMEMINT_HEAP **ppsDevmemHeapPtr);
+/*
+ * DevmemIntHeapDestroy()
+ *
+ * Destroys a heap previously created with DevmemIntHeapCreate()
+ *
+ * All allocations from this heap must have been freed before this
+ * call.
+ */
+extern PVRSRV_ERROR
+DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap);
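+
+/* Illustrative sketch (not part of this driver): the create/destroy
+ * pairings promised above.  The heap base, length and page size are
+ * hypothetical values that would normally come from a
+ * DEVMEM_HEAP_BLUEPRINT.
+ */
+#if 0
+static PVRSRV_ERROR ExampleCtxAndHeap(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	DEVMEMINT_CTX *psCtx;
+	DEVMEMINT_HEAP *psHeap;
+	IMG_HANDLE hPrivData;
+	IMG_UINT32 ui32CPUCacheLineSize;
+	IMG_DEV_VIRTADDR sBase = { 0x0040000000ULL }; /* 1GB boundary */
+	PVRSRV_ERROR eError;
+
+	eError = DevmemIntCtxCreate(psConnection, psDeviceNode, IMG_FALSE,
+	                            &psCtx, &hPrivData, &ui32CPUCacheLineSize);
+	if (eError != PVRSRV_OK)
+		return eError;
+
+	eError = DevmemIntHeapCreate(psCtx, sBase, 0x0040000000ULL /* 1GB */,
+	                             12 /* 4kB pages */, &psHeap);
+	if (eError != PVRSRV_OK)
+	{
+		DevmemIntCtxDestroy(psCtx);
+		return eError;
+	}
+
+	/* ... allocations; all heaps must go before the context ... */
+	DevmemIntHeapDestroy(psHeap);
+	return DevmemIntCtxDestroy(psCtx);
+}
+#endif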
+
+/*
+ * DevmemIntMapPMR()
+ *
+ * Maps the given PMR to the virtual range previously allocated with
+ * DevmemIntReserveRange()
+ *
+ * If appropriate, the PMR must have had its physical backing
+ * committed, as this call will call into the MMU code to set up the
+ * page tables for this allocation, which shall in turn request the
+ * physical addresses from the PMR.  Alternatively, the PMR
+ * implementation can choose to do so off the back of the "lock"
+ * callback, which it will receive as a result (indirectly) of this
+ * call.
+ *
+ * This function makes no promise w.r.t. the circumstances that it can
+ * be called, and these would be "inherited" from the implementation
+ * of the PMR.  For example if the PMR "lock" callback causes pages to
+ * be pinned at that time (which may cause scheduling or disk I/O
+ * etc.) then it would not be legal to "Map" the PMR in a context
+ * where scheduling events are disallowed.
+ *
+ * If you call DevmemIntMapPMR() (and the call succeeds) then you are
+ * promising that you shall later call DevmemIntUnmapPMR()
+ */
+extern PVRSRV_ERROR
+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
+                DEVMEMINT_RESERVATION *psReservation,
+                PMR *psPMR,
+                PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+                DEVMEMINT_MAPPING **ppsMappingPtr);
+/*
+ * DevmemIntUnmapPMR()
+ *
+ * Reverses the mapping caused by DevmemIntMapPMR()
+ */
+extern PVRSRV_ERROR
+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping);
+
+/* DevmemIntMapPages()
+ *
+ * Maps an arbitrary amount of pages from a PMR to a reserved range
+ *
+ * @input         psReservation      Reservation handle for the range
+ * @input         psPMR              PMR that is mapped
+ * @input         ui32PageCount      Number of consecutive pages that are mapped
+ * @input         ui32PhysicalPgOffset  Logical page offset into the PMR
+ * @input         uiFlags            Mapping flags
+ * @input         sDevVAddrBase      Virtual address base to start the mapping from
+ */
+extern PVRSRV_ERROR
+DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
+                  PMR *psPMR,
+                  IMG_UINT32 ui32PageCount,
+                  IMG_UINT32 ui32PhysicalPgOffset,
+                  PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                  IMG_DEV_VIRTADDR sDevVAddrBase);
+
+/* DevmemIntUnmapPages()
+ *
+ * Unmaps an arbitrary amount of pages from a reserved range
+ *
+ * @input         psReservation      Reservation handle for the range
+ * @input         sDevVAddrBase      Virtual address base to start from
+ * @input         ui32PageCount      Number of consecutive pages that are unmapped
+ */
+extern PVRSRV_ERROR
+DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
+                    IMG_DEV_VIRTADDR sDevVAddrBase,
+                    IMG_UINT32 ui32PageCount);
+
+/*
+ * DevmemIntReserveRange()
+ *
+ * Indicates that the specified range should be reserved from the
+ * given heap.
+ *
+ * In turn causes the page tables to be allocated to cover the
+ * specified range.
+ *
+ * If you call DevmemIntReserveRange() (and the call succeeds) then you
+ * are promising that you shall later call DevmemIntUnreserveRange()
+ */
+extern PVRSRV_ERROR
+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+                      IMG_DEV_VIRTADDR sAllocationDevVAddr,
+                      IMG_DEVMEM_SIZE_T uiAllocationSize,
+                      DEVMEMINT_RESERVATION **ppsReservationPtr);
+/*
+ * DevmemIntUnreserveRange()
+ *
+ * Undoes the state change caused by DevmemIntReserveRange()
+ */
+extern PVRSRV_ERROR
+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation);
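+
+/* Illustrative sketch (not part of this driver): the reserve/map and
+ * unmap/unreserve pairings promised above.  The flags, address and
+ * size parameters are supplied by the (hypothetical) caller.
+ */
+#if 0
+static PVRSRV_ERROR ExampleMapCycle(DEVMEMINT_HEAP *psHeap,
+                                    PMR *psPMR,
+                                    IMG_DEV_VIRTADDR sDevVAddr,
+                                    IMG_DEVMEM_SIZE_T uiSize,
+                                    PVRSRV_MEMALLOCFLAGS_T uiFlags)
+{
+	DEVMEMINT_RESERVATION *psReservation;
+	DEVMEMINT_MAPPING *psMapping;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemIntReserveRange(psHeap, sDevVAddr, uiSize,
+	                               &psReservation);
+	if (eError != PVRSRV_OK)
+		return eError;
+
+	eError = DevmemIntMapPMR(psHeap, psReservation, psPMR, uiFlags,
+	                         &psMapping);
+	if (eError != PVRSRV_OK)
+	{
+		DevmemIntUnreserveRange(psReservation);
+		return eError;
+	}
+
+	/* ... use the mapping ... */
+	DevmemIntUnmapPMR(psMapping);
+	return DevmemIntUnreserveRange(psReservation);
+}
+#endif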
+
+/*************************************************************************/ /*!
+@Function       DevmemIntChangeSparse
+@Description    Changes the sparse allocations of a PMR by allocating and freeing
+                pages and changing their corresponding CPU and GPU mappings.
+
+@input          psDevmemHeap          Pointer to the heap we map on
+@input          psPMR                 The PMR we want to map
+@input          ui32AllocPageCount    Number of pages to allocate
+@input          pai32AllocIndices     The logical PMR indices where pages will
+                                      be allocated. May be NULL.
+@input          ui32FreePageCount     Number of pages to free
+@input          pai32FreeIndices      The logical PMR indices where pages will
+                                      be freed. May be NULL.
+@input          uiSparseFlags         Flags passed in to determine which kind
+                                      of sparse change the user wanted.
+                                      See devicemem_typedefs.h for details.
+@input          uiFlags               The memalloc flags for this virtual range.
+@input          sDevVAddrBase         The base address of the virtual range of
+                                      this sparse allocation.
+@input          sCpuVAddrBase         The CPU base address of this allocation.
+                                      May be 0 if not existing.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+extern PVRSRV_ERROR
+DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
+                      PMR *psPMR,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *pai32AllocIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pai32FreeIndices,
+                      SPARSE_MEM_RESIZE_FLAGS uiSparseFlags,
+                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                      IMG_DEV_VIRTADDR sDevVAddrBase,
+                      IMG_UINT64 sCpuVAddrBase);
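+
+/* Illustrative sketch (not part of this driver): growing a sparse
+ * allocation by two pages.  The page indices are hypothetical and
+ * SPARSE_RESIZE_ALLOC is assumed to be the allocate-only flag from
+ * devicemem_typedefs.h.
+ */
+#if 0
+static PVRSRV_ERROR ExampleSparseGrow(DEVMEMINT_HEAP *psHeap,
+                                      PMR *psPMR,
+                                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                      IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+	IMG_UINT32 aui32Alloc[] = { 4, 5 }; /* logical PMR page indices */
+
+	return DevmemIntChangeSparse(psHeap, psPMR,
+	                             2, aui32Alloc, /* pages to allocate */
+	                             0, NULL,       /* no pages to free */
+	                             SPARSE_RESIZE_ALLOC,
+	                             uiFlags, sDevVAddrBase,
+	                             0 /* no CPU mapping */);
+}
+#endif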
+
+extern PVRSRV_ERROR
+DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE *psDevNode,
+                         DEVMEMINT_CTX *psDevMemContext,
+                         IMG_DEV_VIRTADDR sDevAddr);
+
+/*************************************************************************/ /*!
+@Function       DevmemIntRegisterPFNotify
+@Description    Registers a PID to be notified when a page fault occurs on a
+                specific device memory context.
+@Input          psDevmemCtx    The context to be notified about.
+@Input          ui32PID        The PID of the process that would like to be
+                               notified.
+@Input          bRegister      If true, register. If false, de-register.
+@Return         PVRSRV_ERROR.
+*/ /**************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
+                            IMG_INT32     ui32PID,
+                            IMG_BOOL      bRegister);
+
+/*************************************************************************/ /*!
+@Function       DevmemIntPFNotify
+@Description    Notifies any processes that have registered themselves to be
+                notified when a page fault happens on a specific device memory
+                context.
+@Input          psDevNode            The device node.
+@Input          ui64FaultedPCAddress The page catalogue address that faulted.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode,
+                               IMG_UINT64         ui64FaultedPCAddress);
+
+#if defined(PDUMP)
+/*
+ * DevmemIntPDumpSaveToFileVirtual()
+ *
+ * Writes out PDump "SAB" commands with the data found in memory at
+ * the given virtual address.
+ */
+extern PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+                                IMG_DEV_VIRTADDR sDevAddrStart,
+                                IMG_DEVMEM_SIZE_T uiSize,
+                                IMG_UINT32 uiArraySize,
+                                const IMG_CHAR *pszFilename,
+                                IMG_UINT32 ui32FileOffset,
+                                IMG_UINT32 ui32PDumpFlags);
+
+extern IMG_UINT32
+DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext);
+
+extern PVRSRV_ERROR
+DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection,
+                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                     IMG_CHAR *pszFileName,
+                     IMG_UINT32 ui32FileOffset,
+                     IMG_UINT32 ui32Width,
+                     IMG_UINT32 ui32Height,
+                     IMG_UINT32 ui32StrideInBytes,
+                     IMG_DEV_VIRTADDR sDevBaseAddr,
+                     DEVMEMINT_CTX *psDevMemContext,
+                     IMG_UINT32 ui32Size,
+                     PDUMP_PIXEL_FORMAT ePixelFormat,
+                     IMG_UINT32 ui32AddrMode,
+                     IMG_UINT32 ui32PDumpFlags);
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpSaveToFileVirtual)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+                                IMG_DEV_VIRTADDR sDevAddrStart,
+                                IMG_DEVMEM_SIZE_T uiSize,
+                                IMG_UINT32 uiArraySize,
+                                const IMG_CHAR *pszFilename,
+                                IMG_UINT32 ui32FileOffset,
+                                IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevmemCtx);
+	PVR_UNREFERENCED_PARAMETER(sDevAddrStart);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiArraySize);
+	PVR_UNREFERENCED_PARAMETER(pszFilename);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpBitmap)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection,
+                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                     IMG_CHAR *pszFileName,
+                     IMG_UINT32 ui32FileOffset,
+                     IMG_UINT32 ui32Width,
+                     IMG_UINT32 ui32Height,
+                     IMG_UINT32 ui32StrideInBytes,
+                     IMG_DEV_VIRTADDR sDevBaseAddr,
+                     DEVMEMINT_CTX *psDevMemContext,
+                     IMG_UINT32 ui32Size,
+                     PDUMP_PIXEL_FORMAT ePixelFormat,
+                     IMG_UINT32 ui32AddrMode,
+                     IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(pszFileName);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Width);
+	PVR_UNREFERENCED_PARAMETER(ui32Height);
+	PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes);
+	PVR_UNREFERENCED_PARAMETER(sDevBaseAddr);
+	PVR_UNREFERENCED_PARAMETER(psDevMemContext);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+	PVR_UNREFERENCED_PARAMETER(ePixelFormat);
+	PVR_UNREFERENCED_PARAMETER(ui32AddrMode);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+PVRSRV_ERROR
+DevmemIntExportCtx(DEVMEMINT_CTX *psContext,
+                   PMR *psPMR,
+                   DEVMEMINT_CTX_EXPORT **ppsContextExport);
+
+PVRSRV_ERROR
+DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport);
+
+PVRSRV_ERROR
+DevmemIntAcquireRemoteCtx(PMR *psPMR,
+                          DEVMEMINT_CTX **ppsContext,
+                          IMG_HANDLE *phPrivData);
+
+#endif /* ifndef __DEVICEMEM_SERVER_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/devicemem_server_utils.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/devicemem_server_utils.h
new file mode 100644
index 0000000..469080d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/devicemem_server_utils.h
@@ -0,0 +1,204 @@
+/**************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header file for utilities specific to device memory functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICEMEM_SERVER_UTILS_H__
+#define __DEVICEMEM_SERVER_UTILS_H__
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "device.h"
+#include "pvrsrv_memallocflags.h"
+#include "pvrsrv.h"
+
+static INLINE PVRSRV_ERROR DevmemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode,
+											  PVRSRV_MEMALLOCFLAGS_T ulFlags,
+											  IMG_UINT32 *pui32Ret)
+{
+	IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags);
+	IMG_UINT32 ui32Ret;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags));
+
+	switch (ui32CPUCacheMode)
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT:
+
+			/*
+			 * If the system has no coherency but coherency has been requested
+			 * for CPU and GPU, we currently have to fall back to uncached.
+			 *
+			 * Ideally this case should return an error, but as long as many
+			 * services allocations use both CPU/GPU coherency flags and rely
+			 * on the UNCACHED fallback, it has to stay.
+			 */
+			if ( (PVRSRV_GPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) &&
+				!(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) )
+			{
+				ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+			}
+			else
+			{
+				ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED;
+			}
+
+			break;
+
+		default:
+			PVR_LOG(("DevmemCPUCacheMode: Unknown CPU cache mode 0x%08x", ui32CPUCacheMode));
+			PVR_ASSERT(0);
+			/*
+				We should never get here, but if we do then setting the mode
+				to uncached is the safest thing to do.
+			*/
+			ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+			eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+			break;
+	}
+
+	*pui32Ret = ui32Ret;
+
+	return eError;
+}
+
+static INLINE PVRSRV_ERROR DevmemDeviceCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode,
+												 PVRSRV_MEMALLOCFLAGS_T ulFlags,
+												 IMG_UINT32 *pui32Ret)
+{
+	IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags);
+	IMG_UINT32 ui32Ret;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags));
+
+	switch (ui32DeviceCacheMode)
+	{
+		case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT:
+
+			/*
+			 * If the system has no coherency but coherency has been requested
+			 * for CPU and GPU, we currently have to fall back to uncached.
+			 *
+			 * Ideally this case should return an error, but as long as many
+			 * services allocations use both CPU/GPU coherency flags and rely
+			 * on the UNCACHED fallback, it has to stay.
+			 */
+			if ( (PVRSRV_CPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) &&
+				!(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) )
+			{
+				ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+			}
+			else
+			{
+				ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED;
+			}
+
+			break;
+
+		default:
+			PVR_LOG(("DevmemDeviceCacheMode: Unknown device cache mode 0x%08x", ui32DeviceCacheMode));
+			PVR_ASSERT(0);
+			/*
+				We should never get here, but if we do then setting the mode
+				to uncached is the safest thing to do.
+			*/
+			ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+			eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+			break;
+	}
+
+	*pui32Ret = ui32Ret;
+
+	return eError;
+}
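+
+/* Illustrative sketch (not part of this driver): resolving the
+ * effective CPU and GPU cache modes for a set of allocation flags.
+ * The caller and flags are hypothetical.
+ */
+#if 0
+static void ExampleCacheModes(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              PVRSRV_MEMALLOCFLAGS_T uiFlags)
+{
+	IMG_UINT32 ui32CPUMode, ui32GPUMode;
+
+	if (DevmemCPUCacheMode(psDeviceNode, uiFlags, &ui32CPUMode) == PVRSRV_OK &&
+	    DevmemDeviceCacheMode(psDeviceNode, uiFlags, &ui32GPUMode) == PVRSRV_OK)
+	{
+		/* e.g. a COHERENT request degrades to UNCACHED when the
+		 * system cannot snoop in both directions */
+	}
+}
+#endif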
+
+static INLINE IMG_BOOL DevmemCPUCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode,
+											   PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+	IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags);
+	IMG_BOOL bRet = IMG_FALSE;
+
+	PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags));
+
+	if (ui32CPUCacheMode == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+	{
+		bRet = PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig);
+	}
+	return bRet;
+}
+
+static INLINE IMG_BOOL DevmemDeviceCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode,
+												  PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+	IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags);
+	IMG_BOOL bRet = IMG_FALSE;
+
+	PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags));
+
+	if (ui32DeviceCacheMode == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT)
+	{
+		bRet = PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig);
+	}
+	return bRet;
+}
+
+#endif /* __DEVICEMEM_SERVER_UTILS_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/handle.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/handle.h
new file mode 100644
index 0000000..f2b8d48
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/handle.h
@@ -0,0 +1,200 @@
+/**************************************************************************/ /*!
+@File
+@Title          Handle Manager API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provide handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__HANDLE_H__)
+#define __HANDLE_H__
+
+/*
+ * Handle API
+ * ----------
+ * The handle API is intended to provide handles for kernel resources,
+ * which can then be passed back to user space processes.
+ *
+ * The following functions comprise the API.  Each function takes a
+ * pointer to a PVRSRV_HANDLE_BASE structure, one of which is allocated
+ * for each process, and stored in the per-process data area.  Use
+ * KERNEL_HANDLE_BASE for handles not allocated for a particular process,
+ * or for handles that need to be allocated before the PVRSRV_HANDLE_BASE
+ * structure for the process is available.
+ *
+ * PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType,
+ * 	PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+ *
+ * Allocate a handle phHandle, for the resource of type eType pointed to by
+ * pvData.
+ *
+ * For handles that have a definite lifetime, where the corresponding
+ * resource is explicitly created and destroyed, eFlag should be zero.
+ *
+ * If a particular resource may be referenced multiple times by a
+ * given process, setting eFlag to PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ * will allow multiple handles to be allocated for the resource.
+ * Such handles cannot be found with PVRSRVFindHandle.
+ *
+ * PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType,
+ * 	PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+ *
+ * This function is similar to PVRSRVAllocHandle, except that the allocated
+ * handles are associated with a parent handle, hParent, that has been
+ * allocated previously.  Subhandles are automatically deallocated when their
+ * parent handle is deallocated.
+ * Subhandles can be treated as ordinary handles.  For example, they may
+ * have subhandles of their own, and may be explicitly deallocated using
+ * PVRSRVReleaseHandle (see below).
+ *
+ * PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Find the handle previously allocated for the resource pointed to by
+ * pvData, of type eType.  Handles allocated with the flag
+ * PVRSRV_HANDLE_ALLOC_FLAG_MULTI cannot be found using this
+ * function.
+ *
+ * PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType,
+ * 	IMG_BOOL bRef);
+ *
+ * Given a handle for a resource of type eType, return the pointer to the
+ * resource.
+ *
+ * PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType,
+ * 	IMG_HANDLE hAncestor);
+ *
+ * Similar to PVRSRVLookupHandle, but checks the handle is a descendant
+ * of hAncestor.
+ *
+ * PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Deallocate a handle of given type.
+ *
+ * PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Return the parent of a handle in *phParent, or NULL if the handle has
+ * no parent.
+ */
+
+#include "img_types.h"
+#include "hash.h"
+
+typedef enum
+{
+	#define HANDLETYPE(x) PVRSRV_HANDLE_TYPE_##x,
+	#include "handle_types.h"
+	#undef HANDLETYPE
+} PVRSRV_HANDLE_TYPE;
+
+static_assert(PVRSRV_HANDLE_TYPE_NONE == 0, "PVRSRV_HANDLE_TYPE_NONE must be zero");
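+
+/* The enum above is built with the "X-macro" pattern: each
+ * HANDLETYPE(x) entry in handle_types.h expands to
+ * PVRSRV_HANDLE_TYPE_##x, so for example
+ *
+ *	HANDLETYPE(NONE)        -> PVRSRV_HANDLE_TYPE_NONE,
+ *	HANDLETYPE(PHYSMEM_PMR) -> PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ *
+ * and a new handle type only needs to be added in one place.
+ */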
+
+typedef enum
+{
+	PVRSRV_HANDLE_BASE_TYPE_CONNECTION,
+	PVRSRV_HANDLE_BASE_TYPE_PROCESS,
+	PVRSRV_HANDLE_BASE_TYPE_GLOBAL
+} PVRSRV_HANDLE_BASE_TYPE;
+
+
+typedef enum
+{
+	/* No flags */
+	PVRSRV_HANDLE_ALLOC_FLAG_NONE = 		0,
+	/* Multiple handles can point at the given data pointer */
+	PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 		0x01,
+	/* Subhandles are allocated in a private handle space */
+	PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 		0x02
+} PVRSRV_HANDLE_ALLOC_FLAG;
+
+typedef struct _HANDLE_BASE_ PVRSRV_HANDLE_BASE;
+
+typedef struct _PROCESS_HANDLE_BASE_
+{
+	PVRSRV_HANDLE_BASE *psHandleBase;
+	ATOMIC_T iRefCount;
+
+} PROCESS_HANDLE_BASE;
+
+extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
+#define	KERNEL_HANDLE_BASE (gpsKernelHandleBase)
+
+#define HANDLE_DEBUG_LISTING_MAX_NUM 20
+
+typedef PVRSRV_ERROR (*PFN_HANDLE_RELEASE)(void *pvData);
+
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef);
+PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef);
+
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor);
+
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+PVRSRV_ERROR PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase,
+                                   PVRSRV_HANDLE_BASE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime);
+
+PVRSRV_ERROR PVRSRVHandleInit(void);
+
+PVRSRV_ERROR PVRSRVHandleDeInit(void);
+
+void LockHandle(void);
+void UnlockHandle(void);
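+
+/* Illustrative sketch (not part of this driver): allocating and
+ * releasing a handle for a resource.  The handle type shown and the
+ * absence of a release callback are hypothetical choices.
+ */
+#if 0
+static PVRSRV_ERROR ExampleHandleUse(PVRSRV_HANDLE_BASE *psBase,
+                                     void *pvResource)
+{
+	IMG_HANDLE hHandle;
+	PVRSRV_ERROR eError;
+
+	eError = PVRSRVAllocHandle(psBase, &hHandle, pvResource,
+	                           PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+	                           PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+	                           NULL /* no release callback */);
+	if (eError != PVRSRV_OK)
+		return eError;
+
+	/* ... hand hHandle to user space, look it up on the way back ... */
+
+	return PVRSRVReleaseHandle(psBase, hHandle,
+	                           PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+}
+#endif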
+
+
+#endif /* !defined(__HANDLE_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/handle_impl.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/handle_impl.h
new file mode 100644
index 0000000..95043d7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/handle_impl.h
@@ -0,0 +1,89 @@
+/**************************************************************************/ /*!
+@File
+@Title          Implementation Callbacks for Handle Manager API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the handle manager API. This file is for declarations 
+                and definitions that are private/internal to the handle manager 
+                API but need to be shared between the generic handle manager 
+                code and the various handle manager backends, i.e. the code that 
+                implements the various callbacks.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__HANDLE_IMPL_H__)
+#define __HANDLE_IMPL_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+typedef struct _HANDLE_IMPL_BASE_ HANDLE_IMPL_BASE;
+
+typedef PVRSRV_ERROR (*PFN_HANDLE_ITER)(IMG_HANDLE hHandle, void *pvData);
+
+typedef struct _HANDLE_IMPL_FUNCTAB_
+{
+	/* Acquire a new handle which is associated with the given data */
+	PVRSRV_ERROR (*pfnAcquireHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE *phHandle, void *pvData);
+
+	/* Release the given handle (optionally returning the data associated with it) */
+	PVRSRV_ERROR (*pfnReleaseHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData);
+
+	/* Get the data associated with the given handle */
+	PVRSRV_ERROR (*pfnGetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData);
+
+	/* Set the data associated with the given handle */
+	PVRSRV_ERROR (*pfnSetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void *pvData);
+
+	PVRSRV_ERROR (*pfnIterateOverHandles)(HANDLE_IMPL_BASE *psHandleBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData);
+
+	/* Enable handle purging on the given handle base */
+	PVRSRV_ERROR (*pfnEnableHandlePurging)(HANDLE_IMPL_BASE *psHandleBase);
+
+	/* Purge handles on the given handle base */
+	PVRSRV_ERROR (*pfnPurgeHandles)(HANDLE_IMPL_BASE *psHandleBase);
+
+	/* Create handle base */
+	PVRSRV_ERROR (*pfnCreateHandleBase)(HANDLE_IMPL_BASE **psHandleBase);
+
+	/* Destroy handle base */
+	PVRSRV_ERROR (*pfnDestroyHandleBase)(HANDLE_IMPL_BASE *psHandleBase);
+} HANDLE_IMPL_FUNCTAB;
+
+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs);
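+
+/* Illustrative sketch (not part of this driver): how a backend might
+ * wire its implementation into the function table.  The gsHandleFuncs
+ * name and the Impl* functions are hypothetical.
+ */
+#if 0
+static const HANDLE_IMPL_FUNCTAB gsHandleFuncs =
+{
+	.pfnAcquireHandle       = ImplAcquireHandle,
+	.pfnReleaseHandle       = ImplReleaseHandle,
+	.pfnGetHandleData       = ImplGetHandleData,
+	.pfnSetHandleData       = ImplSetHandleData,
+	.pfnIterateOverHandles  = ImplIterateOverHandles,
+	.pfnEnableHandlePurging = ImplEnableHandlePurging,
+	.pfnPurgeHandles        = ImplPurgeHandles,
+	.pfnCreateHandleBase    = ImplCreateHandleBase,
+	.pfnDestroyHandleBase   = ImplDestroyHandleBase,
+};
+#endif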
+
+#endif /* !defined(__HANDLE_IMPL_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/handle_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/handle_types.h
new file mode 100644
index 0000000..5653b1d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/handle_types.h
@@ -0,0 +1,89 @@
+/**************************************************************************/ /*!
+@File
+@Title          Handle Manager handle types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provide handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+/* NOTE: Do not add include guards to this file */
+
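+/* Illustrative expansion (a sketch): the includer is expected to define
+ * HANDLETYPE before including this list, typically to build an enum, e.g.
+ *
+ *     #define HANDLETYPE(x) PVRSRV_HANDLE_TYPE_##x,
+ *     typedef enum _PVRSRV_HANDLE_TYPE_
+ *     {
+ *     #include "handle_types.h"
+ *     } PVRSRV_HANDLE_TYPE;
+ *     #undef HANDLETYPE
+ */
+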
+HANDLETYPE(NONE)
+HANDLETYPE(SHARED_EVENT_OBJECT)
+HANDLETYPE(EVENT_OBJECT_CONNECT)
+HANDLETYPE(PMR_LOCAL_EXPORT_HANDLE)
+HANDLETYPE(PHYSMEM_PMR)
+HANDLETYPE(PHYSMEM_PMR_EXPORT)
+HANDLETYPE(PHYSMEM_PMR_SECURE_EXPORT)
+HANDLETYPE(DEVMEMINT_CTX)
+HANDLETYPE(DEVMEMINT_CTX_EXPORT)
+HANDLETYPE(DEVMEMINT_HEAP)
+HANDLETYPE(DEVMEMINT_RESERVATION)
+HANDLETYPE(DEVMEMINT_MAPPING)
+HANDLETYPE(RGX_FW_MEMDESC)
+HANDLETYPE(RGX_RTDATA_CLEANUP)
+HANDLETYPE(RGX_FREELIST)
+HANDLETYPE(RGX_SERVER_RPM_CONTEXT)
+HANDLETYPE(RGX_RPM_FREELIST)
+HANDLETYPE(RGX_MEMORY_BLOCK)
+HANDLETYPE(RGX_SERVER_RENDER_CONTEXT)
+HANDLETYPE(RGX_SERVER_TQ_CONTEXT)
+HANDLETYPE(RGX_SERVER_TQ_TDM_CONTEXT)
+HANDLETYPE(RGX_SERVER_COMPUTE_CONTEXT)
+HANDLETYPE(RGX_SERVER_RAY_CONTEXT)
+HANDLETYPE(RGX_SERVER_KICKSYNC_CONTEXT)
+HANDLETYPE(SYNC_PRIMITIVE_BLOCK)
+HANDLETYPE(SERVER_SYNC_PRIMITIVE)
+HANDLETYPE(SERVER_SYNC_EXPORT)
+HANDLETYPE(SERVER_OP_COOKIE)
+HANDLETYPE(SYNC_RECORD_HANDLE)
+HANDLETYPE(PVRSRV_TIMELINE_SERVER)
+HANDLETYPE(PVRSRV_FENCE_SERVER)
+HANDLETYPE(RGX_FWIF_RENDERTARGET)
+HANDLETYPE(RGX_FWIF_ZSBUFFER)
+HANDLETYPE(RGX_POPULATION)
+HANDLETYPE(DC_DEVICE)
+HANDLETYPE(DC_DISPLAY_CONTEXT)
+HANDLETYPE(DC_BUFFER)
+HANDLETYPE(DC_PIN_HANDLE)
+HANDLETYPE(DEVMEM_MEM_IMPORT)
+HANDLETYPE(PHYSMEM_PMR_PAGELIST)
+HANDLETYPE(PVR_TL_SD)
+HANDLETYPE(RI_HANDLE)
+HANDLETYPE(DEV_PRIV_DATA)
+HANDLETYPE(MM_PLAT_CLEANUP)
+HANDLETYPE(WORKEST_RETURN_DATA)
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/htbserver.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/htbserver.h
new file mode 100644
index 0000000..670e83a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/htbserver.h
@@ -0,0 +1,237 @@
+/*************************************************************************/ /*!
+@File           htbserver.h
+@Title          Host Trace Buffer server implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and the pvrtld tool provide the mechanism
+                to retrieve the trace data.
+
+                A Host Trace can be merged with a corresponding Firmware Trace.
+                This is achieved by inserting synchronisation data into both
+                traces and post processing to merge them.
+
+                The FW Trace will contain a "Sync Partition Marker". This is
+                updated every time the RGX is brought out of reset (RGX clock
+                timestamps reset at this point) and is repeated when the FW
+                Trace buffer wraps to ensure there is always at least 1
+                partition marker in the Firmware Trace buffer whenever it is
+                read.
+
+                The Host Trace will contain corresponding "Sync Partition
+                Markers" - #HTBSyncPartitionMarker(). Each partition is then
+                subdivided into "Sync Scale" sections - #HTBSyncScale(). The
+                "Sync Scale" data allows the timestamps from the two traces to
+                be correlated. The "Sync Scale" data is updated as part of the
+                standard RGX time correlation code (rgxtimecorr.c) and is
+                updated periodically including on power and clock changes.
+
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __HTBSERVER_H__
+#define __HTBSERVER_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv.h"
+#include "htbuffer.h"
+
+
+/************************************************************************/ /*!
+ @Function      HTBDeviceCreate
+ @Description   Initialisation actions for HTB at device creation.
+
+ @Input         psDeviceNode    Reference to the device node in context
+
+ @Return        eError          Error code returned by internal Services calls
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeviceCreate(
+		PVRSRV_DEVICE_NODE *psDeviceNode
+);
+
+
+/************************************************************************/ /*!
+ @Function      HTBDeviceDestroy
+ @Description   De-initialisation actions for HTB at device destruction.
+
+ @Input         psDeviceNode    Reference to the device node in context
+
+*/ /**************************************************************************/
+void
+HTBDeviceDestroy(
+		PVRSRV_DEVICE_NODE *psDeviceNode
+);
+
+
+/************************************************************************/ /*!
+ @Function      HTBDeInit
+ @Description   Close the Host Trace Buffer and free all resources
+
+ @Return        eError          Error code returned by internal Services calls
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeInit( void );
+
+
+/*************************************************************************/ /*!
+ @Function      HTBConfigureKM
+ @Description   Configure or update the configuration of the Host Trace Buffer
+
+ @Input         ui32NameSize    Size of the pszName string
+
+ @Input         pszName         Name to use for the underlying data buffer
+
+ @Input         ui32BufferSize  Size of the underlying data buffer
+
+ @Return        eError          Error code returned by internal Services calls
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBConfigureKM(
+		IMG_UINT32 ui32NameSize,
+		const IMG_CHAR * pszName,
+		const IMG_UINT32 ui32BufferSize
+);
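+
+/* Usage sketch (illustrative buffer name and size, not mandated by the API):
+ *
+ *     static const IMG_CHAR szName[] = "HTBStream";
+ *     eError = HTBConfigureKM(sizeof(szName), szName, 0x10000);
+ */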
+
+
+/*************************************************************************/ /*!
+ @Function      HTBControlKM
+ @Description   Update the configuration of the Host Trace Buffer
+
+ @Input         ui32NumFlagGroups Number of group enable flag words
+
+ @Input         aui32GroupEnable  Flag words controlling which groups are logged
+
+ @Input         ui32LogLevel    Log level to record
+
+ @Input         ui32EnablePID   PID to enable logging for a specific process
+
+ @Input         eLogMode        Enable logging for all or specific processes
+
+ @Input         eOpMode         Control the behaviour of the data buffer
+
+ @Return        eError          Error code returned by internal Services calls
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBControlKM(
+	const IMG_UINT32 ui32NumFlagGroups,
+	const IMG_UINT32 * aui32GroupEnable,
+	const IMG_UINT32 ui32LogLevel,
+	const IMG_UINT32 ui32EnablePID,
+	const HTB_LOGMODE_CTRL eLogMode,
+	const HTB_OPMODE_CTRL eOpMode
+);
+
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncPartitionMarker
+ @Description   Write an HTB sync partition marker to the HTB log
+
+ @Input         ui32Marker      Marker value
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarker(
+	const IMG_UINT32 ui32Marker
+);
+
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncScale
+ @Description   Write FW-Host synchronisation data to the HTB log when clocks
+                change or are re-calibrated
+
+ @Input         bLogValues      IMG_TRUE if the values should be immediately
+                                written out to the log
+
+ @Input         ui64OSTS        OS Timestamp
+
+ @Input         ui64CRTS        Rogue timestamp
+
+ @Input         ui32CalcClkSpd  Calculated clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncScale(
+	const IMG_BOOL bLogValues,
+	const IMG_UINT64 ui64OSTS,
+	const IMG_UINT64 ui64CRTS,
+	const IMG_UINT32 ui32CalcClkSpd
+);
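+
+/* A minimal correlation sketch (illustrative only, assuming ui32CalcClkSpd is
+ * in Hz): a later FW timestamp ui64CRNow maps onto the OS timeline roughly as
+ *
+ *     ui64OSNow = ui64OSTS +
+ *                 ((ui64CRNow - ui64CRTS) * 1000000000ULL) / ui32CalcClkSpd;
+ */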
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLogKM
+ @Description   Record a Host Trace Buffer log event
+
+ @Input         PID             The PID of the process the event is associated
+                                with. This is provided as an argument rather
+                                than querying internally so that events associated
+                                with a particular process, but performed by
+                                another can be logged correctly.
+
+ @Input         ui32TimeStamp   The timestamp to be associated with this log event
+
+ @Input         SF              The log event ID
+
+ @Input         ui32NumArgs     Number of entries in aui32Args
+
+ @Input         aui32Args       Array of log event parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBLogKM(
+		IMG_UINT32 PID,
+		IMG_UINT32 ui32TimeStamp,
+		HTB_LOG_SFids SF,
+		IMG_UINT32 ui32NumArgs,
+		IMG_UINT32 * aui32Args
+);
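+
+/* Usage sketch (illustrative; the SF id and argument meanings depend on the
+ * HTB_LOG_SFids table defined in htbuffer.h):
+ *
+ *     IMG_UINT32 aui32Args[] = { ui32Value0, ui32Value1 };
+ *     eError = HTBLogKM(ui32PID, ui32TimeStamp, eSFid,
+ *                       sizeof(aui32Args) / sizeof(aui32Args[0]), aui32Args);
+ */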
+
+
+#endif /* __HTBSERVER_H__ */
+
+/* EOF */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/info_page.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/info_page.h
new file mode 100644
index 0000000..909f9d2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/info_page.h
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@File
+@Title          Kernel/User mode general purpose shared memory.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    General purpose memory shared between kernel driver and user
+                mode.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _INFO_PAGE_KM_H_
+#define _INFO_PAGE_KM_H_
+
+#include "pvrsrv_error.h"
+
+#include "pmr.h"
+#include "pvrsrv.h"
+
+/**
+ * @Function InfoPageCreate
+ * @Description Allocates resources for the global information page.
+ * @Input psData pointer to PVRSRV data
+ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error.
+ */
+PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData);
+
+/**
+ * @Function InfoPageDestroy
+ * @Description Frees all of the resources of the global information page.
+ * @Input psData pointer to PVRSRV data
+ */
+void InfoPageDestroy(PVRSRV_DATA *psData);
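+
+/* Usage sketch (illustrative): the two calls pair over the driver lifetime.
+ *
+ *     eError = InfoPageCreate(psPVRSRVData);
+ *     ...
+ *     InfoPageDestroy(psPVRSRVData);
+ */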
+
+#endif /* _INFO_PAGE_KM_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/lists.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/lists.h
new file mode 100644
index 0000000..e7a900f
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/lists.h
@@ -0,0 +1,355 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linked list shared functions templates.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Definition of the linked list function templates.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __LISTS_UTILS__
+#define __LISTS_UTILS__
+
+/* instruct QAC to ignore warnings about the following custom formatted macros */
+/* PRQA S 0881,3410 ++ */
+#include <stdarg.h>
+#include "img_types.h"
+#include "device.h"
+#include "power.h"
+
+/*
+ - USAGE -
+
+ The list functions work with any structure that provides the fields psNext and
+ ppsThis. In order to make a function available for a given type, it is required
+ to use the function template macro that creates the actual code.
+
+ There are 5 main types of functions:
+ - INSERT	   : given a pointer to the head pointer of the list and a pointer
+                 to the node, inserts it as the new head.
+ - INSERT TAIL : given a pointer to the head pointer of the list and a pointer
+                 to the node, inserts the node at the tail of the list.
+ - REMOVE	   : given a pointer to a node, removes it from its list.
+ - FOR EACH	   : apply a function over all the elements of a list.
+ - ANY		   : apply a function over the elements of a list until one of them
+                 returns a non-null value, which is then returned.
+
+ The last two functions can have a variable-argument form, which allows
+ additional parameters to be passed to the callback function. In order to do
+ this, the callback function must take two arguments: the first is the current
+ node and the second is a list of variable arguments (va_list).
+
+ The ANY functions also have another form, which specifies the return type of
+ the callback function and the default value returned by the callback function.
+
+*/
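+
+/*
+ Illustrative node layout (a sketch, not a type used by this driver): any
+ structure carrying the two link fields can be used with these templates.
+
+	typedef struct _EXAMPLE_NODE_
+	{
+		struct _EXAMPLE_NODE_  *psNext;
+		struct _EXAMPLE_NODE_ **ppsThis;
+		IMG_UINT32              ui32Payload;
+	} EXAMPLE_NODE;
+
+	DECLARE_LIST_INSERT(EXAMPLE_NODE);
+	IMPLEMENT_LIST_INSERT(EXAMPLE_NODE)
+*/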
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_ForEach
+@Description    Apply a callback function to all the elements of a list.
+@Input          psHead        The head of the list to be processed.
+@Input          pfnCallBack   The function to be applied to each element of the list.
+*/ /**************************************************************************/
+#define DECLARE_LIST_FOR_EACH(TYPE) \
+void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH(TYPE) \
+void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
+{\
+	while(psHead)\
+	{\
+		pfnCallBack(psHead);\
+		psHead = psHead->psNext;\
+	}\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_ForEachSafe
+@Description    Apply a callback function to all the elements of a list. Do it
+                in a safe way that handles the fact that a node might remove itself
+                from the list during the iteration.
+@Input          psHead        The head of the list to be processed.
+@Input          pfnCallBack   The function to be applied to each element of the list.
+*/ /**************************************************************************/
+#define DECLARE_LIST_FOR_EACH_SAFE(TYPE) \
+void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH_SAFE(TYPE) \
+void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
+{\
+	TYPE *psNext;\
+\
+	while(psHead)\
+	{\
+		psNext = psHead->psNext; \
+		pfnCallBack(psHead);\
+		psHead = psNext;\
+	}\
+}
+
+
+#define DECLARE_LIST_FOR_EACH_VA(TYPE) \
+void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \
+void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) \
+{\
+	va_list ap;\
+	while(psHead)\
+	{\
+		va_start(ap, pfnCallBack);\
+		pfnCallBack(psHead, ap);\
+		psHead = psHead->psNext;\
+		va_end(ap);\
+	}\
+}
+
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Any
+@Description    Applies a callback function to the elements of a list until the
+                function returns a non-null value, which is then returned.
+@Input          psHead        The head of the list to be processed.
+@Input          pfnCallBack   The function to be applied to each element of the list.
+@Return         The first non-null value returned by the callback function.
+*/ /**************************************************************************/
+#define DECLARE_LIST_ANY(TYPE) \
+void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY(TYPE) \
+void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))\
+{ \
+	void *pResult;\
+	TYPE *psNextNode;\
+	pResult = NULL;\
+	psNextNode = psHead;\
+	while(psHead && !pResult)\
+	{\
+		psNextNode = psNextNode->psNext;\
+		pResult = pfnCallBack(psHead);\
+		psHead = psNextNode;\
+	}\
+	return pResult;\
+}
+
+
+/* With variable arguments, which will be passed as a va_list to the callback function */
+
+#define DECLARE_LIST_ANY_VA(TYPE) \
+void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA(TYPE) \
+void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+	va_list ap;\
+	TYPE *psNextNode;\
+	void* pResult = NULL;\
+	while(psHead && !pResult)\
+	{\
+		psNextNode = psHead->psNext;\
+		va_start(ap, pfnCallBack);\
+		pResult = pfnCallBack(psHead, ap);\
+		va_end(ap);\
+		psHead = psNextNode;\
+	}\
+	return pResult;\
+}
+
+/* These variants add extra type safety, so there is no need to cast the results */
+
+#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\
+{ \
+	RTYPE result;\
+	TYPE *psNextNode;\
+	result = CONTINUE;\
+	psNextNode = psHead;\
+	while(psHead && result == CONTINUE)\
+	{\
+		psNextNode = psNextNode->psNext;\
+		result = pfnCallBack(psHead);\
+		psHead = psNextNode;\
+	}\
+	return result;\
+}
+
+
+#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+	va_list ap;\
+	TYPE *psNextNode;\
+	RTYPE result = CONTINUE;\
+	while(psHead && result == CONTINUE)\
+	{\
+		psNextNode = psHead->psNext;\
+		va_start(ap, pfnCallBack);\
+		result = pfnCallBack(psHead, ap);\
+		va_end(ap);\
+		psHead = psNextNode;\
+	}\
+	return result;\
+}
+
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Remove
+@Description    Removes a given node from the list.
+@Input          psNode      The pointer to the node to be removed.
+*/ /**************************************************************************/
+#define DECLARE_LIST_REMOVE(TYPE) \
+void List_##TYPE##_Remove(TYPE *psNode)
+
+#define IMPLEMENT_LIST_REMOVE(TYPE) \
+void List_##TYPE##_Remove(TYPE *psNode)\
+{\
+	(*psNode->ppsThis)=psNode->psNext;\
+	if(psNode->psNext)\
+	{\
+		psNode->psNext->ppsThis = psNode->ppsThis;\
+	}\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Insert
+@Description    Inserts a given node at the beginning of the list.
+@Input          ppsHead     The pointer to the pointer to the head node.
+@Input          psNewNode   The pointer to the node to be inserted.
+*/ /**************************************************************************/
+#define DECLARE_LIST_INSERT(TYPE) \
+void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)
+
+#define IMPLEMENT_LIST_INSERT(TYPE) \
+void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\
+{\
+	psNewNode->ppsThis = ppsHead;\
+	psNewNode->psNext = *ppsHead;\
+	*ppsHead = psNewNode;\
+	if(psNewNode->psNext)\
+	{\
+		psNewNode->psNext->ppsThis = &(psNewNode->psNext);\
+	}\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_InsertTail
+@Description    Inserts a given node at the end of the list.
+@Input          ppsHead     The pointer to the pointer to the head node.
+@Input          psNewNode   The pointer to the node to be inserted.
+*/ /**************************************************************************/
+#define DECLARE_LIST_INSERT_TAIL(TYPE) \
+void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)
+
+#define IMPLEMENT_LIST_INSERT_TAIL(TYPE) \
+void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)\
+{\
+	TYPE *psTempNode = *ppsHead;\
+	if (psTempNode != NULL)\
+	{\
+		while (psTempNode->psNext)\
+			psTempNode = psTempNode->psNext;\
+		ppsHead = &psTempNode->psNext;\
+	}\
+	psNewNode->ppsThis = ppsHead;\
+	psNewNode->psNext = NULL;\
+	*ppsHead = psNewNode;\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Reverse
+@Description    Reverse a list in place
+@Input          ppsHead    The pointer to the pointer to the head node.
+*/ /**************************************************************************/
+#define DECLARE_LIST_REVERSE(TYPE) \
+void List_##TYPE##_Reverse(TYPE **ppsHead)
+
+#define IMPLEMENT_LIST_REVERSE(TYPE) \
+void List_##TYPE##_Reverse(TYPE **ppsHead)\
+{\
+	TYPE *psTmpNode1; \
+	TYPE *psTmpNode2; \
+	TYPE *psCurNode; \
+	psTmpNode1 = NULL; \
+	psCurNode = *ppsHead; \
+	while(psCurNode) { \
+		psTmpNode2 = psCurNode->psNext; \
+		psCurNode->psNext = psTmpNode1; \
+		psTmpNode1 = psCurNode; \
+		psCurNode = psTmpNode2; \
+		if(psCurNode) \
+		{ \
+			psTmpNode1->ppsThis = &(psCurNode->psNext); \
+		} \
+		else \
+		{ \
+			psTmpNode1->ppsThis = ppsHead; \
+		} \
+	} \
+	*ppsHead = psTmpNode1; \
+}
+
+#define IS_LAST_ELEMENT(x) ((x)->psNext == NULL)
+
+
+DECLARE_LIST_ANY(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE);
+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
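+
+/* Illustrative use of a generated function (a sketch; the callback name and
+ * list head variable are hypothetical):
+ *
+ *     eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psDeviceNodeListHead,
+ *                                                       MyDeviceCheckCB);
+ */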
+
+#undef DECLARE_LIST_ANY
+#undef DECLARE_LIST_ANY_2
+#undef DECLARE_LIST_ANY_VA
+#undef DECLARE_LIST_ANY_VA_2
+#undef DECLARE_LIST_FOR_EACH
+#undef DECLARE_LIST_FOR_EACH_VA
+#undef DECLARE_LIST_INSERT
+#undef DECLARE_LIST_INSERT_TAIL
+#undef DECLARE_LIST_REMOVE
+
+#endif
+
+/* re-enable warnings */
+/* PRQA S 0881,3410 -- */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/mmu_common.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/mmu_common.h
new file mode 100644
index 0000000..32483b6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/mmu_common.h
@@ -0,0 +1,730 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common MMU Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef MMU_COMMON_H
+#define MMU_COMMON_H
+
+/*
+	The Memory Management Unit (MMU) performs device virtual to physical translation.
+
+	Terminology:
+	 - page catalogue, PC	(optional, 3 tier MMU)
+	 - page directory, PD
+	 - page table, PT (can be variable sized)
+	 - data page, DP (can be variable sized)
+    Note: PD and PC are fixed size and can't be larger than 
+           the native physical (CPU) page size
+	Shifts and AlignShift variables:
+	 - 'xxxShift' represent the number of bits a bitfield is shifted left from bit0 
+	 - 'xxxAlignShift' is used to convert a bitfield (based at bit0) into byte units 
+	 	by applying a bit shift left by 'xxxAlignShift' bits
+*/
+
+/*
+	Device Virtual Address Config:
+
+	Incoming Device Virtual Address is deconstructed into up to 4
+	fields, where the virtual address is up to 64bits:
+	MSB-----------------------------------------------LSB
+	| PC Index:   | PD Index:  | PT Index: | DP offset: |
+	| d bits      | c bits     | b-v bits  |  a+v bits  |
+	-----------------------------------------------------
+	where v is the variable page table modifier, e.g.
+			v == 0 -> 4KB DP
+			v == 2 -> 16KB DP
+			v == 4 -> 64KB DP
+			v == 6 -> 256KB DP
+			v == 8 -> 1MB DP
+			v == 10 -> 4MB DP
+*/
+
+/* services/server/include/ */
+#include "pmr.h"
+
+/* include/ */
+#include "img_types.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_error.h"
+#include "servicesext.h"
+
+
+/*!
+	The level of the MMU
+*/
+typedef enum
+{
+	MMU_LEVEL_0 = 0,	/* Level 0 = Page */
+
+	MMU_LEVEL_1,
+	MMU_LEVEL_2,
+	MMU_LEVEL_3,
+	MMU_LEVEL_LAST
+} MMU_LEVEL;
+
+/* moved after declaration of MMU_LEVEL, as pdump_mmu.h references it */
+#include "pdump_mmu.h"
+
+#define MMU_MAX_LEVEL 3
+
+struct _MMU_DEVVADDR_CONFIG_;
+
+/*!
+	MMU device attributes. This structure is the interface between the generic
+	MMU code and the device specific MMU code.
+*/
+typedef struct _MMU_DEVICEATTRIBS_
+{
+	PDUMP_MMU_TYPE eMMUType;
+
+	IMG_CHAR *pszMMUPxPDumpMemSpaceName;
+
+	/*! The type of the top level object */
+	MMU_LEVEL eTopLevel;
+
+	/*! Alignment requirement of the base object */
+	IMG_UINT32 ui32BaseAlign;
+
+	/*! HW config of the base object */
+	struct _MMU_PxE_CONFIG_ *psBaseConfig;
+
+	/*! Address split for the base object */
+	const struct _MMU_DEVVADDR_CONFIG_ *psTopLevelDevVAddrConfig;
+
+	/*! Callback for creating protection bits for the page catalogue entry with 8 byte entry */
+	IMG_UINT64 (*pfnDerivePCEProt8)(IMG_UINT32, IMG_UINT32);
+	/*! Callback for creating protection bits for the page catalogue entry with 4 byte entry */
+	IMG_UINT32 (*pfnDerivePCEProt4)(IMG_UINT32);
+	/*! Callback for creating protection bits for the page directory entry with 8 byte entry */
+	IMG_UINT64 (*pfnDerivePDEProt8)(IMG_UINT32, IMG_UINT32);
+	/*! Callback for creating protection bits for the page directory entry with 4 byte entry */
+	IMG_UINT32 (*pfnDerivePDEProt4)(IMG_UINT32);
+	/*! Callback for creating protection bits for the page table entry with 8 byte entry */
+	IMG_UINT64 (*pfnDerivePTEProt8)(IMG_UINT32, IMG_UINT32);
+	/*! Callback for creating protection bits for the page table entry with 4 byte entry */
+	IMG_UINT32 (*pfnDerivePTEProt4)(IMG_UINT32);
+
+	/*! Callback for getting the MMU configuration based on the specified page size */
+	PVRSRV_ERROR (*pfnGetPageSizeConfiguration)(IMG_UINT32 ui32DataPageSize,
+												const struct _MMU_PxE_CONFIG_ **ppsMMUPDEConfig,
+												const struct _MMU_PxE_CONFIG_ **ppsMMUPTEConfig,
+												const struct _MMU_DEVVADDR_CONFIG_ **ppsMMUDevVAddrConfig,
+												IMG_HANDLE *phPriv2);
+	/*! Callback for putting the MMU configuration obtained from pfnGetPageSizeConfiguration */
+	PVRSRV_ERROR (*pfnPutPageSizeConfiguration)(IMG_HANDLE hPriv);
+
+	/*! Callback for getting the page size from the PDE for the page table entry with 4 byte entry */
+	PVRSRV_ERROR (*pfnGetPageSizeFromPDE4)(IMG_UINT32, IMG_UINT32 *);
+	/*! Callback for getting the page size from the PDE for the page table entry with 8 byte entry */
+	PVRSRV_ERROR (*pfnGetPageSizeFromPDE8)(IMG_UINT64, IMG_UINT32 *);
+
+	/*! Private data handle */
+	IMG_HANDLE hGetPageSizeFnPriv;
+} MMU_DEVICEATTRIBS;
+
+/*!
+	MMU virtual address split
+*/
+typedef struct _MMU_DEVVADDR_CONFIG_
+{
+	/*! Page catalogue index mask */
+	IMG_UINT64	uiPCIndexMask;
+	/*! Page catalogue index shift */
+	IMG_UINT8	uiPCIndexShift;
+	/*! Total number of PC entries */
+	IMG_UINT32  uiNumEntriesPC;
+	/*! Page directory mask */
+	IMG_UINT64	uiPDIndexMask;
+	/*! Page directory shift */
+	IMG_UINT8	uiPDIndexShift;
+	/*! Total number of PD entries */
+	IMG_UINT32  uiNumEntriesPD;
+	/*! Page table mask */
+	IMG_UINT64	uiPTIndexMask;
+	/*! Page index shift */
+	IMG_UINT8	uiPTIndexShift;
+	/*! Total number of PT entries */
+	IMG_UINT32  uiNumEntriesPT;
+	/*! Page offset mask */
+	IMG_UINT64	uiPageOffsetMask;
+	/*! Page offset shift */
+	IMG_UINT8	uiPageOffsetShift;
+	/*! First virtual address mappable for this config */
+	IMG_UINT64  uiOffsetInBytes;
+
+} MMU_DEVVADDR_CONFIG;
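+
+/* Illustrative index extraction (a sketch, assuming psCfg points at a
+ * populated MMU_DEVVADDR_CONFIG and ui64VAddr is a device virtual address):
+ *
+ *     ui32PCIdx = (IMG_UINT32)((ui64VAddr & psCfg->uiPCIndexMask)
+ *                              >> psCfg->uiPCIndexShift);
+ *     ui32PDIdx = (IMG_UINT32)((ui64VAddr & psCfg->uiPDIndexMask)
+ *                              >> psCfg->uiPDIndexShift);
+ *     ui32PTIdx = (IMG_UINT32)((ui64VAddr & psCfg->uiPTIndexMask)
+ *                              >> psCfg->uiPTIndexShift);
+ */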
+
+/*
+	P(C/D/T) Entry Config:
+
+	MSB-----------------------------------------------LSB
+	| PT Addr:   | variable PT ctrl | protection flags: |
+	| bits c+v   | b bits           | a bits            |
+	-----------------------------------------------------
+	where v is the variable page table modifier and is optional
+*/
+/*!
+	Generic MMU entry description. This is used to describe PC, PD and PT entries.
+*/
+typedef struct _MMU_PxE_CONFIG_
+{
+	IMG_UINT8	uiBytesPerEntry;  /*! Size of an entry in bytes */
+
+	IMG_UINT64	 uiAddrMask;      /*! Physical address mask */
+	IMG_UINT8	 uiAddrShift;     /*! Physical address shift */
+	IMG_UINT8	 uiAddrLog2Align; /*! Physical address Log 2 alignment */
+
+	IMG_UINT64	 uiVarCtrlMask;	  /*! Variable control mask */
+	IMG_UINT8	 uiVarCtrlShift;  /*! Variable control shift */
+
+	IMG_UINT64	 uiProtMask;      /*! Protection flags mask */
+	IMG_UINT8	 uiProtShift;     /*! Protection flags shift */
+
+	IMG_UINT64   uiValidEnMask;   /*! Entry valid bit mask */
+	IMG_UINT8    uiValidEnShift;  /*! Entry valid bit shift */
+} MMU_PxE_CONFIG;
+
+/* MMU Protection flags */
+
+
+/* These are specified generically and in a h/w independent way, and
+   are interpreted at each level (PC/PD/PT) separately. */
+
+/* The following flags are for internal use only, and should not
+   traverse the API */
+#define MMU_PROTFLAGS_INVALID 0x80000000U
+
+typedef IMG_UINT32 MMU_PROTFLAGS_T;
+
+/* The following flags should be supplied by the caller: */
+#define MMU_PROTFLAGS_READABLE	   				(1U<<0)
+#define MMU_PROTFLAGS_WRITEABLE		   		    (1U<<1)
+#define MMU_PROTFLAGS_CACHE_COHERENT			(1U<<2)
+#define MMU_PROTFLAGS_CACHED					(1U<<3)
+
+/* Device specific flags*/
+#define MMU_PROTFLAGS_DEVICE_OFFSET		16
+#define MMU_PROTFLAGS_DEVICE_MASK		0x000f0000UL
+#define MMU_PROTFLAGS_DEVICE(n)	\
+			(((n) << MMU_PROTFLAGS_DEVICE_OFFSET) & \
+			MMU_PROTFLAGS_DEVICE_MASK)
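+
+/* Example (illustrative): a readable, writeable, cached mapping carrying a
+ * device specific value of 2 would be requested as
+ *
+ *     MMU_PROTFLAGS_READABLE | MMU_PROTFLAGS_WRITEABLE |
+ *     MMU_PROTFLAGS_CACHED | MMU_PROTFLAGS_DEVICE(2)
+ */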
+
+
+typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
+
+struct _PVRSRV_DEVICE_NODE_; 
+
+typedef struct _MMU_PAGESIZECONFIG_
+{
+	const MMU_PxE_CONFIG *psPDEConfig;
+	const MMU_PxE_CONFIG *psPTEConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	IMG_UINT32 uiRefCount;
+	IMG_UINT32 uiMaxRefCount;
+} MMU_PAGESIZECONFIG;
+
+/*************************************************************************/ /*!
+@Function       MMU_ContextCreate
+
+@Description    Create a new MMU context
+
+@Input          psDevNode               Device node of the device to create the
+                                        MMU context for
+
+@Output         ppsMMUContext           The created MMU context
+
+@Input          psDevAttrs              MMU device attributes for the device
+
+@Return         PVRSRV_OK if the MMU context was successfully created
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_ContextCreate (struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+				   MMU_CONTEXT **ppsMMUContext,
+				   MMU_DEVICEATTRIBS *psDevAttrs);
+
+
+/*************************************************************************/ /*!
+@Function       MMU_ContextDestroy
+
+@Description    Destroy an MMU context
+
+@Input          psMMUContext            MMU context to destroy
+
+@Return         None
+*/
+/*****************************************************************************/
+extern void
+MMU_ContextDestroy (MMU_CONTEXT *psMMUContext);
+
+/*************************************************************************/ /*!
+@Function       MMU_Alloc
+
+@Description    Allocate the page tables required for the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uSize                   The size of the allocation
+
+@Output         puActualSize            Actual size of allocation
+
+@Input          uiProtFlags             Generic MMU protection flags
+
+@Input          uDevVAddrAlignment      Alignment requirement of the virtual
+                                        allocation
+
+@Input          psDevVAddr              Virtual address to start the allocation
+                                        from
+
+@Input          uiLog2PageSize          Log2 page size of the data pages
+
+@Return         PVRSRV_OK if the allocation of the page tables was successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_Alloc (MMU_CONTEXT *psMMUContext,
+           IMG_DEVMEM_SIZE_T uSize,
+           IMG_DEVMEM_SIZE_T *puActualSize,
+           IMG_UINT32 uiProtFlags,
+           IMG_DEVMEM_SIZE_T uDevVAddrAlignment,
+           IMG_DEV_VIRTADDR *psDevVAddr,
+           IMG_UINT32 uiLog2PageSize);
+
+
+/*************************************************************************/ /*!
+@Function       MMU_Free
+
+@Description    Free the page tables of the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddr               Virtual address to start the free
+                                        from
+
+@Input          uiSize                  The size of the allocation
+
+@Input          uiLog2DataPageSize      Log2 page size of the data pages
+
+@Return         None
+*/
+/*****************************************************************************/
+extern void
+MMU_Free (MMU_CONTEXT *psMMUContext,
+          IMG_DEV_VIRTADDR sDevVAddr,
+          IMG_DEVMEM_SIZE_T uiSize,
+          IMG_UINT32 uiLog2DataPageSize);
+
+
+/*************************************************************************/ /*!
+@Function       MMU_MapPages
+
+@Description    Map pages to the MMU.
+                Two modes of operation: One requires a list of physical page
+                indices that are going to be mapped, the other just takes
+                the PMR and a possible offset to map parts of it.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uiMappingFlags          Memalloc flags for the mapping
+
+@Input          sDevVAddrBase           Device virtual address of the 1st page
+
+@Input          psPMR                   PMR to map
+
+@Input          ui32PhysPgOffset        Physical offset into the PMR
+
+@Input          ui32MapPageCount        Number of pages to map
+
+@Input          paui32MapIndices        List of page indices to map,
+                                         can be NULL
+
+@Input          uiLog2PageSize          Log2 page size of the pages to map
+
+@Return         PVRSRV_OK if the mapping was successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_MapPages(MMU_CONTEXT *psMMUContext,
+             PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+             IMG_DEV_VIRTADDR sDevVAddrBase,
+             PMR *psPMR,
+             IMG_UINT32 ui32PhysPgOffset,
+             IMG_UINT32 ui32MapPageCount,
+             IMG_UINT32 *paui32MapIndices,
+             IMG_UINT32 uiLog2PageSize);
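+
+/* Illustrative calls for the two modes (sketches; variable names are
+ * hypothetical):
+ *
+ *     // Scattered pages selected by an index list
+ *     eError = MMU_MapPages(psCtx, uiFlags, sVAddr, psPMR, 0, ui32Count,
+ *                           paui32Indices, uiLog2PageSize);
+ *
+ *     // Contiguous pages starting at a physical page offset into the PMR
+ *     eError = MMU_MapPages(psCtx, uiFlags, sVAddr, psPMR, ui32PhysPgOffset,
+ *                           ui32Count, NULL, uiLog2PageSize);
+ */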
+
+/*************************************************************************/ /*!
+@Function       MMU_UnmapPages
+
+@Description    Unmap pages from the MMU.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uiMappingFlags          Memalloc flags for the mapping
+
+@Input          psDevVAddr              Device virtual address of the 1st page
+
+@Input          ui32PageCount           Number of pages to unmap
+
+@Input          pai32UnmapIndicies      Array of page indices to be unmapped
+
+@Input          uiLog2PageSize          log2 size of the page
+
+
+@Input          bDummyBacking           Bool that indicates if the unmapped
+                                        regions need to be backed by the dummy
+                                        page
+
+@Return         None
+*/
+/*****************************************************************************/
+extern void
+MMU_UnmapPages (MMU_CONTEXT *psMMUContext,
+				PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+                IMG_DEV_VIRTADDR sDevVAddr,
+                IMG_UINT32 ui32PageCount,
+                IMG_UINT32 *pai32UnmapIndicies,
+                IMG_UINT32 uiLog2PageSize,
+                IMG_BOOL bDummyBacking);
+
+/*************************************************************************/ /*!
+@Function       MMU_MapPMRFast
+
+@Description    Map a PMR into the MMU. The PMR must not be sparse.
+                This is supposed to cover most mappings and, as the name
+                suggests, should be as fast as possible.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddr               Device virtual address to map the PMR
+                                        into
+
+@Input          psPMR                   PMR to map
+
+@Input          uiSizeBytes             Size in bytes to map
+
+@Input          uiMappingFlags          Memalloc flags for the mapping
+
+@Return         PVRSRV_OK if the PMR was successfully mapped
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_MapPMRFast (MMU_CONTEXT *psMMUContext,
+                IMG_DEV_VIRTADDR sDevVAddr,
+                const PMR *psPMR,
+                IMG_DEVMEM_SIZE_T uiSizeBytes,
+                PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+                IMG_UINT32 uiLog2PageSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_UnmapPMRFast
+
+@Description    Unmap pages from the MMU as fast as possible.
+                The PMR must be non-sparse!
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrBase           Device virtual address of the 1st page
+
+@Input          ui32PageCount           Number of pages to unmap
+
+@Input          uiLog2PageSize          log2 size of the page
+
+@Return         None
+*/
+/*****************************************************************************/
+extern void
+MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext,
+                 IMG_DEV_VIRTADDR sDevVAddrBase,
+                 IMG_UINT32 ui32PageCount,
+                 IMG_UINT32 uiLog2PageSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_ChangeValidity
+
+@Description    Sets or unsets the valid bit of page table entries for a given
+                address range.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddr               The device virtual base address of
+                                        the range we want to modify
+
+@Input          uiSizeBytes             The size of the range in bytes
+
+@Input          uiLog2PageSize          Log2 of the used page size
+
+@Input          bMakeValid              Choose to set or unset the valid bit.
+                                        (bMakeValid == IMG_TRUE ) -> SET
+                                        (bMakeValid == IMG_FALSE) -> UNSET
+
+@Input          psPMR                   The PMR backing the allocation.
+                                        Needed in case we have sparse memory
+                                        where we have to check whether a physical
+                                        address actually backs the virtual.
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
+                   IMG_DEV_VIRTADDR sDevVAddr,
+                   IMG_DEVMEM_SIZE_T uiSizeBytes,
+                   IMG_UINT32 uiLog2PageSize,
+                   IMG_BOOL bMakeValid,
+                   PMR *psPMR);
+
+/*************************************************************************/ /*!
+@Function       MMU_AcquireBaseAddr
+
+@Description    Acquire the device physical address of the base level MMU object
+
+@Input          psMMUContext            MMU context to operate on
+
+@Output         psPhysAddr              Device physical address of the base level
+                                        MMU object
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr);
+
+/*************************************************************************/ /*!
+@Function       MMU_ReleaseBaseAddr
+
+@Description    Release the device physical address of the base level MMU object
+
+@Input          psMMUContext            MMU context to operate on
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+void
+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/***********************************************************************************/ /*!
+@Function       MMU_SetOSids
+
+@Description    Set the OSid associated with the application (and the MMU Context)
+
+@Input          psMMUContext            MMU context to store the OSid on
+
+@Input          ui32OSid                the OSid in question
+
+@Input          ui32OSidReg             The value that the firmware will assign
+                                        to the registers.
+
+@Input          bOSidAxiProt            Toggles whether the AXI prot bit will be set or
+                                        not.
+@Return None
+*/
+/***********************************************************************************/
+
+void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt);
+
+/***********************************************************************************/ /*!
+@Function       MMU_GetOSids
+
+@Description    Retrieve the OSid associated with the MMU context.
+
+@Input          psMMUContext            MMU context in which the OSid is stored
+
+@Output         pui32OSid               The OSid in question
+
+@Output         pui32OSidReg            The OSid value that the firmware will
+                                        assign to the registers.
+
+@Output         pbOSidAxiProt           Whether the AXI prot bit will be set or
+                                        not.
+@Return None
+*/
+/***********************************************************************************/
+
+void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 * pui32OSid, IMG_UINT32 * pui32OSidReg, IMG_BOOL *pbOSidAxiProt);
+#endif
+
+/*************************************************************************/ /*!
+@Function       MMU_SetDeviceData
+
+@Description    Set the device specific callback data
+
+@Input          psMMUContext            MMU context to store the data on
+
+@Input          hDevData                Device data
+
+@Return         None
+*/
+/*****************************************************************************/
+void MMU_SetDeviceData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hDevData);
+
+/*************************************************************************/ /*!
+@Function       MMU_CheckFaultAddress
+
+@Description    Check the specified MMU context to see if the provided address
+                should be valid
+
+@Input          psMMUContext            MMU context to store the data on
+
+@Input          psDevVAddr              Address to check
+
+@Input          pfnDumpDebugPrintf      Debug print function
+
+@Input          pvDumpDebugFile         Optional file identifier to be passed
+                                        to the debug print function if required
+
+@Return         None
+*/
+/*****************************************************************************/
+void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext,
+				IMG_DEV_VIRTADDR *psDevVAddr,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile);
+
+/*************************************************************************/ /*!
+@Function       MMU_IsVDevAddrValid
+@Description    Checks if the given address is valid.
+@Input          psMMUContext   MMU context to operate on
+@Input          uiLog2PageSize Log2 of the page size
+@Input          sDevVAddr      Address to check
+@Return         IMG_TRUE if the address is valid
+*/ /**************************************************************************/
+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
+                             IMG_UINT32 uiLog2PageSize,
+                             IMG_DEV_VIRTADDR sDevVAddr);
+
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function       MMU_ContextDerivePCPDumpSymAddr
+
+@Description    Derives a PDump Symbolic address for the top level MMU object
+
+@Input          psMMUContext                    MMU context to operate on
+
+@Input          pszPDumpSymbolicNameBuffer      Buffer to write the PDump symbolic
+                                                address to
+
+@Input          uiPDumpSymbolicNameBufferSize   Size of the buffer
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
+                                                    IMG_CHAR *pszPDumpSymbolicNameBuffer,
+                                                    size_t uiPDumpSymbolicNameBufferSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_PDumpWritePageCatBase
+
+@Description    PDump write of the top level MMU object to a device register
+
+@Input          psMMUContext        MMU context to operate on
+
+@Input          pszSpaceName		PDump name of the mem/reg space
+
+@Input          uiOffset			Offset to write the address to
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+                                       const IMG_CHAR *pszSpaceName,
+                                       IMG_DEVMEM_OFFSET_T uiOffset,
+                                       IMG_UINT32 ui32WordSize,
+                                       IMG_UINT32 ui32AlignShift,
+                                       IMG_UINT32 ui32Shift,
+                                       PDUMP_FLAGS_T uiPdumpFlags);
+
+/*************************************************************************/ /*!
+@Function       MMU_AcquirePDumpMMUContext
+
+@Description    Acquire a reference to the PDump MMU context for this MMU
+                context
+
+@Input          psMMUContext            MMU context to operate on
+
+@Output         pui32PDumpMMUContextID  PDump MMU context ID
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32PDumpMMUContextID);
+
+/*************************************************************************/ /*!
+@Function       MMU_ReleasePDumpMMUContext
+
+@Description    Release a reference to the PDump MMU context for this MMU context
+
+@Input          psMMUContext            MMU context to operate on
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext);
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(MMU_PDumpWritePageCatBase)
+#endif
+static INLINE void
+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+                          const IMG_CHAR *pszSpaceName,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT32 ui32WordSize,
+                          IMG_UINT32 ui32AlignShift,
+                          IMG_UINT32 ui32Shift,
+                          PDUMP_FLAGS_T uiPdumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMMUContext);
+	PVR_UNREFERENCED_PARAMETER(pszSpaceName);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32WordSize);
+	PVR_UNREFERENCED_PARAMETER(ui32AlignShift);
+	PVR_UNREFERENCED_PARAMETER(ui32Shift);
+	PVR_UNREFERENCED_PARAMETER(uiPdumpFlags);
+}
+#endif /* PDUMP */
+
+
+#endif /* MMU_COMMON_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/opaque_types.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/opaque_types.h
new file mode 100644
index 0000000..766bc22
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/opaque_types.h
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File
+@Title          Opaque Types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines opaque types for various services types
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef SERVICES_OPAQUE_TYPES_H
+#define SERVICES_OPAQUE_TYPES_H
+
+#include "img_defs.h"
+#include "img_types.h"
+
+typedef struct _PVRSRV_DEVICE_NODE_ *PPVRSRV_DEVICE_NODE;
+typedef const struct _PVRSRV_DEVICE_NODE_ *PCPVRSRV_DEVICE_NODE;
+
+#endif /* SERVICES_OPAQUE_TYPES_H */
+
+/******************************************************************************
+ End of file (opaque_types.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/osconnection_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/osconnection_server.h
new file mode 100644
index 0000000..192ef58
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/osconnection_server.h
@@ -0,0 +1,120 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    API for OS specific callbacks from server side connection
+                management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#ifndef _OSCONNECTION_SERVER_H_
+#define _OSCONNECTION_SERVER_H_
+
+#include "handle.h"
+
+
+#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData);
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData);
+
+PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
+
+PVRSRV_DEVICE_NODE* OSGetDevData(CONNECTION_DATA *psConnection);
+
+#else	/* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionPrivateDataInit)
+#endif
+/*************************************************************************/ /*!
+@Function       OSConnectionPrivateDataInit
+@Description    Allocates and initialises any OS-specific private data
+                relating to a connection.
+                Called from PVRSRVConnectionConnect().
+@Input          pvOSData            pointer to any OS private data
+@Output         phOsPrivateData     handle to the created connection
+                                    private data
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData)
+{
+	PVR_UNREFERENCED_PARAMETER(phOsPrivateData);
+	PVR_UNREFERENCED_PARAMETER(pvOSData);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionPrivateDataDeInit)
+#endif
+/*************************************************************************/ /*!
+@Function       OSConnectionPrivateDataDeInit
+@Description    Frees previously allocated OS-specific private data
+                relating to a connection.
+@Input          hOsPrivateData      handle to the connection private data
+                                    to be freed
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+	PVR_UNREFERENCED_PARAMETER(hOsPrivateData);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionSetHandleOptions)
+#endif
+static INLINE PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
+{
+	PVR_UNREFERENCED_PARAMETER(psHandleBase);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSGetDevData)
+#endif
+static INLINE PVRSRV_DEVICE_NODE* OSGetDevData(CONNECTION_DATA *psConnection)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	return NULL;
+}
+#endif	/* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+
+
+#endif /* _OSCONNECTION_SERVER_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/osfunc.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/osfunc.h
new file mode 100644
index 0000000..dd4f4a2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/osfunc.h
@@ -0,0 +1,1682 @@
+/**************************************************************************/ /*!
+@File
+@Title          OS functions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS specific API definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifdef DEBUG_RELEASE_BUILD
+#pragma optimize( "", off )
+#define DEBUG		1
+#endif
+
+#ifndef __OSFUNC_H__
+#define __OSFUNC_H__
+
+
+#if defined(LINUX) && defined(__KERNEL__) && !defined(NO_HARDWARE)
+#include <asm/io.h>
+#endif
+
+#if defined(__QNXNTO__)
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#endif
+
+#if defined(INTEGRITY_OS)
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#include "img_types.h"
+#include "device.h"
+#include "pvrsrv_device.h"
+
+/******************************************************************************
+ * Static defines
+ *****************************************************************************/
+#define KERNEL_ID			0xffffffffL
+#define ISR_ID				0xfffffffdL
+
+/*************************************************************************/ /*!
+@Function       OSClockns64
+@Description    This function returns the number of ticks since system boot
+                expressed in nanoseconds. Unlike OSClockns, OSClockns64 has
+                a near 64-bit range.
+@Return         The 64-bit clock value, in nanoseconds.
+*/ /**************************************************************************/
+IMG_UINT64 OSClockns64(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockus64
+@Description    This function returns the number of ticks since system boot
+                expressed in microseconds. Unlike OSClockus, OSClockus64 has
+                a near 64-bit range.
+@Return         The 64-bit clock value, in microseconds.
+*/ /**************************************************************************/
+IMG_UINT64 OSClockus64(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockus
+@Description    This function returns the number of ticks since system boot
+                in microseconds.
+@Return         The 32-bit clock value, in microseconds.
+*/ /**************************************************************************/
+IMG_UINT32 OSClockus(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockms
+@Description    This function returns the number of ticks since system boot
+                in milliseconds.
+@Return         The 32-bit clock value, in milliseconds.
+*/ /**************************************************************************/
+IMG_UINT32 OSClockms(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicns64
+@Description    This function returns a clock value based on the system
+                monotonic clock.
+@Output         pui64Time     The 64-bit clock value, in nanoseconds.
+@Return         Error Code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicus64
+@Description    This function returns a clock value based on the system
+                monotonic clock.
+@Output         pui64Time     The 64-bit clock value, in microseconds.
+@Return         Error Code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicRawns64
+@Description    This function returns a clock value based on the system
+                monotonic raw clock.
+@Return         The 64-bit clock value, in nanoseconds.
+*/ /**************************************************************************/
+IMG_UINT64 OSClockMonotonicRawns64(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicRawus64
+@Description    This function returns a clock value based on the system
+                monotonic raw clock.
+@Return         The 64-bit clock value, in microseconds.
+*/ /**************************************************************************/
+IMG_UINT64 OSClockMonotonicRawus64(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetPageSize
+@Description    This function returns the page size.
+                If the OS is not using memory mappings it should return a
+                default value of 4096.
+@Return         The size of a page, in bytes.
+*/ /**************************************************************************/
+size_t OSGetPageSize(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetPageShift
+@Description    This function returns the page size expressed as a power
+                of two. A number of pages, left-shifted by this value, gives
+                the equivalent size in bytes.
+                If the OS is not using memory mappings it should return a
+                default value of 12.
+@Return         The page size expressed as a power of two.
+*/ /**************************************************************************/
+size_t OSGetPageShift(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetPageMask
+@Description    This function returns a bitmask that may be applied to an
+                address to mask off the least-significant bits so as to
+                leave the start address of the page containing that address.
+@Return         The page mask.
+*/ /**************************************************************************/
+size_t OSGetPageMask(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetOrder
+@Description    This function returns the order of power of two for a given
+                size. Eg. for a uSize of 4096 bytes the function would
+                return 12 (4096 = 2^12).
+@Input          uSize     The size in bytes.
+@Return         The order of power of two.
+*/ /**************************************************************************/
+size_t OSGetOrder(size_t uSize);
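+
+/* Illustrative sketch (editorial addition): the page helpers above are
+ * related by construction. On a system with 4096-byte pages:
+ *
+ *     OSGetPageSize()  == 4096
+ *     OSGetPageShift() == 12            // 4096 == 1 << 12
+ *     OSGetOrder(4096) == 12
+ *     uiPageStart = uiAddr & OSGetPageMask();  // start of containing page
+ */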
+
+/*************************************************************************/ /*!
+@Function       OSGetRAMSize
+@Description    This function returns the total amount of GPU-addressable
+                memory provided by the system. In other words, after loading
+                the driver this would be the largest allocation an
+                application would reasonably expect to be able to make.
+                Note that this function is not expected to return the
+                currently available memory, but the amount that would be
+                available on startup.
+@Return         Total GPU-addressable memory size, in bytes.
+*/ /**************************************************************************/
+IMG_UINT64 OSGetRAMSize(void);
+
+typedef void (*PFN_MISR)(void *pvData);
+typedef void (*PFN_THREAD)(void *pvData);
+
+/**************************************************************************/ /*!
+@Function       OSChangeSparseMemCPUAddrMap
+@Description    This function changes the CPU mapping of the underlying
+                sparse allocation. It is used by a PMR 'factory'
+                implementation if that factory supports sparse
+                allocations.
+@Input          psPageArray        array representing the pages in the
+                                   sparse allocation
+@Input          sCpuVAddrBase      the virtual base address of the sparse
+                                   allocation ('first' page)
+@Input          sCpuPAHeapBase     the physical address of the virtual
+                                   base address 'sCpuVAddrBase'
+@Input          ui32AllocPageCount the number of pages referenced in
+                                   'pai32AllocIndices'
+@Input          pai32AllocIndices  list of indices of pages within
+                                   'psPageArray' that we now want to
+                                   allocate and map
+@Input          ui32FreePageCount  the number of pages referenced in
+                                   'pai32FreeIndices'
+@Input          pai32FreeIndices   list of indices of pages within
+                                   'psPageArray' we now want to
+                                   unmap and free
+@Input          bIsLMA             flag indicating if the sparse allocation
+                                   is from LMA or UMA memory
+@Return         PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray,
+                                         IMG_UINT64 sCpuVAddrBase,
+                                         IMG_CPU_PHYADDR sCpuPAHeapBase,
+                                         IMG_UINT32 ui32AllocPageCount,
+                                         IMG_UINT32 *pai32AllocIndices,
+                                         IMG_UINT32 ui32FreePageCount,
+                                         IMG_UINT32 *pai32FreeIndices,
+                                         IMG_BOOL bIsLMA);
+
+/*************************************************************************/ /*!
+@Function       OSInstallMISR
+@Description    Installs a Mid-level Interrupt Service Routine (MISR)
+                which handles higher-level processing of interrupts from
+                the device (GPU).
+                An MISR runs outside of interrupt context, and so may be
+                descheduled. This means it can contain code that would
+                not be permitted in the LISR.
+                An MISR is invoked when OSScheduleMISR() is called. This
+                call should be made by the installed LISR once it has completed
+                its interrupt processing.
+                Multiple MISRs may be installed by the driver to handle
+                different causes of interrupt.
+@Input          pfnMISR       pointer to the function to be installed
+                              as the MISR
+@Input          hData         private data provided to the MISR
+@Output         hMISRData     handle to the installed MISR (to be used
+                              for a subsequent uninstall)
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData,
+						   PFN_MISR pfnMISR,
+						   void *hData);
+
+/*************************************************************************/ /*!
+@Function       OSUninstallMISR
+@Description    Uninstalls a Mid-level Interrupt Service Routine (MISR).
+@Input          hMISRData     handle to the installed MISR
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData);
+
+/*************************************************************************/ /*!
+@Function       OSScheduleMISR
+@Description    Schedules a Mid-level Interrupt Service Routine (MISR) to be
+                executed. An MISR should be executed outside of interrupt
+                context, for example in a work queue.
+@Input          hMISRData     handle to the installed MISR
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData);
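+
+/* Illustrative sketch (editorial addition): a minimal LISR/MISR pairing,
+ * assuming DeviceMISR() is a driver-supplied handler. The LISR only
+ * acknowledges the interrupt and defers the real work to the MISR.
+ *
+ *     static void DeviceMISR(void *pvData)
+ *     {
+ *         // Runs outside interrupt context; may sleep and take locks.
+ *     }
+ *
+ *     // At initialisation:  OSInstallMISR(&hMISR, DeviceMISR, pvPrivate);
+ *     // From the LISR, after acknowledging the hardware interrupt:
+ *     //                     OSScheduleMISR(hMISR);
+ *     // At shutdown:        OSUninstallMISR(hMISR);
+ */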
+
+
+/*************************************************************************/ /*!
+@Function       OSThreadCreate
+@Description    Creates a kernel thread and starts it running. The caller
+                is responsible for informing the thread that it must finish
+                and return from the pfnThread function. It is not possible
+                to kill or terminate it. The new thread runs with the default
+                priority provided by the Operating System.
+@Output         phThread       Returned handle to the thread.
+@Input          pszThreadName  Name to assign to the thread.
+@Input          pfnThread      Thread entry point function.
+@Input          hData          Thread specific data pointer for pfnThread().
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
+							IMG_CHAR *pszThreadName,
+							PFN_THREAD pfnThread,
+							void *hData);
+
+/*! Available priority levels for the creation of a new Kernel Thread. */
+typedef enum priority_levels
+{
+	OS_THREAD_HIGHEST_PRIORITY = 0,
+	OS_THREAD_HIGH_PRIORITY,
+	OS_THREAD_NORMAL_PRIORITY,
+	OS_THREAD_LOW_PRIORITY,
+	OS_THREAD_LOWEST_PRIORITY,
+	OS_THREAD_NOSET_PRIORITY,   /* With this option the priority level is the default for the given OS */
+	OS_THREAD_LAST_PRIORITY     /* This must always be the last entry */
+} OS_THREAD_LEVEL;
+
+/*************************************************************************/ /*!
+@Function       OSThreadCreatePriority
+@Description    As with OSThreadCreate(), this function creates a kernel
+                thread and starts it running. The difference is that this
+                function makes it possible to specify the priority used to
+                schedule the new thread.
+
+@Output         phThread        Returned handle to the thread.
+@Input          pszThreadName   Name to assign to the thread.
+@Input          pfnThread       Thread entry point function.
+@Input          hData           Thread specific data pointer for pfnThread().
+@Input          eThreadPriority Priority level to assign to the new thread.
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
+									IMG_CHAR *pszThreadName,
+									PFN_THREAD pfnThread,
+									void *hData,
+									OS_THREAD_LEVEL eThreadPriority);
+
+/*************************************************************************/ /*!
+@Function       OSThreadDestroy
+@Description    Waits for the thread to end and then destroys the thread
+                handle memory. This function will block and wait for the
+                thread to finish successfully, thereby providing a sync point
+                for the thread completing its work. No attempt is made to kill
+                or otherwise terminate the thread.
+@Input          hThread   The thread handle returned by OSThreadCreate().
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread);
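+
+/* Illustrative sketch (editorial addition): because a thread cannot be
+ * killed, a shared stop flag (here the hypothetical 'bStop') is the usual
+ * way to ask it to exit before OSThreadDestroy() is called to join it.
+ *
+ *     static volatile IMG_BOOL bStop = IMG_FALSE;
+ *
+ *     static void WorkerThread(void *pvData)
+ *     {
+ *         while (!bStop)
+ *         {
+ *             // ... do work ...
+ *         }
+ *     }
+ *
+ *     // OSThreadCreate(&hThread, "pvr_worker", WorkerThread, NULL);
+ *     // ... later: bStop = IMG_TRUE; OSThreadDestroy(hThread);
+ */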
+
+/*************************************************************************/ /*!
+@Function       OSSetThreadPriority
+@Description    Set the priority and weight of a thread.
+@Input          hThread             The thread handle.
+@Input          nThreadPriority     The integer value of the thread priority
+@Input          nThreadWeight       The integer value of the thread weight
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSSetThreadPriority(IMG_HANDLE hThread,
+                                 IMG_UINT32 nThreadPriority,
+                                 IMG_UINT32 nThreadWeight);
+
+#if defined(__arm64__) || defined(__aarch64__) || defined (PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)
+
+/* Workarounds for assumptions made that memory will not be mapped uncached
+ * in kernel or user address spaces on arm64 platforms (or other testing).
+ */
+
+/**************************************************************************/ /*!
+@Function       DeviceMemSet
+@Description    Set memory, whose mapping may be uncached, to a given value.
+                On some architectures, additional processing may be needed
+                if the mapping is uncached. In such cases, OSDeviceMemSet()
+                is defined as a call to this function.
+@Input          pvDest     void pointer to the memory to be set
+@Input          ui8Value   byte containing the value to be set
+@Input          ui32Size   the number of bytes to be set to the given value
+@Return         None
+ */ /**************************************************************************/
+void DeviceMemSet(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size);
+
+/**************************************************************************/ /*!
+@Function       DeviceMemCopy
+@Description    Copy values from one area of memory, to another, when one
+                or both mappings may be uncached.
+                On some architectures, additional processing may be needed
+                if mappings are uncached. In such cases, OSDeviceMemCopy()
+                is defined as a call to this function.
+@Input          pvDst      void pointer to the destination memory
+@Input          pvSrc      void pointer to the source memory
+@Input          ui32Size   the number of bytes to be copied
+@Return         None
+ */ /**************************************************************************/
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t ui32Size);
+
+#define OSDeviceMemSet(a,b,c)  DeviceMemSet((a), (b), (c))
+#define OSDeviceMemCopy(a,b,c) DeviceMemCopy((a), (b), (c))
+#define OSCachedMemSet(a,b,c)  memset((a), (b), (c))
+#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c))
+
+#else /* !(defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */
+
+/* Everything else */
+
+/**************************************************************************/ /*!
+@Function       OSDeviceMemSet
+@Description    Set memory, whose mapping may be uncached, to a given value.
+                On some architectures, additional processing may be needed
+                if the mapping is uncached.
+@Input          a     void pointer to the memory to be set
+@Input          b     byte containing the value to be set
+@Input          c     the number of bytes to be set to the given value
+@Return         Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSDeviceMemSet(a,b,c) memset((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function       OSDeviceMemCopy
+@Description    Copy values from one area of memory, to another, when one
+                or both mappings may be uncached.
+                On some architectures, additional processing may be needed
+                if mappings are uncached.
+@Input          a     void pointer to the destination memory
+@Input          b     void pointer to the source memory
+@Input          c     the number of bytes to be copied
+@Return         Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSDeviceMemCopy(a,b,c) memcpy((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function       OSCachedMemSet
+@Description    Set memory, where the mapping is known to be cached, to a
+                given value. This function exists to allow an optimal memset
+                to be performed when memory is known to be cached.
+@Input          a     void pointer to the memory to be set
+@Input          b     byte containing the value to be set
+@Input          c     the number of bytes to be set to the given value
+@Return         Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSCachedMemSet(a,b,c)  memset((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function       OSCachedMemCopy
+@Description    Copy values from one area of memory, to another, when both
+                mappings are known to be cached.
+                This function exists to allow an optimal memcpy to be
+                performed when memory is known to be cached.
+@Input          a     void pointer to the destination memory
+@Input          b     void pointer to the source memory
+@Input          c     the number of bytes to be copied
+@Return         Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c))
+
+#endif /* !(defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */
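+
+/* Illustrative sketch (editorial addition): pick the variant that matches
+ * the cacheability of the mappings involved.
+ *
+ *     OSCachedMemCopy(pvDst, pvSrc, uiSize);  // both mappings known cached
+ *     OSDeviceMemCopy(pvDst, pvSrc, uiSize);  // either mapping may be uncached
+ */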
+
+/**************************************************************************/ /*!
+@Function       OSMapPhysToLin
+@Description    Maps physical memory into a linear address range.
+@Input          BasePAddr    physical CPU address
+@Input          ui32Bytes    number of bytes to be mapped
+@Input          ui32Flags    flags denoting the caching mode to be employed
+                             for the mapping (uncached/write-combined,
+                             cached coherent or cached incoherent).
+                             See pvrsrv_memallocflags.h for full flag bit
+                             definitions.
+@Return         Pointer to the new mapping if successful, NULL otherwise.
+ */ /**************************************************************************/
+void *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, size_t ui32Bytes, IMG_UINT32 ui32Flags);
+
+/**************************************************************************/ /*!
+@Function       OSUnMapPhysToLin
+@Description    Unmaps physical memory previously mapped by OSMapPhysToLin().
+@Input          pvLinAddr    the linear mapping to be unmapped
+@Input          ui32Bytes    number of bytes to be unmapped
+@Input          ui32Flags    flags denoting the caching mode that was employed
+                             for the original mapping.
+@Return         IMG_TRUE if unmapping was successful, IMG_FALSE otherwise.
+ */ /**************************************************************************/
+IMG_BOOL OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes, IMG_UINT32 ui32Flags);
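+
+/* Illustrative sketch (editorial addition): map/unmap pairing for a register
+ * bank; sRegPAddr and uiRegSize are placeholders, and the caching flag is
+ * assumed to come from pvrsrv_memallocflags.h.
+ *
+ *     void *pvRegs = OSMapPhysToLin(sRegPAddr, uiRegSize,
+ *                                   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+ *     if (pvRegs != NULL)
+ *     {
+ *         // ... access the mapping ...
+ *         OSUnMapPhysToLin(pvRegs, uiRegSize, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+ *     }
+ */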
+
+/**************************************************************************/ /*!
+@Function       OSCPUOperation
+@Description    Perform the specified cache operation on the CPU.
+@Input          eCacheOp      the type of cache operation to be performed
+@Return         PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP eCacheOp);
+
+/**************************************************************************/ /*!
+@Function       OSCPUCacheFlushRangeKM
+@Description    Clean and invalidate the CPU cache for the specified
+                address range.
+@Input          psDevNode     device on which the allocation was made
+@Input          pvVirtStart   virtual start address of the range to be
+                              flushed
+@Input          pvVirtEnd     virtual end address of the range to be
+                              flushed
+@Input          sCPUPhysStart physical start address of the range to be
+                              flushed
+@Input          sCPUPhysEnd   physical end address of the range to be
+                              flushed
+@Return         None
+ */ /**************************************************************************/
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/**************************************************************************/ /*!
+@Function       OSCPUCacheCleanRangeKM
+@Description    Clean the CPU cache for the specified address range.
+                This writes out the contents of the cache and unsets the
+                'dirty' bit (which indicates the physical memory is
+                consistent with the cache contents).
+@Input          psDevNode     device on which the allocation was made
+@Input          pvVirtStart   virtual start address of the range to be
+                              cleaned
+@Input          pvVirtEnd     virtual end address of the range to be
+                              cleaned
+@Input          sCPUPhysStart physical start address of the range to be
+                              cleaned
+@Input          sCPUPhysEnd   physical end address of the range to be
+                              cleaned
+@Return         None
+ */ /**************************************************************************/
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/**************************************************************************/ /*!
+@Function       OSCPUCacheInvalidateRangeKM
+@Description    Invalidate the CPU cache for the specified address range.
+                The cache must reload data from those addresses if they
+                are accessed.
+@Input          psDevNode     device on which the allocation was made
+@Input          pvVirtStart   virtual start address of the range to be
+                              invalidated
+@Input          pvVirtEnd     virtual end address of the range to be
+                              invalidated
+@Input          sCPUPhysStart physical start address of the range to be
+                              invalidated
+@Input          sCPUPhysEnd   physical end address of the range to be
+                              invalidated
+@Return         None
+ */ /**************************************************************************/
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                 void *pvVirtStart,
+                                 void *pvVirtEnd,
+                                 IMG_CPU_PHYADDR sCPUPhysStart,
+                                 IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/**************************************************************************/ /*!
+@Function       OSCPUCacheOpAddressType
+@Description    Returns the address type (i.e. virtual/physical/both) that the
+                OS uses to perform cache maintenance on the CPU. This is used
+                to infer whether the virtual or physical address supplied to
+                the OSCPUCacheXXXRangeKM functions can be omitted when called.
+@Return         PVRSRV_CACHE_OP_ADDR_TYPE
+ */ /**************************************************************************/
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void);
+
+/*!
+ ******************************************************************************
+ * Cache attribute size type
+ *****************************************************************************/
+typedef enum _IMG_DCACHE_ATTRIBUTE_
+{
+	PVR_DCACHE_LINE_SIZE = 0,    /*!< The cache line size */
+	PVR_DCACHE_ATTRIBUTE_COUNT   /*!< The number of attributes (must be last) */
+} IMG_DCACHE_ATTRIBUTE;
+
+/**************************************************************************/ /*!
+@Function       OSCPUCacheAttributeSize
+@Description    Returns the size of a given cache attribute.
+                Typically this function is used to return the cache line
+                size, but may be extended to return the size of other
+                cache attributes.
+@Input          eCacheAttribute   the cache attribute whose size should
+                                  be returned.
+@Return         The size of the specified cache attribute, in bytes.
+ */ /**************************************************************************/
+IMG_UINT32 OSCPUCacheAttributeSize(IMG_DCACHE_ATTRIBUTE eCacheAttribute);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentProcessID
+@Description    Returns ID of current process (thread group)
+@Return         ID of current process
+*****************************************************************************/
+IMG_PID OSGetCurrentProcessID(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentProcessName
+@Description    Gets the name of current process
+@Return         Process name
+*****************************************************************************/
+IMG_CHAR *OSGetCurrentProcessName(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentProcessVASpaceSize
+@Description    Returns the CPU virtual address space size of the current
+                process
+@Return         Process VA space size
+*/ /**************************************************************************/
+IMG_UINT64 OSGetCurrentProcessVASpaceSize(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentThreadID
+@Description    Returns ID for current thread
+@Return         ID of current thread
+*****************************************************************************/
+uintptr_t OSGetCurrentThreadID(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentClientProcessIDKM
+@Description    Returns ID of current client process (thread group) which
+                has made a bridge call into the server.
+                For some operating systems, this may simply be the current
+                process id. For others, it may be that a dedicated thread
+                is used to handle the processing of bridge calls and that
+                some additional processing is required to obtain the ID of
+                the client process making the bridge call.
+@Return         ID of current client process
+*****************************************************************************/
+IMG_PID OSGetCurrentClientProcessIDKM(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentClientProcessNameKM
+@Description    Gets the name of current client process
+@Return         Client process name
+*****************************************************************************/
+IMG_CHAR *OSGetCurrentClientProcessNameKM(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentClientThreadIDKM
+@Description    Returns ID for current client thread
+                For some operating systems, this may simply be the current
+                thread id. For others, it may be that a dedicated thread
+                is used to handle the processing of bridge calls and that
+                some additional processing is required to obtain the ID of
+                the client thread making the bridge call.
+@Return         ID of current client thread
+*****************************************************************************/
+uintptr_t OSGetCurrentClientThreadIDKM(void);
+
+/**************************************************************************/ /*!
+@Function       OSMemCmp
+@Description    Compares two blocks of memory for equality.
+@Input          pvBufA      Pointer to the first block of memory
+@Input          pvBufB      Pointer to the second block of memory
+@Input          uiLen       The number of bytes to be compared
+@Return         Value < 0 if pvBufA is less than pvBufB.
+                Value > 0 if pvBufB is less than pvBufA.
+                Value = 0 if pvBufA is equal to pvBufB.
+*****************************************************************************/
+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesAlloc
+@Description    Allocates a number of contiguous physical pages.
+                If allocations made by this function are CPU cached then
+                OSPhyContigPagesClean has to be implemented to write the
+                cached data to memory.
+@Input          psDevNode     the device for which the allocation is
+                              required
+@Input          uiSize        the size of the required allocation (in bytes)
+@Output         psMemHandle   a returned handle to be used to refer to this
+                              allocation
+@Output         psDevPAddr    the physical address of the allocation
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+							PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesFree
+@Description    Frees a previous allocation of contiguous physical pages
+@Input          psDevNode     the device on which the allocation was made
+@Input          psMemHandle   the handle of the allocation to be freed
+@Return         None.
+*****************************************************************************/
+void OSPhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesMap
+@Description    Maps the specified allocation of contiguous physical pages
+                to a kernel virtual address
+@Input          psDevNode     the device on which the allocation was made
+@Input          psMemHandle   the handle of the allocation to be mapped
+@Input          uiSize        the size of the allocation (in bytes)
+@Input          psDevPAddr    the physical address of the allocation
+@Output         pvPtr         the virtual kernel address to which the
+                              allocation is now mapped
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+						size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+						void **pvPtr);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesUnmap
+@Description    Unmaps the kernel mapping for the specified allocation of
+                contiguous physical pages
+@Input          psDevNode     the device on which the allocation was made
+@Input          psMemHandle   the handle of the allocation to be unmapped
+@Input          pvPtr         the virtual kernel address to which the
+                              allocation is currently mapped
+@Return         None.
+*****************************************************************************/
+void OSPhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, void *pvPtr);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesClean
+@Description    Write the content of the specified allocation from the CPU
+                cache to memory, from (start + uiOffset) to
+                (start + uiOffset + uiLength).
+                It is expected to be implemented as a cache clean operation but
+                it is allowed to fall back to a cache clean + invalidate
+                (i.e. flush).
+                If allocations returned by OSPhyContigPagesAlloc are always
+                uncached this can be implemented as a no-op.
+@Input          psDevNode     device on which the allocation was made
+@Input          psMemHandle   the handle of the allocation to be flushed
+@Input          uiOffset      the offset in bytes from the start of the
+                              allocation from where to start flushing
+@Input          uiLength      the amount to flush from the offset in bytes
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+                                   PG_HANDLE *psMemHandle,
+                                   IMG_UINT32 uiOffset,
+                                   IMG_UINT32 uiLength);
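+
+/* Illustrative sketch (editorial addition): typical lifecycle of a
+ * contiguous physical allocation. Error handling is elided.
+ *
+ *     PG_HANDLE sMemHandle;
+ *     IMG_DEV_PHYADDR sDevPAddr;
+ *     void *pvKernAddr;
+ *     size_t uiSize = OSGetPageSize();
+ *
+ *     OSPhyContigPagesAlloc(psDevNode, uiSize, &sMemHandle, &sDevPAddr);
+ *     OSPhyContigPagesMap(psDevNode, &sMemHandle, uiSize, &sDevPAddr, &pvKernAddr);
+ *     // ... write through pvKernAddr ...
+ *     OSPhyContigPagesClean(psDevNode, &sMemHandle, 0, (IMG_UINT32)uiSize); // visible to device
+ *     OSPhyContigPagesUnmap(psDevNode, &sMemHandle, pvKernAddr);
+ *     OSPhyContigPagesFree(psDevNode, &sMemHandle);
+ */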
+
+
+/**************************************************************************/ /*!
+@Function       OSInitEnvData
+@Description    Called to initialise any environment-specific data. This
+                could include initialising the bridge calling infrastructure
+                or device memory management infrastructure.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSInitEnvData(void);
+
+/**************************************************************************/ /*!
+@Function       OSDeInitEnvData
+@Description    The counterpart to OSInitEnvData(). Called to free any
+                resources which may have been allocated by OSInitEnvData().
+@Return         None.
+ */ /**************************************************************************/
+void OSDeInitEnvData(void);
+
+/**************************************************************************/ /*!
+@Function       OSVSScanf
+@Description    OS function to support the standard C sscanf() function.
+ */ /**************************************************************************/
+IMG_UINT32 OSVSScanf(IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...);
+
+/**************************************************************************/ /*!
+@Function       OSStringNCopy
+@Description    OS function to support the standard C strncpy() function.
+ */ /**************************************************************************/
+IMG_CHAR* OSStringNCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uSize);
+
+/**************************************************************************/ /*!
+@Function       OSSNPrintf
+@Description    OS function to support the standard C snprintf() function.
+ */ /**************************************************************************/
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) __printf(3, 4);
+
+/**************************************************************************/ /*!
+@Function       OSStringLength
+@Description    OS function to support the standard C strlen() function.
+ */ /**************************************************************************/
+size_t OSStringLength(const IMG_CHAR *pStr);
+
+/**************************************************************************/ /*!
+@Function       OSStringNLength
+@Description    Return the length of a string, excluding the terminating null
+                byte ('\0'), but return at most 'uiCount' bytes. Only the first
+                'uiCount' bytes of 'pStr' are interrogated.
+@Input          pStr     pointer to the string
+@Input          uiCount  the maximum length to return
+@Return         Length of the string if less than 'uiCount' bytes, otherwise
+                'uiCount'.
+ */ /**************************************************************************/
+size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount);
+
+/**************************************************************************/ /*!
+@Function       OSStringCompare
+@Description    OS function to support the standard C strcmp() function.
+ */ /**************************************************************************/
+IMG_INT32 OSStringCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2);
+
+/**************************************************************************/ /*!
+@Function       OSStringNCompare
+@Description    OS function to support the standard C strncmp() function.
+ */ /**************************************************************************/
+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
+                           size_t uiSize);
+
+/**************************************************************************/ /*!
+@Function       OSStringToUINT32
+@Description    Converts a string to an IMG_UINT32 value.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base,
+                              IMG_UINT32 *ui32Result);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectCreate
+@Description    Create an event object.
+@Input          pszName         name to assign to the new event object.
+@Output         EventObject     the created event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
+								 IMG_HANDLE *EventObject);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectDestroy
+@Description    Destroy an event object.
+@Input          hEventObject    the event object to destroy.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectSignal
+@Description    Signal an event object. Any thread waiting on that event
+                object will be woken.
+@Input          hEventObject    the event object to signal.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWait
+@Description    Wait for an event object to signal. The function is passed
+                an OS event object handle (which allows the OS to have the
+                calling thread wait on the associated event object).
+                The calling thread will be rescheduled when the associated
+                event object signals.
+                If the event object has not signalled after a default timeout
+                period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
+                will return with the result code PVRSRV_ERROR_TIMEOUT.
+
+                Note: The global bridge lock should be released while waiting
+                for the event object to signal (if held by the current thread).
+                The following logic should be implemented in the OS
+                implementation:
+                ...
+                bReleasePVRLock = (!bHoldBridgeLock &&
+                                   BridgeLockIsLocked() &&
+                                   current == BridgeLockGetOwner());
+                if (bReleasePVRLock == IMG_TRUE) OSReleaseBridgeLock();
+                ...
+                / * sleep & reschedule - wait for signal * /
+                ...
+                if (bReleasePVRLock == IMG_TRUE) OSAcquireBridgeLock();
+                ...
+
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWaitTimeout
+@Description    Wait for an event object to signal or timeout. The function
+                is passed an OS event object handle (which allows the OS to
+                have the calling thread wait on the associated event object).
+                The calling thread will be rescheduled when the associated
+                event object signals.
+                If the event object has not signalled after the specified
+                timeout period (passed in 'uiTimeoutus'), the function
+                will return with the result code PVRSRV_ERROR_TIMEOUT.
+                NB. The global bridge lock should be released while waiting
+                for the event object to signal (if held by the current thread)
+                See OSEventObjectWait() for details.
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Input          uiTimeoutus   the timeout period (in usecs)
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWaitAndHoldBridgeLock
+@Description    Wait for an event object to signal. The function is passed
+                an OS event object handle (which allows the OS to have the
+                calling thread wait on the associated event object).
+                The calling thread will be rescheduled when the associated
+                event object signals.
+                If the event object has not signalled after a default timeout
+                period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
+                will return with the result code PVRSRV_ERROR_TIMEOUT.
+                The global bridge lock is held while waiting for the event
+                object to signal (this will prevent other bridge calls from
+                being serviced during this time).
+                See OSEventObjectWait() for details.
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitAndHoldBridgeLock(IMG_HANDLE hOSEventKM);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWaitTimeoutAndHoldBridgeLock
+@Description    Wait for an event object to signal or timeout. The function
+                is passed an OS event object handle (which allows the OS to
+                have the calling thread wait on the associated event object).
+                The calling thread will be rescheduled when the associated
+                event object signals.
+                If the event object has not signalled after the specified
+                timeout period (passed in 'uiTimeoutus'), the function
+                will return with the result code PVRSRV_ERROR_TIMEOUT.
+                The global bridge lock is held while waiting for the event
+                object to signal (this will prevent other bridge calls from
+                being serviced during this time).
+                See OSEventObjectWait() for details.
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Input          uiTimeoutus   the timeout period (in usecs)
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitTimeoutAndHoldBridgeLock(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectOpen
+@Description    Open an OS handle on the specified event object.
+                This OS handle may then be used to make a thread wait for
+                that event object to signal.
+@Input          hEventObject    Event object handle.
+@Output         phOSEvent       OS handle to the returned event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject,
+											IMG_HANDLE *phOSEvent);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectClose
+@Description    Close an OS handle previously opened for an event object.
+@Input          hOSEventKM      OS event object handle to close.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM);
+
+/**************************************************************************/ /*!
+@Function       OSStringCopy
+@Description    OS function to support the standard C strcpy() function.
+ */ /**************************************************************************/
+/* Avoid macros so we don't evaluate pszSrc twice */
+static INLINE IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc)
+{
+	return OSStringNCopy(pszDest, pszSrc, OSStringLength(pszSrc) + 1);
+}
+
+/*************************************************************************/ /*!
+@Function       OSWaitus
+@Description   Implements a busy wait of the specified number of microseconds.
+               This function does NOT release thread quanta.
+@Input         ui32Timeus     The duration of the wait period (in us)
+@Return        None.
+*/ /**************************************************************************/
+void OSWaitus(IMG_UINT32 ui32Timeus);
+
+/*************************************************************************/ /*!
+@Function       OSSleepms
+@Description    Implements a sleep of the specified number of milliseconds.
+                This function may allow pre-emption, meaning the thread
+                may potentially not be rescheduled for a longer period.
+@Input          ui32Timems    The duration of the sleep (in ms)
+@Return         None.
+*/ /**************************************************************************/
+void OSSleepms(IMG_UINT32 ui32Timems);
+
+/*************************************************************************/ /*!
+@Function       OSReleaseThreadQuanta
+@Description    Relinquishes the current thread's execution time-slice,
+                permitting the OS scheduler to schedule another thread.
+@Return         None.
+*/ /**************************************************************************/
+void OSReleaseThreadQuanta(void);
+
+#if defined(LINUX) && defined(__KERNEL__) && !defined(NO_HARDWARE)
+	#define OSReadHWReg8(addr, off)  (IMG_UINT8)readb((IMG_PBYTE)(addr) + (off))
+	#define OSReadHWReg16(addr, off) (IMG_UINT16)readw((IMG_PBYTE)(addr) + (off))
+	#define OSReadHWReg32(addr, off) (IMG_UINT32)readl((IMG_PBYTE)(addr) + (off))
+	/* Little endian support only */
+	#define OSReadHWReg64(addr, off) \
+			({ \
+				__typeof__(addr) _addr = addr; \
+				__typeof__(off) _off = off; \
+				(IMG_UINT64) \
+				( \
+					( (IMG_UINT64)(readl((IMG_PBYTE)(_addr) + (_off) + 4)) << 32) \
+					| readl((IMG_PBYTE)(_addr) + (_off)) \
+				); \
+			})
+
+	#define OSWriteHWReg8(addr, off, val)  writeb((IMG_UINT8)(val), (IMG_PBYTE)(addr) + (off))
+	#define OSWriteHWReg16(addr, off, val) writew((IMG_UINT16)(val), (IMG_PBYTE)(addr) + (off))
+	#define OSWriteHWReg32(addr, off, val) writel((IMG_UINT32)(val), (IMG_PBYTE)(addr) + (off))
+	/* Little endian support only */
+	#define OSWriteHWReg64(addr, off, val) do \
+			{ \
+				__typeof__(addr) _addr = addr; \
+				__typeof__(off) _off = off; \
+				__typeof__(val) _val = val; \
+				writel((IMG_UINT32)((_val) & 0xffffffff), (IMG_PBYTE)(_addr) + (_off)); \
+				writel((IMG_UINT32)(((IMG_UINT64)(_val) >> 32) & 0xffffffff), (IMG_PBYTE)(_addr) + (_off) + 4); \
+			} while (0)
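+
+	/* Usage sketch (editor's note): the 64-bit accessors above are composed
+	 * of two 32-bit accesses, so they are not atomic with respect to the
+	 * device; pvRegs and the offsets below are illustrative:
+	 *
+	 *   OSWriteHWReg32(pvRegs, 0x0010, 0xCAFEF00DU);
+	 *   ui64Val = OSReadHWReg64(pvRegs, 0x0018);
+	 */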
+
+#elif defined(NO_HARDWARE)
+	/* FIXME: OSReadHWReg should not exist in no hardware builds */
+	#define OSReadHWReg8(addr, off)  (0x4eU)
+	#define OSReadHWReg16(addr, off) (0x3a4eU)
+	#define OSReadHWReg32(addr, off) (0x30f73a4eU)
+	#define OSReadHWReg64(addr, off) (0x5b376c9d30f73a4eU)
+
+	#define OSWriteHWReg8(addr, off, val)
+	#define OSWriteHWReg16(addr, off, val)
+	#define OSWriteHWReg32(addr, off, val)
+	#define OSWriteHWReg64(addr, off, val)
+#else
+/*************************************************************************/ /*!
+@Function       OSReadHWReg8
+@Description    Read from an 8-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to read from a location
+                but instead returns a constant value.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be read.
+@Return         The byte read.
+*/ /**************************************************************************/
+	IMG_UINT8 OSReadHWReg8(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function       OSReadHWReg16
+@Description    Read from a 16-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to read from a location
+                but instead returns a constant value.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be read.
+@Return         The word read.
+*/ /**************************************************************************/
+	IMG_UINT16 OSReadHWReg16(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function       OSReadHWReg32
+@Description    Read from a 32-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to read from a location
+                but instead returns a constant value.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be read.
+@Return         The long word read.
+*/ /**************************************************************************/
+	IMG_UINT32 OSReadHWReg32(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function       OSReadHWReg64
+@Description    Read from a 64-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to read from a location
+                but instead returns a constant value.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be read.
+@Return         The long long word read.
+*/ /**************************************************************************/
+	IMG_UINT64 OSReadHWReg64(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function       OSWriteHWReg8
+@Description    Write to an 8-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to write to a location.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be written to.
+@Input          ui8Value           The byte to be written to the register.
+@Return         None.
+*/ /**************************************************************************/
+	void OSWriteHWReg8(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 ui8Value);
+
+/*************************************************************************/ /*!
+@Function       OSWriteHWReg16
+@Description    Write to a 16-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to write to a location.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be written to.
+@Input          ui16Value          The word to be written to the register.
+@Return         None.
+*/ /**************************************************************************/
+	void OSWriteHWReg16(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT16 ui16Value);
+
+/*************************************************************************/ /*!
+@Function       OSWriteHWReg32
+@Description    Write to a 32-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to write to a location.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be written to.
+@Input          ui32Value          The long word to be written to the register.
+@Return         None.
+*/ /**************************************************************************/
+	void OSWriteHWReg32(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+
+/*************************************************************************/ /*!
+@Function       OSWriteHWReg64
+@Description    Write to a 64-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to write to a location.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be written to.
+@Input          ui64Value          The long long word to be written to the
+                                   register.
+@Return         None.
+*/ /**************************************************************************/
+	void OSWriteHWReg64(void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value);
+#endif
+
+typedef void (*PFN_TIMER_FUNC)(void*);
+/*************************************************************************/ /*!
+@Function       OSAddTimer
+@Description    OS specific function to install a timer callback. The
+                timer will then need to be enabled, as it is disabled by
+                default.
+                When enabled, the callback will be invoked once the specified
+                timeout has elapsed.
+@Input          pfnTimerFunc    Timer callback function
+@Input          *pvData         Data passed to the callback
+@Input          ui32MsTimeout   Callback timeout period (in ms)
+@Return         A valid handle on success, NULL on failure.
+*/ /**************************************************************************/
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout);
+
+/*************************************************************************/ /*!
+@Function       OSRemoveTimer
+@Description    Removes the specified timer. The handle becomes invalid and
+                should no longer be used.
+@Input          hTimer          handle of the timer to be removed
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSRemoveTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function       OSEnableTimer
+@Description    Enable the specified timer. After enabling, the timer will
+                invoke the associated callback at an interval determined by
+                the configured timeout period until it is disabled.
+@Input          hTimer          handle of the timer to be enabled
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function       OSDisableTimer
+@Description    Disable the specified timer.
+@Input          hTimer          handle of the timer to be disabled
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer);
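+
+/* Typical timer lifecycle (editor's sketch; MyWatchdogCb, pvMyData and
+ * the 500 ms period are illustrative):
+ *
+ *   IMG_HANDLE hTimer = OSAddTimer(MyWatchdogCb, pvMyData, 500);
+ *   if (hTimer != NULL)
+ *   {
+ *       OSEnableTimer(hTimer);
+ *       ...
+ *       OSDisableTimer(hTimer);
+ *       OSRemoveTimer(hTimer);
+ *   }
+ *
+ * The handle is invalid once OSRemoveTimer() has been called.
+ */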
+
+
+/*************************************************************************/ /*!
+ @Function      OSPanic
+ @Description   Take action in response to an unrecoverable driver error
+ @Return        None
+*/ /**************************************************************************/
+void OSPanic(void);
+
+/*************************************************************************/ /*!
+@Function       OSCopyToUser
+@Description    Copy data to user-addressable memory from kernel-addressable
+                memory.
+                Note that pvDest may be an invalid address or NULL and the
+                function should return an error in this case.
+                For operating systems that do not have a user/kernel space
+                distinction, this function should be implemented as a stub
+                which simply returns PVRSRV_ERROR_NOT_SUPPORTED.
+@Input          pvProcess        handle of the connection
+@Input          pvDest           pointer to the destination User memory
+@Input          pvSrc            pointer to the source Kernel memory
+@Input          ui32Bytes        size of the data to be copied
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSCopyToUser(void *pvProcess, void *pvDest, const void *pvSrc, size_t ui32Bytes);
+
+/*************************************************************************/ /*!
+@Function       OSCopyFromUser
+@Description    Copy data from user-addressable memory to kernel-addressable
+                memory.
+                Note that pvSrc may be an invalid address or NULL and the
+                function should return an error in this case.
+                For operating systems that do not have a user/kernel space
+                distinction, this function should be implemented as a stub
+                which simply returns PVRSRV_ERROR_NOT_SUPPORTED.
+@Input          pvProcess        handle of the connection
+@Input          pvDest           pointer to the destination Kernel memory
+@Input          pvSrc            pointer to the source User memory
+@Input          ui32Bytes        size of the data to be copied
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSCopyFromUser(void *pvProcess, void *pvDest, const void *pvSrc, size_t ui32Bytes);
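+
+/* For reference, a minimal Linux-flavoured sketch of OSCopyFromUser
+ * (editor's illustration only; the exact error code returned on a
+ * faulting user pointer is an assumption):
+ *
+ *   PVRSRV_ERROR OSCopyFromUser(void *pvProcess, void *pvDest,
+ *                               const void *pvSrc, size_t uiBytes)
+ *   {
+ *       PVR_UNREFERENCED_PARAMETER(pvProcess);
+ *       if (copy_from_user(pvDest, pvSrc, uiBytes) != 0)
+ *           return PVRSRV_ERROR_INVALID_PARAMS;
+ *       return PVRSRV_OK;
+ *   }
+ */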
+
+#if defined (__linux__) || defined (WINDOWS_WDF) || defined(INTEGRITY_OS)
+#define OSBridgeCopyFromUser OSCopyFromUser
+#define OSBridgeCopyToUser OSCopyToUser
+#else
+/*************************************************************************/ /*!
+@Function       OSBridgeCopyFromUser
+@Description    Copy data from user-addressable memory into kernel-addressable
+                memory as part of a bridge call operation.
+                For operating systems that do not have a user/kernel space
+                distinction, this function will require whatever implementation
+                is needed to pass data for making the bridge function call.
+                For operating systems which do have a user/kernel space
+                distinction (such as Linux) this function may be defined so
+                as to equate to a call to OSCopyFromUser().
+@Input          pvProcess        handle of the connection
+@Input          pvDest           pointer to the destination Kernel memory
+@Input          pvSrc            pointer to the source User memory
+@Input          ui32Bytes        size of the data to be copied
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSBridgeCopyFromUser (void *pvProcess,
+						void *pvDest,
+						const void *pvSrc,
+						size_t ui32Bytes);
+
+/*************************************************************************/ /*!
+@Function       OSBridgeCopyToUser
+@Description    Copy data to user-addressable memory from kernel-addressable
+                memory as part of a bridge call operation.
+                For operating systems that do not have a user/kernel space
+                distinction, this function will require whatever implementation
+                is needed to pass data for making the bridge function call.
+                For operating systems which do have a user/kernel space
+                distinction (such as Linux) this function may be defined so
+                as to equate to a call to OSCopyToUser().
+@Input          pvProcess        handle of the connection
+@Input          pvDest           pointer to the destination User memory
+@Input          pvSrc            pointer to the source Kernel memory
+@Input          ui32Bytes        size of the data to be copied
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSBridgeCopyToUser (void *pvProcess,
+						void *pvDest,
+						const void *pvSrc,
+						size_t ui32Bytes);
+#endif
+
+/* To be increased if required in future */
+#define PVRSRV_MAX_BRIDGE_IN_SIZE      0x2000    /*!< Size of the memory block used to hold data passed in to a bridge call */
+#define PVRSRV_MAX_BRIDGE_OUT_SIZE     0x1000    /*!< Size of the memory block used to hold data returned from a bridge call */
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK) || defined(DOXYGEN)
+/*************************************************************************/ /*!
+@Function       OSGetGlobalBridgeBuffers
+@Description    Returns the addresses and sizes of the buffers used to pass
+                data into and out of bridge function calls.
+@Output         ppvBridgeInBuffer         pointer to the input bridge data buffer
+                                          of size PVRSRV_MAX_BRIDGE_IN_SIZE.
+@Output         ppvBridgeOutBuffer        pointer to the output bridge data buffer
+                                          of size PVRSRV_MAX_BRIDGE_OUT_SIZE.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSGetGlobalBridgeBuffers (void **ppvBridgeInBuffer,
+									   void **ppvBridgeOutBuffer);
+#endif
+
+#if defined(LINUX) && defined(__KERNEL__)
+#define OSWriteMemoryBarrier() wmb()
+#define OSReadMemoryBarrier() rmb()
+#define OSMemoryBarrier() mb()
+#else
+/*************************************************************************/ /*!
+@Function       OSWriteMemoryBarrier
+@Description    Insert a write memory barrier.
+                The write memory barrier guarantees that all store operations
+                (writes) specified before the barrier will appear to happen
+                before all of the store operations specified after the barrier.
+@Return         None.
+*/ /**************************************************************************/
+void OSWriteMemoryBarrier(void);
+#define OSReadMemoryBarrier() OSMemoryBarrier()
+/*************************************************************************/ /*!
+@Function       OSMemoryBarrier
+@Description    Insert a read/write memory barrier.
+                The read and write memory barrier guarantees that all load
+                (read) and all store (write) operations specified before the
+                barrier will appear to happen before all of the load/store
+                operations specified after the barrier.
+@Return         None.
+*/ /**************************************************************************/
+void OSMemoryBarrier(void);
+#endif
+
+/*************************************************************************/ /*!
+@Function       PVRSRVToNativeError
+@Description    Returns the OS-specific equivalent error number/code for
+                the specified PVRSRV_ERROR value.
+                If there is no equivalent, or the PVRSRV_ERROR value is
+                PVRSRV_OK (no error), 0 is returned.
+@Return         The OS equivalent error code.
+*/ /**************************************************************************/
+int PVRSRVToNativeError(PVRSRV_ERROR e);
+#define OSPVRSRVToNativeError(e) ((PVRSRV_OK == (e)) ? 0 : PVRSRVToNativeError(e))
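+
+/* A minimal Linux-flavoured sketch of such a mapping (editor's
+ * illustration; the selection of cases is an assumption):
+ *
+ *   int PVRSRVToNativeError(PVRSRV_ERROR e)
+ *   {
+ *       switch (e)
+ *       {
+ *           case PVRSRV_ERROR_OUT_OF_MEMORY: return -ENOMEM;
+ *           case PVRSRV_ERROR_TIMEOUT:       return -ETIME;
+ *           default:                         return -EFAULT;
+ *       }
+ *   }
+ */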
+
+
+#if defined(LINUX) && defined(__KERNEL__)
+
+/* Provide LockDep friendly definitions for Services RW locks */
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "allocmem.h"
+
+typedef struct rw_semaphore *POSWR_LOCK;
+
+#define OSWRLockCreate(ppsLock) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(ppsLock) = OSAllocMem(sizeof(struct rw_semaphore)); \
+	if (*(ppsLock)) { init_rwsem(*(ppsLock)); e = PVRSRV_OK; } \
+	e;})
+#define OSWRLockDestroy(psLock) ({OSFreeMem(psLock); PVRSRV_OK;})
+
+#define OSWRLockAcquireRead(psLock) ({down_read(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseRead(psLock) ({up_read(psLock); PVRSRV_OK;})
+#define OSWRLockAcquireWrite(psLock) ({down_write(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseWrite(psLock) ({up_write(psLock); PVRSRV_OK;})
+
+#elif defined(LINUX) || defined(__QNXNTO__) || defined (INTEGRITY_OS)
+/* User-mode unit tests use these definitions on Linux */
+
+typedef struct _OSWR_LOCK_ *POSWR_LOCK;
+
+PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock);
+void OSWRLockDestroy(POSWR_LOCK psLock);
+void OSWRLockAcquireRead(POSWR_LOCK psLock);
+void OSWRLockReleaseRead(POSWR_LOCK psLock);
+void OSWRLockAcquireWrite(POSWR_LOCK psLock);
+void OSWRLockReleaseWrite(POSWR_LOCK psLock);
+
+#else
+struct _OSWR_LOCK_ {
+	IMG_UINT32 ui32Dummy;
+};
+#if defined(WINDOWS_WDF)
+	typedef struct _OSWR_LOCK_ *POSWR_LOCK;
+#endif
+
+/*************************************************************************/ /*!
+@Function       OSWRLockCreate
+@Description    Create a writer/reader lock.
+                This type of lock allows multiple concurrent readers but
+                only a single writer, which can improve performance for
+                read-mostly data.
+@Output         ppsLock     A handle to the created WR lock.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock)
+{
+	PVR_UNREFERENCED_PARAMETER(ppsLock);
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockDestroy
+@Description    Destroys a writer/reader lock.
+@Input          psLock     The handle of the WR lock to be destroyed.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockDestroy(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockAcquireRead
+@Description    Acquire a writer/reader read lock.
+                If the write lock is already acquired, the caller will
+                block until it is released.
+@Input          psLock     The handle of the WR lock to be acquired for
+                           reading.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockAcquireRead(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockReleaseRead
+@Description    Release a writer/reader read lock.
+@Input          psLock     The handle of the WR lock whose read lock is to
+                           be released.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockReleaseRead(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockAcquireWrite
+@Description    Acquire a writer/reader write lock.
+                If the write lock or any read lock are already acquired,
+                the caller will block until all are released.
+@Input          psLock     The handle of the WR lock to be acquired for
+                           writing.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockAcquireWrite(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockReleaseWrite
+@Description    Release a writer/reader write lock.
+@Input          psLock     The handle of the WR lock whose write lock is to
+                           be released.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void OSWRLockReleaseWrite(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+#endif
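+
+/* Usage sketch for the reader/writer lock API (editor's illustration;
+ * psMyLock and the data it protects are assumptions):
+ *
+ *   POSWR_LOCK psMyLock;
+ *   if (OSWRLockCreate(&psMyLock) == PVRSRV_OK)
+ *   {
+ *       OSWRLockAcquireRead(psMyLock);
+ *       ...
+ *       OSWRLockReleaseRead(psMyLock);
+ *
+ *       OSWRLockAcquireWrite(psMyLock);
+ *       ...
+ *       OSWRLockReleaseWrite(psMyLock);
+ *
+ *       OSWRLockDestroy(psMyLock);
+ *   }
+ */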
+
+/*************************************************************************/ /*!
+@Function       OSDivide64r64
+@Description    Divide a 64-bit value by a 32-bit value. Return the 64-bit
+                quotient.
+                The remainder is also returned in 'pui32Remainder'.
+@Input          ui64Divident        The number to be divided.
+@Input          ui32Divisor         The 32-bit value 'ui64Divident' is to
+                                    be divided by.
+@Output         pui32Remainder      The remainder of the division.
+@Return         The 64-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
+
+/*************************************************************************/ /*!
+@Function       OSDivide64
+@Description    Divide a 64-bit value by a 32-bit value. Return a 32-bit
+                quotient.
+                The remainder is also returned in 'pui32Remainder'.
+                This function allows for a more optimal implementation
+                of the 64-bit division when the result is known to be
+                representable in 32 bits.
+@Input          ui64Divident        The number to be divided.
+@Input          ui32Divisor         The 32-bit value 'ui64Divident' is to
+                                    be divided by.
+@Output         pui32Remainder      The remainder of the division.
+@Return         The 32-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
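+
+/* Worked example (editor's illustration): splitting a nanosecond
+ * timestamp into whole seconds and a nanosecond remainder; the 32-bit
+ * quotient is sufficient here for any realistic uptime:
+ *
+ *   IMG_UINT32 ui32Remainder;
+ *   IMG_UINT32 ui32Secs = OSDivide64(ui64TimeNs, 1000000000U,
+ *                                    &ui32Remainder);
+ *
+ * ui32Secs receives ui64TimeNs / 10^9 and ui32Remainder receives
+ * ui64TimeNs % 10^9.
+ */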
+
+/*************************************************************************/ /*!
+@Function       OSDumpStack
+@Description    Dump the current task information and its stack trace.
+@Return         None
+*/ /**************************************************************************/
+void OSDumpStack(void);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK) || defined(DOXYGEN)
+/*************************************************************************/ /*!
+@Function       OSAcquireBridgeLock
+@Description    Acquire the global bridge lock.
+                This prevents another bridge call from being actioned while
+                we are still servicing the current bridge call.
+                NB. This function must not return until the lock is acquired
+                (meaning the implementation should not timeout or return with
+                an error, as the caller will assume they have the lock).
+                This function has an OS-specific implementation rather than
+                an abstracted implementation for efficiency reasons, as it
+                is called frequently.
+@Return         None
+*/ /**************************************************************************/
+void OSAcquireBridgeLock(void);
+/*************************************************************************/ /*!
+@Function       OSReleaseBridgeLock
+@Description    Release the global bridge lock.
+                This function has an OS-specific implementation rather than
+                an abstracted implementation for efficiency reasons, as it
+                is called frequently.
+@Return         None
+*/ /**************************************************************************/
+void OSReleaseBridgeLock(void);
+#endif
+
+/*
+ *  Functions for providing support for PID statistics.
+ */
+typedef void (OS_STATS_PRINTF_FUNC)(void *pvFilePtr, const IMG_CHAR *pszFormat, ...);
+
+typedef void (OS_STATS_PRINT_FUNC)(void *pvFilePtr,
+								   void *pvStatPtr,
+								   OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+typedef IMG_UINT32 (OS_INC_STATS_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+typedef IMG_UINT32 (OS_DEC_STATS_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+
+/*************************************************************************/ /*!
+@Function       OSCreateStatisticEntry
+@Description    Create a statistic entry in the specified folder.
+                On operating systems that do not support a debugfs
+                file system, this function may be implemented as a stub.
+@Input          pszName        String containing the name for the entry.
+@Input          pvFolder       Reference from OSCreateStatisticFolder() of the
+                               folder to create the entry in, or NULL for the
+                               root.
+@Input          pfnStatsPrint  Pointer to function that can be used to print the
+                               values of all the statistics.
+@Input          pfnIncMemRefCt Pointer to function that can be used to take a
+                               reference on the memory backing the statistic
+                               entry.
+@Input          pfnDecMemRefCt Pointer to function that can be used to drop a
+                               reference on the memory backing the statistic
+                               entry.
+@Input          pvData         OS-specific reference that can be passed to
+                               pfnStatsPrint.
+@Return         Opaque void pointer to the created entry, which can be
+                passed to OSRemoveStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+void *OSCreateStatisticEntry(IMG_CHAR* pszName, void *pvFolder,
+							 OS_STATS_PRINT_FUNC* pfnStatsPrint,
+							 OS_INC_STATS_MEM_REFCOUNT_FUNC* pfnIncMemRefCt,
+							 OS_DEC_STATS_MEM_REFCOUNT_FUNC* pfnDecMemRefCt,
+							 void *pvData);
+
+/*************************************************************************/ /*!
+@Function       OSRemoveStatisticEntry
+@Description    Removes a statistic entry.
+                On operating systems that do not support a debugfs
+                file system, this function may be implemented as a stub.
+@Input          pvEntry  Opaque void pointer to the entry created by
+                         OSCreateStatisticEntry().
+*/ /**************************************************************************/
+void OSRemoveStatisticEntry(void *pvEntry);
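+
+/* Sketch of a statistics print callback and its registration (editor's
+ * illustration; MyStatsPrint, MY_STATS, the entry name and the NULL
+ * refcount callbacks are assumptions):
+ *
+ *   static void MyStatsPrint(void *pvFilePtr, void *pvStatPtr,
+ *                            OS_STATS_PRINTF_FUNC *pfnPrintf)
+ *   {
+ *       MY_STATS *psStats = (MY_STATS *)pvStatPtr;
+ *       pfnPrintf(pvFilePtr, "Allocs: %u\n", psStats->ui32Allocs);
+ *   }
+ *
+ *   pvEntry = OSCreateStatisticEntry("my_stats", NULL, MyStatsPrint,
+ *                                    NULL, NULL, psStats);
+ */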
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+/*************************************************************************/ /*!
+@Function       OSCreateRawStatisticEntry
+@Description    Create a raw statistic entry in the specified folder.
+                On operating systems that do not support a debugfs
+                file system, this function may be implemented as a stub.
+@Input          pszFileName    String containing the name for the entry.
+@Input          pvParentDir    Reference from OSCreateStatisticFolder() of the
+                               folder to create the entry in, or NULL for the
+                               root.
+@Input          pfnStatsPrint  Pointer to function that can be used to print the
+                               values of all the statistics.
+@Return         Opaque void pointer to the created entry, which can be
+                passed to OSRemoveRawStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+void *OSCreateRawStatisticEntry(const IMG_CHAR *pszFileName, void *pvParentDir,
+                                OS_STATS_PRINT_FUNC *pfnStatsPrint);
+
+/*************************************************************************/ /*!
+@Function       OSRemoveRawStatisticEntry
+@Description    Removes a raw statistic entry.
+                On operating systems that do not support a debugfs
+                file system, this function may be implemented as a stub.
+@Input          pvEntry  Opaque void pointer to the entry created by
+                         OSCreateRawStatisticEntry().
+*/ /**************************************************************************/
+void OSRemoveRawStatisticEntry(void *pvEntry);
+#endif
+
+/*************************************************************************/ /*!
+@Function       OSCreateStatisticFolder
+@Description    Create a statistic folder to hold statistic entries.
+                On operating systems that do not support a debugfs
+                file system, this function may be implemented as a stub.
+@Input          pszName   String containing the name for the folder.
+@Input          pvFolder  Reference from OSCreateStatisticFolder() of the folder
+                          to create the folder in, or NULL for the root.
+@Return         Opaque void pointer to the created folder, which can be
+                passed to OSRemoveStatisticFolder() to remove the folder.
+*/ /**************************************************************************/
+void *OSCreateStatisticFolder(IMG_CHAR *pszName, void *pvFolder);
+
+/*************************************************************************/ /*!
+@Function       OSRemoveStatisticFolder
+@Description    Removes a statistic folder.
+                On operating systems that do not support a debugfs
+                file system, this function may be implemented as a stub.
+@Input          ppvFolder  Reference from OSCreateStatisticFolder() of the
+                           folder that should be removed.
+                           This must be a double pointer because the reference
+                           is set to NULL as soon as the memory is freed, to
+                           avoid races and use-after-free situations.
+*/ /**************************************************************************/
+void OSRemoveStatisticFolder(void **ppvFolder);
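+
+/* Folder lifecycle sketch (editor's illustration; the folder name is an
+ * assumption): the double pointer lets the implementation NULL the
+ * caller's reference on removal:
+ *
+ *   void *pvFolder = OSCreateStatisticFolder("my_folder", NULL);
+ *   ...
+ *   OSRemoveStatisticFolder(&pvFolder);
+ *
+ * pvFolder is NULL after the call returns.
+ */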
+
+/*************************************************************************/ /*!
+@Function       OSUserModeAccessToPerfCountersEn
+@Description    Permit User-mode access to CPU performance counter
+                registers.
+                This function is called during device initialisation.
+                Certain CPU architectures may need to explicitly permit
+                User mode access to performance counters - if this is
+                required, the necessary code should be implemented inside
+                this function.
+@Return         None.
+*/ /**************************************************************************/
+void OSUserModeAccessToPerfCountersEn(void);
+
+/*************************************************************************/ /*!
+@Function       OSDebugSignalPID
+@Description    Sends a SIGTRAP signal to a specific PID in user mode for
+                debugging purposes. The user mode process can register a handler
+                against this signal.
+                This is necessary to support the Rogue debugger. If the Rogue
+                debugger is not used then this function may be implemented as
+                a stub.
+@Input          ui32PID    The PID for the signal.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID);
+
+#if defined(LINUX) && defined(__KERNEL__) && !defined(DOXYGEN)
+#define OSWarnOn(a) WARN_ON(a)
+#else
+/*************************************************************************/ /*!
+@Function       OSWarnOn
+@Description    This API allows the driver to emit a special token and stack
+                dump to the server log when an issue is detected that needs the
+                OS to be notified. The token or call may be used to trigger
+                log collection by the OS environment.
+                PVR_DPF log messages will have been emitted prior to this call.
+@Input          a    Expression to evaluate; if it is true, the warning is
+                     triggered.
+@Return         None
+*/ /**************************************************************************/
+#define OSWarnOn(a) do { if ((a)) { OSDumpStack(); } } while(0)
+#endif
+
+#if defined(CONFIG_L4)
+#include <asm/api-l4env/api.h>
+#include <asm/io.h>
+
+#if defined(page_to_phys)
+#undef page_to_phys
+#define page_to_phys(x) l4x_virt_to_phys(x)
+#else
+#error "Unable to override page_to_phys() implementation"
+#endif
+#endif
+
+#endif /* __OSFUNC_H__ */
+
+/******************************************************************************
+ End of file (osfunc.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/oskm_apphint.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/oskm_apphint.h
new file mode 100644
index 0000000..809bdae1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/oskm_apphint.h
@@ -0,0 +1,181 @@
+/*************************************************************************/ /*!
+@File           oskm_apphint.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS-independent interface for retrieving KM apphints
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_defs.h"
+#if defined(LINUX)
+#include "km_apphint.h"
+#else
+#include "services_client_porting.h"
+#endif
+#if !defined(__OSKM_APPHINT_H__)
+#define __OSKM_APPHINT_H__
+
+
+#if defined(LINUX) && !defined(DOXYGEN)
+static INLINE IMG_UINT os_get_km_apphint_UINT32(void *state, APPHINT_ID id, IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) {
+	return !pvr_apphint_get_uint32(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_UINT64(void *state, APPHINT_ID id, IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) {
+	return !pvr_apphint_get_uint64(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_BOOL(void *state, APPHINT_ID id, IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) {
+	return !pvr_apphint_get_bool(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_STRING(void *state, APPHINT_ID id, IMG_CHAR **pAppHintDefault, IMG_CHAR *buffer, size_t size) {
+	return !pvr_apphint_get_string(id, buffer, size);
+}
+
+#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \
+	os_get_km_apphint_UINT32(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintUINT64(state, name, appHintDefault, value) \
+	os_get_km_apphint_UINT64(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \
+	os_get_km_apphint_BOOL(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \
+	os_get_km_apphint_STRING(state, APPHINT_ID_ ## name, appHintDefault, buffer, size)
+
+
+#define OSCreateKMAppHintState(state) \
+	PVR_UNREFERENCED_PARAMETER(state)
+
+#define OSFreeKMAppHintState(state) \
+	PVR_UNREFERENCED_PARAMETER(state)
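+
+/* Usage sketch (editor's illustration; the hint name EnableSignatureChecks
+ * and the zero default are assumptions):
+ *
+ *   void *pvHintState = NULL;
+ *   IMG_UINT32 ui32Default = 0, ui32Value = 0;
+ *   OSCreateKMAppHintState(&pvHintState);
+ *   OSGetKMAppHintUINT32(pvHintState, EnableSignatureChecks,
+ *                        &ui32Default, &ui32Value);
+ *   OSFreeKMAppHintState(pvHintState);
+ */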
+
+#else /* #if defined(LINUX) && !defined(DOXYGEN) */
+
+static INLINE IMG_BOOL os_get_km_apphint_STRING(void *state, IMG_CHAR *name, IMG_CHAR **pAppHintDefault, IMG_CHAR *buffer, size_t size) {
+	PVR_UNREFERENCED_PARAMETER(size);
+	return PVRSRVGetAppHint(state, name, IMG_STRING_TYPE, pAppHintDefault, buffer);
+}
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintUINT32(state, name, appHintDefault, value)
+@Description    Interface for retrieval of uint32 km app hint.
+				For non-linux operating systems, this macro implements a call
+				from server code to PVRSRVGetAppHint() declared in
+				services_client_porting.h, effectively making it 'shared' code.
+@Input          state             App hint state
+@Input          name              Name used to identify app hint
+@Input          appHintDefault    Default value to be returned if no
+								  app hint is found.
+@Output         value             Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \
+	PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintUINT64(state, name, appHintDefault, value)
+@Description    Interface for retrieval of uint64 km app hint.
+				For non-linux operating systems, this macro implements a call
+				from server code to PVRSRVGetAppHint() declared in
+				services_client_porting.h, effectively making it 'shared' code.
+@Input          state             App hint state
+@Input          name              Name used to identify app hint
+@Input          appHintDefault    Default value to be returned if no
+								  app hint is found.
+@Output         value             Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintUINT64(state, name, appHintDefault, value) \
+	PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintBOOL(state, name, appHintDefault, value)
+@Description    Interface for retrieval of IMG_BOOL km app hint.
+				For non-linux operating systems, this macro implements a call
+				from server code to PVRSRVGetAppHint() declared in
+				services_client_porting.h, effectively making it 'shared' code.
+@Input          state             App hint state
+@Input          name              Name used to identify app hint
+@Input          appHintDefault    Default value to be returned if no
+								  app hint is found.
+@Output         value             Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \
+	PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size)
+@Description    Interface for retrieval of string km app hint.
+				For non-linux operating systems, this macro implements a call
+				from server code to PVRSRVGetAppHint() declared in
+				services_client_porting.h, effectively making it 'shared' code.
+@Input          state             App hint state
+@Input          name              Name used to identify app hint
+@Input          appHintDefault    Default value to be returned if no
+								  app hint is found.
+@Output         buffer            Buffer used to return app hint string.
+@Input          size              Size of the buffer.
+ */ /**************************************************************************/
+#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \
+	os_get_km_apphint_STRING(state, # name, appHintDefault, buffer, size)
+
+/**************************************************************************/ /*!
+@def OSCreateKMAppHintState(state)
+@Description    Creates the app hint state.
+				For non-linux operating systems, this macro implements a call
+				from server code to PVRSRVCreateAppHintState() declared in
+				services_client_porting.h, effectively making it 'shared' code.
+@Output          state             App hint state
+ */ /**************************************************************************/
+#define OSCreateKMAppHintState(state) \
+	PVRSRVCreateAppHintState(IMG_SRV_UM, 0, state)
+
+/**************************************************************************/ /*!
+@def OSFreeKMAppHintState(state)
+@Description    Free the app hint state.
+				For non-linux operating systems, this macro implements a call
+				from server code to PVRSRVFreeAppHintState() declared in
+				services_client_porting.h, effectively making it 'shared' code.
+@Input          state             App hint state
+ */ /**************************************************************************/
+#define OSFreeKMAppHintState(state) \
+	PVRSRVFreeAppHintState(IMG_SRV_UM, state)
+
+#endif /* #if defined(LINUX) && !defined(DOXYGEN) */
+
+#endif /* __OSKM_APPHINT_H__ */
+
+/******************************************************************************
+ End of file (oskm_apphint.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_km.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_km.h
new file mode 100644
index 0000000..5b86a27
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_km.h
@@ -0,0 +1,797 @@
+/*************************************************************************/ /*!
+@File
+@Title          pdump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for pdump functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PDUMP_KM_H_
+#define _PDUMP_KM_H_
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#endif
+
+/* services/srvkm/include/ */
+#include "device.h"
+
+/* include/ */
+#include "pvrsrv_error.h"
+
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "connection_server.h"
+#include "sync_server.h"
+/*
+ *	Pull in pdump flags from services include
+ */
+#include "pdump.h"
+#include "pdumpdefs.h"
+
+/* Define this to enable the PDUMP_HERE trace in the server */
+#undef PDUMP_TRACE
+
+#if defined(PDUMP_TRACE)
+#define PDUMP_HERE(a)	if (ui32Flags & PDUMP_FLAGS_DEBUG) PVR_DPF((PVR_DBG_WARNING, "HERE %d", (a)))
+#define PDUMP_HEREA(a)	PVR_DPF((PVR_DBG_WARNING, "HERE ALWAYS %d", (a)))
+#else
+#define PDUMP_HERE(a)	(void)(a);
+#define PDUMP_HEREA(a)	(void)(a);
+#endif
+
+#define PDUMP_PD_UNIQUETAG	(IMG_HANDLE)0
+#define PDUMP_PT_UNIQUETAG	(IMG_HANDLE)0
+
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+/* counter increments each time debug write is called */
+extern IMG_UINT32 g_ui32EveryLineCounter;
+#endif
+
+typedef struct _PDUMP_CONNECTION_DATA_ PDUMP_CONNECTION_DATA;
+typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION)(void **pvData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags);
+
+#ifdef PDUMP
+
+/*! Macro used to record a panic in the PDump script stream */
+#define PDUMP_PANIC(_id, _msg) do \
+		{ PVRSRV_ERROR _eE;\
+			_eE = PDumpPanic(((RGX_PDUMP_PANIC_ ## _id) & 0xFFFF), _msg, __FUNCTION__, __LINE__);	\
+			PVR_LOG_IF_ERROR(_eE, "PDumpPanic");\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+/*! Macro used to record a driver error in the PDump script stream to invalidate the capture */
+#define PDUMP_ERROR(_err, _msg) do \
+		{   (void) PDumpCaptureError((_err), (_msg), __FUNCTION__, __LINE__);\
+		MSC_SUPPRESS_4127\
+		} while (0)
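+
+/*
+ * Usage sketch (illustrative only; FREELIST_CORRUPT is a hypothetical
+ * RGX_PDUMP_PANIC_* ID): both macros stamp the calling function and line
+ * into the capture, e.g.:
+ *
+ *     PDUMP_PANIC(FREELIST_CORRUPT, "Freelist corruption detected");
+ *     PDUMP_ERROR(PVRSRV_ERROR_OUT_OF_MEMORY, "Allocation failed");
+ */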
+
+	/* Shared across pdump_x files */
+	PVRSRV_ERROR PDumpInitCommon(void);
+	void PDumpDeInitCommon(void);
+	IMG_BOOL PDumpReady(void);
+	void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset,
+									size_t *puiZeroPageSize,
+									const IMG_CHAR **ppszZeroPageFilename);
+
+	void PDumpConnectionNotify(void);
+	void PDumpDisconnectionNotify(void);
+
+	void PDumpStopInitPhase(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient);
+	PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+                             PVRSRV_DEVICE_NODE * psDeviceNode,
+                             IMG_UINT32 ui32Frame);
+	PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+                             PVRSRV_DEVICE_NODE * psDeviceNode,
+                             IMG_UINT32* pui32Frame);
+	PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags);
+
+	PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+	                                           IMG_UINT32 ui32Start,
+	                                           IMG_UINT32 ui32End,
+	                                           IMG_UINT32 ui32Interval,
+	                                           IMG_UINT32 ui32MaxParamFileSize);
+
+
+	PVRSRV_ERROR PDumpReg32(IMG_CHAR	*pszPDumpRegName,
+							IMG_UINT32	ui32RegAddr,
+							IMG_UINT32	ui32RegValue,
+							IMG_UINT32	ui32Flags);
+
+	PVRSRV_ERROR PDumpReg64(IMG_CHAR	*pszPDumpRegName,
+							IMG_UINT32	ui32RegAddr,
+							IMG_UINT64	ui64RegValue,
+							IMG_UINT32	ui32Flags);
+
+	PVRSRV_ERROR PDumpRegLabelToReg64(IMG_CHAR *pszPDumpRegName,
+                                          IMG_UINT32 ui32RegDst,
+                                          IMG_UINT32 ui32RegSrc,
+                                          IMG_UINT32 ui32Flags);
+
+	PVRSRV_ERROR PDumpPhysHandleToInternalVar64(IMG_CHAR *pszInternalVar,
+	                                            IMG_HANDLE hPdumpPages,
+	                                            IMG_UINT32 ui32Flags);
+
+	PVRSRV_ERROR PDumpMemLabelToInternalVar64(IMG_CHAR *pszInternalVar,
+                                                PMR *psPMR,
+                                                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                                IMG_UINT32 ui32Flags);
+
+	PVRSRV_ERROR PDumpWriteVarORValueOp (const IMG_CHAR *pszInternalVariable,
+                                             const IMG_UINT64 ui64Value,
+                                             const IMG_UINT32 ui32PDumpFlags);
+
+	PVRSRV_ERROR PDumpWriteVarANDValueOp (const IMG_CHAR *pszInternalVariable,
+                                              const IMG_UINT64 ui64Value,
+                                              const IMG_UINT32 ui32PDumpFlags);
+
+	PVRSRV_ERROR PDumpWriteVarSHRValueOp (const IMG_CHAR *pszInternalVariable,
+                                              const IMG_UINT64 ui64Value,
+                                              const IMG_UINT32 ui32PDumpFlags);
+
+	PVRSRV_ERROR PDumpInternalVarToReg32(IMG_CHAR *pszPDumpRegName,
+                                             IMG_UINT32	ui32Reg,
+                                             IMG_CHAR *pszInternalVar,
+                                             IMG_UINT32	ui32Flags);
+
+	PVRSRV_ERROR PDumpInternalVarToReg64(IMG_CHAR *pszPDumpRegName,
+                                             IMG_UINT32	ui32Reg,
+                                             IMG_CHAR *pszInternalVar,
+                                             IMG_UINT32	ui32Flags);
+
+	PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource,
+                                          PMR *psPMRDest,
+                                          IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+                                          IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+                                          IMG_UINT32 ui32Flags);
+
+	PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource,
+                                          PMR *psPMRDest,
+                                          IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+                                          IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+                                          IMG_UINT32	ui32Flags);
+
+	PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName,
+                                          IMG_UINT32 ui32Reg,
+                                          PMR *psPMR,
+	                                  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                          IMG_UINT32 ui32Flags);
+
+	PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName,
+									  IMG_UINT32 ui32Reg,
+									  PMR *psPMR,
+									  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+									  IMG_UINT32 ui32Flags);
+
+	PVRSRV_ERROR PDumpRegLabelToInternalVar(IMG_CHAR *pszPDumpRegName,
+                                                IMG_UINT32 ui32Reg,
+                                                IMG_CHAR *pszInternalVar,
+                                                IMG_UINT32 ui32Flags);
+
+	PVRSRV_ERROR PDumpSAW(IMG_CHAR      *pszDevSpaceName,
+	                      IMG_UINT32    ui32HPOffsetBytes,
+	                      IMG_UINT32    ui32NumSaveBytes,
+	                      IMG_CHAR      *pszOutfileName,
+	                      IMG_UINT32    ui32OutfileOffsetByte,
+	                      PDUMP_FLAGS_T uiPDumpFlags);
+
+	PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR				*pszPDumpRegName,
+							   IMG_UINT32			ui32RegAddr,
+							   IMG_UINT32			ui32RegValue,
+							   IMG_UINT32			ui32Mask,
+							   IMG_UINT32			ui32Flags,
+							   PDUMP_POLL_OPERATOR	eOperator);
+
+	IMG_IMPORT PVRSRV_ERROR PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+										  IMG_CHAR *pszFileName,
+										  IMG_UINT32 ui32FileOffset,
+										  IMG_UINT32 ui32Width,
+										  IMG_UINT32 ui32Height,
+										  IMG_UINT32 ui32StrideInBytes,
+										  IMG_DEV_VIRTADDR sDevBaseAddr,
+										  IMG_UINT32 ui32MMUContextID,
+										  IMG_UINT32 ui32Size,
+										  PDUMP_PIXEL_FORMAT ePixelFormat,
+										  IMG_UINT32 ui32AddrMode,
+										  IMG_UINT32 ui32PDumpFlags);
+
+	IMG_IMPORT PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszPDumpRegName,
+										   IMG_CHAR *pszFileName,
+										   IMG_UINT32 ui32FileOffset,
+										   IMG_UINT32 ui32Address,
+										   IMG_UINT32 ui32Size,
+										   IMG_UINT32 ui32PDumpFlags);
+
+	PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32	ui32Flags,
+									   IMG_CHAR*	pszFormat,
+									   ...) __printf(2, 3);
+
+	PVRSRV_ERROR PDumpCommentWithFlagsVA(IMG_UINT32 ui32Flags,
+									    const IMG_CHAR * pszFormat,
+										va_list args);
+
+	PVRSRV_ERROR PDumpPanic(IMG_UINT32      ui32PanicNo,
+							IMG_CHAR*       pszPanicMsg,
+							const IMG_CHAR* pszPPFunc,
+							IMG_UINT32      ui32PPline);
+
+	PVRSRV_ERROR PDumpCaptureError(PVRSRV_ERROR    ui32ErrorNo,
+	                               IMG_CHAR*       pszErrorMsg,
+	                               const IMG_CHAR* pszPPFunc,
+	                               IMG_UINT32      ui32PPline);
+
+	PVRSRV_ERROR PDumpPDReg(PDUMP_MMU_ATTRIB *psMMUAttrib,
+							IMG_UINT32	ui32Reg,
+							IMG_UINT32	ui32dwData,
+							IMG_HANDLE	hUniqueTag);
+	PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib,
+									 IMG_UINT32		ui32Reg,
+									 IMG_UINT32		ui32Data,
+									 IMG_UINT32		ui32Flags,
+									 IMG_HANDLE		hUniqueTag);
+
+	PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame);
+
+	PVRSRV_ERROR PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing);
+
+	PVRSRV_ERROR PDumpRegRead32(IMG_CHAR *pszPDumpRegName,
+								const IMG_UINT32 dwRegOffset,
+								IMG_UINT32	ui32Flags);
+	PVRSRV_ERROR PDumpRegRead64(IMG_CHAR *pszPDumpRegName,
+								const IMG_UINT32 dwRegOffset,
+								IMG_UINT32	ui32Flags);
+
+	PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
+	PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks);
+
+	PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR		*pszPDumpRegName,
+								  IMG_UINT32	ui32RegOffset,
+								  IMG_UINT32	ui32WPosVal,
+								  IMG_UINT32	ui32PacketSize,
+								  IMG_UINT32	ui32BufferSize,
+								  IMG_UINT32	ui32Flags);
+
+	PVRSRV_ERROR PDumpTRG(IMG_CHAR *pszMemSpace,
+	                      IMG_UINT32 ui32MMUCtxID,
+	                      IMG_UINT32 ui32RegionID,
+	                      IMG_BOOL bEnable,
+	                      IMG_UINT64 ui64VAddr,
+	                      IMG_UINT64 ui64LenBytes,
+	                      IMG_UINT32 ui32XStride,
+	                      IMG_UINT32 ui32Flags);
+
+	PVRSRV_ERROR PDumpCreateLockKM(void);
+	void PDumpDestroyLockKM(void);
+	void PDumpLock(void);
+	void PDumpUnlock(void);
+
+	PVRSRV_ERROR PDumpIfKM(IMG_CHAR		*pszPDumpCond);
+	PVRSRV_ERROR PDumpElseKM(IMG_CHAR	*pszPDumpCond);
+	PVRSRV_ERROR PDumpFiKM(IMG_CHAR		*pszPDumpCond);
+
+	void PDumpPowerTransitionStart(void);
+	void PDumpPowerTransitionEnd(void);
+	IMG_BOOL PDumpInPowerTransition(void);
+	IMG_BOOL PDumpIsDumpSuspended(void);
+
+	/*!
+	 * @name	PDumpWriteParameter
+	 * @brief	General function for writing to the PDump stream. Used
+	 *          mainly for dumping memory to the parameter stream; for
+	 *          the script stream it is usually more convenient to use
+	 *          PDumpWriteScript below.
+	 * @param	psui8Data - data to write
+	 * @param	ui32Size - size of the write
+	 * @param	ui32Flags - PDump flags
+	 * @param   pui32FileOffset - on return, contains the file offset to
+	 *                            the start of the parameter data
+	 * @param   aszFilenameStr - pointer to a buffer of at least 20 chars
+	 *                           in which the parameter filename is returned
+	 * @return	error
+	 */
+	PVRSRV_ERROR PDumpWriteParameter(IMG_UINT8 *psui8Data, IMG_UINT32 ui32Size,
+			IMG_UINT32 ui32Flags, IMG_UINT32* pui32FileOffset,
+			IMG_CHAR* aszFilenameStr);
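+
+	/*
+	 * Illustrative call sequence (pui8Buffer and ui32BufferSize are
+	 * hypothetical locals): dump a buffer to the parameter stream and
+	 * record where it landed:
+	 *
+	 *     IMG_UINT32 ui32FileOffset;
+	 *     IMG_CHAR aszParamFile[20];
+	 *     eError = PDumpWriteParameter(pui8Buffer, ui32BufferSize,
+	 *                                  PDUMP_FLAGS_CONTINUOUS,
+	 *                                  &ui32FileOffset, aszParamFile);
+	 */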
+
+	/*!
+	 * @name	PDumpWriteScript
+	 * @brief	Write a PDumpOS-created string to the "script" output stream
+	 * @param	hString - PDump OS layer handle of string buffer to write
+	 * @param	ui32Flags - PDump flags
+	 * @return	IMG_TRUE on success.
+	 */
+	IMG_BOOL PDumpWriteScript(IMG_HANDLE hString, IMG_UINT32 ui32Flags);
+
+    /*
+      PDumpWriteShiftedMaskedValue():
+
+      Loads the "reference" address into an internal PDump register,
+      optionally shifts it right, optionally shifts it left, optionally
+      masks it, then finally writes the computed value to the given
+      destination address, i.e. it emits PDump language equivalent to
+      this expression:
+
+      dest = ((&ref) >> SHRamount << SHLamount) & MASK
+    */
+extern PVRSRV_ERROR
+PDumpWriteShiftedMaskedValue(const IMG_CHAR *pszDestRegspaceName,
+                             const IMG_CHAR *pszDestSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiDestOffset,
+                             const IMG_CHAR *pszRefRegspaceName,
+                             const IMG_CHAR *pszRefSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiRefOffset,
+                             IMG_UINT32 uiSHRAmount,
+                             IMG_UINT32 uiSHLAmount,
+                             IMG_UINT32 uiMask,
+                             IMG_DEVMEM_SIZE_T uiWordSize,
+                             IMG_UINT32 uiPDumpFlags);
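+
+    /*
+      Worked example (illustrative values only): with uiSHRAmount = 12,
+      uiSHLAmount = 0 and uiMask = 0xFFFFFFFF, the emitted script computes
+
+      dest = ((&ref) >> 12 << 0) & 0xFFFFFFFF
+
+      i.e. the reference address reduced to a 4K page frame number.
+    */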
+
+    /*
+      PDumpWriteSymbAddress():
+
+      writes the address of the "reference" to the offset given
+    */
+extern PVRSRV_ERROR
+PDumpWriteSymbAddress(const IMG_CHAR *pszDestSpaceName,
+                      IMG_DEVMEM_OFFSET_T uiDestOffset,
+                      const IMG_CHAR *pszRefSymbolicName,
+                      IMG_DEVMEM_OFFSET_T uiRefOffset,
+                      const IMG_CHAR *pszPDumpDevName,
+                      IMG_UINT32 ui32WordSize,
+                      IMG_UINT32 ui32AlignShift,
+                      IMG_UINT32 ui32Shift,
+                      IMG_UINT32 uiPDumpFlags);
+
+/* Register the connection with the PDump subsystem */
+extern PVRSRV_ERROR PDumpRegisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData,
+											PDUMP_CONNECTION_DATA **ppsPDumpConnectionData);
+
+/* Unregister the connection with the PDump subsystem */
+extern void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData);
+
+/* Register for notification of PDump Transition into/out of capture range */
+extern PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+													 PFN_PDUMP_TRANSITION pfnCallback,
+													 void *hPrivData,
+													 void **ppvHandle);
+
+/* Unregister notification of PDump Transition */
+extern void PDumpUnregisterTransitionCallback(void *pvHandle);
+
+/* Notify PDump of a Transition into/out of capture range */
+extern PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags);
+
+/* Wires-up a MIPS TLB in the page table*/
+extern PVRSRV_ERROR PdumpWireUpMipsTLB(PMR *psPMRSource,
+						PMR *psPMRDest,
+						IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+						IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+						IMG_UINT32 ui32AllocationFlags,
+						IMG_UINT32 ui32Flags);
+
+/*Invalidate a MIPS TLB in the page table */
+PVRSRV_ERROR PdumpInvalidateMipsTLB(PMR *psPMRDest,
+									IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+									IMG_UINT32 ui32MipsTLBValidClearMask,
+									IMG_UINT32 ui32Flags);
+
+
+
+	#define PDUMP_LOCK				PDumpLock
+	#define PDUMP_UNLOCK			PDumpUnlock
+
+	#define PDUMPINIT				PDumpInitCommon
+	#define PDUMPDEINIT				PDumpDeInitCommon
+	#define PDUMPREG32				PDumpReg32
+	#define PDUMPREG64				PDumpReg64
+	#define PDUMPREGREAD32			PDumpRegRead32
+	#define PDUMPREGREAD64			PDumpRegRead64
+	#define PDUMPCOMMENT(...)		PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, __VA_ARGS__)
+	#define PDUMPCOMMENTWITHFLAGS	PDumpCommentWithFlags
+	#define PDUMPREGPOL				PDumpRegPolKM
+	#define PDUMPPDREG				PDumpPDReg
+	#define PDUMPPDREGWITHFLAGS		PDumpPDRegWithFlags
+	#define PDUMPREGBASEDCBP		PDumpRegBasedCBP
+	#define PDUMPENDINITPHASE		PDumpStopInitPhase
+	#define PDUMPIDLWITHFLAGS		PDumpIDLWithFlags
+	#define PDUMPIDL				PDumpIDL
+	#define PDUMPPOWCMDSTART		PDumpPowerTransitionStart
+	#define PDUMPPOWCMDEND			PDumpPowerTransitionEnd
+	#define PDUMPPOWCMDINTRANS		PDumpInPowerTransition
+	#define PDUMPIF					PDumpIfKM
+	#define PDUMPELSE				PDumpElseKM
+	#define PDUMPFI					PDumpFiKM
+#else
+	/*
+		We should be clearer about which functions can be called
+		across the bridge as this looks rather unbalanced
+	*/
+
+/*! Macro used to record a panic in the PDump script stream */
+#define PDUMP_PANIC(_id, _msg)  ((void)0)
+
+/*! Macro used to record a driver error in the PDump script stream to invalidate the capture */
+#define PDUMP_ERROR(_err, _msg) ((void)0)
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpInitCommon)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpInitCommon(void)
+{
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpConnectionNotify)
+#endif
+static INLINE void
+PDumpConnectionNotify(void)
+{
+	return;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDisconnectionNotify)
+#endif
+static INLINE void
+PDumpDisconnectionNotify(void)
+{
+	return;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCreateLockKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCreateLockKM(void)
+{
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDestroyLockKM)
+#endif
+static INLINE void
+PDumpDestroyLockKM(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpLock)
+#endif
+static INLINE void
+PDumpLock(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnlock)
+#endif
+static INLINE void
+PDumpUnlock(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpStopInitPhase)
+#endif
+static INLINE void
+PDumpStopInitPhase(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient)
+{
+	PVR_UNREFERENCED_PARAMETER(bPDumpClient);
+	PVR_UNREFERENCED_PARAMETER(bInitClient);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE *psDevNode,
+                IMG_UINT32 ui32Frame)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(ui32Frame);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE *psDeviceNode,
+                IMG_UINT32* pui32Frame)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(pui32Frame);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCommentKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+	PVR_UNREFERENCED_PARAMETER(pszComment);
+	PVR_UNREFERENCED_PARAMETER(ui32Flags);
+	return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetDefaultCaptureParamsKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+                              IMG_UINT32 ui32Start,
+                              IMG_UINT32 ui32End,
+                              IMG_UINT32 ui32Interval,
+                              IMG_UINT32 ui32MaxParamFileSize)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32Mode);
+	PVR_UNREFERENCED_PARAMETER(ui32Start);
+	PVR_UNREFERENCED_PARAMETER(ui32End);
+	PVR_UNREFERENCED_PARAMETER(ui32Interval);
+	PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize);
+
+	return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPanic)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpPanic(IMG_UINT32      ui32PanicNo,
+		   IMG_CHAR*       pszPanicMsg,
+		   const IMG_CHAR* pszPPFunc,
+		   IMG_UINT32      ui32PPline)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32PanicNo);
+	PVR_UNREFERENCED_PARAMETER(pszPanicMsg);
+	PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+	PVR_UNREFERENCED_PARAMETER(ui32PPline);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCaptureError)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCaptureError(PVRSRV_ERROR    ui32ErrorNo,
+                  IMG_CHAR*       pszErrorMsg,
+                  const IMG_CHAR* pszPPFunc,
+                  IMG_UINT32      ui32PPline)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32ErrorNo);
+	PVR_UNREFERENCED_PARAMETER(pszErrorMsg);
+	PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+	PVR_UNREFERENCED_PARAMETER(ui32PPline);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsLastCaptureFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame)
+{
+	*pbIsLastCaptureFrame = IMG_FALSE;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsCaptureFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing)
+{
+	*bIsCapturing = IMG_FALSE;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpBitmapKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+										  IMG_CHAR *pszFileName,
+										  IMG_UINT32 ui32FileOffset,
+										  IMG_UINT32 ui32Width,
+										  IMG_UINT32 ui32Height,
+										  IMG_UINT32 ui32StrideInBytes,
+										  IMG_DEV_VIRTADDR sDevBaseAddr,
+										  IMG_UINT32 ui32MMUContextID,
+										  IMG_UINT32 ui32Size,
+										  PDUMP_PIXEL_FORMAT ePixelFormat,
+										  IMG_UINT32 ui32AddrMode,
+										  IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(pszFileName);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Width);
+	PVR_UNREFERENCED_PARAMETER(ui32Height);
+	PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes);
+	PVR_UNREFERENCED_PARAMETER(sDevBaseAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+	PVR_UNREFERENCED_PARAMETER(ePixelFormat);
+	PVR_UNREFERENCED_PARAMETER(ui32AddrMode);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterConnection)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpRegisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData,
+						PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncConnectionData);
+	PVR_UNREFERENCED_PARAMETER(ppsPDumpConnectionData);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterConnection)
+#endif
+static INLINE
+void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterTransitionCallback)
+#endif
+static INLINE
+PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+											  PFN_PDUMP_TRANSITION pfnCallback,
+											  void *hPrivData,
+											  void **ppvHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+	PVR_UNREFERENCED_PARAMETER(pfnCallback);
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+	PVR_UNREFERENCED_PARAMETER(ppvHandle);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterTransitionCallback)
+#endif
+static INLINE
+void PDumpUnregisterTransitionCallback(void *pvHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(pvHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpTransition)
+#endif
+static INLINE
+PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+	PVR_UNREFERENCED_PARAMETER(bInto);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+	#if defined WIN32
+		#define PDUMPINIT			PDumpInitCommon
+		#define PDUMPDEINIT(...)		/ ## * PDUMPDEINIT(__VA_ARGS__) * ## /
+		#define PDUMPREG32(...)			/ ## * PDUMPREG32(__VA_ARGS__) * ## /
+		#define PDUMPREG64(...)			/ ## * PDUMPREG64(__VA_ARGS__) * ## /
+		#define PDUMPREGREAD32(...)			/ ## * PDUMPREGREAD32(__VA_ARGS__) * ## /
+		#define PDUMPREGREAD64(...)			/ ## * PDUMPREGREAD64(__VA_ARGS__) * ## /
+		#define PDUMPCOMMENT(...)		/ ## * PDUMPCOMMENT(__VA_ARGS__) * ## /
+		#define PDUMPREGPOL(...)		/ ## * PDUMPREGPOL(__VA_ARGS__) * ## /
+		#define PDUMPPDREG(...)			/ ## * PDUMPPDREG(__VA_ARGS__) * ## /
+		#define PDUMPPDREGWITHFLAGS(...)	/ ## * PDUMPPDREGWITHFLAGS(__VA_ARGS__) * ## /
+		#define PDUMPSYNC(...)			/ ## * PDUMPSYNC(__VA_ARGS__) * ## /
+		#define PDUMPCOPYTOMEM(...)		/ ## * PDUMPCOPYTOMEM(__VA_ARGS__) * ## /
+		#define PDUMPWRITE(...)			/ ## * PDUMPWRITE(__VA_ARGS__) * ## /
+		#define PDUMPCBP(...)			/ ## * PDUMPCBP(__VA_ARGS__) * ## /
+		#define	PDUMPREGBASEDCBP(...)		/ ## * PDUMPREGBASEDCBP(__VA_ARGS__) * ## /
+		#define PDUMPCOMMENTWITHFLAGS(...)	/ ## * PDUMPCOMMENTWITHFLAGS(__VA_ARGS__) * ## /
+		#define PDUMPMALLOCPAGESPHYS(...)	/ ## * PDUMPMALLOCPAGESPHYS(__VA_ARGS__) * ## /
+		#define PDUMPENDINITPHASE(...)		/ ## * PDUMPENDINITPHASE(__VA_ARGS__) * ## /
+		#define PDUMPMSVDXREG(...)		/ ## * PDUMPMSVDXREG(__VA_ARGS__) * ## /
+		#define PDUMPMSVDXREGWRITE(...)		/ ## * PDUMPMSVDXREGWRITE(__VA_ARGS__) * ## /
+		#define PDUMPMSVDXREGREAD(...)		/ ## * PDUMPMSVDXREGREAD(__VA_ARGS__) * ## /
+		#define PDUMPMSVDXPOLEQ(...)		/ ## * PDUMPMSVDXPOLEQ(__VA_ARGS__) * ## /
+		#define PDUMPMSVDXPOL(...)		/ ## * PDUMPMSVDXPOL(__VA_ARGS__) * ## /
+		#define PDUMPIDLWITHFLAGS(...)		/ ## * PDUMPIDLWITHFLAGS(__VA_ARGS__) * ## /
+		#define PDUMPIDL(...)			/ ## * PDUMPIDL(__VA_ARGS__) * ## /
+		#define PDUMPPOWCMDSTART(...)		/ ## * PDUMPPOWCMDSTART(__VA_ARGS__) * ## /
+		#define PDUMPPOWCMDEND(...)		/ ## * PDUMPPOWCMDEND(__VA_ARGS__) * ## /
+		#define PDUMP_LOCK			/ ## * PDUMP_LOCK(__VA_ARGS__) * ## /
+		#define PDUMP_UNLOCK			/ ## * PDUMP_UNLOCK(__VA_ARGS__) * ## /
+	#else
+		#if defined LINUX || defined GCC_IA32 || defined GCC_ARM || defined __QNXNTO__ || defined(INTEGRITY_OS)
+			#define PDUMPINIT	PDumpInitCommon
+			#define PDUMPDEINIT(args...)
+			#define PDUMPREG32(args...)
+			#define PDUMPREG64(args...)
+			#define PDUMPREGREAD32(args...)
+			#define PDUMPREGREAD64(args...)
+			#define PDUMPCOMMENT(args...)
+			#define PDUMPREGPOL(args...)
+			#define PDUMPPDREG(args...)
+			#define PDUMPPDREGWITHFLAGS(args...)
+			#define PDUMPSYNC(args...)
+			#define PDUMPCOPYTOMEM(args...)
+			#define PDUMPWRITE(args...)
+			#define PDUMPREGBASEDCBP(args...)
+			#define PDUMPCOMMENTWITHFLAGS(args...)
+			#define PDUMPENDINITPHASE(args...)
+			#define PDUMPIDLWITHFLAGS(args...)
+			#define PDUMPIDL(args...)
+			#define PDUMPPOWCMDSTART(args...)
+			#define PDUMPPOWCMDEND(args...)
+			#define PDUMP_LOCK(args...)
+			#define PDUMP_UNLOCK(args...)
+
+		#else
+			#error Compiler not specified
+		#endif
+	#endif
+#endif
+
+
+#endif /* _PDUMP_KM_H_ */
+
+/******************************************************************************
+ End of file (pdump_km.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_mmu.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_mmu.h
new file mode 100644
index 0000000..24c2663
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_mmu.h
@@ -0,0 +1,189 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common MMU Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_PDUMP_MMU_H
+#define SRVKM_PDUMP_MMU_H
+
+/* services/server/include/ */
+#include "pdump_symbolicaddr.h"
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "mmu_common.h"
+
+/*
+	PDUMP MMU attributes
+*/
+typedef struct _PDUMP_MMU_ATTRIB_DEVICE_
+{
+	/* Per-device PDump attribs */
+
+	/*! PDump memory bank name */
+	IMG_CHAR				*pszPDumpMemDevName;
+
+	/*! PDump register bank name */
+	IMG_CHAR				*pszPDumpRegDevName;
+
+} PDUMP_MMU_ATTRIB_DEVICE;
+
+typedef struct _PDUMP_MMU_ATTRIB_CONTEXT_
+{
+	IMG_UINT32 ui32Dummy;
+} PDUMP_MMU_ATTRIB_CONTEXT;
+
+typedef struct _PDUMP_MMU_ATTRIB_HEAP_
+{
+	/* data page info */
+	IMG_UINT32 ui32DataPageMask;
+} PDUMP_MMU_ATTRIB_HEAP;
+
+typedef struct _PDUMP_MMU_ATTRIB_
+{
+    /* FIXME: would these be better as pointers rather than copies? */
+    struct _PDUMP_MMU_ATTRIB_DEVICE_ sDevice;
+    struct _PDUMP_MMU_ATTRIB_CONTEXT_ sContext;
+    struct _PDUMP_MMU_ATTRIB_HEAP_ sHeap;
+} PDUMP_MMU_ATTRIB;
+
+#if defined(PDUMP)
+	extern PVRSRV_ERROR PDumpMMUMalloc(const IMG_CHAR			*pszPDumpDevName,
+                                           MMU_LEVEL 				eMMULevel,
+                                           IMG_DEV_PHYADDR			*psDevPAddr,
+                                           IMG_UINT32				ui32Size,
+                                           IMG_UINT32				ui32Align,
+                                           PDUMP_MMU_TYPE          eMMUType);
+
+    extern PVRSRV_ERROR PDumpMMUFree(const IMG_CHAR				*pszPDumpDevName,
+                                     MMU_LEVEL					eMMULevel,
+                                     IMG_DEV_PHYADDR			        *psDevPAddr,
+                                     PDUMP_MMU_TYPE             eMMUType);
+
+    extern PVRSRV_ERROR PDumpMMUMalloc2(const IMG_CHAR			*pszPDumpDevName,
+                                        const IMG_CHAR			*pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+                                        const IMG_CHAR 			*pszSymbolicAddr,
+                                        IMG_UINT32			ui32Size,
+                                        IMG_UINT32			ui32Align);
+
+    extern PVRSRV_ERROR PDumpMMUFree2(const IMG_CHAR			*pszPDumpDevName,
+                                      const IMG_CHAR			*pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+                                      const IMG_CHAR 			*pszSymbolicAddr);
+
+
+	extern PVRSRV_ERROR PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName,
+                                                     PMR *psPMRDest,
+                                                     IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+                                                     IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+                                                     IMG_UINT32 ui32Flags,
+                                                     MMU_LEVEL eMMULevel,
+                                                     IMG_UINT64 ui64PxSymAddr);
+
+    extern PVRSRV_ERROR PDumpMMUDumpPxEntries(MMU_LEVEL eMMULevel,
+    								   const IMG_CHAR *pszPDumpDevName,
+                                       void *pvPxMem,
+                                       IMG_DEV_PHYADDR sPxDevPAddr,
+                                       IMG_UINT32 uiFirstEntry,
+                                       IMG_UINT32 uiNumEntries,
+                                       const IMG_CHAR *pszMemspaceName,
+                                       const IMG_CHAR *pszSymbolicAddr,
+                                       IMG_UINT64 uiSymbolicAddrOffset,
+                                       IMG_UINT32 uiBytesPerEntry,
+                                       IMG_UINT32 uiLog2Align,
+                                       IMG_UINT32 uiAddrShift,
+                                       IMG_UINT64 uiAddrMask,
+                                       IMG_UINT64 uiPxEProtMask,
+                                       IMG_UINT64 uiDataValidEnable,
+                                       IMG_UINT32 ui32Flags,
+                                       PDUMP_MMU_TYPE eMMUType);
+
+
+    extern PVRSRV_ERROR PDumpMMUAllocMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+                                                IMG_DEV_PHYADDR sPCDevPAddr,
+                                                PDUMP_MMU_TYPE eMMUType,
+                                                IMG_UINT32 *pui32MMUContextID);
+
+    extern PVRSRV_ERROR PDumpMMUFreeMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+                                               IMG_UINT32 ui32MMUContextID);
+
+	extern PVRSRV_ERROR PDumpMMUActivateCatalog(const IMG_CHAR *pszPDumpRegSpaceName,
+												const IMG_CHAR *pszPDumpRegName,
+												IMG_UINT32 uiRegAddr,
+												const IMG_CHAR *pszPDumpPCSymbolicName);
+
+	/* FIXME: split to separate file... (debatable whether this is anything to do with MMU) */
+extern PVRSRV_ERROR
+PDumpMMUSAB(const IMG_CHAR *pszPDumpMemNamespace,
+            IMG_UINT32 uiPDumpMMUCtx,
+            IMG_DEV_VIRTADDR sDevAddrStart,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFilename,
+            IMG_UINT32 uiFileOffset,
+            IMG_UINT32 ui32PDumpFlags);
+
+	#define PDUMP_MMU_MALLOC_DP(pszPDumpMemDevName, aszSymbolicAddr, ui32Size, ui32Align) \
+		PDumpMMUMalloc2(pszPDumpMemDevName, "DATA_PAGE", aszSymbolicAddr, ui32Size, ui32Align)
+    #define PDUMP_MMU_FREE_DP(pszPDumpMemDevName, aszSymbolicAddr) \
+        PDumpMMUFree2(pszPDumpMemDevName, "DATA_PAGE", aszSymbolicAddr)
+
+	#define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID) \
+        PDumpMMUAllocMMUContext(pszPDumpMemDevName,                     \
+                                sPCDevPAddr,                            \
+                                eMMUType,								\
+                                puiPDumpCtxID)
+
+    #define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID) \
+        PDumpMMUFreeMMUContext(pszPDumpMemDevName, uiPDumpCtxID)
+#else
+
+	#define PDUMP_MMU_MALLOC_DP(pszPDumpMemDevName, pszDevPAddr, ui32Size, ui32Align) \
+        ((void)0)
+    #define PDUMP_MMU_FREE_DP(pszPDumpMemDevName, psDevPAddr) \
+        ((void)0)
+	#define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID) \
+        ((void)0)
+    #define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID) \
+        ((void)0)
+
+#endif // defined(PDUMP)
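+
+/*
+ * Usage sketch (hypothetical locals): a typical caller allocates a data
+ * page, wires up an MMU context for its page catalogue, and later frees
+ * both:
+ *
+ *     PDUMP_MMU_MALLOC_DP(pszDevName, aszSymAddr, ui32Size, ui32Align);
+ *     PDUMP_MMU_ALLOC_MMUCONTEXT(pszDevName, sPCDevPAddr, eMMUType,
+ *                                &uiCtxID);
+ *     ...
+ *     PDUMP_MMU_FREE_MMUCONTEXT(pszDevName, uiCtxID);
+ *     PDUMP_MMU_FREE_DP(pszDevName, aszSymAddr);
+ *
+ * When PDUMP is not defined, all four macros compile away to ((void)0).
+ */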
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_osfunc.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_osfunc.h
new file mode 100644
index 0000000..1ec9ac2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_osfunc.h
@@ -0,0 +1,365 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	OS-independent interface to helper functions for pdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stdarg.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_device_types.h"
+
+
+/* FIXME
+ * Some OSes (WinXP, CE) allocate the string on the stack, while others
+ * (Linux) use a global variable/lock instead.
+ * It would be good to use the same approach across all OSes.
+ *
+ * A handle is returned which represents IMG_CHAR* type on all OSes.
+ *
+ * The allocated buffer length is also returned on OSes where it is
+ * supported (e.g. Linux).
+ */
+#define MAX_PDUMP_STRING_LENGTH (256)
+#if defined(WIN32)
+#define PDUMP_GET_SCRIPT_STRING()	\
+	IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH];		\
+	IMG_UINT32	ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;	\
+	IMG_HANDLE	hScript = (IMG_HANDLE)pszScript;
+
+#define PDUMP_GET_MSG_STRING()		\
+	IMG_CHAR pszMsg[MAX_PDUMP_STRING_LENGTH];			\
+	IMG_UINT32	ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING()		\
+	IMG_CHAR 	pszScript[MAX_PDUMP_STRING_LENGTH];		\
+	IMG_CHAR	pszFileName[MAX_PDUMP_STRING_LENGTH];	\
+	IMG_UINT32	ui32MaxLenScript = MAX_PDUMP_STRING_LENGTH-1;	\
+	IMG_UINT32	ui32MaxLenFileName = MAX_PDUMP_STRING_LENGTH-1;	\
+	IMG_HANDLE	hScript = (IMG_HANDLE)pszScript;
+
+#else	/* WIN32 */
+
+#if defined(__QNXNTO__)
+
+#define PDUMP_GET_SCRIPT_STRING()	\
+	IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH];		\
+	IMG_UINT32	ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;	\
+	IMG_HANDLE	hScript = (IMG_HANDLE)pszScript;
+
+#define PDUMP_GET_MSG_STRING()		\
+	IMG_CHAR pszMsg[MAX_PDUMP_STRING_LENGTH];			\
+	IMG_UINT32	ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING()		\
+	IMG_CHAR 	pszScript[MAX_PDUMP_STRING_LENGTH];		\
+	IMG_CHAR	pszFileName[MAX_PDUMP_STRING_LENGTH];	\
+	IMG_UINT32	ui32MaxLenScript = MAX_PDUMP_STRING_LENGTH-1;	\
+	IMG_UINT32	ui32MaxLenFileName = MAX_PDUMP_STRING_LENGTH-1;	\
+	IMG_HANDLE	hScript = (IMG_HANDLE)pszScript;
+
+#else  /* __QNXNTO__ */
+
+	/*
+	 * Linux
+	 */
+#define PDUMP_GET_SCRIPT_STRING()				\
+	IMG_HANDLE hScript;							\
+	IMG_UINT32	ui32MaxLen;						\
+	PVRSRV_ERROR eErrorPDump;						\
+	eErrorPDump = PDumpOSGetScriptString(&hScript, &ui32MaxLen);\
+	PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetScriptString");
+
+#define PDUMP_GET_MSG_STRING()					\
+	IMG_CHAR *pszMsg;							\
+	IMG_UINT32	ui32MaxLen;						\
+	PVRSRV_ERROR eErrorPDump;						\
+	eErrorPDump = PDumpOSGetMessageString(&pszMsg, &ui32MaxLen);\
+	PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetMessageString");
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING()		\
+	IMG_HANDLE hScript;							\
+	IMG_CHAR *pszFileName;						\
+	IMG_UINT32	ui32MaxLenScript;				\
+	IMG_UINT32	ui32MaxLenFileName;				\
+	PVRSRV_ERROR eErrorPDump;						\
+	eErrorPDump = PDumpOSGetScriptString(&hScript, &ui32MaxLenScript);\
+	PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetScriptString");\
+	eErrorPDump = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLenFileName);\
+	PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetFilenameString");
+
+	/**************************************************************************/ /*!
+	@Function       PDumpOSGetScriptString
+	@Description    Get the handle of the PDump "script" buffer.
+	                This function is only called if PDUMP is defined.
+	@Output         phScript           Handle of the PDump script buffer
+	@Output         pui32MaxLen        max length the script buffer can be
+	@Return         PVRSRV_OK on success, a failure code otherwise.
+	*/ /**************************************************************************/
+	PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript, IMG_UINT32 *pui32MaxLen);
+
+	/**************************************************************************/ /*!
+	@Function       PDumpOSGetMessageString
+	@Description    Get the PDump "message" buffer.
+	                This function is only called if PDUMP is defined.
+	@Output         ppszMsg            Pointer to the PDump message buffer
+	@Output         pui32MaxLen        max length the message buffer can be
+	@Return         PVRSRV_OK on success, a failure code otherwise.
+	*/ /**************************************************************************/
+	PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg, IMG_UINT32 *pui32MaxLen);
+
+	/**************************************************************************/ /*!
+	@Function       PDumpOSGetFilenameString
+	@Description    Get the PDump "filename" buffer.
+	                This function is only called if PDUMP is defined.
+	@Output         ppszFile           Pointer to the PDump filename buffer
+	@Output         pui32MaxLen        max length the filename buffer can be
+	@Return         PVRSRV_OK on success, a failure code otherwise.
+	*/ /**************************************************************************/
+	PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile, IMG_UINT32 *pui32MaxLen);
+
+#endif /* __QNXNTO__ */
+#endif /* WIN32 */
+
+
+/*
+ * PDump streams, channels, init and deinit routines (common to all OSes)
+ */
+
+typedef struct
+{
+	IMG_HANDLE hInit;        /*!< Driver initialisation PDump stream */
+	IMG_HANDLE hMain;        /*!< App framed PDump stream */
+	IMG_HANDLE hDeinit;      /*!< Driver/HW de-initialisation PDump stream */
+} PDUMP_CHANNEL;
+
+/**************************************************************************/ /*!
+@Function       PDumpOSInit
+@Description    Reset the connection to vldbgdrv, then try to connect to
+                PDump streams. This function is only called if PDUMP is
+                defined.
+@Input          psParam            PDump channel to be used for logging
+                                   parameters
+@Input          psScript           PDump channel to be used for logging
+                                   commands / events
+@Output         pui32InitCapMode   The initial PDump capture mode.
+@Output         ppszEnvComment     Environment-specific comment that is
+                                   output when writing to the PDump
+                                   stream (this may be NULL).
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript,
+		IMG_UINT32* pui32InitCapMode, IMG_CHAR** ppszEnvComment);
+
+/**************************************************************************/ /*!
+@Function       PDumpOSDeInit
+@Description    Disconnect the PDump streams and close the connection to
+                vldbgdrv. This function is only called if PDUMP is defined.
+@Input          psParam            PDump parameter channel to be closed
+@Input          psScript           PDump command channel to be closed
+@Return         None
+*/ /**************************************************************************/
+void PDumpOSDeInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript);
+
+/**************************************************************************/ /*!
+@Function       PDumpOSSetSplitMarker
+@Description    Inform the PDump client to start a new file at the given
+                marker. This function is only called if PDUMP is defined.
+@Input          hStream            handle of PDump stream
+@Input          ui32Marker         byte file position
+@Return         IMG_TRUE
+*/ /**************************************************************************/
+IMG_BOOL PDumpOSSetSplitMarker(IMG_HANDLE hStream, IMG_UINT32 ui32Marker);
+
+/**************************************************************************/ /*!
+@Function       PDumpOSDebugDriverWrite
+@Description    Writes a given number of bytes from the specified buffer
+                to a PDump stream. This function is only called if PDUMP
+                is defined.
+@Input          psStream           handle of PDump stream to write into
+@Input          pui8Data           buffer to write data from
+@Input          ui32BCount         number of bytes to write
+@Return         The number of bytes actually written (may be less than
+                ui32BCount if there is insufficient space in the target
+                PDump stream buffer)
+*/ /**************************************************************************/
+IMG_UINT32 PDumpOSDebugDriverWrite(IMG_HANDLE psStream,
+                                   IMG_UINT8 *pui8Data,
+                                   IMG_UINT32 ui32BCount);
+
+/*
+ * Define macro for processing variable args list in OS-independent
+ * manner. See e.g. PDumpCommentWithFlags().
+ */
+#define PDUMP_va_list	va_list
+#define PDUMP_va_start	va_start
+#define PDUMP_va_end	va_end
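+
+/*
+ * Illustrative sketch: a variadic PDump helper forwards its arguments
+ * through these OS-neutral wrappers (pattern assumed from
+ * PDumpCommentWithFlags / PDumpOSVSprintf below):
+ *
+ *     PDUMP_va_list vaArgs;
+ *     PDUMP_va_start(vaArgs, pszFormat);
+ *     eError = PDumpOSVSprintf(pszMsg, ui32MaxLen, pszFormat, vaArgs);
+ *     PDUMP_va_end(vaArgs);
+ */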
+
+
+/**************************************************************************/ /*!
+@Function       PDumpOSBufprintf
+@Description    Printf to OS-specific PDump state buffer. This function is
+                only called if PDUMP is defined.
+@Input          hBuf               handle of buffer to write into
+@Input          ui32ScriptSizeMax  maximum size of data to write (chars)
+@Input          pszFormat          format string
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...) __printf(3, 4);
+
+/**************************************************************************/ /*!
+@Function       PDumpOSDebugPrintf
+@Description    Debug message during PDumping. This function is only called
+                if PDUMP is defined.
+@Input          pszFormat            format string
+@Return         None
+*/ /**************************************************************************/
+void PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...) __printf(1, 2);
+
+/*
+ * Write into a IMG_CHAR* on all OSes. Can be allocated on the stack or heap.
+ */
+/**************************************************************************/ /*!
+@Function       PDumpOSSprintf
+@Description    Printf to IMG char array. This function is only called if
+                PDUMP is defined.
+@Input          ui32ScriptSizeMax    maximum size of data to write (chars)
+@Input          pszFormat            format string
+@Output         pszComment           char array to print into
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...) __printf(3, 4);
+
+/**************************************************************************/ /*!
+@Function       PDumpOSVSprintf
+@Description    Printf to IMG string using variable args (see stdarg.h).
+                This is necessary because the '...' notation does not
+                support nested function calls.
+                This function is only called if PDUMP is defined.
+@Input          ui32ScriptSizeMax    maximum size of data to write (chars)
+@Input          pszFormat            format string
+@Input          vaArgs               variable args structure (from stdarg.h)
+@Output         pszMsg               char array to print into
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszMsg, IMG_UINT32 ui32ScriptSizeMax, const IMG_CHAR* pszFormat, PDUMP_va_list vaArgs) __printf(3, 0);
+
+/**************************************************************************/ /*!
+@Function       PDumpOSBuflen
+@Description    Returns the length of the specified buffer (in chars).
+                This function is only called if PDUMP is defined.
+@Input          hBuffer              handle to buffer
+@Input          ui32BufferSizeMax    max size of buffer (chars)
+@Return         The length of the buffer, will always be <= ui32BufferSizeMax
+*/ /**************************************************************************/
+IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
+
+/**************************************************************************/ /*!
+@Function       PDumpOSVerifyLineEnding
+@Description    Put line ending sequence at the end if it isn't already
+                there. This function is only called if PDUMP is defined.
+@Input          hBuffer              handle to buffer
+@Input          ui32BufferSizeMax    max size of buffer (chars)
+@Return         None
+*/ /**************************************************************************/
+void PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
+
+/**************************************************************************/ /*!
+@Function       PDumpOSReleaseExecution
+@Description    OS function to switch to another process, to clear PDump
+                buffers.
+                This function can simply wrap OSReleaseThreadQuanta.
+                This function is only called if PDUMP is defined.
+@Return         None
+*/ /**************************************************************************/
+void PDumpOSReleaseExecution(void);
+
+/**************************************************************************/ /*!
+@Function       PDumpOSCreateLock
+@Description    Create the global pdump lock. This function is only called
+                if PDUMP is defined.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSCreateLock(void);
+
+/**************************************************************************/ /*!
+@Function       PDumpOSDestroyLock
+@Description    Destroy the global pdump lock. This function is only called
+                if PDUMP is defined.
+@Return         None
+*/ /**************************************************************************/
+void PDumpOSDestroyLock(void);
+
+/**************************************************************************/ /*!
+@Function       PDumpOSLock
+@Description    Acquire the global pdump lock. This function is only called
+                if PDUMP is defined.
+@Return         None
+*/ /**************************************************************************/
+void PDumpOSLock(void);
+
+/**************************************************************************/ /*!
+@Function       PDumpOSUnlock
+@Description    Release the global pdump lock. This function is only called
+                if PDUMP is defined.
+@Return         None
+*/ /**************************************************************************/
+void PDumpOSUnlock(void);
+
+/*!
+ * @name	PDumpOSGetCtrlState
+ * @brief	Retrieve some state from the debug driver or debug driver stream
+ */
+IMG_UINT32 PDumpOSGetCtrlState(IMG_HANDLE hDbgStream, IMG_UINT32 ui32StateID);
+
+/*!
+ * @name	PDumpOSSetFrame
+ * @brief	Set the current frame value mirrored in the debug driver
+ */
+void PDumpOSSetFrame(IMG_UINT32 ui32Frame);
+
+/*!
+ * @name	PDumpOSAllowInitPhaseToComplete
+ * @brief	Some platforms wish to control when the init phase is marked as
+ *          complete, depending on which client is requesting it.
+ */
+IMG_BOOL PDumpOSAllowInitPhaseToComplete(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient);
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_physmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_physmem.h
new file mode 100644
index 0000000..67a1994
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_physmem.h
@@ -0,0 +1,242 @@
+/**************************************************************************/ /*!
+@File
+@Title          pdump functions to assist with physmem allocations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements PDump support for physmem allocations.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVSRV_PDUMP_PHYSMEM_H
+#define SRVSRV_PDUMP_PHYSMEM_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pmr.h"
+
+#define PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH 40
+#define PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH 60
+#define PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH (PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH)
+
+typedef struct _PDUMP_PHYSMEM_INFO_T_ PDUMP_PHYSMEM_INFO_T;
+
+#if defined(PDUMP)
+extern PVRSRV_ERROR
+PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle,
+                     IMG_CHAR **ppszSymbolicAddress);
+
+extern PVRSRV_ERROR
+PDumpMalloc(const IMG_CHAR *pszDevSpace,
+               const IMG_CHAR *pszSymbolicAddress,
+               IMG_UINT64 ui64Size,
+               /* alignment is alignment of start of buffer _and_
+                  minimum contiguity - i.e. smallest allowable
+                  page-size. */
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               IMG_BOOL bInitialise,
+               IMG_UINT32 ui32InitValue,
+               IMG_HANDLE *phHandlePtr,
+               IMG_UINT32 ui32PDumpFlags);
+
+extern
+PVRSRV_ERROR PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle);
+
+IMG_INTERNAL void
+PDumpMakeStringValid(IMG_CHAR *pszString,
+                     IMG_UINT32 ui32StrLen);
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetSymbolicAddr)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle,
+                     IMG_CHAR **ppszSymbolicAddress)
+{
+	PVR_UNREFERENCED_PARAMETER(hPhysmemPDumpHandle);
+	PVR_UNREFERENCED_PARAMETER(ppszSymbolicAddress);
+	return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpMalloc(const IMG_CHAR *pszDevSpace,
+               const IMG_CHAR *pszSymbolicAddress,
+               IMG_UINT64 ui64Size,
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               IMG_BOOL bInitialise,
+               IMG_UINT32 ui32InitValue,
+               IMG_HANDLE *phHandlePtr,
+               IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(pszDevSpace);
+	PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress);
+	PVR_UNREFERENCED_PARAMETER(ui64Size);
+	PVR_UNREFERENCED_PARAMETER(uiAlign);
+	PVR_UNREFERENCED_PARAMETER(bInitialise);
+	PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+	PVR_UNREFERENCED_PARAMETER(phHandlePtr);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
+	return PVRSRV_OK;
+}
+#endif	/* PDUMP */
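+
+/* Illustrative usage sketch (not part of this header's API): pairing
+ * PDumpMalloc() with PDumpFree() when PDUMP is enabled. The device space
+ * name "SYSMEM", the symbolic name and the sizes below are hypothetical
+ * values chosen for illustration only.
+ *
+ *     IMG_HANDLE hPDumpAlloc;
+ *     PVRSRV_ERROR eError;
+ *
+ *     eError = PDumpMalloc("SYSMEM",         // hypothetical device space
+ *                          "ExampleBuffer",  // hypothetical symbolic name
+ *                          4096,             // size in bytes
+ *                          4096,             // alignment / min contiguity
+ *                          IMG_TRUE,         // initialise the allocation
+ *                          0,                // initialisation value
+ *                          &hPDumpAlloc,
+ *                          PDUMP_NONE);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         (void) PDumpFree(hPDumpAlloc);
+ *     }
+ */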
+
+#define PMR_DEFAULT_PREFIX "PMR"
+#define PMR_SYMBOLICADDR_FMTSPEC "%s%llu_%llu_%s"
+#define PMR_MEMSPACE_FMTSPEC "%s"
+#define PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC PMR_MEMSPACE_FMTSPEC
+
+#if defined(PDUMP)
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
+    PDumpMalloc(pszPDumpMemDevName, PMR_OSALLOCPAGES_PREFIX, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr, PDUMP_NONE)
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+    PDumpFree(hHandle)
+#else
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
+    ((void)(*phHandlePtr=NULL))
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+    ((void)(0))
+#endif // defined(PDUMP)
+
+extern PVRSRV_ERROR
+PDumpPMRWRW32(const IMG_CHAR *pszDevSpace,
+              const IMG_CHAR *pszSymbolicName,
+              IMG_DEVMEM_OFFSET_T uiOffset,
+              IMG_UINT32 ui32Value,
+              PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRWRW32InternalVarToMem(const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              const IMG_CHAR *pszInternalVar,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRRDW32MemToInternalVar(const IMG_CHAR *pszInternalVar,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRWRW64(const IMG_CHAR *pszDevSpace,
+              const IMG_CHAR *pszSymbolicName,
+              IMG_DEVMEM_OFFSET_T uiOffset,
+              IMG_UINT64 ui64Value,
+              PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRWRW64InternalVarToMem(const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              const IMG_CHAR *pszInternalVar,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRRDW64MemToInternalVar(const IMG_CHAR *pszInternalVar,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRLDB(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFilename,
+            IMG_UINT32 uiFileOffset,
+            PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRSAB(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFileName,
+            IMG_UINT32 uiFileOffset);
+
+/*
+  PDumpPMRPOL()
+
+  Emits a POL (poll) command to the PDump script.
+*/
+extern PVRSRV_ERROR
+PDumpPMRPOL(const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT32 ui32Value,
+            IMG_UINT32 ui32Mask,
+            PDUMP_POLL_OPERATOR eOperator,
+            IMG_UINT32 uiCount,
+            IMG_UINT32 uiDelay,
+            PDUMP_FLAGS_T uiPDumpFlags);
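+
+/* Illustrative sketch (illustration only): writing a 32-bit value into a
+ * PMR's PDump symbolic address and then polling for it, e.g. to script a
+ * simple handshake. The memspace/symbolic names are hypothetical, and
+ * PDUMP_POLL_OPERATOR_EQUAL is assumed to be among the poll operators
+ * defined in pdumpdefs.h.
+ *
+ *     (void) PDumpPMRWRW32("SYSMEM", "ExampleBuffer",
+ *                          0,            // byte offset within the PMR
+ *                          0xCAFEF00D,   // value to write
+ *                          PDUMP_NONE);
+ *     (void) PDumpPMRPOL("SYSMEM", "ExampleBuffer",
+ *                        0,              // byte offset within the PMR
+ *                        0xCAFEF00D,     // expected value
+ *                        0xFFFFFFFF,     // mask
+ *                        PDUMP_POLL_OPERATOR_EQUAL,
+ *                        10,             // poll count
+ *                        100,            // delay between polls
+ *                        PDUMP_NONE);
+ */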
+
+extern PVRSRV_ERROR
+PDumpPMRCBP(const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize);
+
+/*
+ * PDumpWriteBuffer()
+ *
+ * writes a binary blob to the pdump param stream containing the
+ * current contents of the memory, and returns the filename and offset
+ * of where that blob is located (for use in a subsequent LDB, for
+ * example)
+ *
+ * Caller to provide buffer to receive filename, and declare the size
+ * of that buffer
+ */
+extern PVRSRV_ERROR
+PDumpWriteBuffer(IMG_UINT8 *pcBuffer,
+                 size_t uiNumBytes,
+                 PDUMP_FLAGS_T uiPDumpFlags,
+                 IMG_CHAR *pszFilenameOut,
+                 size_t uiFilenameBufSz,
+                 PDUMP_FILEOFFSET_T *puiOffsetOut);
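+
+/* Illustrative sketch: capturing a buffer to the PDump parameter stream and
+ * receiving the file name and offset where it landed. The buffer contents
+ * and filename buffer size are hypothetical.
+ *
+ *     IMG_UINT8 aui8Data[256] = { 0 };
+ *     IMG_CHAR szFilename[64];
+ *     PDUMP_FILEOFFSET_T uiFileOffset;
+ *
+ *     PVRSRV_ERROR eError = PDumpWriteBuffer(aui8Data, sizeof(aui8Data),
+ *                                            PDUMP_NONE,
+ *                                            szFilename, sizeof(szFilename),
+ *                                            &uiFileOffset);
+ *     // On success, szFilename/uiFileOffset can feed a subsequent
+ *     // PDumpPMRLDB() call, as described above.
+ */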
+
+#endif /* #ifndef SRVSRV_PDUMP_PHYSMEM_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_symbolicaddr.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_symbolicaddr.h
new file mode 100644
index 0000000..ed912a5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pdump_symbolicaddr.h
@@ -0,0 +1,55 @@
+/**************************************************************************/ /*!
+@File
+@Title          Abstraction of PDUMP symbolic address derivation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Allows pdump functions to derive symbolic addresses on-the-fly
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_PDUMP_SYMBOLICADDR_H
+#define SRVKM_PDUMP_SYMBOLICADDR_H
+
+#include "img_types.h"
+
+#include "pvrsrv_error.h"
+
+/* pdump symbolic addresses are generated on-the-fly with a callback */
+
+typedef PVRSRV_ERROR (*PVRSRV_SYMADDRFUNCPTR)(IMG_HANDLE hPriv, IMG_UINT32 uiOffset, IMG_CHAR *pszSymbolicAddr, IMG_UINT32 ui32SymbolicAddrLen, IMG_UINT32 *pui32NewOffset);
+
+#endif /* #ifndef SRVKM_PDUMP_SYMBOLICADDR_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem.h
new file mode 100644
index 0000000..01a5959
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem.h
@@ -0,0 +1,235 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physmem header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the common entry point for creation of RAM-backed PMRs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SRVSRV_PHYSMEM_H_
+#define _SRVSRV_PHYSMEM_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "connection_server.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/* Valid values for TC_MEMORY_CONFIG configuration option */
+#define TC_MEMORY_LOCAL			(1)
+#define TC_MEMORY_HOST			(2)
+#define TC_MEMORY_HYBRID		(3)
+
+/* Valid values for the PLATO_MEMORY_CONFIG configuration option */
+#define PLATO_MEMORY_LOCAL		(1)
+#define PLATO_MEMORY_HOST		(2)
+#define PLATO_MEMORY_HYBRID		(3)
+
+/*************************************************************************/ /*!
+@Function       DevPhysMemAlloc
+
+@Description    Allocate memory from device specific heaps directly.
+
+@Input          psDevNode               device node to operate on
+@Input          ui32MemSize             Size of the memory to be allocated
+@Input          ui32Log2Align           Log2 of the required alignment
+@Input          u8Value                 Value the memory is initialised to
+@Input          bInitPage               Flag to control initialisation
+@Input          pszDevSpace             PDUMP memory space in which the
+                                          allocation is to be done
+@Input          pszSymbolicAddress      Symbolic name of the allocation
+@Output         phHandlePtr             PDUMP handle to the allocation
+@Output         hMemHandle              Handle to the allocated memory
+@Output         psDevPhysAddr           Device physical address of the
+                                          allocated page
+
+@Return         PVRSRV_OK if the allocation is successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+                                    IMG_UINT32 ui32MemSize,
+                                    IMG_UINT32 ui32Log2Align,
+                                    const IMG_UINT8 u8Value,
+                                    IMG_BOOL bInitPage,
+#if defined(PDUMP)
+                                    const IMG_CHAR *pszDevSpace,
+                                    const IMG_CHAR *pszSymbolicAddress,
+                                    IMG_HANDLE *phHandlePtr,
+#endif
+                                    IMG_HANDLE hMemHandle,
+                                    IMG_DEV_PHYADDR *psDevPhysAddr);
+
+/*************************************************************************/ /*!
+@Function       DevPhysMemFree
+
+@Description    Free memory to device specific heaps directly.
+
+@Input          psDevNode               device node to operate on
+@Input          hPDUMPMemHandle         PDump handle to the allocated memory
+@Input          hMemHandle              Devmem handle to the allocated memory
+
+@Return         None
+*/
+/*****************************************************************************/
+extern void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode,
+#if defined(PDUMP)
+		IMG_HANDLE	hPDUMPMemHandle,
+#endif
+		IMG_HANDLE	hMemHandle);
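+
+/* Illustrative sketch (non-PDUMP build, so the PDUMP-only arguments are
+ * omitted): allocating a single zero-initialised device-accessible page and
+ * freeing it again. The 4 KiB page size is hypothetical, and hMemHandle is
+ * assumed to reference implementation-defined storage.
+ *
+ *     IMG_HANDLE hMemHandle = ...; // implementation-defined storage (assumption)
+ *     IMG_DEV_PHYADDR sDevPAddr;
+ *
+ *     PVRSRV_ERROR eError = DevPhysMemAlloc(psDevNode,
+ *                                           4096,      // size in bytes
+ *                                           12,        // log2 alignment
+ *                                           0,         // fill value
+ *                                           IMG_TRUE,  // initialise the page
+ *                                           hMemHandle,
+ *                                           &sDevPAddr);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         DevPhysMemFree(psDevNode, hMemHandle);
+ *     }
+ */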
+
+/*
+ * PhysmemNewRamBackedPMR
+ *
+ * This function will create a RAM backed PMR using the device specific
+ * callback, this allows control at a per-devicenode level to select the
+ * memory source thus supporting mixed UMA/LMA systems.
+ *
+ * The size must be a multiple of page size.  The page size is
+ * specified in log2.  It should be regarded as a minimum contiguity
+ * of which the resulting memory must be a multiple.  It may
+ * be that this should be a fixed number.  It may be that the
+ * allocation size needs to be a multiple of some coarser "page size"
+ * than that specified in the page size argument.  For example, take
+ * an OS whose page granularity is a fixed 16kB, but the caller
+ * requests memory in page sizes of 4kB.  The request can be satisfied
+ * if and only if the SIZE requested is a multiple of 16kB.  If the
+ * arguments supplied are such that this OS cannot grant the request,
+ * PVRSRV_ERROR_INVALID_PARAMS will be returned.
+ *
+ * The caller should supply storage of a pointer.  Upon successful
+ * return a PMR object will have been created and a pointer to it
+ * returned in the PMROut argument.
+ *
+ * A PMR created in this way should be destroyed with PhysmemUnrefPMR.
+ *
+ * Note that this function may cause memory allocations and on some
+ * OSes this may cause scheduling events, so it is important that this
+ * function be called with interrupts enabled and in a context where
+ * scheduling events and memory allocations are permitted.
+ *
+ * The flags may be used by the implementation to change its behaviour
+ * if required.  The flags will also be stored in the PMR as immutable
+ * metadata and returned to mmu_common when it asks for it.
+ *
+ */
+extern PVRSRV_ERROR
+PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection,
+                       PVRSRV_DEVICE_NODE *psDevNode,
+                       IMG_DEVMEM_SIZE_T uiSize,
+                       IMG_DEVMEM_SIZE_T uiChunkSize,
+                       IMG_UINT32 ui32NumPhysChunks,
+                       IMG_UINT32 ui32NumVirtChunks,
+                       IMG_UINT32 *pui32MappingTable,
+                       IMG_UINT32 uiLog2PageSize,
+                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                       IMG_UINT32 uiAnnotationLength,
+                       const IMG_CHAR *pszAnnotation,
+                       PMR **ppsPMROut);
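+
+/* Illustrative sketch: a regular (non-sparse) allocation, where the chunk
+ * size equals the full size and there is one physical and one virtual
+ * chunk. The single-entry mapping table, the annotation string and the
+ * PVRSRV_MEMALLOCFLAG_GPU_READABLE flag are illustrative assumptions.
+ *
+ *     IMG_UINT32 ui32MappingTable = 0;   // chunk 0 backs virtual chunk 0
+ *     PMR *psPMR = NULL;
+ *
+ *     PVRSRV_ERROR eError =
+ *         PhysmemNewRamBackedPMR(psConnection, psDevNode,
+ *                                4096,   // uiSize
+ *                                4096,   // uiChunkSize == uiSize
+ *                                1,      // ui32NumPhysChunks
+ *                                1,      // ui32NumVirtChunks
+ *                                &ui32MappingTable,
+ *                                12,     // uiLog2PageSize (4 KiB)
+ *                                PVRSRV_MEMALLOCFLAG_GPU_READABLE, // assumption
+ *                                sizeof("Example"), "Example",
+ *                                &psPMR);
+ */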
+
+
+/*
+ * PhysmemNewRamBackedLockedPMR
+ *
+ * Same as the function above, but additionally locks down the PMR.
+ *
+ * Get the physical memory and lock down the PMR directly; we do not want to
+ * defer the actual allocation to mapping time.
+ *
+ * In general the concept of on-demand allocation is not useful for allocations
+ * where users are free to map and unmap memory at will. A user does not expect
+ * the memory contents to suddenly vanish just because the buffer was unmapped.
+ * Even if the user knew and accepted this, we do not want to check for every
+ * page we unmap whether we have to unlock the underlying PMR.
+*/
+extern PVRSRV_ERROR
+PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection,
+                             PVRSRV_DEVICE_NODE *psDevNode,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             PMR_SIZE_T uiChunkSize,
+                             IMG_UINT32 ui32NumPhysChunks,
+                             IMG_UINT32 ui32NumVirtChunks,
+                             IMG_UINT32 *pui32MappingTable,
+                             IMG_UINT32 uiLog2PageSize,
+                             PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                             IMG_UINT32 uiAnnotationLength,
+                             const IMG_CHAR *pszAnnotation,
+                             PMR **ppsPMRPtr);
+
+/**************************************************************************/ /*!
+@Function       PhysmemImportPMR
+@Description    Import a previously exported PMR
+@Input          psPMRExport           The exported PMR token
+@Input          uiPassword            Authorisation password
+                                      for the PMR being imported
+@Input          uiSize                Size of the PMR being imported
+                                      (for verification)
+@Input          uiLog2Contig          Log2 contiguity of the PMR being
+                                      imported (for verification)
+@Output         ppsPMR                The imported PMR
+@Return         PVRSRV_ERROR_PMR_NOT_PERMITTED if not for the same device
+                PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR if password incorrect
+                PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES if size or contiguity incorrect
+                PVRSRV_OK if successful
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PhysmemImportPMR(CONNECTION_DATA *psConnection,
+                 PVRSRV_DEVICE_NODE *psDevNode,
+                 PMR_EXPORT *psPMRExport,
+                 PMR_PASSWORD_T uiPassword,
+                 PMR_SIZE_T uiSize,
+                 PMR_LOG2ALIGN_T uiLog2Contig,
+                 PMR **ppsPMR);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetMaxDevMemSizeKM
+@Description    Get the amount of device memory on current platform
+@Output         uiLMASize             LMA memory size
+@Output         uiUMASize             UMA memory size
+@Return         None
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVGetMaxDevMemSizeKM( CONNECTION_DATA * psConnection,
+		                   PVRSRV_DEVICE_NODE *psDevNode,
+		                   IMG_DEVMEM_SIZE_T *puiLMASize,
+		                   IMG_DEVMEM_SIZE_T *puiUMASize );
+
+#endif /* _SRVSRV_PHYSMEM_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_dmabuf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_dmabuf.h
new file mode 100644
index 0000000..c3be373
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_dmabuf.h
@@ -0,0 +1,108 @@
+/**************************************************************************/ /*!
+@File           physmem_dmabuf.h
+@Title          Header for dmabuf PMR factory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks importing Ion allocations
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(_PHYSMEM_DMABUF_H_)
+#define _PHYSMEM_DMABUF_H_
+
+#include <linux/dma-buf.h>
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "connection_server.h"
+
+#include "pmr.h"
+
+typedef PVRSRV_ERROR (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap,
+                                               struct dma_buf_attachment *psAttachment);
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                                PHYS_HEAP *psHeap,
+                                struct dma_buf_attachment *psAttachment,
+                                PFN_DESTROY_DMABUF_PMR pfnDestroy,
+                                PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                IMG_DEVMEM_SIZE_T uiChunkSize,
+                                IMG_UINT32 ui32NumPhysChunks,
+                                IMG_UINT32 ui32NumVirtChunks,
+                                IMG_UINT32 *pui32MappingTable,
+                                PMR **ppsPMRPtr);
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR);
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    PMR *psPMR,
+                    IMG_INT *piFd);
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    IMG_INT fd,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    PMR **ppsPMRPtr,
+                    IMG_DEVMEM_SIZE_T *puiSize,
+                    IMG_DEVMEM_ALIGN_T *puiAlign);
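+
+/* Illustrative sketch: wrapping a dma-buf file descriptor received from
+ * another driver in a PMR. The iFd and uiFlags values are hypothetical; on
+ * success the returned size/alignment describe the imported buffer.
+ *
+ *     PMR *psPMR = NULL;
+ *     IMG_DEVMEM_SIZE_T uiSize;
+ *     IMG_DEVMEM_ALIGN_T uiAlign;
+ *
+ *     PVRSRV_ERROR eError = PhysmemImportDmaBuf(psConnection, psDevNode,
+ *                                               iFd,      // dma-buf fd
+ *                                               uiFlags,  // alloc flags
+ *                                               &psPMR,
+ *                                               &uiSize,
+ *                                               &uiAlign);
+ */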
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_INT fd,
+                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                          IMG_DEVMEM_SIZE_T uiChunkSize,
+                          IMG_UINT32 ui32NumPhysChunks,
+                          IMG_UINT32 ui32NumVirtChunks,
+                          IMG_UINT32 *pui32MappingTable,
+                          PMR **ppsPMRPtr,
+                          IMG_DEVMEM_SIZE_T *puiSize,
+                          IMG_DEVMEM_ALIGN_T *puiAlign);
+
+#endif /* !defined(_PHYSMEM_DMABUF_H_) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_hostmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_hostmem.h
new file mode 100644
index 0000000..883ca2a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_hostmem.h
@@ -0,0 +1,54 @@
+/*************************************************************************/ /*!
+@File           physmem_hostmem.h
+@Title          Host memory device node header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__PHYSMEM_HOSTMEM_H__)
+#define __PHYSMEM_HOSTMEM_H__
+
+#include "pvrsrv_device.h"
+
+/*! Heap ID of the host driver's device heap */
+#define PHYS_HEAP_ID_HOSTMEM (~((IMG_UINT32)0))
+
+PVRSRV_DEVICE_CONFIG* HostMemGetDeviceConfig(void);
+
+#endif /* !defined (__PHYSMEM_HOSTMEM_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_lma.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_lma.h
new file mode 100644
index 0000000..be7bef7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_lma.h
@@ -0,0 +1,85 @@
+/**************************************************************************/ /*!
+@File
+@Title          Header for local card memory allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for local card memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PHYSMEM_LMA_H_
+#define _SRVSRV_PHYSMEM_LMA_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*
+ * PhysmemNewLocalRamBackedPMR
+ *
+ * This function will create a PMR using the local card memory and is OS
+ * agnostic.
+ */
+PVRSRV_ERROR
+PhysmemNewLocalRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_DEVMEM_SIZE_T uiChunkSize,
+							IMG_UINT32 ui32NumPhysChunks,
+							IMG_UINT32 ui32NumVirtChunks,
+							IMG_UINT32 *pui32MappingTable,
+							IMG_UINT32 uiLog2PageSize,
+							PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							const IMG_CHAR *pszAnnotation,
+							PMR **ppsPMRPtr);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+ * Define some helper list functions for the virtualization validation code
+ */
+
+void	InsertPidOSidsCoupling(IMG_PID pId, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt);
+void	RetrieveOSidsfromPidList(IMG_PID pId, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt);
+void	RemovePidOSidCoupling(IMG_PID pId);
+#endif
+
+#endif /* #ifndef _SRVSRV_PHYSMEM_LMA_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_osmem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_osmem.h
new file mode 100644
index 0000000..6c7502c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_osmem.h
@@ -0,0 +1,125 @@
+/**************************************************************************/ /*!
+@File
+@Title		PMR implementation of OS derived physical memory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Part of the memory management.  This module is
+                responsible for an implementation of the "PMR"
+                abstraction.  This interface is for the
+                PhysmemNewOSRamBackedPMR() "PMR Factory" which is
+                responsible for claiming chunks of memory (in
+                particular physically contiguous quanta) from the
+                Operating System.
+
+                As such, this interface will be implemented on a
+                Per-OS basis, in the "env" directory for that system.
+                A dummy implementation is available in
+                physmem_osmem_dummy.c for operating systems that
+                cannot, or do not wish to, offer this functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#ifndef _SRVSRV_PHYSMEM_OSMEM_H_
+#define _SRVSRV_PHYSMEM_OSMEM_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*************************************************************************/ /*!
+@Function       PhysmemNewOSRamBackedPMR
+@Description    Rogue Services will call this function to allocate GPU device 
+                memory from the PMR factory supported by the OS DDK port. This 
+                factory typically obtains physical memory from the kernel/OS 
+                API that allocates memory from the default heap of shared system 
+                memory available on the platform. The allocated memory must be 
+                page-aligned and be a whole number of pages. 
+                After allocating the required memory, the implementation must 
+                then call PMRCreatePMR() to obtain the PMR structure that 
+                describes this allocation to the upper layers of the Services
+                memory management sub-system.
+                NB. Implementation of this function is mandatory. If shared 
+                system memory is not to be used in the OS port then the 
+                implementation must return PVRSRV_ERROR_NOT_SUPPORTED.
+
+@Input          psDevNode        the device node
+@Input          uiSize           the size of the allocation
+                                 (must be a multiple of page size)
+@Input          uiChunkSize      when sparse allocations are requested,
+                                 this is the allocated chunk size.
+                                 For regular allocations, this will be
+                                 the same as uiSize.
+                                 (must be a multiple of page size)
+@Input          ui32NumPhysChunks  when sparse allocations are requested,
+                                   this is the number of physical chunks
+                                   to be allocated.
+                                   For regular allocations, this will be 1.
+@Input          ui32NumVirtChunks  when sparse allocations are requested,
+                                   this is the number of virtual chunks
+                                   covering the sparse allocation.
+                                   For regular allocations, this will be 1.
+@Input          pui32MappingTable  when sparse allocations are requested,
+                                   this is the list of the indices of
+                                   each physically-backed virtual chunk
+                                   For regular allocations, this will
+                                   be NULL.
+@Input          uiLog2PageSize   the physical pagesize in log2(bytes).
+@Input          uiFlags          the allocation flags.
+@Input          pszAnnotation    string describing the PMR (for debug).
+                                 This should be passed into the function
+                                 PMRCreatePMR().
+@Output         ppsPMROut        pointer to the PMR created for the
+                                 new allocation
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+extern PVRSRV_ERROR
+PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                         IMG_DEVMEM_SIZE_T uiSize,
+                         IMG_DEVMEM_SIZE_T uiChunkSize,
+                         IMG_UINT32 ui32NumPhysChunks,
+                         IMG_UINT32 ui32NumVirtChunks,
+                         IMG_UINT32 *pui32MappingTable,
+                         IMG_UINT32 uiLog2PageSize,
+                         PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                         const IMG_CHAR *pszAnnotation,
+                         PMR **ppsPMROut);
+
+#endif /* #ifndef _SRVSRV_PHYSMEM_OSMEM_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_tdsecbuf.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_tdsecbuf.h
new file mode 100644
index 0000000..6d13802
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/physmem_tdsecbuf.h
@@ -0,0 +1,84 @@
+/**************************************************************************/ /*!
+@File
+@Title          Header for secure buffer PMR factory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks importing secure buffer
+                allocations.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _PHYSMEM_TDSECBUF_H_
+#define _PHYSMEM_TDSECBUF_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "pmr.h"
+
+/*
+ * PhysmemNewTDSecureBufPMR
+ *
+ * This function is used as part of the facility to provide secure buffer
+ * memory. A default implementation is provided but it can be replaced by
+ * the SoC implementor if necessary.
+ *
+ * Calling this function will create a PMR for a memory allocation made
+ * in "secure buffer memory". It will only be writable by a trusted
+ * entity and when the feature is enabled on the SoC the GPU will only
+ * be able to perform operations permitted by security rules.
+ */
+
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      IMG_DEVMEM_SIZE_T uiSize,
+                                      PMR_LOG2ALIGN_T uiLog2Align,
+                                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                      PMR **ppsPMRPtr,
+                                      IMG_UINT64 *pui64SecBufHandle);
+
+PVRSRV_ERROR PhysmemImportSecBuf(CONNECTION_DATA *psConnection,
+                                 PVRSRV_DEVICE_NODE *psDevNode,
+                                 IMG_DEVMEM_SIZE_T uiSize,
+                                 IMG_UINT32 ui32Log2Align,
+                                 PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                 PMR **ppsPMRPtr,
+                                 IMG_UINT64 *pui64SecBufHandle);
+
+#endif /* _PHYSMEM_TDSECBUF_H_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pmr.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pmr.h
new file mode 100644
index 0000000..9130075
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pmr.h
@@ -0,0 +1,1099 @@
+/**************************************************************************/ /*!
+@File
+@Title		Physmem (PMR) abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Part of the memory management.  This module is responsible for
+                the "PMR" abstraction.  A PMR (Physical Memory Resource)
+                represents some unit of physical memory which is
+                allocated/freed/mapped/unmapped as an indivisible unit
+                (higher software levels provide an abstraction above that
+                to deal with dividing this down into smaller manageable units).
+                Importantly, this module knows nothing of virtual memory, or
+                of MMUs etc., with one excusable exception.  We have the
+                concept of a "page size", which really means nothing in
+                physical memory, but represents a "contiguity quantum" such
+                that the higher level modules which map this memory are able
+                to verify that it matches the needs of the page size for the
+                virtual realm into which it is being mapped.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PMR_H_
+#define _SRVSRV_PMR_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"			/* Required for export DEVMEM_EXPORTCOOKIE */
+
+/* services/include */
+#include "pdump.h"
+
+/* services/server/include/ */
+#include "pmr_impl.h"
+#include "physheap.h"
+#include "opaque_types.h"
+
+#define PMR_MAX_TRANSLATION_STACK_ALLOC				(32)
+
+typedef IMG_UINT64 PMR_BASE_T;
+typedef IMG_UINT64 PMR_SIZE_T;
+#define PMR_SIZE_FMTSPEC "0x%010llX"
+#define PMR_VALUE32_FMTSPEC "0x%08X"
+#define PMR_VALUE64_FMTSPEC "0x%016llX"
+typedef IMG_UINT32 PMR_LOG2ALIGN_T;
+typedef IMG_UINT64 PMR_PASSWORD_T;
+
+struct _PMR_MAPPING_TABLE_
+{
+	PMR_SIZE_T	uiChunkSize;			/*!< Size of a "chunk" */
+	IMG_UINT32 	ui32NumPhysChunks;		/*!< Number of physical chunks that are valid */
+	IMG_UINT32 	ui32NumVirtChunks;		/*!< Number of virtual chunks in the mapping */
+	/* Must be last */
+	IMG_UINT32 	aui32Translation[1];    /*!< Translation mapping for "logical" to physical */
+};
+
+#define TRANSLATION_INVALID 0xFFFFFFFFUL
+
+typedef struct _PMR_EXPORT_ PMR_EXPORT;
+
+typedef struct _PMR_PAGELIST_ PMR_PAGELIST;
+
+
+/*
+ * PMRCreatePMR
+ *
+ * Not to be called directly, only via implementations of PMR
+ * factories, e.g. in physmem_osmem.c, deviceclass.c, etc.
+ *
+ * Creates a PMR object, with callbacks and private data as per the
+ * FuncTab/PrivData args.
+ *
+ * Note that at creation time the PMR must set in stone the "logical
+ * size" and the "contiguity guarantee"
+ *
+ * Flags are also set at this time.  (T.B.D.  flags also immutable for
+ * the life of the PMR?)
+ *
+ * Logical size is the amount of Virtual space this allocation would
+ * take up when mapped.  Note that this does not have to be the same
+ * as the actual physical size of the memory.  For example, consider
+ * the sparsely allocated non-power-of-2 texture case.  In this
+ * instance, the "logical size" would be the virtual size of the
+ * rounded-up power-of-2 texture.  That some pages of physical memory
+ * may not exist does not affect the logical size calculation.
+ *
+ * The PMR must also supply the "contiguity guarantee" which is the
+ * finest granularity of alignment and size of physical pages that the
+ * PMR will provide after LockSysPhysAddresses is called.  Note that
+ * the calling code may choose to call PMRSysPhysAddr with a finer
+ * granularity than this, for example if it were to map into a device
+ * MMU with a smaller page size, and it's also OK for the PMR to
+ * supply physical memory in larger chunks than this.  But
+ * importantly, never the other way around.
+ *
+ * More precisely, the following inequality must be maintained
+ * whenever mappings and/or physical addresses exist:
+ *
+ *       (device MMU page size) <= 2**(uiLog2ContiguityGuarantee) <= (actual contiguity of physical memory)
+ *
+ * The function table will contain the following callbacks which may
+ * be overridden by the PMR implementation:
+ *
+ * pfnLockPhysAddresses
+ *
+ *      Called when a caller requests that physical pages be locked
+ *      down via the PMRLockSysPhysAddresses() API.  Note
+ *      that if physical pages are prefaulted at PMR creation time and
+ *      therefore static, it would not be necessary to override this
+ *      function, in which case NULL may be supplied.
+ *
+ * pfnUnlockPhysAddresses
+ *
+ *      The reverse of pfnLockPhysAddresses.  Note that this should be
+ *      NULL if and only if pfnLockPhysAddresses is NULL
+ *
+ * pfnSysPhysAddr
+ *
+ *      This function is mandatory.  This is the one which returns the
+ *      system physical address for a given offset into this PMR.  The
+ *      "lock" function will have been called, if overridden, before
+ *      this function, thus the implementation should not increase any
+ *      refcount when answering this call.  Refcounting, if necessary,
+ *      should be done in the lock/unlock calls.  Refcounting would
+ *      not be necessary in the prefaulted/static scenario, as the
+ *      pmr.c abstraction will handle the refcounting for the whole
+ *      PMR.
+ *
+ * pfnFinalize
+ *
+ *      Called when the PMR's refcount reaches zero and it gets
+ *      destroyed.  This allows the implementation to free up any
+ *      resource acquired during creation time.
+ *
+ */
+extern PVRSRV_ERROR
+PMRCreatePMR(PPVRSRV_DEVICE_NODE psDevNode,
+             PHYS_HEAP *psPhysHeap,
+             PMR_SIZE_T uiLogicalSize,
+             PMR_SIZE_T uiChunkSize,
+             IMG_UINT32 ui32NumPhysChunks,
+             IMG_UINT32 ui32NumVirtChunks,
+             IMG_UINT32 *pui32MappingTable,
+             PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+             PMR_FLAGS_T uiFlags,
+             const IMG_CHAR *pszAnnotation,
+             const PMR_IMPL_FUNCTAB *psFuncTab,
+             PMR_IMPL_PRIVDATA pvPrivData,
+             PMR_IMPL_TYPE eType,
+             PMR **ppsPMRPtr,
+             IMG_UINT32 ui32PDumpFlags);
+
+/*
+ * PMRLockSysPhysAddresses()
+ *
+ * Calls the relevant callback to lock down the system physical
+ * addresses of the memory that makes up the whole PMR.
+ *
+ * Before this call, it is not valid to use any of the information
+ * getting APIs: PMR_Flags(), PMR_SysPhysAddr(),
+ * [ see note below about lock/unlock semantics ]
+ *
+ * The caller of this function does not have to care about how the PMR
+ * is implemented.  They only need to know that they are allowed access
+ * to the physical addresses _after_ calling this function and _until_
+ * calling PMRUnlockSysPhysAddresses().
+ *
+ *
+ * Notes to callback implementers (authors of PMR Factories):
+ *
+ * Some PMR implementations will be such that the physical memory
+ * exists for the lifetime of the PMR, with a static address, (and
+ * normally flags and symbolic address are static too) and so it is
+ * legal for a PMR implementation to not provide an implementation for
+ * the lock callback.
+ *
+ * Some PMR implementation may wish to page memory in from secondary
+ * storage on demand.  The lock/unlock callbacks _may_ be the place to
+ * do this.  (more likely, there would be a separate API for doing
+ * this, but this API provides a useful place to assert that it has
+ * been done)
+ */
+
+extern PVRSRV_ERROR
+PMRLockSysPhysAddresses(PMR *psPMR);
+
+extern PVRSRV_ERROR
+PMRLockSysPhysAddressesNested(PMR *psPMR,
+                              IMG_UINT32 ui32NestingLevel);
+
+/*
+ * PMRUnlockSysPhysAddresses()
+ *
+ * the reverse of PMRLockSysPhysAddresses()
+ */
+extern PVRSRV_ERROR
+PMRUnlockSysPhysAddresses(PMR *psPMR);
+
+extern PVRSRV_ERROR
+PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel);
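+
+/* Illustrative sketch: the lock/unlock bracket inside which physical
+ * address queries are valid, per the comment above. PMR_Flags() and
+ * PMR_SysPhysAddr() are the query APIs referenced there; their exact
+ * signatures live elsewhere in this header.
+ *
+ *     PVRSRV_ERROR eError = PMRLockSysPhysAddresses(psPMR);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         // ... PMR_Flags() / PMR_SysPhysAddr() queries are valid here ...
+ *
+ *         (void) PMRUnlockSysPhysAddresses(psPMR);
+ *     }
+ */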
+
+
+/**************************************************************************/ /*!
+@Function       PMRUnpinPMR
+@Description    This is the counterpart to PMRPinPMR(). It is meant to be
+                called before repinning an allocation.
+
+                For a detailed description see client API documentation.
+
+@Input          psPMR           The physical memory to unpin.
+
+@Input          bDevMapped      A flag that indicates if this PMR has been
+                                mapped to device virtual space.
+                                Needed to check if this PMR is allowed to be
+                                unpinned or not.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
+                                registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped);
+
+/**************************************************************************/ /*!
+@Function       PMRPinPMR
+@Description    This is the counterpart to PMRUnpinPMR(). It is meant to be
+                called after unpinning an allocation.
+
+                For a detailed description see client API documentation.
+
+@Input          psPMR           The physical memory to pin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
+                                was successfully restored.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+                                could not be restored and new physical memory
+                                was allocated.
+
+                                A different error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR PMRPinPMR(PMR *psPMR);
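+
+/*
+ * Editorial sketch of the unpin/repin cycle (psPMR and the IMG_FALSE
+ * "not device-mapped" flag are placeholders; error handling is
+ * abbreviated):
+ *
+ *     if (PMRUnpinPMR(psPMR, IMG_FALSE) == PVRSRV_OK)
+ *     {
+ *         ...the OS may now reclaim the backing pages...
+ *
+ *         PVRSRV_ERROR eError = PMRPinPMR(psPMR);
+ *         if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ *         {
+ *             ...content was lost; the caller must reinitialise it...
+ *         }
+ *     }
+ */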
+
+
+/*
+ * PMRExportPMR()
+ *
+ * Given a PMR, creates a PMR "Export", which is a handle that
+ * provides sufficient data to be able to "import" this PMR elsewhere.
+ * The PMR Export is an object in its own right, whose existence
+ * implies a reference on the PMR, thus the PMR cannot be destroyed
+ * while the PMR Export exists.  The intention is that the PMR Export
+ * will be wrapped in the devicemem layer by a cross process handle,
+ * and some IPC by which to communicate the handle value and password
+ * to other processes.  The receiving process is able to unwrap this
+ * to gain access to the same PMR Export in this layer, and, via
+ * PMRImportPMR(), obtain a reference to the original PMR.
+ *
+ * The caller receives, along with the PMR Export object, information
+ * about the size and contiguity guarantee for the PMR, and also the
+ * PMR's secret password, in order to authenticate the subsequent
+ * import.
+ *
+ * N.B.  If you call PMRExportPMR() (and it succeeds), you are
+ * promising to later call PMRUnexportPMR()
+ */
+extern PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+             PMR_EXPORT **ppsPMRExport,
+             PMR_SIZE_T *puiSize,
+             PMR_LOG2ALIGN_T *puiLog2Contig,
+             PMR_PASSWORD_T *puiPassword);
+
+/*!
+*******************************************************************************
+
+ @Function	PMRMakeLocalImportHandle
+
+ @Description
+
+ Transform a general handle type into one that we are able to import.
+ Takes a PMR reference.
+
+ @Input   psPMR     The input PMR.
+ @Output  ppsPMR    The output PMR that is going to be transformed to the
+                    correct handle type.
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+extern PVRSRV_ERROR
+PMRMakeLocalImportHandle(PMR *psPMR,
+                         PMR **ppsPMR);
+
+/*!
+*******************************************************************************
+
+ @Function	PMRUnmakeLocalImportHandle
+
+ @Description
+
+ Take a PMR, destroy the handle and release a reference.
+ * Counterpart to PMRMakeLocalImportHandle().
+
+ @Input   psPMR       PMR to destroy.
+                      Created by PMRMakeLocalImportHandle().
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+extern PVRSRV_ERROR
+PMRUnmakeLocalImportHandle(PMR *psPMR);
+
+/*
+ * PMRUnexportPMR()
+ *
+ * The reverse of PMRExportPMR().  This causes the PMR to no
+ * longer be exported.  If the PMR has already been imported, the
+ * imported PMR reference will still be valid, but no further imports
+ * will be possible.
+ */
+extern PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport);
+
+/*
+ * PMRImportPMR()
+ *
+ * Takes a PMR Export object, as obtained by PMRExportPMR(), and
+ * obtains a reference to the original PMR.
+ *
+ * The password must match, and is assumed to have been (by whatever
+ * means, IPC etc.) preserved intact from the former call to
+ * PMRExportPMR()
+ *
+ * The size and contiguity arguments are entirely irrelevant for the
+ * import; however, they are verified in order to trap bugs.
+ *
+ * N.B.  If you call PMRImportPMR() (and it succeeds), you are
+ * promising to later call PMRUnimportPMR().
+ */
+extern PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR);
+
+/*
+ * PMRUnimportPMR()
+ *
+ * releases the reference on the PMR as obtained by PMRImportPMR()
+ */
+extern PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR);
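+
+/*
+ * Editorial sketch of the export/import lifecycle (identifiers are
+ * placeholders and both halves are shown in one address space; in
+ * practice the export handle, size, contiguity and password cross a
+ * process boundary by IPC):
+ *
+ *     PMR_EXPORT *psExport;
+ *     PMR_SIZE_T uiSize;
+ *     PMR_LOG2ALIGN_T uiLog2Contig;
+ *     PMR_PASSWORD_T uiPassword;
+ *     PMR *psImported;
+ *
+ *     PMRExportPMR(psPMR, &psExport, &uiSize, &uiLog2Contig, &uiPassword);
+ *     ...communicate the export handle and metadata to the importer...
+ *     PMRImportPMR(psExport, uiPassword, uiSize, uiLog2Contig, &psImported);
+ *     ...use psImported, honouring the promises noted above...
+ *     PMRUnimportPMR(psImported);
+ *     PMRUnexportPMR(psExport);
+ */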
+
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+                  PMR **ppsPMR,
+                  IMG_DEVMEM_SIZE_T *puiSize,
+                  IMG_DEVMEM_ALIGN_T *puiAlign);
+
+/*
+ * Equivalent mapping functions when in kernel mode - TODO: should
+ * unify this and the PMRAcquireMMapArgs API with a suitable
+ * abstraction
+ */
+extern PVRSRV_ERROR
+PMRAcquireKernelMappingData(PMR *psPMR,
+                            size_t uiLogicalOffset,
+                            size_t uiSize,
+                            void **ppvKernelAddressOut,
+                            size_t *puiLengthOut,
+                            IMG_HANDLE *phPrivOut);
+
+extern PVRSRV_ERROR
+PMRAcquireSparseKernelMappingData(PMR *psPMR,
+                                  size_t uiLogicalOffset,
+                                  size_t uiSize,
+                                  void **ppvKernelAddressOut,
+                                  size_t *puiLengthOut,
+                                  IMG_HANDLE *phPrivOut);
+
+extern PVRSRV_ERROR
+PMRReleaseKernelMappingData(PMR *psPMR,
+                            IMG_HANDLE hPriv);
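+
+/*
+ * Editorial sketch (psPMR and uiSize are placeholders): mapping the
+ * first uiSize bytes of a PMR for kernel access and releasing the
+ * mapping again.
+ *
+ *     void *pvKernAddr;
+ *     size_t uiMappedLen;
+ *     IMG_HANDLE hPriv;
+ *
+ *     if (PMRAcquireKernelMappingData(psPMR, 0, uiSize, &pvKernAddr,
+ *                                     &uiMappedLen, &hPriv) == PVRSRV_OK)
+ *     {
+ *         ...access up to uiMappedLen bytes at pvKernAddr...
+ *
+ *         PMRReleaseKernelMappingData(psPMR, hPriv);
+ *     }
+ */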
+
+#if defined(INTEGRITY_OS)
+extern PVRSRV_ERROR
+PMRMapMemoryObject(PMR *psPMR,
+                   IMG_HANDLE *phMemObj,
+                   void **pvClientAddr,
+                   IMG_HANDLE hPriv);
+extern PVRSRV_ERROR
+PMRUnmapMemoryObject(PMR *psPMR,
+                     IMG_HANDLE hPriv);
+#endif
+
+/*
+ * PMR_ReadBytes()
+ *
+ * calls into the PMR implementation to read up to uiBufSz bytes,
+ * returning the actual number read in *puiNumBytes
+ *
+ * this will read up to the end of the PMR, or the next symbolic name
+ * boundary, or until the requested number of bytes is read, whichever
+ * comes first
+ *
+ * In the case of sparse PMRs the caller doesn't know which offsets are
+ * valid and which aren't, so we will just write 0 for invalid offsets.
+ */
+extern PVRSRV_ERROR
+PMR_ReadBytes(PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT8 *pcBuffer,
+              size_t uiBufSz,
+              size_t *puiNumBytes);
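+
+/*
+ * Editorial sketch: reading the first 16 bytes of a PMR into a local
+ * buffer (psPMR is a placeholder; a short read is reported through
+ * *puiNumBytes rather than as an error):
+ *
+ *     IMG_UINT8 abBuf[16];
+ *     size_t uiRead;
+ *
+ *     if (PMR_ReadBytes(psPMR, 0, abBuf, sizeof(abBuf), &uiRead) == PVRSRV_OK)
+ *     {
+ *         ...the first uiRead bytes of abBuf are valid...
+ *     }
+ */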
+
+/*
+ * PMR_WriteBytes()
+ *
+ * calls into the PMR implementation to write up to uiBufSz bytes,
+ * returning the actual number written in *puiNumBytes
+ *
+ * this will write up to the end of the PMR, or the next symbolic name
+ * boundary, or until the requested number of bytes is written, whichever
+ * comes first
+ *
+ * In the case of sparse PMRs the caller doesn't know which offsets are
+ * valid and which aren't, so we will just ignore data at invalid offsets.
+ */
+extern PVRSRV_ERROR
+PMR_WriteBytes(PMR *psPMR,
+               IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+               IMG_UINT8 *pcBuffer,
+               size_t uiBufSz,
+               size_t *puiNumBytes);
+
+/**************************************************************************/ /*!
+@Function       PMRMMapPMR
+@Description    Performs the necessary steps to map the PMR into a user process
+                address space. The caller does not need to call
+                PMRLockSysPhysAddresses before calling this function.
+
+@Input          psPMR           PMR to map.
+
+@Input          pOSMMapData     OS specific data needed to create a mapping.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success or an error otherwise.
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData);
+
+/*
+ * PMRRefPMR()
+ *
+ * Take a reference on the passed in PMR
+ */
+extern void
+PMRRefPMR(PMR *psPMR);
+
+/*
+ * PMRUnrefPMR()
+ *
+ * This undoes a call to any of the PhysmemNew* family of APIs
+ * (i.e. any PMR factory "constructor")
+ *
+ * This relinquishes a reference to the PMR, and, where the refcount
+ * reaches 0, causes the PMR to be destroyed (calling the finalizer
+ * callback on the PMR, if there is one)
+ */
+extern PVRSRV_ERROR
+PMRUnrefPMR(PMR *psPMR);
+
+/*
+ * PMRUnrefUnlockPMR()
+ *
+ * Same as above but also unlocks the PMR.
+ */
+extern PVRSRV_ERROR
+PMRUnrefUnlockPMR(PMR *psPMR);
+
+extern PPVRSRV_DEVICE_NODE
+PMR_DeviceNode(const PMR *psPMR);
+
+/*
+ * PMRIsPMRLive()
+ *
+ * This function returns true if the PMR is in use and false otherwise.
+ * This function is not thread safe; the caller needs to ensure
+ * thread safety by explicitly taking the PMR lock or through other
+ * means.
+ */
+IMG_BOOL PMRIsPMRLive(PMR *psPMR);
+
+/*
+ * PMR_Flags()
+ *
+ * Flags are static and guaranteed for the life of the PMR.  Thus this
+ * function is idempotent and acquire/release semantics are not
+ * required.
+ *
+ * Returns the flags as specified on the PMR.  The flags are to be
+ * interpreted as mapping permissions.
+ */
+extern PMR_FLAGS_T
+PMR_Flags(const PMR *psPMR);
+
+extern IMG_BOOL
+PMR_IsSparse(const PMR *psPMR);
+
+extern PVRSRV_ERROR
+PMR_LogicalSize(const PMR *psPMR,
+                IMG_DEVMEM_SIZE_T *puiLogicalSize);
+
+extern PHYS_HEAP *
+PMR_PhysHeap(const PMR *psPMR);
+
+extern PMR_MAPPING_TABLE *
+PMR_GetMappigTable(const PMR *psPMR);
+
+extern IMG_UINT32
+PMR_GetLog2Contiguity(const PMR *psPMR);
+
+/*
+ * PMR_IsOffsetValid()
+ *
+ * Returns whether an address offset inside a PMR has a valid
+ * physical backing.
+ */
+extern PVRSRV_ERROR
+PMR_IsOffsetValid(const PMR *psPMR,
+                  IMG_UINT32 ui32Log2PageSize,
+                  IMG_UINT32 ui32NumOfPages,
+                  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                  IMG_BOOL *pbValid);
+
+extern PMR_IMPL_TYPE
+PMR_GetType(const PMR *psPMR);
+
+/*
+ * PMR_DevPhysAddr()
+ *
+ * A note regarding Lock/Unlock semantics
+ * ======================================
+ *
+ * PMR_DevPhysAddr() may only be called after PMRLockSysPhysAddresses()
+ * has been called.  The data returned may be used only until
+ * PMRUnlockSysPhysAddresses() is called after which time the licence
+ * to use the data is revoked and the information may be invalid.
+ *
+ * Given an offset, this function returns the device physical address of the
+ * corresponding page in the PMR.  It may be called multiple times
+ * until the address of all relevant pages has been determined.
+ *
+ * If the caller only wants one physical address it is sufficient to
+ * pass in ui32Log2PageSize==0 and ui32NumOfPages==1.
+ */
+extern PVRSRV_ERROR
+PMR_DevPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEV_PHYADDR *psDevAddr,
+                IMG_BOOL *pbValid);
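+
+/*
+ * Editorial sketch: querying the device physical addresses of the first
+ * four 4KB pages of a locked PMR in one call (psPMR is a placeholder;
+ * entries whose abValid flag is IMG_FALSE have no physical backing):
+ *
+ *     IMG_DEV_PHYADDR asDevPAddrs[4];
+ *     IMG_BOOL abValid[4];
+ *
+ *     PMR_DevPhysAddr(psPMR, 12, 4, 0, asDevPAddrs, abValid);
+ */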
+
+/*
+ * PMR_CpuPhysAddr()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * Given an offset, this function returns the CPU physical address of the
+ * corresponding page in the PMR.  It may be called multiple times
+ * until the address of all relevant pages has been determined.
+ *
+ */
+extern PVRSRV_ERROR
+PMR_CpuPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_CPU_PHYADDR *psCpuAddrPtr,
+                IMG_BOOL *pbValid);
+
+PVRSRV_ERROR
+PMRGetUID(PMR *psPMR,
+          IMG_UINT64 *pui64UID);
+
+/*
+ * PMR_ChangeSparseMem()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * This function alters the memory map of the given PMR in device space
+ * by adding/deleting the pages as requested.
+ *
+ */
+PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
+                                 IMG_UINT32 ui32AllocPageCount,
+                                 IMG_UINT32 *pai32AllocIndices,
+                                 IMG_UINT32 ui32FreePageCount,
+                                 IMG_UINT32 *pai32FreeIndices,
+                                 IMG_UINT32 uiFlags);
+
+/*
+ * PMR_ChangeSparseMemCPUMap()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * This function alters the memory map of the given PMR in CPU space
+ * by adding/deleting the pages as requested.
+ *
+ */
+PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR,
+                                       IMG_UINT64 sCpuVAddrBase,
+                                       IMG_UINT32 ui32AllocPageCount,
+                                       IMG_UINT32 *pai32AllocIndices,
+                                       IMG_UINT32 ui32FreePageCount,
+                                       IMG_UINT32 *pai32FreeIndices);
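+
+/*
+ * Editorial sketch: giving pages 0 and 2 of a sparse PMR physical
+ * backing while dropping the backing of page 5 in a single call
+ * (psPMR and uiFlags are placeholders):
+ *
+ *     IMG_UINT32 aui32Alloc[] = { 0, 2 };
+ *     IMG_UINT32 aui32Free[]  = { 5 };
+ *
+ *     PMR_ChangeSparseMem(psPMR, 2, aui32Alloc, 1, aui32Free, uiFlags);
+ */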
+
+#if defined(PDUMP)
+
+extern void
+PDumpPMRMallocPMR(PMR *psPMR,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiBlockSize,
+                  IMG_UINT32 ui32ChunkSize,
+                  IMG_UINT32 ui32NumPhysChunks,
+                  IMG_UINT32 ui32NumVirtChunks,
+                  IMG_UINT32 *puiMappingTable,
+                  IMG_UINT32 uiLog2Contiguity,
+                  IMG_BOOL bInitialise,
+                  IMG_UINT32 ui32InitValue,
+                  IMG_HANDLE *phPDumpAllocInfoPtr,
+                  IMG_UINT32 ui32PDumpFlags);
+
+extern void
+PDumpPMRFreePMR(PMR *psPMR,
+                IMG_DEVMEM_SIZE_T uiSize,
+                IMG_DEVMEM_ALIGN_T uiBlockSize,
+                IMG_UINT32 uiLog2Contiguity,
+                IMG_HANDLE hPDumpAllocationInfoHandle);
+
+extern void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+                        IMG_UINT32 uiBlockSize,
+                        IMG_UINT32 ui32AllocPageCount,
+                        IMG_UINT32 *pai32AllocIndices,
+                        IMG_UINT32 ui32FreePageCount,
+                        IMG_UINT32 *pai32FreeIndices,
+                        IMG_BOOL bInitialise,
+                        IMG_UINT32 ui32InitValue,
+                        IMG_HANDLE *phPDumpAllocInfoOut);
+/*
+ * PMR_PDumpSymbolicAddr()
+ *
+ * Given an offset, returns the pdump memspace name and symbolic
+ * address of the corresponding page in the PMR.
+ *
+ * Note that PDump memspace names and symbolic addresses are static
+ * and valid for the lifetime of the PMR, therefore we don't require
+ * acquire/release semantics here.
+ *
+ * Note that it is expected that the pdump "mapping" code will call
+ * this function multiple times as each page is mapped in turn
+ *
+ * Note that NextSymName is the offset from the base of the PMR to the
+ * next pdump symbolic address (or the end of the PMR if the PMR only
+ * had one PDUMPMALLOC).
+ */
+extern PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                      IMG_UINT32 ui32NamespaceNameLen,
+                      IMG_CHAR *pszNamespaceName,
+                      IMG_UINT32 ui32SymbolicAddrLen,
+                      IMG_CHAR *pszSymbolicAddr,
+                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                      IMG_DEVMEM_OFFSET_T *puiNextSymName);
+
+/*
+ * PMRPDumpLoadMemValue32()
+ *
+ * writes the current contents of a dword in PMR memory to the pdump
+ * script stream. Useful for patching a buffer by simply editing the
+ * script output file in ASCII plain text.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT32 ui32Value,
+                       PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpCopyMem32
+ *
+ * Adds to the pdump script stream a copy of a dword from one PMR
+ * memory location to another PMR memory location.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpCopyMem32(PMR *psDstPMR,
+                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+                  PMR *psSrcPMR,
+                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+                  const IMG_CHAR *pszTmpVar,
+                  PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpLoadMemValue64()
+ *
+ * writes the current contents of a quadword in PMR memory to the pdump
+ * script stream. Useful for patching a buffer by simply editing the
+ * script output file in ASCII plain text.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT64 ui64Value,
+                       PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpCopyMem64
+ *
+ * Adds to the pdump script stream a copy of a quadword from one PMR
+ * memory location to another PMR memory location.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpCopyMem64(PMR *psDstPMR,
+                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+                  PMR *psSrcPMR,
+                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+                  const IMG_CHAR *pszTmpVar,
+                  PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpLoadMem()
+ *
+ * writes the current contents of the PMR memory to the pdump PRM
+ * stream, and emits some PDump code to the script stream to LDB said
+ * bytes from said file. If bZero is IMG_TRUE then the PDump zero page
+ * is used as the source for the LDB.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEVMEM_SIZE_T uiSize,
+                PDUMP_FLAGS_T uiPDumpFlags,
+                IMG_BOOL bZero);
+
+/*
+ * PMRPDumpSaveToFile()
+ *
+ * emits some PDump that does an SAB (save bytes) using the PDump
+ * symbolic address of the PMR.  Note that this is generally not the
+ * preferred way to dump the buffer contents.  There is an equivalent
+ * function in devicemem_server.h which also emits SAB but using the
+ * virtual address, which is the "right" way to dump the buffer
+ * contents to a file.  This function exists just to aid testing by
+ * providing a means to dump the PMR directly by symbolic address
+ * also.
+ */
+extern PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+                   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   IMG_UINT32 uiArraySize,
+                   const IMG_CHAR *pszFilename,
+                   IMG_UINT32 uiFileOffset);
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPMRMallocPMR)
+#endif
+static INLINE void
+PDumpPMRMallocPMR(PMR *psPMR,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiBlockSize,
+                  IMG_UINT32 ui32ChunkSize,
+                  IMG_UINT32 ui32NumPhysChunks,
+                  IMG_UINT32 ui32NumVirtChunks,
+                  IMG_UINT32 *puiMappingTable,
+                  IMG_UINT32 uiLog2Contiguity,
+                  IMG_BOOL bInitialise,
+                  IMG_UINT32 ui32InitValue,
+                  IMG_HANDLE *phPDumpAllocInfoPtr,
+                  IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+	PVR_UNREFERENCED_PARAMETER(ui32ChunkSize);
+	PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+	PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+	PVR_UNREFERENCED_PARAMETER(puiMappingTable);
+	PVR_UNREFERENCED_PARAMETER(uiLog2Contiguity);
+	PVR_UNREFERENCED_PARAMETER(bInitialise);
+	PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+	PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoPtr);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPMRFreePMR)
+#endif
+static INLINE void
+PDumpPMRFreePMR(PMR *psPMR,
+                IMG_DEVMEM_SIZE_T uiSize,
+                IMG_DEVMEM_ALIGN_T uiBlockSize,
+                IMG_UINT32 uiLog2Contiguity,
+                IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+	PVR_UNREFERENCED_PARAMETER(uiLog2Contiguity);
+	PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPMRChangeSparsePMR)
+#endif
+static INLINE void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+                        IMG_UINT32 uiBlockSize,
+                        IMG_UINT32 ui32AllocPageCount,
+                        IMG_UINT32 *pai32AllocIndices,
+                        IMG_UINT32 ui32FreePageCount,
+                        IMG_UINT32 *pai32FreeIndices,
+                        IMG_BOOL bInitialise,
+                        IMG_UINT32 ui32InitValue,
+                        IMG_HANDLE *phPDumpAllocInfoOut)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+	PVR_UNREFERENCED_PARAMETER(ui32AllocPageCount);
+	PVR_UNREFERENCED_PARAMETER(pai32AllocIndices);
+	PVR_UNREFERENCED_PARAMETER(ui32FreePageCount);
+	PVR_UNREFERENCED_PARAMETER(pai32FreeIndices);
+	PVR_UNREFERENCED_PARAMETER(bInitialise);
+	PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+	PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoOut);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMR_PDumpSymbolicAddr)
+#endif
+static INLINE PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                      IMG_UINT32 ui32NamespaceNameLen,
+                      IMG_CHAR *pszNamespaceName,
+                      IMG_UINT32 ui32SymbolicAddrLen,
+                      IMG_CHAR *pszSymbolicAddr,
+                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                      IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32NamespaceNameLen);
+	PVR_UNREFERENCED_PARAMETER(pszNamespaceName);
+	PVR_UNREFERENCED_PARAMETER(ui32SymbolicAddrLen);
+	PVR_UNREFERENCED_PARAMETER(pszSymbolicAddr);
+	PVR_UNREFERENCED_PARAMETER(puiNewOffset);
+	PVR_UNREFERENCED_PARAMETER(puiNextSymName);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMemValue)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT32 ui32Value,
+                       PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMemValue)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT64 ui64Value,
+                       PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(ui64Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMem)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEVMEM_SIZE_T uiSize,
+                PDUMP_FLAGS_T uiPDumpFlags,
+                IMG_BOOL bZero)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+	PVR_UNREFERENCED_PARAMETER(bZero);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpSaveToFile)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+                   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   IMG_UINT32 uiArraySize,
+                   const IMG_CHAR *pszFilename,
+                   IMG_UINT32 uiFileOffset)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiArraySize);
+	PVR_UNREFERENCED_PARAMETER(pszFilename);
+	PVR_UNREFERENCED_PARAMETER(uiFileOffset);
+	return PVRSRV_OK;
+}
+
+#endif	/* PDUMP */
+
+/* This function returns the private data that a PMR subtype
+   squirrelled away here. We use the function table pointer as
+   "authorization" that this function is being called by the PMR
+   subtype implementation.  We can assume (assert) that.  It would be
+   a bug in the implementation of the PMR subtype if this assertion
+   ever fails. */
+extern void *
+PMRGetPrivateData(const PMR *psPMR,
+                  const PMR_IMPL_FUNCTAB *psFuncTab);
+
+extern PVRSRV_ERROR
+PMRZeroingPMR(PMR *psPMR,
+              IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize);
+
+PVRSRV_ERROR
+PMRDumpPageList(PMR *psReferencePMR,
+                IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize);
+
+extern PVRSRV_ERROR
+PMRWritePMPageList(/* Target PMR, offset, and length */
+                   PMR *psPageListPMR,
+                   IMG_DEVMEM_OFFSET_T uiTableOffset,
+                   IMG_DEVMEM_SIZE_T  uiTableLength,
+                   /* Referenced PMR, and "page" granularity */
+                   PMR *psReferencePMR,
+                   IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+                   PMR_PAGELIST **ppsPageList);
+
+/* Doesn't actually erase the page list - just releases the appropriate refcounts */
+extern PVRSRV_ERROR // should be void, surely
+PMRUnwritePMPageList(PMR_PAGELIST *psPageList);
+
+#if defined(PDUMP)
+extern PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT32 ui32Value,
+              IMG_UINT32 ui32Mask,
+              PDUMP_POLL_OPERATOR eOperator,
+              PDUMP_FLAGS_T uiFlags);
+
+extern PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize);
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpPol32)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT32 ui32Value,
+              IMG_UINT32 ui32Mask,
+              PDUMP_POLL_OPERATOR eOperator,
+              PDUMP_FLAGS_T uiFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpCBP)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+	return PVRSRV_OK;
+}
+#endif
+
+extern PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR);
+
+/*
+ * PMRInit()
+ *
+ * To be called once and only once to initialise the internal data in
+ * the PMR module (mutexes and such)
+ *
+ * Not for general use.  Only PVRSRVInit() should be calling this.
+ */
+extern PVRSRV_ERROR
+PMRInit(void);
+
+/*
+ * PMRDeInit()
+ *
+ * To be called once and only once to deinitialise the internal data in
+ * the PMR module (mutexes and such) and for debug checks
+ *
+ * Not for general use.  Only PVRSRVDeInit() should be calling this.
+ */
+extern PVRSRV_ERROR
+PMRDeInit(void);
+
+#if defined(PVR_RI_DEBUG)
+extern PVRSRV_ERROR
+PMRStoreRIHandle(PMR *psPMR,
+                 void *hRIHandle);
+#endif
+
+int  PMRRefCount(const PMR *psPMR);
+void PMRSetPath(PMR *psPMR);
+
+#endif /* #ifdef _SRVSRV_PMR_H_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pmr_impl.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pmr_impl.h
new file mode 100644
index 0000000..fad4762
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pmr_impl.h
@@ -0,0 +1,525 @@
+/**************************************************************************/ /*!
+@File
+@Title          Implementation Callbacks for Physmem (PMR) abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management.  This file is for definitions
+                that are private to the world of PMRs, but that need to be
+                shared between pmr.c itself and the modules that implement the
+                callbacks for the PMR.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PMR_IMPL_H_
+#define _SRVSRV_PMR_IMPL_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+typedef struct _PMR_ PMR;
+/* stuff that per-flavour callbacks need to share with pmr.c */
+typedef void *PMR_IMPL_PRIVDATA;
+
+typedef PVRSRV_MEMALLOCFLAGS_T PMR_FLAGS_T;
+typedef struct _PMR_MAPPING_TABLE_ PMR_MAPPING_TABLE;
+typedef void *PMR_MMAP_DATA;
+
+/**
+ *  Which PMR factory has created this PMR?
+ */
+typedef enum _PMR_IMPL_TYPE_
+{
+	PMR_TYPE_NONE = 0,
+	PMR_TYPE_OSMEM,
+	PMR_TYPE_LMA,
+	PMR_TYPE_DMABUF,
+	PMR_TYPE_EXTMEM,
+	PMR_TYPE_DC,
+	PMR_TYPE_TDFWCODE,
+	PMR_TYPE_TDSECBUF
+} PMR_IMPL_TYPE;
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_LOCK_PHYS_ADDRESSES_FN
+
+@Description    Called to lock down the physical addresses for all pages
+                allocated for a PMR.
+                The default implementation is to simply increment a
+                lock-count for debugging purposes.
+                If overridden, the PFN_LOCK_PHYS_ADDRESSES_FN function will
+                be called when someone first requires a physical address,
+                and the PFN_UNLOCK_PHYS_ADDRESSES_FN counterpart will be
+                called when the last such reference is released.
+                The PMR implementation may assume that physical addresses
+                will have been "locked" in this manner before any call is
+                made to the pfnDevPhysAddr() callback
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+
+@Return         PVRSRV_OK if the operation was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_LOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_UNLOCK_PHYS_ADDRESSES_FN
+
+@Description    Called to release the lock taken on the physical addresses
+                for all pages allocated for a PMR.
+                The default implementation is to simply decrement a
+                lock-count for debugging purposes.
+                If overridden, the PFN_UNLOCK_PHYS_ADDRESSES_FN will be
+                called when the last reference taken on the PMR is
+                released.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+
+@Return         PVRSRV_OK if the operation was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_DEV_PHYS_ADDR_FN
+
+@Description    Called to obtain one or more physical addresses for given
+                offsets within a PMR.
+
+                The PFN_LOCK_PHYS_ADDRESSES_FN callback (if overridden) is
+                guaranteed to have been called prior to calling the
+                PFN_DEV_PHYS_ADDR_FN callback and the caller promises not to
+                rely on the physical address thus obtained after the
+                PFN_UNLOCK_PHYS_ADDRESSES_FN callback is called.
+
+   Implementation of this callback is mandatory.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          ui32Log2PageSize          The log2 page size.
+@Input          ui32NumOfAddr             The number of addresses to be
+                                          returned
+@Input          puiOffset                 The offset from the start of the
+                                          PMR (in bytes) for which the
+                                          physical address is required.
+                                          Where multiple addresses are
+                                          requested, this will contain a
+                                          list of offsets.
+@Output         pbValid                   List of boolean flags indicating
+                                          which addresses in the returned
+                                          list (psDevAddrPtr) are valid
+                                          (for sparse allocations, not all
+                                          pages may have a physical backing)
+@Output         psDevAddrPtr              Returned list of physical addresses
+
+@Return         PVRSRV_OK if the operation was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_DEV_PHYS_ADDR_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      IMG_UINT32 ui32Log2PageSize,
+                      IMG_UINT32 ui32NumOfAddr,
+                      IMG_DEVMEM_OFFSET_T *puiOffset,
+                      IMG_BOOL *pbValid,
+                      IMG_DEV_PHYADDR *psDevAddrPtr);
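+
+/*
+ * Editorial sketch of a minimal, fully-backed implementation for a
+ * factory whose private data holds one device address per page (the
+ * MY_PRIV type and its pasPages member are hypothetical):
+ *
+ *     typedef struct { IMG_DEV_PHYADDR *pasPages; } MY_PRIV;
+ *
+ *     static PVRSRV_ERROR MyDevPhysAddr(PMR_IMPL_PRIVDATA pvPriv,
+ *                                       IMG_UINT32 ui32Log2PageSize,
+ *                                       IMG_UINT32 ui32NumOfAddr,
+ *                                       IMG_DEVMEM_OFFSET_T *puiOffset,
+ *                                       IMG_BOOL *pbValid,
+ *                                       IMG_DEV_PHYADDR *psDevAddrPtr)
+ *     {
+ *         MY_PRIV *psPriv = pvPriv;
+ *         IMG_UINT32 i;
+ *
+ *         for (i = 0; i < ui32NumOfAddr; i++)
+ *         {
+ *             IMG_UINT64 uiPage   = puiOffset[i] >> ui32Log2PageSize;
+ *             IMG_UINT64 uiInPage = puiOffset[i] & ((1ULL << ui32Log2PageSize) - 1);
+ *
+ *             psDevAddrPtr[i].uiAddr = psPriv->pasPages[uiPage].uiAddr + uiInPage;
+ *             pbValid[i] = IMG_TRUE;
+ *         }
+ *         return PVRSRV_OK;
+ *     }
+ */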
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+
+@Description    Called to obtain a kernel-accessible address (mapped to a
+                virtual address if required) for the PMR for use internally
+                in Services.
+
+    Implementation of this function for the (default) PMR factory providing
+    OS-allocations is mandatory (the driver will expect to be able to call
+    this function for OS-provided allocations).
+    For other PMR factories, implementation of this function is only necessary
+    where an MMU mapping is required for the Kernel to be able to access the
+    allocated memory.
+    If no mapping is needed, this function can remain unimplemented and the
+    pfn may be set to NULL.
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          uiOffset                  Offset from the beginning of
+                                          the PMR at which mapping is to
+                                          start
+@Input          uiSize                    Size of mapping (in bytes)
+@Output         ppvKernelAddressOut       Mapped kernel address
+@Output         phHandleOut               Returned handle of the new mapping
+@Input          ulFlags                   Mapping flags
+
+@Return         PVRSRV_OK if the mapping was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      size_t uiOffset,
+                      size_t uiSize,
+                      void **ppvKernelAddressOut,
+                      IMG_HANDLE *phHandleOut,
+                      PMR_FLAGS_T ulFlags);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_RELEASE_KERNEL_MAPPING_DATA_FN
+
+@Description    Called to release a mapped kernel virtual address
+
+   Implementation of this callback is mandatory if PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+   is provided for the PMR factory, otherwise this function can remain unimplemented
+   and the pfn may be set to NULL.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          hHandle                   Handle of the mapping to be
+                                          released
+
+@Return         None
+*/
+/*****************************************************************************/
+typedef void (*PFN_RELEASE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv,
+              IMG_HANDLE hHandle);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_READ_BYTES_FN
+
+@Description    Called to read bytes from an unmapped allocation
+
+   Implementation of this callback is optional -
+   where it is not provided, the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+   to map the entire PMR (if an MMU mapping is required for the Kernel to be
+   able to access the allocated memory).
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          uiOffset                  Offset from the beginning of
+                                          the PMR at which to begin
+                                          reading
+@Output         pcBuffer                  Buffer in which to return the
+                                          read data
+@Input          uiBufSz                   Number of bytes to be read
+@Output         puiNumBytes               Number of bytes actually read
+                                          (may be less than uiBufSz)
+
+@Return         PVRSRV_OK if the read was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_READ_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_UINT8 *pcBuffer,
+                      size_t uiBufSz,
+                      size_t *puiNumBytes);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_WRITE_BYTES_FN
+
+@Description    Called to write bytes into an unmapped allocation
+
+   Implementation of this callback is optional -
+   where it is not provided, the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+   to map the entire PMR (if an MMU mapping is required for the Kernel to be
+   able to access the allocated memory).
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          uiOffset                  Offset from the beginning of
+                                          the PMR at which to begin
+                                          writing
+@Input          pcBuffer                  Buffer containing the data to be
+                                          written
+@Input          uiBufSz                   Number of bytes to be written
+@Output         puiNumBytes               Number of bytes actually written
+                                          (may be less than uiBufSz)
+
+@Return         PVRSRV_OK if the write was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_WRITE_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_UINT8 *pcBuffer,
+                      size_t uiBufSz,
+                      size_t *puiNumBytes);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_UNPIN_MEM_FN
+
+@Description    Called to unpin an allocation.
+                Once unpinned, the pages backing the allocation may be
+                re-used by the Operating System for another purpose.
+                When the pages are required again, they may be re-pinned
+                (by calling PFN_PIN_MEM_FN). The driver will try to return
+                the same pages as before. The caller will be told if the
+                content of these returned pages has been modified or if
+                the pages returned are not the original pages.
+
+   Implementation of this callback is optional.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+
+@Return         PVRSRV_OK if the unpin was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_UNPIN_MEM_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_PIN_MEM_FN
+
+@Description    Called to pin a previously unpinned allocation.
+                The driver will try to return the same pages as were previously
+                assigned to the allocation. The caller will be told if the
+                content of these returned pages has been modified or if
+                the pages returned are not the original pages.
+
+   Implementation of this callback is optional.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+
+@Input          psMappingTable            Mapping table, which describes how
+                                          virtual 'chunks' are to be mapped to
+                                          physical 'chunks' for the allocation.
+
+@Return         PVRSRV_OK if the original pages were returned unmodified.
+                PVRSRV_ERROR_PMR_NEW_MEMORY if the memory returned was modified
+                or different pages were returned.
+                Another PVRSRV_ERROR code on failure.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_PIN_MEM_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      PMR_MAPPING_TABLE *psMappingTable);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_CHANGE_SPARSE_MEM_FN
+
+@Description    Called to modify the physical backing for a given sparse
+                allocation.
+                The caller provides a list of the pages within the sparse
+                allocation which should be backed with a physical allocation
+                and a list of the pages which do not require backing.
+
+                Implementation of this callback is mandatory.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          psPMR                     The PMR of the sparse allocation
+                                          to be modified
+@Input          ui32AllocPageCount        The number of pages specified in
+                                          pai32AllocIndices
+@Input          pai32AllocIndices         The list of pages in the sparse
+                                          allocation that should be backed
+                                          with a physical allocation. Pages
+                                          are referenced by their index
+                                          within the sparse allocation
+                                          (e.g. in a 10 page allocation, pages
+                                          are denoted by indices 0 to 9)
+@Input          ui32FreePageCount         The number of pages specified in
+                                          pai32FreeIndices
+@Input          pai32FreeIndices          The list of pages in the sparse
+                                          allocation that do not require
+                                          a physical allocation.
+@Input          ui32Flags                 Allocation flags
+
+@Return         PVRSRV_OK if the sparse allocation physical backing was updated
+                successfully, an error code otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      const PMR *psPMR,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *pai32AllocIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pai32FreeIndices,
+                      IMG_UINT32 uiFlags);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN
+
+@Description    Called to modify which pages are mapped for a given sparse
+                allocation.
+                The caller provides a list of the pages within the sparse
+                allocation which should be given a CPU mapping and a list
+                of the pages which do not require a CPU mapping.
+
+   Implementation of this callback is mandatory.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          psPMR                     The PMR of the sparse allocation
+                                          to be modified
+@Input          sCpuVAddrBase             The virtual base address of the
+                                          sparse allocation
+@Input          ui32AllocPageCount        The number of pages specified in
+                                          pai32AllocIndices
+@Input          pai32AllocIndices         The list of pages in the sparse
+                                          allocation that should be given
+                                          a CPU mapping. Pages are referenced
+                                          by their index within the sparse
+                                          allocation (e.g. in a 10 page
+                                          allocation, pages are denoted by
+                                          indices 0 to 9)
+@Input          ui32FreePageCount         The number of pages specified in
+                                          pai32FreeIndices
+@Input          pai32FreeIndices          The list of pages in the sparse
+                                          allocation that do not require a CPU
+                                          mapping.
+
+@Return         PVRSRV_OK if the page mappings were updated successfully, an
+                error code otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      const PMR *psPMR,
+                      IMG_UINT64 sCpuVAddrBase,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *pai32AllocIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pai32FreeIndices);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_MMAP_FN
+
+@Description    Called to map pages in the specified PMR.
+
+   Implementation of this callback is optional.
+   Where it is provided, it will be used in place of OSMMapPMRGeneric().
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+@Input          psPMR                     The PMR of the allocation to be
+                                          mapped
+@Input          pMMapData                 OS-specific data to describe how
+                                          mapping should be performed
+
+@Return         PVRSRV_OK if the mapping was successful, an error code
+                otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_MMAP_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                                    PMR *psPMR,
+                                    PMR_MMAP_DATA pMMapData);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_FINALIZE_FN
+
+@Description    Called to destroy the PMR.
+                This callback will be called only when all references to
+                the PMR have been dropped.
+                The PMR was created via a call to PhysmemNewRamBackedPMR()
+                and is destroyed via this callback.
+
+   Implementation of this callback is mandatory.
+
+@Input          pvPriv                    Private data (which was generated
+                                          by the PMR factory when PMR was
+                                          created)
+
+@Return         PVRSRV_OK if the PMR destruction was successful, an error
+                code otherwise.
+                Currently PVRSRV_ERROR_PMR_STILL_REFERENCED is the only
+                error returned from the physmem_dmabuf.c layer; on this
+                error, destruction of the PMR is aborted without
+                disturbing the PMR state.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+struct _PMR_IMPL_FUNCTAB_ {
+    PFN_LOCK_PHYS_ADDRESSES_FN pfnLockPhysAddresses;
+    PFN_UNLOCK_PHYS_ADDRESSES_FN pfnUnlockPhysAddresses;
+
+    PFN_DEV_PHYS_ADDR_FN pfnDevPhysAddr;
+
+    PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN pfnAcquireKernelMappingData;
+    PFN_RELEASE_KERNEL_MAPPING_DATA_FN pfnReleaseKernelMappingData;
+
+#if defined(INTEGRITY_OS)
+    /*
+     * MapMemoryObject()/UnmapMemoryObject()
+     *
+     * called to map/unmap memory objects in Integrity OS
+     */
+
+    PVRSRV_ERROR (*pfnMapMemoryObject)(PMR_IMPL_PRIVDATA pvPriv,
+                                       IMG_HANDLE *phMemObj,
+                                       void **pvClientAddr);
+    PVRSRV_ERROR (*pfnUnmapMemoryObject)(PMR_IMPL_PRIVDATA pvPriv);
+
+#if defined(USING_HYPERVISOR)
+    IMG_HANDLE (*pfnGetPmr)(PMR_IMPL_PRIVDATA pvPriv, size_t ulOffset);
+#endif
+#endif
+
+    PFN_READ_BYTES_FN pfnReadBytes;
+    PFN_WRITE_BYTES_FN pfnWriteBytes;
+
+    PFN_UNPIN_MEM_FN pfnUnpinMem;
+    PFN_PIN_MEM_FN pfnPinMem;
+
+    PFN_CHANGE_SPARSE_MEM_FN pfnChangeSparseMem;
+    PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN pfnChangeSparseMemCPUMap;
+
+    PFN_MMAP_FN pfnMMap;
+
+    PFN_FINALIZE_FN pfnFinalize;
+};
+typedef struct _PMR_IMPL_FUNCTAB_ PMR_IMPL_FUNCTAB;
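+
+/*
+ * Editorial sketch: a factory wires its callbacks into the table and
+ * leaves unimplemented optional entries NULL (MyDevPhysAddr and
+ * MyFinalize are hypothetical; a sparse-capable factory would also set
+ * pfnChangeSparseMem and pfnChangeSparseMemCPUMap):
+ *
+ *     static const PMR_IMPL_FUNCTAB gsMyFuncTab = {
+ *         .pfnDevPhysAddr = MyDevPhysAddr,
+ *         .pfnFinalize    = MyFinalize,
+ *     };
+ */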
+
+
+#endif /* of #ifndef _SRVSRV_PMR_IMPL_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pmr_os.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pmr_os.h
new file mode 100644
index 0000000..0dfbd49
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pmr_os.h
@@ -0,0 +1,62 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS PMR functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS specific PMR functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PMR_OS_H__)
+#define __PMR_OS_H__
+
+#include "pmr_impl.h"
+
+/*************************************************************************/ /*!
+@Function       OSMMapPMRGeneric
+@Description    Implements a generic PMR mapping function, which is used
+                to CPU map a PMR where the PMR does not have a mapping
+                function defined by the creating PMR factory.
+@Input          psPMR               the PMR to be mapped
+@Output         pOSMMapData         pointer to any private data
+                                    needed by the generic mapping function
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData);
+
+#endif /* !defined(__PMR_OS_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/power.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/power.h
new file mode 100644
index 0000000..cc582e29
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/power.h
@@ -0,0 +1,133 @@
+/*************************************************************************/ /*!
+@File
+@Title          Power Management Functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for power management functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef POWER_H
+#define POWER_H
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_device.h"
+#include "pvrsrv_error.h"
+#include "servicesext.h"
+#include "opaque_types.h"
+
+/*!
+ *****************************************************************************
+ *	Power management
+ *****************************************************************************/
+
+typedef struct _PVRSRV_POWER_DEV_TAG_ PVRSRV_POWER_DEV;
+
+typedef IMG_BOOL (*PFN_SYS_DEV_IS_DEFAULT_STATE_OFF)(PVRSRV_POWER_DEV *psPowerDevice);
+
+
+IMG_IMPORT PVRSRV_ERROR PVRSRVPowerLock(PCPVRSRV_DEVICE_NODE psDeviceNode);
+IMG_IMPORT void PVRSRVForcedPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode);
+IMG_IMPORT void PVRSRVPowerUnlock(PCPVRSRV_DEVICE_NODE psDeviceNode);
+
+IMG_IMPORT IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE	psDeviceNode,
+										 PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										 IMG_BOOL				bForced);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode,
+											 PVRSRV_SYS_POWER_STATE ePVRState);
+
+PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+					PVRSRV_DEV_POWER_STATE eNewPowerState);
+
+/* Type PFN_DC_REGISTER_POWER */
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE		psDeviceNode,
+									   PFN_PRE_POWER				pfnDevicePrePower,
+									   PFN_POST_POWER				pfnDevicePostPower,
+									   PFN_SYS_DEV_PRE_POWER		pfnSystemPrePower,
+									   PFN_SYS_DEV_POST_POWER		pfnSystemPostPower,
+									   PFN_PRE_CLOCKSPEED_CHANGE	pfnPreClockSpeedChange,
+									   PFN_POST_CLOCKSPEED_CHANGE	pfnPostClockSpeedChange,
+									   PFN_FORCED_IDLE_REQUEST		pfnForcedIdleRequest,
+									   PFN_FORCED_IDLE_CANCEL_REQUEST	pfnForcedIdleCancelRequest,
+									   PFN_DUST_COUNT_REQUEST	pfnDustCountRequest,
+									   IMG_HANDLE					hDevCookie,
+									   PVRSRV_DEV_POWER_STATE		eCurrentPowerState,
+									   PVRSRV_DEV_POWER_STATE		eDefaultPowerState);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+									   PPVRSRV_DEV_POWER_STATE pePowerState);
+
+IMG_IMPORT
+IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+											 IMG_BOOL	bIdleDevice,
+											 void	*pvInfo);
+
+IMG_IMPORT
+void PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+									  IMG_BOOL		bIdleDevice,
+									  void		*pvInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+					PFN_SYS_DEV_IS_DEFAULT_STATE_OFF	pfnCheckIdleReq,
+					IMG_BOOL				bDeviceOffPermitted);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+PVRSRV_ERROR PVRSRVDeviceDustCountChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+						IMG_UINT32	ui32DustCount);
+
+
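+/*
+ * Illustrative sketch, not part of the original header: a plausible calling
+ * pattern for a power transition, assuming the power lock must be held
+ * around PVRSRVSetDevicePowerStateKM. Kept inside '#if 0' as it is an
+ * example only; error handling is abbreviated.
+ */
+#if 0
+static PVRSRV_ERROR ExamplePowerOff(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	PVRSRV_ERROR eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError == PVRSRV_OK)
+	{
+		/* PVRSRV_DEV_POWER_STATE_OFF is assumed to come from
+		   servicesext.h, included above */
+		eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+											 PVRSRV_DEV_POWER_STATE_OFF,
+											 IMG_FALSE /* not forced */);
+		PVRSRVPowerUnlock(psDeviceNode);
+	}
+	return eError;
+}
+#endif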
+#endif /* POWER_H */
+
+/******************************************************************************
+ End of file (power.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/process_stats.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/process_stats.h
new file mode 100644
index 0000000..9d1aba1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/process_stats.h
@@ -0,0 +1,207 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for creating and reading proc filesystem entries.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PROCESS_STATS_H__
+#define __PROCESS_STATS_H__
+
+#include <powervr/mem_types.h>
+
+#include "pvrsrv_error.h"
+#include "cache_ops.h"
+
+/*
+ *  The publishing of Process Stats is controlled by the
+ *  PVRSRV_ENABLE_PROCESS_STATS build option. The recording of all memory
+ *  allocations is controlled by the PVRSRV_ENABLE_MEMORY_STATS build option.
+ *
+ *  Note: there will be a performance degradation when memory allocation
+ *        recording is enabled!
+ */
+
+
+/*
+ *  Memory types which can be tracked...
+ */
+typedef enum {
+    PVRSRV_MEM_ALLOC_TYPE_KMALLOC,				/* memory allocated by kmalloc() */
+    PVRSRV_MEM_ALLOC_TYPE_VMALLOC,				/* memory allocated by vmalloc() */
+    PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,	/* pages allocated from UMA to hold page table information */
+    PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,			/* ALLOC_PAGES_PT_UMA mapped to kernel address space */
+    PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,	/* pages allocated from LMA to hold page table information */
+    PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,		/* ALLOC_PAGES_PT_LMA mapped to kernel address space */
+    PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,		/* pages allocated from LMA */
+    PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,		/* pages allocated from UMA */
+    PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,	/* mapped UMA/LMA pages  */
+    PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES,		/* pages in the page pool */
+
+    /* Must be the last enum... */
+    PVRSRV_MEM_ALLOC_TYPE_COUNT
+} PVRSRV_MEM_ALLOC_TYPE;
+
+
+/*
+ * Functions for managing the processes recorded...
+ */
+PVRSRV_ERROR  PVRSRVStatsInitialise(void);
+
+void  PVRSRVStatsDestroy(void);
+
+PVRSRV_ERROR  PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats);
+
+void  PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats);
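+
+/*
+ * Illustrative sketch, not part of the original header: the expected
+ * lifecycle is to register a stats handle once per process and deregister
+ * it on teardown. Kept inside '#if 0' as it is an example only.
+ */
+#if 0
+static PVRSRV_ERROR ExampleProcessStatsLifecycle(void)
+{
+	IMG_HANDLE hProcessStats;
+	PVRSRV_ERROR eError = PVRSRVStatsRegisterProcess(&hProcessStats);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	/* ... statistics are recorded against this process while it lives ... */
+	PVRSRVStatsDeregisterProcess(hProcessStats);
+	return PVRSRV_OK;
+}
+#endif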
+
+#define MAX_POWER_STAT_ENTRIES		51
+
+/*
+ * Functions for recording the statistics...
+ */
+void  PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								   void *pvCpuVAddr,
+								   IMG_CPU_PHYADDR sCpuPAddr,
+								   size_t uiBytes,
+								   void *pvPrivateData);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+void  _PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+									void *pvCpuVAddr,
+									IMG_CPU_PHYADDR sCpuPAddr,
+									size_t uiBytes,
+									void *pvPrivateData,
+									void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine);
+#endif
+void  PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+									  IMG_UINT64 ui64Key);
+
+void PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								 size_t uiBytes);
+/*
+ * Increases the memory stat for eAllocType. Tracks the allocation size value
+ * by inserting a value into a hash table with uiCpuVAddr as key.
+ * Pair with PVRSRVStatsDecrMemAllocStatAndUntrack().
+ */
+void PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+										 size_t uiBytes,
+										 IMG_UINT64 uiCpuVAddr);
+
+void PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								 size_t uiBytes);
+
+void PVRSRVStatsDecrMemKAllocStat(size_t uiBytes,
+								  IMG_PID decrPID);
+
+/*
+ * Decrease the memory stat for eAllocType. Takes the allocation size value from the
+ * hash table with uiCpuVAddr as key. Pair with PVRSRVStatsIncrMemAllocStatAndTrack().
+ */
+void PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                           IMG_UINT64 uiCpuVAddr);
+
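+/*
+ * Illustrative sketch, not part of the original header: the Track/Untrack
+ * pair must use the same uiCpuVAddr key so the size recorded on increment
+ * can be looked up again on decrement. The wrapper functions and the use of
+ * uintptr_t are assumptions. Kept inside '#if 0' as it is an example only.
+ */
+#if 0
+static void ExampleTrackedAlloc(void *pvAlloc, size_t uiBytes)
+{
+	PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+										uiBytes,
+										(IMG_UINT64)(uintptr_t)pvAlloc);
+}
+
+static void ExampleTrackedFree(void *pvAlloc)
+{
+	/* the same key reverses the accounting made at allocation time */
+	PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+										  (IMG_UINT64)(uintptr_t)pvAlloc);
+}
+#endif
+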
+void
+PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes);
+
+void
+PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes);
+
+void  PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+										  IMG_UINT32 ui32TotalNumOutOfMemory,
+										  IMG_UINT32 ui32TotalTAStores,
+										  IMG_UINT32 ui32Total3DStores,
+										  IMG_UINT32 ui32TotalSHStores,
+										  IMG_UINT32 ui32TotalCDMStores,
+										  IMG_PID owner);
+
+void  PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+									 IMG_UINT32 ui32NumReqByFW,
+									 IMG_PID owner);
+
+void  PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+									 IMG_UINT32 ui32NumGrowReqByFW,
+									 IMG_UINT32 ui32InitFLPages,
+									 IMG_UINT32 ui32NumHighPages,
+									 IMG_PID	ownerPid);
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+void  PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp,
+									IMG_UINT32 ui32OpSeqNum,
+#if defined(PVR_RI_DEBUG)  && defined(DEBUG)
+									IMG_DEV_VIRTADDR sDevVAddr,
+									IMG_DEV_PHYADDR sDevPAddr,
+									IMG_UINT32 eFenceOpType,
+#endif
+									IMG_DEVMEM_SIZE_T uiOffset,
+									IMG_DEVMEM_SIZE_T uiSize,
+									IMG_UINT64 ui64ExecuteTimeMs,
+									IMG_BOOL bRangeBasedFlush,
+									IMG_BOOL bUserModeFlush,
+									IMG_BOOL bIsFence,
+									IMG_PID ownerPid);
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+/* Update pre/post power transition timing statistics */
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+                              IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+                              IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower);
+
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer);
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer);
+#else
+/* Update pre/post power transition timing statistics */
+static inline
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+                              IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+                              IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower) {}
+static inline
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer) {}
+
+static inline
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) {}
+#endif
+
+void SetFirmwareStartTime(IMG_UINT32 ui32TimeStamp);
+
+void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration);
+
+/* Functions used for calculating the memory usage statistics of a process */
+PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize,
+                                       IMG_BOOL bAllProcessStats, IMG_UINT32 *ui32MemoryStats);
+
+#endif /* __PROCESS_STATS_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvr_notifier.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvr_notifier.h
new file mode 100644
index 0000000..93bb9e7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvr_notifier.h
@@ -0,0 +1,248 @@
+/**************************************************************************/ /*!
+@File
+@Title          PowerVR notifier interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__PVR_NOTIFIER_H__)
+#define __PVR_NOTIFIER_H__
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+
+/**************************************************************************/ /*!
+Command Complete Notifier Interface
+*/ /***************************************************************************/
+
+typedef IMG_HANDLE PVRSRV_CMDCOMP_HANDLE;
+#ifndef _CMDCOMPNOTIFY_PFN_
+typedef void (*PFN_CMDCOMP_NOTIFY)(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+#define _CMDCOMPNOTIFY_PFN_
+#endif
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCmdCompleteInit
+@Description    Performs initialisation of the command complete notifier
+                interface.
+@Return         PVRSRV_ERROR         PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVCmdCompleteInit(void);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCmdCompleteDeinit
+@Description    Performs cleanup for the command complete notifier interface.
+@Return         void
+*/ /***************************************************************************/
+void
+PVRSRVCmdCompleteDeinit(void);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRegisterCmdCompleteNotify
+@Description    Register a callback function that is called when some device
+                finishes some work, which is signalled via a call to
+                PVRSRVCheckStatus.
+@Output         phNotify             On success, points to command complete
+                                     notifier handle
+@Input          pfnCmdCompleteNotify Function callback
+@Input          hPrivData            Data to be passed back to the caller via
+                                     the callback function
+@Return         PVRSRV_ERROR         PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
+								PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
+								PVRSRV_CMDCOMP_HANDLE hPrivData);
+
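+/*
+ * Illustrative sketch, not part of the original header: registering a
+ * command-complete callback and unregistering it on teardown. The callback
+ * body and private data are hypothetical. Kept inside '#if 0' as it is an
+ * example only.
+ */
+#if 0
+static void ExampleCmdCompleteCB(PVRSRV_CMDCOMP_HANDLE hPriv)
+{
+	/* some device finished some work; re-examine queues owned by hPriv */
+}
+
+static PVRSRV_ERROR ExampleRegister(void *pvPriv, IMG_HANDLE *phNotify)
+{
+	/* pair with PVRSRVUnregisterCmdCompleteNotify(*phNotify) on teardown */
+	return PVRSRVRegisterCmdCompleteNotify(phNotify,
+										   ExampleCmdCompleteCB,
+										   (PVRSRV_CMDCOMP_HANDLE)pvPriv);
+}
+#endif
+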
+/**************************************************************************/ /*!
+@Function       PVRSRVUnregisterCmdCompleteNotify
+@Description    Unregister a previously registered callback function.
+@Input          hNotify              Command complete notifier handle
+@Return         PVRSRV_ERROR         PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCheckStatus
+@Description    Notify any registered command complete handlers that some work
+                has been finished (unless hCmdCompCallerHandle matches a
+                handler's hPrivData). Also signal the global event object.
+@Input          hCmdCompCallerHandle Used to prevent a handler from being
+                                     notified. A NULL value results in all
+                                     handlers being notified.
+*/ /***************************************************************************/
+void
+PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle);
+
+
+/**************************************************************************/ /*!
+Debug Notifier Interface
+*/ /***************************************************************************/
+
+#define DEBUG_REQUEST_DC                0
+#define DEBUG_REQUEST_SERVERSYNC        1
+#define DEBUG_REQUEST_SYS               2
+#define DEBUG_REQUEST_ANDROIDSYNC       3
+#define DEBUG_REQUEST_LINUXFENCE        4
+#define DEBUG_REQUEST_SYNCCHECKPOINT    5
+#define DEBUG_REQUEST_HTB               6
+#define DEBUG_REQUEST_APPHINT           7
+#define DEBUG_REQUEST_FALLBACKSYNC      8
+
+#define DEBUG_REQUEST_VERBOSITY_LOW		0
+#define DEBUG_REQUEST_VERBOSITY_MEDIUM	1
+#define DEBUG_REQUEST_VERBOSITY_HIGH	2
+#define DEBUG_REQUEST_VERBOSITY_MAX		DEBUG_REQUEST_VERBOSITY_HIGH
+
+/*
+ * Macro used within debug dump functions to send output either to PVR_LOG or
+ * a custom function. The custom function should be stored as a function pointer
+ * in a local variable called 'pfnDumpDebugPrintf'. 'pvDumpDebugFile' is also
+ * required as a local variable to serve as a file identifier for the printf
+ * function if required.
+ */
+#define PVR_DUMPDEBUG_LOG(...)                                            \
+	do                                                                \
+	{                                                                 \
+		if (pfnDumpDebugPrintf)                                   \
+			pfnDumpDebugPrintf(pvDumpDebugFile, __VA_ARGS__); \
+		else                                                      \
+			PVR_LOG((__VA_ARGS__));                           \
+	} while(0)
+
+struct _PVRSRV_DEVICE_NODE_;
+
+typedef IMG_HANDLE PVRSRV_DBGREQ_HANDLE;
+#ifndef _DBGNOTIFY_PFNS_
+typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile,
+					const IMG_CHAR *pszFormat, ...);
+typedef void (*PFN_DBGREQ_NOTIFY)(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+					IMG_UINT32 ui32VerbLevel,
+					DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+					void *pvDumpDebugFile);
+#define _DBGNOTIFY_PFNS_
+#endif
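+
+/*
+ * Illustrative sketch, not part of the original header: PVR_DUMPDEBUG_LOG
+ * expects the two conventionally named locals below to exist in the calling
+ * function; when pfnDumpDebugPrintf is NULL it falls back to PVR_LOG. Kept
+ * inside '#if 0' as it is an example only.
+ */
+#if 0
+static void ExampleDumpDebug(DUMPDEBUG_PRINTF_FUNC *pfnPrintf, void *pvFile)
+{
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = pfnPrintf;
+	void *pvDumpDebugFile = pvFile;
+
+	PVR_DUMPDEBUG_LOG("example state: %u", 0);
+}
+#endif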
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRegisterDbgTable
+@Description    Registers a debug requester table for the given device. The
+                order in which the debug requester IDs appear in the given
+                table determine the order in which a set of notifier callbacks
+                will be called. In other words, the requester ID that appears
+                first will have all of its associated debug notifier callbacks
+                called first. This will then be followed by all the callbacks
+                associated with the next requester ID in the table and so on.
+@Input          psDevNode     Device node with which to register requester table
+@Input          paui32Table   Array of requester IDs
+@Input          ui32Length    Number of elements in paui32Table
+@Return         PVRSRV_ERROR  PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+					   IMG_UINT32 *paui32Table, IMG_UINT32 ui32Length);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVUnregisterDbgTable
+@Description    Unregisters a debug requester table.
+@Input          psDevNode     Device node for which the requester table should
+                              be unregistered
+@Return         void
+*/ /***************************************************************************/
+void
+PVRSRVUnregisterDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRegisterDbgRequestNotify
+@Description    Register a callback function that is called when a debug
+                request is made via a call to PVRSRVDebugRequest. There are
+                a number of verbosity levels ranging from
+                DEBUG_REQUEST_VERBOSITY_LOW up to
+                DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once
+                for each level up to the highest level specified to
+                PVRSRVDebugRequest.
+@Output         phNotify             On success, points to debug notifier handle
+@Input          psDevNode            Device node for which the debug callback
+                                     should be registered
+@Input          pfnDbgRequestNotify  Function callback
+@Input          ui32RequesterID      Requester ID. This is used to determine
+                                     the order in which callbacks are called
+@Input          hDbgRequestHandle    Data to be passed back to the caller via
+                                     the callback function
+@Return         PVRSRV_ERROR         PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify,
+							   struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+							   PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+							   IMG_UINT32 ui32RequesterID,
+							   PVRSRV_DBGREQ_HANDLE hDbgRequestHandle);
+
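+/*
+ * Illustrative sketch, not part of the original header: a debug notifier
+ * that only emits detail at higher verbosity levels. The device node,
+ * requester ID choice and private data are hypothetical. Kept inside
+ * '#if 0' as it is an example only.
+ */
+#if 0
+static void ExampleDbgNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+							 IMG_UINT32 ui32VerbLevel,
+							 DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+							 void *pvDumpDebugFile)
+{
+	if (ui32VerbLevel >= DEBUG_REQUEST_VERBOSITY_MEDIUM)
+	{
+		PVR_DUMPDEBUG_LOG("extra debug state for %p", hDebugRequestHandle);
+	}
+}
+
+/* Registration, e.g. during device initialisation:
+ *	PVRSRVRegisterDbgRequestNotify(&hNotify, psDevNode, ExampleDbgNotify,
+ *								   DEBUG_REQUEST_SYS, pvPrivateData);
+ */
+#endif
+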
+/**************************************************************************/ /*!
+@Function       PVRSRVUnregisterDbgRequestNotify
+@Description    Unregister a previously registered callback function.
+@Input          hNotify              Debug notifier handle.
+@Return         PVRSRV_ERROR         PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDebugRequest
+@Description    Notify any registered debug request handlers that a debug
+                request has been made and at what level.
+@Input          psDevNode           Device node for which the debug request has
+                                    been made
+@Input          ui32VerbLevel       The maximum verbosity level to dump
+@Input          pfnDumpDebugPrintf  Used to specify the print function that
+                                    should be used to dump any debug
+                                    information. If this argument is NULL then
+                                    PVR_LOG() will be used as the default print
+                                    function.
+@Input          pvDumpDebugFile     Optional file identifier to be passed to
+                                    the print function if required.
+@Return         void
+*/ /***************************************************************************/
+void
+PVRSRVDebugRequest(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+				   IMG_UINT32 ui32VerbLevel,
+				   DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				   void *pvDumpDebugFile);
+
+#endif /* !defined(__PVR_NOTIFIER_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv.h
new file mode 100644
index 0000000..9fc1e59
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv.h
@@ -0,0 +1,548 @@
+/**************************************************************************/ /*!
+@File
+@Title          PowerVR services server header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef PVRSRV_H
+#define PVRSRV_H
+
+
+#include "connection_server.h"
+#include "device.h"
+#include "power.h"
+#include "sysinfo.h"
+#include "physheap.h"
+#include "cache_ops.h"
+#include "pvr_notifier.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "pvrsrv_pool.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#include "dma_support.h"
+#include "vz_support.h"
+#include "vz_physheap.h"
+
+/*!
+ * For OSThreadDestroy(), which may require a retry
+ * Try for 100 ms to destroy an OS thread before failing
+ */
+#define OS_THREAD_DESTROY_TIMEOUT_US 100000ULL
+#define OS_THREAD_DESTROY_RETRY_COUNT 10
+
+typedef enum _VMM_CONF_PARAM_
+{
+	VMM_CONF_PRIO_OSID0 = 0,
+	VMM_CONF_PRIO_OSID1 = 1,
+	VMM_CONF_PRIO_OSID2 = 2,
+	VMM_CONF_PRIO_OSID3 = 3,
+	VMM_CONF_PRIO_OSID4 = 4,
+	VMM_CONF_PRIO_OSID5 = 5,
+	VMM_CONF_PRIO_OSID6 = 6,
+	VMM_CONF_PRIO_OSID7 = 7,
+	VMM_CONF_ISOL_THRES = 8,
+	VMM_CONF_HCS_DEADLINE = 9
+} VMM_CONF_PARAM;
+
+typedef struct _BUILD_INFO_
+{
+	IMG_UINT32	ui32BuildOptions;
+	IMG_UINT32	ui32BuildVersion;
+	IMG_UINT32	ui32BuildRevision;
+	IMG_UINT32	ui32BuildType;
+#define BUILD_TYPE_DEBUG	0
+#define BUILD_TYPE_RELEASE	1
+	/* The above fields are self-explanatory. */
+	/* B.V.N.C can be added later if required. */
+} BUILD_INFO;
+
+typedef struct _DRIVER_INFO_
+{
+	BUILD_INFO	sUMBuildInfo;
+	BUILD_INFO	sKMBuildInfo;
+	IMG_UINT8	ui8UMSupportedArch;
+	IMG_UINT8	ui8KMBitArch;
+
+#define	BUILD_ARCH_64BIT			(1 << 0)
+#define	BUILD_ARCH_32BIT			(1 << 1)
+#define	BUILD_ARCH_BOTH		(BUILD_ARCH_32BIT | BUILD_ARCH_64BIT)
+	IMG_BOOL	bIsNoMatch;
+} DRIVER_INFO;
+
+typedef struct PVRSRV_DATA_TAG
+{
+	PVRSRV_DRIVER_MODE			eDriverMode;				/*!< Driver mode (i.e. native, host or guest) */
+	DRIVER_INFO					sDriverInfo;
+	IMG_UINT32					ui32RegisteredDevices;
+	PVRSRV_DEVICE_NODE			*psDeviceNodeList;			/*!< List head of device nodes */
+	PVRSRV_DEVICE_NODE			*psHostMemDeviceNode;		/*!< DeviceNode to be used for device independent
+	                                                             host based memory allocations where the DevMem
+	                                                             framework is to be used e.g. TL */
+	PVRSRV_SERVICES_STATE		eServicesState;				/*!< global driver state */
+
+	HASH_TABLE					*psProcessHandleBase_Table; /*!< Hash table with process handle bases */
+	POS_LOCK					hProcessHandleBase_Lock;	/*!< Lock for the process handle base table */
+
+	IMG_HANDLE					hGlobalEventObject;			/*!< OS Global Event Object */
+	IMG_UINT32					ui32GEOConsecutiveTimeouts;	/*!< OS Global Event Object Timeouts */
+
+	IMG_HANDLE					hCleanupThread;				/*!< Cleanup thread */
+	IMG_HANDLE					hCleanupEventObject;		/*!< Event object to drive cleanup thread */
+	POS_LOCK					hCleanupThreadWorkListLock;	/*!< Lock protecting the cleanup thread work list */
+	DLLIST_NODE					sCleanupThreadWorkList;		/*!< List of work for the cleanup thread */
+	IMG_PID						cleanupThreadPid;			/*!< Cleanup thread process id */
+
+	IMG_HANDLE					hDevicesWatchdogThread;		/*!< Devices watchdog thread */
+	IMG_HANDLE					hDevicesWatchdogEvObj;		/*!< Event object to drive devices watchdog thread */
+	volatile IMG_UINT32			ui32DevicesWatchdogPwrTrans;/*!< Number of off -> on power state transitions */
+	volatile IMG_UINT32			ui32DevicesWatchdogTimeout; /*!< Timeout for the devices watchdog thread */
+#ifdef PVR_TESTING_UTILS
+	volatile IMG_UINT32			ui32DevicesWdWakeupCounter;	/* Need this for the unit tests. */
+#endif
+
+	IMG_HANDLE					hPvzConnection;				/*!< PVZ connection used for cross-VM hyper-calls */
+	POS_LOCK					hPvzConnectionLock;			/*!< Lock protecting PVZ connection */
+	IMG_BOOL					abVmOnline[RGXFW_NUM_OS];
+
+	IMG_BOOL					bUnload;					/*!< Driver unload is in progress */
+
+	IMG_HANDLE					hTLCtrlStream;				/*!< Control plane for TL streams */
+
+	PVRSRV_POOL					*psBridgeBufferPool;		/*!< Pool of bridge buffers */
+	IMG_HANDLE					hDriverThreadEventObject;	/*!< Event object relating to multi-threading in the Server */
+	POS_LOCK					hDriverThreadLock;			/*!< Lock to protect bDriverSuspended and iNumActiveDriverThreads */
+	IMG_BOOL					bDriverSuspended;			/*!< If TRUE, the driver is suspended and new threads should not enter */
+	ATOMIC_T					iNumActiveDriverThreads;	/*!< Number of threads active in the Server */
+
+	PMR							*psInfoPagePMR;				/*!< Handle to exportable PMR of the information page. */
+	IMG_UINT32					*pui32InfoPage;				/*!< CPU memory mapping for information page. */
+	DEVMEM_MEMDESC				*psInfoPageMemDesc;			/*!< Memory descriptor of the information page. */
+	POS_LOCK					hInfoPageLock;				/*!< Lock guarding access to information page. */
+} PVRSRV_DATA;
+
+typedef IMG_BOOL (*PFN_LISR)(void *pvData);
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVGetPVRSRVData
+
+ @Description	Get a pointer to the global data
+
+ @Return   PVRSRV_DATA *
+
+******************************************************************************/
+PVRSRV_DATA *PVRSRVGetPVRSRVData(void);
+
+/*!
+******************************************************************************************
+@Note   Kernel code must always query the driver mode using the PVRSRV_VZ_MODE_IS() macro
+		_only_; PVRSRV_DATA->eDriverMode must not be read directly, because on non-VZ
+		capable BVNCs the field is also overloaded to carry the driver OSID (not to be
+		confused with the hardware kick register OSID). On such BVNCs the driver has to
+		simulate OSID propagation to the firmware, since the hardware kick register does
+		not propagate the OSID itself.
+******************************************************************************************/
+#define PVRSRV_VZ_MODE_IS(_expr)              (((((IMG_INT)(_expr)) > 0) && (((IMG_INT)PVRSRVGetPVRSRVData()->eDriverMode) > 0)) ? \
+                                                   (IMG_TRUE) : ((_expr) == (PVRSRVGetPVRSRVData()->eDriverMode)))
+#define PVRSRV_VZ_RETN_IF_MODE(_expr)         do { if (  PVRSRV_VZ_MODE_IS(_expr)) { return; } } while(0)
+#define PVRSRV_VZ_RETN_IF_NOT_MODE(_expr)     do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return; } } while(0)
+#define PVRSRV_VZ_RET_IF_MODE(_expr, _rc)     do { if (  PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while(0)
+#define PVRSRV_VZ_RET_IF_NOT_MODE(_expr, _rc) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while(0)
+#define PVRSRV_VZ_DRIVER_OSID                 (((IMG_INT)PVRSRVGetPVRSRVData()->eDriverMode) > (0) ? \
+												   ((IMG_UINT32)(PVRSRVGetPVRSRVData()->eDriverMode)) : (0))
+
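+/*
+ * Illustrative sketch, not part of the original header: guarding a code
+ * path so it is skipped when running as a guest. DRIVER_MODE_GUEST comes
+ * from the driver-mode enumeration; the error code used here is an
+ * assumption. Kept inside '#if 0' as it is an example only.
+ */
+#if 0
+static PVRSRV_ERROR ExampleHostOnlyOperation(void)
+{
+	/* returns early with the given code when the driver runs as a guest */
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+	/* ... work that only the host/native driver may perform ... */
+	return PVRSRV_OK;
+}
+#endif
+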
+/*!
+************************************************************************************************
+@Note	The driver execution mode AppHint (i.e. PVRSRV_APPHINT_DRIVERMODE) can be an override or
+		non-override 32-bit value. An override value has the MSB bit set & a non-override value
+		has this MSB bit cleared. Excluding this MSB bit & interpreting the remaining 31-bit as
+		a signed 31-bit integer, the mode values are [-1 native <default>: 0 host : +1 guest ].
+************************************************************************************************/
+#define PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(_expr)   ((IMG_UINT32)(_expr)&(IMG_UINT32)(1<<31))
+#define PVRSRV_VZ_APPHINT_MODE(_expr)				\
+	((((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) == (IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_NATIVE : \
+		!((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_HOST : \
+			((IMG_UINT32)((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF)==(IMG_UINT32)0x1) ? DRIVER_MODE_GUEST : \
+				((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF))
+
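+/*
+ * Worked examples (annotation, not part of the original header), following
+ * the encoding described in the note above:
+ *   0x7FFFFFFF (low 31 bits all set, i.e. -1) -> DRIVER_MODE_NATIVE
+ *   0x00000000                                -> DRIVER_MODE_HOST
+ *   0x00000001                                -> DRIVER_MODE_GUEST
+ *   0x80000001 decodes to the same mode as 0x00000001, but with the MSB set
+ *   PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE() reports it as an override.
+ */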
+
+/*!
+******************************************************************************
+ @Function      PVRSRVSuspendDriver
+ @Description   This function will wait for any existing threads in the Server
+                to exit and then suspend the driver. New threads will not be
+                allowed to enter the Server until the driver is
+                unsuspended (see PVRSRVUnsuspendDriver)
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSuspendDriver(void);
+
+/*!
+******************************************************************************
+ @Function      PVRSRVUnsuspendDriver
+ @Description   This function will unsuspend the Server and allow any threads
+                waiting to enter the Server to continue.
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVUnsuspendDriver(void);
+
+/*!
+******************************************************************************
+ @Function      PVRSRVDriverThreadEnter
+ @Description   Inform Services Server a new thread is entering the Server.
+                If the Server is currently suspended, this function will block
+                until the Server is unsuspended.
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDriverThreadEnter(void);
+
+/*!
+******************************************************************************
+ @Function      PVRSRVDriverThreadExit
+ @Description   Inform Services Server a thread which previously entered the
+                Server (and called PVRSRVDriverThreadEnter) is now exiting.
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDriverThreadExit(void);
+
+/*!
+******************************************************************************
+
+ @Function	LMA memory management API
+
+******************************************************************************/
+PVRSRV_ERROR LMA_PhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+							PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+void LMA_PhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle);
+
+PVRSRV_ERROR LMA_PhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+							size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+							void **pvPtr);
+
+void LMA_PhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+					void *pvPtr);
+
+PVRSRV_ERROR LMA_PhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+                                     PG_HANDLE *psMemHandle,
+                                     IMG_UINT32 uiOffset,
+                                     IMG_UINT32 uiLength);
+
+
+/*!
+******************************************************************************
+ @Function	PVRSRVPollForValueKM
+
+ @Description
+ Polls for a value to match a masked read
+
+ @Input pui32LinMemAddr : CPU linear address to poll
+ @Input ui32Value : required value
+ @Input ui32Mask : Mask
+
+ @Return   PVRSRV_ERROR :
+******************************************************************************/
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVPollForValueKM(volatile IMG_UINT32	*pui32LinMemAddr,
+														  IMG_UINT32			ui32Value,
+														  IMG_UINT32			ui32Mask);
+
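+/*
+ * Illustrative sketch, not part of the original header: polling a
+ * hypothetical CPU-mapped status word until bit 0 reads as set. Kept inside
+ * '#if 0' as it is an example only.
+ */
+#if 0
+static PVRSRV_ERROR ExamplePollBitZero(volatile IMG_UINT32 *pui32Status)
+{
+	/* blocks (polling) until (*pui32Status & 0x1) == 0x1, or times out */
+	return PVRSRVPollForValueKM(pui32Status,
+								0x1,	/* required value */
+								0x1);	/* mask: test bit 0 only */
+}
+#endif
+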
+/*!
+******************************************************************************
+ @Function	PVRSRVWaitForValueKM
+
+ @Description
+ Waits (using EventObjects) for a value to match a masked read
+
+ @Input pui32LinMemAddr			: CPU linear address to poll
+ @Input ui32Value				: required value
+ @Input ui32Mask				: Mask
+
+ @Return   PVRSRV_ERROR :
+******************************************************************************/
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKM(volatile IMG_UINT32	*pui32LinMemAddr,
+														IMG_UINT32			ui32Value,
+														IMG_UINT32			ui32Mask);
+
+/*!
+******************************************************************************
+ @Function	PVRSRVWaitForValueKMAndHoldBridgeLockKM
+
+ @Description
+ Waits without releasing bridge lock (using EventObjects) for a value
+ to match a masked read
+
+ @Input pui32LinMemAddr			: CPU linear address to poll
+ @Input ui32Value				: required value
+ @Input ui32Mask				: Mask
+
+ @Return   PVRSRV_ERROR :
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKMAndHoldBridgeLockKM(volatile IMG_UINT32 *pui32LinMemAddr,
+                                                                  IMG_UINT32          ui32Value,
+                                                                  IMG_UINT32          ui32Mask);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVSystemHasCacheSnooping
+
+ @Description	: Returns whether the system has cache snooping
+
+ @Return : IMG_TRUE if the system has cache snooping
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVSystemSnoopingIsEmulated
+
+ @Description : Returns whether system cache snooping support is emulated
+
+ @Return : IMG_TRUE if the system cache snooping is emulated in software
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVSystemSnoopingOfCPUCache
+
+ @Description	: Returns whether the system supports snooping of the CPU cache
+
+ @Return : IMG_TRUE if the system has CPU cache snooping
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVSystemSnoopingOfDeviceCache
+
+ @Description	: Returns whether the system supports snooping of the device cache
+
+ @Return : IMG_TRUE if the system has device cache snooping
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVSystemHasNonMappableLocalMemory
+
+ @Description	: Returns whether the device has non-mappable part of local memory
+
+ @Return : IMG_TRUE if the device has non-mappable part of local memory
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVSystemWaitCycles
+
+ @Description	: Waits for at least ui32Cycles of the device clock.
+
+*****************************************************************************/
+void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles);
+
+PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice,
+										   IMG_UINT32 ui32IRQ,
+										   const IMG_CHAR *pszName,
+										   PFN_LISR pfnLISR,
+										   void *pvData,
+										   IMG_HANDLE *phLISRData);
+
+PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData);
+
+int PVRSRVGetDriverStatus(void);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVIsBridgeEnabled
+
+ @Description	: Returns whether the given bridge group is enabled
+
+ @Return : IMG_TRUE if the given bridge group is enabled
+*****************************************************************************/
+static inline IMG_BOOL PVRSRVIsBridgeEnabled(IMG_HANDLE hServices, IMG_UINT32 ui32BridgeGroup)
+{
+	PVR_UNREFERENCED_PARAMETER(hServices);
+
+#if defined(SUPPORT_RGX)
+	if (ui32BridgeGroup >= PVRSRV_BRIDGE_RGX_FIRST)
+	{
+		return ((1U << (ui32BridgeGroup - PVRSRV_BRIDGE_RGX_FIRST)) &
+							gui32RGXBridges) != 0;
+	}
+	else
+#endif /* SUPPORT_RGX */
+	{
+		return ((1U << (ui32BridgeGroup - PVRSRV_BRIDGE_FIRST)) &
+							gui32PVRBridges) != 0;
+	}
+}
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVSystemBIFTilingHeapGetXStride
+
+ @Description	: return the default x-stride configuration for the given
+                  BIF tiling heap number
+
+ @Input psDevConfig: Pointer to a device config
+
+ @Input uiHeapNum: BIF tiling heap number, starting from 1
+
+ @Output puiXStride: pointer to x-stride output of the requested heap
+
+*****************************************************************************/
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingHeapGetXStride(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									IMG_UINT32 uiHeapNum,
+									IMG_UINT32 *puiXStride);
+
+/*!
+*****************************************************************************
+ @Function              : PVRSRVSystemBIFTilingGetConfig
+
+ @Description           : return the BIF tiling mode and number of BIF
+                          tiling heaps for the given device config
+
+ @Input psDevConfig     : Pointer to a device config
+
+ @Output peBifTilingMode: Pointer to a BIF tiling mode enum
+
+ @Output puiNumHeaps    : pointer to uint to hold number of heaps
+
+*****************************************************************************/
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingGetConfig(PVRSRV_DEVICE_CONFIG  *psDevConfig,
+                               RGXFWIF_BIFTILINGMODE *peBifTilingMode,
+                               IMG_UINT32            *puiNumHeaps);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*!
+***********************************************************************************
+ @Function				: PopulateLMASubArenas
+
+ @Description			: Uses the AppHints passed by the client at initialisation
+						  time to add the bases and sizes of the various arenas in
+						  the LMA memory
+
+ @Input psDeviceNode	: Pointer to the device node struct containing all the
+						  arena information
+
+ @Input aui32OSidMin	: Two-dimensional array containing the minimum values
+						  for each OSid area in each region
+
+ @Input aui32OSidMax	: Two-dimensional array containing the maximum values
+						  for each OSid area in each region
+***********************************************************************************/
+
+void PopulateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode,
+						  IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+						  IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
+
+#if defined(EMULATOR)
+	void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState);
+	void SetTrustedDeviceAceEnabled(void);
+#endif
+
+#endif
+
+
+/*!
+******************************************************************************
+
+ @Function			PVRSRVVzDeviceCreate
+
+ @Description 		Performs additional device creation
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR PVRSRVVzDeviceCreate(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function			PVRSRVVzDeviceDestroy
+
+ @Description 		Performs additional device destruction
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR PVRSRVVzDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function			PVRSRVVzRegisterFirmwarePhysHeap
+
+ @Description 		Request to map a physical heap to kernel FW memory context
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR PVRSRVVzRegisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+											  IMG_DEV_PHYADDR sDevPAddr,
+											  IMG_UINT64 ui64DevPSize,
+											  IMG_UINT32 uiOSID);
+
+/*!
+******************************************************************************
+
+ @Function			PVRSRVVzUnregisterFirmwarePhysHeap
+
+ @Description 		Request to unmap a physical heap from kernel FW memory context
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR PVRSRVVzUnregisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+												IMG_UINT32 uiOSID);
+
+#endif /* PVRSRV_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_apphint.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_apphint.h
new file mode 100644
index 0000000..43eccef
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_apphint.h
@@ -0,0 +1,66 @@
+/**************************************************************************/ /*!
+@File
+@Title          PowerVR AppHint generic interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__PVRSRV_APPHINT_H__)
+#define __PVRSRV_APPHINT_H__
+
+#if defined(LINUX)
+
+#include "km_apphint.h"
+#define PVRSRVAppHintDumpState() pvr_apphint_dump_state()
+#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) pvr_apphint_register_handlers_uint64(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) pvr_apphint_register_handlers_uint32(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) pvr_apphint_register_handlers_bool(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) pvr_apphint_register_handlers_string(i,q,s,d,p)
+
+#else
+
+#define PVRSRVAppHintDumpState()
+#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p)
+
+#endif
+
+#endif /* !defined(__PVRSRV_APPHINT_H__) */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_cleanup.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_cleanup.h
new file mode 100644
index 0000000..fc0d432
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_cleanup.h
@@ -0,0 +1,77 @@
+/**************************************************************************/ /*!
+@File
+@Title          PowerVR SrvKM cleanup thread deferred work interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef PVRSRV_CLEANUP_H
+#define PVRSRV_CLEANUP_H
+
+typedef PVRSRV_ERROR (*CLEANUP_THREAD_FN)(void *pvParam);
+
+/* Typical number of times a caller should want the work to be retried should
+ * the callback function (pfnFree) return an error.
+ * Callers of PVRSRVCleanupThreadAddWork should provide this value as the retry
+ * count (ui32RetryCount) unless there are special requirements.
+ * A value of 6000 corresponds to around 10 minutes.
+ */
+#define CLEANUP_THREAD_RETRY_COUNT_DEFAULT 6000
+
+typedef struct _PVRSRV_CLEANUP_THREAD_WORK_
+{
+	DLLIST_NODE sNode; /*!< list node to attach to the cleanup thread work list */
+	CLEANUP_THREAD_FN pfnFree; /*!< function to be called */
+	void *pvData; /*!< private data for pfnFree */
+	IMG_UINT32 ui32RetryCount; /*!< number of times the callback should be re-tried when it returns error */
+	IMG_BOOL bDependsOnHW; /*!< whether the work depends on further hardware progress before it can complete (assumed semantics) */
+} PVRSRV_CLEANUP_THREAD_WORK;
+
+/*!
+******************************************************************************
+ @Function                PVRSRVCleanupThreadAddWork
+
+ @Description             Add a work item to be called from the cleanup thread
+
+ @Input psData          : The function pointer and private data for the callback
+
+ @Return                  None
+******************************************************************************/
+void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData);
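+
+/* Example of usage (illustrative sketch only; MyFreeCallback, pvMyResource and
+ * psWork are assumptions, not part of this interface):
+ *
+ * static PVRSRV_ERROR MyFreeCallback(void *pvParam)
+ * {
+ *	// Release the resource referenced by pvParam; return PVRSRV_OK on
+ *	// success, or an error to have the cleanup thread retry later.
+ *	return PVRSRV_OK;
+ * }
+ *
+ * psWork->pfnFree = MyFreeCallback;
+ * psWork->pvData = pvMyResource;
+ * psWork->ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT;
+ * psWork->bDependsOnHW = IMG_FALSE;
+ * PVRSRVCleanupThreadAddWork(psWork);
+ */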
+
+#endif /* PVRSRV_CLEANUP_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_device.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_device.h
new file mode 100644
index 0000000..9caafad
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_device.h
@@ -0,0 +1,295 @@
+/**************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __PVRSRV_DEVICE_H__
+#define __PVRSRV_DEVICE_H__
+
+#include "img_types.h"
+#include "physheap.h"
+#include "pvrsrv_error.h"
+#include "rgx_fwif_km.h"
+#include "servicesext.h"
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+#include "pvr_dvfs.h"
+#endif
+
+typedef struct _PVRSRV_DEVICE_CONFIG_ PVRSRV_DEVICE_CONFIG;
+typedef enum _DRIVER_MODE_
+{
+/* Do not use these enumerations directly; to query the
+   current driver mode, use the PVRSRV_VZ_MODE_IS()
+   macro */
+	DRIVER_MODE_NATIVE	= -1,
+	DRIVER_MODE_HOST	=  0,
+	DRIVER_MODE_GUEST
+} PVRSRV_DRIVER_MODE;
+
+/*
+ * All the heaps from which regular device memory allocations can be made in
+ * terms of their locality to the respective device.
+ */
+typedef enum
+{
+	PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL = 0,
+	PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL = 1,
+	PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL = 2,
+	PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL = 3,
+	PVRSRV_DEVICE_PHYS_HEAP_LAST
+} PVRSRV_DEVICE_PHYS_HEAP;
+
+typedef enum
+{
+	PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_MAPPABLE = 0,
+	PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_NON_MAPPABLE = 1,
+	PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_LAST
+} PVRSRV_DEVICE_LOCAL_MEMORY_ARENA;
+
+typedef enum _PVRSRV_DEVICE_SNOOP_MODE_
+{
+	PVRSRV_DEVICE_SNOOP_NONE = 0,
+	PVRSRV_DEVICE_SNOOP_CPU_ONLY,
+	PVRSRV_DEVICE_SNOOP_DEVICE_ONLY,
+	PVRSRV_DEVICE_SNOOP_CROSS,
+	PVRSRV_DEVICE_SNOOP_EMULATED,
+} PVRSRV_DEVICE_SNOOP_MODE;
+
+typedef IMG_UINT32
+(*PFN_SYS_DEV_CLK_FREQ_GET)(IMG_HANDLE hSysData);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_PRE_POWER)(IMG_HANDLE hSysData,
+						 PVRSRV_DEV_POWER_STATE eNewPowerState,
+						 PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+						 IMG_BOOL bForced);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_POST_POWER)(IMG_HANDLE hSysData,
+						  PVRSRV_DEV_POWER_STATE eNewPowerState,
+						  PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+						  IMG_BOOL bForced);
+
+typedef void
+(*PFN_SYS_DEV_INTERRUPT_HANDLED)(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE)(IMG_HANDLE hSysData,
+									IMG_UINT64 ui64MemSize);
+
+typedef void (*PFN_SYS_DEV_FEAT_DEP_INIT)(PVRSRV_DEVICE_CONFIG *, IMG_UINT64);
+
+typedef PVRSRV_DRIVER_MODE (*PFN_SYS_DRIVER_MODE)(void);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+
+#define PVRSRV_DEVICE_FW_CODE_REGION          (0)
+#define PVRSRV_DEVICE_FW_COREMEM_CODE_REGION  (1)
+
+typedef struct _PVRSRV_TD_FW_PARAMS_
+{
+	const void *pvFirmware;
+	IMG_UINT32 ui32FirmwareSize;
+	IMG_DEV_VIRTADDR sFWCodeDevVAddrBase;
+	IMG_DEV_VIRTADDR sFWDataDevVAddrBase;
+	RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr;
+	RGXFWIF_DEV_VIRTADDR sFWInitFWAddr;
+} PVRSRV_TD_FW_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SEND_FW_IMAGE)(IMG_HANDLE hSysData,
+						PVRSRV_TD_FW_PARAMS *psTDFWParams);
+
+typedef struct _PVRSRV_TD_POWER_PARAMS_
+{
+	IMG_DEV_PHYADDR sPCAddr; /* META-only parameter */
+
+	/* MIPS-only fields */
+	IMG_DEV_PHYADDR sGPURegAddr;
+	IMG_DEV_PHYADDR sBootRemapAddr;
+	IMG_DEV_PHYADDR sCodeRemapAddr;
+	IMG_DEV_PHYADDR sDataRemapAddr;
+} PVRSRV_TD_POWER_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SET_POWER_PARAMS)(IMG_HANDLE hSysData,
+						   PVRSRV_TD_POWER_PARAMS *psTDPowerParams);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_RGXSTART)(IMG_HANDLE hSysData);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_RGXSTOP)(IMG_HANDLE hSysData);
+
+typedef struct _PVRSRV_TD_SECBUF_PARAMS_
+{
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_ALIGN_T uiAlign;
+	IMG_CPU_PHYADDR *psSecBufAddr;
+	IMG_UINT64 *pui64SecBufHandle;
+} PVRSRV_TD_SECBUF_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SECUREBUF_ALLOC)(IMG_HANDLE hSysData,
+						  PVRSRV_TD_SECBUF_PARAMS *psTDSecBufParams);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SECUREBUF_FREE)(IMG_HANDLE hSysData,
+						 IMG_UINT64 ui64SecBufHandle);
+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
+
+struct _PVRSRV_DEVICE_CONFIG_
+{
+	/*! OS device passed to SysDevInit (linux: 'struct device') */
+	void *pvOSDevice;
+
+	/*!
+	 *! Service representation of pvOSDevice. Should be set to NULL when the
+	 *! config is created in SysDevInit. Set by Services once a device node has
+	 *! been created for this config and unset before SysDevDeInit is called.
+	 */
+	struct _PVRSRV_DEVICE_NODE_ *psDevNode;
+
+	/*! Name of the device */
+	IMG_CHAR *pszName;
+
+	/*! Version of the device (optional) */
+	IMG_CHAR *pszVersion;
+
+	/*! Register bank address */
+	IMG_CPU_PHYADDR sRegsCpuPBase;
+	/*! Register bank size */
+	IMG_UINT32 ui32RegsSize;
+	/*! Device interrupt number */
+	IMG_UINT32 ui32IRQ;
+
+	PVRSRV_DEVICE_SNOOP_MODE eCacheSnoopingMode;
+
+	/*! Device specific data handle */
+	IMG_HANDLE hDevData;
+
+	/*! System specific data that gets passed into system callback functions. */
+	IMG_HANDLE hSysData;
+
+	IMG_BOOL bHasNonMappableLocalMemory;
+
+	PHYS_HEAP_CONFIG *pasPhysHeaps;
+	IMG_UINT32 ui32PhysHeapCount;
+
+	/*!
+	 *! ID of the Physical memory heap to use.
+	 *!
+	 *! The first entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL])
+	 *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL
+	 *! flag is not set. Normally this will be the PhysHeapID of an LMA heap
+	 *! but the configuration could specify a UMA heap here (if desired).
+	 *!
+	 *! The second entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL])
+	 *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL
+	 *! flag is set. Normally this will be the PhysHeapID of a UMA heap but
+	 *! the configuration could specify an LMA heap here (if desired).
+	 *!
+	 *! The third entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL])
+	 *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_FW_LOCAL
+	 *! flag is set.
+	 *!
+	 *! The fourth entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL])
+	 *! will be used for allocations that are imported into the driver and
+	 *! are local to other devices, e.g. a display controller.
+	 *!
+	 *! In the event of there being only one Physical Heap, the configuration
+	 *! should specify the same heap details in all entries.
+	 */
+	IMG_UINT32 aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_LAST];
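+
+	/*!
+	 *! For example (illustrative only): on a system with a single UMA
+	 *! physical heap whose PhysHeapID is 0, all four entries would be set
+	 *! to the same value:
+	 *!   aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = 0;
+	 *!   aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = 0;
+	 *!   aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]  = 0;
+	 *!   aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL]  = 0;
+	 */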
+
+	RGXFWIF_BIFTILINGMODE eBIFTilingMode;
+	IMG_UINT32 *pui32BIFTilingHeapConfigs;
+	IMG_UINT32 ui32BIFTilingHeapCount;
+
+	/*!
+	 *! Callbacks to change system device power state at the beginning and end
+	 *! of a power state change (optional).
+	 */
+	PFN_SYS_DEV_PRE_POWER pfnPrePowerState;
+	PFN_SYS_DEV_POST_POWER pfnPostPowerState;
+
+	/*! Callback to obtain the clock frequency from the device (optional). */
+	PFN_SYS_DEV_CLK_FREQ_GET pfnClockFreqGet;
+
+	/*!
+	 *! Callback to handle memory budgeting. Can be used to reject allocations
+	 *! over a certain size (optional).
+	 */
+	PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	/*!
+	 *! Callback to send FW image and FW boot time parameters to the trusted
+	 *! device.
+	 */
+	PFN_TD_SEND_FW_IMAGE pfnTDSendFWImage;
+
+	/*!
+	 *! Callback to send parameters needed in a power transition to the trusted
+	 *! device.
+	 */
+	PFN_TD_SET_POWER_PARAMS pfnTDSetPowerParams;
+
+	/*! Callbacks to ping the trusted device to securely run RGXStart/Stop() */
+	PFN_TD_RGXSTART pfnTDRGXStart;
+	PFN_TD_RGXSTOP pfnTDRGXStop;
+
+	/*! Callback to request allocation/freeing of secure buffers */
+	PFN_TD_SECUREBUF_ALLOC pfnTDSecureBufAlloc;
+	PFN_TD_SECUREBUF_FREE pfnTDSecureBufFree;
+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
+
+	/*! Function that does device feature specific system layer initialisation */
+	PFN_SYS_DEV_FEAT_DEP_INIT	pfnSysDevFeatureDepInit;
+
+	/*! Function returns system layer execution environment */
+	PFN_SYS_DRIVER_MODE			pfnSysDriverMode;
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+	PVRSRV_DVFS sDVFS;
+#endif
+};
+
+#endif /* __PVRSRV_DEVICE_H__*/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_pool.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_pool.h
new file mode 100644
index 0000000..2a620f0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_pool.h
@@ -0,0 +1,69 @@
+/**************************************************************************/ /*!
+@File
+@Title          Services pool implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides a generic pool implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__PVRSRVPOOL_H__)
+#define __PVRSRVPOOL_H__
+
+typedef PVRSRV_ERROR (PVRSRV_POOL_ALLOC_FUNC)(void *pvPrivData, void **pvOut);
+typedef void (PVRSRV_POOL_FREE_FUNC)(void *pvPrivData, void *pvFreeData);
+
+typedef IMG_HANDLE PVRSRV_POOL_TOKEN;
+
+typedef struct _PVRSRV_POOL_ PVRSRV_POOL;
+
+PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc,
+					PVRSRV_POOL_FREE_FUNC *pfnFree,
+					IMG_UINT32 ui32MaxEntries,
+					const IMG_CHAR *pszName,
+					void *pvPrivData,
+					PVRSRV_POOL **ppsPool);
+
+void PVRSRVPoolDestroy(PVRSRV_POOL *psPool);
+
+PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool,
+						PVRSRV_POOL_TOKEN *hToken,
+						void **ppvDataOut);
+PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool,
+						PVRSRV_POOL_TOKEN hToken);
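+
+/* Example of usage (illustrative sketch; MyAlloc, MyFree and the local
+ * variables are assumptions, not part of this interface):
+ *
+ * PVRSRV_POOL *psPool;
+ * PVRSRV_POOL_TOKEN hToken;
+ * void *pvEntry;
+ *
+ * eError = PVRSRVPoolCreate(MyAlloc, MyFree, 8, "my_pool", pvPrivData, &psPool);
+ * eError = PVRSRVPoolGet(psPool, &hToken, &pvEntry);
+ * // ... use pvEntry ...
+ * eError = PVRSRVPoolPut(psPool, hToken);
+ * PVRSRVPoolDestroy(psPool);
+ */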
+
+#endif /* __PVRSRVPOOL_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_sync_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_sync_server.h
new file mode 100644
index 0000000..cc149e8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/pvrsrv_sync_server.h
@@ -0,0 +1,39 @@
+#ifndef _PVRSRV_SYNC_SERVER_H_
+#define _PVRSRV_SYNC_SERVER_H_
+
+#include "img_types.h"
+#include "pvrsrv_sync_km.h"
+
+#define SYNC_SW_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+#define SYNC_SW_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+
+/*****************************************************************************/
+/*                                                                           */
+/*                      SW TIMELINE SPECIFIC FUNCTIONS                       */
+/*                                                                           */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncSWTimelineFenceCreateKM(PVRSRV_TIMELINE iSWTimeline,
+                                        IMG_UINT32 ui32NextSyncPtVal,
+                                        const IMG_CHAR *pszFenceName,
+                                        PVRSRV_FENCE *piOutputFence);
+
+PVRSRV_ERROR SyncSWTimelineAdvanceKM(SYNC_TIMELINE_OBJ pvSWTimelineObj);
+
+PVRSRV_ERROR SyncSWTimelineReleaseKM(SYNC_TIMELINE_OBJ pvSWTimelineObj);
+
+PVRSRV_ERROR SyncSWTimelineFenceReleaseKM(SYNC_FENCE_OBJ pvSWFenceObj);
+
+PVRSRV_ERROR SyncSWTimelineFenceWaitKM(SYNC_FENCE_OBJ pvSWFenceObj,
+                                       IMG_UINT32 uiTimeout);
+
+PVRSRV_ERROR SyncSWGetTimelineObj(PVRSRV_TIMELINE iSWTimeline,
+                                  SYNC_TIMELINE_OBJ *ppvSWTimelineObj);
+
+PVRSRV_ERROR SyncSWGetFenceObj(PVRSRV_FENCE iSWFence,
+                               SYNC_FENCE_OBJ *ppvSWFenceObj);
+
+IMG_BOOL PVRSRVIsTimelineValidKM(PVRSRV_TIMELINE iTimeline);
+IMG_BOOL PVRSRVIsFenceValidKM(PVRSRV_FENCE iFence);
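+
+/* Example of usage (illustrative sketch; the local variables are assumptions,
+ * not part of this interface):
+ *
+ * SYNC_TIMELINE_OBJ pvSWTimelineObj;
+ * PVRSRV_FENCE iOutputFence;
+ *
+ * eError = SyncSWGetTimelineObj(iSWTimeline, &pvSWTimelineObj);
+ * eError = SyncSWTimelineFenceCreateKM(iSWTimeline, ui32NextSyncPtVal,
+ *                                      "my_sw_fence", &iOutputFence);
+ * // ... later, signal the next sync point on the timeline ...
+ * eError = SyncSWTimelineAdvanceKM(pvSWTimelineObj);
+ * eError = SyncSWTimelineReleaseKM(pvSWTimelineObj);
+ */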
+
+#endif /* _PVRSRV_SYNC_SERVER_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/ri_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/ri_server.h
new file mode 100644
index 0000000..25db065
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/ri_server.h
@@ -0,0 +1,110 @@
+/*************************************************************************/ /*!
+@File			ri_server.h
+@Title          Resource Information abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Resource Information (RI) functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RI_SERVER_H_
+#define _RI_SERVER_H_
+
+#include <img_defs.h>
+#include <ri_typedefs.h>
+#include <pmr.h>
+#include <pvrsrv_error.h>
+
+PVRSRV_ERROR RIInitKM(void);
+void RIDeInitKM(void);
+
+PVRSRV_ERROR RIPMRPhysicalBackingKM(PMR *psPMR, IMG_BOOL bHasBacking);
+
+PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR,
+					   	   	   IMG_UINT32 ui32TextASize,
+					   	   	   const IMG_CHAR ai8TextA[RI_MAX_TEXT_LEN+1],
+					   	   	   IMG_UINT64 uiLogicalSize);
+
+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR,
+					   	   	   	   IMG_UINT32 ui32TextBSize,
+					   	   	   	   const IMG_CHAR ai8TextB[RI_MAX_TEXT_LEN+1],
+					   	   	   	   IMG_UINT64 uiOffset,
+					   	   	   	   IMG_UINT64 uiSize,
+					   	   	   	   IMG_UINT64 uiBackedSize,
+					   	   	   	   IMG_BOOL bIsImport,
+					   	   	   	   IMG_BOOL bIsExportable,
+					   	   	   	   RI_HANDLE *phRIHandle);
+
+PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize,
+                                    const IMG_CHAR *psz8TextB,
+                                    IMG_UINT64 ui64Size,
+                                    IMG_UINT64 uiBackedSize,
+                                    IMG_UINT64 ui64DevVAddr,
+                                    RI_HANDLE *phRIHandle);
+
+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle,
+								   IMG_DEV_VIRTADDR sVAddr);
+
+PVRSRV_ERROR RIUpdateMEMDESCPinningKM(RI_HANDLE hRIHandle,
+								   IMG_BOOL bIsPinned);
+
+PVRSRV_ERROR RIUpdateMEMDESCBackingKM(RI_HANDLE hRIHandle,
+                                      IMG_INT32 iNumModified);
+
+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle);
+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle);
+
+PVRSRV_ERROR RIDeleteListKM(void);
+
+PVRSRV_ERROR RIDumpListKM(PMR *psPMR);
+
+PVRSRV_ERROR RIDumpAllKM(void);
+
+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid);
+
+#if defined(DEBUG)
+PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR,
+								 IMG_PID pid,
+								 IMG_UINT64 ui64Offset,
+								 IMG_DEV_VIRTADDR *psDevVAddr);
+#endif
+
+IMG_BOOL RIGetListEntryKM(IMG_PID pid,
+						  IMG_HANDLE **ppHandle,
+						  IMG_CHAR **ppszEntryString);
+
+#endif /* _RI_SERVER_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/srvcore.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/srvcore.h
new file mode 100644
index 0000000..f90d661
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/srvcore.h
@@ -0,0 +1,207 @@
+/**************************************************************************/ /*!
+@File
+@Title          PVR Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the PVR Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __BRIDGED_PVR_BRIDGE_H__
+#define __BRIDGED_PVR_BRIDGE_H__
+
+#include "lock_types.h"
+#include "connection_server.h"
+#include "pvr_debug.h"
+
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+
+PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+					IMG_UINT32 ui32DispatchTableEntry,
+					void *pvDest,
+					void *pvSrc,
+					IMG_UINT32 ui32Size);
+PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection, 
+				  IMG_UINT32 ui32DispatchTableEntry,
+				  void *pvDest,
+				  void *pvSrc,
+				  IMG_UINT32 ui32Size);
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32DispatchTableEntry,
+		void *psBridgeIn,
+		void *psBridgeOut,
+		CONNECTION_DATA *psConnection);
+
+typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32DispatchTableEntry,
+									 void *psBridgeIn,
+									 void *psBridgeOut,
+									 CONNECTION_DATA *psConnection);
+
+typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
+{
+	BridgeWrapperFunction pfFunction; /*!< The wrapper function that validates the ioctl
+										arguments before calling into srvkm proper */
+	POS_LOCK	hBridgeLock;	/*!< The bridge lock which needs to be acquired 
+						before calling the above wrapper */
+	IMG_BOOL    bUseLock;                 /*!< Specify whether to use a bridge lock at all */
+#if defined(DEBUG_BRIDGE_KM)
+	const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */
+	const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. "PVRSRVConnectBW" */
+	const IMG_CHAR *pszBridgeLockName;	/*!< Name of bridge lock which will be acquired */
+	IMG_UINT32 ui32CallCount; /*!< The total number of times the ioctl has been called */
+	IMG_UINT32 ui32CopyFromUserTotalBytes; /*!< The total number of bytes copied from
+											 userspace within this ioctl */
+	IMG_UINT32 ui32CopyToUserTotalBytes; /*!< The total number of bytes copied to
+										   userspace within this ioctl */
+	IMG_UINT64 ui64TotalTimeNS; /*!< The total amount of time spent in this bridge function */
+	IMG_UINT64 ui64MaxTimeNS; /*!< The maximum amount of time for a single call to this bridge function */
+#endif
+}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
+
+#if defined(SUPPORT_RGX)
+	#define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT  (PVRSRV_BRIDGE_RGX_DISPATCH_LAST+1)
+	#define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT  (PVRSRV_BRIDGE_RGX_LAST+1)
+#else
+	#define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT  (PVRSRV_BRIDGE_DISPATCH_LAST+1)
+	#define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT  (PVRSRV_BRIDGE_LAST+1)
+#endif
+
+extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
+
+void BridgeDispatchTableStartOffsetsInit(void);
+
+void
+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+					   IMG_UINT32 ui32Index,
+					   const IMG_CHAR *pszIOCName,
+					   BridgeWrapperFunction pfFunction,
+					   const IMG_CHAR *pszFunctionName,
+					   POS_LOCK hBridgeLock,
+					   const IMG_CHAR* pszBridgeLockName,
+					   IMG_BOOL bUseLock );
+
+
+/* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */
+#define SetDispatchTableEntry(ui32BridgeGroup, ui32Index, pfFunction,\
+					hBridgeLock, bUseLock) \
+	_SetDispatchTableEntry(ui32BridgeGroup, ui32Index, #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction,\
+							(POS_LOCK)hBridgeLock, #hBridgeLock, bUseLock )
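+
+/* Example of usage (illustrative sketch; the bridge group, index and wrapper
+ * names are assumptions, not part of this interface):
+ *
+ * SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT,
+ *                       PVRSRVConnectBW, NULL, IMG_TRUE);
+ *
+ * The #ui32Index and #pfFunction stringifications give the DEBUG_BRIDGE_KM
+ * statistics a human-readable ioctl name and wrapper name for free.
+ */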
+
+#define DISPATCH_TABLE_GAP_THRESHOLD 5
+
+
+#if defined(DEBUG_BRIDGE_KM)
+typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
+{
+	IMG_UINT32 ui32IOCTLCount;
+	IMG_UINT32 ui32TotalCopyFromUserBytes;
+	IMG_UINT32 ui32TotalCopyToUserBytes;
+} PVRSRV_BRIDGE_GLOBAL_STATS;
+
+/* OS-specific code may want to report the stats held here and within the
+ * BRIDGE_DISPATCH_TABLE_ENTRYs (e.g. on Linux we report these via the
+ * debugfs entry /sys/kernel/debug/pvr/bridge_stats) */
+extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+#endif
+
+PVRSRV_ERROR BridgeInit(void);
+void BridgeDeinit(void);
+
+PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection,
+					  PVRSRV_BRIDGE_PACKAGE   * psBridgePackageKM);
+
+
+PVRSRV_ERROR
+PVRSRVConnectKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE * psDeviceNode,
+				IMG_UINT32 ui32Flags,
+				IMG_UINT32 ui32ClientBuildOptions,
+				IMG_UINT32 ui32ClientDDKVersion,
+				IMG_UINT32 ui32ClientDDKBuild,
+				IMG_UINT8  *pui8KernelArch,
+				IMG_UINT32 *ui32CapabilityFlags,
+				IMG_UINT32 *ui32PVRBridges,
+				IMG_UINT32 *ui32RGXBridges);
+
+PVRSRV_ERROR
+PVRSRVDisconnectKM(void);
+
+PVRSRV_ERROR
+PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject);
+
+PVRSRV_ERROR
+PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject);
+
+PVRSRV_ERROR
+PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection,
+					  PVRSRV_DEVICE_NODE *psDeviceNode,
+					  IMG_UINT32 ui32VerbLevel);
+
+PVRSRV_ERROR
+PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE *psDeviceNode,
+                         IMG_PUINT32  pui32RGXClockSpeed);
+
+PVRSRV_ERROR
+PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection,
+					PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection,
+                                    PVRSRV_DEVICE_NODE * psDeviceNode,
+                                    IMG_UINT32 ui32FWAlignChecksSize,
+                                    IMG_UINT32 aui32FWAlignChecks[]);
+
+PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_UINT32 *pui32DeviceStatus);
+
+PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid,
+                                         IMG_UINT32 ui32ArrSize,
+                                         IMG_BOOL bAllProcessStats,
+                                         IMG_UINT32 *ui32MemoryStats);
+
+#endif /* __BRIDGED_PVR_BRIDGE_H__ */
+
+/******************************************************************************
+ End of file (srvcore.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/srvinit.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/srvinit.h
new file mode 100644
index 0000000..ba03fa2
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/srvinit.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File
+@Title          Initialisation server internal header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the connections between the various parts of the
+		initialisation server.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __SRVINIT_H__
+#define __SRVINIT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "device_connection.h"
+#include "device.h"
+
+#if defined(SUPPORT_RGX)
+IMG_INTERNAL PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+#endif /* __SRVINIT_H__ */
+
+/******************************************************************************
+ End of file (srvinit.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/srvkm.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/srvkm.h
new file mode 100644
index 0000000..1ce8218
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/srvkm.h
@@ -0,0 +1,141 @@
+/**************************************************************************/ /*!
+@File
+@Title          Services kernel module internal header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_H
+#define SRVKM_H
+
+#include "servicesext.h"
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+struct _PVRSRV_DEVICE_NODE_;
+
+/*************************************************************************/ /*!
+@Function     PVRSRVDriverInit
+@Description  Performs one time initialisation of Services.
+@Return       PVRSRV_ERROR   PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDriverInit(void);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVDriverDeInit
+@Description  Performs one time de-initialisation of Services.
+@Return       void
+*/ /**************************************************************************/
+void IMG_CALLCONV PVRSRVDriverDeInit(void);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVDeviceCreate
+@Description  Creates a PVR Services device node for an OS native device.
+@Input        pvOSDevice      OS native device
+@Input        i32UMIdentifier A unique identifier which helps recognize this
+                              Device in the UM space.
+@Output       ppsDeviceNode   Points to the new device node on success
+@Return       PVRSRV_ERROR    PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV
+PVRSRVDeviceCreate(void *pvOSDevice, IMG_INT32 i32UMIdentifier,
+				   struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVDeviceInitialise
+@Description  Initialises the given device, created by PVRSRVDeviceCreate, so
+              that it is in a functional state ready to be used.
+@Input        psDeviceNode  Device node of the device to be initialised
+@Return       PVRSRV_ERROR  PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceInitialise(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVDeviceDestroy
+@Description  Destroys a PVR Services device node.
+@Input        psDeviceNode  Device node to destroy
+@Return       PVRSRV_ERROR  PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV
+PVRSRVDeviceDestroy(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
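+
+/* Example of usage (illustrative sketch of the expected call order; pvOSDevice
+ * and the local variables are assumptions, not part of this interface):
+ *
+ * struct _PVRSRV_DEVICE_NODE_ *psDeviceNode;
+ *
+ * eError = PVRSRVDriverInit();
+ * eError = PVRSRVDeviceCreate(pvOSDevice, 0, &psDeviceNode);
+ * eError = PVRSRVDeviceInitialise(psDeviceNode);
+ * // ... device is now usable ...
+ * eError = PVRSRVDeviceDestroy(psDeviceNode);
+ * PVRSRVDriverDeInit();
+ */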
+
+/******************
+HIGHER LEVEL MACROS
+*******************/
+
+/*----------------------------------------------------------------------------
+Repeats the body of the loop for a certain minimum time, or until the body
+exits by its own means (break, return, goto, etc.)
+
+Example of usage:
+
+LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+{
+	if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
+	{
+		bTimeout = IMG_FALSE;
+		break;
+	}
+	
+	OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+} END_LOOP_UNTIL_TIMEOUT();
+
+-----------------------------------------------------------------------------*/
+
+/*	iNotLastLoop will remain at 1 until the timeout has expired, at which time
+ * 	it will be decremented and the loop executed one final time. This is necessary
+ *	when preemption is enabled.
+ */
+/* PRQA S 3411,3431 12 */ /* critical format, leave alone */
+#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
+{\
+	IMG_UINT32 uiOffset, uiStart, uiCurrent; \
+	IMG_INT32 iNotLastLoop;					 \
+	for(uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\
+		((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--;				\
+		uiCurrent = OSClockus(),													\
+		uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset,		\
+		uiStart = uiCurrent < uiStart ? 0 : uiStart)
+
+#define END_LOOP_UNTIL_TIMEOUT() \
+}
+
+#endif /* SRVKM_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/sync_checkpoint.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/sync_checkpoint.h
new file mode 100644
index 0000000..3c96505
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/sync_checkpoint.h
@@ -0,0 +1,492 @@
+/*************************************************************************/ /*!
+@File
+@Title          Synchronisation checkpoint interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the client side interface for synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_
+#define _SYNC_CHECKPOINT_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_sync_km.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+#include "dllist.h"
+#include "pvr_debug.h"
+#include "device_connection.h"
+#include "opaque_types.h"
+
+#ifndef _CHECKPOINT_TYPES_
+#define _CHECKPOINT_TYPES_
+typedef struct _SYNC_CHECKPOINT_CONTEXT *PSYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT *PSYNC_CHECKPOINT;
+#endif
+
+/* Definitions for the functions to be implemented by the OS-specific sync
+   code, which calls SyncCheckpointRegisterFunctions() when initialised in
+   order to register the functions we can then call */
+#ifndef _CHECKPOINT_PFNS_
+#define _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                                                             PVRSRV_FENCE fence,
+                                                             IMG_UINT32 *nr_checkpoints,
+                                                             PSYNC_CHECKPOINT **checkpoint_handles,
+                                                             IMG_UINT32 *pui32FenceUID);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(const IMG_CHAR *fence_name,
+                                                            PVRSRV_TIMELINE timeline,
+                                                            PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                                                            PVRSRV_FENCE *new_fence,
+                                                            IMG_UINT32 *pui32FenceUID,
+                                                            void **ppvFenceFinaliseData,
+                                                            PSYNC_CHECKPOINT *new_checkpoint_handle,
+                                                            IMG_HANDLE *timeline_update_sync,
+                                                            IMG_UINT32 *timeline_update_value);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data);
+typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data);
+typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr);
+#endif
+
+PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve,
+	                                         PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate,
+                                             PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback,
+                                             PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise,
+                                             PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines,
+                                             PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointContextCreate
+
+@Description    Create a new synchronisation checkpoint context
+
+@Input          psDevNode                 Device node
+
+@Output         ppsSyncCheckpointContext  Handle to the created synchronisation
+                                          checkpoint context
+
+@Return         PVRSRV_OK if the synchronisation checkpoint context was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode,
+                            PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointContextDestroy
+
+@Description    Destroy a synchronisation checkpoint context
+
+@Input          psSyncCheckpointContext  Handle to the synchronisation
+                                         checkpoint context to destroy
+
+@Return         PVRSRV_OK if the synchronisation checkpoint context was
+                successfully destroyed.
+                PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT if the context still
+                has sync checkpoints defined
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointAlloc
+
+@Description    Allocate a new synchronisation checkpoint on the specified
+                synchronisation checkpoint context
+
+@Input          psSyncContext           Handle to the synchronisation
+                                        checkpoint context
+
+@Input          hTimeline               Timeline on which this sync
+                                        checkpoint is being created
+
+@Input          pszCheckpointName       Sync checkpoint source annotation
+                                        (will be truncated to at most
+                                         PVRSRV_SYNC_NAME_LENGTH chars)
+
+@Output         ppsSyncCheckpoint       Created synchronisation checkpoint
+
+@Return         PVRSRV_OK if the synchronisation checkpoint was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
+                    PVRSRV_TIMELINE hTimeline,
+                    const IMG_CHAR *pszCheckpointName,
+                    PSYNC_CHECKPOINT *ppsSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointFree
+
+@Description    Free a synchronisation checkpoint.
+                The reference count held for the synchronisation checkpoint
+                is decremented; if it becomes zero, the checkpoint is freed.
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to free
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointSignal
+
+@Description    Signal the synchronisation checkpoint
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to signal
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointSignalNoHW
+
+@Description    Signal the synchronisation checkpoint in a NO_HARDWARE build
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to signal
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointError
+
+@Description    Error the synchronisation checkpoint
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to error
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointErrorFromUFO
+
+@Description    Error the synchronisation checkpoint which has the
+                given UFO firmware address
+
+@Input          psDevNode               The device owning the sync
+                                        checkpoint to be errored
+
+@Input          ui32FwAddr              The firmware address of the sync
+                                        checkpoint to be errored
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointRollbackFromUFO
+
+@Description    Drop the enqueued count reference taken on the synchronisation
+                checkpoint on behalf of the firmware.
+                Called in the event of a DM Kick failing.
+
+@Input          psDevNode               The device owning the sync
+                                        checkpoint to be rolled back
+
+@Input          ui32FwAddr              The firmware address of the sync
+                                        checkpoint to be rolled back
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointIsSignalled
+
+@Description    Returns IMG_TRUE if the synchronisation checkpoint is
+                signalled or errored
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to test
+
+@Return         IMG_TRUE if the synchronisation checkpoint is signalled or
+                errored, IMG_FALSE otherwise
+*/
+/*****************************************************************************/
+IMG_BOOL
+SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointIsErrored
+
+@Description    Returns IMG_TRUE if the synchronisation checkpoint is
+                errored
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to test
+
+@Return         IMG_TRUE if the synchronisation checkpoint is errored,
+                IMG_FALSE otherwise
+*/
+/*****************************************************************************/
+IMG_BOOL
+SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointTakeRef
+
+@Description    Take a reference on a synchronisation checkpoint
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to take a
+                                        reference on
+
+@Return         PVRSRV_OK if a reference was taken on the synchronisation
+                primitive
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointDropRef
+
+@Description    Drop a reference on a synchronisation checkpoint
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to drop a
+                                        reference on
+
+@Return         PVRSRV_OK if a reference was dropped on the synchronisation
+                checkpoint
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+
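+/* Usage sketch (illustrative only): pairing SyncCheckpointTakeRef() with
+ * SyncCheckpointDropRef() to keep a checkpoint alive across deferred work.
+ * DoDeferredWork() is a hypothetical helper.
+ *
+ *   if (SyncCheckpointTakeRef(psSyncCheckpoint) == PVRSRV_OK)
+ *   {
+ *       DoDeferredWork(psSyncCheckpoint);
+ *       (void) SyncCheckpointDropRef(psSyncCheckpoint);
+ *   }
+ */
+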
+/*************************************************************************/ /*!
+@Function       SyncCheckpointResolveFence
+
+@Description    Resolve a fence, returning a list of the sync checkpoints
+                that fence contains.
+                This function in turn calls a function provided by the
+                OS native sync implementation.
+
+@Input          psSyncCheckpointContext The sync checkpoint context
+                                        on which checkpoints should be
+                                        created (in the event of the fence
+                                        having a native sync pt with no
+                                        associated sync checkpoint)
+
+@Input          hFence                  The fence to be resolved
+
+@Output         pui32NumSyncCheckpoints The number of sync checkpoints the
+                                        fence contains. Can return 0 if
+                                        passed a null (-1) fence.
+
+@Output         papsSyncCheckpoints     List of sync checkpoints the fence
+                                        contains
+
+@Output         puiFenceUID             Unique ID of the resolved fence
+
+@Return         PVRSRV_OK if a valid fence was provided.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, PVRSRV_FENCE hFence, IMG_UINT32 *pui32NumSyncCheckpoints, PSYNC_CHECKPOINT **papsSyncCheckpoints, IMG_UINT32 *puiFenceUID);
+
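+/* Usage sketch (illustrative only): resolving an input fence into its sync
+ * checkpoints, then freeing the returned list with
+ * SyncCheckpointFreeCheckpointListMem() (declared later in this header).
+ * psContext and hInFence are assumed to exist.
+ *
+ *   IMG_UINT32 ui32Count = 0, uiFenceUID = 0;
+ *   PSYNC_CHECKPOINT *apsCheckpoints = NULL;
+ *
+ *   if (SyncCheckpointResolveFence(psContext, hInFence, &ui32Count,
+ *                                  &apsCheckpoints, &uiFenceUID) == PVRSRV_OK)
+ *   {
+ *       // ... append apsCheckpoints[0..ui32Count-1] to the kick ...
+ *       SyncCheckpointFreeCheckpointListMem(apsCheckpoints);
+ *   }
+ */
+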
+/*************************************************************************/ /*!
+@Function       SyncCheckpointCreateFence
+
+@Description    Create a fence containing a single sync checkpoint.
+                Returns the fence and a pointer to the sync checkpoint
+                it contains.
+                This function in turn calls a function provided by the
+                OS native sync implementation.
+
+@Input          psDeviceNode            The device on which the fence is
+                                        to be created
+
+@Input          pszFenceName            String to assign to the new fence
+                                        (for debugging purposes)
+
+@Input          hTimeline               Timeline on which the new fence is
+                                        to be created
+
+@Input          psSyncCheckpointContext Sync checkpoint context to be used
+                                        when creating the new fence
+
+@Output         phNewFence              The newly created fence
+
+@Output         pui32FenceUID           Unique ID of the created fence
+
+@Output         ppvFenceFinaliseData    Any data needed to finalise the fence
+                                        in a later call to the function
+                                        SyncCheckpointFinaliseFence()
+
+@Output         psNewSyncCheckpoint     The sync checkpoint contained in
+                                        the new fence
+
+@Output         ppvTimelineUpdateSyncPrim The timeline update sync prim
+                                        returned by the native sync
+                                        implementation (if any)
+
+@Output         pui32TimelineUpdateValue The value to which the timeline
+                                        update sync prim should be updated
+
+@Return         PVRSRV_OK if the fence was successfully created.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointCreateFence(PPVRSRV_DEVICE_NODE psDeviceNode,
+                          const IMG_CHAR *pszFenceName,
+                          PVRSRV_TIMELINE hTimeline,
+                          PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                          PVRSRV_FENCE *phNewFence,
+                          IMG_UINT32 *pui32FenceUID,
+                          void **ppvFenceFinaliseData,
+                          PSYNC_CHECKPOINT *psNewSyncCheckpoint,
+                          void **ppvTimelineUpdateSyncPrim,
+                          IMG_UINT32 *pui32TimelineUpdateValue);
+
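+/* Usage sketch (illustrative only): creating an output fence for a kick.
+ * All local names are hypothetical; error handling trimmed.
+ *
+ *   PVRSRV_FENCE hOutFence;
+ *   IMG_UINT32 uiFenceUID, ui32TlUpdateValue;
+ *   void *pvFinaliseData, *pvTlUpdateSync;
+ *   PSYNC_CHECKPOINT psOutCheckpoint;
+ *   PVRSRV_ERROR eError;
+ *
+ *   eError = SyncCheckpointCreateFence(psDevNode, "mykick-out", hTimeline,
+ *                                      psContext, &hOutFence, &uiFenceUID,
+ *                                      &pvFinaliseData, &psOutCheckpoint,
+ *                                      &pvTlUpdateSync, &ui32TlUpdateValue);
+ */
+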
+/*************************************************************************/ /*!
+@Function       SyncCheckpointRollbackFenceData
+
+@Description    'Rolls back' the specified fence: destroys the fence and
+                takes any other actions required to undo the fence
+                creation (e.g. if the implementation wishes to revert the
+                incrementing of the fence's timeline).
+                This function in turn calls a function provided by the
+                OS native sync implementation.
+
+@Input          hFence                  Fence to be 'rolled back'
+
+@Input          pvFinaliseData          Data needed to finalise the
+                                        fence
+
+@Return         PVRSRV_OK if a valid fence was provided.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointFinaliseFence
+
+@Description    'Finalise' the specified fence (performs any actions the
+                underlying implementation may need to perform just prior
+                to the fence being returned to the client).
+                This function in turn calls a function provided by the
+                OS native sync implementation - if the native sync
+                implementation does not need to perform any actions at
+                this time, this function does not need to be registered.
+
+@Input          hFence                  Fence to be 'finalised'
+
+@Input          pvFinaliseData          Data needed to finalise the fence
+
+@Return         PVRSRV_OK if a valid fence and finalise data were provided.
+                PVRSRV_ERROR_INVALID_PARAMS if an invalid fence or finalise
+                data were provided.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function (permitted).
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointFinaliseFence(PVRSRV_FENCE hFence, void *pvFinaliseData);
+
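+/* Usage sketch (illustrative only): the finalise-or-rollback pattern after
+ * SyncCheckpointCreateFence(). If the kick is submitted, the fence is
+ * finalised and returned to the client; if the kick fails, the fence data
+ * is rolled back instead. SubmitKick() is a hypothetical helper.
+ *
+ *   if (SubmitKick(psDevNode) == PVRSRV_OK)
+ *   {
+ *       (void) SyncCheckpointFinaliseFence(hOutFence, pvFinaliseData);
+ *   }
+ *   else
+ *   {
+ *       (void) SyncCheckpointRollbackFenceData(hOutFence, pvFinaliseData);
+ *   }
+ */
+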
+/*************************************************************************/ /*!
+@Function       SyncCheckpointFreeCheckpointListMem
+
+@Description    Free the memory which was allocated by the sync
+                implementation and used to return the list of sync
+                checkpoints when resolving a fence.
+                This function in turn calls a free function registered by
+                the sync implementation (if a function has been registered).
+
+@Input          pvCheckpointListMem     Pointer to the memory to be freed
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointNoHWUpdateTimelines
+
+@Description    Called by the DDK in a NO_HARDWARE build only.
+                After syncs have been manually signalled by the DDK, this
+                function is called to allow the OS native sync implementation
+                to update its timelines (as the usual callback notification
+                of signalled checkpoints is not supported for NO_HARDWARE).
+                This function in turn calls a function provided by the
+                OS native sync implementation.
+
+@Input          pvPrivateData            Any data the OS native sync
+                                         implementation might require.
+
+@Return         PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function, otherwise
+                PVRSRV_OK.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointPDumpPol
+
+@Description    Called to insert a poll into the PDump script on a given
+                sync checkpoint being signalled or errored.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint for
+                                        PDump to poll on
+
+@Input          ui32PDumpFlags          PDump flags
+
+@Return         PVRSRV_OK if a valid sync checkpoint was provided.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointPDumpPol(PSYNC_CHECKPOINT psSyncCheckpoint, PDUMP_FLAGS_T ui32PDumpFlags);
+
+#endif	/* _SYNC_CHECKPOINT_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/sync_checkpoint_init.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/sync_checkpoint_init.h
new file mode 100644
index 0000000..f5aa139
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/sync_checkpoint_init.h
@@ -0,0 +1,82 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services synchronisation checkpoint initialisation interface
+                header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines synchronisation checkpoint structures that are visible
+                internally and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_INIT_
+#define _SYNC_CHECKPOINT_INIT_
+
+#include "device.h"
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointInit
+
+@Description    Initialise the sync checkpoint driver by giving it the
+                device node (needed to determine the pfnUFOAlloc function
+                to call in order to allocate sync block memory).
+
+@Input          psDevNode               Device for which sync checkpoints
+                                        are being initialised
+
+@Return         PVRSRV_OK               if initialised successfully,
+                PVRSRV_ERROR_<error>    otherwise
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointInit(PVRSRV_DEVICE_NODE *psDevNode);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointDeinit
+
+@Description    Deinitialise the sync checkpoint driver.
+                Frees resources allocated during initialisation.
+
+@Input          psDevNode               Device for which sync checkpoints
+                                        are being de-initialised
+
+@Return         None
+*/
+/*****************************************************************************/
+void SyncCheckpointDeinit(PVRSRV_DEVICE_NODE *psDevNode);
+
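+/* Usage sketch (illustrative only): per-device init/deinit pairing during
+ * device bring-up and teardown. Error handling trimmed.
+ *
+ *   PVRSRV_ERROR eError = SyncCheckpointInit(psDevNode);
+ *   // ... device in use ...
+ *   SyncCheckpointDeinit(psDevNode);
+ */
+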
+#endif /* _SYNC_CHECKPOINT_INIT_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/sync_fallback_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/sync_fallback_server.h
new file mode 100644
index 0000000..05e4ca7
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/sync_fallback_server.h
@@ -0,0 +1,83 @@
+#ifndef _SYNC_FALLBACK_SERVER_H_
+#define _SYNC_FALLBACK_SERVER_H_
+
+#include "img_types.h"
+#include "sync_checkpoint.h"
+#include "device.h"
+
+
+typedef struct _PVRSRV_TIMELINE_SERVER_ PVRSRV_TIMELINE_SERVER;
+typedef struct _PVRSRV_FENCE_SERVER_ PVRSRV_FENCE_SERVER;
+
+typedef struct _PVRSRV_SYNC_PT_ PVRSRV_SYNC_PT;
+
+#define SYNC_FB_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+#define SYNC_FB_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+
+/*****************************************************************************/
+/*                                                                           */
+/*                         SW SPECIFIC FUNCTIONS                             */
+/*                                                                           */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncFbTimelineCreateSW(IMG_UINT32 uiTimelineNameSize,
+                                    const IMG_CHAR *pszTimelineName,
+                                    PVRSRV_TIMELINE_SERVER **ppsTimeline);
+
+/*****************************************************************************/
+/*                                                                           */
+/*                         PVR SPECIFIC FUNCTIONS                            */
+/*                                                                           */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncFbTimelineCreatePVR(IMG_UINT32 uiTimelineNameSize,
+                                     const IMG_CHAR *pszTimelineName,
+                                     PVRSRV_TIMELINE_SERVER **ppsTimeline);
+
+PVRSRV_ERROR SyncFbFenceCreatePVR(const IMG_CHAR *pszName,
+                                  PVRSRV_TIMELINE iTl,
+                                  PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext,
+                                  PVRSRV_FENCE *piOutFence,
+                                  IMG_UINT32 *puiFenceUID,
+                                  void **ppvFenceFinaliseData,
+                                  PSYNC_CHECKPOINT *ppsOutCheckpoint,
+                                  void **ppvTimelineUpdateSync,
+                                  IMG_UINT32 *puiTimelineUpdateValue);
+
+PVRSRV_ERROR SyncFbFenceResolvePVR(PSYNC_CHECKPOINT_CONTEXT psContext,
+                                   PVRSRV_FENCE iFence,
+                                   IMG_UINT32 *puiNumCheckpoints,
+                                   PSYNC_CHECKPOINT **papsCheckpoints,
+                                   IMG_UINT32 *puiFenceUID);
+
+/*****************************************************************************/
+/*                                                                           */
+/*                         GENERIC FUNCTIONS                                 */
+/*                                                                           */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncFbTimelineRelease(PVRSRV_TIMELINE_SERVER *psTl);
+
+PVRSRV_ERROR SyncFbFenceRelease(PVRSRV_FENCE_SERVER *psFence);
+
+PVRSRV_ERROR SyncFbFenceDup(PVRSRV_FENCE_SERVER *psInFence,
+                            PVRSRV_FENCE_SERVER **ppsOutFence);
+
+PVRSRV_ERROR SyncFbFenceMerge(PVRSRV_FENCE_SERVER *psInFence1,
+                              PVRSRV_FENCE_SERVER *psInFence2,
+                              IMG_UINT32 uiFenceNameSize,
+                              const IMG_CHAR *pszFenceName,
+                              PVRSRV_FENCE_SERVER **ppsOutFence);
+
+PVRSRV_ERROR SyncFbFenceWait(PVRSRV_FENCE_SERVER *psFence,
+                             IMG_UINT32 uiTimeout);
+
+PVRSRV_ERROR SyncFbFenceDump(PVRSRV_FENCE_SERVER *psFence,
+                             IMG_UINT32 uiLine,
+                             IMG_UINT32 uiFileNameLength,
+                             const IMG_CHAR *pszFile);
+
+PVRSRV_ERROR SyncFbRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR SyncFbDeregisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+#endif /* _SYNC_FALLBACK_SERVER_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/sync_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/sync_server.h
new file mode 100644
index 0000000..5750b80
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/sync_server.h
@@ -0,0 +1,437 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side synchronisation interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Describes the server side synchronisation functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_types.h"
+#include "pvrsrv.h"
+#include "device.h"
+#include "devicemem.h"
+#include "pdump.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+
+#ifndef _SYNC_SERVER_H_
+#define _SYNC_SERVER_H_
+
+typedef struct _SERVER_OP_COOKIE_ SERVER_OP_COOKIE;
+typedef struct _SERVER_SYNC_PRIMITIVE_ SERVER_SYNC_PRIMITIVE;
+typedef struct _SYNC_PRIMITIVE_BLOCK_ SYNC_PRIMITIVE_BLOCK;
+typedef struct _SERVER_SYNC_EXPORT_ SERVER_SYNC_EXPORT;
+typedef struct _SYNC_CONNECTION_DATA_ SYNC_CONNECTION_DATA;
+typedef struct SYNC_RECORD* SYNC_RECORD_HANDLE;
+
+typedef struct _SYNC_ADDR_LIST_
+{
+	IMG_UINT32 ui32NumSyncs;
+	PRGXFWIF_UFO_ADDR *pasFWAddrs;
+	IMG_UINT32 ui32State;
+} SYNC_ADDR_LIST;
+
+PVRSRV_ERROR
+SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock,
+						IMG_UINT32 ui32Offset,
+						PRGXFWIF_UFO_ADDR *psAddrOut);
+
+void
+SyncAddrListInit(SYNC_ADDR_LIST *psList);
+
+void
+SyncAddrListDeinit(SYNC_ADDR_LIST *psList);
+
+PVRSRV_ERROR
+SyncAddrListPopulate(SYNC_ADDR_LIST *psList,
+						IMG_UINT32 ui32NumSyncs,
+						SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock,
+						IMG_UINT32 *paui32SyncOffset);
+
+PVRSRV_ERROR
+SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST          *psList,
+						   PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim);
+PVRSRV_ERROR
+SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList,
+								IMG_UINT32 ui32NumCheckpoints,
+								PSYNC_CHECKPOINT *apsSyncCheckpoint);
+
+PVRSRV_ERROR
+SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList,
+									  IMG_UINT32 ui32NumCheckpoints,
+									  PSYNC_CHECKPOINT *apsSyncCheckpoint);
+
+void
+SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints,
+							 PSYNC_CHECKPOINT *apsSyncCheckpoint);
+
+PVRSRV_ERROR
+SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList);
+
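+/* Usage sketch (illustrative only): building a SYNC_ADDR_LIST of checkpoint
+ * UFO addresses for a kick, then dropping the checkpoint references once
+ * the kick has been queued. ui32Count and apsCheckpoints are assumed to
+ * come from a prior fence resolve.
+ *
+ *   SYNC_ADDR_LIST sUpdateList;
+ *
+ *   SyncAddrListInit(&sUpdateList);
+ *   if (SyncAddrListAppendCheckpoints(&sUpdateList, ui32Count,
+ *                                     apsCheckpoints) == PVRSRV_OK)
+ *   {
+ *       // ... pass sUpdateList.pasFWAddrs / ui32NumSyncs to the firmware ...
+ *       SyncAddrListDeRefCheckpoints(ui32Count, apsCheckpoints);
+ *   }
+ *   SyncAddrListDeinit(&sUpdateList);
+ */
+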
+PVRSRV_ERROR
+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
+                                PVRSRV_DEVICE_NODE * psDevNode,
+								SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
+								IMG_UINT32 *puiSyncPrimVAddr,
+								IMG_UINT32 *puiSyncPrimBlockSize,
+								PMR        **ppsSyncPMR);
+
+PVRSRV_ERROR
+PVRSRVExportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk,
+								 DEVMEM_EXPORTCOOKIE **ppsExportCookie);
+
+PVRSRV_ERROR
+PVRSRVUnexportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk);
+
+PVRSRV_ERROR
+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
+					IMG_UINT32 ui32Value);
+
+PVRSRV_ERROR
+PVRSRVServerSyncPrimSetKM(SERVER_SYNC_PRIMITIVE *psServerSync, IMG_UINT32 ui32Value);
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+							SERVER_SYNC_EXPORT **ppsExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerImportKM(CONNECTION_DATA *psConnection,
+							 PVRSRV_DEVICE_NODE *psDevNode,
+							 SERVER_SYNC_EXPORT *psExport,
+							 SERVER_SYNC_PRIMITIVE **ppsSync,
+							 IMG_UINT32 *pui32SyncPrimVAddr);
+#endif
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureExportKM(CONNECTION_DATA *psConnection,
+                                   PVRSRV_DEVICE_NODE * psDevNode,
+								   SERVER_SYNC_PRIMITIVE *psSync,
+								   IMG_SECURE_TYPE *phSecure,
+								   SERVER_SYNC_EXPORT **ppsExport,
+								   CONNECTION_DATA **ppsSecureConnection);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureUnexportKM(SERVER_SYNC_EXPORT *psExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureImportKM(CONNECTION_DATA *psConnection,
+								   PVRSRV_DEVICE_NODE *psDevNode,
+								   IMG_SECURE_TYPE hSecure,
+								   SERVER_SYNC_PRIMITIVE **ppsSync,
+								   IMG_UINT32 *pui32SyncPrimVAddr);
+#endif
+
+IMG_UINT32 PVRSRVServerSyncRequesterRegisterKM(IMG_UINT32 *pui32SyncRequesterID);
+void PVRSRVServerSyncRequesterUnregisterKM(IMG_UINT32 ui32SyncRequesterID);
+
+PVRSRV_ERROR
+PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection,
+					   PVRSRV_DEVICE_NODE *psDevNode,
+					   IMG_BOOL bServerSync,
+                       IMG_UINT32 ui32FWAddr,
+                       IMG_UINT32 ui32ClassNameSize,
+                       const IMG_CHAR *pszClassName);
+
+PVRSRV_ERROR
+PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection,
+					   PVRSRV_DEVICE_NODE *psDevNode,
+					   IMG_UINT32 ui32FWAddr);
+
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+					  PVRSRV_DEVICE_NODE *psDevNode,
+					  SYNC_RECORD_HANDLE *phRecord,
+					  SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+					  IMG_UINT32 ui32FwBlockAddr,
+					  IMG_UINT32 ui32SyncOffset,
+					  IMG_BOOL bServerSync,
+					  IMG_UINT32 ui32ClassNameSize,
+					  const IMG_CHAR *pszClassName);
+
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+			SYNC_RECORD_HANDLE hRecord);
+
+PVRSRV_ERROR
+PVRSRVServerSyncAllocKM(CONNECTION_DATA * psConnection,
+                        PVRSRV_DEVICE_NODE *psDevNode,
+						SERVER_SYNC_PRIMITIVE **ppsSync,
+						IMG_UINT32 *pui32SyncPrimVAddr,
+						IMG_UINT32 ui32ClassNameSize,
+						const IMG_CHAR *szClassName);
+PVRSRV_ERROR
+PVRSRVServerSyncFreeKM(SERVER_SYNC_PRIMITIVE *psSync);
+
+PVRSRV_ERROR
+PVRSRVServerSyncGetStatusKM(IMG_UINT32 ui32SyncCount,
+							SERVER_SYNC_PRIMITIVE **papsSyncs,
+							IMG_UINT32 *pui32UID,
+							IMG_UINT32 *pui32FWAddr,
+							IMG_UINT32 *pui32CurrentOp,
+							IMG_UINT32 *pui32NextOp);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue,
+						  IMG_UINT32 ui32SyncRequesterID,
+						  IMG_BOOL bUpdate,
+						  IMG_BOOL *pbFenceRequired);
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM_NoGlobalLock(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue,
+						  IMG_UINT32 ui32SyncRequesterID,
+						  IMG_BOOL bUpdate,
+						  IMG_BOOL *pbFenceRequired);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+							   IMG_BOOL bUpdate,
+						       IMG_UINT32 *pui32FenceValue,
+						       IMG_UINT32 *pui32UpdateValue);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM_NoGlobalLock(SERVER_SYNC_PRIMITIVE *psSync,
+							   IMG_BOOL bUpdate,
+						       IMG_UINT32 *pui32FenceValue,
+						       IMG_UINT32 *pui32UpdateValue);
+
+IMG_BOOL
+ServerSyncFenceIsMet(SERVER_SYNC_PRIMITIVE *psSync,
+					 IMG_UINT32 ui32FenceValue);
+
+void
+ServerSyncCompleteOp(SERVER_SYNC_PRIMITIVE *psSync,
+					 IMG_BOOL bDoUpdate,
+					 IMG_UINT32 ui32UpdateValue);
+
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCreateKM(IMG_UINT32 ui32SyncBlockCount,
+						 SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock,
+						 IMG_UINT32 ui32ClientSyncCount,
+						 IMG_UINT32 *paui32SyncBlockIndex,
+						 IMG_UINT32 *paui32Index,
+						 IMG_UINT32 ui32ServerSyncCount,
+						 SERVER_SYNC_PRIMITIVE **papsServerSync,
+						 SERVER_OP_COOKIE **ppsServerCookie);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpTakeKM(SERVER_OP_COOKIE *psServerCookie,
+					       IMG_UINT32 ui32ClientSyncCount,
+					       IMG_UINT32 *paui32Flags,
+					       IMG_UINT32 *paui32FenceValue,
+					       IMG_UINT32 *paui32UpdateValue,
+					       IMG_UINT32 ui32ServerSyncCount,
+						   IMG_UINT32 *paui32ServerFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpReadyKM(SERVER_OP_COOKIE *psServerCookie,
+						IMG_BOOL *pbReady);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCompleteKM(SERVER_OP_COOKIE *psServerCookie);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpDestroyKM(SERVER_OP_COOKIE *psServerCookie);
+
+IMG_UINT32 ServerSyncGetId(SERVER_SYNC_PRIMITIVE *psSync);
+
+PVRSRV_ERROR
+ServerSyncGetFWAddr(SERVER_SYNC_PRIMITIVE *psSync, IMG_UINT32 *pui32SyncAddr);
+
+IMG_UINT32 ServerSyncGetValue(SERVER_SYNC_PRIMITIVE *psSync);
+
+IMG_UINT32 ServerSyncGetNextValue(SERVER_SYNC_PRIMITIVE *psSync);
+
+PVRSRV_DEVICE_NODE* ServerSyncGetDeviceNode(SERVER_SYNC_PRIMITIVE *psSync);
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr,
+					  IMG_CHAR * pszSyncInfo, size_t len);
+#endif
+
+void ServerSyncDumpPending(void);
+
+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData);
+void SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData);
+void SyncConnectionPDumpSyncBlocks(SYNC_CONNECTION_DATA *psSyncConnectionData);
+
+/*!
+******************************************************************************
+@Function      ServerSyncInit
+
+@Description   Per-device initialisation for the ServerSync module
+******************************************************************************/
+PVRSRV_ERROR ServerSyncInit(PVRSRV_DEVICE_NODE *psDevNode);
+void ServerSyncDeinit(PVRSRV_DEVICE_NODE *psDevNode);
+
+/*!
+******************************************************************************
+@Function      ServerSyncInitOnce
+
+@Description   One-time initialisation for the ServerSync module
+******************************************************************************/
+PVRSRV_ERROR ServerSyncInitOnce(PVRSRV_DATA *psPVRSRVData);
+void ServerSyncDeinitOnce(PVRSRV_DATA *psPVRSRVData);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+/*!
+******************************************************************************
+@Function      PVRSRVLockServerSync
+
+@Description   Acquire a global lock to maintain server sync consistency
+******************************************************************************/
+void PVRSRVLockServerSync(void);
+/*!
+******************************************************************************
+@Function      PVRSRVUnlockServerSync
+
+@Description   Release the global server sync lock
+******************************************************************************/
+void PVRSRVUnlockServerSync(void);
+#endif
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, 
+							IMG_UINT32 ui32Value);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+						 IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T uiDumpFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T ui32PDumpFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+						 IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+						 IMG_UINT64 uiBufferSize);
+
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpValueKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, 
+							IMG_UINT32 ui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+						 IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T uiDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimOpPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T uiDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psServerCookie);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpCBPKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+						 IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+						 IMG_UINT64 uiBufferSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+	return PVRSRV_OK;
+}
+#endif	/* PDUMP */
+#endif	/*_SYNC_SERVER_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/tlintern.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/tlintern.h
new file mode 100644
index 0000000..7f33510
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/tlintern.h
@@ -0,0 +1,300 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer internals
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport Layer header used by TL internally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __TLINTERN_H__
+#define __TLINTERN_H__
+
+
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+#include "device.h"
+#include "lock.h"
+#include "tlstream.h"
+
+/* Forward declarations */
+typedef struct _TL_SNODE_* PTL_SNODE;
+
+/* To debug buffer utilisation enable this macro here and
+ * define PVRSRV_NEED_PVR_TRACE in the server pvr_debug.c and in tutils.c
+ * before the inclusion of pvr_debug.h. Issue pvrtutils 6 on target to see
+ * stream buffer utilisation. */
+//#define TL_BUFFER_STATS 1
+
+/*! TL stream structure container.
+ *    pbyBuffer   holds the circular buffer.
+ *    ui32Read    points to the beginning of the buffer, i.e. to where the
+ *                  data to be read begin.
+ *    ui32Write   points to the end of the data that have been committed, i.e.
+ *                  this is where new data will be written.
+ *    ui32Pending number of bytes reserved in the last reserve call which have
+ *                  not yet been submitted. Therefore these data are not
+ *                  ready to be transported.
+ *    hStreamWLock - provides atomic protection for the ui32Pending & ui32Write
+ *                   members of the structure for when they are checked and/or
+ *                   updated in the context of a stream writer (producer)
+ *                   calling DoTLStreamReserve() & TLStreamCommit().
+ *                 - Reader context is not multi-threaded, only one client per
+ *                   stream is allowed. Also note the read context may be in an
+ *                   ISR which prevents a design where locks can be held in the
+ *                   AcquireData/ReleaseData() calls. Thus this lock only
+ *                   protects the stream members from simultaneous writers.
+ *
+ *      ui32Read < ui32Write <= ui32Pending
+ *        where < and <= operators are overloaded to make sense in a circular way.
+ */
+typedef struct _TL_STREAM_
+{
+	IMG_CHAR                szName[PRVSRVTL_MAX_STREAM_NAME_SIZE];  /*!< String name identifier */
+	PVRSRV_DEVICE_NODE      *psDevNode;                             /*!< Underlying device on which the stream's buffer is allocated */
+	TL_OPMODE               eOpMode;                                /*!< Mode of Operation of TL Buffer */
+
+	IMG_BOOL                bWaitForEmptyOnDestroy;                 /*!< Flag: On destroying a non-empty stream block until
+                                                                         *         stream is drained. */
+	IMG_BOOL                bNoSignalOnCommit;                      /*!< Flag: Used to avoid the TL signalling waiting consumers
+                                                                         *         that new data is available on every commit. Producers
+                                                                         *         using this flag will need to manually signal when
+                                                                         *         appropriate using the TLStreamSync() API */
+
+	void                    (*pfOnReaderOpenCallback)(void *);      /*!< Optional on reader connect callback */
+	void                    *pvOnReaderOpenUserData;                /*!< On reader connect user data */
+	void                    (*pfProducerCallback)(void);            /*!< Optional producer callback of type TL_STREAM_SOURCECB */
+	void                    *pvProducerUserData;                    /*!< Producer callback user data */
+
+	struct _TL_STREAM_      *psNotifStream;                         /*!< Pointer to the stream to which notification will be sent */
+
+	volatile IMG_UINT32     ui32Read;                               /*!< Pointer to the beginning of available data */
+	volatile IMG_UINT32     ui32Write;                              /*!< Pointer to already committed data which are ready to be
+                                                                         *   copied to user space */
+	IMG_UINT32              ui32Pending;                            /*!< Count pending bytes reserved in buffer */
+	IMG_UINT32              ui32Size;                               /*!< Buffer size */
+	IMG_UINT32              ui32MaxPacketSize;                      /*! Max TL packet size */
+	IMG_BYTE                *pbyBuffer;                             /*!< Actual data buffer */
+
+	PTL_SNODE               psNode;	                                /*!< Ptr to parent stream node */
+	DEVMEM_MEMDESC          *psStreamMemDesc;                       /*!< MemDescriptor used to allocate buffer space through PMR */
+
+	IMG_HANDLE              hProducerEvent;	                        /*!< Handle to wait on if there is not enough space */
+	IMG_HANDLE              hProducerEventObj;                      /*!< Handle to signal blocked reserve calls */
+
+	POS_LOCK                hStreamWLock;                           /*!< Writers Lock for ui32Pending & ui32Write*/
+	POS_LOCK                hReadLock;                              /*!< Readers Lock for bReadPending & ui32Read*/
+	IMG_BOOL                bReadPending;                           /*!< Tracks if a read operation is pending or not*/
+
+#if defined(TL_BUFFER_STATS)
+	IMG_UINT32              ui32CntReadFails;                       /*!< Tracks how many times reader failed to acquire read lock */
+	IMG_UINT32              ui32CntReadSuccesses;                   /*!< Tracks how many times reader acquires read lock successfully */
+	IMG_UINT32              ui32CntWriteSuccesses;                  /*!< Tracks how many times writer acquires read lock successfully */
+	IMG_UINT32              ui32CntWriteWaits;                      /*!< Tracks how many times writer had to wait to acquire read lock */
+	IMG_UINT32              ui32CntNumWriteSuccess;	                /*!< Tracks how many write operations were successful*/
+	IMG_UINT32              ui32BufferUt;                           /*!< Buffer utilisation high watermark, see TL_BUFFER_STATS above */
+#endif
+} TL_STREAM, *PTL_STREAM;
+
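+/* Illustrative sketch (hypothetical helper, not part of this interface):
+ * computing the number of committed bytes available to read under the
+ * circular invariant documented above, where ui32Read may be ahead of
+ * ui32Write after the write pointer has wrapped.
+ *
+ *   static IMG_UINT32 TLStreamBytesReadable(const TL_STREAM *psStream)
+ *   {
+ *       IMG_UINT32 ui32R = psStream->ui32Read;
+ *       IMG_UINT32 ui32W = psStream->ui32Write;
+ *       return (ui32W >= ui32R) ? (ui32W - ui32R)
+ *                               : (psStream->ui32Size - ui32R + ui32W);
+ *   }
+ */
+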
+/* There needs to be enough space reserved in the buffer for 2 minimal packets
+ * and it needs to be aligned the same way the buffer is, or there will be a
+ * compile error. */
+#define BUFFER_RESERVED_SPACE (2*PVRSRVTL_PACKET_ALIGNMENT)
+
+/* ensure the space reserved follows the buffer's alignment */
+static_assert(!(BUFFER_RESERVED_SPACE&(PVRSRVTL_PACKET_ALIGNMENT-1)),
+			  "BUFFER_RESERVED_SPACE must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/* Define the largest value that a uint matching the
+ * PVRSRVTL_PACKET_ALIGNMENT size can hold */
+#define MAX_UINT 0xffffFFFF
+
+/*! Defines the value used for TL_STREAM.ui32Pending when no reserve is
+ * outstanding on the stream. */
+#define NOTHING_PENDING IMG_UINT32_MAX
+
+
+/*
+ * Transport Layer Stream Descriptor types/defs
+ */
+typedef struct _TL_STREAM_DESC_
+{
+	PTL_SNODE	psNode;			/*!< Ptr to parent stream node */
+	IMG_UINT32	ui32Flags;
+	IMG_HANDLE	hReadEvent; 	/*!< For wait call (only used/set in reader descriptors) */
+	IMG_INT		uiRefCount;     /*!< Reference count to the SD */
+} TL_STREAM_DESC, *PTL_STREAM_DESC;
+
+PTL_STREAM_DESC TLMakeStreamDesc(PTL_SNODE psNode, IMG_UINT32 ui32Flags, IMG_HANDLE hReadEvent);
+
+#define TL_STREAM_KM_FLAG_MASK	0xFFFF0000
+#define TL_STREAM_FLAG_TEST		0x10000000
+#define TL_STREAM_FLAG_WRAPREAD	0x00010000
+
+#define TL_STREAM_UM_FLAG_MASK	0x0000FFFF
+
+/*
+ * Transport Layer stream list node
+ */
+typedef struct _TL_SNODE_
+{
+	struct _TL_SNODE_*  psNext;				/*!< Linked list next element */
+	IMG_HANDLE			hReadEventObj;		/*!< Readers 'wait for data' event */
+	PTL_STREAM 			psStream;			/*!< TL Stream object */
+	IMG_INT				uiWRefCount;		/*!< Stream writer reference count */
+	PTL_STREAM_DESC 	psRDesc;			/*!< Stream reader 0 or ptr only */
+	PTL_STREAM_DESC		psWDesc;			/*!< Stream writer 0 or ptr only */
+} TL_SNODE;
+
+PTL_SNODE TLMakeSNode(IMG_HANDLE hReadEventObj, TL_STREAM *psStream, TL_STREAM_DESC *psDesc);
+
+/*
+ * Transport Layer global top types and variables
+ * Use access function to obtain pointer.
+ *
+ * hTLGDLock - provides atomicity over read/check/write operations and
+ *             sequence of operations on uiClientCnt, psHead list of SNODEs and
+ *             the immediate members in a list element SNODE structure.
+ *           - This larger scope of responsibility for this lock helps avoid
+ *             the need for a lock in the SNODE structure.
+ *           - Lock held in the client (reader) context when streams are
+ *             opened/closed and in the server (writer) context when streams
+ *             are created/open/closed.
+ */
+typedef struct _TL_GDATA_
+{
+	IMG_HANDLE hTLEventObj;         /* Global TL signal object, new streams, etc */
+
+	IMG_UINT   uiClientCnt;         /* Counter to track the number of client stream connections. */
+	PTL_SNODE  psHead;              /* List of TL streams and associated client handle */
+
+	POS_LOCK   hTLGDLock;          /* Lock for structure AND psHead SNODE list */
+} TL_GLOBAL_DATA, *PTL_GLOBAL_DATA;
+
+/*
+ * Transport Layer Internal Kernel-Mode Server API
+ */
+TL_GLOBAL_DATA* TLGGD(void);		/* TLGetGlobalData() */
+
+PVRSRV_ERROR TLInit(void);
+void TLDeInit(void);
+
+void  TLAddStreamNode(PTL_SNODE psAdd);
+PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName);
+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc);
+IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern,
+                          IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+                          IMG_UINT32 ui32Max);
+PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc);
+void TLReturnStreamNode(PTL_SNODE psNode);
+
+/*****************************************************************************
+ Function Name  : TLTryRemoveStreamAndFreeStreamNode
+
+ Inputs         : PTL_SNODE  Pointer to the TL_SNODE whose stream is requested
+                             to be removed from TL_GLOBAL_DATA's list
+
+ Return Value   : IMG_TRUE   If the stream was made NULL and this TL_SNODE
+                             was removed from TL_GLOBAL_DATA's list
+
+                  IMG_FALSE  If the stream wasn't made NULL as there is a
+                             client connected to this stream
+
+ Description    : If there is no client currently connected to this stream,
+                  this function removes this TL_SNODE from TL_GLOBAL_DATA's
+                  list. The caller is responsible for the cleanup of the
+                  TL_STREAM whose TL_SNODE may be removed.
+
+                  Otherwise, this function does nothing.
+*****************************************************************************/
+IMG_BOOL  TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove);
+
+/*****************************************************************************
+ Function Name  : TLUnrefDescAndTryFreeStreamNode
+
+ Inputs         : PTL_SNODE        Pointer to the TL_SNODE whose descriptor
+                                   is requested to be removed
+                  PTL_STREAM_DESC  Pointer to the STREAM_DESC
+
+ Return Value   : IMG_TRUE   If this TL_SNODE was removed from
+                             TL_GLOBAL_DATA's list
+
+                  IMG_FALSE  Otherwise
+
+ Description    : This function removes the stream descriptor from this
+                  TL_SNODE and, if there is no writer (producer context)
+                  currently bound to the stream, removes this TL_SNODE from
+                  TL_GLOBAL_DATA's list. The caller is responsible for the
+                  cleanup of the TL_STREAM whose TL_SNODE may be removed.
+*****************************************************************************/
+IMG_BOOL  TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psRemove, PTL_STREAM_DESC psSD);
+
+/*
+ * Transport Layer stream interface to server part declared here to avoid
+ * circular dependency.
+ */
+IMG_UINT32 TLStreamAcquireReadPos(PTL_STREAM psStream,
+                                  IMG_BOOL bDisableCallback,
+                                  IMG_UINT32* puiReadOffset);
+void TLStreamAdvanceReadPos(PTL_STREAM psStream, IMG_UINT32 uiReadLen);
+
+DEVMEM_MEMDESC* TLStreamGetBufferPointer(PTL_STREAM psStream);
+IMG_BOOL TLStreamEOS(PTL_STREAM psStream);
+
+/*****************************************************************************
+ Function Name  : TLStreamDestroy
+
+ Inputs         : PTL_STREAM  Pointer to the TL_STREAM to be destroyed
+
+ Description    : This function performs all the clean-up operations required
+                  for the destruction of this stream
+*****************************************************************************/
+void TLStreamDestroy(PTL_STREAM psStream);
+
+/*
+ * Test related functions
+ */
+PVRSRV_ERROR TUtilsInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR TUtilsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* __TLINTERN_H__ */
+/******************************************************************************
+ End of file (tlintern.h)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/tlserver.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/tlserver.h
new file mode 100644
index 0000000..3d22bed
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/tlserver.h
@@ -0,0 +1,100 @@
+/*************************************************************************/ /*!
+@File
+@Title          KM server Transport Layer implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main bridge APIs for Transport Layer client functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __TLSERVER_H_
+#define __TLSERVER_H_
+
+#include <stddef.h>
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+
+#include "tlintern.h"
+
+/*
+ * Transport Layer Client API Kernel-Mode bridge implementation
+ */
+
+PVRSRV_ERROR TLServerConnectKM(CONNECTION_DATA *psConnection);
+PVRSRV_ERROR TLServerDisconnectKM(CONNECTION_DATA *psConnection);
+
+PVRSRV_ERROR TLServerOpenStreamKM(const IMG_CHAR* pszName,
+			   IMG_UINT32 ui32Mode,
+			   PTL_STREAM_DESC* ppsSD,
+			   PMR** ppsTLPMR);
+
+PVRSRV_ERROR TLServerCloseStreamKM(PTL_STREAM_DESC psSD);
+
+PVRSRV_ERROR TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern,
+                          IMG_UINT32 ui32Max,
+                          IMG_CHAR *pszStreams,
+                          IMG_UINT32 *pui32NumFound);
+
+PVRSRV_ERROR TLServerReserveStreamKM(PTL_STREAM_DESC psSD,
+                                     IMG_UINT32* ui32BufferOffset,
+                                     IMG_UINT32 ui32Size,
+                                     IMG_UINT32 ui32SizeMin,
+                                     IMG_UINT32* pui32Available);
+
+PVRSRV_ERROR TLServerCommitStreamKM(PTL_STREAM_DESC psSD,
+                                    IMG_UINT32 ui32Size);
+
+PVRSRV_ERROR TLServerAcquireDataKM(PTL_STREAM_DESC psSD,
+			   IMG_UINT32* puiReadOffset,
+			   IMG_UINT32* puiReadLen);
+
+PVRSRV_ERROR TLServerReleaseDataKM(PTL_STREAM_DESC psSD,
+				 IMG_UINT32 uiReadOffset,
+				 IMG_UINT32 uiReadLen);
+
+PVRSRV_ERROR TLServerWriteDataKM(PTL_STREAM_DESC psSD,
+                                 IMG_UINT32 ui32Size,
+                                 IMG_BYTE *pui8Data);
+
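+/* Usage sketch (illustrative only): a reader-side loop over this bridge
+ * API - open a stream, drain it with acquire/release pairs, then close it.
+ * The stream name, ui32Mode and the loop-exit condition are hypothetical.
+ *
+ *   PTL_STREAM_DESC psSD;
+ *   PMR *psPMR;
+ *   IMG_UINT32 uiOffset, uiLen;
+ *
+ *   if (TLServerOpenStreamKM("hwperf", ui32Mode, &psSD, &psPMR) == PVRSRV_OK)
+ *   {
+ *       while (TLServerAcquireDataKM(psSD, &uiOffset, &uiLen) == PVRSRV_OK
+ *              && uiLen > 0)
+ *       {
+ *           // ... copy uiLen bytes at uiOffset out of the stream buffer ...
+ *           (void) TLServerReleaseDataKM(psSD, uiOffset, uiLen);
+ *       }
+ *       (void) TLServerCloseStreamKM(psSD);
+ *   }
+ */
+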
+#endif /* __TLSERVER_H_ */
+
+/*****************************************************************************
+ End of file (tlserver.h)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/tlstream.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/tlstream.h
new file mode 100644
index 0000000..a92d12d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/server/include/tlstream.h
@@ -0,0 +1,482 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer kernel side API.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    TL provides driver components with a way to copy data from kernel
+                space to user space (e.g. screen/file).
+
+                Data can be passed to the Transport Layer through the 
+                TL Stream (kernel space) API interface.
+
+                The buffer provided to every stream is a modified version of a 
+                circular buffer. Which CB version is created is specified by
+                the relevant flags when creating a stream. Currently three
+                types of buffer are available:
+                - TL_OPMODE_DROP_NEWER:
+                  When the buffer is full, incoming data are dropped 
+                  (instead of overwriting older data) and a marker is set 
+                  to let the user know that data have been lost.
+                - TL_OPMODE_BLOCK:
+                  When the circular buffer is full, reserve/write calls block
+                  until enough space is freed.
+                - TL_OPMODE_DROP_OLDEST:
+                  When the circular buffer is full, the oldest packets in the
+                  buffer are dropped and a flag is set in header of next packet
+                  to let the user know that data have been lost.
+
+                All size/space requests are in bytes. However, the actual
+                implementation uses native word sizes (i.e. 4 byte aligned).
+
+                The user does not need to provide space for the stream buffer 
+                as the TL handles memory allocations and usage.
+
+                Inserting data to a stream's buffer can be done either:
+                - by using TLReserve/TLCommit: User is provided with a buffer
+                                                 to write data to.
+                - or by using TLWrite:         User provides a buffer with 
+                                                 data to be committed. The TL 
+                                                 copies the data from the 
+                                                 buffer into the stream buffer 
+                                                 and returns.
+                Users should be aware that there are implementation overheads
+                associated with every stream buffer. If you find that less
+                data are captured than expected then try increasing the
+                stream buffer size or use TLStreamInfo to obtain buffer
+                parameters and calculate optimum required values at run time.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __TLSTREAM_H__
+#define __TLSTREAM_H__
+
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_tlcommon.h"
+#include "device.h"
+
+/*! Extract TL stream opmode from the given stream create flags.
+ * The last 3 bits of the stream flags are used for storing the opmode,
+ * hence the opmode mask is defined as follows. */
+#define TL_OPMODE_MASK 0x7
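+/* A minimal usage sketch, assuming a hypothetical variable
+ * ui32MyStreamFlags holding a stream's create flags; the opmode can be
+ * recovered with the mask:
+ *
+ *     TL_OPMODE eOpMode = (TL_OPMODE)(ui32MyStreamFlags & TL_OPMODE_MASK);
+ */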
+
+/*! Opmode specifying circular buffer behaviour */
+typedef enum
+{
+	/*! Undefined operation mode */
+	TL_OPMODE_UNDEF = 0,
+
+	/*! Reject new data if the buffer is full, producer may then decide to
+	 *    drop the data or retry after some time. */
+	TL_OPMODE_DROP_NEWER,
+
+	/*! Block Reserve (subsequently Write) calls if there is not enough space
+	 *    until some space is freed via a client read operation. */
+	TL_OPMODE_BLOCK,
+
+	/*! When buffer is full, advance the tail/read position to accept the new
+	 * reserve call (size permitting), effectively overwriting the oldest
+	 * data in the circular buffer. Not supported yet. */
+	TL_OPMODE_DROP_OLDEST,
+
+	/*! For error checking */
+	TL_OPMODE_LAST
+
+} TL_OPMODE;
+
+static_assert(TL_OPMODE_LAST <= TL_OPMODE_MASK,
+	      "TL_OPMODE_LAST must not exceed TL_OPMODE_MASK");
+
+/*! Flags specifying stream behaviour */
+/*! Do not destroy the stream if there are still data that have not been
+ *     copied to user space. Block until the stream is emptied. */
+#define TL_FLAG_FORCE_FLUSH            (1U<<8)
+/*! Do not signal consumers on commit automatically when the stream buffer
+ * transitions from empty to non-empty. The producer is responsible for
+ * signalling when it chooses (see TLStreamSync). */
+#define TL_FLAG_NO_SIGNAL_ON_COMMIT    (1U<<9)
+
+/*! Defer allocation of stream's shared memory until first open. */
+#define TL_FLAG_ALLOCATE_ON_FIRST_OPEN (1U<<10)
+
+/*! Structure used to pass internal TL stream sizes information to users.*/
+typedef struct _TL_STREAM_INFO_
+{
+    IMG_UINT32 headerSize;          /*!< Packet header size in bytes */
+    IMG_UINT32 minReservationSize;  /*!< Minimum data size reserved in bytes */
+    IMG_UINT32 pageSize;            /*!< Page size in bytes */
+    IMG_UINT32 pageAlign;           /*!< Page alignment in bytes */
+} TL_STREAM_INFO, *PTL_STREAM_INFO;
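+/* A minimal sizing sketch, assuming a hypothetical per-packet payload size
+ * uiPayloadBytes: query the elemental sizes with TLStreamInfo() and account
+ * for the per-packet header when choosing a stream buffer size:
+ *
+ *     TL_STREAM_INFO sInfo;
+ *     IMG_UINT32 uiPerPacket, uiBufferBytes;
+ *
+ *     TLStreamInfo(&sInfo);
+ *     uiPerPacket   = sInfo.headerSize + uiPayloadBytes;
+ *     uiBufferBytes = 64 * uiPerPacket;    (room for, e.g., 64 packets)
+ */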
+
+/*! Callback operations or notifications that a stream producer may handle
+ * when requested by the Transport Layer.
+ */
+#define TL_SOURCECB_OP_CLIENT_EOS 0x01  /*!< Client has reached end of stream;
+                                         * asks whether any more data can be
+                                         * supplied. ui32Resp is ignored for
+                                         * this operation. */
+
+/*! Function pointer type for the callback handler into the "producer" code
+ * that writes data to the TL stream. The producer should handle the
+ * notification or operation supplied in ui32ReqOp on stream hStream. The
+ * operations and notifications are defined above (TL_SOURCECB_OP_*). */
+typedef PVRSRV_ERROR (*TL_STREAM_SOURCECB)(IMG_HANDLE hStream,
+		IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser);
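+/* A minimal sketch of a producer callback matching this type; the name
+ * MyProducerCB and its behaviour are hypothetical:
+ *
+ *     static PVRSRV_ERROR MyProducerCB(IMG_HANDLE hStream,
+ *                                      IMG_UINT32 ui32ReqOp,
+ *                                      IMG_UINT32 *pui32Resp,
+ *                                      void *pvUser)
+ *     {
+ *         if (ui32ReqOp == TL_SOURCECB_OP_CLIENT_EOS)
+ *         {
+ *             (flush any pending producer-side data into the stream)
+ *         }
+ *         return PVRSRV_OK;
+ *     }
+ */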
+
+typedef void (*TL_STREAM_ONREADEROPENCB)(void *pvArg);
+
+/*************************************************************************/ /*!
+ @Function      TLAllocSharedMemIfNull
+ @Description   Allocates shared memory for the stream if none has been
+                allocated yet.
+ @Input         hStream     Stream handle.
+ @Return        eError      Internal services call returned eError error
+                            number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLAllocSharedMemIfNull(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLFreeSharedMem
+ @Description   Frees stream's shared memory.
+ @Input         hStream     Stream handle.
+*/ /**************************************************************************/
+void
+TLFreeSharedMem(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamCreate
+ @Description   Request the creation of a new stream and open a handle.
+                If creating a stream which should continue to exist after the
+                current context is finished, then TLStreamCreate must be
+                followed by a TLStreamOpen call. In any case, the number of
+                create/open calls must balance with the number of close calls
+                used. This ensures the resources of a stream are released when
+                it is no longer required.
+ @Output        phStream        Pointer to handle to store the new stream.
+ @Input         psDevNode       Pointer to the Device Node to be used for
+                                  stream allocation.
+ @Input         szStreamName    Name of stream, maximum length:
+                                  PRVSRVTL_MAX_STREAM_NAME_SIZE.
+                                  If a longer string is provided, creation
+                                  fails.
+ @Input         ui32Size        Desired buffer size in bytes.
+ @Input         ui32StreamFlags Flags that configure buffer behaviour. See
+                                  above.
+ @Input         pfOnReaderOpenCB  Optional callback called when a client opens
+                                    this stream, may be null.
+ @Input         pvOnReaderOpenUD  Optional user data for pfOnReaderOpenCB, may
+                                    be null.
+ @Input         pfProducerCB    Optional callback, may be null.
+ @Input         pvProducerUD    Optional user data for callback, may be null.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle or stream name
+                                               exceeding
+                                               PRVSRVTL_MAX_STREAM_NAME_SIZE.
+ @Return        PVRSRV_ERROR_OUT_OF_MEMORY   Failed to allocate space for
+                                               stream handle.
+ @Return        PVRSRV_ERROR_DUPLICATE_VALUE A stream with the same name
+                                               already exists.
+ @Return        eError                       Internal services call returned
+                                               eError error number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR 
+TLStreamCreate(IMG_HANDLE *phStream,
+               PVRSRV_DEVICE_NODE *psDevNode,
+               IMG_CHAR *szStreamName,
+               IMG_UINT32 ui32Size,
+               IMG_UINT32 ui32StreamFlags,
+               TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB,
+               void *pvOnReaderOpenUD,
+               TL_STREAM_SOURCECB pfProducerCB,
+               void *pvProducerUD);
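+/* A minimal creation sketch; the stream name "MYSTREAM", the 16 KiB size and
+ * the drop-newer opmode are illustrative values only, and psDevNode is
+ * assumed to be a valid device node:
+ *
+ *     IMG_HANDLE hMyStream;
+ *     PVRSRV_ERROR eErr = TLStreamCreate(&hMyStream, psDevNode, "MYSTREAM",
+ *                                        16 * 1024, TL_OPMODE_DROP_NEWER,
+ *                                        NULL, NULL, NULL, NULL);
+ */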
+
+/*************************************************************************/ /*!
+ @Function      TLStreamOpen
+ @Description   Attach to existing stream that has already been created by a
+                  TLStreamCreate call. A handle is returned to the stream.
+ @Output        phStream        Pointer to handle to store the stream.
+ @Input         szStreamName    Name of stream, should match an already
+                                  existing stream name
+ @Return        PVRSRV_ERROR_NOT_FOUND         None of the streams matched the
+                                                 requested stream name.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS    A non-NULL pointer to the
+                                                 stream handle is required.
+ @Return        PVRSRV_OK                      Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamOpen(IMG_HANDLE *phStream,
+             IMG_CHAR   *szStreamName);
+
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReset
+ @Description   Resets read and write pointers and pending flag.
+ @Input         hStream  Stream handle.
+*/ /**************************************************************************/
+void TLStreamReset(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamSetNotifStream
+ @Description   Registers a "notification stream" which will be used to publish
+                information about state change of the "hStream" stream.
+                Notification can inform about events such as stream open/close,
+                etc.
+ @Input         hStream         Handle to stream to update.
+ @Input         hNotifStream    Handle to the stream which will be used for
+                                publishing notifications.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS    if either of the parameters is
+                                               NULL
+ @Return        PVRSRV_OK                      Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream);
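+/* A minimal sketch: publish hStream's state changes into an already-created
+ * control stream (hCtlStream is a hypothetical handle); the open/close
+ * markers described further below are then emitted into it:
+ *
+ *     TLStreamSetNotifStream(hStream, hCtlStream);
+ */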
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReconfigure
+ @Description   Request the stream flags controlling buffer behaviour to
+                be updated.
+                In the case where TL_OPMODE_BLOCK is to be used,
+                TLStreamCreate should be called without that flag and this
+                function used to change the stream mode once a consumer process
+                has been started. This avoids a deadlock scenario where the
+                TLStreamWrite/TLStreamReserve call will hold the Bridge Lock
+                while blocking if the TL buffer is full.
+                The TL_OPMODE_BLOCK mode should never drop the Bridge Lock
+                as this leads to another deadlock scenario where the caller of
+                TLStreamWrite/TLStreamReserve has already acquired another lock
+                (e.g. gHandleLock) which is not dropped. This then leads to
+                that thread acquiring locks out of order.
+ @Input         hStream         Handle to stream to update.
+ @Input         ui32StreamFlags Flags that configure buffer behaviour. See above.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle or inconsistent
+                                             stream flags.
+ @Return        PVRSRV_ERROR_NOT_READY       Stream is currently being written
+                                             to; try again later.
+ @Return        eError                       Internal services call returned
+                                               eError error number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReconfigure(
+		IMG_HANDLE hStream,
+		IMG_UINT32 ui32StreamFlags);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamClose
+ @Description   Detach from the stream associated with the given handle. If
+                  the current handle is the last one accessing the stream
+                  (i.e. the number of TLStreamCreate+TLStreamOpen calls
+                  matches the number of TLStreamClose calls) then the stream
+                  is also deleted.
+                On return the handle is no longer valid.
+ @Input         hStream     Handle to stream that will be closed.
+ @Return        None.
+*/ /**************************************************************************/
+void
+TLStreamClose(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReserve
+ @Description   Reserve space in stream buffer. When successful every
+                  TLStreamReserve call must be followed by a matching
+                  TLStreamCommit call. While a TLStreamCommit call is pending
+                  for a stream, subsequent TLStreamReserve calls for this
+                  stream will fail.
+ @Input         hStream         Stream handle.
+ @Output        ppui8Data       Pointer to a pointer to a location in the 
+                                  buffer. The caller can then use this address
+                                  in writing data into the stream. 
+ @Input         ui32Size        Number of bytes to reserve in buffer.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return        PVRSRV_ERROR_NOT_READY      There are data previously reserved
+                                              that are pending to be committed.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE  Misusing the stream by trying to 
+                                              reserve more space than the 
+                                              buffer size.
+ @Return        PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG  The reserve size requested
+                                                     is larger than the free
+                                                     space or maximum supported
+                                                     packet size.
+ @Return        PVRSRV_OK                   Success, output arguments valid.
+*/ /**************************************************************************/
+PVRSRV_ERROR 
+TLStreamReserve(IMG_HANDLE hStream, 
+                IMG_UINT8  **ppui8Data,
+                IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReserve2
+ @Description   Reserve space in stream buffer. When successful every
+                  TLStreamReserve2 call must be followed by a matching
+                  TLStreamCommit call. While a TLStreamCommit call is pending
+                  for a stream, subsequent reserve calls for this
+                  stream will fail.
+ @Input         hStream         Stream handle.
+ @Output        ppui8Data       Pointer to a pointer to a location in the
+                                  buffer. The caller can then use this address
+                                  in writing data into the stream.
+ @Input         ui32Size        Ideal number of bytes to reserve in buffer.
+ @Input         ui32SizeMin     Minimum number of bytes to reserve in buffer.
+ @Input         pui32Available  Optional, but when present and the
+                                  RESERVE_TOO_BIG error is returned, a size
+                                  suggestion is returned in this argument which
+                                  the caller can attempt to reserve again for a
+                                  successful allocation.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return        PVRSRV_ERROR_NOT_READY      There are data previously reserved
+                                              that are pending to be committed.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE  Misusing the stream by trying to
+                                              reserve more space than the
+                                              buffer size.
+ @Return        PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG  The reserve size requested
+                                                     is larger than the free
+                                                     space or maximum supported
+                                                     packet size.
+                                                     Check the pui32Available
+                                                     value for the correct
+                                                     reserve size to use.
+ @Return        PVRSRV_OK                   Success, output arguments valid.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReserve2(IMG_HANDLE hStream,
+                IMG_UINT8  **ppui8Data,
+                IMG_UINT32 ui32Size,
+                IMG_UINT32 ui32SizeMin,
+                IMG_UINT32* pui32Available);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamCommit
+ @Description   Notify TL that data have been written in the stream buffer.
+                  Should always follow and match TLStreamReserve call.
+ @Input         hStream         Stream handle.
+ @Input         ui32Size        Number of bytes that have been added to the
+                                  stream.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE   Commit results in more data 
+                                               committed than the buffer size,
+                                               the stream is misused.
+ @Return        eError                       Commit was successful but 
+                                               internal services call returned
+                                               eError error number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR 
+TLStreamCommit(IMG_HANDLE hStream,
+               IMG_UINT32 ui32Size);
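+/* A minimal sketch of the reserve/commit pairing described above; hStream,
+ * pui8Payload and ui32Len are assumed to exist, and the byte copy stands in
+ * for whatever copy helper the surrounding code uses:
+ *
+ *     IMG_UINT8 *pui8Dest;
+ *     if (TLStreamReserve(hStream, &pui8Dest, ui32Len) == PVRSRV_OK)
+ *     {
+ *         (copy ui32Len bytes from pui8Payload to pui8Dest)
+ *         TLStreamCommit(hStream, ui32Len);
+ *     }
+ */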
+
+/*************************************************************************/ /*!
+ @Function      TLStreamWrite
+ @Description   Combined Reserve/Commit call. This function Reserves space in 
+                  the specified stream buffer, copies ui32Size bytes of data
+                  from the array pui8Src points to and Commits in an "atomic"
+                  style operation.
+ @Input         hStream         Stream handle.
+ @Input         pui8Src         Source to read data from.
+ @Input         ui32Size        Number of bytes to copy and commit.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        eError                       Error codes returned by either 
+                                               Reserve or Commit.
+ @Return        PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR 
+TLStreamWrite(IMG_HANDLE hStream, 
+              IMG_UINT8  *pui8Src,
+              IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamSync
+ @Description   Signal the consumer to start acquiring data from the stream
+                buffer. Called by producers that use the
+                TL_FLAG_NO_SIGNAL_ON_COMMIT flag to manually control when
+                consumers start reading the stream. Used when multiple small
+                writes need to be batched.
+ @Input         hStream         Stream handle.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        eError                       Error codes returned by either
+                                               Reserve or Commit.
+ @Return        PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamSync(IMG_HANDLE hStream);
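+/* A minimal batching sketch, assuming the stream was created with
+ * TL_FLAG_NO_SIGNAL_ON_COMMIT and that i, uiNumMsgs, apui8Msg and
+ * aui32MsgLen are hypothetical variables describing pending writes:
+ *
+ *     for (i = 0; i < uiNumMsgs; i++)
+ *     {
+ *         TLStreamWrite(hStream, apui8Msg[i], aui32MsgLen[i]);
+ *     }
+ *     TLStreamSync(hStream);    (wake consumers once for the whole batch)
+ */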
+
+
+/*************************************************************************/ /*!
+ @Function      TLStreamMarkEOS
+ @Description   Insert an EOS marker packet in the given stream.
+ @Input         hStream         Stream handle.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        eError                       Error codes returned by either
+                                               Reserve or Commit.
+ @Return        PVRSRV_OK                    Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR 
+TLStreamMarkEOS(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+@Function       TLStreamMarkStreamOpen
+@Description    Puts an *open* stream packet into hStream's notification
+                stream, if set; returns an error otherwise.
+@Input          hStream Stream handle.
+@Return         PVRSRV_OK on success and error code on failure
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamMarkStreamOpen(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+@Function       TLStreamMarkStreamClose
+@Description    Puts a *close* stream packet into hStream's notification
+                stream, if set; returns an error otherwise.
+@Input          hStream Stream handle.
+@Return         PVRSRV_OK on success and error code on failure
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamMarkStreamClose(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamInfo
+ @Description   Provides run-time information about the stream buffer's
+                elemental sizes. Sets the psInfo members accordingly. Users
+                can use those values to calculate the parameters they pass to
+                TLStreamCreate and TLStreamReserve.
+ @Output        psInfo          pointer to stream info structure.
+ @Return        None.
+*/ /**************************************************************************/
+void
+TLStreamInfo(PTL_STREAM_INFO psInfo);
+
+
+#endif /* __TLSTREAM_H__ */
+/*****************************************************************************
+ End of file (tlstream.h)
+*****************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/devicemem.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/devicemem.c
new file mode 100644
index 0000000..af17ca8
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/devicemem.c
@@ -0,0 +1,2922 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Front End (nominally Client side part, but now invokable
+                from server too) of device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+
+#include "devicemem.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "ra.h"
+#include "osfunc.h"
+#include "osmmap.h"
+#include "devicemem_utils.h"
+#include "client_mm_bridge.h"
+#include "client_cache_bridge.h"
+#include "services_km.h"
+
+#if defined(PDUMP)
+#if defined(__KERNEL__)
+#include "pdump_km.h"
+#else
+#include "client_pdump_bridge.h"
+#endif
+#include "devicemem_pdump.h"
+#endif
+#if defined(PVR_RI_DEBUG)
+#include "client_ri_bridge.h"
+#endif
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "client_devicememhistory_bridge.h"
+#endif
+
+#include "rgx_heaps.h"
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#include "rgxdefs_km.h"
+#include "rgx_bvnc_defs_km.h"
+#if defined(LINUX)
+#include "linux/kernel.h"
+#endif
+#else
+#include "rgxdefs.h"
+#endif
+
+#if defined(__KERNEL__) && defined(PVR_RI_DEBUG)
+extern PVRSRV_ERROR RIDumpAllKM(void);
+#endif
+
+#if defined(__KERNEL__)
+#define GET_ERROR_STRING(eError) PVRSRVGetErrorStringKM(eError)
+#else
+#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError)
+#endif
+/*****************************************************************************
+ *                    Sub allocation internals                               *
+ *****************************************************************************/
+
+static PVRSRV_ERROR
+_AllocateDeviceMemory(SHARED_DEV_CONNECTION hDevConnection,
+                      IMG_UINT32 uiLog2Quantum,
+                      IMG_DEVMEM_SIZE_T uiSize,
+                      IMG_DEVMEM_SIZE_T uiChunkSize,
+                      IMG_UINT32 ui32NumPhysChunks,
+                      IMG_UINT32 ui32NumVirtChunks,
+                      IMG_UINT32 *pui32MappingTable,
+                      IMG_DEVMEM_ALIGN_T uiAlign,
+                      DEVMEM_FLAGS_T uiFlags,
+                      IMG_BOOL bExportable,
+                      const IMG_CHAR *pszAnnotation,
+                      DEVMEM_IMPORT **ppsImport)
+{
+	DEVMEM_IMPORT *psImport;
+	DEVMEM_FLAGS_T uiPMRFlags;
+	IMG_HANDLE hPMR;
+	PVRSRV_ERROR eError;
+
+	eError = _DevmemImportStructAlloc(hDevConnection,
+									  &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failAlloc;
+	}
+
+    /* Check the size is a multiple of the quantum */
+    PVR_ASSERT((uiSize & ((1ULL<<uiLog2Quantum)-1)) == 0);
+
+	/* Pass only the PMR flags down */
+	uiPMRFlags = uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK;
+    eError = BridgePhysmemNewRamBackedPMR(hDevConnection,
+                                          uiSize,
+                                          uiChunkSize,
+                                          ui32NumPhysChunks,
+                                          ui32NumVirtChunks,
+                                          pui32MappingTable,
+                                          uiLog2Quantum,
+                                          uiPMRFlags,
+#if defined(PDUMP)
+                                          OSStringLength(pszAnnotation) + 1,
+                                          pszAnnotation,
+                                          &hPMR);
+#else
+                                          1,
+                                          "",
+                                          &hPMR);
+
+	PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+#endif
+
+
+    if (eError != PVRSRV_OK)
+    {
+    	PVR_DPF((PVR_DBG_ERROR,
+    			"%s: Failed to allocate memory for %s (%s)",
+    			__func__,
+    			pszAnnotation,
+    			GET_ERROR_STRING(eError)));
+        goto failPMR;
+    }
+
+    _DevmemImportStructInit(psImport,
+							uiSize,
+							uiAlign,
+							uiFlags,
+							hPMR,
+							bExportable ? DEVMEM_PROPERTIES_EXPORTABLE : 0);
+
+	*ppsImport = psImport;
+	return PVRSRV_OK;
+
+failPMR:
+	_DevmemImportDiscard(psImport);
+failAlloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+
+/*****************************************************************************
+ *                    Sub allocation internals                               *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *paui32AllocPageIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pauiFreePageIndices,
+                      SPARSE_MEM_RESIZE_FLAGS uiSparseFlags)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+	DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+	SHARED_DEV_CONNECTION hDevConnection;
+	IMG_HANDLE hPMR;
+	IMG_HANDLE hSrvDevMemHeap;
+	POS_LOCK hLock;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_CPU_VIRTADDR sCpuVAddr;
+
+	if (NULL == psImport)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Sparse memory import", __func__));
+		goto e0;
+	}
+
+	hDevConnection = psImport->hDevConnection;
+	hPMR = psImport->hPMR;
+	hLock = psImport->hLock;
+	sDevVAddr = psImport->sDeviceImport.sDevVAddr;
+	sCpuVAddr = psImport->sCPUImport.pvCPUVAddr;
+
+	if (NULL == hDevConnection)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Bridge handle", __func__));
+		goto e0;
+	}
+
+	if (NULL == hPMR)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid PMR handle", __func__));
+		goto e0;
+	}
+
+	if ((uiSparseFlags & SPARSE_RESIZE_BOTH) && (0 == sDevVAddr.uiAddr))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Device Virtual Map", __func__));
+		goto e0;
+	}
+
+	if ((uiSparseFlags & SPARSE_MAP_CPU_ADDR) && (0 == sCpuVAddr))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CPU Virtual Map", __func__));
+		goto e0;
+	}
+
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SECURE)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Secure buffers currently do not support sparse changes",
+				__func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	hSrvDevMemHeap = psImport->sDeviceImport.psHeap->hDevMemServerHeap;
+
+	OSLockAcquire(hLock);
+
+	eError = BridgeChangeSparseMem(hDevConnection,
+	                               hSrvDevMemHeap,
+	                               hPMR,
+	                               ui32AllocPageCount,
+	                               paui32AllocPageIndices,
+	                               ui32FreePageCount,
+	                               pauiFreePageIndices,
+	                               uiSparseFlags,
+	                               psImport->uiFlags,
+	                               sDevVAddr,
+	                               (IMG_UINT64)((uintptr_t)sCpuVAddr));
+
+	OSLockRelease(hLock);
+
+#if defined(PVR_RI_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+	{
+		BridgeRIUpdateMEMDESCBacking(psImport->hDevConnection,
+		                             psMemDesc->hRIHandle,
+		                             ((IMG_INT32) ui32AllocPageCount - (IMG_INT32) ui32FreePageCount)
+		                              * (1 << psImport->sDeviceImport.psHeap->uiLog2Quantum));
+	}
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+	{
+		BridgeDevicememHistorySparseChange(psMemDesc->psImport->hDevConnection,
+							psMemDesc->psImport->hPMR,
+							psMemDesc->uiOffset,
+							psMemDesc->sDeviceMemDesc.sDevVAddr,
+							psMemDesc->uiAllocSize,
+							psMemDesc->sTraceData.szText,
+							DevmemGetHeapLog2PageSize(psImport->sDeviceImport.psHeap),
+							ui32AllocPageCount,
+							paui32AllocPageIndices,
+							ui32FreePageCount,
+							pauiFreePageIndices,
+							psMemDesc->sTraceData.ui32AllocationIndex,
+							&psMemDesc->sTraceData.ui32AllocationIndex);
+	}
+#endif
+
+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+	if ((PVRSRV_OK == eError) && (psMemDesc->sCPUMemDesc.ui32RefCount))
+	{
+		/*
+		 * Release the CPU virtual mapping here;
+		 * the caller is supposed to map the entire range again.
+		 */
+		DevmemReleaseCpuVirtAddr(psMemDesc);
+	}
+#endif
+
+e0:
+	return eError;
+}
+
+static void
+_FreeDeviceMemory(DEVMEM_IMPORT *psImport)
+{
+	_DevmemImportStructRelease(psImport);
+}
+
+static PVRSRV_ERROR
+_SubAllocImportAlloc(RA_PERARENA_HANDLE hArena,
+                     RA_LENGTH_T uiSize,
+                     RA_FLAGS_T _flags,
+                     const IMG_CHAR *pszAnnotation,
+                     /* returned data */
+                     RA_BASE_T *puiBase,
+                     RA_LENGTH_T *puiActualSize,
+                     RA_PERISPAN_HANDLE *phImport)
+{
+	/* When suballocations need a new lump of memory, the RA calls
+	   back here.  Later, in the kernel, we must construct a new PMR
+	   and a pairing between the new lump of virtual memory and the
+	   PMR (whether or not such PMR is backed by physical memory) */
+	DEVMEM_HEAP *psHeap;
+	DEVMEM_IMPORT *psImport;
+	IMG_DEVMEM_ALIGN_T uiAlign;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32MappingTable = 0;
+	DEVMEM_FLAGS_T uiFlags = (DEVMEM_FLAGS_T) _flags;
+	IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS;
+
+	/* Per-arena private handle is, for us, the heap */
+	psHeap = hArena;
+
+	/* align to the l.s.b. of the size...  e.g. 96kiB aligned to
+	   32kiB. NB: There is an argument to say that the RA should never
+	   ask us for Non-power-of-2 size anyway, but I don't want to make
+	   that restriction arbitrarily now */
+	uiAlign = uiSize & ~(uiSize-1);
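+	/* uiSize & ~(uiSize-1) isolates the least-significant set bit: for
+	   96 KiB (0x18000) this yields 32 KiB (0x8000). */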
+
+	/* Technically this is only required for guest drivers due to
+	   fw heaps being pre-allocated and pre-mapped resulting in
+	   a 1:1 (i.e. virtual : physical) offset correlation but we
+	   force this behaviour for all drivers to maintain consistency
+	   (i.e. heap->VA uiAlign <= heap->PA uiLog2Quantum) */
+	if (uiAlign > (IMG_DEVMEM_ALIGN_T)(1 << psHeap->uiLog2Quantum))
+	{
+		uiAlign = (IMG_DEVMEM_ALIGN_T)(1 << psHeap->uiLog2Quantum);
+	}
+
+	/* The RA should not have invoked us with a size that is not a
+	   multiple of the quantum anyway */
+	PVR_ASSERT((uiSize & ((1ULL<<psHeap->uiLog2Quantum)-1)) == 0);
+
+	eError = _AllocateDeviceMemory(psHeap->psCtx->hDevConnection,
+	                               psHeap->uiLog2Quantum,
+	                               uiSize,
+	                               uiSize,
+	                               1,
+	                               1,
+	                               &ui32MappingTable,
+	                               uiAlign,
+	                               uiFlags,
+	                               IMG_FALSE,
+	                               pszAnnotation,
+	                               &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failAlloc;
+	}
+
+#if defined (PDUMP)
+	/* Keep the annotation in the Devmem layer so we know where suballocations were done from */
+	psImport->pszAnnotation = OSAllocMem(OSStringLength(pszAnnotation)+1);
+	if (psImport->pszAnnotation == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto failAllocMem;
+	}
+	OSStringNCopy(psImport->pszAnnotation, pszAnnotation, OSStringLength(pszAnnotation)+1);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+	{
+		eError = BridgeRIWritePMREntry (psImport->hDevConnection,
+										psImport->hPMR,
+										sizeof("PMR sub-allocated"),
+										"PMR sub-allocated",
+										psImport->uiSize);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#endif
+
+	/*
+		Suballocations always get mapped into the device as we need to
+		key the RA off something, and as we can't export suballocations
+		there is no valid reason to request an allocation and not map it
+	*/
+	eError = _DevmemImportStructDevMap(psHeap,
+									   IMG_TRUE,
+									   psImport,
+									   ui64OptionalMapAddress);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMap;
+	}
+
+	/* Mark this import struct as zeroed so we can save some PDump LDBs
+	 * and do not have to CPU map + memset(). */
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+	{
+		psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_ZEROED;
+	}
+	psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_CLEAN;
+
+	*puiBase = psImport->sDeviceImport.sDevVAddr.uiAddr;
+	*puiActualSize = uiSize;
+	*phImport = psImport;
+
+	return PVRSRV_OK;
+
+	/*
+	  error exit paths follow
+	*/
+failMap:
+#if defined(PDUMP)
+failAllocMem:
+	OSFreeMem(psImport->pszAnnotation);
+	psImport->pszAnnotation = NULL;
+#endif
+	_FreeDeviceMemory(psImport);
+failAlloc:
+
+	return eError;
+}
+
+static void
+_SubAllocImportFree(RA_PERARENA_HANDLE hArena,
+                    RA_BASE_T uiBase,
+                    RA_PERISPAN_HANDLE hImport)
+{
+	DEVMEM_IMPORT *psImport = hImport;
+
+	PVR_ASSERT(psImport != NULL);
+	PVR_ASSERT(hArena == psImport->sDeviceImport.psHeap);
+	PVR_ASSERT(uiBase == psImport->sDeviceImport.sDevVAddr.uiAddr);
+
+	_DevmemImportStructDevUnmap(psImport);
+	_DevmemImportStructRelease(psImport);
+}
+
+/*****************************************************************************
+ *                    Devmem context internals                               *
+ *****************************************************************************/
+
+static PVRSRV_ERROR
+_PopulateContextFromBlueprint(struct _DEVMEM_CONTEXT_ *psCtx,
+                              DEVMEM_HEAPCFGID uiHeapBlueprintID)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eError2;
+	struct _DEVMEM_HEAP_ **ppsHeapArray;
+	IMG_UINT32 uiNumHeaps;
+	IMG_UINT32 uiHeapsToUnwindOnError;
+	IMG_UINT32 uiHeapIndex;
+	IMG_DEV_VIRTADDR sDevVAddrBase;
+	IMG_CHAR aszHeapName[DEVMEM_HEAPNAME_MAXLENGTH];
+	IMG_DEVMEM_SIZE_T uiHeapLength;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2DataPageSize;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2ImportAlignment;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2TilingStrideFactor;
+
+    eError = DevmemHeapCount(psCtx->hDevConnection,
+                             uiHeapBlueprintID,
+                             &uiNumHeaps);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    if (uiNumHeaps == 0)
+    {
+        ppsHeapArray = NULL;
+    }
+    else
+    {
+        ppsHeapArray = OSAllocMem(sizeof(*ppsHeapArray) * uiNumHeaps);
+        if (ppsHeapArray == NULL)
+        {
+            eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+            goto e0;
+        }
+    }
+
+    uiHeapsToUnwindOnError = 0;
+
+    for (uiHeapIndex = 0; uiHeapIndex < uiNumHeaps; uiHeapIndex++)
+    {
+        eError = DevmemHeapDetails(psCtx->hDevConnection,
+                                   uiHeapBlueprintID,
+                                   uiHeapIndex,
+                                   &aszHeapName[0],
+                                   sizeof(aszHeapName),
+                                   &sDevVAddrBase,
+                                   &uiHeapLength,
+                                   &uiLog2DataPageSize,
+                                   &uiLog2ImportAlignment,
+                                   &uiLog2TilingStrideFactor);
+        if (eError != PVRSRV_OK)
+        {
+            goto e1;
+        }
+
+        eError = DevmemCreateHeap(psCtx,
+                                  sDevVAddrBase,
+                                  uiHeapLength,
+                                  uiLog2DataPageSize,
+                                  uiLog2ImportAlignment,
+                                  uiLog2TilingStrideFactor,
+                                  aszHeapName,
+                                  uiHeapBlueprintID,
+                                  &ppsHeapArray[uiHeapIndex]);
+        if (eError != PVRSRV_OK)
+        {
+            goto e1;
+        }
+
+        uiHeapsToUnwindOnError = uiHeapIndex + 1;
+    }
+
+    psCtx->uiAutoHeapCount = uiNumHeaps;
+    psCtx->ppsAutoHeapArray = ppsHeapArray;
+
+    PVR_ASSERT(psCtx->uiNumHeaps >= psCtx->uiAutoHeapCount);
+    PVR_ASSERT(psCtx->uiAutoHeapCount == uiNumHeaps);
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths
+    */
+ e1:
+    for (uiHeapIndex = 0; uiHeapIndex < uiHeapsToUnwindOnError; uiHeapIndex++)
+    {
+        eError2 = DevmemDestroyHeap(ppsHeapArray[uiHeapIndex]);
+        PVR_ASSERT(eError2 == PVRSRV_OK);
+    }
+
+    if (uiNumHeaps != 0)
+    {
+        OSFreeMem(ppsHeapArray);
+    }
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+static PVRSRV_ERROR
+_UnpopulateContextFromBlueprint(struct _DEVMEM_CONTEXT_ *psCtx)
+{
+	PVRSRV_ERROR eReturn = PVRSRV_OK;
+	PVRSRV_ERROR eError2;
+	IMG_UINT32 uiHeapIndex;
+	IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__)
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		bDoCheck = IMG_FALSE;
+	}
+#endif
+
+	for (uiHeapIndex = 0; uiHeapIndex < psCtx->uiAutoHeapCount; uiHeapIndex++)
+	{
+		if (!psCtx->ppsAutoHeapArray[uiHeapIndex])
+		{
+			continue;
+		}
+
+		eError2 = DevmemDestroyHeap(psCtx->ppsAutoHeapArray[uiHeapIndex]);
+		if (eError2 != PVRSRV_OK)
+		{
+			eReturn = eError2;
+		}
+		else
+		{
+			psCtx->ppsAutoHeapArray[uiHeapIndex] = NULL;
+		}
+	}
+
+	if ((!bDoCheck || (eReturn == PVRSRV_OK)) && psCtx->ppsAutoHeapArray)
+	{
+		OSFreeMem(psCtx->ppsAutoHeapArray);
+		psCtx->ppsAutoHeapArray = NULL;
+		psCtx->uiAutoHeapCount = 0;
+	}
+
+	return eReturn;
+}
+
+static PVRSRV_ERROR
+_AllocateMCUFenceAddress(struct _DEVMEM_CONTEXT_ *psCtx)
+{
+    PVRSRV_ERROR		eError;
+    DEVMEM_HEAP			*psGeneralHeap;
+    IMG_DEV_VIRTADDR	sTempMCUFenceAddr;
+
+	eError = DevmemFindHeapByName(psCtx, RGX_GENERAL_HEAP_IDENT, &psGeneralHeap);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: General Heap not found (%s)", __func__, GET_ERROR_STRING(eError)));
+		goto e0;
+	}
+
+	eError = DevmemAllocate(psGeneralHeap,
+							sizeof(IMG_UINT32),
+							RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE,
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+							"MCUFence:  Fixed address reserved per Memory Context",
+							&psCtx->psMCUFenceMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate MCU fence word (%s)", __func__, GET_ERROR_STRING(eError)));
+		goto e0;
+	}
+
+	/* This is the first memory allocation on General Heap so its virtual address
+	 * is always equal to heap base address. Storing this address separately is not required. */
+	eError = DevmemMapToDevice(psCtx->psMCUFenceMemDesc, psGeneralHeap, &sTempMCUFenceAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map MCU fence word (%s)", __func__, GET_ERROR_STRING(eError)));
+		goto e1;
+	}
+	else if (sTempMCUFenceAddr.uiAddr != psGeneralHeap->sBaseAddress.uiAddr)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: MCU_FENCE address (%llx) not at the start of General Heap (%llx)",
+				__func__, (long long unsigned) sTempMCUFenceAddr.uiAddr,
+				(long long unsigned) psGeneralHeap->sBaseAddress.uiAddr));
+		eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED;
+		goto e1;
+	}
+
+e0:
+	return eError;
+
+e1:
+	DevmemFree(psCtx->psMCUFenceMemDesc);
+	psCtx->psMCUFenceMemDesc = NULL;
+	return eError;
+}
+
+/*****************************************************************************
+ *                    Devmem context functions                               *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection,
+                    DEVMEM_HEAPCFGID uiHeapBlueprintID,
+                   DEVMEM_CONTEXT **ppsCtxPtr)
+{
+    PVRSRV_ERROR		eError;
+    DEVMEM_CONTEXT		*psCtx;
+    /* handle to the server-side counterpart of the device memory
+       context (specifically, for handling mapping to device MMU) */
+    IMG_HANDLE			hDevMemServerContext;
+    IMG_HANDLE			hPrivData;
+    IMG_BOOL			bHeapCfgMetaId = (uiHeapBlueprintID == DEVMEM_HEAPCFG_META);
+
+    if (ppsCtxPtr == NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto e0;
+    }
+
+    psCtx = OSAllocMem(sizeof *psCtx);
+    if (psCtx == NULL)
+    {
+        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto e0;
+    }
+
+    psCtx->uiNumHeaps = 0;
+
+    psCtx->hDevConnection = hDevConnection;
+
+    /* Create (server-side) Device Memory context */
+    eError = BridgeDevmemIntCtxCreate(psCtx->hDevConnection,
+                                      bHeapCfgMetaId,
+                                      &hDevMemServerContext,
+                                      &hPrivData,
+                                      &psCtx->ui32CPUCacheLineSize);
+    if (eError != PVRSRV_OK) goto e1;
+
+    psCtx->hDevMemServerContext = hDevMemServerContext;
+    psCtx->hPrivData = hPrivData;
+
+    /* automagic heap creation */
+    psCtx->uiAutoHeapCount = 0;
+
+    eError = _PopulateContextFromBlueprint(psCtx, uiHeapBlueprintID);
+    if (eError != PVRSRV_OK) goto e2;
+
+    /* Allocate a word at the start of the General heap to be used as the MCU_FENCE address */
+    if (uiHeapBlueprintID == DEVMEM_HEAPCFG_FORCLIENTS)
+    {
+    	eError = _AllocateMCUFenceAddress(psCtx);
+    	if (eError != PVRSRV_OK) goto e2;
+    }
+    else
+    {
+    	psCtx->psMCUFenceMemDesc = NULL;
+    }
+
+    *ppsCtxPtr = psCtx;
+
+    PVR_ASSERT(psCtx->uiNumHeaps == psCtx->uiAutoHeapCount);
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e2:
+    PVR_ASSERT(psCtx->uiAutoHeapCount == 0);
+    PVR_ASSERT(psCtx->uiNumHeaps == 0);
+    BridgeDevmemIntCtxDestroy(psCtx->hDevConnection, hDevMemServerContext);
+
+ e1:
+    OSFreeMem(psCtx);
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
+                         IMG_HANDLE *hPrivData)
+{
+	PVRSRV_ERROR eError;
+
+	if ((psCtx == NULL) || (hPrivData == NULL))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	*hPrivData = psCtx->hPrivData;
+	return PVRSRV_OK;
+
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx)
+{
+	PVRSRV_ERROR eError;
+
+	if (psCtx == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+	return PVRSRV_OK;
+
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemFindHeapByName(const struct _DEVMEM_CONTEXT_ *psCtx,
+                     const IMG_CHAR *pszHeapName,
+                     struct _DEVMEM_HEAP_ **ppsHeapRet)
+{
+    IMG_UINT32 uiHeapIndex;
+
+    /* N.B.  This func is only useful for finding "automagic" heaps by name */
+    for (uiHeapIndex = 0;
+         uiHeapIndex < psCtx->uiAutoHeapCount;
+         uiHeapIndex++)
+    {
+        if (!OSStringCompare(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName, pszHeapName))
+        {
+            *ppsHeapRet = psCtx->ppsAutoHeapArray[uiHeapIndex];
+            return PVRSRV_OK;
+        }
+    }
+
+    return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx)
+{
+	PVRSRV_ERROR eError;
+	IMG_BOOL bDoCheck = IMG_TRUE;
+
+#if defined(__KERNEL__)
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		bDoCheck = IMG_FALSE;
+	}
+#endif
+
+	if (psCtx == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (psCtx->psMCUFenceMemDesc != NULL)
+	{
+		DevmemReleaseDevVirtAddr(psCtx->psMCUFenceMemDesc);
+		DevmemFree(psCtx->psMCUFenceMemDesc);
+	}
+
+	eError = _UnpopulateContextFromBlueprint(psCtx);
+	if (bDoCheck && eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: _UnpopulateContextFromBlueprint failed (%d) leaving %d heaps",
+		          __func__, eError, psCtx->uiNumHeaps));
+		goto e1;
+	}
+
+	eError = BridgeDevmemIntCtxDestroy(psCtx->hDevConnection,
+	                                   psCtx->hDevMemServerContext);
+	if (bDoCheck && eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: BridgeDevmemIntCtxDestroy failed (%d)",
+		          __func__, eError));
+		goto e1;
+	}
+
+	/* should be no more heaps left */
+	if (bDoCheck && psCtx->uiNumHeaps)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Additional heaps remain in DEVMEM_CONTEXT",
+		          __func__));
+		eError = PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT;
+		goto e1;
+	}
+
+	OSDeviceMemSet(psCtx, 0, sizeof(*psCtx));
+	OSFreeMem(psCtx);
+
+e1:
+	return eError;
+}
+
+/*****************************************************************************
+ *                 Devmem heap query functions                               *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection,
+                      IMG_UINT32 *puiNumHeapConfigsOut)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgeHeapCfgHeapConfigCount(hDevConnection,
+                                          puiNumHeapConfigsOut);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection,
+                IMG_UINT32 uiHeapConfigIndex,
+                IMG_UINT32 *puiNumHeapsOut)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgeHeapCfgHeapCount(hDevConnection,
+                                    uiHeapConfigIndex,
+                                    puiNumHeapsOut);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection,
+                     IMG_UINT32 uiHeapConfigIndex,
+                     IMG_CHAR *pszConfigNameOut,
+                     IMG_UINT32 uiConfigNameBufSz)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgeHeapCfgHeapConfigName(hDevConnection,
+                                         uiHeapConfigIndex,
+                                         uiConfigNameBufSz,
+                                         pszConfigNameOut);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection,
+                  IMG_UINT32 uiHeapConfigIndex,
+                  IMG_UINT32 uiHeapIndex,
+                  IMG_CHAR *pszHeapNameOut,
+                  IMG_UINT32 uiHeapNameBufSz,
+                  IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+                  IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+                  IMG_UINT32 *puiLog2DataPageSizeOut,
+                  IMG_UINT32 *puiLog2ImportAlignmentOut,
+                  IMG_UINT32 *puiLog2TilingStrideFactor)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgeHeapCfgHeapDetails(hDevConnection,
+                                      uiHeapConfigIndex,
+                                      uiHeapIndex,
+                                      uiHeapNameBufSz,
+                                      pszHeapNameOut,
+                                      psDevVAddrBaseOut,
+                                      puiHeapLengthOut,
+                                      puiLog2DataPageSizeOut,
+                                      puiLog2ImportAlignmentOut,
+                                      puiLog2TilingStrideFactor);
+
+    VG_MARK_INITIALIZED(pszHeapNameOut,uiHeapNameBufSz);
+
+    return eError;
+}
+
+/*****************************************************************************
+ *                    Devmem heap functions                                  *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapInt(DEVMEM_HEAP *psHeap,
+				 IMG_HANDLE *phDevmemHeap)
+{
+	if (psHeap == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	*phDevmemHeap  = psHeap->hDevMemServerHeap;
+	return PVRSRV_OK;
+}
+
+/* See devicemem.h for important notes regarding the arguments
+   to this function */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemCreateHeap(DEVMEM_CONTEXT *psCtx,
+                 IMG_DEV_VIRTADDR sBaseAddress,
+                 IMG_DEVMEM_SIZE_T uiLength,
+                 IMG_UINT32 ui32Log2Quantum,
+                 IMG_UINT32 ui32Log2ImportAlignment,
+                 IMG_UINT32 ui32Log2TilingStrideFactor,
+                 const IMG_CHAR *pszName,
+                 DEVMEM_HEAPCFGID uiHeapBlueprintID,
+                 DEVMEM_HEAP **ppsHeapPtr)
+{
+    PVRSRV_ERROR eError = PVRSRV_OK;
+    PVRSRV_ERROR eError2;
+    DEVMEM_HEAP *psHeap;
+    /* handle to the server-side counterpart of the device memory
+       heap (specifically, for handling mapping to device MMU */
+    IMG_HANDLE hDevMemServerHeap;
+    IMG_BOOL bRANoSplit = IMG_FALSE;
+
+    IMG_CHAR aszBuf[100];
+    IMG_CHAR *pszStr;
+
+    if (ppsHeapPtr == NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto e0;
+    }
+
+    psHeap = OSAllocMem(sizeof *psHeap);
+    if (psHeap == NULL)
+    {
+        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto e0;
+    }
+
+    /* Need to keep local copy of heap name, so caller may free
+       theirs */
+    pszStr = OSAllocMem(OSStringLength(pszName)+1);
+    if (pszStr == NULL)
+    {
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto e1;
+    }
+    OSStringCopy(pszStr, pszName);
+    psHeap->pszName = pszStr;
+
+    psHeap->uiSize = uiLength;
+    psHeap->sBaseAddress = sBaseAddress;
+    OSAtomicWrite(&psHeap->hImportCount,0);
+
+    OSSNPrintf(aszBuf, sizeof(aszBuf),
+               "NDM heap '%s' (suballocs) ctx:%p",
+               pszName, psCtx);
+    pszStr = OSAllocMem(OSStringLength(aszBuf)+1);
+    if (pszStr == NULL)
+    {
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto e2;
+    }
+    OSStringCopy(pszStr, aszBuf);
+    psHeap->pszSubAllocRAName = pszStr;
+
+#if defined(PDUMP)
+    /* the META heap is shared globally so a single
+     * physical memory import may be used to satisfy
+     * allocations of different processes.
+     * This is problematic when PDumping because the
+     * physical memory import used to satisfy a new allocation
+     * may actually have been imported (and thus the PDump MALLOC
+     * generated) before the PDump client was started, leading to the
+     * MALLOC being missing.
+     * This is solved by disabling splitting of imports for the META physmem
+     * RA, meaning that every firmware allocation gets its own import, thus
+     * ensuring the MALLOC is present for every allocation made within the
+     * pdump capture range
+     */
+    if(uiHeapBlueprintID == DEVMEM_HEAPCFG_META)
+    {
+    	bRANoSplit = IMG_TRUE;
+    }
+#else
+    PVR_UNREFERENCED_PARAMETER(uiHeapBlueprintID);
+#endif
+
+
+    psHeap->psSubAllocRA = RA_Create(psHeap->pszSubAllocRAName,
+                       /* Subsequent imports: */
+                       ui32Log2Quantum,
+					   RA_LOCKCLASS_2,
+                       _SubAllocImportAlloc,
+                       _SubAllocImportFree,
+                       (RA_PERARENA_HANDLE) psHeap,
+                       bRANoSplit);
+    if (psHeap->psSubAllocRA == NULL)
+    {
+        eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+        goto e3;
+    }
+
+	psHeap->uiLog2ImportAlignment = ui32Log2ImportAlignment;
+	psHeap->uiLog2TilingStrideFactor = ui32Log2TilingStrideFactor;
+	psHeap->uiLog2Quantum = ui32Log2Quantum;
+
+	if (OSStringCompare(pszName, RGX_GENERAL_SVM_HEAP_IDENT) == 0)
+	{
+		/* The SVM heap normally starts out as this type though
+		   it may transition to DEVMEM_HEAP_TYPE_USER_MANAGED
+		   on platforms with more processor virtual address
+		   bits than device virtual address bits */
+		psHeap->eHeapType = DEVMEM_HEAP_TYPE_KERNEL_MANAGED;
+	}
+	else
+	{
+		psHeap->eHeapType = DEVMEM_HEAP_TYPE_UNKNOWN;
+	}
+
+	OSSNPrintf(aszBuf, sizeof(aszBuf),
+				"NDM heap '%s' (QVM) ctx:%p",
+				pszName, psCtx);
+	pszStr = OSAllocMem(OSStringLength(aszBuf)+1);
+	if (pszStr == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e4;
+	}
+	OSStringCopy(pszStr, aszBuf);
+	psHeap->pszQuantizedVMRAName = pszStr;
+
+	psHeap->psQuantizedVMRA = RA_Create(psHeap->pszQuantizedVMRAName,
+	                                    /* Subsequent import: */
+	                                    0, RA_LOCKCLASS_1, NULL, NULL,
+	                                    (RA_PERARENA_HANDLE) psHeap,
+	                                    IMG_FALSE);
+	if (psHeap->psQuantizedVMRA == NULL)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+		goto e5;
+	}
+
+	if (!RA_Add(psHeap->psQuantizedVMRA,
+	            (RA_BASE_T)sBaseAddress.uiAddr,
+	            (RA_LENGTH_T)uiLength,
+	            (RA_FLAGS_T)0, /* This RA doesn't use or need flags */
+	            NULL /* per ispan handle */))
+	{
+		RA_Delete(psHeap->psQuantizedVMRA);
+		eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+		goto e5;
+	}
+
+    psHeap->psCtx = psCtx;
+
+    /* Create server-side counterpart of Device Memory heap */
+    eError = BridgeDevmemIntHeapCreate(psCtx->hDevConnection,
+                                      psCtx->hDevMemServerContext,
+                                      sBaseAddress,
+                                      uiLength,
+                                      ui32Log2Quantum,
+                                      &hDevMemServerHeap);
+    if (eError != PVRSRV_OK)
+    {
+        goto e6;
+    }
+    psHeap->hDevMemServerHeap = hDevMemServerHeap;
+
+	eError = OSLockCreate(&psHeap->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e7;
+	}
+
+    psHeap->psCtx->uiNumHeaps++;
+    *ppsHeapPtr = psHeap;
+
+#if defined(PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING)
+    psHeap->psMemDescList = NULL;
+#endif  /* PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING */
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths
+    */
+ e7:
+    eError2 = BridgeDevmemIntHeapDestroy(psCtx->hDevConnection,
+                                       psHeap->hDevMemServerHeap);
+    PVR_ASSERT (eError2 == PVRSRV_OK);
+ e6:
+    if (psHeap->psQuantizedVMRA)
+		RA_Delete(psHeap->psQuantizedVMRA);
+ e5:
+    if (psHeap->pszQuantizedVMRAName)
+		OSFreeMem(psHeap->pszQuantizedVMRAName);
+ e4:
+    RA_Delete(psHeap->psSubAllocRA);
+ e3:
+    OSFreeMem(psHeap->pszSubAllocRAName);
+ e2:
+    OSFreeMem(psHeap->pszName);
+ e1:
+    OSFreeMem(psHeap);
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapBaseDevVAddr(struct _DEVMEM_HEAP_ *psHeap,
+			  IMG_DEV_VIRTADDR *pDevVAddr)
+{
+	if (psHeap == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pDevVAddr = psHeap->sBaseAddress;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL void
+DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum,
+                                    IMG_DEVMEM_SIZE_T *puiSize,
+                                    IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	IMG_DEVMEM_SIZE_T uiSize = *puiSize;
+	IMG_DEVMEM_ALIGN_T uiAlign = *puiAlign;
+
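+	/* Raise the alignment to at least the heap quantum (page) size, then
+	 * round the size up to that alignment. E.g. with uiLog2Quantum = 12
+	 * (4kB pages), uiSize = 0x1400 and uiAlign = 0x100 become
+	 * uiSize = 0x2000 and uiAlign = 0x1000. */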
+	if ((1ULL << uiLog2Quantum) > uiAlign)
+	{
+		uiAlign = 1ULL << uiLog2Quantum;
+	}
+	uiSize = (uiSize + uiAlign - 1) & ~(uiAlign - 1);
+
+	*puiSize = uiSize;
+	*puiAlign = uiAlign;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemDestroyHeap(DEVMEM_HEAP *psHeap)
+{
+	PVRSRV_ERROR eError;
+	IMG_INT uiImportCount;
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__)
+	if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		bDoCheck = IMG_FALSE;
+	}
+#endif
+#endif
+
+	if (psHeap == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	uiImportCount = OSAtomicRead(&psHeap->hImportCount);
+	if (uiImportCount > 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%d(%s) leaks remain", uiImportCount, psHeap->pszName));
+#if defined(__KERNEL__)
+#if defined(PVR_RI_DEBUG)
+		PVR_DPF((PVR_DBG_ERROR, "Details of remaining allocated device memory (for all processes):"));
+		RIDumpAllKM();
+#else
+		PVR_DPF((PVR_DBG_ERROR, "Compile with PVR_RI_DEBUG=1 to get a full "
+				"list of all driver allocations."));
+#endif
+#endif
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+		if (bDoCheck)
+#endif
+		{
+			return PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+		}
+	}
+
+	eError = BridgeDevmemIntHeapDestroy(psHeap->psCtx->hDevConnection,
+	                                    psHeap->hDevMemServerHeap);
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	if (bDoCheck)
+#endif
+	{
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			         "%s: BridgeDevmemIntHeapDestroy failed (%d)",
+			          __func__, eError));
+			return eError;
+		}
+	}
+
+	PVR_ASSERT(psHeap->psCtx->uiNumHeaps > 0);
+	psHeap->psCtx->uiNumHeaps--;
+
+	OSLockDestroy(psHeap->hLock);
+
+	if (psHeap->psQuantizedVMRA)
+	{
+		RA_Delete(psHeap->psQuantizedVMRA);
+	}
+	if (psHeap->pszQuantizedVMRAName)
+	{
+		OSFreeMem(psHeap->pszQuantizedVMRAName);
+	}
+
+	RA_Delete(psHeap->psSubAllocRA);
+	OSFreeMem(psHeap->pszSubAllocRAName);
+
+	OSFreeMem(psHeap->pszName);
+
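+	/* Clear the heap structure before freeing it so that any stale pointer
+	 * to the heap is more likely to fail fast than to half-work */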
+	OSDeviceMemSet(psHeap, 0, sizeof(*psHeap));
+	OSFreeMem(psHeap);
+
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ *                Devmem allocation/free functions                           *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier,
+                  DEVMEM_HEAP *psHeap,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiAlign,
+                  DEVMEM_FLAGS_T uiFlags,
+                  const IMG_CHAR *pszText,
+                  DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	RA_BASE_T uiAllocatedAddr;
+	RA_LENGTH_T uiAllocatedSize;
+	RA_PERISPAN_HANDLE hImport; /* the "import" from which this sub-allocation came */
+	PVRSRV_ERROR eError;
+	DEVMEM_MEMDESC *psMemDesc = NULL;
+	IMG_DEVMEM_OFFSET_T uiOffset = 0;
+	DEVMEM_IMPORT *psImport;
+	IMG_UINT32 ui32CPUCacheLineSize;
+	void *pvAddr = NULL;
+
+	IMG_BOOL bImportClean;
+	IMG_BOOL bCPUCleanFlag = PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags);
+	IMG_BOOL bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags);
+	IMG_BOOL bCPUCached = (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags)   ||
+	                       PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags));
+	IMG_BOOL bGPUCached = (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags)   ||
+	                       PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags));
+	PVRSRV_CACHE_OP eOp = PVRSRV_CACHE_OP_INVALIDATE;
+	IMG_UINT32	ui32CacheLineSize;
+
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+	{
+		/* Deferred allocation is not supported on suballocations */
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+	if (psHeap == NULL || psHeap->psCtx == NULL || ppsMemDescPtr == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+#if defined(__KERNEL__)
+	{
+		/* hDevConnection holds two different pointer types depending on the
+		 * address space in which it is used. Here (server side) it points
+		 * to the device node. */
+		PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection;
+		ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(psDevNode->pfnGetDeviceFeatureValue(psDevNode,
+				RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK));
+	}
+#else
+	ui32CacheLineSize = ROGUE_CACHE_LINE_SIZE;
+#endif
+
+	/* The following logic makes sure that any cached memory is aligned to both the CPU and GPU
+	 * cache line sizes. To be aligned to both you have to take the Lowest Common Multiple (LCM)
+	 * of the cache line sizes of each. As the possibilities are all powers of 2, the largest
+	 * number can simply be picked as the LCM. Therefore this algorithm just picks the highest
+	 * of the CPU cache line size, the GPU cache line size and the given alignment.
+	 */
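+	/* e.g. (illustrative values) CPU line = 64, GPU line = 128, requested
+	 * uiAlign = 16: max(64, 128, 16) = 128, which the two checks below pick. */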
+	ui32CPUCacheLineSize = psHeap->psCtx->ui32CPUCacheLineSize;
+	/* If the CPU cache line size is larger than the given alignment then it is the LCM,
+	 * so raise the alignment - but only if the allocation will actually be CPU cached.
+	 * Note: the cache-coherent option is not validated here, so the alignment may be
+	 * applied even though the mode later falls back to uncached.
+	 */
+	if (ui32CPUCacheLineSize > uiAlign && bCPUCached)
+	{
+		uiAlign = ui32CPUCacheLineSize;
+	}
+
+	/* Likewise, if the GPU cache line size is larger than the given alignment then it is
+	 * the LCM, so raise the alignment - but only if the allocation will be GPU cached
+	 * (any of the cached options). As above, the cache-coherent option is not validated,
+	 * so the alignment may be applied even though the mode later falls back to uncached.
+	 */
+	if (ui32CacheLineSize > uiAlign && bGPUCached)
+	{
+		uiAlign = ui32CacheLineSize;
+	}
+
+	eError = _DevmemValidateParams(uiSize,
+	                               uiAlign,
+	                               &uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto failParams;
+	}
+
+	eError = _DevmemMemDescAlloc(&psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMemDescAlloc;
+	}
+
+	/* No request for exportable memory so use the RA */
+	eError = RA_Alloc(psHeap->psSubAllocRA,
+	                  uiSize,
+	                  uiPreAllocMultiplier,
+	                  uiFlags,
+	                  uiAlign,
+	                  pszText,
+	                  &uiAllocatedAddr,
+	                  &uiAllocatedSize,
+	                  &hImport);
+	if (PVRSRV_OK != eError)
+	{
+		goto failDeviceMemAlloc;
+	}
+
+	psImport = hImport;
+
+	/* The RA is assumed to return an hImport that suballocations may be made
+	 * from when uiSize is NOT a page multiple of the passed heap.
+	 *
+	 * So we check whether uiSize is a page multiple and, if it is (and no
+	 * pre-allocation multiplier was requested), mark the import as exportable.
+	 */
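+	/* e.g. with uiLog2Quantum = 12 (4kB pages), uiSize = 0x3000 is a page
+	 * multiple (0x3000 & 0xFFF == 0) and may be marked exportable. */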
+	if (!(uiSize & ((1 << psHeap->uiLog2Quantum) - 1)) &&
+	     (uiPreAllocMultiplier == RA_NO_IMPORT_MULTIPLIER) )
+	{
+		psImport->uiProperties |= DEVMEM_PROPERTIES_EXPORTABLE;
+	}
+	psImport->uiProperties |= DEVMEM_PROPERTIES_SUBALLOCATABLE;
+
+	uiOffset = uiAllocatedAddr - psImport->sDeviceImport.sDevVAddr.uiAddr;
+
+#if defined(PDUMP)
+#if defined(__KERNEL__)
+	PDumpCommentWithFlags(PDUMP_NONE,
+	                      "Suballocated %u Bytes for \"%s\" from physical allocation \"%s\"",
+	                      (IMG_UINT32) uiSize, pszText, psImport->pszAnnotation);
+#else
+	{
+		IMG_CHAR pszComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
+		OSSNPrintf(pszComment,
+		           PVRSRV_PDUMP_MAX_COMMENT_SIZE,
+		           "Suballocated %u Bytes for \"%s\" from physical allocation \"%s\"",
+		           (IMG_UINT32) uiSize,
+		           pszText,
+		           psImport->pszAnnotation);
+
+		BridgePVRSRVPDumpComment(psHeap->psCtx->hDevConnection, pszComment, IMG_FALSE);
+	}
+#endif
+#endif
+
+	_DevmemMemDescInit(psMemDesc,
+	                   uiOffset,
+	                   psImport,
+	                   uiSize);
+
+	bImportClean = ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_CLEAN) != 0);
+
+	/* Zero the memory */
+	if (bZero)
+	{
+		/* Has the import been zeroed on allocation and were no suballocations returned to it so far? */
+		bImportClean = bImportClean && ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_ZEROED) != 0);
+
+		if(!bImportClean)
+		{
+			eOp = PVRSRV_CACHE_OP_FLUSH;
+
+			eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr);
+			if (eError != PVRSRV_OK)
+			{
+				goto failMaintenance;
+			}
+
+			/* uiSize is a 64-bit quantity whereas the 3rd argument
+			 * to OSDeviceMemSet is a 32-bit quantity on 32-bit systems
+			 * hence a compiler warning of implicit cast and loss of data.
+			 * Added explicit cast and assert to remove warning.
+			 */
+			PVR_ASSERT(uiSize < IMG_UINT32_MAX);
+
+			OSDeviceMemSet(pvAddr, 0x0, (size_t) uiSize);
+#if defined(PDUMP)
+			DevmemPDumpLoadZeroMem(psMemDesc, 0, uiSize, PDUMP_FLAGS_CONTINUOUS);
+#endif
+		}
+	}
+
+	/* Flush (if we zeroed via the CPU mapping above) or invalidate the CPU cache lines covering this suballocation */
+	if (bCPUCached && !bImportClean && (bZero || bCPUCleanFlag))
+	{
+		/* BridgeCacheOpQueue _may_ be deferred so use BridgeCacheOpExec
+		   to ensure this cache maintenance is actioned immediately */
+		eError = BridgeCacheOpExec (psMemDesc->psImport->hDevConnection,
+		                            psMemDesc->psImport->hPMR,
+		                            (IMG_UINT64)(uintptr_t)pvAddr - psMemDesc->uiOffset,
+		                            psMemDesc->uiOffset,
+		                            psMemDesc->uiAllocSize,
+		                            eOp);
+		if (eError != PVRSRV_OK)
+		{
+			goto failMaintenance;
+		}
+	}
+
+	if (pvAddr)
+	{
+		DevmemReleaseCpuVirtAddr(psMemDesc);
+		pvAddr = NULL;
+	}
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+	{
+		/* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+		 * the allocation gets mapped/unmapped
+		 */
+		OSStringNCopy(psMemDesc->sTraceData.szText, pszText, sizeof(psMemDesc->sTraceData.szText) - 1);
+	}
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+	{
+		/* Attach RI information */
+		eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hDevConnection,
+		                                    psMemDesc->psImport->hPMR,
+		                                    OSStringNLength(pszText, RI_MAX_TEXT_LEN),
+		                                    pszText,
+		                                    psMemDesc->uiOffset,
+		                                    uiAllocatedSize,
+		                                    uiAllocatedSize, /* Deferred allocation not supported on suballocations */
+		                                    IMG_FALSE,
+		                                    IMG_FALSE,
+		                                    &(psMemDesc->hRIHandle));
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#else  /* if defined(PVR_RI_DEBUG) */
+	PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVR_RI_DEBUG) */
+
+	*ppsMemDescPtr = psMemDesc;
+
+	return PVRSRV_OK;
+
+	/*
+	  error exit paths follow
+	 */
+
+failMaintenance:
+	if (pvAddr)
+	{
+		DevmemReleaseCpuVirtAddr(psMemDesc);
+		pvAddr = NULL;
+	}
+	_DevmemMemDescRelease(psMemDesc);
+	psMemDesc = NULL;	/* Make sure we don't do a discard after the release */
+failDeviceMemAlloc:
+	if (psMemDesc)
+	{
+		_DevmemMemDescDiscard(psMemDesc);
+	}
+failMemDescAlloc:
+failParams:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed! Error is %s. Allocation size: %#llX",
+			__func__,
+			PVRSRVGETERRORSTRING(eError),
+			(unsigned long long) uiSize));
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection,
+                         IMG_DEVMEM_SIZE_T uiSize,
+                         IMG_DEVMEM_ALIGN_T uiAlign,
+                         IMG_UINT32 uiLog2HeapPageSize,
+                         DEVMEM_FLAGS_T uiFlags,
+                         const IMG_CHAR *pszText,
+                         DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_MEMDESC *psMemDesc = NULL;
+	DEVMEM_IMPORT *psImport;
+	IMG_UINT32 ui32MappingTable = 0;
+
+	DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize,
+	                                    &uiSize,
+	                                    &uiAlign);
+
+	eError = _DevmemValidateParams(uiSize,
+	                               uiAlign,
+	                               &uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto failParams;
+	}
+
+	eError = _DevmemMemDescAlloc(&psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMemDescAlloc;
+	}
+
+	eError = _AllocateDeviceMemory(hDevConnection,
+	                               uiLog2HeapPageSize,
+	                               uiSize,
+	                               uiSize,
+	                               1,
+	                               1,
+	                               &ui32MappingTable,
+	                               uiAlign,
+	                               uiFlags,
+	                               IMG_TRUE,
+	                               pszText,
+	                               &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failDeviceMemAlloc;
+	}
+
+	_DevmemMemDescInit(psMemDesc,
+	                   0,
+	                   psImport,
+	                   uiSize);
+
+    *ppsMemDescPtr = psMemDesc;
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+	{
+		/* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+		 * the allocation gets mapped/unmapped
+		 */
+		OSStringNCopy(psMemDesc->sTraceData.szText, pszText, sizeof(psMemDesc->sTraceData.szText) - 1);
+	}
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+	{
+		eError = BridgeRIWritePMREntry (psImport->hDevConnection,
+		                                psImport->hPMR,
+		                                OSStringNLength(pszText, RI_MAX_TEXT_LEN),
+		                                (IMG_CHAR *)pszText,
+		                                psImport->uiSize);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError));
+		}
+
+		 /* Attach RI information */
+		eError = BridgeRIWriteMEMDESCEntry (psImport->hDevConnection,
+		                                    psImport->hPMR,
+		                                    sizeof("^"),
+		                                    "^",
+		                                    psMemDesc->uiOffset,
+		                                    uiSize,
+		                                    PVRSRV_CHECK_ON_DEMAND(uiFlags) ? 0 : uiSize,
+		                                    IMG_FALSE,
+		                                    IMG_TRUE,
+		                                    &psMemDesc->hRIHandle);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#else  /* if defined(PVR_RI_DEBUG) */
+	PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVR_RI_DEBUG) */
+
+	return PVRSRV_OK;
+
+	/*
+	  error exit paths follow
+	 */
+
+failDeviceMemAlloc:
+	_DevmemMemDescDiscard(psMemDesc);
+
+failMemDescAlloc:
+failParams:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed! Error is %s. Allocation size: %#llX",
+			__func__,
+			PVRSRVGETERRORSTRING(eError),
+			(unsigned long long) uiSize));
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection,
+                     IMG_DEVMEM_SIZE_T uiSize,
+                     IMG_DEVMEM_SIZE_T uiChunkSize,
+                     IMG_UINT32 ui32NumPhysChunks,
+                     IMG_UINT32 ui32NumVirtChunks,
+                     IMG_UINT32 *pui32MappingTable,
+                     IMG_DEVMEM_ALIGN_T uiAlign,
+                     IMG_UINT32 uiLog2HeapPageSize,
+                     DEVMEM_FLAGS_T uiFlags,
+                     const IMG_CHAR *pszText,
+                     DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_MEMDESC *psMemDesc = NULL;
+	DEVMEM_IMPORT *psImport;
+
+	DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize,
+	                                    &uiSize,
+	                                    &uiAlign);
+
+	eError = _DevmemValidateParams(uiSize,
+	                               uiAlign,
+	                               &uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto failParams;
+	}
+
+	eError = _DevmemMemDescAlloc(&psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMemDescAlloc;
+	}
+
+	eError = _AllocateDeviceMemory(hDevConnection,
+	                               uiLog2HeapPageSize,
+	                               uiSize,
+	                               uiChunkSize,
+	                               ui32NumPhysChunks,
+	                               ui32NumVirtChunks,
+	                               pui32MappingTable,
+	                               uiAlign,
+	                               uiFlags,
+	                               IMG_TRUE,
+	                               pszText,
+	                               &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failDeviceMemAlloc;
+	}
+
+	_DevmemMemDescInit(psMemDesc,
+	                   0,
+	                   psImport,
+	                   uiSize);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+	{
+		/* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+		 * the allocation gets mapped/unmapped
+		 */
+		OSStringNCopy(psMemDesc->sTraceData.szText, pszText, sizeof(psMemDesc->sTraceData.szText) - 1);
+	}
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+	{
+		eError = BridgeRIWritePMREntry (psImport->hDevConnection,
+		                                psImport->hPMR,
+		                                OSStringNLength(pszText, RI_MAX_TEXT_LEN),
+		                                (IMG_CHAR *)pszText,
+		                                psImport->uiSize);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError));
+		}
+
+		/* Attach RI information */
+		eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hDevConnection,
+		                                    psMemDesc->psImport->hPMR,
+		                                    sizeof("^"),
+		                                    "^",
+		                                    psMemDesc->uiOffset,
+		                                    uiSize,
+		                                    PVRSRV_CHECK_ON_DEMAND(uiFlags) ? 0 : ui32NumPhysChunks * uiChunkSize,
+		                                    IMG_FALSE,
+		                                    IMG_TRUE,
+		                                    &psMemDesc->hRIHandle);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#else  /* if defined(PVR_RI_DEBUG) */
+	PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVR_RI_DEBUG) */
+
+	*ppsMemDescPtr = psMemDesc;
+
+	return PVRSRV_OK;
+
+	/*
+	  error exit paths follow
+	 */
+
+failDeviceMemAlloc:
+	_DevmemMemDescDiscard(psMemDesc);
+
+failMemDescAlloc:
+failParams:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	PVR_DPF((PVR_DBG_ERROR,
+		"%s: Failed! Error is %s. Allocation size: %#llX",
+		__func__,
+		PVRSRVGETERRORSTRING(eError),
+		(unsigned long long) uiSize));
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hBridge,
+                            IMG_HANDLE hServerHandle,
+                            IMG_HANDLE *hLocalImportHandle)
+{
+	return BridgePMRMakeLocalImportHandle(hBridge,
+	                                      hServerHandle,
+	                                      hLocalImportHandle);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hBridge,
+                              IMG_HANDLE hLocalImportHandle)
+{
+	return BridgePMRUnmakeLocalImportHandle(hBridge, hLocalImportHandle);
+}
+
+/*****************************************************************************
+ *                Devmem unsecure export functions                           *
+ *****************************************************************************/
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+
+static PVRSRV_ERROR
+_Mapping_Export(DEVMEM_IMPORT *psImport,
+                DEVMEM_EXPORTHANDLE *phPMRExportHandlePtr,
+                DEVMEM_EXPORTKEY *puiExportKeyPtr,
+                DEVMEM_SIZE_T *puiSize,
+                DEVMEM_LOG2ALIGN_T *puiLog2Contig)
+{
+    /* Gets an export handle and key for the PMR used for this mapping */
+    /* Can only be done if there are no suballocations for this mapping */
+
+    PVRSRV_ERROR eError;
+    DEVMEM_EXPORTHANDLE hPMRExportHandle;
+    DEVMEM_EXPORTKEY uiExportKey;
+    IMG_DEVMEM_SIZE_T uiSize;
+    IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig;
+
+    if (psImport == NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto failParams;
+    }
+
+    if ((psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+    {
+        eError = PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+        goto failParams;
+    }
+
+    eError = BridgePMRExportPMR(psImport->hDevConnection,
+                                psImport->hPMR,
+                                &hPMRExportHandle,
+                                &uiSize,
+                                &uiLog2Contig,
+                                &uiExportKey);
+    if (eError != PVRSRV_OK)
+    {
+        goto failExport;
+    }
+
+    PVR_ASSERT(uiSize == psImport->uiSize);
+
+    *phPMRExportHandlePtr = hPMRExportHandle;
+    *puiExportKeyPtr = uiExportKey;
+    *puiSize = uiSize;
+    *puiLog2Contig = uiLog2Contig;
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+failExport:
+failParams:
+
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+
+}
+
+static void
+_Mapping_Unexport(DEVMEM_IMPORT *psImport,
+                  DEVMEM_EXPORTHANDLE hPMRExportHandle)
+{
+    PVRSRV_ERROR eError;
+
+    PVR_ASSERT (psImport != NULL);
+
+    eError = BridgePMRUnexportPMR(psImport->hDevConnection,
+                                  hPMRExportHandle);
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemExport(DEVMEM_MEMDESC *psMemDesc,
+             DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+    /* Caller to provide storage for export cookie struct */
+    PVRSRV_ERROR eError;
+    IMG_HANDLE hPMRExportHandle = 0;
+    IMG_UINT64 uiPMRExportPassword = 0;
+    IMG_DEVMEM_SIZE_T uiSize = 0;
+    IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig = 0;
+
+    if (psMemDesc == NULL || psExportCookie == NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto e0;
+    }
+
+    eError = _Mapping_Export(psMemDesc->psImport,
+                             &hPMRExportHandle,
+                             &uiPMRExportPassword,
+                             &uiSize,
+                             &uiLog2Contig);
+    if (eError != PVRSRV_OK)
+    {
+        psExportCookie->uiSize = 0;
+        goto e0;
+    }
+
+    psExportCookie->hPMRExportHandle = hPMRExportHandle;
+    psExportCookie->uiPMRExportPassword = uiPMRExportPassword;
+    psExportCookie->uiSize = uiSize;
+    psExportCookie->uiLog2ContiguityGuarantee = uiLog2Contig;
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+IMG_INTERNAL void
+DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
+               DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+    _Mapping_Unexport(psMemDesc->psImport,
+                      psExportCookie->hPMRExportHandle);
+
+    psExportCookie->uiSize = 0;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemImport(SHARED_DEV_CONNECTION hDevConnection,
+			 DEVMEM_EXPORTCOOKIE *psCookie,
+			 DEVMEM_FLAGS_T uiFlags,
+			 DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+    DEVMEM_MEMDESC *psMemDesc = NULL;
+    DEVMEM_IMPORT *psImport;
+    IMG_HANDLE hPMR;
+    PVRSRV_ERROR eError;
+
+	if (ppsMemDescPtr == NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto failParams;
+    }
+
+	eError = _DevmemMemDescAlloc(&psMemDesc);
+    if (eError != PVRSRV_OK)
+    {
+        goto failMemDescAlloc;
+    }
+
+	eError = _DevmemImportStructAlloc(hDevConnection,
+									  &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto failImportAlloc;
+	}
+
+	/* Get a handle to the PMR (inc refcount) */
+    eError = BridgePMRImportPMR(hDevConnection,
+                                psCookie->hPMRExportHandle,
+                                psCookie->uiPMRExportPassword,
+                                psCookie->uiSize, /* not trusted - just for sanity checks */
+                                psCookie->uiLog2ContiguityGuarantee, /* not trusted - just for sanity checks */
+                                &hPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto failImport;
+	}
+
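+	/* The export's log2 contiguity guarantee doubles as the import's
+	 * alignment (1ULL << uiLog2ContiguityGuarantee) */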
+	_DevmemImportStructInit(psImport,
+							psCookie->uiSize,
+							1ULL << psCookie->uiLog2ContiguityGuarantee,
+							uiFlags,
+							hPMR,
+							DEVMEM_PROPERTIES_IMPORTED |
+							DEVMEM_PROPERTIES_EXPORTABLE);
+
+	_DevmemMemDescInit(psMemDesc,
+					   0,
+					   psImport,
+					   psImport->uiSize);
+
+    *ppsMemDescPtr = psMemDesc;
+
+#if defined(PVR_RI_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+	{
+		/* Attach RI information */
+		eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hDevConnection,
+											psMemDesc->psImport->hPMR,
+											sizeof("^"),
+											"^",
+											psMemDesc->uiOffset,
+											psMemDesc->psImport->uiSize,
+											psMemDesc->psImport->uiSize,
+											IMG_TRUE,
+											IMG_FALSE,
+											&psMemDesc->hRIHandle);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#endif /* if defined(PVR_RI_DEBUG) */
+
+	return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+failImport:
+    _DevmemImportDiscard(psImport);
+failImportAlloc:
+    _DevmemMemDescDiscard(psMemDesc);
+failMemDescAlloc:
+failParams:
+    PVR_ASSERT(eError != PVRSRV_OK);
+
+    return eError;
+}
+
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+/*****************************************************************************
+ *                   Common MemDesc functions                                *
+ *****************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnpin(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+
+	/* Stop if the allocation might have suballocations. */
+	if (!(psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: The passed allocation is not valid to unpin because "
+		         "there might be suballocations on it. Make sure you allocate a page multiple "
+		         "of the heap when using PVRSRVAllocDeviceMem()",
+		         __FUNCTION__));
+
+		goto e_exit;
+	}
+
+	/* Stop if the Import is still mapped to CPU */
+	if (psImport->sCPUImport.ui32RefCount)
+	{
+		eError = PVRSRV_ERROR_STILL_MAPPED;
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: There are still %u references on the CPU mapping. "
+		         "Please remove all CPU mappings before unpinning.",
+		         __FUNCTION__,
+		         psImport->sCPUImport.ui32RefCount));
+
+		goto e_exit;
+	}
+
+	/* Only unpin if it is not already unpinned; otherwise
+	 * just return PVRSRV_OK */
+	if (psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+	{
+		goto e_exit;
+	}
+
+	/* Unpin it and invalidate mapping */
+	if (psImport->sDeviceImport.bMapped == IMG_TRUE)
+	{
+		eError = BridgeDevmemIntUnpinInvalidate(psImport->hDevConnection,
+		                                        psImport->sDeviceImport.hMapping,
+		                                        psImport->hPMR);
+	}
+	else
+	{
+		/* Or just unpin it */
+		eError = BridgeDevmemIntUnpin(psImport->hDevConnection,
+		                              psImport->hPMR);
+	}
+
+	/* Update flags and RI when call was successful */
+	if (eError == PVRSRV_OK)
+	{
+		psImport->uiProperties |= DEVMEM_PROPERTIES_UNPINNED;
+#if defined(PVR_RI_DEBUG)
+		if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+		{
+			if (psMemDesc->hRIHandle)
+			{
+				PVRSRV_ERROR eError2;
+
+				eError2 = BridgeRIUpdateMEMDESCPinning(psMemDesc->psImport->hDevConnection,
+				                                       psMemDesc->hRIHandle,
+				                                       IMG_FALSE);
+
+				if (eError2 != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCPinning failed (eError=%d)",
+					         __func__,
+					         eError2));
+				}
+			}
+		}
+#endif
+	}
+	else
+	{
+		/* Or just show what went wrong */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Unpin aborted because of error %d",
+		         __func__,
+		         eError));
+	}
+
+e_exit:
+	return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPin(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+
+	/* Only pin if it is unpinned */
+	if ((psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED) == 0)
+	{
+		goto e_exit;
+	}
+
+	/* Pin it and make mapping valid */
+	if (psImport->sDeviceImport.bMapped)
+	{
+		eError = BridgeDevmemIntPinValidate(psImport->hDevConnection,
+		                                    psImport->sDeviceImport.hMapping,
+		                                    psImport->hPMR);
+	}
+	else
+	{
+		/* Or just pin it */
+		eError = BridgeDevmemIntPin(psImport->hDevConnection,
+		                            psImport->hPMR);
+	}
+
+	if ( (eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_PMR_NEW_MEMORY) )
+	{
+		psImport->uiProperties &= ~DEVMEM_PROPERTIES_UNPINNED;
+#if defined(PVR_RI_DEBUG)
+		if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+		{
+			if (psMemDesc->hRIHandle)
+			{
+				PVRSRV_ERROR eError2;
+
+				eError2 = BridgeRIUpdateMEMDESCPinning(psMemDesc->psImport->hDevConnection,
+								       psMemDesc->hRIHandle,
+								       IMG_TRUE);
+
+				if (eError2 != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCPinning failed (eError=%d)",
+					         __func__,
+					         eError2));
+				}
+			}
+		}
+#endif
+	}
+	else
+	{
+		/* Or just show what went wrong */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Pin aborted because of error %d",
+		         __func__,
+		         eError));
+	}
+
+e_exit:
+	return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, IMG_DEVMEM_SIZE_T* puiSize)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	*puiSize = psMemDesc->uiAllocSize;
+
+	return eError;
+}
+
+/*
+	This function is called for freeing any class of memory
+*/
+IMG_INTERNAL void
+DevmemFree(DEVMEM_MEMDESC *psMemDesc)
+{
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SECURE)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Please use methods dedicated to secure buffers.",
+				__func__));
+		return;
+	}
+
+#if defined(PVR_RI_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+	{
+		if (psMemDesc->hRIHandle)
+		{
+		    PVRSRV_ERROR eError;
+
+		    eError = BridgeRIDeleteMEMDESCEntry(psMemDesc->psImport->hDevConnection,
+									   psMemDesc->hRIHandle);
+			if( eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIDeleteMEMDESCEntry failed (eError=%d)", __func__, eError));
+			}
+		}
+	}
+#endif  /* if defined(PVR_RI_DEBUG) */
+	_DevmemMemDescRelease(psMemDesc);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
+				  DEVMEM_HEAP *psHeap,
+				  IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+	DEVMEM_IMPORT *psImport;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bMap = IMG_TRUE;
+	IMG_BOOL bDestroyed = IMG_FALSE;
+
+	/* Do not try to map unpinned memory */
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+	{
+		eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+		goto failFlags;
+	}
+
+	OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+	if (psHeap == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+	if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+		goto failCheck;
+	}
+
+	/* Don't map memory for deferred allocations */
+	if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+	{
+		PVR_ASSERT(psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE);
+		bMap = IMG_FALSE;
+	}
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					psMemDesc->sDeviceMemDesc.ui32RefCount,
+					psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+	psImport = psMemDesc->psImport;
+	_DevmemMemDescAcquire(psMemDesc);
+
+	eError = _DevmemImportStructDevMap(psHeap,
+									   bMap,
+									   psImport,
+									   DEVICEMEM_UTILS_NO_ADDRESS);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMap;
+	}
+
+	sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+	sDevVAddr.uiAddr += psMemDesc->uiOffset;
+	psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr;
+	psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+    *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr;
+
+    OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+	{
+		BridgeDevicememHistoryMap(psMemDesc->psImport->hDevConnection,
+							psMemDesc->psImport->hPMR,
+							psMemDesc->uiOffset,
+							psMemDesc->sDeviceMemDesc.sDevVAddr,
+							psMemDesc->uiAllocSize,
+							psMemDesc->sTraceData.szText,
+							DevmemGetHeapLog2PageSize(psHeap),
+							psMemDesc->sTraceData.ui32AllocationIndex,
+							&psMemDesc->sTraceData.ui32AllocationIndex);
+	}
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+	{
+		if (psMemDesc->hRIHandle)
+		{
+			 eError = BridgeRIUpdateMEMDESCAddr(psImport->hDevConnection,
+											   psMemDesc->hRIHandle,
+											   psImport->sDeviceImport.sDevVAddr);
+			if( eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCAddr failed (eError=%d)", __func__, eError));
+			}
+		}
+	}
+#endif
+
+    return PVRSRV_OK;
+
+failMap:
+	bDestroyed = _DevmemMemDescRelease(psMemDesc);
+failCheck:
+failParams:
+	if (!bDestroyed)
+	{
+		OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+	}
+	PVR_ASSERT(eError != PVRSRV_OK);
+failFlags:
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+                         DEVMEM_HEAP *psHeap,
+                         IMG_DEV_VIRTADDR sDevVirtAddr)
+{
+	DEVMEM_IMPORT *psImport;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bMap = IMG_TRUE;
+	IMG_BOOL bDestroyed = IMG_FALSE;
+
+	/* Do not try to map unpinned memory */
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+	{
+		eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+		goto failFlags;
+	}
+
+	OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+	if (psHeap == NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+	if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+		goto failCheck;
+	}
+
+	/* Don't map memory for deferred allocations */
+	if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+	{
+		PVR_ASSERT(psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE);
+		bMap = IMG_FALSE;
+	}
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					psMemDesc->sDeviceMemDesc.ui32RefCount,
+					psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+	psImport = psMemDesc->psImport;
+	_DevmemMemDescAcquire(psMemDesc);
+
+	eError = _DevmemImportStructDevMap(psHeap,
+									   bMap,
+									   psImport,
+									   sDevVirtAddr.uiAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMap;
+	}
+
+	sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+	sDevVAddr.uiAddr += psMemDesc->uiOffset;
+	psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr;
+	psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+    OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+	{
+		BridgeDevicememHistoryMap(psMemDesc->psImport->hDevConnection,
+							psMemDesc->psImport->hPMR,
+							psMemDesc->uiOffset,
+							psMemDesc->sDeviceMemDesc.sDevVAddr,
+							psMemDesc->uiAllocSize,
+							psMemDesc->sTraceData.szText,
+							DevmemGetHeapLog2PageSize(psHeap),
+							psMemDesc->sTraceData.ui32AllocationIndex,
+							&psMemDesc->sTraceData.ui32AllocationIndex);
+	}
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+	{
+		if (psMemDesc->hRIHandle)
+		{
+			 eError = BridgeRIUpdateMEMDESCAddr(psImport->hDevConnection,
+											   psMemDesc->hRIHandle,
+											   psImport->sDeviceImport.sDevVAddr);
+			if( eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCAddr failed (eError=%d)", __func__, eError));
+			}
+		}
+	}
+#endif
+
+    return PVRSRV_OK;
+
+failMap:
+	bDestroyed = _DevmemMemDescRelease(psMemDesc);
+failCheck:
+failParams:
+	if (!bDestroyed)
+	{
+		OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+	}
+	PVR_ASSERT(eError != PVRSRV_OK);
+failFlags:
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                         IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+	PVRSRV_ERROR eError;
+
+	/* Do not try to map unpinned memory */
+	if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+	{
+		eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+		goto failCheck;
+	}
+
+	OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					psMemDesc->sDeviceMemDesc.ui32RefCount,
+					psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+	if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_NO_MAPPING;
+		goto failRelease;
+	}
+	psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+    *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr;
+	OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+    return PVRSRV_OK;
+
+failRelease:
+	OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+failCheck:
+	return eError;
+}
+
+IMG_INTERNAL void
+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVR_ASSERT(psMemDesc != NULL);
+
+	OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					psMemDesc->sDeviceMemDesc.ui32RefCount,
+					psMemDesc->sDeviceMemDesc.ui32RefCount-1);
+
+	PVR_ASSERT(psMemDesc->sDeviceMemDesc.ui32RefCount != 0);
+
+	if (--psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+	{
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+		if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+		{
+			BridgeDevicememHistoryUnmap(psMemDesc->psImport->hDevConnection,
+								psMemDesc->psImport->hPMR,
+								psMemDesc->uiOffset,
+								psMemDesc->sDeviceMemDesc.sDevVAddr,
+								psMemDesc->uiAllocSize,
+								psMemDesc->sTraceData.szText,
+								DevmemGetHeapLog2PageSize(psMemDesc->psImport->sDeviceImport.psHeap),
+								psMemDesc->sTraceData.ui32AllocationIndex,
+								&psMemDesc->sTraceData.ui32AllocationIndex);
+		}
+#endif
+		_DevmemImportStructDevUnmap(psMemDesc->psImport);
+		OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+		_DevmemMemDescRelease(psMemDesc);
+	}
+	else
+	{
+		OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+	}
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                         void **ppvCpuVirtAddr)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psMemDesc != NULL);
+	PVR_ASSERT(ppvCpuVirtAddr != NULL);
+
+	if ( psMemDesc->psImport->uiProperties &
+	    (DEVMEM_PROPERTIES_UNPINNED | DEVMEM_PROPERTIES_SECURE) )
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Allocation is currently unpinned or a secure buffer. "
+				"Not possible to map to CPU!",
+				__func__));
+		eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+		goto failFlags;
+	}
+
+	OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					psMemDesc->sCPUMemDesc.ui32RefCount,
+					psMemDesc->sCPUMemDesc.ui32RefCount+1);
+
+	if (psMemDesc->sCPUMemDesc.ui32RefCount++ == 0)
+	{
+		DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+		IMG_UINT8 *pui8CPUVAddr;
+
+		_DevmemMemDescAcquire(psMemDesc);
+		eError = _DevmemImportStructCPUMap(psImport);
+		if (eError != PVRSRV_OK)
+		{
+			goto failMap;
+		}
+
+		pui8CPUVAddr = psImport->sCPUImport.pvCPUVAddr;
+		pui8CPUVAddr += psMemDesc->uiOffset;
+		psMemDesc->sCPUMemDesc.pvCPUVAddr = pui8CPUVAddr;
+	}
+    *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr;
+
+    VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize);
+
+    OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+
+    return PVRSRV_OK;
+
+failMap:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	psMemDesc->sCPUMemDesc.ui32RefCount--;
+
+	if (!_DevmemMemDescRelease(psMemDesc))
+	{
+		OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+	}
+failFlags:
+	return eError;
+}
+
+IMG_INTERNAL void
+DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                           void **ppvCpuVirtAddr)
+{
+	PVR_ASSERT(psMemDesc != NULL);
+	PVR_ASSERT(ppvCpuVirtAddr != NULL);
+
+	OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					psMemDesc->sCPUMemDesc.ui32RefCount,
+					psMemDesc->sCPUMemDesc.ui32RefCount+1);
+
+	*ppvCpuVirtAddr = NULL;
+	if (psMemDesc->sCPUMemDesc.ui32RefCount)
+	{
+		*ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr;
+		psMemDesc->sCPUMemDesc.ui32RefCount += 1;
+	}
+
+	VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize);
+	OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+}
+
+IMG_INTERNAL void
+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVR_ASSERT(psMemDesc != NULL);
+
+	OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					psMemDesc->sCPUMemDesc.ui32RefCount,
+					psMemDesc->sCPUMemDesc.ui32RefCount-1);
+
+	PVR_ASSERT(psMemDesc->sCPUMemDesc.ui32RefCount != 0);
+
+	if (--psMemDesc->sCPUMemDesc.ui32RefCount == 0)
+	{
+		OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+		_DevmemImportStructCPUUnmap(psMemDesc->psImport);
+		_DevmemMemDescRelease(psMemDesc);
+	}
+	else
+	{
+		OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+	}
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+			   IMG_HANDLE *phImport)
+{
+	if ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+	{
+		return PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+	}
+
+	*phImport = psMemDesc->psImport->hPMR;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
+						   IMG_UINT64 *pui64UID)
+{
+	DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+	PVRSRV_ERROR eError;
+
+	eError = BridgePMRGetUID(psImport->hDevConnection,
+							 psImport->hPMR,
+							 pui64UID);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
+				IMG_HANDLE *hReservation)
+{
+	DEVMEM_IMPORT *psImport;
+
+	PVR_ASSERT(psMemDesc);
+	psImport = psMemDesc->psImport;
+
+	PVR_ASSERT(psImport);
+	*hReservation = psImport->sDeviceImport.hReservation;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
+		IMG_HANDLE *phPMR,
+		IMG_DEVMEM_OFFSET_T *puiPMROffset)
+{
+	DEVMEM_IMPORT *psImport;
+
+	PVR_ASSERT(psMemDesc);
+	*puiPMROffset = psMemDesc->uiOffset;
+	psImport = psMemDesc->psImport;
+
+	PVR_ASSERT(psImport);
+	*phPMR = psImport->hPMR;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
+				DEVMEM_FLAGS_T *puiFlags)
+{
+	DEVMEM_IMPORT *psImport;
+
+	PVR_ASSERT(psMemDesc);
+	psImport = psMemDesc->psImport;
+
+	PVR_ASSERT(psImport);
+	*puiFlags = psImport->uiFlags;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL IMG_HANDLE
+DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc)
+{
+	return psMemDesc->psImport->hDevConnection;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemLocalImport(IMG_HANDLE hBridge,
+				  IMG_HANDLE hExtHandle,
+				  DEVMEM_FLAGS_T uiFlags,
+				  DEVMEM_MEMDESC **ppsMemDescPtr,
+				  IMG_DEVMEM_SIZE_T *puiSizePtr,
+				  const IMG_CHAR *pszAnnotation)
+{
+    DEVMEM_MEMDESC *psMemDesc = NULL;
+    DEVMEM_IMPORT *psImport;
+    IMG_DEVMEM_SIZE_T uiSize;
+    IMG_DEVMEM_ALIGN_T uiAlign;
+    IMG_HANDLE hPMR;
+    PVRSRV_ERROR eError;
+
+    if (ppsMemDescPtr == NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto failParams;
+    }
+
+	eError = _DevmemMemDescAlloc(&psMemDesc);
+    if (eError != PVRSRV_OK)
+    {
+        goto failMemDescAlloc;
+    }
+
+	eError = _DevmemImportStructAlloc(hBridge,
+									  &psImport);
+    if (eError != PVRSRV_OK)
+    {
+        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto failImportAlloc;
+    }
+
+	/* Get the PMR handle and its size from the server */
+	eError = BridgePMRLocalImportPMR(hBridge,
+									 hExtHandle,
+									 &hPMR,
+									 &uiSize,
+									 &uiAlign);
+	if (eError != PVRSRV_OK)
+	{
+		goto failImport;
+	}
+
+	_DevmemImportStructInit(psImport,
+							uiSize,
+							uiAlign,
+							uiFlags,
+							hPMR,
+							DEVMEM_PROPERTIES_IMPORTED |
+							DEVMEM_PROPERTIES_EXPORTABLE);
+
+	_DevmemMemDescInit(psMemDesc,
+					   0,
+					   psImport,
+					   uiSize);
+
+    *ppsMemDescPtr = psMemDesc;
+	if (puiSizePtr)
+		*puiSizePtr = uiSize;
+
+#if defined(PVR_RI_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+	{
+		/* Attach RI information.
+		 * Set backed size to 0 since this allocation has been allocated
+		 * by the same process and has been accounted for. */
+		eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hDevConnection,
+											psMemDesc->psImport->hPMR,
+											sizeof("^"),
+											"^",
+											psMemDesc->uiOffset,
+											psMemDesc->psImport->uiSize,
+											0,
+											IMG_TRUE,
+											IMG_FALSE,
+											&(psMemDesc->hRIHandle));
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#endif /* if defined(PVR_RI_DEBUG) */
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+	{
+		/* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+		* the allocation gets mapped/unmapped
+		*/
+		OSStringNCopy(psMemDesc->sTraceData.szText, pszAnnotation, sizeof(psMemDesc->sTraceData.szText) - 1);
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+#endif
+
+	return PVRSRV_OK;
+
+failImport:
+    _DevmemImportDiscard(psImport);
+failImportAlloc:
+	_DevmemMemDescDiscard(psMemDesc);
+failMemDescAlloc:
+failParams:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
+                         IMG_DEV_VIRTADDR sDevVAddr)
+{
+    return BridgeDevmemIsVDevAddrValid(psContext->hDevConnection,
+                                       psContext->hDevMemServerContext,
+                                       sDevVAddr);
+}
+
+IMG_INTERNAL IMG_UINT32
+DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap)
+{
+	return psHeap->uiLog2Quantum;
+}
+
+IMG_INTERNAL IMG_UINT32
+DevmemGetHeapTilingProperties(DEVMEM_HEAP *psHeap,
+                              IMG_UINT32 *puiLog2ImportAlignment,
+                              IMG_UINT32 *puiLog2TilingStrideFactor)
+{
+	*puiLog2ImportAlignment = psHeap->uiLog2ImportAlignment;
+	*puiLog2TilingStrideFactor = psHeap->uiLog2TilingStrideFactor;
+	return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function       RegisterDevmemPFNotify
+@Description    Registers that the application wants to be signaled when a page
+                fault occurs.
+
+@Input          psContext      Memory context of the process that would like
+                               to be notified about page faults.
+@Input          ui32PID        The PID of the calling process.
+@Input          bRegister      If true, register. If false, de-register.
+@Return         PVRSRV_ERROR:  PVRSRV_OK on success. Otherwise, a PVRSRV_ERROR
+                               code.
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
+                       IMG_UINT32     ui32PID,
+                       IMG_BOOL       bRegister)
+{
+	PVRSRV_ERROR eError;
+
+	eError = BridgeDevmemIntRegisterPFNotifyKM(psContext->hDevConnection,
+	                                           psContext->hDevMemServerContext,
+	                                           ui32PID,
+	                                           bRegister);
+	if (eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Bridge Call Failed: This could suggest a UM/KM miss-match (%d)",
+		         __func__,
+		         (IMG_INT)(eError)));
+	}
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+GetMaxDevMemSize(SHARED_DEV_CONNECTION psConnection,
+		 IMG_DEVMEM_SIZE_T *puiLMASize,
+		 IMG_DEVMEM_SIZE_T *puiUMASize)
+{
+	return BridgeGetMaxDevMemSize(psConnection,
+				      puiLMASize,
+				      puiUMASize);
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/devicemem_pdump.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/devicemem_pdump.c
new file mode 100644
index 0000000..acbc53d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/devicemem_pdump.c
@@ -0,0 +1,334 @@
+/*************************************************************************/ /*!
+@File
+@Title          Shared device memory management PDump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements common (client & server) PDump functions for the
+                memory management code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(PDUMP)
+
+#include "allocmem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pdump.h"
+#include "devicemem.h"
+#include "devicemem_utils.h"
+#include "devicemem_pdump.h"
+#include "client_pdumpmm_bridge.h"
+#if defined(LINUX) && !defined(__KERNEL__)
+#include <stdio.h>
+#if defined(SUPPORT_ANDROID_PLATFORM)
+#include "android_utils.h"
+#endif
+#endif
+
+IMG_INTERNAL void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+
+    PVR_ASSERT(uiOffset + uiSize <= psMemDesc->psImport->uiSize);
+
+    eError = BridgePMRPDumpLoadMem(psMemDesc->psImport->hDevConnection,
+                                   psMemDesc->psImport->hPMR,
+                                   psMemDesc->uiOffset + uiOffset,
+                                   uiSize,
+                                   uiPDumpFlags,
+                                   IMG_FALSE);
+
+    if (eError != PVRSRV_OK)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "%s: failed with error %d",
+                 __func__, eError));
+    }
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
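+/* Usage sketch (illustrative only): capture the first 4KB of an
+   allocation into the PDump script once the CPU has finished writing
+   it. psMemDesc is assumed to be a valid descriptor and
+   PDUMP_FLAGS_CONTINUOUS is assumed to be the capture mode the caller
+   wants.
+
+       DevmemPDumpLoadMem(psMemDesc,
+                          0,       offset into the allocation
+                          4096,    number of bytes to capture
+                          PDUMP_FLAGS_CONTINUOUS);
+ */
+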
+IMG_INTERNAL void
+DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+
+    PVR_ASSERT(uiOffset + uiSize <= psMemDesc->psImport->uiSize);
+
+    eError = BridgePMRPDumpLoadMem(psMemDesc->psImport->hDevConnection,
+                                   psMemDesc->psImport->hPMR,
+                                   psMemDesc->uiOffset + uiOffset,
+                                   uiSize,
+                                   uiPDumpFlags,
+                                   IMG_TRUE);
+
+    if (eError != PVRSRV_OK)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "%s: failed with error %d",
+                 __func__, eError));
+    }
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT32 ui32Value,
+                        PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgePMRPDumpLoadMemValue32(psMemDesc->psImport->hDevConnection,
+                                        psMemDesc->psImport->hPMR,
+                                        psMemDesc->uiOffset + uiOffset,
+                                        ui32Value,
+                                        uiPDumpFlags);
+
+    if (eError != PVRSRV_OK)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "%s: failed with error %d",
+                 __func__, eError));
+    }
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT64 ui64Value,
+                        PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgePMRPDumpLoadMemValue64(psMemDesc->psImport->hDevConnection,
+                                          psMemDesc->psImport->hPMR,
+                                          psMemDesc->uiOffset + uiOffset,
+                                          ui64Value,
+                                          uiPDumpFlags);
+
+    if (eError != PVRSRV_OK)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "%s: failed with error %d",
+                 __func__, eError));
+    }
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+/* FIXME: This should be server side only */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC		*psMemDesc,
+							  IMG_DEVMEM_OFFSET_T	*puiMemOffset,
+							  IMG_CHAR				*pszName,
+							  IMG_UINT32			ui32Size)
+{
+    PVRSRV_ERROR		eError;
+	IMG_CHAR			aszMemspaceName[100];
+	IMG_CHAR			aszSymbolicName[100];
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+	*puiMemOffset += psMemDesc->uiOffset;
+
+    eError = BridgePMRPDumpSymbolicAddr(psMemDesc->psImport->hDevConnection,
+										psMemDesc->psImport->hPMR,
+										*puiMemOffset,
+										sizeof(aszMemspaceName),
+										&aszMemspaceName[0],
+										sizeof(aszSymbolicName),
+										&aszSymbolicName[0],
+										puiMemOffset,
+										&uiNextSymName);
+
+    if (eError != PVRSRV_OK)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "%s: failed with error %d",
+                 __func__, eError));
+    }
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+	OSSNPrintf(pszName, ui32Size, "%s:%s", &aszMemspaceName[0], &aszSymbolicName[0]);
+	return eError;
+}
+
+IMG_INTERNAL void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_DEVMEM_SIZE_T uiSize,
+                      const IMG_CHAR *pszFilename,
+                      IMG_UINT32 uiFileOffset)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgePMRPDumpSaveToFile(psMemDesc->psImport->hDevConnection,
+									  psMemDesc->psImport->hPMR,
+									  psMemDesc->uiOffset + uiOffset,
+									  uiSize,
+									  OSStringLength(pszFilename) + 1,
+									  pszFilename,
+									  uiFileOffset);
+
+    if (eError != PVRSRV_OK)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "%s: failed with error %d",
+                 __func__, eError));
+    }
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+
+
+/* FIXME: Remove? */
+IMG_INTERNAL void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+                             IMG_DEVMEM_OFFSET_T uiOffset,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             const IMG_CHAR *pszFilename,
+							 IMG_UINT32 ui32FileOffset,
+							 IMG_UINT32	ui32PdumpFlags)
+{
+    PVRSRV_ERROR eError;
+    IMG_DEV_VIRTADDR sDevAddrStart;
+
+    sDevAddrStart = psMemDesc->psImport->sDeviceImport.sDevVAddr;
+    sDevAddrStart.uiAddr += psMemDesc->uiOffset;
+    sDevAddrStart.uiAddr += uiOffset;
+
+    eError = BridgeDevmemIntPDumpSaveToFileVirtual(psMemDesc->psImport->hDevConnection,
+                                                   psMemDesc->psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext,
+                                                   sDevAddrStart,
+                                                   uiSize,
+                                                   OSStringLength(pszFilename) + 1,
+                                                   pszFilename,
+												   ui32FileOffset,
+												   ui32PdumpFlags);
+
+    if (eError != PVRSRV_OK)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "%s: failed with error %d",
+                 __func__, eError));
+    }
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+                       IMG_DEVMEM_OFFSET_T uiOffset,
+                       IMG_UINT32 ui32Value,
+                       IMG_UINT32 ui32Mask,
+                       PDUMP_POLL_OPERATOR eOperator,
+                       PDUMP_FLAGS_T ui32PDumpFlags)
+{
+    PVRSRV_ERROR eError;
+    IMG_DEVMEM_SIZE_T uiNumBytes;
+
+    uiNumBytes = 4;
+
+    if (psMemDesc->uiOffset + uiOffset + uiNumBytes >= psMemDesc->psImport->uiSize)
+    {
+        eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+        goto e0;
+    }
+
+    eError = BridgePMRPDumpPol32(psMemDesc->psImport->hDevConnection,
+                                 psMemDesc->psImport->hPMR,
+                                 psMemDesc->uiOffset + uiOffset,
+                                 ui32Value,
+                                 ui32Mask,
+                                 eOperator,
+                                 ui32PDumpFlags);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
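+
+/* Usage sketch (illustrative only): emit a poll that stalls PDump script
+   playback until a 32-bit sync word at the start of psSyncMemDesc
+   reaches 0x1. PDUMP_POLL_OPERATOR_EQUAL is assumed from the PDump poll
+   operator enumeration.
+
+       eError = DevmemPDumpDevmemPol32(psSyncMemDesc,
+                                       0,           offset of the word
+                                       0x1,         value to wait for
+                                       0xFFFFFFFF,  mask applied before compare
+                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                       PDUMP_FLAGS_CONTINUOUS);
+ */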
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+				IMG_DEVMEM_OFFSET_T uiReadOffset,
+				IMG_DEVMEM_OFFSET_T uiWriteOffset,
+				IMG_DEVMEM_SIZE_T uiPacketSize,
+				IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVRSRV_ERROR eError;
+
+	if ((psMemDesc->uiOffset + uiReadOffset) > psMemDesc->psImport->uiSize)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+		goto e0;
+	}
+
+	eError = BridgePMRPDumpCBP(psMemDesc->psImport->hDevConnection,
+							   psMemDesc->psImport->hPMR,
+							   psMemDesc->uiOffset + uiReadOffset,
+							   uiWriteOffset,
+							   uiPacketSize,
+							   uiBufferSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	return PVRSRV_OK;
+
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+#endif /* PDUMP */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/devicemem_utils.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/devicemem_utils.c
new file mode 100644
index 0000000..cf8da18
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/devicemem_utils.c
@@ -0,0 +1,1085 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management internal utility functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally by device memory management
+                code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "ra.h"
+#include "devicemem_utils.h"
+#include "client_mm_bridge.h"
+#include "osfunc.h"
+
+/*
+	SVM heap management support functions for CPU (un)mapping
+*/
+#define DEVMEM_MAP_SVM_USER_MANAGED_RETRY				2
+
+/* Time to wait (in microseconds) between retries when unmapping memory from
+   the device. */
+#define DEVMEM_UNMAP_RETRY_DELAY_US 500U
+
+static inline PVRSRV_ERROR 
+_DevmemCPUMapSVMKernelManaged(DEVMEM_HEAP *psHeap,
+							  DEVMEM_IMPORT *psImport,
+							  IMG_UINT64 *ui64MapAddress)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT64 ui64SvmMapAddr;
+	IMG_UINT64 ui64SvmMapAddrEnd;
+	IMG_UINT64 ui64SvmHeapAddrEnd;
+
+	/* SVM heap management is always XXX_KERNEL_MANAGED unless we
+	   have triggered the fall-back code-path, in which case this
+	   function should not be called at all */
+	PVR_ASSERT(psHeap->eHeapType == DEVMEM_HEAP_TYPE_KERNEL_MANAGED);
+
+	/* By acquiring the CPU virtual address here, it essentially
+	   means we lock-down the virtual address for the duration
+	   of the life-cycle of the allocation until a de-allocation
+	   request comes in. Thus the allocation is guaranteed not to
+	   change its virtual address on the CPU during its life-time. 
+	   NOTE: Import might have already been CPU Mapped before now,
+	   normally this is not a problem, see fall back */
+	eError = _DevmemImportStructCPUMap(psImport);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Unable to CPU map (lock-down) device memory for SVM use",
+				__func__));
+		eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED;
+		goto failSVM;
+	}
+
+	/* Supplied kernel mmap virtual address is also device virtual address;
+	   calculate the heap & kernel supplied mmap virtual address limits */
+	ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr;
+	ui64SvmHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize;
+	ui64SvmMapAddrEnd = ui64SvmMapAddr + psImport->uiSize;
+	PVR_ASSERT(ui64SvmMapAddr != (IMG_UINT64)0);
+
+	/* SVM limit test may fail if processor has more virtual address bits than device */
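+	/* e.g. a CPU mmap result of 0x7f2500000000 (a 47-bit user VA) lies
+	   beyond the end of a 1GB SVM heap based at 0x10000000, so the test
+	   below rejects it and the caller falls back to the user-managed
+	   path (addresses illustrative only) */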
+	if (ui64SvmMapAddr >= ui64SvmHeapAddrEnd || ui64SvmMapAddrEnd > ui64SvmHeapAddrEnd)
+	{
+		/* Unmap incompatible SVM virtual address, this
+		   may not release address if it was elsewhere
+		   CPU Mapped before call into this function */
+		_DevmemImportStructCPUUnmap(psImport);
+
+		/* Flag incompatible SVM mapping */
+		eError = PVRSRV_ERROR_BAD_MAPPING;
+		goto failSVM;
+	}
+
+	*ui64MapAddress = ui64SvmMapAddr;
+failSVM:
+	/* either OK, MAP_FAILED or BAD_MAPPING */
+	return eError;
+}
+
+static inline void 
+_DevmemCPUUnmapSVMKernelManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+	PVR_UNREFERENCED_PARAMETER(psHeap);
+	_DevmemImportStructCPUUnmap(psImport);
+}
+
+static inline PVRSRV_ERROR 
+_DevmemCPUMapSVMUserManaged(DEVMEM_HEAP *psHeap,
+							DEVMEM_IMPORT *psImport,
+							IMG_UINT uiAlign,
+							IMG_UINT64 *ui64MapAddress)
+{
+	RA_LENGTH_T uiAllocatedSize;
+	RA_BASE_T uiAllocatedAddr;
+	IMG_UINT64 ui64SvmMapAddr;
+	IMG_UINT uiRetry = 0;
+	PVRSRV_ERROR eError;
+
+	/* If SVM heap management has transitioned to XXX_USER_MANAGED,
+	   this is essentially a fall back approach that ensures we
+	   continue to satisfy SVM alloc. This approach is not without
+	   hazards in that we may specify a virtual address that is
+	   already in use by the user process */
+	PVR_ASSERT(psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED);
+
+	/* Normally, for SVM heap allocations, CPUMap _must_  be done
+	   before DevMap; ideally the initial CPUMap should be done by
+	   SVM functions though this is not a hard requirement as long
+	   as the prior elsewhere obtained CPUMap virtual address meets
+	   SVM address requirements. This is a fall-back code-pathway
+	   so we have to test that this assumption holds before we 
+	   progress any further */
+	OSLockAcquire(psImport->sCPUImport.hLock);
+
+	if (psImport->sCPUImport.ui32RefCount)
+	{
+		/* Already CPU-mapped SVM heap allocation; this previously
+		   obtained virtual address is responsible for the above
+		   XXX_KERNEL_MANAGED failure. As we are not responsible for
+		   it, we cannot progress any further and have to fail */
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Previously obtained CPU map address not SVM compatible"
+				, __func__));
+
+		/* Revert SVM heap to DEVMEM_HEAP_TYPE_KERNEL_MANAGED */
+		psHeap->eHeapType = DEVMEM_HEAP_TYPE_KERNEL_MANAGED;
+		PVR_DPF((PVR_DBG_MESSAGE,
+				"%s: Reverting SVM heap back to kernel managed",
+				__func__));
+
+		OSLockRelease(psImport->sCPUImport.hLock);
+
+		/* Do we need a more specific error code here? */
+		eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+		goto failSVM;
+	}
+
+	OSLockRelease(psImport->sCPUImport.hLock);
+
+	do
+	{
+		/* Next we proceed to instruct the kernel to use the RA_Alloc supplied
+		   virtual address to map-in this SVM import suballocation; there is no
+		   guarantee that this RA_Alloc virtual address may not collide with an
+		   already in-use VMA range in the process */
+		eError = RA_Alloc(psHeap->psQuantizedVMRA,
+						psImport->uiSize,
+						RA_NO_IMPORT_MULTIPLIER,
+						0, /* flags: this RA doesn't use flags*/
+						uiAlign,
+						"SVM_Virtual_Alloc",
+						&uiAllocatedAddr,
+						&uiAllocatedSize,
+						NULL /* don't care about per-import priv data */);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Cannot RA allocate SVM compatible address",
+					__func__));
+			goto failSVM;
+		}
+
+		/* No reason for allocated virtual size to be different from
+		   the PMR's size */
+		psImport->sCPUImport.pvCPUVAddr = (void*)(uintptr_t)uiAllocatedAddr;
+		PVR_ASSERT(uiAllocatedSize == psImport->uiSize);
+			
+		/* Map the import or allocation using the RA_Alloc virtual address;
+		   the kernel may fail the request if the supplied virtual address
+		   is already in-use in which case we re-try using another virtual
+		   address obtained from the RA_Alloc */
+		eError = _DevmemImportStructCPUMap(psImport);
+		if (eError != PVRSRV_OK)
+		{
+			/* For now we simply discard failed RA_Alloc() obtained virtual 
+			   address (i.e. plenty of virtual space), this prevents us from
+			   re-using these and furthermore essentially blacklists these
+			   addresses from future SVM consideration; We exit fall-back
+			   attempt if retry exceeds the fall-back retry limit */
+			if (uiRetry++ > DEVMEM_MAP_SVM_USER_MANAGED_RETRY)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Cannot find SVM compatible address, bad mapping",
+						__func__));
+				eError = PVRSRV_ERROR_BAD_MAPPING;
+				goto failSVM;
+			}
+		}
+		else
+		{
+			/* Found compatible SVM virtual address, set as device virtual address */
+			ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr;
+		}
+	} while (eError != PVRSRV_OK);
+
+	*ui64MapAddress = ui64SvmMapAddr;
+failSVM:	
+	return eError;
+}
+
+static inline void 
+_DevmemCPUUnmapSVMUserManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+	RA_BASE_T uiAllocatedAddr;
+
+	/* We only free SVM compatible addresses, all addresses in
+	   the blacklist are essentially excluded from future RA_Alloc */
+	uiAllocatedAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+	RA_Free(psHeap->psQuantizedVMRA, uiAllocatedAddr);
+
+	_DevmemImportStructCPUUnmap(psImport);
+}
+
+static inline PVRSRV_ERROR 
+_DevmemImportStructDevMapSVM(DEVMEM_HEAP *psHeap,
+							 DEVMEM_IMPORT *psImport,
+							 IMG_UINT uiAlign,
+							 IMG_UINT64 *ui64MapAddress)
+{
+	PVRSRV_ERROR eError;
+
+	switch (psHeap->eHeapType)
+	{
+		case DEVMEM_HEAP_TYPE_KERNEL_MANAGED:
+			eError = _DevmemCPUMapSVMKernelManaged(psHeap,
+												   psImport,
+												   ui64MapAddress);
+			if (eError == PVRSRV_ERROR_BAD_MAPPING)
+			{
+				/* If the SVM map address is outside of SVM heap limits,
+				   change heap type to DEVMEM_HEAP_TYPE_USER_MANAGED */
+				psHeap->eHeapType = DEVMEM_HEAP_TYPE_USER_MANAGED;
+				PVR_DPF((PVR_DBG_MESSAGE,
+					"%s: Kernel managed SVM heap is now user managed",
+					__func__));
+
+				/* Retry using user managed fall-back approach */
+				eError = _DevmemCPUMapSVMUserManaged(psHeap,
+													 psImport,
+													 uiAlign,
+													 ui64MapAddress);
+			}
+			break;
+
+		case DEVMEM_HEAP_TYPE_USER_MANAGED:
+			eError = _DevmemCPUMapSVMUserManaged(psHeap,
+												 psImport,
+												 uiAlign,
+												 ui64MapAddress);
+			break;
+
+		default:
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			break;
+	}
+
+	return eError;
+}
+
+static inline void 
+_DevmemImportStructDevUnmapSVM(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+	switch (psHeap->eHeapType)
+	{
+		case DEVMEM_HEAP_TYPE_KERNEL_MANAGED:
+			_DevmemCPUUnmapSVMKernelManaged(psHeap, psImport);
+			break;
+
+		case DEVMEM_HEAP_TYPE_USER_MANAGED:
+			_DevmemCPUUnmapSVMUserManaged(psHeap, psImport);
+			break;
+
+		default:
+			break;
+	}
+}
+
+/*
+	The Devmem import structure is the structure we use
+	to manage memory that is "imported" (which is page
+	granular) from the server into our process, this
+	includes allocations.
+
+	This allows memory to be imported without requiring
+	any CPU or device mapping. Memory can then be mapped
+	into the device or CPU on demand, but neither is
+	required.
+*/
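+
+/*
+	Lifecycle sketch (illustrative): the device and CPU mappings are
+	refcounted independently of the structure itself, e.g.
+
+		_DevmemImportStructDevMap(psHeap, IMG_TRUE, psImport, 0);   device VA ready
+		_DevmemImportStructCPUMap(psImport);                        CPU VA ready too
+		...
+		_DevmemImportStructCPUUnmap(psImport);                      CPU VA dropped
+		_DevmemImportStructDevUnmap(psImport);                      device VA dropped
+
+	Either mapping can be taken or dropped on its own; the import is
+	destroyed only when the final _DevmemImportStructRelease() drops
+	hRefCount to zero.
+*/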
+
+IMG_INTERNAL
+void _DevmemImportStructAcquire(DEVMEM_IMPORT *psImport)
+{
+	IMG_INT iRefCount = OSAtomicIncrement(&psImport->hRefCount);
+	PVR_UNREFERENCED_PARAMETER(iRefCount);
+	PVR_ASSERT(iRefCount != 1);
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__func__,
+					psImport,
+					iRefCount-1,
+					iRefCount);
+}
+
+IMG_INTERNAL
+IMG_BOOL _DevmemImportStructRelease(DEVMEM_IMPORT *psImport)
+{
+	IMG_INT iRefCount = OSAtomicDecrement(&psImport->hRefCount);
+	PVR_ASSERT(iRefCount >= 0);
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__func__,
+					psImport,
+					iRefCount+1,
+					iRefCount);
+
+	if (iRefCount == 0)
+	{
+		BridgePMRUnrefPMR(psImport->hDevConnection,
+						  psImport->hPMR);
+		OSLockDestroy(psImport->sCPUImport.hLock);
+		OSLockDestroy(psImport->sDeviceImport.hLock);
+		OSLockDestroy(psImport->hLock);
+#if defined(PDUMP)
+		OSFreeMem(psImport->pszAnnotation);
+#endif
+		OSFreeMem(psImport);
+
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+IMG_INTERNAL
+void _DevmemImportDiscard(DEVMEM_IMPORT *psImport)
+{
+	PVR_ASSERT(OSAtomicRead(&psImport->hRefCount) == 0);
+	OSLockDestroy(psImport->sCPUImport.hLock);
+	OSLockDestroy(psImport->sDeviceImport.hLock);
+	OSLockDestroy(psImport->hLock);
+	OSFreeMem(psImport);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc)
+{
+	DEVMEM_MEMDESC *psMemDesc;
+	PVRSRV_ERROR eError;
+
+	psMemDesc = OSAllocMem(sizeof(DEVMEM_MEMDESC));
+
+	if (psMemDesc == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto failAlloc;
+	}
+	
+	/* Structure must be zeroed in case it needs to be freed before it is initialised! */
+	OSCachedMemSet(psMemDesc, 0, sizeof(DEVMEM_MEMDESC));
+
+	eError = OSLockCreate(&psMemDesc->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMDLock;
+	}
+
+	eError = OSLockCreate(&psMemDesc->sDeviceMemDesc.hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto failDMDLock;
+	}
+
+	eError = OSLockCreate(&psMemDesc->sCPUMemDesc.hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto failCMDLock;
+	}
+
+	*ppsMemDesc = psMemDesc;
+
+	return PVRSRV_OK;
+
+failCMDLock:
+	OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+failDMDLock:
+	OSLockDestroy(psMemDesc->hLock);
+failMDLock:
+	OSFreeMem(psMemDesc);
+failAlloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+/*
+	Init the MemDesc structure
+*/
+IMG_INTERNAL
+void _DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc,
+										  IMG_DEVMEM_OFFSET_T uiOffset,
+										  DEVMEM_IMPORT *psImport,
+										  IMG_DEVMEM_SIZE_T uiSize)
+{
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__func__,
+					psMemDesc,
+					0,
+					1);
+
+	psMemDesc->psImport = psImport;
+	psMemDesc->uiOffset = uiOffset;
+
+	psMemDesc->sDeviceMemDesc.ui32RefCount = 0;
+	psMemDesc->sCPUMemDesc.ui32RefCount = 0;
+	psMemDesc->uiAllocSize = uiSize;
+	psMemDesc->hPrivData = NULL;
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	psMemDesc->sTraceData.ui32AllocationIndex = DEVICEMEM_HISTORY_ALLOC_INDEX_NONE;
+#endif
+
+	OSAtomicWrite(&psMemDesc->hRefCount, 1);
+}
+
+IMG_INTERNAL
+void _DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc)
+{
+	IMG_INT iRefCount = 0;
+
+	iRefCount = OSAtomicIncrement(&psMemDesc->hRefCount);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__func__,
+					psMemDesc,
+					iRefCount-1,
+					iRefCount);
+}
+
+IMG_INTERNAL
+IMG_BOOL _DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc)
+{
+	IMG_INT iRefCount;
+	PVR_ASSERT(psMemDesc != NULL);
+	
+	iRefCount = OSAtomicDecrement(&psMemDesc->hRefCount);
+	PVR_ASSERT(iRefCount >= 0);
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__func__,
+					psMemDesc,
+					iRefCount+1,
+					iRefCount);
+
+	if (iRefCount == 0)
+	{
+		if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SUBALLOCATABLE)
+		{
+			/* As soon as the first sub-allocation on the psImport is freed
+			 * we might get dirty memory when reusing it.
+			 * We have to clear the ZEROED & CLEAN flags */
+
+			psMemDesc->psImport->uiProperties &= ~DEVMEM_PROPERTIES_IMPORT_IS_ZEROED;
+			psMemDesc->psImport->uiProperties &= ~DEVMEM_PROPERTIES_IMPORT_IS_CLEAN;
+
+			RA_Free(psMemDesc->psImport->sDeviceImport.psHeap->psSubAllocRA,
+					psMemDesc->psImport->sDeviceImport.sDevVAddr.uiAddr +
+					psMemDesc->uiOffset);
+		}
+		else
+		{
+			_DevmemImportStructRelease(psMemDesc->psImport);
+		}
+
+		OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
+		OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+		OSLockDestroy(psMemDesc->hLock);
+		OSFreeMem(psMemDesc);
+
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+IMG_INTERNAL
+void _DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVR_ASSERT(OSAtomicRead(&psMemDesc->hRefCount) == 0);
+
+	OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
+	OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+	OSLockDestroy(psMemDesc->hLock);
+	OSFreeMem(psMemDesc);
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize,
+                                   IMG_DEVMEM_ALIGN_T uiAlign,
+                                   DEVMEM_FLAGS_T *puiFlags)
+{
+	if ((*puiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &&
+	    (*puiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.",
+		         __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
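+	/* A power of two has exactly one bit set, so anding the value with
+	   itself minus one yields zero only for powers of two:
+	   4096 & 4095 == 0, but 12 & 11 == 8. */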
+	if (uiAlign & (uiAlign-1))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: The requested alignment is not a power of two.",
+		         __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (uiSize == 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Please request a non-zero size value.",
+		         __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* If zero flag is set we have to have write access to the page. */
+	if (PVRSRV_CHECK_ZERO_ON_ALLOC(*puiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(*puiFlags))
+	{
+		(*puiFlags) |= PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+		             PVRSRV_MEMALLOCFLAG_CPU_READABLE;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*
+	Allocate and init an import structure
+*/
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection,
+									  DEVMEM_IMPORT **ppsImport)
+{
+	DEVMEM_IMPORT *psImport;
+	PVRSRV_ERROR eError;
+
+	psImport = OSAllocMem(sizeof *psImport);
+	if (psImport == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if defined(PDUMP)
+	/* Make sure this points nowhere as long as we don't need it */
+	psImport->pszAnnotation = NULL;
+#endif
+
+	/* Setup some known bad values for things we don't have yet */
+	psImport->sDeviceImport.hReservation = LACK_OF_RESERVATION_POISON;
+	psImport->sDeviceImport.hMapping = LACK_OF_MAPPING_POISON;
+	psImport->sDeviceImport.psHeap = NULL;
+	psImport->sDeviceImport.bMapped = IMG_FALSE;
+
+	eError = OSLockCreate(&psImport->sDeviceImport.hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto failDIOSLockCreate;
+	}
+
+	psImport->sCPUImport.hOSMMapData = NULL;
+	psImport->sCPUImport.pvCPUVAddr = NULL;
+
+	eError = OSLockCreate(&psImport->sCPUImport.hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto failCIOSLockCreate;
+	}
+
+	/* Set up common elements */
+	psImport->hDevConnection = hDevConnection;
+
+	/* Setup properties */
+	psImport->uiProperties = 0;
+
+	/* Setup refcounts */
+	psImport->sDeviceImport.ui32RefCount = 0;
+	psImport->sCPUImport.ui32RefCount = 0;
+	OSAtomicWrite(&psImport->hRefCount, 0);
+
+	/* Create the lock */
+	eError = OSLockCreate(&psImport->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto failILockAlloc;
+	}
+
+	*ppsImport = psImport;
+
+	return PVRSRV_OK;
+
+failILockAlloc:
+	OSLockDestroy(psImport->sCPUImport.hLock);
+failCIOSLockCreate:
+	OSLockDestroy(psImport->sDeviceImport.hLock);
+failDIOSLockCreate:
+	OSFreeMem(psImport);
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+/*
+	Initialise the import structure
+*/
+IMG_INTERNAL
+void _DevmemImportStructInit(DEVMEM_IMPORT *psImport,
+								 IMG_DEVMEM_SIZE_T uiSize,
+								 IMG_DEVMEM_ALIGN_T uiAlign,
+								 DEVMEM_FLAGS_T uiFlags,
+								 IMG_HANDLE hPMR,
+								 DEVMEM_PROPERTIES_T uiProperties)
+{
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__func__,
+					psImport,
+					0,
+					1);
+
+	psImport->uiSize = uiSize;
+	psImport->uiAlign = uiAlign;
+	psImport->uiFlags = uiFlags;
+	psImport->hPMR = hPMR;
+	psImport->uiProperties = uiProperties;
+	OSAtomicWrite(&psImport->hRefCount, 1);
+}
+
+/*
+	Map an import to the device
+*/
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
+                                       IMG_BOOL bMap,
+                                       DEVMEM_IMPORT *psImport,
+                                       IMG_UINT64 ui64OptionalMapAddress)
+{
+	DEVMEM_DEVICE_IMPORT *psDeviceImport;
+	RA_BASE_T uiAllocatedAddr;
+	RA_LENGTH_T uiAllocatedSize;
+	IMG_DEV_VIRTADDR sBase;
+	IMG_HANDLE hReservation;
+	PVRSRV_ERROR eError;
+	IMG_UINT uiAlign;
+	IMG_BOOL bDestroyed = IMG_FALSE;
+
+	/* Round the provided import alignment to the configured heap alignment */
+	uiAlign = 1ULL << psHeap->uiLog2ImportAlignment;
+	uiAlign = (psImport->uiAlign + uiAlign - 1) & ~(uiAlign-1);
+
+	psDeviceImport = &psImport->sDeviceImport;
+
+	OSLockAcquire(psDeviceImport->hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__func__,
+					psImport,
+					psDeviceImport->ui32RefCount,
+					psDeviceImport->ui32RefCount+1);
+
+	if (psDeviceImport->ui32RefCount++ == 0)
+	{
+		_DevmemImportStructAcquire(psImport);
+
+		OSAtomicIncrement(&psHeap->hImportCount);
+
+		if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags))
+		{
+			/*  SVM (shared virtual memory) imports or allocations always
+				need to acquire CPU virtual address first as address is
+				used to map the allocation into the device virtual address
+				space; i.e. the virtual address of the allocation for both
+				the CPU/GPU must be identical. */
+			eError = _DevmemImportStructDevMapSVM(psHeap,
+												  psImport,
+												  uiAlign,
+												  &ui64OptionalMapAddress);
+			if (eError != PVRSRV_OK)
+			{
+				goto failVMRAAlloc;
+			}
+		}
+
+		if (ui64OptionalMapAddress == 0)
+		{
+			if (psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED ||
+				psHeap->eHeapType == DEVMEM_HEAP_TYPE_KERNEL_MANAGED)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED ?
+						"%s: Heap is user managed, please use PVRSRVMapToDeviceAddress().":
+						"%s: Heap is kernel managed, use right allocation flags (e.g. SVM).",
+						__func__));
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto failVMRAAlloc;
+			}
+			psHeap->eHeapType = DEVMEM_HEAP_TYPE_RA_MANAGED;
+
+			/* Allocate space in the VM */
+			eError = RA_Alloc(psHeap->psQuantizedVMRA,
+			                  psImport->uiSize,
+			                  RA_NO_IMPORT_MULTIPLIER,
+			                  0, /* flags: this RA doesn't use flags*/
+			                  uiAlign,
+			                  "Virtual_Alloc",
+			                  &uiAllocatedAddr,
+			                  &uiAllocatedSize,
+			                  NULL /* don't care about per-import priv data */
+			                  );
+			if (PVRSRV_OK != eError)
+			{
+				eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM;
+				goto failVMRAAlloc;
+			}
+
+			/* No reason for the allocated virtual size to be different from
+			   the PMR's size */
+			PVR_ASSERT(uiAllocatedSize == psImport->uiSize);
+
+			sBase.uiAddr = uiAllocatedAddr;
+
+		}
+		else
+		{
+			IMG_UINT64 uiHeapAddrEnd;
+
+			switch (psHeap->eHeapType)
+			{
+				case DEVMEM_HEAP_TYPE_UNKNOWN:
+					/* DEVMEM_HEAP_TYPE_USER_MANAGED can apply to _any_
+					   heap and can only be determined here. This heap
+					   type transitions from DEVMEM_HEAP_TYPE_UNKNOWN
+					   to DEVMEM_HEAP_TYPE_USER_MANAGED on 1st alloc */
+					psHeap->eHeapType = DEVMEM_HEAP_TYPE_USER_MANAGED;
+					break;
+
+				case DEVMEM_HEAP_TYPE_USER_MANAGED:
+				case DEVMEM_HEAP_TYPE_KERNEL_MANAGED:
+					if (! psHeap->uiSize)
+					{
+						PVR_DPF((PVR_DBG_ERROR,
+							psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED ?
+							"%s: Heap DEVMEM_HEAP_TYPE_USER_MANAGED is disabled.":
+							"%s: Heap DEVMEM_HEAP_TYPE_KERNEL_MANAGED is disabled."
+							, __func__));
+						eError = PVRSRV_ERROR_INVALID_HEAP;
+						goto failVMRAAlloc;
+					}
+					break;
+
+				case DEVMEM_HEAP_TYPE_RA_MANAGED:
+					PVR_DPF((PVR_DBG_ERROR,
+						"%s: This heap is managed by an RA, please use PVRSRVMapToDevice()"
+						" and don't use allocation flags that assume differently (e.g. SVM)."
+						, __func__));
+					eError = PVRSRV_ERROR_INVALID_PARAMS;
+					goto failVMRAAlloc;
+
+				default:
+					break;
+			}
+
+			/* Ensure supplied ui64OptionalMapAddress is within heap range */
+			uiHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize;
+			if (ui64OptionalMapAddress >= uiHeapAddrEnd ||
+				ui64OptionalMapAddress + psImport->uiSize > uiHeapAddrEnd)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: ui64OptionalMapAddress %p is outside of heap limits <%p:%p>."
+						, __func__
+						, (void*)(uintptr_t)ui64OptionalMapAddress
+						, (void*)(uintptr_t)psHeap->sBaseAddress.uiAddr
+						, (void*)(uintptr_t)uiHeapAddrEnd));
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto failVMRAAlloc;
+			}
+
+			if (ui64OptionalMapAddress & ((1ULL << psHeap->uiLog2Quantum) - 1))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Invalid address to map to. Please provide an address aligned to "
+						"a page multiple of the heap."
+						, __func__));
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto failVMRAAlloc;
+			}
+
+			uiAllocatedAddr = ui64OptionalMapAddress;
+
+			if (psImport->uiSize & ((1ULL << psHeap->uiLog2Quantum) - 1))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"%s: Invalid heap to map to. "
+						"Please choose a heap that can handle smaller page sizes."
+						, __func__));
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto failVMRAAlloc;
+			}
+			uiAllocatedSize = psImport->uiSize;
+			sBase.uiAddr = uiAllocatedAddr;
+		}
+	
+		/* Setup page tables for the allocated VM space */
+		eError = BridgeDevmemIntReserveRange(psHeap->psCtx->hDevConnection,
+											 psHeap->hDevMemServerHeap,
+											 sBase,
+											 uiAllocatedSize,
+											 &hReservation);
+		if (eError != PVRSRV_OK)
+		{
+			goto failReserve;
+		}
+
+		if (bMap)
+		{
+			DEVMEM_FLAGS_T uiMapFlags;
+			
+			uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK;
+
+			/* Actually map the PMR to allocated VM space */
+			eError = BridgeDevmemIntMapPMR(psHeap->psCtx->hDevConnection,
+										   psHeap->hDevMemServerHeap,
+										   hReservation,
+										   psImport->hPMR,
+										   uiMapFlags,
+										   &psDeviceImport->hMapping);
+			if (eError != PVRSRV_OK)
+			{
+				goto failMap;
+			}
+			psDeviceImport->bMapped = IMG_TRUE;
+		}
+
+		/* Setup device mapping specific parts of the mapping info */
+		psDeviceImport->hReservation = hReservation;
+		psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr;
+		psDeviceImport->psHeap = psHeap;
+	}
+	else
+	{
+		/*
+			Check that we've been asked to map it into the
+			same heap 2nd time around
+		*/
+		if (psHeap != psDeviceImport->psHeap)
+		{
+			eError = PVRSRV_ERROR_INVALID_HEAP;
+			goto failParams;
+		}
+	}
+	OSLockRelease(psDeviceImport->hLock);
+
+	return PVRSRV_OK;
+
+failMap:
+	BridgeDevmemIntUnreserveRange(psHeap->psCtx->hDevConnection,
+								  hReservation);
+failReserve:
+	if (ui64OptionalMapAddress == 0)
+	{
+		RA_Free(psHeap->psQuantizedVMRA,
+				uiAllocatedAddr);
+	}
+failVMRAAlloc:
+	bDestroyed = _DevmemImportStructRelease(psImport);
+	OSAtomicDecrement(&psHeap->hImportCount);
+failParams:
+	if (!bDestroyed)
+	{
+		psDeviceImport->ui32RefCount--;
+		OSLockRelease(psDeviceImport->hLock);
+	}
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+	Unmap an import from the Device
+*/
+IMG_INTERNAL
+void _DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_DEVICE_IMPORT *psDeviceImport;
+
+	psDeviceImport = &psImport->sDeviceImport;
+
+	OSLockAcquire(psDeviceImport->hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__func__,
+					psImport,
+					psDeviceImport->ui32RefCount,
+					psDeviceImport->ui32RefCount-1);
+
+	if (--psDeviceImport->ui32RefCount == 0)
+	{
+		DEVMEM_HEAP *psHeap = psDeviceImport->psHeap;
+
+		if (psDeviceImport->bMapped)
+		{
+			do
+			{
+				eError = BridgeDevmemIntUnmapPMR(psImport->hDevConnection,
+												 psDeviceImport->hMapping);
+				if (eError == PVRSRV_ERROR_RETRY)
+				{
+					OSWaitus(DEVMEM_UNMAP_RETRY_DELAY_US);
+				}
+			} while (eError == PVRSRV_ERROR_RETRY);
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+
+		eError = BridgeDevmemIntUnreserveRange(psImport->hDevConnection,
+		                                       psDeviceImport->hReservation);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		psDeviceImport->bMapped = IMG_FALSE;
+		psDeviceImport->hMapping = LACK_OF_MAPPING_POISON;
+		psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON;
+
+		if (psHeap->eHeapType == DEVMEM_HEAP_TYPE_RA_MANAGED)
+		{
+			RA_Free(psHeap->psQuantizedVMRA,
+					psDeviceImport->sDevVAddr.uiAddr);
+		}
+
+		if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags))
+		{
+			_DevmemImportStructDevUnmapSVM(psHeap, psImport);
+		}
+
+		OSLockRelease(psDeviceImport->hLock);
+
+		_DevmemImportStructRelease(psImport);
+
+		OSAtomicDecrement(&psHeap->hImportCount);
+	}
+	else
+	{
+		OSLockRelease(psDeviceImport->hLock);
+	}
+}
+
+/*
+	Map an import into the CPU
+*/
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_CPU_IMPORT *psCPUImport;
+	size_t uiMappingLength;
+
+	psCPUImport = &psImport->sCPUImport;
+
+	OSLockAcquire(psCPUImport->hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psImport,
+					psCPUImport->ui32RefCount,
+					psCPUImport->ui32RefCount+1);
+
+	if (psCPUImport->ui32RefCount++ == 0)
+	{
+		_DevmemImportStructAcquire(psImport);
+
+		eError = OSMMapPMR(psImport->hDevConnection,
+		                   psImport->hPMR,
+		                   psImport->uiSize,
+		                   psImport->uiFlags,
+		                   &psCPUImport->hOSMMapData,
+		                   &psCPUImport->pvCPUVAddr,
+		                   &uiMappingLength);
+		if (eError != PVRSRV_OK)
+		{
+			goto failMap;
+		}
+
+		/* There is no reason the mapping length is different to the size */
+		PVR_ASSERT(uiMappingLength == psImport->uiSize);
+	}
+	OSLockRelease(psCPUImport->hLock);
+
+	return PVRSRV_OK;
+
+failMap:
+	psCPUImport->ui32RefCount--;
+	if (!_DevmemImportStructRelease(psImport))
+	{
+		OSLockRelease(psCPUImport->hLock);
+	}
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+	Unmap an import from the CPU
+*/
+IMG_INTERNAL
+void _DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport)
+{
+	DEVMEM_CPU_IMPORT *psCPUImport;
+
+	psCPUImport = &psImport->sCPUImport;
+
+	OSLockAcquire(psCPUImport->hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__func__,
+					psImport,
+					psCPUImport->ui32RefCount,
+					psCPUImport->ui32RefCount-1);
+
+	if (--psCPUImport->ui32RefCount == 0)
+	{
+		/* psImport->uiSize is a 64-bit quantity whereas the 5th
+		 * argument to OSUnmapPMR is a 32-bit quantity on 32-bit systems
+		 * hence a compiler warning of implicit cast and loss of data.
+		 * Added explicit cast and assert to remove warning.
+		 */
+#if (defined(_WIN32) && !defined(_WIN64)) || (defined(LINUX) && defined(__i386__))
+		PVR_ASSERT(psImport->uiSize<IMG_UINT32_MAX);
+#endif
+		OSMUnmapPMR(psImport->hDevConnection,
+					psImport->hPMR,
+					psCPUImport->hOSMMapData,
+					psCPUImport->pvCPUVAddr,
+					psImport->uiSize);
+
+		OSLockRelease(psCPUImport->hLock);
+
+		_DevmemImportStructRelease(psImport);
+	}
+	else
+	{
+		OSLockRelease(psCPUImport->hLock);
+	}
+}
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/devicememx_pdump.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/devicememx_pdump.c
new file mode 100644
index 0000000..42db380
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/devicememx_pdump.c
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@File
+@Title          Shared X device memory management PDump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements common (client & server) PDump functions for the
+                memory management code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#if defined(PDUMP)
+
+#include "devicememx_pdump.h"
+#include "pdump.h"
+#include "client_pdumpmm_bridge.h"
+#include "devicemem_utils.h"
+
+IMG_INTERNAL void
+DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys,
+                    IMG_DEVMEM_OFFSET_T uiOffset,
+                    IMG_DEVMEM_SIZE_T uiSize,
+                    PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(uiSize != 0);
+	PVR_ASSERT(uiOffset + uiSize <= (psMemDescPhys->uiNumPages << psMemDescPhys->uiLog2PageSize));
+
+	eError = BridgePMRPDumpLoadMem(psMemDescPhys->hBridge,
+	                               psMemDescPhys->hPMR,
+	                               uiOffset,
+	                               uiSize,
+	                               uiPDumpFlags,
+	                               IMG_FALSE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: failed with error %d",
+		         __func__, eError));
+	}
+}
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/hash.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/hash.c
new file mode 100644
index 0000000..b098dfe
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/hash.c
@@ -0,0 +1,701 @@
+/*************************************************************************/ /*!
+@File
+@Title          Self scaling hash tables.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description 
+   Implements simple self scaling hash tables. Hash collisions are
+   handled by chaining entries together. Hash tables are increased in
+   size when they become more than (50%?) full and decreased in size
+   when less than (25%?) full. Hash tables are never decreased below
+   their initial size.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* include/ */
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/shared/include/ */
+#include "hash.h"
+
+/* services/client/include/ or services/server/include/ */
+#include "osfunc.h"
+#include "allocmem.h"
+
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b))
+
+#define	KEY_TO_INDEX(pHash, key, uSize) \
+	((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize))
+
+#define	KEY_COMPARE(pHash, pKey1, pKey2) \
+	((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2)))
+
+/* Each entry in a hash table is placed into a bucket */
+struct _BUCKET_
+{
+	/* the next bucket on the same chain */
+	struct _BUCKET_ *pNext;
+
+	/* entry value */
+	uintptr_t v;
+
+	/* entry key */
+#if defined (WIN32)
+	uintptr_t k[1];
+#else
+	uintptr_t k[];		/* PRQA S 0642 */ /* override dynamic array declaration warning */
+#endif
+};
+typedef struct _BUCKET_ BUCKET;
+
+struct _HASH_TABLE_
+{
+	/* current size of the hash table */
+	IMG_UINT32 uSize;
+
+	/* number of entries currently in the hash table */
+	IMG_UINT32 uCount;
+
+	/* the minimum size that the hash table should be re-sized to */
+	IMG_UINT32 uMinimumSize;
+
+	/* size of key in bytes */
+	IMG_UINT32 uKeySize;
+
+	/* hash function */
+	HASH_FUNC *pfnHashFunc;
+
+	/* key comparison function */
+	HASH_KEY_COMP *pfnKeyComp;
+
+	/* the hash table array */
+	BUCKET **ppBucketTable;
+};
+
+/*************************************************************************/ /*!
+@Function       HASH_Func_Default
+@Description    Hash function intended for hashing keys composed of
+                uintptr_t arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey         A pointer to the key to hash.
+@Input          uHashTabLen  The length of the hash table.
+@Return         The hash value.
+*/ /**************************************************************************/
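+/* The per-word mixing below follows a Jenkins-style shift/add/xor
+   avalanche (presumed intent), spreading each key word's bits across
+   the whole 32-bit hash before the caller reduces it modulo the table
+   size (see KEY_TO_INDEX). */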
+IMG_INTERNAL IMG_UINT32
+HASH_Func_Default (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+	uintptr_t *p = (uintptr_t *)pKey;
+	IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t);
+	IMG_UINT32 ui;
+	IMG_UINT32 uHashKey = 0;
+
+	PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+	PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0);
+
+	for (ui = 0; ui < uKeyLen; ui++)
+	{
+		IMG_UINT32 uHashPart = (IMG_UINT32)*p++;
+
+		uHashPart += (uHashPart << 12);
+		uHashPart ^= (uHashPart >> 22);
+		uHashPart += (uHashPart << 4);
+		uHashPart ^= (uHashPart >> 9);
+		uHashPart += (uHashPart << 10);
+		uHashPart ^= (uHashPart >> 2);
+		uHashPart += (uHashPart << 7);
+		uHashPart ^= (uHashPart >> 12);
+
+		uHashKey += uHashPart;
+	}
+
+	return uHashKey;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Key_Comp_Default
+@Description    Compares keys composed of uintptr_t arrays.
+@Input          uKeySize    The size of the hash key, in bytes.
+@Input          pKey1       Pointer to first hash key to compare.
+@Input          pKey2       Pointer to second hash key to compare.
+@Return         IMG_TRUE    The keys match.
+                IMG_FALSE   The keys don't match.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Key_Comp_Default (size_t uKeySize, void *pKey1, void *pKey2)
+{
+	uintptr_t *p1 = (uintptr_t *)pKey1;
+	uintptr_t *p2 = (uintptr_t *)pKey2;
+	IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t);
+	IMG_UINT32 ui;
+
+	PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0);
+
+	for (ui = 0; ui < uKeyLen; ui++)
+	{
+		if (*p1++ != *p2++)
+			return IMG_FALSE;
+	}
+
+	return IMG_TRUE;
+}
+
+/*************************************************************************/ /*!
+@Function       _ChainInsert
+@Description    Insert a bucket into the appropriate hash table chain.
+@Input          pHash         The hash table
+@Input          pBucket       The bucket
+@Input          ppBucketTable The bucket table to insert into
+@Input          uSize         The size of the bucket table
+@Return         None
+*/ /**************************************************************************/
+static void
+_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize)
+{
+	IMG_UINT32 uIndex;
+
+	/* We assume that all parameters passed by the caller are valid. */
+	PVR_ASSERT (pBucket != NULL);
+	PVR_ASSERT (ppBucketTable != NULL);
+	PVR_ASSERT (uSize != 0);
+
+	uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize);	/* PRQA S 0432,0541 */ /* ignore dynamic array warning */
+	pBucket->pNext = ppBucketTable[uIndex];
+	ppBucketTable[uIndex] = pBucket;
+}
+
+/*************************************************************************/ /*!
+@Function       _Rehash
+@Description    Iterate over every entry in an old hash table and
+                rehash into the new table.
+@Input          pHash        The hash table (supplies key size and hash function)
+@Input          ppOldTable   The old bucket table
+@Input          uOldSize     The size of the old bucket table
+@Input          ppNewTable   The new bucket table
+@Input          uNewSize     The size of the new bucket table
+@Return         None
+*/ /**************************************************************************/
+static void
+_Rehash (HASH_TABLE *pHash,
+         BUCKET **ppOldTable, IMG_UINT32 uOldSize,
+         BUCKET **ppNewTable, IMG_UINT32 uNewSize)
+{
+	IMG_UINT32 uIndex;
+	for (uIndex=0; uIndex<uOldSize; uIndex++)
+	{
+		BUCKET *pBucket;
+		pBucket = ppOldTable[uIndex];
+		while (pBucket != NULL)
+		{
+			BUCKET *pNextBucket = pBucket->pNext;
+			_ChainInsert (pHash, pBucket, ppNewTable, uNewSize);
+			pBucket = pNextBucket;
+		}
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       _Resize
+@Description    Attempt to resize a hash table, failure to allocate a
+                new larger hash table is not considered a hard failure.
+                We simply continue and allow the table to fill up, the
+                effect is to allow hash chains to become longer.
+@Input          pHash      Hash table to resize.
+@Input          uNewSize   Required table size.
+@Return         IMG_TRUE Success
+                IMG_FALSE Failed
+*/ /**************************************************************************/
+static IMG_BOOL
+_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize)
+{
+	if (uNewSize != pHash->uSize)
+	{
+		BUCKET **ppNewTable;
+		IMG_UINT32 uIndex;
+
+#if defined(__linux__) && defined(__KERNEL__)
+		ppNewTable = OSAllocMemNoStats(sizeof (BUCKET *) * uNewSize);
+#else
+		ppNewTable = OSAllocMem(sizeof (BUCKET *) * uNewSize);
+#endif
+		if (ppNewTable == NULL)
+		{
+			return IMG_FALSE;
+		}
+
+		for (uIndex=0; uIndex<uNewSize; uIndex++)
+			ppNewTable[uIndex] = NULL;
+
+		_Rehash(pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize);
+
+#if defined(__linux__) && defined(__KERNEL__)
+		OSFreeMemNoStats(pHash->ppBucketTable);
+#else
+		OSFreeMem(pHash->ppBucketTable);
+#endif
+		/* not nulling pointer, being reassigned just below */
+		pHash->ppBucketTable = ppNewTable;
+		pHash->uSize = uNewSize;
+	}
+	return IMG_TRUE;
+}
+
+
+/*************************************************************************/ /*!
+@Function       HASH_Create_Extended
+@Description    Create a self scaling hash table, using the supplied
+                key size, and the supplied hash and key comparison
+                functions.
+@Input          uInitialLen   Initial and minimum length of the
+                              hash table, where the length refers to the number
+                              of entries in the hash table, not its size in
+                              bytes.
+@Input          uKeySize      The size of the key, in bytes.
+@Input          pfnHashFunc   Pointer to hash function.
+@Input          pfnKeyComp    Pointer to key comparison function.
+@Return         NULL or hash table handle.
+*/ /**************************************************************************/
+IMG_INTERNAL 
+HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp)
+{
+	HASH_TABLE *pHash;
+	IMG_UINT32 uIndex;
+
+	if (uInitialLen == 0 || uKeySize == 0 || pfnHashFunc == NULL || pfnKeyComp == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HASH_Create_Extended: invalid input parameters"));
+		return NULL;
+	}
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen));
+
+#if defined(__linux__) && defined(__KERNEL__)
+	pHash = OSAllocMemNoStats(sizeof(HASH_TABLE));
+#else
+	pHash = OSAllocMem(sizeof(HASH_TABLE));
+#endif
+	if (pHash == NULL)
+	{
+		return NULL;
+	}
+
+	pHash->uCount = 0;
+	pHash->uSize = uInitialLen;
+	pHash->uMinimumSize = uInitialLen;
+	pHash->uKeySize = uKeySize;
+	pHash->pfnHashFunc = pfnHashFunc;
+	pHash->pfnKeyComp = pfnKeyComp;
+
+#if defined(__linux__) && defined(__KERNEL__)
+	pHash->ppBucketTable = OSAllocMemNoStats(sizeof (BUCKET *) * pHash->uSize);
+#else
+	pHash->ppBucketTable = OSAllocMem(sizeof (BUCKET *) * pHash->uSize);
+#endif
+	if (pHash->ppBucketTable == NULL)
+	{
+#if defined(__linux__) && defined(__KERNEL__)
+		OSFreeMemNoStats(pHash);
+#else
+		OSFreeMem(pHash);
+#endif
+		/* not nulling pointer, out of scope */
+		return NULL;
+	}
+
+	for (uIndex=0; uIndex<pHash->uSize; uIndex++)
+		pHash->ppBucketTable[uIndex] = NULL;
+	return pHash;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Create
+@Description    Create a self scaling hash table with a key
+                consisting of a single uintptr_t, and using
+                the default hash and key comparison functions.
+@Input          uInitialLen   Initial and minimum length of the
+                              hash table, where the length refers to the
+                              number of entries in the hash table, not its size
+                              in bytes.
+@Return         NULL or hash table handle.
+*/ /**************************************************************************/
+IMG_INTERNAL 
+HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen)
+{
+	return HASH_Create_Extended(uInitialLen, sizeof(uintptr_t),
+		&HASH_Func_Default, &HASH_Key_Comp_Default);
+}
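+
+/* Usage sketch (illustrative only, not part of the driver): a table created
+ * with HASH_Create keys on a single uintptr_t, so a typical round trip is:
+ *
+ *   HASH_TABLE *pTable = HASH_Create(64);
+ *   if (pTable != NULL)
+ *   {
+ *       HASH_Insert(pTable, (uintptr_t) 0x1000, (uintptr_t) 42);
+ *       PVR_ASSERT(HASH_Retrieve(pTable, (uintptr_t) 0x1000) == 42);
+ *       HASH_Remove(pTable, (uintptr_t) 0x1000);
+ *       HASH_Delete(pTable);
+ *   }
+ *
+ * The values 64, 0x1000 and 42 are arbitrary; HASH_Delete expects every
+ * entry to have been removed first.
+ */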
+
+/*************************************************************************/ /*!
+@Function       HASH_Delete
+@Description    Delete a hash table created by HASH_Create_Extended or
+                HASH_Create.  All entries in the table must have been
+                removed before calling this function.
+@Input          pHash     Hash table
+@Return         None
+*/ /**************************************************************************/
+IMG_INTERNAL void
+HASH_Delete (HASH_TABLE *pHash)
+{
+	IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__) && !defined(__QNXNTO__)
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if (psPVRSRVData != NULL)
+	{
+		if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+		{
+			bDoCheck = IMG_FALSE;
+		}
+	}
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+	else
+	{
+		bDoCheck = IMG_FALSE;
+	}
+#endif
+#endif
+	if (pHash != NULL)
+    {
+		PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Delete"));
+
+		if (bDoCheck)
+		{
+			PVR_ASSERT (pHash->uCount==0);
+		}
+		if (pHash->uCount != 0)
+		{
+			IMG_UINT32 i;
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Leak detected in hash table!", __func__));
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmemcontext", __func__));
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Removing remaining %u hash entries.", __func__, pHash->uCount));
+
+			/* Walk every bucket chain and free the leaked entries. The
+			   remaining entries may live in any table slot and may be
+			   chained, so freeing only the first uCount slots would
+			   both miss entries and leak chained buckets. */
+			for (i = 0; i < pHash->uSize; i++)
+			{
+				BUCKET *pBucket = pHash->ppBucketTable[i];
+				while (pBucket != NULL)
+				{
+					BUCKET *pNextBucket = pBucket->pNext;
+#if defined(__linux__) && defined(__KERNEL__)
+					OSFreeMemNoStats(pBucket);
+#else
+					OSFreeMem(pBucket);
+#endif
+					pBucket = pNextBucket;
+				}
+			}
+		}
+#if defined(__linux__) && defined(__KERNEL__)
+		OSFreeMemNoStats(pHash->ppBucketTable);
+#else
+		OSFreeMem(pHash->ppBucketTable);
+#endif
+		pHash->ppBucketTable = NULL;
+#if defined(__linux__) && defined(__KERNEL__)
+		OSFreeMemNoStats(pHash);
+#else
+		OSFreeMem(pHash);
+#endif
+		/*not nulling pointer, copy on stack*/
+    }
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert_Extended
+@Description    Insert a key value pair into a hash table created
+                with HASH_Create_Extended.
+@Input          pHash     Hash table
+@Input          pKey      Pointer to the key.
+@Input          v         The value associated with the key.
+@Return         IMG_TRUE  - success
+                IMG_FALSE  - failure
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Insert_Extended (HASH_TABLE *pHash, void *pKey, uintptr_t v)
+{
+	BUCKET *pBucket;
+
+	PVR_ASSERT (pHash != NULL);
+
+	if (pHash == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HASH_Insert_Extended: invalid parameter"));
+		return IMG_FALSE;
+	}
+
+#if defined(__linux__) && defined(__KERNEL__)
+	pBucket = OSAllocMemNoStats(sizeof(BUCKET) + pHash->uKeySize);
+#else
+	pBucket = OSAllocMem(sizeof(BUCKET) + pHash->uKeySize);
+#endif
+    if (pBucket == NULL)
+	{
+		return IMG_FALSE;
+	}
+
+	pBucket->v = v;
+	/* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k (linux)*/
+	OSCachedMemCopy(pBucket->k, pKey, pHash->uKeySize);
+
+	_ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize);
+
+	pHash->uCount++;
+
+	/* check if we need to think about re-balancing */
+	if (pHash->uCount << 1 > pHash->uSize)
+    {
+        /* Ignore the return code from _Resize because the hash table is
+           still in a valid state and although not ideally sized, it is still
+           functional */
+        _Resize (pHash, pHash->uSize << 1);
+    }
+
+
+	return IMG_TRUE;
+}
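+
+/* Growth policy in numbers (illustrative): the test above doubles the table
+ * once the entry count exceeds half the bucket count. For example, with
+ * uSize == 8 the fifth insertion makes uCount << 1 == 10 > 8 and grows the
+ * table to 16 buckets. The complementary check in HASH_Remove_Extended
+ * halves the table (never below uMinimumSize) once uSize > uCount << 2,
+ * i.e. when occupancy drops below a quarter.
+ */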
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert
+@Description    Insert a key value pair into a hash table created with
+                HASH_Create.
+@Input          pHash     Hash table
+@Input          k         The key value.
+@Input          v         The value associated with the key.
+@Return         IMG_TRUE - success.
+                IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Insert (HASH_TABLE *pHash, uintptr_t k, uintptr_t v)
+{
+	return HASH_Insert_Extended(pHash, &k, v);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove_Extended
+@Description    Remove a key from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash     Hash table
+@Input          pKey      Pointer to key.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey)
+{
+	BUCKET **ppBucket;
+	IMG_UINT32 uIndex;
+
+	PVR_ASSERT (pHash != NULL);
+
+	if (pHash == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HASH_Remove_Extended: Null hash table"));
+		return 0;
+	}
+
+	uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+	for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext))
+	{
+		/* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
+		if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+		{
+			BUCKET *pBucket = *ppBucket;
+			uintptr_t v = pBucket->v;
+			(*ppBucket) = pBucket->pNext;
+
+#if defined(__linux__) && defined(__KERNEL__)
+			OSFreeMemNoStats(pBucket);
+#else
+			OSFreeMem(pBucket);
+#endif
+			/*not nulling original pointer, already overwritten*/
+
+			pHash->uCount--;
+
+			/* check if we need to think about re-balancing */
+			if (pHash->uSize > (pHash->uCount << 2) &&
+                pHash->uSize > pHash->uMinimumSize)
+            {
+                /* Ignore the return code from _Resize because the
+                   hash table is still in a valid state and although
+                   not ideally sized, it is still functional */
+				_Resize (pHash,
+                         PRIVATE_MAX (pHash->uSize >> 1,
+                                      pHash->uMinimumSize));
+            }
+
+			return v;
+		}
+	}
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove
+@Description    Remove a key value pair from a hash table created
+                with HASH_Create.
+@Input          pHash     Hash table
+@Input          k         The key
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Remove (HASH_TABLE *pHash, uintptr_t k)
+{
+	return HASH_Remove_Extended(pHash, &k);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve_Extended
+@Description    Retrieve a value from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash     Hash table
+@Input          pKey      Pointer to the key.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Retrieve_Extended (HASH_TABLE *pHash, void *pKey)
+{
+	BUCKET **ppBucket;
+	IMG_UINT32 uIndex;
+
+	PVR_ASSERT (pHash != NULL);
+
+	if (pHash == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HASH_Retrieve_Extended: Null hash table"));
+		return 0;
+	}
+
+	uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+	for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext))
+	{
+		/* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
+		if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+		{
+			BUCKET *pBucket = *ppBucket;
+			uintptr_t v = pBucket->v;
+
+			return v;
+		}
+	}
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve
+@Description    Retrieve a value from a hash table created with
+                HASH_Create.
+@Input          pHash     Hash table
+@Input          k         The key
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Retrieve (HASH_TABLE *pHash, uintptr_t k)
+{
+	return HASH_Retrieve_Extended(pHash, &k);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Iterate
+@Description    Iterate over every entry in the hash table
+@Input          pHash        Hash table to iterate.
+@Input          pfnCallback  Callback to call with the key and value of
+                             each entry in the hash table.
+@Return         Callback error if any, otherwise PVRSRV_OK
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback)
+{
+    IMG_UINT32 uIndex;
+    for (uIndex=0; uIndex < pHash->uSize; uIndex++)
+    {
+        BUCKET *pBucket;
+        pBucket = pHash->ppBucketTable[uIndex];
+        while (pBucket != NULL)
+        {
+            PVRSRV_ERROR eError;
+            BUCKET *pNextBucket = pBucket->pNext;
+
+            eError = pfnCallback((uintptr_t) ((void *) *(pBucket->k)), (uintptr_t) pBucket->v);
+
+            /* The callback might want us to break out early */
+            if (eError != PVRSRV_OK)
+                return eError;
+
+            pBucket = pNextBucket;
+        }
+    }
+    return PVRSRV_OK;
+}
+
+#ifdef HASH_TRACE
+/*************************************************************************/ /*!
+@Function       HASH_Dump
+@Description    To dump the contents of a hash table in human readable
+                form.
+@Input          pHash     Hash table
+*/ /**************************************************************************/
+void
+HASH_Dump (HASH_TABLE *pHash)
+{
+	IMG_UINT32 uIndex;
+	IMG_UINT32 uMaxLength=0;
+	IMG_UINT32 uEmptyCount=0;
+
+	PVR_ASSERT (pHash != NULL);
+	for (uIndex=0; uIndex<pHash->uSize; uIndex++)
+	{
+		BUCKET *pBucket;
+		IMG_UINT32 uLength = 0;
+		if (pHash->ppBucketTable[uIndex] == NULL)
+		{
+			uEmptyCount++;
+		}
+		for (pBucket=pHash->ppBucketTable[uIndex];
+				pBucket != NULL;
+				pBucket = pBucket->pNext)
+		{
+			uLength++;
+		}
+		uMaxLength = PRIVATE_MAX (uMaxLength, uLength);
+	}
+
+	PVR_TRACE(("hash table: uMinimumSize=%d  size=%d  count=%d",
+			pHash->uMinimumSize, pHash->uSize, pHash->uCount));
+	PVR_TRACE(("  empty=%d  max=%d", uEmptyCount, uMaxLength));
+}
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/htbuffer.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/htbuffer.c
new file mode 100644
index 0000000..b2f2006
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/htbuffer.c
@@ -0,0 +1,226 @@
+/*************************************************************************/ /*!
+@File			htbuffer.c
+@Title          Host Trace Buffer shared API.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+				buffer in a similar way to the Firmware Trace mechanism.
+				Host Trace Buffer logs data using a Transport Layer buffer.
+				The Transport Layer and the pvrtld tool provide the
+				mechanism to retrieve the trace data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stdarg.h>
+#include "htbuffer.h"
+//#include "allocmem.h"
+#include "osfunc.h"
+#include "client_htbuffer_bridge.h"
+#if defined(__KERNEL__)
+//#include "osfunc.h"
+#endif
+
+/* the group flags: an array of ints large enough to store all the group
+ * flags. NB: This will only work while all logging is in the kernel.
+ */
+IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL] = {0};
+
+/*************************************************************************/ /*!
+ @Function      HTBConfigure
+ @Description   Configure the Host Trace Buffer.
+                Once these parameters are set they may not be changed
+
+ @Input         hSrvHandle      Server Handle
+
+ @Input         pszBufferName   Name to use for the TL buffer, this will be
+                                required to request trace data from the TL
+
+ @Input         ui32BufferSize  Requested TL buffer size in bytes
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBConfigure(
+	IMG_HANDLE hSrvHandle,
+	IMG_CHAR * pszBufferName,
+	IMG_UINT32 ui32BufferSize
+)
+{
+	return BridgeHTBConfigure(
+			hSrvHandle,
+			(OSStringLength(pszBufferName)+1),
+			pszBufferName,
+			ui32BufferSize
+			);
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBControl
+ @Description   Update the configuration of the Host Trace Buffer
+
+ @Input         hSrvHandle      Server Handle
+
+ @Input         ui32NumFlagGroups Number of group enable flags words
+ 
+ @Input         aui32GroupEnable  Flags words controlling groups to be logged
+
+ @Input         ui32LogLevel    Log level to record
+
+ @Input         ui32EnablePID   PID to enable logging for a specific process
+
+ @Input         eLogPidMode     Enable logging for all or specific processes
+
+ @Input         eOpMode         Control what trace data is dropped if the TL
+                                buffer is full
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBControl(
+	IMG_HANDLE hSrvHandle,
+	IMG_UINT32 ui32NumFlagGroups,
+	IMG_UINT32 * aui32GroupEnable,
+	IMG_UINT32 ui32LogLevel,
+	IMG_UINT32 ui32EnablePID,
+	HTB_LOGMODE_CTRL eLogPidMode,
+	HTB_OPMODE_CTRL eOpMode
+)
+{
+	return BridgeHTBControl(
+			hSrvHandle,
+			ui32NumFlagGroups,
+			aui32GroupEnable,
+			ui32LogLevel,
+			ui32EnablePID,
+			eLogPidMode,
+			eOpMode
+			);
+}
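+
+/* Configuration sketch (illustrative; the buffer name, size and the two
+ * enum values below are assumptions chosen for the example, not mandated
+ * by this API):
+ *
+ *   IMG_UINT32 aui32Enable[HTB_FLAG_NUM_EL] = { 0xFFFFFFFF };
+ *   eError = HTBConfigure(hSrvHandle, "PVRHTBuffer", 64 * 1024);
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       eError = HTBControl(hSrvHandle, HTB_FLAG_NUM_EL, aui32Enable,
+ *                           0, 0, HTB_LOGMODE_ALLPID, HTB_OPMODE_DROPLATEST);
+ *   }
+ */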
+
+
+/*************************************************************************/ /*!
+ @Function      _HTBLog
+ @Description   Common helper for HTBLog and HTBLogSimple: unpacks the
+                va_list into an array of IMG_UINT32 parameters and sends
+                the event over the bridge. Only supported in the kernel;
+                user-mode callers get PVRSRV_ERROR_NOT_SUPPORTED.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 ui32TimeStampus, HTB_LOG_SFids SF, va_list args)
+{
+#if defined(__KERNEL__)
+	IMG_UINT32 i;
+	IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF);
+	IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS];
+
+	PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS);
+	ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS)? HTB_LOG_MAX_PARAMS: ui32NumArgs;
+
+	/* unpack var args before sending over bridge */
+	for (i=0; i<ui32NumArgs; i++)
+	{
+		aui32Args[i] = va_arg(args, IMG_UINT32);
+	}
+
+	return BridgeHTBLog(hSrvHandle, PID, ui32TimeStampus, SF, ui32NumArgs, aui32Args);
+#else
+	PVR_UNREFERENCED_PARAMETER(hSrvHandle);
+	PVR_UNREFERENCED_PARAMETER(PID);
+	PVR_UNREFERENCED_PARAMETER(ui32TimeStampus);
+	PVR_UNREFERENCED_PARAMETER(SF);
+	PVR_UNREFERENCED_PARAMETER(args);
+
+	PVR_ASSERT(0=="HTB Logging in UM is not yet supported");
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLog
+ @Description   Record a Host Trace Buffer log event
+
+ @Input         hSrvHandle      Server Handle
+
+ @Input         PID             The PID of the process the event is
+                                associated with. This is provided as an
+                                argument rather than queried internally so
+                                that events associated with one process but
+                                performed by another can be logged correctly.
+
+ @Input         ui32TimeStampus The timestamp to be associated with this
+                                log event
+
+ @Input         SF              The log event ID
+
+ @Input         ...             Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 ui32TimeStampus, IMG_UINT32 SF, ...)
+{
+	PVRSRV_ERROR eError;
+	va_list args;
+	va_start(args, SF);
+	eError = _HTBLog(hSrvHandle, PID, ui32TimeStampus, SF, args);
+	va_end(args);
+	return eError;
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLogSimple
+ @Description   Record a Host Trace Buffer log event with implicit PID and Timestamp
+
+ @Input         SF              The log event ID
+
+ @Input         ...             Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...)
+{
+	PVRSRV_ERROR eError;
+	va_list args;
+	va_start(args, SF);
+	eError = _HTBLog(hSrvHandle, OSGetCurrentProcessID(), OSClockus(), SF, args);
+	va_end(args);
+	return eError;
+}
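+
+/* Usage sketch (illustrative; HTB_SF_EXAMPLE_EVENT is a hypothetical event
+ * ID standing in for one of the generated HTB_LOG_SFids values):
+ *
+ *   eError = HTBLogSimple(hSrvHandle, HTB_SF_EXAMPLE_EVENT, ui32Arg0);
+ *
+ * The variadic arguments must be IMG_UINT32 values and their count must
+ * match HTB_SF_PARAMNUM(SF), since _HTBLog unpacks exactly that many.
+ */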
+
+
+/* EOF */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/mem_utils.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/mem_utils.c
new file mode 100644
index 0000000..1722695
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/mem_utils.c
@@ -0,0 +1,313 @@
+/*************************************************************************/ /*!
+@File
+@Title          Memory manipulation functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Memory related functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* This workaround is only *required* on ARM64. Avoid building or including
+ * it by default on other architectures, unless the 'safe memcpy' test flag
+ * is enabled. (The code should work on other architectures.)
+ */
+
+#if defined(__arm64__) || defined(__aarch64__) || defined (PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)
+
+/* NOTE: This C file is compiled with -ffreestanding to avoid pattern matching
+ *       by the compiler to stdlib functions, and it must only use the below
+ *       headers. Do not include any IMG or services headers in this file.
+ */
+#include <stddef.h>
+
+/* Prototypes to suppress warnings in -ffreestanding mode */
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize);
+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize);
+
+/* This file is only intended to be used on platforms which use GCC or Clang,
+ * due to its requirement on __attribute__((vector_size(n))), typeof() and
+ * __SIZEOF__ macros.
+ */
+#if defined(__GNUC__)
+
+#define MIN(a, b) \
+ ({__typeof(a) _a = (a); __typeof(b) _b = (b); _a > _b ? _b : _a;})
+
+#if !defined(DEVICE_MEMSETCPY_ALIGN_IN_BYTES)
+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES __SIZEOF_LONG__
+#endif
+#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES % 2 != 0
+#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be a power of 2"
+#endif
+#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES < 4
+#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be equal or greater than 4"
+#endif
+
+#if __SIZEOF_POINTER__ != __SIZEOF_LONG__
+#error No support for architectures where void* and long are sized differently
+#endif
+
+#if   __SIZEOF_LONG__ >  DEVICE_MEMSETCPY_ALIGN_IN_BYTES
+/* Meaningless, and harder to do correctly */
+# error Cannot handle DEVICE_MEMSETCPY_ALIGN_IN_BYTES < sizeof(long)
+typedef unsigned long block_t;
+#elif __SIZEOF_LONG__ <= DEVICE_MEMSETCPY_ALIGN_IN_BYTES
+typedef unsigned int block_t
+	__attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES)));
+# if defined(__arm64__) || defined(__aarch64__)
+#  if   DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8
+#   define DEVICE_MEMSETCPY_ARM64
+#   define REGSZ "w"
+#   define REGCL "w"
+#   define BVCLB "r"
+#  elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16
+#   define DEVICE_MEMSETCPY_ARM64
+#   define REGSZ "x"
+#   define REGCL "x"
+#   define BVCLB "r"
+#  elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32
+#   if defined(__ARM_NEON_FP)
+#    define DEVICE_MEMSETCPY_ARM64
+#    define REGSZ "q"
+#    define REGCL "v"
+#    define BVCLB "w"
+#   endif
+#  endif
+#  if defined(DEVICE_MEMSETCPY_ARM64)
+#   if defined(DEVICE_MEMSETCPY_ARM64_NON_TEMPORAL)
+#    define NSHLD() __asm__ ("dmb nshld")
+#    define NSHST() __asm__ ("dmb nshst")
+#    define LDP "ldnp"
+#    define STP "stnp"
+#   else
+#    define NSHLD()
+#    define NSHST()
+#    define LDP "ldp"
+#    define STP "stp"
+#   endif
+ typedef unsigned int block_half_t
+	__attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES / 2)));
+#  endif
+# endif
+#endif
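+
+/* Illustrative expansion (assuming DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16):
+ * the typedef above becomes
+ *
+ *   typedef unsigned int block_t __attribute__((vector_size(16)));
+ *
+ * i.e. a 16-byte GCC vector of four 32-bit lanes, so each block_t
+ * load/store below moves 16 bytes at a time.
+ */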
+
+__attribute__((visibility("hidden")))
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize)
+{
+	volatile const char *pcSrc = pvSrc;
+	volatile char *pcDst = pvDst;
+	size_t uPreambleBytes;
+	int bBlockCopy = 0;
+
+	size_t uSrcUnaligned = (size_t)pcSrc % sizeof(block_t);
+	size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t);
+
+	if (!uSrcUnaligned && !uDstUnaligned)
+	{
+		/* Neither pointer is unaligned. Optimal case. */
+		bBlockCopy = 1;
+	}
+	else
+	{
+		if (uSrcUnaligned == uDstUnaligned)
+		{
+			/* Neither pointer is usefully aligned, but they are misaligned in
+			 * the same way, so we can copy a preamble in a slow way, then
+			 * optimize the rest.
+			 */
+			uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize);
+			uSize -= uPreambleBytes;
+			while (uPreambleBytes)
+			{
+				*pcDst++ = *pcSrc++;
+				uPreambleBytes--;
+			}
+
+			bBlockCopy = 1;
+		}
+		else if ((uSrcUnaligned | uDstUnaligned) % sizeof(int) == 0)
+		{
+			/* Both pointers are at least 32-bit aligned, and we assume that
+			 * the processor must handle all kinds of 32-bit load-stores.
+			 * NOTE: Could we optimize this with a non-temporal version?
+			 */
+			if (uSize >= sizeof(int))
+			{
+				volatile int *piSrc = (int *)pcSrc;
+				volatile int *piDst = (int *)pcDst;
+
+				while (uSize >= sizeof(int))
+				{
+					*piDst++ = *piSrc++;
+					uSize -= sizeof(int);
+				}
+
+				pcSrc = (char *)piSrc;
+				pcDst = (char *)piDst;
+			}
+		}
+	}
+
+	if (bBlockCopy && uSize >= sizeof(block_t))
+	{
+		volatile block_t *pSrc = (block_t *)pcSrc;
+		volatile block_t *pDst = (block_t *)pcDst;
+
+		NSHLD();
+
+		while (uSize >= sizeof(block_t))
+		{
+#if defined(DEVICE_MEMSETCPY_ARM64)
+			__asm__ (LDP " " REGSZ "0, " REGSZ "1, [%[pSrc]]\n\t"
+			         STP " " REGSZ "0, " REGSZ "1, [%[pDst]]"
+						:
+						: [pSrc] "r" (pSrc), [pDst] "r" (pDst)
+						: "memory", REGCL "0", REGCL "1");
+#else
+			*pDst = *pSrc;
+#endif
+			pDst++;
+			pSrc++;
+			uSize -= sizeof(block_t);
+		}
+
+		NSHST();
+
+		pcSrc = (char *)pSrc;
+		pcDst = (char *)pDst;
+	}
+
+	while (uSize)
+	{
+		*pcDst++ = *pcSrc++;
+		uSize--;
+	}
+}
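+
+/* Worked example (illustrative): with sizeof(block_t) == 16 and both
+ * pointers at offset 3 within a block, the preamble in DeviceMemCopy
+ * copies 16 - 3 = 13 bytes one at a time, after which source and
+ * destination are both 16-byte aligned and the remainder proceeds in
+ * whole block_t units.
+ */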
+
+__attribute__((visibility("hidden")))
+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize)
+{
+	volatile char *pcDst = pvDst;
+	size_t uPreambleBytes;
+
+	size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t);
+
+	if (uDstUnaligned)
+	{
+		uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize);
+		uSize -= uPreambleBytes;
+		while (uPreambleBytes)
+		{
+			*pcDst++ = ui8Value;
+			uPreambleBytes--;
+		}
+	}
+
+	if (uSize >= sizeof(block_t))
+	{
+		volatile block_t *pDst = (block_t *)pcDst;
+#if defined(DEVICE_MEMSETCPY_ARM64)
+		block_half_t bValue;
+#else
+		block_t bValue;
+#endif
+		size_t i;
+
+		for (i = 0; i < sizeof(bValue) / sizeof(unsigned int); i++)
+			bValue[i] = ui8Value << 24U |
+			            ui8Value << 16U |
+			            ui8Value <<  8U |
+			            ui8Value;
+
+		NSHLD();
+
+		while (uSize >= sizeof(block_t))
+		{
+#if defined(DEVICE_MEMSETCPY_ARM64)
+			__asm__ (STP " %" REGSZ "[bValue], %" REGSZ "[bValue], [%[pDst]]"
+						:
+						: [bValue] BVCLB (bValue), [pDst] "r" (pDst)
+						: "memory");
+#else
+			*pDst = bValue;
+#endif
+			pDst++;
+			uSize -= sizeof(block_t);
+		}
+
+		NSHST();
+
+		pcDst = (char *)pDst;
+	}
+
+	while (uSize)
+	{
+		*pcDst++ = ui8Value;
+		uSize--;
+	}
+}
+
+#else /* !defined(__GNUC__) */
+
+/* Potentially very slow (but safe) fallbacks for non-GNU C compilers */
+
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize)
+{
+	volatile const char *pcSrc = pvSrc;
+	volatile char *pcDst = pvDst;
+
+	while (uSize)
+	{
+		*pcDst++ = *pcSrc++;
+		uSize--;
+	}
+}
+
+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize)
+{
+	volatile char *pcDst = pvDst;
+
+	while (uSize)
+	{
+		*pcDst++ = ui8Value;
+		uSize--;
+	}
+}
+
+#endif /* !defined(__GNUC__) */
+
+#endif /* defined(__arm64__) || defined(__aarch64__) || defined (PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/ra.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/ra.c
new file mode 100644
index 0000000..fb0a74e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/ra.c
@@ -0,0 +1,1399 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+@Description
+ Implements generic resource allocation. The resource
+ allocator was originally intended to manage address spaces. In
+ practice the resource allocator is generic and can manage arbitrary
+ sets of integers.
+
+ Resources are allocated from arenas. Arenas can be created with an
+ initial span of resources. Further resource spans can be added to
+ arenas. A callback mechanism allows an arena to request further
+ resource spans on demand.
+
+ Each arena maintains an ordered list of resource segments each
+ described by a boundary tag. Each boundary tag describes a segment
+ of resources which are either 'free', available for allocation, or
+ 'busy', currently allocated. Adjacent 'free' segments are always
+ coalesced to avoid fragmentation.
+
+ For allocation, all 'free' segments are kept on lists of 'free'
+ segments in a table indexed by pvr_log2(segment size), i.e. each
+ table index n holds 'free' segments in the size range
+ 2^n -> 2^(n+1) - 1.
+
+ Allocation policy is based on an *almost* good fit strategy.
+
+ Allocated segments are inserted into a self scaling hash table which
+ maps the base resource of the span to the relevant boundary
+ tag. This allows the code to get back to the boundary tag without
+ exporting explicit boundary tag references through the API.
+
+ Each arena has an associated quantum size; all allocations from the
+ arena are made in multiples of the basic quantum.
+
+ On resource exhaustion in an arena, a callback, if provided, will be
+ used to request further resources. Resource spans allocated by the
+ callback mechanism will be returned when freed (through one of the
+ two callbacks).
+*/ /**************************************************************************/
+
+/* Issues:
+ * - flags: flags are passed into the resource allocator but are not
+ *   currently used.
+ * - determination of import size is currently braindead.
+ * - debug code should be moved out to its own module and #ifdef'd.
+ */
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "uniq_key_splay_tree.h"
+
+#include "hash.h"
+#include "ra.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "osfunc.h"
+#include "allocmem.h"
+#include "lock.h"
+#include "pvr_intrinsics.h"
+
+/* The initial, and minimum size of the live address -> boundary tag
+   structure hash table. The value 64 is a fairly arbitrary
+   choice. The hash table resizes on demand so the value chosen is
+   not critical. */
+#define MINIMUM_HASH_SIZE (64)
+
+
+/* #define RA_VALIDATE */
+
+#if defined(__KLOCWORK__)
+  /* make sure Klocworks analyse all the code (including the debug one) */
+  #if !defined(RA_VALIDATE)
+    #define RA_VALIDATE
+  #endif
+#endif
+
+#if (!defined(PVRSRV_NEED_PVR_ASSERT)) || (!defined(RA_VALIDATE))
+  /* Disable the asserts unless explicitly told otherwise.  They slow the driver
+     too much for other people */
+
+  #undef PVR_ASSERT
+  /* use a macro that really does nothing when compiling in release
+     mode */
+  #define PVR_ASSERT(x)
+#endif
+
+/* boundary tags, used to describe a resource segment */
+struct _BT_
+{
+	enum bt_type
+	{
+		btt_free,				/* free resource segment */
+		btt_live				/* allocated resource segment */
+	} type;
+
+	unsigned int is_leftmost;
+	unsigned int is_rightmost;
+	unsigned int free_import;
+
+	/* The base resource and extent of this segment */
+	RA_BASE_T base;
+	RA_LENGTH_T uSize;
+
+	/* doubly linked ordered list of all segments within the arena */
+	struct _BT_ *pNextSegment;
+	struct _BT_ *pPrevSegment;
+
+	/* doubly linked un-ordered list of free segments with the same flags. */
+	struct _BT_ * next_free;
+	struct _BT_ * prev_free;
+	
+	/* a user reference associated with this span, user references are
+	 * currently only provided in the callback mechanism */
+    IMG_HANDLE hPriv;
+
+    /* Flags to match on this span */
+    IMG_UINT32 uFlags;
+
+};
+typedef struct _BT_ BT;
+
+
+/* resource allocation arena */
+struct _RA_ARENA_
+{
+	/* arena name for diagnostics output */
+	IMG_CHAR *name;
+
+	/* allocations within this arena are quantum sized */
+	RA_LENGTH_T uQuantum;
+
+	/* import interface, if provided */
+	PVRSRV_ERROR (*pImportAlloc)(RA_PERARENA_HANDLE h,
+							 RA_LENGTH_T uSize,
+							 IMG_UINT32 uFlags,
+							 const IMG_CHAR *pszAnnotation,
+							 RA_BASE_T *pBase,
+							 RA_LENGTH_T *pActualSize,
+                             RA_PERISPAN_HANDLE *phPriv);
+	void (*pImportFree) (RA_PERARENA_HANDLE,
+                         RA_BASE_T,
+                         RA_PERISPAN_HANDLE hPriv);
+
+	/* arbitrary handle provided by arena owner to be passed into the
+	 * import alloc and free hooks */
+	void *pImportHandle;
+
+	IMG_PSPLAY_TREE per_flags_buckets;
+	
+	/* resource segment list */
+	BT *pHeadSegment;
+
+	/* segment address to boundary tag hash table */
+	HASH_TABLE *pSegmentHash;
+
+	/* Lock for this arena */
+	POS_LOCK hLock;
+
+	/* LockClass of this arena. This is used within lockdep to decide if a
+	 * recursive call sequence with the same lock class is allowed or not. */
+	IMG_UINT32 ui32LockClass;
+
+	/* If TRUE, imports will not be split up. Allocations will always get their
+	 * own import
+	 */
+	IMG_BOOL bNoSplit;
+};
+
+/*************************************************************************/ /*!
+@Function       _RequestAllocFail
+@Description    Default callback allocator used if no callback is
+                specified, always fails to allocate further resources to the
+                arena.
+@Input          _h              callback handle
+@Input          _uSize          requested allocation size
+@Input          _uFlags         allocation flags
+@Input          _pszAnnotation  allocation annotation string
+@Output         _pBase          receives the allocated base
+@Output         _pActualSize    receives the actual allocation size
+@Output         _phPriv         receives the user reference
+@Return         PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL, this function always
+                fails to allocate.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_RequestAllocFail (RA_PERARENA_HANDLE _h,
+                   RA_LENGTH_T _uSize,
+                   IMG_UINT32 _uFlags,
+                   const IMG_CHAR *_pszAnnotation,
+                   RA_BASE_T *_pBase,
+                   RA_LENGTH_T *_pActualSize,
+                   RA_PERISPAN_HANDLE *_phPriv)
+{
+	PVR_UNREFERENCED_PARAMETER (_h);
+	PVR_UNREFERENCED_PARAMETER (_uSize);
+	PVR_UNREFERENCED_PARAMETER (_pActualSize);
+	PVR_UNREFERENCED_PARAMETER (_phPriv);
+	PVR_UNREFERENCED_PARAMETER (_uFlags);
+	PVR_UNREFERENCED_PARAMETER (_pBase);
+	PVR_UNREFERENCED_PARAMETER (_pszAnnotation);
+
+	return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL;
+}
+
+
+#if defined (PVR_CTZLL)
+    /* make sure to trigger an error if someone changes the buckets or the
+       bHasEltsMapping size. bHasEltsMapping is used to quickly determine
+       the smallest bucket containing elements, therefore it must have at
+       least as many bits as the buckets array has buckets. The RA
+       implementation actually uses one more bit. */
+    static_assert((sizeof(((IMG_PSPLAY_TREE) 0)->buckets) / sizeof(((IMG_PSPLAY_TREE) 0)->buckets[0]))
+				  < 8 * sizeof(((IMG_PSPLAY_TREE) 0)->bHasEltsMapping),
+				  "Too many buckets for bHasEltsMapping bitmap");
+#endif 
+
+
+/*************************************************************************/ /*!
+@Function       pvr_log2
+@Description    Computes the floor of the log base 2 of an unsigned integer
+@Input          n       Unsigned integer
+@Return         Floor(Log2(n))
+*/ /**************************************************************************/
+#if defined(PVR_CLZLL)
+/* make sure to trigger a problem if someone changes the RA_LENGTH_T type,
+   since __builtin_clzll is for unsigned long long variables.
+
+   if someone changes RA_LENGTH_T to unsigned long, then use __builtin_clzl;
+   if it changes to unsigned int, use __builtin_clz;
+
+   if it changes to something bigger than unsigned long long,
+   then revert pvr_log2 to the classic implementation */
+static_assert(sizeof(RA_LENGTH_T) == sizeof(unsigned long long),
+			  "RA log routines not tuned for sizeof(RA_LENGTH_T)");
+
+static inline IMG_UINT32 pvr_log2(RA_LENGTH_T n)
+{
+	PVR_ASSERT( n != 0 ); /* Log2 is not defined on 0 */
+
+	return (8 * sizeof(RA_LENGTH_T)) - 1 - PVR_CLZLL(n);
+}
+#else
+static IMG_UINT32
+pvr_log2 (RA_LENGTH_T n)
+{
+	IMG_UINT32 l = 0;
+
+	PVR_ASSERT( n != 0 ); /* Log2 is not defined on 0 */
+
+	n>>=1;
+	while (n>0)
+	{
+		n>>=1;
+		l++;
+	}
+	return l;
+}
+#endif
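+
+/* For example, pvr_log2(1) == 0, pvr_log2(4096) == 12 and
+ * pvr_log2(4097) == 12: the floor of the base-2 logarithm, which is what
+ * places a free segment of size 2^n .. 2^(n+1)-1 in free-table bucket n.
+ */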
+
+
+#if defined(RA_VALIDATE)
+/*************************************************************************/ /*!
+@Function       _IsInSegmentList
+@Description    Tests if a BT is in the segment list.
+@Input          pArena           The arena.
+@Input          pBT              The boundary tag to look for.
+@Return         IMG_FALSE  BT was not in the arena's segment list.
+                IMG_TRUE   BT was in the arena's segment list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInSegmentList (RA_ARENA *pArena,
+                  BT *pBT)
+{
+	BT*  pBTScan;
+
+	PVR_ASSERT (pArena != NULL);
+	PVR_ASSERT (pBT != NULL);
+
+	/* Walk the segment list until we see the BT pointer... */
+	pBTScan = pArena->pHeadSegment;
+	while (pBTScan != NULL  &&  pBTScan != pBT)
+	{
+		pBTScan = pBTScan->pNextSegment;
+	}
+
+	/* Test if we found it and then return */
+	return (pBTScan == pBT);
+}
+
+/*************************************************************************/ /*!
+@Function       _IsInFreeList
+@Description    Tests if a BT is in the free list.
+@Input          pArena           The arena.
+@Input          pBT              The boundary tag to look for.
+@Return         IMG_FALSE  BT was not in the arena's free list.
+                IMG_TRUE   BT was in the arena's free list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInFreeList (RA_ARENA *pArena,
+               BT *pBT)
+{
+	BT*  pBTScan;
+	IMG_UINT32  uIndex;
+
+	PVR_ASSERT (pArena != NULL);
+	PVR_ASSERT (pBT != NULL);
+
+	/* Look for the free list that holds BTs of this size... */
+	uIndex  = pvr_log2 (pBT->uSize);
+	PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+
+	pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+	if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->flags != pBT->uFlags))
+	{
+		return 0;
+	}
+	else
+	{
+		pBTScan = pArena->per_flags_buckets->buckets[uIndex];
+		while (pBTScan != NULL  &&  pBTScan != pBT)
+		{
+			pBTScan = pBTScan->next_free;
+		}
+
+		/* Test if we found it and then return */
+		return (pBTScan == pBT);
+	}
+}
+
+/* is_arena_valid should only be used in debug mode.
+   it checks that some properties an arena must have are verified */
+static int is_arena_valid(struct _RA_ARENA_ * arena)
+{
+	struct _BT_ * chunk;
+#if defined(PVR_CTZLL)
+	unsigned int i;
+#endif
+
+	for (chunk = arena->pHeadSegment; chunk != NULL; chunk = chunk->pNextSegment)
+	{
+		/* if next segment is NULL, then it must be a rightmost */
+		PVR_ASSERT((chunk->pNextSegment != NULL) || (chunk->is_rightmost));
+		/* if prev segment is NULL, then it must be a leftmost */
+		PVR_ASSERT((chunk->pPrevSegment != NULL) || (chunk->is_leftmost));
+
+		if (chunk->type == btt_free)
+		{
+			/* checks the correctness of the type field */
+			PVR_ASSERT(_IsInFreeList(arena, chunk));
+
+		    /* check that there can't be two consecutive free chunks.
+		       Instead of two consecutive free chunks there should be
+		       a single one spanning the size of the two. */
+			PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->type != btt_free));
+			PVR_ASSERT((chunk->is_rightmost) || (chunk->pNextSegment->type != btt_free));
+		}
+		else
+		{
+			/* checks the correctness of the type field */
+			PVR_ASSERT(!_IsInFreeList(arena, chunk));
+		}
+
+		PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->base + chunk->pPrevSegment->uSize == chunk->base));
+		PVR_ASSERT((chunk->is_rightmost) || (chunk->base + chunk->uSize == chunk->pNextSegment->base));
+
+		/* all segments of the same imports must have the same flags ... */
+		PVR_ASSERT((chunk->is_rightmost) || (chunk->uFlags == chunk->pNextSegment->uFlags));
+		/* ... and the same import handle */
+		PVR_ASSERT((chunk->is_rightmost) || (chunk->hPriv == chunk->pNextSegment->hPriv));
+
+
+		/* if a free chunk spans a whole import, then it must be a
+		   'not to free' import. Otherwise it should have been freed. */
+		PVR_ASSERT((!chunk->is_leftmost) || (!chunk->is_rightmost) || (chunk->type == btt_live) || (!chunk->free_import));
+	}
+
+#if defined(PVR_CTZLL)
+    if (arena->per_flags_buckets != NULL)
+	{
+		for (i = 0; i < FREE_TABLE_LIMIT; ++i)
+		{
+			/* verify that the bHasEltsMapping is correct for this flags bucket */
+			PVR_ASSERT( 
+				((arena->per_flags_buckets->buckets[i] == NULL) &&
+				 (( (arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) == 0)))
+				||
+				((arena->per_flags_buckets->buckets[i] != NULL) &&
+				 ((  (arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) != 0)))
+				);		
+		}
+	}
+#endif	
+
+	/* if arena was not valid, one of the assert before should have triggered */
+	return 1;
+}
+#endif
+/*************************************************************************/ /*!
+@Function       _SegmentListInsertAfter
+@Description    Insert a boundary tag into an arena segment list after a
+                specified boundary tag.
+@Input          pInsertionPoint  The insertion point.
+@Input          pBT              The boundary tag to insert.
+@Return         PVRSRV_OK (doesn't fail)
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR
+_SegmentListInsertAfter (BT *pInsertionPoint,
+						 BT *pBT)
+{
+	PVR_ASSERT (pBT != NULL);
+	PVR_ASSERT (pInsertionPoint != NULL);
+
+	pBT->pNextSegment = pInsertionPoint->pNextSegment;
+	pBT->pPrevSegment = pInsertionPoint;
+	if (pInsertionPoint->pNextSegment != NULL)
+	{
+		pInsertionPoint->pNextSegment->pPrevSegment = pBT;
+	}
+	pInsertionPoint->pNextSegment = pBT;
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       _SegmentListInsert
+@Description    Insert a boundary tag into an arena segment list
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag to insert.
+@Return         PVRSRV_OK (doesn't fail)
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR
+_SegmentListInsert (RA_ARENA *pArena, BT *pBT)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVR_ASSERT (!_IsInSegmentList(pArena, pBT));
+
+	/* insert into the segment chain */
+	pBT->pNextSegment = pArena->pHeadSegment;
+	pArena->pHeadSegment = pBT;
+	if (pBT->pNextSegment != NULL)
+	{
+		pBT->pNextSegment->pPrevSegment = pBT;
+	}
+
+	pBT->pPrevSegment = NULL;
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _SegmentListRemove
+@Description    Remove a boundary tag from an arena segment list.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag to remove.
+*/ /**************************************************************************/
+static void
+_SegmentListRemove (RA_ARENA *pArena, BT *pBT)
+{
+	PVR_ASSERT (_IsInSegmentList(pArena, pBT));
+	
+	if (pBT->pPrevSegment == NULL)
+		pArena->pHeadSegment = pBT->pNextSegment;
+	else
+		pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
+
+	if (pBT->pNextSegment != NULL)
+		pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _BuildBT
+@Description    Construct a boundary tag for a free segment.
+@Input          base     The base of the resource segment.
+@Input          uSize    The extent of the resource segment.
+@Input          uFlags   The flags to give to the boundary tag
+@Return         Boundary tag or NULL
+*/ /**************************************************************************/
+static BT *
+_BuildBT (RA_BASE_T base,
+          RA_LENGTH_T uSize,
+          RA_FLAGS_T uFlags
+          )
+{
+	BT *pBT;
+
+	pBT = OSAllocMem(sizeof(BT));
+    if (pBT == NULL)
+	{
+		return NULL;
+	}
+
+	OSCachedMemSet(pBT, 0, sizeof(BT));
+
+	pBT->is_leftmost = 1;
+	pBT->is_rightmost = 1;
+	pBT->type = btt_live;
+	pBT->base = base;
+	pBT->uSize = uSize;
+    pBT->uFlags = uFlags;
+	pBT->free_import = 0;
+
+	return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _SegmentSplit
+@Description    Split a segment into two, maintain the arena segment list. The
+                boundary tag should not be in the free table. Neither the
+                original nor the new neighbour boundary tag will be in the
+                free table.
+@Input          pBT       The boundary tag to split.
+@Input          uSize     The required segment size of boundary tag after
+                          splitting.
+@Return         New neighbour boundary tag or NULL.
+*/ /**************************************************************************/
+static BT *
+_SegmentSplit (BT *pBT, RA_LENGTH_T uSize)
+{
+	BT *pNeighbour;
+
+	pNeighbour = _BuildBT(pBT->base + uSize, pBT->uSize - uSize, pBT->uFlags);
+    if (pNeighbour == NULL)
+    {
+        return NULL;
+    }
+
+	_SegmentListInsertAfter(pBT, pNeighbour);
+
+	pNeighbour->is_leftmost = 0;
+	pNeighbour->is_rightmost = pBT->is_rightmost;
+	pNeighbour->free_import = pBT->free_import;
+	pBT->is_rightmost = 0;
+	pNeighbour->hPriv = pBT->hPriv;
+	pBT->uSize = uSize;
+	pNeighbour->uFlags = pBT->uFlags;
+
+	return pNeighbour;
+}
+
+/*************************************************************************/ /*!
+@Function       _FreeListInsert
+@Description    Insert a boundary tag into an arena free table.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag.
+*/ /**************************************************************************/
+static void
+_FreeListInsert (RA_ARENA *pArena, BT *pBT)
+{
+	IMG_UINT32 uIndex;
+	uIndex = pvr_log2 (pBT->uSize);
+
+	PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+	PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+
+	pBT->type = btt_free;
+
+	pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+	/* the flags item in the splay tree must have been created beforehand
+	   by _InsertResource */
+	PVR_ASSERT(pArena->per_flags_buckets != NULL);
+	PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
+
+	pBT->next_free = pArena->per_flags_buckets->buckets[uIndex];
+	if (pBT->next_free != NULL)
+	{
+		pBT->next_free->prev_free = pBT;
+	}
+	pBT->prev_free = NULL;
+	pArena->per_flags_buckets->buckets[uIndex] = pBT;
+
+#if defined(PVR_CTZLL)
+	/* tells that bucket[index] now contains elements */
+    pArena->per_flags_buckets->bHasEltsMapping |= ((IMG_ELTS_MAPPINGS) 1 << uIndex);
+#endif
+}
+
+/*************************************************************************/ /*!
+@Function       _FreeListRemove
+@Description    Remove a boundary tag from an arena free table.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag.
+*/ /**************************************************************************/
+static void
+_FreeListRemove (RA_ARENA *pArena, BT *pBT)
+{
+	IMG_UINT32 uIndex;
+	uIndex = pvr_log2 (pBT->uSize);
+
+	PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+	PVR_ASSERT (_IsInFreeList(pArena, pBT));
+
+	if (pBT->next_free != NULL)
+	{
+		pBT->next_free->prev_free = pBT->prev_free;
+	}
+
+	if (pBT->prev_free != NULL)
+	{
+		pBT->prev_free->next_free = pBT->next_free;
+	}
+	else
+	{
+		pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+		/* the flags item in the splay tree must have already been created
+		   (otherwise how could there be a segment with these flags?) */
+		PVR_ASSERT(pArena->per_flags_buckets != NULL);
+		PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
+
+		pArena->per_flags_buckets->buckets[uIndex] = pBT->next_free;
+#if defined(PVR_CTZLL)
+		if (pArena->per_flags_buckets->buckets[uIndex] == NULL)
+		{
+			/* there is no more elements in this bucket. Update the mapping. */
+			pArena->per_flags_buckets->bHasEltsMapping &= ~((IMG_ELTS_MAPPINGS) 1 << uIndex);
+		}
+#endif
+	}
+	
+
+	PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+	pBT->type = btt_live;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _InsertResource
+@Description    Add a free resource segment to an arena.
+@Input          pArena    The arena.
+@Input          base      The base of the resource segment.
+@Input          uSize     The extent of the resource segment.
+@Input          uFlags    The flags of the new resources.
+@Return         New bucket pointer
+                NULL on failure
+*/ /**************************************************************************/
+static BT *
+_InsertResource (RA_ARENA *pArena,
+                 RA_BASE_T base,
+                 RA_LENGTH_T uSize,
+                 RA_FLAGS_T uFlags
+                 )
+{
+	BT *pBT;
+	PVR_ASSERT (pArena!=NULL);
+
+	pBT = _BuildBT (base, uSize, uFlags);
+
+	if (pBT != NULL)
+	{
+		IMG_PSPLAY_TREE tmp = PVRSRVInsert(pBT->uFlags, pArena->per_flags_buckets);
+		if (tmp == NULL)
+		{
+			OSFreeMem(pBT);
+			return NULL;
+		}
+		
+		pArena->per_flags_buckets = tmp;
+		_SegmentListInsert (pArena, pBT);
+		_FreeListInsert (pArena, pBT);
+	}
+	return pBT;
+}
+
+/*************************************************************************/ /*!
+@Function       _InsertResourceSpan
+@Description    Add a free resource span to an arena, marked for free_import.
+@Input          pArena    The arena.
+@Input          base      The base of the resource segment.
+@Input          uSize     The extent of the resource segment.
+@Return         The boundary tag representing the free resource segment,
+                or NULL on failure.
+*/ /**************************************************************************/
+static INLINE BT *
+_InsertResourceSpan (RA_ARENA *pArena,
+                     RA_BASE_T base,
+                     RA_LENGTH_T uSize,
+                     RA_FLAGS_T uFlags)
+{
+	BT *pBT = _InsertResource(pArena, base, uSize, uFlags);
+	if (pBT != NULL)
+	{
+		pBT->free_import = 1;
+	}
+	return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _RemoveResourceSpan
+@Description    Frees a resource span from an arena, returning the imported
+				span via the callback.
+@Input          pArena     The arena.
+@Input          pBT        The boundary tag to free.
+@Return         IMG_FALSE failure - span was still in use
+                IMG_TRUE  success - span was removed and returned
+*/ /**************************************************************************/
+static INLINE IMG_BOOL
+_RemoveResourceSpan (RA_ARENA *pArena, BT *pBT)
+{
+	PVR_ASSERT (pArena!=NULL);
+	PVR_ASSERT (pBT!=NULL);
+
+	if (pBT->free_import &&
+		pBT->is_leftmost &&
+		pBT->is_rightmost)
+	{
+		_SegmentListRemove (pArena, pBT);
+		pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->hPriv);
+		OSFreeMem(pBT);
+
+		return IMG_TRUE;
+	}
+
+
+	return IMG_FALSE;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _FreeBT
+@Description    Free a boundary tag taking care of the segment list and the
+                boundary tag free table.
+@Input          pArena     The arena.
+@Input          pBT        The boundary tag to free.
+*/ /**************************************************************************/
+static void
+_FreeBT (RA_ARENA *pArena, BT *pBT)
+{
+	BT *pNeighbour;
+
+	PVR_ASSERT (pArena!=NULL);
+	PVR_ASSERT (pBT!=NULL);
+	PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+
+	/* try and coalesce with left neighbour */
+	pNeighbour = pBT->pPrevSegment;
+	if ((!pBT->is_leftmost)	&& (pNeighbour->type == btt_free))
+	{
+		/* Sanity check. */
+		PVR_ASSERT(pNeighbour->base + pNeighbour->uSize == pBT->base);
+
+		_FreeListRemove (pArena, pNeighbour);
+		_SegmentListRemove (pArena, pNeighbour);
+		pBT->base = pNeighbour->base;
+
+		pBT->uSize += pNeighbour->uSize;
+		pBT->is_leftmost = pNeighbour->is_leftmost;
+		OSFreeMem(pNeighbour);
+	}
+
+	/* try to coalesce with right neighbour */
+	pNeighbour = pBT->pNextSegment;
+	if ((!pBT->is_rightmost) && (pNeighbour->type == btt_free))
+	{
+		/* sanity check */
+		PVR_ASSERT(pBT->base + pBT->uSize == pNeighbour->base);
+
+		_FreeListRemove (pArena, pNeighbour);
+		_SegmentListRemove (pArena, pNeighbour);
+		pBT->uSize += pNeighbour->uSize;
+		pBT->is_rightmost = pNeighbour->is_rightmost;
+		OSFreeMem(pNeighbour);
+	}
+
+	if (_RemoveResourceSpan(pArena, pBT) == IMG_FALSE)
+	{
+		_FreeListInsert (pArena, pBT);
+		PVR_ASSERT( (!pBT->is_rightmost) || (!pBT->is_leftmost) || (!pBT->free_import) );
+	}
+	
+	PVR_ASSERT(is_arena_valid(pArena));
+}
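+
+/*
+  Coalescing sketch: freeing the middle segment of three adjacent
+  segments [A free][B live][C free] removes A and C from both the free
+  list and the segment list and grows B to cover all three. If the
+  merged segment then covers an entire imported span (is_leftmost,
+  is_rightmost and free_import all set), _RemoveResourceSpan returns it
+  to the import source instead of re-inserting it into the free list.
+*/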
+
+
+/*
+  This function returns the first element in a bucket that can be split
+  such that one of the resulting subsegments meets the size and
+  alignment criteria.
+
+  first_elt is the bucket to look into. Remember that a bucket is
+  implemented as a pointer to the first element of the linked list.
+
+  nb_max_try limits the number of elements considered: only the first
+  nb_max_try elements in the free-list are examined. The special value
+  ~0 means unlimited, i.e. consider all elements in the free list.
+ */
+static INLINE
+struct _BT_ * find_chunk_in_bucket(struct _BT_ * first_elt,
+								   RA_LENGTH_T uSize,
+								   RA_LENGTH_T uAlignment,
+								   unsigned int nb_max_try)
+{
+	struct _BT_ * walker;
+
+	for (walker = first_elt; (walker != NULL) && (nb_max_try != 0); walker = walker->next_free)
+	{
+		const RA_BASE_T aligned_base = (uAlignment > 1) ?
+			(walker->base + uAlignment - 1) & ~(uAlignment - 1)
+			: walker->base;
+		
+		if (walker->base + walker->uSize >= aligned_base + uSize)
+		{
+			return walker;
+		}
+
+		/* 0xFFFF...FFFF is used as nb_max_try = infinity. */
+		if (nb_max_try != (unsigned int) ~0)
+		{
+			nb_max_try--;
+		}
+	}
+
+	return NULL;
+}
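+
+/*
+  Worked example of the fit test above, using hypothetical values:
+  a free chunk at base = 0x1003 of size 0x200, and a request of
+  uSize = 0x100 with uAlignment = 0x100. Then
+
+      aligned_base = (0x1003 + 0x100 - 1) & ~(0x100 - 1) = 0x1100
+
+  and the chunk is accepted because
+  0x1003 + 0x200 = 0x1203 >= 0x1100 + 0x100 = 0x1200.
+*/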
+
+
+/*************************************************************************/ /*!
+@Function       _AttemptAllocAligned
+@Description    Attempt an allocation from an arena.
+@Input          pArena       The arena.
+@Input          uSize        The requested allocation size.
+@Input          uFlags       Allocation flags.
+@Input          uAlignment   Required alignment, or 0.
+                             Must be a power of 2 if not 0.
+@Output         base         Allocated resource base (must not be NULL).
+@Output         phPriv       The per-import private data associated with
+                             the imported segment. (optional)
+@Return         IMG_FALSE failure
+                IMG_TRUE success
+*/ /**************************************************************************/
+static IMG_BOOL
+_AttemptAllocAligned (RA_ARENA *pArena,
+					  RA_LENGTH_T uSize,
+					  IMG_UINT32 uFlags,
+					  RA_LENGTH_T uAlignment,
+					  RA_BASE_T *base,
+                      RA_PERISPAN_HANDLE *phPriv) /* this is the "per-import" private data */
+{
+
+	IMG_UINT32 index_low;
+	IMG_UINT32 index_high; 
+	IMG_UINT32 i; 
+	struct _BT_ * pBT = NULL;
+	RA_BASE_T aligned_base;
+
+	PVR_ASSERT (pArena!=NULL);
+	PVR_ASSERT (base != NULL);
+
+	pArena->per_flags_buckets = PVRSRVSplay(uFlags, pArena->per_flags_buckets);
+	if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->ui32Flags != uFlags))
+	{
+		/* no chunks with these flags. */
+		return IMG_FALSE;
+	}
+
+	index_low = pvr_log2(uSize);
+	index_high = pvr_log2(uSize + uAlignment - 1);
+	
+	PVR_ASSERT(index_low < FREE_TABLE_LIMIT);
+	PVR_ASSERT(index_high < FREE_TABLE_LIMIT);
+	PVR_ASSERT(index_low <= index_high);
+
+#if defined(PVR_CTZLL)
+	/* compute the mask in the (wider) IMG_ELTS_MAPPINGS type to avoid a possible 32-bit shift overflow */
+	i = PVR_CTZLL(~(((IMG_ELTS_MAPPINGS)1 << (index_high + 1)) - 1) & pArena->per_flags_buckets->bHasEltsMapping);
+#else
+	for (i = index_high + 1; (i < FREE_TABLE_LIMIT) && (pArena->per_flags_buckets->buckets[i] == NULL); ++i)
+	{
+	}
+#endif
+	PVR_ASSERT(i <= FREE_TABLE_LIMIT);
+
+	if (i != FREE_TABLE_LIMIT)
+	{
+		/* since we start at index_high + 1, we are guaranteed to exit */
+		pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, 1);
+	}
+	else
+	{
+		for (i = index_high; (i != index_low - 1) && (pBT == NULL); --i)
+		{
+			pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0);			
+		}
+	}
+
+	if (pBT == NULL)
+	{
+		return IMG_FALSE;
+	}
+
+	aligned_base = (uAlignment > 1) ? (pBT->base + uAlignment - 1) & ~(uAlignment - 1) : pBT->base;
+
+	_FreeListRemove (pArena, pBT);
+
+	if(pArena->bNoSplit)
+	{
+		goto nosplit;
+	}
+
+	/* with uAlignment we might need to discard the front of this segment */
+	if (aligned_base > pBT->base)
+	{
+		BT *pNeighbour;
+		pNeighbour = _SegmentSplit (pBT, (RA_LENGTH_T)(aligned_base - pBT->base));
+		/* partition the buffer, create a new boundary tag */
+		if (pNeighbour == NULL)
+		{
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Front split failed", __FUNCTION__));
+			/* Put pBT back in the list */
+			_FreeListInsert (pArena, pBT);
+			return IMG_FALSE;
+		}
+
+		_FreeListInsert(pArena, pBT);
+		pBT = pNeighbour;
+	}
+
+	/* the segment might be too big, if so, discard the back of the segment */
+	if (pBT->uSize > uSize)
+	{
+		BT *pNeighbour;
+		pNeighbour = _SegmentSplit(pBT, uSize);
+		/* partition the buffer, create a new boundary tag */
+		if (pNeighbour == NULL)
+		{
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Back split failed", __FUNCTION__));
+			/* Put pBT back in the list */
+			_FreeListInsert (pArena, pBT);
+			return IMG_FALSE;
+		}
+	
+		_FreeListInsert (pArena, pNeighbour);
+	}
+nosplit:
+	pBT->type = btt_live;
+	
+	if (!HASH_Insert_Extended (pArena->pSegmentHash, &pBT->base, (uintptr_t)pBT))
+	{
+		_FreeBT (pArena, pBT);
+		return IMG_FALSE;
+	}
+	
+	if (phPriv != NULL)
+		*phPriv = pBT->hPriv;
+	
+	*base = pBT->base;
+	
+	return IMG_TRUE;
+}
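+
+/*
+  Bucket selection sketch, assuming pvr_log2() is floor(log2()) and
+  using hypothetical numbers: for uSize = 0x3000 and
+  uAlignment = 0x1000, index_low = pvr_log2(0x3000) = 13 and
+  index_high = pvr_log2(0x3FFF) = 13. Any chunk in bucket 14 or above
+  is at least 0x4000 bytes, enough to cover the worst-case alignment
+  loss of uAlignment - 1 bytes, so one look in the first populated
+  bucket above index_high suffices. Only if no such bucket is populated
+  do we walk from index_high down to index_low, where each chunk needs
+  the per-chunk fit test in find_chunk_in_bucket.
+*/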
+
+
+
+/*************************************************************************/ /*!
+@Function       RA_Create
+@Description    To create a resource arena.
+@Input          name          The name of the arena for diagnostic purposes.
+@Input          uLog2Quantum  The arena allocation quantum, as a log2 value.
+@Input          ui32LockClass The lock class of the arena lock.
+@Input          imp_alloc     A resource allocation callback or NULL.
+@Input          imp_free      A resource de-allocation callback or NULL.
+@Input          arena_handle  Handle passed to the alloc and free callbacks.
+@Input          bNoSplit      Disable splitting up imports.
+@Return         arena handle, or NULL.
+*/ /**************************************************************************/
+IMG_INTERNAL RA_ARENA *
+RA_Create (IMG_CHAR *name,
+		   RA_LOG2QUANTUM_T uLog2Quantum,
+		   IMG_UINT32 ui32LockClass,
+		   PVRSRV_ERROR (*imp_alloc)(RA_PERARENA_HANDLE h, 
+                                 RA_LENGTH_T uSize,
+                                 RA_FLAGS_T _flags, 
+                                 const IMG_CHAR *pszAnnotation,
+                                 /* returned data */
+                                 RA_BASE_T *pBase,
+                                 RA_LENGTH_T *pActualSize,
+                                 RA_PERISPAN_HANDLE *phPriv),
+		   void (*imp_free) (RA_PERARENA_HANDLE,
+                             RA_BASE_T,
+                             RA_PERISPAN_HANDLE),
+		   RA_PERARENA_HANDLE arena_handle,
+		   IMG_BOOL bNoSplit)
+{
+	RA_ARENA *pArena;
+	PVRSRV_ERROR eError;
+
+	if (name == NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR, "RA_Create: invalid parameter 'name' (NULL not accepted)"));
+		return NULL;
+	}
+	
+	PVR_DPF ((PVR_DBG_MESSAGE, "RA_Create: name='%s'", name));
+
+	pArena = OSAllocMem(sizeof (*pArena));
+	if (pArena == NULL)
+	{
+		goto arena_fail;
+	}
+
+	eError = OSLockCreate(&pArena->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto lock_fail;
+	}
+
+	pArena->pSegmentHash = HASH_Create_Extended(MINIMUM_HASH_SIZE, sizeof(RA_BASE_T), HASH_Func_Default, HASH_Key_Comp_Default);
+
+	if (pArena->pSegmentHash==NULL)
+	{
+		goto hash_fail;
+	}
+
+	pArena->name = name;
+	pArena->pImportAlloc = (imp_alloc!=NULL) ? imp_alloc : &_RequestAllocFail;
+	pArena->pImportFree = imp_free;
+	pArena->pImportHandle = arena_handle;
+	pArena->pHeadSegment = NULL;
+	pArena->uQuantum = (IMG_UINT64)1 << uLog2Quantum; /* 64-bit shift avoids overflow for large quanta */
+	pArena->per_flags_buckets = NULL;
+	pArena->ui32LockClass = ui32LockClass;
+	pArena->bNoSplit = bNoSplit;
+
+	PVR_ASSERT(is_arena_valid(pArena));
+	return pArena;
+
+hash_fail:
+	OSLockDestroy(pArena->hLock);
+lock_fail:
+	OSFreeMem(pArena);
+	/*not nulling pointer, out of scope*/
+arena_fail:
+	return NULL;
+}
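+
+/*
+  Minimal usage sketch for a statically sized arena with no import
+  callbacks (this mirrors the span RA set up by the services sync code;
+  pszName and the 1MB span are illustrative values only):
+
+      RA_ARENA *pArena = RA_Create(pszName, 0, RA_LOCKCLASS_1,
+                                   NULL, NULL, NULL, IMG_FALSE);
+      if (pArena != NULL)
+      {
+          RA_Add(pArena, 0, 0x100000, 0, NULL);
+      }
+
+  With imp_alloc == NULL, _RequestAllocFail is substituted, so RA_Alloc
+  can only ever be satisfied from spans added with RA_Add.
+*/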
+
+/*************************************************************************/ /*!
+@Function       RA_Delete
+@Description    To delete a resource arena. All resources allocated from
+                the arena must be freed before deleting the arena.
+@Input          pArena        The arena to delete.
+*/ /**************************************************************************/
+IMG_INTERNAL void
+RA_Delete (RA_ARENA *pArena)
+{
+	IMG_UINT32 uIndex;
+	IMG_BOOL bWarn = IMG_TRUE;
+
+	PVR_ASSERT(pArena != NULL);
+
+	if (pArena == NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: invalid parameter - pArena"));
+		return;
+	}
+
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	PVR_DPF ((PVR_DBG_MESSAGE,
+			  "RA_Delete: name='%s'", pArena->name));
+
+	while (pArena->pHeadSegment != NULL)
+	{
+		BT *pBT = pArena->pHeadSegment;
+
+		if (pBT->type != btt_free)
+		{
+			if (bWarn)
+			{
+				PVR_DPF ((PVR_DBG_ERROR, "%s: Allocations still exist in the arena that is being destroyed", __func__));
+				PVR_DPF ((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__));
+				PVR_DPF ((PVR_DBG_ERROR, "%s: base = 0x%llx size=0x%llx", __func__,
+					  (unsigned long long)pBT->base, (unsigned long long)pBT->uSize));
+				PVR_DPF ((PVR_DBG_ERROR, "%s: This warning will be issued only once for the first allocation found!", __func__));
+				bWarn = IMG_FALSE;
+			}
+		}
+		else
+		{
+			_FreeListRemove(pArena, pBT);
+		}
+
+		_SegmentListRemove (pArena, pBT);
+		OSFreeMem(pBT);
+		/*not nulling original pointer, it has changed*/
+	}
+
+	while (pArena->per_flags_buckets != NULL)
+	{
+		for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
+		{
+			PVR_ASSERT(pArena->per_flags_buckets->buckets[uIndex] == NULL);
+		}
+
+		pArena->per_flags_buckets = PVRSRVDelete(pArena->per_flags_buckets->ui32Flags, pArena->per_flags_buckets);
+	}
+
+	HASH_Delete (pArena->pSegmentHash);
+	OSLockDestroy(pArena->hLock);
+	OSFreeMem(pArena);
+	/*not nulling pointer, copy on stack*/
+}
+
+/*************************************************************************/ /*!
+@Function       RA_Add
+@Description    To add a resource span to an arena. The span must not
+                overlap with any span previously added to the arena.
+@Input          pArena     The arena to add a span into.
+@Input          base       The base of the span.
+@Input          uSize      The extent of the span.
+@Input          uFlags     The flags of the new import.
+@Input          hPriv      A private handle associated with the span. (reserved for user)
+@Return         IMG_TRUE - Success
+                IMG_FALSE - failure
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+RA_Add (RA_ARENA *pArena,
+		RA_BASE_T base,
+		RA_LENGTH_T uSize,
+		RA_FLAGS_T uFlags,
+		RA_PERISPAN_HANDLE hPriv)
+{
+	struct _BT_* bt;
+	PVR_ASSERT (pArena != NULL);
+	PVR_ASSERT (uSize != 0);
+
+	if (pArena == NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR,"RA_Add: invalid parameter - pArena"));
+		return IMG_FALSE;
+	}
+
+	if(uSize == 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RA_Add: invalid size 0 added to arena %s", pArena->name));
+		return IMG_FALSE;
+	}
+
+	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+	PVR_ASSERT(is_arena_valid(pArena));
+	PVR_DPF ((PVR_DBG_MESSAGE, "RA_Add: name='%s', "
+              "base=0x%llx, size=0x%llx", pArena->name,
+			  (unsigned long long)base, (unsigned long long)uSize));
+
+	uSize = (uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+	bt = _InsertResource(pArena, base, uSize, uFlags);
+	if (bt != NULL)
+	{
+		bt->hPriv = hPriv;
+	}
+
+	PVR_ASSERT(is_arena_valid(pArena));
+	OSLockRelease(pArena->hLock);
+
+	return bt != NULL;
+}
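+
+/*
+  Quantum rounding example: with uQuantum = 0x1000 (uLog2Quantum = 12),
+  adding a span of uSize = 0x2800 records a span of
+  (0x2800 + 0xFFF) & ~0xFFF = 0x3000 bytes.
+*/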
+
+/*************************************************************************/ /*!
+@Function       RA_Alloc
+@Description    To allocate resource from an arena.
+@Input          pArena            The arena
+@Input          uRequestSize      The size of resource segment requested.
+@Input          uImportMultiplier Import x-times more for future requests if
+                                  we have to import new memory.
+@Output         pActualSize       The actual size of resource segment
+                                  allocated, typically rounded up to the arena quantum.
+@Output         phPriv            The user reference associated with allocated resource span.
+@Input          uImportFlags      Flags influencing allocation policy.
+@Input          uAlignment        The alignment constraint required for the
+                                  allocated segment; use 0 if no alignment
+                                  is required, otherwise must be a power of 2.
+@Output         base              Allocated resource base.
+@Return         PVRSRV_OK - success
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RA_Alloc (RA_ARENA *pArena,
+		  RA_LENGTH_T uRequestSize,
+		  IMG_UINT8 uImportMultiplier,
+		  RA_FLAGS_T uImportFlags,
+		  RA_LENGTH_T uAlignment,
+		  const IMG_CHAR *pszAnnotation,
+		  RA_BASE_T *base,
+		  RA_LENGTH_T *pActualSize,
+		  RA_PERISPAN_HANDLE *phPriv)
+{
+	PVRSRV_ERROR eError;
+	IMG_BOOL bResult;
+	RA_LENGTH_T uSize = uRequestSize;
+	RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK);
+
+	if (pArena == NULL || uImportMultiplier == 0 || uSize == 0)
+	{
+		PVR_DPF ((PVR_DBG_ERROR,
+		          "RA_Alloc: One of the necessary parameters is 0"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	if (pActualSize != NULL)
+	{
+		*pActualSize = uSize;
+	}
+
+	/* Must be a power of 2 or 0 */
+	PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0);
+
+	PVR_DPF ((PVR_DBG_MESSAGE,
+	          "RA_Alloc: arena='%s', size=0x%llx(0x%llx), "
+	          "alignment=0x%llx", pArena->name,
+	          (unsigned long long)uSize, (unsigned long long)uRequestSize,
+	          (unsigned long long)uAlignment));
+
+	/* If the allocation attempt fails we may have an import source
+	   that can provide more resource, otherwise we must fail the
+	   allocation back to the caller. */
+	bResult = _AttemptAllocAligned (pArena, uSize, uFlags, uAlignment, base, phPriv);
+	if (!bResult)
+	{
+		IMG_HANDLE hPriv;
+		RA_BASE_T import_base;
+		RA_LENGTH_T uImportSize = uSize;
+
+		/*
+			Ensure that we allocate sufficient space to meet the uAlignment
+			constraint
+		 */
+		if (uAlignment > pArena->uQuantum)
+		{
+			uImportSize += (uAlignment - pArena->uQuantum);
+		}
+
+		/* apply over-allocation multiplier after all alignment adjustments */
+		uImportSize *= uImportMultiplier;
+
+		/* ensure that we import according to the quanta of this arena */
+		uImportSize = (uImportSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+
+		eError = pArena->pImportAlloc (pArena->pImportHandle,
+		                               uImportSize, uImportFlags,
+		                               pszAnnotation,
+		                               &import_base, &uImportSize,
+		                               &hPriv);
+		if (PVRSRV_OK != eError)
+		{
+			OSLockRelease(pArena->hLock);
+			return eError;
+		}
+		else
+		{
+			BT *pBT;
+			pBT = _InsertResourceSpan (pArena, import_base, uImportSize, uFlags);
+			/* successfully import more resource, create a span to
+			   represent it and retry the allocation attempt */
+			if (pBT == NULL)
+			{
+				/* insufficient resources to insert the newly acquired span,
+				   so free it back again */
+				pArena->pImportFree(pArena->pImportHandle, import_base, hPriv);
+
+				PVR_DPF ((PVR_DBG_MESSAGE, "RA_Alloc: name='%s', "
+				          "size=0x%llx failed!", pArena->name,
+				          (unsigned long long)uSize));
+				/* RA_Dump (arena); */
+
+				OSLockRelease(pArena->hLock);
+				return PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED;
+			}
+
+			pBT->hPriv = hPriv;
+
+			bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv);
+			if (!bResult)
+			{
+				PVR_DPF ((PVR_DBG_ERROR,
+				          "RA_Alloc: name='%s' second alloc failed!",
+				          pArena->name));
+
+				/*
+				  On failure of _AttemptAllocAligned() depending on the exact point
+				  of failure, the imported segment may have been used and freed, or
+				  left untouched. If the latter, we need to return it.
+				*/
+				_FreeBT(pArena, pBT);
+
+				OSLockRelease(pArena->hLock);
+				return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED;
+			}
+			else
+			{
+				/* Check if the new allocation was in the span we just added... */
+				if (*base < import_base  ||  *base > (import_base + uImportSize))
+				{
+					PVR_DPF ((PVR_DBG_ERROR,
+					          "RA_Alloc: name='%s' alloc did not occur in the imported span!",
+					          pArena->name));
+
+					/*
+					  Remove the imported span which should not be in use (if it is then
+					  that is okay, but essentially no span should exist that is not used).
+					*/
+					_FreeBT(pArena, pBT);
+				}
+			}
+		}
+	}
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "RA_Alloc: name='%s', size=0x%llx, "
+              "*base=0x%llx = %d", pArena->name, (unsigned long long)uSize,
+			  (unsigned long long)*base, bResult));
+
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	OSLockRelease(pArena->hLock);
+	return PVRSRV_OK;
+}
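+
+/*
+  Import sizing sketch, using hypothetical numbers: with
+  uQuantum = 0x1000, uRequestSize = 0x1800, uAlignment = 0x4000 and
+  uImportMultiplier = 2, the alignment padding gives
+  0x1800 + (0x4000 - 0x1000) = 0x4800, the multiplier doubles that to
+  0x9000, which is already a multiple of the quantum, so 0x9000 bytes
+  are requested from the import callback.
+*/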
+
+
+
+
+/*************************************************************************/ /*!
+@Function       RA_Free
+@Description    To free a resource segment.
+@Input          pArena     The arena the segment was originally allocated from.
+@Input          base       The base of the resource span to free.
+*/ /**************************************************************************/
+IMG_INTERNAL void
+RA_Free (RA_ARENA *pArena, RA_BASE_T base)
+{
+	BT *pBT;
+
+	PVR_ASSERT (pArena != NULL);
+
+	if (pArena == NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR,"RA_Free: invalid parameter - pArena"));
+		return;
+	}
+
+	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "RA_Free: name='%s', base=0x%llx", pArena->name,
+			  (unsigned long long)base));
+
+	pBT = (BT *) HASH_Remove_Extended (pArena->pSegmentHash, &base);
+	PVR_ASSERT (pBT != NULL);
+
+	if (pBT)
+	{
+		PVR_ASSERT (pBT->base == base);
+		_FreeBT (pArena, pBT);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RA_Free: no resource span found for given base (0x%llX) in arena %s",
+										(unsigned long long) base,
+											pArena->name));
+	}
+
+	PVR_ASSERT(is_arena_valid(pArena));
+	OSLockRelease(pArena->hLock);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/sync.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/sync.c
new file mode 100644
index 0000000..797eb14
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/sync.c
@@ -0,0 +1,2065 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services synchronisation interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements client side code for services synchronisation
+                interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "client_sync_bridge.h"
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+#include "client_synctracking_bridge.h"
+#endif
+#include "pvr_bridge.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+#include "sync.h"
+#include "sync_internal.h"
+#include "lock.h"
+#include "log2.h"
+/* FIXME */
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+
+#define SYNC_BLOCK_LIST_CHUNCK_SIZE	10
+
+/*
+	This defines the maximum amount of synchronisation memory
+	that can be allocated per SyncPrim context.
+	In reality this number is meaningless as we would run out
+	of synchronisation memory before we reach this limit, but
+	we need to provide a size to the span RA.
+*/
+#define MAX_SYNC_MEM				(4 * 1024 * 1024)
+
+typedef struct _SYNC_BLOCK_LIST_
+{
+	IMG_UINT32			ui32BlockCount;			/*!< Number of blocks in the list */
+	IMG_UINT32			ui32BlockListSize;		/*!< Size of the block array */
+	SYNC_PRIM_BLOCK		**papsSyncPrimBlock;	/*!< Array of syncprim blocks */
+} SYNC_BLOCK_LIST;
+
+typedef struct _SYNC_OP_COOKIE_
+{
+	IMG_UINT32				ui32SyncCount;
+	IMG_UINT32				ui32ClientSyncCount;
+	IMG_UINT32				ui32ServerSyncCount;
+	IMG_BOOL				bHaveServerSync;
+	IMG_HANDLE				hBridge;
+	IMG_HANDLE				hServerCookie;
+
+	SYNC_BLOCK_LIST			*psSyncBlockList;
+	PVRSRV_CLIENT_SYNC_PRIM	**papsSyncPrim;
+	/*
+		Client sync(s) info.
+		If this changes update the calculation of ui32ClientAllocSize
+	*/
+	IMG_UINT32				*paui32SyncBlockIndex;
+	IMG_UINT32				*paui32Index;
+	IMG_UINT32				*paui32Flags;
+	IMG_UINT32				*paui32FenceValue;
+	IMG_UINT32				*paui32UpdateValue;
+
+	/*
+		Server sync(s) info
+		If this changes update the calculation of ui32ServerAllocSize
+	*/
+	IMG_HANDLE				*pahServerSync;
+	IMG_UINT32              *paui32ServerFlags;
+} SYNC_OP_COOKIE;
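+
+/*
+  The cookie and all of its arrays are carved out of a single
+  allocation in SyncPrimOpCreate, laid out in this order:
+
+      [SYNC_OP_COOKIE]
+      [papsSyncPrim:         ui32SyncCount pointers]
+      [paui32SyncBlockIndex, paui32Index, paui32Flags,
+       paui32FenceValue, paui32UpdateValue:
+                             ui32ClientSyncCount IMG_UINT32s each]
+      [pahServerSync:        ui32ServerSyncCount handles]
+      [paui32ServerFlags:    ui32ServerSyncCount IMG_UINT32s]
+*/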
+
+/* forward declaration */
+static PVRSRV_ERROR
+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value);
+
+/*
+	Internal interfaces for management of SYNC_PRIM_CONTEXT
+*/
+static void
+_SyncPrimContextUnref(SYNC_PRIM_CONTEXT *psContext)
+{
+	if (!OSAtomicRead(&psContext->hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_SyncPrimContextUnref context already freed"));
+	}
+	else if (0 == OSAtomicDecrement(&psContext->hRefCount))
+	{
+		/* SyncPrimContextDestroy only when no longer referenced */
+		RA_Delete(psContext->psSpanRA);
+		RA_Delete(psContext->psSubAllocRA);
+		OSFreeMem(psContext);
+	}
+}
+
+static void
+_SyncPrimContextRef(SYNC_PRIM_CONTEXT *psContext)
+{
+	if (!OSAtomicRead(&psContext->hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_SyncPrimContextRef context use after free"));
+	}
+	else
+	{
+		OSAtomicIncrement(&psContext->hRefCount);
+	}
+}
+
+/*
+	Internal interfaces for management of synchronisation block memory
+*/
+static PVRSRV_ERROR
+AllocSyncPrimitiveBlock(SYNC_PRIM_CONTEXT *psContext,
+						SYNC_PRIM_BLOCK **ppsSyncBlock)
+{
+	SYNC_PRIM_BLOCK *psSyncBlk;
+	IMG_HANDLE hSyncPMR;
+	IMG_HANDLE hSyncImportHandle;
+	IMG_DEVMEM_SIZE_T uiImportSize;
+	PVRSRV_ERROR eError;
+
+	psSyncBlk = OSAllocMem(sizeof(SYNC_PRIM_BLOCK));
+	if (psSyncBlk == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+	psSyncBlk->psContext = psContext;
+
+	/* Allocate sync prim block */
+	eError = BridgeAllocSyncPrimitiveBlock(psContext->hDevConnection,
+	                                       &psSyncBlk->hServerSyncPrimBlock,
+										   &psSyncBlk->ui32FirmwareAddr,
+										   &psSyncBlk->ui32SyncBlockSize,
+										   &hSyncPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_blockalloc;
+	}
+
+	/* Make it mappable by the client */
+	eError = DevmemMakeLocalImportHandle(psContext->hDevConnection,
+										hSyncPMR,
+										&hSyncImportHandle);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_export;
+	}
+
+	/* Get CPU mapping of the memory block */
+	eError = DevmemLocalImport(psContext->hDevConnection,
+	                           hSyncImportHandle,
+	                           PVRSRV_MEMALLOCFLAG_CPU_READABLE,
+	                           &psSyncBlk->hMemDesc,
+	                           &uiImportSize,
+	                           "SyncPrimitiveBlock");
+
+	/*
+		Regardless of success or failure we "undo" the export
+	*/
+	DevmemUnmakeLocalImportHandle(psContext->hDevConnection,
+								 hSyncImportHandle);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_import;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc,
+									  (void **) &psSyncBlk->pui32LinAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cpuvaddr;
+	}
+
+	*ppsSyncBlock = psSyncBlk;
+	return PVRSRV_OK;
+
+fail_cpuvaddr:
+	DevmemFree(psSyncBlk->hMemDesc);
+fail_import:
+fail_export:
+	BridgeFreeSyncPrimitiveBlock(psContext->hDevConnection,
+								 psSyncBlk->hServerSyncPrimBlock);
+fail_blockalloc:
+	OSFreeMem(psSyncBlk);
+fail_alloc:
+	return eError;
+}
+
+static void
+FreeSyncPrimitiveBlock(SYNC_PRIM_BLOCK *psSyncBlk)
+{
+	SYNC_PRIM_CONTEXT *psContext = psSyncBlk->psContext;
+
+	DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc);
+	DevmemFree(psSyncBlk->hMemDesc);
+	BridgeFreeSyncPrimitiveBlock(psContext->hDevConnection,
+								 psSyncBlk->hServerSyncPrimBlock);
+	OSFreeMem(psSyncBlk);
+}
+
+static PVRSRV_ERROR
+SyncPrimBlockImport(RA_PERARENA_HANDLE hArena,
+					RA_LENGTH_T uSize,
+					RA_FLAGS_T uFlags,
+					const IMG_CHAR *pszAnnotation,
+					RA_BASE_T *puiBase,
+					RA_LENGTH_T *puiActualSize,
+					RA_PERISPAN_HANDLE *phImport)
+{
+	SYNC_PRIM_CONTEXT *psContext = hArena;
+	SYNC_PRIM_BLOCK *psSyncBlock = NULL;
+	RA_LENGTH_T uiSpanSize;
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(uFlags);
+
+	/* Check we've not been called with an unexpected size */
+	if (!hArena || sizeof(IMG_UINT32) != uSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input params", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	/*
+		Ensure the syncprim context doesn't go away while we have sync blocks
+		attached to it
+	*/
+	_SyncPrimContextRef(psContext);
+
+	/* Allocate the block of memory */
+	eError = AllocSyncPrimitiveBlock(psContext, &psSyncBlock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to allocate syncprim block (%d)", eError));
+		goto fail_syncblockalloc;
+	}
+
+	/* Allocate a span for it */
+	eError = RA_Alloc(psContext->psSpanRA,
+					psSyncBlock->ui32SyncBlockSize,
+					RA_NO_IMPORT_MULTIPLIER,
+					0,
+					psSyncBlock->ui32SyncBlockSize,
+					pszAnnotation,
+					&psSyncBlock->uiSpanBase,
+					&uiSpanSize,
+					NULL);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_spanalloc;
+	}
+
+	/*
+		There is no reason the span RA should return an allocation larger
+		than we request
+	*/
+	PVR_ASSERT(uiSpanSize == psSyncBlock->ui32SyncBlockSize);
+
+	*puiBase = psSyncBlock->uiSpanBase;
+	*puiActualSize = psSyncBlock->ui32SyncBlockSize;
+	*phImport = psSyncBlock;
+	return PVRSRV_OK;
+
+fail_spanalloc:
+	FreeSyncPrimitiveBlock(psSyncBlock);
+fail_syncblockalloc:
+	_SyncPrimContextUnref(psContext);
+e0:
+	return eError;
+}
+
+static void
+SyncPrimBlockUnimport(RA_PERARENA_HANDLE hArena,
+					  RA_BASE_T uiBase,
+					  RA_PERISPAN_HANDLE hImport)
+{
+	SYNC_PRIM_CONTEXT *psContext = hArena;
+	SYNC_PRIM_BLOCK *psSyncBlock = hImport;
+
+	if (!psContext || !psSyncBlock || uiBase != psSyncBlock->uiSpanBase)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input params", __FUNCTION__));
+		return;
+	}
+
+	/* Free the span this import is using */
+	RA_Free(psContext->psSpanRA, uiBase);
+
+	/* Free the syncprim block */
+	FreeSyncPrimitiveBlock(psSyncBlock);
+
+	/* Drop our reference to the syncprim context */
+	_SyncPrimContextUnref(psContext);
+}
+
+static INLINE IMG_UINT32 SyncPrimGetOffset(SYNC_PRIM *psSyncInt)
+{
+	IMG_UINT64 ui64Temp;
+	
+	PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL);
+
+	/* FIXME: Subtracting a 64-bit address from another and then implicit
+	 * cast to 32-bit number. Need to review all call sequences that use this
+	 * function, added explicit casting for now.
+	 */
+	ui64Temp =  psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
+	PVR_ASSERT(ui64Temp<IMG_UINT32_MAX);
+	return (IMG_UINT32)ui64Temp;
+}
+
+static void SyncPrimGetCPULinAddr(SYNC_PRIM *psSyncInt)
+{
+	SYNC_PRIM_BLOCK *psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+
+	psSyncInt->sCommon.pui32LinAddr = psSyncBlock->pui32LinAddr +
+									  (SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32));
+}
+
+static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt)
+{
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	{
+		PVRSRV_ERROR eError;
+		IMG_HANDLE hConn =
+				psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+		if(PVRSRVIsBridgeEnabled(hConn, PVRSRV_BRIDGE_SYNCTRACKING))
+		{
+			if(psSyncInt->u.sLocal.hRecord)
+			{
+				/* remove this sync record */
+				eError = BridgeSyncRecordRemoveByHandle(hConn,
+				                                        psSyncInt->u.sLocal.hRecord);
+				if (PVRSRV_OK != eError)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "%s: failed to remove SyncRecord", __FUNCTION__));
+				}
+			}
+		}
+		else
+#endif /* if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+		{
+			IMG_UINT32 ui32FWAddr = psSyncBlock->ui32FirmwareAddr +
+					SyncPrimGetOffset(psSyncInt);
+
+			eError = BridgeSyncFreeEvent(hConn, ui32FWAddr);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_WARNING, "BridgeSyncFreeEvent failed with error:"
+				        " %d", eError));
+			}
+		}
+	}
+	/* Reset the sync prim value as it is freed.
+	 * This guarantees that a sync later allocated from this slot will
+	 * have a value of zero and the client does not need to
+	 * explicitly initialise the sync value to zero.
+	 * the allocation of the backing memory for the sync prim block
+	 * is done with ZERO_ON_ALLOC so the memory is initially all zero.
+	 */
+	(void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_RESET_VALUE);
+
+	RA_Free(psContext->psSubAllocRA, psSyncInt->u.sLocal.uiSpanAddr);
+	OSFreeMem(psSyncInt);
+	_SyncPrimContextUnref(psContext);
+}
+
+static void SyncPrimServerFree(SYNC_PRIM *psSyncInt)
+{
+	PVRSRV_ERROR eError;
+
+	eError = BridgeServerSyncFree(psSyncInt->u.sServer.hBridge,
+								  psSyncInt->u.sServer.hServerSync);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimServerFree failed"));
+	}
+	OSFreeMem(psSyncInt);
+}
+
+static void SyncPrimLocalUnref(SYNC_PRIM *psSyncInt)
+{
+	if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalUnref sync already freed"));
+	}
+	else if (0 == OSAtomicDecrement(&psSyncInt->u.sLocal.hRefCount))
+	{
+		SyncPrimLocalFree(psSyncInt);
+	}
+}
+
+static void SyncPrimLocalRef(SYNC_PRIM *psSyncInt)
+{
+	if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalRef sync use after free"));
+	}
+	else
+	{
+		OSAtomicIncrement(&psSyncInt->u.sLocal.hRefCount);
+	}
+}
+
+static IMG_UINT32 SyncPrimGetFirmwareAddrLocal(SYNC_PRIM *psSyncInt)
+{
+	SYNC_PRIM_BLOCK *psSyncBlock;
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	return psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psSyncInt);	
+}
+
+static IMG_UINT32 SyncPrimGetFirmwareAddrServer(SYNC_PRIM *psSyncInt)
+{
+	return psSyncInt->u.sServer.ui32FirmwareAddr;
+}
+
+#if !defined(__KERNEL__)
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandleLocal(SYNC_PRIM *psSyncInt)
+{
+	return psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection;
+}
+
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandleServer(SYNC_PRIM *psSyncInt)
+{
+	return psSyncInt->u.sServer.hBridge;
+}
+
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	SYNC_PRIM *psSyncInt;
+
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		return _SyncPrimGetBridgeHandleLocal(psSyncInt);
+	}
+	else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+		return _SyncPrimGetBridgeHandleServer(psSyncInt);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_SyncPrimGetBridgeHandle: Invalid sync type"));
+		/*
+			Either the client has given us a bad pointer or there is an
+			error in this module
+		*/
+		return 0;
+	}
+}
+#endif
+
+/*
+	Internal interfaces for management of syncprim block lists
+*/
+static SYNC_BLOCK_LIST *_SyncPrimBlockListCreate(void)
+{
+	SYNC_BLOCK_LIST *psBlockList;
+
+	psBlockList = OSAllocMem(sizeof(SYNC_BLOCK_LIST));
+	if (!psBlockList)
+	{
+		return NULL;
+	}
+
+	psBlockList->ui32BlockCount = 0;
+	psBlockList->ui32BlockListSize = SYNC_BLOCK_LIST_CHUNCK_SIZE;
+
+	psBlockList->papsSyncPrimBlock = OSAllocMem(sizeof(SYNC_PRIM_BLOCK *)
+													* SYNC_BLOCK_LIST_CHUNCK_SIZE);
+	if (!psBlockList->papsSyncPrimBlock)
+	{
+		OSFreeMem(psBlockList);
+		return NULL;
+	}
+
+	OSCachedMemSet(psBlockList->papsSyncPrimBlock,
+			 0,
+			 sizeof(SYNC_PRIM_BLOCK *) * psBlockList->ui32BlockListSize);
+
+	return psBlockList;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListAdd(SYNC_BLOCK_LIST *psBlockList,
+											SYNC_PRIM_BLOCK *psSyncPrimBlock)
+{
+	IMG_UINT32 i;
+
+	/* Check the block isn't already on the list */
+	for (i=0;i<psBlockList->ui32BlockCount;i++)
+	{
+		if (psBlockList->papsSyncPrimBlock[i] == psSyncPrimBlock)
+		{
+			return PVRSRV_OK;
+		}
+	}
+
+	/* Check we have space for a new item */
+	if (psBlockList->ui32BlockCount == psBlockList->ui32BlockListSize)
+	{
+		SYNC_PRIM_BLOCK	**papsNewSyncPrimBlock;
+
+		papsNewSyncPrimBlock = OSAllocMem(sizeof(SYNC_PRIM_BLOCK *) *
+											(psBlockList->ui32BlockListSize +
+											SYNC_BLOCK_LIST_CHUNCK_SIZE));
+		if (!papsNewSyncPrimBlock)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+		OSCachedMemCopy(papsNewSyncPrimBlock,
+				  psBlockList->papsSyncPrimBlock,
+				  sizeof(SYNC_PRIM_BLOCK *) *
+				  psBlockList->ui32BlockListSize);
+
+		OSFreeMem(psBlockList->papsSyncPrimBlock);
+
+		psBlockList->papsSyncPrimBlock = papsNewSyncPrimBlock;
+		psBlockList->ui32BlockListSize += SYNC_BLOCK_LIST_CHUNCK_SIZE;
+	}
+
+	/* Add the block to the list */
+	psBlockList->papsSyncPrimBlock[psBlockList->ui32BlockCount++] = psSyncPrimBlock;
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListBlockToIndex(SYNC_BLOCK_LIST *psBlockList,
+												   SYNC_PRIM_BLOCK *psSyncPrimBlock,
+												   IMG_UINT32 *pui32Index)
+{
+	IMG_UINT32 i;
+
+	for (i=0;i<psBlockList->ui32BlockCount;i++)
+	{
+		if (psBlockList->papsSyncPrimBlock[i] == psSyncPrimBlock)
+		{
+			*pui32Index = i;
+			return PVRSRV_OK;
+		}
+	}
+
+	return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListHandleArrayCreate(SYNC_BLOCK_LIST *psBlockList,
+														IMG_UINT32 *pui32BlockHandleCount,
+														IMG_HANDLE **ppahHandleList)
+{
+	IMG_HANDLE *pahHandleList;
+	IMG_UINT32 i;
+
+	pahHandleList = OSAllocMem(sizeof(IMG_HANDLE) *
+							   psBlockList->ui32BlockCount);
+	if (!pahHandleList)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	for (i=0;i<psBlockList->ui32BlockCount;i++)
+	{
+		pahHandleList[i] = psBlockList->papsSyncPrimBlock[i]->hServerSyncPrimBlock;
+	}
+
+	*ppahHandleList = pahHandleList;
+	*pui32BlockHandleCount = psBlockList->ui32BlockCount;
+
+	return PVRSRV_OK;
+}
+
+static void _SyncPrimBlockListHandleArrayDestroy(IMG_HANDLE *pahHandleList)
+{
+	OSFreeMem(pahHandleList);
+}
+
+static IMG_UINT32 _SyncPrimBlockListGetClientValue(SYNC_BLOCK_LIST *psBlockList,
+												   IMG_UINT32 ui32BlockIndex,
+												   IMG_UINT32 ui32Index)
+{
+	return psBlockList->papsSyncPrimBlock[ui32BlockIndex]->pui32LinAddr[ui32Index];
+}
+
+static void _SyncPrimBlockListDestroy(SYNC_BLOCK_LIST *psBlockList)
+{
+	OSFreeMem(psBlockList->papsSyncPrimBlock);
+	OSFreeMem(psBlockList);
+}
+
+
+static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align)
+{
+	PVR_ASSERT(IsPower2(ui32Align));
+	return ExactLog2(ui32Align);
+}
+
+/*
+	External interfaces
+*/
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection,
+                      PSYNC_PRIM_CONTEXT *phSyncPrimContext)
+{
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	psContext = OSAllocMem(sizeof(SYNC_PRIM_CONTEXT));
+	if (psContext == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	psContext->hDevConnection = hDevConnection;
+
+	OSSNPrintf(psContext->azName, SYNC_PRIM_NAME_SIZE, "Sync Prim RA-%p", psContext);
+	OSSNPrintf(psContext->azSpanName, SYNC_PRIM_NAME_SIZE, "Sync Prim span RA-%p", psContext);
+
+	/*
+		Create the RA for sub-allocations of the SyncPrims
+
+		Note:
+		The import size doesn't matter here as the server will pass
+		back the block size when it does the import, which overrides
+		what we specify here.
+	*/
+
+	psContext->psSubAllocRA = RA_Create(psContext->azName,
+										/* Params for imports */
+										_Log2(sizeof(IMG_UINT32)),
+										RA_LOCKCLASS_2,
+										SyncPrimBlockImport,
+										SyncPrimBlockUnimport,
+										psContext,
+										IMG_FALSE);
+	if (psContext->psSubAllocRA == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_suballoc;
+	}
+
+	/*
+		Create the span-management RA
+
+		The RA requires that we work with linear spans. For our use
+		here we don't require this behaviour as we're always working
+		within offsets of blocks (imports). However, we need to keep
+		the RA happy so we create the "span" management RA, which
+		ensures that all our imports are added to the RA in a linear
+		fashion.
+	*/
+	psContext->psSpanRA = RA_Create(psContext->azSpanName,
+									/* Params for imports */
+									0,
+									RA_LOCKCLASS_1,
+									NULL,
+									NULL,
+									NULL,
+									IMG_FALSE);
+	if (psContext->psSpanRA == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_span;
+	}
+
+	if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_MEM, 0, NULL))
+	{
+		RA_Delete(psContext->psSpanRA);
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_span;
+	}
+
+	OSAtomicWrite(&psContext->hRefCount, 1);
+
+	*phSyncPrimContext = psContext;
+	return PVRSRV_OK;
+fail_span:
+	RA_Delete(psContext->psSubAllocRA);
+fail_suballoc:
+	OSFreeMem(psContext);
+fail_alloc:
+	return eError;
+}
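+
+/*
+  Typical client-side lifetime, sketched from the interfaces in this
+  file ("ExampleSync" is an illustrative class name and error handling
+  is elided):
+
+      PSYNC_PRIM_CONTEXT hCtx;
+      PVRSRV_CLIENT_SYNC_PRIM *psSync;
+
+      SyncPrimContextCreate(hDevConnection, &hCtx);
+      SyncPrimAlloc(hCtx, &psSync, "ExampleSync");
+      SyncPrimSet(psSync, 0);
+      ...
+      SyncPrimFree(psSync);
+      SyncPrimContextDestroy(hCtx);
+*/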
+
+IMG_INTERNAL void SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext)
+{
+	SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext;
+	if (1 != OSAtomicRead(&psContext->hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s attempted with active references; this may be the result of a race", __FUNCTION__));
+	}
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+#if defined(__KERNEL__)
+	if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Forcing context destruction due to bad driver state.", __FUNCTION__));
+		OSAtomicWrite(&psContext->hRefCount, 1);
+	}
+#endif
+#endif
+	_SyncPrimContextUnref(psContext);
+}
+
+static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+                                   PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+                                   const IMG_CHAR *pszClassName,
+                                   IMG_BOOL bServerSync)
+{
+	SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM *psNewSync;
+	PVRSRV_ERROR eError;
+	RA_BASE_T uiSpanAddr;
+
+	if (!hSyncPrimContext)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid context", __func__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psNewSync = OSAllocMem(sizeof(SYNC_PRIM));
+	if (psNewSync == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	eError = RA_Alloc(psContext->psSubAllocRA,
+	                  sizeof(IMG_UINT32),
+	                  RA_NO_IMPORT_MULTIPLIER,
+	                  0,
+	                  sizeof(IMG_UINT32),
+	                  "Sync_Prim",
+	                  &uiSpanAddr,
+	                  NULL,
+	                  (RA_PERISPAN_HANDLE *) &psSyncBlock);
+	if (PVRSRV_OK != eError)
+	{
+		goto fail_raalloc;
+	}
+	psNewSync->eType = SYNC_PRIM_TYPE_LOCAL;
+	OSAtomicWrite(&psNewSync->u.sLocal.hRefCount, 1);
+	psNewSync->u.sLocal.uiSpanAddr = uiSpanAddr;
+	psNewSync->u.sLocal.psSyncBlock = psSyncBlock;
+	SyncPrimGetCPULinAddr(psNewSync);
+	*ppsSync = &psNewSync->sCommon;
+	_SyncPrimContextRef(psContext);
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	if(PVRSRVIsBridgeEnabled(psSyncBlock->psContext->hDevConnection, PVRSRV_BRIDGE_SYNCTRACKING))
+	{
+		IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+		if(pszClassName)
+		{
+			/* Copy the class name annotation into a fixed-size array */
+			OSStringNCopy(szClassName, pszClassName, SYNC_MAX_CLASS_NAME_LEN - 1);
+			szClassName[SYNC_MAX_CLASS_NAME_LEN - 1] = 0;
+		}
+		else
+		{
+			/* No class name annotation */
+			szClassName[0] = 0;
+		}
+		/* record this sync */
+		eError = BridgeSyncRecordAdd(
+					psSyncBlock->psContext->hDevConnection,
+					&psNewSync->u.sLocal.hRecord,
+					psSyncBlock->hServerSyncPrimBlock,
+					psSyncBlock->ui32FirmwareAddr,
+					SyncPrimGetOffset(psNewSync),
+					bServerSync,
+					OSStringNLength(szClassName, SYNC_MAX_CLASS_NAME_LEN),
+					szClassName);
+		if (PVRSRV_OK != eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: failed to add SyncRecord \"%s\" (%s)",
+											__FUNCTION__,
+											szClassName,
+											PVRSRVGETERRORSTRING(eError)));
+			psNewSync->u.sLocal.hRecord = NULL;
+		}
+	}
+	else
+#endif /* if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+	{
+		eError = BridgeSyncAllocEvent(hSyncPrimContext->hDevConnection,
+		                              bServerSync,
+		                              psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psNewSync),
+		                              OSStringNLength(pszClassName, SYNC_MAX_CLASS_NAME_LEN),
+		                              pszClassName);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "BridgeSyncAllocEvent failed with error: %d",
+			        eError));
+		}
+	}
+
+	return PVRSRV_OK;
+
+fail_raalloc:
+	OSFreeMem(psNewSync);
+fail_alloc:
+	return eError;
+}
+
+#if defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR SyncPrimAllocForServerSync(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+										PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+										const IMG_CHAR *pszClassName)
+{
+	return _SyncPrimAlloc(hSyncPrimContext,
+					  ppsSync,
+					  pszClassName,
+					  IMG_TRUE);
+}
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+										PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+										const IMG_CHAR *pszClassName)
+{
+	return _SyncPrimAlloc(hSyncPrimContext,
+	                      ppsSync,
+	                      pszClassName,
+	                      IMG_FALSE);
+}
+
+static PVRSRV_ERROR
+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError;
+
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		SYNC_PRIM_BLOCK *psSyncBlock;
+		SYNC_PRIM_CONTEXT *psContext;
+
+		psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+		psContext = psSyncBlock->psContext;
+
+		eError = BridgeSyncPrimSet(psContext->hDevConnection,
+									psSyncBlock->hServerSyncPrimBlock,
+									SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32),
+									ui32Value);
+	}
+	else
+	{
+		eError = BridgeServerSyncPrimSet(psSyncInt->u.sServer.hBridge,
+									psSyncInt->u.sServer.hServerSync,
+									ui32Value);
+	}
+	/* These functions don't actually fail */
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYNC_PRIM *psSyncInt;
+
+	if (!psSync)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: null sync pointer", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		SyncPrimLocalUnref(psSyncInt);
+	}
+	else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+		SyncPrimServerFree(psSyncInt);
+	}
+	else
+	{
+		/*
+			Either the client has given us a bad pointer or there is an
+			error in this module
+		*/
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto err_out;
+	}
+
+err_out:
+	return eError;
+}
+
+#if defined(NO_HARDWARE)
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYNC_PRIM *psSyncInt;
+
+	if (!psSync)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: null sync pointer", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	/* There is no check for psSyncInt being LOCAL as this call
+	   substitutes for the firmware updating a sync, and that sync
+	   could be a server one */
+
+	eError =  _SyncPrimSetValue(psSyncInt, ui32Value);
+
+err_out:
+	return eError;
+}
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYNC_PRIM *psSyncInt;
+
+	if (!psSync)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: null sync pointer", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimSet: Invalid sync type"));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto err_out;
+	}
+
+	eError = _SyncPrimSetValue(psSyncInt, ui32Value);
+
+#if defined(PDUMP)
+	SyncPrimPDump(psSync);
+#endif
+err_out:
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+							IMG_HANDLE *phBlock,
+							IMG_UINT32 *pui32Offset)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYNC_PRIM *psSyncInt;
+
+	if(!psSync || !phBlock || !pui32Offset)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalGetHandleAndOffset: invalid input pointer"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		*phBlock = psSyncInt->u.sLocal.psSyncBlock->hServerSyncPrimBlock;
+		*pui32Offset = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: psSync not a Local sync prim (%d)",
+			__FUNCTION__, psSyncInt->eType));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+err_out:
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYNC_PRIM *psSyncInt;
+
+	*pui32FwAddr = 0;
+	if (!psSync)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		*pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt);
+	}
+	else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+		*pui32FwAddr = SyncPrimGetFirmwareAddrServer(psSyncInt);
+	}
+	else
+	{
+		/* Either the client has given us a bad pointer or there is an
+		 * error in this module
+		 */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto err_out;
+	}
+
+err_out:
+	return eError;
+}
+
+#if !defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR SyncPrimDumpSyncs(IMG_UINT32 ui32SyncCount, PVRSRV_CLIENT_SYNC_PRIM **papsSync, const IMG_CHAR *pcszExtraInfo)
+{
+#if defined(PVRSRV_NEED_PVR_DPF)
+	SYNC_PRIM *psSyncInt;
+	PVRSRV_CLIENT_SYNC_PRIM **papsServerSync;
+	IMG_UINT32 ui32ServerSyncs = 0;
+	IMG_UINT32 *pui32UID = NULL;
+	IMG_UINT32 *pui32FWAddr = NULL;
+	IMG_UINT32 *pui32CurrentOp = NULL;
+	IMG_UINT32 *pui32NextOp = NULL;
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	papsServerSync = OSAllocMem(ui32SyncCount * sizeof(PVRSRV_CLIENT_SYNC_PRIM *));
+	if (!papsServerSync)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	for (i = 0; i < ui32SyncCount; i++)
+	{
+		psSyncInt = IMG_CONTAINER_OF(papsSync[i], SYNC_PRIM, sCommon);
+		if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: sync=local  fw=0x%x curr=0x%04x",
+					 pcszExtraInfo,
+					 SyncPrimGetFirmwareAddrLocal(psSyncInt),
+					 *psSyncInt->sCommon.pui32LinAddr));
+		}
+		else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+		{
+			papsServerSync[ui32ServerSyncs++] = papsSync[i];
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "SyncPrimDumpSyncs: Invalid sync type"));
+			/*
+			   Either the client has given us a bad pointer or there is an
+			   error in this module
+			   */
+			eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+			goto err_free;
+		}
+	}
+
+	if (ui32ServerSyncs > 0)
+	{
+		pui32UID = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+		if (!pui32UID)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto err_free;
+		}
+		pui32FWAddr = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+		if (!pui32FWAddr)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto err_free;
+		}
+		pui32CurrentOp = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+		if (!pui32CurrentOp)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto err_free;
+		}
+		pui32NextOp = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+		if (!pui32NextOp)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto err_free;
+		}
+		eError = SyncPrimServerGetStatus(ui32ServerSyncs, papsServerSync,
+										 pui32UID,
+										 pui32FWAddr,
+										 pui32CurrentOp,
+										 pui32NextOp);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "SyncPrimDumpSyncs: Error querying server sync status (%d)",
+					 eError));
+			goto err_free;
+		}
+		for (i = 0; i < ui32ServerSyncs; i++)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: sync=server fw=0x%x curr=0x%04x next=0x%04x id=%u%s",
+					 pcszExtraInfo,
+					 pui32FWAddr[i],
+					 pui32CurrentOp[i],
+					 pui32NextOp[i],
+					 pui32UID[i],
+					 (pui32NextOp[i] - pui32CurrentOp[i] == 1) ? " *" : 
+					 (pui32NextOp[i] - pui32CurrentOp[i] >  1) ? " **" : 
+					 ""));
+		}
+	}
+
+err_free:
+	OSFreeMem(papsServerSync);
+	if (pui32UID)
+	{
+		OSFreeMem(pui32UID);
+	}
+	if (pui32FWAddr)
+	{
+		OSFreeMem(pui32FWAddr);
+	}
+	if (pui32CurrentOp)
+	{
+		OSFreeMem(pui32CurrentOp);
+	}
+	if (pui32NextOp)
+	{
+		OSFreeMem(pui32NextOp);
+	}
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(ui32SyncCount);
+	PVR_UNREFERENCED_PARAMETER(papsSync);
+	PVR_UNREFERENCED_PARAMETER(pcszExtraInfo);
+	return PVRSRV_OK;
+#endif
+}
+#endif
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpCreate(IMG_UINT32 ui32SyncCount,
+			  PVRSRV_CLIENT_SYNC_PRIM **papsSyncPrim,
+			  PSYNC_OP_COOKIE *ppsCookie)
+{
+	SYNC_OP_COOKIE *psNewCookie;
+	SYNC_BLOCK_LIST *psSyncBlockList;
+	IMG_UINT32 ui32ServerSyncCount = 0;
+	IMG_UINT32 ui32ClientSyncCount = 0;
+	IMG_UINT32 ui32ServerAllocSize;
+	IMG_UINT32 ui32ClientAllocSize;
+	IMG_UINT32 ui32TotalAllocSize;
+	IMG_UINT32 ui32ServerIndex = 0;
+	IMG_UINT32 ui32ClientIndex = 0;
+	IMG_UINT32 i;
+	IMG_UINT32 ui32SyncBlockCount;
+	IMG_HANDLE hBridge;
+	IMG_HANDLE *pahHandleList;
+	IMG_CHAR *pcPtr;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bServerSync;
+
+	psSyncBlockList = _SyncPrimBlockListCreate();
+
+	if (!psSyncBlockList)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		eError = SyncPrimIsServerSync(papsSyncPrim[i], &bServerSync);
+		if (PVRSRV_OK != eError) goto e1;
+		if (bServerSync)
+		{
+			ui32ServerSyncCount++;
+		}
+		else
+		{
+			SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[i];
+
+			ui32ClientSyncCount++;
+			eError = _SyncPrimBlockListAdd(psSyncBlockList, psSync->u.sLocal.psSyncBlock);
+			if (eError != PVRSRV_OK)
+			{
+				goto e1;
+			}
+		}
+	}
+
+	ui32ServerAllocSize = ui32ServerSyncCount * (sizeof(IMG_HANDLE) + sizeof(IMG_UINT32));
+	ui32ClientAllocSize = ui32ClientSyncCount * (5 * sizeof(IMG_UINT32));
+	ui32TotalAllocSize = sizeof(SYNC_OP_COOKIE) +
+							 (sizeof(PVRSRV_CLIENT_SYNC_PRIM *) * ui32SyncCount) +
+							 ui32ServerAllocSize + 
+							 ui32ClientAllocSize;
+
+	psNewCookie = OSAllocMem(ui32TotalAllocSize);
+	pcPtr = (IMG_CHAR *) psNewCookie;
+
+	if (!psNewCookie)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+
+	/* Setup the pointers */
+	pcPtr += sizeof(SYNC_OP_COOKIE);
+	psNewCookie->papsSyncPrim = (PVRSRV_CLIENT_SYNC_PRIM **) pcPtr;
+
+	pcPtr += sizeof(PVRSRV_CLIENT_SYNC_PRIM *) * ui32SyncCount;
+	psNewCookie->paui32SyncBlockIndex = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32Index = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32Flags = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32FenceValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32UpdateValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->pahServerSync = (IMG_HANDLE *) pcPtr;
+	pcPtr += sizeof(IMG_HANDLE) * ui32ServerSyncCount;
+
+	psNewCookie->paui32ServerFlags = (IMG_UINT32 *) pcPtr;
+	pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+
+	/* Check the pointer setup went ok */
+	if (!(pcPtr == (((IMG_CHAR *) psNewCookie) + ui32TotalAllocSize)))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: cookie setup failed", __FUNCTION__));
+		eError = PVRSRV_ERROR_INTERNAL_ERROR;
+		goto e2;
+	}
+
+	psNewCookie->ui32SyncCount = ui32SyncCount;
+	psNewCookie->ui32ServerSyncCount = ui32ServerSyncCount;
+	psNewCookie->ui32ClientSyncCount = ui32ClientSyncCount;
+	psNewCookie->psSyncBlockList = psSyncBlockList;
+
+	/*
+		Get the bridge handle from the 1st sync.
+
+		Note: We assume that all syncs have been created with the same
+			  services connection.
+	*/
+	eError = SyncPrimIsServerSync(papsSyncPrim[0], &bServerSync);
+	if (PVRSRV_OK != eError) goto e2;
+	if (bServerSync)
+	{
+		SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[0];
+
+		hBridge = psSync->u.sServer.hBridge;
+	}
+	else
+	{
+		SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[0];
+
+		hBridge = psSync->u.sLocal.psSyncBlock->psContext->hDevConnection;
+	}
+
+	psNewCookie->hBridge = hBridge;
+
+	if (ui32ServerSyncCount)
+	{
+		psNewCookie->bHaveServerSync = IMG_TRUE;
+	}
+	else
+	{
+		psNewCookie->bHaveServerSync = IMG_FALSE;
+	}
+
+	/* Fill in the server and client sync data */
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[i];
+
+		eError = SyncPrimIsServerSync(papsSyncPrim[i], &bServerSync);
+		if (PVRSRV_OK != eError) goto e2;
+		if (bServerSync)
+		{
+			psNewCookie->pahServerSync[ui32ServerIndex] = psSync->u.sServer.hServerSync;
+
+			ui32ServerIndex++;
+		}
+		else
+		{
+			/* Location of sync */
+			eError = _SyncPrimBlockListBlockToIndex(psSyncBlockList,
+													psSync->u.sLocal.psSyncBlock,
+													&psNewCookie->paui32SyncBlockIndex[ui32ClientIndex]);
+			if (eError != PVRSRV_OK)
+			{
+				goto e2;
+			}
+
+			/* Work out the index of the sync */
+			psNewCookie->paui32Index[ui32ClientIndex] =
+					SyncPrimGetOffset(psSync)/sizeof(IMG_UINT32);
+
+			ui32ClientIndex++;
+		}
+
+		psNewCookie->papsSyncPrim[i] = papsSyncPrim[i];
+	}
+
+	eError = _SyncPrimBlockListHandleArrayCreate(psSyncBlockList,
+												 &ui32SyncBlockCount,
+												 &pahHandleList);
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	/*
+		Create the server side cookie. Here we pass in all the unchanging
+		data so we only need to pass in the minimum at takeop time
+	*/
+	eError = BridgeSyncPrimOpCreate(hBridge,
+									ui32SyncBlockCount,
+									pahHandleList,
+									psNewCookie->ui32ClientSyncCount,
+									psNewCookie->paui32SyncBlockIndex,
+									psNewCookie->paui32Index,
+									psNewCookie->ui32ServerSyncCount,
+									psNewCookie->pahServerSync,
+									&psNewCookie->hServerCookie);
+
+	/* Free the handle list regardless of error */
+	_SyncPrimBlockListHandleArrayDestroy(pahHandleList);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	/* Increase the reference count on all referenced local sync prims
+	 * so that they cannot be freed until this Op is finished with
+	 */
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		SYNC_PRIM *psSyncInt;
+		psSyncInt = IMG_CONTAINER_OF(papsSyncPrim[i], SYNC_PRIM, sCommon);
+		if (SYNC_PRIM_TYPE_LOCAL == psSyncInt->eType)
+		{
+			SyncPrimLocalRef(psSyncInt);
+		}
+	}
+
+	*ppsCookie = psNewCookie;
+	return PVRSRV_OK;
+
+e2:
+	OSFreeMem(psNewCookie);
+e1:
+	_SyncPrimBlockListDestroy(psSyncBlockList);
+e0:
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpTake(PSYNC_OP_COOKIE psCookie,
+							IMG_UINT32 ui32SyncCount,
+							PVRSRV_CLIENT_SYNC_PRIM_OP *pasSyncOp)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32ServerIndex = 0;
+	IMG_UINT32 ui32ClientIndex = 0;
+	IMG_UINT32 i;
+	IMG_BOOL bServerSync;
+
+	/* Copy client sync operations */
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		/*
+			Sanity check the client passes in the same syncs as the
+			ones we got at create time
+		*/
+		if (psCookie->papsSyncPrim[i] != pasSyncOp[i].psSync)
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto e0;
+		}
+
+		eError = SyncPrimIsServerSync(pasSyncOp[i].psSync, &bServerSync);
+		if (PVRSRV_OK != eError) goto e0;
+		if (bServerSync)
+		{
+			psCookie->paui32ServerFlags[ui32ServerIndex] =
+					pasSyncOp[i].ui32Flags;
+
+			ui32ServerIndex++;
+		}
+		else
+		{
+			/* Client operation information */
+			psCookie->paui32Flags[ui32ClientIndex] =
+					pasSyncOp[i].ui32Flags;
+			psCookie->paui32FenceValue[ui32ClientIndex] =
+					pasSyncOp[i].ui32FenceValue;
+			psCookie->paui32UpdateValue[ui32ClientIndex] =
+					pasSyncOp[i].ui32UpdateValue;
+
+			ui32ClientIndex++;
+		}
+	}
+
+	eError = BridgeSyncPrimOpTake(psCookie->hBridge,
+								  psCookie->hServerCookie,
+								  psCookie->ui32ClientSyncCount,
+								  psCookie->paui32Flags,
+								  psCookie->paui32FenceValue,
+								  psCookie->paui32UpdateValue,
+								  psCookie->ui32ServerSyncCount,
+								  psCookie->paui32ServerFlags);
+
+e0:
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpReady(PSYNC_OP_COOKIE psCookie,
+							 IMG_BOOL *pbReady)
+{
+	PVRSRV_ERROR eError;
+	if (!psCookie)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	/*
+		If we have a server sync we have no choice
+		but to do the check in the server
+	*/
+	if (psCookie->bHaveServerSync)
+	{
+		eError = BridgeSyncPrimOpReady(psCookie->hBridge,
+									   psCookie->hServerCookie,
+									   pbReady);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to do sync check in server (Error = %d)",
+					 __FUNCTION__, eError));
+			goto e0;
+		}
+	}
+	else
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 ui32SnapShot;
+		IMG_BOOL bReady = IMG_TRUE;
+
+		for (i=0;i<psCookie->ui32ClientSyncCount;i++)
+		{
+			if ((psCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK) == 0)
+			{
+				continue;
+			}
+
+			ui32SnapShot = _SyncPrimBlockListGetClientValue(psCookie->psSyncBlockList,
+															psCookie->paui32SyncBlockIndex[i],
+															psCookie->paui32Index[i]);
+			if (ui32SnapShot != psCookie->paui32FenceValue[i])
+			{
+				bReady = IMG_FALSE;
+				break;
+			}
+		}
+
+		*pbReady = bReady;
+	}
+
+	return PVRSRV_OK;
+e0:
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpComplete(PSYNC_OP_COOKIE psCookie)
+{
+	PVRSRV_ERROR eError;
+
+	eError = BridgeSyncPrimOpComplete(psCookie->hBridge,
+									  psCookie->hServerCookie);
+
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpDestroy(PSYNC_OP_COOKIE psCookie)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 i;
+
+	eError = BridgeSyncPrimOpDestroy(psCookie->hBridge, psCookie->hServerCookie);
+	if (PVRSRV_OK != eError)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed to destroy SyncPrimOp (Error = %d)",
+			 __FUNCTION__, eError));
+		goto err_out;
+	}
+
+	/* Decrease the reference count on all referenced local sync prims
+	 * so that they can be freed now this Op is finished with
+	 */
+	for (i=0;i<psCookie->ui32SyncCount;i++)
+	{
+		SYNC_PRIM *psSyncInt;
+		psSyncInt = IMG_CONTAINER_OF(psCookie->papsSyncPrim[i], SYNC_PRIM, sCommon);
+		if (SYNC_PRIM_TYPE_LOCAL == psSyncInt->eType)
+		{
+			SyncPrimLocalUnref(psSyncInt);
+		}
+	}
+
+	_SyncPrimBlockListDestroy(psCookie->psSyncBlockList);
+	OSFreeMem(psCookie);
+
+err_out:
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpResolve(PSYNC_OP_COOKIE psCookie,
+							   IMG_UINT32 *pui32SyncCount,
+							   PVRSRV_CLIENT_SYNC_PRIM_OP **ppsSyncOp)
+{
+	IMG_UINT32 ui32ServerIndex = 0;
+	IMG_UINT32 ui32ClientIndex = 0;
+	PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOps;
+	IMG_UINT32 i;
+	IMG_BOOL bServerSync;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	psSyncOps = OSAllocMem(sizeof(PVRSRV_CLIENT_SYNC_PRIM_OP) * 
+						   psCookie->ui32SyncCount);
+	if (!psSyncOps)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	for (i=0; i<psCookie->ui32SyncCount; i++)
+	{
+		psSyncOps[i].psSync = psCookie->papsSyncPrim[i];
+		eError = SyncPrimIsServerSync(psCookie->papsSyncPrim[i], &bServerSync);
+		if (PVRSRV_OK != eError) goto e1;
+		if (bServerSync)
+		{
+			psSyncOps[i].ui32FenceValue = 0;
+			psSyncOps[i].ui32UpdateValue = 0;
+			psSyncOps[i].ui32Flags = psCookie->paui32ServerFlags[ui32ServerIndex];
+			ui32ServerIndex++;
+		}
+		else
+		{
+			psSyncOps[i].ui32FenceValue = psCookie->paui32FenceValue[ui32ClientIndex]; 
+			psSyncOps[i].ui32UpdateValue = psCookie->paui32UpdateValue[ui32ClientIndex]; 
+			psSyncOps[i].ui32Flags = psCookie->paui32Flags[ui32ClientIndex];
+			ui32ClientIndex++;
+		}
+	}
+
+	*ppsSyncOp = psSyncOps;
+	*pui32SyncCount = psCookie->ui32SyncCount;
+
+	return PVRSRV_OK;
+
+e1:
+	OSFreeMem(psSyncOps);
+e0:
+	return eError;
+}
+
+#if !defined(__KERNEL__)
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerAlloc(SYNC_BRIDGE_HANDLE hBridge,
+								 PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+								 const IMG_CHAR *pszClassName
+								 PVR_DBG_FILELINE_PARAM)
+{
+	IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+	SYNC_PRIM *psNewSync;
+	PVRSRV_ERROR eError;
+
+#if !defined(PVR_SYNC_PRIM_ALLOC_TRACE)
+	PVR_DBG_FILELINE_UNREF();
+#endif
+	psNewSync = OSAllocMem(sizeof(SYNC_PRIM));
+	if (psNewSync == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+	OSCachedMemSet(psNewSync, 0, sizeof(SYNC_PRIM));
+
+	if (pszClassName)
+	{
+		/* Copy the class name annotation into a fixed-size array */
+		OSStringNCopy(szClassName, pszClassName, SYNC_MAX_CLASS_NAME_LEN - 1);
+		szClassName[SYNC_MAX_CLASS_NAME_LEN - 1] = 0;
+	}
+	else
+	{
+		/* No class name annotation */
+		szClassName[0] = 0;
+	}
+
+	eError = BridgeServerSyncAlloc(hBridge,
+								   &psNewSync->u.sServer.hServerSync,
+								   &psNewSync->u.sServer.ui32FirmwareAddr,
+								   OSStringNLength(szClassName, SYNC_MAX_CLASS_NAME_LEN),
+								   szClassName);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+#if defined(PVR_SYNC_PRIM_ALLOC_TRACE)
+	PVR_DPF((PVR_DBG_WARNING, "Allocated sync=server fw=0x%x [%p]" PVR_DBG_FILELINE_FMT,
+			 psNewSync->u.sServer.ui32FirmwareAddr, &psNewSync->sCommon PVR_DBG_FILELINE_ARG));
+#endif
+
+	psNewSync->eType = SYNC_PRIM_TYPE_SERVER;
+	psNewSync->u.sServer.hBridge = hBridge;
+	*ppsSync = &psNewSync->sCommon;
+
+	return PVRSRV_OK;
+e1:
+	OSFreeMem(psNewSync);
+e0:
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerGetStatus(IMG_UINT32 ui32SyncCount,
+									 PVRSRV_CLIENT_SYNC_PRIM **papsSync,
+									 IMG_UINT32 *pui32UID,
+									 IMG_UINT32 *pui32FWAddr,
+									 IMG_UINT32 *pui32CurrentOp,
+									 IMG_UINT32 *pui32NextOp)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+	SYNC_BRIDGE_HANDLE hBridge = NULL;
+	IMG_HANDLE *pahServerHandle;
+	IMG_BOOL bServerSync;
+
+	if (papsSync[0])
+	{
+		hBridge = _SyncPrimGetBridgeHandle(papsSync[0]);
+	}
+	if (!hBridge)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid Sync connection\n", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto e0;
+	}
+
+	pahServerHandle = OSAllocMem(sizeof(IMG_HANDLE) * ui32SyncCount);
+	if (pahServerHandle == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	/*
+		Check that all the syncs we've been passed are server syncs
+		and that they are all on the same connection.
+	*/
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		SYNC_PRIM *psIntSync;
+
+		/* SyncPrimIsServerSync rejects NULL syncs, so papsSync[i] is
+		 * known to be valid after this call */
+		eError = SyncPrimIsServerSync(papsSync[i], &bServerSync);
+		if (PVRSRV_OK != eError) goto e1;
+		if (!bServerSync)
+		{
+			eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+			goto e1;
+		}
+
+		if (hBridge != _SyncPrimGetBridgeHandle(papsSync[i]))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Sync connection is different", __FUNCTION__));
+			eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+			goto e1;
+		}
+
+		psIntSync = IMG_CONTAINER_OF(papsSync[i], SYNC_PRIM, sCommon);
+		pahServerHandle[i] = psIntSync->u.sServer.hServerSync;
+	}
+
+	eError = BridgeServerSyncGetStatus(hBridge,
+									   ui32SyncCount,
+									   pahServerHandle,
+									   pui32UID,
+									   pui32FWAddr,
+									   pui32CurrentOp,
+									   pui32NextOp);
+	OSFreeMem(pahServerHandle);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+	return PVRSRV_OK;
+
+e1:
+	OSFreeMem(pahServerHandle);
+e0:
+	return eError;
+}
+
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimIsServerSync(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_BOOL *pbServerSync)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYNC_PRIM *psSyncInt;
+
+	if (!psSync)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		*pbServerSync = IMG_FALSE;
+	}
+	else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+		*pbServerSync = IMG_TRUE;
+	}
+	else
+	{
+		/* Either the client has given us a bad pointer or there is an
+		 * error in this module
+		 */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto e0;
+	}
+
+e0:
+	return eError;
+}
+
+IMG_INTERNAL
+IMG_HANDLE SyncPrimGetServerHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	SYNC_PRIM *psSyncInt;
+
+	if (!psSync)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+		goto e0;
+	}
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+		return psSyncInt->u.sServer.hServerSync;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid sync type (%d)",
+			__FUNCTION__, psSyncInt->eType));
+		goto e0;
+	}
+e0:
+	return 0;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerQueueOp(PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOp)
+{
+	SYNC_PRIM *psSyncInt;
+	IMG_BOOL bUpdate;
+	PVRSRV_ERROR eError;
+
+	if (!psSyncOp)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	psSyncInt = IMG_CONTAINER_OF(psSyncOp->psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_SERVER)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid sync type (%d)",
+			__FUNCTION__, psSyncInt->eType));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto e0;
+	}
+	if (0 == psSyncOp->ui32Flags)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: no sync flags", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+		goto e0;
+	}
+
+	if (psSyncOp->ui32Flags & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+	{
+		bUpdate = IMG_TRUE;
+	}
+	else
+	{
+		bUpdate = IMG_FALSE;
+	}
+
+	eError = BridgeServerSyncQueueHWOp(psSyncInt->u.sServer.hBridge,
+									   psSyncInt->u.sServer.hServerSync,
+									   bUpdate,
+									   &psSyncOp->ui32FenceValue,
+									   &psSyncOp->ui32UpdateValue);
+e0:
+	return eError;
+}
+
+#if defined(PDUMP)
+IMG_INTERNAL void SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	SYNC_PRIM *psSyncInt;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSync != NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDump: Invalid sync type"));
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	eError = BridgeSyncPrimPDump(psContext->hDevConnection,
+								 psSyncBlock->hServerSyncPrimBlock,
+								 SyncPrimGetOffset(psSyncInt));
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+	SYNC_PRIM *psSyncInt;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSync != NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDump: Invalid sync type"));
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	eError = BridgeSyncPrimPDumpValue(psContext->hDevConnection,
+									  psSyncBlock->hServerSyncPrimBlock,
+									  SyncPrimGetOffset(psSyncInt),
+									  ui32Value);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+								   IMG_UINT32 ui32Value,
+								   IMG_UINT32 ui32Mask,
+								   PDUMP_POLL_OPERATOR eOperator,
+								   IMG_UINT32 ui32PDumpFlags)
+{
+	SYNC_PRIM *psSyncInt;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSync != NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDumpPol: Invalid sync type (expected SYNC_PRIM_TYPE_LOCAL)"));
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	eError = BridgeSyncPrimPDumpPol(psContext->hDevConnection,
+									psSyncBlock->hServerSyncPrimBlock,
+									SyncPrimGetOffset(psSyncInt),
+									ui32Value,
+									ui32Mask,
+									eOperator,
+									ui32PDumpFlags);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimOpPDumpPol(PSYNC_OP_COOKIE psCookie,
+									 PDUMP_POLL_OPERATOR eOperator,
+									 IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psCookie != NULL);
+
+	eError = BridgeSyncPrimOpPDumpPol(psCookie->hBridge,
+									  psCookie->hServerCookie,
+									  eOperator,
+									  ui32PDumpFlags);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+								   IMG_UINT64 uiWriteOffset,
+								   IMG_UINT64 uiPacketSize,
+								   IMG_UINT64 uiBufferSize)
+{
+	SYNC_PRIM *psSyncInt;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSync != NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDumpCBP: Invalid sync type"));
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	/* FIXME: uiWriteOffset, uiPacketSize, uiBufferSize were changed to
+	 * 64-bit quantities to resolve Windows compiler warnings.
+	 * However the bridge is only 32-bit hence compiler warnings
+	 * of implicit cast and loss of data.
+	 * Added explicit cast and assert to remove warning.
+	 */
+#if (defined(_WIN32) && !defined(_WIN64)) || (defined(LINUX) && defined(__i386__))
+	PVR_ASSERT(uiWriteOffset < IMG_UINT32_MAX);
+	PVR_ASSERT(uiPacketSize < IMG_UINT32_MAX);
+	PVR_ASSERT(uiBufferSize < IMG_UINT32_MAX);
+#endif
+	eError = BridgeSyncPrimPDumpCBP(psContext->hDevConnection,
+									psSyncBlock->hServerSyncPrimBlock,
+									SyncPrimGetOffset(psSyncInt),
+									(IMG_UINT32)uiWriteOffset,
+									(IMG_UINT32)uiPacketSize,
+									(IMG_UINT32)uiBufferSize);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+#endif
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/tlclient.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/tlclient.c
new file mode 100644
index 0000000..e62446c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/tlclient.c
@@ -0,0 +1,474 @@
+/*************************************************************************/ /*!
+@File           tlclient.c
+@Title          Services Transport Layer shared API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common API used in both clients and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* DESIGN NOTE
+ * This transport layer consumer-role API was created as a shared API when a
+ * client wanted to read the data of a TL stream from within the KM server
+ * driver. This was in addition to the existing clients supported externally
+ * by the UM client library component via PVR API layer.
+ * This shared API is thus used by the PVR TL API in the client library and
+ * by clients internal to the server driver module. It depends on
+ * client entry points of the TL and DEVMEM bridge modules. These entry points
+ * encapsulate from the TL shared API whether a direct bridge or an indirect
+ * (ioctl) bridge is used.
+ * One reason for needing this layer centres around the fact that some of the
+ * API functions make multiple bridge calls and the logic that glues these
+ * together is common regardless of client location. Further, this layer has
+ * allowed the defensive coding that checks parameters to move into the PVR
+ * API layer where untrusted clients enter, giving a more efficient KM code path.
+ */
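+
+/* USAGE SKETCH (illustrative only; "MyStream" and the omitted error
+ * handling are hypothetical). A typical reader-side sequence, assuming
+ * hSrvHandle is a valid services connection:
+ *
+ *   IMG_HANDLE hSD;
+ *   IMG_PBYTE  pbData;
+ *   IMG_UINT32 ui32Len;
+ *
+ *   eError = TLClientOpenStream(hSrvHandle, "MyStream",
+ *                               PVRSRV_STREAM_FLAG_OPEN_WAIT, &hSD);
+ *   eError = TLClientAcquireData(hSrvHandle, hSD, &pbData, &ui32Len);
+ *   ... consume ui32Len bytes at pbData ...
+ *   eError = TLClientReleaseData(hSrvHandle, hSD);
+ *   eError = TLClientCloseStream(hSrvHandle, hSD);
+ *
+ * Acquire and Release must alternate; a second Acquire without a Release
+ * returns PVRSRV_ERROR_RETRY (see TLClientAcquireData below).
+ */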
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "tlclient.h"
+#include "pvrsrv_tlcommon.h"
+#include "client_pvrtl_bridge.h"
+
+/* Defines/Constants
+ */
+
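+/* Sentinel stored in uiReadOffset/uiReadLen to mean "no acquire
+ * outstanding" */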
+#define NO_ACQUIRE             0xffffffffU
+
+/* User-side stream descriptor structure.
+ */
+typedef struct _TL_STREAM_DESC_
+{
+	/* Handle on kernel-side stream descriptor */
+	IMG_HANDLE		hServerSD;
+
+	/* Stream data buffer variables */
+	DEVMEM_MEMDESC*			psUMmemDesc;
+	IMG_PBYTE				pBaseAddr;
+
+	/* Offset in bytes into the circular buffer and valid only after
+	 * an Acquire call and undefined after a release. */
+	IMG_UINT32 	uiReadOffset;
+
+	/* Always a positive integer when the Acquire call returns and a release
+	 * is outstanding. Undefined at all other times. */
+	IMG_UINT32	uiReadLen;
+
+	/* Flag indicating if the RESERVE_TOO_BIG error was already printed.
+	 * It's used to reduce number of errors in kernel log. */
+	IMG_BOOL bPrinted;
+} TL_STREAM_DESC, *PTL_STREAM_DESC;
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientOpenStream(IMG_HANDLE hSrvHandle,
+		const IMG_CHAR* pszName,
+		IMG_UINT32   ui32Mode,
+		IMG_HANDLE*  phSD)
+{
+	PVRSRV_ERROR 				eError = PVRSRV_OK;
+	TL_STREAM_DESC*				psSD = NULL;
+	IMG_HANDLE hTLPMR;
+	IMG_HANDLE hTLImportHandle;
+	IMG_DEVMEM_SIZE_T uiImportSize;
+	IMG_UINT32 ui32MemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE;
+
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(pszName);
+	PVR_ASSERT(phSD);
+	*phSD = NULL;
+
+	/* Allocate memory for the stream descriptor object, initialise with
+	 * "no data read" yet. */
+	psSD = OSAllocZMem(sizeof(TL_STREAM_DESC));
+	if (psSD == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF((PVR_DBG_ERROR, "BridgeTLOpenStream: KM returned %d", eError));
+		goto e0;
+	}
+	psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+
+	/* Send open stream request to kernel server to get stream handle and
+	 * buffer cookie so we can get access to the buffer in this process. */
+	eError = BridgeTLOpenStream(hSrvHandle, pszName, ui32Mode,
+										&psSD->hServerSD, &hTLPMR);
+	if (eError != PVRSRV_OK)
+	{
+		if ((ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT) &&
+			(eError == PVRSRV_ERROR_TIMEOUT))
+		{
+			goto e1;
+		}
+		PVR_LOGG_IF_ERROR(eError, "BridgeTLOpenStream", e1);
+	}
+
+	/* Convert server export cookie into a cookie for use by this client */
+	eError = DevmemMakeLocalImportHandle(hSrvHandle,
+										hTLPMR, &hTLImportHandle);
+	PVR_LOGG_IF_ERROR(eError, "DevmemMakeLocalImportHandle", e2);
+
+	ui32MemFlags |= ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ?
+	        PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0;
+	/* Now convert client cookie into a client handle on the buffer's
+	 * physical memory region */
+	eError = DevmemLocalImport(hSrvHandle,
+	                           hTLImportHandle,
+	                           ui32MemFlags,
+	                           &psSD->psUMmemDesc,
+	                           &uiImportSize,
+	                           "TLBuffer");
+	PVR_LOGG_IF_ERROR(eError, "DevmemLocalImport", e3);
+
+	/* Now map the memory into the virtual address space of this process. */
+	eError = DevmemAcquireCpuVirtAddr(psSD->psUMmemDesc, (void **)
+															&psSD->pBaseAddr);
+	PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e4);
+
+	/* Ignore error, not much that can be done */
+	(void) DevmemUnmakeLocalImportHandle(hSrvHandle,
+			hTLImportHandle);
+
+	/* Return client descriptor handle to caller */
+	*phSD = psSD;
+	return PVRSRV_OK;
+
+/* Clean up post buffer setup */
+e4:
+	DevmemFree(psSD->psUMmemDesc);
+e3:
+	(void) DevmemUnmakeLocalImportHandle(hSrvHandle,
+				hTLImportHandle);
+/* Clean up post stream open */
+e2:
+	BridgeTLCloseStream(hSrvHandle, psSD->hServerSD);
+
+/* Cleanup post allocation of the descriptor object */
+e1:
+	OSFreeMem(psSD);
+
+e0:
+	return eError;
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCloseStream(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD)
+{
+	PVRSRV_ERROR          eError = PVRSRV_OK;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(hSD);
+
+	/* Check the caller provided connection is valid */
+	if (!psSD->hServerSD)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLClientCloseStream: descriptor already closed/not open"));
+		return PVRSRV_ERROR_HANDLE_NOT_FOUND;
+	}
+
+	/* Check if acquire is outstanding, perform release if it is, ignore result
+	 * as there is not much we can do if it is an error other than close */
+	if (psSD->uiReadLen != NO_ACQUIRE)
+	{
+		(void) BridgeTLReleaseData(hSrvHandle, psSD->hServerSD,
+									psSD->uiReadOffset, psSD->uiReadLen);
+		psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+	}
+
+	/* Clean up DevMem resources used for this stream in this client */
+	DevmemReleaseCpuVirtAddr(psSD->psUMmemDesc);
+
+	DevmemFree(psSD->psUMmemDesc);
+
+	/* Send close to server to clean up kernel mode resources for this
+	 * handle and release the memory. */
+	eError = BridgeTLCloseStream(hSrvHandle, psSD->hServerSD);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "BridgeTLCloseStream: KM returned %d", eError));
+		/* Not much we can do with error, fall through to clean up
+		 * return eError; */
+	}
+
+	OSCachedMemSet(psSD, 0x00, sizeof(TL_STREAM_DESC));
+	OSFreeMem(psSD);
+
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientDiscoverStreams(IMG_HANDLE hSrvHandle,
+		const IMG_CHAR *pszNamePattern,
+		IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+		IMG_UINT32 *pui32NumFound)
+{
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(pszNamePattern);
+	PVR_ASSERT(pui32NumFound);
+
+	return BridgeTLDiscoverStreams(hSrvHandle,
+	                               pszNamePattern,
+	                               /* Treat the 2D array of names as a flat
+	                                * one-dimensional buffer for the bridge */
+	                               *pui32NumFound * PRVSRVTL_MAX_STREAM_NAME_SIZE,
+	                               (IMG_CHAR *) aszStreams,
+	                               pui32NumFound);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD,
+		IMG_UINT8 **ppui8Data,
+		IMG_UINT32 ui32Size)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+	IMG_UINT32 ui32BufferOffset, ui32Dummy;
+
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(hSD);
+	PVR_ASSERT(ppui8Data);
+	PVR_ASSERT(ui32Size);
+
+	eError = BridgeTLReserveStream(hSrvHandle, psSD->hServerSD,
+	                               &ui32BufferOffset, ui32Size, ui32Size,
+	                               &ui32Dummy);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	*ppui8Data = psSD->pBaseAddr + ui32BufferOffset;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream2(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD,
+		IMG_UINT8 **ppui8Data,
+		IMG_UINT32 ui32Size,
+		IMG_UINT32 ui32SizeMin,
+		IMG_UINT32 *pui32Available)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+	IMG_UINT32 ui32BufferOffset;
+
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(hSD);
+	PVR_ASSERT(ppui8Data);
+	PVR_ASSERT(ui32Size);
+
+	eError = BridgeTLReserveStream(hSrvHandle, psSD->hServerSD,
+	                               &ui32BufferOffset, ui32Size, ui32SizeMin,
+	                               pui32Available);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	*ppui8Data = psSD->pBaseAddr + ui32BufferOffset;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCommitStream(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD,
+		IMG_UINT32 ui32Size)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(hSD);
+	PVR_ASSERT(ui32Size);
+
+	eError = BridgeTLCommitStream(hSrvHandle, psSD->hServerSD, ui32Size);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientAcquireData(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE  hSD,
+		IMG_PBYTE*  ppPacketBuf,
+		IMG_UINT32* pui32BufLen)
+{
+	PVRSRV_ERROR 		  eError = PVRSRV_OK;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(hSD);
+	PVR_ASSERT(ppPacketBuf);
+	PVR_ASSERT(pui32BufLen);
+
+	/* Check Acquire has not been called twice in a row without a release */
+	if (psSD->uiReadOffset != NO_ACQUIRE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLClientAcquireData: acquire already outstanding"));
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	*pui32BufLen = 0;
+	/* Ask the kernel server for the next chunk of data to read */
+	eError = BridgeTLAcquireData(hSrvHandle, psSD->hServerSD,
+									&psSD->uiReadOffset, &psSD->uiReadLen);
+	if (eError != PVRSRV_OK)
+	{
+		if ((eError != PVRSRV_ERROR_RESOURCE_UNAVAILABLE) &&
+			(eError != PVRSRV_ERROR_TIMEOUT))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "BridgeTLAcquireData: KM returned %d", eError));
+		}
+		psSD->uiReadOffset = psSD->uiReadLen = NO_ACQUIRE;
+		return eError;
+	}
+
+	/* Return the data offset and length to the caller if bytes are available
+	 * to be read. Could be zero for non-blocking mode. */
+	if (psSD->uiReadLen)
+	{
+		*ppPacketBuf = psSD->pBaseAddr + psSD->uiReadOffset;
+		*pui32BufLen = psSD->uiReadLen;
+	}
+	else
+	{
+		/* On non-blocking, zero length data could be returned from server
+		 * Which is basically a no-acquire operation */
+		*ppPacketBuf = 0;
+		*pui32BufLen = 0;
+	}
+
+	return eError;
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseData(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(hSD);
+
+	/* the previous acquire did not return any data, this is a no-operation */
+	if (psSD->uiReadLen == 0)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Check release has not been called twice in a row without an acquire */
+	if (psSD->uiReadOffset == NO_ACQUIRE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLClientReleaseData_: no acquire to release"));
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	/* Inform the kernel to release the data from the buffer */
+	eError = BridgeTLReleaseData(hSrvHandle, psSD->hServerSD,
+										psSD->uiReadOffset, psSD->uiReadLen);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "BridgeTLReleaseData: KM returned %d", eError));
+		/* Need to continue to keep client data consistent, fall through
+		 * return eError */
+	}
+
+	/* Reset state to indicate no outstanding acquire */
+	psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientWriteData(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD,
+		IMG_UINT32 ui32Size,
+		IMG_BYTE *pui8Data)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(hSD);
+	PVR_ASSERT(ui32Size);
+	PVR_ASSERT(pui8Data);
+
+	eError = BridgeTLWriteData(hSrvHandle, psSD->hServerSD, ui32Size, pui8Data);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_STREAM_RESERVE_TOO_BIG)
+		{
+			if (!psSD->bPrinted)
+			{
+				psSD->bPrinted = IMG_TRUE;
+				PVR_DPF((PVR_DBG_ERROR, "Not enough space. Failed to write"
+				        " data to the stream (%d).", eError));
+			}
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "TLClientWriteData: KM returned %d",
+			        eError));
+		}
+	}
+
+	return eError;
+}
+
+/******************************************************************************
+ End of file (tlclient.c)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/uniq_key_splay_tree.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/uniq_key_splay_tree.c
new file mode 100644
index 0000000..ddc76449
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/uniq_key_splay_tree.c
@@ -0,0 +1,244 @@
+/*************************************************************************/ /*!
+@File
+@Title          Provides splay-trees.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implementation of splay-trees.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h" /* for OSMemAlloc / OSMemFree */
+#include "osfunc.h" /* for OSMemFree */
+#include "pvr_debug.h"
+#include "uniq_key_splay_tree.h"
+
+/**
+ * This function performs a simple top down splay
+ *
+ * @param ui32Flags the flags that must splayed to the root (if possible).
+ * @param psTree The tree to splay.
+ * @return the resulting tree after the splay operation.
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVSplay(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree)
+{
+	IMG_SPLAY_TREE sTmp1;
+	IMG_PSPLAY_TREE psLeft;
+	IMG_PSPLAY_TREE psRight;
+	IMG_PSPLAY_TREE psTmp2;
+
+	if (psTree == NULL)
+	{
+		return NULL;
+	}
+	
+	sTmp1.psLeft = NULL;
+	sTmp1.psRight = NULL;
+
+	psLeft = &sTmp1;
+	psRight = &sTmp1;
+	
+	for (;;)
+	{
+		if (ui32Flags < psTree->ui32Flags)
+		{
+			if (psTree->psLeft == NULL)
+			{
+				break;
+			}
+			
+			if (ui32Flags < psTree->psLeft->ui32Flags)
+			{
+				/* if we get to this point, we need to rotate right the tree */
+				psTmp2 = psTree->psLeft;
+				psTree->psLeft = psTmp2->psRight;
+				psTmp2->psRight = psTree;
+				psTree = psTmp2;
+				if (psTree->psLeft == NULL)
+				{
+					break;
+				}
+			}
+
+			/* if we get to this point, we need to link right */
+			psRight->psLeft = psTree;
+			psRight = psTree;
+			psTree = psTree->psLeft;
+		}
+		else
+		{
+			if (ui32Flags > psTree->ui32Flags)
+			{
+				if (psTree->psRight == NULL)
+				{
+					break;
+				}
+
+				if (ui32Flags > psTree->psRight->ui32Flags)
+				{
+					/* if we get to this point, we need to rotate left the tree */
+					psTmp2 = psTree->psRight;
+					psTree->psRight = psTmp2->psLeft;
+					psTmp2->psLeft = psTree;
+					psTree = psTmp2;
+					if (psTree->psRight == NULL)
+					{
+						break;
+					}
+				}
+
+				/* if we get to this point, we need to link left */
+				psLeft->psRight = psTree;
+				psLeft = psTree;
+				psTree = psTree->psRight;
+			}
+			else
+			{
+				break;
+			}
+		}
+	}
+
+	/* at this point re-assemble the tree */
+	psLeft->psRight = psTree->psLeft;
+	psRight->psLeft = psTree->psRight;
+	psTree->psLeft = sTmp1.psRight;
+	psTree->psRight = sTmp1.psLeft;
+	return psTree;
+}
+
+
+/**
+ * This function inserts a node into the tree (unless it is already present,
+ * in which case it is equivalent to performing only a splay operation).
+ *
+ * @param ui32Flags the key of the new node
+ * @param psTree The tree into which one wants to add a new node
+ * @return The resulting tree with the new node in it
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVInsert(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree) 
+{
+	IMG_PSPLAY_TREE psNew;
+
+	if (psTree != NULL)
+	{
+		psTree = PVRSRVSplay(ui32Flags, psTree);
+		if (psTree->ui32Flags == ui32Flags)
+		{
+			return psTree;
+		}
+	}
+	
+	psNew = (IMG_PSPLAY_TREE) OSAllocMem(sizeof(IMG_SPLAY_TREE));
+	if (psNew == NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR, "Error: failed to allocate memory to add a node to the splay tree."));
+		return NULL;
+	}
+	
+	psNew->ui32Flags = ui32Flags;
+	OSCachedMemSet(&(psNew->buckets[0]), 0, sizeof(psNew->buckets));
+
+#if defined(PVR_CTZLL)
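+	/* All buckets start empty: clear one bit per bucket and set the unused
+	 * high bits so the mapping value can never be all zeroes. */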
+	psNew->bHasEltsMapping = ~(((IMG_ELTS_MAPPINGS) 1 << (sizeof(psNew->buckets) / (sizeof(psNew->buckets[0])))) - 1);
+#endif
+
+	if (psTree == NULL)
+	{
+		psNew->psLeft  = NULL;
+		psNew->psRight = NULL;
+		return psNew;
+	}
+
+	if (ui32Flags < psTree->ui32Flags)
+	{
+		psNew->psLeft  = psTree->psLeft;
+		psNew->psRight = psTree;
+		psTree->psLeft = NULL;
+	}
+	else
+	{
+		psNew->psRight  = psTree->psRight;
+		psNew->psLeft   = psTree;
+		psTree->psRight = NULL;
+	}
+
+	return psNew;
+}
+
+
+/**
+ * Deletes a node from the tree (unless it is not there, in which case it is
+ * equivalent to a splay operation)
+ *
+ * @param ui32Flags the value of the node to remove
+ * @param psTree the tree from which the node must be removed
+ * @return the resulting tree
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVDelete(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree)
+{
+	IMG_PSPLAY_TREE psTmp;
+
+	if (psTree == NULL)
+	{
+		return NULL;
+	}
+
+	psTree = PVRSRVSplay(ui32Flags, psTree);
+	if (ui32Flags == psTree->ui32Flags)
+	{
+		/* The value was present in the tree */
+		if (psTree->psLeft == NULL)
+		{
+			psTmp = psTree->psRight;
+		}
+		else
+		{
+			psTmp = PVRSRVSplay(ui32Flags, psTree->psLeft);
+			psTmp->psRight = psTree->psRight;
+		}
+		OSFreeMem(psTree);
+		return psTmp;
+	}
+
+	/* the value was not present in the tree, so just return it as is (after
+	 * the splay) */
+	return psTree;
+}
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/uniq_key_splay_tree.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/uniq_key_splay_tree.h
new file mode 100644
index 0000000..945d93c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/common/uniq_key_splay_tree.h
@@ -0,0 +1,86 @@
+/*************************************************************************/ /*!
+@File
+@Title          Splay trees interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides debug functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef UNIQ_KEY_SPLAY_TREE_H_
+#define UNIQ_KEY_SPLAY_TREE_H_
+
+#include "img_types.h"
+#include "pvr_intrinsics.h"
+
+#if defined(PVR_CTZLL)
+  /* map the is_bucket_n_free to an int.
+   * This way, the driver can find the first non empty without loop
+   */
+  typedef IMG_UINT64 IMG_ELTS_MAPPINGS;
+#endif
+
+/* Number of heads of lists of free boundary tags, indexed by pvr_log2 of
+   the boundary tag size */
+#define FREE_TABLE_LIMIT 40
+
+struct _BT_;
+
+typedef struct img_splay_tree 
+{
+	/* left child/subtree */
+    struct img_splay_tree * psLeft;
+
+	/* right child/subtree */
+    struct img_splay_tree * psRight;
+
+    /* Flags to match on this span, used as the key. */
+    IMG_UINT32 ui32Flags;
+#if defined(PVR_CTZLL)
+	/* each bit of this int is a boolean telling if the corresponding
+	   bucket is empty or not */
+    IMG_ELTS_MAPPINGS bHasEltsMapping;
+#endif
+	struct _BT_ * buckets[FREE_TABLE_LIMIT];
+} IMG_SPLAY_TREE, *IMG_PSPLAY_TREE;
+
+IMG_PSPLAY_TREE PVRSRVSplay(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVInsert(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVDelete(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
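+
+/* Illustrative usage (hypothetical caller): every call may move a different
+ * node to the root, so the return value must always replace the caller's
+ * root pointer:
+ *
+ *   IMG_PSPLAY_TREE psRoot = NULL;
+ *   psRoot = PVRSRVInsert(ui32Flags, psRoot);   (add, or splay, the key)
+ *   psRoot = PVRSRVSplay(ui32Flags, psRoot);    (bring the key to the root)
+ *   psRoot = PVRSRVDelete(ui32Flags, psRoot);   (remove the key if present)
+ */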
+
+
+#endif /* !UNIQ_KEY_SPLAY_TREE_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/devices/rgx/rgx_compat_bvnc.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/devices/rgx/rgx_compat_bvnc.c
new file mode 100644
index 0000000..c361c1d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/devices/rgx/rgx_compat_bvnc.c
@@ -0,0 +1,218 @@
+/*************************************************************************/ /*!
+@File           rgx_compat_bvnc.c
+@Title          BVNC compatibility check utilities
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used for packing BNC and V.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgx_compat_bvnc.h"
+#if defined(RGX_FIRMWARE)
+#include "rgxfw_utils.h"
+#elif !defined(RGX_BUILD_BINARY)
+#include "pvr_debug.h"
+#endif
+
+#if defined(RGX_FIRMWARE)
+#define PVR_COMPAT_ASSERT RGXFW_ASSERT
+#elif !defined(RGX_BUILD_BINARY)
+#define PVR_COMPAT_ASSERT PVR_ASSERT
+#else
+#include <assert.h>
+#define PVR_COMPAT_ASSERT assert
+#endif
+
+/**************************************************************************//**
+ * Local equivalent of the C library strlen function.
+ *****************************************************************************/
+static INLINE IMG_UINT32 OSStringLength(const IMG_CHAR* pszInput)
+{
+	const IMG_CHAR* pszTemp = pszInput;
+
+	while (*pszTemp)
+		pszTemp++;
+
+	return (pszTemp - pszInput);
+}
+
+/**************************************************************************//**
+ * Utility function for packing BNC
+ *****************************************************************************/
+static INLINE IMG_UINT64 rgx_bnc_pack(IMG_UINT32 ui32B, IMG_UINT32 ui32N,
+														IMG_UINT32 ui32C)
+{
+	/*
+	 * Test for input B, N and C exceeding max bit width.
+	 */
+	PVR_COMPAT_ASSERT((ui32B & (~(RGX_BVNC_PACK_MASK_B >> RGX_BVNC_PACK_SHIFT_B))) == 0);
+	PVR_COMPAT_ASSERT((ui32N & (~(RGX_BVNC_PACK_MASK_N >> RGX_BVNC_PACK_SHIFT_N))) == 0);
+	PVR_COMPAT_ASSERT((ui32C & (~(RGX_BVNC_PACK_MASK_C >> RGX_BVNC_PACK_SHIFT_C))) == 0);
+
+	return (((IMG_UINT64)ui32B << RGX_BVNC_PACK_SHIFT_B) |
+			((IMG_UINT64)ui32N << RGX_BVNC_PACK_SHIFT_N) |
+			((IMG_UINT64)ui32C << RGX_BVNC_PACK_SHIFT_C));
+}
+
+/**************************************************************************//**
+ * Utility function for packing BNC and V to be used by compatibility check.
+ * BNC is packed into 48 bit format.
+ * If the array pointed to by pszV is a string that is shorter than 
+ * ui32OutVMaxLen characters, null characters are appended to the copy in the
+ * array pointed to by pszOutV, until 'ui32OutVMaxLen' characters in all have
+ * been written.
+ *
+ * @param       pui64OutBNC       Output containing packed BNC.
+ * @param       pszOutV           Output containing version string.
+ * @param       ui32OutVMaxLen    Max characters that can be written to 
+                                  pszOutV (excluding terminating null character)
+ * @param       ui32B             Input 'B' value
+ * @param       pszV              Input 'V' string
+ * @param       ui32N             Input 'N' value
+ * @param       ui32C             Input 'C' value
+ * @return      None
+ *****************************************************************************/
+void rgx_bvnc_packed(IMG_UINT64 *pui64OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+					 IMG_UINT32 ui32B, IMG_CHAR *pszV, IMG_UINT32 ui32N, IMG_UINT32 ui32C)
+{
+	*pui64OutBNC = rgx_bnc_pack(ui32B, ui32N, ui32C);
+
+	if (!pszOutV)
+		return;
+
+	if (pszV)
+	{
+		/*
+		 * Assert can fail for two reasons
+		 * 1. Caller is passing invalid 'V' string or
+		 * 2. Dest buffer does not have enough memory allocated for max 'V' size.
+		 */
+		PVR_COMPAT_ASSERT(OSStringLength(pszV) <= ui32OutVMaxLen);
+
+
+		for (; ui32OutVMaxLen > 0 && *pszV != '\0'; --ui32OutVMaxLen)
+		{
+			/* When copying the V, omit any non-numeric characters as these
+			 * would cause the compatibility check against the V read from
+			 * HW to fail
+			 */
+			if ((*pszV >= '0') && (*pszV <= '9'))
+			{
+				*pszOutV++ = *pszV++;
+			}
+			else
+			{
+				pszV++;
+			}
+		}
+	}
+
+	do
+	{
+		*pszOutV++ = '\0';
+	} while (ui32OutVMaxLen-- > 0);
+}
+
+/**************************************************************************//**
+ * Utility function for packing BNC and V to be used by the compatibility check.
+ * Input B, N and C are packed into 48-bit format.
+ * Input V is converted into a string. If the number of characters required to
+ * represent the 16-bit wide version number is less than ui32OutVMaxLen, then
+ * null characters are appended to pszOutV until ui32OutVMaxLen characters in
+ * all have been written.
+ *
+ * @param       pui64OutBNC       Output containing packed BNC.
+ * @param       pszOutV           Output containing version string.
+ * @param       ui32OutVMaxLen    Max characters that can be written to 
+                                  pszOutV (excluding terminating null character)
+ * @param       ui32B             Input 'B' value (16 bit wide)
+ * @param       ui32V             Input 'V' value (16 bit wide)
+ * @param       ui32N             Input 'N' value (16 bit wide)
+ * @param       ui32C             Input 'C' value (16 bit wide)
+ * @return      None
+ *****************************************************************************/
+void rgx_bvnc_pack_hw(IMG_UINT64 *pui64OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+					  IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C)
+{
+	/*
+	 * Allocate space for max digits required to represent 16 bit wide version
+	 * number (including NULL terminating character).
+	 */
+	IMG_CHAR aszBuf[6];
+	IMG_CHAR *pszPointer = aszBuf;
+
+	*pui64OutBNC = rgx_bnc_pack(ui32B, ui32N, ui32C);
+
+	if (!pszOutV)
+		return;
+
+	/*
+	 * Function only supports 16 bits wide version number.
+	 */
+	PVR_COMPAT_ASSERT((ui32V & ~0xFFFF) == 0);
+
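+	/* Advance pszPointer past the number of decimal digits ui32V needs,
+	 * write the terminator, then fill the digits in back to front; the
+	 * pre-written '0' means ui32V == 0 still produces "0". */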
+	if (ui32V > 9999)
+		pszPointer += 5;
+	else if (ui32V > 999)
+		pszPointer += 4;
+	else if (ui32V > 99)
+		pszPointer += 3;
+	else if (ui32V > 9)
+		pszPointer += 2;
+	else
+		pszPointer += 1;
+
+	*pszPointer-- = '\0';
+	*pszPointer = '0';
+	
+	while (ui32V > 0)
+	{
+		*pszPointer-- = (ui32V % 10) + '0';
+		ui32V /= 10;
+	}
+
+	for (pszPointer = aszBuf; ui32OutVMaxLen > 0 && *pszPointer != '\0'; --ui32OutVMaxLen)
+		*pszOutV++ = *pszPointer++;
+
+	/*
+	 * Append NULL characters.
+	 */
+	do
+	{
+		*pszOutV++ = '\0';
+	} while (ui32OutVMaxLen-- > 0);
+}
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/devices/rgx/rgx_compat_bvnc.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/devices/rgx/rgx_compat_bvnc.h
new file mode 100644
index 0000000..51e5d76
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/devices/rgx/rgx_compat_bvnc.h
@@ -0,0 +1,136 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for BVNC manipulation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used for packing and comparing BVNC values
+                as part of the compatibility check.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_COMPAT_BVNC_H__)
+#define __RGX_COMPAT_BVNC_H__
+
+#include "img_types.h"
+
+/* 64bit endian converting macros */
+#if defined(__BIG_ENDIAN__)
+#define RGX_INT64_TO_BE(N) (N)
+#define RGX_INT64_FROM_BE(N) (N)
+#define RGX_INT32_TO_BE(N) (N)
+#define RGX_INT32_FROM_BE(N) (N)
+#else
+#define RGX_INT64_TO_BE(N)        \
+	((((N) >> 56)   & 0xff)       \
+	 | (((N) >> 40) & 0xff00)     \
+	 | (((N) >> 24) & 0xff0000)   \
+	 | (((N) >> 8)  & 0xff000000) \
+	 | ((N)                << 56) \
+	 | (((N) & 0xff00)     << 40) \
+	 | (((N) & 0xff0000)   << 24) \
+	 | (((N) & 0xff000000) << 8))
+#define RGX_INT64_FROM_BE(N) RGX_INT64_TO_BE(N)
+
+#define RGX_INT32_TO_BE(N)   \
+	((((N) >> 24)  & 0xff)   \
+	 | (((N) >> 8) & 0xff00) \
+	 | ((N)           << 24) \
+	 | (((N) & 0xff00) << 8))
+#define RGX_INT32_FROM_BE(N) RGX_INT32_TO_BE(N)
+#endif
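
For reference, the little-endian fallback above is a plain byte swap; the following standalone check (an illustration, not part of the patch, with the IMG_* types swapped for <stdint.h> equivalents) confirms the 32-bit arithmetic:

```c
/* Standalone sanity check of the little-endian byte-swap path above.
 * SWAP32 mirrors RGX_INT32_TO_BE with plain C types for illustration. */
#include <assert.h>
#include <stdint.h>

#define SWAP32(N)            \
	((((N) >> 24)  & 0xff)   \
	 | (((N) >> 8) & 0xff00) \
	 | ((N)           << 24) \
	 | (((N) & 0xff00) << 8))

int main(void)
{
	uint32_t v = 0x11223344u;

	assert(SWAP32(v) == 0x44332211u);   /* bytes reversed             */
	assert(SWAP32(SWAP32(v)) == v);     /* swapping twice round-trips */
	return 0;
}
```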
+
+/******************************************************************************
+ * RGX version packed as 16-bit B, N and C fields in a 64-bit word, plus a string (V), used by the Compatibility Check
+ *****************************************************************************/
+
+#define RGX_BVNC_PACK_SHIFT_B 32
+#define RGX_BVNC_PACK_SHIFT_N 16
+#define RGX_BVNC_PACK_SHIFT_C 0
+
+#define RGX_BVNC_PACK_MASK_B (IMG_UINT64_C(0x0000FFFF00000000))
+#define RGX_BVNC_PACK_MASK_N (IMG_UINT64_C(0x00000000FFFF0000))
+#define RGX_BVNC_PACK_MASK_C (IMG_UINT64_C(0x000000000000FFFF))
+
+#define RGX_BVNC_PACKED_EXTR_B(BVNC) ((IMG_UINT32)(((BVNC).ui64BNC & RGX_BVNC_PACK_MASK_B) >> RGX_BVNC_PACK_SHIFT_B))
+#define RGX_BVNC_PACKED_EXTR_V(BVNC) ((BVNC).aszV)
+#define RGX_BVNC_PACKED_EXTR_N(BVNC) ((IMG_UINT32)(((BVNC).ui64BNC & RGX_BVNC_PACK_MASK_N) >> RGX_BVNC_PACK_SHIFT_N))
+#define RGX_BVNC_PACKED_EXTR_C(BVNC) ((IMG_UINT32)(((BVNC).ui64BNC & RGX_BVNC_PACK_MASK_C) >> RGX_BVNC_PACK_SHIFT_C))
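
To make the layout concrete, here is a small standalone sketch (not driver code) that packs B/N/C into the 64-bit word exactly as the shift/mask definitions above describe, and extracts them the way the RGX_BVNC_PACKED_EXTR_* macros do on the struct's ui64BNC member:

```c
/* Sketch of the 48-bit B/N/C packing implied by the macros above:
 * B in bits 47..32, N in bits 31..16, C in bits 15..0 of ui64BNC. */
#include <assert.h>
#include <stdint.h>

static uint64_t pack_bnc(uint16_t b, uint16_t n, uint16_t c)
{
	return ((uint64_t)b << 32) | ((uint64_t)n << 16) | (uint64_t)c;
}

int main(void)
{
	uint64_t ui64BNC = pack_bnc(4, 6, 250);  /* e.g. a 4.V.6.250 core */

	assert(((ui64BNC >> 32) & 0xFFFF) == 4);    /* EXTR_B equivalent */
	assert(((ui64BNC >> 16) & 0xFFFF) == 6);    /* EXTR_N equivalent */
	assert((ui64BNC & 0xFFFF) == 250);          /* EXTR_C equivalent */
	return 0;
}
```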
+
+#if !defined(RGX_SKIP_BVNC_CHECK)
+#define RGX_BVNC_EQUAL(L,R,all,version,lenmax,bnc,v) do {													\
+										(lenmax) = IMG_FALSE;												\
+										(bnc) = IMG_FALSE;													\
+										(v) = IMG_FALSE;													\
+										(version) = ((L).ui32LayoutVersion == (R).ui32LayoutVersion);		\
+										if (version)														\
+										{																	\
+											(lenmax) = ((L).ui32VLenMax == (R).ui32VLenMax);				\
+										}																	\
+										if (lenmax)															\
+										{																	\
+											(bnc) = ((L).ui64BNC == (R).ui64BNC);							\
+										}																	\
+										if (bnc)															\
+										{																	\
+											(L).aszV[(L).ui32VLenMax] = '\0';								\
+											(R).aszV[(R).ui32VLenMax] = '\0';								\
+											(v) = (OSStringCompare((L).aszV, (R).aszV)==0);					\
+										}																	\
+										(all) = (version) && (lenmax) && (bnc) && (v);						\
+									} while (0)
+#else
+#define RGX_BVNC_EQUAL(L,R,all,version,lenmax,bnc,v) do {	\
+						(all)     = IMG_TRUE;				\
+						(version) = IMG_TRUE;				\
+						(lenmax)  = IMG_TRUE;				\
+						(bnc)     = IMG_TRUE;				\
+						(v)       = IMG_TRUE;				\
+					} while (0)
+#endif
+
+void rgx_bvnc_packed(IMG_UINT64 *pui64OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+					 IMG_UINT32 ui32B, IMG_CHAR *pszV, IMG_UINT32 ui32N, IMG_UINT32 ui32C);
+void rgx_bvnc_pack_hw(IMG_UINT64 *pui64OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+					  IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C);
+
+#endif /*  __RGX_COMPAT_BVNC_H__ */
+
+/******************************************************************************
+ End of file (rgx_compat_bvnc.h)
+******************************************************************************/
+
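
As a usage note for this header: RGX_BVNC_EQUAL performs a layered comparison — layout version, then V-length, then the packed BNC word, then the V string. A minimal mirror of that flow is sketched below; the struct is hypothetical, the real type with these fields lives elsewhere in the driver:

```c
/* Hypothetical mirror of the fields RGX_BVNC_EQUAL dereferences; the
 * real structure is defined elsewhere in the driver sources. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct bvnc_info {
	uint32_t ui32LayoutVersion;
	uint32_t ui32VLenMax;   /* must be < sizeof(aszV) */
	uint64_t ui64BNC;
	char     aszV[16];
};

/* Same short-circuit order as the macro: each stage only runs when the
 * previous one matched. */
static bool bvnc_equal(struct bvnc_info *l, struct bvnc_info *r)
{
	if (l->ui32LayoutVersion != r->ui32LayoutVersion)
		return false;
	if (l->ui32VLenMax != r->ui32VLenMax)
		return false;
	if (l->ui64BNC != r->ui64BNC)
		return false;
	l->aszV[l->ui32VLenMax] = '\0';  /* force termination, as the macro does */
	r->aszV[r->ui32VLenMax] = '\0';
	return strcmp(l->aszV, r->aszV) == 0;
}
```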
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/devices/rgx/rgx_hwperf_table.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/devices/rgx/rgx_hwperf_table.c
new file mode 100644
index 0000000..f79f444
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/devices/rgx/rgx_hwperf_table.c
@@ -0,0 +1,669 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance counter table
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX HW Performance counters table
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgx_fwif_hwperf.h"
+#include "rgxdefs_km.h"
+#include "rgx_hwperf_table.h"
+
+/* Includes needed for PVRSRVKM (Server) context */
+#	include "rgx_bvnc_defs_km.h"
+#	if defined(__KERNEL__)
+#		include "rgxdevice.h"
+#	endif
+
+/* Shared compile-time context ASSERT macro */
+#if defined(RGX_FIRMWARE)
+#	include "rgxfw_utils.h"
+/*  firmware context */
+#	define DBG_ASSERT(_c) RGXFW_ASSERT((_c))
+#else
+#	include "pvr_debug.h"
+/*  host client/server context */
+#	define DBG_ASSERT(_c) PVR_ASSERT((_c))
+#endif
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered()
+
+ Referenced in gasCntBlkTypeModel[] table below and only called from
+ RGX_FIRMWARE run-time context. Therefore compile time configuration is used.
+ *****************************************************************************/
+
+#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS)
+#	include "rgxfw_pow.h"
+#	include "rgxfw_utils.h"
+
+static IMG_BOOL rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+	PVR_UNREFERENCED_PARAMETER(eBlkType);
+	PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+	/* S7XT: JONES */
+	return (eBlkType == RGX_CNTBLK_ID_JONES) ? IMG_TRUE : IMG_FALSE;
+#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+	/* S6XT: TA, TORNADO */
+	return IMG_TRUE;
+#else
+	/* S6  : TA, HUB, RASTER (RASCAL) */
+	return (gsPowCtl.ePowState & RGXFW_POW_ST_RD_ON) ? IMG_TRUE : IMG_FALSE;
+#endif
+}
+
+/* Only use conditional compilation when counter blocks appear in different
+ * islands for different Rogue families.
+ */
+static IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+	IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_dusts_num();
+
+	if ((gsPowCtl.ePowState & RGXFW_POW_ST_RD_ON) &&
+			(ui32NumDustsEnabled > 0))
+	{
+#if defined(RGX_FEATURE_DYNAMIC_DUST_POWER)
+		IMG_UINT32 ui32NumUscEnabled = ui32NumDustsEnabled*2;
+
+		switch (eBlkType)
+		{
+		case RGX_CNTBLK_ID_TPU_MCU0:                   /* S6 and S6XT */
+#if defined (RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+		case RGX_CNTBLK_ID_TEXAS0:                     /* S7 */
+#endif
+			if (ui8UnitId >= ui32NumDustsEnabled)
+			{
+				return IMG_FALSE;
+			}
+			break;
+		case RGX_CNTBLK_ID_USC0:                       /* S6, S6XT, S7 */
+		case RGX_CNTBLK_ID_PBE0:                       /* S7 */
+			/* Handle single cluster cores */
+			if (ui8UnitId >= ((ui32NumUscEnabled > RGX_FEATURE_NUM_CLUSTERS) ? RGX_FEATURE_NUM_CLUSTERS : ui32NumUscEnabled))
+			{
+				return IMG_FALSE;
+			}
+			break;
+		case RGX_CNTBLK_ID_BLACKPEARL0:                /* S7 */
+		case RGX_CNTBLK_ID_RASTER0:                    /* S6XT */
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+		case RGX_CNTBLK_ID_TEXAS0:                     /* S6XT */
+#endif
+			if (ui8UnitId >= (RGX_REQ_NUM_PHANTOMS(ui32NumUscEnabled)))
+			{
+				return IMG_FALSE;
+			}
+			break;
+		default:
+			RGXFW_ASSERT(IMG_FALSE);  /* should never get here, table error */
+			break;
+		}
+#else
+		/* Always true, no fused DUSTs, all powered so do not check unit */
+		PVR_UNREFERENCED_PARAMETER(eBlkType);
+		PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+#endif
+	}
+	else
+	{
+		return IMG_FALSE;
+	}
+	return IMG_TRUE;
+}
+
+#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
+
+# define rgxfw_hwperf_pow_st_direct   ((void*)NULL)
+# define rgxfw_hwperf_pow_st_indirect ((void*)NULL)
+
+#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
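
As a worked example of the dust arithmetic in rgxfw_hwperf_pow_st_indirect() above: each enabled dust drives two USCs, and the USC/PBE check clamps to the cluster count. A sketch, assuming a core where RGX_FEATURE_NUM_CLUSTERS is 4 (an assumption for illustration):

```c
/* Worked example of the USC unit gating above (sketch only). */
#include <assert.h>
#include <stdint.h>

#define NUM_CLUSTERS 4u  /* stand-in for RGX_FEATURE_NUM_CLUSTERS */

static int usc_unit_powered(uint8_t ui8UnitId, uint32_t ui32NumDustsEnabled)
{
	uint32_t ui32NumUscEnabled = ui32NumDustsEnabled * 2;
	uint32_t ui32Limit = (ui32NumUscEnabled > NUM_CLUSTERS)
	                         ? NUM_CLUSTERS : ui32NumUscEnabled;

	return ui8UnitId < ui32Limit;
}

int main(void)
{
	assert(usc_unit_powered(1, 1));   /* 1 dust  -> USC 0..1 powered */
	assert(!usc_unit_powered(2, 1));
	assert(usc_unit_powered(3, 2));   /* 2 dusts -> USC 0..3 powered */
	assert(!usc_unit_powered(4, 3));  /* clamped at NUM_CLUSTERS     */
	return 0;
}
```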
+
+#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_RAY_TRACING)
+
+/* Currently there is no power island control in the firmware for ray tracing
+ * so we currently assume these blocks are always powered. */
+static IMG_BOOL rgxfw_hwperf_pow_st_gandalf(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+	PVR_UNREFERENCED_PARAMETER(eBlkType);
+	PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+
+	return IMG_TRUE;
+}
+
+#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_RAY_TRACING) */
+
+# define rgxfw_hwperf_pow_st_gandalf  ((void*)NULL)
+
+#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_RAY_TRACING) */
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end
+ *****************************************************************************/
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start
+
+ Referenced in gasCntBlkTypeModel[] table below and called from all build
+ contexts:
+ RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server).
+
+ Therefore each function has two implementations, one for compile time and one
+ run time configuration depending on the context. The functions will inform the
+ caller whether this block is valid for this particular RGX device. Other
+ run-time dependent data is returned in psRtInfo for the caller to use.
+ *****************************************************************************/
+
+/* Used for block types: USC */
+static IMG_BOOL rgx_hwperf_blk_present_perfbus(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	DBG_ASSERT(psBlkTypeDesc != NULL);
+	DBG_ASSERT(psRtInfo != NULL);
+	DBG_ASSERT(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_USC0);
+
+#if defined(__KERNEL__) /* Server context */
+	PVR_ASSERT(pvDev_km != NULL);
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+		if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PERFBUS_BIT_MASK) != 0)
+		{
+			psRtInfo->uiBitSelectPreserveMask = 0x0000;
+			psRtInfo->uiNumUnits = psDevInfo->sDevFeatureCfg.ui32NumClusters;
+			return IMG_TRUE;
+		}
+	}
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+#else /* FW context */
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_PERFBUS)
+	psRtInfo->uiBitSelectPreserveMask = 0x0000;
+	psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+	return IMG_TRUE;
+# else
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+	return IMG_FALSE;
+}
+
+/* Used for block types: Direct RASTERISATION, HUB */
+static IMG_BOOL rgx_hwperf_blk_present_not_clustergrouping(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	DBG_ASSERT(psBlkTypeDesc != NULL);
+	DBG_ASSERT(psRtInfo != NULL);
+	DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RASTER) ||
+		(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_HUB));
+
+#if defined(__KERNEL__) /* Server context */
+	PVR_ASSERT(pvDev_km != NULL);
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+		if (((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK) == 0) &&
+				((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PERFBUS_BIT_MASK) != 0))
+		{
+			psRtInfo->uiNumUnits = 1;
+			if (((psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_44885_BIT_MASK) != 0) &&
+				(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RASTER))
+			{
+				psRtInfo->uiBitSelectPreserveMask = 0x7C00;
+			}
+			else
+			{
+				psRtInfo->uiBitSelectPreserveMask = 0x0000;
+			}
+			return IMG_TRUE;
+		}
+	}
+#else /* FW context */
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS)
+	psRtInfo->uiNumUnits = 1;
+#  if defined(HW_ERN_44885)
+	if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RASTER)
+	{
+		psRtInfo->uiBitSelectPreserveMask = 0x7C00;
+	}
+	else
+#  endif
+	{
+		psRtInfo->uiBitSelectPreserveMask = 0x0000;
+	}
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	return IMG_TRUE;
+# else
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+	return IMG_FALSE;
+}
+
+/* Used for block types: BF, BT, RT, SH, BX_TU */
+static IMG_BOOL rgx_hwperf_blk_present_raytracing(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	DBG_ASSERT(psBlkTypeDesc != NULL);
+	DBG_ASSERT(psRtInfo != NULL);
+	DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BF) ||
+		(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BT) ||
+		(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RT) ||
+		(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_SH) ||
+		(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BX_TU0));
+
+#if defined(__KERNEL__) /* Server context */
+	PVR_ASSERT(pvDev_km != NULL);
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+		if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_BIT_MASK) != 0)
+		{
+			psRtInfo->uiBitSelectPreserveMask = 0x0000;
+			/* Exception case, read from table as ray-tracing units do not vary by feature. */
+			psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+			return IMG_TRUE;
+		}
+	}
+#else /* FW context */
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_RAY_TRACING)
+	psRtInfo->uiBitSelectPreserveMask = 0x0000;
+	psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+	DBG_ASSERT(psBlkTypeDesc->uiPerfReg != 0); /* Check for broken config */
+	return IMG_TRUE;
+# else
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+	return IMG_FALSE;
+}
+
+#if defined(__KERNEL__) /* Server context */
+static INLINE IMG_UINT32 rgx_units_indirect_by_phantom(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+	/* Run-time math for RGX_HWPERF_INDIRECT_BY_PHANTOM */
+	return ((psFeatCfg->ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK) == 0) ? 1
+		: (psFeatCfg->ui32NumClusters+3)/4;
+}
+
+static INLINE IMG_UINT32 rgx_units_phantom_indirect_by_dust(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+	/* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST */
+	return MAX((psFeatCfg->ui32NumClusters>>1),1);
+}
+
+static INLINE IMG_UINT32 rgx_units_phantom_indirect_by_cluster(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+	/* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER */
+	return psFeatCfg->ui32NumClusters;
+}
+#endif /* defined(__KERNEL__) */
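
For a feel of the numbers these helpers produce, take a hypothetical core with 6 clusters and cluster grouping present (assumed values, purely for illustration):

```c
/* Worked example (sketch) of the three unit-count formulas above for
 * an assumed 6-cluster core with cluster grouping present. */
#include <assert.h>
#include <stdint.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	uint32_t ui32NumClusters = 6;

	uint32_t uiByPhantom = (ui32NumClusters + 3) / 4;     /* ceil(6/4) = 2 */
	uint32_t uiByDust    = MAX(ui32NumClusters >> 1, 1u); /* 6/2       = 3 */
	uint32_t uiByCluster = ui32NumClusters;               /*             6 */

	assert(uiByPhantom == 2 && uiByDust == 3 && uiByCluster == 6);
	return 0;
}
```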
+
+/* Used for block types: TORNADO, TEXAS, Indirect RASTERISATION */
+static IMG_BOOL rgx_hwperf_blk_present_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	DBG_ASSERT(psBlkTypeDesc != NULL);
+	DBG_ASSERT(psRtInfo != NULL);
+	DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TORNADO) ||
+		(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ||
+		(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RASTER0));
+
+#if defined(__KERNEL__) /* Server context */
+	PVR_ASSERT(pvDev_km != NULL);
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+		if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK) != 0)
+		{
+			psRtInfo->uiBitSelectPreserveMask = 0x0000;
+			psRtInfo->uiNumUnits =
+				(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TORNADO) ? 1
+					: rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); // Texas, Ind. Raster
+			return IMG_TRUE;
+		}
+	}
+#else /* FW context */
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS)
+	psRtInfo->uiBitSelectPreserveMask = 0x0000;
+	psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+	return IMG_TRUE;
+# else
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+	return IMG_FALSE;
+}
+
+/* Used for block types: JONES, TPU_MCU, TEXAS, BLACKPEARL, PBE */
+static IMG_BOOL rgx_hwperf_blk_present_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	DBG_ASSERT(psBlkTypeDesc != NULL);
+	DBG_ASSERT(psRtInfo != NULL);
+	DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_JONES) ||
+		(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) ||
+		(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ||
+		(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) ||
+		(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_PBE0));
+
+#if defined(__KERNEL__) /* Server context */
+	PVR_ASSERT(pvDev_km != NULL);
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+		if ((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) != 0)
+		{
+			if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0)
+			{
+				psRtInfo->uiBitSelectPreserveMask =
+						((psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_41805_BIT_MASK) != 0)
+						? 0x8000 : 0x0000;
+				psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
+				return IMG_TRUE;
+			}
+			else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TEXAS0)
+			{
+				psRtInfo->uiBitSelectPreserveMask = 0x0000;
+				psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
+				return IMG_TRUE;
+			}
+			else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0)
+			{
+				psRtInfo->uiBitSelectPreserveMask = 0x0000;
+				psRtInfo->uiNumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg);
+				return IMG_TRUE;
+			}
+			else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_PBE0)
+			{
+				psRtInfo->uiBitSelectPreserveMask = 0x0000;
+				psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg);
+				return IMG_TRUE;
+			}
+			else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_JONES)
+			{
+				psRtInfo->uiBitSelectPreserveMask = 0x0000;
+				psRtInfo->uiNumUnits = 1;
+				return IMG_TRUE;
+			}
+		}
+	}
+#else /* FW context */
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+	psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+#  if defined(HW_ERN_41805)
+	if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0)
+	{
+		psRtInfo->uiBitSelectPreserveMask = 0x8000;
+	}
+	else
+#  endif
+	{
+		psRtInfo->uiBitSelectPreserveMask = 0x0000;
+	}
+	return IMG_TRUE;
+# else
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+	return IMG_FALSE;
+}
+
+/* Used for block types: TA, TPU_MCU */
+static IMG_BOOL rgx_hwperf_blk_present_not_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	DBG_ASSERT(psBlkTypeDesc != NULL);
+	DBG_ASSERT(psRtInfo != NULL);
+	DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TA) ||
+		(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0));
+
+#if defined(__KERNEL__) /* Server context */
+	PVR_ASSERT(pvDev_km != NULL);
+	{
+		PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+		if (((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) == 0) &&
+				((psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_PERFBUS_BIT_MASK) != 0))
+		{
+			if (((psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_41805_BIT_MASK) != 0) &&
+				(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0))
+			{
+				psRtInfo->uiBitSelectPreserveMask = 0x8000;
+			}
+			else
+			{
+				psRtInfo->uiBitSelectPreserveMask = 0x0000;
+			}
+			psRtInfo->uiNumUnits = (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TA) ? 1
+				: rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); // TPU_MCU0
+			return IMG_TRUE;
+		}
+	}
+#else /* FW context */
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS)
+	psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+#  if defined(HW_ERN_41805)
+	if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0)
+	{
+		psRtInfo->uiBitSelectPreserveMask = 0x8000;
+	}
+	else
+#  endif
+	{
+		psRtInfo->uiBitSelectPreserveMask = 0x0000;
+	}
+	return IMG_TRUE;
+# else
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+	return IMG_FALSE;
+}
+
+#if !defined(__KERNEL__) /* Firmware or User-mode context */
+static IMG_BOOL rgx_hwperf_blk_present_false(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+	PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+	PVR_UNREFERENCED_PARAMETER(pvDev_km);
+	PVR_UNREFERENCED_PARAMETER(psRtInfo);
+
+	/* Some functions not used on some BVNCs, silence compiler warnings */
+	PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_perfbus);
+	PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_clustergrouping);
+	PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_raytracing);
+	PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_xttop);
+	PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_s7top);
+	PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_s7top);
+
+	return IMG_FALSE;
+}
+
+/* Used to instantiate a null row in the block type model table below where the
+ * block is not supported for a given build BVNC in firmware/user mode context.
+ * This is needed as the blockid to block type lookup uses the table as well
+ * and clients may try to access blocks not in the hardware. */
+#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) {_blkid, 0, 0, 0, 0, 0, 0, 0, 0, #_blkid, NULL, rgx_hwperf_blk_present_false}
+
+#endif
+
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end
+ *****************************************************************************/
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table
+
+ This table holds the entries for the performance counter block type model.
+ Where the block is not present on an RGX device in question the
+ pfnIsBlkPresent() returns false, if valid and present it returns true.
+ Columns in the table with a ** indicate the value is a default and the
+ value returned in RGX_HWPERF_CNTBLK_RT_INFO when calling pfnIsBlkPresent()
+ should be used at runtime by the caller. These columns are only valid for
+ compile time BVNC configured contexts.
+
+ Order of table rows must match order of counter block IDs in the enumeration
+ RGX_HWPERF_CNTBLK_ID.
+*****************************************************************************/
+
+static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] =
+{
+/*   uiCntBlkIdBase,         uiIndirectReg,                 uiPerfReg,                  uiSelect0BaseReg,                    uiCounter0BaseReg,                  uiNumCounters,  uiNumUnits**,                  uiSelectRegModeShift, uiSelectRegOffsetShift, pszBlockNameComment,           pfnIsBlkPowered,              pfnIsBlkPresent */
+    /*RGX_CNTBLK_ID_TA*/
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_TA,       0, /* direct */                RGX_CR_TA_PERF,             RGX_CR_TA_PERF_SELECT0,              RGX_CR_TA_PERF_COUNTER_0,             4,              1,                              21,                  3,  "RGX_CR_TA_PERF",              rgxfw_hwperf_pow_st_direct,   rgx_hwperf_blk_present_not_s7top },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TA),
+#endif
+
+    /*RGX_CNTBLK_ID_RASTER*/
+#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_RASTER,   0, /* direct */                RGX_CR_RASTERISATION_PERF,  RGX_CR_RASTERISATION_PERF_SELECT0,   RGX_CR_RASTERISATION_PERF_COUNTER_0,  4,              1,                              21,                  3,  "RGX_CR_RASTERISATION_PERF",   rgxfw_hwperf_pow_st_direct,   rgx_hwperf_blk_present_not_clustergrouping },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER),
+#endif
+
+    /*RGX_CNTBLK_ID_HUB*/
+#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_HUB,      0, /* direct */                RGX_CR_HUB_BIFPMCACHE_PERF, RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0,  RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0, 4,              1,                              21,                  3,  "RGX_CR_HUB_BIFPMCACHE_PERF",  rgxfw_hwperf_pow_st_direct,   rgx_hwperf_blk_present_not_clustergrouping },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_HUB),
+#endif
+
+    /*RGX_CNTBLK_ID_TORNADO*/
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_TORNADO,  0, /* direct */                RGX_CR_TORNADO_PERF,        RGX_CR_TORNADO_PERF_SELECT0,         RGX_CR_TORNADO_PERF_COUNTER_0,        4,              1,                              21,                  4,  "RGX_CR_TORNADO_PERF",         rgxfw_hwperf_pow_st_direct,   rgx_hwperf_blk_present_xttop },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TORNADO),
+#endif
+
+    /*RGX_CNTBLK_ID_JONES*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_JONES,   0, /* direct */                 RGX_CR_JONES_PERF,          RGX_CR_JONES_PERF_SELECT0,           RGX_CR_JONES_PERF_COUNTER_0,          4,              1,                              21,                  3,  "RGX_CR_JONES_PERF",           rgxfw_hwperf_pow_st_direct,    rgx_hwperf_blk_present_s7top },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_JONES),
+#endif
+
+    /*RGX_CNTBLK_ID_BF RGX_CNTBLK_ID_BT RGX_CNTBLK_ID_RT RGX_CNTBLK_ID_SH*/
+#if defined(RGX_FEATURE_RAY_TRACING) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_BF,      0, /* direct */                 DPX_CR_BF_PERF,             DPX_CR_BF_PERF_SELECT0,              DPX_CR_BF_PERF_COUNTER_0,             4,              1,                              21,                  3,  "RGX_CR_BF_PERF",              rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+    {RGX_CNTBLK_ID_BT,      0, /* direct */                 DPX_CR_BT_PERF,             DPX_CR_BT_PERF_SELECT0,              DPX_CR_BT_PERF_COUNTER_0,             4,              1,                              21,                  3,  "RGX_CR_BT_PERF",              rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+    {RGX_CNTBLK_ID_RT,      0, /* direct */                 DPX_CR_RT_PERF,             DPX_CR_RT_PERF_SELECT0,              DPX_CR_RT_PERF_COUNTER_0,             4,              1,                              21,                  3,  "RGX_CR_RT_PERF",              rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+    {RGX_CNTBLK_ID_SH,      0, /* direct */                 RGX_CR_SH_PERF,             RGX_CR_SH_PERF_SELECT0,              RGX_CR_SH_PERF_COUNTER_0,             4,              1,                              21,                  3,  "RGX_CR_SH_PERF",              rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BF),
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BT),
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RT),
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_SH),
+#endif
+
+    /*RGX_CNTBLK_ID_TPU_MCU0*/
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_TPU_MCU0, RGX_CR_TPU_MCU_L0_PERF_INDIRECT, RGX_CR_TPU_MCU_L0_PERF,   RGX_CR_TPU_MCU_L0_PERF_SELECT0,     RGX_CR_TPU_MCU_L0_PERF_COUNTER_0,     4,              RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST,    21,          3,  "RGX_CR_TPU_MCU_L0_PERF",      rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_not_s7top },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0),
+#endif
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_TPU_MCU0, RGX_CR_TPU_PERF_INDIRECT,      RGX_CR_TPU_MCU_L0_PERF,     RGX_CR_TPU_MCU_L0_PERF_SELECT0,     RGX_CR_TPU_MCU_L0_PERF_COUNTER_0,     4,              RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST,    21,          3,  "RGX_CR_TPU_MCU_L0_PERF",      rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0),
+#endif
+
+    /*RGX_CNTBLK_ID_USC0*/
+#if defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_USC0,    RGX_CR_USC_PERF_INDIRECT,       RGX_CR_USC_PERF,            RGX_CR_USC_PERF_SELECT0,            RGX_CR_USC_PERF_COUNTER_0,            4,              RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21,          3,  "RGX_CR_USC_PERF",             rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_perfbus },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_USC0),
+#endif
+
+    /*RGX_CNTBLK_ID_TEXAS0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_TEXAS0,  RGX_CR_TEXAS3_PERF_INDIRECT,    RGX_CR_TEXAS_PERF,          RGX_CR_TEXAS_PERF_SELECT0,          RGX_CR_TEXAS_PERF_COUNTER_0,          6,              RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST,    31,          3,  "RGX_CR_TEXAS_PERF",           rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0),
+#endif
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_TEXAS0,  RGX_CR_TEXAS_PERF_INDIRECT,     RGX_CR_TEXAS_PERF,          RGX_CR_TEXAS_PERF_SELECT0,          RGX_CR_TEXAS_PERF_COUNTER_0,          6,              RGX_HWPERF_INDIRECT_BY_PHANTOM,         31,          3,  "RGX_CR_TEXAS_PERF",           rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0),
+#endif
+
+    /*RGX_CNTBLK_ID_RASTER0*/
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_RASTER0, RGX_CR_RASTERISATION_PERF_INDIRECT, RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0,  4,            RGX_HWPERF_INDIRECT_BY_PHANTOM,         21,          3,  "RGX_CR_RASTERISATION_PERF",   rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER0),
+#endif
+
+    /*RGX_CNTBLK_ID_BLACKPEARL0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_BLACKPEARL0, RGX_CR_BLACKPEARL_PERF_INDIRECT, RGX_CR_BLACKPEARL_PERF, RGX_CR_BLACKPEARL_PERF_SELECT0,    RGX_CR_BLACKPEARL_PERF_COUNTER_0,     6,              RGX_HWPERF_INDIRECT_BY_PHANTOM,         21,          3,  "RGX_CR_BLACKPEARL_PERF",      rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BLACKPEARL0),
+#endif
+
+    /*RGX_CNTBLK_ID_PBE0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_PBE0,    RGX_CR_PBE_PERF_INDIRECT, RGX_CR_PBE_PERF,                  RGX_CR_PBE_PERF_SELECT0,            RGX_CR_PBE_PERF_COUNTER_0,            4,              RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21,          3,  "RGX_CR_PBE_PERF",             rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_PBE0),
+#endif
+
+    /*RGX_CNTBLK_ID_BX_TU0*/
+#if defined (RGX_FEATURE_RAY_TRACING) || defined(__KERNEL__)
+    {RGX_CNTBLK_ID_BX_TU0, RGX_CR_BX_TU_PERF_INDIRECT,       DPX_CR_BX_TU_PERF,           DPX_CR_BX_TU_PERF_SELECT0,        DPX_CR_BX_TU_PERF_COUNTER_0,          4,              RGX_HWPERF_DOPPLER_BX_TU_BLKS,          21,          3,  "RGX_CR_BX_TU_PERF",           rgxfw_hwperf_pow_st_gandalf,  rgx_hwperf_blk_present_raytracing },
+#else
+    RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BX_TU0),
+#endif
+};
+
+
+IMG_INTERNAL IMG_UINT32
+RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel)
+{
+	*ppsModel = gasCntBlkTypeModel;
+	return IMG_ARR_NUM_ELEMS(gasCntBlkTypeModel);
+}
+
+/******************************************************************************
+ End of file (rgx_hwperf_table.c)
+******************************************************************************/
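
A caller-side sketch of how this table is typically consumed in the server context (the iteration below is illustrative, not code from this patch; pvDev_km is assumed to be the PVRSRV_RGXDEV_INFO pointer the present-check functions expect):

```c
/* Illustrative walk over the block type model (not part of this patch):
 * query each block type and use the run-time info for those present. */
static void ExampleEnumerateBlocks(void *pvDev_km)
{
	const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psModel;
	IMG_UINT32 uiNumEntries = RGXGetHWPerfBlockConfig(&psModel);
	IMG_UINT32 i;

	for (i = 0; i < uiNumEntries; i++)
	{
		RGX_HWPERF_CNTBLK_RT_INFO sRtInfo;

		if (psModel[i].pfnIsBlkPresent(&psModel[i], pvDev_km, &sRtInfo))
		{
			/* sRtInfo.uiNumUnits instances of this block type exist on
			 * this device; sRtInfo.uiBitSelectPreserveMask must be kept
			 * when programming its select registers. */
		}
	}
}
```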
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/devices/rgx/rgx_hwperf_table.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/devices/rgx/rgx_hwperf_table.h
new file mode 100644
index 0000000..167349b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/devices/rgx/rgx_hwperf_table.h
@@ -0,0 +1,112 @@
+/*************************************************************************/ /*!
+@File
+@Title          HWPerf counter table header
+
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally for HWPerf data retrieval
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_HWPERF_TABLE_H__)
+#define __RGX_HWPERF_TABLE_H__
+
+#include "img_types.h"
+#include "rgx_fwif_hwperf.h"
+
+
+/*****************************************************************************/
+
+/* Forward declaration */
+typedef struct _RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ RGXFW_HWPERF_CNTBLK_TYPE_MODEL;
+
+/* Function pointer type for functions to check dynamic power state of
+ * counter block instance. Used only in firmware. */
+typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_POWERED)(
+		RGX_HWPERF_CNTBLK_ID eBlkType,
+		IMG_UINT8 ui8UnitId);
+
+/* Counter block run-time info */
+typedef struct _RGX_HWPERF_CNTBLK_RT_INFO_
+{
+	IMG_UINT32 uiBitSelectPreserveMask; /* Select register bits to preserve on programming, HW_ERN_41805 */
+	IMG_UINT32 uiNumUnits;              /* Number of instances of this block type in the core */
+} RGX_HWPERF_CNTBLK_RT_INFO;
+
+/* Function pointer type for functions to check block is valid and present
+ * on that RGX Device at runtime. It may have compile logic or run-time
+ * logic depending on where the code executes: server, srvinit or firmware.
+ * Values in the psRtInfo output parameter are only valid if true returned. */
+typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_PRESENT)(
+		const struct _RGXFW_HWPERF_CNTBLK_TYPE_MODEL_* psBlkTypeDesc,
+		void *pvDev_km,
+		RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo);
+
+/* This structure encodes the properties of a type of performance counter
+ * block. The structure is sometimes referred to as a block type descriptor.
+ * The properties contained in this structure represent the columns in the
+ * block type model table variable below. Their values vary depending on
+ * the build BVNC and core type.
+ * Each direct block has a unique type descriptor and each indirect group has
+ * a type descriptor. */
+struct _RGXFW_HWPERF_CNTBLK_TYPE_MODEL_
+{
+	/* Could use RGXFW_ALIGN_DCACHEL here but then we would waste 40% of the cache line? */
+	IMG_UINT32 uiCntBlkIdBase;         /* The starting block id for this block type */
+	IMG_UINT32 uiIndirectReg;          /* 0 if direct type otherwise the indirect control register to select indirect unit */
+	IMG_UINT32 uiPerfReg;              /* RGX_CR_*_PERF register for this block type */
+	IMG_UINT32 uiSelect0BaseReg;       /* RGX_CR_*_PERF_SELECT0 register for this block type */
+	IMG_UINT32 uiCounter0BaseReg;      /* RGX_CR_*_PERF_COUNTER_0 register for this block type */
+	IMG_UINT8  uiNumCounters;          /* Number of counters in this block type */
+	IMG_UINT8  uiNumUnits;             /* Number of instances of this block type in the core */
+	IMG_UINT8  uiSelectRegModeShift;   /* Mode field shift value of select registers */
+	IMG_UINT8  uiSelectRegOffsetShift; /* Interval between select registers, either 8 bytes or 16, hence << 3 or << 4 */
+	IMG_CHAR   pszBlockNameComment[30];              /* Name of the PERF register. Used while dumping the perf counters to pdumps */
+	PFN_RGXFW_HWPERF_CNTBLK_POWERED pfnIsBlkPowered; /* A function to determine dynamic power state for the block type */
+	PFN_RGXFW_HWPERF_CNTBLK_PRESENT pfnIsBlkPresent; /* A function to determine presence on RGX Device at run-time */
+};
+
+/*****************************************************************************/
+
+IMG_INTERNAL IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel);
+
+
+#endif /*  __RGX_HWPERF_TABLE_H__ */
+
+/******************************************************************************
+ End of file (rgx_hwperf_table.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/device_connection.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/device_connection.h
new file mode 100644
index 0000000..e7d284d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/device_connection.h
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@File           device_connection.h
+@Title          Device connection
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared device connection handle type and the device capability
+                flags passed over the bridge at connection time.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__DEVICE_CONNECTION_H__)
+#define __DEVICE_CONNECTION_H__
+
+#include "img_types.h"
+
+#if defined(__KERNEL__)
+typedef struct _PVRSRV_DEVICE_NODE_ *SHARED_DEV_CONNECTION;
+#else
+typedef IMG_HANDLE SHARED_DEV_CONNECTION;
+#endif
+
+/******************************************************************************
+ * Device capability flags and masks
+ *****************************************************************************/
+
+/* Flag to be passed over the bridge during connection stating whether CPU cache coherency is available */
+#define PVRSRV_CACHE_COHERENT_SHIFT (0)
+#define PVRSRV_CACHE_COHERENT_DEVICE_FLAG (1U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define PVRSRV_CACHE_COHERENT_CPU_FLAG (2U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define PVRSRV_CACHE_COHERENT_EMULATE_FLAG (4U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define PVRSRV_CACHE_COHERENT_MASK (7U << PVRSRV_CACHE_COHERENT_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating whether CPU non-mappable memory is present */
+#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT (7)
+#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG (1U << PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating SVM allocation availability */
+#define PVRSRV_DEVMEM_SVM_ALLOC_SHIFT (8)
+#define PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED (1U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+#define PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED (2U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+#define PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL (4U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+
+#endif /* !defined(__DEVICE_CONNECTION_H__) */
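
A short sketch of how a client might decode the capability word built from these flags (ui32Caps is a hypothetical local; the bridge call that supplies it is outside this header):

```c
/* Sketch: decoding the capability bits defined above. ui32Caps is a
 * hypothetical value obtained from the connection-time bridge call. */
static void ExampleDecodeCaps(IMG_UINT32 ui32Caps)
{
	IMG_UINT32 ui32Coherency = ui32Caps & PVRSRV_CACHE_COHERENT_MASK;

	if (ui32Coherency & PVRSRV_CACHE_COHERENT_DEVICE_FLAG)
	{
		/* Device-side cache coherency is reported. */
	}
	if (ui32Coherency & PVRSRV_CACHE_COHERENT_EMULATE_FLAG)
	{
		/* Coherency is emulated rather than provided by hardware. */
	}
	if (ui32Caps & PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG)
	{
		/* Some device memory cannot be mapped by the CPU. */
	}
	if (ui32Caps & PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL)
	{
		/* SVM allocations are available but may fail. */
	}
}
```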
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicemem.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicemem.h
new file mode 100644
index 0000000..1eb4bfb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicemem.h
@@ -0,0 +1,675 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management core internal
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal interface to core device memory management
+                functions that are shared between client and server code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SRVCLIENT_DEVICEMEM_H
+#define SRVCLIENT_DEVICEMEM_H
+
+/********************************************************************************
+ *                                                                              *
+ *   +------------+   +------------+    +--------------+      +--------------+  *
+ *   | a   sub-   |   | a   sub-   |    |  an          |      | allocation   |  *
+ *   | allocation |   | allocation |    |  allocation  |      | also mapped  |  *
+ *   |            |   |            |    |  in proc 1   |      | into proc 2  |  *
+ *   +------------+   +------------+    +--------------+      +--------------+  *
+ *             |         |                     |                     |          *
+ *          +--------------+            +--------------+      +--------------+  *
+ *          | page   gran- |            | page   gran- |      | page   gran- |  *
+ *          | ular mapping |            | ular mapping |      | ular mapping |  *
+ *          +--------------+            +--------------+      +--------------+  *
+ *                 |                                 |          |               *
+ *                 |                                 |          |               *
+ *                 |                                 |          |               *
+ *          +--------------+                       +--------------+             *
+ *          |              |                       |              |             *
+ *          | A  "P.M.R."  |                       | A  "P.M.R."  |             *
+ *          |              |                       |              |             *
+ *          +--------------+                       +--------------+             *
+ *                                                                              *
+ ********************************************************************************/
+
+/*
+    All device memory allocations are ultimately a view upon (not
+    necessarily the whole of) a "PMR".
+
+    A PMR is a "Physical Memory Resource", which may be a
+    "pre-faulted" lump of physical memory, or it may be a
+    representation of some physical memory that will be instantiated
+    at some future time.
+
+    PMRs always represent a multiple of some power-of-2 "contiguity"
+    promised by the PMR, which will allow them to be mapped in whole
+    pages into the device MMU.  As memory allocations may be smaller
+    than a page, these mappings may be suballocated and thus shared
+    between multiple allocations in one process.  A PMR may also be
+    mapped simultaneously into multiple device memory contexts
+    (cross-process scenario), however, for security reasons, it is not
+    legal to share a PMR "both ways" at once, that is, mapped into
+    multiple processes and divided up amongst several suballocations.
+
+    This PMR terminology is introduced here for background
+    information, but is generally of little concern to the caller of
+    this API.  This API handles suballocations and mappings, and the
+    caller thus deals primarily with MEMORY DESCRIPTORS representing
+    an allocation or suballocation, HEAPS representing ranges of
+    virtual addresses in a CONTEXT.
+*/
+
+/*
+   |<---------------------------context------------------------------>|
+   |<-------heap------->|   |<-------heap------->|<-------heap------->|
+   |<-alloc->|          |   |<-alloc->|<-alloc->||   |<-alloc->|      |
+*/
+
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "pdump.h"
+
+#include "device_connection.h"
+
+
+typedef IMG_UINT32 DEVMEM_HEAPCFGID;
+#define DEVMEM_HEAPCFG_FORCLIENTS 0
+#define DEVMEM_HEAPCFG_META 1
+
+
+/*
+  In order to call the server side functions, we need a bridge handle.
+  We abstract that here, as we may wish to change its form.
+ */
+
+typedef IMG_HANDLE DEVMEM_BRIDGE_HANDLE;
+
+/**************************************************************************/ /*!
+@Function       DevmemUnpin
+@Description    This is the counterpart to DevmemPin(). It is meant to be
+                called before repinning an allocation.
+
+                For a detailed description see client API documentation.
+
+@Input          phMemDesc       The MemDesc that is going to be unpinned.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
+                                registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnpin(DEVMEM_MEMDESC *psMemDesc);
+
+/**************************************************************************/ /*!
+@Function       DevmemPin
+@Description    This is the counterpart to DevmemUnpin(). It is meant to be
+                called after unpinning an allocation.
+
+                For a detailed description see client API documentation.
+
+@Input          phMemDesc       The MemDesc that is going to be pinned.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
+                                was successfully restored.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+                                could not be restored and new physical memory
+                                was allocated.
+
+                                A different error otherwise.
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPin(DEVMEM_MEMDESC *psMemDesc);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapInt(DEVMEM_HEAP *psHeap,
+				 IMG_HANDLE *phDevmemHeap);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetSize(DEVMEM_MEMDESC *psMemDesc,
+			  IMG_DEVMEM_SIZE_T* puiSize);
+
+/*
+ * DevmemCreateContext()
+ *
+ * Create a device memory context
+ *
+ * This must be called before any heap is created in this context
+ *
+ * Caller to provide bridge handle which will be squirreled away
+ * internally and used for all future operations on items from this
+ * memory context.  Caller also to provide devicenode handle, as this
+ * is used for MMU configuration and also to determine the heap
+ * configuration for the auto-instantiated heaps.
+ *
+ * Note that when compiled in services/server, the hBridge is not used
+ * and is thrown away by the "fake" direct bridge.  (This may change.
+ * It is recommended that NULL be passed for the handle for now)
+ *
+ * hDeviceNode and uiHeapBlueprintID shall together dictate which
+ * heap-config to use.
+ *
+ * This will cause the server side counterpart to be created also.
+ *
+ * If you call DevmemCreateContext() (and the call succeeds) you
+ * are promising that you will later call DevmemDestroyContext(),
+ * except for abnormal process termination in which case it is
+ * expected it will be destroyed as part of handle clean up.
+ *
+ * Caller to provide storage for the pointer to the NEWDEVMEM_CONTEXT
+ * object thusly created.
+ */
+extern PVRSRV_ERROR
+DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection,
+                    DEVMEM_HEAPCFGID uiHeapBlueprintID,
+                    DEVMEM_CONTEXT **ppsCtxPtr);
+
+/*
+ * DevmemAcquireDevPrivData()
+ * 
+ * Acquire the device private data for this memory context
+ */
+PVRSRV_ERROR
+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
+                         IMG_HANDLE *hPrivData);
+
+/*
+ * DevmemReleaseDevPrivData()
+ * 
+ * Release the device private data for this memory context
+ */
+PVRSRV_ERROR
+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx);
+
+/*
+ * DevmemDestroyContext()
+ *
+ * Undoes that done by DevmemCreateContext()
+ */
+extern PVRSRV_ERROR
+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx);
+
+/*
+ * DevmemCreateHeap()
+ *
+ * Create a heap in the given context.
+ *
+ * N.B.  Not intended to be called directly, though it can be.
+ * Normally, heaps are instantiated at context creation time according
+ * to the specified blueprint.  See DevmemCreateContext() for details.
+ *
+ * This will cause MMU code to set up data structures for the heap,
+ * but may not cause page tables to be modified until allocations are
+ * made from the heap.
+ *
+ * The "Quantum" is both the device MMU page size to be configured for
+ * this heap, and the unit multiples of which "quantized" allocations
+ * are made (allocations smaller than this, known as "suballocations"
+ * will be made from a "sub alloc RA" and will "import" chunks
+ * according to this quantum)
+ *
+ * Where imported PMRs (or, for example, PMRs created by device class
+ * buffers) are mapped into this heap, it is important that the
+ * physical contiguity guarantee offered by the PMR is greater than or
+ * equal to the quantum size specified here, otherwise the attempt to
+ * map it will fail.  "Normal" allocations via DevmemAllocate()
+ * shall automatically meet this requirement, as each "import" will
+ * trigger the creation of a PMR with the desired contiguity.  The
+ * supported quantum sizes in that case shall be dictated by the OS
+ * specific implementation of PhysmemNewOSRamBackedPMR() (see)
+ */
+extern PVRSRV_ERROR
+DevmemCreateHeap(DEVMEM_CONTEXT *psCtxPtr,
+                 /* base and length of heap */
+                 IMG_DEV_VIRTADDR sBaseAddress,
+                 IMG_DEVMEM_SIZE_T uiLength,
+                 /* log2 of allocation quantum, i.e. "page" size.
+                    All allocations (that go to server side) are
+                    multiples of this.  We use a client-side RA to
+                    make sub-allocations from this */
+                 IMG_UINT32 ui32Log2Quantum,
+                 /* The minimum import alignment for this heap */
+                 IMG_UINT32 ui32Log2ImportAlignment,
+                 /* (For tiling heaps) the factor to use to convert
+                    alignment to optimum buffer stride */
+                 IMG_UINT32 ui32Log2TilingStrideFactor,
+                 /* Name of heap for debug */
+                 /* N.B.  Okay to exist on caller's stack - this
+                    func takes a copy if it needs it. */
+                 const IMG_CHAR *pszName,
+                 DEVMEM_HEAPCFGID uiHeapBlueprintID,
+                 DEVMEM_HEAP **ppsHeapPtr);
+/*
+ * DevmemDestroyHeap()
+ *
+ * Reverses DevmemCreateHeap()
+ *
+ * N.B. All allocations must have been freed and all mappings must
+ * have been unmapped before invoking this call
+ */
+extern PVRSRV_ERROR
+DevmemDestroyHeap(DEVMEM_HEAP *psHeap);
+
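+/* Illustrative sketch of manual heap creation (normally unnecessary,
+ * as heaps are instantiated from the blueprint at context creation;
+ * all values below are hypothetical):
+ *
+ *     DEVMEM_HEAP *psHeap;
+ *     IMG_DEV_VIRTADDR sBase = { 0x8000000ULL };
+ *     PVRSRV_ERROR eError;
+ *
+ *     // 4KiB quantum (log2 = 12): server-side allocations are made in
+ *     // 4KiB multiples; smaller requests become suballocations.
+ *     eError = DevmemCreateHeap(psCtx, sBase, 0x10000000, 12, 0, 0,
+ *                               "example-heap", uiHeapBlueprintID,
+ *                               &psHeap);
+ *     ...
+ *     eError = DevmemDestroyHeap(psHeap);
+ */
+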
+/*
+ * DevmemExportalignAdjustSizeAndAlign()
+ * Adjust the given size and alignment so the allocation avoids
+ * suballocation (used when allocating with
+ * PVRSRV_MEMALLOCFLAG_EXPORTALIGN).
+ */
+IMG_INTERNAL void
+DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum,
+                                    IMG_DEVMEM_SIZE_T *puiSize,
+                                    IMG_DEVMEM_ALIGN_T *puiAlign);
+
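+/* Worked example (assumed behaviour: the size is rounded up to a
+ * quantum multiple and the alignment raised to at least the quantum,
+ * so the allocation becomes its own import rather than a
+ * suballocation):
+ *
+ *     IMG_DEVMEM_SIZE_T uiSize = 5000;   // bytes requested
+ *     IMG_DEVMEM_ALIGN_T uiAlign = 256;
+ *
+ *     DevmemExportalignAdjustSizeAndAlign(12, &uiSize, &uiAlign);
+ *     // With a 4KiB quantum (log2 = 12), expect uiSize == 8192
+ *     // and uiAlign == 4096 afterwards.
+ */
+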
+/*
+ * DevmemSubAllocate()
+ *
+ * Makes an allocation (possibly a "suballocation", as described
+ * below) of device virtual memory from this heap.
+ *
+ * The size and alignment of the allocation will be honoured by the RA
+ * that allocates the "suballocation".  The resulting allocation will
+ * be mapped into GPU virtual memory and the physical memory to back
+ * it will exist, by the time this call successfully completes.
+ * 
+ * The size must be a positive integer multiple of the alignment.
+ * (i.e. the alignment specifies the alignment of both the start and
+ * the end of the resulting allocation.)
+ *
+ * Allocations made via this API are routed though a "suballocation
+ * RA" which is responsible for ensuring that small allocations can be
+ * made without wasting physical memory in the server.  Furthermore,
+ * such suballocations can be made entirely client side without
+ * needing to go to the server unless the allocation spills into a new
+ * page.
+ *
+ * Such suballocations cause many allocations to share the same "PMR".
+ * This happens only when the flags match exactly.
+ *
+ */
+
+PVRSRV_ERROR
+DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier,
+                  DEVMEM_HEAP *psHeap,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiAlign,
+                  DEVMEM_FLAGS_T uiFlags,
+                  const IMG_CHAR *pszText,
+                  DEVMEM_MEMDESC **ppsMemDescPtr);
+
+#define DevmemAllocate(...) \
+    DevmemSubAllocate(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__)
+
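+/* Illustrative allocation sketch (error handling elided; the flags
+ * value is a hypothetical DEVMEM_FLAGS_T chosen by the caller):
+ *
+ *     DEVMEM_MEMDESC *psMemDesc;
+ *     PVRSRV_ERROR eError;
+ *
+ *     eError = DevmemAllocate(psHeap,
+ *                             4096,             // size
+ *                             4096,             // alignment
+ *                             uiFlags,
+ *                             "example-alloc",  // debug annotation
+ *                             &psMemDesc);
+ *     ...
+ *     DevmemFree(psMemDesc);
+ */
+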
+PVRSRV_ERROR
+DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection,
+                         IMG_DEVMEM_SIZE_T uiSize,
+                         IMG_DEVMEM_ALIGN_T uiAlign,
+                         IMG_UINT32 uiLog2HeapPageSize,
+                         DEVMEM_FLAGS_T uiFlags,
+                         const IMG_CHAR *pszText,
+                         DEVMEM_MEMDESC **ppsMemDescPtr);
+
+PVRSRV_ERROR
+DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *paui32AllocPageIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pauiFreePageIndices,
+                      SPARSE_MEM_RESIZE_FLAGS uiFlags);
+
+PVRSRV_ERROR
+DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection,
+                     IMG_DEVMEM_SIZE_T uiSize,
+                     IMG_DEVMEM_SIZE_T uiChunkSize,
+                     IMG_UINT32 ui32NumPhysChunks,
+                     IMG_UINT32 ui32NumVirtChunks,
+                     IMG_UINT32 *pui32MappingTable,
+                     IMG_DEVMEM_ALIGN_T uiAlign,
+                     IMG_UINT32 uiLog2HeapPageSize,
+                     DEVMEM_FLAGS_T uiFlags,
+                     const IMG_CHAR *pszText,
+                     DEVMEM_MEMDESC **ppsMemDescPtr);
+
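+/* Illustrative sparse-allocation sketch (hypothetical values; assumed
+ * mapping-table semantics: one entry per physical chunk giving the
+ * index of the virtual chunk it backs).  Back 2 of 4 virtual chunks:
+ *
+ *     IMG_UINT32 aui32MappingTable[2] = { 0, 3 };
+ *     PVRSRV_ERROR eError;
+ *
+ *     eError = DevmemAllocateSparse(hDevConnection,
+ *                                   4 * 4096,   // virtual size
+ *                                   4096,       // chunk size
+ *                                   2,          // physical chunks
+ *                                   4,          // virtual chunks
+ *                                   aui32MappingTable,
+ *                                   4096, 12, uiFlags,
+ *                                   "example-sparse", &psMemDesc);
+ */
+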
+/*
+ * DevmemFree()
+ *
+ * Reverses that done by DevmemSubAllocate().  N.B.  The underlying
+ * mapping and server side allocation _may_ not be torn down, for
+ * example, if the allocation has been exported, or if multiple
+ * allocations were suballocated from the same mapping, but this is
+ * properly refcounted, so the caller does not have to care.
+ */
+
+extern void
+DevmemFree(DEVMEM_MEMDESC *psMemDesc);
+
+/*
+	DevmemMapToDevice:
+
+	Map an allocation to the device it was allocated from.
+	This function _must_ be called before any call to 
+	DevmemAcquireDevVirtAddr is made as it binds the allocation
+	to the heap.
+	DevmemReleaseDevVirtAddr is used to release the reference
+	to the device mapping this function created, but it doesn't
+	mean that the memory will actually be unmapped from the
+	device as other references to the mapping obtained via
+	DevmemAcquireDevVirtAddr could still be active.
+*/
+PVRSRV_ERROR DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
+							   DEVMEM_HEAP *psHeap,
+							   IMG_DEV_VIRTADDR *psDevVirtAddr);
+
+/*
+	DevmemMapToDeviceAddress:
+
+	Same as DevmemMapToDevice but the caller chooses the address
+	to map to.
+*/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+                         DEVMEM_HEAP *psHeap,
+                         IMG_DEV_VIRTADDR sDevVirtAddr);
+
+/*
+	DevmemAcquireDevVirtAddr
+
+	Acquire the MemDesc's device virtual address.
+	This function _must_ be called after DevmemMapToDevice
+	and is expected to be used by functions which didn't allocate
+	the MemDesc but need to know its address
+ */
+PVRSRV_ERROR DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                                      IMG_DEV_VIRTADDR *psDevVirtAddrRet);
+/*
+ * DevmemReleaseDevVirtAddr()
+ *
+ * give up the licence to use the device virtual address that was
+ * acquired by "Acquire" or "MapToDevice"
+ */
+extern void
+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc);
+
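+/* Illustrative device-mapping sketch (error handling elided):
+ *
+ *     IMG_DEV_VIRTADDR sDevVAddr;
+ *
+ *     // Bind the allocation to a heap and map it.
+ *     DevmemMapToDevice(psMemDesc, psHeap, &sDevVAddr);
+ *
+ *     // Elsewhere, take an extra reference on the address ...
+ *     DevmemAcquireDevVirtAddr(psMemDesc, &sDevVAddr);
+ *     // ... and drop it again when done.
+ *     DevmemReleaseDevVirtAddr(psMemDesc);
+ *
+ *     // Finally drop the reference taken by DevmemMapToDevice().
+ *     DevmemReleaseDevVirtAddr(psMemDesc);
+ */
+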
+/*
+ * DevmemAcquireCpuVirtAddr()
+ *
+ * Acquires a license to use the cpu virtual address of this mapping.
+ * Note that the memory may not have been mapped into cpu virtual
+ * memory prior to this call.  On first "acquire" the memory will be
+ * mapped in (if it wasn't statically mapped in) and on last put it
+ * _may_ become unmapped.  Later calling "Acquire" again, _may_ cause
+ * the memory to be mapped at a different address.
+ */
+PVRSRV_ERROR DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                                      void **ppvCpuVirtAddr);
+
+/*
+ * DevmemReacquireCpuVirtAddr()
+ *
+ * (Re)acquires license to use the cpu virtual address of this mapping
+ * if (and only if) there is already a pre-existing license to use the
+ * cpu virtual address for the mapping; otherwise *ppvCpuVirtAddr is
+ * set to NULL.
+ */
+void DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                                void **ppvCpuVirtAddr);
+
+/*
+ * DevmemReleaseCpuVirtAddr()
+ *
+ * give up the licence to use the cpu virtual address that was granted
+ * with the "Acquire" call.
+ */
+extern void
+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc);
+
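+/* Illustrative CPU-mapping sketch (error handling elided; a later
+ * acquire may map the memory at a different address):
+ *
+ *     void *pvCpuVAddr;
+ *
+ *     DevmemAcquireCpuVirtAddr(psMemDesc, &pvCpuVAddr);
+ *     // ... read/write through pvCpuVAddr ...
+ *     DevmemReleaseCpuVirtAddr(psMemDesc);
+ */
+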
+#if defined(SUPPORT_INSECURE_EXPORT)
+/*
+ * DevmemExport()
+ *
+ * Given a memory allocation allocated with DevmemAllocateExportable()
+ * create a "cookie" that can be passed intact by the caller's own choice
+ * of secure IPC to another process and used as the argument to "map"
+ * to map this memory into a heap in the target process.  N.B.  This can
+ * also be used to map into multiple heaps in one process, though that's not
+ * the intention.
+ *
+ * Note, the caller must later call Unexport before freeing the
+ * memory.
+ */
+PVRSRV_ERROR DevmemExport(DEVMEM_MEMDESC *psMemDesc,
+                          DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+
+void DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
+					DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+PVRSRV_ERROR
+DevmemImport(SHARED_DEV_CONNECTION hDevConnection,
+			 DEVMEM_EXPORTCOOKIE *psCookie,
+			 DEVMEM_FLAGS_T uiFlags,
+			 DEVMEM_MEMDESC **ppsMemDescPtr);
+#endif /* SUPPORT_INSECURE_EXPORT */
+
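+/* Illustrative export/import sketch for SUPPORT_INSECURE_EXPORT
+ * builds (the IPC transport for the cookie is the caller's choice
+ * and is elided):
+ *
+ *     // Exporting process:
+ *     DEVMEM_EXPORTCOOKIE sCookie;
+ *     DevmemExport(psMemDesc, &sCookie);
+ *     // ... send sCookie to the other process ...
+ *
+ *     // Importing process:
+ *     DEVMEM_MEMDESC *psImported;
+ *     DevmemImport(hDevConnection, &sCookie, uiFlags, &psImported);
+ *
+ *     // Exporter must unexport before freeing:
+ *     DevmemUnexport(psMemDesc, &sCookie);
+ */
+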
+/*
+ * DevmemMakeLocalImportHandle()
+ * 
+ * This is a "special case" function for making a server export cookie
+ * which went through the direct bridge into an export cookie that can
+ * be passed through the client bridge.
+ */
+PVRSRV_ERROR
+DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+                            IMG_HANDLE hServerExport,
+                            IMG_HANDLE *hClientExport);
+
+/*
+ * DevmemUnmakeLocalImportHandle()
+ * 
+ * Free any resource associated with the Make operation
+ */
+PVRSRV_ERROR
+DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+                              IMG_HANDLE hClientExport);
+
+/*
+ *
+ * The following set of functions is specific to the heap "blueprint"
+ * stuff, for automatic creation of heaps when a context is created
+ *
+ */
+
+
+/* DevmemHeapConfigCount: returns the number of heap configs that
+   this device has.  Note that there is no acquire/release semantics
+   required, as this data is guaranteed to be constant for the
+   lifetime of the device node */
+extern PVRSRV_ERROR
+DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection,
+                      IMG_UINT32 *puiNumHeapConfigsOut);
+
+/* DevmemHeapCount: returns the number of heaps that a given heap
+   config on this device has.  Note that there is no acquire/release
+   semantics required, as this data is guaranteed to be constant for
+   the lifetime of the device node */
+extern PVRSRV_ERROR
+DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection,
+                IMG_UINT32 uiHeapConfigIndex,
+                IMG_UINT32 *puiNumHeapsOut);
+/* DevmemHeapConfigName: return the name of the given heap config.
+   The caller is to provide the storage for the returned string and
+   indicate the number of bytes (including null terminator) for such
+   string in the BufSz arg.  Note that there is no acquire/release
+   semantics required, as this data is guaranteed to be constant for
+   the lifetime of the device node.
+ */
+extern PVRSRV_ERROR
+DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection,
+                     IMG_UINT32 uiHeapConfigIndex,
+                     IMG_CHAR *pszConfigNameOut,
+                     IMG_UINT32 uiConfigNameBufSz);
+
+/* DevmemHeapDetails: fetches all the metadata that is recorded in
+   this heap "blueprint".  Namely: heap name (caller to provide
+   storage, and indicate buffer size (including null terminator) in
+   BufSz arg), device virtual address and length, log2 of data page
+   size (will be one of 12, 14, 16, 18, 20, 21, at time of writing).
+   Note that there is no acquire/release semantics required, as this
+   data is guaranteed to be constant for the lifetime of the device
+   node. */
+extern PVRSRV_ERROR
+DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection,
+                  IMG_UINT32 uiHeapConfigIndex,
+                  IMG_UINT32 uiHeapIndex,
+                  IMG_CHAR *pszHeapNameOut,
+                  IMG_UINT32 uiHeapNameBufSz,
+                  IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+                  IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+                  IMG_UINT32 *puiLog2DataPageSize,
+                  IMG_UINT32 *puiLog2ImportAlignmentOut,
+                  IMG_UINT32 *puiLog2TilingStrideFactor);
+
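+/* Illustrative enumeration sketch (error handling elided): walk every
+ * heap blueprint on the device.
+ *
+ *     IMG_UINT32 uiNumConfigs, uiNumHeaps, uiCfg, uiHeap;
+ *
+ *     DevmemHeapConfigCount(hDevConnection, &uiNumConfigs);
+ *     for (uiCfg = 0; uiCfg < uiNumConfigs; uiCfg++)
+ *     {
+ *         DevmemHeapCount(hDevConnection, uiCfg, &uiNumHeaps);
+ *         for (uiHeap = 0; uiHeap < uiNumHeaps; uiHeap++)
+ *         {
+ *             // DevmemHeapDetails(hDevConnection, uiCfg, uiHeap, ...);
+ *         }
+ *     }
+ */
+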
+/*
+ * DevmemFindHeapByName()
+ *
+ * returns the heap handle for the named _automagic_ heap in this
+ * context.  "automagic" heaps are those that are born with the
+ * context from a blueprint
+ */
+extern PVRSRV_ERROR
+DevmemFindHeapByName(const DEVMEM_CONTEXT *psCtx,
+                     const IMG_CHAR *pszHeapName,
+                     DEVMEM_HEAP **ppsHeapRet);
+
+/*
+ * DevmemGetHeapBaseDevVAddr()
+ *
+ * returns the device virtual address of the base of the heap.
+ */
+
+PVRSRV_ERROR
+DevmemGetHeapBaseDevVAddr(DEVMEM_HEAP *psHeap,
+			  IMG_DEV_VIRTADDR *pDevVAddr);
+
+extern PVRSRV_ERROR
+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+			   IMG_HANDLE *phImport);
+
+extern PVRSRV_ERROR
+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
+						   IMG_UINT64 *pui64UID);
+
+PVRSRV_ERROR
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
+				IMG_HANDLE *hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
+		IMG_HANDLE *hPMR,
+		IMG_DEVMEM_OFFSET_T *puiPMROffset);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
+				DEVMEM_FLAGS_T *puiFlags);
+
+IMG_INTERNAL IMG_HANDLE
+DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc);
+
+PVRSRV_ERROR
+DevmemLocalImport(IMG_HANDLE hBridge,
+				  IMG_HANDLE hExtHandle,
+				  DEVMEM_FLAGS_T uiFlags,
+				  DEVMEM_MEMDESC **ppsMemDescPtr,
+				  IMG_DEVMEM_SIZE_T *puiSizePtr,
+				  const IMG_CHAR *pszAnnotation);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
+                         IMG_DEV_VIRTADDR sDevVAddr);
+
+/* DevmemGetHeapLog2PageSize()
+ *
+ * Get the page size used for a certain heap.
+ */
+IMG_UINT32
+DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap);
+
+/* DevmemGetHeapTilingProperties()
+ *
+ * Get the import alignment and tiling stride factor used for a certain heap.
+ */
+IMG_UINT32
+DevmemGetHeapTilingProperties(DEVMEM_HEAP *psHeap,
+                              IMG_UINT32 *puiLog2ImportAlignment,
+                              IMG_UINT32 *puiLog2TilingStrideFactor);
+
+/**************************************************************************/ /*!
+@Function       RegisterDevmemPFNotify
+@Description    Registers that the application wants to be signaled when a page
+                fault occurs.
+
+@Input          psContext      Memory context of the process that would like to
+                               be notified.
+@Input          ui32PID        The PID of the calling process.
+@Input          bRegister      If true, register. If false, de-register.
+@Return         PVRSRV_ERROR:  PVRSRV_OK on success. Otherwise, a
+                               PVRSRV_ERROR error code
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
+                       IMG_UINT32     ui32PID,
+                       IMG_BOOL       bRegister);
+
+/**************************************************************************/ /*!
+@Function       GetMaxDevMemSize
+@Description    Get the amount of device memory on the current platform
+                (memory size in bytes)
+@Output         puiLMASize            LMA memory size
+@Output         puiUMASize            UMA memory size
+@Return         Error code
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+GetMaxDevMemSize(SHARED_DEV_CONNECTION psConnection,
+		 IMG_DEVMEM_SIZE_T *puiLMASize,
+		 IMG_DEVMEM_SIZE_T *puiUMASize);
+
+#endif /* #ifndef SRVCLIENT_DEVICEMEM_CLIENT_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicemem_history_shared.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicemem_history_shared.h
new file mode 100644
index 0000000..03f1765
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicemem_history_shared.h
@@ -0,0 +1,57 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory History shared definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared (client/server) definitions related to the Devicemem History
+                functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_HISTORY_SHARED_H
+#define DEVICEMEM_HISTORY_SHARED_H
+
+/* structure used inside MEMDESC to hold the allocation name until
+ * the allocation is unmapped
+ */
+typedef struct _DEVICEMEM_HISTORY_MEMDESC_DATA_
+{
+	IMG_CHAR szText[DEVICEMEM_HISTORY_TEXT_BUFSZ];
+	IMG_UINT32 ui32AllocationIndex;
+} DEVICEMEM_HISTORY_MEMDESC_DATA;
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicemem_pdump.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicemem_pdump.h
new file mode 100644
index 0000000..cbf948d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicemem_pdump.h
@@ -0,0 +1,346 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management PDump internal
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal interface to PDump device memory management
+                functions that are shared between client and server code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_PDUMP_H_
+#define _DEVICEMEM_PDUMP_H_
+
+#include "devicemem.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+
+#if defined(PDUMP)
+/*
+ * DevmemPDumpLoadMem()
+ *
+ * takes a memory descriptor, offset, and size, and takes the current
+ * contents of the memory at that location and writes it to the prm
+ * pdump file, and emits a pdump LDB to load the data from that file.
+ * The intention here is that the contents of the simulated buffer
+ * upon pdump playback will be made to be the same as they are when
+ * this command is run, enabling pdump of cases where the memory has
+ * been modified externally, i.e. by the host cpu or by a third
+ * party.
+ */
+extern void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadZeroMem()
+ *
+ * as DevmemPDumpLoadMem() but the PDump allocation will be populated
+ * with zeros from the zero page in the parameter stream
+ */
+extern void
+DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadMemValue32()
+ * 
+ * As above but dumps the value at a dword-aligned address in plain
+ * text to the pdump script2 file. Useful for patching a buffer at
+ * pdump playback by simply editing the script output file.
+ * 
+ * (The same functionality can be achieved by the above function but
+ *  the binary PARAM file must be patched in that case.)
+ */
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT32 ui32Value,
+                        PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadMemValue64()
+ *
+ * As above but dumps the 64-bit value at a dword-aligned address in plain
+ * text to the pdump script2 file. Useful for patching a buffer at
+ * pdump playback by simply editing the script output file.
+ *
+ * (The same functionality can be achieved by the above function but
+ *  the binary PARAM file must be patched in that case.)
+ */
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT64 ui64Value,
+                        PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpPageCatBaseToSAddr()
+ *
+ * Returns the symbolic address of a piece of memory represented
+ * by an offset into the mem descriptor.
+ */
+extern PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC		*psMemDesc,
+							  IMG_DEVMEM_OFFSET_T	*puiMemOffset,
+							  IMG_CHAR				*pszName,
+							  IMG_UINT32			ui32Size);
+
+/*
+ * DevmemPDumpSaveToFile()
+ *
+ * emits a pdump SAB to cause the current contents of the memory to be
+ * written to the given file during playback
+ */
+extern void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_DEVMEM_SIZE_T uiSize,
+                      const IMG_CHAR *pszFilename,
+                      IMG_UINT32 uiFileOffset);
+
+/*
+ * DevmemPDumpSaveToFileVirtual()
+ *
+ * emits a pdump SAB, just like DevmemPDumpSaveToFile(), but uses the
+ * virtual address and device MMU context to cause the pdump player to
+ * traverse the MMU page tables itself.
+ */
+extern void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+                             IMG_DEVMEM_OFFSET_T uiOffset,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             const IMG_CHAR *pszFilename,
+							 IMG_UINT32 ui32FileOffset,
+							 IMG_UINT32 ui32PdumpFlags);
+
+
+/*
+ *
+ * DevmemPDumpDevmemPol32()
+ *
+ * writes a PDump 'POL' command to wait for a masked 32-bit memory
+ * location to become the specified value
+ */
+extern PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+                           IMG_DEVMEM_OFFSET_T uiOffset,
+                           IMG_UINT32 ui32Value,
+                           IMG_UINT32 ui32Mask,
+                           PDUMP_POLL_OPERATOR eOperator,
+                           PDUMP_FLAGS_T ui32PDumpFlags);
+
+/*
+ * DevmemPDumpCBP()
+ *
+ * Polls for space in circular buffer. Reads the read offset
+ * from memory and waits until there is enough space to write
+ * the packet.
+ *
+ * psMemDesc     - MemDesc which contains the read offset
+ * uiReadOffset  - Offset into MemDesc to the read offset
+ * uiWriteOffset - Current write offset
+ * uiPacketSize  - Size of packet to write
+ * uiBufferSize  - Size of circular buffer
+ */
+extern PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+				IMG_DEVMEM_OFFSET_T uiReadOffset,
+				IMG_DEVMEM_OFFSET_T uiWriteOffset,
+				IMG_DEVMEM_SIZE_T uiPacketSize,
+				IMG_DEVMEM_SIZE_T uiBufferSize);
+
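+/* Worked example (hypothetical values): with uiBufferSize = 1024,
+ * uiWriteOffset = 960 and uiPacketSize = 128, the emitted CBP makes
+ * playback poll the read offset held in the MemDesc until the packet
+ * can be written (wrapping at the end of the buffer) without
+ * overtaking the reader.
+ */
+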
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMem)
+#endif
+static INLINE void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue32)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT32 ui32Value,
+                        PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue64)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT64 ui64Value,
+                        PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui64Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT32 ui32Value,
+                        PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpPageCatBaseToSAddr)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC		*psMemDesc,
+							  IMG_DEVMEM_OFFSET_T	*puiMemOffset,
+							  IMG_CHAR				*pszName,
+							  IMG_UINT32			ui32Size)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(puiMemOffset);
+	PVR_UNREFERENCED_PARAMETER(pszName);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpSaveToFile)
+#endif
+static INLINE void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_DEVMEM_SIZE_T uiSize,
+                      const IMG_CHAR *pszFilename,
+                      IMG_UINT32 uiFileOffset)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(pszFilename);
+	PVR_UNREFERENCED_PARAMETER(uiFileOffset);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpSaveToFileVirtual)
+#endif
+static INLINE void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+                             IMG_DEVMEM_OFFSET_T uiOffset,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             const IMG_CHAR *pszFilename,
+							 IMG_UINT32 ui32FileOffset,
+							 IMG_UINT32 ui32PdumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(pszFilename);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32PdumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpDevmemPol32)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+                           IMG_DEVMEM_OFFSET_T uiOffset,
+                           IMG_UINT32 ui32Value,
+                           IMG_UINT32 ui32Mask,
+                           PDUMP_POLL_OPERATOR eOperator,
+                           PDUMP_FLAGS_T ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpCBP)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+				IMG_DEVMEM_OFFSET_T uiReadOffset,
+				IMG_DEVMEM_OFFSET_T uiWriteOffset,
+				IMG_DEVMEM_SIZE_T uiPacketSize,
+				IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+
+	return PVRSRV_OK;
+}
+#endif	/* PDUMP */
+#endif	/* _DEVICEMEM_PDUMP_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicemem_utils.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicemem_utils.h
new file mode 100644
index 0000000..2e7ff84
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicemem_utils.h
@@ -0,0 +1,456 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management internal utility functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally by device memory management
+                code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_UTILS_H_
+#define _DEVICEMEM_UTILS_H_
+
+#include "devicemem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "ra.h"
+#include "osfunc.h"
+#include "lock.h"
+#include "osmmap.h"
+#include "devicemem_utils.h"
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "mm_common.h"
+#include "devicemem_history_shared.h"
+#endif
+
+#define DEVMEM_HEAPNAME_MAXLENGTH 160
+
+#if defined(DEVMEM_DEBUG) && defined(REFCOUNT_DEBUG)
+#define DEVMEM_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_ERROR, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define DEVMEM_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+/* If we need a "hMapping" but we don't have a server-side mapping, we
+   poison the entry with this value so that it's easily recognised in
+   the debugger.  Note that this is potentially a valid handle, but
+   then so is NULL, which is no better, indeed worse, as it's not
+   obvious in the debugger.  The value doesn't matter.  We _never_ use
+   it (and because it's valid, we never assert it isn't this) but it's
+   nice to have a value in the source code that we can grep for when
+   things go wrong. */
+#define LACK_OF_MAPPING_POISON ((IMG_HANDLE)0x6116dead)
+#define LACK_OF_RESERVATION_POISON ((IMG_HANDLE)0x7117dead)
+
+struct _DEVMEM_CONTEXT_ {
+
+	SHARED_DEV_CONNECTION hDevConnection;
+	
+    /* Number of heaps that have been created in this context
+       (regardless of whether they have allocations) */
+    IMG_UINT32 uiNumHeaps;
+
+    /*
+      Each "DEVMEM_CONTEXT" has a counterpart in the server,
+      which is responsible for handling the mapping into device MMU.
+      We have a handle to that here.
+    */
+    IMG_HANDLE hDevMemServerContext;
+
+    /* Number of automagically created heaps in this context,
+       i.e. those that are born at context creation time from the
+       chosen "heap config" or "blueprint" */
+    IMG_UINT32 uiAutoHeapCount;
+
+    /* pointer to array of such heaps */
+    struct _DEVMEM_HEAP_ **ppsAutoHeapArray;
+
+    /* The cache line size for use when allocating memory, as it is not queryable on the client side */
+    IMG_UINT32 ui32CPUCacheLineSize;
+
+	/* Private data handle for device specific data */
+	IMG_HANDLE hPrivData;
+
+	/* Memory allocated to be used for MCU fences */
+	DEVMEM_MEMDESC		*psMCUFenceMemDesc;
+};
+
+
+typedef enum
+{
+	DEVMEM_HEAP_TYPE_UNKNOWN = 0,
+	DEVMEM_HEAP_TYPE_USER_MANAGED,
+	DEVMEM_HEAP_TYPE_KERNEL_MANAGED,
+	DEVMEM_HEAP_TYPE_RA_MANAGED,
+} DEVMEM_HEAP_TYPE;
+
+struct _DEVMEM_HEAP_ {
+	/* Name of heap - for debug and lookup purposes. */
+	IMG_CHAR *pszName;
+
+	/* Number of live imports in the heap */
+	ATOMIC_T hImportCount;
+
+	/*
+	* Base address and size of heap, required by clients due to some requesters
+	* not being full range
+	*/
+	IMG_DEV_VIRTADDR sBaseAddress;
+	DEVMEM_SIZE_T uiSize;
+
+	/* The heap type, describing if the space is managed by the user or an RA*/
+	DEVMEM_HEAP_TYPE eHeapType;
+
+	/* This RA is for managing sub-allocations in virtual space.  Two
+	more RAs will be used under the hood for managing the coarser
+	allocation of virtual space from the heap, and also for
+	managing the physical backing storage. */
+	RA_ARENA *psSubAllocRA;
+	IMG_CHAR *pszSubAllocRAName;
+	/*
+	This RA is for the coarse allocation of virtual space from the heap
+	*/
+	RA_ARENA *psQuantizedVMRA;
+	IMG_CHAR *pszQuantizedVMRAName;
+
+	/* We also need to store a copy of the quantum size in order to
+	feed this down to the server */
+	IMG_UINT32 uiLog2Quantum;
+
+	/* Store a copy of the minimum import alignment */
+	IMG_UINT32 uiLog2ImportAlignment;
+
+	/* The relationship between tiled heap alignment and heap byte-stride
+	 * (dependent on tiling mode, abstracted here) */
+	IMG_UINT32 uiLog2TilingStrideFactor;
+
+	/* The parent memory context for this heap */
+	struct _DEVMEM_CONTEXT_ *psCtx;
+
+	/* Lock to protect this structure */
+	POS_LOCK hLock;
+
+	/*
+	Each "DEVMEM_HEAP" has a counterpart in the server,
+	which is responsible for handling the mapping into device MMU.
+	We have a handle to that here.
+	*/
+	IMG_HANDLE hDevMemServerHeap;
+};
+
+typedef IMG_UINT32 DEVMEM_PROPERTIES_T;                 /*!< Typedef for Devicemem properties */
+#define DEVMEM_PROPERTIES_EXPORTABLE        (1UL<<0)    /*!< Is it exportable? */
+#define DEVMEM_PROPERTIES_IMPORTED          (1UL<<1)    /*!< Is it imported from another process? */
+#define DEVMEM_PROPERTIES_SUBALLOCATABLE    (1UL<<2)    /*!< Is it suballocatable? */
+#define DEVMEM_PROPERTIES_UNPINNED          (1UL<<3)    /*!< Is it currently unpinned? */
+#define DEVMEM_PROPERTIES_IMPORT_IS_ZEROED  (1UL<<4)	/*!< Is the memory fully zeroed? */
+#define DEVMEM_PROPERTIES_IMPORT_IS_CLEAN   (1UL<<5)	/*!< Is the memory clean, i.e. not been used before? */
+#define DEVMEM_PROPERTIES_SECURE            (1UL<<6)    /*!< Is it a special secure buffer? No CPU maps allowed! */
+
+
+typedef struct _DEVMEM_DEVICE_IMPORT_ {
+	DEVMEM_HEAP *psHeap;			/*!< Heap this import is bound to */
+	IMG_DEV_VIRTADDR sDevVAddr;		/*!< Device virtual address of the import */
+	IMG_UINT32 ui32RefCount;		/*!< Refcount of the device virtual address */
+	IMG_HANDLE hReservation;		/*!< Device memory reservation handle */
+	IMG_HANDLE hMapping;			/*!< Device mapping handle */
+	IMG_BOOL bMapped;				/*!< Is this import mapped? */
+	POS_LOCK hLock;					/*!< Lock to protect the device import */
+} DEVMEM_DEVICE_IMPORT;
+
+typedef struct _DEVMEM_CPU_IMPORT_ {
+	void *pvCPUVAddr;			/*!< CPU virtual address of the import */
+	IMG_UINT32 ui32RefCount;		/*!< Refcount of the CPU virtual address */
+	IMG_HANDLE hOSMMapData;			/*!< CPU mapping handle */
+	POS_LOCK hLock;					/*!< Lock to protect the CPU import */
+} DEVMEM_CPU_IMPORT;
+
+typedef struct _DEVMEM_IMPORT_ {
+	SHARED_DEV_CONNECTION hDevConnection;
+	IMG_DEVMEM_ALIGN_T uiAlign;			/*!< Alignment of the PMR */
+	DEVMEM_SIZE_T uiSize;				/*!< Size of import */
+    ATOMIC_T hRefCount;					/*!< Refcount for this import */
+    DEVMEM_PROPERTIES_T uiProperties;	/*!< Stores properties of an import like if
+    										it is exportable, pinned or suballocatable */
+    IMG_HANDLE hPMR;					/*!< Handle to the PMR */
+    DEVMEM_FLAGS_T uiFlags;				/*!< Flags for this import */
+    POS_LOCK hLock;						/*!< Lock to protect the import */
+
+	DEVMEM_DEVICE_IMPORT sDeviceImport;	/*!< Device specifics of the import */
+	DEVMEM_CPU_IMPORT sCPUImport;		/*!< CPU specifics of the import */
+#if defined(PDUMP)
+	IMG_CHAR *pszAnnotation;
+#endif
+} DEVMEM_IMPORT;
+
+typedef struct _DEVMEM_DEVICE_MEMDESC_ {
+	IMG_DEV_VIRTADDR sDevVAddr;		/*!< Device virtual address of the allocation */
+	IMG_UINT32 ui32RefCount;		/*!< Refcount of the device virtual address */
+	POS_LOCK hLock;					/*!< Lock to protect device memdesc */
+} DEVMEM_DEVICE_MEMDESC;
+
+typedef struct _DEVMEM_CPU_MEMDESC_ {
+	void *pvCPUVAddr;			/*!< CPU virtual address of the import */
+	IMG_UINT32 ui32RefCount;		/*!< Refcount of the device CPU address */
+	POS_LOCK hLock;					/*!< Lock to protect CPU memdesc */
+} DEVMEM_CPU_MEMDESC;
+
+struct _DEVMEM_MEMDESC_ {
+    DEVMEM_IMPORT *psImport;				/*!< Import this memdesc is on */
+    IMG_DEVMEM_OFFSET_T uiOffset;			/*!< Offset into import where our allocation starts */
+	IMG_DEVMEM_SIZE_T uiAllocSize;          /*!< Size of the allocation */
+    ATOMIC_T hRefCount;						/*!< Refcount of the memdesc */
+    POS_LOCK hLock;							/*!< Lock to protect memdesc */
+    IMG_HANDLE hPrivData;
+
+	DEVMEM_DEVICE_MEMDESC sDeviceMemDesc;	/*!< Device specifics of the memdesc */
+	DEVMEM_CPU_MEMDESC sCPUMemDesc;		/*!< CPU specifics of the memdesc */
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	DEVICEMEM_HISTORY_MEMDESC_DATA sTraceData;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+    IMG_HANDLE hRIHandle;					/*!< Handle to RI information */
+#endif
+};
+
+/* The physical descriptor used to store handles and information of
+ * device physical allocations. */
+struct _DEVMEMX_PHYS_MEMDESC_ {
+	IMG_UINT32 uiNumPages;					/*!< Number of pages that the import has*/
+	IMG_UINT32 uiLog2PageSize;				/*!< Page size */
+	ATOMIC_T hRefCount;						/*!< Refcount of the memdesc */
+	DEVMEM_FLAGS_T uiFlags;					/*!< Flags for this import */
+	IMG_HANDLE hPMR;						/*!< Handle to the PMR */
+	DEVMEM_CPU_IMPORT sCPUImport;			/*!< CPU specifics of the memdesc */
+	DEVMEM_BRIDGE_HANDLE hBridge;			/*!< Bridge connection for the server */
+};
+
+/* The virtual descriptor used to store handles and information of a
+ * device virtual range and the mappings to it. */
+struct _DEVMEMX_VIRT_MEMDESC_ {
+	IMG_UINT32 uiNumPages;					/*!< Number of pages that the import has*/
+	DEVMEM_FLAGS_T uiFlags;					/*!< Flags for this import */
+	DEVMEMX_PHYSDESC **apsPhysDescTable;		/*!< Table to store links to physical descs */
+	DEVMEM_DEVICE_IMPORT sDeviceImport;		/*!< Device specifics of the memdesc */
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	DEVICEMEM_HISTORY_MEMDESC_DATA sTraceData;	/*!< To track mappings in this range */
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	IMG_HANDLE hRIHandle;					/*!< Handle to RI information */
+#endif
+};
+
+#define DEVICEMEM_UTILS_NO_ADDRESS 0
+
+/******************************************************************************
+@Function       _DevmemValidateParams
+@Description    Check if flags are conflicting and if align is a size multiple.
+
+@Input          uiSize      Size of the import.
+@Input          uiAlign     Alignment of the import.
+@Input          puiFlags    Pointer to the flags for the import.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize,
+                                   IMG_DEVMEM_ALIGN_T uiAlign,
+                                   DEVMEM_FLAGS_T *puiFlags);
+
+/******************************************************************************
+@Function       _DevmemImportStructAlloc
+@Description    Allocates memory for an import struct. Does not allocate a PMR!
+                Create locks for CPU and Devmem mappings.
+
+@Input          hBridge       Bridge to use for calls from the import.
+@Input          ppsImport     The import to allocate.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection,
+									  DEVMEM_IMPORT **ppsImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructInit
+@Description    Initialises the import struct with the given parameters.
+                Sets its refcount to 1.
+
+@Input          psImport     The import to initialise.
+@Input          uiSize       Size of the import.
+@Input          uiAlign      Alignment of allocations in the import.
+@Input          uiMapFlags   Mapping flags for the import.
+@Input          hPMR         Reference to the PMR of this import struct.
+@Input          uiProperties Properties of the import. Is it exportable,
+                              imported, suballocatable, unpinned?
+******************************************************************************/
+void _DevmemImportStructInit(DEVMEM_IMPORT *psImport,
+							 IMG_DEVMEM_SIZE_T uiSize,
+							 IMG_DEVMEM_ALIGN_T uiAlign,
+							 PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+							 IMG_HANDLE hPMR,
+							 DEVMEM_PROPERTIES_T uiProperties);
+
+/******************************************************************************
+@Function       _DevmemImportStructDevMap
+@Description    NEVER call after the last _DevmemMemDescRelease()
+                Maps the PMR referenced by the import struct to the device's
+                virtual address space.
+                Does nothing but increase the device mapping refcount if the
+                import struct was already mapped.
+
+@Input          psHeap    The heap to map to.
+@Input          bMap      Caller can choose if the import should be really
+                          mapped in the page tables or if just a virtual range
+                          should be reserved and the refcounts increased.
+@Input          psImport  The import we want to map.
+@Input          uiOptionalMapAddress  An optional address to map to.
+                                      Pass DEVICEMEM_UTILS_NO_ADDRESS if not used.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
+									   IMG_BOOL bMap,
+									   DEVMEM_IMPORT *psImport,
+									   IMG_UINT64 uiOptionalMapAddress);
+
+/******************************************************************************
+@Function       _DevmemImportStructDevUnmap
+@Description    Unmaps the PMR referenced by the import struct from the
+                device's virtual address space.
+                If this was not the last remaining device mapping on the
+                import struct, only the device mapping refcount is decreased.
+******************************************************************************/
+void _DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructCPUMap
+@Description    NEVER call after the last _DevmemMemDescRelease()
+                Maps the PMR referenced by the import struct to the CPU's
+                virtual address space.
+                Does nothing but increase the cpu mapping refcount if the
+                import struct was already mapped.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructCPUUnmap
+@Description    Unmaps the PMR referenced by the import struct from the CPU's
+                virtual address space.
+                If this was not the last remaining CPU mapping on the import
+                struct only the cpu mapping refcount is decreased.
+******************************************************************************/
+void _DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport);
+
+
+/******************************************************************************
+@Function       _DevmemImportStructAcquire
+@Description    Acquire an import struct by increasing its refcount.
+******************************************************************************/
+void _DevmemImportStructAcquire(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructRelease
+@Description    Reduces the refcount of the import struct.
+                Destroys the import in the case it was the last reference.
+                Destroys underlying PMR if this import was the last reference
+                to it.
+@return         A boolean to signal if the import was destroyed. True = yes.
+******************************************************************************/
+IMG_BOOL _DevmemImportStructRelease(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportDiscard
+@Description    Discard a created but uninitialised import structure.
+                This must only be called before _DevmemImportStructInit
+                after which _DevmemImportStructRelease must be used to
+                "free" the import structure.
+******************************************************************************/
+void _DevmemImportDiscard(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemMemDescAlloc
+@Description    Allocates a MemDesc and creates its various locks.
+                Zeroes the allocated memory.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc);
+
+/******************************************************************************
+@Function       _DevmemMemDescInit
+@Description    Sets the given offset and import struct fields in the MemDesc.
+                Initialises refcount to 1 and other values to 0.
+
+@Input          psMemDesc    MemDesc to initialise.
+@Input          uiOffset     Offset in the import structure.
+@Input          psImport     Import the MemDesc is on.
+@Input          uiAllocSize  Size of the allocation
+******************************************************************************/
+void _DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc,
+						IMG_DEVMEM_OFFSET_T uiOffset,
+						DEVMEM_IMPORT *psImport,
+						IMG_DEVMEM_SIZE_T uiAllocSize);
+
+/******************************************************************************
+@Function       _DevmemMemDescAcquire
+@Description    Acquires the MemDesc by increasing its refcount.
+******************************************************************************/
+void _DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc);
+
+/******************************************************************************
+@Function       _DevmemMemDescRelease
+@Description    Releases the MemDesc by reducing its refcount.
+                Destroys the MemDesc if its refcount is 0.
+                Destroys the import struct the MemDesc is on if that was the
+                last MemDesc on the import, probably following the destruction
+                of the underlying PMR.
+@return         A boolean to signal if the MemDesc was destroyed. True = yes.
+******************************************************************************/
+IMG_BOOL _DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc);
+
+/******************************************************************************
+@Function       _DevmemMemDescDiscard
+@Description    Discard a created but uninitialised MemDesc structure.
+                This must only be called before _DevmemMemDescInit
+                after which _DevmemMemDescRelease must be used to
+                "free" the MemDesc structure.
+******************************************************************************/
+void _DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc);
+
+#endif /* _DEVICEMEM_UTILS_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicememx.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicememx.h
new file mode 100644
index 0000000..b0646cc
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicememx.h
@@ -0,0 +1,176 @@
+/*************************************************************************/ /*!
+@File
+@Title          X Device Memory Management core internal
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal interface for extended device memory management.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEMX_H
+#define DEVICEMEMX_H
+
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+#include "devicemem_utils.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "osfunc.h"
+
+/* DevmemXAllocPhysical()
+ *
+ * Allocate physical device memory and return a physical
+ * descriptor for it.
+ */
+PVRSRV_ERROR
+DevmemXAllocPhysical(DEVMEM_CONTEXT *psCtx,
+                    IMG_UINT32 uiNumPages,
+                    IMG_UINT32 uiLog2PageSize,
+                    DEVMEM_FLAGS_T uiFlags,
+                    const IMG_CHAR *pszText,
+                    DEVMEMX_PHYSDESC **ppsPhysDesc);
+
+/* DevmemXReleasePhysical()
+ *
+ * Removes a physical device allocation if all references
+ * to it are dropped, otherwise just decreases the refcount.
+ */
+void
+DevmemXReleasePhysical(DEVMEMX_PHYSDESC *psPhysDesc);
+
+/* DevmemXAllocVirtual()
+ *
+ * Allocate and reserve a device virtual range and return
+ * a virtual descriptor for it.
+ */
+PVRSRV_ERROR
+DevmemXAllocVirtual(DEVMEM_HEAP* hHeap,
+                   IMG_UINT32 uiNumPages,
+                   DEVMEM_FLAGS_T uiFlags,
+                   const IMG_CHAR *pszText,
+                   DEVMEMX_VIRTDESC **ppsVirtDesc,
+                   IMG_DEV_VIRTADDR *psVirtAddr);
+
+/* DevmemXFreeVirtual()
+ *
+ * Removes a device virtual range if all mappings on it
+ * have been removed.
+ */
+PVRSRV_ERROR
+DevmemXFreeVirtual(DEVMEMX_VIRTDESC *psVirtDesc);
+
+/* DevmemXMapVirtualRange()
+ *
+ * Map memory from a physical descriptor into a
+ * virtual range.
+ */
+PVRSRV_ERROR
+DevmemXMapVirtualRange(IMG_UINT32 ui32PageCount,
+                      DEVMEMX_PHYSDESC *psPhysDesc,
+                      IMG_UINT32 ui32PhysOffset,
+                      DEVMEMX_VIRTDESC *psVirtDesc,
+                      IMG_UINT32 ui32VirtOffset);
+
+/* DevmemXUnmapVirtualRange()
+ *
+ * Unmap pages from a device virtual range.
+ */
+PVRSRV_ERROR
+DevmemXUnmapVirtualRange(IMG_UINT32 ui32PageCount,
+                        DEVMEMX_VIRTDESC *psVirtDesc,
+                        IMG_UINT32 ui32VirtPgOffset);
+
+/* DevmemXMapPhysicalToCPU()
+ *
+ * Map a full physical descriptor to CPU space.
+ */
+PVRSRV_ERROR
+DevmemXMapPhysicalToCPU(DEVMEMX_PHYSDESC *psMemAllocPhys,
+                       IMG_CPU_VIRTADDR *psVirtAddr);
+
+/* DevmemXUnmapPhysicalToCPU()
+ *
+ * Remove the CPU mapping from the descriptor.
+ */
+PVRSRV_ERROR
+DevmemXUnmapPhysicalToCPU(DEVMEMX_PHYSDESC *psMemAllocPhys);
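+
+/* A minimal usage sketch of the DevmemX API above (an illustration, not
+ * part of the interface contract), assuming a valid context psCtx, heap
+ * hHeap and allocation flags uiFlags; error handling omitted:
+ *
+ *     DEVMEMX_PHYSDESC *psPhys;
+ *     DEVMEMX_VIRTDESC *psVirt;
+ *     IMG_DEV_VIRTADDR sDevAddr;
+ *
+ *     DevmemXAllocPhysical(psCtx, 16, 12, uiFlags, "phys", &psPhys);
+ *     DevmemXAllocVirtual(hHeap, 16, uiFlags, "virt", &psVirt, &sDevAddr);
+ *     DevmemXMapVirtualRange(16, psPhys, 0, psVirt, 0);
+ *
+ *     ... use the memory through sDevAddr ...
+ *
+ *     DevmemXUnmapVirtualRange(16, psVirt, 0);
+ *     DevmemXFreeVirtual(psVirt);
+ *     DevmemXReleasePhysical(psPhys);
+ */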
+
+/* DevmemXCreateDevmemMemDesc()
+ *
+ * DEPRECATED!
+ * DO NOT USE IN PRODUCTION DRIVER!
+ *
+ * Create a devmem memdesc from a physical and
+ * virtual descriptor.
+ * Always destroy with DevmemXFreeDevmemMemDesc().
+ */
+
+PVRSRV_ERROR
+DevmemXCreateDevmemMemDesc(const IMG_DEV_VIRTADDR sVirtualAddress,
+                            DEVMEM_MEMDESC **ppsMemDesc);
+
+/* DevmemXFreeDevmemMemDesc()
+ *
+ * DEPRECATED!
+ * DO NOT USE IN PRODUCTION DRIVER!
+ *
+ * Free the memdesc again. Has no impact on the underlying
+ * physical and virtual descriptors.
+ */
+PVRSRV_ERROR
+DevmemXFreeDevmemMemDesc(DEVMEM_MEMDESC *psMemDesc);
+
+PVRSRV_ERROR
+_DevmemXFlagCompatibilityCheck(IMG_UINT32 uiPhysFlags,
+                              IMG_UINT32 uiVirtFlags);
+
+PVRSRV_ERROR
+_DevmemXPhysDescAlloc(DEVMEMX_PHYSDESC **ppsPhysDesc);
+
+void
+_DevmemXPhysDescInit(DEVMEMX_PHYSDESC *psPhysDesc,
+                    IMG_HANDLE hPMR,
+                    IMG_UINT32 uiNumPages,
+                    IMG_UINT32 uiLog2PageSize,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    IMG_HANDLE hBridge);
+
+void
+_DevmemXPhysDescFree(DEVMEMX_PHYSDESC *psPhysDesc);
+
+#endif /* DEVICEMEMX_H */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicememx_pdump.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicememx_pdump.h
new file mode 100644
index 0000000..81743ed
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/devicememx_pdump.h
@@ -0,0 +1,81 @@
+/*************************************************************************/ /*!
+@File
+@Title          X Device Memory Management PDump internal
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal interface to PDump device memory management
+                functions that are shared between client and server code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEMX_PDUMP_H_
+#define _DEVICEMEMX_PDUMP_H_
+
+#include "devicememx.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+
+#if defined(PDUMP)
+/*
+ * DevmemXPDumpLoadMem()
+ *
+ * Same as DevmemPDumpLoadMem().
+ */
+extern void
+DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys,
+                    IMG_DEVMEM_OFFSET_T uiOffset,
+                    IMG_DEVMEM_SIZE_T uiSize,
+                    PDUMP_FLAGS_T uiPDumpFlags);
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemXPDumpLoadMem)
+#endif
+
+static INLINE void
+DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys,
+                    IMG_DEVMEM_OFFSET_T uiOffset,
+                    IMG_DEVMEM_SIZE_T uiSize,
+                    PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDescPhys);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+#endif	/* PDUMP */
+#endif	/* _DEVICEMEMX_PDUMP_H_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/htbuffer.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/htbuffer.h
new file mode 100644
index 0000000..888a041
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/htbuffer.h
@@ -0,0 +1,129 @@
+/*************************************************************************/ /*!
+@File           htbuffer.h
+@Title          Host Trace Buffer shared API.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provides the mechanism to
+                retrieve the trace data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_H__
+#define __HTBUFFER_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "htbuffer_sf.h"
+#include "htbuffer_types.h"
+#include "htbuffer_init.h"
+
+#if defined(__KERNEL__)
+#define HTBLOGK(SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple(0, SF, ## args); } while (0)
+#else
+#define HTBLOG(handle, SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple(handle, SF, ## args); } while (0)
+#endif
+
+/* macros to cast 64 or 32-bit pointers into 32-bit integer components for Host Trace */
+#define HTBLOG_PTR_BITS_HIGH(p) ((IMG_UINT32)((((IMG_UINT64)((uintptr_t)p))>>32)&0xffffffff))
+#define HTBLOG_PTR_BITS_LOW(p)  ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff))
+
+/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */
+#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff))
+#define HTBLOG_U64_BITS_LOW(u)  ((IMG_UINT32)(u&0xffffffff))
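+
+/* Example: assuming a log event SF whose format takes two 32-bit
+ * parameters, a 64-bit value ui64Addr can be logged from kernel code as
+ *
+ *     HTBLOGK(SF, HTBLOG_U64_BITS_HIGH(ui64Addr),
+ *             HTBLOG_U64_BITS_LOW(ui64Addr));
+ */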
+
+/*************************************************************************/ /*!
+ @Function      HTBLog
+ @Description   Record a Host Trace Buffer log event
+
+ @Input         hSrvHandle      Server handle
+
+ @Input         PID             The PID of the process the event is associated
+                                with. This is provided as an argument rather
+                                than querying internally so that events
+                                associated with a particular process, but
+                                performed by another, can be logged correctly.
+
+ @Input         ui32TimeStampus The timestamp in us for this event
+
+ @Input         SF              The log event ID
+
+ @Input         ...             Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 ui32TimeStampus, IMG_UINT32 SF, ...);
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLogSimple
+ @Description   Record a Host Trace Buffer log event with implicit PID and Timestamp
+
+ @Input         SF              The log event ID
+
+ @Input         ...             Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...);
+
+
+
+/*  DEBUG log group enable */
+#if !defined(HTB_DEBUG_LOG_GROUP)
+#undef HTB_LOG_TYPE_DBG    /* No trace statements in this log group should be checked in */
+#define HTB_LOG_TYPE_DBG    __BUILDERROR__
+#endif
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_H__ */
+/*****************************************************************************
+ End of file (htbuffer.h)
+*****************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/htbuffer_init.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/htbuffer_init.h
new file mode 100644
index 0000000..8e782ff
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/htbuffer_init.h
@@ -0,0 +1,115 @@
+/*************************************************************************/ /*!
+@File           htbuffer_init.h
+@Title          Host Trace Buffer functions needed for Services initialisation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_INIT_H__
+#define __HTBUFFER_INIT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+
+/*************************************************************************/ /*!
+ @Function      HTBConfigure
+ @Description   Configure the Host Trace Buffer.
+                Once these parameters are set they may not be changed
+
+ @Input         hSrvHandle      Server Handle
+
+ @Input         pszBufferName   Name to use for the TL buffer, this will be
+                                required to request trace data from the TL
+
+ @Input         ui32BufferSize  Requested TL buffer size in bytes
+
+ @Return        eError          Error code returned by the internal
+                                services call
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBConfigure(
+	IMG_HANDLE hSrvHandle,
+	IMG_CHAR * pszBufferName,
+	IMG_UINT32 ui32BufferSize
+);
+
+/*************************************************************************/ /*!
+ @Function      HTBControl
+ @Description   Update the configuration of the Host Trace Buffer
+
+ @Input         hSrvHandle      Server Handle
+
+ @Input         ui32NumFlagGroups Number of group-enable flag words
+
+ @Input         aui32GroupEnable  Flag words controlling which groups are logged
+
+ @Input         ui32LogLevel    Log level to record
+
+ @Input         ui32EnablePID   PID to enable logging for a specific process
+
+ @Input         eLogMode        Enable logging for all or specific processes
+
+ @Input         eOpMode         Control what trace data is dropped if the TL
+                                buffer is full
+
+ @Return        eError          Error code returned by the internal
+                                services call
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBControl(
+	IMG_HANDLE hSrvHandle,
+	IMG_UINT32 ui32NumFlagGroups,
+	IMG_UINT32 * aui32GroupEnable,
+	IMG_UINT32 ui32LogLevel,
+	IMG_UINT32 ui32EnablePID,
+	HTB_LOGMODE_CTRL eLogMode,
+	HTB_OPMODE_CTRL eOpMode
+);
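+
+/* A minimal setup sketch (illustrative name and size, error handling
+ * omitted), assuming a connected hSrvHandle and eLogMode/eOpMode values
+ * taken from the HTB_LOGMODE_CTRL/HTB_OPMODE_CTRL enums in
+ * htbuffer_types.h:
+ *
+ *     IMG_UINT32 ui32Enable = 0xFFFFFFFF;
+ *
+ *     HTBConfigure(hSrvHandle, "PVRHTBuffer", 64 * 1024);
+ *     HTBControl(hSrvHandle, 1, &ui32Enable, 0, 0, eLogMode, eOpMode);
+ */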
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_INIT_H__ */
+/*****************************************************************************
+ End of file (htbuffer_init.h)
+*****************************************************************************/
+
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/osmmap.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/osmmap.h
new file mode 100644
index 0000000..bc83151
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/osmmap.h
@@ -0,0 +1,123 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS Interface for mapping PMRs into CPU space.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS abstraction for the mmap2 interface for mapping PMRs into
+                User Mode memory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _OSMMAP_H_
+#define _OSMMAP_H_
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/**************************************************************************/ /*!
+@Function       OSMMapPMR
+@Description    Maps the specified PMR into CPU memory so that it may be
+                accessed by the user process.
+                Whether the memory is mapped read only, read/write, or not at
+                all, is dependent on the PMR itself.
+                The PMR handle is opaque to the user, and lower levels of this
+                stack ensure that the handle is private to this process, such
+                that this API cannot be abused to gain access to another
+                process's PMRs. The OS implementation of this function should
+                return the virtual address and length for the User to use. The
+                "PrivData" is to be stored opaquely by the caller (N.B. the
+                caller should make no assumptions about it; in particular,
+                NULL is a valid handle) and given back to the call to
+                OSMUnmapPMR.
+                The OS implementation is free to use the PrivData handle for any
+                purpose it sees fit.
+@Input          hBridge              The bridge handle.
+@Input          hPMR                 The handle of the PMR to be mapped.
+@Input          uiPMRLength          The size of the PMR.
+@Input          uiFlags              Flags indicating how the mapping should
+                                     be done (read-only, etc). These may not
+                                     be honoured if the PMR does not permit
+                                     them.
+@Output         phOSMMapPrivDataOut  Returned private data.
+@Output         ppvMappingAddressOut The returned mapping.
+@Output         puiMappingLengthOut  The size of the returned mapping.
+@Return         PVRSRV_OK on success, failure code otherwise.
+ */ /**************************************************************************/
+extern PVRSRV_ERROR
+OSMMapPMR(IMG_HANDLE hBridge,
+          IMG_HANDLE hPMR,
+          IMG_DEVMEM_SIZE_T uiPMRLength,
+          IMG_UINT32 uiFlags,
+          IMG_HANDLE *phOSMMapPrivDataOut,
+          void **ppvMappingAddressOut,
+          size_t *puiMappingLengthOut);
+
+/**************************************************************************/ /*!
+@Function       OSMUnmapPMR
+@Description    Unmaps the specified PMR from CPU memory.
+                This function is the counterpart to OSMMapPMR.
+                The caller is required to pass the PMR handle back in along
+                with the same 3-tuple of information that was returned by the
+                call to OSMMapPMR in phOSMMapPrivDataOut.
+                It is possible to unmap only part of the original mapping
+                with this call, by specifying only the address range to be
+                unmapped in pvMappingAddress and uiMappingLength.
+@Input          hBridge              The bridge handle.
+@Input          hPMR                 The handle of the PMR to be unmapped.
+@Input          hOSMMapPrivData      The OS private data of the mapping.
+@Input          pvMappingAddress     The address to be unmapped.
+@Input          uiMappingLength      The size to be unmapped.
+@Return         None.
+ */ /**************************************************************************/
+/*
+   FIXME:
+   perhaps this function should take _only_ the hOSMMapPrivData arg,
+   and the implementation is required to store any of the other data
+   items that it requires to do the unmap?
+*/
+extern void
+OSMUnmapPMR(IMG_HANDLE hBridge,
+            IMG_HANDLE hPMR,
+            IMG_HANDLE hOSMMapPrivData,
+            void *pvMappingAddress,
+            size_t uiMappingLength);
+
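+/* A minimal map/unmap sketch, assuming valid hBridge and hPMR handles,
+ * a PMR length uiLen and mapping flags uiFlags; error handling omitted:
+ *
+ *     IMG_HANDLE hPrivData;
+ *     void *pvAddr;
+ *     size_t uiMappedLen;
+ *
+ *     OSMMapPMR(hBridge, hPMR, uiLen, uiFlags,
+ *               &hPrivData, &pvAddr, &uiMappedLen);
+ *     ... access the mapping through pvAddr ...
+ *     OSMUnmapPMR(hBridge, hPMR, hPrivData, pvAddr, uiMappedLen);
+ */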
+
+#endif /* _OSMMAP_H_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/proc_stats.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/proc_stats.h
new file mode 100644
index 0000000..6fbd642
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/proc_stats.h
@@ -0,0 +1,85 @@
+#ifndef PROC_STATS_H
+#define PROC_STATS_H
+
+
+/* X-Macro for Process stat keys */
+#define PVRSRV_PROCESS_STAT_KEY \
+	X(PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS, "Connections") \
+	X(PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS, "ConnectionsMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_OOMS, "RenderContextOutOfMemoryEvents") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_PRS, "RenderContextPartialRenders") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_GROWS, "RenderContextGrows") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS, "RenderContextPushGrows") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES, "RenderContextTAStores") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES, "RenderContext3DStores") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES, "RenderContextSHStores") \
+	X(PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES, "RenderContextCDMStores") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP, "ZSBufferRequestsByApp") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW, "ZSBufferRequestsByFirmware") \
+	X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP, "FreeListGrowRequestsByApp") \
+	X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW, "FreeListGrowRequestsByFirmware") \
+	X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT, "FreeListInitialPages") \
+	X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES, "FreeListMaxPages") \
+	X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \
+	X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \
+	X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, "MemoryUsageAllocPTMemoryUMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, "MemoryUsageAllocPTMemoryLMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, "MemoryUsageAllocGPUMemLMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES_MAX, "MemoryUsageAllocGPUMemLMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, "MemoryUsageAllocGPUMemUMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES_MAX, "MemoryUsageAllocGPUMemUMAMax") \
+	X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, "MemoryUsageMappedGPUMemUMA/LMA") \
+	X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax")
+
+
+/* X-Macro for Driver stat keys */
+#define PVRSRV_DRIVER_STAT_KEY \
+	X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \
+	X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \
+	X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, "MemoryUsageAllocPTMemoryUMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, "MemoryUsageAllocPTMemoryLMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, "MemoryUsageAllocGPUMemLMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA_MAX, "MemoryUsageAllocGPUMemLMAMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, "MemoryUsageAllocGPUMemUMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_MAX, "MemoryUsageAllocGPUMemUMAMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, "MemoryUsageAllocGPUMemUMAPool") \
+	X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL_MAX, "MemoryUsageAllocGPUMemUMAPoolMax") \
+	X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, "MemoryUsageMappedGPUMemUMA_LMA") \
+	X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA_MAX, "MemoryUsageMappedGPUMemUMA_LMAMax")
+
+
+typedef enum {
+#define X(stat_type, stat_str) stat_type,
+	PVRSRV_PROCESS_STAT_KEY
+#undef X
+	PVRSRV_PROCESS_STAT_TYPE_COUNT
+} PVRSRV_PROCESS_STAT_TYPE;
+
+typedef enum {
+#define X(stat_type, stat_str) stat_type,
+	PVRSRV_DRIVER_STAT_KEY
+#undef X
+	PVRSRV_DRIVER_STAT_TYPE_COUNT
+} PVRSRV_DRIVER_STAT_TYPE;
+
+extern const IMG_CHAR *const pszProcessStatType[];
+
+extern const IMG_CHAR *const pszDriverStatType[];
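+
+/* The string tables above are expected to be generated from the same
+ * X-macros by redefining X to select the string column, e.g. (a sketch;
+ * the actual definitions live in the stats implementation):
+ *
+ *     const IMG_CHAR *const pszProcessStatType[] = {
+ *     #define X(stat_type, stat_str) stat_str,
+ *         PVRSRV_PROCESS_STAT_KEY
+ *     #undef X
+ *     };
+ */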
+
+#endif // PROC_STATS_H
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/sync.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/sync.h
new file mode 100644
index 0000000..ccf91f6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/sync.h
@@ -0,0 +1,400 @@
+/*************************************************************************/ /*!
+@File
+@Title          Synchronisation interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the client side interface for synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_
+#define _SYNC_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include <powervr/sync_external.h>
+#include "pdumpdefs.h"
+#include "dllist.h"
+#include "pvr_debug.h"
+
+#include "device_connection.h"
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+/*************************************************************************/ /*!
+@Function       SyncPrimContextCreate
+
+@Description    Create a new synchronisation context
+
+@Input          hDevConnection          Device connection handle
+
+@Output         hSyncPrimContext        Handle to the created synchronisation
+                                        primitive context
+
+@Return         PVRSRV_OK if the synchronisation primitive context was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection,
+					  PSYNC_PRIM_CONTEXT	*hSyncPrimContext);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimContextDestroy
+
+@Description    Destroy a synchronisation context
+
+@Input          hSyncPrimContext        Handle to the synchronisation
+                                        primitive context to destroy
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimAlloc
+
+@Description    Allocate a new synchronisation primitive on the specified
+                synchronisation context
+
+@Input          hSyncPrimContext        Handle to the synchronisation
+                                        primitive context
+
+@Output         ppsSync                 Created synchronisation primitive
+
+@Input          pszClassName            Sync source annotation
+
+@Return         PVRSRV_OK if the synchronisation primitive was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimAlloc(PSYNC_PRIM_CONTEXT		hSyncPrimContext,
+			  PVRSRV_CLIENT_SYNC_PRIM	**ppsSync,
+			  const IMG_CHAR 			*pszClassName);
+
+#if defined(__KERNEL__)
+/*************************************************************************/ /*!
+@Function       SyncPrimAllocForServerSync
+
+@Description    Allocate a new synchronisation primitive on the specified
+                synchronisation context for a server sync
+
+@Input          hSyncPrimContext        Handle to the synchronisation
+                                        primitive context
+
+@Output         ppsSync                 Created synchronisation primitive
+
+@Input          pszClassName            Sync source annotation
+
+@Return         PVRSRV_OK if the synchronisation primitive was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimAllocForServerSync(PSYNC_PRIM_CONTEXT   hSyncPrimContext,
+						PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+						const IMG_CHAR          *pszClassName);
+#endif
+
+/*************************************************************************/ /*!
+@Function       SyncPrimFree
+
+@Description    Free a synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to free
+
+@Return         PVRSRV_OK if the synchronisation primitive was
+                successfully freed
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimSet
+
+@Description    Set the synchronisation primitive to a value
+
+@Input          psSync                  The synchronisation primitive to set
+
+@Input          ui32Value               Value to set it to
+
+@Return         PVRSRV_OK on success
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
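+
+/* A minimal client sync lifecycle sketch, assuming a valid device
+ * connection hDevConnection; error handling omitted:
+ *
+ *     PSYNC_PRIM_CONTEXT hCtx;
+ *     PVRSRV_CLIENT_SYNC_PRIM *psSync;
+ *
+ *     SyncPrimContextCreate(hDevConnection, &hCtx);
+ *     SyncPrimAlloc(hCtx, &psSync, "example");
+ *     SyncPrimSet(psSync, 0);
+ *     ...
+ *     SyncPrimFree(psSync);
+ *     SyncPrimContextDestroy(hCtx);
+ */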
+
+#if defined(NO_HARDWARE)
+
+/*************************************************************************/ /*!
+@Function       SyncPrimNoHwUpdate
+
+@Description    Updates the synchronisation primitive value (in NoHardware drivers)
+
+@Input          psSync                  The synchronisation primitive to update
+
+@Input          ui32Value               Value to update it to
+
+@Return         PVRSRV_OK on success
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+#endif
+
+PVRSRV_ERROR
+SyncPrimServerAlloc(SHARED_DEV_CONNECTION hDevConnection,
+					PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+					const IMG_CHAR		*pszClassName
+					PVR_DBG_FILELINE_PARAM);
+
+PVRSRV_ERROR
+SyncPrimServerGetStatus(IMG_UINT32 ui32SyncCount,
+						PVRSRV_CLIENT_SYNC_PRIM **papsSync,
+						IMG_UINT32 *pui32UID,
+						IMG_UINT32 *pui32FWAddr,
+						IMG_UINT32 *pui32CurrentOp,
+						IMG_UINT32 *pui32NextOp);
+
+PVRSRV_ERROR
+SyncPrimServerQueueOp(PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOp);
+
+PVRSRV_ERROR
+SyncPrimIsServerSync(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_BOOL *pbServerSync);
+
+IMG_HANDLE
+SyncPrimGetServerHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+
+
+PVRSRV_ERROR
+SyncPrimOpCreate(IMG_UINT32 ui32SyncCount,
+				 PVRSRV_CLIENT_SYNC_PRIM **papsSyncPrim,
+				 PSYNC_OP_COOKIE *ppsCookie);
+
+PVRSRV_ERROR
+SyncPrimOpTake(PSYNC_OP_COOKIE psCookie,
+			   IMG_UINT32 ui32SyncCount,
+			   PVRSRV_CLIENT_SYNC_PRIM_OP *pasSyncOp);
+
+PVRSRV_ERROR
+SyncPrimOpReady(PSYNC_OP_COOKIE psCookie,
+				IMG_BOOL *pbReady);
+
+PVRSRV_ERROR
+SyncPrimOpComplete(PSYNC_OP_COOKIE psCookie);
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpDestroy(PSYNC_OP_COOKIE psCookie);
+
+PVRSRV_ERROR
+SyncPrimOpResolve(PSYNC_OP_COOKIE psCookie,
+				  IMG_UINT32 *pui32SyncCount,
+				  PVRSRV_CLIENT_SYNC_PRIM_OP **ppsSyncOp);
+
+PVRSRV_ERROR
+SyncPrimDumpSyncs(IMG_UINT32 ui32SyncCount, PVRSRV_CLIENT_SYNC_PRIM **papsSync, const IMG_CHAR *pcszExtraInfo);
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function       SyncPrimPDump
+
+@Description    PDump the current value of the synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimPDumpValue
+
+@Description    PDump the ui32Value as the value of the synchronisation
+                primitive (regardless of the current value).
+
+@Input          psSync          The synchronisation primitive to PDump
+@Input          ui32Value       Value to give to the sync prim on the pdump
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimPDumpPol
+
+@Description    Do a PDump poll of the synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Input          ui32Value               Value to poll for 
+
+@Input          ui32Mask                PDump mask operator
+
+@Input          ui32PDumpFlags          PDump flags
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 IMG_UINT32 ui32Value,
+				 IMG_UINT32 ui32Mask,
+				 PDUMP_POLL_OPERATOR eOperator,
+				 IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimOpPDumpPol
+
+@Description    Do a PDump poll of all the synchronisation primitives on
+                this operation cookie.
+
+@Input          psCookie                Operation cookie
+
+@Input          ui32PDumpFlags          PDump flags
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimOpPDumpPol(PSYNC_OP_COOKIE psCookie,
+				 PDUMP_POLL_OPERATOR eOperator,
+				 IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimPDumpCBP
+
+@Description    Do a PDump CB poll using the synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Input          uiWriteOffset           Current write offset of buffer
+
+@Input          uiPacketSize            Size of the packet to write into CB
+
+@Input          uiBufferSize            Size of the CB
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 IMG_UINT64 uiWriteOffset,
+				 IMG_UINT64 uiPacketSize,
+				 IMG_UINT64 uiBufferSize);
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpValue)
+#endif
+static INLINE void
+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDump)
+#endif
+static INLINE void
+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpPol)
+#endif
+static INLINE void
+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 IMG_UINT32 ui32Value,
+				 IMG_UINT32 ui32Mask,
+				 PDUMP_POLL_OPERATOR eOperator,
+				 IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimOpPDumpPol)
+#endif
+static INLINE void
+SyncPrimOpPDumpPol(PSYNC_OP_COOKIE psCookie,
+				 PDUMP_POLL_OPERATOR eOperator,
+				 IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psCookie);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimServerPDumpPol)
+#endif
+static INLINE void
+SyncPrimServerPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 PDUMP_POLL_OPERATOR eOperator,
+				 IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpCBP)
+#endif
+static INLINE void
+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 IMG_UINT64 uiWriteOffset,
+				 IMG_UINT64 uiPacketSize,
+				 IMG_UINT64 uiBufferSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+}
+#endif	/* PDUMP */
+#endif	/* _SYNC_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/sync_internal.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/sync_internal.h
new file mode 100644
index 0000000..e9a2586
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/sync_internal.h
@@ -0,0 +1,128 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services internal synchronisation interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the internal client side interface for services
+                synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_INTERNAL_
+#define _SYNC_INTERNAL_
+
+#include "img_types.h"
+#include <powervr/sync_external.h>
+#include "ra.h"
+#include "dllist.h"
+#include "lock.h"
+#include "devicemem.h"
+
+
+#define LOCAL_SYNC_PRIM_RESET_VALUE 0
+
+/*
+	Private structures
+*/
+#define SYNC_PRIM_NAME_SIZE		50
+typedef struct SYNC_PRIM_CONTEXT
+{
+	SHARED_DEV_CONNECTION       hDevConnection;
+	IMG_CHAR					azName[SYNC_PRIM_NAME_SIZE];	/*!< Name of the RA */
+	RA_ARENA					*psSubAllocRA;					/*!< RA context */
+	IMG_CHAR					azSpanName[SYNC_PRIM_NAME_SIZE];/*!< Name of the span RA */
+	RA_ARENA					*psSpanRA;						/*!< RA used for span management of SubAllocRA */
+	ATOMIC_T				hRefCount;	/*!< Ref count for this context */
+} SYNC_PRIM_CONTEXT;
+
+typedef struct _SYNC_PRIM_BLOCK_
+{
+	SYNC_PRIM_CONTEXT	*psContext;				/*!< Our copy of the services connection */
+	IMG_HANDLE			hServerSyncPrimBlock;	/*!< Server handle for this block */
+	IMG_UINT32			ui32SyncBlockSize;		/*!< Size of the sync prim block */
+	IMG_UINT32			ui32FirmwareAddr;		/*!< Firmware address */
+	DEVMEM_MEMDESC		*hMemDesc;				/*!< Host mapping handle */
+	IMG_UINT32			*pui32LinAddr;			/*!< User CPU mapping */
+	IMG_UINT64			uiSpanBase;				/*!< Base of this import in the span RA */
+	DLLIST_NODE			sListNode;				/*!< List node for the sync block list */
+} SYNC_PRIM_BLOCK;
+
+typedef enum _SYNC_PRIM_TYPE_
+{
+	SYNC_PRIM_TYPE_UNKNOWN = 0,
+	SYNC_PRIM_TYPE_LOCAL,
+	SYNC_PRIM_TYPE_SERVER,
+} SYNC_PRIM_TYPE;
+
+typedef struct _SYNC_PRIM_LOCAL_
+{
+	ATOMIC_T				hRefCount;	/*!< Ref count for this sync */
+	SYNC_PRIM_BLOCK			*psSyncBlock;	/*!< Synchronisation block this primitive is allocated on */
+	IMG_UINT64				uiSpanAddr;		/*!< Span address of the sync */
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	IMG_HANDLE				hRecord;		/*!< Sync record handle */
+#endif
+} SYNC_PRIM_LOCAL;
+
+typedef struct _SYNC_PRIM_SERVER_
+{
+	SYNC_BRIDGE_HANDLE		hBridge;			/*!< Bridge handle */
+	IMG_HANDLE				hServerSync;		/*!< Handle to the server sync */
+	IMG_UINT32				ui32FirmwareAddr;	/*!< Firmware address of the sync */
+} SYNC_PRIM_SERVER;
+
+typedef struct _SYNC_PRIM_
+{
+	PVRSRV_CLIENT_SYNC_PRIM	sCommon;		/*!< Client visible part of the sync prim */
+	SYNC_PRIM_TYPE			eType;			/*!< Sync primitive type */
+	union {
+		SYNC_PRIM_LOCAL		sLocal;			/*!< Local sync primitive data */
+		SYNC_PRIM_SERVER	sServer;		/*!< Server sync primitive data */
+	} u;
+} SYNC_PRIM;
+
+
+/* FIXME this must return a correctly typed pointer */
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr);
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+							IMG_HANDLE *phBlock,
+							IMG_UINT32 *pui32Offset);
+
+
+#endif	/* _SYNC_INTERNAL_ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/tlclient.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/tlclient.h
new file mode 100644
index 0000000..134d55c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/shared/include/tlclient.h
@@ -0,0 +1,233 @@
+/*************************************************************************/ /*!
+@File           tlclient.h
+@Title          Services Transport Layer shared API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common API used in both clients and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+ 
+#ifndef TLCLIENT_H_
+#define TLCLIENT_H_
+
+
+#include "img_defs.h"
+#include "pvrsrv_tlcommon.h"
+#include "pvrsrv_error.h"
+
+
+/* This value is used for the hSrvHandle argument in the client API when
+ * called directly from the kernel which will lead to a direct bridge access.
+ */
+#define DIRECT_BRIDGE_HANDLE	((IMG_HANDLE)0xDEADBEEFU)
+
+
+/**************************************************************************/ /*!
+ @Function		TLClientOpenStream
+ @Description	Open a descriptor onto an existing kernel transport stream.
+ @Input			hSrvHandle    	Address of a pointer to a connection object
+ @Input			pszName			Address of the stream name string, no longer
+ 	 	 	 	 	 	 	 	than PRVSRVTL_MAX_STREAM_NAME_SIZE.
+ @Input			ui32Mode		Unused
+ @Output        phSD            Address of a pointer to a stream object
+ @Return 		PVRSRV_ERROR_NOT_FOUND:        when named stream not found
+ @Return		PVRSRV_ERROR_ALREADY_OPEN:     stream already open by another
+ @Return		PVRSRV_ERROR_STREAM_ERROR:     internal driver state error
+ @Return        PVRSRV_ERROR_TIMEOUT:          block timed out, stream not found
+ @Return		PVRSRV_ERROR:			       for other system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientOpenStream(IMG_HANDLE hSrvHandle,
+		const IMG_CHAR* pszName,
+		IMG_UINT32   ui32Mode,
+		IMG_HANDLE*  phSD);
+
+
+/**************************************************************************/ /*!
+ @Function		TLClientCloseStream
+ @Description	Close and release the stream connection to Services kernel
+				server transport layer. Any outstanding Acquire will be
+				released.
+ @Input			hSrvHandle      Address of a pointer to a connection object
+ @Input			hSD				Handle of the stream object to close
+ @Return		PVRSRV_ERROR_HANDLE_NOT_FOUND: when SD handle is not known
+ @Return		PVRSRV_ERROR_STREAM_ERROR: 	  internal driver state error
+ @Return		PVRSRV_ERROR:				  for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCloseStream(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD);
+
+/**************************************************************************/ /*!
+ @Function      TLClientDiscoverStreams
+ @Description   Finds all streams whose names start with pszNamePattern.
+ @Input         hSrvHandle      Address of a pointer to a connection object
+ @Input         pszNamePattern  Name pattern. Must be the beginning of a string.
+ @Output        aszStreams      Array filled with the names of the discovered
+                                streams.
+ @InOut         pui32NumFound   On input, the maximum number of names that fit
+                                into aszStreams. On output, the number of
+                                discovered streams.
+ @Return        PVRSRV_ERROR    for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientDiscoverStreams(IMG_HANDLE hSrvHandle,
+		const IMG_CHAR *pszNamePattern,
+		IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+		IMG_UINT32 *pui32NumFound);
+
+/**************************************************************************/ /*!
+ @Function      TLClientReserveStream
+ @Description   Reserves a region of the given size in the stream. If the
+                stream already has an open reservation the function will
+                return an error.
+ @Input         hSrvHandle      Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to reserve space in
+ @Output        ppui8Data       Pointer to the reserved buffer
+ @Input         ui32Size        Size of the data
+ @Return        PVRSRV_ERROR    for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD,
+		IMG_UINT8 **ppui8Data,
+		IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+ @Function      TLClientReserveStream2
+ @Description   Reserves a region of the given size in the stream. If the
+                stream already has an open reservation the function will
+                return an error.
+ @Input         hSrvHandle      Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to reserve space in
+ @Output        ppui8Data       Pointer to the reserved buffer
+ @Input         ui32Size        Size of the data
+ @Input         ui32SizeMin     Minimum size of the data
+ @Output        pui32Available  Available space in the buffer
+ @Return        PVRSRV_ERROR    for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream2(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD,
+		IMG_UINT8 **ppui8Data,
+		IMG_UINT32 ui32Size,
+		IMG_UINT32 ui32SizeMin,
+		IMG_UINT32 *pui32Available);
+
+/**************************************************************************/ /*!
+ @Function      TLClientCommitStream
+ @Description   Commits a previously reserved region in the stream and so
+                allows subsequent reservations.
+                This call must be preceded by a call to
+                TLClientReserveStream or TLClientReserveStream2.
+ @Input         hSrvHandle      Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to commit to
+ @Input         ui32Size        Size of the committed data in bytes
+ @Return        PVRSRV_ERROR    for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCommitStream(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD,
+		IMG_UINT32 ui32Size);
+
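+/* Reserve/commit write-path sketch (illustrative only): hSrvHandle and hSD
+ * are assumed to come from the connect/open calls declared above, and the
+ * 16-byte payload size is a placeholder.
+ *
+ *   IMG_UINT8 *pui8Dest;
+ *   if (TLClientReserveStream(hSrvHandle, hSD, &pui8Dest, 16) == PVRSRV_OK)
+ *   {
+ *       // fill pui8Dest[0..15] with packet data here
+ *       (void) TLClientCommitStream(hSrvHandle, hSD, 16);
+ *   }
+ */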
+/**************************************************************************/ /*!
+ @Function		TLClientAcquireData
+ @Description	When there is data available in the stream buffer this call
+ 	 	 	 	returns with the address and length of the data buffer the
+ 	 	 	 	client can safely read. This buffer may contain one or more
+ 	 	 	 	packets of data.
+ 	 	 	 	If no data is available then this call blocks until it becomes
+ 	 	 	 	available. However if the stream has been destroyed while
+ 	 	 	 	waiting then a resource unavailable error will be returned
+ 	 	 	 	to the caller. Clients must pair this call with a
+ 	 	 	 	ReleaseData call.
+ @Input			hSrvHandle  	Address of a pointer to a connection object
+ @Input			hSD				Handle of the stream object to read
+ @Output		ppPacketBuf		Address of a pointer to a byte buffer. On exit
+								pointer contains address of buffer to read from
+ @Output		puiBufLen		Pointer to an integer. On exit it is the size
+								of the data to read from the packet buffer
+ @Return		PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return		PVRSRV_ERROR_HANDLE_NOT_FOUND:     when SD handle not known
+ @Return		PVRSRV_ERROR_STREAM_ERROR: 	       internal driver state error
+ @Return		PVRSRV_ERROR_RETRY:				   release not called beforehand
+ @Return        PVRSRV_ERROR_TIMEOUT:              block timed out, no data
+ @Return		PVRSRV_ERROR:					   for other system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientAcquireData(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE  hSD,
+		IMG_PBYTE*  ppPacketBuf,
+		IMG_UINT32* puiBufLen);
+
+
+/**************************************************************************/ /*!
+ @Function		TLClientReleaseData
+ @Description	Called after the client has read the stream data out of the
+ 	 	 	 	buffer. The data is subsequently flushed from the stream
+ 	 	 	 	buffer to make room for more data packets from the stream
+ 	 	 	 	source.
+ @Input			hSrvHandle  	Address of a pointer to a connection object
+ @Input			hSD				Handle of the stream object to read
+ @Return		PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return		PVRSRV_ERROR_HANDLE_NOT_FOUND:   when SD handle not known to TL
+ @Return		PVRSRV_ERROR_STREAM_ERROR: 	     internal driver state error
+ @Return		PVRSRV_ERROR_RETRY:				 acquire not called beforehand
+ @Return		PVRSRV_ERROR:	                 for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseData(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD);
+
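+/* Acquire/release read-loop sketch (illustrative only, same handle
+ * assumptions as above). AcquireData may block until data arrives and
+ * must always be paired with a ReleaseData call.
+ *
+ *   IMG_PBYTE pData;
+ *   IMG_UINT32 uiLen;
+ *   while (TLClientAcquireData(hSrvHandle, hSD, &pData, &uiLen) == PVRSRV_OK)
+ *   {
+ *       // consume uiLen bytes of packet data at pData
+ *       (void) TLClientReleaseData(hSrvHandle, hSD);
+ *   }
+ */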
+/**************************************************************************/ /*!
+ @Function      TLClientWriteData
+ @Description   Writes data to the stream.
+ @Input         hSrvHandle      Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to write to
+ @Input         ui32Size        Size of the data
+ @Input         pui8Data        Pointer to data
+ @Return        PVRSRV_ERROR    for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientWriteData(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD,
+		IMG_UINT32 ui32Size,
+		IMG_BYTE *pui8Data);
+
+
+#endif /* TLCLIENT_H_ */
+
+/******************************************************************************
+ End of file (tlclient.h)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/env/linux/dma_support.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/env/linux/dma_support.c
new file mode 100644
index 0000000..2b4109e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/env/linux/dma_support.c
@@ -0,0 +1,335 @@
+/*************************************************************************/ /*!
+@File			dma_support.c
+@Title          System DMA support
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This provides a contiguous memory allocator (i.e. DMA allocator);
+				these APIs are used for allocation/ioremapping (DMA/PA <-> CPU/VA)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined(LINUX)
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#endif
+
+#include "allocmem.h"
+#include "dma_support.h"
+
+#define DMA_MAX_IOREMAP_ENTRIES 2
+static IMG_BOOL gbEnableDmaIoRemapping = IMG_FALSE;
+static DMA_ALLOC gsDmaIoRemapArray[DMA_MAX_IOREMAP_ENTRIES] = {{0}};
+
+/*!
+******************************************************************************
+ @Function			SysDmaAllocMem
+
+ @Description 		Allocates physically contiguous memory
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (psDmaAlloc != NULL && psDmaAlloc->pvOSDevice != NULL)
+	{
+#if defined(LINUX)
+		psDmaAlloc->pvVirtAddr =
+				dma_alloc_coherent((struct device *)psDmaAlloc->pvOSDevice,
+								   (size_t) psDmaAlloc->ui64Size,
+								   (dma_addr_t *)&psDmaAlloc->sBusAddr.uiAddr,
+								   GFP_KERNEL);
+		PVR_LOGR_IF_FALSE((NULL != psDmaAlloc->pvVirtAddr), "dma_alloc_coherent() failed", PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES);
+#else
+		#error "Provide OS implementation of DMA allocation";
+#endif
+	}
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+ @Function			SysDmaFreeMem
+
+ @Description 		Free physically contiguous memory
+
+ @Return			void
+ ******************************************************************************/
+void SysDmaFreeMem(DMA_ALLOC *psDmaAlloc)
+{
+	if (psDmaAlloc && psDmaAlloc->pvVirtAddr)
+	{
+#if defined(LINUX)
+		dma_free_coherent((struct device *)psDmaAlloc->pvOSDevice,
+						  (size_t) psDmaAlloc->ui64Size,
+						  psDmaAlloc->pvVirtAddr,
+						  (dma_addr_t )psDmaAlloc->sBusAddr.uiAddr);
+#else
+		#error "Provide OS implementation of DMA deallocation";
+#endif
+	}
+}
+
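+/* Allocation lifecycle sketch (illustrative only): pvOSDevice is assumed to
+ * be a valid struct device pointer supplied by the caller, and the 4KB size
+ * is a placeholder. Registration for I/O remapping (below) is optional.
+ *
+ *   DMA_ALLOC sAlloc = {0};
+ *   sAlloc.pvOSDevice = pvOSDevice;
+ *   sAlloc.ui64Size   = 0x1000;
+ *   if (SysDmaAllocMem(&sAlloc) == PVRSRV_OK)
+ *   {
+ *       (void) SysDmaRegisterForIoRemapping(&sAlloc);
+ *       // ... use sAlloc.pvVirtAddr / sAlloc.sBusAddr ...
+ *       SysDmaDeregisterForIoRemapping(&sAlloc);
+ *       SysDmaFreeMem(&sAlloc);
+ *   }
+ */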
+/*!
+******************************************************************************
+ @Function			SysDmaRegisterForIoRemapping
+
+ @Description 		Registers DMA_ALLOC for manual I/O remapping
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psDmaAlloc)
+{
+	IMG_UINT32 ui32Idx;
+	IMG_BOOL bTabEntryFound = IMG_TRUE;
+	PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS;
+
+	if (psDmaAlloc == NULL ||
+		psDmaAlloc->ui64Size == 0 ||
+		psDmaAlloc->pvVirtAddr == 0 ||
+		psDmaAlloc->sBusAddr.uiAddr == 0)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		/* Look for a free I/O remap table entry */
+		if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == NULL)
+		{
+			PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == 0);
+			PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].ui64Size == 0);
+			break;
+		}
+	}
+
+	if (ui32Idx >= DMA_MAX_IOREMAP_ENTRIES)
+	{
+		bTabEntryFound = IMG_FALSE;
+	}
+
+	if (bTabEntryFound)
+	{
+		IMG_BOOL bSameVAddr, bSamePAddr, bSameSize;
+
+		bSamePAddr = gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == psDmaAlloc->sBusAddr.uiAddr;
+		bSameVAddr = gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr;
+		bSameSize = gsDmaIoRemapArray[ui32Idx].ui64Size == psDmaAlloc->ui64Size;
+
+		if (bSameVAddr)
+		{
+			if (bSamePAddr && bSameSize)
+			{
+				eError = PVRSRV_OK;
+			}
+			else
+			{
+				eError = PVRSRV_ERROR_ALREADY_EXISTS;
+			}
+		}
+		else
+		{
+			PVR_ASSERT(bSamePAddr == IMG_FALSE);
+
+			gsDmaIoRemapArray[ui32Idx].ui64Size = psDmaAlloc->ui64Size;
+			gsDmaIoRemapArray[ui32Idx].sBusAddr = psDmaAlloc->sBusAddr;
+			gsDmaIoRemapArray[ui32Idx].pvVirtAddr = psDmaAlloc->pvVirtAddr;
+
+			PVR_DPF((PVR_DBG_MESSAGE,
+					"DMA: register I/O remap: "\
+					"VA: 0x%p, PA: 0x%llx, Size: 0x%llx",
+					psDmaAlloc->pvVirtAddr, 
+					psDmaAlloc->sBusAddr.uiAddr, 
+					psDmaAlloc->ui64Size));
+
+			gbEnableDmaIoRemapping = IMG_TRUE;
+			eError = PVRSRV_OK;
+		}
+	}
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+ @Function			SysDmaDeregisterForIoRemapping
+
+ @Description 		Deregisters DMA_ALLOC from manual I/O remapping
+
+ @Return			void
+ ******************************************************************************/
+void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psDmaAlloc)
+{
+	IMG_UINT32 ui32Idx;
+
+	if (psDmaAlloc == NULL ||
+		psDmaAlloc->ui64Size == 0 ||
+		psDmaAlloc->pvVirtAddr == 0 ||
+		psDmaAlloc->sBusAddr.uiAddr == 0)
+	{
+		return;
+	}
+
+	/* Remove the specified entry from the list of I/O remap entries */
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr)
+		{
+			gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr = 0;
+			gsDmaIoRemapArray[ui32Idx].pvVirtAddr = NULL;
+			gsDmaIoRemapArray[ui32Idx].ui64Size =  0;
+
+			PVR_DPF((PVR_DBG_MESSAGE,
+					"DMA: deregister I/O remap: "\
+					"VA: 0x%p, PA: 0x%llx, Size: 0x%llx",
+					psDmaAlloc->pvVirtAddr, 
+					psDmaAlloc->sBusAddr.uiAddr, 
+					psDmaAlloc->ui64Size));
+
+			break;
+		}
+	}
+
+	/* Check whether any I/O remap entries remain registered */
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr != NULL)
+		{
+			break;
+		}
+	}
+
+	if (ui32Idx == DMA_MAX_IOREMAP_ENTRIES)
+	{
+		/* No entries found so disable remapping */
+		gbEnableDmaIoRemapping = IMG_FALSE;
+	}
+}
+
+/*!
+******************************************************************************
+ @Function			SysDmaDevPAddrToCpuVAddr
+
+ @Description 		Maps a DMA_ALLOC physical address to CPU virtual address
+
+ @Return			IMG_CPU_VIRTADDR on success. Otherwise, NULL
+ ******************************************************************************/
+IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size)
+{
+	IMG_CPU_VIRTADDR pvDMAVirtAddr = NULL;
+	DMA_ALLOC *psHeapDmaAlloc;
+	IMG_UINT32 ui32Idx;
+
+	if (gbEnableDmaIoRemapping == IMG_FALSE)
+	{
+		return pvDMAVirtAddr;
+	}
+
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx];
+		if (psHeapDmaAlloc->sBusAddr.uiAddr && uiAddr >= psHeapDmaAlloc->sBusAddr.uiAddr)
+		{
+			IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size;
+			IMG_UINT64 uiOffset = uiAddr - psHeapDmaAlloc->sBusAddr.uiAddr;
+
+			if (uiOffset < uiSpan)
+			{
+				PVR_ASSERT((uiOffset+ui64Size-1) < uiSpan);
+				pvDMAVirtAddr = psHeapDmaAlloc->pvVirtAddr + uiOffset;
+
+				PVR_DPF((PVR_DBG_MESSAGE,
+					"DMA: remap: PA: 0x%llx => VA: 0x%p",
+					uiAddr, pvDMAVirtAddr));
+
+				break;
+			}
+		}
+	}
+
+	return pvDMAVirtAddr;
+}
+
+/*!
+******************************************************************************
+ @Function			SysDmaCpuVAddrToDevPAddr
+
+ @Description 		Maps a DMA_ALLOC CPU virtual address to physical address
+
+ @Return			Non-zero value on success. Otherwise, 0
+ ******************************************************************************/
+IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr)
+{
+	IMG_UINT64 uiAddr = 0;
+	DMA_ALLOC *psHeapDmaAlloc;
+	IMG_UINT32 ui32Idx;
+
+	if (gbEnableDmaIoRemapping == IMG_FALSE)
+	{
+		return uiAddr;
+	}
+
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx];
+		if (psHeapDmaAlloc->pvVirtAddr && pvDMAVirtAddr >= psHeapDmaAlloc->pvVirtAddr)
+		{
+			IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size;
+			IMG_UINT64 uiOffset = pvDMAVirtAddr - psHeapDmaAlloc->pvVirtAddr;
+
+			if (uiOffset < uiSpan)
+			{
+				uiAddr = psHeapDmaAlloc->sBusAddr.uiAddr + uiOffset;
+
+				PVR_DPF((PVR_DBG_MESSAGE,
+					"DMA: remap: VA: 0x%p => PA: 0x%llx",
+					pvDMAVirtAddr, uiAddr));
+
+				break;
+			}
+		}
+	}
+
+	return uiAddr;
+}
+
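+/* Address translation sketch (illustrative only): uiBusAddr and ui64Size are
+ * placeholders and are assumed to lie inside a DMA_ALLOC previously
+ * registered for I/O remapping; otherwise the helpers return NULL/0.
+ *
+ *   IMG_CPU_VIRTADDR pvVA = SysDmaDevPAddrToCpuVAddr(uiBusAddr, ui64Size);
+ *   IMG_UINT64 uiPA = (pvVA != NULL) ? SysDmaCpuVAddrToDevPAddr(pvVA) : 0;
+ *   // uiPA == uiBusAddr whenever the remap table covers the range
+ */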
+/******************************************************************************
+ End of file (dma_support.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/env/linux/interrupt_support.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/env/linux/interrupt_support.c
new file mode 100644
index 0000000..39ba4ea
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/env/linux/interrupt_support.c
@@ -0,0 +1,151 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/interrupt.h>
+
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "interrupt_support.h"
+
+typedef struct LISR_DATA_TAG
+{
+	IMG_UINT32	ui32IRQ;
+	PFN_SYS_LISR	pfnLISR;
+	void		*pvData;
+} LISR_DATA;
+
+static irqreturn_t SystemISRWrapper(int irq, void *dev_id)
+{
+	LISR_DATA *psLISRData = (LISR_DATA *)dev_id;
+
+	PVR_UNREFERENCED_PARAMETER(irq);
+
+	if (psLISRData)
+	{
+		if (psLISRData->pfnLISR(psLISRData->pvData))
+		{
+			return IRQ_HANDLED;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Missing interrupt data", __FUNCTION__));
+	}
+
+	return IRQ_NONE;
+}
+
+PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR,
+				 IMG_UINT32 ui32IRQ,
+				 const IMG_CHAR *pszDevName,
+				 PFN_SYS_LISR pfnLISR,
+				 void *pvData,
+				 IMG_UINT32 ui32Flags)
+{
+	LISR_DATA *psLISRData;
+	unsigned long ulIRQFlags = 0;
+
+	if (pfnLISR == NULL || pvData == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32Flags & ~SYS_IRQ_FLAG_MASK)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	switch (ui32Flags & SYS_IRQ_FLAG_TRIGGER_MASK)
+	{
+		case SYS_IRQ_FLAG_TRIGGER_DEFAULT:
+			break;
+		case SYS_IRQ_FLAG_TRIGGER_LOW:
+			ulIRQFlags |= IRQF_TRIGGER_LOW;
+			break;
+		case SYS_IRQ_FLAG_TRIGGER_HIGH:
+			ulIRQFlags |= IRQF_TRIGGER_HIGH;
+			break;
+		default:
+			return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32Flags & SYS_IRQ_FLAG_SHARED)
+	{
+		ulIRQFlags |= IRQF_SHARED;
+	}
+
+	psLISRData = OSAllocMem(sizeof *psLISRData);
+	if (psLISRData == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psLISRData->ui32IRQ = ui32IRQ;
+	psLISRData->pfnLISR = pfnLISR;
+	psLISRData->pvData = pvData;
+
+	if (request_irq(ui32IRQ, SystemISRWrapper, ulIRQFlags, pszDevName, psLISRData))
+	{
+		OSFreeMem(psLISRData);
+
+		return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER;
+	}
+
+	*phLISR = (IMG_HANDLE)psLISRData;
+
+	return PVRSRV_OK;
+}
+
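+/* Install/uninstall sketch (illustrative only): ui32IRQ, the "pvr" device
+ * name, and the pfnMyLISR/pvMyData callback pair are placeholders supplied
+ * by the system layer.
+ *
+ *   IMG_HANDLE hLISR;
+ *   PVRSRV_ERROR eErr = OSInstallSystemLISR(&hLISR, ui32IRQ, "pvr",
+ *                                           pfnMyLISR, pvMyData,
+ *                                           SYS_IRQ_FLAG_TRIGGER_DEFAULT);
+ *   if (eErr == PVRSRV_OK)
+ *   {
+ *       // ... interrupts are now delivered to pfnMyLISR ...
+ *       (void) OSUninstallSystemLISR(hLISR);
+ *   }
+ */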
+PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISR)
+{
+	LISR_DATA *psLISRData = (LISR_DATA *)hLISR;
+
+	if (psLISRData == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	free_irq(psLISRData->ui32IRQ, psLISRData);
+
+	OSFreeMem(psLISRData);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/env/linux/pci_support.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/env/linux/pci_support.c
new file mode 100644
index 0000000..8d4a86b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/env/linux/pci_support.c
@@ -0,0 +1,716 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <linux/pci.h>
+
+#if defined(CONFIG_MTRR)
+#include <asm/mtrr.h>
+#endif
+
+#include "pci_support.h"
+#include "allocmem.h"
+
+typedef	struct _PVR_PCI_DEV_TAG
+{
+	struct pci_dev		*psPCIDev;
+	HOST_PCI_INIT_FLAGS	ePCIFlags;
+	IMG_BOOL		abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	int			iMTRR[DEVICE_COUNT_RESOURCE];
+#endif
+} PVR_PCI_DEV;
+
+/*************************************************************************/ /*!
+@Function       OSPCISetDev
+@Description    Set a PCI device for subsequent use.
+@Input          pvPCICookie             Pointer to OS specific PCI structure
+@Input          eFlags                  Flags
+@Return		PVRSRV_PCI_DEV_HANDLE   Pointer to PCI device handle
+*/ /**************************************************************************/
+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
+{
+	int err;
+	IMG_UINT32 i;
+	PVR_PCI_DEV *psPVRPCI;
+
+	psPVRPCI = OSAllocMem(sizeof(*psPVRPCI));
+	if (psPVRPCI == NULL)
+	{
+		printk(KERN_ERR "OSPCISetDev: Couldn't allocate PVR PCI structure\n");
+		return NULL;
+	}
+
+	psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
+	psPVRPCI->ePCIFlags = eFlags;
+
+	err = pci_enable_device(psPVRPCI->psPCIDev);
+	if (err != 0)
+	{
+		printk(KERN_ERR "OSPCISetDev: Couldn't enable device (%d)\n", err);
+		OSFreeMem(psPVRPCI);
+		return NULL;
+	}
+
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)	/* PRQA S 3358 */ /* misuse of enums */
+	{
+		pci_set_master(psPVRPCI->psPCIDev);
+	}
+
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)		/* PRQA S 3358 */ /* misuse of enums */
+	{
+#if defined(CONFIG_PCI_MSI)
+		err = pci_enable_msi(psPVRPCI->psPCIDev);
+		if (err != 0)
+		{
+			printk(KERN_ERR "OSPCISetDev: Couldn't enable MSI (%d)", err);
+			psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;	/* PRQA S 1474,3358,4130 */ /* misuse of enums */
+		}
+#else
+		printk(KERN_ERR "OSPCISetDev: MSI support not enabled in the kernel");
+#endif
+	}
+
+	/* Initialise the PCI resource and MTRR tracking array */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	{
+		psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+		psPVRPCI->iMTRR[i] = -1;
+#endif
+	}
+
+	return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAcquireDev
+@Description    Acquire a PCI device for subsequent use.
+@Input          ui16VendorID            Vendor PCI ID
+@Input          ui16DeviceID            Device PCI ID
+@Input          eFlags                  Flags
+@Return		PVRSRV_PCI_DEV_HANDLE   Pointer to PCI device handle
+*/ /**************************************************************************/
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, 
+				      IMG_UINT16 ui16DeviceID, 
+				      HOST_PCI_INIT_FLAGS eFlags)
+{
+	struct pci_dev *psPCIDev;
+
+	psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
+	if (psPCIDev == NULL)
+	{
+		return NULL;
+	}
+
+	return OSPCISetDev((void *)psPCIDev, eFlags);
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIDevID
+@Description    Get the PCI device ID.
+@Input          hPVRPCI                 PCI device handle
+@Output         pui16DeviceID           Pointer to where the device ID should 
+                                        be returned
+@Return		PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIDevID(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16DeviceID)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+	if (pui16DeviceID == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pui16DeviceID = psPVRPCI->psPCIDev->device;
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIIRQ
+@Description    Get the interrupt number for the device.
+@Input          hPVRPCI                 PCI device handle
+@Output         pui32IRQ                Pointer to where the interrupt number
+                                        should be returned
+@Return		PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+	if (pui32IRQ == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pui32IRQ = psPVRPCI->psPCIDev->irq;
+
+	return PVRSRV_OK;
+}
+
+/* Functions supported by OSPCIAddrRangeFunc */
+enum HOST_PCI_ADDR_RANGE_FUNC
+{
+	HOST_PCI_ADDR_RANGE_FUNC_LEN,
+	HOST_PCI_ADDR_RANGE_FUNC_START,
+	HOST_PCI_ADDR_RANGE_FUNC_END,
+	HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
+	HOST_PCI_ADDR_RANGE_FUNC_RELEASE
+};
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeFunc
+@Description    Internal support function for various address range related 
+                functions
+@Input          eFunc                   Function to perform
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return		IMG_UINT64              Function dependent value
+*/ /**************************************************************************/
+static IMG_UINT64 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
+										 PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+										 IMG_UINT32 ui32Index)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+	if (ui32Index >= DEVICE_COUNT_RESOURCE)
+	{
+		printk(KERN_ERR "OSPCIAddrRangeFunc: Index out of range");
+		return 0;
+	}
+
+	switch (eFunc)
+	{
+		case HOST_PCI_ADDR_RANGE_FUNC_LEN:
+		{
+			return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
+		}
+		case HOST_PCI_ADDR_RANGE_FUNC_START:
+		{
+			return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+		}
+		case HOST_PCI_ADDR_RANGE_FUNC_END:
+		{
+			return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+		}
+		case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
+		{
+			int err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME);
+			if (err != 0)
+			{
+				printk(KERN_ERR "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err);
+				return 0;
+			}
+			psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
+			return 1;
+		}
+		case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
+		{
+			if (psPVRPCI->abPCIResourceInUse[ui32Index])
+			{
+				pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
+				psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
+			}
+			return 1;
+		}
+		default:
+		{
+			printk(KERN_ERR "OSPCIAddrRangeFunc: Unknown function");
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeLen
+@Description    Returns length of a given address range
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return		IMG_UINT64              Length of address range or 0 if no
+                                        such range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeStart
+@Description    Returns the start of a given address range
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return		IMG_UINT64              Start of address range or 0 if no
+                                        such range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index); 
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeEnd
+@Description    Returns the end of a given address range
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return		IMG_UINT64              End of address range or 0 if no such
+                                        range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index); 
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIRequestAddrRange
+@Description    Request a given address range index for subsequent use
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+								   IMG_UINT32 ui32Index)
+{
+	if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0)
+	{
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+	else
+	{
+		return PVRSRV_OK;
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseAddrRange
+@Description    Release a given address range that is no longer being used
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0)
+	{
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+	else
+	{
+		return PVRSRV_OK;
+	}
+}
+
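+/* BAR access sketch (illustrative only): the vendor/device IDs and the BAR
+ * index 0 are placeholders, and error handling is elided.
+ *
+ *   PVRSRV_PCI_DEV_HANDLE hPCI = OSPCIAcquireDev(0x1010, 0x0001,
+ *                                                HOST_PCI_INIT_FLAG_BUS_MASTER);
+ *   if (hPCI != NULL && OSPCIRequestAddrRange(hPCI, 0) == PVRSRV_OK)
+ *   {
+ *       IMG_UINT64 uiBase = OSPCIAddrRangeStart(hPCI, 0);
+ *       IMG_UINT64 uiLen  = OSPCIAddrRangeLen(hPCI, 0);
+ *       // ... map and use the region ...
+ *       (void) OSPCIReleaseAddrRange(hPCI, 0);
+ *   }
+ */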
+/*************************************************************************/ /*!
+@Function       OSPCIRequestAddrRegion
+@Description    Request a given region from an address range for subsequent use
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Input          uiOffset                Offset into the address range that forms
+                                        the start of the region
+@Input          uiLength                Length of the region
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+									IMG_UINT32 ui32Index,
+									IMG_UINT64 uiOffset,
+									IMG_UINT64 uiLength)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	resource_size_t start;
+	resource_size_t end;
+
+	start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+	end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+
+	/* Check that the requested region is valid */
+	if ((start + uiOffset + uiLength - 1) > end)
+	{
+		return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;
+	}
+
+	if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO)
+	{
+		if (request_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL)
+		{
+			return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+		}
+	}
+	else
+	{
+		if (request_mem_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL)
+		{
+			return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseAddrRegion
+@Description    Release a given region, from an address range, that is no 
+                longer in use
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Input          uiOffset                Offset into the address range that forms
+                                        the start of the region
+@Input          uiLength                Length of the region
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+									IMG_UINT32 ui32Index,
+									IMG_UINT64 uiOffset,
+									IMG_UINT64 uiLength)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	resource_size_t start;
+	resource_size_t end;
+
+	start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+	end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+
+	/* Check that the region is valid */
+	if ((start + uiOffset + uiLength - 1) > end)
+	{
+		return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;
+	}
+
+	if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO)
+	{
+		release_region(start + uiOffset, uiLength);
+	}
+	else
+	{
+		release_mem_region(start + uiOffset, uiLength);
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseDev
+@Description    Release a PCI device that is no longer being used
+@Input          hPVRPCI                 PCI device handle
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	int i;
+
+	/* Release all PCI regions that are currently in use */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	{
+		if (psPVRPCI->abPCIResourceInUse[i])
+		{
+			pci_release_region(psPVRPCI->psPCIDev, i);
+			psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+		}
+	}
+
+#if defined(CONFIG_PCI_MSI)
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)		/* PRQA S 3358 */ /* misuse of enums */
+	{
+		pci_disable_msi(psPVRPCI->psPCIDev);
+	}
+#endif
+
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)	/* PRQA S 3358 */ /* misuse of enums */
+	{
+		pci_clear_master(psPVRPCI->psPCIDev);
+	}
+
+	pci_disable_device(psPVRPCI->psPCIDev);
+
+	OSFreeMem(psPVRPCI);
+	/* Not NULLing the pointer: the caller holds only a stack copy */
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCISuspendDev
+@Description    Prepare PCI device to be turned off by power management
+@Input          hPVRPCI                 PCI device handle
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	int i;
+	int err;
+
+	/* Release all PCI regions that are currently in use */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	{
+		if (psPVRPCI->abPCIResourceInUse[i])
+		{
+			pci_release_region(psPVRPCI->psPCIDev, i);
+		}
+	}
+
+	err = pci_save_state(psPVRPCI->psPCIDev);
+	if (err != 0)
+	{
+		printk(KERN_ERR "OSPCISuspendDev: pci_save_state_failed (%d)", err);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+
+	pci_disable_device(psPVRPCI->psPCIDev);
+
+	err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND));
+	switch(err)
+	{
+		case 0:
+			break;
+		case -EIO:
+			printk(KERN_ERR "OSPCISuspendDev: device doesn't support PCI PM");
+			break;
+		case -EINVAL:
+			printk(KERN_ERR "OSPCISuspendDev: can't enter requested power state");
+			break;
+		default:
+			printk(KERN_ERR "OSPCISuspendDev: pci_set_power_state failed (%d)", err);
+			break;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIResumeDev
+@Description    Prepare a PCI device to be resumed by power management
+@Input          hPVRPCI                 PCI device handle
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	int err;
+	int i;
+
+	err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
+	switch(err)
+	{
+		case 0:
+			break;
+		case -EIO:
+			printk(KERN_ERR "OSPCIResumeDev: device doesn't support PCI PM");
+			break;
+		case -EINVAL:
+			printk(KERN_ERR "OSPCIResumeDev: can't enter requested power state");
+			return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+		default:
+			printk(KERN_ERR "OSPCIResumeDev: pci_set_power_state failed (%d)", err);
+			return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+	}
+
+	pci_restore_state(psPVRPCI->psPCIDev);
+
+	err = pci_enable_device(psPVRPCI->psPCIDev);
+	if (err != 0)
+	{
+		printk(KERN_ERR "OSPCIResumeDev: Couldn't enable device (%d)", err);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)	/* PRQA S 3358 */ /* misuse of enums */
+		pci_set_master(psPVRPCI->psPCIDev);
+
+	/* Restore the PCI resource tracking array */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	{
+		if (psPVRPCI->abPCIResourceInUse[i])
+		{
+			err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME);
+			if (err != 0)
+			{
+				printk(KERN_ERR "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err);
+			}
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+#if defined(CONFIG_MTRR)
+
+/*************************************************************************/ /*!
+@Function       OSPCIClearResourceMTRRs
+@Description    Clear any BIOS-configured MTRRs for a PCI memory region
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	resource_size_t start, end;
+	int res;
+
+	start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+	end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+	res = arch_io_reserve_memtype_wc(start, end - start);
+	if (res)
+	{
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+#endif
+	res = arch_phys_wc_add(start, end - start);
+	if (res < 0)
+	{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+		arch_io_free_memtype_wc(start, end - start);
+#endif
+
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+	psPVRPCI->iMTRR[ui32Index] = res;
+#else
+
+	res = mtrr_add(start, end - start, MTRR_TYPE_UNCACHABLE, 0);
+	if (res < 0)
+	{
+		printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+
+	res = mtrr_del(res, start, end - start);
+	if (res < 0)
+	{
+		printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+
+	/* Workaround for overlapping MTRRs. */
+	{
+		IMG_BOOL bGotMTRR0 = IMG_FALSE;
+
+		/* Current mobo BIOSes will normally set up a WRBACK MTRR spanning
+		 * 0->4GB, and then another 4GB->6GB. If the PCI card's automatic &
+		 * overlapping UNCACHABLE MTRR is deleted, we see WRBACK behaviour.
+		 *
+		 * WRBACK is incompatible with some PCI devices, so try to split
+		 * the UNCACHABLE regions up and insert a WRCOMB region instead.
+		 */
+		res = mtrr_add(start, end - start, MTRR_TYPE_WRBACK, 0);
+		if (res < 0)
+		{
+			/* If this fails, services has probably run before and created
+			 * a write-combined MTRR for the test chip. Assume it has, and
+			 * don't return an error here.
+			 */
+			return PVRSRV_OK;
+		}
+
+		if (res == 0)
+			bGotMTRR0 = IMG_TRUE;
+
+		res = mtrr_del(res, start, end - start);
+		if (res < 0)
+		{
+			printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res);
+			return PVRSRV_ERROR_PCI_CALL_FAILED;
+		}
+
+		if (bGotMTRR0)
+		{
+			/* Replace 0 with a non-overlapping WRBACK MTRR */
+			res = mtrr_add(0, start, MTRR_TYPE_WRBACK, 0);
+			if (res < 0)
+			{
+				printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res);
+				return PVRSRV_ERROR_PCI_CALL_FAILED;
+			}
+
+			/* Add a WRCOMB MTRR for the PCI device memory bar */
+			res = mtrr_add(start, end - start, MTRR_TYPE_WRCOMB, 0);
+			if (res < 0)
+			{
+				printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res);
+				return PVRSRV_ERROR_PCI_CALL_FAILED;
+			}
+		}
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseResourceMTRRs
+@Description    Release resources allocated by OSPCIClearResourceMTRRs 
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+*/ /**************************************************************************/
+void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+	if (psPVRPCI->iMTRR[ui32Index] >= 0)
+	{
+		arch_phys_wc_del(psPVRPCI->iMTRR[ui32Index]);
+		psPVRPCI->iMTRR[ui32Index] = -1;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+		{
+			resource_size_t start, end;
+
+			start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+			end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1;
+
+			arch_io_free_memtype_wc(start, end - start);
+		}
+#endif
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(hPVRPCI);
+	PVR_UNREFERENCED_PARAMETER(ui32Index);
+#endif
+}
+#endif /* defined(CONFIG_MTRR) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vmm_pvz_client.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vmm_pvz_client.c
new file mode 100644
index 0000000..e4db7d1
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vmm_pvz_client.c
@@ -0,0 +1,360 @@
+/*************************************************************************/ /*!
+@File			vmm_pvz_client.c
+@Title          VM manager client para-virtualization
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file provides the VMM client para-virtualization APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "vmm_impl.h"
+#include "vz_vmm_pvz.h"
+#include "vz_physheap.h"
+#include "vmm_pvz_client.h"
+
+
+static inline void
+PvzClientLockAcquire(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	OSLockAcquire(psPVRSRVData->hPvzConnectionLock);
+}
+
+static inline void
+PvzClientLockRelease(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	OSLockRelease(psPVRSRVData->hPvzConnectionLock);
+}
+
+/*
+ * ===========================================================
+ *  The following client para-virtualization (pvz) functions
+ *  are exclusively called by guests to initiate a pvz call
+ *  to the host via hypervisor (guest -> vm manager -> host)
+ * ===========================================================
+ */
+
+PVRSRV_ERROR 
+PvzClientCreateDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						 IMG_UINT32 ui32DevID)
+{
+	IMG_UINT32 ui32IRQ;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32RegsSize;
+	IMG_UINT64 ui64RegsCpuPBase;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	IMG_UINT32 uiFuncID = PVZ_BRIDGE_CREATEDEVICECONFIG;
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PvzClientLockAcquire();
+
+	PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnCreateDevConfig);
+
+	eError = psVmmPvz->sHostFuncTab.pfnCreateDevConfig(uiFuncID,
+													   ui32DevID,
+													   &ui32IRQ,
+													   &ui32RegsSize,
+													   &ui64RegsCpuPBase);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+		{
+			eError = PVRSRV_OK;
+		}
+
+		goto e0;
+	}
+
+	/* Device VM system configuration MMIO/IRQ values */
+	if (ui64RegsCpuPBase)
+	{
+		psDevConfig->sRegsCpuPBase.uiAddr = ui64RegsCpuPBase;
+	}
+
+	if (ui32RegsSize)
+	{
+		psDevConfig->ui32RegsSize = ui32RegsSize;
+	}
+
+	if (ui32IRQ)
+	{
+		psDevConfig->ui32IRQ = ui32IRQ;
+	}
+
+e0:
+	PvzClientLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	PVR_ASSERT(psDevConfig->sRegsCpuPBase.uiAddr);
+	PVR_ASSERT(psDevConfig->ui32RegsSize);
+	PVR_ASSERT(psDevConfig->ui32IRQ);
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzClientDestroyDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						  IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	IMG_UINT32 uiFuncID = PVZ_BRIDGE_DESTROYDEVICECONFIG;
+
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PvzClientLockAcquire();
+
+	PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnDestroyDevConfig);
+
+	eError = psVmmPvz->sHostFuncTab.pfnDestroyDevConfig(uiFuncID,
+														ui32DevID);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+		{
+			eError = PVRSRV_OK;
+		}
+
+		goto e0;
+	}
+
+e0:
+	PvzClientLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR 
+PvzClientCreateDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32HeapType;
+	PHYS_HEAP_TYPE eHeapType;
+	IMG_UINT64 ui64FwPhysHeapSize;
+	IMG_UINT64 ui64FwPhysHeapAddr;
+	IMG_UINT64 ui64GpuPhysHeapSize;
+	IMG_UINT64 ui64GpuPhysHeapAddr;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	PVRSRV_DEVICE_PHYS_HEAP ePhysHeap;
+	IMG_UINT32 uiFuncID = PVZ_BRIDGE_CREATEDEVICEPHYSHEAPS;
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PvzClientLockAcquire();
+
+	PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnCreateDevPhysHeaps);
+
+	eError = psVmmPvz->sHostFuncTab.pfnCreateDevPhysHeaps(uiFuncID,
+														  ui32DevID,
+														  &ui32HeapType,
+														  &ui64FwPhysHeapSize,
+														  &ui64FwPhysHeapAddr,
+														  &ui64GpuPhysHeapSize,
+														  &ui64GpuPhysHeapAddr);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+		{
+			eError = PVRSRV_OK;
+		}
+
+		goto e0;
+	}
+
+	eHeapType = (PHYS_HEAP_TYPE) ui32HeapType;
+	for (ePhysHeap = 0; ePhysHeap < PVRSRV_DEVICE_PHYS_HEAP_LAST; ePhysHeap++)
+	{
+		IMG_UINT64 ui64PhysHeapSize;
+		IMG_DEV_PHYADDR sPhysHeapAddr;
+
+		switch (ePhysHeap)
+		{
+			case PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL:
+				sPhysHeapAddr.uiAddr = ui64GpuPhysHeapAddr;
+				ui64PhysHeapSize = ui64GpuPhysHeapSize;
+				break;
+
+			case PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL:
+				sPhysHeapAddr.uiAddr = ui64FwPhysHeapAddr;
+				ui64PhysHeapSize = ui64FwPhysHeapSize;
+				break;
+
+			default:
+				ui64PhysHeapSize = (IMG_UINT64)0;
+				break;
+		}
+
+		if (ui64PhysHeapSize)
+		{
+			eError = SysVzSetPhysHeapAddrSize(psDevConfig,
+											  ePhysHeap,
+											  eHeapType,
+											  sPhysHeapAddr,
+											  ui64PhysHeapSize);
+			PVR_ASSERT(eError == PVRSRV_OK);
+
+			eError = SysVzRegisterPhysHeap(psDevConfig, ePhysHeap);
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+	}
+
+e0:
+	PvzClientLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzClientDestroyDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							 IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	IMG_UINT32 uiFuncID = PVZ_BRIDGE_DESTROYDEVICEPHYSHEAPS;
+
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PvzClientLockAcquire();
+
+	PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnDestroyDevPhysHeaps);
+
+	/* As with every other host-function call, the function ID goes first */
+	eError = psVmmPvz->sHostFuncTab.pfnDestroyDevPhysHeaps(uiFuncID,
+														   ui32DevID);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+		{
+			eError = PVRSRV_OK;
+		}
+
+		goto e0;
+	}
+
+e0:
+	PvzClientLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						IMG_UINT32 ui32DevID,
+						IMG_DEV_PHYADDR sDevPAddr,
+						IMG_UINT64 ui64DevPSize)
+{
+	PVRSRV_ERROR eError;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	IMG_UINT32 uiFuncID = PVZ_BRIDGE_MAPDEVICEPHYSHEAP;
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PvzClientLockAcquire();
+
+	PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnMapDevPhysHeap);
+
+	eError = psVmmPvz->sHostFuncTab.pfnMapDevPhysHeap(uiFuncID,
+													  ui32DevID,
+													  ui64DevPSize,
+													  sDevPAddr.uiAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+e0:
+	PvzClientLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						  IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+	IMG_UINT32 uiFuncID = PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP;
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PvzClientLockAcquire();
+
+	PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnUnmapDevPhysHeap);
+
+	eError = psVmmPvz->sHostFuncTab.pfnUnmapDevPhysHeap(uiFuncID,
+														ui32DevID);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+e0:
+	PvzClientLockRelease();
+	SysVzPvzConnectionRelease(psVmmPvz);
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+/******************************************************************************
+ End of file (vmm_pvz_client.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vmm_pvz_server.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vmm_pvz_server.c
new file mode 100644
index 0000000..0217f1b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vmm_pvz_server.c
@@ -0,0 +1,272 @@
+/*************************************************************************/ /*!
+@File			vmm_pvz_server.c
+@Title          VM manager server para-virtualization handlers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file provides the VMM server para-virtualization handler APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "vz_support.h"
+#include "vz_vm.h"
+#include "vmm_pvz_server.h"
+#include "vz_physheap.h"
+
+
+static inline void
+PvzServerLockAcquire(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	OSLockAcquire(psPVRSRVData->hPvzConnectionLock);
+}
+
+static inline void
+PvzServerLockRelease(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	OSLockRelease(psPVRSRVData->hPvzConnectionLock);
+}
+
+
+/*
+ * ===========================================================
+ *  The following server para-virtualization (pvz) functions
+ *  are exclusively called by the VM manager (hypervisor) on
+ *  behalf of guests to complete guest pvz calls
+ *  (guest -> vm manager -> host)
+ * ===========================================================
+ */
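+
+/*
+ * Illustrative sketch only (editorial, not part of this module): a
+ * platform VMM integration would typically decode a guest's pvz call
+ * payload and route it to the matching handler below. The transport
+ * type and field names (GUEST_PVZ_CALL, psCall->ui32FuncID, etc.) are
+ * hypothetical; only the PvzServer* entry points are real.
+ *
+ *   static PVRSRV_ERROR ExampleVmmDispatch(IMG_UINT32 ui32OSID,
+ *                                          GUEST_PVZ_CALL *psCall)
+ *   {
+ *       switch (psCall->ui32FuncID)
+ *       {
+ *           case PVZ_BRIDGE_MAPDEVICEPHYSHEAP:
+ *               return PvzServerMapDevPhysHeap(ui32OSID,
+ *                                              psCall->ui32FuncID,
+ *                                              psCall->ui32DevID,
+ *                                              psCall->ui64Size,
+ *                                              psCall->ui64PAddr);
+ *           default:
+ *               return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ *       }
+ *   }
+ */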
+
+PVRSRV_ERROR
+PvzServerCreateDevConfig(IMG_UINT32 ui32OSID,
+						 IMG_UINT32 ui32FuncID,
+						 IMG_UINT32 ui32DevID,
+						 IMG_UINT32 *pui32IRQ,
+						 IMG_UINT32 *pui32RegsSize,
+						 IMG_UINT64 *pui64RegsCpuPBase)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_CREATEDEVICECONFIG);
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzCreateDevConfig(ui32OSID,
+									 ui32DevID,
+									 pui32IRQ,
+									 pui32RegsSize,
+									 pui64RegsCpuPBase);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerDestroyDevConfig(IMG_UINT32 ui32OSID,
+						  IMG_UINT32 ui32FuncID,
+						  IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_DESTROYDEVICECONFIG);
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzDestroyDevConfig(ui32OSID, ui32DevID);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerCreateDevPhysHeaps(IMG_UINT32 ui32OSID,
+							IMG_UINT32 ui32FuncID,
+							IMG_UINT32 ui32DevID,
+							IMG_UINT32 *peHeapType,
+							IMG_UINT64 *pui64FwSize,
+							IMG_UINT64 *pui64FwAddr,
+							IMG_UINT64 *pui64GpuSize,
+							IMG_UINT64 *pui64GpuAddr)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_CREATEDEVICEPHYSHEAPS);
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzCreateDevPhysHeaps(ui32OSID,
+										ui32DevID,
+										peHeapType,
+										pui64FwSize,
+										pui64FwAddr,
+										pui64GpuSize,
+										pui64GpuAddr);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerDestroyDevPhysHeaps(IMG_UINT32 ui32OSID,
+							 IMG_UINT32 ui32FuncID,
+							 IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_DESTROYDEVICEPHYSHEAPS);
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzDestroyDevPhysHeaps(ui32OSID, ui32DevID);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID,
+						IMG_UINT32 ui32FuncID,
+						IMG_UINT32 ui32DevID,
+						IMG_UINT64 ui64Size,
+						IMG_UINT64 ui64PAddr)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_MAPDEVICEPHYSHEAP);
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzRegisterFwPhysHeap(ui32OSID,
+										ui32DevID,
+										ui64Size,
+										ui64PAddr);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID,
+						  IMG_UINT32 ui32FuncID,
+						  IMG_UINT32 ui32DevID)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP);
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzUnregisterFwPhysHeap(ui32OSID, ui32DevID);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+
+/*
+ * ============================================================
+ *  The following server para-virtualization (pvz) functions
+ *  are exclusively called by the VM manager (hypervisor) to
+ *  pass side band information to the host (vm manager -> host)
+ * ============================================================
+ */
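+
+/*
+ * Illustrative sketch only (editorial): a VMM would invoke these
+ * directly when it observes a guest lifecycle event, e.g. on guest
+ * boot (the hook name and where the priority value comes from are
+ * hypothetical):
+ *
+ *   eError = PvzServerOnVmOnline(ui32GuestOSID, ui32GuestPriority);
+ *
+ * and, symmetrically, PvzServerOnVmOffline(ui32GuestOSID) on guest
+ * shutdown.
+ */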
+
+PVRSRV_ERROR
+PvzServerOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzOnVmOnline(ui32OSID, ui32Priority);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerOnVmOffline(IMG_UINT32 ui32OSID)
+{
+	PVRSRV_ERROR eError;
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzOnVmOffline(ui32OSID);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	PvzServerLockRelease();
+
+	return eError;
+}
+
+PVRSRV_ERROR
+PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue)
+{
+	PVRSRV_ERROR eError;
+
+	PvzServerLockAcquire();
+
+	eError = SysVzPvzVMMConfigure(eVMMParamType, ui32ParamValue);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	PvzServerLockRelease();
+
+	return eError;
+
+}
+
+/******************************************************************************
+ End of file (vmm_pvz_server.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vmm_type_stub.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vmm_type_stub.c
new file mode 100644
index 0000000..8598a36
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vmm_type_stub.c
@@ -0,0 +1,226 @@
+/*************************************************************************/ /*!
+@File           vmm_type_stub.c
+@Title          Stub VM manager type
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Sample stub (no-operation) VM manager implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "rgxheapconfig.h"
+
+#include "vmm_impl.h"
+#include "vmm_pvz_server.h"
+
+static PVRSRV_ERROR
+StubVMMCreateDevConfig(IMG_UINT32 ui32FuncID,
+					   IMG_UINT32 ui32DevID,
+					   IMG_UINT32 *pui32IRQ,
+					   IMG_UINT32 *pui32RegsSize,
+					   IMG_UINT64 *pui64RegsCpuPBase)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	PVR_UNREFERENCED_PARAMETER(pui32IRQ);
+	PVR_UNREFERENCED_PARAMETER(pui32RegsSize);
+	PVR_UNREFERENCED_PARAMETER(pui64RegsCpuPBase);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMDestroyDevConfig(IMG_UINT32 ui32FuncID,
+						IMG_UINT32 ui32DevID)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMCreateDevPhysHeaps(IMG_UINT32 ui32FuncID,
+						  IMG_UINT32 ui32DevID,
+						  IMG_UINT32 *peType,
+						  IMG_UINT64 *pui64FwPhysHeapSize,
+						  IMG_UINT64 *pui64FwPhysHeapAddr,
+						  IMG_UINT64 *pui64GpuPhysHeapSize,
+						  IMG_UINT64 *pui64GpuPhysHeapAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	PVR_UNREFERENCED_PARAMETER(peType);
+	PVR_UNREFERENCED_PARAMETER(pui64FwPhysHeapSize);
+	PVR_UNREFERENCED_PARAMETER(pui64FwPhysHeapAddr);
+	PVR_UNREFERENCED_PARAMETER(pui64GpuPhysHeapSize);
+	PVR_UNREFERENCED_PARAMETER(pui64GpuPhysHeapAddr);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMDestroyDevPhysHeaps(IMG_UINT32 ui32FuncID,
+						   IMG_UINT32 ui32DevID)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMMapDevPhysHeap(IMG_UINT32 ui32FuncID,
+					  IMG_UINT32 ui32DevID,
+					  IMG_UINT64 ui64Size,
+					  IMG_UINT64 ui64Addr)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	PVR_UNREFERENCED_PARAMETER(ui64Size);
+	PVR_UNREFERENCED_PARAMETER(ui64Addr);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMUnmapDevPhysHeap(IMG_UINT32 ui32FuncID,
+						IMG_UINT32 ui32DevID)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMGetDevPhysHeapOrigin(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							PVRSRV_DEVICE_PHYS_HEAP eHeapType,
+							PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin)
+{
+	*peOrigin = PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST;
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+	PVR_UNREFERENCED_PARAMETER(eHeapType);
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+StubVMMGetDevPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							  PVRSRV_DEVICE_PHYS_HEAP eHeapType,
+							  IMG_UINT64 *pui64Size,
+							  IMG_UINT64 *pui64Addr)
+{
+	*pui64Size = 0;
+	*pui64Addr = 0;
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+	PVR_UNREFERENCED_PARAMETER(eHeapType);
+	return PVRSRV_OK;
+}
+
+static VMM_PVZ_CONNECTION gsStubVmmPvz =
+{
+	.sHostFuncTab = {
+		/* pfnCreateDevConfig */
+		&StubVMMCreateDevConfig,
+
+		/* pfnDestroyDevConfig */
+		&StubVMMDestroyDevConfig,
+
+		/* pfnCreateDevPhysHeaps */
+		&StubVMMCreateDevPhysHeaps,
+
+		/* pfnDestroyDevPhysHeaps */
+		&StubVMMDestroyDevPhysHeaps,
+
+		/* pfnMapDevPhysHeap */
+		&StubVMMMapDevPhysHeap,
+
+		/* pfnUnmapDevPhysHeap */
+		&StubVMMUnmapDevPhysHeap
+	},
+
+	.sGuestFuncTab = {
+		/* pfnCreateDevConfig */
+		&PvzServerCreateDevConfig,
+
+		/* pfnDestroyDevConfig */
+		&PvzServerDestroyDevConfig,
+
+		/* pfnCreateDevPhysHeaps */
+		&PvzServerCreateDevPhysHeaps,
+
+		/* pfnDestroyDevPhysHeaps */
+		&PvzServerDestroyDevPhysHeaps,
+
+		/* pfnMapDevPhysHeap */
+		&PvzServerMapDevPhysHeap,
+
+		/* pfnUnmapDevPhysHeap */
+		&PvzServerUnmapDevPhysHeap
+	},
+
+	.sConfigFuncTab = {
+		/* pfnGetDevPhysHeapOrigin */
+		&StubVMMGetDevPhysHeapOrigin,
+
+		/* pfnGetDevPhysHeapAddrSize */
+		&StubVMMGetDevPhysHeapAddrSize
+	},
+
+	.sVmmFuncTab = {
+		/* pfnOnVmOnline */
+		&PvzServerOnVmOnline,
+
+		/* pfnOnVmOffline */
+		&PvzServerOnVmOffline,
+
+		/* pfnVMMConfigure */
+		&PvzServerVMMConfigure
+	}
+};
+
+PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection)
+{
+	PVR_LOGR_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS);
+	*psPvzConnection = &gsStubVmmPvz;
+	PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type, no runtime VZ support"));
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection)
+{
+	PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection");
+}
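+
+/*
+ * Note (editorial): because VMMCreatePvzConnection() above returns
+ * PVRSRV_ERROR_NOT_IMPLEMENTED, the caller (SysVzPvzConnectionInit()
+ * in vz_vmm_pvz.c) treats connection setup as failed, so this stub
+ * deliberately cannot back a running VZ configuration; a real VM
+ * manager type must return PVRSRV_OK and supply working
+ * sHostFuncTab/sConfigFuncTab implementations.
+ */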
+
+/******************************************************************************
+ End of file (vmm_type_stub.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_physheap_common.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_physheap_common.c
new file mode 100644
index 0000000..8a7c521
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_physheap_common.c
@@ -0,0 +1,527 @@
+/*************************************************************************/ /*!
+@File           vz_physheap_common.c
+@Title          System virtualization common physheap configuration API(s)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System virtualization common physical heap configuration API(s)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "allocmem.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+#include "pvrsrv_device.h"
+#include "rgxfwutils.h"
+
+#include "dma_support.h"
+#include "vz_support.h"
+#include "vz_vmm_pvz.h"
+#include "vz_physheap.h"
+#include "vmm_pvz_client.h"
+#include "vmm_impl.h"
+
+PVRSRV_ERROR SysVzCreateDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+
+	eError = PvzClientCreateDevPhysHeaps(psDevConfig, 0);
+	PVR_LOG_IF_ERROR(eError, "PvzClientCreateDevPhysHeaps");
+
+	return eError;
+}
+
+void SysVzDestroyDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PvzClientDestroyDevPhysHeaps(psDevConfig, 0);
+}
+
+PVRSRV_ERROR SysVzRegisterFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+
+	eError = SysVzGetPhysHeapOrigin(psDevConfig, eHeap, &eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+	if (eHeapOrigin != PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+	{
+		PHYS_HEAP_CONFIG *psPhysHeapConfig;
+		IMG_DEV_PHYADDR sDevPAddr;
+		IMG_UINT64 ui64DevPSize;
+
+		psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, eHeap);
+		PVR_LOGR_IF_FALSE((NULL != psPhysHeapConfig), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INVALID_PARAMS);
+
+		sDevPAddr.uiAddr = psPhysHeapConfig->pasRegions[0].sStartAddr.uiAddr;
+		PVR_LOGR_IF_FALSE((0 != sDevPAddr.uiAddr), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INVALID_PARAMS);
+		ui64DevPSize = psPhysHeapConfig->pasRegions[0].uiSize;
+		PVR_LOGR_IF_FALSE((0 != ui64DevPSize), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INVALID_PARAMS);
+
+		eError = PvzClientMapDevPhysHeap(psDevConfig, 0, sDevPAddr, ui64DevPSize);
+		PVR_LOGG_IF_ERROR(eError, "PvzClientMapDevPhysHeap", e0);
+	}
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR SysVzUnregisterFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+
+	eError = SysVzGetPhysHeapOrigin(psDevConfig, eHeapType, &eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "PvzClientMapDevPhysHeap", e0);
+
+	if (eHeapOrigin != PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+	{
+		eError = PvzClientUnmapDevPhysHeap(psDevConfig, 0);
+		PVR_LOGG_IF_ERROR(eError, "PvzClientMapDevPhysHeap", e0);
+	}
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR SysVzRegisterPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+								   PVRSRV_DEVICE_PHYS_HEAP eHeap)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PHYS_HEAP_CONFIG *psPhysHeapConfig;
+	PVR_LOGR_IF_FALSE((eHeap < PVRSRV_DEVICE_PHYS_HEAP_LAST), "Invalid Heap", PVRSRV_ERROR_INVALID_PARAMS);
+	PVR_LOGR_IF_FALSE((eHeap != PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL), "Skipping CPU local heap registration", PVRSRV_OK);
+
+	/* Currently we only support GPU/FW DMA physheap registration */
+	psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, eHeap);
+	PVR_LOGR_IF_FALSE((NULL != psPhysHeapConfig), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INVALID_PARAMS);
+
+	if (psPhysHeapConfig &&
+		psPhysHeapConfig->pasRegions &&
+		psPhysHeapConfig->pasRegions[0].hPrivData)
+	{
+		DMA_ALLOC *psDmaAlloc;
+
+		if (psPhysHeapConfig->eType == PHYS_HEAP_TYPE_DMA)
+		{
+			/* DMA physheaps must be registered for IO remapping on some OS environments */
+			psDmaAlloc = psPhysHeapConfig->pasRegions[0].hPrivData;
+			eError = SysDmaRegisterForIoRemapping(psDmaAlloc);
+			PVR_LOG_IF_ERROR(eError, "SysDmaRegisterForIoRemapping");
+		}
+	}
+
+	return eError;
+}
+
+void SysVzDeregisterPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							 PVRSRV_DEVICE_PHYS_HEAP eHeapType)
+{
+	PHYS_HEAP_CONFIG *psPhysHeapConfig;
+
+	if (eHeapType == PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL ||
+		eHeapType >= PVRSRV_DEVICE_PHYS_HEAP_LAST)
+	{
+		return;
+	}
+
+	/* Currently we only support GPU/FW physheap deregistration */
+	psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, eHeapType);
+	PVR_LOG_IF_FALSE((psPhysHeapConfig != NULL), "SysVzGetPhysHeapConfig");
+
+	if (psPhysHeapConfig &&
+		psPhysHeapConfig->pasRegions &&
+		psPhysHeapConfig->pasRegions[0].hPrivData)
+	{
+		DMA_ALLOC *psDmaAlloc;
+
+		if (psPhysHeapConfig->eType == PHYS_HEAP_TYPE_DMA)
+		{
+			psDmaAlloc = psPhysHeapConfig->pasRegions[0].hPrivData;
+			SysDmaDeregisterForIoRemapping(psDmaAlloc);
+		}
+	}
+
+}
+
+PHYS_HEAP_CONFIG *SysVzGetPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+										 PVRSRV_DEVICE_PHYS_HEAP eHeapType)
+{
+	IMG_UINT uiIdx;
+	IMG_UINT32 ui32PhysHeapID;
+	IMG_UINT32 ui32PhysHeapCount;
+	PHYS_HEAP_CONFIG *psPhysHeap;
+	PHYS_HEAP_CONFIG *ps1stPhysHeap = &psDevConfig->pasPhysHeaps[0];
+
+	if (eHeapType == PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL)
+	{
+		return ps1stPhysHeap;
+	}
+
+	/* Initialise here to catch lookup failures */
+	ui32PhysHeapCount = psDevConfig->ui32PhysHeapCount;
+	psPhysHeap = NULL;
+
+	if (eHeapType < PVRSRV_DEVICE_PHYS_HEAP_LAST)
+	{
+		/* Look up the physheap ID and resolve it to its config structure */
+		ui32PhysHeapID = psDevConfig->aui32PhysHeapID[eHeapType];
+		for (uiIdx = 1; uiIdx < ui32PhysHeapCount; uiIdx++)
+		{
+			if (ps1stPhysHeap[uiIdx].ui32PhysHeapID == ui32PhysHeapID)
+			{
+				psPhysHeap = &ps1stPhysHeap[uiIdx];
+				break;
+			}
+		}
+	}
+	PVR_LOG_IF_FALSE((psPhysHeap != NULL), "Invalid heap type or no matching physheap config");
+
+	return psPhysHeap;
+}
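+
+/*
+ * Illustrative example of the lookup above (values hypothetical):
+ * with pasPhysHeaps[] = { {ID 0, GPU local}, {ID 1, FW local} } and
+ * aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] == 1, a request
+ * for the FW heap scans entries 1..ui32PhysHeapCount-1 for a matching
+ * ID and returns &pasPhysHeaps[1]; entry 0 is reserved for the
+ * GPU-local heap and is returned without a scan.
+ */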
+
+PVRSRV_ERROR  SysVzSetPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									   PVRSRV_DEVICE_PHYS_HEAP ePhysHeap,
+									   PHYS_HEAP_TYPE eHeapType,
+									   IMG_DEV_PHYADDR sPhysHeapAddr,
+									   IMG_UINT64 ui64PhysHeapSize)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+	PHYS_HEAP_CONFIG *psPhysHeapConfig;
+
+	psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, ePhysHeap);
+	PVR_LOGR_IF_FALSE((psPhysHeapConfig != NULL), "Invalid PhysHeapConfig", eError);
+	PVR_LOGR_IF_FALSE((ui64PhysHeapSize != 0), "Invalid PhysHeapSize", eError);
+
+	if (eHeapType == PHYS_HEAP_TYPE_UMA || eHeapType == PHYS_HEAP_TYPE_LMA)
+	{
+		/* At this juncture, we _may_ initialise new state */
+		PVR_ASSERT(sPhysHeapAddr.uiAddr && ui64PhysHeapSize);
+
+		if (psPhysHeapConfig->pasRegions == NULL)
+		{
+			psPhysHeapConfig->pasRegions = OSAllocZMem(sizeof(PHYS_HEAP_REGION));
+			if (psPhysHeapConfig->pasRegions == NULL)
+			{
+				return PVRSRV_ERROR_OUT_OF_MEMORY;
+			}
+
+			psPhysHeapConfig->pasRegions[0].bDynAlloc = IMG_TRUE;
+			psPhysHeapConfig->ui32NumOfRegions++;
+		}
+
+		if (eHeapType == PHYS_HEAP_TYPE_UMA)
+		{
+			psPhysHeapConfig->pasRegions[0].sCardBase = sPhysHeapAddr;
+		}
+
+		psPhysHeapConfig->pasRegions[0].sStartAddr.uiAddr = sPhysHeapAddr.uiAddr;
+		psPhysHeapConfig->pasRegions[0].uiSize = ui64PhysHeapSize;
+		psPhysHeapConfig->eType = eHeapType;
+
+		eError = PVRSRV_OK;
+	}
+
+	PVR_LOG_IF_ERROR(eError, "SysVzSetPhysHeapAddrSize");
+	return eError;
+}
+
+PVRSRV_ERROR SysVzGetPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									  PVRSRV_DEVICE_PHYS_HEAP ePhysHeap,
+									  PHYS_HEAP_TYPE eHeapType,
+									  IMG_DEV_PHYADDR *psAddr,
+									  IMG_UINT64 *pui64Size)
+{
+	IMG_UINT64 uiAddr;
+	PVRSRV_ERROR eError;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+
+	PVR_UNREFERENCED_PARAMETER(eHeapType);
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PVR_ASSERT(psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapAddrSize);
+
+	eError = psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapAddrSize(psDevConfig,
+																ePhysHeap,
+																pui64Size,
+																&uiAddr);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: VMM/PVZ pfnGetDevPhysHeapAddrSize() must be implemented (%s)",
+					__FUNCTION__,
+					PVRSRVGetErrorStringKM(eError)));
+		}
+
+		goto e0;
+	}
+
+	psAddr->uiAddr = uiAddr;
+e0:
+	SysVzPvzConnectionRelease(psVmmPvz);
+	return eError;
+}
+
+PVRSRV_ERROR SysVzGetPhysHeapOrigin(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									PVRSRV_DEVICE_PHYS_HEAP eHeap,
+									PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin)
+{
+	PVRSRV_ERROR eError;
+	VMM_PVZ_CONNECTION *psVmmPvz;
+
+	psVmmPvz = SysVzPvzConnectionAcquire();
+	PVR_ASSERT(psVmmPvz);
+
+	PVR_ASSERT(psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin);
+
+	eError = psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin(psDevConfig,
+															  eHeap,
+															  peOrigin);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: VMM/PVZ pfnGetDevPhysHeapOrigin() must be implemented (%s)",
+					__FUNCTION__,
+					PVRSRVGetErrorStringKM(eError)));
+		}
+
+		goto e0;
+	}
+
+e0:
+	SysVzPvzConnectionRelease(psVmmPvz);
+	return eError;
+}
+
+PVRSRV_ERROR SysVzPvzCreateDevPhysHeaps(IMG_UINT32 ui32OSID,
+										IMG_UINT32 ui32DevID,
+										IMG_UINT32 *pePhysHeapType,
+										IMG_UINT64 *pui64FwPhysHeapSize,
+										IMG_UINT64 *pui64FwPhysHeapAddr,
+										IMG_UINT64 *pui64GpuPhysHeapSize,
+										IMG_UINT64 *pui64GpuPhysHeapAddr)
+{
+	IMG_UINT64 uiHeapSize;
+	IMG_DEV_PHYADDR sCardBase;
+	IMG_CPU_PHYADDR sStartAddr;
+	PHYS_HEAP_CONFIG *psPhysHeap;
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	PVRSRV_DEVICE_PHYS_HEAP ePhysHeap;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+	PVR_LOGR_IF_FALSE((ui32DevID == 0), "Invalid Device ID", eError);
+	PVR_LOGR_IF_FALSE((psPVRSRVData != NULL), "Invalid PVRSRVData", eError);
+	PVR_LOGR_IF_FALSE((ui32OSID > 0 && ui32OSID < RGXFW_NUM_OS), "Invalid OSID", eError);
+
+	/* For now, limit support to single device setups */
+	psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	psDevConfig = psDeviceNode->psDevConfig;
+
+	/* Default is a kernel-managed UMA physheap memory configuration */
+	*pui64FwPhysHeapSize = (IMG_UINT64)0;
+	*pui64FwPhysHeapAddr = (IMG_UINT64)0;
+	*pui64GpuPhysHeapSize = (IMG_UINT64)0;
+	*pui64GpuPhysHeapAddr = (IMG_UINT64)0;
+
+	*pePhysHeapType = (IMG_UINT32) SysVzGetMemoryConfigPhysHeapType();
+	for (ePhysHeap = 0; ePhysHeap < PVRSRV_DEVICE_PHYS_HEAP_LAST; ePhysHeap++)
+	{
+		switch (ePhysHeap)
+		{
+			/* Only interested in these physheaps */
+			case PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL:
+			case PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL:
+				{
+					PVRSRV_ERROR eError;
+
+					eError = SysVzGetPhysHeapOrigin(psDevConfig,
+													ePhysHeap,
+													&eHeapOrigin);
+					PVR_LOGR_IF_ERROR(eError, "SysVzGetPhysHeapOrigin");
+
+					if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+					{
+						continue;
+					}
+				}
+				break;
+
+			default:
+				continue;
+		}
+
+		/* Determine what type of physheap backs this config */
+		psPhysHeap = SysVzGetPhysHeapConfig(psDevConfig, ePhysHeap);
+		if (psPhysHeap && psPhysHeap->pasRegions)
+		{
+			/* Services managed physheap (LMA/UMA carve-out/DMA) */
+			sStartAddr = psPhysHeap->pasRegions[0].sStartAddr;
+			sCardBase = psPhysHeap->pasRegions[0].sCardBase;
+			uiHeapSize = psPhysHeap->pasRegions[0].uiSize;
+
+			if (!uiHeapSize)
+			{
+				/* UMA (i.e. non-carve-out): nothing to rebase, so skip */
+				PVR_ASSERT(!sStartAddr.uiAddr && !sCardBase.uiAddr);
+				continue;
+			}
+
+			/* Rebase this guest OSID physical heap */
+			sStartAddr.uiAddr += ui32OSID * uiHeapSize;
+			sCardBase.uiAddr += ui32OSID * uiHeapSize;
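+			/* e.g. (illustrative) with a 64 MB carve-out based at
+			   0x80000000, OSID 1 is rebased to 0x84000000 and OSID 2
+			   to 0x88000000, giving each guest a disjoint window */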
+
+			switch (ePhysHeap)
+			{
+				case PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL:
+					*pui64GpuPhysHeapSize = uiHeapSize;
+					*pui64GpuPhysHeapAddr = sStartAddr.uiAddr;
+					break;
+
+				case PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL:
+					*pui64FwPhysHeapSize = uiHeapSize;
+					*pui64FwPhysHeapAddr = sStartAddr.uiAddr;
+					break;
+
+				default:
+					PVR_ASSERT(0);
+					break;
+			}
+		}
+		else
+		{
+#if defined(DEBUG)
+			eError = SysVzGetPhysHeapOrigin(psDevConfig,
+											ePhysHeap,
+											&eHeapOrigin);
+			PVR_LOGR_IF_ERROR(eError, "SysVzGetPhysHeapOrigin");
+
+			if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+			{
+				PVR_ASSERT(ePhysHeap != PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL);
+			}
+#endif
+		}
+	}
+
+	return PVRSRV_OK;
+}
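+
+/*
+ * Note (editorial): zeroed Fw/Gpu size and address out-parameters
+ * denote the default kernel-managed UMA configuration; non-zero
+ * values describe the per-OSID rebased window within a
+ * services-managed (LMA/UMA carve-out/DMA) heap, as computed above.
+ */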
+
+PVRSRV_ERROR SysVzPvzDestroyDevPhysHeaps(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32OSID);
+	PVR_UNREFERENCED_PARAMETER(ui32DevID);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysVzPvzRegisterFwPhysHeap(IMG_UINT32 ui32OSID,
+										IMG_UINT32 ui32DevID,
+										IMG_UINT64 ui64Size,
+										IMG_UINT64 ui64PAddr)
+{
+	PVRSRV_DEVICE_NODE* psDeviceNode;
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+	PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	PVR_LOGR_IF_FALSE((ui32DevID == 0), "Invalid Device ID", eError);
+	PVR_LOGR_IF_FALSE((psPVRSRVData != NULL), "Invalid PVRSRVData", eError);
+
+	psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	psDevConfig = psDeviceNode->psDevConfig;
+
+	eError = SysVzGetPhysHeapOrigin(psDevConfig,
+									eHeapType,
+									&eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+	if (eHeapOrigin != PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+	{
+		IMG_DEV_PHYADDR sDevPAddr = {ui64PAddr};
+		eError = RGXVzRegisterFirmwarePhysHeap(psDeviceNode,
+											   ui32OSID,
+											   sDevPAddr,
+											   ui64Size);
+		PVR_LOGG_IF_ERROR(eError, "RGXVzRegisterFirmwarePhysHeap", e0);
+	}
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR SysVzPvzUnregisterFwPhysHeap(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	PVR_LOGR_IF_FALSE((ui32DevID == 0), "Invalid Device ID", eError);
+	PVR_LOGR_IF_FALSE((psPVRSRVData != NULL), "Invalid PVRSRVData", eError);
+
+	psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	psDevConfig = psDeviceNode->psDevConfig;
+
+	eError = SysVzGetPhysHeapOrigin(psDevConfig,
+									eHeap,
+									&eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+	if (eHeapOrigin != PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+	{
+		eError = RGXVzUnregisterFirmwarePhysHeap(psDeviceNode, ui32OSID);
+		PVR_LOG_IF_ERROR(eError, "RGXVzUnregisterFirmwarePhysHeap");
+	}
+
+e0:
+	return eError;
+}
+
+/******************************************************************************
+ End of file (vz_physheap_common.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_physheap_generic.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_physheap_generic.c
new file mode 100644
index 0000000..f2c0e87
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_physheap_generic.c
@@ -0,0 +1,412 @@
+/*************************************************************************/ /*!
+@File           vz_physheap_generic.c
+@Title          System virtualization physheap configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System virtualization physical heap configuration
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "allocmem.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+#include "pvrsrv_device.h"
+#include "rgxfwutils.h"
+
+#include "dma_support.h"
+#include "vz_support.h"
+#include "vz_vmm_pvz.h"
+#include "vz_physheap.h"
+
+#if defined(CONFIG_L4)
+static IMG_HANDLE gahPhysHeapIoRemap[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+#endif
+
+static PVRSRV_ERROR
+SysVzCreateDmaPhysHeap(PHYS_HEAP_CONFIG *psPhysHeapConfig)
+{
+	PVRSRV_ERROR eError;
+	DMA_ALLOC *psDmaAlloc;
+	PHYS_HEAP_REGION *psPhysHeapRegion;
+
+	psPhysHeapRegion = &psPhysHeapConfig->pasRegions[0];
+	PVR_LOGR_IF_FALSE((NULL != psPhysHeapRegion->hPrivData), "Missing DMA physheap private data", PVRSRV_ERROR_INVALID_PARAMS);
+
+	psDmaAlloc = (DMA_ALLOC*)psPhysHeapRegion->hPrivData;
+	psDmaAlloc->ui64Size = psPhysHeapRegion->uiSize;
+
+	eError = SysDmaAllocMem(psDmaAlloc);
+	if (eError != PVRSRV_OK)
+	{
+		psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+	}
+	else
+	{
+		psPhysHeapRegion->sStartAddr.uiAddr = psDmaAlloc->sBusAddr.uiAddr;
+		psPhysHeapRegion->sCardBase.uiAddr = psDmaAlloc->sBusAddr.uiAddr;
+		psPhysHeapConfig->eType = PHYS_HEAP_TYPE_DMA;
+	}
+
+	return eError;
+}
+
+static void
+SysVzDestroyDmaPhysHeap(PHYS_HEAP_CONFIG *psPhysHeapConfig)
+{
+	DMA_ALLOC *psDmaAlloc;
+	PHYS_HEAP_REGION *psPhysHeapRegion;
+
+	psPhysHeapRegion = &psPhysHeapConfig->pasRegions[0];
+	psDmaAlloc = (DMA_ALLOC*)psPhysHeapRegion->hPrivData;
+
+	if (psDmaAlloc != NULL)
+	{
+		PVR_LOG_IF_FALSE((0 != psPhysHeapRegion->sStartAddr.uiAddr), "Invalid DMA physheap start address");
+		PVR_LOG_IF_FALSE((0 != psPhysHeapRegion->sCardBase.uiAddr), "Invalid DMA physheap card address");
+		PVR_LOG_IF_FALSE((0 != psPhysHeapRegion->uiSize), "Invalid DMA physheap size");
+
+		SysDmaFreeMem(psDmaAlloc);
+
+		psPhysHeapRegion->sCardBase.uiAddr = 0;
+		psPhysHeapRegion->sStartAddr.uiAddr = 0;
+		psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+	}
+}
+
+static PVRSRV_ERROR
+SysVzCreatePhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+					PVRSRV_DEVICE_PHYS_HEAP ePhysHeap)
+{
+	IMG_DEV_PHYADDR sHeapAddr;
+	IMG_UINT64 ui64HeapSize = 0;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PHYS_HEAP_REGION *psPhysHeapRegion;
+	PHYS_HEAP_CONFIG *psPhysHeapConfig;
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+
+	/* Lookup GPU/FW physical heap config, allocate primary region */
+	psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, ePhysHeap);
+	PVR_LOGR_IF_FALSE((NULL != psPhysHeapConfig), "Invalid physheap config", PVRSRV_ERROR_INVALID_PARAMS);
+
+	if (psPhysHeapConfig->pasRegions == NULL)
+	{
+		psPhysHeapConfig->pasRegions = OSAllocZMem(sizeof(PHYS_HEAP_REGION));
+		if (psPhysHeapConfig->pasRegions == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			PVR_LOGG_IF_ERROR(eError, "OSAllocZMem", e0);
+		}
+
+		psPhysHeapConfig->pasRegions[0].bDynAlloc = IMG_TRUE;
+		psPhysHeapConfig->ui32NumOfRegions++;
+	}
+	else
+	{
+		psPhysHeapConfig->pasRegions[0].bDynAlloc = IMG_FALSE;
+	}
+
+	if (psPhysHeapConfig->pasRegions[0].hPrivData == NULL)
+	{
+		DMA_ALLOC *psDmaAlloc = OSAllocZMem(sizeof(DMA_ALLOC));
+		if (psDmaAlloc == NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			PVR_LOGG_IF_ERROR(eError, "OSAllocZMem", e0);
+		}
+
+		psDmaAlloc->pvOSDevice = psDevConfig->pvOSDevice;
+		psPhysHeapConfig->pasRegions[0].hPrivData = psDmaAlloc;
+	}
+
+	/* Lookup physheap addr/size from VM manager type */
+	eError = SysVzGetPhysHeapAddrSize(psDevConfig,
+									  ePhysHeap,
+									  PHYS_HEAP_TYPE_UMA,
+									  &sHeapAddr,
+									  &ui64HeapSize);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapAddrSize", e0);
+
+	/* Initialise physical heap and region state */
+	psPhysHeapRegion = &psPhysHeapConfig->pasRegions[0];
+	psPhysHeapRegion->sStartAddr.uiAddr = sHeapAddr.uiAddr;
+	psPhysHeapRegion->sCardBase.uiAddr = sHeapAddr.uiAddr;
+	psPhysHeapRegion->uiSize = ui64HeapSize;
+
+	if (ePhysHeap == PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL)
+	{
+		/* Firmware physheaps require additional init */
+		psPhysHeapConfig->pszPDumpMemspaceName = "SYSMEM";
+		psPhysHeapConfig->psMemFuncs =
+				psDevConfig->pasPhysHeaps[0].psMemFuncs;
+	}
+
+	/* Which driver is responsible for allocating the
+	   physical memory backing the device physheap */
+	eError = SysVzGetPhysHeapOrigin(psDevConfig,
+									ePhysHeap,
+									&eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+	if (psPhysHeapRegion->sStartAddr.uiAddr == 0)
+	{
+		if (psPhysHeapRegion->uiSize)
+		{
+			if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+			{
+				/* Scale DMA size by the number of OSIDs */
+				psPhysHeapRegion->uiSize *= RGXFW_NUM_OS;
+			}
+
+			eError = SysVzCreateDmaPhysHeap(psPhysHeapConfig);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_LOGG_IF_ERROR(eError, "SysVzCreateDmaPhysHeap", e0);
+			}
+
+			/* Verify the validity of DMA physheap region */
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->sStartAddr.uiAddr), "Invalid DMA physheap start address", e0);
+			PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->sCardBase.uiAddr), "Invalid DMA physheap card address", e0);
+			PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->uiSize), "Invalid DMA physheap size", e0);
+			eError = PVRSRV_OK;
+
+			/* Services managed DMA physheap setup complete */
+			psPhysHeapConfig->eType = PHYS_HEAP_TYPE_DMA;
+
+			/* Only the PHYS_HEAP_TYPE_DMA should be registered */
+			eError = SysVzRegisterPhysHeap(psDevConfig, ePhysHeap);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_LOGG_IF_ERROR(eError, "SysVzRegisterPhysHeap", e0);
+			}
+
+			if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+			{
+				/* Restore original physheap size */
+				psPhysHeapRegion->uiSize /= RGXFW_NUM_OS;
+			}
+		}
+		else
+		{
+			if (psPhysHeapConfig->pasRegions[0].hPrivData)
+			{
+				OSFreeMem(psPhysHeapConfig->pasRegions[0].hPrivData);
+				psPhysHeapConfig->pasRegions[0].hPrivData = NULL;
+			}
+
+			if (psPhysHeapConfig->pasRegions[0].bDynAlloc)
+			{
+				OSFreeMem(psPhysHeapConfig->pasRegions);
+				psPhysHeapConfig->pasRegions = NULL;
+				psPhysHeapConfig->ui32NumOfRegions--;
+				PVR_LOGG_IF_FALSE((psPhysHeapConfig->ui32NumOfRegions == 0), "Invalid refcount", e0);
+			}
+
+			/* Kernel managed UMA physheap setup complete */
+			psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+		}
+	}
+	else
+	{
+		/* Verify the validity of DMA physheap region */
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->sStartAddr.uiAddr), "Invalid DMA physheap start address", e0);
+		PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->sCardBase.uiAddr), "Invalid DMA physheap card address", e0);
+		PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->uiSize), "Invalid DMA physheap size", e0);
+		eError = PVRSRV_OK;
+
+#if defined(CONFIG_L4)
+		{
+			IMG_UINT64 ui64Offset;
+			IMG_UINT64 ui64BaseAddr;
+			IMG_CPU_VIRTADDR pvCpuVAddr;
+
+			/* On Fiasco.OC/l4linux, ioremap physheap now (might fail) */
+			gahPhysHeapIoRemap[ePhysHeap] =
+							OSMapPhysToLin(psPhysHeapRegion->sStartAddr,
+										   psPhysHeapRegion->uiSize,
+										   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+			PVR_LOGG_IF_FALSE((NULL != gahPhysHeapIoRemap[ePhysHeap]), "OSMapPhysToLin", e0);
+
+			for (ui64Offset = 0;
+				 ui64Offset < psPhysHeapRegion->uiSize;
+				 ui64Offset += (IMG_UINT64)OSGetPageSize())
+			{
+				/* Pre-fault-in all physheap pages into l4linux address space,
+				   this avoids having to pre-fault these before mapping into
+				   an application address space during OSMMapPMRGeneric() call */
+				ui64BaseAddr = psPhysHeapRegion->sStartAddr.uiAddr + ui64Offset;
+				pvCpuVAddr = l4x_phys_to_virt(ui64BaseAddr);
+
+				/* We need to ensure the compiler does not optimise this out */
+				*((volatile int*)pvCpuVAddr) = *((volatile int*)pvCpuVAddr);
+			}
+		}
+#endif
+
+		/* Services-managed UMA carve-out physheap setup complete */
+		psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+	}
+
+	return eError;
+
+e0:
+	if (psPhysHeapConfig->pasRegions)
+	{
+		SysVzDeregisterPhysHeap(psDevConfig, ePhysHeap);
+
+		if (psPhysHeapConfig->pasRegions[0].hPrivData)
+		{
+			OSFreeMem(psPhysHeapConfig->pasRegions[0].hPrivData);
+			psPhysHeapConfig->pasRegions[0].hPrivData = NULL;
+		}
+
+		if (psPhysHeapConfig->pasRegions[0].bDynAlloc)
+		{
+			OSFreeMem(psPhysHeapConfig->pasRegions);
+			psPhysHeapConfig->pasRegions = NULL;
+			psPhysHeapConfig->ui32NumOfRegions--;
+			PVR_LOG_IF_FALSE((psPhysHeapConfig->ui32NumOfRegions == 0), "Invalid refcount");
+		}
+	}
+
+	return eError;
+}
+
+static void
+SysVzDestroyPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+					 PVRSRV_DEVICE_PHYS_HEAP ePhysHeap)
+{
+	PHYS_HEAP_CONFIG *psPhysHeapConfig;
+
+	SysVzDeregisterPhysHeap(psDevConfig, ePhysHeap);
+
+	psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, ePhysHeap);
+	if (psPhysHeapConfig == NULL ||
+		psPhysHeapConfig->pasRegions == NULL)
+	{
+		return;
+	}
+
+#if defined(CONFIG_L4)
+	if (gahPhysHeapIoRemap[ePhysHeap] != NULL)
+	{
+		OSUnMapPhysToLin(gahPhysHeapIoRemap[ePhysHeap],
+						psPhysHeapConfig->pasRegions[0].uiSize,
+						PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+	}
+
+	gahPhysHeapIoRemap[ePhysHeap] = NULL;
+#endif
+
+	if (psPhysHeapConfig->pasRegions[0].hPrivData)
+	{
+		SysVzDestroyDmaPhysHeap(psPhysHeapConfig);
+		OSFreeMem(psPhysHeapConfig->pasRegions[0].hPrivData);
+		psPhysHeapConfig->pasRegions[0].hPrivData = NULL;
+	}
+
+	if (psPhysHeapConfig->pasRegions[0].bDynAlloc)
+	{
+		OSFreeMem(psPhysHeapConfig->pasRegions);
+		psPhysHeapConfig->pasRegions = NULL;
+		psPhysHeapConfig->ui32NumOfRegions--;
+		PVR_LOG_IF_FALSE((psPhysHeapConfig->ui32NumOfRegions == 0), "Invalid refcount");
+	}
+}
+
+static PVRSRV_ERROR
+SysVzCreateGpuPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL;
+	return SysVzCreatePhysHeap(psDevConfig, eHeap);
+}
+
+static void
+SysVzDestroyGpuPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL;
+	SysVzDestroyPhysHeap(psDevConfig, eHeap);
+}
+
+static PVRSRV_ERROR
+SysVzCreateFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	return SysVzCreatePhysHeap(psDevConfig, eHeap);
+}
+
+static void
+SysVzDestroyFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+	SysVzDestroyPhysHeap(psDevConfig, eHeap);
+}
+
+PHYS_HEAP_TYPE SysVzGetMemoryConfigPhysHeapType(void)
+{
+	return PHYS_HEAP_TYPE_UMA;
+}
+
+PVRSRV_ERROR SysVzInitDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+
+	eError = SysVzCreateFwPhysHeap(psDevConfig);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = SysVzCreateGpuPhysHeap(psDevConfig);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	return eError;
+}
+
+void SysVzDeInitDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	SysVzDestroyGpuPhysHeap(psDevConfig);
+	SysVzDestroyFwPhysHeap(psDevConfig);
+}
+
+/******************************************************************************
+ End of file (vz_physheap_generic.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_support.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_support.c
new file mode 100644
index 0000000..8dc99b3
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_support.c
@@ -0,0 +1,173 @@
+/*************************************************************************/ /*!
+@File           vz_support.c
+@Title          System virtualization configuration setup
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System virtualization configuration support API(s)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "allocmem.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+
+#include "dma_support.h"
+#include "vz_support.h"
+#include "vz_vmm_pvz.h"
+#include "vz_physheap.h"
+#include "vmm_pvz_client.h"
+#include "vmm_pvz_server.h"
+
+PVRSRV_ERROR SysVzDevInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+	RGX_DATA* psDevData = psDevConfig->hDevData;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+	/* Initialise pvz connection */
+	eError = SysVzPvzConnectionInit();
+	PVR_LOGR_IF_ERROR(eError, "SysVzPvzConnectionInit");
+
+	psPVRSRVData->abVmOnline[0] = IMG_TRUE;
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		/* Undo any functionality not supported in guest drivers */
+		psDevData->psRGXTimingInfo->bEnableRDPowIsland  = IMG_FALSE;
+		psDevData->psRGXTimingInfo->bEnableActivePM = IMG_FALSE;
+		psDevConfig->pfnPrePowerState  = NULL;
+		psDevConfig->pfnPostPowerState = NULL;
+
+		/* Perform additional guest-specific device
+		   configuration initialisation */
+		eError = SysVzCreateDevConfig(psDevConfig);
+		PVR_LOGR_IF_ERROR(eError, "SysVzCreateDevConfig");
+
+		eError = SysVzCreateDevPhysHeaps(psDevConfig);
+		PVR_LOGR_IF_ERROR(eError, "SysVzCreateDevPhysHeaps");
+	}
+
+	/* Perform general device physheap initialisation */
+	eError = SysVzInitDevPhysHeaps(psDevConfig);
+	PVR_LOGR_IF_ERROR(eError, "SysVzInitDevPhysHeaps");
+
+	return eError;
+}
+
+PVRSRV_ERROR SysVzDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+	SysVzDeInitDevPhysHeaps(psDevConfig);
+	if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+	{
+		SysVzDestroyDevPhysHeaps(psDevConfig);
+		SysVzDestroyDevConfig(psDevConfig);
+	}
+
+	SysVzPvzConnectionDeInit();
+	return PVRSRV_OK;
+}
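+
+/*
+ * Note (editorial): SysVzDevDeInit() unwinds SysVzDevInit() in
+ * reverse order: physheap de-initialisation first, then the
+ * guest-only teardown, and finally the pvz connection.
+ */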
+
+PVRSRV_ERROR SysVzCreateDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+
+	eError = PvzClientCreateDevConfig(psDevConfig, 0);
+	PVR_LOGR_IF_ERROR(eError, "PvzClientCreateDevConfig");
+
+	return eError;
+}
+
+PVRSRV_ERROR SysVzDestroyDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+
+	eError = PvzClientDestroyDevConfig(psDevConfig, 0);
+	PVR_LOGR_IF_ERROR(eError, "PvzClientCreateDevConfig");
+
+	return eError;
+}
+
+PVRSRV_ERROR
+SysVzPvzCreateDevConfig(IMG_UINT32 ui32OSID,
+						IMG_UINT32 ui32DevID,
+						IMG_UINT32 *pui32IRQ,
+						IMG_UINT32 *pui32RegsSize,
+						IMG_UINT64 *pui64RegsCpuPBase)
+{
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if (ui32OSID == 0        ||
+		ui32DevID != 0       ||
+		psPVRSRVData == NULL ||
+		ui32OSID >= RGXFW_NUM_OS)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* For now, limit support to single device setups */
+	psDevNode = psPVRSRVData->psDeviceNodeList;
+	psDevConfig = psDevNode->psDevConfig;
+
+	/* Copy across guest VM device config information; here we
+	   assume it is identical across the host and all VMs */
+	*pui64RegsCpuPBase = psDevConfig->sRegsCpuPBase.uiAddr;
+	*pui32RegsSize = psDevConfig->ui32RegsSize;
+	*pui32IRQ = psDevConfig->ui32IRQ;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+SysVzPvzDestroyDevConfig(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID)
+{
+	if (ui32OSID == 0        ||
+		ui32DevID != 0       ||
+		ui32OSID >= RGXFW_NUM_OS)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (vz_support.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_vmm_pvz.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_vmm_pvz.c
new file mode 100644
index 0000000..b58433a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_vmm_pvz.c
@@ -0,0 +1,109 @@
+/*************************************************************************/ /*!
+@File           vz_vmm_pvz.c
+@Title          VM manager para-virtualization APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    VM manager para-virtualization management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "pvrsrv.h"
+#include "vz_vmm_pvz.h"
+
+PVRSRV_ERROR SysVzPvzConnectionInit(void)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* Create para-virtualization connection lock */
+	eError = OSLockCreate(&psPVRSRVData->hPvzConnectionLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: OSLockCreate failed (%s)",
+				__FUNCTION__,
+				PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* Create VM manager para-virtualization connection */
+	eError = VMMCreatePvzConnection((VMM_PVZ_CONNECTION **)&psPVRSRVData->hPvzConnection);
+	if (eError != PVRSRV_OK)
+	{
+		OSLockDestroy(psPVRSRVData->hPvzConnectionLock);
+		psPVRSRVData->hPvzConnectionLock = NULL;
+
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Unable to create PVZ connection (%s)",
+				__FUNCTION__,
+				PVRSRVGetErrorStringKM(eError)));
+	}
+
+	return eError;
+}
+
+void SysVzPvzConnectionDeInit(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	VMMDestroyPvzConnection(psPVRSRVData->hPvzConnection);
+	psPVRSRVData->hPvzConnection = NULL;
+
+	OSLockDestroy(psPVRSRVData->hPvzConnectionLock);
+	psPVRSRVData->hPvzConnectionLock = NULL;
+}
+
+VMM_PVZ_CONNECTION* SysVzPvzConnectionAcquire(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVR_ASSERT(psPVRSRVData->hPvzConnection != NULL);
+	return psPVRSRVData->hPvzConnection;
+}
+
+void SysVzPvzConnectionRelease(VMM_PVZ_CONNECTION *psParaVz)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	/* Nothing to do; just sanity-check the pointer passed back */
+	PVR_ASSERT(psParaVz == psPVRSRVData->hPvzConnection);
+}
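+
+/*
+ * Typical usage of the acquire/release pair above (this mirrors
+ * SysVzGetPhysHeapOrigin() in vz_physheap_common.c); minimal sketch
+ * with error handling elided:
+ *
+ *   VMM_PVZ_CONNECTION *psVmmPvz = SysVzPvzConnectionAcquire();
+ *   PVR_ASSERT(psVmmPvz);
+ *   eError = psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin(
+ *                          psDevConfig, eHeap, &eOrigin);
+ *   SysVzPvzConnectionRelease(psVmmPvz);
+ */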
+
+/******************************************************************************
+ End of file (vz_vmm_pvz.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_vmm_vm.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_vmm_vm.c
new file mode 100644
index 0000000..27db94c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/common/vz_vmm_vm.c
@@ -0,0 +1,221 @@
+/*************************************************************************/ /*!
+@File           vz_vmm_vm.c
+@Title          System virtualization VM support APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System virtualization VM support functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "vz_vm.h"
+#include "rgxfwutils.h"
+
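+/* Note: judging from the range checks in this file, OSID 0 is reserved for
+ * the host driver itself; only guest OSIDs 1..RGXFW_NUM_OS-1 may be brought
+ * online or offline through these entry points.
+ */
+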
+IMG_BOOL
+SysVzIsVmOnline(IMG_UINT32 ui32OSID)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVR_ASSERT(ui32OSID > 0 && ui32OSID < RGXFW_NUM_OS);
+	return psPVRSRVData->abVmOnline[ui32OSID];
+}
+
+PVRSRV_ERROR
+SysVzPvzOnVmOnline(IMG_UINT32 ui32OSid, IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR       eError          = PVRSRV_OK;
+	PVRSRV_DATA        *psPVRSRVData   = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	if (ui32OSid == 0 || ui32OSid >= RGXFW_NUM_OS)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: invalid OSID (%d)",
+				 __FUNCTION__, ui32OSid));
+
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (psPVRSRVData->abVmOnline[ui32OSid])
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: OSID %d is already enabled.",
+				 __FUNCTION__, ui32OSid));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* For now, limit support to single device setups */
+	psDevNode = psPVRSRVData->psDeviceNodeList;
+	psDevInfo = psDevNode->pvDevice;
+
+	if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
+	{
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSAcquireBridgeLock();
+#endif
+
+		/* Firmware not initialized yet, do it here */
+		eError = PVRSRVDeviceInitialise(psDevNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: failed to initialize firmware (%s)",
+					 __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+			goto e0;
+		}
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+		OSReleaseBridgeLock();
+#endif
+	}
+
+	/* request new priority and enable OS */
+
+	eError = RGXFWSetVMOnlineState(psDevInfo, ui32OSid, RGXFWIF_OS_ONLINE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	psPVRSRVData->abVmOnline[ui32OSid] = IMG_TRUE;
+
+	eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32Priority);
+
+e0:
+	return eError;
+}
+
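+/* SysVzPvzOnVmOnline above also acts as a lazy firmware bring-up point: the
+ * first guest to come online while the device is still in
+ * PVRSRV_DEVICE_STATE_INIT triggers PVRSRVDeviceInitialise() before the OS
+ * is marked online and its scheduling priority is applied.
+ */
+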
+PVRSRV_ERROR
+SysVzPvzOnVmOffline(IMG_UINT32 ui32OSid)
+{
+	PVRSRV_ERROR      eError          = PVRSRV_OK;
+	PVRSRV_DATA       *psPVRSRVData   = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDevNode;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	if (ui32OSid == 0 || ui32OSid >= RGXFW_NUM_OS)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: invalid OSID (%d)",
+				 __FUNCTION__, ui32OSid));
+
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (!psPVRSRVData->abVmOnline[ui32OSid])
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: OSID %d is already disabled.",
+				 __FUNCTION__, ui32OSid));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* For now, limit support to single device setups */
+	psDevNode = psPVRSRVData->psDeviceNodeList;
+	psDevInfo = psDevNode->pvDevice;
+
+	eError = RGXFWSetVMOnlineState(psDevInfo, ui32OSid, RGXFWIF_OS_OFFLINE);
+	if (eError == PVRSRV_OK)
+	{
+		psPVRSRVData->abVmOnline[ui32OSid] = IMG_FALSE;
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR
+SysVzPvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	psDevInfo = psDeviceNode->pvDevice;
+
+	switch (eVMMParamType)
+	{
+		case VMM_CONF_PRIO_OSID0:
+		case VMM_CONF_PRIO_OSID1:
+		case VMM_CONF_PRIO_OSID2:
+		case VMM_CONF_PRIO_OSID3:
+		case VMM_CONF_PRIO_OSID4:
+		case VMM_CONF_PRIO_OSID5:
+		case VMM_CONF_PRIO_OSID6:
+		case VMM_CONF_PRIO_OSID7:
+		{
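+			/* This mapping relies on VMM_CONF_PRIO_OSID0..7 being the
+			 * first entries of VMM_CONF_PARAM with values 0..7, so the
+			 * parameter type can be reused directly as the OSID (the
+			 * enum definition is assumed, not shown here).
+			 */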
+			IMG_UINT32 ui32OSid = eVMMParamType;
+			IMG_UINT32 ui32Prio = ui32ParamValue;
+
+			if (ui32OSid < RGXFW_NUM_OS)
+			{
+				eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32Prio);
+			}
+			else
+			{
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+			}
+			break;
+		}
+		case VMM_CONF_ISOL_THRES:
+		{
+			IMG_UINT32 ui32Threshold = ui32ParamValue;
+			eError = RGXFWSetOSIsolationThreshold(psDevInfo, ui32Threshold);
+			break;
+		}
+		case VMM_CONF_HCS_DEADLINE:
+		{
+			IMG_UINT32 ui32HCSDeadline = ui32ParamValue;
+			eError = RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadline);
+			break;
+		}
+		default:
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+
+	return eError;
+}
+
+/******************************************************************************
+ End of file (vz_vmm_vm.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/europa/Kbuild.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/europa/Kbuild.mk
new file mode 100644
index 0000000..995065e
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/europa/Kbuild.mk
@@ -0,0 +1,55 @@
+########################################################################### ###
+#@File
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+# 
+# The contents of this file are subject to the MIT license as set out below.
+# 
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+# 
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# 
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+# 
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+# 
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+# 
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+
+PVRSRVKM_NAME = $(PVRSRV_MODNAME)
+
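+# Note: this fragment assumes the enclosing build sets PVR_SYSTEM to
+# "europa", so the sysconfig_dt.o entry below resolves to this directory.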
+$(PVRSRVKM_NAME)-y += \
+ services/system/$(PVR_SYSTEM)/sysconfig_dt.o \
+ services/system/common/env/linux/interrupt_support.o \
+ services/system/common/env/linux/dma_support.o \
+ services/system/common/vz_physheap_generic.o \
+ services/system/common/vz_physheap_common.o \
+ services/system/common/vmm_pvz_client.o \
+ services/system/common/vmm_pvz_server.o \
+ services/system/common/vz_vmm_pvz.o \
+ services/system/common/vz_vmm_vm.o \
+ services/system/common/vz_support.o \
+ services/system/common/vmm_type_stub.o
\ No newline at end of file
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/europa/sysconfig.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/europa/sysconfig.h
new file mode 100644
index 0000000..0be7f32
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/europa/sysconfig.h
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+
+#if !defined(__SYSCONFIG_H__)
+#define __SYSCONFIG_H__
+
+
+#define RGX_NOHW_CORE_CLOCK_SPEED 100000000
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10)
+
+/* BIF Tiling mode configuration */
+static RGXFWIF_BIFTILINGMODE geBIFTilingMode = RGXFWIF_BIFTILINGMODE_256x16;
+
+/* default BIF tiling heap x-stride configurations. */
+static IMG_UINT32 gauiBIFTilingHeapXStrides[RGXFWIF_NUM_BIF_TILING_CONFIGS] =
+{
+	0, /* BIF tiling heap 1 x-stride */
+	1, /* BIF tiling heap 2 x-stride */
+	2, /* BIF tiling heap 3 x-stride */
+	3  /* BIF tiling heap 4 x-stride */
+};
+
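+/* These tables are given internal linkage in the header on the assumption
+ * that sysconfig.h is included by a single translation unit for this system
+ * (sysconfig_dt.c), which copies the values into the device config during
+ * SysDevInit().
+ */
+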
+/*****************************************************************************
+ * system specific data structures
+ *****************************************************************************/
+
+#endif	/* __SYSCONFIG_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/europa/sysconfig_dt.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/europa/sysconfig_dt.c
new file mode 100644
index 0000000..1671a8a
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/europa/sysconfig_dt.c
@@ -0,0 +1,273 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System Configuration functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <asm/io.h>
+#include <asm/page.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/kobject.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/dma-mapping.h>
+
+#include "interrupt_support.h"
+#include "pvrsrv_device.h"
+#include "syscommon.h"
+#include "sysconfig.h"
+#include "physheap.h"
+
+
+/*
+	CPU to Device physical address translation
+*/
+static
+void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+									   IMG_UINT32 ui32NumOfAddr,
+									   IMG_DEV_PHYADDR *psDevPAddr,
+									   IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+	
+	/* Optimise common case */
+	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+		}
+	}
+}
+
+/*
+	Device to CPU physical address translation
+*/
+static
+void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+									   IMG_UINT32 ui32NumOfAddr,
+									   IMG_CPU_PHYADDR *psCpuPAddr,
+									   IMG_DEV_PHYADDR *psDevPAddr)				  
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+	
+	/* Optimise common case */
+	psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr;
+		}
+	}
+}
+
+static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs =
+{
+	UMAPhysHeapCpuPAddrToDevPAddr,
+	UMAPhysHeapDevPAddrToCpuPAddr,
+	NULL,
+};
+
+static PVRSRV_ERROR PhysHeapsCreate(PHYS_HEAP_CONFIG **ppasPhysHeapsOut,
+									IMG_UINT32 *puiPhysHeapCountOut)
+{
+	PHYS_HEAP_CONFIG *pasPhysHeaps;
+
+	pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps));
+	if (!pasPhysHeaps)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	pasPhysHeaps[0].ui32PhysHeapID = 0;
+	pasPhysHeaps[0].pszPDumpMemspaceName = "SYSMEM";
+	pasPhysHeaps[0].eType = PHYS_HEAP_TYPE_UMA;
+	pasPhysHeaps[0].psMemFuncs = &gsPhysHeapFuncs;
+
+	*ppasPhysHeapsOut = pasPhysHeaps;
+	*puiPhysHeapCountOut = 1;
+
+	return PVRSRV_OK;
+}
+
+static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps)
+{
+	OSFreeMem(pasPhysHeaps);
+}
+
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+	struct device *psDev = pvOSDevice;
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	RGX_DATA *psRGXData;
+	RGX_TIMING_INFORMATION *psRGXTimingInfo;
+	PHYS_HEAP_CONFIG *pasPhysHeaps;
+	IMG_UINT32 uiPhysHeapCount;
+	struct resource sResource;
+	PVRSRV_ERROR eError;
+	int iErr;
+
+	PVR_ASSERT(psDev);
+
+	psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
+							  sizeof(*psRGXData) +
+							  sizeof(*psRGXTimingInfo));
+	if (!psDevConfig)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	if (dma_set_mask(psDev, DMA_BIT_MASK(40)))
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: failed to set 40-bit DMA mask",
+				 __FUNCTION__));
+	}
+
+	psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig));
+	psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData));
+
+	eError = PhysHeapsCreate(&pasPhysHeaps, &uiPhysHeapCount);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorFreeDevConfig;
+	}
+
+	psDevConfig->pasPhysHeaps      = pasPhysHeaps;
+	psDevConfig->ui32PhysHeapCount = uiPhysHeapCount;
+
+	/*
+	 * Setup RGX specific timing data
+	 */
+	psRGXTimingInfo->ui32CoreClockSpeed        = RGX_NOHW_CORE_CLOCK_SPEED;
+	psRGXTimingInfo->bEnableActivePM           = IMG_FALSE;
+	psRGXTimingInfo->bEnableRDPowIsland        = IMG_FALSE;
+	psRGXTimingInfo->ui32ActivePMLatencyms     = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+	/*
+	 * Setup RGX specific data
+	 */
+	psRGXData->psRGXTimingInfo = psRGXTimingInfo;
+
+	/* Setup the device config */
+	psDevConfig->pvOSDevice				= pvOSDevice;
+	psDevConfig->pszName                = "RGX:europa";
+	psDevConfig->pszVersion             = NULL;
+
+	psDevConfig->eBIFTilingMode = geBIFTilingMode;
+	psDevConfig->pui32BIFTilingHeapConfigs = gauiBIFTilingHeapXStrides;
+	psDevConfig->ui32BIFTilingHeapCount = IMG_ARR_NUM_ELEMS(gauiBIFTilingHeapXStrides);
+
+	iErr = of_address_to_resource(psDev->of_node, 0, &sResource);
+	if (iErr)
+	{
+		eError = PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE;
+		goto ErrorFreePhysHeaps;
+	}
+
+	psDevConfig->sRegsCpuPBase.uiAddr   = sResource.start;
+	psDevConfig->ui32RegsSize           = (IMG_UINT32)resource_size(&sResource);
+
+	psDevConfig->ui32IRQ                = irq_of_parse_and_map(psDev->of_node, 0);
+	if (psDevConfig->ui32IRQ == 0)
+	{
+		eError = PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE;
+		goto ErrorFreePhysHeaps;
+	}
+
+	psDevConfig->eCacheSnoopingMode     = PVRSRV_DEVICE_SNOOP_NONE;
+
+	psDevConfig->pfnPrePowerState       = NULL;
+	psDevConfig->pfnPostPowerState      = NULL;
+	psDevConfig->pfnClockFreqGet        = NULL;
+	psDevConfig->pfnSysDriverMode		= NULL;
+
+	psDevConfig->hDevData               = psRGXData;
+
+	*ppsDevConfig = psDevConfig;
+
+	return PVRSRV_OK;
+
+ErrorFreePhysHeaps:
+	PhysHeapsDestroy(pasPhysHeaps);
+ErrorFreeDevConfig:
+	OSFreeMem(psDevConfig);
+	return eError;
+}
+
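+/* For reference, SysDevInit() above expects a device tree node broadly of
+ * the following shape; the unit address, register size and interrupt
+ * specifier are illustrative only. Matching against SYS_RGX_OF_COMPATIBLE
+ * ("img,clyde-gpu", see sysinfo.h) is presumed to happen in the common
+ * driver probe code:
+ *
+ *   gpu: gpu@fff00000 {
+ *       compatible = "img,clyde-gpu";
+ *       reg = <0x0 0xfff00000 0x0 0x80000>;
+ *       interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ *   };
+ */
+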
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PhysHeapsDestroy(psDevConfig->pasPhysHeaps);
+	OSFreeMem(psDevConfig);
+}
+
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+								  IMG_UINT32 ui32IRQ,
+								  const IMG_CHAR *pszName,
+								  PFN_LISR pfnLISR,
+								  void *pvData,
+								  IMG_HANDLE *phLISRData)
+{
+	PVR_UNREFERENCED_PARAMETER(hSysData);
+
+	return OSInstallSystemLISR(phLISRData, ui32IRQ, pszName, pfnLISR, pvData,
+							   SYS_IRQ_FLAG_TRIGGER_DEFAULT);
+}
+
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+	return OSUninstallSystemLISR(hLISRData);
+}
+
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+	PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+	PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+	return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (sysconfig.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/europa/sysinfo.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/europa/sysinfo.h
new file mode 100644
index 0000000..fb28585
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/europa/sysinfo.h
@@ -0,0 +1,65 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*! System specific poll/timeout details */
+#if defined(VIRTUAL_PLATFORM)
+#define MAX_HW_TIME_US                            (2000000000)
+#else
+#define MAX_HW_TIME_US                            (5000000)
+#endif
+
+#if defined(VIRTUAL_PLATFORM)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT   (100000)
+#else
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT  (10000)
+#endif
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT  (3600000)
+#define WAIT_TRY_COUNT                            (10000)
+
+#define SYS_RGX_OF_COMPATIBLE "img,clyde-gpu"
+
+#endif	/* !defined(__SYSINFO_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/dma_support.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/dma_support.h
new file mode 100644
index 0000000..1421137
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/dma_support.h
@@ -0,0 +1,125 @@
+/*************************************************************************/ /*!
+@File           dma_support.h
+@Title          Device contiguous memory allocator and I/O re-mapper
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides a contiguous memory allocator API; mainly
+                used for allocating / ioremapping (DMA/PA <-> CPU/VA)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DMA_SUPPORT_H_
+#define _DMA_SUPPORT_H_
+
+#include "osfunc.h"
+#include "pvrsrv.h"
+
+typedef struct _DMA_ALLOC_
+{
+	IMG_UINT64	ui64Size;
+	IMG_CPU_VIRTADDR pvVirtAddr;
+	IMG_DEV_PHYADDR	 sBusAddr;
+	void *pvOSDevice;
+} DMA_ALLOC;
+
+/*!
+******************************************************************************
+ @Function			SysDmaAllocMem
+
+ @Description 		Allocates physically contiguous memory
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success; otherwise a
+									PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc);
+
+/*!
+******************************************************************************
+ @Function			SysDmaFreeMem
+
+ @Description 		Frees physically contiguous memory
+
+ @Return			void
+ ******************************************************************************/
+void SysDmaFreeMem(DMA_ALLOC *psCmaAlloc);
+
+/*!
+******************************************************************************
+ @Function			SysDmaRegisterForIoRemapping
+
+ @Description 		Registers DMA_ALLOC for manual I/O remapping
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success; otherwise a
+									PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc);
+
+/*!
+******************************************************************************
+ @Function			SysDmaDeregisterForIoRemapping
+
+ @Description 		Deregisters DMA_ALLOC from manual I/O remapping
+
+ @Return			void
+ ******************************************************************************/
+void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc);
+
+/*!
+******************************************************************************
+ @Function			SysDmaDevPAddrToCpuVAddr
+
+ @Description 		Maps a DMA_ALLOC physical address to CPU virtual address
+
+ @Return			IMG_CPU_VIRTADDR on success; otherwise NULL
+ ******************************************************************************/
+IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size);
+
+/*!
+******************************************************************************
+ @Function			SysDmaCpuVAddrToDevPAddr
+
+ @Description 		Maps a DMA_ALLOC CPU virtual address to physical address
+
+ @Return			Non-zero value on success; otherwise 0
+ ******************************************************************************/
+IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr);
+
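+/* Illustrative usage sketch (not part of the API contract): allocate a
+ * contiguous buffer, register it for I/O remapping, then tear it down.
+ * Error handling is elided; pvOSDevice stands in for whatever OS device
+ * pointer the caller has to hand:
+ *
+ *   DMA_ALLOC sDmaAlloc = {0};
+ *
+ *   sDmaAlloc.ui64Size   = ui64Size;
+ *   sDmaAlloc.pvOSDevice = pvOSDevice;
+ *   eError = SysDmaAllocMem(&sDmaAlloc);
+ *   eError = SysDmaRegisterForIoRemapping(&sDmaAlloc);
+ *   ...
+ *   SysDmaDeregisterForIoRemapping(&sDmaAlloc);
+ *   SysDmaFreeMem(&sDmaAlloc);
+ */
+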
+#endif /* _DMA_SUPPORT_H_ */
+
+/*****************************************************************************
+ End of file (dma_support.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/interrupt_support.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/interrupt_support.h
new file mode 100644
index 0000000..e4db2fe
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/interrupt_support.h
@@ -0,0 +1,102 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__INTERRUPT_SUPPORT_H__)
+#define __INTERRUPT_SUPPORT_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_device.h"
+
+#define SYS_IRQ_FLAG_TRIGGER_DEFAULT (0x0 << 0)
+#define SYS_IRQ_FLAG_TRIGGER_LOW     (0x1 << 0)
+#define SYS_IRQ_FLAG_TRIGGER_HIGH    (0x2 << 0)
+#define SYS_IRQ_FLAG_TRIGGER_MASK    (SYS_IRQ_FLAG_TRIGGER_DEFAULT | \
+                                      SYS_IRQ_FLAG_TRIGGER_LOW | \
+                                      SYS_IRQ_FLAG_TRIGGER_HIGH)
+#define SYS_IRQ_FLAG_SHARED          (0x1 << 8)
+
+#define SYS_IRQ_FLAG_MASK            (SYS_IRQ_FLAG_TRIGGER_MASK | \
+                                      SYS_IRQ_FLAG_SHARED)
+
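+/* For example, a board with an active-low, shared GPU interrupt line would
+ * presumably pass (SYS_IRQ_FLAG_TRIGGER_LOW | SYS_IRQ_FLAG_SHARED) as
+ * ui32Flags to OSInstallSystemLISR() below.
+ */
+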
+typedef IMG_BOOL (*PFN_SYS_LISR)(void *pvData);
+
+typedef struct _SYS_INTERRUPT_DATA_
+{
+	void			*psSysData;
+	const IMG_CHAR	*pszName;
+	PFN_SYS_LISR	pfnLISR;
+	void			*pvData;
+	IMG_UINT32		ui32InterruptFlag;
+	IMG_UINT32		ui32IRQ;
+} SYS_INTERRUPT_DATA;
+
+/*************************************************************************/ /*!
+@Function       OSInstallSystemLISR
+@Description    Installs a system low-level interrupt handler
+@Output         phLISR                  On return, contains a handle to the
+                                        installed LISR
+@Input          ui32IRQ                 The IRQ number for which the
+                                        interrupt handler should be installed
+@Input          pszDevName              Name of the device for which the handler
+                                        is being installed
+@Input          pfnLISR                 A pointer to an interrupt handler
+                                        function
+@Input          pvData                  A pointer to data that should be passed
+                                        to pfnLISR when it is called
+@Input          ui32Flags               Interrupt flags
+@Return         PVRSRV_OK on success, a failure code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR,
+				 IMG_UINT32 ui32IRQ,
+				 const IMG_CHAR *pszDevName,
+				 PFN_SYS_LISR pfnLISR,
+				 void *pvData,
+				 IMG_UINT32 ui32Flags);
+
+/*************************************************************************/ /*!
+@Function       OSUninstallSystemLISR
+@Description    Uninstalls a system low-level interrupt handler
+@Input          hLISRData              The handle to the LISR to uninstall
+@Return         PVRSRV_OK on success, a failure code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISRData);
+#endif /* !defined(__INTERRUPT_SUPPORT_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/pci_support.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/pci_support.h
new file mode 100644
index 0000000..46cfe87
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/pci_support.h
@@ -0,0 +1,98 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PCI_SUPPORT_H__
+#define __PCI_SUPPORT_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#if defined(LINUX)
+#include <linux/pci.h>
+#define TO_PCI_COOKIE(dev) to_pci_dev((struct device *)(dev))
+#else
+#define TO_PCI_COOKIE(dev) (dev)
+#endif
+
+typedef enum _HOST_PCI_INIT_FLAGS_
+{
+	HOST_PCI_INIT_FLAG_BUS_MASTER	= 0x00000001,
+	HOST_PCI_INIT_FLAG_MSI		= 0x00000002,
+	HOST_PCI_INIT_FLAG_FORCE_I32	= 0x7fffffff
+} HOST_PCI_INIT_FLAGS;
+
+struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_;
+typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE;
+
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIDevID(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16DeviceID);
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ);
+IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength);
+PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength);
+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+
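+/* Illustrative discovery sequence (a sketch; the vendor/device IDs are
+ * placeholders): acquire a PCI device, claim its first address range, query
+ * its IRQ, and release everything again:
+ *
+ *   PVRSRV_PCI_DEV_HANDLE hPVRPCI;
+ *   IMG_UINT32 ui32IRQ;
+ *
+ *   hPVRPCI = OSPCIAcquireDev(0x1010, 0x0001, HOST_PCI_INIT_FLAG_BUS_MASTER);
+ *   OSPCIRequestAddrRange(hPVRPCI, 0);
+ *   OSPCIIRQ(hPVRPCI, &ui32IRQ);
+ *   ...
+ *   OSPCIReleaseAddrRange(hPVRPCI, 0);
+ *   OSPCIReleaseDev(hPVRPCI);
+ */
+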
+#if defined(CONFIG_MTRR)
+PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+#else
+static inline PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	PVR_UNREFERENCED_PARAMETER(hPVRPCI);
+	PVR_UNREFERENCED_PARAMETER(ui32Index);
+	return PVRSRV_OK;
+}
+
+static inline void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	PVR_UNREFERENCED_PARAMETER(hPVRPCI);
+	PVR_UNREFERENCED_PARAMETER(ui32Index);
+}
+#endif
+
+#endif /* __PCI_SUPPORT_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/syscommon.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/syscommon.h
new file mode 100644
index 0000000..4280731
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/syscommon.h
@@ -0,0 +1,124 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common System APIs and structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides common system-specific declarations and
+                macros that are supported by all systems
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__SYSCOMMON_H__)
+#define __SYSCOMMON_H__
+
+#include "img_types.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_device.h"
+#include "pvrsrv_error.h"
+
+typedef IMG_BOOL (*PFN_LISR)(void *pvData);
+
+/**************************************************************************/ /*!
+@Function       SysDevInit
+@Description    System specific device initialisation function.
+@Input          pvOSDevice          pointer to the OS device reference
+@Input          ppsDevConfig        returned device configuration info
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig);
+
+/**************************************************************************/ /*!
+@Function       SysDevDeInit
+@Description    System specific device deinitialisation function.
+@Input          psDevConfig        device configuration info of the device to be
+                                   deinitialised
+@Return         None.
+*/ /***************************************************************************/
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/**************************************************************************/ /*!
+@Function       SysDebugInfo
+@Description    Dump system specific device debug information.
+@Input          psDevConfig         pointer to device configuration info
+@Input          pfnDumpDebugPrintf  the 'printf' function to be called to
+                                    display the debug info
+@Input          pvDumpDebugFile     optional file identifier to be passed to
+                                    the 'printf' function if required
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile);
+
+/**************************************************************************/ /*!
+@Function       SysInstallDeviceLISR
+@Description    Installs the system Low-level Interrupt Service Routine (LISR)
+                which handles low-level processing of interrupts from the device
+                (GPU).
+                The LISR will be invoked when the device raises an interrupt. An
+                LISR may not be descheduled, so code which needs to do so should
+                be placed in an MISR.
+                The installed LISR will schedule any MISRs once it has completed
+                its interrupt processing, by calling OSScheduleMISR().
+@Input          hSysData      pointer to the system data of the device
+@Input          ui32IRQ       the IRQ on which the LISR is to be installed
+@Input          pszName       name of the module installing the LISR
+@Input          pfnLISR       pointer to the function to be installed as the
+                              LISR
+@Input          pvData        private data provided to the LISR
+@Output         phLISRData    handle to the installed LISR (to be used for a
+                              subsequent uninstall)
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+								  IMG_UINT32 ui32IRQ,
+								  const IMG_CHAR *pszName,
+								  PFN_LISR pfnLISR,
+								  void *pvData,
+								  IMG_HANDLE *phLISRData);
+
+/**************************************************************************/ /*!
+@Function       SysUninstallDeviceLISR
+@Description    Uninstalls the system Low-level Interrupt Service Routine (LISR)
+                which handles low-level processing of interrupts from the device
+                (GPU).
+@Input          hLISRData     handle of the LISR to be uninstalled
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData);
+
+#endif /* !defined(__SYSCOMMON_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/sysvalidation.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/sysvalidation.h
new file mode 100644
index 0000000..ae46ee5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/sysvalidation.h
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title          Validation System APIs and structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+                needed for hardware validation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSVALIDATION_H__)
+#define __SYSVALIDATION_H__
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "img_types.h"
+#include "rgxdefs_km.h"
+#include "virt_validation_defs.h"
+
+void SysSetOSidRegisters(IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+                         IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
+void SysPrintAndResetFaultStatusRegister(void);
+
+#if defined(EMULATOR)
+void SysSetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState);
+void SysSetTrustedDeviceAceEnabled(void);
+#endif
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+#endif /* !defined(__SYSVALIDATION_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vmm_impl.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vmm_impl.h
new file mode 100644
index 0000000..2251223
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vmm_impl.h
@@ -0,0 +1,283 @@
+/*************************************************************************/ /*!
+@File           vmm_impl.h
+@Title          Common VM manager API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides common VM manager definitions that need to
+                be shared by system virtualization layer itself and modules that
+                implement the actual VM manager types.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VMM_IMPL_H_
+#define _VMM_IMPL_H_
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/*
+ 	 Virtual machine manager para-virtualization (PVZ) connection:
+		- Type is implemented by host and guest drivers
+			- Assumes synchronous function call semantics
+			- Unidirectional semantics
+				- For Host  (vmm -> host)
+				- For Guest (guest -> vmm)
+			- Parameters can be IN/OUT/INOUT
+
+		- Host pvz entries are pre-implemented by IMG
+			- For host implementation, see vmm_pvz_server.c
+			- Called by host side hypercall handler or VMM
+
+		- Guest pvz entries are supplied by 3rd-party
+			- These are specific to hypervisor (VMM) type
+			- These implement the actual hypercalls mechanism
+
+	 Para-virtualization call runtime sequence:
+		1 - Guest driver in guest VM calls PVZ function
+		1.1 - Guest PVZ connection calls
+		1.2 - Guest VM Manager type which
+		1.2.1 - Performs any pre-processing like parameter packing, etc.
+		1.2.2 - Issues hypercall (blocking synchronous call)
+
+		2 - VM Manager (hypervisor) receives hypercall
+		2.1 - Hypercall handler:
+		2.1.1 - Performs any pre-processing
+		2.1.2 - If call terminates in VM Manager: perform action and return from hypercall
+		2.1.3 - Otherwise forward to host driver (implementation specific call)
+
+		3 - Host driver receives call from VM Manager
+		3.1 - Host VM manager type:
+		3.1.1 - Performs any pre-processing like parameter unpacking, etc.
+		3.1.2 - Acquires host driver PVZ handler and calls the appropriate entry
+		3.2 - Host PVZ connection calls corresponding host system virtualisation layer
+		3.3 - Host driver system virtualisation layer:
+		3.3.1 - Perform action requested by guest driver
+		3.3.2 - Return to host VM Manager type
+		3.4 - Host VM Manager type:
+		3.4.1 - Prepare to return from hypercall
+		3.4.2 - Perform any post-processing like result packing, etc.
+		3.4.3 - Issue return from hypercall
+
+		4 - VM Manager (hypervisor)
+		4.1 - Perform any post-processing
+		4.2 - Return control to guest driver
+
+		5 - Guest driver in guest VM
+		5.1 - Perform any post-processing like parameter unpacking, etc.
+		5.2 - Continue execution in guest VM
+ */
+typedef struct _VMM_PVZ_CONNECTION_
+{
+	struct {
+		/*
+		   This pair must be implemented if the device configuration is
+		   not provided during guest build or if the device interrupt 
+		   is dynamically mapped into the VM virtual interrupt line.
+		   If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED.
+		 */
+		PVRSRV_ERROR (*pfnCreateDevConfig)(IMG_UINT32 ui32FuncID,
+										   IMG_UINT32 ui32DevID,
+										   IMG_UINT32 *pui32IRQ,
+										   IMG_UINT32 *pui32RegsSize,
+										   IMG_UINT64 *pui64RegsPBase);
+
+		PVRSRV_ERROR (*pfnDestroyDevConfig)(IMG_UINT32 ui32FuncID,
+											IMG_UINT32 ui32DevID);
+
+		/*
+		   This pair must be implemented if the host is responsible for
+		   allocating the physical heaps on behalf of the guest; these
+		   physical heaps' Addr/Size are allocated in the host domain
+		   and communicated to the guest, so they must be re-expressed
+		   relative to the guest VM IPA space. The guest assumes said
+		   memory is not managed by the underlying GuestOS kernel.
+		   If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED.
+		 */
+		PVRSRV_ERROR (*pfnCreateDevPhysHeaps)(IMG_UINT32 ui32FuncID,
+											  IMG_UINT32 ui32DevID,
+											  IMG_UINT32 *peType,
+											  IMG_UINT64 *pui64FwSize,
+											  IMG_UINT64 *pui64FwPAddr,
+											  IMG_UINT64 *pui64GpuSize,
+											  IMG_UINT64 *pui64GpuPAddr);
+
+		PVRSRV_ERROR (*pfnDestroyDevPhysHeaps)(IMG_UINT32 ui32FuncID,
+											   IMG_UINT32 ui32DevID);
+
+		/*
+		   This pair must be implemented if the guest is responsible
+		   for allocating the physical heap that backs its firmware
+		   allocations; this is the default configuration. The physical
+		   heap is allocated within the guest VM IPA space, and this
+		   IPA Addr/Size must be re-expressed as PA space Addr/Size
+		   by the VM manager before forwarding the request to the host.
+		   If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED.
+		 */
+		PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32FuncID,
+										  IMG_UINT32 ui32DevID,
+										  IMG_UINT64 ui64Size,
+										  IMG_UINT64 ui64PAddr);
+
+		PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32FuncID,
+											IMG_UINT32 ui32DevID);
+	} sHostFuncTab;
+
+	struct {
+		/*
+			Corresponding server-side entries to handle guest PVZ calls
+			NOTE: the pvz function ui32OSID parameter
+				- OSID determination is the responsibility of the VM manager
+				- The actual OSID value must be supplied by the VM manager
+				  (this can be done on the client/VMM/host side)
+				- It must be supplied before the host pvz function(s) are called
+				- The host pvz functions assume a valid OSID
+		 */
+		PVRSRV_ERROR (*pfnCreateDevConfig)(IMG_UINT32 ui32OSID,
+										   IMG_UINT32 ui32FuncID,
+										   IMG_UINT32 ui32DevID,
+										   IMG_UINT32 *pui32IRQ,
+										   IMG_UINT32 *pui32RegsSize,
+										   IMG_UINT64 *pui64RegsPBase);
+
+		PVRSRV_ERROR (*pfnDestroyDevConfig)(IMG_UINT32 ui32OSID,
+											IMG_UINT32 ui32FuncID,
+											IMG_UINT32 ui32DevID);
+
+		PVRSRV_ERROR (*pfnCreateDevPhysHeaps)(IMG_UINT32 ui32OSID,
+											  IMG_UINT32 ui32FuncID,
+											  IMG_UINT32 ui32DevID,
+											  IMG_UINT32 *peType,
+											  IMG_UINT64 *pui64FwSize,
+											  IMG_UINT64 *pui64FwPAddr,
+											  IMG_UINT64 *pui64GpuSize,
+											  IMG_UINT64 *pui64GpuPAddr);
+
+		PVRSRV_ERROR (*pfnDestroyDevPhysHeaps)(IMG_UINT32 ui32OSID,
+											   IMG_UINT32 ui32FuncID,
+											   IMG_UINT32 ui32DevID);
+
+		PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32OSID,
+										  IMG_UINT32 ui32FuncID,
+										  IMG_UINT32 ui32DevID,
+										  IMG_UINT64 ui64Size,
+										  IMG_UINT64 ui64PAddr);
+
+		PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32OSID,
+											IMG_UINT32 ui32FuncID,
+											IMG_UINT32 ui32DevID);
+	} sGuestFuncTab;
+
+	struct {
+		/*
+		   This configuration interface specifies which driver (host or guest)
+		   is responsible for allocating the physical memory backing the guest
+		   driver's physical heaps. Both the host and guest(s) must agree to
+		   use the same policy. It must be implemented and should return
+		   PVRSRV_OK.
+		 */
+		PVRSRV_ERROR (*pfnGetDevPhysHeapOrigin)(PVRSRV_DEVICE_CONFIG *psDevConfig,
+												PVRSRV_DEVICE_PHYS_HEAP eHeap,
+												PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin);
+
+		/*
+			If the host is responsible for allocating the backing memory for
+			the physical heap, the function should return the heap Addr/Size
+			value pairs obtained in sHostFuncTab->pfnCreateDevPhysHeaps().
+
+			If the guest is responsible for allocating the backing memory for
+			the physical heap, the function should return the proper values to
+			direct the guest driver on which allocation method to use. This is
+			communicated by using the returned pui64Addr/pui64Size value pairs
+			as shown below:
+
+				For UMA platforms:
+					- For GPU physical heap
+						- 0/0							=> UMA
+						- 0/0x[hex-value]				=> DMA
+						- 0x[hex-value]/0x[hex-value]	=> UMA/carve-out
+
+					- For FW physical heap
+						- 0/0x[hex-value]				=> DMA
+						- 0x[hex-value]/0x[hex-value]	=> UMA/carve-out
+
+				For LMA platforms:
+					- For GPU physical heap
+						- 0x[hex-value]/0x[hex-value]	=> LMA
+
+					- For FW physical heap
+						- 0x[hex-value]/0x[hex-value]	=> LMA
+		*/
+		PVRSRV_ERROR (*pfnGetDevPhysHeapAddrSize)(PVRSRV_DEVICE_CONFIG *psDevConfig,
+												  PVRSRV_DEVICE_PHYS_HEAP eHeap,
+												  IMG_UINT64 *pui64Size,
+												  IMG_UINT64 *pui64Addr);
+	} sConfigFuncTab;
+
+	struct {
+		/*
+		   This is used by the VM manager to report pertinent runtime guest VM
+		   information to the host; these events may in turn be forwarded to 
+		   the firmware
+		 */
+		PVRSRV_ERROR (*pfnOnVmOnline)(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority);
+
+		PVRSRV_ERROR (*pfnOnVmOffline)(IMG_UINT32 ui32OSID);
+
+		PVRSRV_ERROR (*pfnVMMConfigure)(IMG_UINT32 ui32VMMParamType, IMG_UINT32 ui32ParamValue);
+
+	} sVmmFuncTab;
+} VMM_PVZ_CONNECTION;
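+
+/*
+   Illustrative sketch (not part of this interface): a third-party VM manager
+   type on the guest side might populate sHostFuncTab with thin stubs that
+   pack parameters and issue the actual hypercall (step 1.2.2 above). The
+   hypercall helper below is hypothetical and hypervisor-specific.
+
+   static PVRSRV_ERROR StubMapDevPhysHeap(IMG_UINT32 ui32FuncID,
+                                          IMG_UINT32 ui32DevID,
+                                          IMG_UINT64 ui64Size,
+                                          IMG_UINT64 ui64PAddr)
+   {
+       // Blocking synchronous call into the hypervisor (hypothetical helper)
+       return MyHypervisorHypercall4(ui32FuncID, ui32DevID,
+                                     ui64Size, ui64PAddr);
+   }
+
+   Entries a given configuration does not support should simply return
+   PVRSRV_ERROR_NOT_IMPLEMENTED, as noted above.
+*/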
+
+/*!
+******************************************************************************
+ @Function			VMMCreatePvzConnection() and VMMDestroyPvzConnection()
+
+ @Description 		Both the guest and the VM manager call this to obtain a PVZ
+					connection: the guest obtains a connection to the VM
+					manager, and the VM manager obtains a connection to the
+					host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection);
+void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection);
+
+#endif /* _VMM_IMPL_H_ */
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vmm_pvz_client.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vmm_pvz_client.h
new file mode 100644
index 0000000..374dafc
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vmm_pvz_client.h
@@ -0,0 +1,143 @@
+/*************************************************************************/ /*!
+@File           vmm_pvz_client.h
+@Title          Guest VM manager client para-virtualization routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header provides guest VMM client para-virtualization APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VMM_PVZ_CLIENT_H_
+#define _VMM_PVZ_CLIENT_H_
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "vmm_pvz_common.h"
+
+
+/*!
+******************************************************************************
+ @Function			PvzClientCreateDevConfig
+
+ @Description 		The guest front-end to initiate a pfnCreateDevConfig PVZ 
+					call to the host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzClientCreateDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						 IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzClientDestroyDevConfig
+
+ @Description 		The guest front-end to initiate a pfnDestroyDevConfig PVZ 
+					call to the host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzClientDestroyDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						  IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzClientCreateDevPhysHeaps
+
+ @Description 		The guest front-end to initiate a pfnCreateDevPhysHeaps PVZ 
+					call to the host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzClientCreateDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzClientDestroyDevPhysHeaps
+
+ @Description 		The guest front-end to initiate a pfnDestroyDevPhysHeaps PVZ 
+					call to the host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzClientDestroyDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							 IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzClientMapDevPhysHeap
+
+ @Description 		The guest front-end to initiate a pfnMapDevPhysHeap PVZ 
+					call to the host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						IMG_UINT32 ui32DevID,
+						IMG_DEV_PHYADDR sDevPAddr,
+						IMG_UINT64 ui64DevPSize);
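+
+/*
+   Illustrative sketch (not part of this interface): in the default
+   configuration, where the guest allocates its own firmware physheap, the
+   guest driver forwards the heap base/size to the host roughly as follows
+   (sHeapBase/ui64HeapSize are hypothetical locals; error handling omitted):
+
+   eError = PvzClientMapDevPhysHeap(psDevConfig, ui32DevID,
+                                    sHeapBase, ui64HeapSize);
+*/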
+
+/*!
+******************************************************************************
+ @Function			PvzClientUnmapDevPhysHeap
+
+ @Description 		The guest front-end to initiate a pfnUnmapDevPhysHeap PVZ 
+					call to the host.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						  IMG_UINT32 ui32DevID);
+
+#endif /* _VMM_PVZ_CLIENT_H_ */
+
+/*****************************************************************************
+ End of file (vmm_pvz_client.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vmm_pvz_common.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vmm_pvz_common.h
new file mode 100644
index 0000000..b8d5a93
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vmm_pvz_common.h
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File           vmm_pvz_common.h
+@Title          Common VM manager function IDs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header provides VM manager para-virtualization function IDs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VMM_PVZ_COMMON_H_
+#define _VMM_PVZ_COMMON_H_
+
+#define PVZ_BRIDGE_DEFAULT					0UL
+#define PVZ_BRIDGE_CREATEDEVICECONFIG		(PVZ_BRIDGE_DEFAULT + 1)
+#define PVZ_BRIDGE_DESTROYDEVICECONFIG		(PVZ_BRIDGE_CREATEDEVICECONFIG  + 1)
+#define PVZ_BRIDGE_CREATEDEVICEPHYSHEAPS	(PVZ_BRIDGE_DESTROYDEVICECONFIG + 1)
+#define PVZ_BRIDGE_DESTROYDEVICEPHYSHEAPS	(PVZ_BRIDGE_CREATEDEVICEPHYSHEAPS  + 1)
+#define PVZ_BRIDGE_MAPDEVICEPHYSHEAP		(PVZ_BRIDGE_DESTROYDEVICEPHYSHEAPS + 1)
+#define PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP		(PVZ_BRIDGE_MAPDEVICEPHYSHEAP   + 1)
+#define PVZ_BRIDGE_LAST						(PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP + 1)
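+
+/*
+   Illustrative sketch (not part of this interface): a VM manager hypercall
+   handler would typically dispatch on these function IDs before forwarding
+   to the host-side helpers declared in vmm_pvz_server.h; parameter
+   marshalling is hypervisor-specific and omitted here.
+
+   switch (ui32FuncID)
+   {
+       case PVZ_BRIDGE_MAPDEVICEPHYSHEAP:
+           eError = PvzServerMapDevPhysHeap(ui32OSID, ui32FuncID, ui32DevID,
+                                            ui64Size, ui64PAddr);
+           break;
+       case PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP:
+           eError = PvzServerUnmapDevPhysHeap(ui32OSID, ui32FuncID, ui32DevID);
+           break;
+       default:
+           eError = PVRSRV_ERROR_INVALID_PARAMS;
+           break;
+   }
+*/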
+
+#endif /* _VMM_PVZ_COMMON_H_ */
+
+/*****************************************************************************
+ End of file (vmm_pvz_common.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vmm_pvz_server.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vmm_pvz_server.h
new file mode 100644
index 0000000..397ca5d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vmm_pvz_server.h
@@ -0,0 +1,205 @@
+/*************************************************************************/ /*!
+@File           vmm_pvz_server.h
+@Title          VM manager para-virtualization interface helper routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header provides the API(s) available to the VM manager; these
+                must be called to close the loop during guest para-virtualization calls.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VMM_PVZ_SERVER_H_
+#define _VMM_PVZ_SERVER_H_
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "vmm_pvz_common.h"
+
+
+/*!
+******************************************************************************
+ @Function			PvzServerCreateDevConfig
+
+ @Description 		The VM manager calls this in response to guest PVZ interface
+					call pfnCreateDevConfig.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzServerCreateDevConfig(IMG_UINT32 ui32OSID,
+						 IMG_UINT32 ui32FuncID,
+						 IMG_UINT32 ui32DevID,
+						 IMG_UINT32 *pui32IRQ,
+						 IMG_UINT32 *pui32RegsSize,
+						 IMG_UINT64 *pui64RegsPAddr);
+
+/*!
+******************************************************************************
+ @Function			PvzServerDestroyDevConfig
+
+ @Description 		The VM manager calls this in response to guest PVZ interface
+					call pfnDestroyDevConfig.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzServerDestroyDevConfig(IMG_UINT32 ui32OSID,
+						  IMG_UINT32 ui32FuncID,
+						  IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzServerCreateDevPhysHeaps
+
+ @Description 		The VM manager calls this in response to guest PVZ interface
+					call pfnCreateDevPhysHeaps.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzServerCreateDevPhysHeaps(IMG_UINT32 ui32OSID,
+							IMG_UINT32 ui32FuncID,
+							IMG_UINT32 ui32DevID,
+							IMG_UINT32 *pePHeapType,
+							IMG_UINT64 *pui64FwSize,
+							IMG_UINT64 *pui64FwAddr,
+							IMG_UINT64 *pui64GpuSize,
+							IMG_UINT64 *pui64GpuAddr);
+
+/*!
+******************************************************************************
+ @Function			PvzServerDestroyDevPhysHeaps
+
+ @Description 		The VM manager calls this in response to guest PVZ interface
+					call pfnDestroyDevPhysHeaps.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzServerDestroyDevPhysHeaps(IMG_UINT32 ui32OSID,
+							 IMG_UINT32 ui32FuncID,
+							 IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzServerMapDevPhysHeap
+
+ @Description 		The VM manager calls this in response to guest PVZ interface
+					call pfnMapDevPhysHeap.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID,
+						IMG_UINT32 ui32FuncID,
+						IMG_UINT32 ui32DevID,
+						IMG_UINT64 ui64Size,
+						IMG_UINT64 ui64PAddr);
+
+/*!
+******************************************************************************
+ @Function			PvzServerUnmapDevPhysHeap
+
+ @Description 		The VM manager calls this in response to guest PVZ interface
+					call pfnUnmapDevPhysHeap.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID,
+						  IMG_UINT32 ui32FuncID,
+						  IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			PvzServerOnVmOnline
+
+ @Description 		The VM manager calls this when a guest VM comes online. The
+					host driver may initialize the firmware if it has not done
+					so already.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzServerOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority);
+
+/*!
+******************************************************************************
+ @Function			PvzServerOnVmOffline
+
+ @Description 		The VM manager calls this when a guest VM is about to go
+					offline. The VM manager may already have unmapped the GPU
+					kick register for that VM, but must keep its GPU memory
+					mapped until this call returns. Once the function returns,
+					the FW holds no references for that VM, no workloads from
+					it are running on the GPU, and it is safe to remove the
+					VM's memory.
+
+ @Return			PVRSRV_OK on success. PVRSRV_ERROR_TIMEOUT if the FW takes
+					too long to clean up the resources of the OSID. Otherwise,
+					a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzServerOnVmOffline(IMG_UINT32 ui32OSID);
+
+/*!
+******************************************************************************
+ @Function			PvzServerVMMConfigure
+
+ @Description 		The VM manager calls this to configure several parameters
+                    like HCS or isolation.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR 
+PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, 
+                      IMG_UINT32 ui32ParamValue);
+
+#endif /* _VMM_PVZ_SERVER_H_ */
+
+/*****************************************************************************
+ End of file (vmm_pvz_server.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vz_physheap.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vz_physheap.h
new file mode 100644
index 0000000..a3ac9fc
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vz_physheap.h
@@ -0,0 +1,267 @@
+/*************************************************************************/ /*!
+@File           vz_physheap.h
+@Title          System virtualization physheap support APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides physheaps virtualization-specific APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VZ_PHYSHEAP_H_
+#define _VZ_PHYSHEAP_H_
+
+#include "pvrsrv.h"
+
+typedef enum _PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_
+{
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST  = 0,
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST = 1,
+	PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_LAST
+} PVRSRV_DEVICE_PHYS_HEAP_ORIGIN;
+
+/*!
+******************************************************************************
+ @Function			SysVzGetPhysHeapAddrSize
+
+ @Description 		Get the address and size values of the specified device heap
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzGetPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									  PVRSRV_DEVICE_PHYS_HEAP eHeap,
+									  PHYS_HEAP_TYPE eType,
+									  IMG_DEV_PHYADDR *psAddr,
+									  IMG_UINT64 *pui64Size);
+
+/*!
+******************************************************************************
+ @Function			SysVzSetPhysHeapAddrSize
+
+ @Description 		Set physical heap configuration attributes
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR  SysVzSetPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									   PVRSRV_DEVICE_PHYS_HEAP eHeap,
+									   PHYS_HEAP_TYPE eType,
+									   IMG_DEV_PHYADDR sAddr,
+									   IMG_UINT64 ui64Size);
+
+/*!
+******************************************************************************
+ @Function			SysVzRegisterPhysHeap
+
+ @Description 		Registers heap with virtualization services
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzRegisterPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+								   PVRSRV_DEVICE_PHYS_HEAP eHeap);
+
+/*!
+******************************************************************************
+ @Function			SysVzDeregisterPhysHeap
+
+ @Description 		Deregister heap from virtualization services
+
+ @Return			void
+ ******************************************************************************/
+void SysVzDeregisterPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+							 PVRSRV_DEVICE_PHYS_HEAP eHeap);
+
+
+/*!
+******************************************************************************
+ @Function			SysVzGetPhysHeapConfig
+
+ @Description 		Looks up the device physical heap configuration
+
+ @Return			PHYS_HEAP_CONFIG * on success. Otherwise, NULL
+ ******************************************************************************/
+PHYS_HEAP_CONFIG *SysVzGetPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+										 PVRSRV_DEVICE_PHYS_HEAP eHeap);
+
+/*!
+******************************************************************************
+ @Function			SysVzGetPhysHeapOrigin
+
+ @Description 		Identify which driver is responsible for allocating the
+					device physical heap backing memory
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzGetPhysHeapOrigin(PVRSRV_DEVICE_CONFIG *psDevConfig,
+									PVRSRV_DEVICE_PHYS_HEAP eHeap,
+									PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin);
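+
+/*
+   Illustrative sketch (not part of this interface): callers typically branch
+   on the reported origin to decide which driver allocates the backing memory.
+   The FW heap selector below assumes a PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL
+   enumerator exists in this DDK.
+
+   PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eOrigin;
+   eError = SysVzGetPhysHeapOrigin(psDevConfig,
+                                   PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+                                   &eOrigin);
+   if (eError == PVRSRV_OK &&
+       eOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+   {
+       // Host allocates; the guest maps what the host communicates to it
+   }
+*/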
+
+/*!
+******************************************************************************
+ @Function			SysVzGetMemoryConfigPhysHeapType
+
+ @Description 		Get the platform memory configuration physical heap type
+
+ @Return			PHYS_HEAP_TYPE
+ ******************************************************************************/
+PHYS_HEAP_TYPE SysVzGetMemoryConfigPhysHeapType(void);
+
+/*!
+******************************************************************************
+ @Function			SysVzInitDevPhysHeaps
+
+ @Description 		Initialize device physical heaps
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzInitDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzDeInitDevPhysHeaps
+
+ @Description 		Deinitialize device physical heaps
+
+ @Return			void
+ ******************************************************************************/
+void SysVzDeInitDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzCreateDevPhysHeaps
+
+ @Description 		Create device physical heaps
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzCreateDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzDestroyDevPhysHeaps
+
+ @Description 		Destroy device physical heaps
+
+ @Return			void
+ ******************************************************************************/
+void SysVzDestroyDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzRegisterFwPhysHeap
+
+ @Description 		Maps VM-relative physically contiguous memory into the
+					firmware kernel memory context
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzRegisterFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzUnregisterFwPhysHeap
+
+ @Description 		Unmaps VM-relative physically contiguous memory from the
+					firmware kernel memory context
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzUnregisterFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzCreateDevPhysHeaps
+
+ @Description 		Create guest device physical heaps
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzCreateDevPhysHeaps(IMG_UINT32 ui32OSID,
+										IMG_UINT32 ui32DevID,
+										IMG_UINT32 *peType,
+										IMG_UINT64 *pui64FwSize,
+										IMG_UINT64 *pui64FwAddr,
+										IMG_UINT64 *pui64GpuSize,
+										IMG_UINT64 *puiGpuAddr);
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzDestroyDevPhysHeaps
+
+ @Description 		Destroy guest device physical heaps
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzDestroyDevPhysHeaps(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzRegisterFwPhysHeap
+
+ @Description 		Maps guest VM-relative physically contiguous memory into
+					the firmware kernel memory context
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzRegisterFwPhysHeap(IMG_UINT32 ui32OSID,
+										IMG_UINT32 ui32DevID,
+										IMG_UINT64 ui64Size,
+										IMG_UINT64 ui64Addr);
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzUnregisterFwPhysHeap
+
+ @Description 		Unmaps guest VM-relative physically contiguous memory from
+					the firmware kernel memory context
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzUnregisterFwPhysHeap(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID);
+
+#endif /* _VZ_PHYSHEAP_H_ */
+
+/*****************************************************************************
+ End of file (vz_physheap.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vz_support.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vz_support.h
new file mode 100644
index 0000000..d0d526b
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vz_support.h
@@ -0,0 +1,126 @@
+/*************************************************************************/ /*!
+@File           vz_support.h
+@Title          System virtualization support API(s)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides the system virtualization API(s)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VZ_SUPPORT_H_
+#define _VZ_SUPPORT_H_
+
+#include "osfunc.h"
+#include "pvrsrv.h"
+
+/*!
+******************************************************************************
+ @Function			SysVzDevInit
+
+ @Description 		Entry into system virtualization per device configuration
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzDevInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzDevDeInit
+
+ @Description 		Exit from system virtualization per device configuration
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzCreateDevConfig
+
+ @Description 		Guest para-virtualization initialization per device
+					configuration.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzCreateDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzDestroyDevConfig
+
+ @Description 		Guest para-virtualization deinitialization per device
+					configuration.
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzDestroyDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzCreateDevConfig
+
+ @Description 		Server para-virtualization handler for the client
+					SysVzCreateDevConfig call
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzCreateDevConfig(IMG_UINT32 ui32OSID,
+									 IMG_UINT32 ui32DevID,
+									 IMG_UINT32 *pui32IRQ,
+									 IMG_UINT32 *pui32RegsSize,
+									 IMG_UINT64 *pui64RegsPAddr);
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzDestroyDevConfig
+
+ @Description 		Server para-virtualization handler for the client
+					SysVzDestroyDevConfig call
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzDestroyDevConfig(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID);
+
+#endif /* _VZ_SUPPORT_H_ */
+
+/*****************************************************************************
+ End of file (vz_support.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vz_vm.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vz_vm.h
new file mode 100644
index 0000000..cfcbea5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vz_vm.h
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File			vz_vm.h
+@Title          System virtualization VM support APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides VM management support APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VZ_SUPPORT_VM_H_
+#define _VZ_SUPPORT_VM_H_
+
+#include "pvrsrv.h"
+
+/* Returns IMG_TRUE if the VM associated with the given OSID is online */
+IMG_BOOL SysVzIsVmOnline(IMG_UINT32 ui32OSID);
+
+/* Server-side VM lifecycle/configuration handlers; see the corresponding
+   PvzServer* entry points in vmm_pvz_server.h */
+PVRSRV_ERROR SysVzPvzOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR SysVzPvzOnVmOffline(IMG_UINT32 ui32OSID);
+
+PVRSRV_ERROR SysVzPvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue);
+
+#endif /* _VZ_SUPPORT_VM_H_ */
+
+/*****************************************************************************
+ End of file (vz_vm.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vz_vmm_pvz.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vz_vmm_pvz.h
new file mode 100644
index 0000000..99d2a14
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/include/vz_vmm_pvz.h
@@ -0,0 +1,85 @@
+/*************************************************************************/ /*!
+@File           vz_vmm_pvz.h
+@Title          System virtualization VM manager management APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides VM manager para-virtz management APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VZ_VMM_PVZ_H_
+#define _VZ_VMM_PVZ_H_
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "vmm_impl.h"
+
+/*!
+******************************************************************************
+ @Function			SysVzPvzConnectionInit() and SysVzPvzConnectionDeInit()
+
+ @Description 		SysVzPvzConnectionInit initializes the VM manager
+					para-virtualization connection, which is subsequently used
+					for communication between guest and host; depending on the
+					underlying VM setup, this could be either a hypercall or a
+					cross-VM call
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+									error code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzConnectionInit(void);
+void SysVzPvzConnectionDeInit(void);
+
+/*!
+******************************************************************************
+ @Function			 SysVzPvzConnectionAcquire() and SysVzPvzConnectionRelease()
+
+ @Description 		 These acquire/release a handle to the VM manager
+					 para-virtualization connection for making a pvz call. On
+					 the client side, use the handle to make the actual pvz
+					 call; on the server handler / VM manager side, use it to
+					 complete the processing of the pvz call or to make a VM
+					 manager to host pvz bridge call.
+
+ @Return			VMM_PVZ_CONNECTION*	on success. Otherwise, NULL
+ ******************************************************************************/
+VMM_PVZ_CONNECTION* SysVzPvzConnectionAcquire(void);
+void SysVzPvzConnectionRelease(VMM_PVZ_CONNECTION *psPvzConnection);
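+
+/*
+   Illustrative usage sketch (not part of this interface); error handling is
+   abbreviated, and the function ID argument assumes the PVZ_BRIDGE_* IDs
+   from vmm_pvz_common.h:
+
+   VMM_PVZ_CONNECTION *psPvzConnection = SysVzPvzConnectionAcquire();
+   if (psPvzConnection != NULL)
+   {
+       eError = psPvzConnection->sHostFuncTab.pfnUnmapDevPhysHeap(
+                    PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP, ui32DevID);
+       SysVzPvzConnectionRelease(psPvzConnection);
+   }
+*/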
+
+#endif /* _VZ_VMM_PVZ_H_ */
+
+/*****************************************************************************
+ End of file (vz_vmm_pvz.h)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_linux_tc/Kbuild.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_linux_tc/Kbuild.mk
new file mode 100644
index 0000000..0839521
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_linux_tc/Kbuild.mk
@@ -0,0 +1,60 @@
+########################################################################### ###
+#@File
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+# 
+# The contents of this file are subject to the MIT license as set out below.
+# 
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+# 
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# 
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+# 
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+# 
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+# 
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+PVRSRVKM_NAME = $(PVRSRV_MODNAME)
+
+$(PVRSRVKM_NAME)-y += \
+	services/system/$(PVR_SYSTEM)/sysconfig.o \
+	services/system/common/env/linux/pci_support.o \
+	services/system/common/env/linux/dma_support.o \
+	services/system/common/vz_physheap_generic.o \
+	services/system/common/vz_physheap_common.o \
+	services/system/common/vmm_pvz_client.o \
+	services/system/common/vmm_pvz_server.o \
+	services/system/common/vz_vmm_pvz.o \
+	services/system/common/vz_vmm_vm.o \
+	services/system/common/vz_support.o \
+	services/system/common/vmm_type_stub.o
+
+ccflags-y += \
+	-I$(TOP)/services/system/common/env/linux \
+	-I$(TOP)/services/linux/include \
+	-I$(TOP)/kernel/drivers/staging/imgtec \
+	-I$(TOP)/include/system/rgx_tc
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_linux_tc/sysconfig.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_linux_tc/sysconfig.c
new file mode 100644
index 0000000..e692aa6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_linux_tc/sysconfig.c
@@ -0,0 +1,800 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System Configuration functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "sysinfo.h"
+#include "apollo_regs.h"
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+#include "syscommon.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+#if defined(SUPPORT_ION)
+#include PVR_ANDROID_ION_HEADER
+#include "ion_support.h"
+#include "ion_sys.h"
+#endif
+
+#include "tc_drv.h"
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#if !defined(LMA)
+#error TC only supports LMA at the moment
+#endif
+
+/* Valid values for the TC_MEMORY_CONFIG configuration option */
+#define TC_MEMORY_LOCAL		(1)
+#define TC_MEMORY_HOST		(2)
+#define TC_MEMORY_HYBRID	(3)
+
+#if TC_MEMORY_CONFIG != TC_MEMORY_LOCAL
+#error TC only supports TC_MEMORY_LOCAL at the moment
+#endif
+
+/* These must be consecutive */
+#define PHYS_HEAP_IDX_GENERAL	0
+#define PHYS_HEAP_IDX_DMABUF	1
+#define PHYS_HEAP_IDX_COUNT		2
+
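+/* Time, in ms, that the driver waits after the GPU goes idle before
+ * requesting power-off (only used when active power management is enabled). */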
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10)
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+
+/* Dummy DVFS configuration used purely for testing purposes */
+
+static const IMG_OPP asOPPTable[] =
+{
+	{ 8,  25000000},
+	{ 16, 50000000},
+	{ 32, 75000000},
+	{ 64, 100000000},
+};
+
+#define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP))
+
+static void SetFrequency(IMG_UINT32 ui32Frequency)
+{
+	PVR_DPF((PVR_DBG_ERROR, "SetFrequency %u", ui32Frequency));
+}
+
+static void SetVoltage(IMG_UINT32 ui32Voltage)
+{
+	PVR_DPF((PVR_DBG_ERROR, "SetVoltage %u", ui32Voltage));
+}
+
+#endif
+
+static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+				      IMG_UINT32 ui32NumOfAddr,
+				      IMG_DEV_PHYADDR *psDevPAddr,
+				      IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+				      IMG_UINT32 ui32NumOfAddr,
+				      IMG_CPU_PHYADDR *psCpuPAddr,
+				      IMG_DEV_PHYADDR *psDevPAddr);
+
+static IMG_UINT32 TCLocalGetRegionId(IMG_HANDLE hPrivData,
+					  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
+
+static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs =
+{
+	.pfnCpuPAddrToDevPAddr = TCLocalCpuPAddrToDevPAddr,
+	.pfnDevPAddrToCpuPAddr = TCLocalDevPAddrToCpuPAddr,
+	.pfnGetRegionId = TCLocalGetRegionId,
+};
+
+static void TCIonCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+				    IMG_UINT32 ui32NumOfAddr,
+				    IMG_DEV_PHYADDR *psDevPAddr,
+				    IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void TCIonDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+				    IMG_UINT32 ui32NumOfAddr,
+				    IMG_CPU_PHYADDR *psCpuPAddr,
+				    IMG_DEV_PHYADDR *psDevPAddr);
+
+static IMG_UINT32 TCIonGetRegionId(IMG_HANDLE hPrivData,
+					  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
+
+static PHYS_HEAP_FUNCTIONS gsIonPhysHeapFuncs =
+{
+	.pfnCpuPAddrToDevPAddr = TCIonCpuPAddrToDevPAddr,
+	.pfnDevPAddrToCpuPAddr = TCIonDevPAddrToCpuPAddr,
+	.pfnGetRegionId = TCIonGetRegionId,
+};
+
+/* BIF Tiling mode configuration */
+static RGXFWIF_BIFTILINGMODE geBIFTilingMode = RGXFWIF_BIFTILINGMODE_256x16;
+
+/* Default BIF tiling heap x-stride configurations. */
+static IMG_UINT32 gauiBIFTilingHeapXStrides[RGXFWIF_NUM_BIF_TILING_CONFIGS] =
+{
+	0, /* BIF tiling heap 1 x-stride */
+	1, /* BIF tiling heap 2 x-stride */
+	2, /* BIF tiling heap 3 x-stride */
+	3  /* BIF tiling heap 4 x-stride */
+};
+
+typedef struct _SYS_DATA_ SYS_DATA;
+
+struct _SYS_DATA_
+{
+	struct platform_device *pdev;
+
+	struct tc_rogue_platform_data *pdata;
+
+	struct resource *registers;
+
+#if defined(SUPPORT_ION)
+	struct ion_client *ion_client;
+	struct ion_handle *ion_rogue_allocation;
+#endif
+};
+
+#define SYSTEM_INFO_FORMAT_STRING	"FPGA Revision: %s\tTCF Core Revision: %s\tTCF Core Target Build ID: %s\tPCI Version: %s\tMacro Version: %s"
+static IMG_CHAR *GetDeviceVersionString(SYS_DATA *psSysData)
+{
+	int err;
+	char str_fpga_rev[12];
+	char str_tcf_core_rev[12];
+	char str_tcf_core_target_build_id[4];
+	char str_pci_ver[4];
+	char str_macro_ver[8];
+
+	IMG_CHAR *pszVersion;
+	IMG_UINT32 ui32StringLength;
+
+	err = tc_sys_strings(psSysData->pdev->dev.parent,
+							 str_fpga_rev, sizeof(str_fpga_rev),
+							 str_tcf_core_rev, sizeof(str_tcf_core_rev),
+							 str_tcf_core_target_build_id, sizeof(str_tcf_core_target_build_id),
+							 str_pci_ver, sizeof(str_pci_ver),
+							 str_macro_ver, sizeof(str_macro_ver));
+	if (err)
+	{
+		return NULL;
+	}
+
+	ui32StringLength = OSStringLength(SYSTEM_INFO_FORMAT_STRING);
+	ui32StringLength += OSStringLength(str_fpga_rev);
+	ui32StringLength += OSStringLength(str_tcf_core_rev);
+	ui32StringLength += OSStringLength(str_tcf_core_target_build_id);
+	ui32StringLength += OSStringLength(str_pci_ver);
+	ui32StringLength += OSStringLength(str_macro_ver);
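+	/* Note: the "%s" specifiers counted in the format string above are not
+	 * emitted in the output, which leaves headroom for the NUL terminator
+	 * written by OSSNPrintf(). */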
+
+	/* Create the version string */
+	pszVersion = OSAllocZMem(ui32StringLength * sizeof(IMG_CHAR));
+	if (pszVersion)
+	{
+		OSSNPrintf(&pszVersion[0], ui32StringLength,
+				   SYSTEM_INFO_FORMAT_STRING,
+				   str_fpga_rev,
+				   str_tcf_core_rev,
+				   str_tcf_core_target_build_id,
+				   str_pci_ver,
+				   str_macro_ver);
+	}
+
+	return pszVersion;
+}
+
+#if defined(SUPPORT_ION)
+static SYS_DATA *gpsIonPrivateData;
+
+PVRSRV_ERROR IonInit(void *pvPrivateData)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	SYS_DATA *psSysData = pvPrivateData;
+	gpsIonPrivateData = psSysData;
+
+	psSysData->ion_client = ion_client_create(psSysData->pdata->ion_device, SYS_RGX_DEV_NAME);
+	if (IS_ERR(psSysData->ion_client))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create ION client (%ld)", __func__, PTR_ERR(psSysData->ion_client)));
+		/* FIXME: Find a better matching error code */
+		eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+		goto err_out;
+	}
+	/* Allocate the whole rogue ion heap and pass that to services to manage */
+	psSysData->ion_rogue_allocation = ion_alloc(psSysData->ion_client, psSysData->pdata->rogue_heap_memory_size, 4096, (1 << psSysData->pdata->ion_heap_id), 0);
+	if (IS_ERR(psSysData->ion_rogue_allocation))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate ION rogue buffer (%ld)", __func__, PTR_ERR(psSysData->ion_rogue_allocation)));
+		/* FIXME: Find a better matching error code */
+		eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+		goto err_destroy_client;
+
+	}
+
+	return PVRSRV_OK;
+err_destroy_client:
+	ion_client_destroy(psSysData->ion_client);
+	psSysData->ion_client = NULL;
+err_out:
+	return eError;
+}
+
+void IonDeinit(void)
+{
+	SYS_DATA *psSysData = gpsIonPrivateData;
+	ion_free(psSysData->ion_client, psSysData->ion_rogue_allocation);
+	psSysData->ion_rogue_allocation = NULL;
+	ion_client_destroy(psSysData->ion_client);
+	psSysData->ion_client = NULL;
+}
+
+struct ion_device *IonDevAcquire(void)
+{
+	return gpsIonPrivateData->pdata->ion_device;
+}
+
+void IonDevRelease(struct ion_device *ion_device)
+{
+	PVR_ASSERT(ion_device == gpsIonPrivateData->pdata->ion_device);
+}
+#endif /* defined(SUPPORT_ION) */
+
+static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+				      IMG_UINT32 ui32NumOfAddr,
+				      IMG_DEV_PHYADDR *psDevPAddr,
+				      IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+
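+	/* On this LMA system the GPU addresses local card memory from offset 0,
+	 * so translation subtracts the heap's CPU base address (the reverse
+	 * helper below adds it back). */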
+	/* Optimise common case */
+	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr - psDevConfig->pasPhysHeaps[0].pasRegions[0].sStartAddr.uiAddr;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr - psDevConfig->pasPhysHeaps[0].pasRegions[0].sStartAddr.uiAddr;
+		}
+	}
+}
+
+static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+				      IMG_UINT32 ui32NumOfAddr,
+				      IMG_CPU_PHYADDR *psCpuPAddr,
+				      IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+
+	/* Optimise common case */
+	psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr + psDevConfig->pasPhysHeaps[0].pasRegions[0].sStartAddr.uiAddr;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr + psDevConfig->pasPhysHeaps[0].pasRegions[0].sStartAddr.uiAddr;
+		}
+	}
+}
+
+static IMG_UINT32 TCLocalGetRegionId(IMG_HANDLE hPrivData,
+					  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+	/* Return first region which is always valid */
+	return 0;
+}
+
+static void TCIonCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+				    IMG_UINT32 ui32NumOfAddr,
+				    IMG_DEV_PHYADDR *psDevPAddr,
+				    IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+	SYS_DATA *psSysData = psDevConfig->hSysData;
+
+	/* Optimise common case */
+	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr - psSysData->pdata->tc_memory_base;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr - psSysData->pdata->tc_memory_base;
+		}
+	}
+}
+
+static void TCIonDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+				    IMG_UINT32 ui32NumOfAddr,
+				    IMG_CPU_PHYADDR *psCpuPAddr,
+				    IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+	SYS_DATA *psSysData = psDevConfig->hSysData;
+
+	/* Optimise common case */
+	psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr + psSysData->pdata->tc_memory_base;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr + psSysData->pdata->tc_memory_base;
+		}
+	}
+}
+
+static IMG_UINT32 TCIonGetRegionId(IMG_HANDLE hPrivData,
+					  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+	/* Return first region which is always valid */
+	return 0;
+}
+
+static PVRSRV_ERROR PhysHeapsCreate(SYS_DATA *psSysData,
+									void *pvPrivData,
+									PHYS_HEAP_CONFIG **ppasPhysHeapsOut,
+									IMG_UINT32 *puiPhysHeapCountOut)
+{
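+	/* The static base keeps physical heap IDs unique if several devices are
+	 * created; it is advanced by PHYS_HEAP_IDX_COUNT below once the heaps
+	 * are set up. */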
+	static IMG_UINT32 uiHeapIDBase = 0;
+	PHYS_HEAP_CONFIG *pasPhysHeaps;
+	PHYS_HEAP_REGION *psRegion;
+	PVRSRV_ERROR eError;
+
+	pasPhysHeaps = OSAllocMem(sizeof(*pasPhysHeaps) * PHYS_HEAP_IDX_COUNT);
+	if (!pasPhysHeaps)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psRegion = OSAllocMem(sizeof(*psRegion));
+	if (!psRegion)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorFreePhysHeaps;
+	}
+
+	psRegion->sStartAddr.uiAddr = psSysData->pdata->rogue_heap_memory_base;
+	psRegion->sCardBase.uiAddr = 0;
+	psRegion->uiSize = psSysData->pdata->rogue_heap_memory_size;
+
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID =
+		uiHeapIDBase + PHYS_HEAP_IDX_GENERAL;
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].eType = PHYS_HEAP_TYPE_LMA;
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].pszPDumpMemspaceName = "LMA";
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].psMemFuncs = &gsLocalPhysHeapFuncs;
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].pasRegions = psRegion;
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32NumOfRegions = 1;
+	pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].hPrivData = pvPrivData;
+
+	psRegion = OSAllocMem(sizeof(*psRegion));
+	if (!psRegion)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorGeneralPhysHeapDestroy;
+	}
+
+	psRegion->sStartAddr.uiAddr = psSysData->pdata->pdp_heap_memory_base;
+	psRegion->sCardBase.uiAddr = 0;
+	psRegion->uiSize = psSysData->pdata->pdp_heap_memory_size;
+
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].ui32PhysHeapID =
+		uiHeapIDBase + PHYS_HEAP_IDX_DMABUF;
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].eType = PHYS_HEAP_TYPE_LMA;
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].pszPDumpMemspaceName = "LMA";
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].psMemFuncs = &gsIonPhysHeapFuncs;
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].pasRegions = psRegion;
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].ui32NumOfRegions = 1;
+	pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].hPrivData = pvPrivData;
+
+	uiHeapIDBase += PHYS_HEAP_IDX_COUNT;
+
+	*ppasPhysHeapsOut = pasPhysHeaps;
+	*puiPhysHeapCountOut = PHYS_HEAP_IDX_COUNT;
+
+	return PVRSRV_OK;
+
+ErrorGeneralPhysHeapDestroy:
+	OSFreeMem(pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].pasRegions);
+
+ErrorFreePhysHeaps:
+	OSFreeMem(pasPhysHeaps);
+	return eError;
+}
+
+static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps,
+							 IMG_UINT32 uiPhysHeapCount)
+{
+	IMG_UINT32 i;
+
+	for (i = 0; i < uiPhysHeapCount; i++)
+	{
+		if (pasPhysHeaps[i].pasRegions)
+		{
+			OSFreeMem(pasPhysHeaps[i].pasRegions);
+		}
+	}
+
+	OSFreeMem(pasPhysHeaps);
+}
+
+static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData,
+									   PVRSRV_DEVICE_CONFIG **ppsDevConfigOut)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	RGX_DATA *psRGXData;
+	RGX_TIMING_INFORMATION *psRGXTimingInfo;
+	PHYS_HEAP_CONFIG *pasPhysHeaps;
+	IMG_UINT32 uiPhysHeapCount;
+	PVRSRV_ERROR eError;
+
+	psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
+							  sizeof(*psRGXData) +
+							  sizeof(*psRGXTimingInfo));
+	if (!psDevConfig)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig));
+	psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData));
+
+	eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorFreeDevConfig;
+	}
+
+	/* Setup RGX specific timing data */
+	psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) * 6;
+	psRGXTimingInfo->bEnableActivePM = IMG_FALSE;
+	psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE;
+	psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+	/* Set up the RGX data */
+	psRGXData->psRGXTimingInfo = psRGXTimingInfo;
+
+	/* Setup the device config */
+	psDevConfig->pvOSDevice = &psSysData->pdev->dev;
+	psDevConfig->pszName = "tc";
+	psDevConfig->pszVersion = GetDeviceVersionString(psSysData);
+
+	psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start;
+	psDevConfig->ui32RegsSize = resource_size(psSysData->registers);
+
+	psDevConfig->ui32IRQ = TC_INTERRUPT_EXT;
+
+	psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
+
+	psDevConfig->pasPhysHeaps = pasPhysHeaps;
+	psDevConfig->ui32PhysHeapCount = uiPhysHeapCount;
+
+	psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] =
+		pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID;
+	psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] =
+		pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID;
+	psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] =
+		pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32PhysHeapID;
+	psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] =
+		pasPhysHeaps[PHYS_HEAP_IDX_DMABUF].ui32PhysHeapID;
+
+	psDevConfig->eBIFTilingMode = geBIFTilingMode;
+	psDevConfig->pui32BIFTilingHeapConfigs = &gauiBIFTilingHeapXStrides[0];
+	psDevConfig->ui32BIFTilingHeapCount = IMG_ARR_NUM_ELEMS(gauiBIFTilingHeapXStrides);
+
+	psDevConfig->hDevData = psRGXData;
+	psDevConfig->hSysData = psSysData;
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+	/* Dummy DVFS configuration used purely for testing purposes */
+	psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable;
+	psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT;
+	psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency;
+	psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage;
+#endif
+#if defined(PVR_DVFS)
+	psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = 1000;
+	psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE;
+	psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90;
+	psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10;
+#endif
+
+	*ppsDevConfigOut = psDevConfig;
+
+	return PVRSRV_OK;
+
+ErrorFreeDevConfig:
+	OSFreeMem(psDevConfig);
+	return eError;
+}
+
+static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	if (psDevConfig->pszVersion)
+	{
+		OSFreeMem(psDevConfig->pszVersion);
+	}
+
+	PhysHeapsDestroy(psDevConfig->pasPhysHeaps, psDevConfig->ui32PhysHeapCount);
+
+	OSFreeMem(psDevConfig);
+}
+
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	SYS_DATA *psSysData;
+	resource_size_t uiRegistersSize;
+	PVRSRV_ERROR eError;
+	int err = 0;
+
+	PVR_ASSERT(pvOSDevice);
+
+	dma_set_mask(pvOSDevice, DMA_BIT_MASK(32));
+
+	psSysData = OSAllocZMem(sizeof(*psSysData));
+	if (psSysData == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psSysData->pdev = to_platform_device((struct device *)pvOSDevice);
+	psSysData->pdata = psSysData->pdev->dev.platform_data;
+
+	err = tc_enable(psSysData->pdev->dev.parent);
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to enable PCI device (%d)", __func__, err));
+		eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+		goto ErrFreeSysData;
+	}
+
+	psSysData->registers = platform_get_resource_byname(psSysData->pdev,
+														IORESOURCE_MEM,
+														"rogue-regs");
+	if (!psSysData->registers)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Failed to get Rogue register information",
+				 __func__));
+		eError = PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+		goto ErrorDevDisable;
+	}
+
+	/* Check the address range is large enough. */
+	uiRegistersSize = resource_size(psSysData->registers);
+	if (uiRegistersSize < SYS_RGX_REG_REGION_SIZE)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: Rogue register region isn't big enough (was %pa, required 0x%08x)",
+				 __func__, &uiRegistersSize, SYS_RGX_REG_REGION_SIZE));
+
+		eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL;
+		goto ErrorDevDisable;
+	}
+
+	/* Reserve the address range */
+	if (!request_mem_region(psSysData->registers->start,
+							resource_size(psSysData->registers),
+							SYS_RGX_DEV_NAME))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Rogue register memory region not available", __func__));
+		eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+
+		goto ErrorDevDisable;
+	}
+
+	eError = DeviceConfigCreate(psSysData, &psDevConfig);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorReleaseMemRegion;
+	}
+
+#if defined(SUPPORT_ION)
+	eError = IonInit(psSysData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise ION", __func__));
+		goto ErrorDeviceConfigDestroy;
+	}
+#endif
+
+	*ppsDevConfig = psDevConfig;
+
+	return PVRSRV_OK;
+
+#if defined(SUPPORT_ION)
+ErrorDeviceConfigDestroy:
+	DeviceConfigDestroy(psDevConfig);
+#endif
+ErrorReleaseMemRegion:
+	release_mem_region(psSysData->registers->start,
+					   resource_size(psSysData->registers));
+ErrorDevDisable:
+	tc_disable(psSysData->pdev->dev.parent);
+ErrFreeSysData:
+	OSFreeMem(psSysData);
+	return eError;
+}
+
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	SYS_DATA *psSysData = (SYS_DATA *)psDevConfig->hSysData;
+
+#if defined(SUPPORT_ION)
+	IonDeinit();
+#endif
+
+	DeviceConfigDestroy(psDevConfig);
+
+	release_mem_region(psSysData->registers->start,
+					   resource_size(psSysData->registers));
+	tc_disable(psSysData->pdev->dev.parent);
+
+	OSFreeMem(psSysData);
+}
+
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+#if defined(TC_APOLLO_TCF5)
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+	PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+	PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+	return PVRSRV_OK;
+#else
+	SYS_DATA *psSysData = psDevConfig->hSysData;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	u32 tmp = 0;
+	u32 pll;
+
+	PVR_DUMPDEBUG_LOG("------[ rgx_tc system debug ]------");
+
+	if (tc_sys_info(psSysData->pdev->dev.parent, &tmp, &pll))
+		goto err_out;
+
+	if (tmp > 0)
+		PVR_DUMPDEBUG_LOG("Chip temperature: %d degrees C", tmp);
+	PVR_DUMPDEBUG_LOG("PLL status: %x", pll);
+
+err_out:
+	return eError;
+#endif
+}
+
+typedef struct
+{
+	struct device *psDev;
+	int iInterruptID;
+	void *pvData;
+	PFN_LISR pfnLISR;
+} LISR_DATA;
+
+static void TCInterruptHandler(void* pvData)
+{
+	LISR_DATA *psLISRData = pvData;
+	psLISRData->pfnLISR(psLISRData->pvData);
+}
+
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+				  IMG_UINT32 ui32IRQ,
+				  const IMG_CHAR *pszName,
+				  PFN_LISR pfnLISR,
+				  void *pvData,
+				  IMG_HANDLE *phLISRData)
+{
+	SYS_DATA *psSysData = (SYS_DATA *)hSysData;
+	LISR_DATA *psLISRData;
+	PVRSRV_ERROR eError;
+	int err;
+
+	if (ui32IRQ != TC_INTERRUPT_EXT)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: No device matching IRQ %d", __func__, ui32IRQ));
+		return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+	}
+
+	psLISRData = OSAllocZMem(sizeof(*psLISRData));
+	if (!psLISRData)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_out;
+	}
+
+	psLISRData->pfnLISR = pfnLISR;
+	psLISRData->pvData = pvData;
+	psLISRData->iInterruptID = ui32IRQ;
+	psLISRData->psDev = psSysData->pdev->dev.parent;
+
+	err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, TCInterruptHandler, psLISRData);
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err));
+		eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+		goto err_free_data;
+	}
+
+	err = tc_enable_interrupt(psLISRData->psDev, psLISRData->iInterruptID);
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: tc_enable_interrupt() failed (%d)", __func__, err));
+		eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+		goto err_unset_interrupt_handler;
+	}
+
+	*phLISRData = psLISRData;
+	eError = PVRSRV_OK;
+
+	PVR_TRACE(("Installed device LISR %pf to irq %u", pfnLISR, ui32IRQ));
+
+err_out:
+	return eError;
+err_unset_interrupt_handler:
+	tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL);
+err_free_data:
+	OSFreeMem(psLISRData);
+	goto err_out;
+}
+
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+	LISR_DATA *psLISRData = (LISR_DATA *) hLISRData;
+	int err;
+
+	err = tc_disable_interrupt(psLISRData->psDev, psLISRData->iInterruptID);
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: tc_disable_interrupt() failed (%d)", __func__, err));
+	}
+
+	err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL);
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err));
+	}
+
+	PVR_TRACE(("Uninstalled device LISR %pf from irq %u", psLISRData->pfnLISR, psLISRData->iInterruptID));
+
+	OSFreeMem(psLISRData);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_linux_tc/sysinfo.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_linux_tc/sysinfo.h
new file mode 100644
index 0000000..750da9c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_linux_tc/sysinfo.h
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*!< System specific poll/timeout details */
+#if defined (VIRTUAL_PLATFORM)
+#define MAX_HW_TIME_US                           (240000000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT  (120000)
+#else
+#define MAX_HW_TIME_US                           (500000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT  (10000)
+#endif
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT                           (10000)
+
+#define SYS_RGX_DEV_NAME "tc_rogue"
+
+#endif /* !defined(__SYSINFO_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/mt8167/mtk_mfgsys.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/mt8167/mtk_mfgsys.c
new file mode 100644
index 0000000..f0c58a6
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/mt8167/mtk_mfgsys.c
@@ -0,0 +1,816 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/reboot.h>
+#include <linux/version.h>
+#include <linux/notifier.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeup.h>
+
+#include "mtk_mfgsys.h"
+#include "pvr_gputrace.h"
+#include "rgxdevice.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "rgxhwperf.h"
+#include "device.h"
+#include "rgxinit.h"
+
+#ifdef CONFIG_MTK_HIBERNATION
+#include "sysconfig.h"
+#include <mach/mtk_hibernate_dpm.h>
+#include <mach/mt_irq.h>
+#include <mach/irqs.h>
+#endif
+
+#ifdef MTK_CAL_POWER_INDEX
+static IMG_PVOID g_pvRegsBaseKM;
+#define MTK_WAIT_FW_RESPONSE_TIMEOUT_US 5000
+#define MTK_GPIO_REG_OFFSET             0x30
+#define MTK_RGX_DEVICE_INDEX_INVALID    -1
+#endif
+
+static IMG_UINT32 gpu_debug_enable;
+static IMG_BOOL g_bDeviceInit;
+
+static IMG_BOOL g_bUnsync;
+static IMG_UINT32 g_ui32_unsync_freq_id;
+static IMG_BOOL bCoreinitSucceeded;
+
+static struct platform_device *sPVRLDMDev;
+static struct platform_device *sMFGASYNCDev;
+static struct platform_device *sMFG2DDev;
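+/* Returns the struct mtk_mfg_base that MTKRGXDeviceInit() stashes in the
+ * platform device's platform_data. */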
+#define GET_MTK_MFG_BASE(x) (struct mtk_mfg_base *)(x->dev.platform_data)
+
+static const char * const top_mfg_clk_sel_name[] = {
+	"mfg_slow_in_sel",
+	"mfg_axi_in_sel",
+	"mfg_mm_in_sel",
+};
+
+static const char * const top_mfg_clk_sel_parent_name[] = {
+	"slow_clk26m",
+	"bus_mainpll_d11",
+	"engine_csw_mux",
+};
+
+static const char * const top_mfg_clk_name[] = {
+	"top_slow",
+	"top_axi",
+	"top_mm",
+};
+
+#define TOP_MFG_CLK_SLOW    0
+#define TOP_MFG_CLK_AXI     1
+#define TOP_MFG_CLK_MM      2
+#define MAX_TOP_MFG_CLK ARRAY_SIZE(top_mfg_clk_name)
+
+#define REG_MFG_AXI BIT(0)
+#define REG_MFG_MEM BIT(1)
+#define REG_MFG_G3D BIT(2)
+#define REG_MFG_26M BIT(3)
+#define REG_MFG_ALL (REG_MFG_AXI | REG_MFG_MEM | REG_MFG_G3D | REG_MFG_26M)
+
+#define REG_MFG_CG_STA 0x00
+#define REG_MFG_CG_SET 0x04
+#define REG_MFG_CG_CLR 0x08
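+/* Separate set/clear registers: writing a mask to REG_MFG_CG_SET gates
+ * (stops) those clocks and writing it to REG_MFG_CG_CLR un-gates them; see
+ * mtk_mfg_set_clock_gating()/mtk_mfg_clr_clock_gating() below. */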
+
+#ifdef CONFIG_MTK_HIBERNATION
+int gpu_pm_restore_noirq(struct device *device)
+{
+#if defined(MTK_CONFIG_OF) && defined(CONFIG_OF)
+	int irq = MTKSysGetIRQ();
+#else
+	int irq = SYS_MTK_RGX_IRQ;
+#endif
+	mt_irq_set_sens(irq, MT_LEVEL_SENSITIVE);
+	mt_irq_set_polarity(irq, MT_POLARITY_LOW);
+	return 0;
+}
+#endif
+
+static PVRSRV_DEVICE_NODE *MTKGetRGXDevNode(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	IMG_UINT32 i;
+
+	for (i = 0; i < psPVRSRVData->ui32RegisteredDevices; i++) {
+		PVRSRV_DEVICE_NODE *psDeviceNode = &psPVRSRVData->psDeviceNodeList[i];
+
+		if (psDeviceNode && psDeviceNode->psDevConfig)
+			return psDeviceNode;
+	}
+	return NULL;
+}
+
+#define MTKCLK_prepare_enable(clk)								\
+	do {											\
+		if (clk) {									\
+			if (clk_prepare_enable(clk))						\
+				pr_debug("PVR_K: clk_prepare_enable failed when enabling " #clk);\
+		}										\
+	} while (0)
+
+#define MTKCLK_disable_unprepare(clk)			\
+	do {						\
+		if (clk)				\
+			clk_disable_unprepare(clk);	\
+	} while (0)
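+/* The do { } while (0) wrappers let these macros be used as single
+ * statements, e.g. in un-braced if/else branches. */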
+
+static void mtk_mfg_set_clock_gating(void __iomem *reg)
+{
+	writel(REG_MFG_ALL, reg + REG_MFG_CG_SET);
+}
+
+static void mtk_mfg_clr_clock_gating(void __iomem *reg)
+{
+	writel(REG_MFG_ALL, reg + REG_MFG_CG_CLR);
+}
+
+#if defined(MTK_USE_HW_APM)
+static void mtk_mfg_enable_hw_apm(void)
+{
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	writel(0x01a80000, mfg_base->reg_base + 0x504);
+	writel(0x00080010, mfg_base->reg_base + 0x508);
+	writel(0x00080010, mfg_base->reg_base + 0x50c);
+	writel(0x00b800b8, mfg_base->reg_base + 0x510);
+	writel(0x00b000b0, mfg_base->reg_base + 0x514);
+	writel(0x00c000c8, mfg_base->reg_base + 0x518);
+	writel(0x00c000c8, mfg_base->reg_base + 0x51c);
+	writel(0x00d000d8, mfg_base->reg_base + 0x520);
+	writel(0x00d800d8, mfg_base->reg_base + 0x524);
+	writel(0x00d800d8, mfg_base->reg_base + 0x528);
+	writel(0x9000001b, mfg_base->reg_base + 0x24);
+	writel(0x8000001b, mfg_base->reg_base + 0x24);
+}
+static void mtk_mfg_disable_hw_apm(void) {}
+#else
+static void mtk_mfg_enable_hw_apm(void) {}
+static void mtk_mfg_disable_hw_apm(void) {}
+#endif /* MTK_USE_HW_APM */
+
+static void mtk_mfg_enable_clock(void)
+{
+	int i;
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	/*
+	 * Hold a wakelock while MFG is powered on, to prevent suspend while
+	 * the GPU is active. Power-domain control must not happen once the
+	 * system suspend flow has started; if it did, the async/2d/mfg
+	 * domains could be powered in the wrong sequence.
+	 */
+	pm_stay_awake(&mfg_base->mfg_async_pdev->dev);
+
+	/* Resume mfg power domain */
+	pm_runtime_get_sync(&mfg_base->mfg_async_pdev->dev);
+	pm_runtime_get_sync(&mfg_base->mfg_2d_pdev->dev);
+#if !defined(MTK_USE_HW_APM)
+	pm_runtime_get_sync(&mfg_base->pdev->dev);
+#endif
+
+	/* Prepare and enable mfg top clock */
+	for (i = 0; i < MAX_TOP_MFG_CLK; i++)
+		MTKCLK_prepare_enable(mfg_base->top_clk[i]);
+
+	/* Enable(un-gated) mfg clock */
+	mtk_mfg_clr_clock_gating(mfg_base->reg_base);
+}
+
+static void mtk_mfg_disable_clock(void)
+{
+	int i;
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
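+	/* Power down in the reverse order of mtk_mfg_enable_clock(). */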
+	/* Disable(gated) mfg clock */
+	mtk_mfg_set_clock_gating(mfg_base->reg_base);
+
+	/* Disable and unprepare mfg top clock */
+	for (i = MAX_TOP_MFG_CLK - 1; i >= 0; i--)
+		MTKCLK_disable_unprepare(mfg_base->top_clk[i]);
+
+	/* Suspend mfg power domain */
+#if !defined(MTK_USE_HW_APM)
+	pm_runtime_put_sync(&mfg_base->pdev->dev);
+#endif
+	pm_runtime_put_sync(&mfg_base->mfg_2d_pdev->dev);
+	pm_runtime_put_sync(&mfg_base->mfg_async_pdev->dev);
+
+	/* Release wakelock when mfg power-off */
+	pm_relax(&mfg_base->mfg_async_pdev->dev);
+}
+
+static int mfg_notify_handler(struct notifier_block *this, unsigned long code,
+			      void *unused)
+{
+	struct mtk_mfg_base *mfg_base = container_of(this,
+						     typeof(*mfg_base),
+						     mfg_notifier);
+	if ((code != SYS_RESTART) && (code != SYS_POWER_OFF))
+		return 0;
+
+	pr_info("PVR_K: shutdown notified, code=%x\n", (unsigned int)code);
+
+	mutex_lock(&mfg_base->set_power_state);
+
+	/* Workaround: keep the clocks running, since the GPU may still have
+	 * unfinished commands at shutdown time. */
+	mtk_mfg_enable_clock();
+	mtk_mfg_enable_hw_apm();
+
+	mfg_base->shutdown = true;
+
+	mutex_unlock(&mfg_base->set_power_state);
+
+	return 0;
+}
+
+static void MTKEnableMfgClock(void)
+{
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	mutex_lock(&mfg_base->set_power_state);
+
+	if (!mfg_base->shutdown) {
+		mtk_mfg_enable_clock();
+		mtk_mfg_enable_hw_apm();
+	}
+
+	mutex_unlock(&mfg_base->set_power_state);
+}
+
+static void MTKDisableMfgClock(void)
+{
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	mutex_lock(&mfg_base->set_power_state);
+
+	if (!mfg_base->shutdown) {
+		mtk_mfg_disable_hw_apm();
+		mtk_mfg_disable_clock();
+	}
+
+	mutex_unlock(&mfg_base->set_power_state);
+}
+
+#ifdef MTK_CAL_POWER_INDEX
+static IMG_UINT32 MTKGetRGXDevIdx(void)
+{
+	static IMG_UINT32 ms_ui32RGXDevIdx = MTK_RGX_DEVICE_INDEX_INVALID;
+
+	if (ms_ui32RGXDevIdx == MTK_RGX_DEVICE_INDEX_INVALID) {
+		PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+		IMG_UINT32 i;
+
+		for (i = 0; i < psPVRSRVData->ui32RegisteredDevices; i++) {
+			PVRSRV_DEVICE_NODE *psDeviceNode = &psPVRSRVData->psDeviceNodeList[i];
+
+			if (psDeviceNode && psDeviceNode->psDevConfig) {
+				ms_ui32RGXDevIdx = i;
+				break;
+			}
+		}
+	}
+
+	return ms_ui32RGXDevIdx;
+}
+
+static void MTKStartPowerIndex(void)
+{
+	if (!g_pvRegsBaseKM) {
+		PVRSRV_DEVICE_NODE *psDevNode = MTKGetRGXDevNode();
+
+		if (psDevNode) {
+			PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+
+			if (psDevInfo)
+				g_pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+		}
+	}
+
+	if (g_pvRegsBaseKM)
+		DRV_WriteReg32(g_pvRegsBaseKM + (uintptr_t)0x6320, 0x1);
+}
+
+static void MTKReStartPowerIndex(void)
+{
+	if (g_pvRegsBaseKM)
+		DRV_WriteReg32(g_pvRegsBaseKM + (uintptr_t)0x6320, 0x1);
+}
+
+static void MTKStopPowerIndex(void)
+{
+	if (g_pvRegsBaseKM)
+		DRV_WriteReg32(g_pvRegsBaseKM + (uintptr_t)0x6320, 0x0);
+}
+
+static IMG_UINT32 MTKCalPowerIndex(void)
+{
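+	/*
+	 * Request/acknowledge handshake with the firmware, as implemented
+	 * below: assert GPIO_INPUT_REQ, poll for GPIO_OUTPUT_REQ, sample the
+	 * state and power estimate registers, then acknowledge and poll for
+	 * the request to drop. Any timeout aborts and returns 0.
+	 */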
+	IMG_UINT32 ui32State, ui32Result;
+	PVRSRV_DEV_POWER_STATE  ePowerState;
+	IMG_BOOL bTimeout;
+	IMG_UINT32 u32Deadline;
+	IMG_PVOID pvGPIO_REG = g_pvRegsKM + (uintptr_t)MTK_GPIO_REG_OFFSET;
+	/* Assumed to be the hex offset 0x6328 (the companion control register
+	 * above is 0x6320); the original decimal 6328 appears to be a typo. */
+	IMG_PVOID pvPOWER_ESTIMATE_RESULT = g_pvRegsBaseKM + (uintptr_t)0x6328;
+
+	if ((!g_pvRegsKM) || (!g_pvRegsBaseKM))
+		return 0;
+
+	if (PVRSRVPowerLock() != PVRSRV_OK)
+		return 0;
+
+	PVRSRVGetDevicePowerState(MTKGetRGXDevIdx(), &ePowerState);
+	if (ePowerState != PVRSRV_DEV_POWER_STATE_ON) {
+		PVRSRVPowerUnlock();
+		return 0;
+	}
+
+	/* writes 1 to GPIO_INPUT_REQ, bit[0] */
+	DRV_WriteReg32(pvGPIO_REG, DRV_Reg32(pvGPIO_REG) | 0x1);
+
+	/* wait for 1 in GPIO_OUTPUT_REQ, bit[16] */
+	bTimeout = IMG_TRUE;
+	u32Deadline = OSClockus() + MTK_WAIT_FW_RESPONSE_TIMEOUT_US;
+	while (OSClockus() < u32Deadline) {
+		if (0x10000 & DRV_Reg32(pvGPIO_REG)) {
+			bTimeout = IMG_FALSE;
+			break;
+		}
+	}
+
+	/* writes 0 to GPIO_INPUT_REQ, bit[0] */
+	DRV_WriteReg32(pvGPIO_REG, DRV_Reg32(pvGPIO_REG) & (~0x1));
+	if (bTimeout) {
+		PVRSRVPowerUnlock();
+		return 0;
+	}
+
+	/* read GPIO_OUTPUT_DATA, bit[24] */
+	ui32State = DRV_Reg32(pvGPIO_REG) >> 24;
+
+	/* read POWER_ESTIMATE_RESULT */
+	ui32Result = DRV_Reg32(pvPOWER_ESTIMATE_RESULT);
+
+	/* writes 1 to GPIO_OUTPUT_ACK, bit[17] */
+	DRV_WriteReg32(pvGPIO_REG, DRV_Reg32(pvGPIO_REG) | 0x20000);
+
+	/* wait for 0 in GPIO_OUTPUT_REQ, bit[16] */
+	bTimeout = IMG_TRUE;
+	u32Deadline = OSClockus() + MTK_WAIT_FW_RESPONSE_TIMEOUT_US;
+	while (OSClockus() < u32Deadline) {
+		if (!(0x10000 & DRV_Reg32(pvGPIO_REG))) {
+			bTimeout = IMG_FALSE;
+			break;
+		}
+	}
+
+	/* writes 0 to GPIO_OUTPUT_ACK, bit[17] */
+	DRV_WriteReg32(pvGPIO_REG, DRV_Reg32(pvGPIO_REG) & (~0x20000));
+	if (bTimeout) {
+		PVRSRVPowerUnlock();
+		return 0;
+	}
+
+	MTKReStartPowerIndex();
+	PVRSRVPowerUnlock();
+	return (ui32State == 1) ? ui32Result : 0;
+}
+#endif
+
+static bool MTKCheckDeviceInit(void)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = MTKGetRGXDevNode();
+	bool ret = false;
+
+	if (psDevNode) {
+		if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE)
+			ret = true;
+	}
+
+	return ret;
+}
+
+PVRSRV_ERROR MTKDevPrePowerState(IMG_HANDLE hSysData, PVRSRV_DEV_POWER_STATE eNewPowerState,
+				 PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+				 IMG_BOOL bForced)
+{
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	mutex_lock(&mfg_base->set_power_state);
+
+	if ((eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) &&
+	    (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)) {
+		if (g_bDeviceInit) {
+#ifdef MTK_CAL_POWER_INDEX
+			MTKStopPowerIndex();
+#endif
+		} else
+			g_bDeviceInit = MTKCheckDeviceInit();
+
+		mtk_mfg_disable_hw_apm();
+		mtk_mfg_disable_clock();
+	}
+
+	mutex_unlock(&mfg_base->set_power_state);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR MTKDevPostPowerState(IMG_HANDLE hSysData, PVRSRV_DEV_POWER_STATE eNewPowerState,
+				  PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+				  IMG_BOOL bForced)
+{
+	struct mtk_mfg_base *mfg_base = GET_MTK_MFG_BASE(sPVRLDMDev);
+
+	mutex_lock(&mfg_base->set_power_state);
+
+	if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) &&
+	    (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON)) {
+		mtk_mfg_enable_clock();
+		mtk_mfg_enable_hw_apm();
+
+		if (g_bDeviceInit) {
+#ifdef MTK_CAL_POWER_INDEX
+			MTKStartPowerIndex();
+#endif
+		} else
+			g_bDeviceInit = MTKCheckDeviceInit();
+
+		if (g_bUnsync == IMG_TRUE) {
+//			mt_gpufreq_target(g_ui32_unsync_freq_id);
+			g_bUnsync = IMG_FALSE;
+		}
+	}
+
+	mutex_unlock(&mfg_base->set_power_state);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR MTKSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
+{
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR MTKSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
+{
+	return PVRSRV_OK;
+}
+
+#ifdef SUPPORT_PDVFS
+#include "rgxpdvfs.h"
+static IMG_OPP *gpasOPPTable;
+
+static int MTKMFGOppUpdate(int ui32ThrottlePoint)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = MTKGetRGXDevNode();
+	int i, ui32OPPTableSize;
+
+	static RGXFWIF_PDVFS_OPP sPDFVSOppInfo;
+	static int bNotReady = 1;
+
+	if (bNotReady) {
+		ui32OPPTableSize = mt_gpufreq_get_dvfs_table_num();
+		gpasOPPTable = (IMG_OPP *)OSAllocZMem(sizeof(IMG_OPP) * ui32OPPTableSize);
+		if (!gpasOPPTable)
+			return 0;
+
+		for (i = 0; i < ui32OPPTableSize; i++) {
+			gpasOPPTable[i].ui32Volt = mt_gpufreq_get_volt_by_idx(i);
+			gpasOPPTable[i].ui32Freq = mt_gpufreq_get_freq_by_idx(i) * 1000;
+		}
+
+		if (psDevNode) {
+			PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+			PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+
+			psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = gpasOPPTable;
+			psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = ui32OPPTableSize;
+
+			/* sPDFVSOppInfo.ui32ThrottlePoint = ui32ThrottlePoint; */
+			/* PDVFSSendOPPPoints(psDevInfo, sPDFVSOppInfo); */
+			bNotReady = 0;
+
+			PVR_DPF((PVR_DBG_ERROR, "PDVFS opptab=%p size=%d init completed",
+				 psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable, ui32OPPTableSize));
+		} else {
+			if (gpasOPPTable)
+				OSFreeMem(gpasOPPTable);
+		}
+	}
+
+	return 0;
+}
+
+static void MTKFakeGpuLoading(unsigned int *pui32Loading, unsigned int *pui32Block, unsigned int *pui32Idle)
+{
+	*pui32Loading = 0;
+	*pui32Block = 0;
+	*pui32Idle = 0;
+}
+#endif
+
+PVRSRV_ERROR MTKMFGSystemInit(void)
+{
+	/* Set the CB for ptpod use */
+//	mt_gpufreq_mfgclock_notify_registerCB(MTKEnableMfgClock, MTKDisableMfgClock);
+
+#ifdef CONFIG_MTK_HIBERNATION
+	register_swsusp_restore_noirq_func(ID_M_GPU, gpu_pm_restore_noirq, NULL);
+#endif
+
+	return PVRSRV_OK;
+}
+
+void MTKMFGSystemDeInit(void)
+{
+#ifdef SUPPORT_PDVFS
+	if (gpasOPPTable)
+		OSFreeMem(gpasOPPTable);
+#endif
+
+#ifdef CONFIG_MTK_HIBERNATION
+	unregister_swsusp_restore_noirq_func(ID_M_GPU);
+#endif
+
+#ifdef MTK_CAL_POWER_INDEX
+	g_pvRegsBaseKM = NULL;
+#endif
+}
+
+static void mfg_clk_set_parent(struct mtk_mfg_base *mfg_base)
+{
+	/* mfg_slow_in_sel and mfg_axi_in_sel are not glitch-free muxes, so each
+	 * mux must be disabled before its parent is changed.
+	 * mfg_mm_in_sel is a glitch-free mux, so it must be enabled before its
+	 * parent is changed.
+	 */
+	clk_set_parent(mfg_base->top_clk_sel[TOP_MFG_CLK_SLOW], mfg_base->top_clk_sel_parent[TOP_MFG_CLK_SLOW]);
+	clk_set_parent(mfg_base->top_clk_sel[TOP_MFG_CLK_AXI], mfg_base->top_clk_sel_parent[TOP_MFG_CLK_AXI]);
+
+	clk_prepare_enable(mfg_base->top_clk_sel[TOP_MFG_CLK_MM]);
+	clk_set_parent(mfg_base->top_clk_sel[TOP_MFG_CLK_MM], mfg_base->top_clk_sel_parent[TOP_MFG_CLK_MM]);
+	clk_disable_unprepare(mfg_base->top_clk_sel[TOP_MFG_CLK_MM]);
+}
+
+static int mtk_mfg_bind_device_resource(struct platform_device *pdev,
+				 struct mtk_mfg_base *mfg_base)
+{
+	int i, err;
+	int len_clk = sizeof(struct clk *) * MAX_TOP_MFG_CLK;
+
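+	/* The mfg-async and mfg-2d sub-devices are bound by their own platform
+	 * drivers (registered below); defer probing until both are present. */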
+	if (!sMFGASYNCDev || !sMFG2DDev) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get pm_domain", __func__));
+		return -EPROBE_DEFER;
+	}
+
+	mfg_base->top_clk_sel = devm_kzalloc(&pdev->dev, len_clk, GFP_KERNEL);
+	if (!mfg_base->top_clk_sel)
+		return -ENOMEM;
+
+	mfg_base->top_clk_sel_parent = devm_kzalloc(&pdev->dev, len_clk, GFP_KERNEL);
+	if (!mfg_base->top_clk_sel_parent)
+		return -ENOMEM;
+
+	mfg_base->top_clk = devm_kzalloc(&pdev->dev, len_clk, GFP_KERNEL);
+	if (!mfg_base->top_clk)
+		return -ENOMEM;
+
+	mfg_base->reg_base = of_iomap(pdev->dev.of_node, 1);
+	if (!mfg_base->reg_base) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Unable to ioremap registers pdev %p", __func__, pdev));
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < MAX_TOP_MFG_CLK; i++) {
+		mfg_base->top_clk_sel_parent[i] = devm_clk_get(&pdev->dev,
+					top_mfg_clk_sel_parent_name[i]);
+		if (IS_ERR(mfg_base->top_clk_sel_parent[i])) {
+			err = PTR_ERR(mfg_base->top_clk_sel_parent[i]);
+			PVR_DPF((PVR_DBG_ERROR, "%s: devm_clk_get %s failed", __func__,
+				top_mfg_clk_sel_parent_name[i]));
+			goto err_iounmap_reg_base;
+		}
+	}
+
+	for (i = 0; i < MAX_TOP_MFG_CLK; i++) {
+		mfg_base->top_clk_sel[i] = devm_clk_get(&pdev->dev,
+						    top_mfg_clk_sel_name[i]);
+		if (IS_ERR(mfg_base->top_clk_sel[i])) {
+			err = PTR_ERR(mfg_base->top_clk_sel[i]);
+			PVR_DPF((PVR_DBG_ERROR, "%s: devm_clk_get %s failed", __func__, top_mfg_clk_sel_name[i]));
+			goto err_iounmap_reg_base;
+		}
+	}
+
+	for (i = 0; i < MAX_TOP_MFG_CLK; i++) {
+		mfg_base->top_clk[i] = devm_clk_get(&pdev->dev,
+						    top_mfg_clk_name[i]);
+		if (IS_ERR(mfg_base->top_clk[i])) {
+			err = PTR_ERR(mfg_base->top_clk[i]);
+			PVR_DPF((PVR_DBG_ERROR, "%s: devm_clk_get %s failed", __func__, top_mfg_clk_name[i]));
+			goto err_iounmap_reg_base;
+		}
+	}
+
+	mfg_clk_set_parent(mfg_base);
+
+	mfg_base->mfg_2d_pdev = sMFG2DDev;
+	mfg_base->mfg_async_pdev = sMFGASYNCDev;
+
+	mfg_base->mfg_notifier.notifier_call = mfg_notify_handler;
+	register_reboot_notifier(&mfg_base->mfg_notifier);
+
+	pm_runtime_enable(&pdev->dev);
+
+	mfg_base->pdev = pdev;
+	return 0;
+
+err_iounmap_reg_base:
+	iounmap(mfg_base->reg_base);
+	return err;
+}
+
+int MTKRGXDeviceInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	struct platform_device *pdev;
+	struct mtk_mfg_base *mfg_base;
+	int err;
+
+	pdev = to_platform_device((struct device *)psDevConfig->pvOSDevice);
+
+	sPVRLDMDev = pdev;
+	mfg_base = devm_kzalloc(&pdev->dev, sizeof(*mfg_base), GFP_KERNEL);
+	if (!mfg_base)
+		return -ENOMEM;
+
+	err = mtk_mfg_bind_device_resource(pdev, mfg_base);
+	if (err != 0)
+		return err;
+
+	mutex_init(&mfg_base->set_power_state);
+	pdev->dev.platform_data = mfg_base;
+
+	bCoreinitSucceeded = IMG_TRUE;
+	return 0;
+}
+
+int MTKRGXDeviceDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	return 0;
+}
+
+void MTKDisablePowerDomain(void)
+{
+	if (sMFG2DDev)
+		pm_runtime_put_sync(&sMFG2DDev->dev);
+
+	if (sMFGASYNCDev)
+		pm_runtime_put_sync(&sMFGASYNCDev->dev);
+}
+
+bool mt_gpucore_ready(void)
+{
+	return (bCoreinitSucceeded == IMG_TRUE);
+}
+EXPORT_SYMBOL(mt_gpucore_ready);
+
+module_param(gpu_debug_enable, uint, 0644);
+
+static int mtk_mfg_async_probe(struct platform_device *pdev)
+{
+#ifdef MTK_DEBUG
+	pr_info("mtk_mfg_async_probe\n");
+#endif
+
+	if (!pdev->dev.pm_domain) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dev->pm_domain", __func__));
+		return -EPROBE_DEFER;
+	}
+
+	sMFGASYNCDev = pdev;
+	pm_runtime_enable(&pdev->dev);
+
+	/* Use async power domain as a system suspend indicator. */
+	device_init_wakeup(&pdev->dev, true);
+	return 0;
+}
+
+static int mtk_mfg_async_remove(struct platform_device *pdev)
+{
+	device_init_wakeup(&pdev->dev, false);
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id mtk_mfg_async_of_ids[] = {
+	{ .compatible = "mediatek,mt8167-mfg-async",},
+	{}
+};
+
+static struct platform_driver mtk_mfg_async_driver = {
+	.probe  = mtk_mfg_async_probe,
+	.remove = mtk_mfg_async_remove,
+	.driver = {
+		.name = "mfg-async",
+		.of_match_table = mtk_mfg_async_of_ids,
+	}
+};
+
+#if defined(MODULE)
+int mtk_mfg_async_init(void)
+#else
+static int __init mtk_mfg_async_init(void)
+#endif
+{
+	int ret;
+
+	ret = platform_driver_register(&mtk_mfg_async_driver);
+	if (ret != 0) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register mfg driver", __func__));
+		return ret;
+	}
+
+	if (sMFGASYNCDev)
+		pm_runtime_get_sync(&sMFGASYNCDev->dev);
+	else
+		PVR_DPF((PVR_DBG_ERROR, "%s: Enable power domain failed", __func__));
+
+	return 0;
+}
+
+static int mtk_mfg_2d_probe(struct platform_device *pdev)
+{
+#ifdef MTK_DEBUG
+	pr_info("mtk_mfg_2d_probe\n");
+#endif
+
+	if (!pdev->dev.pm_domain) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dev->pm_domain", __func__));
+		return -EPROBE_DEFER;
+	}
+
+	sMFG2DDev = pdev;
+	pm_runtime_enable(&pdev->dev);
+	return 0;
+}
+
+static int mtk_mfg_2d_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id mtk_mfg_2d_of_ids[] = {
+	{ .compatible = "mediatek,mt8167-mfg-2d",},
+	{}
+};
+
+static struct platform_driver mtk_mfg_2d_driver = {
+	.probe  = mtk_mfg_2d_probe,
+	.remove = mtk_mfg_2d_remove,
+	.driver = {
+		.name = "mfg-2d",
+		.of_match_table = mtk_mfg_2d_of_ids,
+	}
+};
+
+#if defined(MODULE)
+int mtk_mfg_2d_init(void)
+#else
+static int __init mtk_mfg_2d_init(void)
+#endif
+{
+	int ret;
+
+	ret = platform_driver_register(&mtk_mfg_2d_driver);
+	if (ret != 0) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register mfg driver", __func__));
+		return ret;
+	}
+
+	if (sMFG2DDev)
+		pm_runtime_get_sync(&sMFG2DDev->dev);
+	else
+		PVR_DPF((PVR_DBG_ERROR, "%s: Enable power domain failed", __func__));
+
+	return 0;
+}
+
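+/* When built into the kernel, register the power-domain sub-drivers at
+ * subsys level so they bind before the GPU device probes; when built as a
+ * module, the mtk_mfg_*_init() functions are exported via mtk_mfgsys.h and
+ * must be called explicitly instead. */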
+#ifndef MODULE
+subsys_initcall(mtk_mfg_async_init);
+subsys_initcall(mtk_mfg_2d_init);
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/mt8167/mtk_mfgsys.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/mt8167/mtk_mfgsys.h
new file mode 100644
index 0000000..ab7de36
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/mt8167/mtk_mfgsys.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_MFGSYS_H
+#define MTK_MFGSYS_H
+
+#include "servicesext.h"
+#include "rgxdevice.h"
+#include <linux/regulator/consumer.h>
+
+/* Controls whether SW active power management (APM) is enabled */
+#ifndef MTK_BRINGUP
+#define MTK_PM_SUPPORT 1
+#else
+#define MTK_PM_SUPPORT 0
+#endif
+
+struct mtk_mfg_base {
+	struct platform_device *pdev;
+	struct platform_device *mfg_2d_pdev;
+	struct platform_device *mfg_async_pdev;
+
+	struct clk **top_clk_sel;
+	struct clk **top_clk_sel_parent;
+	struct clk **top_clk;
+	void __iomem *reg_base;
+
+	/* Mutex protecting power-state transitions */
+	struct mutex set_power_state;
+	bool shutdown;
+	struct notifier_block mfg_notifier;
+};
+
+PVRSRV_ERROR MTKMFGSystemInit(void);
+void MTKMFGSystemDeInit(void);
+void MTKDisablePowerDomain(void);
+void MTKFWDump(void);
+
+/* The functions below are registered as callbacks in the RGX sysconfig.c */
+PVRSRV_ERROR MTKDevPrePowerState(IMG_HANDLE hSysData, PVRSRV_DEV_POWER_STATE eNewPowerState,
+				 PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+				 IMG_BOOL bForced);
+
+PVRSRV_ERROR MTKDevPostPowerState(IMG_HANDLE hSysData, PVRSRV_DEV_POWER_STATE eNewPowerState,
+				  PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+				  IMG_BOOL bForced);
+
+PVRSRV_ERROR MTKSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
+
+PVRSRV_ERROR MTKSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
+
+int MTKRGXDeviceInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+int MTKRGXDeviceDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+#ifdef CONFIG_MTK_HIBERNATION
+extern void mt_irq_set_sens(unsigned int irq, unsigned int sens);
+extern void mt_irq_set_polarity(unsigned int irq, unsigned int polarity);
+int gpu_pm_restore_noirq(struct device *device);
+#endif
+
+#ifdef SUPPORT_PDVFS
+extern unsigned int mt_gpufreq_get_volt_by_idx(unsigned int idx);
+#endif
+
+#if defined(MODULE)
+int mtk_mfg_async_init(void);
+int mtk_mfg_2d_init(void);
+#endif
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/sysconfig.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/sysconfig.c
new file mode 100644
index 0000000..29d6f43
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/sysconfig.c
@@ -0,0 +1,311 @@
+/*************************************************************************
+* @File
+* @Title          System Configuration
+* @Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+* @Description    System Configuration functions
+* @License        Dual MIT/GPLv2
+*
+* The contents of this file are subject to the MIT license as set out below.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to deal
+* in the Software without restriction, including without limitation the rights
+* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+* copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* Alternatively, the contents of this file may be used under the terms of
+* the GNU General Public License Version 2 ("GPL") in which case the provisions
+* of GPL are applicable instead of those above.
+*
+* If you wish to allow use of your version of this file only under the terms of
+* GPL, and not to allow others to use your version of this file under the terms
+* of the MIT license, indicate your decision by deleting the provisions above
+* and replace them with the notice and other provisions required by GPL as set
+* out in the file called "GPL-COPYING" included in this distribution. If you do
+* not delete the provisions above, a recipient may use your version of this file
+* under the terms of either the MIT license or GPL.
+*
+* This License is also included in this distribution in the file called
+* "MIT-COPYING".
+*
+* EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+* PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+* PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+**************************************************************************/
+
+#include "interrupt_support.h"
+#include "pvrsrv_device.h"
+#include "syscommon.h"
+#include "sysconfig.h"
+#include "physheap.h"
+#if defined(SUPPORT_ION)
+#include "ion_support.h"
+#endif
+#include "mtk_mfgsys.h"
+
+#if defined(MTK_CONFIG_OF) && defined(CONFIG_OF)
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+struct platform_device *gpsPVRCfgDev;
+#endif
+
+#define RGX_CR_ISP_GRIDOFFSET   (0x0FA0U)
+
+static RGX_TIMING_INFORMATION   gsRGXTimingInfo;
+static RGX_DATA                 gsRGXData;
+static PVRSRV_DEVICE_CONFIG     gsDevices[1];
+
+static PHYS_HEAP_FUNCTIONS      gsPhysHeapFuncs;
+static PHYS_HEAP_CONFIG         gsPhysHeapConfig;
+
+#if defined(SUPPORT_PDVFS)
+/* Dummy DVFS configuration used purely for testing purposes */
+static const IMG_OPP asOPPTable[] = {
+	{ 100000, 253500000},
+	{ 100000, 338000000},
+	{ 100000, 390000000},
+	{ 112500, 546000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+	{ 112500, 676000000},
+};
+
+#define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP))
+#endif
+
+
+/* CPU to Device physical address translation */
+static void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+				   IMG_UINT32 ui32NumOfAddr,
+				   IMG_DEV_PHYADDR *psDevPAddr,
+				   IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+
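+	/* UMA: the GPU and CPU share system memory, so the mapping is 1:1. */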
+	/* Optimise common case */
+	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+	if (ui32NumOfAddr > 1) {
+		IMG_UINT32 ui32Idx;
+
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+	}
+}
+
+/* Device to CPU physical address translation */
+static void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+				   IMG_UINT32 ui32NumOfAddr,
+				   IMG_CPU_PHYADDR *psCpuPAddr,
+				   IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+	/* Optimise common case */
+	psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr;
+	if (ui32NumOfAddr > 1) {
+		IMG_UINT32 ui32Idx;
+
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+			psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr;
+	}
+}
+
+#if defined(MTK_CONFIG_OF) && defined(CONFIG_OF)
+static int g32SysIrq = -1;
+int MTKSysGetIRQ(void)
+{
+	return g32SysIrq;
+}
+#endif
+
+/* SysCreateConfigData */
+static PHYS_HEAP_REGION gsHeapRegionsLocal[] = {
+	/* sStartAddr, sCardBase, uiSize */
+	{ { 0 }, { 0 }, 0, },
+};
+
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+	PVRSRV_ERROR err = PVRSRV_OK;
+
+	gsPhysHeapFuncs.pfnCpuPAddrToDevPAddr = UMAPhysHeapCpuPAddrToDevPAddr;
+	gsPhysHeapFuncs.pfnDevPAddrToCpuPAddr = UMAPhysHeapDevPAddrToCpuPAddr;
+
+	gsPhysHeapConfig.ui32PhysHeapID = 0;
+	gsPhysHeapConfig.pszPDumpMemspaceName = "SYSMEM";
+	gsPhysHeapConfig.eType = PHYS_HEAP_TYPE_UMA;
+	gsPhysHeapConfig.psMemFuncs = &gsPhysHeapFuncs;
+	gsPhysHeapConfig.hPrivData = (IMG_HANDLE)&gsDevices[0];
+
+	gsPhysHeapConfig.pasRegions = &gsHeapRegionsLocal[0];
+
+	gsDevices[0].pvOSDevice = pvOSDevice;
+	gsDevices[0].pasPhysHeaps = &gsPhysHeapConfig;
+	gsDevices[0].ui32PhysHeapCount = sizeof(gsPhysHeapConfig) / sizeof(PHYS_HEAP_CONFIG);
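+	/* gsPhysHeapConfig is a single PHYS_HEAP_CONFIG, so the count above
+	 * evaluates to 1. */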
+
+	gsDevices[0].eBIFTilingMode = RGXFWIF_BIFTILINGMODE_256x16;
+	gsDevices[0].pui32BIFTilingHeapConfigs = gauiBIFTilingHeapXStrides;
+	gsDevices[0].ui32BIFTilingHeapCount = IMG_ARR_NUM_ELEMS(gauiBIFTilingHeapXStrides);
+
+	/* Setup RGX specific timing data */
+	gsRGXTimingInfo.ui32CoreClockSpeed = RGX_HW_CORE_CLOCK_SPEED;
+
+#if MTK_PM_SUPPORT
+	gsRGXTimingInfo.bEnableActivePM = true;
+	gsRGXTimingInfo.ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+#else
+	gsRGXTimingInfo.bEnableActivePM = false;
+#endif
+
+	/* define HW APM */
+#if defined(MTK_USE_HW_APM)
+	gsRGXTimingInfo.bEnableRDPowIsland = true;
+#else
+	gsRGXTimingInfo.bEnableRDPowIsland = false;
+#endif
+
+	/* Setup RGX specific data */
+	gsRGXData.psRGXTimingInfo = &gsRGXTimingInfo;
+
+	/* Setup RGX device */
+	gsDevices[0].pszName = "RGX";
+	gsDevices[0].pszVersion = NULL;
+
+	/* Device setup information */
+#if defined(MTK_CONFIG_OF) && defined(CONFIG_OF)
+	/* MTK: using device tree */
+	{
+		struct resource *irq_res;
+		struct resource *reg_res;
+
+		gpsPVRCfgDev = to_platform_device((struct device *)pvOSDevice);
+		irq_res = platform_get_resource(gpsPVRCfgDev, IORESOURCE_IRQ, 0);
+
+		if (irq_res) {
+			gsDevices[0].ui32IRQ = irq_res->start;
+			g32SysIrq = irq_res->start;
+
+			PVR_LOG(("irq_res = 0x%x", (int)irq_res->start));
+		} else {
+			PVR_DPF((PVR_DBG_ERROR, "irq_res = NULL!"));
+			return PVRSRV_ERROR_INIT_FAILURE;
+		}
+
+		reg_res = platform_get_resource(gpsPVRCfgDev, IORESOURCE_MEM, 0);
+
+		if (reg_res) {
+			gsDevices[0].sRegsCpuPBase.uiAddr = reg_res->start;
+			gsDevices[0].ui32RegsSize = resource_size(reg_res);
+
+			PVR_LOG(("reg_res = 0x%x, 0x%x", (int)reg_res->start,
+									(int)resource_size(reg_res)));
+		} else {
+			PVR_DPF((PVR_DBG_ERROR, "reg_res = NULL!"));
+			return PVRSRV_ERROR_INIT_FAILURE;
+		}
+	}
+#else
+	gsDevices[0].sRegsCpuPBase.uiAddr = SYS_MTK_RGX_REGS_SYS_PHYS_BASE;
+	gsDevices[0].ui32RegsSize = SYS_MTK_RGX_REGS_SIZE;
+	gsDevices[0].ui32IRQ = SYS_MTK_RGX_IRQ;
+#endif
+
+	/* Power management on HW system */
+	gsDevices[0].pfnPrePowerState = MTKDevPrePowerState;
+	gsDevices[0].pfnPostPowerState = MTKDevPostPowerState;
+
+	/* Clock frequency */
+	gsDevices[0].pfnClockFreqGet = NULL;
+
+	gsDevices[0].hDevData = &gsRGXData;
+	gsDevices[0].eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
+
+#if defined(CONFIG_MACH_MT6739)
+	gsDevices[0].pfnSysDevFeatureDepInit = NULL;
+#endif
+
+#if defined(SUPPORT_PDVFS)
+	/* Dummy DVFS configuration used purely for testing purposes */
+	gsDevices[0].sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable;
+	gsDevices[0].sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT;
+#endif
+
+	/* Setup other system specific stuff */
+#if defined(SUPPORT_ION)
+	IonInit(NULL);
+#endif
+
+	gsDevices[0].pvOSDevice = pvOSDevice;
+	*ppsDevConfig = &gsDevices[0];
+
+	MTKRGXDeviceInit(gsDevices);
+	return err;
+}
+
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+#if defined(SUPPORT_ION)
+	IonDeinit();
+#endif
+
+	MTKRGXDeviceDeInit(gsDevices);
+	psDevConfig->pvOSDevice = NULL;
+}
+
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+				  IMG_UINT32 ui32IRQ,
+				  const IMG_CHAR *pszName,
+				  PFN_LISR pfnLISR,
+				  void *pvData,
+				  IMG_HANDLE *phLISRData)
+{
+	IMG_UINT32 ui32IRQFlags = SYS_IRQ_FLAG_TRIGGER_LOW;
+
+	PVR_UNREFERENCED_PARAMETER(hSysData);
+
+#if defined(PVRSRV_GPUVIRT_MULTIDRV_MODEL)
+	ui32IRQFlags |= SYS_IRQ_FLAG_SHARED;
+#endif
+
+	return OSInstallSystemLISR(phLISRData, ui32IRQ, pszName, pfnLISR, pvData,
+							   ui32IRQFlags);
+}
+
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+	return OSUninstallSystemLISR(hLISRData);
+}
+
+
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+	PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+	return PVRSRV_OK;
+}
+
+/******************************************************************************
+* End of file (sysconfig.c)
+******************************************************************************/
+
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/sysconfig.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/sysconfig.h
new file mode 100644
index 0000000..46f8a6c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/sysconfig.h
@@ -0,0 +1,89 @@
+/*************************************************************************
+* @File
+* @Title          System Description Header
+* @Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+* @Description    This header provides system-specific declarations and macros
+* @License        Dual MIT/GPLv2
+*
+* The contents of this file are subject to the MIT license as set out below.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to deal
+* in the Software without restriction, including without limitation the rights
+* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+* copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* Alternatively, the contents of this file may be used under the terms of
+* the GNU General Public License Version 2 ("GPL") in which case the provisions
+* of GPL are applicable instead of those above.
+*
+* If you wish to allow use of your version of this file only under the terms of
+* GPL, and not to allow others to use your version of this file under the terms
+* of the MIT license, indicate your decision by deleting the provisions above
+* and replace them with the notice and other provisions required by GPL as set
+* out in the file called "GPL-COPYING" included in this distribution. If you do
+* not delete the provisions above, a recipient may use your version of this file
+* under the terms of either the MIT license or GPL.
+*
+* This License is also included in this distribution in the file called
+* "MIT-COPYING".
+*
+* EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+* PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+* PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+***************************************************************************/
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+
+#if !defined(__SYSCONFIG_H__)
+#define __SYSCONFIG_H__
+
+
+#define RGX_HW_SYSTEM_NAME "RGX HW"
+
+#define RGX_HW_CORE_CLOCK_SPEED			(500000000)
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (50)
+
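+/* Default BIF tiling heap x-stride configuration, one entry per BIF tiling
+ * heap; passed to the device config via pui32BIFTilingHeapConfigs in
+ * sysconfig.c.
+ */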
+static IMG_UINT32 gauiBIFTilingHeapXStrides[RGXFWIF_NUM_BIF_TILING_CONFIGS] = {
+	0, /* BIF tiling heap 1 x-stride */
+	1, /* BIF tiling heap 2 x-stride */
+	2, /* BIF tiling heap 3 x-stride */
+	3  /* BIF tiling heap 4 x-stride */
+};
+
+#if defined(MTK_CONFIG_OF) && defined(CONFIG_OF)
+int MTKSysGetIRQ(void);
+#else
+
+/* If CONFIG_OF is not set, make sure the following register base and IRQ number are correct for your platform */
+/* #error RGX_GPU_please_fill_the_following_defines */
+#define SYS_MTK_RGX_REGS_SYS_PHYS_BASE      0x13000000
+#define SYS_MTK_RGX_REGS_SIZE               0x80000
+
+#if defined(CONFIG_MACH_MT8173)
+#define SYS_MTK_RGX_IRQ                     0x102
+#elif defined(CONFIG_MACH_MT8167)
+#define SYS_MTK_RGX_IRQ                     0xDB
+#elif defined(CONFIG_MACH_MT6739)
+#define SYS_MTK_RGX_IRQ                     0x150
+#endif
+
+#endif
+
+
+
+/*****************************************************************************
+ * system specific data structures
+ *****************************************************************************/
+
+#endif	/* __SYSCONFIG_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/sysinfo.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/sysinfo.h
new file mode 100644
index 0000000..3346c6c
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_mtk/sysinfo.h
@@ -0,0 +1,77 @@
+/*************************************************************************
+* @File
+* @Title          System Description Header
+* @Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+* @Description    This header provides system-specific declarations and macros
+* @License        Dual MIT/GPLv2
+*
+* The contents of this file are subject to the MIT license as set out below.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to deal
+* in the Software without restriction, including without limitation the rights
+* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+* copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* Alternatively, the contents of this file may be used under the terms of
+* the GNU General Public License Version 2 ("GPL") in which case the provisions
+* of GPL are applicable instead of those above.
+*
+* If you wish to allow use of your version of this file only under the terms of
+* GPL, and not to allow others to use your version of this file under the terms
+* of the MIT license, indicate your decision by deleting the provisions above
+* and replace them with the notice and other provisions required by GPL as set
+* out in the file called "GPL-COPYING" included in this distribution. If you do
+* not delete the provisions above, a recipient may use your version of this file
+* under the terms of either the MIT license or GPL.
+*
+* This License is also included in this distribution in the file called
+* "MIT-COPYING".
+*
+* EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+* PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+* PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*!< System specific poll/timeout details */
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+#define MAX_HW_TIME_US								(1000000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT		(10000)
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT	(3600000)
+#define WAIT_TRY_COUNT								(20000)
+#else
+#define MAX_HW_TIME_US								(5000000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT		(10000)
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT	(3600000)
+#define WAIT_TRY_COUNT								(100000)
+#endif
+
+#define SYS_DEVICE_COUNT		3 /* RGX, DISPLAY (external), BUFFER (external) */
+
+#define SYS_PHYS_HEAP_COUNT		1
+
+#define SYS_RGX_OF_COMPATIBLE	"mediatek,mt8167-clark"
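+/*
+ * A device-tree node matching SYS_RGX_OF_COMPATIBLE might look like the
+ * hypothetical sketch below; the register range mirrors the defaults in
+ * sysconfig.h, but the interrupt specifier is purely illustrative:
+ *
+ *	gpu@13000000 {
+ *		compatible = "mediatek,mt8167-clark";
+ *		reg = <0 0x13000000 0 0x80000>;
+ *		interrupts = <GIC_SPI 219 IRQ_TYPE_LEVEL_LOW>;
+ *	};
+ */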
+
+#if defined(__linux__)
+/*
+ * Use the static bus ID for the platform DRM device.
+ */
+#if defined(PVR_DRM_DEV_BUS_ID)
+#define	SYS_RGX_DEV_DRM_BUS_ID	PVR_DRM_DEV_BUS_ID
+#else
+#define SYS_RGX_DEV_DRM_BUS_ID	"platform:pvrsrvkm"
+#endif	/* defined(PVR_DRM_DEV_BUS_ID) */
+#endif
+
+#endif	/* !defined(__SYSINFO_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_nohw/Kbuild.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_nohw/Kbuild.mk
new file mode 100644
index 0000000..6f68093
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_nohw/Kbuild.mk
@@ -0,0 +1,56 @@
+########################################################################### ###
+#@File
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+# 
+# The contents of this file are subject to the MIT license as set out below.
+# 
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+# 
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# 
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+# 
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+# 
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+# 
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+PVRSRVKM_NAME = $(PVRSRV_MODNAME)
+
+$(PVRSRVKM_NAME)-y += services/system/$(PVR_SYSTEM)/sysconfig.o \
+ services/system/common/env/linux/dma_support.o \
+ services/system/common/vz_physheap_generic.o \
+ services/system/common/vz_physheap_common.o \
+ services/system/common/vmm_pvz_client.o \
+ services/system/common/vmm_pvz_server.o \
+ services/system/common/vz_vmm_pvz.o \
+ services/system/common/vz_vmm_vm.o \
+ services/system/common/vz_support.o \
+ services/system/common/vmm_type_stub.o
+
+ifeq ($(SUPPORT_ION),1)
+$(PVRSRVKM_NAME)-y += services/system/common/env/linux/ion_support_generic.o
+endif
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_nohw/sysconfig.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_nohw/sysconfig.c
new file mode 100644
index 0000000..965409d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_nohw/sysconfig.c
@@ -0,0 +1,337 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System Configuration functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_device.h"
+#include "syscommon.h"
+#include "vz_support.h"
+#include "allocmem.h"
+#include "sysinfo.h"
+#include "sysconfig.h"
+#include "physheap.h"
+#if defined(SUPPORT_ION)
+#include "ion_support.h"
+#endif
+#if defined(LINUX)
+#include <linux/dma-mapping.h>
+#endif
+#include "rgx_bvnc_defs_km.h"
+/*
+ * In systems that support trusted device address protection, there are three
+ * physical heaps from which pages should be allocated:
+ * - one heap for normal allocations
+ * - one heap for allocations holding META code memory
+ * - one heap for allocations holding secured DRM data
+ */
+
+#define PHYS_HEAP_IDX_GENERAL     0
+#define PHYS_HEAP_IDX_FW          1
+#define PHYS_HEAP_IDX_TDFWCODE    2
+#define PHYS_HEAP_IDX_TDSECUREBUF 3
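+/*
+ * PhysHeapsCreate() below always creates the GENERAL and FW heaps; the two
+ * trusted-device heaps are only added when SUPPORT_TRUSTED_DEVICE is defined.
+ */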
+
+/*
+	CPU to Device physical address translation
+*/
+static
+void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+								   IMG_UINT32 ui32NumOfAddr,
+								   IMG_DEV_PHYADDR *psDevPAddr,
+								   IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+	/* Optimise common case */
+	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+		}
+	}
+}
+
+/*
+	Device to CPU physical address translation
+*/
+static
+void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+								   IMG_UINT32 ui32NumOfAddr,
+								   IMG_CPU_PHYADDR *psCpuPAddr,
+								   IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+	/* Optimise common case */
+	psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr;
+		}
+	}
+}
+
+static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs =
+{
+	/* pfnCpuPAddrToDevPAddr */
+	UMAPhysHeapCpuPAddrToDevPAddr,
+	/* pfnDevPAddrToCpuPAddr */
+	UMAPhysHeapDevPAddrToCpuPAddr,
+	/* pfnGetRegionId */
+	NULL,
+};
+
+static PVRSRV_ERROR PhysHeapsCreate(PHYS_HEAP_CONFIG **ppasPhysHeapsOut,
+									IMG_UINT32 *puiPhysHeapCountOut)
+{
+	PHYS_HEAP_CONFIG *pasPhysHeaps;
+	IMG_UINT32 ui32NextHeapID = 0;
+	IMG_UINT32 uiHeapCount = 2;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	uiHeapCount += 2;
+#endif
+
+	pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * uiHeapCount);
+	if (!pasPhysHeaps)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	pasPhysHeaps[ui32NextHeapID].ui32PhysHeapID = PHYS_HEAP_IDX_GENERAL;
+	pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "SYSMEM";
+	pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA;
+	pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs;
+	ui32NextHeapID++;
+
+	pasPhysHeaps[ui32NextHeapID].ui32PhysHeapID = PHYS_HEAP_IDX_FW;
+	pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "SYSMEM_FW";
+	pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA;
+	pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs;
+	ui32NextHeapID++;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	pasPhysHeaps[ui32NextHeapID].ui32PhysHeapID = PHYS_HEAP_IDX_TDFWCODE;
+	pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "TDFWCODEMEM";
+	pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA;
+	pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs;
+	ui32NextHeapID++;
+
+	pasPhysHeaps[ui32NextHeapID].ui32PhysHeapID = PHYS_HEAP_IDX_TDSECUREBUF;
+	pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "TDSECUREBUFMEM";
+	pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA;
+	pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs;
+	ui32NextHeapID++;
+#endif
+
+	*ppasPhysHeapsOut = pasPhysHeaps;
+	*puiPhysHeapCountOut = ui32NextHeapID;
+
+	return PVRSRV_OK;
+}
+
+static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps)
+{
+	OSFreeMem(pasPhysHeaps);
+}
+
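+/*
+ * Select the cache snooping mode from the BVNC feature bits: with
+ * SUPPORT_AXI_ACE_TEST, an ACE-Lite part snoops the CPU only; a part with
+ * GPU/CPU coherency snoops both ways; anything else gets no snooping.
+ */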
+static void SysDevFeatureDepInit(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT64 ui64Features)
+{
+#if defined(SUPPORT_AXI_ACE_TEST)
+	if (ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK)
+	{
+		psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY;
+	}
+	else
+#endif
+	if (ui64Features & RGX_FEATURE_GPU_CPU_COHERENCY_BIT_MASK)
+	{
+		psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS;
+	}
+	else
+	{
+		psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
+	}
+}
+
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	RGX_DATA *psRGXData;
+	RGX_TIMING_INFORMATION *psRGXTimingInfo;
+	PHYS_HEAP_CONFIG *pasPhysHeaps;
+	IMG_UINT32 uiPhysHeapCount;
+	PVRSRV_ERROR eError;
+
+#if defined(LINUX)
+	dma_set_mask(pvOSDevice, DMA_BIT_MASK(40));
+#endif
+
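+	/*
+	 * One allocation holds the device config followed by the RGX data and
+	 * the timing info; the pointer arithmetic below carves it up, and
+	 * SysDevDeInit() releases all three with a single OSFreeMem().
+	 */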
+	psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
+							  sizeof(*psRGXData) +
+							  sizeof(*psRGXTimingInfo));
+	if (!psDevConfig)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig));
+	psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData));
+
+	eError = PhysHeapsCreate(&pasPhysHeaps, &uiPhysHeapCount);
+	if (eError)
+	{
+		goto ErrorFreeDevConfig;
+	}
+
+	/* Setup RGX specific timing data */
+	psRGXTimingInfo->ui32CoreClockSpeed        = RGX_NOHW_CORE_CLOCK_SPEED;
+	psRGXTimingInfo->bEnableActivePM           = IMG_FALSE;
+	psRGXTimingInfo->bEnableRDPowIsland        = IMG_FALSE;
+	psRGXTimingInfo->ui32ActivePMLatencyms     = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+	/* Set up the RGX data */
+	psRGXData->psRGXTimingInfo = psRGXTimingInfo;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	psRGXData->bHasTDFWCodePhysHeap = IMG_TRUE;
+	psRGXData->uiTDFWCodePhysHeapID = PHYS_HEAP_IDX_TDFWCODE;
+
+	psRGXData->bHasTDSecureBufPhysHeap = IMG_TRUE;
+	psRGXData->uiTDSecureBufPhysHeapID = PHYS_HEAP_IDX_TDSECUREBUF;
+#endif
+
+	/* Setup the device config */
+	psDevConfig->pvOSDevice				= pvOSDevice;
+	psDevConfig->pszName                = "nohw";
+	psDevConfig->pszVersion             = NULL;
+	psDevConfig->pfnSysDevFeatureDepInit = SysDevFeatureDepInit;
+
+	/* Device setup information */
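+	/* There is no real hardware behind this system: the register base, size
+	 * and IRQ below are dummy values and are never used to touch a device. */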
+	psDevConfig->sRegsCpuPBase.uiAddr   = 0x00f00baa;
+	psDevConfig->ui32RegsSize           = 0x4000;
+	psDevConfig->ui32IRQ                = 0x00000bad;
+
+	psDevConfig->pasPhysHeaps			= pasPhysHeaps;
+	psDevConfig->ui32PhysHeapCount		= uiPhysHeapCount;
+
+	psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PHYS_HEAP_IDX_GENERAL;
+	psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PHYS_HEAP_IDX_GENERAL;
+	psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL] = PHYS_HEAP_IDX_GENERAL;
+	psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PHYS_HEAP_IDX_FW;
+
+	psDevConfig->eBIFTilingMode = geBIFTilingMode;
+	psDevConfig->pui32BIFTilingHeapConfigs = gauiBIFTilingHeapXStrides;
+	psDevConfig->ui32BIFTilingHeapCount = IMG_ARR_NUM_ELEMS(gauiBIFTilingHeapXStrides);
+
+	/* No power management on no HW system */
+	psDevConfig->pfnPrePowerState       = NULL;
+	psDevConfig->pfnPostPowerState      = NULL;
+
+	/* No clock frequency either */
+	psDevConfig->pfnClockFreqGet        = NULL;
+
+	psDevConfig->hDevData               = psRGXData;
+
+	/* Setup other system specific stuff */
+#if defined(SUPPORT_ION)
+	IonInit(NULL);
+#endif
+
+	*ppsDevConfig = psDevConfig;
+
+	return PVRSRV_OK;
+
+ErrorFreeDevConfig:
+	OSFreeMem(psDevConfig);
+	return eError;
+}
+
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+#if defined(SUPPORT_ION)
+	IonDeinit();
+#endif
+
+	PhysHeapsDestroy(psDevConfig->pasPhysHeaps);
+	OSFreeMem(psDevConfig);
+}
+
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+								  IMG_UINT32 ui32IRQ,
+								  const IMG_CHAR *pszName,
+								  PFN_LISR pfnLISR,
+								  void *pvData,
+								  IMG_HANDLE *phLISRData)
+{
+	PVR_UNREFERENCED_PARAMETER(hSysData);
+	PVR_UNREFERENCED_PARAMETER(ui32IRQ);
+	PVR_UNREFERENCED_PARAMETER(pszName);
+	PVR_UNREFERENCED_PARAMETER(pfnLISR);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+	PVR_UNREFERENCED_PARAMETER(phLISRData);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+	PVR_UNREFERENCED_PARAMETER(hLISRData);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+				DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+				void *pvDumpDebugFile)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevConfig);
+	PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+	PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+	return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (sysconfig.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_nohw/sysconfig.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_nohw/sysconfig.h
new file mode 100644
index 0000000..6bcf94d
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_nohw/sysconfig.h
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+
+#if !defined(__SYSCONFIG_H__)
+#define __SYSCONFIG_H__
+
+
+#define RGX_NOHW_CORE_CLOCK_SPEED 100000000
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (100)
+
+/* BIF Tiling mode configuration */
+static RGXFWIF_BIFTILINGMODE geBIFTilingMode = RGXFWIF_BIFTILINGMODE_256x16;
+
+/* default BIF tiling heap x-stride configurations. */
+static IMG_UINT32 gauiBIFTilingHeapXStrides[RGXFWIF_NUM_BIF_TILING_CONFIGS] =
+{
+	0, /* BIF tiling heap 1 x-stride */
+	1, /* BIF tiling heap 2 x-stride */
+	2, /* BIF tiling heap 3 x-stride */
+	3  /* BIF tiling heap 4 x-stride */
+};
+
+/*****************************************************************************
+ * system specific data structures
+ *****************************************************************************/
+ 
+#endif	/* __SYSCONFIG_H__ */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_nohw/sysinfo.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_nohw/sysinfo.h
new file mode 100644
index 0000000..8395add
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/services/system/rgx_nohw/sysinfo.h
@@ -0,0 +1,57 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*!< System specific poll/timeout details */
+#define MAX_HW_TIME_US                           (500000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT  (10000)
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT                           (10000)
+
+#if defined(__linux__)
+#define SYS_RGX_DEV_NAME    "rgxnohw"
+#endif
+
+#endif	/* !defined(__SYSINFO_H__) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/Kbuild.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/Kbuild.mk
new file mode 100644
index 0000000..5685efae
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/Kbuild.mk
@@ -0,0 +1,50 @@
+########################################################################### ###
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+# 
+# The contents of this file are subject to the MIT license as set out below.
+# 
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+# 
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# 
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+# 
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+# 
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+# 
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+
+ccflags-y += \
+	-I$(TOP)/tools/services/debug/dbgdriv/common \
+	-I$(TOP)/tools/services/debug/include
+
+dbgdrv-y += \
+	tools/services/debug/dbgdriv/common/dbgdriv.o \
+	tools/services/debug/dbgdriv/common/ioctl.o \
+	tools/services/debug/dbgdriv/common/dbgdriv_handle.o \
+	tools/services/debug/dbgdriv/linux/main.o \
+	tools/services/debug/dbgdriv/linux/hostfunc.o
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/Linux.mk b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/Linux.mk
new file mode 100644
index 0000000..e050879
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/Linux.mk
@@ -0,0 +1,45 @@
+########################################################################### ###
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+# 
+# The contents of this file are subject to the MIT license as set out below.
+# 
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+# 
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# 
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+# 
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+# 
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+# 
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+
+modules := dbgdrv
+
+dbgdrv_type := kernel_module
+dbgdrv_target := dbgdrv.ko
+dbgdrv_makefile := $(THIS_DIR)/Kbuild.mk
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/dbgdriv.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/dbgdriv.c
new file mode 100644
index 0000000..fe3511a5
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/dbgdriv.c
@@ -0,0 +1,1562 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug Driver
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    32 Bit kernel mode debug driver
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(_WIN32)
+#pragma  warning(disable:4201)
+#pragma  warning(disable:4214)
+#pragma  warning(disable:4115)
+#pragma  warning(disable:4514)
+
+
+#include <ntddk.h>
+#include <windef.h>
+#include <winerror.h>
+#endif /* _WIN32 */
+
+#ifdef LINUX
+#include <linux/string.h>
+#endif
+
+#if defined (__QNXNTO__) || defined (INTEGRITY_OS)
+#include <string.h>
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+#include "hostfunc.h"
+
+#ifdef _WIN32
+#pragma  warning(default:4214)
+#pragma  warning(default:4115)
+#endif /* _WIN32 */
+
+
+/******************************************************************************
+ Types
+******************************************************************************/
+
+/*
+	Per-buffer control structure.
+*/
+typedef struct _DBG_STREAM_
+{
+	struct _DBG_STREAM_* psNext;
+	struct _DBG_STREAM_* psInitStream;
+	struct _DBG_STREAM_* psDeinitStream;
+	IMG_UINT32 ui32Flags;			/*!< flags (see DEBUG_FLAGS) */
+	void *pvBase;
+	IMG_UINT32 ui32Size;
+	IMG_UINT32 ui32RPtr;
+	IMG_UINT32 ui32WPtr;
+
+	IMG_UINT32 ui32Marker;			/*!< Size marker for file splitting */
+
+	IMG_UINT32 ui32InitPhaseWOff;	/*!< snapshot offset for init phase end for follow-on pdump */
+
+	IMG_CHAR   szName[DEBUG_STREAM_NAME_MAX];			/* Give this a size, some compilers don't like [] */
+} DBG_STREAM;
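+/*
+ * Each named stream is in fact a triple: an init-phase stream, a main stream
+ * and a deinit-phase stream, linked via psInitStream/psDeinitStream and
+ * returned as three separate handles by DBGDrivCreateStream().
+ */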
+
+/* Check 4xDBG_STREAM will fit in one page */
+static_assert((sizeof(DBG_STREAM) * 4) < HOST_PAGESIZE, "DBG_STREAM is too large");
+
+/******************************************************************************
+ Global variables
+******************************************************************************/
+
+static PDBG_STREAM          g_psStreamList = NULL;
+
+/* Mutex used to prevent UM threads (via the dbgdrv ioctl interface) and KM
+ * threads (from pvrsrvkm via the ExtDBG API) entering the debug driver core
+ * and changing the state of share data at the same time.
+ */
+void *                      g_pvAPIMutex=NULL;
+
+static IMG_UINT32			g_PDumpCurrentFrameNo = 0;
+
+DBGKM_SERVICE_TABLE g_sDBGKMServices =
+{
+	sizeof (DBGKM_SERVICE_TABLE),
+	ExtDBGDrivCreateStream,
+	ExtDBGDrivDestroyStream,
+	ExtDBGDrivWrite2,
+	ExtDBGDrivSetMarker,
+	ExtDBGDrivWaitForEvent,
+	ExtDBGDrivGetCtrlState,
+	ExtDBGDrivSetFrame
+};
+
+
+/***************************************************************************
+ Forward declarations
+***************************************************************************/
+
+IMG_BOOL   IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Pages, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+void   IMG_CALLCONV DBGDrivDestroyStream(IMG_HANDLE hInit,IMG_HANDLE hMain, IMG_HANDLE hDeinit);
+void * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream);
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
+void   IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate);
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize);
+void   IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
+void   IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(void);
+void   IMG_CALLCONV DBGDrivSetFrame(IMG_UINT32 ui32Frame);
+void   DestroyAllStreams(void);
+
+/* Static function declarations */
+static IMG_UINT32 SpaceInStream(PDBG_STREAM psStream);
+static IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize);
+static void InvalidateAllStreams(void);
+
+
+/*****************************************************************************
+ Code
+*****************************************************************************/
+
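+/*
+ * The ExtDBGDriv* entry points below are thin wrappers around the internal
+ * DBGDriv* implementations: each takes the global API mutex, calls the
+ * worker, then releases the mutex, serialising UM (ioctl) and KM callers.
+ */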
+/*!
+ @name	ExtDBGDrivCreateStream
+ */
+IMG_BOOL IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit)
+{
+	IMG_BOOL bRet;
+
+	/* Acquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	bRet = DBGDrivCreateStream(pszName, ui32Flags, ui32Size, phInit, phMain, phDeinit);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return bRet;
+}
+
+/*!
+ @name	ExtDBGDrivDestroyStream
+ */
+void IMG_CALLCONV ExtDBGDrivDestroyStream(IMG_HANDLE hInit,IMG_HANDLE hMain, IMG_HANDLE hDeinit)
+{
+	/* Acquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	DBGDrivDestroyStream(hInit, hMain, hDeinit);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return;
+}
+
+/*!
+ @name	ExtDBGDrivFindStream
+ */
+void * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
+{
+	void *	pvRet;
+
+	/* Acquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	pvRet=DBGDrivFindStream(pszName, bResetStream);
+	if (pvRet == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "ExtDBGDrivFindStream: Stream not found"));
+	}
+
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return pvRet;
+}
+
+/*!
+ @name	ExtDBGDrivRead
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
+{
+	IMG_UINT32 ui32Ret;
+
+	/* Acquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	ui32Ret=DBGDrivRead(psStream, ui32BufID, ui32OutBuffSize, pui8OutBuf);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return ui32Ret;
+}
+
+/*!
+ @name	ExtDBGDrivWrite2
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+	IMG_UINT32	ui32Ret;
+
+	/* Acquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	ui32Ret=DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return ui32Ret;
+}
+
+/*!
+ @name	ExtDBGDrivSetMarker
+ */
+void IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+	/* Acquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	DBGDrivSetMarker(psStream, ui32Marker);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return;
+}
+
+/*!
+ @name	ExtDBGDrivGetMarker
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream)
+{
+	IMG_UINT32	ui32Marker;
+
+	/* Acquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	ui32Marker = DBGDrivGetMarker(psStream);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return ui32Marker;
+}
+
+/*!
+ @name	ExtDBGDrivWaitForEvent
+ */
+void IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent)
+{
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+	DBGDrivWaitForEvent(eEvent);
+#else	/* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+	PVR_UNREFERENCED_PARAMETER(eEvent);				/* PRQA S 3358 */
+#endif	/* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+}
+
+
+/*!
+ @name	ExtDBGDrivGetCtrlState
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID)
+{
+	IMG_UINT32 ui32State = 0;
+
+	/* Acquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	ui32State = DBGDrivGetCtrlState(psStream, ui32StateID);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return ui32State;
+}
+
+/*!
+ @name	ExtDBGDrivGetFrame
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(void)
+{
+	IMG_UINT32 ui32Frame = 0;
+
+	/* Acquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	ui32Frame = DBGDrivGetFrame();
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return ui32Frame;
+}
+
+/*!
+ @name	ExtDBGDrivSetFrame
+ */
+void IMG_CALLCONV ExtDBGDrivSetFrame(IMG_UINT32 ui32Frame)
+{
+	/* Acquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	DBGDrivSetFrame(ui32Frame);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return;
+}
+
+
+
+/*!****************************************************************************
+ @name		AtoI
+ @brief		Returns the integer value of a decimal or hexadecimal string
+ @param		szIn - String with a decimal value, or a hex value prefixed with 0x/0X
+ @return	IMG_UINT32 integer value, 0 if string is null or not valid
+				Based on Max's version; copes with hex words too, upper or lower case a-f.
+*****************************************************************************/
+IMG_UINT32 AtoI(IMG_CHAR *szIn)
+{
+	IMG_INT		iLen = 0;
+	IMG_UINT32	ui32Value = 0;
+	IMG_UINT32	ui32Digit=1;
+	IMG_UINT32	ui32Base=10;
+	IMG_INT		iPos;
+	IMG_CHAR	bc;
+
+	/* get len of string */
+	while (szIn[iLen] > 0)
+	{
+		iLen ++;
+	}
+
+	/* nothing to do */
+	if (iLen == 0)
+	{
+		return (0);
+	}
+
+	/* See if we have an 'x' or 'X' before the number to make it a hex number */
+	iPos=0;
+	while (szIn[iPos] == '0')
+	{
+		iPos++;
+	}
+	if (szIn[iPos] == '\0')
+	{
+		return 0;
+	}
+	if (szIn[iPos] == 'x' || szIn[iPos] == 'X')
+	{
+		ui32Base=16;
+		szIn[iPos]='0';
+	}
+
+	/* go through string from right (least significant) to left */
+	for (iPos = iLen - 1; iPos >= 0; iPos --)
+	{
+		bc = szIn[iPos];
+
+		if ( (bc >= 'a') && (bc <= 'f') && ui32Base == 16)	/* handle lower case a-f */
+		{
+			bc -= 'a' - 0xa;
+		}
+		else
+		if ( (bc >= 'A') && (bc <= 'F') && ui32Base == 16)	/* handle upper case A-F */
+		{
+			bc -= 'A' - 0xa;
+		}
+		else
+		if ((bc >= '0') && (bc <= '9'))	/* if char out of range, return 0 */
+		{
+			bc -= '0';
+		}
+		else
+			return (0);
+
+		ui32Value += (IMG_UINT32)bc  * ui32Digit;
+
+		ui32Digit = ui32Digit * ui32Base;
+	}
+	return (ui32Value);
+}
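+/*
+ * Examples: AtoI("42") == 42 and AtoI("0x1A") == 26; an 'x'/'X' after any
+ * leading zeros selects base 16, and any invalid character makes the whole
+ * call return 0.
+ */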
+
+
+/*!****************************************************************************
+ @name		StreamValid
+ @brief		Validates supplied debug buffer.
+ @param		psStream - debug stream
+ @return	true if valid
+*****************************************************************************/
+static IMG_BOOL StreamValid(PDBG_STREAM psStream)
+{
+	PDBG_STREAM	psThis;
+
+	psThis = g_psStreamList;
+
+	while (psThis)
+	{
+		if (psStream && ((psThis == psStream) ||
+						(psThis->psInitStream == psStream) ||
+						(psThis->psDeinitStream == psStream)) )
+		{
+			return(IMG_TRUE);
+		}
+		else
+		{
+			psThis = psThis->psNext;
+		}
+	}
+
+	return(IMG_FALSE);
+}
+
+
+/*!****************************************************************************
+ @name		StreamValidForRead
+ @brief		Validates supplied debug buffer for read op.
+ @param		psStream - debug stream
+ @return	true if readable
+*****************************************************************************/
+static IMG_BOOL StreamValidForRead(PDBG_STREAM psStream)
+{
+	if( StreamValid(psStream) &&
+		((psStream->ui32Flags & DEBUG_FLAGS_WRITEONLY) == 0) )
+	{
+		return(IMG_TRUE);
+	}
+
+	return(IMG_FALSE);
+}
+
+/*!****************************************************************************
+ @name		StreamValidForWrite
+ @brief		Validates supplied debug buffer for write op.
+ @param		psStream - debug stream
+ @return	true if writable
+*****************************************************************************/
+static IMG_BOOL StreamValidForWrite(PDBG_STREAM psStream)
+{
+	if( StreamValid(psStream) &&
+		((psStream->ui32Flags & DEBUG_FLAGS_READONLY) == 0) )
+	{
+		return(IMG_TRUE);
+	}
+
+	return(IMG_FALSE);
+}
+
+/*!****************************************************************************
+ @name		Write
+ @brief		Copies data from a buffer into selected stream. Stream size is fixed.
+ @param		psStream - stream for output
+ @param		pui8Data - input buffer
+ @param		ui32InBuffSize - size of input
+ @return	none
+*****************************************************************************/
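+/*
+ * For example, with ui32Size = 16, ui32WPtr = 12 and a 10-byte write to a
+ * circular stream, bytes 0..3 land at offsets 12..15, bytes 4..9 wrap round
+ * to offsets 0..5, and ui32WPtr finishes at 6.
+ */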
+static void Write(PDBG_STREAM psStream,IMG_PUINT8 pui8Data,IMG_UINT32 ui32InBuffSize)
+{
+	/*
+		Split copy into two bits as necessary (if we're allowed to wrap).
+	*/
+	if ((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) == 0)
+	{
+		PVR_ASSERT( (psStream->ui32WPtr + ui32InBuffSize) < psStream->ui32Size );
+	}
+
+	if ((psStream->ui32WPtr + ui32InBuffSize) > psStream->ui32Size)
+	{
+		/* Yes we need two bits, calculate their sizes */
+		IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32WPtr;
+		IMG_UINT32 ui32B2 = ui32InBuffSize - ui32B1;
+
+		/* Copy first block to current location */
+		HostMemCopy((void *)((uintptr_t)psStream->pvBase + psStream->ui32WPtr),
+				(void *) pui8Data,
+				ui32B1);
+
+		/* Copy second block to start of buffer */
+		HostMemCopy(psStream->pvBase,
+				(void *)(pui8Data + ui32B1),
+				ui32B2);
+
+		/* Set pointer to be the new end point */
+		psStream->ui32WPtr = ui32B2;
+	}
+	else
+	{	/* Can fit block in single chunk */
+		HostMemCopy((void *)((uintptr_t)psStream->pvBase + psStream->ui32WPtr),
+				(void *) pui8Data,
+				ui32InBuffSize);
+
+		psStream->ui32WPtr += ui32InBuffSize;
+
+		if (psStream->ui32WPtr == psStream->ui32Size)
+		{
+			psStream->ui32WPtr = 0;
+		}
+	}
+}
+
+
+/*!****************************************************************************
+ @name		WriteExpandingBuffer
+ @brief		Copies data from a buffer into selected stream. Stream size may be expandable.
+ @param		psStream - stream for output
+ @param		pui8InBuf - input buffer
+ @param		ui32InBuffSize - size of input
+ @return	bytes copied
+*****************************************************************************/
+static IMG_UINT32 WriteExpandingBuffer(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+	IMG_UINT ui32Space;
+
+	/*
+		How much space have we got in the buffer?
+	*/
+	ui32Space = SpaceInStream(psStream);
+
+	/*
+		Check if we can expand the buffer
+	*/
+	if (psStream->ui32Flags & DEBUG_FLAGS_NO_BUF_EXPANDSION)
+	{
+		/*
+			Don't do anything if we've got less than 32 bytes of space and
+			we're not allowing expansion of the buffer...
+		*/
+		if (ui32Space < 32)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %p is full and isn't expandable", psStream));
+			return(0);
+		}
+	}
+	else
+	{
+		if ((ui32Space < 32) || (ui32Space <= (ui32InBuffSize + 4)))
+		{
+			IMG_UINT32	ui32NewBufSize;
+
+			/*
+				Find the new buffer size: double the current size, but grow by
+				at most 1MB, and never beyond PDUMP_STREAMBUF_MAX_SIZE_MB
+			*/
+			ui32NewBufSize = MIN(psStream->ui32Size<<1,psStream->ui32Size+(1<<20));
+			ui32NewBufSize = MIN(ui32NewBufSize, PDUMP_STREAMBUF_MAX_SIZE_MB<<20);
+			PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanding buffer size = %x, new size = %x",
+					psStream->ui32Size, ui32NewBufSize));
+
+			if (ui32InBuffSize > psStream->ui32Size)
+			{
+				ui32NewBufSize += ui32InBuffSize;
+				PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %p is expanding by size of input buffer %u", psStream, ui32NewBufSize));
+			}
+
+			/*
+				Attempt to expand the buffer
+			*/
+			if ((ui32NewBufSize < psStream->ui32Size) ||
+					!ExpandStreamBuffer(psStream,ui32NewBufSize))
+			{
+				if (ui32Space < 32)
+				{
+					if((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) != 0)
+					{
+						return(0);
+					}
+					else
+					{
+						/* out of memory */
+						PVR_LOG(("DBGDRV: Error: unable to expand %p stream. Out of PDump memory, InvalidateAllStreams() called", psStream));
+						InvalidateAllStreams();
+						return (0xFFFFFFFFUL);
+					}
+				}
+			}
+
+			/*
+				Recalc the space in the buffer
+			*/
+			ui32Space = SpaceInStream(psStream);
+			PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanded buffer, free space = %x",
+					ui32Space));
+		}
+	}
+
+	/*
+		Only copy what we can..
+	*/
+	if (ui32Space <= (ui32InBuffSize + 4))
+	{
+		ui32InBuffSize = ui32Space - 4;
+	}
+
+	/*
+		Write the stuff...
+	*/
+	Write(psStream,pui8InBuf,ui32InBuffSize);
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+	if (ui32InBuffSize)
+	{
+		HostSignalEvent(DBG_EVENT_STREAM_DATA);
+	}
+#endif
+	return(ui32InBuffSize);
+}
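+
+/* Illustrative sketch of the growth policy above (hypothetical sizes): a
+ * 256 KB stream grows to MIN(512 KB, 256 KB + 1 MB) = 512 KB, i.e. the
+ * buffer doubles until a doubling would add more than 1 MB, then grows in
+ * 1 MB steps, always clamped to PDUMP_STREAMBUF_MAX_SIZE_MB << 20 bytes.
+ */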
+
+/*****************************************************************************
+******************************************************************************
+******************************************************************************
+ THE ACTUAL FUNCTIONS
+******************************************************************************
+******************************************************************************
+*****************************************************************************/
+
+static void DBGDrivSetStreamName(PDBG_STREAM psStream,
+									 IMG_CHAR* pszBase,
+									 IMG_CHAR* pszExt)
+{
+	IMG_CHAR* pCh = psStream->szName;
+	IMG_CHAR* pChEnd = psStream->szName+DEBUG_STREAM_NAME_MAX-8;
+	IMG_CHAR* pSrcCh;
+	IMG_CHAR* pSrcChEnd;
+
+	for (pSrcCh = pszBase, pSrcChEnd = pszBase+strlen(pszBase);
+			(pSrcCh < pSrcChEnd) && (pCh < pChEnd) ;
+			pSrcCh++, pCh++)
+	{
+		*pCh = *pSrcCh;
+	}
+
+	for (pSrcCh = pszExt, pSrcChEnd = pszExt+strlen(pszExt);
+			(pSrcCh < pSrcChEnd) && (pCh < pChEnd) ;
+			pSrcCh++, pCh++)
+	{
+		*pCh = *pSrcCh;
+	}
+
+	*pCh = '\0';
+}
+
+/*!****************************************************************************
+ @name		DBGDrivCreateStream
+ @brief		Creates a pdump/debug stream
+ @param		pszName - stream name
+ @param		ui32Flags - output flags, text stream bit is set for pdumping
+ @param		ui32Size - size of stream buffer in pages
+ @param		phInit, phMain, phDeinit - receive handles for the init/main/deinit streams
+ @return	IMG_TRUE on success, IMG_FALSE on failure
+*****************************************************************************/
+IMG_BOOL IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR *pszName,
+                                          IMG_UINT32 ui32Flags,
+                                          IMG_UINT32 ui32Size,
+                                          IMG_HANDLE* phInit,
+                                          IMG_HANDLE* phMain,
+                                          IMG_HANDLE* phDeinit)
+{
+	IMG_BOOL            bUseNonPagedMem4Buffers = ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0);
+	PDBG_STREAM         psStream = NULL;
+	PDBG_STREAM	        psInitStream = NULL;
+	PDBG_STREAM         psStreamDeinit = NULL;
+	void*           pvBase = NULL;
+
+	/*
+		If we already have a buffer using this name just return
+		its handle.
+	*/
+	psStream = (PDBG_STREAM) DBGDrivFindStream(pszName, IMG_FALSE);
+	if (psStream)
+	{
+		*phInit = psStream->psInitStream;
+		*phMain = psStream;
+		*phDeinit = psStream->psDeinitStream;
+		return IMG_TRUE;
+	}
+
+	/*
+		Allocate memory for control structures
+	*/
+	psStream = HostNonPageablePageAlloc(1);
+	if (!psStream)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc control structs\n\r"));
+		goto errCleanup;
+	}
+	psInitStream = psStream+1;
+	psStreamDeinit = psStream+2;
+
+	/* NULL the buffer pointers up front so errCleanup never frees an
+	 * uninitialised pointer if a later allocation fails. */
+	psInitStream->pvBase = NULL;
+	psStreamDeinit->pvBase = NULL;
+
+
+	/* Allocate memory for Main buffer */
+	psStream->pvBase = NULL;
+	if (bUseNonPagedMem4Buffers)
+	{
+		pvBase = HostNonPageablePageAlloc(ui32Size);
+	}
+	else
+	{
+		pvBase = HostPageablePageAlloc(ui32Size);
+	}
+
+	if (!pvBase)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc Stream buffer\n\r"));
+		goto errCleanup;
+	}
+
+	/*
+		Setup debug buffer state.
+	*/
+	psStream->psNext = 0;
+	psStream->pvBase = pvBase;
+	psStream->ui32Flags = ui32Flags | DEBUG_FLAGS_CIRCULAR;
+	psStream->ui32Size = ui32Size * HOST_PAGESIZE;
+	psStream->ui32RPtr = 0;
+	psStream->ui32WPtr = 0;
+	psStream->ui32Marker = 0;
+	psStream->ui32InitPhaseWOff = 0;
+	DBGDrivSetStreamName(psStream, pszName, "");
+	PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Created stream with deinit name (%s)\n\r", psStream->szName));
+
+	/* Allocate memory for Init buffer */
+	psInitStream->pvBase = NULL;
+	if (bUseNonPagedMem4Buffers)
+	{
+		pvBase = HostNonPageablePageAlloc(ui32Size);
+	}
+	else
+	{
+		pvBase = HostPageablePageAlloc(ui32Size);
+	}
+
+	if (!pvBase)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc InitStream buffer\n\r"));
+		goto errCleanup;
+	}
+
+	/* Initialise the stream for the Init phase */
+	psInitStream->psNext = psInitStream->psInitStream = psInitStream->psDeinitStream = NULL;
+	psInitStream->ui32Flags = ui32Flags;
+	psInitStream->pvBase = pvBase;
+	psInitStream->ui32Size = ui32Size * HOST_PAGESIZE;
+	psInitStream->ui32RPtr = 0;
+	psInitStream->ui32WPtr = 0;
+	psInitStream->ui32Marker = 0;
+	psInitStream->ui32InitPhaseWOff = 0;
+	DBGDrivSetStreamName(psInitStream, pszName, "_Init");
+	PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Created stream with init name (%s)\n\r", psInitStream->szName));
+	psStream->psInitStream = psInitStream;
+
+	/* Allocate memory for Deinit buffer */
+	psStreamDeinit->pvBase = NULL;
+	if (bUseNonPagedMem4Buffers)
+	{
+		pvBase = HostNonPageablePageAlloc(1);
+	}
+	else
+	{
+		pvBase = HostPageablePageAlloc(1);
+	}
+
+	if (!pvBase)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc DeinitStream buffer\n\r"));
+		goto errCleanup;
+	}
+
+	/* Initialise the stream for the Deinit phase */
+	psStreamDeinit->psNext = psStreamDeinit->psInitStream = psStreamDeinit->psDeinitStream = NULL;
+	psStreamDeinit->pvBase = pvBase;
+	psStreamDeinit->ui32Flags = ui32Flags;
+	psStreamDeinit->ui32Size = HOST_PAGESIZE;
+	psStreamDeinit->ui32RPtr = 0;
+	psStreamDeinit->ui32WPtr = 0;
+	psStreamDeinit->ui32Marker = 0;
+	psStreamDeinit->ui32InitPhaseWOff = 0;
+	DBGDrivSetStreamName(psStreamDeinit, pszName, "_Deinit");
+	PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Created stream with deinit name (%s)\n\r", psStreamDeinit->szName));
+
+	psStream->psDeinitStream = psStreamDeinit;
+
+	/*
+		Insert into list.
+	*/
+	psStream->psNext = g_psStreamList;
+	g_psStreamList = psStream;
+
+	AddSIDEntry(psStream);
+
+	*phInit = psStream->psInitStream;
+	*phMain = psStream;
+	*phDeinit = psStream->psDeinitStream;
+
+	return IMG_TRUE;
+
+errCleanup:
+	if (bUseNonPagedMem4Buffers)
+	{
+		if (psStream) HostNonPageablePageFree(psStream->pvBase);
+		if (psInitStream) HostNonPageablePageFree(psInitStream->pvBase);
+		if (psStreamDeinit) HostNonPageablePageFree(psStreamDeinit->pvBase);
+	}
+	else
+	{
+		if (psStream) HostPageablePageFree(psStream->pvBase);
+		if (psInitStream) HostPageablePageFree(psInitStream->pvBase);
+		if (psStreamDeinit) HostPageablePageFree(psStreamDeinit->pvBase);
+	}
+	HostNonPageablePageFree(psStream);
+	psStream = psInitStream = psStreamDeinit = NULL;
+	return IMG_FALSE;
+}
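+
+/* Minimal usage sketch (hypothetical caller and arguments, not part of the
+ * driver); real callers live in the Services/PDump layers:
+ *
+ *	IMG_HANDLE hInit, hMain, hDeinit;
+ *
+ *	if (DBGDrivCreateStream("PDUMP", 0, 16, &hInit, &hMain, &hDeinit))
+ *	{
+ *		// write via DBGDrivWrite2(hMain, ...), then tear down with
+ *		// DBGDrivDestroyStream(hInit, hMain, hDeinit);
+ *	}
+ */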
+
+/*!****************************************************************************
+ @name		DBGDrivDestroyStream
+ @brief		Delete a stream and free its memory
+ @param		hInit, hMain, hDeinit - handles of the init/main/deinit streams to be removed
+ @return	none
+*****************************************************************************/
+void IMG_CALLCONV DBGDrivDestroyStream(IMG_HANDLE hInit,IMG_HANDLE hMain, IMG_HANDLE hDeinit)
+{
+	PDBG_STREAM psStreamInit = (PDBG_STREAM) hInit;
+	PDBG_STREAM psStream = (PDBG_STREAM) hMain;
+	PDBG_STREAM	psStreamDeinit = (PDBG_STREAM) hDeinit;
+	PDBG_STREAM	psStreamThis;
+	PDBG_STREAM	psStreamPrev;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "DBGDriv: Destroying stream %s\r\n", psStream->szName ));
+
+	/*
+		Validate buffer.
+	*/
+	if (!StreamValid(psStream))
+	{
+		return;
+	}
+
+	RemoveSIDEntry(psStream);
+
+	/*
+		Remove from linked list.
+	*/
+	psStreamThis = g_psStreamList;
+	psStreamPrev = 0;
+
+	while (psStreamThis)
+	{
+		if (psStreamThis == psStream)
+		{
+			if (psStreamPrev)
+			{
+				psStreamPrev->psNext = psStreamThis->psNext;
+			}
+			else
+			{
+				g_psStreamList = psStreamThis->psNext;
+			}
+
+			psStreamThis = 0;
+		}
+		else
+		{
+			psStreamPrev = psStreamThis;
+			psStreamThis = psStreamThis->psNext;
+		}
+	}
+
+	/*
+		And free its memory.
+	*/
+	if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+	{
+		HostNonPageablePageFree(psStream->pvBase);
+		HostNonPageablePageFree(psStreamInit->pvBase);
+		HostNonPageablePageFree(psStreamDeinit->pvBase);
+	}
+	else
+	{
+		HostPageablePageFree(psStream->pvBase);
+		HostPageablePageFree(psStreamInit->pvBase);
+		HostPageablePageFree(psStreamDeinit->pvBase);
+	}
+
+	/* Free the shared page used for the three stream tuple */
+	HostNonPageablePageFree(psStream);
+	psStream = psStreamInit = psStreamDeinit = NULL;
+
+	if (g_psStreamList == 0)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Stream list now empty" ));
+	}
+
+	return;
+}
+
+/*!****************************************************************************
+ @name		DBGDrivFindStream
+ @brief		Finds/resets a named stream
+ @param		pszName - stream name
+ @param		bResetStream - whether to reset the stream, e.g. to end pdump init phase
+ @return	stream pointer, or NULL if no stream matches
+*****************************************************************************/
+void * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
+{
+	PDBG_STREAM	psStream;
+	PDBG_STREAM	psThis;
+	IMG_UINT32	ui32Off;
+	IMG_BOOL	bAreSame;
+
+	psStream = 0;
+
+	PVR_DPF((PVR_DBGDRIV_MESSAGE, "PDump client connecting to %s %s",
+			pszName,
+			(bResetStream == IMG_TRUE) ? "with reset" : "no reset"));
+
+	/*
+		Scan buffer names for supplied one.
+	*/
+	for (psThis = g_psStreamList; psThis != NULL; psThis = psThis->psNext)
+	{
+		bAreSame = IMG_TRUE;
+		ui32Off = 0;
+
+		if (strlen(psThis->szName) == strlen(pszName))
+		{
+			while ((ui32Off < DEBUG_STREAM_NAME_MAX) && (psThis->szName[ui32Off] != 0) && (pszName[ui32Off] != 0) && bAreSame)
+			{
+				if (psThis->szName[ui32Off] != pszName[ui32Off])
+				{
+					bAreSame = IMG_FALSE;
+				}
+
+				ui32Off++;
+			}
+		}
+		else
+		{
+			bAreSame = IMG_FALSE;
+		}
+
+		if (bAreSame)
+		{
+			psStream = psThis;
+			break;
+		}
+	}
+
+	if(psStream)
+	{
+		psStream->psInitStream->ui32RPtr = 0;
+		psStream->psDeinitStream->ui32RPtr = 0;
+		psStream->ui32RPtr = 0;
+		if (bResetStream)
+		{
+			/* This will erase any data written to the main stream
+			 * before the client starts. */
+			psStream->ui32WPtr = 0;
+		}
+		psStream->ui32Marker = psStream->psInitStream->ui32Marker = 0;
+
+
+		/* mark init stream to prevent further reading by pdump client */
+		/* Check for possible race condition */
+		psStream->psInitStream->ui32InitPhaseWOff = psStream->psInitStream->ui32WPtr;
+
+		PVR_DPF((PVR_DBGDRIV_MESSAGE, "Set %s client marker bo %x",
+				psStream->szName,
+				psStream->psInitStream->ui32InitPhaseWOff));
+	}
+
+	return((void *) psStream);
+}
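+
+/* Connect-flow sketch (hypothetical): a PDump client attaching with
+ * DBGDrivFindStream("PDUMP", IMG_TRUE) rewinds all read pointers, discards
+ * unread main-stream data (ui32WPtr = 0) and latches ui32InitPhaseWOff so
+ * init-phase data written after this point is not replayed to the client.
+ */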
+
+static void IMG_CALLCONV DBGDrivInvalidateStream(PDBG_STREAM psStream)
+{
+	IMG_CHAR pszErrorMsg[] = "**OUTOFMEM\n";
+	IMG_UINT32 ui32Space;
+	IMG_UINT32 ui32Off = 0;
+	IMG_UINT32 ui32WPtr = psStream->ui32WPtr;
+	IMG_PUINT8 pui8Buffer = (IMG_UINT8 *) psStream->pvBase;
+
+	PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: An error occurred for stream %s", psStream->szName ));
+
+	/*
+		Validate buffer.
+	*/
+	/*
+	if (!StreamValid(psStream))
+	{
+		return;
+	}
+*/
+	/* Write what we can of the error message */
+	ui32Space = SpaceInStream(psStream);
+
+	/* Make sure there's space for termination character */
+	if(ui32Space > 0)
+	{
+		ui32Space--;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: Buffer full."));
+	}
+
+	while((pszErrorMsg[ui32Off] != 0) && (ui32Off < ui32Space))
+	{
+		pui8Buffer[ui32WPtr] = (IMG_UINT8)pszErrorMsg[ui32Off];
+		ui32Off++;
+		ui32WPtr++;
+	}
+	pui8Buffer[ui32WPtr++] = '\0';
+	psStream->ui32WPtr = ui32WPtr;
+
+	/* Buffer will accept no more params from Services/client driver */
+	psStream->ui32Flags |= DEBUG_FLAGS_READONLY;
+}
+
+/*!****************************************************************************
+ @name		InvalidateAllStreams
+ @brief		invalidate all streams in list
+ @return	none
+*****************************************************************************/
+static void InvalidateAllStreams(void)
+{
+	PDBG_STREAM psStream = g_psStreamList;
+	while (psStream != NULL)
+	{
+		DBGDrivInvalidateStream(psStream);
+		DBGDrivInvalidateStream(psStream->psInitStream);
+		DBGDrivInvalidateStream(psStream->psDeinitStream);
+		psStream = psStream->psNext;
+	}
+	return;
+}
+
+/*!****************************************************************************
+ @name		DBGDrivWrite2
+ @brief		Copies data from a buffer into selected (expandable) stream.
+ @param		psStream - stream for output
+ @param		pui8InBuf - input buffer
+ @param		ui32InBuffSize - size of input
+ @return	bytes copied, 0 on recoverable error, 0xFFFFFFFF on unrecoverable error
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+
+	/*
+		Validate buffer.
+	*/
+	if (!StreamValidForWrite(psStream))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite2: stream not valid"));
+		return(0xFFFFFFFFUL);
+	}
+
+	PVR_DPF((PVR_DBGDRIV_MESSAGE, "Recv(exp) %d b for %s: Roff = %x, WOff = %x",
+			ui32InBuffSize,
+			psStream->szName,
+			psStream->ui32RPtr,
+			psStream->ui32WPtr));
+
+	return( WriteExpandingBuffer(psStream, pui8InBuf, ui32InBuffSize) );
+}
+
+/*!****************************************************************************
+ @name		DBGDrivRead
+ @brief		Read from debug driver buffers
+ @param		psMainStream - stream
+ @param		ui32BufID - one of the DEBUG_READ_BUFID values indicating which buffer
+ @param		ui32OutBuffSize - available space in client buffer
+ @param		pui8OutBuf - output buffer
+ @return	bytes read, 0 if failure occurred
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psMainStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
+{
+	IMG_UINT32 ui32Data;
+	DBG_STREAM *psStream;
+
+	/*
+		Validate buffer.
+	*/
+	if (!StreamValidForRead(psMainStream))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DBGDrivRead: buffer %p is invalid", psMainStream));
+		return(0);
+	}
+
+	if(ui32BufID == DEBUG_READ_BUFID_INIT)
+	{
+		psStream = psMainStream->psInitStream;
+	}
+	else if (ui32BufID == DEBUG_READ_BUFID_DEINIT)
+	{
+		psStream = psMainStream->psDeinitStream;
+	}
+	else
+	{
+		psStream = psMainStream;
+	}
+
+	/* Don't read beyond the init phase marker point */
+	if (psStream->ui32RPtr == psStream->ui32WPtr ||
+		((psStream->ui32InitPhaseWOff > 0) &&
+		 (psStream->ui32RPtr >= psStream->ui32InitPhaseWOff)) )
+	{
+		return(0);
+	}
+
+	/*
+		Get amount of data in buffer.
+	*/
+	if (psStream->ui32RPtr <= psStream->ui32WPtr)
+	{
+		ui32Data = psStream->ui32WPtr - psStream->ui32RPtr;
+	}
+	else
+	{
+		ui32Data = psStream->ui32WPtr + (psStream->ui32Size - psStream->ui32RPtr);
+	}
+
+	/*
+		Don't read beyond the init phase marker point
+	*/
+	if ((psStream->ui32InitPhaseWOff > 0) &&
+		(psStream->ui32InitPhaseWOff < psStream->ui32WPtr))
+	{
+		ui32Data = psStream->ui32InitPhaseWOff - psStream->ui32RPtr;
+	}
+
+	/*
+		Only transfer what target buffer can handle.
+	*/
+	if (ui32Data > ui32OutBuffSize)
+	{
+		ui32Data = ui32OutBuffSize;
+	}
+
+	PVR_DPF((PVR_DBGDRIV_MESSAGE, "Send %x b from %s: Roff = %x, WOff = %x",
+			ui32Data,
+			psStream->szName,
+			psStream->ui32RPtr,
+			psStream->ui32WPtr));
+
+	/*
+		Split copy into two bits or one depending on W/R position.
+	*/
+	if ((psStream->ui32RPtr + ui32Data) > psStream->ui32Size)
+	{	/* Calc block 1 and block 2 sizes */
+		IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32RPtr;
+		IMG_UINT32 ui32B2 = ui32Data - ui32B1;
+
+		/* Copy up to end of circular buffer */
+		HostMemCopy((void *) pui8OutBuf,
+				(void *)((uintptr_t)psStream->pvBase + psStream->ui32RPtr),
+				ui32B1);
+
+		/* Copy from start of circular buffer */
+		HostMemCopy((void *)(pui8OutBuf + ui32B1),
+				psStream->pvBase,
+				ui32B2);
+
+		/* Update read pointer now that we've copied the data out */
+		psStream->ui32RPtr = ui32B2;
+	}
+	else
+	{	/* Copy data from wherever */
+		HostMemCopy((void *) pui8OutBuf,
+				(void *)((uintptr_t)psStream->pvBase + psStream->ui32RPtr),
+				ui32Data);
+
+		/* Update read pointer now that we've copied the data out */
+		psStream->ui32RPtr += ui32Data;
+
+		/* Check for wrapping */
+		if ((psStream->ui32RPtr != psStream->ui32WPtr) &&
+			(psStream->ui32RPtr >= psStream->ui32Size))
+		{
+			psStream->ui32RPtr = 0;
+		}
+	}
+
+	return(ui32Data);
+}
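+
+/* Illustrative drain loop (hypothetical caller; ui32MainBufID stands for any
+ * value that is neither DEBUG_READ_BUFID_INIT nor DEBUG_READ_BUFID_DEINIT,
+ * which selects the main stream above):
+ *
+ *	IMG_UINT8 aui8Buf[4096];
+ *	IMG_UINT32 ui32Got;
+ *
+ *	do
+ *	{
+ *		ui32Got = DBGDrivRead(psMainStream, ui32MainBufID,
+ *				      sizeof(aui8Buf), aui8Buf);
+ *		// consume ui32Got bytes
+ *	} while (ui32Got != 0);
+ */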
+
+/*!****************************************************************************
+ @name		DBGDrivSetMarker
+ @brief		Sets the marker in the stream to split output files
+ @param		psStream, ui32Marker
+ @return	nothing
+*****************************************************************************/
+void IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+	/*
+		Validate buffer
+	*/
+	if (!StreamValid(psStream))
+	{
+		return;
+	}
+
+	/* Called by PDump client to reset the marker to zero after a file split */
+	if ((ui32Marker == 0) && (psStream->ui32Marker == 0))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DBGDrivSetMarker: Client resetting marker that is already zero!"));
+	}
+	/* Called by pvrsrvkm to set the marker to signal a file split is required */
+	if ((ui32Marker != 0) && (psStream->ui32Marker != 0))
+	{
+		/* In this case a previous split request is still outstanding. The
+		 * client has not yet actioned and acknowledged the previous
+		 * marker. This may be an error if the client does not catch-up and
+		 * the stream's written data is allowed to pass the max file
+		 * size again. If this happens the PDump is invalid as the offsets
+		 * from the script file will be incorrect.
+		 */
+		PVR_DPF((PVR_DBG_ERROR, "DBGDrivSetMarker: Server setting marker that is already set!"));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "DBGDrivSetMarker: Setting stream split marker to %d (was %d)", ui32Marker, psStream->ui32Marker));
+	}
+
+	psStream->ui32Marker = ui32Marker;
+}
+
+/*!****************************************************************************
+ @name		DBGDrivGetMarker
+ @brief 	Gets the marker in the stream to split output files
+ @param	 	psStream - stream
+ @return	marker offset
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream)
+{
+	/*
+		Validate buffer
+	*/
+	if (!StreamValid(psStream))
+	{
+		return 0;
+	}
+
+	return psStream->ui32Marker;
+}
+
+/*!****************************************************************************
+ @name		DBGDrivGetServiceTable
+ @brief		get jump table for Services driver
+ @return	pointer to jump table
+*****************************************************************************/
+void * IMG_CALLCONV DBGDrivGetServiceTable(void)
+{
+	return &g_sDBGKMServices;
+}
+
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+/*!****************************************************************************
+ @name		DBGDrivWaitForEvent
+ @brief		waits for an event
+ @param		eEvent - debug driver event
+ @return	void
+*****************************************************************************/
+void IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent)
+{
+	HostWaitForEvent(eEvent);
+}
+#endif
+
+/*!****************************************************************************
+ @name		DBGDrivGetCtrlState
+ @brief		Gets a state value from the debug driver or stream
+ @param		psStream - stream
+ @param		ui32StateID - state ID
+ @return	requested state value, or 0xFFFFFFFF when there is none
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID)
+{
+	/* Validate buffer */
+	if (!StreamValid(psStream))
+	{
+		return (0xFFFFFFFF);
+	}
+
+	/* Retrieve the state asked for */
+	switch (ui32StateID)
+	{
+	case DBG_GET_STATE_FLAG_IS_READONLY:
+		return ((psStream->ui32Flags & DEBUG_FLAGS_READONLY) != 0);
+
+	case 0xFE: /* Dump the current stream state */
+		PVR_DPF((PVR_DBG_CALLTRACE,
+				 "------ PDUMP DBGDriv: psStream( %p ) ( -- %s -- ) ui32Flags( %x )",
+				 psStream, psStream->szName, psStream->ui32Flags));
+		PVR_DPF((PVR_DBG_CALLTRACE,
+				 "------ PDUMP DBGDriv: psStream->pvBase( %p ) psStream->ui32Size( %u )",
+				 psStream->pvBase, psStream->ui32Size));
+		PVR_DPF((PVR_DBG_CALLTRACE,
+				 "------ PDUMP DBGDriv: psStream->ui32RPtr( %u ) psStream->ui32WPtr( %u )",
+				 psStream->ui32RPtr, psStream->ui32WPtr));
+		PVR_DPF((PVR_DBG_CALLTRACE,
+				 "------ PDUMP DBGDriv: psStream->ui32Marker( %u ) psStream->ui32InitPhaseWOff( %u )",
+				 psStream->ui32Marker, psStream->ui32InitPhaseWOff));
+		if (psStream->psInitStream)
+		{
+			PVR_DPF((PVR_DBG_CALLTRACE,
+					 "-------- PDUMP DBGDriv: psInitStream( %p ) ( -- %s -- ) ui32Flags( %x )",
+					 psStream->psInitStream, psStream->psInitStream->szName, psStream->ui32Flags));
+			PVR_DPF((PVR_DBG_CALLTRACE,
+					 "-------- PDUMP DBGDriv: psInitStream->pvBase( %p ) psInitStream->ui32Size( %u )",
+					 psStream->psInitStream->pvBase, psStream->psInitStream->ui32Size));
+			PVR_DPF((PVR_DBG_CALLTRACE,
+					 "-------- PDUMP DBGDriv: psInitStream->ui32RPtr( %u ) psInitStream->ui32WPtr( %u )",
+					 psStream->psInitStream->ui32RPtr, psStream->psInitStream->ui32WPtr));
+			PVR_DPF((PVR_DBG_CALLTRACE,
+					 "-------- PDUMP DBGDriv: psInitStream->ui32Marker( %u ) psInitStream->ui32InitPhaseWOff( %u ) ",
+					 psStream->psInitStream->ui32Marker, psStream->psInitStream->ui32InitPhaseWOff));
+		}
+
+		break;
+
+	case 0xFF: /* Dump driver state not in a stream */
+		{
+			PVR_DPF((PVR_DBG_CALLTRACE,
+					 "------ PDUMP DBGDriv: g_psStreamList( head %p ) g_pvAPIMutex( %p ) g_PDumpCurrentFrameNo( %u )",
+					 g_psStreamList, g_pvAPIMutex, g_PDumpCurrentFrameNo));
+		}
+		break;
+
+	default:
+		PVR_ASSERT(0);
+	}
+
+	return (0xFFFFFFFF);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(void)
+{
+	return g_PDumpCurrentFrameNo;
+}
+
+void IMG_CALLCONV DBGDrivSetFrame(IMG_UINT32 ui32Frame)
+{
+	g_PDumpCurrentFrameNo = ui32Frame;
+}
+
+
+/*!****************************************************************************
+ @name		ExpandStreamBuffer
+ @brief		allocates a new buffer when the current one is full
+ @param		psStream - stream
+ @param		ui32NewSize - new size
+ @return	IMG_TRUE - if allocation succeeded, IMG_FALSE - if not
+*****************************************************************************/
+static IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize)
+{
+	void *	pvNewBuf;
+	IMG_UINT32	ui32NewSizeInPages;
+	IMG_UINT32	ui32NewWOffset;
+	IMG_UINT32	ui32NewROffset;
+	IMG_UINT32	ui32SpaceInOldBuf;
+
+	/*
+		First check new size is bigger than existing size
+	*/
+	if (psStream->ui32Size >= ui32NewSize)
+	{
+		return IMG_FALSE;
+	}
+
+	/*
+		Calc space in old buffer
+	*/
+	ui32SpaceInOldBuf = SpaceInStream(psStream);
+
+	/*
+		Allocate new buffer
+	*/
+	ui32NewSizeInPages = ((ui32NewSize + 0xfffUL) & ~0xfffUL) / HOST_PAGESIZE;
+
+	if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+	{
+		pvNewBuf = HostNonPageablePageAlloc(ui32NewSizeInPages);
+	}
+	else
+	{
+		pvNewBuf = HostPageablePageAlloc(ui32NewSizeInPages);
+	}
+
+	if (pvNewBuf == NULL)
+	{
+		return IMG_FALSE;
+	}
+
+	if ((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) != 0)
+	{
+		/*
+			Copy over old buffer to new one, we place data at start of buffer
+			even if Read offset is not at start of buffer
+		*/
+		if (psStream->ui32RPtr <= psStream->ui32WPtr)
+		{
+			/*
+				No wrapping of data so copy data to start of new buffer
+			*/
+			HostMemCopy(pvNewBuf,
+					(void *)((uintptr_t)psStream->pvBase + psStream->ui32RPtr),
+					psStream->ui32WPtr - psStream->ui32RPtr);
+		}
+		else
+		{
+			IMG_UINT32	ui32FirstCopySize;
+
+			/*
+				The data has wrapped around the buffer, copy beginning of buffer first
+			*/
+			ui32FirstCopySize = psStream->ui32Size - psStream->ui32RPtr;
+
+			HostMemCopy(pvNewBuf,
+					(void *)((uintptr_t)psStream->pvBase + psStream->ui32RPtr),
+					ui32FirstCopySize);
+
+			/*
+				Now second half
+			*/
+			HostMemCopy((void *)((uintptr_t)pvNewBuf + ui32FirstCopySize),
+					(void *)(IMG_PBYTE)psStream->pvBase,
+					psStream->ui32WPtr);
+		}
+		ui32NewROffset = 0;
+	}
+	else
+	{
+		/* Copy everything in the old buffer to the new one */
+		HostMemCopy(pvNewBuf, psStream->pvBase,	psStream->ui32WPtr);
+		ui32NewROffset = psStream->ui32RPtr;
+	}
+
+	/*
+		New Write offset is at end of data
+	*/
+	ui32NewWOffset = psStream->ui32Size - ui32SpaceInOldBuf;
+
+	/*
+		Free old buffer
+	*/
+	if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+	{
+		HostNonPageablePageFree(psStream->pvBase);
+	}
+	else
+	{
+		HostPageablePageFree(psStream->pvBase);
+	}
+
+	/*
+		Now set new params up
+	*/
+	psStream->pvBase = pvNewBuf;
+	psStream->ui32RPtr = ui32NewROffset;
+	psStream->ui32WPtr = ui32NewWOffset;
+	psStream->ui32Size = ui32NewSizeInPages * HOST_PAGESIZE;
+
+	return IMG_TRUE;
+}
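+
+/* Worked example of the rounding above (hypothetical sizes, 4 KB pages): a
+ * request for ui32NewSize = 5000 bytes rounds up to
+ * ((5000 + 0xfff) & ~0xfff) / HOST_PAGESIZE = 2 pages, so the stream ends
+ * up with ui32Size = 8192 bytes.
+ */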
+
+/*!****************************************************************************
+ @name		SpaceInStream
+ @brief		remaining space in stream
+ @param		psStream - stream
+ @return	bytes remaining
+*****************************************************************************/
+static IMG_UINT32 SpaceInStream(PDBG_STREAM psStream)
+{
+	IMG_UINT32	ui32Space;
+
+	if ((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) != 0)
+	{
+		/* Allow overwriting the buffer which was already read */
+		if (psStream->ui32RPtr > psStream->ui32WPtr)
+		{
+			ui32Space = psStream->ui32RPtr - psStream->ui32WPtr;
+		}
+		else
+		{
+			ui32Space = psStream->ui32RPtr + (psStream->ui32Size - psStream->ui32WPtr);
+		}
+	}
+	else
+	{
+		/* Don't overwrite anything */
+		ui32Space = psStream->ui32Size - psStream->ui32WPtr;
+	}
+
+	return ui32Space;
+}
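+
+/* Worked example (hypothetical): in a 4096-byte circular stream with
+ * ui32RPtr == 100 and ui32WPtr == 4000 the free space is
+ * 100 + (4096 - 4000) = 196 bytes; with ui32RPtr == 4000 and
+ * ui32WPtr == 100 it is 4000 - 100 = 3900 bytes. A non-circular stream
+ * simply reports the bytes left between ui32WPtr and the end of the buffer.
+ */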
+
+
+/*!****************************************************************************
+ @name		DestroyAllStreams
+ @brief		delete all streams in list
+ @return	none
+*****************************************************************************/
+void DestroyAllStreams(void)
+{
+	PDBG_STREAM psStream = g_psStreamList;
+	PDBG_STREAM psStreamToFree;
+
+	while (psStream != NULL)
+	{
+		psStreamToFree = psStream;
+		psStream = psStream->psNext;
+		DBGDrivDestroyStream(psStreamToFree->psInitStream, psStreamToFree, psStreamToFree->psDeinitStream);
+	}
+	g_psStreamList = NULL;
+	return;
+}
+
+/******************************************************************************
+ End of file (DBGDRIV.C)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/dbgdriv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/dbgdriv.h
new file mode 100644
index 0000000..f310b42
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/dbgdriv.h
@@ -0,0 +1,120 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DBGDRIV_
+#define _DBGDRIV_
+
+/*****************************************************************************
+ The odd constant or two
+*****************************************************************************/
+
+#define DBGDRIV_VERSION 	0x100
+#define MAX_PROCESSES 		2
+#define BLOCK_USED			0x01
+#define BLOCK_LOCKED		0x02
+#define DBGDRIV_MONOBASE	0x000B0000
+
+
+/*****************************************************************************
+ * OS-specific declarations and init/cleanup functions
+*****************************************************************************/
+extern void *	g_pvAPIMutex;
+
+extern IMG_INT dbgdrv_init(void);
+extern void dbgdrv_cleanup(void);
+
+/*****************************************************************************
+ Internal debug driver core functions
+*****************************************************************************/
+/* Called by WDDM debug driver win7/hostfunc.c */
+IMG_BOOL IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Pages,
+											IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+
+/* Called by Linux debug driver main.c to allow the API mutex lock to be used
+ * to protect the common IOCTL read buffer while avoiding deadlock in the Ext
+ * layer
+ */
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID,
+									IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
+
+/* Used in ioctl.c in DBGDIOCDrivGetServiceTable() which is called in WDDM PDump files */
+void * IMG_CALLCONV DBGDrivGetServiceTable(void);
+
+/* Used in WDDM version of debug driver win7/main.c */
+void DestroyAllStreams(void);
+
+/*****************************************************************************
+ Function prototypes
+*****************************************************************************/
+IMG_UINT32 AtoI(IMG_CHAR *szIn);
+
+void HostMemSet(void *pvDest,IMG_UINT8 ui8Value,IMG_UINT32 ui32Size);
+void HostMemCopy(void *pvDest,void *pvSrc,IMG_UINT32 ui32Size);
+
+/*****************************************************************************
+ Secure handle Function prototypes
+*****************************************************************************/
+IMG_SID PStream2SID(PDBG_STREAM psStream);
+PDBG_STREAM SID2PStream(IMG_SID hStream);
+IMG_BOOL AddSIDEntry(PDBG_STREAM psStream);
+IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream);
+
+/*****************************************************************************
+ Declarations for IOCTL Service table and KM table entry points
+*****************************************************************************/
+IMG_BOOL   IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+void   IMG_CALLCONV ExtDBGDrivDestroyStream(IMG_HANDLE hInit, IMG_HANDLE hMain, IMG_HANDLE hDeinit);
+void * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 *pui8OutBuf);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize);
+void   IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream);
+void   IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(void);
+void   IMG_CALLCONV ExtDBGDrivSetFrame(IMG_UINT32 ui32Frame);
+
+#endif
+
+/*****************************************************************************
+ End of file (DBGDRIV.H)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/dbgdriv_handle.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/dbgdriv_handle.c
new file mode 100644
index 0000000..3388c40
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/dbgdriv_handle.c
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Handle Manager
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provide resource handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_defs.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+
+/* max number of streams held in SID info table */
+#define MAX_SID_ENTRIES		8
+
+typedef struct _SID_INFO
+{
+	PDBG_STREAM	psStream;
+} SID_INFO, *PSID_INFO;
+
+static SID_INFO gaSID_Xlat_Table[MAX_SID_ENTRIES];
+
+IMG_SID PStream2SID(PDBG_STREAM psStream)
+{
+	if (psStream != (PDBG_STREAM)NULL)
+	{
+		IMG_INT32 iIdx;
+
+		for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+		{
+			if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+			{
+				/* idx is one based */
+				return (IMG_SID)iIdx+1;
+			}
+		}
+	}
+
+	return (IMG_SID)0;
+}
+
+
+PDBG_STREAM SID2PStream(IMG_SID hStream)
+{
+	/* changed to zero based */
+	IMG_INT32 iIdx = (IMG_INT32)hStream-1;
+
+	if (iIdx >= 0 && iIdx < MAX_SID_ENTRIES)
+	{
+		return gaSID_Xlat_Table[iIdx].psStream;
+	}
+	else
+	{
+		return (PDBG_STREAM)NULL;
+	}
+}
+
+
+IMG_BOOL AddSIDEntry(PDBG_STREAM psStream)
+{
+	if (psStream != (PDBG_STREAM)NULL)
+	{
+		IMG_INT32 iIdx;
+
+		for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+		{
+			if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+			{
+				/* already created */
+				return IMG_TRUE;
+			}
+
+			if (gaSID_Xlat_Table[iIdx].psStream == (PDBG_STREAM)NULL)
+			{
+				/* free entry */
+				gaSID_Xlat_Table[iIdx].psStream = psStream;
+				return IMG_TRUE;
+			}
+		}
+	}
+
+	return IMG_FALSE;
+}
+
+IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream)
+{
+	if (psStream != (PDBG_STREAM)NULL)
+	{
+		IMG_INT32 iIdx;
+
+		for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+		{
+			if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+			{
+				gaSID_Xlat_Table[iIdx].psStream = (PDBG_STREAM)NULL;
+				return IMG_TRUE;
+			}
+		}
+	}
+
+	return IMG_FALSE;
+}
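+
+/* Illustrative round trip (hypothetical): after AddSIDEntry(psStream) fills
+ * slot 0 of gaSID_Xlat_Table, PStream2SID(psStream) returns the one-based
+ * handle 1 and SID2PStream(1) maps it back to psStream; an IMG_SID of 0 is
+ * therefore always invalid.
+ */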
+
+
+/******************************************************************************
+ End of file (handle.c)
+******************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/dbgdriv_ioctl.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/dbgdriv_ioctl.h
new file mode 100644
index 0000000..0cc46eb
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/dbgdriv_ioctl.h
@@ -0,0 +1,58 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DBGDRIV_IOCTL_H_
+#define _DBGDRIV_IOCTL_H_
+
+#include "dbgdrvif_srv5.h"
+
+
+/* Share this debug driver global with the OS layer so that IOCTL calls
+ * coming from the OS enter the common table of entry points.
+ */
+extern IMG_UINT32 (*g_DBGDrivProc[DEBUG_SERVICE_MAX_API])(void *, void *, IMG_BOOL);
+
+
+#endif /* _DBGDRIV_IOCTL_H_ */
+
+/*****************************************************************************
+ End of file
+ *****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/hostfunc.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/hostfunc.h
new file mode 100644
index 0000000..b677cfd
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/hostfunc.h
@@ -0,0 +1,105 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _HOSTFUNC_
+#define _HOSTFUNC_
+
+/*****************************************************************************
+ Defines
+*****************************************************************************/
+#define HOST_PAGESIZE			(4096UL)
+#define DBG_MEMORY_INITIALIZER	(0xe2)
+
+/*****************************************************************************
+ Function prototypes
+*****************************************************************************/
+IMG_UINT32 HostReadRegistryDWORDFromString(IMG_CHAR *pcKey, IMG_CHAR *pcValueName, IMG_UINT32 *pui32Data);
+
+void * HostPageablePageAlloc(IMG_UINT32 ui32Pages);
+void HostPageablePageFree(void * pvBase);
+void * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages);
+void HostNonPageablePageFree(void * pvBase);
+
+void * HostMapKrnBufIntoUser(void * pvKrnAddr, IMG_UINT32 ui32Size, void * *ppvMdl);
+void HostUnMapKrnBufFromUser(void * pvUserAddr, void * pvMdl, void * pvProcess);
+
+void HostCreateRegDeclStreams(void);
+
+/* Direct macros for Linux to avoid LockDep false-positives from occurring */
+#if defined(LINUX) && defined(__KERNEL__)
+
+#undef HOST_PAGESIZE
+#define HOST_PAGESIZE (PAGE_SIZE)
+
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#define HostCreateMutex(void) ({ \
+	struct mutex* pMutex = NULL; \
+	pMutex = kmalloc(sizeof(struct mutex), GFP_KERNEL); \
+	if (pMutex) { mutex_init(pMutex); }; \
+	pMutex;})
+#define HostDestroyMutex(hLock) ({mutex_destroy((hLock)); kfree((hLock)); PVRSRV_OK;})
+
+#define HostAquireMutex(hLock) ({mutex_lock((hLock)); PVRSRV_OK;})
+#define HostReleaseMutex(hLock) ({mutex_unlock((hLock)); PVRSRV_OK;})
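+
+/* Illustrative use of the macros above (hypothetical caller):
+ *
+ *	struct mutex *psLock = HostCreateMutex();
+ *	if (psLock)
+ *	{
+ *		HostAquireMutex(psLock);
+ *		// critical section
+ *		HostReleaseMutex(psLock);
+ *		HostDestroyMutex(psLock);
+ *	}
+ */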
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+void * HostCreateMutex(void);
+void HostAquireMutex(void * pvMutex);
+void HostReleaseMutex(void * pvMutex);
+void HostDestroyMutex(void * pvMutex);
+
+#endif
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+IMG_INT32 HostCreateEventObjects(void);
+void HostWaitForEvent(DBG_EVENT eEvent);
+void HostSignalEvent(DBG_EVENT eEvent);
+void HostDestroyEventObjects(void);
+#endif	/*defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+
+#endif
+
+/*****************************************************************************
+ End of file (HOSTFUNC.H)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/ioctl.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/ioctl.c
new file mode 100644
index 0000000..85d1fa0
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/common/ioctl.c
@@ -0,0 +1,315 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(_WIN32)
+#pragma  warning(disable:4201)
+#pragma  warning(disable:4214)
+#pragma  warning(disable:4115)
+#pragma  warning(disable:4514)
+
+#include <ntddk.h>
+#include <windef.h>
+
+#endif /* _WIN32 */
+
+#ifdef LINUX
+#include <asm/uaccess.h>
+#include "pvr_uaccess.h"
+#endif /* LINUX */
+
+#include "img_types.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+#include "dbgdriv_ioctl.h"
+#include "hostfunc.h"
+
+#ifdef _WIN32
+#pragma  warning(default:4214)
+#pragma  warning(default:4115)
+#endif /* _WIN32 */
+
+/*****************************************************************************
+ Code
+*****************************************************************************/
+
+/*****************************************************************************
+ FUNCTION	:	DBGDIOCDrivGetServiceTable
+
+ PURPOSE	:	Returns the KM services jump table to the caller
+
+ PARAMETERS	:	pvInBuffer - unused, pvOutBuffer - receives the table pointer
+
+ RETURNS	:	IMG_TRUE
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetServiceTable(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+	void **ppvOut;
+
+	PVR_UNREFERENCED_PARAMETER(pvInBuffer);
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+	ppvOut = (void **) pvOutBuffer;
+
+	*ppvOut = DBGDrivGetServiceTable();
+
+	return IMG_TRUE;
+}
+
+#if defined(__QNXNTO__)
+/*****************************************************************************
+ FUNCTION	:	DBGDIOCDrivCreateStream
+
+ PURPOSE	:	Creates a debug stream on behalf of the client
+
+ PARAMETERS	:	pvInBuffer - DBG_IN_CREATESTREAM, pvOutBuffer - DBG_OUT_CREATESTREAM
+
+ RETURNS	:	IMG_TRUE on success, IMG_FALSE on failure
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivCreateStream(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+	PDBG_IN_CREATESTREAM psIn;
+	PDBG_OUT_CREATESTREAM psOut;
+
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+
+	psIn = (PDBG_IN_CREATESTREAM) pvInBuffer;
+	psOut = (PDBG_OUT_CREATESTREAM) pvOutBuffer;
+
+	return ExtDBGDrivCreateStream(psIn->u.pszName, DEBUG_FLAGS_NO_BUF_EXPANDSION, psIn->ui32Pages, &psOut->phInit, &psOut->phMain, &psOut->phDeinit);
+}
+#endif
+
+/*****************************************************************************
+ FUNCTION	:	DBGDIOCDrivGetStream
+
+ PURPOSE	:	Looks up a named stream and returns its SID
+
+ PARAMETERS	:	pvInBuffer - DBG_IN_FINDSTREAM, pvOutBuffer - receives the IMG_SID
+
+ RETURNS	:	IMG_TRUE
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetStream(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+	PDBG_IN_FINDSTREAM psParams;
+	IMG_SID *phStream;
+
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+
+	psParams	= (PDBG_IN_FINDSTREAM)pvInBuffer;
+	phStream	= (IMG_SID *)pvOutBuffer;
+
+	/* Ensure that the name will be NULL terminated */
+	psParams->pszName[DEBUG_STREAM_NAME_MAX-1] = '\0';
+
+	*phStream = PStream2SID(ExtDBGDrivFindStream(psParams->pszName, psParams->bResetStream));
+
+	return IMG_TRUE;
+}
+
+/*****************************************************************************
+ FUNCTION	:	DBGDIOCDrivRead
+
+ PURPOSE	:	Reads from the stream identified by SID into the client buffer
+
+ PARAMETERS	:	pvInBuffer - DBG_IN_READ, pvOutBuffer - receives the bytes-copied count
+
+ RETURNS	:	IMG_TRUE on success, IMG_FALSE for an invalid SID
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivRead(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+	IMG_UINT32   *pui32BytesCopied;
+	PDBG_IN_READ  psInParams;
+	PDBG_STREAM   psStream;
+	IMG_UINT8    *pui8ReadBuffer;
+
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+
+	psInParams = (PDBG_IN_READ) pvInBuffer;
+	pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+	pui8ReadBuffer = WIDEPTR_GET_PTR(psInParams->pui8OutBuffer, bCompat);
+
+	psStream = SID2PStream(psInParams->hStream);
+
+	if (psStream != (PDBG_STREAM)NULL)
+	{
+		*pui32BytesCopied = ExtDBGDrivRead(psStream,
+									   psInParams->ui32BufID,
+									   psInParams->ui32OutBufferSize,
+									   pui8ReadBuffer);
+		return IMG_TRUE;
+	}
+	else
+	{
+		/* invalid SID */
+		*pui32BytesCopied = 0;
+		return IMG_FALSE;
+	}
+}
+
+/*****************************************************************************
+ FUNCTION	: DBGDIOCDrivSetMarker
+
+ PURPOSE	: Sets the marker in the stream to split output files
+
+ PARAMETERS	: pvInBuffer, pvOutBuffer
+
+ RETURNS	: success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivSetMarker(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+	PDBG_IN_SETMARKER	psParams;
+	PDBG_STREAM			psStream;
+
+	psParams = (PDBG_IN_SETMARKER) pvInBuffer;
+	PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+
+	psStream = SID2PStream(psParams->hStream);
+	if (psStream != (PDBG_STREAM)NULL)
+	{
+		ExtDBGDrivSetMarker(psStream, psParams->ui32Marker);
+		return IMG_TRUE;
+	}
+	else
+	{
+		/* invalid SID */
+		return IMG_FALSE;
+	}
+}
+
+/*****************************************************************************
+ FUNCTION	: DBGDIOCDrivGetMarker
+
+ PURPOSE	: Gets the marker in the stream to split output files
+
+ PARAMETERS	: pvInBuffer, pvOutBuffer
+
+ RETURNS	: success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetMarker(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+	PDBG_STREAM  psStream;
+	IMG_UINT32  *pui32Current;
+
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+
+	pui32Current = (IMG_UINT32 *) pvOutBuffer;
+
+	psStream = SID2PStream(*(IMG_SID *)pvInBuffer);
+	if (psStream != (PDBG_STREAM)NULL)
+	{
+		*pui32Current = ExtDBGDrivGetMarker(psStream);
+		return IMG_TRUE;
+	}
+	else
+	{
+		/* invalid SID */
+		*pui32Current = 0;
+		return IMG_FALSE;
+	}
+}
+
+
+/*****************************************************************************
+ FUNCTION	:	DBGDIOCDrivWaitForEvent
+
+ PURPOSE	:	Blocks until the requested debug driver event is signalled
+
+ PARAMETERS	:	pvInBuffer - event ID, pvOutBuffer - unused
+
+ RETURNS	:	IMG_TRUE
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivWaitForEvent(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+	DBG_EVENT eEvent = (DBG_EVENT)(*(IMG_UINT32 *)pvInBuffer);
+
+	PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+
+	ExtDBGDrivWaitForEvent(eEvent);
+
+	return IMG_TRUE;
+}
+
+
+/*****************************************************************************
+ FUNCTION	: DBGDIOCDrivGetFrame
+
+ PURPOSE	: Gets the current PDump frame number
+
+ PARAMETERS	: pvInBuffer, pvOutBuffer
+
+ RETURNS	: success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetFrame(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+	IMG_UINT32  *pui32Current;
+
+	PVR_UNREFERENCED_PARAMETER(pvInBuffer);
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+
+	pui32Current = (IMG_UINT32 *) pvOutBuffer;
+
+	*pui32Current = ExtDBGDrivGetFrame();
+
+	return IMG_TRUE;
+}
+
+/*
+	ioctl interface jump table.
+	Accessed from the UM debug driver client
+*/
+IMG_UINT32 (*g_DBGDrivProc[DEBUG_SERVICE_MAX_API])(void *, void *, IMG_BOOL) =
+{
+	DBGDIOCDrivGetServiceTable, /* WDDM only for KMD to retrieve address from DBGDRV, Not used by umdbgdrvlnx */
+	DBGDIOCDrivGetStream,
+	DBGDIOCDrivRead,
+	DBGDIOCDrivSetMarker,
+	DBGDIOCDrivGetMarker,
+	DBGDIOCDrivWaitForEvent,
+	DBGDIOCDrivGetFrame,
+#if defined(__QNXNTO__)
+	DBGDIOCDrivCreateStream
+#endif
+};
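+
+/* Dispatch sketch (hypothetical OS-layer caller): the IOCTL handler maps a
+ * command number to a zero-based index into this table and invokes
+ *
+ *	ui32Ret = g_DBGDrivProc[ui32Cmd](pvIn, pvOut, bCompat);
+ *
+ * so the ordering here must match the command numbering used by the UM
+ * debug-driver client.
+ */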
+
+/*****************************************************************************
+ End of file (ioctl.c)
+*****************************************************************************/
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/linux/hostfunc.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/linux/hostfunc.c
new file mode 100644
index 0000000..b0bf414
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/linux/hostfunc.c
@@ -0,0 +1,216 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug driver file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include <linux/vmalloc.h>
+#include <linux/mutex.h>
+#include <linux/hardirq.h>
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#endif	/* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+#include "dbgdrvif_srv5.h"
+#include "hostfunc.h"
+#include "dbgdriv.h"
+
+
+/*!
+******************************************************************************
+
+ @Function	HostMemSet
+
+ @Description Function that does the same as the C memset() function
+
+ @Modified *pvDest :	pointer to start of buffer to be set
+
+ @Input    ui8Value:	value to set each byte to
+
+ @Input    ui32Size :	number of bytes to set
+
+ @Return   void
+
+******************************************************************************/
+void HostMemSet(void *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
+{
+	memset(pvDest, (int) ui8Value, (size_t) ui32Size);
+}
+
+/*!
+******************************************************************************
+
+ @Function		HostMemCopy
+
+ @Description	Copies a block of memory
+
+ @Output   pvDst - pointer to destination
+ @Input    pvSrc - pointer to source
+ @Input    ui32Size - bytes to copy
+
+ @Return  none
+
+******************************************************************************/
+void HostMemCopy(void *pvDst, void *pvSrc, IMG_UINT32 ui32Size)
+{
+#if defined(USE_UNOPTIMISED_MEMCPY)
+    unsigned char *src, *dst;
+    IMG_UINT32 i;
+
+    src = (unsigned char *)pvSrc;
+    dst = (unsigned char *)pvDst;
+    for (i = 0; i < ui32Size; i++)
+    {
+        dst[i] = src[i];
+    }
+#else
+    memcpy(pvDst, pvSrc, ui32Size);
+#endif
+}
+
+IMG_UINT32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName, IMG_UINT32 *pui32Data)
+{
+    /* XXX Not yet implemented */
+	return 0;
+}
+
+void * HostPageablePageAlloc(IMG_UINT32 ui32Pages)
+{
+    return vmalloc(ui32Pages * PAGE_SIZE);
+}
+
+void HostPageablePageFree(void * pvBase)
+{
+    vfree(pvBase);
+}
+
+void * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages)
+{
+    return vmalloc(ui32Pages * PAGE_SIZE);
+}
+
+void HostNonPageablePageFree(void * pvBase)
+{
+    vfree(pvBase);
+}
+
+void * HostMapKrnBufIntoUser(void * pvKrnAddr, IMG_UINT32 ui32Size, void **ppvMdl)
+{
+    /* XXX Not yet implemented */
+	return NULL;
+}
+
+void HostUnMapKrnBufFromUser(void * pvUserAddr, void * pvMdl, void * pvProcess)
+{
+    /* XXX Not yet implemented */
+}
+
+void HostCreateRegDeclStreams(void)
+{
+    /* XXX Not yet implemented */
+}
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+
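+/* Upper bound on how long HostWaitForEvent() blocks; HZ jiffies elapse per second. */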
+#define	EVENT_WAIT_TIMEOUT_MS	500
+#define	EVENT_WAIT_TIMEOUT_JIFFIES	(EVENT_WAIT_TIMEOUT_MS * HZ / 1000)
+
+static int iStreamData;
+static wait_queue_head_t sStreamDataEvent;
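+/* iStreamData is a bare flag with no locking; a lost update only delays
+ * the waiter until the timeout above expires. */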
+
+IMG_INT32 HostCreateEventObjects(void)
+{
+	init_waitqueue_head(&sStreamDataEvent);
+
+	return 0;
+}
+
+void HostWaitForEvent(DBG_EVENT eEvent)
+{
+	switch(eEvent)
+	{
+		case DBG_EVENT_STREAM_DATA:
+			/*
+			 * More than one process may be woken up.
+			 * Any process that wakes up should consume
+			 * all the data from the streams.
+			 */
+			wait_event_interruptible_timeout(sStreamDataEvent, iStreamData != 0, EVENT_WAIT_TIMEOUT_JIFFIES);
+			iStreamData = 0;
+			break;
+		default:
+			/*
+			 * For unknown events, enter an interruptible sleep.
+			 */
+			msleep_interruptible(EVENT_WAIT_TIMEOUT_MS);
+			break;
+	}
+}
+
+void HostSignalEvent(DBG_EVENT eEvent)
+{
+	switch(eEvent)
+	{
+		case DBG_EVENT_STREAM_DATA:
+			iStreamData = 1;
+			wake_up_interruptible(&sStreamDataEvent);
+			break;
+		default:
+			break;
+	}
+}
+
+void HostDestroyEventObjects(void)
+{
+}
+#endif	/* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/linux/main.c b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/linux/main.c
new file mode 100644
index 0000000..f008db4
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/dbgdriv/linux/main.c
@@ -0,0 +1,249 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug driver main file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <asm/uaccess.h>
+#include <drm/drmP.h>
+
+#include "img_types.h"
+#include "linuxsrv.h"
+#include "dbgdriv_ioctl.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+#include "hostfunc.h"
+#include "pvr_debug.h"
+#include "pvrmodule.h"
+#include "pvr_uaccess.h"
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+
+/* Out temp buffer used by the IOCTL handler; allocated once and grown as
+ * needed. This optimisation means the debug driver performs fewer
+ * vmalloc/vfree calls, reducing the chance of kernel vmalloc space
+ * exhaustion. The single out buffer for PDump UM reads is not
+ * multi-thread safe, so a mutex protects it from simultaneous reads.
+ */
+static IMG_CHAR*  g_outTmpBuf = NULL;
+static IMG_UINT32 g_outTmpBufSize = 64*PAGE_SIZE;
+static void*      g_pvOutTmpBufMutex = NULL;
+
+void DBGDrvGetServiceTable(void **fn_table);
+
+void DBGDrvGetServiceTable(void **fn_table)
+{
+	extern DBGKM_SERVICE_TABLE g_sDBGKMServices;
+
+	*fn_table = &g_sDBGKMServices;
+}
+
+void dbgdrv_cleanup(void)
+{
+	if (g_outTmpBuf)
+	{
+		vfree(g_outTmpBuf);
+		g_outTmpBuf = NULL;
+	}
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+	HostDestroyEventObjects();
+#endif
+	HostDestroyMutex(g_pvOutTmpBufMutex);
+	HostDestroyMutex(g_pvAPIMutex);
+	return;
+}
+
+IMG_INT dbgdrv_init(void)
+{
+	/* Init API mutex */
+	if ((g_pvAPIMutex=HostCreateMutex()) == NULL)
+	{
+		return -ENOMEM;
+	}
+
+	/* Init TmpBuf mutex; on failure, release the API mutex so it is not leaked */
+	if ((g_pvOutTmpBufMutex=HostCreateMutex()) == NULL)
+	{
+		HostDestroyMutex(g_pvAPIMutex);
+		return -ENOMEM;
+	}
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+	/*
+	 * The current implementation of HostCreateEventObjects on Linux
+	 * can never fail, so there is no need to check for error.
+	 */
+	(void) HostCreateEventObjects();
+#endif
+
+	return 0;
+}
+
+static IMG_INT dbgdrv_ioctl_work(void *arg, IMG_BOOL bCompat)
+{
+	struct drm_pvr_dbgdrv_cmd *psDbgdrvCmd = (struct drm_pvr_dbgdrv_cmd *) arg;
+	char *buffer, *in, *out;
+	unsigned int cmd;
+	void *pBufferIn, *pBufferOut;
+
+	if (psDbgdrvCmd->pad)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Invalid pad value\n"));
+		return -EINVAL;
+	}
+
+	if ((psDbgdrvCmd->in_data_size > (PAGE_SIZE >> 1)) ||
+		(psDbgdrvCmd->out_data_size > (PAGE_SIZE >> 1)))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Sizes of the buffers are too large, cannot do ioctl\n"));
+		return -EINVAL;
+	}
+
+	buffer = (char *) HostPageablePageAlloc(1);
+	if (!buffer)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to allocate buffer, cannot do ioctl\n"));
+		return -ENOMEM;
+	}
+
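+	/* split the page: first half carries the input parameters, second half the output */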
+	in = buffer;
+	out = buffer + (PAGE_SIZE >> 1);
+
+	pBufferIn = (void *)(uintptr_t) psDbgdrvCmd->in_data_ptr;
+	pBufferOut = (void *)(uintptr_t) psDbgdrvCmd->out_data_ptr;
+
+	if (pvr_copy_from_user(in, pBufferIn, psDbgdrvCmd->in_data_size) != 0)
+	{
+		goto init_failed;
+	}
+
+	/* Extra -1 because ioctls start at DEBUG_SERVICE_IOCTL_BASE + 1 */
+	cmd = MAKEIOCTLINDEX(psDbgdrvCmd->cmd) - DEBUG_SERVICE_IOCTL_BASE - 1;
+	if (cmd >= DEBUG_SERVICE_MAX_API)
+	{
+		/* reject out-of-range commands rather than indexing past the jump table */
+		goto init_failed;
+	}
+
+	if (psDbgdrvCmd->cmd == DEBUG_SERVICE_READ)
+	{
+		IMG_UINT32 *pui32BytesCopied = (IMG_UINT32 *)out;
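+		/* NB: pui32BytesCopied aliases psReadOutParams->ui32DataRead (which
+		 * must be the first member of DBG_OUT_READ for this to work), so it
+		 * holds the byte count once DBGDrivRead() has run. */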
+		DBG_OUT_READ *psReadOutParams = (DBG_OUT_READ *)out;
+		DBG_IN_READ *psReadInParams = (DBG_IN_READ *)in;
+		void *pvOutBuffer;
+		PDBG_STREAM psStream;
+
+		psStream = SID2PStream(psReadInParams->hStream);
+		if (!psStream)
+		{
+			goto init_failed;
+		}
+
+		/* Serialise IOCTL Read op access to the singular output buffer */
+		HostAquireMutex(g_pvOutTmpBufMutex);
+
+		if ((g_outTmpBuf == NULL) || (psReadInParams->ui32OutBufferSize > g_outTmpBufSize))
+		{
+			if (psReadInParams->ui32OutBufferSize > g_outTmpBufSize)
+			{
+				g_outTmpBufSize = psReadInParams->ui32OutBufferSize;
+			}
+			/* free any previous (smaller) buffer so growing it does not leak */
+			if (g_outTmpBuf)
+			{
+				vfree(g_outTmpBuf);
+			}
+			g_outTmpBuf = vmalloc(g_outTmpBufSize);
+			if (!g_outTmpBuf)
+			{
+				HostReleaseMutex(g_pvOutTmpBufMutex);
+				goto init_failed;
+			}
+		}
+
+		/* Ensure only one thread is allowed into the DBGDriv core at a time */
+		HostAquireMutex(g_pvAPIMutex);
+
+		psReadOutParams->ui32DataRead = DBGDrivRead(psStream,
+										   psReadInParams->ui32BufID,
+										   psReadInParams->ui32OutBufferSize,
+										   g_outTmpBuf);
+		psReadOutParams->ui32SplitMarker = DBGDrivGetMarker(psStream);
+
+		HostReleaseMutex(g_pvAPIMutex);
+
+		pvOutBuffer = WIDEPTR_GET_PTR(psReadInParams->pui8OutBuffer, bCompat);
+
+		if (pvr_copy_to_user(pvOutBuffer,
+						g_outTmpBuf,
+						*pui32BytesCopied) != 0)
+		{
+			HostReleaseMutex(g_pvOutTmpBufMutex);
+			goto init_failed;
+		}
+
+		HostReleaseMutex(g_pvOutTmpBufMutex);
+
+	}
+	else
+	{
+		(g_DBGDrivProc[cmd])(in, out, bCompat);
+	}
+
+	if (copy_to_user(pBufferOut, out, psDbgdrvCmd->out_data_size) != 0)
+	{
+		goto init_failed;
+	}
+
+	HostPageablePageFree((void *)buffer);
+	return 0;
+
+init_failed:
+	HostPageablePageFree((void *)buffer);
+	return -EFAULT;
+}
+
+int dbgdrv_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile)
+{
+	return dbgdrv_ioctl_work((void *) arg, IMG_FALSE);
+}
+
+int dbgdrv_ioctl_compat(struct file *file, unsigned int ioctlCmd, unsigned long arg)
+{
+	return dbgdrv_ioctl_work((void *) arg, IMG_TRUE);
+}
diff --git a/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/include/linuxsrv.h b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/include/linuxsrv.h
new file mode 100644
index 0000000..7023870
--- /dev/null
+++ b/drivers/misc/mediatek/gpu/gpu_rgx/m1.9ED4917962/tools/services/debug/include/linuxsrv.h
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File
+@Title          Module defs for pvr core drivers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LINUXSRV_H__
+#define _LINUXSRV_H__
+
+#include "dbgdrvif_srv5.h"
+
+IMG_UINT32 DeviceIoControl(IMG_UINT32 hDevice,
+						IMG_UINT32 ui32ControlCode,
+						void *pInBuffer,
+						IMG_UINT32 ui32InBufferSize,
+						void *pOutBuffer,
+						IMG_UINT32 ui32OutBufferSize,
+						IMG_UINT32 *pui32BytesReturned);
+
+#endif /* _LINUXSRV_H__*/
diff --git a/drivers/misc/mediatek/include/mt-plat/sync_write.h b/drivers/misc/mediatek/include/mt-plat/sync_write.h
new file mode 100644
index 0000000..5d6ff6c
--- /dev/null
+++ b/drivers/misc/mediatek/include/mt-plat/sync_write.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT_SYNC_WRITE_H
+#define _MT_SYNC_WRITE_H
+
+#if defined(__KERNEL__)
+
+#include <linux/io.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Define macros.
+ */
+#define mt_reg_sync_writel(v, a) \
+	do {    \
+		__raw_writel((v), (void __force __iomem *)((a)));   \
+		mb();  \
+	} while (0)
+
+#define mt_reg_sync_writew(v, a) \
+	do {    \
+		__raw_writew((v), (void __force __iomem *)((a)));   \
+		mb();  \
+	} while (0)
+
+#define mt_reg_sync_writeb(v, a) \
+	do {    \
+		__raw_writeb((v), (void __force __iomem *)((a)));   \
+		mb();  \
+	} while (0)
+
+#ifdef CONFIG_64BIT
+#define mt_reg_sync_writeq(v, a) \
+	do {    \
+		__raw_writeq((v), (void __force __iomem *)((a)));   \
+		mb();  \
+	} while (0)
+#endif
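+
+/*
+ * Usage sketch (reg_va stands for a hypothetical ioremap'd register address):
+ *	mt_reg_sync_writel(0x1, reg_va);
+ * The mb() after each raw write orders the store ahead of any following
+ * memory access, so the register update is not left sitting in a write
+ * buffer when dependent code runs.
+ */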
+
+#else				/* __KERNEL__ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+
+#define mt_reg_sync_writel(v, a)        mt65xx_reg_sync_writel(v, a)
+#define mt_reg_sync_writew(v, a)        mt65xx_reg_sync_writew(v, a)
+#define mt_reg_sync_writeb(v, a)        mt65xx_reg_sync_writeb(v, a)
+
+#define mb()   \
+	{    \
+		__asm__ __volatile__ ("dsb" : : : "memory"); \
+	}
+
+#define mt65xx_reg_sync_writel(v, a) \
+	do {    \
+		*(volatile unsigned int *)(a) = (v);    \
+		mb(); \
+	} while (0)
+
+#define mt65xx_reg_sync_writew(v, a) \
+	do {    \
+		*(volatile unsigned short *)(a) = (v);    \
+		mb(); \
+	} while (0)
+
+#define mt65xx_reg_sync_writeb(v, a) \
+	do {    \
+		*(volatile unsigned char *)(a) = (v);    \
+		mb(); \
+	} while (0)
+
+#endif				/* __KERNEL__ */
+
+#endif				/* !_MT_SYNC_WRITE_H */
diff --git a/drivers/misc/mediatek/usb11/Kconfig b/drivers/misc/mediatek/usb11/Kconfig
new file mode 100644
index 0000000..6bfdfae
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/Kconfig
@@ -0,0 +1,57 @@
+#
+# MediaTek ICUSB Driver
+#
+
+config MTK_USBFSH
+	bool "MediaTek USB fullspeed Host driver"
+	---help---
+	  This is support for MediaTek's customized host-only MUSB hardware.
+	  The USBFSH driver removes the USB device functions and provides
+	  support for a second modem, ICUSB, etc.
+
+config MTK_ICUSB_SUPPORT
+	bool "MediaTek ICUSB SIM card driver"
+	depends on MTK_USBFSH
+	---help---
+	  Support for the ICUSB feature on the USBFSH port. ICUSB is a
+	  standard USB interface for connecting a special SIM card. Besides
+	  the normal SIM card features, ICUSB provides extras such as mass
+	  storage, EEM, etc. This port is usually configured as USB port 1
+	  on the device.
+
+config MTK_DT_USB_SUPPORT
+	bool "MediaTek USBFSH dual talk feature driver"
+	depends on MTK_USBFSH
+	---help---
+	  Enable the dual-talk feature over the MTK_MUSBFSH USB port. When a
+	  second modem on the device is connected to the main SoC via the
+	  MTK_MUSBFSH USB port, MTK_DT_USB_SUPPORT is needed to transfer
+	  data between the two modems. This port is usually configured as
+	  USB port 1 on the device.
+
+config MTK_MUSBFSH_QMU_SUPPORT
+	bool "QMU Transfer mode of MediaTek MUSBFSH"
+	depends on MTK_USBFSH
+	---help---
+	  Say Y here if your system has QMU capability for the USB11
+	  controller based on the MTK MUSBFSH IP. QMU is an enhanced DMA
+	  mode that uses a hardware queue rather than a software queue to
+	  reduce software effort.
+
+config MTK_MUSBFSH_BIND_DEV_EP
+	bool "MediaTek USBFSH bind device endpoints to MUSB endpoints"
+	depends on MTK_USBFSH
+	---help---
+	  Say Y here if you want to bind device endpoints to MUSB endpoints.
+	  Device endpoints are unbound when the device disconnects.
+
+config MTK_MUSBFSH_OCP_SUPPORT
+	bool "Over-Current Support of MediaTek MUSBFSH"
+	depends on MTK_USBFSH
+	---help---
+	  Say Y here if your PORT1 supports over-current protection: if the
+	  USB port 1 current exceeds the specified limit (e.g. 500 mA), the
+	  OCP pin disables power and then resumes. This depends on the OCP
+	  hardware and the board design.
diff --git a/drivers/misc/mediatek/usb11/Makefile b/drivers/misc/mediatek/usb11/Makefile
new file mode 100644
index 0000000..5f58c5e
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/Makefile
@@ -0,0 +1,17 @@
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/usb11/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/usb11/mt8167/
+ccflags-y += -I$(srctree)/drivers/usb/core/
+
+obj-$(CONFIG_MTK_USBFSH) := musbfsh_host.o musbfsh_hsdma.o musbfsh_virthub.o
+obj-$(CONFIG_MTK_ICUSB_SUPPORT) += musbfsh_icusb.o
+
+obj-y += mt8167/
+
+# QMU Related
+obj-$(CONFIG_MTK_MUSBFSH_QMU_SUPPORT) += mtk11_qmu.o musbfsh_qmu.o
+ifeq ($(CONFIG_MTK_MUSBFSH_QMU_SUPPORT),y)
+subdir-ccflags-$(CONFIG_ARCH_MT8163) += -DMUSBFSH_QMU_LIMIT_SUPPORT -DMUSBFSH_QMU_LIMIT_RXQ_NUM=4 -DMUSBFSH_QMU_LIMIT_TXQ_NUM=4
+subdir-ccflags-$(CONFIG_MACH_MT8167) += -DMUSBFSH_QMU_LIMIT_SUPPORT -DMUSBFSH_QMU_LIMIT_RXQ_NUM=4 -DMUSBFSH_QMU_LIMIT_TXQ_NUM=4
+endif
diff --git a/drivers/misc/mediatek/usb11/mt8167/Makefile b/drivers/misc/mediatek/usb11/mt8167/Makefile
new file mode 100644
index 0000000..3822b8b
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/mt8167/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_MTK_USBFSH) := musbfsh_core.o musbfsh_mt65xx.o musbfsh_debugfs.o
+
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/usb20
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/usb11
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/usb11/mt8167/
+ccflags-y += -I$(srctree)/include/linux/
+
diff --git a/drivers/misc/mediatek/usb11/mt8167/musbfsh_core.c b/drivers/misc/mediatek/usb11/mt8167/musbfsh_core.c
new file mode 100644
index 0000000..fff3906
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/mt8167/musbfsh_core.c
@@ -0,0 +1,1877 @@
+/*
+ * MUSB OTG driver core code
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
+ *
+ * This consists of a Host Controller Driver (HCD) and a peripheral
+ * controller driver implementing the "Gadget" API; OTG support is
+ * in the works.  These are normal Linux-USB controller drivers which
+ * use IRQs and have no dedicated thread.
+ *
+ * This version of the driver has only been used with products from
+ * Texas Instruments.  Those products integrate the Inventra logic
+ * with other DMA, IRQ, and bus modules, as well as other logic that
+ * needs to be reflected in this driver.
+ *
+ *
+ * NOTE:  the original Mentor code here was pretty much a collection
+ * of mechanisms that don't seem to have been fully integrated/working
+ * for *any* Linux kernel version.  This version aims at Linux 2.6.now.
+ * Key open issues include:
+ *
+ *  - Lack of host-side transaction scheduling, for all transfer types.
+ *    The hardware doesn't do it; instead, software must.
+ *
+ *    This is not an issue for OTG devices that don't support external
+ *    hubs, but for more "normal" USB hosts it's a user issue that the
+ *    "multipoint" support doesn't scale in the expected ways.  That
+ *    includes DaVinci EVM in a common non-OTG mode.
+ *
+ *      * Control and bulk use dedicated endpoints, and there's as
+ *        yet no mechanism to either (a) reclaim the hardware when
+ *        peripherals are NAKing, which gets complicated with bulk
+ *        endpoints, or (b) use more than a single bulk endpoint in
+ *        each direction.
+ *
+ *        RESULT:  one device may be perceived as blocking another one.
+ *
+ *      * Interrupt and isochronous will dynamically allocate endpoint
+ *        hardware, but (a) there's no record keeping for bandwidth;
+ *        (b) in the common case that few endpoints are available, there
+ *        is no mechanism to reuse endpoints to talk to multiple devices.
+ *
+ *        RESULT:  At one extreme, bandwidth can be overcommitted in
+ *        some hardware configurations, no faults will be reported.
+ *        At the other extreme, the bandwidth capabilities which do
+ *        exist tend to be severely undercommitted.  You can't yet hook
+ *        up both a keyboard and a mouse to an external USB hub.
+ */
+
+/*
+ * This gets many kinds of configuration information:
+ *	- Kconfig for everything user-configurable
+ *	- platform_device for addressing, irq, and platform_data
+ *	- platform_data is mostly for board-specific information
+ *	  (plus recently, SoC or family details)
+ *
+ * Most of the conditional compilation will (someday) vanish.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/prefetch.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#ifdef CONFIG_OF
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#endif
+
+#include <linux/gpio.h>
+
+#include "musbfsh_core.h"
+#include "musbfsh_host.h"
+#include "musbfsh_dma.h"
+#include "musbfsh_hsdma.h"
+#include "musbfsh_mt65xx.h"
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+#include "musbfsh_qmu.h"
+#include "mtk11_qmu.h"
+u32 mtk11_dma_burst_setting, mtk11_qmu_ioc_setting;
+/*struct musbfsh_hw_ep *mtk11_qmu_isoc_ep;*/
+int mtk11_qmu_dbg_level = LOG_CRIT;
+int mtk11_qmu_max_gpd_num;
+int mtk11_isoc_ep_start_idx = 4;
+int mtk11_isoc_ep_gpd_count = 3000;
+int mtk11_host_qmu_concurrent = 1;
+int mtk11_host_qmu_pipe_msk = (PIPE_ISOCHRONOUS + 1); /* | (PIPE_BULK + 1) | (PIPE_INTERRUPT + 1) */
+int mtk11_host_qmu_max_active_isoc_gpd;
+int mtk11_host_qmu_max_number_of_pkts;
+
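+/* expose the QMU tunables above for runtime adjustment via /sys/module/.../parameters/ */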
+module_param(mtk11_qmu_dbg_level, int, 0644);
+module_param(mtk11_host_qmu_concurrent, int, 0644);
+module_param(mtk11_host_qmu_pipe_msk, int, 0644);
+module_param(mtk11_host_qmu_max_active_isoc_gpd, int, 0644);
+module_param(mtk11_host_qmu_max_number_of_pkts, int, 0644);
+#endif
+
+int musbfsh_host_dynamic_fifo = 1;
+int musbfsh_host_dynamic_fifo_usage_msk;
+module_param(musbfsh_host_dynamic_fifo, int, 0644);
+
+#ifdef CONFIG_MUSBFSH_PIO_ONLY
+#undef CONFIG_MUSBFSH_PIO_ONLY
+#endif
+
+#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia, Mediatek"
+#define DRIVER_DESC "MT65xx USB Host Controller Driver"
+
+#define MUSBFSH_VERSION "6.0"
+
+#define DRIVER_INFO DRIVER_DESC ", v" MUSBFSH_VERSION
+
+#define MUSBFSH_DRIVER_NAME "musbfsh-hdrc"
+
+u32 usb1_irq_number;
+static const struct of_device_id apusb_of_ids[] = {
+	{.compatible = "mediatek,mt8167-usb11",},
+	{},
+};
+
+struct musbfsh *musbfsh_Device;
+#ifdef CONFIG_OF
+struct device_node *usb11_dts_np;
+#endif
+
+const char musbfsh_driver_name[] = MUSBFSH_DRIVER_NAME;
+static DEFINE_IDA(musbfsh_ida);
+
+struct pinctrl *musbfsh_pinctrl;
+
+#ifdef CONFIG_MTK_MUSBFSH_OCP_SUPPORT
+/* OCP pin start */
+static unsigned int ocp_pin_p1;
+static unsigned int ocp_p1_irq_number;
+
+static unsigned int ocp_pin_p2;
+static unsigned int ocp_p2_irq_number;
+
+/* OCP pin end */
+#endif
+
+MODULE_DESCRIPTION(DRIVER_INFO);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MUSBFSH_DRIVER_NAME);
+
+/*struct wake_lock musbfsh_suspend_lock;*/
+DEFINE_SPINLOCK(musbfs_io_lock);
+/*-------------------------------------------------------------------------*/
+#ifdef IC_USB
+static ssize_t show_start(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "start session under IC-USB mode\n");
+}
+
+static ssize_t store_start(struct device *dev, struct device_attribute *attr, const char *buf,
+			   size_t size)
+{
+	unsigned int value = 0;
+	u8 devctl = musbfsh_readb((unsigned char __iomem *)USB11_BASE, MUSBFSH_DEVCTL);
+
+	/* kstrtouint() parses the whole (newline-terminated) string; the
+	 * previous kstrtol() call passed a char ** where a long * was
+	 * expected and could not work. */
+	if (kstrtouint(buf, 10, &value))
+		return -EINVAL;
+
+	if (value) {
+		WARNING("[IC-USB]start session\n");
+		devctl |= MUSBFSH_DEVCTL_SESSION;	/* wx? why not wait until device connected*/
+		musbfsh_writeb((unsigned char __iomem *)USB11_BASE, MUSBFSH_DEVCTL, devctl);
+		WARNING("[IC-USB]power on VSIM\n");
+		hwPowerOn(MT65XX_POWER_LDO_VSIM, VOL_3000, "USB11-SIM");
+	}
+}
+
+static DEVICE_ATTR(start, S_IWUSR | S_IWGRP | S_IRUGO, show_start, store_start);
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static inline struct musbfsh *dev_to_musbfsh(struct device *dev)
+{
+	return hcd_to_musbfsh(dev_get_drvdata(dev));
+}
+
+/*-------------------------------------------------------------------------*/
+
+int musbfsh_get_id(struct device *dev, gfp_t gfp_mask)
+{
+	int id;
+
+	id = ida_alloc(&musbfsh_ida, gfp_mask);
+	if (id < 0) {
+		dev_err(dev, "failed to allocate a new id\n");
+		return id;
+	}
+
+	return id;
+}
+EXPORT_SYMBOL_GPL(musbfsh_get_id);
+
+void musbfsh_put_id(struct device *dev, int id)
+{
+	dev_dbg(dev, "removing id %d\n", id);
+	ida_free(&musbfsh_ida, id);
+}
+EXPORT_SYMBOL_GPL(musbfsh_put_id);
+
+/*-------------------------------------------------------------------------*/
+/*#ifdef CONFIG_MUSBFSH_PIO_ONLY*/
+/*
+* Load an endpoint's FIFO
+*/
+void musbfsh_write_fifo(struct musbfsh_hw_ep *hw_ep, u16 len, const u8 *src)
+{
+	void __iomem *fifo = hw_ep->fifo;
+
+	prefetch((u8 *)src);
+
+	INFO("%cX ep%d fifo %p count %d buf %p\n", 'T', hw_ep->epnum, fifo, len, src);
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	/* we can't assume unaligned reads work */
+	if (likely((0x01 & (unsigned long)src) == 0)) {
+		u16 index = 0;
+
+		/* best case is 32bit-aligned source address */
+		if ((0x02 & (unsigned long)src) == 0) {
+			if (len >= 4) {
+				/*writesl(fifo, src + index, len >> 2);*/
+				iowrite32_rep(fifo, src + index, len >> 2);
+				index += len & ~0x03;
+			}
+			if (len & 0x02) {
+				musbfsh_writew(fifo, 0, *(u16 *)&src[index]);
+				index += 2;
+			}
+		} else {
+			if (len >= 2) {
+				/*writesw(fifo, src + index, len >> 1);*/
+				iowrite16_rep(fifo, src + index, len >> 1);
+				index += len & ~0x01;
+			}
+		}
+		if (len & 0x01)
+			musbfsh_writeb(fifo, 0, src[index]);
+	} else {
+		/* byte aligned */
+		/*writesb(fifo, src, len);*/
+		iowrite8_rep(fifo, src, len);
+	}
+}
+
+/*
+ * Unload an endpoint's FIFO
+*/
+void musbfsh_read_fifo(struct musbfsh_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+	void __iomem *fifo = hw_ep->fifo;
+
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	INFO("%cX ep%d fifo %p count %d buf %p\n", 'R', hw_ep->epnum, fifo, len, dst);
+
+	/* we can't assume unaligned writes work */
+	if (likely((0x01 & (unsigned long)dst) == 0)) {
+		u16 index = 0;
+
+		/* best case is 32bit-aligned destination address */
+		if ((0x02 & (unsigned long)dst) == 0) {
+			if (len >= 4) {
+				/*readsl(fifo, dst, len >> 2);*/
+				ioread32_rep(fifo, dst, len >> 2);
+				index = len & ~0x03;
+			}
+			if (len & 0x02) {
+				*(u16 *)&dst[index] = musbfsh_readw(fifo, 0);
+				index += 2;
+			}
+		} else {
+			if (len >= 2) {
+				/*readsw(fifo, dst, len >> 1);*/
+				ioread16_rep(fifo, dst, len >> 1);
+				index = len & ~0x01;
+			}
+		}
+		if (len & 0x01)
+			dst[index] = musbfsh_readb(fifo, 0);
+	} else {
+		/* byte aligned */
+		/*readsb(fifo, dst, len);*/
+		ioread8_rep(fifo, dst, len);
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* for high speed test mode; see USB 2.0 spec 7.1.20 */
+static const u8 musbfsh_test_packet[53] = {
+	/* implicit SYNC then DATA0 to start */
+
+	/* JKJKJKJK x9 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* JJKKJJKK x8 */
+	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+	/* JJJJKKKK x8 */
+	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+	/* JJJJJJJKKKKKKK x8 */
+	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	/* JJJJJJJK x8 */
+	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+	/* JKKKKKKK x10, JK */
+	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
+	    /* implicit CRC16 then EOP to end */
+};
+
+void musbfsh_load_testpacket(struct musbfsh *musbfsh)
+{
+	void __iomem *regs = musbfsh->endpoints[0].regs;
+
+	musbfsh_ep_select(musbfsh->mregs, 0);	/*should be implemented*/
+	musbfsh_write_fifo(musbfsh->control_ep, sizeof(musbfsh_test_packet), musbfsh_test_packet);
+	musbfsh_writew(regs, MUSBFSH_CSR0, MUSBFSH_CSR0_TXPKTRDY);
+}
+
+static struct musbfsh_fifo_cfg ep0_cfg = {
+	.style = FIFO_RXTX, .maxpacket = 64,
+};
+
+/*-------------------------------------------------------------------------*/
+/*
+ * Interrupt Service Routine to record USB "global" interrupts.
+ * Since these do not happen often and signify things of
+ * paramount importance, it seems OK to check them individually;
+ * the order of the tests is specified in the manual
+ *
+ * @param musb instance pointer
+ * @param int_usb register contents
+ * @param devctl
+ * @param power
+ */
+
+static irqreturn_t musbfsh_stage0_irq(struct musbfsh *musbfsh, u8 int_usb, u8 devctl, u8 power)
+{
+	irqreturn_t handled = IRQ_NONE;
+
+	/* in host mode, the peripheral may issue remote wakeup.
+	 * in peripheral mode, the host may resume the link.
+	 * spurious RESUME irqs happen too, paired with SUSPEND.
+	 */
+	if (int_usb & MUSBFSH_INTR_RESUME) {
+		handled = IRQ_HANDLED;
+		WARNING("RESUME!\n");
+
+		if (devctl & MUSBFSH_DEVCTL_HM) {
+			void __iomem *mbase = musbfsh->mregs;
+
+			/* remote wakeup?  later, GetPortStatus
+			 * will stop RESUME signaling
+			 */
+
+			if (power & MUSBFSH_POWER_SUSPENDM) {
+				/* spurious */
+				musbfsh->int_usb &= ~MUSBFSH_INTR_SUSPEND;
+				WARNING("Spurious SUSPENDM\n");
+			}
+
+			power &= ~MUSBFSH_POWER_SUSPENDM;
+			musbfsh_writeb(mbase, MUSBFSH_POWER, power | MUSBFSH_POWER_RESUME);
+
+			musbfsh->port1_status |= (USB_PORT_STAT_C_SUSPEND << 16)
+			    | MUSBFSH_PORT_STAT_RESUME;
+			musbfsh->rh_timer = jiffies + msecs_to_jiffies(20);
+
+			musbfsh->is_active = 1;
+			usb_hcd_resume_root_hub(musbfsh_to_hcd(musbfsh));
+		}
+	}
+
+	/* see manual for the order of the tests */
+	if (int_usb & MUSBFSH_INTR_SESSREQ) {
+		/*will not run to here */
+		void __iomem *mbase = musbfsh->mregs;
+
+		WARNING("SESSION_REQUEST\n");
+
+		/* IRQ arrives from ID pin sense or (later, if VBUS power
+		 * is removed) SRP.  responses are time critical:
+		 *  - turn on VBUS (with silicon-specific mechanism)
+		 *  - go through A_WAIT_VRISE
+		 *  - ... to A_WAIT_BCON.
+		 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
+		 */
+		devctl |= MUSBFSH_DEVCTL_SESSION;
+		musbfsh_writeb(mbase, MUSBFSH_DEVCTL, devctl);
+		musbfsh->ep0_stage = MUSBFSH_EP0_START;
+		musbfsh_platform_set_vbus(musbfsh, 1);
+
+		handled = IRQ_HANDLED;
+	}
+
+	if (int_usb & MUSBFSH_INTR_VBUSERROR) {
+		int ignore = 0;
+
+		/* During connection as an A-Device, we may see a short
+		 * current spikes causing voltage drop, because of cable
+		 * and peripheral capacitance combined with vbus draw.
+		 * (So: less common with truly self-powered devices, where
+		 * vbus doesn't act like a power supply.)
+		 *
+		 * Such spikes are short; usually less than ~500 usec, max
+		 * of ~2 msec.  That is, they're not sustained overcurrent
+		 * errors, though they're reported using VBUSERROR irqs.
+		 *
+		 * Workarounds:  (a) hardware: use self powered devices.
+		 * (b) software:  ignore non-repeated VBUS errors.
+		 *
+		 * REVISIT:  do delays from lots of DEBUG_KERNEL checks
+		 * make trouble here, keeping VBUS < 4.4V ?
+		 */
+		if (musbfsh->vbuserr_retry) {
+			void __iomem *mbase = musbfsh->mregs;
+
+			musbfsh->vbuserr_retry--;
+			ignore = 1;
+			devctl |= MUSBFSH_DEVCTL_SESSION;
+			musbfsh_writeb(mbase, MUSBFSH_DEVCTL, devctl);
+		} else {
+			musbfsh->port1_status |=
+			    USB_PORT_STAT_OVERCURRENT | (USB_PORT_STAT_C_OVERCURRENT << 16);
+		}
+
+		ERR("VBUS_ERROR (%02x, %s), retry #%d, port1_status 0x%08x\n",
+			devctl, ({
+				char *s;
+
+				switch (devctl & MUSBFSH_DEVCTL_VBUS) {
+				case 0 << MUSBFSH_DEVCTL_VBUS_SHIFT:
+					s = "<SessEnd"; break;
+				case 1 << MUSBFSH_DEVCTL_VBUS_SHIFT:
+					s = "<AValid"; break;
+				case 2 << MUSBFSH_DEVCTL_VBUS_SHIFT:
+					s = "<VBusValid"; break;
+				/* case 3 << MUSBFSH_DEVCTL_VBUS_SHIFT: */
+				default:
+					s = "VALID"; break;
+				}; s; }
+		    ), VBUSERR_RETRY_COUNT - musbfsh->vbuserr_retry, musbfsh->port1_status);
+
+		/* go through A_WAIT_VFALL then start a new session */
+		if (!ignore)
+			musbfsh_platform_set_vbus(musbfsh, 0);
+		handled = IRQ_HANDLED;
+	}
+
+	if (int_usb & MUSBFSH_INTR_SUSPEND) {
+		INFO("SUSPEND devctl %02x power %02x\n", devctl, power);
+		handled = IRQ_HANDLED;
+	}
+
+	if (int_usb & MUSBFSH_INTR_CONNECT) {
+		struct usb_hcd *hcd = musbfsh_to_hcd(musbfsh);
+
+		handled = IRQ_HANDLED;
+		musbfsh->is_active = 1;
+
+		musbfsh->ep0_stage = MUSBFSH_EP0_START;
+		musbfsh->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
+					   | USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_ENABLE);
+		musbfsh->port1_status |= USB_PORT_STAT_CONNECTION
+		    | (USB_PORT_STAT_C_CONNECTION << 16);
+		if (musbfsh_host_dynamic_fifo)
+			musbfsh_host_dynamic_fifo_usage_msk = 0;
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+		musbfsh_disable_q_all(musbfsh);
+#endif
+
+		/* high vs full speed is just a guess until after reset */
+		if (devctl & MUSBFSH_DEVCTL_LSDEV)
+			musbfsh->port1_status |= USB_PORT_STAT_LOW_SPEED;
+
+		if (hcd->status_urb)
+			usb_hcd_poll_rh_status(hcd);
+		else
+			usb_hcd_resume_root_hub(hcd);
+
+		WARNING("CONNECT ! devctl 0x%02x\n", devctl);
+	}
+
+	if (int_usb & MUSBFSH_INTR_DISCONNECT) {
+		WARNING("DISCONNECT !devctl %02x\n", devctl);
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+		musbfsh_disable_q_all(musbfsh);
+#endif
+
+		handled = IRQ_HANDLED;
+		usb_hcd_resume_root_hub(musbfsh_to_hcd(musbfsh));
+		musbfsh_root_disconnect(musbfsh);
+	}
+
+	/* mentor saves a bit: bus reset and babble share the same irq.
+	 * only host sees babble; only peripheral sees bus reset.
+	 */
+	if (int_usb & MUSBFSH_INTR_BABBLE) {
+		handled = IRQ_HANDLED;
+		/*
+		 * Looks like non-HS BABBLE can be ignored, but
+		 * HS BABBLE is an error condition. For HS the solution
+		 * is to avoid babble in the first place and fix what
+		 * caused BABBLE. When HS BABBLE happens we can only
+		 * stop the session.
+		 */
+		if (devctl & (MUSBFSH_DEVCTL_FSDEV | MUSBFSH_DEVCTL_LSDEV)) {
+			ERR("BABBLE devctl: %02x\n", devctl);
+		} else {
+			ERR("Stopping host session -- babble\n");
+			devctl |= MUSBFSH_DEVCTL_SESSION;
+			/*musbfsh_writeb(musbfsh->mregs, MUSBFSH_DEVCTL, devctl);*/ /*ignore babble*/
+		}
+	}
+
+	return handled;
+}
+
+/*-------------------------------------------------------------------------*/
+/*
+* Program the HDRC to start (enable interrupts, dma, etc.).
+*/
+void musbfsh_start(struct musbfsh *musbfsh)
+{
+	void __iomem *regs = musbfsh->mregs;	/*base address of usb mac*/
+	u8 devctl = musbfsh_readb(regs, MUSBFSH_DEVCTL);
+	u8 power = musbfsh_readb(regs, MUSBFSH_POWER);
+	int int_level1 = 0;
+
+	WARNING("<== devctl 0x%02x\n", devctl);
+
+	/*  Set INT enable registers, enable interrupts */
+	musbfsh_writew(regs, MUSBFSH_INTRTXE, musbfsh->epmask);
+	musbfsh_writew(regs, MUSBFSH_INTRRXE, musbfsh->epmask & 0xfffe);
+	musbfsh_writeb(regs, MUSBFSH_INTRUSBE, 0xf7);
+	/* enable level 1 interrupts */
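+	/* 0x000f covers the four basic sources; QMU builds additionally unmask bit 5 (0x0020) for the queue engine */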
+	#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	musbfsh_writew(regs, USB11_L1INTM, 0x002f);
+	#else
+	musbfsh_writew(regs, USB11_L1INTM, 0x000f);
+	#endif
+	int_level1 = musbfsh_readw(musbfsh->mregs, USB11_L1INTM);
+	INFO("Level 1 Interrupt Mask 0x%x\n", int_level1);
+	int_level1 = musbfsh_readw(musbfsh->mregs, USB11_L1INTP);
+	INFO("Level 1 Interrupt Polarity 0x%x\n", int_level1);
+
+	/* flush pending interrupts */
+	musbfsh_writew(regs, MUSBFSH_INTRTX, 0xffff);
+	musbfsh_writew(regs, MUSBFSH_INTRRX, 0xffff);
+	musbfsh_writeb(regs, MUSBFSH_INTRUSB, 0xff);
+	musbfsh_writeb(regs, MUSBFSH_HSDMA_INTR, 0xff);
+
+	/*remove babble: NOISE_STALL_SOF:1, BABBLE_CLR_EN:0*/
+	devctl = musbfsh_readb(regs, MUSBFSH_ULPI_REG_DATA);
+	devctl = devctl | 0x80;
+	devctl = devctl & 0xbf;
+	musbfsh_writeb(regs, MUSBFSH_ULPI_REG_DATA, devctl);
+
+	musbfsh->is_active = 0;
+	musbfsh->is_multipoint = 1;
+
+	/* need to enable the VBUS */
+	musbfsh_platform_set_vbus(musbfsh, 1);
+	musbfsh_platform_enable(musbfsh);
+
+#ifndef IC_USB
+	/* start session, assume ID pin is hard-wired to ground */
+	devctl |= MUSBFSH_DEVCTL_SESSION;	/* wx? why not wait until device connected*/
+	musbfsh_writeb(regs, MUSBFSH_DEVCTL, devctl);
+#endif
+
+	/* enable high speed negotiate */
+	power |= MUSBFSH_POWER_HSENAB;
+	power |= MUSBFSH_POWER_SOFTCONN;
+	/* enable SUSPENDM: this puts the PHY into a low power mode (not as
+	 * low as the save-current mode) that can still detect line state
+	 * (remote wakeup/connect/disconnect)
+	 */
+	power |= MUSBFSH_POWER_ENSUSPEND;
+	musbfsh_writeb(regs, MUSBFSH_POWER, power);
+
+	devctl = musbfsh_readb(regs, MUSBFSH_DEVCTL);
+	power = musbfsh_readb(regs, MUSBFSH_POWER);
+	INFO(" musb ready. devctl=0x%x, power=0x%x\n", devctl, power);
+	mdelay(50);		/* wx?*/
+}
+
+
+static void musbfsh_generic_disable(struct musbfsh *musbfsh)
+{
+	void __iomem *mbase = musbfsh->mregs;
+	u16 temp;
+
+	INFO("++\n");
+	/* disable interrupts */
+	musbfsh_writeb(mbase, MUSBFSH_INTRUSBE, 0);
+	musbfsh_writew(mbase, MUSBFSH_INTRTXE, 0);
+	musbfsh_writew(mbase, MUSBFSH_INTRRXE, 0);
+
+	/* off */
+	musbfsh_writeb(mbase, MUSBFSH_DEVCTL, 0);
+
+	/*  flush pending interrupts */
+	temp = musbfsh_readb(mbase, MUSBFSH_INTRUSB);
+	temp = musbfsh_readw(mbase, MUSBFSH_INTRTX);
+	temp = musbfsh_readw(mbase, MUSBFSH_INTRRX);
+}
+
+/*
+ * Make the HDRC stop (disable interrupts, etc.);
+ * reversible by musbfsh_start
+ * called on gadget driver unregister
+ * with controller locked, irqs blocked
+ * acts as a NOP unless some role activated the hardware
+ */
+void musbfsh_stop(struct musbfsh *musbfsh)
+{
+	/* stop IRQs, timers, ... */
+	musbfsh_platform_disable(musbfsh);
+	musbfsh_generic_disable(musbfsh);
+	INFO("HDRC disabled\n");
+
+	/* FIXME
+	 *  - mark host and/or peripheral drivers unusable/inactive
+	 *  - disable DMA (and enable it in HdrcStart)
+	 *  - make sure we can musbfsh_start() after musbfsh_stop(); with
+	 *    OTG mode, gadget driver module rmmod/modprobe cycles that
+	 *  - ...
+	 */
+	musbfsh_platform_try_idle(musbfsh, 0);
+}
+
+static void musbfsh_shutdown(struct platform_device *pdev)
+{
+	struct musbfsh *musbfsh = dev_to_musbfsh(&pdev->dev);
+	unsigned long flags;
+
+	INFO("++\n");
+	spin_lock_irqsave(&musbfsh->lock, flags);
+	musbfsh_platform_disable(musbfsh);
+	musbfsh_generic_disable(musbfsh);
+	musbfsh_platform_set_vbus(musbfsh, 0);
+	musbfsh_platform_set_power(musbfsh, 0);
+	spin_unlock_irqrestore(&musbfsh->lock, flags);
+
+	/* FIXME power down */
+}
+
+
+/*-------------------------------------------------------------------------*/
+/*
+ * tables defining fifo_mode values.  define more if you like.
+ * for host side, make sure both halves of ep1 are set up.
+ */
+
+/* total dynamic FIFO space in bytes; ep0 plus the epx_cfg FIFOs below must fit */
+#define MAXFIFOSIZE 8096
+
+static struct musbfsh_fifo_cfg epx_cfg[] __initdata = {
+	{.hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_SINGLE},
+	{.hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_SINGLE},
+};
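+/* 14 single-buffered 512-byte FIFOs total 7168 bytes; with 64 bytes for ep0 this stays under MAXFIFOSIZE */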
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * configure a fifo; for non-shared endpoints, this may be called
+ * once for a tx fifo and once for an rx fifo.
+ *
+ * returns negative errno or offset for next fifo.
+ */
+static int __init
+fifo_setup(struct musbfsh *musbfsh, struct musbfsh_hw_ep *hw_ep,
+	   const struct musbfsh_fifo_cfg *cfg, u16 offset)
+{
+	void __iomem *mbase = musbfsh->mregs;
+	int size = 0;
+	u16 maxpacket = cfg->maxpacket;
+	u16 c_off = offset >> 3;
+	u8 c_size;		/*will be written into the fifo register*/
+
+	INFO("hw_ep->epnum=%d,cfg->hw_ep_num=%d\n", hw_ep->epnum, cfg->hw_ep_num);
+	/* expect hw_ep has already been zero-initialized */
+
+	size = ffs(max_t(u16, maxpacket, 8)) - 1;
+	maxpacket = 1 << size;
+
+	c_size = size - 3;
+	if (cfg->mode == BUF_DOUBLE) {
+		if ((offset + (maxpacket << 1)) > MAXFIFOSIZE)
+			return -EMSGSIZE;
+		c_size |= MUSBFSH_FIFOSZ_DPB;
+	} else {
+		if ((offset + maxpacket) > MAXFIFOSIZE)
+			return -EMSGSIZE;
+	}
+
+	/* configure the FIFO */
+	musbfsh_writeb(mbase, MUSBFSH_INDEX, hw_ep->epnum);
+	/* EP0 reserved endpoint for control, bidirectional;
+	 * EP1 reserved for bulk, two unidirection halves.
+	 */
+	if (hw_ep->epnum == 1)
+		musbfsh->bulk_ep = hw_ep;
+	/* REVISIT error check:  be sure ep0 can both rx and tx ... */
+	switch (cfg->style) {
+	case FIFO_TX:
+		musbfsh_write_txfifosz(mbase, c_size);
+		musbfsh_write_txfifoadd(mbase, c_off);
+		hw_ep->tx_double_buffered = !!(c_size & MUSBFSH_FIFOSZ_DPB);
+		hw_ep->max_packet_sz_tx = maxpacket;
+		break;
+	case FIFO_RX:
+		musbfsh_write_rxfifosz(mbase, c_size);
+		musbfsh_write_rxfifoadd(mbase, c_off);
+		hw_ep->rx_double_buffered = !!(c_size & MUSBFSH_FIFOSZ_DPB);
+		hw_ep->max_packet_sz_rx = maxpacket;
+		break;
+	case FIFO_RXTX:
+		musbfsh_write_txfifosz(mbase, c_size);
+		musbfsh_write_txfifoadd(mbase, c_off);
+		hw_ep->rx_double_buffered = !!(c_size & MUSBFSH_FIFOSZ_DPB);
+		hw_ep->max_packet_sz_rx = maxpacket;
+
+		musbfsh_write_rxfifosz(mbase, c_size);
+		musbfsh_write_rxfifoadd(mbase, c_off);
+		hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
+		hw_ep->max_packet_sz_tx = maxpacket;
+		hw_ep->is_shared_fifo = true;
+		break;
+	}
+
+	/* NOTE rx and tx endpoint irqs aren't managed separately,
+	 * which happens to be ok
+	 */
+	musbfsh->epmask |= (1 << hw_ep->epnum);
+
+	return offset + (maxpacket << ((c_size & MUSBFSH_FIFOSZ_DPB) ? 1 : 0));
+}
+
+static int __init ep_config_from_table(struct musbfsh *musbfsh)
+{
+	const struct musbfsh_fifo_cfg *cfg = NULL;
+	unsigned i = 0;
+	unsigned n = 0;
+	int offset;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints;
+
+	if (musbfsh_host_dynamic_fifo)
+		musbfsh_host_dynamic_fifo_usage_msk = 0;
+
+	INFO("++\n");
+	if (musbfsh->config->fifo_cfg) {
+		cfg = musbfsh->config->fifo_cfg;
+		n = musbfsh->config->fifo_cfg_size;
+		INFO("fifo_cfg, n=%d\n", n);
+	}
+
+	offset = fifo_setup(musbfsh, hw_ep, &ep0_cfg, 0);
+	/* assert(offset > 0) */
+
+	/* NOTE:  for RTL versions >= 1.400 EPINFO and RAMINFO would
+	 * be better than static musbfsh->config->num_eps and DYN_FIFO_SIZE...
+	 */
+
+	for (i = 0; i < n; i++) {
+		u8 epn = cfg->hw_ep_num;
+
+		if (epn >= musbfsh->config->num_eps) {
+			ERR("%s: invalid ep %d\n", musbfsh_driver_name, epn);
+			return -EINVAL;
+		}
+		offset = fifo_setup(musbfsh, hw_ep + epn, cfg++, offset);
+		if (offset < 0) {
+			ERR("%s: mem overrun, ep %d\n", musbfsh_driver_name, epn);
+			return -EINVAL;
+		}
+
+		epn++;		/*include ep0*/
+		musbfsh->nr_endpoints = max(epn, musbfsh->nr_endpoints);
+	}
+	INFO("%s: %d/%d max ep, %d/%d memory\n",
+	     musbfsh_driver_name, n + 1, musbfsh->config->num_eps * 2 - 1, offset, MAXFIFOSIZE);
+
+	if (!musbfsh->bulk_ep) {
+		ERR("%s: missing bulk\n", musbfsh_driver_name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+
+/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
+ * configure endpoints, or take their config from silicon
+ */
+static int __init musbfsh_core_init(struct musbfsh *musbfsh)
+{
+	void __iomem *mbase = musbfsh->mregs;
+	int status = 0;
+	int i;
+	char aInfo[90];
+	u8 reg;
+
+	/* log core options (read using indexed model) */
+	reg = musbfsh_read_configdata(mbase);
+	strcpy(aInfo, (reg & MUSBFSH_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
+	if (reg & MUSBFSH_CONFIGDATA_DYNFIFO) {
+		strcat(aInfo, ", dyn FIFOs");
+		musbfsh->dyn_fifo = true;
+	}
+	if (reg & MUSBFSH_CONFIGDATA_MPRXE) {
+		strcat(aInfo, ", bulk combine");
+		musbfsh->bulk_combine = true;
+	}
+	if (reg & MUSBFSH_CONFIGDATA_MPTXE) {
+		strcat(aInfo, ", bulk split");
+		musbfsh->bulk_split = true;
+	}
+	if (reg & MUSBFSH_CONFIGDATA_HBRXE) {
+		strcat(aInfo, ", HB-ISO Rx");
+		musbfsh->hb_iso_rx = true;
+	}
+	if (reg & MUSBFSH_CONFIGDATA_HBTXE) {
+		strcat(aInfo, ", HB-ISO Tx");
+		musbfsh->hb_iso_tx = true;
+	}
+	if (reg & MUSBFSH_CONFIGDATA_SOFTCONE)
+		strcat(aInfo, ", SoftConn");
+
+	WARNING("%s: ConfigData=0x%02x (%s)\n", musbfsh_driver_name, reg, aInfo);
+
+	INFO("++\n");
+	/* configure ep0 */
+	musbfsh_configure_ep0(musbfsh);
+
+	/* discover endpoint configuration */
+	musbfsh->nr_endpoints = 1;	/*will update in func: ep_config_from_table*/
+	musbfsh->epmask = 1;
+
+	status = ep_config_from_table(musbfsh);
+	if (status < 0)
+		return status;
+
+	/* finish init, and print endpoint config */
+	for (i = 0; i < musbfsh->nr_endpoints; i++) {
+		struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints + i;
+
+		hw_ep->fifo = MUSBFSH_FIFO_OFFSET(i) + mbase;
+		hw_ep->regs = MUSBFSH_EP_OFFSET(i, 0) + mbase;
+		hw_ep->rx_reinit = 1;
+		hw_ep->tx_reinit = 1;
+
+		if (hw_ep->max_packet_sz_tx) {
+			INFO("%s: hw_ep %d%s, %smax %d,and hw_ep->epnum=%d\n",
+			     musbfsh_driver_name, i,
+			     hw_ep->is_shared_fifo ? "shared" : "tx",
+			     hw_ep->tx_double_buffered
+			     ? "doublebuffer, " : "", hw_ep->max_packet_sz_tx, hw_ep->epnum);
+		}
+		if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
+			INFO("%s: hw_ep %d%s, %smax %d,and hw_ep->epnum=%d\n",
+			     musbfsh_driver_name, i,
+			     "rx",
+			     hw_ep->rx_double_buffered
+			     ? "doublebuffer, " : "", hw_ep->max_packet_sz_rx, hw_ep->epnum);
+		}
+		if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
+			INFO("hw_ep %d not configured\n", i);
+	}
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+void musbfsh_read_clear_generic_interrupt(struct musbfsh *musbfsh)
+{
+	musbfsh->int_usb = musbfsh_readb(musbfsh->mregs, MUSBFSH_INTRUSB);
+	musbfsh->int_tx = musbfsh_readw(musbfsh->mregs, MUSBFSH_INTRTX);
+	musbfsh->int_rx = musbfsh_readw(musbfsh->mregs, MUSBFSH_INTRRX);
+	musbfsh->int_dma = musbfsh_readb(musbfsh->mregs, MUSBFSH_HSDMA_INTR);
+	#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	musbfsh->int_queue = musbfsh_readl(musbfsh->mregs, MUSBFSH_QISAR);
+	#endif
+	INFO("** musbfsh::IRQ! usb%04x tx%04x rx%04x dma%04x\n",
+	     musbfsh->int_usb, musbfsh->int_tx, musbfsh->int_rx, musbfsh->int_dma);
+	/* clear interrupt status */
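+	/* writing the just-read value back acks exactly the handled bits (write-1-to-clear) */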
+	musbfsh_writew(musbfsh->mregs, MUSBFSH_INTRTX, musbfsh->int_tx);
+	musbfsh_writew(musbfsh->mregs, MUSBFSH_INTRRX, musbfsh->int_rx);
+	musbfsh_writeb(musbfsh->mregs, MUSBFSH_INTRUSB, musbfsh->int_usb);
+	musbfsh_writeb(musbfsh->mregs, MUSBFSH_HSDMA_INTR, musbfsh->int_dma);
+	#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	musbfsh_writel(musbfsh->mregs, MUSBFSH_QISAR, musbfsh->int_queue);
+	musbfsh->int_queue &= ~(musbfsh_readl(musbfsh->mregs, MUSBFSH_QIMR));
+	#endif
+}
+
+static irqreturn_t generic_interrupt(int irq, void *__hci)
+{
+	unsigned long flags;
+	irqreturn_t retval = IRQ_NONE;
+	struct musbfsh *musbfsh = __hci;
+	u16 int_level1 = 0;
+
+	INFO("musbfsh:generic_interrupt++\r\n");
+	spin_lock_irqsave(&musbfsh->lock, flags);
+
+	musbfsh_read_clear_generic_interrupt(musbfsh);
+	int_level1 = musbfsh_readw(musbfsh->mregs, USB11_L1INTS);
+	INFO("Level 1 Interrupt Status 0x%x\r\n", int_level1);
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	if (musbfsh->int_usb || musbfsh->int_tx || musbfsh->int_rx || musbfsh->int_queue)
+		retval = musbfsh_interrupt(musbfsh);
+#else
+	if (musbfsh->int_usb || musbfsh->int_tx || musbfsh->int_rx)
+		retval = musbfsh_interrupt(musbfsh);
+#endif
+
+#ifndef CONFIG_MUSBFSH_PIO_ONLY
+	if (musbfsh->int_dma)
+		retval = musbfsh_dma_controller_irq(irq, musbfsh->musbfsh_dma_controller);
+#endif
+
+	spin_unlock_irqrestore(&musbfsh->lock, flags);
+	return retval;
+}
+
+/*
+ * handle all the irqs defined by the HDRC core. for now we expect:  other
+ * irq sources (phy, dma, etc) will be handled first, musbfsh->int_* values
+ * will be assigned, and the irq will already have been acked.
+ *
+ * called in irq context with spinlock held, irqs blocked
+ */
+irqreturn_t musbfsh_interrupt(struct musbfsh *musbfsh)
+{
+	irqreturn_t retval = IRQ_NONE;
+	u8 devctl, power;
+	int ep_num;
+	u32 reg;
+
+	devctl = musbfsh_readb(musbfsh->mregs, MUSBFSH_DEVCTL);
+	power = musbfsh_readb(musbfsh->mregs, MUSBFSH_POWER);
+
+	INFO("** musbfsh::devctl 0x%x power 0x%x\n", devctl, power);
+
+	/* the core can interrupt us for multiple reasons; docs have
+	 * a generic interrupt flowchart to follow
+	 */
+	if (musbfsh->int_usb)
+		retval |= musbfsh_stage0_irq(musbfsh, musbfsh->int_usb, devctl, power);
+
+	/* "stage 1" is handling endpoint irqs */
+
+	/* handle endpoint 0 first */
+	if (musbfsh->int_tx & 1)
+		retval |= musbfsh_h_ep0_irq(musbfsh);
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	/* process generic queue interrupt */
+	if (musbfsh->int_queue) {
+		musbfsh_q_irq(musbfsh);
+		retval = IRQ_HANDLED;
+	}
+#endif
+
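+	/*
+	 * INTRRX/INTRTX are endpoint bitmaps: bit N flags endpoint N. EP0
+	 * (bit 0) was serviced above, so both scans below shift it out and
+	 * start from endpoint 1.
+	 */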
+	/* RX on endpoints 1-15 */
+	reg = musbfsh->int_rx >> 1;
+	ep_num = 1;
+	while (reg) {
+		if (reg & 1) {
+			/* musbfsh_ep_select(musbfsh->mregs, ep_num); */
+			/* REVISIT just retval = ep->rx_irq(...) */
+			retval = IRQ_HANDLED;
+			musbfsh_host_rx(musbfsh, ep_num);	/*the real ep_num*/
+		}
+
+		reg >>= 1;
+		ep_num++;
+	}
+
+	/* TX on endpoints 1-15 */
+	reg = musbfsh->int_tx >> 1;
+	ep_num = 1;
+	while (reg) {
+		if (reg & 1) {
+			/* musbfsh_ep_select(musbfsh->mregs, ep_num); */
+			/* REVISIT just retval |= ep->tx_irq(...) */
+			retval = IRQ_HANDLED;
+			musbfsh_host_tx(musbfsh, ep_num);
+		}
+		reg >>= 1;
+		ep_num++;
+	}
+	return retval;
+}
+
+
+#ifndef CONFIG_MUSBFSH_PIO_ONLY
+static bool use_dma = 1;
+
+/* "modprobe ... use_dma=0" etc */
+module_param(use_dma, bool, 0);
+MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
+
+void musbfsh_dma_completion(struct musbfsh *musbfsh, u8 epnum, u8 transmit)
+{
+	INFO("++\n");
+	/* called with controller lock already held */
+
+	/* endpoints 1..15 */
+	if (transmit)
+		musbfsh_host_tx(musbfsh, epnum);
+	else
+		musbfsh_host_rx(musbfsh, epnum);
+}
+
+#else
+#define use_dma			0
+#endif
+
+/* --------------------------------------------------------------------------
+ * Init support
+ */
+
+static struct musbfsh *
+allocate_instance(struct device *dev, struct musbfsh_hdrc_config *config, void __iomem *mbase)
+{
+	struct musbfsh *musbfsh;
+	struct musbfsh_hw_ep *ep;
+	int epnum;
+	struct usb_hcd *hcd;
+
+	INFO("++\n");
+
+	musbfsh_hc_driver.flags = HCD_USB11 | HCD_MEMORY;
+	hcd = usb_create_hcd(&musbfsh_hc_driver, dev, dev_name(dev));
+	if (!hcd)
+		return NULL;
+	/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
+
+	musbfsh = hcd_to_musbfsh(hcd);
+	INIT_LIST_HEAD(&musbfsh->control);
+	INIT_LIST_HEAD(&musbfsh->in_bulk);
+	INIT_LIST_HEAD(&musbfsh->out_bulk);
+
+	hcd->uses_new_polling = 1;
+	hcd->has_tt = 1;
+
+	musbfsh->vbuserr_retry = VBUSERR_RETRY_COUNT;
+
+	musbfsh->mregs = mbase;
+	musbfsh->ctrl_base = mbase;
+	musbfsh->nIrq = -ENODEV;	/* will be updated after return from this func */
+	musbfsh->config = config;
+	if (musbfsh->config->num_eps > MUSBFSH_C_NUM_EPS)
+		musbfsh_bug();
+
+	for (epnum = 0, ep = musbfsh->endpoints; epnum < musbfsh->config->num_eps; epnum++, ep++) {
+		ep->musbfsh = musbfsh;
+		ep->epnum = epnum;
+	}
+
+	musbfsh->controller = dev;
+	return musbfsh;
+}
+
+static void musbfsh_free(struct musbfsh *musbfsh)
+{
+	/* this has multiple entry modes. it handles fault cleanup after
+	 * probe(), where things may be partially set up, as well as rmmod
+	 * cleanup after everything's been de-activated.
+	 */
+	INFO("++\n");
+	#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	musbfsh_disable_q_all(musbfsh);
+	musbfsh_qmu_exit(musbfsh);
+	#endif
+
+	if (musbfsh->nIrq >= 0) {
+		if (musbfsh->irq_wake)
+			disable_irq_wake(musbfsh->nIrq);
+		free_irq(musbfsh->nIrq, musbfsh);
+	}
+	if (is_dma_capable() && musbfsh->dma_controller) {
+		struct dma_controller *c = musbfsh->dma_controller;
+
+		(void)c->stop(c);
+		musbfsh_dma_controller_destroy(c);
+	}
+	usb_put_hcd(musbfsh_to_hcd(musbfsh));
+	kfree(musbfsh);
+}
+
+static int musbfsh_set_pin_state(const char *pin_name, struct pinctrl *ctrl)
+{
+	struct pinctrl_state *pin_state;
+	int ret;
+
+	pin_state = pinctrl_lookup_state(ctrl, pin_name);
+	if (IS_ERR(pin_state)) {
+		ret = PTR_ERR(pin_state);
+		dev_err(musbfsh_Device->controller, "Cannot find %s\n", pin_name);
+		return ret;
+	}
+	pinctrl_select_state(ctrl, pin_state);
+
+	return 0;
+}
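+
+/*
+ * Usage example (pinctrl state names as used by the init code below):
+ *	musbfsh_set_pin_state("usb1_p1_high", musbfsh_pinctrl);
+ */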
+
+#ifdef CONFIG_MTK_MUSBFSH_OCP_SUPPORT
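+/*
+ * Over-current (CLDET) handling pattern used below: the level-triggered GPIO
+ * interrupt masks itself and defers to a work item, which reads the pin,
+ * selects the matching port-power pinctrl state, flips the trigger polarity
+ * to catch the opposite level, and then re-enables the interrupt.
+ */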
+static void musbfsh_p1_cldet_work(struct work_struct *data)
+{
+	int val = 0;
+
+	val = gpio_get_value(ocp_pin_p1);
+	if (val == 0) {
+		pr_err("musbfsh_p1_cldet_work has happenned: %i, IRQF_TRIGGER_LOW\n", val);
+
+		musbfsh_set_pin_state("usb1_p1_low", musbfsh_pinctrl);
+
+		irq_set_irq_type(ocp_p1_irq_number, IRQF_TRIGGER_HIGH);
+	} else {
+		pr_err("musbfsh_p1_cldet_work has happenned: %i, IRQF_TRIGGER_HIGH\n", val);
+
+		mdelay(500);
+		musbfsh_set_pin_state("usb1_p1_high", musbfsh_pinctrl);
+
+		irq_set_irq_type(ocp_p1_irq_number, IRQF_TRIGGER_LOW);
+	}
+	enable_irq(ocp_p1_irq_number);
+}
+
+static irqreturn_t musbfsh_p1_cldet_int(int irq, void *dev_id)
+{
+	disable_irq_nosync(ocp_p1_irq_number);
+
+	schedule_delayed_work(&musbfsh_Device->p1_cldet_pin_work, 0);
+
+	pr_err("[musbfsh] musbfsh_p1_cldet_intinterrupt assert\n");
+
+	return IRQ_HANDLED;
+}
+
+static void musbfsh_p2_cldet_work(struct work_struct *data)
+{
+	int val = 0;
+
+	val = gpio_get_value(ocp_pin_p2);
+	if (val == 0) {
+		pr_err("musbfsh_p2_cldet_work has happenned: %i, IRQF_TRIGGER_LOW\n", val);
+
+		musbfsh_set_pin_state("usb1_p2_low", musbfsh_pinctrl);
+
+		irq_set_irq_type(ocp_p2_irq_number, IRQF_TRIGGER_HIGH);
+	} else {
+		pr_err("musbfsh_p2_cldet_work has happenned: %i, IRQF_TRIGGER_HIGH\n", val);
+
+		mdelay(500);
+		musbfsh_set_pin_state("usb1_p2_high", musbfsh_pinctrl);
+
+		irq_set_irq_type(ocp_p2_irq_number, IRQF_TRIGGER_LOW);
+	}
+	enable_irq(ocp_p2_irq_number);
+}
+
+static irqreturn_t musbfsh_p2_cldet_int(int irq, void *dev_id)
+{
+	disable_irq_nosync(ocp_p2_irq_number);
+
+	schedule_delayed_work(&musbfsh_Device->p2_cldet_pin_work, 0);
+
+	pr_err("[musbfsh] musbfsh_p2_cldet_intinterrupt assert\n");
+
+	return IRQ_HANDLED;
+}
+
+void musbfsh_init_ocp_pin(struct musbfsh *musbfsh)
+{
+	struct device_node *node;
+	struct platform_device *pdev_node;
+
+	node = of_find_compatible_node(NULL, NULL, "mediatek,mt8167-usb11");
+	if (node == NULL) {
+		WARNING("USB PORT1 - get node failed\n");
+		return;
+	} else {
+		ocp_pin_p1 = of_get_named_gpio(node, "ocp_p1_cldet", 0);
+		if (!gpio_is_valid(ocp_pin_p1)) {
+			WARNING("ocp p1 gpio fail\n");
+		} else {
+			ocp_p1_irq_number = gpio_to_irq(ocp_pin_p1);
+			WARNING("usb ocp p1:%u, p1_irq_num:%u\n", ocp_pin_p1, ocp_p1_irq_number);
+		}
+
+		ocp_pin_p2 = of_get_named_gpio(node, "ocp_p2_cldet", 0);
+		if (!gpio_is_valid(ocp_pin_p2)) {
+			WARNING("ocp p2 gpio fail\n");
+		} else {
+			ocp_p2_irq_number = gpio_to_irq(ocp_pin_p2);
+			WARNING("usb ocp p2:%u, p2_irq_num:%u\n", ocp_pin_p2, ocp_p2_irq_number);
+		}
+	}
+
+	pdev_node = of_find_device_by_node(node);
+	if (!pdev_node) {
+		WARNING("ERROR: Cannot find usb1 pdev!\n");
+		return;
+	}
+	musbfsh_pinctrl = devm_pinctrl_get(&pdev_node->dev);
+	if (IS_ERR(musbfsh_pinctrl))
+		WARNING("ERROR: Cannot find musbfsh_pinctrl!\n");
+
+	musbfsh_set_pin_state("usb1_p1_high", musbfsh_pinctrl);
+	musbfsh_set_pin_state("usb1_p2_high", musbfsh_pinctrl);
+
+	musbfsh_set_pin_state("usb1_p1_cldet_init", musbfsh_pinctrl);
+	musbfsh_set_pin_state("usb1_p2_cldet_init", musbfsh_pinctrl);
+
+	/*register overcurrent gpio interrupt */
+	if (request_irq(ocp_p1_irq_number, musbfsh_p1_cldet_int, IRQF_TRIGGER_LOW, "usb1_ocp_p1", musbfsh)) {
+		dev_err(musbfsh->controller, "musbfsh::request_irq OCP P1 %u failed!\n", ocp_p1_irq_number);
+	}
+
+	if (request_irq(ocp_p2_irq_number, musbfsh_p2_cldet_int, IRQF_TRIGGER_LOW, "usb1_ocp_p2", musbfsh)) {
+		dev_err(musbfsh->controller, "musbfsh::request_irq OCP P1 %u failed!\n", ocp_p1_irq_number);
+	}
+}
+#else
+void musbfsh_init_enable_pin(struct musbfsh *musbfsh)
+{
+	struct device_node *node;
+	struct platform_device *pdev_node;
+
+	node = of_find_compatible_node(NULL, NULL, "mediatek,mt8167-usb11");
+	if (node == NULL) {
+		WARNING("USB PORT1 - get node failed\n");
+	} else {
+		pdev_node = of_find_device_by_node(node);
+		if (!pdev_node) {
+			WARNING("ERROR: Cannot find usb1 pdev!\n");
+			return;
+		}
+		musbfsh_pinctrl = devm_pinctrl_get(&pdev_node->dev);
+		if (IS_ERR(musbfsh_pinctrl))
+			WARNING("ERROR: Cannot find musbfsh_pinctrl!\n");
+		else {
+			WARNING("will enable port if need!\n");
+			musbfsh_set_pin_state("usb1_p1_high", musbfsh_pinctrl);
+			musbfsh_set_pin_state("usb1_p2_high", musbfsh_pinctrl);
+		}
+	}
+}
+#endif
+
+/*
+ * Perform generic per-controller initialization.
+ *
+ * @dev: the controller (already clocked, etc)
+ * @nIrq: irq
+ * @ctrl: virtual address of controller registers,
+ *	not yet corrected for platform-specific offsets
+ */
+#ifdef CONFIG_OF
+static int
+musbfsh_init_controller(struct device *dev, int nIrq, void __iomem *ctrl, void __iomem *ctrlp)
+#else
+static int musbfsh_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+#endif
+{
+	int status;
+	struct musbfsh *musbfsh;
+	struct musbfsh_hdrc_platform_data *plat = dev->platform_data;
+	struct usb_hcd *hcd;
+
+	INFO("++\n");
+	/* The driver might handle more features than the board; OK.
+	 * Fail when the board needs a feature that's not enabled.
+	 */
+	INFO("[Flow][USB11]%s:%d,pbase= 0x%lx\n", __func__, __LINE__,
+	       (unsigned long)ctrlp);
+	if (!plat) {
+		dev_dbg(dev, "no platform_data?\n");
+		status = -ENODEV;
+		goto fail0;
+	}
+
+	/* allocate */
+	musbfsh = allocate_instance(dev, plat->config, ctrl);
+	if (!musbfsh) {
+		status = -ENOMEM;
+		goto fail0;
+	}
+
+	spin_lock_init(&musbfsh->lock);
+	musbfsh->board_mode = plat->mode;
+	musbfsh->board_set_power = plat->set_power;
+	musbfsh->ops = plat->platform_ops;
+
+	musbfsh->config->fifo_cfg = epx_cfg;
+	musbfsh->config->fifo_cfg_size = sizeof(epx_cfg) / sizeof(struct musbfsh_fifo_cfg);
+	/* The musbfsh_platform_init() call:
+	 *   - adjusts musbfsh->mregs and musbfsh->isr if needed,
+	 *   - may initialize an integrated transceiver
+	 *   - initializes musbfsh->xceiv, usually by otg_get_transceiver()
+	 *   - activates clocks.
+	 *   - stops powering VBUS
+	 *   - assigns musbfsh->board_set_vbus if host mode is enabled
+	 *
+	 * There are various transceiver configurations.  Blackfin,
+	 * DaVinci, TUSB60x0, and others integrate them.  OMAP3 uses
+	 * external/discrete ones in various flavors (twl4030 family,
+	 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
+	 */
+	musbfsh_Device = musbfsh;
+#ifdef CONFIG_OF
+	INFO("[Flow][USB11]%s:%d  unsigned longbase == 0x%lx ,musbfsh_Device->phy_reg_base = 0x%lx\n",
+	     __func__, __LINE__, (unsigned long)ctrlp,
+	     (unsigned long)(musbfsh_Device->phy_reg_base));
+
+	musbfsh_Device->phy_reg_base = ctrlp;
+
+#endif
+	musbfsh->isr = generic_interrupt;
+	INFO("[Flow][USB11]%s:%d  unsigned longbase == 0x%lx ,musbfsh_Device->phy_reg_base = 0x%lx\n",
+	     __func__, __LINE__, (unsigned long)ctrlp,
+	     (unsigned long)(musbfsh_Device->phy_reg_base));
+	status = musbfsh_platform_init(musbfsh);
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	if (status < 0) {
+		ERR("musbfsh_platform_init fail!status=%d", status);
+		goto fail1;
+	}
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	if (!musbfsh->isr) {
+		status = -ENODEV;
+		goto fail2;
+	}
+#ifndef CONFIG_MUSBFSH_PIO_ONLY
+	INFO("DMA mode\n");
+	INFO("[Flow][USB11]%s:%d  DMA Mode\n", __func__, __LINE__);
+	if (use_dma && dev->dma_mask) {
+		struct dma_controller *c;
+
+		c = musbfsh_dma_controller_create(musbfsh, musbfsh->mregs);	/*only software config*/
+		musbfsh->dma_controller = c;
+		if (c)
+			(void)c->start(c);	/*do nothing in fact*/
+	}
+#else
+	INFO("PIO mode\n");
+	INFO("[Flow][USB11]%s:%d	PIO Mode\n", __func__, __LINE__);
+#endif
+
+	/* ideally this would be abstracted in platform setup */
+	if (!is_dma_capable() || !musbfsh->dma_controller)
+		dev->dma_mask = NULL;
+
+	/* be sure interrupts are disabled before connecting ISR */
+	musbfsh_platform_disable(musbfsh);	/* wz: needs implementing on MT65xx, but must not power off! */
+	musbfsh_generic_disable(musbfsh);	/*must power on the USB module*/
+
+	/* setup musb parts of the core (especially endpoints) */
+	status = musbfsh_core_init(musbfsh);
+	if (status < 0) {
+		ERR("musbfsh_core_init fail!");
+		goto fail2;
+	}
+
+	#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	musbfsh_qmu_init(musbfsh);
+	#endif
+
+	/* attach to the IRQ */
+	INFO("[Flow][USB11]%s:%d ,request_irq %d\n", __func__, __LINE__, nIrq);
+	/* wx? usb_add_hcd will also try to do request_irq, if hcd_driver.irq is set */
+	if (request_irq(nIrq, musbfsh->isr, IRQF_TRIGGER_LOW, dev_name(dev), musbfsh)) {
+		dev_err(dev, "musbfsh::request_irq %d failed!\n", nIrq);
+		status = -ENODEV;
+		goto fail3;
+	}
+	musbfsh->nIrq = nIrq;	/*update the musbfsh->nIrq after request_irq !*/
+	/* FIXME this handles wakeup irqs wrong */
+	if (enable_irq_wake(nIrq) == 0) {	/*wx, need to be replaced by modifying kernel/core/mt6573_ost.c*/
+		musbfsh->irq_wake = 1;
+		device_init_wakeup(dev, 1);	/*wx? usb_add_hcd will do this any way*/
+	} else {
+		musbfsh->irq_wake = 0;
+	}
+
+	/* host side needs more setup */
+	hcd = musbfsh_to_hcd(musbfsh);
+	/* plat->power is in 2 mA units; if it is 0, the budget defaults to 500 mA */
+	hcd->power_budget = 2 * (plat->power ? plat->power : 250);
+
+	/* For the host-only role, we can activate right away.
+	 * (We expect the ID pin to be forcibly grounded!!)
+	 * Otherwise, wait till the gadget driver hooks up.
+	 */
+	status = usb_add_hcd(musbfsh_to_hcd(musbfsh), -1, 0);	/* important!! */
+	hcd->self.uses_pio_for_control = 1;
+
+	if (status < 0) {
+		ERR("usb_add_hcd fail!");
+		goto fail3;
+	}
+	status = musbfsh_init_debugfs(musbfsh);
+
+	if (status < 0) {
+		ERR("usb_add_debugfs fail!");
+		goto fail4;
+	}
+	dev_info(dev, "USB controller at %p using %s, IRQ %d\n",
+		 ctrl, (is_dma_capable() && musbfsh->dma_controller)
+		 ? "DMA" : "PIO", musbfsh->nIrq);
+
+	return 0;
+
+fail4:
+	musbfsh_exit_debugfs(musbfsh);
+
+fail3:
+	#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	musbfsh_qmu_exit(musbfsh);
+	#endif
+
+fail2:
+	if (musbfsh->irq_wake)
+		device_init_wakeup(dev, 0);
+	musbfsh_platform_exit(musbfsh);
+
+fail1:
+	dev_err(musbfsh->controller, "musbfsh_init_controller failed with status %d\n", status);
+	musbfsh_free(musbfsh);
+
+fail0:
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
+ * bridge to a platform device; this driver then suffices.
+ */
+
+#ifndef CONFIG_MUSBFSH_PIO_ONLY
+static u64 *orig_dma_mask;
+#endif
+
+static int musbfsh_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node;
+
+	int irq = MT_USB1_IRQ_ID;
+	int status;
+	unsigned char __iomem *base = (unsigned char __iomem *)USB11_BASE;
+#ifdef CONFIG_OF
+	void __iomem *pbase;
+	unsigned long usb_mac_base;
+	unsigned long usb_phy11_base;
+
+	INFO("[Flow][USB11]%s:%d,CONFIG_OF\n", __func__, __LINE__);
+	node = of_find_compatible_node(NULL, NULL, "mediatek,mt8167-usb11");
+	if (node == NULL)
+		INFO("[Flow][USB11] get node failed\n");
+	base = of_iomap(node, 0);
+	usb1_irq_number = irq_of_parse_and_map(node, 0);
+	pbase = of_iomap(node, 1);
+
+	usb_mac_base = (unsigned long)base;
+	usb_phy11_base = (unsigned long)pbase;
+	irq = usb1_irq_number;
+
+	INFO("[Flow][USB11]musb probe reg: 0x%lx ,usb_phy11_base == 0x%lx ,pbase == 0x%lx irq: %d\n",
+	     usb_mac_base, usb_phy11_base, (unsigned long)pbase, usb1_irq_number);
+
+#endif
+	INFO("++\n");
+	INFO("[Flow][USB11]%s: %d\n", __func__, __LINE__);
+
+#ifndef CONFIG_MUSBFSH_PIO_ONLY	/*using DMA*/
+	/* clobbered by use_dma=n */
+	orig_dma_mask = dev->dma_mask;
+#endif
+
+#ifdef CONFIG_OF
+	status = musbfsh_init_controller(dev, irq, base, (void __iomem *)pbase);
+#else
+	INFO("[Flow][USB11]%s:%d, base == %p\n", __func__, __LINE__, USB_BASE);
+	base = (void *)USB_BASE;
+	status = musbfsh_init_controller(dev, irq, base);
+#endif
+
+	if (status < 0)
+		ERR("musbfsh_init_controller failed with status %d\n", status);
+	INFO("--\n");
+#ifdef IC_USB
+	device_create_file(dev, &dev_attr_start);
+	WARNING("IC-USB is enabled\n");
+#endif
+	INFO("[Flow][USB11]%s:%d end ere\n", __func__, __LINE__);
+
+	if (status == 0) {
+#ifdef CONFIG_MTK_MUSBFSH_OCP_SUPPORT
+		INIT_DELAYED_WORK(&musbfsh_Device->p1_cldet_pin_work, musbfsh_p1_cldet_work);
+		INIT_DELAYED_WORK(&musbfsh_Device->p2_cldet_pin_work, musbfsh_p2_cldet_work);
+
+		musbfsh_init_ocp_pin(musbfsh_Device);
+#else
+		musbfsh_init_enable_pin(musbfsh_Device);
+#endif
+	}
+
+	return status;
+}
+
+static int __exit musbfsh_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct musbfsh *musbfsh = dev_to_musbfsh(dev);
+	void __iomem *ctrl_base = musbfsh->ctrl_base;
+
+	INFO("++\n");
+	/* this gets called on rmmod.
+	 *  - Host mode: host may still be active
+	 *  - Peripheral mode: peripheral is deactivated (or never-activated)
+	 *  - OTG mode: both roles are deactivated (or never-activated)
+	 */
+	musbfsh_shutdown(pdev);
+	if (musbfsh->board_mode == MUSBFSH_HOST)
+		usb_remove_hcd(musbfsh_to_hcd(musbfsh));
+	musbfsh_writeb(musbfsh->mregs, MUSBFSH_DEVCTL, 0);
+	musbfsh_platform_exit(musbfsh);
+	musbfsh_writeb(musbfsh->mregs, MUSBFSH_DEVCTL, 0);
+
+	musbfsh_free(musbfsh);
+	iounmap(ctrl_base);
+	device_init_wakeup(&pdev->dev, 0);
+#ifndef CONFIG_MUSBFSH_PIO_ONLY
+	dma_set_mask(dev, *dev->parent->dma_mask);
+#endif
+	return 0;
+}
+
+#ifdef	CONFIG_PM
+
+static void musbfsh_save_context(struct musbfsh *musbfsh)
+{
+	int i;
+	void __iomem *musbfsh_base = musbfsh->mregs;
+	void __iomem *epio;
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	mtk11_dma_burst_setting = musbfsh_readl(musbfsh->mregs, 0x204);
+	mtk11_qmu_ioc_setting = musbfsh_readl((musbfsh->mregs + MUSBFSH_QISAR), 0x30);
+#endif
+	musbfsh->context.power = musbfsh_readb(musbfsh_base, MUSBFSH_POWER);
+	musbfsh->context.intrtxe = musbfsh_readw(musbfsh_base, MUSBFSH_INTRTXE);
+	musbfsh->context.intrrxe = musbfsh_readw(musbfsh_base, MUSBFSH_INTRRXE);
+	musbfsh->context.intrusbe = musbfsh_readb(musbfsh_base, MUSBFSH_INTRUSBE);
+	musbfsh->context.index = musbfsh_readb(musbfsh_base, MUSBFSH_INDEX);
+	musbfsh->context.devctl = musbfsh_readb(musbfsh_base, MUSBFSH_DEVCTL);
+
+	musbfsh->context.l1_int = musbfsh_readl(musbfsh_base, USB11_L1INTM);
+
+	for (i = 0; i < MUSBFSH_C_NUM_EPS - 1; ++i) {
+		struct musbfsh_hw_ep *hw_ep;
+
+		hw_ep = &musbfsh->endpoints[i];
+		if (!hw_ep)
+			continue;
+
+		epio = hw_ep->regs;
+		if (!epio)
+			continue;
+
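+		/* select endpoint i's indexed register bank before reading its CSRs */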
+		musbfsh_writeb(musbfsh_base, MUSBFSH_INDEX, i);
+		musbfsh->context.index_regs[i].txmaxp = musbfsh_readw(epio, MUSBFSH_TXMAXP);
+		musbfsh->context.index_regs[i].txcsr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+		musbfsh->context.index_regs[i].rxmaxp = musbfsh_readw(epio, MUSBFSH_RXMAXP);
+		musbfsh->context.index_regs[i].rxcsr = musbfsh_readw(epio, MUSBFSH_RXCSR);
+
+		if (musbfsh->dyn_fifo) {
+			musbfsh->context.index_regs[i].txfifoadd =
+			    musbfsh_read_txfifoadd(musbfsh_base);
+			musbfsh->context.index_regs[i].rxfifoadd =
+			    musbfsh_read_rxfifoadd(musbfsh_base);
+			musbfsh->context.index_regs[i].txfifosz =
+			    musbfsh_read_txfifosz(musbfsh_base);
+			musbfsh->context.index_regs[i].rxfifosz =
+			    musbfsh_read_rxfifosz(musbfsh_base);
+		}
+	}
+}
+
+static void musbfsh_restore_context(struct musbfsh *musbfsh)
+{
+	int i;
+	void __iomem *musbfsh_base = musbfsh->mregs;
+	void __iomem *epio;
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	musbfsh_writel(musbfsh->mregs, 0x204, mtk11_dma_burst_setting);
+	musbfsh_writel((musbfsh->mregs + MUSBFSH_QISAR), 0x30, mtk11_qmu_ioc_setting);
+#endif
+
+	musbfsh_writeb(musbfsh_base, MUSBFSH_POWER, musbfsh->context.power);
+	musbfsh_writew(musbfsh_base, MUSBFSH_INTRTXE, musbfsh->context.intrtxe);
+	musbfsh_writew(musbfsh_base, MUSBFSH_INTRRXE, musbfsh->context.intrrxe);
+	musbfsh_writeb(musbfsh_base, MUSBFSH_INTRUSBE, musbfsh->context.intrusbe);
+	musbfsh_writeb(musbfsh_base, MUSBFSH_DEVCTL, musbfsh->context.devctl);
+
+	for (i = 0; i < MUSBFSH_C_NUM_EPS - 1; ++i) {
+		struct musbfsh_hw_ep *hw_ep;
+
+		hw_ep = &musbfsh->endpoints[i];
+		if (!hw_ep)
+			continue;
+
+		epio = hw_ep->regs;
+		if (!epio)
+			continue;
+
+		musbfsh_writeb(musbfsh_base, MUSBFSH_INDEX, i);
+		musbfsh_writew(epio, MUSBFSH_TXMAXP, musbfsh->context.index_regs[i].txmaxp);
+		musbfsh_writew(epio, MUSBFSH_TXCSR, musbfsh->context.index_regs[i].txcsr);
+		musbfsh_writew(epio, MUSBFSH_RXMAXP, musbfsh->context.index_regs[i].rxmaxp);
+		musbfsh_writew(epio, MUSBFSH_RXCSR, musbfsh->context.index_regs[i].rxcsr);
+
+		if (musbfsh->dyn_fifo) {
+			musbfsh_write_txfifosz(musbfsh_base,
+					       musbfsh->context.index_regs[i].txfifosz);
+			musbfsh_write_rxfifosz(musbfsh_base,
+					       musbfsh->context.index_regs[i].rxfifosz);
+			musbfsh_write_txfifoadd(musbfsh_base,
+						musbfsh->context.index_regs[i].txfifoadd);
+			musbfsh_write_rxfifoadd(musbfsh_base,
+						musbfsh->context.index_regs[i].rxfifoadd);
+		}
+	}
+
+	musbfsh_writeb(musbfsh_base, MUSBFSH_INDEX, musbfsh->context.index);
+	mb();	/* make sure the restored registers land before interrupts are re-enabled */
+	/* Enable all interrupts at DMA
+	 * Caution: The DMA Reg type is WRITE to SET or CLEAR
+	 */
+	musbfsh_writel(musbfsh->mregs, MUSBFSH_HSDMA_INTR,
+		       0xFF | (0xFF << MUSBFSH_DMA_INTR_UNMASK_SET_OFFSET));
+	musbfsh_writel(musbfsh_base, USB11_L1INTM, musbfsh->context.l1_int);
+}
+
+int mt_usb11_clock_prepare(void)
+{
+	int retval = 0;
+
+#if 0
+	INFO("mt_usb11_clock_prepare\n");
+	retval = clk_prepare(usbpll_clk);
+	if (retval)
+		goto exit;
+	/* retval = clk_prepare(usb_clk); */
+	retval = clk_prepare(usbmcu_clk);
+	if (retval)
+		goto exit;
+	retval = clk_prepare(icusb_clk);
+	if (retval)
+		goto exit;
+
+	return 0;
+exit:
+	WARNING("[USB11] clock prepare fail\n");
+#endif
+	return retval;
+}
+
+void mt_usb11_clock_unprepare(void)
+{
+	INFO("mt_usb11_clock_unprepare\n");
+
+#if 0
+	clk_unprepare(icusb_clk);
+	clk_unprepare(usbmcu_clk);
+	/* clk_unprepare(usb_clk); */
+	clk_unprepare(usbpll_clk);
+#endif
+}
+
+#if 0
+/* If you want to trigger a disconnect irq, use this func */
+static void musbfsh_disconnect(struct musbfsh *musbfsh)
+{
+	WARNING("trigger DISCONNECT!\n");
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	musbfsh_disable_q_all(musbfsh);
+#endif
+
+	usb_hcd_resume_root_hub(musbfsh_to_hcd(musbfsh));
+	musbfsh_root_disconnect(musbfsh);
+}
+#endif
+
+static int musbfsh_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	unsigned long flags;
+	struct musbfsh *musbfsh = dev_to_musbfsh(&pdev->dev);
+
+	WARNING("++\n");
+	spin_lock_irqsave(&musbfsh->lock, flags);
+	musbfsh_save_context(musbfsh);
+	musbfsh_platform_set_power(musbfsh, 0);
+	spin_unlock_irqrestore(&musbfsh->lock, flags);
+
+	msleep(20);
+
+	spin_lock_irqsave(&musbfsh->lock, flags);
+	musbfsh_generic_disable(musbfsh);
+	spin_unlock_irqrestore(&musbfsh->lock, flags);
+
+	mt_usb11_clock_unprepare();
+	WARNING("-\n");
+	return 0;
+}
+
+static int musbfsh_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	unsigned long flags;
+	struct musbfsh *musbfsh = dev_to_musbfsh(&pdev->dev);
+	int retval = 0;
+
+	WARNING("++\n");
+	retval = mt_usb11_clock_prepare();
+	if (retval) {
+		WARNING("!!musbfsh clock prepre fail,need to check!!\n");
+		return retval;
+	}
+	spin_lock_irqsave(&musbfsh->lock, flags);
+	musbfsh_platform_set_power(musbfsh, 1);
+	musbfsh_restore_context(musbfsh);
+	spin_unlock_irqrestore(&musbfsh->lock, flags);
+	WARNING("-\n");
+	return 0;
+}
+
+static const struct dev_pm_ops musbfsh_dev_pm_ops = {
+	.suspend = musbfsh_suspend,
+	.resume = musbfsh_resume,
+};
+
+#define MUSBFSH_DEV_PM_OPS (&musbfsh_dev_pm_ops)
+#else
+#define	MUSBFSH_DEV_PM_OPS	NULL
+#endif
+
+static struct platform_driver musbfsh_driver = {
+	.driver = {
+		   .name = (char *)musbfsh_driver_name,
+		   .bus = &platform_bus_type,
+		   .of_match_table = apusb_of_ids,
+		   .owner = THIS_MODULE,
+		   .pm = MUSBFSH_DEV_PM_OPS,
+		   },
+	.probe = musbfsh_probe,
+	.remove = __exit_p(musbfsh_remove),
+	.shutdown = musbfsh_shutdown,
+};
+
+
+/*-------------------------------------------------------------------------*/
+static int __init musbfsh_init(void)
+{
+	if (usb_disabled())	/*based on the config variable.*/
+		return 0;
+
+	WARNING("MUSBFSH is enabled\n");
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+
+	usb11_init();
+
+	return platform_driver_register(&musbfsh_driver);
+}
+
+/* make us init after usbcore and i2c (transceivers, regulators, etc)
+ * and before usb gadget and host-side drivers start to register
+ */
+late_initcall_sync(musbfsh_init);
+
+static void __exit musbfsh_cleanup(void)
+{
+	/*wake_lock_destroy(&musbfsh_suspend_lock);*/
+	platform_driver_unregister(&musbfsh_driver);
+	usb11_exit();
+}
+
+module_exit(musbfsh_cleanup);
+
+int musbfsh_debug;
+
+module_param(musbfsh_debug, int, 0644);
diff --git a/drivers/misc/mediatek/usb11/mt8167/musbfsh_core.h b/drivers/misc/mediatek/usb11/mt8167/musbfsh_core.h
new file mode 100644
index 0000000..9c1361b
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/mt8167/musbfsh_core.h
@@ -0,0 +1,437 @@
+/*
+ * MUSB OTG driver defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSBFSH_CORE_H__
+#define __MUSBFSH_CORE_H__
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+struct musbfsh;
+struct musbfsh_hw_ep;
+struct musbfsh_ep;
+
+/* Helper defines for struct musbfsh->hwvers */
+#define MUSBFSH_HWVERS_MAJOR(x)	(((x) >> 10) & 0x1f)
+#define MUSBFSH_HWVERS_MINOR(x)	((x) & 0x3ff)
+#define MUSBFSH_HWVERS_RC		0x8000
+#define MUSBFSH_HWVERS_1300	0x52C
+#define MUSBFSH_HWVERS_1400	0x590
+#define MUSBFSH_HWVERS_1800	0x720
+#define MUSBFSH_HWVERS_1900	0x784
+#define MUSBFSH_HWVERS_2000	0x800
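+/* e.g. MUSBFSH_HWVERS_2000 (0x800) decodes as major 2, minor 0 */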
+
+#include "musbfsh.h"
+#include "musbfsh_io.h"
+#include "musbfsh_regs.h"
+#include "musbfsh_debug.h"
+
+#include <linux/i2c.h>
+#ifndef CONFIG_OF
+#include <mach/irqs.h>
+#include <mach/eint.h>
+#include <mach/mt_reg_base.h>
+#endif
+
+#include <linux/platform_device.h>
+#if defined(CONFIG_MTK_LEGACY)
+#include <cust_gpio_usage.h>
+#endif
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+extern int mtk11_host_qmu_concurrent;
+extern int mtk11_host_qmu_pipe_msk;
+extern int mtk11_host_qmu_max_active_isoc_gpd;
+extern int mtk11_host_qmu_max_number_of_pkts;
+extern int mtk11_qmu_dbg_level;	/* refer to musb_core.c */
+extern int mtk11_qmu_max_gpd_num;
+/*extern struct musb_hw_ep *mtk11_qmu_isoc_ep;*/
+extern int mtk11_isoc_ep_start_idx;
+extern int mtk11_isoc_ep_gpd_count;
+#endif
+
+extern int musbfsh_host_dynamic_fifo;
+extern int musbfsh_host_dynamic_fifo_usage_msk;
+
+extern struct clk *usbpll_clk;
+extern struct clk *usbmcu_clk;
+/* extern struct clk *usb_clk; */
+extern struct clk *icusb_clk;
+
+#define MT_USB1_IRQ_ID                      (105)
+#define USB1_BASE                   0xF1270000
+
+#ifdef CONFIG_OF
+extern struct device_node *usb11_dts_np;
+#endif
+
+#define MYDBG(fmt, args...) pr_warn("MTK_ICUSB [DBG], <%s(), %d> " fmt, \
+				    __func__, __LINE__, ## args)
+
+/* NOTE:  otg and peripheral-only state machines start at B_IDLE.
+ * OTG or host-only go to A_IDLE when ID is sensed.
+ */
+#define is_host_active(m)		((m)->is_host)
+
+/****************************** HOST ROLE ***********************************/
+
+#define	is_host_capable()	(1)
+
+#define host_hc_driver_flag() (HCD_USB2 | HCD_MEMORY)
+
+extern irqreturn_t musbfsh_h_ep0_irq(struct musbfsh *);
+extern void musbfsh_host_tx(struct musbfsh *, u8);
+extern void musbfsh_host_rx(struct musbfsh *, u8);
+extern int mt_usb11_clock_prepare(void);
+extern void mt_usb11_clock_unprepare(void);
+
+/****************************** CONSTANTS ********************************/
+
+#ifndef MUSBFSH_C_NUM_EPS
+#define MUSBFSH_C_NUM_EPS ((u8)16)
+#endif
+
+#ifndef MUSBFSH_MAX_END0_PACKET
+#define MUSBFSH_MAX_END0_PACKET ((u16)MUSBFSH_EP0_FIFOSIZE)
+#endif
+
+/* host side ep0 states */
+enum musbfsh_h_ep0_state {
+	MUSBFSH_EP0_IDLE,
+	MUSBFSH_EP0_START,	/* expect ack of setup */
+	MUSBFSH_EP0_IN,	/* expect IN DATA */
+	MUSBFSH_EP0_OUT,	/* expect ack of OUT DATA */
+	MUSBFSH_EP0_STATUS,	/* expect ack of STATUS */
+} __packed;
+
+/*************************** REGISTER ACCESS ********************************/
+
+/* "indexed" mapping: INDEX register controls register bank select */
+#define musbfsh_ep_select(_mbase, _epnum) \
+	musbfsh_writeb((_mbase), MUSBFSH_INDEX, (_epnum))
+#define	MUSBFSH_EP_OFFSET			MUSBFSH_INDEXED_OFFSET
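+/*
+ * e.g. musbfsh_ep_select(mbase, 3) points the indexed CSR window at EP3, so
+ * subsequent TXCSR/RXCSR/TXMAXP/RXMAXP accesses hit that endpoint's bank
+ * (see the context save/restore loops in musbfsh_core.c).
+ */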
+
+/****************************** FUNCTIONS ********************************/
+
+#define test_devctl_hst_mode(_x) \
+	(musbfsh_readb((_x)->mregs, MUSBFSH_DEVCTL)&MUSBFSH_DEVCTL_HM)
+
+/******************************** TYPES *************************************/
+struct dma_channel;
+
+/**
+ * struct musbfsh_platform_ops - Operations passed to musbfsh_core by HW glue layer
+ * @init:	turns on clocks, sets up platform-specific registers, etc
+ * @exit:	undoes @init
+ * @enable:	enables the controller
+ * @disable:	disables the controller
+ * @set_mode:	forcefully changes operating mode
+ * @try_idle:	tries to idle the IP
+ * @vbus_status: returns vbus status if possible
+ * @set_vbus:	forces vbus status
+ * @set_power:	switches controller power
+ * @adjust_channel_params: pre check for standard dma channel_program func
+ */
+struct musbfsh_platform_ops {
+	int (*init)(struct musbfsh *musbfsh);
+	int (*exit)(struct musbfsh *musbfsh);
+
+	void (*enable)(struct musbfsh *musbfsh);
+	void (*disable)(struct musbfsh *musbfsh);
+
+	int (*set_mode)(struct musbfsh *musbfsh, u8 mode);
+	void (*try_idle)(struct musbfsh *musbfsh, unsigned long timeout);
+
+	int (*vbus_status)(struct musbfsh *musbfsh);
+	void (*set_vbus)(struct musbfsh *musbfsh, int on);
+	void (*set_power)(struct musbfsh *musbfsh, int action);
+
+	int (*adjust_channel_params)(struct dma_channel *channel,
+	u16 packet_sz, u8 *mode, dma_addr_t *dma_addr, u32 *len);
+};
+
+/*
+ * struct musbfsh_hw_ep - endpoint hardware (bidirectional)
+ *
+ * Ordered slightly for better cacheline locality.
+ */
+struct musbfsh_hw_ep {
+	struct musbfsh *musbfsh;
+	void __iomem *fifo;
+	void __iomem *regs;
+	/* index in musbfsh->endpoints[]  */
+	u8 epnum;
+	/* hardware configuration, possibly dynamic */
+	bool is_shared_fifo;
+	bool tx_double_buffered;
+	bool rx_double_buffered;
+	u16 max_packet_sz_tx;
+	u16 max_packet_sz_rx;
+	struct dma_channel *tx_channel;
+	struct dma_channel *rx_channel;
+	void __iomem *target_regs;
+	/* currently scheduled peripheral endpoint */
+	struct musbfsh_qh *in_qh;
+	struct musbfsh_qh *out_qh;
+	u8 rx_reinit;
+	u8 tx_reinit;
+	u8 type;
+};
+
+
+struct musbfsh_csr_regs {
+	/* FIFO registers */
+	u16 txmaxp, txcsr, rxmaxp, rxcsr;
+	u16 rxfifoadd, txfifoadd;
+	u8 txtype, txinterval, rxtype, rxinterval;
+	u8 rxfifosz, txfifosz;
+	u8 txfunaddr, txhubaddr, txhubport;
+	u8 rxfunaddr, rxhubaddr, rxhubport;
+};
+
+struct musbfsh_context_registers {
+	u8 power;
+	u16 intrtxe, intrrxe;
+	u8 intrusbe;
+	u16 frame;
+	u8 index, testmode;
+
+	u8 devctl, busctl, misc;
+	u32 otg_interfsel;
+	u32 l1_int;
+
+	struct musbfsh_csr_regs index_regs[MUSBFSH_C_NUM_EPS];
+};
+
+/*
+ * struct musbfsh - Driver instance data.
+ */
+struct musbfsh {
+	/* device lock */
+	spinlock_t lock;
+	struct musbfsh_context_registers context;
+	const struct musbfsh_platform_ops *ops;
+	irqreturn_t (*isr)(int, void *);
+
+	/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
+#define MUSBFSH_PORT_STAT_RESUME	(1 << 31)
+
+	u32 port1_status;
+
+	unsigned long rh_timer;
+	enum musbfsh_h_ep0_state ep0_stage;
+
+	/* bulk traffic normally dedicates endpoint hardware, and each
+	 * direction has its own ring of host side endpoints.
+	 * we try to progress the transfer at the head of each endpoint's
+	 * queue until it completes or NAKs too much; then we try the next
+	 * endpoint.
+	 */
+	struct musbfsh_hw_ep *bulk_ep;
+
+	struct list_head control;	/* of musbfsh_qh */
+	struct list_head in_bulk;	/* of musbfsh_qh */
+	struct list_head out_bulk;	/* of musbfsh_qh */
+
+	struct dma_controller *dma_controller;
+	struct musbfsh_dma_controller *musbfsh_dma_controller;
+
+	struct device *controller;
+	void __iomem *ctrl_base;
+	void __iomem *mregs;
+	void __iomem *phy_reg_base;
+
+
+	/* passed down from chip/board specific irq handlers */
+	u8 int_usb;
+	u16 int_rx;
+	u16 int_tx;
+	u8 int_dma;
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	u32 int_queue;
+#endif
+	int nIrq;
+	unsigned irq_wake:1;
+
+	struct musbfsh_hw_ep endpoints[MUSBFSH_C_NUM_EPS];
+#define control_ep		endpoints
+
+#define VBUSERR_RETRY_COUNT	3
+	u16 vbuserr_retry;
+	u16 epmask;
+	u8 nr_endpoints;
+
+	u8 board_mode;	/* enum musbfsh_mode */
+	int (*board_set_power)(int state);
+	bool is_host;
+
+	unsigned is_multipoint:1;
+
+	unsigned long idle_timeout;	/* Next timeout in jiffies */
+
+	/* active means connected and not suspended */
+	unsigned is_active:1;
+	unsigned ignore_disconnect:1;	/* during bus resets */
+
+	unsigned hb_iso_rx:1;	/* high bandwidth iso rx? */
+	unsigned hb_iso_tx:1;	/* high bandwidth iso tx? */
+	unsigned dyn_fifo:1;	/* dynamic FIFO supported? */
+
+	unsigned bulk_split:1;
+#define	can_bulk_split(musb, type) \
+	(((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
+
+	unsigned bulk_combine:1;
+#define	can_bulk_combine(musb, type) \
+	(((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
+
+	/*
+	 * FIXME: Remove this flag.
+	 *
+	 * This is only added to allow Blackfin to work
+	 * with current driver. For some unknown reason
+	 * Blackfin doesn't work with double buffering
+	 * and that's enabled by default.
+	 *
+	 * We added this flag to forcefully disable double
+	 * buffering until we get it working.
+	 */
+	unsigned double_buffer_not_ok:1;
+
+	struct musbfsh_hdrc_config *config;
+
+#ifdef CONFIG_MTK_MUSBFSH_OCP_SUPPORT
+	struct delayed_work p1_cldet_pin_work;
+	struct delayed_work p2_cldet_pin_work;
+#endif
+};
+
+static inline void musbfsh_configure_ep0(struct musbfsh *musbfsh)
+{
+	musbfsh->endpoints[0].max_packet_sz_tx = MUSBFSH_EP0_FIFOSIZE;
+	musbfsh->endpoints[0].max_packet_sz_rx = MUSBFSH_EP0_FIFOSIZE;
+	musbfsh->endpoints[0].is_shared_fifo = true;
+}
+
+/***************************** Glue it together *****************************/
+#ifndef CONFIG_MUSBFSH_PIO_ONLY
+extern irqreturn_t musbfsh_dma_controller_irq(int irq, void *private_data);
+#endif
+
+extern int musbfsh_init_debugfs(struct musbfsh *musb);
+extern void musbfsh_exit_debugfs(struct musbfsh *musb);
+
+extern const char musbfsh_driver_name[];
+
+extern void musbfsh_start(struct musbfsh *musbfsh);
+extern void musbfsh_stop(struct musbfsh *musbfsh);
+extern int musbfsh_get_id(struct device *dev, gfp_t gfp_mask);
+extern void musbfsh_put_id(struct device *dev, int id);
+
+extern void musbfsh_write_fifo(struct musbfsh_hw_ep *ep, u16 len, const u8 *src);
+extern void musbfsh_read_fifo(struct musbfsh_hw_ep *ep, u16 len, u8 *dst);
+
+extern void musbfsh_load_testpacket(struct musbfsh *);
+
+extern irqreturn_t musbfsh_interrupt(struct musbfsh *);
+
+static inline void musbfsh_platform_set_power(struct musbfsh *musbfsh, int action)
+{
+	if (musbfsh->ops->set_power)
+		musbfsh->ops->set_power(musbfsh, action);
+}
+
+static inline void musbfsh_platform_set_vbus(struct musbfsh *musbfsh, int is_on)
+{
+	if (musbfsh->ops->set_vbus)
+		musbfsh->ops->set_vbus(musbfsh, is_on);
+}
+
+/* to conform original api interface */
+static inline void musbfsh_set_vbus(struct musbfsh *musbfsh, int is_on)
+{
+	musbfsh_platform_set_vbus(musbfsh, is_on);
+}
+
+static inline void musbfsh_platform_enable(struct musbfsh *musbfsh)
+{
+	if (musbfsh->ops->enable)
+		musbfsh->ops->enable(musbfsh);
+}
+
+static inline void musbfsh_platform_disable(struct musbfsh *musbfsh)
+{
+	if (musbfsh->ops->disable)
+		musbfsh->ops->disable(musbfsh);
+}
+
+static inline int musbfsh_platform_set_mode(struct musbfsh *musbfsh, u8 mode)
+{
+	if (!musbfsh->ops->set_mode)
+		return 0;
+
+	return musbfsh->ops->set_mode(musbfsh, mode);
+}
+
+static inline void musbfsh_platform_try_idle(struct musbfsh *musbfsh, unsigned long timeout)
+{
+	if (musbfsh->ops->try_idle)
+		musbfsh->ops->try_idle(musbfsh, timeout);
+}
+
+static inline int musbfsh_platform_get_vbus_status(struct musbfsh *musbfsh)
+{
+	if (!musbfsh->ops->vbus_status)
+		return 0;
+
+	return musbfsh->ops->vbus_status(musbfsh);
+}
+
+static inline int musbfsh_platform_init(struct musbfsh *musbfsh)
+{
+	if (!musbfsh->ops->init)
+		return -EINVAL;
+
+	return musbfsh->ops->init(musbfsh);
+}
+
+static inline int musbfsh_platform_exit(struct musbfsh *musbfsh)
+{
+	if (!musbfsh->ops->exit)
+		return -EINVAL;
+
+	return musbfsh->ops->exit(musbfsh);
+}
+
+#endif				/* __MUSBFSH_CORE_H__ */
diff --git a/drivers/misc/mediatek/usb11/mt8167/musbfsh_debugfs.c b/drivers/misc/mediatek/usb11/mt8167/musbfsh_debugfs.c
new file mode 100644
index 0000000..ca777c3
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/mt8167/musbfsh_debugfs.c
@@ -0,0 +1,593 @@
+/*
+ * MUSB OTG driver debugfs support
+ *
+ * Copyright 2010 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+
+#include <linux/uaccess.h>
+
+#include "musbfsh_mt65xx.h"
+#include "musbfsh_core.h"
+#include <linux/usb/ch9.h>
+
+#define MUSBFSH_OTG_CSR0 0x102
+
+static struct dentry *musbfsh_debugfs_root;
+
+struct musbfsh_register_map {
+	char *name;
+	unsigned offset;
+	unsigned size;
+};
+
+static const struct musbfsh_register_map musbfsh_regmap[] = {
+	{"FAddr", 0x00, 8},
+	{"Power", 0x01, 8},
+	{"Frame", 0x0c, 16},
+	{"Index", 0x0e, 8},
+	{"Testmode", 0x0f, 8},
+	{"TxMaxPp", 0x10, 16},
+	{"TxCSRp", 0x12, 16},
+	{"RxMaxPp", 0x14, 16},
+	{"RxCSR", 0x16, 16},
+	{"RxCount", 0x18, 16},
+	{"ConfigData", 0x1f, 8},
+	{"DevCtl", 0x60, 8},
+	{"MISC", 0x61, 8},
+	{"TxFIFOsz", 0x62, 8},
+	{"RxFIFOsz", 0x63, 8},
+	{"TxFIFOadd", 0x64, 16},
+	{"RxFIFOadd", 0x66, 16},
+	{"VControl", 0x68, 32},
+	{"HWVers", 0x6C, 16},
+	{"EPInfo", 0x78, 8},
+	{"RAMInfo", 0x79, 8},
+	{"LinkInfo", 0x7A, 8},
+	{"VPLen", 0x7B, 8},
+	{"HS_EOF1", 0x7C, 8},
+	{"FS_EOF1", 0x7D, 8},
+	{"LS_EOF1", 0x7E, 8},
+	{"SOFT_RST", 0x7F, 8},
+	{"DMA_CNTLch0", 0x204, 16},
+	{"DMA_ADDRch0", 0x208, 32},
+	{"DMA_COUNTch0", 0x20C, 32},
+	{"DMA_CNTLch1", 0x214, 16},
+	{"DMA_ADDRch1", 0x218, 32},
+	{"DMA_COUNTch1", 0x21C, 32},
+	{"DMA_CNTLch2", 0x224, 16},
+	{"DMA_ADDRch2", 0x228, 32},
+	{"DMA_COUNTch2", 0x22C, 32},
+	{"DMA_CNTLch3", 0x234, 16},
+	{"DMA_ADDRch3", 0x238, 32},
+	{"DMA_COUNTch3", 0x23C, 32},
+	{"DMA_CNTLch4", 0x244, 16},
+	{"DMA_ADDRch4", 0x248, 32},
+	{"DMA_COUNTch4", 0x24C, 32},
+	{"DMA_CNTLch5", 0x254, 16},
+	{"DMA_ADDRch5", 0x258, 32},
+	{"DMA_COUNTch5", 0x25C, 32},
+	{"DMA_CNTLch6", 0x264, 16},
+	{"DMA_ADDRch6", 0x268, 32},
+	{"DMA_COUNTch6", 0x26C, 32},
+	{"DMA_CNTLch7", 0x274, 16},
+	{"DMA_ADDRch7", 0x278, 32},
+	{"DMA_COUNTch7", 0x27C, 32},
+	{}			/* Terminating Entry */
+};
+
+
+
+static int musbfsh_regdump_show(struct seq_file *s, void *unused)
+{
+	struct musbfsh *musbfsh = s->private;
+	unsigned i;
+
+	seq_puts(s, "MUSB (M)HDRC Register Dump\n");
+
+	for (i = 0; i < ARRAY_SIZE(musbfsh_regmap); i++) {
+		switch (musbfsh_regmap[i].size) {
+		case 8:
+			seq_printf(s, "%-12s: %02x\n", musbfsh_regmap[i].name,
+			musbfsh_readb(musbfsh->mregs, musbfsh_regmap[i].offset));
+			break;
+		case 16:
+			seq_printf(s, "%-12s: %04x\n", musbfsh_regmap[i].name,
+			musbfsh_readw(musbfsh->mregs, musbfsh_regmap[i].offset));
+			break;
+		case 32:
+			seq_printf(s, "%-12s: %08x\n", musbfsh_regmap[i].name,
+			musbfsh_readl(musbfsh->mregs, musbfsh_regmap[i].offset));
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int musbfsh_regdump_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, musbfsh_regdump_show, inode->i_private);
+}
+
+
+
+static int musbfsh_test_mode_show(struct seq_file *s, void *unused)
+{
+	struct musbfsh *musbfsh = s->private;
+	unsigned test;
+
+	test = musbfsh_readb(musbfsh->mregs, MUSBFSH_TESTMODE);
+
+	if (test & MUSBFSH_TEST_FORCE_HOST)
+		seq_puts(s, "force host\n");
+
+	if (test & MUSBFSH_TEST_FIFO_ACCESS)
+		seq_puts(s, "fifo access\n");
+
+	if (test & MUSBFSH_TEST_FORCE_FS)
+		seq_puts(s, "force full-speed\n");
+
+	if (test & MUSBFSH_TEST_FORCE_HS)
+		seq_puts(s, "force high-speed\n");
+
+	if (test & MUSBFSH_TEST_PACKET)
+		seq_puts(s, "test packet\n");
+
+	if (test & MUSBFSH_TEST_K)
+		seq_puts(s, "test K\n");
+
+	if (test & MUSBFSH_TEST_J)
+		seq_puts(s, "test J\n");
+
+	if (test & MUSBFSH_TEST_SE0_NAK)
+		seq_puts(s, "test SE0 NAK\n");
+
+	return 0;
+}
+
+static const struct file_operations musbfsh_regdump_fops = {
+	.open = musbfsh_regdump_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+
+static int musbfsh_test_mode_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, musbfsh_test_mode_show, inode->i_private);
+}
+
+void musbfshdebugfs_otg_write_fifo(u16 len, u8 *buf, struct musbfsh *mtk_musb)
+{
+	int i;
+
+	INFO("musb_otg_write_fifo,len=%d\n", len);
+	for (i = 0; i < len; i++)
+		musbfsh_writeb(mtk_musb->mregs, 0x20, *(buf + i));
+}
+
+void musbfshdebugfs_h_setup(struct usb_ctrlrequest *setup, struct musbfsh *mtk_musb)
+{
+	unsigned short csr0;
+
+	INFO("musbfsh_h_setup++\n");
+	musbfshdebugfs_otg_write_fifo(sizeof(struct usb_ctrlrequest), (u8 *) setup, mtk_musb);
+	csr0 = musbfsh_readw(mtk_musb->mregs, MUSBFSH_OTG_CSR0);
+	INFO("musbfsh_h_setup,csr0=0x%x\n", csr0);
+	csr0 |= MUSBFSH_CSR0_H_SETUPPKT | MUSBFSH_CSR0_TXPKTRDY;
+	musbfsh_writew(mtk_musb->mregs, MUSBFSH_OTG_CSR0, csr0);
+
+	INFO("musbfsh_h_setup--\n");
+}
+
+static ssize_t musbfsh_test_mode_write(struct file *file,
+				       const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct musbfsh *musbfsh = s->private;
+	u8 test = 0;
+	char buf[20];
+	unsigned char power;
+	struct usb_ctrlrequest setup_packet;
+
+	setup_packet.bRequestType = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
+	setup_packet.bRequest = USB_REQ_GET_DESCRIPTOR;
+	setup_packet.wIndex = 0;
+	setup_packet.wValue = 0x0100;
+	setup_packet.wLength = 0x40;
+
+	memset(buf, 0x00, sizeof(buf));
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+		return -EFAULT;
+
+	if (!strncmp(buf, "force host", 9))
+		test = MUSBFSH_TEST_FORCE_HOST;
+
+	if (!strncmp(buf, "fifo access", 11))
+		test = MUSBFSH_TEST_FIFO_ACCESS;
+
+	if (!strncmp(buf, "force full-speed", 15))
+		test = MUSBFSH_TEST_FORCE_FS;
+
+	if (!strncmp(buf, "force high-speed", 15))
+		test = MUSBFSH_TEST_FORCE_HS;
+
+	if (!strncmp(buf, "test packet", 10)) {
+		test = MUSBFSH_TEST_PACKET;
+		musbfsh_load_testpacket(musbfsh);
+	}
+
+	if (!strncmp(buf, "test suspend_resume", 18)) {
+		INFO("HS_HOST_PORT_SUSPEND_RESUME\n");
+		msleep(5000);	/*the host must continue sending SOFs for 15s*/
+		INFO("please begin to trigger suspend!\n");
+		msleep(10000);
+		power = musbfsh_readb(musbfsh->mregs, MUSBFSH_POWER);
+		power |= MUSBFSH_POWER_SUSPENDM | MUSBFSH_POWER_ENSUSPEND;
+		musbfsh_writeb(musbfsh->mregs, MUSBFSH_POWER, power);
+		msleep(5000);
+		INFO("please begin to trigger resume!\n");
+		msleep(10000);
+		power &= ~MUSBFSH_POWER_SUSPENDM;
+		power |= MUSBFSH_POWER_RESUME;
+		musbfsh_writeb(musbfsh->mregs, MUSBFSH_POWER, power);
+		mdelay(25);
+		power &= ~MUSBFSH_POWER_RESUME;
+		musbfsh_writeb(musbfsh->mregs, MUSBFSH_POWER, power);
+		/*SOF continue*/
+		musbfshdebugfs_h_setup(&setup_packet, musbfsh);
+		return count;
+	}
+
+	if (!strncmp(buf, "test get_descripter", 18)) {
+		INFO("SINGLE_STEP_GET_DEVICE_DESCRIPTOR\n");
+		/*
+		 * The host issues SOFs for 15s, allowing the test engineer to
+		 * raise the scope trigger just above the SOF voltage level.
+		 */
+		msleep(15000);
+		musbfshdebugfs_h_setup(&setup_packet, musbfsh);
+		return count;
+	}
+
+
+	if (!strncmp(buf, "test K", 6))
+		test = MUSBFSH_TEST_K;
+
+	if (!strncmp(buf, "test J", 6))
+		test = MUSBFSH_TEST_J;
+
+	if (!strncmp(buf, "test SE0 NAK", 12))
+		test = MUSBFSH_TEST_SE0_NAK;
+
+	musbfsh_writeb(musbfsh->mregs, MUSBFSH_TESTMODE, test);
+
+	return count;
+}
+
+static const struct file_operations musbfsh_test_mode_fops = {
+	  .open = musbfsh_test_mode_open,
+	  .write = musbfsh_test_mode_write,
+	  .read = seq_read,
+	  .llseek = seq_lseek,
+	  .release = single_release,
+};
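+
+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
+ *	echo "test packet" > /sys/kernel/debug/musbfsh/testmode
+ * loads the USB-IF test packet and sets MUSBFSH_TEST_PACKET in TESTMODE.
+ */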
+
+static inline int my_isspace(char c)
+{
+	return (c == ' ' || c == '\t' || c == '\n' || c == '\r');
+}
+
+static inline int my_isupper(char c)
+{
+	return (c >= 'A' && c <= 'Z');
+}
+
+static inline int my_isalpha(char c)
+{
+	return ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'));
+}
+
+static inline int my_isdigit(char c)
+{
+	return (c >= '0' && c <= '9');
+}
+
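+/*
+ * Minimal strtoul clone for debugfs input: skips leading whitespace, takes an
+ * optional sign, accepts 0x/0X (hex), 0b/0B (binary) and leading-zero (octal)
+ * prefixes, and saturates at ULONG_MAX on overflow.
+ */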
+static unsigned my_strtoul(const char *nptr, char **endptr, unsigned int base)
+{
+	const char *s = nptr;
+	unsigned long acc;
+	int c;
+	unsigned long cutoff;
+	int neg = 0, any, cutlim;
+
+	do {
+		c = *s++;
+	} while (my_isspace(c));
+	if (c == '-') {
+		neg = 1;
+		c = *s++;
+	} else if (c == '+')
+		c = *s++;
+
+	if ((base == 0 || base == 16) &&
+		c == '0' && (*s == 'x' || *s == 'X')) {
+		c = s[1];
+		s += 2;
+		base = 16;
+	} else if ((base == 0 || base == 2) &&
+			c == '0' && (*s == 'b' || *s == 'B')) {
+		c = s[1];
+		s += 2;
+		base = 2;
+	}
+	if (base == 0)
+		base = c == '0' ? 8 : 10;
+
+	cutoff = (unsigned long)ULONG_MAX / (unsigned long)base;
+	cutlim = (unsigned long)ULONG_MAX % (unsigned long)base;
+
+	for (acc = 0, any = 0;; c = *s++) {
+		if (my_isdigit(c))
+			c -= '0';
+		else if (my_isalpha(c))
+			c -= my_isupper(c) ? 'A' - 10 : 'a' - 10;
+		else
+			break;
+
+		if (c >= base)
+			break;
+		if ((any < 0 || acc > cutoff || acc == cutoff) && c > cutlim)
+			any = -1;
+		else {
+			any = 1;
+			acc *= base;
+			acc += c;
+		}
+	}
+	if (any < 0)
+		acc = ULONG_MAX;
+	else if (neg)
+		acc = -acc;
+
+	if (endptr != 0)
+		*endptr = (char *)(any ? s - 1 : nptr);
+
+	return acc;
+}
+
+static int musbfsh_regw_show(struct seq_file *s, void *unused)
+{
+	INFO("%s -> Called\n", __func__);
+
+	pr_warn("Uage:\n");
+	pr_warn("Mac Write: echo mac:addr:data > regw\n");
+	pr_warn("Phy Write: echo phy:addr:data > regw\n");
+
+	return 0;
+}
+
+static int musbfsh_regw_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, musbfsh_regw_show, inode->i_private);
+}
+
+static ssize_t musbfsh_regw_mode_write(struct file *file,
+				    const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct musbfsh *musbfsh = s->private;
+	char			buf[20];
+	u8 is_mac = 0;
+	char *tmp1 = NULL;
+	char *tmp2 = NULL;
+	unsigned offset = 0;
+	u8 data = 0;
+
+	memset(buf, 0x00, sizeof(buf));
+
+	pr_warn("%s -> Called\n", __func__);
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+		return -EFAULT;
+
+	if ((!strncmp(buf, "MAC", 3)) || (!strncmp(buf, "mac", 3)))
+		is_mac = 1;
+	else if ((!strncmp(buf, "PHY", 3)) || (!strncmp(buf, "phy", 3)))
+		is_mac = 0;
+	else
+		return -EFAULT;
+
+	tmp1 = strchr(buf, ':');
+	if (tmp1 == NULL)
+		return -EFAULT;
+	tmp1++;
+	if (strlen(tmp1) == 0)
+		return -EFAULT;
+
+	tmp2 = strrchr(buf, ':');
+	if (tmp2 == NULL)
+		return -EFAULT;
+	tmp2++;
+	if (strlen(tmp2) == 0)
+		return -EFAULT;
+
+
+	offset = my_strtoul(tmp1, NULL, 0);
+	data = my_strtoul(tmp2, NULL, 0);
+
+	if (is_mac == 1) {
+		pr_warn("Mac base adddr 0x%lx, Write %d[%d]\n", (unsigned long)musbfsh->mregs, offset, data);
+		musbfsh_writeb(musbfsh->mregs, offset, data);
+	} else {
+		pr_warn("Phy base adddr 0x%lx, Write %d[%d]\n",
+		(unsigned long)((void __iomem *)(musbfsh_Device->phy_reg_base + 0x900)), offset, data);
+		USB11PHY_WRITE8(offset, data);
+	}
+
+	return count;
+}
+
+static const struct file_operations musbfsh_regw_fops = {
+	.open = musbfsh_regw_open,
+	.write = musbfsh_regw_mode_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
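+
+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
+ *	echo mac:0x60:0x10 > /sys/kernel/debug/musbfsh/regw
+ * writes 0x10 to the MAC register at offset 0x60 (DevCtl in the map above).
+ */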
+
+static int musbfsh_regr_show(struct seq_file *s, void *unused)
+{
+	INFO("%s -> Called\n", __func__);
+
+	pr_warn("Uage:\n");
+	pr_warn("Mac Read: echo mac:addr > regr\n");
+	pr_warn("Phy Read: echo phy:addr > regr\n");
+
+	return 0;
+}
+
+static int musbfsh_regr_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, musbfsh_regr_show, inode->i_private);
+}
+
+static ssize_t musbfsh_regr_mode_write(struct file *file,
+				    const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct musbfsh *musbfsh = s->private;
+	char			buf[20];
+	u8 is_mac = 0;
+	char *tmp = NULL;
+	unsigned offset = 0;
+
+	memset(buf, 0x00, sizeof(buf));
+
+	pr_warn("%s -> Called\n", __func__);
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+		return -EFAULT;
+
+	if ((!strncmp(buf, "MAC", 3)) || (!strncmp(buf, "mac", 3)))
+		is_mac = 1;
+	else if ((!strncmp(buf, "PHY", 3)) || (!strncmp(buf, "phy", 3)))
+		is_mac = 0;
+	else
+		return -EFAULT;
+
+	tmp = strrchr(buf, ':');
+
+	if (tmp == NULL)
+		return -EFAULT;
+
+	tmp++;
+
+	if (strlen(tmp) == 0)
+		return -EFAULT;
+
+	offset = my_strtoul(tmp, NULL, 0);
+
+	if (is_mac == 1)
+		pr_warn("Read Mac base adddr 0x%lx, Read %d[%d]\n",
+			(unsigned long)musbfsh->mregs, offset, musbfsh_readb(musbfsh->mregs, offset));
+	else
+		pr_warn("Read Phy base adddr 0x%lx, Read %d[%d]\n",
+			(unsigned long)((void __iomem *)(musbfsh_Device->phy_reg_base + 0x900)), offset,
+			USB11PHY_READ8(offset));
+
+	return count;
+}
+
+static const struct file_operations musbfsh_regr_fops = {
+	.open = musbfsh_regr_open,
+	.write = musbfsh_regr_mode_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+int musbfsh_init_debugfs(struct musbfsh *musbfsh)
+{
+	struct dentry *root;
+	struct dentry *file;
+	int ret;
+
+	INFO("musbfsh_init_debugfs\n");
+	INFO("++\n");
+	root = debugfs_create_dir("musbfsh", NULL);
+	if (!root) {
+		ret = -ENOMEM;
+		goto err0;
+	}
+	file = debugfs_create_file("regdump", S_IRUGO, root, musbfsh, &musbfsh_regdump_fops);
+	if (!file) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	INFO("musbfsh_init_debugfs 1\n");
+	file = debugfs_create_file("testmode", S_IRUGO | S_IWUSR,
+								root, musbfsh, &musbfsh_test_mode_fops);
+	if (!file) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	file = debugfs_create_file("regw", S_IRUGO | S_IWUSR, root, musbfsh, &musbfsh_regw_fops);
+	if (!file) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	file = debugfs_create_file("regr", S_IRUGO | S_IWUSR, root, musbfsh, &musbfsh_regr_fops);
+	if (!file) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	INFO("musbfsh_init_debugfs 2\n");
+
+	musbfsh_debugfs_root = root;
+
+	return 0;
+
+err1:
+	debugfs_remove_recursive(root);
+
+err0:
+	return ret;
+}
+
+void /* __init_or_exit */ musbfsh_exit_debugfs(struct musbfsh *musbfsh)
+{
+	  debugfs_remove_recursive(musbfsh_debugfs_root);
+}
diff --git a/drivers/misc/mediatek/usb11/mt8167/musbfsh_mt65xx.c b/drivers/misc/mediatek/usb11/mt8167/musbfsh_mt65xx.c
new file mode 100644
index 0000000..b112a8a
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/mt8167/musbfsh_mt65xx.c
@@ -0,0 +1,675 @@
+/*
+ * Driver for Special USB-PHY of MUSB HOST peripheral
+ *
+ * The power sequence and programming bits differ from SoC to SoC.
+ * Please use the corresponding USB-PHY API for your SoC.
+ * This driver includes Mediatek MUSB DT/ICUSB support.
+ *
+ * Copyright 2015 Mediatek Inc.
+ *      Marvin Lin <marvin.lin@mediatek.com>
+ *      Arvin Wang <arvin.wang@mediatek.com>
+ *      Vincent Fan <vincent.fan@mediatek.com>
+ *      Bryant Lu <bryant.lu@mediatek.com>
+ *      Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *      Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_OF
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#endif
+
+#include "musbfsh_core.h"
+#include "musbfsh_mt65xx.h"
+
+#define FRA (48)
+#define PARA (25)
+bool musbfsh_power;
+
+struct mt_usb11_glue {
+	struct device *dev;
+	struct platform_device *musbfsh;
+};
+static const struct of_device_id apusb_of_ids[] = {
+	{.compatible = "mediatek,mt8167-usb11",},
+	{},
+};
+
+
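+/*
+ * HS slew-rate calibration: run the ring oscillator, measure it with the
+ * on-chip frequency meter (FM_OUT), then program the 4-bit tuning code as
+ * value = round(1024 * FRA * PARA / FM_OUT / 1000), with FRA=48 and PARA=25.
+ */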
+void usb11_hs_slew_rate_cal(void)
+{
+	unsigned long data;
+	unsigned long x;
+	unsigned char value;
+	unsigned long start_time, timeout;
+	unsigned int timeout_flag = 0;
+	/*4 s1:enable usb ring oscillator.*/
+	USB11PHY_WRITE8(0x15, 0x80);
+
+	/*4 s2:wait 1us.*/
+	udelay(1);
+
+	/*4 s3:enable free run clock*/
+	USB11PHY_WRITE8(0xf00 - 0x900 + 0x11, 0x01);
+	/*4 s4:setting cyclecnt*/
+	USB11PHY_WRITE8(0xf00 - 0x900 + 0x01, 0x04);
+	/*4 s5:enable frequency meter*/
+	USB11PHY_SET8(0xf00 - 0x900 + 0x03, 0x05);
+
+	/*4 s6:wait for frequency valid.*/
+	start_time = jiffies;
+	timeout = jiffies + 3 * HZ;
+	while (!(USB11PHY_READ8(0xf00 - 0x900 + 0x10) & 0x1)) {
+		if (time_after(jiffies, timeout)) {
+			timeout_flag = 1;
+			break;
+		}
+	}
+
+	/*4 s7: read result.*/
+	if (timeout_flag) {
+		INFO("[USBPHY] Slew Rate Calibration: Timeout\n");
+		value = 0x4;
+	} else {
+		data = USB11PHY_READ32(0xf00 - 0x900 + 0x0c);
+		x = ((1024 * FRA * PARA) / data);
+		value = (unsigned char)(x / 1000);
+		if (((x - value * 1000) / 100) >= 5)
+			value += 1;
+		/* INFO("[USB11PHY]slew calibration:FM_OUT=%d, x=%d, value=%d\n", data, x, value); */
+	}
+
+	/*4 s8: disable Frequency and run clock.*/
+	USB11PHY_CLR8(0xf00 - 0x900 + 0x03, 0x05);	/*disable frequency meter*/
+	USB11PHY_CLR8(0xf00 - 0x900 + 0x11, 0x01);	/*disable free run clock*/
+
+	/*4 s9: */
+	USB11PHY_WRITE8(0x15, value << 4);
+
+	/*4 s10:disable usb ring oscillator.*/
+	USB11PHY_CLR8(0x15, 0x80);
+}
+
+void mt65xx_usb11_phy_poweron(void)
+{
+	INFO("mt65xx_usb11_phy_poweron++\r\n");
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	/* enable_pll(UNIVPLL, "USB11"); */
+	/*udelay(100); */
+
+	udelay(50);
+
+	USB11PHY_CLR8(0x6b, 0x04);
+	USB11PHY_CLR8(0x6e, 0x01);
+
+	USB11PHY_CLR8(0x1a, 0x80);
+
+	/* remove in MT6588 ?????
+	 * USBPHY_CLR8(0x02, 0x7f);
+	 * USBPHY_SET8(0x02, 0x09);
+	 * USBPHY_CLR8(0x22, 0x03);
+	 */
+
+	USB11PHY_CLR8(0x6a, 0x04);
+	/*USBPHY_SET8(0x1b, 0x08);*/
+
+	/* force VBUS valid */
+	USB11PHY_SET8(0x6C, 0x2C);
+	USB11PHY_SET8(0x6D, 0x3C);
+	/* VBUSVALID=0, AVALID=0, BVALID=0, SESSEND=1, IDDIG=X */
+	USB11PHY_SET8(0x6c, 0x10);
+	USB11PHY_CLR8(0x6c, 0x2e);
+	USB11PHY_SET8(0x6d, 0x3e);
+
+	/* wait */
+	msleep(20);
+	/* restart session */
+
+	/* USB MAC ON and Host Mode */
+	/* VBUSVALID=1, AVALID=1, BVALID=1, SESSEND=0, IDDIG=0 */
+	USB11PHY_CLR8(0x6c, 0x10);
+	USB11PHY_SET8(0x6c, 0x2c);
+	USB11PHY_SET8(0x6d, 0x3e);
+	udelay(800);
+
+}
+
+
+void mt65xx_usb11_phy_savecurrent(void)
+{
+	INFO("mt65xx_usb11_phy_savecurrent++\r\n");
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+
+	/*4 1. switch to USB function (system register, force IP into USB mode).*/
+	USB11PHY_CLR8(0x6b, 0x04);
+	USB11PHY_CLR8(0x6e, 0x01);
+
+	/*4 2. release force suspendm.*/
+	USB11PHY_CLR8(0x6a, 0x04);
+	/*4 3. RG_DPPULLDOWN./RG_DMPULLDOWN.*/
+	USB11PHY_SET8(0x68, 0xc0);
+	/*4 4. RG_XCVRSEL[1:0] =2'b01.*/
+	USB11PHY_CLR8(0x68, 0x30);
+	USB11PHY_SET8(0x68, 0x10);
+	/*4 5. RG_TERMSEL = 1'b1*/
+	USB11PHY_SET8(0x68, 0x04);
+	/*4 6. RG_DATAIN[3:0]=4'b0000*/
+	USB11PHY_CLR8(0x69, 0x3c);
+	/*4 7.force_dp_pulldown, force_dm_pulldown, force_xcversel,force_termsel.*/
+	USB11PHY_SET8(0x6a, 0xba);
+
+	/*4 8.RG_USB20_BC11_SW_EN 1'b0*/
+	USB11PHY_CLR8(0x1a, 0x80);
+	/*4 9.RG_USB20_OTG_VBUSSCMP_EN 1'b0*/
+	USB11PHY_CLR8(0x1a, 0x10);
+	/*4 10. delay 800us.*/
+	udelay(800);
+	/*4 11. rg_usb20_pll_stable = 1*/
+	USB11PHY_SET8(0x63, 0x02);
+
+	udelay(1);
+	/*4 12.  force suspendm = 1.*/
+	USB11PHY_SET8(0x6a, 0x04);
+
+	USB11PHY_CLR8(0x6C, 0x2C);
+	USB11PHY_SET8(0x6C, 0x10);
+	USB11PHY_CLR8(0x6D, 0x3C);
+
+#if 0
+	/*4 13.  wait 1us*/
+	udelay(1);
+	/*4 14. turn off internal 48Mhz PLL.*/
+	enable_phy_clock(false);
+#endif
+}
+
+void mt81xx_usb11_phy_recover(void)
+{
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	INFO("mt65xx_usb11_phy_recover++\r\n");
+	/*4 1. turn on USB reference clock.     */
+	/*enable_pll(UNIVPLL, "USB11"); */
+	/*
+	 * switch to USB function
+	 * (system register, force IP into USB mode).
+	 */
+	USB11PHY_CLR8(0x6b, 0x04);
+	USB11PHY_CLR8(0x6e, 0x01);
+
+	/* RG_USB20_BC11_SW_EN = 1'b0 */
+	USB11PHY_CLR8(0x1a, 0x80);
+
+	/* RG_USB20_DP_100K_EN = 1'b0 */
+	/* RG_USB20_DM_100K_EN = 1'b0 */
+	USB11PHY_CLR8(0x22, 0x03);
+
+	/* release force suspendm */
+	USB11PHY_CLR8(0x6a, 0x04);
+
+	udelay(800);
+
+	/* force enter device mode */
+	USB11PHY_CLR8(0x6c, 0x10);
+	USB11PHY_SET8(0x6c, 0x2E);
+	USB11PHY_SET8(0x6d, 0x3E);
+
+	/* clean PUPD_BIST_EN */
+	/* PUPD_BIST_EN = 1'b0 */
+	/* PMIC will use it to detect charger type */
+	/*USB11PHY_CLR8(0x1d, 0x10);*/
+
+	/* force_uart_en = 1'b0 */
+	USB11PHY_CLR8(0x6b, 0x04);
+	/* RG_UART_EN = 1'b0 */
+	USB11PHY_CLR8(0x6e, 0x01);
+	/* force_suspendm = 1'b0 */
+	USB11PHY_CLR8(0x6a, 0x04);
+
+	USB11PHY_CLR8(0x68, 0xf4);
+	USB11PHY_SET8(0x68, 0x08);
+
+	/* RG_DATAIN[3:0] = 4'b0000 */
+	USB11PHY_CLR8(0x69, 0x3c);
+
+	USB11PHY_CLR8(0x6a, 0xba);
+
+	/* RG_USB20_BC11_SW_EN = 1'b0 */
+	USB11PHY_CLR8(0x1a, 0x80);
+	/* RG_USB20_OTG_VBUSSCMP_EN = 1'b1 */
+	USB11PHY_SET8(0x1a, 0x10);
+
+	udelay(800);
+
+	/* force enter device mode */
+	USB11PHY_CLR8(0x6c, 0x10);
+	USB11PHY_SET8(0x6c, 0x2E);
+	USB11PHY_SET8(0x6d, 0x3E);
+
+	/* force enter host mode */
+	udelay(100);
+
+	USB11PHY_SET8(0x6d, 0x3e);
+	USB11PHY_SET8(0x6c, 0x10);
+	USB11PHY_CLR8(0x6c, 0x2e);
+
+	udelay(5);
+
+	USB11PHY_CLR8(0x6c, 0x10);
+	USB11PHY_SET8(0x6c, 0x2c);
+}
+
+void mt65xx_usb11_phy_recover(void)
+{
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	INFO("mt65xx_usb11_phy_recover++\r\n");
+#if 0
+	/*4 1. turn on USB reference clock.  */
+	enable_phy_clock(true);
+#endif
+
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	USB11PHY_CLR8(0x6b, 0x04);
+	USB11PHY_CLR8(0x6e, 0x01);
+
+	/* RG_USB20_BC11_SW_EN = 1'b0 */
+	USB11PHY_CLR8(0x1a, 0x80);
+
+	/* RG_USB20_DP_100K_EN = 1'b0 */
+	/* RG_USB20_DM_100K_EN = 1'b0 */
+	USB11PHY_CLR8(0x22, 0x03);
+
+	/* release force suspendm */
+	USB11PHY_CLR8(0x6a, 0x04);
+
+	udelay(800);
+
+	/* force enter device mode */
+	USB11PHY_CLR8(0x6c, 0x10);
+	USB11PHY_SET8(0x6c, 0x2E);
+	USB11PHY_SET8(0x6d, 0x3E);
+
+	/* clean PUPD_BIST_EN */
+	/* PUPD_BIST_EN = 1'b0 */
+	/* PMIC will use it to detect charger type */
+	USB11PHY_CLR8(0x1d, 0x10);
+
+	/* force_uart_en = 1'b0 */
+	USB11PHY_CLR8(0x6b, 0x04);
+	/* RG_UART_EN = 1'b0 */
+	USB11PHY_CLR8(0x6e, 0x01);
+	/* force_suspendm = 1'b0 */
+	USB11PHY_CLR8(0x6a, 0x04);
+
+	USB11PHY_CLR8(0x68, 0xf4);
+
+	/* RG_DATAIN[3:0] = 4'b0000 */
+	USB11PHY_CLR8(0x69, 0x3c);
+
+	USB11PHY_CLR8(0x6a, 0xba);
+
+	/* RG_USB20_BC11_SW_EN = 1'b0 */
+	USB11PHY_CLR8(0x1a, 0x80);
+	/* RG_USB20_OTG_VBUSSCMP_EN = 1'b1 */
+	USB11PHY_SET8(0x1a, 0x10);
+
+	udelay(800);
+
+	/* force enter device mode */
+	USB11PHY_CLR8(0x6c, 0x10);
+	USB11PHY_SET8(0x6c, 0x2E);
+	USB11PHY_SET8(0x6d, 0x3E);
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	/* force enter host mode */
+	udelay(100);
+
+	USB11PHY_SET8(0x6d, 0x3e);
+	USB11PHY_READ8(0x6d);
+	USB11PHY_SET8(0x6c, 0x10);
+	USB11PHY_CLR8(0x6c, 0x2e);
+
+	udelay(5);
+
+	USB11PHY_CLR8(0x6c, 0x10);
+	USB11PHY_SET8(0x6c, 0x2c);
+	USB11PHY_READ8(0x6c);
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+}
+
+
+static bool clock_enabled;
+
+void mt65xx_usb11_clock_enable(bool enable)
+{
+	INFO("[Flow][USB]mt65xx_usb11_clock_enable++\r\n");
+	if (enable) {
+		if (clock_enabled) {	/*already enable*/
+			/* do nothing */
+			INFO("[Flow][USB]already enable\r\n");
+		} else {
+#if 0
+			enable_phy_clock(enable);
+			INFO("[Flow][USB]enable usb11 clock ++\r\n");
+			enable_mcu_clock(true);
+			/* clk_enable(usb_clk); */
+			clk_enable(icusb_clk);
+#endif
+			clock_enabled = true;
+		}
+	} else {
+		if (!clock_enabled)	{/*already disabled.*/
+			/* do nothing */
+			INFO("[Flow][USB]already disabled\r\n");
+		} else {
+			INFO("[Flow][USB]disable usb11 clock --\r\n");
+#if 0
+			clk_disable(icusb_clk);
+			/* clk_disable(usb_clk); */
+			enable_mcu_clock(false);
+			enable_phy_clock(enable);
+#endif
+			clock_enabled = false;
+		}
+	}
+}
+
+
+void mt_usb11_poweron(struct musbfsh *musbfsh, int on)
+{
+	static bool recover;
+
+	INFO("mt65xx_usb11_poweron++\r\n");
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	if (on) {
+		if (musbfsh_power) {
+			/* do nothing */
+			INFO("[Flow][USB]power on\r\n");
+		} else {
+			mt65xx_usb11_clock_enable(true);
+			INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+			if (!recover) {
+				INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+				/*mt65xx_usb11_phy_poweron();*/
+				mt65xx_usb11_phy_recover();
+				/*mt81xx_usb11_phy_recover();*/
+				recover = true;
+			} else {
+				INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+				mt65xx_usb11_phy_recover();
+				/*mt81xx_usb11_phy_recover();*/
+			}
+			musbfsh_power = true;
+		}
+	} else {
+		if (!musbfsh_power) {
+			/* do nothing */
+			INFO("[Flow][USB]power off\r\n");
+		} else {
+			mt65xx_usb11_phy_savecurrent();
+			mt65xx_usb11_clock_enable(false);
+			musbfsh_power = false;
+		}
+	}
+}
+
+void mt_usb11_set_vbus(struct musbfsh *musbfsh, int is_on)
+{
+	INFO("is_on=%d\n", is_on);
+}
+
+void musbfs_check_mpu_violation(u32 addr, int wr_vio)
+{
+	void __iomem *mregs = (void *)USB_BASE;
+
+	INFO(KERN_CRIT "MUSB checks EMI MPU violation.\n");
+	INFO(KERN_CRIT "addr = 0x%x, %s violation.\n", addr, wr_vio ? "Write" : "Read");
+	INFO(KERN_CRIT "POWER = 0x%x,DEVCTL= 0x%x.\n", musbfsh_readb(mregs, MUSBFSH_POWER),
+	musbfsh_readb((void __iomem *)USB11_BASE, MUSBFSH_DEVCTL));
+	INFO(KERN_CRIT "DMA_CNTLch0 0x%04x,DMA_ADDRch0 0x%08x,DMA_COUNTch0 0x%08x\n",
+	musbfsh_readw(mregs, 0x204), musbfsh_readl(mregs, 0x208), musbfsh_readl(mregs,
+					0x20C));
+	INFO(KERN_CRIT "DMA_CNTLch1 0x%04x,DMA_ADDRch1 0x%08x,DMA_COUNTch1 0x%08x\n",
+	musbfsh_readw(mregs, 0x214), musbfsh_readl(mregs, 0x218), musbfsh_readl(mregs,
+					0x21C));
+	INFO(KERN_CRIT "DMA_CNTLch2 0x%04x,DMA_ADDRch2 0x%08x,DMA_COUNTch2 0x%08x\n",
+	musbfsh_readw(mregs, 0x224), musbfsh_readl(mregs, 0x228), musbfsh_readl(mregs,
+					0x22C));
+	INFO(KERN_CRIT "DMA_CNTLch3 0x%04x,DMA_ADDRch3 0x%08x,DMA_COUNTch3 0x%08x\n",
+	musbfsh_readw(mregs, 0x234), musbfsh_readl(mregs, 0x238), musbfsh_readl(mregs,
+					0x23C));
+	INFO(KERN_CRIT "DMA_CNTLch4 0x%04x,DMA_ADDRch4 0x%08x,DMA_COUNTch4 0x%08x\n",
+	musbfsh_readw(mregs, 0x244), musbfsh_readl(mregs, 0x248), musbfsh_readl(mregs,
+					0x24C));
+	INFO(KERN_CRIT "DMA_CNTLch5 0x%04x,DMA_ADDRch5 0x%08x,DMA_COUNTch5 0x%08x\n",
+	musbfsh_readw(mregs, 0x254), musbfsh_readl(mregs, 0x258), musbfsh_readl(mregs,
+					0x25C));
+	INFO(KERN_CRIT "DMA_CNTLch6 0x%04x,DMA_ADDRch6 0x%08x,DMA_COUNTch6 0x%08x\n",
+	musbfsh_readw(mregs, 0x264), musbfsh_readl(mregs, 0x268), musbfsh_readl(mregs,
+					0x26C));
+	INFO(KERN_CRIT "DMA_CNTLch7 0x%04x,DMA_ADDRch7 0x%08x,DMA_COUNTch7 0x%08x\n",
+	musbfsh_readw(mregs, 0x274), musbfsh_readl(mregs, 0x278), musbfsh_readl(mregs,
+					0x27C));
+}
+
+int mt_usb11_init(struct musbfsh *musbfsh)
+{
+	INFO("++\n");
+	if (!musbfsh) {
+		ERR("musbfsh_platform_init: error, musbfsh is NULL\n");
+		return -EINVAL;
+	}
+
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	mt_usb11_poweron(musbfsh, true);
+	return 0;
+}
+
+int mt_usb11_exit(struct musbfsh *musbfsh)
+{
+	INFO("++\n");
+	mt_usb11_poweron(musbfsh, false);
+	/* put it here because we can't shutdown PHY power during suspend */
+	/* hwPowerDown(MT65XX_POWER_LDO_VUSB, "USB11");  */
+	return 0;
+}
+
+void musbfsh_hcd_release(struct device *dev)
+{
+/*    INFO("musbfsh_hcd_release++,dev = 0x%08X.\n", (uint32_t)dev);*/
+}
+
+static const struct musbfsh_platform_ops mt_usb11_ops = {
+	.init = mt_usb11_init,
+	.exit = mt_usb11_exit,
+	.set_vbus = mt_usb11_set_vbus,
+	.set_power = mt_usb11_poweron,
+};
+
+static u64 mt_usb11_dmamask = DMA_BIT_MASK(32);
+
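+/*
+ * Probe follows the usual MUSB "glue layer" pattern: the glue device
+ * allocates a child "musbfsh-hdrc" platform device, copies this
+ * device's resources and platform data (built from DT when CONFIG_OF
+ * is set) into it, and registers it so the core driver binds to the
+ * child.
+ */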
+static int mt_usb11_probe(struct platform_device *pdev)
+{
+	struct musbfsh_hdrc_platform_data *pdata = pdev->dev.platform_data;
+	struct platform_device *musbfsh;
+	struct mt_usb11_glue *glue;
+	struct musbfsh_hdrc_config *config;
+	int ret = -ENOMEM;
+	int musbfshid;
+	struct device_node *np = pdev->dev.of_node;
+
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	ret = mt_usb11_clock_prepare();
+	if (ret) {
+		dev_err(&pdev->dev, "musbfsh clock prepare fail\n");
+		goto err0;
+	}
+	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	if (!glue) {
+		dev_err(&pdev->dev, "failed to allocate glue context\n");
+		goto err0;
+	}
+
+	/* get the musbfsh id */
+	musbfshid = musbfsh_get_id(&pdev->dev, GFP_KERNEL);
+	if (musbfshid < 0) {
+		dev_err(&pdev->dev, "failed to allocate musbfsh id\n");
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	musbfsh = platform_device_alloc("musbfsh-hdrc", musbfshid);
+	if (!musbfsh) {
+		dev_err(&pdev->dev, "failed to allocate musb device\n");
+		goto err2;
+	}
+#ifdef CONFIG_OF
+	usb11_dts_np = pdev->dev.of_node;
+	INFO("[usb11] usb11_dts_np %p\n", usb11_dts_np);
+	/*
+	 * usb_irq_number1 = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	 * usb_mac = (unsigned long)of_iomap(pdev->dev.of_node, 0);
+	 * usb_phy_base = (unsigned long)of_iomap(pdev->dev.of_node, 1);
+	 */
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		ERR("failed to allocate musb platform data\n");
+		goto err2;
+	}
+
+	config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
+	if (!config) {
+		ERR("failed to allocate musb hdrc config\n");
+		goto err2;
+	}
+	of_property_read_u32(np, "mode", (u32 *)&pdata->mode);
+
+	/*of_property_read_u32(np, "dma_channels",    (u32 *)&config->dma_channels); */
+	of_property_read_u32(np, "num_eps", (u32 *)&config->num_eps);
+	config->multipoint = of_property_read_bool(np, "multipoint");
+	/*
+	 * config->dyn_fifo = of_property_read_bool(np, "dyn_fifo");
+	 * config->soft_con = of_property_read_bool(np, "soft_con");
+	 * config->dma = of_property_read_bool(np, "dma");
+	 */
+
+	pdata->config = config;
+	INFO("[Flow][USB11]mode = %d ,num_eps = %d,multipoint = %d\n", pdata->mode,
+	config->num_eps, config->multipoint);
+#endif
+
+	musbfsh->id = musbfshid;
+	musbfsh->dev.parent = &pdev->dev;
+	musbfsh->dev.dma_mask = &mt_usb11_dmamask;
+	musbfsh->dev.coherent_dma_mask = mt_usb11_dmamask;
+#ifdef CONFIG_OF
+	pdev->dev.dma_mask = &mt_usb11_dmamask;
+	pdev->dev.coherent_dma_mask = mt_usb11_dmamask;
+	arch_setup_dma_ops(&musbfsh->dev, 0, mt_usb11_dmamask, NULL, 0);
+#endif
+
+	glue->dev = &pdev->dev;
+	glue->musbfsh = musbfsh;
+
+	pdata->platform_ops = &mt_usb11_ops;
+
+	platform_set_drvdata(pdev, glue);
+
+	ret = platform_device_add_resources(musbfsh, pdev->resource, pdev->num_resources);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add resources\n");
+		goto err3;
+	}
+
+	ret = platform_device_add_data(musbfsh, pdata, sizeof(*pdata));
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add platform_data\n");
+		goto err3;
+	}
+
+	ret = platform_device_add(musbfsh);
+
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register musbfsh device\n");
+		goto err3;
+	}
+
+	return 0;
+
+err3:
+	platform_device_put(musbfsh);
+
+err2:
+	musbfsh_put_id(&pdev->dev, musbfshid);
+
+err1:
+	kfree(glue);
+
+err0:
+	return ret;
+}
+
+static int __exit mt_usb_remove(struct platform_device *pdev)
+{
+	struct mt_usb11_glue *glue = platform_get_drvdata(pdev);
+
+	musbfsh_put_id(&pdev->dev, glue->musbfsh->id);
+	platform_device_del(glue->musbfsh);
+	platform_device_put(glue->musbfsh);
+	kfree(glue);
+
+	return 0;
+}
+
+static struct platform_driver mt_usb11_driver = {
+	.remove = __exit_p(mt_usb_remove),
+	.probe = mt_usb11_probe,
+	.driver = {
+	.name = "mt_usb11",
+#ifdef CONFIG_OF
+	.of_match_table = apusb_of_ids,
+#endif
+	},
+};
+
+int usb11_init(void)
+{
+	INFO("[Flow][USB11]%s:%d\n", __func__, __LINE__);
+	return platform_driver_register(&mt_usb11_driver);
+}
+
+/*mubsys_initcall(usb11_init);*/
+
+void usb11_exit(void)
+{
+	platform_driver_unregister(&mt_usb11_driver);
+}
+
+/*module_exit(usb11_exit) */
diff --git a/drivers/misc/mediatek/usb11/mt8167/musbfsh_mt65xx.h b/drivers/misc/mediatek/usb11/mt8167/musbfsh_mt65xx.h
new file mode 100644
index 0000000..a663298f
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/mt8167/musbfsh_mt65xx.h
@@ -0,0 +1,113 @@
+/*
+ * Driver for Special USB-PHY of MUSB HOST peripheral
+ *
+ * The power sequence and programming bits differ from SoC to SoC.
+ * Please use the corresponding USB-PHY API for your SoC.
+ * This driver includes Mediatek MUSB DT/ICUSB support.
+ *
+ * Copyright 2015 Mediatek Inc.
+ *      Marvin Lin <marvin.lin@mediatek.com>
+ *      Arvin Wang <arvin.wang@mediatek.com>
+ *      Vincent Fan <vincent.fan@mediatek.com>
+ *      Bryant Lu <bryant.lu@mediatek.com>
+ *      Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *      Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MUSBFSH_MT65XX_H__
+#define __MUSBFSH_MT65XX_H__
+#ifndef CONFIG_OF
+#include <mach/mt_reg_base.h>
+#endif
+
+extern int usb11_init(void);
+extern void usb11_exit(void);
+extern void enable_mcu_clock(bool enable);
+extern void enable_phy_clock(bool enable);
+extern struct clk *icusb_clk;
+/* extern struct clk *usb_clk; */
+#ifdef CONFIG_OF
+extern struct musbfsh *musbfsh_Device;
+#endif
+/*#define IC_USB*/
+
+/*Level 1 interrupts:*/
+#define USB11_L1INTS 0xA0
+#define USB11_L1INTM 0xA4
+#define USB11_L1INTP 0xA8
+#define MUSBFSH_DMA_INTR_UNMASK_CLR_OFFSET (16)
+#define MUSBFSH_DMA_INTR_UNMASK_SET_OFFSET (24)
+#define USB11_BASE USB1_BASE
+#define USB_BASE USB11_BASE
+/*USB11 PHY registers:*/
+#define USB11_PHY_ADDR (USB_SIF_BASE + 0x900)
+
+#define U1PHYCR0 0xC0
+#define RG_USB11_FSLS_ENBGRI 0x08	/* @U1PHYCR0+1, 1:power on or recovery; 0:save current*/
+
+#define U1PHYCR1 0xC4
+#define force_usb11_en_fs_ls_rcv 0x04	/* @U1PHYCR1+2*/
+#define force_usb11_en_fs_ls_tx 0x02	/* @U1PHYCR1+2*/
+#define RG_USB11_EN_FS_LS_RCV 0x04	/* @U1PHYCR1+3*/
+#define RG_USB11_EN_FS_LS_TX 0x02	/* @U1PHYCR1+3*/
+
+#define U1PHTCR2 0xC8
+#define force_usb11_dm_rpu 0x01
+#define force_usb11_dp_rpu 0x02
+#define force_usb11_dm_rpd 0x04
+#define force_usb11_dp_rpd 0x08
+#define RG_USB11_DM_RPU 0x10
+#define RG_USB11_DP_RPU 0x20
+#define RG_USB11_DM_RPD 0x40
+#define RG_USB11_DP_RPD 0x80
+#define RG_USB11_AVALID 0x04	/* @U1PHYCR2+2*/
+#define RG_USB11_BVALID 0x08	/* @U1PHYCR2+2*/
+#define RG_USB11_SESSEND 0x10	/* @U1PHYCR2+2*/
+#define RG_USB11_VBUSVALID 0x20	/* @U1PHYCR2+2*/
+#define force_usb11_avalid 0x04	/*@U1PHYCR2+3*/
+#define force_usb11_bvalid 0x08	/* @U1PHYCR2+3*/
+#define force_usb11_sessend 0x10	/* @U1PHYCR2+3*/
+#define force_usb11_vbusvalid 0x20	/* @U1PHYCR2+3*/
+
+#ifdef CONFIG_OF
+
+/*USB11 PHY access macro: Need Modify Later*/
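+/*
+ * Note: the SET8/CLR8 macros here (and in the non-DT variant below)
+ * are non-atomic read-modify-write sequences; callers are presumably
+ * expected to serialize access to the PHY register window themselves.
+ */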
+#define USB11PHY_READ32(offset)         __raw_readl((void __iomem *)(musbfsh_Device->phy_reg_base + 0x900 + (offset)))
+#define USB11PHY_READ8(offset)          __raw_readb((void __iomem *)(musbfsh_Device->phy_reg_base + 0x900 + (offset)))
+#define USB11PHY_WRITE8(offset, value)  \
+	__raw_writeb(value, (void __iomem *)(musbfsh_Device->phy_reg_base + 0x900 + (offset)))
+#define USB11PHY_SET8(offset, mask)     USB11PHY_WRITE8((offset), USB11PHY_READ8(offset) | (mask))
+#define USB11PHY_CLR8(offset, mask)     USB11PHY_WRITE8((offset), USB11PHY_READ8(offset) & (~(mask)))
+#else
+
+/*USB11 PHY access macro:*/
+#define USB11PHY_READ32(offset)         __raw_readl((void __iomem *)(USB11_PHY_ADDR + (offset)))
+#define USB11PHY_READ8(offset)          __raw_readb((void __iomem *)(USB11_PHY_ADDR + (offset)))
+#define USB11PHY_WRITE8(offset, value)  __raw_writeb(value, (void __iomem *)(USB11_PHY_ADDR + (offset)))
+#define USB11PHY_SET8(offset, mask)     USB11PHY_WRITE8((offset), USB11PHY_READ8(offset) | (mask))
+#define USB11PHY_CLR8(offset, mask)     USB11PHY_WRITE8((offset), USB11PHY_READ8(offset) & (~(mask)))
+#endif
+#endif
diff --git a/drivers/misc/mediatek/usb11/mtk11_qmu.c b/drivers/misc/mediatek/usb11/mtk11_qmu.c
new file mode 100644
index 0000000..0750a57
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/mtk11_qmu.c
@@ -0,0 +1,1359 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include "musbfsh_qmu.h"
+#include "mtk11_qmu.h"
+#include "musbfsh_host.h"
+
+static PGPD Rx_gpd_head[MAX_QMU_EP + 1];
+static PGPD Tx_gpd_head[MAX_QMU_EP + 1];
+static PGPD Rx_gpd_end[MAX_QMU_EP + 1];
+static PGPD Tx_gpd_end[MAX_QMU_EP + 1];
+static PGPD Rx_gpd_last[MAX_QMU_EP + 1];
+static PGPD Tx_gpd_last[MAX_QMU_EP + 1];
+static GPD_R Rx_gpd_List[MAX_QMU_EP + 1];
+static GPD_R Tx_gpd_List[MAX_QMU_EP + 1];
+static u64 Rx_gpd_Offset[MAX_QMU_EP + 1];
+static u64 Tx_gpd_Offset[MAX_QMU_EP + 1];
+static u32 Rx_gpd_free_count[MAX_QMU_EP + 1];
+static u32 Tx_gpd_free_count[MAX_QMU_EP + 1];
+static u32 Rx_gpd_max_count[MAX_QMU_EP + 1];
+static u32 Tx_gpd_max_count[MAX_QMU_EP + 1];
+
+
+u32 mtk11_qmu_used_gpd_count(u8 isRx, u32 num)
+{
+	if (isRx)
+		return (Rx_gpd_max_count[num] - 1) - Rx_gpd_free_count[num];
+	else
+		return (Tx_gpd_max_count[num] - 1) - Tx_gpd_free_count[num];
+}
+
+u32 mtk11_qmu_free_gpd_count(u8 isRx, u32 num)
+{
+	if (isRx)
+		return Rx_gpd_free_count[num];
+	else
+		return Tx_gpd_free_count[num];
+}
+
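+/*
+ * GPD checksum: byte 1 (the checksum field, by the look of it) is
+ * zeroed first, then all 'len' bytes are summed; returning 0xFF - sum
+ * makes the descriptor bytes plus the stored checksum add up to 0xFF
+ * for the hardware check.
+ */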
+u8 mtk11_PDU_calcCksum(u8 *data, int len)
+{
+	u8 *uDataPtr, ckSum;
+	int i;
+
+	*(data + 1) = 0x0;
+	uDataPtr = data;
+	ckSum = 0;
+	for (i = 0; i < len; i++)
+		ckSum += *(uDataPtr + i);
+
+	return 0xFF - ckSum;
+}
+
+static PGPD get_gpd(u8 isRx, u32 num)
+{
+	PGPD ptr;
+
+	if (isRx) {
+		ptr = Rx_gpd_List[num].pNext;
+		Rx_gpd_List[num].pNext = (PGPD) ((u8 *) (Rx_gpd_List[num].pNext) + GPD_LEN_ALIGNED);
+
+		if (Rx_gpd_List[num].pNext >= Rx_gpd_List[num].pEnd)
+			Rx_gpd_List[num].pNext = Rx_gpd_List[num].pStart;
+		Rx_gpd_free_count[num]--;
+	} else {
+		ptr = Tx_gpd_List[num].pNext;
+		Tx_gpd_List[num].pNext = (PGPD) ((u8 *) (Tx_gpd_List[num].pNext) + GPD_LEN_ALIGNED);
+
+		if (Tx_gpd_List[num].pNext >= Tx_gpd_List[num].pEnd)
+			Tx_gpd_List[num].pNext = Tx_gpd_List[num].pStart;
+		Tx_gpd_free_count[num]--;
+	}
+	return ptr;
+}
+
+static void gpd_ptr_align(u8 isRx, u32 num, PGPD ptr)
+{
+	if (isRx)
+		Rx_gpd_List[num].pNext = (PGPD) ((u8 *) (ptr) + GPD_LEN_ALIGNED);
+	else
+		Tx_gpd_List[num].pNext = (PGPD) ((u8 *) (ptr) + GPD_LEN_ALIGNED);
+}
+
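+/*
+ * The GPD pool is allocated with dma_alloc_coherent(), so virtual and
+ * DMA addresses differ by a constant per-queue offset recorded in
+ * Rx/Tx_gpd_Offset[] at init time; the two helpers below convert
+ * between them by simply adding or subtracting that offset.
+ */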
+static dma_addr_t gpd_virt_to_phys(void *vaddr, u8 isRx, u32 num)
+{
+	dma_addr_t paddr;
+
+	if (isRx)
+		paddr = (dma_addr_t) ((u64) (unsigned long)vaddr - Rx_gpd_Offset[num]);
+	else
+		paddr = (dma_addr_t) ((u64) (unsigned long)vaddr - Tx_gpd_Offset[num]);
+
+	QMU_INFO("%s[%d]phys=%p<->virt=%p\n",
+		 ((isRx == RXQ) ? "RQ" : "TQ"), num, (void *)paddr, vaddr);
+
+	return paddr;
+}
+
+static void *gpd_phys_to_virt(dma_addr_t paddr, u8 isRx, u32 num)
+{
+	void *vaddr;
+
+
+	if (isRx)
+		vaddr = (void *)(unsigned long)((u64) paddr + Rx_gpd_Offset[num]);
+	else
+		vaddr = (void *)(unsigned long)((u64) paddr + Tx_gpd_Offset[num]);
+	QMU_INFO("%s[%d]phys=%p<->virt=%p\n",
+		 ((isRx == RXQ) ? "RQ" : "TQ"), num, (void *)paddr, vaddr);
+
+	return vaddr;
+}
+
+static void init_gpd_list(u8 isRx, int num, PGPD ptr, PGPD io_ptr, u32 size)
+{
+	if (isRx) {
+		Rx_gpd_List[num].pStart = ptr;
+		Rx_gpd_List[num].pEnd = (PGPD) ((u8 *) (ptr + size) + (GPD_EXT_LEN * size));
+		Rx_gpd_Offset[num] = (u64) (unsigned long)ptr - (u64) (unsigned long)io_ptr;
+		ptr++;
+		Rx_gpd_List[num].pNext = (PGPD) ((u8 *) ptr + GPD_EXT_LEN);
+
+		QMU_INFO("Rx_gpd_List[%d].pStart=%p, pNext=%p, pEnd=%p\n",
+			 num, Rx_gpd_List[num].pStart, Rx_gpd_List[num].pNext,
+			 Rx_gpd_List[num].pEnd);
+		QMU_INFO("Rx_gpd_Offset[%d]=%p\n", num, (void *)(unsigned long)Rx_gpd_Offset[num]);
+	} else {
+		Tx_gpd_List[num].pStart = ptr;
+		Tx_gpd_List[num].pEnd = (PGPD) ((u8 *) (ptr + size) + (GPD_EXT_LEN * size));
+		Tx_gpd_Offset[num] = (u64) (unsigned long)ptr - (u64) (unsigned long)io_ptr;
+		ptr++;
+		Tx_gpd_List[num].pNext = (PGPD) ((u8 *) ptr + GPD_EXT_LEN);
+
+		QMU_INFO("Tx_gpd_List[%d].pStart=%p, pNext=%p, pEnd=%p\n",
+			 num, Tx_gpd_List[num].pStart, Tx_gpd_List[num].pNext,
+			 Tx_gpd_List[num].pEnd);
+		QMU_INFO("Tx_gpd_Offset[%d]=%p\n", num, (void *)(unsigned long)Tx_gpd_Offset[num]);
+	}
+}
+
+int mtk11_qmu_init_gpd_pool(struct device *dev)
+{
+	u32 i, size;
+	TGPD *ptr, *io_ptr;
+	dma_addr_t dma_handle;
+	u32 gpd_sz;
+
+#ifdef MUSBFSH_QMU_LIMIT_SUPPORT
+	for (i = 1; i <= MAX_QMU_EP; i++)
+		Rx_gpd_max_count[i] = Tx_gpd_max_count[i] = mtk11_isoc_ep_gpd_count;
+#else
+	if (!mtk11_qmu_max_gpd_num)
+		mtk11_qmu_max_gpd_num = DFT_MAX_GPD_NUM;
+
+	for (i = 1; i < mtk11_isoc_ep_start_idx; i++)
+		Rx_gpd_max_count[i] = Tx_gpd_max_count[i] = mtk11_qmu_max_gpd_num;
+
+	for (i = mtk11_isoc_ep_start_idx; i <= MAX_QMU_EP; i++) {
+		if (mtk11_isoc_ep_gpd_count > mtk11_qmu_max_gpd_num)
+			Rx_gpd_max_count[i] = Tx_gpd_max_count[i] = mtk11_isoc_ep_gpd_count;
+		else
+			Rx_gpd_max_count[i] = Tx_gpd_max_count[i] = mtk11_qmu_max_gpd_num;
+	}
+#endif
+	gpd_sz = (u32) sizeof(TGPD);
+	QMU_INFO("sizeof(TGPD):%d\n", gpd_sz);
+	if (gpd_sz != GPD_SZ)
+		QMU_ERR("ERR!!!, GPD SIZE != %d\n", GPD_SZ);
+
+	for (i = 1; i <= RXQ_NUM; i++) {
+		/* Allocate Rx GPD */
+		size = GPD_LEN_ALIGNED * Rx_gpd_max_count[i];
+		ptr = (TGPD *) dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
+		if (!ptr)
+			return -ENOMEM;
+		memset(ptr, 0, size);
+		io_ptr = (TGPD *) (dma_handle);
+
+		init_gpd_list(RXQ, i, ptr, io_ptr, Rx_gpd_max_count[i]);
+		Rx_gpd_head[i] = ptr;
+		QMU_INFO("ALLOC RX GPD Head [%d] Virtual Mem=%p, DMA addr=%p\n", i, Rx_gpd_head[i],
+			 io_ptr);
+		Rx_gpd_end[i] = Rx_gpd_last[i] = Rx_gpd_head[i];
+		Rx_gpd_free_count[i] = Rx_gpd_max_count[i] - 1; /* one must be for tail */
+		TGPD_CLR_FLAGS_HWO(Rx_gpd_end[i]);
+		gpd_ptr_align(RXQ, i, Rx_gpd_end[i]);
+		QMU_INFO("RQSAR[%d]=%p\n", i, (void *)gpd_virt_to_phys(Rx_gpd_end[i], RXQ, i));
+	}
+
+	for (i = 1; i <= TXQ_NUM; i++) {
+		/* Allocate Tx GPD */
+		size = GPD_LEN_ALIGNED * Tx_gpd_max_count[i];
+		ptr = (TGPD *) dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
+		if (!ptr)
+			return -ENOMEM;
+		memset(ptr, 0, size);
+		io_ptr = (TGPD *) (dma_handle);
+
+		init_gpd_list(TXQ, i, ptr, io_ptr, Tx_gpd_max_count[i]);
+		Tx_gpd_head[i] = ptr;
+		QMU_INFO("ALLOC TX GPD Head [%d] Virtual Mem=%p, DMA addr=%p\n", i, Tx_gpd_head[i],
+			 io_ptr);
+		Tx_gpd_end[i] = Tx_gpd_last[i] = Tx_gpd_head[i];
+		Tx_gpd_free_count[i] = Tx_gpd_max_count[i] - 1; /* one must be for tail */
+		TGPD_CLR_FLAGS_HWO(Tx_gpd_end[i]);
+		gpd_ptr_align(TXQ, i, Tx_gpd_end[i]);
+		QMU_INFO("TQSAR[%d]=%p\n", i, (void *)gpd_virt_to_phys(Tx_gpd_end[i], TXQ, i));
+	}
+
+	return 0;
+}
+
+void mtk11_qmu_reset_gpd_pool(u32 ep_num, u8 isRx)
+{
+	u32 size;
+
+	/* SW reset */
+	if (isRx) {
+		size = GPD_LEN_ALIGNED * Rx_gpd_max_count[ep_num];
+		memset(Rx_gpd_head[ep_num], 0, size);
+		Rx_gpd_end[ep_num] = Rx_gpd_last[ep_num] = Rx_gpd_head[ep_num];
+		Rx_gpd_free_count[ep_num] = Rx_gpd_max_count[ep_num] - 1; /* one must be for tail */
+		TGPD_CLR_FLAGS_HWO(Rx_gpd_end[ep_num]);
+		gpd_ptr_align(isRx, ep_num, Rx_gpd_end[ep_num]);
+
+	} else {
+		size = GPD_LEN_ALIGNED * Tx_gpd_max_count[ep_num];
+		memset(Tx_gpd_head[ep_num], 0, size);
+		Tx_gpd_end[ep_num] = Tx_gpd_last[ep_num] = Tx_gpd_head[ep_num];
+		Tx_gpd_free_count[ep_num] = Tx_gpd_max_count[ep_num] - 1; /* one must be for tail */
+		TGPD_CLR_FLAGS_HWO(Tx_gpd_end[ep_num]);
+		gpd_ptr_align(isRx, ep_num, Tx_gpd_end[ep_num]);
+	}
+}
+
+void mtk11_qmu_destroy_gpd_pool(struct device *dev)
+{
+	int i;
+
+	for (i = 1; i <= RXQ_NUM; i++) {
+		dma_free_coherent(dev, GPD_LEN_ALIGNED * Rx_gpd_max_count[i], Rx_gpd_head[i],
+				  gpd_virt_to_phys(Rx_gpd_head[i], RXQ, i));
+	}
+
+	for (i = 1; i <= TXQ_NUM; i++) {
+		dma_free_coherent(dev, GPD_LEN_ALIGNED * Tx_gpd_max_count[i], Tx_gpd_head[i],
+				  gpd_virt_to_phys(Tx_gpd_head[i], TXQ, i));
+	}
+}
+
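+/*
+ * Queue layout: each queue always keeps one "tail" GPD whose HWO bit
+ * is cleared, which is where the hardware stops. prepare_rx/tx_gpd()
+ * fill the current tail, allocate a new cleared tail, link it via
+ * TGPD_SET_NEXT(), and only then set HWO on the filled GPD (with
+ * memory barriers in between) so the hardware never sees a
+ * half-initialized descriptor.
+ */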
+static void prepare_rx_gpd(u8 *pBuf, u32 data_len, u8 ep_num, u8 isioc)
+{
+	TGPD *gpd;
+
+	/* get gpd from tail */
+	gpd = Rx_gpd_end[ep_num];
+
+	TGPD_SET_DATA(gpd, pBuf);
+	TGPD_CLR_FORMAT_BDP(gpd);
+
+	TGPD_SET_DataBUF_LEN(gpd, data_len);
+	TGPD_SET_BUF_LEN(gpd, 0);
+
+/* TGPD_CLR_FORMAT_BPS(gpd); */
+
+	if (isioc)
+		TGPD_SET_IOC(gpd);
+	else
+		TGPD_CLR_IOC(gpd);
+
+	/* update gpd tail */
+	Rx_gpd_end[ep_num] = get_gpd(RXQ, ep_num);
+	QMU_INFO("[RX]Rx_gpd_end[%d]=%p gpd=%p\n", ep_num, Rx_gpd_end[ep_num], gpd);
+	memset(Rx_gpd_end[ep_num], 0, GPD_LEN_ALIGNED);
+	TGPD_CLR_FLAGS_HWO(Rx_gpd_end[ep_num]);
+
+	/* make sure struct ready before set to next */
+	mb();
+	TGPD_SET_NEXT(gpd, gpd_virt_to_phys(Rx_gpd_end[ep_num], RXQ, ep_num));
+
+	TGPD_SET_CHKSUM_HWO(gpd, 16);
+
+	/* make sure struct ready before HWO */
+	mb();
+	TGPD_SET_FLAGS_HWO(gpd);
+}
+
+static void prepare_tx_gpd(u8 *pBuf, u32 data_len, u8 ep_num, u8 zlp, u8 isioc)
+{
+	TGPD *gpd;
+
+	/* get gpd from tail */
+	gpd = Tx_gpd_end[ep_num];
+
+	TGPD_SET_DATA(gpd, pBuf);
+	TGPD_CLR_FORMAT_BDP(gpd);
+
+	TGPD_SET_BUF_LEN(gpd, data_len);
+	TGPD_SET_EXT_LEN(gpd, 0);
+
+	if (zlp)
+		TGPD_SET_FORMAT_ZLP(gpd);
+	else
+		TGPD_CLR_FORMAT_ZLP(gpd);
+
+	/* TGPD_CLR_FORMAT_BPS(gpd); */
+
+	if (isioc)
+		TGPD_SET_IOC(gpd);
+	else
+		TGPD_CLR_IOC(gpd);
+
+
+	/* update gpd tail */
+	Tx_gpd_end[ep_num] = get_gpd(TXQ, ep_num);
+	QMU_INFO("[TX]Tx_gpd_end[%d]=%p gpd=%p\n", ep_num, Tx_gpd_end[ep_num], gpd);
+	memset(Tx_gpd_end[ep_num], 0, GPD_LEN_ALIGNED);
+	TGPD_CLR_FLAGS_HWO(Tx_gpd_end[ep_num]);
+
+
+	/* make sure struct ready before set to next */
+	mb();
+	TGPD_SET_NEXT(gpd, gpd_virt_to_phys(Tx_gpd_end[ep_num], TXQ, ep_num));
+
+	TGPD_SET_CHKSUM_HWO(gpd, 16);
+
+	/* make sure struct ready before HWO */
+	mb();
+	TGPD_SET_FLAGS_HWO(gpd);
+
+}
+
+bool mtk11_is_qmu_enabled(u8 ep_num, u8 isRx)
+{
+	void __iomem *base = musbfsh_qmu_base;
+
+	if (isRx) {
+		if (MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR) & (USB_QMU_Rx_EN(ep_num)))
+			return true;
+	} else {
+		if (MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR) & (USB_QMU_Tx_EN(ep_num)))
+			return true;
+	}
+	return false;
+}
+
+void mtk11_qmu_enable(struct musbfsh *musbfsh, u8 ep_num, u8 isRx)
+{
+	struct musbfsh_hw_ep *hw_ep;
+	u32 QCR;
+	void __iomem *base = musbfsh_qmu_base;
+	void __iomem *mbase = musbfsh->mregs;
+	void __iomem *epio;
+	u16 csr = 0;
+	u16 intr_e = 0;
+
+	epio = musbfsh->endpoints[ep_num].regs;
+	hw_ep = &musbfsh->endpoints[ep_num];
+	musbfsh_ep_select(mbase, ep_num);
+
+	if (isRx) {
+		QMU_WARN("enable RQ(%d)\n", ep_num);
+
+		/* enable dma */
+		csr |= MUSBFSH_RXCSR_DMAENAB;
+
+		/* check ISOC */
+		if (hw_ep->type == USB_ENDPOINT_XFER_ISOC)
+			csr |= MUSBFSH_RXCSR_P_ISO;
+		musbfsh_writew(epio, MUSBFSH_RXCSR, csr);
+
+		/* turn off intrRx */
+		intr_e = musbfsh_readw(mbase, MUSBFSH_INTRRXE);
+		intr_e = intr_e & (~(1 << (ep_num)));
+		musbfsh_writew(mbase, MUSBFSH_INTRRXE, intr_e);
+
+		/* set 1st gpd and enable */
+		MGC_WriteQMU32(base, MGC_O_QMU_RQSAR(ep_num),
+			       gpd_virt_to_phys(Rx_gpd_end[ep_num], RXQ, ep_num));
+		MGC_WriteQUCS32(base, MGC_O_QUCS_USBGCSR,
+				MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR) | (USB_QMU_Rx_EN(ep_num)));
+
+#ifdef CFG_CS_CHECK
+		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR0);
+		MGC_WriteQMU32(base, MGC_O_QMU_QCR0, QCR | DQMU_RQCS_EN(ep_num));
+#endif
+
+#ifdef CFG_RX_ZLP_EN
+		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR3);
+		MGC_WriteQMU32(base, MGC_O_QMU_QCR3, QCR | DQMU_RX_ZLP(ep_num));
+#endif
+
+#ifdef CFG_RX_COZ_EN
+		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR3);
+		MGC_WriteQMU32(base, MGC_O_QMU_QCR3, QCR | DQMU_RX_COZ(ep_num));
+#endif
+
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_QIMCR,
+				DQMU_M_RX_DONE(ep_num) | DQMU_M_RQ_EMPTY | DQMU_M_RXQ_ERR |
+				DQMU_M_RXEP_ERR);
+
+
+#ifdef CFG_EMPTY_CHECK
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_REPEMPMCR, DQMU_M_RX_EMPTY(ep_num));
+#else
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_QIMSR, DQMU_M_RQ_EMPTY);
+#endif
+
+		QCR = DQMU_M_RX_LEN_ERR(ep_num);
+#ifdef CFG_CS_CHECK
+		QCR |= DQMU_M_RX_GPDCS_ERR(ep_num);
+#endif
+
+#ifdef CFG_RX_ZLP_EN
+		QCR |= DQMU_M_RX_ZLP_ERR(ep_num);
+#endif
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_RQEIMCR, QCR);
+
+
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_REPEIMCR, DQMU_M_RX_EP_ERR(ep_num));
+
+		mb();
+		/* qmu start */
+		MGC_WriteQMU32(base, MGC_O_QMU_RQCSR(ep_num), DQMU_QUE_START);
+
+	} else {
+		QMU_WARN("enable TQ(%d)\n", ep_num);
+
+		/* enable dma */
+		csr |= MUSBFSH_TXCSR_DMAENAB;
+
+		/* check ISOC */
+		if (hw_ep->type == USB_ENDPOINT_XFER_ISOC)
+			csr |= MUSBFSH_TXCSR_P_ISO;
+		musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+
+		/* turn off intrTx */
+		intr_e = musbfsh_readw(mbase, MUSBFSH_INTRTXE);
+		intr_e = intr_e & (~(1 << ep_num));
+		musbfsh_writew(mbase, MUSBFSH_INTRTXE, intr_e);
+
+		/* set 1st gpd and enable */
+		MGC_WriteQMU32(base, MGC_O_QMU_TQSAR(ep_num),
+			       gpd_virt_to_phys(Tx_gpd_end[ep_num], TXQ, ep_num));
+		MGC_WriteQUCS32(base, MGC_O_QUCS_USBGCSR,
+				MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR) | (USB_QMU_Tx_EN(ep_num)));
+
+#ifdef CFG_CS_CHECK
+		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR0);
+		MGC_WriteQMU32(base, MGC_O_QMU_QCR0, QCR | DQMU_TQCS_EN(ep_num));
+#endif
+
+#if (TXZLP == HW_MODE)
+		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR2);
+		MGC_WriteQMU32(base, MGC_O_QMU_QCR2, QCR | DQMU_TX_ZLP(ep_num));
+#elif (TXZLP == GPD_MODE)
+		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR2);
+		MGC_WriteQMU32(base, MGC_O_QMU_QCR2, QCR | DQMU_TX_MULTIPLE(ep_num));
+#endif
+
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_QIMCR,
+				DQMU_M_TX_DONE(ep_num) | DQMU_M_TQ_EMPTY | DQMU_M_TXQ_ERR |
+				DQMU_M_TXEP_ERR);
+
+#ifdef CFG_EMPTY_CHECK
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TEPEMPMCR, DQMU_M_TX_EMPTY(ep_num));
+#else
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_QIMSR, DQMU_M_TQ_EMPTY);
+#endif
+
+		QCR = DQMU_M_TX_LEN_ERR(ep_num);
+#ifdef CFG_CS_CHECK
+		QCR |= DQMU_M_TX_GPDCS_ERR(ep_num) | DQMU_M_TX_BDCS_ERR(ep_num);
+#endif
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TQEIMCR, QCR);
+
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TEPEIMCR, DQMU_M_TX_EP_ERR(ep_num));
+
+		mb();
+		/* qmu start */
+		MGC_WriteQMU32(base, MGC_O_QMU_TQCSR(ep_num), DQMU_QUE_START);
+	}
+}
+
+void mtk11_qmu_stop(u8 ep_num, u8 isRx)
+{
+	void __iomem *base = musbfsh_qmu_base;
+
+	if (!isRx) {
+		if (MGC_ReadQMU16(base, MGC_O_QMU_TQCSR(ep_num)) & DQMU_QUE_ACTIVE) {
+			MGC_WriteQMU32(base, MGC_O_QMU_TQCSR(ep_num), DQMU_QUE_STOP);
+			QMU_WARN("Stop TQ %d\n", ep_num);
+		} else {
+			QMU_WARN("TQ %d already inactive\n", ep_num);
+		}
+	} else {
+		if (MGC_ReadQMU16(base, MGC_O_QMU_RQCSR(ep_num)) & DQMU_QUE_ACTIVE) {
+			MGC_WriteQMU32(base, MGC_O_QMU_RQCSR(ep_num), DQMU_QUE_STOP);
+			QMU_WARN("Stop RQ %d\n", ep_num);
+		} else {
+			QMU_WARN("RQ %d already inactive\n", ep_num);
+		}
+	}
+}
+
+static void mtk11_qmu_disable(u8 ep_num, u8 isRx)
+{
+	u32 QCR;
+	void __iomem *base = musbfsh_qmu_base;
+
+	QMU_WARN("disable %s(%d)\n", isRx ? "RQ" : "TQ", ep_num);
+
+	mtk11_qmu_stop(ep_num, isRx);
+	if (isRx) {
+		/* clear Queue start address */
+		MGC_WriteQMU32(base, MGC_O_QMU_RQSAR(ep_num), 0);
+
+		/* KOBE: in Denali, is the per-EP QMU enable separated out in MGC_O_QUCS_USBGCSR? */
+		MGC_WriteQUCS32(base, MGC_O_QUCS_USBGCSR,
+				MGC_ReadQUCS32(base,
+					       MGC_O_QUCS_USBGCSR) & (~(USB_QMU_Rx_EN(ep_num))));
+
+		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR0);
+		MGC_WriteQMU32(base, MGC_O_QMU_QCR0, QCR & (~(DQMU_RQCS_EN(ep_num))));
+		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR3);
+		MGC_WriteQMU32(base, MGC_O_QMU_QCR3, QCR & (~(DQMU_RX_ZLP(ep_num))));
+
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_QIMSR, DQMU_M_RX_DONE(ep_num));
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_REPEMPMSR, DQMU_M_RX_EMPTY(ep_num));
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_RQEIMSR,
+				DQMU_M_RX_LEN_ERR(ep_num) | DQMU_M_RX_GPDCS_ERR(ep_num) |
+				DQMU_M_RX_ZLP_ERR(ep_num));
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_REPEIMSR, DQMU_M_RX_EP_ERR(ep_num));
+	} else {
+		/* clear Queue start address */
+		MGC_WriteQMU32(base, MGC_O_QMU_TQSAR(ep_num), 0);
+
+		/* KOBE: in Denali, is the per-EP QMU enable separated out in MGC_O_QUCS_USBGCSR? */
+		MGC_WriteQUCS32(base, MGC_O_QUCS_USBGCSR,
+				MGC_ReadQUCS32(base,
+					       MGC_O_QUCS_USBGCSR) & (~(USB_QMU_Tx_EN(ep_num))));
+
+		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR0);
+		MGC_WriteQMU32(base, MGC_O_QMU_QCR0, QCR & (~(DQMU_TQCS_EN(ep_num))));
+		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR2);
+		MGC_WriteQMU32(base, MGC_O_QMU_QCR2, QCR & (~(DQMU_TX_ZLP(ep_num))));
+
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_QIMSR, DQMU_M_TX_DONE(ep_num));
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TEPEMPMSR, DQMU_M_TX_EMPTY(ep_num));
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TQEIMSR,
+				DQMU_M_TX_LEN_ERR(ep_num) | DQMU_M_TX_GPDCS_ERR(ep_num) |
+				DQMU_M_TX_BDCS_ERR(ep_num));
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TEPEIMSR, DQMU_M_TX_EP_ERR(ep_num));
+	}
+}
+
+void mtk11_qmu_insert_task(u8 ep_num, u8 isRx, u8 *buf, u32 length, u8 zlp, u8 isioc)
+{
+	QMU_INFO("mtk11_qmu_insert_task ep_num: %d, isRx: %d, buf: %p, length: %d zlp: %d isioc: %d\n",
+			ep_num, isRx, buf, length, zlp, isioc);
+	if (isRx) /* rx don't care zlp input */
+		prepare_rx_gpd(buf, length, ep_num, isioc);
+	else
+		prepare_tx_gpd(buf, length, ep_num, zlp, isioc);
+}
+
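+/*
+ * Resume workaround: after writing DQMU_QUE_RESUME, a CSR that reads
+ * back as zero indicates the queue did not actually resume, so the
+ * resume command is written once more and the CSR value is logged.
+ */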
+void mtk11_qmu_resume(u8 ep_num, u8 isRx)
+{
+	void __iomem *base = musbfsh_qmu_base;
+
+	if (!isRx) {
+		MGC_WriteQMU32(base, MGC_O_QMU_TQCSR(ep_num), DQMU_QUE_RESUME);
+		if (!MGC_ReadQMU32(base, MGC_O_QMU_TQCSR(ep_num))) {
+			QMU_ERR("TQCSR[%d]=%x\n", ep_num,
+				MGC_ReadQMU32(base, MGC_O_QMU_TQCSR(ep_num)));
+			MGC_WriteQMU32(base, MGC_O_QMU_TQCSR(ep_num), DQMU_QUE_RESUME);
+			QMU_ERR("TQCSR[%d]=%x\n", ep_num,
+				MGC_ReadQMU32(base, MGC_O_QMU_TQCSR(ep_num)));
+		}
+	} else {
+		MGC_WriteQMU32(base, MGC_O_QMU_RQCSR(ep_num), DQMU_QUE_RESUME);
+		if (!MGC_ReadQMU32(base, MGC_O_QMU_RQCSR(ep_num))) {
+			QMU_ERR("RQCSR[%d]=%x\n", ep_num,
+				MGC_ReadQMU32(base, MGC_O_QMU_RQCSR(ep_num)));
+			MGC_WriteQMU32(base, MGC_O_QMU_RQCSR(ep_num), DQMU_QUE_RESUME);
+			QMU_ERR("RQCSR[%d]=%x\n", ep_num,
+				MGC_ReadQMU32(base, MGC_O_QMU_RQCSR(ep_num)));
+		}
+	}
+}
+
+void mtk11_flush_ep_csr(struct musbfsh *musbfsh, u8 ep_num, u8 isRx)
+{
+	void __iomem *mbase = musbfsh->mregs;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints + ep_num;
+	void __iomem *epio = hw_ep->regs;
+	u16 csr, wCsr;
+
+	if (epio == NULL)
+		QMU_ERR("epio == NULL\n");
+
+	if (isRx) {
+		csr = musbfsh_readw(epio, MUSBFSH_RXCSR);
+		csr |= MUSBFSH_RXCSR_FLUSHFIFO | MUSBFSH_RXCSR_RXPKTRDY;
+		csr &= ~MUSBFSH_RXCSR_H_REQPKT;
+
+		/* write 2x to allow double buffering */
+		/* CC: see if some check is necessary */
+		musbfsh_writew(epio, MUSBFSH_RXCSR, csr);
+		/*musbfsh_writew(epio, MUSBFSH_RXCSR, csr | MUSBFSH_RXCSR_CLRDATATOG);*/
+	} else {
+		csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+		if (csr & MUSBFSH_TXCSR_TXPKTRDY) {
+			wCsr = csr | MUSBFSH_TXCSR_FLUSHFIFO | MUSBFSH_TXCSR_TXPKTRDY;
+			musbfsh_writew(epio, MUSBFSH_TXCSR, wCsr);
+		}
+
+		csr |= MUSBFSH_TXCSR_FLUSHFIFO & ~MUSBFSH_TXCSR_TXPKTRDY;
+		musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+		/*musbfsh_writew(epio, MUSBFSH_TXCSR, csr | MUSBFSH_TXCSR_CLRDATATOG);*/
+		/* CC: why is this special? */
+		musbfsh_writew(mbase, MUSBFSH_INTRTX, 1 << ep_num);
+	}
+}
+
+void mtk11_disable_q(struct musbfsh *musbfsh, u8 ep_num, u8 isRx)
+{
+	void __iomem *mbase = musbfsh->mregs;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints + ep_num;
+	void __iomem *epio = hw_ep->regs;
+	u16 csr;
+
+	mtk11_qmu_disable(ep_num, isRx);
+	mtk11_qmu_reset_gpd_pool(ep_num, isRx);
+
+	musbfsh_ep_select(mbase, ep_num);
+	if (isRx) {
+		csr = musbfsh_readw(epio, MUSBFSH_RXCSR);
+		csr &= ~MUSBFSH_RXCSR_DMAENAB;
+		musbfsh_writew(epio, MUSBFSH_RXCSR, csr);
+		mtk11_flush_ep_csr(musbfsh, ep_num, isRx);
+	} else {
+		csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+		csr &= ~MUSBFSH_TXCSR_DMAENAB;
+		musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+		mtk11_flush_ep_csr(musbfsh, ep_num, isRx);
+	}
+}
+
+void mtk11_qmu_host_iso_rx_err_info(struct musbfsh *musbfsh, u8 epnum)
+{
+	u16 rx_csr;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints + epnum;
+	void __iomem *epio = hw_ep->regs;
+	void __iomem *mbase = musbfsh->mregs;
+
+	musbfsh_ep_select(mbase, epnum);
+	rx_csr = musbfsh_readw(epio, MUSBFSH_RXCSR);
+
+	WARNING("<== hw %d rxcsr %04x\n", epnum, rx_csr);
+
+	/* check for errors, concurrent stall & unlink is not really handled yet! */
+	if (rx_csr & MUSBFSH_RXCSR_H_RXSTALL) {
+		WARNING("RX end %d STALL\n", epnum);
+	} else if (rx_csr & MUSBFSH_RXCSR_H_ERROR) {
+		WARNING("end %d RX proto error,rxtoggle=0x%x\n", epnum,
+		    musbfsh_readl(mbase, MUSBFSH_RXTOG));
+	} else if (rx_csr & MUSBFSH_RXCSR_DATAERROR) {
+		WARNING("RX end %d ISO data error\n", epnum);
+	} else if (rx_csr & MUSBFSH_RXCSR_INCOMPRX) {
+		WARNING("end %d high bandwidth incomplete ISO packet RX\n", epnum);
+	}
+}
+
+void mtk11_qmu_host_iso_tx_err_info(struct musbfsh *musbfsh, u8 epnum)
+{
+	u16 tx_csr;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints + epnum;
+	void __iomem *epio = hw_ep->regs;
+	void __iomem *mbase = musbfsh->mregs;
+
+	musbfsh_ep_select(mbase, epnum);
+	tx_csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+	WARNING("OUT/TX%d end, csr %04x\n", epnum, tx_csr);
+	/* check for errors */
+	if (tx_csr & MUSBFSH_TXCSR_H_RXSTALL) {
+		/* dma was disabled, fifo flushed */
+		WARNING("TX end %d stall\n", epnum);
+	} else if (tx_csr & MUSBFSH_TXCSR_H_ERROR) {
+		/* (NON-ISO) dma was disabled, fifo flushed */
+		WARNING("TX 3strikes on ep=%d\n", epnum);
+	} else if (tx_csr & MUSBFSH_TXCSR_H_NAKTIMEOUT) {
+		WARNING("TX end=%d device not responding\n", epnum);
+	}
+}
+
+void mtk11_qmu_host_rx_err(struct musbfsh *musbfsh, u8 epnum)
+{
+	struct urb *urb;
+	u16 rx_csr, val;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints + epnum;
+	void __iomem *epio = hw_ep->regs;
+	struct musbfsh_qh *qh = hw_ep->in_qh;
+	bool done = false;
+	u32 status = 0;
+	void __iomem *mbase = musbfsh->mregs;
+
+	musbfsh_ep_select(mbase, epnum);
+	rx_csr = musbfsh_readw(epio, MUSBFSH_RXCSR);
+	val = rx_csr;
+
+	if (!qh) {
+		WARNING("!QH for ep %d\n", epnum);
+		goto finished;
+	}
+
+	urb = next_urb(qh);
+	status = 0;
+
+	if (unlikely(!urb)) {
+		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
+		 * usbtest #11 (unlinks) triggers it regularly, sometimes
+		 * with fifo full.  (Only with DMA??)
+		 */
+		WARNING("BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
+		    musbfsh_readw(epio, MUSBFSH_RXCOUNT));
+		musbfsh_h_flush_rxfifo(hw_ep, 0);
+		goto finished;
+	}
+
+	WARNING("<== hw %d rxcsr %04x, urb actual %d\n",
+	    epnum, rx_csr, urb->actual_length);
+
+	/* check for errors, concurrent stall & unlink is not really handled yet! */
+	if (rx_csr & MUSBFSH_RXCSR_H_RXSTALL) {
+		WARNING("RX end %d STALL\n", epnum);
+
+		/* handle stall in MAC */
+		rx_csr &= ~MUSBFSH_RXCSR_H_RXSTALL;
+		musbfsh_writew(epio, MUSBFSH_RXCSR, rx_csr);
+
+		/* stall; record URB status */
+		status = -EPIPE;
+
+	} else if (rx_csr & MUSBFSH_RXCSR_H_ERROR) {
+		WARNING("end %d RX proto error,rxtoggle=0x%x\n", epnum,
+		    musbfsh_readl(mbase, MUSBFSH_RXTOG));
+
+		status = -EPROTO;
+		musbfsh_writeb(epio, MUSBFSH_RXINTERVAL, 0);
+
+	} else if (rx_csr & MUSBFSH_RXCSR_DATAERROR) {
+
+		WARNING("RX end %d ISO data error\n", epnum);
+	} else if (rx_csr & MUSBFSH_RXCSR_INCOMPRX) {
+		WARNING("end %d high bandwidth incomplete ISO packet RX\n", epnum);
+		status = -EPROTO;
+	}
+
+	/* faults abort the transfer */
+	if (status) {
+		musbfsh_h_flush_rxfifo(hw_ep, 0);
+		musbfsh_writeb(epio, MUSBFSH_RXINTERVAL, 0);
+		done = true;
+	}
+
+	if (done) {
+		if (urb->status == -EINPROGRESS)
+			urb->status = status;
+		musbfsh_advance_schedule(musbfsh, urb, hw_ep, USB_DIR_IN);
+	}
+
+finished:
+	{
+		/* must use static string for AEE usage */
+		static char string[100];
+
+		snprintf(string, sizeof(string), "USB11_HOST, RXQ<%d> ERR, CSR:%x", epnum, val);
+		QMU_ERR("%s\n", string);
+	}
+}
+
+void mtk11_qmu_host_tx_err(struct musbfsh *musbfsh, u8 epnum)
+{
+	struct urb *urb;
+	u16 tx_csr, val;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints + epnum;
+	void __iomem *epio = hw_ep->regs;
+	struct musbfsh_qh *qh = hw_ep->out_qh;
+	bool done = false;
+	u32 status = 0;
+	void __iomem *mbase = musbfsh->mregs;
+
+	musbfsh_ep_select(mbase, epnum);
+	tx_csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+	val = tx_csr;
+
+	if (!qh) {
+		WARNING("!QH for ep %d\n", epnum);
+		goto finished;
+	}
+
+	urb = next_urb(qh);
+	/* with CPPI, DMA sometimes triggers "extra" irqs */
+	if (!urb) {
+		WARNING("extra TX%d ready, csr %04x\n", epnum, tx_csr);
+		goto finished;
+	}
+
+	WARNING("OUT/TX%d end, csr %04x\n", epnum, tx_csr);
+
+	/* check for errors */
+	if (tx_csr & MUSBFSH_TXCSR_H_RXSTALL) {
+		/* dma was disabled, fifo flushed */
+		WARNING("TX end %d stall\n", epnum);
+
+		/* stall; record URB status */
+		status = -EPIPE;
+
+	} else if (tx_csr & MUSBFSH_TXCSR_H_ERROR) {
+		/* (NON-ISO) dma was disabled, fifo flushed */
+		WARNING("TX 3strikes on ep=%d\n", epnum);
+
+		status = -ETIMEDOUT;
+	} else if (tx_csr & MUSBFSH_TXCSR_H_NAKTIMEOUT) {
+		WARNING("TX end=%d device not responding\n", epnum);
+
+		/* NOTE:  this code path would be a good place to PAUSE a
+		 * transfer, if there's some other (nonperiodic) tx urb
+		 * that could use this fifo.  (dma complicates it...)
+		 * That's already done for bulk RX transfers.
+		 *
+		 * if (bulk && qh->ring.next != &musb->out_bulk), then
+		 * we have a candidate... NAKing is *NOT* an error
+		 */
+		musbfsh_ep_select(mbase, epnum);
+		musbfsh_writew(epio, MUSBFSH_TXCSR, MUSBFSH_TXCSR_H_WZC_BITS | MUSBFSH_TXCSR_TXPKTRDY);
+		return;
+	}
+
+/* done: */
+	if (status) {
+		tx_csr &= ~(MUSBFSH_TXCSR_AUTOSET
+			    | MUSBFSH_TXCSR_DMAENAB
+			    | MUSBFSH_TXCSR_H_ERROR | MUSBFSH_TXCSR_H_RXSTALL | MUSBFSH_TXCSR_H_NAKTIMEOUT);
+
+		musbfsh_ep_select(mbase, epnum);
+		musbfsh_writew(epio, MUSBFSH_TXCSR, tx_csr);
+		/* REVISIT may need to clear FLUSHFIFO ... */
+		musbfsh_writew(epio, MUSBFSH_TXCSR, tx_csr);
+		musbfsh_writeb(epio, MUSBFSH_TXINTERVAL, 0);
+
+		done = true;
+	}
+
+	/* urb->status != -EINPROGRESS means request has been faulted,
+	 * so we must abort this transfer after cleanup
+	 */
+	if (urb->status != -EINPROGRESS) {
+		done = true;
+		if (status == 0)
+			status = urb->status;
+	}
+
+	if (done) {
+		/* set status */
+		urb->status = status;
+		urb->actual_length = qh->offset;
+		musbfsh_advance_schedule(musbfsh, urb, hw_ep, USB_DIR_OUT);
+	}
+
+finished:
+	{
+		/* must use static string for AEE usage */
+		static char string[100];
+
+		snprintf(string, sizeof(string), "USB11_HOST, TXQ<%d> ERR, CSR:%x", epnum, val);
+		QMU_ERR("%s\n", string);
+#ifdef CONFIG_MEDIATEK_SOLUTION
+		aee_kernel_warning(string, string);
+#endif
+	}
+
+}
+
+static void mtk11_flush_urb_status(struct musbfsh_qh	*qh, struct urb *urb)
+{
+	urb->actual_length = 0;
+	urb->status = -EINPROGRESS;
+	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
+		struct usb_iso_packet_descriptor	*d;
+		int index;
+
+		for (index = 0; index < urb->number_of_packets; index++) {
+			d = urb->iso_frame_desc + qh->iso_idx;
+			d->actual_length = 0;
+			d->status = -EXDEV;
+		}
+	}
+}
+
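+/*
+ * Error recovery: stop the queue and reset its GPD pool (same action
+ * as a QMU flush), then for isochronous endpoints flush the endpoint
+ * CSR, re-enable the queue and re-kick every URB still queued on the
+ * host endpoint; for other transfer types the error is reported via
+ * the host rx/tx error handlers instead.
+ */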
+void mtk11_qmu_err_recover(struct musbfsh *musbfsh, u8 ep_num, u8 isRx, bool is_len_err)
+{
+	struct urb *urb;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints + ep_num;
+	struct musbfsh_qh			*qh;
+	struct usb_host_endpoint	*hep;
+
+	if (isRx)
+		qh = hw_ep->in_qh;
+	else
+		qh = hw_ep->out_qh;
+
+	hep = qh->hep;
+	/* same action as musb_flush_qmu */
+	mtk11_qmu_stop(ep_num, isRx);
+	mtk11_qmu_reset_gpd_pool(ep_num, isRx);
+
+	urb = next_urb(qh);
+	if (unlikely(!urb)) {
+		pr_warn("No URB.\n");
+		return;
+	}
+
+	if (usb_pipeisoc(urb->pipe)) {
+		if (isRx)
+			mtk11_qmu_host_iso_rx_err_info(musbfsh, ep_num);
+		else
+			mtk11_qmu_host_iso_tx_err_info(musbfsh, ep_num);
+		mtk11_flush_ep_csr(musbfsh, ep_num, isRx);
+		mtk11_qmu_enable(musbfsh, ep_num, isRx);
+		list_for_each_entry(urb, &hep->urb_list, urb_list) {
+			QMU_WARN("%s qh:0x%p flush and kick urb:0x%p\n", __func__, qh, urb);
+			mtk11_flush_urb_status(qh, urb);
+			mtk11_kick_CmdQ(musbfsh, isRx, qh, urb);
+		}
+	} else {
+		mtk11_flush_ep_csr(musbfsh, ep_num, isRx);
+		if (isRx)
+			mtk11_qmu_host_rx_err(musbfsh, ep_num);
+		else
+			mtk11_qmu_host_tx_err(musbfsh, ep_num);
+	}
+}
+
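+/*
+ * For each error/empty source: read the status register, mask it with
+ * the corresponding interrupt mask register, log and remember the
+ * offending endpoint, then acknowledge by writing the value back.
+ * Only the last endpoint recorded this way is recovered below.
+ */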
+void mtk11_qmu_irq_err(struct musbfsh *musbfsh, u32 qisar)
+{
+	u8 i;
+	u32 wQmuVal;
+	u32 wRetVal;
+	void __iomem *base = musbfsh_qmu_base;
+	u8 err_ep_num = 0;
+	bool is_len_err = false;
+	u8 isRx;
+
+	wQmuVal = qisar;
+
+	/* RXQ ERROR */
+	if (wQmuVal & DQMU_M_RXQ_ERR) {
+		wRetVal =
+		    MGC_ReadQIRQ32(base,
+				   MGC_O_QIRQ_RQEIR) & (~(MGC_ReadQIRQ32(base, MGC_O_QIRQ_RQEIMR)));
+		QMU_ERR("RQ error in QMU mode![0x%x]\n", wRetVal);
+
+		isRx = RXQ;
+		for (i = 1; i <= RXQ_NUM; i++) {
+			if (wRetVal & DQMU_M_RX_GPDCS_ERR(i)) {
+				QMU_ERR("RQ %d GPD checksum error!\n", i);
+				err_ep_num = i;
+			}
+			if (wRetVal & DQMU_M_RX_LEN_ERR(i)) {
+				QMU_ERR("RQ %d receive length error!\n", i);
+				err_ep_num = i;
+				is_len_err = true;
+			}
+			if (wRetVal & DQMU_M_RX_ZLP_ERR(i))
+				QMU_ERR("RQ %d received a ZLP packet!\n", i);
+		}
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_RQEIR, wRetVal);
+	}
+
+	/* TXQ ERROR */
+	if (wQmuVal & DQMU_M_TXQ_ERR) {
+		isRx = TXQ;
+		wRetVal =
+		    MGC_ReadQIRQ32(base,
+				   MGC_O_QIRQ_TQEIR) & (~(MGC_ReadQIRQ32(base, MGC_O_QIRQ_TQEIMR)));
+		QMU_ERR("TQ error in QMU mode![0x%x]\n", wRetVal);
+
+		for (i = 1; i <= TXQ_NUM; i++) {
+			if (wRetVal & DQMU_M_TX_BDCS_ERR(i)) {
+				QMU_ERR("TQ %d BD checksum error!\n", i);
+				err_ep_num = i;
+			}
+			if (wRetVal & DQMU_M_TX_GPDCS_ERR(i)) {
+				QMU_ERR("TQ %d GPD checksum error!\n", i);
+				err_ep_num = i;
+			}
+			if (wRetVal & DQMU_M_TX_LEN_ERR(i)) {
+				QMU_ERR("TQ %d buffer length error!\n", i);
+				err_ep_num = i;
+				is_len_err = true;
+			}
+		}
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TQEIR, wRetVal);
+	}
+
+	/* RX EP ERROR */
+	if (wQmuVal & DQMU_M_RXEP_ERR) {
+		isRx = RXQ;
+		wRetVal =
+		    MGC_ReadQIRQ32(base,
+				   MGC_O_QIRQ_REPEIR) &
+		    (~(MGC_ReadQIRQ32(base, MGC_O_QIRQ_REPEIMR)));
+		QMU_ERR("Rx endpoint error in QMU mode![0x%x]\n", wRetVal);
+
+		for (i = 1; i <= RXQ_NUM; i++) {
+			if (wRetVal & DQMU_M_RX_EP_ERR(i)) {
+				QMU_ERR("RX EP %d ERR\n", i);
+				err_ep_num = i;
+			}
+		}
+
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_REPEIR, wRetVal);
+	}
+
+	/* TX EP ERROR */
+	if (wQmuVal & DQMU_M_TXEP_ERR) {
+		isRx = TXQ;
+		wRetVal =
+		    MGC_ReadQIRQ32(base,
+				   MGC_O_QIRQ_TEPEIR) &
+		    (~(MGC_ReadQIRQ32(base, MGC_O_QIRQ_TEPEIMR)));
+		QMU_ERR("Tx endpoint error in QMU mode![0x%x]\n", wRetVal);
+
+		for (i = 1; i <= TXQ_NUM; i++) {
+			if (wRetVal & DQMU_M_TX_EP_ERR(i)) {
+				QMU_ERR("TX EP %d ERR\n", i);
+				err_ep_num = i;
+			}
+		}
+
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TEPEIR, wRetVal);
+	}
+
+	/* RXQ EMPTY */
+	if (wQmuVal & DQMU_M_RQ_EMPTY) {
+		wRetVal = MGC_ReadQIRQ32(base, MGC_O_QIRQ_REPEMPR)
+		    & (~(MGC_ReadQIRQ32(base, MGC_O_QIRQ_REPEMPMR)));
+		QMU_ERR("RQ Empty in QMU mode![0x%x]\n", wRetVal);
+
+		for (i = 1; i <= RXQ_NUM; i++) {
+			if (wRetVal & DQMU_M_RX_EMPTY(i))
+				QMU_ERR("RQ %d Empty!\n", i);
+		}
+
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_REPEMPR, wRetVal);
+	}
+
+	/* TXQ EMPTY */
+	if (wQmuVal & DQMU_M_TQ_EMPTY) {
+		wRetVal = MGC_ReadQIRQ32(base, MGC_O_QIRQ_TEPEMPR)
+		    & (~(MGC_ReadQIRQ32(base, MGC_O_QIRQ_TEPEMPMR)));
+		QMU_ERR("TQ Empty in QMU mode![0x%x]\n", wRetVal);
+
+		for (i = 1; i <= TXQ_NUM; i++) {
+			if (wRetVal & DQMU_M_TX_EMPTY(i))
+				QMU_ERR("TQ %d Empty!\n", i);
+		}
+
+		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TEPEMPR, wRetVal);
+	}
+
+	/* QMU error recovery; note: only the last recorded EP error is serviced */
+	if (err_ep_num)
+		mtk11_qmu_err_recover(musbfsh, err_ep_num, isRx, is_len_err);
+}
+
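+/*
+ * RX completion: walk forward from the last GPD the driver completed
+ * (Rx_gpd_last) towards the hardware's current pointer (RQCPR,
+ * converted back to a virtual address), completing one URB segment
+ * per GPD whose HWO bit the hardware has cleared.
+ */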
+void h_mtk11_qmu_done_rx(struct musbfsh *musbfsh, u8 ep_num)
+{
+	void __iomem *base = musbfsh_qmu_base;
+
+	TGPD *gpd = Rx_gpd_last[ep_num];
+	TGPD *gpd_current = (TGPD *)(unsigned long)MGC_ReadQMU32(base, MGC_O_QMU_RQCPR(ep_num));
+	struct musbfsh_hw_ep	*hw_ep = musbfsh->endpoints + ep_num;
+	struct musbfsh_qh	*qh = hw_ep->in_qh;
+	struct urb	*urb = NULL;
+	bool done = true;
+
+	if (unlikely(!qh)) {
+		WARNING("hw_ep:%d, QH NULL\n", ep_num);
+		return;
+	}
+
+	urb = next_urb(qh);
+	if (unlikely(!urb)) {
+		WARNING("hw_ep:%d, !URB\n", ep_num);
+		return;
+	}
+
+	/* Translate the physical addr read from the QMU register into a virtual addr */
+	gpd_current = (TGPD *)gpd_phys_to_virt((dma_addr_t)gpd_current, RXQ, ep_num);
+
+	QMU_INFO("[RXD]%s EP%d, Last=%p, Current=%p, End=%p\n",
+				__func__, ep_num, gpd, gpd_current, Rx_gpd_end[ep_num]);
+
+	/* gpd_current should point at least one GPD beyond the previously completed one */
+	if (gpd == gpd_current) {
+		QMU_ERR("[RXD][ERROR] gpd(%p) == gpd_current(%p)\n",
+					gpd, gpd_current);
+
+		QMU_ERR("[RXD][ERROR]EP%d RQCSR=%x, RQSAR=%x, RQCPR=%x, RQLDPR=%x\n",
+				ep_num,
+				MGC_ReadQMU32(base, MGC_O_QMU_RQCSR(ep_num)),
+				MGC_ReadQMU32(base, MGC_O_QMU_RQSAR(ep_num)),
+				MGC_ReadQMU32(base, MGC_O_QMU_RQCPR(ep_num)),
+				MGC_ReadQMU32(base, MGC_O_QMU_RQLDPR(ep_num)));
+
+		QMU_ERR("[RXD][ERROR]QCR0=%x, QCR2=%x, QCR3=%x, QGCSR=%x\n",
+					MGC_ReadQMU32(base, MGC_O_QMU_QCR0),
+					MGC_ReadQMU32(base, MGC_O_QMU_QCR2),
+					MGC_ReadQMU32(base, MGC_O_QMU_QCR3),
+					MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR));
+
+		QMU_ERR("[RX]HWO=%d, Next_GPD=%p ,BufLen=%d, Buf=%p, RLen=%d, EP=%d\n",
+				(u32)TGPD_GET_FLAG(gpd), TGPD_GET_NEXT(gpd),
+				(u32)TGPD_GET_DataBUF_LEN(gpd), TGPD_GET_DATA(gpd),
+				(u32)TGPD_GET_BUF_LEN(gpd), (u32)TGPD_GET_EPaddr(gpd));
+
+		return;
+	}
+
+	if (!gpd || !gpd_current) {
+		QMU_ERR("[RXD][ERROR] EP%d, gpd=%p, gpd_current=%p, ishwo=%d, rx_gpd_last=%p, RQCPR=0x%x\n",
+				ep_num, gpd, gpd_current,
+				((gpd == NULL) ? 999 : TGPD_IS_FLAGS_HWO(gpd)),
+				Rx_gpd_last[ep_num],
+				MGC_ReadQMU32(base, MGC_O_QMU_RQCPR(ep_num)));
+		return;
+	}
+
+	if (TGPD_IS_FLAGS_HWO(gpd)) {
+		QMU_ERR("[RXD][ERROR]HWO=1!!\n");
+		musbfsh_bug();
+	}
+
+	/* NORMAL EXEC FLOW */
+	while (gpd != gpd_current && !TGPD_IS_FLAGS_HWO(gpd)) {
+		u32 rcv_len = (u32)TGPD_GET_BUF_LEN(gpd);
+
+		urb = next_urb(qh);
+		if (!urb) {
+			INFO("extra RX%d ready\n", ep_num);
+			mtk11_qmu_stop(ep_num, USB_DIR_IN);
+			return;
+		}
+
+		if (!TGPD_GET_NEXT(gpd) || !TGPD_GET_DATA(gpd)) {
+			QMU_ERR("[RXD][ERROR] EP%d ,gpd=%p\n", ep_num, gpd);
+			musbfsh_bug();
+		}
+		if (usb_pipebulk(urb->pipe)
+				&& urb->transfer_buffer_length >= QMU_RX_SPLIT_THRE
+				&& usb_pipein(urb->pipe)) {
+			urb->actual_length += TGPD_GET_BUF_LEN(gpd);
+			qh->offset += TGPD_GET_BUF_LEN(gpd);
+			qh->iso_idx++;
+			done = (qh->iso_idx == urb->number_of_packets) ? true : false;
+		} else if (usb_pipeisoc(urb->pipe)) {
+			struct usb_iso_packet_descriptor	*d;
+
+			d = urb->iso_frame_desc + qh->iso_idx;
+			d->actual_length = rcv_len;
+			d->status = 0;
+			urb->actual_length += rcv_len;
+			qh->offset += TGPD_GET_BUF_LEN(gpd);
+			qh->iso_idx++;
+			done = (qh->iso_idx == urb->number_of_packets) ? true : false;
+		} else {
+			urb->actual_length = TGPD_GET_BUF_LEN(gpd);
+			qh->offset = TGPD_GET_BUF_LEN(gpd);
+			done = true;
+		}
+
+		gpd = TGPD_GET_NEXT(gpd);
+
+		gpd = gpd_phys_to_virt((dma_addr_t)gpd, RXQ, ep_num);
+		INFO("gpd = %p ep_num = %d\n", gpd, ep_num);
+		if (!gpd) {
+			QMU_ERR("[RXD][ERROR]%s EP%d ,gpd=%p\n", __func__, ep_num, gpd);
+			musbfsh_bug();
+		}
+		Rx_gpd_last[ep_num] = gpd;
+		Rx_gpd_free_count[ep_num]++;
+		INFO("gpd = %p ep_num = %d\n", gpd, ep_num);
+		INFO("hw_ep = %p\n", hw_ep);
+
+		if (done) {
+			if (musbfsh_ep_get_qh(hw_ep, USB_DIR_IN))
+				qh->iso_idx = 0;
+
+			musbfsh_advance_schedule(musbfsh, urb, hw_ep, USB_DIR_IN);
+
+			if (!hw_ep->in_qh) {
+				WARNING("hw_ep:%d, QH NULL after advance_schedule\n", ep_num);
+				return;
+			}
+		}
+	}
+	/* QMU should have stopped at a GPD with HWO set, so this is an error */
+	if (gpd != gpd_current && TGPD_IS_FLAGS_HWO(gpd)) {
+		QMU_ERR("[RXD][ERROR]gpd=%p\n", gpd);
+
+		QMU_ERR("[RXD][ERROR]EP%d RQCSR=%x, RQSAR=%x, RQCPR=%x, RQLDPR=%x\n",
+				ep_num,
+				MGC_ReadQMU32(base, MGC_O_QMU_RQCSR(ep_num)),
+				MGC_ReadQMU32(base, MGC_O_QMU_RQSAR(ep_num)),
+				MGC_ReadQMU32(base, MGC_O_QMU_RQCPR(ep_num)),
+				MGC_ReadQMU32(base, MGC_O_QMU_RQLDPR(ep_num)));
+
+		QMU_ERR("[RXD][ERROR]QCR0=%x, QCR2=%x, QCR3=%x, QGCSR=%x\n",
+				MGC_ReadQMU32(base, MGC_O_QMU_QCR0),
+				MGC_ReadQMU32(base, MGC_O_QMU_QCR2),
+				MGC_ReadQMU32(base, MGC_O_QMU_QCR3),
+				MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR));
+
+		QMU_ERR("[RX]HWO=%d, Next_GPD=%p ,BufLen=%d, Buf=%p, RLen=%d, EP=%d\n",
+				(u32)TGPD_GET_FLAG(gpd), TGPD_GET_NEXT(gpd),
+				(u32)TGPD_GET_DataBUF_LEN(gpd), TGPD_GET_DATA(gpd),
+				(u32)TGPD_GET_BUF_LEN(gpd), (u32)TGPD_GET_EPaddr(gpd));
+	}
+
+	QMU_INFO("[RXD]%s EP%d, Last=%p, End=%p, complete\n", __func__,
+				ep_num, Rx_gpd_last[ep_num], Rx_gpd_end[ep_num]);
+	INFO("\n");
+}
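The RX and TX completion handlers share one mechanism: software holds a "last completed" pointer per queue and walks the GPD list toward the hardware's current-pointer register, reclaiming every descriptor whose HWO (hardware-own) bit has been cleared. A minimal sketch of that walk, assuming simplified illustrative types (struct gpd and GPD_HWO below are stand-ins, not the driver's definitions):

#define GPD_HWO 0x01	/* bit 0: set while hardware owns the descriptor */

struct gpd {
	unsigned char flag;
	struct gpd *next;	/* virtual link; the real GPD stores a physical address */
};

static struct gpd *complete_gpds(struct gpd *last, struct gpd *current_hw)
{
	/* Stop at the hardware's position, or at the first GPD it still owns. */
	while (last != current_hw && !(last->flag & GPD_HWO)) {
		/* ... complete the URB attached to 'last' here ... */
		last = last->next;
	}
	return last;	/* becomes the new Rx/Tx_gpd_last[ep_num] */
}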
+
+void h_mtk11_qmu_done_tx(struct musbfsh *musbfsh, u8 ep_num)
+{
+	void __iomem *base = musbfsh_qmu_base;
+	TGPD *gpd = Tx_gpd_last[ep_num];
+	TGPD *gpd_current = (TGPD *)(unsigned long)MGC_ReadQMU32(base, MGC_O_QMU_TQCPR(ep_num));
+	struct musbfsh_hw_ep	*hw_ep = musbfsh->endpoints + ep_num;
+	struct musbfsh_qh	*qh = hw_ep->out_qh;
+	struct urb	*urb = NULL;
+	bool done = true;
+
+	if (unlikely(!qh)) {
+		WARNING("hw_ep:%d, QH NULL\n", ep_num);
+		return;
+	}
+
+	urb = next_urb(qh);
+	if (unlikely(!urb)) {
+		WARNING("hw_ep:%d, !URB\n", ep_num);
+		return;
+	}
+
+	/* Translate the physical address read from the QMU register into a virtual address */
+	gpd_current = gpd_phys_to_virt((dma_addr_t)gpd_current, TXQ, ep_num);
+
+	QMU_INFO("[TXD]%s EP%d, Last=%p, Current=%p, End=%p\n",
+				__func__, ep_num, gpd, gpd_current, Tx_gpd_end[ep_num]);
+
+	/* gpd_current should point at least one GPD beyond the previously completed one. */
+	if (gpd == gpd_current)
+		return;
+
+	if (TGPD_IS_FLAGS_HWO(gpd)) {
+		QMU_ERR("[TXD] HWO=1, CPR=%x\n", MGC_ReadQMU32(base, MGC_O_QMU_TQCPR(ep_num)));
+		musbfsh_bug();
+	}
+
+	/* NORMAL EXEC FLOW */
+	while (gpd != gpd_current && !TGPD_IS_FLAGS_HWO(gpd)) {
+		QMU_INFO("[TXD]gpd=%p ->HWO=%d, BPD=%d, Next_GPD=%p, DataBuffer=%p, BufferLen=%d\n",
+			gpd, (u32)TGPD_GET_FLAG(gpd), (u32)TGPD_GET_FORMAT(gpd),
+			TGPD_GET_NEXT(gpd), TGPD_GET_DATA(gpd), (u32)TGPD_GET_BUF_LEN(gpd));
+
+		if (!TGPD_GET_NEXT(gpd)) {
+			QMU_ERR("[TXD][ERROR]Next GPD is null!!\n");
+			break;
+		}
+
+		urb = next_urb(qh);
+		if (!urb) {
+			QMU_ERR("extra TX%d ready\n", ep_num);
+			mtk11_qmu_stop(ep_num, USB_DIR_OUT);
+			return;
+		}
+
+		if (!TGPD_GET_NEXT(gpd) || !TGPD_GET_DATA(gpd)) {
+			QMU_ERR("[RXD][ERROR] EP%d ,gpd=%p\n", ep_num, gpd);
+			musbfsh_bug();
+		}
+
+		if (usb_pipebulk(urb->pipe)
+				&& urb->transfer_buffer_length >= QMU_RX_SPLIT_THRE
+				&& usb_pipeout(urb->pipe)) {
+			QMU_WARN("bulk???\n");
+			urb->actual_length += TGPD_GET_BUF_LEN(gpd);
+			qh->offset += TGPD_GET_BUF_LEN(gpd);
+			qh->iso_idx++;
+			done = (qh->iso_idx == urb->number_of_packets) ? true : false;
+		} else if (usb_pipeisoc(urb->pipe)) {
+			struct usb_iso_packet_descriptor	*d;
+
+			d = urb->iso_frame_desc + qh->iso_idx;
+			d->actual_length = TGPD_GET_BUF_LEN(gpd);
+			d->status = 0;
+			urb->actual_length += TGPD_GET_BUF_LEN(gpd);
+			qh->offset += TGPD_GET_BUF_LEN(gpd);
+			qh->iso_idx++;
+			done = (qh->iso_idx == urb->number_of_packets) ? true : false;
+		} else {
+			QMU_WARN("others use qmu???\n");
+			urb->actual_length = TGPD_GET_BUF_LEN(gpd);
+			qh->offset = TGPD_GET_BUF_LEN(gpd);
+			done = true;
+		}
+		gpd = TGPD_GET_NEXT(gpd);
+		gpd = gpd_phys_to_virt((dma_addr_t)gpd, TXQ, ep_num);
+		Tx_gpd_last[ep_num] = gpd;
+		Tx_gpd_free_count[ep_num]++;
+
+		if (done) {
+			if (musbfsh_ep_get_qh(hw_ep, USB_DIR_OUT))
+				qh->iso_idx = 0;
+
+			musbfsh_advance_schedule(musbfsh, urb, hw_ep, USB_DIR_OUT);
+
+			if (!hw_ep->out_qh) {
+				WARNING("hw_ep:%d, QH NULL after advance_schedule\n", ep_num);
+				return;
+			}
+		}
+	}
+}
+#endif
diff --git a/drivers/misc/mediatek/usb11/mtk11_qmu.h b/drivers/misc/mediatek/usb11/mtk11_qmu.h
new file mode 100644
index 0000000..2e9777e
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/mtk11_qmu.h
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTKFSH_QMU_H_
+#define _MTKFSH_QMU_H_
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+
+/* for musb_read/write api */
+/*#include "mtk_musb.h"*/
+#include "musbfsh_debug.h"
+#include "musbfsh_io.h"
+
+#include <linux/dmapool.h>
+
+/* CUSTOM SETTING */
+#define GPD_LEN_ALIGNED (64)	/* > gpd len (16) and cache line size aligned */
+#define GPD_EXT_LEN (48)	/* GPD_LEN_ALIGNED - 16 (16 should be sizeof(TGPD)) */
+#define GPD_SZ (16)
+#define DFT_MAX_GPD_NUM 36
+#ifndef MUSBFSH_QMU_LIMIT_SUPPORT
+#define RXQ_NUM 8
+#define TXQ_NUM 8
+#else
+#define RXQ_NUM MUSBFSH_QMU_LIMIT_RXQ_NUM
+#define TXQ_NUM MUSBFSH_QMU_LIMIT_TXQ_NUM
+#endif
+#define MAX_QMU_EP RXQ_NUM
+#define TXQ	0
+#define RXQ	1
+
+/* QMU SETTING */
+#define NO_ZLP 0
+#define HW_MODE 1
+#define GPD_MODE 2
+/* #define TXZLP GPD_MODE */
+/* #define TXZLP HW_MODE */
+#define TXZLP NO_ZLP
+
+/* #define CFG_RX_ZLP_EN */
+/* #define CFG_RX_COZ_EN */
+
+#define CFG_CS_CHECK
+/* #define CFG_EMPTY_CHECK */
+
+/* TGPD */
+typedef struct _TGPD {
+	u8 flag;
+	u8 chksum;
+	u16 DataBufferLen;	/*Rx Allow Length */
+
+	/* address field, 32-bit long */
+	u32 pNext;
+	u32 pBuf;
+
+	u16 bufLen;
+	u8 ExtLength;
+	u8 ZTepFlag;
+} TGPD, *PGPD;
+
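The structure above is expected to occupy exactly GPD_SZ (16) bytes: flag(1) + chksum(1) + DataBufferLen(2) + pNext(4) + pBuf(4) + bufLen(2) + ExtLength(1) + ZTepFlag(1). A hedged compile-time check one could add (this helper is illustrative and not part of the original header; BUILD_BUG_ON comes from <linux/bug.h> and must sit in function scope):

static inline void tgpd_layout_check(void)
{
	BUILD_BUG_ON(sizeof(TGPD) != GPD_SZ);	/* 16 bytes */
}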
+typedef struct _GPD_RANGE {
+	PGPD pNext;
+	PGPD pStart;
+	PGPD pEnd;
+} GPD_R, *RGPD;
+
+#define LOG_EMERG		0
+#define LOG_ALERT		1
+#define LOG_CRIT		2
+#define LOG_ERR		3
+#define LOG_WARN	4
+#define LOG_NOTICE		5
+#define LOG_INFO		6
+#define LOG_DBG		7
+
+/* #define QMU_DBG_ON */
+#ifdef QMU_DBG_ON
+static inline int mtk11_dbg_level(unsigned level)
+{
+	return mtk11_qmu_dbg_level >= level;
+}
+
+#define QMU_ERR(format, args...) do {if (mtk11_dbg_level(LOG_ERR)) \
+	pr_warn("QMU_ERR,<%s %d>, " format, __func__, __LINE__, ## args);  } \
+	while (0)
+#define QMU_WARN(format, args...) do {if (mtk11_dbg_level(LOG_WARN)) \
+	pr_warn("QMU_WARN,<%s %d>, " format, __func__, __LINE__, ## args);  } \
+	while (0)
+#define QMU_INFO(format, args...) do {if (mtk11_dbg_level(LOG_INFO)) \
+	pr_warn("QMU_INFO,<%s %d>, " format, __func__, __LINE__, ## args);  } \
+	while (0)
+#define QMU_DBG(format, args...) do {if (mtk11_dbg_level(LOG_DBG)) \
+	pr_warn("QMU_DBG,<%s %d>, " format, __func__, __LINE__, ## args);  } \
+	while (0)
+#else
+#define QMU_ERR(format, args...) do {} while (0)
+#define QMU_WARN(format, args...) do {} while (0)
+#define QMU_INFO(format, args...) do {} while (0)
+#define QMU_DBG(format, args...) do {} while (0)
+#endif
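With QMU_DBG_ON defined, each macro prints only when the global mtk11_qmu_dbg_level (declared elsewhere in the driver) meets its threshold; otherwise all four compile to no-ops. An illustrative gating example, with a hypothetical level setting:

mtk11_qmu_dbg_level = LOG_INFO;	/* 6 */
QMU_ERR("queue error\n");	/* printed: LOG_ERR (3) <= 6 */
QMU_INFO("queue state\n");	/* printed: LOG_INFO (6) <= 6 */
QMU_DBG("gpd detail\n");	/* suppressed: LOG_DBG (7) > 6 */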
+
+
+
+/* QMU macros */
+#define USB_HW_QMU_OFF	0x0000
+#define USB_HW_QUCS_OFF	0x0300
+#define USB_HW_QIRQ_OFF	0x0400
+#define USB_HW_QDBG_OFF	0x04F0
+
+#define MGC_O_QMU_QCR0	0x0000
+#define MGC_O_QMU_QCR2	0x0008
+#define MGC_O_QMU_QCR3	0x000C
+
+#define MGC_O_QMU_RQCSR0	0x0010
+#define MGC_O_QMU_RQSAR0	0x0014
+#define MGC_O_QMU_RQCPR0	0x0018
+#define MGC_O_QMU_RQCSR(n) (MGC_O_QMU_RQCSR0+0x0010*((n)-1))
+#define MGC_O_QMU_RQSAR(n) (MGC_O_QMU_RQSAR0+0x0010*((n)-1))
+#define MGC_O_QMU_RQCPR(n) (MGC_O_QMU_RQCPR0+0x0010*((n)-1))
+
+#define MGC_O_QMU_RQTR_BASE	0x0090
+#define MGC_O_QMU_RQTR(n)		(MGC_O_QMU_RQTR_BASE+0x4*((n)-1))
+#define MGC_O_QMU_RQLDPR0		0x0100
+#define MGC_O_QMU_RQLDPR(n)	(MGC_O_QMU_RQLDPR0+0x4*((n)-1))
+
+#define MGC_O_QMU_TQCSR0	0x0200
+#define MGC_O_QMU_TQSAR0	0x0204
+#define MGC_O_QMU_TQCPR0	0x0208
+#define MGC_O_QMU_TQCSR(n) (MGC_O_QMU_TQCSR0+0x0010*((n)-1))
+#define MGC_O_QMU_TQSAR(n) (MGC_O_QMU_TQSAR0+0x0010*((n)-1))
+#define MGC_O_QMU_TQCPR(n) (MGC_O_QMU_TQCPR0+0x0010*((n)-1))
+
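The per-queue register macros assume 1-based endpoint numbers and a 0x10 stride from the endpoint-1 base, for example:

/* Worked offsets (endpoint numbering starts at 1):
 *   MGC_O_QMU_RQCPR(1) = 0x0018 + 0x10 * 0 = 0x0018
 *   MGC_O_QMU_RQCPR(2) = 0x0018 + 0x10 * 1 = 0x0028
 *   MGC_O_QMU_TQCSR(3) = 0x0200 + 0x10 * 2 = 0x0220
 */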
+#define MGC_O_QMU_QAR		0x0300
+#define MGC_O_QUCS_USBGCSR	0x0000
+#define MGC_O_QIRQ_QISAR		0x0000
+#define MGC_O_QIRQ_QIMR		0x0004
+#define MGC_O_QIRQ_QIMCR		0x0008
+#define MGC_O_QIRQ_QIMSR		0x000C
+#define MGC_O_QIRQ_IOCDISR    0x0030
+#define MGC_O_QIRQ_TEPEMPR	0x0060
+#define MGC_O_QIRQ_TEPEMPMR	0x0064
+#define MGC_O_QIRQ_TEPEMPMCR	0x0068
+#define MGC_O_QIRQ_TEPEMPMSR	0x006C
+#define MGC_O_QIRQ_REPEMPR	0x0070
+#define MGC_O_QIRQ_REPEMPMR	0x0074
+#define MGC_O_QIRQ_REPEMPMCR	0x0078
+#define MGC_O_QIRQ_REPEMPMSR	0x007C
+
+#define MGC_O_QIRQ_RQEIR		0x0090
+#define MGC_O_QIRQ_RQEIMR		0x0094
+#define MGC_O_QIRQ_RQEIMCR	0x0098
+#define MGC_O_QIRQ_RQEIMSR	0x009C
+#define MGC_O_QIRQ_REPEIR		0x00A0
+#define MGC_O_QIRQ_REPEIMR	0x00A4
+#define MGC_O_QIRQ_REPEIMCR	0x00A8
+#define MGC_O_QIRQ_REPEIMSR	0x00AC
+#define MGC_O_QIRQ_TQEIR		0x00B0
+#define MGC_O_QIRQ_TQEIMR		0x00B4
+#define MGC_O_QIRQ_TQEIMCR	0x00B8
+#define MGC_O_QIRQ_TQEIMSR	0x00BC
+#define MGC_O_QIRQ_TEPEIR		0x00C0
+#define MGC_O_QIRQ_TEPEIMR	0x00C4
+#define MGC_O_QIRQ_TEPEIMCR	0x00C8
+#define MGC_O_QIRQ_TEPEIMSR	0x00CC
+
+#define MGC_O_QDBG_DFCR	0x0000
+#define MGC_O_QDBG_DFMR	0x0004
+
+/* brief Queue Control value Definition */
+#define DQMU_QUE_START	0x00000001
+#define DQMU_QUE_RESUME	0x00000002
+#define DQMU_QUE_STOP		0x00000004
+#define DQMU_QUE_ACTIVE	0x00008000
+
+/*brief USB QMU Special Control USBGCSR value Definition*/
+#define USB_QMU_Tx0_EN			0x00000001
+#define USB_QMU_Tx_EN(n)			(USB_QMU_Tx0_EN<<((n)-1))
+#define USB_QMU_Rx0_EN			0x00010000
+#define USB_QMU_Rx_EN(n)			(USB_QMU_Rx0_EN<<((n)-1))
+#define USB_QMU_HIFEVT_EN			0x00000100
+#define USB_QMU_HIFCMD_EN			0x01000000
+#define DQMU_SW_RESET		0x00010000
+#define DQMU_CS16B_EN		0x80000000
+#define DQMU_TQ0CS_EN		0x00010000
+#define DQMU_TQCS_EN(n)	(DQMU_TQ0CS_EN<<((n)-1))
+#define DQMU_RQ0CS_EN		0x00000001
+#define DQMU_RQCS_EN(n)	(DQMU_RQ0CS_EN<<((n)-1))
+#define DQMU_TX0_ZLP		0x01000000
+#define DQMU_TX_ZLP(n)		(DQMU_TX0_ZLP<<((n)-1))
+#define DQMU_TX0_MULTIPLE	0x00010000
+#define DQMU_TX_MULTIPLE(n)	(DQMU_TX0_MULTIPLE<<((n)-1))
+#define DQMU_RX0_MULTIPLE	0x00010000
+#define DQMU_RX_MULTIPLE(n)	(DQMU_RX0_MULTIPLE<<((n)-1))
+#define DQMU_RX0_ZLP		0x01000000
+#define DQMU_RX_ZLP(n)		(DQMU_RX0_ZLP<<((n)-1))
+#define DQMU_RX0_COZ		0x00000100
+#define DQMU_RX_COZ(n)		(DQMU_RX0_COZ<<((n)-1))
+
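The Tx/Rx enable bits are likewise 1-based shifts. A hedged sketch of enabling the queue pair for endpoint 1 through the accessor macros defined later in this header, where 'base' stands for the QMU register base used throughout the driver:

u32 gcsr = MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR);
gcsr |= USB_QMU_Tx_EN(1) | USB_QMU_Rx_EN(1);	/* 0x00000001 | 0x00010000 */
MGC_WriteQUCS32(base, MGC_O_QUCS_USBGCSR, gcsr);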
+#define DQMU_M_TXEP_ERR	0x10000000
+#define DQMU_M_TXQ_ERR	0x08000000
+#define DQMU_M_RXEP_ERR	0x04000000
+#define DQMU_M_RXQ_ERR	0x02000000
+#define DQMU_M_RQ_EMPTY	0x00020000
+#define DQMU_M_TQ_EMPTY	0x00010000
+#define DQMU_M_RX0_EMPTY	0x00000001
+#define DQMU_M_RX_EMPTY(n)	(DQMU_M_RX0_EMPTY<<((n)-1))
+#define DQMU_M_TX0_EMPTY	0x00000001
+#define DQMU_M_TX_EMPTY(n)	(DQMU_M_TX0_EMPTY<<((n)-1))
+#define DQMU_M_RX0_DONE	0x00000100
+#define DQMU_M_RX_DONE(n)	(DQMU_M_RX0_DONE<<((n)-1))
+#define DQMU_M_TX0_DONE	0x00000001
+#define DQMU_M_TX_DONE(n)	(DQMU_M_TX0_DONE<<((n)-1))
+
+#define DQMU_M_RX0_ZLP_ERR	0x01000000
+#define DQMU_M_RX_ZLP_ERR(n)	(DQMU_M_RX0_ZLP_ERR<<((n)-1))
+#define DQMU_M_RX0_LEN_ERR	0x00000100
+#define DQMU_M_RX_LEN_ERR(n)	(DQMU_M_RX0_LEN_ERR<<((n)-1))
+#define DQMU_M_RX0_GPDCS_ERR		0x00000001
+#define DQMU_M_RX_GPDCS_ERR(n)	(DQMU_M_RX0_GPDCS_ERR<<((n)-1))
+
+#define DQMU_M_TX0_LEN_ERR	0x00010000
+#define DQMU_M_TX_LEN_ERR(n)	(DQMU_M_TX0_LEN_ERR<<((n)-1))
+#define DQMU_M_TX0_GPDCS_ERR	0x00000100
+#define DQMU_M_TX_GPDCS_ERR(n)	(DQMU_M_TX0_GPDCS_ERR<<((n)-1))
+#define DQMU_M_TX0_BDCS_ERR		0x00000001
+#define DQMU_M_TX_BDCS_ERR(n)	(DQMU_M_TX0_BDCS_ERR<<((n)-1))
+
+#define DQMU_M_TX0_EP_ERR		0x00000001
+#define DQMU_M_TX_EP_ERR(n)	(DQMU_M_TX0_EP_ERR<<((n)-1))
+
+#define DQMU_M_RX0_EP_ERR		0x00000001
+#define DQMU_M_RX_EP_ERR(n)	(DQMU_M_RX0_EP_ERR<<((n)-1))
+#define DQMU_M_RQ_DIS_IOC(n)   (0x100<<((n)-1))
+
+#define MGC_ReadQMU8(base, _offset) \
+	musbfsh_readb(base, (USB_HW_QMU_OFF + _offset))
+
+#define MGC_ReadQUCS8(base, _offset) \
+	musbfsh_readb(base, (USB_HW_QUCS_OFF + _offset))
+
+#define MGC_ReadQIRQ8(base, _offset) \
+	musbfsh_readb(base, (USB_HW_QIRQ_OFF + _offset))
+
+#define MGC_ReadQMU16(base, _offset) \
+	musbfsh_readw(base, (USB_HW_QMU_OFF + _offset))
+
+#define MGC_ReadQUCS16(base, _offset) \
+	musbfsh_readw(base, (USB_HW_QUCS_OFF + _offset))
+
+#define MGC_ReadQIRQ16(base, _offset) \
+	musbfsh_readw(base, (USB_HW_QIRQ_OFF + _offset))
+#define MGC_ReadQMU32(base, _offset) \
+	musbfsh_readl(base, (USB_HW_QMU_OFF + _offset))
+
+#define MGC_ReadQUCS32(base, _offset) \
+	musbfsh_readl(base, (USB_HW_QUCS_OFF + _offset))
+
+#define MGC_ReadQIRQ32(base, _offset) \
+	musbfsh_readl(base, (USB_HW_QIRQ_OFF + _offset))
+
+#define MGC_WriteQMU32(base, _offset, _data) \
+	musbfsh_writel(base, (USB_HW_QMU_OFF + _offset), _data)
+
+#define MGC_WriteQUCS32(base, _offset, _data) \
+	musbfsh_writel(base, (USB_HW_QUCS_OFF + _offset), _data)
+
+#define MGC_WriteQIRQ32(base, _offset, _data) \
+	musbfsh_writel(base, (USB_HW_QIRQ_OFF + _offset), _data)
+
+u8 mtk11_PDU_calcCksum(u8 *data, int len);
+
+/* brief Define DMAQ GPD format */
+#define TGPD_FLAGS_HWO              0x01
+#define TGPD_IS_FLAGS_HWO(_pd)      (((TGPD *)_pd)->flag & TGPD_FLAGS_HWO)
+#define TGPD_SET_FLAGS_HWO(_pd)     (((TGPD *)_pd)->flag |= TGPD_FLAGS_HWO)
+#define TGPD_CLR_FLAGS_HWO(_pd)     (((TGPD *)_pd)->flag &= (~TGPD_FLAGS_HWO))
+#define TGPD_FORMAT_BDP             0x02
+#define TGPD_IS_FORMAT_BDP(_pd)     (((TGPD *)_pd)->flag & TGPD_FORMAT_BDP)
+#define TGPD_SET_FORMAT_BDP(_pd)    (((TGPD *)_pd)->flag |= TGPD_FORMAT_BDP)
+#define TGPD_CLR_FORMAT_BDP(_pd)    (((TGPD *)_pd)->flag &= (~TGPD_FORMAT_BDP))
+
+#define TGPD_SET_FLAG(_pd, _flag)   (((TGPD *)_pd)->flag = (((TGPD *)_pd)->flag&(~TGPD_FLAGS_HWO))|(_flag))
+#define TGPD_GET_FLAG(_pd)             (((TGPD *)_pd)->flag & TGPD_FLAGS_HWO)
+#define TGPD_SET_CHKSUM(_pd, _n)    (((TGPD *)_pd)->chksum = mtk11_PDU_calcCksum((u8 *)_pd, _n))
+#define TGPD_SET_CHKSUM_HWO(_pd, _n)    (((TGPD *)_pd)->chksum = mtk11_PDU_calcCksum((u8 *)_pd, _n)-1)
+#define TGPD_GET_CHKSUM(_pd)        (((TGPD *)_pd)->chksum)
+#define TGPD_SET_FORMAT(_pd, _fmt)  (((TGPD *)_pd)->flag = (((TGPD *)_pd)->flag&(~TGPD_FORMAT_BDP))|(_fmt))
+#define TGPD_GET_FORMAT(_pd)        (((((TGPD *)_pd)->flag & TGPD_FORMAT_BDP)>>1))
+#define TGPD_SET_DataBUF_LEN(_pd, _len) (((TGPD *)_pd)->DataBufferLen = _len)
+#define TGPD_ADD_DataBUF_LEN(_pd, _len) (((TGPD *)_pd)->DataBufferLen += _len)
+#define TGPD_GET_DataBUF_LEN(_pd)       (((TGPD *)_pd)->DataBufferLen)
+#define TGPD_SET_NEXT(_pd, _next)   (((TGPD *)_pd)->pNext = (u32)(unsigned long)((TGPD *)_next))
+#define TGPD_GET_NEXT(_pd)			((TGPD *)(unsigned long)((TGPD *)_pd)->pNext)
+
+#define TGPD_SET_DATA(_pd, _data)   (((TGPD *)_pd)->pBuf = (u32)(unsigned long)_data)
+#define TGPD_GET_DATA(_pd)          ((u8 *)(unsigned long)((TGPD *)_pd)->pBuf)
+#define TGPD_SET_BUF_LEN(_pd, _len) (((TGPD *)_pd)->bufLen = _len)
+#define TGPD_ADD_BUF_LEN(_pd, _len) (((TGPD *)_pd)->bufLen += _len)
+#define TGPD_GET_BUF_LEN(_pd)       (((TGPD *)_pd)->bufLen)
+#define TGPD_SET_EXT_LEN(_pd, _len) (((TGPD *)_pd)->ExtLength = _len)
+#define TGPD_GET_EXT_LEN(_pd)        (((TGPD *)_pd)->ExtLength)
+#define TGPD_SET_EPaddr(_pd, _EP)  (((TGPD *)_pd)->ZTepFlag = (((TGPD *)_pd)->ZTepFlag&0xF0)|(_EP))
+#define TGPD_GET_EPaddr(_pd)        (((TGPD *)_pd)->ZTepFlag & 0x0F)
+
+#define TGPD_FORMAT_TGL             0x10
+#define TGPD_IS_FORMAT_TGL(_pd)     ((((TGPD *)_pd)->ZTepFlag & TGPD_FORMAT_TGL))
+#define TGPD_SET_FORMAT_TGL(_pd)    ((((TGPD *)_pd)->ZTepFlag |= TGPD_FORMAT_TGL))
+#define TGPD_CLR_FORMAT_TGL(_pd)    ((((TGPD *)_pd)->ZTepFlag &= (~TGPD_FORMAT_TGL)))
+#define TGPD_FORMAT_ZLP             0x20
+#define TGPD_IS_FORMAT_ZLP(_pd)     ((((TGPD *)_pd)->ZTepFlag & TGPD_FORMAT_ZLP))
+#define TGPD_SET_FORMAT_ZLP(_pd)    ((((TGPD *)_pd)->ZTepFlag |= TGPD_FORMAT_ZLP))
+#define TGPD_CLR_FORMAT_ZLP(_pd)    ((((TGPD *)_pd)->ZTepFlag &= (~TGPD_FORMAT_ZLP)))
+
+#define TGPD_SET_TGL(_pd, _TGL)  (((TGPD *)_pd)->ZTepFlag |= ((_TGL) ? 0x10 : 0x00))
+#define TGPD_GET_TGL(_pd)        (((TGPD *)_pd)->ZTepFlag & 0x10 ? 1:0)
+#define TGPD_SET_ZLP(_pd, _ZLP)  (((TGPD *)_pd)->ZTepFlag |= ((_ZLP) ? 0x20 : 0x00))
+#define TGPD_GET_ZLP(_pd)        (((TGPD *)_pd)->ZTepFlag & 0x20 ? 1:0)
+
+#define TGPD_FLAG_IOC				0x80
+#define TGPD_SET_IOC(_pd)			(((TGPD *)_pd)->flag |= TGPD_FLAG_IOC)
+#define TGPD_CLR_IOC(_pd)			(((TGPD *)_pd)->flag &= (~TGPD_FLAG_IOC))
+
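A hedged sketch of how these accessors combine when queuing one descriptor. The locals (gpd, next_gpd, buf, len, ep_num) are hypothetical, and the ordering (checksum first, HWO last) is inferred from the TGPD_SET_CHKSUM_HWO helper, which pre-computes the checksum as if HWO were already set:

TGPD_SET_NEXT(gpd, next_gpd);		/* link to the pre-allocated next GPD */
TGPD_SET_DATA(gpd, buf);
TGPD_SET_BUF_LEN(gpd, len);
TGPD_SET_EPaddr(gpd, ep_num);
TGPD_SET_CHKSUM_HWO(gpd, GPD_SZ);	/* checksum over the 16-byte GPD */
TGPD_SET_FLAGS_HWO(gpd);		/* hand ownership to the hardware last */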
+extern void mtk11_qmu_destroy_gpd_pool(struct device *dev);
+extern int mtk11_qmu_init_gpd_pool(struct device *dev);
+extern void mtk11_qmu_reset_gpd_pool(u32 ep_num, u8 isRx);
+extern bool mtk11_is_qmu_enabled(u8 EP_Num, u8 isRx);
+extern void mtk11_qmu_insert_task(u8 EP_Num, u8 isRx, u8 *buf, u32 length, u8 zlp, u8 isioc);
+extern void mtk11_qmu_resume(u8 ep_num, u8 isRx);
+extern void mtk11_disable_q(struct musbfsh *musbfsh, u8 ep_num, u8 isRx);
+extern void mtk11_qmu_irq_err(struct musbfsh *musbfsh, u32 qisar);
+extern void mtk11_flush_ep_csr(struct musbfsh *musbfsh, u8 ep_num, u8 isRx);
+extern void mtk11_qmu_stop(u8 ep_num, u8 isRx);
+
+#define QMU_RX_SPLIT_BLOCK_SIZE (32*1024)
+#define QMU_RX_SPLIT_THRE	(64*1024)
+extern u32 mtk11_qmu_used_gpd_count(u8 isRx, u32 num);
+extern u32 mtk11_qmu_free_gpd_count(u8 isRx, u32 num);
+extern void h_mtk11_qmu_done_rx(struct musbfsh *musbfsh, u8 ep_num);
+extern void h_mtk11_qmu_done_tx(struct musbfsh *musbfsh, u8 ep_num);
+#endif
+#endif
diff --git a/drivers/misc/mediatek/usb11/musbfsh.h b/drivers/misc/mediatek/usb11/musbfsh.h
new file mode 100644
index 0000000..c59a8e9
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+/*
+ * This is used for host and peripheral modes of the driver for
+ * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC.
+ *
+ * Board initialization should put one of these into dev->platform_data,
+ * probably on some platform_device named "musbfsh_hdrc".  It encapsulates
+ * key configuration differences between boards.
+ */
+
+#ifndef __LINUX_USB_MUSBFSH_H
+#define __LINUX_USB_MUSBFSH_H
+
+/* The USB role is defined by the connector used on the board, so long as
+ * standards are being followed.  (Developer boards sometimes won't.)
+ */
+enum musbfsh_mode {
+	MUSBFSH_UNDEFINED = 0,
+	MUSBFSH_HOST,		/* A or Mini-A connector */
+	MUSBFSH_PERIPHERAL,	/* B or Mini-B connector */
+	MUSBFSH_OTG		/* Mini-AB connector */
+};
+
+struct clk;
+enum musbfsh_fifo_style {
+	FIFO_RXTX,
+	FIFO_TX,
+	FIFO_RX
+} __packed;
+
+enum musbfsh_buf_mode {
+	BUF_SINGLE,
+	BUF_DOUBLE
+} __packed;
+
+struct musbfsh_fifo_cfg {
+	u8 hw_ep_num;
+	enum musbfsh_fifo_style style;
+	enum musbfsh_buf_mode mode;
+	u16 maxpacket;
+};
+
+#define MUSBFSH_EP_FIFO(ep, st, m, pkt)		\
+{						\
+	.hw_ep_num	= ep,			\
+	.style		= st,			\
+	.mode		= m,			\
+	.maxpacket	= pkt,			\
+}
+
+#define MUSBFSH_EP_FIFO_SINGLE(ep, st, pkt)	\
+	MUSBFSH_EP_FIFO(ep, st, BUF_SINGLE, pkt)
+
+#define MUSBFSH_EP_FIFO_DOUBLE(ep, st, pkt)	\
+	MUSBFSH_EP_FIFO(ep, st, BUF_DOUBLE, pkt)
+
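An illustrative board table built with these helpers; the endpoint numbers and packet sizes are made up for the example:

static struct musbfsh_fifo_cfg example_fifo_cfg[] = {
	MUSBFSH_EP_FIFO_SINGLE(1, FIFO_TX, 512),
	MUSBFSH_EP_FIFO_SINGLE(1, FIFO_RX, 512),
	MUSBFSH_EP_FIFO_DOUBLE(2, FIFO_RXTX, 64),
};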
+struct musbfsh_hdrc_eps_bits {
+	const char name[16];
+	u8 bits;
+};
+
+struct musbfsh_hdrc_config {
+	struct musbfsh_fifo_cfg *fifo_cfg;	/* board fifo configuration */
+	unsigned fifo_cfg_size;		/* size of the fifo configuration */
+
+	/* MUSB configuration-specific details */
+	unsigned multipoint:1;			/* multipoint device */
+	unsigned dyn_fifo:1 __deprecated; /* supports dynamic fifo sizing */
+	unsigned soft_con:1 __deprecated; /* soft connect required */
+	unsigned utm_16:1 __deprecated;	/* utm data width is 16 bits */
+	unsigned big_endian:1;		/* true if CPU uses big-endian */
+	unsigned mult_bulk_tx:1;	/* Tx ep required for multbulk pkts */
+	unsigned mult_bulk_rx:1;	/* Rx ep required for multbulk pkts */
+	unsigned high_iso_tx:1;		/* Tx ep required for HB iso */
+	unsigned high_iso_rx:1;		/* Rx ep required for HB iso */
+	unsigned dma:1 __deprecated;	/* supports DMA */
+	unsigned vendor_req:1 __deprecated;	/* vendor registers required */
+
+	u8 num_eps;			/* number of endpoints _with_ ep0 */
+
+	u8 dma_channels __deprecated;	/* number of dma channels */
+	u8 dyn_fifo_size;		/* dynamic size in bytes */
+
+	u8 vendor_ctrl __deprecated;	/* vendor control reg width */
+	u8 vendor_stat __deprecated;	/* vendor status reg width */
+	u8 dma_req_chan __deprecated;	/* bitmask for required dma channels */
+	u8 ram_bits;			/* ram address size */
+
+	struct musbfsh_hdrc_eps_bits *eps_bits __deprecated;
+
+};
+
+struct musbfsh_hdrc_platform_data {
+	/* MUSBFSH_HOST, MUSBFSH_PERIPHERAL, or MUSBFSH_OTG */
+	u8 mode;
+
+	const char *clock;
+	/* (HOST or OTG) switch VBUS on/off */
+	int (*set_vbus)(struct device *dev, int is_on);
+
+	/* (HOST or OTG) mA/2 power supplied on (default = 8mA) */
+	u8 power;
+
+	u8 min_power;
+	u8 potpgt;
+	/* (HOST or OTG) program PHY for external Vbus */
+	unsigned extvbus:1;
+
+	/* Power the device on or off */
+	int (*set_power)(int state);
+
+	/* MUSB configuration-specific details */
+	struct musbfsh_hdrc_config *config;
+
+	void *board_data;
+	const void *platform_ops;
+};
+
+#endif				/* __LINUX_USB_MUSBFSH_H */
diff --git a/drivers/misc/mediatek/usb11/musbfsh_debug.h b/drivers/misc/mediatek/usb11/musbfsh_debug.h
new file mode 100644
index 0000000..dd95e7f
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_debug.h
@@ -0,0 +1,65 @@
+/*
+ * MUSB OTG driver debug defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * Copyright 2015 Mediatek Inc.
+ *	Marvin Lin <marvin.lin@mediatek.com>
+ *	Arvin Wang <arvin.wang@mediatek.com>
+ *	Vincent Fan <vincent.fan@mediatek.com>
+ *	Bryant Lu <bryant.lu@mediatek.com>
+ *	Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *	Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MUSBFSH_LINUX_DEBUG_H__
+#define __MUSBFSH_LINUX_DEBUG_H__
+
+extern void musbfsh_bug(void);
+
+/* for normal logs: very detailed, impacts performance a lot */
+extern int musbfsh_debug;
+#define yprintk(facility, format, args...) \
+	do { \
+		if (musbfsh_debug) { \
+			printk(facility "[MUSBFSH] %s %d: " format, \
+					__func__, __LINE__, ## args); \
+		} \
+	} while (0)
+
+#define INFO(fmt, args...) yprintk(KERN_NOTICE, fmt, ## args)
+
+/* for critical log */
+#define zprintk(facility, format, args...) \
+		printk(facility "[MUSBFSH] %s %d: " \
+		       format, __func__, __LINE__, ## args)
+
+#define WARNING(fmt, args...) zprintk(KERN_WARNING, fmt, ## args)
+#define ERR(fmt, args...) zprintk(KERN_ERR, fmt, ## args)
+
+#endif
diff --git a/drivers/misc/mediatek/usb11/musbfsh_dma.h b/drivers/misc/mediatek/usb11/musbfsh_dma.h
new file mode 100644
index 0000000..66a891b
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_dma.h
@@ -0,0 +1,164 @@
+/*
+ * MUSB OTG driver DMA controller abstraction
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * Copyright 2015 Mediatek Inc.
+ *	Marvin Lin <marvin.lin@mediatek.com>
+ *	Arvin Wang <arvin.wang@mediatek.com>
+ *	Vincent Fan <vincent.fan@mediatek.com>
+ *	Bryant Lu <bryant.lu@mediatek.com>
+ *	Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *	Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSBFSH_DMA_H__
+#define __MUSBFSH_DMA_H__
+
+struct musbfsh_hw_ep;
+
+/*
+ * DMA Controller Abstraction
+ *
+ * DMA Controllers are abstracted to allow use of a variety of different
+ * implementations of DMA, as allowed by the Inventra USB cores.  On the
+ * host side, usbcore sets up the DMA mappings and flushes caches; on the
+ * peripheral side, the gadget controller driver does.  Responsibilities
+ * of a DMA controller driver include:
+ *
+ *  - Handling the details of moving multiple USB packets
+ *    in cooperation with the Inventra USB core, including especially
+ *    the correct RX side treatment of short packets and buffer-full
+ *    states (both of which terminate transfers).
+ *
+ *  - Knowing the correlation between dma channels and the
+ *    Inventra core's local endpoint resources and data direction.
+ *
+ *  - Maintaining a list of allocated/available channels.
+ *
+ *  - Updating channel status on interrupts,
+ *    whether shared with the Inventra core or separate.
+ */
+
+#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+#ifndef CONFIG_MUSBFSH_PIO_ONLY
+#define	is_dma_capable()	(1)
+#else
+#define	is_dma_capable()	(0)
+#endif
+
+/*
+ * DMA channel status ... updated by the dma controller driver whenever that
+ * status changes, and protected by the overall controller spinlock.
+ */
+enum dma_channel_status {
+	/* unallocated */
+	MUSBFSH_DMA_STATUS_UNKNOWN,
+	/* allocated ... but not busy, no errors */
+	MUSBFSH_DMA_STATUS_FREE,
+	/* busy ... transactions are active */
+	MUSBFSH_DMA_STATUS_BUSY,
+	/* transaction(s) aborted due to ... dma or memory bus error */
+	MUSBFSH_DMA_STATUS_BUS_ABORT,
+	/* transaction(s) aborted due to ... core error or USB fault */
+	MUSBFSH_DMA_STATUS_CORE_ABORT
+};
+
+struct dma_controller;
+
+/**
+ * struct dma_channel - A DMA channel.
+ * @private_data: channel-private data
+ * @max_len: the maximum number of bytes the channel can move in one
+ *	transaction (typically representing many USB maximum-sized packets)
+ * @actual_len: how many bytes have been transferred
+ * @status: current channel status (updated e.g. on interrupt)
+ * @desired_mode: true if mode 1 is desired; false if mode 0 is desired
+ *
+ * channels are associated with an endpoint for the duration of at least
+ * one usb transfer.
+ */
+struct dma_channel {
+	void *private_data;
+	/* FIXME not void* private_data, but a dma_controller * */
+	size_t max_len;
+	size_t actual_len;
+	enum dma_channel_status status;
+	bool desired_mode;
+};
+
+/*
+ * dma_channel_status - return status of dma channel
+ * @c: the channel
+ *
+ * Returns the software's view of the channel status.  If that status is BUSY
+ * then it's possible that the hardware has completed (or aborted) a transfer,
+ * so the driver needs to update that status.
+ */
+static inline enum dma_channel_status dma_channel_status(struct dma_channel *c)
+{
+	return (is_dma_capable() && c) ? c->status : MUSBFSH_DMA_STATUS_UNKNOWN;
+}
+
+/**
+ * struct dma_controller - A DMA Controller.
+ * @start: call this to start a DMA controller;
+ *	return 0 on success, else negative errno
+ * @stop: call this to stop a DMA controller
+ *	return 0 on success, else negative errno
+ * @channel_alloc: call this to allocate a DMA channel
+ * @channel_release: call this to release a DMA channel
+ * @channel_abort: call this to abort a pending DMA transaction,
+ *	returning it to FREE (but allocated) state
+ *
+ * Controllers manage dma channels.
+ */
+struct dma_controller {
+	int (*start)(struct dma_controller *);
+	int (*stop)(struct dma_controller *);
+	struct dma_channel *(*channel_alloc)(struct dma_controller *,
+					      struct musbfsh_hw_ep *, u8 is_tx);
+	void (*channel_release)(struct dma_channel *);
+	int (*channel_program)(struct dma_channel *channel,
+				u16 maxpacket, u8 mode, dma_addr_t dma_addr,
+				u32 length);
+	int (*channel_abort)(struct dma_channel *);
+};
+
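A hedged sketch of the expected call sequence against this abstraction, mirroring how musbfsh_tx_dma_program() in musbfsh_host.c uses it; 'ctrl', 'hw_ep', 'dma_addr' and 'len' are hypothetical locals:

struct dma_channel *ch = ctrl->channel_alloc(ctrl, hw_ep, 1 /* is_tx */);

if (ch && ctrl->channel_program(ch, 512 /* maxpacket */, 1 /* mode */,
				dma_addr, len)) {
	/* completion is reported later via musbfsh_dma_completion() */
} else if (ch) {
	ctrl->channel_release(ch);	/* give the channel back; fall back to PIO */
}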
+/* called after channel_program(), may indicate a fault */
+extern void musbfsh_dma_completion(struct musbfsh *musb, u8 epnum, u8 transmit);
+
+
+extern struct dma_controller *__init
+musbfsh_dma_controller_create(struct musbfsh *, void __iomem *);
+
+extern void musbfsh_dma_controller_destroy(struct dma_controller *);
+
+#endif				/* __MUSBFSH_DMA_H__ */
diff --git a/drivers/misc/mediatek/usb11/musbfsh_host.c b/drivers/misc/mediatek/usb11/musbfsh_host.c
new file mode 100644
index 0000000..0dd58e7
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_host.c
@@ -0,0 +1,2863 @@
+/*
+ * MUSB OTG driver host support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Copyright 2015 Mediatek Inc.
+ *	Marvin Lin <marvin.lin@mediatek.com>
+ *	Arvin Wang <arvin.wang@mediatek.com>
+ *	Vincent Fan <vincent.fan@mediatek.com>
+ *	Bryant Lu <bryant.lu@mediatek.com>
+ *	Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *	Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include "musbfsh_core.h"
+#include "musbfsh_host.h"
+#include "musbfsh_dma.h"
+#include "usb.h"
+#include "musbfsh_qmu.h"
+#include "mtk11_qmu.h"
+
+/* MUSB HOST status 22-mar-2006
+ *
+ * - There's still lots of partial code duplication for fault paths, so
+ *   they aren't handled as consistently as they need to be.
+ *
+ * - PIO mostly behaved when last tested.
+ *     + including ep0, with all usbtest cases 9, 10
+ *     + usbtest 14 (ep0out) doesn't seem to run at all
+ *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
+ *       configurations, but otherwise double buffering passes basic tests.
+ *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
+ *
+ * - DMA (CPPI) ... partially behaves, not currently recommended
+ *     + about 1/15 the speed of typical EHCI implementations (PCI)
+ *     + RX, all too often reqpkt seems to misbehave after tx
+ *     + TX, no known issues (other than evident silicon issue)
+ *
+ * - DMA (Mentor/OMAP) ...has at least toggle update problems
+ *
+ * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
+ *   starvation ... nothing yet for TX, interrupt, or bulk.
+ *
+ * - Not tested with HNP, but some SRP paths seem to behave.
+ *
+ * NOTE 24-August-2006:
+ *
+ * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
+ *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
+ *   mostly works, except that with "usbnet" it's easy to trigger cases
+ *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
+ *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
+ *   although ARP RX wins.  (That test was done with a full speed link.)
+ */
+
+/*
+ * NOTE on endpoint usage:
+ *
+ * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
+ * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
+ * (Yes, bulk _could_ use more of the endpoints than that, and would even
+ * benefit from it.)
+ *
+ * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
+ * So far that scheduling is both dumb and optimistic:  the endpoint will be
+ * "claimed" until its software queue is no longer refilled.  No multiplexing
+ * of transfers between endpoints, or anything clever.
+ */
+
+static u8 dynamic_fifo_total_slot = 15;
+int musbfsh_host_alloc_ep_fifo(struct musbfsh *musbfsh, struct musbfsh_qh *qh, u8 is_in)
+{
+	void __iomem *mbase = musbfsh->mregs;
+	int epnum = qh->hw_ep->epnum;
+	u16 maxpacket;
+	u16 request_fifo_sz, fifo_unit_nr;
+	u16 idx_start = 0;
+	u8 index, i;
+	u16 c_off = 0;
+	u8 c_size = 0;
+	u16 free_uint = 0;
+	u8 found = 0;
+
+	maxpacket = qh->maxpacket * qh->hb_mult;
+	if (maxpacket <= 512) {
+		request_fifo_sz = 512;
+		fifo_unit_nr = 1;
+		c_size = 6;
+	} else if (maxpacket <= 1024) {
+		request_fifo_sz = 1024;
+		fifo_unit_nr = 2;
+		c_size = 7;
+	} else if (maxpacket <= 2048) {
+		request_fifo_sz = 2048;
+		fifo_unit_nr = 4;
+		c_size = 8;
+	} else if (maxpacket <= 4096) {
+		request_fifo_sz = 4096;
+		fifo_unit_nr = 8;
+		c_size = 9;
+	} else {
+		ERR("should not be here qh maxp:%d maxp:%d\n", qh->maxpacket, maxpacket);
+		request_fifo_sz = 0;
+		fifo_unit_nr = 0;
+		musbfsh_bug();
+		return -ENOSPC;
+	}
+
+	for (i = 0; i < dynamic_fifo_total_slot; i++) {
+		if (!(musbfsh_host_dynamic_fifo_usage_msk & (1 << i)))
+			free_uint++;
+		else
+			free_uint = 0;
+
+		if (free_uint == fifo_unit_nr) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found == 0) {
+		ERR("!enough, dynamic_fifo_usage_msk:0x%x,maxp:%d,req_len:%d,ep%d-%s\n",
+				musbfsh_host_dynamic_fifo_usage_msk, maxpacket,
+				request_fifo_sz, epnum, is_in ? "in":"out");
+		return -1;
+	}
+
+	idx_start = i - (fifo_unit_nr - 1);
+	c_off = (64 >> 3) + idx_start * (512 >> 3);
+
+	for (i = 0; i < fifo_unit_nr; i++)
+		musbfsh_host_dynamic_fifo_usage_msk |= (1 << (idx_start + i));
+
+	index = musbfsh_readb(mbase, MUSBFSH_INDEX);
+	musbfsh_writeb(musbfsh->mregs, MUSBFSH_INDEX, epnum);
+	if (is_in) {
+		musbfsh_write_rxfifosz(mbase, c_size);
+		musbfsh_write_rxfifoadd(mbase, c_off);
+
+		INFO("addr:0x%x, size:0x%x\n", musbfsh_read_rxfifoadd(mbase), musbfsh_read_rxfifosz(mbase));
+	} else {
+		musbfsh_write_txfifosz(mbase, c_size);
+		musbfsh_write_txfifoadd(mbase, c_off);
+		INFO("addr:0x%x, size:0x%x\n", musbfsh_read_txfifoadd(mbase), musbfsh_read_txfifosz(mbase));
+	}
+	musbfsh_writeb(mbase, MUSBFSH_INDEX, index);
+
+	INFO("maxp:%d, req_len:%d, dynamic_fifo_usage_msk:0x%x, ep%d-%s, qh->type:%d\n",
+	    maxpacket, request_fifo_sz, musbfsh_host_dynamic_fifo_usage_msk, epnum, is_in ? "in":"out", qh->type);
+	return 0;
+}
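The allocator is a first-fit scan over a 15-slot bitmap in which each slot represents 512 bytes of FIFO RAM; FIFO addresses are programmed in 8-byte units, offset past the 64 bytes reserved for ep0. Worked numbers for one allocation, following the code above:

/* maxpacket = 1024  ->  fifo_unit_nr = 2, c_size = 7
 * slots 3 and 4 are the first two free adjacent slots  ->  idx_start = 3
 * c_off = (64 >> 3) + 3 * (512 >> 3) = 8 + 192 = 200
 * i.e. byte offset 200 * 8 = 1600 = 64 + 3 * 512
 */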
+
+void musbfsh_host_free_ep_fifo(struct musbfsh *musbfsh, struct musbfsh_qh *qh, u8 is_in)
+{
+	void __iomem *mbase = musbfsh->mregs;
+	int epnum = qh->hw_ep->epnum;
+	u16 maxpacket = qh->maxpacket;
+	u16 request_fifo_sz, fifo_unit_nr;
+	u16 idx_start = 0;
+	u8 index, i;
+	u16 c_off = 0;
+
+	maxpacket = qh->maxpacket * qh->hb_mult;
+	if (maxpacket <= 512) {
+		request_fifo_sz = 512;
+		fifo_unit_nr = 1;
+	} else if (maxpacket <= 1024) {
+		request_fifo_sz = 1024;
+		fifo_unit_nr = 2;
+	} else if (maxpacket <= 2048) {
+		request_fifo_sz = 2048;
+		fifo_unit_nr = 4;
+	} else if (maxpacket <= 4096) {
+		request_fifo_sz = 4096;
+		fifo_unit_nr = 8;
+	} else {
+		ERR("should not be here qh maxp:%d maxp:%d\n", qh->maxpacket, maxpacket);
+		request_fifo_sz = 0;
+		fifo_unit_nr = 0;
+		musbfsh_bug();
+	}
+
+	index = musbfsh_readb(mbase, MUSBFSH_INDEX);
+	musbfsh_writeb(mbase, MUSBFSH_INDEX, epnum);
+
+	if (is_in)
+		c_off =  musbfsh_read_rxfifoadd(mbase);
+	else
+		c_off = musbfsh_read_txfifoadd(mbase);
+
+	idx_start = (c_off - (64 >> 3)) / (512 >> 3);
+
+	for (i = 0; i < fifo_unit_nr; i++)
+		musbfsh_host_dynamic_fifo_usage_msk &= ~(1 << (idx_start + i));
+
+	if (is_in) {
+		musbfsh_write_rxfifosz(mbase, 0);
+		musbfsh_write_rxfifoadd(mbase, 0);
+	} else {
+		musbfsh_write_txfifosz(mbase, 0);
+		musbfsh_write_txfifoadd(mbase, 0);
+	}
+	musbfsh_writeb(mbase, MUSBFSH_INDEX, index);
+
+	INFO("maxp:%d, req_len:%d, dynamic_fifo_usage_msk:0x%x, ep%d-%s, qh->type:%d\n",
+	    maxpacket, request_fifo_sz, musbfsh_host_dynamic_fifo_usage_msk, epnum, is_in ? "in":"out", qh->type);
+}
+
+static void musbfsh_ep_program(struct musbfsh *musbfsh, u8 epnum,
+			       struct urb *urb, int is_out, u8 *buf,
+			       u32 offset, u32 len);
+
+void musbfsh_bug(void)
+{
+	/* deliberately trigger a kernel exception via a NULL-pointer write */
+	char *ptr = NULL;
+
+	*ptr = 10;
+}
+
+/*
+ * Clear TX fifo. Needed to avoid BABBLE errors.
+ */
+void musbfsh_h_tx_flush_fifo(struct musbfsh_hw_ep *ep)
+{
+	void __iomem *epio = ep->regs;
+	u16 csr;
+	u16 lastcsr = 0;
+	int retries = 1000;
+
+	INFO("%s++\r\n", __func__);
+	csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+	while (csr & MUSBFSH_TXCSR_FIFONOTEMPTY) {
+		if (csr != lastcsr)
+			INFO("Host TX FIFONOTEMPTY csr: %02x\n", csr);
+		lastcsr = csr;
+		csr &= ~MUSBFSH_TXCSR_TXPKTRDY;
+		csr |= MUSBFSH_TXCSR_FLUSHFIFO;
+		musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+		csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+		if (retries-- < 1) {
+			WARNING("Could not flush host TX%d fifo: csr: %04x\n",
+				ep->epnum, csr);
+			return;
+		}
+		mdelay(1);
+	}
+}
+
+static void musbfsh_h_ep0_flush_fifo(struct musbfsh_hw_ep *ep)
+{
+	void __iomem *epio = ep->regs;
+	u16 csr;
+	int retries = 5;
+
+	INFO("%s++\r\n", __func__);
+	/* scrub any data left in the fifo */
+	do {
+		csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+		if (!(csr & (MUSBFSH_CSR0_TXPKTRDY | MUSBFSH_CSR0_RXPKTRDY)))
+			break;
+		musbfsh_writew(epio, MUSBFSH_TXCSR, MUSBFSH_CSR0_FLUSHFIFO);
+		csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+		udelay(10);
+	} while (--retries);
+
+	if (!retries)
+		WARNING("Could not flush host TX%d fifo: csr: %04x\n",
+			ep->epnum, csr);
+
+	/* and reset for the next transfer */
+	musbfsh_writew(epio, MUSBFSH_TXCSR, 0);
+}
+
+/*
+ * Start transmit. Caller is responsible for locking shared resources.
+ * musb must be locked.
+ */
+static inline void musbfsh_h_tx_start(struct musbfsh_hw_ep *ep)
+{
+	u16 txcsr;
+
+	INFO("%s++\r\n", __func__);
+	/* NOTE: no locks here; caller should lock and select EP */
+	if (ep->epnum) {
+		txcsr = musbfsh_readw(ep->regs, MUSBFSH_TXCSR);
+		INFO("txcsr=0x%x for ep%d\n", txcsr, ep->epnum);
+		txcsr |= MUSBFSH_TXCSR_TXPKTRDY | MUSBFSH_TXCSR_H_WZC_BITS;
+		musbfsh_writew(ep->regs, MUSBFSH_TXCSR, txcsr);
+		txcsr = musbfsh_readw(ep->regs, MUSBFSH_TXCSR);
+		INFO("txcsr=0x%x for ep%d\n", txcsr, ep->epnum);
+	} else {
+		txcsr = musbfsh_readw(ep->regs, MUSBFSH_CSR0);
+		INFO("txcsr=0x%x for ep%d\n", txcsr, ep->epnum);
+		txcsr = MUSBFSH_CSR0_H_DIS_PING | MUSBFSH_CSR0_H_SETUPPKT | MUSBFSH_CSR0_TXPKTRDY;
+		musbfsh_writew(ep->regs, MUSBFSH_CSR0, txcsr);
+		txcsr = musbfsh_readw(ep->regs, MUSBFSH_TXCSR);
+		INFO("txcsr=0x%x for ep%d\n", txcsr, ep->epnum);
+	}
+
+}
+
+void musbfsh_ep_set_qh(struct musbfsh_hw_ep *ep, int is_in,
+			      struct musbfsh_qh *qh)
+{
+	if (is_in != 0 || ep->is_shared_fifo)
+		ep->in_qh = qh;
+	if (is_in == 0 || ep->is_shared_fifo)
+		ep->out_qh = qh;
+}
+
+struct musbfsh_qh *musbfsh_ep_get_qh(struct musbfsh_hw_ep *ep, int is_in)
+{
+	INFO("%s++, hw_ep%d, is_in=%d\r\n",
+	     __func__, ep->epnum, is_in);
+	return is_in ? ep->in_qh : ep->out_qh;
+}
+
+/*
+ * Start the URB at the front of an endpoint's queue
+ * end must be claimed from the caller.
+ *
+ * Context: controller locked, irqs blocked
+ */
+static void musbfsh_start_urb(struct musbfsh *musbfsh, int is_in,
+			      struct musbfsh_qh *qh)
+{
+	u16 frame;
+	u32 len;
+	struct urb *urb = next_urb(qh);
+	void *buf = urb->transfer_buffer;
+	u32 offset = 0;
+	struct musbfsh_hw_ep *hw_ep = qh->hw_ep;
+	unsigned pipe = urb->pipe;
+	u8 address = usb_pipedevice(pipe);
+	int epnum = hw_ep->epnum;
+	void __iomem *mbase = musbfsh->mregs;
+
+	INFO("%s++, addr=%d, hw_ep->epnum=%d, urb_ep_addr:0x%x \r\n",
+	     __func__, address, epnum, urb->ep->desc.bEndpointAddress);
+	/*
+	 * MYDBG("urb:%x, blen:%d, alen:%d, hep:%x, ep:%x\n",
+	 *	urb, urb->transfer_buffer_length, urb->actual_length,
+	 *	epnum, urb->ep->desc.bEndpointAddress);
+	 */
+
+	/* initialize software qh state */
+	/* indicate the buffer pointer now. */
+	qh->offset = 0;
+	qh->segsize = 0;
+
+	/* gather right source of data */
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_CONTROL:	/* PIO mode only */
+		/* control transfers always start with SETUP */
+		/* setup packet should be sent out of the controller. */
+		is_in = 0;
+		musbfsh->ep0_stage = MUSBFSH_EP0_START;
+		buf = urb->setup_packet;	/* contain the request. */
+		len = 8;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		qh->iso_idx = 0;
+		qh->frame = 0;
+		offset = urb->iso_frame_desc[0].offset;
+		len = urb->iso_frame_desc[0].length;
+		break;
+	default:
+		/* bulk, interrupt */
+		/* actual_length may be nonzero on retry paths */
+		/* before the first attempt of the urb, actual_length is 0 */
+		buf = urb->transfer_buffer + urb->actual_length;
+		len = urb->transfer_buffer_length - urb->actual_length;
+	}
+	INFO("qh %p urb %p dev%d ep%d %s %s, hw_ep %d, %p/%d\n",
+		qh, urb, address, qh->epnum, is_in ? "in" : "out",
+		({ char *s;
+			switch (qh->type) {
+			case USB_ENDPOINT_XFER_CONTROL:
+				s = "-ctl";
+				break;
+			case USB_ENDPOINT_XFER_BULK:
+				s = "-bulk";
+				break;
+			default:
+				s = "-intr";
+				break;
+			};
+			s;
+		}),
+		epnum, buf + offset, len);
+	/* Configure endpoint */
+	musbfsh_ep_set_qh(hw_ep, is_in, qh);
+
+	/* !is_in, because the fourth parameter of this func is is_out */
+	musbfsh_ep_program(musbfsh, epnum, urb, !is_in, buf, offset, len);
+
+	/* transmit may have more work: start it when it is time */
+	/*
+	 * Rx,has configure OK in the func: musbfsh_ep_program,
+	 * so return directly
+	 */
+	if (is_in)
+		return;
+
+	INFO("Start TX%d %s\n", epnum, hw_ep->tx_channel ? "dma" : "pio");
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_ISOC:
+	case USB_ENDPOINT_XFER_INT:
+		INFO("check whether there's still time for periodic Tx\n");
+		frame = musbfsh_readw(mbase, MUSBFSH_FRAME);
+		/* FIXME this doesn't implement that scheduling policy ...
+		 * or handle framecounter wrapping
+		 */
+		if ((urb->transfer_flags & URB_ISO_ASAP)
+		    || (frame >= urb->start_frame)) {
+			/* REVISIT the SOF irq handler shouldn't duplicate
+			 * this code; and we don't init urb->start_frame...
+			 */
+			qh->frame = 0;
+			goto start;
+		} else {
+			qh->frame = urb->start_frame;
+			/* enable SOF interrupt so we can count down */
+			INFO("SOF for %d\n", epnum);
+			musbfsh_writeb(mbase, MUSBFSH_INTRUSBE, 0xff);
+		}
+		break;
+	default:
+start:
+		INFO("Start TX%d %s\n", epnum, hw_ep->tx_channel ? "dma" : "pio");
+
+		if (!hw_ep->tx_channel) {
+			/* PIO mode starts here; DMA mode sends data after the DMA channel is configured */
+			musbfsh_h_tx_start(hw_ep);
+		}
+		/* else if (is_cppi_enabled() || tusb_dma_omap()) */
+		/* musb_h_tx_dma_start(hw_ep); */
+	}
+}
+
+/* Context: caller owns controller lock, IRQs are blocked */
+static void musbfsh_giveback(struct musbfsh *musbfsh, struct urb *urb,
+			     int status)
+__releases(musbfsh->lock) __acquires(musbfsh->lock)
+{
+	INFO("%s++, complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
+		__func__, urb, urb->complete, status,
+		usb_pipedevice(urb->pipe),
+		usb_pipeendpoint(urb->pipe),
+		usb_pipein(urb->pipe) ? "in" : "out",
+		urb->actual_length, urb->transfer_buffer_length);
+
+	/*
+	 * MYDBG("urb:%x, blen:%d, alen:%d, ep:%x\n", urb,
+	 * urb->transfer_buffer_length, urb->actual_length,
+	 * urb->ep->desc.bEndpointAddress);
+	 */
+
+	usb_hcd_unlink_urb_from_ep(musbfsh_to_hcd(musbfsh), urb);
+	spin_unlock(&musbfsh->lock);
+	usb_hcd_giveback_urb(musbfsh_to_hcd(musbfsh), urb, status);
+	spin_lock(&musbfsh->lock);
+}
+
+/* For bulk/interrupt endpoints only */
+static inline void musbfsh_save_toggle(struct musbfsh_qh *qh, int is_in,
+				       struct urb *urb)
+{
+	struct musbfsh *musbfsh = qh->hw_ep->musbfsh;
+	u8 epnum = qh->hw_ep->epnum;
+	int toggle;
+
+	INFO("%s++\r\n", __func__);
+	/*
+	 * FIXME: the current Mentor DMA code seems to have
+	 * problems getting toggle correct.
+	 */
+	if (is_in) {
+		toggle = musbfsh_readl(musbfsh->mregs, MUSBFSH_RXTOG);
+		INFO("toggle_IN=0x%x\n", toggle);
+	} else {
+		toggle = musbfsh_readl(musbfsh->mregs, MUSBFSH_TXTOG);
+		INFO("toggle_OUT=0x%x\n", toggle);
+	}
+
+	if (toggle & (1 << epnum))
+		usb_settoggle(urb->dev, qh->epnum, !is_in, 1);
+	else
+		usb_settoggle(urb->dev, qh->epnum, !is_in, 0);
+}
+
+static inline void musbfsh_set_toggle(struct musbfsh_qh *qh, int is_in,
+				      struct urb *urb)
+{
+	struct musbfsh *musbfsh = qh->hw_ep->musbfsh;
+	u8 epnum = qh->hw_ep->epnum;
+	int tog; /* toggle */
+
+	INFO("%s++: qh->hw_ep->epnum %d, qh->epnum %d\n",
+	     __func__, qh->hw_ep->epnum,
+	     qh->epnum);
+
+	tog = usb_gettoggle(urb->dev, qh->epnum, !is_in);
+	if (is_in) {
+		INFO("qh->dev->toggle[IN]=0x%x\n", qh->dev->toggle[!is_in]);
+		musbfsh_writel(musbfsh->mregs, MUSBFSH_RXTOG,
+			       (((1 << epnum) << 16) | (tog << epnum)));
+		musbfsh_writel(musbfsh->mregs, MUSBFSH_RXTOG, (tog << epnum));
+	} else {
+		INFO("qh->dev->toggle[OUT]=0x%x\n", qh->dev->toggle[!is_in]);
+		musbfsh_writel(musbfsh->mregs, MUSBFSH_TXTOG,
+			       (((1 << epnum) << 16) | (tog << epnum)));
+		musbfsh_writel(musbfsh->mregs, MUSBFSH_TXTOG, (tog << epnum));
+	}
+}
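The toggle registers appear to pair per-endpoint toggle values in the low half-word with a write-enable mask in the high half-word (inferred from the two writes above). A worked example for endpoint 2 with toggle = 1:

/* first write:  ((1 << 2) << 16) | (1 << 2) = 0x00040004
 *   high half selects which endpoint bits to update,
 *   low half carries the new toggle values
 * second write: (1 << 2) = 0x00000004, with the enable mask cleared
 */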
+
+/*
+ * Advance this hardware endpoint's queue, completing the specified URB and
+ * advancing to either the next URB queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, IRQs are blocked
+ */
+void musbfsh_advance_schedule(struct musbfsh *musbfsh, struct urb *urb,
+				     struct musbfsh_hw_ep *hw_ep, int is_in)
+{
+	struct musbfsh_qh *qh;
+	struct musbfsh_hw_ep *ep;
+	int ready;
+	int status;
+
+	/* the current qh */
+	qh = musbfsh_ep_get_qh(hw_ep, is_in);
+	ep = qh->hw_ep;
+	ready = qh->is_ready;
+
+	INFO("%s++\r\n", __func__);
+	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
+
+	/* save toggle eagerly, for paranoia */
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_BULK:
+	case USB_ENDPOINT_XFER_INT:
+		/* after the urb, should save the toggle for the ep! */
+		musbfsh_save_toggle(qh, is_in, urb);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (status == 0 && urb->error_count)
+			status = -EXDEV;
+		break;
+	}
+
+	qh->is_ready = 0;
+	musbfsh_giveback(musbfsh, urb, status);
+	if ((is_in && !hw_ep->in_qh)
+			|| (!is_in && !hw_ep->out_qh)) {
+		WARNING("QH already freed\n");
+		return;
+	}
+	qh->is_ready = ready;
+
+	/*
+	 * Workaround inherited from the tablet codebase: avoid a kernel
+	 * exception when qh->hep already contains poisoned memory
+	 * (0x6b6b6b6b...); the side effect is a potential use-after-free.
+	 */
+	/* if the urb list is empty, the next qh will be executed */
+#ifdef CONFIG_MTK_MUSBFSH_BIND_DEV_EP
+	if (list_empty(&qh->hep->urb_list)) {
+		struct list_head *head;
+		struct dma_controller *dma = musbfsh->dma_controller;
+
+		if (is_in) {
+			ep->rx_reinit = 1;
+			if (ep->rx_channel) {
+				dma->channel_release(ep->rx_channel);
+				ep->rx_channel = NULL;
+			}
+		} else {
+			ep->tx_reinit = 1;
+			if (ep->tx_channel) {
+				dma->channel_release(ep->tx_channel);
+				ep->tx_channel = NULL;
+			}
+		}
+
+		/* Clobber old pointers to this qh */
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+		mark_qh_activity(qh->epnum, ep->epnum, is_in, 1);
+#endif
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+		if (qh->is_use_qmu)
+			mtk11_disable_q(musbfsh, hw_ep->epnum, is_in);
+#endif
+
+		if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+			musbfsh_ep_set_qh(ep, is_in, NULL);
+			qh->hep->hcpriv = NULL;
+			if (qh->mux == 1) {
+				head = qh->ring.prev;
+				list_del(&qh->ring);
+				kfree(qh);
+				qh = first_qh(head);
+			}
+		} else
+			qh->resubmit = 1;
+	}
+#else
+	if (list_empty(&qh->hep->urb_list)) {
+		struct list_head *head;
+
+		if (is_in)
+			ep->rx_reinit = 1;
+		else
+			ep->tx_reinit = 1;
+
+		/* Clobber old pointers to this qh */
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+		mark_qh_activity(qh->epnum, ep->epnum, is_in, 1);
+#endif
+
+		musbfsh_ep_set_qh(ep, is_in, NULL);
+		qh->hep->hcpriv = NULL;
+
+		if (musbfsh_host_dynamic_fifo && qh->type != USB_ENDPOINT_XFER_CONTROL)
+			musbfsh_host_free_ep_fifo(musbfsh, qh, is_in);
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+		if (qh->is_use_qmu)
+			mtk11_disable_q(musbfsh, hw_ep->epnum, is_in);
+#endif
+
+		switch (qh->type) {
+		case USB_ENDPOINT_XFER_CONTROL:
+		case USB_ENDPOINT_XFER_BULK:
+			/*
+			 * fifo policy for these lists, except that NAKing
+			 * should rotate a qh to the end (for fairness).
+			 */
+			if (qh->mux == 1) {
+				head = qh->ring.prev;
+				list_del(&qh->ring);
+				kfree(qh);
+				qh = first_qh(head);
+				break;
+			}
+			/* else: fall through and free the qh */
+		case USB_ENDPOINT_XFER_INT:
+		case USB_ENDPOINT_XFER_ISOC:
+			/*
+			 * this is where periodic bandwidth should be
+			 * de-allocated if it's tracked and allocated;
+			 * and where we'd update the schedule tree...
+			 */
+			kfree(qh);
+			qh = NULL;
+			break;
+		}
+	}
+#endif
+
+#ifdef CONFIG_MTK_MUSBFSH_BIND_DEV_EP
+	if (qh != NULL && qh->is_ready && next_urb(qh))
+#else
+	if (qh != NULL && qh->is_ready)
+#endif
+	{
+		INFO("... next ep%d %cX urb %p\n",
+		     hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+		if (qh->is_use_qmu && !mtk11_host_qmu_concurrent) {
+			musbfsh_ep_set_qh(hw_ep, is_in, qh);
+			mtk11_kick_CmdQ(musbfsh, is_in ? 1:0, qh, next_urb(qh));
+		} else if (!qh->is_use_qmu)
+			musbfsh_start_urb(musbfsh, is_in, qh);
+#else
+		musbfsh_start_urb(musbfsh, is_in, qh);
+#endif
+	}
+}
+
+u16 musbfsh_h_flush_rxfifo(struct musbfsh_hw_ep *hw_ep, u16 csr)
+{
+	/* we don't want fifo to fill itself again;
+	 * ignore dma (various models),
+	 * leave toggle alone (may not have been saved yet)
+	 */
+	INFO("%s++\r\n", __func__);
+	csr |= MUSBFSH_RXCSR_FLUSHFIFO | MUSBFSH_RXCSR_RXPKTRDY;
+	csr &= ~(MUSBFSH_RXCSR_H_REQPKT | MUSBFSH_RXCSR_H_AUTOREQ |
+		MUSBFSH_RXCSR_AUTOCLEAR);
+
+	/* write 2x to allow double buffering */
+	musbfsh_writew(hw_ep->regs, MUSBFSH_RXCSR, csr);
+	musbfsh_writew(hw_ep->regs, MUSBFSH_RXCSR, csr);
+
+	/* flush writebuffer */
+	return musbfsh_readw(hw_ep->regs, MUSBFSH_RXCSR);
+}
+
+/*
+ * PIO RX for a packet (or part of it).
+ */
+static bool musbfsh_host_packet_rx(struct musbfsh *musbfsh, struct urb *urb,
+				   u8 epnum, u8 iso_err)
+{
+	u16 rx_count;
+	u8 *buf;
+	u16 csr;
+	bool done = false;
+	u32 length;
+	int do_flush = 0;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints + epnum;
+	void __iomem *epio = hw_ep->regs;
+	struct musbfsh_qh *qh = hw_ep->in_qh;
+	void *buffer = urb->transfer_buffer;
+
+	/* musbfsh_ep_select(mbase, epnum); */
+	rx_count = musbfsh_readw(epio, MUSBFSH_RXCOUNT);
+	INFO("%s++: real RX%d count %d, buffer %p len %d/%d\n",
+	     __func__, epnum, rx_count, urb->transfer_buffer, qh->offset,
+	     urb->transfer_buffer_length);
+	/* unload FIFO */
+	if (usb_pipeisoc(urb->pipe)) {
+		int status = 0;
+		struct usb_iso_packet_descriptor *d;
+
+		if (iso_err) {
+			status = -EILSEQ;
+			urb->error_count++;
+		}
+
+		d = urb->iso_frame_desc + qh->iso_idx;
+		buf = buffer + d->offset;
+		length = d->length;
+		if (rx_count > length) {
+			if (status == 0) {
+				status = -EOVERFLOW;
+				urb->error_count++;
+			}
+			WARNING("** OVERFLOW %d into %d\n", rx_count, length);
+			do_flush = 1;
+		} else
+			length = rx_count;
+		urb->actual_length += length;
+		d->actual_length = length;
+
+		d->status = status;
+
+		/* see if we are done */
+		done = (++qh->iso_idx >= urb->number_of_packets);
+	} else {
+		/* non-isoch */
+		buf = buffer + qh->offset;
+		length = urb->transfer_buffer_length - qh->offset;
+		if (rx_count > length) {
+			if (urb->status == -EINPROGRESS)
+				urb->status = -EOVERFLOW;
+			WARNING("** OVERFLOW %d into %d\n", rx_count, length);
+			do_flush = 1;
+		} else
+			length = rx_count;
+		urb->actual_length += length;
+		qh->offset += length;
+
+		/* see if we are done */
+		done = (urb->actual_length == urb->transfer_buffer_length)
+		    || (rx_count < qh->maxpacket)
+		    || (urb->status != -EINPROGRESS);
+		if (done && (urb->status == -EINPROGRESS)
+		    && (urb->transfer_flags & URB_SHORT_NOT_OK)
+		    && (urb->actual_length < urb->transfer_buffer_length))
+			urb->status = -EREMOTEIO;
+	}
+
+	musbfsh_read_fifo(hw_ep, length, buf);
+
+	csr = musbfsh_readw(epio, MUSBFSH_RXCSR);
+	csr |= MUSBFSH_RXCSR_H_WZC_BITS;
+	if (unlikely(do_flush))
+		musbfsh_h_flush_rxfifo(hw_ep, csr);
+	else {
+		/* REVISIT this assumes AUTOCLEAR is never set */
+		csr &= ~(MUSBFSH_RXCSR_RXPKTRDY | MUSBFSH_RXCSR_H_REQPKT);
+		if (!done)
+			csr |= MUSBFSH_RXCSR_H_REQPKT;
+		musbfsh_writew(epio, MUSBFSH_RXCSR, csr);
+	}
+
+	return done;
+}
+
+/* we don't always need to reinit a given side of an endpoint...
+ * when we do, use tx/rx reinit routine and then construct a new CSR
+ * to address data toggle, NYET, and DMA or PIO.
+ *
+ * it's possible that driver bugs (especially for DMA) or aborting a
+ * transfer might have left the endpoint busier than it should be.
+ * the busy/not-empty tests are basically paranoia.
+ */
+static void
+musbfsh_rx_reinit(struct musbfsh *musbfsh, struct musbfsh_qh *qh,
+		  struct musbfsh_hw_ep *ep)
+{
+	u16 csr;
+
+	INFO("%s++\r\n", __func__);
+	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
+	 * That always uses tx_reinit since ep0 repurposes TX register
+	 * offsets; the initial SETUP packet is also a kind of OUT.
+	 */
+
+	/* if programmed for Tx, put it in RX mode */
+	if (ep->is_shared_fifo) {
+		csr = musbfsh_readw(ep->regs, MUSBFSH_TXCSR);
+		if (csr & MUSBFSH_TXCSR_MODE) {
+			musbfsh_h_tx_flush_fifo(ep);
+			csr = musbfsh_readw(ep->regs, MUSBFSH_TXCSR);
+			musbfsh_writew(ep->regs, MUSBFSH_TXCSR,
+				       csr | MUSBFSH_TXCSR_FRCDATATOG);
+		}
+
+		/*
+		 * Clear the MODE bit (and everything else) to enable Rx.
+		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
+		 */
+		if (csr & MUSBFSH_TXCSR_DMAMODE)
+			musbfsh_writew(ep->regs, MUSBFSH_TXCSR,
+				       MUSBFSH_TXCSR_DMAMODE);
+		musbfsh_writew(ep->regs, MUSBFSH_TXCSR, 0);
+
+		/* scrub all previous state, clearing toggle */
+	} else {
+		csr = musbfsh_readw(ep->regs, MUSBFSH_RXCSR);
+		if (csr & MUSBFSH_RXCSR_RXPKTRDY)
+			INFO("musbfsh::rx%d, packet/%d ready?\n", ep->epnum,
+			     musbfsh_readw(ep->regs, MUSBFSH_RXCOUNT));
+
+		musbfsh_h_flush_rxfifo(ep, 0);
+	}
+
+	/* target addr and (for multipoint) hub addr/port */
+	if (musbfsh->is_multipoint) {
+		musbfsh_write_rxfunaddr(musbfsh->mregs, ep->epnum,
+					qh->addr_reg);
+		musbfsh_write_rxhubaddr(musbfsh->mregs, ep->epnum,
+					qh->h_addr_reg);
+		musbfsh_write_rxhubport(musbfsh->mregs, ep->epnum,
+					qh->h_port_reg);
+	} else {
+		musbfsh_writeb(musbfsh->mregs, MUSBFSH_FADDR, qh->addr_reg);
+	}
+
+	/* protocol/endpoint, interval/NAKlimit, i/o size */
+	musbfsh_writeb(ep->regs, MUSBFSH_RXTYPE, qh->type_reg);
+	musbfsh_writeb(ep->regs, MUSBFSH_RXINTERVAL, qh->intv_reg);
+
+	musbfsh_writew(ep->regs, MUSBFSH_RXMAXP, qh->maxpacket);
+
+	ep->rx_reinit = 0;
+}
+
+static bool musbfsh_tx_dma_program(struct dma_controller *dma,
+				   struct musbfsh_hw_ep *hw_ep,
+				   struct musbfsh_qh *qh,
+				   struct urb *urb, u32 offset, u32 len)
+{
+	struct dma_channel *channel = hw_ep->tx_channel;
+	void __iomem *epio = hw_ep->regs;
+	u16 pkt_size = qh->maxpacket;
+	u16 csr;
+	u8 mode;
+
+	INFO("%s++\r\n", __func__);
+	if (len > channel->max_len)
+		len = channel->max_len;
+
+	csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+	if (len > pkt_size) {
+		INFO("%s: mode 1\r\n", __func__);
+		mode = 1;
+		csr |= MUSBFSH_TXCSR_DMAMODE | MUSBFSH_TXCSR_DMAENAB;
+		csr |= MUSBFSH_TXCSR_AUTOSET;
+	} else {
+		INFO("%s: mode 0\r\n", __func__);
+		mode = 0;
+		csr &= ~(MUSBFSH_TXCSR_AUTOSET | MUSBFSH_TXCSR_DMAMODE);
+		csr |= MUSBFSH_TXCSR_DMAENAB;	/* against programmer's guide */
+	}
+	channel->desired_mode = mode;
+	INFO("%s: txcsr=0x%x\r\n", __func__, csr);
+	/* finish the configuration of the TXCSR register */
+	musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+	qh->segsize = len;
+
+	/*
+	 * Ensure the data has reached main memory before starting
+	 * the DMA transfer
+	 */
+	wmb();
+
+	if (!dma->channel_program(channel, pkt_size, mode,
+				  urb->transfer_dma + offset, len)) {
+		/* give up the channel, so other ep can use it */
+		dma->channel_release(channel);
+		hw_ep->tx_channel = NULL;
+
+		csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+		csr &= ~(MUSBFSH_TXCSR_AUTOSET | MUSBFSH_TXCSR_DMAENAB);
+		musbfsh_writew(epio, MUSBFSH_TXCSR,
+			       csr | MUSBFSH_TXCSR_H_WZC_BITS);
+		return false;
+	}
+	return true;
+}
+
+/*
+ * Program an HDRC endpoint as per the given URB.
+ * Context: irqs blocked, controller lock held.
+ * u8 epnum: the endpoint index, not the real endpoint number.
+ * int is_out: the caller passes !is_in here.
+ */
+static void musbfsh_ep_program(struct musbfsh *musbfsh, u8 epnum,
+			       struct urb *urb, int is_out,
+			       u8 *buf, u32 offset, u32 len)
+{
+	struct dma_controller *dma_controller;
+	struct dma_channel *dma_channel;
+	void __iomem *mbase = musbfsh->mregs;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints + epnum;
+	void __iomem *epio = hw_ep->regs;
+	/* the parameter sent to musbfsh_ep_get_qh is is_in */
+	struct musbfsh_qh *qh = musbfsh_ep_get_qh(hw_ep, !is_out);
+	u16 packet_sz = qh->maxpacket;
+
+	INFO("%s++: %s hw%d urb %p spd%d",
+	     __func__, is_out ? "-->" : "<--",
+	     epnum, urb, urb->dev->speed);
+	INFO("%s  : dev%d ep%d%s h_addr%02x h_port%02x bytes %d\n",
+	     __func__,
+	     qh->addr_reg, qh->epnum, is_out ? "out" : "in",
+	     qh->h_addr_reg, qh->h_port_reg, len);
+
+	/* very important, then we can use the register via epio */
+	musbfsh_ep_select(mbase, epnum);
+
+	/* candidate for DMA? */
+	/*
+	 * wz: for MT65xx there are not enough DMA channels for all of the
+	 * endpoints, so we should add a flag in the hw_ep struct indicating
+	 * whether an endpoint owns a DMA channel, and check that flag here
+	 * when setting dma_channel
+	 */
+	dma_controller = musbfsh->dma_controller;
+
+	/* the epnum check ensures DMA is never used for ep0 */
+	if (is_dma_capable() && epnum && dma_controller) {
+		INFO("Using DMA epnum%d\n", epnum);
+
+		/* not all endpoints have a DMA channel */
+		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
+
+		/*
+		 * if there is no DMA channel yet,
+		 * allocate one for this endpoint
+		 */
+		if (!dma_channel) {
+			/*
+			 * may return NULL if all of the DMA channels
+			 * are already in use
+			 */
+			dma_channel =
+				dma_controller->channel_alloc(dma_controller,
+							      hw_ep, is_out);
+			if (dma_channel) {
+				INFO("Got a DMA channel for ep%d\n", epnum);
+				if (is_out)
+					hw_ep->tx_channel = dma_channel;
+				else
+					hw_ep->rx_channel = dma_channel;
+			} else {
+				WARNING("DMA channel alloc fail for ep%d\n",
+					epnum);
+			}
+		}
+	} else {
+		INFO("Using PIO for ep%d\n", epnum);
+		dma_channel = NULL;
+	}
+
+	/* make sure we clear DMAEnab, autoSet bits from previous run */
+
+	/* OUT/transmit/EP0 or IN/receive? */
+	if (is_out) {
+		u16 csr;
+		u16 int_txe;
+		u16 load_count;
+
+		csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+
+		/* disable interrupt in case we flush */
+		int_txe = musbfsh_readw(mbase, MUSBFSH_INTRTXE);
+		musbfsh_writew(mbase, MUSBFSH_INTRTXE, int_txe & ~(1 << epnum));
+
+		/* general endpoint setup, not ep0 */
+		if (epnum) {	/* Tx endpoint */
+			/* flush all old state, set default */
+			musbfsh_h_tx_flush_fifo(hw_ep);
+
+			/*
+			 * We must not clear the DMAMODE bit before or in
+			 * the same cycle with the DMAENAB bit, so we clear
+			 * the latter first...
+			 */
+			csr &= ~(MUSBFSH_TXCSR_H_NAKTIMEOUT	|
+				MUSBFSH_TXCSR_AUTOSET		|
+				MUSBFSH_TXCSR_DMAENAB		|
+				MUSBFSH_TXCSR_FRCDATATOG	|
+				MUSBFSH_TXCSR_H_RXSTALL		|
+				MUSBFSH_TXCSR_H_ERROR		|
+				MUSBFSH_TXCSR_TXPKTRDY);
+
+			/* wz add to init the toggle */
+			musbfsh_set_toggle(qh, !is_out, urb);
+
+			musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+			/* REVISIT may need to clear FLUSHFIFO ... */
+			csr &= ~MUSBFSH_TXCSR_DMAMODE;
+			musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+			csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+		} else {
+			/* endpoint 0: just flush */
+			musbfsh_h_ep0_flush_fifo(hw_ep);
+		}
+
+		/* target addr and (for multipoint) hub addr/port */
+		if (musbfsh->is_multipoint) {
+			musbfsh_write_txfunaddr(mbase, epnum, qh->addr_reg);
+			musbfsh_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
+			musbfsh_write_txhubport(mbase, epnum, qh->h_port_reg);
+			INFO("set address! h_port_reg 0x%x h_addr_reg 0x%x\n",
+			     qh->h_port_reg, qh->h_addr_reg);
+		} else {
+			/* set the device address; very important */
+			musbfsh_writeb(mbase, MUSBFSH_FADDR, qh->addr_reg);
+			INFO("set address! 0x%x\n", qh->addr_reg);
+		}
+
+		/* protocol/endpoint/interval/NAKlimit */
+		if (epnum) {
+			/* set the transfer type and endpoint number */
+			musbfsh_writeb(epio, MUSBFSH_TXTYPE, qh->type_reg);
+			musbfsh_writew(epio, MUSBFSH_TXMAXP, qh->maxpacket);
+			musbfsh_writeb(epio, MUSBFSH_TXINTERVAL, qh->intv_reg);
+		} else {	/* ep0 */
+			musbfsh_writeb(epio, MUSBFSH_NAKLIMIT0, qh->intv_reg);
+			if (musbfsh->is_multipoint)
+				musbfsh_writeb(epio, MUSBFSH_TYPE0,
+					       qh->type_reg);
+		}
+		load_count = min_t(u32, packet_sz, len);
+
+		/* write data to the fifo */
+		if (dma_channel && musbfsh_tx_dma_program(dma_controller,
+							  hw_ep, qh, urb,
+							  offset, len))
+			load_count = 0;
+
+		if (load_count) {	/* dma is not available */
+			/* PIO to load FIFO */
+			qh->segsize = load_count;
+			musbfsh_write_fifo(hw_ep, load_count, buf);
+		}
+
+		/* re-enable the interrupt after loading the data */
+		/* into the FIFO, but without setting TXPKTRDY yet */
+		musbfsh_writew(mbase, MUSBFSH_INTRTXE, int_txe);
+
+		/* IN/receive */
+	} else {
+		u16 csr;
+
+		if (hw_ep->rx_reinit) {
+			musbfsh_rx_reinit(musbfsh, qh, hw_ep);
+			/* wz add to init the toggle */
+			musbfsh_set_toggle(qh, !is_out, urb);
+			csr = 0;
+
+			/* disable NYET for interrupt transfer */
+			if (qh->type == USB_ENDPOINT_XFER_INT)
+				csr |= MUSBFSH_RXCSR_DISNYET;
+
+		} else {	/* bulk IN */
+			csr = musbfsh_readw(hw_ep->regs, MUSBFSH_RXCSR);
+
+			if (csr & (MUSBFSH_RXCSR_RXPKTRDY |
+				   MUSBFSH_RXCSR_DMAENAB  |
+				   MUSBFSH_RXCSR_H_REQPKT))
+				ERR("broken !rx_reinit, ep%d csr %04x\n",
+				    hw_ep->epnum, csr);
+
+			/* scrub any stale state, leaving toggle alone */
+			csr &= MUSBFSH_RXCSR_DISNYET;
+		}
+
+		/* kick things off */
+
+		csr |= MUSBFSH_RXCSR_H_REQPKT;	/* ask packet from the device */
+		INFO("RXCSR%d := %04x\n", epnum, csr);
+		musbfsh_writew(hw_ep->regs, MUSBFSH_RXCSR, csr);
+		csr = musbfsh_readw(hw_ep->regs, MUSBFSH_RXCSR);
+	}
+}
+
+
+/*
+ * Service the default endpoint (ep0) as host.
+ * Return false until it's time to start the status stage.
+ */
+static bool musbfsh_h_ep0_continue(struct musbfsh *musbfsh, u16 len,
+				   struct urb *urb)
+{
+	bool more = false;
+	u8 *fifo_dest = NULL;
+	u16 fifo_count = 0;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->control_ep;
+	struct musbfsh_qh *qh = hw_ep->in_qh;
+	struct usb_ctrlrequest *request;
+
+	INFO("%s++\r\n", __func__);
+	switch (musbfsh->ep0_stage) {
+	case MUSBFSH_EP0_IN:
+		/* actual_length: the number of bytes already transferred */
+		fifo_dest = urb->transfer_buffer + urb->actual_length;
+		fifo_count = min_t(size_t, len,
+				   urb->transfer_buffer_length -
+					urb->actual_length);
+
+		/* len: the number of bytes in the EP0 FIFO */
+		if (fifo_count < len)
+			urb->status = -EOVERFLOW;
+
+		/* DMA is not used for ep0 */
+		musbfsh_read_fifo(hw_ep, fifo_count, fifo_dest);
+
+		/* update the actual_length! */
+		urb->actual_length += fifo_count;
+		/*
+		 * the IN transaction is complete;
+		 * proceed to the status stage
+		 */
+		if (len < qh->maxpacket) {
+			/* always terminate on short read; it's
+			 * rarely reported as an error.
+			 * more = false; -- added by zheng wang
+			 */
+		} else if (urb->actual_length < urb->transfer_buffer_length)
+			more = true;
+		break;
+	case MUSBFSH_EP0_START:
+		request = (struct usb_ctrlrequest *)urb->setup_packet;
+
+		if (!request->wLength) {
+			INFO("start no-DATA\n");
+			break;
+		} else if (request->bRequestType & USB_DIR_IN) {
+			INFO("start IN-DATA\n");
+			musbfsh->ep0_stage = MUSBFSH_EP0_IN;
+			more = true;
+			break;	/* wait for the next interrupt */
+		}
+
+		INFO("start OUT-DATA\n");
+		musbfsh->ep0_stage = MUSBFSH_EP0_OUT;
+		more = true;
+		/* no break here, send data right now! */
+		/* FALLTHROUGH */
+	case MUSBFSH_EP0_OUT:
+		fifo_count = min_t(size_t, qh->maxpacket,
+				   urb->transfer_buffer_length -
+					urb->actual_length);
+		if (fifo_count) {
+			fifo_dest =
+				(u8 *)(urb->transfer_buffer +
+					urb->actual_length);
+			INFO("Sending %d byte%s to ep0 fifo %p\n",
+			     fifo_count,
+			     (fifo_count == 1) ? "" : "s",
+			     fifo_dest);
+			musbfsh_write_fifo(hw_ep, fifo_count, fifo_dest);
+
+			urb->actual_length += fifo_count;
+			more = true;
+		}
+		break;
+	default:
+		ERR("bogus ep0 stage %d\n", musbfsh->ep0_stage);
+		break;
+	}
+
+	return more;
+}
+
+/*
+ * Handle default endpoint interrupt as host. Only called in IRQ time
+ * from musbfsh_interrupt().
+ *
+ * called with controller irqlocked
+ */
+irqreturn_t musbfsh_h_ep0_irq(struct musbfsh *musbfsh)
+{
+	struct urb *urb;
+	u16 csr, len;
+	int status = 0;
+	void __iomem *mbase = musbfsh->mregs;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->control_ep;
+	void __iomem *epio = hw_ep->regs;
+	struct musbfsh_qh *qh = hw_ep->in_qh;
+	bool complete = false;
+	irqreturn_t retval = IRQ_NONE;
+
+	INFO("%s++\r\n", __func__);
+	/* ep0 only has one queue, "in" */
+	urb = next_urb(qh);
+
+	musbfsh_ep_select(mbase, 0);
+	csr = musbfsh_readw(epio, MUSBFSH_CSR0);
+	len = (csr & MUSBFSH_CSR0_RXPKTRDY)
+	    ? musbfsh_readb(epio, MUSBFSH_COUNT0)
+	    : 0;
+
+	INFO("<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+		csr, qh, len, urb, musbfsh->ep0_stage);
+
+	/* if we just did status stage, we are done */
+	if (musbfsh->ep0_stage == MUSBFSH_EP0_STATUS) {
+		retval = IRQ_HANDLED;
+		complete = true;
+	}
+
+	/* prepare status */
+	if (csr & MUSBFSH_CSR0_H_RXSTALL) {
+		WARNING("STALLING ENDPOINT\n");
+		status = -EPIPE;
+
+	} else if (csr & MUSBFSH_CSR0_H_ERROR) {
+		WARNING("no response, csr0 %04x\n", csr);
+		status = -EPROTO;
+
+	} else if (csr & MUSBFSH_CSR0_H_NAKTIMEOUT) {
+		WARNING("control NAK timeout\n");
+
+		/* NOTE:  this code path would be a good place to PAUSE a
+		 * control transfer, if another one is queued, so that
+		 * ep0 is more likely to stay busy.  That's already done
+		 * for bulk RX transfers.
+		 *
+		 * if (qh->ring.next != &musbfsh->control), then
+		 * we have a candidate... NAKing is *NOT* an error
+		 */
+		musbfsh_writew(epio, MUSBFSH_CSR0, 0);
+		retval = IRQ_HANDLED;
+	}
+
+	/* if there is an error in the control transfer, abort it */
+	if (status) {
+		INFO("aborting\n");
+		retval = IRQ_HANDLED;
+		if (urb)
+			urb->status = status;
+		complete = true;
+
+		/* use the proper sequence to abort the transfer */
+		if (csr & MUSBFSH_CSR0_H_REQPKT) {
+			csr &= ~MUSBFSH_CSR0_H_REQPKT;
+			musbfsh_writew(epio, MUSBFSH_CSR0, csr);
+			csr &= ~MUSBFSH_CSR0_H_NAKTIMEOUT;
+			musbfsh_writew(epio, MUSBFSH_CSR0, csr);
+		} else {
+			musbfsh_h_ep0_flush_fifo(hw_ep);
+		}
+
+		musbfsh_writeb(epio, MUSBFSH_NAKLIMIT0, 0);
+
+		/* clear it */
+		musbfsh_writew(epio, MUSBFSH_CSR0, 0);
+	}
+
+	if (unlikely(!urb)) {
+		/* stop endpoint since we have no place for its data, this
+		 * SHOULD NEVER HAPPEN!
+		 */
+		ERR("no URB for end 0\n");
+
+		musbfsh_h_ep0_flush_fifo(hw_ep);
+		goto done;
+	}
+
+	if (!complete) {	/* not the status stage */
+		/* call common logic and prepare response */
+		if (musbfsh_h_ep0_continue(musbfsh, len, urb)) {
+			/* more packets required */
+			/*
+			 * wz: the following code could arguably
+			 * be moved into musbfsh_h_ep0_continue
+			 */
+			csr = (musbfsh->ep0_stage == MUSBFSH_EP0_IN)
+			    ? MUSBFSH_CSR0_H_REQPKT : MUSBFSH_CSR0_TXPKTRDY;
+		} else {
+			/* data transfer complete; perform status phase */
+			/*
+			 * indicate no data stage,
+			 * so there is no need to set the stage in
+			 * musbfsh_h_ep0_continue
+			 */
+			if (usb_pipeout(urb->pipe)
+			    || !urb->transfer_buffer_length)
+				csr = MUSBFSH_CSR0_H_STATUSPKT |
+				      MUSBFSH_CSR0_H_REQPKT;
+			else
+				csr = MUSBFSH_CSR0_H_STATUSPKT |
+				      MUSBFSH_CSR0_TXPKTRDY;
+
+			/* flag status stage */
+			musbfsh->ep0_stage = MUSBFSH_EP0_STATUS;
+
+			INFO("ep0 STATUS, csr %04x\n", csr);
+
+		}
+		musbfsh_writew(epio, MUSBFSH_CSR0, csr);
+		retval = IRQ_HANDLED;
+	} else
+		musbfsh->ep0_stage = MUSBFSH_EP0_IDLE;
+
+	/* call completion handler if done */
+	if (complete) {
+		/* MYDBG(""); */
+		musbfsh_advance_schedule(musbfsh, urb, hw_ep, 1);
+	}
+done:
+	return retval;
+}
+
+/* Host side TX (OUT) using Mentor DMA works as follows:
+ *	submit_urb ->
+ *	- if queue was empty, Program Endpoint
+ *	- ... which starts DMA to fifo in mode 1 or 0
+ *
+ * DMA Isr (transfer complete) -> TxAvail()
+ *	- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
+ *				only in musb_cleanup_urb)
+ *	- TxPktRdy has to be set in mode 0 or for
+ *		short packets in mode 1.
+ */
+
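+/*
+ * A compact restatement of the mode decision (derived from
+ * musbfsh_tx_dma_program() above; that function is authoritative):
+ *
+ *	mode 1 (len > maxpacket):  TXCSR |= DMAMODE | DMAENAB | AUTOSET
+ *	mode 0 (len <= maxpacket): TXCSR &= ~(AUTOSET | DMAMODE);
+ *	                           TXCSR |= DMAENAB
+ *
+ * With AUTOSET the controller raises TXPKTRDY by itself for each full
+ * packet; in mode 0 (and for a short final packet in mode 1) the irq
+ * handler below sets TXPKTRDY per packet.
+ */
+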
+/* Service a Tx-Available or dma completion irq for the endpoint */
+void musbfsh_host_tx(struct musbfsh *musbfsh, u8 epnum)	/* real ep num */
+{
+	int pipe;
+	bool done = false;
+	u16 tx_csr;
+	size_t length = 0;
+	size_t offset = 0;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints + epnum;
+	void __iomem *epio = hw_ep->regs;
+	struct musbfsh_qh *qh = hw_ep->out_qh;
+
+	struct urb *urb = next_urb(qh);	/* the urb currently being processed */
+
+	/* transfer error status; if status == 0 there is no error */
+	u32 status = 0;
+	void __iomem *mbase = musbfsh->mregs;
+	struct dma_channel *dma;
+	bool transfer_pending = false;
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	if (qh && qh->is_use_qmu)
+		return;
+#endif
+
+	INFO("%s++, real ep=%d\r\n", __func__, epnum);
+	musbfsh_ep_select(mbase, epnum);
+	tx_csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+
+	/* with CPPI, DMA sometimes triggers "extra" irqs */
+	if (!urb) {
+		WARNING("extra TX%d ready, csr %04x\n", epnum, tx_csr);
+		return;
+	}
+
+	pipe = urb->pipe;
+	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
+	INFO("OUT/TX%d end, csr %04x%s\n",
+	     epnum, tx_csr, dma ? ", dma" : ", pio");
+
+	/* check for errors */
+	if (tx_csr & MUSBFSH_TXCSR_H_RXSTALL) {
+		/* dma was disabled, fifo flushed */
+		WARNING("TX end %d stall\n", epnum);
+
+		/* stall; record URB status */
+		status = -EPIPE;
+
+	} else if (tx_csr & MUSBFSH_TXCSR_H_ERROR) {
+		/* (NON-ISO) dma was disabled, fifo flushed */
+		WARNING("TX 3strikes on ep=%d\n", epnum);
+
+		status = -ETIMEDOUT;
+
+	} else if (tx_csr & MUSBFSH_TXCSR_H_NAKTIMEOUT) {
+		WARNING("TX end=%d device not responding\n", epnum);
+
+		/* NOTE:  this code path would be a good place to PAUSE a
+		 * transfer, if there's some other (nonperiodic) tx urb
+		 * that could use this fifo.  (dma complicates it...)
+		 * That's already done for bulk RX transfers.
+		 *
+		 * if (bulk && qh->ring.next != &musbfsh->out_bulk), then
+		 * we have a candidate... NAKing is *NOT* an error
+		 */
+		musbfsh_ep_select(mbase, epnum);
+		musbfsh_writew(epio, MUSBFSH_TXCSR,
+			       MUSBFSH_TXCSR_H_WZC_BITS |
+			       MUSBFSH_TXCSR_TXPKTRDY);
+		return;
+	}
+
+	/* non-zero status means an error; stop sending data */
+	if (status) {
+		if (dma_channel_status(dma) == MUSBFSH_DMA_STATUS_BUSY) {
+			dma->status = MUSBFSH_DMA_STATUS_CORE_ABORT;
+			(void)musbfsh->dma_controller->channel_abort(dma);
+		}
+
+		/* do the proper sequence to abort the transfer in the
+		 * usb core; the dma engine should already be stopped.
+		 */
+		musbfsh_h_tx_flush_fifo(hw_ep);
+		tx_csr &= ~(MUSBFSH_TXCSR_AUTOSET |
+			    MUSBFSH_TXCSR_DMAENAB |
+			    MUSBFSH_TXCSR_H_ERROR |
+			    MUSBFSH_TXCSR_H_RXSTALL |
+			    MUSBFSH_TXCSR_H_NAKTIMEOUT);
+
+		musbfsh_ep_select(mbase, epnum);
+		musbfsh_writew(epio, MUSBFSH_TXCSR, tx_csr);
+		/* REVISIT may need to clear FLUSHFIFO ... */
+		musbfsh_writew(epio, MUSBFSH_TXCSR, tx_csr);
+		musbfsh_writeb(epio, MUSBFSH_TXINTERVAL, 0);
+
+		done = true;
+	}
+
+	/* second cppi case */
+	if (dma_channel_status(dma) == MUSBFSH_DMA_STATUS_BUSY) {
+		WARNING("extra TX%d ready, csr %04x\n", epnum, tx_csr);
+		return;
+	}
+
+	if (is_dma_capable() && dma && !status) {
+		/*
+		 * DMA has completed.  But if we're using DMA mode 1 (multi
+		 * packet DMA), we need a terminal TXPKTRDY interrupt before
+		 * we can consider this transfer completed, lest we trash
+		 * its last packet when writing the next URB's data.  So we
+		 * switch back to mode 0 to get that interrupt; we'll come
+		 * back here once it happens.
+		 */
+		if (tx_csr & MUSBFSH_TXCSR_DMAMODE) {
+			/*
+			 * We shouldn't clear DMAMODE with DMAENAB set; so
+			 * clear them in a safe order.  That should be OK
+			 * once TXPKTRDY has been set (and I've never seen
+			 * it being 0 at this moment -- DMA interrupt latency
+			 * is significant) but if it hasn't been then we have
+			 * no choice but to stop being polite and ignore the
+			 * programmer's guide... :-)
+			 *
+			 * Note that we must write TXCSR with TXPKTRDY cleared
+			 * in order not to re-trigger the packet send (this bit
+			 * can't be cleared by CPU), and there's another caveat:
+			 * TXPKTRDY may be set shortly and then cleared in the
+			 * double-buffered FIFO mode, so we do an extra TXCSR
+			 * read for debouncing...
+			 */
+			tx_csr &= musbfsh_readw(epio, MUSBFSH_TXCSR);
+			if (tx_csr & MUSBFSH_TXCSR_TXPKTRDY) {
+				tx_csr &= ~(MUSBFSH_TXCSR_DMAENAB |
+					    MUSBFSH_TXCSR_TXPKTRDY);
+				musbfsh_writew(epio, MUSBFSH_TXCSR,
+					       tx_csr |
+					       MUSBFSH_TXCSR_H_WZC_BITS);
+			}
+			tx_csr &= ~(MUSBFSH_TXCSR_DMAMODE |
+				    MUSBFSH_TXCSR_TXPKTRDY);
+			musbfsh_writew(epio, MUSBFSH_TXCSR,
+				       tx_csr | MUSBFSH_TXCSR_H_WZC_BITS);
+
+			/*
+			 * There is no guarantee that we'll get an interrupt
+			 * after clearing DMAMODE as we might have done this
+			 * too late (after TXPKTRDY was cleared by controller).
+			 * Re-read TXCSR as we have spoiled its previous value.
+			 */
+			tx_csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+		}
+
+		/*
+		 * We may get here from a DMA completion or TXPKTRDY interrupt.
+		 * In any case, we must check the FIFO status here and bail out
+		 * only if the FIFO still has data -- that should prevent the
+		 * "missed" TXPKTRDY interrupts and deal with double-buffered
+		 * FIFO mode too...
+		 */
+		if (tx_csr &
+			(MUSBFSH_TXCSR_FIFONOTEMPTY | MUSBFSH_TXCSR_TXPKTRDY)) {
+			INFO("DMA complete but Data still in FIFO, CSR %04x\n",
+			     tx_csr);
+			return;
+		}
+	}
+
+	if (!status || dma || usb_pipeisoc(pipe)) {
+		if (dma)
+			length = dma->actual_len;
+		else
+			length = qh->segsize;
+		qh->offset += length;
+
+		if (usb_pipeisoc(pipe)) {
+			struct usb_iso_packet_descriptor *d;
+
+			d = urb->iso_frame_desc + qh->iso_idx;
+			d->actual_length = length;
+			d->status = status;
+			if (++qh->iso_idx >= urb->number_of_packets) {
+				done = true;
+			} else {
+				d++;
+				offset = d->offset;
+				length = d->length;
+			}
+/* } else if (dma) { */
+		} else if (dma && urb->transfer_buffer_length == qh->offset) {
+			done = true;
+		} else {
+			/* see if we need to send more data, or ZLP */
+			/* sent a short packet */
+			if (qh->segsize < qh->maxpacket)
+				done = true;
+			else if (qh->offset == urb->transfer_buffer_length
+				 && !(urb->transfer_flags & URB_ZERO_PACKET))
+				done = true;
+			if (!done) {
+				offset = qh->offset;
+				length = urb->transfer_buffer_length - offset;
+				transfer_pending = true;
+			}
+		}
+	}
+
+	/* urb->status != -EINPROGRESS means request has been faulted,
+	 * so we must abort this transfer after cleanup
+	 */
+	if (urb->status != -EINPROGRESS) {
+		done = true;
+		if (status == 0)
+			status = urb->status;
+	}
+
+	if (done) {
+		/* set status */
+		urb->status = status;
+		urb->actual_length = qh->offset;
+		musbfsh_advance_schedule(musbfsh, urb, hw_ep, USB_DIR_OUT);
+		return;
+	} else if (transfer_pending && dma) {
+		if (musbfsh_tx_dma_program(musbfsh->dma_controller,
+					   hw_ep, qh, urb, offset, length))
+			return;
+	} else if (tx_csr & MUSBFSH_TXCSR_DMAENAB) {
+		WARNING("not complete, but DMA enabled?\n");
+		return;
+	}
+
+	/*
+	 * PIO: start next packet in this URB.
+	 *
+	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
+	 * (and presumably, FIFO is not half-full) we should write *two*
+	 * packets before updating TXCSR; other docs disagree...
+	 */
+	if (length > qh->maxpacket)
+		length = qh->maxpacket;
+	/* Unmap the buffer so that CPU can use it */
+	usb_hcd_unmap_urb_for_dma(musbfsh_to_hcd(musbfsh), urb);
+	musbfsh_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
+	qh->segsize = length;
+
+	musbfsh_ep_select(mbase, epnum);
+	musbfsh_writew(epio, MUSBFSH_TXCSR, MUSBFSH_TXCSR_H_WZC_BITS |
+		       MUSBFSH_TXCSR_TXPKTRDY);
+}
+
+
+/* Host side RX (IN) using Mentor DMA works as follows:
+ *	submit_urb ->
+ *	- if queue was empty, ProgramEndpoint
+ *	- first IN token is sent out (by setting ReqPkt)
+ * LinuxIsr -> RxReady()
+ *	/\	=> first packet is received
+ *	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
+ *	|		-> DMA Isr (transfer complete) -> RxReady()
+ *	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
+ *	|		    - if urb not complete, send next IN token (ReqPkt)
+ *	|			   |		else complete urb.
+ *	|			   |
+ *	---------------------------
+ *
+ * Nuances of mode 1:
+ *	For short packets, no ack (+RxPktRdy) is sent automatically
+ *	(even if AutoClear is ON)
+ *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
+ *	automatically => major problem, as collecting the next packet becomes
+ *	difficult. Hence mode 1 is not used.
+ *
+ * REVISIT
+ *	All we care about at this driver level is that
+ *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
+ *       (b) termination conditions are: short RX, or buffer full;
+ *       (c) fault modes include
+ *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
+ *             (and that endpoint's dma queue stops immediately)
+ *           - overflow (full, PLUS more bytes in the terminal packet)
+ *
+ *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
+ *	thus be a great candidate for using mode 1 ... for all but the
+ *	last packet of one URB's transfer.
+ */
+
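+/*
+ * A compact restatement of the mode 0 RXCSR sequence programmed in
+ * musbfsh_host_rx() below (illustration only; the code is authoritative):
+ *
+ *	val &= ~H_REQPKT;	-- stop requesting IN tokens by hand
+ *	val &= ~H_AUTOREQ;	-- mode 0: no automatic IN token either
+ *	val |= DMAENAB;		-- drain the FIFO via DMA
+ *	write RXCSR (together with the H_WZC_BITS);
+ *
+ * Once the DMA completes, RxPktRdy is acked and H_REQPKT is set again
+ * if the urb still needs more packets.
+ */
+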
+/* Schedule next QH from musbfsh->in_bulk and move the current qh to
+ * the end; avoids starvation for other endpoints.
+ */
+static void musbfsh_bulk_rx_nak_timeout(struct musbfsh *musbfsh,
+					struct musbfsh_hw_ep *ep)
+{
+	struct dma_channel *dma;
+	struct urb *urb;
+	void __iomem *mbase = musbfsh->mregs;
+	void __iomem *epio = ep->regs;
+	struct musbfsh_qh *cur_qh, *next_qh;
+	u16 rx_csr;
+
+	INFO("musbfsh_bulk_rx_nak_timeout++\r\n");
+	musbfsh_ep_select(mbase, ep->epnum);
+	dma = is_dma_capable() ? ep->rx_channel : NULL;
+
+	/* clear nak timeout bit */
+	rx_csr = musbfsh_readw(epio, MUSBFSH_RXCSR);
+	rx_csr |= MUSBFSH_RXCSR_H_WZC_BITS;
+	rx_csr &= ~MUSBFSH_RXCSR_DATAERROR;
+	musbfsh_writew(epio, MUSBFSH_RXCSR, rx_csr);
+
+	cur_qh = first_qh(&musbfsh->in_bulk);
+	if (cur_qh) {
+		urb = next_urb(cur_qh);
+		if (dma_channel_status(dma) == MUSBFSH_DMA_STATUS_BUSY) {
+			dma->status = MUSBFSH_DMA_STATUS_CORE_ABORT;
+			musbfsh->dma_controller->channel_abort(dma);
+			urb->actual_length += dma->actual_len;
+			dma->actual_len = 0L;
+		}
+		musbfsh_save_toggle(cur_qh, 1, urb);
+
+		/* move cur_qh to end of queue */
+		list_move_tail(&cur_qh->ring, &musbfsh->in_bulk);
+
+		/* get the next qh from musbfsh->in_bulk */
+		next_qh = first_qh(&musbfsh->in_bulk);
+
+		/* set rx_reinit and schedule the next qh */
+		ep->rx_reinit = 1;
+		/* MYDBG("musbfsh_start_urb go\n"); */
+		musbfsh_start_urb(musbfsh, 1, next_qh);
+	}
+}
+
+/*
+ * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
+ * and high-bandwidth IN transfer cases.
+ */
+void musbfsh_host_rx(struct musbfsh *musbfsh, u8 epnum)
+{
+	struct urb *urb;
+	struct musbfsh_hw_ep *hw_ep = musbfsh->endpoints + epnum;
+	void __iomem *epio = hw_ep->regs;
+	struct musbfsh_qh *qh = hw_ep->in_qh;
+	size_t xfer_len;
+	void __iomem *mbase = musbfsh->mregs;
+	int pipe;
+	u16 rx_csr, val;
+	bool done = false;
+	u32 status;
+	struct dma_channel *dma;
+	bool iso_err = false;
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	if (qh && qh->is_use_qmu)
+		return;
+#endif
+
+	INFO("musbfsh_host_rx++,real ep=%d\r\n", epnum);
+	musbfsh_ep_select(mbase, epnum);
+
+	urb = next_urb(qh);	/* current urb */
+	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
+	status = 0;
+	xfer_len = 0;
+
+	rx_csr = musbfsh_readw(epio, MUSBFSH_RXCSR);
+	val = rx_csr;
+
+	if (unlikely(!urb)) {
+		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
+		 * usbtest #11 (unlinks) triggers it regularly, sometimes
+		 * with fifo full.  (Only with DMA??)
+		 */
+		WARNING("BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
+			musbfsh_readw(epio, MUSBFSH_RXCOUNT));
+		musbfsh_h_flush_rxfifo(hw_ep, 0);
+		return;
+	}
+
+	pipe = urb->pipe;
+
+	INFO("<==real hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
+	     epnum, rx_csr, urb->actual_length, dma ? dma->actual_len : 0);
+
+	/* check for errors, concurrent stall & unlink is not really
+	 * handled yet!
+	 */
+	if (rx_csr & MUSBFSH_RXCSR_H_RXSTALL) {
+		WARNING("RX end %d STALL\n", epnum);
+		rx_csr &= ~MUSBFSH_RXCSR_H_RXSTALL;
+		musbfsh_writew(epio, MUSBFSH_RXCSR, rx_csr);
+		/* stall; record URB status */
+		status = -EPIPE;
+
+	} else if (rx_csr & MUSBFSH_RXCSR_H_ERROR) {
+		WARNING("end %d RX proto error\n", epnum);
+
+		status = -EPROTO;
+		musbfsh_writeb(epio, MUSBFSH_RXINTERVAL, 0);
+
+	} else if (rx_csr & MUSBFSH_RXCSR_DATAERROR) {
+		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
+			INFO("RX end %d NAK timeout\n", epnum);
+			/* removed due to too many logs */
+
+			/* NOTE: NAKing is *NOT* an error, so we want to
+			 * continue.  Except ... if there's a request for
+			 * another QH, use that instead of starving it.
+			 *
+			 * Devices like Ethernet and serial adapters keep
+			 * reads posted at all times, which will starve
+			 * other devices without this logic.
+			 */
+			if (usb_pipebulk(urb->pipe)
+			    && qh->mux == 1
+			    && !list_is_singular(&musbfsh->in_bulk)) {
+				musbfsh_bulk_rx_nak_timeout(musbfsh, hw_ep);
+				return;
+			}
+			musbfsh_ep_select(mbase, epnum);
+			rx_csr |= MUSBFSH_RXCSR_H_WZC_BITS;
+			rx_csr &= ~MUSBFSH_RXCSR_DATAERROR;
+			musbfsh_writew(epio, MUSBFSH_RXCSR, rx_csr);
+			goto finish;
+		} else {
+			INFO("RX end %d ISO data error\n", epnum);
+			/* packet error reported later */
+			iso_err = true;
+		}
+	} else if (rx_csr & MUSBFSH_RXCSR_INCOMPRX) {
+		WARNING("end %d high bandwidth incomplete ISO packet RX\n",
+			epnum);
+		status = -EPROTO;
+	}
+
+	/* faults abort the transfer */
+	if (status) {
+		/* clean up dma and collect transfer count */
+		if (dma_channel_status(dma) == MUSBFSH_DMA_STATUS_BUSY) {
+			dma->status = MUSBFSH_DMA_STATUS_CORE_ABORT;
+			(void)musbfsh->dma_controller->channel_abort(dma);
+			xfer_len = dma->actual_len;
+		}
+		musbfsh_h_flush_rxfifo(hw_ep, 0);
+		musbfsh_writeb(epio, MUSBFSH_RXINTERVAL, 0);
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+		if (!musbfsh_connect_flag) {
+			MYDBG("err(%d) after disc\n", status);
+			return;
+		}
+#endif
+
+		done = true;
+		goto finish;
+	}
+
+	if (unlikely(dma_channel_status(dma) == MUSBFSH_DMA_STATUS_BUSY)) {
+		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
+		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
+		goto finish;
+	}
+
+	/* thorough shutdown for now ... given more precise fault handling
+	 * and better queueing support, we might keep a DMA pipeline going
+	 * while processing this irq for earlier completions.
+	 */
+
+	/* FIXME this is _way_ too much in-line logic for Mentor DMA */
+	/* the rx_csr & MUSBFSH_RXCSR_DMAENAB check below is critical */
+	if (dma && (rx_csr & MUSBFSH_RXCSR_DMAENAB)) {
+		xfer_len = dma->actual_len;
+
+		/* These should be clear! */
+		val &= ~(MUSBFSH_RXCSR_DMAENAB |
+			MUSBFSH_RXCSR_H_AUTOREQ |
+			MUSBFSH_RXCSR_AUTOCLEAR |
+			MUSBFSH_RXCSR_RXPKTRDY);
+		musbfsh_writew(hw_ep->regs, MUSBFSH_RXCSR, val);
+
+		if (usb_pipeisoc(pipe)) {
+			struct usb_iso_packet_descriptor *d;
+
+			d = urb->iso_frame_desc + qh->iso_idx;
+			d->actual_length = xfer_len;
+
+			/* even if there was an error, we did the dma
+			 * for iso_frame_desc->length
+			 */
+			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
+				d->status = 0;
+
+			done = (++qh->iso_idx >= urb->number_of_packets);
+		} else
+			done = (urb->actual_length + xfer_len >=
+				urb->transfer_buffer_length ||
+				dma->actual_len < qh->maxpacket);
+
+		/* send IN token for next packet, without AUTOREQ */
+		if (!done) {
+			val |= MUSBFSH_RXCSR_H_REQPKT;
+			musbfsh_writew(epio, MUSBFSH_RXCSR,
+				       MUSBFSH_RXCSR_H_WZC_BITS | val);
+		}
+
+		INFO("ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
+		     done ? "off" : "reset",
+		     musbfsh_readw(epio, MUSBFSH_RXCSR),
+				   musbfsh_readw(epio, MUSBFSH_RXCOUNT));
+	} else if (urb->status == -EINPROGRESS) {
+		/* if no errors, be sure a packet is ready for unloading */
+		if (unlikely(!(rx_csr & MUSBFSH_RXCSR_RXPKTRDY))) {
+			status = -EPROTO;
+			ERR("Rx interrupt with no errors or packet!\n");
+
+			/* FIXME this is another "SHOULD NEVER HAPPEN" */
+
+			/* SCRUB (RX) */
+			/* do the proper sequence to abort the transfer */
+			musbfsh_ep_select(mbase, epnum);
+			val &= ~MUSBFSH_RXCSR_H_REQPKT;
+			musbfsh_writew(epio, MUSBFSH_RXCSR, val);
+			goto finish;
+		}
+
+		/* we are expecting IN packets */
+		if (dma) {
+			struct dma_controller *c;
+			u16 rx_count;
+			int ret, length;
+			dma_addr_t buf;
+
+			rx_count = musbfsh_readw(epio, MUSBFSH_RXCOUNT);
+
+			INFO("RX%d count %d, buffer 0x%x len %d/%d\n",
+			     epnum, rx_count,
+			     (unsigned int)urb->transfer_dma
+			     + urb->actual_length, qh->offset,
+			     urb->transfer_buffer_length);
+
+			c = musbfsh->dma_controller;
+			if (usb_pipeisoc(pipe)) {
+				int d_status = 0;
+				struct usb_iso_packet_descriptor *d;
+
+				d = urb->iso_frame_desc + qh->iso_idx;
+
+				if (iso_err) {
+					d_status = -EILSEQ;
+					urb->error_count++;
+				}
+				if (rx_count > d->length) {
+					if (d_status == 0) {
+						d_status = -EOVERFLOW;
+						urb->error_count++;
+					}
+					INFO("** OVERFLOW %d into %d\n", rx_count, d->length);
+
+					length = d->length;
+				} else
+					length = rx_count;
+				d->status = d_status;
+				buf = urb->transfer_dma + d->offset;
+			} else {
+				length = rx_count;
+				buf = urb->transfer_dma + urb->actual_length;
+			}
+			dma->desired_mode = 0;
+#ifdef USE_MODE1
+			/* because of the issue below, mode 1 will
+			 * only rarely behave with correct semantics.
+			 */
+			if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
+			    (urb->transfer_buffer_length - urb->actual_length) >
+			    qh->maxpacket)
+				dma->desired_mode = 1;
+			if (rx_count < hw_ep->max_packet_sz_rx) {
+				length = rx_count;
+				dma->desired_mode = 0;
+			} else {
+				length = urb->transfer_buffer_length;
+			}
+#endif
+
+			/*
+			 * Disadvantage of using mode 1:
+			 *      It's basically usable only for mass storage
+			 *	class; essentially all other protocols also
+			 *	terminate transfers on short packets.
+			 *
+			 * Details:
+			 *      An extra IN token is sent at the end of the
+			 *	transfer (due to AUTOREQ). If you try to use
+			 *	mode 1 for (transfer_buffer_length - 512),
+			 *	and try to use the extra IN token to grab the
+			 *	last packet using mode 0, then the problem is
+			 *	that you cannot be sure when the device will
+			 *	send the last packet and RxPktRdy set.
+			 *	Sometimes the packet is recd too soon such that
+			 *	it gets lost when RxCSR is re-set at the end of
+			 *	the mode 1 transfer, while sometimes it is recd
+			 *	just a little late so that if you try to
+			 *	configure for mode 0 soon after the mode 1
+			 *	transfer is completed, you will find rxcount 0.
+			 *
+			 *	Okay, so you might think why not
+			 *      wait for an interrupt when the pkt is recd.
+			 *	Well, you won't get any!
+			 */
+
+			val = musbfsh_readw(epio, MUSBFSH_RXCSR);
+			val &= ~MUSBFSH_RXCSR_H_REQPKT;
+
+			if (dma->desired_mode == 0)
+				val &= ~MUSBFSH_RXCSR_H_AUTOREQ;
+			else
+				val |= MUSBFSH_RXCSR_H_AUTOREQ;
+
+			val |= MUSBFSH_RXCSR_DMAENAB;
+
+			musbfsh_writew(epio, MUSBFSH_RXCSR,
+				       MUSBFSH_RXCSR_H_WZC_BITS | val);
+
+			/* REVISIT: when actual_length != 0,
+			 * transfer_buffer_length may need to be
+			 * adjusted first...
+			 */
+			/*
+			 * dma is the DMA channel already allocated
+			 * for this RX endpoint in musbfsh_ep_program()
+			 */
+			ret = c->channel_program(dma, qh->maxpacket,
+						 dma->desired_mode, buf,
+						 length);
+
+			if (!ret) {
+				c->channel_release(dma);
+				hw_ep->rx_channel = NULL;
+				dma = NULL;
+				/* REVISIT reset CSR */
+			}
+		}
+
+		if (!dma) {
+			/* Unmap the buffer so that CPU can use it */
+			usb_hcd_unmap_urb_for_dma(musbfsh_to_hcd(musbfsh), urb);
+			done = musbfsh_host_packet_rx(musbfsh, urb,
+						      epnum, iso_err);
+			INFO("read %spacket\n", done ? "last " : "");
+		}
+	}
+
+finish:
+	urb->actual_length += xfer_len;
+	qh->offset += xfer_len;
+	if (done) {
+		if (urb->status == -EINPROGRESS)
+			urb->status = status;
+		musbfsh_advance_schedule(musbfsh, urb, hw_ep, USB_DIR_IN);
+	}
+}
+
+/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
+ * the software schedule associates multiple such nodes with a given
+ * host side hardware endpoint + direction; scheduling may activate
+ * that hardware endpoint.
+ */
+static int musbfsh_schedule(struct musbfsh *musbfsh, struct musbfsh_qh *qh,
+			    int is_in)
+{
+	int idle;
+	int epnum, hw_end = 0;
+	struct musbfsh_hw_ep *hw_ep = NULL;
+	struct list_head *head = NULL;
+
+	INFO("%s++, qh->epnum=%d, is_in=%d\r\n",
+	     __func__, qh->epnum, (unsigned int)is_in);
+	/* use fixed hardware for control and bulk */
+	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+		head = &musbfsh->control;
+		hw_ep = musbfsh->control_ep;
+		goto success;
+	}
+
+#ifdef MUSBFSH_QMU_LIMIT_SUPPORT
+	if (mtk11_isoc_ep_gpd_count
+		&& qh->is_use_qmu) {
+		for (epnum = 1, hw_ep = musbfsh->endpoints + 1;
+				epnum <= MAX_QMU_EP; epnum++, hw_ep++) {
+			/* int	diff; */
+
+			if (musbfsh_ep_get_qh(hw_ep, is_in) != NULL)
+				continue;
+
+			hw_end = epnum;
+			hw_ep = musbfsh->endpoints + hw_end;	/* got the right ep */
+			break;
+		}
+
+		if (hw_end) {
+			idle = 1;
+			qh->mux = 0;
+			goto success;
+		}
+	}
+	qh->is_use_qmu = 0;
+	for (epnum = MAX_QMU_EP + 1,
+	     hw_ep = musbfsh->endpoints + (MAX_QMU_EP + 1);
+	     epnum < musbfsh->nr_endpoints; epnum++, hw_ep++) {
+		if (musbfsh_ep_get_qh(hw_ep, is_in) != NULL)
+			continue;
+
+		hw_end = epnum;
+		hw_ep = musbfsh->endpoints + hw_end;	/* got the right ep */
+		break;
+	}
+
+	if (hw_end) {
+		idle = 1;
+		qh->mux = 0;
+		goto success;
+	}
+
+	for (epnum = 1, hw_ep = musbfsh->endpoints + 1;
+			epnum <= MAX_QMU_EP; epnum++, hw_ep++) {
+		/* int	diff; */
+
+		if (musbfsh_ep_get_qh(hw_ep, is_in) != NULL)
+			continue;
+
+		hw_end = epnum;
+		hw_ep = musbfsh->endpoints + hw_end;	/* got the right ep */
+		break;
+	}
+
+	if (hw_end) {
+		idle = 1;
+		qh->mux = 0;
+		goto success;
+	} else {
+		WARNING("EP OVERFLOW.\n");
+		return -ENOSPC;
+	}
+#endif
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	if (mtk11_isoc_ep_gpd_count
+		&& qh->type == USB_ENDPOINT_XFER_ISOC) {
+		for (epnum = mtk11_isoc_ep_start_idx,
+		     hw_ep = musbfsh->endpoints + mtk11_isoc_ep_start_idx;
+		     epnum < musbfsh->nr_endpoints; epnum++, hw_ep++) {
+			/* int	diff; */
+
+			if (musbfsh_ep_get_qh(hw_ep, is_in) != NULL)
+				continue;
+
+			hw_end = epnum;
+			hw_ep = musbfsh->endpoints + hw_end;	/* got the right ep */
+			break;
+		}
+
+		if (hw_end) {
+			idle = 1;
+			qh->mux = 0;
+			goto success;
+		}
+	}
+#endif
+
+	/* else, periodic transfers get muxed to other endpoints */
+
+	/*
+	 * We know this qh hasn't been scheduled, so all we need to do
+	 * is choose which hardware endpoint to put it on ...
+	 *
+	 * REVISIT what we really want here is a regular schedule tree
+	 * like e.g. OHCI uses.
+	 */
+	for (epnum = 1, hw_ep = musbfsh->endpoints + 1;
+	     epnum < musbfsh->nr_endpoints; epnum++, hw_ep++) {
+
+		if (musbfsh_ep_get_qh(hw_ep, is_in) != NULL)
+			continue;
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+		if (mtk11_isoc_ep_gpd_count && (epnum >= mtk11_isoc_ep_start_idx)) {
+			epnum = musbfsh->nr_endpoints;
+			continue;
+		}
+#endif
+
+		hw_end = epnum;
+		hw_ep = musbfsh->endpoints + hw_end;	/* got the right ep */
+		break;
+	}
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	/* grab isoc ep if no other ep is available */
+	if (mtk11_isoc_ep_gpd_count &&
+		!hw_end &&
+		qh->type != USB_ENDPOINT_XFER_ISOC) {
+		for (epnum = mtk11_isoc_ep_start_idx,
+		     hw_ep = musbfsh->endpoints + mtk11_isoc_ep_start_idx;
+		     epnum < musbfsh->nr_endpoints; epnum++, hw_ep++) {
+			/* int	diff; */
+
+			if (musbfsh_ep_get_qh(hw_ep, is_in) != NULL)
+				continue;
+
+			hw_end = epnum;
+			hw_ep = musbfsh->endpoints + hw_end;	/* got the right ep */
+			break;
+		}
+	}
+#endif
+	if (!hw_end)
+		return -ENOSPC;
+
+	idle = 1;
+	qh->mux = 0;
+success:
+	if (head) {
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+		MYDBG("head!=NULL\n");
+#endif
+		idle = list_empty(head);
+		list_add_tail(&qh->ring, head);
+		qh->mux = 1;
+	}
+	qh->hw_ep = hw_ep;
+	qh->hep->hcpriv = qh;
+
+	if (musbfsh_host_dynamic_fifo && qh->type != USB_ENDPOINT_XFER_CONTROL) {
+		int ret;
+
+		/* take this after qh->hw_ep is set */
+		ret = musbfsh_host_alloc_ep_fifo(musbfsh, qh, is_in);
+		if (ret) {
+			qh->hw_ep = NULL;
+			qh->hep->hcpriv = NULL;
+			WARNING("NOT ENOUGH FIFO\n");
+			return -ENOSPC;
+		}
+	}
+	hw_ep->type = qh->type;
+	/* the newly added urb is now the first urb; execute it */
+	if (idle) {
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+		mark_qh_activity(qh->epnum, hw_ep->epnum, is_in, 0);
+#endif
+
+/*
+ * downgrade to non-QMU if no dedicated ep was grabbed
+ * while mtk11_isoc_ep_gpd_count is set
+ */
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+#ifdef MUSBFSH_QMU_LIMIT_SUPPORT
+		if (mtk11_isoc_ep_gpd_count &&
+			qh->is_use_qmu &&
+			hw_end <= MAX_QMU_EP)
+			qh->is_use_qmu = 1;
+#else
+		if (mtk11_isoc_ep_gpd_count &&
+			qh->type == USB_ENDPOINT_XFER_ISOC &&
+			hw_end < mtk11_isoc_ep_start_idx)
+			qh->is_use_qmu = 0;
+#endif
+
+		if (qh->is_use_qmu) {
+			musbfsh_ep_set_qh(hw_ep, is_in, qh);
+			mtk11_kick_CmdQ(musbfsh, is_in ? 1:0, qh, next_urb(qh));
+		} else
+			musbfsh_start_urb(musbfsh, is_in, qh);
+#else
+		musbfsh_start_urb(musbfsh, is_in, qh);
+#endif
+
+	}
+	return 0;
+}
+
+static int musbfsh_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+			       gfp_t mem_flags)
+{
+	unsigned long flags;
+	struct musbfsh *musbfsh = hcd_to_musbfsh(hcd);
+	struct usb_host_endpoint *hep = urb->ep;
+	struct musbfsh_qh *qh;
+	struct usb_endpoint_descriptor *epd = &hep->desc;
+	int ret;
+	unsigned type_reg;
+	unsigned interval;
+
+	INFO("musbfsh_urb_enqueue++:urb addr=0x%p\r\n", urb);
+
+	/*
+	 * MYDBG("urb:%x, blen:%d, alen:%d, ep:%x\n",
+	 * urb, urb->transfer_buffer_length, urb->actual_length,
+	 * epd->bEndpointAddress);
+	 */
+	/*
+	 * workaround for a DMA issue: make the USB core skip
+	 * unmap_urb_for_dma() in usb_hcd_giveback_urb() for
+	 * control messages
+	 */
+	if (usb_endpoint_num(epd) == 0)
+		urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
+	spin_lock_irqsave(&musbfsh->lock, flags);
+
+	/* add the urb to the ep, return 0 for no error. */
+	ret = usb_hcd_link_urb_to_ep(hcd, urb);
+	qh = ret ? NULL : hep->hcpriv;
+	if (qh)
+		urb->hcpriv = qh;
+
+#ifdef CONFIG_MTK_MUSBFSH_BIND_DEV_EP
+	if (qh && (ret == 0) && qh->resubmit) {
+		qh->resubmit = 0;
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+		if (mtk11_host_qmu_concurrent && qh &&
+		    qh->is_use_qmu && (ret == 0)) {
+			mtk11_kick_CmdQ(musbfsh,
+					(epd->bEndpointAddress &
+					 USB_ENDPOINT_DIR_MASK) ? 1 : 0,
+					qh, urb);
+			spin_unlock_irqrestore(&musbfsh->lock, flags);
+			return ret;
+		}
+#endif
+		musbfsh_start_urb(musbfsh,
+				  (epd->bEndpointAddress &
+				   USB_ENDPOINT_DIR_MASK) ? 1 : 0, qh);
+		spin_unlock_irqrestore(&musbfsh->lock, flags);
+		return ret;
+	}
+#endif
+
+	/* DMA mapping was already done, if needed, and this urb is on
+	 * hep->urb_list now ... so we're done, unless hep wasn't yet
+	 * scheduled onto a live qh.
+	 *
+	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
+	 * disabled, testing for empty qh->ring and avoiding qh setup costs
+	 * except for the first urb queued after a config change.
+	 */
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	if (mtk11_host_qmu_concurrent && qh && qh->is_use_qmu && (ret == 0)) {
+		mtk11_kick_CmdQ(musbfsh,
+				(epd->bEndpointAddress &
+				 USB_ENDPOINT_DIR_MASK) ? 1 : 0, qh, urb);
+		spin_unlock_irqrestore(&musbfsh->lock, flags);
+		return ret;
+	}
+#endif
+	if (qh || ret) {
+		spin_unlock_irqrestore(&musbfsh->lock, flags);
+		return ret;
+	}
+
+	/* Allocate and initialize qh, minimizing the work done each time
+	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
+	 *
+	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
+	 * for bugs in other kernel code to break this driver...
+	 */
+	qh = kzalloc(sizeof(*qh), GFP_ATOMIC);
+	if (!qh) {
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		spin_unlock_irqrestore(&musbfsh->lock, flags);
+		return -ENOMEM;
+	}
+
+	qh->hep = hep;
+	qh->dev = urb->dev;
+	INIT_LIST_HEAD(&qh->ring);
+	qh->is_ready = 1;
+
+	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+	qh->type = usb_endpoint_type(epd);
+	INFO("desc type=%d\r\n", qh->type);
+	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
+	 * Some musb cores don't support high bandwidth ISO transfers; and
+	 * we don't (yet!) support high bandwidth interrupt transfers.
+	 */
+	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
+	if (qh->hb_mult > 1) {
+		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
+
+		if (ok)
+			ok = (usb_pipein(urb->pipe) && musbfsh->hb_iso_rx)
+				|| (usb_pipeout(urb->pipe) && musbfsh->hb_iso_tx);
+
+		if (!ok) {
+			ret = -EMSGSIZE;
+			goto done;
+		}
+		qh->maxpacket &= 0x7ff;
+	}
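+	/*
+	 * Worked example (illustrative only): wMaxPacketSize 0x1400
+	 * gives hb_mult = 1 + ((0x1400 >> 11) & 0x03) = 3 transactions
+	 * per microframe and maxpacket = 0x1400 & 0x7ff = 1024 bytes.
+	 */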
+	qh->epnum = usb_endpoint_num(epd);
+	INFO("desc epnum=%d\r\n", qh->epnum);
+
+	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
+	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
+	INFO("desc pipe=0x%x, desc devnum=%d\r\n", urb->pipe, urb->dev->devnum);
+
+	/* precompute rxtype/txtype/type0 register */
+	type_reg = (qh->type << 4) | qh->epnum;
+	switch (urb->dev->speed) {
+	case USB_SPEED_LOW:
+		type_reg |= 0xc0;
+		break;
+	case USB_SPEED_FULL:
+		type_reg |= 0x80;
+		break;
+	default:
+		type_reg |= 0x40;
+	}
+	qh->type_reg = type_reg;
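+	/*
+	 * Example (illustrative only): a full-speed bulk endpoint 2 gives
+	 * type_reg = (USB_ENDPOINT_XFER_BULK << 4) | 2 | 0x80 = 0xa2.
+	 */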
+
+	/* Precompute RXINTERVAL/TXINTERVAL register */
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_INT:
+		/*
+		 * Full/low speeds use the linear encoding,
+		 * high speed uses the logarithmic encoding.
+		 */
+		if (urb->dev->speed <= USB_SPEED_FULL) {
+			interval = max_t(u8, epd->bInterval, 1);
+			break;
+		}
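+		/* FALLTHROUGH */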
+	case USB_ENDPOINT_XFER_ISOC:
+		/* ISO always uses logarithmic encoding */
+		interval = min_t(u8, epd->bInterval, 16);
+		break;
+	default:
+		/* REVISIT we actually want to use NAK limits, hinting to the
+		 * transfer scheduling logic to try some other qh, e.g. try
+		 * for 2 msec first:
+		 *
+		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
+		 *
+		 * The downside of disabling this is that transfer scheduling
+		 * gets VERY unfair for nonperiodic transfers; a misbehaving
+		 * peripheral could make that hurt.  That's perfectly normal
+		 * for reads from network or serial adapters ... so we have
+		 * partial NAKlimit support for bulk RX.
+		 *
+		 * The upside of disabling it is simpler transfer scheduling.
+		 */
+		interval = 0;
+	}
+	qh->intv_reg = interval;
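+	/*
+	 * Example (illustrative only): a full-speed interrupt endpoint
+	 * with bInterval 8 stores 8 (linear, in frames); an isoc endpoint
+	 * with bInterval 4 stores 4, which the hardware interprets
+	 * logarithmically as 2^(4-1) = 8 (micro)frames.
+	 */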
+
+	/* precompute addressing for external hub/tt ports */
+	if (musbfsh->is_multipoint) {
+		struct usb_device *parent = urb->dev->parent;
+
+		if (parent != hcd->self.root_hub) {
+			qh->h_addr_reg = (u8) parent->devnum;
+
+			/* set up tt info if needed */
+			if (urb->dev->tt) {
+				qh->h_port_reg = (u8) urb->dev->ttport;
+				if (urb->dev->tt->hub)
+					qh->h_addr_reg =
+						(u8)urb->dev->tt->hub->devnum;
+				if (urb->dev->tt->multi)
+					qh->h_addr_reg |= 0x80;
+			}
+		}
+		INFO("addr_reg=0x%x,h_addr_reg=0x%x,h_port_reg=0x%x",
+			qh->addr_reg, qh->h_addr_reg, qh->h_port_reg);
+	}
+
+	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
+	 * until we get real dma queues (with an entry for each urb/buffer),
+	 * we only have work to do in the former case.
+	 */
+	if (hep->hcpriv) {
+		/* some concurrent activity submitted another urb to hep...
+		 * odd, rare, error prone, but legal.
+		 */
+		kfree(qh);
+		qh = NULL;
+		ret = 0;
+	} else {
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+		if (!usb_pipecontrol(urb->pipe) &&
+		    ((usb_pipetype(urb->pipe) + 1) & mtk11_host_qmu_pipe_msk))
+			qh->is_use_qmu = 1;
+#endif
+		ret = musbfsh_schedule(musbfsh, qh,
+			(epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK));
+		/*
+		 * MYDBG("after musbfsh_schedule,
+		 * urb:%x, ret:%d, ep:%x\n", urb, ret,
+		 * epd->bEndpointAddress);
+		 */
+	}
+
+	if (ret == 0) {
+		urb->hcpriv = qh;
+		/*
+		 * FIXME: set urb->start_frame for iso/intr, it's tested in
+		 * musbfsh_start_urb(), but otherwise only konicawc cares ...
+		 */
+	}
+done:
+	spin_unlock_irqrestore(&musbfsh->lock, flags);
+	if (ret != 0) {
+		spin_lock_irqsave(&musbfsh->lock, flags);
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		spin_unlock_irqrestore(&musbfsh->lock, flags);
+		kfree(qh);
+	}
+	return ret;
+}
+
+/*
+ * Abort a transfer that's at the head of a hardware queue.
+ * Called with the controller locked and irqs blocked.
+ * The hardware queue then advances to the next transfer, unless prevented.
+ */
+static int musbfsh_cleanup_urb(struct urb *urb, struct musbfsh_qh *qh)
+{
+	struct musbfsh_hw_ep *ep = qh->hw_ep;
+	void __iomem *epio = ep->regs;
+	unsigned hw_end = ep->epnum;
+	void __iomem *regs = ep->musbfsh->mregs;
+	int is_in = usb_pipein(urb->pipe);
+	int stat = 0;
+	u16 csr;
+
+	INFO("%s++\r\n", __func__);
+	musbfsh_ep_select(regs, hw_end);
+
+	if (is_dma_capable()) {
+		struct dma_channel *dma;
+
+		dma = is_in ? ep->rx_channel : ep->tx_channel;
+		if (dma) {
+			stat = ep->musbfsh->dma_controller->channel_abort(dma);
+			WARNING("abort %cX%d DMA for urb %p --> %d\n",
+				is_in ? 'R' : 'T', ep->epnum, urb, stat);
+			urb->actual_length += dma->actual_len;
+		}
+	}
+
+	/* turn off DMA requests, discard state, stop polling ... */
+	if (is_in) {
+		/* giveback saves bulk toggle */
+		csr = musbfsh_h_flush_rxfifo(ep, 0);
+
+		/* REVISIT we still get an irq; should likely clear the
+		 * endpoint's irq stat here to avoid bogus irqs.
+		 * clearing that stat is platform-specific...
+		 */
+	} else if (ep->epnum) {
+		musbfsh_h_tx_flush_fifo(ep);
+		csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+		csr &= ~(MUSBFSH_TXCSR_AUTOSET
+			 | MUSBFSH_TXCSR_DMAENAB
+			 | MUSBFSH_TXCSR_H_RXSTALL
+			 | MUSBFSH_TXCSR_H_NAKTIMEOUT
+			 | MUSBFSH_TXCSR_H_ERROR | MUSBFSH_TXCSR_TXPKTRDY);
+		musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+		/* REVISIT may need to clear FLUSHFIFO ... */
+		musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+		/* flush cpu writebuffer */
+		csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+	} else {
+		musbfsh_h_ep0_flush_fifo(ep);
+	}
+	if (stat == 0)
+		musbfsh_advance_schedule(ep->musbfsh, urb, ep, is_in);
+	return stat;
+}
+
+static int musbfsh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct musbfsh *musbfsh = hcd_to_musbfsh(hcd);
+	struct musbfsh_qh *qh;
+	unsigned long flags;
+	int is_in = usb_pipein(urb->pipe);
+	int ret;
+
+	INFO("urb=%p, dev%d ep%d%s\n", urb,
+		usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe),
+		is_in ? "in" : "out");
+
+	spin_lock_irqsave(&musbfsh->lock, flags);
+	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (ret)
+		goto done;
+
+	qh = urb->hcpriv;
+	if (!qh)
+		goto done;
+
+	/*
+	 * Any URB not actively programmed into endpoint hardware can be
+	 * immediately given back; that's any URB not at the head of an
+	 * endpoint queue, unless someday we get real DMA queues.  And even
+	 * if it's at the head, it might not be known to the hardware...
+	 *
+	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
+	 * has already been updated.  This is a synchronous abort; it'd be
+	 * OK to hold off until after some IRQ, though.
+	 *
+	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
+	 */
+	if (!qh->is_ready
+	    || urb->urb_list.prev != &qh->hep->urb_list
+	    || musbfsh_ep_get_qh(qh->hw_ep, is_in) != qh) {
+		int ready = qh->is_ready;
+
+		qh->is_ready = 0;
+		musbfsh_giveback(musbfsh, urb, 0);
+		qh->is_ready = ready;
+
+		/* If nothing else (usually musbfsh_giveback) is using it
+		 * and its URB list has emptied, recycle this qh.
+		 */
+		if (ready && list_empty(&qh->hep->urb_list)) {
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+			if (qh->is_use_qmu)
+				mtk11_disable_q(musbfsh, qh->hw_ep->epnum, is_in);
+#endif
+			qh->hep->hcpriv = NULL;
+			list_del(&qh->ring);
+			if (musbfsh_host_dynamic_fifo && qh->type != USB_ENDPOINT_XFER_CONTROL)
+				musbfsh_host_free_ep_fifo(musbfsh, qh, is_in);
+			kfree(qh);
+		}
+	} else
+		ret = musbfsh_cleanup_urb(urb, qh);
+done:
+	spin_unlock_irqrestore(&musbfsh->lock, flags);
+	return ret;
+}
+
+/* disable an endpoint */
+static void musbfsh_h_disable(struct usb_hcd *hcd,
+			      struct usb_host_endpoint *hep)
+{
+	u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
+	unsigned long flags;
+	struct musbfsh *musbfsh = hcd_to_musbfsh(hcd);
+	struct musbfsh_qh *qh;
+	struct urb *urb;
+
+	WARNING("%s++: ep: 0x%x\r\n",
+		__func__, hep->desc.bEndpointAddress);
+	spin_lock_irqsave(&musbfsh->lock, flags);
+
+	qh = hep->hcpriv;
+	if (qh == NULL) {
+		MYDBG("qh == NULL\n");
+		goto exit;
+	}
+
+	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
+
+	/* Kick the first URB off the hardware, if needed */
+	qh->is_ready = 0;
+	if (musbfsh_ep_get_qh(qh->hw_ep, is_in) == qh) {
+		urb = next_urb(qh);
+
+		/*
+		 * workaround from the tablet branch: avoid a kernel
+		 * exception (KE) when qh->hep contains slab poison
+		 * (0x6b6b6b6b...); the side effect is that freed
+		 * memory may still be touched
+		 */
+
+		/*
+		 * enable this workaround for the
+		 * irq->adv_schedule / musbfsh_h_disable
+		 * concurrency issue
+		 */
+		if (!virt_addr_valid(urb)) {
+			MYDBG("urb(%p) addr error\n", urb);
+			goto exit;
+		}
+		/* make software (then hardware) stop ASAP */
+		if (!urb->unlinked)
+			urb->status = -ESHUTDOWN;
+
+		/* cleanup */
+		musbfsh_cleanup_urb(urb, qh);
+
+		/* Then nuke all the others ... and advance the
+		 * queue on hw_ep (e.g. bulk ring) when we're done.
+		 */
+		while (!list_empty(&hep->urb_list)) {
+			urb = next_urb(qh);
+			urb->status = -ESHUTDOWN;
+			musbfsh_advance_schedule(musbfsh, urb, qh->hw_ep,
+						 is_in);
+		}
+	} else {
+		/* Just empty the queue; the hardware is busy with
+		 * other transfers, and since !qh->is_ready nothing
+		 * will activate any of these as it advances.
+		 */
+		while (!list_empty(&hep->urb_list))
+			musbfsh_giveback(musbfsh, next_urb(qh), -ESHUTDOWN);
+
+#ifndef CONFIG_MTK_MUSBFSH_BIND_DEV_EP
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+		if (qh->is_use_qmu)
+			mtk11_disable_q(musbfsh, qh->hw_ep->epnum, is_in);
+#endif
+		hep->hcpriv = NULL;
+		list_del(&qh->ring);
+
+		if (musbfsh_host_dynamic_fifo && qh->type != USB_ENDPOINT_XFER_CONTROL)
+			musbfsh_host_free_ep_fifo(musbfsh, qh, is_in);
+		kfree(qh);
+#endif
+	}
+exit:
+#ifdef CONFIG_MTK_MUSBFSH_BIND_DEV_EP
+	if (hep->hcpriv) {
+		struct dma_controller *dma = musbfsh->dma_controller;
+		struct musbfsh_hw_ep *hw_ep;
+
+		qh = hep->hcpriv;
+		hw_ep = qh->hw_ep;
+
+		if (is_in) {
+			hw_ep->rx_reinit = 1;
+			if (hw_ep->rx_channel) {
+				dma->channel_release(hw_ep->rx_channel);
+				hw_ep->rx_channel = NULL;
+			}
+		} else {
+			hw_ep->tx_reinit = 1;
+			if (hw_ep->tx_channel) {
+				dma->channel_release(hw_ep->tx_channel);
+				hw_ep->tx_channel = NULL;
+			}
+		}
+
+		musbfsh_ep_set_qh(qh->hw_ep, is_in, NULL);
+		hep->hcpriv = NULL;
+
+		if (musbfsh_host_dynamic_fifo && qh->type != USB_ENDPOINT_XFER_CONTROL)
+			musbfsh_host_free_ep_fifo(musbfsh, qh, is_in);
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+		if (qh->is_use_qmu)
+			mtk11_disable_q(musbfsh, qh->hw_ep->epnum, is_in);
+#endif
+
+		if (qh->mux == 1) {
+			struct list_head *head;
+
+			head = qh->ring.prev;
+			list_del(&qh->ring);
+			kfree(qh);
+			qh = first_qh(head);
+		} else {
+			kfree(qh);
+			qh = NULL;
+		}
+	}
+#endif
+	spin_unlock_irqrestore(&musbfsh->lock, flags);
+}
+
+static int musbfsh_h_get_frame_number(struct usb_hcd *hcd)
+{
+	struct musbfsh *musbfsh = hcd_to_musbfsh(hcd);
+
+	return musbfsh_readw(musbfsh->mregs, MUSBFSH_FRAME);
+}
+
+static int musbfsh_h_start(struct usb_hcd *hcd)
+{
+	struct musbfsh *musbfsh = hcd_to_musbfsh(hcd);
+
+	INFO("musbfsh_h_start++\r\n");
+	/* NOTE: musbfsh_start() is called when the hub driver turns
+	 * on port power, or when (OTG) peripheral starts.
+	 */
+	hcd->state = HC_STATE_RUNNING;
+	musbfsh->port1_status = 0;
+	return 0;
+}
+
+static void musbfsh_h_stop(struct usb_hcd *hcd)
+{
+	INFO("musbfsh_h_stop++\r\n");
+	musbfsh_stop(hcd_to_musbfsh(hcd));
+	hcd->state = HC_STATE_HALT;
+}
+
+/* only send suspend signal to bus */
+static int musbfsh_bus_suspend(struct usb_hcd *hcd)
+{
+	struct musbfsh *musbfsh = hcd_to_musbfsh(hcd);
+	unsigned char power = musbfsh_readb(musbfsh->mregs, MUSBFSH_POWER);
+
+	WARNING("musbfsh_bus_suspend++,power=0x%x\r\n", power);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#if defined(CONFIG_PM_RUNTIME)
+	usb11_plat_suspend();
+#endif
+#endif
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#if defined(CONFIG_PM_RUNTIME) && defined(USB11_REMOTE_IRQ_NON_AUTO_MASK)
+	enable_remote_wake_up();
+#endif
+#endif
+
+#ifdef MTK_USB_RUNTIME_SUPPORT
+	/*
+	 * An edge-triggered EINT interrupt is held while masked (only one),
+	 * and reported once unmasked.
+	 */
+	mt_eint_unmask(CUST_EINT_MT6280_USB_WAKEUP_NUM);
+#endif
+	/*
+	 * wx: let the child port do the job;
+	 * joson: runtime suspend is not ready now,
+	 * so set the suspend signal here
+	 */
+	return 0;
+}
+
+/* only send resume signal to bus */
+static int musbfsh_bus_resume(struct usb_hcd *hcd)
+{
+	/* resuming child port does the work */
+	struct musbfsh *musbfsh = hcd_to_musbfsh(hcd);
+	unsigned char power;
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#if defined(CONFIG_PM_RUNTIME)
+	usb11_plat_resume();
+	return 0;
+#endif
+#endif
+
+#ifdef MTK_USB_RUNTIME_SUPPORT
+	mt_eint_mask(CUST_EINT_MT6280_USB_WAKEUP_NUM);
+#endif
+	power = musbfsh_readb(musbfsh->mregs, MUSBFSH_POWER);
+	WARNING("musbfsh_bus_resume++,power=0x%x\r\n", power);
+
+	/*
+	 * wx: let the child port do the job.
+	 * joson: runtime suspend is not ready yet,
+	 * so set the resume signal here.
+	 */
+	return 0;
+}
+
+struct hc_driver musbfsh_hc_driver = {
+	.description = "musbfsh-hcd",
+	.product_desc = "MUSBFSH HDRC host driver",
+	.hcd_priv_size = sizeof(struct musbfsh),
+	.flags = HCD_USB2 | HCD_MEMORY,
+
+	/*
+	 * not using irq handler or reset hooks from usbcore, since
+	 * those must be shared with peripheral code for OTG configs
+	 */
+
+	.start = musbfsh_h_start,
+	.stop = musbfsh_h_stop,
+
+	.get_frame_number = musbfsh_h_get_frame_number,
+
+	.urb_enqueue = musbfsh_urb_enqueue,
+	.urb_dequeue = musbfsh_urb_dequeue,
+	.endpoint_disable = musbfsh_h_disable,
+
+	.hub_status_data = musbfsh_hub_status_data,
+	.hub_control = musbfsh_hub_control,
+	.bus_suspend = musbfsh_bus_suspend,
+	.bus_resume = musbfsh_bus_resume,
+	/* .start_port_reset    = NULL, */
+	/* .hub_irq_enable      = NULL, */
+};
diff --git a/drivers/misc/mediatek/usb11/musbfsh_host.h b/drivers/misc/mediatek/usb11/musbfsh_host.h
new file mode 100644
index 0000000..70b4ab3
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_host.h
@@ -0,0 +1,155 @@
+/*
+ * MUSB OTG driver host defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * Copyright 2015 Mediatek Inc.
+ *	Marvin Lin <marvin.lin@mediatek.com>
+ *	Arvin Wang <arvin.wang@mediatek.com>
+ *	Vincent Fan <vincent.fan@mediatek.com>
+ *	Bryant Lu <bryant.lu@mediatek.com>
+ *	Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *	Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MUSBFSH_HOST_H
+#define _MUSBFSH_HOST_H
+
+#include <linux/scatterlist.h>
+
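+/*
+ * struct musbfsh is stored in the variable-length hcd_priv[] tail of
+ * struct usb_hcd (musbfsh_hc_driver sets hcd_priv_size to
+ * sizeof(struct musbfsh)), so the two helpers below convert between
+ * the structures with plain pointer arithmetic.
+ */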
+static inline struct usb_hcd *musbfsh_to_hcd(struct musbfsh *musb)
+{
+	return container_of((void *)musb, struct usb_hcd, hcd_priv);
+}
+
+static inline struct musbfsh *hcd_to_musbfsh(struct usb_hcd *hcd)
+{
+	return (struct musbfsh *)(hcd->hcd_priv);
+}
+
+/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
+struct musbfsh_qh {
+	struct usb_host_endpoint *hep;	/* usbcore info */
+	struct usb_device *dev;
+	struct musbfsh_hw_ep *hw_ep;	/* current binding */
+
+	struct list_head ring;	/* of musbfsh_qh */
+	/* struct musbfsh_qh            *next; *//* for periodic tree */
+	u8 mux;			/* qh multiplexed to hw_ep */
+
+	unsigned offset;	/* in urb->transfer_buffer */
+	unsigned segsize;	/* current xfer fragment */
+
+	u8 type_reg;		/* {rx,tx} type register */
+	u8 intv_reg;		/* {rx,tx} interval register */
+	u8 addr_reg;		/* device address register */
+	u8 h_addr_reg;		/* hub address register */
+	u8 h_port_reg;		/* hub port register */
+
+	u8 is_ready;		/* safe to modify hw_ep */
+	u8 type;		/* XFERTYPE_* */
+	u8 epnum;
+	u8 hb_mult;		/* high bandwidth pkts per uf */
+	u16 maxpacket;
+	u16 frame;		/* for periodic schedule */
+	unsigned iso_idx;	/* in urb->iso_frame_desc[] */
+	unsigned resubmit;
+	struct sg_mapping_iter sg_miter;	/* for highmem in PIO mode */
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	u8 is_use_qmu;
+#endif
+};
+
+/* map from control or bulk queue head to the first qh on that ring */
+static inline struct musbfsh_qh *first_qh(struct list_head *q)
+{
+	if (list_empty(q))
+		return NULL;
+	return list_entry(q->next, struct musbfsh_qh, ring);
+}
+
+
+extern void musbfsh_root_disconnect(struct musbfsh *musb);
+
+struct usb_hcd;
+
+extern int musbfsh_hub_status_data(struct usb_hcd *hcd, char *buf);
+extern int musbfsh_hub_control(struct usb_hcd *hcd,
+			       u16 typeReq, u16 wValue, u16 wIndex, char *buf,
+			       u16 wLength);
+
+extern struct hc_driver musbfsh_hc_driver;
+
+static inline struct urb *next_urb(struct musbfsh_qh *qh)
+{
+	struct list_head *queue;
+
+	if (!qh)
+		return NULL;
+	queue = &qh->hep->urb_list;
+	if (list_empty(queue))
+		return NULL;
+	return list_entry(queue->next, struct urb, urb_list);
+}
+
+/* include musbfsh_icusb.h for the ICUSB-related structs */
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+#include "musbfsh_icusb.h"
+#endif
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+extern int musbfsh_connect_flag;
+
+/* This should be in platform musbfsh_mt65xx.h */
+#endif
+
+#ifdef MTK_USB_RUNTIME_SUPPORT
+#include <cust_eint.h>
+extern void mt_eint_unmask(unsigned int line);
+extern void mt_eint_mask(unsigned int line);
+extern void request_wakeup_md_timeout(unsigned int dev_id,
+				      unsigned int dev_sub_id);
+#endif
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+extern void request_wakeup_md_timeout(unsigned int dev_id,
+	unsigned int dev_sub_id);
+extern int musbfsh_skip_port_suspend;
+extern int musbfsh_skip_port_resume;
+#endif
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+extern void musbfsh_ep_set_qh(struct musbfsh_hw_ep *ep, int isRx, struct musbfsh_qh *qh);
+extern struct musbfsh_qh *musbfsh_ep_get_qh(struct musbfsh_hw_ep *ep, int isRx);
+extern void musbfsh_advance_schedule(struct musbfsh *musb, struct urb *urb,
+				  struct musbfsh_hw_ep *hw_ep, int is_in);
+extern u16 musbfsh_h_flush_rxfifo(struct musbfsh_hw_ep *hw_ep, u16 csr);
+extern void musbfsh_h_tx_flush_fifo(struct musbfsh_hw_ep *ep);
+#endif
+#endif				/* _MUSBFSH_HOST_H */
diff --git a/drivers/misc/mediatek/usb11/musbfsh_hsdma.c b/drivers/misc/mediatek/usb11/musbfsh_hsdma.c
new file mode 100644
index 0000000..85e9f51
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_hsdma.c
@@ -0,0 +1,447 @@
+/*
+ * MUSB OTG driver - support for Mentor's DMA controller
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2007 by Texas Instruments
+ *
+ * Copyright 2015 Mediatek Inc.
+ *	Marvin Lin <marvin.lin@mediatek.com>
+ *	Arvin Wang <arvin.wang@mediatek.com>
+ *	Vincent Fan <vincent.fan@mediatek.com>
+ *	Bryant Lu <bryant.lu@mediatek.com>
+ *	Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *	Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "musbfsh_core.h"
+#include "musbfsh_host.h"
+#include "musbfsh_dma.h"
+#include "musbfsh_hsdma.h"
+#include "usb.h"
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+#include "musbfsh_icusb.h"
+#endif
+
+static int dma_controller_start(struct dma_controller *c)
+{
+	INFO("++\n");
+	/* nothing to do */
+	return 0;
+}
+
+static void dma_channel_release(struct dma_channel *channel);
+
+static int dma_controller_stop(struct dma_controller *c)
+{
+	struct musbfsh_dma_controller *controller =
+		container_of(c, struct musbfsh_dma_controller, controller);
+
+	struct musbfsh *musbfsh = controller->private_data;
+	struct dma_channel *channel;
+	u8 bit;
+
+	INFO("++\n");
+	if (controller->used_channels != 0) {
+		dev_err(musbfsh->controller,
+			"Stopping DMA controller while channel active\n");
+
+		for (bit = 0; bit < MUSBFSH_HSDMA_CHANNELS; bit++) {
+			if (controller->used_channels & (1 << bit)) {
+				channel = &controller->channel[bit].channel;
+				dma_channel_release(channel);
+
+				if (!controller->used_channels)
+					break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
+						struct musbfsh_hw_ep *hw_ep,
+						u8 transmit)
+{
+	struct musbfsh_dma_controller *controller =
+		container_of(c, struct musbfsh_dma_controller, controller);
+	struct musbfsh_dma_channel *musbfsh_channel = NULL;
+	struct dma_channel *channel = NULL;
+	u8 bit, start_bit;
+
+	INFO("epnum=%d\n", hw_ep->epnum);
+
+/* reserve dma channel 0 for QMU */
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+	start_bit = 1;
+#else
+	start_bit = 0;
+#endif
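+	/* used_channels is a bitmask: bit N set means channel N is in use */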
+	for (bit = start_bit; bit < MUSBFSH_HSDMA_CHANNELS; bit++) {
+		if (!(controller->used_channels & (1 << bit))) {
+			controller->used_channels |= (1 << bit);
+			musbfsh_channel = &(controller->channel[bit]);
+			musbfsh_channel->controller = controller;
+			musbfsh_channel->idx = bit;
+			musbfsh_channel->epnum = hw_ep->epnum;
+			musbfsh_channel->transmit = transmit;
+			channel = &(musbfsh_channel->channel);
+			channel->private_data = musbfsh_channel;
+			channel->status = MUSBFSH_DMA_STATUS_FREE;
+			channel->max_len = 0x10000;
+			/* Tx => mode 1; Rx => mode 0 */
+			channel->desired_mode = transmit;
+			/* wz:set Tx and Rx to mode 0 */
+			/* channel->desired_mode = 0; */
+			channel->actual_len = 0;
+			break;
+		}
+	}
+	if (musbfsh_channel)
+		INFO("idx=%d\n", musbfsh_channel->idx);
+	return channel;
+}
+
+static void dma_channel_release(struct dma_channel *channel)
+{
+	struct musbfsh_dma_channel *musbfsh_channel = channel->private_data;
+
+	INFO("idx=%d\n", musbfsh_channel->idx);
+	channel->actual_len = 0;
+	musbfsh_channel->start_addr = 0;
+	musbfsh_channel->len = 0;
+
+	musbfsh_channel->controller->used_channels &=
+		~(1 << musbfsh_channel->idx);
+
+	channel->status = MUSBFSH_DMA_STATUS_UNKNOWN;
+}
+
+static void configure_channel(struct dma_channel *channel,
+			      u16 packet_sz, u8 mode, dma_addr_t dma_addr,
+			      u32 len)
+{
+	struct musbfsh_dma_channel *musbfsh_channel = channel->private_data;
+	struct musbfsh_dma_controller *controller = musbfsh_channel->controller;
+
+	/* struct musbfsh *musbfsh = controller->private_data; */
+	void __iomem *mbase = controller->base;
+	u8 bchannel = musbfsh_channel->idx;
+	u16 csr = 0;
+
+	INFO("idx=%d\n", musbfsh_channel->idx);
+	INFO("%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
+	     channel, packet_sz, (unsigned int)dma_addr, len, mode);
+
+	if (mode) {		/* mode 1,multi-packet */
+		csr |= 1 << MUSBFSH_HSDMA_MODE1_SHIFT;
+		if (len < packet_sz)
+			musbfsh_bug();
+	}
+	csr |= MUSBFSH_HSDMA_BURSTMODE_INCR16 << MUSBFSH_HSDMA_BURSTMODE_SHIFT;
+
+	csr |= (musbfsh_channel->epnum << MUSBFSH_HSDMA_ENDPOINT_SHIFT)
+	    | (1 << MUSBFSH_HSDMA_ENABLE_SHIFT)
+	    | (1 << MUSBFSH_HSDMA_IRQENABLE_SHIFT)
+	    | (musbfsh_channel->transmit ? (1 << MUSBFSH_HSDMA_TRANSMIT_SHIFT)
+	       : 0);
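+	/*
+	 * e.g. a mode-1 Tx transfer on ep1 with INCR16 bursts yields
+	 * csr = 0x4 (mode1) | 0x600 (INCR16 << 9) | 0x10 (ep1 << 4)
+	 *	| 0x2 (transmit) | 0x8 (irq en) | 0x1 (enable) = 0x61f.
+	 */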
+
+	/* address/count */
+	musbfsh_write_hsdma_addr(mbase, bchannel, dma_addr);
+	musbfsh_write_hsdma_count(mbase, bchannel, len);
+
+	/* control (this should start things) */
+	musbfsh_writew(mbase, MUSBFSH_HSDMA_CHANNEL_OFFSET(bchannel,
+							 MUSBFSH_HSDMA_CONTROL),
+							 csr);
+}
+
+static int dma_channel_program(struct dma_channel *channel,
+			       u16 packet_sz, u8 mode, dma_addr_t dma_addr,
+			       u32 len)
+{
+	struct musbfsh_dma_channel *musbfsh_channel = channel->private_data;
+	/* struct musbfsh_dma_controller *controller =
+	 *	musbfsh_channel->controller;
+	 */
+	/* struct musbfsh *musbfsh = controller->private_data; */
+
+	INFO("ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
+	     musbfsh_channel->epnum,
+	     musbfsh_channel->transmit ? "Tx" : "Rx", packet_sz,
+	     (unsigned int)dma_addr, len, mode);
+
+	if (channel->status == MUSBFSH_DMA_STATUS_UNKNOWN ||
+	       channel->status == MUSBFSH_DMA_STATUS_BUSY)
+		musbfsh_bug();
+
+	channel->actual_len = 0;
+	musbfsh_channel->start_addr = dma_addr;
+	musbfsh_channel->len = len;
+	musbfsh_channel->max_packet_sz = packet_sz;
+	channel->status = MUSBFSH_DMA_STATUS_BUSY;
+
+	configure_channel(channel, packet_sz, mode, dma_addr, len);
+
+	return true;
+}
+
+static int dma_channel_abort(struct dma_channel *channel)
+{
+	struct musbfsh_dma_channel *musbfsh_channel = channel->private_data;
+	void __iomem *mbase = musbfsh_channel->controller->base;
+
+	u8 bchannel = musbfsh_channel->idx;
+	int offset;
+	u16 csr;
+
+	INFO("%s, idx=%d\r\n", __func__, musbfsh_channel->idx);
+
+	if (channel->status == MUSBFSH_DMA_STATUS_BUSY) {
+		if (musbfsh_channel->transmit) {
+			offset = MUSBFSH_EP_OFFSET(musbfsh_channel->epnum,
+						   MUSBFSH_TXCSR);
+
+			/*
+			 * The programming guide says that we must clear
+			 * the DMAENAB bit before the DMAMODE bit...
+			 */
+			csr = musbfsh_readw(mbase, offset);
+			csr &= ~(MUSBFSH_TXCSR_AUTOSET | MUSBFSH_TXCSR_DMAENAB);
+			musbfsh_writew(mbase, offset, csr);
+			csr &= ~MUSBFSH_TXCSR_DMAMODE;
+			musbfsh_writew(mbase, offset, csr);
+		} else {
+			offset = MUSBFSH_EP_OFFSET(musbfsh_channel->epnum,
+						   MUSBFSH_RXCSR);
+
+			csr = musbfsh_readw(mbase, offset);
+			csr &= ~(MUSBFSH_RXCSR_AUTOCLEAR |
+				 MUSBFSH_RXCSR_DMAENAB | MUSBFSH_RXCSR_DMAMODE);
+			musbfsh_writew(mbase, offset, csr);
+		}
+
+		musbfsh_writew(mbase,
+			       MUSBFSH_HSDMA_CHANNEL_OFFSET(bchannel,
+					MUSBFSH_HSDMA_CONTROL),
+			       0);
+		musbfsh_write_hsdma_addr(mbase, bchannel, 0);
+		musbfsh_write_hsdma_count(mbase, bchannel, 0);
+		channel->status = MUSBFSH_DMA_STATUS_FREE;
+	}
+
+	return 0;
+}
+
+irqreturn_t musbfsh_dma_controller_irq(int irq, void *private_data)
+{
+	struct musbfsh_dma_controller *controller = private_data;
+	struct musbfsh *musbfsh = controller->private_data;
+	struct musbfsh_dma_channel *musbfsh_chan; /* musbfsh_channel */
+	struct dma_channel *channel;
+
+	void __iomem *mbase = controller->base;
+
+	irqreturn_t retval = IRQ_NONE;
+
+	/* unsigned long flags; */
+	u8 bchanl;	/* channel */
+	u8 int_hsdma;
+
+	u32 addr, count;
+	u16 csr;
+
+	INFO("++\n");
+
+	/*
+	 * This function is called inside generic_interrupt
+	 * We don't need spin_lock_irqsave(&musbfsh->lock, flags) here
+	 */
+
+	int_hsdma = musbfsh->int_dma;
+
+	/* normally we should not get here! */
+	if (!int_hsdma) {
+		WARNING("spurious DMA irq\n");
+
+		for (bchanl = 0; bchanl < MUSBFSH_HSDMA_CHANNELS; bchanl++) {
+			musbfsh_chan = (struct musbfsh_dma_channel *)
+			    &(controller->channel[bchanl]);
+			channel = &musbfsh_chan->channel;
+			if (channel->status == MUSBFSH_DMA_STATUS_BUSY) {
+				count = musbfsh_read_hsdma_count(mbase, bchanl);
+
+				/*
+				 * All data has been transferred;
+				 * notify the CPU to process it.
+				 */
+				if (count == 0)
+					int_hsdma |= (1 << bchanl);
+			}
+		}
+
+		INFO("int_hsdma = 0x%x\n", int_hsdma);
+
+		if (!int_hsdma)
+			goto done;
+	}
+
+	for (bchanl = 0; bchanl < MUSBFSH_HSDMA_CHANNELS; bchanl++) {
+		if (int_hsdma & (1 << bchanl)) {
+			musbfsh_chan = (struct musbfsh_dma_channel *)
+			    &(controller->channel[bchanl]);
+			channel = &musbfsh_chan->channel;
+
+			csr = musbfsh_readw(mbase,
+					    MUSBFSH_HSDMA_CHANNEL_OFFSET(bchanl,
+						MUSBFSH_HSDMA_CONTROL));
+
+			if (csr & (1 << MUSBFSH_HSDMA_BUSERROR_SHIFT)) {
+				musbfsh_chan->channel.status =
+					MUSBFSH_DMA_STATUS_BUS_ABORT;
+			} else {
+				u8 devctl;
+
+				/*
+				 * the address register advances as
+				 * data is transferred.
+				 */
+				addr = musbfsh_read_hsdma_addr(mbase, bchanl);
+				channel->actual_len =
+					addr - musbfsh_chan->start_addr;
+
+				INFO("ch %p, 0x%x -> 0x%x (%zu / %d) %s\n",
+				     channel, musbfsh_chan->start_addr,
+				     addr, channel->actual_len,
+				     musbfsh_chan->len,
+				     (channel->actual_len < musbfsh_chan->len) ?
+					"=> reconfig 0" : "=> complete");
+
+				devctl = musbfsh_readb(mbase, MUSBFSH_DEVCTL);
+
+				channel->status = MUSBFSH_DMA_STATUS_FREE;
+
+				/* completed */
+				if ((devctl & MUSBFSH_DEVCTL_HM) &&
+					(musbfsh_chan->transmit) && /* Tx */
+					((channel->desired_mode == 0) ||
+					(channel->actual_len & /* short pkt */
+					(musbfsh_chan->max_packet_sz - 1)))
+				    ) {
+					u8 epnum = musbfsh_chan->epnum;
+					int offset =
+						MUSBFSH_EP_OFFSET(epnum,
+								 MUSBFSH_TXCSR);
+					u16 txcsr;
+
+					/*
+					 * The programming guide says that we
+					 * must clear DMAENAB before DMAMODE.
+					 */
+					musbfsh_ep_select(mbase, epnum);
+					txcsr = musbfsh_readw(mbase, offset);
+					txcsr &= ~(MUSBFSH_TXCSR_DMAENAB |
+						MUSBFSH_TXCSR_AUTOSET);
+					musbfsh_writew(mbase, offset, txcsr);
+					/* Send out the packet */
+					txcsr &= ~MUSBFSH_TXCSR_DMAMODE;
+					/*
+					 * the packet is already in the FIFO;
+					 * only TxPktRdy needs to be set
+					 */
+					txcsr |= MUSBFSH_TXCSR_TXPKTRDY;
+					musbfsh_writew(mbase, offset, txcsr);
+				}
+				musbfsh_dma_completion(musbfsh,
+						       musbfsh_chan->epnum,
+						       musbfsh_chan->transmit);
+			}
+		}
+	}
+
+	retval = IRQ_HANDLED;
+done:
+	/* spin_unlock_irqrestore(&musbfsh->lock, flags); */
+	return retval;
+}
+
+void musbfsh_dma_controller_destroy(struct dma_controller *c)
+{
+	struct musbfsh_dma_controller *controller =
+		container_of(c, struct musbfsh_dma_controller, controller);
+
+	INFO("++\n");
+	if (!controller)
+		return;
+
+	if (controller->irq)
+		free_irq(controller->irq, c);
+
+	kfree(controller);
+}
+
+struct dma_controller *__init
+musbfsh_dma_controller_create(struct musbfsh *musbfsh, void __iomem *base)
+{
+	struct musbfsh_dma_controller *controller;
+
+	INFO("++\n");
+
+	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+	if (!controller)
+		return NULL;
+
+	controller->channel_count = MUSBFSH_HSDMA_CHANNELS;
+	controller->private_data = musbfsh;
+	controller->base = base;
+
+	controller->controller.start = dma_controller_start;
+	controller->controller.stop = dma_controller_stop;
+	controller->controller.channel_alloc = dma_channel_allocate;
+	controller->controller.channel_release = dma_channel_release;
+	controller->controller.channel_program = dma_channel_program;
+	controller->controller.channel_abort = dma_channel_abort;
+
+	controller->irq = 0;
+	musbfsh->musbfsh_dma_controller = controller;
+	/* enable DMA interrupt for all channels */
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+	if (skip_mac_init_attr.value)
+		MYDBG("");
+	else
+		musbfsh_writeb(base, MUSBFSH_HSDMA_DMA_INTR_UNMASK_SET, 0xff);
+#else
+	musbfsh_writeb(base, MUSBFSH_HSDMA_DMA_INTR_UNMASK_SET, 0xff);
+#endif
+
+	return &controller->controller;
+}
diff --git a/drivers/misc/mediatek/usb11/musbfsh_hsdma.h b/drivers/misc/mediatek/usb11/musbfsh_hsdma.h
new file mode 100644
index 0000000..f1cdbb2
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_hsdma.h
@@ -0,0 +1,115 @@
+/*
+ * MUSB OTG driver - support for Mentor's DMA controller
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2007 by Texas Instruments
+ *
+ * Copyright 2015 Mediatek Inc.
+ *	Marvin Lin <marvin.lin@mediatek.com>
+ *	Arvin Wang <arvin.wang@mediatek.com>
+ *	Vincent Fan <vincent.fan@mediatek.com>
+ *	Bryant Lu <bryant.lu@mediatek.com>
+ *	Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *	Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include "musbfsh_dma.h"
+
+#define MUSBFSH_HSDMA_BASE			0x200
+#define MUSBFSH_HSDMA_INTR			(MUSBFSH_HSDMA_BASE + 0x0)
+#define MUSBFSH_HSDMA_DMA_INTR_UNMASK		(MUSBFSH_HSDMA_INTR + 0x1)
+#define MUSBFSH_HSDMA_DMA_INTR_UNMASK_CLEAR	(MUSBFSH_HSDMA_INTR + 0x2)
+#define MUSBFSH_HSDMA_DMA_INTR_UNMASK_SET	(MUSBFSH_HSDMA_INTR + 0x3)
+
+#define MUSBFSH_HSDMA_CONTROL			0x4
+#define MUSBFSH_HSDMA_ADDRESS			0x8
+#define MUSBFSH_HSDMA_COUNT			0xc
+
+/* _bchannel starts from 0 */
+#define MUSBFSH_HSDMA_CHANNEL_OFFSET(_bchannel, _offset)		\
+		(MUSBFSH_HSDMA_BASE + ((_bchannel) << 4) + (_offset))
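+/* e.g. channel 1, MUSBFSH_HSDMA_CONTROL: 0x200 + (1 << 4) + 0x4 = 0x214 */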
+
+#define musbfsh_read_hsdma_addr(mbase, bchannel)			\
+	musbfsh_readl(mbase,						\
+		      MUSBFSH_HSDMA_CHANNEL_OFFSET(bchannel,		\
+						MUSBFSH_HSDMA_ADDRESS))
+
+#define musbfsh_write_hsdma_addr(mbase, bchannel, addr)			\
+	musbfsh_writel(mbase,						\
+		       MUSBFSH_HSDMA_CHANNEL_OFFSET(bchannel,		\
+						MUSBFSH_HSDMA_ADDRESS), \
+		       addr)
+
+#define musbfsh_read_hsdma_count(mbase, bchannel)			\
+	musbfsh_readl(mbase,						\
+		   MUSBFSH_HSDMA_CHANNEL_OFFSET(bchannel, MUSBFSH_HSDMA_COUNT))
+
+#define musbfsh_write_hsdma_count(mbase, bchannel, len)			\
+	musbfsh_writel(mbase,						\
+		       MUSBFSH_HSDMA_CHANNEL_OFFSET(bchannel,		\
+						    MUSBFSH_HSDMA_COUNT), \
+		       len)
+
+/* control register (16-bit): */
+#define MUSBFSH_HSDMA_ENABLE_SHIFT	0
+#define MUSBFSH_HSDMA_TRANSMIT_SHIFT	1
+#define MUSBFSH_HSDMA_MODE1_SHIFT	2
+#define MUSBFSH_HSDMA_IRQENABLE_SHIFT	3
+#define MUSBFSH_HSDMA_ENDPOINT_SHIFT	4
+#define MUSBFSH_HSDMA_BUSERROR_SHIFT	8
+#define MUSBFSH_HSDMA_BURSTMODE_SHIFT	9
+#define MUSBFSH_HSDMA_BURSTMODE		(3 << MUSBFSH_HSDMA_BURSTMODE_SHIFT)
+#define MUSBFSH_HSDMA_BURSTMODE_UNSPEC	0
+#define MUSBFSH_HSDMA_BURSTMODE_INCR4	1
+#define MUSBFSH_HSDMA_BURSTMODE_INCR8	2
+#define MUSBFSH_HSDMA_BURSTMODE_INCR16	3
+
+#ifndef MUSBFSH_HSDMA_CHANNELS
+#define MUSBFSH_HSDMA_CHANNELS		8
+#endif
+
+struct musbfsh_dma_controller;
+
+struct musbfsh_dma_channel {
+	struct dma_channel channel;
+	struct musbfsh_dma_controller *controller;
+	u32 start_addr;
+	u32 len;
+	u16 max_packet_sz;
+	u8 idx;
+	u8 epnum;
+	u8 transmit;
+};
+
+struct musbfsh_dma_controller {
+	struct dma_controller controller;
+	struct musbfsh_dma_channel channel[MUSBFSH_HSDMA_CHANNELS];
+	void *private_data;
+	void __iomem *base;
+	u8 channel_count;
+	u8 used_channels;
+	u8 irq;
+};
diff --git a/drivers/misc/mediatek/usb11/musbfsh_icusb.c b/drivers/misc/mediatek/usb11/musbfsh_icusb.c
new file mode 100644
index 0000000..ff58813
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_icusb.c
@@ -0,0 +1,624 @@
+/*
+ * ICUSB - for MUSB Host Driver
+ *
+ * Copyright 2015 Mediatek Inc.
+ *	Marvin Lin <marvin.lin@mediatek.com>
+ *	Arvin Wang <arvin.wang@mediatek.com>
+ *	Vincent Fan <vincent.fan@mediatek.com>
+ *	Bryant Lu <bryant.lu@mediatek.com>
+ *	Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *	Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/usb/input.h>
+#include <linux/hid.h>
+
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <net/sock.h>
+#include <net/netlink.h>
+#include <linux/skbuff.h>
+
+/*
+ * Version Information
+ */
+#define DRIVER_VERSION ""
+#define DRIVER_AUTHOR ""
+#define DRIVER_DESC "USB ICUSB DRIVER"
+#define DRIVER_LICENSE "GPL"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE(DRIVER_LICENSE);
+
+#define ICCD_INTERFACE_CLASS 0x0B
+#define ICCD_CLASS_DESCRIPTOR_LENGTH	(0x36)
+
+#include "usb.h"
+#include "musbfsh_icusb.h"
+
+struct usb_icusb {
+	char name[128];
+};
+
+struct my_attr power_resume_time_neogo_attr = {
+	.attr.name = "power_resume_time_neogo",
+	.attr.mode = 0644,
+#ifdef MTK_ICUSB_POWER_AND_RESUME_TIME_NEOGO_SUPPORT
+	.value = 1
+#else
+	.value = 0
+#endif
+};
+
+static struct my_attr my_attr_test = {
+	.attr.name = "my_attr_test",
+	.attr.mode = 0644,
+	.value = 1
+};
+
+static struct attribute *myattr[] = {
+	(struct attribute *)&my_attr_test,
+	(struct attribute *)&power_resume_time_neogo_attr,
+	(struct attribute *)&skip_session_req_attr,
+	(struct attribute *)&skip_enable_session_attr,
+	(struct attribute *)&skip_mac_init_attr,
+	(struct attribute *)&resistor_control_attr,
+	(struct attribute *)&hw_dbg_attr,
+	(struct attribute *)&skip_port_pm_attr,
+	NULL
+};
+
+static struct IC_USB_CMD ic_cmd;
+unsigned int g_ic_usb_status =
+	((USB_PORT1_DISCONNECT_DONE) << USB_PORT1_STS_SHIFT);
+static struct sock *netlink_sock;
+static u_int g_pid;
+static struct proc_dir_entry *proc_drv_icusb_dir_entry;
+
+static void icusb_dump_data(char *buf, int len);
+static void set_icusb_phy_power_negotiation_fail(void);
+static void set_icusb_phy_power_negotiation_ok(void);
+static void set_icusb_data_of_interface_power_request(short data);
+
+static void icusb_resume_time_negotiation(struct usb_device *dev)
+{
+	int ret;
+	int retries = IC_USB_RETRIES_RESUME_TIME_NEGOTIATION;
+	char resume_time_negotiation_data[IC_USB_LEN_RESUME_TIME_NEGOTIATION];
+
+	while (retries-- > 0) {
+		MYDBG("");
+		ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+				      IC_USB_REQ_GET_INTERFACE_RESUME_TIME,
+				      IC_USB_REQ_TYPE_GET_INTERFACE_RESUME_TIME,
+				      IC_USB_WVALUE_RESUME_TIME_NEGOTIATION,
+				      IC_USB_WINDEX_RESUME_TIME_NEGOTIATION,
+				      resume_time_negotiation_data,
+				      IC_USB_LEN_RESUME_TIME_NEGOTIATION,
+				      USB_CTRL_GET_TIMEOUT);
+		if (ret < 0) {
+			MYDBG("ret : %d\n", ret);
+			continue;
+		} else {
+			MYDBG("");
+			icusb_dump_data(resume_time_negotiation_data,
+					IC_USB_LEN_RESUME_TIME_NEGOTIATION);
+			break;
+		}
+
+	}
+}
+
+void icusb_power_negotiation(struct usb_device *dev)
+{
+	int ret;
+	int retries = IC_USB_RETRIES_POWER_NEGOTIATION;
+	char get_power_negotiation_data[IC_USB_LEN_POWER_NEGOTIATION];
+	char set_power_negotiation_data[IC_USB_LEN_POWER_NEGOTIATION];
+	int power_negotiation_done = 0;
+	enum PHY_VOLTAGE_TYPE phy_volt;
+
+	while (retries-- > 0) {
+		MYDBG("");
+		power_negotiation_done = 0;
+		ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+				      IC_USB_REQ_GET_IFACE_POWER,
+				      IC_USB_REQ_TYPE_GET_IFACE_POWER,
+				      IC_USB_WVALUE_POWER_NEGOTIATION,
+				      IC_USB_WINDEX_POWER_NEGOTIATION,
+				      get_power_negotiation_data,
+				      IC_USB_LEN_POWER_NEGOTIATION,
+				      USB_CTRL_GET_TIMEOUT);
+		if (ret < 0) {
+			MYDBG("ret : %d\n", ret);
+			continue;
+		} else {
+			MYDBG("");
+			icusb_dump_data(get_power_negotiation_data,
+					IC_USB_LEN_POWER_NEGOTIATION);
+
+			/* copy the prefer bit from get interface power */
+			set_power_negotiation_data[0] =
+				(get_power_negotiation_data[0] &
+					IC_USB_PREFER_CLASSB_ENABLE_BIT);
+
+			/* set our current voltage */
+			phy_volt = get_usb11_phy_voltage();
+			if (phy_volt == VOL_33)
+				set_power_negotiation_data[0] |=
+					(char)IC_USB_CLASSB;
+			else if (phy_volt == VOL_18)
+				set_power_negotiation_data[0] |=
+					(char)IC_USB_CLASSC;
+			else
+				MYDBG("");
+
+			/* clamp the requested current to our budget */
+			if (get_power_negotiation_data[1] > IC_USB_CURRENT) {
+				MYDBG("");
+				set_power_negotiation_data[1] = IC_USB_CURRENT;
+			} else {
+				MYDBG("");
+				set_power_negotiation_data[1] =
+					get_power_negotiation_data[1];
+			}
+			MYDBG("power_negotiation_data[0] : 0x%x",
+			      set_power_negotiation_data[0]);
+			MYDBG("power_negotiation_data[1] : 0x%x",
+			     set_power_negotiation_data[1]);
+			MYDBG("IC_USB_CURRENT :%d\n", IC_USB_CURRENT);
+
+			ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+					      IC_USB_REQ_SET_IFACE_POWER,
+					      IC_USB_REQ_TYPE_SET_IFACE_POWER,
+					      IC_USB_WVALUE_POWER_NEGOTIATION,
+					      IC_USB_WINDEX_POWER_NEGOTIATION,
+					      set_power_negotiation_data,
+					      IC_USB_LEN_POWER_NEGOTIATION,
+					      USB_CTRL_SET_TIMEOUT);
+
+			if (ret < 0) {
+				MYDBG("ret : %d\n", ret);
+			} else {
+				MYDBG("");
+				power_negotiation_done = 1;
+				break;
+			}
+			/* break; */
+		}
+	}
+
+	MYDBG("retries : %d\n", retries);
+	if (!power_negotiation_done) {
+		set_icusb_phy_power_negotiation_fail();
+	} else {
+		set_icusb_data_of_interface_power_request(
+			*((short *)get_power_negotiation_data));
+		set_icusb_phy_power_negotiation_ok();
+	}
+}
+
+void usb11_wait_disconnect_done(int value)
+{
+	if (is_usb11_enabled()) {
+		while (1) {
+			unsigned int ic_usb_status = g_ic_usb_status;
+
+			MYDBG("ic_usb_status : %x\n", ic_usb_status);
+			ic_usb_status &=
+				(USB_PORT1_STS_MSK << USB_PORT1_STS_SHIFT);
+			MYDBG("ic_usb_status : %x\n", ic_usb_status);
+
+			if (ic_usb_status ==
+				(USB_PORT1_DISCONNECT_DONE <<
+					USB_PORT1_STS_SHIFT)) {
+				MYDBG("USB_PORT1_DISCONNECT_DONE\n");
+				break;
+			}
+
+			if (ic_usb_status ==
+				(USB_PORT1_DISCONNECTING <<
+					USB_PORT1_STS_SHIFT))
+				MYDBG("USB_PORT1_DISCONNECTING\n");
+
+			mdelay(10);
+		}
+	} else {
+		MYDBG("usb11 is not enabled, skip\n");
+		MYDBG("usb11_wait_disconnect_done()\n");
+	}
+
+}
+
+int check_usb11_sts_disconnect_done(void)
+{
+	unsigned int ic_usb_status = g_ic_usb_status;
+
+	MYDBG("ic_usb_status : %x\n", ic_usb_status);
+	ic_usb_status &= (USB_PORT1_STS_MSK << USB_PORT1_STS_SHIFT);
+	MYDBG("ic_usb_status : %x\n", ic_usb_status);
+
+	if (ic_usb_status ==
+	    (USB_PORT1_DISCONNECT_DONE << USB_PORT1_STS_SHIFT)) {
+		MYDBG("USB_PORT1_DISCONNECT_DONE got\n");
+		return 1;
+	} else {
+		return 0;
+	}
+
+}
+
+void set_usb11_sts_connect(void)
+{
+	MYDBG("...................");
+	g_ic_usb_status &= ~(USB_PORT1_STS_MSK << USB_PORT1_STS_SHIFT);
+	g_ic_usb_status |= ((USB_PORT1_CONNECT) << USB_PORT1_STS_SHIFT);
+}
+
+void set_usb11_sts_disconnecting(void)
+{
+	MYDBG("...................");
+	g_ic_usb_status &= ~(USB_PORT1_STS_MSK << USB_PORT1_STS_SHIFT);
+	g_ic_usb_status |= ((USB_PORT1_DISCONNECTING) << USB_PORT1_STS_SHIFT);
+}
+
+void set_icusb_sts_disconnect_done(void)
+{
+	MYDBG("...................");
+	g_ic_usb_status &= ~(USB_PORT1_STS_MSK << USB_PORT1_STS_SHIFT);
+	g_ic_usb_status |= ((USB_PORT1_DISCONNECT_DONE) << USB_PORT1_STS_SHIFT);
+}
+
+void set_icusb_data_of_interface_power_request(short data)
+{
+	MYDBG("...................");
+	g_ic_usb_status |= ((data) << PREFER_VOL_CLASS_SHIFT);
+}
+
+void reset_usb11_phy_power_negotiation_status(void)
+{
+	MYDBG("...................");
+
+	g_ic_usb_status &= ~(PREFER_VOL_STS_MSK << PREFER_VOL_STS_SHIFT);
+	g_ic_usb_status |= ((PREFER_VOL_NOT_INITED) << PREFER_VOL_STS_SHIFT);
+
+}
+
+void set_icusb_phy_power_negotiation_fail(void)
+{
+	MYDBG("...................");
+
+	g_ic_usb_status &= ~(PREFER_VOL_STS_MSK << PREFER_VOL_STS_SHIFT);
+	g_ic_usb_status |= ((PREFER_VOL_PWR_NEG_FAIL) << PREFER_VOL_STS_SHIFT);
+
+}
+
+void set_icusb_phy_power_negotiation_ok(void)
+{
+	MYDBG("...................");
+
+	g_ic_usb_status &= ~(PREFER_VOL_STS_MSK << PREFER_VOL_STS_SHIFT);
+	g_ic_usb_status |= ((PREFER_VOL_PWR_NEG_OK) << PREFER_VOL_STS_SHIFT);
+
+}
+
+
+void usb11_phy_prefer_3v_status_check(void)
+{
+	unsigned int ic_usb_status = g_ic_usb_status;
+
+	MYDBG("ic_usb_status : %x\n", ic_usb_status);
+	ic_usb_status &= (PREFER_VOL_STS_MSK << PREFER_VOL_STS_SHIFT);
+	MYDBG("ic_usb_status : %x\n", ic_usb_status);
+}
+
+
+void icusb_dump_data(char *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		MYDBG("data[%d]: %x\n", i, buf[i]);
+
+}
+
+int usb11_init_phy_by_voltage(enum PHY_VOLTAGE_TYPE phy_volt)
+{
+	musbfsh_init_phy_by_voltage(phy_volt);
+	return 0;
+}
+
+int usb11_session_control(enum SESSION_CONTROL_ACTION action)
+{
+	if (action == START_SESSION)
+		musbfsh_start_session();
+	else if (action == STOP_SESSION) {
+		/* musbfsh_stop_session(); */
+		if (!is_usb11_enabled()) {
+			mt65xx_usb11_mac_reset_and_phy_stress_set();
+		} else {
+			MYDBG("usb11 has been enabled, skip");
+			MYDBG("mt65xx_usb11_mac_reset_and_phy_stress_set()\n");
+		}
+	} else
+		MYDBG("unknown action\n");
+
+
+	return 0;
+}
+
+static void udp_reply(int pid, int seq, void *payload)
+{
+	struct sk_buff *skb;
+	struct nlmsghdr *nlh;
+	int size = strlen(payload) + 1;
+	int len = NLMSG_SPACE(size);
+	void *data;
+	int ret;
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+	/* 3.10 specific */
+	nlh = __nlmsg_put(skb, pid, seq, 0, size, 0);
+	nlh->nlmsg_flags = 0;
+	data = NLMSG_DATA(nlh);
+	memcpy(data, payload, size);
+
+	/* 3.10 specific */
+	NETLINK_CB(skb).portid = 0;	/* from kernel */
+	NETLINK_CB(skb).dst_group = 0;	/* unicast */
+	ret = netlink_unicast(netlink_sock, skb, pid, MSG_DONTWAIT);
+	if (ret < 0)
+		MYDBG("send failed\n");
+}
+
+/* Receive messages from netlink socket. */
+static void udp_receive(struct sk_buff *skb)
+{
+	kuid_t uid;
+	u_int seq;
+	void *data;
+	struct nlmsghdr *nlh;
+	char reply_data[16];
+
+	MYDBG("");
+	nlh = (struct nlmsghdr *)skb->data;
+
+	/* global here */
+	g_pid = NETLINK_CREDS(skb)->pid;
+	uid = NETLINK_CREDS(skb)->uid;
+	seq = nlh->nlmsg_seq;
+	data = NLMSG_DATA(nlh);
+	MYDBG("recv skb from user space pid:%d seq:%d\n",
+	      g_pid, seq);
+	MYDBG("data is :%s\n", (char *)data);
+
+
+	sprintf(reply_data, "%d", g_pid);
+	udp_reply(g_pid, 0, reply_data);
+}
+
+struct netlink_kernel_cfg nl_cfg = {
+	.input = udp_receive,
+};
+
+
+static ssize_t default_show(struct kobject *kobj, struct attribute *attr,
+			    char *buf)
+{
+	struct my_attr *a = container_of(attr, struct my_attr, attr);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", a->value);
+}
+
+static ssize_t default_store(struct kobject *kobj, struct attribute *attr,
+			     const char *buf, size_t len)
+{
+	struct my_attr *a = container_of(attr, struct my_attr, attr);
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	a->value = val;
+	return len;
+}
+
+static const struct sysfs_ops myops = {
+	.show = default_show,
+	.store = default_store,
+};
+
+static struct kobj_type mytype = {
+	.sysfs_ops = &myops,
+	.default_attrs = myattr,
+};
+
+struct kobject *mykobj;
+void create_icusb_sysfs_attr(void)
+{
+	mykobj = kzalloc(sizeof(*mykobj), GFP_KERNEL);
+	if (mykobj) {
+		MYDBG("");
+		kobject_init(mykobj, &mytype);
+		if (kobject_add(mykobj, NULL, "%s", "icusb_attr")) {
+			MYDBG("Sysfs creation failed\n");
+			kobject_put(mykobj);
+			mykobj = NULL;
+		}
+	}
+}
+
+static ssize_t musbfsh_ic_tmp_proc_entry(struct file *file_ptr,
+					 const char __user *user_buffer,
+					 size_t count, loff_t *position)
+{
+	char cmd[64];
+	int ret;
+
+	if (count == 0 || count > sizeof(cmd))
+		return -EINVAL;
+
+	ret = copy_from_user(cmd, user_buffer, count);
+	if (ret != 0)
+		return -EFAULT;
+
+	if (cmd[0] == '4') {
+		MYDBG("");
+		udp_reply(g_pid, 0, "HELLO, SS7_IC_USB!!!");
+	}
+
+	MYDBG("");
+
+	return count;
+}
+
+const struct file_operations musbfsh_ic_tmp_proc_fops = {
+	.write = musbfsh_ic_tmp_proc_entry
+};
+
+void create_ic_tmp_entry(void)
+{
+	struct proc_dir_entry *pr_entry;
+
+	if (proc_drv_icusb_dir_entry == NULL) {
+		MYDBG("[%s]: /proc/driver/icusb not exist\n", __func__);
+		return;
+	}
+
+	pr_entry =
+		proc_create("IC_TMP_ENTRY", 0660, proc_drv_icusb_dir_entry,
+			&musbfsh_ic_tmp_proc_fops);
+	if (pr_entry)
+		MYDBG("add /proc/IC_TMP_ENTRY ok\n");
+	else
+		MYDBG("add /proc/IC_TMP_ENTRY fail\n");
+}
+
+static ssize_t musbfsh_ic_usb_cmd_proc_status_read(struct file *file_ptr,
+						   char __user *user_buffer,
+						   size_t count,
+						   loff_t *position)
+{
+	MYDBG("");
+
+	if (*position > 0)
+		return 0;	/* EOF for repeated reads */
+
+	if (count < sizeof(g_ic_usb_status))
+		return -EINVAL;
+
+	if (copy_to_user(user_buffer,
+			 &g_ic_usb_status, sizeof(g_ic_usb_status)) != 0)
+		return -EFAULT;
+
+	*position += sizeof(g_ic_usb_status);
+	return sizeof(g_ic_usb_status);
+}
+
+
+ssize_t musbfsh_ic_usb_cmd_proc_entry(struct file *file_ptr,
+				      const char __user *user_buffer,
+				      size_t count, loff_t *position)
+{
+	int ret;
+
+	if (count > sizeof(ic_cmd))
+		return -EINVAL;
+
+	ret = copy_from_user((char *)&ic_cmd, user_buffer, count);
+	if (ret != 0)
+		return -EFAULT;
+
+	MYDBG("type : %x, length : %x, data[0] : %x\n",
+	      ic_cmd.type, ic_cmd.length, ic_cmd.data[0]);
+
+	switch (ic_cmd.type) {
+	case USB11_SESSION_CONTROL:
+		MYDBG("");
+		usb11_session_control(ic_cmd.data[0]);
+		break;
+	case USB11_INIT_PHY_BY_VOLTAGE:
+		MYDBG("");
+		usb11_init_phy_by_voltage(ic_cmd.data[0]);
+		break;
+	case USB11_WAIT_DISCONNECT_DONE:
+		MYDBG("");
+		usb11_wait_disconnect_done(ic_cmd.data[0]);
+		break;
+		/*--- special purpose ---*/
+	case 's':
+		MYDBG("create sysfs\n");
+		create_icusb_sysfs_attr();
+		break;
+	case 't':
+		MYDBG("create tmp proc\n");
+		create_ic_tmp_entry();
+		break;
+	}
+	return count;
+}
+
+static const struct file_operations musbfsh_ic_usb_cmd_proc_fops = {
+	.read = musbfsh_ic_usb_cmd_proc_status_read,
+	.write = musbfsh_ic_usb_cmd_proc_entry
+};
+
+void create_ic_usb_cmd_proc_entry(void)
+{
+	struct proc_dir_entry *prEntry;
+
+	MYDBG("");
+	proc_drv_icusb_dir_entry = proc_mkdir("driver/icusb", NULL);
+
+	if (proc_drv_icusb_dir_entry == NULL) {
+		MYDBG("[%s]: mkdir /proc/driver/icusb failed\n", __func__);
+		return;
+	}
+
+	prEntry =
+	    proc_create("IC_USB_CMD_ENTRY", 0660, proc_drv_icusb_dir_entry,
+			&musbfsh_ic_usb_cmd_proc_fops);
+	if (prEntry) {
+		MYDBG("add IC_USB_CMD_ENTRY ok\n");
+		netlink_sock = netlink_kernel_create(&init_net,
+						     NETLINK_USERSOCK, &nl_cfg);
+	} else {
+		MYDBG("add IC_USB_CMD_ENTRY fail\n");
+	}
+}
+
+void set_icusb_phy_power_negotiation(struct usb_device *udev)
+{
+	if (power_resume_time_neogo_attr.value) {
+		icusb_power_negotiation(udev);
+		icusb_resume_time_negotiation(udev);
+	} else {
+		set_icusb_phy_power_negotiation_ok();
+	}
+}
diff --git a/drivers/misc/mediatek/usb11/musbfsh_icusb.h b/drivers/misc/mediatek/usb11/musbfsh_icusb.h
new file mode 100644
index 0000000..355a2d3
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_icusb.h
@@ -0,0 +1,151 @@
+/*
+ * ICUSB - for MUSB Host Driver defines
+ *
+ * Copyright 2015 Mediatek Inc.
+ *	Marvin Lin <marvin.lin@mediatek.com>
+ *	Arvin Wang <arvin.wang@mediatek.com>
+ *	Vincent Fan <vincent.fan@mediatek.com>
+ *	Bryant Lu <bryant.lu@mediatek.com>
+ *	Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *	Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MUSBFSH_ICUSB_H
+#define _MUSBFSH_ICUSB_H
+
+enum PHY_VOLTAGE_TYPE {
+	VOL_18 = 0,
+	VOL_33,
+	VOL_50,
+};
+
+enum SESSION_CONTROL_ACTION {
+	STOP_SESSION = 0,
+	START_SESSION,
+};
+
+enum WAIT_DISCONNECT_DONE_ACTION {
+	WAIT_DISCONNECT_DONE_DFT_ACTION = 0,
+};
+
+#define IC_USB_CMD_LEN 255
+struct IC_USB_CMD {
+	unsigned char type;
+	unsigned char length;
+	unsigned char data[IC_USB_CMD_LEN];
+};
+
+enum IC_USB_CMD_TYPE {
+	USB11_SESSION_CONTROL = 0,
+	USB11_INIT_PHY_BY_VOLTAGE,
+	USB11_WAIT_DISCONNECT_DONE,
+};
+
+/* ICUSB feature list */
+/* --- sysfs controlable feature --- */
+#define MTK_ICUSB_POWER_AND_RESUME_TIME_NEOGO_SUPPORT
+#define MTK_ICUSB_SKIP_SESSION_REQ
+#define MTK_ICUSB_SKIP_ENABLE_SESSION
+#define MTK_ICUSB_SKIP_MAC_INIT
+#define MTK_ICUSB_RESISTOR_CONTROL
+#define MTK_ICUSB_HW_DBG
+/* #define MTK_ICUSB_SKIP_PORT_PM */
+
+/* --- non sysfs controlable feature --- */
+/* #define MTK_ICUSB_TAKE_WAKE_LOCK */
+/* #define MTK_ICUSB_BABBLE_RECOVER */
+
+struct my_attr {
+	struct attribute attr;
+	int value;
+};
+
+/* power negotiation */
+#define IC_USB_REQ_TYPE_GET_IFACE_POWER 0xC0	/* Get interface power */
+#define IC_USB_REQ_TYPE_SET_IFACE_POWER 0x40	/* Set interface power */
+#define IC_USB_REQ_GET_IFACE_POWER 0x01		/* Get interface power */
+#define IC_USB_REQ_SET_IFACE_POWER 0x02		/* Set interface power */
+#define IC_USB_WVALUE_POWER_NEGOTIATION 0
+#define IC_USB_WINDEX_POWER_NEGOTIATION 0
+#define IC_USB_LEN_POWER_NEGOTIATION 2
+#define IC_USB_PREFER_CLASSB_ENABLE_BIT 0x80
+#define IC_USB_RETRIES_POWER_NEGOTIATION 3
+#define IC_USB_CLASSB (1<<1)
+#define IC_USB_CLASSC (1<<2)
+#define IC_USB_CURRENT 100	/* in 2 mA unit, 100 denotes 200 mA */
+
+/* resume-time negotiation */
+#define IC_USB_REQ_TYPE_GET_INTERFACE_RESUME_TIME  0xC0
+#define IC_USB_REQ_GET_INTERFACE_RESUME_TIME 0x03
+#define IC_USB_WVALUE_RESUME_TIME_NEGOTIATION 0
+#define IC_USB_WINDEX_RESUME_TIME_NEGOTIATION 0
+#define IC_USB_LEN_RESUME_TIME_NEGOTIATION 3
+#define IC_USB_RETRIES_RESUME_TIME_NEGOTIATION 3
+
+/* =====================
+ * ic_usb_status layout:
+ *	Byte 4 : wait-disconnect status
+ *	Byte 3, Byte 2 : get interface power request data field
+ *	Byte 1 : power negotiation result
+ * =====================
+ */
+
+#define PREFER_VOL_STS_SHIFT (0)
+#define PREFER_VOL_STS_MSK (0x3)
+
+#define PREFER_VOL_NOT_INITED  0x0
+#define PREFER_VOL_PWR_NEG_FAIL 0x1
+#define PREFER_VOL_PWR_NEG_OK 0x2
+
+#define PREFER_VOL_CLASS_SHIFT (8)
+#define PREFER_VOL_CLASS_MSK (0xff)
+
+#define USB_PORT1_STS_SHIFT (24)
+#define USB_PORT1_STS_MSK (0xf)
+
+#define USB_PORT1_DISCONNECTING 0x0
+#define USB_PORT1_DISCONNECT_DONE 0x1
+#define USB_PORT1_CONNECT 0x2
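+/*
+ * Example: g_ic_usb_status == 0x01000002 decodes as
+ * (USB_PORT1_DISCONNECT_DONE << USB_PORT1_STS_SHIFT) |
+ * (PREFER_VOL_PWR_NEG_OK << PREFER_VOL_STS_SHIFT).
+ */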
+
+extern struct my_attr power_resume_time_neogo_attr;
+extern struct my_attr skip_session_req_attr;
+extern struct my_attr skip_enable_session_attr;
+extern struct my_attr skip_mac_init_attr;
+extern struct my_attr resistor_control_attr;
+extern struct my_attr hw_dbg_attr;
+extern struct my_attr skip_port_pm_attr;
+
+extern void musbfsh_start_session(void);
+extern void musbfsh_start_session_pure(void);
+extern void musbfsh_stop_session(void);
+extern void musbfsh_init_phy_by_voltage(enum PHY_VOLTAGE_TYPE);
+extern enum PHY_VOLTAGE_TYPE get_usb11_phy_voltage(void);
+extern void mt65xx_usb11_mac_reset_and_phy_stress_set(void);
+extern int is_usb11_enabled(void);
+
+#define MYDBG(fmt, args...) pr_warn("MTK_ICUSB [DBG], <%s(), %d> " fmt, \
+				__func__, __LINE__, ## args)
+#endif
diff --git a/drivers/misc/mediatek/usb11/musbfsh_io.h b/drivers/misc/mediatek/usb11/musbfsh_io.h
new file mode 100644
index 0000000..62705b7
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_io.h
@@ -0,0 +1,177 @@
+/*
+ * MUSB OTG driver register I/O
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * Copyright 2015 Mediatek Inc.
+ *	Marvin Lin <marvin.lin@mediatek.com>
+ *	Arvin Wang <arvin.wang@mediatek.com>
+ *	Vincent Fan <vincent.fan@mediatek.com>
+ *	Bryant Lu <bryant.lu@mediatek.com>
+ *	Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *	Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSBFSH_LINUX_PLATFORM_ARCH_H__
+#define __MUSBFSH_LINUX_PLATFORM_ARCH_H__
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+extern spinlock_t musbfs_io_lock;
+extern void mt65xx_usb11_clock_enable(bool enable);
+extern bool musbfsh_power;
+/* NOTE:  these offsets are all in bytes */
+
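+/*
+ * Each accessor below follows the same pattern: when musbfsh_power
+ * says the block is powered, access the register directly; otherwise
+ * briefly enable the clock around the access under musbfs_io_lock.
+ */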
+static inline u16 musbfsh_readw(const void __iomem *addr, unsigned offset)
+{
+	u16 rc = 0;
+
+	if (musbfsh_power) {
+		rc = readw(addr + offset);
+	} else {
+		unsigned long flags = 0;
+
+		spin_lock_irqsave(&musbfs_io_lock, flags);
+		mt65xx_usb11_clock_enable(true);
+		/*
+		 * DBG(0, "[MUSBfsh]: access %s FUNC: USB CLK is off 0x%X\n",
+		 * __func__, offset);
+		 */
+		rc = readw(addr + offset);
+		mt65xx_usb11_clock_enable(false);
+		spin_unlock_irqrestore(&musbfs_io_lock, flags);
+	}
+	return rc;
+}
+
+static inline u32 musbfsh_readl(const void __iomem *addr, unsigned offset)
+{
+	u32 rc = 0;
+
+	if (musbfsh_power) {
+		rc = readl(addr + offset);
+	} else {
+		unsigned long flags = 0;
+
+		spin_lock_irqsave(&musbfs_io_lock, flags);
+		mt65xx_usb11_clock_enable(true);
+		/*
+		 * DBG(0, "[MUSBfsh]: access %s FUNC: USB CLK is off 0x%X\n",
+		 * __func__, offset);
+		 */
+		rc = readl(addr + offset);
+		mt65xx_usb11_clock_enable(false);
+		spin_unlock_irqrestore(&musbfs_io_lock, flags);
+	}
+	return rc;
+}
+
+
+static inline void musbfsh_writew(void __iomem *addr, unsigned offset, u16 data)
+{
+	if (musbfsh_power) {
+		writew(data, addr + offset);
+	} else {
+		unsigned long flags = 0;
+
+		spin_lock_irqsave(&musbfs_io_lock, flags);
+		mt65xx_usb11_clock_enable(true);
+		/*
+		 * DBG(0, "[MUSBfsh]: access %s FUNC: USB CLK is off 0x%X\n",
+		 * __func__, offset);
+		 */
+		writew(data, addr + offset);
+		mt65xx_usb11_clock_enable(false);
+		spin_unlock_irqrestore(&musbfs_io_lock, flags);
+	}
+}
+
+static inline void musbfsh_writel(void __iomem *addr, unsigned offset, u32 data)
+{
+	if (musbfsh_power) {
+		writel(data, addr + offset);
+	} else {
+		unsigned long flags = 0;
+
+		spin_lock_irqsave(&musbfs_io_lock, flags);
+		mt65xx_usb11_clock_enable(true);
+		/*
+		 * DBG(0, "[MUSBfsh]: access %s FUNC: USB CLK is off 0x%X\n",
+		 * __func__, offset);
+		 */
+		writel(data, addr + offset);
+		mt65xx_usb11_clock_enable(false);
+		spin_unlock_irqrestore(&musbfs_io_lock, flags);
+	}
+}
+
+
+static inline u8 musbfsh_readb(const void __iomem *addr, unsigned offset)
+{
+	u8 rc = 0;
+
+	if (musbfsh_power) {
+		rc = readb(addr + offset);
+	} else {
+		unsigned long flags = 0;
+
+		spin_lock_irqsave(&musbfs_io_lock, flags);
+		mt65xx_usb11_clock_enable(true);
+		/*
+		 * DBG(0, "[MUSBfsh]: access %s FUNC: USB CLK is off 0x%X\n",
+		 * __func__, offset);
+		 */
+		rc = readb(addr + offset);
+		mt65xx_usb11_clock_enable(false);
+		spin_unlock_irqrestore(&musbfs_io_lock, flags);
+	}
+	return rc;
+}
+
+static inline void musbfsh_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+	if (musbfsh_power) {
+		writeb(data, addr + offset);
+	} else {
+		unsigned long flags = 0;
+
+		spin_lock_irqsave(&musbfs_io_lock, flags);
+		mt65xx_usb11_clock_enable(true);
+		/*
+		 * DBG(0, "[MUSBfsh]: access %s FUNC: USB CLK is off 0x%X\n",
+		 * __func__, offset);
+		 */
+		writeb(data, addr + offset);
+		mt65xx_usb11_clock_enable(false);
+		spin_unlock_irqrestore(&musbfs_io_lock, flags);
+	}
+}
+
+/* NOTE:  these offsets are all in bytes */
+#endif
diff --git a/drivers/misc/mediatek/usb11/musbfsh_qmu.c b/drivers/misc/mediatek/usb11/musbfsh_qmu.c
new file mode 100644
index 0000000..a6271e5
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_qmu.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_MTK_MUSBFSH_QMU_SUPPORT
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+
+#include "musbfsh_core.h"
+#include "musbfsh_host.h"
+#include "musbfsh_hsdma.h"
+/*#include "mtk_musb.h"*/
+#include "musbfsh_qmu.h"
+#include "mtk11_qmu.h"
+
+void __iomem *musbfsh_qmu_base;
+/* debug variable to check musbfsh_qmu_base issue */
+void __iomem *musbfsh_qmu_base_2;
+
+int musbfsh_qmu_init(struct musbfsh *musbfsh)
+{
+	/* set DMA channel 0 burst mode to boost QMU speed */
+	musbfsh_writel(musbfsh->mregs, 0x204, musbfsh_readl(musbfsh->mregs, 0x204) | 0x600);
+	musbfsh_writel((musbfsh->mregs + MUSBFSH_QISAR), 0x30, 0);
+
+	musbfsh_qmu_base = (void __iomem *)(musbfsh->mregs + MUSBFSH_QMUBASE);
+	/* debug variable to check musbfsh_qmu_base issue */
+	musbfsh_qmu_base_2 = (void __iomem *)(musbfsh->mregs + MUSBFSH_QMUBASE);
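+	/* ensure the QMU base pointers are visible before GPD pool setup */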
+	mb();
+
+	if (mtk11_qmu_init_gpd_pool(musbfsh->controller)) {
+		QMU_ERR("[QMU]mtk11_qmu_init_gpd_pool fail\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+void musbfsh_qmu_exit(struct musbfsh *musbfsh)
+{
+	mtk11_qmu_destroy_gpd_pool(musbfsh->controller);
+}
+
+void musbfsh_disable_q_all(struct musbfsh *musbfsh)
+{
+	u32 ep_num;
+
+	QMU_WARN("disable_q_all\n");
+
+	for (ep_num = 1; ep_num <= RXQ_NUM; ep_num++) {
+		if (mtk11_is_qmu_enabled(ep_num, RXQ))
+			mtk11_disable_q(musbfsh, ep_num, 1);
+	}
+	for (ep_num = 1; ep_num <= TXQ_NUM; ep_num++) {
+		if (mtk11_is_qmu_enabled(ep_num, TXQ))
+			mtk11_disable_q(musbfsh, ep_num, 0);
+	}
+}
+
+irqreturn_t musbfsh_q_irq(struct musbfsh *musbfsh)
+{
+	irqreturn_t retval = IRQ_NONE;
+	u32 wQmuVal = musbfsh->int_queue;
+	u32 i;
+
+	QMU_ERR("wQmuVal:0x%x\n", wQmuVal);
+	for (i = 1; i <= MAX_QMU_EP; i++) {
+		if (wQmuVal & DQMU_M_RX_DONE(i))
+			h_mtk11_qmu_done_rx(musbfsh, i);
+
+		if (wQmuVal & DQMU_M_TX_DONE(i))
+			h_mtk11_qmu_done_tx(musbfsh, i);
+	}
+	mtk11_qmu_irq_err(musbfsh, wQmuVal);
+
+	return retval;
+}
+
+void musbfsh_flush_qmu(u32 ep_num, u8 isRx)
+{
+	QMU_DBG("flush %s(%d)\n", isRx ? "RQ" : "TQ", ep_num);
+	mtk11_qmu_stop(ep_num, isRx);
+	mtk11_qmu_reset_gpd_pool(ep_num, isRx);
+}
+
+void musbfsh_restart_qmu(struct musbfsh *musbfsh, u32 ep_num, u8 isRx)
+{
+	QMU_DBG("restart %s(%d)\n", isRx ? "RQ" : "TQ", ep_num);
+	mtk11_flush_ep_csr(musbfsh, ep_num, isRx);
+	mtk11_qmu_enable(musbfsh, ep_num, isRx);
+}
+
+bool musbfsh_is_qmu_stop(u32 ep_num, u8 isRx)
+{
+	void __iomem *base = musbfsh_qmu_base;
+
+	/* debug variable to check musbfsh_qmu_base issue */
+	if (musbfsh_qmu_base != musbfsh_qmu_base_2) {
+		QMU_WARN("musbfsh_qmu_base != musbfsh_qmu_base_2");
+		QMU_WARN("musbfsh_qmu_base = %p, musbfsh_qmu_base_2=%p", musbfsh_qmu_base, musbfsh_qmu_base_2);
+	}
+
+	if (!isRx)
+		return !(MGC_ReadQMU16(base, MGC_O_QMU_TQCSR(ep_num))
+			 & DQMU_QUE_ACTIVE);
+
+	return !(MGC_ReadQMU16(base, MGC_O_QMU_RQCSR(ep_num))
+		 & DQMU_QUE_ACTIVE);
+}
+
+void musbfsh_tx_zlp_qmu(struct musbfsh *musbfsh, u32 ep_num)
+{
+	/* send the ZLP through PIO */
+	void __iomem *epio = musbfsh->endpoints[ep_num].regs;
+	void __iomem *mbase = musbfsh->mregs;
+	int cnt = 50; /* 50*200us, total 10 ms */
+	int is_timeout = 1;
+	u16 csr;
+
+	QMU_WARN("TX ZLP direct sent\n");
+	musbfsh_ep_select(mbase, ep_num);
+
+	/* disable dma for pio */
+	csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+	csr &= ~MUSBFSH_TXCSR_DMAENAB;
+	musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+
+	/* TXPKTRDY */
+	csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+	csr |= MUSBFSH_TXCSR_TXPKTRDY;
+	musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+
+	/* wait ZLP sent */
+	while (cnt--) {
+		csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+		if (!(csr & MUSBFSH_TXCSR_TXPKTRDY)) {
+			is_timeout = 0;
+			break;
+		}
+		udelay(200);
+	}
+
+	/* re-enable dma for qmu */
+	csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+	csr |= MUSBFSH_TXCSR_DMAENAB;
+	musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+
+	if (is_timeout)
+		QMU_ERR("TX ZLP send failed\n");
+	else
+		QMU_WARN("TX ZLP sent done\n");
+}
+
+int mtk11_kick_CmdQ(struct musbfsh *musbfsh, int isRx, struct musbfsh_qh *qh, struct urb *urb)
+{
+	void __iomem        *mbase = musbfsh->mregs;
+	u16 intr_e = 0;
+	struct musbfsh_hw_ep	*hw_ep = qh->hw_ep;
+	void __iomem		*epio = hw_ep->regs;
+	unsigned int offset = 0;
+	u8 bIsIoc;
+	u8 *pBuffer;
+	u32 dwLength;
+	u16 i;
+	u32 gdp_free_count = 0;
+
+	if (!urb) {
+		QMU_WARN("!urb\n");
+		return -1; /* KOBE: should we return a value? */
+	}
+
+	if (!mtk11_is_qmu_enabled(hw_ep->epnum, isRx)) {
+		QMU_INFO("! mtk_is_qmu_enabled\n");
+
+		musbfsh_ep_select(mbase, hw_ep->epnum);
+		mtk11_flush_ep_csr(musbfsh, hw_ep->epnum,  isRx);
+
+		if (isRx) {
+			QMU_INFO("isRX = 1\n");
+			if (qh->type == USB_ENDPOINT_XFER_ISOC) {
+				QMU_INFO("USB_ENDPOINT_XFER_ISOC\n");
+				if (qh->hb_mult == 3)
+					musbfsh_writew(epio, MUSBFSH_RXMAXP, qh->maxpacket|0x1000);
+				else if (qh->hb_mult == 2)
+					musbfsh_writew(epio, MUSBFSH_RXMAXP, qh->maxpacket|0x800);
+				else
+					musbfsh_writew(epio, MUSBFSH_RXMAXP, qh->maxpacket);
+			} else {
+				QMU_INFO("!! USB_ENDPOINT_XFER_ISOC\n");
+				musbfsh_writew(epio, MUSBFSH_RXMAXP, qh->maxpacket);
+			}
+
+			musbfsh_writew(epio, MUSBFSH_RXCSR, MUSBFSH_RXCSR_DMAENAB);
+			/*CC: speed */
+			musbfsh_writeb(epio, MUSBFSH_RXTYPE, qh->type_reg);
+			musbfsh_writeb(epio, MUSBFSH_RXINTERVAL, qh->intv_reg);
+
+			if (musbfsh->is_multipoint) {
+				QMU_INFO("is_multipoint\n");
+				musbfsh_write_rxfunaddr(musbfsh->mregs, hw_ep->epnum, qh->addr_reg);
+				musbfsh_write_rxhubaddr(musbfsh->mregs, hw_ep->epnum, qh->h_addr_reg);
+				musbfsh_write_rxhubport(musbfsh->mregs, hw_ep->epnum, qh->h_port_reg);
+			} else {
+				QMU_INFO("!! is_multipoint\n");
+				musbfsh_writeb(musbfsh->mregs, MUSBFSH_FADDR, qh->addr_reg);
+			}
+
+			/*turn off intrRx*/
+			intr_e = musbfsh_readw(musbfsh->mregs, MUSBFSH_INTRRXE);
+			intr_e = intr_e & (~(1<<(hw_ep->epnum)));
+			musbfsh_writew(musbfsh->mregs, MUSBFSH_INTRRXE, intr_e);
+		} else {
+			musbfsh_writew(epio, MUSBFSH_TXMAXP, qh->maxpacket);
+			musbfsh_writew(epio, MUSBFSH_TXCSR, MUSBFSH_TXCSR_DMAENAB);
+			/*CC: speed?*/
+			musbfsh_writeb(epio, MUSBFSH_TXTYPE, qh->type_reg);
+			musbfsh_writeb(epio, MUSBFSH_TXINTERVAL, qh->intv_reg);
+
+			if (musbfsh->is_multipoint) {
+				QMU_INFO("is_multipoint\n");
+				musbfsh_write_txfunaddr(mbase, hw_ep->epnum, qh->addr_reg);
+				musbfsh_write_txhubaddr(mbase, hw_ep->epnum, qh->h_addr_reg);
+				musbfsh_write_txhubport(mbase, hw_ep->epnum, qh->h_port_reg);
+				/* FIXME if !epnum, do the same for RX ... */
+			} else {
+				QMU_INFO("!! is_multipoint\n");
+				musbfsh_writeb(mbase, MUSBFSH_FADDR, qh->addr_reg);
+			}
+			/* turn off intrTx; this will be reverted by musbfsh_ep_program */
+			intr_e = musbfsh_readw(musbfsh->mregs, MUSBFSH_INTRTXE);
+			intr_e = intr_e & (~(1<<hw_ep->epnum));
+			musbfsh_writew(musbfsh->mregs, MUSBFSH_INTRTXE, intr_e);
+		}
+
+		QMU_INFO("mtk11_qmu_enable\n");
+		mtk11_qmu_enable(musbfsh, hw_ep->epnum, isRx);
+	}
+
+	gdp_free_count = mtk11_qmu_free_gpd_count(isRx, hw_ep->epnum);
+	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
+		QMU_INFO("USB_ENDPOINT_XFER_ISOC\n");
+		pBuffer = (uint8_t *)urb->transfer_dma;
+
+		if (gdp_free_count < urb->number_of_packets) {
+			QMU_INFO("gdp_free_count:%d, number_of_packets:%d\n", gdp_free_count, urb->number_of_packets);
+			musbfsh_bug();
+		}
+		for (i = 0; i < urb->number_of_packets; i++) {
+			urb->iso_frame_desc[i].status = 0;
+			offset = urb->iso_frame_desc[i].offset;
+			dwLength = urb->iso_frame_desc[i].length;
+			/* If interrupt on complete ? */
+			bIsIoc = (i == (urb->number_of_packets-1)) ? 1 : 0;
+			QMU_INFO("mtk11_qmu_insert_task\n");
+			mtk11_qmu_insert_task(hw_ep->epnum, isRx, pBuffer+offset, dwLength, 0, bIsIoc);
+
+			mtk11_qmu_resume(hw_ep->epnum, isRx);
+		}
+
+		if (mtk11_host_qmu_max_active_isoc_gpd < mtk11_qmu_used_gpd_count(isRx, hw_ep->epnum))
+			mtk11_host_qmu_max_active_isoc_gpd = mtk11_qmu_used_gpd_count(isRx, hw_ep->epnum);
+
+		if (mtk11_host_qmu_max_number_of_pkts < urb->number_of_packets)
+			mtk11_host_qmu_max_number_of_pkts = urb->number_of_packets;
+
+		{
+			static DEFINE_RATELIMIT_STATE(ratelimit, 1 * HZ, 1);
+			static int skip_cnt;
+
+			if (__ratelimit(&ratelimit)) {
+				QMU_INFO("max_isoc gpd:%d, max_pkts:%d, skip_cnt:%d\n",
+						mtk11_host_qmu_max_active_isoc_gpd,
+						mtk11_host_qmu_max_number_of_pkts,
+						skip_cnt);
+				skip_cnt = 0;
+			} else
+				skip_cnt++;
+		}
+	} else {
+		/* Must be the bulk transfer type */
+		QMU_WARN("non isoc\n");
+		pBuffer = (uint8_t *)urb->transfer_dma;
+		if (urb->transfer_buffer_length < QMU_RX_SPLIT_THRE) {
+			if (gdp_free_count < 1) {
+				QMU_INFO("gdp_free_count:%d, number_of_packets:%d\n",
+						gdp_free_count, urb->number_of_packets);
+				musbfsh_bug();
+			}
+			QMU_INFO("urb->transfer_buffer_length : %d\n", urb->transfer_buffer_length);
+
+			dwLength = urb->transfer_buffer_length;
+			bIsIoc = 1;
+
+			mtk11_qmu_insert_task(hw_ep->epnum, isRx, pBuffer+offset, dwLength, 0, bIsIoc);
+			mtk11_qmu_resume(hw_ep->epnum, isRx);
+		} else {
+			/* reuse isoc urb->number_of_packets */
+			urb->number_of_packets =
+				((urb->transfer_buffer_length) + QMU_RX_SPLIT_BLOCK_SIZE-1)/(QMU_RX_SPLIT_BLOCK_SIZE);
+			if (gdp_free_count < urb->number_of_packets) {
+				QMU_INFO("gdp_free_count:%d, number_of_packets:%d\n",
+						gdp_free_count, urb->number_of_packets);
+				musbfsh_bug();
+			}
+			for (i = 0; i < urb->number_of_packets; i++) {
+				offset = QMU_RX_SPLIT_BLOCK_SIZE*i;
+				dwLength = QMU_RX_SPLIT_BLOCK_SIZE;
+
+				/* If interrupt on complete ? */
+				bIsIoc = (i == (urb->number_of_packets-1)) ? 1 : 0;
+				dwLength = (i == (urb->number_of_packets-1)) ?
+					((urb->transfer_buffer_length) % QMU_RX_SPLIT_BLOCK_SIZE) : dwLength;
+				if (dwLength == 0)
+					dwLength = QMU_RX_SPLIT_BLOCK_SIZE;
+
+				mtk11_qmu_insert_task(hw_ep->epnum, isRx, pBuffer+offset, dwLength, 0, bIsIoc);
+				mtk11_qmu_resume(hw_ep->epnum, isRx);
+			}
+		}
+	}
+	QMU_INFO("\n");
+	return 0;
+}
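+
+/*
+ * Worked example of the bulk split math above, with illustrative
+ * values: if QMU_RX_SPLIT_BLOCK_SIZE were 32 KB and
+ * transfer_buffer_length 80 KB, number_of_packets would be
+ * (80K + 32K - 1) / 32K = 3; the first two GPDs carry 32 KB each and
+ * the last carries 80K % 32K = 16 KB and is the only one flagged IOC.
+ * When the length is an exact multiple of the block size the modulo
+ * yields 0, so dwLength falls back to one full block.
+ */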
+#endif
diff --git a/drivers/misc/mediatek/usb11/musbfsh_qmu.h b/drivers/misc/mediatek/usb11/musbfsh_qmu.h
new file mode 100644
index 0000000..0466880
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_qmu.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MUSBFSH_QMU_H_
+#define _MUSBFSH_QMU_H_
+
+#include "musbfsh_core.h"		/* for struct musb */
+
+#define MUSBFSH_QMUBASE	(0x800)
+#define MUSBFSH_QISAR	(0xc00)
+#define MUSBFSH_QIMR	(0xc04)
+
+extern void __iomem *musbfsh_qmu_base;
+
+extern int musbfsh_qmu_init(struct musbfsh *musbfsh);
+extern void musbfsh_qmu_exit(struct musbfsh *musbfsh);
+extern void musbfsh_disable_q_all(struct musbfsh *musbfsh);
+extern irqreturn_t musbfsh_q_irq(struct musbfsh *musbfsh);
+extern void musbfsh_flush_qmu(u32 ep_num, u8 isRx);
+extern void musbfsh_restart_qmu(struct musbfsh *musbfsh, u32 ep_num, u8 isRx);
+extern bool musbfsh_is_qmu_stop(u32 ep_num, u8 isRx);
+extern void musbfsh_tx_zlp_qmu(struct musbfsh *musbfsh, u32 ep_num);
+extern void mtk11_qmu_enable(struct musbfsh *musbfsh, u8 EP_Num, u8 isRx);
+extern int mtk11_kick_CmdQ(struct musbfsh *musbfsh, int isRx, struct musbfsh_qh *qh, struct urb *urb);
+#endif
diff --git a/drivers/misc/mediatek/usb11/musbfsh_regs.h b/drivers/misc/mediatek/usb11/musbfsh_regs.h
new file mode 100644
index 0000000..14caffda9
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_regs.h
@@ -0,0 +1,417 @@
+/*
+ * MUSB OTG driver register defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * Copyright 2015 Mediatek Inc.
+ *	Marvin Lin <marvin.lin@mediatek.com>
+ *	Arvin Wang <arvin.wang@mediatek.com>
+ *	Vincent Fan <vincent.fan@mediatek.com>
+ *	Bryant Lu <bryant.lu@mediatek.com>
+ *	Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *	Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSBFSH_REGS_H__
+#define __MUSBFSH_REGS_H__
+
+#define MUSBFSH_EP0_FIFOSIZE	64	/* This is non-configurable */
+
+/*
+ * MUSB Register bits
+ */
+
+/* POWER */
+#define MUSBFSH_POWER_ISOUPDATE	0x80
+#define MUSBFSH_POWER_SOFTCONN	0x40
+#define MUSBFSH_POWER_HSENAB	0x20
+#define MUSBFSH_POWER_HSMODE	0x10
+#define MUSBFSH_POWER_RESET	0x08
+#define MUSBFSH_POWER_RESUME	0x04
+#define MUSBFSH_POWER_SUSPENDM	0x02
+#define MUSBFSH_POWER_ENSUSPEND	0x01
+
+/* INTRUSB */
+#define MUSBFSH_INTR_SUSPEND	0x01
+#define MUSBFSH_INTR_RESUME	0x02
+#define MUSBFSH_INTR_RESET	0x04
+#define MUSBFSH_INTR_BABBLE	0x04
+#define MUSBFSH_INTR_SOF	0x08
+#define MUSBFSH_INTR_CONNECT	0x10
+#define MUSBFSH_INTR_DISCONNECT	0x20
+#define MUSBFSH_INTR_SESSREQ	0x40
+#define MUSBFSH_INTR_VBUSERROR	0x80	/* For SESSION end */
+
+/* DEVCTL */
+#define MUSBFSH_DEVCTL_BDEVICE	0x80
+#define MUSBFSH_DEVCTL_FSDEV	0x40
+#define MUSBFSH_DEVCTL_LSDEV	0x20
+#define MUSBFSH_DEVCTL_VBUS	0x18
+#define MUSBFSH_DEVCTL_VBUS_SHIFT	3
+#define MUSBFSH_DEVCTL_HM	0x04
+#define MUSBFSH_DEVCTL_HR	0x02
+#define MUSBFSH_DEVCTL_SESSION	0x01
+
+/* MUSB ULPI VBUSCONTROL */
+#define MUSBFSH_ULPI_USE_EXTVBUS	0x01
+#define MUSBFSH_ULPI_USE_EXTVBUSIND 0x02
+/* ULPI_REG_CONTROL */
+#define MUSBFSH_ULPI_REG_REQ	(1 << 0)
+#define MUSBFSH_ULPI_REG_CMPLT	(1 << 1)
+#define MUSBFSH_ULPI_RDN_WR	(1 << 2)
+
+/* TESTMODE */
+#define MUSBFSH_TEST_FORCE_HOST	0x80
+#define MUSBFSH_TEST_FIFO_ACCESS	0x40
+#define MUSBFSH_TEST_FORCE_FS	0x20
+#define MUSBFSH_TEST_FORCE_HS	0x10
+#define MUSBFSH_TEST_PACKET	0x08
+#define MUSBFSH_TEST_K		0x04
+#define MUSBFSH_TEST_J		0x02
+#define MUSBFSH_TEST_SE0_NAK	0x01
+
+/* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */
+#define MUSBFSH_FIFOSZ_DPB	0x10
+/* Allocation size (8, 16, 32, ... 4096) */
+#define MUSBFSH_FIFOSZ_SIZE	0x0f
+
+/* CSR0 */
+#define MUSBFSH_CSR0_FLUSHFIFO	0x0100
+#define MUSBFSH_CSR0_TXPKTRDY	0x0002
+#define MUSBFSH_CSR0_RXPKTRDY	0x0001
+
+/* CSR0 in Peripheral mode */
+#define MUSBFSH_CSR0_P_SVDSETUPEND	0x0080
+#define MUSBFSH_CSR0_P_SVDRXPKTRDY	0x0040
+#define MUSBFSH_CSR0_P_SENDSTALL	0x0020
+#define MUSBFSH_CSR0_P_SETUPEND		0x0010
+#define MUSBFSH_CSR0_P_DATAEND		0x0008
+#define MUSBFSH_CSR0_P_SENTSTALL	0x0004
+
+/* CSR0 in Host mode */
+#define MUSBFSH_CSR0_H_DIS_PING		0x0800
+#define MUSBFSH_CSR0_H_WR_DATATOGGLE	0x0400	/* Set to allow setting: */
+#define MUSBFSH_CSR0_H_DATATOGGLE	0x0200	/* Data toggle control */
+#define MUSBFSH_CSR0_H_NAKTIMEOUT	0x0080
+#define MUSBFSH_CSR0_H_STATUSPKT	0x0040
+#define MUSBFSH_CSR0_H_REQPKT		0x0020
+#define MUSBFSH_CSR0_H_ERROR		0x0010
+#define MUSBFSH_CSR0_H_SETUPPKT		0x0008
+#define MUSBFSH_CSR0_H_RXSTALL		0x0004
+
+/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSBFSH_CSR0_P_WZC_BITS	\
+	(MUSBFSH_CSR0_P_SENTSTALL)
+#define MUSBFSH_CSR0_H_WZC_BITS	\
+	(MUSBFSH_CSR0_H_NAKTIMEOUT | MUSBFSH_CSR0_H_RXSTALL \
+	| MUSBFSH_CSR0_RXPKTRDY)
+
+/* TxType/RxType */
+#define MUSBFSH_TYPE_SPEED		0xc0
+#define MUSBFSH_TYPE_SPEED_SHIFT	6
+#define MUSBFSH_TYPE_PROTO		0x30	/* Implicitly zero for ep0 */
+#define MUSBFSH_TYPE_PROTO_SHIFT	4
+#define MUSBFSH_TYPE_REMOTE_END		0xf	/* Implicitly zero for ep0 */
+
+/* CONFIGDATA */
+#define MUSBFSH_CONFIGDATA_MPRXE	0x80	/* Auto bulk pkt combining */
+#define MUSBFSH_CONFIGDATA_MPTXE	0x40	/* Auto bulk pkt splitting */
+#define MUSBFSH_CONFIGDATA_BIGENDIAN	0x20
+#define MUSBFSH_CONFIGDATA_HBRXE	0x10	/* HB-ISO for RX */
+#define MUSBFSH_CONFIGDATA_HBTXE	0x08	/* HB-ISO for TX */
+#define MUSBFSH_CONFIGDATA_DYNFIFO	0x04	/* Dynamic FIFO sizing */
+#define MUSBFSH_CONFIGDATA_SOFTCONE	0x02	/* SoftConnect */
+#define MUSBFSH_CONFIGDATA_UTMIDW	0x01	/* Data width 0/1 => 8/16bits */
+
+/* TXCSR in Peripheral and Host mode */
+#define MUSBFSH_TXCSR_AUTOSET		0x8000
+#define MUSBFSH_TXCSR_DMAENAB		0x1000
+#define MUSBFSH_TXCSR_FRCDATATOG	0x0800
+#define MUSBFSH_TXCSR_DMAMODE		0x0400
+#define MUSBFSH_TXCSR_CLRDATATOG	0x0040
+#define MUSBFSH_TXCSR_FLUSHFIFO		0x0008
+#define MUSBFSH_TXCSR_FIFONOTEMPTY	0x0002
+#define MUSBFSH_TXCSR_TXPKTRDY		0x0001
+
+/* TXCSR in Peripheral mode */
+#define MUSBFSH_TXCSR_P_ISO		0x4000
+#define MUSBFSH_TXCSR_P_INCOMPTX	0x0080
+#define MUSBFSH_TXCSR_P_SENTSTALL	0x0020
+#define MUSBFSH_TXCSR_P_SENDSTALL	0x0010
+#define MUSBFSH_TXCSR_P_UNDERRUN	0x0004
+
+/* TXCSR in Host mode */
+#define MUSBFSH_TXCSR_H_WR_DATATOGGLE	0x0200
+#define MUSBFSH_TXCSR_H_DATATOGGLE	0x0100
+#define MUSBFSH_TXCSR_H_NAKTIMEOUT	0x0080
+#define MUSBFSH_TXCSR_H_RXSTALL		0x0020
+#define MUSBFSH_TXCSR_H_ERROR		0x0004
+
+/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSBFSH_TXCSR_P_WZC_BITS	\
+	(MUSBFSH_TXCSR_P_INCOMPTX | MUSBFSH_TXCSR_P_SENTSTALL \
+	| MUSBFSH_TXCSR_P_UNDERRUN | MUSBFSH_TXCSR_FIFONOTEMPTY)
+#define MUSBFSH_TXCSR_H_WZC_BITS	\
+	(MUSBFSH_TXCSR_H_NAKTIMEOUT | MUSBFSH_TXCSR_H_RXSTALL \
+	| MUSBFSH_TXCSR_H_ERROR | MUSBFSH_TXCSR_FIFONOTEMPTY)
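+
+/*
+ * Illustrative read-modify-write sketch (not taken from this patch):
+ * OR the WZC mask back in before writing CSR so that other
+ * write-zero-to-clear flags are not clobbered while clearing one of
+ * them, e.g. to clear a host-side stall:
+ *
+ *	csr = musbfsh_readw(epio, MUSBFSH_TXCSR);
+ *	csr |= MUSBFSH_TXCSR_H_WZC_BITS;
+ *	csr &= ~MUSBFSH_TXCSR_H_RXSTALL;
+ *	musbfsh_writew(epio, MUSBFSH_TXCSR, csr);
+ */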
+
+/* RXCSR in Peripheral and Host mode */
+#define MUSBFSH_RXCSR_AUTOCLEAR		0x8000
+#define MUSBFSH_RXCSR_DMAENAB		0x2000
+#define MUSBFSH_RXCSR_DISNYET		0x1000
+#define MUSBFSH_RXCSR_PID_ERR		0x1000
+#define MUSBFSH_RXCSR_DMAMODE		0x0800
+#define MUSBFSH_RXCSR_INCOMPRX		0x0100
+#define MUSBFSH_RXCSR_CLRDATATOG	0x0080
+#define MUSBFSH_RXCSR_FLUSHFIFO		0x0010
+#define MUSBFSH_RXCSR_DATAERROR		0x0008
+#define MUSBFSH_RXCSR_FIFOFULL		0x0002
+#define MUSBFSH_RXCSR_RXPKTRDY		0x0001
+
+/* RXCSR in Peripheral mode */
+#define MUSBFSH_RXCSR_P_ISO		0x4000
+#define MUSBFSH_RXCSR_P_SENTSTALL	0x0040
+#define MUSBFSH_RXCSR_P_SENDSTALL	0x0020
+#define MUSBFSH_RXCSR_P_OVERRUN		0x0004
+
+/* RXCSR in Host mode */
+#define MUSBFSH_RXCSR_H_AUTOREQ		0x4000
+#define MUSBFSH_RXCSR_H_WR_DATATOGGLE	0x0400
+#define MUSBFSH_RXCSR_H_DATATOGGLE	0x0200
+#define MUSBFSH_RXCSR_H_RXSTALL		0x0040
+#define MUSBFSH_RXCSR_H_REQPKT		0x0020
+#define MUSBFSH_RXCSR_H_ERROR		0x0004
+
+/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSBFSH_RXCSR_P_WZC_BITS	\
+	(MUSBFSH_RXCSR_P_SENTSTALL | MUSBFSH_RXCSR_P_OVERRUN \
+	| MUSBFSH_RXCSR_RXPKTRDY)
+#define MUSBFSH_RXCSR_H_WZC_BITS	\
+	(MUSBFSH_RXCSR_H_RXSTALL | MUSBFSH_RXCSR_H_ERROR \
+	| MUSBFSH_RXCSR_DATAERROR | MUSBFSH_RXCSR_RXPKTRDY)
+
+/* HUBADDR */
+#define MUSBFSH_HUBADDR_MULTI_TT	0x80
+
+/*
+ * Common USB registers
+ */
+
+#define MUSBFSH_FADDR		0x00	/* 8-bit */
+#define MUSBFSH_POWER		0x01	/* 8-bit */
+
+#define MUSBFSH_INTRTX		0x02	/* 16-bit */
+#define MUSBFSH_INTRRX		0x04
+#define MUSBFSH_INTRTXE		0x06
+#define MUSBFSH_INTRRXE		0x08
+#define MUSBFSH_INTRUSB		0x0A	/* 8 bit */
+#define MUSBFSH_INTRUSBE	0x0B	/* 8 bit */
+#define MUSBFSH_FRAME		0x0C
+#define MUSBFSH_INDEX		0x0E	/* 8 bit */
+#define MUSBFSH_TESTMODE	0x0F	/* 8 bit */
+
+/* Get offset for a given FIFO from musbfsh->mregs */
+#define MUSBFSH_FIFO_OFFSET(epnum)	(0x20 + ((epnum) * 4))
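+/* e.g. the EP2 FIFO lives at 0x20 + (2 * 4) = 0x28 */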
+
+/*
+ * Additional Control Registers
+ */
+
+#define MUSBFSH_DEVCTL		0x60	/* 8 bit */
+
+/* These are always controlled through the INDEX register */
+#define MUSBFSH_TXFIFOSZ	0x62	/* 8-bit (see masks) */
+#define MUSBFSH_RXFIFOSZ	0x63	/* 8-bit (see masks) */
+#define MUSBFSH_TXFIFOADD	0x64	/* 16-bit offset shifted right 3 */
+#define MUSBFSH_RXFIFOADD	0x66	/* 16-bit offset shifted right 3 */
+
+#define MUSBFSH_ULPI_REG_DATA  0x74 /* 8 bit */
+
+#define MUSBFSH_EPINFO		0x78	/* 8 bit */
+#define MUSBFSH_RAMINFO		0x79	/* 8 bit */
+#define MUSBFSH_LINKINFO	0x7a	/* 8 bit */
+#define MUSBFSH_VPLEN		0x7b	/* 8 bit */
+#define MUSBFSH_HS_EOF1		0x7c	/* 8 bit */
+#define MUSBFSH_FS_EOF1		0x7d	/* 8 bit */
+#define MUSBFSH_LS_EOF1		0x7e	/* 8 bit */
+
+#define MUSBFSH_RXTOG		0x80	/* 16 bit */
+#define MUSBFSH_RXTOGEN		0x82	/* 16 bit */
+#define MUSBFSH_TXTOG		0x84	/* 16 bit */
+#define MUSBFSH_TXTOGEN		0x86	/* 16 bit */
+
+/* Offsets to endpoint registers */
+#define MUSBFSH_TXMAXP		0x00
+#define MUSBFSH_TXCSR		0x02
+#define MUSBFSH_CSR0		MUSBFSH_TXCSR	/* Re-used for EP0 */
+#define MUSBFSH_RXMAXP		0x04
+#define MUSBFSH_RXCSR		0x06
+#define MUSBFSH_RXCOUNT		0x08
+#define MUSBFSH_COUNT0		MUSBFSH_RXCOUNT	/* Re-used for EP0 */
+#define MUSBFSH_TXTYPE		0x0A
+#define MUSBFSH_TYPE0		MUSBFSH_TXTYPE	/* Re-used for EP0 */
+#define MUSBFSH_TXINTERVAL	0x0B
+#define MUSBFSH_NAKLIMIT0	MUSBFSH_TXINTERVAL	/* Re-used for EP0 */
+#define MUSBFSH_RXTYPE		0x0C
+#define MUSBFSH_RXINTERVAL	0x0D
+#define MUSBFSH_FIFOSIZE	0x0F
+#define MUSBFSH_CONFIGDATA	MUSBFSH_FIFOSIZE	/* Re-used for EP0 */
+
+/* Offsets to endpoint registers in indexed model (using INDEX register) */
+#define MUSBFSH_INDEXED_OFFSET(_epnum, _offset)	\
+	(0x10 + (_offset))
+
+/* Offsets to endpoint registers in flat models */
+#define MUSBFSH_FLAT_OFFSET(_epnum, _offset)	\
+	(0x100 + (0x10*(_epnum)) + (_offset))
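+
+/*
+ * Example: in the flat model, RXCSR of endpoint 3 sits at
+ * 0x100 + (0x10 * 3) + 0x06 = 0x136; in the indexed model the driver
+ * first writes 3 to MUSBFSH_INDEX and then accesses the same register
+ * at 0x10 + 0x06 = 0x16.
+ */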
+
+#define MUSBFSH_TXCSR_MODE	0x2000
+
+/* "bus control"/target registers, for host side multipoint (external hubs) */
+#define MUSBFSH_TXFUNCADDR	0x0480
+#define MUSBFSH_TXHUBADDR	0x0482
+
+#define MUSBFSH_RXFUNCADDR	0x0484
+#define MUSBFSH_RXHUBADDR	0x0486
+
+#define MUSBFSH_BUSCTL_OFFSET(_epnum, _offset) \
+	(0x80 + (8*(_epnum)) + (_offset))
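+/* e.g. MUSBFSH_BUSCTL_OFFSET(2, 0) = 0x80 + (8 * 2) = 0x90 for EP2 */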
+
+static inline void musbfsh_write_txfifosz(void __iomem *mbase, u8 c_size)
+{
+	musbfsh_writeb(mbase, MUSBFSH_TXFIFOSZ, c_size);
+}
+
+static inline void musbfsh_write_txfifoadd(void __iomem *mbase, u16 c_off)
+{
+	musbfsh_writew(mbase, MUSBFSH_TXFIFOADD, c_off);
+}
+
+static inline void musbfsh_write_rxfifosz(void __iomem *mbase, u8 c_size)
+{
+	musbfsh_writeb(mbase, MUSBFSH_RXFIFOSZ, c_size);
+}
+
+static inline void musbfsh_write_rxfifoadd(void __iomem *mbase, u16 c_off)
+{
+	musbfsh_writew(mbase, MUSBFSH_RXFIFOADD, c_off);
+}
+
+static inline u8 musbfsh_read_txfifosz(void __iomem *mbase)
+{
+	return musbfsh_readb(mbase, MUSBFSH_TXFIFOSZ);
+}
+
+static inline u16 musbfsh_read_txfifoadd(void __iomem *mbase)
+{
+	return musbfsh_readw(mbase, MUSBFSH_TXFIFOADD);
+}
+
+static inline u8 musbfsh_read_rxfifosz(void __iomem *mbase)
+{
+	return musbfsh_readb(mbase, MUSBFSH_RXFIFOSZ);
+}
+
+static inline u16 musbfsh_read_rxfifoadd(void __iomem *mbase)
+{
+	return musbfsh_readw(mbase, MUSBFSH_RXFIFOADD);
+}
+
+static inline u8 musbfsh_read_configdata(void __iomem *mbase)
+{
+	musbfsh_writeb(mbase, MUSBFSH_INDEX, 0);
+	return musbfsh_readb(mbase, MUSBFSH_INDEXED_OFFSET(0, MUSBFSH_CONFIGDATA));
+}
+
+static inline void __iomem *musbfsh_read_target_reg_base(u8 i,
+							 void __iomem *mbase)
+{
+	void __iomem *tmp_base;
+
+	tmp_base = MUSBFSH_BUSCTL_OFFSET(i, 0) + mbase;
+	return tmp_base;
+}
+
+static inline void musbfsh_write_rxfunaddr(void __iomem *mbase, u8 epnum,
+					   u8 qh_addr_reg)
+{
+	musbfsh_writew(mbase, MUSBFSH_RXFUNCADDR + 8 * epnum, qh_addr_reg);
+}
+
+static inline void musbfsh_write_rxhubaddr(void __iomem *mbase, u8 epnum,
+					   u8 qh_h_addr_reg)
+{
+	u16 rx_hub_port_addr = musbfsh_readw(mbase,
+					     MUSBFSH_RXHUBADDR + 8 * epnum);
+	rx_hub_port_addr &= 0xff00;
+	rx_hub_port_addr |= qh_h_addr_reg;
+	musbfsh_writew(mbase, MUSBFSH_RXHUBADDR + 8 * epnum, rx_hub_port_addr);
+}
+
+static inline void musbfsh_write_rxhubport(void __iomem *mbase, u8 epnum,
+					   u8 qh_h_port_reg)
+{
+	u16 rx_hub_port_addr = musbfsh_readw(mbase,
+					     MUSBFSH_RXHUBADDR + 8 * epnum);
+	u16 rx_port_addr = (u16) qh_h_port_reg;
+
+	rx_hub_port_addr &= 0x00ff;
+	rx_hub_port_addr |= (rx_port_addr << 8);
+	musbfsh_writew(mbase, MUSBFSH_RXHUBADDR + 8 * epnum, rx_hub_port_addr);
+}
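+
+/*
+ * RXHUBADDR/TXHUBADDR are 16-bit registers whose low byte holds the
+ * hub address and whose high byte holds the hub port, which is why
+ * the rx helpers above and the tx helpers below each mask one half
+ * and OR in the other; e.g. hub address 0x02 on port 3 reads back as
+ * 0x0302.
+ */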
+
+static inline void musbfsh_write_txfunaddr(void __iomem *mbase, u8 epnum,
+					   u8 qh_addr_reg)
+{
+	musbfsh_writew(mbase, MUSBFSH_TXFUNCADDR + 8 * epnum, qh_addr_reg);
+}
+
+static inline void musbfsh_write_txhubaddr(void __iomem *mbase, u8 epnum,
+					   u8 qh_h_addr_reg)
+{
+	u16 tx_hub_port_addr = musbfsh_readw(mbase,
+					     MUSBFSH_TXHUBADDR + 8 * epnum);
+	tx_hub_port_addr &= 0xff00;
+	tx_hub_port_addr |= qh_h_addr_reg;
+	musbfsh_writew(mbase, MUSBFSH_TXHUBADDR + 8 * epnum, tx_hub_port_addr);
+}
+
+static inline void musbfsh_write_txhubport(void __iomem *mbase, u8 epnum,
+					   u8 qh_h_port_reg)
+{
+	u16 tx_hub_port_addr = musbfsh_readw(mbase,
+					     MUSBFSH_TXHUBADDR + 8 * epnum);
+	u16 tx_port_addr = (u16) qh_h_port_reg;
+
+	tx_hub_port_addr &= 0x00ff;
+	tx_hub_port_addr |= (tx_port_addr << 8);
+	musbfsh_writew(mbase, MUSBFSH_TXHUBADDR + 8 * epnum, tx_hub_port_addr);
+}
+
+#endif				/* __MUSBFSH_REGS_H__ */
diff --git a/drivers/misc/mediatek/usb11/musbfsh_virthub.c b/drivers/misc/mediatek/usb11/musbfsh_virthub.c
new file mode 100644
index 0000000..8db9bbd
--- /dev/null
+++ b/drivers/misc/mediatek/usb11/musbfsh_virthub.c
@@ -0,0 +1,616 @@
+/*
+ * MUSB OTG driver virtual root hub support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * Copyright 2015 Mediatek Inc.
+ *	Marvin Lin <marvin.lin@mediatek.com>
+ *	Arvin Wang <arvin.wang@mediatek.com>
+ *	Vincent Fan <vincent.fan@mediatek.com>
+ *	Bryant Lu <bryant.lu@mediatek.com>
+ *	Yu-Chang Wang <yu-chang.wang@mediatek.com>
+ *	Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+
+#include <asm/unaligned.h>
+
+#include "musbfsh_core.h"
+#include "musbfsh_host.h"
+#include "usb.h"
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+#include "musbfsh_mt65xx.h"
+#endif
+
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+struct my_attr resistor_control_attr = {
+	.attr.name = "resistor_control",
+	.attr.mode = 0644,
+#ifdef MTK_ICUSB_RESISTOR_CONTROL
+	.value = 1
+#else
+	.value = 0
+#endif
+};
+
+struct my_attr skip_port_pm_attr = {
+	.attr.name = "skip_port_pm",
+	.attr.mode = 0644,
+#ifdef MTK_ICUSB_SKIP_PORT_PM
+	.value = 1
+#else
+	.value = 0
+#endif
+};
+
+#endif
+
+static void musbfsh_port_suspend(struct musbfsh *musbfsh, bool do_suspend)
+{
+	u8 power;
+	u8 intrusbe;
+	u8 intrusb;
+	void __iomem *mbase = musbfsh->mregs;
+	int retries = 0;
+
+
+	/* MYDBG("cpuid:%d\n", smp_processor_id()); */
+
+	/* NOTE:  this doesn't necessarily put PHY into low power mode,
+	 * turning off its clock; that's a function of PHY integration and
+	 * MUSBFSH_POWER_ENSUSPEND.  PHY may need a clock (sigh) to detect
+	 * SE0 changing to connect (J) or wakeup (K) states.
+	 */
+	if (do_suspend) {
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+		if (musbfsh_skip_port_suspend) {
+			MYDBG("\n");
+			musbfsh->port1_status |= USB_PORT_STAT_SUSPEND;
+			return;
+		}
+#endif
+
+		/* clean MUSBFSH_INTR_SOF in MUSBFSH_INTRUSBE */
+		intrusbe = musbfsh_readb(mbase, MUSBFSH_INTRUSBE);
+		intrusbe &= ~MUSBFSH_INTR_SOF;
+		musbfsh_writeb(mbase, MUSBFSH_INTRUSBE, intrusbe);
+		mb(); /* flush the INTRUSBE write immediately */
+		/* clean MUSBFSH_INTR_SOF in MUSBFSH_INTRUSB */
+		intrusb = musbfsh_readb(mbase, MUSBFSH_INTRUSB);
+		intrusb |= MUSBFSH_INTR_SOF;
+		musbfsh_writeb(mbase, MUSBFSH_INTRUSB, intrusb);
+		mb(); /* flush the INTRUSB write immediately */
+		retries = 10000;
+		intrusb = musbfsh_readb(mbase, MUSBFSH_INTRUSB);
+		while (!(intrusb & MUSBFSH_INTR_SOF)) {
+			intrusb = musbfsh_readb(mbase, MUSBFSH_INTRUSB);
+			if (retries-- < 1) {
+				MYDBG("\n");
+				break;
+			}
+		}
+
+		/* delay 10 us */
+		udelay(10);
+
+		/* set MUSBFSH_POWER_SUSPENDM in MUSBFSH_POWER_SUSPENDM */
+		power = musbfsh_readb(mbase, MUSBFSH_POWER);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#if defined(CONFIG_PM_RUNTIME) &&  defined(USB11_REMOTE_IRQ_NON_AUTO_MASK)
+		disable_remote_wake_up();
+#endif
+#endif
+
+#ifdef MTK_USB_RUNTIME_SUPPORT
+		/*
+		 * Mask the remote wake up IRQ between port suspend and bus
+		 * suspend.  hub.c calls set_port_feature first and then
+		 * usb_set_device_state, so if the EINT arrives between them,
+		 * the resume flow may see a device state without
+		 * USB_STATE_SUSPENDED and do nothing.
+		 * So we postpone the remote wake up IRQ until the suspend
+		 * flow is all done (when bus_suspend is called).  Since the
+		 * suspend flow may be interrupted (root hub suspended, but
+		 * not the host controller), we also unmask EINT when resume
+		 * is done.
+		 */
+		mt_eint_mask(CUST_EINT_MT6280_USB_WAKEUP_NUM);
+#endif
+
+		retries = 10000;
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+		if (skip_port_pm_attr.value) {
+			MYDBG("skip hw operation for port suspend\n");
+		} else {
+			power &= ~MUSBFSH_POWER_RESUME;
+			power |= MUSBFSH_POWER_SUSPENDM;
+			musbfsh_writeb(mbase, MUSBFSH_POWER, power);
+
+			/* Needed for OPT A tests */
+			power = musbfsh_readb(mbase, MUSBFSH_POWER);
+			while (power & MUSBFSH_POWER_SUSPENDM) {
+				power = musbfsh_readb(mbase, MUSBFSH_POWER);
+				if (retries-- < 1)
+					break;
+			}
+		}
+#else
+		power &= ~MUSBFSH_POWER_RESUME;
+		power |= MUSBFSH_POWER_SUSPENDM;
+		musbfsh_writeb(mbase, MUSBFSH_POWER, power);
+
+		/* Needed for OPT A tests */
+		power = musbfsh_readb(mbase, MUSBFSH_POWER);
+		while (power & MUSBFSH_POWER_SUSPENDM) {
+			power = musbfsh_readb(mbase, MUSBFSH_POWER);
+			if (retries-- < 1) {
+				MYDBG("\n");
+				break;
+			}
+		}
+#endif
+		mb(); /* flush POWER and PHY setting immediately */
+		WARNING("Root port suspended, power 0x%02x\n", power);
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#if defined(CONFIG_PM_RUNTIME)
+		disable_usb11_clk();
+#endif
+#endif
+		musbfsh->port1_status |= USB_PORT_STAT_SUSPEND;
+	} else {
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+		if (musbfsh_skip_port_resume) {
+			MYDBG("\n");
+			request_wakeup_md_timeout(0, 0);
+			musbfsh->port1_status |= MUSBFSH_PORT_STAT_RESUME;
+			musbfsh->rh_timer = jiffies + msecs_to_jiffies(20);
+			return;
+		}
+#if defined(CONFIG_PM_RUNTIME)
+		enable_usb11_clk();
+#endif
+#endif
+
+		power = musbfsh_readb(mbase, MUSBFSH_POWER);
+		if (!(power & MUSBFSH_POWER_SUSPENDM)) {
+			WARNING("Root port resuming abort, power 0x%02x\n",
+				power);
+			if (power & MUSBFSH_POWER_RESUME)
+				goto finish;
+			else
+				return;
+		}
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+		request_wakeup_md_timeout(0, 0);
+#endif
+
+#ifdef MTK_USB_RUNTIME_SUPPORT
+		/* ERR("EINT to wake up MD for resume\n"); */
+		/* request_wakeup_md_timeout(0, 0); //wx, wakeup MD first */
+#endif
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+		if (skip_port_pm_attr.value) {
+			MYDBG("skip hw operation for port resume\n");
+		} else {
+			power &= ~MUSBFSH_POWER_SUSPENDM;
+			power |= MUSBFSH_POWER_RESUME;
+			musbfsh_writeb(mbase, MUSBFSH_POWER, power);
+		}
+#else
+		power &= ~MUSBFSH_POWER_SUSPENDM;
+		power |= MUSBFSH_POWER_RESUME;
+		musbfsh_writeb(mbase, MUSBFSH_POWER, power);
+#endif
+		mb();  /* flush POWER and PHY setting immediately */
+		WARNING("Root port resuming, power 0x%02x\n", power);
+finish:
+		/* later, GetPortStatus will stop RESUME signaling */
+		musbfsh->port1_status |= MUSBFSH_PORT_STAT_RESUME;
+		musbfsh->rh_timer = jiffies + msecs_to_jiffies(20);
+	}
+}
+
+static void musbfsh_port_reset(struct musbfsh *musbfsh, bool do_reset)
+{
+	u8 power;
+	void __iomem *mbase = musbfsh->mregs;
+
+	/* NOTE:  caller guarantees it will turn off the reset when
+	 * the appropriate amount of time has passed
+	 */
+	power = musbfsh_readb(mbase, MUSBFSH_POWER);
+	WARNING("reset=%d power=0x%x\n", do_reset, power);
+	if (do_reset) {
+		if (power & MUSBFSH_POWER_SUSPENDM) {
+			WARNING("reset a suspended device\n");
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+			request_wakeup_md_timeout(0, 0);
+#endif
+
+#ifdef MTK_USB_RUNTIME_SUPPORT
+			/* ERR("EINT to wake up MD for reset\n"); */
+			/* wx, we may have to reset a suspended MD */
+			/* request_wakeup_md_timeout(0, 0); */
+#endif
+			musbfsh_writeb(mbase,
+				       MUSBFSH_POWER, power |
+				       MUSBFSH_POWER_RESUME);
+			mdelay(20);
+			musbfsh_writeb(mbase, MUSBFSH_POWER,
+				       power & ~MUSBFSH_POWER_RESUME);
+		}
+
+		/*
+		 * If RESUME is set, we must make sure it stays minimum 20 ms.
+		 * Then we must clear RESUME and wait a bit to let musb start
+		 * generating SOFs. If we don't do this, OPT HS A 6.8 tests
+		 * fail with "Error! Did not receive an SOF before suspend
+		 * detected".
+		 */
+		if (power & MUSBFSH_POWER_RESUME) {
+			WARNING("reset a resuming device\n");
+			while (time_before(jiffies, musbfsh->rh_timer))
+				mdelay(1);
+			/* stop the resume signal */
+			musbfsh_writeb(mbase, MUSBFSH_POWER,
+				       power & ~MUSBFSH_POWER_RESUME);
+			mdelay(1);
+		}
+
+		musbfsh->ignore_disconnect = true;
+		power &= 0xf0;
+		musbfsh_writeb(mbase, MUSBFSH_POWER,
+			       power | MUSBFSH_POWER_RESET);
+		mb();  /* flush POWER and PHY setting immediately */
+		musbfsh->port1_status |= USB_PORT_STAT_RESET;
+		musbfsh->port1_status &= ~USB_PORT_STAT_ENABLE;
+		musbfsh->rh_timer = jiffies + msecs_to_jiffies(50);
+	} else {
+		INFO("Root port reset stopped\n");
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+
+		if (resistor_control_attr.value) {
+			/* improve signal quality, from Dingjun */
+
+			/* original flow from SS5 */
+			USB11PHY_SET8(U1PHTCR2,
+				      force_usb11_dm_rpd | force_usb11_dp_rpd);
+
+			/*
+			 * disconnect host port's pull down resistors
+			 * on D+ and D-
+			 */
+			USB11PHY_CLR8(U1PHTCR2,
+				      RG_USB11_DM_RPD | RG_USB11_DP_RPD);
+
+			/*
+			 * Tell MAC there still is a device attached,
+			 * otherwise we will get a disconnect interrupt
+			 */
+			USB11PHY_SET8(U1PHTCR2,
+				      force_usb11_dp_rpu | RG_USB11_DP_RPU);
+
+			/* force */
+			USB11PHY_SET8(0x6a, 0x20 | 0x10);
+			/* RG */
+			/*
+			 * disconnect host port's pull down resistors
+			 * on D+ and D-
+			 */
+			USB11PHY_CLR8(0x68, 0x80 | 0x40);
+
+			/*
+			 * Tell MAC there still is a device attached,
+			 * otherwise we will get a disconnect interrupt.
+			 */
+			/* USB11PHY_SET8(U1PHTCR2,
+			 *		 force_usb11_dp_rpu |
+			 *		 RG_USB11_DP_RPU);
+			 */
+
+			MYDBG("USB1.1 PHY special config for IC-USB\n");
+		} else {
+			MYDBG("");
+		}
+#endif
+		musbfsh_writeb(mbase,
+			       MUSBFSH_POWER, power & ~MUSBFSH_POWER_RESET);
+
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+		if (resistor_control_attr.value)
+			USB11PHY_CLR8(0x6a, 0x20 | 0x10);
+		else
+			MYDBG("");
+#endif
+		mb(); /* flush POWER and PHY setting immediately */
+		musbfsh->ignore_disconnect = false;
+
+		power = musbfsh_readb(mbase, MUSBFSH_POWER);
+		if (power & MUSBFSH_POWER_HSMODE) {
+			INFO("high-speed device connected\n");
+			musbfsh->port1_status |= USB_PORT_STAT_HIGH_SPEED;
+		}
+		musbfsh->port1_status &= ~USB_PORT_STAT_RESET;
+		musbfsh->port1_status |=
+			USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET << 16) |
+			(USB_PORT_STAT_C_ENABLE << 16);
+
+		/* callback to notify the hub thread of the hub state */
+		usb_hcd_poll_rh_status(musbfsh_to_hcd(musbfsh));
+
+		musbfsh->vbuserr_retry = VBUSERR_RETRY_COUNT;
+	}
+}
+
+void musbfsh_root_disconnect(struct musbfsh *musbfsh)
+{
+	INFO("%s++\r\n", __func__);
+	musbfsh->port1_status =
+		USB_PORT_STAT_POWER | (USB_PORT_STAT_C_CONNECTION << 16);
+
+	usb_hcd_poll_rh_status(musbfsh_to_hcd(musbfsh));
+	musbfsh->is_active = 0;
+}
+
+
+/*---------------------------------------------------------------------*/
+
+/* Caller may or may not hold musbfsh->lock */
+int musbfsh_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct musbfsh *musbfsh = hcd_to_musbfsh(hcd);
+	int retval = 0;
+
+	INFO("musbfsh_hub_status_data++\r\n");
+	/* called in_irq() via usb_hcd_poll_rh_status() */
+	if (musbfsh->port1_status & 0xffff0000) {
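+		/* bit 1: change on port 1; bit 0 would be the hub itself */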
+		*buf = 0x02;
+		retval = 1;
+	}
+	return retval;
+}
+
+int musbfsh_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+			u16 wIndex, char *buf, u16 wLength)
+{
+	struct musbfsh *musbfsh = hcd_to_musbfsh(hcd);
+	u32 temp;
+	int retval = 0;
+	unsigned long flags;
+
+	INFO("%s++, typeReq=0x%x, wValue=0x%x, wIndex=0x%x\r\n",
+	     __func__, typeReq, wValue, wIndex);
+	spin_lock_irqsave(&musbfsh->lock, flags);
+
+	if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) {
+		spin_unlock_irqrestore(&musbfsh->lock, flags);
+		return -ESHUTDOWN;
+	}
+
+	/* hub features:  always zero, setting is a NOP
+	 * port features: reported, sometimes updated when host is active
+	 * no indicators
+	 */
+	switch (typeReq) {
+	case ClearHubFeature:
+	case SetHubFeature:
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+		case C_HUB_LOCAL_POWER:
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case ClearPortFeature:
+		/* wIndex indicates the port number; here it should be 1 */
+		if ((wIndex & 0xff) != 1)
+			goto error;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			/* here is clearing the suspend */
+			musbfsh_port_suspend(musbfsh, false);
+			break;
+		case USB_PORT_FEAT_POWER:
+#ifndef MTK_ALPS_BOX_SUPPORT
+			/* only power off the vbus */
+			musbfsh_set_vbus(musbfsh, 0);
+#else
+			/* only power off the vbus */
+			musbfsh_platform_set_vbus(musbfsh, 0);
+#endif
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+		case USB_PORT_FEAT_C_ENABLE:
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+		case USB_PORT_FEAT_C_RESET:
+		case USB_PORT_FEAT_C_SUSPEND:
+			break;
+		default:
+			goto error;
+		}
+		INFO("clear feature %d\n", wValue);
+		musbfsh->port1_status &= ~(1 << wValue);
+		break;
+	case GetHubDescriptor:
+		{
+			struct usb_hub_descriptor *desc = (void *)buf;
+
+			desc->bDescLength = 9;
+			desc->bDescriptorType = 0x29;
+			desc->bNbrPorts = 1;
+
+			/* 0x0001: per-port power switching */
+			/* 0x0010: no overcurrent reporting */
+			desc->wHubCharacteristics =
+				cpu_to_le16(0x0001 | 0x0010);
+			/* msec/2 */
+			desc->bPwrOn2PwrGood = 5;
+			desc->bHubContrCurrent = 0;
+
+			/* workaround bogus struct definition */
+			desc->u.hs.DeviceRemovable[0] = 0x02;	/* port 1 */
+			desc->u.hs.DeviceRemovable[1] = 0xff;
+		}
+		break;
+	case GetHubStatus:
+		temp = 0;
+		*(__le32 *)buf = cpu_to_le32(temp);
+		break;
+	case GetPortStatus:
+		if (wIndex != 1)
+			goto error;
+
+		/* finish RESET signaling? */
+		/* if false: stop the reset because the reset period timed out */
+		if ((musbfsh->port1_status & USB_PORT_STAT_RESET)
+		    && time_after_eq(jiffies, musbfsh->rh_timer))
+			musbfsh_port_reset(musbfsh, false);
+
+		/* finish RESUME signaling? */
+		if ((musbfsh->port1_status & MUSBFSH_PORT_STAT_RESUME)
+		    && time_after_eq(jiffies, musbfsh->rh_timer)) {
+			u8 pwr;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+			if (!musbfsh_skip_port_resume) {
+				pwr = musbfsh_readb(musbfsh->mregs,
+						      MUSBFSH_POWER);
+				pwr &= ~MUSBFSH_POWER_RESUME;
+				WARNING("Root port resume stopped\n");
+				WARNING("power 0x%02x\n", pwr);
+				musbfsh_writeb(musbfsh->mregs, MUSBFSH_POWER,
+					       pwr);
+#if defined(CONFIG_PM_RUNTIME) && defined(USB11_REMOTE_IRQ_NON_AUTO_MASK)
+				enable_remote_wake_up();
+#endif
+			} else {
+				MYDBG("\n");
+			}
+#else
+
+			pwr = musbfsh_readb(musbfsh->mregs, MUSBFSH_POWER);
+			pwr &= ~MUSBFSH_POWER_RESUME;
+			WARNING("Root port resume stopped, power 0x%02x\n",
+				pwr);
+			musbfsh_writeb(musbfsh->mregs, MUSBFSH_POWER, pwr);
+#endif
+
+#ifdef MTK_USB_RUNTIME_SUPPORT
+			/* mt_eint_unmask(CUST_EINT_MT6280_USB_WAKEUP_NUM); */
+#endif
+
+			/*
+			 * ISSUE: DaVinci (RTL 1.300) disconnects after
+			 * resume of high speed peripherals (but not full
+			 * speed ones).
+			 */
+
+			musbfsh->is_active = 1;
+			musbfsh->port1_status &= ~(USB_PORT_STAT_SUSPEND
+						   | MUSBFSH_PORT_STAT_RESUME);
+			musbfsh->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+			usb_hcd_poll_rh_status(musbfsh_to_hcd(musbfsh));
+		}
+
+		put_unaligned(cpu_to_le32(musbfsh->port1_status &
+					  ~MUSBFSH_PORT_STAT_RESUME),
+					  (__le32 *)buf);
+
+		/* port change status is more interesting */
+		WARNING("port status %08x,devctl=0x%x\n", musbfsh->port1_status,
+			musbfsh_readb(musbfsh->mregs, MUSBFSH_DEVCTL));
+		break;
+	case SetPortFeature:
+		if ((wIndex & 0xff) != 1)
+			goto error;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_POWER:
+			/* NOTE: this controller has a strange state machine
+			 * that involves "requesting sessions" according to
+			 * magic side effects from incompletely-described
+			 * rules about startup...
+			 *
+			 * This call is what really starts the host mode; be
+			 * very careful about side effects if you reorder any
+			 * initialization logic, e.g. for OTG, or change any
+			 * logic relating to VBUS power-up.
+			 */
+			INFO("musbfsh_start is called in hub control\r\n");
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+			if (skip_mac_init_attr.value)
+				MYDBG("");
+			else
+				musbfsh_start(musbfsh);
+#else
+			musbfsh_start(musbfsh);
+#endif
+			break;
+		case USB_PORT_FEAT_RESET:
+			/* enable the reset, but not finish */
+			musbfsh_port_reset(musbfsh, true);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			musbfsh_port_suspend(musbfsh, true);
+			break;
+		case USB_PORT_FEAT_TEST:
+			break;
+		default:
+			goto error;
+		}
+		INFO("set feature %d\n", wValue);
+		musbfsh->port1_status |= 1 << wValue;
+		break;
+
+	default:
+error:
+		/* "protocol stall" on error */
+		retval = -EPIPE;
+	}
+	spin_unlock_irqrestore(&musbfsh->lock, flags);
+	return retval;
+}
diff --git a/drivers/misc/mediatek/xo/Makefile b/drivers/misc/mediatek/xo/Makefile
new file mode 100644
index 0000000..551e3ff
--- /dev/null
+++ b/drivers/misc/mediatek/xo/Makefile
@@ -0,0 +1,14 @@
+#
+# Copyright (C) 2015 MediaTek Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+obj-y := xo.o
+ccflags-y += -I$(srctree)/include/linux/
diff --git a/drivers/misc/mediatek/xo/xo.c b/drivers/misc/mediatek/xo/xo.c
new file mode 100644
index 0000000..df4d3dd
--- /dev/null
+++ b/drivers/misc/mediatek/xo/xo.c
@@ -0,0 +1,1138 @@
+/*
+* Copyright (C) 2016 MediaTek Inc.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+* See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/mt6397/rtc_misc.h>
+#include <dt-bindings/clock/mt8167-clk.h>
+
+#define BSI_BASE          (xo_inst->base)
+#define BSI_CON	          0x0000
+#define BSI_WRDAT_CON     0x0004
+#define BSI_WRDAT         0x0008
+#define BSI_RDCON         0x0c40
+#define BSI_RDADDR_CON    0x0c44
+#define BSI_RDADDR        0x0c48
+#define BSI_RDCS_CON      0x0c4c
+#define BSI_RDDAT         0x0c50
+
+#define BSI_WRITE_READY (1 << 31)
+#define BSI_READ_READY (1 << 31)
+#define BSI_READ_BIT (1 << 8)
+#define BITS(m, n) (~(BIT(m)-1) & ((BIT(n) - 1) | BIT(n)))
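+/*
+ * BITS(m, n) builds an inclusive mask of bits m..n, e.g.
+ * BITS(4, 6) = ~0x0f & 0x7f = 0x70 and BITS(12, 14) = 0x7000.
+ */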
+
+#define READ_REGISTER_UINT32(reg)          readl((void __iomem *)reg)
+#define WRITE_REGISTER_UINT32(reg, val)     writel((val), (void __iomem *)(reg))
+
+#define XOCAP_NVRAM_FILE_NAME   "/data/nvram/APCFG/APRDEB/XOCAP"
+
+#define KEEP_LDOH
+
+struct xo_dev {
+	struct device *dev;
+	void __iomem *base;
+	void __iomem *top_rtc32k;
+	struct clk *bsi_clk;
+	struct clk *rg_bsi_clk;
+	struct clk *bsi_sel_clk;
+	struct clk *top_26m_clk;
+	uint32_t cur_xo_capid;
+	uint32_t ori_xo_capid;
+	bool has_ext_crystal;
+	bool crystal_check_done;
+};
+
+void __iomem *pxo_efuse;
+unsigned long xo_data;
+struct timer_list xocap_timer;
+struct work_struct xocap_work;
+static struct xo_dev *xo_inst;
+static const struct of_device_id apxo_of_ids[] = {
+	{ .compatible = "mediatek,mt8167-xo", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, apxo_of_ids);
+
+/*----------------------------------------------------------------------------*/
+/*!
+* \brief Utility function for reading data from files on NVRAM-FS
+*
+* \param[in]
+*           filename
+*           len
+*           offset
+* \param[out]
+*           buf
+* \return
+*           actual length of data being read
+*/
+/*----------------------------------------------------------------------------*/
+static int nvram_read(char *filename, char *buf, ssize_t len, int offset)
+{
+#if 1
+	struct file *fd;
+	int retLen = -1;
+	loff_t pos;
+	char __user *p;
+
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+
+	fd = filp_open(filename, O_RDONLY, 0644);
+
+	if (IS_ERR(fd)) {
+		pr_err("[MT6620][nvram_read] : failed to open!!\n");
+		return -1;
+	}
+
+	do {
+		if (fd->f_op == NULL) {
+			pr_err("[MT6620][nvram_read] : f_op is NULL!!\n");
+			break;
+		}
+
+		if (fd->f_pos != offset) {
+			if (fd->f_op->llseek) {
+				if (fd->f_op->llseek(fd, offset, 0) != offset) {
+					pr_err("[MT6620][nvram_read] : failed to seek!!\n");
+					break;
+				}
+			} else {
+				fd->f_pos = offset;
+			}
+		}
+
+		p = (__force char __user *)buf;
+		pos = (loff_t)offset;
+		retLen = __vfs_read(fd, p, len, &pos);
+		if (retLen < 0)
+			pr_err("[MT6620][nvram_read] : read failed!! Error code: %d\n", retLen);
+
+	} while (0);
+
+	filp_close(fd, NULL);
+
+	set_fs(old_fs);
+
+	return retLen;
+
+#else /* !CFG_SUPPORT_NVRAM */
+
+	return -EIO;
+
+#endif
+}
+
+/*----------------------------------------------------------------------------*/
+/*!
+* \brief Utility function for writing data to files on NVRAM-FS
+*
+* \param[in]
+*           filename
+*           buf
+*           len
+*           offset
+* \return
+*           actual length of data being written
+*/
+/*----------------------------------------------------------------------------*/
+static int nvram_write(char *filename, char *buf, ssize_t len, int offset)
+{
+/* #if CFG_SUPPORT_NVRAM */
+#if 1
+	struct file *fd;
+	int retLen = -1;
+	loff_t pos;
+	char __user *p;
+
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+
+	fd = filp_open(filename, O_WRONLY | O_CREAT, 0644);
+
+	if (IS_ERR(fd)) {
+		pr_debug("[MT6620][nvram_write] : failed to open!!\n");
+		return -1;
+	}
+
+	do {
+		if (fd->f_op == NULL) {
+			pr_debug("[MT6620][nvram_write] : f_op is NULL!!\n");
+			break;
+		}
+		/* End of if */
+		if (fd->f_pos != offset) {
+			if (fd->f_op->llseek) {
+				if (fd->f_op->llseek(fd, offset, 0) != offset) {
+					pr_debug("[MT6620][nvram_write] : failed to seek!!\n");
+					break;
+				}
+			} else {
+				fd->f_pos = offset;
+			}
+		}
+
+		p = (__force char __user *)buf;
+		pos = (loff_t)offset;
+
+		retLen = vfs_write(fd, p, len, &pos);
+		if (retLen < 0)
+			pr_debug("[MT6620][nvram_write] : write failed!! Error code: %d\n", retLen);
+
+	} while (0);
+
+	filp_close(fd, NULL);
+
+	set_fs(old_fs);
+
+	return retLen;
+
+#else /* !CFG_SUPPORT_NVRAM */
+
+	return -EIO;
+
+#endif
+}
+
+static uint32_t BSI_read(uint32_t rdaddr)
+{
+	uint32_t readaddr = BSI_READ_BIT | rdaddr;
+	uint32_t ret;
+
+	WRITE_REGISTER_UINT32(BSI_BASE + BSI_RDCON, 0x9f8b);
+	WRITE_REGISTER_UINT32(BSI_BASE + BSI_RDADDR_CON, 0x0902);
+	WRITE_REGISTER_UINT32(BSI_BASE + BSI_RDADDR, readaddr);
+	WRITE_REGISTER_UINT32(BSI_BASE + BSI_RDCS_CON, 0x0);
+	WRITE_REGISTER_UINT32(BSI_BASE + BSI_RDCON, 0x89f8b);
+
+	while (!(READ_REGISTER_UINT32(BSI_BASE + BSI_RDCON) & BSI_READ_READY))
+		pr_debug("wait bsi read done!\n");
+
+	ret = READ_REGISTER_UINT32(BSI_BASE + BSI_RDDAT) & 0x0000ffff;
+	pr_debug("BSI Read Done: value = 0x%x\n", ret);
+	return ret;
+}
+
+static void BSI_write(uint32_t wraddr, uint32_t wrdata)
+{
+	uint32_t wrdat;
+
+	WRITE_REGISTER_UINT32(BSI_BASE + BSI_WRDAT_CON, 0x1d00);
+	wrdat = (wraddr << 20) + wrdata;
+
+	pr_debug("BSI_write: wrdat = 0x%x\n", wrdat);
+	WRITE_REGISTER_UINT32(BSI_BASE + BSI_WRDAT, wrdat);
+	WRITE_REGISTER_UINT32(BSI_BASE + BSI_CON, 0x80401);
+	while (!(READ_REGISTER_UINT32(BSI_BASE + BSI_CON) & BSI_WRITE_READY))
+		pr_debug("wait bsi write done!\n");
+
+	pr_debug("BSI Write Done\n");
+}
+
+static void XO_trim_write(uint32_t cap_code)
+{
+	uint32_t wrdat = 0;
+	/* 0x09 [14:12] = cap_code[6:4] */
+	wrdat = BSI_read(0x09) & ~BITS(12, 14);
+	wrdat |= (cap_code & BITS(4, 6)) << 8;
+	BSI_write(0x09, wrdat);
+	/* 0x09 [10:4] = cap_code[6:0] */
+	wrdat = BSI_read(0x09) & ~BITS(4, 10);
+	wrdat |= (cap_code & BITS(0, 6)) << 4;
+	BSI_write(0x09, wrdat);
+	/* 0x01 [11:10] = 2'b11 */
+	BSI_write(0x01, 0xC00);
+	mdelay(10);
+	/* 0x01 [11:10] = 2'b01 */
+	BSI_write(0x01, 0x400);
+	/* 0x1f [5:3] =  cap_code[6:4] */
+	wrdat = BSI_read(0x1f) & ~BITS(3, 5);
+	wrdat |= (cap_code & BITS(4, 6)) >> 1;
+	BSI_write(0x1f, wrdat);
+	/* 0x1f [2:0] =  cap_code[6:4] */
+	wrdat = BSI_read(0x1f) & ~BITS(0, 2);
+	wrdat |= (cap_code & BITS(4, 6)) >> 4;
+	BSI_write(0x1f, wrdat);
+	/* 0x1e [15:12] =  cap_code[3:0] */
+	wrdat = BSI_read(0x1e) & ~BITS(12, 15);
+	wrdat |= (cap_code & BITS(0, 3)) << 12;
+	BSI_write(0x1e, wrdat);
+	/* 0x4b [5:3] =  cap_code[6:4] */
+	wrdat = BSI_read(0x4b) & ~BITS(3, 5);
+	wrdat |= (cap_code & BITS(4, 6)) >> 1;
+	BSI_write(0x4b, wrdat);
+	/* 0x4b [2:0] =  cap_code[6:4] */
+	wrdat = BSI_read(0x4b) & ~BITS(0, 2);
+	wrdat |= (cap_code & BITS(4, 6)) >> 4;
+	BSI_write(0x4b, wrdat);
+	/* 0x4a [15:12] =  cap_code[3:0] */
+	wrdat = BSI_read(0x4a) & ~BITS(12, 15);
+	wrdat |= (cap_code & BITS(0, 3)) << 12;
+	BSI_write(0x4a, wrdat);
+}
+
+static uint32_t XO_trim_read(void)
+{
+	uint32_t cap_code = 0;
+	/* cap_code[4:0] = 0x00 [15:11] */
+	cap_code = (BSI_read(0x00) & BITS(11, 15)) >> 11;
+	/* cap_code[6:5] = 0x01 [1:0] */
+	cap_code |= (BSI_read(0x01) & BITS(0, 1)) << 5;
+	return cap_code;
+}
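+
+/*
+ * Round-trip example of the 7-bit cap code packing, for the
+ * illustrative value cap_code = 0x5a (1011010b): XO_trim_write()
+ * places bits [6:4] = 101b into 0x09[14:12] and bits [6:0] into
+ * 0x09[10:4], while XO_trim_read() reassembles 0x5a from
+ * 0x00[15:11] (low five bits) and 0x01[1:0] (top two bits).
+ */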
+
+static void enable_xo_low_power_mode(void)
+{
+	uint32_t value = 0;
+
+	/* RG_DA_EN_XO_BG_MANVALUE = 1 */
+	value = BSI_read(0x03) | (1<<12);
+	BSI_write(0x03, value);
+	/* RG_DA_EN_XO_BG_MAN = 1 */
+	value = BSI_read(0x03) | (1<<13);
+	BSI_write(0x03, value);
+
+#if defined(KEEP_LDOH)
+	/* RG_DA_EN_XO_LDOH_MANVALUE = 1 */
+	value = BSI_read(0x03) | (1<<8);
+	BSI_write(0x03, value);
+	/* RG_DA_EN_XO_LDOH_MAN = 1 */
+	value = BSI_read(0x03) | (1<<9);
+	BSI_write(0x03, value);
+#endif
+	/* RG_DA_EN_XO_LDOL_MANVALUE = 1 */
+	value = BSI_read(0x03) | 0x1;
+	BSI_write(0x03, value);
+	/* RG_DA_EN_XO_LDOL_MAN = 1 */
+	value = BSI_read(0x03) | (1<<1);
+	BSI_write(0x03, value);
+	/* RG_DA_EN_XO_PRENMBUF_VALUE = 1 */
+	value = BSI_read(0x02) | (1<<6);
+	BSI_write(0x02, value);
+	/* RG_DA_EN_XO_PRENMBUF_MAN = 1 */
+	value = BSI_read(0x02) | (1<<7);
+	BSI_write(0x02, value);
+	/* RG_DA_EN_XO_PLLGP_BUF_MANVALUE = 1 */
+	value = BSI_read(0x34) | 0x1;
+	BSI_write(0x34, value);
+	/* RG_DA_EN_XO_PLLGP_BUF_MAN = 1 */
+	value = BSI_read(0x34) | (1<<1);
+	BSI_write(0x34, value);
+
+	/* RG_DA_EN_XO_VGTIELOW_MANVALUE=0 */
+	value = BSI_read(0x05) & 0xFEFF;
+	BSI_write(0x05, value);
+
+	/* RG_DA_EN_XO_VGTIELOW_MAN=1 */
+	value = BSI_read(0x05) | (1<<9);
+	BSI_write(0x05, value);
+
+	/* RG_DA_XO_LPM_BIAS1/2/3_MAN=1 */
+	value = BSI_read(0x06) | (1<<13);
+	BSI_write(0x06, value);
+	value = BSI_read(0x06) | (1<<11);
+	BSI_write(0x06, value);
+#if defined(KEEP_LDOH)
+	value = BSI_read(0x06) | (1<<9);
+	BSI_write(0x06, value);
+#endif
+	/* RG_DA_XO_LPM_BIAS1/2/3_MANVALUE=0 */
+	value = BSI_read(0x06) & ~BIT(12);
+	BSI_write(0x06, value);
+	value = BSI_read(0x06) & ~BIT(10);
+	BSI_write(0x06, value);
+#if defined(KEEP_LDOH)
+	value = BSI_read(0x06) & ~BIT(8);
+	BSI_write(0x06, value);
+#endif
+	/* bit 10 set 0 */
+	value = BSI_read(0x08) & 0xFBFF;
+	BSI_write(0x08, value);
+
+	/* DIG_CR_XO_04_L[9]:RG_XO_INT32K_NOR2LPM_TRIGGER = 1 */
+	value = BSI_read(0x08) | (1<<9);
+	BSI_write(0x08, value);
+	mdelay(5);
+	pr_notice("[xo] enable xo low power mode!\n");
+}
+
+static void disable_xo_low_power_mode(void)
+{
+	uint32_t value = 0;
+
+	/* DIG_CR_XO_04_L[9]:RG_XO_INT32K_NOR2LPM_TRIGGER = 0 */
+	value = BSI_read(0x08) & ~BIT(9);
+	BSI_write(0x08, value);
+	mdelay(5);
+
+	/* RG_DA_EN_XO_BG_MAN = 0 */
+	value = BSI_read(0x03) & ~BIT(13);
+	BSI_write(0x03, value);
+
+#if defined(KEEP_LDOH)
+	/* RG_DA_EN_XO_LDOH_MAN = 0 */
+	value = BSI_read(0x03) & ~BIT(9);
+	BSI_write(0x03, value);
+#endif
+	/* RG_DA_EN_XO_LDOL_MAN = 0 */
+	value = BSI_read(0x03) & ~BIT(1);
+	BSI_write(0x03, value);
+
+	/* RG_DA_EN_XO_PRENMBUF_MAN = 0 */
+	value = BSI_read(0x02) & ~BIT(7);
+	BSI_write(0x02, value);
+
+	/* RG_DA_EN_XO_PLLGP_BUF_MAN = 0 */
+	value = BSI_read(0x34) & ~BIT(1);
+	BSI_write(0x34, value);
+
+	/* RG_DA_EN_XO_VGTIELOW_MAN= 0 */
+	value = BSI_read(0x05) & ~BIT(9);
+	BSI_write(0x05, value);
+
+	/* RG_DA_XO_LPM_BIAS1/2_MAN=0 */
+	value = BSI_read(0x06) & ~BIT(13);
+	BSI_write(0x06, value);
+	value = BSI_read(0x06) & ~BIT(11);
+	BSI_write(0x06, value);
+#if defined(KEEP_LDOH)
+	value = BSI_read(0x06) & ~BIT(9);
+	BSI_write(0x06, value);
+#endif
+
+	pr_notice("[xo] disable xo low power mode!\n");
+}
+
+static void get_xo_status(void)
+{
+	uint32_t status = 0;
+
+	status = (BSI_read(0x26) & BITS(4, 9))>>4;
+	pr_notice("[xo] status: 0x%x\n", status);
+}
+
+void enable_32K_clock_to_pmic(void)
+{
+	uint32_t value = 0;
+
+	/* Set DIG_CR_XO_24[3:2]=2'b11. */
+	value = BSI_read(0x34) | BITS(2, 3);
+	BSI_write(0x34, value);
+}
+
+void disable_32K_clock_to_pmic(void)
+{
+	uint32_t value = 0;
+
+	/* Set DIG_CR_XO_24[3:2]=2'b10. */
+	value = BSI_read(0x34) & ~BITS(2, 3);
+	value = value | (1<<3);
+	BSI_write(0x34, value);
+}
+
+void enable_26M_clock_to_pmic(void)
+{
+	uint32_t value = 0;
+
+	/* Set DIG_CR_XO_02[2]=1 */
+	value = BSI_read(0x04) | 0x4;
+	BSI_write(0x04, value);
+	/* Set DIG_CR_XO_02[1]=1 */
+	value = BSI_read(0x04) | 0x2;
+	BSI_write(0x04, value);
+	/* Set DIG_CR_XO_03[29]=1 */
+	value = BSI_read(0x7) | (1<<13);
+	BSI_write(0x07, value);
+	/* Set DIG_CR_XO_03[28]=1 */
+	value = BSI_read(0x7) | (1<<12);
+	BSI_write(0x07, value);
+}
+
+void disable_26M_clock_to_pmic(void)
+{
+	uint32_t value = 0;
+
+	/* Set DIG_CR_XO_02[2]=1 */
+	value = BSI_read(0x04) | 0x4;
+	BSI_write(0x04, value);
+	/* Set DIG_CR_XO_02[1]=0 */
+	value = BSI_read(0x04) & 0xFFFD;
+	BSI_write(0x04, value);
+	/* Set DIG_CR_XO_03[29]=1 */
+	value = BSI_read(0x7) | (1<<13);
+	BSI_write(0x07, value);
+	/* Set DIG_CR_XO_03[28]=0 */
+	value = BSI_read(0x7) & 0xEFFF;
+	BSI_write(0x07, value);
+}
+
+void disable_26M_clock_to_conn_rf(void)
+{
+	uint32_t value = 0;
+
+	/* RG_CLKBUF_XO_EN<7:0>=8'h00 */
+	value = BSI_read(0x33) & ~BITS(8, 15);
+	BSI_write(0x33, value);
+
+	/* Toggle RG_XO_1_2=0'1'0 */
+	value = BSI_read(0x29) & 0xFFFE;
+	BSI_write(0x29, value);
+	value = BSI_read(0x29) | 0x1;
+	BSI_write(0x29, value);
+	value = BSI_read(0x29) & 0xFFFE;
+	BSI_write(0x29, value);
+}
+
+void enable_26M_clock_to_conn_rf(void)
+{
+	uint32_t value = 0;
+
+	/* RG_CLKBUF_XO_EN<7:0>=8'hFF */
+	value = BSI_read(0x33) | BITS(8, 15);
+	BSI_write(0x33, value);
+
+	/* Toggle RG_XO_1_2=0'1'0 */
+	value = BSI_read(0x29) & 0xFFFE;
+	BSI_write(0x29, value);
+	value = BSI_read(0x29) | 0x1;
+	BSI_write(0x29, value);
+	value = BSI_read(0x29) & 0xFFFE;
+	BSI_write(0x29, value);
+}
+
+static void bsi_clock_enable(bool en)
+{
+	if (en) {
+		clk_prepare_enable(xo_inst->bsi_clk);
+		clk_prepare_enable(xo_inst->rg_bsi_clk);
+	} else {
+		clk_disable_unprepare(xo_inst->rg_bsi_clk);
+		clk_disable_unprepare(xo_inst->bsi_clk);
+	}
+}
+
+static ssize_t show_xo_nvram_board_offset(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	char capid = 0;
+
+	nvram_read(XOCAP_NVRAM_FILE_NAME, &capid, sizeof(unsigned char), 0);
+
+	return sprintf(buf, "xo nvram capid: 0x%x\n", capid);
+}
+
+static ssize_t store_xo_nvram_board_offset(struct device *dev, struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	uint32_t capid_in;
+	char capid;
+	int ret;
+
+	if (buf != NULL) {
+		ret = kstrtouint(buf, 0, &capid_in);
+		if (ret) {
+			pr_err("wrong format!\n");
+			return 0;
+		}
+	}
+
+	capid = capid_in & 0x7f;
+
+	pr_notice("store_xo_nvram_board_offset xo set buf is 0x%x!\n", capid);
+	nvram_write(XOCAP_NVRAM_FILE_NAME, (char *)(&capid), sizeof(unsigned char), 0);
+
+	return size;
+}
+
+static DEVICE_ATTR(xo_nvram_board_offset, 0664, show_xo_nvram_board_offset, store_xo_nvram_board_offset);
+
+
+static ssize_t show_xo_capid(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	uint32_t capid;
+
+	bsi_clock_enable(true);
+	capid = XO_trim_read();
+	bsi_clock_enable(false);
+	return sprintf(buf, "xo capid: 0x%x\n", capid);
+}
+
+static ssize_t store_xo_capid(struct device *dev, struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	uint32_t capid;
+	int ret;
+
+	if (buf != NULL && size != 0) {
+		ret = kstrtouint(buf, 0, &capid);
+		if (ret) {
+			pr_err("wrong format!\n");
+			return size;
+		}
+		if (capid > 0x7f) {
+			pr_err("cap code should be 7bit!\n");
+			return size;
+		}
+
+		bsi_clock_enable(true);
+
+		pr_notice("original cap code: 0x%x\n", XO_trim_read());
+		XO_trim_write(capid);
+		mdelay(10);
+		xo_inst->cur_xo_capid = XO_trim_read();
+		pr_notice("write cap code 0x%x done. current cap code:0x%x\n", capid, xo_inst->cur_xo_capid);
+
+		bsi_clock_enable(false);
+	}
+
+	return size;
+}
+
+static DEVICE_ATTR(xo_capid, 0664, show_xo_capid, store_xo_capid);
+
+static uint32_t xo_capid_add_offset(uint32_t capid, uint32_t offset)
+{
+	uint32_t capid_sign, capid_value;
+	uint32_t offset_sign, offset_value;
+	int32_t tmp_value;
+	uint32_t final_capid;
+
+	capid_sign = !!(capid & 0x40);
+	capid_value = capid & 0x3F;
+	offset_sign = !!(offset & 0x40);
+	offset_value = offset & 0x3F;
+
+	/* process plus/minus overflow */
+	if (capid_sign ^ offset_sign) {	/* minus */
+		tmp_value = (int32_t)capid_value - (int32_t)offset_value;
+		if (tmp_value < 0) {
+			capid_sign = !capid_sign;
+			tmp_value = -tmp_value;
+		}
+		final_capid = (capid_sign << 6) | (uint32_t)tmp_value;
+	} else {	/* plus */
+		tmp_value = (int32_t)capid_value + (int32_t)offset_value;
+		if (tmp_value > 0x3F) { /* value overflow */
+			final_capid = (capid_sign << 6) | 0x3F;
+		} else {
+			final_capid = (capid_sign << 6) | (uint32_t)tmp_value;
+		}
+	}
+	return final_capid;
+}
+
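+/*
+ * Inverse of xo_capid_add_offset(): recover the sign-magnitude offset
+ * between the current and the original cap code.
+ */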
+static uint32_t xo_capid_sub_offset(uint32_t cur_capid, uint32_t ori_capid)
+{
+	uint32_t cur_capid_sign, cur_capid_value;
+	uint32_t ori_capid_sign, ori_capid_value;
+	int32_t tmp_value;
+	uint32_t final_offset;
+
+	cur_capid_sign = !!(cur_capid & 0x40);
+	cur_capid_value = cur_capid & 0x3F;
+	ori_capid_sign = !!(ori_capid & 0x40);
+	ori_capid_value = ori_capid & 0x3F;
+
+	/* process plus/minus error */
+	if (cur_capid_sign ^ ori_capid_sign) {	/* plus */
+		tmp_value = (int32_t)cur_capid_value + (int32_t)ori_capid_value;
+		if (tmp_value > 0x3F) { /* value overflow */
+			final_offset = (cur_capid_sign << 6) | 0x3F;
+		} else {
+			final_offset = (cur_capid_sign << 6) | (uint32_t)tmp_value;
+		}
+	} else {	/* minus */
+		tmp_value = (int32_t)cur_capid_value - (int32_t)ori_capid_value;
+		if (tmp_value < 0) {
+			cur_capid_sign = !cur_capid_sign;
+			tmp_value = -tmp_value;
+		}
+		final_offset = (cur_capid_sign << 6) | (uint32_t)tmp_value;
+	}
+	return final_offset;
+}
+
+static ssize_t show_xo_board_offset(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	uint32_t offset;
+
+	offset = xo_capid_sub_offset(xo_inst->cur_xo_capid, xo_inst->ori_xo_capid);
+
+	return sprintf(buf, "xo capid offset: 0x%x\n", offset);
+}
+
+static ssize_t store_xo_board_offset(struct device *dev, struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	uint32_t offset, capid;
+	int ret;
+
+	if (buf != NULL && size != 0) {
+		ret = kstrtouint(buf, 0, &offset);
+		if (ret) {
+			pr_err("wrong format!\n");
+			return size;
+		}
+		if (offset > 0x7f) {
+			pr_err("offset must fit in 7 bits!\n");
+			return size;
+		}
+
+		bsi_clock_enable(true);
+
+		capid = xo_inst->ori_xo_capid;
+		pr_notice("original cap code: 0x%x\n", capid);
+
+		capid = xo_capid_add_offset(capid, offset);
+		XO_trim_write(capid);
+		mdelay(10);
+		xo_inst->cur_xo_capid = XO_trim_read();
+		pr_notice("write cap code 0x%x done. current cap code:0x%x\n", capid, xo_inst->cur_xo_capid);
+
+		bsi_clock_enable(false);
+	}
+
+	return size;
+}
+
+static DEVICE_ATTR(xo_board_offset, 0664, show_xo_board_offset, store_xo_board_offset);
+
+static ssize_t show_xo_cmd(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "1: status, 2/3: enter/exit LPM, 4/5: dis/en 26M to pmic, 6/7: dis/en 32K to pmic, 8/9: dis/en 26M to conn rf\n");
+}
+
+static ssize_t store_xo_cmd(struct device *dev, struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	uint32_t cmd;
+	int ret;
+
+	if (buf != NULL && size != 0) {
+		ret = kstrtouint(buf, 0, &cmd);
+		if (ret) {
+			pr_err("wrong format!\n");
+			return size;
+		}
+
+		bsi_clock_enable(true);
+
+		switch (cmd) {
+		case 1:
+			get_xo_status();
+			break;
+		case 2:
+			get_xo_status();
+			enable_xo_low_power_mode();
+			mdelay(10);
+			get_xo_status();
+			break;
+		case 3:
+			get_xo_status();
+			disable_xo_low_power_mode();
+			mdelay(10);
+			get_xo_status();
+			break;
+		case 4:
+			disable_26M_clock_to_pmic();
+			break;
+		case 5:
+			enable_26M_clock_to_pmic();
+			break;
+		case 6:
+			disable_32K_clock_to_pmic();
+			break;
+		case 7:
+			enable_32K_clock_to_pmic();
+			break;
+		case 8:
+			disable_26M_clock_to_conn_rf();
+			break;
+		case 9:
+			enable_26M_clock_to_conn_rf();
+			break;
+		default:
+			pr_notice("cmd not supported!\n");
+		}
+
+		bsi_clock_enable(false);
+	}
+
+	return size;
+}
+
+static DEVICE_ATTR(xo_cmd, 0664, show_xo_cmd, store_xo_cmd);
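+
+/*
+ * Example usage from a root shell (the sysfs path depends on the
+ * platform device name, e.g. /sys/devices/platform/<xo-node>/):
+ *   cat xo_cmd        # print the command help
+ *   echo 2 > xo_cmd   # enter XO low power mode
+ */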
+
+static ssize_t show_bsi_read(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "not supported\n");
+}
+
+static ssize_t store_bsi_read(struct device *dev, struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	uint32_t addr, value;
+	int ret;
+
+	if (buf != NULL && size != 0) {
+		ret = kstrtouint(buf, 0, &addr);
+		if (ret) {
+			pr_err("wrong format!\n");
+			return size;
+		}
+
+		bsi_clock_enable(true);
+		value = BSI_read(addr);
+		bsi_clock_enable(false);
+		pr_notice("bsi read 0x%x: 0x%x\n", addr, value);
+	}
+
+	return size;
+}
+
+static DEVICE_ATTR(bsi_read, 0664, show_bsi_read, store_bsi_read);
+
+static ssize_t show_bsi_write(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "not supported\n");
+}
+
+static ssize_t store_bsi_write(struct device *dev, struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	char temp_buf[32];
+	char *pvalue;
+	uint32_t addr, value;
+	int ret;
+
+	/* expect "<addr> <value>"; anything shorter cannot be valid */
+	if (buf == NULL || size <= 5)
+		return size;
+
+	strncpy(temp_buf, buf, sizeof(temp_buf));
+	temp_buf[sizeof(temp_buf) - 1] = 0;
+	pvalue = temp_buf;
+
+	ret = kstrtouint(strsep(&pvalue, " "), 0, &addr);
+	if (ret)
+		return ret;
+	ret = kstrtouint(pvalue, 0, &value);
+	if (ret)
+		return ret;
+
+	bsi_clock_enable(true);
+	pr_notice("bsi read 0x%x: 0x%x\n", addr, BSI_read(addr));
+	BSI_write(addr, value);
+	pr_notice("bsi write 0x%x: 0x%x\n", addr, value);
+	pr_notice("bsi read 0x%x: 0x%x\n", addr, BSI_read(addr));
+	bsi_clock_enable(false);
+
+	return size;
+}
+
+static DEVICE_ATTR(bsi_write, 0664, show_bsi_write, store_bsi_write);
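+
+/*
+ * Example usage (values are parsed by kstrtouint, so 0x prefixes work;
+ * results are printed to the kernel log):
+ *   echo 0x29 > bsi_read
+ *   echo "0x29 0x1" > bsi_write
+ */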
+
+/* for SPM driver to get cap code at suspend */
+uint32_t mt_xo_get_current_capid(void)
+{
+	return xo_inst->cur_xo_capid;
+}
+EXPORT_SYMBOL(mt_xo_get_current_capid);
+
+/* for SPM driver to get crystal status at suspend */
+bool mt_xo_has_ext_crystal(void)
+{
+	return xo_inst->has_ext_crystal;
+}
+EXPORT_SYMBOL(mt_xo_has_ext_crystal);
+
+static void xocap_work_func(struct work_struct *work)
+{
+	char xo_nvram_cap = 0;
+	uint32_t capid;
+	int ret;
+
+	pr_notice("[XO] %s\n", __func__);
+	bsi_clock_enable(true);
+
+	capid = XO_trim_read();
+
+	ret = nvram_read(XOCAP_NVRAM_FILE_NAME, &xo_nvram_cap, sizeof(unsigned char), 0);
+	if (ret < 0)
+		pr_err("nvram_read failed: %d\n", ret);
+
+	if ((u8)xo_nvram_cap > 0x7f) {
+		pr_err("offset must fit in 7 bits!\n");
+	} else {
+		capid = xo_capid_add_offset(capid, xo_nvram_cap & 0x7f);
+		XO_trim_write(capid);
+		mdelay(10);
+		xo_inst->cur_xo_capid = XO_trim_read();
+		pr_notice("current cap code(after nvram):0x%x\n", xo_inst->cur_xo_capid);
+	}
+
+	bsi_clock_enable(false);
+}
+
+static void xocap_timer_func(struct timer_list *timer)
+{
+	pr_notice("[XO] %s\n", __func__);
+	schedule_work(&xocap_work);
+}
+
+void mt_xo_init_pre(uint32_t default_capid)
+{
+	uint32_t xo_efuse;
+	uint32_t cap_code;
+	int ret;
+
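+	/*
+	 * The caller holds the BSI clocks: drop them around the mux
+	 * reparenting, then take them again before touching BSI registers.
+	 */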
+	bsi_clock_enable(false);
+	ret = clk_set_parent(xo_inst->bsi_sel_clk, xo_inst->top_26m_clk);
+	if (ret != 0)
+		pr_notice("[xo] clk_set_parent fail. ret is %d\n", ret);
+	bsi_clock_enable(true);
+
+	pr_notice("[xo] default cap_code: 0x%x\n", XO_trim_read());
+
+	xo_efuse = READ_REGISTER_UINT32(pxo_efuse);
+
+	if ((xo_efuse >> 31) & 0x1) {
+		pr_notice("[xo] get xo efuse: %x\n", xo_efuse);
+		cap_code = (xo_efuse & BITS(24, 30)) >> 24;
+
+		if ((xo_efuse >> 23) & 0x1)
+			cap_code = xo_capid_add_offset(cap_code,
+					(xo_efuse & BITS(16, 22)) >> 16);
+
+		if ((xo_efuse >> 15) & 0x1)
+			cap_code = xo_capid_add_offset(cap_code,
+					(xo_efuse & BITS(8, 14)) >> 8);
+
+		cap_code = xo_capid_add_offset(cap_code, default_capid);
+	} else {
+		pr_notice("[xo] no efuse, apply sw default cap code!\n");
+#ifdef MTK_MT8167_EVB
+		cap_code = xo_capid_add_offset(0x22, default_capid);
+#else
+		cap_code = xo_capid_add_offset(0x1c, default_capid);
+#endif
+	}
+	XO_trim_write(cap_code);
+	mdelay(10);
+
+	pr_notice("[xo] set cap_code: 0x%x\n", cap_code);
+	pr_notice("[xo] current cap_code: 0x%x\n", XO_trim_read());
+
+	/*
+	 * Audio uses the XO path, so apply the workaround setting for the
+	 * audio 26M clock.
+	 */
+	if ((BSI_read(0x25) & 0x1000) != 0) {
+		BSI_write(0x25, BSI_read(0x25) & ~(1 << 12));
+		pr_notice("[xo] BSI read: [0x25] = 0x%x\n", BSI_read(0x25));
+		BSI_write(0x29, BSI_read(0x29) | (1 << 0));
+		pr_notice("[xo] BSI read: [0x29] = 0x%x\n", BSI_read(0x29));
+		/* wait 100us for the manual XO enable pulse to settle */
+		udelay(100);
+		BSI_write(0x29, BSI_read(0x29) & ~(1 << 0));
+		pr_notice("[xo] BSI read: [0x29] = 0x%x\n", BSI_read(0x29));
+	}
+
+	get_xo_status();
+}
+
+static int mt_xo_dts_probe(struct platform_device *pdev)
+{
+	int retval = 0;
+	struct resource *res;
+	uint32_t default_capid = 0;
+
+	xo_inst = devm_kzalloc(&pdev->dev, sizeof(*xo_inst), GFP_KERNEL);
+	if (!xo_inst)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xo_inst->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(xo_inst->base))
+		return PTR_ERR(xo_inst->base);
+
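+	/*
+	 * Fixed SoC register windows: the TOP RTC32K control word and the
+	 * XO efuse word. devm_ioremap() returns NULL on failure, so the
+	 * checks below test for NULL rather than an ERR_PTR.
+	 */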
+	xo_inst->top_rtc32k = devm_ioremap(&pdev->dev, 0x10018000, PAGE_SIZE);
+	if (!xo_inst->top_rtc32k)
+		return -ENOMEM;
+
+	pxo_efuse = devm_ioremap(&pdev->dev, 0x10009264, PAGE_SIZE);
+	if (!pxo_efuse)
+		return -ENOMEM;
+
+	xo_inst->crystal_check_done = false;
+	xo_inst->dev = &pdev->dev;
+	platform_set_drvdata(pdev, xo_inst);
+
+	retval = device_create_file(&pdev->dev, &dev_attr_bsi_read);
+	if (retval != 0)
+		dev_dbg(&pdev->dev, "fail to create file: %d\n", retval);
+
+	retval = device_create_file(&pdev->dev, &dev_attr_bsi_write);
+	if (retval != 0)
+		dev_dbg(&pdev->dev, "fail to create file: %d\n", retval);
+
+	retval = device_create_file(&pdev->dev, &dev_attr_xo_nvram_board_offset);
+	if (retval != 0)
+		dev_dbg(&pdev->dev, "fail to create file: %d\n", retval);
+
+	retval = device_create_file(&pdev->dev, &dev_attr_xo_capid);
+	if (retval != 0)
+		dev_dbg(&pdev->dev, "fail to create file: %d\n", retval);
+
+	retval = device_create_file(&pdev->dev, &dev_attr_xo_cmd);
+	if (retval != 0)
+		dev_dbg(&pdev->dev, "fail to create cmd file: %d\n", retval);
+
+	retval = device_create_file(&pdev->dev, &dev_attr_xo_board_offset);
+	if (retval != 0)
+		dev_dbg(&pdev->dev, "fail to create offset file: %d\n", retval);
+
+	xo_inst->bsi_clk = devm_clk_get(&pdev->dev, "bsi");
+	if (IS_ERR(xo_inst->bsi_clk)) {
+		dev_err(&pdev->dev, "fail to get bsi clock: %ld\n", PTR_ERR(xo_inst->bsi_clk));
+		return PTR_ERR(xo_inst->bsi_clk);
+	}
+
+	xo_inst->rg_bsi_clk = devm_clk_get(&pdev->dev, "rgbsi");
+	if (IS_ERR(xo_inst->rg_bsi_clk)) {
+		dev_err(&pdev->dev, "fail to get rgbsi clock: %ld\n", PTR_ERR(xo_inst->rg_bsi_clk));
+		return PTR_ERR(xo_inst->rg_bsi_clk);
+	}
+
+	xo_inst->bsi_sel_clk = devm_clk_get(&pdev->dev, "bsisel");
+	if (IS_ERR(xo_inst->bsi_sel_clk)) {
+		dev_err(&pdev->dev, "fail to get bsi_sel clock: %ld\n", PTR_ERR(xo_inst->bsi_sel_clk));
+		return PTR_ERR(xo_inst->bsi_sel_clk);
+	}
+
+	xo_inst->top_26m_clk = devm_clk_get(&pdev->dev, "clk26m");
+	if (IS_ERR(xo_inst->top_26m_clk)) {
+		dev_err(&pdev->dev, "fail to get top_26m clock: %ld\n", PTR_ERR(xo_inst->top_26m_clk));
+		return PTR_ERR(xo_inst->top_26m_clk);
+	}
+
+	bsi_clock_enable(true);
+
+	retval = of_property_read_u32(xo_inst->dev->of_node, "default_capid", &default_capid);
+	if (retval != 0) {
+		dev_err(&pdev->dev, "fail to get default_capid from dts: %d\n", retval);
+		default_capid = 0;
+		retval = 0;
+	}
+	default_capid = default_capid & 0x7f;
+	pr_notice("[xo] dts default cap code: 0x%x\n", default_capid);
+	mt_xo_init_pre(default_capid);
+	xo_inst->cur_xo_capid = XO_trim_read();
+	xo_inst->ori_xo_capid = XO_trim_read();
+	pr_notice("[xo] current cap code: 0x%x\n", xo_inst->cur_xo_capid);
+
+	bsi_clock_enable(false);
+
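+	/*
+	 * The NVRAM-backed board offset is applied from a work item kicked
+	 * by a timer 5s after probe, once the NVRAM store is expected to
+	 * be available.
+	 */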
+	INIT_WORK(&xocap_work, xocap_work_func);
+	timer_setup(&xocap_timer, xocap_timer_func, 0);
+	mod_timer(&xocap_timer, jiffies + msecs_to_jiffies(5000));
+
+	return retval;
+}
+
+static int mt_xo_dts_remove(struct platform_device *pdev)
+{
+	del_timer_sync(&xocap_timer);
+	cancel_work_sync(&xocap_work);
+
+	return 0;
+}
+
+static int xo_pm_suspend(struct device *device)
+{
+	if (!xo_inst->crystal_check_done) {
+		xo_inst->has_ext_crystal = !mtk_misc_crystal_exist_status();
+		xo_inst->crystal_check_done = true;
+
+		/* let XO use external RTC32K */
+		if (xo_inst->has_ext_crystal)
+			WRITE_REGISTER_UINT32(xo_inst->top_rtc32k, READ_REGISTER_UINT32(xo_inst->top_rtc32k) | (1<<10));
+	}
+
+	return 0;
+}
+
+static int xo_pm_resume(struct device *device)
+{
+	uint32_t value = 0;
+
+	/* re-setting XO audio path for external 32k */
+	if (xo_inst->has_ext_crystal) {
+		bsi_clock_enable(true);
+		/* RG_XO2AUDIO_XO_EN = 0 */
+		value = BSI_read(0x25) & ~(1 << 12);
+		BSI_write(0x25, value);
+		/* XO_EN_MAN = 1 */
+		value = BSI_read(0x29) | (1 << 0);
+		BSI_write(0x29, value);
+		/* delay 100us */
+		udelay(100);
+		/* XO_EN_MAN = 0 */
+		value = BSI_read(0x29) & ~(1 << 0);
+		BSI_write(0x29, value);
+		bsi_clock_enable(false);
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops xo_pm_ops = {
+	.suspend = xo_pm_suspend,
+	.resume = xo_pm_resume,
+};
+
+static struct platform_driver mt_xo_driver = {
+	.remove		= mt_xo_dts_remove,
+	.probe		= mt_xo_dts_probe,
+	.driver		= {
+		.name	= "mt_dts_xo",
+		.of_match_table = apxo_of_ids,
+		.pm = &xo_pm_ops,
+	},
+};
+
+static int __init mt_xo_init(void)
+{
+	return platform_driver_register(&mt_xo_driver);
+}
+
+module_init(mt_xo_init);
+
+static void __exit mt_xo_exit(void)
+{
+	platform_driver_unregister(&mt_xo_driver);
+}
+module_exit(mt_xo_exit);
+
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 9ecf86b..5069970 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -87,6 +87,13 @@
 #define SDC_FIFO_CFG     0x228
 
 /*--------------------------------------------------------------------------*/
+/* Top Pad Register Offset                                                  */
+/*--------------------------------------------------------------------------*/
+#define EMMC_TOP_CONTROL	0x00
+#define EMMC_TOP_CMD		0x04
+#define EMMC50_PAD_DS_TUNE	0x0c
+
+/*--------------------------------------------------------------------------*/
 /* Register Mask                                                            */
 /*--------------------------------------------------------------------------*/
 
@@ -262,6 +269,23 @@
 #define SDC_FIFO_CFG_WRVALIDSEL   (0x1 << 24)  /* RW */
 #define SDC_FIFO_CFG_RDVALIDSEL   (0x1 << 25)  /* RW */
 
+/* EMMC_TOP_CONTROL mask */
+#define PAD_RXDLY_SEL           (0x1 << 0)      /* RW */
+#define DELAY_EN                (0x1 << 1)      /* RW */
+#define PAD_DAT_RD_RXDLY2       (0x1f << 2)     /* RW */
+#define PAD_DAT_RD_RXDLY        (0x1f << 7)     /* RW */
+#define PAD_DAT_RD_RXDLY2_SEL   (0x1 << 12)     /* RW */
+#define PAD_DAT_RD_RXDLY_SEL    (0x1 << 13)     /* RW */
+#define DATA_K_VALUE_SEL        (0x1 << 14)     /* RW */
+#define SDC_RX_ENH_EN           (0x1 << 15)     /* RW */
+
+/* EMMC_TOP_CMD mask */
+#define PAD_CMD_RXDLY2          (0x1f << 0)     /* RW */
+#define PAD_CMD_RXDLY           (0x1f << 5)     /* RW */
+#define PAD_CMD_RD_RXDLY2_SEL   (0x1 << 10)     /* RW */
+#define PAD_CMD_RD_RXDLY_SEL    (0x1 << 11)     /* RW */
+#define PAD_CMD_TX_DLY          (0x1f << 12)    /* RW */
+
 #define REQ_CMD_EIO  (0x1 << 0)
 #define REQ_CMD_TMO  (0x1 << 1)
 #define REQ_DAT_ERR  (0x1 << 2)
@@ -277,6 +301,8 @@
 #define CMD_TIMEOUT         (HZ/10 * 5)	/* 100ms x5 */
 #define DAT_TIMEOUT         (HZ    * 5)	/* 1000ms x5 */
 
+#define DEFAULT_DEBOUNCE	(8)	/* 8 cycles CD debounce */
+
 #define PAD_DELAY_MAX	32 /* PAD delay cells */
 /*--------------------------------------------------------------------------*/
 /* Descriptor Structure                                                     */
@@ -334,6 +360,9 @@
 	u32 emmc50_cfg0;
 	u32 emmc50_cfg3;
 	u32 sdc_fifo_cfg;
+	u32 emmc_top_control;
+	u32 emmc_top_cmd;
+	u32 emmc50_pad_ds_tune;
 };
 
 struct mtk_mmc_compatible {
@@ -346,12 +375,15 @@
 	bool stop_clk_fix;
 	bool enhance_rx;
 	bool support_64g;
+	bool use_internal_cd;
 };
 
 struct msdc_tune_para {
 	u32 iocon;
 	u32 pad_tune;
 	u32 pad_cmd_tune;
+	u32 emmc_top_control;
+	u32 emmc_top_cmd;
 };
 
 struct msdc_delay_phase {
@@ -373,6 +405,7 @@
 	int error;
 
 	void __iomem *base;		/* host base address */
+	void __iomem *top_base;		/* host top register base address */
 
 	struct msdc_dma dma;	/* dma channel */
 	u64 dma_mask;
@@ -388,6 +421,7 @@
 
 	struct clk *src_clk;	/* msdc source clock */
 	struct clk *h_clk;      /* msdc h_clk */
+	struct clk *bus_clk;	/* bus clock which used to access register */
 	struct clk *src_clk_cg; /* msdc source clock control gate */
 	u32 mclk;		/* mmc subsystem clock frequency */
 	u32 src_clk_freq;	/* source clock frequency */
@@ -400,6 +434,7 @@
 	bool hs400_cmd_resp_sel_rising;
 				 /* cmd response sample selection for HS400 */
 	bool hs400_mode;	/* current eMMC will run at hs400 mode */
+	bool internal_cd;	/* Use internal card-detect logic */
 	struct msdc_save_para save_para; /* used when gate HCLK */
 	struct msdc_tune_para def_tune_para; /* default tune setting */
 	struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
@@ -429,6 +464,18 @@
 	.support_64g = false,
 };
 
+static const struct mtk_mmc_compatible mt8183_compat = {
+	.clk_div_bits = 12,
+	.hs400_tune = false,
+	.pad_tune_reg = MSDC_PAD_TUNE0,
+	.async_fifo = true,
+	.data_tune = true,
+	.busy_check = true,
+	.stop_clk_fix = true,
+	.enhance_rx = true,
+	.support_64g = true,
+};
+
 static const struct mtk_mmc_compatible mt2701_compat = {
 	.clk_div_bits = 12,
 	.hs400_tune = false,
@@ -465,12 +512,37 @@
 	.support_64g = false,
 };
 
+static const struct mtk_mmc_compatible mt8516_compat = {
+	.clk_div_bits = 12,
+	.hs400_tune = false,
+	.pad_tune_reg = MSDC_PAD_TUNE0,
+	.async_fifo = true,
+	.data_tune = true,
+	.busy_check = true,
+	.stop_clk_fix = true,
+};
+
+static const struct mtk_mmc_compatible mt7620_compat = {
+	.clk_div_bits = 8,
+	.hs400_tune = false,
+	.pad_tune_reg = MSDC_PAD_TUNE,
+	.async_fifo = false,
+	.data_tune = false,
+	.busy_check = false,
+	.stop_clk_fix = false,
+	.enhance_rx = false,
+	.use_internal_cd = true,
+};
+
 static const struct of_device_id msdc_of_ids[] = {
 	{ .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
 	{ .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
+	{ .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
 	{ .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
 	{ .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
 	{ .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
+	{ .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},
+	{ .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
 	{}
 };
 MODULE_DEVICE_TABLE(of, msdc_of_ids);
@@ -660,12 +732,14 @@
 {
 	clk_disable_unprepare(host->src_clk_cg);
 	clk_disable_unprepare(host->src_clk);
+	clk_disable_unprepare(host->bus_clk);
 	clk_disable_unprepare(host->h_clk);
 }
 
 static void msdc_ungate_clock(struct msdc_host *host)
 {
 	clk_prepare_enable(host->h_clk);
+	clk_prepare_enable(host->bus_clk);
 	clk_prepare_enable(host->src_clk);
 	clk_prepare_enable(host->src_clk_cg);
 	while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
@@ -775,12 +849,28 @@
 	 */
 	if (host->mmc->actual_clock <= 52000000) {
 		writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
-		writel(host->def_tune_para.pad_tune, host->base + tune_reg);
+		if (host->top_base) {
+			writel(host->def_tune_para.emmc_top_control,
+			       host->top_base + EMMC_TOP_CONTROL);
+			writel(host->def_tune_para.emmc_top_cmd,
+			       host->top_base + EMMC_TOP_CMD);
+		} else {
+			writel(host->def_tune_para.pad_tune,
+			       host->base + tune_reg);
+		}
 	} else {
 		writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
-		writel(host->saved_tune_para.pad_tune, host->base + tune_reg);
 		writel(host->saved_tune_para.pad_cmd_tune,
 		       host->base + PAD_CMD_TUNE);
+		if (host->top_base) {
+			writel(host->saved_tune_para.emmc_top_control,
+			       host->top_base + EMMC_TOP_CONTROL);
+			writel(host->saved_tune_para.emmc_top_cmd,
+			       host->top_base + EMMC_TOP_CMD);
+		} else {
+			writel(host->saved_tune_para.pad_tune,
+			       host->base + tune_reg);
+		}
 	}
 
 	if (timing == MMC_TIMING_MMC_HS400 &&
@@ -942,6 +1032,8 @@
 	msdc_track_cmd_data(host, mrq->cmd, mrq->data);
 	if (mrq->data)
 		msdc_unprepare_data(host, mrq);
+	if (host->error)
+		msdc_reset_hw(host);
 	mmc_request_done(host->mmc, mrq);
 }
 
@@ -1053,6 +1145,7 @@
 		struct mmc_request *mrq, struct mmc_command *cmd)
 {
 	u32 rawcmd;
+	unsigned long flags;
 
 	WARN_ON(host->cmd);
 	host->cmd = cmd;
@@ -1070,7 +1163,10 @@
 	cmd->error = 0;
 	rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
 
+	spin_lock_irqsave(&host->lock, flags);
 	sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
+	spin_unlock_irqrestore(&host->lock, flags);
+
 	writel(cmd->arg, host->base + SDC_ARG);
 	writel(rawcmd, host->base + SDC_CMD);
 }
@@ -1290,6 +1386,32 @@
 	}
 }
 
+static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
+{
+	if (enb) {
+		sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
+		sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+	} else {
+		sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
+		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+	}
+}
+
+static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
+{
+	unsigned long flags;
+	struct msdc_host *host = mmc_priv(mmc);
+
+	spin_lock_irqsave(&host->lock, flags);
+	__msdc_enable_sdio_irq(host, enb);
+	spin_unlock_irqrestore(&host->lock, flags);
+
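+	/*
+	 * Hold a runtime-PM reference while the SDIO IRQ is enabled so the
+	 * host is not runtime-suspended while the card may still signal an
+	 * interrupt.
+	 */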
+	if (enb)
+		pm_runtime_get_noresume(host->dev);
+	else
+		pm_runtime_put_noidle(host->dev);
+}
+
 static irqreturn_t msdc_irq(int irq, void *dev_id)
 {
 	struct msdc_host *host = (struct msdc_host *) dev_id;
@@ -1304,6 +1426,8 @@
 		spin_lock_irqsave(&host->lock, flags);
 		events = readl(host->base + MSDC_INT);
 		event_mask = readl(host->base + MSDC_INTEN);
+		if ((events & event_mask) & MSDC_INT_SDIOIRQ)
+			__msdc_enable_sdio_irq(host, 0);
 		/* clear interrupts */
 		writel(events & event_mask, host->base + MSDC_INT);
 
@@ -1312,7 +1436,16 @@
 		data = host->data;
 		spin_unlock_irqrestore(&host->lock, flags);
 
-		if (!(events & event_mask))
+		if ((events & event_mask) & MSDC_INT_SDIOIRQ)
+			sdio_signal_irq(host->mmc);
+
+		if ((events & event_mask) & MSDC_INT_CDSC) {
+			if (host->internal_cd)
+				mmc_detect_change(host->mmc, msecs_to_jiffies(20));
+			events &= ~MSDC_INT_CDSC;
+		}
+
+		if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
 			break;
 
 		if (!mrq) {
@@ -1345,15 +1478,30 @@
 	/* Reset */
 	msdc_reset_hw(host);
 
-	/* Disable card detection */
-	sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
-
 	/* Disable and clear all interrupts */
 	writel(0, host->base + MSDC_INTEN);
 	val = readl(host->base + MSDC_INT);
 	writel(val, host->base + MSDC_INT);
 
-	writel(0, host->base + tune_reg);
+	/* Configure card detection */
+	if (host->internal_cd) {
+		sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE,
+			      DEFAULT_DEBOUNCE);
+		sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
+		sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
+		sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
+	} else {
+		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
+		sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
+		sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
+	}
+
+	if (host->top_base) {
+		writel(0, host->top_base + EMMC_TOP_CONTROL);
+		writel(0, host->top_base + EMMC_TOP_CMD);
+	} else {
+		writel(0, host->base + tune_reg);
+	}
 	writel(0, host->base + MSDC_IOCON);
 	sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
 	writel(0x403c0046, host->base + MSDC_PATCH_BIT);
@@ -1377,8 +1525,12 @@
 		sdr_set_field(host->base + MSDC_PATCH_BIT2,
 			      MSDC_PB2_RESPWAIT, 3);
 		if (host->dev_comp->enhance_rx) {
-			sdr_set_bits(host->base + SDC_ADV_CFG0,
-				     SDC_RX_ENHANCE_EN);
+			if (host->top_base)
+				sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
+					     SDC_RX_ENH_EN);
+			else
+				sdr_set_bits(host->base + SDC_ADV_CFG0,
+					     SDC_RX_ENHANCE_EN);
 		} else {
 			sdr_set_field(host->base + MSDC_PATCH_BIT2,
 				      MSDC_PB2_RESPSTSENSEL, 2);
@@ -1396,11 +1548,26 @@
 		sdr_set_bits(host->base + MSDC_PATCH_BIT2,
 			     MSDC_PB2_SUPPORT_64G);
 	if (host->dev_comp->data_tune) {
-		sdr_set_bits(host->base + tune_reg,
-			     MSDC_PAD_TUNE_RD_SEL | MSDC_PAD_TUNE_CMD_SEL);
+		if (host->top_base) {
+			sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
+				     PAD_DAT_RD_RXDLY_SEL);
+			sdr_clr_bits(host->top_base + EMMC_TOP_CONTROL,
+				     DATA_K_VALUE_SEL);
+			sdr_set_bits(host->top_base + EMMC_TOP_CMD,
+				     PAD_CMD_RD_RXDLY_SEL);
+		} else {
+			sdr_set_bits(host->base + tune_reg,
+				     MSDC_PAD_TUNE_RD_SEL |
+				     MSDC_PAD_TUNE_CMD_SEL);
+		}
 	} else {
 		/* choose clock tune */
-		sdr_set_bits(host->base + tune_reg, MSDC_PAD_TUNE_RXDLYSEL);
+		if (host->top_base)
+			sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
+				     PAD_RXDLY_SEL);
+		else
+			sdr_set_bits(host->base + tune_reg,
+				     MSDC_PAD_TUNE_RXDLYSEL);
 	}
 
 	/* Configure to enable SDIO mode.
@@ -1408,22 +1575,40 @@
 	 */
 	sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
 
-	/* disable detect SDIO device interrupt function */
+	/* SDIO device-detect interrupt: off by default, enabled on demand */
 	sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
 
 	/* Configure to default data timeout */
 	sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
 
 	host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
-	host->def_tune_para.pad_tune = readl(host->base + tune_reg);
 	host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
-	host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
+	if (host->top_base) {
+		host->def_tune_para.emmc_top_control =
+			readl(host->top_base + EMMC_TOP_CONTROL);
+		host->def_tune_para.emmc_top_cmd =
+			readl(host->top_base + EMMC_TOP_CMD);
+		host->saved_tune_para.emmc_top_control =
+			readl(host->top_base + EMMC_TOP_CONTROL);
+		host->saved_tune_para.emmc_top_cmd =
+			readl(host->top_base + EMMC_TOP_CMD);
+	} else {
+		host->def_tune_para.pad_tune = readl(host->base + tune_reg);
+		host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
+	}
 	dev_dbg(host->dev, "init hardware done!");
 }
 
 static void msdc_deinit_hw(struct msdc_host *host)
 {
 	u32 val;
+
+	if (host->internal_cd) {
+		/* Disable card detect */
+		sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
+		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
+	}
+
 	/* Disable and clear all interrupts */
 	writel(0, host->base + MSDC_INTEN);
 
@@ -1565,6 +1750,30 @@
 	return delay_phase;
 }
 
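+/*
+ * Newer MSDC IP versions move the pad delay fields into the separate
+ * "top" register block; these helpers hide which block is in use.
+ */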
+static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value)
+{
+	u32 tune_reg = host->dev_comp->pad_tune_reg;
+
+	if (host->top_base)
+		sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY,
+			      value);
+	else
+		sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
+			      value);
+}
+
+static inline void msdc_set_data_delay(struct msdc_host *host, u32 value)
+{
+	u32 tune_reg = host->dev_comp->pad_tune_reg;
+
+	if (host->top_base)
+		sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
+			      PAD_DAT_RD_RXDLY, value);
+	else
+		sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY,
+			      value);
+}
+
 static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
 {
 	struct msdc_host *host = mmc_priv(mmc);
@@ -1585,8 +1794,7 @@
 
 	sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
 	for (i = 0 ; i < PAD_DELAY_MAX; i++) {
-		sdr_set_field(host->base + tune_reg,
-			      MSDC_PAD_TUNE_CMDRDLY, i);
+		msdc_set_cmd_delay(host, i);
 		/*
 		 * Using the same parameters, it may sometimes pass the test,
 		 * but sometimes it may fail. To make sure the parameters are
@@ -1610,8 +1818,7 @@
 
 	sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
 	for (i = 0; i < PAD_DELAY_MAX; i++) {
-		sdr_set_field(host->base + tune_reg,
-			      MSDC_PAD_TUNE_CMDRDLY, i);
+		msdc_set_cmd_delay(host, i);
 		/*
 		 * Using the same parameters, it may sometimes pass the test,
 		 * but sometimes it may fail. To make sure the parameters are
@@ -1635,15 +1842,13 @@
 		final_maxlen = final_fall_delay.maxlen;
 	if (final_maxlen == final_rise_delay.maxlen) {
 		sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
-		sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
-			      final_rise_delay.final_phase);
 		final_delay = final_rise_delay.final_phase;
 	} else {
 		sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
-		sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
-			      final_fall_delay.final_phase);
 		final_delay = final_fall_delay.final_phase;
 	}
+	msdc_set_cmd_delay(host, final_delay);
+
 	if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
 		goto skip_internal;
 
@@ -1719,7 +1924,6 @@
 	u32 rise_delay = 0, fall_delay = 0;
 	struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
 	u8 final_delay, final_maxlen;
-	u32 tune_reg = host->dev_comp->pad_tune_reg;
 	int i, ret;
 
 	sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
@@ -1727,8 +1931,7 @@
 	sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
 	sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
 	for (i = 0 ; i < PAD_DELAY_MAX; i++) {
-		sdr_set_field(host->base + tune_reg,
-			      MSDC_PAD_TUNE_DATRRDLY, i);
+		msdc_set_data_delay(host, i);
 		ret = mmc_send_tuning(mmc, opcode, NULL);
 		if (!ret)
 			rise_delay |= (1 << i);
@@ -1742,8 +1945,7 @@
 	sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
 	sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
 	for (i = 0; i < PAD_DELAY_MAX; i++) {
-		sdr_set_field(host->base + tune_reg,
-			      MSDC_PAD_TUNE_DATRRDLY, i);
+		msdc_set_data_delay(host, i);
 		ret = mmc_send_tuning(mmc, opcode, NULL);
 		if (!ret)
 			fall_delay |= (1 << i);
@@ -1755,20 +1957,79 @@
 	if (final_maxlen == final_rise_delay.maxlen) {
 		sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
 		sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
-		sdr_set_field(host->base + tune_reg,
-			      MSDC_PAD_TUNE_DATRRDLY,
-			      final_rise_delay.final_phase);
 		final_delay = final_rise_delay.final_phase;
 	} else {
 		sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
 		sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
-		sdr_set_field(host->base + tune_reg,
-			      MSDC_PAD_TUNE_DATRRDLY,
-			      final_fall_delay.final_phase);
+		final_delay = final_fall_delay.final_phase;
+	}
+	msdc_set_data_delay(host, final_delay);
+
+	dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay);
+	return final_delay == 0xff ? -EIO : 0;
+}
+
+/*
+ * MSDC IP which supports data tune + async fifo can do CMD/DAT tune
+ * together, which can save the tuning time.
+ */
+static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
+{
+	struct msdc_host *host = mmc_priv(mmc);
+	u32 rise_delay = 0, fall_delay = 0;
+	struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
+	u8 final_delay, final_maxlen;
+	int i, ret;
+
+	sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
+		      host->latch_ck);
+
+	sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+	sdr_clr_bits(host->base + MSDC_IOCON,
+		     MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
+	for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+		msdc_set_cmd_delay(host, i);
+		msdc_set_data_delay(host, i);
+		ret = mmc_send_tuning(mmc, opcode, NULL);
+		if (!ret)
+			rise_delay |= (1 << i);
+	}
+	final_rise_delay = get_best_delay(host, rise_delay);
+	/* if rising edge has enough margin, then do not scan falling edge */
+	if (final_rise_delay.maxlen >= 12 ||
+	    (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
+		goto skip_fall;
+
+	sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+	sdr_set_bits(host->base + MSDC_IOCON,
+		     MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
+	for (i = 0; i < PAD_DELAY_MAX; i++) {
+		msdc_set_cmd_delay(host, i);
+		msdc_set_data_delay(host, i);
+		ret = mmc_send_tuning(mmc, opcode, NULL);
+		if (!ret)
+			fall_delay |= (1 << i);
+	}
+	final_fall_delay = get_best_delay(host, fall_delay);
+
+skip_fall:
+	final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
+	if (final_maxlen == final_rise_delay.maxlen) {
+		sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+		sdr_clr_bits(host->base + MSDC_IOCON,
+			     MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
+		final_delay = final_rise_delay.final_phase;
+	} else {
+		sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+		sdr_set_bits(host->base + MSDC_IOCON,
+			     MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
 		final_delay = final_fall_delay.final_phase;
 	}
 
-	dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay);
+	msdc_set_cmd_delay(host, final_delay);
+	msdc_set_data_delay(host, final_delay);
+
+	dev_dbg(host->dev, "Final pad delay: %x\n", final_delay);
 	return final_delay == 0xff ? -EIO : 0;
 }
 
@@ -1778,6 +2039,15 @@
 	int ret;
 	u32 tune_reg = host->dev_comp->pad_tune_reg;
 
+	if (host->dev_comp->data_tune && host->dev_comp->async_fifo) {
+		ret = msdc_tune_together(mmc, opcode);
+		if (host->hs400_mode) {
+			sdr_clr_bits(host->base + MSDC_IOCON,
+				     MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
+			msdc_set_data_delay(host, 0);
+		}
+		goto tune_done;
+	}
 	if (host->hs400_mode &&
 	    host->dev_comp->hs400_tune)
 		ret = hs400_tune_response(mmc, opcode);
@@ -1793,9 +2063,16 @@
 			dev_err(host->dev, "Tune data fail!\n");
 	}
 
+tune_done:
 	host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
 	host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
 	host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
+	if (host->top_base) {
+		host->saved_tune_para.emmc_top_control = readl(host->top_base +
+				EMMC_TOP_CONTROL);
+		host->saved_tune_para.emmc_top_cmd = readl(host->top_base +
+				EMMC_TOP_CMD);
+	}
 	return ret;
 }
 
@@ -1804,7 +2081,11 @@
 	struct msdc_host *host = mmc_priv(mmc);
 	host->hs400_mode = true;
 
-	writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+	if (host->top_base)
+		writel(host->hs400_ds_delay,
+		       host->top_base + EMMC50_PAD_DS_TUNE);
+	else
+		writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
 	/* hs400 mode must set it to 0 */
 	sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
 	/* to improve read performance, set outstanding to 2 */
@@ -1822,13 +2103,43 @@
 	sdr_clr_bits(host->base + EMMC_IOCON, 1);
 }
 
+static void msdc_ack_sdio_irq(struct mmc_host *mmc)
+{
+	unsigned long flags;
+	struct msdc_host *host = mmc_priv(mmc);
+
+	spin_lock_irqsave(&host->lock, flags);
+	__msdc_enable_sdio_irq(host, 1);
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int msdc_get_cd(struct mmc_host *mmc)
+{
+	struct msdc_host *host = mmc_priv(mmc);
+	int val;
+
+	if (mmc->caps & MMC_CAP_NONREMOVABLE)
+		return 1;
+
+	if (!host->internal_cd)
+		return mmc_gpio_get_cd(mmc);
+
+	val = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS;
+	if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
+		return !!val;
+	else
+		return !val;
+}
+
 static const struct mmc_host_ops mt_msdc_ops = {
 	.post_req = msdc_post_req,
 	.pre_req = msdc_pre_req,
 	.request = msdc_ops_request,
 	.set_ios = msdc_ops_set_ios,
 	.get_ro = mmc_gpio_get_ro,
-	.get_cd = mmc_gpio_get_cd,
+	.get_cd = msdc_get_cd,
+	.enable_sdio_irq = msdc_enable_sdio_irq,
+	.ack_sdio_irq = msdc_ack_sdio_irq,
 	.start_signal_voltage_switch = msdc_ops_switch_volt,
 	.card_busy = msdc_card_busy,
 	.execute_tuning = msdc_execute_tuning,
@@ -1887,6 +2198,13 @@
 		goto host_free;
 	}
 
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res) {
+		host->top_base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(host->top_base))
+			host->top_base = NULL;
+	}
+
 	ret = mmc_regulator_get_supply(mmc);
 	if (ret)
 		goto host_free;
@@ -1903,6 +2221,9 @@
 		goto host_free;
 	}
 
+	host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(host->bus_clk))
+		host->bus_clk = NULL;
 	/*source clock control gate is optional clock*/
 	host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg");
 	if (IS_ERR(host->src_clk_cg))
@@ -1948,6 +2269,19 @@
 	else
 		mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
 
+	if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
+	    !mmc_can_gpio_cd(mmc) &&
+	    host->dev_comp->use_internal_cd) {
+		/*
+		 * Is removable but no GPIO declared, so
+		 * use internal functionality.
+		 */
+		host->internal_cd = true;
+	}
+
+	if (mmc->caps & MMC_CAP_SDIO_IRQ)
+		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
 	mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
 	/* MMC core transfer sizes tunable parameters */
 	mmc->max_segs = MAX_BD_NUM;
@@ -1981,7 +2315,7 @@
 	msdc_init_hw(host);
 
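+	/*
+	 * IRQF_TRIGGER_NONE: keep the trigger type that the firmware/DT
+	 * interrupt description has already configured for this line.
+	 */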
 	ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
-		IRQF_TRIGGER_LOW | IRQF_ONESHOT, pdev->name, host);
+			       IRQF_TRIGGER_NONE, pdev->name, host);
 	if (ret)
 		goto release;
 
@@ -2052,7 +2386,6 @@
 	host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
 	host->save_para.iocon = readl(host->base + MSDC_IOCON);
 	host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
-	host->save_para.pad_tune = readl(host->base + tune_reg);
 	host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
 	host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
 	host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
@@ -2061,6 +2394,16 @@
 	host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
 	host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3);
 	host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG);
+	if (host->top_base) {
+		host->save_para.emmc_top_control =
+			readl(host->top_base + EMMC_TOP_CONTROL);
+		host->save_para.emmc_top_cmd =
+			readl(host->top_base + EMMC_TOP_CMD);
+		host->save_para.emmc50_pad_ds_tune =
+			readl(host->top_base + EMMC50_PAD_DS_TUNE);
+	} else {
+		host->save_para.pad_tune = readl(host->base + tune_reg);
+	}
 }
 
 static void msdc_restore_reg(struct msdc_host *host)
@@ -2070,7 +2413,6 @@
 	writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
 	writel(host->save_para.iocon, host->base + MSDC_IOCON);
 	writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
-	writel(host->save_para.pad_tune, host->base + tune_reg);
 	writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
 	writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
 	writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
@@ -2079,6 +2421,16 @@
 	writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
 	writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3);
 	writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG);
+	if (host->top_base) {
+		writel(host->save_para.emmc_top_control,
+		       host->top_base + EMMC_TOP_CONTROL);
+		writel(host->save_para.emmc_top_cmd,
+		       host->top_base + EMMC_TOP_CMD);
+		writel(host->save_para.emmc50_pad_ds_tune,
+		       host->top_base + EMMC50_PAD_DS_TUNE);
+	} else {
+		writel(host->save_para.pad_tune, host->base + tune_reg);
+	}
 }
 
 static int msdc_runtime_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index f9149d2..e1d46cd 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -14,4 +14,14 @@
 	  This driver supports the gigabit ethernet MACs in the
 	  MediaTek SoC family.
 
+config NET_MEDIATEK_STAR
+	tristate "Mediatek STAR MAC ethernet support"
+	select MII
+	---help---
+	  If you have a network (Ethernet) chipset belonging to this class,
+	  say Y.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called star_eth.
+
 endif #NET_VENDOR_MEDIATEK
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index aa3f1c8..01386fb 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -2,4 +2,6 @@
 # Makefile for the Mediatek SoCs built-in ethernet macs
 #
 
+obj-$(CONFIG_NET_MEDIATEK_STAR) += star/
+
 obj-$(CONFIG_NET_MEDIATEK_SOC)			+= mtk_eth_soc.o
diff --git a/drivers/net/ethernet/mediatek/star/Makefile b/drivers/net/ethernet/mediatek/star/Makefile
new file mode 100644
index 0000000..efbe65c
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/star/Makefile
@@ -0,0 +1,10 @@
+
+#
+# Makefile for the MediaTek STAR ethernet driver
+#
+
+subdir-ccflags-y += -I$(srctree)/drivers/misc/mediatek/base/power/mt8167
+
+obj-$(CONFIG_NET_MEDIATEK_STAR) += star_eth.o
+star_eth-objs := star.o star_mac.o star_phy.o star_procfs.o
+
diff --git a/drivers/net/ethernet/mediatek/star/star.c b/drivers/net/ethernet/mediatek/star/star.c
new file mode 100644
index 0000000..dac5ba6
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/star/star.c
@@ -0,0 +1,1141 @@
+/* Mediatek STAR MAC network driver.
+ *
+ * Copyright (c) 2016-2017 Mediatek Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/of_net.h>
+
+#include "star.h"
+#include "star_procfs.h"
+#include "mtk_spm_sleep.h"
+
+#define STAR_DRV_NAME	"star-eth"
+#define STAR_DRV_VERSION "version-1.0"
+#define ETH_WOL_NAME "WOL"
+
+int star_dbg_level = STAR_ERR;
+
+static void star_finish_xmit(struct net_device *dev);
+
+static struct sk_buff *get_skb(struct net_device *ndev)
+{
+	unsigned char *tail;
+	u32 offset;
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(ndev->mtu + ETH_EXTRA_PKT_LEN);
+	if (!skb)
+		return NULL;
+
+	/* Shift to 16-byte alignment; right after dev_alloc_skb(),
+	 * skb->tail and skb->data point to the same address.
+	 */
+	tail = skb_tail_pointer(skb);
+	if (((uintptr_t)tail) & (ETH_SKB_ALIGNMENT - 1)) {
+		offset = ((uintptr_t)tail) & (ETH_SKB_ALIGNMENT - 1);
+		skb_reserve(skb, ETH_SKB_ALIGNMENT - offset);
+	}
+
+	/* Reserve 2 bytes for zero copy.
+	 * eth_type_trans() pulls the 14-byte Ethernet header, so with
+	 * these 2 bytes reserved skb->data ends up 16-byte aligned
+	 * before the packet is handed to the upper layers.
+	 */
+	skb_reserve(skb, 2);
+
+	return skb;
+}
+
+/* pre-allocate Rx buffer */
+static int alloc_rx_skbs(star_dev *star_dev)
+{
+	int retval;
+	star_private *star_prv = star_dev->star_prv;
+
+	do {
+		u32 dmaBuf;
+		struct sk_buff *skb = get_skb(star_prv->dev);
+
+		if (!skb) {
+			STAR_MSG(STAR_ERR, "Error! No memory for rx sk_buff\n");
+			return -ENOMEM;
+		}
+
+		/* Note:
+		 * We hand the DMA engine skb->tail - 2 (a 4N-aligned
+		 * address) because the Star Ethernet buffer must be
+		 * 16-byte aligned, while RX_OFFSET_2B_DIS must be 0 so
+		 * the DMA actually writes at tail, the 4N+2 address.
+		 */
+		dmaBuf = dma_map_single(star_dev->dev,
+					skb_tail_pointer(skb) - 2,
+					skb_tailroom(skb),
+					DMA_FROM_DEVICE);
+		if (dma_mapping_error(star_dev->dev, dmaBuf)) {
+			STAR_MSG(STAR_ERR, "dma_mapping_error error\n");
+			return -ENOMEM;
+		}
+
+		retval = star_dma_rx_set(star_dev, dmaBuf,
+					 skb_tailroom(skb), (uintptr_t)skb);
+		STAR_MSG(STAR_VERB, "rx descriptor idx(%d) for skb(%p)\n",
+			 retval, skb);
+		if (retval < 0) {
+			dma_unmap_single(star_dev->dev, dmaBuf,
+					 skb_tailroom(skb), DMA_FROM_DEVICE);
+			dev_kfree_skb(skb);
+		}
+	} while (retval >= 0);
+
+	return 0;
+}
+
+/* Free Tx descriptors and skbs not yet transmitted */
+static void free_tx_skbs(star_dev *star_dev)
+{
+	int retval;
+	uintptr_t extBuf;
+	u32 ctrl_len, len, dmaBuf;
+
+	do {
+		retval = star_dma_tx_get(star_dev,
+					 (u32 *)&dmaBuf, &ctrl_len, &extBuf);
+		if (retval >= 0 && extBuf != 0) {
+			len = star_dma_tx_length(ctrl_len);
+			dma_unmap_single(star_dev->dev, dmaBuf,
+					 len, DMA_TO_DEVICE);
+			STAR_MSG(STAR_DBG,
+				 "get tx desc index(%d) for skb(0x%lx)\n",
+				 retval, extBuf);
+			dev_kfree_skb((struct sk_buff *)extBuf);
+		}
+	} while (retval >= 0);
+}
+
+static void free_rx_skbs(star_dev *star_dev)
+{
+	int retval;
+	uintptr_t  extBuf;
+	u32 dmaBuf;
+
+	/* Free Rx descriptor */
+	do {
+		retval = star_dma_rx_get(star_dev,
+					 (u32 *)&dmaBuf, NULL, &extBuf);
+		if (retval >= 0 && extBuf != 0) {
+			dma_unmap_single(star_dev->dev, dmaBuf,
+					 skb_tailroom((struct sk_buff *)
+						      extBuf),
+						      DMA_FROM_DEVICE);
+			STAR_MSG(STAR_DBG,
+				 "get rx desc index(%d) for skb(0x%lx)\n",
+				 retval, extBuf);
+			dev_kfree_skb((struct sk_buff *)extBuf);
+		}
+	} while (retval >= 0);
+}
+
+static int receive_one_packet(star_dev *star_dev, bool napi)
+{
+	int retval;
+	uintptr_t extBuf;
+	u32 ctrl_len, len, dmaBuf;
+	struct sk_buff *curr_skb, *new_skb;
+	star_private *star_prv = star_dev->star_prv;
+	struct net_device *ndev = star_prv->dev;
+
+	retval = star_dma_rx_get(star_dev, &dmaBuf, &ctrl_len, &extBuf);
+	/* no more skbs to receive */
+	if (retval < 0)
+		return retval;
+
+	curr_skb = (struct sk_buff *)extBuf;
+	dma_unmap_single(star_dev->dev, dmaBuf,
+			 skb_tailroom(curr_skb), DMA_FROM_DEVICE);
+	STAR_MSG(STAR_VERB, "%s(%s):rx des %d for skb(0x%lx)/length(%d)\n",
+		 __func__, ndev->name, retval, extBuf,
+		 star_dma_rx_length(ctrl_len));
+
+	if (star_dma_rx_valid(ctrl_len)) {
+		len = star_dma_rx_length(ctrl_len);
+		new_skb = get_skb(ndev);
+		if (new_skb) {
+			skb_put(curr_skb, len);
+			curr_skb->ip_summed = CHECKSUM_NONE;
+			curr_skb->protocol = eth_type_trans(curr_skb, ndev);
+			curr_skb->dev = ndev;
+
+			/* send the packet up protocol stack */
+			(napi ? netif_receive_skb : netif_rx)(curr_skb);
+			/* set the time of the last receive */
+			star_dev->stats.rx_packets++;
+			star_dev->stats.rx_bytes += len;
+		} else {
+			star_dev->stats.rx_dropped++;
+			new_skb = curr_skb;
+		}
+	} else {
+		/* Error packet */
+		new_skb = curr_skb;
+		star_dev->stats.rx_errors++;
+		star_dev->stats.rx_crc_errors += star_dma_rx_crc_err(ctrl_len);
+	}
+
+	dmaBuf = dma_map_single(star_dev->dev,
+				skb_tail_pointer(new_skb) - 2,
+				skb_tailroom(new_skb),
+				DMA_FROM_DEVICE);
+	star_dma_rx_set(star_dev, dmaBuf,
+			skb_tailroom(new_skb), (uintptr_t)new_skb);
+
+	return retval;
+}
+
+static int star_poll(struct napi_struct *napi, int budget)
+{
+	int retval, npackets;
+	star_private *star_prv = container_of(napi, star_private, napi);
+	star_dev *star_dev = &star_prv->star_dev;
+
+	for (npackets = 0; npackets < budget; npackets++) {
+		retval = receive_one_packet(star_dev, true);
+		if (retval < 0)
+			break;
+	}
+
+	star_dma_rx_resume(star_dev);
+
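+	/* budget not exhausted: re-arm the rx interrupt and leave polling mode */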
+	if (npackets < budget) {
+		local_irq_disable();
+		napi_complete(napi);
+		star_intr_rx_enable(star_dev);
+		local_irq_enable();
+	}
+
+	return npackets;
+}
+
+/* star tx use tasklet */
+static void star_dsr(unsigned long data)
+{
+	star_private *star_prv;
+	star_dev *star_dev;
+	struct net_device *ndev = (struct net_device *)data;
+
+	STAR_MSG(STAR_VERB, "%s(%s)\n", __func__, ndev->name);
+
+	star_prv = netdev_priv(ndev);
+	star_dev = &star_prv->star_dev;
+
+	if (star_prv->tsk_tx) {
+		star_prv->tsk_tx = false;
+		star_finish_xmit(ndev);
+	}
+}
+
+static irqreturn_t star_isr(int irq, void *dev_id)
+{
+	u32 intrStatus;
+	u32 intr_clr_msk = 0xffffffff;
+	star_private *star_prv;
+	star_dev *star_dev;
+	struct net_device *dev = (struct net_device *)dev_id;
+
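+	/*
+	 * RXC is excluded from the blanket clear below: it is cleared
+	 * explicitly after rx interrupts are disabled, just before NAPI
+	 * polling is scheduled, so no rx completion is lost.
+	 */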
+	intr_clr_msk &= ~STAR_INT_STA_RXC;
+
+	if (!dev) {
+		STAR_MSG(STAR_ERR, "star_isr - unknown device\n");
+		return IRQ_NONE;
+	}
+
+	STAR_MSG(STAR_VERB, "star_isr(%s)\n", dev->name);
+
+	star_prv = netdev_priv(dev);
+	star_dev = &star_prv->star_dev;
+
+	star_intr_disable(star_dev);
+	intrStatus = star_intr_status(star_dev);
+	star_intr_clear(star_dev, intrStatus & intr_clr_msk);
+
+	do {
+		STAR_MSG(STAR_VERB,
+			 "star_isr:interrupt status(0x%08x)\n", intrStatus);
+		if (intrStatus & STAR_INT_STA_RXC) {
+			STAR_MSG(STAR_VERB, "rx complete\n");
+			/* Disable rx interrupts */
+			star_intr_rx_disable(star_dev);
+			/* Clear rx interrupt */
+			star_intr_clear(star_dev, STAR_INT_STA_RXC);
+			napi_schedule(&star_prv->napi);
+		}
+
+		if (intrStatus & STAR_INT_STA_RXQF)
+			STAR_MSG(STAR_VERB, "rx queue full\n");
+
+		if (intrStatus & STAR_INT_STA_RXFIFOFULL)
+			STAR_MSG(STAR_WARN, "rx fifo full\n");
+
+		if (intrStatus & STAR_INT_STA_TXC) {
+			STAR_MSG(STAR_VERB, " tx complete\n");
+			star_prv->tsk_tx = true;
+		}
+
+		if (intrStatus & STAR_INT_STA_TXQE)
+			STAR_MSG(STAR_VERB, "tx queue empty\n");
+
+		if (intrStatus & STAR_INT_STA_RX_PCODE)
+			STAR_MSG(STAR_DBG, "Rx PCODE\n");
+
+		if (intrStatus & STAR_INT_STA_MAGICPKT)
+			STAR_MSG(STAR_WARN, "magic packet received\n");
+
+		if (intrStatus & STAR_INT_STA_MIBCNTHALF) {
+			STAR_MSG(STAR_VERB, " mib counter reach 2G\n");
+			star_mib_init(star_dev);
+		}
+
+		if (intrStatus & STAR_INT_STA_PORTCHANGE) {
+			STAR_MSG(STAR_DBG, "port status change\n");
+			star_link_status_change(star_dev);
+		}
+
+		/* read interrupt requests came during interrupt handling */
+		intrStatus = star_intr_status(star_dev);
+		star_intr_clear(star_dev, intrStatus & intr_clr_msk);
+	} while ((intrStatus & intr_clr_msk) != 0);
+
+	star_intr_enable(star_dev);
+	if (star_prv->tsk_tx)
+		tasklet_schedule(&star_prv->dsr);
+
+	STAR_MSG(STAR_VERB, "star_isr return\n");
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_STAR_USE_RMII_MODE
+static irqreturn_t star_eint_isr(int irq, void *dev_id)
+{
+	STAR_MSG(STAR_DBG, "enter star_eint_isr\n");
+
+	return IRQ_HANDLED;
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void star_netpoll(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	star_isr(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+static int star_mac_enable(struct net_device *ndev)
+{
+	int intrStatus;
+	star_private *star_prv = netdev_priv(ndev);
+	star_dev *star_dev = &star_prv->star_dev;
+
+	STAR_MSG(STAR_DBG, "%s(%s)\n", __func__, ndev->name);
+
+	/* Start RX FIFO receive */
+	star_nic_pdset(star_dev, false);
+
+	star_intr_disable(star_dev);
+	star_dma_tx_stop(star_dev);
+	star_dma_rx_stop(star_dev);
+
+	netif_carrier_off(ndev);
+
+	star_mac_init(star_dev, ndev->dev_addr);
+
+	star_dma_init(star_dev,
+		      star_prv->desc_vir_addr, star_prv->desc_dma_addr);
+
+	/*Enable PHY auto-polling*/
+	star_phyctrl_init(star_dev, 1, star_prv->phy_addr);
+
+	if (alloc_rx_skbs(star_dev)) {
+		STAR_MSG(STAR_ERR, "rx bufs init fail\n");
+		return -ENOMEM;
+	}
+
+	STAR_MSG(STAR_DBG, "request interrupt vector=%d\n", ndev->irq);
+	if (request_irq(ndev->irq, star_isr, IRQF_TRIGGER_FALLING,
+			ndev->name, ndev) != 0) {
+		STAR_MSG(STAR_ERR, "interrupt %d request fail\n", ndev->irq);
+		return -ENODEV;
+	}
+
+#ifdef CONFIG_STAR_USE_RMII_MODE
+	STAR_MSG(STAR_DBG, "request eint_irq vector=%d\n", star_prv->eint_irq);
+	if (request_irq(star_prv->eint_irq, star_eint_isr,
+			IRQF_TRIGGER_FALLING, ndev->name, ndev) != 0) {
+		STAR_MSG(STAR_ERR,
+			 "eint_irq %d request fail\n", star_prv->eint_irq);
+		return -ENODEV;
+	}
+#endif
+
+	napi_enable(&star_prv->napi);
+
+	intrStatus = star_intr_status(star_dev);
+	star_intr_clear(star_dev, intrStatus);
+	star_intr_enable(star_dev);
+
+	star_dev->phy_ops->init(star_dev);
+
+	dma_tx_start_and_reset_tx_desc(star_dev);
+	dma_rx_start_and_reset_rx_desc(star_dev);
+
+	star_link_status_change(star_dev);
+	netif_start_queue(ndev);
+
+	return 0;
+}
+
+static void star_mac_disable(struct net_device *ndev)
+{
+	int intrStatus;
+	star_private *star_prv = netdev_priv(ndev);
+	star_dev *star_dev = &star_prv->star_dev;
+
+	STAR_MSG(STAR_DBG, "%s(%s)\n", __func__, ndev->name);
+
+	netif_stop_queue(ndev);
+
+	napi_disable(&star_prv->napi);
+
+	star_intr_disable(star_dev);
+	star_dma_tx_stop(star_dev);
+	star_dma_rx_stop(star_dev);
+	intrStatus = star_intr_status(star_dev);
+	star_intr_clear(star_dev, intrStatus);
+
+	free_irq(ndev->irq, ndev);
+#ifdef CONFIG_STAR_USE_RMII_MODE
+	free_irq(star_prv->eint_irq, ndev);
+#endif
+
+	/* Free Tx descriptor */
+	free_tx_skbs(star_dev);
+
+	/* Free Rx descriptor */
+	free_rx_skbs(star_dev);
+}
+
+static int star_open(struct net_device *ndev)
+{
+	int ret;
+	star_private *star_prv = netdev_priv(ndev);
+
+	STAR_MSG(STAR_DBG, "star_open(%s)\n", ndev->name);
+
+	if (star_prv->opened) {
+		STAR_MSG(STAR_DBG, "%s(%s) is already open\n",
+			 __func__, ndev->name);
+		return 0;
+	}
+
+	ret = star_mac_enable(ndev);
+	if (ret) {
+		STAR_MSG(STAR_DBG, "star_mac_enable(%s) fail\n", ndev->name);
+		return ret;
+	}
+
+	star_prv->opened = true;
+
+	return 0;
+}
+
+static int star_stop(struct net_device *ndev)
+{
+	star_private *star_prv = netdev_priv(ndev);
+
+	STAR_MSG(STAR_DBG, "enter %s\n", __func__);
+	if (!star_prv->opened) {
+		STAR_MSG(STAR_DBG, "%s(%s) is already closed\n", __func__,
+			 ndev->name);
+		return 0;
+	}
+
+	star_mac_disable(ndev);
+	star_prv->opened = false;
+
+	return 0;
+}
+
+static int star_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	u32 dmaBuf;
+	unsigned long flags;
+	star_private *star_prv;
+	star_dev *star_dev;
+
+	star_prv = netdev_priv(ndev);
+	star_dev = &star_prv->star_dev;
+
+	/* If frame size > Max frame size, drop this packet */
+	if (skb->len > ETH_MAX_FRAME_SIZE) {
+		STAR_MSG(STAR_WARN, "%s:Tx frame len is oversized(%d bytes)\n",
+			 ndev->name, skb->len);
+		dev_kfree_skb(skb);
+		star_dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	dmaBuf = dma_map_single(star_dev->dev,
+				skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(star_dev->dev, dmaBuf))) {
+		STAR_MSG(STAR_ERR, "%s: dma_mapping_error\n", __func__);
+		dev_kfree_skb(skb);
+		star_dev->stats.tx_dropped++;
+		/* ndo_start_xmit must not return an errno; drop and report OK */
+		return NETDEV_TX_OK;
+	}
+
+	spin_lock_irqsave(&star_prv->lock, flags);
+	star_dma_tx_set(star_dev, dmaBuf, skb->len, (uintptr_t)skb);
+	/* Tx descriptor ring full */
+	if (star_dev->tx_num == star_dev->tx_ring_size)
+		netif_stop_queue(ndev);
+	spin_unlock_irqrestore(&star_prv->lock, flags);
+	star_dma_tx_resume(star_dev);
+
+	return NETDEV_TX_OK;
+}
+
+static void star_finish_xmit(struct net_device *ndev)
+{
+	int retval, wake = 0;
+	star_private *star_prv;
+	star_dev *star_dev;
+
+	star_prv = netdev_priv(ndev);
+	star_dev = &star_prv->star_dev;
+
+	do {
+		uintptr_t extBuf;
+		u32 ctrl_len;
+		u32 len;
+		u32 dmaBuf;
+		unsigned long flags;
+
+		spin_lock_irqsave(&star_prv->lock, flags);
+		retval = star_dma_tx_get(star_dev, (u32 *)&dmaBuf,
+					 &ctrl_len, &extBuf);
+		spin_unlock_irqrestore(&star_prv->lock, flags);
+
+		if (retval >= 0 && extBuf != 0) {
+			len = star_dma_tx_length(ctrl_len);
+			dma_unmap_single(star_dev->dev,
+					 dmaBuf, len, DMA_TO_DEVICE);
+			STAR_MSG(STAR_VERB,
+				 "%s get tx desc(%d) for skb(0x%lx), len(%08x)\n",
+				 __func__, retval, extBuf, len);
+
+			dev_kfree_skb_irq((struct sk_buff *)extBuf);
+
+			star_dev->stats.tx_bytes += len;
+			star_dev->stats.tx_packets++;
+			wake = 1;
+		}
+	} while (retval >= 0);
+
+	if (wake)
+		netif_wake_queue(ndev);
+}
+
+static struct net_device_stats *star_get_stats(struct net_device *ndev)
+{
+	star_private *star_prv;
+	star_dev *star_dev;
+
+	STAR_MSG(STAR_VERB, "enter %s\n", __func__);
+
+	star_prv = netdev_priv(ndev);
+	star_dev = &star_prv->star_dev;
+
+	return &star_dev->stats;
+}
+
+#define STAR_HTABLE_SIZE 512
+#define STAR_HTABLE_SIZE_LIMIT (STAR_HTABLE_SIZE >> 1)
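+
+/*
+ * The 512-entry multicast hash is indexed by the multicast bit of the
+ * first address byte (bit 8 of the index) plus the last address byte
+ * (bits 7..0).
+ */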
+
+static void star_set_multicast_list(struct net_device *ndev)
+{
+	unsigned long flags;
+	star_private *star_prv;
+	star_dev *star_dev;
+
+	STAR_MSG(STAR_VERB, "enter %s\n", __func__);
+
+	star_prv = netdev_priv(ndev);
+	star_dev = &star_prv->star_dev;
+
+	spin_lock_irqsave(&star_prv->lock, flags);
+
+	if (ndev->flags & IFF_PROMISC) {
+		STAR_MSG(STAR_WARN, "%s: Promiscuous mode enabled.\n",
+			 ndev->name);
+		star_arl_promisc_enable(star_dev);
+	} else if ((netdev_mc_count(ndev) > STAR_HTABLE_SIZE_LIMIT) ||
+				(ndev->flags & IFF_ALLMULTI)) {
+		u32 hashIdx;
+
+		for (hashIdx = 0; hashIdx < STAR_HTABLE_SIZE; hashIdx++)
+			star_set_hashbit(star_dev, hashIdx, 1);
+	} else {
+		struct netdev_hw_addr *ha;
+
+		netdev_for_each_mc_addr(ha, ndev) {
+			u32 hashAddr;
+
+			hashAddr = (u32)(((ha->addr[0] & 0x1) << 8)
+				   + (u32)(ha->addr[5]));
+			star_set_hashbit(star_dev, hashAddr, 1);
+		}
+	}
+
+	spin_unlock_irqrestore(&star_prv->lock, flags);
+}
+
+static int star_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	star_private *star_prv = netdev_priv(dev);
+	unsigned long flags;
+	int rc = 0;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	spin_lock_irqsave(&star_prv->lock, flags);
+	rc = generic_mii_ioctl(&star_prv->mii, if_mii(req), cmd, NULL);
+	spin_unlock_irqrestore(&star_prv->lock, flags);
+
+	return rc;
+}
+
+static void star_tx_timeout(struct net_device *ndev)
+{
+	bool state;
+	int ret;
+
+	STAR_MSG(STAR_ERR, "%s tx timeout \n", __func__);
+	STAR_MSG(STAR_DBG, "request interrupt vector=%d\n", ndev->irq);
+	ret = irq_get_irqchip_state(ndev->irq, IRQCHIP_STATE_MASKED, &state);
+	STAR_MSG(STAR_DBG, "irq mask status(ret=%d)=0x%x\n", ret, state);
+	ret = irq_get_irqchip_state(ndev->irq, IRQCHIP_STATE_PENDING, &state);
+	STAR_MSG(STAR_DBG, "irq pending status(ret=%d)=0x%x\n", ret, state);
+	ret = irq_get_irqchip_state(ndev->irq, IRQCHIP_STATE_ACTIVE, &state);
+	STAR_MSG(STAR_DBG, "irq active status(ret=%d)=0x%x\n", ret, state);
+}
+
+static int mdcmdio_read(struct net_device *dev, int phy_id, int location)
+{
+	star_private *star_prv;
+	star_dev *star_dev;
+
+	star_prv = netdev_priv(dev);
+	star_dev = &star_prv->star_dev;
+
+	return star_mdc_mdio_read(star_dev, phy_id, location);
+}
+
+static void mdcmdio_write(struct net_device *dev, int phy_id,
+			  int location, int val)
+{
+	star_private *star_prv;
+	star_dev *star_dev;
+
+	star_prv = netdev_priv(dev);
+	star_dev = &star_prv->star_dev;
+	star_mdc_mdio_write(star_dev, phy_id, location, val);
+}
+
+static const struct net_device_ops star_netdev_ops = {
+	.ndo_open = star_open,
+	.ndo_stop = star_stop,
+	.ndo_start_xmit = star_start_xmit,
+	.ndo_get_stats = star_get_stats,
+	.ndo_set_rx_mode = star_set_multicast_list,
+	.ndo_do_ioctl = star_ioctl,
+	.ndo_tx_timeout	= star_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = star_netpoll,
+#endif
+	.ndo_change_mtu	= eth_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+static int starmac_get_link_ksettings(struct net_device *ndev,
+				struct ethtool_link_ksettings *settings)
+{
+	unsigned long flags;
+	star_private *star_prv = netdev_priv(ndev);
+
+	spin_lock_irqsave(&star_prv->lock, flags);
+	mii_ethtool_get_link_ksettings(&star_prv->mii, settings);
+	spin_unlock_irqrestore(&star_prv->lock, flags);
+
+	return 0;
+}
+
+static int starmac_set_link_ksettings(struct net_device *ndev,
+				const struct ethtool_link_ksettings *settings)
+{
+	int ret;
+	unsigned long flags;
+	star_private *star_prv = netdev_priv(ndev);
+
+	spin_lock_irqsave(&star_prv->lock, flags);
+	ret = mii_ethtool_set_link_ksettings(&star_prv->mii, settings);
+	spin_unlock_irqrestore(&star_prv->lock, flags);
+
+	return ret;
+}
+
+static int starmac_nway_reset(struct net_device *ndev)
+{
+	int ret;
+	unsigned long flags;
+	star_private *star_prv = netdev_priv(ndev);
+
+	spin_lock_irqsave(&star_prv->lock, flags);
+	ret = mii_nway_restart(&star_prv->mii);
+	spin_unlock_irqrestore(&star_prv->lock, flags);
+
+	return ret;
+}
+
+static u32 starmac_get_link(struct net_device *ndev)
+{
+	u32 ret;
+	unsigned long flags;
+	star_private *star_prv = netdev_priv(ndev);
+
+	spin_lock_irqsave(&star_prv->lock, flags);
+	ret = mii_link_ok(&star_prv->mii);
+	spin_unlock_irqrestore(&star_prv->lock, flags);
+	STAR_MSG(STAR_DBG, "ETHTOOL_TEST is called\n");
+
+	return ret;
+}
+
+static int starmac_check_if_running(struct net_device *dev)
+{
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	return 0;
+}
+
+static void starmac_get_drvinfo(struct net_device *dev,
+				struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, STAR_DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, STAR_DRV_VERSION, sizeof(info->version));
+}
+
+static const struct ethtool_ops starmac_ethtool_ops = {
+	.begin = starmac_check_if_running,
+	.get_drvinfo = starmac_get_drvinfo,
+	.get_link_ksettings = starmac_get_link_ksettings,
+	.set_link_ksettings = starmac_set_link_ksettings,
+	.nway_reset = starmac_nway_reset,
+	.get_link = starmac_get_link,
+};
+
+int star_get_wol_flag(star_private *star_prv)
+{
+	return star_prv->support_wol;
+}
+
+void star_set_wol_flag(star_private *star_prv, bool flag)
+{
+	star_prv->support_wol = flag;
+}
+
+static int star_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct net_device *netdev = platform_get_drvdata(pdev);
+	star_private *star_prv = netdev_priv(netdev);
+	star_dev *star_dev = &star_prv->star_dev;
+
+	STAR_MSG(STAR_DBG, "entered %s, line(%d)\n", __func__, __LINE__);
+
+	if (star_prv->opened) {
+		if (star_prv->wol == WOL_NONE) {
+			STAR_MSG(STAR_DBG, "WOL not supported.\n");
+			star_mac_disable(netdev);
+			clk_disable_unprepare(star_prv->core_clk);
+			clk_disable_unprepare(star_prv->reg_clk);
+			clk_disable_unprepare(star_prv->trans_clk);
+			regulator_disable(star_prv->phy_regulator);
+		} else if (star_prv->wol == MAC_WOL) {
+			STAR_MSG(STAR_DBG, "MAC WOL supported.\n");
+			/* spm_set_sleep_26m_req(1); */
+			star_config_wol(star_dev, true);
+		} else if (star_prv->wol == PHY_WOL) {
+			STAR_MSG(STAR_DBG, "PHY WOL supported.\n");
+			star_mac_disable(netdev);
+			if (star_dev->phy_ops->wol_enable)
+				star_dev->phy_ops->wol_enable(netdev);
+			enable_irq_wake(star_prv->eint_irq);
+		}
+	}
+
+	return 0;
+}
+
+static int star_resume(struct platform_device *pdev)
+{
+	struct net_device *netdev = platform_get_drvdata(pdev);
+	star_private *star_prv = netdev_priv(netdev);
+	star_dev *star_dev = &star_prv->star_dev;
+	int ret;
+
+	STAR_MSG(STAR_DBG, "entered %s(%s)\n", __func__, netdev->name);
+
+	if (star_prv->opened) {
+		if (star_prv->wol == WOL_NONE) {
+			STAR_MSG(STAR_DBG, "WOL not supported.\n");
+			ret = regulator_enable(star_prv->phy_regulator);
+			if (ret != 0)
+				STAR_MSG(STAR_ERR, "failed to regulator_enable(%d)\n", ret);
+
+			ret = clk_prepare_enable(star_prv->core_clk);
+			if (ret < 0)
+				STAR_MSG(STAR_ERR, "failed to enable core-clk (%d)\n", ret);
+
+			ret = clk_prepare_enable(star_prv->reg_clk);
+			if (ret < 0)
+				STAR_MSG(STAR_ERR, "failed to enable reg-clk (%d)\n", ret);
+
+			ret = clk_prepare_enable(star_prv->trans_clk);
+			if (ret < 0)
+				STAR_MSG(STAR_ERR, "failed to enable trans-clk (%d)\n", ret);
+
+			star_hw_init(star_dev);
+			star_mac_enable(netdev);
+		} else if (star_prv->wol == MAC_WOL) {
+			STAR_MSG(STAR_DBG, "MAC WOL supported.\n");
+			star_config_wol(star_dev, false);
+			/* spm_set_sleep_26m_req(0); */
+		} else if (star_prv->wol == PHY_WOL) {
+			STAR_MSG(STAR_DBG, "PHY WOL supported.\n");
+			if (star_dev->phy_ops->wol_disable)
+				star_dev->phy_ops->wol_disable(netdev);
+			disable_irq_wake(star_prv->eint_irq);
+			star_hw_init(star_dev);
+			star_mac_enable(netdev);
+		}
+	}
+
+	return 0;
+}
+
+static int star_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	star_private *star_prv;
+	star_dev *star_dev;
+	struct net_device *netdev;
+	struct device_node *np;
+	const char *mac_addr;
+
+	star_set_dbg_level(STAR_DBG);
+	STAR_MSG(STAR_DBG, "%s entered\n", __func__);
+
+	netdev = alloc_etherdev(sizeof(star_private));
+	if (!netdev)
+		return -ENOMEM;
+
+	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	star_prv = netdev_priv(netdev);
+	memset(star_prv, 0, sizeof(star_private));
+	star_prv->dev = netdev;
+	/* default: interface closed */
+	star_prv->opened = false;
+
+#ifdef ETH_SUPPORT_WOL
+	STAR_MSG(STAR_DBG, "%s() support WOL\n", __func__);
+	star_prv->support_wol = true;
+#endif
+	star_dev = &star_prv->star_dev;
+	star_dev->dev = &pdev->dev;
+
+	np = of_find_compatible_node(NULL, NULL, "mediatek,mt8516-ethernet");
+	if (!np) {
+		STAR_MSG(STAR_ERR, "%s, fail to find node\n", __func__);
+		ret = -EINVAL;
+		goto err_free_netdev;
+	}
+
+	star_prv->core_clk = devm_clk_get(&pdev->dev, "core");
+	if (IS_ERR(star_prv->core_clk)) {
+		ret = PTR_ERR(star_prv->core_clk);
+		STAR_MSG(STAR_ERR, "failed to get core-clk: %d\n", ret);
+		goto err_free_netdev;
+	}
+	ret = clk_prepare_enable(star_prv->core_clk);
+	if (ret < 0) {
+		STAR_MSG(STAR_ERR, "failed to enable core-clk (%d)\n", ret);
+		goto err_free_netdev;
+	}
+
+	star_prv->reg_clk = devm_clk_get(&pdev->dev, "reg");
+	if (IS_ERR(star_prv->reg_clk)) {
+		ret = PTR_ERR(star_prv->reg_clk);
+		STAR_MSG(STAR_ERR, "failed to get reg-clk: %d\n", ret);
+		goto err_free_netdev;
+	}
+	ret = clk_prepare_enable(star_prv->reg_clk);
+	if (ret < 0) {
+		STAR_MSG(STAR_ERR, "failed to enable reg-clk (%d)\n", ret);
+		goto err_free_netdev;
+	}
+
+	star_prv->trans_clk = devm_clk_get(&pdev->dev, "trans");
+	if (IS_ERR(star_prv->trans_clk)) {
+		ret = PTR_ERR(star_prv->trans_clk);
+		STAR_MSG(STAR_ERR, "failed to get trans-clk: %d\n", ret);
+		goto err_free_netdev;
+	}
+	ret = clk_prepare_enable(star_prv->trans_clk);
+	if (ret < 0) {
+		STAR_MSG(STAR_ERR, "failed to enable trans-clk (%d)\n", ret);
+		goto err_free_netdev;
+	}
+
+	star_prv->phy_regulator = devm_regulator_get(&pdev->dev, "eth-regulator");
+	if (IS_ERR(star_prv->phy_regulator)) {
+		ret = PTR_ERR(star_prv->phy_regulator);
+		STAR_MSG(STAR_ERR, "failed to get eth-regulator: %d\n", ret);
+		goto err_free_netdev;
+	}
+	ret = regulator_set_voltage(star_prv->phy_regulator, 3300000, 3300000);
+	if (ret != 0) {
+		STAR_MSG(STAR_ERR, "failed to regulator_set_voltage(%d)\n", ret);
+		goto err_free_netdev;
+	}
+	ret = regulator_enable(star_prv->phy_regulator);
+	if (ret != 0) {
+		STAR_MSG(STAR_ERR, "failed to regulator_enable(%d)\n", ret);
+		goto err_free_netdev;
+	}
+
+	star_dev->base = of_iomap(np, 0);
+	if (!star_dev->base) {
+		STAR_MSG(STAR_ERR, "fail to ioremap eth!\n");
+		ret = -ENOMEM;
+		goto err_free_netdev;
+	}
+
+	star_dev->pericfg_base = of_iomap(np, 1);
+	if (!star_dev->pericfg_base) {
+		STAR_MSG(STAR_ERR, "fail to ioremap pericfg_base!\n");
+		ret = -ENOMEM;
+		goto err_free_netdev;
+	}
+
+	STAR_MSG(STAR_DBG, "BASE: mac(0x%p), clk(0x%p)\n",
+		 star_dev->base, star_dev->pericfg_base);
+
+#ifdef CONFIG_STAR_USE_RMII_MODE
+	star_switch_to_rmii_mode(star_dev);
+#endif
+
+	tasklet_init(&star_prv->dsr, star_dsr, (unsigned long)netdev);
+
+	/* Init the driver lock */
+	spin_lock_init(&star_prv->lock);
+
+	star_prv->desc_vir_addr =
+		(uintptr_t)dma_alloc_coherent(star_dev->dev,
+					      TX_DESC_TOTAL_SIZE +
+					      RX_DESC_TOTAL_SIZE,
+					      &star_prv->desc_dma_addr,
+					      GFP_KERNEL | GFP_DMA);
+	if (!star_prv->desc_vir_addr) {
+		STAR_MSG(STAR_ERR, "fail to dma_alloc_coherent!!\n");
+		ret = -ENOMEM;
+		goto alloc_desc_fail;
+	}
+
+	star_dev->star_prv = star_prv;
+
+	STAR_MSG(STAR_DBG, "Ethernet disable powerdown!\n");
+	star_nic_pdset(star_dev, false);
+
+	star_hw_init(star_dev);
+
+	/* Get PHY ID */
+	star_prv->phy_addr = star_detect_phyid(star_dev);
+	if (star_prv->phy_addr == 32) {
+		STAR_MSG(STAR_ERR, "can't detect phy_addr,default to %d\n",
+			 star_prv->phy_addr);
+		ret = -ENODEV;
+		goto phy_detect_fail;
+	} else {
+		STAR_MSG(STAR_WARN, "PHY addr = 0x%04x\n", star_prv->phy_addr);
+	}
+
+	star_prv->mii.phy_id = star_prv->phy_addr;
+	star_prv->mii.dev = netdev;
+	star_prv->mii.mdio_read = mdcmdio_read;
+	star_prv->mii.mdio_write = mdcmdio_write;
+	star_prv->mii.phy_id_mask = 0x1f;
+	star_prv->mii.reg_num_mask = 0x1f;
+
+	/* Set MAC address */
+	mac_addr = of_get_mac_address(np);
+	if (!IS_ERR_OR_NULL(mac_addr))
+		ether_addr_copy(netdev->dev_addr, mac_addr);
+
+	STAR_MSG(STAR_DBG, "default netdev->dev_addr(%pM).\n", netdev->dev_addr);
+	/* If the mac address is invalid, use random mac address  */
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		random_ether_addr(netdev->dev_addr);
+		STAR_MSG(STAR_WARN, "generated random MAC address %pM\n",
+			netdev->dev_addr);
+		netdev->addr_assign_type = NET_ADDR_RANDOM;
+	}
+
+	netdev->irq = platform_get_irq(pdev, 0);
+	if (netdev->irq < 0) {
+		STAR_MSG(STAR_ERR, "no IRQ resource found\n");
+		ret = netdev->irq;
+		goto phy_detect_fail;
+	}
+	STAR_MSG(STAR_DBG, "eth irq (%d)\n", netdev->irq);
+
+#ifdef CONFIG_STAR_USE_RMII_MODE
+	star_prv->eint_pin = of_get_named_gpio(np, "eth-gpios", 0);
+	if (star_prv->eint_pin < 0)
+		STAR_MSG(STAR_DBG, "not find eth-gpio\n");
+	star_prv->eint_irq = gpio_to_irq(star_prv->eint_pin);
+#endif
+	star_prv->wol = WOL_NONE;
+	star_prv->wol_flag = false;
+
+	netdev->base_addr = (unsigned long)star_dev->base;
+	netdev->netdev_ops = &star_netdev_ops;
+
+	STAR_MSG(STAR_DBG, "EthTool installed\n");
+	netdev->ethtool_ops = &starmac_ethtool_ops;
+
+	netif_napi_add(netdev, &star_prv->napi, star_poll, STAR_NAPI_WEIGHT);
+
+	ret = register_netdev(netdev);
+	if (ret)
+		goto phy_detect_fail;
+
+	platform_set_drvdata(pdev, netdev);
+
+	ret = star_init_procfs();
+	if (ret)
+		STAR_MSG(STAR_WARN, "star_init_procfs fail\n");
+
+	STAR_MSG(STAR_DBG, "star_probe success.\n");
+
+	return 0;
+
+phy_detect_fail:
+	dma_free_coherent(star_dev->dev,
+			  TX_DESC_TOTAL_SIZE + RX_DESC_TOTAL_SIZE,
+			  (void *)star_prv->desc_vir_addr,
+			  star_prv->desc_dma_addr);
+alloc_desc_fail:
+err_free_netdev:
+	free_netdev(netdev);
+	STAR_MSG(STAR_ERR, "Star MAC init fail\n");
+	return ret;
+}
+
+static int star_remove(struct platform_device *pdev)
+{
+	struct net_device *netdev = platform_get_drvdata(pdev);
+	star_private *star_prv = netdev_priv(netdev);
+	star_dev *star_dev = &star_prv->star_dev;
+
+	star_exit_procfs();
+
+	unregister_netdev(netdev);
+
+	dma_free_coherent(star_dev->dev,
+			  TX_DESC_TOTAL_SIZE + RX_DESC_TOTAL_SIZE,
+			  (void *)star_prv->desc_vir_addr,
+			  star_prv->desc_dma_addr);
+
+	free_netdev(netdev);
+
+	return 0;
+}
+
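+/*
+ * For reference, star_probe() expects a device-tree node shaped roughly
+ * like the sketch below. This is an illustrative guess, not a binding:
+ * the reg ranges, interrupt specifier, and phandles are placeholders;
+ * only the compatible string, the clock-names looked up via devm_clk_get(),
+ * the "eth-regulator" supply, the two of_iomap() regions, and the optional
+ * "eth-gpios" wake pin come from the code above.
+ *
+ *	ethernet {
+ *		compatible = "mediatek,mt8516-ethernet";
+ *		reg = <...>, <...>;		// MAC regs, PERICFG regs
+ *		interrupts = <...>;
+ *		clocks = <...>, <...>, <...>;
+ *		clock-names = "core", "reg", "trans";
+ *		eth-regulator-supply = <...>;
+ *		eth-gpios = <...>;		// PHY WOL wake interrupt
+ *		local-mac-address = [00 00 00 00 00 00];
+ *	};
+ */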
+static const struct of_device_id star_of_match[] = {
+	{ .compatible = "mediatek,mt8516-ethernet", },
+	{},
+};
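+/* Export the match table so the module can autoload on this compatible. */
+MODULE_DEVICE_TABLE(of, star_of_match);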
+
+static struct platform_driver star_pdrv = {
+	.driver = {
+		.name = STAR_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = star_of_match,
+	},
+	.probe = star_probe,
+	.suspend = star_suspend,
+	.resume = star_resume,
+	.remove = star_remove,
+};
+
+static int __init star_init(void)
+{
+	int err;
+
+	STAR_MSG(STAR_DBG, "enter %s\n", __func__);
+
+	err = platform_driver_register(&star_pdrv);
+	if (err)
+		return err;
+
+	STAR_MSG(STAR_DBG, "%s success.\n", __func__);
+	return 0;
+}
+
+static void __exit star_exit(void)
+{
+	platform_driver_unregister(&star_pdrv);
+	STAR_MSG(STAR_DBG, "%s ...\n", __func__);
+}
+
+module_init(star_init);
+module_exit(star_exit);
+
+MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek STAR Network Driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/ethernet/mediatek/star/star.h b/drivers/net/ethernet/mediatek/star/star.h
new file mode 100644
index 0000000..397a387
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/star/star.h
@@ -0,0 +1,230 @@
+/* Mediatek STAR MAC network driver.
+ *
+ * Copyright (c) 2016-2017 Mediatek Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _STAR_H_
+#define _STAR_H_
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/mii.h>
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <linux/syscalls.h>
+#include <linux/fcntl.h>
+#include <linux/regulator/consumer.h>
+
+#include "star_mac.h"
+#include "star_phy.h"
+
+/* use RMII mode */
+#define CONFIG_STAR_USE_RMII_MODE
+
+#define ETH_MAX_FRAME_SIZE          1536
+#define ETH_SKB_ALIGNMENT           16
+
+#define TX_DESC_NUM  128
+#define RX_DESC_NUM  128
+#define TX_DESC_TOTAL_SIZE (sizeof(tx_desc) * TX_DESC_NUM)
+#define RX_DESC_TOTAL_SIZE (sizeof(rx_desc) * RX_DESC_NUM)
+#define ETH_EXTRA_PKT_LEN 36
+#define ETH_ADDR_LEN 6
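+/* NAPI poll budget: twice the Rx ring size (2 * 128 = 256) */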
+#define STAR_NAPI_WEIGHT (RX_DESC_NUM << 1)
+
+/* Star Ethernet Configuration */
+/* ====================================== */
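+/*
+ * The interrupt mask register is active-high per source: writing all-ones
+ * masks everything, writing 0 unmasks everything, and single sources are
+ * masked/unmasked by setting/clearing their status bit in the mask.
+ */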
+#define star_intr_disable(dev) \
+		star_set_reg(star_int_mask((dev)->base), 0xffffffff)
+#define star_intr_enable(dev) \
+		star_set_reg(star_int_mask((dev)->base), 0)
+#define star_intr_clear(dev, intrStatus) \
+		star_set_reg(star_int_sta((dev)->base), intrStatus)
+#define star_intr_status(dev) \
+		star_get_reg(star_int_sta((dev)->base))
+#define star_intr_rx_enable(dev) \
+		star_clear_bit(star_int_mask((dev)->base), STAR_INT_STA_RXC)
+#define star_intr_rx_disable(dev) \
+		star_set_bit(star_int_mask((dev)->base), STAR_INT_STA_RXC)
+#define star_intr_mask(dev) \
+		star_get_reg(star_int_mask((dev)->base))
+
+#define RX_RESUME BIT(2)
+#define RX_STOP BIT(1)
+#define RX_START BIT(0)
+
+#define dma_tx_start_and_reset_tx_desc(dev) \
+		star_set_bit(star_tx_dma_ctrl((dev)->base), TX_START)
+#define dma_rx_start_and_reset_rx_desc(dev) \
+		star_set_bit(star_rx_dma_ctrl((dev)->base), RX_START)
+#define star_dma_tx_enable(dev) \
+		star_set_bit(star_tx_dma_ctrl((dev)->base), TX_RESUME)
+#define star_dma_tx_disable(dev) \
+		star_set_bit(star_tx_dma_ctrl((dev)->base), TX_STOP)
+#define star_dma_rx_enable(dev) \
+		star_set_bit(star_rx_dma_ctrl((dev)->base), RX_RESUME)
+#define star_dma_rx_disable(dev) \
+		star_set_bit(star_rx_dma_ctrl((dev)->base), RX_STOP)
+
+#define star_dma_tx_resume(dev) star_dma_tx_enable(dev)
+#define star_dma_rx_resume(dev) star_dma_rx_enable(dev)
+
+#define star_reset_hash_table(dev) \
+		star_set_bit(star_test1((dev)->base), STAR_TEST1_RST_HASH_BIST)
+
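+/* A received frame is valid only if it is a single segment (both FS and
+ * LS set) with neither the CRC-error nor the oversize flag raised.
+ */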
+#define star_dma_rx_valid(ctrl_len) \
+		(((ctrl_len & RX_FS) != 0) && ((ctrl_len & RX_LS) != 0) && \
+		((ctrl_len & RX_CRCERR) == 0) && ((ctrl_len & RX_OSIZE) == 0))
+
+#define star_dma_rx_crc_err(ctrl_len) ((ctrl_len & RX_CRCERR) ? 1 : 0)
+#define star_dma_rx_over_size(ctrl_len) ((ctrl_len & RX_OSIZE) ? 1 : 0)
+
+#define star_dma_rx_length(ctrl_len) \
+		((ctrl_len >> RX_LEN_OFFSET) & RX_LEN_MASK)
+#define star_dma_tx_length(ctrl_len) \
+		((ctrl_len >> TX_LEN_OFFSET) & TX_LEN_MASK)
+
+#define star_arl_promisc_enable(dev) \
+		star_set_bit(STAR_ARL_CFG((dev)->base), STAR_ARL_CFG_MISCMODE)
+
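+/*
+ * Wake-on-LAN strategies (see star_suspend()/star_resume()):
+ *   WOL_NONE - no wake source; clocks and the PHY regulator are shut off.
+ *   MAC_WOL  - the MAC stays powered and wakes on the magic-packet
+ *              interrupt (star_config_wol()).
+ *   PHY_WOL  - the PHY detects the magic packet and wakes the SoC through
+ *              eint_irq.
+ */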
+enum wol_type {
+	WOL_NONE = 0,
+	MAC_WOL,
+	PHY_WOL,
+};
+
+/**
+ * @brief structure for Star private data
+ * @wol:		ethernet mac wol type status
+ * @wol_flag:		normal wol: set true to enable, set false to disable.
+ */
+typedef struct star_private_s {
+	struct regulator *phy_regulator;
+	struct clk *core_clk, *reg_clk, *trans_clk;
+	star_dev star_dev;
+	struct net_device *dev;
+	dma_addr_t desc_dma_addr;
+	uintptr_t desc_vir_addr;
+	u32 phy_addr;
+	/* star lock */
+	spinlock_t lock;
+	struct tasklet_struct dsr;
+	bool tsk_tx;
+	struct napi_struct napi;
+	struct mii_if_info mii;
+	struct input_dev *idev;
+	bool opened;
+	bool support_wol;
+	bool support_rmii;
+	int eint_irq;
+	int eint_pin;
+	enum wol_type wol;
+	bool wol_flag;
+} star_private;
+
+struct eth_phy_ops {
+	u32 addr;
+	/* value of phy reg3(identifier2) */
+	u32 phy_id;
+	void (*init)(star_dev *sdev);
+	void (*wol_enable)(struct net_device *netdev);
+	void (*wol_disable)(struct net_device *netdev);
+};
+
+/* debug level */
+enum {
+	STAR_ERR = 0,
+	STAR_WARN,
+	STAR_DBG,
+	STAR_VERB,
+	STAR_DBG_MAX
+};
+
+#ifndef STAR_DBG_LVL_DEFAULT
+#define STAR_DBG_LVL_DEFAULT STAR_ERR
+#endif
+
+extern int star_dbg_level;
+
+/* star mac memory barrier */
+#define star_mb() mb()
+
+#define STAR_MSG(lvl, fmt...) do {\
+		if (lvl <= star_dbg_level)\
+			pr_err("star: " fmt);\
+		} while (0)
+
+static inline void star_set_reg(void __iomem *reg, u32 value)
+{
+	STAR_MSG(STAR_VERB, "star_set_reg(%p)=%08x\n", reg, value);
+	iowrite32(value, reg);
+}
+
+static inline u32 star_get_reg(void __iomem *reg)
+{
+	u32 data = ioread32(reg);
+
+	STAR_MSG(STAR_VERB, "star_get_reg(%p)=%08x\n", reg, data);
+	return data;
+}
+
+static inline void star_set_bit(void __iomem *reg, u32 bit)
+{
+	u32 data = ioread32(reg);
+
+	data |= bit;
+	STAR_MSG(STAR_VERB, "star_set_bit(%p,bit:%08x)=%08x\n", reg, bit, data);
+	iowrite32(data, reg);
+	star_mb();
+}
+
+static inline void star_clear_bit(void __iomem *reg, u32 bit)
+{
+	u32 data = ioread32(reg);
+
+	data &= ~bit;
+	STAR_MSG(STAR_VERB,
+		 "star_clear_bit(%p,bit:%08x)=%08x\n", reg, bit, data);
+	iowrite32(data, reg);
+	star_mb();
+}
+
+static inline u32 star_get_bit_mask(void __iomem *reg, u32 mask, u32 offset)
+{
+	u32 data = ioread32(reg);
+
+	data = ((data >> offset) & mask);
+	STAR_MSG(STAR_VERB,
+		 "star_get_bit_mask(%p,mask:%08x,offset:%08x)=%08x(data)\n",
+		 reg, mask, offset, data);
+	return data;
+}
+
+static inline u32 star_is_set_bit(void __iomem *reg, u32 bit)
+{
+	u32 data = ioread32(reg);
+
+	data &= bit;
+	STAR_MSG(STAR_VERB,
+		 "star_is_set_bit(%p,bit:%08x)=%08x\n", reg, bit, data);
+	return data ? 1 : 0;
+}
+
+int star_get_wol_flag(star_private *star_prv);
+void star_set_wol_flag(star_private *star_prv, bool flag);
+int star_get_dbg_level(void);
+void star_set_dbg_level(int dbg);
+
+#endif /* _STAR_H_ */
diff --git a/drivers/net/ethernet/mediatek/star/star_mac.c b/drivers/net/ethernet/mediatek/star/star_mac.c
new file mode 100644
index 0000000..47e4d4e
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/star/star_mac.c
@@ -0,0 +1,526 @@
+/* Mediatek STAR MAC network driver.
+ *
+ * Copyright (c) 2016-2017 Mediatek Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include "star.h"
+
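+/*
+ * On 64-bit, an skb pointer does not fit in the 32-bit descriptor
+ * "reserve" field, so the pointers are kept in side arrays indexed by
+ * descriptor slot instead (see star_dma_tx_set()/star_dma_rx_set()).
+ */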
+#ifdef CONFIG_ARM64
+static uintptr_t tx_skb_reserve[TX_DESC_NUM];
+static uintptr_t rx_skb_reserve[RX_DESC_NUM];
+#endif
+
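+/*
+ * Indirect MDIO access goes through PHY_CTRL0: clear the write-1-to-clear
+ * RWOK flag, issue the read/write command with the PHY and register
+ * address, then poll RWOK until the transfer completes.
+ */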
+u16 star_mdc_mdio_read(star_dev *dev, u32 phy_addr, u32 phy_reg)
+{
+	u16 data;
+	u32 phy_ctl;
+	void __iomem *base = dev->base;
+
+	/* Clear previous read/write OK status (write 1 clear) */
+	star_set_reg(STAR_PHY_CTRL0(base), STAR_PHY_CTRL0_RWOK);
+	phy_ctl = (phy_addr & STAR_PHY_CTRL0_PA_MASK)
+		  << STAR_PHY_CTRL0_PA_OFFSET |
+		  (phy_reg & STAR_PHY_CTRL0_PREG_MASK)
+		  << STAR_PHY_CTRL0_PREG_OFFSET |
+		  STAR_PHY_CTRL0_RDCMD;
+	star_mb();
+	star_set_reg(STAR_PHY_CTRL0(base), phy_ctl);
+	star_mb();
+
+	STAR_POLLING_TIMEOUT(star_is_set_bit(STAR_PHY_CTRL0(base),
+					     STAR_PHY_CTRL0_RWOK));
+	star_mb();
+	data = (u16)star_get_bit_mask(STAR_PHY_CTRL0(base),
+				      STAR_PHY_CTRL0_RWDATA_MASK,
+				      STAR_PHY_CTRL0_RWDATA_OFFSET);
+
+	return data;
+}
+
+void star_mdc_mdio_write(star_dev *dev, u32 phy_addr, u32 phy_reg, u16 value)
+{
+	u32 phy_ctl;
+	void __iomem *base = dev->base;
+
+	/* Clear previous read/write OK status (write 1 clear) */
+	star_set_reg(STAR_PHY_CTRL0(base), STAR_PHY_CTRL0_RWOK);
+	phy_ctl = ((value & STAR_PHY_CTRL0_RWDATA_MASK)
+		   << STAR_PHY_CTRL0_RWDATA_OFFSET) |
+		   ((phy_addr & STAR_PHY_CTRL0_PA_MASK)
+		   << STAR_PHY_CTRL0_PA_OFFSET) |
+		   ((phy_reg & STAR_PHY_CTRL0_PREG_MASK)
+		   << STAR_PHY_CTRL0_PREG_OFFSET) |
+		   STAR_PHY_CTRL0_WTCMD;
+	star_mb();
+	star_set_reg(STAR_PHY_CTRL0(base), phy_ctl);
+	star_mb();
+	STAR_POLLING_TIMEOUT(star_is_set_bit(STAR_PHY_CTRL0(base),
+					     STAR_PHY_CTRL0_RWOK));
+}
+
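+/*
+ * Descriptor ownership follows the COWN bit: set means CPU-owned, clear
+ * means DMA-owned. An initialized descriptor is CPU-owned and empty; only
+ * the EOR (end-of-ring) marker is preserved across re-init.
+ */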
+static void desc_tx_init(tx_desc *tx_desc, u32 is_eor)
+{
+	tx_desc->buffer = 0;
+	tx_desc->ctrl_len = TX_COWN | (is_eor ? TX_EOR : 0);
+	tx_desc->vtag = 0;
+	tx_desc->reserve = 0;
+}
+
+static void desc_rx_init(rx_desc *rx_desc, u32 is_eor)
+{
+	rx_desc->buffer = 0;
+	rx_desc->ctrl_len = RX_COWN | (is_eor ? RX_EOR : 0);
+	rx_desc->vtag = 0;
+	rx_desc->reserve = 0;
+}
+
+static void desc_tx_take(tx_desc *tx_desc)
+{
+	if (desc_tx_dma(tx_desc))
+		tx_desc->ctrl_len |= TX_COWN;
+}
+
+static void desc_rx_take(rx_desc *rx_desc)
+{
+	if (desc_rx_dma(rx_desc))
+		rx_desc->ctrl_len |= RX_COWN;
+}
+
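+/*
+ * The Tx and Rx rings share one contiguous coherent allocation: Tx
+ * descriptors first, immediately followed by Rx descriptors. The hardware
+ * base and current-pointer registers are programmed accordingly.
+ */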
+int star_dma_init(star_dev *dev, uintptr_t desc_viraddr,
+		  dma_addr_t desc_dmaaddr)
+{
+	int i;
+	void __iomem *base = dev->base;
+
+	STAR_MSG(STAR_VERB, "%s virAddr=0x%lx\n", __func__, desc_viraddr);
+	dev->tx_ring_size = TX_DESC_NUM;
+	dev->rx_ring_size = RX_DESC_NUM;
+
+	dev->tx_desc = (tx_desc *)desc_viraddr;
+	dev->rx_desc = (rx_desc *)dev->tx_desc + dev->tx_ring_size;
+
+	for (i = 0; i < dev->tx_ring_size; i++)
+		desc_tx_init(dev->tx_desc + i, i == dev->tx_ring_size - 1);
+	for (i = 0; i < dev->rx_ring_size; i++)
+		desc_rx_init(dev->rx_desc + i, i == dev->rx_ring_size - 1);
+
+	dev->tx_head = 0;
+	dev->tx_tail = 0;
+	dev->rx_head = 0;
+	dev->rx_tail = 0;
+	dev->tx_num = 0;
+	dev->rx_num = 0;
+
+	/* Set Tx/Rx descriptor address */
+	star_set_reg(STAR_TX_BASE_ADDR(base), (u32)desc_dmaaddr);
+	star_set_reg(STAR_TX_DPTR(base), (u32)desc_dmaaddr);
+	star_set_reg(STAR_RX_BASE_ADDR(base),
+		     (u32)desc_dmaaddr + sizeof(tx_desc) * dev->tx_ring_size);
+	star_set_reg(STAR_RX_DPTR(base),
+		     (u32)desc_dmaaddr + sizeof(tx_desc) * dev->tx_ring_size);
+
+	star_intr_disable(dev);
+
+	return 0;
+}
+
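+/*
+ * Publish one Tx buffer to the ring: the length is padded up to the
+ * 60-byte minimum Ethernet payload, the descriptor is filled while still
+ * CPU-owned, and COWN is cleared only after a write barrier to hand it
+ * to the DMA engine.
+ */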
+int star_dma_tx_set(star_dev *dev, u32 buffer, u32 length, uintptr_t ext_buf)
+{
+	int is_tx_last;
+	int desc_idx = dev->tx_head;
+	tx_desc *tx_desc = dev->tx_desc + desc_idx;
+	u32 len = (((length < 60) ? 60 : length) & TX_LEN_MASK)
+		  << TX_LEN_OFFSET;
+
+	/* Error checking */
+	if (dev->tx_num == dev->tx_ring_size)
+		goto err;
+	/* descriptor is not empty - cannot set */
+	if (!desc_tx_empty(tx_desc))
+		goto err;
+
+	tx_desc->buffer = buffer;
+	tx_desc->ctrl_len |= len | TX_FS | TX_LS | TX_INT;
+#ifdef CONFIG_ARM64
+	tx_skb_reserve[desc_idx] = ext_buf;
+#else
+	tx_desc->reserve = ext_buf;
+#endif
+	/* star memory barrier */
+	wmb();
+	/* Set HW own */
+	tx_desc->ctrl_len &= ~TX_COWN;
+
+	dev->tx_num++;
+	is_tx_last = desc_tx_last(tx_desc);
+	dev->tx_head = is_tx_last ? 0 : desc_idx + 1;
+
+	return desc_idx;
+err:
+	return -1;
+}
+
+int star_dma_tx_get(star_dev *dev, u32 *buffer,
+		    u32 *ctrl_len, uintptr_t *ext_buf)
+{
+	int is_tx_last;
+	int desc_idx = dev->tx_tail;
+	tx_desc *tx_desc = dev->tx_desc + desc_idx;
+
+	if (dev->tx_num == 0)
+		goto err;
+	if (desc_tx_dma(tx_desc))
+		goto err;
+	if (desc_tx_empty(tx_desc))
+		goto err;
+
+	if (buffer != 0)
+		*buffer = tx_desc->buffer;
+	if (ctrl_len != 0)
+		*ctrl_len = tx_desc->ctrl_len;
+
+#ifdef CONFIG_ARM64
+	if (ext_buf != 0)
+		*ext_buf = tx_skb_reserve[desc_idx];
+#else
+	if (ext_buf != 0)
+		*ext_buf = tx_desc->reserve;
+#endif
+	/* add star memory barrier */
+	rmb();
+
+	desc_tx_init(tx_desc, desc_tx_last(tx_desc));
+	dev->tx_num--;
+	is_tx_last = desc_tx_last(tx_desc);
+	dev->tx_tail = is_tx_last ? 0 : desc_idx + 1;
+
+	return desc_idx;
+err:
+	return -1;
+}
+
+int star_dma_rx_set(star_dev *dev, u32 buffer, u32 length, uintptr_t ext_buf)
+{
+	int desc_idx = dev->rx_head;
+	rx_desc *rx_desc = dev->rx_desc + desc_idx;
+	int is_rx_last;
+
+	/* Error checking */
+	if (dev->rx_num == dev->rx_ring_size)
+		goto err;
+	/* descriptor is not empty - cannot set */
+	if (!desc_rx_empty(rx_desc))
+		goto err;
+
+	rx_desc->buffer = buffer;
+	rx_desc->ctrl_len |= ((length & RX_LEN_MASK) << RX_LEN_OFFSET);
+#ifdef CONFIG_ARM64
+	rx_skb_reserve[desc_idx] = ext_buf;
+#else
+	rx_desc->reserve = ext_buf;
+#endif
+	/* star memory barrier */
+	wmb();
+	/* Set HW own */
+	rx_desc->ctrl_len &= ~RX_COWN;
+
+	dev->rx_num++;
+	is_rx_last = desc_rx_last(rx_desc);
+	dev->rx_head = is_rx_last ? 0 : desc_idx + 1;
+
+	return desc_idx;
+err:
+	return -1;
+}
+
+int star_dma_rx_get(star_dev *dev, u32 *buffer,
+		    u32 *ctrl_len, uintptr_t *ext_buf)
+{
+	int is_rx_last;
+	int desc_idx = dev->rx_tail;
+	rx_desc *rx_desc = dev->rx_desc + desc_idx;
+
+	/* Error checking */
+	/* No buffer can be got */
+	if (dev->rx_num == 0)
+		goto err;
+	/* descriptor is owned by DMA - cannot get */
+	if (desc_rx_dma(rx_desc))
+		goto err;
+	/* descriptor is empty - cannot get */
+	if (desc_rx_empty(rx_desc))
+		goto err;
+
+	if (buffer != 0)
+		*buffer = rx_desc->buffer;
+	if (ctrl_len != 0)
+		*ctrl_len = rx_desc->ctrl_len;
+#ifdef CONFIG_ARM64
+	if (ext_buf != 0)
+		*ext_buf = rx_skb_reserve[desc_idx];
+#else
+	if (ext_buf != 0)
+		*ext_buf = rx_desc->reserve;
+#endif
+	/* star memory barrier */
+	rmb();
+
+	desc_rx_init(rx_desc, desc_rx_last(rx_desc));
+	dev->rx_num--;
+	is_rx_last = desc_rx_last(rx_desc);
+	dev->rx_tail = is_rx_last ? 0 : desc_idx + 1;
+
+	return desc_idx;
+err:
+	return -1;
+}
+
+void star_dma_tx_stop(star_dev *dev)
+{
+	int i;
+
+	star_dma_tx_disable(dev);
+	for (i = 0; i < dev->tx_ring_size; i++)
+		desc_tx_take(dev->tx_desc + i);
+}
+
+void star_dma_rx_stop(star_dev *dev)
+{
+	int i;
+
+	star_dma_rx_disable(dev);
+	for (i = 0; i < dev->rx_ring_size; i++)
+		desc_rx_take(dev->rx_desc + i);
+}
+
+int star_mac_init(star_dev *dev, u8 mac_addr[6])
+{
+	void __iomem *base = dev->base;
+
+	STAR_MSG(STAR_VERB, "MAC Initialization\n");
+
+	/* Set Mac Address */
+	star_set_reg(star_my_mac_h(base),
+		     mac_addr[0] << 8 | mac_addr[1] << 0);
+	star_set_reg(star_my_mac_l(base),
+		     mac_addr[2] << 24 | mac_addr[3] << 16 |
+				mac_addr[4] << 8 | mac_addr[5] << 0);
+
+	/* Set Mac Configuration */
+	star_set_reg(STAR_MAC_CFG(base),
+		     STAR_MAC_CFG_CRCSTRIP |
+		     STAR_MAC_CFG_MAXLEN_1522 |
+		     /* IPG field = 0x1f (12-byte inter-packet gap) */
+		     (0x1f & STAR_MAC_CFG_IPG_MASK) << STAR_MAC_CFG_IPG_OFFSET);
+
+	/* Init Flow Control register */
+	star_set_reg(STAR_FC_CFG(base),
+		     STAR_FC_CFG_SEND_PAUSE_TH_DEF |
+		     STAR_FC_CFG_UCPAUSEDIS |
+		     STAR_FC_CFG_BPEN);
+
+	/* Init SEND_PAUSE_RLS */
+	star_set_reg(star_extend_cfg(base), STAR_EXTEND_CFG_SEND_PAUSE_RLS_DEF);
+
+	/* Init MIB counter (reset to 0) */
+	star_mib_init(dev);
+
+	/* Enable Hash Table BIST */
+	star_set_bit(star_hash_ctrl(base), STAR_HASH_CTRL_HASHEN);
+
+	/* Reset Hash Table (All reset to 0) */
+	star_reset_hash_table(dev);
+	star_clear_bit(STAR_ARL_CFG(base), STAR_ARL_CFG_MISCMODE);
+	star_clear_bit(STAR_ARL_CFG(base), STAR_ARL_CFG_HASHALG_CRCDA);
+
+	/* Keep the VLAN tag in received packets */
+	star_clear_bit(STAR_MAC_CFG(base), STAR_MAC_CFG_VLANSTRIP);
+
+	return 0;
+}
+
+static void star_mib_reset(star_dev *dev)
+{
+	void __iomem *base = dev->base;
+
+	star_get_reg(STAR_MIB_RXOKPKT(base));
+	star_get_reg(STAR_MIB_RXOKBYTE(base));
+	star_get_reg(STAR_MIB_RXRUNT(base));
+	star_get_reg(STAR_MIB_RXOVERSIZE(base));
+	star_get_reg(STAR_MIB_RXNOBUFDROP(base));
+	star_get_reg(STAR_MIB_RXCRCERR(base));
+	star_get_reg(STAR_MIB_RXARLDROP(base));
+	star_get_reg(STAR_MIB_RXVLANDROP(base));
+	star_get_reg(STAR_MIB_RXCKSERR(base));
+	star_get_reg(STAR_MIB_RXPAUSE(base));
+	star_get_reg(STAR_MIB_TXOKPKT(base));
+	star_get_reg(STAR_MIB_TXOKBYTE(base));
+	star_get_reg(STAR_MIB_TXPAUSECOL(base));
+}
+
+int star_mib_init(star_dev *dev)
+{
+	star_mib_reset(dev);
+
+	return 0;
+}
+
+int star_phyctrl_init(star_dev *dev, u32 enable, u32 phy_addr)
+{
+	u32 data;
+	void __iomem *base = dev->base;
+
+	data = STAR_PHY_CTRL1_FORCETXFC |
+	STAR_PHY_CTRL1_FORCERXFC |
+	STAR_PHY_CTRL1_FORCEFULL |
+	STAR_PHY_CTRL1_FORCESPD_100M |
+	STAR_PHY_CTRL1_ANEN;
+
+	STAR_MSG(STAR_VERB, "PHY Control Initialization\n");
+	/* Enable/Disable PHY auto-polling */
+	if (enable)
+		star_set_reg(STAR_PHY_CTRL1(base),
+			     data | STAR_PHY_CTRL1_APEN |
+			     (phy_addr &
+			     STAR_PHY_CTRL1_phy_addr_MASK)
+			     << STAR_PHY_CTRL1_phy_addr_OFFSET);
+	else
+		star_set_reg(STAR_PHY_CTRL1(base), data | STAR_PHY_CTRL1_APDIS);
+
+	return 0;
+}
+
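+/*
+ * Program one bin of the multicast hash table: wait for the table BIST
+ * to report done/OK, issue a write-access command for the addressed bit,
+ * then poll until the START bit self-clears.
+ */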
+void star_set_hashbit(star_dev *dev, u32 addr, u32 value)
+{
+	u32 data;
+	void __iomem *base = dev->base;
+
+	STAR_POLLING_TIMEOUT(star_is_set_bit(star_hash_ctrl(base),
+					     STAR_HASH_CTRL_HTBISTDONE));
+	STAR_POLLING_TIMEOUT(star_is_set_bit(star_hash_ctrl(base),
+					     STAR_HASH_CTRL_HTBISTOK));
+	STAR_POLLING_TIMEOUT(!star_is_set_bit(star_hash_ctrl(base),
+					      STAR_HASH_CTRL_START));
+
+	data = (STAR_HASH_CTRL_HASHEN |
+		STAR_HASH_CTRL_ACCESSWT | STAR_HASH_CTRL_START |
+		(value ? STAR_HASH_CTRL_HBITDATA : 0) |
+		(addr &	STAR_HASH_CTRL_HBITADDR_MASK)
+		<< STAR_HASH_CTRL_HBITADDR_OFFSET);
+	star_set_reg(star_hash_ctrl(base), data);
+	STAR_POLLING_TIMEOUT(!star_is_set_bit(star_hash_ctrl(base),
+					      STAR_HASH_CTRL_START));
+}
+
+int star_hw_init(star_dev *dev)
+{
+	star_set_reg(ETHSYS_CONFIG(dev->base),
+		     SWC_MII_MODE | EXT_MDC_MODE | MII_PAD_OE);
+	star_set_reg(MAC_CLOCK_CONFIG(dev->base),
+		     (star_get_reg(MAC_CLOCK_CONFIG(dev->base)) &
+		     (~(0xff << 0))) | MDC_CLK_DIV_10);
+
+	return 0;
+}
+
+void star_link_status_change(star_dev *dev)
+{
+	u32 val, speed;
+
+	val = star_get_reg(STAR_PHY_CTRL1(dev->base));
+	if (dev->link_up != ((val & STAR_PHY_CTRL1_STA_LINK) ? 1UL : 0UL)) {
+		dev->link_up = (val & STAR_PHY_CTRL1_STA_LINK) ? 1UL : 0UL;
+		STAR_MSG(STAR_WARN, "Link status: %s\n",
+			 dev->link_up ? "Up" : "Down");
+		if (dev->link_up) {
+			speed = ((val >> STAR_PHY_CTRL1_STA_SPD_OFFSET) &
+				STAR_PHY_CTRL1_STA_SPD_MASK);
+			STAR_MSG(STAR_WARN, "%s Duplex - %s Mbps mode\n",
+				 (val & STAR_PHY_CTRL1_STA_FULL) ?
+				"Full" : "Half",
+				!speed ? "10" : (speed == 1 ? "100" :
+				(speed == 2 ? "1000" : "unknown")));
+			STAR_MSG(STAR_WARN,
+				 "TX flow control:%s, RX flow control:%s\n",
+				(val & STAR_PHY_CTRL1_STA_TXFC) ? "On" : "Off",
+				(val & STAR_PHY_CTRL1_STA_RXFC) ? "On" : "Off");
+		} else {
+			netif_carrier_off(((star_private *)dev->star_prv)->dev);
+		}
+	}
+
+	if (dev->link_up)
+		netif_carrier_on(((star_private *)dev->star_prv)->dev);
+}
+
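+/*
+ * Request or release NIC power-down. On entry, set NIC_PD and poll for
+ * NIC_PD_READY, acknowledging it (write-1-to-clear) before returning;
+ * on release, simply clear NIC_PD.
+ */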
+void star_nic_pdset(star_dev *dev, bool flag)
+{
+#define MAX_NICPDRDY_RETRY  10000
+	u32 data, retry = 0;
+
+	data = star_get_reg(STAR_MAC_CFG(dev->base));
+	if (flag) {
+		data |= STAR_MAC_CFG_NICPD;
+		star_set_reg(STAR_MAC_CFG(dev->base), data);
+		/* wait until NIC_PD_READY and clear it */
+		do {
+			data = star_get_reg(STAR_MAC_CFG(dev->base));
+			if (data & STAR_MAC_CFG_NICPDRDY) {
+				/* clear NIC_PD_READY */
+				data |= STAR_MAC_CFG_NICPDRDY;
+				star_set_reg(STAR_MAC_CFG(dev->base), data);
+				break;
+			}
+		} while (retry++ < MAX_NICPDRDY_RETRY);
+		if (retry >= MAX_NICPDRDY_RETRY)
+			STAR_MSG(STAR_ERR, "timeout MAX_NICPDRDY_RETRY(%d)\n",
+				 MAX_NICPDRDY_RETRY);
+	} else {
+		data &= ~STAR_MAC_CFG_NICPD;
+		star_set_reg(STAR_MAC_CFG(dev->base), data);
+	}
+}
+
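+/* Arm or disarm MAC wake-on-LAN: clear stale interrupt status, toggle the
+ * WOL enable bit, and unmask (or re-mask) the magic-packet interrupt.
+ */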
+void star_config_wol(star_dev *star_dev, bool enable)
+{
+	STAR_MSG(STAR_DBG, "[%s]%s wol\n", __func__,
+		 enable ? "enable" : "disable");
+	if (enable) {
+		star_set_reg(star_int_sta(star_dev->base),
+			     star_get_reg(star_int_sta(star_dev->base)));
+		star_set_bit(STAR_MAC_CFG(star_dev->base), STAR_MAC_CFG_WOLEN);
+		star_mb();
+		star_clear_bit(star_int_mask(star_dev->base),
+			       STAR_INT_STA_MAGICPKT);
+	} else {
+		star_clear_bit(STAR_MAC_CFG(star_dev->base),
+			       STAR_MAC_CFG_WOLEN);
+		star_mb();
+		star_set_bit(star_int_mask(star_dev->base),
+			     STAR_INT_STA_MAGICPKT);
+	}
+}
+
+void star_switch_to_rmii_mode(star_dev *star_dev)
+{
+	u32 reg_val;
+
+	reg_val = star_get_reg(star_dev->pericfg_base + 0x14);
+	reg_val &= ~(0xf << 0);
+	/* select RMII mode */
+	reg_val |= (0x1 << 0);
+	star_set_reg(star_dev->pericfg_base + 0x14, reg_val);
+
+#ifdef STAR_USE_TX_CLOCK
+	reg_val = star_get_reg(star_dev->pericfg_base + 0x18);
+	reg_val &= ~(0x1 << 0);
+	/* select tx clock */
+	reg_val |= (0x1 << 0);
+	star_set_reg(star_dev->pericfg_base + 0x18, reg_val);
+#endif
+}
+
diff --git a/drivers/net/ethernet/mediatek/star/star_mac.h b/drivers/net/ethernet/mediatek/star/star_mac.h
new file mode 100644
index 0000000..4e595bb
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/star/star_mac.h
@@ -0,0 +1,413 @@
+/* Mediatek STAR MAC network driver.
+ *
+ * Copyright (c) 2016-2017 Mediatek Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _STAR_MAC_H_
+#define _STAR_MAC_H_
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+
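+/*
+ * Descriptor state tests: a descriptor is DMA-owned while COWN is clear,
+ * "last" when it carries the end-of-ring mark, and "empty" once every
+ * field except EOR has been reset to its init value.
+ */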
+#define desc_tx_dma(desc) ((((desc)->ctrl_len) & TX_COWN) ? 0 : 1)
+#define desc_rx_dma(desc) ((((desc)->ctrl_len) & RX_COWN) ? 0 : 1)
+#define desc_tx_last(desc) ((((desc)->ctrl_len) & TX_EOR) ? 1 : 0)
+#define desc_rx_last(desc) ((((desc)->ctrl_len) & RX_EOR) ? 1 : 0)
+#define desc_tx_empty(desc) (((desc)->buffer == 0) && \
+		(((desc)->ctrl_len & ~TX_EOR) == TX_COWN) && \
+		((desc)->vtag == 0) && ((desc)->reserve == 0))
+
+#define desc_rx_empty(desc) (((desc)->buffer == 0) && \
+		(((desc)->ctrl_len & ~RX_EOR) == RX_COWN) && \
+		((desc)->vtag == 0) && ((desc)->reserve == 0))
+
+#ifndef STAR_POLLING_TIMEOUT
+#define STAR_TIMEOUT_COUNT 3000
+#define STAR_POLLING_TIMEOUT(cond) \
+do { \
+	u32 timeout = STAR_TIMEOUT_COUNT; \
+	while (!(cond)) { \
+		if (--timeout == 0) \
+			break; \
+	} \
+	if (timeout == 0) { \
+		STAR_MSG(STAR_ERR, "polling timeout in %s\n", __func__); \
+	} \
+} while (0)
+#endif
+
+/* Star Ethernet Controller registers */
+/* =============================== */
+#define STAR_PHY_CTRL0(base) (base + 0x0000)
+#define STAR_PHY_CTRL0_RWDATA_MASK (0xffff)
+#define STAR_PHY_CTRL0_RWDATA_OFFSET (16)
+#define STAR_PHY_CTRL0_RWOK BIT(15)
+#define STAR_PHY_CTRL0_RDCMD BIT(14)
+#define STAR_PHY_CTRL0_WTCMD  BIT(13)
+#define STAR_PHY_CTRL0_PREG_MASK (0x1f)
+#define STAR_PHY_CTRL0_PREG_OFFSET (8)
+#define STAR_PHY_CTRL0_PA_MASK (0x1f)
+#define STAR_PHY_CTRL0_PA_OFFSET (0)
+
+#define STAR_PHY_CTRL1(base) (base + 0x0004)
+#define STAR_PHY_CTRL1_APDIS BIT(31)
+#define STAR_PHY_CTRL1_APEN (0 << 31)
+#define STAR_PHY_CTRL1_phy_addr_MASK (0x1f)
+#define STAR_PHY_CTRL1_phy_addr_OFFSET (24)
+#define STAR_PHY_CTRL1_RGMII BIT(17)
+#define STAR_PHY_CTRL1_REVMII BIT(16)
+#define STAR_PHY_CTRL1_TXCLK_CKEN BIT(14)
+#define STAR_PHY_CTRL1_FORCETXFC BIT(13)
+#define STAR_PHY_CTRL1_FORCERXFC BIT(12)
+#define STAR_PHY_CTRL1_FORCEFULL BIT(11)
+#define STAR_PHY_CTRL1_FORCESPD_MASK (0x3)
+#define STAR_PHY_CTRL1_FORCESPD_OFFSET (9)
+#define STAR_PHY_CTRL1_FORCESPD_10M (0 << STAR_PHY_CTRL1_FORCESPD_OFFSET)
+#define STAR_PHY_CTRL1_FORCESPD_100M BIT(9)
+#define STAR_PHY_CTRL1_FORCESPD_1G (2 << STAR_PHY_CTRL1_FORCESPD_OFFSET)
+#define STAR_PHY_CTRL1_FORCESPD_RESV (3 << STAR_PHY_CTRL1_FORCESPD_OFFSET)
+#define STAR_PHY_CTRL1_ANEN BIT(8)
+#define STAR_PHY_CTRL1_MIDIS BIT(7)
+#define STAR_PHY_CTRL1_STA_TXFC BIT(6)
+#define STAR_PHY_CTRL1_STA_RXFC BIT(5)
+#define STAR_PHY_CTRL1_STA_FULL BIT(4)
+#define STAR_PHY_CTRL1_STA_SPD_MASK (0x3)
+#define STAR_PHY_CTRL1_STA_DPX_MASK (0x1)
+#define STAR_PHY_CTRL1_STA_SPD_DPX_MASK (0x7)
+#define STAR_PHY_CTRL1_STA_SPD_OFFSET (2)
+#define STAR_PHY_CTRL1_STA_SPD_10M (0 << STAR_PHY_CTRL1_STA_SPD_OFFSET)
+#define STAR_PHY_CTRL1_STA_SPD_100M  BIT(2)
+#define STAR_PHY_CTRL1_STA_SPD_1G (2 << STAR_PHY_CTRL1_STA_SPD_OFFSET)
+#define STAR_PHY_CTRL1_STA_SPD_RESV (3 << STAR_PHY_CTRL1_STA_SPD_OFFSET)
+#define STAR_PHY_CTRL1_STA_TXCLK BIT(1)
+#define STAR_PHY_CTRL1_STA_LINK BIT(0)
+#define STAR_PHY_CTRL1_STA_10M_HALF (0x0)
+#define STAR_PHY_CTRL1_STA_100M_HALF (0x1)
+#define STAR_PHY_CTRL1_STA_10M_FULL (0x4)
+#define STAR_PHY_CTRL1_STA_100M_FULL (0x5)
+
+#define STAR_MAC_CFG(base) (base + 0x0008)
+#define STAR_MAC_CFG_NICPD BIT(31)
+#define STAR_MAC_CFG_WOLEN BIT(30)
+#define STAR_MAC_CFG_NICPDRDY BIT(29)
+#define STAR_MAC_CFG_TXCKSEN BIT(26)
+#define STAR_MAC_CFG_RXCKSEN BIT(25)
+#define STAR_MAC_CFG_ACPTCKSERR BIT(24)
+#define STAR_MAC_CFG_ISTEN BIT(23)
+#define STAR_MAC_CFG_VLANSTRIP BIT(22)
+#define STAR_MAC_CFG_ACPTCRCERR BIT(21)
+#define STAR_MAC_CFG_CRCSTRIP BIT(20)
+#define STAR_MAC_CFG_TXAUTOPAD BIT(19)
+#define STAR_MAC_CFG_ACPTLONGPKT BIT(18)
+#define STAR_MAC_CFG_MAXLEN_MASK (0x3)
+#define STAR_MAC_CFG_MAXLEN_OFFSET (16)
+#define STAR_MAC_CFG_MAXLEN_1518 (0 << STAR_MAC_CFG_MAXLEN_OFFSET)
+#define STAR_MAC_CFG_MAXLEN_1522  BIT(16)
+#define STAR_MAC_CFG_MAXLEN_1536 (2 << STAR_MAC_CFG_MAXLEN_OFFSET)
+#define STAR_MAC_CFG_MAXLEN_RESV (3 << STAR_MAC_CFG_MAXLEN_OFFSET)
+#define STAR_MAC_CFG_IPG_MASK (0x1f)
+#define STAR_MAC_CFG_IPG_OFFSET (10)
+#define STAR_MAC_CFG_NSKP16COL BIT(9)
+#define STAR_MAC_CFG_FASTBACKOFF BIT(8)
+#define STAR_MAC_CFG_TXVLAN_ATPARSE BIT(0)
+
+#define STAR_FC_CFG(base) (base + 0x000c)
+#define STAR_FC_CFG_SENDPAUSETH_MASK (0xfff)
+#define STAR_FC_CFG_SENDPAUSETH_OFFSET (16)
+#define STAR_FC_CFG_COLCNT_CLR_MODE BIT(9)
+#define STAR_FC_CFG_UCPAUSEDIS BIT(8)
+#define STAR_FC_CFG_BPEN BIT(7)
+#define STAR_FC_CFG_CRS_BP_MODE BIT(6)
+#define STAR_FC_CFG_MAXBPCOLEN BIT(5)
+#define STAR_FC_CFG_MAXBPCOLCNT_MASK (0x1f)
+#define STAR_FC_CFG_MAXBPCOLCNT_OFFSET (0)
+/* default value for SEND_PAUSE_TH */
+#define STAR_FC_CFG_SEND_PAUSE_TH_DEF ((STAR_FC_CFG_SEND_PAUSE_TH_2K & \
+				       STAR_FC_CFG_SENDPAUSETH_MASK) \
+				       << STAR_FC_CFG_SENDPAUSETH_OFFSET)
+#define STAR_FC_CFG_SEND_PAUSE_TH_2K (0x800)
+
+#define STAR_ARL_CFG(base) (base + 0x0010)
+#define STAR_ARL_CFG_FILTER_PRI_TAG BIT(6)
+#define STAR_ARL_CFG_FILTER_VLAN_UNTAG BIT(5)
+#define STAR_ARL_CFG_MISCMODE BIT(4)
+#define STAR_ARL_CFG_MYMACONLY BIT(3)
+#define STAR_ARL_CFG_CPULEARNDIS BIT(2)
+#define STAR_ARL_CFG_RESVMCFILTER BIT(1)
+#define STAR_ARL_CFG_HASHALG_CRCDA BIT(0)
+
+#define star_my_mac_h(base) (base + 0x0014)
+#define star_my_mac_l(base) (base + 0x0018)
+
+#define star_hash_ctrl(base) (base + 0x001c)
+#define STAR_HASH_CTRL_HASHEN BIT(31)
+#define STAR_HASH_CTRL_HTBISTDONE BIT(17)
+#define STAR_HASH_CTRL_HTBISTOK BIT(16)
+#define STAR_HASH_CTRL_START BIT(14)
+#define STAR_HASH_CTRL_ACCESSWT BIT(13)
+#define STAR_HASH_CTRL_ACCESSRD (0 << 13)
+#define STAR_HASH_CTRL_HBITDATA BIT(12)
+#define STAR_HASH_CTRL_HBITADDR_MASK (0x1ff)
+#define STAR_HASH_CTRL_HBITADDR_OFFSET (0)
+
+#define star_vlan_ctrl(base) (base + 0x0020)
+#define STAR_VLAN_ID_0_1(base) (base + 0x0024)
+#define STAR_VLAN_ID_2_3(base) (base + 0x0028)
+
+#define star_dummy(base) (base + 0x002C)
+#define STAR_DUMMY_FPGA_MODE BIT(31)
+#define STAR_DUMMY_E2_ECO BIT(7)
+#define STAR_DUMMY_TXRXRDY BIT(1)
+#define STAR_DUMMY_MDCMDIODONE BIT(0)
+
+#define star_dma_cfg(base) (base + 0x0030)
+#define STAR_DMA_CFG_RX2BOFSTDIS BIT(16)
+#define STAR_DMA_CFG_TXPOLLPERIOD_MASK (0x3)
+#define STAR_DMA_CFG_TXPOLLPERIOD_OFFSET (6)
+#define STAR_DMA_CFG_TXPOLLPERIOD_1US (0 << STAR_DMA_CFG_TXPOLLPERIOD_OFFSET)
+#define STAR_DMA_CFG_TXPOLLPERIOD_10US BIT(6)
+#define STAR_DMA_CFG_TXPOLLPERIOD_100US (2 << STAR_DMA_CFG_TXPOLLPERIOD_OFFSET)
+#define STAR_DMA_CFG_TXPOLLPERIOD_1000US (3 << STAR_DMA_CFG_TXPOLLPERIOD_OFFSET)
+#define STAR_DMA_CFG_TXPOLLEN BIT(5)
+#define STAR_DMA_CFG_TXSUSPEND BIT(4)
+#define STAR_DMA_CFG_RXPOLLPERIOD_MASK (0x3)
+#define STAR_DMA_CFG_RXPOLLPERIOD_OFFSET (2)
+#define STAR_DMA_CFG_RXPOLLPERIOD_1US (0 << STAR_DMA_CFG_RXPOLLPERIOD_OFFSET)
+#define STAR_DMA_CFG_RXPOLLPERIOD_10US BIT(2)
+#define STAR_DMA_CFG_RXPOLLPERIOD_100US (2 << STAR_DMA_CFG_RXPOLLPERIOD_OFFSET)
+#define STAR_DMA_CFG_RXPOLLPERIOD_1000US (3 << STAR_DMA_CFG_RXPOLLPERIOD_OFFSET)
+#define STAR_DMA_CFG_RXPOLLEN BIT(1)
+#define STAR_DMA_CFG_RXSUSPEND BIT(0)
+
+#define star_tx_dma_ctrl(base) (base + 0x0034)
+#define TX_RESUME ((u32)0x01 << 2)
+#define TX_STOP ((u32)0x01 << 1)
+#define TX_START ((u32)0x01 << 0)
+
+#define star_rx_dma_ctrl(base) (base + 0x0038)
+#define STAR_TX_DPTR(base) (base + 0x003c)
+#define STAR_RX_DPTR(base) (base + 0x0040)
+#define STAR_TX_BASE_ADDR(base) (base + 0x0044)
+#define STAR_RX_BASE_ADDR(base) (base + 0x0048)
+
+#define star_int_sta(base) (base + 0x0050)
+#define STAR_INT_STA_RX_PCODE BIT(10)
+#define STAR_INT_STA_TX_SKIP BIT(9)
+#define STAR_INT_STA_TXC BIT(8)
+#define STAR_INT_STA_TXQE BIT(7)
+#define STAR_INT_STA_RXC BIT(6)
+#define STAR_INT_STA_RXQF BIT(5)
+#define STAR_INT_STA_MAGICPKT BIT(4)
+#define STAR_INT_STA_MIBCNTHALF BIT(3)
+#define STAR_INT_STA_PORTCHANGE BIT(2)
+#define STAR_INT_STA_RXFIFOFULL BIT(1)
+
+#define star_int_mask(base) (base + 0x0054)
+#define star_test0(base) (base + 0x0058)
+
+#define star_test1(base) (base + 0x005c)
+#define STAR_TEST1_RST_HASH_BIST BIT(31)
+#define STAR_TEST1_EXTEND_RETRY BIT(20)
+
+#define star_extend_cfg(base) (base + 0x0060)
+#define STAR_EXTEND_CFG_SDPAUSEOFFTH_MASK (0xfff)
+#define STAR_EXTEND_CFG_SDPAUSEOFFTH_OFFSET (16)
+/* default value for SEND_PAUSE_RLS */
+#define STAR_EXTEND_CFG_SEND_PAUSE_RLS_DEF \
+		((STAR_EXTEND_CFG_SEND_PAUSE_RLS_1K & \
+		STAR_EXTEND_CFG_SDPAUSEOFFTH_MASK) \
+		<< STAR_EXTEND_CFG_SDPAUSEOFFTH_OFFSET)
+#define STAR_EXTEND_CFG_SEND_PAUSE_RLS_1K (0x400)
+
+#define ETHSYS_CONFIG(base)	(base + 0x94)
+#define INT_PHY_SEL BIT(3)
+#define SWC_MII_MODE BIT(2)
+#define EXT_MDC_MODE BIT(1)
+#define MII_PAD_OE BIT(0)
+
+#define MAC_MODE_CONFIG(base) (base + 0x98)
+#define BIG_ENDIAN BIT(0)
+
+#define MAC_CLOCK_CONFIG(base)  (base + 0xac)
+#define TXCLK_OUT_INV BIT(19)
+#define RXCLK_OUT_INV BIT(18)
+#define TXCLK_IN_INV BIT(17)
+#define RXCLK_IN_INV BIT(16)
+#define MDC_INV BIT(12)
+#define MDC_NEG_LAT BIT(8)
+#define MDC_DIV ((u32)0xFF << 0)
+#define MDC_CLK_DIV_10 ((u32)0x0A << 0)
+
+/* MIB Counter register */
+#define STAR_MIB_RXOKPKT(base) (base + 0x0100)
+#define STAR_MIB_RXOKBYTE(base) (base + 0x0104)
+#define STAR_MIB_RXRUNT(base) (base + 0x0108)
+#define STAR_MIB_RXOVERSIZE(base) (base + 0x010c)
+#define STAR_MIB_RXNOBUFDROP(base) (base + 0x0110)
+#define STAR_MIB_RXCRCERR(base) (base + 0x0114)
+#define STAR_MIB_RXARLDROP(base) (base + 0x0118)
+#define STAR_MIB_RXVLANDROP(base) (base + 0x011c)
+#define STAR_MIB_RXCKSERR(base) (base + 0x0120)
+#define STAR_MIB_RXPAUSE(base) (base + 0x0124)
+#define STAR_MIB_TXOKPKT(base) (base + 0x0128)
+#define STAR_MIB_TXOKBYTE(base) (base + 0x012c)
+#define STAR_MIB_TXPAUSECOL(base) (base + 0x0130)
+
+/**
+ * @brief structure for Tx descriptor Ring
+ */
+typedef struct tx_desc_s {
+	/* Tx control and length */
+	u32 ctrl_len;
+/* Tx descriptor Own bit; 1: CPU own */
+#define TX_COWN  BIT(31)
+/* End of Tx descriptor ring */
+#define TX_EOR BIT(30)
+/* First Segment descriptor */
+#define TX_FS BIT(29)
+/* Last Segment descriptor */
+#define TX_LS BIT(28)
+/* Tx complete interrupt enable (when set, DMA generate
+ * interrupt after tx sending out pkt)
+ */
+#define TX_INT BIT(27)
+/* Insert VLAN Tag in the following word (in tdes2) */
+#define TX_INSV BIT(26)
+/* Enable IP checksum generation offload */
+#define TX_ICO BIT(25)
+/* Enable UDP checksum generation offload */
+#define TX_UCO BIT(24)
+/* Enable TCP checksum generation offload */
+#define TX_TCO BIT(23)
+/* Tx Segment Data length */
+#define TX_LEN_MASK (0xffff)
+#define TX_LEN_OFFSET (0)
+	/* Tx segment data pointer */
+	u32 buffer;
+	u32 vtag;
+/* VLAN Tag EPID */
+#define TX_EPID_MASK (0xffff)
+#define TX_EPID_OFFSET (16)
+/* VLAN Tag Priority */
+#define TX_PRI_MASK (0x7)
+#define TX_PRI_OFFSET (13)
+/* VLAN Tag CFI (Canonical Format Indicator) */
+#define TX_CFI BIT(12)
+/* VLAN Tag VID */
+#define TX_VID_MASK (0xfff)
+#define TX_VID_OFFSET (0)
+	/* Tx pointer for external management usage */
+	u32 reserve;
+} tx_desc;
+
+/* Rx Ring */
+typedef struct rx_desc_s {
+	/* Rx control and length */
+	u32 ctrl_len;
+/* RX descriptor Own bit; 1: CPU own */
+#define RX_COWN BIT(31)
+/* End of Rx descriptor ring */
+#define RX_EOR BIT(30)
+/* First Segment descriptor */
+#define RX_FS BIT(29)
+/* Last Segment descriptor */
+#define RX_LS BIT(28)
+/* Rx packet is oversize */
+#define RX_OSIZE BIT(25)
+/* Rx packet is CRC Error */
+#define RX_CRCERR BIT(24)
+/* Rx packet DMAC is Reserved Multicast Address */
+#define RX_RMC BIT(23)
+/* Rx packet DMAC is hit in hash table */
+#define RX_HHIT BIT(22)
+/* Rx packet DMAC is My_MAC */
+#define RX_MYMAC BIT(21)
+/* VLAN Tagged in the following word */
+#define RX_VTAG BIT(20)
+#define RX_PROT_MASK (0x3)
+#define RX_PROT_OFFSET (18)
+/* Protocol: IPV4 */
+#define RX_PROT_IP (0x0)
+/* Protocol: UDP */
+#define RX_PROT_UDP (0x1)
+/* Protocol: TCP */
+#define RX_PROT_TCP (0x2)
+/* Protocol: others */
+#define RX_PROT_OTHERS (0x3)
+/* IP checksum fail (meaningful when PROT is IPV4) */
+#define RX_IPF BIT(17)
+/* Layer-4 checksum fail (meaningful when PROT is UDP or TCP) */
+#define RX_L4F BIT(16)
+/* Segment Data length(FS=0) / Whole Packet Length(FS=1) */
+#define RX_LEN_MASK (0xffff)
+#define RX_LEN_OFFSET			(0)
+	/* RX segment data pointer */
+	u32 buffer;
+
+	u32 vtag;
+#define RX_EPID_MASK			(0xffff)	/* VLAN Tag EPID */
+#define RX_EPID_OFFSET			(16)
+#define RX_PRI_MASK (0x7)		/* VLAN Tag Priority */
+#define RX_PRI_OFFSET			(13)
+#define RX_CFI BIT(12)
+#define RX_VID_MASK (0xfff)		/* VLAN Tag VID */
+#define RX_VID_OFFSET			(0)
+	u32 reserve;	/* Rx pointer for external management usage */
+} rx_desc;
+
+typedef struct star_dev_s {
+	void __iomem *base;               /* Base register of Star Ethernet */
+	void __iomem *pericfg_base;            /* Base register of PERICFG */
+	tx_desc *tx_desc;         /* Base Address of Tx descriptor Ring */
+	rx_desc *rx_desc;         /* Base Address of Rx descriptor Ring */
+	u32 tx_ring_size;
+	u32 rx_ring_size;
+	u32 tx_head;             /* Next Tx descriptor to fill (producer) */
+	u32 tx_tail;             /* Next Tx descriptor to reclaim (consumer) */
+	u32 rx_head;             /* Next Rx descriptor to refill (producer) */
+	u32 rx_tail;             /* Next Rx descriptor to complete (consumer) */
+	u32 tx_num;
+	u32 rx_num;
+	u32 link_up;             /* link status */
+	void *star_prv;
+	struct net_device_stats stats;
+	struct eth_phy_ops *phy_ops;
+	struct device *dev;
+} star_dev;
+
+int star_hw_init(star_dev *dev);
+
+u16 star_mdc_mdio_read(star_dev *dev, u32 phy_addr, u32 phy_reg);
+void star_mdc_mdio_write(star_dev *dev, u32 phy_addr, u32 phy_reg, u16 value);
+
+int star_dma_init(star_dev *dev, uintptr_t desc_viraddr,
+		  dma_addr_t desc_dmaaddr);
+int star_dma_tx_set(star_dev *dev, u32 buffer,
+		    u32 length, uintptr_t extBuf);
+int star_dma_tx_get(star_dev *dev, u32 *buffer,
+		    u32 *ctrl_len, uintptr_t *extBuf);
+int star_dma_rx_set(star_dev *dev, u32 buffer,
+		    u32 length, uintptr_t extBuf);
+int star_dma_rx_get(star_dev *dev, u32 *buffer,
+		    u32 *ctrl_len, uintptr_t *extBuf);
+void star_dma_tx_stop(star_dev *dev);
+void star_dma_rx_stop(star_dev *dev);
+
+int star_mac_init(star_dev *dev, u8 mac_addr[6]);
+
+int star_mib_init(star_dev *dev);
+int star_phyctrl_init(star_dev *dev, u32 enable, u32 phy_addr);
+void star_set_hashbit(star_dev *dev, u32 addr, u32 value);
+
+void star_link_status_change(star_dev *dev);
+void star_nic_pdset(star_dev *dev, bool flag);
+
+void star_config_wol(star_dev *star_dev, bool enable);
+void enable_eth_wol(star_dev *star_dev);
+void disable_eth_wol(star_dev *star_dev);
+void star_switch_to_rmii_mode(star_dev *star_dev);
+#endif
diff --git a/drivers/net/ethernet/mediatek/star/star_phy.c b/drivers/net/ethernet/mediatek/star/star_phy.c
new file mode 100644
index 0000000..21489248
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/star/star_phy.c
@@ -0,0 +1,348 @@
+/* Mediatek STAR MAC network driver.
+ *
+ * Copyright (c) 2016-2017 Mediatek Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include "star.h"
+
+static void ex_phy_reset(star_dev *sdev, u32 id)
+{
+	u16 val = 0;
+
+	val = star_mdc_mdio_read(sdev, id, 0);
+	val |= BMCR_RESET;
+	star_mdc_mdio_write(sdev, id, 0, val);
+}
+
+static void ex_phy_re_an(star_dev *dev, u32 id)
+{
+	u16 val = 0;
+
+	val = star_mdc_mdio_read(dev, id, 0);
+	/* enable AN, and restart AN for SMSC PHY */
+	val |= BMCR_ANENABLE | BMCR_ANRESTART;
+	star_mdc_mdio_write(dev, id, 0, val);
+}
+
+static void ex_phy_dis_gpsi(star_dev *dev, u32 id)
+{
+	u16 val = 0;
+
+	val = star_mdc_mdio_read(dev, id, 18);
+	val &= ~0x400;
+	star_mdc_mdio_write(dev, id, 18, val);
+}
+
+static void smsc8710a_phy_init(star_dev *sdev)
+{
+	u32 phy_id = sdev->phy_ops->addr;
+
+	/* E2 ECO fixup the problem which 10M can't rx packet on E1 IC */
+	star_set_bit(star_dummy(sdev->base), STAR_DUMMY_E2_ECO);
+	/* for smsc8710a, after soft reset,
+	 * AN is disabled, so enable it again
+	 */
+	ex_phy_reset(sdev, phy_id);
+	ex_phy_re_an(sdev, phy_id);
+}
+
+static struct eth_phy_ops smsc8710a_phy_ops = {
+	.phy_id = PHYID2_SMSC8710A,
+	.init = smsc8710a_phy_init,
+};
+
+static void dm9162_phy_init(star_dev *sdev)
+{
+	u32 phy_id = sdev->phy_ops->addr;
+
+	ex_phy_reset(sdev, phy_id);
+	ex_phy_dis_gpsi(sdev, phy_id);
+}
+
+static struct eth_phy_ops dm9162_phy_ops = {
+	.phy_id = PHYID2_DM9162_XMII,
+	.init = dm9162_phy_init,
+};
+
+static void ksz8081mnx_phy_init(star_dev *sdev)
+{
+	u32 data;
+	star_private *star_prv = sdev->star_prv;
+
+	/*set davicom phy register0 bit10 is 0 in MII for mt8160*/
+	data = star_mdc_mdio_read(sdev, star_prv->phy_addr, 0x0) & (~(1 << 10));
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 0x0, data);
+	data = star_mdc_mdio_read(sdev, star_prv->phy_addr, 0x0);
+}
+
+static struct eth_phy_ops ksz8081mnx_phy_ops = {
+	.phy_id = PHYID2_KSZ8081MNX,
+	.init = ksz8081mnx_phy_init,
+};
+
+static void default_phy_init(star_dev *sdev)
+{
+	u32 phy_id = sdev->phy_ops->addr;
+
+	/* E2 ECO fixup the problem which 10M can't rx packet on E1 IC */
+	star_set_bit(star_dummy(sdev->base), STAR_DUMMY_E2_ECO);
+	ex_phy_reset(sdev, phy_id);
+}
+
+static struct eth_phy_ops default_phy_ops = {
+	.phy_id = 0,
+	.init = default_phy_init,
+};
+
+static void ip101g_az_disable(star_dev *dev, u32 id)
+{
+	star_mdc_mdio_write(dev, id, 0x0d, 0x0007);
+	star_mdc_mdio_write(dev, id, 0x0e, 0x003c);
+	star_mdc_mdio_write(dev, id, 0x0d, 0x4007);
+	star_mdc_mdio_write(dev, id, 0x0e, 0x0000);
+	star_mdc_mdio_read(dev, id, 0x0e);
+}
+
+static void ip101g_anar_init(star_dev *sdev, u32 id)
+{
+	u16 val = 0;
+
+	val = star_mdc_mdio_read(sdev, id, 4);
+	val &= ~(ADVERTISE_NPAGE | ADVERTISE_RFAULT | ADVERTISE_PAUSE_ASYM);
+	star_mdc_mdio_write(sdev, id, 4, val);
+}
+
+static void ip101g_phy_init(star_dev *sdev)
+{
+	u32 phy_id = sdev->phy_ops->addr;
+
+	/* E2 ECO fixup the problem which 10M can't rx packet on E1 IC */
+	star_set_bit(star_dummy(sdev->base), STAR_DUMMY_E2_ECO);
+	ex_phy_reset(sdev, phy_id);
+	ip101g_az_disable(sdev, phy_id);
+	ip101g_anar_init(sdev, phy_id);
+}
+
+static struct eth_phy_ops ip101g_phy_ops = {
+	.phy_id = PHYID2_IP101G,
+	.init = ip101g_phy_init,
+};
+
+static void rtl8201fr_phy_init(star_dev *sdev)
+{
+	u16 save_page;
+	u32 temp;
+	star_private *star_prv = sdev->star_prv;
+
+	/* save page */
+	save_page = star_mdc_mdio_read(sdev, star_prv->phy_addr, 31);
+	/* set to page0 */
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 31, 0x0000);
+	/* set register 0[15]=1 */
+	temp = star_mdc_mdio_read(sdev, star_prv->phy_addr, 0);
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 0, temp | (1 << 15));
+	/* set register 0[12]=0 */
+	temp = star_mdc_mdio_read(sdev, star_prv->phy_addr, 0);
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 0, temp & (~(1 << 12)));
+	/* set register 4[11]=0 */
+	temp = star_mdc_mdio_read(sdev, star_prv->phy_addr, 4);
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 4, temp & (~(1 << 11)));
+	/* set register 0[12]=1 */
+	temp = star_mdc_mdio_read(sdev, star_prv->phy_addr, 0);
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 0, temp | (1 << 12));
+	/* set page from save_page */
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 31, save_page);
+
+	save_page = star_mdc_mdio_read(sdev, star_prv->phy_addr, 31);
+	/* set page 4 */
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 31, 0x0004);
+	/* EEE_nway_disable */
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 16, 0x4077);
+	/* set to page0 */
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 31, 0x0000);
+	/* Set Address mode and MMD Device = 7 */
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 13, 0x0007);
+	/* Set Address Value */
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 14, 0x003C);
+	/* Set Data mode and MMD Device = 7 */
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 13, 0x4007);
+	/* turn off 100BASE-TX EEE capability */
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 14, 0x0000);
+	/* Restart Auto-Negotiation */
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 0, 0x1200);
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 31, save_page);
+
+#if 0
+	save_page = star_mdc_mdio_read(sdev, star_prv->phy_addr, 31);
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 31, 0x0000);
+	temp = star_mdc_mdio_read(sdev, star_prv->phy_addr, 24);
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 24, temp & (~(1 << 15)));
+	star_mdc_mdio_write(sdev, star_prv->phy_addr, 31, save_page);
+#endif
+
+#if 0
+	/* init tx for realtek RMII timing issue */
+	temp = star_get_reg(star_test0(sdev->base));
+	temp &= ~(0x1 << 31);
+	/* select tx clock inverse */
+	temp |= (0x1 << 31);
+	star_set_reg(star_test0(sdev->base), temp);
+	STAR_MSG(STAR_DBG, "0x58(0x%x).\n",
+		 star_get_reg(star_test0(sdev->base)));
+#endif
+}
+
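+/*
+ * The RTL8201F(R) keeps its WOL controls behind a page-select register
+ * (register 31). Each block below switches to the relevant page, programs
+ * the MAC address, magic-packet, and isolate bits, then returns to page 0.
+ */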
+void rtl8201fr_wol_enable(struct net_device *netdev)
+{
+	star_private *star_prv = NULL;
+	star_dev *dev = NULL;
+	struct sockaddr sa;
+	char *mac_addr = sa.sa_data;
+	u32 val = 0;
+
+	star_prv = netdev_priv(netdev);
+	dev = &star_prv->star_dev;
+
+	STAR_MSG(STAR_DBG, "enter rtl8201fr_wol_enable\n");
+
+	memcpy(sa.sa_data, netdev->dev_addr, netdev->addr_len);
+	STAR_MSG(STAR_DBG, "device mac address:%x %x %x %x %x %x.\n",
+		 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+		 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+
+	/* enable phy wol */
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 4, 0x61);
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 0, 0x3200);
+
+	/* set mac address */
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 31, 0x12);
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 16,
+			    (mac_addr[1] << 8) | (mac_addr[0] << 0));
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 17,
+			    (mac_addr[3] << 8) | (mac_addr[2] << 0));
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 18,
+			    (mac_addr[5] << 8) | (mac_addr[4] << 0));
+	STAR_MSG(STAR_DBG, "mac address:%x %x %x %x %x %x.\n",
+		 mac_addr[0], mac_addr[1], mac_addr[2],
+		 mac_addr[3], mac_addr[4], mac_addr[5]);
+
+	/* set max length */
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 31, 0x11);
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 17, 0x1FFF);
+
+	/* enable magic packet event */
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 16, 0x1000);
+
+	/* set tx isolate */
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 31, 0x7);
+	val = star_mdc_mdio_read(dev, star_prv->phy_addr, 20);
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 20, val | (1 << 15));
+
+	/* set rx isolate */
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 31, 0x17);
+	val = star_mdc_mdio_read(dev, star_prv->phy_addr, 19);
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 19, val | (1 << 15));
+
+	/* return page 0 */
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 31, 0);
+}
+
+void rtl8201fr_wol_disable(struct net_device *netdev)
+{
+	u32 val = 0;
+	star_private *star_prv = netdev_priv(netdev);
+	star_dev *dev = &star_prv->star_dev;
+
+	STAR_MSG(STAR_DBG, "enter rtl8201fr_wol_disable\n");
+
+	/* unset rx isolate */
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 31, 0x17);
+	val = star_mdc_mdio_read(dev, star_prv->phy_addr, 19);
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 19, val & (~(1 << 15)));
+
+	/* unset tx isolate */
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 31, 0x7);
+	val = star_mdc_mdio_read(dev, star_prv->phy_addr, 20);
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 20, val & (~(1 << 15)));
+
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 31, 0x11);
+	/* disable magic packet event */
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 16, 0x0);
+
+	/* unset max length and reset PMEB pin as high */
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 17, 0x9FFF);
+
+	/* return page 0 */
+	star_mdc_mdio_write(dev, star_prv->phy_addr, 31, 0);
+}
+
+static struct eth_phy_ops rtl8201fr_phy_ops = {
+	.phy_id = PHYID2_RTL8201FR,
+	.init = rtl8201fr_phy_init,
+	.wol_enable = rtl8201fr_wol_enable,
+	.wol_disable = rtl8201fr_wol_disable,
+};
+
+int star_detect_phyid(star_dev *dev)
+{
+	int addr;
+	u16 reg2;
+
+	for (addr = 0; addr < 32; addr++) {
+		reg2 = star_mdc_mdio_read(dev, addr, PHY_REG_IDENTFIR2);
+		STAR_MSG(STAR_DBG, "%s(%d) id=%d, vendor=0x%x\n",
+			 __func__, __LINE__, addr, reg2);
+
+		switch (reg2) {
+		case PHYID2_SMSC8710A:
+			STAR_MSG(STAR_WARN, "Ethernet: SMSC8710A PHY\n");
+			dev->phy_ops = &smsc8710a_phy_ops;
+			break;
+		case PHYID2_DM9162_XMII:
+			STAR_MSG(STAR_WARN, "Ethernet: DM9162 PHY\n");
+			dev->phy_ops = &dm9162_phy_ops;
+			break;
+		case PHYID2_KSZ8081MNX:
+			STAR_MSG(STAR_WARN, "Ethernet: KSZ8081 PHY\n");
+			dev->phy_ops = &ksz8081mnx_phy_ops;
+			break;
+		case PHYID2_IP101G:
+			STAR_MSG(STAR_WARN, "Ethernet: IP101G PHY\n");
+			dev->phy_ops = &ip101g_phy_ops;
+			break;
+		case PHYID2_RTL8201FR:
+			STAR_MSG(STAR_WARN, "Ethernet: RTL8201FR PHY\n");
+			dev->phy_ops = &rtl8201fr_phy_ops;
+			break;
+		default:
+			continue;
+		}
+
+		/* record the MDIO address the matching PHY answered on */
+		dev->phy_ops->addr = addr;
+		break;
+	}
+
+	if (addr == 32) {
+		for (addr = 0; addr < 32; addr++) {
+			reg2 = star_mdc_mdio_read(dev, addr, PHY_REG_IDENTFIR2);
+			STAR_MSG(STAR_ERR,
+				 "%s id=%d, vendor=0x%x\n", __func__,
+				 addr, reg2);
+
+			if (reg2 != 0xFFFF) {
+				STAR_MSG(STAR_ERR,
+					 "Unsupported PHY, falling back to default ops\n");
+				dev->phy_ops = &default_phy_ops;
+				dev->phy_ops->phy_id = reg2;
+				dev->phy_ops->addr = addr;
+				break;
+			}
+		}
+	}
+
+	return addr;
+}
diff --git a/drivers/net/ethernet/mediatek/star/star_phy.h b/drivers/net/ethernet/mediatek/star/star_phy.h
new file mode 100644
index 0000000..33b7a2f
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/star/star_phy.h
@@ -0,0 +1,30 @@
+/* Mediatek STAR MAC network driver.
+ *
+ * Copyright (c) 2016-2017 Mediatek Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _STAR_PHY_H_
+#define _STAR_PHY_H_
+
+/* Reg3: PHY Identifier 2 */
+#define PHY_REG_IDENTFIR2 (3)
+#define PHYID2_SMSC8710A (0xC0F1)
+#define PHYID2_DM9162_XMII (0xB8A0)
+#define PHYID2_KSZ8081MNX (0x1560)
+#define PHYID2_IP101G (0x0c54)
+#define PHYID2_INTVITESSE (0x0430)
+#define PHYID2_RTL8201 (0x8201)
+#define PHYID2_RTL8211 (0xC912)
+#define PHYID2_IP101A (0x0C50)
+#define PHYID2_SMSC7100 (0xC0B1)
+#define PHYID2_SMSC8700 (0xC0C4)
+#define PHYID2_DM8710A (0x0011)
+#define PHYID2_RTL8201FR (0xC816)
+
+int star_detect_phyid(star_dev *dev);
+
+#endif
diff --git a/drivers/net/ethernet/mediatek/star/star_procfs.c b/drivers/net/ethernet/mediatek/star/star_procfs.c
new file mode 100644
index 0000000..03a97c2
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/star/star_procfs.c
@@ -0,0 +1,504 @@
+/* Mediatek STAR MAC network driver.
+ *
+ * Copyright (c) 2016-2017 Mediatek Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include "star.h"
+#include "star_procfs.h"
+
+static struct star_procfs star_proc;
+
+static bool str_cmp_seq(char **buf, const char *substr)
+{
+	size_t len = strlen(substr);
+
+	if (!strncmp(*buf, substr, len)) {
+		/* skip the matched token and its trailing separator */
+		*buf += len + 1;
+		return true;
+	}
+
+	return false;
+}
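+
+/*
+ * Example (illustrative only): for a write of "wp 4 0x1200", a successful
+ * str_cmp_seq(&buf, "wp") advances buf past the "wp " prefix, leaving
+ * "4 0x1200" for the sscanf()-based parsing in proc_reg_write() below.
+ */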
+
+static struct net_device *star_get_net_device(void)
+{
+	if (!star_proc.ndev)
+		star_proc.ndev = dev_get_by_name(&init_net, "eth0");
+
+	return star_proc.ndev;
+}
+
+static void star_put_net_device(void)
+{
+	if (!star_proc.ndev)
+		return;
+
+	dev_put(star_proc.ndev);
+}
+
+static ssize_t proc_phy_reg_read(struct file *file, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	STAR_MSG(STAR_ERR, "read phy register useage:\n");
+	STAR_MSG(STAR_ERR, "\t echo rp reg_addr > phy_reg\n");
+
+	STAR_MSG(STAR_ERR, "write phy register useage:\n");
+	STAR_MSG(STAR_ERR, "\t echo wp reg_addr value > phy_reg\n");
+
+	return 0;
+}
+
+static ssize_t proc_reg_write(struct file *file,
+			      const char __user *buffer,
+			      size_t count, loff_t *pos)
+{
+	char *buf, *tmp;
+	u16 phy_val;
+	u32 i, mac_val, len = 0, address = 0, value = 0;
+	struct net_device *dev;
+	star_private *star_prv;
+	star_dev *star_dev;
+
+	tmp = kmalloc(count + 1, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+	buf = tmp;
+	if (copy_from_user(buf, buffer, count)) {
+		kfree(tmp);
+		return -EFAULT;
+	}
+	buf[count] = '\0';
+
+	dev = star_get_net_device();
+	if (!dev) {
+		STAR_MSG(STAR_ERR, "Could not get eth0 device!!!\n");
+		kfree(tmp);
+		return -ENODEV;
+	}
+
+	star_prv = netdev_priv(dev);
+	star_dev = &star_prv->star_dev;
+
+	if (str_cmp_seq(&buf, "rp")) {
+		if (!kstrtou32(buf, 0, &address)) {
+			STAR_MSG(STAR_ERR, "address(0x%x):0x%x\n",
+				 address,
+				 star_mdc_mdio_read(star_dev,
+						    star_prv->phy_addr,
+						    address));
+		} else {
+			STAR_MSG(STAR_ERR, "kstrtou32 rp(%s) error\n", buf);
+		}
+	} else if (str_cmp_seq(&buf, "wp")) {
+		if (sscanf(buf, "%x %x", &address, &value) == 2) {
+			phy_val = star_mdc_mdio_read(star_dev,
+						     star_prv->phy_addr,
+						     address);
+			star_mdc_mdio_write(star_dev, star_prv->phy_addr,
+					    address, (u16)value);
+			STAR_MSG(STAR_ERR, "0x%x: 0x%x --> 0x%x!\n",
+				 address, phy_val,
+				 star_mdc_mdio_read(star_dev,
+						    star_prv->phy_addr,
+						    address));
+		} else {
+			STAR_MSG(STAR_ERR, "sscanf wp(%s) error\n", buf);
+		}
+	} else if (str_cmp_seq(&buf, "rr")) {
+		if (sscanf(buf, "%x %x", &address, &len) == 2) {
+			for (i = 0; i < len / 4; i++) {
+				STAR_MSG(STAR_ERR,
+					 "%p:\t%08x\t%08x\t%08x\t%08x\t\n",
+					 star_dev->base + address + i * 16,
+					 star_get_reg(star_dev->base
+						    + address + i * 16),
+					 star_get_reg(star_dev->base + address
+						    + i * 16 + 4),
+					 star_get_reg(star_dev->base + address
+						    + i * 16 + 8),
+					 star_get_reg(star_dev->base + address
+						    + i * 16 + 12));
+			}
+		} else {
+			STAR_MSG(STAR_ERR, "sscanf rr(%s) error\n", buf);
+		}
+	} else if (str_cmp_seq(&buf, "wr")) {
+		if (sscanf(buf, "%x %x", &address, &value) == 2) {
+			mac_val = star_get_reg(star_dev->base + address);
+			star_set_reg(star_dev->base + address, value);
+			STAR_MSG(STAR_ERR, "%p: %08x --> %08x!\n",
+				 star_dev->base + address,
+				 mac_val,
+				 star_get_reg(star_dev->base
+					    + address));
+		} else {
+			STAR_MSG(STAR_ERR, "sscanf wr(%s) error\n", buf);
+		}
+	} else {
+		STAR_MSG(STAR_ERR, "wrong arg:%s\n", buf);
+	}
+
+	kfree(tmp);
+	return count;
+}
+
+static const struct file_operations star_phy_reg_ops = {
+	.read = proc_phy_reg_read,
+	.write = proc_reg_write,
+};
+
+static ssize_t proc_mac_reg_read(struct file *file, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	STAR_MSG(STAR_ERR, "read MAC register useage:\n");
+	STAR_MSG(STAR_ERR, "\t echo rr reg_addr len > macreg\n");
+
+	STAR_MSG(STAR_ERR, "write MAC register useage:\n");
+	STAR_MSG(STAR_ERR, "\t echo wr reg_addr value > macreg\n");
+
+	return 0;
+}
+
+static const struct file_operations star_mac_reg_ops = {
+	.read = proc_mac_reg_read,
+	.write = proc_reg_write,
+};
+
+static int get_wol_status(struct seq_file *seq, void *v)
+{
+	struct net_device *dev;
+	star_private *star_prv;
+
+	dev = star_get_net_device();
+	if (!dev) {
+		STAR_MSG(STAR_ERR, "Could not get eth0 device!!!\n");
+		return -ENODEV;
+	}
+
+	star_prv = netdev_priv(dev);
+
+	seq_printf(seq, "Wake On Lan (WOL) type is (%d)\n", star_prv->wol);
+	STAR_MSG(STAR_ERR, "Use 'echo 0 > /proc/driver/star/wol' switch to WOL_NONE\n");
+	STAR_MSG(STAR_ERR, "Use 'echo 1 > /proc/driver/star/wol' switch to MAC_WOL\n");
+	STAR_MSG(STAR_ERR, "Use 'echo 2 > /proc/driver/star/wol' switch to PHY_WOL\n");
+
+	return 0;
+}
+
+static ssize_t wol_write(struct file *file, const char __user *buffer,
+		      size_t count, loff_t *data)
+{
+	struct net_device *dev;
+	star_private *star_prv;
+	char *buf;
+
+	buf = kmalloc(count + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	if (copy_from_user(buf, buffer, count)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	buf[count] = '\0';
+	dev = star_get_net_device();
+	if (!dev) {
+		STAR_MSG(STAR_ERR, "Could not get eth0 device!!!\n");
+		kfree(buf);
+		return -ENODEV;
+	}
+
+	star_prv = netdev_priv(dev);
+	star_prv->wol = buf[0] - '0';
+	STAR_MSG(STAR_ERR, "Wake On Lan (WOL) type is (%d)\n", star_prv->wol);
+	kfree(buf);
+
+	return count;
+}
+
+static int wol_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, get_wol_status, NULL);
+}
+
+static const struct file_operations star_wol_ops = {
+	.owner		= THIS_MODULE,
+	.open		= wol_open,
+	.read		= seq_read,
+	.write		= wol_write,
+};
+
+static int get_wol_flag_status(struct seq_file *seq, void *v)
+{
+	struct net_device *dev;
+	star_private *star_prv;
+
+	dev = star_get_net_device();
+	if (!dev) {
+		STAR_MSG(STAR_ERR, "Could not get eth0 device!!!\n");
+		return -ENODEV;
+	}
+
+	star_prv = netdev_priv(dev);
+	seq_printf(seq, "Wake On Lan (WOL) flag is (%d)\n", star_prv->wol_flag);
+
+	return 0;
+}
+
+static ssize_t wol_flag_write(struct file *file, const char __user *buffer,
+		      size_t count, loff_t *data)
+{
+	struct net_device *dev;
+	star_private *star_prv;
+	star_dev *star_dev;
+	char *buf;
+
+	buf = kmalloc(count + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	if (copy_from_user(buf, buffer, count)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	buf[count] = '\0';
+	dev = star_get_net_device();
+	if (!dev) {
+		STAR_MSG(STAR_ERR, "Could not get eth0 device!!!\n");
+		kfree(buf);
+		return -ENODEV;
+	}
+
+	star_prv = netdev_priv(dev);
+	star_dev = &star_prv->star_dev;
+	star_prv->wol_flag = buf[0] - '0';
+	pr_err("Wake On Lan (WOL) flag is (%d)\n", star_prv->wol_flag);
+	if (star_prv->wol == MAC_WOL) {
+		star_config_wol(star_dev, !!star_prv->wol_flag);
+	} else if (star_prv->wol == PHY_WOL) {
+		if (star_prv->wol_flag) {
+			/*set ethernet phy wol setting*/
+			if (star_dev->phy_ops->wol_enable)
+				star_dev->phy_ops->wol_enable(star_prv->dev);
+			enable_irq_wake(star_prv->eint_irq);
+			pr_err("set ethernet phy wol setting done.\n");
+		} else {
+			/*clear ethernet phy wol setting*/
+			if (star_dev->phy_ops->wol_disable)
+				star_dev->phy_ops->wol_disable(star_prv->dev);
+			disable_irq_wake(star_prv->eint_irq);
+			pr_err("clear ethernet phy wol setting done.\n");
+		}
+	}
+	kfree(buf);
+
+	return count;
+}
+
+static int wol_flag_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, get_wol_flag_status, NULL);
+}
+
+static const struct file_operations star_wol_flag_ops = {
+	.owner		= THIS_MODULE,
+	.open		= wol_flag_open,
+	.read		= seq_read,
+	.write		= wol_flag_write,
+};
+
+static ssize_t proc_dump_net_stat(struct file *file,
+				  char __user *buf, size_t count, loff_t *ppos)
+{
+	struct net_device *dev;
+	star_private *star_prv;
+	star_dev *star_dev;
+
+	dev = star_get_net_device();
+	if (!dev) {
+		STAR_MSG(STAR_ERR, "Could not get eth0 device!!!\n");
+		return -ENODEV;
+	}
+
+	star_prv = netdev_priv(dev);
+	star_dev = &star_prv->star_dev;
+	STAR_MSG(STAR_ERR, "\n");
+	STAR_MSG(STAR_ERR, "rx_packets	=%lu  <total packets received>\n",
+		 star_dev->stats.rx_packets);
+	STAR_MSG(STAR_ERR, "tx_packets	=%lu  <total packets transmitted>\n",
+		 star_dev->stats.tx_packets);
+	STAR_MSG(STAR_ERR, "rx_bytes	=%lu  <total bytes received>\n",
+		 star_dev->stats.rx_bytes);
+	STAR_MSG(STAR_ERR, "tx_bytes	=%lu  <total bytes transmitted>\n",
+		 star_dev->stats.tx_bytes);
+	STAR_MSG(STAR_ERR, "rx_errors;	=%lu  <bad packets received>\n",
+		 star_dev->stats.rx_errors);
+	STAR_MSG(STAR_ERR, "tx_errors;	=%lu  <packet transmit problems>\n",
+		 star_dev->stats.tx_errors);
+	STAR_MSG(STAR_ERR, "rx_crc_errors =%lu  <recved pkt with crc error>\n",
+		 star_dev->stats.rx_crc_errors);
+	STAR_MSG(STAR_ERR, "\n");
+	STAR_MSG(STAR_ERR,
+		 "Use 'cat /proc/driver/star/stat' to dump net info\n");
+	STAR_MSG(STAR_ERR,
+		 "Use 'echo clear > /proc/driver/star/stat' to clear info\n");
+
+	return 0;
+}
+
+static ssize_t proc_clear_net_stat(struct file *file,
+				   const char __user *buffer,
+				   size_t count, loff_t *pos)
+{
+	char *buf;
+	struct net_device *ndev;
+	star_private *star_prv;
+	star_dev *star_dev;
+
+	buf = kmalloc(count + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	if (copy_from_user(buf, buffer, count)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+	buf[count] = '\0';
+
+	ndev = star_get_net_device();
+	if (!ndev) {
+		STAR_MSG(STAR_ERR, "Could not get eth0 device!!!\n");
+		kfree(buf);
+		return -ENODEV;
+	}
+
+	star_prv = netdev_priv(ndev);
+	star_dev = &star_prv->star_dev;
+
+	if (!strncmp(buf, "clear", count - 1))
+		memset(&star_dev->stats, 0, sizeof(struct net_device_stats));
+	else
+		STAR_MSG(STAR_ERR, "fail to clear stat, buf:%s\n", buf);
+
+	kfree(buf);
+
+	return count;
+}
+
+static const struct file_operations star_net_status_ops = {
+	.read = proc_dump_net_stat,
+	.write = proc_clear_net_stat,
+};
+
+void star_set_dbg_level(int dbg)
+{
+	star_dbg_level = dbg;
+}
+
+int star_get_dbg_level(void)
+{
+	return star_dbg_level;
+}
+
+static ssize_t proc_get_dbg_lvl(struct file *file,
+				char __user *buf, size_t count, loff_t *ppos)
+{
+	switch (star_get_dbg_level()) {
+	case STAR_ERR:
+		STAR_MSG(STAR_ERR, "star dbglvl: STAR_ERR\n");
+		break;
+	case STAR_WARN:
+		STAR_MSG(STAR_ERR, "star dbglvl: STAR_WARN\n");
+		break;
+	case STAR_DBG:
+		STAR_MSG(STAR_ERR, "star dbglvl: STAR_DBG\n");
+		break;
+	case STAR_VERB:
+		STAR_MSG(STAR_ERR, "star dbglvl: STAR_VERB\n");
+		break;
+	case STAR_DBG_MAX:
+		STAR_MSG(STAR_ERR, "star dbglvl: STAR_DBG_MAX\n");
+		break;
+	default:
+		STAR_MSG(STAR_ERR, "star dbglvl: unknown\n");
+		break;
+	}
+
+	STAR_MSG(STAR_ERR, "Usage:\n");
+	STAR_MSG(STAR_ERR, "  echo err > dbglvl\n");
+	STAR_MSG(STAR_ERR, "  echo warn > dbglvl\n");
+	STAR_MSG(STAR_ERR, "  echo dbg > dbglvl\n");
+	STAR_MSG(STAR_ERR, "  echo verb > dbglvl\n");
+	STAR_MSG(STAR_ERR, "  echo max > dbglvl\n");
+
+	return 0;
+}
+
+static ssize_t proc_set_dbg_lvl(struct file *file,
+				const char __user *buffer,
+				size_t count, loff_t *pos)
+{
+	char *buf, *tmp;
+
+	tmp = kmalloc(count + 1, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+	buf = tmp;
+	if (copy_from_user(buf, buffer, count)) {
+		kfree(tmp);
+		return -EFAULT;
+	}
+	buf[count] = '\0';
+
+	if (str_cmp_seq(&buf, "err"))
+		star_set_dbg_level(STAR_ERR);
+	else if (str_cmp_seq(&buf, "warn"))
+		star_set_dbg_level(STAR_WARN);
+	else if (str_cmp_seq(&buf, "dbg"))
+		star_set_dbg_level(STAR_DBG);
+	else if (str_cmp_seq(&buf, "verb"))
+		star_set_dbg_level(STAR_VERB);
+	else if (str_cmp_seq(&buf, "max"))
+		star_set_dbg_level(STAR_DBG_MAX);
+	else
+		STAR_MSG(STAR_ERR, "wrong arg:%s\n", buf);
+
+	kfree(tmp);
+
+	return count;
+}
+
+static const struct file_operations star_dbg_lvl_ops = {
+	.read = proc_get_dbg_lvl,
+	.write = proc_set_dbg_lvl,
+};
+
+static struct star_proc_file star_file_tbl[] = {
+	{"phy_reg", &star_phy_reg_ops},
+	{"macreg", &star_mac_reg_ops},
+	{"wol", &star_wol_ops},
+	{"wol_flag", &star_wol_flag_ops},
+	{"stat", &star_net_status_ops},
+	{"dbglvl", &star_dbg_lvl_ops},
+};
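+
+/*
+ * The table above yields /proc/driver/star/{phy_reg,macreg,wol,wol_flag,
+ * stat,dbglvl}. For example, "echo dbg > /proc/driver/star/dbglvl" raises
+ * the log verbosity and "cat /proc/driver/star/stat" dumps the counters.
+ */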
+
+int star_init_procfs(void)
+{
+	int i;
+
+	STAR_MSG(STAR_ERR, "%s entered\n", __func__);
+	star_proc.root = proc_mkdir("driver/star", NULL);
+	if (!star_proc.root) {
+		STAR_MSG(STAR_ERR, "star_proc_dir create failed\n");
+		return -ENOMEM;
+	}
+
+	star_proc.entry = kcalloc(ARRAY_SIZE(star_file_tbl),
+				  sizeof(*star_proc.entry), GFP_KERNEL);
+	if (!star_proc.entry)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(star_file_tbl); i++) {
+		star_proc.entry[i] = proc_create(star_file_tbl[i].name,
+			0755, star_proc.root, star_file_tbl[i].fops);
+		if (!star_proc.entry[i]) {
+			STAR_MSG(STAR_ERR,
+				 "%s create failed\n", star_file_tbl[i].name);
+			return -ENOMEM;
+		}
+		}
+	}
+
+	return 0;
+}
+
+void star_exit_procfs(void)
+{
+	int i;
+
+	STAR_MSG(STAR_ERR, "%s entered\n", __func__);
+	for (i = 0; i < ARRAY_SIZE(star_file_tbl); i++)
+		remove_proc_entry(star_file_tbl[i].name, star_proc.root);
+
+	kfree(star_proc.entry);
+	remove_proc_entry("driver/star", NULL);
+	star_put_net_device();
+}
diff --git a/drivers/net/ethernet/mediatek/star/star_procfs.h b/drivers/net/ethernet/mediatek/star/star_procfs.h
new file mode 100644
index 0000000..3dbe38a
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/star/star_procfs.h
@@ -0,0 +1,30 @@
+/* Mediatek STAR MAC network driver.
+ *
+ * Copyright (c) 2016-2017 Mediatek Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _STAR_PROCFS_H_
+#define _STAR_PROCFS_H_
+
+#include <linux/proc_fs.h>
+
+struct star_proc_file {
+	const char * const name;
+	const struct file_operations *fops;
+};
+
+struct star_procfs {
+	struct net_device *ndev;
+	struct proc_dir_entry *root;
+	struct proc_dir_entry **entry;
+};
+
+int star_init_procfs(void);
+void star_exit_procfs(void);
+
+#endif /* _STAR_PROCFS_H_ */
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 34515f4..3eaf443 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -108,6 +108,34 @@
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
 
 /**
+ * dev_pm_opp_get_voltage_supply() - Gets the voltage corresponding to an opp
+ * with index
+ * @opp:        opp for which voltage has to be returned for
+ * @index:      index to specify the returned supplies
+ *
+ * Return: voltage in micro volt corresponding to the opp with index, else
+ * return 0
+ *
+ * This is useful for devices with multiple power supplies.
+ */
+unsigned long dev_pm_opp_get_voltage_supply(struct dev_pm_opp *opp,
+					    unsigned int index)
+{
+	if (IS_ERR_OR_NULL(opp)) {
+		pr_err("%s: Invalid parameters\n", __func__);
+		return 0;
+	}
+
+	if (index >= opp->opp_table->regulator_count) {
+		pr_err("%s: Invalid supply index: %u\n", __func__, index);
+		return 0;
+	}
+
+	return opp->supplies[index].u_volt;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage_supply);
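+
+/*
+ * Example (hypothetical caller, assuming an OPP table registered with two
+ * supplies, e.g. a core rail and an SRAM rail):
+ *
+ *	opp = dev_pm_opp_find_freq_exact(dev, rate, true);
+ *	if (!IS_ERR(opp)) {
+ *		u_volt_sram = dev_pm_opp_get_voltage_supply(opp, 1);
+ *		dev_pm_opp_put(opp);
+ *	}
+ */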
+
+/**
  * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  * @opp:	opp for which frequency has to be returned for
  *
@@ -884,11 +912,9 @@
 	kfree(opp);
 }
 
-static void _opp_kref_release(struct kref *kref)
+static void _opp_kref_release(struct dev_pm_opp *opp,
+			      struct opp_table *opp_table)
 {
-	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
-	struct opp_table *opp_table = opp->opp_table;
-
 	/*
 	 * Notify the changes in the availability of the operable
 	 * frequency/voltage list.
@@ -897,7 +923,22 @@
 	opp_debug_remove_one(opp);
 	list_del(&opp->node);
 	kfree(opp);
+}
 
+static void _opp_kref_release_unlocked(struct kref *kref)
+{
+	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+	struct opp_table *opp_table = opp->opp_table;
+
+	_opp_kref_release(opp, opp_table);
+}
+
+static void _opp_kref_release_locked(struct kref *kref)
+{
+	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+	struct opp_table *opp_table = opp->opp_table;
+
+	_opp_kref_release(opp, opp_table);
 	mutex_unlock(&opp_table->lock);
 	dev_pm_opp_put_opp_table(opp_table);
 }
@@ -909,10 +950,16 @@
 
 void dev_pm_opp_put(struct dev_pm_opp *opp)
 {
-	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
+	kref_put_mutex(&opp->kref, _opp_kref_release_locked,
+		       &opp->opp_table->lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_put);
 
+static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
+{
+	kref_put(&opp->kref, _opp_kref_release_unlocked);
+}
+
 /**
  * dev_pm_opp_remove()  - Remove an OPP from OPP table
  * @dev:	device for which we do this operation
@@ -952,6 +999,40 @@
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
+/**
+ * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
+ * @dev:	device for which we do this operation
+ *
+ * This function removes all dynamically created OPPs from the opp table.
+ */
+void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+	struct opp_table *opp_table;
+	struct dev_pm_opp *opp, *temp;
+	int count = 0;
+
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table))
+		return;
+
+	mutex_lock(&opp_table->lock);
+	list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
+		if (opp->dynamic) {
+			dev_pm_opp_put_unlocked(opp);
+			count++;
+		}
+	}
+	mutex_unlock(&opp_table->lock);
+
+	/* Drop the references taken by dev_pm_opp_add() */
+	while (count--)
+		dev_pm_opp_put_opp_table(opp_table);
+
+	/* Drop the reference taken by _find_opp_table() */
+	dev_pm_opp_put_opp_table(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
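+
+/*
+ * Example (hypothetical): a driver that created OPPs at probe time with
+ * dev_pm_opp_add() can drop all of them in one call on its teardown path:
+ *
+ *	dev_pm_opp_remove_all_dynamic(&pdev->dev);
+ */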
+
 struct dev_pm_opp *_opp_allocate(struct opp_table *table)
 {
 	struct dev_pm_opp *opp;
@@ -1628,6 +1709,84 @@
 	return r;
 }
 
+/**
+ * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
+ * @dev:		device for which we do this operation
+ * @freq:		OPP frequency to adjust voltage of
+ * @u_volt:		new OPP voltage
+ *
+ * Change the voltage of the OPP with the given frequency.
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modification was done OR modification was
+ * successful.
+ *
+ * Locking: The internal opp_table and opp structures are protected by the
+ * opp_table mutex, which this function takes and releases internally.
+ * Callers must therefore be able to sleep and must not already hold that
+ * mutex.
+ */
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+			      unsigned long u_volt)
+{
+	struct opp_table *opp_table;
+	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+	int r = 0;
+
+	/* Find the opp_table */
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table)) {
+		r = PTR_ERR(opp_table);
+		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+		return r;
+	}
+
+	/* keep the node allocated */
+	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
+	if (!new_opp) {
+		dev_pm_opp_put_opp_table(opp_table);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&opp_table->lock);
+
+	/* Do we have the frequency? */
+	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
+		if (tmp_opp->rate == freq) {
+			opp = tmp_opp;
+			break;
+		}
+	}
+
+	if (IS_ERR(opp)) {
+		r = PTR_ERR(opp);
+		goto unlock;
+	}
+
+	/* Is update really needed? */
+	if (opp->supplies->u_volt == u_volt)
+		goto unlock;
+
+	/* copy the old data over */
+	*new_opp = *opp;
+
+	/* plug in new node */
+	new_opp->supplies->u_volt = u_volt;
+
+	list_replace_rcu(&opp->node, &new_opp->node);
+	mutex_unlock(&opp_table->lock);
+
+	/* Notify the change of the OPP with the live node */
+	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
+				     new_opp);
+
+	/* Drop the reference taken by _find_opp_table() */
+	dev_pm_opp_put_opp_table(opp_table);
+
+	return 0;
+
+unlock:
+	mutex_unlock(&opp_table->lock);
+	kfree(new_opp);
+	dev_pm_opp_put_opp_table(opp_table);
+	return r;
+}
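+
+/*
+ * Example (hypothetical AVS caller): lower the 500 MHz OPP to 820000 uV
+ * after silicon characterization:
+ *
+ *	ret = dev_pm_opp_adjust_voltage(dev, 500000000, 820000);
+ */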
+
 /**
  * dev_pm_opp_enable() - Enable a specific OPP
  * @dev:	device for which we do this operation
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 9905dc6..11a3808 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -3,7 +3,7 @@
 
 config EINT_MTK
 	bool "MediaTek External Interrupt Support"
-	depends on PINCTRL_MTK || PINCTRL_MT7622 || COMPILE_TEST
+	depends on PINCTRL_MTK || PINCTRL_MTK_MOORE || COMPILE_TEST
 	select IRQ_DOMAIN
 
 config PINCTRL_MTK
@@ -15,6 +15,24 @@
 	select EINT_MTK
 	select OF_GPIO
 
+config PINCTRL_MTK_MOORE
+	bool "MediaTek Moore Core that implements generic binding"
+	depends on OF
+	select GENERIC_PINCONF
+	select GENERIC_PINCTRL_GROUPS
+	select GENERIC_PINMUX_FUNCTIONS
+	select GPIOLIB
+	select OF_GPIO
+
+config PINCTRL_MTK_PARIS
+	bool "MediaTek Paris Core that implements vendor binding"
+	depends on OF
+	select PINMUX
+	select GENERIC_PINCONF
+	select GPIOLIB
+	select EINT_MTK
+	select OF_GPIO
+
 # For ARMv7 SoCs
 config PINCTRL_MT2701
 	bool "Mediatek MT2701 pin control"
@@ -23,6 +41,12 @@
 	default MACH_MT2701
 	select PINCTRL_MTK
 
+config PINCTRL_MT7623
+	bool "Mediatek MT7623 pin control with generic binding"
+	depends on MACH_MT7623 || COMPILE_TEST
+	depends on PINCTRL_MTK_MOORE
+	default y
+
 config PINCTRL_MT8135
 	bool "Mediatek MT8135 pin control"
 	depends on MACH_MT8135 || COMPILE_TEST
@@ -47,13 +71,16 @@
 
 config PINCTRL_MT7622
 	bool "MediaTek MT7622 pin control"
+	depends on ARM64 || COMPILE_TEST
+	depends on PINCTRL_MTK_MOORE
+	default y
+
+config PINCTRL_MT8167
+	bool "Mediatek MT8167 pin control"
 	depends on OF
 	depends on ARM64 || COMPILE_TEST
-	select GENERIC_PINCONF
-	select GENERIC_PINCTRL_GROUPS
-	select GENERIC_PINMUX_FUNCTIONS
-	select GPIOLIB
-	select OF_GPIO
+	default ARM64 && ARCH_MEDIATEK
+	select PINCTRL_MTK
 
 config PINCTRL_MT8173
 	bool "Mediatek MT8173 pin control"
@@ -62,6 +89,20 @@
 	default ARM64 && ARCH_MEDIATEK
 	select PINCTRL_MTK
 
+config PINCTRL_MT8183
+	bool "Mediatek MT8183 pin control"
+	depends on OF
+	depends on ARM64 || COMPILE_TEST
+	default ARM64 && ARCH_MEDIATEK
+	select PINCTRL_MTK_PARIS
+
+config PINCTRL_MT8516
+	bool "Mediatek MT8516 pin control"
+	depends on OF
+	depends on ARM64 || COMPILE_TEST
+	default ARM64 && ARCH_MEDIATEK
+	select PINCTRL_MTK
+
 # For PMIC
 config PINCTRL_MT6397
 	bool "Mediatek MT6397 pin control"
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 3de7156..c32e875 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -2,6 +2,8 @@
 # Core
 obj-$(CONFIG_EINT_MTK)		+= mtk-eint.o
 obj-$(CONFIG_PINCTRL_MTK)	+= pinctrl-mtk-common.o
+obj-$(CONFIG_PINCTRL_MTK_MOORE) += pinctrl-moore.o pinctrl-mtk-common-v2.o
+obj-$(CONFIG_PINCTRL_MTK_PARIS) += pinctrl-paris.o pinctrl-mtk-common-v2.o
 
 # SoC Drivers
 obj-$(CONFIG_PINCTRL_MT2701)	+= pinctrl-mt2701.o
@@ -9,5 +11,9 @@
 obj-$(CONFIG_PINCTRL_MT8135)	+= pinctrl-mt8135.o
 obj-$(CONFIG_PINCTRL_MT8127)	+= pinctrl-mt8127.o
 obj-$(CONFIG_PINCTRL_MT7622)	+= pinctrl-mt7622.o
+obj-$(CONFIG_PINCTRL_MT7623)	+= pinctrl-mt7623.o
+obj-$(CONFIG_PINCTRL_MT8167)	+= pinctrl-mt8167.o
 obj-$(CONFIG_PINCTRL_MT8173)	+= pinctrl-mt8173.o
+obj-$(CONFIG_PINCTRL_MT8183)	+= pinctrl-mt8183.o
+obj-$(CONFIG_PINCTRL_MT8516)	+= pinctrl-mt8516.o
 obj-$(CONFIG_PINCTRL_MT6397)	+= pinctrl-mt6397.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
new file mode 100644
index 0000000..c3d0c94
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek Pinctrl Moore Driver, which implements the generic dt-binding
+ * pinctrl-bindings.txt for MediaTek SoCs.
+ *
+ * Copyright (C) 2017-2018 MediaTek Inc.
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/gpio/driver.h>
+#include "pinctrl-moore.h"
+
+#define PINCTRL_PINCTRL_DEV		KBUILD_MODNAME
+
+/* Custom pinconf parameters */
+#define MTK_PIN_CONFIG_TDSEL	(PIN_CONFIG_END + 1)
+#define MTK_PIN_CONFIG_RDSEL	(PIN_CONFIG_END + 2)
+#define MTK_PIN_CONFIG_PU_ADV	(PIN_CONFIG_END + 3)
+#define MTK_PIN_CONFIG_PD_ADV	(PIN_CONFIG_END + 4)
+
+static const struct pinconf_generic_params mtk_custom_bindings[] = {
+	{"mediatek,tdsel",	MTK_PIN_CONFIG_TDSEL,		0},
+	{"mediatek,rdsel",	MTK_PIN_CONFIG_RDSEL,		0},
+	{"mediatek,pull-up-adv", MTK_PIN_CONFIG_PU_ADV,		1},
+	{"mediatek,pull-down-adv", MTK_PIN_CONFIG_PD_ADV,	1},
+};
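+
+/*
+ * Illustrative DT fragment (not part of this patch): the custom parameters
+ * above let a board set, per pin configuration node, properties such as:
+ *
+ *	mediatek,tdsel = <2>;
+ *	mediatek,pull-up-adv = <1>;
+ */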
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item mtk_conf_items[] = {
+	PCONFDUMP(MTK_PIN_CONFIG_TDSEL, "tdsel", NULL, true),
+	PCONFDUMP(MTK_PIN_CONFIG_RDSEL, "rdsel", NULL, true),
+	PCONFDUMP(MTK_PIN_CONFIG_PU_ADV, "pu-adv", NULL, true),
+	PCONFDUMP(MTK_PIN_CONFIG_PD_ADV, "pd-adv", NULL, true),
+};
+#endif
+
+static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
+			      unsigned int selector, unsigned int group)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+	struct function_desc *func;
+	struct group_desc *grp;
+	int i;
+
+	func = pinmux_generic_get_function(pctldev, selector);
+	if (!func)
+		return -EINVAL;
+
+	grp = pinctrl_generic_get_group(pctldev, group);
+	if (!grp)
+		return -EINVAL;
+
+	dev_dbg(pctldev->dev, "enable function %s group %s\n",
+		func->name, grp->name);
+
+	for (i = 0; i < grp->num_pins; i++) {
+		const struct mtk_pin_desc *desc;
+		int *pin_modes = grp->data;
+		int pin = grp->pins[i];
+
+		desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+		mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE,
+				 pin_modes[i]);
+	}
+
+	return 0;
+}
+
+static int mtk_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
+					  struct pinctrl_gpio_range *range,
+					  unsigned int pin)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+	const struct mtk_pin_desc *desc;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+	return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE,
+				hw->soc->gpio_m);
+}
+
+static int mtk_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
+					 struct pinctrl_gpio_range *range,
+					 unsigned int pin, bool input)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+	const struct mtk_pin_desc *desc;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+	/* hardware would take 0 as input direction */
+	return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR, !input);
+}
+
+static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
+			   unsigned int pin, unsigned long *config)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+	u32 param = pinconf_to_config_param(*config);
+	int val, val2, err, reg, ret = 1;
+	const struct mtk_pin_desc *desc;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (hw->soc->bias_disable_get) {
+			err = hw->soc->bias_disable_get(hw, desc, &ret);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		if (hw->soc->bias_get) {
+			err = hw->soc->bias_get(hw, desc, 1, &ret);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (hw->soc->bias_get) {
+			err = hw->soc->bias_get(hw, desc, 0, &ret);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case PIN_CONFIG_SLEW_RATE:
+		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &val);
+		if (err)
+			return err;
+
+		if (!val)
+			return -EINVAL;
+
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+	case PIN_CONFIG_OUTPUT_ENABLE:
+		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &val);
+		if (err)
+			return err;
+
+		/* HW takes input mode as zero; output mode as non-zero */
+		if ((val && param == PIN_CONFIG_INPUT_ENABLE) ||
+		    (!val && param == PIN_CONFIG_OUTPUT_ENABLE))
+			return -EINVAL;
+
+		break;
+	case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &val);
+		if (err)
+			return err;
+
+		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &val2);
+		if (err)
+			return err;
+
+		if (val || !val2)
+			return -EINVAL;
+
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		if (hw->soc->drive_get) {
+			err = hw->soc->drive_get(hw, desc, &ret);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case MTK_PIN_CONFIG_TDSEL:
+	case MTK_PIN_CONFIG_RDSEL:
+		reg = (param == MTK_PIN_CONFIG_TDSEL) ?
+		       PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
+
+		err = mtk_hw_get_value(hw, desc, reg, &val);
+		if (err)
+			return err;
+
+		ret = val;
+
+		break;
+	case MTK_PIN_CONFIG_PU_ADV:
+	case MTK_PIN_CONFIG_PD_ADV:
+		if (hw->soc->adv_pull_get) {
+			bool pullup;
+
+			pullup = param == MTK_PIN_CONFIG_PU_ADV;
+			err = hw->soc->adv_pull_get(hw, desc, pullup, &ret);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	*config = pinconf_to_config_packed(param, ret);
+
+	return 0;
+}
+
+static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+			   unsigned long *configs, unsigned int num_configs)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+	const struct mtk_pin_desc *desc;
+	u32 reg, param, arg;
+	int cfg, err = 0;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+	for (cfg = 0; cfg < num_configs; cfg++) {
+		param = pinconf_to_config_param(configs[cfg]);
+		arg = pinconf_to_config_argument(configs[cfg]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_DISABLE:
+			if (hw->soc->bias_disable_set) {
+				err = hw->soc->bias_disable_set(hw, desc);
+				if (err)
+					return err;
+			} else {
+				return -ENOTSUPP;
+			}
+			break;
+		case PIN_CONFIG_BIAS_PULL_UP:
+			if (hw->soc->bias_set) {
+				err = hw->soc->bias_set(hw, desc, 1);
+				if (err)
+					return err;
+			} else {
+				return -ENOTSUPP;
+			}
+			break;
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+			if (hw->soc->bias_set) {
+				err = hw->soc->bias_set(hw, desc, 0);
+				if (err)
+					return err;
+			} else {
+				return -ENOTSUPP;
+			}
+			break;
+		case PIN_CONFIG_OUTPUT_ENABLE:
+			err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
+					       MTK_DISABLE);
+			if (err)
+				goto err;
+
+			err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+					       MTK_OUTPUT);
+			if (err)
+				goto err;
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+
+			if (hw->soc->ies_present) {
+				mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES,
+						 MTK_ENABLE);
+			}
+
+			err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+					       MTK_INPUT);
+			if (err)
+				goto err;
+			break;
+		case PIN_CONFIG_SLEW_RATE:
+			err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SR,
+					       arg);
+			if (err)
+				goto err;
+
+			break;
+		case PIN_CONFIG_OUTPUT:
+			err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+					       MTK_OUTPUT);
+			if (err)
+				goto err;
+
+			err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO,
+					       arg);
+			if (err)
+				goto err;
+			break;
+		case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+			/* arg = 1: input mode & SMT enable;
+			 * arg = 0: output mode & SMT disable
+			 */
+			arg = arg ? 2 : 1;
+			err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+					       arg & 1);
+			if (err)
+				goto err;
+
+			err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
+					       !!(arg & 2));
+			if (err)
+				goto err;
+			break;
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			if (hw->soc->drive_set) {
+				err = hw->soc->drive_set(hw, desc, arg);
+				if (err)
+					return err;
+			} else {
+				return -ENOTSUPP;
+			}
+			break;
+		case MTK_PIN_CONFIG_TDSEL:
+		case MTK_PIN_CONFIG_RDSEL:
+			reg = (param == MTK_PIN_CONFIG_TDSEL) ?
+			       PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
+
+			err = mtk_hw_set_value(hw, desc, reg, arg);
+			if (err)
+				goto err;
+			break;
+		case MTK_PIN_CONFIG_PU_ADV:
+		case MTK_PIN_CONFIG_PD_ADV:
+			if (hw->soc->adv_pull_set) {
+				bool pullup;
+
+				pullup = param == MTK_PIN_CONFIG_PU_ADV;
+				err = hw->soc->adv_pull_set(hw, desc, pullup,
+							    arg);
+				if (err)
+					return err;
+			} else {
+				return -ENOTSUPP;
+			}
+			break;
+		default:
+			err = -ENOTSUPP;
+			goto err;
+		}
+	}
+err:
+	return err;
+}
+
+static int mtk_pinconf_group_get(struct pinctrl_dev *pctldev,
+				 unsigned int group, unsigned long *config)
+{
+	const unsigned int *pins;
+	unsigned int i, npins;
+	unsigned long old = 0;
+	int ret;
+
+	ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < npins; i++) {
+		if (mtk_pinconf_get(pctldev, pins[i], config))
+			return -ENOTSUPP;
+
+		/* configs do not match between two pins */
+		if (i && old != *config)
+			return -ENOTSUPP;
+
+		old = *config;
+	}
+
+	return 0;
+}
+
+static int mtk_pinconf_group_set(struct pinctrl_dev *pctldev,
+				 unsigned int group, unsigned long *configs,
+				 unsigned int num_configs)
+{
+	const unsigned int *pins;
+	unsigned int i, npins;
+	int ret;
+
+	ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < npins; i++) {
+		ret = mtk_pinconf_set(pctldev, pins[i], configs, num_configs);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static const struct pinctrl_ops mtk_pctlops = {
+	.get_groups_count = pinctrl_generic_get_group_count,
+	.get_group_name = pinctrl_generic_get_group_name,
+	.get_group_pins = pinctrl_generic_get_group_pins,
+	.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+	.dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static const struct pinmux_ops mtk_pmxops = {
+	.get_functions_count = pinmux_generic_get_function_count,
+	.get_function_name = pinmux_generic_get_function_name,
+	.get_function_groups = pinmux_generic_get_function_groups,
+	.set_mux = mtk_pinmux_set_mux,
+	.gpio_request_enable = mtk_pinmux_gpio_request_enable,
+	.gpio_set_direction = mtk_pinmux_gpio_set_direction,
+	.strict = true,
+};
+
+static const struct pinconf_ops mtk_confops = {
+	.is_generic = true,
+	.pin_config_get = mtk_pinconf_get,
+	.pin_config_set = mtk_pinconf_set,
+	.pin_config_group_get = mtk_pinconf_group_get,
+	.pin_config_group_set = mtk_pinconf_group_set,
+	.pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static struct pinctrl_desc mtk_desc = {
+	.name = PINCTRL_PINCTRL_DEV,
+	.pctlops = &mtk_pctlops,
+	.pmxops = &mtk_pmxops,
+	.confops = &mtk_confops,
+	.owner = THIS_MODULE,
+};
+
+static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
+{
+	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+	const struct mtk_pin_desc *desc;
+	int value, err;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DI, &value);
+	if (err)
+		return err;
+
+	return !!value;
+}
+
+static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+{
+	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+	const struct mtk_pin_desc *desc;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+
+	mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
+}
+
+static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
+{
+	return pinctrl_gpio_direction_input(chip->base + gpio);
+}
+
+static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
+				     int value)
+{
+	mtk_gpio_set(chip, gpio, value);
+
+	return pinctrl_gpio_direction_output(chip->base + gpio);
+}
+
+static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+	const struct mtk_pin_desc *desc;
+
+	if (!hw->eint)
+		return -ENOTSUPP;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[offset];
+
+	if (desc->eint.eint_n == EINT_NA)
+		return -ENOTSUPP;
+
+	return mtk_eint_find_irq(hw->eint, desc->eint.eint_n);
+}
+
+static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+			       unsigned long config)
+{
+	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+	const struct mtk_pin_desc *desc;
+	u32 debounce;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[offset];
+
+	if (!hw->eint ||
+	    pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE ||
+	    desc->eint.eint_n == EINT_NA)
+		return -ENOTSUPP;
+
+	debounce = pinconf_to_config_argument(config);
+
+	return mtk_eint_set_debounce(hw->eint, desc->eint.eint_n, debounce);
+}
+
+static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
+{
+	struct gpio_chip *chip = &hw->chip;
+	int ret;
+
+	chip->label		= PINCTRL_PINCTRL_DEV;
+	chip->parent		= hw->dev;
+	chip->request		= gpiochip_generic_request;
+	chip->free		= gpiochip_generic_free;
+	chip->direction_input	= mtk_gpio_direction_input;
+	chip->direction_output	= mtk_gpio_direction_output;
+	chip->get		= mtk_gpio_get;
+	chip->set		= mtk_gpio_set;
+	chip->to_irq		= mtk_gpio_to_irq;
+	chip->set_config	= mtk_gpio_set_config;
+	chip->base		= -1;
+	chip->ngpio		= hw->soc->npins;
+	chip->of_node		= np;
+	chip->of_gpio_n_cells	= 2;
+
+	ret = gpiochip_add_data(chip, hw);
+	if (ret < 0)
+		return ret;
+
+	/* Just for backward compatibility with old pinctrl nodes that lack
+	 * the "gpio-ranges" property. Adding the pin range here directly
+	 * from a DeviceTree-supported pinctrl driver is otherwise DEPRECATED.
+	 * Please see Section 2.1 of
+	 * Documentation/devicetree/bindings/gpio/gpio.txt on how to
+	 * bind pinctrl and gpio drivers via the "gpio-ranges" property.
+	 */
+	if (!of_find_property(np, "gpio-ranges", NULL)) {
+		ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
+					     chip->ngpio);
+		if (ret < 0) {
+			gpiochip_remove(chip);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int mtk_build_groups(struct mtk_pinctrl *hw)
+{
+	int err, i;
+
+	for (i = 0; i < hw->soc->ngrps; i++) {
+		const struct group_desc *group = hw->soc->grps + i;
+
+		err = pinctrl_generic_add_group(hw->pctrl, group->name,
+						group->pins, group->num_pins,
+						group->data);
+		if (err < 0) {
+			dev_err(hw->dev, "Failed to register group %s\n",
+				group->name);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int mtk_build_functions(struct mtk_pinctrl *hw)
+{
+	int i, err;
+
+	for (i = 0; i < hw->soc->nfuncs; i++) {
+		const struct function_desc *func = hw->soc->funcs + i;
+
+		err = pinmux_generic_add_function(hw->pctrl, func->name,
+						  func->group_names,
+						  func->num_group_names,
+						  func->data);
+		if (err < 0) {
+			dev_err(hw->dev, "Failed to register function %s\n",
+				func->name);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+int mtk_moore_pinctrl_probe(struct platform_device *pdev,
+			    const struct mtk_pin_soc *soc)
+{
+	struct pinctrl_pin_desc *pins;
+	struct resource *res;
+	struct mtk_pinctrl *hw;
+	int err, i;
+
+	hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
+	if (!hw)
+		return -ENOMEM;
+
+	hw->soc = soc;
+	hw->dev = &pdev->dev;
+
+	if (!hw->soc->nbase_names) {
+		dev_err(&pdev->dev,
+			"SoC should be assigned at least one register base\n");
+		return -EINVAL;
+	}
+
+	hw->base = devm_kmalloc_array(&pdev->dev, hw->soc->nbase_names,
+				      sizeof(*hw->base), GFP_KERNEL);
+	if (!hw->base)
+		return -ENOMEM;
+
+	for (i = 0; i < hw->soc->nbase_names; i++) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   hw->soc->base_names[i]);
+		if (!res) {
+			dev_err(&pdev->dev, "missing IO resource\n");
+			return -ENXIO;
+		}
+
+		hw->base[i] = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(hw->base[i]))
+			return PTR_ERR(hw->base[i]);
+	}
+
+	hw->nbase = hw->soc->nbase_names;
+
+	/* Copy from internal struct mtk_pin_desc to register to the core */
+	pins = devm_kmalloc_array(&pdev->dev, hw->soc->npins, sizeof(*pins),
+				  GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+
+	for (i = 0; i < hw->soc->npins; i++) {
+		pins[i].number = hw->soc->pins[i].number;
+		pins[i].name = hw->soc->pins[i].name;
+	}
+
+	/* Setup pins descriptions per SoC types */
+	mtk_desc.pins = (const struct pinctrl_pin_desc *)pins;
+	mtk_desc.npins = hw->soc->npins;
+	mtk_desc.num_custom_params = ARRAY_SIZE(mtk_custom_bindings);
+	mtk_desc.custom_params = mtk_custom_bindings;
+#ifdef CONFIG_DEBUG_FS
+	mtk_desc.custom_conf_items = mtk_conf_items;
+#endif
+
+	err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw,
+					     &hw->pctrl);
+	if (err)
+		return err;
+
+	/* Setup groups descriptions per SoC types */
+	err = mtk_build_groups(hw);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to build groups\n");
+		return err;
+	}
+
+	/* Setup functions descriptions per SoC types */
+	err = mtk_build_functions(hw);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to build functions\n");
+		return err;
+	}
+
+	/* For pinctrl_claim_hogs() to work, pinctrl must not be enabled
+	 * until all groups and functions have been added.
+	 */
+	err = pinctrl_enable(hw->pctrl);
+	if (err)
+		return err;
+
+	err = mtk_build_eint(hw, pdev);
+	if (err)
+		dev_warn(&pdev->dev,
+			 "Failed to add EINT, but pinctrl still can work\n");
+
+	/* Build gpiochip should be after pinctrl_enable is done */
+	err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+		return err;
+	}
+
+	platform_set_drvdata(pdev, hw);
+
+	return 0;
+}
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.h b/drivers/pinctrl/mediatek/pinctrl-moore.h
new file mode 100644
index 0000000..e1b4b82
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2018 MediaTek Inc.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+#ifndef __PINCTRL_MOORE_H
+#define __PINCTRL_MOORE_H
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+#include "mtk-eint.h"
+#include "pinctrl-mtk-common-v2.h"
+
+#define MTK_RANGE(_a)		{ .range = (_a), .nranges = ARRAY_SIZE(_a), }
+
+#define MTK_PIN(_number, _name, _eint_m, _eint_n, _drv_n) {	\
+		.number = _number,			\
+		.name = _name,				\
+		.eint = {				\
+			.eint_m = _eint_m,		\
+			.eint_n = _eint_n,		\
+		},					\
+		.drv_n = _drv_n,			\
+		.funcs = NULL,				\
+	}
+
+#define PINCTRL_PIN_GROUP(name, id)			\
+	{						\
+		name,					\
+		id##_pins,				\
+		ARRAY_SIZE(id##_pins),			\
+		id##_funcs,				\
+	}
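+
+/*
+ * Example (illustrative): PINCTRL_PIN_GROUP("uart0", mt7622_uart0) builds a
+ * group descriptor from mt7622_uart0_pins[] and mt7622_uart0_funcs[], both
+ * assumed to be provided by the SoC driver that includes this header.
+ */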
+
+int mtk_moore_pinctrl_probe(struct platform_device *pdev,
+			    const struct mtk_pin_soc *soc);
+
+#endif /* __PINCTRL_MOORE_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index 6f931b8..ce4a8a0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -1,297 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * MediaTek MT7622 Pinctrl Driver
+ * Copyright (C) 2017-2018 MediaTek Inc.
  *
- * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ * Author: Sean Wang <sean.wang@mediatek.com>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
-#include <linux/gpio.h>
-#include <linux/gpio/driver.h>
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/mfd/syscon.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/regmap.h>
+#include "pinctrl-moore.h"
 
-#include "../core.h"
-#include "../pinconf.h"
-#include "../pinmux.h"
-#include "mtk-eint.h"
-
-#define PINCTRL_PINCTRL_DEV		KBUILD_MODNAME
-#define MTK_RANGE(_a)		{ .range = (_a), .nranges = ARRAY_SIZE(_a), }
-#define PINCTRL_PIN_GROUP(name, id)			\
-	{						\
-		name,					\
-		id##_pins,				\
-		ARRAY_SIZE(id##_pins),			\
-		id##_funcs,				\
-	}
-
-#define MTK_GPIO_MODE	1
-#define MTK_INPUT	0
-#define MTK_OUTPUT	1
-#define MTK_DISABLE	0
-#define MTK_ENABLE	1
-
-/* Custom pinconf parameters */
-#define MTK_PIN_CONFIG_TDSEL	(PIN_CONFIG_END + 1)
-#define MTK_PIN_CONFIG_RDSEL	(PIN_CONFIG_END + 2)
-
-/* List these attributes which could be modified for the pin */
-enum {
-	PINCTRL_PIN_REG_MODE,
-	PINCTRL_PIN_REG_DIR,
-	PINCTRL_PIN_REG_DI,
-	PINCTRL_PIN_REG_DO,
-	PINCTRL_PIN_REG_SR,
-	PINCTRL_PIN_REG_SMT,
-	PINCTRL_PIN_REG_PD,
-	PINCTRL_PIN_REG_PU,
-	PINCTRL_PIN_REG_E4,
-	PINCTRL_PIN_REG_E8,
-	PINCTRL_PIN_REG_TDSEL,
-	PINCTRL_PIN_REG_RDSEL,
-	PINCTRL_PIN_REG_MAX,
-};
-
-/* struct mtk_pin_field - the structure that holds the information of the field
- *			  used to describe the attribute for the pin
- * @offset:		the register offset relative to the base address
- * @mask:		the mask used to filter out the field from the register
- * @bitpos:		the start bit relative to the register
- * @next:		the indication that the field would be extended to the
-			next register
- */
-struct mtk_pin_field {
-	u32 offset;
-	u32 mask;
-	u8  bitpos;
-	u8  next;
-};
-
-/* struct mtk_pin_field_calc - the structure that holds the range providing
- *			       the guide used to look up the relevant field
- * @s_pin:		the start pin within the range
- * @e_pin:		the end pin within the range
- * @s_addr:		the start address for the range
- * @x_addrs:		the address distance between two consecutive registers
- *			within the range
- * @s_bit:		the start bit for the first register within the range
- * @x_bits:		the bit distance between two consecutive pins within
- *			the range
- */
-struct mtk_pin_field_calc {
-	u16 s_pin;
-	u16 e_pin;
-	u32 s_addr;
-	u8  x_addrs;
-	u8  s_bit;
-	u8  x_bits;
-};
-
-/* struct mtk_pin_reg_calc - the structure that holds all ranges used to
- *			     determine which register the pin would make use of
- *			     for certain pin attribute.
- * @range:		     the start address for the range
- * @nranges:		     the number of items in the range
- */
-struct mtk_pin_reg_calc {
-	const struct mtk_pin_field_calc *range;
-	unsigned int nranges;
-};
-
-/* struct mtk_pin_soc - the structure that holds SoC-specific data */
-struct mtk_pin_soc {
-	const struct mtk_pin_reg_calc	*reg_cal;
-	const struct pinctrl_pin_desc	*pins;
-	unsigned int			npins;
-	const struct group_desc		*grps;
-	unsigned int			ngrps;
-	const struct function_desc	*funcs;
-	unsigned int			nfuncs;
-	const struct mtk_eint_regs	*eint_regs;
-	const struct mtk_eint_hw	*eint_hw;
-};
-
-struct mtk_pinctrl {
-	struct pinctrl_dev		*pctrl;
-	void __iomem			*base;
-	struct device			*dev;
-	struct gpio_chip		chip;
-	const struct mtk_pin_soc	*soc;
-	struct mtk_eint			*eint;
-};
+#define MT7622_PIN(_number, _name)					\
+	MTK_PIN(_number, _name, 1, _number, DRV_GRP0)
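+
+/*
+ * Example (illustrative): MT7622_PIN(0, "GPIO_A") declares pin 0 named
+ * "GPIO_A" whose EINT number equals its pin number, using drive group 0.
+ */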
 
 static const struct mtk_pin_field_calc mt7622_pin_mode_range[] = {
-	{0, 0, 0x320, 0x10, 16, 4},
-	{1, 4, 0x3a0, 0x10,  16, 4},
-	{5, 5, 0x320, 0x10,  0, 4},
-	{6, 6, 0x300, 0x10,  4, 4},
-	{7, 7, 0x300, 0x10,  4, 4},
-	{8, 9, 0x350, 0x10,  20, 4},
-	{10, 10, 0x300, 0x10, 8, 4},
-	{11, 11, 0x300, 0x10, 8, 4},
-	{12, 12, 0x300, 0x10, 8, 4},
-	{13, 13, 0x300, 0x10, 8, 4},
-	{14, 15, 0x320, 0x10, 4, 4},
-	{16, 17, 0x320, 0x10, 20, 4},
-	{18, 21, 0x310, 0x10, 16, 4},
-	{22, 22, 0x380, 0x10, 16, 4},
-	{23, 23, 0x300,	0x10, 24, 4},
-	{24, 24, 0x300, 0x10, 24, 4},
-	{25, 25, 0x300, 0x10, 12, 4},
-	{25, 25, 0x300, 0x10, 12, 4},
-	{26, 26, 0x300, 0x10, 12, 4},
-	{27, 27, 0x300, 0x10, 12, 4},
-	{28, 28, 0x300, 0x10, 12, 4},
-	{29, 29, 0x300, 0x10, 12, 4},
-	{30, 30, 0x300, 0x10, 12, 4},
-	{31, 31, 0x300, 0x10, 12, 4},
-	{32, 32, 0x300, 0x10, 12, 4},
-	{33, 33, 0x300,	0x10, 12, 4},
-	{34, 34, 0x300,	0x10, 12, 4},
-	{35, 35, 0x300,	0x10, 12, 4},
-	{36, 36, 0x300, 0x10, 12, 4},
-	{37, 37, 0x300, 0x10, 20, 4},
-	{38, 38, 0x300, 0x10, 20, 4},
-	{39, 39, 0x300, 0x10, 20, 4},
-	{40, 40, 0x300, 0x10, 20, 4},
-	{41, 41, 0x300,	0x10, 20, 4},
-	{42, 42, 0x300, 0x10, 20, 4},
-	{43, 43, 0x300,	0x10, 20, 4},
-	{44, 44, 0x300, 0x10, 20, 4},
-	{45, 46, 0x300, 0x10, 20, 4},
-	{47, 47, 0x300,	0x10, 20, 4},
-	{48, 48, 0x300, 0x10, 20, 4},
-	{49, 49, 0x300, 0x10, 20, 4},
-	{50, 50, 0x300, 0x10, 20, 4},
-	{51, 70, 0x330, 0x10, 4, 4},
-	{71, 71, 0x300, 0x10, 16, 4},
-	{72, 72, 0x300, 0x10, 16, 4},
-	{73, 76, 0x310, 0x10, 0, 4},
-	{77, 77, 0x320, 0x10, 28, 4},
-	{78, 78, 0x320, 0x10, 12, 4},
-	{79, 82, 0x3a0, 0x10, 0, 4},
-	{83, 83, 0x350,	0x10, 28, 4},
-	{84, 84, 0x330, 0x10, 0, 4},
-	{85, 90, 0x360, 0x10, 4, 4},
-	{91, 94, 0x390, 0x10, 16, 4},
-	{95, 97, 0x380, 0x10, 20, 4},
-	{98, 101, 0x390, 0x10, 0, 4},
-	{102, 102, 0x360, 0x10, 0, 4},
+	PIN_FIELD(0, 0, 0x320, 0x10, 16, 4),
+	PIN_FIELD(1, 4, 0x3a0, 0x10, 16, 4),
+	PIN_FIELD(5, 5, 0x320, 0x10, 0, 4),
+	PINS_FIELD(6, 7, 0x300, 0x10, 4, 4),
+	PIN_FIELD(8, 9, 0x350, 0x10, 20, 4),
+	PINS_FIELD(10, 13, 0x300, 0x10, 8, 4),
+	PIN_FIELD(14, 15, 0x320, 0x10, 4, 4),
+	PIN_FIELD(16, 17, 0x320, 0x10, 20, 4),
+	PIN_FIELD(18, 21, 0x310, 0x10, 16, 4),
+	PIN_FIELD(22, 22, 0x380, 0x10, 16, 4),
+	PINS_FIELD(23, 24, 0x300, 0x10, 24, 4),
+	PINS_FIELD(25, 36, 0x300, 0x10, 12, 4),
+	PINS_FIELD(37, 50, 0x300, 0x10, 20, 4),
+	PIN_FIELD(51, 70, 0x330, 0x10, 4, 4),
+	PINS_FIELD(71, 72, 0x300, 0x10, 16, 4),
+	PIN_FIELD(73, 76, 0x310, 0x10, 0, 4),
+	PIN_FIELD(77, 77, 0x320, 0x10, 28, 4),
+	PIN_FIELD(78, 78, 0x320, 0x10, 12, 4),
+	PIN_FIELD(79, 82, 0x3a0, 0x10, 0, 4),
+	PIN_FIELD(83, 83, 0x350, 0x10, 28, 4),
+	PIN_FIELD(84, 84, 0x330, 0x10, 0, 4),
+	PIN_FIELD(85, 90, 0x360, 0x10, 4, 4),
+	PIN_FIELD(91, 94, 0x390, 0x10, 16, 4),
+	PIN_FIELD(95, 97, 0x380, 0x10, 20, 4),
+	PIN_FIELD(98, 101, 0x390, 0x10, 0, 4),
+	PIN_FIELD(102, 102, 0x360, 0x10, 0, 4),
 };
 
 static const struct mtk_pin_field_calc mt7622_pin_dir_range[] = {
-	{0, 102, 0x0, 0x10, 0, 1},
+	PIN_FIELD(0, 102, 0x0, 0x10, 0, 1),
 };
 
 static const struct mtk_pin_field_calc mt7622_pin_di_range[] = {
-	{0, 102, 0x200, 0x10, 0, 1},
+	PIN_FIELD(0, 102, 0x200, 0x10, 0, 1),
 };
 
 static const struct mtk_pin_field_calc mt7622_pin_do_range[] = {
-	{0, 102, 0x100, 0x10, 0, 1},
+	PIN_FIELD(0, 102, 0x100, 0x10, 0, 1),
 };
 
 static const struct mtk_pin_field_calc mt7622_pin_sr_range[] = {
-	{0, 31, 0x910, 0x10, 0, 1},
-	{32, 50, 0xa10, 0x10, 0, 1},
-	{51, 70, 0x810, 0x10, 0, 1},
-	{71, 72, 0xb10, 0x10, 0, 1},
-	{73, 86, 0xb10, 0x10, 4, 1},
-	{87, 90, 0xc10, 0x10, 0, 1},
-	{91, 102, 0xb10, 0x10, 18, 1},
+	PIN_FIELD(0, 31, 0x910, 0x10, 0, 1),
+	PIN_FIELD(32, 50, 0xa10, 0x10, 0, 1),
+	PIN_FIELD(51, 70, 0x810, 0x10, 0, 1),
+	PIN_FIELD(71, 72, 0xb10, 0x10, 0, 1),
+	PIN_FIELD(73, 86, 0xb10, 0x10, 4, 1),
+	PIN_FIELD(87, 90, 0xc10, 0x10, 0, 1),
+	PIN_FIELD(91, 102, 0xb10, 0x10, 18, 1),
 };
 
 static const struct mtk_pin_field_calc mt7622_pin_smt_range[] = {
-	{0, 31, 0x920, 0x10, 0, 1},
-	{32, 50, 0xa20, 0x10, 0, 1},
-	{51, 70, 0x820, 0x10, 0, 1},
-	{71, 72, 0xb20, 0x10, 0, 1},
-	{73, 86, 0xb20, 0x10, 4, 1},
-	{87, 90, 0xc20, 0x10, 0, 1},
-	{91, 102, 0xb20, 0x10, 18, 1},
+	PIN_FIELD(0, 31, 0x920, 0x10, 0, 1),
+	PIN_FIELD(32, 50, 0xa20, 0x10, 0, 1),
+	PIN_FIELD(51, 70, 0x820, 0x10, 0, 1),
+	PIN_FIELD(71, 72, 0xb20, 0x10, 0, 1),
+	PIN_FIELD(73, 86, 0xb20, 0x10, 4, 1),
+	PIN_FIELD(87, 90, 0xc20, 0x10, 0, 1),
+	PIN_FIELD(91, 102, 0xb20, 0x10, 18, 1),
 };
 
 static const struct mtk_pin_field_calc mt7622_pin_pu_range[] = {
-	{0, 31, 0x930, 0x10, 0, 1},
-	{32, 50, 0xa30, 0x10, 0, 1},
-	{51, 70, 0x830, 0x10, 0, 1},
-	{71, 72, 0xb30, 0x10, 0, 1},
-	{73, 86, 0xb30, 0x10, 4, 1},
-	{87, 90, 0xc30, 0x10, 0, 1},
-	{91, 102, 0xb30, 0x10, 18, 1},
+	PIN_FIELD(0, 31, 0x930, 0x10, 0, 1),
+	PIN_FIELD(32, 50, 0xa30, 0x10, 0, 1),
+	PIN_FIELD(51, 70, 0x830, 0x10, 0, 1),
+	PIN_FIELD(71, 72, 0xb30, 0x10, 0, 1),
+	PIN_FIELD(73, 86, 0xb30, 0x10, 4, 1),
+	PIN_FIELD(87, 90, 0xc30, 0x10, 0, 1),
+	PIN_FIELD(91, 102, 0xb30, 0x10, 18, 1),
 };
 
 static const struct mtk_pin_field_calc mt7622_pin_pd_range[] = {
-	{0, 31, 0x940, 0x10, 0, 1},
-	{32, 50, 0xa40, 0x10, 0, 1},
-	{51, 70, 0x840, 0x10, 0, 1},
-	{71, 72, 0xb40, 0x10, 0, 1},
-	{73, 86, 0xb40, 0x10, 4, 1},
-	{87, 90, 0xc40, 0x10, 0, 1},
-	{91, 102, 0xb40, 0x10, 18, 1},
+	PIN_FIELD(0, 31, 0x940, 0x10, 0, 1),
+	PIN_FIELD(32, 50, 0xa40, 0x10, 0, 1),
+	PIN_FIELD(51, 70, 0x840, 0x10, 0, 1),
+	PIN_FIELD(71, 72, 0xb40, 0x10, 0, 1),
+	PIN_FIELD(73, 86, 0xb40, 0x10, 4, 1),
+	PIN_FIELD(87, 90, 0xc40, 0x10, 0, 1),
+	PIN_FIELD(91, 102, 0xb40, 0x10, 18, 1),
 };
 
 static const struct mtk_pin_field_calc mt7622_pin_e4_range[] = {
-	{0, 31, 0x960, 0x10, 0, 1},
-	{32, 50, 0xa60, 0x10, 0, 1},
-	{51, 70, 0x860, 0x10, 0, 1},
-	{71, 72, 0xb60, 0x10, 0, 1},
-	{73, 86, 0xb60, 0x10, 4, 1},
-	{87, 90, 0xc60, 0x10, 0, 1},
-	{91, 102, 0xb60, 0x10, 18, 1},
+	PIN_FIELD(0, 31, 0x960, 0x10, 0, 1),
+	PIN_FIELD(32, 50, 0xa60, 0x10, 0, 1),
+	PIN_FIELD(51, 70, 0x860, 0x10, 0, 1),
+	PIN_FIELD(71, 72, 0xb60, 0x10, 0, 1),
+	PIN_FIELD(73, 86, 0xb60, 0x10, 4, 1),
+	PIN_FIELD(87, 90, 0xc60, 0x10, 0, 1),
+	PIN_FIELD(91, 102, 0xb60, 0x10, 18, 1),
 };
 
 static const struct mtk_pin_field_calc mt7622_pin_e8_range[] = {
-	{0, 31, 0x970, 0x10, 0, 1},
-	{32, 50, 0xa70, 0x10, 0, 1},
-	{51, 70, 0x870, 0x10, 0, 1},
-	{71, 72, 0xb70, 0x10, 0, 1},
-	{73, 86, 0xb70, 0x10, 4, 1},
-	{87, 90, 0xc70, 0x10, 0, 1},
-	{91, 102, 0xb70, 0x10, 18, 1},
+	PIN_FIELD(0, 31, 0x970, 0x10, 0, 1),
+	PIN_FIELD(32, 50, 0xa70, 0x10, 0, 1),
+	PIN_FIELD(51, 70, 0x870, 0x10, 0, 1),
+	PIN_FIELD(71, 72, 0xb70, 0x10, 0, 1),
+	PIN_FIELD(73, 86, 0xb70, 0x10, 4, 1),
+	PIN_FIELD(87, 90, 0xc70, 0x10, 0, 1),
+	PIN_FIELD(91, 102, 0xb70, 0x10, 18, 1),
 };
 
 static const struct mtk_pin_field_calc mt7622_pin_tdsel_range[] = {
-	{0, 31, 0x980, 0x4, 0, 4},
-	{32, 50, 0xa80, 0x4, 0, 4},
-	{51, 70, 0x880, 0x4, 0, 4},
-	{71, 72, 0xb80, 0x4, 0, 4},
-	{73, 86, 0xb80, 0x4, 16, 4},
-	{87, 90, 0xc80, 0x4, 0, 4},
-	{91, 102, 0xb88, 0x4, 8, 4},
+	PIN_FIELD(0, 31, 0x980, 0x4, 0, 4),
+	PIN_FIELD(32, 50, 0xa80, 0x4, 0, 4),
+	PIN_FIELD(51, 70, 0x880, 0x4, 0, 4),
+	PIN_FIELD(71, 72, 0xb80, 0x4, 0, 4),
+	PIN_FIELD(73, 86, 0xb80, 0x4, 16, 4),
+	PIN_FIELD(87, 90, 0xc80, 0x4, 0, 4),
+	PIN_FIELD(91, 102, 0xb88, 0x4, 8, 4),
 };
 
 static const struct mtk_pin_field_calc mt7622_pin_rdsel_range[] = {
-	{0, 31, 0x990, 0x4, 0, 6},
-	{32, 50, 0xa90, 0x4, 0, 6},
-	{51, 58, 0x890, 0x4, 0, 6},
-	{59, 60, 0x894, 0x4, 28, 6},
-	{61, 62, 0x894, 0x4, 16, 6},
-	{63, 66, 0x898, 0x4, 8, 6},
-	{67, 68, 0x89c, 0x4, 12, 6},
-	{69, 70, 0x89c, 0x4, 0, 6},
-	{71, 72, 0xb90, 0x4, 0, 6},
-	{73, 86, 0xb90, 0x4, 24, 6},
-	{87, 90, 0xc90, 0x4, 0, 6},
-	{91, 102, 0xb9c, 0x4, 12, 6},
+	PIN_FIELD(0, 31, 0x990, 0x4, 0, 6),
+	PIN_FIELD(32, 50, 0xa90, 0x4, 0, 6),
+	PIN_FIELD(51, 58, 0x890, 0x4, 0, 6),
+	PIN_FIELD(59, 60, 0x894, 0x4, 28, 6),
+	PIN_FIELD(61, 62, 0x894, 0x4, 16, 6),
+	PIN_FIELD(63, 66, 0x898, 0x4, 8, 6),
+	PIN_FIELD(67, 68, 0x89c, 0x4, 12, 6),
+	PIN_FIELD(69, 70, 0x89c, 0x4, 0, 6),
+	PIN_FIELD(71, 72, 0xb90, 0x4, 0, 6),
+	PIN_FIELD(73, 86, 0xb90, 0x4, 24, 6),
+	PIN_FIELD(87, 90, 0xc90, 0x4, 0, 6),
+	PIN_FIELD(91, 102, 0xb9c, 0x4, 12, 6),
 };
 
 static const struct mtk_pin_reg_calc mt7622_reg_cals[PINCTRL_PIN_REG_MAX] = {
@@ -309,110 +152,110 @@
 	[PINCTRL_PIN_REG_RDSEL] = MTK_RANGE(mt7622_pin_rdsel_range),
 };
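
mt7622_reg_cals[] is the per-SoC dispatch table: it maps each abstract
register type (mode, direction, input, output, pull, ...) to its list
of pin ranges, indexed by the PINCTRL_PIN_REG_* enum. MTK_RANGE()
presumably just bundles an array with its length, along these lines:

	/* Sketch, assuming the helper from pinctrl-mtk-common-v2.h. */
	#define MTK_RANGE(_a)	{ .range = (_a), .nranges = ARRAY_SIZE(_a), }

The lookup code further down then scans the selected list for the range
that contains the requested pin.
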
 
-static const struct pinctrl_pin_desc mt7622_pins[] = {
-	PINCTRL_PIN(0, "GPIO_A"),
-	PINCTRL_PIN(1, "I2S1_IN"),
-	PINCTRL_PIN(2, "I2S1_OUT"),
-	PINCTRL_PIN(3, "I2S_BCLK"),
-	PINCTRL_PIN(4, "I2S_WS"),
-	PINCTRL_PIN(5, "I2S_MCLK"),
-	PINCTRL_PIN(6, "TXD0"),
-	PINCTRL_PIN(7, "RXD0"),
-	PINCTRL_PIN(8, "SPI_WP"),
-	PINCTRL_PIN(9, "SPI_HOLD"),
-	PINCTRL_PIN(10, "SPI_CLK"),
-	PINCTRL_PIN(11, "SPI_MOSI"),
-	PINCTRL_PIN(12, "SPI_MISO"),
-	PINCTRL_PIN(13, "SPI_CS"),
-	PINCTRL_PIN(14, "I2C_SDA"),
-	PINCTRL_PIN(15, "I2C_SCL"),
-	PINCTRL_PIN(16, "I2S2_IN"),
-	PINCTRL_PIN(17, "I2S3_IN"),
-	PINCTRL_PIN(18, "I2S4_IN"),
-	PINCTRL_PIN(19, "I2S2_OUT"),
-	PINCTRL_PIN(20, "I2S3_OUT"),
-	PINCTRL_PIN(21, "I2S4_OUT"),
-	PINCTRL_PIN(22, "GPIO_B"),
-	PINCTRL_PIN(23, "MDC"),
-	PINCTRL_PIN(24, "MDIO"),
-	PINCTRL_PIN(25, "G2_TXD0"),
-	PINCTRL_PIN(26, "G2_TXD1"),
-	PINCTRL_PIN(27, "G2_TXD2"),
-	PINCTRL_PIN(28, "G2_TXD3"),
-	PINCTRL_PIN(29, "G2_TXEN"),
-	PINCTRL_PIN(30, "G2_TXC"),
-	PINCTRL_PIN(31, "G2_RXD0"),
-	PINCTRL_PIN(32, "G2_RXD1"),
-	PINCTRL_PIN(33, "G2_RXD2"),
-	PINCTRL_PIN(34, "G2_RXD3"),
-	PINCTRL_PIN(35, "G2_RXDV"),
-	PINCTRL_PIN(36, "G2_RXC"),
-	PINCTRL_PIN(37, "NCEB"),
-	PINCTRL_PIN(38, "NWEB"),
-	PINCTRL_PIN(39, "NREB"),
-	PINCTRL_PIN(40, "NDL4"),
-	PINCTRL_PIN(41, "NDL5"),
-	PINCTRL_PIN(42, "NDL6"),
-	PINCTRL_PIN(43, "NDL7"),
-	PINCTRL_PIN(44, "NRB"),
-	PINCTRL_PIN(45, "NCLE"),
-	PINCTRL_PIN(46, "NALE"),
-	PINCTRL_PIN(47, "NDL0"),
-	PINCTRL_PIN(48, "NDL1"),
-	PINCTRL_PIN(49, "NDL2"),
-	PINCTRL_PIN(50, "NDL3"),
-	PINCTRL_PIN(51, "MDI_TP_P0"),
-	PINCTRL_PIN(52, "MDI_TN_P0"),
-	PINCTRL_PIN(53, "MDI_RP_P0"),
-	PINCTRL_PIN(54, "MDI_RN_P0"),
-	PINCTRL_PIN(55, "MDI_TP_P1"),
-	PINCTRL_PIN(56, "MDI_TN_P1"),
-	PINCTRL_PIN(57, "MDI_RP_P1"),
-	PINCTRL_PIN(58, "MDI_RN_P1"),
-	PINCTRL_PIN(59, "MDI_RP_P2"),
-	PINCTRL_PIN(60, "MDI_RN_P2"),
-	PINCTRL_PIN(61, "MDI_TP_P2"),
-	PINCTRL_PIN(62, "MDI_TN_P2"),
-	PINCTRL_PIN(63, "MDI_TP_P3"),
-	PINCTRL_PIN(64, "MDI_TN_P3"),
-	PINCTRL_PIN(65, "MDI_RP_P3"),
-	PINCTRL_PIN(66, "MDI_RN_P3"),
-	PINCTRL_PIN(67, "MDI_RP_P4"),
-	PINCTRL_PIN(68, "MDI_RN_P4"),
-	PINCTRL_PIN(69, "MDI_TP_P4"),
-	PINCTRL_PIN(70, "MDI_TN_P4"),
-	PINCTRL_PIN(71, "PMIC_SCL"),
-	PINCTRL_PIN(72, "PMIC_SDA"),
-	PINCTRL_PIN(73, "SPIC1_CLK"),
-	PINCTRL_PIN(74, "SPIC1_MOSI"),
-	PINCTRL_PIN(75, "SPIC1_MISO"),
-	PINCTRL_PIN(76, "SPIC1_CS"),
-	PINCTRL_PIN(77, "GPIO_D"),
-	PINCTRL_PIN(78, "WATCHDOG"),
-	PINCTRL_PIN(79, "RTS3_N"),
-	PINCTRL_PIN(80, "CTS3_N"),
-	PINCTRL_PIN(81, "TXD3"),
-	PINCTRL_PIN(82, "RXD3"),
-	PINCTRL_PIN(83, "PERST0_N"),
-	PINCTRL_PIN(84, "PERST1_N"),
-	PINCTRL_PIN(85, "WLED_N"),
-	PINCTRL_PIN(86, "EPHY_LED0_N"),
-	PINCTRL_PIN(87, "AUXIN0"),
-	PINCTRL_PIN(88, "AUXIN1"),
-	PINCTRL_PIN(89, "AUXIN2"),
-	PINCTRL_PIN(90, "AUXIN3"),
-	PINCTRL_PIN(91, "TXD4"),
-	PINCTRL_PIN(92, "RXD4"),
-	PINCTRL_PIN(93, "RTS4_N"),
-	PINCTRL_PIN(94, "CTS4_N"),
-	PINCTRL_PIN(95, "PWM1"),
-	PINCTRL_PIN(96, "PWM2"),
-	PINCTRL_PIN(97, "PWM3"),
-	PINCTRL_PIN(98, "PWM4"),
-	PINCTRL_PIN(99, "PWM5"),
-	PINCTRL_PIN(100, "PWM6"),
-	PINCTRL_PIN(101, "PWM7"),
-	PINCTRL_PIN(102, "GPIO_E"),
+static const struct mtk_pin_desc mt7622_pins[] = {
+	MT7622_PIN(0, "GPIO_A"),
+	MT7622_PIN(1, "I2S1_IN"),
+	MT7622_PIN(2, "I2S1_OUT"),
+	MT7622_PIN(3, "I2S_BCLK"),
+	MT7622_PIN(4, "I2S_WS"),
+	MT7622_PIN(5, "I2S_MCLK"),
+	MT7622_PIN(6, "TXD0"),
+	MT7622_PIN(7, "RXD0"),
+	MT7622_PIN(8, "SPI_WP"),
+	MT7622_PIN(9, "SPI_HOLD"),
+	MT7622_PIN(10, "SPI_CLK"),
+	MT7622_PIN(11, "SPI_MOSI"),
+	MT7622_PIN(12, "SPI_MISO"),
+	MT7622_PIN(13, "SPI_CS"),
+	MT7622_PIN(14, "I2C_SDA"),
+	MT7622_PIN(15, "I2C_SCL"),
+	MT7622_PIN(16, "I2S2_IN"),
+	MT7622_PIN(17, "I2S3_IN"),
+	MT7622_PIN(18, "I2S4_IN"),
+	MT7622_PIN(19, "I2S2_OUT"),
+	MT7622_PIN(20, "I2S3_OUT"),
+	MT7622_PIN(21, "I2S4_OUT"),
+	MT7622_PIN(22, "GPIO_B"),
+	MT7622_PIN(23, "MDC"),
+	MT7622_PIN(24, "MDIO"),
+	MT7622_PIN(25, "G2_TXD0"),
+	MT7622_PIN(26, "G2_TXD1"),
+	MT7622_PIN(27, "G2_TXD2"),
+	MT7622_PIN(28, "G2_TXD3"),
+	MT7622_PIN(29, "G2_TXEN"),
+	MT7622_PIN(30, "G2_TXC"),
+	MT7622_PIN(31, "G2_RXD0"),
+	MT7622_PIN(32, "G2_RXD1"),
+	MT7622_PIN(33, "G2_RXD2"),
+	MT7622_PIN(34, "G2_RXD3"),
+	MT7622_PIN(35, "G2_RXDV"),
+	MT7622_PIN(36, "G2_RXC"),
+	MT7622_PIN(37, "NCEB"),
+	MT7622_PIN(38, "NWEB"),
+	MT7622_PIN(39, "NREB"),
+	MT7622_PIN(40, "NDL4"),
+	MT7622_PIN(41, "NDL5"),
+	MT7622_PIN(42, "NDL6"),
+	MT7622_PIN(43, "NDL7"),
+	MT7622_PIN(44, "NRB"),
+	MT7622_PIN(45, "NCLE"),
+	MT7622_PIN(46, "NALE"),
+	MT7622_PIN(47, "NDL0"),
+	MT7622_PIN(48, "NDL1"),
+	MT7622_PIN(49, "NDL2"),
+	MT7622_PIN(50, "NDL3"),
+	MT7622_PIN(51, "MDI_TP_P0"),
+	MT7622_PIN(52, "MDI_TN_P0"),
+	MT7622_PIN(53, "MDI_RP_P0"),
+	MT7622_PIN(54, "MDI_RN_P0"),
+	MT7622_PIN(55, "MDI_TP_P1"),
+	MT7622_PIN(56, "MDI_TN_P1"),
+	MT7622_PIN(57, "MDI_RP_P1"),
+	MT7622_PIN(58, "MDI_RN_P1"),
+	MT7622_PIN(59, "MDI_RP_P2"),
+	MT7622_PIN(60, "MDI_RN_P2"),
+	MT7622_PIN(61, "MDI_TP_P2"),
+	MT7622_PIN(62, "MDI_TN_P2"),
+	MT7622_PIN(63, "MDI_TP_P3"),
+	MT7622_PIN(64, "MDI_TN_P3"),
+	MT7622_PIN(65, "MDI_RP_P3"),
+	MT7622_PIN(66, "MDI_RN_P3"),
+	MT7622_PIN(67, "MDI_RP_P4"),
+	MT7622_PIN(68, "MDI_RN_P4"),
+	MT7622_PIN(69, "MDI_TP_P4"),
+	MT7622_PIN(70, "MDI_TN_P4"),
+	MT7622_PIN(71, "PMIC_SCL"),
+	MT7622_PIN(72, "PMIC_SDA"),
+	MT7622_PIN(73, "SPIC1_CLK"),
+	MT7622_PIN(74, "SPIC1_MOSI"),
+	MT7622_PIN(75, "SPIC1_MISO"),
+	MT7622_PIN(76, "SPIC1_CS"),
+	MT7622_PIN(77, "GPIO_D"),
+	MT7622_PIN(78, "WATCHDOG"),
+	MT7622_PIN(79, "RTS3_N"),
+	MT7622_PIN(80, "CTS3_N"),
+	MT7622_PIN(81, "TXD3"),
+	MT7622_PIN(82, "RXD3"),
+	MT7622_PIN(83, "PERST0_N"),
+	MT7622_PIN(84, "PERST1_N"),
+	MT7622_PIN(85, "WLED_N"),
+	MT7622_PIN(86, "EPHY_LED0_N"),
+	MT7622_PIN(87, "AUXIN0"),
+	MT7622_PIN(88, "AUXIN1"),
+	MT7622_PIN(89, "AUXIN2"),
+	MT7622_PIN(90, "AUXIN3"),
+	MT7622_PIN(91, "TXD4"),
+	MT7622_PIN(92, "RXD4"),
+	MT7622_PIN(93, "RTS4_N"),
+	MT7622_PIN(94, "CTS4_N"),
+	MT7622_PIN(95, "PWM1"),
+	MT7622_PIN(96, "PWM2"),
+	MT7622_PIN(97, "PWM3"),
+	MT7622_PIN(98, "PWM4"),
+	MT7622_PIN(99, "PWM5"),
+	MT7622_PIN(100, "PWM6"),
+	MT7622_PIN(101, "PWM7"),
+	MT7622_PIN(102, "GPIO_E"),
 };
 
 /* List all groups consisting of these pins dedicated to the enablement of
@@ -906,18 +749,6 @@
 	{"watchdog", mt7622_wdt_groups, ARRAY_SIZE(mt7622_wdt_groups)},
 };
 
-static const struct pinconf_generic_params mtk_custom_bindings[] = {
-	{"mediatek,tdsel",	MTK_PIN_CONFIG_TDSEL,		0},
-	{"mediatek,rdsel",	MTK_PIN_CONFIG_RDSEL,		0},
-};
-
-#ifdef CONFIG_DEBUG_FS
-static const struct pin_config_item mtk_conf_items[] = {
-	PCONFDUMP(MTK_PIN_CONFIG_TDSEL, "tdsel", NULL, true),
-	PCONFDUMP(MTK_PIN_CONFIG_RDSEL, "rdsel", NULL, true),
-};
-#endif
-
 static const struct mtk_eint_hw mt7622_eint_hw = {
 	.port_mask = 7,
 	.ports     = 7,
@@ -934,830 +765,38 @@
 	.funcs = mt7622_functions,
 	.nfuncs = ARRAY_SIZE(mt7622_functions),
 	.eint_hw = &mt7622_eint_hw,
+	.gpio_m	= 1,
+	.ies_present = false,
+	.base_names = mtk_default_register_base_names,
+	.nbase_names = ARRAY_SIZE(mtk_default_register_base_names),
+	.bias_disable_set = mtk_pinconf_bias_disable_set,
+	.bias_disable_get = mtk_pinconf_bias_disable_get,
+	.bias_set = mtk_pinconf_bias_set,
+	.bias_get = mtk_pinconf_bias_get,
+	.drive_set = mtk_pinconf_drive_set,
+	.drive_get = mtk_pinconf_drive_get,
 };
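
The new fields added to mt7622_data are the hook points for the shared
core this series introduces: rather than every SoC file reimplementing
pinconf, the common code can now dispatch through struct mtk_pin_soc.
A hedged sketch of the dispatch pattern (not the exact core code):

	/* Sketch: how a shared core can drive the per-SoC callbacks. */
	static int mtk_bias_disable(struct mtk_pinctrl *hw,
				    const struct mtk_pin_desc *desc)
	{
		if (!hw->soc->bias_disable_set)
			return -ENOTSUPP;	/* SoC lacks this control */

		return hw->soc->bias_disable_set(hw, desc);
	}
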
 
-static void mtk_w32(struct mtk_pinctrl *pctl, u32 reg, u32 val)
-{
-	writel_relaxed(val, pctl->base + reg);
-}
-
-static u32 mtk_r32(struct mtk_pinctrl *pctl, u32 reg)
-{
-	return readl_relaxed(pctl->base + reg);
-}
-
-static void mtk_rmw(struct mtk_pinctrl *pctl, u32 reg, u32 mask, u32 set)
-{
-	u32 val;
-
-	val = mtk_r32(pctl, reg);
-	val &= ~mask;
-	val |= set;
-	mtk_w32(pctl, reg, val);
-}
-
-static int mtk_hw_pin_field_lookup(struct mtk_pinctrl *hw, int pin,
-				   const struct mtk_pin_reg_calc *rc,
-				   struct mtk_pin_field *pfd)
-{
-	const struct mtk_pin_field_calc *c, *e;
-	u32 bits;
-
-	c = rc->range;
-	e = c + rc->nranges;
-
-	while (c < e) {
-		if (pin >= c->s_pin && pin <= c->e_pin)
-			break;
-		c++;
-	}
-
-	if (c >= e) {
-		dev_err(hw->dev, "Out of range for pin = %d\n", pin);
-		return -EINVAL;
-	}
-
-	/* Caculated bits as the overall offset the pin is located at */
-	bits = c->s_bit + (pin - c->s_pin) * (c->x_bits);
-
-	/* Fill pfd from bits and 32-bit register applied is assumed */
-	pfd->offset = c->s_addr + c->x_addrs * (bits / 32);
-	pfd->bitpos = bits % 32;
-	pfd->mask = (1 << c->x_bits) - 1;
-
-	/* pfd->next is used for indicating that bit wrapping-around happens
-	 * which requires the manipulation for bit 0 starting in the next
-	 * register to form the complete field read/write.
-	 */
-	pfd->next = pfd->bitpos + c->x_bits - 1 > 31 ? c->x_addrs : 0;
-
-	return 0;
-}
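
The arithmetic in mtk_hw_pin_field_lookup() is easiest to follow with a
concrete entry; taking PIN_FIELD(91, 102, 0xb10, 0x10, 18, 1) from
mt7622_pin_sr_range[] above and an arbitrarily chosen pin 95:

	/* Worked example for PIN_FIELD(91, 102, 0xb10, 0x10, 18, 1),
	 * pin = 95:
	 *
	 *   bits        = 18 + (95 - 91) * 1       = 22
	 *   pfd->offset = 0xb10 + 0x10 * (22 / 32) = 0xb10
	 *   pfd->bitpos = 22 % 32                  = 22
	 *   pfd->mask   = (1 << 1) - 1             = 0x1
	 *   pfd->next   = 0	(22 + 1 - 1 = 22 <= 31, no wrap-around)
	 *
	 * i.e. the slew-rate control for pin 95 is bit 22 of the
	 * register at offset 0xb10.
	 */
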
-
-static int mtk_hw_pin_field_get(struct mtk_pinctrl *hw, int pin,
-				int field, struct mtk_pin_field *pfd)
-{
-	const struct mtk_pin_reg_calc *rc;
-
-	if (field < 0 || field >= PINCTRL_PIN_REG_MAX) {
-		dev_err(hw->dev, "Invalid Field %d\n", field);
-		return -EINVAL;
-	}
-
-	if (hw->soc->reg_cal && hw->soc->reg_cal[field].range) {
-		rc = &hw->soc->reg_cal[field];
-	} else {
-		dev_err(hw->dev, "Undefined range for field %d\n", field);
-		return -EINVAL;
-	}
-
-	return mtk_hw_pin_field_lookup(hw, pin, rc, pfd);
-}
-
-static void mtk_hw_bits_part(struct mtk_pin_field *pf, int *h, int *l)
-{
-	*l = 32 - pf->bitpos;
-	*h = get_count_order(pf->mask) - *l;
-}
-
-static void mtk_hw_write_cross_field(struct mtk_pinctrl *hw,
-				     struct mtk_pin_field *pf, int value)
-{
-	int nbits_l, nbits_h;
-
-	mtk_hw_bits_part(pf, &nbits_h, &nbits_l);
-
-	mtk_rmw(hw, pf->offset, pf->mask << pf->bitpos,
-		(value & pf->mask) << pf->bitpos);
-
-	mtk_rmw(hw, pf->offset + pf->next, BIT(nbits_h) - 1,
-		(value & pf->mask) >> nbits_l);
-}
-
-static void mtk_hw_read_cross_field(struct mtk_pinctrl *hw,
-				    struct mtk_pin_field *pf, int *value)
-{
-	int nbits_l, nbits_h, h, l;
-
-	mtk_hw_bits_part(pf, &nbits_h, &nbits_l);
-
-	l  = (mtk_r32(hw, pf->offset) >> pf->bitpos) & (BIT(nbits_l) - 1);
-	h  = (mtk_r32(hw, pf->offset + pf->next)) & (BIT(nbits_h) - 1);
-
-	*value = (h << nbits_l) | l;
-}
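
pfd->next is only non-zero when a field straddles a register boundary;
the 6-bit RDSEL entry PIN_FIELD(59, 60, 0x894, 0x4, 28, 6) above is
such a case. For pin 59, bitpos = 28, so 28 + 6 - 1 = 33 > 31 and
next = 0x4; mtk_hw_bits_part() then splits the field:

	/* Worked example for PIN_FIELD(59, 60, 0x894, 0x4, 28, 6),
	 * pin = 59:
	 *
	 *   pfd->offset = 0x894, pfd->bitpos = 28, pfd->mask = 0x3f
	 *   pfd->next   = 0x4	(field wraps into the next register)
	 *   nbits_l     = 32 - 28 = 4	-> low bits in 0x894[31:28]
	 *   nbits_h     = 6 - 4   = 2	-> high bits in 0x898[1:0]
	 *
	 * mtk_hw_read_cross_field() stitches the value back together
	 * as (0x898[1:0] << 4) | 0x894[31:28].
	 */
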
-
-static int mtk_hw_set_value(struct mtk_pinctrl *hw, int pin, int field,
-			    int value)
-{
-	struct mtk_pin_field pf;
-	int err;
-
-	err = mtk_hw_pin_field_get(hw, pin, field, &pf);
-	if (err)
-		return err;
-
-	if (!pf.next)
-		mtk_rmw(hw, pf.offset, pf.mask << pf.bitpos,
-			(value & pf.mask) << pf.bitpos);
-	else
-		mtk_hw_write_cross_field(hw, &pf, value);
-
-	return 0;
-}
-
-static int mtk_hw_get_value(struct mtk_pinctrl *hw, int pin, int field,
-			    int *value)
-{
-	struct mtk_pin_field pf;
-	int err;
-
-	err = mtk_hw_pin_field_get(hw, pin, field, &pf);
-	if (err)
-		return err;
-
-	if (!pf.next)
-		*value = (mtk_r32(hw, pf.offset) >> pf.bitpos) & pf.mask;
-	else
-		mtk_hw_read_cross_field(hw, &pf, value);
-
-	return 0;
-}
-
-static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
-			      unsigned int selector, unsigned int group)
-{
-	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
-	struct function_desc *func;
-	struct group_desc *grp;
-	int i;
-
-	func = pinmux_generic_get_function(pctldev, selector);
-	if (!func)
-		return -EINVAL;
-
-	grp = pinctrl_generic_get_group(pctldev, group);
-	if (!grp)
-		return -EINVAL;
-
-	dev_dbg(pctldev->dev, "enable function %s group %s\n",
-		func->name, grp->name);
-
-	for (i = 0; i < grp->num_pins; i++) {
-		int *pin_modes = grp->data;
-
-		mtk_hw_set_value(hw, grp->pins[i], PINCTRL_PIN_REG_MODE,
-				 pin_modes[i]);
-	}
-
-	return 0;
-}
-
-static int mtk_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
-					  struct pinctrl_gpio_range *range,
-					  unsigned int pin)
-{
-	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
-
-	return mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_MODE, MTK_GPIO_MODE);
-}
-
-static int mtk_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
-					 struct pinctrl_gpio_range *range,
-					 unsigned int pin, bool input)
-{
-	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
-
-	/* hardware would take 0 as input direction */
-	return mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_DIR, !input);
-}
-
-static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
-			   unsigned int pin, unsigned long *config)
-{
-	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
-	u32 param = pinconf_to_config_param(*config);
-	int val, val2, err, reg, ret = 1;
-
-	switch (param) {
-	case PIN_CONFIG_BIAS_DISABLE:
-		err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_PU, &val);
-		if (err)
-			return err;
-
-		err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_PD, &val2);
-		if (err)
-			return err;
-
-		if (val || val2)
-			return -EINVAL;
-
-		break;
-	case PIN_CONFIG_BIAS_PULL_UP:
-	case PIN_CONFIG_BIAS_PULL_DOWN:
-	case PIN_CONFIG_SLEW_RATE:
-		reg = (param == PIN_CONFIG_BIAS_PULL_UP) ?
-		      PINCTRL_PIN_REG_PU :
-		      (param == PIN_CONFIG_BIAS_PULL_DOWN) ?
-		      PINCTRL_PIN_REG_PD : PINCTRL_PIN_REG_SR;
-
-		err = mtk_hw_get_value(hw, pin, reg, &val);
-		if (err)
-			return err;
-
-		if (!val)
-			return -EINVAL;
-
-		break;
-	case PIN_CONFIG_INPUT_ENABLE:
-	case PIN_CONFIG_OUTPUT_ENABLE:
-		err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_DIR, &val);
-		if (err)
-			return err;
-
-		/* HW takes input mode as zero; output mode as non-zero */
-		if ((val && param == PIN_CONFIG_INPUT_ENABLE) ||
-		    (!val && param == PIN_CONFIG_OUTPUT_ENABLE))
-			return -EINVAL;
-
-		break;
-	case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
-		err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_DIR, &val);
-		if (err)
-			return err;
-
-		err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_SMT, &val2);
-		if (err)
-			return err;
-
-		if (val || !val2)
-			return -EINVAL;
-
-		break;
-	case PIN_CONFIG_DRIVE_STRENGTH:
-		err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_E4, &val);
-		if (err)
-			return err;
-
-		err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_E8, &val2);
-		if (err)
-			return err;
-
-		/* 4mA when (e8, e4) = (0, 0); 8mA when (e8, e4) = (0, 1)
-		 * 12mA when (e8, e4) = (1, 0); 16mA when (e8, e4) = (1, 1)
-		 */
-		ret = ((val2 << 1) + val + 1) * 4;
-
-		break;
-	case MTK_PIN_CONFIG_TDSEL:
-	case MTK_PIN_CONFIG_RDSEL:
-		reg = (param == MTK_PIN_CONFIG_TDSEL) ?
-		       PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
-
-		err = mtk_hw_get_value(hw, pin, reg, &val);
-		if (err)
-			return err;
-
-		ret = val;
-
-		break;
-	default:
-		return -ENOTSUPP;
-	}
-
-	*config = pinconf_to_config_packed(param, ret);
-
-	return 0;
-}
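
The drive-strength decode above packs the E8/E4 readback into a 2-bit
code and scales it, exactly as the in-line comment describes:

	/* Decode performed by ret = ((val2 << 1) + val + 1) * 4:
	 *
	 *   (e8, e4) = (0, 0): ((0 << 1) + 0 + 1) * 4 =  4 mA
	 *   (e8, e4) = (0, 1): ((0 << 1) + 1 + 1) * 4 =  8 mA
	 *   (e8, e4) = (1, 0): ((1 << 1) + 0 + 1) * 4 = 12 mA
	 *   (e8, e4) = (1, 1): ((1 << 1) + 1 + 1) * 4 = 16 mA
	 */
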
-
-static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
-			   unsigned long *configs, unsigned int num_configs)
-{
-	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
-	u32 reg, param, arg;
-	int cfg, err = 0;
-
-	for (cfg = 0; cfg < num_configs; cfg++) {
-		param = pinconf_to_config_param(configs[cfg]);
-		arg = pinconf_to_config_argument(configs[cfg]);
-
-		switch (param) {
-		case PIN_CONFIG_BIAS_DISABLE:
-		case PIN_CONFIG_BIAS_PULL_UP:
-		case PIN_CONFIG_BIAS_PULL_DOWN:
-			arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 :
-			       (param == PIN_CONFIG_BIAS_PULL_UP) ? 1 : 2;
-
-			err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_PU,
-					       arg & 1);
-			if (err)
-				goto err;
-
-			err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_PD,
-					       !!(arg & 2));
-			if (err)
-				goto err;
-			break;
-		case PIN_CONFIG_OUTPUT_ENABLE:
-			err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_SMT,
-					       MTK_DISABLE);
-			if (err)
-				goto err;
-			/* else: fall through */
-		case PIN_CONFIG_INPUT_ENABLE:
-		case PIN_CONFIG_SLEW_RATE:
-			reg = (param == PIN_CONFIG_SLEW_RATE) ?
-			       PINCTRL_PIN_REG_SR : PINCTRL_PIN_REG_DIR;
-
-			arg = (param == PIN_CONFIG_INPUT_ENABLE) ? 0 :
-			      (param == PIN_CONFIG_OUTPUT_ENABLE) ? 1 : arg;
-			err = mtk_hw_set_value(hw, pin, reg, arg);
-			if (err)
-				goto err;
-
-			break;
-		case PIN_CONFIG_OUTPUT:
-			err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_DIR,
-					       MTK_OUTPUT);
-			if (err)
-				goto err;
-
-			err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_DO,
-					       arg);
-			if (err)
-				goto err;
-			break;
-		case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
-			/* arg = 1: Input mode & SMT enable ;
-			 * arg = 0: Output mode & SMT disable
-			 */
-			arg = arg ? 2 : 1;
-			err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_DIR,
-					       arg & 1);
-			if (err)
-				goto err;
-
-			err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_SMT,
-					       !!(arg & 2));
-			if (err)
-				goto err;
-			break;
-		case PIN_CONFIG_DRIVE_STRENGTH:
-			/* 4mA when (e8, e4) = (0, 0);
-			 * 8mA when (e8, e4) = (0, 1);
-			 * 12mA when (e8, e4) = (1, 0);
-			 * 16mA when (e8, e4) = (1, 1)
-			 */
-			if (!(arg % 4) && (arg >= 4 && arg <= 16)) {
-				arg = arg / 4 - 1;
-				err = mtk_hw_set_value(hw, pin,
-						       PINCTRL_PIN_REG_E4,
-						       arg & 0x1);
-				if (err)
-					goto err;
-
-				err = mtk_hw_set_value(hw, pin,
-						       PINCTRL_PIN_REG_E8,
-						       (arg & 0x2) >> 1);
-				if (err)
-					goto err;
-			} else {
-				err = -ENOTSUPP;
-			}
-			break;
-		case MTK_PIN_CONFIG_TDSEL:
-		case MTK_PIN_CONFIG_RDSEL:
-			reg = (param == MTK_PIN_CONFIG_TDSEL) ?
-			       PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
-
-			err = mtk_hw_set_value(hw, pin, reg, arg);
-			if (err)
-				goto err;
-			break;
-		default:
-			err = -ENOTSUPP;
-		}
-	}
-err:
-	return err;
-}
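
mtk_pinconf_set() is the mirror image: bias requests are folded into a
2-bit code whose bit 0 drives the PU register and bit 1 the PD
register, and drive-strength arguments map back onto (E8, E4):

	/* Encodings used by mtk_pinconf_set() above:
	 *
	 *   bias:  disable   -> arg = 0 -> PU = 0, PD = 0
	 *          pull-up   -> arg = 1 -> PU = 1, PD = 0
	 *          pull-down -> arg = 2 -> PU = 0, PD = 1
	 *
	 *   drive: arg mA in {4, 8, 12, 16} -> code = arg / 4 - 1,
	 *          E4 = code & 0x1, E8 = (code & 0x2) >> 1,
	 *          e.g. 12 mA -> code = 2 -> (e8, e4) = (1, 0).
	 */
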
-
-static int mtk_pinconf_group_get(struct pinctrl_dev *pctldev,
-				 unsigned int group, unsigned long *config)
-{
-	const unsigned int *pins;
-	unsigned int i, npins, old = 0;
-	int ret;
-
-	ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < npins; i++) {
-		if (mtk_pinconf_get(pctldev, pins[i], config))
-			return -ENOTSUPP;
-
-		/* configs do not match between two pins */
-		if (i && old != *config)
-			return -ENOTSUPP;
-
-		old = *config;
-	}
-
-	return 0;
-}
-
-static int mtk_pinconf_group_set(struct pinctrl_dev *pctldev,
-				 unsigned int group, unsigned long *configs,
-				 unsigned int num_configs)
-{
-	const unsigned int *pins;
-	unsigned int i, npins;
-	int ret;
-
-	ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < npins; i++) {
-		ret = mtk_pinconf_set(pctldev, pins[i], configs, num_configs);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static const struct pinctrl_ops mtk_pctlops = {
-	.get_groups_count = pinctrl_generic_get_group_count,
-	.get_group_name = pinctrl_generic_get_group_name,
-	.get_group_pins = pinctrl_generic_get_group_pins,
-	.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
-	.dt_free_map = pinconf_generic_dt_free_map,
-};
-
-static const struct pinmux_ops mtk_pmxops = {
-	.get_functions_count = pinmux_generic_get_function_count,
-	.get_function_name = pinmux_generic_get_function_name,
-	.get_function_groups = pinmux_generic_get_function_groups,
-	.set_mux = mtk_pinmux_set_mux,
-	.gpio_request_enable = mtk_pinmux_gpio_request_enable,
-	.gpio_set_direction = mtk_pinmux_gpio_set_direction,
-	.strict = true,
-};
-
-static const struct pinconf_ops mtk_confops = {
-	.is_generic = true,
-	.pin_config_get = mtk_pinconf_get,
-	.pin_config_set = mtk_pinconf_set,
-	.pin_config_group_get = mtk_pinconf_group_get,
-	.pin_config_group_set = mtk_pinconf_group_set,
-	.pin_config_config_dbg_show = pinconf_generic_dump_config,
-};
-
-static struct pinctrl_desc mtk_desc = {
-	.name = PINCTRL_PINCTRL_DEV,
-	.pctlops = &mtk_pctlops,
-	.pmxops = &mtk_pmxops,
-	.confops = &mtk_confops,
-	.owner = THIS_MODULE,
-};
-
-static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
-{
-	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
-	int value, err;
-
-	err = mtk_hw_get_value(hw, gpio, PINCTRL_PIN_REG_DI, &value);
-	if (err)
-		return err;
-
-	return !!value;
-}
-
-static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
-{
-	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
-
-	mtk_hw_set_value(hw, gpio, PINCTRL_PIN_REG_DO, !!value);
-}
-
-static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
-{
-	return pinctrl_gpio_direction_input(chip->base + gpio);
-}
-
-static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
-				     int value)
-{
-	mtk_gpio_set(chip, gpio, value);
-
-	return pinctrl_gpio_direction_output(chip->base + gpio);
-}
-
-static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
-{
-	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
-	unsigned long eint_n;
-
-	if (!hw->eint)
-		return -ENOTSUPP;
-
-	eint_n = offset;
-
-	return mtk_eint_find_irq(hw->eint, eint_n);
-}
-
-static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
-			       unsigned long config)
-{
-	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
-	unsigned long eint_n;
-	u32 debounce;
-
-	if (!hw->eint ||
-	    pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
-		return -ENOTSUPP;
-
-	debounce = pinconf_to_config_argument(config);
-	eint_n = offset;
-
-	return mtk_eint_set_debounce(hw->eint, eint_n, debounce);
-}
-
-static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
-{
-	struct gpio_chip *chip = &hw->chip;
-	int ret;
-
-	chip->label		= PINCTRL_PINCTRL_DEV;
-	chip->parent		= hw->dev;
-	chip->request		= gpiochip_generic_request;
-	chip->free		= gpiochip_generic_free;
-	chip->direction_input	= mtk_gpio_direction_input;
-	chip->direction_output	= mtk_gpio_direction_output;
-	chip->get		= mtk_gpio_get;
-	chip->set		= mtk_gpio_set;
-	chip->to_irq		= mtk_gpio_to_irq,
-	chip->set_config	= mtk_gpio_set_config,
-	chip->base		= -1;
-	chip->ngpio		= hw->soc->npins;
-	chip->of_node		= np;
-	chip->of_gpio_n_cells	= 2;
-
-	ret = gpiochip_add_data(chip, hw);
-	if (ret < 0)
-		return ret;
-
-	/* Just for backward compatible for these old pinctrl nodes without
-	 * "gpio-ranges" property. Otherwise, called directly from a
-	 * DeviceTree-supported pinctrl driver is DEPRECATED.
-	 * Please see Section 2.1 of
-	 * Documentation/devicetree/bindings/gpio/gpio.txt on how to
-	 * bind pinctrl and gpio drivers via the "gpio-ranges" property.
-	 */
-	if (!of_find_property(np, "gpio-ranges", NULL)) {
-		ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
-					     chip->ngpio);
-		if (ret < 0) {
-			gpiochip_remove(chip);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static int mtk_build_groups(struct mtk_pinctrl *hw)
-{
-	int err, i;
-
-	for (i = 0; i < hw->soc->ngrps; i++) {
-		const struct group_desc *group = hw->soc->grps + i;
-
-		err = pinctrl_generic_add_group(hw->pctrl, group->name,
-						group->pins, group->num_pins,
-						group->data);
-		if (err < 0) {
-			dev_err(hw->dev, "Failed to register group %s\n",
-				group->name);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-static int mtk_build_functions(struct mtk_pinctrl *hw)
-{
-	int i, err;
-
-	for (i = 0; i < hw->soc->nfuncs ; i++) {
-		const struct function_desc *func = hw->soc->funcs + i;
-
-		err = pinmux_generic_add_function(hw->pctrl, func->name,
-						  func->group_names,
-						  func->num_group_names,
-						  func->data);
-		if (err < 0) {
-			dev_err(hw->dev, "Failed to register function %s\n",
-				func->name);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n,
-			     unsigned int *gpio_n,
-			     struct gpio_chip **gpio_chip)
-{
-	struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
-
-	*gpio_chip = &hw->chip;
-	*gpio_n = eint_n;
-
-	return 0;
-}
-
-static int mtk_xt_get_gpio_state(void *data, unsigned long eint_n)
-{
-	struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
-	struct gpio_chip *gpio_chip;
-	unsigned int gpio_n;
-	int err;
-
-	err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip);
-	if (err)
-		return err;
-
-	return mtk_gpio_get(gpio_chip, gpio_n);
-}
-
-static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n)
-{
-	struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
-	struct gpio_chip *gpio_chip;
-	unsigned int gpio_n;
-	int err;
-
-	err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip);
-	if (err)
-		return err;
-
-	err = mtk_hw_set_value(hw, gpio_n, PINCTRL_PIN_REG_MODE,
-			       MTK_GPIO_MODE);
-	if (err)
-		return err;
-
-	err = mtk_hw_set_value(hw, gpio_n, PINCTRL_PIN_REG_DIR, MTK_INPUT);
-	if (err)
-		return err;
-
-	err = mtk_hw_set_value(hw, gpio_n, PINCTRL_PIN_REG_SMT, MTK_ENABLE);
-	if (err)
-		return err;
-
-	return 0;
-}
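
The three writes above are the usual recipe for handing a pin over to
the external interrupt controller:

	/* 1. MODE = MTK_GPIO_MODE -- detach the pin from its function,
	 * 2. DIR  = MTK_INPUT     -- EINT samples the input path,
	 * 3. SMT  = MTK_ENABLE    -- the Schmitt trigger squares up
	 *                            slow or noisy external edges.
	 */
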
-
-static const struct mtk_eint_xt mtk_eint_xt = {
-	.get_gpio_n = mtk_xt_get_gpio_n,
-	.get_gpio_state = mtk_xt_get_gpio_state,
-	.set_gpio_as_eint = mtk_xt_set_gpio_as_eint,
-};
-
-static int
-mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
-{
-	struct device_node *np = pdev->dev.of_node;
-	struct resource *res;
-
-	if (!IS_ENABLED(CONFIG_EINT_MTK))
-		return 0;
-
-	if (!of_property_read_bool(np, "interrupt-controller"))
-		return -ENODEV;
-
-	hw->eint = devm_kzalloc(hw->dev, sizeof(*hw->eint), GFP_KERNEL);
-	if (!hw->eint)
-		return -ENOMEM;
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "eint");
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get eint resource\n");
-		return -ENODEV;
-	}
-
-	hw->eint->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(hw->eint->base))
-		return PTR_ERR(hw->eint->base);
-
-	hw->eint->irq = irq_of_parse_and_map(np, 0);
-	if (!hw->eint->irq)
-		return -EINVAL;
-
-	hw->eint->dev = &pdev->dev;
-	hw->eint->hw = hw->soc->eint_hw;
-	hw->eint->pctl = hw;
-	hw->eint->gpio_xlate = &mtk_eint_xt;
-
-	return mtk_eint_do_init(hw->eint);
-}
-
-static const struct of_device_id mtk_pinctrl_of_match[] = {
-	{ .compatible = "mediatek,mt7622-pinctrl", .data = &mt7622_data},
+static const struct of_device_id mt7622_pinctrl_of_match[] = {
+	{ .compatible = "mediatek,mt7622-pinctrl", },
 	{ }
 };
 
-static int mtk_pinctrl_probe(struct platform_device *pdev)
+static int mt7622_pinctrl_probe(struct platform_device *pdev)
 {
-	struct resource *res;
-	struct mtk_pinctrl *hw;
-	const struct of_device_id *of_id =
-		of_match_device(mtk_pinctrl_of_match, &pdev->dev);
-	int err;
-
-	hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
-	if (!hw)
-		return -ENOMEM;
-
-	hw->soc = of_id->data;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "missing IO resource\n");
-		return -ENXIO;
-	}
-
-	hw->dev = &pdev->dev;
-	hw->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(hw->base))
-		return PTR_ERR(hw->base);
-
-	/* Setup pins descriptions per SoC types */
-	mtk_desc.pins = hw->soc->pins;
-	mtk_desc.npins = hw->soc->npins;
-	mtk_desc.num_custom_params = ARRAY_SIZE(mtk_custom_bindings);
-	mtk_desc.custom_params = mtk_custom_bindings;
-#ifdef CONFIG_DEBUG_FS
-	mtk_desc.custom_conf_items = mtk_conf_items;
-#endif
-
-	err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw,
-					     &hw->pctrl);
-	if (err)
-		return err;
-
-	/* Setup groups descriptions per SoC types */
-	err = mtk_build_groups(hw);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to build groups\n");
-		return err;
-	}
-
-	/* Setup functions descriptions per SoC types */
-	err = mtk_build_functions(hw);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to build functions\n");
-		return err;
-	}
-
-	/* For able to make pinctrl_claim_hogs, we must not enable pinctrl
-	 * until all groups and functions are being added one.
-	 */
-	err = pinctrl_enable(hw->pctrl);
-	if (err)
-		return err;
-
-	err = mtk_build_eint(hw, pdev);
-	if (err)
-		dev_warn(&pdev->dev,
-			 "Failed to add EINT, but pinctrl still can work\n");
-
-	/* Build gpiochip should be after pinctrl_enable is done */
-	err = mtk_build_gpiochip(hw, pdev->dev.of_node);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to add gpio_chip\n");
-		return err;
-	}
-
-	platform_set_drvdata(pdev, hw);
-
-	return 0;
+	return mtk_moore_pinctrl_probe(pdev, &mt7622_data);
 }
 
-static struct platform_driver mtk_pinctrl_driver = {
+static struct platform_driver mt7622_pinctrl_driver = {
 	.driver = {
-		.name = "mtk-pinctrl",
-		.of_match_table = mtk_pinctrl_of_match,
+		.name = "mt7622-pinctrl",
+		.of_match_table = mt7622_pinctrl_of_match,
 	},
-	.probe = mtk_pinctrl_probe,
+	.probe = mt7622_pinctrl_probe,
 };
 
-static int __init mtk_pinctrl_init(void)
+static int __init mt7622_pinctrl_init(void)
 {
-	return platform_driver_register(&mtk_pinctrl_driver);
+	return platform_driver_register(&mt7622_pinctrl_driver);
 }
-arch_initcall(mtk_pinctrl_init);
+arch_initcall(mt7622_pinctrl_init);
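
With the shared machinery deleted above, the per-SoC file is reduced to
its tables plus this thin registration stub. mtk_moore_pinctrl_probe()
comes from the new pinctrl-moore core; presumably it carries the probe
sequence that used to live here, with a signature along these lines:

	/* Assumed declaration from pinctrl-moore.h (sketch):
	 *
	 *   int mtk_moore_pinctrl_probe(struct platform_device *pdev,
	 *				const struct mtk_pin_soc *soc);
	 *
	 * so a per-SoC driver only has to supply its mtk_pin_soc data.
	 */
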
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7623.c b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
new file mode 100644
index 0000000..b8d9d31
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
@@ -0,0 +1,1441 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MT7623 pinctrl driver based on the Linux generic pinctrl binding.
+ *
+ * Copyright (C) 2015 - 2018 MediaTek Inc.
+ * Author: Biao Huang <biao.huang@mediatek.com>
+ *	   Ryder Lee <ryder.lee@mediatek.com>
+ *	   Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include "pinctrl-moore.h"
+
+#define PIN_BOND_REG0		0xb10
+#define PIN_BOND_REG1		0xf20
+#define PIN_BOND_REG2		0xef0
+#define BOND_PCIE_CLR		(0x77 << 3)
+#define BOND_I2S_CLR		0x3
+#define BOND_MSDC0E_CLR		0x1
+
+#define PIN_FIELD15(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits)	\
+	PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit,	\
+		       _x_bits, 15, false)
+
+#define PIN_FIELD16(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits)	\
+	PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit,	\
+		       _x_bits, 16, 0)
+
+#define PINS_FIELD16(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits)	\
+	PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit,	\
+		       _x_bits, 16, 1)
+
+#define MT7623_PIN(_number, _name, _eint_n, _drv_grp)			\
+	MTK_PIN(_number, _name, 0, _eint_n, _drv_grp)
+
+static const struct mtk_pin_field_calc mt7623_pin_mode_range[] = {
+	PIN_FIELD15(0, 278, 0x760, 0x10, 0, 3),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_dir_range[] = {
+	PIN_FIELD16(0, 175, 0x0, 0x10, 0, 1),
+	PIN_FIELD16(176, 278, 0xc0, 0x10, 0, 1),
+};
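
MT7623 uses an older-generation GPIO block with 16-bit registers, which
is what the _sz_reg argument of the wrappers above encodes: mode fields
are 3 bits wide, so only five of them (15 bits) fit per register, hence
PIN_FIELD15(). Assuming the generic lookup divides by sz_reg the same
way the 32-bit path shown earlier divides by 32, the math works out as,
for example:

	/* Worked examples (pins chosen arbitrarily for illustration):
	 *
	 *   mode of pin 7, PIN_FIELD15(0, 278, 0x760, 0x10, 0, 3):
	 *     bits = 0 + 7 * 3 = 21
	 *     reg  = 0x760 + 0x10 * (21 / 15) = 0x770, bit = 21 % 15 = 6
	 *
	 *   dir of pin 20, PIN_FIELD16(0, 175, 0x0, 0x10, 0, 1):
	 *     bits = 20
	 *     reg  = 0x0 + 0x10 * (20 / 16) = 0x10,    bit = 20 % 16 = 4
	 */
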
+
+static const struct mtk_pin_field_calc mt7623_pin_di_range[] = {
+	PIN_FIELD16(0, 278, 0x630, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_do_range[] = {
+	PIN_FIELD16(0, 278, 0x500, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_ies_range[] = {
+	PINS_FIELD16(0, 6, 0xb20, 0x10, 0, 1),
+	PINS_FIELD16(7, 9, 0xb20, 0x10, 1, 1),
+	PINS_FIELD16(10, 13, 0xb30, 0x10, 3, 1),
+	PINS_FIELD16(14, 15, 0xb30, 0x10, 13, 1),
+	PINS_FIELD16(16, 17, 0xb40, 0x10, 7, 1),
+	PINS_FIELD16(18, 29, 0xb40, 0x10, 13, 1),
+	PINS_FIELD16(30, 32, 0xb40, 0x10, 7, 1),
+	PINS_FIELD16(33, 37, 0xb40, 0x10, 13, 1),
+	PIN_FIELD16(38, 38, 0xb20, 0x10, 13, 1),
+	PINS_FIELD16(39, 42, 0xb40, 0x10, 13, 1),
+	PINS_FIELD16(43, 45, 0xb20, 0x10, 10, 1),
+	PINS_FIELD16(47, 48, 0xb20, 0x10, 11, 1),
+	PIN_FIELD16(49, 49, 0xb20, 0x10, 12, 1),
+	PINS_FIELD16(50, 52, 0xb20, 0x10, 13, 1),
+	PINS_FIELD16(53, 56, 0xb20, 0x10, 14, 1),
+	PINS_FIELD16(57, 58, 0xb20, 0x10, 15, 1),
+	PIN_FIELD16(59, 59, 0xb30, 0x10, 10, 1),
+	PINS_FIELD16(60, 62, 0xb30, 0x10, 0, 1),
+	PINS_FIELD16(63, 65, 0xb30, 0x10, 1, 1),
+	PINS_FIELD16(66, 71, 0xb30, 0x10, 2, 1),
+	PINS_FIELD16(72, 74, 0xb20, 0x10, 12, 1),
+	PINS_FIELD16(75, 76, 0xb30, 0x10, 3, 1),
+	PINS_FIELD16(77, 78, 0xb30, 0x10, 4, 1),
+	PINS_FIELD16(79, 82, 0xb30, 0x10, 5, 1),
+	PINS_FIELD16(83, 84, 0xb30, 0x10, 2, 1),
+	PIN_FIELD16(85, 85, 0xda0, 0x10, 4, 1),
+	PIN_FIELD16(86, 86, 0xd90, 0x10, 4, 1),
+	PINS_FIELD16(87, 90, 0xdb0, 0x10, 4, 1),
+	PINS_FIELD16(101, 104, 0xb30, 0x10, 6, 1),
+	PIN_FIELD16(105, 105, 0xd40, 0x10, 4, 1),
+	PIN_FIELD16(106, 106, 0xd30, 0x10, 4, 1),
+	PINS_FIELD16(107, 110, 0xd50, 0x10, 4, 1),
+	PINS_FIELD16(111, 115, 0xce0, 0x10, 4, 1),
+	PIN_FIELD16(116, 116, 0xcd0, 0x10, 4, 1),
+	PIN_FIELD16(117, 117, 0xcc0, 0x10, 4, 1),
+	PINS_FIELD16(118, 121, 0xce0, 0x10, 4, 1),
+	PINS_FIELD16(122, 125, 0xb30, 0x10, 7, 1),
+	PIN_FIELD16(126, 126, 0xb20, 0x10, 12, 1),
+	PINS_FIELD16(127, 142, 0xb30, 0x10, 9, 1),
+	PINS_FIELD16(143, 160, 0xb30, 0x10, 10, 1),
+	PINS_FIELD16(161, 168, 0xb30, 0x10, 12, 1),
+	PINS_FIELD16(169, 183, 0xb30, 0x10, 10, 1),
+	PINS_FIELD16(184, 186, 0xb30, 0x10, 9, 1),
+	PIN_FIELD16(187, 187, 0xb30, 0x10, 14, 1),
+	PIN_FIELD16(188, 188, 0xb20, 0x10, 13, 1),
+	PINS_FIELD16(189, 193, 0xb30, 0x10, 15, 1),
+	PINS_FIELD16(194, 198, 0xb40, 0x10, 0, 1),
+	PIN_FIELD16(199, 199, 0xb20, 0x10, 1, 1),
+	PINS_FIELD16(200, 202, 0xb40, 0x10, 1, 1),
+	PINS_FIELD16(203, 207, 0xb40, 0x10, 2, 1),
+	PINS_FIELD16(208, 209, 0xb40, 0x10, 3, 1),
+	PIN_FIELD16(210, 210, 0xb40, 0x10, 4, 1),
+	PINS_FIELD16(211, 235, 0xb40, 0x10, 5, 1),
+	PINS_FIELD16(236, 241, 0xb40, 0x10, 6, 1),
+	PINS_FIELD16(242, 243, 0xb40, 0x10, 7, 1),
+	PINS_FIELD16(244, 247, 0xb40, 0x10, 8, 1),
+	PIN_FIELD16(248, 248, 0xb40, 0x10, 9, 1),
+	PINS_FIELD16(249, 257, 0xfc0, 0x10, 4, 1),
+	PIN_FIELD16(258, 258, 0xcb0, 0x10, 4, 1),
+	PIN_FIELD16(259, 259, 0xc90, 0x10, 4, 1),
+	PIN_FIELD16(260, 260, 0x3a0, 0x10, 4, 1),
+	PIN_FIELD16(261, 261, 0xd50, 0x10, 4, 1),
+	PINS_FIELD16(262, 277, 0xb40, 0x10, 12, 1),
+	PIN_FIELD16(278, 278, 0xb40, 0x10, 13, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_smt_range[] = {
+	PINS_FIELD16(0, 6, 0xb50, 0x10, 0, 1),
+	PINS_FIELD16(7, 9, 0xb50, 0x10, 1, 1),
+	PINS_FIELD16(10, 13, 0xb60, 0x10, 3, 1),
+	PINS_FIELD16(14, 15, 0xb60, 0x10, 13, 1),
+	PINS_FIELD16(16, 17, 0xb70, 0x10, 7, 1),
+	PINS_FIELD16(18, 29, 0xb70, 0x10, 13, 1),
+	PINS_FIELD16(30, 32, 0xb70, 0x10, 7, 1),
+	PINS_FIELD16(33, 37, 0xb70, 0x10, 13, 1),
+	PIN_FIELD16(38, 38, 0xb50, 0x10, 13, 1),
+	PINS_FIELD16(39, 42, 0xb70, 0x10, 13, 1),
+	PINS_FIELD16(43, 45, 0xb50, 0x10, 10, 1),
+	PINS_FIELD16(47, 48, 0xb50, 0x10, 11, 1),
+	PIN_FIELD16(49, 49, 0xb50, 0x10, 12, 1),
+	PINS_FIELD16(50, 52, 0xb50, 0x10, 13, 1),
+	PINS_FIELD16(53, 56, 0xb50, 0x10, 14, 1),
+	PINS_FIELD16(57, 58, 0xb50, 0x10, 15, 1),
+	PIN_FIELD16(59, 59, 0xb60, 0x10, 10, 1),
+	PINS_FIELD16(60, 62, 0xb60, 0x10, 0, 1),
+	PINS_FIELD16(63, 65, 0xb60, 0x10, 1, 1),
+	PINS_FIELD16(66, 71, 0xb60, 0x10, 2, 1),
+	PINS_FIELD16(72, 74, 0xb50, 0x10, 12, 1),
+	PINS_FIELD16(75, 76, 0xb60, 0x10, 3, 1),
+	PINS_FIELD16(77, 78, 0xb60, 0x10, 4, 1),
+	PINS_FIELD16(79, 82, 0xb60, 0x10, 5, 1),
+	PINS_FIELD16(83, 84, 0xb60, 0x10, 2, 1),
+	PIN_FIELD16(85, 85, 0xda0, 0x10, 11, 1),
+	PIN_FIELD16(86, 86, 0xd90, 0x10, 11, 1),
+	PIN_FIELD16(87, 87, 0xdc0, 0x10, 3, 1),
+	PIN_FIELD16(88, 88, 0xdc0, 0x10, 7, 1),
+	PIN_FIELD16(89, 89, 0xdc0, 0x10, 11, 1),
+	PIN_FIELD16(90, 90, 0xdc0, 0x10, 15, 1),
+	PINS_FIELD16(101, 104, 0xb60, 0x10, 6, 1),
+	PIN_FIELD16(105, 105, 0xd40, 0x10, 11, 1),
+	PIN_FIELD16(106, 106, 0xd30, 0x10, 11, 1),
+	PIN_FIELD16(107, 107, 0xd60, 0x10, 3, 1),
+	PIN_FIELD16(108, 108, 0xd60, 0x10, 7, 1),
+	PIN_FIELD16(109, 109, 0xd60, 0x10, 11, 1),
+	PIN_FIELD16(110, 110, 0xd60, 0x10, 15, 1),
+	PIN_FIELD16(111, 111, 0xd00, 0x10, 15, 1),
+	PIN_FIELD16(112, 112, 0xd00, 0x10, 11, 1),
+	PIN_FIELD16(113, 113, 0xd00, 0x10, 7, 1),
+	PIN_FIELD16(114, 114, 0xd00, 0x10, 3, 1),
+	PIN_FIELD16(115, 115, 0xd10, 0x10, 3, 1),
+	PIN_FIELD16(116, 116, 0xcd0, 0x10, 11, 1),
+	PIN_FIELD16(117, 117, 0xcc0, 0x10, 11, 1),
+	PIN_FIELD16(118, 118, 0xcf0, 0x10, 15, 1),
+	PIN_FIELD16(119, 119, 0xcf0, 0x10, 11, 1),
+	PIN_FIELD16(120, 120, 0xcf0, 0x10, 7, 1),
+	PIN_FIELD16(121, 121, 0xcf0, 0x10, 3, 1),
+	PINS_FIELD16(122, 125, 0xb60, 0x10, 7, 1),
+	PIN_FIELD16(126, 126, 0xb50, 0x10, 12, 1),
+	PINS_FIELD16(127, 142, 0xb60, 0x10, 9, 1),
+	PINS_FIELD16(143, 160, 0xb60, 0x10, 10, 1),
+	PINS_FIELD16(161, 168, 0xb60, 0x10, 12, 1),
+	PINS_FIELD16(169, 183, 0xb60, 0x10, 10, 1),
+	PINS_FIELD16(184, 186, 0xb60, 0x10, 9, 1),
+	PIN_FIELD16(187, 187, 0xb60, 0x10, 14, 1),
+	PIN_FIELD16(188, 188, 0xb50, 0x10, 13, 1),
+	PINS_FIELD16(189, 193, 0xb60, 0x10, 15, 1),
+	PINS_FIELD16(194, 198, 0xb70, 0x10, 0, 1),
+	PIN_FIELD16(199, 199, 0xb50, 0x10, 1, 1),
+	PINS_FIELD16(200, 202, 0xb70, 0x10, 1, 1),
+	PINS_FIELD16(203, 207, 0xb70, 0x10, 2, 1),
+	PINS_FIELD16(208, 209, 0xb70, 0x10, 3, 1),
+	PIN_FIELD16(210, 210, 0xb70, 0x10, 4, 1),
+	PINS_FIELD16(211, 235, 0xb70, 0x10, 5, 1),
+	PINS_FIELD16(236, 241, 0xb70, 0x10, 6, 1),
+	PINS_FIELD16(242, 243, 0xb70, 0x10, 7, 1),
+	PINS_FIELD16(244, 247, 0xb70, 0x10, 8, 1),
+	PIN_FIELD16(248, 248, 0xb70, 0x10, 9, 1),
+	PIN_FIELD16(249, 249, 0x140, 0x10, 3, 1),
+	PIN_FIELD16(250, 250, 0x130, 0x10, 15, 1),
+	PIN_FIELD16(251, 251, 0x130, 0x10, 11, 1),
+	PIN_FIELD16(252, 252, 0x130, 0x10, 7, 1),
+	PIN_FIELD16(253, 253, 0x130, 0x10, 3, 1),
+	PIN_FIELD16(254, 254, 0xf40, 0x10, 15, 1),
+	PIN_FIELD16(255, 255, 0xf40, 0x10, 11, 1),
+	PIN_FIELD16(256, 256, 0xf40, 0x10, 7, 1),
+	PIN_FIELD16(257, 257, 0xf40, 0x10, 3, 1),
+	PIN_FIELD16(258, 258, 0xcb0, 0x10, 11, 1),
+	PIN_FIELD16(259, 259, 0xc90, 0x10, 11, 1),
+	PIN_FIELD16(260, 260, 0x3a0, 0x10, 11, 1),
+	PIN_FIELD16(261, 261, 0x0b0, 0x10, 3, 1),
+	PINS_FIELD16(262, 277, 0xb70, 0x10, 12, 1),
+	PIN_FIELD16(278, 278, 0xb70, 0x10, 13, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_pullen_range[] = {
+	PIN_FIELD16(0, 278, 0x150, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_pullsel_range[] = {
+	PIN_FIELD16(0, 278, 0x280, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_drv_range[] = {
+	PINS_FIELD16(0, 6, 0xf50, 0x10, 0, 4),
+	PINS_FIELD16(7, 9, 0xf50, 0x10, 4, 4),
+	PINS_FIELD16(10, 13, 0xf50, 0x10, 4, 4),
+	PINS_FIELD16(14, 15, 0xf50, 0x10, 12, 4),
+	PINS_FIELD16(16, 17, 0xf60, 0x10, 0, 4),
+	PINS_FIELD16(18, 21, 0xf60, 0x10, 0, 4),
+	PINS_FIELD16(22, 26, 0xf60, 0x10, 8, 4),
+	PINS_FIELD16(27, 29, 0xf60, 0x10, 12, 4),
+	PINS_FIELD16(30, 32, 0xf60, 0x10, 0, 4),
+	PINS_FIELD16(33, 37, 0xf70, 0x10, 0, 4),
+	PIN_FIELD16(38, 38, 0xf70, 0x10, 4, 4),
+	PINS_FIELD16(39, 42, 0xf70, 0x10, 8, 4),
+	PINS_FIELD16(43, 45, 0xf70, 0x10, 12, 4),
+	PINS_FIELD16(47, 48, 0xf80, 0x10, 0, 4),
+	PIN_FIELD16(49, 49, 0xf80, 0x10, 4, 4),
+	PINS_FIELD16(50, 52, 0xf70, 0x10, 4, 4),
+	PINS_FIELD16(53, 56, 0xf80, 0x10, 12, 4),
+	PINS_FIELD16(60, 62, 0xf90, 0x10, 8, 4),
+	PINS_FIELD16(63, 65, 0xf90, 0x10, 12, 4),
+	PINS_FIELD16(66, 71, 0xfa0, 0x10, 0, 4),
+	PINS_FIELD16(72, 74, 0xf80, 0x10, 4, 4),
+	PIN_FIELD16(85, 85, 0xda0, 0x10, 0, 4),
+	PIN_FIELD16(86, 86, 0xd90, 0x10, 0, 4),
+	PINS_FIELD16(87, 90, 0xdb0, 0x10, 0, 4),
+	PIN_FIELD16(105, 105, 0xd40, 0x10, 0, 4),
+	PIN_FIELD16(106, 106, 0xd30, 0x10, 0, 4),
+	PINS_FIELD16(107, 110, 0xd50, 0x10, 0, 4),
+	PINS_FIELD16(111, 115, 0xce0, 0x10, 0, 4),
+	PIN_FIELD16(116, 116, 0xcd0, 0x10, 0, 4),
+	PIN_FIELD16(117, 117, 0xcc0, 0x10, 0, 4),
+	PINS_FIELD16(118, 121, 0xce0, 0x10, 0, 4),
+	PIN_FIELD16(126, 126, 0xf80, 0x10, 4, 4),
+	PIN_FIELD16(188, 188, 0xf70, 0x10, 4, 4),
+	PINS_FIELD16(189, 193, 0xfe0, 0x10, 8, 4),
+	PINS_FIELD16(194, 198, 0xfe0, 0x10, 12, 4),
+	PIN_FIELD16(199, 199, 0xf50, 0x10, 4, 4),
+	PINS_FIELD16(200, 202, 0xfd0, 0x10, 0, 4),
+	PINS_FIELD16(203, 207, 0xfd0, 0x10, 4, 4),
+	PINS_FIELD16(208, 209, 0xfd0, 0x10, 8, 4),
+	PIN_FIELD16(210, 210, 0xfd0, 0x10, 12, 4),
+	PINS_FIELD16(211, 235, 0xff0, 0x10, 0, 4),
+	PINS_FIELD16(236, 241, 0xff0, 0x10, 4, 4),
+	PINS_FIELD16(242, 243, 0xff0, 0x10, 8, 4),
+	PIN_FIELD16(248, 248, 0xf00, 0x10, 0, 4),
+	PINS_FIELD16(249, 256, 0xfc0, 0x10, 0, 4),
+	PIN_FIELD16(257, 257, 0xce0, 0x10, 0, 4),
+	PIN_FIELD16(258, 258, 0xcb0, 0x10, 0, 4),
+	PIN_FIELD16(259, 259, 0xc90, 0x10, 0, 4),
+	PIN_FIELD16(260, 260, 0x3a0, 0x10, 0, 4),
+	PIN_FIELD16(261, 261, 0xd50, 0x10, 0, 4),
+	PINS_FIELD16(262, 277, 0xf00, 0x10, 8, 4),
+	PIN_FIELD16(278, 278, 0xf70, 0x10, 8, 4),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_tdsel_range[] = {
+	PINS_FIELD16(262, 276, 0x4c0, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_pupd_range[] = {
+	/* MSDC0 */
+	PIN_FIELD16(111, 111, 0xd00, 0x10, 12, 1),
+	PIN_FIELD16(112, 112, 0xd00, 0x10, 8, 1),
+	PIN_FIELD16(113, 113, 0xd00, 0x10, 4, 1),
+	PIN_FIELD16(114, 114, 0xd00, 0x10, 0, 1),
+	PIN_FIELD16(115, 115, 0xd10, 0x10, 0, 1),
+	PIN_FIELD16(116, 116, 0xcd0, 0x10, 8, 1),
+	PIN_FIELD16(117, 117, 0xcc0, 0x10, 8, 1),
+	PIN_FIELD16(118, 118, 0xcf0, 0x10, 12, 1),
+	PIN_FIELD16(119, 119, 0xcf0, 0x10, 8, 1),
+	PIN_FIELD16(120, 120, 0xcf0, 0x10, 4, 1),
+	PIN_FIELD16(121, 121, 0xcf0, 0x10, 0, 1),
+	/* MSDC1 */
+	PIN_FIELD16(105, 105, 0xd40, 0x10, 8, 1),
+	PIN_FIELD16(106, 106, 0xd30, 0x10, 8, 1),
+	PIN_FIELD16(107, 107, 0xd60, 0x10, 0, 1),
+	PIN_FIELD16(108, 108, 0xd60, 0x10, 10, 1),
+	PIN_FIELD16(109, 109, 0xd60, 0x10, 4, 1),
+	PIN_FIELD16(110, 110, 0xc60, 0x10, 12, 1),
+	/* MSDC2 */
+	PIN_FIELD16(85, 85, 0xda0, 0x10, 8, 1),
+	PIN_FIELD16(86, 86, 0xd90, 0x10, 8, 1),
+	PIN_FIELD16(87, 87, 0xdc0, 0x10, 0, 1),
+	PIN_FIELD16(88, 88, 0xdc0, 0x10, 10, 1),
+	PIN_FIELD16(89, 89, 0xdc0, 0x10, 4, 1),
+	PIN_FIELD16(90, 90, 0xdc0, 0x10, 12, 1),
+	/* MSDC0E */
+	PIN_FIELD16(249, 249, 0x140, 0x10, 0, 1),
+	PIN_FIELD16(250, 250, 0x130, 0x10, 12, 1),
+	PIN_FIELD16(251, 251, 0x130, 0x10, 8, 1),
+	PIN_FIELD16(252, 252, 0x130, 0x10, 4, 1),
+	PIN_FIELD16(253, 253, 0x130, 0x10, 0, 1),
+	PIN_FIELD16(254, 254, 0xf40, 0x10, 12, 1),
+	PIN_FIELD16(255, 255, 0xf40, 0x10, 8, 1),
+	PIN_FIELD16(256, 256, 0xf40, 0x10, 4, 1),
+	PIN_FIELD16(257, 257, 0xf40, 0x10, 0, 1),
+	PIN_FIELD16(258, 258, 0xcb0, 0x10, 8, 1),
+	PIN_FIELD16(259, 259, 0xc90, 0x10, 8, 1),
+	PIN_FIELD16(261, 261, 0x140, 0x10, 8, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_r1_range[] = {
+	/* MSDC0 */
+	PIN_FIELD16(111, 111, 0xd00, 0x10, 13, 1),
+	PIN_FIELD16(112, 112, 0xd00, 0x10, 9, 1),
+	PIN_FIELD16(113, 113, 0xd00, 0x10, 5, 1),
+	PIN_FIELD16(114, 114, 0xd00, 0x10, 1, 1),
+	PIN_FIELD16(115, 115, 0xd10, 0x10, 1, 1),
+	PIN_FIELD16(116, 116, 0xcd0, 0x10, 9, 1),
+	PIN_FIELD16(117, 117, 0xcc0, 0x10, 9, 1),
+	PIN_FIELD16(118, 118, 0xcf0, 0x10, 13, 1),
+	PIN_FIELD16(119, 119, 0xcf0, 0x10, 9, 1),
+	PIN_FIELD16(120, 120, 0xcf0, 0x10, 5, 1),
+	PIN_FIELD16(121, 121, 0xcf0, 0x10, 1, 1),
+	/* MSDC1 */
+	PIN_FIELD16(105, 105, 0xd40, 0x10, 9, 1),
+	PIN_FIELD16(106, 106, 0xd30, 0x10, 9, 1),
+	PIN_FIELD16(107, 107, 0xd60, 0x10, 1, 1),
+	PIN_FIELD16(108, 108, 0xd60, 0x10, 9, 1),
+	PIN_FIELD16(109, 109, 0xd60, 0x10, 5, 1),
+	PIN_FIELD16(110, 110, 0xc60, 0x10, 13, 1),
+	/* MSDC2 */
+	PIN_FIELD16(85, 85, 0xda0, 0x10, 9, 1),
+	PIN_FIELD16(86, 86, 0xd90, 0x10, 9, 1),
+	PIN_FIELD16(87, 87, 0xdc0, 0x10, 1, 1),
+	PIN_FIELD16(88, 88, 0xdc0, 0x10, 9, 1),
+	PIN_FIELD16(89, 89, 0xdc0, 0x10, 5, 1),
+	PIN_FIELD16(90, 90, 0xdc0, 0x10, 13, 1),
+	/* MSDC0E */
+	PIN_FIELD16(249, 249, 0x140, 0x10, 1, 1),
+	PIN_FIELD16(250, 250, 0x130, 0x10, 13, 1),
+	PIN_FIELD16(251, 251, 0x130, 0x10, 9, 1),
+	PIN_FIELD16(252, 252, 0x130, 0x10, 5, 1),
+	PIN_FIELD16(253, 253, 0x130, 0x10, 1, 1),
+	PIN_FIELD16(254, 254, 0xf40, 0x10, 13, 1),
+	PIN_FIELD16(255, 255, 0xf40, 0x10, 9, 1),
+	PIN_FIELD16(256, 256, 0xf40, 0x10, 5, 1),
+	PIN_FIELD16(257, 257, 0xf40, 0x10, 1, 1),
+	PIN_FIELD16(258, 258, 0xcb0, 0x10, 9, 1),
+	PIN_FIELD16(259, 259, 0xc90, 0x10, 9, 1),
+	PIN_FIELD16(261, 261, 0x140, 0x10, 9, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_r0_range[] = {
+	/* MSDC0 */
+	PIN_FIELD16(111, 111, 0xd00, 0x10, 14, 1),
+	PIN_FIELD16(112, 112, 0xd00, 0x10, 10, 1),
+	PIN_FIELD16(113, 113, 0xd00, 0x10, 6, 1),
+	PIN_FIELD16(114, 114, 0xd00, 0x10, 2, 1),
+	PIN_FIELD16(115, 115, 0xd10, 0x10, 2, 1),
+	PIN_FIELD16(116, 116, 0xcd0, 0x10, 10, 1),
+	PIN_FIELD16(117, 117, 0xcc0, 0x10, 10, 1),
+	PIN_FIELD16(118, 118, 0xcf0, 0x10, 14, 1),
+	PIN_FIELD16(119, 119, 0xcf0, 0x10, 10, 1),
+	PIN_FIELD16(120, 120, 0xcf0, 0x10, 6, 1),
+	PIN_FIELD16(121, 121, 0xcf0, 0x10, 2, 1),
+	/* MSDC1 */
+	PIN_FIELD16(105, 105, 0xd40, 0x10, 10, 1),
+	PIN_FIELD16(106, 106, 0xd30, 0x10, 10, 1),
+	PIN_FIELD16(107, 107, 0xd60, 0x10, 2, 1),
+	PIN_FIELD16(108, 108, 0xd60, 0x10, 8, 1),
+	PIN_FIELD16(109, 109, 0xd60, 0x10, 6, 1),
+	PIN_FIELD16(110, 110, 0xc60, 0x10, 14, 1),
+	/* MSDC2 */
+	PIN_FIELD16(85, 85, 0xda0, 0x10, 10, 1),
+	PIN_FIELD16(86, 86, 0xd90, 0x10, 10, 1),
+	PIN_FIELD16(87, 87, 0xdc0, 0x10, 2, 1),
+	PIN_FIELD16(88, 88, 0xdc0, 0x10, 8, 1),
+	PIN_FIELD16(89, 89, 0xdc0, 0x10, 6, 1),
+	PIN_FIELD16(90, 90, 0xdc0, 0x10, 14, 1),
+	/* MSDC0E */
+	PIN_FIELD16(249, 249, 0x140, 0x10, 2, 1),
+	PIN_FIELD16(250, 250, 0x130, 0x10, 14, 1),
+	PIN_FIELD16(251, 251, 0x130, 0x10, 10, 1),
+	PIN_FIELD16(252, 252, 0x130, 0x10, 6, 1),
+	PIN_FIELD16(253, 253, 0x130, 0x10, 2, 1),
+	PIN_FIELD16(254, 254, 0xf40, 0x10, 14, 1),
+	PIN_FIELD16(255, 255, 0xf40, 0x10, 10, 1),
+	PIN_FIELD16(256, 256, 0xf40, 0x10, 6, 1),
+	PIN_FIELD16(257, 257, 0xf40, 0x10, 2, 1),
+	PIN_FIELD16(258, 258, 0xcb0, 0x10, 10, 1),
+	PIN_FIELD16(259, 259, 0xc90, 0x10, 10, 1),
+	PIN_FIELD16(261, 261, 0x140, 0x10, 10, 1),
+};
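
The MSDC pads do not use the generic PULLEN/PULLSEL pair; they carry
dedicated PUPD/R1/R0 controls, split across the three tables above:
PUPD selects the pull direction while the R1/R0 pair picks one of the
pad's internal resistor combinations. An illustrative helper built on
the lookup machinery (the PUPD polarity and resistor encoding are
assumptions here, not taken from this patch):

	/* Illustration only: select a pull on an MSDC pad. */
	static int mt7623_msdc_set_pull(struct mtk_pinctrl *hw, int pin,
					int pupd, int r1, int r0)
	{
		int err;

		err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_PUPD, pupd);
		if (err)
			return err;

		err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_R1, r1);
		if (err)
			return err;

		return mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_R0, r0);
	}
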
+
+static const struct mtk_pin_reg_calc mt7623_reg_cals[PINCTRL_PIN_REG_MAX] = {
+	[PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt7623_pin_mode_range),
+	[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt7623_pin_dir_range),
+	[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt7623_pin_di_range),
+	[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt7623_pin_do_range),
+	[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt7623_pin_smt_range),
+	[PINCTRL_PIN_REG_PULLSEL] = MTK_RANGE(mt7623_pin_pullsel_range),
+	[PINCTRL_PIN_REG_PULLEN] = MTK_RANGE(mt7623_pin_pullen_range),
+	[PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt7623_pin_drv_range),
+	[PINCTRL_PIN_REG_TDSEL] = MTK_RANGE(mt7623_pin_tdsel_range),
+	[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt7623_pin_ies_range),
+	[PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt7623_pin_pupd_range),
+	[PINCTRL_PIN_REG_R0] = MTK_RANGE(mt7623_pin_r0_range),
+	[PINCTRL_PIN_REG_R1] = MTK_RANGE(mt7623_pin_r1_range),
+};
+
+static const struct mtk_pin_desc mt7623_pins[] = {
+	MT7623_PIN(0, "PWRAP_SPI0_MI", 148, DRV_GRP3),
+	MT7623_PIN(1, "PWRAP_SPI0_MO", 149, DRV_GRP3),
+	MT7623_PIN(2, "PWRAP_INT", 150, DRV_GRP3),
+	MT7623_PIN(3, "PWRAP_SPI0_CK", 151, DRV_GRP3),
+	MT7623_PIN(4, "PWRAP_SPI0_CSN", 152, DRV_GRP3),
+	MT7623_PIN(5, "PWRAP_SPI0_CK2", 153, DRV_GRP3),
+	MT7623_PIN(6, "PWRAP_SPI0_CSN2", 154, DRV_GRP3),
+	MT7623_PIN(7, "SPI1_CSN", 155, DRV_GRP3),
+	MT7623_PIN(8, "SPI1_MI", 156, DRV_GRP3),
+	MT7623_PIN(9, "SPI1_MO", 157, DRV_GRP3),
+	MT7623_PIN(10, "RTC32K_CK", 158, DRV_GRP3),
+	MT7623_PIN(11, "WATCHDOG", 159, DRV_GRP3),
+	MT7623_PIN(12, "SRCLKENA", 160, DRV_GRP3),
+	MT7623_PIN(13, "SRCLKENAI", 161, DRV_GRP3),
+	MT7623_PIN(14, "URXD2", 162, DRV_GRP1),
+	MT7623_PIN(15, "UTXD2", 163, DRV_GRP1),
+	MT7623_PIN(16, "I2S5_DATA_IN", 164, DRV_GRP1),
+	MT7623_PIN(17, "I2S5_BCK", 165, DRV_GRP1),
+	MT7623_PIN(18, "PCM_CLK", 166, DRV_GRP1),
+	MT7623_PIN(19, "PCM_SYNC", 167, DRV_GRP1),
+	MT7623_PIN(20, "PCM_RX", EINT_NA, DRV_GRP1),
+	MT7623_PIN(21, "PCM_TX", EINT_NA, DRV_GRP1),
+	MT7623_PIN(22, "EINT0", 0, DRV_GRP1),
+	MT7623_PIN(23, "EINT1", 1, DRV_GRP1),
+	MT7623_PIN(24, "EINT2", 2, DRV_GRP1),
+	MT7623_PIN(25, "EINT3", 3, DRV_GRP1),
+	MT7623_PIN(26, "EINT4", 4, DRV_GRP1),
+	MT7623_PIN(27, "EINT5", 5, DRV_GRP1),
+	MT7623_PIN(28, "EINT6", 6, DRV_GRP1),
+	MT7623_PIN(29, "EINT7", 7, DRV_GRP1),
+	MT7623_PIN(30, "I2S5_LRCK", 12, DRV_GRP1),
+	MT7623_PIN(31, "I2S5_MCLK", 13, DRV_GRP1),
+	MT7623_PIN(32, "I2S5_DATA", 14, DRV_GRP1),
+	MT7623_PIN(33, "I2S1_DATA", 15, DRV_GRP1),
+	MT7623_PIN(34, "I2S1_DATA_IN", 16, DRV_GRP1),
+	MT7623_PIN(35, "I2S1_BCK", 17, DRV_GRP1),
+	MT7623_PIN(36, "I2S1_LRCK", 18, DRV_GRP1),
+	MT7623_PIN(37, "I2S1_MCLK", 19, DRV_GRP1),
+	MT7623_PIN(38, "I2S2_DATA", 20, DRV_GRP1),
+	MT7623_PIN(39, "JTMS", 21, DRV_GRP3),
+	MT7623_PIN(40, "JTCK", 22, DRV_GRP3),
+	MT7623_PIN(41, "JTDI", 23, DRV_GRP3),
+	MT7623_PIN(42, "JTDO", 24, DRV_GRP3),
+	MT7623_PIN(43, "NCLE", 25, DRV_GRP1),
+	MT7623_PIN(44, "NCEB1", 26, DRV_GRP1),
+	MT7623_PIN(45, "NCEB0", 27, DRV_GRP1),
+	MT7623_PIN(46, "IR", 28, DRV_FIXED),
+	MT7623_PIN(47, "NREB", 29, DRV_GRP1),
+	MT7623_PIN(48, "NRNB", 30, DRV_GRP1),
+	MT7623_PIN(49, "I2S0_DATA", 31, DRV_GRP1),
+	MT7623_PIN(50, "I2S2_BCK", 32, DRV_GRP1),
+	MT7623_PIN(51, "I2S2_DATA_IN", 33, DRV_GRP1),
+	MT7623_PIN(52, "I2S2_LRCK", 34, DRV_GRP1),
+	MT7623_PIN(53, "SPI0_CSN", 35, DRV_GRP1),
+	MT7623_PIN(54, "SPI0_CK", 36, DRV_GRP1),
+	MT7623_PIN(55, "SPI0_MI", 37, DRV_GRP1),
+	MT7623_PIN(56, "SPI0_MO", 38, DRV_GRP1),
+	MT7623_PIN(57, "SDA1", 39, DRV_FIXED),
+	MT7623_PIN(58, "SCL1", 40, DRV_FIXED),
+	MT7623_PIN(59, "RAMBUF_I_CLK", EINT_NA, DRV_FIXED),
+	MT7623_PIN(60, "WB_RSTB", 41, DRV_GRP3),
+	MT7623_PIN(61, "F2W_DATA", 42, DRV_GRP3),
+	MT7623_PIN(62, "F2W_CLK", 43, DRV_GRP3),
+	MT7623_PIN(63, "WB_SCLK", 44, DRV_GRP3),
+	MT7623_PIN(64, "WB_SDATA", 45, DRV_GRP3),
+	MT7623_PIN(65, "WB_SEN", 46, DRV_GRP3),
+	MT7623_PIN(66, "WB_CRTL0", 47, DRV_GRP3),
+	MT7623_PIN(67, "WB_CRTL1", 48, DRV_GRP3),
+	MT7623_PIN(68, "WB_CRTL2", 49, DRV_GRP3),
+	MT7623_PIN(69, "WB_CRTL3", 50, DRV_GRP3),
+	MT7623_PIN(70, "WB_CRTL4", 51, DRV_GRP3),
+	MT7623_PIN(71, "WB_CRTL5", 52, DRV_GRP3),
+	MT7623_PIN(72, "I2S0_DATA_IN", 53, DRV_GRP1),
+	MT7623_PIN(73, "I2S0_LRCK", 54, DRV_GRP1),
+	MT7623_PIN(74, "I2S0_BCK", 55, DRV_GRP1),
+	MT7623_PIN(75, "SDA0", 56, DRV_FIXED),
+	MT7623_PIN(76, "SCL0", 57, DRV_FIXED),
+	MT7623_PIN(77, "SDA2", 58, DRV_FIXED),
+	MT7623_PIN(78, "SCL2", 59, DRV_FIXED),
+	MT7623_PIN(79, "URXD0", 60, DRV_FIXED),
+	MT7623_PIN(80, "UTXD0", 61, DRV_FIXED),
+	MT7623_PIN(81, "URXD1", 62, DRV_FIXED),
+	MT7623_PIN(82, "UTXD1", 63, DRV_FIXED),
+	MT7623_PIN(83, "LCM_RST", 64, DRV_FIXED),
+	MT7623_PIN(84, "DSI_TE", 65, DRV_FIXED),
+	MT7623_PIN(85, "MSDC2_CMD", 66, DRV_GRP4),
+	MT7623_PIN(86, "MSDC2_CLK", 67, DRV_GRP4),
+	MT7623_PIN(87, "MSDC2_DAT0", 68, DRV_GRP4),
+	MT7623_PIN(88, "MSDC2_DAT1", 69, DRV_GRP4),
+	MT7623_PIN(89, "MSDC2_DAT2", 70, DRV_GRP4),
+	MT7623_PIN(90, "MSDC2_DAT3", 71, DRV_GRP4),
+	MT7623_PIN(91, "TDN3", EINT_NA, DRV_FIXED),
+	MT7623_PIN(92, "TDP3", EINT_NA, DRV_FIXED),
+	MT7623_PIN(93, "TDN2", EINT_NA, DRV_FIXED),
+	MT7623_PIN(94, "TDP2", EINT_NA, DRV_FIXED),
+	MT7623_PIN(95, "TCN", EINT_NA, DRV_FIXED),
+	MT7623_PIN(96, "TCP", EINT_NA, DRV_FIXED),
+	MT7623_PIN(97, "TDN1", EINT_NA, DRV_FIXED),
+	MT7623_PIN(98, "TDP1", EINT_NA, DRV_FIXED),
+	MT7623_PIN(99, "TDN0", EINT_NA, DRV_FIXED),
+	MT7623_PIN(100, "TDP0", EINT_NA, DRV_FIXED),
+	MT7623_PIN(101, "SPI2_CSN", 74, DRV_FIXED),
+	MT7623_PIN(102, "SPI2_MI", 75, DRV_FIXED),
+	MT7623_PIN(103, "SPI2_MO", 76, DRV_FIXED),
+	MT7623_PIN(104, "SPI2_CLK", 77, DRV_FIXED),
+	MT7623_PIN(105, "MSDC1_CMD", 78, DRV_GRP4),
+	MT7623_PIN(106, "MSDC1_CLK", 79, DRV_GRP4),
+	MT7623_PIN(107, "MSDC1_DAT0", 80, DRV_GRP4),
+	MT7623_PIN(108, "MSDC1_DAT1", 81, DRV_GRP4),
+	MT7623_PIN(109, "MSDC1_DAT2", 82, DRV_GRP4),
+	MT7623_PIN(110, "MSDC1_DAT3", 83, DRV_GRP4),
+	MT7623_PIN(111, "MSDC0_DAT7", 84, DRV_GRP4),
+	MT7623_PIN(112, "MSDC0_DAT6", 85, DRV_GRP4),
+	MT7623_PIN(113, "MSDC0_DAT5", 86, DRV_GRP4),
+	MT7623_PIN(114, "MSDC0_DAT4", 87, DRV_GRP4),
+	MT7623_PIN(115, "MSDC0_RSTB", 88, DRV_GRP4),
+	MT7623_PIN(116, "MSDC0_CMD", 89, DRV_GRP4),
+	MT7623_PIN(117, "MSDC0_CLK", 90, DRV_GRP4),
+	MT7623_PIN(118, "MSDC0_DAT3", 91, DRV_GRP4),
+	MT7623_PIN(119, "MSDC0_DAT2", 92, DRV_GRP4),
+	MT7623_PIN(120, "MSDC0_DAT1", 93, DRV_GRP4),
+	MT7623_PIN(121, "MSDC0_DAT0", 94, DRV_GRP4),
+	MT7623_PIN(122, "CEC", 95, DRV_FIXED),
+	MT7623_PIN(123, "HTPLG", 96, DRV_FIXED),
+	MT7623_PIN(124, "HDMISCK", 97, DRV_FIXED),
+	MT7623_PIN(125, "HDMISD", 98, DRV_FIXED),
+	MT7623_PIN(126, "I2S0_MCLK", 99, DRV_GRP1),
+	MT7623_PIN(127, "RAMBUF_IDATA0", EINT_NA, DRV_FIXED),
+	MT7623_PIN(128, "RAMBUF_IDATA1", EINT_NA, DRV_FIXED),
+	MT7623_PIN(129, "RAMBUF_IDATA2", EINT_NA, DRV_FIXED),
+	MT7623_PIN(130, "RAMBUF_IDATA3", EINT_NA, DRV_FIXED),
+	MT7623_PIN(131, "RAMBUF_IDATA4", EINT_NA, DRV_FIXED),
+	MT7623_PIN(132, "RAMBUF_IDATA5", EINT_NA, DRV_FIXED),
+	MT7623_PIN(133, "RAMBUF_IDATA6", EINT_NA, DRV_FIXED),
+	MT7623_PIN(134, "RAMBUF_IDATA7", EINT_NA, DRV_FIXED),
+	MT7623_PIN(135, "RAMBUF_IDATA8", EINT_NA, DRV_FIXED),
+	MT7623_PIN(136, "RAMBUF_IDATA9", EINT_NA, DRV_FIXED),
+	MT7623_PIN(137, "RAMBUF_IDATA10", EINT_NA, DRV_FIXED),
+	MT7623_PIN(138, "RAMBUF_IDATA11", EINT_NA, DRV_FIXED),
+	MT7623_PIN(139, "RAMBUF_IDATA12", EINT_NA, DRV_FIXED),
+	MT7623_PIN(140, "RAMBUF_IDATA13", EINT_NA, DRV_FIXED),
+	MT7623_PIN(141, "RAMBUF_IDATA14", EINT_NA, DRV_FIXED),
+	MT7623_PIN(142, "RAMBUF_IDATA15", EINT_NA, DRV_FIXED),
+	MT7623_PIN(143, "RAMBUF_ODATA0", EINT_NA, DRV_FIXED),
+	MT7623_PIN(144, "RAMBUF_ODATA1", EINT_NA, DRV_FIXED),
+	MT7623_PIN(145, "RAMBUF_ODATA2", EINT_NA, DRV_FIXED),
+	MT7623_PIN(146, "RAMBUF_ODATA3", EINT_NA, DRV_FIXED),
+	MT7623_PIN(147, "RAMBUF_ODATA4", EINT_NA, DRV_FIXED),
+	MT7623_PIN(148, "RAMBUF_ODATA5", EINT_NA, DRV_FIXED),
+	MT7623_PIN(149, "RAMBUF_ODATA6", EINT_NA, DRV_FIXED),
+	MT7623_PIN(150, "RAMBUF_ODATA7", EINT_NA, DRV_FIXED),
+	MT7623_PIN(151, "RAMBUF_ODATA8", EINT_NA, DRV_FIXED),
+	MT7623_PIN(152, "RAMBUF_ODATA9", EINT_NA, DRV_FIXED),
+	MT7623_PIN(153, "RAMBUF_ODATA10", EINT_NA, DRV_FIXED),
+	MT7623_PIN(154, "RAMBUF_ODATA11", EINT_NA, DRV_FIXED),
+	MT7623_PIN(155, "RAMBUF_ODATA12", EINT_NA, DRV_FIXED),
+	MT7623_PIN(156, "RAMBUF_ODATA13", EINT_NA, DRV_FIXED),
+	MT7623_PIN(157, "RAMBUF_ODATA14", EINT_NA, DRV_FIXED),
+	MT7623_PIN(158, "RAMBUF_ODATA15", EINT_NA, DRV_FIXED),
+	MT7623_PIN(159, "RAMBUF_BE0", EINT_NA, DRV_FIXED),
+	MT7623_PIN(160, "RAMBUF_BE1", EINT_NA, DRV_FIXED),
+	MT7623_PIN(161, "AP2PT_INT", EINT_NA, DRV_FIXED),
+	MT7623_PIN(162, "AP2PT_INT_CLR", EINT_NA, DRV_FIXED),
+	MT7623_PIN(163, "PT2AP_INT", EINT_NA, DRV_FIXED),
+	MT7623_PIN(164, "PT2AP_INT_CLR", EINT_NA, DRV_FIXED),
+	MT7623_PIN(165, "AP2UP_INT", EINT_NA, DRV_FIXED),
+	MT7623_PIN(166, "AP2UP_INT_CLR", EINT_NA, DRV_FIXED),
+	MT7623_PIN(167, "UP2AP_INT", EINT_NA, DRV_FIXED),
+	MT7623_PIN(168, "UP2AP_INT_CLR", EINT_NA, DRV_FIXED),
+	MT7623_PIN(169, "RAMBUF_ADDR0", EINT_NA, DRV_FIXED),
+	MT7623_PIN(170, "RAMBUF_ADDR1", EINT_NA, DRV_FIXED),
+	MT7623_PIN(171, "RAMBUF_ADDR2", EINT_NA, DRV_FIXED),
+	MT7623_PIN(172, "RAMBUF_ADDR3", EINT_NA, DRV_FIXED),
+	MT7623_PIN(173, "RAMBUF_ADDR4", EINT_NA, DRV_FIXED),
+	MT7623_PIN(174, "RAMBUF_ADDR5", EINT_NA, DRV_FIXED),
+	MT7623_PIN(175, "RAMBUF_ADDR6", EINT_NA, DRV_FIXED),
+	MT7623_PIN(176, "RAMBUF_ADDR7", EINT_NA, DRV_FIXED),
+	MT7623_PIN(177, "RAMBUF_ADDR8", EINT_NA, DRV_FIXED),
+	MT7623_PIN(178, "RAMBUF_ADDR9", EINT_NA, DRV_FIXED),
+	MT7623_PIN(179, "RAMBUF_ADDR10", EINT_NA, DRV_FIXED),
+	MT7623_PIN(180, "RAMBUF_RW", EINT_NA, DRV_FIXED),
+	MT7623_PIN(181, "RAMBUF_LAST", EINT_NA, DRV_FIXED),
+	MT7623_PIN(182, "RAMBUF_HP", EINT_NA, DRV_FIXED),
+	MT7623_PIN(183, "RAMBUF_REQ", EINT_NA, DRV_FIXED),
+	MT7623_PIN(184, "RAMBUF_ALE", EINT_NA, DRV_FIXED),
+	MT7623_PIN(185, "RAMBUF_DLE", EINT_NA, DRV_FIXED),
+	MT7623_PIN(186, "RAMBUF_WDLE", EINT_NA, DRV_FIXED),
+	MT7623_PIN(187, "RAMBUF_O_CLK", EINT_NA, DRV_FIXED),
+	MT7623_PIN(188, "I2S2_MCLK", 100, DRV_GRP1),
+	MT7623_PIN(189, "I2S3_DATA", 101, DRV_GRP1),
+	MT7623_PIN(190, "I2S3_DATA_IN", 102, DRV_GRP1),
+	MT7623_PIN(191, "I2S3_BCK", 103, DRV_GRP1),
+	MT7623_PIN(192, "I2S3_LRCK", 104, DRV_GRP1),
+	MT7623_PIN(193, "I2S3_MCLK", 105, DRV_GRP1),
+	MT7623_PIN(194, "I2S4_DATA", 106, DRV_GRP1),
+	MT7623_PIN(195, "I2S4_DATA_IN", 107, DRV_GRP1),
+	MT7623_PIN(196, "I2S4_BCK", 108, DRV_GRP1),
+	MT7623_PIN(197, "I2S4_LRCK", 109, DRV_GRP1),
+	MT7623_PIN(198, "I2S4_MCLK", 110, DRV_GRP1),
+	MT7623_PIN(199, "SPI1_CLK", 111, DRV_GRP3),
+	MT7623_PIN(200, "SPDIF_OUT", 112, DRV_GRP1),
+	MT7623_PIN(201, "SPDIF_IN0", 113, DRV_GRP1),
+	MT7623_PIN(202, "SPDIF_IN1", 114, DRV_GRP1),
+	MT7623_PIN(203, "PWM0", 115, DRV_GRP1),
+	MT7623_PIN(204, "PWM1", 116, DRV_GRP1),
+	MT7623_PIN(205, "PWM2", 117, DRV_GRP1),
+	MT7623_PIN(206, "PWM3", 118, DRV_GRP1),
+	MT7623_PIN(207, "PWM4", 119, DRV_GRP1),
+	MT7623_PIN(208, "AUD_EXT_CK1", 120, DRV_GRP1),
+	MT7623_PIN(209, "AUD_EXT_CK2", 121, DRV_GRP1),
+	MT7623_PIN(210, "AUD_CLOCK", EINT_NA, DRV_GRP3),
+	MT7623_PIN(211, "DVP_RESET", EINT_NA, DRV_GRP3),
+	MT7623_PIN(212, "DVP_CLOCK", EINT_NA, DRV_GRP3),
+	MT7623_PIN(213, "DVP_CS", EINT_NA, DRV_GRP3),
+	MT7623_PIN(214, "DVP_CK", EINT_NA, DRV_GRP3),
+	MT7623_PIN(215, "DVP_DI", EINT_NA, DRV_GRP3),
+	MT7623_PIN(216, "DVP_DO", EINT_NA, DRV_GRP3),
+	MT7623_PIN(217, "AP_CS", EINT_NA, DRV_GRP3),
+	MT7623_PIN(218, "AP_CK", EINT_NA, DRV_GRP3),
+	MT7623_PIN(219, "AP_DI", EINT_NA, DRV_GRP3),
+	MT7623_PIN(220, "AP_DO", EINT_NA, DRV_GRP3),
+	MT7623_PIN(221, "DVD_BCLK", EINT_NA, DRV_GRP3),
+	MT7623_PIN(222, "T8032_CLK", EINT_NA, DRV_GRP3),
+	MT7623_PIN(223, "AP_BCLK", EINT_NA, DRV_GRP3),
+	MT7623_PIN(224, "HOST_CS", EINT_NA, DRV_GRP3),
+	MT7623_PIN(225, "HOST_CK", EINT_NA, DRV_GRP3),
+	MT7623_PIN(226, "HOST_DO0", EINT_NA, DRV_GRP3),
+	MT7623_PIN(227, "HOST_DO1", EINT_NA, DRV_GRP3),
+	MT7623_PIN(228, "SLV_CS", EINT_NA, DRV_GRP3),
+	MT7623_PIN(229, "SLV_CK", EINT_NA, DRV_GRP3),
+	MT7623_PIN(230, "SLV_DI0", EINT_NA, DRV_GRP3),
+	MT7623_PIN(231, "SLV_DI1", EINT_NA, DRV_GRP3),
+	MT7623_PIN(232, "AP2DSP_INT", EINT_NA, DRV_GRP3),
+	MT7623_PIN(233, "AP2DSP_INT_CLR", EINT_NA, DRV_GRP3),
+	MT7623_PIN(234, "DSP2AP_INT", EINT_NA, DRV_GRP3),
+	MT7623_PIN(235, "DSP2AP_INT_CLR", EINT_NA, DRV_GRP3),
+	MT7623_PIN(236, "EXT_SDIO3", 122, DRV_GRP1),
+	MT7623_PIN(237, "EXT_SDIO2", 123, DRV_GRP1),
+	MT7623_PIN(238, "EXT_SDIO1", 124, DRV_GRP1),
+	MT7623_PIN(239, "EXT_SDIO0", 125, DRV_GRP1),
+	MT7623_PIN(240, "EXT_XCS", 126, DRV_GRP1),
+	MT7623_PIN(241, "EXT_SCK", 127, DRV_GRP1),
+	MT7623_PIN(242, "URTS2", 128, DRV_GRP1),
+	MT7623_PIN(243, "UCTS2", 129, DRV_GRP1),
+	MT7623_PIN(244, "HDMI_SDA_RX", 130, DRV_FIXED),
+	MT7623_PIN(245, "HDMI_SCL_RX", 131, DRV_FIXED),
+	MT7623_PIN(246, "MHL_SENCE", 132, DRV_FIXED),
+	MT7623_PIN(247, "HDMI_HPD_CBUS_RX", 69, DRV_FIXED),
+	MT7623_PIN(248, "HDMI_TESTOUTP_RX", 133, DRV_GRP1),
+	MT7623_PIN(249, "MSDC0E_RSTB", 134, DRV_GRP4),
+	MT7623_PIN(250, "MSDC0E_DAT7", 135, DRV_GRP4),
+	MT7623_PIN(251, "MSDC0E_DAT6", 136, DRV_GRP4),
+	MT7623_PIN(252, "MSDC0E_DAT5", 137, DRV_GRP4),
+	MT7623_PIN(253, "MSDC0E_DAT4", 138, DRV_GRP4),
+	MT7623_PIN(254, "MSDC0E_DAT3", 139, DRV_GRP4),
+	MT7623_PIN(255, "MSDC0E_DAT2", 140, DRV_GRP4),
+	MT7623_PIN(256, "MSDC0E_DAT1", 141, DRV_GRP4),
+	MT7623_PIN(257, "MSDC0E_DAT0", 142, DRV_GRP4),
+	MT7623_PIN(258, "MSDC0E_CMD", 143, DRV_GRP4),
+	MT7623_PIN(259, "MSDC0E_CLK", 144, DRV_GRP4),
+	MT7623_PIN(260, "MSDC0E_DSL", 145, DRV_GRP4),
+	MT7623_PIN(261, "MSDC1_INS", 146, DRV_GRP4),
+	MT7623_PIN(262, "G2_TXEN", 8, DRV_GRP1),
+	MT7623_PIN(263, "G2_TXD3", 9, DRV_GRP1),
+	MT7623_PIN(264, "G2_TXD2", 10, DRV_GRP1),
+	MT7623_PIN(265, "G2_TXD1", 11, DRV_GRP1),
+	MT7623_PIN(266, "G2_TXD0", EINT_NA, DRV_GRP1),
+	MT7623_PIN(267, "G2_TXC", EINT_NA, DRV_GRP1),
+	MT7623_PIN(268, "G2_RXC", EINT_NA, DRV_GRP1),
+	MT7623_PIN(269, "G2_RXD0", EINT_NA, DRV_GRP1),
+	MT7623_PIN(270, "G2_RXD1", EINT_NA, DRV_GRP1),
+	MT7623_PIN(271, "G2_RXD2", EINT_NA, DRV_GRP1),
+	MT7623_PIN(272, "G2_RXD3", EINT_NA, DRV_GRP1),
+	MT7623_PIN(273, "ESW_INT", 168, DRV_GRP1),
+	MT7623_PIN(274, "G2_RXDV", EINT_NA, DRV_GRP1),
+	MT7623_PIN(275, "MDC", EINT_NA, DRV_GRP1),
+	MT7623_PIN(276, "MDIO", EINT_NA, DRV_GRP1),
+	MT7623_PIN(277, "ESW_RST", EINT_NA, DRV_GRP1),
+	MT7623_PIN(278, "JTAG_RESET", 147, DRV_GRP3),
+	MT7623_PIN(279, "USB3_RES_BOND", EINT_NA, DRV_GRP1),
+};
+
+/* List all pin groups that are dedicated to the enablement of a certain
+ * hardware block, together with the mux mode each of the pins must be
+ * programmed to. The hardware typically offers multiple pinout
+ * combinations for the same block, hence the numbered variants below.
+ */
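+
+/* In each pair of arrays that follows, <name>_pins[] lists the physical
+ * pins making up a group and <name>_funcs[] gives the mux value programmed
+ * into the pin at the same index; for example, the "aud_ext_clk0" group
+ * below consists of pin 208 switched to mux mode 1.
+ */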
+
+/* AUDIO EXT CLK */
+static int mt7623_aud_ext_clk0_pins[] = { 208, };
+static int mt7623_aud_ext_clk0_funcs[] = { 1, };
+static int mt7623_aud_ext_clk1_pins[] = { 209, };
+static int mt7623_aud_ext_clk1_funcs[] = { 1, };
+
+/* DISP PWM */
+static int mt7623_disp_pwm_0_pins[] = { 72, };
+static int mt7623_disp_pwm_0_funcs[] = { 5, };
+static int mt7623_disp_pwm_1_pins[] = { 203, };
+static int mt7623_disp_pwm_1_funcs[] = { 2, };
+static int mt7623_disp_pwm_2_pins[] = { 208, };
+static int mt7623_disp_pwm_2_funcs[] = { 5, };
+
+/* ESW */
+static int mt7623_esw_int_pins[] = { 273, };
+static int mt7623_esw_int_funcs[] = { 1, };
+static int mt7623_esw_rst_pins[] = { 277, };
+static int mt7623_esw_rst_funcs[] = { 1, };
+
+/* EPHY */
+static int mt7623_ephy_pins[] = { 262, 263, 264, 265, 266, 267, 268,
+				  269, 270, 271, 272, 274, };
+static int mt7623_ephy_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, };
+
+/* EXT_SDIO */
+static int mt7623_ext_sdio_pins[] = { 236, 237, 238, 239, 240, 241, };
+static int mt7623_ext_sdio_funcs[] = { 1, 1, 1, 1, 1, 1, };
+
+/* HDMI RX */
+static int mt7623_hdmi_rx_pins[] = { 247, 248, };
+static int mt7623_hdmi_rx_funcs[] = { 1, 1 };
+static int mt7623_hdmi_rx_i2c_pins[] = { 244, 245, };
+static int mt7623_hdmi_rx_i2c_funcs[] = { 1, 1 };
+
+/* HDMI TX */
+static int mt7623_hdmi_cec_pins[] = { 122, };
+static int mt7623_hdmi_cec_funcs[] = { 1, };
+static int mt7623_hdmi_htplg_pins[] = { 123, };
+static int mt7623_hdmi_htplg_funcs[] = { 1, };
+static int mt7623_hdmi_i2c_pins[] = { 124, 125, };
+static int mt7623_hdmi_i2c_funcs[] = { 1, 1 };
+
+/* I2C */
+static int mt7623_i2c0_pins[] = { 75, 76, };
+static int mt7623_i2c0_funcs[] = { 1, 1, };
+static int mt7623_i2c1_0_pins[] = { 57, 58, };
+static int mt7623_i2c1_0_funcs[] = { 1, 1, };
+static int mt7623_i2c1_1_pins[] = { 242, 243, };
+static int mt7623_i2c1_1_funcs[] = { 4, 4, };
+static int mt7623_i2c1_2_pins[] = { 85, 86, };
+static int mt7623_i2c1_2_funcs[] = { 3, 3, };
+static int mt7623_i2c1_3_pins[] = { 105, 106, };
+static int mt7623_i2c1_3_funcs[] = { 3, 3, };
+static int mt7623_i2c1_4_pins[] = { 124, 125, };
+static int mt7623_i2c1_4_funcs[] = { 4, 4, };
+static int mt7623_i2c2_0_pins[] = { 77, 78, };
+static int mt7623_i2c2_0_funcs[] = { 1, 1, };
+static int mt7623_i2c2_1_pins[] = { 89, 90, };
+static int mt7623_i2c2_1_funcs[] = { 3, 3, };
+static int mt7623_i2c2_2_pins[] = { 109, 110, };
+static int mt7623_i2c2_2_funcs[] = { 3, 3, };
+static int mt7623_i2c2_3_pins[] = { 122, 123, };
+static int mt7623_i2c2_3_funcs[] = { 4, 4, };
+
+/* I2S */
+static int mt7623_i2s0_pins[] = { 49, 72, 73, 74, 126, };
+static int mt7623_i2s0_funcs[] = { 1, 1, 1, 1, 1, };
+static int mt7623_i2s1_pins[] = { 33, 34, 35, 36, 37, };
+static int mt7623_i2s1_funcs[] = { 1, 1, 1, 1, 1, };
+static int mt7623_i2s2_bclk_lrclk_mclk_pins[] = { 50, 52, 188, };
+static int mt7623_i2s2_bclk_lrclk_mclk_funcs[] = { 1, 1, 1, };
+static int mt7623_i2s2_data_in_pins[] = { 51, };
+static int mt7623_i2s2_data_in_funcs[] = { 1, };
+static int mt7623_i2s2_data_0_pins[] = { 203, };
+static int mt7623_i2s2_data_0_funcs[] = { 9, };
+static int mt7623_i2s2_data_1_pins[] = { 38,  };
+static int mt7623_i2s2_data_1_funcs[] = { 4, };
+static int mt7623_i2s3_bclk_lrclk_mclk_pins[] = { 191, 192, 193, };
+static int mt7623_i2s3_bclk_lrclk_mclk_funcs[] = { 1, 1, 1, };
+static int mt7623_i2s3_data_in_pins[] = { 190, };
+static int mt7623_i2s3_data_in_funcs[] = { 1, };
+static int mt7623_i2s3_data_0_pins[] = { 204, };
+static int mt7623_i2s3_data_0_funcs[] = { 9, };
+static int mt7623_i2s3_data_1_pins[] = { 2, };
+static int mt7623_i2s3_data_1_funcs[] = { 0, };
+static int mt7623_i2s4_pins[] = { 194, 195, 196, 197, 198, };
+static int mt7623_i2s4_funcs[] = { 1, 1, 1, 1, 1, };
+static int mt7623_i2s5_pins[] = { 16, 17, 30, 31, 32, };
+static int mt7623_i2s5_funcs[] = { 1, 1, 1, 1, 1, };
+
+/* IR */
+static int mt7623_ir_pins[] = { 46, };
+static int mt7623_ir_funcs[] = { 1, };
+
+/* LCD */
+static int mt7623_mipi_tx_pins[] = { 91, 92, 93, 94, 95, 96, 97, 98,
+				     99, 100, };
+static int mt7623_mipi_tx_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, };
+static int mt7623_dsi_te_pins[] = { 84, };
+static int mt7623_dsi_te_funcs[] = { 1, };
+static int mt7623_lcm_rst_pins[] = { 83, };
+static int mt7623_lcm_rst_funcs[] = { 1, };
+
+/* MDC/MDIO */
+static int mt7623_mdc_mdio_pins[] = { 275, 276, };
+static int mt7623_mdc_mdio_funcs[] = { 1, 1, };
+
+/* MSDC */
+static int mt7623_msdc0_pins[] = { 111, 112, 113, 114, 115, 116, 117, 118,
+				   119, 120, 121, };
+static int mt7623_msdc0_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, };
+static int mt7623_msdc1_pins[] = { 105, 106, 107, 108, 109, 110, };
+static int mt7623_msdc1_funcs[] = { 1, 1, 1, 1, 1, 1, };
+static int mt7623_msdc1_ins_pins[] = { 261, };
+static int mt7623_msdc1_ins_funcs[] = { 1, };
+static int mt7623_msdc1_wp_0_pins[] = { 29, };
+static int mt7623_msdc1_wp_0_funcs[] = { 1, };
+static int mt7623_msdc1_wp_1_pins[] = { 55, };
+static int mt7623_msdc1_wp_1_funcs[] = { 3, };
+static int mt7623_msdc1_wp_2_pins[] = { 209, };
+static int mt7623_msdc1_wp_2_funcs[] = { 2, };
+static int mt7623_msdc2_pins[] = { 85, 86, 87, 88, 89, 90, };
+static int mt7623_msdc2_funcs[] = { 1, 1, 1, 1, 1, 1, };
+static int mt7623_msdc3_pins[] = { 249, 250, 251, 252, 253, 254, 255, 256,
+				   257, 258, 259, 260, };
+static int mt7623_msdc3_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, };
+
+/* NAND */
+static int mt7623_nandc_pins[] = { 43, 47, 48, 111, 112, 113, 114, 115,
+				   116, 117, 118, 119, 120, 121, };
+static int mt7623_nandc_funcs[] = { 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+				   4, 4, };
+static int mt7623_nandc_ceb0_pins[] = { 45, };
+static int mt7623_nandc_ceb0_funcs[] = { 1, };
+static int mt7623_nandc_ceb1_pins[] = { 44, };
+static int mt7623_nandc_ceb1_funcs[] = { 1, };
+
+/* RTC */
+static int mt7623_rtc_pins[] = { 10, };
+static int mt7623_rtc_funcs[] = { 1, };
+
+/* OTG */
+static int mt7623_otg_iddig0_0_pins[] = { 29, };
+static int mt7623_otg_iddig0_0_funcs[] = { 1, };
+static int mt7623_otg_iddig0_1_pins[] = { 44, };
+static int mt7623_otg_iddig0_1_funcs[] = { 2, };
+static int mt7623_otg_iddig0_2_pins[] = { 236, };
+static int mt7623_otg_iddig0_2_funcs[] = { 2, };
+static int mt7623_otg_iddig1_0_pins[] = { 27, };
+static int mt7623_otg_iddig1_0_funcs[] = { 2, };
+static int mt7623_otg_iddig1_1_pins[] = { 47, };
+static int mt7623_otg_iddig1_1_funcs[] = { 2, };
+static int mt7623_otg_iddig1_2_pins[] = { 238, };
+static int mt7623_otg_iddig1_2_funcs[] = { 2, };
+static int mt7623_otg_drv_vbus0_0_pins[] = { 28, };
+static int mt7623_otg_drv_vbus0_0_funcs[] = { 1, };
+static int mt7623_otg_drv_vbus0_1_pins[] = { 45, };
+static int mt7623_otg_drv_vbus0_1_funcs[] = { 2, };
+static int mt7623_otg_drv_vbus0_2_pins[] = { 237, };
+static int mt7623_otg_drv_vbus0_2_funcs[] = { 2, };
+static int mt7623_otg_drv_vbus1_0_pins[] = { 26, };
+static int mt7623_otg_drv_vbus1_0_funcs[] = { 2, };
+static int mt7623_otg_drv_vbus1_1_pins[] = { 48, };
+static int mt7623_otg_drv_vbus1_1_funcs[] = { 2, };
+static int mt7623_otg_drv_vbus1_2_pins[] = { 239, };
+static int mt7623_otg_drv_vbus1_2_funcs[] = { 2, };
+
+/* PCIE */
+static int mt7623_pcie0_0_perst_pins[] = { 208, };
+static int mt7623_pcie0_0_perst_funcs[] = { 3, };
+static int mt7623_pcie0_1_perst_pins[] = { 22, };
+static int mt7623_pcie0_1_perst_funcs[] = { 2, };
+static int mt7623_pcie1_0_perst_pins[] = { 209, };
+static int mt7623_pcie1_0_perst_funcs[] = { 3, };
+static int mt7623_pcie1_1_perst_pins[] = { 23, };
+static int mt7623_pcie1_1_perst_funcs[] = { 2, };
+static int mt7623_pcie2_0_perst_pins[] = { 24, };
+static int mt7623_pcie2_0_perst_funcs[] = { 2, };
+static int mt7623_pcie2_1_perst_pins[] = { 29, };
+static int mt7623_pcie2_1_perst_funcs[] = { 6, };
+static int mt7623_pcie0_0_wake_pins[] = { 28, };
+static int mt7623_pcie0_0_wake_funcs[] = { 6, };
+static int mt7623_pcie0_1_wake_pins[] = { 251, };
+static int mt7623_pcie0_1_wake_funcs[] = { 6, };
+static int mt7623_pcie1_0_wake_pins[] = { 27, };
+static int mt7623_pcie1_0_wake_funcs[] = { 6, };
+static int mt7623_pcie1_1_wake_pins[] = { 253, };
+static int mt7623_pcie1_1_wake_funcs[] = { 6, };
+static int mt7623_pcie2_0_wake_pins[] = { 26, };
+static int mt7623_pcie2_0_wake_funcs[] = { 6, };
+static int mt7623_pcie2_1_wake_pins[] = { 255, };
+static int mt7623_pcie2_1_wake_funcs[] = { 6, };
+static int mt7623_pcie0_clkreq_pins[] = { 250, };
+static int mt7623_pcie0_clkreq_funcs[] = { 6, };
+static int mt7623_pcie1_clkreq_pins[] = { 252, };
+static int mt7623_pcie1_clkreq_funcs[] = { 6, };
+static int mt7623_pcie2_clkreq_pins[] = { 254, };
+static int mt7623_pcie2_clkreq_funcs[] = { 6, };
+
+/* the pcie_*_rev are only used for MT7623 */
+static int mt7623_pcie0_0_rev_perst_pins[] = { 208, };
+static int mt7623_pcie0_0_rev_perst_funcs[] = { 11, };
+static int mt7623_pcie0_1_rev_perst_pins[] = { 22, };
+static int mt7623_pcie0_1_rev_perst_funcs[] = { 10, };
+static int mt7623_pcie1_0_rev_perst_pins[] = { 209, };
+static int mt7623_pcie1_0_rev_perst_funcs[] = { 11, };
+static int mt7623_pcie1_1_rev_perst_pins[] = { 23, };
+static int mt7623_pcie1_1_rev_perst_funcs[] = { 10, };
+static int mt7623_pcie2_0_rev_perst_pins[] = { 24, };
+static int mt7623_pcie2_0_rev_perst_funcs[] = { 11, };
+static int mt7623_pcie2_1_rev_perst_pins[] = { 29, };
+static int mt7623_pcie2_1_rev_perst_funcs[] = { 14, };
+
+/* PCM */
+static int mt7623_pcm_clk_0_pins[] = { 18, };
+static int mt7623_pcm_clk_0_funcs[] = { 1, };
+static int mt7623_pcm_clk_1_pins[] = { 17, };
+static int mt7623_pcm_clk_1_funcs[] = { 3, };
+static int mt7623_pcm_clk_2_pins[] = { 35, };
+static int mt7623_pcm_clk_2_funcs[] = { 3, };
+static int mt7623_pcm_clk_3_pins[] = { 50, };
+static int mt7623_pcm_clk_3_funcs[] = { 3, };
+static int mt7623_pcm_clk_4_pins[] = { 74, };
+static int mt7623_pcm_clk_4_funcs[] = { 3, };
+static int mt7623_pcm_clk_5_pins[] = { 191, };
+static int mt7623_pcm_clk_5_funcs[] = { 3, };
+static int mt7623_pcm_clk_6_pins[] = { 196, };
+static int mt7623_pcm_clk_6_funcs[] = { 3, };
+static int mt7623_pcm_sync_0_pins[] = { 19, };
+static int mt7623_pcm_sync_0_funcs[] = { 1, };
+static int mt7623_pcm_sync_1_pins[] = { 30, };
+static int mt7623_pcm_sync_1_funcs[] = { 3, };
+static int mt7623_pcm_sync_2_pins[] = { 36, };
+static int mt7623_pcm_sync_2_funcs[] = { 3, };
+static int mt7623_pcm_sync_3_pins[] = { 52, };
+static int mt7623_pcm_sync_3_funcs[] = { 3, };
+static int mt7623_pcm_sync_4_pins[] = { 73, };
+static int mt7623_pcm_sync_4_funcs[] = { 3, };
+static int mt7623_pcm_sync_5_pins[] = { 192, };
+static int mt7623_pcm_sync_5_funcs[] = { 3, };
+static int mt7623_pcm_sync_6_pins[] = { 197, };
+static int mt7623_pcm_sync_6_funcs[] = { 3, };
+static int mt7623_pcm_rx_0_pins[] = { 20, };
+static int mt7623_pcm_rx_0_funcs[] = { 1, };
+static int mt7623_pcm_rx_1_pins[] = { 16, };
+static int mt7623_pcm_rx_1_funcs[] = { 3, };
+static int mt7623_pcm_rx_2_pins[] = { 34, };
+static int mt7623_pcm_rx_2_funcs[] = { 3, };
+static int mt7623_pcm_rx_3_pins[] = { 51, };
+static int mt7623_pcm_rx_3_funcs[] = { 3, };
+static int mt7623_pcm_rx_4_pins[] = { 72, };
+static int mt7623_pcm_rx_4_funcs[] = { 3, };
+static int mt7623_pcm_rx_5_pins[] = { 190, };
+static int mt7623_pcm_rx_5_funcs[] = { 3, };
+static int mt7623_pcm_rx_6_pins[] = { 195, };
+static int mt7623_pcm_rx_6_funcs[] = { 3, };
+static int mt7623_pcm_tx_0_pins[] = { 21, };
+static int mt7623_pcm_tx_0_funcs[] = { 1, };
+static int mt7623_pcm_tx_1_pins[] = { 32, };
+static int mt7623_pcm_tx_1_funcs[] = { 3, };
+static int mt7623_pcm_tx_2_pins[] = { 33, };
+static int mt7623_pcm_tx_2_funcs[] = { 3, };
+static int mt7623_pcm_tx_3_pins[] = { 38, };
+static int mt7623_pcm_tx_3_funcs[] = { 3, };
+static int mt7623_pcm_tx_4_pins[] = { 49, };
+static int mt7623_pcm_tx_4_funcs[] = { 3, };
+static int mt7623_pcm_tx_5_pins[] = { 189, };
+static int mt7623_pcm_tx_5_funcs[] = { 3, };
+static int mt7623_pcm_tx_6_pins[] = { 194, };
+static int mt7623_pcm_tx_6_funcs[] = { 3, };
+
+/* PWM */
+static int mt7623_pwm_ch1_0_pins[] = { 203, };
+static int mt7623_pwm_ch1_0_funcs[] = { 1, };
+static int mt7623_pwm_ch1_1_pins[] = { 208, };
+static int mt7623_pwm_ch1_1_funcs[] = { 2, };
+static int mt7623_pwm_ch1_2_pins[] = { 72, };
+static int mt7623_pwm_ch1_2_funcs[] = { 4, };
+static int mt7623_pwm_ch1_3_pins[] = { 88, };
+static int mt7623_pwm_ch1_3_funcs[] = { 3, };
+static int mt7623_pwm_ch1_4_pins[] = { 108, };
+static int mt7623_pwm_ch1_4_funcs[] = { 3, };
+static int mt7623_pwm_ch2_0_pins[] = { 204, };
+static int mt7623_pwm_ch2_0_funcs[] = { 1, };
+static int mt7623_pwm_ch2_1_pins[] = { 53, };
+static int mt7623_pwm_ch2_1_funcs[] = { 5, };
+static int mt7623_pwm_ch2_2_pins[] = { 88, };
+static int mt7623_pwm_ch2_2_funcs[] = { 6, };
+static int mt7623_pwm_ch2_3_pins[] = { 108, };
+static int mt7623_pwm_ch2_3_funcs[] = { 6, };
+static int mt7623_pwm_ch2_4_pins[] = { 209, };
+static int mt7623_pwm_ch2_4_funcs[] = { 5, };
+static int mt7623_pwm_ch3_0_pins[] = { 205, };
+static int mt7623_pwm_ch3_0_funcs[] = { 1, };
+static int mt7623_pwm_ch3_1_pins[] = { 55, };
+static int mt7623_pwm_ch3_1_funcs[] = { 5, };
+static int mt7623_pwm_ch3_2_pins[] = { 89, };
+static int mt7623_pwm_ch3_2_funcs[] = { 6, };
+static int mt7623_pwm_ch3_3_pins[] = { 109, };
+static int mt7623_pwm_ch3_3_funcs[] = { 6, };
+static int mt7623_pwm_ch4_0_pins[] = { 206, };
+static int mt7623_pwm_ch4_0_funcs[] = { 1, };
+static int mt7623_pwm_ch4_1_pins[] = { 90, };
+static int mt7623_pwm_ch4_1_funcs[] = { 6, };
+static int mt7623_pwm_ch4_2_pins[] = { 110, };
+static int mt7623_pwm_ch4_2_funcs[] = { 6, };
+static int mt7623_pwm_ch4_3_pins[] = { 124, };
+static int mt7623_pwm_ch4_3_funcs[] = { 5, };
+static int mt7623_pwm_ch5_0_pins[] = { 207, };
+static int mt7623_pwm_ch5_0_funcs[] = { 1, };
+static int mt7623_pwm_ch5_1_pins[] = { 125, };
+static int mt7623_pwm_ch5_1_funcs[] = { 5, };
+
+/* PWRAP */
+static int mt7623_pwrap_pins[] = { 0, 1, 2, 3, 4, 5, 6, };
+static int mt7623_pwrap_funcs[] = { 1, 1, 1, 1, 1, 1, 1, };
+
+/* SPDIF */
+static int mt7623_spdif_in0_0_pins[] = { 56, };
+static int mt7623_spdif_in0_0_funcs[] = { 3, };
+static int mt7623_spdif_in0_1_pins[] = { 201, };
+static int mt7623_spdif_in0_1_funcs[] = { 1, };
+static int mt7623_spdif_in1_0_pins[] = { 54, };
+static int mt7623_spdif_in1_0_funcs[] = { 3, };
+static int mt7623_spdif_in1_1_pins[] = { 202, };
+static int mt7623_spdif_in1_1_funcs[] = { 1, };
+static int mt7623_spdif_out_pins[] = { 202, };
+static int mt7623_spdif_out_funcs[] = { 1, };
+
+/* SPI */
+static int mt7623_spi0_pins[] = { 53, 54, 55, 56, };
+static int mt7623_spi0_funcs[] = { 1, 1, 1, 1, };
+static int mt7623_spi1_pins[] = { 7, 199, 8, 9, };
+static int mt7623_spi1_funcs[] = { 1, 1, 1, 1, };
+static int mt7623_spi2_pins[] = { 101, 104, 102, 103, };
+static int mt7623_spi2_funcs[] = { 1, 1, 1, 1, };
+
+/* UART */
+static int mt7623_uart0_0_txd_rxd_pins[] = { 79, 80, };
+static int mt7623_uart0_0_txd_rxd_funcs[] = { 1, 1, };
+static int mt7623_uart0_1_txd_rxd_pins[] = { 87, 88, };
+static int mt7623_uart0_1_txd_rxd_funcs[] = { 5, 5, };
+static int mt7623_uart0_2_txd_rxd_pins[] = { 107, 108, };
+static int mt7623_uart0_2_txd_rxd_funcs[] = { 5, 5, };
+static int mt7623_uart0_3_txd_rxd_pins[] = { 123, 122, };
+static int mt7623_uart0_3_txd_rxd_funcs[] = { 5, 5, };
+static int mt7623_uart0_rts_cts_pins[] = { 22, 23, };
+static int mt7623_uart0_rts_cts_funcs[] = { 1, 1, };
+static int mt7623_uart1_0_txd_rxd_pins[] = { 81, 82, };
+static int mt7623_uart1_0_txd_rxd_funcs[] = { 1, 1, };
+static int mt7623_uart1_1_txd_rxd_pins[] = { 89, 90, };
+static int mt7623_uart1_1_txd_rxd_funcs[] = { 5, 5, };
+static int mt7623_uart1_2_txd_rxd_pins[] = { 109, 110, };
+static int mt7623_uart1_2_txd_rxd_funcs[] = { 5, 5, };
+static int mt7623_uart1_rts_cts_pins[] = { 24, 25, };
+static int mt7623_uart1_rts_cts_funcs[] = { 1, 1, };
+static int mt7623_uart2_0_txd_rxd_pins[] = { 14, 15, };
+static int mt7623_uart2_0_txd_rxd_funcs[] = { 1, 1, };
+static int mt7623_uart2_1_txd_rxd_pins[] = { 200, 201, };
+static int mt7623_uart2_1_txd_rxd_funcs[] = { 6, 6, };
+static int mt7623_uart2_rts_cts_pins[] = { 242, 243, };
+static int mt7623_uart2_rts_cts_funcs[] = { 1, 1, };
+static int mt7623_uart3_txd_rxd_pins[] = { 242, 243, };
+static int mt7623_uart3_txd_rxd_funcs[] = { 2, 2, };
+static int mt7623_uart3_rts_cts_pins[] = { 26, 27, };
+static int mt7623_uart3_rts_cts_funcs[] = { 1, 1, };
+
+/* Watchdog */
+static int mt7623_watchdog_0_pins[] = { 11, };
+static int mt7623_watchdog_0_funcs[] = { 1, };
+static int mt7623_watchdog_1_pins[] = { 121, };
+static int mt7623_watchdog_1_funcs[] = { 5, };
+
+static const struct group_desc mt7623_groups[] = {
+	PINCTRL_PIN_GROUP("aud_ext_clk0", mt7623_aud_ext_clk0),
+	PINCTRL_PIN_GROUP("aud_ext_clk1", mt7623_aud_ext_clk1),
+	PINCTRL_PIN_GROUP("dsi_te", mt7623_dsi_te),
+	PINCTRL_PIN_GROUP("disp_pwm_0", mt7623_disp_pwm_0),
+	PINCTRL_PIN_GROUP("disp_pwm_1", mt7623_disp_pwm_1),
+	PINCTRL_PIN_GROUP("disp_pwm_2", mt7623_disp_pwm_2),
+	PINCTRL_PIN_GROUP("ephy", mt7623_ephy),
+	PINCTRL_PIN_GROUP("esw_int", mt7623_esw_int),
+	PINCTRL_PIN_GROUP("esw_rst", mt7623_esw_rst),
+	PINCTRL_PIN_GROUP("ext_sdio", mt7623_ext_sdio),
+	PINCTRL_PIN_GROUP("hdmi_cec", mt7623_hdmi_cec),
+	PINCTRL_PIN_GROUP("hdmi_htplg", mt7623_hdmi_htplg),
+	PINCTRL_PIN_GROUP("hdmi_i2c", mt7623_hdmi_i2c),
+	PINCTRL_PIN_GROUP("hdmi_rx", mt7623_hdmi_rx),
+	PINCTRL_PIN_GROUP("hdmi_rx_i2c", mt7623_hdmi_rx_i2c),
+	PINCTRL_PIN_GROUP("i2c0", mt7623_i2c0),
+	PINCTRL_PIN_GROUP("i2c1_0", mt7623_i2c1_0),
+	PINCTRL_PIN_GROUP("i2c1_1", mt7623_i2c1_1),
+	PINCTRL_PIN_GROUP("i2c1_2", mt7623_i2c1_2),
+	PINCTRL_PIN_GROUP("i2c1_3", mt7623_i2c1_3),
+	PINCTRL_PIN_GROUP("i2c1_4", mt7623_i2c1_4),
+	PINCTRL_PIN_GROUP("i2c2_0", mt7623_i2c2_0),
+	PINCTRL_PIN_GROUP("i2c2_1", mt7623_i2c2_1),
+	PINCTRL_PIN_GROUP("i2c2_2", mt7623_i2c2_2),
+	PINCTRL_PIN_GROUP("i2c2_3", mt7623_i2c2_3),
+	PINCTRL_PIN_GROUP("i2s0", mt7623_i2s0),
+	PINCTRL_PIN_GROUP("i2s1", mt7623_i2s1),
+	PINCTRL_PIN_GROUP("i2s4", mt7623_i2s4),
+	PINCTRL_PIN_GROUP("i2s5", mt7623_i2s5),
+	PINCTRL_PIN_GROUP("i2s2_bclk_lrclk_mclk", mt7623_i2s2_bclk_lrclk_mclk),
+	PINCTRL_PIN_GROUP("i2s3_bclk_lrclk_mclk", mt7623_i2s3_bclk_lrclk_mclk),
+	PINCTRL_PIN_GROUP("i2s2_data_in", mt7623_i2s2_data_in),
+	PINCTRL_PIN_GROUP("i2s3_data_in", mt7623_i2s3_data_in),
+	PINCTRL_PIN_GROUP("i2s2_data_0", mt7623_i2s2_data_0),
+	PINCTRL_PIN_GROUP("i2s2_data_1", mt7623_i2s2_data_1),
+	PINCTRL_PIN_GROUP("i2s3_data_0", mt7623_i2s3_data_0),
+	PINCTRL_PIN_GROUP("i2s3_data_1", mt7623_i2s3_data_1),
+	PINCTRL_PIN_GROUP("ir", mt7623_ir),
+	PINCTRL_PIN_GROUP("lcm_rst", mt7623_lcm_rst),
+	PINCTRL_PIN_GROUP("mdc_mdio", mt7623_mdc_mdio),
+	PINCTRL_PIN_GROUP("mipi_tx", mt7623_mipi_tx),
+	PINCTRL_PIN_GROUP("msdc0", mt7623_msdc0),
+	PINCTRL_PIN_GROUP("msdc1", mt7623_msdc1),
+	PINCTRL_PIN_GROUP("msdc1_ins", mt7623_msdc1_ins),
+	PINCTRL_PIN_GROUP("msdc1_wp_0", mt7623_msdc1_wp_0),
+	PINCTRL_PIN_GROUP("msdc1_wp_1", mt7623_msdc1_wp_1),
+	PINCTRL_PIN_GROUP("msdc1_wp_2", mt7623_msdc1_wp_2),
+	PINCTRL_PIN_GROUP("msdc2", mt7623_msdc2),
+	PINCTRL_PIN_GROUP("msdc3", mt7623_msdc3),
+	PINCTRL_PIN_GROUP("nandc", mt7623_nandc),
+	PINCTRL_PIN_GROUP("nandc_ceb0", mt7623_nandc_ceb0),
+	PINCTRL_PIN_GROUP("nandc_ceb1", mt7623_nandc_ceb1),
+	PINCTRL_PIN_GROUP("otg_iddig0_0", mt7623_otg_iddig0_0),
+	PINCTRL_PIN_GROUP("otg_iddig0_1", mt7623_otg_iddig0_1),
+	PINCTRL_PIN_GROUP("otg_iddig0_2", mt7623_otg_iddig0_2),
+	PINCTRL_PIN_GROUP("otg_iddig1_0", mt7623_otg_iddig1_0),
+	PINCTRL_PIN_GROUP("otg_iddig1_1", mt7623_otg_iddig1_1),
+	PINCTRL_PIN_GROUP("otg_iddig1_2", mt7623_otg_iddig1_2),
+	PINCTRL_PIN_GROUP("otg_drv_vbus0_0", mt7623_otg_drv_vbus0_0),
+	PINCTRL_PIN_GROUP("otg_drv_vbus0_1", mt7623_otg_drv_vbus0_1),
+	PINCTRL_PIN_GROUP("otg_drv_vbus0_2", mt7623_otg_drv_vbus0_2),
+	PINCTRL_PIN_GROUP("otg_drv_vbus1_0", mt7623_otg_drv_vbus1_0),
+	PINCTRL_PIN_GROUP("otg_drv_vbus1_1", mt7623_otg_drv_vbus1_1),
+	PINCTRL_PIN_GROUP("otg_drv_vbus1_2", mt7623_otg_drv_vbus1_2),
+	PINCTRL_PIN_GROUP("pcie0_0_perst", mt7623_pcie0_0_perst),
+	PINCTRL_PIN_GROUP("pcie0_1_perst", mt7623_pcie0_1_perst),
+	PINCTRL_PIN_GROUP("pcie1_0_perst", mt7623_pcie1_0_perst),
+	PINCTRL_PIN_GROUP("pcie1_1_perst", mt7623_pcie1_1_perst),
+	PINCTRL_PIN_GROUP("pcie1_1_perst", mt7623_pcie1_1_perst),
+	PINCTRL_PIN_GROUP("pcie0_0_rev_perst", mt7623_pcie0_0_rev_perst),
+	PINCTRL_PIN_GROUP("pcie0_1_rev_perst", mt7623_pcie0_1_rev_perst),
+	PINCTRL_PIN_GROUP("pcie1_0_rev_perst", mt7623_pcie1_0_rev_perst),
+	PINCTRL_PIN_GROUP("pcie1_1_rev_perst", mt7623_pcie1_1_rev_perst),
+	PINCTRL_PIN_GROUP("pcie2_0_rev_perst", mt7623_pcie2_0_rev_perst),
+	PINCTRL_PIN_GROUP("pcie2_1_rev_perst", mt7623_pcie2_1_rev_perst),
+	PINCTRL_PIN_GROUP("pcie2_0_perst", mt7623_pcie2_0_perst),
+	PINCTRL_PIN_GROUP("pcie2_1_perst", mt7623_pcie2_1_perst),
+	PINCTRL_PIN_GROUP("pcie0_0_wake", mt7623_pcie0_0_wake),
+	PINCTRL_PIN_GROUP("pcie0_1_wake", mt7623_pcie0_1_wake),
+	PINCTRL_PIN_GROUP("pcie1_0_wake", mt7623_pcie1_0_wake),
+	PINCTRL_PIN_GROUP("pcie1_1_wake", mt7623_pcie1_1_wake),
+	PINCTRL_PIN_GROUP("pcie2_0_wake", mt7623_pcie2_0_wake),
+	PINCTRL_PIN_GROUP("pcie2_1_wake", mt7623_pcie2_1_wake),
+	PINCTRL_PIN_GROUP("pcie0_clkreq", mt7623_pcie0_clkreq),
+	PINCTRL_PIN_GROUP("pcie1_clkreq", mt7623_pcie1_clkreq),
+	PINCTRL_PIN_GROUP("pcie2_clkreq", mt7623_pcie2_clkreq),
+	PINCTRL_PIN_GROUP("pcm_clk_0", mt7623_pcm_clk_0),
+	PINCTRL_PIN_GROUP("pcm_clk_1", mt7623_pcm_clk_1),
+	PINCTRL_PIN_GROUP("pcm_clk_2", mt7623_pcm_clk_2),
+	PINCTRL_PIN_GROUP("pcm_clk_3", mt7623_pcm_clk_3),
+	PINCTRL_PIN_GROUP("pcm_clk_4", mt7623_pcm_clk_4),
+	PINCTRL_PIN_GROUP("pcm_clk_5", mt7623_pcm_clk_5),
+	PINCTRL_PIN_GROUP("pcm_clk_6", mt7623_pcm_clk_6),
+	PINCTRL_PIN_GROUP("pcm_sync_0", mt7623_pcm_sync_0),
+	PINCTRL_PIN_GROUP("pcm_sync_1", mt7623_pcm_sync_1),
+	PINCTRL_PIN_GROUP("pcm_sync_2", mt7623_pcm_sync_2),
+	PINCTRL_PIN_GROUP("pcm_sync_3", mt7623_pcm_sync_3),
+	PINCTRL_PIN_GROUP("pcm_sync_4", mt7623_pcm_sync_4),
+	PINCTRL_PIN_GROUP("pcm_sync_5", mt7623_pcm_sync_5),
+	PINCTRL_PIN_GROUP("pcm_sync_6", mt7623_pcm_sync_6),
+	PINCTRL_PIN_GROUP("pcm_rx_0", mt7623_pcm_rx_0),
+	PINCTRL_PIN_GROUP("pcm_rx_1", mt7623_pcm_rx_1),
+	PINCTRL_PIN_GROUP("pcm_rx_2", mt7623_pcm_rx_2),
+	PINCTRL_PIN_GROUP("pcm_rx_3", mt7623_pcm_rx_3),
+	PINCTRL_PIN_GROUP("pcm_rx_4", mt7623_pcm_rx_4),
+	PINCTRL_PIN_GROUP("pcm_rx_5", mt7623_pcm_rx_5),
+	PINCTRL_PIN_GROUP("pcm_rx_6", mt7623_pcm_rx_6),
+	PINCTRL_PIN_GROUP("pcm_tx_0", mt7623_pcm_tx_0),
+	PINCTRL_PIN_GROUP("pcm_tx_1", mt7623_pcm_tx_1),
+	PINCTRL_PIN_GROUP("pcm_tx_2", mt7623_pcm_tx_2),
+	PINCTRL_PIN_GROUP("pcm_tx_3", mt7623_pcm_tx_3),
+	PINCTRL_PIN_GROUP("pcm_tx_4", mt7623_pcm_tx_4),
+	PINCTRL_PIN_GROUP("pcm_tx_5", mt7623_pcm_tx_5),
+	PINCTRL_PIN_GROUP("pcm_tx_6", mt7623_pcm_tx_6),
+	PINCTRL_PIN_GROUP("pwm_ch1_0", mt7623_pwm_ch1_0),
+	PINCTRL_PIN_GROUP("pwm_ch1_1", mt7623_pwm_ch1_1),
+	PINCTRL_PIN_GROUP("pwm_ch1_2", mt7623_pwm_ch1_2),
+	PINCTRL_PIN_GROUP("pwm_ch1_3", mt7623_pwm_ch1_3),
+	PINCTRL_PIN_GROUP("pwm_ch1_4", mt7623_pwm_ch1_4),
+	PINCTRL_PIN_GROUP("pwm_ch2_0", mt7623_pwm_ch2_0),
+	PINCTRL_PIN_GROUP("pwm_ch2_1", mt7623_pwm_ch2_1),
+	PINCTRL_PIN_GROUP("pwm_ch2_2", mt7623_pwm_ch2_2),
+	PINCTRL_PIN_GROUP("pwm_ch2_3", mt7623_pwm_ch2_3),
+	PINCTRL_PIN_GROUP("pwm_ch2_4", mt7623_pwm_ch2_4),
+	PINCTRL_PIN_GROUP("pwm_ch3_0", mt7623_pwm_ch3_0),
+	PINCTRL_PIN_GROUP("pwm_ch3_1", mt7623_pwm_ch3_1),
+	PINCTRL_PIN_GROUP("pwm_ch3_2", mt7623_pwm_ch3_2),
+	PINCTRL_PIN_GROUP("pwm_ch3_3", mt7623_pwm_ch3_3),
+	PINCTRL_PIN_GROUP("pwm_ch4_0", mt7623_pwm_ch4_0),
+	PINCTRL_PIN_GROUP("pwm_ch4_1", mt7623_pwm_ch4_1),
+	PINCTRL_PIN_GROUP("pwm_ch4_2", mt7623_pwm_ch4_2),
+	PINCTRL_PIN_GROUP("pwm_ch4_3", mt7623_pwm_ch4_3),
+	PINCTRL_PIN_GROUP("pwm_ch5_0", mt7623_pwm_ch5_0),
+	PINCTRL_PIN_GROUP("pwm_ch5_1", mt7623_pwm_ch5_1),
+	PINCTRL_PIN_GROUP("pwrap", mt7623_pwrap),
+	PINCTRL_PIN_GROUP("rtc", mt7623_rtc),
+	PINCTRL_PIN_GROUP("spdif_in0_0", mt7623_spdif_in0_0),
+	PINCTRL_PIN_GROUP("spdif_in0_1", mt7623_spdif_in0_1),
+	PINCTRL_PIN_GROUP("spdif_in1_0", mt7623_spdif_in1_0),
+	PINCTRL_PIN_GROUP("spdif_in1_1", mt7623_spdif_in1_1),
+	PINCTRL_PIN_GROUP("spdif_out", mt7623_spdif_out),
+	PINCTRL_PIN_GROUP("spi0", mt7623_spi0),
+	PINCTRL_PIN_GROUP("spi1", mt7623_spi1),
+	PINCTRL_PIN_GROUP("spi2", mt7623_spi2),
+	PINCTRL_PIN_GROUP("uart0_0_txd_rxd",  mt7623_uart0_0_txd_rxd),
+	PINCTRL_PIN_GROUP("uart0_1_txd_rxd",  mt7623_uart0_1_txd_rxd),
+	PINCTRL_PIN_GROUP("uart0_2_txd_rxd",  mt7623_uart0_2_txd_rxd),
+	PINCTRL_PIN_GROUP("uart0_3_txd_rxd",  mt7623_uart0_3_txd_rxd),
+	PINCTRL_PIN_GROUP("uart1_0_txd_rxd",  mt7623_uart1_0_txd_rxd),
+	PINCTRL_PIN_GROUP("uart1_1_txd_rxd",  mt7623_uart1_1_txd_rxd),
+	PINCTRL_PIN_GROUP("uart1_2_txd_rxd",  mt7623_uart1_2_txd_rxd),
+	PINCTRL_PIN_GROUP("uart2_0_txd_rxd",  mt7623_uart2_0_txd_rxd),
+	PINCTRL_PIN_GROUP("uart2_1_txd_rxd",  mt7623_uart2_1_txd_rxd),
+	PINCTRL_PIN_GROUP("uart3_txd_rxd",  mt7623_uart3_txd_rxd),
+	PINCTRL_PIN_GROUP("uart0_rts_cts",  mt7623_uart0_rts_cts),
+	PINCTRL_PIN_GROUP("uart1_rts_cts",  mt7623_uart1_rts_cts),
+	PINCTRL_PIN_GROUP("uart2_rts_cts",  mt7623_uart2_rts_cts),
+	PINCTRL_PIN_GROUP("uart3_rts_cts",  mt7623_uart3_rts_cts),
+	PINCTRL_PIN_GROUP("watchdog_0", mt7623_watchdog_0),
+	PINCTRL_PIN_GROUP("watchdog_1", mt7623_watchdog_1),
+};
+
+/* Join the groups that provide the same capability from the user's point
+ * of view into named functions that can be requested through the device
+ * tree (see the sketch after the function table below).
+ */
+static const char *mt7623_aud_clk_groups[] = { "aud_ext_clk0",
+					       "aud_ext_clk1", };
+static const char *mt7623_disp_pwm_groups[] = { "disp_pwm_0", "disp_pwm_1",
+						"disp_pwm_2", };
+static const char *mt7623_ethernet_groups[] = { "esw_int", "esw_rst",
+						"ephy", "mdc_mdio", };
+static const char *mt7623_ext_sdio_groups[] = { "ext_sdio", };
+static const char *mt7623_hdmi_groups[] = { "hdmi_cec", "hdmi_htplg",
+					    "hdmi_i2c", "hdmi_rx",
+					    "hdmi_rx_i2c", };
+static const char *mt7623_i2c_groups[] = { "i2c0", "i2c1_0", "i2c1_1",
+					   "i2c1_2", "i2c1_3", "i2c1_4",
+					   "i2c2_0", "i2c2_1", "i2c2_2",
+					   "i2c2_3", };
+static const char *mt7623_i2s_groups[] = { "i2s0", "i2s1",
+					   "i2s2_bclk_lrclk_mclk",
+					   "i2s3_bclk_lrclk_mclk",
+					   "i2s4", "i2s5",
+					   "i2s2_data_in", "i2s3_data_in",
+					   "i2s2_data_0", "i2s2_data_1",
+					   "i2s3_data_0", "i2s3_data_1", };
+static const char *mt7623_ir_groups[] = { "ir", };
+static const char *mt7623_lcd_groups[] = { "dsi_te", "lcm_rst", "mipi_tx", };
+static const char *mt7623_msdc_groups[] = { "msdc0", "msdc1", "msdc1_ins",
+					    "msdc1_wp_0", "msdc1_wp_1",
+					    "msdc1_wp_2", "msdc2",
+						"msdc3", };
+static const char *mt7623_nandc_groups[] = { "nandc", "nandc_ceb0",
+					     "nandc_ceb1", };
+static const char *mt7623_otg_groups[] = { "otg_iddig0_0", "otg_iddig0_1",
+					    "otg_iddig0_2", "otg_iddig1_0",
+					    "otg_iddig1_1", "otg_iddig1_2",
+					    "otg_drv_vbus0_0",
+					    "otg_drv_vbus0_1",
+					    "otg_drv_vbus0_2",
+					    "otg_drv_vbus1_0",
+					    "otg_drv_vbus1_1",
+					    "otg_drv_vbus1_2", };
+static const char *mt7623_pcie_groups[] = { "pcie0_0_perst", "pcie0_1_perst",
+					    "pcie1_0_perst", "pcie1_1_perst",
+					    "pcie2_0_perst", "pcie2_1_perst",
+					    "pcie0_0_rev_perst",
+					    "pcie0_1_rev_perst",
+					    "pcie1_0_rev_perst",
+					    "pcie1_1_rev_perst",
+					    "pcie2_0_rev_perst",
+					    "pcie2_1_rev_perst",
+					    "pcie0_0_wake", "pcie0_1_wake",
+					    "pcie2_0_wake", "pcie2_1_wake",
+					    "pcie0_clkreq", "pcie1_clkreq",
+					    "pcie2_clkreq", };
+static const char *mt7623_pcm_groups[] = { "pcm_clk_0", "pcm_clk_1",
+					   "pcm_clk_2", "pcm_clk_3",
+					   "pcm_clk_4", "pcm_clk_5",
+					   "pcm_clk_6", "pcm_sync_0",
+					   "pcm_sync_1", "pcm_sync_2",
+					   "pcm_sync_3", "pcm_sync_4",
+					   "pcm_sync_5", "pcm_sync_6",
+					   "pcm_rx_0", "pcm_rx_1",
+					   "pcm_rx_2", "pcm_rx_3",
+					   "pcm_rx_4", "pcm_rx_5",
+					   "pcm_rx_6", "pcm_tx_0",
+					   "pcm_tx_1", "pcm_tx_2",
+					   "pcm_tx_3", "pcm_tx_4",
+					   "pcm_tx_5", "pcm_tx_6", };
+static const char *mt7623_pwm_groups[] = { "pwm_ch1_0", "pwm_ch1_1",
+					   "pwm_ch1_2", "pwm_ch1_3",
+					   "pwm_ch1_4", "pwm_ch2_0",
+					   "pwm_ch2_1", "pwm_ch2_2",
+					   "pwm_ch2_3", "pwm_ch2_4",
+					   "pwm_ch3_0", "pwm_ch3_1",
+					   "pwm_ch3_2", "pwm_ch3_3",
+					   "pwm_ch4_0", "pwm_ch4_1",
+					   "pwm_ch4_2", "pwm_ch4_3",
+					   "pwm_ch5_0", "pwm_ch5_1", };
+static const char *mt7623_pwrap_groups[] = { "pwrap", };
+static const char *mt7623_rtc_groups[] = { "rtc", };
+static const char *mt7623_spi_groups[] = { "spi0", "spi1", "spi2", };
+static const char *mt7623_spdif_groups[] = { "spdif_in0_0", "spdif_in0_1",
+					     "spdif_in1_0", "spdif_in1_1",
+					     "spdif_out", };
+static const char *mt7623_uart_groups[] = { "uart0_0_txd_rxd",
+					    "uart0_1_txd_rxd",
+					    "uart0_2_txd_rxd",
+					    "uart0_3_txd_rxd",
+					    "uart1_0_txd_rxd",
+					    "uart1_1_txd_rxd",
+					    "uart1_2_txd_rxd",
+					    "uart2_0_txd_rxd",
+					    "uart2_1_txd_rxd",
+					    "uart3_txd_rxd",
+					    "uart0_rts_cts",
+					    "uart1_rts_cts",
+					    "uart2_rts_cts",
+					    "uart3_rts_cts", };
+static const char *mt7623_wdt_groups[] = { "watchdog_0", "watchdog_1", };
+
+static const struct function_desc mt7623_functions[] = {
+	{"audck", mt7623_aud_clk_groups, ARRAY_SIZE(mt7623_aud_clk_groups)},
+	{"disp", mt7623_disp_pwm_groups, ARRAY_SIZE(mt7623_disp_pwm_groups)},
+	{"eth",	mt7623_ethernet_groups, ARRAY_SIZE(mt7623_ethernet_groups)},
+	{"sdio", mt7623_ext_sdio_groups, ARRAY_SIZE(mt7623_ext_sdio_groups)},
+	{"hdmi", mt7623_hdmi_groups, ARRAY_SIZE(mt7623_hdmi_groups)},
+	{"i2c", mt7623_i2c_groups, ARRAY_SIZE(mt7623_i2c_groups)},
+	{"i2s",	mt7623_i2s_groups, ARRAY_SIZE(mt7623_i2s_groups)},
+	{"ir",	mt7623_ir_groups, ARRAY_SIZE(mt7623_ir_groups)},
+	{"lcd", mt7623_lcd_groups, ARRAY_SIZE(mt7623_lcd_groups)},
+	{"msdc", mt7623_msdc_groups, ARRAY_SIZE(mt7623_msdc_groups)},
+	{"nand", mt7623_nandc_groups, ARRAY_SIZE(mt7623_nandc_groups)},
+	{"otg", mt7623_otg_groups, ARRAY_SIZE(mt7623_otg_groups)},
+	{"pcie", mt7623_pcie_groups, ARRAY_SIZE(mt7623_pcie_groups)},
+	{"pcm",	mt7623_pcm_groups, ARRAY_SIZE(mt7623_pcm_groups)},
+	{"pwm",	mt7623_pwm_groups, ARRAY_SIZE(mt7623_pwm_groups)},
+	{"pwrap", mt7623_pwrap_groups, ARRAY_SIZE(mt7623_pwrap_groups)},
+	{"rtc", mt7623_rtc_groups, ARRAY_SIZE(mt7623_rtc_groups)},
+	{"spi",	mt7623_spi_groups, ARRAY_SIZE(mt7623_spi_groups)},
+	{"spdif", mt7623_spdif_groups, ARRAY_SIZE(mt7623_spdif_groups)},
+	{"uart", mt7623_uart_groups, ARRAY_SIZE(mt7623_uart_groups)},
+	{"watchdog", mt7623_wdt_groups, ARRAY_SIZE(mt7623_wdt_groups)},
+};
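+
+/* A minimal sketch of how a board device tree would select one of the
+ * functions above via the generic "function"/"groups" properties; node
+ * and label names are illustrative only:
+ *
+ *	&pio {
+ *		uart0_pins: uart0-default {
+ *			mux {
+ *				function = "uart";
+ *				groups = "uart0_0_txd_rxd";
+ *			};
+ *		};
+ *	};
+ */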
+
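+/* In struct mtk_eint_hw, ap_num is the total number of EINT sources and
+ * db_cnt the number that support hardware debounce; port_mask and ports
+ * describe how the sources are banked (field meanings as understood from
+ * the shared mtk-eint code).
+ */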
+static const struct mtk_eint_hw mt7623_eint_hw = {
+	.port_mask = 6,
+	.ports     = 6,
+	.ap_num    = 169,
+	.db_cnt    = 20,
+};
+
+static struct mtk_pin_soc mt7623_data = {
+	.reg_cal = mt7623_reg_cals,
+	.pins = mt7623_pins,
+	.npins = ARRAY_SIZE(mt7623_pins),
+	.grps = mt7623_groups,
+	.ngrps = ARRAY_SIZE(mt7623_groups),
+	.funcs = mt7623_functions,
+	.nfuncs = ARRAY_SIZE(mt7623_functions),
+	.eint_hw = &mt7623_eint_hw,
+	.gpio_m = 0,
+	.ies_present = true,
+	.base_names = mtk_default_register_base_names,
+	.nbase_names = ARRAY_SIZE(mtk_default_register_base_names),
+	.bias_disable_set = mtk_pinconf_bias_disable_set_rev1,
+	.bias_disable_get = mtk_pinconf_bias_disable_get_rev1,
+	.bias_set = mtk_pinconf_bias_set_rev1,
+	.bias_get = mtk_pinconf_bias_get_rev1,
+	.drive_set = mtk_pinconf_drive_set_rev1,
+	.drive_get = mtk_pinconf_drive_get_rev1,
+	.adv_pull_get = mtk_pinconf_adv_pull_get,
+	.adv_pull_set = mtk_pinconf_adv_pull_set,
+};
+
+/*
+ * Some pins have mux functions greater than 8. Before such a high mode
+ * can be selected, the corresponding bonding constraint must first be
+ * disabled by setting the *_CLR bits below.
+ */
+static void mt7623_bonding_disable(struct platform_device *pdev)
+{
+	struct mtk_pinctrl *hw = platform_get_drvdata(pdev);
+
+	mtk_rmw(hw, 0, PIN_BOND_REG0, BOND_PCIE_CLR, BOND_PCIE_CLR);
+	mtk_rmw(hw, 0, PIN_BOND_REG1, BOND_I2S_CLR, BOND_I2S_CLR);
+	mtk_rmw(hw, 0, PIN_BOND_REG2, BOND_MSDC0E_CLR, BOND_MSDC0E_CLR);
+}
+
+static const struct of_device_id mt7623_pctrl_match[] = {
+	{ .compatible = "mediatek,mt7623-moore-pinctrl", },
+	{}
+};
+
+static int mt7623_pinctrl_probe(struct platform_device *pdev)
+{
+	int err;
+
+	err = mtk_moore_pinctrl_probe(pdev, &mt7623_data);
+	if (err)
+		return err;
+
+	mt7623_bonding_disable(pdev);
+
+	return 0;
+}
+
+static struct platform_driver mtk_pinctrl_driver = {
+	.probe = mt7623_pinctrl_probe,
+	.driver = {
+		.name = "mt7623-moore-pinctrl",
+		.of_match_table = mt7623_pctrl_match,
+	},
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+	return platform_driver_register(&mtk_pinctrl_driver);
+}
+arch_initcall(mtk_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8167.c b/drivers/pinctrl/mediatek/pinctrl-mt8167.c
new file mode 100644
index 0000000..ed6a363
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8167.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Min.Guo <min.guo@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#include "pinctrl-mtk-common.h"
+#include "pinctrl-mtk-mt8167.h"
+
+static const struct mtk_drv_group_desc mt8167_drv_grp[] = {
+	/* 0E4E8SR 4/8/12/16 */
+	MTK_DRV_GRP(4, 16, 1, 2, 4),
+	/* 0E2E4SR  2/4/6/8 */
+	MTK_DRV_GRP(2, 8, 1, 2, 2),
+	/* E8E4E2  2/4/6/8/10/12/14/16 */
+	MTK_DRV_GRP(2, 16, 0, 2, 2)
+};
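+
+/* Each MTK_DRV_GRP() entry above describes one class of driving strength:
+ * the minimum and maximum current in mA, the register bits used and the
+ * step between supported values, so e.g. MTK_DRV_GRP(4, 16, 1, 2, 4)
+ * covers 4/8/12/16 mA (interpretation per the shared MTK pinctrl code).
+ * The table below maps each pin to its control register, bit offset and
+ * driving-strength class.
+ */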
+
+static const struct mtk_pin_drv_grp mt8167_pin_drv[] = {
+	MTK_PIN_DRV_GRP(0, 0xd00, 0, 0),
+	MTK_PIN_DRV_GRP(1, 0xd00, 0, 0),
+	MTK_PIN_DRV_GRP(2, 0xd00, 0, 0),
+	MTK_PIN_DRV_GRP(3, 0xd00, 0, 0),
+	MTK_PIN_DRV_GRP(4, 0xd00, 0, 0),
+
+	MTK_PIN_DRV_GRP(5, 0xd00, 4, 0),
+	MTK_PIN_DRV_GRP(6, 0xd00, 4, 0),
+	MTK_PIN_DRV_GRP(7, 0xd00, 4, 0),
+	MTK_PIN_DRV_GRP(8, 0xd00, 4, 0),
+	MTK_PIN_DRV_GRP(9, 0xd00, 4, 0),
+	MTK_PIN_DRV_GRP(10, 0xd00, 4, 0),
+
+	MTK_PIN_DRV_GRP(11, 0xd00, 8, 0),
+	MTK_PIN_DRV_GRP(12, 0xd00, 8, 0),
+	MTK_PIN_DRV_GRP(13, 0xd00, 8, 0),
+
+	MTK_PIN_DRV_GRP(14, 0xd00, 12, 2),
+	MTK_PIN_DRV_GRP(15, 0xd00, 12, 2),
+	MTK_PIN_DRV_GRP(16, 0xd00, 12, 2),
+	MTK_PIN_DRV_GRP(17, 0xd00, 12, 2),
+
+	MTK_PIN_DRV_GRP(18, 0xd10, 0, 0),
+	MTK_PIN_DRV_GRP(19, 0xd10, 0, 0),
+	MTK_PIN_DRV_GRP(20, 0xd10, 0, 0),
+
+	MTK_PIN_DRV_GRP(21, 0xd00, 12, 2),
+	MTK_PIN_DRV_GRP(22, 0xd00, 12, 2),
+	MTK_PIN_DRV_GRP(23, 0xd00, 12, 2),
+
+	MTK_PIN_DRV_GRP(24, 0xd00, 8, 0),
+	MTK_PIN_DRV_GRP(25, 0xd00, 8, 0),
+
+	MTK_PIN_DRV_GRP(26, 0xd10, 4, 1),
+	MTK_PIN_DRV_GRP(27, 0xd10, 4, 1),
+	MTK_PIN_DRV_GRP(28, 0xd10, 4, 1),
+	MTK_PIN_DRV_GRP(29, 0xd10, 4, 1),
+	MTK_PIN_DRV_GRP(30, 0xd10, 4, 1),
+
+	MTK_PIN_DRV_GRP(31, 0xd10, 8, 1),
+	MTK_PIN_DRV_GRP(32, 0xd10, 8, 1),
+	MTK_PIN_DRV_GRP(33, 0xd10, 8, 1),
+
+	MTK_PIN_DRV_GRP(34, 0xd10, 12, 0),
+	MTK_PIN_DRV_GRP(35, 0xd10, 12, 0),
+
+	MTK_PIN_DRV_GRP(36, 0xd20, 0, 0),
+	MTK_PIN_DRV_GRP(37, 0xd20, 0, 0),
+	MTK_PIN_DRV_GRP(38, 0xd20, 0, 0),
+	MTK_PIN_DRV_GRP(39, 0xd20, 0, 0),
+
+	MTK_PIN_DRV_GRP(40, 0xd20, 4, 1),
+
+	MTK_PIN_DRV_GRP(41, 0xd20, 8, 1),
+	MTK_PIN_DRV_GRP(42, 0xd20, 8, 1),
+	MTK_PIN_DRV_GRP(43, 0xd20, 8, 1),
+
+	MTK_PIN_DRV_GRP(44, 0xd20, 12, 1),
+	MTK_PIN_DRV_GRP(45, 0xd20, 12, 1),
+	MTK_PIN_DRV_GRP(46, 0xd20, 12, 1),
+	MTK_PIN_DRV_GRP(47, 0xd20, 12, 1),
+
+	MTK_PIN_DRV_GRP(48, 0xd30, 0, 1),
+	MTK_PIN_DRV_GRP(49, 0xd30, 0, 1),
+	MTK_PIN_DRV_GRP(50, 0xd30, 0, 1),
+	MTK_PIN_DRV_GRP(51, 0xd30, 0, 1),
+
+	MTK_PIN_DRV_GRP(54, 0xd30, 8, 1),
+
+	MTK_PIN_DRV_GRP(55, 0xd30, 12, 1),
+	MTK_PIN_DRV_GRP(56, 0xd30, 12, 1),
+	MTK_PIN_DRV_GRP(57, 0xd30, 12, 1),
+
+	MTK_PIN_DRV_GRP(62, 0xd40, 8, 1),
+	MTK_PIN_DRV_GRP(63, 0xd40, 8, 1),
+	MTK_PIN_DRV_GRP(64, 0xd40, 8, 1),
+	MTK_PIN_DRV_GRP(65, 0xd40, 8, 1),
+	MTK_PIN_DRV_GRP(66, 0xd40, 8, 1),
+	MTK_PIN_DRV_GRP(67, 0xd40, 8, 1),
+
+	MTK_PIN_DRV_GRP(68, 0xd40, 12, 2),
+
+	MTK_PIN_DRV_GRP(69, 0xd50, 0, 2),
+
+	MTK_PIN_DRV_GRP(70, 0xd50, 4, 2),
+	MTK_PIN_DRV_GRP(71, 0xd50, 4, 2),
+	MTK_PIN_DRV_GRP(72, 0xd50, 4, 2),
+	MTK_PIN_DRV_GRP(73, 0xd50, 4, 2),
+
+	MTK_PIN_DRV_GRP(100, 0xd50, 8, 1),
+	MTK_PIN_DRV_GRP(101, 0xd50, 8, 1),
+	MTK_PIN_DRV_GRP(102, 0xd50, 8, 1),
+	MTK_PIN_DRV_GRP(103, 0xd50, 8, 1),
+
+	MTK_PIN_DRV_GRP(104, 0xd50, 12, 2),
+
+	MTK_PIN_DRV_GRP(105, 0xd60, 0, 2),
+
+	MTK_PIN_DRV_GRP(106, 0xd60, 4, 2),
+	MTK_PIN_DRV_GRP(107, 0xd60, 4, 2),
+	MTK_PIN_DRV_GRP(108, 0xd60, 4, 2),
+	MTK_PIN_DRV_GRP(109, 0xd60, 4, 2),
+
+	MTK_PIN_DRV_GRP(110, 0xd70, 0, 2),
+	MTK_PIN_DRV_GRP(111, 0xd70, 0, 2),
+	MTK_PIN_DRV_GRP(112, 0xd70, 0, 2),
+	MTK_PIN_DRV_GRP(113, 0xd70, 0, 2),
+
+	MTK_PIN_DRV_GRP(114, 0xd70, 4, 2),
+
+	MTK_PIN_DRV_GRP(115, 0xd60, 12, 2),
+
+	MTK_PIN_DRV_GRP(116, 0xd60, 8, 2),
+
+	MTK_PIN_DRV_GRP(117, 0xd70, 0, 2),
+	MTK_PIN_DRV_GRP(118, 0xd70, 0, 2),
+	MTK_PIN_DRV_GRP(119, 0xd70, 0, 2),
+	MTK_PIN_DRV_GRP(120, 0xd70, 0, 2),
+};
+
+static const struct mtk_pin_spec_pupd_set_samereg mt8167_spec_pupd[] = {
+	MTK_PIN_PUPD_SPEC_SR(14, 0xe50, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(15, 0xe60, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(16, 0xe60, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(17, 0xe60, 10, 9, 8),
+
+	MTK_PIN_PUPD_SPEC_SR(21, 0xe60, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(22, 0xe70, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(23, 0xe70, 6, 5, 4),
+
+	MTK_PIN_PUPD_SPEC_SR(40, 0xe80, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(41, 0xe80, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(42, 0xe90, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(43, 0xe90, 6, 5, 4),
+
+	MTK_PIN_PUPD_SPEC_SR(68, 0xe50, 10, 9, 8),
+	MTK_PIN_PUPD_SPEC_SR(69, 0xe50, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(70, 0xe40, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(71, 0xe40, 10, 9, 8),
+	MTK_PIN_PUPD_SPEC_SR(72, 0xe40, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(73, 0xe50, 2, 1, 0),
+
+	MTK_PIN_PUPD_SPEC_SR(104, 0xe40, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(105, 0xe30, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(106, 0xe20, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(107, 0xe30, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(108, 0xe30, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(109, 0xe30, 10, 9, 8),
+	MTK_PIN_PUPD_SPEC_SR(110, 0xe10, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(111, 0xe10, 10, 9, 8),
+	MTK_PIN_PUPD_SPEC_SR(112, 0xe10, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(113, 0xe10, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(114, 0xe20, 10, 9, 8),
+	MTK_PIN_PUPD_SPEC_SR(115, 0xe20, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(116, 0xe20, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(117, 0xe00, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(118, 0xe00, 10, 9, 8),
+	MTK_PIN_PUPD_SPEC_SR(119, 0xe00, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(120, 0xe00, 2, 1, 0),
+};
+
+static int mt8167_spec_pull_set(struct regmap *regmap, unsigned int pin,
+			unsigned char align, bool isup, unsigned int r1r0)
+{
+	return mtk_pctrl_spec_pull_set_samereg(regmap, mt8167_spec_pupd,
+		ARRAY_SIZE(mt8167_spec_pupd), pin, align, isup, r1r0);
+}
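+
+/* For the pins above, pull resistors are selected through the two R1/R0
+ * bits rather than a plain enable. A board device tree would typically
+ * pick a setting with the MTK_PUPD_SET_R1R0_* constants from
+ * dt-bindings/pinctrl/mt65xx.h, e.g. (illustrative):
+ *
+ *	bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ */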
+
+static const struct mtk_pin_ies_smt_set mt8167_ies_set[] = {
+	MTK_PIN_IES_SMT_SPEC(0, 6, 0x900, 2),
+	MTK_PIN_IES_SMT_SPEC(7, 10, 0x900, 3),
+	MTK_PIN_IES_SMT_SPEC(11, 13, 0x900, 12),
+	MTK_PIN_IES_SMT_SPEC(14, 17, 0x900, 13),
+	MTK_PIN_IES_SMT_SPEC(18, 20, 0x910, 10),
+	MTK_PIN_IES_SMT_SPEC(21, 23, 0x900, 13),
+	MTK_PIN_IES_SMT_SPEC(24, 25, 0x900, 12),
+	MTK_PIN_IES_SMT_SPEC(26, 30, 0x900, 0),
+	MTK_PIN_IES_SMT_SPEC(31, 33, 0x900, 1),
+	MTK_PIN_IES_SMT_SPEC(34, 39, 0x900, 2),
+	MTK_PIN_IES_SMT_SPEC(40, 40, 0x910, 11),
+	MTK_PIN_IES_SMT_SPEC(41, 43, 0x900, 10),
+	MTK_PIN_IES_SMT_SPEC(44, 47, 0x900, 11),
+	MTK_PIN_IES_SMT_SPEC(48, 51, 0x900, 14),
+	MTK_PIN_IES_SMT_SPEC(52, 53, 0x910, 0),
+	MTK_PIN_IES_SMT_SPEC(54, 54, 0x910, 2),
+	MTK_PIN_IES_SMT_SPEC(55, 57, 0x910, 4),
+	MTK_PIN_IES_SMT_SPEC(58, 59, 0x900, 15),
+	MTK_PIN_IES_SMT_SPEC(60, 61, 0x910, 1),
+	MTK_PIN_IES_SMT_SPEC(62, 65, 0x910, 5),
+	MTK_PIN_IES_SMT_SPEC(66, 67, 0x910, 6),
+	MTK_PIN_IES_SMT_SPEC(68, 68, 0x930, 2),
+	MTK_PIN_IES_SMT_SPEC(69, 69, 0x930, 1),
+	MTK_PIN_IES_SMT_SPEC(70, 70, 0x930, 6),
+	MTK_PIN_IES_SMT_SPEC(71, 71, 0x930, 5),
+	MTK_PIN_IES_SMT_SPEC(72, 72, 0x930, 4),
+	MTK_PIN_IES_SMT_SPEC(73, 73, 0x930, 3),
+	MTK_PIN_IES_SMT_SPEC(100, 103, 0x910, 7),
+	MTK_PIN_IES_SMT_SPEC(104, 104, 0x920, 12),
+	MTK_PIN_IES_SMT_SPEC(105, 105, 0x920, 11),
+	MTK_PIN_IES_SMT_SPEC(106, 106, 0x930, 0),
+	MTK_PIN_IES_SMT_SPEC(107, 107, 0x920, 15),
+	MTK_PIN_IES_SMT_SPEC(108, 108, 0x920, 14),
+	MTK_PIN_IES_SMT_SPEC(109, 109, 0x920, 13),
+	MTK_PIN_IES_SMT_SPEC(110, 110, 0x920, 9),
+	MTK_PIN_IES_SMT_SPEC(111, 111, 0x920, 8),
+	MTK_PIN_IES_SMT_SPEC(112, 112, 0x920, 7),
+	MTK_PIN_IES_SMT_SPEC(113, 113, 0x920, 6),
+	MTK_PIN_IES_SMT_SPEC(114, 114, 0x920, 10),
+	MTK_PIN_IES_SMT_SPEC(115, 115, 0x920, 1),
+	MTK_PIN_IES_SMT_SPEC(116, 116, 0x920, 0),
+	MTK_PIN_IES_SMT_SPEC(117, 117, 0x920, 5),
+	MTK_PIN_IES_SMT_SPEC(118, 118, 0x920, 4),
+	MTK_PIN_IES_SMT_SPEC(119, 119, 0x920, 3),
+	MTK_PIN_IES_SMT_SPEC(120, 120, 0x920, 2),
+	MTK_PIN_IES_SMT_SPEC(121, 124, 0x910, 9),
+};
+
+static const struct mtk_pin_ies_smt_set mt8167_smt_set[] = {
+	MTK_PIN_IES_SMT_SPEC(0, 6, 0xA00, 2),
+	MTK_PIN_IES_SMT_SPEC(7, 10, 0xA00, 3),
+	MTK_PIN_IES_SMT_SPEC(11, 13, 0xA00, 12),
+	MTK_PIN_IES_SMT_SPEC(14, 17, 0xA00, 13),
+	MTK_PIN_IES_SMT_SPEC(18, 20, 0xA10, 10),
+	MTK_PIN_IES_SMT_SPEC(21, 23, 0xA00, 13),
+	MTK_PIN_IES_SMT_SPEC(24, 25, 0xA00, 12),
+	MTK_PIN_IES_SMT_SPEC(26, 30, 0xA00, 0),
+	MTK_PIN_IES_SMT_SPEC(31, 33, 0xA00, 1),
+	MTK_PIN_IES_SMT_SPEC(34, 39, 0xA00, 2),
+	MTK_PIN_IES_SMT_SPEC(40, 40, 0xA10, 11),
+	MTK_PIN_IES_SMT_SPEC(41, 43, 0xA00, 10),
+	MTK_PIN_IES_SMT_SPEC(44, 47, 0xA00, 11),
+	MTK_PIN_IES_SMT_SPEC(48, 51, 0xA00, 14),
+	MTK_PIN_IES_SMT_SPEC(52, 53, 0xA10, 0),
+	MTK_PIN_IES_SMT_SPEC(54, 54, 0xA10, 2),
+	MTK_PIN_IES_SMT_SPEC(55, 57, 0xA10, 4),
+	MTK_PIN_IES_SMT_SPEC(58, 59, 0xA00, 15),
+	MTK_PIN_IES_SMT_SPEC(60, 61, 0xA10, 1),
+	MTK_PIN_IES_SMT_SPEC(62, 65, 0xA10, 5),
+	MTK_PIN_IES_SMT_SPEC(66, 67, 0xA10, 6),
+	MTK_PIN_IES_SMT_SPEC(68, 68, 0xA30, 2),
+	MTK_PIN_IES_SMT_SPEC(69, 69, 0xA30, 1),
+	MTK_PIN_IES_SMT_SPEC(70, 70, 0xA30, 3),
+	MTK_PIN_IES_SMT_SPEC(71, 71, 0xA30, 4),
+	MTK_PIN_IES_SMT_SPEC(72, 72, 0xA30, 5),
+	MTK_PIN_IES_SMT_SPEC(73, 73, 0xA30, 6),
+
+	MTK_PIN_IES_SMT_SPEC(100, 103, 0xA10, 7),
+	MTK_PIN_IES_SMT_SPEC(104, 104, 0xA20, 12),
+	MTK_PIN_IES_SMT_SPEC(105, 105, 0xA20, 11),
+	MTK_PIN_IES_SMT_SPEC(106, 106, 0xA30, 13),
+	MTK_PIN_IES_SMT_SPEC(107, 107, 0xA20, 14),
+	MTK_PIN_IES_SMT_SPEC(108, 108, 0xA20, 15),
+	MTK_PIN_IES_SMT_SPEC(109, 109, 0xA30, 0),
+	MTK_PIN_IES_SMT_SPEC(110, 110, 0xA20, 9),
+	MTK_PIN_IES_SMT_SPEC(111, 111, 0xA20, 8),
+	MTK_PIN_IES_SMT_SPEC(112, 112, 0xA20, 7),
+	MTK_PIN_IES_SMT_SPEC(113, 113, 0xA20, 6),
+	MTK_PIN_IES_SMT_SPEC(114, 114, 0xA20, 10),
+	MTK_PIN_IES_SMT_SPEC(115, 115, 0xA20, 1),
+	MTK_PIN_IES_SMT_SPEC(116, 116, 0xA20, 0),
+	MTK_PIN_IES_SMT_SPEC(117, 117, 0xA20, 5),
+	MTK_PIN_IES_SMT_SPEC(118, 118, 0xA20, 4),
+	MTK_PIN_IES_SMT_SPEC(119, 119, 0xA20, 3),
+	MTK_PIN_IES_SMT_SPEC(120, 120, 0xA20, 2),
+	MTK_PIN_IES_SMT_SPEC(121, 124, 0xA10, 9),
+};
+
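+/* IES (input enable) controls whether a pad's input path is active, while
+ * SMT enables the Schmitt trigger on that input; both are programmed
+ * through per-range bits, which is why whole pin ranges map to a single
+ * register bit in the tables above.
+ */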
+static int mt8167_ies_smt_set(struct regmap *regmap, unsigned int pin,
+		unsigned char align, int value, enum pin_config_param arg)
+{
+	if (arg == PIN_CONFIG_INPUT_ENABLE)
+		return mtk_pconf_spec_set_ies_smt_range(regmap, mt8167_ies_set,
+			ARRAY_SIZE(mt8167_ies_set), pin, align, value);
+	else if (arg == PIN_CONFIG_INPUT_SCHMITT_ENABLE)
+		return mtk_pconf_spec_set_ies_smt_range(regmap, mt8167_smt_set,
+			ARRAY_SIZE(mt8167_smt_set), pin, align, value);
+	return -EINVAL;
+}
+
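+/* The get() callbacks below are compiled out and the matching hooks in
+ * mt8167_pinctrl_data are commented out; they are kept for reference
+ * only.
+ */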
+#if 0
+static int mt8167_spec_ies_get(struct regmap *regmap, unsigned int pin)
+{
+	return mtk_spec_get_ies_smt_range(regmap, mt8167_ies_set,
+		ARRAY_SIZE(mt8167_ies_set), pin);
+}
+
+static int mt8167_spec_smt_get(struct regmap *regmap, unsigned int pin)
+{
+	return mtk_spec_get_ies_smt_range(regmap, mt8167_smt_set,
+		ARRAY_SIZE(mt8167_smt_set), pin);
+}
+
+static int mt8167_spec_pull_get(struct regmap *regmap, unsigned int pin)
+{
+	return mtk_spec_pull_get_samereg(regmap, mt8167_spec_pupd,
+		ARRAY_SIZE(mt8167_spec_pupd), pin);
+}
+#endif
+
+static struct mtk_eint_regs mt8167_eint_regs = {
+	.stat = 0x000,
+	.ack = 0x040,
+	.mask = 0x080,
+	.mask_set = 0x0c0,
+	.mask_clr = 0x100,
+	.sens = 0x140,
+	.sens_set = 0x180,
+	.sens_clr = 0x1c0,
+	.soft = 0x200,
+	.soft_set = 0x240,
+	.soft_clr = 0x280,
+	.pol = 0x300,
+	.pol_set = 0x340,
+	.pol_clr = 0x380,
+	.dom_en = 0x400,
+	.dbnc_ctrl = 0x500,
+	.dbnc_set = 0x600,
+	.dbnc_clr = 0x700,
+};
+
+static const struct mtk_pinctrl_devdata mt8167_pinctrl_data = {
+	.pins = mtk_pins_mt8167,
+	.npins = ARRAY_SIZE(mtk_pins_mt8167),
+	.grp_desc = mt8167_drv_grp,
+	.n_grp_cls = ARRAY_SIZE(mt8167_drv_grp),
+	.pin_drv_grp = mt8167_pin_drv,
+	.n_pin_drv_grps = ARRAY_SIZE(mt8167_pin_drv),
+	.spec_pull_set = mt8167_spec_pull_set,
+	.spec_ies_smt_set = mt8167_ies_smt_set,
+//	.spec_pull_get = mt8167_spec_pull_get,
+//	.spec_ies_get = mt8167_spec_ies_get,
+//	.spec_smt_get = mt8167_spec_smt_get,
+	.dir_offset = 0x0000,
+	.pullen_offset = 0x0500,
+	.pullsel_offset = 0x0600,
+	.dout_offset = 0x0100,
+	.din_offset = 0x0200,
+	.pinmux_offset = 0x0300,
+	.type1_start = 125,
+	.type1_end = 125,
+	.port_shf = 4,
+	.port_mask = 0xf,
+	.port_align = 4,
+	.eint_regs = &mt8167_eint_regs,
+	.eint_hw = {
+		.port_mask = 7,
+		.ports     = 6,
+		.ap_num    = 169,
+		.db_cnt    = 64,
+	},
+};
+
+static int mt8167_pinctrl_probe(struct platform_device *pdev)
+{
+	return mtk_pctrl_init(pdev, &mt8167_pinctrl_data, NULL);
+}
+
+static const struct of_device_id mt8167_pctrl_match[] = {
+	{.compatible = "mediatek,mt8167-pinctrl",},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, mt8167_pctrl_match);
+
+static struct platform_driver mtk_pinctrl_driver = {
+	.probe = mt8167_pinctrl_probe,
+	.driver = {
+		   .name = "mediatek-mt8167-pinctrl",
+		   .owner = THIS_MODULE,
+		   .of_match_table = mt8167_pctrl_match,
+		   .pm = &mtk_eint_pm_ops,
+		   },
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+	return platform_driver_register(&mtk_pinctrl_driver);
+}
+arch_initcall(mtk_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8183.c b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
new file mode 100644
index 0000000..9613874
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#include "pinctrl-mtk-mt8183.h"
+#include "pinctrl-paris.h"
+
+/* MT8183 has multiple register bases for programming pin configuration,
+ * listed below:
+ * iocfg[0]:0x10005000, iocfg[1]:0x11F20000, iocfg[2]:0x11E80000,
+ * iocfg[3]:0x11E70000, iocfg[4]:0x11E90000, iocfg[5]:0x11D30000,
+ * iocfg[6]:0x11D20000, iocfg[7]:0x11C50000, iocfg[8]:0x11F30000.
+ * The _i_base argument indicates which base a pin's field is mapped into.
+ */
+
+#define PIN_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, _x_bits)	\
+	PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit,	\
+		       _x_bits, 32, 0)
+
+#define PINS_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, _x_bits)	\
+	PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit,	\
+		      _x_bits, 32, 1)
+
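+/* Both helpers describe where a pin's configuration bitfield lives; they
+ * differ only in the final "fixed" flag passed to PIN_FIELD_CALC(): with
+ * PIN_FIELD_BASE() every pin in the range gets its own field, advancing
+ * by _x_bits per pin, whereas with PINS_FIELD_BASE() all pins in the
+ * range share a single field (per the shared PIN_FIELD_CALC() helper).
+ */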
+static const struct mtk_pin_field_calc mt8183_pin_mode_range[] = {
+	PIN_FIELD(0, 192, 0x300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_dir_range[] = {
+	PIN_FIELD(0, 192, 0x0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_di_range[] = {
+	PIN_FIELD(0, 192, 0x200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_do_range[] = {
+	PIN_FIELD(0, 192, 0x100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_ies_range[] = {
+	PINS_FIELD_BASE(0, 3, 6, 0x000, 0x10, 3, 1),
+	PINS_FIELD_BASE(4, 7, 6, 0x000, 0x10, 5, 1),
+	PIN_FIELD_BASE(8, 8, 6, 0x000, 0x10, 0, 1),
+	PINS_FIELD_BASE(9, 10, 6, 0x000, 0x10, 12, 1),
+	PIN_FIELD_BASE(11, 11, 1, 0x000, 0x10, 3, 1),
+	PIN_FIELD_BASE(12, 12, 1, 0x000, 0x10, 7, 1),
+	PINS_FIELD_BASE(13, 16, 2, 0x000, 0x10, 2, 1),
+	PINS_FIELD_BASE(17, 20, 2, 0x000, 0x10, 3, 1),
+	PINS_FIELD_BASE(21, 24, 2, 0x000, 0x10, 4, 1),
+	PINS_FIELD_BASE(25, 28, 2, 0x000, 0x10, 5, 1),
+	PIN_FIELD_BASE(29, 29, 2, 0x000, 0x10, 6, 1),
+	PIN_FIELD_BASE(30, 30, 2, 0x000, 0x10, 7, 1),
+	PINS_FIELD_BASE(31, 31, 2, 0x000, 0x10, 8, 1),
+	PINS_FIELD_BASE(32, 34, 2, 0x000, 0x10, 7, 1),
+	PINS_FIELD_BASE(35, 37, 3, 0x000, 0x10, 0, 1),
+	PINS_FIELD_BASE(38, 40, 3, 0x000, 0x10, 1, 1),
+	PINS_FIELD_BASE(41, 42, 3, 0x000, 0x10, 2, 1),
+	PINS_FIELD_BASE(43, 45, 3, 0x000, 0x10, 3, 1),
+	PINS_FIELD_BASE(46, 47, 3, 0x000, 0x10, 4, 1),
+	PINS_FIELD_BASE(48, 49, 3, 0x000, 0x10, 5, 1),
+	PINS_FIELD_BASE(50, 51, 4, 0x000, 0x10, 0, 1),
+	PINS_FIELD_BASE(52, 57, 4, 0x000, 0x10, 1, 1),
+	PINS_FIELD_BASE(58, 60, 4, 0x000, 0x10, 2, 1),
+	PINS_FIELD_BASE(61, 64, 5, 0x000, 0x10, 0, 1),
+	PINS_FIELD_BASE(65, 66, 5, 0x000, 0x10, 1, 1),
+	PINS_FIELD_BASE(67, 68, 5, 0x000, 0x10, 2, 1),
+	PINS_FIELD_BASE(69, 71, 5, 0x000, 0x10, 3, 1),
+	PINS_FIELD_BASE(72, 76, 5, 0x000, 0x10, 4, 1),
+	PINS_FIELD_BASE(77, 80, 5, 0x000, 0x10, 5, 1),
+	PIN_FIELD_BASE(81, 81, 5, 0x000, 0x10, 6, 1),
+	PINS_FIELD_BASE(82, 83, 5, 0x000, 0x10, 7, 1),
+	PIN_FIELD_BASE(84, 84, 5, 0x000, 0x10, 6, 1),
+	PINS_FIELD_BASE(85, 88, 5, 0x000, 0x10, 8, 1),
+	PIN_FIELD_BASE(89, 89, 6, 0x000, 0x10, 11, 1),
+	PIN_FIELD_BASE(90, 90, 6, 0x000, 0x10, 1, 1),
+	PINS_FIELD_BASE(91, 94, 6, 0x000, 0x10, 2, 1),
+	PINS_FIELD_BASE(95, 96, 6, 0x000, 0x10, 6, 1),
+	PINS_FIELD_BASE(97, 98, 6, 0x000, 0x10, 7, 1),
+	PIN_FIELD_BASE(99, 99, 6, 0x000, 0x10, 8, 1),
+	PIN_FIELD_BASE(100, 100, 6, 0x000, 0x10, 9, 1),
+	PINS_FIELD_BASE(101, 102, 6, 0x000, 0x10, 10, 1),
+	PINS_FIELD_BASE(103, 104, 6, 0x000, 0x10, 13, 1),
+	PINS_FIELD_BASE(105, 106, 6, 0x000, 0x10, 14, 1),
+	PIN_FIELD_BASE(107, 107, 7, 0x000, 0x10, 0, 1),
+	PIN_FIELD_BASE(108, 108, 7, 0x000, 0x10, 1, 1),
+	PIN_FIELD_BASE(109, 109, 7, 0x000, 0x10, 2, 1),
+	PIN_FIELD_BASE(110, 110, 7, 0x000, 0x10, 0, 1),
+	PIN_FIELD_BASE(111, 111, 7, 0x000, 0x10, 3, 1),
+	PIN_FIELD_BASE(112, 112, 7, 0x000, 0x10, 2, 1),
+	PIN_FIELD_BASE(113, 113, 7, 0x000, 0x10, 4, 1),
+	PIN_FIELD_BASE(114, 114, 7, 0x000, 0x10, 5, 1),
+	PIN_FIELD_BASE(115, 115, 7, 0x000, 0x10, 6, 1),
+	PIN_FIELD_BASE(116, 116, 7, 0x000, 0x10, 7, 1),
+	PIN_FIELD_BASE(117, 117, 7, 0x000, 0x10, 8, 1),
+	PIN_FIELD_BASE(118, 118, 7, 0x000, 0x10, 9, 1),
+	PIN_FIELD_BASE(119, 119, 7, 0x000, 0x10, 10, 1),
+	PIN_FIELD_BASE(120, 120, 7, 0x000, 0x10, 11, 1),
+	PIN_FIELD_BASE(121, 121, 7, 0x000, 0x10, 12, 1),
+	PIN_FIELD_BASE(122, 122, 8, 0x000, 0x10, 0, 1),
+	PIN_FIELD_BASE(123, 123, 8, 0x000, 0x10, 1, 1),
+	PIN_FIELD_BASE(124, 124, 8, 0x000, 0x10, 2, 1),
+	PINS_FIELD_BASE(125, 130, 8, 0x000, 0x10, 1, 1),
+	PIN_FIELD_BASE(131, 131, 8, 0x000, 0x10, 3, 1),
+	PIN_FIELD_BASE(132, 132, 8, 0x000, 0x10, 1, 1),
+	PIN_FIELD_BASE(133, 133, 8, 0x000, 0x10, 4, 1),
+	PIN_FIELD_BASE(134, 134, 1, 0x000, 0x10, 0, 1),
+	PIN_FIELD_BASE(135, 135, 1, 0x000, 0x10, 1, 1),
+	PINS_FIELD_BASE(136, 143, 1, 0x000, 0x10, 2, 1),
+	PINS_FIELD_BASE(144, 147, 1, 0x000, 0x10, 4, 1),
+	PIN_FIELD_BASE(148, 148, 1, 0x000, 0x10, 5, 1),
+	PIN_FIELD_BASE(149, 149, 1, 0x000, 0x10, 6, 1),
+	PINS_FIELD_BASE(150, 153, 1, 0x000, 0x10, 8, 1),
+	PIN_FIELD_BASE(154, 154, 1, 0x000, 0x10, 9, 1),
+	PINS_FIELD_BASE(155, 157, 1, 0x000, 0x10, 10, 1),
+	PINS_FIELD_BASE(158, 160, 1, 0x000, 0x10, 8, 1),
+	PINS_FIELD_BASE(161, 164, 2, 0x000, 0x10, 0, 1),
+	PINS_FIELD_BASE(165, 166, 2, 0x000, 0x10, 1, 1),
+	PINS_FIELD_BASE(167, 168, 4, 0x000, 0x10, 2, 1),
+	PIN_FIELD_BASE(169, 169, 4, 0x000, 0x10, 3, 1),
+	PINS_FIELD_BASE(170, 174, 4, 0x000, 0x10, 4, 1),
+	PINS_FIELD_BASE(175, 176, 4, 0x000, 0x10, 3, 1),
+	PINS_FIELD_BASE(177, 179, 6, 0x000, 0x10, 4, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_smt_range[] = {
+	PINS_FIELD_BASE(0, 3, 6, 0x010, 0x10, 3, 1),
+	PINS_FIELD_BASE(4, 7, 6, 0x010, 0x10, 5, 1),
+	PIN_FIELD_BASE(8, 8, 6, 0x010, 0x10, 0, 1),
+	PINS_FIELD_BASE(9, 10, 6, 0x010, 0x10, 12, 1),
+	PIN_FIELD_BASE(11, 11, 1, 0x010, 0x10, 3, 1),
+	PIN_FIELD_BASE(12, 12, 1, 0x010, 0x10, 7, 1),
+	PINS_FIELD_BASE(13, 16, 2, 0x010, 0x10, 2, 1),
+	PINS_FIELD_BASE(17, 20, 2, 0x010, 0x10, 3, 1),
+	PINS_FIELD_BASE(21, 24, 2, 0x010, 0x10, 4, 1),
+	PINS_FIELD_BASE(25, 28, 2, 0x010, 0x10, 5, 1),
+	PIN_FIELD_BASE(29, 29, 2, 0x010, 0x10, 6, 1),
+	PIN_FIELD_BASE(30, 30, 2, 0x010, 0x10, 7, 1),
+	PINS_FIELD_BASE(31, 31, 2, 0x010, 0x10, 8, 1),
+	PINS_FIELD_BASE(32, 34, 2, 0x010, 0x10, 7, 1),
+	PINS_FIELD_BASE(35, 37, 3, 0x010, 0x10, 0, 1),
+	PINS_FIELD_BASE(38, 40, 3, 0x010, 0x10, 1, 1),
+	PINS_FIELD_BASE(41, 42, 3, 0x010, 0x10, 2, 1),
+	PINS_FIELD_BASE(43, 45, 3, 0x010, 0x10, 3, 1),
+	PINS_FIELD_BASE(46, 47, 3, 0x010, 0x10, 4, 1),
+	PINS_FIELD_BASE(48, 49, 3, 0x010, 0x10, 5, 1),
+	PINS_FIELD_BASE(50, 51, 4, 0x010, 0x10, 0, 1),
+	PINS_FIELD_BASE(52, 57, 4, 0x010, 0x10, 1, 1),
+	PINS_FIELD_BASE(58, 60, 4, 0x010, 0x10, 2, 1),
+	PINS_FIELD_BASE(61, 64, 5, 0x010, 0x10, 0, 1),
+	PINS_FIELD_BASE(65, 66, 5, 0x010, 0x10, 1, 1),
+	PINS_FIELD_BASE(67, 68, 5, 0x010, 0x10, 2, 1),
+	PINS_FIELD_BASE(69, 71, 5, 0x010, 0x10, 3, 1),
+	PINS_FIELD_BASE(72, 76, 5, 0x010, 0x10, 4, 1),
+	PINS_FIELD_BASE(77, 80, 5, 0x010, 0x10, 5, 1),
+	PIN_FIELD_BASE(81, 81, 5, 0x010, 0x10, 6, 1),
+	PINS_FIELD_BASE(82, 83, 5, 0x010, 0x10, 7, 1),
+	PIN_FIELD_BASE(84, 84, 5, 0x010, 0x10, 6, 1),
+	PINS_FIELD_BASE(85, 88, 5, 0x010, 0x10, 8, 1),
+	PIN_FIELD_BASE(89, 89, 6, 0x010, 0x10, 11, 1),
+	PIN_FIELD_BASE(90, 90, 6, 0x010, 0x10, 1, 1),
+	PINS_FIELD_BASE(91, 94, 6, 0x010, 0x10, 2, 1),
+	PINS_FIELD_BASE(95, 96, 6, 0x010, 0x10, 6, 1),
+	PINS_FIELD_BASE(97, 98, 6, 0x010, 0x10, 7, 1),
+	PIN_FIELD_BASE(99, 99, 6, 0x010, 0x10, 8, 1),
+	PIN_FIELD_BASE(100, 100, 6, 0x010, 0x10, 9, 1),
+	PINS_FIELD_BASE(101, 102, 6, 0x010, 0x10, 10, 1),
+	PINS_FIELD_BASE(103, 104, 6, 0x010, 0x10, 13, 1),
+	PINS_FIELD_BASE(105, 106, 6, 0x010, 0x10, 14, 1),
+	PIN_FIELD_BASE(107, 107, 7, 0x010, 0x10, 0, 1),
+	PIN_FIELD_BASE(108, 108, 7, 0x010, 0x10, 1, 1),
+	PIN_FIELD_BASE(109, 109, 7, 0x010, 0x10, 2, 1),
+	PIN_FIELD_BASE(110, 110, 7, 0x010, 0x10, 0, 1),
+	PIN_FIELD_BASE(111, 111, 7, 0x010, 0x10, 3, 1),
+	PIN_FIELD_BASE(112, 112, 7, 0x010, 0x10, 2, 1),
+	PIN_FIELD_BASE(113, 113, 7, 0x010, 0x10, 4, 1),
+	PIN_FIELD_BASE(114, 114, 7, 0x010, 0x10, 5, 1),
+	PIN_FIELD_BASE(115, 115, 7, 0x010, 0x10, 6, 1),
+	PIN_FIELD_BASE(116, 116, 7, 0x010, 0x10, 7, 1),
+	PIN_FIELD_BASE(117, 117, 7, 0x010, 0x10, 8, 1),
+	PIN_FIELD_BASE(118, 118, 7, 0x010, 0x10, 9, 1),
+	PIN_FIELD_BASE(119, 119, 7, 0x010, 0x10, 10, 1),
+	PIN_FIELD_BASE(120, 120, 7, 0x010, 0x10, 11, 1),
+	PIN_FIELD_BASE(121, 121, 7, 0x010, 0x10, 12, 1),
+	PIN_FIELD_BASE(122, 122, 8, 0x010, 0x10, 0, 1),
+	PIN_FIELD_BASE(123, 123, 8, 0x010, 0x10, 1, 1),
+	PIN_FIELD_BASE(124, 124, 8, 0x010, 0x10, 2, 1),
+	PINS_FIELD_BASE(125, 130, 8, 0x010, 0x10, 1, 1),
+	PIN_FIELD_BASE(131, 131, 8, 0x010, 0x10, 3, 1),
+	PIN_FIELD_BASE(132, 132, 8, 0x010, 0x10, 1, 1),
+	PIN_FIELD_BASE(133, 133, 8, 0x010, 0x10, 4, 1),
+	PIN_FIELD_BASE(134, 134, 1, 0x010, 0x10, 0, 1),
+	PIN_FIELD_BASE(135, 135, 1, 0x010, 0x10, 1, 1),
+	PINS_FIELD_BASE(136, 143, 1, 0x010, 0x10, 2, 1),
+	PINS_FIELD_BASE(144, 147, 1, 0x010, 0x10, 4, 1),
+	PIN_FIELD_BASE(148, 148, 1, 0x010, 0x10, 5, 1),
+	PIN_FIELD_BASE(149, 149, 1, 0x010, 0x10, 6, 1),
+	PINS_FIELD_BASE(150, 153, 1, 0x010, 0x10, 8, 1),
+	PIN_FIELD_BASE(154, 154, 1, 0x010, 0x10, 9, 1),
+	PINS_FIELD_BASE(155, 157, 1, 0x010, 0x10, 10, 1),
+	PINS_FIELD_BASE(158, 160, 1, 0x010, 0x10, 8, 1),
+	PINS_FIELD_BASE(161, 164, 2, 0x010, 0x10, 0, 1),
+	PINS_FIELD_BASE(165, 166, 2, 0x010, 0x10, 1, 1),
+	PINS_FIELD_BASE(167, 168, 4, 0x010, 0x10, 2, 1),
+	PIN_FIELD_BASE(169, 169, 4, 0x010, 0x10, 3, 1),
+	PINS_FIELD_BASE(170, 174, 4, 0x010, 0x10, 4, 1),
+	PINS_FIELD_BASE(175, 176, 4, 0x010, 0x10, 3, 1),
+	PINS_FIELD_BASE(177, 179, 6, 0x010, 0x10, 4, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_pullen_range[] = {
+	PIN_FIELD_BASE(0, 3, 6, 0x060, 0x10, 6, 1),
+	PIN_FIELD_BASE(4, 7, 6, 0x060, 0x10, 11, 1),
+	PIN_FIELD_BASE(8, 8, 6, 0x060, 0x10, 0, 1),
+	PIN_FIELD_BASE(9, 10, 6, 0x060, 0x10, 26, 1),
+	PIN_FIELD_BASE(11, 11, 1, 0x060, 0x10, 10, 1),
+	PIN_FIELD_BASE(12, 12, 1, 0x060, 0x10, 17, 1),
+	PIN_FIELD_BASE(13, 28, 2, 0x060, 0x10, 6, 1),
+	PIN_FIELD_BASE(29, 34, 2, 0x060, 0x10, 22, 1),
+	PIN_FIELD_BASE(43, 49, 3, 0x060, 0x10, 8, 1),
+	PIN_FIELD_BASE(50, 60, 4, 0x060, 0x10, 0, 1),
+	PIN_FIELD_BASE(61, 88, 5, 0x060, 0x10, 0, 1),
+	PIN_FIELD_BASE(89, 89, 6, 0x060, 0x10, 24, 1),
+	PIN_FIELD_BASE(90, 90, 6, 0x060, 0x10, 1, 1),
+	PIN_FIELD_BASE(95, 95, 6, 0x060, 0x10, 15, 1),
+	PIN_FIELD_BASE(96, 102, 6, 0x060, 0x10, 17, 1),
+	PIN_FIELD_BASE(103, 106, 6, 0x060, 0x10, 28, 1),
+	PIN_FIELD_BASE(107, 121, 7, 0x060, 0x10, 0, 1),
+	PIN_FIELD_BASE(122, 133, 8, 0x060, 0x10, 0, 1),
+	PIN_FIELD_BASE(134, 143, 1, 0x060, 0x10, 0, 1),
+	PIN_FIELD_BASE(144, 149, 1, 0x060, 0x10, 11, 1),
+	PIN_FIELD_BASE(150, 160, 1, 0x060, 0x10, 18, 1),
+	PIN_FIELD_BASE(161, 166, 2, 0x060, 0x10, 0, 1),
+	PIN_FIELD_BASE(167, 176, 4, 0x060, 0x10, 11, 1),
+	PIN_FIELD_BASE(177, 177, 6, 0x060, 0x10, 10, 1),
+	PIN_FIELD_BASE(178, 178, 6, 0x060, 0x10, 16, 1),
+	PIN_FIELD_BASE(179, 179, 6, 0x060, 0x10, 25, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_pullsel_range[] = {
+	PIN_FIELD_BASE(0, 3, 6, 0x080, 0x10, 6, 1),
+	PIN_FIELD_BASE(4, 7, 6, 0x080, 0x10, 11, 1),
+	PIN_FIELD_BASE(8, 8, 6, 0x080, 0x10, 0, 1),
+	PIN_FIELD_BASE(9, 10, 6, 0x080, 0x10, 26, 1),
+	PIN_FIELD_BASE(11, 11, 1, 0x080, 0x10, 10, 1),
+	PIN_FIELD_BASE(12, 12, 1, 0x080, 0x10, 17, 1),
+	PIN_FIELD_BASE(13, 28, 2, 0x080, 0x10, 6, 1),
+	PIN_FIELD_BASE(29, 34, 2, 0x080, 0x10, 22, 1),
+	PIN_FIELD_BASE(43, 49, 3, 0x080, 0x10, 8, 1),
+	PIN_FIELD_BASE(50, 60, 4, 0x080, 0x10, 0, 1),
+	PIN_FIELD_BASE(61, 88, 5, 0x080, 0x10, 0, 1),
+	PIN_FIELD_BASE(89, 89, 6, 0x080, 0x10, 24, 1),
+	PIN_FIELD_BASE(90, 90, 6, 0x080, 0x10, 1, 1),
+	PIN_FIELD_BASE(95, 95, 6, 0x080, 0x10, 15, 1),
+	PIN_FIELD_BASE(96, 102, 6, 0x080, 0x10, 17, 1),
+	PIN_FIELD_BASE(103, 106, 6, 0x080, 0x10, 28, 1),
+	PIN_FIELD_BASE(107, 121, 7, 0x080, 0x10, 0, 1),
+	PIN_FIELD_BASE(122, 133, 8, 0x080, 0x10, 0, 1),
+	PIN_FIELD_BASE(134, 143, 1, 0x080, 0x10, 0, 1),
+	PIN_FIELD_BASE(144, 149, 1, 0x080, 0x10, 11, 1),
+	PIN_FIELD_BASE(150, 160, 1, 0x080, 0x10, 18, 1),
+	PIN_FIELD_BASE(161, 166, 2, 0x080, 0x10, 0, 1),
+	PIN_FIELD_BASE(167, 176, 4, 0x080, 0x10, 11, 1),
+	PIN_FIELD_BASE(177, 177, 6, 0x080, 0x10, 10, 1),
+	PIN_FIELD_BASE(178, 178, 6, 0x080, 0x10, 16, 1),
+	PIN_FIELD_BASE(179, 179, 6, 0x080, 0x10, 25, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_drv_range[] = {
+	PINS_FIELD_BASE(0, 3, 6, 0x0A0, 0x10, 12, 3),
+	PINS_FIELD_BASE(4, 7, 6, 0x0A0, 0x10, 20, 3),
+	PIN_FIELD_BASE(8, 8, 6, 0x0A0, 0x10, 0, 3),
+	PINS_FIELD_BASE(9, 10, 6, 0x0B0, 0x10, 16, 3),
+	PIN_FIELD_BASE(11, 11, 1, 0x0A0, 0x10, 12, 3),
+	PIN_FIELD_BASE(12, 12, 1, 0x0A0, 0x10, 28, 3),
+	PINS_FIELD_BASE(13, 16, 2, 0x0A0, 0x10, 8, 3),
+	PINS_FIELD_BASE(17, 20, 2, 0x0A0, 0x10, 12, 3),
+	PINS_FIELD_BASE(21, 24, 2, 0x0A0, 0x10, 16, 3),
+	PINS_FIELD_BASE(25, 28, 2, 0x0A0, 0x10, 20, 3),
+	PIN_FIELD_BASE(29, 29, 2, 0x0A0, 0x10, 24, 3),
+	PIN_FIELD_BASE(30, 30, 2, 0x0A0, 0x10, 28, 3),
+	PINS_FIELD_BASE(31, 31, 2, 0x0B0, 0x10, 0, 3),
+	PINS_FIELD_BASE(32, 34, 2, 0x0A0, 0x10, 28, 3),
+	PINS_FIELD_BASE(35, 37, 3, 0x0A0, 0x10, 0, 3),
+	PINS_FIELD_BASE(38, 40, 3, 0x0A0, 0x10, 4, 3),
+	PINS_FIELD_BASE(41, 42, 3, 0x0A0, 0x10, 8, 3),
+	PINS_FIELD_BASE(43, 45, 3, 0x0A0, 0x10, 12, 3),
+	PINS_FIELD_BASE(46, 47, 3, 0x0A0, 0x10, 16, 3),
+	PINS_FIELD_BASE(48, 49, 3, 0x0A0, 0x10, 20, 3),
+	PINS_FIELD_BASE(50, 51, 4, 0x0A0, 0x10, 0, 3),
+	PINS_FIELD_BASE(52, 57, 4, 0x0A0, 0x10, 4, 3),
+	PINS_FIELD_BASE(58, 60, 4, 0x0A0, 0x10, 8, 3),
+	PINS_FIELD_BASE(61, 64, 5, 0x0A0, 0x10, 0, 3),
+	PINS_FIELD_BASE(65, 66, 5, 0x0A0, 0x10, 4, 3),
+	PINS_FIELD_BASE(67, 68, 5, 0x0A0, 0x10, 8, 3),
+	PINS_FIELD_BASE(69, 71, 5, 0x0A0, 0x10, 12, 3),
+	PINS_FIELD_BASE(72, 76, 5, 0x0A0, 0x10, 16, 3),
+	PINS_FIELD_BASE(77, 80, 5, 0x0A0, 0x10, 20, 3),
+	PIN_FIELD_BASE(81, 81, 5, 0x0A0, 0x10, 24, 3),
+	PINS_FIELD_BASE(82, 83, 5, 0x0A0, 0x10, 28, 3),
+	PIN_FIELD_BASE(84, 84, 5, 0x0A0, 0x10, 24, 3),
+	PINS_FIELD_BASE(85, 88, 5, 0x0B0, 0x10, 0, 3),
+	PIN_FIELD_BASE(89, 89, 6, 0x0B0, 0x10, 12, 3),
+	PIN_FIELD_BASE(90, 90, 6, 0x0A0, 0x10, 4, 3),
+	PINS_FIELD_BASE(91, 94, 6, 0x0A0, 0x10, 8, 3),
+	PINS_FIELD_BASE(95, 96, 6, 0x0A0, 0x10, 24, 3),
+	PINS_FIELD_BASE(97, 98, 6, 0x0A0, 0x10, 28, 3),
+	PIN_FIELD_BASE(99, 99, 6, 0x0B0, 0x10, 0, 3),
+	PIN_FIELD_BASE(100, 100, 6, 0x0B0, 0x10, 4, 3),
+	PINS_FIELD_BASE(101, 102, 6, 0x0B0, 0x10, 8, 3),
+	PINS_FIELD_BASE(103, 104, 6, 0x0B0, 0x10, 20, 3),
+	PINS_FIELD_BASE(105, 106, 6, 0x0B0, 0x10, 24, 3),
+	PIN_FIELD_BASE(107, 107, 7, 0x0A0, 0x10, 0, 3),
+	PIN_FIELD_BASE(108, 108, 7, 0x0A0, 0x10, 4, 3),
+	PIN_FIELD_BASE(109, 109, 7, 0x0A0, 0x10, 8, 3),
+	PIN_FIELD_BASE(110, 110, 7, 0x0A0, 0x10, 0, 3),
+	PIN_FIELD_BASE(111, 111, 7, 0x0A0, 0x10, 4, 3),
+	PIN_FIELD_BASE(112, 112, 7, 0x0A0, 0x10, 8, 3),
+	PIN_FIELD_BASE(113, 113, 7, 0x0A0, 0x10, 16, 3),
+	PIN_FIELD_BASE(114, 114, 7, 0x0A0, 0x10, 20, 3),
+	PIN_FIELD_BASE(115, 115, 7, 0x0A0, 0x10, 24, 3),
+	PIN_FIELD_BASE(116, 116, 7, 0x0A0, 0x10, 28, 3),
+	PIN_FIELD_BASE(117, 117, 7, 0x0B0, 0x10, 0, 3),
+	PIN_FIELD_BASE(118, 118, 7, 0x0B0, 0x10, 4, 3),
+	PIN_FIELD_BASE(119, 119, 7, 0x0B0, 0x10, 8, 3),
+	PIN_FIELD_BASE(120, 120, 7, 0x0B0, 0x10, 12, 3),
+	PIN_FIELD_BASE(121, 121, 7, 0x0B0, 0x10, 16, 3),
+	PIN_FIELD_BASE(122, 122, 8, 0x0A0, 0x10, 0, 3),
+	PIN_FIELD_BASE(123, 123, 8, 0x0A0, 0x10, 4, 3),
+	PIN_FIELD_BASE(124, 124, 8, 0x0A0, 0x10, 8, 3),
+	PINS_FIELD_BASE(125, 130, 8, 0x0A0, 0x10, 4, 3),
+	PIN_FIELD_BASE(131, 131, 8, 0x0A0, 0x10, 12, 3),
+	PIN_FIELD_BASE(132, 132, 8, 0x0A0, 0x10, 4, 3),
+	PIN_FIELD_BASE(133, 133, 8, 0x0A0, 0x10, 16, 3),
+	PIN_FIELD_BASE(134, 134, 1, 0x0A0, 0x10, 0, 3),
+	PIN_FIELD_BASE(135, 135, 1, 0x0A0, 0x10, 4, 3),
+	PINS_FIELD_BASE(136, 143, 1, 0x0A0, 0x10, 8, 3),
+	PINS_FIELD_BASE(144, 147, 1, 0x0A0, 0x10, 16, 3),
+	PIN_FIELD_BASE(148, 148, 1, 0x0A0, 0x10, 20, 3),
+	PIN_FIELD_BASE(149, 149, 1, 0x0A0, 0x10, 24, 3),
+	PINS_FIELD_BASE(150, 153, 1, 0x0B0, 0x10, 0, 3),
+	PIN_FIELD_BASE(154, 154, 1, 0x0B0, 0x10, 4, 3),
+	PINS_FIELD_BASE(155, 157, 1, 0x0B0, 0x10, 8, 3),
+	PINS_FIELD_BASE(158, 160, 1, 0x0B0, 0x10, 0, 3),
+	PINS_FIELD_BASE(161, 164, 2, 0x0A0, 0x10, 0, 3),
+	PINS_FIELD_BASE(165, 166, 2, 0x0A0, 0x10, 4, 3),
+	PINS_FIELD_BASE(167, 168, 4, 0x0A0, 0x10, 8, 3),
+	PIN_FIELD_BASE(169, 169, 4, 0x0A0, 0x10, 12, 3),
+	PINS_FIELD_BASE(170, 174, 4, 0x0A0, 0x10, 16, 3),
+	PINS_FIELD_BASE(175, 176, 4, 0x0A0, 0x10, 12, 3),
+	PINS_FIELD_BASE(177, 179, 6, 0x0A0, 0x10, 16, 3),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_pupd_range[] = {
+	PIN_FIELD_BASE(29, 29, 2, 0x0C0, 0x10, 2, 1),
+	PIN_FIELD_BASE(30, 30, 2, 0x0C0, 0x10, 6, 1),
+	PIN_FIELD_BASE(31, 31, 2, 0x0C0, 0x10, 10, 1),
+	PIN_FIELD_BASE(32, 32, 2, 0x0C0, 0x10, 14, 1),
+	PIN_FIELD_BASE(33, 33, 2, 0x0C0, 0x10, 18, 1),
+	PIN_FIELD_BASE(34, 34, 2, 0x0C0, 0x10, 22, 1),
+	PIN_FIELD_BASE(35, 35, 3, 0x0C0, 0x10, 2, 1),
+	PIN_FIELD_BASE(36, 36, 3, 0x0C0, 0x10, 6, 1),
+	PIN_FIELD_BASE(37, 37, 3, 0x0C0, 0x10, 10, 1),
+	PIN_FIELD_BASE(38, 38, 3, 0x0C0, 0x10, 14, 1),
+	PIN_FIELD_BASE(39, 39, 3, 0x0C0, 0x10, 18, 1),
+	PIN_FIELD_BASE(40, 40, 3, 0x0C0, 0x10, 22, 1),
+	PIN_FIELD_BASE(41, 41, 3, 0x0C0, 0x10, 26, 1),
+	PIN_FIELD_BASE(42, 42, 3, 0x0C0, 0x10, 30, 1),
+	PIN_FIELD_BASE(91, 91, 6, 0x0C0, 0x10, 2, 1),
+	PIN_FIELD_BASE(92, 92, 6, 0x0C0, 0x10, 6, 1),
+	PIN_FIELD_BASE(93, 93, 6, 0x0C0, 0x10, 10, 1),
+	PIN_FIELD_BASE(94, 94, 6, 0x0C0, 0x10, 14, 1),
+	PIN_FIELD_BASE(122, 122, 8, 0x0C0, 0x10, 2, 1),
+	PIN_FIELD_BASE(123, 123, 8, 0x0C0, 0x10, 6, 1),
+	PIN_FIELD_BASE(124, 124, 8, 0x0C0, 0x10, 10, 1),
+	PIN_FIELD_BASE(125, 125, 8, 0x0C0, 0x10, 14, 1),
+	PIN_FIELD_BASE(126, 126, 8, 0x0C0, 0x10, 18, 1),
+	PIN_FIELD_BASE(127, 127, 8, 0x0C0, 0x10, 22, 1),
+	PIN_FIELD_BASE(128, 128, 8, 0x0C0, 0x10, 26, 1),
+	PIN_FIELD_BASE(129, 129, 8, 0x0C0, 0x10, 30, 1),
+	PIN_FIELD_BASE(130, 130, 8, 0x0D0, 0x10, 2, 1),
+	PIN_FIELD_BASE(131, 131, 8, 0x0D0, 0x10, 6, 1),
+	PIN_FIELD_BASE(132, 132, 8, 0x0D0, 0x10, 10, 1),
+	PIN_FIELD_BASE(133, 133, 8, 0x0D0, 0x10, 14, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_r0_range[] = {
+	PIN_FIELD_BASE(29, 29, 2, 0x0C0, 0x10, 0, 1),
+	PIN_FIELD_BASE(30, 30, 2, 0x0C0, 0x10, 4, 1),
+	PIN_FIELD_BASE(31, 31, 2, 0x0C0, 0x10, 8, 1),
+	PIN_FIELD_BASE(32, 32, 2, 0x0C0, 0x10, 12, 1),
+	PIN_FIELD_BASE(33, 33, 2, 0x0C0, 0x10, 16, 1),
+	PIN_FIELD_BASE(34, 34, 2, 0x0C0, 0x10, 20, 1),
+	PIN_FIELD_BASE(35, 35, 3, 0x0C0, 0x10, 0, 1),
+	PIN_FIELD_BASE(36, 36, 3, 0x0C0, 0x10, 4, 1),
+	PIN_FIELD_BASE(37, 37, 3, 0x0C0, 0x10, 8, 1),
+	PIN_FIELD_BASE(38, 38, 3, 0x0C0, 0x10, 12, 1),
+	PIN_FIELD_BASE(39, 39, 3, 0x0C0, 0x10, 16, 1),
+	PIN_FIELD_BASE(40, 40, 3, 0x0C0, 0x10, 20, 1),
+	PIN_FIELD_BASE(41, 41, 3, 0x0C0, 0x10, 24, 1),
+	PIN_FIELD_BASE(42, 42, 3, 0x0C0, 0x10, 28, 1),
+	PIN_FIELD_BASE(48, 48, 3, 0x0F0, 0x10, 18, 1),
+	PIN_FIELD_BASE(49, 49, 3, 0x0F0, 0x10, 13, 1),
+	PIN_FIELD_BASE(50, 50, 4, 0x0F0, 0x10, 10, 1),
+	PIN_FIELD_BASE(51, 51, 4, 0x0F0, 0x10, 5, 1),
+	PIN_FIELD_BASE(81, 81, 5, 0x0F0, 0x10, 7, 1),
+	PIN_FIELD_BASE(82, 82, 5, 0x0F0, 0x10, 5, 1),
+	PIN_FIELD_BASE(83, 83, 5, 0x0F0, 0x10, 15, 1),
+	PIN_FIELD_BASE(84, 84, 5, 0x0F0, 0x10, 17, 1),
+	PIN_FIELD_BASE(91, 91, 6, 0x0C0, 0x10, 0, 1),
+	PIN_FIELD_BASE(92, 92, 6, 0x0C0, 0x10, 4, 1),
+	PIN_FIELD_BASE(93, 93, 6, 0x0C0, 0x10, 8, 1),
+	PIN_FIELD_BASE(94, 94, 6, 0x0C0, 0x10, 12, 1),
+	PIN_FIELD_BASE(103, 103, 6, 0x0F0, 0x10, 20, 1),
+	PIN_FIELD_BASE(104, 104, 6, 0x0F0, 0x10, 10, 1),
+	PIN_FIELD_BASE(105, 105, 6, 0x0F0, 0x10, 22, 1),
+	PIN_FIELD_BASE(106, 106, 6, 0x0F0, 0x10, 12, 1),
+	PIN_FIELD_BASE(122, 122, 8, 0x0C0, 0x10, 0, 1),
+	PIN_FIELD_BASE(123, 123, 8, 0x0C0, 0x10, 4, 1),
+	PIN_FIELD_BASE(124, 124, 8, 0x0C0, 0x10, 8, 1),
+	PIN_FIELD_BASE(125, 125, 8, 0x0C0, 0x10, 12, 1),
+	PIN_FIELD_BASE(126, 126, 8, 0x0C0, 0x10, 16, 1),
+	PIN_FIELD_BASE(127, 127, 8, 0x0C0, 0x10, 20, 1),
+	PIN_FIELD_BASE(128, 128, 8, 0x0C0, 0x10, 24, 1),
+	PIN_FIELD_BASE(129, 129, 8, 0x0C0, 0x10, 28, 1),
+	PIN_FIELD_BASE(130, 130, 8, 0x0D0, 0x10, 0, 1),
+	PIN_FIELD_BASE(131, 131, 8, 0x0D0, 0x10, 4, 1),
+	PIN_FIELD_BASE(132, 132, 8, 0x0D0, 0x10, 8, 1),
+	PIN_FIELD_BASE(133, 133, 8, 0x0D0, 0x10, 12, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_r1_range[] = {
+	PIN_FIELD_BASE(29, 29, 2, 0x0C0, 0x10, 1, 1),
+	PIN_FIELD_BASE(30, 30, 2, 0x0C0, 0x10, 5, 1),
+	PIN_FIELD_BASE(31, 31, 2, 0x0C0, 0x10, 9, 1),
+	PIN_FIELD_BASE(32, 32, 2, 0x0C0, 0x10, 13, 1),
+	PIN_FIELD_BASE(33, 33, 2, 0x0C0, 0x10, 17, 1),
+	PIN_FIELD_BASE(34, 34, 2, 0x0C0, 0x10, 21, 1),
+	PIN_FIELD_BASE(35, 35, 3, 0x0C0, 0x10, 1, 1),
+	PIN_FIELD_BASE(36, 36, 3, 0x0C0, 0x10, 5, 1),
+	PIN_FIELD_BASE(37, 37, 3, 0x0C0, 0x10, 9, 1),
+	PIN_FIELD_BASE(38, 38, 3, 0x0C0, 0x10, 13, 1),
+	PIN_FIELD_BASE(39, 39, 3, 0x0C0, 0x10, 17, 1),
+	PIN_FIELD_BASE(40, 40, 3, 0x0C0, 0x10, 21, 1),
+	PIN_FIELD_BASE(41, 41, 3, 0x0C0, 0x10, 25, 1),
+	PIN_FIELD_BASE(42, 42, 3, 0x0C0, 0x10, 29, 1),
+	PIN_FIELD_BASE(48, 48, 3, 0x0F0, 0x10, 19, 1),
+	PIN_FIELD_BASE(49, 49, 3, 0x0F0, 0x10, 14, 1),
+	PIN_FIELD_BASE(50, 50, 4, 0x0F0, 0x10, 11, 1),
+	PIN_FIELD_BASE(51, 51, 4, 0x0F0, 0x10, 6, 1),
+	PIN_FIELD_BASE(81, 81, 5, 0x0F0, 0x10, 8, 1),
+	PIN_FIELD_BASE(82, 82, 5, 0x0F0, 0x10, 6, 1),
+	PIN_FIELD_BASE(83, 83, 5, 0x0F0, 0x10, 16, 1),
+	PIN_FIELD_BASE(84, 84, 5, 0x0F0, 0x10, 18, 1),
+	PIN_FIELD_BASE(91, 91, 6, 0x0C0, 0x10, 1, 1),
+	PIN_FIELD_BASE(92, 92, 6, 0x0C0, 0x10, 5, 1),
+	PIN_FIELD_BASE(93, 93, 6, 0x0C0, 0x10, 9, 1),
+	PIN_FIELD_BASE(94, 94, 6, 0x0C0, 0x10, 13, 1),
+	PIN_FIELD_BASE(103, 103, 6, 0x0F0, 0x10, 21, 1),
+	PIN_FIELD_BASE(104, 104, 6, 0x0F0, 0x10, 11, 1),
+	PIN_FIELD_BASE(105, 105, 6, 0x0F0, 0x10, 23, 1),
+	PIN_FIELD_BASE(106, 106, 6, 0x0F0, 0x10, 13, 1),
+	PIN_FIELD_BASE(122, 122, 8, 0x0C0, 0x10, 1, 1),
+	PIN_FIELD_BASE(123, 123, 8, 0x0C0, 0x10, 5, 1),
+	PIN_FIELD_BASE(124, 124, 8, 0x0C0, 0x10, 9, 1),
+	PIN_FIELD_BASE(125, 125, 8, 0x0C0, 0x10, 13, 1),
+	PIN_FIELD_BASE(126, 126, 8, 0x0C0, 0x10, 17, 1),
+	PIN_FIELD_BASE(127, 127, 8, 0x0C0, 0x10, 21, 1),
+	PIN_FIELD_BASE(128, 128, 8, 0x0C0, 0x10, 25, 1),
+	PIN_FIELD_BASE(129, 129, 8, 0x0C0, 0x10, 29, 1),
+	PIN_FIELD_BASE(130, 130, 8, 0x0D0, 0x10, 1, 1),
+	PIN_FIELD_BASE(131, 131, 8, 0x0D0, 0x10, 5, 1),
+	PIN_FIELD_BASE(132, 132, 8, 0x0D0, 0x10, 9, 1),
+	PIN_FIELD_BASE(133, 133, 8, 0x0D0, 0x10, 13, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_e1e0en_range[] = {
+	PIN_FIELD_BASE(48, 48, 3, 0x0F0, 0x10, 20, 1),
+	PIN_FIELD_BASE(49, 49, 3, 0x0F0, 0x10, 15, 1),
+	PIN_FIELD_BASE(50, 50, 4, 0x0F0, 0x10, 12, 1),
+	PIN_FIELD_BASE(51, 51, 4, 0x0F0, 0x10, 7, 1),
+	PIN_FIELD_BASE(81, 81, 5, 0x0F0, 0x10, 12, 1),
+	PIN_FIELD_BASE(82, 82, 5, 0x0F0, 0x10, 9, 1),
+	PIN_FIELD_BASE(83, 83, 5, 0x0F0, 0x10, 19, 1),
+	PIN_FIELD_BASE(84, 84, 5, 0x0F0, 0x10, 22, 1),
+	PIN_FIELD_BASE(103, 103, 6, 0x0F0, 0x10, 24, 1),
+	PIN_FIELD_BASE(104, 104, 6, 0x0F0, 0x10, 14, 1),
+	PIN_FIELD_BASE(105, 105, 6, 0x0F0, 0x10, 27, 1),
+	PIN_FIELD_BASE(106, 106, 6, 0x0F0, 0x10, 17, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_e0_range[] = {
+	PIN_FIELD_BASE(48, 48, 3, 0x0F0, 0x10, 21, 1),
+	PIN_FIELD_BASE(49, 49, 3, 0x0F0, 0x10, 16, 1),
+	PIN_FIELD_BASE(50, 50, 4, 0x0F0, 0x10, 13, 1),
+	PIN_FIELD_BASE(51, 51, 4, 0x0F0, 0x10, 8, 1),
+	PIN_FIELD_BASE(81, 81, 5, 0x0F0, 0x10, 13, 1),
+	PIN_FIELD_BASE(82, 82, 5, 0x0F0, 0x10, 10, 1),
+	PIN_FIELD_BASE(83, 83, 5, 0x0F0, 0x10, 20, 1),
+	PIN_FIELD_BASE(84, 84, 5, 0x0F0, 0x10, 23, 1),
+	PIN_FIELD_BASE(103, 103, 6, 0x0F0, 0x10, 25, 1),
+	PIN_FIELD_BASE(104, 104, 6, 0x0F0, 0x10, 15, 1),
+	PIN_FIELD_BASE(105, 105, 6, 0x0F0, 0x10, 28, 1),
+	PIN_FIELD_BASE(106, 106, 6, 0x0F0, 0x10, 18, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_e1_range[] = {
+	PIN_FIELD_BASE(48, 48, 3, 0x0F0, 0x10, 22, 1),
+	PIN_FIELD_BASE(49, 49, 3, 0x0F0, 0x10, 17, 1),
+	PIN_FIELD_BASE(50, 50, 4, 0x0F0, 0x10, 14, 1),
+	PIN_FIELD_BASE(51, 51, 4, 0x0F0, 0x10, 9, 1),
+	PIN_FIELD_BASE(81, 81, 5, 0x0F0, 0x10, 14, 1),
+	PIN_FIELD_BASE(82, 82, 5, 0x0F0, 0x10, 11, 1),
+	PIN_FIELD_BASE(83, 83, 5, 0x0F0, 0x10, 21, 1),
+	PIN_FIELD_BASE(84, 84, 5, 0x0F0, 0x10, 24, 1),
+	PIN_FIELD_BASE(103, 103, 6, 0x0F0, 0x10, 26, 1),
+	PIN_FIELD_BASE(104, 104, 6, 0x0F0, 0x10, 16, 1),
+	PIN_FIELD_BASE(105, 105, 6, 0x0F0, 0x10, 29, 1),
+	PIN_FIELD_BASE(106, 106, 6, 0x0F0, 0x10, 19, 1),
+};
+
+static const struct mtk_pin_reg_calc mt8183_reg_cals[PINCTRL_PIN_REG_MAX] = {
+	[PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt8183_pin_mode_range),
+	[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8183_pin_dir_range),
+	[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8183_pin_di_range),
+	[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8183_pin_do_range),
+	[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8183_pin_smt_range),
+	[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8183_pin_ies_range),
+	[PINCTRL_PIN_REG_PULLEN] = MTK_RANGE(mt8183_pin_pullen_range),
+	[PINCTRL_PIN_REG_PULLSEL] = MTK_RANGE(mt8183_pin_pullsel_range),
+	[PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt8183_pin_drv_range),
+	[PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8183_pin_pupd_range),
+	[PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8183_pin_r0_range),
+	[PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8183_pin_r1_range),
+	[PINCTRL_PIN_REG_DRV_EN] = MTK_RANGE(mt8183_pin_e1e0en_range),
+	[PINCTRL_PIN_REG_DRV_E0] = MTK_RANGE(mt8183_pin_e0_range),
+	[PINCTRL_PIN_REG_DRV_E1] = MTK_RANGE(mt8183_pin_e1_range),
+};
+
+static const char * const mt8183_pinctrl_register_base_names[] = {
+	"iocfg0", "iocfg1", "iocfg2", "iocfg3", "iocfg4", "iocfg5",
+	"iocfg6", "iocfg7", "iocfg8",
+};
+
+static const struct mtk_eint_hw mt8183_eint_hw = {
+	.port_mask = 7,
+	.ports     = 6,
+	.ap_num    = 212,
+	.db_cnt    = 13,
+};
+
+static const struct mtk_pin_soc mt8183_data = {
+	.reg_cal = mt8183_reg_cals,
+	.pins = mtk_pins_mt8183,
+	.npins = ARRAY_SIZE(mtk_pins_mt8183),
+	.ngrps = ARRAY_SIZE(mtk_pins_mt8183),
+	.eint_hw = &mt8183_eint_hw,
+	.gpio_m = 0,
+	.ies_present = true,
+	.base_names = mt8183_pinctrl_register_base_names,
+	.nbase_names = ARRAY_SIZE(mt8183_pinctrl_register_base_names),
+	.bias_disable_set = mtk_pinconf_bias_disable_set_rev1,
+	.bias_disable_get = mtk_pinconf_bias_disable_get_rev1,
+	.bias_set = mtk_pinconf_bias_set_rev1,
+	.bias_get = mtk_pinconf_bias_get_rev1,
+	.drive_set = mtk_pinconf_drive_set_rev1,
+	.drive_get = mtk_pinconf_drive_get_rev1,
+	.adv_pull_get = mtk_pinconf_adv_pull_get,
+	.adv_pull_set = mtk_pinconf_adv_pull_set,
+	.adv_drive_get = mtk_pinconf_adv_drive_get,
+	.adv_drive_set = mtk_pinconf_adv_drive_set,
+};
+
+static const struct of_device_id mt8183_pinctrl_of_match[] = {
+	{ .compatible = "mediatek,mt8183-pinctrl", },
+	{ }
+};
+
+static int mt8183_pinctrl_probe(struct platform_device *pdev)
+{
+	return mtk_paris_pinctrl_probe(pdev, &mt8183_data);
+}
+
+static struct platform_driver mt8183_pinctrl_driver = {
+	.driver = {
+		.name = "mt8183-pinctrl",
+		.of_match_table = mt8183_pinctrl_of_match,
+	},
+	.probe = mt8183_pinctrl_probe,
+};
+
+static int __init mt8183_pinctrl_init(void)
+{
+	return platform_driver_register(&mt8183_pinctrl_driver);
+}
+arch_initcall(mt8183_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8516.c b/drivers/pinctrl/mediatek/pinctrl-mt8516.c
new file mode 100644
index 0000000..b375426
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8516.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Min.Guo <min.guo@mediatek.com>
+ */
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "pinctrl-mtk-common.h"
+#include "pinctrl-mtk-mt8516.h"
+
+static const struct mtk_drv_group_desc mt8516_drv_grp[] = {
+	/* 0E4E8SR 4/8/12/16 */
+	MTK_DRV_GRP(4, 16, 1, 2, 4),
+	/* 0E2E4SR  2/4/6/8 */
+	MTK_DRV_GRP(2, 8, 1, 2, 2),
+	/* E8E4E2  2/4/6/8/10/12/14/16 */
+	MTK_DRV_GRP(2, 16, 0, 2, 2)
+};
+
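+/* The MTK_PIN_DRV_GRP entries below map each pin to a drive-strength
+ * register, a bit offset, and an index into mt8516_drv_grp (assuming
+ * the usual MTK_PIN_DRV_GRP(_pin, _addr, _bit, _grp) layout). For
+ * example, pin 14 uses bits starting at 12 of register 0xd00 and group
+ * 2, i.e. the 2..16 mA E8E4E2 set.
+ */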
+static const struct mtk_pin_drv_grp mt8516_pin_drv[] = {
+	MTK_PIN_DRV_GRP(0, 0xd00, 0, 0),
+	MTK_PIN_DRV_GRP(1, 0xd00, 0, 0),
+	MTK_PIN_DRV_GRP(2, 0xd00, 0, 0),
+	MTK_PIN_DRV_GRP(3, 0xd00, 0, 0),
+	MTK_PIN_DRV_GRP(4, 0xd00, 0, 0),
+
+	MTK_PIN_DRV_GRP(5, 0xd00, 4, 0),
+	MTK_PIN_DRV_GRP(6, 0xd00, 4, 0),
+	MTK_PIN_DRV_GRP(7, 0xd00, 4, 0),
+	MTK_PIN_DRV_GRP(8, 0xd00, 4, 0),
+	MTK_PIN_DRV_GRP(9, 0xd00, 4, 0),
+	MTK_PIN_DRV_GRP(10, 0xd00, 4, 0),
+
+	MTK_PIN_DRV_GRP(11, 0xd00, 8, 0),
+	MTK_PIN_DRV_GRP(12, 0xd00, 8, 0),
+	MTK_PIN_DRV_GRP(13, 0xd00, 8, 0),
+
+	MTK_PIN_DRV_GRP(14, 0xd00, 12, 2),
+	MTK_PIN_DRV_GRP(15, 0xd00, 12, 2),
+	MTK_PIN_DRV_GRP(16, 0xd00, 12, 2),
+	MTK_PIN_DRV_GRP(17, 0xd00, 12, 2),
+
+	MTK_PIN_DRV_GRP(18, 0xd10, 0, 0),
+	MTK_PIN_DRV_GRP(19, 0xd10, 0, 0),
+	MTK_PIN_DRV_GRP(20, 0xd10, 0, 0),
+
+	MTK_PIN_DRV_GRP(21, 0xd00, 12, 2),
+	MTK_PIN_DRV_GRP(22, 0xd00, 12, 2),
+	MTK_PIN_DRV_GRP(23, 0xd00, 12, 2),
+
+	MTK_PIN_DRV_GRP(24, 0xd00, 8, 0),
+	MTK_PIN_DRV_GRP(25, 0xd00, 8, 0),
+
+	MTK_PIN_DRV_GRP(26, 0xd10, 4, 1),
+	MTK_PIN_DRV_GRP(27, 0xd10, 4, 1),
+	MTK_PIN_DRV_GRP(28, 0xd10, 4, 1),
+	MTK_PIN_DRV_GRP(29, 0xd10, 4, 1),
+	MTK_PIN_DRV_GRP(30, 0xd10, 4, 1),
+
+	MTK_PIN_DRV_GRP(31, 0xd10, 8, 1),
+	MTK_PIN_DRV_GRP(32, 0xd10, 8, 1),
+	MTK_PIN_DRV_GRP(33, 0xd10, 8, 1),
+
+	MTK_PIN_DRV_GRP(34, 0xd10, 12, 0),
+	MTK_PIN_DRV_GRP(35, 0xd10, 12, 0),
+
+	MTK_PIN_DRV_GRP(36, 0xd20, 0, 0),
+	MTK_PIN_DRV_GRP(37, 0xd20, 0, 0),
+	MTK_PIN_DRV_GRP(38, 0xd20, 0, 0),
+	MTK_PIN_DRV_GRP(39, 0xd20, 0, 0),
+
+	MTK_PIN_DRV_GRP(40, 0xd20, 4, 1),
+
+	MTK_PIN_DRV_GRP(41, 0xd20, 8, 1),
+	MTK_PIN_DRV_GRP(42, 0xd20, 8, 1),
+	MTK_PIN_DRV_GRP(43, 0xd20, 8, 1),
+
+	MTK_PIN_DRV_GRP(44, 0xd20, 12, 1),
+	MTK_PIN_DRV_GRP(45, 0xd20, 12, 1),
+	MTK_PIN_DRV_GRP(46, 0xd20, 12, 1),
+	MTK_PIN_DRV_GRP(47, 0xd20, 12, 1),
+
+	MTK_PIN_DRV_GRP(48, 0xd30, 0, 1),
+	MTK_PIN_DRV_GRP(49, 0xd30, 0, 1),
+	MTK_PIN_DRV_GRP(50, 0xd30, 0, 1),
+	MTK_PIN_DRV_GRP(51, 0xd30, 0, 1),
+
+	MTK_PIN_DRV_GRP(54, 0xd30, 8, 1),
+
+	MTK_PIN_DRV_GRP(55, 0xd30, 12, 1),
+	MTK_PIN_DRV_GRP(56, 0xd30, 12, 1),
+	MTK_PIN_DRV_GRP(57, 0xd30, 12, 1),
+
+	MTK_PIN_DRV_GRP(62, 0xd40, 8, 1),
+	MTK_PIN_DRV_GRP(63, 0xd40, 8, 1),
+	MTK_PIN_DRV_GRP(64, 0xd40, 8, 1),
+	MTK_PIN_DRV_GRP(65, 0xd40, 8, 1),
+	MTK_PIN_DRV_GRP(66, 0xd40, 8, 1),
+	MTK_PIN_DRV_GRP(67, 0xd40, 8, 1),
+
+	MTK_PIN_DRV_GRP(68, 0xd40, 12, 2),
+
+	MTK_PIN_DRV_GRP(69, 0xd50, 0, 2),
+
+	MTK_PIN_DRV_GRP(70, 0xd50, 4, 2),
+	MTK_PIN_DRV_GRP(71, 0xd50, 4, 2),
+	MTK_PIN_DRV_GRP(72, 0xd50, 4, 2),
+	MTK_PIN_DRV_GRP(73, 0xd50, 4, 2),
+
+	MTK_PIN_DRV_GRP(100, 0xd50, 8, 1),
+	MTK_PIN_DRV_GRP(101, 0xd50, 8, 1),
+	MTK_PIN_DRV_GRP(102, 0xd50, 8, 1),
+	MTK_PIN_DRV_GRP(103, 0xd50, 8, 1),
+
+	MTK_PIN_DRV_GRP(104, 0xd50, 12, 2),
+
+	MTK_PIN_DRV_GRP(105, 0xd60, 0, 2),
+
+	MTK_PIN_DRV_GRP(106, 0xd60, 4, 2),
+	MTK_PIN_DRV_GRP(107, 0xd60, 4, 2),
+	MTK_PIN_DRV_GRP(108, 0xd60, 4, 2),
+	MTK_PIN_DRV_GRP(109, 0xd60, 4, 2),
+
+	MTK_PIN_DRV_GRP(110, 0xd70, 0, 2),
+	MTK_PIN_DRV_GRP(111, 0xd70, 0, 2),
+	MTK_PIN_DRV_GRP(112, 0xd70, 0, 2),
+	MTK_PIN_DRV_GRP(113, 0xd70, 0, 2),
+
+	MTK_PIN_DRV_GRP(114, 0xd70, 4, 2),
+
+	MTK_PIN_DRV_GRP(115, 0xd60, 12, 2),
+
+	MTK_PIN_DRV_GRP(116, 0xd60, 8, 2),
+
+	MTK_PIN_DRV_GRP(117, 0xd70, 0, 2),
+	MTK_PIN_DRV_GRP(118, 0xd70, 0, 2),
+	MTK_PIN_DRV_GRP(119, 0xd70, 0, 2),
+	MTK_PIN_DRV_GRP(120, 0xd70, 0, 2),
+};
+
+static const struct mtk_pin_spec_pupd_set_samereg mt8516_spec_pupd[] = {
+	MTK_PIN_PUPD_SPEC_SR(14, 0xe50, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(15, 0xe60, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(16, 0xe60, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(17, 0xe60, 10, 9, 8),
+
+	MTK_PIN_PUPD_SPEC_SR(21, 0xe60, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(22, 0xe70, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(23, 0xe70, 6, 5, 4),
+
+	MTK_PIN_PUPD_SPEC_SR(40, 0xe80, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(41, 0xe80, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(42, 0xe90, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(43, 0xe90, 6, 5, 4),
+
+	MTK_PIN_PUPD_SPEC_SR(68, 0xe50, 10, 9, 8),
+	MTK_PIN_PUPD_SPEC_SR(69, 0xe50, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(70, 0xe40, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(71, 0xe40, 10, 9, 8),
+	MTK_PIN_PUPD_SPEC_SR(72, 0xe40, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(73, 0xe50, 2, 1, 0),
+
+	MTK_PIN_PUPD_SPEC_SR(104, 0xe40, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(105, 0xe30, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(106, 0xe20, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(107, 0xe30, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(108, 0xe30, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(109, 0xe30, 10, 9, 8),
+	MTK_PIN_PUPD_SPEC_SR(110, 0xe10, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(111, 0xe10, 10, 9, 8),
+	MTK_PIN_PUPD_SPEC_SR(112, 0xe10, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(113, 0xe10, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(114, 0xe20, 10, 9, 8),
+	MTK_PIN_PUPD_SPEC_SR(115, 0xe20, 2, 1, 0),
+	MTK_PIN_PUPD_SPEC_SR(116, 0xe20, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(117, 0xe00, 14, 13, 12),
+	MTK_PIN_PUPD_SPEC_SR(118, 0xe00, 10, 9, 8),
+	MTK_PIN_PUPD_SPEC_SR(119, 0xe00, 6, 5, 4),
+	MTK_PIN_PUPD_SPEC_SR(120, 0xe00, 2, 1, 0),
+};
+
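+/* A short decoding note (assuming the usual MTK_PIN_PUPD_SPEC_SR(_pin,
+ * _offset, _pupd, _r1, _r0) layout): the entry for pin 14 above puts
+ * its pull-up/down select at bit 14 of register 0xe50, with the R1/R0
+ * resistor-strength bits at bits 13 and 12 of the same register.
+ */
+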
+static int mt8516_spec_pull_set(struct regmap *regmap, unsigned int pin,
+			unsigned char align, bool isup, unsigned int r1r0)
+{
+	return mtk_pctrl_spec_pull_set_samereg(regmap, mt8516_spec_pupd,
+		ARRAY_SIZE(mt8516_spec_pupd), pin, align, isup, r1r0);
+}
+
+static const struct mtk_pin_ies_smt_set mt8516_ies_set[] = {
+	MTK_PIN_IES_SMT_SPEC(0, 6, 0x900, 2),
+	MTK_PIN_IES_SMT_SPEC(7, 10, 0x900, 3),
+	MTK_PIN_IES_SMT_SPEC(11, 13, 0x900, 12),
+	MTK_PIN_IES_SMT_SPEC(14, 17, 0x900, 13),
+	MTK_PIN_IES_SMT_SPEC(18, 20, 0x910, 10),
+	MTK_PIN_IES_SMT_SPEC(21, 23, 0x900, 13),
+	MTK_PIN_IES_SMT_SPEC(24, 25, 0x900, 12),
+	MTK_PIN_IES_SMT_SPEC(26, 30, 0x900, 0),
+	MTK_PIN_IES_SMT_SPEC(31, 33, 0x900, 1),
+	MTK_PIN_IES_SMT_SPEC(34, 39, 0x900, 2),
+	MTK_PIN_IES_SMT_SPEC(40, 40, 0x910, 11),
+	MTK_PIN_IES_SMT_SPEC(41, 43, 0x900, 10),
+	MTK_PIN_IES_SMT_SPEC(44, 47, 0x900, 11),
+	MTK_PIN_IES_SMT_SPEC(48, 51, 0x900, 14),
+	MTK_PIN_IES_SMT_SPEC(52, 53, 0x910, 0),
+	MTK_PIN_IES_SMT_SPEC(54, 54, 0x910, 2),
+	MTK_PIN_IES_SMT_SPEC(55, 57, 0x910, 4),
+	MTK_PIN_IES_SMT_SPEC(58, 59, 0x900, 15),
+	MTK_PIN_IES_SMT_SPEC(60, 61, 0x910, 1),
+	MTK_PIN_IES_SMT_SPEC(62, 65, 0x910, 5),
+	MTK_PIN_IES_SMT_SPEC(66, 67, 0x910, 6),
+	MTK_PIN_IES_SMT_SPEC(68, 68, 0x930, 2),
+	MTK_PIN_IES_SMT_SPEC(69, 69, 0x930, 1),
+	MTK_PIN_IES_SMT_SPEC(70, 70, 0x930, 6),
+	MTK_PIN_IES_SMT_SPEC(71, 71, 0x930, 5),
+	MTK_PIN_IES_SMT_SPEC(72, 72, 0x930, 4),
+	MTK_PIN_IES_SMT_SPEC(73, 73, 0x930, 3),
+	MTK_PIN_IES_SMT_SPEC(100, 103, 0x910, 7),
+	MTK_PIN_IES_SMT_SPEC(104, 104, 0x920, 12),
+	MTK_PIN_IES_SMT_SPEC(105, 105, 0x920, 11),
+	MTK_PIN_IES_SMT_SPEC(106, 106, 0x930, 0),
+	MTK_PIN_IES_SMT_SPEC(107, 107, 0x920, 15),
+	MTK_PIN_IES_SMT_SPEC(108, 108, 0x920, 14),
+	MTK_PIN_IES_SMT_SPEC(109, 109, 0x920, 13),
+	MTK_PIN_IES_SMT_SPEC(110, 110, 0x920, 9),
+	MTK_PIN_IES_SMT_SPEC(111, 111, 0x920, 8),
+	MTK_PIN_IES_SMT_SPEC(112, 112, 0x920, 7),
+	MTK_PIN_IES_SMT_SPEC(113, 113, 0x920, 6),
+	MTK_PIN_IES_SMT_SPEC(114, 114, 0x920, 10),
+	MTK_PIN_IES_SMT_SPEC(115, 115, 0x920, 1),
+	MTK_PIN_IES_SMT_SPEC(116, 116, 0x920, 0),
+	MTK_PIN_IES_SMT_SPEC(117, 117, 0x920, 5),
+	MTK_PIN_IES_SMT_SPEC(118, 118, 0x920, 4),
+	MTK_PIN_IES_SMT_SPEC(119, 119, 0x920, 3),
+	MTK_PIN_IES_SMT_SPEC(120, 120, 0x920, 2),
+	MTK_PIN_IES_SMT_SPEC(121, 124, 0x910, 9),
+};
+
+static const struct mtk_pin_ies_smt_set mt8516_smt_set[] = {
+	MTK_PIN_IES_SMT_SPEC(0, 6, 0xA00, 2),
+	MTK_PIN_IES_SMT_SPEC(7, 10, 0xA00, 3),
+	MTK_PIN_IES_SMT_SPEC(11, 13, 0xA00, 12),
+	MTK_PIN_IES_SMT_SPEC(14, 17, 0xA00, 13),
+	MTK_PIN_IES_SMT_SPEC(18, 20, 0xA10, 10),
+	MTK_PIN_IES_SMT_SPEC(21, 23, 0xA00, 13),
+	MTK_PIN_IES_SMT_SPEC(24, 25, 0xA00, 12),
+	MTK_PIN_IES_SMT_SPEC(26, 30, 0xA00, 0),
+	MTK_PIN_IES_SMT_SPEC(31, 33, 0xA00, 1),
+	MTK_PIN_IES_SMT_SPEC(34, 39, 0xA00, 2),
+	MTK_PIN_IES_SMT_SPEC(40, 40, 0xA10, 11),
+	MTK_PIN_IES_SMT_SPEC(41, 43, 0xA00, 10),
+	MTK_PIN_IES_SMT_SPEC(44, 47, 0xA00, 11),
+	MTK_PIN_IES_SMT_SPEC(48, 51, 0xA00, 14),
+	MTK_PIN_IES_SMT_SPEC(52, 53, 0xA10, 0),
+	MTK_PIN_IES_SMT_SPEC(54, 54, 0xA10, 2),
+	MTK_PIN_IES_SMT_SPEC(55, 57, 0xA10, 4),
+	MTK_PIN_IES_SMT_SPEC(58, 59, 0xA00, 15),
+	MTK_PIN_IES_SMT_SPEC(60, 61, 0xA10, 1),
+	MTK_PIN_IES_SMT_SPEC(62, 65, 0xA10, 5),
+	MTK_PIN_IES_SMT_SPEC(66, 67, 0xA10, 6),
+	MTK_PIN_IES_SMT_SPEC(68, 68, 0xA30, 2),
+	MTK_PIN_IES_SMT_SPEC(69, 69, 0xA30, 1),
+	MTK_PIN_IES_SMT_SPEC(70, 70, 0xA30, 3),
+	MTK_PIN_IES_SMT_SPEC(71, 71, 0xA30, 4),
+	MTK_PIN_IES_SMT_SPEC(72, 72, 0xA30, 5),
+	MTK_PIN_IES_SMT_SPEC(73, 73, 0xA30, 6),
+
+	MTK_PIN_IES_SMT_SPEC(100, 103, 0xA10, 7),
+	MTK_PIN_IES_SMT_SPEC(104, 104, 0xA20, 12),
+	MTK_PIN_IES_SMT_SPEC(105, 105, 0xA20, 11),
+	MTK_PIN_IES_SMT_SPEC(106, 106, 0xA30, 13),
+	MTK_PIN_IES_SMT_SPEC(107, 107, 0xA20, 14),
+	MTK_PIN_IES_SMT_SPEC(108, 108, 0xA20, 15),
+	MTK_PIN_IES_SMT_SPEC(109, 109, 0xA30, 0),
+	MTK_PIN_IES_SMT_SPEC(110, 110, 0xA20, 9),
+	MTK_PIN_IES_SMT_SPEC(111, 111, 0xA20, 8),
+	MTK_PIN_IES_SMT_SPEC(112, 112, 0xA20, 7),
+	MTK_PIN_IES_SMT_SPEC(113, 113, 0xA20, 6),
+	MTK_PIN_IES_SMT_SPEC(114, 114, 0xA20, 10),
+	MTK_PIN_IES_SMT_SPEC(115, 115, 0xA20, 1),
+	MTK_PIN_IES_SMT_SPEC(116, 116, 0xA20, 0),
+	MTK_PIN_IES_SMT_SPEC(117, 117, 0xA20, 5),
+	MTK_PIN_IES_SMT_SPEC(118, 118, 0xA20, 4),
+	MTK_PIN_IES_SMT_SPEC(119, 119, 0xA20, 3),
+	MTK_PIN_IES_SMT_SPEC(120, 120, 0xA20, 2),
+	MTK_PIN_IES_SMT_SPEC(121, 124, 0xA10, 9),
+};
+
+static int mt8516_ies_smt_set(struct regmap *regmap, unsigned int pin,
+		unsigned char align, int value, enum pin_config_param arg)
+{
+	if (arg == PIN_CONFIG_INPUT_ENABLE)
+		return mtk_pconf_spec_set_ies_smt_range(regmap, mt8516_ies_set,
+			ARRAY_SIZE(mt8516_ies_set), pin, align, value);
+	else if (arg == PIN_CONFIG_INPUT_SCHMITT_ENABLE)
+		return mtk_pconf_spec_set_ies_smt_range(regmap, mt8516_smt_set,
+			ARRAY_SIZE(mt8516_smt_set), pin, align, value);
+	return -EINVAL;
+}
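+
+/* As an illustration (a sketch, not code from the driver): a pinconf
+ * request such as
+ *
+ *	mt8516_ies_smt_set(regmap, 5, 4, 1, PIN_CONFIG_INPUT_ENABLE);
+ *
+ * walks mt8516_ies_set, matches the (0, 6) range, and so sets bit 2 of
+ * register 0x900 to enable the input path on pin 5.
+ */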
+
+static const struct mtk_pinctrl_devdata mt8516_pinctrl_data = {
+	.pins = mtk_pins_mt8516,
+	.npins = ARRAY_SIZE(mtk_pins_mt8516),
+	.grp_desc = mt8516_drv_grp,
+	.n_grp_cls = ARRAY_SIZE(mt8516_drv_grp),
+	.pin_drv_grp = mt8516_pin_drv,
+	.n_pin_drv_grps = ARRAY_SIZE(mt8516_pin_drv),
+	.spec_pull_set = mt8516_spec_pull_set,
+	.spec_ies_smt_set = mt8516_ies_smt_set,
+	.dir_offset = 0x0000,
+	.pullen_offset = 0x0500,
+	.pullsel_offset = 0x0600,
+	.dout_offset = 0x0100,
+	.din_offset = 0x0200,
+	.pinmux_offset = 0x0300,
+	.type1_start = 125,
+	.type1_end = 125,
+	.port_shf = 4,
+	.port_mask = 0xf,
+	.port_align = 4,
+	.eint_hw = {
+		.port_mask = 7,
+		.ports     = 6,
+		.ap_num    = 169,
+		.db_cnt    = 64,
+	},
+};
+
+static int mt8516_pinctrl_probe(struct platform_device *pdev)
+{
+	return mtk_pctrl_init(pdev, &mt8516_pinctrl_data, NULL);
+}
+
+static const struct of_device_id mt8516_pctrl_match[] = {
+	{
+		.compatible = "mediatek,mt8516-pinctrl",
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, mt8516_pctrl_match);
+
+static struct platform_driver mtk_pinctrl_driver = {
+	.probe = mt8516_pinctrl_probe,
+	.driver = {
+		.name = "mediatek-mt8516-pinctrl",
+		.of_match_table = mt8516_pctrl_match,
+		.pm = &mtk_eint_pm_ops,
+	},
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+	return platform_driver_register(&mtk_pinctrl_driver);
+}
+arch_initcall(mtk_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
new file mode 100644
index 0000000..20e1c89
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+
+#include "mtk-eint.h"
+#include "pinctrl-mtk-common-v2.h"
+
+/**
+ * struct mtk_drive_desc - the structure that holds the information
+ *			    of the driving current
+ * @min:	the minimum current of this group
+ * @max:	the maximum current of this group
+ * @step:	the step current of this group
+ * @scal:	the weight factor
+ *
+ * formula: output = ((input) / step - 1) * scal
+ */
+struct mtk_drive_desc {
+	u8 min;
+	u8 max;
+	u8 step;
+	u8 scal;
+};
+
+/* The groups of drive strength */
+static const struct mtk_drive_desc mtk_drive[] = {
+	[DRV_GRP0] = { 4, 16, 4, 1 },
+	[DRV_GRP1] = { 4, 16, 4, 2 },
+	[DRV_GRP2] = { 2, 8, 2, 1 },
+	[DRV_GRP3] = { 2, 8, 2, 2 },
+	[DRV_GRP4] = { 2, 16, 2, 1 },
+};
+
+static void mtk_w32(struct mtk_pinctrl *pctl, u8 i, u32 reg, u32 val)
+{
+	writel_relaxed(val, pctl->base[i] + reg);
+}
+
+static u32 mtk_r32(struct mtk_pinctrl *pctl, u8 i, u32 reg)
+{
+	return readl_relaxed(pctl->base[i] + reg);
+}
+
+void mtk_rmw(struct mtk_pinctrl *pctl, u8 i, u32 reg, u32 mask, u32 set)
+{
+	u32 val;
+
+	val = mtk_r32(pctl, i, reg);
+	val &= ~mask;
+	val |= set;
+	mtk_w32(pctl, i, reg, val);
+}
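+
+/* For example (an illustrative call, not taken from the driver): updating
+ * a 4-bit mode field at bit 16 of register 0x3c0 in base 0 would be
+ *
+ *	mtk_rmw(pctl, 0, 0x3c0, 0xf << 16, 2 << 16);
+ *
+ * which reads the register, clears bits 19..16, and writes back value 2.
+ */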
+
+static int mtk_hw_pin_field_lookup(struct mtk_pinctrl *hw,
+				   const struct mtk_pin_desc *desc,
+				   int field, struct mtk_pin_field *pfd)
+{
+	const struct mtk_pin_field_calc *c, *e;
+	const struct mtk_pin_reg_calc *rc;
+	u32 bits;
+
+	if (hw->soc->reg_cal && hw->soc->reg_cal[field].range) {
+		rc = &hw->soc->reg_cal[field];
+	} else {
+		dev_dbg(hw->dev,
+			"Not support field %d for pin %d (%s)\n",
+			field, desc->number, desc->name);
+		return -ENOTSUPP;
+	}
+
+	c = rc->range;
+	e = c + rc->nranges;
+
+	while (c < e) {
+		if (desc->number >= c->s_pin && desc->number <= c->e_pin)
+			break;
+		c++;
+	}
+
+	if (c >= e) {
+		dev_dbg(hw->dev, "Not support field %d for pin = %d (%s)\n",
+			field, desc->number, desc->name);
+		return -ENOTSUPP;
+	}
+
+	if (c->i_base > hw->nbase - 1) {
+		dev_err(hw->dev,
+			"Invalid base for field %d for pin = %d (%s)\n",
+			field, desc->number, desc->name);
+		return -EINVAL;
+	}
+
+	/* Calculate the overall bit offset the pin is located at. If
+	 * c->fixed is set, all the pins in the range share the same
+	 * field as s_pin.
+	 */
+	bits = c->fixed ? c->s_bit : c->s_bit +
+	       (desc->number - c->s_pin) * (c->x_bits);
+
+	/* Fill pfd from bits; e.g. c->sz_reg equal to 32 means the field
+	 * lives in 32-bit registers.
+	 */
+	pfd->index = c->i_base;
+	pfd->offset = c->s_addr + c->x_addrs * (bits / c->sz_reg);
+	pfd->bitpos = bits % c->sz_reg;
+	pfd->mask = (1 << c->x_bits) - 1;
+
+	/* pfd->next indicates that the field wraps around the register
+	 * boundary: the remaining bits start at bit 0 of the next register
+	 * and must be combined with these to form the complete field on
+	 * read/write.
+	 */
+	pfd->next = pfd->bitpos + c->x_bits > c->sz_reg ? c->x_addrs : 0;
+
+	return 0;
+}
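+
+/* A worked example of the math above: with the MT8183 mode table entry
+ * PIN_FIELD(0, 192, 0x300, 0x10, 0, 4), pin 100 gives bits = 100 * 4 =
+ * 400, hence offset = 0x300 + 0x10 * (400 / 32) = 0x3c0, bitpos =
+ * 400 % 32 = 16, mask = 0xf, and next = 0 since 16 + 4 does not exceed
+ * the 32-bit register.
+ */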
+
+static int mtk_hw_pin_field_get(struct mtk_pinctrl *hw,
+				const struct mtk_pin_desc *desc,
+				int field, struct mtk_pin_field *pfd)
+{
+	if (field < 0 || field >= PINCTRL_PIN_REG_MAX) {
+		dev_err(hw->dev, "Invalid Field %d\n", field);
+		return -EINVAL;
+	}
+
+	return mtk_hw_pin_field_lookup(hw, desc, field, pfd);
+}
+
+static void mtk_hw_bits_part(struct mtk_pin_field *pf, int *h, int *l)
+{
+	*l = 32 - pf->bitpos;
+	*h = get_count_order(pf->mask) - *l;
+}
+
+static void mtk_hw_write_cross_field(struct mtk_pinctrl *hw,
+				     struct mtk_pin_field *pf, int value)
+{
+	int nbits_l, nbits_h;
+
+	mtk_hw_bits_part(pf, &nbits_h, &nbits_l);
+
+	mtk_rmw(hw, pf->index, pf->offset, pf->mask << pf->bitpos,
+		(value & pf->mask) << pf->bitpos);
+
+	mtk_rmw(hw, pf->index, pf->offset + pf->next, BIT(nbits_h) - 1,
+		(value & pf->mask) >> nbits_l);
+}
+
+static void mtk_hw_read_cross_field(struct mtk_pinctrl *hw,
+				    struct mtk_pin_field *pf, int *value)
+{
+	int nbits_l, nbits_h, h, l;
+
+	mtk_hw_bits_part(pf, &nbits_h, &nbits_l);
+
+	l  = (mtk_r32(hw, pf->index, pf->offset)
+	      >> pf->bitpos) & (BIT(nbits_l) - 1);
+	h  = (mtk_r32(hw, pf->index, pf->offset + pf->next))
+	      & (BIT(nbits_h) - 1);
+
+	*value = (h << nbits_l) | l;
+}
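+
+/* Cross-field example: a 4-bit field at bitpos 30 leaves only nbits_l =
+ * 32 - 30 = 2 bits in the current register; the remaining nbits_h = 2
+ * bits sit at bit 0 of the register at offset + next and are recombined
+ * as (h << nbits_l) | l on read, or split the same way on write.
+ */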
+
+int mtk_hw_set_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
+		     int field, int value)
+{
+	struct mtk_pin_field pf;
+	int err;
+
+	err = mtk_hw_pin_field_get(hw, desc, field, &pf);
+	if (err)
+		return err;
+
+	if (!pf.next)
+		mtk_rmw(hw, pf.index, pf.offset, pf.mask << pf.bitpos,
+			(value & pf.mask) << pf.bitpos);
+	else
+		mtk_hw_write_cross_field(hw, &pf, value);
+
+	return 0;
+}
+
+int mtk_hw_get_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
+		     int field, int *value)
+{
+	struct mtk_pin_field pf;
+	int err;
+
+	err = mtk_hw_pin_field_get(hw, desc, field, &pf);
+	if (err)
+		return err;
+
+	if (!pf.next)
+		*value = (mtk_r32(hw, pf.index, pf.offset)
+			  >> pf.bitpos) & pf.mask;
+	else
+		mtk_hw_read_cross_field(hw, &pf, value);
+
+	return 0;
+}
+
+static int mtk_xt_find_eint_num(struct mtk_pinctrl *hw, unsigned long eint_n)
+{
+	const struct mtk_pin_desc *desc;
+	int i = 0;
+
+	desc = (const struct mtk_pin_desc *)hw->soc->pins;
+
+	while (i < hw->soc->npins) {
+		if (desc[i].eint.eint_n == eint_n)
+			return desc[i].number;
+		i++;
+	}
+
+	return EINT_NA;
+}
+
+static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n,
+			     unsigned int *gpio_n,
+			     struct gpio_chip **gpio_chip)
+{
+	struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
+	const struct mtk_pin_desc *desc;
+
+	desc = (const struct mtk_pin_desc *)hw->soc->pins;
+	*gpio_chip = &hw->chip;
+
+	/* First, optimistically assume gpio_n is equal to eint_n */
+	if (desc[eint_n].eint.eint_n == eint_n)
+		*gpio_n = eint_n;
+	else
+		*gpio_n = mtk_xt_find_eint_num(hw, eint_n);
+
+	return *gpio_n == EINT_NA ? -EINVAL : 0;
+}
+
+static int mtk_xt_get_gpio_state(void *data, unsigned long eint_n)
+{
+	struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
+	const struct mtk_pin_desc *desc;
+	struct gpio_chip *gpio_chip;
+	unsigned int gpio_n;
+	int value, err;
+
+	err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip);
+	if (err)
+		return err;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n];
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DI, &value);
+	if (err)
+		return err;
+
+	return !!value;
+}
+
+static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n)
+{
+	struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
+	const struct mtk_pin_desc *desc;
+	struct gpio_chip *gpio_chip;
+	unsigned int gpio_n;
+	int err;
+
+	err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip);
+	if (err)
+		return err;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n];
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE,
+			       desc->eint.eint_m);
+	if (err)
+		return err;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR, MTK_INPUT);
+	if (err)
+		return err;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT, MTK_ENABLE);
+	/* Every real GPIO is supposed to support SMT, while virtual GPIOs
+	 * do not, so the extra err != -ENOTSUPP condition only exists to
+	 * extend EINT support to those virtual GPIOs. An extra flag should
+	 * be added to the pin descriptor once more pins with distinctive
+	 * characteristics come out.
+	 */
+	if (err && err != -ENOTSUPP)
+		return err;
+
+	return 0;
+}
+
+static const struct mtk_eint_xt mtk_eint_xt = {
+	.get_gpio_n = mtk_xt_get_gpio_n,
+	.get_gpio_state = mtk_xt_get_gpio_state,
+	.set_gpio_as_eint = mtk_xt_set_gpio_as_eint,
+};
+
+int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res;
+
+	if (!IS_ENABLED(CONFIG_EINT_MTK))
+		return 0;
+
+	if (!of_property_read_bool(np, "interrupt-controller"))
+		return -ENODEV;
+
+	hw->eint = devm_kzalloc(hw->dev, sizeof(*hw->eint), GFP_KERNEL);
+	if (!hw->eint)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "eint");
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get eint resource\n");
+		return -ENODEV;
+	}
+
+	hw->eint->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hw->eint->base))
+		return PTR_ERR(hw->eint->base);
+
+	hw->eint->irq = irq_of_parse_and_map(np, 0);
+	if (!hw->eint->irq)
+		return -EINVAL;
+
+	if (!hw->soc->eint_hw)
+		return -ENODEV;
+
+	hw->eint->dev = &pdev->dev;
+	hw->eint->hw = hw->soc->eint_hw;
+	hw->eint->pctl = hw;
+	hw->eint->gpio_xlate = &mtk_eint_xt;
+
+	return mtk_eint_do_init(hw->eint);
+}
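+
+/* mtk_build_eint() expects a node shaped roughly like this (an
+ * illustrative devicetree sketch, not a binding definition):
+ *
+ *	pio: pinctrl@10005000 {
+ *		compatible = "mediatek,mt8183-pinctrl";
+ *		reg = <...>;
+ *		reg-names = "iocfg0", ..., "eint";
+ *		interrupt-controller;
+ *		#interrupt-cells = <2>;
+ *		interrupts = <GIC_SPI ... IRQ_TYPE_LEVEL_HIGH>;
+ *	};
+ */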
+
+/* Revision 0 */
+int mtk_pinconf_bias_disable_set(struct mtk_pinctrl *hw,
+				 const struct mtk_pin_desc *desc)
+{
+	int err;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU,
+			       MTK_DISABLE);
+	if (err)
+		return err;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD,
+			       MTK_DISABLE);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+int mtk_pinconf_bias_disable_get(struct mtk_pinctrl *hw,
+				 const struct mtk_pin_desc *desc, int *res)
+{
+	int v, v2;
+	int err;
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PU, &v);
+	if (err)
+		return err;
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PD, &v2);
+	if (err)
+		return err;
+
+	if (v == MTK_ENABLE || v2 == MTK_ENABLE)
+		return -EINVAL;
+
+	*res = 1;
+
+	return 0;
+}
+
+int mtk_pinconf_bias_set(struct mtk_pinctrl *hw,
+			 const struct mtk_pin_desc *desc, bool pullup)
+{
+	int err, arg;
+
+	arg = pullup ? 1 : 2;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, arg & 1);
+	if (err)
+		return err;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD,
+			       !!(arg & 2));
+	if (err)
+		return err;
+
+	return 0;
+}
+
+int mtk_pinconf_bias_get(struct mtk_pinctrl *hw,
+			 const struct mtk_pin_desc *desc, bool pullup, int *res)
+{
+	int reg, err, v;
+
+	reg = pullup ? PINCTRL_PIN_REG_PU : PINCTRL_PIN_REG_PD;
+
+	err = mtk_hw_get_value(hw, desc, reg, &v);
+	if (err)
+		return err;
+
+	if (!v)
+		return -EINVAL;
+
+	*res = 1;
+
+	return 0;
+}
+
+/* Revision 1 */
+int mtk_pinconf_bias_disable_set_rev1(struct mtk_pinctrl *hw,
+				      const struct mtk_pin_desc *desc)
+{
+	int err;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLEN,
+			       MTK_DISABLE);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+int mtk_pinconf_bias_disable_get_rev1(struct mtk_pinctrl *hw,
+				      const struct mtk_pin_desc *desc, int *res)
+{
+	int v, err;
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLEN, &v);
+	if (err)
+		return err;
+
+	if (v == MTK_ENABLE)
+		return -EINVAL;
+
+	*res = 1;
+
+	return 0;
+}
+
+int mtk_pinconf_bias_set_rev1(struct mtk_pinctrl *hw,
+			      const struct mtk_pin_desc *desc, bool pullup)
+{
+	int err, arg;
+
+	arg = pullup ? MTK_PULLUP : MTK_PULLDOWN;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLEN,
+			       MTK_ENABLE);
+	if (err)
+		return err;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLSEL, arg);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw,
+			      const struct mtk_pin_desc *desc, bool pullup,
+			      int *res)
+{
+	int err, v;
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLEN, &v);
+	if (err)
+		return err;
+
+	if (v == MTK_DISABLE)
+		return -EINVAL;
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLSEL, &v);
+	if (err)
+		return err;
+
+	if (pullup ^ (v == MTK_PULLUP))
+		return -EINVAL;
+
+	*res = 1;
+
+	return 0;
+}
+
+/* Revision 0 */
+int mtk_pinconf_drive_set(struct mtk_pinctrl *hw,
+			  const struct mtk_pin_desc *desc, u32 arg)
+{
+	const struct mtk_drive_desc *tb;
+	int err = -ENOTSUPP;
+
+	tb = &mtk_drive[desc->drv_n];
+	/* 4mA when (e8, e4) = (0, 0)
+	 * 8mA when (e8, e4) = (0, 1)
+	 * 12mA when (e8, e4) = (1, 0)
+	 * 16mA when (e8, e4) = (1, 1)
+	 */
+	if ((arg >= tb->min && arg <= tb->max) && !(arg % tb->step)) {
+		arg = (arg / tb->step - 1) * tb->scal;
+		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_E4,
+				       arg & 0x1);
+		if (err)
+			return err;
+
+		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_E8,
+				       (arg & 0x2) >> 1);
+		if (err)
+			return err;
+	}
+
+	return err;
+}
+
+int mtk_pinconf_drive_get(struct mtk_pinctrl *hw,
+			  const struct mtk_pin_desc *desc, int *val)
+{
+	const struct mtk_drive_desc *tb;
+	int err, val1, val2;
+
+	tb = &mtk_drive[desc->drv_n];
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_E4, &val1);
+	if (err)
+		return err;
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_E8, &val2);
+	if (err)
+		return err;
+
+	/* 4mA when (e8, e4) = (0, 0); 8mA when (e8, e4) = (0, 1)
+	 * 12mA when (e8, e4) = (1, 0); 16mA when (e8, e4) = (1, 1)
+	 */
+	*val = (((val2 << 1) + val1) / tb->scal + 1) * tb->step;
+
+	return 0;
+}
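+
+/* Round-trip example for revision 0: a DRV_GRP0 pin (4..16 mA, step 4,
+ * scal 1) asked for 12 mA stores (12 / 4 - 1) * 1 = 2, i.e. (e8, e4) =
+ * (1, 0), and mtk_pinconf_drive_get() recovers (2 / 1 + 1) * 4 = 12 mA.
+ */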
+
+/* Revision 1 */
+int mtk_pinconf_drive_set_rev1(struct mtk_pinctrl *hw,
+			       const struct mtk_pin_desc *desc, u32 arg)
+{
+	const struct mtk_drive_desc *tb;
+	int err = -ENOTSUPP;
+
+	tb = &mtk_drive[desc->drv_n];
+
+	if ((arg >= tb->min && arg <= tb->max) && !(arg % tb->step)) {
+		arg = (arg / tb->step - 1) * tb->scal;
+
+		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV,
+				       arg);
+		if (err)
+			return err;
+	}
+
+	return err;
+}
+
+int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw,
+			       const struct mtk_pin_desc *desc, int *val)
+{
+	const struct mtk_drive_desc *tb;
+	int err, val1;
+
+	tb = &mtk_drive[desc->drv_n];
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV, &val1);
+	if (err)
+		return err;
+
+	*val = ((val1 & 0x7) / tb->scal + 1) * tb->step;
+
+	return 0;
+}
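+
+/* The revision 1 variant collapses (e8, e4) into a single DRV field: on
+ * a DRV_GRP4 pin (2..16 mA, step 2, scal 1), 8 mA stores 8 / 2 - 1 = 3
+ * and reads back as (3 / 1 + 1) * 2 = 8 mA.
+ */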
+
+int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
+			     const struct mtk_pin_desc *desc, bool pullup,
+			     u32 arg)
+{
+	int err;
+
+	/* 10K off & 50K (75K) off, when (R0, R1) = (0, 0);
+	 * 10K off & 50K (75K) on, when (R0, R1) = (0, 1);
+	 * 10K on & 50K (75K) off, when (R0, R1) = (1, 0);
+	 * 10K on & 50K (75K) on, when (R0, R1) = (1, 1)
+	 */
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_R0, arg & 1);
+	if (err)
+		return err;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_R1,
+			       !!(arg & 2));
+	if (err)
+		return err;
+
+	arg = pullup ? 0 : 1;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PUPD, arg);
+
+	/* If PUPD register is not supported for that pin, let's fallback to
+	 * general bias control.
+	 */
+	if (err == -ENOTSUPP) {
+		if (hw->soc->bias_set) {
+			err = hw->soc->bias_set(hw, desc, pullup);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+	}
+
+	return err;
+}
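+
+/* Example encoding for the advanced pull (per the R0/R1 comment above):
+ * arg = 3 sets R0 = 1 and R1 = 1, i.e. both the 10K and the 50K (75K)
+ * resistors on, while PUPD then selects the direction (0 means pull-up).
+ */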
+
+int mtk_pinconf_adv_pull_get(struct mtk_pinctrl *hw,
+			     const struct mtk_pin_desc *desc, bool pullup,
+			     u32 *val)
+{
+	u32 t, t2;
+	int err;
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PUPD, &t);
+
+	/* If PUPD register is not supported for that pin, let's fallback to
+	 * general bias control.
+	 */
+	if (err == -ENOTSUPP) {
+		if (hw->soc->bias_get) {
+			err = hw->soc->bias_get(hw, desc, pullup, val);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+	} else {
+		/* In the customized PULL setup, t == 0 means PULLUP */
+		if (err)
+			return err;
+
+		if (pullup ^ !t)
+			return -EINVAL;
+	}
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R0, &t);
+	if (err)
+		return err;
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R1, &t2);
+	if (err)
+		return err;
+
+	*val = (t | t2 << 1) & 0x7;
+
+	return 0;
+}
+
+int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw,
+			      const struct mtk_pin_desc *desc, u32 arg)
+{
+	int err;
+	int en = arg & 1;
+	int e0 = !!(arg & 2);
+	int e1 = !!(arg & 4);
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV_EN, en);
+	if (err)
+		return err;
+
+	if (!en)
+		return err;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV_E0, e0);
+	if (err)
+		return err;
+
+	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV_E1, e1);
+	if (err)
+		return err;
+
+	return err;
+}
+
+int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
+			      const struct mtk_pin_desc *desc, u32 *val)
+{
+	u32 en, e0, e1;
+	int err;
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV_EN, &en);
+	if (err)
+		return err;
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV_E0, &e0);
+	if (err)
+		return err;
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV_E1, &e1);
+	if (err)
+		return err;
+
+	*val = (en | e0 << 1 | e1 << 2) & 0x7;
+
+	return 0;
+}
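+
+/* Example encoding for the advanced drive: arg = 0x5 enables the buffer
+ * (EN = 1) with E0 = 0 and E1 = 1; when EN is 0, the E0/E1 writes are
+ * skipped entirely.
+ */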
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
new file mode 100644
index 0000000..da018a6
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#ifndef __PINCTRL_MTK_COMMON_V2_H
+#define __PINCTRL_MTK_COMMON_V2_H
+
+#include <linux/gpio/driver.h>
+
+#define MTK_INPUT      0
+#define MTK_OUTPUT     1
+#define MTK_DISABLE    0
+#define MTK_ENABLE     1
+#define MTK_PULLDOWN   0
+#define MTK_PULLUP     1
+
+#define EINT_NA	-1
+
+#define PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs,      \
+		       _s_bit, _x_bits, _sz_reg, _fixed) {		\
+		.s_pin = _s_pin,					\
+		.e_pin = _e_pin,					\
+		.i_base = _i_base,					\
+		.s_addr = _s_addr,					\
+		.x_addrs = _x_addrs,					\
+		.s_bit = _s_bit,					\
+		.x_bits = _x_bits,					\
+		.sz_reg = _sz_reg,					\
+		.fixed = _fixed,					\
+	}
+
+#define PIN_FIELD(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits)	\
+	PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit,	\
+		       _x_bits, 32, 0)
+
+#define PINS_FIELD(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits)	\
+	PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit,	\
+		       _x_bits, 32, 1)
+
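+/* PIN_FIELD() describes a field replicated once per pin (x_bits apart),
+ * while PINS_FIELD() marks the range as fixed so that every pin in it
+ * shares the single field at s_bit; both assume 32-bit registers in
+ * base 0.
+ */
+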
+/* The list of pin attributes that can be modified */
+enum {
+	PINCTRL_PIN_REG_MODE,
+	PINCTRL_PIN_REG_DIR,
+	PINCTRL_PIN_REG_DI,
+	PINCTRL_PIN_REG_DO,
+	PINCTRL_PIN_REG_SR,
+	PINCTRL_PIN_REG_SMT,
+	PINCTRL_PIN_REG_PD,
+	PINCTRL_PIN_REG_PU,
+	PINCTRL_PIN_REG_E4,
+	PINCTRL_PIN_REG_E8,
+	PINCTRL_PIN_REG_TDSEL,
+	PINCTRL_PIN_REG_RDSEL,
+	PINCTRL_PIN_REG_DRV,
+	PINCTRL_PIN_REG_PUPD,
+	PINCTRL_PIN_REG_R0,
+	PINCTRL_PIN_REG_R1,
+	PINCTRL_PIN_REG_IES,
+	PINCTRL_PIN_REG_PULLEN,
+	PINCTRL_PIN_REG_PULLSEL,
+	PINCTRL_PIN_REG_DRV_EN,
+	PINCTRL_PIN_REG_DRV_E0,
+	PINCTRL_PIN_REG_DRV_E1,
+	PINCTRL_PIN_REG_MAX,
+};
+
+/* Group the pins by the driving current */
+enum {
+	DRV_FIXED,
+	DRV_GRP0,
+	DRV_GRP1,
+	DRV_GRP2,
+	DRV_GRP3,
+	DRV_GRP4,
+	DRV_GRP_MAX,
+};
+
+static const char * const mtk_default_register_base_names[] = {
+	"base",
+};
+
+/* struct mtk_pin_field - the structure that holds the information of the field
+ *			  used to describe the attribute for the pin
+ * @index:		the index pointing to the entry in the base address list
+ * @offset:		the register offset relative to the base address
+ * @mask:		the mask used to filter out the field from the register
+ * @bitpos:		the start bit relative to the register
+ * @next:		the indication that the field extends into the
+ *			next register
+ */
+struct mtk_pin_field {
+	u8  index;
+	u32 offset;
+	u32 mask;
+	u8  bitpos;
+	u8  next;
+};
+
+/* struct mtk_pin_field_calc - the structure that holds the range providing
+ *			       the guide used to look up the relevant field
+ * @s_pin:		the start pin within the range
+ * @e_pin:		the end pin within the range
+ * @i_base:		the index pointing to the entry in base address list
+ * @s_addr:		the start address for the range
+ * @x_addrs:		the address distance between two consecutive registers
+ *			within the range
+ * @s_bit:		the start bit for the first register within the range
+ * @x_bits:		the bit distance between two consecutive pins within
+ *			the range
+ * @sz_reg:		the size of bits in a register
+ * @fixed:		the consecutive pins share the same bits with the 1st
+ *			pin
+ */
+struct mtk_pin_field_calc {
+	u16 s_pin;
+	u16 e_pin;
+	u8  i_base;
+	u32 s_addr;
+	u8  x_addrs;
+	u8  s_bit;
+	u8  x_bits;
+	u8  sz_reg;
+	u8  fixed;
+};
+
+/* struct mtk_pin_reg_calc - the structure that holds all ranges used to
+ *			     determine which register the pin would make use of
+ *			     for a certain pin attribute.
+ * @range:		     the array of ranges
+ * @nranges:		     the number of items in the array
+ */
+struct mtk_pin_reg_calc {
+	const struct mtk_pin_field_calc *range;
+	unsigned int nranges;
+};
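+
+/*
+ * A minimal, hypothetical sketch (illustrative names): the per-SoC
+ * lookup table is indexed by the PINCTRL_PIN_REG_* attributes above,
+ * with one mtk_pin_reg_calc entry per attribute the SoC supports:
+ *
+ *	static const struct mtk_pin_reg_calc mtk_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ *		[PINCTRL_PIN_REG_DIR] = { mtk_pin_dir_range,
+ *					  ARRAY_SIZE(mtk_pin_dir_range) },
+ *	};
+ */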
+
+/**
+ * struct mtk_func_desc - the structure that provides information about
+ *			  all the functions for this pin
+ * @name:		the name of function
+ * @muxval:		the mux to the function
+ */
+struct mtk_func_desc {
+	const char *name;
+	u8 muxval;
+};
+
+/**
+ * struct mtk_eint_desc - the structure that provides the eint
+ *			       information for each pin
+ * @eint_m:		the eint mux for this pin
+ * @eint_n:		the eint number for this pin
+ */
+struct mtk_eint_desc {
+	u8 eint_m;
+	u16 eint_n;
+};
+
+/**
+ * struct mtk_pin_desc - the structure that provides information
+ *			       for each pin of the chip
+ * @number:		unique pin number from the global pin number space
+ * @name:		name for this pin
+ * @eint:		the eint data for this pin
+ * @drv_n:		the index into the driving group
+ * @funcs:		all available functions for this pin (only used by
+ *			drivers compatible with pinctrl-mtk-common.c)
+ */
+struct mtk_pin_desc {
+	unsigned int number;
+	const char *name;
+	struct mtk_eint_desc eint;
+	u8 drv_n;
+	struct mtk_func_desc *funcs;
+};
+
+struct mtk_pinctrl_group {
+	const char	*name;
+	unsigned long	config;
+	unsigned	pin;
+};
+
+struct mtk_pinctrl;
+
+/* struct mtk_pin_soc - the structure that holds SoC-specific data */
+struct mtk_pin_soc {
+	const struct mtk_pin_reg_calc	*reg_cal;
+	const struct mtk_pin_desc	*pins;
+	unsigned int			npins;
+	const struct group_desc		*grps;
+	unsigned int			ngrps;
+	const struct function_desc	*funcs;
+	unsigned int			nfuncs;
+	const struct mtk_eint_regs	*eint_regs;
+	const struct mtk_eint_hw	*eint_hw;
+
+	/* Specific parameters per SoC */
+	u8				gpio_m;
+	bool				ies_present;
+	const char * const		*base_names;
+	unsigned int			nbase_names;
+
+	/* Specific pinconfig operations */
+	int (*bias_disable_set)(struct mtk_pinctrl *hw,
+				const struct mtk_pin_desc *desc);
+	int (*bias_disable_get)(struct mtk_pinctrl *hw,
+				const struct mtk_pin_desc *desc, int *res);
+	int (*bias_set)(struct mtk_pinctrl *hw,
+			const struct mtk_pin_desc *desc, bool pullup);
+	int (*bias_get)(struct mtk_pinctrl *hw,
+			const struct mtk_pin_desc *desc, bool pullup, int *res);
+
+	int (*drive_set)(struct mtk_pinctrl *hw,
+			 const struct mtk_pin_desc *desc, u32 arg);
+	int (*drive_get)(struct mtk_pinctrl *hw,
+			 const struct mtk_pin_desc *desc, int *val);
+
+	int (*adv_pull_set)(struct mtk_pinctrl *hw,
+			    const struct mtk_pin_desc *desc, bool pullup,
+			    u32 arg);
+	int (*adv_pull_get)(struct mtk_pinctrl *hw,
+			    const struct mtk_pin_desc *desc, bool pullup,
+			    u32 *val);
+	int (*adv_drive_set)(struct mtk_pinctrl *hw,
+			     const struct mtk_pin_desc *desc, u32 arg);
+	int (*adv_drive_get)(struct mtk_pinctrl *hw,
+			     const struct mtk_pin_desc *desc, u32 *val);
+
+	/* Specific driver data */
+	void				*driver_data;
+};
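+
+/*
+ * A minimal, hypothetical mtk_pin_soc instance (mtXXXX is a placeholder,
+ * not a real SoC) wiring in the generic bias helpers declared at the end
+ * of this header:
+ *
+ *	static const struct mtk_pin_soc mtXXXX_data = {
+ *		.reg_cal = mtk_reg_cals,
+ *		.pins = mtk_pins_mtXXXX,
+ *		.npins = ARRAY_SIZE(mtk_pins_mtXXXX),
+ *		.gpio_m = 0,
+ *		.base_names = mtk_default_register_base_names,
+ *		.nbase_names = ARRAY_SIZE(mtk_default_register_base_names),
+ *		.bias_set = mtk_pinconf_bias_set,
+ *		.bias_get = mtk_pinconf_bias_get,
+ *	};
+ */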
+
+struct mtk_pinctrl {
+	struct pinctrl_dev		*pctrl;
+	void __iomem			**base;
+	u8				nbase;
+	struct device			*dev;
+	struct gpio_chip		chip;
+	const struct mtk_pin_soc        *soc;
+	struct mtk_eint			*eint;
+	struct mtk_pinctrl_group	*groups;
+	const char		**grp_names;
+};
+
+void mtk_rmw(struct mtk_pinctrl *pctl, u8 i, u32 reg, u32 mask, u32 set);
+
+int mtk_hw_set_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
+		     int field, int value);
+int mtk_hw_get_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
+		     int field, int *value);
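+
+/*
+ * Illustrative use of the accessors above, a sketch assuming 'hw' and
+ * 'desc' were already resolved by the caller: switch a pin to output
+ * and drive it high:
+ *
+ *	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR, MTK_OUTPUT);
+ *	if (!err)
+ *		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, 1);
+ */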
+
+int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev);
+
+int mtk_pinconf_bias_disable_set(struct mtk_pinctrl *hw,
+				 const struct mtk_pin_desc *desc);
+int mtk_pinconf_bias_disable_get(struct mtk_pinctrl *hw,
+				 const struct mtk_pin_desc *desc, int *res);
+int mtk_pinconf_bias_set(struct mtk_pinctrl *hw,
+			 const struct mtk_pin_desc *desc, bool pullup);
+int mtk_pinconf_bias_get(struct mtk_pinctrl *hw,
+			 const struct mtk_pin_desc *desc, bool pullup,
+			 int *res);
+
+int mtk_pinconf_bias_disable_set_rev1(struct mtk_pinctrl *hw,
+				      const struct mtk_pin_desc *desc);
+int mtk_pinconf_bias_disable_get_rev1(struct mtk_pinctrl *hw,
+				      const struct mtk_pin_desc *desc,
+				      int *res);
+int mtk_pinconf_bias_set_rev1(struct mtk_pinctrl *hw,
+			      const struct mtk_pin_desc *desc, bool pullup);
+int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw,
+			      const struct mtk_pin_desc *desc, bool pullup,
+			      int *res);
+
+int mtk_pinconf_drive_set(struct mtk_pinctrl *hw,
+			  const struct mtk_pin_desc *desc, u32 arg);
+int mtk_pinconf_drive_get(struct mtk_pinctrl *hw,
+			  const struct mtk_pin_desc *desc, int *val);
+
+int mtk_pinconf_drive_set_rev1(struct mtk_pinctrl *hw,
+			       const struct mtk_pin_desc *desc, u32 arg);
+int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw,
+			       const struct mtk_pin_desc *desc, int *val);
+
+int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
+			     const struct mtk_pin_desc *desc, bool pullup,
+			     u32 arg);
+int mtk_pinconf_adv_pull_get(struct mtk_pinctrl *hw,
+			     const struct mtk_pin_desc *desc, bool pullup,
+			     u32 *val);
+int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw,
+			      const struct mtk_pin_desc *desc, u32 arg);
+int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
+			      const struct mtk_pin_desc *desc, u32 *val);
+
+#endif /* __PINCTRL_MTK_COMMON_V2_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8167.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8167.h
new file mode 100644
index 0000000..8fd2b69
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8167.h
@@ -0,0 +1,1256 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __PINCTRL_MTK_MT8167_H
+#define __PINCTRL_MTK_MT8167_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-mtk-common.h"
+
+static const struct mtk_desc_pin mtk_pins_mt8167[] = {
+	MTK_PIN(
+		PINCTRL_PIN(0, "EINT0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 0),
+		MTK_FUNCTION(0, "GPIO0"),
+		MTK_FUNCTION(1, "PWM_B"),
+		MTK_FUNCTION(2, "DPI_CK"),
+		MTK_FUNCTION(3, "I2S2_BCK"),
+		MTK_FUNCTION(4, "EXT_TXD0"),
+		MTK_FUNCTION(6, "SQICS"),
+		MTK_FUNCTION(7, "DBG_MON_A[6]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(1, "EINT1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 1),
+		MTK_FUNCTION(0, "GPIO1"),
+		MTK_FUNCTION(1, "PWM_C"),
+		MTK_FUNCTION(2, "DPI_D12"),
+		MTK_FUNCTION(3, "I2S2_DI"),
+		MTK_FUNCTION(4, "EXT_TXD1"),
+		MTK_FUNCTION(5, "CONN_MCU_TDO"),
+		MTK_FUNCTION(6, "SQISO"),
+		MTK_FUNCTION(7, "DBG_MON_A[7]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(2, "EINT2"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 2),
+		MTK_FUNCTION(0, "GPIO2"),
+		MTK_FUNCTION(1, "CLKM0"),
+		MTK_FUNCTION(2, "DPI_D13"),
+		MTK_FUNCTION(3, "I2S2_LRCK"),
+		MTK_FUNCTION(4, "EXT_TXD2"),
+		MTK_FUNCTION(5, "CONN_MCU_DBGACK_N"),
+		MTK_FUNCTION(6, "SQISI"),
+		MTK_FUNCTION(7, "DBG_MON_A[8]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(3, "EINT3"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 3),
+		MTK_FUNCTION(0, "GPIO3"),
+		MTK_FUNCTION(1, "CLKM1"),
+		MTK_FUNCTION(2, "DPI_D14"),
+		MTK_FUNCTION(3, "SPI_MI"),
+		MTK_FUNCTION(4, "EXT_TXD3"),
+		MTK_FUNCTION(5, "CONN_MCU_DBGI_N"),
+		MTK_FUNCTION(6, "SQIWP"),
+		MTK_FUNCTION(7, "DBG_MON_A[9]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(4, "EINT4"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 4),
+		MTK_FUNCTION(0, "GPIO4"),
+		MTK_FUNCTION(1, "CLKM2"),
+		MTK_FUNCTION(2, "DPI_D15"),
+		MTK_FUNCTION(3, "SPI_MO"),
+		MTK_FUNCTION(4, "EXT_TXC"),
+		MTK_FUNCTION(5, "CONN_MCU_TCK"),
+		MTK_FUNCTION(6, "CONN_MCU_AICE_JCKC"),
+		MTK_FUNCTION(7, "DBG_MON_A[10]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(5, "EINT5"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 5),
+		MTK_FUNCTION(0, "GPIO5"),
+		MTK_FUNCTION(1, "UCTS2"),
+		MTK_FUNCTION(2, "DPI_D16"),
+		MTK_FUNCTION(3, "SPI_CSB"),
+		MTK_FUNCTION(4, "EXT_RXER"),
+		MTK_FUNCTION(5, "CONN_MCU_TDI"),
+		MTK_FUNCTION(6, "CONN_TEST_CK"),
+		MTK_FUNCTION(7, "DBG_MON_A[11]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(6, "EINT6"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 6),
+		MTK_FUNCTION(0, "GPIO6"),
+		MTK_FUNCTION(1, "URTS2"),
+		MTK_FUNCTION(2, "DPI_D17"),
+		MTK_FUNCTION(3, "SPI_CLK"),
+		MTK_FUNCTION(4, "EXT_RXC"),
+		MTK_FUNCTION(5, "CONN_MCU_TRST_B"),
+		MTK_FUNCTION(6, "MM_TEST_CK"),
+		MTK_FUNCTION(7, "DBG_MON_A[12]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(7, "EINT7"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 7),
+		MTK_FUNCTION(0, "GPIO7"),
+		MTK_FUNCTION(1, "SQIRST"),
+		MTK_FUNCTION(2, "DPI_D6"),
+		MTK_FUNCTION(3, "SDA1_0"),
+		MTK_FUNCTION(4, "EXT_RXDV"),
+		MTK_FUNCTION(5, "CONN_MCU_TMS"),
+		MTK_FUNCTION(6, "CONN_MCU_AICE_JMSC"),
+		MTK_FUNCTION(7, "DBG_MON_A[13]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(8, "EINT8"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 8),
+		MTK_FUNCTION(0, "GPIO8"),
+		MTK_FUNCTION(1, "SQICK"),
+		MTK_FUNCTION(2, "CLKM3"),
+		MTK_FUNCTION(3, "SCL1_0"),
+		MTK_FUNCTION(4, "EXT_RXD0"),
+		MTK_FUNCTION(5, "ANT_SEL0"),
+		MTK_FUNCTION(6, "DPI_D7"),
+		MTK_FUNCTION(7, "DBG_MON_A[14]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(9, "EINT9"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 9),
+		MTK_FUNCTION(0, "GPIO9"),
+		MTK_FUNCTION(1, "CLKM4"),
+		MTK_FUNCTION(2, "SDA2_0"),
+		MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(4, "EXT_RXD1"),
+		MTK_FUNCTION(5, "ANT_SEL1"),
+		MTK_FUNCTION(6, "DPI_D8"),
+		MTK_FUNCTION(7, "DBG_MON_A[15]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(10, "EINT10"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 10),
+		MTK_FUNCTION(0, "GPIO10"),
+		MTK_FUNCTION(1, "CLKM5"),
+		MTK_FUNCTION(2, "SCL2_0"),
+		MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(4, "EXT_RXD2"),
+		MTK_FUNCTION(5, "ANT_SEL2"),
+		MTK_FUNCTION(6, "DPI_D9"),
+		MTK_FUNCTION(7, "DBG_MON_A[16]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(11, "EINT11"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 11),
+		MTK_FUNCTION(0, "GPIO11"),
+		MTK_FUNCTION(1, "CLKM4"),
+		MTK_FUNCTION(2, "PWM_C"),
+		MTK_FUNCTION(3, "CONN_TEST_CK"),
+		MTK_FUNCTION(4, "ANT_SEL3"),
+		MTK_FUNCTION(5, "DPI_D10"),
+		MTK_FUNCTION(6, "EXT_RXD3"),
+		MTK_FUNCTION(7, "DBG_MON_A[17]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(12, "EINT12"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 12),
+		MTK_FUNCTION(0, "GPIO12"),
+		MTK_FUNCTION(1, "CLKM5"),
+		MTK_FUNCTION(2, "PWM_A"),
+		MTK_FUNCTION(3, "SPDIF_OUT"),
+		MTK_FUNCTION(4, "ANT_SEL4"),
+		MTK_FUNCTION(5, "DPI_D11"),
+		MTK_FUNCTION(6, "EXT_TXEN"),
+		MTK_FUNCTION(7, "DBG_MON_A[18]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(13, "EINT13"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 13),
+		MTK_FUNCTION(0, "GPIO13"),
+		MTK_FUNCTION(3, "TSF_IN"),
+		MTK_FUNCTION(4, "ANT_SEL5"),
+		MTK_FUNCTION(5, "DPI_D0"),
+		MTK_FUNCTION(6, "SPDIF_IN"),
+		MTK_FUNCTION(7, "DBG_MON_A[19]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(14, "EINT14"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 14),
+		MTK_FUNCTION(0, "GPIO14"),
+		MTK_FUNCTION(2, "I2S_8CH_DO1"),
+		MTK_FUNCTION(3, "TDM_RX_MCK"),
+		MTK_FUNCTION(4, "ANT_SEL1"),
+		MTK_FUNCTION(5, "CONN_MCU_DBGACK_N"),
+		MTK_FUNCTION(6, "NCLE"),
+		MTK_FUNCTION(7, "DBG_MON_B[8]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(15, "EINT15"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 15),
+		MTK_FUNCTION(0, "GPIO15"),
+		MTK_FUNCTION(2, "I2S_8CH_LRCK"),
+		MTK_FUNCTION(3, "TDM_RX_BCK"),
+		MTK_FUNCTION(4, "ANT_SEL2"),
+		MTK_FUNCTION(5, "CONN_MCU_DBGI_N"),
+		MTK_FUNCTION(6, "NCEB1"),
+		MTK_FUNCTION(7, "DBG_MON_B[9]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(16, "EINT16"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 16),
+		MTK_FUNCTION(0, "GPIO16"),
+		MTK_FUNCTION(2, "I2S_8CH_BCK"),
+		MTK_FUNCTION(3, "TDM_RX_LRCK"),
+		MTK_FUNCTION(4, "ANT_SEL3"),
+		MTK_FUNCTION(5, "CONN_MCU_TRST_B"),
+		MTK_FUNCTION(6, "NCEB0"),
+		MTK_FUNCTION(7, "DBG_MON_B[10]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(17, "EINT17"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 17),
+		MTK_FUNCTION(0, "GPIO17"),
+		MTK_FUNCTION(2, "I2S_8CH_MCK"),
+		MTK_FUNCTION(3, "TDM_RX_DI"),
+		MTK_FUNCTION(4, "IDDIG"),
+		MTK_FUNCTION(5, "ANT_SEL4"),
+		MTK_FUNCTION(6, "NREB"),
+		MTK_FUNCTION(7, "DBG_MON_B[11]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(18, "EINT18"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 18),
+		MTK_FUNCTION(0, "GPIO18"),
+		MTK_FUNCTION(2, "USB_DRVVBUS"),
+		MTK_FUNCTION(3, "I2S3_LRCK"),
+		MTK_FUNCTION(4, "CLKM1"),
+		MTK_FUNCTION(5, "ANT_SEL3"),
+		MTK_FUNCTION(6, "I2S2_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[20]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(19, "EINT19"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 19),
+		MTK_FUNCTION(0, "GPIO19"),
+		MTK_FUNCTION(1, "UCTS1"),
+		MTK_FUNCTION(2, "IDDIG"),
+		MTK_FUNCTION(3, "I2S3_BCK"),
+		MTK_FUNCTION(4, "CLKM2"),
+		MTK_FUNCTION(5, "ANT_SEL4"),
+		MTK_FUNCTION(6, "I2S2_DI"),
+		MTK_FUNCTION(7, "DBG_MON_A[21]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(20, "EINT20"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 20),
+		MTK_FUNCTION(0, "GPIO20"),
+		MTK_FUNCTION(1, "URTS1"),
+		MTK_FUNCTION(3, "I2S3_DO"),
+		MTK_FUNCTION(4, "CLKM3"),
+		MTK_FUNCTION(5, "ANT_SEL5"),
+		MTK_FUNCTION(6, "I2S2_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[22]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(21, "EINT21"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 21),
+		MTK_FUNCTION(0, "GPIO21"),
+		MTK_FUNCTION(1, "NRNB"),
+		MTK_FUNCTION(2, "ANT_SEL0"),
+		MTK_FUNCTION(3, "I2S_8CH_DO4"),
+		MTK_FUNCTION(7, "DBG_MON_B[31]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(22, "EINT22"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 22),
+		MTK_FUNCTION(0, "GPIO22"),
+		MTK_FUNCTION(2, "I2S_8CH_DO2"),
+		MTK_FUNCTION(3, "TSF_IN"),
+		MTK_FUNCTION(4, "USB_DRVVBUS"),
+		MTK_FUNCTION(5, "SPDIF_OUT"),
+		MTK_FUNCTION(6, "NRE_C"),
+		MTK_FUNCTION(7, "DBG_MON_B[12]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(23, "EINT23"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 23),
+		MTK_FUNCTION(0, "GPIO23"),
+		MTK_FUNCTION(2, "I2S_8CH_DO3"),
+		MTK_FUNCTION(3, "CLKM0"),
+		MTK_FUNCTION(4, "IR"),
+		MTK_FUNCTION(5, "SPDIF_IN"),
+		MTK_FUNCTION(6, "NDQS_C"),
+		MTK_FUNCTION(7, "DBG_MON_B[13]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(24, "EINT24"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 24),
+		MTK_FUNCTION(0, "GPIO24"),
+		MTK_FUNCTION(1, "DPI_D20"),
+		MTK_FUNCTION(2, "DPI_DE"),
+		MTK_FUNCTION(3, "ANT_SEL1"),
+		MTK_FUNCTION(4, "UCTS2"),
+		MTK_FUNCTION(5, "PWM_A"),
+		MTK_FUNCTION(6, "I2S0_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[0]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(25, "EINT25"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 25),
+		MTK_FUNCTION(0, "GPIO25"),
+		MTK_FUNCTION(1, "DPI_D19"),
+		MTK_FUNCTION(2, "DPI_VSYNC"),
+		MTK_FUNCTION(3, "ANT_SEL0"),
+		MTK_FUNCTION(4, "URTS2"),
+		MTK_FUNCTION(5, "PWM_B"),
+		MTK_FUNCTION(6, "I2S_8CH_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[1]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(26, "PWRAP_SPI0_MI"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 26),
+		MTK_FUNCTION(0, "GPIO26"),
+		MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+		MTK_FUNCTION(2, "PWRAP_SPI0_MI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(27, "PWRAP_SPI0_MO"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 27),
+		MTK_FUNCTION(0, "GPIO27"),
+		MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+		MTK_FUNCTION(2, "PWRAP_SPI0_MO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(28, "PWRAP_INT"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 28),
+		MTK_FUNCTION(0, "GPIO28"),
+		MTK_FUNCTION(1, "I2S0_MCK"),
+		MTK_FUNCTION(4, "I2S_8CH_MCK"),
+		MTK_FUNCTION(5, "I2S2_MCK"),
+		MTK_FUNCTION(6, "I2S3_MCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(29, "PWRAP_SPI0_CK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 29),
+		MTK_FUNCTION(0, "GPIO29"),
+		MTK_FUNCTION(1, "PWRAP_SPI0_CK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(30, "PWRAP_SPI0_CSN"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 30),
+		MTK_FUNCTION(0, "GPIO30"),
+		MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(31, "RTC32K_CK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 31),
+		MTK_FUNCTION(0, "GPIO31"),
+		MTK_FUNCTION(1, "RTC32K_CK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(32, "WATCHDOG"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 32),
+		MTK_FUNCTION(0, "GPIO32"),
+		MTK_FUNCTION(1, "WATCHDOG")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(33, "SRCLKENA"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 33),
+		MTK_FUNCTION(0, "GPIO33"),
+		MTK_FUNCTION(1, "SRCLKENA0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(34, "URXD2"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 34),
+		MTK_FUNCTION(0, "GPIO34"),
+		MTK_FUNCTION(1, "URXD2"),
+		MTK_FUNCTION(2, "DPI_D5"),
+		MTK_FUNCTION(3, "UTXD2"),
+		MTK_FUNCTION(4, "DBG_SCL"),
+		MTK_FUNCTION(6, "I2S2_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[0]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(35, "UTXD2"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 35),
+		MTK_FUNCTION(0, "GPIO35"),
+		MTK_FUNCTION(1, "UTXD2"),
+		MTK_FUNCTION(2, "DPI_HSYNC"),
+		MTK_FUNCTION(3, "URXD2"),
+		MTK_FUNCTION(4, "DBG_SDA"),
+		MTK_FUNCTION(5, "DPI_D18"),
+		MTK_FUNCTION(6, "I2S3_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[1]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(36, "MRG_CLK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 36),
+		MTK_FUNCTION(0, "GPIO36"),
+		MTK_FUNCTION(1, "MRG_CLK"),
+		MTK_FUNCTION(2, "DPI_D4"),
+		MTK_FUNCTION(3, "I2S0_BCK"),
+		MTK_FUNCTION(4, "I2S3_BCK"),
+		MTK_FUNCTION(5, "PCM0_CLK"),
+		MTK_FUNCTION(6, "IR"),
+		MTK_FUNCTION(7, "DBG_MON_A[2]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(37, "MRG_SYNC"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 37),
+		MTK_FUNCTION(0, "GPIO37"),
+		MTK_FUNCTION(1, "MRG_SYNC"),
+		MTK_FUNCTION(2, "DPI_D3"),
+		MTK_FUNCTION(3, "I2S0_LRCK"),
+		MTK_FUNCTION(4, "I2S3_LRCK"),
+		MTK_FUNCTION(5, "PCM0_SYNC"),
+		MTK_FUNCTION(6, "EXT_COL"),
+		MTK_FUNCTION(7, "DBG_MON_A[3]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(38, "MRG_DI"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 38),
+		MTK_FUNCTION(0, "GPIO38"),
+		MTK_FUNCTION(1, "MRG_DI"),
+		MTK_FUNCTION(2, "DPI_D1"),
+		MTK_FUNCTION(3, "I2S0_DI"),
+		MTK_FUNCTION(4, "I2S3_DO"),
+		MTK_FUNCTION(5, "PCM0_DI"),
+		MTK_FUNCTION(6, "EXT_MDIO"),
+		MTK_FUNCTION(7, "DBG_MON_A[4]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(39, "MRG_DO"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 39),
+		MTK_FUNCTION(0, "GPIO39"),
+		MTK_FUNCTION(1, "MRG_DO"),
+		MTK_FUNCTION(2, "DPI_D2"),
+		MTK_FUNCTION(3, "I2S0_MCK"),
+		MTK_FUNCTION(4, "I2S3_MCK"),
+		MTK_FUNCTION(5, "PCM0_DO"),
+		MTK_FUNCTION(6, "EXT_MDC"),
+		MTK_FUNCTION(7, "DBG_MON_A[5]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(40, "KPROW0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 40),
+		MTK_FUNCTION(0, "GPIO40"),
+		MTK_FUNCTION(1, "KPROW0"),
+		MTK_FUNCTION(4, "IMG_TEST_CK"),
+		MTK_FUNCTION(7, "DBG_MON_B[4]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(41, "KPROW1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 41),
+		MTK_FUNCTION(0, "GPIO41"),
+		MTK_FUNCTION(1, "KPROW1"),
+		MTK_FUNCTION(2, "IDDIG"),
+		MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(4, "MFG_TEST_CK"),
+		MTK_FUNCTION(7, "DBG_MON_B[5]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(42, "KPCOL0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 42),
+		MTK_FUNCTION(0, "GPIO42"),
+		MTK_FUNCTION(1, "KPCOL0"),
+		MTK_FUNCTION(7, "DBG_MON_B[6]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(43, "KPCOL1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 43),
+		MTK_FUNCTION(0, "GPIO43"),
+		MTK_FUNCTION(1, "KPCOL1"),
+		MTK_FUNCTION(2, "USB_DRVVBUS"),
+		MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(4, "TSF_IN"),
+		MTK_FUNCTION(5, "DFD_NTRST_XI"),
+		MTK_FUNCTION(6, "UDI_NTRST_XI"),
+		MTK_FUNCTION(7, "DBG_MON_B[7]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(44, "JTMS"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 44),
+		MTK_FUNCTION(0, "GPIO44"),
+		MTK_FUNCTION(1, "JTMS"),
+		MTK_FUNCTION(2, "CONN_MCU_TMS"),
+		MTK_FUNCTION(3, "CONN_MCU_AICE_JMSC"),
+		MTK_FUNCTION(4, "GPUDFD_TMS_XI"),
+		MTK_FUNCTION(5, "DFD_TMS_XI"),
+		MTK_FUNCTION(6, "UDI_TMS_XI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(45, "JTCK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 45),
+		MTK_FUNCTION(0, "GPIO45"),
+		MTK_FUNCTION(1, "JTCK"),
+		MTK_FUNCTION(2, "CONN_MCU_TCK"),
+		MTK_FUNCTION(3, "CONN_MCU_AICE_JCKC"),
+		MTK_FUNCTION(4, "GPUDFD_TCK_XI"),
+		MTK_FUNCTION(5, "DFD_TCK_XI"),
+		MTK_FUNCTION(6, "UDI_TCK_XI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(46, "JTDI"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 46),
+		MTK_FUNCTION(0, "GPIO46"),
+		MTK_FUNCTION(1, "JTDI"),
+		MTK_FUNCTION(2, "CONN_MCU_TDI"),
+		MTK_FUNCTION(4, "GPUDFD_TDI_XI"),
+		MTK_FUNCTION(5, "DFD_TDI_XI"),
+		MTK_FUNCTION(6, "UDI_TDI_XI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(47, "JTDO"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 47),
+		MTK_FUNCTION(0, "GPIO47"),
+		MTK_FUNCTION(1, "JTDO"),
+		MTK_FUNCTION(2, "CONN_MCU_TDO"),
+		MTK_FUNCTION(4, "GPUDFD_TDO"),
+		MTK_FUNCTION(5, "DFD_TDO"),
+		MTK_FUNCTION(6, "UDI_TDO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(48, "SPI_CS"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 48),
+		MTK_FUNCTION(0, "GPIO48"),
+		MTK_FUNCTION(1, "SPI_CSB"),
+		MTK_FUNCTION(3, "I2S0_DI"),
+		MTK_FUNCTION(4, "I2S2_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[23]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(49, "SPI_CK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 49),
+		MTK_FUNCTION(0, "GPIO49"),
+		MTK_FUNCTION(1, "SPI_CLK"),
+		MTK_FUNCTION(3, "I2S0_LRCK"),
+		MTK_FUNCTION(4, "I2S2_DI"),
+		MTK_FUNCTION(7, "DBG_MON_A[24]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(50, "SPI_MI"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 50),
+		MTK_FUNCTION(0, "GPIO50"),
+		MTK_FUNCTION(1, "SPI_MI"),
+		MTK_FUNCTION(2, "SPI_MO"),
+		MTK_FUNCTION(3, "I2S0_BCK"),
+		MTK_FUNCTION(4, "I2S2_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[25]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(51, "SPI_MO"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 51),
+		MTK_FUNCTION(0, "GPIO51"),
+		MTK_FUNCTION(1, "SPI_MO"),
+		MTK_FUNCTION(2, "SPI_MI"),
+		MTK_FUNCTION(3, "I2S0_MCK"),
+		MTK_FUNCTION(4, "I2S2_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[26]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(52, "SDA1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 52),
+		MTK_FUNCTION(0, "GPIO52"),
+		MTK_FUNCTION(1, "SDA1_0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(53, "SCL1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 53),
+		MTK_FUNCTION(0, "GPIO53"),
+		MTK_FUNCTION(1, "SCL1_0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(54, "DISP_PWM"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 54),
+		MTK_FUNCTION(0, "GPIO54"),
+		MTK_FUNCTION(1, "DISP_PWM"),
+		MTK_FUNCTION(2, "PWM_B"),
+		MTK_FUNCTION(7, "DBG_MON_B[2]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(55, "I2S_DATA_IN"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 55),
+		MTK_FUNCTION(0, "GPIO55"),
+		MTK_FUNCTION(1, "I2S0_DI"),
+		MTK_FUNCTION(2, "UCTS0"),
+		MTK_FUNCTION(3, "I2S3_DO"),
+		MTK_FUNCTION(4, "I2S_8CH_DO1"),
+		MTK_FUNCTION(5, "PWM_A"),
+		MTK_FUNCTION(6, "I2S2_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[28]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(56, "I2S_LRCK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 56),
+		MTK_FUNCTION(0, "GPIO56"),
+		MTK_FUNCTION(1, "I2S0_LRCK"),
+		MTK_FUNCTION(3, "I2S3_LRCK"),
+		MTK_FUNCTION(4, "I2S_8CH_LRCK"),
+		MTK_FUNCTION(5, "PWM_B"),
+		MTK_FUNCTION(6, "I2S2_DI"),
+		MTK_FUNCTION(7, "DBG_MON_A[29]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(57, "I2S_BCK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 57),
+		MTK_FUNCTION(0, "GPIO57"),
+		MTK_FUNCTION(1, "I2S0_BCK"),
+		MTK_FUNCTION(2, "URTS0"),
+		MTK_FUNCTION(3, "I2S3_BCK"),
+		MTK_FUNCTION(4, "I2S_8CH_BCK"),
+		MTK_FUNCTION(5, "PWM_C"),
+		MTK_FUNCTION(6, "I2S2_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[30]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(58, "SDA0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 58),
+		MTK_FUNCTION(0, "GPIO58"),
+		MTK_FUNCTION(1, "SDA0_0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(59, "SCL0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 59),
+		MTK_FUNCTION(0, "GPIO59"),
+		MTK_FUNCTION(1, "SCL0_0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(60, "SDA2"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 60),
+		MTK_FUNCTION(0, "GPIO60"),
+		MTK_FUNCTION(1, "SDA2_0"),
+		MTK_FUNCTION(2, "PWM_B")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(61, "SCL2"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 61),
+		MTK_FUNCTION(0, "GPIO61"),
+		MTK_FUNCTION(1, "SCL2_0"),
+		MTK_FUNCTION(2, "PWM_C")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(62, "URXD0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 62),
+		MTK_FUNCTION(0, "GPIO62"),
+		MTK_FUNCTION(1, "URXD0"),
+		MTK_FUNCTION(2, "UTXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(63, "UTXD0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 63),
+		MTK_FUNCTION(0, "GPIO63"),
+		MTK_FUNCTION(1, "UTXD0"),
+		MTK_FUNCTION(2, "URXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(64, "URXD1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 64),
+		MTK_FUNCTION(0, "GPIO64"),
+		MTK_FUNCTION(1, "URXD1"),
+		MTK_FUNCTION(2, "UTXD1"),
+		MTK_FUNCTION(7, "DBG_MON_A[27]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(65, "UTXD1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 65),
+		MTK_FUNCTION(0, "GPIO65"),
+		MTK_FUNCTION(1, "UTXD1"),
+		MTK_FUNCTION(2, "URXD1"),
+		MTK_FUNCTION(7, "DBG_MON_A[31]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(66, "LCM_RST"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 66),
+		MTK_FUNCTION(0, "GPIO66"),
+		MTK_FUNCTION(1, "LCM_RST"),
+		MTK_FUNCTION(3, "I2S0_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[3]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(67, "DSI_TE"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 67),
+		MTK_FUNCTION(0, "GPIO67"),
+		MTK_FUNCTION(1, "DSI_TE"),
+		MTK_FUNCTION(3, "I2S_8CH_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[14]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(68, "MSDC2_CMD"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 68),
+		MTK_FUNCTION(0, "GPIO68"),
+		MTK_FUNCTION(1, "MSDC2_CMD"),
+		MTK_FUNCTION(2, "I2S_8CH_DO4"),
+		MTK_FUNCTION(3, "SDA1_0"),
+		MTK_FUNCTION(5, "USB_SDA"),
+		MTK_FUNCTION(6, "I2S3_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[15]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(69, "MSDC2_CLK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 69),
+		MTK_FUNCTION(0, "GPIO69"),
+		MTK_FUNCTION(1, "MSDC2_CLK"),
+		MTK_FUNCTION(2, "I2S_8CH_DO3"),
+		MTK_FUNCTION(3, "SCL1_0"),
+		MTK_FUNCTION(4, "DPI_D21"),
+		MTK_FUNCTION(5, "USB_SCL"),
+		MTK_FUNCTION(6, "I2S3_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[16]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(70, "MSDC2_DAT0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 70),
+		MTK_FUNCTION(0, "GPIO70"),
+		MTK_FUNCTION(1, "MSDC2_DAT0"),
+		MTK_FUNCTION(2, "I2S_8CH_DO2"),
+		MTK_FUNCTION(4, "DPI_D22"),
+		MTK_FUNCTION(5, "UTXD0"),
+		MTK_FUNCTION(6, "I2S3_DO"),
+		MTK_FUNCTION(7, "DBG_MON_B[17]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(71, "MSDC2_DAT1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 71),
+		MTK_FUNCTION(0, "GPIO71"),
+		MTK_FUNCTION(1, "MSDC2_DAT1"),
+		MTK_FUNCTION(2, "I2S_8CH_DO1"),
+		MTK_FUNCTION(3, "PWM_A"),
+		MTK_FUNCTION(4, "I2S3_MCK"),
+		MTK_FUNCTION(5, "URXD0"),
+		MTK_FUNCTION(6, "PWM_B"),
+		MTK_FUNCTION(7, "DBG_MON_B[18]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(72, "MSDC2_DAT2"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 72),
+		MTK_FUNCTION(0, "GPIO72"),
+		MTK_FUNCTION(1, "MSDC2_DAT2"),
+		MTK_FUNCTION(2, "I2S_8CH_LRCK"),
+		MTK_FUNCTION(3, "SDA2_0"),
+		MTK_FUNCTION(4, "DPI_D23"),
+		MTK_FUNCTION(5, "UTXD1"),
+		MTK_FUNCTION(6, "PWM_C"),
+		MTK_FUNCTION(7, "DBG_MON_B[19]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(73, "MSDC2_DAT3"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 73),
+		MTK_FUNCTION(0, "GPIO73"),
+		MTK_FUNCTION(1, "MSDC2_DAT3"),
+		MTK_FUNCTION(2, "I2S_8CH_BCK"),
+		MTK_FUNCTION(3, "SCL2_0"),
+		MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(5, "URXD1"),
+		MTK_FUNCTION(6, "PWM_A"),
+		MTK_FUNCTION(7, "DBG_MON_B[20]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(74, "TDN3"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 74),
+		MTK_FUNCTION(0, "GPI74"),
+		MTK_FUNCTION(1, "TDN3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(75, "TDP3"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 75),
+		MTK_FUNCTION(0, "GPI75"),
+		MTK_FUNCTION(1, "TDP3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(76, "TDN2"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 76),
+		MTK_FUNCTION(0, "GPI76"),
+		MTK_FUNCTION(1, "TDN2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(77, "TDP2"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 77),
+		MTK_FUNCTION(0, "GPI77"),
+		MTK_FUNCTION(1, "TDP2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(78, "TCN"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 78),
+		MTK_FUNCTION(0, "GPI78"),
+		MTK_FUNCTION(1, "TCN")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(79, "TCP"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 79),
+		MTK_FUNCTION(0, "GPI79"),
+		MTK_FUNCTION(1, "TCP")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(80, "TDN1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 80),
+		MTK_FUNCTION(0, "GPI80"),
+		MTK_FUNCTION(1, "TDN1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(81, "TDP1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 81),
+		MTK_FUNCTION(0, "GPI81"),
+		MTK_FUNCTION(1, "TDP1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(82, "TDN0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 82),
+		MTK_FUNCTION(0, "GPI82"),
+		MTK_FUNCTION(1, "TDN0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(83, "TDP0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 83),
+		MTK_FUNCTION(0, "GPI83"),
+		MTK_FUNCTION(1, "TDP0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(84, "RDN0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 84),
+		MTK_FUNCTION(0, "GPI84"),
+		MTK_FUNCTION(1, "RDN0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(85, "RDP0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 85),
+		MTK_FUNCTION(0, "GPI85"),
+		MTK_FUNCTION(1, "RDP0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(86, "RDN1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 86),
+		MTK_FUNCTION(0, "GPI86"),
+		MTK_FUNCTION(1, "RDN1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(87, "RDP1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 87),
+		MTK_FUNCTION(0, "GPI87"),
+		MTK_FUNCTION(1, "RDP1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(88, "RCN"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 88),
+		MTK_FUNCTION(0, "GPI88"),
+		MTK_FUNCTION(1, "RCN")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(89, "RCP"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 89),
+		MTK_FUNCTION(0, "GPI89"),
+		MTK_FUNCTION(1, "RCP")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(90, "RDN2"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 90),
+		MTK_FUNCTION(0, "GPI90"),
+		MTK_FUNCTION(1, "RDN2"),
+		MTK_FUNCTION(2, "CMDAT8")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(91, "RDP2"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 91),
+		MTK_FUNCTION(0, "GPI91"),
+		MTK_FUNCTION(1, "RDP2"),
+		MTK_FUNCTION(2, "CMDAT9")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(92, "RDN3"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 92),
+		MTK_FUNCTION(0, "GPI92"),
+		MTK_FUNCTION(1, "RDN3"),
+		MTK_FUNCTION(2, "CMDAT4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(93, "RDP3"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 93),
+		MTK_FUNCTION(0, "GPI93"),
+		MTK_FUNCTION(1, "RDP3"),
+		MTK_FUNCTION(2, "CMDAT5")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(94, "RCN_A"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 94),
+		MTK_FUNCTION(0, "GPI94"),
+		MTK_FUNCTION(1, "RCN_A"),
+		MTK_FUNCTION(2, "CMDAT6")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(95, "RCP_A"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 95),
+		MTK_FUNCTION(0, "GPI95"),
+		MTK_FUNCTION(1, "RCP_A"),
+		MTK_FUNCTION(2, "CMDAT7")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(96, "RDN1_A"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 96),
+		MTK_FUNCTION(0, "GPI96"),
+		MTK_FUNCTION(1, "RDN1_A"),
+		MTK_FUNCTION(2, "CMDAT2"),
+		MTK_FUNCTION(3, "CMCSD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(97, "RDP1_A"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 97),
+		MTK_FUNCTION(0, "GPI97"),
+		MTK_FUNCTION(1, "RDP1_A"),
+		MTK_FUNCTION(2, "CMDAT3"),
+		MTK_FUNCTION(3, "CMCSD3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(98, "RDN0_A"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 98),
+		MTK_FUNCTION(0, "GPI98"),
+		MTK_FUNCTION(1, "RDN0_A"),
+		MTK_FUNCTION(2, "CMHSYNC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(99, "RDP0_A"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 99),
+		MTK_FUNCTION(0, "GPI99"),
+		MTK_FUNCTION(1, "RDP0_A"),
+		MTK_FUNCTION(2, "CMVSYNC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(100, "CMDAT0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 100),
+		MTK_FUNCTION(0, "GPIO100"),
+		MTK_FUNCTION(1, "CMDAT0"),
+		MTK_FUNCTION(2, "CMCSD0"),
+		MTK_FUNCTION(3, "ANT_SEL2"),
+		MTK_FUNCTION(5, "TDM_RX_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[21]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(101, "CMDAT1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 101),
+		MTK_FUNCTION(0, "GPIO101"),
+		MTK_FUNCTION(1, "CMDAT1"),
+		MTK_FUNCTION(2, "CMCSD1"),
+		MTK_FUNCTION(3, "ANT_SEL3"),
+		MTK_FUNCTION(4, "CMFLASH"),
+		MTK_FUNCTION(5, "TDM_RX_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[22]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(102, "CMMCLK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 102),
+		MTK_FUNCTION(0, "GPIO102"),
+		MTK_FUNCTION(1, "CMMCLK"),
+		MTK_FUNCTION(3, "ANT_SEL4"),
+		MTK_FUNCTION(5, "TDM_RX_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[23]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(103, "CMPCLK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 103),
+		MTK_FUNCTION(0, "GPIO103"),
+		MTK_FUNCTION(1, "CMPCLK"),
+		MTK_FUNCTION(2, "CMCSK"),
+		MTK_FUNCTION(3, "ANT_SEL5"),
+		MTK_FUNCTION(5, " TDM_RX_DI"),
+		MTK_FUNCTION(7, "DBG_MON_B[24]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(104, "MSDC1_CMD"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 104),
+		MTK_FUNCTION(0, "GPIO104"),
+		MTK_FUNCTION(1, "MSDC1_CMD"),
+		MTK_FUNCTION(4, "SQICS"),
+		MTK_FUNCTION(7, "DBG_MON_B[25]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(105, "MSDC1_CLK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 105),
+		MTK_FUNCTION(0, "GPIO105"),
+		MTK_FUNCTION(1, "MSDC1_CLK"),
+		MTK_FUNCTION(2, "UDI_NTRST_XI"),
+		MTK_FUNCTION(3, "DFD_NTRST_XI"),
+		MTK_FUNCTION(4, "SQISO"),
+		MTK_FUNCTION(5, "GPUEJ_NTRST_XI"),
+		MTK_FUNCTION(7, "DBG_MON_B[26]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(106, "MSDC1_DAT0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 106),
+		MTK_FUNCTION(0, "GPIO106"),
+		MTK_FUNCTION(1, "MSDC1_DAT0"),
+		MTK_FUNCTION(2, "UDI_TMS_XI"),
+		MTK_FUNCTION(3, "DFD_TMS_XI"),
+		MTK_FUNCTION(4, "SQISI"),
+		MTK_FUNCTION(5, "GPUEJ_TMS_XI"),
+		MTK_FUNCTION(7, "DBG_MON_B[27]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(107, "MSDC1_DAT1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 107),
+		MTK_FUNCTION(0, "GPIO107"),
+		MTK_FUNCTION(1, "MSDC1_DAT1"),
+		MTK_FUNCTION(2, "UDI_TCK_XI"),
+		MTK_FUNCTION(3, "DFD_TCK_XI"),
+		MTK_FUNCTION(4, "SQIWP"),
+		MTK_FUNCTION(5, "GPUEJ_TCK_XI"),
+		MTK_FUNCTION(7, "DBG_MON_B[28]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(108, "MSDC1_DAT2"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 108),
+		MTK_FUNCTION(0, "GPIO108"),
+		MTK_FUNCTION(1, "MSDC1_DAT2"),
+		MTK_FUNCTION(2, "UDI_TDI_XI"),
+		MTK_FUNCTION(3, "DFD_TDI_XI"),
+		MTK_FUNCTION(4, "SQIRST"),
+		MTK_FUNCTION(5, "GPUEJ_TDI_XI"),
+		MTK_FUNCTION(7, "DBG_MON_B[29]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(109, "MSDC1_DAT3"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 109),
+		MTK_FUNCTION(0, "GPIO109"),
+		MTK_FUNCTION(1, "MSDC1_DAT3"),
+		MTK_FUNCTION(2, "UDI_TDO"),
+		MTK_FUNCTION(3, "DFD_TDO"),
+		MTK_FUNCTION(4, "SQICK"),
+		MTK_FUNCTION(5, "GPUEJ_TDO"),
+		MTK_FUNCTION(7, "DBG_MON_B[30]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(110, "MSDC0_DAT7"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 110),
+		MTK_FUNCTION(0, "GPIO110"),
+		MTK_FUNCTION(1, "MSDC0_DAT7"),
+		MTK_FUNCTION(4, "NLD7")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(111, "MSDC0_DAT6"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 111),
+		MTK_FUNCTION(0, "GPIO111"),
+		MTK_FUNCTION(1, "MSDC0_DAT6"),
+		MTK_FUNCTION(4, "NLD6")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(112, "MSDC0_DAT5"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 112),
+		MTK_FUNCTION(0, "GPIO112"),
+		MTK_FUNCTION(1, "MSDC0_DAT5"),
+		MTK_FUNCTION(4, "NLD4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(113, "MSDC0_DAT4"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 113),
+		MTK_FUNCTION(0, "GPIO113"),
+		MTK_FUNCTION(1, "MSDC0_DAT4"),
+		MTK_FUNCTION(4, "NLD3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(114, "MSDC0_RSTB"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 114),
+		MTK_FUNCTION(0, "GPIO114"),
+		MTK_FUNCTION(1, "MSDC0_RSTB"),
+		MTK_FUNCTION(4, "NLD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(115, "MSDC0_CMD"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 115),
+		MTK_FUNCTION(0, "GPIO115"),
+		MTK_FUNCTION(1, "MSDC0_CMD"),
+		MTK_FUNCTION(4, "NALE")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(116, "MSDC0_CLK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 116),
+		MTK_FUNCTION(0, "GPIO116"),
+		MTK_FUNCTION(1, "MSDC0_CLK"),
+		MTK_FUNCTION(4, "NWEB")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(117, "MSDC0_DAT3"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 117),
+		MTK_FUNCTION(0, "GPIO117"),
+		MTK_FUNCTION(1, "MSDC0_DAT3"),
+		MTK_FUNCTION(4, "NLD1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(118, "MSDC0_DAT2"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 118),
+		MTK_FUNCTION(0, "GPIO118"),
+		MTK_FUNCTION(1, "MSDC0_DAT2"),
+		MTK_FUNCTION(4, "NLD5")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(119, "MSDC0_DAT1"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 119),
+		MTK_FUNCTION(0, "GPIO119"),
+		MTK_FUNCTION(1, "MSDC0_DAT1"),
+		MTK_FUNCTION(4, "NLD8")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(120, "MSDC0_DAT0"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 120),
+		MTK_FUNCTION(0, "GPIO120"),
+		MTK_FUNCTION(1, "MSDC0_DAT0"),
+		MTK_FUNCTION(4, "WATCHDOG"),
+		MTK_FUNCTION(5, "NLD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(121, "CEC"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 121),
+		MTK_FUNCTION(0, "GPIO121"),
+		MTK_FUNCTION(1, "CEC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(122, "HTPLG"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 122),
+		MTK_FUNCTION(0, "GPIO122"),
+		MTK_FUNCTION(1, "HTPLG")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(123, "HDMISCK"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 123),
+		MTK_FUNCTION(0, "GPIO123"),
+		MTK_FUNCTION(1, "HDMISCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(124, "HDMISD"),
+		NULL, "mt8167",
+		MTK_EINT_FUNCTION(0, 124),
+		MTK_FUNCTION(0, "GPIO124"),
+		MTK_FUNCTION(1, "HDMISD")
+	),
+};
+
+#endif /* __PINCTRL_MTK_MT8167_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8183.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8183.h
new file mode 100644
index 0000000..79adf5b
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8183.h
@@ -0,0 +1,1916 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#ifndef __PINCTRL_MTK_MT8183_H
+#define __PINCTRL_MTK_MT8183_H
+
+#include "pinctrl-paris.h"
+
+static struct mtk_pin_desc mtk_pins_mt8183[] = {
+	MTK_PIN(
+		0, "GPIO0",
+		MTK_EINT_FUNCTION(0, 0),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO0"),
+		MTK_FUNCTION(1, "MRG_SYNC"),
+		MTK_FUNCTION(2, "PCM0_SYNC"),
+		MTK_FUNCTION(3, "TP_GPIO0_AO"),
+		MTK_FUNCTION(4, "SRCLKENAI0"),
+		MTK_FUNCTION(5, "SCP_SPI2_CS"),
+		MTK_FUNCTION(6, "I2S3_MCK"),
+		MTK_FUNCTION(7, "SPI2_CSB")
+	),
+	MTK_PIN(
+		1, "GPIO1",
+		MTK_EINT_FUNCTION(0, 1),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO1"),
+		MTK_FUNCTION(1, "MRG_CLK"),
+		MTK_FUNCTION(2, "PCM0_CLK"),
+		MTK_FUNCTION(3, "TP_GPIO1_AO"),
+		MTK_FUNCTION(4, "CLKM3"),
+		MTK_FUNCTION(5, "SCP_SPI2_MO"),
+		MTK_FUNCTION(6, "I2S3_BCK"),
+		MTK_FUNCTION(7, "SPI2_MO")
+	),
+	MTK_PIN(
+		2, "GPIO2",
+		MTK_EINT_FUNCTION(0, 2),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO2"),
+		MTK_FUNCTION(1, "MRG_DO"),
+		MTK_FUNCTION(2, "PCM0_DO"),
+		MTK_FUNCTION(3, "TP_GPIO2_AO"),
+		MTK_FUNCTION(4, "SCL6"),
+		MTK_FUNCTION(5, "SCP_SPI2_CK"),
+		MTK_FUNCTION(6, "I2S3_LRCK"),
+		MTK_FUNCTION(7, "SPI2_CLK")
+	),
+	MTK_PIN(
+		3, "GPIO3",
+		MTK_EINT_FUNCTION(0, 3),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO3"),
+		MTK_FUNCTION(1, "MRG_DI"),
+		MTK_FUNCTION(2, "PCM0_DI"),
+		MTK_FUNCTION(3, "TP_GPIO3_AO"),
+		MTK_FUNCTION(4, "SDA6"),
+		MTK_FUNCTION(5, "TDM_MCK"),
+		MTK_FUNCTION(6, "I2S3_DO"),
+		MTK_FUNCTION(7, "SCP_VREQ_VAO")
+	),
+	MTK_PIN(
+		4, "GPIO4",
+		MTK_EINT_FUNCTION(0, 4),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO4"),
+		MTK_FUNCTION(1, "PWM_B"),
+		MTK_FUNCTION(2, "I2S0_MCK"),
+		MTK_FUNCTION(3, "SSPM_UTXD_AO"),
+		MTK_FUNCTION(4, "MD_URXD1"),
+		MTK_FUNCTION(5, "TDM_BCK"),
+		MTK_FUNCTION(6, "TP_GPIO4_AO"),
+		MTK_FUNCTION(7, "DAP_MD32_SWD")
+	),
+	MTK_PIN(
+		5, "GPIO5",
+		MTK_EINT_FUNCTION(0, 5),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO5"),
+		MTK_FUNCTION(1, "PWM_C"),
+		MTK_FUNCTION(2, "I2S0_BCK"),
+		MTK_FUNCTION(3, "SSPM_URXD_AO"),
+		MTK_FUNCTION(4, "MD_UTXD1"),
+		MTK_FUNCTION(5, "TDM_LRCK"),
+		MTK_FUNCTION(6, "TP_GPIO5_AO"),
+		MTK_FUNCTION(7, "DAP_MD32_SWCK")
+	),
+	MTK_PIN(
+		6, "GPIO6",
+		MTK_EINT_FUNCTION(0, 6),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO6"),
+		MTK_FUNCTION(1, "PWM_A"),
+		MTK_FUNCTION(2, "I2S0_LRCK"),
+		MTK_FUNCTION(3, "IDDIG"),
+		MTK_FUNCTION(4, "MD_URXD0"),
+		MTK_FUNCTION(5, "TDM_DATA0"),
+		MTK_FUNCTION(6, "TP_GPIO6_AO"),
+		MTK_FUNCTION(7, "CMFLASH")
+	),
+	MTK_PIN(
+		7, "GPIO7",
+		MTK_EINT_FUNCTION(0, 7),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO7"),
+		MTK_FUNCTION(1, "SPI1_B_MI"),
+		MTK_FUNCTION(2, "I2S0_DI"),
+		MTK_FUNCTION(3, "USB_DRVVBUS"),
+		MTK_FUNCTION(4, "MD_UTXD0"),
+		MTK_FUNCTION(5, "TDM_DATA1"),
+		MTK_FUNCTION(6, "TP_GPIO7_AO"),
+		MTK_FUNCTION(7, "DVFSRC_EXT_REQ")
+	),
+	MTK_PIN(
+		8, "GPIO8",
+		MTK_EINT_FUNCTION(0, 8),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO8"),
+		MTK_FUNCTION(1, "SPI1_B_CSB"),
+		MTK_FUNCTION(2, "ANT_SEL3"),
+		MTK_FUNCTION(3, "SCL7"),
+		MTK_FUNCTION(4, "CONN_MCU_TRST_B"),
+		MTK_FUNCTION(5, "TDM_DATA2"),
+		MTK_FUNCTION(6, "MD_INT0"),
+		MTK_FUNCTION(7, "JTRSTN_SEL1")
+	),
+	MTK_PIN(
+		9, "GPIO9",
+		MTK_EINT_FUNCTION(0, 9),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO9"),
+		MTK_FUNCTION(1, "SPI1_B_MO"),
+		MTK_FUNCTION(2, "ANT_SEL4"),
+		MTK_FUNCTION(3, "CMMCLK2"),
+		MTK_FUNCTION(4, "CONN_MCU_DBGACK_N"),
+		MTK_FUNCTION(5, "SSPM_JTAG_TRSTN"),
+		MTK_FUNCTION(6, "IO_JTAG_TRSTN"),
+		MTK_FUNCTION(7, "DBG_MON_B10")
+	),
+	MTK_PIN(
+		10, "GPIO10",
+		MTK_EINT_FUNCTION(0, 10),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO10"),
+		MTK_FUNCTION(1, "SPI1_B_CLK"),
+		MTK_FUNCTION(2, "ANT_SEL5"),
+		MTK_FUNCTION(3, "CMMCLK3"),
+		MTK_FUNCTION(4, "CONN_MCU_DBGI_N"),
+		MTK_FUNCTION(5, "TDM_DATA3"),
+		MTK_FUNCTION(6, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(7, "DBG_MON_B11")
+	),
+	MTK_PIN(
+		11, "GPIO11",
+		MTK_EINT_FUNCTION(0, 11),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO11"),
+		MTK_FUNCTION(1, "TP_URXD1_AO"),
+		MTK_FUNCTION(2, "IDDIG"),
+		MTK_FUNCTION(3, "SCL6"),
+		MTK_FUNCTION(4, "UCTS1"),
+		MTK_FUNCTION(5, "UCTS0"),
+		MTK_FUNCTION(6, "SRCLKENAI1"),
+		MTK_FUNCTION(7, "I2S5_MCK")
+	),
+	MTK_PIN(
+		12, "GPIO12",
+		MTK_EINT_FUNCTION(0, 12),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO12"),
+		MTK_FUNCTION(1, "TP_UTXD1_AO"),
+		MTK_FUNCTION(2, "USB_DRVVBUS"),
+		MTK_FUNCTION(3, "SDA6"),
+		MTK_FUNCTION(4, "URTS1"),
+		MTK_FUNCTION(5, "URTS0"),
+		MTK_FUNCTION(6, "I2S2_DI2"),
+		MTK_FUNCTION(7, "I2S5_BCK")
+	),
+	MTK_PIN(
+		13, "GPIO13",
+		MTK_EINT_FUNCTION(0, 13),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO13"),
+		MTK_FUNCTION(1, "DBPI_D0"),
+		MTK_FUNCTION(2, "SPI5_MI"),
+		MTK_FUNCTION(3, "PCM0_SYNC"),
+		MTK_FUNCTION(4, "MD_URXD0"),
+		MTK_FUNCTION(5, "ANT_SEL3"),
+		MTK_FUNCTION(6, "I2S0_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B15")
+	),
+	MTK_PIN(
+		14, "GPIO14",
+		MTK_EINT_FUNCTION(0, 14),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO14"),
+		MTK_FUNCTION(1, "DBPI_D1"),
+		MTK_FUNCTION(2, "SPI5_CSB"),
+		MTK_FUNCTION(3, "PCM0_CLK"),
+		MTK_FUNCTION(4, "MD_UTXD0"),
+		MTK_FUNCTION(5, "ANT_SEL4"),
+		MTK_FUNCTION(6, "I2S0_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_B16")
+	),
+	MTK_PIN(
+		15, "GPIO15",
+		MTK_EINT_FUNCTION(0, 15),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO15"),
+		MTK_FUNCTION(1, "DBPI_D2"),
+		MTK_FUNCTION(2, "SPI5_MO"),
+		MTK_FUNCTION(3, "PCM0_DO"),
+		MTK_FUNCTION(4, "MD_URXD1"),
+		MTK_FUNCTION(5, "ANT_SEL5"),
+		MTK_FUNCTION(6, "I2S0_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_B17")
+	),
+	MTK_PIN(
+		16, "GPIO16",
+		MTK_EINT_FUNCTION(0, 16),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO16"),
+		MTK_FUNCTION(1, "DBPI_D3"),
+		MTK_FUNCTION(2, "SPI5_CLK"),
+		MTK_FUNCTION(3, "PCM0_DI"),
+		MTK_FUNCTION(4, "MD_UTXD1"),
+		MTK_FUNCTION(5, "ANT_SEL6"),
+		MTK_FUNCTION(6, "I2S0_DI"),
+		MTK_FUNCTION(7, "DBG_MON_B23")
+	),
+	MTK_PIN(
+		17, "GPIO17",
+		MTK_EINT_FUNCTION(0, 17),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO17"),
+		MTK_FUNCTION(1, "DBPI_D4"),
+		MTK_FUNCTION(2, "SPI4_MI"),
+		MTK_FUNCTION(3, "CONN_MCU_TRST_B"),
+		MTK_FUNCTION(4, "MD_INT0"),
+		MTK_FUNCTION(5, "ANT_SEL7"),
+		MTK_FUNCTION(6, "I2S3_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_A1")
+	),
+	MTK_PIN(
+		18, "GPIO18",
+		MTK_EINT_FUNCTION(0, 18),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO18"),
+		MTK_FUNCTION(1, "DBPI_D5"),
+		MTK_FUNCTION(2, "SPI4_CSB"),
+		MTK_FUNCTION(3, "CONN_MCU_DBGI_N"),
+		MTK_FUNCTION(4, "MD_INT0"),
+		MTK_FUNCTION(5, "SCP_VREQ_VAO"),
+		MTK_FUNCTION(6, "I2S3_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_A2")
+	),
+	MTK_PIN(
+		19, "GPIO19",
+		MTK_EINT_FUNCTION(0, 19),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO19"),
+		MTK_FUNCTION(1, "DBPI_D6"),
+		MTK_FUNCTION(2, "SPI4_MO"),
+		MTK_FUNCTION(3, "CONN_MCU_TDO"),
+		MTK_FUNCTION(4, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+		MTK_FUNCTION(5, "URXD1"),
+		MTK_FUNCTION(6, "I2S3_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_A3")
+	),
+	MTK_PIN(
+		20, "GPIO20",
+		MTK_EINT_FUNCTION(0, 20),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO20"),
+		MTK_FUNCTION(1, "DBPI_D7"),
+		MTK_FUNCTION(2, "SPI4_CLK"),
+		MTK_FUNCTION(3, "CONN_MCU_DBGACK_N"),
+		MTK_FUNCTION(4, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+		MTK_FUNCTION(5, "UTXD1"),
+		MTK_FUNCTION(6, "I2S3_DO"),
+		MTK_FUNCTION(7, "DBG_MON_A19")
+	),
+	MTK_PIN(
+		21, "GPIO21",
+		MTK_EINT_FUNCTION(0, 21),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO21"),
+		MTK_FUNCTION(1, "DBPI_D8"),
+		MTK_FUNCTION(2, "SPI3_MI"),
+		MTK_FUNCTION(3, "CONN_MCU_TMS"),
+		MTK_FUNCTION(4, "DAP_MD32_SWD"),
+		MTK_FUNCTION(5, "CONN_MCU_AICE_TMSC"),
+		MTK_FUNCTION(6, "I2S2_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B5")
+	),
+	MTK_PIN(
+		22, "GPIO22",
+		MTK_EINT_FUNCTION(0, 22),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO22"),
+		MTK_FUNCTION(1, "DBPI_D9"),
+		MTK_FUNCTION(2, "SPI3_CSB"),
+		MTK_FUNCTION(3, "CONN_MCU_TCK"),
+		MTK_FUNCTION(4, "DAP_MD32_SWCK"),
+		MTK_FUNCTION(5, "CONN_MCU_AICE_TCKC"),
+		MTK_FUNCTION(6, "I2S2_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_B6")
+	),
+	MTK_PIN(
+		23, "GPIO23",
+		MTK_EINT_FUNCTION(0, 23),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO23"),
+		MTK_FUNCTION(1, "DBPI_D10"),
+		MTK_FUNCTION(2, "SPI3_MO"),
+		MTK_FUNCTION(3, "CONN_MCU_TDI"),
+		MTK_FUNCTION(4, "UCTS1"),
+		MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(6, "I2S2_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_B7")
+	),
+	MTK_PIN(
+		24, "GPIO24",
+		MTK_EINT_FUNCTION(0, 24),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO24"),
+		MTK_FUNCTION(1, "DBPI_D11"),
+		MTK_FUNCTION(2, "SPI3_CLK"),
+		MTK_FUNCTION(3, "SRCLKENAI0"),
+		MTK_FUNCTION(4, "URTS1"),
+		MTK_FUNCTION(5, "IO_JTAG_TCK"),
+		MTK_FUNCTION(6, "I2S2_DI"),
+		MTK_FUNCTION(7, "DBG_MON_B31")
+	),
+	MTK_PIN(
+		25, "GPIO25",
+		MTK_EINT_FUNCTION(0, 25),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO25"),
+		MTK_FUNCTION(1, "DBPI_HSYNC"),
+		MTK_FUNCTION(2, "ANT_SEL0"),
+		MTK_FUNCTION(3, "SCL6"),
+		MTK_FUNCTION(4, "KPCOL2"),
+		MTK_FUNCTION(5, "IO_JTAG_TMS"),
+		MTK_FUNCTION(6, "I2S1_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B0")
+	),
+	MTK_PIN(
+		26, "GPIO26",
+		MTK_EINT_FUNCTION(0, 26),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO26"),
+		MTK_FUNCTION(1, "DBPI_VSYNC"),
+		MTK_FUNCTION(2, "ANT_SEL1"),
+		MTK_FUNCTION(3, "SDA6"),
+		MTK_FUNCTION(4, "KPROW2"),
+		MTK_FUNCTION(5, "IO_JTAG_TDI"),
+		MTK_FUNCTION(6, "I2S1_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_B1")
+	),
+	MTK_PIN(
+		27, "GPIO27",
+		MTK_EINT_FUNCTION(0, 27),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO27"),
+		MTK_FUNCTION(1, "DBPI_DE"),
+		MTK_FUNCTION(2, "ANT_SEL2"),
+		MTK_FUNCTION(3, "SCL7"),
+		MTK_FUNCTION(4, "DMIC_CLK"),
+		MTK_FUNCTION(5, "IO_JTAG_TDO"),
+		MTK_FUNCTION(6, "I2S1_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_B9")
+	),
+	MTK_PIN(
+		28, "GPIO28",
+		MTK_EINT_FUNCTION(0, 28),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO28"),
+		MTK_FUNCTION(1, "DBPI_CK"),
+		MTK_FUNCTION(2, "DVFSRC_EXT_REQ"),
+		MTK_FUNCTION(3, "SDA7"),
+		MTK_FUNCTION(4, "DMIC_DAT"),
+		MTK_FUNCTION(5, "IO_JTAG_TRSTN"),
+		MTK_FUNCTION(6, "I2S1_DO"),
+		MTK_FUNCTION(7, "DBG_MON_B32")
+	),
+	MTK_PIN(
+		29, "GPIO29",
+		MTK_EINT_FUNCTION(0, 29),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO29"),
+		MTK_FUNCTION(1, "MSDC1_CLK"),
+		MTK_FUNCTION(2, "IO_JTAG_TCK"),
+		MTK_FUNCTION(3, "UDI_TCK"),
+		MTK_FUNCTION(4, "CONN_DSP_JCK"),
+		MTK_FUNCTION(5, "SSPM_JTAG_TCK"),
+		MTK_FUNCTION(6, "PCM1_CLK"),
+		MTK_FUNCTION(7, "DBG_MON_A6")
+	),
+	MTK_PIN(
+		30, "GPIO30",
+		MTK_EINT_FUNCTION(0, 30),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO30"),
+		MTK_FUNCTION(1, "MSDC1_DAT3"),
+		MTK_FUNCTION(2, "DAP_MD32_SWD"),
+		MTK_FUNCTION(3, "CONN_MCU_AICE_TMSC"),
+		MTK_FUNCTION(4, "CONN_DSP_JINTP"),
+		MTK_FUNCTION(5, "SSPM_JTAG_TRSTN"),
+		MTK_FUNCTION(6, "PCM1_DI"),
+		MTK_FUNCTION(7, "DBG_MON_A7")
+	),
+	MTK_PIN(
+		31, "GPIO31",
+		MTK_EINT_FUNCTION(0, 31),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO31"),
+		MTK_FUNCTION(1, "MSDC1_CMD"),
+		MTK_FUNCTION(2, "IO_JTAG_TMS"),
+		MTK_FUNCTION(3, "UDI_TMS"),
+		MTK_FUNCTION(4, "CONN_DSP_JMS"),
+		MTK_FUNCTION(5, "SSPM_JTAG_TMS"),
+		MTK_FUNCTION(6, "PCM1_SYNC"),
+		MTK_FUNCTION(7, "DBG_MON_A8")
+	),
+	MTK_PIN(
+		32, "GPIO32",
+		MTK_EINT_FUNCTION(0, 32),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO32"),
+		MTK_FUNCTION(1, "MSDC1_DAT0"),
+		MTK_FUNCTION(2, "IO_JTAG_TDI"),
+		MTK_FUNCTION(3, "UDI_TDI"),
+		MTK_FUNCTION(4, "CONN_DSP_JDI"),
+		MTK_FUNCTION(5, "SSPM_JTAG_TDI"),
+		MTK_FUNCTION(6, "PCM1_DO0"),
+		MTK_FUNCTION(7, "DBG_MON_A9")
+	),
+	MTK_PIN(
+		33, "GPIO33",
+		MTK_EINT_FUNCTION(0, 33),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO33"),
+		MTK_FUNCTION(1, "MSDC1_DAT2"),
+		MTK_FUNCTION(2, "IO_JTAG_TRSTN"),
+		MTK_FUNCTION(3, "UDI_NTRST"),
+		MTK_FUNCTION(4, "DAP_MD32_SWCK"),
+		MTK_FUNCTION(5, "CONN_MCU_AICE_TCKC"),
+		MTK_FUNCTION(6, "PCM1_DO2"),
+		MTK_FUNCTION(7, "DBG_MON_A10")
+	),
+	MTK_PIN(
+		34, "GPIO34",
+		MTK_EINT_FUNCTION(0, 34),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO34"),
+		MTK_FUNCTION(1, "MSDC1_DAT1"),
+		MTK_FUNCTION(2, "IO_JTAG_TDO"),
+		MTK_FUNCTION(3, "UDI_TDO"),
+		MTK_FUNCTION(4, "CONN_DSP_JDO"),
+		MTK_FUNCTION(5, "SSPM_JTAG_TDO"),
+		MTK_FUNCTION(6, "PCM1_DO1"),
+		MTK_FUNCTION(7, "DBG_MON_A11")
+	),
+	MTK_PIN(
+		35, "GPIO35",
+		MTK_EINT_FUNCTION(0, 35),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO35"),
+		MTK_FUNCTION(1, "MD1_SIM2_SIO"),
+		MTK_FUNCTION(2, "CCU_JTAG_TDO"),
+		MTK_FUNCTION(3, "MD1_SIM1_SIO"),
+		MTK_FUNCTION(5, "SCP_JTAG_TDO"),
+		MTK_FUNCTION(6, "CONN_DSP_JMS"),
+		MTK_FUNCTION(7, "DBG_MON_A28")
+	),
+	MTK_PIN(
+		36, "GPIO36",
+		MTK_EINT_FUNCTION(0, 36),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO36"),
+		MTK_FUNCTION(1, "MD1_SIM2_SRST"),
+		MTK_FUNCTION(2, "CCU_JTAG_TMS"),
+		MTK_FUNCTION(3, "MD1_SIM1_SRST"),
+		MTK_FUNCTION(4, "CONN_MCU_AICE_TMSC"),
+		MTK_FUNCTION(5, "SCP_JTAG_TMS"),
+		MTK_FUNCTION(6, "CONN_DSP_JINTP"),
+		MTK_FUNCTION(7, "DBG_MON_A29")
+	),
+	MTK_PIN(
+		37, "GPIO37",
+		MTK_EINT_FUNCTION(0, 37),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO37"),
+		MTK_FUNCTION(1, "MD1_SIM2_SCLK"),
+		MTK_FUNCTION(2, "CCU_JTAG_TDI"),
+		MTK_FUNCTION(3, "MD1_SIM1_SCLK"),
+		MTK_FUNCTION(5, "SCP_JTAG_TDI"),
+		MTK_FUNCTION(6, "CONN_DSP_JDO"),
+		MTK_FUNCTION(7, "DBG_MON_A30")
+	),
+	MTK_PIN(
+		38, "GPIO38",
+		MTK_EINT_FUNCTION(0, 38),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO38"),
+		MTK_FUNCTION(1, "MD1_SIM1_SCLK"),
+		MTK_FUNCTION(3, "MD1_SIM2_SCLK"),
+		MTK_FUNCTION(4, "CONN_MCU_AICE_TCKC"),
+		MTK_FUNCTION(7, "DBG_MON_A20")
+	),
+	MTK_PIN(
+		39, "GPIO39",
+		MTK_EINT_FUNCTION(0, 39),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO39"),
+		MTK_FUNCTION(1, "MD1_SIM1_SRST"),
+		MTK_FUNCTION(2, "CCU_JTAG_TCK"),
+		MTK_FUNCTION(3, "MD1_SIM2_SRST"),
+		MTK_FUNCTION(5, "SCP_JTAG_TCK"),
+		MTK_FUNCTION(6, "CONN_DSP_JCK"),
+		MTK_FUNCTION(7, "DBG_MON_A31")
+	),
+	MTK_PIN(
+		40, "GPIO40",
+		MTK_EINT_FUNCTION(0, 40),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO40"),
+		MTK_FUNCTION(1, "MD1_SIM1_SIO"),
+		MTK_FUNCTION(2, "CCU_JTAG_TRST"),
+		MTK_FUNCTION(3, "MD1_SIM2_SIO"),
+		MTK_FUNCTION(5, "SCP_JTAG_TRSTN"),
+		MTK_FUNCTION(6, "CONN_DSP_JDI"),
+		MTK_FUNCTION(7, "DBG_MON_A32")
+	),
+	MTK_PIN(
+		41, "GPIO41",
+		MTK_EINT_FUNCTION(0, 41),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO41"),
+		MTK_FUNCTION(1, "IDDIG"),
+		MTK_FUNCTION(2, "URXD1"),
+		MTK_FUNCTION(3, "UCTS0"),
+		MTK_FUNCTION(4, "SSPM_UTXD_AO"),
+		MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(6, "DMIC_CLK")
+	),
+	MTK_PIN(
+		42, "GPIO42",
+		MTK_EINT_FUNCTION(0, 42),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO42"),
+		MTK_FUNCTION(1, "USB_DRVVBUS"),
+		MTK_FUNCTION(2, "UTXD1"),
+		MTK_FUNCTION(3, "URTS0"),
+		MTK_FUNCTION(4, "SSPM_URXD_AO"),
+		MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(6, "DMIC_DAT")
+	),
+	MTK_PIN(
+		43, "GPIO43",
+		MTK_EINT_FUNCTION(0, 43),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO43"),
+		MTK_FUNCTION(1, "DISP_PWM")
+	),
+	MTK_PIN(
+		44, "GPIO44",
+		MTK_EINT_FUNCTION(0, 44),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO44"),
+		MTK_FUNCTION(1, "DSI_TE")
+	),
+	MTK_PIN(
+		45, "GPIO45",
+		MTK_EINT_FUNCTION(0, 45),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO45"),
+		MTK_FUNCTION(1, "LCM_RST")
+	),
+	MTK_PIN(
+		46, "GPIO46",
+		MTK_EINT_FUNCTION(0, 46),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO46"),
+		MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+		MTK_FUNCTION(2, "URXD1"),
+		MTK_FUNCTION(3, "UCTS1"),
+		MTK_FUNCTION(4, "CCU_UTXD_AO"),
+		MTK_FUNCTION(5, "TP_UCTS1_AO"),
+		MTK_FUNCTION(6, "IDDIG"),
+		MTK_FUNCTION(7, "I2S5_LRCK")
+	),
+	MTK_PIN(
+		47, "GPIO47",
+		MTK_EINT_FUNCTION(0, 47),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO47"),
+		MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+		MTK_FUNCTION(2, "UTXD1"),
+		MTK_FUNCTION(3, "URTS1"),
+		MTK_FUNCTION(4, "CCU_URXD_AO"),
+		MTK_FUNCTION(5, "TP_URTS1_AO"),
+		MTK_FUNCTION(6, "USB_DRVVBUS"),
+		MTK_FUNCTION(7, "I2S5_DO")
+	),
+	MTK_PIN(
+		48, "GPIO48",
+		MTK_EINT_FUNCTION(0, 48),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO48"),
+		MTK_FUNCTION(1, "SCL5")
+	),
+	MTK_PIN(
+		49, "GPIO49",
+		MTK_EINT_FUNCTION(0, 49),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO49"),
+		MTK_FUNCTION(1, "SDA5")
+	),
+	MTK_PIN(
+		50, "GPIO50",
+		MTK_EINT_FUNCTION(0, 50),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO50"),
+		MTK_FUNCTION(1, "SCL3")
+	),
+	MTK_PIN(
+		51, "GPIO51",
+		MTK_EINT_FUNCTION(0, 51),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO51"),
+		MTK_FUNCTION(1, "SDA3")
+	),
+	MTK_PIN(
+		52, "GPIO52",
+		MTK_EINT_FUNCTION(0, 52),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO52"),
+		MTK_FUNCTION(1, "BPI_ANT2")
+	),
+	MTK_PIN(
+		53, "GPIO53",
+		MTK_EINT_FUNCTION(0, 53),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO53"),
+		MTK_FUNCTION(1, "BPI_ANT0")
+	),
+	MTK_PIN(
+		54, "GPIO54",
+		MTK_EINT_FUNCTION(0, 54),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO54"),
+		MTK_FUNCTION(1, "BPI_OLAT1")
+	),
+	MTK_PIN(
+		55, "GPIO55",
+		MTK_EINT_FUNCTION(0, 55),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO55"),
+		MTK_FUNCTION(1, "BPI_BUS8")
+	),
+	MTK_PIN(
+		56, "GPIO56",
+		MTK_EINT_FUNCTION(0, 56),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO56"),
+		MTK_FUNCTION(1, "BPI_BUS9"),
+		MTK_FUNCTION(2, "SCL_6306")
+	),
+	MTK_PIN(
+		57, "GPIO57",
+		MTK_EINT_FUNCTION(0, 57),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO57"),
+		MTK_FUNCTION(1, "BPI_BUS10"),
+		MTK_FUNCTION(2, "SDA_6306")
+	),
+	MTK_PIN(
+		58, "GPIO58",
+		MTK_EINT_FUNCTION(0, 58),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO58"),
+		MTK_FUNCTION(1, "RFIC0_BSI_D2"),
+		MTK_FUNCTION(2, "SPM_BSI_D2"),
+		MTK_FUNCTION(3, "PWM_B")
+	),
+	MTK_PIN(
+		59, "GPIO59",
+		MTK_EINT_FUNCTION(0, 59),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO59"),
+		MTK_FUNCTION(1, "RFIC0_BSI_D1"),
+		MTK_FUNCTION(2, "SPM_BSI_D1")
+	),
+	MTK_PIN(
+		60, "GPIO60",
+		MTK_EINT_FUNCTION(0, 60),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO60"),
+		MTK_FUNCTION(1, "RFIC0_BSI_D0"),
+		MTK_FUNCTION(2, "SPM_BSI_D0")
+	),
+	MTK_PIN(
+		61, "GPIO61",
+		MTK_EINT_FUNCTION(0, 61),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO61"),
+		MTK_FUNCTION(1, "MIPI1_SDATA")
+	),
+	MTK_PIN(
+		62, "GPIO62",
+		MTK_EINT_FUNCTION(0, 62),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO62"),
+		MTK_FUNCTION(1, "MIPI1_SCLK")
+	),
+	MTK_PIN(
+		63, "GPIO63",
+		MTK_EINT_FUNCTION(0, 63),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO63"),
+		MTK_FUNCTION(1, "MIPI0_SDATA")
+	),
+	MTK_PIN(
+		64, "GPIO64",
+		MTK_EINT_FUNCTION(0, 64),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO64"),
+		MTK_FUNCTION(1, "MIPI0_SCLK")
+	),
+	MTK_PIN(
+		65, "GPIO65",
+		MTK_EINT_FUNCTION(0, 65),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO65"),
+		MTK_FUNCTION(1, "MIPI3_SDATA"),
+		MTK_FUNCTION(2, "BPI_OLAT2")
+	),
+	MTK_PIN(
+		66, "GPIO66",
+		MTK_EINT_FUNCTION(0, 66),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO66"),
+		MTK_FUNCTION(1, "MIPI3_SCLK"),
+		MTK_FUNCTION(2, "BPI_OLAT3")
+	),
+	MTK_PIN(
+		67, "GPIO67",
+		MTK_EINT_FUNCTION(0, 67),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO67"),
+		MTK_FUNCTION(1, "MIPI2_SDATA")
+	),
+	MTK_PIN(
+		68, "GPIO68",
+		MTK_EINT_FUNCTION(0, 68),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO68"),
+		MTK_FUNCTION(1, "MIPI2_SCLK")
+	),
+	MTK_PIN(
+		69, "GPIO69",
+		MTK_EINT_FUNCTION(0, 69),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO69"),
+		MTK_FUNCTION(1, "BPI_BUS7")
+	),
+	MTK_PIN(
+		70, "GPIO70",
+		MTK_EINT_FUNCTION(0, 70),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO70"),
+		MTK_FUNCTION(1, "BPI_BUS6")
+	),
+	MTK_PIN(
+		71, "GPIO71",
+		MTK_EINT_FUNCTION(0, 71),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO71"),
+		MTK_FUNCTION(1, "BPI_BUS5")
+	),
+	MTK_PIN(
+		72, "GPIO72",
+		MTK_EINT_FUNCTION(0, 72),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO72"),
+		MTK_FUNCTION(1, "BPI_BUS4")
+	),
+	MTK_PIN(
+		73, "GPIO73",
+		MTK_EINT_FUNCTION(0, 73),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO73"),
+		MTK_FUNCTION(1, "BPI_BUS3")
+	),
+	MTK_PIN(
+		74, "GPIO74",
+		MTK_EINT_FUNCTION(0, 74),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO74"),
+		MTK_FUNCTION(1, "BPI_BUS2")
+	),
+	MTK_PIN(
+		75, "GPIO75",
+		MTK_EINT_FUNCTION(0, 75),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO75"),
+		MTK_FUNCTION(1, "BPI_BUS1")
+	),
+	MTK_PIN(
+		76, "GPIO76",
+		MTK_EINT_FUNCTION(0, 76),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO76"),
+		MTK_FUNCTION(1, "BPI_BUS0")
+	),
+	MTK_PIN(
+		77, "GPIO77",
+		MTK_EINT_FUNCTION(0, 77),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO77"),
+		MTK_FUNCTION(1, "BPI_ANT1")
+	),
+	MTK_PIN(
+		78, "GPIO78",
+		MTK_EINT_FUNCTION(0, 78),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO78"),
+		MTK_FUNCTION(1, "BPI_OLAT0")
+	),
+	MTK_PIN(
+		79, "GPIO79",
+		MTK_EINT_FUNCTION(0, 79),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO79"),
+		MTK_FUNCTION(1, "BPI_PA_VM1"),
+		MTK_FUNCTION(2, "MIPI4_SDATA")
+	),
+	MTK_PIN(
+		80, "GPIO80",
+		MTK_EINT_FUNCTION(0, 80),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO80"),
+		MTK_FUNCTION(1, "BPI_PA_VM0"),
+		MTK_FUNCTION(2, "MIPI4_SCLK")
+	),
+	MTK_PIN(
+		81, "GPIO81",
+		MTK_EINT_FUNCTION(0, 81),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO81"),
+		MTK_FUNCTION(1, "SDA1")
+	),
+	MTK_PIN(
+		82, "GPIO82",
+		MTK_EINT_FUNCTION(0, 82),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO82"),
+		MTK_FUNCTION(1, "SDA0")
+	),
+	MTK_PIN(
+		83, "GPIO83",
+		MTK_EINT_FUNCTION(0, 83),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO83"),
+		MTK_FUNCTION(1, "SCL0")
+	),
+	MTK_PIN(
+		84, "GPIO84",
+		MTK_EINT_FUNCTION(0, 84),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO84"),
+		MTK_FUNCTION(1, "SCL1")
+	),
+	MTK_PIN(
+		85, "GPIO85",
+		MTK_EINT_FUNCTION(0, 85),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO85"),
+		MTK_FUNCTION(1, "SPI0_MI"),
+		MTK_FUNCTION(2, "SCP_SPI0_MI"),
+		MTK_FUNCTION(3, "CLKM3"),
+		MTK_FUNCTION(4, "I2S1_BCK"),
+		MTK_FUNCTION(5, "MFG_DFD_JTAG_TDO"),
+		MTK_FUNCTION(6, "DFD_TDO"),
+		MTK_FUNCTION(7, "JTDO_SEL1")
+	),
+	MTK_PIN(
+		86, "GPIO86",
+		MTK_EINT_FUNCTION(0, 86),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO86"),
+		MTK_FUNCTION(1, "SPI0_CSB"),
+		MTK_FUNCTION(2, "SCP_SPI0_CS"),
+		MTK_FUNCTION(3, "CLKM0"),
+		MTK_FUNCTION(4, "I2S1_LRCK"),
+		MTK_FUNCTION(5, "MFG_DFD_JTAG_TMS"),
+		MTK_FUNCTION(6, "DFD_TMS"),
+		MTK_FUNCTION(7, "JTMS_SEL1")
+	),
+	MTK_PIN(
+		87, "GPIO87",
+		MTK_EINT_FUNCTION(0, 87),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO87"),
+		MTK_FUNCTION(1, "SPI0_MO"),
+		MTK_FUNCTION(2, "SCP_SPI0_MO"),
+		MTK_FUNCTION(3, "SDA1"),
+		MTK_FUNCTION(4, "I2S1_DO"),
+		MTK_FUNCTION(5, "MFG_DFD_JTAG_TDI"),
+		MTK_FUNCTION(6, "DFD_TDI"),
+		MTK_FUNCTION(7, "JTDI_SEL1")
+	),
+	MTK_PIN(
+		88, "GPIO88",
+		MTK_EINT_FUNCTION(0, 88),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO88"),
+		MTK_FUNCTION(1, "SPI0_CLK"),
+		MTK_FUNCTION(2, "SCP_SPI0_CK"),
+		MTK_FUNCTION(3, "SCL1"),
+		MTK_FUNCTION(4, "I2S1_MCK"),
+		MTK_FUNCTION(5, "MFG_DFD_JTAG_TCK"),
+		MTK_FUNCTION(6, "DFD_TCK_XI"),
+		MTK_FUNCTION(7, "JTCK_SEL1")
+	),
+	MTK_PIN(
+		89, "GPIO89",
+		MTK_EINT_FUNCTION(0, 89),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO89"),
+		MTK_FUNCTION(1, "SRCLKENAI0"),
+		MTK_FUNCTION(2, "PWM_C"),
+		MTK_FUNCTION(3, "I2S5_BCK"),
+		MTK_FUNCTION(4, "ANT_SEL6"),
+		MTK_FUNCTION(5, "SDA8"),
+		MTK_FUNCTION(6, "CMVREF0"),
+		MTK_FUNCTION(7, "DBG_MON_A21")
+	),
+	MTK_PIN(
+		90, "GPIO90",
+		MTK_EINT_FUNCTION(0, 90),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO90"),
+		MTK_FUNCTION(1, "PWM_A"),
+		MTK_FUNCTION(2, "CMMCLK2"),
+		MTK_FUNCTION(3, "I2S5_LRCK"),
+		MTK_FUNCTION(4, "SCP_VREQ_VAO"),
+		MTK_FUNCTION(5, "SCL8"),
+		MTK_FUNCTION(6, "PTA_RXD"),
+		MTK_FUNCTION(7, "DBG_MON_A22")
+	),
+	MTK_PIN(
+		91, "GPIO91",
+		MTK_EINT_FUNCTION(0, 91),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO91"),
+		MTK_FUNCTION(1, "KPROW1"),
+		MTK_FUNCTION(2, "PWM_B"),
+		MTK_FUNCTION(3, "I2S5_DO"),
+		MTK_FUNCTION(4, "ANT_SEL7"),
+		MTK_FUNCTION(5, "CMMCLK3"),
+		MTK_FUNCTION(6, "PTA_TXD")
+	),
+	MTK_PIN(
+		92, "GPIO92",
+		MTK_EINT_FUNCTION(0, 92),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO92"),
+		MTK_FUNCTION(1, "KPROW0")
+	),
+	MTK_PIN(
+		93, "GPIO93",
+		MTK_EINT_FUNCTION(0, 93),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO93"),
+		MTK_FUNCTION(1, "KPCOL0"),
+		MTK_FUNCTION(7, "DBG_MON_B27")
+	),
+	MTK_PIN(
+		94, "GPIO94",
+		MTK_EINT_FUNCTION(0, 94),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO94"),
+		MTK_FUNCTION(1, "KPCOL1"),
+		MTK_FUNCTION(2, "I2S2_DI2"),
+		MTK_FUNCTION(3, "I2S5_MCK"),
+		MTK_FUNCTION(4, "CMMCLK2"),
+		MTK_FUNCTION(5, "SCP_SPI2_MI"),
+		MTK_FUNCTION(6, "SRCLKENAI1"),
+		MTK_FUNCTION(7, "SPI2_MI")
+	),
+	MTK_PIN(
+		95, "GPIO95",
+		MTK_EINT_FUNCTION(0, 95),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO95"),
+		MTK_FUNCTION(1, "URXD0"),
+		MTK_FUNCTION(2, "UTXD0"),
+		MTK_FUNCTION(3, "MD_URXD0"),
+		MTK_FUNCTION(4, "MD_URXD1"),
+		MTK_FUNCTION(5, "SSPM_URXD_AO"),
+		MTK_FUNCTION(6, "CCU_URXD_AO")
+	),
+	MTK_PIN(
+		96, "GPIO96",
+		MTK_EINT_FUNCTION(0, 96),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO96"),
+		MTK_FUNCTION(1, "UTXD0"),
+		MTK_FUNCTION(2, "URXD0"),
+		MTK_FUNCTION(3, "MD_UTXD0"),
+		MTK_FUNCTION(4, "MD_UTXD1"),
+		MTK_FUNCTION(5, "SSPM_UTXD_AO"),
+		MTK_FUNCTION(6, "CCU_UTXD_AO"),
+		MTK_FUNCTION(7, "DBG_MON_B2")
+	),
+	MTK_PIN(
+		97, "GPIO97",
+		MTK_EINT_FUNCTION(0, 97),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO97"),
+		MTK_FUNCTION(1, "UCTS0"),
+		MTK_FUNCTION(2, "I2S2_MCK"),
+		MTK_FUNCTION(3, "IDDIG"),
+		MTK_FUNCTION(4, "CONN_MCU_TDO"),
+		MTK_FUNCTION(5, "SSPM_JTAG_TDO"),
+		MTK_FUNCTION(6, "IO_JTAG_TDO"),
+		MTK_FUNCTION(7, "DBG_MON_B3")
+	),
+	MTK_PIN(
+		98, "GPIO98",
+		MTK_EINT_FUNCTION(0, 98),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO98"),
+		MTK_FUNCTION(1, "URTS0"),
+		MTK_FUNCTION(2, "I2S2_BCK"),
+		MTK_FUNCTION(3, "USB_DRVVBUS"),
+		MTK_FUNCTION(4, "CONN_MCU_TMS"),
+		MTK_FUNCTION(5, "SSPM_JTAG_TMS"),
+		MTK_FUNCTION(6, "IO_JTAG_TMS"),
+		MTK_FUNCTION(7, "DBG_MON_B4")
+	),
+	MTK_PIN(
+		99, "GPIO99",
+		MTK_EINT_FUNCTION(0, 99),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO99"),
+		MTK_FUNCTION(1, "CMMCLK0"),
+		MTK_FUNCTION(4, "CONN_MCU_AICE_TMSC"),
+		MTK_FUNCTION(7, "DBG_MON_B28")
+	),
+	MTK_PIN(
+		100, "GPIO100",
+		MTK_EINT_FUNCTION(0, 100),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO100"),
+		MTK_FUNCTION(1, "CMMCLK1"),
+		MTK_FUNCTION(2, "PWM_C"),
+		MTK_FUNCTION(3, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+		MTK_FUNCTION(4, "CONN_MCU_AICE_TCKC"),
+		MTK_FUNCTION(7, "DBG_MON_B29")
+	),
+	MTK_PIN(
+		101, "GPIO101",
+		MTK_EINT_FUNCTION(0, 101),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO101"),
+		MTK_FUNCTION(1, "CLKM2"),
+		MTK_FUNCTION(2, "I2S2_LRCK"),
+		MTK_FUNCTION(3, "CMVREF1"),
+		MTK_FUNCTION(4, "CONN_MCU_TCK"),
+		MTK_FUNCTION(5, "SSPM_JTAG_TCK"),
+		MTK_FUNCTION(6, "IO_JTAG_TCK")
+	),
+	MTK_PIN(
+		102, "GPIO102",
+		MTK_EINT_FUNCTION(0, 102),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO102"),
+		MTK_FUNCTION(1, "CLKM1"),
+		MTK_FUNCTION(2, "I2S2_DI"),
+		MTK_FUNCTION(3, "DVFSRC_EXT_REQ"),
+		MTK_FUNCTION(4, "CONN_MCU_TDI"),
+		MTK_FUNCTION(5, "SSPM_JTAG_TDI"),
+		MTK_FUNCTION(6, "IO_JTAG_TDI"),
+		MTK_FUNCTION(7, "DBG_MON_B8")
+	),
+	MTK_PIN(
+		103, "GPIO103",
+		MTK_EINT_FUNCTION(0, 103),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO103"),
+		MTK_FUNCTION(1, "SCL2")
+	),
+	MTK_PIN(
+		104, "GPIO104",
+		MTK_EINT_FUNCTION(0, 104),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO104"),
+		MTK_FUNCTION(1, "SDA2")
+	),
+	MTK_PIN(
+		105, "GPIO105",
+		MTK_EINT_FUNCTION(0, 105),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO105"),
+		MTK_FUNCTION(1, "SCL4")
+	),
+	MTK_PIN(
+		106, "GPIO106",
+		MTK_EINT_FUNCTION(0, 106),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO106"),
+		MTK_FUNCTION(1, "SDA4")
+	),
+	MTK_PIN(
+		107, "GPIO107",
+		MTK_EINT_FUNCTION(0, 107),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO107"),
+		MTK_FUNCTION(1, "DMIC_CLK"),
+		MTK_FUNCTION(2, "ANT_SEL0"),
+		MTK_FUNCTION(3, "CLKM0"),
+		MTK_FUNCTION(4, "SDA7"),
+		MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(6, "PWM_A"),
+		MTK_FUNCTION(7, "DBG_MON_B12")
+	),
+	MTK_PIN(
+		108, "GPIO108",
+		MTK_EINT_FUNCTION(0, 108),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO108"),
+		MTK_FUNCTION(1, "CMMCLK2"),
+		MTK_FUNCTION(2, "ANT_SEL1"),
+		MTK_FUNCTION(3, "CLKM1"),
+		MTK_FUNCTION(4, "SCL8"),
+		MTK_FUNCTION(5, "DAP_MD32_SWD"),
+		MTK_FUNCTION(6, "PWM_B"),
+		MTK_FUNCTION(7, "DBG_MON_B13")
+	),
+	MTK_PIN(
+		109, "GPIO109",
+		MTK_EINT_FUNCTION(0, 109),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO109"),
+		MTK_FUNCTION(1, "DMIC_DAT"),
+		MTK_FUNCTION(2, "ANT_SEL2"),
+		MTK_FUNCTION(3, "CLKM2"),
+		MTK_FUNCTION(4, "SDA8"),
+		MTK_FUNCTION(5, "DAP_MD32_SWCK"),
+		MTK_FUNCTION(6, "PWM_C"),
+		MTK_FUNCTION(7, "DBG_MON_B14")
+	),
+	MTK_PIN(
+		110, "GPIO110",
+		MTK_EINT_FUNCTION(0, 110),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO110"),
+		MTK_FUNCTION(1, "SCL7"),
+		MTK_FUNCTION(2, "ANT_SEL0"),
+		MTK_FUNCTION(3, "TP_URXD1_AO"),
+		MTK_FUNCTION(4, "USB_DRVVBUS"),
+		MTK_FUNCTION(5, "SRCLKENAI1"),
+		MTK_FUNCTION(6, "KPCOL2"),
+		MTK_FUNCTION(7, "URXD1")
+	),
+	MTK_PIN(
+		111, "GPIO111",
+		MTK_EINT_FUNCTION(0, 111),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO111"),
+		MTK_FUNCTION(1, "CMMCLK3"),
+		MTK_FUNCTION(2, "ANT_SEL1"),
+		MTK_FUNCTION(3, "SRCLKENAI0"),
+		MTK_FUNCTION(4, "SCP_VREQ_VAO"),
+		MTK_FUNCTION(5, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+		MTK_FUNCTION(7, "DVFSRC_EXT_REQ")
+	),
+	MTK_PIN(
+		112, "GPIO112",
+		MTK_EINT_FUNCTION(0, 112),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO112"),
+		MTK_FUNCTION(1, "SDA7"),
+		MTK_FUNCTION(2, "ANT_SEL2"),
+		MTK_FUNCTION(3, "TP_UTXD1_AO"),
+		MTK_FUNCTION(4, "IDDIG"),
+		MTK_FUNCTION(5, "AGPS_SYNC"),
+		MTK_FUNCTION(6, "KPROW2"),
+		MTK_FUNCTION(7, "UTXD1")
+	),
+	MTK_PIN(
+		113, "GPIO113",
+		MTK_EINT_FUNCTION(0, 113),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO113"),
+		MTK_FUNCTION(1, "CONN_TOP_CLK"),
+		MTK_FUNCTION(3, "SCL6"),
+		MTK_FUNCTION(4, "AUXIF_CLK0"),
+		MTK_FUNCTION(6, "TP_UCTS1_AO")
+	),
+	MTK_PIN(
+		114, "GPIO114",
+		MTK_EINT_FUNCTION(0, 114),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO114"),
+		MTK_FUNCTION(1, "CONN_TOP_DATA"),
+		MTK_FUNCTION(3, "SDA6"),
+		MTK_FUNCTION(4, "AUXIF_ST0"),
+		MTK_FUNCTION(6, "TP_URTS1_AO")
+	),
+	MTK_PIN(
+		115, "GPIO115",
+		MTK_EINT_FUNCTION(0, 115),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO115"),
+		MTK_FUNCTION(1, "CONN_BT_CLK"),
+		MTK_FUNCTION(2, "UTXD1"),
+		MTK_FUNCTION(3, "PTA_TXD"),
+		MTK_FUNCTION(4, "AUXIF_CLK1"),
+		MTK_FUNCTION(5, "DAP_MD32_SWD"),
+		MTK_FUNCTION(6, "TP_UTXD1_AO")
+	),
+	MTK_PIN(
+		116, "GPIO116",
+		MTK_EINT_FUNCTION(0, 116),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO116"),
+		MTK_FUNCTION(1, "CONN_BT_DATA"),
+		MTK_FUNCTION(2, "IPU_JTAG_TRST"),
+		MTK_FUNCTION(4, "AUXIF_ST1"),
+		MTK_FUNCTION(5, "DAP_MD32_SWCK"),
+		MTK_FUNCTION(6, "TP_URXD2_AO"),
+		MTK_FUNCTION(7, "DBG_MON_A0")
+	),
+	MTK_PIN(
+		117, "GPIO117",
+		MTK_EINT_FUNCTION(0, 117),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO117"),
+		MTK_FUNCTION(1, "CONN_WF_HB0"),
+		MTK_FUNCTION(2, "IPU_JTAG_TDO"),
+		MTK_FUNCTION(6, "TP_UTXD2_AO"),
+		MTK_FUNCTION(7, "DBG_MON_A4")
+	),
+	MTK_PIN(
+		118, "GPIO118",
+		MTK_EINT_FUNCTION(0, 118),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO118"),
+		MTK_FUNCTION(1, "CONN_WF_HB1"),
+		MTK_FUNCTION(2, "IPU_JTAG_TDI"),
+		MTK_FUNCTION(5, "SSPM_URXD_AO"),
+		MTK_FUNCTION(6, "TP_UCTS2_AO"),
+		MTK_FUNCTION(7, "DBG_MON_A5")
+	),
+	MTK_PIN(
+		119, "GPIO119",
+		MTK_EINT_FUNCTION(0, 119),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO119"),
+		MTK_FUNCTION(1, "CONN_WF_HB2"),
+		MTK_FUNCTION(2, "IPU_JTAG_TCK"),
+		MTK_FUNCTION(5, "SSPM_UTXD_AO"),
+		MTK_FUNCTION(6, "TP_URTS2_AO")
+	),
+	MTK_PIN(
+		120, "GPIO120",
+		MTK_EINT_FUNCTION(0, 120),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO120"),
+		MTK_FUNCTION(1, "CONN_WB_PTA"),
+		MTK_FUNCTION(2, "IPU_JTAG_TMS"),
+		MTK_FUNCTION(5, "CCU_URXD_AO")
+	),
+	MTK_PIN(
+		121, "GPIO121",
+		MTK_EINT_FUNCTION(0, 121),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO121"),
+		MTK_FUNCTION(1, "CONN_HRST_B"),
+		MTK_FUNCTION(2, "URXD1"),
+		MTK_FUNCTION(3, "PTA_RXD"),
+		MTK_FUNCTION(5, "CCU_UTXD_AO"),
+		MTK_FUNCTION(6, "TP_URXD1_AO")
+	),
+	MTK_PIN(
+		122, "GPIO122",
+		MTK_EINT_FUNCTION(0, 122),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO122"),
+		MTK_FUNCTION(1, "MSDC0_CMD"),
+		MTK_FUNCTION(2, "SSPM_URXD2_AO"),
+		MTK_FUNCTION(3, "ANT_SEL1"),
+		MTK_FUNCTION(7, "DBG_MON_A12")
+	),
+	MTK_PIN(
+		123, "GPIO123",
+		MTK_EINT_FUNCTION(0, 123),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO123"),
+		MTK_FUNCTION(1, "MSDC0_DAT0"),
+		MTK_FUNCTION(3, "ANT_SEL0"),
+		MTK_FUNCTION(7, "DBG_MON_A13")
+	),
+	MTK_PIN(
+		124, "GPIO124",
+		MTK_EINT_FUNCTION(0, 124),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO124"),
+		MTK_FUNCTION(1, "MSDC0_CLK"),
+		MTK_FUNCTION(7, "DBG_MON_A14")
+	),
+	MTK_PIN(
+		125, "GPIO125",
+		MTK_EINT_FUNCTION(0, 125),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO125"),
+		MTK_FUNCTION(1, "MSDC0_DAT2"),
+		MTK_FUNCTION(3, "MRG_CLK"),
+		MTK_FUNCTION(7, "DBG_MON_A15")
+	),
+	MTK_PIN(
+		126, "GPIO126",
+		MTK_EINT_FUNCTION(0, 126),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO126"),
+		MTK_FUNCTION(1, "MSDC0_DAT4"),
+		MTK_FUNCTION(3, "ANT_SEL5"),
+		MTK_FUNCTION(6, "UFS_MPHY_SCL"),
+		MTK_FUNCTION(7, "DBG_MON_A16")
+	),
+	MTK_PIN(
+		127, "GPIO127",
+		MTK_EINT_FUNCTION(0, 127),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO127"),
+		MTK_FUNCTION(1, "MSDC0_DAT6"),
+		MTK_FUNCTION(3, "ANT_SEL4"),
+		MTK_FUNCTION(6, "UFS_MPHY_SDA"),
+		MTK_FUNCTION(7, "DBG_MON_A17")
+	),
+	MTK_PIN(
+		128, "GPIO128",
+		MTK_EINT_FUNCTION(0, 128),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO128"),
+		MTK_FUNCTION(1, "MSDC0_DAT1"),
+		MTK_FUNCTION(3, "ANT_SEL2"),
+		MTK_FUNCTION(6, "UFS_UNIPRO_SDA"),
+		MTK_FUNCTION(7, "DBG_MON_A18")
+	),
+	MTK_PIN(
+		129, "GPIO129",
+		MTK_EINT_FUNCTION(0, 129),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO129"),
+		MTK_FUNCTION(1, "MSDC0_DAT5"),
+		MTK_FUNCTION(3, "ANT_SEL3"),
+		MTK_FUNCTION(6, "UFS_UNIPRO_SCL"),
+		MTK_FUNCTION(7, "DBG_MON_A23")
+	),
+	MTK_PIN(
+		130, "GPIO130",
+		MTK_EINT_FUNCTION(0, 130),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO130"),
+		MTK_FUNCTION(1, "MSDC0_DAT7"),
+		MTK_FUNCTION(3, "MRG_DO"),
+		MTK_FUNCTION(7, "DBG_MON_A24")
+	),
+	MTK_PIN(
+		131, "GPIO131",
+		MTK_EINT_FUNCTION(0, 131),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO131"),
+		MTK_FUNCTION(1, "MSDC0_DSL"),
+		MTK_FUNCTION(3, "MRG_SYNC"),
+		MTK_FUNCTION(7, "DBG_MON_A25")
+	),
+	MTK_PIN(
+		132, "GPIO132",
+		MTK_EINT_FUNCTION(0, 132),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO132"),
+		MTK_FUNCTION(1, "MSDC0_DAT3"),
+		MTK_FUNCTION(3, "MRG_DI"),
+		MTK_FUNCTION(7, "DBG_MON_A26")
+	),
+	MTK_PIN(
+		133, "GPIO133",
+		MTK_EINT_FUNCTION(0, 133),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO133"),
+		MTK_FUNCTION(1, "MSDC0_RSTB"),
+		MTK_FUNCTION(3, "AGPS_SYNC"),
+		MTK_FUNCTION(7, "DBG_MON_A27")
+	),
+	MTK_PIN(
+		134, "GPIO134",
+		MTK_EINT_FUNCTION(0, 134),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO134"),
+		MTK_FUNCTION(1, "RTC32K_CK")
+	),
+	MTK_PIN(
+		135, "GPIO135",
+		MTK_EINT_FUNCTION(0, 135),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO135"),
+		MTK_FUNCTION(1, "WATCHDOG")
+	),
+	MTK_PIN(
+		136, "GPIO136",
+		MTK_EINT_FUNCTION(0, 136),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO136"),
+		MTK_FUNCTION(1, "AUD_CLK_MOSI"),
+		MTK_FUNCTION(2, "AUD_CLK_MISO"),
+		MTK_FUNCTION(3, "I2S1_MCK"),
+		MTK_FUNCTION(6, "UFS_UNIPRO_SCL")
+	),
+	MTK_PIN(
+		137, "GPIO137",
+		MTK_EINT_FUNCTION(0, 137),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO137"),
+		MTK_FUNCTION(1, "AUD_SYNC_MOSI"),
+		MTK_FUNCTION(2, "AUD_SYNC_MISO"),
+		MTK_FUNCTION(3, "I2S1_BCK")
+	),
+	MTK_PIN(
+		138, "GPIO138",
+		MTK_EINT_FUNCTION(0, 138),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO138"),
+		MTK_FUNCTION(1, "AUD_DAT_MOSI0"),
+		MTK_FUNCTION(2, "AUD_DAT_MISO0"),
+		MTK_FUNCTION(3, "I2S1_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_B24")
+	),
+	MTK_PIN(
+		139, "GPIO139",
+		MTK_EINT_FUNCTION(0, 139),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO139"),
+		MTK_FUNCTION(1, "AUD_DAT_MOSI1"),
+		MTK_FUNCTION(2, "AUD_DAT_MISO1"),
+		MTK_FUNCTION(3, "I2S1_DO"),
+		MTK_FUNCTION(6, "UFS_MPHY_SDA")
+	),
+	MTK_PIN(
+		140, "GPIO140",
+		MTK_EINT_FUNCTION(0, 140),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO140"),
+		MTK_FUNCTION(1, "AUD_CLK_MISO"),
+		MTK_FUNCTION(2, "AUD_CLK_MOSI"),
+		MTK_FUNCTION(3, "I2S0_MCK"),
+		MTK_FUNCTION(6, "UFS_UNIPRO_SDA")
+	),
+	MTK_PIN(
+		141, "GPIO141",
+		MTK_EINT_FUNCTION(0, 141),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO141"),
+		MTK_FUNCTION(1, "AUD_SYNC_MISO"),
+		MTK_FUNCTION(2, "AUD_SYNC_MOSI"),
+		MTK_FUNCTION(3, "I2S0_BCK")
+	),
+	MTK_PIN(
+		142, "GPIO142",
+		MTK_EINT_FUNCTION(0, 142),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO142"),
+		MTK_FUNCTION(1, "AUD_DAT_MISO0"),
+		MTK_FUNCTION(2, "AUD_DAT_MOSI0"),
+		MTK_FUNCTION(3, "I2S0_LRCK"),
+		MTK_FUNCTION(4, "VOW_DAT_MISO"),
+		MTK_FUNCTION(7, "DBG_MON_B25")
+	),
+	MTK_PIN(
+		143, "GPIO143",
+		MTK_EINT_FUNCTION(0, 143),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO143"),
+		MTK_FUNCTION(1, "AUD_DAT_MISO1"),
+		MTK_FUNCTION(2, "AUD_DAT_MOSI1"),
+		MTK_FUNCTION(3, "I2S0_DI"),
+		MTK_FUNCTION(4, "VOW_CLK_MISO"),
+		MTK_FUNCTION(6, "UFS_MPHY_SCL"),
+		MTK_FUNCTION(7, "DBG_MON_B26")
+	),
+	MTK_PIN(
+		144, "GPIO144",
+		MTK_EINT_FUNCTION(0, 144),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO144"),
+		MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+		MTK_FUNCTION(2, "PWRAP_SPI0_MO")
+	),
+	MTK_PIN(
+		145, "GPIO145",
+		MTK_EINT_FUNCTION(0, 145),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO145"),
+		MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
+	),
+	MTK_PIN(
+		146, "GPIO146",
+		MTK_EINT_FUNCTION(0, 146),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO146"),
+		MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+		MTK_FUNCTION(2, "PWRAP_SPI0_MI")
+	),
+	MTK_PIN(
+		147, "GPIO147",
+		MTK_EINT_FUNCTION(0, 147),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO147"),
+		MTK_FUNCTION(1, "PWRAP_SPI0_CK")
+	),
+	MTK_PIN(
+		148, "GPIO148",
+		MTK_EINT_FUNCTION(0, 148),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO148"),
+		MTK_FUNCTION(1, "SRCLKENA0")
+	),
+	MTK_PIN(
+		149, "GPIO149",
+		MTK_EINT_FUNCTION(0, 149),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO149"),
+		MTK_FUNCTION(1, "SRCLKENA1")
+	),
+	MTK_PIN(
+		150, "GPIO150",
+		MTK_EINT_FUNCTION(0, 150),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO150"),
+		MTK_FUNCTION(1, "PWM_A"),
+		MTK_FUNCTION(2, "CMFLASH"),
+		MTK_FUNCTION(3, "CLKM0"),
+		MTK_FUNCTION(7, "DBG_MON_B30")
+	),
+	MTK_PIN(
+		151, "GPIO151",
+		MTK_EINT_FUNCTION(0, 151),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO151"),
+		MTK_FUNCTION(1, "PWM_B"),
+		MTK_FUNCTION(2, "CMVREF0"),
+		MTK_FUNCTION(3, "CLKM1"),
+		MTK_FUNCTION(7, "DBG_MON_B20")
+	),
+	MTK_PIN(
+		152, "GPIO152",
+		MTK_EINT_FUNCTION(0, 152),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO152"),
+		MTK_FUNCTION(1, "PWM_C"),
+		MTK_FUNCTION(2, "CMFLASH"),
+		MTK_FUNCTION(3, "CLKM2"),
+		MTK_FUNCTION(7, "DBG_MON_B21")
+	),
+	MTK_PIN(
+		153, "GPIO153",
+		MTK_EINT_FUNCTION(0, 153),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO153"),
+		MTK_FUNCTION(1, "PWM_A"),
+		MTK_FUNCTION(2, "CMVREF0"),
+		MTK_FUNCTION(3, "CLKM3"),
+		MTK_FUNCTION(7, "DBG_MON_B22")
+	),
+	MTK_PIN(
+		154, "GPIO154",
+		MTK_EINT_FUNCTION(0, 154),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO154"),
+		MTK_FUNCTION(1, "SCP_VREQ_VAO"),
+		MTK_FUNCTION(2, "DVFSRC_EXT_REQ"),
+		MTK_FUNCTION(7, "DBG_MON_B18")
+	),
+	MTK_PIN(
+		155, "GPIO155",
+		MTK_EINT_FUNCTION(0, 155),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO155"),
+		MTK_FUNCTION(1, "ANT_SEL0"),
+		MTK_FUNCTION(2, "DVFSRC_EXT_REQ"),
+		MTK_FUNCTION(3, "CMVREF1"),
+		MTK_FUNCTION(7, "SCP_JTAG_TDI")
+	),
+	MTK_PIN(
+		156, "GPIO156",
+		MTK_EINT_FUNCTION(0, 156),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO156"),
+		MTK_FUNCTION(1, "ANT_SEL1"),
+		MTK_FUNCTION(2, "SRCLKENAI0"),
+		MTK_FUNCTION(3, "SCL6"),
+		MTK_FUNCTION(4, "KPCOL2"),
+		MTK_FUNCTION(5, "IDDIG"),
+		MTK_FUNCTION(7, "SCP_JTAG_TCK")
+	),
+	MTK_PIN(
+		157, "GPIO157",
+		MTK_EINT_FUNCTION(0, 157),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO157"),
+		MTK_FUNCTION(1, "ANT_SEL2"),
+		MTK_FUNCTION(2, "SRCLKENAI1"),
+		MTK_FUNCTION(3, "SDA6"),
+		MTK_FUNCTION(4, "KPROW2"),
+		MTK_FUNCTION(5, "USB_DRVVBUS"),
+		MTK_FUNCTION(7, "SCP_JTAG_TRSTN")
+	),
+	MTK_PIN(
+		158, "GPIO158",
+		MTK_EINT_FUNCTION(0, 158),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO158"),
+		MTK_FUNCTION(1, "ANT_SEL3")
+	),
+	MTK_PIN(
+		159, "GPIO159",
+		MTK_EINT_FUNCTION(0, 159),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO159"),
+		MTK_FUNCTION(1, "ANT_SEL4")
+	),
+	MTK_PIN(
+		160, "GPIO160",
+		MTK_EINT_FUNCTION(0, 160),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO160"),
+		MTK_FUNCTION(1, "ANT_SEL5")
+	),
+	MTK_PIN(
+		161, "GPIO161",
+		MTK_EINT_FUNCTION(0, 161),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO161"),
+		MTK_FUNCTION(1, "SPI1_A_MI"),
+		MTK_FUNCTION(2, "SCP_SPI1_MI"),
+		MTK_FUNCTION(3, "IDDIG"),
+		MTK_FUNCTION(4, "ANT_SEL6"),
+		MTK_FUNCTION(5, "KPCOL2"),
+		MTK_FUNCTION(6, "PTA_RXD"),
+		MTK_FUNCTION(7, "DBG_MON_B19")
+	),
+	MTK_PIN(
+		162, "GPIO162",
+		MTK_EINT_FUNCTION(0, 162),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO162"),
+		MTK_FUNCTION(1, "SPI1_A_CSB"),
+		MTK_FUNCTION(2, "SCP_SPI1_CS"),
+		MTK_FUNCTION(3, "USB_DRVVBUS"),
+		MTK_FUNCTION(4, "ANT_SEL5"),
+		MTK_FUNCTION(5, "KPROW2"),
+		MTK_FUNCTION(6, "PTA_TXD")
+	),
+	MTK_PIN(
+		163, "GPIO163",
+		MTK_EINT_FUNCTION(0, 163),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO163"),
+		MTK_FUNCTION(1, "SPI1_A_MO"),
+		MTK_FUNCTION(2, "SCP_SPI1_MO"),
+		MTK_FUNCTION(3, "SDA1"),
+		MTK_FUNCTION(4, "ANT_SEL4"),
+		MTK_FUNCTION(5, "CMMCLK2"),
+		MTK_FUNCTION(6, "DMIC_CLK")
+	),
+	MTK_PIN(
+		164, "GPIO164",
+		MTK_EINT_FUNCTION(0, 164),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO164"),
+		MTK_FUNCTION(1, "SPI1_A_CLK"),
+		MTK_FUNCTION(2, "SCP_SPI1_CK"),
+		MTK_FUNCTION(3, "SCL1"),
+		MTK_FUNCTION(4, "ANT_SEL3"),
+		MTK_FUNCTION(5, "CMMCLK3"),
+		MTK_FUNCTION(6, "DMIC_DAT")
+	),
+	MTK_PIN(
+		165, "GPIO165",
+		MTK_EINT_FUNCTION(0, 165),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO165"),
+		MTK_FUNCTION(1, "PWM_B"),
+		MTK_FUNCTION(2, "CMMCLK2"),
+		MTK_FUNCTION(3, "SCP_VREQ_VAO"),
+		MTK_FUNCTION(6, "TDM_MCK_2ND"),
+		MTK_FUNCTION(7, "SCP_JTAG_TDO")
+	),
+	MTK_PIN(
+		166, "GPIO166",
+		MTK_EINT_FUNCTION(0, 166),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO166"),
+		MTK_FUNCTION(1, "ANT_SEL6")
+	),
+	MTK_PIN(
+		167, "GPIO167",
+		MTK_EINT_FUNCTION(0, 167),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO167"),
+		MTK_FUNCTION(1, "RFIC0_BSI_EN"),
+		MTK_FUNCTION(2, "SPM_BSI_EN")
+	),
+	MTK_PIN(
+		168, "GPIO168",
+		MTK_EINT_FUNCTION(0, 168),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO168"),
+		MTK_FUNCTION(1, "RFIC0_BSI_CK"),
+		MTK_FUNCTION(2, "SPM_BSI_CK")
+	),
+	MTK_PIN(
+		169, "GPIO169",
+		MTK_EINT_FUNCTION(0, 169),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO169"),
+		MTK_FUNCTION(1, "PWM_C"),
+		MTK_FUNCTION(2, "CMMCLK3"),
+		MTK_FUNCTION(3, "CMVREF1"),
+		MTK_FUNCTION(4, "ANT_SEL7"),
+		MTK_FUNCTION(5, "AGPS_SYNC"),
+		MTK_FUNCTION(6, "TDM_BCK_2ND"),
+		MTK_FUNCTION(7, "SCP_JTAG_TMS")
+	),
+	MTK_PIN(
+		170, "GPIO170",
+		MTK_EINT_FUNCTION(0, 170),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO170"),
+		MTK_FUNCTION(1, "I2S1_BCK"),
+		MTK_FUNCTION(2, "I2S3_BCK"),
+		MTK_FUNCTION(3, "SCL7"),
+		MTK_FUNCTION(4, "I2S5_BCK"),
+		MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(6, "TDM_LRCK_2ND"),
+		MTK_FUNCTION(7, "ANT_SEL3")
+	),
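+	/*
+	 * Note: from pin 171 onward the EINT number diverges from the
+	 * GPIO number (e.g. pin 171 maps to EINT 184, pin 180 to EINT 171).
+	 */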
+	MTK_PIN(
+		171, "GPIO171",
+		MTK_EINT_FUNCTION(0, 184),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO171"),
+		MTK_FUNCTION(1, "I2S1_LRCK"),
+		MTK_FUNCTION(2, "I2S3_LRCK"),
+		MTK_FUNCTION(3, "SDA7"),
+		MTK_FUNCTION(4, "I2S5_LRCK"),
+		MTK_FUNCTION(5, "URXD1"),
+		MTK_FUNCTION(6, "TDM_DATA0_2ND"),
+		MTK_FUNCTION(7, "ANT_SEL4")
+	),
+	MTK_PIN(
+		172, "GPIO172",
+		MTK_EINT_FUNCTION(0, 185),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO172"),
+		MTK_FUNCTION(1, "I2S1_DO"),
+		MTK_FUNCTION(2, "I2S3_DO"),
+		MTK_FUNCTION(3, "SCL8"),
+		MTK_FUNCTION(4, "I2S5_DO"),
+		MTK_FUNCTION(5, "UTXD1"),
+		MTK_FUNCTION(6, "TDM_DATA1_2ND"),
+		MTK_FUNCTION(7, "ANT_SEL5")
+	),
+	MTK_PIN(
+		173, "GPIO173",
+		MTK_EINT_FUNCTION(0, 186),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO173"),
+		MTK_FUNCTION(1, "I2S1_MCK"),
+		MTK_FUNCTION(2, "I2S3_MCK"),
+		MTK_FUNCTION(3, "SDA8"),
+		MTK_FUNCTION(4, "I2S5_MCK"),
+		MTK_FUNCTION(5, "UCTS0"),
+		MTK_FUNCTION(6, "TDM_DATA2_2ND"),
+		MTK_FUNCTION(7, "ANT_SEL6")
+	),
+	MTK_PIN(
+		174, "GPIO174",
+		MTK_EINT_FUNCTION(0, 187),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO174"),
+		MTK_FUNCTION(1, "I2S2_DI"),
+		MTK_FUNCTION(2, "I2S0_DI"),
+		MTK_FUNCTION(3, "DVFSRC_EXT_REQ"),
+		MTK_FUNCTION(4, "I2S2_DI2"),
+		MTK_FUNCTION(5, "URTS0"),
+		MTK_FUNCTION(6, "TDM_DATA3_2ND"),
+		MTK_FUNCTION(7, "ANT_SEL7")
+	),
+	MTK_PIN(
+		175, "GPIO175",
+		MTK_EINT_FUNCTION(0, 188),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO175"),
+		MTK_FUNCTION(1, "ANT_SEL7")
+	),
+	MTK_PIN(
+		176, "GPIO176",
+		MTK_EINT_FUNCTION(0, 189),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO176")
+	),
+	MTK_PIN(
+		177, "GPIO177",
+		MTK_EINT_FUNCTION(0, 190),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO177")
+	),
+	MTK_PIN(
+		178, "GPIO178",
+		MTK_EINT_FUNCTION(0, 191),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO178")
+	),
+	MTK_PIN(
+		179, "GPIO179",
+		MTK_EINT_FUNCTION(0, 192),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO179")
+	),
+	MTK_PIN(
+		180, "GPIO180",
+		MTK_EINT_FUNCTION(0, 171),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO180")
+	),
+	MTK_PIN(
+		181, "GPIO181",
+		MTK_EINT_FUNCTION(0, 172),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO181")
+	),
+	MTK_PIN(
+		182, "GPIO182",
+		MTK_EINT_FUNCTION(0, 173),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO182")
+	),
+	MTK_PIN(
+		183, "GPIO183",
+		MTK_EINT_FUNCTION(0, 174),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO183")
+	),
+	MTK_PIN(
+		184, "GPIO184",
+		MTK_EINT_FUNCTION(0, 175),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO184")
+	),
+	MTK_PIN(
+		185, "GPIO185",
+		MTK_EINT_FUNCTION(0, 177),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO185")
+	),
+	MTK_PIN(
+		186, "GPIO186",
+		MTK_EINT_FUNCTION(0, 178),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO186")
+	),
+	MTK_PIN(
+		187, "GPIO187",
+		MTK_EINT_FUNCTION(0, 179),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO187")
+	),
+	MTK_PIN(
+		188, "GPIO188",
+		MTK_EINT_FUNCTION(0, 180),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO188")
+	),
+	MTK_PIN(
+		189, "GPIO189",
+		MTK_EINT_FUNCTION(0, 181),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO189")
+	),
+	MTK_PIN(
+		190, "GPIO190",
+		MTK_EINT_FUNCTION(0, 182),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO190")
+	),
+	MTK_PIN(
+		191, "GPIO191",
+		MTK_EINT_FUNCTION(0, 183),
+		DRV_GRP4,
+		MTK_FUNCTION(0, "GPIO191")
+	),
+};
+
+#endif /* __PINCTRL_MTK_MT8183_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8516.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8516.h
new file mode 100644
index 0000000..f7a4c6e
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8516.h
@@ -0,0 +1,1182 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ */
+#ifndef __PINCTRL_MTK_MT8516_H
+#define __PINCTRL_MTK_MT8516_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-mtk-common.h"
+
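+/*
+ * Legacy per-pin descriptor table: each MTK_PIN entry pairs
+ * PINCTRL_PIN(number, pad name) with its EINT mapping and up to eight
+ * mux functions, where mode 0 is always the plain GPIO function.
+ */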
+static const struct mtk_desc_pin mtk_pins_mt8516[] = {
+	MTK_PIN(
+		PINCTRL_PIN(0, "EINT0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 0),
+		MTK_FUNCTION(0, "GPIO0"),
+		MTK_FUNCTION(1, "PWM_B"),
+		MTK_FUNCTION(3, "I2S2_BCK"),
+		MTK_FUNCTION(4, "EXT_TXD0"),
+		MTK_FUNCTION(6, "SQICS"),
+		MTK_FUNCTION(7, "DBG_MON_A[6]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(1, "EINT1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 1),
+		MTK_FUNCTION(0, "GPIO1"),
+		MTK_FUNCTION(1, "PWM_C"),
+		MTK_FUNCTION(3, "I2S2_DI"),
+		MTK_FUNCTION(4, "EXT_TXD1"),
+		MTK_FUNCTION(5, "CONN_MCU_TDO"),
+		MTK_FUNCTION(6, "SQISO"),
+		MTK_FUNCTION(7, "DBG_MON_A[7]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(2, "EINT2"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 2),
+		MTK_FUNCTION(0, "GPIO2"),
+		MTK_FUNCTION(1, "CLKM0"),
+		MTK_FUNCTION(3, "I2S2_LRCK"),
+		MTK_FUNCTION(4, "EXT_TXD2"),
+		MTK_FUNCTION(5, "CONN_MCU_DBGACK_N"),
+		MTK_FUNCTION(6, "SQISI"),
+		MTK_FUNCTION(7, "DBG_MON_A[8]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(3, "EINT3"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 3),
+		MTK_FUNCTION(0, "GPIO3"),
+		MTK_FUNCTION(1, "CLKM1"),
+		MTK_FUNCTION(3, "SPI_MI"),
+		MTK_FUNCTION(4, "EXT_TXD3"),
+		MTK_FUNCTION(5, "CONN_MCU_DBGI_N"),
+		MTK_FUNCTION(6, "SQIWP"),
+		MTK_FUNCTION(7, "DBG_MON_A[9]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(4, "EINT4"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 4),
+		MTK_FUNCTION(0, "GPIO4"),
+		MTK_FUNCTION(1, "CLKM2"),
+		MTK_FUNCTION(3, "SPI_MO"),
+		MTK_FUNCTION(4, "EXT_TXC"),
+		MTK_FUNCTION(5, "CONN_MCU_TCK"),
+		MTK_FUNCTION(6, "CONN_MCU_AICE_JCKC"),
+		MTK_FUNCTION(7, "DBG_MON_A[10]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(5, "EINT5"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 5),
+		MTK_FUNCTION(0, "GPIO5"),
+		MTK_FUNCTION(1, "UCTS2"),
+		MTK_FUNCTION(3, "SPI_CSB"),
+		MTK_FUNCTION(4, "EXT_RXER"),
+		MTK_FUNCTION(5, "CONN_MCU_TDI"),
+		MTK_FUNCTION(6, "CONN_TEST_CK"),
+		MTK_FUNCTION(7, "DBG_MON_A[11]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(6, "EINT6"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 6),
+		MTK_FUNCTION(0, "GPIO6"),
+		MTK_FUNCTION(1, "URTS2"),
+		MTK_FUNCTION(3, "SPI_CLK"),
+		MTK_FUNCTION(4, "EXT_RXC"),
+		MTK_FUNCTION(5, "CONN_MCU_TRST_B"),
+		MTK_FUNCTION(7, "DBG_MON_A[12]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(7, "EINT7"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 7),
+		MTK_FUNCTION(0, "GPIO7"),
+		MTK_FUNCTION(1, "SQIRST"),
+		MTK_FUNCTION(3, "SDA1_0"),
+		MTK_FUNCTION(4, "EXT_RXDV"),
+		MTK_FUNCTION(5, "CONN_MCU_TMS"),
+		MTK_FUNCTION(6, "CONN_MCU_AICE_JMSC"),
+		MTK_FUNCTION(7, "DBG_MON_A[13]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(8, "EINT8"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 8),
+		MTK_FUNCTION(0, "GPIO8"),
+		MTK_FUNCTION(1, "SQICK"),
+		MTK_FUNCTION(2, "CLKM3"),
+		MTK_FUNCTION(3, "SCL1_0"),
+		MTK_FUNCTION(4, "EXT_RXD0"),
+		MTK_FUNCTION(5, "ANT_SEL0"),
+		MTK_FUNCTION(7, "DBG_MON_A[14]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(9, "EINT9"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 9),
+		MTK_FUNCTION(0, "GPIO9"),
+		MTK_FUNCTION(1, "CLKM4"),
+		MTK_FUNCTION(2, "SDA2_0"),
+		MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(4, "EXT_RXD1"),
+		MTK_FUNCTION(5, "ANT_SEL1"),
+		MTK_FUNCTION(7, "DBG_MON_A[15]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(10, "EINT10"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 10),
+		MTK_FUNCTION(0, "GPIO10"),
+		MTK_FUNCTION(1, "CLKM5"),
+		MTK_FUNCTION(2, "SCL2_0"),
+		MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(4, "EXT_RXD2"),
+		MTK_FUNCTION(5, "ANT_SEL2"),
+		MTK_FUNCTION(7, "DBG_MON_A[16]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(11, "EINT11"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 11),
+		MTK_FUNCTION(0, "GPIO11"),
+		MTK_FUNCTION(1, "CLKM4"),
+		MTK_FUNCTION(2, "PWM_C"),
+		MTK_FUNCTION(3, "CONN_TEST_CK"),
+		MTK_FUNCTION(4, "ANT_SEL3"),
+		MTK_FUNCTION(6, "EXT_RXD3"),
+		MTK_FUNCTION(7, "DBG_MON_A[17]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(12, "EINT12"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 12),
+		MTK_FUNCTION(0, "GPIO12"),
+		MTK_FUNCTION(1, "CLKM5"),
+		MTK_FUNCTION(2, "PWM_A"),
+		MTK_FUNCTION(3, "SPDIF_OUT"),
+		MTK_FUNCTION(4, "ANT_SEL4"),
+		MTK_FUNCTION(6, "EXT_TXEN"),
+		MTK_FUNCTION(7, "DBG_MON_A[18]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(13, "EINT13"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 13),
+		MTK_FUNCTION(0, "GPIO13"),
+		MTK_FUNCTION(3, "TSF_IN"),
+		MTK_FUNCTION(4, "ANT_SEL5"),
+		MTK_FUNCTION(6, "SPDIF_IN"),
+		MTK_FUNCTION(7, "DBG_MON_A[19]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(14, "EINT14"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 14),
+		MTK_FUNCTION(0, "GPIO14"),
+		MTK_FUNCTION(2, "I2S_8CH_DO1"),
+		MTK_FUNCTION(3, "TDM_RX_MCK"),
+		MTK_FUNCTION(4, "ANT_SEL1"),
+		MTK_FUNCTION(5, "CONN_MCU_DBGACK_N"),
+		MTK_FUNCTION(6, "NCLE"),
+		MTK_FUNCTION(7, "DBG_MON_B[8]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(15, "EINT15"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 15),
+		MTK_FUNCTION(0, "GPIO15"),
+		MTK_FUNCTION(2, "I2S_8CH_LRCK"),
+		MTK_FUNCTION(3, "TDM_RX_BCK"),
+		MTK_FUNCTION(4, "ANT_SEL2"),
+		MTK_FUNCTION(5, "CONN_MCU_DBGI_N"),
+		MTK_FUNCTION(6, "NCEB1"),
+		MTK_FUNCTION(7, "DBG_MON_B[9]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(16, "EINT16"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 16),
+		MTK_FUNCTION(0, "GPIO16"),
+		MTK_FUNCTION(2, "I2S_8CH_BCK"),
+		MTK_FUNCTION(3, "TDM_RX_LRCK"),
+		MTK_FUNCTION(4, "ANT_SEL3"),
+		MTK_FUNCTION(5, "CONN_MCU_TRST_B"),
+		MTK_FUNCTION(6, "NCEB0"),
+		MTK_FUNCTION(7, "DBG_MON_B[10]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(17, "EINT17"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 17),
+		MTK_FUNCTION(0, "GPIO17"),
+		MTK_FUNCTION(2, "I2S_8CH_MCK"),
+		MTK_FUNCTION(3, "TDM_RX_DI"),
+		MTK_FUNCTION(4, "IDDIG"),
+		MTK_FUNCTION(5, "ANT_SEL4"),
+		MTK_FUNCTION(6, "NREB"),
+		MTK_FUNCTION(7, "DBG_MON_B[11]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(18, "EINT18"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 18),
+		MTK_FUNCTION(0, "GPIO18"),
+		MTK_FUNCTION(2, "USB_DRVVBUS"),
+		MTK_FUNCTION(3, "I2S3_LRCK"),
+		MTK_FUNCTION(4, "CLKM1"),
+		MTK_FUNCTION(5, "ANT_SEL3"),
+		MTK_FUNCTION(6, "I2S2_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[20]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(19, "EINT19"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 19),
+		MTK_FUNCTION(0, "GPIO19"),
+		MTK_FUNCTION(1, "UCTS1"),
+		MTK_FUNCTION(2, "IDDIG"),
+		MTK_FUNCTION(3, "I2S3_BCK"),
+		MTK_FUNCTION(4, "CLKM2"),
+		MTK_FUNCTION(5, "ANT_SEL4"),
+		MTK_FUNCTION(6, "I2S2_DI"),
+		MTK_FUNCTION(7, "DBG_MON_A[21]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(20, "EINT20"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 20),
+		MTK_FUNCTION(0, "GPIO20"),
+		MTK_FUNCTION(1, "URTS1"),
+		MTK_FUNCTION(3, "I2S3_DO"),
+		MTK_FUNCTION(4, "CLKM3"),
+		MTK_FUNCTION(5, "ANT_SEL5"),
+		MTK_FUNCTION(6, "I2S2_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[22]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(21, "EINT21"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 21),
+		MTK_FUNCTION(0, "GPIO21"),
+		MTK_FUNCTION(1, "NRNB"),
+		MTK_FUNCTION(2, "ANT_SEL0"),
+		MTK_FUNCTION(3, "I2S_8CH_DO4"),
+		MTK_FUNCTION(7, "DBG_MON_B[31]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(22, "EINT22"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 22),
+		MTK_FUNCTION(0, "GPIO22"),
+		MTK_FUNCTION(2, "I2S_8CH_DO2"),
+		MTK_FUNCTION(3, "TSF_IN"),
+		MTK_FUNCTION(4, "USB_DRVVBUS"),
+		MTK_FUNCTION(5, "SPDIF_OUT"),
+		MTK_FUNCTION(6, "NRE_C"),
+		MTK_FUNCTION(7, "DBG_MON_B[12]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(23, "EINT23"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 23),
+		MTK_FUNCTION(0, "GPIO23"),
+		MTK_FUNCTION(2, "I2S_8CH_DO3"),
+		MTK_FUNCTION(3, "CLKM0"),
+		MTK_FUNCTION(4, "IR"),
+		MTK_FUNCTION(5, "SPDIF_IN"),
+		MTK_FUNCTION(6, "NDQS_C"),
+		MTK_FUNCTION(7, "DBG_MON_B[13]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(24, "EINT24"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 24),
+		MTK_FUNCTION(0, "GPIO24"),
+		MTK_FUNCTION(3, "ANT_SEL1"),
+		MTK_FUNCTION(4, "UCTS2"),
+		MTK_FUNCTION(5, "PWM_A"),
+		MTK_FUNCTION(6, "I2S0_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[0]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(25, "EINT25"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 25),
+		MTK_FUNCTION(0, "GPIO25"),
+		MTK_FUNCTION(3, "ANT_SEL0"),
+		MTK_FUNCTION(4, "URTS2"),
+		MTK_FUNCTION(5, "PWM_B"),
+		MTK_FUNCTION(6, "I2S_8CH_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[1]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(26, "PWRAP_SPI0_MI"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 26),
+		MTK_FUNCTION(0, "GPIO26"),
+		MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+		MTK_FUNCTION(2, "PWRAP_SPI0_MI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(27, "PWRAP_SPI0_MO"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 27),
+		MTK_FUNCTION(0, "GPIO27"),
+		MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+		MTK_FUNCTION(2, "PWRAP_SPI0_MO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(28, "PWRAP_INT"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 28),
+		MTK_FUNCTION(0, "GPIO28"),
+		MTK_FUNCTION(1, "I2S0_MCK"),
+		MTK_FUNCTION(4, "I2S_8CH_MCK"),
+		MTK_FUNCTION(5, "I2S2_MCK"),
+		MTK_FUNCTION(6, "I2S3_MCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(29, "PWRAP_SPI0_CK"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 29),
+		MTK_FUNCTION(0, "GPIO29"),
+		MTK_FUNCTION(1, "PWRAP_SPI0_CK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(30, "PWRAP_SPI0_CSN"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 30),
+		MTK_FUNCTION(0, "GPIO30"),
+		MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(31, "RTC32K_CK"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 31),
+		MTK_FUNCTION(0, "GPIO31"),
+		MTK_FUNCTION(1, "RTC32K_CK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(32, "WATCHDOG"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 32),
+		MTK_FUNCTION(0, "GPIO32"),
+		MTK_FUNCTION(1, "WATCHDOG")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(33, "SRCLKENA"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 33),
+		MTK_FUNCTION(0, "GPIO33"),
+		MTK_FUNCTION(1, "SRCLKENA0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(34, "URXD2"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 34),
+		MTK_FUNCTION(0, "GPIO34"),
+		MTK_FUNCTION(1, "URXD2"),
+		MTK_FUNCTION(3, "UTXD2"),
+		MTK_FUNCTION(4, "DBG_SCL"),
+		MTK_FUNCTION(6, "I2S2_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[0]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(35, "UTXD2"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 35),
+		MTK_FUNCTION(0, "GPIO35"),
+		MTK_FUNCTION(1, "UTXD2"),
+		MTK_FUNCTION(3, "URXD2"),
+		MTK_FUNCTION(4, "DBG_SDA"),
+		MTK_FUNCTION(6, "I2S3_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[1]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(36, "MRG_CLK"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 36),
+		MTK_FUNCTION(0, "GPIO36"),
+		MTK_FUNCTION(1, "MRG_CLK"),
+		MTK_FUNCTION(3, "I2S0_BCK"),
+		MTK_FUNCTION(4, "I2S3_BCK"),
+		MTK_FUNCTION(5, "PCM0_CLK"),
+		MTK_FUNCTION(6, "IR"),
+		MTK_FUNCTION(7, "DBG_MON_A[2]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(37, "MRG_SYNC"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 37),
+		MTK_FUNCTION(0, "GPIO37"),
+		MTK_FUNCTION(1, "MRG_SYNC"),
+		MTK_FUNCTION(3, "I2S0_LRCK"),
+		MTK_FUNCTION(4, "I2S3_LRCK"),
+		MTK_FUNCTION(5, "PCM0_SYNC"),
+		MTK_FUNCTION(6, "EXT_COL"),
+		MTK_FUNCTION(7, "DBG_MON_A[3]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(38, "MRG_DI"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 38),
+		MTK_FUNCTION(0, "GPIO38"),
+		MTK_FUNCTION(1, "MRG_DI"),
+		MTK_FUNCTION(3, "I2S0_DI"),
+		MTK_FUNCTION(4, "I2S3_DO"),
+		MTK_FUNCTION(5, "PCM0_DI"),
+		MTK_FUNCTION(6, "EXT_MDIO"),
+		MTK_FUNCTION(7, "DBG_MON_A[4]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(39, "MRG_DO"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 39),
+		MTK_FUNCTION(0, "GPIO39"),
+		MTK_FUNCTION(1, "MRG_DO"),
+		MTK_FUNCTION(3, "I2S0_MCK"),
+		MTK_FUNCTION(4, "I2S3_MCK"),
+		MTK_FUNCTION(5, "PCM0_DO"),
+		MTK_FUNCTION(6, "EXT_MDC"),
+		MTK_FUNCTION(7, "DBG_MON_A[5]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(40, "KPROW0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 40),
+		MTK_FUNCTION(0, "GPIO40"),
+		MTK_FUNCTION(1, "KPROW0"),
+		MTK_FUNCTION(7, "DBG_MON_B[4]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(41, "KPROW1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 41),
+		MTK_FUNCTION(0, "GPIO41"),
+		MTK_FUNCTION(1, "KPROW1"),
+		MTK_FUNCTION(2, "IDDIG"),
+		MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(7, "DBG_MON_B[5]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(42, "KPCOL0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 42),
+		MTK_FUNCTION(0, "GPIO42"),
+		MTK_FUNCTION(1, "KPCOL0"),
+		MTK_FUNCTION(7, "DBG_MON_B[6]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(43, "KPCOL1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 43),
+		MTK_FUNCTION(0, "GPIO43"),
+		MTK_FUNCTION(1, "KPCOL1"),
+		MTK_FUNCTION(2, "USB_DRVVBUS"),
+		MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(4, "TSF_IN"),
+		MTK_FUNCTION(7, "DBG_MON_B[7]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(44, "JTMS"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 44),
+		MTK_FUNCTION(0, "GPIO44"),
+		MTK_FUNCTION(1, "JTMS"),
+		MTK_FUNCTION(2, "CONN_MCU_TMS"),
+		MTK_FUNCTION(3, "CONN_MCU_AICE_JMSC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(45, "JTCK"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 45),
+		MTK_FUNCTION(0, "GPIO45"),
+		MTK_FUNCTION(1, "JTCK"),
+		MTK_FUNCTION(2, "CONN_MCU_TCK"),
+		MTK_FUNCTION(3, "CONN_MCU_AICE_JCKC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(46, "JTDI"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 46),
+		MTK_FUNCTION(0, "GPIO46"),
+		MTK_FUNCTION(1, "JTDI"),
+		MTK_FUNCTION(2, "CONN_MCU_TDI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(47, "JTDO"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 47),
+		MTK_FUNCTION(0, "GPIO47"),
+		MTK_FUNCTION(1, "JTDO"),
+		MTK_FUNCTION(2, "CONN_MCU_TDO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(48, "SPI_CS"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 48),
+		MTK_FUNCTION(0, "GPIO48"),
+		MTK_FUNCTION(1, "SPI_CSB"),
+		MTK_FUNCTION(3, "I2S0_DI"),
+		MTK_FUNCTION(4, "I2S2_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[23]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(49, "SPI_CK"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 49),
+		MTK_FUNCTION(0, "GPIO49"),
+		MTK_FUNCTION(1, "SPI_CLK"),
+		MTK_FUNCTION(3, "I2S0_LRCK"),
+		MTK_FUNCTION(4, "I2S2_DI"),
+		MTK_FUNCTION(7, "DBG_MON_A[24]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(50, "SPI_MI"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 50),
+		MTK_FUNCTION(0, "GPIO50"),
+		MTK_FUNCTION(1, "SPI_MI"),
+		MTK_FUNCTION(2, "SPI_MO"),
+		MTK_FUNCTION(3, "I2S0_BCK"),
+		MTK_FUNCTION(4, "I2S2_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[25]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(51, "SPI_MO"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 51),
+		MTK_FUNCTION(0, "GPIO51"),
+		MTK_FUNCTION(1, "SPI_MO"),
+		MTK_FUNCTION(2, "SPI_MI"),
+		MTK_FUNCTION(3, "I2S0_MCK"),
+		MTK_FUNCTION(4, "I2S2_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[26]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(52, "SDA1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 52),
+		MTK_FUNCTION(0, "GPIO52"),
+		MTK_FUNCTION(1, "SDA1_0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(53, "SCL1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 53),
+		MTK_FUNCTION(0, "GPIO53"),
+		MTK_FUNCTION(1, "SCL1_0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(54, "GPIO54"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 54),
+		MTK_FUNCTION(0, "GPIO54"),
+		MTK_FUNCTION(2, "PWM_B"),
+		MTK_FUNCTION(7, "DBG_MON_B[2]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(55, "I2S_DATA_IN"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 55),
+		MTK_FUNCTION(0, "GPIO55"),
+		MTK_FUNCTION(1, "I2S0_DI"),
+		MTK_FUNCTION(2, "UCTS0"),
+		MTK_FUNCTION(3, "I2S3_DO"),
+		MTK_FUNCTION(4, "I2S_8CH_DO1"),
+		MTK_FUNCTION(5, "PWM_A"),
+		MTK_FUNCTION(6, "I2S2_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[28]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(56, "I2S_LRCK"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 56),
+		MTK_FUNCTION(0, "GPIO56"),
+		MTK_FUNCTION(1, "I2S0_LRCK"),
+		MTK_FUNCTION(3, "I2S3_LRCK"),
+		MTK_FUNCTION(4, "I2S_8CH_LRCK"),
+		MTK_FUNCTION(5, "PWM_B"),
+		MTK_FUNCTION(6, "I2S2_DI"),
+		MTK_FUNCTION(7, "DBG_MON_A[29]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(57, "I2S_BCK"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 57),
+		MTK_FUNCTION(0, "GPIO57"),
+		MTK_FUNCTION(1, "I2S0_BCK"),
+		MTK_FUNCTION(2, "URTS0"),
+		MTK_FUNCTION(3, "I2S3_BCK"),
+		MTK_FUNCTION(4, "I2S_8CH_BCK"),
+		MTK_FUNCTION(5, "PWM_C"),
+		MTK_FUNCTION(6, "I2S2_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_A[30]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(58, "SDA0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 58),
+		MTK_FUNCTION(0, "GPIO58"),
+		MTK_FUNCTION(1, "SDA0_0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(59, "SCL0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 59),
+		MTK_FUNCTION(0, "GPIO59"),
+		MTK_FUNCTION(1, "SCL0_0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(60, "SDA2"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 60),
+		MTK_FUNCTION(0, "GPIO60"),
+		MTK_FUNCTION(1, "SDA2_0"),
+		MTK_FUNCTION(2, "PWM_B")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(61, "SCL2"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 61),
+		MTK_FUNCTION(0, "GPIO61"),
+		MTK_FUNCTION(1, "SCL2_0"),
+		MTK_FUNCTION(2, "PWM_C")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(62, "URXD0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 62),
+		MTK_FUNCTION(0, "GPIO62"),
+		MTK_FUNCTION(1, "URXD0"),
+		MTK_FUNCTION(2, "UTXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(63, "UTXD0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 63),
+		MTK_FUNCTION(0, "GPIO63"),
+		MTK_FUNCTION(1, "UTXD0"),
+		MTK_FUNCTION(2, "URXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(64, "URXD1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 64),
+		MTK_FUNCTION(0, "GPIO64"),
+		MTK_FUNCTION(1, "URXD1"),
+		MTK_FUNCTION(2, "UTXD1"),
+		MTK_FUNCTION(7, "DBG_MON_A[27]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(65, "UTXD1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 65),
+		MTK_FUNCTION(0, "GPIO65"),
+		MTK_FUNCTION(1, "UTXD1"),
+		MTK_FUNCTION(2, "URXD1"),
+		MTK_FUNCTION(7, "DBG_MON_A[31]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(66, "LCM_RST"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 66),
+		MTK_FUNCTION(0, "GPIO66"),
+		MTK_FUNCTION(1, "LCM_RST"),
+		MTK_FUNCTION(3, "I2S0_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[3]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(67, "GPIO67"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 67),
+		MTK_FUNCTION(0, "GPIO67"),
+		MTK_FUNCTION(3, "I2S_8CH_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[14]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(68, "MSDC2_CMD"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 68),
+		MTK_FUNCTION(0, "GPIO68"),
+		MTK_FUNCTION(1, "MSDC2_CMD"),
+		MTK_FUNCTION(2, "I2S_8CH_DO4"),
+		MTK_FUNCTION(3, "SDA1_0"),
+		MTK_FUNCTION(5, "USB_SDA"),
+		MTK_FUNCTION(6, "I2S3_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[15]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(69, "MSDC2_CLK"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 69),
+		MTK_FUNCTION(0, "GPIO69"),
+		MTK_FUNCTION(1, "MSDC2_CLK"),
+		MTK_FUNCTION(2, "I2S_8CH_DO3"),
+		MTK_FUNCTION(3, "SCL1_0"),
+		MTK_FUNCTION(5, "USB_SCL"),
+		MTK_FUNCTION(6, "I2S3_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[16]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(70, "MSDC2_DAT0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 70),
+		MTK_FUNCTION(0, "GPIO70"),
+		MTK_FUNCTION(1, "MSDC2_DAT0"),
+		MTK_FUNCTION(2, "I2S_8CH_DO2"),
+		MTK_FUNCTION(5, "UTXD0"),
+		MTK_FUNCTION(6, "I2S3_DO"),
+		MTK_FUNCTION(7, "DBG_MON_B[17]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(71, "MSDC2_DAT1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 71),
+		MTK_FUNCTION(0, "GPIO71"),
+		MTK_FUNCTION(1, "MSDC2_DAT1"),
+		MTK_FUNCTION(2, "I2S_8CH_DO1"),
+		MTK_FUNCTION(3, "PWM_A"),
+		MTK_FUNCTION(4, "I2S3_MCK"),
+		MTK_FUNCTION(5, "URXD0"),
+		MTK_FUNCTION(6, "PWM_B"),
+		MTK_FUNCTION(7, "DBG_MON_B[18]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(72, "MSDC2_DAT2"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 72),
+		MTK_FUNCTION(0, "GPIO72"),
+		MTK_FUNCTION(1, "MSDC2_DAT2"),
+		MTK_FUNCTION(2, "I2S_8CH_LRCK"),
+		MTK_FUNCTION(3, "SDA2_0"),
+		MTK_FUNCTION(5, "UTXD1"),
+		MTK_FUNCTION(6, "PWM_C"),
+		MTK_FUNCTION(7, "DBG_MON_B[19]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(73, "MSDC2_DAT3"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 73),
+		MTK_FUNCTION(0, "GPIO73"),
+		MTK_FUNCTION(1, "MSDC2_DAT3"),
+		MTK_FUNCTION(2, "I2S_8CH_BCK"),
+		MTK_FUNCTION(3, "SCL2_0"),
+		MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(5, "URXD1"),
+		MTK_FUNCTION(6, "PWM_A"),
+		MTK_FUNCTION(7, "DBG_MON_B[20]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(74, "TDN3"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 74),
+		MTK_FUNCTION(0, "GPIO74"),
+		MTK_FUNCTION(1, "TDN3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(75, "TDP3"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 75),
+		MTK_FUNCTION(0, "GPIO75"),
+		MTK_FUNCTION(1, "TDP3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(76, "TDN2"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 76),
+		MTK_FUNCTION(0, "GPIO76"),
+		MTK_FUNCTION(1, "TDN2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(77, "TDP2"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 77),
+		MTK_FUNCTION(0, "GPIO77"),
+		MTK_FUNCTION(1, "TDP2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(78, "TCN"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 78),
+		MTK_FUNCTION(0, "GPIO78"),
+		MTK_FUNCTION(1, "TCN")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(79, "TCP"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 79),
+		MTK_FUNCTION(0, "GPIO79"),
+		MTK_FUNCTION(1, "TCP")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(80, "TDN1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 80),
+		MTK_FUNCTION(0, "GPIO80"),
+		MTK_FUNCTION(1, "TDN1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(81, "TDP1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 81),
+		MTK_FUNCTION(0, "GPIO81"),
+		MTK_FUNCTION(1, "TDP1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(82, "TDN0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 82),
+		MTK_FUNCTION(0, "GPIO82"),
+		MTK_FUNCTION(1, "TDN0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(83, "TDP0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 83),
+		MTK_FUNCTION(0, "GPIO83"),
+		MTK_FUNCTION(1, "TDP0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(84, "RDN0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 84),
+		MTK_FUNCTION(0, "GPIO84"),
+		MTK_FUNCTION(1, "RDN0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(85, "RDP0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 85),
+		MTK_FUNCTION(0, "GPIO85"),
+		MTK_FUNCTION(1, "RDP0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(86, "RDN1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 86),
+		MTK_FUNCTION(0, "GPIO86"),
+		MTK_FUNCTION(1, "RDN1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(87, "RDP1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 87),
+		MTK_FUNCTION(0, "GPIO87"),
+		MTK_FUNCTION(1, "RDP1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(88, "RCN"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 88),
+		MTK_FUNCTION(0, "GPIO88"),
+		MTK_FUNCTION(1, "RCN")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(89, "RCP"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 89),
+		MTK_FUNCTION(0, "GPIO89"),
+		MTK_FUNCTION(1, "RCP")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(90, "RDN2"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 90),
+		MTK_FUNCTION(0, "GPIO90"),
+		MTK_FUNCTION(1, "RDN2"),
+		MTK_FUNCTION(2, "CMDAT8")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(91, "RDP2"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 91),
+		MTK_FUNCTION(0, "GPIO91"),
+		MTK_FUNCTION(1, "RDP2"),
+		MTK_FUNCTION(2, "CMDAT9")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(92, "RDN3"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 92),
+		MTK_FUNCTION(0, "GPIO92"),
+		MTK_FUNCTION(1, "RDN3"),
+		MTK_FUNCTION(2, "CMDAT4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(93, "RDP3"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 93),
+		MTK_FUNCTION(0, "GPIO93"),
+		MTK_FUNCTION(1, "RDP3"),
+		MTK_FUNCTION(2, "CMDAT5")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(94, "RCN_A"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 94),
+		MTK_FUNCTION(0, "GPIO94"),
+		MTK_FUNCTION(1, "RCN_A"),
+		MTK_FUNCTION(2, "CMDAT6")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(95, "RCP_A"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 95),
+		MTK_FUNCTION(0, "GPIO95"),
+		MTK_FUNCTION(1, "RCP_A"),
+		MTK_FUNCTION(2, "CMDAT7")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(96, "RDN1_A"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 96),
+		MTK_FUNCTION(0, "GPIO96"),
+		MTK_FUNCTION(1, "RDN1_A"),
+		MTK_FUNCTION(2, "CMDAT2"),
+		MTK_FUNCTION(3, "CMCSD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(97, "RDP1_A"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 97),
+		MTK_FUNCTION(0, "GPIO97"),
+		MTK_FUNCTION(1, "RDP1_A"),
+		MTK_FUNCTION(2, "CMDAT3"),
+		MTK_FUNCTION(3, "CMCSD3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(98, "RDN0_A"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 98),
+		MTK_FUNCTION(0, "GPIO98"),
+		MTK_FUNCTION(1, "RDN0_A"),
+		MTK_FUNCTION(2, "CMHSYNC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(99, "RDP0_A"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 99),
+		MTK_FUNCTION(0, "GPIO99"),
+		MTK_FUNCTION(1, "RDP0_A"),
+		MTK_FUNCTION(2, "CMVSYNC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(100, "CMDAT0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 100),
+		MTK_FUNCTION(0, "GPIO100"),
+		MTK_FUNCTION(1, "CMDAT0"),
+		MTK_FUNCTION(2, "CMCSD0"),
+		MTK_FUNCTION(3, "ANT_SEL2"),
+		MTK_FUNCTION(5, "TDM_RX_MCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[21]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(101, "CMDAT1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 101),
+		MTK_FUNCTION(0, "GPIO101"),
+		MTK_FUNCTION(1, "CMDAT1"),
+		MTK_FUNCTION(2, "CMCSD1"),
+		MTK_FUNCTION(3, "ANT_SEL3"),
+		MTK_FUNCTION(4, "CMFLASH"),
+		MTK_FUNCTION(5, "TDM_RX_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[22]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(102, "CMMCLK"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 102),
+		MTK_FUNCTION(0, "GPIO102"),
+		MTK_FUNCTION(1, "CMMCLK"),
+		MTK_FUNCTION(3, "ANT_SEL4"),
+		MTK_FUNCTION(5, "TDM_RX_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[23]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(103, "CMPCLK"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 103),
+		MTK_FUNCTION(0, "GPIO103"),
+		MTK_FUNCTION(1, "CMPCLK"),
+		MTK_FUNCTION(2, "CMCSK"),
+		MTK_FUNCTION(3, "ANT_SEL5"),
+		MTK_FUNCTION(5, " TDM_RX_DI"),
+		MTK_FUNCTION(7, "DBG_MON_B[24]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(104, "MSDC1_CMD"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 104),
+		MTK_FUNCTION(0, "GPIO104"),
+		MTK_FUNCTION(1, "MSDC1_CMD"),
+		MTK_FUNCTION(4, "SQICS"),
+		MTK_FUNCTION(7, "DBG_MON_B[25]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(105, "MSDC1_CLK"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 105),
+		MTK_FUNCTION(0, "GPIO105"),
+		MTK_FUNCTION(1, "MSDC1_CLK"),
+		MTK_FUNCTION(4, "SQISO"),
+		MTK_FUNCTION(7, "DBG_MON_B[26]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(106, "MSDC1_DAT0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 106),
+		MTK_FUNCTION(0, "GPIO106"),
+		MTK_FUNCTION(1, "MSDC1_DAT0"),
+		MTK_FUNCTION(4, "SQISI"),
+		MTK_FUNCTION(7, "DBG_MON_B[27]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(107, "MSDC1_DAT1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 107),
+		MTK_FUNCTION(0, "GPIO107"),
+		MTK_FUNCTION(1, "MSDC1_DAT1"),
+		MTK_FUNCTION(4, "SQIWP"),
+		MTK_FUNCTION(7, "DBG_MON_B[28]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(108, "MSDC1_DAT2"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 108),
+		MTK_FUNCTION(0, "GPIO108"),
+		MTK_FUNCTION(1, "MSDC1_DAT2"),
+		MTK_FUNCTION(4, "SQIRST"),
+		MTK_FUNCTION(7, "DBG_MON_B[29]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(109, "MSDC1_DAT3"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 109),
+		MTK_FUNCTION(0, "GPIO109"),
+		MTK_FUNCTION(1, "MSDC1_DAT3"),
+		MTK_FUNCTION(4, "SQICK"), /* WIP */
+		MTK_FUNCTION(7, "DBG_MON_B[30]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(110, "MSDC0_DAT7"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 110),
+		MTK_FUNCTION(0, "GPIO110"),
+		MTK_FUNCTION(1, "MSDC0_DAT7"),
+		MTK_FUNCTION(4, "NLD7")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(111, "MSDC0_DAT6"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 111),
+		MTK_FUNCTION(0, "GPIO111"),
+		MTK_FUNCTION(1, "MSDC0_DAT6"),
+		MTK_FUNCTION(4, "NLD6")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(112, "MSDC0_DAT5"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 112),
+		MTK_FUNCTION(0, "GPIO112"),
+		MTK_FUNCTION(1, "MSDC0_DAT5"),
+		MTK_FUNCTION(4, "NLD4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(113, "MSDC0_DAT4"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 113),
+		MTK_FUNCTION(0, "GPIO113"),
+		MTK_FUNCTION(1, "MSDC0_DAT4"),
+		MTK_FUNCTION(4, "NLD3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(114, "MSDC0_RSTB"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 114),
+		MTK_FUNCTION(0, "GPIO114"),
+		MTK_FUNCTION(1, "MSDC0_RSTB"),
+		MTK_FUNCTION(4, "NLD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(115, "MSDC0_CMD"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 115),
+		MTK_FUNCTION(0, "GPIO115"),
+		MTK_FUNCTION(1, "MSDC0_CMD"),
+		MTK_FUNCTION(4, "NALE")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(116, "MSDC0_CLK"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 116),
+		MTK_FUNCTION(0, "GPIO116"),
+		MTK_FUNCTION(1, "MSDC0_CLK"),
+		MTK_FUNCTION(4, "NWEB")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(117, "MSDC0_DAT3"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 117),
+		MTK_FUNCTION(0, "GPIO117"),
+		MTK_FUNCTION(1, "MSDC0_DAT3"),
+		MTK_FUNCTION(4, "NLD1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(118, "MSDC0_DAT2"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 118),
+		MTK_FUNCTION(0, "GPIO118"),
+		MTK_FUNCTION(1, "MSDC0_DAT2"),
+		MTK_FUNCTION(4, "NLD5")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(119, "MSDC0_DAT1"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 119),
+		MTK_FUNCTION(0, "GPIO119"),
+		MTK_FUNCTION(1, "MSDC0_DAT1"),
+		MTK_FUNCTION(4, "NLD8")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(120, "MSDC0_DAT0"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 120),
+		MTK_FUNCTION(0, "GPIO120"),
+		MTK_FUNCTION(1, "MSDC0_DAT0"),
+		MTK_FUNCTION(4, "WATCHDOG"),
+		MTK_FUNCTION(5, "NLD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(121, "GPIO121"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 121),
+		MTK_FUNCTION(0, "GPIO121")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(122, "GPIO122"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 122),
+		MTK_FUNCTION(0, "GPIO122")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(123, "GPIO123"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 123),
+		MTK_FUNCTION(0, "GPIO123")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(124, "GPIO124"),
+		NULL, "mt8516",
+		MTK_EINT_FUNCTION(0, 124),
+		MTK_FUNCTION(0, "GPIO124")
+	),
+};
+
+#endif /* __PINCTRL_MTK_MT8516_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
new file mode 100644
index 0000000..563f766
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -0,0 +1,928 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek Pinctrl Paris Driver, which implements the vendor per-pin
+ * bindings for MediaTek SoCs.
+ *
+ * Copyright (C) 2018 MediaTek Inc.
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *	   Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *	   Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ */
+
+#include <linux/gpio/driver.h>
+#include <dt-bindings/pinctrl/mt65xx.h>
+#include "pinctrl-paris.h"
+
+#define PINCTRL_PINCTRL_DEV	KBUILD_MODNAME
+
+/* Custom pinconf parameters */
+#define MTK_PIN_CONFIG_TDSEL	(PIN_CONFIG_END + 1)
+#define MTK_PIN_CONFIG_RDSEL	(PIN_CONFIG_END + 2)
+#define MTK_PIN_CONFIG_PU_ADV	(PIN_CONFIG_END + 3)
+#define MTK_PIN_CONFIG_PD_ADV	(PIN_CONFIG_END + 4)
+#define MTK_PIN_CONFIG_DRV_ADV	(PIN_CONFIG_END + 5)
+
+static const struct pinconf_generic_params mtk_custom_bindings[] = {
+	{"mediatek,tdsel",	MTK_PIN_CONFIG_TDSEL,		0},
+	{"mediatek,rdsel",	MTK_PIN_CONFIG_RDSEL,		0},
+	{"mediatek,pull-up-adv", MTK_PIN_CONFIG_PU_ADV,		1},
+	{"mediatek,pull-down-adv", MTK_PIN_CONFIG_PD_ADV,	1},
+	{"mediatek,drive-strength-adv", MTK_PIN_CONFIG_DRV_ADV,	2},
+};
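+
+/*
+ * Illustrative (hypothetical) device-tree usage of the custom properties
+ * above; the argument encodings are SoC-specific:
+ *
+ *	mmc0_pins: pins-cmd-dat {
+ *		pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>;
+ *		mediatek,pull-up-adv = <3>;
+ *		mediatek,tdsel = <0>;
+ *	};
+ */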
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item mtk_conf_items[] = {
+	PCONFDUMP(MTK_PIN_CONFIG_TDSEL, "tdsel", NULL, true),
+	PCONFDUMP(MTK_PIN_CONFIG_RDSEL, "rdsel", NULL, true),
+	PCONFDUMP(MTK_PIN_CONFIG_PU_ADV, "pu-adv", NULL, true),
+	PCONFDUMP(MTK_PIN_CONFIG_PD_ADV, "pd-adv", NULL, true),
+	PCONFDUMP(MTK_PIN_CONFIG_DRV_ADV, "drive-strength-adv", NULL, true),
+};
+#endif
+
+static const char * const mtk_gpio_functions[] = {
+	"func0", "func1", "func2", "func3",
+	"func4", "func5", "func6", "func7",
+	"func8", "func9", "func10", "func11",
+	"func12", "func13", "func14", "func15",
+};
+
+static int mtk_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
+					  struct pinctrl_gpio_range *range,
+					  unsigned int pin)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+	const struct mtk_pin_desc *desc;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+	return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE,
+				hw->soc->gpio_m);
+}
+
+static int mtk_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
+					 struct pinctrl_gpio_range *range,
+					 unsigned int pin, bool input)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+	const struct mtk_pin_desc *desc;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+	/* hardware would take 0 as input direction */
+	return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR, !input);
+}
+
+static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
+			   unsigned int pin, unsigned long *config)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+	u32 param = pinconf_to_config_param(*config);
+	int val, val2, err, reg, ret = 1;
+	const struct mtk_pin_desc *desc;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (hw->soc->bias_disable_get) {
+			err = hw->soc->bias_disable_get(hw, desc, &ret);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		if (hw->soc->bias_get) {
+			err = hw->soc->bias_get(hw, desc, 1, &ret);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (hw->soc->bias_get) {
+			err = hw->soc->bias_get(hw, desc, 0, &ret);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case PIN_CONFIG_SLEW_RATE:
+		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &val);
+		if (err)
+			return err;
+
+		if (!val)
+			return -EINVAL;
+
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+	case PIN_CONFIG_OUTPUT_ENABLE:
+		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &val);
+		if (err)
+			return err;
+
+		/* HW takes input mode as zero; output mode as non-zero */
+		if ((val && param == PIN_CONFIG_INPUT_ENABLE) ||
+		    (!val && param == PIN_CONFIG_OUTPUT_ENABLE))
+			return -EINVAL;
+
+		break;
+	case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &val);
+		if (err)
+			return err;
+
+		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &val2);
+		if (err)
+			return err;
+
+		if (val || !val2)
+			return -EINVAL;
+
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		if (hw->soc->drive_get) {
+			err = hw->soc->drive_get(hw, desc, &ret);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case MTK_PIN_CONFIG_TDSEL:
+	case MTK_PIN_CONFIG_RDSEL:
+		reg = (param == MTK_PIN_CONFIG_TDSEL) ?
+		       PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
+
+		err = mtk_hw_get_value(hw, desc, reg, &val);
+		if (err)
+			return err;
+
+		ret = val;
+
+		break;
+	case MTK_PIN_CONFIG_PU_ADV:
+	case MTK_PIN_CONFIG_PD_ADV:
+		if (hw->soc->adv_pull_get) {
+			bool pullup;
+
+			pullup = param == MTK_PIN_CONFIG_PU_ADV;
+			err = hw->soc->adv_pull_get(hw, desc, pullup, &ret);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case MTK_PIN_CONFIG_DRV_ADV:
+		if (hw->soc->adv_drive_get) {
+			err = hw->soc->adv_drive_get(hw, desc, &ret);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	*config = pinconf_to_config_packed(param, ret);
+
+	return 0;
+}
+
+static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+			   enum pin_config_param param,
+			   enum pin_config_param arg)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+	const struct mtk_pin_desc *desc;
+	int err = 0;
+	u32 reg;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+	switch ((u32)param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (hw->soc->bias_disable_set) {
+			err = hw->soc->bias_disable_set(hw, desc);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		if (hw->soc->bias_set) {
+			err = hw->soc->bias_set(hw, desc, 1);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (hw->soc->bias_set) {
+			err = hw->soc->bias_set(hw, desc, 0);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case PIN_CONFIG_OUTPUT_ENABLE:
+		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
+				       MTK_DISABLE);
+		if (err)
+			goto err;
+
+		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+				       MTK_OUTPUT);
+		if (err)
+			goto err;
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+		if (hw->soc->ies_present) {
+			mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES,
+					 MTK_ENABLE);
+		}
+
+		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+				       MTK_INPUT);
+		if (err)
+			goto err;
+		break;
+	case PIN_CONFIG_SLEW_RATE:
+		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SR,
+				       arg);
+		if (err)
+			goto err;
+
+		break;
+	case PIN_CONFIG_OUTPUT:
+		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+				       MTK_OUTPUT);
+		if (err)
+			goto err;
+
+		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO,
+				       arg);
+		if (err)
+			goto err;
+		break;
+	case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+		/* arg = 1: input mode with SMT enabled;
+		 * arg = 0: output mode with SMT disabled.
+		 * Remap so that bit 0 carries the direction (1 = output)
+		 * and bit 1 carries the SMT enable.
+		 */
+		arg = arg ? 2 : 1;
+		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+				       arg & 1);
+		if (err)
+			goto err;
+
+		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
+				       !!(arg & 2));
+		if (err)
+			goto err;
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		if (hw->soc->drive_set) {
+			err = hw->soc->drive_set(hw, desc, arg);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case MTK_PIN_CONFIG_TDSEL:
+	case MTK_PIN_CONFIG_RDSEL:
+		reg = (param == MTK_PIN_CONFIG_TDSEL) ?
+		       PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
+
+		err = mtk_hw_set_value(hw, desc, reg, arg);
+		if (err)
+			goto err;
+		break;
+	case MTK_PIN_CONFIG_PU_ADV:
+	case MTK_PIN_CONFIG_PD_ADV:
+		if (hw->soc->adv_pull_set) {
+			bool pullup;
+
+			pullup = param == MTK_PIN_CONFIG_PU_ADV;
+			err = hw->soc->adv_pull_set(hw, desc, pullup,
+						    arg);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	case MTK_PIN_CONFIG_DRV_ADV:
+		if (hw->soc->adv_drive_set) {
+			err = hw->soc->adv_drive_set(hw, desc, arg);
+			if (err)
+				return err;
+		} else {
+			return -ENOTSUPP;
+		}
+		break;
+	default:
+		err = -ENOTSUPP;
+	}
+
+err:
+	return err;
+}
+
+static struct mtk_pinctrl_group *
+mtk_pctrl_find_group_by_pin(struct mtk_pinctrl *hw, u32 pin)
+{
+	int i;
+
+	for (i = 0; i < hw->soc->ngrps; i++) {
+		struct mtk_pinctrl_group *grp = hw->groups + i;
+
+		if (grp->pin == pin)
+			return grp;
+	}
+
+	return NULL;
+}
+
+static const struct mtk_func_desc *
+mtk_pctrl_find_function_by_pin(struct mtk_pinctrl *hw, u32 pin_num, u32 fnum)
+{
+	const struct mtk_pin_desc *pin = hw->soc->pins + pin_num;
+	const struct mtk_func_desc *func = pin->funcs;
+
+	while (func && func->name) {
+		if (func->muxval == fnum)
+			return func;
+		func++;
+	}
+
+	return NULL;
+}
+
+static bool mtk_pctrl_is_function_valid(struct mtk_pinctrl *hw, u32 pin_num,
+					u32 fnum)
+{
+	int i;
+
+	for (i = 0; i < hw->soc->npins; i++) {
+		const struct mtk_pin_desc *pin = hw->soc->pins + i;
+
+		if (pin->number == pin_num) {
+			const struct mtk_func_desc *func = pin->funcs;
+
+			while (func && func->name) {
+				if (func->muxval == fnum)
+					return true;
+				func++;
+			}
+
+			break;
+		}
+	}
+
+	return false;
+}
+
+static int mtk_pctrl_dt_node_to_map_func(struct mtk_pinctrl *pctl,
+					 u32 pin, u32 fnum,
+					 struct mtk_pinctrl_group *grp,
+					 struct pinctrl_map **map,
+					 unsigned *reserved_maps,
+					 unsigned *num_maps)
+{
+	bool ret;
+
+	if (*num_maps == *reserved_maps)
+		return -ENOSPC;
+
+	(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+	(*map)[*num_maps].data.mux.group = grp->name;
+
+	ret = mtk_pctrl_is_function_valid(pctl, pin, fnum);
+	if (!ret) {
+		dev_err(pctl->dev, "invalid function %d on pin %d\n",
+			fnum, pin);
+		return -EINVAL;
+	}
+
+	(*map)[*num_maps].data.mux.function = mtk_gpio_functions[fnum];
+	(*num_maps)++;
+
+	return 0;
+}
+
+static int mtk_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+				       struct device_node *node,
+				       struct pinctrl_map **map,
+				       unsigned *reserved_maps,
+				       unsigned *num_maps)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+	int num_pins, num_funcs, maps_per_pin, i, err;
+	struct mtk_pinctrl_group *grp;
+	unsigned int num_configs;
+	bool has_config = false;
+	unsigned long *configs;
+	u32 pinfunc, pin, func;
+	struct property *pins;
+	unsigned reserve = 0;
+
+	pins = of_find_property(node, "pinmux", NULL);
+	if (!pins) {
+		dev_err(hw->dev, "missing pins property in node %pOFn .\n",
+			node);
+		return -EINVAL;
+	}
+
+	err = pinconf_generic_parse_dt_config(node, pctldev, &configs,
+					      &num_configs);
+	if (err)
+		return err;
+
+	if (num_configs)
+		has_config = true;
+
+	num_pins = pins->length / sizeof(u32);
+	num_funcs = num_pins;
+	maps_per_pin = 0;
+	if (num_funcs)
+		maps_per_pin++;
+	if (has_config && num_pins >= 1)
+		maps_per_pin++;
+
+	if (!num_pins || !maps_per_pin) {
+		err = -EINVAL;
+		goto exit;
+	}
+
+	reserve = num_pins * maps_per_pin;
+
+	err = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps,
+					reserve);
+	if (err < 0)
+		goto exit;
+
+	for (i = 0; i < num_pins; i++) {
+		err = of_property_read_u32_index(node, "pinmux", i, &pinfunc);
+		if (err)
+			goto exit;
+
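+		/* Each "pinmux" cell packs the pin number into the upper
+		 * bits and the mux function into the low bits, as encoded
+		 * by MTK_PIN_NO() in dt-bindings/pinctrl/mt65xx.h.
+		 */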
+		pin = MTK_GET_PIN_NO(pinfunc);
+		func = MTK_GET_PIN_FUNC(pinfunc);
+
+		if (pin >= hw->soc->npins ||
+		    func >= ARRAY_SIZE(mtk_gpio_functions)) {
+			dev_err(hw->dev, "invalid pins value.\n");
+			err = -EINVAL;
+			goto exit;
+		}
+
+		grp = mtk_pctrl_find_group_by_pin(hw, pin);
+		if (!grp) {
+			dev_err(hw->dev, "unable to match pin %d to group\n",
+				pin);
+			err = -EINVAL;
+			goto exit;
+		}
+
+		err = mtk_pctrl_dt_node_to_map_func(hw, pin, func, grp, map,
+						    reserved_maps, num_maps);
+		if (err < 0)
+			goto exit;
+
+		if (has_config) {
+			err = pinctrl_utils_add_map_configs(pctldev, map,
+							    reserved_maps,
+							    num_maps,
+							    grp->name,
+							    configs,
+							    num_configs,
+							    PIN_MAP_TYPE_CONFIGS_GROUP);
+			if (err < 0)
+				goto exit;
+		}
+	}
+
+	err = 0;
+
+exit:
+	kfree(configs);
+	return err;
+}
+
+static int mtk_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+				    struct device_node *np_config,
+				    struct pinctrl_map **map,
+				    unsigned *num_maps)
+{
+	struct device_node *np;
+	unsigned reserved_maps;
+	int ret;
+
+	*map = NULL;
+	*num_maps = 0;
+	reserved_maps = 0;
+
+	for_each_child_of_node(np_config, np) {
+		ret = mtk_pctrl_dt_subnode_to_map(pctldev, np, map,
+						  &reserved_maps,
+						  num_maps);
+		if (ret < 0) {
+			pinctrl_utils_free_map(pctldev, *map, *num_maps);
+			of_node_put(np);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int mtk_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+
+	return hw->soc->ngrps;
+}
+
+static const char *mtk_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+					    unsigned group)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+
+	return hw->groups[group].name;
+}
+
+static int mtk_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+				    unsigned group, const unsigned **pins,
+				    unsigned *num_pins)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = (unsigned *)&hw->groups[group].pin;
+	*num_pins = 1;
+
+	return 0;
+}
+
+static const struct pinctrl_ops mtk_pctlops = {
+	.dt_node_to_map		= mtk_pctrl_dt_node_to_map,
+	.dt_free_map		= pinctrl_utils_free_map,
+	.get_groups_count	= mtk_pctrl_get_groups_count,
+	.get_group_name		= mtk_pctrl_get_group_name,
+	.get_group_pins		= mtk_pctrl_get_group_pins,
+};
+
+static int mtk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(mtk_gpio_functions);
+}
+
+static const char *mtk_pmx_get_func_name(struct pinctrl_dev *pctldev,
+					 unsigned selector)
+{
+	return mtk_gpio_functions[selector];
+}
+
+static int mtk_pmx_get_func_groups(struct pinctrl_dev *pctldev,
+				   unsigned function,
+				   const char * const **groups,
+				   unsigned * const num_groups)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = hw->grp_names;
+	*num_groups = hw->soc->ngrps;
+
+	return 0;
+}
+
+static int mtk_pmx_set_mux(struct pinctrl_dev *pctldev,
+			   unsigned function,
+			   unsigned group)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+	struct mtk_pinctrl_group *grp = hw->groups + group;
+	const struct mtk_func_desc *desc_func;
+	const struct mtk_pin_desc *desc;
+	bool ret;
+
+	ret = mtk_pctrl_is_function_valid(hw, grp->pin, function);
+	if (!ret) {
+		dev_err(hw->dev, "invalid function %d on group %d .\n",
+			function, group);
+		return -EINVAL;
+	}
+
+	desc_func = mtk_pctrl_find_function_by_pin(hw, grp->pin, function);
+	if (!desc_func)
+		return -EINVAL;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[grp->pin];
+	mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE, desc_func->muxval);
+
+	return 0;
+}
+
+static const struct pinmux_ops mtk_pmxops = {
+	.get_functions_count	= mtk_pmx_get_funcs_cnt,
+	.get_function_name	= mtk_pmx_get_func_name,
+	.get_function_groups	= mtk_pmx_get_func_groups,
+	.set_mux		= mtk_pmx_set_mux,
+	.gpio_set_direction	= mtk_pinmux_gpio_set_direction,
+	.gpio_request_enable	= mtk_pinmux_gpio_request_enable,
+};
+
+static int mtk_pconf_group_get(struct pinctrl_dev *pctldev, unsigned group,
+			       unsigned long *config)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+
+	*config = hw->groups[group].config;
+
+	return 0;
+}
+
+static int mtk_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
+			       unsigned long *configs, unsigned num_configs)
+{
+	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+	struct mtk_pinctrl_group *grp = &hw->groups[group];
+	int i, ret;
+
+	for (i = 0; i < num_configs; i++) {
+		ret = mtk_pinconf_set(pctldev, grp->pin,
+				      pinconf_to_config_param(configs[i]),
+				      pinconf_to_config_argument(configs[i]));
+		if (ret < 0)
+			return ret;
+
+		grp->config = configs[i];
+	}
+
+	return 0;
+}
+
+static const struct pinconf_ops mtk_confops = {
+	.pin_config_get = mtk_pinconf_get,
+	.pin_config_group_get	= mtk_pconf_group_get,
+	.pin_config_group_set	= mtk_pconf_group_set,
+};
+
+static struct pinctrl_desc mtk_desc = {
+	.name = PINCTRL_PINCTRL_DEV,
+	.pctlops = &mtk_pctlops,
+	.pmxops = &mtk_pmxops,
+	.confops = &mtk_confops,
+	.owner = THIS_MODULE,
+};
+
+static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
+{
+	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+	const struct mtk_pin_desc *desc;
+	int value, err;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &value);
+	if (err)
+		return err;
+
+	return !value;
+}
+
+static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
+{
+	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+	const struct mtk_pin_desc *desc;
+	int value, err;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+
+	err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DI, &value);
+	if (err)
+		return err;
+
+	return !!value;
+}
+
+static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+{
+	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+	const struct mtk_pin_desc *desc;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+
+	mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
+}
+
+static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
+{
+	return pinctrl_gpio_direction_input(chip->base + gpio);
+}
+
+static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
+				     int value)
+{
+	mtk_gpio_set(chip, gpio, value);
+
+	return pinctrl_gpio_direction_output(chip->base + gpio);
+}
+
+static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+	const struct mtk_pin_desc *desc;
+
+	if (!hw->eint)
+		return -ENOTSUPP;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[offset];
+
+	if (desc->eint.eint_n == EINT_NA)
+		return -ENOTSUPP;
+
+	return mtk_eint_find_irq(hw->eint, desc->eint.eint_n);
+}
+
+static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+			       unsigned long config)
+{
+	struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+	const struct mtk_pin_desc *desc;
+	u32 debounce;
+
+	desc = (const struct mtk_pin_desc *)&hw->soc->pins[offset];
+
+	if (!hw->eint ||
+	    pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE ||
+	    desc->eint.eint_n == EINT_NA)
+		return -ENOTSUPP;
+
+	debounce = pinconf_to_config_argument(config);
+
+	return mtk_eint_set_debounce(hw->eint, desc->eint.eint_n, debounce);
+}
+
+static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
+{
+	struct gpio_chip *chip = &hw->chip;
+	int ret;
+
+	chip->label		= PINCTRL_PINCTRL_DEV;
+	chip->parent		= hw->dev;
+	chip->request		= gpiochip_generic_request;
+	chip->free		= gpiochip_generic_free;
+	chip->get_direction	= mtk_gpio_get_direction;
+	chip->direction_input	= mtk_gpio_direction_input;
+	chip->direction_output	= mtk_gpio_direction_output;
+	chip->get		= mtk_gpio_get;
+	chip->set		= mtk_gpio_set;
+	chip->to_irq		= mtk_gpio_to_irq;
+	chip->set_config	= mtk_gpio_set_config;
+	chip->base		= -1;
+	chip->ngpio		= hw->soc->npins;
+	chip->of_node		= np;
+	chip->of_gpio_n_cells	= 2;
+
+	ret = gpiochip_add_data(chip, hw);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int mtk_pctrl_build_state(struct platform_device *pdev)
+{
+	struct mtk_pinctrl *hw = platform_get_drvdata(pdev);
+	int i;
+
+	/* Allocate groups */
+	hw->groups = devm_kmalloc_array(&pdev->dev, hw->soc->ngrps,
+					sizeof(*hw->groups), GFP_KERNEL);
+	if (!hw->groups)
+		return -ENOMEM;
+
+	/* We assume that one pin maps to one group; the pin name doubles
+	 * as the group name.
+	 */
+	hw->grp_names = devm_kmalloc_array(&pdev->dev, hw->soc->ngrps,
+					   sizeof(*hw->grp_names), GFP_KERNEL);
+	if (!hw->grp_names)
+		return -ENOMEM;
+
+	for (i = 0; i < hw->soc->npins; i++) {
+		const struct mtk_pin_desc *pin = hw->soc->pins + i;
+		struct mtk_pinctrl_group *group = hw->groups + i;
+
+		group->name = pin->name;
+		group->pin = pin->number;
+
+		hw->grp_names[i] = pin->name;
+	}
+
+	return 0;
+}
+
+int mtk_paris_pinctrl_probe(struct platform_device *pdev,
+			    const struct mtk_pin_soc *soc)
+{
+	struct pinctrl_pin_desc *pins;
+	struct mtk_pinctrl *hw;
+	struct resource *res;
+	int err, i;
+
+	hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
+	if (!hw)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, hw);
+	hw->soc = soc;
+	hw->dev = &pdev->dev;
+
+	if (!hw->soc->nbase_names) {
+		dev_err(&pdev->dev,
+			"SoC should be assigned at least one register base\n");
+		return -EINVAL;
+	}
+
+	hw->base = devm_kmalloc_array(&pdev->dev, hw->soc->nbase_names,
+				      sizeof(*hw->base), GFP_KERNEL);
+	if (!hw->base)
+		return -ENOMEM;
+
+	for (i = 0; i < hw->soc->nbase_names; i++) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   hw->soc->base_names[i]);
+		if (!res) {
+			dev_err(&pdev->dev, "missing IO resource\n");
+			return -ENXIO;
+		}
+
+		hw->base[i] = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(hw->base[i]))
+			return PTR_ERR(hw->base[i]);
+	}
+
+	hw->nbase = hw->soc->nbase_names;
+
+	err = mtk_pctrl_build_state(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "build state failed: %d\n", err);
+		return err;
+	}
+
+	/* Copy the pin descriptions from the internal struct mtk_pin_desc
+	 * so they can be registered with the core.
+	 */
+	pins = devm_kmalloc_array(&pdev->dev, hw->soc->npins, sizeof(*pins),
+				  GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+
+	for (i = 0; i < hw->soc->npins; i++) {
+		pins[i].number = hw->soc->pins[i].number;
+		pins[i].name = hw->soc->pins[i].name;
+	}
+
+	/* Set up the pin descriptions per SoC type */
+	mtk_desc.pins = (const struct pinctrl_pin_desc *)pins;
+	mtk_desc.npins = hw->soc->npins;
+	mtk_desc.num_custom_params = ARRAY_SIZE(mtk_custom_bindings);
+	mtk_desc.custom_params = mtk_custom_bindings;
+#ifdef CONFIG_DEBUG_FS
+	mtk_desc.custom_conf_items = mtk_conf_items;
+#endif
+
+	err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw,
+					     &hw->pctrl);
+	if (err)
+		return err;
+
+	err = pinctrl_enable(hw->pctrl);
+	if (err)
+		return err;
+
+	err = mtk_build_eint(hw, pdev);
+	if (err)
+		dev_warn(&pdev->dev,
+			 "Failed to add EINT, but pinctrl still can work\n");
+
+	/* The gpiochip must be built after pinctrl_enable() is done */
+	err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+		return err;
+	}
+
+	return 0;
+}
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.h b/drivers/pinctrl/mediatek/pinctrl-paris.h
new file mode 100644
index 0000000..37146ca
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *	   Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *	   Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ */
+#ifndef __PINCTRL_PARIS_H
+#define __PINCTRL_PARIS_H
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinctrl-utils.h"
+#include "../pinmux.h"
+#include "mtk-eint.h"
+#include "pinctrl-mtk-common-v2.h"
+
+#define MTK_RANGE(_a)		{ .range = (_a), .nranges = ARRAY_SIZE(_a), }
+
+#define MTK_EINT_FUNCTION(_eintmux, _eintnum)				\
+	{							\
+		.eint_m = _eintmux,					\
+		.eint_n = _eintnum,					\
+	}
+
+#define MTK_FUNCTION(_val, _name)				\
+	{							\
+		.muxval = _val,					\
+		.name = _name,					\
+	}
+
+#define MTK_PIN(_number, _name, _eint, _drv_n, ...) {	\
+		.number = _number,			\
+		.name = _name,				\
+		.eint = _eint,				\
+		.drv_n = _drv_n,			\
+		.funcs = (struct mtk_func_desc[]){	\
+			__VA_ARGS__, { } },				\
+	}
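+
+/*
+ * Illustrative (hypothetical) pin entry built from the macros above;
+ * the real tables live in the per-SoC pinctrl-mtk-*.h headers:
+ *
+ *	MTK_PIN(0, "GPIO0", MTK_EINT_FUNCTION(0, 0), DRV_GRP4,
+ *		MTK_FUNCTION(0, "GPIO0"),
+ *		MTK_FUNCTION(1, "UART0_TXD")),
+ */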
+
+#define PINCTRL_PIN_GROUP(name, id)			\
+	{						\
+		name,					\
+		id##_pins,				\
+		ARRAY_SIZE(id##_pins),			\
+		id##_funcs,				\
+	}
+
+int mtk_paris_pinctrl_probe(struct platform_device *pdev,
+			    const struct mtk_pin_soc *soc);
+
+#endif /* __PINCTRL_PARIS_H */
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
index f40b1c19..de7fdea 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -35,7 +35,7 @@
 	struct i2c_client *fusb302;
 	struct i2c_client *pi3usb30532;
 	/* Contain a list-head must be per device */
-	struct device_connection connections[5];
+	struct device_connection connections[4];
 };
 
 /*
@@ -177,16 +177,13 @@
 
 	data->connections[0].endpoint[0] = "port0";
 	data->connections[0].endpoint[1] = "i2c-pi3usb30532";
-	data->connections[0].id = "typec-switch";
+	data->connections[0].id = "orientation-switch";
 	data->connections[1].endpoint[0] = "port0";
 	data->connections[1].endpoint[1] = "i2c-pi3usb30532";
-	data->connections[1].id = "typec-mux";
-	data->connections[2].endpoint[0] = "port0";
-	data->connections[2].endpoint[1] = "i2c-pi3usb30532";
-	data->connections[2].id = "idff01m01";
-	data->connections[3].endpoint[0] = "i2c-fusb302";
-	data->connections[3].endpoint[1] = "intel_xhci_usb_sw-role-switch";
-	data->connections[3].id = "usb-role-switch";
+	data->connections[1].id = "mode-switch";
+	data->connections[2].endpoint[0] = "i2c-fusb302";
+	data->connections[2].endpoint[1] = "intel_xhci_usb_sw-role-switch";
+	data->connections[2].id = "usb-role-switch";
 
 	device_connections_add(data->connections);
 
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index a67eeac..98cd024 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -18,3 +18,14 @@
           Say y here to enable support io domains on Rockchip SoCs. It is
           necessary for the io domain setting of the SoC to match the
           voltage supplied by the regulators.
+
+config MTK_SVS
+	bool "MediaTek Smart Voltage Scaling(SVS)"
+	depends on POWER_AVS && MTK_EFUSE
+	depends on THERMAL || THERMAL=n
+	help
+	  The SVS engine is a piece of hardware that calculates optimized
+	  voltage values for several power domains, e.g. CPU clusters, GPU
+	  and CCI, according to the chip's process corner, temperature and
+	  other factors. The DVFS driver can then apply those optimized
+	  voltage values to reduce power consumption.
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
index ba4c7bc..b487701 100644
--- a/drivers/power/avs/Makefile
+++ b/drivers/power/avs/Makefile
@@ -1,2 +1,3 @@
 obj-$(CONFIG_POWER_AVS_OMAP)		+= smartreflex.o
 obj-$(CONFIG_ROCKCHIP_IODOMAIN)		+= rockchip-io-domain.o
+obj-$(CONFIG_MTK_SVS)			+= mtk_svs.o
diff --git a/drivers/power/avs/mtk_svs.c b/drivers/power/avs/mtk_svs.c
new file mode 100644
index 0000000..aae9a38
--- /dev/null
+++ b/drivers/power/avs/mtk_svs.c
@@ -0,0 +1,2077 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ */
+
+#define pr_fmt(fmt)	"[mtk_svs] " fmt
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/power/mtk_svs.h>
+#include <linux/proc_fs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/thermal.h>
+#include <linux/uaccess.h>
+
+#define SVS_INIT01_VOLT_IGNORE		1
+#define SVS_INIT01_VOLT_INC_ONLY	2
+
+#define SVS_PHASE_INIT01		0
+#define SVS_PHASE_INIT02		1
+#define SVS_PHASE_MON			2
+#define SVS_PHASE_ERROR			3
+
+#define SVS_CPU_LITTLE			1
+#define SVS_CPU_BIG			2
+#define SVS_CCI				3
+#define SVS_GPU				4
+
+#define proc_fops_rw(name) \
+	static int name ## _proc_open(struct inode *inode,	\
+		struct file *file)				\
+	{							\
+		return single_open(file, name ## _proc_show,	\
+			PDE_DATA(inode));			\
+	}							\
+	static const struct file_operations name ## _proc_fops = {	\
+		.owner          = THIS_MODULE,				\
+		.open           = name ## _proc_open,			\
+		.read           = seq_read,				\
+		.llseek         = seq_lseek,				\
+		.release        = single_release,			\
+		.write          = name ## _proc_write,			\
+	}
+
+#define proc_fops_ro(name) \
+	static int name ## _proc_open(struct inode *inode,	\
+		struct file *file)				\
+	{							\
+		return single_open(file, name ## _proc_show,	\
+			PDE_DATA(inode));			\
+	}							\
+	static const struct file_operations name ## _proc_fops = {	\
+		.owner          = THIS_MODULE,				\
+		.open           = name ## _proc_open,			\
+		.read           = seq_read,				\
+		.llseek         = seq_lseek,				\
+		.release        = single_release,			\
+	}
+
+#define proc_entry(name)	{__stringify(name), &name ## _proc_fops}
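+
+/*
+ * Usage sketch for the helpers above (hypothetical "foo" node, assuming
+ * foo_proc_show() and foo_proc_write() are defined):
+ *
+ *	proc_fops_rw(foo);
+ *
+ * proc_entry(foo) then expands to {"foo", &foo_proc_fops}, which suits a
+ * name/fops table walked while creating the procfs nodes.
+ */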
+
+static DEFINE_SPINLOCK(mtk_svs_lock);
+struct mtk_svs;
+
+enum reg_index {
+	TEMPMONCTL0 = 0,
+	TEMPMONCTL1,
+	TEMPMONCTL2,
+	TEMPMONINT,
+	TEMPMONINTSTS,
+	TEMPMONIDET0,
+	TEMPMONIDET1,
+	TEMPMONIDET2,
+	TEMPH2NTHRE,
+	TEMPHTHRE,
+	TEMPCTHRE,
+	TEMPOFFSETH,
+	TEMPOFFSETL,
+	TEMPMSRCTL0,
+	TEMPMSRCTL1,
+	TEMPAHBPOLL,
+	TEMPAHBTO,
+	TEMPADCPNP0,
+	TEMPADCPNP1,
+	TEMPADCPNP2,
+	TEMPADCMUX,
+	TEMPADCEXT,
+	TEMPADCEXT1,
+	TEMPADCEN,
+	TEMPPNPMUXADDR,
+	TEMPADCMUXADDR,
+	TEMPADCEXTADDR,
+	TEMPADCEXT1ADDR,
+	TEMPADCENADDR,
+	TEMPADCVALIDADDR,
+	TEMPADCVOLTADDR,
+	TEMPRDCTRL,
+	TEMPADCVALIDMASK,
+	TEMPADCVOLTAGESHIFT,
+	TEMPADCWRITECTRL,
+	TEMPMSR0,
+	TEMPMSR1,
+	TEMPMSR2,
+	TEMPADCHADDR,
+	TEMPIMMD0,
+	TEMPIMMD1,
+	TEMPIMMD2,
+	TEMPMONIDET3,
+	TEMPADCPNP3,
+	TEMPMSR3,
+	TEMPIMMD3,
+	TEMPPROTCTL,
+	TEMPPROTTA,
+	TEMPPROTTB,
+	TEMPPROTTC,
+	TEMPSPARE0,
+	TEMPSPARE1,
+	TEMPSPARE2,
+	TEMPSPARE3,
+	TEMPMSR0_1,
+	TEMPMSR1_1,
+	TEMPMSR2_1,
+	TEMPMSR3_1,
+	DESCHAR,
+	TEMPCHAR,
+	DETCHAR,
+	AGECHAR,
+	DCCONFIG,
+	AGECONFIG,
+	FREQPCT30,
+	FREQPCT74,
+	LIMITVALS,
+	VBOOT,
+	DETWINDOW,
+	CONFIG,
+	TSCALCS,
+	RUNCONFIG,
+	SVSEN,
+	INIT2VALS,
+	DCVALUES,
+	AGEVALUES,
+	VOP30,
+	VOP74,
+	TEMP,
+	INTSTS,
+	INTSTSRAW,
+	INTEN,
+	CHKINT,
+	CHKSHIFT,
+	STATUS,
+	VDESIGN30,
+	VDESIGN74,
+	DVT30,
+	DVT74,
+	AGECOUNT,
+	SMSTATE0,
+	SMSTATE1,
+	CTL0,
+	DESDETSEC,
+	TEMPAGESEC,
+	CTRLSPARE0,
+	CTRLSPARE1,
+	CTRLSPARE2,
+	CTRLSPARE3,
+	CORESEL,
+	THERMINTST,
+	INTST,
+	THSTAGE0ST,
+	THSTAGE1ST,
+	THSTAGE2ST,
+	THAHBST0,
+	THAHBST1,
+	SPARE0,
+	SPARE1,
+	SPARE2,
+	SPARE3,
+	THSLPEVEB,
+	reg_num,
+};
+
+static const u32 svs_regs_v2[] = {
+	[TEMPMONCTL0]		= 0x000,
+	[TEMPMONCTL1]		= 0x004,
+	[TEMPMONCTL2]		= 0x008,
+	[TEMPMONINT]		= 0x00c,
+	[TEMPMONINTSTS]		= 0x010,
+	[TEMPMONIDET0]		= 0x014,
+	[TEMPMONIDET1]		= 0x018,
+	[TEMPMONIDET2]		= 0x01c,
+	[TEMPH2NTHRE]		= 0x024,
+	[TEMPHTHRE]		= 0x028,
+	[TEMPCTHRE]		= 0x02c,
+	[TEMPOFFSETH]		= 0x030,
+	[TEMPOFFSETL]		= 0x034,
+	[TEMPMSRCTL0]		= 0x038,
+	[TEMPMSRCTL1]		= 0x03c,
+	[TEMPAHBPOLL]		= 0x040,
+	[TEMPAHBTO]		= 0x044,
+	[TEMPADCPNP0]		= 0x048,
+	[TEMPADCPNP1]		= 0x04c,
+	[TEMPADCPNP2]		= 0x050,
+	[TEMPADCMUX]		= 0x054,
+	[TEMPADCEXT]		= 0x058,
+	[TEMPADCEXT1]		= 0x05c,
+	[TEMPADCEN]		= 0x060,
+	[TEMPPNPMUXADDR]	= 0x064,
+	[TEMPADCMUXADDR]	= 0x068,
+	[TEMPADCEXTADDR]	= 0x06c,
+	[TEMPADCEXT1ADDR]	= 0x070,
+	[TEMPADCENADDR]		= 0x074,
+	[TEMPADCVALIDADDR]	= 0x078,
+	[TEMPADCVOLTADDR]	= 0x07c,
+	[TEMPRDCTRL]		= 0x080,
+	[TEMPADCVALIDMASK]	= 0x084,
+	[TEMPADCVOLTAGESHIFT]	= 0x088,
+	[TEMPADCWRITECTRL]	= 0x08c,
+	[TEMPMSR0]		= 0x090,
+	[TEMPMSR1]		= 0x094,
+	[TEMPMSR2]		= 0x098,
+	[TEMPADCHADDR]		= 0x09c,
+	[TEMPIMMD0]		= 0x0a0,
+	[TEMPIMMD1]		= 0x0a4,
+	[TEMPIMMD2]		= 0x0a8,
+	[TEMPMONIDET3]		= 0x0b0,
+	[TEMPADCPNP3]		= 0x0b4,
+	[TEMPMSR3]		= 0x0b8,
+	[TEMPIMMD3]		= 0x0bc,
+	[TEMPPROTCTL]		= 0x0c0,
+	[TEMPPROTTA]		= 0x0c4,
+	[TEMPPROTTB]		= 0x0c8,
+	[TEMPPROTTC]		= 0x0cc,
+	[TEMPSPARE0]		= 0x0f0,
+	[TEMPSPARE1]		= 0x0f4,
+	[TEMPSPARE2]		= 0x0f8,
+	[TEMPSPARE3]		= 0x0fc,
+	[TEMPMSR0_1]		= 0x190,
+	[TEMPMSR1_1]		= 0x194,
+	[TEMPMSR2_1]		= 0x198,
+	[TEMPMSR3_1]		= 0x1b8,
+	[DESCHAR]		= 0xc00,
+	[TEMPCHAR]		= 0xc04,
+	[DETCHAR]		= 0xc08,
+	[AGECHAR]		= 0xc0c,
+	[DCCONFIG]		= 0xc10,
+	[AGECONFIG]		= 0xc14,
+	[FREQPCT30]		= 0xc18,
+	[FREQPCT74]		= 0xc1c,
+	[LIMITVALS]		= 0xc20,
+	[VBOOT]			= 0xc24,
+	[DETWINDOW]		= 0xc28,
+	[CONFIG]		= 0xc2c,
+	[TSCALCS]		= 0xc30,
+	[RUNCONFIG]		= 0xc34,
+	[SVSEN]			= 0xc38,
+	[INIT2VALS]		= 0xc3c,
+	[DCVALUES]		= 0xc40,
+	[AGEVALUES]		= 0xc44,
+	[VOP30]			= 0xc48,
+	[VOP74]			= 0xc4c,
+	[TEMP]			= 0xc50,
+	[INTSTS]		= 0xc54,
+	[INTSTSRAW]		= 0xc58,
+	[INTEN]			= 0xc5c,
+	[CHKINT]		= 0xc60,
+	[CHKSHIFT]		= 0xc64,
+	[STATUS]		= 0xc68,
+	[VDESIGN30]		= 0xc6c,
+	[VDESIGN74]		= 0xc70,
+	[DVT30]			= 0xc74,
+	[DVT74]			= 0xc78,
+	[AGECOUNT]		= 0xc7c,
+	[SMSTATE0]		= 0xc80,
+	[SMSTATE1]		= 0xc84,
+	[CTL0]			= 0xc88,
+	[DESDETSEC]		= 0xce0,
+	[TEMPAGESEC]		= 0xce4,
+	[CTRLSPARE0]		= 0xcf0,
+	[CTRLSPARE1]		= 0xcf4,
+	[CTRLSPARE2]		= 0xcf8,
+	[CTRLSPARE3]		= 0xcfc,
+	[CORESEL]		= 0xf00,
+	[THERMINTST]		= 0xf04,
+	[INTST]			= 0xf08,
+	[THSTAGE0ST]		= 0xf0c,
+	[THSTAGE1ST]		= 0xf10,
+	[THSTAGE2ST]		= 0xf14,
+	[THAHBST0]		= 0xf18,
+	[THAHBST1]		= 0xf1c,
+	[SPARE0]		= 0xf20,
+	[SPARE1]		= 0xf24,
+	[SPARE2]		= 0xf28,
+	[SPARE3]		= 0xf2c,
+	[THSLPEVEB]		= 0xf30,
+};
+
+struct thermal_parameter {
+	int adc_ge_t;
+	int adc_oe_t;
+	int ge;
+	int oe;
+	int gain;
+	int o_vtsabb;
+	int o_vtsmcu1;
+	int o_vtsmcu2;
+	int o_vtsmcu3;
+	int o_vtsmcu4;
+	int o_vtsmcu5;
+	int degc_cali;
+	int adc_cali_en_t;
+	int o_slope;
+	int o_slope_sign;
+	int ts_id;
+};
+
+struct svs_bank_ops {
+	void (*set_freqs_pct)(struct mtk_svs *svs);
+	void (*get_vops)(struct mtk_svs *svs);
+};
+
+struct svs_bank {
+	struct svs_bank_ops *ops;
+	struct completion init_completion;
+	struct device *dev;
+	struct regulator *buck;
+	struct mutex lock;	/* lock to protect update voltage process */
+	bool suspended;
+	bool mtcmos_request;
+	bool init01_support;
+	bool init02_support;
+	bool mon_mode_support;
+	s32 volt_offset;
+	u32 *opp_freqs;
+	u32 *freqs_pct;
+	u32 *opp_volts;
+	u32 *init02_volts;
+	u32 *volts;
+	u32 reg_data[3][reg_num];
+	u32 freq_base;
+	u32 vboot;
+	u32 volt_step;
+	u32 volt_base;
+	u32 init01_volt_flag;
+	u32 phase;
+	u32 vmax;
+	u32 vmin;
+	u32 bts;
+	u32 mts;
+	u32 bdes;
+	u32 mdes;
+	u32 mtdes;
+	u32 dcbdet;
+	u32 dcmdet;
+	u32 dthi;
+	u32 dtlo;
+	u32 det_window;
+	u32 det_max;
+	u32 age_config;
+	u32 age_voffset_in;
+	u32 agem;
+	u32 dc_config;
+	u32 dc_voffset_in;
+	u32 dvt_fixed;
+	u32 vco;
+	u32 chkshift;
+	u32 svs_temp;
+	u32 upper_temp_bound;
+	u32 lower_temp_bound;
+	u32 low_temp_threashold;
+	u32 low_temp_offset;
+	u32 coresel;
+	u32 opp_count;
+	u32 intst;
+	u32 systemclk_en;
+	u32 sw_id;
+	u32 hw_id;
+	u32 ctl0;
+	u8 *of_compatible;
+	u8 *name;
+	u8 *zone_name;
+	u8 *buck_name;
+};
+
+struct svs_platform {
+	struct svs_bank *banks;
+	int (*efuse_parsing)(struct mtk_svs *svs);
+	bool fake_efuse;
+	const u32 *regs;
+	u32 bank_num;
+	u32 efuse_num;
+	u32 efuse_check;
+	u32 thermal_efuse_num;
+	u8 *name;
+};
+
+struct mtk_svs {
+	const struct svs_platform *platform;
+	struct svs_bank *bank;
+	struct device *dev;
+	void __iomem *base;
+	struct clk *main_clk;
+	u32 *efuse;
+	u32 *thermal_efuse;
+};
+
+unsigned long claim_mtk_svs_lock(void)
+	__acquires(&mtk_svs_lock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mtk_svs_lock, flags);
+
+	return flags;
+}
+EXPORT_SYMBOL_GPL(claim_mtk_svs_lock);
+
+void release_mtk_svs_lock(unsigned long flags)
+	__releases(&mtk_svs_lock)
+{
+	spin_unlock_irqrestore(&mtk_svs_lock, flags);
+}
+EXPORT_SYMBOL_GPL(release_mtk_svs_lock);
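+
+/*
+ * Usage sketch for the exported helpers above (a hypothetical external
+ * caller that must serialize against SVS register access):
+ *
+ *	unsigned long flags;
+ *
+ *	flags = claim_mtk_svs_lock();
+ *	... touch state shared with the SVS engine ...
+ *	release_mtk_svs_lock(flags);
+ */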
+
+static u32 percent(u32 numerator, u32 denominator)
+{
+	u32 percent;
+
+	/* Scale down by 1000 first; otherwise "numerator * 100" could overflow. */
+	numerator /= 1000;
+	denominator /= 1000;
+	percent = ((numerator * 100) + denominator - 1) / denominator;
+
+	return percent;
+}
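+
+/*
+ * Example: percent(1800000000, 2000000000) returns 90, i.e. the ratio
+ * expressed as a percentage, rounded up.
+ */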
+
+static u32 svs_readl(struct mtk_svs *svs, enum reg_index i)
+{
+	return readl(svs->base + svs->platform->regs[i]);
+}
+
+static void svs_writel(struct mtk_svs *svs, u32 val, enum reg_index i)
+{
+	writel(val, svs->base + svs->platform->regs[i]);
+}
+
+static void svs_switch_bank(struct mtk_svs *svs)
+{
+	struct svs_bank *svsb = svs->bank;
+
+	svs_writel(svs, svsb->coresel, CORESEL);
+}
+
+static u32 svs_volt_to_opp_volt(u32 svsb_volt,
+				u32 svsb_volt_step, u32 svsb_volt_base)
+{
+	u32 u_volt;
+
+	u_volt = (svsb_volt * svsb_volt_step) + svsb_volt_base;
+
+	return u_volt;
+}
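+
+/*
+ * Example: with an (illustrative) 6250 uV step and 500000 uV base, a raw
+ * bank voltage of 0x30 (48) maps to 48 * 6250 + 500000 = 800000 uV.
+ */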
+
+static int svs_get_zone_temperature(struct svs_bank *svsb, int *zone_temp)
+{
+	struct thermal_zone_device *tzd;
+	int ret;
+
+	tzd = thermal_zone_get_zone_by_name(svsb->zone_name);
+	ret = thermal_zone_get_temp(tzd, zone_temp);
+
+	return ret;
+}
+
+static int svs_set_volts(struct svs_bank *svsb, bool force_update)
+{
+	u32 i, svsb_volt, opp_volt, low_temp_offset = 0;
+	int zone_temp, ret;
+
+	mutex_lock(&svsb->lock);
+
+	/* If the bank is suspended, the init02 voltage has already been
+	 * applied; there is no need to update the OPP voltages anymore.
+	 */
+	if (svsb->suspended && !force_update) {
+		pr_notice("%s: bank is suspended\n", svsb->name);
+		mutex_unlock(&svsb->lock);
+		return -EPERM;
+	}
+
+	/* get thermal effect */
+	if (svsb->phase == SVS_PHASE_MON) {
+		if (svsb->svs_temp > svsb->upper_temp_bound &&
+		    svsb->svs_temp < svsb->lower_temp_bound) {
+			pr_err("%s: svs_temp is abnormal (0x%x)?\n",
+			       svsb->name, svsb->svs_temp);
+			mutex_unlock(&svsb->lock);
+			return -EINVAL;
+		}
+
+		ret = svs_get_zone_temperature(svsb, &zone_temp);
+		if (ret) {
+			pr_err("%s: cannot get zone \"%s\" temperature\n",
+			       svsb->name, svsb->zone_name);
+			pr_err("%s: add low_temp_offset = %u\n",
+			       svsb->name, svsb->low_temp_offset);
+			zone_temp = svsb->low_temp_threashold;
+		}
+
+		if (zone_temp <= svsb->low_temp_threashold)
+			low_temp_offset = svsb->low_temp_offset;
+	}
+
+	/* vmin <= svsb_volt (opp_volt) <= signed-off voltage */
+	for (i = 0; i < svsb->opp_count; i++) {
+		if (svsb->phase == SVS_PHASE_MON) {
+			svsb_volt = max((svsb->volts[i] + svsb->volt_offset +
+					 low_temp_offset), svsb->vmin);
+			opp_volt = svs_volt_to_opp_volt(svsb_volt,
+							svsb->volt_step,
+							svsb->volt_base);
+		} else if (svsb->phase == SVS_PHASE_INIT02) {
+			svsb_volt = max((svsb->init02_volts[i] +
+					 svsb->volt_offset), svsb->vmin);
+			opp_volt = svs_volt_to_opp_volt(svsb_volt,
+							svsb->volt_step,
+							svsb->volt_base);
+		} else if (svsb->phase == SVS_PHASE_ERROR) {
+			opp_volt = svsb->opp_volts[i];
+		} else {
+			pr_err("%s: unknown phase: %u?\n",
+			       svsb->name, svsb->phase);
+			mutex_unlock(&svsb->lock);
+			return -EINVAL;
+		}
+
+		opp_volt = min(opp_volt, svsb->opp_volts[i]);
+		ret = dev_pm_opp_adjust_voltage(svsb->dev, svsb->opp_freqs[i],
+						opp_volt);
+		if (ret) {
+			pr_err("%s: set voltage failed: %d\n", svsb->name, ret);
+			mutex_unlock(&svsb->lock);
+			return ret;
+		}
+	}
+
+	mutex_unlock(&svsb->lock);
+
+	return 0;
+}
+
+static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx)
+{
+	u32 vy;
+
+	if (v0 == v1 || f0 == f1)
+		return v0;
+
+	/* *100 to have decimal fraction factor, +99 for rounding up. */
+	vy = (v0 * 100) - ((((v0 - v1) * 100) / (f0 - f1)) * (f0 - fx));
+	vy = (vy + 99) / 100;
+
+	return vy;
+}
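+
+/*
+ * Example: interpolate(100, 50, 80, 40, 75) gives
+ * ceil(80 - ((80 - 40) / (100 - 50)) * (100 - 75)) = 60, i.e. plain
+ * linear interpolation rounded up to the next integer.
+ */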
+
+static void svs_get_vops_v2(struct mtk_svs *svs)
+{
+	struct svs_bank *svsb = svs->bank;
+	u32 temp, i;
+
+	temp = svs_readl(svs, VOP30);
+	svsb->volts[6] = (temp >> 24) & 0xff;
+	svsb->volts[4] = (temp >> 16) & 0xff;
+	svsb->volts[2] = (temp >> 8)  & 0xff;
+	svsb->volts[0] = (temp & 0xff);
+
+	temp = svs_readl(svs, VOP74);
+	svsb->volts[14] = (temp >> 24) & 0xff;
+	svsb->volts[12] = (temp >> 16) & 0xff;
+	svsb->volts[10] = (temp >> 8)  & 0xff;
+	svsb->volts[8] = (temp & 0xff);
+
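+	/* Only even-indexed OPP voltages are provided by the hardware;
+	 * odd-indexed ones are interpolated between their even neighbours,
+	 * and the last one (index 15) is extrapolated from indexes 12/14.
+	 */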
+	for (i = 0; i <= 7; i++) {
+		if (i < 7) {
+			svsb->volts[(i * 2) + 1] =
+				interpolate(svsb->freqs_pct[i * 2],
+					    svsb->freqs_pct[(i + 1) * 2],
+					    svsb->volts[i * 2],
+					    svsb->volts[(i + 1) * 2],
+					    svsb->freqs_pct[(i * 2) + 1]);
+		} else if (i == 7) {
+			svsb->volts[(i * 2) + 1] =
+				interpolate(svsb->freqs_pct[(i - 1) * 2],
+					    svsb->freqs_pct[i * 2],
+					    svsb->volts[(i - 1) * 2],
+					    svsb->volts[i * 2],
+					    svsb->freqs_pct[(i * 2) + 1]);
+		}
+	}
+}
+
+static void svs_set_freqs_pct_v2(struct mtk_svs *svs)
+{
+	struct svs_bank *svsb = svs->bank;
+
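+	/* Pack the even-indexed frequency percentages, one byte each,
+	 * into FREQPCT30 (OPPs 0/2/4/6) and FREQPCT74 (OPPs 8/10/12/14);
+	 * they correspond to the voltages read back in svs_get_vops_v2().
+	 */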
+	svs_writel(svs,
+		   ((svsb->freqs_pct[6] << 24) & 0xff000000) |
+		   ((svsb->freqs_pct[4] << 16) & 0xff0000) |
+		   ((svsb->freqs_pct[2] << 8) & 0xff00) |
+		   (svsb->freqs_pct[0] & 0xff),
+		   FREQPCT30);
+	svs_writel(svs,
+		   ((svsb->freqs_pct[14] << 24) & 0xff000000) |
+		   ((svsb->freqs_pct[12] << 16) & 0xff0000) |
+		   ((svsb->freqs_pct[10] << 8) & 0xff00) |
+		   ((svsb->freqs_pct[8]) & 0xff),
+		   FREQPCT74);
+}
+
+static void svs_set_phase(struct mtk_svs *svs, u32 target_phase)
+{
+	struct svs_bank *svsb = svs->bank;
+	u32 des_char, temp_char, det_char, limit_vals;
+	u32 init2vals, ts_calcs, val, filter, i;
+
+	svs_switch_bank(svs);
+
+	des_char = ((svsb->bdes << 8) & 0xff00) | (svsb->mdes & 0xff);
+	svs_writel(svs, des_char, DESCHAR);
+
+	temp_char = ((svsb->vco << 16) & 0xff0000) |
+		    ((svsb->mtdes << 8) & 0xff00) |
+		    (svsb->dvt_fixed & 0xff);
+	svs_writel(svs, temp_char, TEMPCHAR);
+
+	det_char = ((svsb->dcbdet << 8) & 0xff00) | (svsb->dcmdet & 0xff);
+	svs_writel(svs, det_char, DETCHAR);
+
+	svs_writel(svs, svsb->dc_config, DCCONFIG);
+	svs_writel(svs, svsb->age_config, AGECONFIG);
+
+	if (svsb->agem == 0x0) {
+		svs_writel(svs, 0x80000000, RUNCONFIG);
+	} else {
+		val = 0x0;
+
+		for (i = 0; i < 24; i += 2) {
+			filter = 0x3 << i;
+
+			if ((svsb->age_config & filter) == 0x0)
+				val |= (0x1 << i);
+			else
+				val |= (svsb->age_config & filter);
+		}
+		svs_writel(svs, val, RUNCONFIG);
+	}
+
+	svsb->ops->set_freqs_pct(svs);
+
+	limit_vals = ((svsb->vmax << 24) & 0xff000000) |
+		     ((svsb->vmin << 16) & 0xff0000) |
+		     ((svsb->dthi << 8) & 0xff00) |
+		     (svsb->dtlo & 0xff);
+	svs_writel(svs, limit_vals, LIMITVALS);
+	svs_writel(svs, (svsb->vboot & 0xff), VBOOT);
+	svs_writel(svs, (svsb->det_window & 0xffff), DETWINDOW);
+	svs_writel(svs, (svsb->det_max & 0xffff), CONFIG);
+
+	if (svsb->chkshift != 0)
+		svs_writel(svs, (svsb->chkshift & 0xff), CHKSHIFT);
+
+	if (svsb->ctl0 != 0)
+		svs_writel(svs, svsb->ctl0, CTL0);
+
+	svs_writel(svs, 0x00ffffff, INTSTS);
+
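+	/* The SVSEN value selects the phase the hardware runs next:
+	 * 0x1 starts INIT01, 0x5 starts INIT02 (with the INIT01 results
+	 * preloaded via INIT2VALS) and 0x2 starts MON mode.
+	 */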
+	switch (target_phase) {
+	case SVS_PHASE_INIT01:
+		svs_writel(svs, 0x00005f01, INTEN);
+		svs_writel(svs, 0x00000001, SVSEN);
+		break;
+	case SVS_PHASE_INIT02:
+		svs_writel(svs, 0x00005f01, INTEN);
+		init2vals = ((svsb->age_voffset_in << 16) & 0xffff0000) |
+			    (svsb->dc_voffset_in & 0xffff);
+		svs_writel(svs, init2vals, INIT2VALS);
+		svs_writel(svs, 0x00000005, SVSEN);
+		break;
+	case SVS_PHASE_MON:
+		ts_calcs = ((svsb->bts << 12) & 0xfff000) | (svsb->mts & 0xfff);
+		svs_writel(svs, ts_calcs, TSCALCS);
+		svs_writel(svs, 0x00FF0000, INTEN);
+		svs_writel(svs, 0x00000002, SVSEN);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+static inline void svs_init01_isr_handler(struct mtk_svs *svs)
+{
+	struct svs_bank *svsb = svs->bank;
+	enum reg_index rg_i;
+
+	pr_notice("%s: %s: VDN74:0x%08x, VDN30:0x%08x, DCVALUES:0x%08x\n",
+		  svsb->name, __func__, svs_readl(svs, VDESIGN74),
+		  svs_readl(svs, VDESIGN30), svs_readl(svs, DCVALUES));
+
+	for (rg_i = TEMPMONCTL0; rg_i < reg_num; rg_i++)
+		svsb->reg_data[SVS_PHASE_INIT01][rg_i] = svs_readl(svs, rg_i);
+
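+	/* Negate the 16-bit DC value (two's complement). */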
+	svsb->dc_voffset_in = ~(svs_readl(svs, DCVALUES) & 0xffff) + 1;
+	if (svsb->init01_volt_flag == SVS_INIT01_VOLT_IGNORE)
+		svsb->dc_voffset_in = 0;
+	else if ((svsb->dc_voffset_in & 0x8000) &&
+		 (svsb->init01_volt_flag == SVS_INIT01_VOLT_INC_ONLY))
+		svsb->dc_voffset_in = 0;
+
+	svsb->age_voffset_in = svs_readl(svs, AGEVALUES) & 0xffff;
+
+	svs_writel(svs, 0x0, SVSEN);
+	svs_writel(svs, 0x1, INTSTS);
+
+	/* svs init01 clock gating */
+	svsb->coresel &= ~svsb->systemclk_en;
+
+	svsb->phase = SVS_PHASE_INIT01;
+	complete(&svsb->init_completion);
+}
+
+static inline void svs_init02_isr_handler(struct mtk_svs *svs)
+{
+	struct svs_bank *svsb = svs->bank;
+	enum reg_index rg_i;
+
+	pr_notice("%s: %s: VOP74:0x%08x, VOP30:0x%08x, DCVALUES:0x%08x\n",
+		  svsb->name, __func__, svs_readl(svs, VOP74),
+		  svs_readl(svs, VOP30), svs_readl(svs, DCVALUES));
+
+	for (rg_i = TEMPMONCTL0; rg_i < reg_num; rg_i++)
+		svsb->reg_data[SVS_PHASE_INIT02][rg_i] = svs_readl(svs, rg_i);
+
+	svsb->ops->get_vops(svs);
+	memcpy(svsb->init02_volts, svsb->volts, sizeof(u32) * svsb->opp_count);
+	svsb->phase = SVS_PHASE_INIT02;
+
+	svs_writel(svs, 0x0, SVSEN);
+	svs_writel(svs, 0x1, INTSTS);
+
+	complete(&svsb->init_completion);
+}
+
+static inline void svs_mon_mode_isr_handler(struct mtk_svs *svs)
+{
+	struct svs_bank *svsb = svs->bank;
+	enum reg_index rg_i;
+
+	for (rg_i = TEMPMONCTL0; rg_i < reg_num; rg_i++)
+		svsb->reg_data[SVS_PHASE_MON][rg_i] = svs_readl(svs, rg_i);
+
+	svsb->svs_temp = svs_readl(svs, TEMP) & 0xff;
+
+	svsb->ops->get_vops(svs);
+	svsb->phase = SVS_PHASE_MON;
+
+	svs_writel(svs, 0x00ff0000, INTSTS);
+}
+
+static inline void svs_error_isr_handler(struct mtk_svs *svs)
+{
+	const struct svs_platform *svsp = svs->platform;
+	struct svs_bank *svsb = svs->bank;
+	enum reg_index rg_i;
+
+	pr_err("%s(): %s(%s)", __func__, svsp->name, svsb->name);
+	pr_err("CORESEL(0x%x) = 0x%08x\n",
+	       svsp->regs[CORESEL], svs_readl(svs, CORESEL)),
+	pr_err("SVSEN(0x%x) = 0x%08x, INTSTS(0x%x) = 0x%08x\n",
+	       svsp->regs[SVSEN], svs_readl(svs, SVSEN),
+	       svsp->regs[INTSTS], svs_readl(svs, INTSTS));
+	pr_err("SMSTATE0(0x%x) = 0x%08x, SMSTATE1(0x%x) = 0x%08x\n",
+	       svsp->regs[SMSTATE0], svs_readl(svs, SMSTATE0),
+	       svsp->regs[SMSTATE1], svs_readl(svs, SMSTATE1));
+
+	for (rg_i = TEMPMONCTL0; rg_i < reg_num; rg_i++)
+		svsb->reg_data[SVS_PHASE_MON][rg_i] = svs_readl(svs, rg_i);
+
+	svsb->init01_support = false;
+	svsb->init02_support = false;
+	svsb->mon_mode_support = false;
+
+	if (svsb->phase == SVS_PHASE_MON)
+		svsb->phase = SVS_PHASE_INIT02;
+
+	svs_writel(svs, 0x0, SVSEN);
+	svs_writel(svs, 0x00ffffff, INTSTS);
+}
+
+static inline void svs_isr_handler(struct mtk_svs *svs)
+{
+	u32 intsts, svsen;
+
+	svs_switch_bank(svs);
+
+	intsts = svs_readl(svs, INTSTS);
+	svsen = svs_readl(svs, SVSEN);
+
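+	/* Decode which phase raised the interrupt: INTSTS bit 0 with
+	 * SVSEN 0x1 means INIT01 done, with SVSEN 0x5 INIT02 done; bits
+	 * in the 0x00ff0000 window flag MON-mode updates; anything else
+	 * is treated as an error.
+	 */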
+	if (intsts == 0x1 && ((svsen & 0x7) == 0x1))
+		svs_init01_isr_handler(svs);
+	else if ((intsts == 0x1) && ((svsen & 0x7) == 0x5))
+		svs_init02_isr_handler(svs);
+	else if ((intsts & 0x00ff0000) != 0x0)
+		svs_mon_mode_isr_handler(svs);
+	else
+		svs_error_isr_handler(svs);
+}
+
+static irqreturn_t svs_isr(int irq, void *data)
+{
+	struct mtk_svs *svs = (struct mtk_svs *)data;
+	const struct svs_platform *svsp = svs->platform;
+	struct svs_bank *svsb = NULL;
+	unsigned long flags;
+	u32 idx;
+
+	flags = claim_mtk_svs_lock();
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+		svs->bank = svsb;
+
+		if (svsb->suspended)
+			continue;
+		else if (svsb->intst & svs_readl(svs, INTST))
+			continue;
+
+		svs_isr_handler(svs);
+		break;
+	}
+	release_mtk_svs_lock(flags);
+
+	if (svsb && svsb->phase != SVS_PHASE_INIT01)
+		svs_set_volts(svsb, false);
+
+	return IRQ_HANDLED;
+}
+
+static void svs_mon_mode(struct mtk_svs *svs)
+{
+	const struct svs_platform *svsp = svs->platform;
+	struct svs_bank *svsb;
+	unsigned long flags;
+	u32 idx;
+
+	flags = claim_mtk_svs_lock();
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+		svs->bank = svsb;
+
+		if (!svsb->mon_mode_support)
+			continue;
+
+		svs_set_phase(svs, SVS_PHASE_MON);
+	}
+	release_mtk_svs_lock(flags);
+}
+
+static int svs_init02(struct mtk_svs *svs)
+{
+	const struct svs_platform *svsp = svs->platform;
+	struct svs_bank *svsb;
+	unsigned long flags, time_left;
+	u32 idx;
+
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+		svs->bank = svsb;
+
+		if (!svsb->init02_support)
+			continue;
+
+		reinit_completion(&svsb->init_completion);
+		flags = claim_mtk_svs_lock();
+		svs_set_phase(svs, SVS_PHASE_INIT02);
+		release_mtk_svs_lock(flags);
+		time_left =
+			wait_for_completion_timeout(&svsb->init_completion,
+						    msecs_to_jiffies(2000));
+		if (time_left == 0) {
+			pr_err("%s: init02 completion timeout\n", svsb->name);
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+static int svs_init01(struct mtk_svs *svs)
+{
+	const struct svs_platform *svsp = svs->platform;
+	struct svs_bank *svsb;
+	struct pm_qos_request qos_request = { {0} };
+	unsigned long flags, time_left;
+	bool search_done;
+	int ret = -EINVAL;
+	u32 opp_freqs, opp_vboot, buck_volt, idx, i;
+
+	/* Keep CPUs out of deep idle states while svs_init01 is running. */
+	pm_qos_add_request(&qos_request, PM_QOS_CPU_DMA_LATENCY, 0);
+
+	/* Sometimes two svs_banks share the same buck, so set every
+	 * svs_bank to the vboot voltage first.
+	 */
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+		search_done = false;
+
+		if (!svsb->init01_support)
+			continue;
+
+		ret = regulator_set_mode(svsb->buck, REGULATOR_MODE_FAST);
+		if (ret)
+			pr_notice("%s: fail to set fast mode: %d\n",
+				  svsb->name, ret);
+
+		if (svsb->mtcmos_request) {
+			ret = regulator_enable(svsb->buck);
+			if (ret) {
+				pr_err("%s: fail to enable %s power: %d\n",
+				       svsb->name, svsb->buck_name, ret);
+				goto init01_finish;
+			}
+
+			ret = dev_pm_domain_attach(svsb->dev, false);
+			if (ret) {
+				pr_err("%s: attach pm domain fail: %d\n",
+				       svsb->name, ret);
+				goto init01_finish;
+			}
+
+			pm_runtime_enable(svsb->dev);
+			ret = pm_runtime_get_sync(svsb->dev);
+			if (ret < 0) {
+				pr_err("%s: turn mtcmos on fail: %d\n",
+				       svsb->name, ret);
+				goto init01_finish;
+			}
+		}
+
+		/* Find the fastest freq that can be run at vboot and
+		 * fix to that freq until svs_init01 is done.
+		 */
+		opp_vboot = svs_volt_to_opp_volt(svsb->vboot,
+						 svsb->volt_step,
+						 svsb->volt_base);
+
+		for (i = 0; i < svsb->opp_count; i++) {
+			opp_freqs = svsb->opp_freqs[i];
+			if (!search_done && svsb->opp_volts[i] <= opp_vboot) {
+				ret = dev_pm_opp_adjust_voltage(svsb->dev,
+								opp_freqs,
+								opp_vboot);
+				if (ret) {
+					pr_err("%s: set voltage failed: %d\n",
+					       svsb->name, ret);
+					goto init01_finish;
+				}
+
+				search_done = true;
+			} else {
+				dev_pm_opp_disable(svsb->dev,
+						   svsb->opp_freqs[i]);
+			}
+		}
+	}
+
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+		svs->bank = svsb;
+
+		if (!svsb->init01_support)
+			continue;
+
+		opp_vboot = svs_volt_to_opp_volt(svsb->vboot,
+						 svsb->volt_step,
+						 svsb->volt_base);
+
+		buck_volt = regulator_get_voltage(svsb->buck);
+		if (buck_volt != opp_vboot) {
+			pr_err("%s: buck voltage: %u, expected vboot: %u\n",
+			       svsb->name, buck_volt, opp_vboot);
+			ret = -EPERM;
+			goto init01_finish;
+		}
+
+		init_completion(&svsb->init_completion);
+		flags = claim_mtk_svs_lock();
+		svs_set_phase(svs, SVS_PHASE_INIT01);
+		release_mtk_svs_lock(flags);
+		time_left =
+			wait_for_completion_timeout(&svsb->init_completion,
+						    msecs_to_jiffies(2000));
+		if (time_left == 0) {
+			pr_err("%s: init01 completion timeout\n", svsb->name);
+			ret = -EBUSY;
+			goto init01_finish;
+		}
+	}
+
+init01_finish:
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+
+		if (!svsb->init01_support)
+			continue;
+
+		for (i = 0; i < svsb->opp_count; i++)
+			dev_pm_opp_enable(svsb->dev, svsb->opp_freqs[i]);
+
+		if (regulator_set_mode(svsb->buck, REGULATOR_MODE_NORMAL))
+			pr_notice("%s: fail to set normal mode\n",
+				  svsb->name);
+
+		if (svsb->mtcmos_request) {
+			if (pm_runtime_put_sync(svsb->dev))
+				pr_err("%s: turn mtcmos off fail\n",
+				       svsb->name);
+			pm_runtime_disable(svsb->dev);
+			dev_pm_domain_detach(svsb->dev, 0);
+			if (regulator_disable(svsb->buck))
+				pr_err("%s: fail to disable %s power\n",
+				       svsb->name, svsb->buck_name);
+		}
+	}
+
+	pm_qos_remove_request(&qos_request);
+
+	return ret;
+}
+
+static int svs_start(struct mtk_svs *svs)
+{
+	int ret;
+
+	ret = svs_init01(svs);
+	if (ret)
+		return ret;
+
+	ret = svs_init02(svs);
+	if (ret)
+		return ret;
+
+	svs_mon_mode(svs);
+
+	return ret;
+}
+
+static int svs_mt8183_efuse_parsing(struct mtk_svs *svs)
+{
+	const struct svs_platform *svsp = svs->platform;
+	struct thermal_parameter tp;
+	struct svs_bank *svsb;
+	bool mon_mode_support = true;
+	int format[6], x_roomt[6], tb_roomt;
+	u32 idx, i, ft_pgm, mts, temp0, temp1, temp2;
+
+	if (svsp->fake_efuse) {
+		pr_notice("fake efuse\n");
+		svs->efuse[0] = 0x00310080;
+		svs->efuse[1] = 0xabfbf757;
+		svs->efuse[2] = 0x47c747c7;
+		svs->efuse[3] = 0xabfbf757;
+		svs->efuse[4] = 0xe7fca0ec;
+		svs->efuse[5] = 0x47bf4b88;
+		svs->efuse[6] = 0xabfb8fa5;
+		svs->efuse[7] = 0xabfb217b;
+		svs->efuse[8] = 0x4bf34be1;
+		svs->efuse[9] = 0xabfb670d;
+		svs->efuse[16] = 0xabfbc653;
+		svs->efuse[17] = 0x47f347e1;
+		svs->efuse[18] = 0xabfbd848;
+
+		svs->thermal_efuse[0] = 0x02873f69;
+		svs->thermal_efuse[1] = 0xa11d9142;
+		svs->thermal_efuse[2] = 0xa2526900;
+	}
+
+	/* svs efuse parsing */
+	ft_pgm = (svs->efuse[0] >> 4) & 0xf;
+
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+		if (ft_pgm <= 1)
+			svsb->init01_volt_flag = SVS_INIT01_VOLT_IGNORE;
+
+		switch (svsb->sw_id) {
+		case SVS_CPU_LITTLE:
+			svsb->bdes = svs->efuse[16] & 0xff;
+			svsb->mdes = (svs->efuse[16] >> 8) & 0xff;
+			svsb->dcbdet = (svs->efuse[16] >> 16) & 0xff;
+			svsb->dcmdet = (svs->efuse[16] >> 24) & 0xff;
+			svsb->mtdes  = (svs->efuse[17] >> 16) & 0xff;
+
+			if (ft_pgm <= 3)
+				svsb->volt_offset += 10;
+			else
+				svsb->volt_offset += 2;
+			break;
+		case SVS_CPU_BIG:
+			svsb->bdes = svs->efuse[18] & 0xff;
+			svsb->mdes = (svs->efuse[18] >> 8) & 0xff;
+			svsb->dcbdet = (svs->efuse[18] >> 16) & 0xff;
+			svsb->dcmdet = (svs->efuse[18] >> 24) & 0xff;
+			svsb->mtdes  = svs->efuse[17] & 0xff;
+
+			if (ft_pgm <= 3)
+				svsb->volt_offset += 15;
+			else
+				svsb->volt_offset += 12;
+			break;
+		case SVS_CCI:
+			svsb->bdes = svs->efuse[4] & 0xff;
+			svsb->mdes = (svs->efuse[4] >> 8) & 0xff;
+			svsb->dcbdet = (svs->efuse[4] >> 16) & 0xff;
+			svsb->dcmdet = (svs->efuse[4] >> 24) & 0xff;
+			svsb->mtdes  = (svs->efuse[5] >> 16) & 0xff;
+
+			if (ft_pgm <= 3)
+				svsb->volt_offset += 10;
+			else
+				svsb->volt_offset += 2;
+			break;
+		case SVS_GPU:
+			svsb->bdes = svs->efuse[6] & 0xff;
+			svsb->mdes = (svs->efuse[6] >> 8) & 0xff;
+			svsb->dcbdet = (svs->efuse[6] >> 16) & 0xff;
+			svsb->dcmdet = (svs->efuse[6] >> 24) & 0xff;
+			svsb->mtdes  = svs->efuse[5] & 0xff;
+
+			if (ft_pgm >= 2) {
+				svsb->freq_base = 800000000; /* 800MHz */
+				svsb->dvt_fixed = 2;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	for (i = 0; i < svsp->efuse_num; i++) {
+		if (svs->efuse[i])
+			pr_notice("M_HW_RES%d: 0x%08x\n", i, svs->efuse[i]);
+	}
+
+	/* thermal efuse parsing */
+	if (!svs->thermal_efuse)
+		return 0;
+
+	tp.adc_ge_t = (svs->thermal_efuse[1] >> 22) & 0x3ff;
+	tp.adc_oe_t = (svs->thermal_efuse[1] >> 12) & 0x3ff;
+
+	tp.o_vtsmcu1 = (svs->thermal_efuse[0] >> 17) & 0x1ff;
+	tp.o_vtsmcu2 = (svs->thermal_efuse[0] >> 8) & 0x1ff;
+	tp.o_vtsmcu3 = svs->thermal_efuse[1] & 0x1ff;
+	tp.o_vtsmcu4 = (svs->thermal_efuse[2] >> 23) & 0x1ff;
+	tp.o_vtsmcu5 = (svs->thermal_efuse[2] >> 5) & 0x1ff;
+	tp.o_vtsabb = (svs->thermal_efuse[2] >> 14) & 0x1ff;
+
+	tp.degc_cali = (svs->thermal_efuse[0] >> 1) & 0x3f;
+	tp.adc_cali_en_t = svs->thermal_efuse[0] & BIT(0);
+	tp.o_slope_sign = (svs->thermal_efuse[0] >> 7) & BIT(0);
+
+	tp.ts_id = (svs->thermal_efuse[1] >> 9) & BIT(0);
+	tp.o_slope = (svs->thermal_efuse[0] >> 26) & 0x3f;
+
+	if (tp.adc_cali_en_t == 1) {
+		if (tp.ts_id == 0)
+			tp.o_slope = 0;
+
+		if ((tp.adc_ge_t < 265 || tp.adc_ge_t > 758) ||
+		    (tp.adc_oe_t < 265 || tp.adc_oe_t > 758) ||
+		    (tp.o_vtsmcu1 < -8 || tp.o_vtsmcu1 > 484) ||
+		    (tp.o_vtsmcu2 < -8 || tp.o_vtsmcu2 > 484) ||
+		    (tp.o_vtsmcu3 < -8 || tp.o_vtsmcu3 > 484) ||
+		    (tp.o_vtsmcu4 < -8 || tp.o_vtsmcu4 > 484) ||
+		    (tp.o_vtsmcu5 < -8 || tp.o_vtsmcu5 > 484) ||
+		    (tp.o_vtsabb < -8 || tp.o_vtsabb > 484) ||
+		    (tp.degc_cali < 1 || tp.degc_cali > 63)) {
+			pr_err("bad thermal efuse data. disable mon mode\n");
+			mon_mode_support = false;
+		}
+	} else {
+		pr_err("no thermal efuse data. disable mon mode\n");
+		mon_mode_support = false;
+	}
+
+	if (!mon_mode_support) {
+		for (i = 0; i < svsp->thermal_efuse_num; i++)
+			pr_err("thermal_efuse[%u] = 0x%08x\n",
+			       i, svs->thermal_efuse[i]);
+
+		for (idx = 0; idx < svsp->bank_num; idx++) {
+			svsb = &svsp->banks[idx];
+			svsb->mon_mode_support = false;
+		}
+
+		return 0;
+	}
+
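+	/*
+	 * Derive the slope (mts) and, per bank, the intercept (bts) used
+	 * to convert raw sensor codes to temperature. The fixed-point
+	 * arithmetic below mirrors the calibration scheme of the MediaTek
+	 * thermal driver: ADC gain/offset first, then a room-temperature
+	 * reference (x_roomt) for each of the six sensors.
+	 */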
+	tp.ge = ((tp.adc_ge_t - 512) * 10000) / 4096;
+	tp.oe = (tp.adc_oe_t - 512);
+	tp.gain = (10000 + tp.ge);
+
+	format[0] = (tp.o_vtsmcu1 + 3350 - tp.oe);
+	format[1] = (tp.o_vtsmcu2 + 3350 - tp.oe);
+	format[2] = (tp.o_vtsmcu3 + 3350 - tp.oe);
+	format[3] = (tp.o_vtsmcu4 + 3350 - tp.oe);
+	format[4] = (tp.o_vtsmcu5 + 3350 - tp.oe);
+	format[5] = (tp.o_vtsabb + 3350 - tp.oe);
+
+	for (i = 0; i < 6; i++)
+		x_roomt[i] = (((format[i] * 10000) / 4096) * 10000) / tp.gain;
+
+	temp0 = (10000 * 100000 / tp.gain) * 15 / 18;
+
+	if (tp.o_slope_sign == 0)
+		mts = (temp0 * 10) / (1534 + tp.o_slope * 10);
+	else
+		mts = (temp0 * 10) / (1534 - tp.o_slope * 10);
+
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+		svsb->mts = mts;
+
+		switch (svsb->sw_id) {
+		case SVS_CPU_LITTLE:
+			tb_roomt = x_roomt[3];
+			break;
+		case SVS_CPU_BIG:
+			tb_roomt = x_roomt[4];
+			break;
+		case SVS_CCI:
+			tb_roomt = x_roomt[3];
+			break;
+		case SVS_GPU:
+			tb_roomt = x_roomt[1];
+			break;
+		default:
+			pr_err("unknown svsb_id = %u? disable svs\n",
+			       svsb->sw_id);
+			return -EINVAL;
+		}
+
+		temp0 = (tp.degc_cali * 10 / 2);
+		temp1 = ((10000 * 100000 / 4096 / tp.gain) *
+			 tp.oe + tb_roomt * 10) * 15 / 18;
+
+		if (tp.o_slope_sign == 0)
+			temp2 = temp1 * 100 / (1534 + tp.o_slope * 10);
+		else
+			temp2 = temp1 * 100 / (1534 - tp.o_slope * 10);
+
+		svsb->bts = (temp0 + temp2 - 250) * 4 / 10;
+	}
+
+	return 0;
+}
+
+static int svs_is_support(struct mtk_svs *svs)
+{
+	const struct svs_platform *svsp = svs->platform;
+	struct svs_bank *svsb;
+	struct nvmem_cell *cell;
+	size_t len;
+	int ret;
+	u32 idx, i;
+
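+	/*
+	 * Platforms flagged fake_efuse carry no fused calibration data;
+	 * allocate zeroed arrays here and let efuse_parsing() fall back to
+	 * its built-in sample values, as the hard-coded assignments at the
+	 * top of that function suggest.
+	 */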
+	if (svsp->fake_efuse) {
+		len = svsp->efuse_num * 4;
+		svs->efuse = kzalloc(len, GFP_KERNEL);
+		if (!svs->efuse)
+			return -ENOMEM;
+
+		len = svsp->thermal_efuse_num * 4;
+		svs->thermal_efuse = kzalloc(len, GFP_KERNEL);
+		if (!svs->thermal_efuse)
+			return -ENOMEM;
+
+		goto svsp_efuse_parsing;
+	}
+
+	/* get svs efuse by nvmem */
+	cell = nvmem_cell_get(svs->dev, "svs-calibration-data");
+	if (IS_ERR(cell)) {
+		pr_err("no \"svs-calibration-data\" from dts? disable svs\n");
+		return PTR_ERR(cell);
+	}
+
+	svs->efuse = (u32 *)nvmem_cell_read(cell, &len);
+	nvmem_cell_put(cell);
+	if (IS_ERR(svs->efuse)) {
+		pr_err("cannot read \"svs-calibration-data\"? disable svs\n");
+		return PTR_ERR(svs->efuse);
+	}
+
+	ret = (svs->efuse[svsp->efuse_check] == 0) ? -EPERM : 0;
+	if (ret) {
+		pr_err("no svs efuse. disable svs.\n");
+		for (i = 0; i < svsp->efuse_num; i++)
+			pr_err("M_HW_RES%d: 0x%08x\n", i, svs->efuse[i]);
+		return ret;
+	}
+
+	/* get thermal efuse by nvmem */
+	cell = nvmem_cell_get(svs->dev, "calibration-data");
+	if (IS_ERR(cell)) {
+		pr_err("no \"calibration-data\" from dts? disable mon mode\n");
+		svs->thermal_efuse = NULL;
+		for (idx = 0; idx < svsp->bank_num; idx++) {
+			svsb = &svsp->banks[idx];
+			svsb->mon_mode_support = false;
+		}
+		goto svsp_efuse_parsing;
+	}
+
+	svs->thermal_efuse = (u32 *)nvmem_cell_read(cell, &len);
+	nvmem_cell_put(cell);
+	if (IS_ERR(svs->thermal_efuse)) {
+		pr_err("cannot read \"calibration-data\"? disable mon mode\n");
+		svs->thermal_efuse = NULL;
+		for (idx = 0; idx < svsp->bank_num; idx++) {
+			svsb = &svsp->banks[idx];
+			svsb->mon_mode_support = false;
+		}
+	}
+
+svsp_efuse_parsing:
+	ret = svsp->efuse_parsing(svs);
+
+	return ret;
+}
+
+static int svs_resource_setup(struct mtk_svs *svs)
+{
+	const struct svs_platform *svsp = svs->platform;
+	struct svs_bank *svsb;
+	struct platform_device *pdev;
+	struct device_node *np = NULL;
+	struct dev_pm_opp *opp;
+	unsigned long freq;
+	size_t opp_size;
+	int count, ret;
+	u32 idx, i;
+
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+
+		if (!svsb->init01_support)
+			continue;
+
+		switch (svsb->sw_id) {
+		case SVS_CPU_LITTLE:
+			svsb->name = "SVS_CPU_LITTLE";
+			break;
+		case SVS_CPU_BIG:
+			svsb->name = "SVS_CPU_BIG";
+			break;
+		case SVS_CCI:
+			svsb->name = "SVS_CCI";
+			break;
+		case SVS_GPU:
+			svsb->name = "SVS_GPU";
+			break;
+		default:
+			WARN_ON(1);
+			return -EINVAL;
+		}
+
+		/* Add svs_bank device for opp-table/mtcmos/buck control */
+		pdev = platform_device_alloc(svsb->name, 0);
+		if (!pdev) {
+			pr_err("%s: fail to alloc pdev for svs_bank\n",
+			       svsb->name);
+			return -ENOMEM;
+		}
+
+		for_each_child_of_node(svs->dev->of_node, np) {
+			if (of_device_is_compatible(np, svsb->of_compatible)) {
+				pdev->dev.of_node = np;
+				break;
+			}
+		}
+
+		ret = platform_device_add(pdev);
+		if (ret) {
+			pr_err("%s: fail to add svs_bank device: %d\n",
+			       svsb->name, ret);
+			return ret;
+		}
+
+		svsb->dev = &pdev->dev;
+		dev_set_drvdata(svsb->dev, svs);
+		ret = dev_pm_opp_of_add_table(svsb->dev);
+		if (ret) {
+			pr_err("%s: fail to add opp table: %d\n",
+			       svsb->name, ret);
+			return ret;
+		}
+
+		mutex_init(&svsb->lock);
+
+		svsb->buck = devm_regulator_get_optional(svsb->dev,
+							 svsb->buck_name);
+		if (IS_ERR(svsb->buck)) {
+			pr_err("%s: cannot get regulator \"%s-supply\"\n",
+			       svsb->name, svsb->buck_name);
+			return PTR_ERR(svsb->buck);
+		}
+
+		count = dev_pm_opp_get_opp_count(svsb->dev);
+		if (svsb->opp_count != count) {
+			pr_err("%s: expected opp_count %u but got %d\n",
+			       svsb->name, svsb->opp_count, count);
+			return count < 0 ? count : -EINVAL;
+		}
+
+		opp_size = 4 * svsb->opp_count;
+		svsb->opp_volts = kmalloc(opp_size, GFP_KERNEL);
+		if (!svsb->opp_volts)
+			return -ENOMEM;
+
+		svsb->init02_volts = kmalloc(opp_size, GFP_KERNEL);
+		if (!svsb->init02_volts)
+			return -ENOMEM;
+
+		svsb->volts = kmalloc(opp_size, GFP_KERNEL);
+		if (!svsb->volts)
+			return -ENOMEM;
+
+		svsb->opp_freqs = kmalloc(opp_size, GFP_KERNEL);
+		if (!svsb->opp_freqs)
+			return -ENOMEM;
+
+		svsb->freqs_pct = kmalloc(opp_size, GFP_KERNEL);
+		if (!svsb->freqs_pct)
+			return -ENOMEM;
+
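+		/*
+		 * Walk the OPP table from the highest rate downwards:
+		 * dev_pm_opp_find_freq_floor() returns the closest OPP at
+		 * or below freq, so decrementing freq after each hit
+		 * yields every entry once, in descending order.
+		 */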
+		for (i = 0, freq = (u32)-1; i < svsb->opp_count; i++, freq--) {
+			opp = dev_pm_opp_find_freq_floor(svsb->dev, &freq);
+			if (IS_ERR(opp)) {
+				pr_err("%s: error opp entry!!, err = %ld\n",
+				       svsb->name, PTR_ERR(opp));
+				return PTR_ERR(opp);
+			}
+
+			svsb->opp_freqs[i] = freq;
+			svsb->opp_volts[i] = dev_pm_opp_get_voltage(opp);
+			svsb->freqs_pct[i] = percent(svsb->opp_freqs[i],
+						     svsb->freq_base) & 0xff;
+		}
+	}
+
+	return 0;
+}
+
+static int svs_suspend(struct device *dev)
+{
+	struct mtk_svs *svs = dev_get_drvdata(dev);
+	const struct svs_platform *svsp = svs->platform;
+	struct svs_bank *svsb;
+	unsigned long flags;
+	u32 idx;
+
+	/* Wait for any svs_isr() in flight to finish, then suspend all banks. */
+	flags = claim_mtk_svs_lock();
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+		svs->bank = svsb;
+		svs_switch_bank(svs);
+		svs_writel(svs, 0x0, SVSEN);
+		svs_writel(svs, 0x00ffffff, INTSTS);
+		svsb->suspended = true;
+	}
+	release_mtk_svs_lock(flags);
+
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+		if (svsb->phase == SVS_PHASE_MON) {
+			svsb->phase = SVS_PHASE_INIT02;
+			svs_set_volts(svsb, true);
+		}
+	}
+
+	clk_disable_unprepare(svs->main_clk);
+
+	return 0;
+}
+
+static int svs_resume(struct device *dev)
+{
+	struct mtk_svs *svs = dev_get_drvdata(dev);
+	const struct svs_platform *svsp = svs->platform;
+	struct svs_bank *svsb;
+	int ret;
+	u32 idx;
+
+	ret = clk_prepare_enable(svs->main_clk);
+	if (ret) {
+		pr_err("%s(): cannot enable main_clk: %d\n", __func__, ret);
+		return ret;
+	}
+
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+		svsb->suspended = false;
+	}
+
+	ret = svs_init02(svs);
+	if (ret)
+		return ret;
+
+	svs_mon_mode(svs);
+
+	return 0;
+}
+
+static int svs_debug_proc_show(struct seq_file *m, void *v)
+{
+	struct svs_bank *svsb = (struct svs_bank *)m->private;
+
+	if (svsb->phase == SVS_PHASE_INIT01)
+		seq_puts(m, "init1\n");
+	else if (svsb->phase == SVS_PHASE_INIT02)
+		seq_puts(m, "init2\n");
+	else if (svsb->phase == SVS_PHASE_MON)
+		seq_puts(m, "mon mode\n");
+	else if (svsb->phase == SVS_PHASE_ERROR)
+		seq_puts(m, "disabled\n");
+	else
+		seq_puts(m, "unknown\n");
+
+	return 0;
+}
+
+static ssize_t svs_debug_proc_write(struct file *file,
+				    const char __user *buffer,
+				    size_t count, loff_t *pos)
+{
+	struct svs_bank *svsb = (struct svs_bank *)PDE_DATA(file_inode(file));
+	struct mtk_svs *svs = dev_get_drvdata(svsb->dev);
+	char *buf;
+	unsigned long flags;
+	int enabled, ret;
+
+	if (svsb->phase == SVS_PHASE_ERROR)
+		return count;
+
+	if (count >= PAGE_SIZE)
+		return -EINVAL;
+
+	buf = (char *)__get_free_page(GFP_USER);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, buffer, count)) {
+		free_page((unsigned long)buf);
+		return -EFAULT;
+	}
+
+	buf[count] = '\0';
+
+	ret = kstrtoint(buf, 10, &enabled);
+	free_page((unsigned long)buf);
+	if (ret)
+		return ret;
+
+	if (!enabled) {
+		flags = claim_mtk_svs_lock();
+		svs->bank = svsb;
+
+		svsb->init01_support = false;
+		svsb->init02_support = false;
+		svsb->mon_mode_support = false;
+
+		svs_switch_bank(svs);
+		svs_writel(svs, 0x0, SVSEN);
+		svs_writel(svs, 0x00ffffff, INTSTS);
+		release_mtk_svs_lock(flags);
+	}
+
+	svsb->phase = SVS_PHASE_ERROR;
+	svs_set_volts(svsb, true);
+
+	return count;
+}
+
+proc_fops_rw(svs_debug);
+
+static int svs_dump_proc_show(struct seq_file *m, void *v)
+{
+	struct mtk_svs *svs = (struct mtk_svs *)m->private;
+	const struct svs_platform *svsp = svs->platform;
+	struct svs_bank *svsb;
+	unsigned long svs_reg_addr;
+	u32 idx, i, j;
+
+	for (i = 0; i < svsp->efuse_num; i++) {
+		if (svs->efuse[i])
+			seq_printf(m, "M_HW_RES%d = 0x%08x\n",
+				   i, svs->efuse[i]);
+	}
+
+	for (i = 0; i < svsp->thermal_efuse_num; i++) {
+		if (svs->thermal_efuse && svs->thermal_efuse[i])
+			seq_printf(m, "THERMAL_EFUSE%d = 0x%08x\n",
+				   i, svs->thermal_efuse[i]);
+	}
+
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+
+		if (!svsb->init01_support)
+			continue;
+
+		for (i = SVS_PHASE_INIT01; i <= SVS_PHASE_MON; i++) {
+			seq_printf(m, "Bank_number = %u\n", svsb->hw_id);
+
+			if (i < SVS_PHASE_MON)
+				seq_printf(m, "mode = init%d\n", i + 1);
+			else
+				seq_puts(m, "mode = mon\n");
+
+			for (j = TEMPMONCTL0; j < reg_num; j++) {
+				svs_reg_addr = (unsigned long)(svs->base +
+							       svsp->regs[j]);
+				seq_printf(m, "0x%08lx = 0x%08x\n",
+					   svs_reg_addr, svsb->reg_data[i][j]);
+			}
+		}
+	}
+
+	return 0;
+}
+
+proc_fops_ro(svs_dump);
+
+static int svs_status_proc_show(struct seq_file *m, void *v)
+{
+	struct svs_bank *svsb = (struct svs_bank *)m->private;
+	struct dev_pm_opp *opp;
+	unsigned long freq;
+	int zone_temp, ret;
+	u32 i;
+
+	ret = svs_get_zone_temperature(svsb, &zone_temp);
+	if (ret)
+		seq_printf(m, "%s: cannot get zone \"%s\" temperature\n",
+			   svsb->name, svsb->zone_name);
+	else
+		seq_printf(m, "%s: temperature = %d\n", svsb->name, zone_temp);
+
+	for (i = 0, freq = (u32)-1; i < svsb->opp_count; i++, freq--) {
+		opp = dev_pm_opp_find_freq_floor(svsb->dev, &freq);
+		if (IS_ERR(opp)) {
+			seq_printf(m, "%s: error opp entry!!, err = %ld\n",
+				   svsb->name, PTR_ERR(opp));
+			return PTR_ERR(opp);
+		}
+
+		seq_printf(m, "opp_freqs[%02u]: %lu, volts[%02u]: %lu, ",
+			   i, freq, i, dev_pm_opp_get_voltage(opp));
+		seq_printf(m, "svsb_volts[%02u]: 0x%x, freqs_pct[%02u]: %u\n",
+			   i, svsb->volts[i], i, svsb->freqs_pct[i]);
+	}
+
+	return 0;
+}
+
+proc_fops_ro(svs_status);
+
+static int svs_volt_offset_proc_show(struct seq_file *m, void *v)
+{
+	struct svs_bank *svsb = (struct svs_bank *)m->private;
+
+	seq_printf(m, "%d\n", svsb->volt_offset);
+
+	return 0;
+}
+
+static ssize_t svs_volt_offset_proc_write(struct file *file,
+					  const char __user *buffer,
+					  size_t count, loff_t *pos)
+{
+	struct svs_bank *svsb = (struct svs_bank *)PDE_DATA(file_inode(file));
+	char *buf;
+	int ret, volt_offset;
+
+	buf = (char *)__get_free_page(GFP_USER);
+	if (!buf)
+		return -ENOMEM;
+
+	if (count >= PAGE_SIZE) {
+		free_page((unsigned long)buf);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buf, buffer, count)) {
+		free_page((unsigned long)buf);
+		return -EFAULT;
+	}
+
+	buf[count] = '\0';
+
+	ret = kstrtoint(buf, 10, &volt_offset);
+	free_page((unsigned long)buf);
+	if (!ret) {
+		svsb->volt_offset = volt_offset;
+		ret = svs_set_volts(svsb, true);
+		if (ret)
+			return ret;
+	}
+
+	return count;
+}
+
+proc_fops_rw(svs_volt_offset);
+
+static int svs_create_svs_procfs(struct mtk_svs *svs)
+{
+	const struct svs_platform *svsp = svs->platform;
+	struct svs_bank *svsb;
+	struct proc_dir_entry *svs_dir, *bank_dir;
+	u32 idx, i;
+
+	struct pentry {
+		const char *name;
+		const struct file_operations *fops;
+	};
+
+	struct pentry svs_entries[] = {
+		proc_entry(svs_dump),
+	};
+
+	struct pentry bank_entries[] = {
+		proc_entry(svs_debug),
+		proc_entry(svs_status),
+		proc_entry(svs_volt_offset),
+	};
+
+	svs_dir = proc_mkdir("svs", NULL);
+	if (!svs_dir) {
+		pr_err("mkdir /proc/svs failed\n");
+		return -EPERM;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(svs_entries); i++) {
+		if (!proc_create_data(svs_entries[i].name, 0664,
+				      svs_dir, svs_entries[i].fops, svs)) {
+			pr_err("create /proc/svs/%s failed\n",
+			       svs_entries[i].name);
+			return -EPERM;
+		}
+	}
+
+	for (idx = 0; idx < svsp->bank_num; idx++) {
+		svsb = &svsp->banks[idx];
+
+		if (!svsb->init01_support)
+			continue;
+
+		bank_dir = proc_mkdir(svsb->name, svs_dir);
+		if (!bank_dir) {
+			pr_err("mkdir /proc/svs/%s failed\n", svsb->name);
+			return -EPERM;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(bank_entries); i++) {
+			if (!proc_create_data(bank_entries[i].name, 0664,
+					      bank_dir, bank_entries[i].fops,
+					      svsb)) {
+				pr_err("create /proc/svs/%s/%s failed\n",
+				       svsb->name, bank_entries[i].name);
+				return -EPERM;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static struct svs_bank_ops svs_mt8183_banks_ops = {
+	.set_freqs_pct	= svs_set_freqs_pct_v2,
+	.get_vops	= svs_get_vops_v2,
+};
+
+static struct svs_bank svs_mt8183_banks[4] = {
+	{
+		.of_compatible		= "mediatek,mt8183-svs-cpu-little",
+		.sw_id			= SVS_CPU_LITTLE,
+		.hw_id			= 0,
+		.ops			= &svs_mt8183_banks_ops,
+		.zone_name		= "tzts4",
+		.buck_name		= "vcpu-little",
+		.mtcmos_request		= false,
+		.init01_volt_flag	= SVS_INIT01_VOLT_INC_ONLY,
+		.init01_support		= true,
+		.init02_support		= true,
+		.mon_mode_support	= false,
+		.opp_count		= 16,
+		.freq_base		= 1989000000,
+		.vboot			= 0x30,
+		.volt_step		= 6250,
+		.volt_base		= 500000,
+		.volt_offset		= 0,
+		.vmax			= 0x64,
+		.vmin			= 0x18,
+		.dthi			= 0x1,
+		.dtlo			= 0xfe,
+		.det_window		= 0xa28,
+		.det_max		= 0xffff,
+		.age_config		= 0x555555,
+		.agem			= 0x0,
+		.dc_config		= 0x555555,
+		.dvt_fixed		= 0x7,
+		.vco			= 0x10,
+		.chkshift		= 0x77,
+		.upper_temp_bound	= 0x64,
+		.lower_temp_bound	= 0xb2,
+		.low_temp_threashold	= 25000,
+		.low_temp_offset	= 0,
+		.coresel		= 0x8fff0000,
+		.systemclk_en		= BIT(31),
+		.intst			= BIT(0),
+		.ctl0			= 0x00010001,
+	},
+	{
+		.of_compatible		= "mediatek,mt8183-svs-cpu-big",
+		.sw_id			= SVS_CPU_BIG,
+		.hw_id			= 1,
+		.ops			= &svs_mt8183_banks_ops,
+		.zone_name		= "tzts5",
+		.buck_name		= "vcpu-big",
+		.mtcmos_request		= false,
+		.init01_volt_flag	= SVS_INIT01_VOLT_INC_ONLY,
+		.init01_support		= true,
+		.init02_support		= true,
+		.mon_mode_support	= false,
+		.opp_count		= 16,
+		.freq_base		= 1989000000,
+		.vboot			= 0x30,
+		.volt_step		= 6250,
+		.volt_base		= 500000,
+		.volt_offset		= 0,
+		.vmax			= 0x58,
+		.vmin			= 0x10,
+		.dthi			= 0x1,
+		.dtlo			= 0xfe,
+		.det_window		= 0xa28,
+		.det_max		= 0xffff,
+		.age_config		= 0x555555,
+		.agem			= 0x0,
+		.dc_config		= 0x555555,
+		.dvt_fixed		= 0x7,
+		.vco			= 0x10,
+		.chkshift		= 0x77,
+		.upper_temp_bound	= 0x64,
+		.lower_temp_bound	= 0xb2,
+		.low_temp_threashold	= 25000,
+		.low_temp_offset	= 0,
+		.coresel		= 0x8fff0001,
+		.systemclk_en		= BIT(31),
+		.intst			= BIT(1),
+		.ctl0			= 0x00000001,
+	},
+	{
+		.of_compatible		= "mediatek,mt8183-svs-cci",
+		.sw_id			= SVS_CCI,
+		.hw_id			= 2,
+		.ops			= &svs_mt8183_banks_ops,
+		.zone_name		= "tzts4",
+		.buck_name		= "vcci",
+		.mtcmos_request		= false,
+		.init01_volt_flag	= SVS_INIT01_VOLT_INC_ONLY,
+		.init01_support		= true,
+		.init02_support		= true,
+		.mon_mode_support	= false,
+		.opp_count		= 16,
+		.freq_base		= 1196000000,
+		.vboot			= 0x30,
+		.volt_step		= 6250,
+		.volt_base		= 500000,
+		.volt_offset		= 0,
+		.vmax			= 0x64,
+		.vmin			= 0x18,
+		.dthi			= 0x1,
+		.dtlo			= 0xfe,
+		.det_window		= 0xa28,
+		.det_max		= 0xffff,
+		.age_config		= 0x555555,
+		.agem			= 0x0,
+		.dc_config		= 0x555555,
+		.dvt_fixed		= 0x7,
+		.vco			= 0x10,
+		.chkshift		= 0x77,
+		.upper_temp_bound	= 0x64,
+		.lower_temp_bound	= 0xb2,
+		.low_temp_threashold	= 25000,
+		.low_temp_offset	= 0,
+		.coresel		= 0x8fff0002,
+		.systemclk_en		= BIT(31),
+		.intst			= BIT(2),
+		.ctl0			= 0x00100003,
+	},
+	{
+		.of_compatible		= "mediatek,mt8183-svs-gpu",
+		.sw_id			= SVS_GPU,
+		.hw_id			= 3,
+		.ops			= &svs_mt8183_banks_ops,
+		.zone_name		= "tzts2",
+		.buck_name		= "vgpu",
+		.mtcmos_request		= true,
+		.init01_volt_flag	= SVS_INIT01_VOLT_INC_ONLY,
+		.init01_support		= true,
+		.init02_support		= true,
+		.mon_mode_support	= true,
+		.opp_count		= 16,
+		.freq_base		= 900000000,
+		.vboot			= 0x30,
+		.volt_step		= 6250,
+		.volt_base		= 500000,
+		.volt_offset		= 0,
+		.vmax			= 0x40,
+		.vmin			= 0x14,
+		.dthi			= 0x1,
+		.dtlo			= 0xfe,
+		.det_window		= 0xa28,
+		.det_max		= 0xffff,
+		.age_config		= 0x555555,
+		.agem			= 0x0,
+		.dc_config		= 0x555555,
+		.dvt_fixed		= 0x3,
+		.vco			= 0x10,
+		.chkshift		= 0x77,
+		.upper_temp_bound	= 0x64,
+		.lower_temp_bound	= 0xb2,
+		.low_temp_threashold	= 25000,
+		.low_temp_offset	= 3,
+		.coresel		= 0x8fff0003,
+		.systemclk_en		= BIT(31),
+		.intst			= BIT(3),
+		.ctl0			= 0x00050001,
+	},
+};
+
+static const struct svs_platform svs_mt8183_platform = {
+	.name		= "mt8183-svs",
+	.banks		= svs_mt8183_banks,
+	.efuse_parsing	= svs_mt8183_efuse_parsing,
+	.regs		= svs_regs_v2,
+	.fake_efuse	= false,
+	.bank_num	= 4,
+	.efuse_num	= 25,
+	.efuse_check	= 2,
+	.thermal_efuse_num = 3,
+};
+
+static const struct of_device_id mtk_svs_of_match[] = {
+	{
+		.compatible = "mediatek,mt8183-svs",
+		.data = &svs_mt8183_platform,
+	}, {
+		/* sentinel */
+	},
+};
+
+static int svs_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *of_dev_id;
+	struct mtk_svs *svs;
+	int ret;
+	u32 svs_irq;
+
+	svs = devm_kzalloc(&pdev->dev, sizeof(*svs), GFP_KERNEL);
+	if (!svs)
+		return -ENOMEM;
+
+	svs->dev = &pdev->dev;
+	if (!svs->dev->of_node) {
+		pr_err("cannot find device node\n");
+		return -ENODEV;
+	}
+
+	svs->base = of_iomap(svs->dev->of_node, 0);
+	if (!svs->base) {
+		pr_err("cannot map svs register base\n");
+		return -ENOMEM;
+	}
+
+	svs_irq = irq_of_parse_and_map(svs->dev->of_node, 0);
+	ret = devm_request_threaded_irq(svs->dev, svs_irq, NULL, svs_isr,
+					IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+					"mtk-svs", svs);
+	if (ret) {
+		pr_err("register irq(%d) failed: %d\n", svs_irq, ret);
+		return ret;
+	}
+
+	of_dev_id = of_match_node(mtk_svs_of_match, svs->dev->of_node);
+	if (!of_dev_id || !of_dev_id->data)
+		return -EINVAL;
+
+	svs->platform = of_dev_id->data;
+	dev_set_drvdata(svs->dev, svs);
+
+	svs->main_clk = devm_clk_get(svs->dev, "main_clk");
+	if (IS_ERR(svs->main_clk)) {
+		pr_err("failed to get clock: %ld\n", PTR_ERR(svs->main_clk));
+		return PTR_ERR(svs->main_clk);
+	}
+
+	ret = clk_prepare_enable(svs->main_clk);
+	if (ret) {
+		pr_err("cannot enable main_clk: %d\n", ret);
+		return ret;
+	}
+
+	ret = svs_is_support(svs);
+	if (ret)
+		goto svs_probe_fail;
+
+	ret = svs_resource_setup(svs);
+	if (ret)
+		goto svs_probe_fail;
+
+	ret = svs_start(svs);
+	if (ret)
+		goto svs_probe_fail;
+
+	ret = svs_create_svs_procfs(svs);
+	if (ret)
+		goto svs_probe_fail;
+
+	return 0;
+
+svs_probe_fail:
+	clk_disable_unprepare(svs->main_clk);
+
+	return ret;
+}
+
+static const struct dev_pm_ops svs_pm_ops = {
+	.suspend	= svs_suspend,
+	.resume		= svs_resume,
+};
+
+static struct platform_driver svs_driver = {
+	.probe	= svs_probe,
+	.driver	= {
+		.name		= "mtk-svs",
+		.pm		= &svs_pm_ops,
+		.of_match_table	= of_match_ptr(mtk_svs_of_match),
+	},
+};
+
+static int __init svs_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&svs_driver);
+	if (ret) {
+		pr_err("svs platform driver register failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+late_initcall_sync(svs_init);
+
+MODULE_DESCRIPTION("MediaTek SVS Driver v1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index eb6674c..ca0dad3 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -33,6 +33,9 @@
 #define PWMTHRES		0x30
 #define PWM45THRES_FIXUP	0x34
 
+#define PWM_CK_26M_SEL		0x210
+#define PWM_CK_26M_SEL_BCLK_26M	BIT(0)
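+/*
+ * When PWM_CK_26M_SEL_BCLK_26M is set, the PWM counts from a fixed 26 MHz
+ * clock, so the rate reported by the per-channel bus clock must not be
+ * used when computing the period resolution.
+ */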
+
 #define PWM_CLK_DIV_MAX		7
 
 enum {
@@ -58,6 +61,7 @@
 	unsigned int num_pwms;
 	bool pwm45_fixup;
 	bool has_clks;
+	bool has_ck_26m_sel;
 };
 
 /**
@@ -146,6 +150,7 @@
 	    reg_thres = PWMTHRES;
 	u64 resolution;
 	int ret;
+	unsigned long rate;
 
 	ret = mtk_pwm_clk_enable(chip, pwm);
 	if (ret < 0)
@@ -153,7 +158,12 @@
 
 	/* Using resolution in picosecond gets accuracy higher */
 	resolution = (u64)NSEC_PER_SEC * 1000;
-	do_div(resolution, clk_get_rate(clk));
+	if (pc->soc->has_ck_26m_sel &&
+	    readl(pc->regs + PWM_CK_26M_SEL) & PWM_CK_26M_SEL_BCLK_26M)
+		rate = 26000000; /* 26MHz */
+	else
+		rate = clk_get_rate(clk);
+	do_div(resolution, rate);
 
 	cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution);
 	while (cnt_period > 8191) {
@@ -282,24 +292,42 @@
 	.num_pwms = 8,
 	.pwm45_fixup = false,
 	.has_clks = true,
+	.has_ck_26m_sel = false,
 };
 
 static const struct mtk_pwm_platform_data mt7622_pwm_data = {
 	.num_pwms = 6,
 	.pwm45_fixup = false,
 	.has_clks = true,
+	.has_ck_26m_sel = false,
 };
 
 static const struct mtk_pwm_platform_data mt7623_pwm_data = {
 	.num_pwms = 5,
 	.pwm45_fixup = true,
 	.has_clks = true,
+	.has_ck_26m_sel = false,
 };
 
 static const struct mtk_pwm_platform_data mt7628_pwm_data = {
 	.num_pwms = 4,
 	.pwm45_fixup = true,
 	.has_clks = false,
+	.has_ck_26m_sel = false,
+};
+
+static const struct mtk_pwm_platform_data mt8183_pwm_data = {
+	.num_pwms = 4,
+	.pwm45_fixup = false,
+	.has_clks = true,
+	.has_ck_26m_sel = true,
+};
+
+static const struct mtk_pwm_platform_data mt8516_pwm_data = {
+	.num_pwms = 5,
+	.pwm45_fixup = false,
+	.has_clks = true,
+	.has_ck_26m_sel = true,
 };
 
 static const struct of_device_id mtk_pwm_of_match[] = {
@@ -307,6 +335,8 @@
 	{ .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data },
 	{ .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
 	{ .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data },
+	{ .compatible = "mediatek,mt8183-pwm", .data = &mt8183_pwm_data },
+	{ .compatible = "mediatek,mt8516-pwm", .data = &mt8516_pwm_data },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, mtk_pwm_of_match);
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 893940d..15803c7 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -277,10 +277,21 @@
 	.commit_mask = 0x1,
 };
 
+static const struct mtk_pwm_data mt8183_pwm_data = {
+	.enable_mask = BIT(0),
+	.con0 = 0x18,
+	.con0_sel = 0x0,
+	.con1 = 0x1c,
+	.has_commit = false,
+	.bls_debug = 0x80,
+	.bls_debug_mask = 0x3,
+};
+
 static const struct of_device_id mtk_disp_pwm_of_match[] = {
 	{ .compatible = "mediatek,mt2701-disp-pwm", .data = &mt2701_pwm_data},
 	{ .compatible = "mediatek,mt6595-disp-pwm", .data = &mt8173_pwm_data},
 	{ .compatible = "mediatek,mt8173-disp-pwm", .data = &mt8173_pwm_data},
+	{ .compatible = "mediatek,mt8183-disp-pwm", .data = &mt8183_pwm_data},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, mtk_disp_pwm_of_match);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 329cdd3..3cadc48 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -579,6 +579,15 @@
 	  This driver supports the control of different power rails of device
 	  through regulator interface.
 
+config REGULATOR_MT6358
+	tristate "MediaTek MT6358 PMIC"
+	depends on MFD_MT6397
+	help
+	  Say y here to enable support for the power regulators on the
+	  MediaTek MT6358 PMIC.
+	  This driver allows the different power rails of the device to be
+	  controlled through the regulator interface.
+
 config REGULATOR_MT6380
 	tristate "MediaTek MT6380 PMIC"
 	depends on MTK_PMIC_WRAP
@@ -588,6 +597,15 @@
 	  This driver supports the control of different power rails of device
 	  through regulator interface.
 
+config REGULATOR_MT6392
+	tristate "MediaTek MT6392 PMIC"
+	depends on MFD_MT6397
+	help
+	  Say y here to enable support for the power regulators on the
+	  MediaTek MT6392 PMIC.
+	  This driver allows the different power rails of the device to be
+	  controlled through the regulator interface.
+
 config REGULATOR_MT6397
 	tristate "MediaTek MT6397 PMIC"
 	depends on MFD_MT6397
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index bba9c48..acdfe886 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -75,7 +75,9 @@
 obj-$(CONFIG_REGULATOR_MC13XXX_CORE) +=  mc13xxx-regulator-core.o
 obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
 obj-$(CONFIG_REGULATOR_MT6323)	+= mt6323-regulator.o
+obj-$(CONFIG_REGULATOR_MT6358)	+= mt6358-regulator.o
 obj-$(CONFIG_REGULATOR_MT6380)	+= mt6380-regulator.o
+obj-$(CONFIG_REGULATOR_MT6392)	+= mt6392-regulator.o
 obj-$(CONFIG_REGULATOR_MT6397)	+= mt6397-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_RPMH) += qcom-rpmh-regulator.o
diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
new file mode 100644
index 0000000..fd528a3
--- /dev/null
+++ b/drivers/regulator/mt6358-regulator.c
@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6358-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MT6358_BUCK_MODE_AUTO	0
+#define MT6358_BUCK_MODE_FORCE_PWM	1
+
+/*
+ * MT6358 regulators' information
+ *
+ * @desc: standard fields of regulator description.
+ * @qi: Mask for querying the enable status of a regulator.
+ */
+struct mt6358_regulator_info {
+	struct regulator_desc desc;
+	u32 status_reg;
+	u32 qi;
+	const u32 *index_table;
+	unsigned int n_table;
+	u32 vsel_shift;
+	u32 da_vsel_reg;
+	u32 da_vsel_mask;
+	u32 da_vsel_shift;
+	u32 modeset_reg;
+	u32 modeset_mask;
+	u32 modeset_shift;
+};
+
+#define MT6358_BUCK(match, vreg, min, max, step,		\
+	volt_ranges, vosel_mask, _da_vsel_reg, _da_vsel_mask,	\
+	_da_vsel_shift, _modeset_reg, _modeset_shift)		\
+[MT6358_ID_##vreg] = {	\
+	.desc = {	\
+		.name = #vreg,	\
+		.of_match = of_match_ptr(match),	\
+		.ops = &mt6358_volt_range_ops,	\
+		.type = REGULATOR_VOLTAGE,	\
+		.id = MT6358_ID_##vreg,		\
+		.owner = THIS_MODULE,		\
+		.n_voltages = ((max) - (min)) / (step) + 1,	\
+		.linear_ranges = volt_ranges,		\
+		.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
+		.vsel_reg = MT6358_BUCK_##vreg##_ELR0,	\
+		.vsel_mask = vosel_mask,	\
+		.enable_reg = MT6358_BUCK_##vreg##_CON0,	\
+		.enable_mask = BIT(0),	\
+		.of_map_mode = mt6358_map_mode,	\
+	},	\
+	.status_reg = MT6358_BUCK_##vreg##_DBG1,	\
+	.qi = BIT(0),	\
+	.da_vsel_reg = _da_vsel_reg,	\
+	.da_vsel_mask = _da_vsel_mask,	\
+	.da_vsel_shift = _da_vsel_shift,	\
+	.modeset_reg = _modeset_reg,	\
+	.modeset_mask = BIT(_modeset_shift),	\
+	.modeset_shift = _modeset_shift	\
+}
+
+#define MT6358_LDO(match, vreg, ldo_volt_table,	\
+	ldo_index_table, enreg, enbit, vosel,	\
+	vosel_mask, vosel_shift)	\
+[MT6358_ID_##vreg] = {	\
+	.desc = {	\
+		.name = #vreg,	\
+		.of_match = of_match_ptr(match),	\
+		.ops = &mt6358_volt_table_ops,	\
+		.type = REGULATOR_VOLTAGE,	\
+		.id = MT6358_ID_##vreg,	\
+		.owner = THIS_MODULE,	\
+		.n_voltages = ARRAY_SIZE(ldo_volt_table),	\
+		.volt_table = ldo_volt_table,	\
+		.vsel_reg = vosel,	\
+		.vsel_mask = vosel_mask,	\
+		.enable_reg = enreg,	\
+		.enable_mask = BIT(enbit),	\
+	},	\
+	.status_reg = MT6358_LDO_##vreg##_CON1,	\
+	.qi = BIT(15),	\
+	.index_table = ldo_index_table,	\
+	.n_table = ARRAY_SIZE(ldo_index_table),	\
+	.vsel_shift = vosel_shift,	\
+}
+
+#define MT6358_LDO1(match, vreg, min, max, step,	\
+	volt_ranges, _da_vsel_reg, _da_vsel_mask,	\
+	_da_vsel_shift, vosel, vosel_mask)	\
+[MT6358_ID_##vreg] = {	\
+	.desc = {	\
+		.name = #vreg,	\
+		.of_match = of_match_ptr(match),	\
+		.ops = &mt6358_volt_range_ops,	\
+		.type = REGULATOR_VOLTAGE,	\
+		.id = MT6358_ID_##vreg,	\
+		.owner = THIS_MODULE,	\
+		.n_voltages = ((max) - (min)) / (step) + 1,	\
+		.linear_ranges = volt_ranges,	\
+		.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
+		.vsel_reg = vosel,	\
+		.vsel_mask = vosel_mask,	\
+		.enable_reg = MT6358_LDO_##vreg##_CON0,	\
+		.enable_mask = BIT(0),	\
+	},	\
+	.da_vsel_reg = _da_vsel_reg,	\
+	.da_vsel_mask = _da_vsel_mask,	\
+	.da_vsel_shift = _da_vsel_shift,	\
+	.status_reg = MT6358_LDO_##vreg##_DBG1,	\
+	.qi = BIT(0),	\
+}
+
+#define MT6358_REG_FIXED(match, vreg,	\
+	enreg, enbit, volt)	\
+[MT6358_ID_##vreg] = {	\
+	.desc = {	\
+		.name = #vreg,	\
+		.of_match = of_match_ptr(match),	\
+		.ops = &mt6358_volt_fixed_ops,	\
+		.type = REGULATOR_VOLTAGE,	\
+		.id = MT6358_ID_##vreg,	\
+		.owner = THIS_MODULE,	\
+		.n_voltages = 1,	\
+		.enable_reg = enreg,	\
+		.enable_mask = BIT(enbit),	\
+		.min_uV = volt,	\
+	},	\
+	.status_reg = MT6358_LDO_##vreg##_CON1,	\
+	.qi = BIT(15),							\
+}
+
+static const struct regulator_linear_range buck_volt_range1[] = {
+	REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 6250),
+};
+
+static const struct regulator_linear_range buck_volt_range2[] = {
+	REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 12500),
+};
+
+static const struct regulator_linear_range buck_volt_range3[] = {
+	REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
+};
+
+static const struct regulator_linear_range buck_volt_range4[] = {
+	REGULATOR_LINEAR_RANGE(1000000, 0, 0x7f, 12500),
+};
+
+static const u32 vdram2_voltages[] = {
+	600000, 1800000,
+};
+
+static const u32 vsim1_voltages[] = {
+	1700000, 1800000, 2700000, 3000000, 3100000,
+};
+
+static const u32 vibr_voltages[] = {
+	1200000, 1300000, 1500000, 1800000,
+	2000000, 2800000, 3000000, 3300000,
+};
+
+static const u32 vusb_voltages[] = {
+	3000000, 3100000,
+};
+
+static const u32 vcamd_voltages[] = {
+	900000, 1000000, 1100000, 1200000,
+	1300000, 1500000, 1800000,
+};
+
+static const u32 vefuse_voltages[] = {
+	1700000, 1800000, 1900000,
+};
+
+static const u32 vmch_voltages[] = {
+	2900000, 3000000, 3300000,
+};
+
+static const u32 vcama1_voltages[] = {
+	1800000, 2500000, 2700000,
+	2800000, 2900000, 3000000,
+};
+
+static const u32 vemc_voltages[] = {
+	2900000, 3000000, 3300000,
+};
+
+static const u32 vcn33_bt_wifi_voltages[] = {
+	3300000, 3400000, 3500000,
+};
+
+static const u32 vcama2_voltages[] = {
+	1800000, 2500000, 2700000,
+	2800000, 2900000, 3000000,
+};
+
+static const u32 vmc_voltages[] = {
+	1800000, 2900000, 3000000, 3300000,
+};
+
+static const u32 vldo28_voltages[] = {
+	2800000, 3000000,
+};
+
+static const u32 vsim2_voltages[] = {
+	1700000, 1800000, 2700000,
+	3000000, 3100000,
+};
+
+static const u32 vdram2_idx[] = {
+	0, 12,
+};
+
+static const u32 vsim1_idx[] = {
+	3, 4, 8, 11, 12,
+};
+
+static const u32 vibr_idx[] = {
+	0, 1, 2, 4, 5, 9, 11, 13,
+};
+
+static const u32 vusb_idx[] = {
+	3, 4,
+};
+
+static const u32 vcamd_idx[] = {
+	3, 4, 5, 6, 7, 9, 12,
+};
+
+static const u32 vefuse_idx[] = {
+	11, 12, 13,
+};
+
+static const u32 vmch_idx[] = {
+	2, 3, 5,
+};
+
+static const u32 vcama1_idx[] = {
+	0, 7, 9, 10, 11, 12,
+};
+
+static const u32 vemc_idx[] = {
+	2, 3, 5,
+};
+
+static const u32 vcn33_bt_wifi_idx[] = {
+	1, 2, 3,
+};
+
+static const u32 vcama2_idx[] = {
+	0, 7, 9, 10, 11, 12,
+};
+
+static const u32 vmc_idx[] = {
+	4, 10, 11, 13,
+};
+
+static const u32 vldo28_idx[] = {
+	1, 3,
+};
+
+static const u32 vsim2_idx[] = {
+	3, 4, 8, 11, 12,
+};
+
+static inline unsigned int mt6358_map_mode(unsigned int mode)
+{
+	return mode == MT6358_BUCK_MODE_AUTO ?
+		REGULATOR_MODE_NORMAL : REGULATOR_MODE_FAST;
+}
+
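+/*
+ * For LDOs whose supported voltages are a sparse subset of the hardware
+ * selector space, volt_table lists the voltages and index_table the
+ * matching raw selector values; the two helpers below translate between
+ * driver selectors and hardware selectors.
+ */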
+static int mt6358_set_voltage_sel(struct regulator_dev *rdev,
+				  unsigned int selector)
+{
+	int idx, ret;
+	const u32 *pvol;
+	struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+
+	pvol = (const u32 *)info->index_table;
+
+	idx = pvol[selector];
+	ret = regmap_update_bits(rdev->regmap, info->desc.vsel_reg,
+				 info->desc.vsel_mask,
+				 idx << info->vsel_shift);
+
+	return ret;
+}
+
+static int mt6358_get_voltage_sel(struct regulator_dev *rdev)
+{
+	int idx, ret;
+	u32 selector;
+	struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+	const u32 *pvol;
+
+	ret = regmap_read(rdev->regmap, info->desc.vsel_reg, &selector);
+	if (ret != 0) {
+		dev_info(&rdev->dev,
+			 "Failed to get mt6358 %s vsel reg: %d\n",
+			 info->desc.name, ret);
+		return ret;
+	}
+
+	selector = (selector & info->desc.vsel_mask) >> info->vsel_shift;
+	pvol = (const u32 *)info->index_table;
+	ret = -EINVAL;
+	for (idx = 0; idx < info->desc.n_voltages; idx++) {
+		if (pvol[idx] == selector) {
+			ret = idx;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int mt6358_get_buck_voltage_sel(struct regulator_dev *rdev)
+{
+	int ret, regval;
+	struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+
+	ret = regmap_read(rdev->regmap, info->da_vsel_reg, &regval);
+	if (ret != 0) {
+		dev_info(&rdev->dev,
+			 "Failed to get mt6358 Buck %s vsel reg: %d\n",
+			 info->desc.name, ret);
+		return ret;
+	}
+
+	ret = (regval >> info->da_vsel_shift) & info->da_vsel_mask;
+
+	return ret;
+}
+
+static int mt6358_get_status(struct regulator_dev *rdev)
+{
+	int ret;
+	u32 regval;
+	struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+
+	ret = regmap_read(rdev->regmap, info->status_reg, &regval);
+	if (ret != 0) {
+		dev_info(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+		return ret;
+	}
+
+	return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static int mt6358_regulator_set_mode(struct regulator_dev *rdev,
+				     unsigned int mode)
+{
+	struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+	int ret, val;
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		val = MT6358_BUCK_MODE_FORCE_PWM;
+		break;
+	case REGULATOR_MODE_NORMAL:
+		val = MT6358_BUCK_MODE_AUTO;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	dev_dbg(&rdev->dev, "mt6358 buck set_mode %#x, %#x, %#x, %#x\n",
+		info->modeset_reg, info->modeset_mask,
+		info->modeset_shift, val);
+
+	val <<= info->modeset_shift;
+	ret = regmap_update_bits(rdev->regmap, info->modeset_reg,
+				 info->modeset_mask, val);
+	if (ret != 0) {
+		dev_err(&rdev->dev,
+			"Failed to set mt6358 buck mode: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev)
+{
+	struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+	int ret, regval;
+
+	ret = regmap_read(rdev->regmap, info->modeset_reg, &regval);
+	if (ret != 0) {
+		dev_err(&rdev->dev,
+			"Failed to get mt6358 buck mode: %d\n", ret);
+		return ret;
+	}
+
+	switch ((regval & info->modeset_mask) >> info->modeset_shift) {
+	case MT6358_BUCK_MODE_AUTO:
+		return REGULATOR_MODE_NORMAL;
+	case MT6358_BUCK_MODE_FORCE_PWM:
+		return REGULATOR_MODE_FAST;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct regulator_ops mt6358_volt_range_ops = {
+	.list_voltage = regulator_list_voltage_linear_range,
+	.map_voltage = regulator_map_voltage_linear_range,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_voltage_sel = mt6358_get_buck_voltage_sel,
+	.set_voltage_time_sel = regulator_set_voltage_time_sel,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+	.get_status = mt6358_get_status,
+	.set_mode = mt6358_regulator_set_mode,
+	.get_mode = mt6358_regulator_get_mode,
+};
+
+static const struct regulator_ops mt6358_volt_table_ops = {
+	.list_voltage = regulator_list_voltage_table,
+	.map_voltage = regulator_map_voltage_iterate,
+	.set_voltage_sel = mt6358_set_voltage_sel,
+	.get_voltage_sel = mt6358_get_voltage_sel,
+	.set_voltage_time_sel = regulator_set_voltage_time_sel,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+	.get_status = mt6358_get_status,
+};
+
+static const struct regulator_ops mt6358_volt_fixed_ops = {
+	.list_voltage = regulator_list_voltage_linear,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+	.get_status = mt6358_get_status,
+};
+
+/* The array is indexed by id(MT6358_ID_XXX) */
+static struct mt6358_regulator_info mt6358_regulators[] = {
+	MT6358_BUCK("buck_vdram1", VDRAM1, 500000, 2087500, 12500,
+		    buck_volt_range2, 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f,
+		    0, MT6358_VDRAM1_ANA_CON0, 8),
+	MT6358_BUCK("buck_vcore", VCORE, 500000, 1293750, 6250,
+		    buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f,
+		    0, MT6358_VCORE_VGPU_ANA_CON0, 1),
+	MT6358_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
+		    buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, 0,
+		    MT6358_VPA_ANA_CON0, 3),
+	MT6358_BUCK("buck_vproc11", VPROC11, 500000, 1293750, 6250,
+		    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f,
+		    0, MT6358_VPROC_ANA_CON0, 1),
+	MT6358_BUCK("buck_vproc12", VPROC12, 500000, 1293750, 6250,
+		    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f,
+		    0, MT6358_VPROC_ANA_CON0, 2),
+	MT6358_BUCK("buck_vgpu", VGPU, 500000, 1293750, 6250,
+		    buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_DBG0, 0x7f, 0,
+		    MT6358_VCORE_VGPU_ANA_CON0, 2),
+	MT6358_BUCK("buck_vs2", VS2, 500000, 2087500, 12500,
+		    buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, 0,
+		    MT6358_VS2_ANA_CON0, 8),
+	MT6358_BUCK("buck_vmodem", VMODEM, 500000, 1293750, 6250,
+		    buck_volt_range1, 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f,
+		    0, MT6358_VMODEM_ANA_CON0, 8),
+	MT6358_BUCK("buck_vs1", VS1, 1000000, 2587500, 12500,
+		    buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, 0,
+		    MT6358_VS1_ANA_CON0, 8),
+	MT6358_REG_FIXED("ldo_vrf12", VRF12,
+			 MT6358_LDO_VRF12_CON0, 0, 1200000),
+	MT6358_REG_FIXED("ldo_vio18", VIO18,
+			 MT6358_LDO_VIO18_CON0, 0, 1800000),
+	MT6358_REG_FIXED("ldo_vcamio", VCAMIO,
+			 MT6358_LDO_VCAMIO_CON0, 0, 1800000),
+	MT6358_REG_FIXED("ldo_vcn18", VCN18, MT6358_LDO_VCN18_CON0, 0, 1800000),
+	MT6358_REG_FIXED("ldo_vfe28", VFE28, MT6358_LDO_VFE28_CON0, 0, 2800000),
+	MT6358_REG_FIXED("ldo_vcn28", VCN28, MT6358_LDO_VCN28_CON0, 0, 2800000),
+	MT6358_REG_FIXED("ldo_vxo22", VXO22, MT6358_LDO_VXO22_CON0, 0, 2200000),
+	MT6358_REG_FIXED("ldo_vaux18", VAUX18,
+			 MT6358_LDO_VAUX18_CON0, 0, 1800000),
+	MT6358_REG_FIXED("ldo_vbif28", VBIF28,
+			 MT6358_LDO_VBIF28_CON0, 0, 2800000),
+	MT6358_REG_FIXED("ldo_vio28", VIO28, MT6358_LDO_VIO28_CON0, 0, 2800000),
+	MT6358_REG_FIXED("ldo_va12", VA12, MT6358_LDO_VA12_CON0, 0, 1200000),
+	MT6358_REG_FIXED("ldo_vrf18", VRF18, MT6358_LDO_VRF18_CON0, 0, 1800000),
+	MT6358_REG_FIXED("ldo_vaud28", VAUD28,
+			 MT6358_LDO_VAUD28_CON0, 0, 2800000),
+	MT6358_LDO("ldo_vdram2", VDRAM2, vdram2_voltages, vdram2_idx,
+		   MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0x10, 0),
+	MT6358_LDO("ldo_vsim1", VSIM1, vsim1_voltages, vsim1_idx,
+		   MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00, 8),
+	MT6358_LDO("ldo_vibr", VIBR, vibr_voltages, vibr_idx,
+		   MT6358_LDO_VIBR_CON0, 0, MT6358_VIBR_ANA_CON0, 0xf00, 8),
+	MT6358_LDO("ldo_vusb", VUSB, vusb_voltages, vusb_idx,
+		   MT6358_LDO_VUSB_CON0_0, 0, MT6358_VUSB_ANA_CON0, 0x700, 8),
+	MT6358_LDO("ldo_vcamd", VCAMD, vcamd_voltages, vcamd_idx,
+		   MT6358_LDO_VCAMD_CON0, 0, MT6358_VCAMD_ANA_CON0, 0xf00, 8),
+	MT6358_LDO("ldo_vefuse", VEFUSE, vefuse_voltages, vefuse_idx,
+		   MT6358_LDO_VEFUSE_CON0, 0, MT6358_VEFUSE_ANA_CON0, 0xf00, 8),
+	MT6358_LDO("ldo_vmch", VMCH, vmch_voltages, vmch_idx,
+		   MT6358_LDO_VMCH_CON0, 0, MT6358_VMCH_ANA_CON0, 0x700, 8),
+	MT6358_LDO("ldo_vcama1", VCAMA1, vcama1_voltages, vcama1_idx,
+		   MT6358_LDO_VCAMA1_CON0, 0, MT6358_VCAMA1_ANA_CON0, 0xf00, 8),
+	MT6358_LDO("ldo_vemc", VEMC, vemc_voltages, vemc_idx,
+		   MT6358_LDO_VEMC_CON0, 0, MT6358_VEMC_ANA_CON0, 0x700, 8),
+	MT6358_LDO("ldo_vcn33_bt", VCN33_BT, vcn33_bt_wifi_voltages,
+		   vcn33_bt_wifi_idx, MT6358_LDO_VCN33_CON0_0,
+		   0, MT6358_VCN33_ANA_CON0, 0x300, 8),
+	MT6358_LDO("ldo_vcn33_wifi", VCN33_WIFI, vcn33_bt_wifi_voltages,
+		   vcn33_bt_wifi_idx, MT6358_LDO_VCN33_CON0_1,
+		   0, MT6358_VCN33_ANA_CON0, 0x300, 8),
+	MT6358_LDO("ldo_vcama2", VCAMA2, vcama2_voltages, vcama2_idx,
+		   MT6358_LDO_VCAMA2_CON0, 0, MT6358_VCAMA2_ANA_CON0, 0xf00, 8),
+	MT6358_LDO("ldo_vmc", VMC, vmc_voltages, vmc_idx,
+		   MT6358_LDO_VMC_CON0, 0, MT6358_VMC_ANA_CON0, 0xf00, 8),
+	MT6358_LDO("ldo_vldo28", VLDO28, vldo28_voltages, vldo28_idx,
+		   MT6358_LDO_VLDO28_CON0_0, 0,
+		   MT6358_VLDO28_ANA_CON0, 0x300, 8),
+	MT6358_LDO("ldo_vsim2", VSIM2, vsim2_voltages, vsim2_idx,
+		   MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00, 8),
+	MT6358_LDO1("ldo_vsram_proc11", VSRAM_PROC11, 500000, 1293750, 6250,
+		    buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f, 8,
+		    MT6358_LDO_VSRAM_CON0, 0x7f),
+	MT6358_LDO1("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750, 6250,
+		    buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f, 8,
+		    MT6358_LDO_VSRAM_CON2, 0x7f),
+	MT6358_LDO1("ldo_vsram_gpu", VSRAM_GPU, 500000, 1293750, 6250,
+		    buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f, 8,
+		    MT6358_LDO_VSRAM_CON3, 0x7f),
+	MT6358_LDO1("ldo_vsram_proc12", VSRAM_PROC12, 500000, 1293750, 6250,
+		    buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f, 8,
+		    MT6358_LDO_VSRAM_CON1, 0x7f),
+};
+
+static int mt6358_regulator_probe(struct platform_device *pdev)
+{
+	struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+	struct regulator_config config = {};
+	struct regulator_dev *rdev;
+	int i;
+
+	for (i = 0; i < MT6358_MAX_REGULATOR; i++) {
+		config.dev = &pdev->dev;
+		config.driver_data = &mt6358_regulators[i];
+		config.regmap = mt6397->regmap;
+
+		rdev = devm_regulator_register(&pdev->dev,
+					       &mt6358_regulators[i].desc,
+					       &config);
+		if (IS_ERR(rdev)) {
+			dev_err(&pdev->dev, "failed to register %s\n",
+				mt6358_regulators[i].desc.name);
+			return PTR_ERR(rdev);
+		}
+	}
+
+	return 0;
+}
+
+static const struct platform_device_id mt6358_platform_ids[] = {
+	{"mt6358-regulator", 0},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6358_platform_ids);
+
+static struct platform_driver mt6358_regulator_driver = {
+	.driver = {
+		.name = "mt6358-regulator",
+	},
+	.probe = mt6358_regulator_probe,
+	.id_table = mt6358_platform_ids,
+};
+
+module_platform_driver(mt6358_regulator_driver);
+
+MODULE_AUTHOR("Hsin-Hsiung Wang <hsin-hsiung.wang@mediatek.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6358 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/mt6392-regulator.c b/drivers/regulator/mt6392-regulator.c
new file mode 100644
index 0000000..2b7dcf3
--- /dev/null
+++ b/drivers/regulator/mt6392-regulator.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6392/registers.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6392-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MT6392_BUCK_MODE_AUTO	0
+#define MT6392_BUCK_MODE_FORCE_PWM	1
+#define MT6392_LDO_MODE_NORMAL	0
+#define MT6392_LDO_MODE_LP	1
+
+/*
+ * MT6392 regulators' information
+ *
+ * @desc: standard fields of regulator description.
+ * @qi: Mask for querying the enable status of a regulator.
+ * @vselon_reg: Voltage-select register used while the buck is in
+ *              hardware control mode.
+ * @vselctrl_reg: Register that selects the buck's voltage control mode.
+ * @vselctrl_mask: Mask for querying the buck's voltage control mode.
+ */
+struct mt6392_regulator_info {
+	struct regulator_desc desc;
+	u32 qi;
+	u32 vselon_reg;
+	u32 vselctrl_reg;
+	u32 vselctrl_mask;
+	u32 modeset_reg;
+	u32 modeset_mask;
+};
+
+#define MT6392_BUCK(match, vreg, min, max, step, volt_ranges, enreg,	\
+		vosel, vosel_mask, voselon, vosel_ctrl,			\
+		_modeset_reg, _modeset_mask)				\
+[MT6392_ID_##vreg] = {							\
+	.desc = {							\
+		.name = #vreg,						\
+		.of_match = of_match_ptr(match),			\
+		.ops = &mt6392_volt_range_ops,				\
+		.type = REGULATOR_VOLTAGE,				\
+		.id = MT6392_ID_##vreg,					\
+		.owner = THIS_MODULE,					\
+		.n_voltages = (max - min)/step + 1,			\
+		.linear_ranges = volt_ranges,				\
+		.n_linear_ranges = ARRAY_SIZE(volt_ranges),		\
+		.vsel_reg = vosel,					\
+		.vsel_mask = vosel_mask,				\
+		.enable_reg = enreg,					\
+		.enable_mask = BIT(0),					\
+	},								\
+	.qi = BIT(13),							\
+	.vselon_reg = voselon,						\
+	.vselctrl_reg = vosel_ctrl,					\
+	.vselctrl_mask = BIT(1),					\
+	.modeset_reg = _modeset_reg,					\
+	.modeset_mask = _modeset_mask,					\
+}
+
+#define MT6392_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel,	\
+		vosel_mask, _modeset_reg, _modeset_mask)		\
+[MT6392_ID_##vreg] = {							\
+	.desc = {							\
+		.name = #vreg,						\
+		.of_match = of_match_ptr(match),			\
+		.ops = &mt6392_volt_table_ops,				\
+		.type = REGULATOR_VOLTAGE,				\
+		.id = MT6392_ID_##vreg,					\
+		.owner = THIS_MODULE,					\
+		.n_voltages = ARRAY_SIZE(ldo_volt_table),		\
+		.volt_table = ldo_volt_table,				\
+		.vsel_reg = vosel,					\
+		.vsel_mask = vosel_mask,				\
+		.enable_reg = enreg,					\
+		.enable_mask = BIT(enbit),				\
+	},								\
+	.qi = BIT(15),							\
+	.modeset_reg = _modeset_reg,					\
+	.modeset_mask = _modeset_mask,					\
+}
+
+#define MT6392_REG_FIXED(match, vreg, enreg, enbit, volt,		\
+		_modeset_reg, _modeset_mask)				\
+[MT6392_ID_##vreg] = {							\
+	.desc = {							\
+		.name = #vreg,						\
+		.of_match = of_match_ptr(match),			\
+		.ops = &mt6392_volt_fixed_ops,				\
+		.type = REGULATOR_VOLTAGE,				\
+		.id = MT6392_ID_##vreg,					\
+		.owner = THIS_MODULE,					\
+		.n_voltages = 1,					\
+		.enable_reg = enreg,					\
+		.enable_mask = BIT(enbit),				\
+		.min_uV = volt,						\
+	},								\
+	.qi = BIT(15),							\
+	.modeset_reg = _modeset_reg,					\
+	.modeset_mask = _modeset_mask,					\
+}
+
+static const struct regulator_linear_range buck_volt_range1[] = {
+	REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
+};
+
+static const struct regulator_linear_range buck_volt_range2[] = {
+	REGULATOR_LINEAR_RANGE(1400000, 0, 0x7f, 12500),
+};
+
+static const u32 ldo_volt_table1[] = {
+	1800000, 1900000, 2000000, 2200000,
+};
+
+static const u32 ldo_volt_table2[] = {
+	3300000, 3400000, 3500000, 3600000,
+};
+
+static const u32 ldo_volt_table3[] = {
+	1800000, 3300000,
+};
+
+static const u32 ldo_volt_table4[] = {
+	3000000, 3300000,
+};
+
+static const u32 ldo_volt_table5[] = {
+	1200000, 1300000, 1500000, 1800000, 2000000, 2800000, 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table6[] = {
+	1240000, 1390000,
+};
+
+static const u32 ldo_volt_table7[] = {
+	1200000, 1300000, 1500000, 1800000,
+};
+
+static const u32 ldo_volt_table8[] = {
+	1800000, 2000000,
+};
+
+static int mt6392_get_status(struct regulator_dev *rdev)
+{
+	int ret;
+	u32 regval;
+	struct mt6392_regulator_info *info = rdev_get_drvdata(rdev);
+
+	ret = regmap_read(rdev->regmap, info->desc.enable_reg, &regval);
+	if (ret != 0) {
+		dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+		return ret;
+	}
+
+	return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static int mt6392_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	int val = 0;
+	struct mt6392_regulator_info *info = rdev_get_drvdata(rdev);
+
+	if (!info->modeset_mask) {
+		dev_err(&rdev->dev, "regulator %s doesn't support set_mode\n",
+			info->desc.name);
+		return -EINVAL;
+	}
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		val = MT6392_BUCK_MODE_FORCE_PWM;
+		break;
+	case REGULATOR_MODE_NORMAL:
+		val = MT6392_BUCK_MODE_AUTO;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	val <<= ffs(info->modeset_mask) - 1;
+
+	return regmap_update_bits(rdev->regmap, info->modeset_reg,
+				  info->modeset_mask, val);
+}
+
+static unsigned int mt6392_buck_get_mode(struct regulator_dev *rdev)
+{
+	unsigned int val;
+	unsigned int mode;
+	int ret;
+	struct mt6392_regulator_info *info = rdev_get_drvdata(rdev);
+
+	if (!info->modeset_mask) {
+		dev_err(&rdev->dev, "regulator %s doesn't support get_mode\n",
+			info->desc.name);
+		return -EINVAL;
+	}
+
+	ret = regmap_read(rdev->regmap, info->modeset_reg, &val);
+	if (ret < 0)
+		return ret;
+
+	val &= info->modeset_mask;
+	val >>= ffs(info->modeset_mask) - 1;
+
+	if (val & 0x1)
+		mode = REGULATOR_MODE_FAST;
+	else
+		mode = REGULATOR_MODE_NORMAL;
+
+	return mode;
+}
+
+static int mt6392_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	int ret, val = 0;
+	struct mt6392_regulator_info *info = rdev_get_drvdata(rdev);
+
+	if (!info->modeset_mask) {
+		dev_err(&rdev->dev, "regulator %s doesn't support set_mode\n",
+			info->desc.name);
+		return -EINVAL;
+	}
+
+	switch (mode) {
+	case REGULATOR_MODE_STANDBY:
+		val = MT6392_LDO_MODE_LP;
+		break;
+	case REGULATOR_MODE_NORMAL:
+		val = MT6392_LDO_MODE_NORMAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	val <<= ffs(info->modeset_mask) - 1;
+
+	ret = regmap_update_bits(rdev->regmap, info->modeset_reg,
+				  info->modeset_mask, val);
+
+	return ret;
+}
+
+static unsigned int mt6392_ldo_get_mode(struct regulator_dev *rdev)
+{
+	unsigned int val;
+	unsigned int mode;
+	int ret;
+	struct mt6392_regulator_info *info = rdev_get_drvdata(rdev);
+
+	if (!info->modeset_mask) {
+		dev_err(&rdev->dev, "regulator %s doesn't support get_mode\n",
+			info->desc.name);
+		return -EINVAL;
+	}
+
+	ret = regmap_read(rdev->regmap, info->modeset_reg, &val);
+	if (ret < 0)
+		return ret;
+
+	val &= info->modeset_mask;
+	val >>= ffs(info->modeset_mask) - 1;
+
+	if (val & 0x1)
+		mode = REGULATOR_MODE_STANDBY;
+	else
+		mode = REGULATOR_MODE_NORMAL;
+
+	return mode;
+}
+
+static const struct regulator_ops mt6392_volt_range_ops = {
+	.list_voltage = regulator_list_voltage_linear_range,
+	.map_voltage = regulator_map_voltage_linear_range,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_time_sel = regulator_set_voltage_time_sel,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+	.get_status = mt6392_get_status,
+	.set_mode = mt6392_buck_set_mode,
+	.get_mode = mt6392_buck_get_mode,
+};
+
+static const struct regulator_ops mt6392_volt_table_ops = {
+	.list_voltage = regulator_list_voltage_table,
+	.map_voltage = regulator_map_voltage_iterate,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_time_sel = regulator_set_voltage_time_sel,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+	.get_status = mt6392_get_status,
+	.set_mode = mt6392_ldo_set_mode,
+	.get_mode = mt6392_ldo_get_mode,
+};
+
+static const struct regulator_ops mt6392_volt_fixed_ops = {
+	.list_voltage = regulator_list_voltage_linear,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+	.get_status = mt6392_get_status,
+	.set_mode = mt6392_ldo_set_mode,
+	.get_mode = mt6392_ldo_get_mode,
+};
+
+/* The array is indexed by id(MT6392_ID_XXX) */
+static struct mt6392_regulator_info mt6392_regulators[] = {
+	MT6392_BUCK("buck-vproc", VPROC, 700000, 1493750, 6250,
+		buck_volt_range1, MT6392_VPROC_CON7, MT6392_VPROC_CON9, 0x7f,
+		MT6392_VPROC_CON10, MT6392_VPROC_CON5, MT6392_VPROC_CON2,
+		0x100),
+	MT6392_BUCK("buck-vsys", VSYS, 1400000, 2987500, 12500,
+		buck_volt_range2, MT6392_VSYS_CON7, MT6392_VSYS_CON9, 0x7f,
+		MT6392_VSYS_CON10, MT6392_VSYS_CON5, MT6392_VSYS_CON2, 0x100),
+	MT6392_BUCK("buck-vcore", VCORE, 700000, 1493750, 6250,
+		buck_volt_range1, MT6392_VCORE_CON7, MT6392_VCORE_CON9, 0x7f,
+		MT6392_VCORE_CON10, MT6392_VCORE_CON5, MT6392_VCORE_CON2,
+		0x100),
+	MT6392_REG_FIXED("ldo-vxo22", VXO22, MT6392_ANALDO_CON1, 10, 2200000,
+		MT6392_ANALDO_CON1, 0x2),
+	MT6392_LDO("ldo-vaud22", VAUD22, ldo_volt_table1,
+		MT6392_ANALDO_CON2, 14, MT6392_ANALDO_CON8, 0x60,
+		MT6392_ANALDO_CON2, 0x2),
+	MT6392_REG_FIXED("ldo-vcama", VCAMA, MT6392_ANALDO_CON4, 15, 2800000,
+		-1, 0),
+	MT6392_REG_FIXED("ldo-vaud28", VAUD28, MT6392_ANALDO_CON23, 14, 2800000,
+		MT6392_ANALDO_CON23, 0x2),
+	MT6392_REG_FIXED("ldo-vadc18", VADC18, MT6392_ANALDO_CON25, 14, 1800000,
+		MT6392_ANALDO_CON25, 0x2),
+	MT6392_LDO("ldo-vcn35", VCN35, ldo_volt_table2,
+		MT6392_ANALDO_CON21, 12, MT6392_ANALDO_CON16, 0xC,
+		MT6392_ANALDO_CON21, 0x2),
+	MT6392_REG_FIXED("ldo-vio28", VIO28, MT6392_DIGLDO_CON0, 14, 2800000,
+		MT6392_DIGLDO_CON0, 0x2),
+	MT6392_REG_FIXED("ldo-vusb", VUSB, MT6392_DIGLDO_CON2, 14, 3300000,
+		MT6392_DIGLDO_CON2, 0x2),
+	MT6392_LDO("ldo-vmc", VMC, ldo_volt_table3,
+		MT6392_DIGLDO_CON3, 12, MT6392_DIGLDO_CON24, 0x10,
+		MT6392_DIGLDO_CON3, 0x2),
+	MT6392_LDO("ldo-vmch", VMCH, ldo_volt_table4,
+		MT6392_DIGLDO_CON5, 14, MT6392_DIGLDO_CON26, 0x80,
+		MT6392_DIGLDO_CON5, 0x2),
+	MT6392_LDO("ldo-vemc3v3", VEMC3V3, ldo_volt_table4,
+		MT6392_DIGLDO_CON6, 14, MT6392_DIGLDO_CON27, 0x80,
+		MT6392_DIGLDO_CON6, 0x2),
+	MT6392_LDO("ldo-vgp1", VGP1, ldo_volt_table5,
+		MT6392_DIGLDO_CON7, 15, MT6392_DIGLDO_CON28, 0xE0,
+		MT6392_DIGLDO_CON7, 0x2),
+	MT6392_LDO("ldo-vgp2", VGP2, ldo_volt_table5,
+		MT6392_DIGLDO_CON8, 15, MT6392_DIGLDO_CON29, 0xE0,
+		MT6392_DIGLDO_CON8, 0x2),
+	MT6392_REG_FIXED("ldo-vcn18", VCN18, MT6392_DIGLDO_CON11, 14, 1800000,
+		MT6392_DIGLDO_CON11, 0x2),
+	MT6392_LDO("ldo-vcamaf", VCAMAF, ldo_volt_table5,
+		MT6392_DIGLDO_CON31, 15, MT6392_DIGLDO_CON32, 0xE0,
+		MT6392_DIGLDO_CON31, 0x2),
+	MT6392_LDO("ldo-vm", VM, ldo_volt_table6,
+		MT6392_DIGLDO_CON47, 14, MT6392_DIGLDO_CON48, 0x30,
+		MT6392_DIGLDO_CON47, 0x2),
+	MT6392_REG_FIXED("ldo-vio18", VIO18, MT6392_DIGLDO_CON49, 14, 1800000,
+		MT6392_DIGLDO_CON49, 0x2),
+	MT6392_LDO("ldo-vcamd", VCAMD, ldo_volt_table7,
+		MT6392_DIGLDO_CON51, 14, MT6392_DIGLDO_CON52, 0x60,
+		MT6392_DIGLDO_CON51, 0x2),
+	MT6392_REG_FIXED("ldo-vcamio", VCAMIO, MT6392_DIGLDO_CON53, 14, 1800000,
+		MT6392_DIGLDO_CON53, 0x2),
+	MT6392_REG_FIXED("ldo-vm25", VM25, MT6392_DIGLDO_CON55, 14, 2500000,
+		MT6392_DIGLDO_CON55, 0x2),
+	MT6392_LDO("ldo-vefuse", VEFUSE, ldo_volt_table8,
+		MT6392_DIGLDO_CON57, 14, MT6392_DIGLDO_CON58, 0x10,
+		MT6392_DIGLDO_CON57, 0x2),
+};
+
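+/*
+ * Each buck has two voltage-select registers, and a control bit reports
+ * which one the hardware is honouring. Point the regulator descriptor at
+ * the active register so voltage get/set operations hit the live value.
+ */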
+static int mt6392_set_buck_vosel_reg(struct platform_device *pdev)
+{
+	struct mt6397_chip *mt6392 = dev_get_drvdata(pdev->dev.parent);
+	int i;
+	u32 regval;
+
+	for (i = 0; i < MT6392_MAX_REGULATOR; i++) {
+		if (mt6392_regulators[i].vselctrl_reg) {
+			if (regmap_read(mt6392->regmap,
+				mt6392_regulators[i].vselctrl_reg,
+				&regval) < 0) {
+				dev_err(&pdev->dev,
+					"Failed to read buck ctrl\n");
+				return -EIO;
+			}
+
+			if (regval & mt6392_regulators[i].vselctrl_mask) {
+				mt6392_regulators[i].desc.vsel_reg =
+				mt6392_regulators[i].vselon_reg;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int mt6392_regulator_probe(struct platform_device *pdev)
+{
+	struct mt6397_chip *mt6392 = dev_get_drvdata(pdev->dev.parent);
+	struct regulator_config config = {};
+	struct regulator_dev *rdev;
+	struct regulation_constraints *c;
+	int i;
+	u32 reg_value;
+
+	/* Query buck controller to select activated voltage register part */
+	if (mt6392_set_buck_vosel_reg(pdev))
+		return -EIO;
+
+	/* Read PMIC chip revision to update constraints and voltage table */
+	if (regmap_read(mt6392->regmap, MT6392_CID, &reg_value) < 0) {
+		dev_err(&pdev->dev, "Failed to read Chip ID\n");
+		return -EIO;
+	}
+
+	dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);
+
+	for (i = 0; i < MT6392_MAX_REGULATOR; i++) {
+		config.dev = &pdev->dev;
+		config.driver_data = &mt6392_regulators[i];
+		config.regmap = mt6392->regmap;
+		rdev = devm_regulator_register(&pdev->dev,
+				&mt6392_regulators[i].desc, &config);
+		if (IS_ERR(rdev)) {
+			dev_err(&pdev->dev, "failed to register %s\n",
+				mt6392_regulators[i].desc.name);
+			return PTR_ERR(rdev);
+		}
+
+		/* Constrain board-specific capabilities according to what
+		 * this driver and the chip itself can actually do.
+		 */
+		c = rdev->constraints;
+		c->valid_modes_mask |= REGULATOR_MODE_NORMAL|
+			REGULATOR_MODE_STANDBY | REGULATOR_MODE_FAST;
+		c->valid_ops_mask |= REGULATOR_CHANGE_MODE;
+	}
+	return 0;
+}
+
+static const struct platform_device_id mt6392_platform_ids[] = {
+	{"mt6392-regulator", 0},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6392_platform_ids);
+
+static const struct of_device_id mt6392_of_match[] = {
+	{ .compatible = "mediatek,mt6392-regulator", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt6392_of_match);
+
+static struct platform_driver mt6392_regulator_driver = {
+	.driver = {
+		.name = "mt6392-regulator",
+		.of_match_table = of_match_ptr(mt6392_of_match),
+	},
+	.probe = mt6392_regulator_probe,
+	.id_table = mt6392_platform_ids,
+};
+
+module_platform_driver(mt6392_regulator_driver);
+
+MODULE_AUTHOR("Chen Zhong <chen.zhong@mediatek.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6392 PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 964ed91..498c793 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -20,6 +20,7 @@
 #include <linux/irqdomain.h>
 #include <linux/platform_device.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/io.h>
 #include <linux/mfd/mt6397/core.h>
@@ -27,7 +28,8 @@
 #define RTC_BBPU		0x0000
 #define RTC_BBPU_CBUSY		BIT(6)
 
-#define RTC_WRTGR		0x003c
+#define RTC_WRTGR_MT6358	0x3a
+#define RTC_WRTGR_MT6397	0x3c
 
 #define RTC_IRQ_STA		0x0002
 #define RTC_IRQ_STA_AL		BIT(0)
@@ -71,6 +73,10 @@
 #define RTC_NUM_YEARS		128
 #define RTC_MIN_YEAR_OFFSET	(RTC_MIN_YEAR - RTC_BASE_YEAR)
 
+struct mtk_rtc_compatible {
+	u32			wrtgr_addr;
+};
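+
+/*
+ * Per-compatible match data: MT6358-family PMICs place the write-trigger
+ * (WRTGR) register at a different offset than MT6397, so the offset is
+ * looked up from the OF match data at probe time.
+ */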
+
 struct mt6397_rtc {
 	struct device		*dev;
 	struct rtc_device	*rtc_dev;
@@ -78,15 +84,34 @@
 	struct regmap		*regmap;
 	int			irq;
 	u32			addr_base;
+	const struct mtk_rtc_compatible *dev_comp;
 };
 
+static const struct mtk_rtc_compatible mt6358_rtc_compat = {
+	.wrtgr_addr = RTC_WRTGR_MT6358,
+};
+
+static const struct mtk_rtc_compatible mt6397_rtc_compat = {
+	.wrtgr_addr = RTC_WRTGR_MT6397,
+};
+
+static const struct of_device_id mt6397_rtc_of_match[] = {
+	{ .compatible = "mediatek,mt6358-rtc",
+		.data = (void *)&mt6358_rtc_compat, },
+	{ .compatible = "mediatek,mt6397-rtc",
+		.data = (void *)&mt6397_rtc_compat, },
+	{}
+};
+MODULE_DEVICE_TABLE(of, mt6397_rtc_of_match);
+
 static int mtk_rtc_write_trigger(struct mt6397_rtc *rtc)
 {
 	unsigned long timeout = jiffies + HZ;
 	int ret;
 	u32 data;
 
-	ret = regmap_write(rtc->regmap, rtc->addr_base + RTC_WRTGR, 1);
+	ret = regmap_write(rtc->regmap,
+			   rtc->addr_base + rtc->dev_comp->wrtgr_addr, 1);
 	if (ret < 0)
 		return ret;
 
@@ -332,6 +357,7 @@
 	struct resource *res;
 	struct mt6397_chip *mt6397_chip = dev_get_drvdata(pdev->dev.parent);
 	struct mt6397_rtc *rtc;
+	const struct of_device_id *of_id;
 	int ret;
 
 	rtc = devm_kzalloc(&pdev->dev, sizeof(struct mt6397_rtc), GFP_KERNEL);
@@ -341,6 +367,13 @@
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	rtc->addr_base = res->start;
 
+	of_id = of_match_device(mt6397_rtc_of_match, &pdev->dev);
+	if (!of_id) {
+		dev_err(&pdev->dev, "Failed to probe of_node\n");
+		return -EINVAL;
+	}
+	rtc->dev_comp = of_id->data;
+
 	rtc->irq = platform_get_irq(pdev, 0);
 	if (rtc->irq < 0)
 		return rtc->irq;
@@ -416,12 +449,6 @@
 static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_rtc_suspend,
 			mt6397_rtc_resume);
 
-static const struct of_device_id mt6397_rtc_of_match[] = {
-	{ .compatible = "mediatek,mt6397-rtc", },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, mt6397_rtc_of_match);
-
 static struct platform_driver mtk_rtc_driver = {
 	.driver = {
 		.name = "mt6397-rtc",
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index a7d0667..17bd759 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -4,6 +4,18 @@
 menu "MediaTek SoC drivers"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
 
+config MTK_CMDQ
+	tristate "MediaTek CMDQ Support"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	select MAILBOX
+	select MTK_CMDQ_MBOX
+	select MTK_INFRACFG
+	help
+	  Say yes here to add support for the MediaTek Command Queue (CMDQ)
+	  driver. The CMDQ is used to help read/write registers under strict
+	  timing constraints, such as updating the display configuration
+	  during vblank.
+
 config MTK_INFRACFG
 	bool "MediaTek INFRACFG Support"
 	select REGMAP
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index 12998b0..b9dbad6 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -1,3 +1,4 @@
-obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
+obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o
+obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o mtk-scpsys-ext.o
 obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
 obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
new file mode 100644
index 0000000..ad52ac3
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+
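+// A minimal usage sketch of this helper API (the client index and the
+// subsys/offset/value arguments below are hypothetical; error handling
+// is omitted for brevity):
+//
+//	struct cmdq_client *cl = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
+//	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
+//
+//	cmdq_pkt_write(pkt, disp_subsys, disp_offset, val);
+//	cmdq_pkt_flush(pkt);	// blocks until the GCE has run the packet
+//	cmdq_pkt_destroy(pkt);
+//	cmdq_mbox_destroy(cl);
+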
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/mailbox_controller.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
+#define CMDQ_EOC_IRQ_EN		BIT(0)
+#define CMDQ_EOC_CMD		((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
+				<< 32 | CMDQ_EOC_IRQ_EN)
+
+struct cmdq_instruction {
+	union {
+		u32 value;
+		u32 mask;
+	};
+	union {
+		u16 offset;
+		u16 event;
+	};
+	u8 subsys;
+	u8 op;
+};
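+
+/*
+ * Note: assuming a little-endian CPU (as on these SoCs), the struct above
+ * maps directly onto one 64-bit GCE instruction: op in the most
+ * significant byte, then subsys, the 16-bit offset/event, and the 32-bit
+ * value/mask in the low word (compare CMDQ_EOC_CMD above).
+ */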
+
+int cmdq_dev_get_client_reg(struct device *dev,
+			    struct cmdq_client_reg *client_reg, int idx)
+{
+	struct of_phandle_args spec;
+	int err;
+
+	if (!client_reg)
+		return -ENOENT;
+
+	err = of_parse_phandle_with_args(dev->of_node, "mediatek,gce-client-reg",
+					 "#subsys-cells", idx, &spec);
+	if (err < 0) {
+		dev_err(dev,
+			"error %d can't parse gce-client-reg property (%d)",
+			err, idx);
+
+		return err;
+	}
+
+	client_reg->subsys = spec.args[0];
+	client_reg->offset = spec.args[1];
+	client_reg->size = spec.args[2];
+	of_node_put(spec.np);
+
+	return 0;
+}
+EXPORT_SYMBOL(cmdq_dev_get_client_reg);
+
+static void cmdq_client_timeout(struct timer_list *t)
+{
+	struct cmdq_client *client = from_timer(client, t, timer);
+
+	dev_err(client->client.dev, "cmdq timeout!\n");
+}
+
+struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
+{
+	struct cmdq_client *client;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	client->timeout_ms = timeout;
+	if (timeout != CMDQ_NO_TIMEOUT) {
+		spin_lock_init(&client->lock);
+		timer_setup(&client->timer, cmdq_client_timeout, 0);
+	}
+	client->pkt_cnt = 0;
+	client->client.dev = dev;
+	client->client.tx_block = false;
+	client->chan = mbox_request_channel(&client->client, index);
+
+	if (IS_ERR(client->chan)) {
+		long err;
+
+		dev_err(dev, "failed to request channel\n");
+		err = PTR_ERR(client->chan);
+		kfree(client);
+
+		return ERR_PTR(err);
+	}
+
+	return client;
+}
+EXPORT_SYMBOL(cmdq_mbox_create);
+
+void cmdq_mbox_destroy(struct cmdq_client *client)
+{
+	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
+		spin_lock(&client->lock);
+		del_timer_sync(&client->timer);
+		spin_unlock(&client->lock);
+	}
+	mbox_free_channel(client->chan);
+	kfree(client);
+}
+EXPORT_SYMBOL(cmdq_mbox_destroy);
+
+struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
+{
+	struct cmdq_pkt *pkt;
+	struct device *dev;
+	dma_addr_t dma_addr;
+
+	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+	if (!pkt)
+		return ERR_PTR(-ENOMEM);
+	pkt->va_base = kzalloc(size, GFP_KERNEL);
+	if (!pkt->va_base) {
+		kfree(pkt);
+		return ERR_PTR(-ENOMEM);
+	}
+	pkt->buf_size = size;
+	pkt->cl = (void *)client;
+
+	dev = client->chan->mbox->dev;
+	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
+				  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma_addr)) {
+		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+		kfree(pkt->va_base);
+		kfree(pkt);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pkt->pa_base = dma_addr;
+
+	return pkt;
+}
+EXPORT_SYMBOL(cmdq_pkt_create);
+
+void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+{
+	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+			 DMA_TO_DEVICE);
+	kfree(pkt->va_base);
+	kfree(pkt);
+}
+EXPORT_SYMBOL(cmdq_pkt_destroy);
+
+static struct cmdq_instruction *cmdq_pkt_append_command(struct cmdq_pkt *pkt)
+{
+	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
+		/*
+		 * If the allocated buffer size (pkt->buf_size) is used up,
+		 * the real required size (pkt->cmd_buf_size) keeps growing,
+		 * so that the user knows how much memory is ultimately needed
+		 * after appending all commands and flushing the packet.
+		 * Therefore, the user can call cmdq_pkt_create() again with
+		 * the real required buffer size.
+		 */
+		pkt->cmd_buf_size += CMDQ_INST_SIZE;
+		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
+			__func__, (u32)pkt->buf_size);
+		return NULL;
+	}
+
+	pkt->cmd_buf_size += CMDQ_INST_SIZE;
+
+	return pkt->va_base + pkt->cmd_buf_size - CMDQ_INST_SIZE;
+}
+
+int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
+{
+	struct cmdq_instruction *inst;
+
+	inst = cmdq_pkt_append_command(pkt);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->op = CMDQ_CODE_WRITE;
+	inst->value = value;
+	inst->offset = offset;
+	inst->subsys = subsys;
+
+	return 0;
+}
+EXPORT_SYMBOL(cmdq_pkt_write);
+
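+/*
+ * A masked write is encoded as two instructions: a MASK instruction
+ * carrying the inverted mask, followed by a WRITE whose offset has the
+ * low bit (CMDQ_WRITE_ENABLE_MASK) set so the mask just loaded gets
+ * applied.
+ */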
+int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
+			u16 offset, u32 value, u32 mask)
+{
+	struct cmdq_instruction *inst;
+	u32 offset_mask = offset;
+
+	if (mask != 0xffffffff) {
+		inst = cmdq_pkt_append_command(pkt);
+		if (!inst)
+			return -ENOMEM;
+
+		inst->op = CMDQ_CODE_MASK;
+		inst->mask = ~mask;
+		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
+	}
+
+	return cmdq_pkt_write(pkt, subsys, offset_mask, value);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_mask);
+
+int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
+{
+	struct cmdq_instruction *inst;
+
+	if (event >= CMDQ_MAX_EVENT)
+		return -EINVAL;
+
+	inst = cmdq_pkt_append_command(pkt);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->op = CMDQ_CODE_WFE;
+	inst->value = CMDQ_WFE_OPTION;
+	inst->event = event;
+
+	return 0;
+}
+EXPORT_SYMBOL(cmdq_pkt_wfe);
+
+int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
+{
+	struct cmdq_instruction *inst;
+
+	if (event >= CMDQ_MAX_EVENT)
+		return -EINVAL;
+
+	inst = cmdq_pkt_append_command(pkt);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->op = CMDQ_CODE_WFE;
+	inst->value = CMDQ_WFE_UPDATE;
+	inst->event = event;
+
+	return 0;
+}
+EXPORT_SYMBOL(cmdq_pkt_clear_event);
+
+int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
+		  u16 offset, u32 value, u32 mask)
+{
+	struct cmdq_instruction *inst;
+
+	if (mask != 0xffffffff) {
+		inst = cmdq_pkt_append_command(pkt);
+		if (!inst)
+			return -ENOMEM;
+
+		inst->op = CMDQ_CODE_MASK;
+		inst->value = ~mask;
+		offset |= CMDQ_WRITE_ENABLE_MASK;
+	}
+
+	inst = cmdq_pkt_append_command(pkt);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->op = CMDQ_CODE_POLL;
+	inst->value = value;
+	inst->offset = offset;
+	inst->subsys = subsys;
+
+	return 0;
+}
+EXPORT_SYMBOL(cmdq_pkt_poll);
+
+static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
+{
+	struct cmdq_instruction *inst;
+
+	inst = cmdq_pkt_append_command(pkt);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->op = CMDQ_CODE_EOC;
+	inst->value = CMDQ_EOC_IRQ_EN;
+
+	inst = cmdq_pkt_append_command(pkt);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->op = CMDQ_CODE_JUMP;
+	inst->value = CMDQ_JUMP_PASS;
+
+	return 0;
+}
+
+static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
+{
+	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
+	struct cmdq_task_cb *cb = &pkt->cb;
+	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
+		unsigned long flags = 0;
+
+		spin_lock_irqsave(&client->lock, flags);
+		if (--client->pkt_cnt == 0)
+			del_timer(&client->timer);
+		else
+			mod_timer(&client->timer, jiffies +
+				  msecs_to_jiffies(client->timeout_ms));
+		spin_unlock_irqrestore(&client->lock, flags);
+	}
+
+	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
+				pkt->cmd_buf_size, DMA_TO_DEVICE);
+	if (cb->cb) {
+		data.data = cb->data;
+		cb->cb(data);
+	}
+}
+
+int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
+			 void *data)
+{
+	int err;
+	unsigned long flags = 0;
+	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+	err = cmdq_pkt_finalize(pkt);
+	if (err < 0)
+		return err;
+
+	pkt->cb.cb = cb;
+	pkt->cb.data = data;
+	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
+	pkt->async_cb.data = pkt;
+
+	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
+				   pkt->cmd_buf_size, DMA_TO_DEVICE);
+
+	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
+		spin_lock_irqsave(&client->lock, flags);
+		if (client->pkt_cnt++ == 0)
+			mod_timer(&client->timer, jiffies +
+				  msecs_to_jiffies(client->timeout_ms));
+		spin_unlock_irqrestore(&client->lock, flags);
+	}
+
+	mbox_send_message(client->chan, pkt);
+	/* We can send next packet immediately, so just call txdone. */
+	mbox_client_txdone(client->chan, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL(cmdq_pkt_flush_async);
+
+struct cmdq_flush_completion {
+	struct completion cmplt;
+	bool err;
+};
+
+static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
+{
+	struct cmdq_flush_completion *cmplt;
+
+	cmplt = (struct cmdq_flush_completion *)data.data;
+	cmplt->err = data.sta != CMDQ_CB_NORMAL;
+	complete(&cmplt->cmplt);
+}
+
+int cmdq_pkt_flush(struct cmdq_pkt *pkt)
+{
+	struct cmdq_flush_completion cmplt;
+	int err;
+
+	init_completion(&cmplt.cmplt);
+	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
+	if (err < 0)
+		return err;
+	wait_for_completion(&cmplt.cmplt);
+
+	return cmplt.err ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(cmdq_pkt_flush);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 011a40b..58379d0 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -76,6 +76,13 @@
 #define PWRAP_SLV_CAP_SECURITY	BIT(2)
 #define HAS_CAP(_c, _x)	(((_c) & (_x)) == (_x))
 
+/* Capability flags indicating what a pwrap variant supports */
+#define PWRAP_CAP_BRIDGE	BIT(0)
+#define PWRAP_CAP_RESET		BIT(1)
+#define PWRAP_CAP_DCM		BIT(2)
+#define PWRAP_CAP_INT1_EN	BIT(3)
+#define PWRAP_CAP_WDT_SRC1	BIT(4)
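+/*
+ * A sketch of their use: an SoC variant declares e.g.
+ * .caps = PWRAP_CAP_BRIDGE | PWRAP_CAP_DCM in its pmic_wrapper_type,
+ * and the code checks them at runtime via
+ * HAS_CAP(wrp->master->caps, PWRAP_CAP_BRIDGE).
+ */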
+
 /* defines for slave device wrapper registers */
 enum dew_regs {
 	PWRAP_DEW_BASE,
@@ -91,6 +98,27 @@
 	PWRAP_DEW_CIPHER_MODE,
 	PWRAP_DEW_CIPHER_SWRST,
 
+	/* MT6323 only regs */
+	PWRAP_DEW_CIPHER_EN,
+	PWRAP_DEW_RDDMY_NO,
+
+	/* MT6358 only regs */
+	PWRAP_SMT_CON1,
+	PWRAP_DRV_CON1,
+	PWRAP_FILTER_CON0,
+	PWRAP_GPIO_PULLEN0_CLR,
+	PWRAP_RG_SPI_CON0,
+	PWRAP_RG_SPI_RECORD0,
+	PWRAP_RG_SPI_CON2,
+	PWRAP_RG_SPI_CON3,
+	PWRAP_RG_SPI_CON4,
+	PWRAP_RG_SPI_CON5,
+	PWRAP_RG_SPI_CON6,
+	PWRAP_RG_SPI_CON7,
+	PWRAP_RG_SPI_CON8,
+	PWRAP_RG_SPI_CON13,
+	PWRAP_SPISLV_KEY,
+
 	/* MT6397 only regs */
 	PWRAP_DEW_EVENT_OUT_EN,
 	PWRAP_DEW_EVENT_SRC_EN,
@@ -100,10 +128,6 @@
 	PWRAP_DEW_EVENT_TEST,
 	PWRAP_DEW_CIPHER_LOAD,
 	PWRAP_DEW_CIPHER_START,
-
-	/* MT6323 only regs */
-	PWRAP_DEW_CIPHER_EN,
-	PWRAP_DEW_RDDMY_NO,
 };
 
 static const u32 mt6323_regs[] = {
@@ -123,6 +147,64 @@
 	[PWRAP_DEW_RDDMY_NO] =		0x01a4,
 };
 
+static const u32 mt6351_regs[] = {
+	[PWRAP_DEW_DIO_EN] =		0x02F2,
+	[PWRAP_DEW_READ_TEST] =		0x02F4,
+	[PWRAP_DEW_WRITE_TEST] =	0x02F6,
+	[PWRAP_DEW_CRC_EN] =		0x02FA,
+	[PWRAP_DEW_CRC_VAL] =		0x02FC,
+	[PWRAP_DEW_CIPHER_KEY_SEL] =	0x0300,
+	[PWRAP_DEW_CIPHER_IV_SEL] =	0x0302,
+	[PWRAP_DEW_CIPHER_EN] =		0x0304,
+	[PWRAP_DEW_CIPHER_RDY] =	0x0306,
+	[PWRAP_DEW_CIPHER_MODE] =	0x0308,
+	[PWRAP_DEW_CIPHER_SWRST] =	0x030A,
+	[PWRAP_DEW_RDDMY_NO] =		0x030C,
+};
+
+static const u32 mt6357_regs[] = {
+	[PWRAP_DEW_DIO_EN] =            0x040A,
+	[PWRAP_DEW_READ_TEST] =         0x040C,
+	[PWRAP_DEW_WRITE_TEST] =        0x040E,
+	[PWRAP_DEW_CRC_EN] =            0x0412,
+	[PWRAP_DEW_CRC_VAL] =           0x0414,
+	[PWRAP_DEW_CIPHER_KEY_SEL] =    0x0418,
+	[PWRAP_DEW_CIPHER_IV_SEL] =     0x041A,
+	[PWRAP_DEW_CIPHER_EN] =         0x041C,
+	[PWRAP_DEW_CIPHER_RDY] =        0x041E,
+	[PWRAP_DEW_CIPHER_MODE] =       0x0420,
+	[PWRAP_DEW_CIPHER_SWRST] =      0x0422,
+	[PWRAP_DEW_RDDMY_NO] =          0x0424,
+};
+
+static const u32 mt6358_regs[] = {
+	[PWRAP_SMT_CON1] =		0x0030,
+	[PWRAP_DRV_CON1] =		0x0038,
+	[PWRAP_FILTER_CON0] =		0x0040,
+	[PWRAP_GPIO_PULLEN0_CLR] =	0x0098,
+	[PWRAP_RG_SPI_CON0] =		0x0408,
+	[PWRAP_RG_SPI_RECORD0] =	0x040a,
+	[PWRAP_DEW_DIO_EN] =		0x040c,
+	[PWRAP_DEW_READ_TEST]	=	0x040e,
+	[PWRAP_DEW_WRITE_TEST]	=	0x0410,
+	[PWRAP_DEW_CRC_EN] =		0x0414,
+	[PWRAP_DEW_CIPHER_KEY_SEL] =	0x041a,
+	[PWRAP_DEW_CIPHER_IV_SEL] =	0x041c,
+	[PWRAP_DEW_CIPHER_EN]	=	0x041e,
+	[PWRAP_DEW_CIPHER_RDY] =	0x0420,
+	[PWRAP_DEW_CIPHER_MODE] =	0x0422,
+	[PWRAP_DEW_CIPHER_SWRST] =	0x0424,
+	[PWRAP_RG_SPI_CON2] =		0x0432,
+	[PWRAP_RG_SPI_CON3] =		0x0434,
+	[PWRAP_RG_SPI_CON4] =		0x0436,
+	[PWRAP_RG_SPI_CON5] =		0x0438,
+	[PWRAP_RG_SPI_CON6] =		0x043a,
+	[PWRAP_RG_SPI_CON7] =		0x043c,
+	[PWRAP_RG_SPI_CON8] =		0x043e,
+	[PWRAP_RG_SPI_CON13] =		0x0448,
+	[PWRAP_SPISLV_KEY] =		0x044a,
+};
+
 static const u32 mt6397_regs[] = {
 	[PWRAP_DEW_BASE] =		0xbc00,
 	[PWRAP_DEW_EVENT_OUT_EN] =	0xbc00,
@@ -146,21 +228,6 @@
 	[PWRAP_DEW_CIPHER_SWRST] =	0xbc24,
 };
 
-static const u32 mt6351_regs[] = {
-	[PWRAP_DEW_DIO_EN] =		0x02F2,
-	[PWRAP_DEW_READ_TEST] =		0x02F4,
-	[PWRAP_DEW_WRITE_TEST] =	0x02F6,
-	[PWRAP_DEW_CRC_EN] =		0x02FA,
-	[PWRAP_DEW_CRC_VAL] =		0x02FC,
-	[PWRAP_DEW_CIPHER_KEY_SEL] =	0x0300,
-	[PWRAP_DEW_CIPHER_IV_SEL] =	0x0302,
-	[PWRAP_DEW_CIPHER_EN] =		0x0304,
-	[PWRAP_DEW_CIPHER_RDY] =	0x0306,
-	[PWRAP_DEW_CIPHER_MODE] =	0x0308,
-	[PWRAP_DEW_CIPHER_SWRST] =	0x030A,
-	[PWRAP_DEW_RDDMY_NO] =		0x030C,
-};
-
 enum pwrap_regs {
 	PWRAP_MUX_SEL,
 	PWRAP_WRAP_EN,
@@ -221,6 +288,8 @@
 	PWRAP_CIPHER_SWRST,
 	PWRAP_DCM_EN,
 	PWRAP_DCM_DBC_PRD,
+	PWRAP_EINT_STA0_ADR,
+	PWRAP_EINT_STA1_ADR,
 
 	/* MT2701 only regs */
 	PWRAP_ADC_CMD_ADDR,
@@ -230,8 +299,6 @@
 	PWRAP_ADC_RDATA_ADDR2,
 
 	/* MT7622 only regs */
-	PWRAP_EINT_STA0_ADR,
-	PWRAP_EINT_STA1_ADR,
 	PWRAP_STA,
 	PWRAP_CLR,
 	PWRAP_DVFS_ADR8,
@@ -293,6 +360,31 @@
 	PWRAP_DVFS_WDATA7,
 	PWRAP_SPMINF_STA,
 	PWRAP_CIPHER_EN,
+
+	/* MT8183 only regs */
+	PWRAP_SI_SAMPLE_CTRL,
+	PWRAP_CSLEXT_WRITE,
+	PWRAP_CSLEXT_READ,
+	PWRAP_EXT_CK_WRITE,
+	PWRAP_STAUPD_CTRL,
+	PWRAP_WACS_P2P_EN,
+	PWRAP_INIT_DONE_P2P,
+	PWRAP_WACS_MD32_EN,
+	PWRAP_INIT_DONE_MD32,
+	PWRAP_INT1_EN,
+	PWRAP_INT1_FLG,
+	PWRAP_INT1_CLR,
+	PWRAP_WDT_SRC_EN_1,
+	PWRAP_INT_GPS_AUXADC_CMD_ADDR,
+	PWRAP_INT_GPS_AUXADC_CMD,
+	PWRAP_INT_GPS_AUXADC_RDATA_ADDR,
+	PWRAP_EXT_GPS_AUXADC_RDATA_ADDR,
+	PWRAP_GPSINF_0_STA,
+	PWRAP_GPSINF_1_STA,
+
+	/* MT8516 only regs */
+	PWRAP_OP_TYPE,
+	PWRAP_MSB_FIRST,
 };
 
 static int mt2701_regs[] = {
@@ -381,6 +473,38 @@
 	[PWRAP_ADC_RDATA_ADDR2] =	0x154,
 };
 
+static int mt6765_regs[] = {
+	[PWRAP_MUX_SEL] =		0x0,
+	[PWRAP_WRAP_EN] =		0x4,
+	[PWRAP_DIO_EN] =		0x8,
+	[PWRAP_RDDMY] =			0x20,
+	[PWRAP_CSHEXT_WRITE] =		0x24,
+	[PWRAP_CSHEXT_READ] =		0x28,
+	[PWRAP_CSLEXT_START] =		0x2C,
+	[PWRAP_CSLEXT_END] =		0x30,
+	[PWRAP_STAUPD_PRD] =		0x3C,
+	[PWRAP_HARB_HPRIO] =		0x68,
+	[PWRAP_HIPRIO_ARB_EN] =		0x6C,
+	[PWRAP_MAN_EN] =		0x7C,
+	[PWRAP_MAN_CMD] =		0x80,
+	[PWRAP_WACS0_EN] =		0x8C,
+	[PWRAP_WACS1_EN] =		0x94,
+	[PWRAP_WACS2_EN] =		0x9C,
+	[PWRAP_INIT_DONE2] =		0xA0,
+	[PWRAP_WACS2_CMD] =		0xC20,
+	[PWRAP_WACS2_RDATA] =		0xC24,
+	[PWRAP_WACS2_VLDCLR] =		0xC28,
+	[PWRAP_INT_EN] =		0xB4,
+	[PWRAP_INT_FLG_RAW] =		0xB8,
+	[PWRAP_INT_FLG] =		0xBC,
+	[PWRAP_INT_CLR] =		0xC0,
+	[PWRAP_TIMER_EN] =		0xE8,
+	[PWRAP_WDT_UNIT] =		0xF0,
+	[PWRAP_WDT_SRC_EN] =		0xF4,
+	[PWRAP_DCM_EN] =		0x1DC,
+	[PWRAP_DCM_DBC_PRD] =		0x1E0,
+};
+
 static int mt6797_regs[] = {
 	[PWRAP_MUX_SEL] =		0x0,
 	[PWRAP_WRAP_EN] =		0x4,
@@ -526,6 +650,79 @@
 	[PWRAP_SPI2_CTRL] =		0x244,
 };
 
+static int mt8135_regs[] = {
+	[PWRAP_MUX_SEL] =		0x0,
+	[PWRAP_WRAP_EN] =		0x4,
+	[PWRAP_DIO_EN] =		0x8,
+	[PWRAP_SIDLY] =			0xc,
+	[PWRAP_CSHEXT] =		0x10,
+	[PWRAP_CSHEXT_WRITE] =		0x14,
+	[PWRAP_CSHEXT_READ] =		0x18,
+	[PWRAP_CSLEXT_START] =		0x1c,
+	[PWRAP_CSLEXT_END] =		0x20,
+	[PWRAP_STAUPD_PRD] =		0x24,
+	[PWRAP_STAUPD_GRPEN] =		0x28,
+	[PWRAP_STAUPD_MAN_TRIG] =	0x2c,
+	[PWRAP_STAUPD_STA] =		0x30,
+	[PWRAP_EVENT_IN_EN] =		0x34,
+	[PWRAP_EVENT_DST_EN] =		0x38,
+	[PWRAP_WRAP_STA] =		0x3c,
+	[PWRAP_RRARB_INIT] =		0x40,
+	[PWRAP_RRARB_EN] =		0x44,
+	[PWRAP_RRARB_STA0] =		0x48,
+	[PWRAP_RRARB_STA1] =		0x4c,
+	[PWRAP_HARB_INIT] =		0x50,
+	[PWRAP_HARB_HPRIO] =		0x54,
+	[PWRAP_HIPRIO_ARB_EN] =		0x58,
+	[PWRAP_HARB_STA0] =		0x5c,
+	[PWRAP_HARB_STA1] =		0x60,
+	[PWRAP_MAN_EN] =		0x64,
+	[PWRAP_MAN_CMD] =		0x68,
+	[PWRAP_MAN_RDATA] =		0x6c,
+	[PWRAP_MAN_VLDCLR] =		0x70,
+	[PWRAP_WACS0_EN] =		0x74,
+	[PWRAP_INIT_DONE0] =		0x78,
+	[PWRAP_WACS0_CMD] =		0x7c,
+	[PWRAP_WACS0_RDATA] =		0x80,
+	[PWRAP_WACS0_VLDCLR] =		0x84,
+	[PWRAP_WACS1_EN] =		0x88,
+	[PWRAP_INIT_DONE1] =		0x8c,
+	[PWRAP_WACS1_CMD] =		0x90,
+	[PWRAP_WACS1_RDATA] =		0x94,
+	[PWRAP_WACS1_VLDCLR] =		0x98,
+	[PWRAP_WACS2_EN] =		0x9c,
+	[PWRAP_INIT_DONE2] =		0xa0,
+	[PWRAP_WACS2_CMD] =		0xa4,
+	[PWRAP_WACS2_RDATA] =		0xa8,
+	[PWRAP_WACS2_VLDCLR] =		0xac,
+	[PWRAP_INT_EN] =		0xb0,
+	[PWRAP_INT_FLG_RAW] =		0xb4,
+	[PWRAP_INT_FLG] =		0xb8,
+	[PWRAP_INT_CLR] =		0xbc,
+	[PWRAP_SIG_ADR] =		0xc0,
+	[PWRAP_SIG_MODE] =		0xc4,
+	[PWRAP_SIG_VALUE] =		0xc8,
+	[PWRAP_SIG_ERRVAL] =		0xcc,
+	[PWRAP_CRC_EN] =		0xd0,
+	[PWRAP_EVENT_STA] =		0xd4,
+	[PWRAP_EVENT_STACLR] =		0xd8,
+	[PWRAP_TIMER_EN] =		0xdc,
+	[PWRAP_TIMER_STA] =		0xe0,
+	[PWRAP_WDT_UNIT] =		0xe4,
+	[PWRAP_WDT_SRC_EN] =		0xe8,
+	[PWRAP_WDT_FLG] =		0xec,
+	[PWRAP_DEBUG_INT_SEL] =		0xf0,
+	[PWRAP_CIPHER_KEY_SEL] =	0x134,
+	[PWRAP_CIPHER_IV_SEL] =		0x138,
+	[PWRAP_CIPHER_LOAD] =		0x13c,
+	[PWRAP_CIPHER_START] =		0x140,
+	[PWRAP_CIPHER_RDY] =		0x144,
+	[PWRAP_CIPHER_MODE] =		0x148,
+	[PWRAP_CIPHER_SWRST] =		0x14c,
+	[PWRAP_DCM_EN] =		0x15c,
+	[PWRAP_DCM_DBC_PRD] =		0x160,
+};
+
 static int mt8173_regs[] = {
 	[PWRAP_MUX_SEL] =		0x0,
 	[PWRAP_WRAP_EN] =		0x4,
@@ -608,92 +805,160 @@
 	[PWRAP_DCM_DBC_PRD] =		0x148,
 };
 
-static int mt8135_regs[] = {
+static int mt8183_regs[] = {
+	[PWRAP_MUX_SEL] =			0x0,
+	[PWRAP_WRAP_EN] =			0x4,
+	[PWRAP_DIO_EN] =			0x8,
+	[PWRAP_SI_SAMPLE_CTRL] =		0xC,
+	[PWRAP_RDDMY] =				0x14,
+	[PWRAP_CSHEXT_WRITE] =			0x18,
+	[PWRAP_CSHEXT_READ] =			0x1C,
+	[PWRAP_CSLEXT_WRITE] =			0x20,
+	[PWRAP_CSLEXT_READ] =			0x24,
+	[PWRAP_EXT_CK_WRITE] =			0x28,
+	[PWRAP_STAUPD_CTRL] =			0x30,
+	[PWRAP_STAUPD_GRPEN] =			0x34,
+	[PWRAP_EINT_STA0_ADR] =			0x38,
+	[PWRAP_HARB_HPRIO] =			0x5C,
+	[PWRAP_HIPRIO_ARB_EN] =			0x60,
+	[PWRAP_MAN_EN] =			0x70,
+	[PWRAP_MAN_CMD] =			0x74,
+	[PWRAP_WACS0_EN] =			0x80,
+	[PWRAP_INIT_DONE0] =			0x84,
+	[PWRAP_WACS1_EN] =			0x88,
+	[PWRAP_INIT_DONE1] =			0x8C,
+	[PWRAP_WACS2_EN] =			0x90,
+	[PWRAP_INIT_DONE2] =			0x94,
+	[PWRAP_WACS_P2P_EN] =			0xA0,
+	[PWRAP_INIT_DONE_P2P] =			0xA4,
+	[PWRAP_WACS_MD32_EN] =			0xA8,
+	[PWRAP_INIT_DONE_MD32] =		0xAC,
+	[PWRAP_INT_EN] =			0xB0,
+	[PWRAP_INT_FLG] =			0xB8,
+	[PWRAP_INT_CLR] =			0xBC,
+	[PWRAP_INT1_EN] =			0xC0,
+	[PWRAP_INT1_FLG] =			0xC8,
+	[PWRAP_INT1_CLR] =			0xCC,
+	[PWRAP_SIG_ADR] =			0xD0,
+	[PWRAP_CRC_EN] =			0xE0,
+	[PWRAP_TIMER_EN] =			0xE4,
+	[PWRAP_WDT_UNIT] =			0xEC,
+	[PWRAP_WDT_SRC_EN] =			0xF0,
+	[PWRAP_WDT_SRC_EN_1] =			0xF4,
+	[PWRAP_INT_GPS_AUXADC_CMD_ADDR] =	0x1DC,
+	[PWRAP_INT_GPS_AUXADC_CMD] =		0x1E0,
+	[PWRAP_INT_GPS_AUXADC_RDATA_ADDR] =	0x1E4,
+	[PWRAP_EXT_GPS_AUXADC_RDATA_ADDR] =	0x1E8,
+	[PWRAP_GPSINF_0_STA] =			0x1EC,
+	[PWRAP_GPSINF_1_STA] =			0x1F0,
+	[PWRAP_WACS2_CMD] =			0xC20,
+	[PWRAP_WACS2_RDATA] =			0xC24,
+	[PWRAP_WACS2_VLDCLR] =			0xC28,
+};
+
+static int mt8516_regs[] = {
 	[PWRAP_MUX_SEL] =		0x0,
 	[PWRAP_WRAP_EN] =		0x4,
 	[PWRAP_DIO_EN] =		0x8,
 	[PWRAP_SIDLY] =			0xc,
-	[PWRAP_CSHEXT] =		0x10,
-	[PWRAP_CSHEXT_WRITE] =		0x14,
-	[PWRAP_CSHEXT_READ] =		0x18,
-	[PWRAP_CSLEXT_START] =		0x1c,
-	[PWRAP_CSLEXT_END] =		0x20,
-	[PWRAP_STAUPD_PRD] =		0x24,
-	[PWRAP_STAUPD_GRPEN] =		0x28,
-	[PWRAP_STAUPD_MAN_TRIG] =	0x2c,
-	[PWRAP_STAUPD_STA] =		0x30,
-	[PWRAP_EVENT_IN_EN] =		0x34,
-	[PWRAP_EVENT_DST_EN] =		0x38,
-	[PWRAP_WRAP_STA] =		0x3c,
-	[PWRAP_RRARB_INIT] =		0x40,
-	[PWRAP_RRARB_EN] =		0x44,
-	[PWRAP_RRARB_STA0] =		0x48,
-	[PWRAP_RRARB_STA1] =		0x4c,
-	[PWRAP_HARB_INIT] =		0x50,
-	[PWRAP_HARB_HPRIO] =		0x54,
-	[PWRAP_HIPRIO_ARB_EN] =		0x58,
-	[PWRAP_HARB_STA0] =		0x5c,
-	[PWRAP_HARB_STA1] =		0x60,
-	[PWRAP_MAN_EN] =		0x64,
-	[PWRAP_MAN_CMD] =		0x68,
-	[PWRAP_MAN_RDATA] =		0x6c,
-	[PWRAP_MAN_VLDCLR] =		0x70,
-	[PWRAP_WACS0_EN] =		0x74,
-	[PWRAP_INIT_DONE0] =		0x78,
-	[PWRAP_WACS0_CMD] =		0x7c,
-	[PWRAP_WACS0_RDATA] =		0x80,
-	[PWRAP_WACS0_VLDCLR] =		0x84,
-	[PWRAP_WACS1_EN] =		0x88,
-	[PWRAP_INIT_DONE1] =		0x8c,
-	[PWRAP_WACS1_CMD] =		0x90,
-	[PWRAP_WACS1_RDATA] =		0x94,
-	[PWRAP_WACS1_VLDCLR] =		0x98,
-	[PWRAP_WACS2_EN] =		0x9c,
-	[PWRAP_INIT_DONE2] =		0xa0,
-	[PWRAP_WACS2_CMD] =		0xa4,
-	[PWRAP_WACS2_RDATA] =		0xa8,
-	[PWRAP_WACS2_VLDCLR] =		0xac,
-	[PWRAP_INT_EN] =		0xb0,
-	[PWRAP_INT_FLG_RAW] =		0xb4,
-	[PWRAP_INT_FLG] =		0xb8,
-	[PWRAP_INT_CLR] =		0xbc,
-	[PWRAP_SIG_ADR] =		0xc0,
-	[PWRAP_SIG_MODE] =		0xc4,
-	[PWRAP_SIG_VALUE] =		0xc8,
-	[PWRAP_SIG_ERRVAL] =		0xcc,
-	[PWRAP_CRC_EN] =		0xd0,
-	[PWRAP_EVENT_STA] =		0xd4,
-	[PWRAP_EVENT_STACLR] =		0xd8,
-	[PWRAP_TIMER_EN] =		0xdc,
-	[PWRAP_TIMER_STA] =		0xe0,
-	[PWRAP_WDT_UNIT] =		0xe4,
-	[PWRAP_WDT_SRC_EN] =		0xe8,
-	[PWRAP_WDT_FLG] =		0xec,
-	[PWRAP_DEBUG_INT_SEL] =		0xf0,
-	[PWRAP_CIPHER_KEY_SEL] =	0x134,
-	[PWRAP_CIPHER_IV_SEL] =		0x138,
-	[PWRAP_CIPHER_LOAD] =		0x13c,
-	[PWRAP_CIPHER_START] =		0x140,
-	[PWRAP_CIPHER_RDY] =		0x144,
-	[PWRAP_CIPHER_MODE] =		0x148,
-	[PWRAP_CIPHER_SWRST] =		0x14c,
-	[PWRAP_DCM_EN] =		0x15c,
-	[PWRAP_DCM_DBC_PRD] =		0x160,
+	[PWRAP_RDDMY] =			0x10,
+	[PWRAP_SI_CK_CON] =		0x14,
+	[PWRAP_CSHEXT_WRITE] =		0x18,
+	[PWRAP_CSHEXT_READ] =		0x1c,
+	[PWRAP_CSLEXT_START] =		0x20,
+	[PWRAP_CSLEXT_END] =		0x24,
+	[PWRAP_STAUPD_PRD] =		0x28,
+	[PWRAP_STAUPD_GRPEN] =		0x2c,
+	[PWRAP_STAUPD_MAN_TRIG] =	0x40,
+	[PWRAP_STAUPD_STA] =		0x44,
+	[PWRAP_WRAP_STA] =		0x48,
+	[PWRAP_HARB_INIT] =		0x4c,
+	[PWRAP_HARB_HPRIO] =		0x50,
+	[PWRAP_HIPRIO_ARB_EN] =		0x54,
+	[PWRAP_HARB_STA0] =		0x58,
+	[PWRAP_HARB_STA1] =		0x5c,
+	[PWRAP_MAN_EN] =		0x60,
+	[PWRAP_MAN_CMD] =		0x64,
+	[PWRAP_MAN_RDATA] =		0x68,
+	[PWRAP_MAN_VLDCLR] =		0x6c,
+	[PWRAP_WACS0_EN] =		0x70,
+	[PWRAP_INIT_DONE0] =		0x74,
+	[PWRAP_WACS0_CMD] =		0x78,
+	[PWRAP_WACS0_RDATA] =		0x7c,
+	[PWRAP_WACS0_VLDCLR] =		0x80,
+	[PWRAP_WACS1_EN] =		0x84,
+	[PWRAP_INIT_DONE1] =		0x88,
+	[PWRAP_WACS1_CMD] =		0x8c,
+	[PWRAP_WACS1_RDATA] =		0x90,
+	[PWRAP_WACS1_VLDCLR] =		0x94,
+	[PWRAP_WACS2_EN] =		0x98,
+	[PWRAP_INIT_DONE2] =		0x9c,
+	[PWRAP_WACS2_CMD] =		0xa0,
+	[PWRAP_WACS2_RDATA] =		0xa4,
+	[PWRAP_WACS2_VLDCLR] =		0xa8,
+	[PWRAP_INT_EN] =		0xac,
+	[PWRAP_INT_FLG_RAW] =		0xb0,
+	[PWRAP_INT_FLG] =		0xb4,
+	[PWRAP_INT_CLR] =		0xb8,
+	[PWRAP_SIG_ADR] =		0xbc,
+	[PWRAP_SIG_MODE] =		0xc0,
+	[PWRAP_SIG_VALUE] =		0xc4,
+	[PWRAP_SIG_ERRVAL] =		0xc8,
+	[PWRAP_CRC_EN] =		0xcc,
+	[PWRAP_TIMER_EN] =		0xd0,
+	[PWRAP_TIMER_STA] =		0xd4,
+	[PWRAP_WDT_UNIT] =		0xd8,
+	[PWRAP_WDT_SRC_EN] =		0xdc,
+	[PWRAP_WDT_FLG] =		0xe0,
+	[PWRAP_DEBUG_INT_SEL] =		0xe4,
+	[PWRAP_DVFS_ADR0] =		0xe8,
+	[PWRAP_DVFS_WDATA0] =		0xec,
+	[PWRAP_DVFS_ADR1] =		0xf0,
+	[PWRAP_DVFS_WDATA1] =		0xf4,
+	[PWRAP_DVFS_ADR2] =		0xf8,
+	[PWRAP_DVFS_WDATA2] =		0xfc,
+	[PWRAP_DVFS_ADR3] =		0x100,
+	[PWRAP_DVFS_WDATA3] =		0x104,
+	[PWRAP_DVFS_ADR4] =		0x108,
+	[PWRAP_DVFS_WDATA4] =		0x10c,
+	[PWRAP_DVFS_ADR5] =		0x110,
+	[PWRAP_DVFS_WDATA5] =		0x114,
+	[PWRAP_DVFS_ADR6] =		0x118,
+	[PWRAP_DVFS_WDATA6] =		0x11c,
+	[PWRAP_DVFS_ADR7] =		0x120,
+	[PWRAP_DVFS_WDATA7] =		0x124,
+	[PWRAP_SPMINF_STA] =		0x128,
+	[PWRAP_CIPHER_KEY_SEL] =	0x12c,
+	[PWRAP_CIPHER_IV_SEL] =		0x130,
+	[PWRAP_CIPHER_EN] =		0x134,
+	[PWRAP_CIPHER_RDY] =		0x138,
+	[PWRAP_CIPHER_MODE] =		0x13c,
+	[PWRAP_CIPHER_SWRST] =		0x140,
+	[PWRAP_DCM_EN] =		0x144,
+	[PWRAP_DCM_DBC_PRD] =		0x148,
+	[PWRAP_SW_RST] =		0x168,
+	[PWRAP_OP_TYPE] =		0x16c,
+	[PWRAP_MSB_FIRST] =		0x170,
 };
 
 enum pmic_type {
 	PMIC_MT6323,
 	PMIC_MT6351,
+	PMIC_MT6357,
+	PMIC_MT6358,
 	PMIC_MT6380,
 	PMIC_MT6397,
 };
 
 enum pwrap_type {
 	PWRAP_MT2701,
+	PWRAP_MT6765,
 	PWRAP_MT6797,
 	PWRAP_MT7622,
 	PWRAP_MT8135,
 	PWRAP_MT8173,
+	PWRAP_MT8183,
+	PWRAP_MT8516,
 };
 
 struct pmic_wrapper;
@@ -731,9 +996,11 @@
 	enum pwrap_type type;
 	u32 arb_en_all;
 	u32 int_en_all;
+	u32 int1_en_all;
 	u32 spi_w;
 	u32 wdt_src;
-	unsigned int has_bridge:1;
+	/* Flags indicating the capabilities of this pwrap variant */
+	u32 caps;
 	int (*init_reg_clock)(struct pmic_wrapper *wrp);
 	int (*init_soc_specific)(struct pmic_wrapper *wrp);
 };
@@ -1096,7 +1363,7 @@
 	ret = pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_RDY],
 			 &rdata);
 	if (ret)
-		return 0;
+		return false;
 
 	return rdata == 1;
 }
@@ -1117,13 +1384,17 @@
 		pwrap_writel(wrp, 1, PWRAP_CIPHER_START);
 		break;
 	case PWRAP_MT2701:
+	case PWRAP_MT6765:
 	case PWRAP_MT6797:
 	case PWRAP_MT8173:
+	case PWRAP_MT8516:
 		pwrap_writel(wrp, 1, PWRAP_CIPHER_EN);
 		break;
 	case PWRAP_MT7622:
 		pwrap_writel(wrp, 0, PWRAP_CIPHER_EN);
 		break;
+	case PWRAP_MT8183:
+		break;
 	}
 
 	/* Config cipher mode @PMIC */
@@ -1141,6 +1412,7 @@
 		break;
 	case PMIC_MT6323:
 	case PMIC_MT6351:
+	case PMIC_MT6357:
 		pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_EN],
 			    0x1);
 		break;
@@ -1276,11 +1548,29 @@
 	return 0;
 }
 
+static int pwrap_mt8183_init_soc_specific(struct pmic_wrapper *wrp)
+{
+	pwrap_writel(wrp, 0xf5, PWRAP_STAUPD_GRPEN);
+
+	pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_EN], 0x1);
+	pwrap_writel(wrp, 1, PWRAP_CRC_EN);
+	pwrap_writel(wrp, 0x416, PWRAP_SIG_ADR);
+	pwrap_writel(wrp, 0x42e, PWRAP_EINT_STA0_ADR);
+
+	pwrap_writel(wrp, 1, PWRAP_WACS_P2P_EN);
+	pwrap_writel(wrp, 1, PWRAP_WACS_MD32_EN);
+	pwrap_writel(wrp, 1, PWRAP_INIT_DONE_P2P);
+	pwrap_writel(wrp, 1, PWRAP_INIT_DONE_MD32);
+
+	return 0;
+}
+
 static int pwrap_init(struct pmic_wrapper *wrp)
 {
 	int ret;
 
-	reset_control_reset(wrp->rstc);
+	if (wrp->rstc)
+		reset_control_reset(wrp->rstc);
 	if (wrp->rstc_bridge)
 		reset_control_reset(wrp->rstc_bridge);
 
@@ -1348,7 +1638,7 @@
 	pwrap_writel(wrp, 1, PWRAP_INIT_DONE0);
 	pwrap_writel(wrp, 1, PWRAP_INIT_DONE1);
 
-	if (wrp->master->has_bridge) {
+	if (HAS_CAP(wrp->master->caps, PWRAP_CAP_BRIDGE)) {
 		writel(1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INIT_DONE3);
 		writel(1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INIT_DONE4);
 	}
@@ -1362,11 +1652,15 @@
 	struct pmic_wrapper *wrp = dev_id;
 
 	rdata = pwrap_readl(wrp, PWRAP_INT_FLG);
-
 	dev_err(wrp->dev, "unexpected interrupt int=0x%x\n", rdata);
-
 	pwrap_writel(wrp, 0xffffffff, PWRAP_INT_CLR);
 
+	if (HAS_CAP(wrp->master->caps, PWRAP_CAP_INT1_EN)) {
+		rdata = pwrap_readl(wrp, PWRAP_INT1_FLG);
+		dev_err(wrp->dev, "unexpected interrupt int1=0x%x\n", rdata);
+		pwrap_writel(wrp, 0xffffffff, PWRAP_INT1_CLR);
+	}
+
 	return IRQ_HANDLED;
 }
 
@@ -1398,6 +1692,33 @@
 	.pwrap_write = pwrap_write16,
 };
 
+static const struct pwrap_slv_type pmic_mt6351 = {
+	.dew_regs = mt6351_regs,
+	.type = PMIC_MT6351,
+	.regmap = &pwrap_regmap_config16,
+	.caps = 0,
+	.pwrap_read = pwrap_read16,
+	.pwrap_write = pwrap_write16,
+};
+
+static const struct pwrap_slv_type pmic_mt6357 = {
+	.dew_regs = mt6357_regs,
+	.type = PMIC_MT6357,
+	.regmap = &pwrap_regmap_config16,
+	.caps = 0,
+	.pwrap_read = pwrap_read16,
+	.pwrap_write = pwrap_write16,
+};
+
+static const struct pwrap_slv_type pmic_mt6358 = {
+	.dew_regs = mt6358_regs,
+	.type = PMIC_MT6358,
+	.regmap = &pwrap_regmap_config16,
+	.caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO,
+	.pwrap_read = pwrap_read16,
+	.pwrap_write = pwrap_write16,
+};
+
 static const struct pwrap_slv_type pmic_mt6380 = {
 	.dew_regs = NULL,
 	.type = PMIC_MT6380,
@@ -1417,20 +1738,20 @@
 	.pwrap_write = pwrap_write16,
 };
 
-static const struct pwrap_slv_type pmic_mt6351 = {
-	.dew_regs = mt6351_regs,
-	.type = PMIC_MT6351,
-	.regmap = &pwrap_regmap_config16,
-	.caps = 0,
-	.pwrap_read = pwrap_read16,
-	.pwrap_write = pwrap_write16,
-};
-
 static const struct of_device_id of_slave_match_tbl[] = {
 	{
 		.compatible = "mediatek,mt6323",
 		.data = &pmic_mt6323,
 	}, {
+		.compatible = "mediatek,mt6351",
+		.data = &pmic_mt6351,
+	}, {
+		.compatible = "mediatek,mt6357",
+		.data = &pmic_mt6357,
+	}, {
+		.compatible = "mediatek,mt6358",
+		.data = &pmic_mt6358,
+	}, {
 		/* The MT6380 PMIC only implements a regulator, so we bind it
 		 * directly instead of using a MFD.
 		 */
@@ -1440,9 +1761,6 @@
 		.compatible = "mediatek,mt6397",
 		.data = &pmic_mt6397,
 	}, {
-		.compatible = "mediatek,mt6351",
-		.data = &pmic_mt6351,
-	}, {
 		/* sentinel */
 	}
 };
@@ -1453,21 +1771,35 @@
 	.type = PWRAP_MT2701,
 	.arb_en_all = 0x3f,
 	.int_en_all = ~(u32)(BIT(31) | BIT(2)),
+	.int1_en_all = 0,
 	.spi_w = PWRAP_MAN_CMD_SPI_WRITE_NEW,
 	.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
-	.has_bridge = 0,
+	.caps = PWRAP_CAP_RESET | PWRAP_CAP_DCM,
 	.init_reg_clock = pwrap_mt2701_init_reg_clock,
 	.init_soc_specific = pwrap_mt2701_init_soc_specific,
 };
 
+static const struct pmic_wrapper_type pwrap_mt6765 = {
+	.regs = mt6765_regs,
+	.type = PWRAP_MT6765,
+	.arb_en_all = 0x3fd35,
+	.int_en_all = 0xffffffff,
+	.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+	.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+	.caps = PWRAP_CAP_RESET | PWRAP_CAP_DCM,
+	.init_reg_clock = pwrap_common_init_reg_clock,
+	.init_soc_specific = NULL,
+};
+
 static const struct pmic_wrapper_type pwrap_mt6797 = {
 	.regs = mt6797_regs,
 	.type = PWRAP_MT6797,
 	.arb_en_all = 0x01fff,
 	.int_en_all = 0xffffffc6,
+	.int1_en_all = 0,
 	.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
 	.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
-	.has_bridge = 0,
+	.caps = PWRAP_CAP_RESET | PWRAP_CAP_DCM,
 	.init_reg_clock = pwrap_common_init_reg_clock,
 	.init_soc_specific = NULL,
 };
@@ -1477,9 +1809,10 @@
 	.type = PWRAP_MT7622,
 	.arb_en_all = 0xff,
 	.int_en_all = ~(u32)BIT(31),
+	.int1_en_all = 0,
 	.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
 	.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
-	.has_bridge = 0,
+	.caps = PWRAP_CAP_RESET | PWRAP_CAP_DCM,
 	.init_reg_clock = pwrap_common_init_reg_clock,
 	.init_soc_specific = pwrap_mt7622_init_soc_specific,
 };
@@ -1489,9 +1822,10 @@
 	.type = PWRAP_MT8135,
 	.arb_en_all = 0x1ff,
 	.int_en_all = ~(u32)(BIT(31) | BIT(1)),
+	.int1_en_all = 0,
 	.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
 	.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
-	.has_bridge = 1,
+	.caps = PWRAP_CAP_BRIDGE | PWRAP_CAP_RESET | PWRAP_CAP_DCM,
 	.init_reg_clock = pwrap_common_init_reg_clock,
 	.init_soc_specific = pwrap_mt8135_init_soc_specific,
 };
@@ -1501,18 +1835,47 @@
 	.type = PWRAP_MT8173,
 	.arb_en_all = 0x3f,
 	.int_en_all = ~(u32)(BIT(31) | BIT(1)),
+	.int1_en_all = 0,
 	.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
 	.wdt_src = PWRAP_WDT_SRC_MASK_NO_STAUPD,
-	.has_bridge = 0,
+	.caps = PWRAP_CAP_RESET | PWRAP_CAP_DCM,
 	.init_reg_clock = pwrap_common_init_reg_clock,
 	.init_soc_specific = pwrap_mt8173_init_soc_specific,
 };
 
+static const struct pmic_wrapper_type pwrap_mt8183 = {
+	.regs = mt8183_regs,
+	.type = PWRAP_MT8183,
+	.arb_en_all = 0x3fa75,
+	.int_en_all = 0xffffffff,
+	.int1_en_all = 0xeef7ffff,
+	.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+	.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+	.caps = PWRAP_CAP_INT1_EN | PWRAP_CAP_WDT_SRC1,
+	.init_reg_clock = pwrap_common_init_reg_clock,
+	.init_soc_specific = pwrap_mt8183_init_soc_specific,
+};
+
+static const struct pmic_wrapper_type pwrap_mt8516 = {
+	.regs = mt8516_regs,
+	.type = PWRAP_MT8516,
+	.arb_en_all = 0xff,
+	.int_en_all = ~(u32)(BIT(31) | BIT(2)),
+	.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+	.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+	.caps = PWRAP_CAP_DCM,
+	.init_reg_clock = pwrap_mt2701_init_reg_clock,
+	.init_soc_specific = NULL,
+};
+
 static const struct of_device_id of_pwrap_match_tbl[] = {
 	{
 		.compatible = "mediatek,mt2701-pwrap",
 		.data = &pwrap_mt2701,
 	}, {
+		.compatible = "mediatek,mt6765-pwrap",
+		.data = &pwrap_mt6765,
+	}, {
 		.compatible = "mediatek,mt6797-pwrap",
 		.data = &pwrap_mt6797,
 	}, {
@@ -1525,6 +1888,12 @@
 		.compatible = "mediatek,mt8173-pwrap",
 		.data = &pwrap_mt8173,
 	}, {
+		.compatible = "mediatek,mt8183-pwrap",
+		.data = &pwrap_mt8183,
+	}, {
+		.compatible = "mediatek,mt8516-pwrap",
+		.data = &pwrap_mt8516,
+	}, {
 		/* sentinel */
 	}
 };
@@ -1542,7 +1911,7 @@
 		of_slave_id = of_match_node(of_slave_match_tbl, np->child);
 
 	if (!of_slave_id) {
-		dev_dbg(&pdev->dev, "slave pmic should be defined in dts\n");
+		dev_err(&pdev->dev, "slave pmic should be defined in dts\n");
 		return -EINVAL;
 	}
 
@@ -1561,14 +1930,16 @@
 	if (IS_ERR(wrp->base))
 		return PTR_ERR(wrp->base);
 
-	wrp->rstc = devm_reset_control_get(wrp->dev, "pwrap");
-	if (IS_ERR(wrp->rstc)) {
-		ret = PTR_ERR(wrp->rstc);
-		dev_dbg(wrp->dev, "cannot get pwrap reset: %d\n", ret);
-		return ret;
+	if (HAS_CAP(wrp->master->caps, PWRAP_CAP_RESET)) {
+		wrp->rstc = devm_reset_control_get(wrp->dev, "pwrap");
+		if (IS_ERR(wrp->rstc)) {
+			ret = PTR_ERR(wrp->rstc);
+			dev_err(wrp->dev, "cannot get pwrap reset: %d\n", ret);
+			return ret;
+		}
 	}
 
-	if (wrp->master->has_bridge) {
+	if (HAS_CAP(wrp->master->caps, PWRAP_CAP_BRIDGE)) {
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 				"pwrap-bridge");
 		wrp->bridge_base = devm_ioremap_resource(wrp->dev, res);
@@ -1579,7 +1950,7 @@
 							  "pwrap-bridge");
 		if (IS_ERR(wrp->rstc_bridge)) {
 			ret = PTR_ERR(wrp->rstc_bridge);
-			dev_dbg(wrp->dev,
+			dev_err(wrp->dev,
 				"cannot get pwrap-bridge reset: %d\n", ret);
 			return ret;
 		}
@@ -1587,14 +1958,14 @@
 
 	wrp->clk_spi = devm_clk_get(wrp->dev, "spi");
 	if (IS_ERR(wrp->clk_spi)) {
-		dev_dbg(wrp->dev, "failed to get clock: %ld\n",
+		dev_err(wrp->dev, "failed to get SPI clock: %ld\n",
 			PTR_ERR(wrp->clk_spi));
 		return PTR_ERR(wrp->clk_spi);
 	}
 
 	wrp->clk_wrap = devm_clk_get(wrp->dev, "wrap");
 	if (IS_ERR(wrp->clk_wrap)) {
-		dev_dbg(wrp->dev, "failed to get clock: %ld\n",
+		dev_err(wrp->dev, "failed to get WRAP clock: %ld\n",
 			PTR_ERR(wrp->clk_wrap));
 		return PTR_ERR(wrp->clk_wrap);
 	}
@@ -1608,8 +1979,10 @@
 		goto err_out1;
 
 	/* Enable internal dynamic clock */
-	pwrap_writel(wrp, 1, PWRAP_DCM_EN);
-	pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD);
+	if (HAS_CAP(wrp->master->caps, PWRAP_CAP_DCM)) {
+		pwrap_writel(wrp, 1, PWRAP_DCM_EN);
+		pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD);
+	}
 
 	/*
 	 * The PMIC could already be initialized by the bootloader.
@@ -1618,13 +1991,13 @@
 	if (!pwrap_readl(wrp, PWRAP_INIT_DONE2)) {
 		ret = pwrap_init(wrp);
 		if (ret) {
-			dev_dbg(wrp->dev, "init failed with %d\n", ret);
+			dev_err(wrp->dev, "init failed with %d\n", ret);
 			goto err_out2;
 		}
 	}
 
 	if (!(pwrap_readl(wrp, PWRAP_WACS2_RDATA) & PWRAP_STATE_INIT_DONE0)) {
-		dev_dbg(wrp->dev, "initialization isn't finished\n");
+		dev_err(wrp->dev, "initialization isn't finished\n");
 		ret = -ENODEV;
 		goto err_out2;
 	}
@@ -1636,8 +2009,17 @@
 	 * so STAUPD of WDT_SRC which should be turned off
 	 */
 	pwrap_writel(wrp, wrp->master->wdt_src, PWRAP_WDT_SRC_EN);
+	if (HAS_CAP(wrp->master->caps, PWRAP_CAP_WDT_SRC1))
+		pwrap_writel(wrp, wrp->master->wdt_src, PWRAP_WDT_SRC_EN_1);
+
 	pwrap_writel(wrp, 0x1, PWRAP_TIMER_EN);
 	pwrap_writel(wrp, wrp->master->int_en_all, PWRAP_INT_EN);
+	/*
+	 * The INT1 interrupt handles starvation and request exceptions.
+	 * Enable it here on hardware that supports it.
+	 */
+	if (HAS_CAP(wrp->master->caps, PWRAP_CAP_INT1_EN))
+		pwrap_writel(wrp, wrp->master->int1_en_all, PWRAP_INT1_EN);
 
 	irq = platform_get_irq(pdev, 0);
 	ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt,
@@ -1654,7 +2036,7 @@
 
 	ret = of_platform_populate(np, NULL, NULL, wrp->dev);
 	if (ret) {
-		dev_dbg(wrp->dev, "failed to create child devices at %pOF\n",
+		dev_err(wrp->dev, "failed to create child devices at %pOF\n",
 				np);
 		goto err_out2;
 	}
diff --git a/drivers/soc/mediatek/mtk-scpsys-ext.c b/drivers/soc/mediatek/mtk-scpsys-ext.c
new file mode 100644
index 0000000..b24321e
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-scpsys-ext.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <Owen.Chen@mediatek.com>
+ */
+#include <linux/ktime.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/scpsys-ext.h>
+
+#define MTK_POLL_DELAY_US   10
+#define MTK_POLL_TIMEOUT    USEC_PER_SEC
+
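+/*
+ * Two register styles are handled below: variants with dedicated
+ * set/clear registers write the mask there, while the others fall back
+ * to a read-modify-write of the enable register. In both cases the
+ * status register is polled until the ack bits settle.
+ */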
+static int set_bus_protection(struct regmap *map, u32 mask, u32 ack_mask,
+		u32 reg_set, u32 reg_sta, u32 reg_en)
+{
+	u32 val;
+
+	if (reg_set)
+		regmap_write(map, reg_set, mask);
+	else
+		regmap_update_bits(map, reg_en, mask, mask);
+
+	return regmap_read_poll_timeout(map, reg_sta,
+			val, (val & ack_mask) == ack_mask,
+			MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+}
+
+static int clear_bus_protection(struct regmap *map, u32 mask, u32 ack_mask,
+		u32 reg_clr, u32 reg_sta, u32 reg_en)
+{
+	u32 val;
+
+	if (reg_clr)
+		regmap_write(map, reg_clr, mask);
+	else
+		regmap_update_bits(map, reg_en, mask, 0);
+
+	return regmap_read_poll_timeout(map, reg_sta,
+			val, !(val & ack_mask),
+			MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+}
+
+int mtk_scpsys_ext_set_bus_protection(const struct bus_prot *bp_table,
+	struct regmap *infracfg, struct regmap *smi_common)
+{
+	int i;
+
+	for (i = 0; i < MAX_STEPS; i++) {
+		struct regmap *map;
+		int ret;
+
+		if (bp_table[i].type == IFR_TYPE)
+			map = infracfg;
+		else if (bp_table[i].type == SMI_TYPE)
+			map = smi_common;
+		else
+			continue; /* INVALID_TYPE or unknown type: skip */
+
+		ret = set_bus_protection(map,
+				bp_table[i].mask, bp_table[i].mask,
+				bp_table[i].set_ofs, bp_table[i].sta_ofs,
+				bp_table[i].en_ofs);
+
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int mtk_scpsys_ext_clear_bus_protection(const struct bus_prot *bp_table,
+	struct regmap *infracfg, struct regmap *smi_common)
+{
+	int i;
+
+	for (i = MAX_STEPS - 1; i >= 0; i--) {
+		struct regmap *map;
+		int ret;
+
+		if (bp_table[i].type == IFR_TYPE)
+			map = infracfg;
+		else if (bp_table[i].type == SMI_TYPE)
+			map = smi_common;
+		else
+			continue; /* INVALID_TYPE or unknown type: skip */
+
+		ret = clear_bus_protection(map,
+				bp_table[i].mask, bp_table[i].clr_ack_mask,
+				bp_table[i].clr_ofs, bp_table[i].sta_ofs,
+				bp_table[i].en_ofs);
+
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 5b24bb4..1731eec 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -20,16 +20,19 @@
 #include <linux/pm_domain.h>
 #include <linux/regulator/consumer.h>
 #include <linux/soc/mediatek/infracfg.h>
+#include <linux/soc/mediatek/scpsys-ext.h>
 
 #include <dt-bindings/power/mt2701-power.h>
 #include <dt-bindings/power/mt2712-power.h>
 #include <dt-bindings/power/mt6797-power.h>
 #include <dt-bindings/power/mt7622-power.h>
 #include <dt-bindings/power/mt7623a-power.h>
+#include <dt-bindings/power/mt8167-power.h>
 #include <dt-bindings/power/mt8173-power.h>
+#include <dt-bindings/power/mt8183-power.h>
 
 #define MTK_POLL_DELAY_US   10
-#define MTK_POLL_TIMEOUT    (jiffies_to_usecs(HZ))
+#define MTK_POLL_TIMEOUT    USEC_PER_SEC
 
 #define MTK_SCPD_ACTIVE_WAKEUP		BIT(0)
 #define MTK_SCPD_FWAIT_SRAM		BIT(1)
@@ -64,6 +67,8 @@
 #define PWR_ON_BIT			BIT(2)
 #define PWR_ON_2ND_BIT			BIT(3)
 #define PWR_CLK_DIS_BIT			BIT(4)
+#define PWR_SRAM_CLKISO_BIT		BIT(5)
+#define PWR_SRAM_ISOINT_B_BIT		BIT(6)
 
 #define PWR_STATUS_CONN			BIT(1)
 #define PWR_STATUS_DISP			BIT(3)
@@ -97,6 +102,7 @@
 	CLK_HIFSEL,
 	CLK_JPGDEC,
 	CLK_AUDIO,
+	CLK_AXI_MFG,
 	CLK_MAX,
 };
 
@@ -115,16 +121,39 @@
 };
 
 #define MAX_CLKS	3
+#define MAX_SUBSYS_CLKS 10
 
+/**
+ * struct scp_domain_data - scp domain data for power on/off flow
+ * @name: The domain name.
+ * @sta_mask: The mask for power on/off status bit.
+ * @ctl_offs: The offset for main power control register.
+ * @sram_iso_ctrl: Whether the power domain needs the extra SRAM
+ *                 isolation control sequence.
+ * @sram_pdn_bits: The mask for sram power control bits.
+ * @sram_pdn_ack_bits: The mask for sram power control acked bits.
+ * @bus_prot_mask: The mask for single step bus protection.
+ * @clk_id: The basic clocks required by this power domain.
+ * @basic_clk_id: Serves the same purpose as @clk_id, but names the basic
+ *                clocks directly instead of referring to them by clk_id.
+ * @subsys_clk_prefix: The name prefix of the clocks that must be enabled
+ *                     before releasing bus protection.
+ * @caps: Capability flags, e.g. MTK_SCPD_ACTIVE_WAKEUP.
+ * @bp_table: The mask table for multiple step bus protection.
+ */
 struct scp_domain_data {
 	const char *name;
 	u32 sta_mask;
 	int ctl_offs;
+	bool sram_iso_ctrl;
 	u32 sram_pdn_bits;
 	u32 sram_pdn_ack_bits;
 	u32 bus_prot_mask;
 	enum clk_id clk_id[MAX_CLKS];
+	const char *basic_clk_id[MAX_CLKS];
+	const char *subsys_clk_prefix;
 	u8 caps;
+	struct bus_prot bp_table[MAX_STEPS];
 };
 
 struct scp;
@@ -133,6 +162,7 @@
 	struct generic_pm_domain genpd;
 	struct scp *scp;
 	struct clk *clk[MAX_CLKS];
+	struct clk *subsys_clk[MAX_SUBSYS_CLKS];
 	const struct scp_domain_data *data;
 	struct regulator *supply;
 };
@@ -148,6 +178,7 @@
 	struct device *dev;
 	void __iomem *base;
 	struct regmap *infracfg;
+	struct regmap *smi_common;
 	struct scp_ctrl_reg ctrl_reg;
 	bool bus_prot_reg_update;
 };
@@ -188,32 +219,151 @@
 	return -EINVAL;
 }
 
+static int scpsys_regulator_enable(struct scp_domain *scpd)
+{
+	if (!scpd->supply)
+		return 0;
+
+	return regulator_enable(scpd->supply);
+}
+
+static int scpsys_regulator_disable(struct scp_domain *scpd)
+{
+	if (!scpd->supply)
+		return 0;
+
+	return regulator_disable(scpd->supply);
+}
+
+static void scpsys_clk_disable(struct clk *clk[], int max_num)
+{
+	int i;
+
+	for (i = max_num - 1; i >= 0; i--)
+		clk_disable_unprepare(clk[i]);
+}
+
+static int scpsys_clk_enable(struct clk *clk[], int max_num)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < max_num && clk[i]; i++) {
+		ret = clk_prepare_enable(clk[i]);
+		if (ret) {
+			scpsys_clk_disable(clk, i);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int scpsys_sram_enable(struct scp_domain *scpd, void __iomem *ctl_addr)
+{
+	u32 val;
+	u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
+	int tmp;
+
+	val = readl(ctl_addr) & ~scpd->data->sram_pdn_bits;
+	writel(val, ctl_addr);
+
+	/* Either wait until SRAM_PDN_ACK all 0 or have a force wait */
+	if (MTK_SCPD_CAPS(scpd, MTK_SCPD_FWAIT_SRAM)) {
+		/*
+		 * Currently, MTK_SCPD_FWAIT_SRAM is necessary only for
+		 * MT7622_POWER_DOMAIN_WB and thus just a trivial setup
+		 * is applied here.
+		 */
+		usleep_range(12000, 12100);
+	} else {
+		/* Wait until SRAM_PDN_ACK is all 0 */
+		int ret = readl_poll_timeout(ctl_addr, tmp,
+				(tmp & pdn_ack) == 0,
+				MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+		if (ret < 0)
+			return ret;
+	}
+
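+	/*
+	 * Extra SRAM isolation sequence (sram_iso_ctrl): un-gate the
+	 * isolation interface (ISOINT_B) first, then release the clock
+	 * isolation (CLKISO); scpsys_sram_disable() performs the mirror
+	 * sequence.
+	 */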
+	if (scpd->data->sram_iso_ctrl) {
+		val = readl(ctl_addr) | PWR_SRAM_ISOINT_B_BIT;
+		writel(val, ctl_addr);
+		udelay(1);
+		val &= ~PWR_SRAM_CLKISO_BIT;
+		writel(val, ctl_addr);
+	}
+
+	return 0;
+}
+
+static int scpsys_sram_disable(struct scp_domain *scpd, void __iomem *ctl_addr)
+{
+	u32 val;
+	u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
+	int tmp;
+
+	if (scpd->data->sram_iso_ctrl) {
+		val = readl(ctl_addr);
+		val |= PWR_SRAM_CLKISO_BIT;
+		writel(val, ctl_addr);
+		val &= ~PWR_SRAM_ISOINT_B_BIT;
+		writel(val, ctl_addr);
+		udelay(1);
+	}
+
+	val = readl(ctl_addr) | scpd->data->sram_pdn_bits;
+	writel(val, ctl_addr);
+
+	/* Wait until SRAM_PDN_ACK is all 1 */
+	return readl_poll_timeout(ctl_addr, tmp,
+			(tmp & pdn_ack) == pdn_ack,
+			MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+}
+
+static int scpsys_bus_protect_enable(struct scp_domain *scpd)
+{
+	struct scp *scp = scpd->scp;
+
+	if (scpd->data->bus_prot_mask) {
+		return mtk_infracfg_set_bus_protection(scp->infracfg,
+				scpd->data->bus_prot_mask,
+				scp->bus_prot_reg_update);
+	}
+
+	return mtk_scpsys_ext_set_bus_protection(scpd->data->bp_table,
+			scp->infracfg, scp->smi_common);
+}
+
+static int scpsys_bus_protect_disable(struct scp_domain *scpd)
+{
+	struct scp *scp = scpd->scp;
+
+	if (scpd->data->bus_prot_mask) {
+		return mtk_infracfg_clear_bus_protection(scp->infracfg,
+				scpd->data->bus_prot_mask,
+				scp->bus_prot_reg_update);
+	}
+
+	return mtk_scpsys_ext_clear_bus_protection(scpd->data->bp_table,
+			scp->infracfg, scp->smi_common);
+}
+
 static int scpsys_power_on(struct generic_pm_domain *genpd)
 {
 	struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
 	struct scp *scp = scpd->scp;
 	void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
-	u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
 	u32 val;
 	int ret, tmp;
-	int i;
 
-	if (scpd->supply) {
-		ret = regulator_enable(scpd->supply);
-		if (ret)
-			return ret;
-	}
+	ret = scpsys_regulator_enable(scpd);
+	if (ret < 0)
+		return ret;
 
-	for (i = 0; i < MAX_CLKS && scpd->clk[i]; i++) {
-		ret = clk_prepare_enable(scpd->clk[i]);
-		if (ret) {
-			for (--i; i >= 0; i--)
-				clk_disable_unprepare(scpd->clk[i]);
+	ret = scpsys_clk_enable(scpd->clk, MAX_CLKS);
+	if (ret)
+		goto err_clk;
 
-			goto err_clk;
-		}
-	}
-
+	/* subsys power on */
 	val = readl(ctl_addr);
 	val |= PWR_ON_BIT;
 	writel(val, ctl_addr);
@@ -235,43 +385,26 @@
 	val |= PWR_RST_B_BIT;
 	writel(val, ctl_addr);
 
-	val &= ~scpd->data->sram_pdn_bits;
-	writel(val, ctl_addr);
+	ret = scpsys_clk_enable(scpd->subsys_clk, MAX_SUBSYS_CLKS);
+	if (ret < 0)
+		goto err_pwr_ack;
 
-	/* Either wait until SRAM_PDN_ACK all 0 or have a force wait */
-	if (MTK_SCPD_CAPS(scpd, MTK_SCPD_FWAIT_SRAM)) {
-		/*
-		 * Currently, MTK_SCPD_FWAIT_SRAM is necessary only for
-		 * MT7622_POWER_DOMAIN_WB and thus just a trivial setup is
-		 * applied here.
-		 */
-		usleep_range(12000, 12100);
+	ret = scpsys_sram_enable(scpd, ctl_addr);
+	if (ret < 0)
+		goto err_sram;
 
-	} else {
-		ret = readl_poll_timeout(ctl_addr, tmp, (tmp & pdn_ack) == 0,
-					 MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
-		if (ret < 0)
-			goto err_pwr_ack;
-	}
-
-	if (scpd->data->bus_prot_mask) {
-		ret = mtk_infracfg_clear_bus_protection(scp->infracfg,
-				scpd->data->bus_prot_mask,
-				scp->bus_prot_reg_update);
-		if (ret)
-			goto err_pwr_ack;
-	}
+	ret = scpsys_bus_protect_disable(scpd);
+	if (ret < 0)
+		goto err_sram;
 
 	return 0;
 
+err_sram:
+	scpsys_clk_disable(scpd->subsys_clk, MAX_SUBSYS_CLKS);
 err_pwr_ack:
-	for (i = MAX_CLKS - 1; i >= 0; i--) {
-		if (scpd->clk[i])
-			clk_disable_unprepare(scpd->clk[i]);
-	}
+	scpsys_clk_disable(scpd->clk, MAX_CLKS);
 err_clk:
-	if (scpd->supply)
-		regulator_disable(scpd->supply);
+	scpsys_regulator_disable(scpd);
 
 	dev_err(scp->dev, "Failed to power on domain %s\n", genpd->name);
 
@@ -283,30 +416,21 @@
 	struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
 	struct scp *scp = scpd->scp;
 	void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
-	u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
 	u32 val;
 	int ret, tmp;
-	int i;
 
-	if (scpd->data->bus_prot_mask) {
-		ret = mtk_infracfg_set_bus_protection(scp->infracfg,
-				scpd->data->bus_prot_mask,
-				scp->bus_prot_reg_update);
-		if (ret)
-			goto out;
-	}
-
-	val = readl(ctl_addr);
-	val |= scpd->data->sram_pdn_bits;
-	writel(val, ctl_addr);
-
-	/* wait until SRAM_PDN_ACK all 1 */
-	ret = readl_poll_timeout(ctl_addr, tmp, (tmp & pdn_ack) == pdn_ack,
-				 MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+	ret = scpsys_bus_protect_enable(scpd);
 	if (ret < 0)
 		goto out;
 
-	val |= PWR_ISO_BIT;
+	ret = scpsys_sram_disable(scpd, ctl_addr);
+	if (ret < 0)
+		goto out;
+
+	scpsys_clk_disable(scpd->subsys_clk, MAX_SUBSYS_CLKS);
+
+	/* subsys power off */
+	val = readl(ctl_addr) | PWR_ISO_BIT;
 	writel(val, ctl_addr);
 
 	val &= ~PWR_RST_B_BIT;
@@ -327,11 +451,11 @@
 	if (ret < 0)
 		goto out;
 
-	for (i = 0; i < MAX_CLKS && scpd->clk[i]; i++)
-		clk_disable_unprepare(scpd->clk[i]);
+	scpsys_clk_disable(scpd->clk, MAX_CLKS);
 
-	if (scpd->supply)
-		regulator_disable(scpd->supply);
+	ret = scpsys_regulator_disable(scpd);
+	if (ret < 0)
+		goto out;
 
 	return 0;
 
@@ -341,6 +465,48 @@
 	return ret;
 }
 
+static int init_subsys_clks(struct platform_device *pdev,
+		const char *prefix, struct clk **clk)
+{
+	struct device_node *node = pdev->dev.of_node;
+	u32 prefix_len, sub_clk_cnt = 0;
+	struct property *prop;
+	const char *clk_name;
+
+	if (!node) {
+		dev_err(&pdev->dev, "Cannot find scpsys node: %ld\n",
+			PTR_ERR(node));
+		return PTR_ERR(node);
+	}
+
+	prefix_len = strlen(prefix);
+
+	of_property_for_each_string(node, "clock-names", prop, clk_name) {
+		if (!strncmp(clk_name, prefix, prefix_len) &&
+				(clk_name[prefix_len] == '-')) {
+			if (sub_clk_cnt >= MAX_SUBSYS_CLKS) {
+				dev_err(&pdev->dev,
+					"subsys clk out of range %d\n",
+					sub_clk_cnt);
+				return -ENOMEM;
+			}
+
+			clk[sub_clk_cnt] = devm_clk_get(&pdev->dev,
+						clk_name);
+
+			if (IS_ERR(clk[sub_clk_cnt])) {
+				dev_err(&pdev->dev,
+					"Subsys clk read fail %ld\n",
+					PTR_ERR(clk[sub_clk_cnt]));
+				return PTR_ERR(clk[sub_clk_cnt]);
+			}
+			sub_clk_cnt++;
+		}
+	}
+
+	return sub_clk_cnt;
+}
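+
+/*
+ * A sketch of the matching rule above: with subsys_clk_prefix "isp",
+ * "clock-names" entries such as "isp-cam" or "isp-larb" would be taken,
+ * while other names are skipped. These names are hypothetical examples,
+ * not bindings defined here.
+ */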
+
 static void init_clks(struct platform_device *pdev, struct clk **clk)
 {
 	int i;
@@ -396,6 +562,17 @@
 		return ERR_CAST(scp->infracfg);
 	}
 
+	scp->smi_common = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+			"smi_comm");
+
+	if (scp->smi_common == ERR_PTR(-ENODEV)) {
+		scp->smi_common = NULL;
+	} else if (IS_ERR(scp->smi_common)) {
+		dev_err(&pdev->dev, "Cannot find smi_common controller: %ld\n",
+				PTR_ERR(scp->smi_common));
+		return ERR_CAST(scp->smi_common);
+	}
+
 	for (i = 0; i < num; i++) {
 		struct scp_domain *scpd = &scp->domains[i];
 		const struct scp_domain_data *data = &scp_domain_data[i];
@@ -417,22 +594,45 @@
 		struct scp_domain *scpd = &scp->domains[i];
 		struct generic_pm_domain *genpd = &scpd->genpd;
 		const struct scp_domain_data *data = &scp_domain_data[i];
+		int clk_cnt;
 
 		pd_data->domains[i] = genpd;
 		scpd->scp = scp;
 
 		scpd->data = data;
 
-		for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++) {
-			struct clk *c = clk[data->clk_id[j]];
+		if (data->clk_id[0]) {
+			WARN_ON(data->basic_clk_id[0]);
 
-			if (IS_ERR(c)) {
-				dev_err(&pdev->dev, "%s: clk unavailable\n",
-					data->name);
-				return ERR_CAST(c);
+			for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++) {
+				struct clk *c = clk[data->clk_id[j]];
+
+				if (IS_ERR(c)) {
+					dev_err(&pdev->dev,
+						"%s: clk unavailable\n",
+						data->name);
+					return ERR_CAST(c);
+				}
+
+				scpd->clk[j] = c;
 			}
+		} else if (data->basic_clk_id[0]) {
+			for (j = 0; j < MAX_CLKS &&
+					data->basic_clk_id[j]; j++)
+				scpd->clk[j] = devm_clk_get(&pdev->dev,
+						data->basic_clk_id[j]);
+		}
 
-			scpd->clk[j] = c;
+		if (data->subsys_clk_prefix) {
+			clk_cnt = init_subsys_clks(pdev,
+					data->subsys_clk_prefix,
+					scpd->subsys_clk);
+			if (clk_cnt < 0) {
+				dev_err(&pdev->dev,
+					"%s: subsys clk unavailable\n",
+					data->name);
+				return ERR_PTR(clk_cnt);
+			}
 		}
 
 		genpd->name = data->name;
@@ -842,6 +1042,84 @@
 };
 
 /*
+ * MT8167 power domain support
+ */
+#define PWR_STATUS_MFG_2D_MT8167	BIT(24)	/* MT8167 */
+#define PWR_STATUS_MFG_ASYNC_MT8167	BIT(25)	/* MT8167 */
+
+static const struct scp_domain_data scp_domain_data_mt8167[] = {
+	[MT8167_POWER_DOMAIN_DISP] = {
+		.name = "disp",
+		.sta_mask = PWR_STATUS_DISP,
+		.ctl_offs = SPM_DIS_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.bus_prot_mask = BIT(1) | BIT(11),
+		.clk_id = {CLK_MM},
+		.caps = MTK_SCPD_ACTIVE_WAKEUP,
+	},
+	[MT8167_POWER_DOMAIN_VDEC] = {
+		.name = "vdec",
+		.sta_mask = PWR_STATUS_VDEC,
+		.ctl_offs = SPM_VDE_PWR_CON,
+		.sram_pdn_bits = GENMASK(8, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.clk_id = {CLK_MM, CLK_VDEC},
+		.caps = MTK_SCPD_ACTIVE_WAKEUP,
+	},
+	[MT8167_POWER_DOMAIN_ISP] = {
+		.name = "isp",
+		.sta_mask = PWR_STATUS_ISP,
+		.ctl_offs = SPM_ISP_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(13, 12),
+		.clk_id = {CLK_MM},
+		.caps = MTK_SCPD_ACTIVE_WAKEUP,
+	},
+	[MT8167_POWER_DOMAIN_MFG_ASYNC] = {
+		.name = "mfg_async",
+		.sta_mask = PWR_STATUS_MFG_ASYNC_MT8167,
+		.ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+		.sram_pdn_bits = 0,
+		.sram_pdn_ack_bits = 0,
+		.bus_prot_mask = BIT(2) | BIT(5),
+		/* .axi_si1_way_en = BIT(7), XXX: FIXME */
+		.clk_id = {CLK_MFG, CLK_AXI_MFG},
+	},
+	[MT8167_POWER_DOMAIN_MFG_2D] = {
+		.name = "mfg_2d",
+		.sta_mask = PWR_STATUS_MFG_2D_MT8167,
+		.ctl_offs = SPM_MFG_2D_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = {CLK_NONE},
+	},
+	[MT8167_POWER_DOMAIN_MFG] = {
+		.name = "mfg",
+		.sta_mask = PWR_STATUS_MFG,
+		.ctl_offs = SPM_MFG_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = {CLK_NONE},
+	},
+	[MT8167_POWER_DOMAIN_CONN] = {
+		.name = "conn",
+		.sta_mask = PWR_STATUS_CONN,
+		.ctl_offs = SPM_CONN_PWR_CON,
+		.sram_pdn_bits = GENMASK(8, 8),
+		.sram_pdn_ack_bits = 0,
+		.bus_prot_mask = BIT(4) | BIT(8) | BIT(9),
+		.clk_id = {CLK_NONE},
+		.caps = MTK_SCPD_ACTIVE_WAKEUP,
+	},
+};
+
+static const struct scp_subdomain scp_subdomain_mt8167[] = {
+	{MT8167_POWER_DOMAIN_MFG_ASYNC, MT8167_POWER_DOMAIN_MFG_2D},
+	{MT8167_POWER_DOMAIN_MFG_2D, MT8167_POWER_DOMAIN_MFG},
+};
+
+/*
  * MT8173 power domain support
  */
 
@@ -940,6 +1218,217 @@
 	{MT8173_POWER_DOMAIN_MFG_2D, MT8173_POWER_DOMAIN_MFG},
 };
 
+/*
+ * MT8183 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt8183[] = {
+	[MT8183_POWER_DOMAIN_AUDIO] = {
+		.name = "audio",
+		.sta_mask = PWR_STATUS_AUDIO,
+		.ctl_offs = 0x0314,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.basic_clk_id = {"audio", "audio1", "audio2"},
+	},
+	[MT8183_POWER_DOMAIN_CONN] = {
+		.name = "conn",
+		.sta_mask = PWR_STATUS_CONN,
+		.ctl_offs = 0x032c,
+		.sram_pdn_bits = 0,
+		.sram_pdn_ack_bits = 0,
+		.bp_table = {
+			BUS_PROT(IFR_TYPE, 0x2a0, 0x2a4, 0, 0x228,
+				BIT(13) | BIT(14), BIT(13) | BIT(14)),
+		},
+	},
+	[MT8183_POWER_DOMAIN_MFG_ASYNC] = {
+		.name = "mfg_async",
+		.sta_mask = PWR_STATUS_MFG_ASYNC,
+		.ctl_offs = 0x0334,
+		.sram_pdn_bits = 0,
+		.sram_pdn_ack_bits = 0,
+		.basic_clk_id = {"mfg"},
+	},
+	[MT8183_POWER_DOMAIN_MFG] = {
+		.name = "mfg",
+		.sta_mask = PWR_STATUS_MFG,
+		.ctl_offs = 0x0338,
+		.sram_pdn_bits = GENMASK(8, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+	},
+	[MT8183_POWER_DOMAIN_MFG_CORE0] = {
+		.name = "mfg_core0",
+		.sta_mask = BIT(7),
+		.ctl_offs = 0x034c,
+		.sram_pdn_bits = GENMASK(8, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+	},
+	[MT8183_POWER_DOMAIN_MFG_CORE1] = {
+		.name = "mfg_core1",
+		.sta_mask = BIT(20),
+		.ctl_offs = 0x0310,
+		.sram_pdn_bits = GENMASK(8, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+	},
+	[MT8183_POWER_DOMAIN_MFG_2D] = {
+		.name = "mfg_2d",
+		.sta_mask = PWR_STATUS_MFG_2D,
+		.ctl_offs = 0x0348,
+		.sram_pdn_bits = GENMASK(8, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.bp_table = {
+			BUS_PROT(IFR_TYPE, 0x2a8, 0x2ac, 0, 0x258,
+				BIT(19) | BIT(20) | BIT(21),
+				BIT(19) | BIT(20) | BIT(21)),
+			BUS_PROT(IFR_TYPE, 0x2a0, 0x2a4, 0, 0x228,
+				BIT(21) | BIT(22), BIT(21) | BIT(22)),
+		},
+	},
+	[MT8183_POWER_DOMAIN_DISP] = {
+		.name = "disp",
+		.sta_mask = PWR_STATUS_DISP,
+		.ctl_offs = 0x030c,
+		.sram_pdn_bits = GENMASK(8, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.basic_clk_id = {"mm"},
+		.subsys_clk_prefix = "mm",
+		.bp_table = {
+			BUS_PROT(IFR_TYPE, 0x2a8, 0x2ac, 0, 0x258,
+				BIT(16) | BIT(17), BIT(16) | BIT(17)),
+			BUS_PROT(IFR_TYPE, 0x2a0, 0x2a4, 0, 0x228,
+				BIT(10) | BIT(11), BIT(10) | BIT(11)),
+			BUS_PROT(SMI_TYPE, 0x3c4, 0x3c8, 0, 0x3c0,
+				GENMASK(7, 0), GENMASK(7, 0)),
+		},
+	},
+	[MT8183_POWER_DOMAIN_CAM] = {
+		.name = "cam",
+		.sta_mask = BIT(25),
+		.ctl_offs = 0x0344,
+		.sram_pdn_bits = GENMASK(9, 8),
+		.sram_pdn_ack_bits = GENMASK(13, 12),
+		.basic_clk_id = {"cam"},
+		.subsys_clk_prefix = "cam",
+		.bp_table = {
+			BUS_PROT(IFR_TYPE, 0x2d4, 0x2d8, 0, 0x2ec,
+				BIT(4) | BIT(5) | BIT(9) | BIT(13),
+				BIT(4) | BIT(5) | BIT(9) | BIT(13)),
+			BUS_PROT(IFR_TYPE, 0x2a0, 0x2a4, 0, 0x228,
+				BIT(28), BIT(28)),
+			BUS_PROT(IFR_TYPE, 0x2d4, 0x2d8, 0, 0x2ec,
+				BIT(11), 0),
+			BUS_PROT(SMI_TYPE, 0x3c4, 0x3c8, 0, 0x3c0,
+				BIT(3) | BIT(4), BIT(3) | BIT(4)),
+		},
+	},
+	[MT8183_POWER_DOMAIN_ISP] = {
+		.name = "isp",
+		.sta_mask = PWR_STATUS_ISP,
+		.ctl_offs = 0x0308,
+		.sram_pdn_bits = GENMASK(9, 8),
+		.sram_pdn_ack_bits = GENMASK(13, 12),
+		.basic_clk_id = {"isp"},
+		.subsys_clk_prefix = "isp",
+		.bp_table = {
+			BUS_PROT(IFR_TYPE, 0x2d4, 0x2d8, 0, 0x2ec,
+				BIT(3) | BIT(8), BIT(3) | BIT(8)),
+			BUS_PROT(IFR_TYPE, 0x2d4, 0x2d8, 0, 0x2ec,
+				BIT(10), 0),
+			BUS_PROT(SMI_TYPE, 0x3c4, 0x3c8, 0, 0x3c0,
+				BIT(2), BIT(2)),
+		},
+	},
+	[MT8183_POWER_DOMAIN_VDEC] = {
+		.name = "vdec",
+		.sta_mask = BIT(31),
+		.ctl_offs = 0x0300,
+		.sram_pdn_bits = GENMASK(8, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.bp_table = {
+			BUS_PROT(SMI_TYPE, 0x3c4, 0x3c8, 0, 0x3c0,
+				BIT(7), BIT(7)),
+		},
+	},
+	[MT8183_POWER_DOMAIN_VENC] = {
+		.name = "venc",
+		.sta_mask = PWR_STATUS_VENC,
+		.ctl_offs = 0x0304,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.bp_table = {
+			BUS_PROT(SMI_TYPE, 0x3c4, 0x3c8, 0, 0x3c0,
+				BIT(1), BIT(1)),
+		},
+	},
+	[MT8183_POWER_DOMAIN_VPU_TOP] = {
+		.name = "vpu_top",
+		.sta_mask = BIT(26),
+		.ctl_offs = 0x0324,
+		.sram_pdn_bits = GENMASK(8, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.basic_clk_id = {"vpu", "vpu1"},
+		.subsys_clk_prefix = "vpu",
+		.bp_table = {
+			BUS_PROT(IFR_TYPE, 0x2d4, 0x2d8, 0, 0x2ec,
+				GENMASK(9, 6) | BIT(12),
+				GENMASK(9, 6) | BIT(12)),
+			BUS_PROT(IFR_TYPE, 0x2a0, 0x2a4, 0, 0x228,
+				BIT(27), BIT(27)),
+			BUS_PROT(IFR_TYPE, 0x2d4, 0x2d8, 0, 0x2ec,
+				BIT(10) | BIT(11), BIT(10) | BIT(11)),
+			BUS_PROT(SMI_TYPE, 0x3c4, 0x3c8, 0, 0x3c0,
+				BIT(5) | BIT(6), BIT(5) | BIT(6)),
+		},
+	},
+	[MT8183_POWER_DOMAIN_VPU_CORE0] = {
+		.name = "vpu_core0",
+		.sta_mask = BIT(27),
+		.ctl_offs = 0x33c,
+		.sram_iso_ctrl = true,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(13, 12),
+		.basic_clk_id = {"vpu2"},
+		.bp_table = {
+			BUS_PROT(IFR_TYPE, 0x2c4, 0x2c8, 0, 0x2e4,
+				BIT(6), BIT(6)),
+			BUS_PROT(IFR_TYPE, 0x2c4, 0x2c8, 0, 0x2e4,
+				BIT(0) | BIT(2) | BIT(4),
+				BIT(0) | BIT(2) | BIT(4)),
+		},
+	},
+	[MT8183_POWER_DOMAIN_VPU_CORE1] = {
+		.name = "vpu_core1",
+		.sta_mask = BIT(28),
+		.ctl_offs = 0x0340,
+		.sram_iso_ctrl = true,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(13, 12),
+		.basic_clk_id = {"vpu3"},
+		.bp_table = {
+			BUS_PROT(IFR_TYPE, 0x2c4, 0x2c8, 0, 0x2e4,
+				BIT(7), BIT(7)),
+			BUS_PROT(IFR_TYPE, 0x2c4, 0x2c8, 0, 0x2e4,
+				BIT(1) | BIT(3) | BIT(5),
+				BIT(1) | BIT(3) | BIT(5)),
+		},
+	},
+};
+
+static const struct scp_subdomain scp_subdomain_mt8183[] = {
+	{MT8183_POWER_DOMAIN_MFG_ASYNC, MT8183_POWER_DOMAIN_MFG},
+	{MT8183_POWER_DOMAIN_MFG, MT8183_POWER_DOMAIN_MFG_2D},
+	{MT8183_POWER_DOMAIN_MFG, MT8183_POWER_DOMAIN_MFG_CORE0},
+	{MT8183_POWER_DOMAIN_MFG, MT8183_POWER_DOMAIN_MFG_CORE1},
+	{MT8183_POWER_DOMAIN_DISP, MT8183_POWER_DOMAIN_CAM},
+	{MT8183_POWER_DOMAIN_DISP, MT8183_POWER_DOMAIN_ISP},
+	{MT8183_POWER_DOMAIN_DISP, MT8183_POWER_DOMAIN_VDEC},
+	{MT8183_POWER_DOMAIN_DISP, MT8183_POWER_DOMAIN_VENC},
+	{MT8183_POWER_DOMAIN_DISP, MT8183_POWER_DOMAIN_VPU_TOP},
+	{MT8183_POWER_DOMAIN_VPU_TOP, MT8183_POWER_DOMAIN_VPU_CORE0},
+	{MT8183_POWER_DOMAIN_VPU_TOP, MT8183_POWER_DOMAIN_VPU_CORE1},
+};
+
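
Each {origin, subdomain} pair above describes a parent/child link in the
genpd hierarchy. The wiring resembles the following sketch of the probe
path (it assumes the scp_subdomain fields are named origin and subdomain
and the SoC data pointer is called soc; error handling elided):

	const struct scp_subdomain *sd = soc->subdomains;

	for (i = 0; i < soc->num_subdomains; i++, sd++)
		pm_genpd_add_subdomain(pd_data->domains[sd->origin],
				       pd_data->domains[sd->subdomain]);
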
 static const struct scp_soc_data mt2701_data = {
 	.domains = scp_domain_data_mt2701,
 	.num_domains = ARRAY_SIZE(scp_domain_data_mt2701),
@@ -994,6 +1483,18 @@
 	.bus_prot_reg_update = true,
 };
 
+static const struct scp_soc_data mt8167_data = {
+	.domains = scp_domain_data_mt8167,
+	.num_domains = ARRAY_SIZE(scp_domain_data_mt8167),
+	.subdomains = scp_subdomain_mt8167,
+	.num_subdomains = ARRAY_SIZE(scp_subdomain_mt8167),
+	.regs = {
+		.pwr_sta_offs = SPM_PWR_STATUS,
+		.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND
+	},
+	.bus_prot_reg_update = true,
+};
+
 static const struct scp_soc_data mt8173_data = {
 	.domains = scp_domain_data_mt8173,
 	.num_domains = ARRAY_SIZE(scp_domain_data_mt8173),
@@ -1006,6 +1507,17 @@
 	.bus_prot_reg_update = true,
 };
 
+static const struct scp_soc_data mt8183_data = {
+	.domains = scp_domain_data_mt8183,
+	.num_domains = ARRAY_SIZE(scp_domain_data_mt8183),
+	.subdomains = scp_subdomain_mt8183,
+	.num_subdomains = ARRAY_SIZE(scp_subdomain_mt8183),
+	.regs = {
+		.pwr_sta_offs = 0x0180,
+		.pwr_sta2nd_offs = 0x0184
+	}
+};
+
 /*
  * scpsys driver init
  */
@@ -1027,9 +1539,15 @@
 		.compatible = "mediatek,mt7623a-scpsys",
 		.data = &mt7623a_data,
 	}, {
+		.compatible = "mediatek,mt8167-scpsys",
+		.data = &mt8167_data,
+	}, {
 		.compatible = "mediatek,mt8173-scpsys",
 		.data = &mt8173_data,
 	}, {
+		.compatible = "mediatek,mt8183-scpsys",
+		.data = &mt8183_data,
+	}, {
 		/* sentinel */
 	}
 };
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index f646436..248a236 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -30,6 +30,7 @@
 #include <linux/thermal.h>
 #include <linux/reset.h>
 #include <linux/types.h>
+#include <linux/power/mtk_svs.h>
 
 /* AUXADC Registers */
 #define AUXADC_CON1_SET_V	0x008
@@ -71,6 +72,15 @@
 
 #define TEMP_SPARE0		0x0f0
 
+#define TEMP_ADCPNP0_1          0x148
+#define TEMP_ADCPNP1_1          0x14c
+#define TEMP_ADCPNP2_1          0x150
+#define TEMP_MSR0_1             0x190
+#define TEMP_MSR1_1             0x194
+#define TEMP_MSR2_1             0x198
+#define TEMP_ADCPNP3_1          0x1b4
+#define TEMP_MSR3_1             0x1B8
+
 #define PTPCORESEL		0x400
 
 #define TEMP_MONCTL1_PERIOD_UNIT(x)	((x) & 0x3ff)
@@ -105,24 +115,42 @@
 /* The number of sensing points per bank */
 #define MT8173_NUM_SENSORS_PER_ZONE	4
 
+/* The number of controllers in the MT8173 */
+#define MT8173_NUM_CONTROLLER		1
+
+/* The calibration coefficient of the sensor */
+#define MT8173_CALIBRATION	165
+
 /*
  * Layout of the fuses providing the calibration data
- * These macros could be used for MT8173, MT2701, and MT2712.
+ * These macros can be used for MT8183, MT8173, MT2701, and MT2712.
+ * MT8183 has 6 sensors and needs 6 VTS calibration data.
  * MT8173 has 5 sensors and needs 5 VTS calibration data.
  * MT2701 has 3 sensors and needs 3 VTS calibration data.
  * MT2712 has 4 sensors and needs 4 VTS calibration data.
  */
-#define MT8173_CALIB_BUF0_VALID		BIT(0)
-#define MT8173_CALIB_BUF1_ADC_GE(x)	(((x) >> 22) & 0x3ff)
-#define MT8173_CALIB_BUF0_VTS_TS1(x)	(((x) >> 17) & 0x1ff)
-#define MT8173_CALIB_BUF0_VTS_TS2(x)	(((x) >> 8) & 0x1ff)
-#define MT8173_CALIB_BUF1_VTS_TS3(x)	(((x) >> 0) & 0x1ff)
-#define MT8173_CALIB_BUF2_VTS_TS4(x)	(((x) >> 23) & 0x1ff)
-#define MT8173_CALIB_BUF2_VTS_TSABB(x)	(((x) >> 14) & 0x1ff)
-#define MT8173_CALIB_BUF0_DEGC_CALI(x)	(((x) >> 1) & 0x3f)
-#define MT8173_CALIB_BUF0_O_SLOPE(x)	(((x) >> 26) & 0x3f)
-#define MT8173_CALIB_BUF0_O_SLOPE_SIGN(x)	(((x) >> 7) & 0x1)
-#define MT8173_CALIB_BUF1_ID(x)	(((x) >> 9) & 0x1)
+#define CALIB_BUF0_VALID		BIT(0)
+#define CALIB_BUF1_ADC_GE(x)		(((x) >> 22) & 0x3ff)
+#define CALIB_BUF0_VTS_TS1(x)		(((x) >> 17) & 0x1ff)
+#define CALIB_BUF0_VTS_TS2(x)		(((x) >> 8) & 0x1ff)
+#define CALIB_BUF1_VTS_TS3(x)		(((x) >> 0) & 0x1ff)
+#define CALIB_BUF2_VTS_TS4(x)		(((x) >> 23) & 0x1ff)
+#define CALIB_BUF2_VTS_TS5(x)		(((x) >> 5) & 0x1ff)
+#define CALIB_BUF2_VTS_TSABB(x)		(((x) >> 14) & 0x1ff)
+#define CALIB_BUF0_DEGC_CALI(x)		(((x) >> 1) & 0x3f)
+#define CALIB_BUF0_O_SLOPE(x)		(((x) >> 26) & 0x3f)
+#define CALIB_BUF0_O_SLOPE_SIGN(x)	(((x) >> 7) & 0x1)
+#define CALIB_BUF1_ID(x)		(((x) >> 9) & 0x1)
+
+enum {
+	VTS1,
+	VTS2,
+	VTS3,
+	VTS4,
+	VTS5,
+	VTSABB,
+	MAX_NUM_VTS,
+};
 
 /* MT2701 thermal sensors */
 #define MT2701_TS1	0
@@ -138,6 +166,12 @@
 /* The number of sensing points per bank */
 #define MT2701_NUM_SENSORS_PER_ZONE	3
 
+/* The number of controllers in the MT2701 */
+#define MT2701_NUM_CONTROLLER		1
+
+/* The calibration coefficient of the sensor */
+#define MT2701_CALIBRATION	165
+
 /* MT2712 thermal sensors */
 #define MT2712_TS1	0
 #define MT2712_TS2	1
@@ -153,14 +187,58 @@
 /* The number of sensing points per bank */
 #define MT2712_NUM_SENSORS_PER_ZONE	4
 
+/* The number of controllers in the MT2712 */
+#define MT2712_NUM_CONTROLLER		1
+
+/* The calibration coefficient of the sensor */
+#define MT2712_CALIBRATION	165
+
 #define MT7622_TEMP_AUXADC_CHANNEL	11
 #define MT7622_NUM_SENSORS		1
 #define MT7622_NUM_ZONES		1
 #define MT7622_NUM_SENSORS_PER_ZONE	1
 #define MT7622_TS1	0
+#define MT7622_NUM_CONTROLLER		1
+
+/* The maximum number of banks */
+#define MAX_NUM_ZONES		8
+
+/* The calibration coefficient of the sensor */
+#define MT7622_CALIBRATION	165
+
+/* MT8183 thermal sensors */
+#define MT8183_TS1	0
+#define MT8183_TS2	1
+#define MT8183_TS3	2
+#define MT8183_TS4	3
+#define MT8183_TS5	4
+#define MT8183_TSABB	5
+
+/* AUXADC channel 11 is used for the temperature sensors */
+#define MT8183_TEMP_AUXADC_CHANNEL	11
+
+/* The total number of temperature sensors in the MT8183 */
+#define MT8183_NUM_SENSORS	6
+
+/* The number of banks in the MT8183 */
+#define MT8183_NUM_ZONES	1
+
+/* The number of sensing points per bank */
+#define MT8183_NUM_SENSORS_PER_ZONE	6
+
+/* The number of controllers in the MT8183 */
+#define MT8183_NUM_CONTROLLER		2
+
+/* The calibration coefficient of the sensor */
+#define MT8183_CALIBRATION	153
 
 struct mtk_thermal;
 
+struct mtk_thermal_zone {
+	struct mtk_thermal *mt;
+	int id;
+};
+
 struct thermal_bank_cfg {
 	unsigned int num_sensors;
 	const int *sensors;
@@ -175,10 +253,15 @@
 	s32 num_banks;
 	s32 num_sensors;
 	s32 auxadc_channel;
+	const int *vts_index;
 	const int *sensor_mux_values;
 	const int *msr;
 	const int *adcpnp;
-	struct thermal_bank_cfg bank_data[];
+	const int cali_val;
+	const int num_controller;
+	const int *controller_offset;
+	bool need_switch_bank;
+	struct thermal_bank_cfg bank_data[MAX_NUM_ZONES];
 };
 
 struct mtk_thermal {
@@ -188,16 +271,37 @@
 	struct clk *clk_peri_therm;
 	struct clk *clk_auxadc;
 	/* lock: for getting and putting banks */
-	struct mutex lock;
+	unsigned long flags;
 
 	/* Calibration values */
 	s32 adc_ge;
 	s32 degc_cali;
 	s32 o_slope;
-	s32 vts[MT8173_NUM_SENSORS];
+	s32 vts[MAX_NUM_VTS];
 
 	const struct mtk_thermal_data *conf;
-	struct mtk_thermal_bank banks[];
+	struct mtk_thermal_bank banks[MAX_NUM_ZONES];
+};
+
+/* MT8183 thermal sensor data */
+static const int mt8183_bank_data[MT8183_NUM_SENSORS] = {
+	MT8183_TS1, MT8183_TS2, MT8183_TS3, MT8183_TS4, MT8183_TS5, MT8183_TSABB
+};
+
+static const int mt8183_msr[MT8183_NUM_SENSORS_PER_ZONE] = {
+	TEMP_MSR0_1, TEMP_MSR1_1, TEMP_MSR2_1, TEMP_MSR1, TEMP_MSR0, TEMP_MSR3_1
+};
+
+static const int mt8183_adcpnp[MT8183_NUM_SENSORS_PER_ZONE] = {
+	TEMP_ADCPNP0_1, TEMP_ADCPNP1_1, TEMP_ADCPNP2_1,
+	TEMP_ADCPNP1, TEMP_ADCPNP0, TEMP_ADCPNP3_1
+};
+
+static const int mt8183_mux_values[MT8183_NUM_SENSORS] = { 0, 1, 2, 3, 4, 0 };
+static const int mt8183_tc_offset[MT8183_NUM_CONTROLLER] = {0x0, 0x100};
+
+static const int mt8183_vts_index[MT8183_NUM_SENSORS] = {
+	VTS1, VTS2, VTS3, VTS4, VTS5, VTSABB
 };
 
 /* MT8173 thermal sensor data */
@@ -217,6 +321,11 @@
 };
 
 static const int mt8173_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 };
+static const int mt8173_tc_offset[MT8173_NUM_CONTROLLER] = { 0x0, };
+
+static const int mt8173_vts_index[MT8173_NUM_SENSORS] = {
+	VTS1, VTS2, VTS3, VTS4, VTSABB
+};
 
 /* MT2701 thermal sensor data */
 static const int mt2701_bank_data[MT2701_NUM_SENSORS] = {
@@ -232,6 +341,11 @@
 };
 
 static const int mt2701_mux_values[MT2701_NUM_SENSORS] = { 0, 1, 16 };
+static const int mt2701_tc_offset[MT2701_NUM_CONTROLLER] = { 0x0, };
+
+static const int mt2701_vts_index[MT2701_NUM_SENSORS] = {
+	VTS1, VTS2, VTS3
+};
 
 /* MT2712 thermal sensor data */
 static const int mt2712_bank_data[MT2712_NUM_SENSORS] = {
@@ -247,14 +361,21 @@
 };
 
 static const int mt2712_mux_values[MT2712_NUM_SENSORS] = { 0, 1, 2, 3 };
+static const int mt2712_tc_offset[MT2712_NUM_CONTROLLER] = { 0x0, };
+
+static const int mt2712_vts_index[MT2712_NUM_SENSORS] = {
+	VTS1, VTS2, VTS3, VTS4
+};
 
 /* MT7622 thermal sensor data */
 static const int mt7622_bank_data[MT7622_NUM_SENSORS] = { MT7622_TS1, };
 static const int mt7622_msr[MT7622_NUM_SENSORS_PER_ZONE] = { TEMP_MSR0, };
 static const int mt7622_adcpnp[MT7622_NUM_SENSORS_PER_ZONE] = { TEMP_ADCPNP0, };
 static const int mt7622_mux_values[MT7622_NUM_SENSORS] = { 0, };
+static const int mt7622_vts_index[MT7622_NUM_SENSORS] = { VTS1 };
+static const int mt7622_tc_offset[MT7622_NUM_CONTROLLER] = { 0x0, };
 
-/**
+/*
  * The MT8173 thermal controller has four banks. Each bank can read up to
  * four temperature sensors simultaneously. The MT8173 has a total of 5
  * temperature sensors. We use each bank to measure a certain area of the
@@ -271,6 +392,11 @@
 	.auxadc_channel = MT8173_TEMP_AUXADC_CHANNEL,
 	.num_banks = MT8173_NUM_ZONES,
 	.num_sensors = MT8173_NUM_SENSORS,
+	.vts_index = mt8173_vts_index,
+	.cali_val = MT8173_CALIBRATION,
+	.num_controller = MT8173_NUM_CONTROLLER,
+	.controller_offset = mt8173_tc_offset,
+	.need_switch_bank = true,
 	.bank_data = {
 		{
 			.num_sensors = 2,
@@ -291,7 +417,7 @@
 	.sensor_mux_values = mt8173_mux_values,
 };
 
-/**
+/*
  * The MT2701 thermal controller has one bank, which can read up to
  * three temperature sensors simultaneously. The MT2701 has a total of 3
  * temperature sensors.
@@ -305,6 +431,11 @@
 	.auxadc_channel = MT2701_TEMP_AUXADC_CHANNEL,
 	.num_banks = 1,
 	.num_sensors = MT2701_NUM_SENSORS,
+	.vts_index = mt2701_vts_index,
+	.cali_val = MT2701_CALIBRATION,
+	.num_controller = MT2701_NUM_CONTROLLER,
+	.controller_offset = mt2701_tc_offset,
+	.need_switch_bank = true,
 	.bank_data = {
 		{
 			.num_sensors = 3,
@@ -316,7 +447,7 @@
 	.sensor_mux_values = mt2701_mux_values,
 };
 
-/**
+/*
  * The MT2712 thermal controller has one bank, which can read up to
  * four temperature sensors simultaneously. The MT2712 has a total of 4
  * temperature sensors.
@@ -330,6 +461,11 @@
 	.auxadc_channel = MT2712_TEMP_AUXADC_CHANNEL,
 	.num_banks = 1,
 	.num_sensors = MT2712_NUM_SENSORS,
+	.vts_index = mt2712_vts_index,
+	.cali_val = MT2712_CALIBRATION,
+	.num_controller = MT2712_NUM_CONTROLLER,
+	.controller_offset = mt2712_tc_offset,
+	.need_switch_bank = true,
 	.bank_data = {
 		{
 			.num_sensors = 4,
@@ -349,6 +485,11 @@
 	.auxadc_channel = MT7622_TEMP_AUXADC_CHANNEL,
 	.num_banks = MT7622_NUM_ZONES,
 	.num_sensors = MT7622_NUM_SENSORS,
+	.vts_index = mt7622_vts_index,
+	.cali_val = MT7622_CALIBRATION,
+	.num_controller = MT7622_NUM_CONTROLLER,
+	.controller_offset = mt7622_tc_offset,
+	.need_switch_bank = true,
 	.bank_data = {
 		{
 			.num_sensors = 1,
@@ -360,9 +501,42 @@
 	.sensor_mux_values = mt7622_mux_values,
 };
 
+/*
+ * The MT8183 thermal controller has one bank for the current SW framework.
+ * The MT8183 has a total of 6 temperature sensors.
+ * There are two thermal controllers driving the six sensors:
+ * the first one binds two sensors, and the other binds four.
+ * The thermal core only gets the maximum temperature of all sensors, so
+ * the bank concept wouldn't be necessary here. However, the SVS (Smart
+ * Voltage Scaling) unit makes its decisions based on the same bank
+ * data, and this indeed needs the temperatures of the individual banks
+ * for making better decisions.
+ */
+static const struct mtk_thermal_data mt8183_thermal_data = {
+	.auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL,
+	.num_banks = MT8183_NUM_ZONES,
+	.num_sensors = MT8183_NUM_SENSORS,
+	.vts_index = mt8183_vts_index,
+	.cali_val = MT8183_CALIBRATION,
+	.num_controller = MT8183_NUM_CONTROLLER,
+	.controller_offset = mt8183_tc_offset,
+	.need_switch_bank = false,
+	.bank_data = {
+		{
+			.num_sensors = 6,
+			.sensors = mt8183_bank_data,
+		},
+	},
+
+	.msr = mt8183_msr,
+	.adcpnp = mt8183_adcpnp,
+	.sensor_mux_values = mt8183_mux_values,
+};
+
 /**
  * raw_to_mcelsius - convert a raw ADC value to mcelsius
- * @mt:		The thermal controller
+ * @mt:	The thermal controller
+ * @sensno:	sensor number
  * @raw:	raw ADC value
  *
  * This converts the raw ADC value to mcelsius using the SoC specific
@@ -375,7 +549,7 @@
 	raw &= 0xfff;
 
 	tmp = 203450520 << 3;
-	tmp /= 165 + mt->o_slope;
+	tmp /= mt->conf->cali_val + mt->o_slope;
 	tmp /= 10000 + mt->adc_ge;
 	tmp *= raw - mt->vts[sensno] - 3350;
 	tmp >>= 3;
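
For a feel of the scale, a worked example with hypothetical calibration
values (cali_val = 165, o_slope = 0, adc_ge = 512, vts[sensno] = 260,
degc_cali = 40) and raw = 3400:

	tmp = (203450520 << 3) / 165 / 10512;	/* = 938 */
	tmp *= 3400 - 260 - 3350;		/* = -196980 */
	tmp >>= 3;				/* = -24623 */
	/* with the usual degc_cali * 500 - tmp tail: 20000 + 24623 = 44623 mC,
	 * i.e. about 44.6 degrees C */
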
@@ -395,12 +569,14 @@
 	struct mtk_thermal *mt = bank->mt;
 	u32 val;
 
-	mutex_lock(&mt->lock);
+	if (mt->conf->need_switch_bank) {
+		mt->flags = claim_mtk_svs_lock();
 
-	val = readl(mt->thermal_base + PTPCORESEL);
-	val &= ~0xf;
-	val |= bank->id;
-	writel(val, mt->thermal_base + PTPCORESEL);
+		val = readl(mt->thermal_base + PTPCORESEL);
+		val &= ~0xf;
+		val |= bank->id;
+		writel(val, mt->thermal_base + PTPCORESEL);
+	}
 }
 
 /**
@@ -413,7 +589,8 @@
 {
 	struct mtk_thermal *mt = bank->mt;
 
-	mutex_unlock(&mt->lock);
+	if (mt->conf->need_switch_bank)
+		release_mtk_svs_lock(mt->flags);
 }
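
The mutex is replaced by the cross-driver SVS lock (from
<linux/power/mtk_svs.h>, included above), presumably because the SVS unit
shares the PTPCORESEL bank mux. It pairs like an irqsave spinlock; usage
sketch:

	unsigned long flags;

	flags = claim_mtk_svs_lock();
	/* ... switch bank via PTPCORESEL and read that bank's sensors ... */
	release_mtk_svs_lock(flags);
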
 
 /**
@@ -444,7 +621,7 @@
 		 * not immediately shut down.
 		 */
 		if (temp > 200000)
-			temp = 0;
+			temp = -EACCES;
 
 		if (temp > max)
 			max = temp;
@@ -455,7 +632,8 @@
 
 static int mtk_read_temp(void *data, int *temperature)
 {
-	struct mtk_thermal *mt = data;
+	struct mtk_thermal_zone *tz = data;
+	struct mtk_thermal *mt = tz->mt;
 	int i;
 	int tempmax = INT_MIN;
 
@@ -468,30 +646,66 @@
 
 		mtk_thermal_put_bank(bank);
 	}
-
 	*temperature = tempmax;
 
 	return 0;
 }
 
+static int mtk_read_sensor_temp(void *data, int *temperature)
+{
+	struct mtk_thermal_zone *tz = data;
+	struct mtk_thermal *mt = tz->mt;
+	const struct mtk_thermal_data *conf = mt->conf;
+	int id = tz->id - 1;
+	int temp = INT_MIN;
+	u32 raw;
+
+	if (id < 0)
+		return -EACCES;
+
+	raw = readl(mt->thermal_base + conf->msr[id]);
+
+	temp = raw_to_mcelsius(mt, id, raw);
+
+	/*
+	 * The first read of a sensor often contains very high bogus
+	 * temperature value. Filter these out so that the system does
+	 * not immediately shut down.
+	 */
+	if (temp > 200000)
+		return -EACCES;
+
+	*temperature = temp;
+	return 0;
+}
+
 static const struct thermal_zone_of_device_ops mtk_thermal_ops = {
 	.get_temp = mtk_read_temp,
 };
 
+static const struct thermal_zone_of_device_ops mtk_thermal_sensor_ops = {
+	.get_temp = mtk_read_sensor_temp,
+};
+
 static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
-				  u32 apmixed_phys_base, u32 auxadc_phys_base)
+				  u32 apmixed_phys_base, u32 auxadc_phys_base,
+				  int ctrl_id)
 {
 	struct mtk_thermal_bank *bank = &mt->banks[num];
 	const struct mtk_thermal_data *conf = mt->conf;
 	int i;
 
+	int offset = mt->conf->controller_offset[ctrl_id];
+	void __iomem *controller_base = mt->thermal_base + offset;
+
 	bank->id = num;
 	bank->mt = mt;
 
 	mtk_thermal_get_bank(bank);
 
 	/* bus clock 66M counting unit is 12 * 15.15ns * 256 = 46.540us */
-	writel(TEMP_MONCTL1_PERIOD_UNIT(12), mt->thermal_base + TEMP_MONCTL1);
+	writel(TEMP_MONCTL1_PERIOD_UNIT(12), controller_base + TEMP_MONCTL1);
 
 	/*
 	 * filt interval is 1 * 46.540us = 46.54us,
@@ -499,21 +713,21 @@
 	 */
 	writel(TEMP_MONCTL2_FILTER_INTERVAL(1) |
 			TEMP_MONCTL2_SENSOR_INTERVAL(429),
-			mt->thermal_base + TEMP_MONCTL2);
+			controller_base + TEMP_MONCTL2);
 
 	/* poll is set to 10u */
 	writel(TEMP_AHBPOLL_ADC_POLL_INTERVAL(768),
-	       mt->thermal_base + TEMP_AHBPOLL);
+	       controller_base + TEMP_AHBPOLL);
 
 	/* temperature sampling control, 1 sample */
-	writel(0x0, mt->thermal_base + TEMP_MSRCTL0);
+	writel(0x0, controller_base + TEMP_MSRCTL0);
 
 	/* exceed this polling time, IRQ would be inserted */
-	writel(0xffffffff, mt->thermal_base + TEMP_AHBTO);
+	writel(0xffffffff, controller_base + TEMP_AHBTO);
 
 	/* number of interrupts per event, 1 is enough */
-	writel(0x0, mt->thermal_base + TEMP_MONIDET0);
-	writel(0x0, mt->thermal_base + TEMP_MONIDET1);
+	writel(0x0, controller_base + TEMP_MONIDET0);
+	writel(0x0, controller_base + TEMP_MONIDET1);
 
 	/*
 	 * The MT8173 thermal controller does not have its own ADC. Instead it
@@ -528,44 +742,44 @@
 	 * this value will be stored to TEMP_PNPMUXADDR (TEMP_SPARE0)
 	 * automatically by hw
 	 */
-	writel(BIT(conf->auxadc_channel), mt->thermal_base + TEMP_ADCMUX);
+	writel(BIT(conf->auxadc_channel), controller_base + TEMP_ADCMUX);
 
 	/* AHB address for auxadc mux selection */
 	writel(auxadc_phys_base + AUXADC_CON1_CLR_V,
-	       mt->thermal_base + TEMP_ADCMUXADDR);
+	       controller_base + TEMP_ADCMUXADDR);
 
 	/* AHB address for pnp sensor mux selection */
 	writel(apmixed_phys_base + APMIXED_SYS_TS_CON1,
-	       mt->thermal_base + TEMP_PNPMUXADDR);
+	       controller_base + TEMP_PNPMUXADDR);
 
 	/* AHB value for auxadc enable */
-	writel(BIT(conf->auxadc_channel), mt->thermal_base + TEMP_ADCEN);
+	writel(BIT(conf->auxadc_channel), controller_base + TEMP_ADCEN);
 
 	/* AHB address for auxadc enable (channel 0 immediate mode selected) */
 	writel(auxadc_phys_base + AUXADC_CON1_SET_V,
-	       mt->thermal_base + TEMP_ADCENADDR);
+	       controller_base + TEMP_ADCENADDR);
 
 	/* AHB address for auxadc valid bit */
 	writel(auxadc_phys_base + AUXADC_DATA(conf->auxadc_channel),
-	       mt->thermal_base + TEMP_ADCVALIDADDR);
+	       controller_base + TEMP_ADCVALIDADDR);
 
 	/* AHB address for auxadc voltage output */
 	writel(auxadc_phys_base + AUXADC_DATA(conf->auxadc_channel),
-	       mt->thermal_base + TEMP_ADCVOLTADDR);
+	       controller_base + TEMP_ADCVOLTADDR);
 
 	/* read valid & voltage are at the same register */
-	writel(0x0, mt->thermal_base + TEMP_RDCTRL);
+	writel(0x0, controller_base + TEMP_RDCTRL);
 
 	/* indicate where the valid bit is */
 	writel(TEMP_ADCVALIDMASK_VALID_HIGH | TEMP_ADCVALIDMASK_VALID_POS(12),
-	       mt->thermal_base + TEMP_ADCVALIDMASK);
+	       controller_base + TEMP_ADCVALIDMASK);
 
 	/* no shift */
-	writel(0x0, mt->thermal_base + TEMP_ADCVOLTAGESHIFT);
+	writel(0x0, controller_base + TEMP_ADCVOLTAGESHIFT);
 
 	/* enable auxadc mux write transaction */
 	writel(TEMP_ADCWRITECTRL_ADC_MUX_WRITE,
-	       mt->thermal_base + TEMP_ADCWRITECTRL);
+		controller_base + TEMP_ADCWRITECTRL);
 
 	for (i = 0; i < conf->bank_data[num].num_sensors; i++)
 		writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]],
@@ -573,11 +787,11 @@
 		       conf->adcpnp[conf->bank_data[num].sensors[i]]);
 
 	writel((1 << conf->bank_data[num].num_sensors) - 1,
-	       mt->thermal_base + TEMP_MONCTL0);
+	       controller_base + TEMP_MONCTL0);
 
 	writel(TEMP_ADCWRITECTRL_ADC_PNP_WRITE |
 	       TEMP_ADCWRITECTRL_ADC_MUX_WRITE,
-	       mt->thermal_base + TEMP_ADCWRITECTRL);
+	       controller_base + TEMP_ADCWRITECTRL);
 
 	mtk_thermal_put_bank(bank);
 }
@@ -629,19 +843,40 @@
 		goto out;
 	}
 
-	if (buf[0] & MT8173_CALIB_BUF0_VALID) {
-		mt->adc_ge = MT8173_CALIB_BUF1_ADC_GE(buf[1]);
-		mt->vts[MT8173_TS1] = MT8173_CALIB_BUF0_VTS_TS1(buf[0]);
-		mt->vts[MT8173_TS2] = MT8173_CALIB_BUF0_VTS_TS2(buf[0]);
-		mt->vts[MT8173_TS3] = MT8173_CALIB_BUF1_VTS_TS3(buf[1]);
-		mt->vts[MT8173_TS4] = MT8173_CALIB_BUF2_VTS_TS4(buf[2]);
-		mt->vts[MT8173_TSABB] = MT8173_CALIB_BUF2_VTS_TSABB(buf[2]);
-		mt->degc_cali = MT8173_CALIB_BUF0_DEGC_CALI(buf[0]);
-		if (MT8173_CALIB_BUF1_ID(buf[1]) &
-		    MT8173_CALIB_BUF0_O_SLOPE_SIGN(buf[0]))
-			mt->o_slope = -MT8173_CALIB_BUF0_O_SLOPE(buf[0]);
+	if (buf[0] & CALIB_BUF0_VALID) {
+		mt->adc_ge = CALIB_BUF1_ADC_GE(buf[1]);
+
+		for (i = 0; i < mt->conf->num_sensors; i++) {
+			switch (mt->conf->vts_index[i]) {
+			case VTS1:
+				mt->vts[VTS1] = CALIB_BUF0_VTS_TS1(buf[0]);
+				break;
+			case VTS2:
+				mt->vts[VTS2] = CALIB_BUF0_VTS_TS2(buf[0]);
+				break;
+			case VTS3:
+				mt->vts[VTS3] = CALIB_BUF1_VTS_TS3(buf[1]);
+				break;
+			case VTS4:
+				mt->vts[VTS4] = CALIB_BUF2_VTS_TS4(buf[2]);
+				break;
+			case VTS5:
+				mt->vts[VTS5] = CALIB_BUF2_VTS_TS5(buf[2]);
+				break;
+			case VTSABB:
+				mt->vts[VTSABB] = CALIB_BUF2_VTS_TSABB(buf[2]);
+				break;
+			default:
+				break;
+			}
+		}
+
+		mt->degc_cali = CALIB_BUF0_DEGC_CALI(buf[0]);
+		if (CALIB_BUF1_ID(buf[1]) &
+		    CALIB_BUF0_O_SLOPE_SIGN(buf[0]))
+			mt->o_slope = -CALIB_BUF0_O_SLOPE(buf[0]);
 		else
-			mt->o_slope = MT8173_CALIB_BUF0_O_SLOPE(buf[0]);
+			mt->o_slope = CALIB_BUF0_O_SLOPE(buf[0]);
 	} else {
 		dev_info(dev, "Device not calibrated, using default calibration values\n");
 	}
@@ -668,6 +903,10 @@
 	{
 		.compatible = "mediatek,mt7622-thermal",
 		.data = (void *)&mt7622_thermal_data,
+	},
+	{
+		.compatible = "mediatek,mt8183-thermal",
+		.data = (void *)&mt8183_thermal_data,
 	}, {
 	},
 };
@@ -675,12 +914,13 @@
 
 static int mtk_thermal_probe(struct platform_device *pdev)
 {
-	int ret, i;
+	int ret, i, ctrl_id;
 	struct device_node *auxadc, *apmixedsys, *np = pdev->dev.of_node;
 	struct mtk_thermal *mt;
 	struct resource *res;
 	u64 auxadc_phys_base, apmixed_phys_base;
 	struct thermal_zone_device *tzdev;
+	struct mtk_thermal_zone *tz;
 
 	mt = devm_kzalloc(&pdev->dev, sizeof(*mt), GFP_KERNEL);
 	if (!mt)
@@ -705,8 +945,6 @@
 	if (ret)
 		return ret;
 
-	mutex_init(&mt->lock);
-
 	mt->dev = &pdev->dev;
 
 	auxadc = of_parse_phandle(np, "mediatek,auxadc", 0);
@@ -755,17 +993,31 @@
 		goto err_disable_clk_auxadc;
 	}
 
-	for (i = 0; i < mt->conf->num_banks; i++)
-		mtk_thermal_init_bank(mt, i, apmixed_phys_base,
-				      auxadc_phys_base);
+	for (ctrl_id = 0; ctrl_id < mt->conf->num_controller; ctrl_id++)
+		for (i = 0; i < mt->conf->num_banks; i++)
+			mtk_thermal_init_bank(mt, i, apmixed_phys_base,
+					      auxadc_phys_base, ctrl_id);
 
 	platform_set_drvdata(pdev, mt);
 
-	tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
-						     &mtk_thermal_ops);
-	if (IS_ERR(tzdev)) {
-		ret = PTR_ERR(tzdev);
-		goto err_disable_clk_peri_therm;
+	for (i = 0; i < mt->conf->num_sensors + 1; i++) {
+		tz = devm_kzalloc(&pdev->dev, sizeof(*tz), GFP_KERNEL);
+		if (!tz) {
+			ret = -ENOMEM;
+			goto err_disable_clk_peri_therm;
+		}
+
+		tz->mt = mt;
+		tz->id = i;
+
+		tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, i,
+				tz, (i == 0) ?
+				&mtk_thermal_ops : &mtk_thermal_sensor_ops);
+
+		if (IS_ERR(tzdev)) {
+			if (PTR_ERR(tzdev) != -EACCES) {
+				ret = PTR_ERR(tzdev);
+				goto err_disable_clk_peri_therm;
+			}
+		}
 	}
 
 	return 0;
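
One-line map of the zone indexing established by the loop above:

	/*
	 * sensor id 0              -> mtk_thermal_ops (max of all banks)
	 * sensor id 1..num_sensors -> mtk_thermal_sensor_ops, which reads
	 *                             conf->msr[id - 1] directly
	 */
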
@@ -799,6 +1051,7 @@
 
 module_platform_driver(mtk_thermal_driver);
 
+MODULE_AUTHOR("Michael Kao <michael.kao@mediatek.com>");
 MODULE_AUTHOR("Louis Yu <louis.yu@mediatek.com>");
 MODULE_AUTHOR("Dawei Chien <dawei.chien@mediatek.com>");
 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 70e6c95..751d427 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -35,8 +35,7 @@
 
 if USB_SUPPORT
 
-config USB_COMMON
-	tristate
+source "drivers/usb/common/Kconfig"
 
 config USB_ARCH_HAS_HCD
 	def_bool y
@@ -173,36 +172,4 @@
 
 source "drivers/usb/roles/Kconfig"
 
-config USB_LED_TRIG
-	bool "USB LED Triggers"
-	depends on LEDS_CLASS && LEDS_TRIGGERS
-	select USB_COMMON
-	help
-	  This option adds LED triggers for USB host and/or gadget activity.
-
-	  Say Y here if you are working on a system with led-class supported
-	  LEDs and you want to use them as activity indicators for USB host or
-	  gadget.
-
-config USB_ULPI_BUS
-	tristate "USB ULPI PHY interface support"
-	select USB_COMMON
-	help
-	  UTMI+ Low Pin Interface (ULPI) is specification for a commonly used
-	  USB 2.0 PHY interface. The ULPI specification defines a standard set
-	  of registers that can be used to detect the vendor and product which
-	  allows ULPI to be handled as a bus. This module is the driver for that
-	  bus.
-
-	  The ULPI interfaces (the buses) are registered by the drivers for USB
-	  controllers which support ULPI register access and have ULPI PHY
-	  attached to them. The ULPI PHY drivers themselves are normal PHY
-	  drivers.
-
-	  ULPI PHYs provide often functions such as ADP sensing/probing (OTG
-	  protocol) and USB charger detection.
-
-	  To compile this driver as a module, choose M here: the module will
-	  be called ulpi.
-
 endif # USB_SUPPORT
diff --git a/drivers/usb/common/Kconfig b/drivers/usb/common/Kconfig
new file mode 100644
index 0000000..d611477
--- /dev/null
+++ b/drivers/usb/common/Kconfig
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config USB_COMMON
+	tristate
+
+config USB_LED_TRIG
+	bool "USB LED Triggers"
+	depends on LEDS_CLASS && LEDS_TRIGGERS
+	select USB_COMMON
+	help
+	  This option adds LED triggers for USB host and/or gadget activity.
+
+	  Say Y here if you are working on a system with led-class supported
+	  LEDs and you want to use them as activity indicators for USB host or
+	  gadget.
+
+config USB_ULPI_BUS
+	tristate "USB ULPI PHY interface support"
+	select USB_COMMON
+	help
+	  UTMI+ Low Pin Interface (ULPI) is a specification for a commonly used
+	  USB 2.0 PHY interface. The ULPI specification defines a standard set
+	  of registers that can be used to detect the vendor and product which
+	  allows ULPI to be handled as a bus. This module is the driver for that
+	  bus.
+
+	  The ULPI interfaces (the buses) are registered by the drivers for USB
+	  controllers which support ULPI register access and have ULPI PHY
+	  attached to them. The ULPI PHY drivers themselves are normal PHY
+	  drivers.
+
+	  ULPI PHYs often provide functions such as ADP sensing/probing (OTG
+	  protocol) and USB charger detection.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called ulpi.
+
+config USB_CONN_GPIO
+	tristate "USB GPIO Based Connection Detection Driver"
+	depends on GPIOLIB
+	select USB_ROLE_SWITCH
+	help
+	  The driver supports USB role switching between host and device via
+	  GPIO-based USB cable detection. It is typically used when an input
+	  GPIO detects the USB ID pin, while another input GPIO may detect the
+	  Vbus pin at the same time. It can also enable/disable the device
+	  when only a Vbus-detection GPIO is present.
+
+	  To compile the driver as a module, choose M here: the module will
+	  be called usb-conn-gpio.
diff --git a/drivers/usb/common/Makefile b/drivers/usb/common/Makefile
index 0a7c45e..8227ffc 100644
--- a/drivers/usb/common/Makefile
+++ b/drivers/usb/common/Makefile
@@ -7,5 +7,6 @@
 usb-common-y			  += common.o
 usb-common-$(CONFIG_USB_LED_TRIG) += led.o
 
+obj-$(CONFIG_USB_CONN_GPIO)	+= usb-conn-gpio.o
 obj-$(CONFIG_USB_OTG_FSM) += usb-otg-fsm.o
 obj-$(CONFIG_USB_ULPI_BUS)	+= ulpi.o
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
new file mode 100644
index 0000000..f94db90
--- /dev/null
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB GPIO Based Connection Detection Driver
+ *
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * Some code borrowed from drivers/extcon/extcon-usb-gpio.c
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/role.h>
+
+#define USB_GPIO_DEB_MS		1000	/* ms */
+#define USB_GPIO_DEB_US		((USB_GPIO_DEB_MS) * 1000)	/* us */
+
+#define USB_CONN_IRQF	\
+	(IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT)
+
+struct usb_conn_info {
+	struct device *dev;
+	struct usb_role_switch *role_sw;
+	enum usb_role last_role;
+	struct regulator *vbus;
+	struct delayed_work dw_det;
+	unsigned long debounce_jiffies;
+
+	struct gpio_desc *id_gpiod;
+	struct gpio_desc *vbus_gpiod;
+	int id_irq;
+	int vbus_irq;
+};
+
+/*
+ * "DEVICE" = VBUS and "HOST" = !ID, so we have:
+ * "DEVICE" and "HOST" can't both be active at the same time,
+ * so if "HOST" is active (i.e. ID is 0) we keep "DEVICE" inactive
+ * even if VBUS is on.
+ *
+ *  Role          |   ID  |  VBUS
+ * ------------------------------------
+ *  [1] DEVICE    |   H   |   H
+ *  [2] NONE      |   H   |   L
+ *  [3] HOST      |   L   |   H
+ *  [4] HOST      |   L   |   L
+ *
+ * In case we have only one of these signals:
+ * - VBUS only - we want to distinguish between [1] and [2], so ID is always 1
+ * - ID only - we want to distinguish between [1] and [4], so VBUS = ID
+ */
+static void usb_conn_detect_cable(struct work_struct *work)
+{
+	struct usb_conn_info *info;
+	enum usb_role role;
+	int id, vbus, ret;
+
+	info = container_of(to_delayed_work(work),
+			    struct usb_conn_info, dw_det);
+
+	/* check ID and VBUS */
+	id = info->id_gpiod ?
+		gpiod_get_value_cansleep(info->id_gpiod) : 1;
+	vbus = info->vbus_gpiod ?
+		gpiod_get_value_cansleep(info->vbus_gpiod) : id;
+
+	if (!id)
+		role = USB_ROLE_HOST;
+	else if (vbus)
+		role = USB_ROLE_DEVICE;
+	else
+		role = USB_ROLE_NONE;
+
+	dev_dbg(info->dev, "role %d/%d, gpios: id %d, vbus %d\n",
+		info->last_role, role, id, vbus);
+
+	if (info->last_role == role) {
+		dev_warn(info->dev, "repeated role: %d\n", role);
+		return;
+	}
+
+	if (info->last_role == USB_ROLE_HOST)
+		regulator_disable(info->vbus);
+
+	ret = usb_role_switch_set_role(info->role_sw, role);
+	if (ret)
+		dev_err(info->dev, "failed to set role: %d\n", ret);
+
+	if (role == USB_ROLE_HOST) {
+		ret = regulator_enable(info->vbus);
+		if (ret)
+			dev_err(info->dev, "enable vbus regulator failed\n");
+	}
+
+	info->last_role = role;
+
+	dev_dbg(info->dev, "vbus regulator is %s\n",
+		regulator_is_enabled(info->vbus) ? "enabled" : "disabled");
+}
+
+static void usb_conn_queue_dwork(struct usb_conn_info *info,
+				 unsigned long delay)
+{
+	queue_delayed_work(system_power_efficient_wq, &info->dw_det, delay);
+}
+
+static irqreturn_t usb_conn_isr(int irq, void *dev_id)
+{
+	struct usb_conn_info *info = dev_id;
+
+	usb_conn_queue_dwork(info, info->debounce_jiffies);
+
+	return IRQ_HANDLED;
+}
+
+static int usb_conn_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct usb_conn_info *info;
+	int ret = 0;
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = dev;
+	info->id_gpiod = devm_gpiod_get_optional(dev, "id", GPIOD_IN);
+	if (IS_ERR(info->id_gpiod))
+		return PTR_ERR(info->id_gpiod);
+
+	info->vbus_gpiod = devm_gpiod_get_optional(dev, "vbus", GPIOD_IN);
+	if (IS_ERR(info->vbus_gpiod))
+		return PTR_ERR(info->vbus_gpiod);
+
+	if (!info->id_gpiod && !info->vbus_gpiod) {
+		dev_err(dev, "failed to get gpios\n");
+		return -ENODEV;
+	}
+
+	if (info->id_gpiod)
+		ret = gpiod_set_debounce(info->id_gpiod, USB_GPIO_DEB_US);
+	if (!ret && info->vbus_gpiod)
+		ret = gpiod_set_debounce(info->vbus_gpiod, USB_GPIO_DEB_US);
+	if (ret < 0)
+		info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEB_MS);
+
+	INIT_DELAYED_WORK(&info->dw_det, usb_conn_detect_cable);
+
+	info->vbus = devm_regulator_get(dev, "vbus");
+	if (IS_ERR(info->vbus)) {
+		dev_err(dev, "failed to get vbus\n");
+		return PTR_ERR(info->vbus);
+	}
+
+	info->role_sw = usb_role_switch_get(dev);
+	if (IS_ERR(info->role_sw)) {
+		if (PTR_ERR(info->role_sw) != -EPROBE_DEFER)
+			dev_err(dev, "failed to get role switch\n");
+
+		return PTR_ERR(info->role_sw);
+	}
+
+	if (info->id_gpiod) {
+		info->id_irq = gpiod_to_irq(info->id_gpiod);
+		if (info->id_irq < 0) {
+			dev_err(dev, "failed to get ID IRQ\n");
+			ret = info->id_irq;
+			goto put_role_sw;
+		}
+
+		ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
+						usb_conn_isr, USB_CONN_IRQF,
+						pdev->name, info);
+		if (ret < 0) {
+			dev_err(dev, "failed to request ID IRQ\n");
+			goto put_role_sw;
+		}
+	}
+
+	if (info->vbus_gpiod) {
+		info->vbus_irq = gpiod_to_irq(info->vbus_gpiod);
+		if (info->vbus_irq < 0) {
+			dev_err(dev, "failed to get VBUS IRQ\n");
+			ret = info->vbus_irq;
+			goto put_role_sw;
+		}
+
+		ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
+						usb_conn_isr, USB_CONN_IRQF,
+						pdev->name, info);
+		if (ret < 0) {
+			dev_err(dev, "failed to request VBUS IRQ\n");
+			goto put_role_sw;
+		}
+	}
+
+	platform_set_drvdata(pdev, info);
+
+	/* Perform initial detection */
+	usb_conn_queue_dwork(info, 0);
+
+	return 0;
+
+put_role_sw:
+	usb_role_switch_put(info->role_sw);
+	return ret;
+}
+
+static int usb_conn_remove(struct platform_device *pdev)
+{
+	struct usb_conn_info *info = platform_get_drvdata(pdev);
+
+	cancel_delayed_work_sync(&info->dw_det);
+
+	if (info->last_role == USB_ROLE_HOST)
+		regulator_disable(info->vbus);
+
+	usb_role_switch_put(info->role_sw);
+
+	return 0;
+}
+
+static int __maybe_unused usb_conn_suspend(struct device *dev)
+{
+	struct usb_conn_info *info = dev_get_drvdata(dev);
+
+	if (info->id_gpiod)
+		disable_irq(info->id_irq);
+	if (info->vbus_gpiod)
+		disable_irq(info->vbus_irq);
+
+	pinctrl_pm_select_sleep_state(dev);
+
+	return 0;
+}
+
+static int __maybe_unused usb_conn_resume(struct device *dev)
+{
+	struct usb_conn_info *info = dev_get_drvdata(dev);
+
+	pinctrl_pm_select_default_state(dev);
+
+	if (info->id_gpiod)
+		enable_irq(info->id_irq);
+	if (info->vbus_gpiod)
+		enable_irq(info->vbus_irq);
+
+	usb_conn_queue_dwork(info, 0);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(usb_conn_pm_ops,
+			 usb_conn_suspend, usb_conn_resume);
+
+static const struct of_device_id usb_conn_dt_match[] = {
+	{ .compatible = "gpio-usb-b-connector", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, usb_conn_dt_match);
+
+static struct platform_driver usb_conn_driver = {
+	.probe		= usb_conn_probe,
+	.remove		= usb_conn_remove,
+	.driver		= {
+		.name	= "usb-conn-gpio",
+		.pm	= &usb_conn_pm_ops,
+		.of_match_table = usb_conn_dt_match,
+	},
+};
+
+module_platform_driver(usb_conn_driver);
+
+MODULE_AUTHOR("Chunfeng Yun <chunfeng.yun@mediatek.com>");
+MODULE_DESCRIPTION("USB GPIO based connection detection driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 60987c7..b18a6ba 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -206,19 +206,6 @@
 	return xhci_mtk_host_enable(mtk);
 }
 
-/* ignore the error if the clock does not exist */
-static struct clk *optional_clk_get(struct device *dev, const char *id)
-{
-	struct clk *opt_clk;
-
-	opt_clk = devm_clk_get(dev, id);
-	/* ignore error number except EPROBE_DEFER */
-	if (IS_ERR(opt_clk) && (PTR_ERR(opt_clk) != -EPROBE_DEFER))
-		opt_clk = NULL;
-
-	return opt_clk;
-}
-
 static int xhci_mtk_clks_get(struct xhci_hcd_mtk *mtk)
 {
 	struct device *dev = mtk->dev;
@@ -229,15 +216,19 @@
 		return PTR_ERR(mtk->sys_clk);
 	}
 
-	mtk->ref_clk = optional_clk_get(dev, "ref_ck");
+	mtk->xhci_clk = devm_clk_get_optional(dev, "xhci_ck");
+	if (IS_ERR(mtk->xhci_clk))
+		return PTR_ERR(mtk->xhci_clk);
+
+	mtk->ref_clk = devm_clk_get_optional(dev, "ref_ck");
 	if (IS_ERR(mtk->ref_clk))
 		return PTR_ERR(mtk->ref_clk);
 
-	mtk->mcu_clk = optional_clk_get(dev, "mcu_ck");
+	mtk->mcu_clk = devm_clk_get_optional(dev, "mcu_ck");
 	if (IS_ERR(mtk->mcu_clk))
 		return PTR_ERR(mtk->mcu_clk);
 
-	mtk->dma_clk = optional_clk_get(dev, "dma_ck");
+	mtk->dma_clk = devm_clk_get_optional(dev, "dma_ck");
 	return PTR_ERR_OR_ZERO(mtk->dma_clk);
 }
 
@@ -257,6 +248,12 @@
 		goto sys_clk_err;
 	}
 
+	ret = clk_prepare_enable(mtk->xhci_clk);
+	if (ret) {
+		dev_err(mtk->dev, "failed to enable xhci_clk\n");
+		goto xhci_clk_err;
+	}
+
 	ret = clk_prepare_enable(mtk->mcu_clk);
 	if (ret) {
 		dev_err(mtk->dev, "failed to enable mcu_clk\n");
@@ -274,6 +271,8 @@
 dma_clk_err:
 	clk_disable_unprepare(mtk->mcu_clk);
 mcu_clk_err:
+	clk_disable_unprepare(mtk->xhci_clk);
+xhci_clk_err:
 	clk_disable_unprepare(mtk->sys_clk);
 sys_clk_err:
 	clk_disable_unprepare(mtk->ref_clk);
@@ -285,6 +284,7 @@
 {
 	clk_disable_unprepare(mtk->dma_clk);
 	clk_disable_unprepare(mtk->mcu_clk);
+	clk_disable_unprepare(mtk->xhci_clk);
 	clk_disable_unprepare(mtk->sys_clk);
 	clk_disable_unprepare(mtk->ref_clk);
 }
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index cc59d80..b53d4dc 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -116,6 +116,7 @@
 	struct regulator *vusb33;
 	struct regulator *vbus;
 	struct clk *sys_clk;	/* sys and mac clock */
+	struct clk *xhci_clk;
 	struct clk *ref_clk;
 	struct clk *mcu_clk;
 	struct clk *dma_clk;
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
index fe58904..2f2e878 100644
--- a/drivers/usb/mtu3/Kconfig
+++ b/drivers/usb/mtu3/Kconfig
@@ -42,6 +42,7 @@
 	bool "Dual Role mode"
 	depends on ((USB=y || USB=USB_MTU3) && (USB_GADGET=y || USB_GADGET=USB_MTU3))
 	depends on (EXTCON=y || EXTCON=USB_MTU3)
+	select USB_ROLE_SWITCH
 	help
 	  This is the default mode of working of MTU3 controller where
 	  both host and gadget features are enabled.
diff --git a/drivers/usb/mtu3/Makefile b/drivers/usb/mtu3/Makefile
index 4a97158..3bf8cbc 100644
--- a/drivers/usb/mtu3/Makefile
+++ b/drivers/usb/mtu3/Makefile
@@ -2,10 +2,17 @@
 
 ccflags-$(CONFIG_USB_MTU3_DEBUG)	+= -DDEBUG
 
+# define_trace.h needs to know how to find our header
+CFLAGS_mtu3_trace.o	:= -I$(src)
+
 obj-$(CONFIG_USB_MTU3)	+= mtu3.o
 
 mtu3-y	:= mtu3_plat.o
 
+ifneq ($(CONFIG_TRACING),)
+	mtu3-y	+= mtu3_trace.o
+endif
+
 ifneq ($(filter y,$(CONFIG_USB_MTU3_HOST) $(CONFIG_USB_MTU3_DUAL_ROLE)),)
 	mtu3-y	+= mtu3_host.o
 endif
@@ -17,3 +24,7 @@
 ifneq ($(CONFIG_USB_MTU3_DUAL_ROLE),)
 	mtu3-y	+= mtu3_dr.o
 endif
+
+ifneq ($(CONFIG_DEBUG_FS),)
+	mtu3-y	+= mtu3_debugfs.o
+endif
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index 87823ac0..cc5774d 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -63,6 +63,15 @@
 #define MTU3_U2_IP_SLOT_DEFAULT 1
 
 /**
+ * IP TRUNK version
+ * from version 0x1003 onwards, USB3 Gen2 is supported; two changes
+ * affect the driver:
+ * 1. the MAXPKT and MULTI bit layout of TXCSR1 and RXCSR1 is adjusted,
+ *    and is not backward compatible
+ * 2. the QMU supports an extended buffer length
+ */
+#define MTU3_TRUNK_VERS_1003	0x1003
+
+/**
  * Normally the device works on HS or SS, to simplify fifo management,
  * devide fifo into some 512B parts, use bitmap to manage it; And
  * 128 bits size of bitmap is large enough, that means it can manage
@@ -135,45 +144,33 @@
  *	The format of TX GPD is a little different from RX one.
  *	And the size of GPD is 16 bytes.
  *
- * @flag:
+ * @dw0_info:
  *	bit0: Hardware Own (HWO)
  *	bit1: Buffer Descriptor Present (BDP), always 0, BD is not supported
  *	bit2: Bypass (BPS), 1: HW skips this GPD if HWO = 1
+ *	bit6: [EL] Zero Length Packet (ZLP), moved from @dw3_info[29]
  *	bit7: Interrupt On Completion (IOC)
- * @chksum: This is used to validate the contents of this GPD;
- *	If TXQ_CS_EN / RXQ_CS_EN bit is set, an interrupt is issued
- *	when checksum validation fails;
- *	Checksum value is calculated over the 16 bytes of the GPD by default;
- * @data_buf_len (RX ONLY): This value indicates the length of
- *	the assigned data buffer
- * @tx_ext_addr (TX ONLY): [3:0] are 4 extension bits of @buffer,
- *	[7:4] are 4 extension bits of @next_gpd
+ *	bit[31:16]: ([EL] bit[31:12]) allowed data buffer length (RX ONLY),
+ *		the buffer length of the data to receive
+ *	bit[23:16]: ([EL] bit[31:24]) extension address (TX ONLY),
+ *		lower 4 bits are extension bits of @buffer,
+ *		upper 4 bits are extension bits of @next_gpd
  * @next_gpd: Physical address of the next GPD
  * @buffer: Physical address of the data buffer
- * @buf_len:
- *	(TX): This value indicates the length of the assigned data buffer
- *	(RX): The total length of data received
- * @ext_len: reserved
- * @rx_ext_addr(RX ONLY): [3:0] are 4 extension bits of @buffer,
- *	[7:4] are 4 extension bits of @next_gpd
- * @ext_flag:
- *	bit5 (TX ONLY): Zero Length Packet (ZLP),
+ * @dw3_info:
+ *	bit[15:0]: ([EL] bit[19:0]) data buffer length,
+ *		(TX): the buffer length of the data to transmit
+ *		(RX): The total length of data received
+ *	bit[23:16]: ([EL] bit[31:24]) extension address (RX ONLY),
+ *		lower 4 bits are extension bits of @buffer,
+ *		upper 4 bits are extension bits of @next_gpd
+ *	bit29: ([EL] abandoned) Zero Length Packet (ZLP) (TX ONLY)
  */
 struct qmu_gpd {
-	__u8 flag;
-	__u8 chksum;
-	union {
-		__le16 data_buf_len;
-		__le16 tx_ext_addr;
-	};
+	__le32 dw0_info;
 	__le32 next_gpd;
 	__le32 buffer;
-	__le16 buf_len;
-	union {
-		__u8 ext_len;
-		__u8 rx_ext_addr;
-	};
-	__u8 ext_flag;
+	__le32 dw3_info;
 } __packed;
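
Given the two layouts described in the comment, length extraction now
depends on the IP generation. Hypothetical helpers derived from the stated
bit ranges (names are illustrative, not the driver's; GENMASK comes from
<linux/bits.h>):

	#define GPD_DATA_LEN_EL(x)	((x) & GENMASK(19, 0))	/* [EL] IP */
	#define GPD_DATA_LEN_OG(x)	((x) & GENMASK(15, 0))	/* original IP */
	#define GPD_DATA_LEN(gen2, x)	\
		((gen2) ? GPD_DATA_LEN_EL(x) : GPD_DATA_LEN_OG(x))
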
 
 /**
@@ -202,6 +199,9 @@
 * @id_nb : notifier for iddig(idpin) detection
 * @id_work : work of iddig detection notifier
 * @id_event : event of iddig detecion notifier
+* @role_sw : USB Role Switch used to support dual-role switch; it can't
+*		be used together with extcon, which is deprecated.
+* @role_sw_used : true when the USB Role Switch is used.
 * @is_u3_drd: whether port0 supports usb3.0 dual-role device or not
 * @manual_drd_enabled: it's true when supports dual-role device by debugfs
 *		to switch host/device modes depending on user input.
@@ -215,6 +215,8 @@
 	struct notifier_block id_nb;
 	struct work_struct id_work;
 	unsigned long id_event;
+	struct usb_role_switch *role_sw;
+	bool role_sw_used;
 	bool is_u3_drd;
 	bool manual_drd_enabled;
 };
@@ -229,11 +231,14 @@
  * @dma_clk: dma_bus_ck clock for AXI bus etc
  * @dr_mode: works in which mode:
  *		host only, device only or dual-role mode
+ * @otg_srp_reqd: used for SRP request handling.
+ * @otg_hnp_reqd: used for HNP request handling.
  * @u2_ports: number of usb2.0 host ports
  * @u3_ports: number of usb3.0 host ports
  * @u3p_dis_msk: mask of disabling usb3 ports, for example, bit0==1 to
  *		disable u3port0, bit1==1 to disable u3port1,... etc
  * @dbgfs_root: only used when supports manual dual-role switch via debugfs
+ * @force_vbus: without a Vbus PIN, SW needs to set force_vbus state for device
  * @uwk_en: it's true when supports remote wakeup in host mode
  * @uwk: syscon including usb wakeup glue layer between SSUSB IP and SPM
  * @uwk_reg_base: the base address of the wakeup glue layer in @uwk
@@ -255,11 +260,14 @@
 	/* otg */
 	struct otg_switch_mtk otg_switch;
 	enum usb_dr_mode dr_mode;
+	bool otg_srp_reqd;
+	bool otg_hnp_reqd;
 	bool is_host;
 	int u2_ports;
 	int u3_ports;
 	int u3p_dis_msk;
 	struct dentry *dbgfs_root;
+	bool force_vbus;
 	/* usb wakeup for host mode */
 	bool uwk_en;
 	struct regmap *uwk;
@@ -316,6 +324,7 @@
  * @may_wakeup: means device's remote wakeup is enabled
  * @is_self_powered: is reported in device status and the config descriptor
  * @delayed_status: true when function drivers ask for delayed status
+ * @gen2cp: compatible with USB3 Gen2 IP
  * @ep0_req: dummy request used while handling standard USB requests
  *		for GET_STATUS and SET_SEL
  * @setup_buf: ep0 response buffer for GET_STATUS and SET_SEL requests
@@ -356,6 +365,7 @@
 	unsigned u2_enable:1;
 	unsigned is_u3_ip:1;
 	unsigned delayed_status:1;
+	unsigned gen2cp:1;
 
 	u8 address;
 	u8 test_mode_nr;
@@ -416,6 +426,7 @@
 }
 
 int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks);
+void ssusb_set_force_vbus(struct ssusb_mtk *ssusb, bool vbus_on);
 struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
 void mtu3_free_request(struct usb_ep *ep, struct usb_request *req);
 void mtu3_req_complete(struct mtu3_ep *mep,
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index 8606935..7d6b839 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -16,6 +16,8 @@
 #include <linux/platform_device.h>
 
 #include "mtu3.h"
+#include "mtu3_debug.h"
+#include "mtu3_trace.h"
 
 static int ep_fifo_alloc(struct mtu3_ep *mep, u32 seg_size)
 {
@@ -97,7 +99,7 @@
 
 	mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
 
-	if (mtu->is_u3_ip) {
+	if (mtu->is_u3_ip && mtu->max_speed >= USB_SPEED_SUPER) {
 		check_clk = SSUSB_U3_MAC_RST_B_STS;
 		mtu3_clrbits(ibase, SSUSB_U3_CTRL(0),
 			(SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN |
@@ -180,7 +182,7 @@
 	mtu3_writel(mbase, U3D_LV1IESR, value);
 
 	/* Enable U2 common USB interrupts */
-	value = SUSPEND_INTR | RESUME_INTR | RESET_INTR | LPM_RESUME_INTR;
+	value = SUSPEND_INTR | RESUME_INTR | RESET_INTR;
 	mtu3_writel(mbase, U3D_COMMON_USB_INTR_ENABLE, value);
 
 	if (mtu->is_u3_ip) {
@@ -299,6 +301,7 @@
 			int interval, int burst, int mult)
 {
 	void __iomem *mbase = mtu->mac_base;
+	bool gen2cp = mtu->gen2cp;
 	int epnum = mep->epnum;
 	u32 csr0, csr1, csr2;
 	int fifo_sgsz, fifo_addr;
@@ -319,7 +322,7 @@
 
 		num_pkts = (burst + 1) * (mult + 1) - 1;
 		csr1 = TX_SS_BURST(burst) | TX_SLOT(mep->slot);
-		csr1 |= TX_MAX_PKT(num_pkts) | TX_MULT(mult);
+		csr1 |= TX_MAX_PKT(gen2cp, num_pkts) | TX_MULT(gen2cp, mult);
 
 		csr2 = TX_FIFOADDR(fifo_addr >> 4);
 		csr2 |= TX_FIFOSEGSIZE(fifo_sgsz);
@@ -355,7 +358,7 @@
 
 		num_pkts = (burst + 1) * (mult + 1) - 1;
 		csr1 = RX_SS_BURST(burst) | RX_SLOT(mep->slot);
-		csr1 |= RX_MAX_PKT(num_pkts) | RX_MULT(mult);
+		csr1 |= RX_MAX_PKT(gen2cp, num_pkts) | RX_MULT(gen2cp, mult);
 
 		csr2 = RX_FIFOADDR(fifo_addr >> 4);
 		csr2 |= RX_FIFOSEGSIZE(fifo_sgsz);
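
Because the Gen2 ("[EL]") IP moves the MAXPKT and MULTI fields within
TXCSR1/RXCSR1, the macros now take the gen2cp flag and select the layout at
run time. A sketch of the shape such a macro takes (the bit positions here
are illustrative only, not the real register layout):

	#define TX_MAX_PKT(g2c, x)	\
		((g2c) ? (((x) & 0x7f) << 24) : (((x) & 0x3f) << 16))
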
@@ -484,7 +487,7 @@
 	mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
 
 	/* Enable EP0 interrupt */
-	mtu3_writel(mtu->mac_base, U3D_EPIESR, EP0ISR);
+	mtu3_writel(mtu->mac_base, U3D_EPIESR, EP0ISR | SETUPENDISR);
 }
 
 static int mtu3_mem_alloc(struct mtu3 *mtu)
@@ -586,6 +589,8 @@
 		mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN);
 		/* automatically build U2 link when U3 detect fail */
 		mtu3_setbits(mbase, U3D_USB2_TEST_MODE, U2U3_AUTO_SWITCH);
+		/* auto clear SOFT_CONN when clear USB3_EN if work as HS */
+		mtu3_setbits(mbase, U3D_U3U2_SWITCH_CTRL, SOFTCON_CLR_AUTO_EN);
 	}
 
 	mtu3_set_speed(mtu);
@@ -594,10 +599,15 @@
 	mtu3_clrbits(mbase, U3D_LINK_RESET_INFO, WTCHRP_MSK);
 	/* U2/U3 detected by HW */
 	mtu3_writel(mbase, U3D_DEVICE_CONF, 0);
-	/* enable QMU 16B checksum */
-	mtu3_setbits(mbase, U3D_QCR0, QMU_CS16B_EN);
 	/* vbus detected by HW */
 	mtu3_clrbits(mbase, U3D_MISC_CTRL, VBUS_FRC_EN | VBUS_ON);
+	/* enable automatical HWRW from L1 */
+	mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, LPM_HRWE);
+
+	ssusb_set_force_vbus(mtu->ssusb, true);
+	/* use new QMU format when HW version >= 0x1003 */
+	if (mtu->gen2cp)
+		mtu3_writel(mbase, U3D_QFCR, ~0x0);
 }
 
 static irqreturn_t mtu3_link_isr(struct mtu3 *mtu)
@@ -648,6 +658,8 @@
 		break;
 	}
 	dev_dbg(mtu->dev, "%s: %s\n", __func__, usb_speed_string(udev_speed));
+	mtu3_dbg_trace(mtu->dev, "link speed %s",
+		       usb_speed_string(udev_speed));
 
 	mtu->g.speed = udev_speed;
 	mtu->g.ep0->maxpacket = maxpkt;
@@ -670,6 +682,7 @@
 	ltssm &= mtu3_readl(mbase, U3D_LTSSM_INTR_ENABLE);
 	mtu3_writel(mbase, U3D_LTSSM_INTR, ltssm); /* W1C */
 	dev_dbg(mtu->dev, "=== LTSSM[%x] ===\n", ltssm);
+	trace_mtu3_u3_ltssm_isr(ltssm);
 
 	if (ltssm & (HOT_RST_INTR | WARM_RST_INTR))
 		mtu3_gadget_reset(mtu);
@@ -700,6 +713,7 @@
 	u2comm &= mtu3_readl(mbase, U3D_COMMON_USB_INTR_ENABLE);
 	mtu3_writel(mbase, U3D_COMMON_USB_INTR, u2comm); /* W1C */
 	dev_dbg(mtu->dev, "=== U2COMM[%x] ===\n", u2comm);
+	trace_mtu3_u2_common_isr(u2comm);
 
 	if (u2comm & SUSPEND_INTR)
 		mtu3_gadget_suspend(mtu);
@@ -710,12 +724,6 @@
 	if (u2comm & RESET_INTR)
 		mtu3_gadget_reset(mtu);
 
-	if (u2comm & LPM_RESUME_INTR) {
-		if (!(mtu3_readl(mbase, U3D_POWER_MANAGEMENT) & LPM_HRWE))
-			mtu3_setbits(mbase, U3D_USB20_MISC_CONTROL,
-				     LPM_U3_ACK_EN);
-	}
-
 	return IRQ_HANDLED;
 }
 
@@ -753,13 +761,15 @@
 
 static int mtu3_hw_init(struct mtu3 *mtu)
 {
-	u32 cap_dev;
+	u32 value;
 	int ret;
 
-	mtu->hw_version = mtu3_readl(mtu->ippc_base, U3D_SSUSB_HW_ID);
+	value = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_TRUNK_VERS);
+	mtu->hw_version = IP_TRUNK_VERS(value);
+	mtu->gen2cp = !!(mtu->hw_version >= MTU3_TRUNK_VERS_1003);
 
-	cap_dev = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_DEV_CAP);
-	mtu->is_u3_ip = !!SSUSB_IP_DEV_U3_PORT_NUM(cap_dev);
+	value = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_DEV_CAP);
+	mtu->is_u3_ip = !!SSUSB_IP_DEV_U3_PORT_NUM(value);
 
 	dev_info(mtu->dev, "IP version 0x%x(%s IP)\n", mtu->hw_version,
 		mtu->is_u3_ip ? "U3" : "U2");
@@ -897,6 +907,8 @@
 	if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
 		mtu3_stop(mtu);
 
+	ssusb_dev_debugfs_init(ssusb);
+
 	dev_dbg(dev, " %s() done...\n", __func__);
 
 	return 0;
diff --git a/drivers/usb/mtu3/mtu3_debug.h b/drivers/usb/mtu3/mtu3_debug.h
new file mode 100644
index 0000000..5039637
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_debug.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mtu3_debug.h - debug header
+ *
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#ifndef __MTU3_DEBUG_H__
+#define __MTU3_DEBUG_H__
+
+#include <linux/debugfs.h>
+
+#define MTU3_DEBUGFS_NAME_LEN 32
+
+struct mtu3_regset {
+	char name[MTU3_DEBUGFS_NAME_LEN];
+	struct debugfs_regset32 regset;
+	size_t nregs;
+};
+
+struct mtu3_file_map {
+	const char *name;
+	int (*show)(struct seq_file *s, void *unused);
+};
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+void ssusb_dev_debugfs_init(struct ssusb_mtk *ssusb);
+void ssusb_debugfs_create_root(struct ssusb_mtk *ssusb);
+void ssusb_debugfs_remove_root(struct ssusb_mtk *ssusb);
+void ssusb_dr_debugfs_init(struct ssusb_mtk *ssusb);
+#else
+static inline void ssusb_dev_debugfs_init(struct ssusb_mtk *ssusb) {}
+static inline void ssusb_debugfs_create_root(struct ssusb_mtk *ssusb) {}
+static inline void ssusb_debugfs_remove_root(struct ssusb_mtk *ssusb) {}
+static inline void ssusb_dr_debugfs_init(struct ssusb_mtk *ssusb) {}
+#endif /* CONFIG_DEBUG_FS */
+
+#if IS_ENABLED(CONFIG_TRACING)
+void mtu3_dbg_trace(struct device *dev, const char *fmt, ...);
+
+#else
+static inline void mtu3_dbg_trace(struct device *dev, const char *fmt, ...) {}
+
+#endif /* CONFIG_TRACING */
+
+#endif /* __MTU3_DEBUG_H__ */
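
The header above uses the standard kernel idiom for optional facilities: real
prototypes when the option is enabled, empty static inlines otherwise, so
callers never need #ifdef guards. A minimal sketch of the same idiom, with a
hypothetical feature_init() used purely for illustration:

	#include <linux/kconfig.h>

	struct foo;

	#if IS_ENABLED(CONFIG_DEBUG_FS)
	void feature_init(struct foo *foo);	/* real version lives in a .c file */
	#else
	static inline void feature_init(struct foo *foo) {}	/* compiles away */
	#endif

Because the disabled variant is an empty static inline, the compiler drops the
call sites entirely, which is why mtu3_dr.c can call ssusb_dr_debugfs_init()
unconditionally.
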
diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c
new file mode 100644
index 0000000..8ca2d37
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_debugfs.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mtu3_debugfs.c - debugfs interface
+ *
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#include <linux/uaccess.h>
+
+#include "mtu3.h"
+#include "mtu3_debug.h"
+#include "mtu3_dr.h"
+
+#define dump_register(nm)		\
+{					\
+	.name = __stringify(nm),	\
+	.offset = U3D_ ##nm,		\
+}
+
+#define dump_prb_reg(nm, os)	\
+{				\
+	.name = nm,		\
+	.offset = os,		\
+}
+
+static const struct debugfs_reg32 mtu3_ippc_regs[] = {
+	dump_register(SSUSB_IP_PW_CTRL0),
+	dump_register(SSUSB_IP_PW_CTRL1),
+	dump_register(SSUSB_IP_PW_CTRL2),
+	dump_register(SSUSB_IP_PW_CTRL3),
+	dump_register(SSUSB_OTG_STS),
+	dump_register(SSUSB_IP_XHCI_CAP),
+	dump_register(SSUSB_IP_DEV_CAP),
+	dump_register(SSUSB_U3_CTRL_0P),
+	dump_register(SSUSB_U2_CTRL_0P),
+	dump_register(SSUSB_HW_ID),
+	dump_register(SSUSB_HW_SUB_ID),
+	dump_register(SSUSB_IP_SPARE0),
+};
+
+static const struct debugfs_reg32 mtu3_dev_regs[] = {
+	dump_register(LV1ISR),
+	dump_register(LV1IER),
+	dump_register(EPISR),
+	dump_register(EPIER),
+	dump_register(EP0CSR),
+	dump_register(RXCOUNT0),
+	dump_register(QISAR0),
+	dump_register(QIER0),
+	dump_register(QISAR1),
+	dump_register(QIER1),
+	dump_register(CAP_EPNTXFFSZ),
+	dump_register(CAP_EPNRXFFSZ),
+	dump_register(CAP_EPINFO),
+	dump_register(MISC_CTRL),
+};
+
+static const struct debugfs_reg32 mtu3_csr_regs[] = {
+	dump_register(DEVICE_CONF),
+	dump_register(DEV_LINK_INTR_ENABLE),
+	dump_register(DEV_LINK_INTR),
+	dump_register(LTSSM_CTRL),
+	dump_register(USB3_CONFIG),
+	dump_register(LINK_STATE_MACHINE),
+	dump_register(LTSSM_INTR_ENABLE),
+	dump_register(LTSSM_INTR),
+	dump_register(U3U2_SWITCH_CTRL),
+	dump_register(POWER_MANAGEMENT),
+	dump_register(DEVICE_CONTROL),
+	dump_register(COMMON_USB_INTR_ENABLE),
+	dump_register(COMMON_USB_INTR),
+	dump_register(USB20_MISC_CONTROL),
+	dump_register(USB20_OPSTATE),
+};
+
+static int mtu3_link_state_show(struct seq_file *sf, void *unused)
+{
+	struct mtu3 *mtu = sf->private;
+	void __iomem *mbase = mtu->mac_base;
+
+	seq_printf(sf, "opstate: %#x, ltssm: %#x\n",
+		   mtu3_readl(mbase, U3D_USB20_OPSTATE),
+		   LTSSM_STATE(mtu3_readl(mbase, U3D_LINK_STATE_MACHINE)));
+
+	return 0;
+}
+
+static int mtu3_ep_used_show(struct seq_file *sf, void *unused)
+{
+	struct mtu3 *mtu = sf->private;
+	struct mtu3_ep *mep;
+	unsigned long flags;
+	int used = 0;
+	int i;
+
+	spin_lock_irqsave(&mtu->lock, flags);
+
+	for (i = 0; i < mtu->num_eps; i++) {
+		mep = mtu->in_eps + i;
+		if (mep->flags & MTU3_EP_ENABLED) {
+			seq_printf(sf, "%s - type: %d\n", mep->name, mep->type);
+			used++;
+		}
+
+		mep = mtu->out_eps + i;
+		if (mep->flags & MTU3_EP_ENABLED) {
+			seq_printf(sf, "%s - type: %d\n", mep->name, mep->type);
+			used++;
+		}
+	}
+	seq_printf(sf, "total used: %d eps\n", used);
+
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mtu3_link_state);
+DEFINE_SHOW_ATTRIBUTE(mtu3_ep_used);
+
+static void mtu3_debugfs_regset(struct mtu3 *mtu, void __iomem *base,
+				const struct debugfs_reg32 *regs, size_t nregs,
+				const char *name, struct dentry *parent)
+{
+	struct debugfs_regset32 *regset;
+	struct mtu3_regset *mregs;
+
+	mregs = devm_kzalloc(mtu->dev, sizeof(*mregs), GFP_KERNEL);
+	if (!mregs)
+		return;
+
+	snprintf(mregs->name, sizeof(mregs->name), "%s", name);
+	regset = &mregs->regset;
+	regset->regs = regs;
+	regset->nregs = nregs;
+	regset->base = base;
+
+	debugfs_create_regset32(mregs->name, 0444, parent, regset);
+}
+
+static void mtu3_debugfs_ep_regset(struct mtu3 *mtu, struct mtu3_ep *mep,
+				   struct dentry *parent)
+{
+	struct debugfs_reg32 *regs;
+	int epnum = mep->epnum;
+	int in = mep->is_in;
+
+	regs = devm_kcalloc(mtu->dev, 7, sizeof(*regs), GFP_KERNEL);
+	if (!regs)
+		return;
+
+	regs[0].name = in ? "TCR0" : "RCR0";
+	regs[0].offset = in ? MU3D_EP_TXCR0(epnum) : MU3D_EP_RXCR0(epnum);
+	regs[1].name = in ? "TCR1" : "RCR1";
+	regs[1].offset = in ? MU3D_EP_TXCR1(epnum) : MU3D_EP_RXCR1(epnum);
+	regs[2].name = in ? "TCR2" : "RCR2";
+	regs[2].offset = in ? MU3D_EP_TXCR2(epnum) : MU3D_EP_RXCR2(epnum);
+	regs[3].name = in ? "TQHIAR" : "RQHIAR";
+	regs[3].offset = in ? USB_QMU_TQHIAR(epnum) : USB_QMU_RQHIAR(epnum);
+	regs[4].name = in ? "TQCSR" : "RQCSR";
+	regs[4].offset = in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
+	regs[5].name = in ? "TQSAR" : "RQSAR";
+	regs[5].offset = in ? USB_QMU_TQSAR(epnum) : USB_QMU_RQSAR(epnum);
+	regs[6].name = in ? "TQCPR" : "RQCPR";
+	regs[6].offset = in ? USB_QMU_TQCPR(epnum) : USB_QMU_RQCPR(epnum);
+
+	mtu3_debugfs_regset(mtu, mtu->mac_base, regs, 7, "ep-regs", parent);
+}
+
+static int mtu3_ep_info_show(struct seq_file *sf, void *unused)
+{
+	struct mtu3_ep *mep = sf->private;
+	struct mtu3 *mtu = mep->mtu;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mtu->lock, flags);
+	seq_printf(sf, "ep - type:%d, maxp:%d, slot:%d, flags:%x\n",
+		   mep->type, mep->maxp, mep->slot, mep->flags);
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	return 0;
+}
+
+static int mtu3_fifo_show(struct seq_file *sf, void *unused)
+{
+	struct mtu3_ep *mep = sf->private;
+	struct mtu3 *mtu = mep->mtu;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mtu->lock, flags);
+	seq_printf(sf, "fifo - seg_size:%d, addr:%d, size:%d\n",
+		   mep->fifo_seg_size, mep->fifo_addr, mep->fifo_size);
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	return 0;
+}
+
+static int mtu3_qmu_ring_show(struct seq_file *sf, void *unused)
+{
+	struct mtu3_ep *mep = sf->private;
+	struct mtu3 *mtu = mep->mtu;
+	struct mtu3_gpd_ring *ring;
+	unsigned long flags;
+
+	ring = &mep->gpd_ring;
+	spin_lock_irqsave(&mtu->lock, flags);
+	seq_printf(sf,
+		   "qmu-ring - dma:%pad, start:%p, end:%p, enq:%p, dep:%p\n",
+		   &ring->dma, ring->start, ring->end,
+		   ring->enqueue, ring->dequeue);
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	return 0;
+}
+
+static int mtu3_qmu_gpd_show(struct seq_file *sf, void *unused)
+{
+	struct mtu3_ep *mep = sf->private;
+	struct mtu3 *mtu = mep->mtu;
+	struct mtu3_gpd_ring *ring;
+	struct qmu_gpd *gpd;
+	dma_addr_t dma;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&mtu->lock, flags);
+	ring = &mep->gpd_ring;
+	gpd = ring->start;
+	if (!gpd || !(mep->flags & MTU3_EP_ENABLED)) {
+		seq_puts(sf, "empty!\n");
+		goto out;
+	}
+
+	for (i = 0; i < MAX_GPD_NUM; i++, gpd++) {
+		dma = ring->dma + i * sizeof(*gpd);
+		seq_printf(sf, "gpd.%03d -> %pad, %p: %08x %08x %08x %08x\n",
+			   i, &dma, gpd, gpd->dw0_info, gpd->next_gpd,
+			   gpd->buffer, gpd->dw3_info);
+	}
+
+out:
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	return 0;
+}
+
+static const struct mtu3_file_map mtu3_ep_files[] = {
+	{"ep-info", mtu3_ep_info_show, },
+	{"fifo", mtu3_fifo_show, },
+	{"qmu-ring", mtu3_qmu_ring_show, },
+	{"qmu-gpd", mtu3_qmu_gpd_show, },
+};
+
+static int mtu3_ep_open(struct inode *inode, struct file *file)
+{
+	const char *file_name = file_dentry(file)->d_iname;
+	const struct mtu3_file_map *f_map;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mtu3_ep_files); i++) {
+		f_map = &mtu3_ep_files[i];
+
+		if (strcmp(f_map->name, file_name) == 0)
+			break;
+	}
+
+	return single_open(file, f_map->show, inode->i_private);
+}
+
+static const struct file_operations mtu3_ep_fops = {
+	.open = mtu3_ep_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct debugfs_reg32 mtu3_prb_regs[] = {
+	dump_prb_reg("enable", U3D_SSUSB_PRB_CTRL0),
+	dump_prb_reg("byte-sell", U3D_SSUSB_PRB_CTRL1),
+	dump_prb_reg("byte-selh", U3D_SSUSB_PRB_CTRL2),
+	dump_prb_reg("module-sel", U3D_SSUSB_PRB_CTRL3),
+	dump_prb_reg("sw-out", U3D_SSUSB_PRB_CTRL4),
+	dump_prb_reg("data", U3D_SSUSB_PRB_CTRL5),
+};
+
+static int mtu3_probe_show(struct seq_file *sf, void *unused)
+{
+	const char *file_name = file_dentry(sf->file)->d_iname;
+	struct mtu3 *mtu = sf->private;
+	const struct debugfs_reg32 *regs;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
+		regs = &mtu3_prb_regs[i];
+
+		if (strcmp(regs->name, file_name) == 0)
+			break;
+	}
+
+	seq_printf(sf, "0x%04x - 0x%08x\n", (u32)regs->offset,
+		   mtu3_readl(mtu->ippc_base, (u32)regs->offset));
+
+	return 0;
+}
+
+static int mtu3_probe_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mtu3_probe_show, inode->i_private);
+}
+
+static ssize_t mtu3_probe_write(struct file *file, const char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	const char *file_name = file_dentry(file)->d_iname;
+	struct seq_file *sf = file->private_data;
+	struct mtu3 *mtu = sf->private;
+	const struct debugfs_reg32 *regs;
+	char buf[32] = {0};	/* keep NUL-terminated for kstrtou32() */
+	u32 val;
+	int i;
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+		return -EFAULT;
+
+	if (kstrtou32(buf, 0, &val))
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
+		regs = &mtu3_prb_regs[i];
+
+		if (strcmp(regs->name, file_name) == 0)
+			break;
+	}
+	mtu3_writel(mtu->ippc_base, (u32)regs->offset, val);
+
+	return count;
+}
+
+static const struct file_operations mtu3_probe_fops = {
+	.open = mtu3_probe_open,
+	.write = mtu3_probe_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void mtu3_debugfs_create_prb_files(struct mtu3 *mtu)
+{
+	struct ssusb_mtk *ssusb = mtu->ssusb;
+	struct debugfs_reg32 *regs;
+	struct dentry *dir_prb;
+	int i;
+
+	dir_prb = debugfs_create_dir("probe", ssusb->dbgfs_root);
+
+	for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
+		regs = &mtu3_prb_regs[i];
+		debugfs_create_file(regs->name, 0644, dir_prb,
+				    mtu, &mtu3_probe_fops);
+	}
+
+	mtu3_debugfs_regset(mtu, mtu->ippc_base, mtu3_prb_regs,
+			    ARRAY_SIZE(mtu3_prb_regs), "regs", dir_prb);
+}
+
+static void mtu3_debugfs_create_ep_dir(struct mtu3_ep *mep,
+				       struct dentry *parent)
+{
+	const struct mtu3_file_map *files;
+	struct dentry *dir_ep;
+	int i;
+
+	dir_ep = debugfs_create_dir(mep->name, parent);
+	mtu3_debugfs_ep_regset(mep->mtu, mep, dir_ep);
+
+	for (i = 0; i < ARRAY_SIZE(mtu3_ep_files); i++) {
+		files = &mtu3_ep_files[i];
+
+		debugfs_create_file(files->name, 0444, dir_ep,
+				    mep, &mtu3_ep_fops);
+	}
+}
+
+static void mtu3_debugfs_create_ep_dirs(struct mtu3 *mtu)
+{
+	struct ssusb_mtk *ssusb = mtu->ssusb;
+	struct dentry *dir_eps;
+	int i;
+
+	dir_eps = debugfs_create_dir("eps", ssusb->dbgfs_root);
+
+	for (i = 1; i < mtu->num_eps; i++) {
+		mtu3_debugfs_create_ep_dir(mtu->in_eps + i, dir_eps);
+		mtu3_debugfs_create_ep_dir(mtu->out_eps + i, dir_eps);
+	}
+}
+
+void ssusb_dev_debugfs_init(struct ssusb_mtk *ssusb)
+{
+	struct mtu3 *mtu = ssusb->u3d;
+	struct dentry *dir_regs;
+
+	dir_regs = debugfs_create_dir("regs", ssusb->dbgfs_root);
+
+	mtu3_debugfs_regset(mtu, mtu->ippc_base,
+			    mtu3_ippc_regs, ARRAY_SIZE(mtu3_ippc_regs),
+			    "reg-ippc", dir_regs);
+
+	mtu3_debugfs_regset(mtu, mtu->mac_base,
+			    mtu3_dev_regs, ARRAY_SIZE(mtu3_dev_regs),
+			    "reg-dev", dir_regs);
+
+	mtu3_debugfs_regset(mtu, mtu->mac_base,
+			    mtu3_csr_regs, ARRAY_SIZE(mtu3_csr_regs),
+			    "reg-csr", dir_regs);
+
+	mtu3_debugfs_create_ep_dirs(mtu);
+
+	mtu3_debugfs_create_prb_files(mtu);
+
+	debugfs_create_file("link-state", 0444, ssusb->dbgfs_root,
+			    mtu, &mtu3_link_state_fops);
+	debugfs_create_file("ep-used", 0444, ssusb->dbgfs_root,
+			    mtu, &mtu3_ep_used_fops);
+}
+
+static int ssusb_mode_show(struct seq_file *sf, void *unused)
+{
+	struct ssusb_mtk *ssusb = sf->private;
+
+	seq_printf(sf, "current mode: %s(%s drd)\n(echo device/host)\n",
+		   ssusb->is_host ? "host" : "device",
+		   ssusb->otg_switch.manual_drd_enabled ? "manual" : "auto");
+
+	return 0;
+}
+
+static int ssusb_mode_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ssusb_mode_show, inode->i_private);
+}
+
+static ssize_t ssusb_mode_write(struct file *file, const char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	struct seq_file *sf = file->private_data;
+	struct ssusb_mtk *ssusb = sf->private;
+	char buf[16] = {0};	/* avoid parsing stack garbage */
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+		return -EFAULT;
+
+	if (!strncmp(buf, "host", 4) && !ssusb->is_host) {
+		ssusb_mode_switch(ssusb, 1);
+	} else if (!strncmp(buf, "device", 6) && ssusb->is_host) {
+		ssusb_mode_switch(ssusb, 0);
+	} else {
+		dev_err(ssusb->dev, "wrong or duplicated setting\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static const struct file_operations ssusb_mode_fops = {
+	.open = ssusb_mode_open,
+	.write = ssusb_mode_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int ssusb_vbus_show(struct seq_file *sf, void *unused)
+{
+	struct ssusb_mtk *ssusb = sf->private;
+	struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+	seq_printf(sf, "vbus state: %s\n(echo on/off)\n",
+		   regulator_is_enabled(otg_sx->vbus) ? "on" : "off");
+
+	return 0;
+}
+
+static int ssusb_vbus_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ssusb_vbus_show, inode->i_private);
+}
+
+static ssize_t ssusb_vbus_write(struct file *file, const char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	struct seq_file *sf = file->private_data;
+	struct ssusb_mtk *ssusb = sf->private;
+	struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+	char buf[16] = {0};	/* avoid parsing stack garbage */
+	bool enable;
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+		return -EFAULT;
+
+	if (kstrtobool(buf, &enable)) {
+		dev_err(ssusb->dev, "wrong setting\n");
+		return -EINVAL;
+	}
+
+	ssusb_set_vbus(otg_sx, enable);
+
+	return count;
+}
+
+static const struct file_operations ssusb_vbus_fops = {
+	.open = ssusb_vbus_open,
+	.write = ssusb_vbus_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void ssusb_dr_debugfs_init(struct ssusb_mtk *ssusb)
+{
+	struct dentry *root = ssusb->dbgfs_root;
+
+	debugfs_create_file("mode", 0644, root, ssusb, &ssusb_mode_fops);
+	debugfs_create_file("vbus", 0644, root, ssusb, &ssusb_vbus_fops);
+}
+
+void ssusb_debugfs_create_root(struct ssusb_mtk *ssusb)
+{
+	ssusb->dbgfs_root =
+		debugfs_create_dir(dev_name(ssusb->dev), usb_debug_root);
+}
+
+void ssusb_debugfs_remove_root(struct ssusb_mtk *ssusb)
+{
+	debugfs_remove_recursive(ssusb->dbgfs_root);
+	ssusb->dbgfs_root = NULL;
+}
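
The root directory created by ssusb_debugfs_create_root() is named after the
device and placed under usb_debug_root, so the files above surface at
/sys/kernel/debug/usb/<dev-name>/. A userspace sketch that flips the
dual-role "mode" file; the "11201000.usb" instance name is an assumption that
depends on the board's device tree:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path = "/sys/kernel/debug/usb/11201000.usb/mode";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* lands in ssusb_mode_write(), which calls ssusb_mode_switch() */
		if (write(fd, "host", strlen("host")) < 0)
			perror("write");
		close(fd);
		return 0;
	}
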
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index ac60e9c..1753bf9 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -7,7 +7,7 @@
  * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
  */
 
-#include <linux/debugfs.h>
+#include <linux/usb/role.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/of_device.h>
@@ -17,6 +17,7 @@
 
 #include "mtu3.h"
 #include "mtu3_dr.h"
+#include "mtu3_debug.h"
 
 #define USB2_PORT 2
 #define USB3_PORT 3
@@ -148,6 +149,7 @@
 	struct mtu3 *mtu = ssusb->u3d;
 
 	dev_dbg(ssusb->dev, "mailbox state(%d)\n", status);
+	mtu3_dbg_trace(ssusb->dev, "mailbox %d", status);
 
 	switch (status) {
 	case MTU3_ID_GROUND:
@@ -163,8 +165,10 @@
 	case MTU3_VBUS_OFF:
 		mtu3_stop(mtu);
 		pm_relax(ssusb->dev);
+		ssusb_set_force_vbus(ssusb, false);
 		break;
 	case MTU3_VBUS_VALID:
+		ssusb_set_force_vbus(ssusb, true);
 		/* avoid suspend when works as device */
 		pm_stay_awake(ssusb->dev);
 		mtu3_start(mtu);
@@ -266,7 +270,7 @@
  * This is useful in special cases, such as uses TYPE-A receptacle but also
  * wants to support dual-role mode.
  */
-static void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host)
+void ssusb_mode_switch(struct ssusb_mtk *ssusb, int to_host)
 {
 	struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
 
@@ -308,9 +312,9 @@
 		return -EFAULT;
 
 	if (!strncmp(buf, "host", 4) && !ssusb->is_host) {
-		ssusb_mode_manual_switch(ssusb, 1);
+		ssusb_mode_switch(ssusb, 1);
 	} else if (!strncmp(buf, "device", 6) && ssusb->is_host) {
-		ssusb_mode_manual_switch(ssusb, 0);
+		ssusb_mode_switch(ssusb, 0);
 	} else {
 		dev_err(ssusb->dev, "wrong or duplicated setting\n");
 		return -EINVAL;
@@ -373,22 +377,6 @@
 	.release = single_release,
 };
 
-static void ssusb_debugfs_init(struct ssusb_mtk *ssusb)
-{
-	struct dentry *root;
-
-	root = debugfs_create_dir(dev_name(ssusb->dev), usb_debug_root);
-	ssusb->dbgfs_root = root;
-
-	debugfs_create_file("mode", 0644, root, ssusb, &ssusb_mode_fops);
-	debugfs_create_file("vbus", 0644, root, ssusb, &ssusb_vbus_fops);
-}
-
-static void ssusb_debugfs_exit(struct ssusb_mtk *ssusb)
-{
-	debugfs_remove_recursive(ssusb->dbgfs_root);
-}
-
 void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
 			  enum mtu3_dr_force_mode mode)
 {
@@ -412,6 +400,47 @@
 	mtu3_writel(ssusb->ippc_base, SSUSB_U2_CTRL(0), value);
 }
 
+static int ssusb_role_sw_set(struct device *dev, enum usb_role role)
+{
+	struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
+	bool to_host = false;
+
+	if (role == USB_ROLE_HOST)
+		to_host = true;
+
+	if (to_host ^ ssusb->is_host)
+		ssusb_mode_switch(ssusb, to_host);
+
+	return 0;
+}
+
+static enum usb_role ssusb_role_sw_get(struct device *dev)
+{
+	struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
+	enum usb_role role;
+
+	role = ssusb->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
+
+	return role;
+}
+
+static int ssusb_role_sw_register(struct otg_switch_mtk *otg_sx)
+{
+	struct usb_role_switch_desc role_sx_desc = { 0 };
+	struct ssusb_mtk *ssusb =
+		container_of(otg_sx, struct ssusb_mtk, otg_switch);
+
+	if (!otg_sx->role_sw_used)
+		return 0;
+
+	role_sx_desc.set = ssusb_role_sw_set;
+	role_sx_desc.get = ssusb_role_sw_get;
+	role_sx_desc.fwnode = dev_fwnode(ssusb->dev);
+	otg_sx->role_sw = usb_role_switch_register(ssusb->dev, &role_sx_desc);
+
+	return PTR_ERR_OR_ZERO(otg_sx->role_sw);
+}
+
 int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
 {
 	struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
@@ -420,7 +449,9 @@
 	INIT_WORK(&otg_sx->vbus_work, ssusb_vbus_work);
 
 	if (otg_sx->manual_drd_enabled)
-		ssusb_debugfs_init(ssusb);
+		ssusb_dr_debugfs_init(ssusb);
+	else if (otg_sx->role_sw_used)
+		ssusb_role_sw_register(otg_sx);
 	else
 		ssusb_extcon_register(otg_sx);
 
@@ -431,9 +464,7 @@
 {
 	struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
 
-	if (otg_sx->manual_drd_enabled)
-		ssusb_debugfs_exit(ssusb);
-
 	cancel_work_sync(&otg_sx->id_work);
 	cancel_work_sync(&otg_sx->vbus_work);
+	usb_role_switch_unregister(otg_sx->role_sw);
 }
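
With the role switch registered above, a separate port controller (for
example a Type-C driver) can drive the mode without extcon or debugfs. A
hedged sketch of such a consumer; the lookup assumes the connection is
described through the usb-role-switch device-tree plumbing:

	#include <linux/usb/role.h>

	static int example_switch_to_host(struct device *dev)
	{
		struct usb_role_switch *sw;
		int ret;

		sw = usb_role_switch_get(dev);	/* resolved through fwnode links */
		if (IS_ERR_OR_NULL(sw))
			return sw ? PTR_ERR(sw) : -ENODEV;

		/* ends up in ssusb_role_sw_set(), which calls ssusb_mode_switch() */
		ret = usb_role_switch_set_role(sw, USB_ROLE_HOST);
		usb_role_switch_put(sw);
		return ret;
	}
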
diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h
index 50702fd..5e58c4d 100644
--- a/drivers/usb/mtu3/mtu3_dr.h
+++ b/drivers/usb/mtu3/mtu3_dr.h
@@ -71,6 +71,7 @@
 #if IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE)
 int ssusb_otg_switch_init(struct ssusb_mtk *ssusb);
 void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb);
+void ssusb_mode_switch(struct ssusb_mtk *ssusb, int to_host);
 int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on);
 void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
 			  enum mtu3_dr_force_mode mode);
@@ -85,6 +86,9 @@
 static inline void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
 {}
 
+static inline void ssusb_mode_switch(struct ssusb_mtk *ssusb, int to_host)
+{}
+
 static inline int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
 {
 	return 0;
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index bbcd333..f93732e 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -8,6 +8,7 @@
  */
 
 #include "mtu3.h"
+#include "mtu3_trace.h"
 
 void mtu3_req_complete(struct mtu3_ep *mep,
 		     struct usb_request *req, int status)
@@ -25,6 +26,8 @@
 
 	mtu = mreq->mtu;
 	mep->busy = 1;
+
+	trace_mtu3_req_complete(mreq);
 	spin_unlock(&mtu->lock);
 
 	/* ep0 makes use of PIO, needn't unmap it */
@@ -201,6 +204,7 @@
 	spin_unlock_irqrestore(&mtu->lock, flags);
 
 	dev_dbg(mtu->dev, "%s active_ep=%d\n", __func__, mtu->active_ep);
+	trace_mtu3_gadget_ep_enable(mep);
 
 	return ret;
 }
@@ -212,6 +216,7 @@
 	unsigned long flags;
 
 	dev_dbg(mtu->dev, "%s %s\n", __func__, mep->name);
+	trace_mtu3_gadget_ep_disable(mep);
 
 	if (!(mep->flags & MTU3_EP_ENABLED)) {
 		dev_warn(mtu->dev, "%s is already disabled\n", mep->name);
@@ -242,13 +247,17 @@
 	mreq->request.dma = DMA_ADDR_INVALID;
 	mreq->epnum = mep->epnum;
 	mreq->mep = mep;
+	trace_mtu3_alloc_request(mreq);
 
 	return &mreq->request;
 }
 
 void mtu3_free_request(struct usb_ep *ep, struct usb_request *req)
 {
-	kfree(to_mtu3_request(req));
+	struct mtu3_request *mreq = to_mtu3_request(req);
+
+	trace_mtu3_free_request(mreq);
+	kfree(mreq);
 }
 
 static int mtu3_gadget_queue(struct usb_ep *ep,
@@ -278,10 +287,12 @@
 		__func__, mep->is_in ? "TX" : "RX", mreq->epnum, ep->name,
 		mreq, ep->maxpacket, mreq->request.length);
 
-	if (req->length > GPD_BUF_SIZE) {
+	if ((!mtu->gen2cp && req->length > GPD_BUF_SIZE) ||
+	    (mtu->gen2cp && req->length > GPD_BUF_SIZE_EL)) {
 		dev_warn(mtu->dev,
 			"req length > supported MAX:%d requested:%d\n",
-			GPD_BUF_SIZE, req->length);
+			mtu->gen2cp ? GPD_BUF_SIZE_EL : GPD_BUF_SIZE,
+			req->length);
 		return -EOPNOTSUPP;
 	}
 
@@ -314,6 +325,7 @@
 
 error:
 	spin_unlock_irqrestore(&mtu->lock, flags);
+	trace_mtu3_gadget_queue(mreq);
 
 	return ret;
 }
@@ -331,6 +343,7 @@
 		return -EINVAL;
 
 	dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req);
+	trace_mtu3_gadget_dequeue(mreq);
 
 	spin_lock_irqsave(&mtu->lock, flags);
 
@@ -401,6 +414,7 @@
 
 done:
 	spin_unlock_irqrestore(&mtu->lock, flags);
+	trace_mtu3_gadget_ep_set_halt(mep);
 
 	return ret;
 }
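
The queue-length guard above enforces a per-variant cap: legacy controllers
take at most a 16-bit buffer length per GPD, while gen2cp parts extend the
field to 20 bits. A standalone sketch of the same check, with the constants
copied from mtu3_qmu.h:

	#include <stdbool.h>
	#include <stdio.h>

	#define GPD_BUF_SIZE		65532		/* legacy 16-bit length field */
	#define GPD_BUF_SIZE_EL		1048572		/* gen2cp 20-bit length field */

	static bool gpd_len_ok(bool gen2cp, unsigned int len)
	{
		return len <= (gen2cp ? GPD_BUF_SIZE_EL : GPD_BUF_SIZE);
	}

	int main(void)
	{
		/* prints "0 1": a 100 kB request only fits on gen2cp hardware */
		printf("%d %d\n", gpd_len_ok(false, 100000), gpd_len_ok(true, 100000));
		return 0;
	}
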
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
index 3c464d8..1247c43 100644
--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -11,6 +11,8 @@
 #include <linux/usb/composite.h>
 
 #include "mtu3.h"
+#include "mtu3_debug.h"
+#include "mtu3_trace.h"
 
 /* ep0 is always mtu3->in_eps[0] */
 #define	next_ep0_request(mtu)	next_request((mtu)->ep0)
@@ -283,11 +285,24 @@
 		dev_dbg(mtu->dev, "TEST_PACKET\n");
 		mtu->test_mode_nr = TEST_PACKET_MODE;
 		break;
+	case OTG_SRP_REQD:
+		dev_dbg(mtu->dev, "OTG_SRP_REQD\n");
+		mtu->ssusb->otg_srp_reqd = 1;
+		break;
+	case OTG_HNP_REQD:
+		dev_dbg(mtu->dev, "OTG_HNP_REQD\n");
+		mtu->ssusb->otg_hnp_reqd = 1;
+		break;
 	default:
 		handled = -EINVAL;
 		goto out;
 	}
 
+	if (mtu->ssusb->otg_srp_reqd || mtu->ssusb->otg_hnp_reqd) {
+		mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+		goto out;
+	}
+
 	mtu->test_mode = true;
 
 	/* no TX completion interrupt, and need restart platform after test */
@@ -634,6 +649,7 @@
 	int handled = 0;
 
 	ep0_read_setup(mtu, &setup);
+	trace_mtu3_handle_setup(&setup);
 
 	if ((setup.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
 		handled = handle_standard_request(mtu, &setup);
@@ -692,9 +708,13 @@
 	mtu3_writel(mbase, U3D_EPISR, int_status); /* W1C */
 
 	/* only handle ep0's */
-	if (!(int_status & EP0ISR))
+	if (!(int_status & (EP0ISR | SETUPENDISR)))
 		return IRQ_NONE;
 
+	/* abort current SETUP, and process new one */
+	if (int_status & SETUPENDISR)
+		mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+
 	csr = mtu3_readl(mbase, U3D_EP0CSR);
 
 	dev_dbg(mtu->dev, "%s csr=0x%x\n", __func__, csr);
@@ -706,6 +726,7 @@
 		ret = IRQ_HANDLED;
 	}
 	dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu));
+	mtu3_dbg_trace(mtu->dev, "ep0_state %s", decode_ep0_state(mtu));
 
 	switch (mtu->ep0_state) {
 	case MU3D_EP0_STATE_TX:
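
The SETUPENDISR handling above covers a new SETUP token arriving before the
previous control transfer finished: the ISR simply resets the ep0 state
machine so the fresh SETUP is parsed from scratch. A toy model of that
abort-on-new-SETUP behaviour; the states loosely mirror mtu3 and are
illustrative only:

	#include <stdio.h>

	enum ep0_state { EP0_SETUP, EP0_TX, EP0_RX };

	static enum ep0_state ep0_irq(enum ep0_state cur, int setup_ended)
	{
		/* a late SETUP aborts whatever phase was in flight */
		return setup_ended ? EP0_SETUP : cur;
	}

	int main(void)
	{
		printf("%d\n", ep0_irq(EP0_TX, 1));	/* prints 0: back to SETUP */
		return 0;
	}
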
diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c
index c871b94..7618a0c 100644
--- a/drivers/usb/mtu3/mtu3_host.c
+++ b/drivers/usb/mtu3/mtu3_host.c
@@ -205,6 +205,7 @@
 		ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
 
 	/* if port0 supports dual-role, works as host mode by default */
+	ssusb_set_force_vbus(ssusb, false);
 	ssusb_set_vbus(&ssusb->otg_switch, 1);
 }
 
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index a45bb25..8382d06 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -49,6 +49,7 @@
 #define U3D_QCR1		(SSUSB_DEV_BASE + 0x0404)
 #define U3D_QCR2		(SSUSB_DEV_BASE + 0x0408)
 #define U3D_QCR3		(SSUSB_DEV_BASE + 0x040C)
+#define U3D_QFCR		(SSUSB_DEV_BASE + 0x0428)
 #define U3D_TXQHIAR1		(SSUSB_DEV_BASE + 0x0484)
 #define U3D_RXQHIAR1		(SSUSB_DEV_BASE + 0x04C4)
 
@@ -104,6 +105,7 @@
 
 /* U3D_EPISR */
 #define EPRISR(x)		(BIT(16) << (x))
+#define SETUPENDISR		BIT(16)
 #define EPTISR(x)		(BIT(0) << (x))
 #define EP0ISR			BIT(0)
 
@@ -132,11 +134,23 @@
 #define TX_W1C_BITS		(~(TX_SENTSTALL))
 
 /* U3D_TX1CSR1 */
-#define TX_MULT(x)		(((x) & 0x3) << 22)
-#define TX_MAX_PKT(x)		(((x) & 0x3f) << 16)
+#define TX_MAX_PKT_G2(x)	(((x) & 0x7f) << 24)
+#define TX_MULT_G2(x)		(((x) & 0x7) << 21)
+#define TX_MULT_OG(x)		(((x) & 0x3) << 22)
+#define TX_MAX_PKT_OG(x)	(((x) & 0x3f) << 16)
 #define TX_SLOT(x)		(((x) & 0x3f) << 8)
 #define TX_TYPE(x)		(((x) & 0x3) << 4)
 #define TX_SS_BURST(x)		(((x) & 0xf) << 0)
+#define TX_MULT(g2c, x)		\
+({				\
+	typeof(x) x_ = (x);	\
+	(g2c) ? TX_MULT_G2(x_) : TX_MULT_OG(x_);	\
+})
+#define TX_MAX_PKT(g2c, x)	\
+({				\
+	typeof(x) x_ = (x);	\
+	(g2c) ? TX_MAX_PKT_G2(x_) : TX_MAX_PKT_OG(x_);	\
+})
 
 /* for TX_TYPE & RX_TYPE */
 #define TYPE_BULK		(0x0)
@@ -159,11 +173,23 @@
 #define RX_W1C_BITS		(~(RX_SENTSTALL | RX_RXPKTRDY))
 
 /* U3D_RX1CSR1 */
-#define RX_MULT(x)		(((x) & 0x3) << 22)
-#define RX_MAX_PKT(x)		(((x) & 0x3f) << 16)
+#define RX_MAX_PKT_G2(x)	(((x) & 0x7f) << 24)
+#define RX_MULT_G2(x)		(((x) & 0x7) << 21)
+#define RX_MULT_OG(x)		(((x) & 0x3) << 22)
+#define RX_MAX_PKT_OG(x)	(((x) & 0x3f) << 16)
 #define RX_SLOT(x)		(((x) & 0x3f) << 8)
 #define RX_TYPE(x)		(((x) & 0x3) << 4)
 #define RX_SS_BURST(x)		(((x) & 0xf) << 0)
+#define RX_MULT(g2c, x)		\
+({				\
+	typeof(x) x_ = (x);	\
+	(g2c) ? RX_MULT_G2(x_) : RX_MULT_OG(x_);	\
+})
+#define RX_MAX_PKT(g2c, x)	\
+({				\
+	typeof(x) x_ = (x);	\
+	(g2c) ? RX_MAX_PKT_G2(x_) : RX_MAX_PKT_OG(x_);	\
+})
 
 /* U3D_RX1CSR2 */
 #define RX_BINTERVAL(x)		(((x) & 0xff) << 24)
@@ -264,9 +290,12 @@
 #define U3D_LTSSM_CTRL		(SSUSB_USB3_MAC_CSR_BASE + 0x0010)
 #define U3D_USB3_CONFIG		(SSUSB_USB3_MAC_CSR_BASE + 0x001C)
 
+#define U3D_LINK_STATE_MACHINE	(SSUSB_USB3_MAC_CSR_BASE + 0x0134)
 #define U3D_LTSSM_INTR_ENABLE	(SSUSB_USB3_MAC_CSR_BASE + 0x013C)
 #define U3D_LTSSM_INTR		(SSUSB_USB3_MAC_CSR_BASE + 0x0140)
 
+#define U3D_U3U2_SWITCH_CTRL	(SSUSB_USB3_MAC_CSR_BASE + 0x0170)
+
 /*---------------- SSUSB_USB3_MAC_CSR FIELD DEFINITION ----------------*/
 
 /* U3D_LTSSM_CTRL */
@@ -279,6 +308,9 @@
 /* U3D_USB3_CONFIG */
 #define USB3_EN			BIT(0)
 
+/* U3D_LINK_STATE_MACHINE */
+#define LTSSM_STATE(x)	((x) & 0x1f)
+
 /* U3D_LTSSM_INTR_ENABLE */
 /* U3D_LTSSM_INTR */
 #define U3_RESUME_INTR		BIT(18)
@@ -301,6 +333,9 @@
 #define SS_DISABLE_INTR		BIT(1)
 #define SS_INACTIVE_INTR	BIT(0)
 
+/* U3D_U3U2_SWITCH_CTRL */
+#define SOFTCON_CLR_AUTO_EN	BIT(0)
+
 /*---------------- SSUSB_USB3_SYS_CSR REGISTER DEFINITION ----------------*/
 
 #define U3D_LINK_UX_INACT_TIMER	(SSUSB_USB3_SYS_CSR_BASE + 0x020C)
@@ -341,6 +376,7 @@
 #define U3D_USB20_FRAME_NUM		(SSUSB_USB2_CSR_BASE + 0x003C)
 #define U3D_USB20_LPM_PARAMETER		(SSUSB_USB2_CSR_BASE + 0x0044)
 #define U3D_USB20_MISC_CONTROL		(SSUSB_USB2_CSR_BASE + 0x004C)
+#define U3D_USB20_OPSTATE		(SSUSB_USB2_CSR_BASE + 0x0060)
 
 /*---------------- SSUSB_USB2_CSR FIELD DEFINITION ----------------*/
 
@@ -413,6 +449,13 @@
 #define U3D_SSUSB_DEV_RST_CTRL	(SSUSB_SIFSLV_IPPC_BASE + 0x0098)
 #define U3D_SSUSB_HW_ID		(SSUSB_SIFSLV_IPPC_BASE + 0x00A0)
 #define U3D_SSUSB_HW_SUB_ID	(SSUSB_SIFSLV_IPPC_BASE + 0x00A4)
+#define U3D_SSUSB_IP_TRUNK_VERS	(U3D_SSUSB_HW_SUB_ID)
+#define U3D_SSUSB_PRB_CTRL0	(SSUSB_SIFSLV_IPPC_BASE + 0x00B0)
+#define U3D_SSUSB_PRB_CTRL1	(SSUSB_SIFSLV_IPPC_BASE + 0x00B4)
+#define U3D_SSUSB_PRB_CTRL2	(SSUSB_SIFSLV_IPPC_BASE + 0x00B8)
+#define U3D_SSUSB_PRB_CTRL3	(SSUSB_SIFSLV_IPPC_BASE + 0x00BC)
+#define U3D_SSUSB_PRB_CTRL4	(SSUSB_SIFSLV_IPPC_BASE + 0x00C0)
+#define U3D_SSUSB_PRB_CTRL5	(SSUSB_SIFSLV_IPPC_BASE + 0x00C4)
 #define U3D_SSUSB_IP_SPARE0	(SSUSB_SIFSLV_IPPC_BASE + 0x00C8)
 
 /*---------------- SSUSB_SIFSLV_IPPC FIELD DEFINITION ----------------*/
@@ -477,4 +520,7 @@
 /* U3D_SSUSB_DEV_RST_CTRL */
 #define SSUSB_DEV_SW_RST		BIT(0)
 
+/* U3D_SSUSB_IP_TRUNK_VERS */
+#define IP_TRUNK_VERS(x)		(((x) >> 16) & 0xffff)
+
 #endif	/* _SSUSB_HW_REGS_H_ */
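
The TX_MAX_PKT()/RX_MAX_PKT() and *_MULT() wrappers above select between two
register layouts at runtime: the legacy IP packs a 6-bit max-packet field at
bit 16, while gen2cp widens it to 7 bits at bit 24 (and similarly for MULT).
A standalone illustration of where the same value lands in each layout:

	#include <stdio.h>

	#define TX_MAX_PKT_G2(x)	(((x) & 0x7f) << 24)
	#define TX_MAX_PKT_OG(x)	(((x) & 0x3f) << 16)

	int main(void)
	{
		unsigned int num_pkts = 5;

		/* prints "og: 0x00050000, gen2cp: 0x05000000" */
		printf("og: 0x%08x, gen2cp: 0x%08x\n",
		       TX_MAX_PKT_OG(num_pkts), TX_MAX_PKT_G2(num_pkts));
		return 0;
	}
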
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index 46551f6..ca6c2af 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -16,6 +16,7 @@
 
 #include "mtu3.h"
 #include "mtu3_dr.h"
+#include "mtu3_debug.h"
 
 /* u2-port0 should be powered on and enabled; */
 int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks)
@@ -44,6 +45,30 @@
 	return 0;
 }
 
+void ssusb_set_force_vbus(struct ssusb_mtk *ssusb, bool vbus_on)
+{
+	u32 u2ctl;
+	u32 misc;
+
+	if (!ssusb->force_vbus)
+		return;
+
+	u2ctl = mtu3_readl(ssusb->ippc_base, SSUSB_U2_CTRL(0));
+	misc = mtu3_readl(ssusb->mac_base, U3D_MISC_CTRL);
+	if (vbus_on) {
+		u2ctl &= ~SSUSB_U2_PORT_OTG_SEL;
+		misc |= VBUS_FRC_EN | VBUS_ON;
+	} else {
+		/* FIXME: setting SSUSB_U2_PORT_OTG_SEL here crashes the
+		 * host controller, so the write stays disabled for now:
+		 * u2ctl |= SSUSB_U2_PORT_OTG_SEL;
+		 */
+		misc &= ~(VBUS_FRC_EN | VBUS_ON);
+	}
+	mtu3_writel(ssusb->ippc_base, SSUSB_U2_CTRL(0), u2ctl);
+	mtu3_writel(ssusb->mac_base, U3D_MISC_CTRL, misc);
+}
+
 static int ssusb_phy_init(struct ssusb_mtk *ssusb)
 {
 	int i;
@@ -225,7 +250,7 @@
 	int i;
 	int ret;
 
-	ssusb->vusb33 = devm_regulator_get(&pdev->dev, "vusb33");
+	ssusb->vusb33 = devm_regulator_get(dev, "vusb33");
 	if (IS_ERR(ssusb->vusb33)) {
 		dev_err(dev, "failed to get vusb33\n");
 		return PTR_ERR(ssusb->vusb33);
@@ -273,6 +298,8 @@
 	if (IS_ERR(ssusb->ippc_base))
 		return PTR_ERR(ssusb->ippc_base);
 
+	ssusb->force_vbus = of_property_read_bool(node, "mediatek,force-vbus");
+
 	ssusb->dr_mode = usb_get_dr_mode(dev);
 	if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN)
 		ssusb->dr_mode = USB_DR_MODE_OTG;
@@ -305,8 +332,9 @@
 	otg_sx->is_u3_drd = of_property_read_bool(node, "mediatek,usb3-drd");
 	otg_sx->manual_drd_enabled =
 		of_property_read_bool(node, "enable-manual-drd");
+	otg_sx->role_sw_used = of_property_read_bool(node, "usb-role-switch");
 
-	if (of_property_read_bool(node, "extcon")) {
+	if (!otg_sx->role_sw_used && of_property_read_bool(node, "extcon")) {
 		otg_sx->edev = extcon_get_edev_by_phandle(ssusb->dev, 0);
 		if (IS_ERR(otg_sx->edev)) {
 			dev_err(ssusb->dev, "couldn't get extcon device\n");
@@ -346,6 +374,8 @@
 	if (ret)
 		return ret;
 
+	ssusb_debugfs_create_root(ssusb);
+
 	/* enable power domain */
 	pm_runtime_enable(dev);
 	pm_runtime_get_sync(dev);
@@ -410,6 +440,7 @@
 comm_init_err:
 	pm_runtime_put_sync(dev);
 	pm_runtime_disable(dev);
+	ssusb_debugfs_remove_root(ssusb);
 
 	return ret;
 }
@@ -437,6 +468,7 @@
 	ssusb_rscs_exit(ssusb);
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
+	ssusb_debugfs_remove_root(ssusb);
 
 	return 0;
 }
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 326b407..4bdc8d3 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -22,17 +22,49 @@
 #include <linux/iopoll.h>
 
 #include "mtu3.h"
+#include "mtu3_trace.h"
 
 #define QMU_CHECKSUM_LEN	16
 
 #define GPD_FLAGS_HWO	BIT(0)
 #define GPD_FLAGS_BDP	BIT(1)
 #define GPD_FLAGS_BPS	BIT(2)
+#define GPD_FLAGS_ZLP	BIT(6)
 #define GPD_FLAGS_IOC	BIT(7)
+#define GET_GPD_HWO(gpd)	(le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)
 
-#define GPD_EXT_FLAG_ZLP	BIT(5)
-#define GPD_EXT_NGP(x)		(((x) & 0xf) << 4)
-#define GPD_EXT_BUF(x)		(((x) & 0xf) << 0)
+#define GPD_RX_BUF_LEN_OG(x)	(((x) & 0xffff) << 16)
+#define GPD_RX_BUF_LEN_EL(x)	(((x) & 0xfffff) << 12)
+#define GPD_RX_BUF_LEN(mtu, x)	\
+({				\
+	typeof(x) x_ = (x);	\
+	((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
+})
+
+#define GPD_DATA_LEN_OG(x)	((x) & 0xffff)
+#define GPD_DATA_LEN_EL(x)	((x) & 0xfffff)
+#define GPD_DATA_LEN(mtu, x)	\
+({				\
+	typeof(x) x_ = (x);	\
+	((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
+})
+
+#define GPD_EXT_FLAG_ZLP	BIT(29)
+#define GPD_EXT_NGP_OG(x)	(((x) & 0xf) << 20)
+#define GPD_EXT_BUF_OG(x)	(((x) & 0xf) << 16)
+#define GPD_EXT_NGP_EL(x)	(((x) & 0xf) << 28)
+#define GPD_EXT_BUF_EL(x)	(((x) & 0xf) << 24)
+#define GPD_EXT_NGP(mtu, x)	\
+({				\
+	typeof(x) x_ = (x);	\
+	((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
+})
+
+#define GPD_EXT_BUF(mtu, x)	\
+({				\
+	typeof(x) x_ = (x);	\
+	((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
+})
 
 #define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
 #define HILO_DMA(hi, lo)	\
@@ -125,7 +157,7 @@
 	struct qmu_gpd *gpd = ring->start;
 
 	if (gpd) {
-		gpd->flag &= ~GPD_FLAGS_HWO;
+		gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
 		gpd_ring_init(ring, gpd);
 	}
 }
@@ -154,27 +186,6 @@
 	memset(ring, 0, sizeof(*ring));
 }
 
-/*
- * calculate check sum of a gpd or bd
- * add "noinline" and "mb" to prevent wrong calculation
- */
-static noinline u8 qmu_calc_checksum(u8 *data)
-{
-	u8 chksum = 0;
-	int i;
-
-	data[1] = 0x0;  /* set checksum to 0 */
-
-	mb();	/* ensure the gpd/bd is really up-to-date */
-	for (i = 0; i < QMU_CHECKSUM_LEN; i++)
-		chksum += data[i];
-
-	/* Default: HWO=1, @flag[bit0] */
-	chksum += 1;
-
-	return 0xFF - chksum;
-}
-
 void mtu3_qmu_resume(struct mtu3_ep *mep)
 {
 	struct mtu3 *mtu = mep->mtu;
@@ -235,16 +246,14 @@
 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
 	struct qmu_gpd *gpd = ring->enqueue;
 	struct usb_request *req = &mreq->request;
+	struct mtu3 *mtu = mep->mtu;
 	dma_addr_t enq_dma;
-	u16 ext_addr;
+	u32 ext_addr;
 
-	/* set all fields to zero as default value */
-	memset(gpd, 0, sizeof(*gpd));
-
+	gpd->dw0_info = 0;	/* SW owns it */
 	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
-	ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
-	gpd->buf_len = cpu_to_le16(req->length);
-	gpd->flag |= GPD_FLAGS_IOC;
+	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
+	gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));
 
 	/* get the next GPD */
 	enq = advance_enq_gpd(ring);
@@ -252,18 +261,22 @@
 	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
 		mep->epnum, gpd, enq, &enq_dma);
 
-	enq->flag &= ~GPD_FLAGS_HWO;
+	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
 	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
-	ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
-	gpd->tx_ext_addr = cpu_to_le16(ext_addr);
+	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
+	gpd->dw0_info = cpu_to_le32(ext_addr);
 
-	if (req->zero)
-		gpd->ext_flag |= GPD_EXT_FLAG_ZLP;
+	if (req->zero) {
+		if (mtu->gen2cp)
+			gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
+		else
+			gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
+	}
 
-	gpd->chksum = qmu_calc_checksum((u8 *)gpd);
-	gpd->flag |= GPD_FLAGS_HWO;
+	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
 
 	mreq->gpd = gpd;
+	trace_mtu3_prepare_gpd(mep, gpd);
 
 	return 0;
 }
@@ -274,16 +287,14 @@
 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
 	struct qmu_gpd *gpd = ring->enqueue;
 	struct usb_request *req = &mreq->request;
+	struct mtu3 *mtu = mep->mtu;
 	dma_addr_t enq_dma;
-	u16 ext_addr;
+	u32 ext_addr;
 
-	/* set all fields to zero as default value */
-	memset(gpd, 0, sizeof(*gpd));
-
+	gpd->dw0_info = 0;	/* SW owns it */
 	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
-	ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
-	gpd->data_buf_len = cpu_to_le16(req->length);
-	gpd->flag |= GPD_FLAGS_IOC;
+	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
+	gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));
 
 	/* get the next GPD */
 	enq = advance_enq_gpd(ring);
@@ -291,14 +302,14 @@
 	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
 		mep->epnum, gpd, enq, &enq_dma);
 
-	enq->flag &= ~GPD_FLAGS_HWO;
+	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
 	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
-	ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
-	gpd->rx_ext_addr = cpu_to_le16(ext_addr);
-	gpd->chksum = qmu_calc_checksum((u8 *)gpd);
-	gpd->flag |= GPD_FLAGS_HWO;
+	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
+	gpd->dw3_info = cpu_to_le32(ext_addr);
+	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
 
 	mreq->gpd = gpd;
+	trace_mtu3_prepare_gpd(mep, gpd);
 
 	return 0;
 }
@@ -323,7 +334,6 @@
 		/* set QMU start address */
 		write_txq_start_addr(mbase, epnum, ring->dma);
 		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
-		mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum));
 		/* send zero length packet according to ZLP flag in GPD */
 		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
 		mtu3_writel(mbase, U3D_TQERRIESR0,
@@ -338,7 +348,6 @@
 	} else {
 		write_rxq_start_addr(mbase, epnum, ring->dma);
 		mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
-		mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum));
 		/* don't expect ZLP */
 		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
 		/* move to next GPD when receive ZLP */
@@ -422,12 +431,13 @@
 	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
 	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
 
-	if (le16_to_cpu(gpd_current->buf_len) != 0) {
+	if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
 		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
 		return;
 	}
 
 	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, req);
+	trace_mtu3_zlp_exp_gpd(mep, gpd_current);
 
 	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
 
@@ -440,9 +450,7 @@
 	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
 
 	/* by pass the current GDP */
-	gpd_current->flag |= GPD_FLAGS_BPS;
-	gpd_current->chksum = qmu_calc_checksum((u8 *)gpd_current);
-	gpd_current->flag |= GPD_FLAGS_HWO;
+	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
 
 	/*enable DMAREQEN, switch back to QMU mode */
 	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
@@ -474,7 +482,7 @@
 	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
 		__func__, epnum, gpd, gpd_current, ring->enqueue);
 
-	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
 
 		mreq = next_request(mep);
 
@@ -484,7 +492,8 @@
 		}
 
 		request = &mreq->request;
-		request->actual = le16_to_cpu(gpd->buf_len);
+		request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
+		trace_mtu3_complete_gpd(mep, gpd);
 		mtu3_req_complete(mep, request, 0);
 
 		gpd = advance_deq_gpd(ring);
@@ -512,7 +521,7 @@
 	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
 		__func__, epnum, gpd, gpd_current, ring->enqueue);
 
-	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
 
 		mreq = next_request(mep);
 
@@ -522,7 +531,8 @@
 		}
 		req = &mreq->request;
 
-		req->actual = le16_to_cpu(gpd->buf_len);
+		req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
+		trace_mtu3_complete_gpd(mep, gpd);
 		mtu3_req_complete(mep, req, 0);
 
 		gpd = advance_deq_gpd(ring);
@@ -600,6 +610,7 @@
 	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
 		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
 		qmu_status);
+	trace_mtu3_qmu_isr(qmu_done_status, qmu_status);
 
 	if (qmu_done_status)
 		qmu_done_isr(mtu, qmu_done_status);
diff --git a/drivers/usb/mtu3/mtu3_qmu.h b/drivers/usb/mtu3/mtu3_qmu.h
index 81f5151..9cfde201 100644
--- a/drivers/usb/mtu3/mtu3_qmu.h
+++ b/drivers/usb/mtu3/mtu3_qmu.h
@@ -15,6 +15,7 @@
 #define QMU_GPD_RING_SIZE	(MAX_GPD_NUM * QMU_GPD_SIZE)
 
 #define GPD_BUF_SIZE		65532
+#define GPD_BUF_SIZE_EL		1048572
 
 void mtu3_qmu_stop(struct mtu3_ep *mep);
 int mtu3_qmu_start(struct mtu3_ep *mep);
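
Both buffer caps follow from the GPD length-field widths used in mtu3_qmu.c:
0xffff for the legacy 16-bit field and 0xfffff for the 20-bit gen2cp field,
apparently rounded down to 4-byte alignment. A compile-time check of that
reading, illustrative only:

	#include <assert.h>

	#define GPD_LEN_MASK_OG		0xffffu		/* GPD_DATA_LEN_OG() */
	#define GPD_LEN_MASK_EL		0xfffffu	/* GPD_DATA_LEN_EL() */

	static_assert((GPD_LEN_MASK_OG & ~3u) == 65532, "GPD_BUF_SIZE");
	static_assert((GPD_LEN_MASK_EL & ~3u) == 1048572, "GPD_BUF_SIZE_EL");
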
diff --git a/drivers/usb/mtu3/mtu3_trace.c b/drivers/usb/mtu3/mtu3_trace.c
new file mode 100644
index 0000000..4f5e785
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_trace.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mtu3_trace.c - trace support
+ *
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "mtu3_trace.h"
+
+void mtu3_dbg_trace(struct device *dev, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	trace_mtu3_log(dev, &vaf);
+	va_end(args);
+}
diff --git a/drivers/usb/mtu3/mtu3_trace.h b/drivers/usb/mtu3/mtu3_trace.h
new file mode 100644
index 0000000..050e30f
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_trace.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mtu3_trace.h - trace support
+ *
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mtu3
+
+#if !defined(__MTU3_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __MTU3_TRACE_H__
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include "mtu3.h"
+
+#define MTU3_MSG_MAX	256
+
+TRACE_EVENT(mtu3_log,
+	TP_PROTO(struct device *dev, struct va_format *vaf),
+	TP_ARGS(dev, vaf),
+	TP_STRUCT__entry(
+		__string(name, dev_name(dev))
+		__dynamic_array(char, msg, MTU3_MSG_MAX)
+	),
+	TP_fast_assign(
+		__assign_str(name, dev_name(dev));
+		vsnprintf(__get_str(msg), MTU3_MSG_MAX, vaf->fmt, *vaf->va);
+	),
+	TP_printk("%s: %s", __get_str(name), __get_str(msg))
+);
+
+TRACE_EVENT(mtu3_u3_ltssm_isr,
+	TP_PROTO(u32 intr),
+	TP_ARGS(intr),
+	TP_STRUCT__entry(
+		__field(u32, intr)
+	),
+	TP_fast_assign(
+		__entry->intr = intr;
+	),
+	TP_printk("(%08x) %s %s %s %s %s %s", __entry->intr,
+		__entry->intr & HOT_RST_INTR ? "HOT_RST" : "",
+		__entry->intr & WARM_RST_INTR ? "WARM_RST" : "",
+		__entry->intr & ENTER_U3_INTR ? "ENT_U3" : "",
+		__entry->intr & EXIT_U3_INTR ? "EXIT_U3" : "",
+		__entry->intr & VBUS_RISE_INTR ? "VBUS_RISE" : "",
+		__entry->intr & VBUS_FALL_INTR ? "VBUS_FALL" : ""
+	)
+);
+
+TRACE_EVENT(mtu3_u2_common_isr,
+	TP_PROTO(u32 intr),
+	TP_ARGS(intr),
+	TP_STRUCT__entry(
+		__field(u32, intr)
+	),
+	TP_fast_assign(
+		__entry->intr = intr;
+	),
+	TP_printk("(%08x) %s %s %s", __entry->intr,
+		__entry->intr & SUSPEND_INTR ? "SUSPEND" : "",
+		__entry->intr & RESUME_INTR ? "RESUME" : "",
+		__entry->intr & RESET_INTR ? "RESET" : ""
+	)
+);
+
+TRACE_EVENT(mtu3_qmu_isr,
+	TP_PROTO(u32 done_intr, u32 exp_intr),
+	TP_ARGS(done_intr, exp_intr),
+	TP_STRUCT__entry(
+		__field(u32, done_intr)
+		__field(u32, exp_intr)
+	),
+	TP_fast_assign(
+		__entry->done_intr = done_intr;
+		__entry->exp_intr = exp_intr;
+	),
+	TP_printk("done (tx %04x, rx %04x), exp (%08x)",
+		__entry->done_intr & 0xffff,
+		__entry->done_intr >> 16,
+		__entry->exp_intr
+	)
+);
+
+DECLARE_EVENT_CLASS(mtu3_log_setup,
+	TP_PROTO(struct usb_ctrlrequest *setup),
+	TP_ARGS(setup),
+	TP_STRUCT__entry(
+		__field(__u8, bRequestType)
+		__field(__u8, bRequest)
+		__field(__u16, wValue)
+		__field(__u16, wIndex)
+		__field(__u16, wLength)
+	),
+	TP_fast_assign(
+		__entry->bRequestType = setup->bRequestType;
+		__entry->bRequest = setup->bRequest;
+		__entry->wValue = le16_to_cpu(setup->wValue);
+		__entry->wIndex = le16_to_cpu(setup->wIndex);
+		__entry->wLength = le16_to_cpu(setup->wLength);
+	),
+	TP_printk("setup - %02x %02x %04x %04x %04x",
+		__entry->bRequestType, __entry->bRequest,
+		__entry->wValue, __entry->wIndex, __entry->wLength
+	)
+);
+
+DEFINE_EVENT(mtu3_log_setup, mtu3_handle_setup,
+	TP_PROTO(struct usb_ctrlrequest *setup),
+	TP_ARGS(setup)
+);
+
+DECLARE_EVENT_CLASS(mtu3_log_request,
+	TP_PROTO(struct mtu3_request *mreq),
+	TP_ARGS(mreq),
+	TP_STRUCT__entry(
+		__string(name, mreq->mep->name)
+		__field(struct mtu3_request *, mreq)
+		__field(struct qmu_gpd *, gpd)
+		__field(unsigned int, actual)
+		__field(unsigned int, length)
+		__field(int, status)
+		__field(int, zero)
+		__field(int, no_interrupt)
+	),
+	TP_fast_assign(
+		__assign_str(name, mreq->mep->name);
+		__entry->mreq = mreq;
+		__entry->gpd = mreq->gpd;
+		__entry->actual = mreq->request.actual;
+		__entry->length = mreq->request.length;
+		__entry->status = mreq->request.status;
+		__entry->zero = mreq->request.zero;
+		__entry->no_interrupt = mreq->request.no_interrupt;
+	),
+	TP_printk("%s: req %p gpd %p len %u/%u %s%s --> %d",
+		__get_str(name), __entry->mreq, __entry->gpd,
+		__entry->actual, __entry->length,
+		__entry->zero ? "Z" : "z",
+		__entry->no_interrupt ? "i" : "I",
+		__entry->status
+	)
+);
+
+DEFINE_EVENT(mtu3_log_request, mtu3_alloc_request,
+	TP_PROTO(struct mtu3_request *req),
+	TP_ARGS(req)
+);
+
+DEFINE_EVENT(mtu3_log_request, mtu3_free_request,
+	TP_PROTO(struct mtu3_request *req),
+	TP_ARGS(req)
+);
+
+DEFINE_EVENT(mtu3_log_request, mtu3_gadget_queue,
+	TP_PROTO(struct mtu3_request *req),
+	TP_ARGS(req)
+);
+
+DEFINE_EVENT(mtu3_log_request, mtu3_gadget_dequeue,
+	TP_PROTO(struct mtu3_request *req),
+	TP_ARGS(req)
+);
+
+DEFINE_EVENT(mtu3_log_request, mtu3_req_complete,
+	TP_PROTO(struct mtu3_request *req),
+	TP_ARGS(req)
+);
+
+DECLARE_EVENT_CLASS(mtu3_log_gpd,
+	TP_PROTO(struct mtu3_ep *mep, struct qmu_gpd *gpd),
+	TP_ARGS(mep, gpd),
+	TP_STRUCT__entry(
+		__string(name, mep->name)
+		__field(struct qmu_gpd *, gpd)
+		__field(u32, dw0)
+		__field(u32, dw1)
+		__field(u32, dw2)
+		__field(u32, dw3)
+	),
+	TP_fast_assign(
+		__assign_str(name, mep->name);
+		__entry->gpd = gpd;
+		__entry->dw0 = le32_to_cpu(gpd->dw0_info);
+		__entry->dw1 = le32_to_cpu(gpd->next_gpd);
+		__entry->dw2 = le32_to_cpu(gpd->buffer);
+		__entry->dw3 = le32_to_cpu(gpd->dw3_info);
+	),
+	TP_printk("%s: gpd %p - %08x %08x %08x %08x",
+		__get_str(name), __entry->gpd,
+		__entry->dw0, __entry->dw1,
+		__entry->dw2, __entry->dw3
+	)
+);
+
+DEFINE_EVENT(mtu3_log_gpd, mtu3_prepare_gpd,
+	TP_PROTO(struct mtu3_ep *mep, struct qmu_gpd *gpd),
+	TP_ARGS(mep, gpd)
+);
+
+DEFINE_EVENT(mtu3_log_gpd, mtu3_complete_gpd,
+	TP_PROTO(struct mtu3_ep *mep, struct qmu_gpd *gpd),
+	TP_ARGS(mep, gpd)
+);
+
+DEFINE_EVENT(mtu3_log_gpd, mtu3_zlp_exp_gpd,
+	TP_PROTO(struct mtu3_ep *mep, struct qmu_gpd *gpd),
+	TP_ARGS(mep, gpd)
+);
+
+DECLARE_EVENT_CLASS(mtu3_log_ep,
+	TP_PROTO(struct mtu3_ep *mep),
+	TP_ARGS(mep),
+	TP_STRUCT__entry(
+		__string(name, mep->name)
+		__field(unsigned int, type)
+		__field(unsigned int, slot)
+		__field(unsigned int, maxp)
+		__field(unsigned int, mult)
+		__field(unsigned int, maxburst)
+		__field(unsigned int, flags)
+		__field(unsigned int, direction)
+		__field(struct mtu3_gpd_ring *, gpd_ring)
+	),
+	TP_fast_assign(
+		__assign_str(name, mep->name);
+		__entry->type = mep->type;
+		__entry->slot = mep->slot;
+		__entry->maxp = mep->ep.maxpacket;
+		__entry->mult = mep->ep.mult;
+		__entry->maxburst = mep->ep.maxburst;
+		__entry->flags = mep->flags;
+		__entry->direction = mep->is_in;
+		__entry->gpd_ring = &mep->gpd_ring;
+	),
+	TP_printk("%s: type %d maxp %d slot %d mult %d burst %d ring %p/%pad flags %c:%c%c%c:%c",
+		__get_str(name), __entry->type,
+		__entry->maxp, __entry->slot,
+		__entry->mult, __entry->maxburst,
+		__entry->gpd_ring, &__entry->gpd_ring->dma,
+		__entry->flags & MTU3_EP_ENABLED ? 'E' : 'e',
+		__entry->flags & MTU3_EP_STALL ? 'S' : 's',
+		__entry->flags & MTU3_EP_WEDGE ? 'W' : 'w',
+		__entry->flags & MTU3_EP_BUSY ? 'B' : 'b',
+		__entry->direction ? '<' : '>'
+	)
+);
+
+DEFINE_EVENT(mtu3_log_ep, mtu3_gadget_ep_enable,
+	TP_PROTO(struct mtu3_ep *mep),
+	TP_ARGS(mep)
+);
+
+DEFINE_EVENT(mtu3_log_ep, mtu3_gadget_ep_disable,
+	TP_PROTO(struct mtu3_ep *mep),
+	TP_ARGS(mep)
+);
+
+DEFINE_EVENT(mtu3_log_ep, mtu3_gadget_ep_set_halt,
+	TP_PROTO(struct mtu3_ep *mep),
+	TP_ARGS(mep)
+);
+
+#endif /* __MTU3_TRACE_H__ */
+
+/* this part has to be here, outside the multi-include guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mtu3_trace
+
+#include <trace/define_trace.h>
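
The trailer above is the usual tracepoint self-include machinery: the header
is read once normally for the prototypes, then re-read by
<trace/define_trace.h> (with CREATE_TRACE_POINTS set in mtu3_trace.c) to emit
the event definitions, and the events appear under
/sys/kernel/debug/tracing/events/mtu3/ when tracing is enabled. Extending the
set means reusing one of the classes; mtu3_example_event below is
hypothetical, shown only to illustrate the pattern:

	DEFINE_EVENT(mtu3_log_request, mtu3_example_event,
		TP_PROTO(struct mtu3_request *mreq),
		TP_ARGS(mreq)
	);

	/* driver code then calls the generated helper: */
	/*	trace_mtu3_example_event(mreq);		*/
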
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index c3dae7d..facaee7 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -115,6 +115,12 @@
 	depends on USB_MUSB_GADGET
 	depends on USB_OTG_BLACKLIST_HUB
 
+config USB_MUSB_MEDIATEK
+	tristate "MediaTek platforms"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on NOP_USB_XCEIV
+	depends on GENERIC_PHY
+
 config USB_MUSB_AM335X_CHILD
 	tristate
 
@@ -141,7 +147,7 @@
 
 config USB_INVENTRA_DMA
 	bool 'Inventra'
-	depends on USB_MUSB_OMAP2PLUS
+	depends on USB_MUSB_OMAP2PLUS || USB_MUSB_MEDIATEK
 	help
 	  Enable DMA transfers using Mentor's engine.
 
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 3a88c79..63d82d0 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -24,6 +24,7 @@
 obj-$(CONFIG_USB_MUSB_UX500)			+= ux500.o
 obj-$(CONFIG_USB_MUSB_JZ4740)			+= jz4740.o
 obj-$(CONFIG_USB_MUSB_SUNXI)			+= sunxi.o
+obj-$(CONFIG_USB_MUSB_MEDIATEK)			+= mediatek.o
 
 
 obj-$(CONFIG_USB_MUSB_AM335X_CHILD)		+= musb_am335x.o
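
The new mediatek.c glue below acquires its three clocks one by one in
mtk_musb_clks_get(). A sketch of the equivalent clk_bulk alternative, with
the same clock names assumed; this is a design option, not what the driver
does:

	#include <linux/clk.h>
	#include <linux/kernel.h>

	static struct clk_bulk_data mtk_clks[] = {
		{ .id = "main" },
		{ .id = "mcu" },
		{ .id = "univpll" },
	};

	static int mtk_musb_clks_get_bulk(struct device *dev)
	{
		/* one call replaces three devm_clk_get() lookups */
		return devm_clk_bulk_get(dev, ARRAY_SIZE(mtk_clks), mtk_clks);
	}

	static inline int mtk_musb_clks_enable_bulk(void)
	{
		return clk_bulk_prepare_enable(ARRAY_SIZE(mtk_clks), mtk_clks);
	}
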
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
new file mode 100644
index 0000000..2868274
--- /dev/null
+++ b/drivers/usb/musb/mediatek.c
@@ -0,0 +1,631 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author:
+ *  Min Guo <min.guo@mediatek.com>
+ *  Yonglong Wu <yonglong.wu@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb/usb_phy_generic.h>
+#include "musb_core.h"
+#include "musb_dma.h"
+
+#define USB_L1INTS	0x00a0
+#define USB_L1INTM	0x00a4
+#define MTK_MUSB_TXFUNCADDR	0x0480
+
+/* MediaTek controller toggle enable and status reg */
+#define MUSB_RXTOG		0x80
+#define MUSB_RXTOGEN		0x82
+#define MUSB_TXTOG		0x84
+#define MUSB_TXTOGEN		0x86
+
+#define TX_INT_STATUS		BIT(0)
+#define RX_INT_STATUS		BIT(1)
+#define USBCOM_INT_STATUS	BIT(2)
+#define DMA_INT_STATUS		BIT(3)
+
+#define DMA_INTR_STATUS_MSK		GENMASK(7, 0)
+#define DMA_INTR_UNMASK_SET_MSK	GENMASK(31, 24)
+
+enum mtk_vbus_id_state {
+	MTK_ID_FLOAT = 1,
+	MTK_ID_GROUND,
+	MTK_VBUS_OFF,
+	MTK_VBUS_VALID,
+};
+
+struct mtk_glue {
+	struct device *dev;
+	struct musb *musb;
+	struct platform_device *musb_pdev;
+	struct platform_device *usb_phy;
+	struct phy *phy;
+	struct usb_phy *xceiv;
+	enum phy_mode phy_mode;
+	struct clk *main;
+	struct clk *mcu;
+	struct clk *univpll;
+	struct regulator *vbus;
+	struct extcon_dev *edev;
+	struct notifier_block vbus_nb;
+	struct notifier_block id_nb;
+};
+
+static int mtk_musb_clks_get(struct mtk_glue *glue)
+{
+	struct device *dev = glue->dev;
+
+	glue->main = devm_clk_get(dev, "main");
+	if (IS_ERR(glue->main)) {
+		dev_err(dev, "fail to get main clock\n");
+		return PTR_ERR(glue->main);
+	}
+
+	glue->mcu = devm_clk_get(dev, "mcu");
+	if (IS_ERR(glue->mcu)) {
+		dev_err(dev, "fail to get mcu clock\n");
+		return PTR_ERR(glue->mcu);
+	}
+
+	glue->univpll = devm_clk_get(dev, "univpll");
+	if (IS_ERR(glue->univpll)) {
+		dev_err(dev, "fail to get univpll clock\n");
+		return PTR_ERR(glue->univpll);
+	}
+
+	return 0;
+}
+
+static int mtk_musb_clks_enable(struct mtk_glue *glue)
+{
+	int ret;
+
+	ret = clk_prepare_enable(glue->main);
+	if (ret) {
+		dev_err(glue->dev, "failed to enable main clock\n");
+		goto err_main_clk;
+	}
+
+	ret = clk_prepare_enable(glue->mcu);
+	if (ret) {
+		dev_err(glue->dev, "failed to enable mcu clock\n");
+		goto err_mcu_clk;
+	}
+
+	ret = clk_prepare_enable(glue->univpll);
+	if (ret) {
+		dev_err(glue->dev, "failed to enable univpll clock\n");
+		goto err_univpll_clk;
+	}
+
+	return 0;
+
+err_univpll_clk:
+	clk_disable_unprepare(glue->mcu);
+err_mcu_clk:
+	clk_disable_unprepare(glue->main);
+err_main_clk:
+	return ret;
+}
+
+static void mtk_musb_clks_disable(struct mtk_glue *glue)
+{
+	clk_disable_unprepare(glue->univpll);
+	clk_disable_unprepare(glue->mcu);
+	clk_disable_unprepare(glue->main);
+}
+
+static void mtk_musb_set_vbus(struct musb *musb, int is_on)
+{
+	struct device *dev = musb->controller;
+	struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+	int ret;
+
+	/* vbus is optional */
+	if (!glue->vbus)
+		return;
+
+	dev_dbg(musb->controller, "%s, is_on=%d\n", __func__, is_on);
+	if (is_on) {
+		ret = regulator_enable(glue->vbus);
+		if (ret) {
+			dev_err(glue->dev, "fail to enable vbus regulator\n");
+			return;
+		}
+	} else {
+		regulator_disable(glue->vbus);
+	}
+}
+
+/*
+ * switch to host: -> MTK_VBUS_OFF --> MTK_ID_GROUND
+ * switch to device: -> MTK_ID_FLOAT --> MTK_VBUS_VALID
+ */
+static void mtk_musb_set_mailbox(struct mtk_glue *glue,
+				 enum mtk_vbus_id_state status)
+{
+	struct musb *musb = glue->musb;
+	u8 devctl = 0;
+
+	dev_dbg(glue->dev, "mailbox state(%d)\n", status);
+	switch (status) {
+	case MTK_ID_GROUND:
+		phy_power_on(glue->phy);
+		devctl = readb(musb->mregs + MUSB_DEVCTL);
+		musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+		mtk_musb_set_vbus(musb, 1);
+		glue->phy_mode = PHY_MODE_USB_HOST;
+		phy_set_mode(glue->phy, glue->phy_mode);
+		devctl |= MUSB_DEVCTL_SESSION;
+		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+		MUSB_HST_MODE(musb);
+		break;
+	/*
+	 * MTK_ID_FLOAT is handled like MTK_VBUS_OFF below,
+	 * except that it first turns off VBUS
+	 */
+	case MTK_ID_FLOAT:
+		mtk_musb_set_vbus(musb, 0);
+		/* fall through */
+	case MTK_VBUS_OFF:
+		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+		devctl &= ~MUSB_DEVCTL_SESSION;
+		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+		phy_power_off(glue->phy);
+		break;
+	case MTK_VBUS_VALID:
+		phy_power_on(glue->phy);
+		glue->phy_mode = PHY_MODE_USB_DEVICE;
+		phy_set_mode(glue->phy, glue->phy_mode);
+		MUSB_DEV_MODE(musb);
+		break;
+	default:
+		dev_err(glue->dev, "invalid state\n");
+	}
+}
+
+static int mtk_musb_id_notifier(struct notifier_block *nb,
+				unsigned long event, void *ptr)
+{
+	struct mtk_glue *glue = container_of(nb, struct mtk_glue, id_nb);
+
+	if (event)
+		mtk_musb_set_mailbox(glue, MTK_ID_GROUND);
+	else
+		mtk_musb_set_mailbox(glue, MTK_ID_FLOAT);
+
+	return NOTIFY_DONE;
+}
+
+static int mtk_musb_vbus_notifier(struct notifier_block *nb,
+				  unsigned long event, void *ptr)
+{
+	struct mtk_glue *glue = container_of(nb, struct mtk_glue, vbus_nb);
+
+	if (event)
+		mtk_musb_set_mailbox(glue, MTK_VBUS_VALID);
+	else
+		mtk_musb_set_mailbox(glue, MTK_VBUS_OFF);
+
+	return NOTIFY_DONE;
+}
+
+static void mtk_otg_switch_init(struct mtk_glue *glue)
+{
+	int ret;
+
+	/* extcon is optional */
+	if (!glue->edev)
+		return;
+
+	glue->vbus_nb.notifier_call = mtk_musb_vbus_notifier;
+	ret = devm_extcon_register_notifier(glue->dev, glue->edev, EXTCON_USB,
+					    &glue->vbus_nb);
+	if (ret < 0)
+		dev_err(glue->dev, "failed to register notifier for USB\n");
+
+	glue->id_nb.notifier_call = mtk_musb_id_notifier;
+	ret = devm_extcon_register_notifier(glue->dev, glue->edev,
+					    EXTCON_USB_HOST, &glue->id_nb);
+	if (ret < 0)
+		dev_err(glue->dev, "failed to register notifier for USB-HOST\n");
+
+	dev_dbg(glue->dev, "EXTCON_USB: %d, EXTCON_USB_HOST: %d\n",
+		extcon_get_state(glue->edev, EXTCON_USB),
+		extcon_get_state(glue->edev, EXTCON_USB_HOST));
+
+	/* default to host; switch to device mode if needed */
+	if (!extcon_get_state(glue->edev, EXTCON_USB_HOST))
+		mtk_musb_set_mailbox(glue, MTK_ID_FLOAT);
+	if (extcon_get_state(glue->edev, EXTCON_USB) > 0)
+		mtk_musb_set_mailbox(glue, MTK_VBUS_VALID);
+}
+
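+/*
+ * All controller interrupts are funnelled through a MediaTek level-1
+ * aggregator: mtk_musb_interrupt() reads USB_L1INTS to find the active
+ * sources, then dispatches to the common MUSB handler below and, when
+ * the Inventra DMA engine is built in, to dma_controller_irq().
+ */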
+static irqreturn_t generic_interrupt(int irq, void *__hci)
+{
+	unsigned long flags;
+	irqreturn_t retval = IRQ_NONE;
+	struct musb *musb = __hci;
+
+	spin_lock_irqsave(&musb->lock, flags);
+	musb->int_usb = musb_clearb(musb->mregs, MUSB_INTRUSB);
+	musb->int_rx = musb_clearw(musb->mregs, MUSB_INTRRX);
+	musb->int_tx = musb_clearw(musb->mregs, MUSB_INTRTX);
+
+	if (musb->int_usb || musb->int_tx || musb->int_rx)
+		retval = musb_interrupt(musb);
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	return retval;
+}
+
+static irqreturn_t mtk_musb_interrupt(int irq, void *dev_id)
+{
+	irqreturn_t retval = IRQ_NONE;
+	struct musb *musb = dev_id;
+	u32 l1_ints;
+
+	l1_ints = musb_readl(musb->mregs, USB_L1INTS) &
+			musb_readl(musb->mregs, USB_L1INTM);
+
+	if (l1_ints & (TX_INT_STATUS | RX_INT_STATUS | USBCOM_INT_STATUS))
+		retval = generic_interrupt(irq, musb);
+
+#if defined(CONFIG_USB_INVENTRA_DMA)
+	if (l1_ints & DMA_INT_STATUS)
+		retval = dma_controller_irq(irq, musb->dma_controller);
+#endif
+	return retval;
+}
+
+static u32 mtk_musb_busctl_offset(u8 epnum, u16 offset)
+{
+	return MTK_MUSB_TXFUNCADDR + offset + 8 * epnum;
+}
+
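+/*
+ * The MediaTek interrupt status registers are write-1-to-clear (W1C):
+ * a read returns the pending bits and writing them back acknowledges
+ * them.  The generic MUSB registers are clear-on-read instead, which is
+ * why the core falls back to a plain read when no clearb/clearw op is
+ * provided.
+ */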
+static u8 mtk_musb_clearb(void __iomem *addr, unsigned int offset)
+{
+	u8 data;
+
+	/* W1C */
+	data = musb_readb(addr, offset);
+	musb_writeb(addr, offset, data);
+	return data;
+}
+
+static u16 mtk_musb_clearw(void __iomem *addr, unsigned int offset)
+{
+	u16 data;
+
+	/* W1C */
+	data = musb_readw(addr, offset);
+	musb_writew(addr, offset, data);
+	return data;
+}
+
+static int mtk_musb_init(struct musb *musb)
+{
+	struct device *dev = musb->controller;
+	struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+	int ret;
+
+	glue->musb = musb;
+	musb->phy = glue->phy;
+	musb->xceiv = glue->xceiv;
+	musb->is_host = false;
+	musb->isr = mtk_musb_interrupt;
+	ret = phy_init(glue->phy);
+	if (ret)
+		return ret;
+
+	ret = phy_power_on(glue->phy);
+	if (ret) {
+		phy_exit(glue->phy);
+		return ret;
+	}
+
+	phy_set_mode(glue->phy, glue->phy_mode);
+
+#if defined(CONFIG_USB_INVENTRA_DMA)
+	musb_writel(musb->mregs, MUSB_HSDMA_INTR,
+		    DMA_INTR_STATUS_MSK | DMA_INTR_UNMASK_SET_MSK);
+#endif
+	musb_writel(musb->mregs, USB_L1INTM, TX_INT_STATUS | RX_INT_STATUS |
+		    USBCOM_INT_STATUS | DMA_INT_STATUS);
+	return 0;
+}
+
+static u16 mtk_musb_get_toggle(struct musb_qh *qh, int is_out)
+{
+	struct musb *musb = qh->hw_ep->musb;
+	u8 epnum = qh->hw_ep->epnum;
+	u16 toggle;
+
+	if (is_out)
+		toggle = musb_readw(musb->mregs, MUSB_TXTOG);
+	else
+		toggle = musb_readw(musb->mregs, MUSB_RXTOG);
+
+	return toggle & (1 << epnum);
+}
+
+static u16 mtk_musb_set_toggle(struct musb_qh *qh, int is_out, struct urb *urb)
+{
+	struct musb *musb = qh->hw_ep->musb;
+	u8 epnum = qh->hw_ep->epnum;
+	u16 toggle;
+
+	toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
+
+	if (is_out) {
+		musb_writew(musb->mregs, MUSB_TXTOGEN, (1 << epnum));
+		musb_writew(musb->mregs, MUSB_TXTOG, (toggle << epnum));
+	} else {
+		musb_writew(musb->mregs, MUSB_RXTOGEN, (1 << epnum));
+		musb_writew(musb->mregs, MUSB_RXTOG, (toggle << epnum));
+	}
+
+	return 0;
+}
+
+static int mtk_musb_set_mode(struct musb *musb, u8 mode)
+{
+	struct device *dev = musb->controller;
+	struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+	enum phy_mode new_mode;
+
+	switch (mode) {
+	case MUSB_HOST:
+		new_mode = PHY_MODE_USB_HOST;
+		mtk_musb_set_vbus(musb, 1);
+		break;
+	case MUSB_PERIPHERAL:
+		new_mode = PHY_MODE_USB_DEVICE;
+		break;
+	case MUSB_OTG:
+		new_mode = PHY_MODE_USB_HOST;
+		break;
+	default:
+		dev_err(musb->controller->parent,
+			"requested mode %d not supported\n", mode);
+		return -EINVAL;
+	}
+	if (glue->phy_mode == new_mode)
+		return 0;
+
+	mtk_musb_set_mailbox(glue, MTK_ID_GROUND);
+	return 0;
+}
+
+static int mtk_musb_exit(struct musb *musb)
+{
+	struct device *dev = musb->controller;
+	struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+
+	phy_power_off(glue->phy);
+	phy_exit(glue->phy);
+	mtk_musb_clks_disable(glue);
+
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+	return 0;
+}
+
+static const struct musb_platform_ops mtk_musb_ops = {
+	.quirks = MUSB_DMA_INVENTRA,
+	.init = mtk_musb_init,
+	.get_toggle = mtk_musb_get_toggle,
+	.set_toggle = mtk_musb_set_toggle,
+	.exit = mtk_musb_exit,
+#ifdef CONFIG_USB_INVENTRA_DMA
+	.dma_init = musbhs_dma_controller_create_noirq,
+	.dma_exit = musbhs_dma_controller_destroy,
+#endif
+	.clearb = mtk_musb_clearb,
+	.clearw = mtk_musb_clearw,
+	.busctl_offset = mtk_musb_busctl_offset,
+	.set_mode = mtk_musb_set_mode,
+	.set_vbus = mtk_musb_set_vbus,
+};
+
+#define MTK_MUSB_MAX_EP_NUM	8
+#define MTK_MUSB_RAM_BITS	11
+
+static struct musb_fifo_cfg mtk_musb_mode_cfg[] = {
+	{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+	{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+	{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+	{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+	{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+	{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 1024, },
+	{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 1024, },
+	{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
+	{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 64, },
+};
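+
+/*
+ * Budget check for the table above, assuming the MUSB core sizes the
+ * dynamic FIFO as 1 << (MTK_MUSB_RAM_BITS + 2) = 8192 bytes:
+ * eps 1-5 use 10 * 512 = 5120, ep 6 uses 2 * 1024 = 2048 and ep 7 uses
+ * 512 + 64 = 576, i.e. 7744 bytes in total.
+ */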
+
+static const struct musb_hdrc_config mtk_musb_hdrc_config = {
+	.fifo_cfg = mtk_musb_mode_cfg,
+	.fifo_cfg_size = ARRAY_SIZE(mtk_musb_mode_cfg),
+	.multipoint = true,
+	.dyn_fifo = true,
+	.num_eps = MTK_MUSB_MAX_EP_NUM,
+	.ram_bits = MTK_MUSB_RAM_BITS,
+};
+
+static const struct platform_device_info mtk_dev_info = {
+	.name = "musb-hdrc",
+	.id = PLATFORM_DEVID_AUTO,
+	.dma_mask = DMA_BIT_MASK(32),
+};
+
+static int mtk_musb_probe(struct platform_device *pdev)
+{
+	struct musb_hdrc_platform_data *pdata;
+	struct mtk_glue *glue;
+	struct platform_device_info pinfo;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct device_node *child, *extcon_node;
+	int ret = -ENOMEM;
+
+	glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
+	if (!glue)
+		return -ENOMEM;
+
+	glue->dev = dev;
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	ret = mtk_musb_clks_get(glue);
+	if (ret)
+		return ret;
+
+	pdata->config = &mtk_musb_hdrc_config;
+	pdata->platform_ops = &mtk_musb_ops;
+
+	glue->vbus = devm_regulator_get(dev, "vbus");
+	if (IS_ERR(glue->vbus)) {
+		dev_err(dev, "failed to get vbus regulator\n");
+		return PTR_ERR(glue->vbus);
+	}
+
+	pdata->mode = usb_get_dr_mode(dev);
+	switch (pdata->mode) {
+	case USB_DR_MODE_HOST:
+		glue->phy_mode = PHY_MODE_USB_HOST;
+		break;
+	case USB_DR_MODE_PERIPHERAL:
+		glue->phy_mode = PHY_MODE_USB_DEVICE;
+		break;
+	default:
+		pdata->mode = USB_DR_MODE_OTG;
+		/* fall through */
+	case USB_DR_MODE_OTG:
+		glue->phy_mode = PHY_MODE_USB_OTG;
+		break;
+	}
+
+	child = of_get_child_by_name(np, "connector");
+	if (!child) {
+		dev_err(dev, "failed to find usb connector node\n");
+		return -ENODEV;
+	}
+
+	if (pdata->mode == USB_DR_MODE_OTG) {
+		extcon_node = of_parse_phandle(child, "extcon", 0);
+		if (!extcon_node) {
+			dev_err(dev, "failed to get extcon phandle\n");
+			of_node_put(child);
+			return -ENODEV;
+		}
+
+		glue->edev = extcon_find_edev_by_node(extcon_node);
+		of_node_put(extcon_node);
+		if (IS_ERR(glue->edev)) {
+			dev_err(dev, "failed to get extcon device\n");
+			of_node_put(child);
+			return PTR_ERR(glue->edev);
+		}
+	}
+	of_node_put(child);
+
+	glue->phy = devm_of_phy_get_by_index(dev, np, 0);
+	if (IS_ERR(glue->phy)) {
+		dev_err(dev, "failed to get phy: %ld\n",
+			PTR_ERR(glue->phy));
+		return PTR_ERR(glue->phy);
+	}
+
+	glue->usb_phy = usb_phy_generic_register();
+	if (IS_ERR(glue->usb_phy)) {
+		dev_err(dev, "failed to register usb-phy: %ld\n",
+			PTR_ERR(glue->usb_phy));
+		return PTR_ERR(glue->usb_phy);
+	}
+
+	glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+	if (IS_ERR(glue->xceiv)) {
+		ret = PTR_ERR(glue->xceiv);
+		dev_err(dev, "failed to get usb-phy: %d\n", ret);
+		goto err_unregister_usb_phy;
+	}
+
+	platform_set_drvdata(pdev, glue);
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
+	ret = mtk_musb_clks_enable(glue);
+	if (ret)
+		goto err_enable_clk;
+
+	pinfo = mtk_dev_info;
+	pinfo.parent = dev;
+	pinfo.res = pdev->resource;
+	pinfo.num_res = pdev->num_resources;
+	pinfo.data = pdata;
+	pinfo.size_data = sizeof(*pdata);
+
+	glue->musb_pdev = platform_device_register_full(&pinfo);
+	if (IS_ERR(glue->musb_pdev)) {
+		ret = PTR_ERR(glue->musb_pdev);
+		dev_err(dev, "failed to register musb device: %d\n", ret);
+		goto err_device_register;
+	}
+
+	if (pdata->mode == USB_DR_MODE_OTG)
+		mtk_otg_switch_init(glue);
+
+	return 0;
+
+err_device_register:
+	mtk_musb_clks_disable(glue);
+err_enable_clk:
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+err_unregister_usb_phy:
+	usb_phy_generic_unregister(glue->usb_phy);
+	return ret;
+}
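+
+/*
+ * A minimal devicetree sketch of what mtk_musb_probe() consumes; the
+ * node names, addresses and phandles are illustrative only:
+ *
+ *	usb@11200000 {
+ *		compatible = "mediatek,mtk-musb";
+ *		reg = <0x11200000 0x1000>;
+ *		phys = <&u2port0>;
+ *		vbus-supply = <&usb_vbus>;
+ *		dr_mode = "otg";
+ *		connector {
+ *			extcon = <&extcon_usb>;
+ *		};
+ *	};
+ */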
+
+static int mtk_musb_remove(struct platform_device *pdev)
+{
+	struct mtk_glue *glue = platform_get_drvdata(pdev);
+	struct platform_device *usb_phy = glue->usb_phy;
+
+	platform_device_unregister(glue->musb_pdev);
+	usb_phy_generic_unregister(usb_phy);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mtk_musb_match[] = {
+	{.compatible = "mediatek,mtk-musb",},
+	{},
+};
+MODULE_DEVICE_TABLE(of, mtk_musb_match);
+#endif
+
+static struct platform_driver mtk_musb_driver = {
+	.probe = mtk_musb_probe,
+	.remove = mtk_musb_remove,
+	.driver = {
+		   .name = "musb-mtk",
+		   .of_match_table = of_match_ptr(mtk_musb_match),
+	},
+};
+
+module_platform_driver(mtk_musb_driver);
+
+MODULE_DESCRIPTION("MediaTek MUSB Glue Layer");
+MODULE_AUTHOR("Min Guo <min.guo@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index b6b4f99..8862570 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -246,7 +246,7 @@
 	return 0x80 + (0x08 * epnum) + offset;
 }
 
-static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
+static u8 musb_default_readb(void __iomem *addr, unsigned offset)
 {
 	u8 data =  __raw_readb(addr + offset);
 
@@ -260,7 +260,7 @@
 	__raw_writeb(data, addr + offset);
 }
 
-static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
+static u16 musb_default_readw(void __iomem *addr, unsigned offset)
 {
 	u16 data = __raw_readw(addr + offset);
 
@@ -274,6 +274,38 @@
 	__raw_writew(data, addr + offset);
 }
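+
+/*
+ * Default toggle accessors: they keep the historical behaviour of
+ * reading and writing the data toggle through the host-side CSR
+ * registers.  Glue layers with dedicated toggle registers (such as the
+ * MediaTek glue) override them via struct musb_platform_ops.
+ */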
 
+static u16 musb_default_get_toggle(struct musb_qh *qh, int is_out)
+{
+	void __iomem *epio = qh->hw_ep->regs;
+	u16 csr;
+
+	if (is_out)
+		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
+	else
+		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
+
+	return csr;
+}
+
+static u16 musb_default_set_toggle(struct musb_qh *qh, int is_out,
+				   struct urb *urb)
+{
+	u16 csr;
+	u16 toggle;
+
+	toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
+
+	if (is_out)
+		csr = toggle ? (MUSB_TXCSR_H_WR_DATATOGGLE
+				| MUSB_TXCSR_H_DATATOGGLE)
+				: MUSB_TXCSR_CLRDATATOG;
+	else
+		csr = toggle ? (MUSB_RXCSR_H_WR_DATATOGGLE
+				| MUSB_RXCSR_H_DATATOGGLE) : 0;
+
+	return csr;
+}
+
 /*
  * Load an endpoint's FIFO
  */
@@ -364,19 +396,25 @@
 /*
  * Old style IO functions
  */
-u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
+u8 (*musb_readb)(void __iomem *addr, unsigned offset);
 EXPORT_SYMBOL_GPL(musb_readb);
 
 void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
 EXPORT_SYMBOL_GPL(musb_writeb);
 
-u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
+u8 (*musb_clearb)(void __iomem *addr, unsigned int offset);
+EXPORT_SYMBOL_GPL(musb_clearb);
+
+u16 (*musb_readw)(void __iomem *addr, unsigned offset);
 EXPORT_SYMBOL_GPL(musb_readw);
 
 void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
 EXPORT_SYMBOL_GPL(musb_writew);
 
-u32 musb_readl(const void __iomem *addr, unsigned offset)
+u16 (*musb_clearw)(void __iomem *addr, unsigned int offset);
+EXPORT_SYMBOL_GPL(musb_clearw);
+
+u32 musb_readl(void __iomem *addr, unsigned offset)
 {
 	u32 data = __raw_readl(addr + offset);
 
@@ -1015,7 +1053,6 @@
 static void musb_disable_interrupts(struct musb *musb)
 {
 	void __iomem	*mbase = musb->mregs;
-	u16	temp;
 
 	/* disable interrupts */
 	musb_writeb(mbase, MUSB_INTRUSBE, 0);
@@ -1025,9 +1062,9 @@
 	musb_writew(mbase, MUSB_INTRRXE, 0);
 
 	/*  flush pending interrupts */
-	temp = musb_readb(mbase, MUSB_INTRUSB);
-	temp = musb_readw(mbase, MUSB_INTRTX);
-	temp = musb_readw(mbase, MUSB_INTRRX);
+	musb_clearb(mbase, MUSB_INTRUSB);
+	musb_clearw(mbase, MUSB_INTRTX);
+	musb_clearw(mbase, MUSB_INTRRX);
 }
 
 static void musb_enable_interrupts(struct musb *musb)
@@ -2260,10 +2297,19 @@
 		musb_readb = musb->ops->readb;
 	if (musb->ops->writeb)
 		musb_writeb = musb->ops->writeb;
+	if (musb->ops->clearb)
+		musb_clearb = musb->ops->clearb;
+	else
+		musb_clearb = musb_readb;
+
 	if (musb->ops->readw)
 		musb_readw = musb->ops->readw;
 	if (musb->ops->writew)
 		musb_writew = musb->ops->writew;
+	if (musb->ops->clearw)
+		musb_clearw = musb->ops->clearw;
+	else
+		musb_clearw = musb_readw;
 
 #ifndef CONFIG_MUSB_PIO_ONLY
 	if (!musb->ops->dma_init || !musb->ops->dma_exit) {
@@ -2285,6 +2331,16 @@
 	else
 		musb->io.write_fifo = musb_default_write_fifo;
 
+	if (musb->ops->get_toggle)
+		musb->io.get_toggle = musb->ops->get_toggle;
+	else
+		musb->io.get_toggle = musb_default_get_toggle;
+
+	if (musb->ops->set_toggle)
+		musb->io.set_toggle = musb->ops->set_toggle;
+	else
+		musb->io.set_toggle = musb_default_set_toggle;
+
 	if (!musb->xceiv->io_ops) {
 		musb->xceiv->io_dev = musb->controller;
 		musb->xceiv->io_priv = musb->mregs;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 04203b7..0d9a35f 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -27,6 +27,7 @@
 struct musb;
 struct musb_hw_ep;
 struct musb_ep;
+struct musb_qh;
 
 /* Helper defines for struct musb->hwvers */
 #define MUSB_HWVERS_MAJOR(x)	((x >> 10) & 0x1f)
@@ -119,10 +120,14 @@
  * @fifo_offset: returns the fifo offset
  * @readb:	read 8 bits
  * @writeb:	write 8 bits
+ * @clearb:	could be clear-on-readb or W1C
  * @readw:	read 16 bits
  * @writew:	write 16 bits
+ * @clearw:	could be clear-on-readw or W1C
  * @read_fifo:	reads the fifo
  * @write_fifo:	writes to fifo
+ * @get_toggle:	platform specific get toggle function
+ * @set_toggle:	platform specific set toggle function
  * @dma_init:	platform specific dma init function
  * @dma_exit:	platform specific dma exit function
  * @init:	turns on clocks, sets up platform-specific registers, etc
@@ -161,12 +166,16 @@
 	u16	fifo_mode;
 	u32	(*fifo_offset)(u8 epnum);
 	u32	(*busctl_offset)(u8 epnum, u16 offset);
-	u8	(*readb)(const void __iomem *addr, unsigned offset);
+	u8	(*readb)(void __iomem *addr, unsigned offset);
 	void	(*writeb)(void __iomem *addr, unsigned offset, u8 data);
-	u16	(*readw)(const void __iomem *addr, unsigned offset);
+	u8	(*clearb)(void __iomem *addr, unsigned int offset);
+	u16	(*readw)(void __iomem *addr, unsigned offset);
 	void	(*writew)(void __iomem *addr, unsigned offset, u16 data);
+	u16	(*clearw)(void __iomem *addr, unsigned int offset);
 	void	(*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
 	void	(*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
+	u16	(*get_toggle)(struct musb_qh *qh, int is_out);
+	u16	(*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
 	struct dma_controller *
 		(*dma_init) (struct musb *musb, void __iomem *base);
 	void	(*dma_exit)(struct dma_controller *c);
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 8f60271..05103ea 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -35,6 +35,12 @@
  *    whether shared with the Inventra core or separate.
  */
 
+#define MUSB_HSDMA_BASE		0x200
+#define MUSB_HSDMA_INTR		(MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL		0x4
+#define MUSB_HSDMA_ADDRESS		0x8
+#define MUSB_HSDMA_COUNT		0xc
+
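+/*
+ * The HSDMA register offsets above were moved here from musbhsdma.c so
+ * that glue layers can program MUSB_HSDMA_INTR directly, as
+ * mtk_musb_init() does.
+ */
+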
 #define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
 
 #ifdef CONFIG_MUSB_PIO_ONLY
@@ -191,6 +197,9 @@
 extern struct dma_controller *
 musbhs_dma_controller_create(struct musb *musb, void __iomem *base);
 extern void musbhs_dma_controller_destroy(struct dma_controller *c);
+extern struct dma_controller *
+musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base);
+extern irqreturn_t dma_controller_irq(int irq, void *private_data);
 
 extern struct dma_controller *
 tusb_dma_controller_create(struct musb *musb, void __iomem *base);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 68f18af..a1c4fc5 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -286,26 +286,6 @@
 	spin_lock(&musb->lock);
 }
 
-/* For bulk/interrupt endpoints only */
-static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
-				    struct urb *urb)
-{
-	void __iomem		*epio = qh->hw_ep->regs;
-	u16			csr;
-
-	/*
-	 * FIXME: the current Mentor DMA code seems to have
-	 * problems getting toggle correct.
-	 */
-
-	if (is_in)
-		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
-	else
-		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
-
-	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
-}
-
 /*
  * Advance this hardware endpoint's queue, completing the specified URB and
  * advancing to either the next URB queued to that qh, or else invalidating
@@ -320,6 +300,7 @@
 	struct musb_hw_ep	*ep = qh->hw_ep;
 	int			ready = qh->is_ready;
 	int			status;
+	u16			toggle;
 
 	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
 
@@ -327,7 +308,8 @@
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_BULK:
 	case USB_ENDPOINT_XFER_INT:
-		musb_save_toggle(qh, is_in, urb);
+		toggle = musb->io.get_toggle(qh, !is_in);
+		usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
 		if (status == 0 && urb->error_count)
@@ -772,13 +754,8 @@
 					);
 			csr |= MUSB_TXCSR_MODE;
 
-			if (!hw_ep->tx_double_buffered) {
-				if (usb_gettoggle(urb->dev, qh->epnum, 1))
-					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
-						| MUSB_TXCSR_H_DATATOGGLE;
-				else
-					csr |= MUSB_TXCSR_CLRDATATOG;
-			}
+			if (!hw_ep->tx_double_buffered)
+				csr |= musb->io.set_toggle(qh, is_out, urb);
 
 			musb_writew(epio, MUSB_TXCSR, csr);
 			/* REVISIT may need to clear FLUSHFIFO ... */
@@ -860,17 +837,12 @@
 
 	/* IN/receive */
 	} else {
-		u16	csr;
+		u16 csr = 0;
 
 		if (hw_ep->rx_reinit) {
 			musb_rx_reinit(musb, qh, epnum);
+			csr |= musb->io.set_toggle(qh, is_out, urb);
 
-			/* init new state: toggle and NYET, maybe DMA later */
-			if (usb_gettoggle(urb->dev, qh->epnum, 0))
-				csr = MUSB_RXCSR_H_WR_DATATOGGLE
-					| MUSB_RXCSR_H_DATATOGGLE;
-			else
-				csr = 0;
 			if (qh->type == USB_ENDPOINT_XFER_INT)
 				csr |= MUSB_RXCSR_DISNYET;
 
@@ -933,6 +905,7 @@
 	void __iomem		*epio = ep->regs;
 	struct musb_qh		*cur_qh, *next_qh;
 	u16			rx_csr, tx_csr;
+	u16			toggle;
 
 	musb_ep_select(mbase, ep->epnum);
 	if (is_in) {
@@ -970,7 +943,8 @@
 			urb->actual_length += dma->actual_len;
 			dma->actual_len = 0L;
 		}
-		musb_save_toggle(cur_qh, is_in, urb);
+		toggle = musb->io.get_toggle(cur_qh, !is_in);
+		usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);
 
 		if (is_in) {
 			/* move cur_qh to end of queue */
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 8058a58..7200596 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -22,6 +22,8 @@
  * @read_fifo:	platform specific function to read fifo
  * @write_fifo:	platform specific function to write fifo
  * @busctl_offset: platform specific function to get busctl offset
+ * @get_toggle: platform specific function to get toggle
+ * @set_toggle: platform specific function to set toggle
  */
 struct musb_io {
 	u32	(*ep_offset)(u8 epnum, u16 offset);
@@ -30,14 +32,18 @@
 	void	(*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
 	void	(*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
 	u32	(*busctl_offset)(u8 epnum, u16 offset);
+	u16	(*get_toggle)(struct musb_qh *qh, int is_out);
+	u16	(*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
 };
 
 /* Do not add new entries here, add them the struct musb_io instead */
-extern u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
+extern u8 (*musb_readb)(void __iomem *addr, unsigned offset);
 extern void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
-extern u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
+extern u8 (*musb_clearb)(void __iomem *addr, unsigned int offset);
+extern u16 (*musb_readw)(void __iomem *addr, unsigned offset);
 extern void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
-extern u32 musb_readl(const void __iomem *addr, unsigned offset);
+extern u16 (*musb_clearw)(void __iomem *addr, unsigned int offset);
+extern u32 musb_readl(void __iomem *addr, unsigned offset);
 extern void musb_writel(void __iomem *addr, unsigned offset, u32 data);
 
 #endif
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 2d3751d..0aacfc8 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -10,12 +10,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include "musb_core.h"
-
-#define MUSB_HSDMA_BASE		0x200
-#define MUSB_HSDMA_INTR		(MUSB_HSDMA_BASE + 0)
-#define MUSB_HSDMA_CONTROL		0x4
-#define MUSB_HSDMA_ADDRESS		0x8
-#define MUSB_HSDMA_COUNT		0xc
+#include "musb_dma.h"
 
 #define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset)		\
 		(MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)
@@ -268,7 +263,7 @@
 	return 0;
 }
 
-static irqreturn_t dma_controller_irq(int irq, void *private_data)
+irqreturn_t dma_controller_irq(int irq, void *private_data)
 {
 	struct musb_dma_controller *controller = private_data;
 	struct musb *musb = controller->private_data;
@@ -289,7 +284,7 @@
 
 	spin_lock_irqsave(&musb->lock, flags);
 
-	int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
+	int_hsdma = musb_clearb(mbase, MUSB_HSDMA_INTR);
 
 	if (!int_hsdma) {
 		musb_dbg(musb, "spurious DMA irq");
@@ -383,6 +378,7 @@
 	spin_unlock_irqrestore(&musb->lock, flags);
 	return retval;
 }
+EXPORT_SYMBOL_GPL(dma_controller_irq);
 
 void musbhs_dma_controller_destroy(struct dma_controller *c)
 {
@@ -398,18 +394,10 @@
 }
 EXPORT_SYMBOL_GPL(musbhs_dma_controller_destroy);
 
-struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
-						    void __iomem *base)
+static struct musb_dma_controller *
+dma_controller_alloc(struct musb *musb, void __iomem *base)
 {
 	struct musb_dma_controller *controller;
-	struct device *dev = musb->controller;
-	struct platform_device *pdev = to_platform_device(dev);
-	int irq = platform_get_irq_byname(pdev, "dma");
-
-	if (irq <= 0) {
-		dev_err(dev, "No DMA interrupt line!\n");
-		return NULL;
-	}
 
 	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
 	if (!controller)
@@ -423,6 +411,25 @@
 	controller->controller.channel_release = dma_channel_release;
 	controller->controller.channel_program = dma_channel_program;
 	controller->controller.channel_abort = dma_channel_abort;
+	return controller;
+}
+
+struct dma_controller *
+musbhs_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+	struct musb_dma_controller *controller;
+	struct device *dev = musb->controller;
+	struct platform_device *pdev = to_platform_device(dev);
+	int irq = platform_get_irq_byname(pdev, "dma");
+
+	if (irq <= 0) {
+		dev_err(dev, "No DMA interrupt line!\n");
+		return NULL;
+	}
+
+	controller = dma_controller_alloc(musb, base);
+	if (!controller)
+		return NULL;
 
 	if (request_irq(irq, dma_controller_irq, 0,
 			dev_name(musb->controller), controller)) {
@@ -437,3 +444,16 @@
 	return &controller->controller;
 }
 EXPORT_SYMBOL_GPL(musbhs_dma_controller_create);
+
+struct dma_controller *
+musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base)
+{
+	struct musb_dma_controller *controller;
+
+	controller = dma_controller_alloc(musb, base);
+	if (!controller)
+		return NULL;
+
+	return &controller->controller;
+}
+EXPORT_SYMBOL_GPL(musbhs_dma_controller_create_noirq);
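+
+/*
+ * The noirq variant serves glue layers whose DMA interrupt is
+ * multiplexed into the main controller interrupt: they call the now
+ * exported dma_controller_irq() from their own top-level handler, as
+ * the MediaTek glue does in mtk_musb_interrupt().
+ */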
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 832a41f..2c1bbaa 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -407,7 +407,7 @@
 	return SUNXI_MUSB_TXFUNCADDR + offset;
 }
 
-static u8 sunxi_musb_readb(const void __iomem *addr, unsigned offset)
+static u8 sunxi_musb_readb(void __iomem *addr, unsigned offset)
 {
 	struct sunxi_glue *glue;
 
@@ -520,7 +520,7 @@
 		(int)(addr - sunxi_musb->mregs));
 }
 
-static u16 sunxi_musb_readw(const void __iomem *addr, unsigned offset)
+static u16 sunxi_musb_readw(void __iomem *addr, unsigned offset)
 {
 	if (addr == sunxi_musb->mregs) {
 		/* generic control or fifo control reg access */
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 3945328..cfb94f9 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -142,7 +142,7 @@
 /*
  * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
  */
-static u8 tusb_readb(const void __iomem *addr, unsigned offset)
+static u8 tusb_readb(void __iomem *addr, unsigned offset)
 {
 	u16 tmp;
 	u8 val;
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 1dd492e..8c346be 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/usb/role.h>
+#include <linux/property.h>
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
@@ -84,7 +85,12 @@
 }
 EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
 
-static int __switch_match(struct device *dev, const void *name)
+static int switch_fwnode_match(struct device *dev, const void *fwnode)
+{
+	return dev_fwnode(dev) == fwnode;
+}
+
+static int switch_name_match(struct device *dev, const void *name)
 {
 	return !strcmp((const char *)name, dev_name(dev));
 }
@@ -94,9 +100,30 @@
 {
 	struct device *dev;
 
-	dev = class_find_device(role_class, NULL, con->endpoint[ep],
-				__switch_match);
+	if (con->fwnode) {
+		if (con->id && !fwnode_property_present(con->fwnode, con->id))
+			return NULL;
 
+		dev = class_find_device(role_class, NULL, con->fwnode,
+					switch_fwnode_match);
+	} else {
+		dev = class_find_device(role_class, NULL, con->endpoint[ep],
+					switch_name_match);
+	}
+
+	return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+}
+
+static struct usb_role_switch *
+usb_role_switch_is_parent(struct fwnode_handle *fwnode)
+{
+	struct fwnode_handle *parent = fwnode_get_parent(fwnode);
+	struct device *dev;
+
+	if (!parent || !fwnode_property_present(parent, "usb-role-switch"))
+		return NULL;
+
+	dev = class_find_device_by_fwnode(role_class, parent);
 	return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
 }
 
@@ -111,8 +138,10 @@
 {
 	struct usb_role_switch *sw;
 
-	sw = device_connection_find_match(dev, "usb-role-switch", NULL,
-					  usb_role_switch_match);
+	sw = usb_role_switch_is_parent(dev_fwnode(dev));
+	if (!sw)
+		sw = device_connection_find_match(dev, "usb-role-switch", NULL,
+						  usb_role_switch_match);
 
 	if (!IS_ERR_OR_NULL(sw))
 		WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
@@ -122,6 +151,28 @@
 EXPORT_SYMBOL_GPL(usb_role_switch_get);
 
 /**
+ * fwnode_usb_role_switch_get - Find USB role switch linked with the caller
+ * @fwnode: The caller device node
+ *
+ * This is similar to usb_role_switch_get() above, but it searches for the
+ * switch by fwnode instead of by device entry.
+ */
+struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode)
+{
+	struct usb_role_switch *sw;
+
+	sw = usb_role_switch_is_parent(fwnode);
+	if (!sw)
+		sw = fwnode_connection_find_match(fwnode, "usb-role-switch",
+						  NULL, usb_role_switch_match);
+	if (!IS_ERR_OR_NULL(sw))
+		WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+
+	return sw;
+}
+EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
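+
+/*
+ * Hypothetical consumer sketch (all names illustrative):
+ *
+ *	sw = fwnode_usb_role_switch_get(dev_fwnode(dev));
+ *	if (IS_ERR(sw))
+ *		return PTR_ERR(sw);
+ *
+ * where the error is -EPROBE_DEFER until the switch device has been
+ * registered.
+ */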
+
+/**
  * usb_role_switch_put - Release handle to a switch
  * @sw: USB Role Switch
  *
@@ -266,6 +317,7 @@
 	sw->get = desc->get;
 
 	sw->dev.parent = parent;
+	sw->dev.fwnode = desc->fwnode;
 	sw->dev.class = role_class;
 	sw->dev.type = &usb_role_dev_type;
 	dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 1916ee1..5aecd52 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -9,6 +9,7 @@
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/property.h>
 #include <linux/slab.h>
 
 #include "bus.h"
@@ -204,15 +205,32 @@
 	put_device(&adev->dev);
 }
 
-static int __typec_port_match(struct device *dev, const void *name)
+static int typec_port_fwnode_match(struct device *dev, const void *fwnode)
+{
+	return dev_fwnode(dev) == fwnode;
+}
+
+static int typec_port_name_match(struct device *dev, const void *name)
 {
 	return !strcmp((const char *)name, dev_name(dev));
 }
 
 static void *typec_port_match(struct device_connection *con, int ep, void *data)
 {
-	return class_find_device(typec_class, NULL, con->endpoint[ep],
-				 __typec_port_match);
+	struct device *dev;
+
+	/*
+	 * FIXME: Check whether the fwnode supports the requested SVID. If it
+	 * does, we need to return ERR_PTR(-EPROBE_DEFER) when there is no
+	 * device.
+	 */
+	if (con->fwnode)
+		return class_find_device(typec_class, NULL, con->fwnode,
+					 typec_port_fwnode_match);
+
+	dev = class_find_device(typec_class, NULL, con->endpoint[ep],
+				typec_port_name_match);
+
+	return dev ? dev : ERR_PTR(-EPROBE_DEFER);
 }
 
 struct typec_altmode *
@@ -1496,11 +1514,8 @@
 {
 	struct typec_altmode *adev;
 	struct typec_mux *mux;
-	char id[10];
 
-	sprintf(id, "id%04xm%02x", desc->svid, desc->mode);
-
-	mux = typec_mux_get(&port->dev, id);
+	mux = typec_mux_get(&port->dev, desc);
 	if (IS_ERR(mux))
 		return ERR_CAST(mux);
 
@@ -1594,7 +1609,7 @@
 		return ERR_PTR(ret);
 	}
 
-	port->mux = typec_mux_get(&port->dev, "typec-mux");
+	port->mux = typec_mux_get(&port->dev, NULL);
 	if (IS_ERR(port->mux)) {
 		ret = PTR_ERR(port->mux);
 		put_device(&port->dev);
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index d990aa5..a5947d9 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -11,6 +11,8 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
 #include <linux/usb/typec_mux.h>
 
 static DEFINE_MUTEX(switch_lock);
@@ -23,15 +25,25 @@
 {
 	struct typec_switch *sw;
 
-	list_for_each_entry(sw, &switch_list, entry)
-		if (!strcmp(con->endpoint[ep], dev_name(sw->dev)))
-			return sw;
+	if (!con->fwnode) {
+		list_for_each_entry(sw, &switch_list, entry)
+			if (!strcmp(con->endpoint[ep], dev_name(sw->dev)))
+				return sw;
+		return ERR_PTR(-EPROBE_DEFER);
+	}
 
 	/*
-	 * We only get called if a connection was found, tell the caller to
-	 * wait for the switch to show up.
+	 * With OF graph the mux node must have a boolean device property named
+	 * "orientation-switch".
 	 */
-	return ERR_PTR(-EPROBE_DEFER);
+	if (con->id && !fwnode_property_present(con->fwnode, con->id))
+		return NULL;
+
+	list_for_each_entry(sw, &switch_list, entry)
+		if (dev_fwnode(sw->dev) == con->fwnode)
+			return sw;
+
+	return con->id ? ERR_PTR(-EPROBE_DEFER) : NULL;
 }
 
 /**
@@ -48,7 +60,7 @@
 	struct typec_switch *sw;
 
 	mutex_lock(&switch_lock);
-	sw = device_connection_find_match(dev, "typec-switch", NULL,
+	sw = device_connection_find_match(dev, "orientation-switch", NULL,
 					  typec_switch_match);
 	if (!IS_ERR_OR_NULL(sw)) {
 		WARN_ON(!try_module_get(sw->dev->driver->owner));
@@ -112,35 +124,87 @@
 
 static void *typec_mux_match(struct device_connection *con, int ep, void *data)
 {
+	const struct typec_altmode_desc *desc = data;
 	struct typec_mux *mux;
+	int nval;
+	bool match;
+	u16 *val;
+	int i;
 
-	list_for_each_entry(mux, &mux_list, entry)
-		if (!strcmp(con->endpoint[ep], dev_name(mux->dev)))
-			return mux;
+	if (!con->fwnode) {
+		list_for_each_entry(mux, &mux_list, entry)
+			if (!strcmp(con->endpoint[ep], dev_name(mux->dev)))
+				return mux;
+		return ERR_PTR(-EPROBE_DEFER);
+	}
 
 	/*
-	 * We only get called if a connection was found, tell the caller to
-	 * wait for the switch to show up.
+	 * Check whether the identifier has already been "consumed". If it
+	 * has, there is no need for any extra connection identification.
 	 */
-	return ERR_PTR(-EPROBE_DEFER);
+	match = !con->id;
+	if (match)
+		goto find_mux;
+
+	/* Accessory Mode muxes */
+	if (!desc) {
+		match = fwnode_property_present(con->fwnode, "accessory");
+		if (match)
+			goto find_mux;
+		return NULL;
+	}
+
+	/* Alternate Mode muxes */
+	nval = fwnode_property_read_u16_array(con->fwnode, "svid", NULL, 0);
+	if (nval <= 0)
+		return NULL;
+
+	val = kcalloc(nval, sizeof(*val), GFP_KERNEL);
+	if (!val)
+		return ERR_PTR(-ENOMEM);
+
+	nval = fwnode_property_read_u16_array(con->fwnode, "svid", val, nval);
+	if (nval < 0) {
+		kfree(val);
+		return ERR_PTR(nval);
+	}
+
+	for (i = 0; i < nval; i++) {
+		match = val[i] == desc->svid;
+		if (match) {
+			kfree(val);
+			goto find_mux;
+		}
+	}
+	kfree(val);
+	return NULL;
+
+find_mux:
+	list_for_each_entry(mux, &mux_list, entry)
+		if (dev_fwnode(mux->dev) == con->fwnode)
+			return mux;
+
+	return match ? ERR_PTR(-EPROBE_DEFER) : NULL;
 }
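+
+/*
+ * A hypothetical fwnode sketch matched by the code above: a mode
+ * switch handling the DisplayPort alternate mode (SVID 0xff01) would
+ * carry
+ *
+ *	mode-switch;
+ *	svid = /bits/ 16 <0xff01>;
+ *
+ * while an Accessory Mode mux carries the boolean "accessory" property
+ * instead.
+ */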
 
 /**
  * typec_mux_get - Find USB Type-C Multiplexer
  * @dev: The caller device
- * @name: Mux identifier
+ * @desc: Alt Mode description
  *
  * Finds a mux linked to the caller. This function is primarily meant for the
  * Type-C drivers. Returns a reference to the mux on success, NULL if no
  * matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a connection
  * was found but the mux has not been enumerated yet.
  */
-struct typec_mux *typec_mux_get(struct device *dev, const char *name)
+struct typec_mux *typec_mux_get(struct device *dev,
+				const struct typec_altmode_desc *desc)
 {
 	struct typec_mux *mux;
 
 	mutex_lock(&mux_lock);
-	mux = device_connection_find_match(dev, name, NULL, typec_mux_match);
+	mux = device_connection_find_match(dev, "mode-switch", (void *)desc,
+					   typec_mux_match);
 	if (!IS_ERR_OR_NULL(mux)) {
 		WARN_ON(!try_module_get(mux->dev->driver->owner));
 		get_device(mux->dev);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 83d3d27..3a6fb05 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -18,6 +18,8 @@
 
 source "drivers/gpu/drm/Kconfig"
 
+source "drivers/gpu/arm/Kconfig"
+
 menu "Frame buffer Devices"
 source "drivers/video/fbdev/Kconfig"
 endmenu
diff --git a/fs/read_write.c b/fs/read_write.c
index 2195380..6c326b3 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -460,6 +460,8 @@
 	return ret;
 }
 
+EXPORT_SYMBOL(vfs_read);
+
 static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
 {
 	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
@@ -557,6 +559,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(vfs_write);
 
 static inline loff_t file_pos_read(struct file *file)
 {
diff --git a/include/dt-bindings/clock/mt8167-clk.h b/include/dt-bindings/clock/mt8167-clk.h
new file mode 100644
index 0000000..041bfdd
--- /dev/null
+++ b/include/dt-bindings/clock/mt8167-clk.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8167_H
+#define _DT_BINDINGS_CLK_MT8167_H
+
+#include <dt-bindings/clock/mt8516-clk.h>
+
+/* APMIXEDSYS */
+
+#undef CLK_APMIXED_NR_CLK
+#define CLK_APMIXED_TVDPLL		6
+#define CLK_APMIXED_LVDSPLL		7
+#define CLK_APMIXED_HDMI_REF		8
+#define CLK_APMIXED_NR_CLK		9
+
+/* TOPCKGEN */
+
+#undef CLK_TOP_NR_CLK
+#define CLK_TOP_DSI0_LNTC_DSICK		177
+#define CLK_TOP_VPLL_DPIX		178
+#define CLK_TOP_LVDSTX_CLKDIG_CTS	179
+#define CLK_TOP_HDMTX_CLKDIG_CTS	180
+#define CLK_TOP_LVDSPLL			181
+#define CLK_TOP_LVDSPLL_D2		182
+#define CLK_TOP_LVDSPLL_D4		183
+#define CLK_TOP_LVDSPLL_D8		184
+#define CLK_TOP_MIPI_26M		185
+#define CLK_TOP_TVDPLL			186
+#define CLK_TOP_TVDPLL_D2		187
+#define CLK_TOP_TVDPLL_D4		188
+#define CLK_TOP_TVDPLL_D8		189
+#define CLK_TOP_TVDPLL_D16		190
+#define CLK_TOP_PWM_MM			191
+#define CLK_TOP_CAM_MM			192
+#define CLK_TOP_MFG_MM			193
+#define CLK_TOP_SPM_52M			194
+#define CLK_TOP_MIPI_26M_DBG		195
+#define CLK_TOP_SCAM_MM			196
+#define CLK_TOP_SMI_MM			197
+/* #define CLK_TOP_GCE			198 */
+#define CLK_TOP_26M_HDMI_SIFM		199
+#define CLK_TOP_26M_CEC			200
+#define CLK_TOP_32K_CEC			201
+#define CLK_TOP_GCPU_B			202
+#define CLK_TOP_RG_VDEC			203
+#define CLK_TOP_RG_FDPI0		204
+#define CLK_TOP_RG_FDPI1		205
+#define CLK_TOP_RG_AXI_MFG		206
+#define CLK_TOP_RG_SLOW_MFG		207
+#define CLK_TOP_GFMUX_EMI1X_SEL		209
+#define CLK_TOP_CSW_MUX_MFG_SEL		212
+#define CLK_TOP_CAMTG_MM_SEL		214
+#define CLK_TOP_PWM_MM_SEL		215
+#define CLK_TOP_SPM_52M_SEL		216
+#define CLK_TOP_MFG_MM_SEL		217
+#define CLK_TOP_SMI_MM_SEL		218
+#define CLK_TOP_SCAM_MM_SEL		219
+#define CLK_TOP_VDEC_MM_SEL		220
+#define CLK_TOP_DPI0_MM_SEL		221
+#define CLK_TOP_DPI1_MM_SEL		222
+#define CLK_TOP_AXI_MFG_IN_SEL		223
+#define CLK_TOP_SLOW_MFG_SEL		224
+#define CLK_TOP_NR_CLK			225
+
+/* MFGCFG */
+
+#define CLK_MFG_BAXI			0
+#define CLK_MFG_BMEM			1
+#define CLK_MFG_BG3D			2
+#define CLK_MFG_B26M			3
+#define CLK_MFG_NR_CLK			4
+
+/* MMSYS */
+
+#define CLK_MM_SMI_COMMON		0
+#define CLK_MM_SMI_LARB0		1
+#define CLK_MM_CAM_MDP			2
+#define CLK_MM_MDP_RDMA			3
+#define CLK_MM_MDP_RSZ0			4
+#define CLK_MM_MDP_RSZ1			5
+#define CLK_MM_MDP_TDSHP		6
+#define CLK_MM_MDP_WDMA			7
+#define CLK_MM_MDP_WROT			8
+#define CLK_MM_FAKE_ENG			9
+#define CLK_MM_DISP_OVL0		10
+#define CLK_MM_DISP_RDMA0		11
+#define CLK_MM_DISP_RDMA1		12
+#define CLK_MM_DISP_WDMA		13
+#define CLK_MM_DISP_COLOR		14
+#define CLK_MM_DISP_CCORR		15
+#define CLK_MM_DISP_AAL			16
+#define CLK_MM_DISP_GAMMA		17
+#define CLK_MM_DISP_DITHER		18
+#define CLK_MM_DISP_UFOE		19
+#define CLK_MM_DISP_PWM_MM		20
+#define CLK_MM_DISP_PWM_26M		21
+#define CLK_MM_DSI_ENGINE		22
+#define CLK_MM_DSI_DIGITAL		23
+#define CLK_MM_DPI0_ENGINE		24
+#define CLK_MM_DPI0_PXL			25
+#define CLK_MM_LVDS_PXL			26
+#define CLK_MM_LVDS_CTS			27
+#define CLK_MM_DPI1_ENGINE		28
+#define CLK_MM_DPI1_PXL			29
+#define CLK_MM_HDMI_PXL			30
+#define CLK_MM_HDMI_SPDIF		31
+#define CLK_MM_HDMI_ADSP_BCK		32
+#define CLK_MM_HDMI_PLL			33
+#define CLK_MM_NR_CLK			34
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB1_SMI		0
+#define CLK_IMG_CAM_SMI			1
+#define CLK_IMG_CAM_CAM			2
+#define CLK_IMG_SEN_TG			3
+#define CLK_IMG_SEN_CAM			4
+#define CLK_IMG_VENC			5
+#define CLK_IMG_NR_CLK			6
+
+/* VDECSYS */
+
+#define CLK_VDEC_CKEN			0
+#define CLK_VDEC_LARB1_CKEN		1
+#define CLK_VDEC_NR_CLK			2
+
+#endif /* _DT_BINDINGS_CLK_MT8167_H */
diff --git a/include/dt-bindings/clock/mt8183-clk.h b/include/dt-bindings/clock/mt8183-clk.h
new file mode 100644
index 0000000..0046506
--- /dev/null
+++ b/include/dt-bindings/clock/mt8183-clk.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8183_H
+#define _DT_BINDINGS_CLK_MT8183_H
+
+/* APMIXED */
+#define CLK_APMIXED_ARMPLL_LL		0
+#define CLK_APMIXED_ARMPLL_L		1
+#define CLK_APMIXED_CCIPLL		2
+#define CLK_APMIXED_MAINPLL		3
+#define CLK_APMIXED_UNIV2PLL		4
+#define CLK_APMIXED_MSDCPLL		5
+#define CLK_APMIXED_MMPLL		6
+#define CLK_APMIXED_MFGPLL		7
+#define CLK_APMIXED_TVDPLL		8
+#define CLK_APMIXED_APLL1		9
+#define CLK_APMIXED_APLL2		10
+#define CLK_APMIXED_SSUSB_26M		11
+#define CLK_APMIXED_APPLL_26M		12
+#define CLK_APMIXED_MIPIC0_26M		13
+#define CLK_APMIXED_MDPLLGP_26M		14
+#define CLK_APMIXED_MMSYS_26M		15
+#define CLK_APMIXED_UFS_26M		16
+#define CLK_APMIXED_MIPIC1_26M		17
+#define CLK_APMIXED_MEMPLL_26M		18
+#define CLK_APMIXED_CLKSQ_LVPLL_26M	19
+#define CLK_APMIXED_MIPID0_26M		20
+#define CLK_APMIXED_MIPID1_26M		21
+#define CLK_APMIXED_NR_CLK		22
+
+/* TOPCKGEN */
+#define CLK_TOP_MUX_AXI			0
+#define CLK_TOP_MUX_MM			1
+#define CLK_TOP_MUX_CAM			2
+#define CLK_TOP_MUX_MFG			3
+#define CLK_TOP_MUX_CAMTG		4
+#define CLK_TOP_MUX_UART		5
+#define CLK_TOP_MUX_SPI			6
+#define CLK_TOP_MUX_MSDC50_0_HCLK	7
+#define CLK_TOP_MUX_MSDC50_0		8
+#define CLK_TOP_MUX_MSDC30_1		9
+#define CLK_TOP_MUX_MSDC30_2		10
+#define CLK_TOP_MUX_AUDIO		11
+#define CLK_TOP_MUX_AUD_INTBUS		12
+#define CLK_TOP_MUX_FPWRAP_ULPOSC	13
+#define CLK_TOP_MUX_SCP			14
+#define CLK_TOP_MUX_ATB			15
+#define CLK_TOP_MUX_SSPM		16
+#define CLK_TOP_MUX_DPI0		17
+#define CLK_TOP_MUX_SCAM		18
+#define CLK_TOP_MUX_AUD_1		19
+#define CLK_TOP_MUX_AUD_2		20
+#define CLK_TOP_MUX_DISP_PWM		21
+#define CLK_TOP_MUX_SSUSB_TOP_XHCI	22
+#define CLK_TOP_MUX_USB_TOP		23
+#define CLK_TOP_MUX_SPM			24
+#define CLK_TOP_MUX_I2C			25
+#define CLK_TOP_MUX_F52M_MFG		26
+#define CLK_TOP_MUX_SENINF		27
+#define CLK_TOP_MUX_DXCC		28
+#define CLK_TOP_MUX_CAMTG2		29
+#define CLK_TOP_MUX_AUD_ENG1		30
+#define CLK_TOP_MUX_AUD_ENG2		31
+#define CLK_TOP_MUX_FAES_UFSFDE		32
+#define CLK_TOP_MUX_FUFS		33
+#define CLK_TOP_MUX_IMG			34
+#define CLK_TOP_MUX_DSP			35
+#define CLK_TOP_MUX_DSP1		36
+#define CLK_TOP_MUX_DSP2		37
+#define CLK_TOP_MUX_IPU_IF		38
+#define CLK_TOP_MUX_CAMTG3		39
+#define CLK_TOP_MUX_CAMTG4		40
+#define CLK_TOP_MUX_PMICSPI		41
+#define CLK_TOP_SYSPLL_CK		42
+#define CLK_TOP_SYSPLL_D2		43
+#define CLK_TOP_SYSPLL_D3		44
+#define CLK_TOP_SYSPLL_D5		45
+#define CLK_TOP_SYSPLL_D7		46
+#define CLK_TOP_SYSPLL_D2_D2		47
+#define CLK_TOP_SYSPLL_D2_D4		48
+#define CLK_TOP_SYSPLL_D2_D8		49
+#define CLK_TOP_SYSPLL_D2_D16		50
+#define CLK_TOP_SYSPLL_D3_D2		51
+#define CLK_TOP_SYSPLL_D3_D4		52
+#define CLK_TOP_SYSPLL_D3_D8		53
+#define CLK_TOP_SYSPLL_D5_D2		54
+#define CLK_TOP_SYSPLL_D5_D4		55
+#define CLK_TOP_SYSPLL_D7_D2		56
+#define CLK_TOP_SYSPLL_D7_D4		57
+#define CLK_TOP_UNIVPLL_CK		58
+#define CLK_TOP_UNIVPLL_D2		59
+#define CLK_TOP_UNIVPLL_D3		60
+#define CLK_TOP_UNIVPLL_D5		61
+#define CLK_TOP_UNIVPLL_D7		62
+#define CLK_TOP_UNIVPLL_D2_D2		63
+#define CLK_TOP_UNIVPLL_D2_D4		64
+#define CLK_TOP_UNIVPLL_D2_D8		65
+#define CLK_TOP_UNIVPLL_D3_D2		66
+#define CLK_TOP_UNIVPLL_D3_D4		67
+#define CLK_TOP_UNIVPLL_D3_D8		68
+#define CLK_TOP_UNIVPLL_D5_D2		69
+#define CLK_TOP_UNIVPLL_D5_D4		70
+#define CLK_TOP_UNIVPLL_D5_D8		71
+#define CLK_TOP_APLL1_CK		72
+#define CLK_TOP_APLL1_D2		73
+#define CLK_TOP_APLL1_D4		74
+#define CLK_TOP_APLL1_D8		75
+#define CLK_TOP_APLL2_CK		76
+#define CLK_TOP_APLL2_D2		77
+#define CLK_TOP_APLL2_D4		78
+#define CLK_TOP_APLL2_D8		79
+#define CLK_TOP_TVDPLL_CK		80
+#define CLK_TOP_TVDPLL_D2		81
+#define CLK_TOP_TVDPLL_D4		82
+#define CLK_TOP_TVDPLL_D8		83
+#define CLK_TOP_TVDPLL_D16		84
+#define CLK_TOP_MSDCPLL_CK		85
+#define CLK_TOP_MSDCPLL_D2		86
+#define CLK_TOP_MSDCPLL_D4		87
+#define CLK_TOP_MSDCPLL_D8		88
+#define CLK_TOP_MSDCPLL_D16		89
+#define CLK_TOP_AD_OSC_CK		90
+#define CLK_TOP_OSC_D2			91
+#define CLK_TOP_OSC_D4			92
+#define CLK_TOP_OSC_D8			93
+#define CLK_TOP_OSC_D16			94
+#define CLK_TOP_F26M_CK_D2		95
+#define CLK_TOP_MFGPLL_CK		96
+#define CLK_TOP_UNIVP_192M_CK		97
+#define CLK_TOP_UNIVP_192M_D2		98
+#define CLK_TOP_UNIVP_192M_D4		99
+#define CLK_TOP_UNIVP_192M_D8		100
+#define CLK_TOP_UNIVP_192M_D16		101
+#define CLK_TOP_UNIVP_192M_D32		102
+#define CLK_TOP_MMPLL_CK		103
+#define CLK_TOP_MMPLL_D4		104
+#define CLK_TOP_MMPLL_D4_D2		105
+#define CLK_TOP_MMPLL_D4_D4		106
+#define CLK_TOP_MMPLL_D5		107
+#define CLK_TOP_MMPLL_D5_D2		108
+#define CLK_TOP_MMPLL_D5_D4		109
+#define CLK_TOP_MMPLL_D6		110
+#define CLK_TOP_MMPLL_D7		111
+#define CLK_TOP_CLK26M			112
+#define CLK_TOP_CLK13M			113
+#define CLK_TOP_ULPOSC			114
+#define CLK_TOP_UNIVP_192M		115
+#define CLK_TOP_MUX_APLL_I2S0		116
+#define CLK_TOP_MUX_APLL_I2S1		117
+#define CLK_TOP_MUX_APLL_I2S2		118
+#define CLK_TOP_MUX_APLL_I2S3		119
+#define CLK_TOP_MUX_APLL_I2S4		120
+#define CLK_TOP_MUX_APLL_I2S5		121
+#define CLK_TOP_APLL12_DIV0		122
+#define CLK_TOP_APLL12_DIV1		123
+#define CLK_TOP_APLL12_DIV2		124
+#define CLK_TOP_APLL12_DIV3		125
+#define CLK_TOP_APLL12_DIV4		126
+#define CLK_TOP_APLL12_DIVB		127
+#define CLK_TOP_UNIVPLL			128
+#define CLK_TOP_ARMPLL_DIV_PLL1		129
+#define CLK_TOP_ARMPLL_DIV_PLL2		130
+#define CLK_TOP_UNIVPLL_D3_D16		131
+#define CLK_TOP_NR_CLK			132
+
+/* CAMSYS */
+#define CLK_CAM_LARB6			0
+#define CLK_CAM_DFP_VAD			1
+#define CLK_CAM_CAM			2
+#define CLK_CAM_CAMTG			3
+#define CLK_CAM_SENINF			4
+#define CLK_CAM_CAMSV0			5
+#define CLK_CAM_CAMSV1			6
+#define CLK_CAM_CAMSV2			7
+#define CLK_CAM_CCU			8
+#define CLK_CAM_LARB3			9
+#define CLK_CAM_NR_CLK			10
+
+/* INFRACFG_AO */
+#define CLK_INFRA_PMIC_TMR		0
+#define CLK_INFRA_PMIC_AP		1
+#define CLK_INFRA_PMIC_MD		2
+#define CLK_INFRA_PMIC_CONN		3
+#define CLK_INFRA_SCPSYS		4
+#define CLK_INFRA_SEJ			5
+#define CLK_INFRA_APXGPT		6
+#define CLK_INFRA_ICUSB			7
+#define CLK_INFRA_GCE			8
+#define CLK_INFRA_THERM			9
+#define CLK_INFRA_I2C0			10
+#define CLK_INFRA_I2C1			11
+#define CLK_INFRA_I2C2			12
+#define CLK_INFRA_I2C3			13
+#define CLK_INFRA_PWM_HCLK		14
+#define CLK_INFRA_PWM1			15
+#define CLK_INFRA_PWM2			16
+#define CLK_INFRA_PWM3			17
+#define CLK_INFRA_PWM4			18
+#define CLK_INFRA_PWM			19
+#define CLK_INFRA_UART0			20
+#define CLK_INFRA_UART1			21
+#define CLK_INFRA_UART2			22
+#define CLK_INFRA_UART3			23
+#define CLK_INFRA_GCE_26M		24
+#define CLK_INFRA_CQ_DMA_FPC		25
+#define CLK_INFRA_BTIF			26
+#define CLK_INFRA_SPI0			27
+#define CLK_INFRA_MSDC0			28
+#define CLK_INFRA_MSDC1			29
+#define CLK_INFRA_MSDC2			30
+#define CLK_INFRA_MSDC0_SCK		31
+#define CLK_INFRA_DVFSRC		32
+#define CLK_INFRA_GCPU			33
+#define CLK_INFRA_TRNG			34
+#define CLK_INFRA_AUXADC		35
+#define CLK_INFRA_CPUM			36
+#define CLK_INFRA_CCIF1_AP		37
+#define CLK_INFRA_CCIF1_MD		38
+#define CLK_INFRA_AUXADC_MD		39
+#define CLK_INFRA_MSDC1_SCK		40
+#define CLK_INFRA_MSDC2_SCK		41
+#define CLK_INFRA_AP_DMA		42
+#define CLK_INFRA_XIU			43
+#define CLK_INFRA_DEVICE_APC		44
+#define CLK_INFRA_CCIF_AP		45
+#define CLK_INFRA_DEBUGSYS		46
+#define CLK_INFRA_AUDIO			47
+#define CLK_INFRA_CCIF_MD		48
+#define CLK_INFRA_DXCC_SEC_CORE		49
+#define CLK_INFRA_DXCC_AO		50
+#define CLK_INFRA_DRAMC_F26M		51
+#define CLK_INFRA_IRTX			52
+#define CLK_INFRA_DISP_PWM		53
+#define CLK_INFRA_CLDMA_BCLK		54
+#define CLK_INFRA_AUDIO_26M_BCLK	55
+#define CLK_INFRA_SPI1			56
+#define CLK_INFRA_I2C4			57
+#define CLK_INFRA_MODEM_TEMP_SHARE	58
+#define CLK_INFRA_SPI2			59
+#define CLK_INFRA_SPI3			60
+#define CLK_INFRA_UNIPRO_SCK		61
+#define CLK_INFRA_UNIPRO_TICK		62
+#define CLK_INFRA_UFS_MP_SAP_BCLK	63
+#define CLK_INFRA_MD32_BCLK		64
+#define CLK_INFRA_SSPM			65
+#define CLK_INFRA_UNIPRO_MBIST		66
+#define CLK_INFRA_SSPM_BUS_HCLK		67
+#define CLK_INFRA_I2C5			68
+#define CLK_INFRA_I2C5_ARBITER		69
+#define CLK_INFRA_I2C5_IMM		70
+#define CLK_INFRA_I2C1_ARBITER		71
+#define CLK_INFRA_I2C1_IMM		72
+#define CLK_INFRA_I2C2_ARBITER		73
+#define CLK_INFRA_I2C2_IMM		74
+#define CLK_INFRA_SPI4			75
+#define CLK_INFRA_SPI5			76
+#define CLK_INFRA_CQ_DMA		77
+#define CLK_INFRA_UFS			78
+#define CLK_INFRA_AES_UFSFDE		79
+#define CLK_INFRA_UFS_TICK		80
+#define CLK_INFRA_MSDC0_SELF		81
+#define CLK_INFRA_MSDC1_SELF		82
+#define CLK_INFRA_MSDC2_SELF		83
+#define CLK_INFRA_SSPM_26M_SELF		84
+#define CLK_INFRA_SSPM_32K_SELF		85
+#define CLK_INFRA_UFS_AXI		86
+#define CLK_INFRA_I2C6			87
+#define CLK_INFRA_AP_MSDC0		88
+#define CLK_INFRA_MD_MSDC0		89
+#define CLK_INFRA_USB			90
+#define CLK_INFRA_DEVMPU_BCLK		91
+#define CLK_INFRA_CCIF2_AP		92
+#define CLK_INFRA_CCIF2_MD		93
+#define CLK_INFRA_CCIF3_AP		94
+#define CLK_INFRA_CCIF3_MD		95
+#define CLK_INFRA_SEJ_F13M		96
+#define CLK_INFRA_AES_BCLK		97
+#define CLK_INFRA_I2C7			98
+#define CLK_INFRA_I2C8			99
+#define CLK_INFRA_FBIST2FPC		100
+#define CLK_INFRA_NR_CLK		101
+
+/* MFGCFG */
+#define CLK_MFG_BG3D			0
+#define CLK_MFG_NR_CLK			1
+
+/* IMG */
+#define CLK_IMG_OWE			0
+#define CLK_IMG_WPE_B			1
+#define CLK_IMG_WPE_A			2
+#define CLK_IMG_MFB			3
+#define CLK_IMG_RSC			4
+#define CLK_IMG_DPE			5
+#define CLK_IMG_FDVT			6
+#define CLK_IMG_DIP			7
+#define CLK_IMG_LARB2			8
+#define CLK_IMG_LARB5			9
+#define CLK_IMG_NR_CLK			10
+
+/* MMSYS_CONFIG */
+#define CLK_MM_SMI_COMMON		0
+#define CLK_MM_SMI_LARB0		1
+#define CLK_MM_SMI_LARB1		2
+#define CLK_MM_GALS_COMM0		3
+#define CLK_MM_GALS_COMM1		4
+#define CLK_MM_GALS_CCU2MM		5
+#define CLK_MM_GALS_IPU12MM		6
+#define CLK_MM_GALS_IMG2MM		7
+#define CLK_MM_GALS_CAM2MM		8
+#define CLK_MM_GALS_IPU2MM		9
+#define CLK_MM_MDP_DL_TXCK		10
+#define CLK_MM_IPU_DL_TXCK		11
+#define CLK_MM_MDP_RDMA0		12
+#define CLK_MM_MDP_RDMA1		13
+#define CLK_MM_MDP_RSZ0			14
+#define CLK_MM_MDP_RSZ1			15
+#define CLK_MM_MDP_TDSHP		16
+#define CLK_MM_MDP_WROT0		17
+#define CLK_MM_FAKE_ENG			18
+#define CLK_MM_DISP_OVL0		19
+#define CLK_MM_DISP_OVL0_2L		20
+#define CLK_MM_DISP_OVL1_2L		21
+#define CLK_MM_DISP_RDMA0		22
+#define CLK_MM_DISP_RDMA1		23
+#define CLK_MM_DISP_WDMA0		24
+#define CLK_MM_DISP_COLOR0		25
+#define CLK_MM_DISP_CCORR0		26
+#define CLK_MM_DISP_AAL0		27
+#define CLK_MM_DISP_GAMMA0		28
+#define CLK_MM_DISP_DITHER0		29
+#define CLK_MM_DISP_SPLIT		30
+#define CLK_MM_DSI0_MM			31
+#define CLK_MM_DSI0_IF			32
+#define CLK_MM_DPI_MM			33
+#define CLK_MM_DPI_IF			34
+#define CLK_MM_FAKE_ENG2		35
+#define CLK_MM_MDP_DL_RX		36
+#define CLK_MM_IPU_DL_RX		37
+#define CLK_MM_26M			38
+#define CLK_MM_MMSYS_R2Y		39
+#define CLK_MM_DISP_RSZ			40
+#define CLK_MM_MDP_WDMA0		41
+#define CLK_MM_MDP_AAL			42
+#define CLK_MM_MDP_CCORR		43
+#define CLK_MM_DBI_MM			44
+#define CLK_MM_DBI_IF			45
+#define CLK_MM_NR_CLK			46
+
+/* VDEC_GCON */
+#define CLK_VDEC_VDEC			0
+#define CLK_VDEC_LARB1			1
+#define CLK_VDEC_NR_CLK			2
+
+/* VENC_GCON */
+#define CLK_VENC_LARB			0
+#define CLK_VENC_VENC			1
+#define CLK_VENC_JPGENC			2
+#define CLK_VENC_NR_CLK			3
+
+/* AUDIO */
+#define CLK_AUDIO_TML			0
+#define CLK_AUDIO_DAC_PREDIS		1
+#define CLK_AUDIO_DAC			2
+#define CLK_AUDIO_ADC			3
+#define CLK_AUDIO_APLL_TUNER		4
+#define CLK_AUDIO_APLL2_TUNER		5
+#define CLK_AUDIO_24M			6
+#define CLK_AUDIO_22M			7
+#define CLK_AUDIO_AFE			8
+#define CLK_AUDIO_I2S4			9
+#define CLK_AUDIO_I2S3			10
+#define CLK_AUDIO_I2S2			11
+#define CLK_AUDIO_I2S1			12
+#define CLK_AUDIO_PDN_ADDA6_ADC		13
+#define CLK_AUDIO_TDM			14
+#define CLK_AUDIO_NR_CLK		15
+
+/* IPU_CONN */
+#define CLK_IPU_CONN_IPU		0
+#define CLK_IPU_CONN_AHB		1
+#define CLK_IPU_CONN_AXI		2
+#define CLK_IPU_CONN_ISP		3
+#define CLK_IPU_CONN_CAM_ADL		4
+#define CLK_IPU_CONN_IMG_ADL		5
+#define CLK_IPU_CONN_DAP_RX		6
+#define CLK_IPU_CONN_APB2AXI		7
+#define CLK_IPU_CONN_APB2AHB		8
+#define CLK_IPU_CONN_IPU_CAB1TO2	9
+#define CLK_IPU_CONN_IPU1_CAB1TO2	10
+#define CLK_IPU_CONN_IPU2_CAB1TO2	11
+#define CLK_IPU_CONN_CAB3TO3		12
+#define CLK_IPU_CONN_CAB2TO1		13
+#define CLK_IPU_CONN_CAB3TO1_SLICE	14
+#define CLK_IPU_CONN_NR_CLK		15
+
+/* IPU_ADL */
+#define CLK_IPU_ADL_CABGEN		0
+#define CLK_IPU_ADL_NR_CLK		1
+
+/* IPU_CORE0 */
+#define CLK_IPU_CORE0_JTAG		0
+#define CLK_IPU_CORE0_AXI		1
+#define CLK_IPU_CORE0_IPU		2
+#define CLK_IPU_CORE0_NR_CLK		3
+
+/* IPU_CORE1 */
+#define CLK_IPU_CORE1_JTAG		0
+#define CLK_IPU_CORE1_AXI		1
+#define CLK_IPU_CORE1_IPU		2
+#define CLK_IPU_CORE1_NR_CLK		3
+
+/* MCUCFG */
+#define CLK_MCU_MP0_SEL			0
+#define CLK_MCU_MP2_SEL			1
+#define CLK_MCU_BUS_SEL			2
+#define CLK_MCU_NR_CLK			3
+
+#endif /* _DT_BINDINGS_CLK_MT8183_H */
diff --git a/include/dt-bindings/clock/mt8516-clk.h b/include/dt-bindings/clock/mt8516-clk.h
new file mode 100644
index 0000000..816447b
--- /dev/null
+++ b/include/dt-bindings/clock/mt8516-clk.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8516_H
+#define _DT_BINDINGS_CLK_MT8516_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL		0
+#define CLK_APMIXED_MAINPLL		1
+#define CLK_APMIXED_UNIVPLL		2
+#define CLK_APMIXED_MMPLL		3
+#define CLK_APMIXED_APLL1		4
+#define CLK_APMIXED_APLL2		5
+#define CLK_APMIXED_NR_CLK		6
+
+/* INFRACFG */
+
+#define CLK_IFR_MUX1_SEL		0
+#define CLK_IFR_ETH_25M_SEL		1
+#define CLK_IFR_I2C0_SEL		2
+#define CLK_IFR_I2C1_SEL		3
+#define CLK_IFR_I2C2_SEL		4
+#define CLK_IFR_NR_CLK			5
+
+/* TOPCKGEN */
+
+#define CLK_TOP_CLK_NULL		0
+#define CLK_TOP_I2S_INFRA_BCK		1
+#define CLK_TOP_MEMPLL			2
+#define CLK_TOP_DMPLL			3
+#define CLK_TOP_MAINPLL_D2		4
+#define CLK_TOP_MAINPLL_D4		5
+#define CLK_TOP_MAINPLL_D8		6
+#define CLK_TOP_MAINPLL_D16		7
+#define CLK_TOP_MAINPLL_D11		8
+#define CLK_TOP_MAINPLL_D22		9
+#define CLK_TOP_MAINPLL_D3		10
+#define CLK_TOP_MAINPLL_D6		11
+#define CLK_TOP_MAINPLL_D12		12
+#define CLK_TOP_MAINPLL_D5		13
+#define CLK_TOP_MAINPLL_D10		14
+#define CLK_TOP_MAINPLL_D20		15
+#define CLK_TOP_MAINPLL_D40		16
+#define CLK_TOP_MAINPLL_D7		17
+#define CLK_TOP_MAINPLL_D14		18
+#define CLK_TOP_UNIVPLL_D2		19
+#define CLK_TOP_UNIVPLL_D4		20
+#define CLK_TOP_UNIVPLL_D8		21
+#define CLK_TOP_UNIVPLL_D16		22
+#define CLK_TOP_UNIVPLL_D3		23
+#define CLK_TOP_UNIVPLL_D6		24
+#define CLK_TOP_UNIVPLL_D12		25
+#define CLK_TOP_UNIVPLL_D24		26
+#define CLK_TOP_UNIVPLL_D5		27
+#define CLK_TOP_UNIVPLL_D20		28
+#define CLK_TOP_MMPLL380M		29
+#define CLK_TOP_MMPLL_D2		30
+#define CLK_TOP_MMPLL_200M		31
+#define CLK_TOP_USB_PHY48M		32
+#define CLK_TOP_APLL1			33
+#define CLK_TOP_APLL1_D2		34
+#define CLK_TOP_APLL1_D4		35
+#define CLK_TOP_APLL1_D8		36
+#define CLK_TOP_APLL2			37
+#define CLK_TOP_APLL2_D2		38
+#define CLK_TOP_APLL2_D4		39
+#define CLK_TOP_APLL2_D8		40
+#define CLK_TOP_CLK26M			41
+#define CLK_TOP_CLK26M_D2		42
+#define CLK_TOP_AHB_INFRA_D2		43
+#define CLK_TOP_NFI1X			44
+#define CLK_TOP_ETH_D2			45
+#define CLK_TOP_THEM			46
+#define CLK_TOP_APDMA			47
+#define CLK_TOP_I2C0			48
+#define CLK_TOP_I2C1			49
+#define CLK_TOP_AUXADC1			50
+#define CLK_TOP_NFI			51
+#define CLK_TOP_NFIECC			52
+#define CLK_TOP_DEBUGSYS		53
+#define CLK_TOP_PWM			54
+#define CLK_TOP_UART0			55
+#define CLK_TOP_UART1			56
+#define CLK_TOP_BTIF			57
+#define CLK_TOP_USB			58
+#define CLK_TOP_FLASHIF_26M		59
+#define CLK_TOP_AUXADC2			60
+#define CLK_TOP_I2C2			61
+#define CLK_TOP_MSDC0			62
+#define CLK_TOP_MSDC1			63
+#define CLK_TOP_NFI2X			64
+#define CLK_TOP_PMICWRAP_AP		65
+#define CLK_TOP_SEJ			66
+#define CLK_TOP_MEMSLP_DLYER		67
+#define CLK_TOP_SPI			68
+#define CLK_TOP_APXGPT			69
+#define CLK_TOP_AUDIO			70
+#define CLK_TOP_PMICWRAP_MD		71
+#define CLK_TOP_PMICWRAP_CONN		72
+#define CLK_TOP_PMICWRAP_26M		73
+#define CLK_TOP_AUX_ADC			74
+#define CLK_TOP_AUX_TP			75
+#define CLK_TOP_MSDC2			76
+#define CLK_TOP_RBIST			77
+#define CLK_TOP_NFI_BUS			78
+#define CLK_TOP_GCE			79
+#define CLK_TOP_TRNG			80
+#define CLK_TOP_SEJ_13M			81
+#define CLK_TOP_AES			82
+#define CLK_TOP_PWM_B			83
+#define CLK_TOP_PWM1_FB			84
+#define CLK_TOP_PWM2_FB			85
+#define CLK_TOP_PWM3_FB			86
+#define CLK_TOP_PWM4_FB			87
+#define CLK_TOP_PWM5_FB			88
+#define CLK_TOP_USB_1P			89
+#define CLK_TOP_FLASHIF_FREERUN		90
+#define CLK_TOP_66M_ETH			91
+#define CLK_TOP_133M_ETH		92
+#define CLK_TOP_FETH_25M		93
+#define CLK_TOP_FETH_50M		94
+#define CLK_TOP_FLASHIF_AXI		95
+#define CLK_TOP_USBIF			96
+#define CLK_TOP_UART2			97
+#define CLK_TOP_BSI			98
+#define CLK_TOP_RG_SPINOR		99
+#define CLK_TOP_RG_MSDC2		100
+#define CLK_TOP_RG_ETH			101
+#define CLK_TOP_RG_AUD1			102
+#define CLK_TOP_RG_AUD2			103
+#define CLK_TOP_RG_AUD_ENGEN1		104
+#define CLK_TOP_RG_AUD_ENGEN2		105
+#define CLK_TOP_RG_I2C			106
+#define CLK_TOP_RG_PWM_INFRA		107
+#define CLK_TOP_RG_AUD_SPDIF_IN		108
+#define CLK_TOP_RG_UART2		109
+#define CLK_TOP_RG_BSI			110
+#define CLK_TOP_RG_DBG_ATCLK		111
+#define CLK_TOP_RG_NFIECC		112
+#define CLK_TOP_RG_APLL1_D2_EN		113
+#define CLK_TOP_RG_APLL1_D4_EN		114
+#define CLK_TOP_RG_APLL1_D8_EN		115
+#define CLK_TOP_RG_APLL2_D2_EN		116
+#define CLK_TOP_RG_APLL2_D4_EN		117
+#define CLK_TOP_RG_APLL2_D8_EN		118
+#define CLK_TOP_APLL12_DIV0		119
+#define CLK_TOP_APLL12_DIV1		120
+#define CLK_TOP_APLL12_DIV2		121
+#define CLK_TOP_APLL12_DIV3		122
+#define CLK_TOP_APLL12_DIV4		123
+#define CLK_TOP_APLL12_DIV4B		124
+#define CLK_TOP_APLL12_DIV5		125
+#define CLK_TOP_APLL12_DIV5B		126
+#define CLK_TOP_APLL12_DIV6		127
+#define CLK_TOP_UART0_SEL		128
+#define CLK_TOP_EMI_DDRPHY_SEL		129
+#define CLK_TOP_AHB_INFRA_SEL		130
+#define CLK_TOP_MSDC0_SEL		131
+#define CLK_TOP_UART1_SEL		132
+#define CLK_TOP_MSDC1_SEL		133
+#define CLK_TOP_PMICSPI_SEL		134
+#define CLK_TOP_QAXI_AUD26M_SEL		135
+#define CLK_TOP_AUD_INTBUS_SEL		136
+#define CLK_TOP_NFI2X_PAD_SEL		137
+#define CLK_TOP_NFI1X_PAD_SEL		138
+#define CLK_TOP_DDRPHYCFG_SEL		139
+#define CLK_TOP_USB_78M_SEL		140
+#define CLK_TOP_SPINOR_SEL		141
+#define CLK_TOP_MSDC2_SEL		142
+#define CLK_TOP_ETH_SEL			143
+#define CLK_TOP_AUD1_SEL		144
+#define CLK_TOP_AUD2_SEL		145
+#define CLK_TOP_AUD_ENGEN1_SEL		146
+#define CLK_TOP_AUD_ENGEN2_SEL		147
+#define CLK_TOP_I2C_SEL			148
+#define CLK_TOP_AUD_I2S0_M_SEL		149
+#define CLK_TOP_AUD_I2S1_M_SEL		150
+#define CLK_TOP_AUD_I2S2_M_SEL		151
+#define CLK_TOP_AUD_I2S3_M_SEL		152
+#define CLK_TOP_AUD_I2S4_M_SEL		153
+#define CLK_TOP_AUD_I2S5_M_SEL		154
+#define CLK_TOP_AUD_SPDIF_B_SEL		155
+#define CLK_TOP_PWM_SEL			156
+#define CLK_TOP_SPI_SEL			157
+#define CLK_TOP_AUD_SPDIFIN_SEL		158
+#define CLK_TOP_UART2_SEL		159
+#define CLK_TOP_BSI_SEL			160
+#define CLK_TOP_DBG_ATCLK_SEL		161
+#define CLK_TOP_CSW_NFIECC_SEL		162
+#define CLK_TOP_NFIECC_SEL		163
+#define CLK_TOP_APLL12_CK_DIV0		164
+#define CLK_TOP_APLL12_CK_DIV1		165
+#define CLK_TOP_APLL12_CK_DIV2		166
+#define CLK_TOP_APLL12_CK_DIV3		167
+#define CLK_TOP_APLL12_CK_DIV4		168
+#define CLK_TOP_APLL12_CK_DIV4B		169
+#define CLK_TOP_APLL12_CK_DIV5		170
+#define CLK_TOP_APLL12_CK_DIV5B		171
+#define CLK_TOP_APLL12_CK_DIV6		172
+#define CLK_TOP_USB_78M			173
+#define CLK_TOP_MSDC0_INFRA		174
+#define CLK_TOP_MSDC1_INFRA		175
+#define CLK_TOP_MSDC2_INFRA		176
+#define CLK_TOP_NR_CLK			177
+
+/* AUDSYS */
+
+#define CLK_AUD_AFE			0
+#define CLK_AUD_I2S			1
+#define CLK_AUD_22M			2
+#define CLK_AUD_24M			3
+#define CLK_AUD_INTDIR			4
+#define CLK_AUD_APLL2_TUNER		5
+#define CLK_AUD_APLL_TUNER		6
+#define CLK_AUD_HDMI			7
+#define CLK_AUD_SPDF			8
+#define CLK_AUD_ADC			9
+#define CLK_AUD_DAC			10
+#define CLK_AUD_DAC_PREDIS		11
+#define CLK_AUD_TML			12
+#define CLK_AUD_NR_CLK			13
+
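+/*
+ * Consumers reference these IDs through the common clock binding; a minimal
+ * sketch (the "topckgen" phandle name is an assumption):
+ *
+ *	clocks = <&topckgen CLK_TOP_UART0_SEL>;
+ */
+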
+#endif /* _DT_BINDINGS_CLK_MT8516_H */
diff --git a/include/dt-bindings/gce/mt8183-gce.h b/include/dt-bindings/gce/mt8183-gce.h
new file mode 100644
index 0000000..aeb9515
--- /dev/null
+++ b/include/dt-bindings/gce/mt8183-gce.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Bibby Hsieh <bibby.hsieh@mediatek.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8183_H
+#define _DT_BINDINGS_GCE_MT8183_H
+
+#define CMDQ_NO_TIMEOUT		0xffffffff
+
+#define CMDQ_THR_MAX_COUNT	24
+
+/* GCE HW thread priority */
+#define CMDQ_THR_PRIO_LOWEST	0
+#define CMDQ_THR_PRIO_HIGHEST	1
+
+/* GCE SUBSYS */
+#define SUBSYS_1300XXXX		0
+#define SUBSYS_1400XXXX		1
+#define SUBSYS_1401XXXX		2
+#define SUBSYS_1402XXXX		3
+#define SUBSYS_1502XXXX		4
+#define SUBSYS_1880XXXX		5
+#define SUBSYS_1881XXXX		6
+#define SUBSYS_1882XXXX		7
+#define SUBSYS_1883XXXX		8
+#define SUBSYS_1884XXXX		9
+#define SUBSYS_1000XXXX		10
+#define SUBSYS_1001XXXX		11
+#define SUBSYS_1002XXXX		12
+#define SUBSYS_1003XXXX		13
+#define SUBSYS_1004XXXX		14
+#define SUBSYS_1005XXXX		15
+#define SUBSYS_1020XXXX		16
+#define SUBSYS_1028XXXX		17
+#define SUBSYS_1700XXXX		18
+#define SUBSYS_1701XXXX		19
+#define SUBSYS_1702XXXX		20
+#define SUBSYS_1703XXXX		21
+#define SUBSYS_1800XXXX		22
+#define SUBSYS_1801XXXX		23
+#define SUBSYS_1802XXXX		24
+#define SUBSYS_1804XXXX		25
+#define SUBSYS_1805XXXX		26
+#define SUBSYS_1808XXXX		27
+#define SUBSYS_180aXXXX		28
+#define SUBSYS_180bXXXX		29
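+
+/*
+ * The names encode the register address range each ID covers, e.g.
+ * SUBSYS_1300XXXX stands for physical addresses 0x1300_0000..0x1300_ffff;
+ * CMDQ instructions then address a register as subsys ID + 16-bit offset.
+ */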
+
+#define CMDQ_EVENT_DISP_RDMA0_SOF					0
+#define CMDQ_EVENT_DISP_RDMA1_SOF					1
+#define CMDQ_EVENT_MDP_RDMA0_SOF					2
+#define CMDQ_EVENT_MDP_RSZ0_SOF						4
+#define CMDQ_EVENT_MDP_RSZ1_SOF						5
+#define CMDQ_EVENT_MDP_TDSHP_SOF					6
+#define CMDQ_EVENT_MDP_WROT0_SOF					7
+#define CMDQ_EVENT_MDP_WDMA0_SOF					8
+#define CMDQ_EVENT_DISP_OVL0_SOF					9
+#define CMDQ_EVENT_DISP_OVL0_2L_SOF					10
+#define CMDQ_EVENT_DISP_OVL1_2L_SOF					11
+#define CMDQ_EVENT_DISP_WDMA0_SOF					12
+#define CMDQ_EVENT_DISP_COLOR0_SOF					13
+#define CMDQ_EVENT_DISP_CCORR0_SOF					14
+#define CMDQ_EVENT_DISP_AAL0_SOF					15
+#define CMDQ_EVENT_DISP_GAMMA0_SOF					16
+#define CMDQ_EVENT_DISP_DITHER0_SOF					17
+#define CMDQ_EVENT_DISP_PWM0_SOF					18
+#define CMDQ_EVENT_DISP_DSI0_SOF					19
+#define CMDQ_EVENT_DISP_DPI0_SOF					20
+#define CMDQ_EVENT_DISP_RSZ_SOF						22
+#define CMDQ_EVENT_MDP_AAL_SOF						23
+#define CMDQ_EVENT_MDP_CCORR_SOF					24
+#define CMDQ_EVENT_DISP_DBI_SOF						25
+#define CMDQ_EVENT_DISP_RDMA0_EOF					26
+#define CMDQ_EVENT_DISP_RDMA1_EOF					27
+#define CMDQ_EVENT_MDP_RDMA0_EOF					28
+#define CMDQ_EVENT_MDP_RSZ0_EOF						30
+#define CMDQ_EVENT_MDP_RSZ1_EOF						31
+#define CMDQ_EVENT_MDP_TDSHP_EOF					32
+#define CMDQ_EVENT_MDP_WROT0_EOF					33
+#define CMDQ_EVENT_MDP_WDMA0_EOF					34
+#define CMDQ_EVENT_DISP_OVL0_EOF					35
+#define CMDQ_EVENT_DISP_OVL0_2L_EOF					36
+#define CMDQ_EVENT_DISP_OVL1_2L_EOF					37
+#define CMDQ_EVENT_DISP_WDMA0_EOF					38
+#define CMDQ_EVENT_DISP_COLOR0_EOF					39
+#define CMDQ_EVENT_DISP_CCORR0_EOF					40
+#define CMDQ_EVENT_DISP_AAL0_EOF					41
+#define CMDQ_EVENT_DISP_GAMMA0_EOF					42
+#define CMDQ_EVENT_DISP_DITHER0_EOF					43
+#define CMDQ_EVENT_DSI0_EOF						44
+#define CMDQ_EVENT_DPI0_EOF						45
+#define CMDQ_EVENT_DISP_RSZ_EOF						47
+#define CMDQ_EVENT_MDP_AAL_EOF						48
+#define CMDQ_EVENT_MDP_CCORR_EOF					49
+#define CMDQ_EVENT_DBI_EOF						50
+#define CMDQ_EVENT_MUTEX_STREAM_DONE0					130
+#define CMDQ_EVENT_MUTEX_STREAM_DONE1					131
+#define CMDQ_EVENT_MUTEX_STREAM_DONE2					132
+#define CMDQ_EVENT_MUTEX_STREAM_DONE3					133
+#define CMDQ_EVENT_MUTEX_STREAM_DONE4					134
+#define CMDQ_EVENT_MUTEX_STREAM_DONE5					135
+#define CMDQ_EVENT_MUTEX_STREAM_DONE6					136
+#define CMDQ_EVENT_MUTEX_STREAM_DONE7					137
+#define CMDQ_EVENT_MUTEX_STREAM_DONE8					138
+#define CMDQ_EVENT_MUTEX_STREAM_DONE9					139
+#define CMDQ_EVENT_MUTEX_STREAM_DONE10					140
+#define CMDQ_EVENT_MUTEX_STREAM_DONE11					141
+#define CMDQ_EVENT_DISP_RDMA0_BUF_UNDERRUN_EVEN				142
+#define CMDQ_EVENT_DISP_RDMA1_BUF_UNDERRUN_EVEN				143
+#define CMDQ_EVENT_DSI0_TE_EVENT					144
+#define CMDQ_EVENT_DSI0_IRQ_EVENT					145
+#define CMDQ_EVENT_DSI0_DONE_EVENT					146
+#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE				150
+#define CMDQ_EVENT_MDP_WDMA_SW_RST_DONE					151
+#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE				152
+#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE				154
+#define CMDQ_EVENT_DISP_OVL0_FRAME_RST_DONE_PULE			155
+#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_RST_DONE_ULSE			156
+#define CMDQ_EVENT_DISP_OVL1_2L_FRAME_RST_DONE_ULSE			157
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_0					257
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_1					258
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_2					259
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_3					260
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_4					261
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_5					262
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_6					263
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_7					264
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_8					265
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_9					266
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_10					267
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_11					268
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_12					269
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_13					270
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_14					271
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_15					272
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_16					273
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_17					274
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_18					275
+#define CMDQ_EVENT_AMD_FRAME_DONE					276
+#define CMDQ_EVENT_DVE_DONE						277
+#define CMDQ_EVENT_WMFE_DONE						278
+#define CMDQ_EVENT_RSC_DONE						279
+#define CMDQ_EVENT_MFB_DONE						280
+#define CMDQ_EVENT_WPE_A_DONE						281
+#define CMDQ_EVENT_SPE_B_DONE						282
+#define CMDQ_EVENT_OCC_DONE						283
+#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE					289
+#define CMDQ_EVENT_JPG_ENC_CMDQ_DONE					290
+#define CMDQ_EVENT_JPG_DEC_CMDQ_DONE					291
+#define CMDQ_EVENT_VENC_CMDQ_MB_DONE					292
+#define CMDQ_EVENT_VENC_CMDQ_128BYTE_DONE				293
+#define CMDQ_EVENT_ISP_FRAME_DONE_A					321
+#define CMDQ_EVENT_ISP_FRAME_DONE_B					322
+#define CMDQ_EVENT_CAMSV0_PASS1_DONE					323
+#define CMDQ_EVENT_CAMSV1_PASS1_DONE					324
+#define CMDQ_EVENT_CAMSV2_PASS1_DONE					325
+#define CMDQ_EVENT_TSF_DONE						326
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL				327
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL				328
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL				329
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL				330
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL				331
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL				332
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL				333
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL				334
+#define CMDQ_EVENT_IPU_CORE0_DONE0					353
+#define CMDQ_EVENT_IPU_CORE0_DONE1					354
+#define CMDQ_EVENT_IPU_CORE0_DONE2					355
+#define CMDQ_EVENT_IPU_CORE0_DONE3					356
+#define CMDQ_EVENT_IPU_CORE1_DONE0					385
+#define CMDQ_EVENT_IPU_CORE1_DONE1					386
+#define CMDQ_EVENT_IPU_CORE1_DONE2					387
+#define CMDQ_EVENT_IPU_CORE1_DONE3					388
+
+#endif /* _DT_BINDINGS_GCE_MT8183_H */
diff --git a/include/dt-bindings/memory/mt8167-larb-port.h b/include/dt-bindings/memory/mt8167-larb-port.h
new file mode 100644
index 0000000..4e010a7
--- /dev/null
+++ b/include/dt-bindings/memory/mt8167-larb-port.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Honghui Zhang <honghui.zhang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _DTS_IOMMU_PORT_MT8167_H_
+#define _DTS_IOMMU_PORT_MT8167_H_
+
+#define MT8167_LARB0_PORT_NUM		8
+#define MT8167_LARB1_PORT_NUM		13
+#define MT8167_LARB2_PORT_NUM		7
+
+#define MT8167_LARB0_PORT(port)		(port)
+#define MT8167_LARB1_PORT(port)		((port) + MT8167_LARB0_PORT_NUM)
+#define MT8167_LARB2_PORT(port)		((port) + MT8167_LARB0_PORT_NUM + MT8167_LARB1_PORT_NUM)
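+/*
+ * Ports are numbered linearly across the larbs, e.g.
+ * MT8167_LARB2_PORT(0) == 0 + 8 + 13 == 21.
+ */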
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0		MT8167_LARB0_PORT(0)
+#define M4U_PORT_DISP_RDMA0		MT8167_LARB0_PORT(1)
+#define M4U_PORT_DISP_WDMA0		MT8167_LARB0_PORT(2)
+#define M4U_PORT_DISP_RDMA1		MT8167_LARB0_PORT(3)
+#define M4U_PORT_MDP_RDMA		MT8167_LARB0_PORT(4)
+#define M4U_PORT_MDP_WDMA		MT8167_LARB0_PORT(5)
+#define M4U_PORT_MDP_WROT		MT8167_LARB0_PORT(6)
+#define M4U_PORT_DISP_FAKE		MT8167_LARB0_PORT(7)
+
+/* IMG larb1 */
+#define M4U_PORT_CAM_IMGO		MT8167_LARB1_PORT(0)
+#define M4U_PORT_CAM_IMG2O		MT8167_LARB1_PORT(1)
+#define M4U_PORT_CAM_LSCI		MT8167_LARB1_PORT(2)
+#define M4U_PORT_CAM_ESFKO		MT8167_LARB1_PORT(3)
+#define M4U_PORT_CAM_AAO		MT8167_LARB1_PORT(4)
+#define M4U_PORT_VENC_REC		MT8167_LARB1_PORT(5)
+#define M4U_PORT_VENC_BSDMA		MT8167_LARB1_PORT(6)
+#define M4U_PORT_VENC_RD_COMV		MT8167_LARB1_PORT(7)
+#define M4U_PORT_CAM_IMGI		MT8167_LARB1_PORT(8)
+#define M4U_PORT_VENC_CUR_LUMA		MT8167_LARB1_PORT(9)
+#define M4U_PORT_VENC_CUR_CHROMA	MT8167_LARB1_PORT(10)
+#define M4U_PORT_VENC_REF_LUMA		MT8167_LARB1_PORT(11)
+#define M4U_PORT_VENC_REF_CHROMA	MT8167_LARB1_PORT(12)
+
+/* VDEC larb2 */
+#define M4U_PORT_HW_VDEC_MC_EXT		MT8167_LARB2_PORT(0)
+#define M4U_PORT_HW_VDEC_PP_EXT		MT8167_LARB2_PORT(1)
+#define M4U_PORT_HW_VDEC_VLD_EXT	MT8167_LARB2_PORT(2)
+#define M4U_PORT_HW_VDEC_AVC_MV_EXT	MT8167_LARB2_PORT(3)
+#define M4U_PORT_HW_VDEC_PRED_RD_EXT	MT8167_LARB2_PORT(4)
+#define M4U_PORT_HW_VDEC_PRED_WR_EXT	MT8167_LARB2_PORT(5)
+#define M4U_PORT_HW_VDEC_PPWRAP_EXT	MT8167_LARB2_PORT(6)
+
+#endif /* _DTS_IOMMU_PORT_MT8167_H_ */
diff --git a/include/dt-bindings/memory/mt8183-larb-port.h b/include/dt-bindings/memory/mt8183-larb-port.h
new file mode 100644
index 0000000..2c579f3
--- /dev/null
+++ b/include/dt-bindings/memory/mt8183-larb-port.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef __DTS_IOMMU_PORT_MT8183_H
+#define __DTS_IOMMU_PORT_MT8183_H
+
+#define MTK_M4U_ID(larb, port)		(((larb) << 5) | (port))
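+/* e.g. MTK_M4U_ID(5, 3) == (5 << 5) | 3 == 0xa3 */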
+
+#define M4U_LARB0_ID			0
+#define M4U_LARB1_ID			1
+#define M4U_LARB2_ID			2
+#define M4U_LARB3_ID			3
+#define M4U_LARB4_ID			4
+#define M4U_LARB5_ID			5
+#define M4U_LARB6_ID			6
+#define M4U_LARB7_ID			7
+
+/* larb0 */
+#define	M4U_PORT_DISP_OVL0		MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define	M4U_PORT_DISP_2L_OVL0_LARB0     MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define	M4U_PORT_DISP_2L_OVL1_LARB0     MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define	M4U_PORT_DISP_RDMA0		MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define	M4U_PORT_DISP_RDMA1		MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define	M4U_PORT_DISP_WDMA0		MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define	M4U_PORT_MDP_RDMA0		MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define	M4U_PORT_MDP_WROT0		MTK_M4U_ID(M4U_LARB0_ID, 7)
+#define	M4U_PORT_MDP_WDMA0		MTK_M4U_ID(M4U_LARB0_ID, 8)
+#define	M4U_PORT_DISP_FAKE0		MTK_M4U_ID(M4U_LARB0_ID, 9)
+
+/* larb1 */
+#define	M4U_PORT_HW_VDEC_MC_EXT		MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define	M4U_PORT_HW_VDEC_PP_EXT         MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define	M4U_PORT_HW_VDEC_VLD_EXT	MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define	M4U_PORT_HW_VDEC_AVC_MV_EXT     MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define	M4U_PORT_HW_VDEC_PRED_RD_EXT	MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define	M4U_PORT_HW_VDEC_PRED_WR_EXT	MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define	M4U_PORT_HW_VDEC_PPWRAP_EXT	MTK_M4U_ID(M4U_LARB1_ID, 6)
+
+/* larb2 VPU0 */
+#define	M4U_PORT_IMG_IPUO		MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define	M4U_PORT_IMG_IPU3O		MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define	M4U_PORT_IMG_IPUI		MTK_M4U_ID(M4U_LARB2_ID, 2)
+
+/* larb3 VPU1 */
+#define	M4U_PORT_CAM_IPUO		MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define	M4U_PORT_CAM_IPU2O		MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define	M4U_PORT_CAM_IPU3O		MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define	M4U_PORT_CAM_IPUI		MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define	M4U_PORT_CAM_IPU2I		MTK_M4U_ID(M4U_LARB3_ID, 4)
+
+/* larb4 */
+#define	M4U_PORT_VENC_RCPU		MTK_M4U_ID(M4U_LARB4_ID, 0)
+#define	M4U_PORT_VENC_REC		MTK_M4U_ID(M4U_LARB4_ID, 1)
+#define	M4U_PORT_VENC_BSDMA		MTK_M4U_ID(M4U_LARB4_ID, 2)
+#define	M4U_PORT_VENC_SV_COMV		MTK_M4U_ID(M4U_LARB4_ID, 3)
+#define	M4U_PORT_VENC_RD_COMV		MTK_M4U_ID(M4U_LARB4_ID, 4)
+#define	M4U_PORT_JPGENC_RDMA		MTK_M4U_ID(M4U_LARB4_ID, 5)
+#define	M4U_PORT_JPGENC_BSDMA		MTK_M4U_ID(M4U_LARB4_ID, 6)
+#define	M4U_PORT_VENC_CUR_LUMA		MTK_M4U_ID(M4U_LARB4_ID, 7)
+#define	M4U_PORT_VENC_CUR_CHROMA	MTK_M4U_ID(M4U_LARB4_ID, 8)
+#define	M4U_PORT_VENC_REF_LUMA		MTK_M4U_ID(M4U_LARB4_ID, 9)
+#define	M4U_PORT_VENC_REF_CHROMA	MTK_M4U_ID(M4U_LARB4_ID, 10)
+
+/* larb5 */
+#define	M4U_PORT_CAM_IMGI		MTK_M4U_ID(M4U_LARB5_ID, 0)
+#define	M4U_PORT_CAM_IMG2O		MTK_M4U_ID(M4U_LARB5_ID, 1)
+#define	M4U_PORT_CAM_IMG3O		MTK_M4U_ID(M4U_LARB5_ID, 2)
+#define	M4U_PORT_CAM_VIPI		MTK_M4U_ID(M4U_LARB5_ID, 3)
+#define	M4U_PORT_CAM_LCEI		MTK_M4U_ID(M4U_LARB5_ID, 4)
+#define	M4U_PORT_CAM_SMXI		MTK_M4U_ID(M4U_LARB5_ID, 5)
+#define	M4U_PORT_CAM_SMXO		MTK_M4U_ID(M4U_LARB5_ID, 6)
+#define	M4U_PORT_CAM_WPE0_RDMA1		MTK_M4U_ID(M4U_LARB5_ID, 7)
+#define	M4U_PORT_CAM_WPE0_RDMA0		MTK_M4U_ID(M4U_LARB5_ID, 8)
+#define	M4U_PORT_CAM_WPE0_WDMA		MTK_M4U_ID(M4U_LARB5_ID, 9)
+#define	M4U_PORT_CAM_FDVT_RP		MTK_M4U_ID(M4U_LARB5_ID, 10)
+#define	M4U_PORT_CAM_FDVT_WR		MTK_M4U_ID(M4U_LARB5_ID, 11)
+#define	M4U_PORT_CAM_FDVT_RB		MTK_M4U_ID(M4U_LARB5_ID, 12)
+#define	M4U_PORT_CAM_WPE1_RDMA0		MTK_M4U_ID(M4U_LARB5_ID, 13)
+#define	M4U_PORT_CAM_WPE1_RDMA1		MTK_M4U_ID(M4U_LARB5_ID, 14)
+#define	M4U_PORT_CAM_WPE1_WDMA		MTK_M4U_ID(M4U_LARB5_ID, 15)
+#define	M4U_PORT_CAM_DPE_RDMA		MTK_M4U_ID(M4U_LARB5_ID, 16)
+#define	M4U_PORT_CAM_DPE_WDMA		MTK_M4U_ID(M4U_LARB5_ID, 17)
+#define	M4U_PORT_CAM_MFB_RDMA0		MTK_M4U_ID(M4U_LARB5_ID, 18)
+#define	M4U_PORT_CAM_MFB_RDMA1		MTK_M4U_ID(M4U_LARB5_ID, 19)
+#define	M4U_PORT_CAM_MFB_WDMA		MTK_M4U_ID(M4U_LARB5_ID, 20)
+#define	M4U_PORT_CAM_RSC_RDMA0		MTK_M4U_ID(M4U_LARB5_ID, 21)
+#define	M4U_PORT_CAM_RSC_WDMA		MTK_M4U_ID(M4U_LARB5_ID, 22)
+#define	M4U_PORT_CAM_OWE_RDMA		MTK_M4U_ID(M4U_LARB5_ID, 23)
+#define	M4U_PORT_CAM_OWE_WDMA		MTK_M4U_ID(M4U_LARB5_ID, 24)
+
+/* larb6 */
+#define	M4U_PORT_CAM_IMGO		MTK_M4U_ID(M4U_LARB6_ID, 0)
+#define	M4U_PORT_CAM_RRZO		MTK_M4U_ID(M4U_LARB6_ID, 1)
+#define	M4U_PORT_CAM_AAO		MTK_M4U_ID(M4U_LARB6_ID, 2)
+#define	M4U_PORT_CAM_AFO		MTK_M4U_ID(M4U_LARB6_ID, 3)
+#define	M4U_PORT_CAM_LSCI0		MTK_M4U_ID(M4U_LARB6_ID, 4)
+#define	M4U_PORT_CAM_LSCI1		MTK_M4U_ID(M4U_LARB6_ID, 5)
+#define	M4U_PORT_CAM_PDO		MTK_M4U_ID(M4U_LARB6_ID, 6)
+#define	M4U_PORT_CAM_BPCI		MTK_M4U_ID(M4U_LARB6_ID, 7)
+#define	M4U_PORT_CAM_LCSO		MTK_M4U_ID(M4U_LARB6_ID, 8)
+#define	M4U_PORT_CAM_CAM_RSSO_A		MTK_M4U_ID(M4U_LARB6_ID, 9)
+#define	M4U_PORT_CAM_UFEO		MTK_M4U_ID(M4U_LARB6_ID, 10)
+#define	M4U_PORT_CAM_SOCO		MTK_M4U_ID(M4U_LARB6_ID, 11)
+#define	M4U_PORT_CAM_SOC1		MTK_M4U_ID(M4U_LARB6_ID, 12)
+#define	M4U_PORT_CAM_SOC2		MTK_M4U_ID(M4U_LARB6_ID, 13)
+#define	M4U_PORT_CAM_CCUI		MTK_M4U_ID(M4U_LARB6_ID, 14)
+#define	M4U_PORT_CAM_CCUO		MTK_M4U_ID(M4U_LARB6_ID, 15)
+#define	M4U_PORT_CAM_RAWI_A		MTK_M4U_ID(M4U_LARB6_ID, 16)
+#define	M4U_PORT_CAM_CCUG		MTK_M4U_ID(M4U_LARB6_ID, 17)
+#define	M4U_PORT_CAM_PSO		MTK_M4U_ID(M4U_LARB6_ID, 18)
+#define	M4U_PORT_CAM_AFO_1		MTK_M4U_ID(M4U_LARB6_ID, 19)
+#define	M4U_PORT_CAM_LSCI_2		MTK_M4U_ID(M4U_LARB6_ID, 20)
+#define	M4U_PORT_CAM_PDI		MTK_M4U_ID(M4U_LARB6_ID, 21)
+#define	M4U_PORT_CAM_FLKO		MTK_M4U_ID(M4U_LARB6_ID, 22)
+#define	M4U_PORT_CAM_LMVO		MTK_M4U_ID(M4U_LARB6_ID, 23)
+#define	M4U_PORT_CAM_UFGO		MTK_M4U_ID(M4U_LARB6_ID, 24)
+#define	M4U_PORT_CAM_SPARE		MTK_M4U_ID(M4U_LARB6_ID, 25)
+#define	M4U_PORT_CAM_SPARE_2		MTK_M4U_ID(M4U_LARB6_ID, 26)
+#define	M4U_PORT_CAM_SPARE_3		MTK_M4U_ID(M4U_LARB6_ID, 27)
+#define	M4U_PORT_CAM_SPARE_4		MTK_M4U_ID(M4U_LARB6_ID, 28)
+#define	M4U_PORT_CAM_SPARE_5		MTK_M4U_ID(M4U_LARB6_ID, 29)
+#define	M4U_PORT_CAM_SPARE_6		MTK_M4U_ID(M4U_LARB6_ID, 30)
+
+/* CCU */
+#define	M4U_PORT_CCU0			MTK_M4U_ID(M4U_LARB7_ID, 0)
+#define	M4U_PORT_CCU1			MTK_M4U_ID(M4U_LARB7_ID, 1)
+
+#endif /* __DTS_IOMMU_PORT_MT8183_H */
diff --git a/include/dt-bindings/power/mt8167-power.h b/include/dt-bindings/power/mt8167-power.h
new file mode 100644
index 0000000..0a32fc3
--- /dev/null
+++ b/include/dt-bindings/power/mt8167-power.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8167_POWER_H
+#define _DT_BINDINGS_POWER_MT8167_POWER_H
+
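+/* MM and DISP are two names for the same power domain (both map to ID 0) */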
+#define MT8167_POWER_DOMAIN_MM		0
+#define MT8167_POWER_DOMAIN_DISP	0
+#define MT8167_POWER_DOMAIN_VDEC	1
+#define MT8167_POWER_DOMAIN_ISP		2
+#define MT8167_POWER_DOMAIN_CONN	3
+#define MT8167_POWER_DOMAIN_MFG_ASYNC	4
+#define MT8167_POWER_DOMAIN_MFG_2D	5
+#define MT8167_POWER_DOMAIN_MFG		6
+
+#endif /* _DT_BINDINGS_POWER_MT8167_POWER_H */
diff --git a/include/dt-bindings/power/mt8173-power.h b/include/dt-bindings/power/mt8173-power.h
index 15d531a..ef4a7f9 100644
--- a/include/dt-bindings/power/mt8173-power.h
+++ b/include/dt-bindings/power/mt8173-power.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DT_BINDINGS_POWER_MT8183_POWER_H
-#define _DT_BINDINGS_POWER_MT8183_POWER_H
+#ifndef _DT_BINDINGS_POWER_MT8173_POWER_H
+#define _DT_BINDINGS_POWER_MT8173_POWER_H
 
 #define MT8173_POWER_DOMAIN_VDEC	0
 #define MT8173_POWER_DOMAIN_VENC	1
@@ -13,4 +13,4 @@
 #define MT8173_POWER_DOMAIN_MFG_2D	8
 #define MT8173_POWER_DOMAIN_MFG		9
 
-#endif /* _DT_BINDINGS_POWER_MT8183_POWER_H */
+#endif /* _DT_BINDINGS_POWER_MT8173_POWER_H */
diff --git a/include/dt-bindings/power/mt8183-power.h b/include/dt-bindings/power/mt8183-power.h
new file mode 100644
index 0000000..5c0c8c7
--- /dev/null
+++ b/include/dt-bindings/power/mt8183-power.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8183_POWER_H
+#define _DT_BINDINGS_POWER_MT8183_POWER_H
+
+#define MT8183_POWER_DOMAIN_AUDIO	0
+#define MT8183_POWER_DOMAIN_CONN	1
+#define MT8183_POWER_DOMAIN_MFG_ASYNC	2
+#define MT8183_POWER_DOMAIN_MFG		3
+#define MT8183_POWER_DOMAIN_MFG_CORE0	4
+#define MT8183_POWER_DOMAIN_MFG_CORE1	5
+#define MT8183_POWER_DOMAIN_MFG_2D	6
+#define MT8183_POWER_DOMAIN_DISP	7
+#define MT8183_POWER_DOMAIN_CAM		8
+#define MT8183_POWER_DOMAIN_ISP		9
+#define MT8183_POWER_DOMAIN_VDEC	10
+#define MT8183_POWER_DOMAIN_VENC	11
+#define MT8183_POWER_DOMAIN_VPU_TOP	12
+#define MT8183_POWER_DOMAIN_VPU_CORE0	13
+#define MT8183_POWER_DOMAIN_VPU_CORE1	14
+
+#endif /* _DT_BINDINGS_POWER_MT8183_POWER_H */
diff --git a/include/dt-bindings/reset-controller/mt8183-resets.h b/include/dt-bindings/reset-controller/mt8183-resets.h
new file mode 100644
index 0000000..8804e34
--- /dev/null
+++ b/include/dt-bindings/reset-controller/mt8183-resets.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Yong Liang <yong.liang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8183
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8183
+
+/* INFRACFG AO resets */
+#define MT8183_INFRACFG_AO_THERM_SW_RST				0
+#define MT8183_INFRACFG_AO_USB_TOP_SW_RST			1
+#define MT8183_INFRACFG_AO_MM_IOMMU_SW_RST			3
+#define MT8183_INFRACFG_AO_MSDC3_SW_RST				4
+#define MT8183_INFRACFG_AO_MSDC2_SW_RST				5
+#define MT8183_INFRACFG_AO_MSDC1_SW_RST				6
+#define MT8183_INFRACFG_AO_MSDC0_SW_RST				7
+#define MT8183_INFRACFG_AO_APDMA_SW_RST				9
+#define MT8183_INFRACFG_AO_MIMP_D_SW_RST			10
+#define MT8183_INFRACFG_AO_BTIF_SW_RST				12
+#define MT8183_INFRACFG_AO_DISP_PWM_SW_RST			14
+#define MT8183_INFRACFG_AO_AUXADC_SW_RST			15
+
+#define MT8183_INFRACFG_AO_IRTX_SW_RST				32
+#define MT8183_INFRACFG_AO_SPI0_SW_RST				33
+#define MT8183_INFRACFG_AO_I2C0_SW_RST				34
+#define MT8183_INFRACFG_AO_I2C1_SW_RST				35
+#define MT8183_INFRACFG_AO_I2C2_SW_RST				36
+#define MT8183_INFRACFG_AO_I2C3_SW_RST				37
+#define MT8183_INFRACFG_AO_UART0_SW_RST				38
+#define MT8183_INFRACFG_AO_UART1_SW_RST				39
+#define MT8183_INFRACFG_AO_UART2_SW_RST				40
+#define MT8183_INFRACFG_AO_PWM_SW_RST				41
+#define MT8183_INFRACFG_AO_SPI1_SW_RST				42
+#define MT8183_INFRACFG_AO_I2C4_SW_RST				43
+#define MT8183_INFRACFG_AO_DVFSP_SW_RST				44
+#define MT8183_INFRACFG_AO_SPI2_SW_RST				45
+#define MT8183_INFRACFG_AO_SPI3_SW_RST				46
+#define MT8183_INFRACFG_AO_UFSHCI_SW_RST			47
+
+#define MT8183_INFRACFG_AO_PMIC_WRAP_SW_RST			64
+#define MT8183_INFRACFG_AO_SPM_SW_RST				65
+#define MT8183_INFRACFG_AO_USBSIF_SW_RST			66
+#define MT8183_INFRACFG_AO_KP_SW_RST				68
+#define MT8183_INFRACFG_AO_APXGPT_SW_RST			69
+#define MT8183_INFRACFG_AO_CLDMA_AO_SW_RST			70
+#define MT8183_INFRACFG_AO_UNIPRO_UFS_SW_RST			71
+#define MT8183_INFRACFG_AO_DX_CC_SW_RST				72
+#define MT8183_INFRACFG_AO_UFSPHY_SW_RST			73
+
+#define MT8183_INFRACFG_AO_DX_CC_SEC_SW_RST			96
+#define MT8183_INFRACFG_AO_GCE_SW_RST				97
+#define MT8183_INFRACFG_AO_CLDMA_SW_RST				98
+#define MT8183_INFRACFG_AO_TRNG_SW_RST				99
+#define MT8183_INFRACFG_AO_AP_MD_CCIF_1_SW_RST			103
+#define MT8183_INFRACFG_AO_AP_MD_CCIF_SW_RST			104
+#define MT8183_INFRACFG_AO_I2C1_IMM_SW_RST			105
+#define MT8183_INFRACFG_AO_I2C1_ARB_SW_RST			106
+#define MT8183_INFRACFG_AO_I2C2_IMM_SW_RST			107
+#define MT8183_INFRACFG_AO_I2C2_ARB_SW_RST			108
+#define MT8183_INFRACFG_AO_I2C5_SW_RST				109
+#define MT8183_INFRACFG_AO_I2C5_IMM_SW_RST			110
+#define MT8183_INFRACFG_AO_I2C5_ARB_SW_RST			111
+#define MT8183_INFRACFG_AO_SPI4_SW_RST				112
+#define MT8183_INFRACFG_AO_SPI5_SW_RST				113
+#define MT8183_INFRACFG_AO_INFRA2MFGAXI_CBIP_CLAS_SW_RST	114
+#define MT8183_INFRACFG_AO_MFGAXI2INFRA_M0_CBIP_GLAS_OUT_SW_RST	115
+#define MT8183_INFRACFG_AO_MFGAXI2INFRA_M1_CBIP_GLAS_OUT_SW_RST	116
+#define MT8183_INFRACFG_AO_UFS_AES_SW_RST			117
+#define MT8183_INFRACFG_AO_CCU_I2C_IRQ_SW_RST			118
+#define MT8183_INFRACFG_AO_CCU_I2C_DMA_SW_RST			119
+#define MT8183_INFRACFG_AO_I2C6_SW_RST				120
+#define MT8183_INFRACFG_AO_CCU_GALS_SW_RST			121
+#define MT8183_INFRACFG_AO_IPU_GALS_SW_RST			122
+#define MT8183_INFRACFG_AO_CONN2AP_GALS_SW_RST			123
+#define MT8183_INFRACFG_AO_AP_MD_CCIF2_SW_RST			124
+#define MT8183_INFRACFG_AO_AP_MD_CCIF3_SW_RST			125
+#define MT8183_INFRACFG_AO_I2C7_SW_RST				126
+#define MT8183_INFRACFG_AO_I2C8_SW_RST				127
+
+#endif  /* _DT_BINDINGS_RESET_CONTROLLER_MT8183 */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 4f750c4..8baab32 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -349,6 +349,17 @@
 struct clk *devm_clk_get(struct device *dev, const char *id);
 
 /**
+ * devm_clk_get_optional - lookup and obtain a managed reference to an optional
+ *			   clock producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Behaves the same as devm_clk_get() except where there is no clock producer.
+ * In this case, instead of returning -ENOENT, the function returns NULL.
+ */
+struct clk *devm_clk_get_optional(struct device *dev, const char *id);
+
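+/*
+ * Usage sketch in a driver probe ("bus" is an assumed consumer ID):
+ *
+ *	struct clk *clk = devm_clk_get_optional(&pdev->dev, "bus");
+ *
+ *	if (IS_ERR(clk))
+ *		return PTR_ERR(clk);
+ *
+ * A missing clock yields NULL, which the clk_prepare_enable()/clk_disable()
+ * family accepts as a no-op dummy clock.
+ */
+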
+/**
  * devm_get_clk_from_child - lookup and obtain a managed reference to a
  *			     clock producer from child node.
  * @dev: device for clock "consumer"
@@ -647,6 +658,12 @@
 	return NULL;
 }
 
+static inline struct clk *devm_clk_get_optional(struct device *dev,
+						const char *id)
+{
+	return NULL;
+}
+
 static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
 						 struct clk_bulk_data *clks)
 {
@@ -774,6 +791,25 @@
 	clk_bulk_unprepare(num_clks, clks);
 }
 
+/**
+ * clk_get_optional - lookup and obtain a reference to an optional clock
+ *		      producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Behaves the same as clk_get() except where there is no clock producer. In
+ * this case, instead of returning -ENOENT, the function returns NULL.
+ */
+static inline struct clk *clk_get_optional(struct device *dev, const char *id)
+{
+	struct clk *clk = clk_get(dev, id);
+
+	if (clk == ERR_PTR(-ENOENT))
+		return NULL;
+
+	return clk;
+}
+
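+/*
+ * Note that only the -ENOENT case is filtered above; other errors such as
+ * -EPROBE_DEFER still propagate. Sketch:
+ *
+ *	struct clk *clk = clk_get_optional(dev, NULL);
+ *
+ *	if (IS_ERR(clk))
+ *		return PTR_ERR(clk);
+ */
+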
 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
 struct clk *of_clk_get(struct device_node *np, int index);
 struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
diff --git a/include/linux/device.h b/include/linux/device.h
index b1c8150..c4043a6 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -162,6 +162,11 @@
 			 const struct device_type *type);
 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
 void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
+int device_match_name(struct device *dev, const void *name);
+int device_match_of_node(struct device *dev, const void *np);
+int device_match_fwnode(struct device *dev, const void *fwnode);
+int device_match_devt(struct device *dev, const void *pdevt);
 
 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
 		     int (*fn)(struct device *dev, void *data));
@@ -467,6 +472,57 @@
 					struct device *start, const void *data,
 					int (*match)(struct device *, const void *));
 
+/**
+ * class_find_device_by_name - device iterator for locating a particular device
+ * with a specific name.
+ * @class: class type
+ * @name: name of the device to match
+ */
+static inline struct device *class_find_device_by_name(struct class *class,
+						       const char *name)
+{
+	return class_find_device(class, NULL, name, device_match_name);
+}
+
+/**
+ * class_find_device_by_of_node - device iterator for locating a particular device
+ * matching the of_node.
+ * @class: class type
+ * @np: of_node of the device to match.
+ */
+static inline struct device *
+class_find_device_by_of_node(struct class *class, const struct device_node *np)
+{
+	return class_find_device(class, NULL, np, device_match_of_node);
+}
+
+/**
+ * class_find_device_by_fwnode - device iterator for locating a particular device
+ * matching the fwnode.
+ * @class: class type
+ * @fwnode: fwnode of the device to match.
+ */
+static inline struct device *
+class_find_device_by_fwnode(struct class *class,
+			    const struct fwnode_handle *fwnode)
+{
+	return class_find_device(class, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * class_find_device_by_devt - device iterator for locating a particular device
+ * matching the device number.
+ * @class: class type
+ * @start: device to start search from
+ * @devt: dev_t (device number) of the device to match.
+ */
+static inline struct device *class_find_device_by_devt(struct class *class,
+						       struct device *start,
+						       dev_t devt)
+{
+	return class_find_device(class, start, &devt, device_match_devt);
+}
+
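+/*
+ * Usage sketch ("foo_class" and the device name are assumptions):
+ *
+ *	struct device *dev = class_find_device_by_name(&foo_class, "foo0");
+ *
+ *	if (dev) {
+ *		...
+ *		put_device(dev);
+ *	}
+ *
+ * class_find_device() takes a reference on a match, hence the put_device().
+ */
+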
 struct class_attribute {
 	struct attribute attr;
 	ssize_t (*show)(struct class *class, struct class_attribute *attr,
@@ -755,20 +811,30 @@
 
 /**
  * struct device_connection - Device Connection Descriptor
+ * @fwnode: The device node of the connected device
  * @endpoint: The names of the two devices connected together
  * @id: Unique identifier for the connection
  * @list: List head, private, for internal use only
+ *
+ * NOTE: @fwnode is not used together with @endpoint. @fwnode is used when
+ * platform firmware defines the connection. When the connection is registered
+ * with device_connection_add(), @endpoint is used instead.
  */
 struct device_connection {
+	struct fwnode_handle	*fwnode;
 	const char		*endpoint[2];
 	const char		*id;
 	struct list_head	list;
 };
 
+typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep,
+				   void *data);
+
+void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
+				   const char *con_id, void *data,
+				   devcon_match_fn_t match);
 void *device_connection_find_match(struct device *dev, const char *con_id,
-				void *data,
-				void *(*match)(struct device_connection *con,
-					       int ep, void *data));
+				   void *data, devcon_match_fn_t match);
 
 struct device *device_connection_find(struct device *dev, const char *con_id);
 
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 87994c2..8984eb2 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -400,6 +400,17 @@
 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
 
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
+{
+	return dev->iommu_fwspec;
+}
+
+static inline void dev_iommu_fwspec_set(struct device *dev,
+					struct iommu_fwspec *fwspec)
+{
+	dev->iommu_fwspec = fwspec;
+}
+
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
diff --git a/include/linux/irqchip/mtk-gic-extend.h b/include/linux/irqchip/mtk-gic-extend.h
new file mode 100644
index 0000000..7597b70
--- /dev/null
+++ b/include/linux/irqchip/mtk-gic-extend.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Maoguang.Meng <maoguang.meng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_GIC_EXTEND_H
+#define __MTK_GIC_EXTEND_H
+
+#define MT_EDGE_SENSITIVE	0
+#define MT_LEVEL_SENSITIVE	1
+#define MT_POLARITY_LOW		0
+#define MT_POLARITY_HIGH	1
+
+#ifndef FIQ_SMP_CALL_SGI
+#define FIQ_SMP_CALL_SGI	13
+#endif
+
+typedef void (*fiq_isr_handler)(void *arg, void *regs, void *svc_sp);
+
+enum {
+	IRQ_MASK_HEADER = 0xF1F1F1F1,
+	IRQ_MASK_FOOTER = 0xF2F2F2F2
+};
+
+struct mtk_irq_mask {
+	unsigned int header;	/* for error checking */
+	__u32 mask0;
+	__u32 mask1;
+	__u32 mask2;
+	__u32 mask3;
+	__u32 mask4;
+	__u32 mask5;
+	__u32 mask6;
+	__u32 mask7;
+	__u32 mask8;
+	__u32 mask9;
+	__u32 mask10;
+	__u32 mask11;
+	__u32 mask12;
+	unsigned int footer;	/* for error checking */
+};
+
+unsigned int get_hardware_irq(unsigned int virq);
+void mt_irq_unmask_for_sleep(unsigned int hwirq);
+void mt_irq_unmask_for_sleep_ex(unsigned int virq);
+void mt_irq_mask_for_sleep(unsigned int virq);
+int mt_irq_mask_all(struct mtk_irq_mask *mask);
+int mt_irq_mask_restore(struct mtk_irq_mask *mask);
+void mt_irq_set_pending_for_sleep(unsigned int irq);
+extern u32 mt_irq_get_pending_vec(u32 start_irq);
+extern void mt_irq_set_pending(unsigned int irq);
+extern void mt_irq_set_pending_hw(unsigned int hwirq);
+extern unsigned int mt_irq_get_pending(unsigned int irq);
+extern unsigned int mt_irq_get_pending_hw(unsigned int hwirq);
+extern u32 mt_irq_get_pol(u32 irq);
+extern u32 mt_irq_get_pol_hw(u32 hwirq);
+void mt_gic_set_priority(unsigned int irq);
+void mt_set_irq_priority(unsigned int irq, unsigned int priority);
+unsigned int mt_get_irq_priority(unsigned int irq);
+
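+/*
+ * Usage sketch: save-and-mask all interrupts, then restore the saved state
+ * (e.g. across a low-power transition):
+ *
+ *	struct mtk_irq_mask mask;
+ *
+ *	mt_irq_mask_all(&mask);
+ *	...
+ *	mt_irq_mask_restore(&mask);
+ */
+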
+#if defined(CONFIG_FIQ_GLUE)
+int request_fiq(int irq, fiq_isr_handler handler, unsigned long irq_flags, void *arg);
+void irq_raise_softirq(const struct cpumask *mask, unsigned int irq);
+#endif
+/* set the priority mask to 0x10 to mask all IRQs to this CPU */
+void gic_set_primask(void);
+/* restore the priority mask value */
+void gic_clear_primask(void);
+#endif /* __MTK_GIC_EXTEND_H */
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index ccb7342..9e35029 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -19,6 +19,10 @@
 #define CMDQ_WFE_UPDATE			BIT(31)
 #define CMDQ_WFE_WAIT			BIT(15)
 #define CMDQ_WFE_WAIT_VALUE		0x1
+#define CMDQ_WFE_OPTION			(CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \
+					CMDQ_WFE_WAIT_VALUE)
+/* maximum CMDQ event ID */
+#define CMDQ_MAX_EVENT			0x3ff
 
 /*
  * CMDQ_CODE_MASK:
@@ -42,6 +46,7 @@
 enum cmdq_code {
 	CMDQ_CODE_MASK = 0x02,
 	CMDQ_CODE_WRITE = 0x04,
+	CMDQ_CODE_POLL = 0x08,
 	CMDQ_CODE_JUMP = 0x10,
 	CMDQ_CODE_WFE = 0x20,
 	CMDQ_CODE_EOC = 0x40,
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 74deadb..9b0b212 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -131,4 +131,9 @@
 void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */
 void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
 
+int devm_mbox_controller_register(struct device *dev,
+				  struct mbox_controller *mbox);
+void devm_mbox_controller_unregister(struct device *dev,
+				     struct mbox_controller *mbox);
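+
+/*
+ * Probe-time sketch (assumes "priv->mbox" is a fully initialised
+ * struct mbox_controller):
+ *
+ *	err = devm_mbox_controller_register(&pdev->dev, &priv->mbox);
+ *	if (err)
+ *		return err;
+ *
+ * The matching unregister then happens automatically on driver detach.
+ */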
+
 #endif /* __MAILBOX_CONTROLLER_H */
diff --git a/include/linux/memory_group_manager.h b/include/linux/memory_group_manager.h
new file mode 100644
index 0000000..b1ac253
--- /dev/null
+++ b/include/linux/memory_group_manager.h
@@ -0,0 +1,198 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _MEMORY_GROUP_MANAGER_H_
+#define _MEMORY_GROUP_MANAGER_H_
+
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/version.h>
+
+#if (KERNEL_VERSION(4, 17, 0) > LINUX_VERSION_CODE)
+typedef int vm_fault_t;
+#endif
+
+#define MEMORY_GROUP_MANAGER_NR_GROUPS (16)
+
+struct memory_group_manager_device;
+struct memory_group_manager_import_data;
+
+/**
+ * struct memory_group_manager_ops - Callbacks for memory group manager
+ *                                   operations
+ *
+ * @mgm_alloc_page:           Callback to allocate physical memory in a group
+ * @mgm_free_page:            Callback to free physical memory in a group
+ * @mgm_get_import_memory_id: Callback to get the group ID for imported memory
+ * @mgm_update_gpu_pte:       Callback to modify a GPU page table entry
+ * @mgm_vmf_insert_pfn_prot:  Callback to map a physical memory page for the CPU
+ */
+struct memory_group_manager_ops {
+	/**
+	 * mgm_alloc_page - Allocate a physical memory page in a group
+	 *
+	 * @mgm_dev:  The memory group manager through which the request is
+	 *            being made.
+	 * @group_id: A physical memory group ID. The meaning of this is defined
+	 *            by the systems integrator. Its valid range is
+	 *            0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
+	 * @gfp_mask: Bitmask of Get Free Page flags affecting allocator
+	 *            behavior.
+	 * @order:    Page order for physical page size (order=0 means 4 KiB,
+	 *            order=9 means 2 MiB).
+	 *
+	 * Return: Pointer to allocated page, or NULL if allocation failed.
+	 */
+	struct page *(*mgm_alloc_page)(
+		struct memory_group_manager_device *mgm_dev, int group_id,
+		gfp_t gfp_mask, unsigned int order);
+
+	/**
+	 * mgm_free_page - Free a physical memory page in a group
+	 *
+	 * @mgm_dev:  The memory group manager through which the request
+	 *            is being made.
+	 * @group_id: A physical memory group ID. The meaning of this is
+	 *            defined by the systems integrator. Its valid range is
+	 *            0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
+	 * @page:     Address of the struct associated with a page of physical
+	 *            memory that was allocated by calling the mgm_alloc_page
+	 *            method of the same memory pool with the same values of
+	 *            @group_id and @order.
+	 * @order:    Page order for physical page size (order=0 means 4 KiB,
+	 *            order=9 means 2 MiB).
+	 */
+	void (*mgm_free_page)(
+		struct memory_group_manager_device *mgm_dev, int group_id,
+		struct page *page, unsigned int order);
+
+	/**
+	 * mgm_get_import_memory_id - Get the physical memory group ID for the
+	 *                            imported memory
+	 *
+	 * @mgm_dev:     The memory group manager through which the request
+	 *               is being made.
+	 * @import_data: Pointer to the data which describes imported memory.
+	 *
+	 * Note that this callback is optional: where it is not provided, the
+	 * callback pointer must be set to NULL to indicate that it is not in
+	 * use.
+	 *
+	 * Return: The memory group ID to use when mapping pages from this
+	 *         imported memory.
+	 */
+	int (*mgm_get_import_memory_id)(
+		struct memory_group_manager_device *mgm_dev,
+		struct memory_group_manager_import_data *import_data);
+
+	/**
+	 * mgm_update_gpu_pte - Modify a GPU page table entry for a memory group
+	 *
+	 * @mgm_dev:   The memory group manager through which the request
+	 *             is being made.
+	 * @group_id:  A physical memory group ID. The meaning of this is
+	 *             defined by the systems integrator. Its valid range is
+	 *             0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
+	 * @mmu_level: The level of the page table entry in @pte.
+	 * @pte:       The page table entry to modify, in LPAE or AArch64 format
+	 *             (depending on the driver's configuration). This should be
+	 *             decoded to determine the physical address and any other
+	 *             properties of the mapping the manager requires.
+	 *
+	 * This function allows the memory group manager to modify a GPU page
+	 * table entry before it is stored by the kbase module (controller
+	 * driver). It may set certain bits in the page table entry attributes
+	 * or in the physical address, based on the physical memory group ID.
+	 *
+	 * Return: A modified GPU page table entry to be stored in a page table.
+	 */
+	u64 (*mgm_update_gpu_pte)(struct memory_group_manager_device *mgm_dev,
+			int group_id, int mmu_level, u64 pte);
+
+	/**
+	 * mgm_vmf_insert_pfn_prot - Map a physical page in a group for the CPU
+	 *
+	 * @mgm_dev:   The memory group manager through which the request
+	 *             is being made.
+	 * @group_id:  A physical memory group ID. The meaning of this is
+	 *             defined by the systems integrator. Its valid range is
+	 *             0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
+	 * @vma:       The virtual memory area to insert the page into.
+	 * @addr:      A virtual address (in @vma) to assign to the page.
+	 * @pfn:       The kernel Page Frame Number to insert at @addr in @vma.
+	 * @pgprot:    Protection flags for the inserted page.
+	 *
+	 * Called from a CPU virtual memory page fault handler. This function
+	 * creates a page table entry from the given parameter values and stores
+	 * it at the appropriate location (unlike mgm_update_gpu_pte, which
+	 * returns a modified entry).
+	 *
+	 * Return: Type of fault that occurred or VM_FAULT_NOPAGE if the page
+	 *         table entry was successfully installed.
+	 */
+	vm_fault_t (*mgm_vmf_insert_pfn_prot)(
+		struct memory_group_manager_device *mgm_dev, int group_id,
+		struct vm_area_struct *vma, unsigned long addr,
+		unsigned long pfn, pgprot_t pgprot);
+};
+
+/**
+ * struct memory_group_manager_device - Device structure for a memory group
+ *                                      manager
+ *
+ * @ops:   Callbacks associated with this device
+ * @data:  Pointer to device private data
+ * @owner: Module providing this device
+ *
+ * In order for a systems integrator to provide custom behaviors for memory
+ * operations performed by the kbase module (controller driver), they must
+ * provide a platform-specific driver module which implements this interface.
+ *
+ * This structure should be registered with the platform device using
+ * platform_set_drvdata().
+ */
+struct memory_group_manager_device {
+	struct memory_group_manager_ops ops;
+	void *data;
+	struct module *owner;
+};
+
+enum memory_group_manager_import_type {
+	MEMORY_GROUP_MANAGER_IMPORT_TYPE_DMA_BUF
+};
+
+/**
+ * struct memory_group_manager_import_data - Structure describing the imported
+ *                                           memory
+ *
+ * @type: Type of imported memory
+ * @u:    Union describing the imported memory
+ */
+struct memory_group_manager_import_data {
+	enum memory_group_manager_import_type type;
+	union {
+		struct dma_buf *dma_buf;
+	} u;
+};
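+
+/*
+ * Implementation sketch (illustrative only): a pass-through manager that
+ * ignores the group ID and defers to the kernel allocators.
+ *
+ *	static struct page *pt_alloc_page(
+ *		struct memory_group_manager_device *mgm_dev, int group_id,
+ *		gfp_t gfp_mask, unsigned int order)
+ *	{
+ *		return alloc_pages(gfp_mask, order);
+ *	}
+ *
+ *	static void pt_free_page(
+ *		struct memory_group_manager_device *mgm_dev, int group_id,
+ *		struct page *page, unsigned int order)
+ *	{
+ *		__free_pages(page, order);
+ *	}
+ *
+ * A pass-through mgm_vmf_insert_pfn_prot() would likewise just return
+ * vmf_insert_pfn_prot(vma, addr, pfn, pgprot).
+ */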
+
+#endif /* _MEMORY_GROUP_MANAGER_H_ */
diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h
new file mode 100644
index 0000000..05108617
--- /dev/null
+++ b/include/linux/mfd/mt6358/core.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6358_CORE_H__
+#define __MFD_MT6358_CORE_H__
+
+#define MT6358_REG_WIDTH 16
+
+struct irq_top_t {
+	int hwirq_base;
+	unsigned int num_int_regs;
+	unsigned int num_int_bits;
+	unsigned int en_reg;
+	unsigned int en_reg_shift;
+	unsigned int sta_reg;
+	unsigned int sta_reg_shift;
+	unsigned int top_offset;
+};
+
+struct pmic_irq_data {
+	unsigned int num_top;
+	unsigned int num_pmic_irqs;
+	unsigned short top_int_status_reg;
+	bool *enable_hwirq;
+	bool *cache_hwirq;
+};
+
+enum mt6358_irq_top_status_shift {
+	MT6358_BUCK_TOP = 0,
+	MT6358_LDO_TOP,
+	MT6358_PSC_TOP,
+	MT6358_SCK_TOP,
+	MT6358_BM_TOP,
+	MT6358_HK_TOP,
+	MT6358_AUD_TOP,
+	MT6358_MISC_TOP,
+};
+
+enum mt6358_irq_numbers {
+	MT6358_IRQ_VPROC11_OC = 0,
+	MT6358_IRQ_VPROC12_OC,
+	MT6358_IRQ_VCORE_OC,
+	MT6358_IRQ_VGPU_OC,
+	MT6358_IRQ_VMODEM_OC,
+	MT6358_IRQ_VDRAM1_OC,
+	MT6358_IRQ_VS1_OC,
+	MT6358_IRQ_VS2_OC,
+	MT6358_IRQ_VPA_OC,
+	MT6358_IRQ_VCORE_PREOC,
+	MT6358_IRQ_VFE28_OC = 16,
+	MT6358_IRQ_VXO22_OC,
+	MT6358_IRQ_VRF18_OC,
+	MT6358_IRQ_VRF12_OC,
+	MT6358_IRQ_VEFUSE_OC,
+	MT6358_IRQ_VCN33_OC,
+	MT6358_IRQ_VCN28_OC,
+	MT6358_IRQ_VCN18_OC,
+	MT6358_IRQ_VCAMA1_OC,
+	MT6358_IRQ_VCAMA2_OC,
+	MT6358_IRQ_VCAMD_OC,
+	MT6358_IRQ_VCAMIO_OC,
+	MT6358_IRQ_VLDO28_OC,
+	MT6358_IRQ_VA12_OC,
+	MT6358_IRQ_VAUX18_OC,
+	MT6358_IRQ_VAUD28_OC,
+	MT6358_IRQ_VIO28_OC,
+	MT6358_IRQ_VIO18_OC,
+	MT6358_IRQ_VSRAM_PROC11_OC,
+	MT6358_IRQ_VSRAM_PROC12_OC,
+	MT6358_IRQ_VSRAM_OTHERS_OC,
+	MT6358_IRQ_VSRAM_GPU_OC,
+	MT6358_IRQ_VDRAM2_OC,
+	MT6358_IRQ_VMC_OC,
+	MT6358_IRQ_VMCH_OC,
+	MT6358_IRQ_VEMC_OC,
+	MT6358_IRQ_VSIM1_OC,
+	MT6358_IRQ_VSIM2_OC,
+	MT6358_IRQ_VIBR_OC,
+	MT6358_IRQ_VUSB_OC,
+	MT6358_IRQ_VBIF28_OC,
+	MT6358_IRQ_PWRKEY = 48,
+	MT6358_IRQ_HOMEKEY,
+	MT6358_IRQ_PWRKEY_R,
+	MT6358_IRQ_HOMEKEY_R,
+	MT6358_IRQ_NI_LBAT_INT,
+	MT6358_IRQ_CHRDET,
+	MT6358_IRQ_CHRDET_EDGE,
+	MT6358_IRQ_VCDT_HV_DET,
+	MT6358_IRQ_RTC = 64,
+	MT6358_IRQ_FG_BAT0_H = 80,
+	MT6358_IRQ_FG_BAT0_L,
+	MT6358_IRQ_FG_CUR_H,
+	MT6358_IRQ_FG_CUR_L,
+	MT6358_IRQ_FG_ZCV,
+	MT6358_IRQ_FG_BAT1_H,
+	MT6358_IRQ_FG_BAT1_L,
+	MT6358_IRQ_FG_N_CHARGE_L,
+	MT6358_IRQ_FG_IAVG_H,
+	MT6358_IRQ_FG_IAVG_L,
+	MT6358_IRQ_FG_TIME_H,
+	MT6358_IRQ_FG_DISCHARGE,
+	MT6358_IRQ_FG_CHARGE,
+	MT6358_IRQ_BATON_LV = 96,
+	MT6358_IRQ_BATON_HT,
+	MT6358_IRQ_BATON_BAT_IN,
+	MT6358_IRQ_BATON_BAT_OUT,
+	MT6358_IRQ_BIF,
+	MT6358_IRQ_BAT_H = 112,
+	MT6358_IRQ_BAT_L,
+	MT6358_IRQ_BAT2_H,
+	MT6358_IRQ_BAT2_L,
+	MT6358_IRQ_BAT_TEMP_H,
+	MT6358_IRQ_BAT_TEMP_L,
+	MT6358_IRQ_AUXADC_IMP,
+	MT6358_IRQ_NAG_C_DLTV,
+	MT6358_IRQ_AUDIO = 128,
+	MT6358_IRQ_ACCDET = 133,
+	MT6358_IRQ_ACCDET_EINT0,
+	MT6358_IRQ_ACCDET_EINT1,
+	MT6358_IRQ_SPI_CMD_ALERT = 144,
+	MT6358_IRQ_NR,
+};
+
+#define MT6358_IRQ_BUCK_BASE MT6358_IRQ_VPROC11_OC
+#define MT6358_IRQ_LDO_BASE MT6358_IRQ_VFE28_OC
+#define MT6358_IRQ_PSC_BASE MT6358_IRQ_PWRKEY
+#define MT6358_IRQ_SCK_BASE MT6358_IRQ_RTC
+#define MT6358_IRQ_BM_BASE MT6358_IRQ_FG_BAT0_H
+#define MT6358_IRQ_HK_BASE MT6358_IRQ_BAT_H
+#define MT6358_IRQ_AUD_BASE MT6358_IRQ_AUDIO
+#define MT6358_IRQ_MISC_BASE MT6358_IRQ_SPI_CMD_ALERT
+
+#define MT6358_IRQ_BUCK_BITS (MT6358_IRQ_VCORE_PREOC - MT6358_IRQ_BUCK_BASE + 1)
+#define MT6358_IRQ_LDO_BITS (MT6358_IRQ_VBIF28_OC - MT6358_IRQ_LDO_BASE + 1)
+#define MT6358_IRQ_PSC_BITS (MT6358_IRQ_VCDT_HV_DET - MT6358_IRQ_PSC_BASE + 1)
+#define MT6358_IRQ_SCK_BITS (MT6358_IRQ_RTC - MT6358_IRQ_SCK_BASE + 1)
+#define MT6358_IRQ_BM_BITS (MT6358_IRQ_BIF - MT6358_IRQ_BM_BASE + 1)
+#define MT6358_IRQ_HK_BITS (MT6358_IRQ_NAG_C_DLTV - MT6358_IRQ_HK_BASE + 1)
+#define MT6358_IRQ_AUD_BITS (MT6358_IRQ_ACCDET_EINT1 - MT6358_IRQ_AUD_BASE + 1)
+#define MT6358_IRQ_MISC_BITS	\
+	(MT6358_IRQ_SPI_CMD_ALERT - MT6358_IRQ_MISC_BASE + 1)
+
+#define MT6358_TOP_GEN(sp)	\
+{	\
+	.hwirq_base = MT6358_IRQ_##sp##_BASE,	\
+	.num_int_regs =	\
+		((MT6358_IRQ_##sp##_BITS - 1) / MT6358_REG_WIDTH) + 1,	\
+	.num_int_bits = MT6358_IRQ_##sp##_BITS, \
+	.en_reg = MT6358_##sp##_TOP_INT_CON0,		\
+	.en_reg_shift = 0x6,	\
+	.sta_reg = MT6358_##sp##_TOP_INT_STATUS0,		\
+	.sta_reg_shift = 0x2,	\
+	.top_offset = MT6358_##sp##_TOP,	\
+}
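+
+/*
+ * Usage sketch: each interrupt "top" entry is generated from its name, e.g.
+ *
+ *	static struct irq_top_t mt6358_ints[] = {
+ *		MT6358_TOP_GEN(BUCK),
+ *		MT6358_TOP_GEN(LDO),
+ *	};
+ *
+ * MT6358_TOP_GEN(BUCK) pastes together the MT6358_IRQ_BUCK_* constants above
+ * and the MT6358_BUCK_TOP_INT_* register offsets from registers.h.
+ */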
+
+#endif /* __MFD_MT6358_CORE_H__ */
diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h
new file mode 100644
index 0000000..ff5645b
--- /dev/null
+++ b/include/linux/mfd/mt6358/registers.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6358_REGISTERS_H__
+#define __MFD_MT6358_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6358_SWCID                          0xa
+#define MT6358_MISC_TOP_INT_CON0              0x188
+#define MT6358_MISC_TOP_INT_STATUS0           0x194
+#define MT6358_TOP_INT_STATUS0                0x19e
+#define MT6358_SCK_TOP_INT_CON0               0x52e
+#define MT6358_SCK_TOP_INT_STATUS0            0x53a
+#define MT6358_EOSC_CALI_CON0                 0x540
+#define MT6358_EOSC_CALI_CON1                 0x542
+#define MT6358_RTC_MIX_CON0                   0x544
+#define MT6358_RTC_MIX_CON1                   0x546
+#define MT6358_RTC_MIX_CON2                   0x548
+#define MT6358_RTC_DSN_ID                     0x580
+#define MT6358_RTC_DSN_REV0                   0x582
+#define MT6358_RTC_DBI                        0x584
+#define MT6358_RTC_DXI                        0x586
+#define MT6358_RTC_BBPU                       0x588
+#define MT6358_RTC_IRQ_STA                    0x58a
+#define MT6358_RTC_IRQ_EN                     0x58c
+#define MT6358_RTC_CII_EN                     0x58e
+#define MT6358_RTC_AL_MASK                    0x590
+#define MT6358_RTC_TC_SEC                     0x592
+#define MT6358_RTC_TC_MIN                     0x594
+#define MT6358_RTC_TC_HOU                     0x596
+#define MT6358_RTC_TC_DOM                     0x598
+#define MT6358_RTC_TC_DOW                     0x59a
+#define MT6358_RTC_TC_MTH                     0x59c
+#define MT6358_RTC_TC_YEA                     0x59e
+#define MT6358_RTC_AL_SEC                     0x5a0
+#define MT6358_RTC_AL_MIN                     0x5a2
+#define MT6358_RTC_AL_HOU                     0x5a4
+#define MT6358_RTC_AL_DOM                     0x5a6
+#define MT6358_RTC_AL_DOW                     0x5a8
+#define MT6358_RTC_AL_MTH                     0x5aa
+#define MT6358_RTC_AL_YEA                     0x5ac
+#define MT6358_RTC_OSC32CON                   0x5ae
+#define MT6358_RTC_POWERKEY1                  0x5b0
+#define MT6358_RTC_POWERKEY2                  0x5b2
+#define MT6358_RTC_PDN1                       0x5b4
+#define MT6358_RTC_PDN2                       0x5b6
+#define MT6358_RTC_SPAR0                      0x5b8
+#define MT6358_RTC_SPAR1                      0x5ba
+#define MT6358_RTC_PROT                       0x5bc
+#define MT6358_RTC_DIFF                       0x5be
+#define MT6358_RTC_CALI                       0x5c0
+#define MT6358_RTC_WRTGR                      0x5c2
+#define MT6358_RTC_CON                        0x5c4
+#define MT6358_RTC_SEC_CTRL                   0x5c6
+#define MT6358_RTC_INT_CNT                    0x5c8
+#define MT6358_RTC_SEC_DAT0                   0x5ca
+#define MT6358_RTC_SEC_DAT1                   0x5cc
+#define MT6358_RTC_SEC_DAT2                   0x5ce
+#define MT6358_RTC_SEC_DSN_ID                 0x600
+#define MT6358_RTC_SEC_DSN_REV0               0x602
+#define MT6358_RTC_SEC_DBI                    0x604
+#define MT6358_RTC_SEC_DXI                    0x606
+#define MT6358_RTC_TC_SEC_SEC                 0x608
+#define MT6358_RTC_TC_MIN_SEC                 0x60a
+#define MT6358_RTC_TC_HOU_SEC                 0x60c
+#define MT6358_RTC_TC_DOM_SEC                 0x60e
+#define MT6358_RTC_TC_DOW_SEC                 0x610
+#define MT6358_RTC_TC_MTH_SEC                 0x612
+#define MT6358_RTC_TC_YEA_SEC                 0x614
+#define MT6358_RTC_SEC_CK_PDN                 0x616
+#define MT6358_RTC_SEC_WRTGR                  0x618
+#define MT6358_PSC_TOP_INT_CON0               0x910
+#define MT6358_PSC_TOP_INT_STATUS0            0x91c
+#define MT6358_BM_TOP_INT_CON0                0xc32
+#define MT6358_BM_TOP_INT_CON1                0xc38
+#define MT6358_BM_TOP_INT_STATUS0             0xc4a
+#define MT6358_BM_TOP_INT_STATUS1             0xc4c
+#define MT6358_HK_TOP_INT_CON0                0xf92
+#define MT6358_HK_TOP_INT_STATUS0             0xf9e
+#define MT6358_BUCK_TOP_INT_CON0              0x1318
+#define MT6358_BUCK_TOP_INT_STATUS0           0x1324
+#define MT6358_BUCK_VPROC11_CON0              0x1388
+#define MT6358_BUCK_VPROC11_DBG0              0x139e
+#define MT6358_BUCK_VPROC11_DBG1              0x13a0
+#define MT6358_BUCK_VPROC11_ELR0              0x13a6
+#define MT6358_BUCK_VPROC12_CON0              0x1408
+#define MT6358_BUCK_VPROC12_DBG0              0x141e
+#define MT6358_BUCK_VPROC12_DBG1              0x1420
+#define MT6358_BUCK_VPROC12_ELR0              0x1426
+#define MT6358_BUCK_VCORE_CON0                0x1488
+#define MT6358_BUCK_VCORE_DBG0                0x149e
+#define MT6358_BUCK_VCORE_DBG1                0x14a0
+#define MT6358_BUCK_VCORE_ELR0                0x14aa
+#define MT6358_BUCK_VGPU_CON0                 0x1508
+#define MT6358_BUCK_VGPU_DBG0                 0x151e
+#define MT6358_BUCK_VGPU_DBG1                 0x1520
+#define MT6358_BUCK_VGPU_ELR0                 0x1526
+#define MT6358_BUCK_VMODEM_CON0               0x1588
+#define MT6358_BUCK_VMODEM_DBG0               0x159e
+#define MT6358_BUCK_VMODEM_DBG1               0x15a0
+#define MT6358_BUCK_VMODEM_ELR0               0x15a6
+#define MT6358_BUCK_VDRAM1_CON0               0x1608
+#define MT6358_BUCK_VDRAM1_DBG0               0x161e
+#define MT6358_BUCK_VDRAM1_DBG1               0x1620
+#define MT6358_BUCK_VDRAM1_ELR0               0x1626
+#define MT6358_BUCK_VS1_CON0                  0x1688
+#define MT6358_BUCK_VS1_DBG0                  0x169e
+#define MT6358_BUCK_VS1_DBG1                  0x16a0
+#define MT6358_BUCK_VS1_ELR0                  0x16ae
+#define MT6358_BUCK_VS2_CON0                  0x1708
+#define MT6358_BUCK_VS2_DBG0                  0x171e
+#define MT6358_BUCK_VS2_DBG1                  0x1720
+#define MT6358_BUCK_VS2_ELR0                  0x172e
+#define MT6358_BUCK_VPA_CON0                  0x1788
+#define MT6358_BUCK_VPA_CON1                  0x178a
+#define MT6358_BUCK_VPA_ELR0                  MT6358_BUCK_VPA_CON1
+#define MT6358_BUCK_VPA_DBG0                  0x1792
+#define MT6358_BUCK_VPA_DBG1                  0x1794
+#define MT6358_VPROC_ANA_CON0                 0x180c
+#define MT6358_VCORE_VGPU_ANA_CON0            0x1828
+#define MT6358_VMODEM_ANA_CON0                0x1888
+#define MT6358_VDRAM1_ANA_CON0                0x1896
+#define MT6358_VS1_ANA_CON0                   0x18a2
+#define MT6358_VS2_ANA_CON0                   0x18ae
+#define MT6358_VPA_ANA_CON0                   0x18ba
+#define MT6358_LDO_TOP_INT_CON0               0x1a50
+#define MT6358_LDO_TOP_INT_CON1               0x1a56
+#define MT6358_LDO_TOP_INT_STATUS0            0x1a68
+#define MT6358_LDO_TOP_INT_STATUS1            0x1a6a
+#define MT6358_LDO_VXO22_CON0                 0x1a88
+#define MT6358_LDO_VXO22_CON1                 0x1a96
+#define MT6358_LDO_VA12_CON0                  0x1a9c
+#define MT6358_LDO_VA12_CON1                  0x1aaa
+#define MT6358_LDO_VAUX18_CON0                0x1ab0
+#define MT6358_LDO_VAUX18_CON1                0x1abe
+#define MT6358_LDO_VAUD28_CON0                0x1ac4
+#define MT6358_LDO_VAUD28_CON1                0x1ad2
+#define MT6358_LDO_VIO28_CON0                 0x1ad8
+#define MT6358_LDO_VIO28_CON1                 0x1ae6
+#define MT6358_LDO_VIO18_CON0                 0x1aec
+#define MT6358_LDO_VIO18_CON1                 0x1afa
+#define MT6358_LDO_VDRAM2_CON0                0x1b08
+#define MT6358_LDO_VDRAM2_CON1                0x1b16
+#define MT6358_LDO_VEMC_CON0                  0x1b1c
+#define MT6358_LDO_VEMC_CON1                  0x1b2a
+#define MT6358_LDO_VUSB_CON0_0                0x1b30
+#define MT6358_LDO_VUSB_CON1                  0x1b40
+#define MT6358_LDO_VSRAM_PROC11_CON0          0x1b46
+#define MT6358_LDO_VSRAM_PROC11_DBG0          0x1b60
+#define MT6358_LDO_VSRAM_PROC11_DBG1          0x1b62
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON0 0x1b64
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON1 0x1b66
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON2 0x1b68
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON3 0x1b6a
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON0 0x1b6c
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON1 0x1b6e
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON2 0x1b70
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON3 0x1b72
+#define MT6358_LDO_VSRAM_WAKEUP_CON0          0x1b74
+#define MT6358_LDO_GON1_ELR_NUM               0x1b76
+#define MT6358_LDO_VDRAM2_ELR0                0x1b78
+#define MT6358_LDO_VSRAM_PROC12_CON0          0x1b88
+#define MT6358_LDO_VSRAM_PROC12_DBG0          0x1ba2
+#define MT6358_LDO_VSRAM_PROC12_DBG1          0x1ba4
+#define MT6358_LDO_VSRAM_OTHERS_CON0          0x1ba6
+#define MT6358_LDO_VSRAM_OTHERS_DBG0          0x1bc0
+#define MT6358_LDO_VSRAM_OTHERS_DBG1          0x1bc2
+#define MT6358_LDO_VSRAM_GPU_CON0             0x1bc8
+#define MT6358_LDO_VSRAM_GPU_DBG0             0x1be2
+#define MT6358_LDO_VSRAM_GPU_DBG1             0x1be4
+#define MT6358_LDO_VSRAM_CON0                 0x1bee
+#define MT6358_LDO_VSRAM_CON1                 0x1bf0
+#define MT6358_LDO_VSRAM_CON2                 0x1bf2
+#define MT6358_LDO_VSRAM_CON3                 0x1bf4
+#define MT6358_LDO_VFE28_CON0                 0x1c08
+#define MT6358_LDO_VFE28_CON1                 0x1c16
+#define MT6358_LDO_VFE28_CON2                 0x1c18
+#define MT6358_LDO_VFE28_CON3                 0x1c1a
+#define MT6358_LDO_VRF18_CON0                 0x1c1c
+#define MT6358_LDO_VRF18_CON1                 0x1c2a
+#define MT6358_LDO_VRF18_CON2                 0x1c2c
+#define MT6358_LDO_VRF18_CON3                 0x1c2e
+#define MT6358_LDO_VRF12_CON0                 0x1c30
+#define MT6358_LDO_VRF12_CON1                 0x1c3e
+#define MT6358_LDO_VRF12_CON2                 0x1c40
+#define MT6358_LDO_VRF12_CON3                 0x1c42
+#define MT6358_LDO_VEFUSE_CON0                0x1c44
+#define MT6358_LDO_VEFUSE_CON1                0x1c52
+#define MT6358_LDO_VEFUSE_CON2                0x1c54
+#define MT6358_LDO_VEFUSE_CON3                0x1c56
+#define MT6358_LDO_VCN18_CON0                 0x1c58
+#define MT6358_LDO_VCN18_CON1                 0x1c66
+#define MT6358_LDO_VCN18_CON2                 0x1c68
+#define MT6358_LDO_VCN18_CON3                 0x1c6a
+#define MT6358_LDO_VCAMA1_CON0                0x1c6c
+#define MT6358_LDO_VCAMA1_CON1                0x1c7a
+#define MT6358_LDO_VCAMA1_CON2                0x1c7c
+#define MT6358_LDO_VCAMA1_CON3                0x1c7e
+#define MT6358_LDO_VCAMA2_CON0                0x1c88
+#define MT6358_LDO_VCAMA2_CON1                0x1c96
+#define MT6358_LDO_VCAMA2_CON2                0x1c98
+#define MT6358_LDO_VCAMA2_CON3                0x1c9a
+#define MT6358_LDO_VCAMD_CON0                 0x1c9c
+#define MT6358_LDO_VCAMD_CON1                 0x1caa
+#define MT6358_LDO_VCAMD_CON2                 0x1cac
+#define MT6358_LDO_VCAMD_CON3                 0x1cae
+#define MT6358_LDO_VCAMIO_CON0                0x1cb0
+#define MT6358_LDO_VCAMIO_CON1                0x1cbe
+#define MT6358_LDO_VCAMIO_CON2                0x1cc0
+#define MT6358_LDO_VCAMIO_CON3                0x1cc2
+#define MT6358_LDO_VMC_CON0                   0x1cc4
+#define MT6358_LDO_VMC_CON1                   0x1cd2
+#define MT6358_LDO_VMC_CON2                   0x1cd4
+#define MT6358_LDO_VMC_CON3                   0x1cd6
+#define MT6358_LDO_VMCH_CON0                  0x1cd8
+#define MT6358_LDO_VMCH_CON1                  0x1ce6
+#define MT6358_LDO_VMCH_CON2                  0x1ce8
+#define MT6358_LDO_VMCH_CON3                  0x1cea
+#define MT6358_LDO_VIBR_CON0                  0x1d08
+#define MT6358_LDO_VIBR_CON1                  0x1d16
+#define MT6358_LDO_VIBR_CON2                  0x1d18
+#define MT6358_LDO_VIBR_CON3                  0x1d1a
+#define MT6358_LDO_VCN33_CON0_0               0x1d1c
+#define MT6358_LDO_VCN33_CON0_1               0x1d2a
+#define MT6358_LDO_VCN33_CON1                 0x1d2c
+#define MT6358_LDO_VCN33_BT_CON1              MT6358_LDO_VCN33_CON1
+#define MT6358_LDO_VCN33_WIFI_CON1            MT6358_LDO_VCN33_CON1
+#define MT6358_LDO_VCN33_CON2                 0x1d2e
+#define MT6358_LDO_VCN33_CON3                 0x1d30
+#define MT6358_LDO_VLDO28_CON0_0              0x1d32
+#define MT6358_LDO_VLDO28_CON0_1              0x1d40
+#define MT6358_LDO_VLDO28_CON1                0x1d42
+#define MT6358_LDO_VLDO28_CON2                0x1d44
+#define MT6358_LDO_VLDO28_CON3                0x1d46
+#define MT6358_LDO_VSIM1_CON0                 0x1d48
+#define MT6358_LDO_VSIM1_CON1                 0x1d56
+#define MT6358_LDO_VSIM1_CON2                 0x1d58
+#define MT6358_LDO_VSIM1_CON3                 0x1d5a
+#define MT6358_LDO_VSIM2_CON0                 0x1d5c
+#define MT6358_LDO_VSIM2_CON1                 0x1d6a
+#define MT6358_LDO_VSIM2_CON2                 0x1d6c
+#define MT6358_LDO_VSIM2_CON3                 0x1d6e
+#define MT6358_LDO_VCN28_CON0                 0x1d88
+#define MT6358_LDO_VCN28_CON1                 0x1d96
+#define MT6358_LDO_VCN28_CON2                 0x1d98
+#define MT6358_LDO_VCN28_CON3                 0x1d9a
+#define MT6358_VRTC28_CON0                    0x1d9c
+#define MT6358_LDO_VBIF28_CON0                0x1d9e
+#define MT6358_LDO_VBIF28_CON1                0x1dac
+#define MT6358_LDO_VBIF28_CON2                0x1dae
+#define MT6358_LDO_VBIF28_CON3                0x1db0
+#define MT6358_VCAMA1_ANA_CON0                0x1e08
+#define MT6358_VCAMA2_ANA_CON0                0x1e0c
+#define MT6358_VCN33_ANA_CON0                 0x1e28
+#define MT6358_VSIM1_ANA_CON0                 0x1e2c
+#define MT6358_VSIM2_ANA_CON0                 0x1e30
+#define MT6358_VUSB_ANA_CON0                  0x1e34
+#define MT6358_VEMC_ANA_CON0                  0x1e38
+#define MT6358_VLDO28_ANA_CON0                0x1e3c
+#define MT6358_VIO28_ANA_CON0                 0x1e40
+#define MT6358_VIBR_ANA_CON0                  0x1e44
+#define MT6358_VMCH_ANA_CON0                  0x1e48
+#define MT6358_VMC_ANA_CON0                   0x1e4c
+#define MT6358_VRF18_ANA_CON0                 0x1e88
+#define MT6358_VCN18_ANA_CON0                 0x1e8c
+#define MT6358_VCAMIO_ANA_CON0                0x1e90
+#define MT6358_VIO18_ANA_CON0                 0x1e94
+#define MT6358_VEFUSE_ANA_CON0                0x1e98
+#define MT6358_VRF12_ANA_CON0                 0x1e9c
+#define MT6358_VSRAM_PROC11_ANA_CON0          0x1ea0
+#define MT6358_VSRAM_PROC12_ANA_CON0          0x1ea4
+#define MT6358_VSRAM_OTHERS_ANA_CON0          0x1ea6
+#define MT6358_VSRAM_GPU_ANA_CON0             0x1ea8
+#define MT6358_VDRAM2_ANA_CON0                0x1eaa
+#define MT6358_VCAMD_ANA_CON0                 0x1eae
+#define MT6358_VA12_ANA_CON0                  0x1eb2
+#define MT6358_AUD_TOP_INT_CON0               0x2228
+#define MT6358_AUD_TOP_INT_STATUS0            0x2234
+
+#endif /* __MFD_MT6358_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6392/core.h b/include/linux/mfd/mt6392/core.h
new file mode 100644
index 0000000..7575a79
--- /dev/null
+++ b/include/linux/mfd/mt6392/core.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ */
+
+#ifndef __MFD_MT6392_CORE_H__
+#define __MFD_MT6392_CORE_H__
+
+enum MT6392_IRQ_numbers {
+	MT6392_IRQ_SPKL_AB = 0,
+	MT6392_IRQ_SPKL,
+	MT6392_IRQ_BAT_L,
+	MT6392_IRQ_BAT_H,
+	MT6392_IRQ_WATCHDOG,
+	MT6392_IRQ_PWRKEY,
+	MT6392_IRQ_THR_L,
+	MT6392_IRQ_THR_H,
+	MT6392_IRQ_VBATON_UNDET,
+	MT6392_IRQ_BVALID_DET,
+	MT6392_IRQ_CHRDET,
+	MT6392_IRQ_OV,
+	MT6392_IRQ_LDO = 16,
+	MT6392_IRQ_FCHRKEY,
+	MT6392_IRQ_RELEASE_PWRKEY,
+	MT6392_IRQ_RELEASE_FCHRKEY,
+	MT6392_IRQ_RTC,
+	MT6392_IRQ_VPROC,
+	MT6392_IRQ_VSYS,
+	MT6392_IRQ_VCORE,
+	MT6392_IRQ_TYPE_C_CC,
+	MT6392_IRQ_TYPEC_H_MAX,
+	MT6392_IRQ_TYPEC_H_MIN,
+	MT6392_IRQ_TYPEC_L_MAX,
+	MT6392_IRQ_TYPEC_L_MIN,
+	MT6392_IRQ_THR_MAX,
+	MT6392_IRQ_THR_MIN,
+	MT6392_IRQ_NAG_C_DLTV,
+	MT6392_IRQ_NR,
+};
+
+#endif /* __MFD_MT6392_CORE_H__ */
diff --git a/include/linux/mfd/mt6392/registers.h b/include/linux/mfd/mt6392/registers.h
new file mode 100644
index 0000000..f02b478
--- /dev/null
+++ b/include/linux/mfd/mt6392/registers.h
@@ -0,0 +1,487 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ */
+
+#ifndef __MFD_MT6392_REGISTERS_H__
+#define __MFD_MT6392_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6392_CHR_CON0                         0x0000
+#define MT6392_CHR_CON1                         0x0002
+#define MT6392_CHR_CON2                         0x0004
+#define MT6392_CHR_CON3                         0x0006
+#define MT6392_CHR_CON4                         0x0008
+#define MT6392_CHR_CON5                         0x000A
+#define MT6392_CHR_CON6                         0x000C
+#define MT6392_CHR_CON7                         0x000E
+#define MT6392_CHR_CON8                         0x0010
+#define MT6392_CHR_CON9                         0x0012
+#define MT6392_CHR_CON10                        0x0014
+#define MT6392_CHR_CON11                        0x0016
+#define MT6392_CHR_CON12                        0x0018
+#define MT6392_CHR_CON13                        0x001A
+#define MT6392_CHR_CON14                        0x001C
+#define MT6392_CHR_CON15                        0x001E
+#define MT6392_CHR_CON16                        0x0020
+#define MT6392_CHR_CON17                        0x0022
+#define MT6392_CHR_CON18                        0x0024
+#define MT6392_CHR_CON19                        0x0026
+#define MT6392_CHR_CON20                        0x0028
+#define MT6392_CHR_CON21                        0x002A
+#define MT6392_CHR_CON22                        0x002C
+#define MT6392_CHR_CON23                        0x002E
+#define MT6392_CHR_CON24                        0x0030
+#define MT6392_CHR_CON25                        0x0032
+#define MT6392_CHR_CON26                        0x0034
+#define MT6392_CHR_CON27                        0x0036
+#define MT6392_CHR_CON28                        0x0038
+#define MT6392_CHR_CON29                        0x003A
+#define MT6392_STRUP_CON0                       0x003C
+#define MT6392_STRUP_CON2                       0x003E
+#define MT6392_STRUP_CON3                       0x0040
+#define MT6392_STRUP_CON4                       0x0042
+#define MT6392_STRUP_CON5                       0x0044
+#define MT6392_STRUP_CON6                       0x0046
+#define MT6392_STRUP_CON7                       0x0048
+#define MT6392_STRUP_CON8                       0x004A
+#define MT6392_STRUP_CON9                       0x004C
+#define MT6392_STRUP_CON10                      0x004E
+#define MT6392_STRUP_CON11                      0x0050
+#define MT6392_SPK_CON0                         0x0052
+#define MT6392_SPK_CON1                         0x0054
+#define MT6392_SPK_CON2                         0x0056
+#define MT6392_SPK_CON6                         0x005E
+#define MT6392_SPK_CON7                         0x0060
+#define MT6392_SPK_CON8                         0x0062
+#define MT6392_SPK_CON9                         0x0064
+#define MT6392_SPK_CON10                        0x0066
+#define MT6392_SPK_CON11                        0x0068
+#define MT6392_SPK_CON12                        0x006A
+#define MT6392_STRUP_CON12                      0x006E
+#define MT6392_STRUP_CON13                      0x0070
+#define MT6392_STRUP_CON14                      0x0072
+#define MT6392_STRUP_CON15                      0x0074
+#define MT6392_STRUP_CON16                      0x0076
+#define MT6392_STRUP_CON17                      0x0078
+#define MT6392_STRUP_CON18                      0x007A
+#define MT6392_STRUP_CON19                      0x007C
+#define MT6392_STRUP_CON20                      0x007E
+#define MT6392_CID                              0x0100
+#define MT6392_TOP_CKPDN0                       0x0102
+#define MT6392_TOP_CKPDN0_SET                   0x0104
+#define MT6392_TOP_CKPDN0_CLR                   0x0106
+#define MT6392_TOP_CKPDN1                       0x0108
+#define MT6392_TOP_CKPDN1_SET                   0x010A
+#define MT6392_TOP_CKPDN1_CLR                   0x010C
+#define MT6392_TOP_CKPDN2                       0x010E
+#define MT6392_TOP_CKPDN2_SET                   0x0110
+#define MT6392_TOP_CKPDN2_CLR                   0x0112
+#define MT6392_TOP_RST_CON                      0x0114
+#define MT6392_TOP_RST_CON_SET                  0x0116
+#define MT6392_TOP_RST_CON_CLR                  0x0118
+#define MT6392_TOP_RST_MISC                     0x011A
+#define MT6392_TOP_RST_MISC_SET                 0x011C
+#define MT6392_TOP_RST_MISC_CLR                 0x011E
+#define MT6392_TOP_CKCON0                       0x0120
+#define MT6392_TOP_CKCON0_SET                   0x0122
+#define MT6392_TOP_CKCON0_CLR                   0x0124
+#define MT6392_TOP_CKCON1                       0x0126
+#define MT6392_TOP_CKCON1_SET                   0x0128
+#define MT6392_TOP_CKCON1_CLR                   0x012A
+#define MT6392_TOP_CKTST0                       0x012C
+#define MT6392_TOP_CKTST1                       0x012E
+#define MT6392_TOP_CKTST2                       0x0130
+#define MT6392_TEST_OUT                         0x0132
+#define MT6392_TEST_CON0                        0x0134
+#define MT6392_TEST_CON1                        0x0136
+#define MT6392_EN_STATUS0                       0x0138
+#define MT6392_EN_STATUS1                       0x013A
+#define MT6392_OCSTATUS0                        0x013C
+#define MT6392_OCSTATUS1                        0x013E
+#define MT6392_PGSTATUS                         0x0140
+#define MT6392_CHRSTATUS                        0x0142
+#define MT6392_TDSEL_CON                        0x0144
+#define MT6392_RDSEL_CON                        0x0146
+#define MT6392_SMT_CON0                         0x0148
+#define MT6392_SMT_CON1                         0x014A
+#define MT6392_DRV_CON0                         0x0152
+#define MT6392_DRV_CON1                         0x0154
+#define MT6392_INT_CON0                         0x0160
+#define MT6392_INT_CON0_SET                     0x0162
+#define MT6392_INT_CON0_CLR                     0x0164
+#define MT6392_INT_CON1                         0x0166
+#define MT6392_INT_CON1_SET                     0x0168
+#define MT6392_INT_CON1_CLR                     0x016A
+#define MT6392_INT_MISC_CON                     0x016C
+#define MT6392_INT_MISC_CON_SET                 0x016E
+#define MT6392_INT_MISC_CON_CLR                 0x0170
+#define MT6392_INT_STATUS0                      0x0172
+#define MT6392_INT_STATUS1                      0x0174
+#define MT6392_OC_GEAR_0                        0x0176
+#define MT6392_OC_GEAR_1                        0x0178
+#define MT6392_OC_GEAR_2                        0x017A
+#define MT6392_OC_CTL_VPROC                     0x017C
+#define MT6392_OC_CTL_VSYS                      0x017E
+#define MT6392_OC_CTL_VCORE                     0x0180
+#define MT6392_FQMTR_CON0                       0x0182
+#define MT6392_FQMTR_CON1                       0x0184
+#define MT6392_FQMTR_CON2                       0x0186
+#define MT6392_RG_SPI_CON                       0x0188
+#define MT6392_DEW_DIO_EN                       0x018A
+#define MT6392_DEW_READ_TEST                    0x018C
+#define MT6392_DEW_WRITE_TEST                   0x018E
+#define MT6392_DEW_CRC_SWRST                    0x0190
+#define MT6392_DEW_CRC_EN                       0x0192
+#define MT6392_DEW_CRC_VAL                      0x0194
+#define MT6392_DEW_DBG_MON_SEL                  0x0196
+#define MT6392_DEW_CIPHER_KEY_SEL               0x0198
+#define MT6392_DEW_CIPHER_IV_SEL                0x019A
+#define MT6392_DEW_CIPHER_EN                    0x019C
+#define MT6392_DEW_CIPHER_RDY                   0x019E
+#define MT6392_DEW_CIPHER_MODE                  0x01A0
+#define MT6392_DEW_CIPHER_SWRST                 0x01A2
+#define MT6392_DEW_RDDMY_NO                     0x01A4
+#define MT6392_DEW_RDATA_DLY_SEL                0x01A6
+#define MT6392_CLK_TRIM_CON0                    0x01A8
+#define MT6392_BUCK_CON0                        0x0200
+#define MT6392_BUCK_CON1                        0x0202
+#define MT6392_BUCK_CON2                        0x0204
+#define MT6392_BUCK_CON3                        0x0206
+#define MT6392_BUCK_CON4                        0x0208
+#define MT6392_BUCK_CON5                        0x020A
+#define MT6392_VPROC_CON0                       0x020C
+#define MT6392_VPROC_CON1                       0x020E
+#define MT6392_VPROC_CON2                       0x0210
+#define MT6392_VPROC_CON3                       0x0212
+#define MT6392_VPROC_CON4                       0x0214
+#define MT6392_VPROC_CON5                       0x0216
+#define MT6392_VPROC_CON7                       0x021A
+#define MT6392_VPROC_CON8                       0x021C
+#define MT6392_VPROC_CON9                       0x021E
+#define MT6392_VPROC_CON10                      0x0220
+#define MT6392_VPROC_CON11                      0x0222
+#define MT6392_VPROC_CON12                      0x0224
+#define MT6392_VPROC_CON13                      0x0226
+#define MT6392_VPROC_CON14                      0x0228
+#define MT6392_VPROC_CON15                      0x022A
+#define MT6392_VPROC_CON18                      0x0230
+#define MT6392_VSYS_CON0                        0x0232
+#define MT6392_VSYS_CON1                        0x0234
+#define MT6392_VSYS_CON2                        0x0236
+#define MT6392_VSYS_CON3                        0x0238
+#define MT6392_VSYS_CON4                        0x023A
+#define MT6392_VSYS_CON5                        0x023C
+#define MT6392_VSYS_CON7                        0x0240
+#define MT6392_VSYS_CON8                        0x0242
+#define MT6392_VSYS_CON9                        0x0244
+#define MT6392_VSYS_CON10                       0x0246
+#define MT6392_VSYS_CON11                       0x0248
+#define MT6392_VSYS_CON12                       0x024A
+#define MT6392_VSYS_CON13                       0x024C
+#define MT6392_VSYS_CON14                       0x024E
+#define MT6392_VSYS_CON15                       0x0250
+#define MT6392_VSYS_CON18                       0x0256
+#define MT6392_BUCK_OC_CON0                     0x0258
+#define MT6392_BUCK_OC_CON1                     0x025A
+#define MT6392_BUCK_OC_CON2                     0x025C
+#define MT6392_BUCK_OC_CON3                     0x025E
+#define MT6392_BUCK_OC_CON4                     0x0260
+#define MT6392_BUCK_OC_VPROC_CON0               0x0262
+#define MT6392_BUCK_OC_VCORE_CON0               0x0264
+#define MT6392_BUCK_OC_VSYS_CON0                0x0266
+#define MT6392_BUCK_ANA_MON_CON0                0x0268
+#define MT6392_BUCK_EFUSE_OC_CON0               0x026A
+#define MT6392_VCORE_CON0                       0x0300
+#define MT6392_VCORE_CON1                       0x0302
+#define MT6392_VCORE_CON2                       0x0304
+#define MT6392_VCORE_CON3                       0x0306
+#define MT6392_VCORE_CON4                       0x0308
+#define MT6392_VCORE_CON5                       0x030A
+#define MT6392_VCORE_CON7                       0x030E
+#define MT6392_VCORE_CON8                       0x0310
+#define MT6392_VCORE_CON9                       0x0312
+#define MT6392_VCORE_CON10                      0x0314
+#define MT6392_VCORE_CON11                      0x0316
+#define MT6392_VCORE_CON12                      0x0318
+#define MT6392_VCORE_CON13                      0x031A
+#define MT6392_VCORE_CON14                      0x031C
+#define MT6392_VCORE_CON15                      0x031E
+#define MT6392_VCORE_CON18                      0x0324
+#define MT6392_BUCK_K_CON0                      0x032A
+#define MT6392_BUCK_K_CON1                      0x032C
+#define MT6392_BUCK_K_CON2                      0x032E
+#define MT6392_ANALDO_CON0                      0x0400
+#define MT6392_ANALDO_CON1                      0x0402
+#define MT6392_ANALDO_CON2                      0x0404
+#define MT6392_ANALDO_CON3                      0x0406
+#define MT6392_ANALDO_CON4                      0x0408
+#define MT6392_ANALDO_CON6                      0x040C
+#define MT6392_ANALDO_CON7                      0x040E
+#define MT6392_ANALDO_CON8                      0x0410
+#define MT6392_ANALDO_CON10                     0x0412
+#define MT6392_ANALDO_CON15                     0x0414
+#define MT6392_ANALDO_CON16                     0x0416
+#define MT6392_ANALDO_CON17                     0x0418
+#define MT6392_ANALDO_CON21                     0x0420
+#define MT6392_ANALDO_CON22                     0x0422
+#define MT6392_ANALDO_CON23                     0x0424
+#define MT6392_ANALDO_CON24                     0x0426
+#define MT6392_ANALDO_CON25                     0x0428
+#define MT6392_ANALDO_CON26                     0x042A
+#define MT6392_ANALDO_CON27                     0x042C
+#define MT6392_ANALDO_CON28                     0x042E
+#define MT6392_ANALDO_CON29                     0x0430
+#define MT6392_DIGLDO_CON0                      0x0500
+#define MT6392_DIGLDO_CON2                      0x0502
+#define MT6392_DIGLDO_CON3                      0x0504
+#define MT6392_DIGLDO_CON5                      0x0506
+#define MT6392_DIGLDO_CON6                      0x0508
+#define MT6392_DIGLDO_CON7                      0x050A
+#define MT6392_DIGLDO_CON8                      0x050C
+#define MT6392_DIGLDO_CON10                     0x0510
+#define MT6392_DIGLDO_CON11                     0x0512
+#define MT6392_DIGLDO_CON12                     0x0514
+#define MT6392_DIGLDO_CON15                     0x051A
+#define MT6392_DIGLDO_CON20                     0x0524
+#define MT6392_DIGLDO_CON21                     0x0526
+#define MT6392_DIGLDO_CON23                     0x0528
+#define MT6392_DIGLDO_CON24                     0x052A
+#define MT6392_DIGLDO_CON26                     0x052C
+#define MT6392_DIGLDO_CON27                     0x052E
+#define MT6392_DIGLDO_CON28                     0x0530
+#define MT6392_DIGLDO_CON29                     0x0532
+#define MT6392_DIGLDO_CON30                     0x0534
+#define MT6392_DIGLDO_CON31                     0x0536
+#define MT6392_DIGLDO_CON32                     0x0538
+#define MT6392_DIGLDO_CON33                     0x053A
+#define MT6392_DIGLDO_CON36                     0x0540
+#define MT6392_DIGLDO_CON41                     0x0546
+#define MT6392_DIGLDO_CON44                     0x054C
+#define MT6392_DIGLDO_CON47                     0x0552
+#define MT6392_DIGLDO_CON48                     0x0554
+#define MT6392_DIGLDO_CON49                     0x0556
+#define MT6392_DIGLDO_CON50                     0x0558
+#define MT6392_DIGLDO_CON51                     0x055A
+#define MT6392_DIGLDO_CON52                     0x055C
+#define MT6392_DIGLDO_CON53                     0x055E
+#define MT6392_DIGLDO_CON54                     0x0560
+#define MT6392_DIGLDO_CON55                     0x0562
+#define MT6392_DIGLDO_CON56                     0x0564
+#define MT6392_DIGLDO_CON57                     0x0566
+#define MT6392_DIGLDO_CON58                     0x0568
+#define MT6392_DIGLDO_CON59                     0x056A
+#define MT6392_DIGLDO_CON60                     0x056C
+#define MT6392_DIGLDO_CON61                     0x056E
+#define MT6392_DIGLDO_CON62                     0x0570
+#define MT6392_DIGLDO_CON63                     0x0572
+#define MT6392_EFUSE_CON0                       0x0600
+#define MT6392_EFUSE_CON1                       0x0602
+#define MT6392_EFUSE_CON2                       0x0604
+#define MT6392_EFUSE_CON3                       0x0606
+#define MT6392_EFUSE_CON4                       0x0608
+#define MT6392_EFUSE_CON5                       0x060A
+#define MT6392_EFUSE_CON6                       0x060C
+#define MT6392_EFUSE_VAL_0_15                   0x060E
+#define MT6392_EFUSE_VAL_16_31                  0x0610
+#define MT6392_EFUSE_VAL_32_47                  0x0612
+#define MT6392_EFUSE_VAL_48_63                  0x0614
+#define MT6392_EFUSE_VAL_64_79                  0x0616
+#define MT6392_EFUSE_VAL_80_95                  0x0618
+#define MT6392_EFUSE_VAL_96_111                 0x061A
+#define MT6392_EFUSE_VAL_112_127                0x061C
+#define MT6392_EFUSE_VAL_128_143                0x061E
+#define MT6392_EFUSE_VAL_144_159                0x0620
+#define MT6392_EFUSE_VAL_160_175                0x0622
+#define MT6392_EFUSE_VAL_176_191                0x0624
+#define MT6392_EFUSE_VAL_192_207                0x0626
+#define MT6392_EFUSE_VAL_208_223                0x0628
+#define MT6392_EFUSE_VAL_224_239                0x062A
+#define MT6392_EFUSE_VAL_240_255                0x062C
+#define MT6392_EFUSE_VAL_256_271                0x062E
+#define MT6392_EFUSE_VAL_272_287                0x0630
+#define MT6392_EFUSE_VAL_288_303                0x0632
+#define MT6392_EFUSE_VAL_304_319                0x0634
+#define MT6392_EFUSE_VAL_320_335                0x0636
+#define MT6392_EFUSE_VAL_336_351                0x0638
+#define MT6392_EFUSE_VAL_352_367                0x063A
+#define MT6392_EFUSE_VAL_368_383                0x063C
+#define MT6392_EFUSE_VAL_384_399                0x063E
+#define MT6392_EFUSE_VAL_400_415                0x0640
+#define MT6392_EFUSE_VAL_416_431                0x0642
+#define MT6392_RTC_MIX_CON0                     0x0644
+#define MT6392_RTC_MIX_CON1                     0x0646
+#define MT6392_EFUSE_VAL_432_447                0x0648
+#define MT6392_EFUSE_VAL_448_463                0x064A
+#define MT6392_EFUSE_VAL_464_479                0x064C
+#define MT6392_EFUSE_VAL_480_495                0x064E
+#define MT6392_EFUSE_VAL_496_511                0x0650
+#define MT6392_EFUSE_DOUT_0_15                  0x0652
+#define MT6392_EFUSE_DOUT_16_31                 0x0654
+#define MT6392_EFUSE_DOUT_32_47                 0x0656
+#define MT6392_EFUSE_DOUT_48_63                 0x0658
+#define MT6392_EFUSE_DOUT_64_79                 0x065A
+#define MT6392_EFUSE_DOUT_80_95                 0x065C
+#define MT6392_EFUSE_DOUT_96_111                0x065E
+#define MT6392_EFUSE_DOUT_112_127               0x0660
+#define MT6392_EFUSE_DOUT_128_143               0x0662
+#define MT6392_EFUSE_DOUT_144_159               0x0664
+#define MT6392_EFUSE_DOUT_160_175               0x0666
+#define MT6392_EFUSE_DOUT_176_191               0x0668
+#define MT6392_EFUSE_DOUT_192_207               0x066A
+#define MT6392_EFUSE_DOUT_208_223               0x066C
+#define MT6392_EFUSE_DOUT_224_239               0x066E
+#define MT6392_EFUSE_DOUT_240_255               0x0670
+#define MT6392_EFUSE_DOUT_256_271               0x0672
+#define MT6392_EFUSE_DOUT_272_287               0x0674
+#define MT6392_EFUSE_DOUT_288_303               0x0676
+#define MT6392_EFUSE_DOUT_304_319               0x0678
+#define MT6392_EFUSE_DOUT_320_335               0x067A
+#define MT6392_EFUSE_DOUT_336_351               0x067C
+#define MT6392_EFUSE_DOUT_352_367               0x067E
+#define MT6392_EFUSE_DOUT_368_383               0x0680
+#define MT6392_EFUSE_DOUT_384_399               0x0682
+#define MT6392_EFUSE_DOUT_400_415               0x0684
+#define MT6392_EFUSE_DOUT_416_431               0x0686
+#define MT6392_EFUSE_DOUT_432_447               0x0688
+#define MT6392_EFUSE_DOUT_448_463               0x068A
+#define MT6392_EFUSE_DOUT_464_479               0x068C
+#define MT6392_EFUSE_DOUT_480_495               0x068E
+#define MT6392_EFUSE_DOUT_496_511               0x0690
+#define MT6392_EFUSE_CON7                       0x0692
+#define MT6392_EFUSE_CON8                       0x0694
+#define MT6392_EFUSE_CON9                       0x0696
+#define MT6392_AUXADC_ADC0                      0x0700
+#define MT6392_AUXADC_ADC1                      0x0702
+#define MT6392_AUXADC_ADC2                      0x0704
+#define MT6392_AUXADC_ADC3                      0x0706
+#define MT6392_AUXADC_ADC4                      0x0708
+#define MT6392_AUXADC_ADC5                      0x070A
+#define MT6392_AUXADC_ADC6                      0x070C
+#define MT6392_AUXADC_ADC7                      0x070E
+#define MT6392_AUXADC_ADC8                      0x0710
+#define MT6392_AUXADC_ADC9                      0x0712
+#define MT6392_AUXADC_ADC10                     0x0714
+#define MT6392_AUXADC_ADC11                     0x0716
+#define MT6392_AUXADC_ADC12                     0x0718
+#define MT6392_AUXADC_ADC13                     0x071A
+#define MT6392_AUXADC_ADC14                     0x071C
+#define MT6392_AUXADC_ADC15                     0x071E
+#define MT6392_AUXADC_ADC16                     0x0720
+#define MT6392_AUXADC_ADC17                     0x0722
+#define MT6392_AUXADC_ADC18                     0x0724
+#define MT6392_AUXADC_ADC19                     0x0726
+#define MT6392_AUXADC_ADC20                     0x0728
+#define MT6392_AUXADC_ADC21                     0x072A
+#define MT6392_AUXADC_ADC22                     0x072C
+#define MT6392_AUXADC_STA0                      0x072E
+#define MT6392_AUXADC_STA1                      0x0730
+#define MT6392_AUXADC_RQST0                     0x0732
+#define MT6392_AUXADC_RQST0_SET                 0x0734
+#define MT6392_AUXADC_RQST0_CLR                 0x0736
+#define MT6392_AUXADC_CON0                      0x0738
+#define MT6392_AUXADC_CON0_SET                  0x073A
+#define MT6392_AUXADC_CON0_CLR                  0x073C
+#define MT6392_AUXADC_CON1                      0x073E
+#define MT6392_AUXADC_CON2                      0x0740
+#define MT6392_AUXADC_CON3                      0x0742
+#define MT6392_AUXADC_CON4                      0x0744
+#define MT6392_AUXADC_CON5                      0x0746
+#define MT6392_AUXADC_CON6                      0x0748
+#define MT6392_AUXADC_CON7                      0x074A
+#define MT6392_AUXADC_CON8                      0x074C
+#define MT6392_AUXADC_CON9                      0x074E
+#define MT6392_AUXADC_CON10                     0x0750
+#define MT6392_AUXADC_CON11                     0x0752
+#define MT6392_AUXADC_CON12                     0x0754
+#define MT6392_AUXADC_CON13                     0x0756
+#define MT6392_AUXADC_CON14                     0x0758
+#define MT6392_AUXADC_CON15                     0x075A
+#define MT6392_AUXADC_CON16                     0x075C
+#define MT6392_AUXADC_AUTORPT0                  0x075E
+#define MT6392_AUXADC_LBAT0                     0x0760
+#define MT6392_AUXADC_LBAT1                     0x0762
+#define MT6392_AUXADC_LBAT2                     0x0764
+#define MT6392_AUXADC_LBAT3                     0x0766
+#define MT6392_AUXADC_LBAT4                     0x0768
+#define MT6392_AUXADC_LBAT5                     0x076A
+#define MT6392_AUXADC_LBAT6                     0x076C
+#define MT6392_AUXADC_THR0                      0x076E
+#define MT6392_AUXADC_THR1                      0x0770
+#define MT6392_AUXADC_THR2                      0x0772
+#define MT6392_AUXADC_THR3                      0x0774
+#define MT6392_AUXADC_THR4                      0x0776
+#define MT6392_AUXADC_THR5                      0x0778
+#define MT6392_AUXADC_THR6                      0x077A
+#define MT6392_AUXADC_EFUSE0                    0x077C
+#define MT6392_AUXADC_EFUSE1                    0x077E
+#define MT6392_AUXADC_EFUSE2                    0x0780
+#define MT6392_AUXADC_EFUSE3                    0x0782
+#define MT6392_AUXADC_EFUSE4                    0x0784
+#define MT6392_AUXADC_EFUSE5                    0x0786
+#define MT6392_AUXADC_NAG_0                     0x0788
+#define MT6392_AUXADC_NAG_1                     0x078A
+#define MT6392_AUXADC_NAG_2                     0x078C
+#define MT6392_AUXADC_NAG_3                     0x078E
+#define MT6392_AUXADC_NAG_4                     0x0790
+#define MT6392_AUXADC_NAG_5                     0x0792
+#define MT6392_AUXADC_NAG_6                     0x0794
+#define MT6392_AUXADC_NAG_7                     0x0796
+#define MT6392_AUXADC_NAG_8                     0x0798
+#define MT6392_AUXADC_TYPEC_H_1                 0x079A
+#define MT6392_AUXADC_TYPEC_H_2                 0x079C
+#define MT6392_AUXADC_TYPEC_H_3                 0x079E
+#define MT6392_AUXADC_TYPEC_H_4                 0x07A0
+#define MT6392_AUXADC_TYPEC_H_5                 0x07A2
+#define MT6392_AUXADC_TYPEC_H_6                 0x07A4
+#define MT6392_AUXADC_TYPEC_H_7                 0x07A6
+#define MT6392_AUXADC_TYPEC_L_1                 0x07A8
+#define MT6392_AUXADC_TYPEC_L_2                 0x07AA
+#define MT6392_AUXADC_TYPEC_L_3                 0x07AC
+#define MT6392_AUXADC_TYPEC_L_4                 0x07AE
+#define MT6392_AUXADC_TYPEC_L_5                 0x07B0
+#define MT6392_AUXADC_TYPEC_L_6                 0x07B2
+#define MT6392_AUXADC_TYPEC_L_7                 0x07B4
+#define MT6392_AUXADC_NAG_9                     0x07B6
+#define MT6392_TYPE_C_PHY_RG_0                  0x0800
+#define MT6392_TYPE_C_PHY_RG_CC_RESERVE_CSR     0x0802
+#define MT6392_TYPE_C_VCMP_CTRL                 0x0804
+#define MT6392_TYPE_C_CTRL                      0x0806
+#define MT6392_TYPE_C_CC_SW_CTRL                0x080A
+#define MT6392_TYPE_C_CC_VOL_PERIODIC_MEAS_VAL  0x080C
+#define MT6392_TYPE_C_CC_VOL_DEBOUCE_CNT_VAL    0x080E
+#define MT6392_TYPE_C_DRP_SRC_CNT_VAL_0         0x0810
+#define MT6392_TYPE_C_DRP_SNK_CNT_VAL_0         0x0814
+#define MT6392_TYPE_C_DRP_TRY_CNT_VAL_0         0x0818
+#define MT6392_TYPE_C_CC_SRC_DEFAULT_DAC_VAL    0x0820
+#define MT6392_TYPE_C_CC_SRC_15_DAC_VAL         0x0822
+#define MT6392_TYPE_C_CC_SRC_30_DAC_VAL         0x0824
+#define MT6392_TYPE_C_CC_SNK_DAC_VAL_0          0x0828
+#define MT6392_TYPE_C_CC_SNK_DAC_VAL_1          0x082A
+#define MT6392_TYPE_C_INTR_EN_0                 0x0830
+#define MT6392_TYPE_C_INTR_EN_2                 0x0834
+#define MT6392_TYPE_C_INTR_0                    0x0838
+#define MT6392_TYPE_C_INTR_2                    0x083C
+#define MT6392_TYPE_C_CC_STATUS                 0x0840
+#define MT6392_TYPE_C_PWR_STATUS                0x0842
+#define MT6392_TYPE_C_PHY_RG_CC1_RESISTENCE_0   0x0844
+#define MT6392_TYPE_C_PHY_RG_CC1_RESISTENCE_1   0x0846
+#define MT6392_TYPE_C_PHY_RG_CC2_RESISTENCE_0   0x0848
+#define MT6392_TYPE_C_PHY_RG_CC2_RESISTENCE_1   0x084A
+#define MT6392_TYPE_C_CC_SW_FORCE_MODE_ENABLE_0 0x0860
+#define MT6392_TYPE_C_CC_SW_FORCE_MODE_VAL_0    0x0864
+#define MT6392_TYPE_C_CC_SW_FORCE_MODE_VAL_1    0x0866
+#define MT6392_TYPE_C_CC_SW_FORCE_MODE_ENABLE_1 0x0868
+#define MT6392_TYPE_C_CC_SW_FORCE_MODE_VAL_2    0x086C
+#define MT6392_TYPE_C_CC_DAC_CALI_CTRL          0x0870
+#define MT6392_TYPE_C_CC_DAC_CALI_RESULT        0x0872
+#define MT6392_TYPE_C_DEBUG_PORT_SELECT_0       0x0880
+#define MT6392_TYPE_C_DEBUG_PORT_SELECT_1       0x0882
+#define MT6392_TYPE_C_DEBUG_MODE_SELECT         0x0884
+#define MT6392_TYPE_C_DEBUG_OUT_READ_0          0x0888
+#define MT6392_TYPE_C_DEBUG_OUT_READ_1          0x088A
+#define MT6392_TYPE_C_SW_DEBUG_PORT_0           0x088C
+#define MT6392_TYPE_C_SW_DEBUG_PORT_1           0x088E
+
+#endif /* __MFD_MT6392_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index d678f52..2548693 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -15,6 +15,16 @@
 #ifndef __MFD_MT6397_CORE_H__
 #define __MFD_MT6397_CORE_H__
 
+#include <linux/notifier.h>
+
+enum chip_id {
+	MT6323_CHIP_ID = 0x23,
+	MT6358_CHIP_ID = 0x58,
+	MT6391_CHIP_ID = 0x91,
+	MT6392_CHIP_ID = 0x92,
+	MT6397_CHIP_ID = 0x97,
+};
+
 enum mt6397_irq_numbers {
 	MT6397_IRQ_SPKL_AB = 0,
 	MT6397_IRQ_SPKR_AB,
@@ -54,6 +64,7 @@
 struct mt6397_chip {
 	struct device *dev;
 	struct regmap *regmap;
+	struct notifier_block pm_nb;
 	int irq;
 	struct irq_domain *irq_domain;
 	struct mutex irqlock;
@@ -62,6 +73,11 @@
 	u16 irq_masks_cache[2];
 	u16 int_con[2];
 	u16 int_status[2];
+	u16 chip_id;
+	void *irq_data;
 };
 
+int mt6358_irq_init(struct mt6397_chip *chip);
+int mt6397_irq_init(struct mt6397_chip *chip);
+
 #endif /* __MFD_MT6397_CORE_H__ */
diff --git a/include/linux/mfd/mt6397/rtc_misc.h b/include/linux/mfd/mt6397/rtc_misc.h
new file mode 100644
index 0000000..bf53fe9e2
--- /dev/null
+++ b/include/linux/mfd/mt6397/rtc_misc.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014-2015 MediaTek Inc.
+ * Author: Tianping.Fang <tianping.fang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT6397_RTC_MISC_H__
+#define __MT6397_RTC_MISC_H__
+#include <linux/types.h>
+
+typedef enum {
+	RTC_GPIO_USER_WIFI = 8,
+	RTC_GPIO_USER_GPS = 9,
+	RTC_GPIO_USER_BT = 10,
+	RTC_GPIO_USER_FM = 11,
+	RTC_GPIO_USER_PMIC = 12,
+} rtc_gpio_user_t;
+
+#ifdef CONFIG_MT6397_MISC
+extern void mtk_misc_mark_fast(void);
+extern void mtk_misc_mark_recovery(void);
+extern bool mtk_misc_low_power_detected(void);
+extern bool mtk_misc_crystal_exist_status(void);
+extern int mtk_misc_set_spare_fg_value(u32 val);
+extern u32 mtk_misc_get_spare_fg_value(void);
+extern void rtc_gpio_enable_32k(rtc_gpio_user_t user);
+extern void rtc_gpio_disable_32k(rtc_gpio_user_t user);
+#else
+#define mtk_misc_mark_fast()			do {} while (0)
+#define mtk_misc_mark_recovery()		do {} while (0)
+#define mtk_misc_low_power_detected()		({ 0; })
+#define mtk_misc_crystal_exist_status()		({ 1; })
+#define mtk_misc_set_spare_fg_value(val)	({ 0; })
+#define mtk_misc_get_spare_fg_value()		({ 0; })
+#define rtc_gpio_enable_32k(user)		do {} while (0)
+#define rtc_gpio_disable_32k(user)		do {} while (0)
+#endif
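+
+/*
+ * Illustrative usage sketch (not part of this patch): a connectivity
+ * driver can keep the RTC 32k clock output enabled while it is active.
+ *
+ *	rtc_gpio_enable_32k(RTC_GPIO_USER_WIFI);
+ *	... use the 32k clock ...
+ *	rtc_gpio_disable_32k(RTC_GPIO_USER_WIFI);
+ */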
+#endif /* __MT6397_RTC_MISC_H__ */
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 099b319..a61c7d3 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -25,6 +25,7 @@
 
 enum dev_pm_opp_event {
 	OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+	OPP_EVENT_ADJUST_VOLTAGE,
 };
 
 /**
@@ -83,6 +84,9 @@
 
 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
 
+unsigned long dev_pm_opp_get_voltage_supply(struct dev_pm_opp *opp,
+					    unsigned int index);
+
 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
 
 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
@@ -107,6 +111,10 @@
 int dev_pm_opp_add(struct device *dev, unsigned long freq,
 		   unsigned long u_volt);
 void dev_pm_opp_remove(struct device *dev, unsigned long freq);
+void dev_pm_opp_remove_all_dynamic(struct device *dev);
+
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+			      unsigned long u_volt);
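+
+/*
+ * Illustrative usage (not part of this patch): an AVS/thermal driver
+ * trimming the voltage of an existing 500 MHz OPP; the frequency and
+ * voltage values below are hypothetical.
+ *
+ *	dev_pm_opp_adjust_voltage(dev, 500000000, 850000);
+ */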
 
 int dev_pm_opp_enable(struct device *dev, unsigned long freq);
 
@@ -208,6 +216,17 @@
 {
 }
 
+static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+}
+
+static inline int
+dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+			  unsigned long u_volt)
+{
+	return 0;
+}
+
 static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
 {
 	return 0;
diff --git a/include/linux/power/mtk_svs.h b/include/linux/power/mtk_svs.h
new file mode 100644
index 0000000..d5efca8
--- /dev/null
+++ b/include/linux/power/mtk_svs.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ */
+
+#ifndef __MTK_SVS_H__
+#define __MTK_SVS_H__
+
+#if defined(CONFIG_MTK_SVS)
+unsigned long claim_mtk_svs_lock(void);
+void release_mtk_svs_lock(unsigned long flags);
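+
+/*
+ * Illustrative usage sketch (an assumption, not from this patch): callers
+ * bracket shared voltage updates with the SVS lock so they do not race
+ * with SVS bank updates.
+ *
+ *	unsigned long flags = claim_mtk_svs_lock();
+ *	... update the shared voltage state ...
+ *	release_mtk_svs_lock(flags);
+ */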
+#else
+static inline unsigned long claim_mtk_svs_lock(void)
+{
+	return 0;
+}
+
+static inline void release_mtk_svs_lock(unsigned long flags)
+{
+}
+#endif /* CONFIG_MTK_SVS */
+
+#endif /* __MTK_SVS_H__ */
diff --git a/include/linux/property.h b/include/linux/property.h
index 1a12364..216dcfe 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -78,6 +78,10 @@
 				       unsigned int nargs, unsigned int index,
 				       struct fwnode_reference_args *args);
 
+struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
+					    const char *name,
+					    unsigned int index);
+
 struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
 struct fwnode_handle *fwnode_get_next_parent(
 	struct fwnode_handle *fwnode);
diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h
new file mode 100644
index 0000000..1cc3049
--- /dev/null
+++ b/include/linux/regulator/mt6358-regulator.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6358_H
+#define __LINUX_REGULATOR_MT6358_H
+
+enum {
+	MT6358_ID_VDRAM1 = 0,
+	MT6358_ID_VCORE,
+	MT6358_ID_VPA,
+	MT6358_ID_VPROC11,
+	MT6358_ID_VPROC12,
+	MT6358_ID_VGPU,
+	MT6358_ID_VS2,
+	MT6358_ID_VMODEM,
+	MT6358_ID_VS1,
+	MT6358_ID_VDRAM2 = 9,
+	MT6358_ID_VSIM1,
+	MT6358_ID_VIBR,
+	MT6358_ID_VRF12,
+	MT6358_ID_VIO18,
+	MT6358_ID_VUSB,
+	MT6358_ID_VCAMIO,
+	MT6358_ID_VCAMD,
+	MT6358_ID_VCN18,
+	MT6358_ID_VFE28,
+	MT6358_ID_VSRAM_PROC11,
+	MT6358_ID_VCN28,
+	MT6358_ID_VSRAM_OTHERS,
+	MT6358_ID_VSRAM_GPU,
+	MT6358_ID_VXO22,
+	MT6358_ID_VEFUSE,
+	MT6358_ID_VAUX18,
+	MT6358_ID_VMCH,
+	MT6358_ID_VBIF28,
+	MT6358_ID_VSRAM_PROC12,
+	MT6358_ID_VCAMA1,
+	MT6358_ID_VEMC,
+	MT6358_ID_VIO28,
+	MT6358_ID_VA12,
+	MT6358_ID_VRF18,
+	MT6358_ID_VCN33_BT,
+	MT6358_ID_VCN33_WIFI,
+	MT6358_ID_VCAMA2,
+	MT6358_ID_VMC,
+	MT6358_ID_VLDO28,
+	MT6358_ID_VAUD28,
+	MT6358_ID_VSIM2,
+	MT6358_ID_RG_MAX,
+};
+
+#define MT6358_MAX_REGULATOR	MT6358_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6358_H */
diff --git a/include/linux/regulator/mt6392-regulator.h b/include/linux/regulator/mt6392-regulator.h
new file mode 100644
index 0000000..dfcbcac
--- /dev/null
+++ b/include/linux/regulator/mt6392-regulator.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ */
+
+#ifndef __LINUX_REGULATOR_MT6392_H
+#define __LINUX_REGULATOR_MT6392_H
+
+enum {
+	MT6392_ID_VPROC = 0,
+	MT6392_ID_VSYS,
+	MT6392_ID_VCORE,
+	MT6392_ID_VXO22,
+	MT6392_ID_VAUD22,
+	MT6392_ID_VCAMA,
+	MT6392_ID_VAUD28,
+	MT6392_ID_VADC18,
+	MT6392_ID_VCN35,
+	MT6392_ID_VIO28,
+	MT6392_ID_VUSB = 10,
+	MT6392_ID_VMC,
+	MT6392_ID_VMCH,
+	MT6392_ID_VEMC3V3,
+	MT6392_ID_VGP1,
+	MT6392_ID_VGP2,
+	MT6392_ID_VCN18,
+	MT6392_ID_VCAMAF,
+	MT6392_ID_VM,
+	MT6392_ID_VIO18,
+	MT6392_ID_VCAMD,
+	MT6392_ID_VCAMIO,
+	MT6392_ID_VM25,
+	MT6392_ID_VEFUSE,
+	MT6392_ID_RG_MAX,
+};
+
+#define MT6392_MAX_REGULATOR	MT6392_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6392_H */
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
new file mode 100644
index 0000000..be402c4
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ */
+
+#ifndef __MTK_CMDQ_H__
+#define __MTK_CMDQ_H__
+
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/mtk-cmdq-mailbox.h>
+#include <linux/timer.h>
+
+#define CMDQ_NO_TIMEOUT		0xffffffffu
+
+struct cmdq_pkt;
+
+struct cmdq_client_reg {
+	u8 subsys;
+	u16 offset;
+	u16 size;
+};
+
+struct cmdq_client {
+	spinlock_t lock;
+	u32 pkt_cnt;
+	struct mbox_client client;
+	struct mbox_chan *chan;
+	struct timer_list timer;
+	u32 timeout_ms; /* in unit of millisecond */
+};
+
+/**
+ * cmdq_mbox_create() - create CMDQ mailbox client and channel
+ * @dev:	device of CMDQ mailbox client
+ * @index:	index of CMDQ mailbox channel
+ * @timeout:	timeout of a pkt execution by GCE, in unit of millisecond, set
+ *		CMDQ_NO_TIMEOUT if a timer is not used.
+ *
+ * Return: CMDQ mailbox client pointer
+ */
+struct cmdq_client *cmdq_mbox_create(struct device *dev, int index,
+				     u32 timeout);
+
+/**
+ * cmdq_mbox_destroy() - destroy CMDQ mailbox client and channel
+ * @client:	the CMDQ mailbox client
+ */
+void cmdq_mbox_destroy(struct cmdq_client *client);
+
+/**
+ * cmdq_pkt_create() - create a CMDQ packet
+ * @client:	the CMDQ mailbox client
+ * @size:	required CMDQ buffer size
+ *
+ * Return: CMDQ packet pointer
+ */
+struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size);
+
+/**
+ * cmdq_pkt_destroy() - destroy the CMDQ packet
+ * @pkt:	the CMDQ packet
+ */
+void cmdq_pkt_destroy(struct cmdq_pkt *pkt);
+
+/**
+ * cmdq_pkt_write() - append write command to the CMDQ packet
+ * @pkt:	the CMDQ packet
+ * @subsys:	the CMDQ sub system code
+ * @offset:	register offset from CMDQ sub system
+ * @value:	the specified target register value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value);
+
+/**
+ * cmdq_pkt_write_mask() - append write command with mask to the CMDQ packet
+ * @pkt:	the CMDQ packet
+ * @subsys:	the CMDQ sub system code
+ * @offset:	register offset from CMDQ sub system
+ * @value:	the specified target register value
+ * @mask:	the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
+			u16 offset, u32 value, u32 mask);
+
+/**
+ * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet
+ * @pkt:	the CMDQ packet
+ * @event:	the desired event type to "wait and CLEAR"
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
+
+/**
+ * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet
+ * @pkt:	the CMDQ packet
+ * @event:	the desired event to be cleared
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event);
+
+/**
+ * cmdq_pkt_poll() - append a polling command to the CMDQ packet, asking GCE
+ *		     to execute an instruction that waits for a specified
+ *		     hardware register to match the given value. All GCE
+ *		     hardware threads will be blocked by this instruction.
+ * @pkt:	the CMDQ packet
+ * @subsys:	the CMDQ sub system code
+ * @offset:	register offset from CMDQ sub system
+ * @value:	the specified target register value
+ * @mask:	the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
+		  u16 offset, u32 value, u32 mask);
+
+/**
+ * cmdq_pkt_flush_async() - trigger CMDQ to execute the CMDQ packet
+ *                          asynchronously and call back when the packet
+ *                          has been executed
+ * @pkt:	the CMDQ packet
+ * @cb:		called when the packet has been executed
+ * @data:	this data will be passed back to cb
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Trigger CMDQ to execute the CMDQ packet asynchronously and call back
+ * when the packet has been executed. Note that this is an ASYNC function:
+ * when it returns, the packet may or may not have finished executing.
+ */
+int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
+			 void *data);
+
+/**
+ * cmdq_pkt_flush() - trigger CMDQ to execute the CMDQ packet
+ * @pkt:	the CMDQ packet
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Trigger CMDQ to execute the CMDQ packet. Note that this is a
+ * synchronous flush function: when it returns, the recorded commands
+ * have been executed.
+ */
+int cmdq_pkt_flush(struct cmdq_pkt *pkt);
+
+/**
+ * cmdq_dev_get_client_reg() - parse cmdq client reg from the device
+ *			       node of CMDQ client
+ * @dev:	device of CMDQ mailbox client
+ * @client_reg: CMDQ client reg pointer
+ * @idx:	the index of desired reg
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Helps the CMDQ client parse the cmdq client reg
+ * from the device node of the CMDQ client.
+ */
+int cmdq_dev_get_client_reg(struct device *dev,
+			    struct cmdq_client_reg *client_reg, int idx);
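+
+/*
+ * Illustrative end-to-end sketch (not part of this patch): the mailbox
+ * channel index, subsys code, register offset, value and event below are
+ * all hypothetical.
+ *
+ *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
+ *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
+ *
+ *	cmdq_pkt_write_mask(pkt, 0x14, 0x1000, 0x1, 0x1);
+ *	cmdq_pkt_wfe(pkt, 0);
+ *	cmdq_pkt_flush(pkt);
+ *
+ *	cmdq_pkt_destroy(pkt);
+ *	cmdq_mbox_destroy(cl);
+ */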
+
+#endif	/* __MTK_CMDQ_H__ */
diff --git a/include/linux/soc/mediatek/scpsys-ext.h b/include/linux/soc/mediatek/scpsys-ext.h
new file mode 100644
index 0000000..3e5b84d
--- /dev/null
+++ b/include/linux/soc/mediatek/scpsys-ext.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SOC_MEDIATEK_SCPSYS_EXT_H
+#define __SOC_MEDIATEK_SCPSYS_EXT_H
+
+#include <linux/types.h>
+
+struct regmap;
+
+#define MAX_STEPS	4
+
+#define BUS_PROT(_type, _set_ofs, _clr_ofs,			\
+		_en_ofs, _sta_ofs, _mask, _clr_ack_mask) {	\
+		.type = _type,					\
+		.set_ofs = _set_ofs,				\
+		.clr_ofs = _clr_ofs,				\
+		.en_ofs = _en_ofs,				\
+		.sta_ofs = _sta_ofs,				\
+		.mask = _mask,					\
+		.clr_ack_mask = _clr_ack_mask,			\
+	}
+
+enum regmap_type {
+	INVALID_TYPE = 0,
+	IFR_TYPE,
+	SMI_TYPE,
+};
+
+struct bus_prot {
+	enum regmap_type type;
+	u32 set_ofs;
+	u32 clr_ofs;
+	u32 en_ofs;
+	u32 sta_ofs;
+	u32 mask;
+	u32 clr_ack_mask;
+};
+
+int mtk_scpsys_ext_set_bus_protection(const struct bus_prot *bp_table,
+	struct regmap *infracfg, struct regmap *smi_common);
+int mtk_scpsys_ext_clear_bus_protection(const struct bus_prot *bp_table,
+	struct regmap *infracfg, struct regmap *smi_common);
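+
+/*
+ * Illustrative table sketch (the offsets and masks are hypothetical): a
+ * scpsys power-domain description can list up to MAX_STEPS bus protection
+ * steps and apply them in order.
+ *
+ *	static const struct bus_prot bp_table[MAX_STEPS] = {
+ *		BUS_PROT(IFR_TYPE, 0x2a0, 0x2a4, 0x2a0, 0x228,
+ *			 BIT(6), BIT(6)),
+ *	};
+ *
+ *	mtk_scpsys_ext_set_bus_protection(bp_table, infracfg, smi_common);
+ */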
+
+#endif /* __SOC_MEDIATEK_SCPSYS_EXT_H */
diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
index edc51be..2d77f97 100644
--- a/include/linux/usb/role.h
+++ b/include/linux/usb/role.h
@@ -18,6 +18,7 @@
 
 /**
  * struct usb_role_switch_desc - USB Role Switch Descriptor
+ * @fwnode: The device node to be associated with the role switch
  * @usb2_port: Optional reference to the host controller port device (USB2)
  * @usb3_port: Optional reference to the host controller port device (USB3)
  * @udc: Optional reference to the peripheral controller device
@@ -32,6 +33,7 @@
  * usb_role_switch_register() before registering the switch.
  */
 struct usb_role_switch_desc {
+	struct fwnode_handle *fwnode;
 	struct device *usb2_port;
 	struct device *usb3_port;
 	struct device *udc;
@@ -40,14 +42,51 @@
 	bool allow_userspace_control;
 };
 
+
+#if IS_ENABLED(CONFIG_USB_ROLE_SWITCH)
 int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role);
 enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw);
 struct usb_role_switch *usb_role_switch_get(struct device *dev);
+struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *node);
 void usb_role_switch_put(struct usb_role_switch *sw);
 
 struct usb_role_switch *
 usb_role_switch_register(struct device *parent,
 			 const struct usb_role_switch_desc *desc);
 void usb_role_switch_unregister(struct usb_role_switch *sw);
+#else
+static inline int usb_role_switch_set_role(struct usb_role_switch *sw,
+		enum usb_role role)
+{
+	return 0;
+}
+
+static inline enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
+{
+	return USB_ROLE_NONE;
+}
+
+static inline struct usb_role_switch *usb_role_switch_get(struct device *dev)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline struct usb_role_switch *
+fwnode_usb_role_switch_get(struct fwnode_handle *node)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void usb_role_switch_put(struct usb_role_switch *sw) { }
+
+static inline struct usb_role_switch *
+usb_role_switch_register(struct device *parent,
+			 const struct usb_role_switch_desc *desc)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void usb_role_switch_unregister(struct usb_role_switch *sw) { }
+#endif
 
 #endif /* __LINUX_USB_ROLE_H */
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h
index 79293f6..43f4068 100644
--- a/include/linux/usb/typec_mux.h
+++ b/include/linux/usb/typec_mux.h
@@ -47,7 +47,8 @@
 int typec_switch_register(struct typec_switch *sw);
 void typec_switch_unregister(struct typec_switch *sw);
 
-struct typec_mux *typec_mux_get(struct device *dev, const char *name);
+struct typec_mux *
+typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc);
 void typec_mux_put(struct typec_mux *mux);
 int typec_mux_register(struct typec_mux *mux);
 void typec_mux_unregister(struct typec_mux *mux);
diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h
index 5201e90..baad22f 100644
--- a/include/soc/mediatek/smi.h
+++ b/include/soc/mediatek/smi.h
@@ -28,11 +28,6 @@
 	unsigned int   mmu;
 };
 
-struct mtk_smi_iommu {
-	unsigned int larb_nr;
-	struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
-};
-
 /*
  * mtk_smi_larb_get: Enable the power domain and clocks for this local arbiter.
  *                   It also initialize some basic setting(like iommu).
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 88aa48e..4621613 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1114,6 +1114,10 @@
 
 	void *drvdata;
 };
+
+#define for_each_card_prelinks(card, i, link)				\
+	for ((i) = 0;							\
+	     ((i) < (card)->num_links) && ((link) = &(card)->dai_link[i]); \
+	     (i)++)
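+
+/*
+ * Illustrative usage of for_each_card_prelinks() (not part of this patch):
+ *
+ *	struct snd_soc_dai_link *link;
+ *	int i;
+ *
+ *	for_each_card_prelinks(card, i, link)
+ *		dev_dbg(card->dev, "prelink: %s\n", link->name);
+ */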
 
 /* SoC machine DAI configuration, glues a codec and cpu DAI together */
 struct snd_soc_pcm_runtime {
diff --git a/include/uapi/linux/mtk_vcu_controls.h b/include/uapi/linux/mtk_vcu_controls.h
new file mode 100644
index 0000000..40cb5ec
--- /dev/null
+++ b/include/uapi/linux/mtk_vcu_controls.h
@@ -0,0 +1,84 @@
+/*
+ * MediaTek Controls Header
+ *
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __UAPI_MTK_VCU_CONTROLS_H__
+#define __UAPI_MTK_VCU_CONTROLS_H__
+
+#include <linux/types.h>
+
+#define SHARE_BUF_SIZE 48
+
+#define VCUD_SET_OBJECT	_IOW('v', 0, struct share_obj)
+#define VCUD_MVA_ALLOCATION	_IOWR('v', 1, struct mem_obj)
+#define VCUD_MVA_FREE		_IOWR('v', 2, struct mem_obj)
+#define VCUD_CACHE_FLUSH_ALL	_IOWR('v', 3, struct mem_obj)
+#define VCUD_GET_OBJECT	_IOWR('v', 4, struct share_obj)
+#define VCUD_MVA_MAP_CACHE	_IOWR('v', 6, struct mem_obj)
+#define VCUD_SET_MMAP_TYPE	_IOW('v', 8, struct map_obj)
+
+#define COMPAT_VCUD_SET_OBJECT	_IOW('v', 0, struct share_obj)
+#define COMPAT_VCUD_MVA_ALLOCATION	_IOWR('v', 1, struct compat_mem_obj)
+#define COMPAT_VCUD_MVA_FREE		_IOWR('v', 2, struct compat_mem_obj)
+#define COMPAT_VCUD_CACHE_FLUSH_ALL	_IOWR('v', 3, struct compat_mem_obj)
+#define COMPAT_VCUD_SET_MMAP_TYPE	_IOWR('v', 4, struct map_obj)
+
+/**
+ * struct mem_obj - memory buffer allocated in kernel
+ *
+ * @iova:	iova of buffer
+ * @len:	buffer length
+ * @va: kernel virtual address
+ */
+struct mem_obj {
+	unsigned long iova;
+	unsigned long len;
+	__u64 va;
+};
+
+/**
+ * struct map_obj - memory buffer mmaped in kernel
+ *
+ * @map_buf:	iova of buffer
+ * @map_type:	the type of mmap
+ * @reserved: reserved
+ */
+struct map_obj {
+	unsigned long map_buf;
+	unsigned long map_type;
+	__u64 reserved;
+};
+
+#if IS_ENABLED(CONFIG_COMPAT)
+struct compat_mem_obj {
+	compat_ulong_t iova;
+	compat_ulong_t len;
+	compat_u64 va;
+};
+#endif
+
+/**
+ * struct share_obj - DTCM (Data Tightly-Coupled Memory) buffer shared
+ *		      between the AP and the VCU
+ *
+ * @id:		IPI id
+ * @len:	share buffer length
+ * @share_buf:	share buffer data
+ */
+struct share_obj {
+	__s32 id;
+	__u32 len;
+	unsigned char share_buf[SHARE_BUF_SIZE];
+};
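+
+/*
+ * Illustrative userspace sketch (the device node path is an assumption):
+ *
+ *	struct mem_obj mem = { .len = 4096 };
+ *	int fd = open("/dev/vcu", O_RDWR);
+ *
+ *	if (fd >= 0 && ioctl(fd, VCUD_MVA_ALLOCATION, &mem) == 0)
+ *		... use mem.iova and mem.va ...
+ */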
+
+#endif /* __UAPI_MTK_VCU_CONTROLS_H__ */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index d5a5cae..545918c 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -143,6 +143,11 @@
 #define	TEST_SE0_NAK	3
 #define	TEST_PACKET	4
 #define	TEST_FORCE_EN	5
+/*
+ * OTG HNP and SRP REQD
+ */
+#define	OTG_SRP_REQD	6
+#define	OTG_HNP_REQD	7
 
 /* Status Type */
 #define USB_STATUS_TYPE_STANDARD	0
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index efb095d..99be3b3 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -107,6 +107,7 @@
 	select SND_SOC_MC13783 if MFD_MC13XXX
 	select SND_SOC_ML26124 if I2C
 	select SND_SOC_MT6351 if MTK_PMIC_WRAP
+	select SND_SOC_MT6358 if MTK_PMIC_WRAP
 	select SND_SOC_NAU8540 if I2C
 	select SND_SOC_NAU8810 if I2C
 	select SND_SOC_NAU8824 if I2C
@@ -118,6 +119,7 @@
 	select SND_SOC_PCM179X_SPI if SPI_MASTER
 	select SND_SOC_PCM186X_I2C if I2C
 	select SND_SOC_PCM186X_SPI if SPI_MASTER
+	select SND_SOC_TLV320ADC3101 if I2C
 	select SND_SOC_PCM3008
 	select SND_SOC_PCM3168A_I2C if I2C
 	select SND_SOC_PCM3168A_SPI if SPI_MASTER
@@ -729,6 +731,16 @@
 	select SND_SOC_PCM186X
 	select REGMAP_SPI
 
+config SND_SOC_TLV320ADC3101
+	tristate
+
+config SND_SOC_TLV320ADC3101_I2C
+	tristate "Texas Instruments TLV320ADC3101 CODECs - I2C"
+	depends on I2C
+	select SND_SOC_TLV320ADC3101
+	select REGMAP_I2C
+
 config SND_SOC_PCM3008
        tristate
 
@@ -1268,6 +1280,9 @@
 	depends on I2C
 	select REGMAP_I2C
 
+config SND_SOC_MT8167_CODEC
+	tristate
+
 # Amp
 config SND_SOC_LM4857
 	tristate
@@ -1291,6 +1306,25 @@
 config SND_SOC_MT6351
 	tristate "MediaTek MT6351 Codec"
 
+config SND_SOC_MT6358
+	tristate "MediaTek MT6358 Codec"
+	help
+	  Enable support for platforms that use the MT6358 as an
+	  external codec device.
+
+config SND_SOC_MT6392_MT8167
+	tristate "MediaTek MT6392 Codec in addition to MT8167 Codec"
+	select MT8167_CODEC
+	select MTK_SPEAKER
+
+config MTK_SPEAKER
+	bool "MTK SPEAKER AMP"
+	default n
+	help
+	  Enable support for the MediaTek internal speaker amplifier
+	  (MTK_SPEAKER_AMP). Say Y if your board uses the internal
+	  speaker amp; if unsure, say N.
+
 config SND_SOC_NAU8540
        tristate "Nuvoton Technology Corporation NAU85L40 CODEC"
        depends on I2C
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 7ae7c85..bd9d656 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -104,6 +104,7 @@
 snd-soc-msm8916-analog-objs := msm8916-wcd-analog.o
 snd-soc-msm8916-digital-objs := msm8916-wcd-digital.o
 snd-soc-mt6351-objs := mt6351.o
+snd-soc-mt6358-objs := mt6358.o
 snd-soc-nau8540-objs := nau8540.o
 snd-soc-nau8810-objs := nau8810.o
 snd-soc-nau8824-objs := nau8824.o
@@ -118,6 +119,8 @@
 snd-soc-pcm186x-objs := pcm186x.o
 snd-soc-pcm186x-i2c-objs := pcm186x-i2c.o
 snd-soc-pcm186x-spi-objs := pcm186x-spi.o
+snd-soc-tlv320adc3101-objs := tlv320adc3101.o
+snd-soc-tlv320adc3101-i2c-objs := tlv320adc3101-i2c.o
 snd-soc-pcm3008-objs := pcm3008.o
 snd-soc-pcm3168a-objs := pcm3168a.o
 snd-soc-pcm3168a-i2c-objs := pcm3168a-i2c.o
@@ -250,6 +253,9 @@
 snd-soc-wm9713-objs := wm9713.o
 snd-soc-wm-hubs-objs := wm_hubs.o
 snd-soc-zx-aud96p22-objs := zx_aud96p22.o
+snd-soc-mt8167-codec-objs := mt8167-codec.o mt8167-codec-utils.o
+snd-soc-mt6392-codec-objs := mt6392-codec.o
+
 # Amp
 snd-soc-max9877-objs := max9877.o
 snd-soc-max98504-objs := max98504.o
@@ -364,6 +370,7 @@
 obj-$(CONFIG_SND_SOC_MSM8916_WCD_ANALOG) +=snd-soc-msm8916-analog.o
 obj-$(CONFIG_SND_SOC_MSM8916_WCD_DIGITAL) +=snd-soc-msm8916-digital.o
 obj-$(CONFIG_SND_SOC_MT6351)	+= snd-soc-mt6351.o
+obj-$(CONFIG_SND_SOC_MT6358)	+= snd-soc-mt6358.o
 obj-$(CONFIG_SND_SOC_NAU8540)   += snd-soc-nau8540.o
 obj-$(CONFIG_SND_SOC_NAU8810)   += snd-soc-nau8810.o
 obj-$(CONFIG_SND_SOC_NAU8824)   += snd-soc-nau8824.o
@@ -378,6 +385,8 @@
 obj-$(CONFIG_SND_SOC_PCM186X)	+= snd-soc-pcm186x.o
 obj-$(CONFIG_SND_SOC_PCM186X_I2C)	+= snd-soc-pcm186x-i2c.o
 obj-$(CONFIG_SND_SOC_PCM186X_SPI)	+= snd-soc-pcm186x-spi.o
+obj-$(CONFIG_SND_SOC_TLV320ADC3101)	+= snd-soc-tlv320adc3101.o
+obj-$(CONFIG_SND_SOC_TLV320ADC3101_I2C)	+= snd-soc-tlv320adc3101-i2c.o
 obj-$(CONFIG_SND_SOC_PCM3008)	+= snd-soc-pcm3008.o
 obj-$(CONFIG_SND_SOC_PCM3168A)	+= snd-soc-pcm3168a.o
 obj-$(CONFIG_SND_SOC_PCM3168A_I2C)	+= snd-soc-pcm3168a-i2c.o
@@ -509,9 +518,11 @@
 obj-$(CONFIG_SND_SOC_WM_ADSP)	+= snd-soc-wm-adsp.o
 obj-$(CONFIG_SND_SOC_WM_HUBS)	+= snd-soc-wm-hubs.o
 obj-$(CONFIG_SND_SOC_ZX_AUD96P22) += snd-soc-zx-aud96p22.o
+obj-$(CONFIG_SND_SOC_MT8167_CODEC)	+= snd-soc-mt8167-codec.o
 
 # Amp
 obj-$(CONFIG_SND_SOC_MAX9877)	+= snd-soc-max9877.o
 obj-$(CONFIG_SND_SOC_MAX98504)	+= snd-soc-max98504.o
 obj-$(CONFIG_SND_SOC_SIMPLE_AMPLIFIER)	+= snd-soc-simple-amplifier.o
 obj-$(CONFIG_SND_SOC_TPA6130A2)	+= snd-soc-tpa6130a2.o
+obj-$(CONFIG_MTK_SPEAKER)	+= snd-soc-mt6392-codec.o
diff --git a/sound/soc/codecs/mt6358.c b/sound/soc/codecs/mt6358.c
new file mode 100644
index 0000000..d4c4fee
--- /dev/null
+++ b/sound/soc/codecs/mt6358.c
@@ -0,0 +1,2336 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt6358.c  --  mt6358 ALSA SoC audio codec driver
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/regulator/consumer.h>
+
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "mt6358.h"
+
+enum {
+	AUDIO_ANALOG_VOLUME_HSOUTL,
+	AUDIO_ANALOG_VOLUME_HSOUTR,
+	AUDIO_ANALOG_VOLUME_HPOUTL,
+	AUDIO_ANALOG_VOLUME_HPOUTR,
+	AUDIO_ANALOG_VOLUME_LINEOUTL,
+	AUDIO_ANALOG_VOLUME_LINEOUTR,
+	AUDIO_ANALOG_VOLUME_MICAMP1,
+	AUDIO_ANALOG_VOLUME_MICAMP2,
+	AUDIO_ANALOG_VOLUME_TYPE_MAX
+};
+
+enum {
+	MUX_ADC_L,
+	MUX_ADC_R,
+	MUX_PGA_L,
+	MUX_PGA_R,
+	MUX_MIC_TYPE,
+	MUX_HP_L,
+	MUX_HP_R,
+	MUX_NUM,
+};
+
+enum {
+	DEVICE_HP,
+	DEVICE_LO,
+	DEVICE_RCV,
+	DEVICE_MIC1,
+	DEVICE_MIC2,
+	DEVICE_NUM
+};
+
+/* Supply widget subseq */
+enum {
+	/* common */
+	SUPPLY_SEQ_CLK_BUF,
+	SUPPLY_SEQ_AUD_GLB,
+	SUPPLY_SEQ_CLKSQ,
+	SUPPLY_SEQ_VOW_AUD_LPW,
+	SUPPLY_SEQ_AUD_VOW,
+	SUPPLY_SEQ_VOW_CLK,
+	SUPPLY_SEQ_VOW_LDO,
+	SUPPLY_SEQ_TOP_CK,
+	SUPPLY_SEQ_TOP_CK_LAST,
+	SUPPLY_SEQ_AUD_TOP,
+	SUPPLY_SEQ_AUD_TOP_LAST,
+	SUPPLY_SEQ_AFE,
+	/* capture */
+	SUPPLY_SEQ_ADC_SUPPLY,
+};
+
+enum {
+	CH_L = 0,
+	CH_R,
+	NUM_CH,
+};
+
+#define REG_STRIDE 2
+
+struct mt6358_priv {
+	struct device *dev;
+	struct regmap *regmap;
+
+	unsigned int dl_rate;
+	unsigned int ul_rate;
+
+	int ana_gain[AUDIO_ANALOG_VOLUME_TYPE_MAX];
+	unsigned int mux_select[MUX_NUM];
+
+	int dev_counter[DEVICE_NUM];
+
+	int mtkaif_protocol;
+
+	struct regulator *avdd_reg;
+};
+
+int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt,
+			       int mtkaif_protocol)
+{
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+	priv->mtkaif_protocol = mtkaif_protocol;
+	return 0;
+}
+
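+/*
+ * The GPIO mode registers appear to pack one 3-bit function-select
+ * field per pad, so 0x0249 (0b001_001_001_001) programs function
+ * mode 1 (aud) on four consecutive pads. This reading of the magic
+ * values is an assumption; the values themselves are taken as-is
+ * from the vendor power-on sequence.
+ */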
+static void playback_gpio_set(struct mt6358_priv *priv)
+{
+	/* set gpio mosi mode */
+	regmap_update_bits(priv->regmap, MT6358_GPIO_MODE2_CLR,
+			   0x01f8, 0x01f8);
+	regmap_update_bits(priv->regmap, MT6358_GPIO_MODE2_SET,
+			   0xffff, 0x0249);
+	regmap_update_bits(priv->regmap, MT6358_GPIO_MODE2,
+			   0xffff, 0x0249);
+}
+
+static void playback_gpio_reset(struct mt6358_priv *priv)
+{
+	/* Set pad_aud_*_mosi to GPIO mode and direction input.
+	 * Reason: the pad_aud_dat_mosi* pins double as boot straps.
+	 * Don't clear clk/sync; mtkaif protocol 2 still needs them.
+	 */
+	regmap_update_bits(priv->regmap, MT6358_GPIO_MODE2_CLR,
+			   0x01f8, 0x01f8);
+	regmap_update_bits(priv->regmap, MT6358_GPIO_MODE2,
+			   0x01f8, 0x0000);
+	regmap_update_bits(priv->regmap, MT6358_GPIO_DIR0,
+			   0xf << 8, 0x0);
+}
+
+static void capture_gpio_set(struct mt6358_priv *priv)
+{
+	/* set gpio miso mode */
+	regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3_CLR,
+			   0xffff, 0xffff);
+	regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3_SET,
+			   0xffff, 0x0249);
+	regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3,
+			   0xffff, 0x0249);
+}
+
+static void capture_gpio_reset(struct mt6358_priv *priv)
+{
+	/* Set pad_aud_*_miso to GPIO mode and direction input.
+	 * Reason:
+	 * pad_aud_clk_miso: during playback-only use the miso clock
+	 * still runs at 26MHz, which would leak power.
+	 * pad_aud_dat_miso*: the pins double as boot straps.
+	 */
+	regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3_CLR,
+			   0xffff, 0xffff);
+	regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3,
+			   0xffff, 0x0000);
+	regmap_update_bits(priv->regmap, MT6358_GPIO_DIR0,
+			   0xf << 12, 0x0);
+}
+
+/* use only when not governed by DAPM */
+static int mt6358_set_dcxo(struct mt6358_priv *priv, bool enable)
+{
+	regmap_update_bits(priv->regmap, MT6358_DCXO_CW14,
+			   0x1 << RG_XO_AUDIO_EN_M_SFT,
+			   (enable ? 1 : 0) << RG_XO_AUDIO_EN_M_SFT);
+	return 0;
+}
+
+/* use only when not governed by DAPM */
+static int mt6358_set_clksq(struct mt6358_priv *priv, bool enable)
+{
+	/* audio clk source from internal dcxo */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON6,
+			   RG_CLKSQ_IN_SEL_TEST_MASK_SFT,
+			   0x0);
+
+	/* Enable/disable CLKSQ 26MHz */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON6,
+			   RG_CLKSQ_EN_MASK_SFT,
+			   (enable ? 1 : 0) << RG_CLKSQ_EN_SFT);
+	return 0;
+}
+
+/* use only when not governed by DAPM */
+static int mt6358_set_aud_global_bias(struct mt6358_priv *priv, bool enable)
+{
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+			   RG_AUDGLB_PWRDN_VA28_MASK_SFT,
+			   (enable ? 0 : 1) << RG_AUDGLB_PWRDN_VA28_SFT);
+	return 0;
+}
+
+/* use only when not governed by DAPM */
+static int mt6358_set_topck(struct mt6358_priv *priv, bool enable)
+{
+	regmap_update_bits(priv->regmap, MT6358_AUD_TOP_CKPDN_CON0,
+			   0x0066, enable ? 0x0 : 0x66);
+	return 0;
+}
+
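+/*
+ * Program the MTKAIF TX interface according to the protocol chosen
+ * earlier via mt6358_set_mtkaif_protocol(). The protocol 2 variants
+ * differ only in how the aud_pad TX fifos are clocked; the register
+ * values are taken as-is from the vendor sequence.
+ */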
+static int mt6358_mtkaif_tx_enable(struct mt6358_priv *priv)
+{
+	switch (priv->mtkaif_protocol) {
+	case MT6358_MTKAIF_PROTOCOL_2_CLK_P2:
+		/* MTKAIF TX format setting */
+		regmap_update_bits(priv->regmap,
+				   MT6358_AFE_ADDA_MTKAIF_CFG0,
+				   0xffff, 0x0010);
+		/* enable aud_pad TX fifos */
+		regmap_update_bits(priv->regmap,
+				   MT6358_AFE_AUD_PAD_TOP,
+				   0xff00, 0x3800);
+		regmap_update_bits(priv->regmap,
+				   MT6358_AFE_AUD_PAD_TOP,
+				   0xff00, 0x3900);
+		break;
+	case MT6358_MTKAIF_PROTOCOL_2:
+		/* MTKAIF TX format setting */
+		regmap_update_bits(priv->regmap,
+				   MT6358_AFE_ADDA_MTKAIF_CFG0,
+				   0xffff, 0x0010);
+		/* enable aud_pad TX fifos */
+		regmap_update_bits(priv->regmap,
+				   MT6358_AFE_AUD_PAD_TOP,
+				   0xff00, 0x3100);
+		break;
+	case MT6358_MTKAIF_PROTOCOL_1:
+	default:
+		/* MTKAIF TX format setting */
+		regmap_update_bits(priv->regmap,
+				   MT6358_AFE_ADDA_MTKAIF_CFG0,
+				   0xffff, 0x0000);
+		/* enable aud_pad TX fifos */
+		regmap_update_bits(priv->regmap,
+				   MT6358_AFE_AUD_PAD_TOP,
+				   0xff00, 0x3100);
+		break;
+	}
+	return 0;
+}
+
+static int mt6358_mtkaif_tx_disable(struct mt6358_priv *priv)
+{
+	/* disable aud_pad TX fifos */
+	regmap_update_bits(priv->regmap, MT6358_AFE_AUD_PAD_TOP,
+			   0xff00, 0x3000);
+	return 0;
+}
+
+int mt6358_mtkaif_calibration_enable(struct snd_soc_component *cmpnt)
+{
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+	playback_gpio_set(priv);
+	capture_gpio_set(priv);
+	mt6358_mtkaif_tx_enable(priv);
+
+	mt6358_set_dcxo(priv, true);
+	mt6358_set_aud_global_bias(priv, true);
+	mt6358_set_clksq(priv, true);
+	mt6358_set_topck(priv, true);
+
+	/* set dat_miso_loopback on */
+	regmap_update_bits(priv->regmap, MT6358_AUDIO_DIG_CFG,
+			   RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_MASK_SFT,
+			   1 << RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_SFT);
+	regmap_update_bits(priv->regmap, MT6358_AUDIO_DIG_CFG,
+			   RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_MASK_SFT,
+			   1 << RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT);
+	return 0;
+}
+
+int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt)
+{
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+	/* set dat_miso_loopback off */
+	regmap_update_bits(priv->regmap, MT6358_AUDIO_DIG_CFG,
+			   RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_MASK_SFT,
+			   0 << RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_SFT);
+	regmap_update_bits(priv->regmap, MT6358_AUDIO_DIG_CFG,
+			   RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_MASK_SFT,
+			   0 << RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT);
+
+	mt6358_set_topck(priv, false);
+	mt6358_set_clksq(priv, false);
+	mt6358_set_aud_global_bias(priv, false);
+	mt6358_set_dcxo(priv, false);
+
+	mt6358_mtkaif_tx_disable(priv);
+	playback_gpio_reset(priv);
+	capture_gpio_reset(priv);
+	return 0;
+}
+
+int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
+					int phase_1, int phase_2)
+{
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+	regmap_update_bits(priv->regmap, MT6358_AUDIO_DIG_CFG,
+			   RG_AUD_PAD_TOP_PHASE_MODE_MASK_SFT,
+			   phase_1 << RG_AUD_PAD_TOP_PHASE_MODE_SFT);
+	regmap_update_bits(priv->regmap, MT6358_AUDIO_DIG_CFG,
+			   RG_AUD_PAD_TOP_PHASE_MODE2_MASK_SFT,
+			   phase_2 << RG_AUD_PAD_TOP_PHASE_MODE2_SFT);
+	return 0;
+}
+
+/* dl pga gain */
+enum {
+	DL_GAIN_8DB = 0,
+	DL_GAIN_0DB = 8,
+	DL_GAIN_N_1DB = 9,
+	DL_GAIN_N_10DB = 18,
+	DL_GAIN_N_40DB = 0x1f,
+};
+
+#define DL_GAIN_N_10DB_REG (DL_GAIN_N_10DB << 7 | DL_GAIN_N_10DB)
+#define DL_GAIN_N_40DB_REG (DL_GAIN_N_40DB << 7 | DL_GAIN_N_40DB)
+#define DL_GAIN_REG_MASK 0x0f9f
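+/*
+ * Both channel gains live in one register: one 5-bit field at bits
+ * 11:7 and one at bits 4:0 (hence the 0x0f9f mask), e.g.
+ * DL_GAIN_N_10DB_REG = (18 << 7) | 18 = 0x0912.
+ */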
+
+static void lo_store_gain(struct mt6358_priv *priv)
+{
+	unsigned int reg;
+	unsigned int gain_l, gain_r;
+
+	regmap_read(priv->regmap, MT6358_ZCD_CON1, &reg);
+	gain_l = (reg >> RG_AUDLOLGAIN_SFT) & RG_AUDLOLGAIN_MASK;
+	gain_r = (reg >> RG_AUDLORGAIN_SFT) & RG_AUDLORGAIN_MASK;
+
+	priv->ana_gain[AUDIO_ANALOG_VOLUME_LINEOUTL] = gain_l;
+	priv->ana_gain[AUDIO_ANALOG_VOLUME_LINEOUTR] = gain_r;
+}
+
+static void hp_store_gain(struct mt6358_priv *priv)
+{
+	unsigned int reg;
+	unsigned int gain_l, gain_r;
+
+	regmap_read(priv->regmap, MT6358_ZCD_CON2, &reg);
+	gain_l = (reg >> RG_AUDHPLGAIN_SFT) & RG_AUDHPLGAIN_MASK;
+	gain_r = (reg >> RG_AUDHPRGAIN_SFT) & RG_AUDHPRGAIN_MASK;
+
+	priv->ana_gain[AUDIO_ANALOG_VOLUME_HPOUTL] = gain_l;
+	priv->ana_gain[AUDIO_ANALOG_VOLUME_HPOUTR] = gain_r;
+}
+
+static void hp_zcd_disable(struct mt6358_priv *priv)
+{
+	regmap_write(priv->regmap, MT6358_ZCD_CON0, 0x0000);
+}
+
+static void hp_main_output_ramp(struct mt6358_priv *priv, bool up)
+{
+	int i = 0, stage = 0;
+	int target = 7;
+
+	/* Enable/Reduce HPL/R main output stage step by step */
+	for (i = 0; i <= target; i++) {
+		stage = up ? i : target - i;
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1,
+				   0x7 << 8, stage << 8);
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1,
+				   0x7 << 11, stage << 11);
+		usleep_range(100, 150);
+	}
+}
+
+static void hp_aux_feedback_loop_gain_ramp(struct mt6358_priv *priv, bool up)
+{
+	int i = 0, stage = 0;
+
+	/* Reduce HP aux feedback loop gain step by step */
+	for (i = 0; i <= 0xf; i++) {
+		stage = up ? i : 0xf - i;
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON9,
+				   0xf << 12, stage << 12);
+		usleep_range(100, 150);
+	}
+}
+
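+/*
+ * Step the HPL/R pull-down strength field through 0x0..0x6 (enable)
+ * or 0x6..0x1 (disable), ~600us per step, presumably to avoid pops
+ * when (dis)connecting the outputs to AVSS28_AUD.
+ */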
+static void hp_pull_down(struct mt6358_priv *priv, bool enable)
+{
+	int i;
+
+	if (enable) {
+		for (i = 0x0; i <= 0x6; i++) {
+			regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON4,
+					   0x7, i);
+			usleep_range(600, 700);
+		}
+	} else {
+		for (i = 0x6; i >= 0x1; i--) {
+			regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON4,
+					   0x7, i);
+			usleep_range(600, 700);
+		}
+	}
+}
+
+static bool is_valid_hp_pga_idx(int reg_idx)
+{
+	return (reg_idx >= DL_GAIN_8DB && reg_idx <= DL_GAIN_N_10DB) ||
+	       reg_idx == DL_GAIN_N_40DB;
+}
+
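+/*
+ * Walk the HP gain one index per iteration towards the target,
+ * pausing 200-300us per valid step; index values between
+ * DL_GAIN_N_10DB and DL_GAIN_N_40DB are not valid gain codes and
+ * are skipped without delay.
+ */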
+static void headset_volume_ramp(struct mt6358_priv *priv,
+				int from, int to)
+{
+	int offset = 0, count = 1, reg_idx;
+
+	if (!is_valid_hp_pga_idx(from) || !is_valid_hp_pga_idx(to))
+		dev_warn(priv->dev, "%s(), volume index is not valid, from %d, to %d\n",
+			 __func__, from, to);
+
+	dev_info(priv->dev, "%s(), from %d, to %d\n",
+		 __func__, from, to);
+
+	if (to > from)
+		offset = to - from;
+	else
+		offset = from - to;
+
+	while (offset > 0) {
+		if (to > from)
+			reg_idx = from + count;
+		else
+			reg_idx = from - count;
+
+		if (is_valid_hp_pga_idx(reg_idx)) {
+			regmap_update_bits(priv->regmap,
+					   MT6358_ZCD_CON2,
+					   DL_GAIN_REG_MASK,
+					   (reg_idx << 7) | reg_idx);
+			usleep_range(200, 300);
+		}
+		offset--;
+		count++;
+	}
+}
+
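+/*
+ * TLV values are in 0.01dB units: the playback controls span
+ * -10dB..+8dB in 1dB steps, the mic PGAs 0dB..+24dB in 6dB steps.
+ */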
+static const DECLARE_TLV_DB_SCALE(playback_tlv, -1000, 100, 0);
+static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 600, 0);
+
+static const struct snd_kcontrol_new mt6358_snd_controls[] = {
+	/* dl pga gain */
+	SOC_DOUBLE_TLV("Headphone Volume",
+		       MT6358_ZCD_CON2, 0, 7, 0x12, 1,
+		       playback_tlv),
+	SOC_DOUBLE_TLV("Lineout Volume",
+		       MT6358_ZCD_CON1, 0, 7, 0x12, 1,
+		       playback_tlv),
+	SOC_SINGLE_TLV("Handset Volume",
+		       MT6358_ZCD_CON3, 0, 0x12, 1,
+		       playback_tlv),
+	/* ul pga gain */
+	SOC_DOUBLE_R_TLV("PGA Volume",
+			 MT6358_AUDENC_ANA_CON0, MT6358_AUDENC_ANA_CON1,
+			 8, 4, 0,
+			 pga_tlv),
+};
+
+/* MUX */
+/* LOL MUX */
+static const char * const lo_in_mux_map[] = {
+	"Open", "Mute", "Playback", "Test Mode"
+};
+
+static int lo_in_mux_map_value[] = {
+	0x0, 0x1, 0x2, 0x3,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(lo_in_mux_map_enum,
+				  MT6358_AUDDEC_ANA_CON7,
+				  RG_AUDLOLMUXINPUTSEL_VAUDP15_SFT,
+				  RG_AUDLOLMUXINPUTSEL_VAUDP15_MASK,
+				  lo_in_mux_map,
+				  lo_in_mux_map_value);
+
+static const struct snd_kcontrol_new lo_in_mux_control =
+	SOC_DAPM_ENUM("In Select", lo_in_mux_map_enum);
+
+/* HP MUX */
+enum {
+	HP_MUX_OPEN = 0,
+	HP_MUX_HPSPK,
+	HP_MUX_HP,
+	HP_MUX_TEST_MODE,
+	HP_MUX_HP_IMPEDANCE,
+	HP_MUX_MASK = 0x7,
+};
+
+static const char * const hp_in_mux_map[] = {
+	"Open",
+	"LoudSPK Playback",
+	"Audio Playback",
+	"Test Mode",
+	"HP Impedance",
+	"undefined1",
+	"undefined2",
+	"undefined3",
+};
+
+static int hp_in_mux_map_value[] = {
+	HP_MUX_OPEN,
+	HP_MUX_HPSPK,
+	HP_MUX_HP,
+	HP_MUX_TEST_MODE,
+	HP_MUX_HP_IMPEDANCE,
+	HP_MUX_OPEN,
+	HP_MUX_OPEN,
+	HP_MUX_OPEN,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hpl_in_mux_map_enum,
+				  SND_SOC_NOPM,
+				  0,
+				  HP_MUX_MASK,
+				  hp_in_mux_map,
+				  hp_in_mux_map_value);
+
+static const struct snd_kcontrol_new hpl_in_mux_control =
+	SOC_DAPM_ENUM("HPL Select", hpl_in_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hpr_in_mux_map_enum,
+				  SND_SOC_NOPM,
+				  0,
+				  HP_MUX_MASK,
+				  hp_in_mux_map,
+				  hp_in_mux_map_value);
+
+static const struct snd_kcontrol_new hpr_in_mux_control =
+	SOC_DAPM_ENUM("HPR Select", hpr_in_mux_map_enum);
+
+/* RCV MUX */
+enum {
+	RCV_MUX_OPEN = 0,
+	RCV_MUX_MUTE,
+	RCV_MUX_VOICE_PLAYBACK,
+	RCV_MUX_TEST_MODE,
+	RCV_MUX_MASK = 0x3,
+};
+
+static const char * const rcv_in_mux_map[] = {
+	"Open", "Mute", "Voice Playback", "Test Mode"
+};
+
+static int rcv_in_mux_map_value[] = {
+	RCV_MUX_OPEN,
+	RCV_MUX_MUTE,
+	RCV_MUX_VOICE_PLAYBACK,
+	RCV_MUX_TEST_MODE,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(rcv_in_mux_map_enum,
+				  SND_SOC_NOPM,
+				  0,
+				  RCV_MUX_MASK,
+				  rcv_in_mux_map,
+				  rcv_in_mux_map_value);
+
+static const struct snd_kcontrol_new rcv_in_mux_control =
+	SOC_DAPM_ENUM("RCV Select", rcv_in_mux_map_enum);
+
+/* DAC In MUX */
+static const char * const dac_in_mux_map[] = {
+	"Normal Path", "Sgen"
+};
+
+static int dac_in_mux_map_value[] = {
+	0x0, 0x1,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(dac_in_mux_map_enum,
+				  MT6358_AFE_TOP_CON0,
+				  DL_SINE_ON_SFT,
+				  DL_SINE_ON_MASK,
+				  dac_in_mux_map,
+				  dac_in_mux_map_value);
+
+static const struct snd_kcontrol_new dac_in_mux_control =
+	SOC_DAPM_ENUM("DAC Select", dac_in_mux_map_enum);
+
+/* AIF Out MUX */
+static SOC_VALUE_ENUM_SINGLE_DECL(aif_out_mux_map_enum,
+				  MT6358_AFE_TOP_CON0,
+				  UL_SINE_ON_SFT,
+				  UL_SINE_ON_MASK,
+				  dac_in_mux_map,
+				  dac_in_mux_map_value);
+
+static const struct snd_kcontrol_new aif_out_mux_control =
+	SOC_DAPM_ENUM("AIF Out Select", aif_out_mux_map_enum);
+
+/* Mic Type MUX */
+enum {
+	MIC_TYPE_MUX_IDLE = 0,
+	MIC_TYPE_MUX_ACC,
+	MIC_TYPE_MUX_DMIC,
+	MIC_TYPE_MUX_DCC,
+	MIC_TYPE_MUX_DCC_ECM_DIFF,
+	MIC_TYPE_MUX_DCC_ECM_SINGLE,
+	MIC_TYPE_MUX_MASK = 0x7,
+};
+
+#define IS_DCC_BASE(type) ((type) == MIC_TYPE_MUX_DCC || \
+			(type) == MIC_TYPE_MUX_DCC_ECM_DIFF || \
+			(type) == MIC_TYPE_MUX_DCC_ECM_SINGLE)
+
+static const char * const mic_type_mux_map[] = {
+	"Idle",
+	"ACC",
+	"DMIC",
+	"DCC",
+	"DCC_ECM_DIFF",
+	"DCC_ECM_SINGLE",
+};
+
+static int mic_type_mux_map_value[] = {
+	MIC_TYPE_MUX_IDLE,
+	MIC_TYPE_MUX_ACC,
+	MIC_TYPE_MUX_DMIC,
+	MIC_TYPE_MUX_DCC,
+	MIC_TYPE_MUX_DCC_ECM_DIFF,
+	MIC_TYPE_MUX_DCC_ECM_SINGLE,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(mic_type_mux_map_enum,
+				  SND_SOC_NOPM,
+				  0,
+				  MIC_TYPE_MUX_MASK,
+				  mic_type_mux_map,
+				  mic_type_mux_map_value);
+
+static const struct snd_kcontrol_new mic_type_mux_control =
+	SOC_DAPM_ENUM("Mic Type Select", mic_type_mux_map_enum);
+
+/* ADC L MUX */
+enum {
+	ADC_MUX_IDLE = 0,
+	ADC_MUX_AIN0,
+	ADC_MUX_PREAMPLIFIER,
+	ADC_MUX_IDLE1,
+	ADC_MUX_MASK = 0x3,
+};
+
+static const char * const adc_left_mux_map[] = {
+	"Idle", "AIN0", "Left Preamplifier", "Idle_1"
+};
+
+static int adc_mux_map_value[] = {
+	ADC_MUX_IDLE,
+	ADC_MUX_AIN0,
+	ADC_MUX_PREAMPLIFIER,
+	ADC_MUX_IDLE1,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(adc_left_mux_map_enum,
+				  SND_SOC_NOPM,
+				  0,
+				  ADC_MUX_MASK,
+				  adc_left_mux_map,
+				  adc_mux_map_value);
+
+static const struct snd_kcontrol_new adc_left_mux_control =
+	SOC_DAPM_ENUM("ADC L Select", adc_left_mux_map_enum);
+
+/* ADC R MUX */
+static const char * const adc_right_mux_map[] = {
+	"Idle", "AIN0", "Right Preamplifier", "Idle_1"
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(adc_right_mux_map_enum,
+				  SND_SOC_NOPM,
+				  0,
+				  ADC_MUX_MASK,
+				  adc_right_mux_map,
+				  adc_mux_map_value);
+
+static const struct snd_kcontrol_new adc_right_mux_control =
+	SOC_DAPM_ENUM("ADC R Select", adc_right_mux_map_enum);
+
+/* PGA L MUX */
+enum {
+	PGA_MUX_NONE = 0,
+	PGA_MUX_AIN0,
+	PGA_MUX_AIN1,
+	PGA_MUX_AIN2,
+	PGA_MUX_MASK = 0x3,
+};
+
+static const char * const pga_mux_map[] = {
+	"None", "AIN0", "AIN1", "AIN2"
+};
+
+static int pga_mux_map_value[] = {
+	PGA_MUX_NONE,
+	PGA_MUX_AIN0,
+	PGA_MUX_AIN1,
+	PGA_MUX_AIN2,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(pga_left_mux_map_enum,
+				  SND_SOC_NOPM,
+				  0,
+				  PGA_MUX_MASK,
+				  pga_mux_map,
+				  pga_mux_map_value);
+
+static const struct snd_kcontrol_new pga_left_mux_control =
+	SOC_DAPM_ENUM("PGA L Select", pga_left_mux_map_enum);
+
+/* PGA R MUX */
+static SOC_VALUE_ENUM_SINGLE_DECL(pga_right_mux_map_enum,
+				  SND_SOC_NOPM,
+				  0,
+				  PGA_MUX_MASK,
+				  pga_mux_map,
+				  pga_mux_map_value);
+
+static const struct snd_kcontrol_new pga_right_mux_control =
+	SOC_DAPM_ENUM("PGA R Select", pga_right_mux_map_enum);
+
+static int mt_clksq_event(struct snd_soc_dapm_widget *w,
+			  struct snd_kcontrol *kcontrol,
+			  int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+	dev_dbg(priv->dev, "%s(), event = 0x%x\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* audio clk source from internal dcxo */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON6,
+				   RG_CLKSQ_IN_SEL_TEST_MASK_SFT,
+				   0x0);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mt_sgen_event(struct snd_soc_dapm_widget *w,
+			 struct snd_kcontrol *kcontrol,
+			 int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+	dev_dbg(priv->dev, "%s(), event = 0x%x\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* sdm audio fifo clock power on */
+		regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x0006);
+		/* scrambler clock on enable */
+		regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON0, 0xCBA1);
+		/* sdm power on */
+		regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x0003);
+		/* sdm fifo enable */
+		regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x000B);
+
+		regmap_update_bits(priv->regmap, MT6358_AFE_SGEN_CFG0,
+				   0xff3f,
+				   0x0000);
+		regmap_update_bits(priv->regmap, MT6358_AFE_SGEN_CFG1,
+				   0xffff,
+				   0x0001);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* DL scrambler disabling sequence */
+		regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x0000);
+		regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON0, 0xcba0);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mt_aif_in_event(struct snd_soc_dapm_widget *w,
+			   struct snd_kcontrol *kcontrol,
+			   int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+	dev_info(priv->dev, "%s(), event 0x%x, rate %d\n",
+		 __func__, event, priv->dl_rate);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		playback_gpio_set(priv);
+
+		/* sdm audio fifo clock power on */
+		regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x0006);
+		/* scrambler clock on enable */
+		regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON0, 0xCBA1);
+		/* sdm power on */
+		regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x0003);
+		/* sdm fifo enable */
+		regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x000B);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* DL scrambler disabling sequence */
+		regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x0000);
+		regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON0, 0xcba0);
+
+		playback_gpio_reset(priv);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mtk_hp_enable(struct mt6358_priv *priv)
+{
+	/* Pull-down HPL/R to AVSS28_AUD */
+	hp_pull_down(priv, true);
+	/* release HP CMFB gate rstb */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON4,
+			   0x1 << 6, 0x1 << 6);
+
+	/* Reduce ESD resistance of AU_REFN */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON2, 0x4000);
+
+	/* save target gain to restore after hardware open complete */
+	hp_store_gain(priv);
+	/* Set HPR/HPL gain as minimum (~ -40dB) */
+	regmap_write(priv->regmap, MT6358_ZCD_CON2, DL_GAIN_N_40DB_REG);
+
+	/* Turn on DA_600K_NCP_VA18 */
+	regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON1, 0x0001);
+	/* Set NCP clock to 604kHz (26MHz / 43) */
+	regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON2, 0x002c);
+	/* Toggle RG_DIVCKS_CHG */
+	regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON0, 0x0001);
+	/* Set NCP soft start mode as default mode: 100us */
+	regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON4, 0x0003);
+	/* Enable NCP */
+	regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON3, 0x0000);
+	usleep_range(250, 270);
+
+	/* Enable cap-less LDOs (1.5V) */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+			   0x1055, 0x1055);
+	/* Enable NV regulator (-1.2V) */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON15, 0x0001);
+	usleep_range(100, 120);
+
+	/* Disable AUD_ZCD */
+	hp_zcd_disable(priv);
+
+	/* Disable headphone short-circuit protection */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x3000);
+
+	/* Enable IBIST */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON12, 0x0055);
+
+	/* Set HP DR bias current optimization, 010: 6uA */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON11, 0x4900);
+	/* Set HP & ZCD bias current optimization */
+	/* 01: ZCD: 4uA, HP/HS/LO: 5uA */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON12, 0x0055);
+	/* Set HPP/N STB enhance circuits */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON2, 0x4033);
+
+	/* Enable HP aux output stage */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x000c);
+	/* Enable HP aux feedback loop */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x003c);
+	/* Enable HP aux CMFB loop */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0c00);
+	/* Enable HP driver bias circuits */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x30c0);
+	/* Enable HP driver core circuits */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x30f0);
+	/* Short HP main output to HP aux output stage */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x00fc);
+
+	/* Enable HP main CMFB loop */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0e00);
+	/* Disable HP aux CMFB loop */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0200);
+
+	/* Select CMFB resistor bulk to AC mode */
+	/* Select HS/LO cap size (6.5pF default) */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON10, 0x0000);
+
+	/* Enable HP main output stage */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x00ff);
+	/* Enable HPR/L main output stage step by step */
+	hp_main_output_ramp(priv, true);
+
+	/* Reduce HP aux feedback loop gain */
+	hp_aux_feedback_loop_gain_ramp(priv, true);
+	/* Disable HP aux feedback loop */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fcf);
+
+	/* apply volume setting */
+	headset_volume_ramp(priv,
+			    DL_GAIN_N_10DB,
+			    priv->ana_gain[AUDIO_ANALOG_VOLUME_HPOUTL]);
+
+	/* Disable HP aux output stage */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fc3);
+	/* Unshort HP main output to HP aux output stage */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3f03);
+	usleep_range(100, 120);
+
+	/* Enable AUD_CLK */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13, 0x1, 0x1);
+	/* Enable Audio DAC */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x30ff);
+	/* Enable low-noise mode of DAC */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0xf201);
+	usleep_range(100, 120);
+
+	/* Switch HPL MUX to audio DAC */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x32ff);
+	/* Switch HPR MUX to audio DAC */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x3aff);
+
+	/* Disable Pull-down HPL/R to AVSS28_AUD */
+	hp_pull_down(priv, false);
+
+	return 0;
+}
+
+static int mtk_hp_disable(struct mt6358_priv *priv)
+{
+	/* Pull-down HPL/R to AVSS28_AUD */
+	hp_pull_down(priv, true);
+
+	/* HPR/HPL mux to open */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+			   0x0f00, 0x0000);
+
+	/* Disable low-noise mode of DAC */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON9,
+			   0x0001, 0x0000);
+
+	/* Disable Audio DAC */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+			   0x000f, 0x0000);
+
+	/* Disable AUD_CLK */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13, 0x1, 0x0);
+
+	/* Short HP main output to HP aux output stage */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fc3);
+	/* Enable HP aux output stage */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fcf);
+
+	/* decrease HPL/R gain to normal gain step by step */
+	headset_volume_ramp(priv,
+			    priv->ana_gain[AUDIO_ANALOG_VOLUME_HPOUTL],
+			    DL_GAIN_N_40DB);
+
+	/* Enable HP aux feedback loop */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fff);
+
+	/* Reduce HP aux feedback loop gain */
+	hp_aux_feedback_loop_gain_ramp(priv, false);
+
+	/* decrease HPR/L main output stage step by step */
+	hp_main_output_ramp(priv, false);
+
+	/* Disable HP main output stage */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3, 0x0);
+
+	/* Enable HP aux CMFB loop */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0e00);
+
+	/* Disable HP main CMFB loop */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0c00);
+
+	/* Unshort HP main output to HP aux output stage */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1,
+			   0x3 << 6, 0x0);
+
+	/* Disable HP driver core circuits */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+			   0x3 << 4, 0x0);
+
+	/* Disable HP driver bias circuits */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+			   0x3 << 6, 0x0);
+
+	/* Disable HP aux CMFB loop */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0000);
+
+	/* Disable HP aux feedback loop */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1,
+			   0x3 << 4, 0x0);
+
+	/* Disable HP aux output stage */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1,
+			   0x3 << 2, 0x0);
+
+	/* Disable IBIST */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON12,
+			   0x1 << 8, 0x1 << 8);
+
+	/* Disable NV regulator (-1.2V) */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON15, 0x1, 0x0);
+	/* Disable cap-less LDOs (1.5V) */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+			   0x1055, 0x0);
+	/* Disable NCP */
+	regmap_update_bits(priv->regmap, MT6358_AUDNCP_CLKDIV_CON3,
+			   0x1, 0x1);
+
+	/* Increase ESD resistance of AU_REFN */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON2,
+			   0x1 << 14, 0x0);
+
+	/* Set HP CMFB gate rstb */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON4,
+			   0x1 << 6, 0x0);
+	/* disable Pull-down HPL/R to AVSS28_AUD */
+	hp_pull_down(priv, false);
+
+	return 0;
+}
+
+static int mtk_hp_spk_enable(struct mt6358_priv *priv)
+{
+	/* Pull-down HPL/R to AVSS28_AUD */
+	hp_pull_down(priv, true);
+	/* release HP CMFB gate rstb */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON4,
+			   0x1 << 6, 0x1 << 6);
+
+	/* Reduce ESD resistance of AU_REFN */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON2, 0x4000);
+
+	/* save target gain to restore after hardware open complete */
+	hp_store_gain(priv);
+	/* Set HPR/HPL gain to -10dB */
+	regmap_write(priv->regmap, MT6358_ZCD_CON2, DL_GAIN_N_10DB_REG);
+
+	/* Turn on DA_600K_NCP_VA18 */
+	regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON1, 0x0001);
+	/* Set NCP clock to 604kHz (26MHz / 43) */
+	regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON2, 0x002c);
+	/* Toggle RG_DIVCKS_CHG */
+	regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON0, 0x0001);
+	/* Set NCP soft start mode as default mode: 100us */
+	regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON4, 0x0003);
+	/* Enable NCP */
+	regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON3, 0x0000);
+	usleep_range(250, 270);
+
+	/* Enable cap-less LDOs (1.5V) */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+			   0x1055, 0x1055);
+	/* Enable NV regulator (-1.2V) */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON15, 0x0001);
+	usleep_range(100, 120);
+
+	/* Disable AUD_ZCD */
+	hp_zcd_disable(priv);
+
+	/* Disable headphone short-circuit protection */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x3000);
+
+	/* Enable IBIST */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON12, 0x0055);
+
+	/* Set HP DR bias current optimization, 010: 6uA */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON11, 0x4900);
+	/* Set HP & ZCD bias current optimization */
+	/* 01: ZCD: 4uA, HP/HS/LO: 5uA */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON12, 0x0055);
+	/* Set HPP/N STB enhance circuits */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON2, 0x4033);
+
+	/* Disable Pull-down HPL/R to AVSS28_AUD */
+	hp_pull_down(priv, false);
+
+	/* Enable HP driver bias circuits */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x30c0);
+	/* Enable HP driver core circuits */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x30f0);
+	/* Enable HP main CMFB loop */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0200);
+
+	/* Select CMFB resistor bulk to AC mode */
+	/* Select HS/LO cap size (6.5pF default) */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON10, 0x0000);
+
+	/* Enable HP main output stage */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x0003);
+	/* Enable HPR/L main output stage step by step */
+	hp_main_output_ramp(priv, true);
+
+	/* Set LO gain as minimum (~ -40dB) */
+	lo_store_gain(priv);
+	regmap_write(priv->regmap, MT6358_ZCD_CON1, DL_GAIN_N_40DB_REG);
+	/* apply volume setting */
+	headset_volume_ramp(priv,
+			    DL_GAIN_N_10DB,
+			    priv->ana_gain[AUDIO_ANALOG_VOLUME_HPOUTL]);
+
+	/* Set LO STB enhance circuits */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON7, 0x0110);
+	/* Enable LO driver bias circuits */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON7, 0x0112);
+	/* Enable LO driver core circuits */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON7, 0x0113);
+
+	/* Set LOL gain to normal gain step by step */
+	regmap_update_bits(priv->regmap, MT6358_ZCD_CON1,
+			   RG_AUDLOLGAIN_MASK_SFT,
+			   priv->ana_gain[AUDIO_ANALOG_VOLUME_LINEOUTL] <<
+			   RG_AUDLOLGAIN_SFT);
+	regmap_update_bits(priv->regmap, MT6358_ZCD_CON1,
+			   RG_AUDLORGAIN_MASK_SFT,
+			   priv->ana_gain[AUDIO_ANALOG_VOLUME_LINEOUTR] <<
+			   RG_AUDLORGAIN_SFT);
+
+	/* Enable AUD_CLK */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13, 0x1, 0x1);
+	/* Enable Audio DAC */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x30f9);
+	/* Enable low-noise mode of DAC */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0201);
+	/* Switch LOL MUX to audio DAC */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON7, 0x011b);
+	/* Switch HPL/R MUX to Line-out */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x35f9);
+
+	return 0;
+}
+
+static int mtk_hp_spk_disable(struct mt6358_priv *priv)
+{
+	/* HPR/HPL mux to open */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+			   0x0f00, 0x0000);
+	/* LOL mux to open */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON7,
+			   0x3 << 2, 0x0000);
+
+	/* Disable Audio DAC */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+			   0x000f, 0x0000);
+
+	/* Disable AUD_CLK */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13, 0x1, 0x0);
+
+	/* decrease HPL/R gain to normal gain step by step */
+	headset_volume_ramp(priv,
+			    priv->ana_gain[AUDIO_ANALOG_VOLUME_HPOUTL],
+			    DL_GAIN_N_40DB);
+
+	/* decrease LOL gain to minimum gain step by step */
+	regmap_update_bits(priv->regmap, MT6358_ZCD_CON1,
+			   DL_GAIN_REG_MASK, DL_GAIN_N_40DB_REG);
+
+	/* decrease HPR/L main output stage step by step */
+	hp_main_output_ramp(priv, false);
+
+	/* Disable HP main output stage */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3, 0x0);
+
+	/* Short HP main output to HP aux output stage */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fc3);
+	/* Enable HP aux output stage */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fcf);
+
+	/* Enable HP aux feedback loop */
+	regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fff);
+
+	/* Reduce HP aux feedback loop gain */
+	hp_aux_feedback_loop_gain_ramp(priv, false);
+
+	/* Disable HP driver core circuits */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+			   0x3 << 4, 0x0);
+	/* Disable LO driver core circuits */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON7,
+			   0x1, 0x0);
+
+	/* Disable HP driver bias circuits */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+			   0x3 << 6, 0x0);
+	/* Disable LO driver bias circuits */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON7,
+			   0x1 << 1, 0x0);
+
+	/* Disable HP aux CMFB loop */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON9,
+			   0xff << 8, 0x0000);
+
+	/* Disable IBIST */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON12,
+			   0x1 << 8, 0x1 << 8);
+	/* Disable NV regulator (-1.2V) */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON15, 0x1, 0x0);
+	/* Disable cap-less LDOs (1.5V) */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14, 0x1055, 0x0);
+	/* Disable NCP */
+	regmap_update_bits(priv->regmap, MT6358_AUDNCP_CLKDIV_CON3, 0x1, 0x1);
+
+	/* Set HP CMFB gate rstb */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON4,
+			   0x1 << 6, 0x0);
+	/* disable Pull-down HPL/R to AVSS28_AUD */
+	hp_pull_down(priv, false);
+
+	return 0;
+}
+
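+/*
+ * dev_counter[DEVICE_HP] reference-counts the headphone path so that
+ * the analog bring-up runs only on the first PRE_PMU and the
+ * teardown only on the last PRE_PMD; the mux selected at power-up is
+ * remembered so the matching disable sequence is used.
+ */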
+static int mt_hp_event(struct snd_soc_dapm_widget *w,
+		       struct snd_kcontrol *kcontrol,
+		       int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+	unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
+	int device = DEVICE_HP;
+
+	dev_info(priv->dev, "%s(), event 0x%x, dev_counter[DEV_HP] %d, mux %u\n",
+		 __func__,
+		 event,
+		 priv->dev_counter[device],
+		 mux);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		priv->dev_counter[device]++;
+		if (priv->dev_counter[device] > 1)
+			break;	/* already enabled, do nothing */
+		else if (priv->dev_counter[device] <= 0)
+			dev_warn(priv->dev, "%s(), dev_counter[DEV_HP] %d <= 0\n",
+				 __func__,
+				 priv->dev_counter[device]);
+
+		priv->mux_select[MUX_HP_L] = mux;
+
+		if (mux == HP_MUX_HP)
+			mtk_hp_enable(priv);
+		else if (mux == HP_MUX_HPSPK)
+			mtk_hp_spk_enable(priv);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		priv->dev_counter[device]--;
+		if (priv->dev_counter[device] > 0) {
+			break;	/* still being used, don't close */
+		} else if (priv->dev_counter[device] < 0) {
+			dev_warn(priv->dev, "%s(), dev_counter[DEV_HP] %d < 0\n",
+				 __func__,
+				 priv->dev_counter[device]);
+			priv->dev_counter[device] = 0;
+			break;
+		}
+
+		if (priv->mux_select[MUX_HP_L] == HP_MUX_HP)
+			mtk_hp_disable(priv);
+		else if (priv->mux_select[MUX_HP_L] == HP_MUX_HPSPK)
+			mtk_hp_spk_disable(priv);
+
+		priv->mux_select[MUX_HP_L] = mux;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mt_rcv_event(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *kcontrol,
+			int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+	dev_info(priv->dev, "%s(), event 0x%x, mux %u\n",
+		 __func__,
+		 event,
+		 dapm_kcontrol_get_value(w->kcontrols[0]));
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Reduce ESD resistance of AU_REFN */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON2, 0x4000);
+
+		/* Turn on DA_600K_NCP_VA18 */
+		regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON1, 0x0001);
+		/* Set NCP clock to 604kHz (26MHz / 43) */
+		regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON2, 0x002c);
+		/* Toggle RG_DIVCKS_CHG */
+		regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON0, 0x0001);
+		/* Set NCP soft start mode as default mode: 100us */
+		regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON4, 0x0003);
+		/* Enable NCP */
+		regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON3, 0x0000);
+		usleep_range(250, 270);
+
+		/* Enable cap-less LDOs (1.5V) */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+				   0x1055, 0x1055);
+		/* Enable NV regulator (-1.2V) */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON15, 0x0001);
+		usleep_range(100, 120);
+
+		/* Disable AUD_ZCD */
+		hp_zcd_disable(priv);
+
+		/* Disable handset short-circuit protection */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON6, 0x0010);
+
+		/* Enable IBIST */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON12, 0x0055);
+		/* Set HP DR bias current optimization, 010: 6uA */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON11, 0x4900);
+		/* Set HP & ZCD bias current optimization */
+		/* 01: ZCD: 4uA, HP/HS/LO: 5uA */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON12, 0x0055);
+		/* Set HS STB enhance circuits */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON6, 0x0090);
+
+		/* Disable HP main CMFB loop */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0000);
+		/* Select CMFB resistor bulk to AC mode */
+		/* Select HS/LO cap size (6.5pF default) */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON10, 0x0000);
+
+		/* Enable HS driver bias circuits */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON6, 0x0092);
+		/* Enable HS driver core circuits */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON6, 0x0093);
+
+		/* Enable AUD_CLK */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+				   0x1, 0x1);
+
+		/* Enable Audio DAC */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x0009);
+		/* Enable low-noise mode of DAC */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0001);
+		/* Switch HS MUX to audio DAC */
+		regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON6, 0x009b);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		/* HS mux to open */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON6,
+				   RG_AUDHSMUXINPUTSEL_VAUDP15_MASK_SFT,
+				   RCV_MUX_OPEN);
+
+		/* Disable Audio DAC */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+				   0x000f, 0x0000);
+
+		/* Disable AUD_CLK */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+				   0x1, 0x0);
+
+		/* decrease HS gain to minimum gain step by step */
+		regmap_write(priv->regmap, MT6358_ZCD_CON3, DL_GAIN_N_40DB);
+
+		/* Disable HS driver core circuits */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON6,
+				   0x1, 0x0);
+
+		/* Disable HS driver bias circuits */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON6,
+				   0x1 << 1, 0x0000);
+
+		/* Disable HP aux CMFB loop */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON9,
+				   0xff << 8, 0x0);
+
+		/* Enable HP main CMFB Switch */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON9,
+				   0xff << 8, 0x2 << 8);
+
+		/* Disable IBIST */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON12,
+				   0x1 << 8, 0x1 << 8);
+
+		/* Disable NV regulator (-1.2V) */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON15,
+				   0x1, 0x0);
+		/* Disable cap-less LDOs (1.5V) */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+				   0x1055, 0x0);
+		/* Disable NCP */
+		regmap_update_bits(priv->regmap, MT6358_AUDNCP_CLKDIV_CON3,
+				   0x1, 0x1);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mt_aif_out_event(struct snd_soc_dapm_widget *w,
+			    struct snd_kcontrol *kcontrol,
+			    int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+	dev_dbg(priv->dev, "%s(), event 0x%x, rate %d\n",
+		__func__, event, priv->ul_rate);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		capture_gpio_set(priv);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		capture_gpio_reset(priv);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mt_adc_supply_event(struct snd_soc_dapm_widget *w,
+			       struct snd_kcontrol *kcontrol,
+			       int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+	dev_dbg(priv->dev, "%s(), event 0x%x\n",
+		__func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Enable audio ADC CLKGEN  */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+				   0x1 << 5, 0x1 << 5);
+		/* ADC CLK from CLKGEN (13MHz) */
+		regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON3,
+			     0x0000);
+		/* Enable LCLDO_ENC 1P8V */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+				   0x2500, 0x0100);
+		/* LCLDO_ENC remote sense */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+				   0x2500, 0x2500);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* LCLDO_ENC remote sense off */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+				   0x2500, 0x0100);
+		/* disable LCLDO_ENC 1P8V */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+				   0x2500, 0x0000);
+
+		/* ADC CLK from CLKGEN (13MHz) */
+		regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON3, 0x0000);
+		/* disable audio ADC CLKGEN  */
+		regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+				   0x1 << 5, 0x0 << 5);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mt6358_amic_enable(struct mt6358_priv *priv)
+{
+	unsigned int mic_type = priv->mux_select[MUX_MIC_TYPE];
+	unsigned int mux_pga_l = priv->mux_select[MUX_PGA_L];
+	unsigned int mux_pga_r = priv->mux_select[MUX_PGA_R];
+
+	dev_info(priv->dev, "%s(), mux, mic %u, pga l %u, pga r %u\n",
+		 __func__, mic_type, mux_pga_l, mux_pga_r);
+
+	if (IS_DCC_BASE(mic_type)) {
+		/* DCC 50k CLK (from 26M) */
+		regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2062);
+		regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2062);
+		regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2060);
+		regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2061);
+		regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG1, 0x0100);
+	}
+
+	/* mic bias 0 */
+	if (mux_pga_l == PGA_MUX_AIN0 || mux_pga_l == PGA_MUX_AIN2 ||
+	    mux_pga_r == PGA_MUX_AIN0 || mux_pga_r == PGA_MUX_AIN2) {
+		switch (mic_type) {
+		case MIC_TYPE_MUX_DCC_ECM_DIFF:
+			regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON9,
+					   0xff00, 0x7700);
+			break;
+		case MIC_TYPE_MUX_DCC_ECM_SINGLE:
+			regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON9,
+					   0xff00, 0x1100);
+			break;
+		default:
+			regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON9,
+					   0xff00, 0x0000);
+			break;
+		}
+		/* Enable MICBIAS0, MICBIAS0 = 1P9V */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON9,
+				   0xff, 0x21);
+	}
+
+	/* mic bias 1 */
+	if (mux_pga_l == PGA_MUX_AIN1 || mux_pga_r == PGA_MUX_AIN1) {
+		/* Enable MICBIAS1, MICBIAS1 = 2P6V */
+		if (mic_type == MIC_TYPE_MUX_DCC_ECM_SINGLE)
+			regmap_write(priv->regmap,
+				     MT6358_AUDENC_ANA_CON10, 0x0161);
+		else
+			regmap_write(priv->regmap,
+				     MT6358_AUDENC_ANA_CON10, 0x0061);
+	}
+
+	if (IS_DCC_BASE(mic_type)) {
+		/* Audio L/R preamplifier DCC precharge */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+				   0xf8ff, 0x0004);
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+				   0xf8ff, 0x0004);
+	} else {
+		/* reset reg */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+				   0xf8ff, 0x0000);
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+				   0xf8ff, 0x0000);
+	}
+
+	if (mux_pga_l != PGA_MUX_NONE) {
+		/* L preamplifier input sel */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+				   RG_AUDPREAMPLINPUTSEL_MASK_SFT,
+				   mux_pga_l << RG_AUDPREAMPLINPUTSEL_SFT);
+
+		/* L preamplifier enable */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+				   RG_AUDPREAMPLON_MASK_SFT,
+				   0x1 << RG_AUDPREAMPLON_SFT);
+
+		if (IS_DCC_BASE(mic_type)) {
+			/* L preamplifier DCCEN */
+			regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+					   RG_AUDPREAMPLDCCEN_MASK_SFT,
+					   0x1 << RG_AUDPREAMPLDCCEN_SFT);
+		}
+
+		/* L ADC input sel : L PGA. Enable audio L ADC */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+				   RG_AUDADCLINPUTSEL_MASK_SFT,
+				   ADC_MUX_PREAMPLIFIER <<
+				   RG_AUDADCLINPUTSEL_SFT);
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+				   RG_AUDADCLPWRUP_MASK_SFT,
+				   0x1 << RG_AUDADCLPWRUP_SFT);
+	}
+
+	if (mux_pga_r != PGA_MUX_NONE) {
+		/* R preamplifier input sel */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+				   RG_AUDPREAMPRINPUTSEL_MASK_SFT,
+				   mux_pga_r << RG_AUDPREAMPRINPUTSEL_SFT);
+
+		/* R preamplifier enable */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+				   RG_AUDPREAMPRON_MASK_SFT,
+				   0x1 << RG_AUDPREAMPRON_SFT);
+
+		if (IS_DCC_BASE(mic_type)) {
+			/* R preamplifier DCCEN */
+			regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+					   RG_AUDPREAMPRDCCEN_MASK_SFT,
+					   0x1 << RG_AUDPREAMPRDCCEN_SFT);
+		}
+
+		/* R ADC input sel : R PGA. Enable audio R ADC */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+				   RG_AUDADCRINPUTSEL_MASK_SFT,
+				   ADC_MUX_PREAMPLIFIER <<
+				   RG_AUDADCRINPUTSEL_SFT);
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+				   RG_AUDADCRPWRUP_MASK_SFT,
+				   0x1 << RG_AUDADCRPWRUP_SFT);
+	}
+
+	if (IS_DCC_BASE(mic_type)) {
+		usleep_range(100, 150);
+		/* Audio L preamplifier DCC precharge off */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+				   RG_AUDPREAMPLDCPRECHARGE_MASK_SFT, 0x0);
+		/* Audio R preamplifier DCC precharge off */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+				   RG_AUDPREAMPRDCPRECHARGE_MASK_SFT, 0x0);
+
+		/* Short body to ground in PGA */
+		regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON3,
+				   0x1 << 12, 0x0);
+	}
+
+	/* here to set digital part */
+	mt6358_mtkaif_tx_enable(priv);
+
+	/* UL dmic setting off */
+	regmap_write(priv->regmap, MT6358_AFE_UL_SRC_CON0_H, 0x0000);
+
+	/* UL turn on */
+	regmap_write(priv->regmap, MT6358_AFE_UL_SRC_CON0_L, 0x0001);
+
+	return 0;
+}
+
+static void mt6358_amic_disable(struct mt6358_priv *priv)
+{
+	unsigned int mic_type = priv->mux_select[MUX_MIC_TYPE];
+	unsigned int mux_pga_l = priv->mux_select[MUX_PGA_L];
+	unsigned int mux_pga_r = priv->mux_select[MUX_PGA_R];
+
+	dev_info(priv->dev, "%s(), mux, mic %u, pga l %u, pga r %u\n",
+		 __func__, mic_type, mux_pga_l, mux_pga_r);
+
+	/* UL turn off */
+	regmap_update_bits(priv->regmap, MT6358_AFE_UL_SRC_CON0_L,
+			   0x0001, 0x0000);
+
+	/* disable aud_pad TX fifos */
+	mt6358_mtkaif_tx_disable(priv);
+
+	/* L ADC input sel : off, disable L ADC */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+			   0xf000, 0x0000);
+	/* L preamplifier DCCEN */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+			   0x1 << 1, 0x0);
+	/* L preamplifier input sel : off, L PGA 0 dB gain */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+			   0xfffb, 0x0000);
+
+	/* disable L preamplifier DCC precharge */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+			   0x1 << 2, 0x0);
+
+	/* R ADC input sel : off, disable R ADC */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+			   0xf000, 0x0000);
+	/* R preamplifier DCCEN */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+			   0x1 << 1, 0x0);
+	/* R preamplifier input sel : off, R PGA 0 dB gain */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+			   0x0ffb, 0x0000);
+
+	/* disable R preamplifier DCC precharge */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+			   0x1 << 2, 0x0);
+
+	/* mic bias */
+	/* Disable MICBIAS0, MICBIAS0 = 1P7V */
+	regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON9, 0x0000);
+
+	/* Disable MICBIAS1 */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON10,
+			   0x0001, 0x0000);
+
+	if (IS_DCC_BASE(mic_type)) {
+		/* dcclk_gen_on=1'b0 */
+		regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2060);
+		/* dcclk_pdn=1'b1 */
+		regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2062);
+		/* dcclk_ref_ck_sel=2'b00 */
+		regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2062);
+		/* dcclk_div=11'b00100000011 */
+		regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2062);
+	}
+}
+
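+/*
+ * Digital mic path: only MICBIAS0 and the DMIC interface are powered;
+ * the UL source is switched to the dmic input (the 0x0080/0x0003
+ * register values are taken as-is from the vendor sequence).
+ */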
+static int mt6358_dmic_enable(struct mt6358_priv *priv)
+{
+	dev_info(priv->dev, "%s()\n", __func__);
+
+	/* mic bias */
+	/* Enable MICBIAS0, MICBIAS0 = 1P9V */
+	regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON9, 0x0021);
+
+	/* RG_BANDGAPGEN=1'b0 */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON10,
+			   0x1 << 12, 0x0);
+
+	/* DMIC enable */
+	regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON8, 0x0005);
+
+	/* here to set digital part */
+	mt6358_mtkaif_tx_enable(priv);
+
+	/* UL dmic setting */
+	regmap_write(priv->regmap, MT6358_AFE_UL_SRC_CON0_H, 0x0080);
+
+	/* UL turn on */
+	regmap_write(priv->regmap, MT6358_AFE_UL_SRC_CON0_L, 0x0003);
+	return 0;
+}
+
+static void mt6358_dmic_disable(struct mt6358_priv *priv)
+{
+	dev_info(priv->dev, "%s()\n", __func__);
+
+	/* UL turn off */
+	regmap_update_bits(priv->regmap, MT6358_AFE_UL_SRC_CON0_L,
+			   0x0003, 0x0000);
+
+	/* disable aud_pad TX fifos */
+	mt6358_mtkaif_tx_disable(priv);
+
+	/* DMIC disable */
+	regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON8, 0x0000);
+
+	/* mic bias */
+	/* MICBIAS0 = 1P7V */
+	regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON9, 0x0001);
+
+	/* RG_BANDGAPGEN=1'b0 */
+	regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON10,
+			   0x1 << 12, 0x0);
+
+	/* MICBIAS0 disable */
+	regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON9, 0x0000);
+}
+
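+/*
+ * The mic-type mux value is latched at WILL_PMU so that POST_PMD can
+ * still tear down the path that was actually enabled, even if the
+ * control changed in between.
+ */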
+static int mt_mic_type_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol,
+			     int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+	unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
+
+	dev_dbg(priv->dev, "%s(), event 0x%x, mux %u\n",
+		__func__, event, mux);
+
+	switch (event) {
+	case SND_SOC_DAPM_WILL_PMU:
+		priv->mux_select[MUX_MIC_TYPE] = mux;
+		break;
+	case SND_SOC_DAPM_PRE_PMU:
+		switch (mux) {
+		case MIC_TYPE_MUX_DMIC:
+			mt6358_dmic_enable(priv);
+			break;
+		default:
+			mt6358_amic_enable(priv);
+			break;
+		}
+
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		switch (priv->mux_select[MUX_MIC_TYPE]) {
+		case MIC_TYPE_MUX_DMIC:
+			mt6358_dmic_disable(priv);
+			break;
+		default:
+			mt6358_amic_disable(priv);
+			break;
+		}
+
+		priv->mux_select[MUX_MIC_TYPE] = mux;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mt_adc_l_event(struct snd_soc_dapm_widget *w,
+			  struct snd_kcontrol *kcontrol,
+			  int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+	unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
+
+	dev_dbg(priv->dev, "%s(), event = 0x%x, mux %u\n",
+		__func__, event, mux);
+
+	priv->mux_select[MUX_ADC_L] = mux;
+
+	return 0;
+}
+
+static int mt_adc_r_event(struct snd_soc_dapm_widget *w,
+			  struct snd_kcontrol *kcontrol,
+			  int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+	unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
+
+	dev_dbg(priv->dev, "%s(), event = 0x%x, mux %u\n",
+		__func__, event, mux);
+
+	priv->mux_select[MUX_ADC_R] = mux;
+
+	return 0;
+}
+
+static int mt_pga_left_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol,
+			     int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+	unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
+
+	dev_dbg(priv->dev, "%s(), event = 0x%x, mux %u\n",
+		__func__, event, mux);
+
+	priv->mux_select[MUX_PGA_L] = mux;
+
+	return 0;
+}
+
+static int mt_pga_right_event(struct snd_soc_dapm_widget *w,
+			      struct snd_kcontrol *kcontrol,
+			      int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+	unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
+
+	dev_dbg(priv->dev, "%s(), event = 0x%x, mux %u\n",
+		__func__, event, mux);
+
+	priv->mux_select[MUX_PGA_R] = mux;
+
+	return 0;
+}
+
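+/*
+ * Settling delay shared by the clock supplies below: at least 250us
+ * after power-up and before power-down. The exact hardware requirement
+ * is not spelled out here; the range only guarantees the lower bound.
+ */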
+static int mt_delay_250_event(struct snd_soc_dapm_widget *w,
+			      struct snd_kcontrol *kcontrol,
+			      int event)
+{
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		usleep_range(250, 270);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		usleep_range(250, 270);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* DAPM Widgets */
+static const struct snd_soc_dapm_widget mt6358_dapm_widgets[] = {
+	/* Global Supply */
+	SND_SOC_DAPM_SUPPLY_S("CLK_BUF", SUPPLY_SEQ_CLK_BUF,
+			      MT6358_DCXO_CW14,
+			      RG_XO_AUDIO_EN_M_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("AUDGLB", SUPPLY_SEQ_AUD_GLB,
+			      MT6358_AUDDEC_ANA_CON13,
+			      RG_AUDGLB_PWRDN_VA28_SFT, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("CLKSQ Audio", SUPPLY_SEQ_CLKSQ,
+			      MT6358_AUDENC_ANA_CON6,
+			      RG_CLKSQ_EN_SFT, 0,
+			      mt_clksq_event,
+			      SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_SUPPLY_S("AUDNCP_CK", SUPPLY_SEQ_TOP_CK,
+			      MT6358_AUD_TOP_CKPDN_CON0,
+			      RG_AUDNCP_CK_PDN_SFT, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("ZCD13M_CK", SUPPLY_SEQ_TOP_CK,
+			      MT6358_AUD_TOP_CKPDN_CON0,
+			      RG_ZCD13M_CK_PDN_SFT, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("AUD_CK", SUPPLY_SEQ_TOP_CK_LAST,
+			      MT6358_AUD_TOP_CKPDN_CON0,
+			      RG_AUD_CK_PDN_SFT, 1,
+			      mt_delay_250_event,
+			      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SUPPLY_S("AUDIF_CK", SUPPLY_SEQ_TOP_CK,
+			      MT6358_AUD_TOP_CKPDN_CON0,
+			      RG_AUDIF_CK_PDN_SFT, 1, NULL, 0),
+
+	/* Digital Clock */
+	SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_AFE_CTL", SUPPLY_SEQ_AUD_TOP_LAST,
+			      MT6358_AUDIO_TOP_CON0,
+			      PDN_AFE_CTL_SFT, 1,
+			      mt_delay_250_event,
+			      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_DAC_CTL", SUPPLY_SEQ_AUD_TOP,
+			      MT6358_AUDIO_TOP_CON0,
+			      PDN_DAC_CTL_SFT, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_ADC_CTL", SUPPLY_SEQ_AUD_TOP,
+			      MT6358_AUDIO_TOP_CON0,
+			      PDN_ADC_CTL_SFT, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_I2S_DL", SUPPLY_SEQ_AUD_TOP,
+			      MT6358_AUDIO_TOP_CON0,
+			      PDN_I2S_DL_CTL_SFT, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_PWR_CLK", SUPPLY_SEQ_AUD_TOP,
+			      MT6358_AUDIO_TOP_CON0,
+			      PWR_CLK_DIS_CTL_SFT, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_PDN_AFE_TESTMODEL", SUPPLY_SEQ_AUD_TOP,
+			      MT6358_AUDIO_TOP_CON0,
+			      PDN_AFE_TESTMODEL_CTL_SFT, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_PDN_RESERVED", SUPPLY_SEQ_AUD_TOP,
+			      MT6358_AUDIO_TOP_CON0,
+			      PDN_RESERVED_SFT, 1, NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY("DL Digital Clock", SND_SOC_NOPM,
+			    0, 0, NULL, 0),
+
+	/* AFE ON */
+	SND_SOC_DAPM_SUPPLY_S("AFE_ON", SUPPLY_SEQ_AFE,
+			      MT6358_AFE_UL_DL_CON0, AFE_ON_SFT, 0,
+			      NULL, 0),
+
+	/* AIF Rx */
+	SND_SOC_DAPM_AIF_IN_E("AIF_RX", "AIF1 Playback", 0,
+			      MT6358_AFE_DL_SRC2_CON0_L,
+			      DL_2_SRC_ON_TMP_CTL_PRE_SFT, 0,
+			      mt_aif_in_event,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	/* DL Supply */
+	SND_SOC_DAPM_SUPPLY("DL Power Supply", SND_SOC_NOPM,
+			    0, 0, NULL, 0),
+
+	/* DAC */
+	SND_SOC_DAPM_MUX("DAC In Mux", SND_SOC_NOPM, 0, 0, &dac_in_mux_control),
+
+	SND_SOC_DAPM_DAC("DACL", NULL, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_DAC("DACR", NULL, SND_SOC_NOPM, 0, 0),
+
+	/* LOL */
+	SND_SOC_DAPM_MUX("LOL Mux", SND_SOC_NOPM, 0, 0, &lo_in_mux_control),
+
+	SND_SOC_DAPM_SUPPLY("LO Stability Enh", MT6358_AUDDEC_ANA_CON7,
+			    RG_LOOUTPUTSTBENH_VAUDP15_SFT, 0, NULL, 0),
+
+	SND_SOC_DAPM_OUT_DRV("LOL Buffer", MT6358_AUDDEC_ANA_CON7,
+			     RG_AUDLOLPWRUP_VAUDP15_SFT, 0, NULL, 0),
+
+	/* Headphone */
+	SND_SOC_DAPM_MUX_E("HPL Mux", SND_SOC_NOPM, 0, 0,
+			   &hpl_in_mux_control,
+			   mt_hp_event,
+			   SND_SOC_DAPM_PRE_PMU |
+			   SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_MUX_E("HPR Mux", SND_SOC_NOPM, 0, 0,
+			   &hpr_in_mux_control,
+			   mt_hp_event,
+			   SND_SOC_DAPM_PRE_PMU |
+			   SND_SOC_DAPM_PRE_PMD),
+
+	/* Receiver */
+	SND_SOC_DAPM_MUX_E("RCV Mux", SND_SOC_NOPM, 0, 0,
+			   &rcv_in_mux_control,
+			   mt_rcv_event,
+			   SND_SOC_DAPM_PRE_PMU |
+			   SND_SOC_DAPM_PRE_PMD),
+
+	/* Outputs */
+	SND_SOC_DAPM_OUTPUT("Receiver"),
+	SND_SOC_DAPM_OUTPUT("Headphone L"),
+	SND_SOC_DAPM_OUTPUT("Headphone R"),
+	SND_SOC_DAPM_OUTPUT("Headphone L Ext Spk Amp"),
+	SND_SOC_DAPM_OUTPUT("Headphone R Ext Spk Amp"),
+	SND_SOC_DAPM_OUTPUT("LINEOUT L"),
+	SND_SOC_DAPM_OUTPUT("LINEOUT L HSSPK"),
+
+	/* SGEN */
+	SND_SOC_DAPM_SUPPLY("SGEN DL Enable", MT6358_AFE_SGEN_CFG0,
+			    SGEN_DAC_EN_CTL_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("SGEN MUTE", MT6358_AFE_SGEN_CFG0,
+			    SGEN_MUTE_SW_CTL_SFT, 1,
+			    mt_sgen_event,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("SGEN DL SRC", MT6358_AFE_DL_SRC2_CON0_L,
+			    DL_2_SRC_ON_TMP_CTL_PRE_SFT, 0, NULL, 0),
+
+	SND_SOC_DAPM_INPUT("SGEN DL"),
+
+	/* Uplinks */
+	SND_SOC_DAPM_AIF_OUT_E("AIF1TX", "AIF1 Capture", 0,
+			       SND_SOC_NOPM, 0, 0,
+			       mt_aif_out_event,
+			       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SUPPLY_S("ADC Supply", SUPPLY_SEQ_ADC_SUPPLY,
+			      SND_SOC_NOPM, 0, 0,
+			      mt_adc_supply_event,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	/* Uplinks MUX */
+	SND_SOC_DAPM_MUX("AIF Out Mux", SND_SOC_NOPM, 0, 0,
+			 &aif_out_mux_control),
+
+	SND_SOC_DAPM_MUX_E("Mic Type Mux", SND_SOC_NOPM, 0, 0,
+			   &mic_type_mux_control,
+			   mt_mic_type_event,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD |
+			   SND_SOC_DAPM_WILL_PMU),
+
+	SND_SOC_DAPM_MUX_E("ADC L Mux", SND_SOC_NOPM, 0, 0,
+			   &adc_left_mux_control,
+			   mt_adc_l_event,
+			   SND_SOC_DAPM_WILL_PMU),
+	SND_SOC_DAPM_MUX_E("ADC R Mux", SND_SOC_NOPM, 0, 0,
+			   &adc_right_mux_control,
+			   mt_adc_r_event,
+			   SND_SOC_DAPM_WILL_PMU),
+
+	SND_SOC_DAPM_ADC("ADC L", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_ADC("ADC R", NULL, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_MUX_E("PGA L Mux", SND_SOC_NOPM, 0, 0,
+			   &pga_left_mux_control,
+			   mt_pga_left_event,
+			   SND_SOC_DAPM_WILL_PMU),
+	SND_SOC_DAPM_MUX_E("PGA R Mux", SND_SOC_NOPM, 0, 0,
+			   &pga_right_mux_control,
+			   mt_pga_right_event,
+			   SND_SOC_DAPM_WILL_PMU),
+
+	SND_SOC_DAPM_PGA("PGA L", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("PGA R", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	/* UL input */
+	SND_SOC_DAPM_INPUT("AIN0"),
+	SND_SOC_DAPM_INPUT("AIN1"),
+	SND_SOC_DAPM_INPUT("AIN2"),
+};
+
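+/*
+ * Route entries are {sink, control, source}. A NULL control makes the
+ * connection unconditional; otherwise the path is only active while
+ * the named mux/switch value is selected.
+ */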
+static const struct snd_soc_dapm_route mt6358_dapm_routes[] = {
+	/* Capture */
+	{"AIF1TX", NULL, "AIF Out Mux"},
+	{"AIF1TX", NULL, "CLK_BUF"},
+	{"AIF1TX", NULL, "AUDGLB"},
+	{"AIF1TX", NULL, "CLKSQ Audio"},
+
+	{"AIF1TX", NULL, "AUD_CK"},
+	{"AIF1TX", NULL, "AUDIF_CK"},
+
+	{"AIF1TX", NULL, "AUDIO_TOP_AFE_CTL"},
+	{"AIF1TX", NULL, "AUDIO_TOP_ADC_CTL"},
+	{"AIF1TX", NULL, "AUDIO_TOP_PWR_CLK"},
+	{"AIF1TX", NULL, "AUDIO_TOP_PDN_RESERVED"},
+	{"AIF1TX", NULL, "AUDIO_TOP_I2S_DL"},
+
+	{"AIF1TX", NULL, "AFE_ON"},
+
+	{"AIF Out Mux", NULL, "Mic Type Mux"},
+
+	{"Mic Type Mux", "ACC", "ADC L"},
+	{"Mic Type Mux", "ACC", "ADC R"},
+	{"Mic Type Mux", "DCC", "ADC L"},
+	{"Mic Type Mux", "DCC", "ADC R"},
+	{"Mic Type Mux", "DCC_ECM_DIFF", "ADC L"},
+	{"Mic Type Mux", "DCC_ECM_DIFF", "ADC R"},
+	{"Mic Type Mux", "DCC_ECM_SINGLE", "ADC L"},
+	{"Mic Type Mux", "DCC_ECM_SINGLE", "ADC R"},
+	{"Mic Type Mux", "DMIC", "AIN0"},
+	{"Mic Type Mux", "DMIC", "AIN2"},
+
+	{"ADC L", NULL, "ADC L Mux"},
+	{"ADC L", NULL, "ADC Supply"},
+	{"ADC R", NULL, "ADC R Mux"},
+	{"ADC R", NULL, "ADC Supply"},
+
+	{"ADC L Mux", "Left Preamplifier", "PGA L"},
+
+	{"ADC R Mux", "Right Preamplifier", "PGA R"},
+
+	{"PGA L", NULL, "PGA L Mux"},
+	{"PGA R", NULL, "PGA R Mux"},
+
+	{"PGA L Mux", "AIN0", "AIN0"},
+	{"PGA L Mux", "AIN1", "AIN1"},
+	{"PGA L Mux", "AIN2", "AIN2"},
+
+	{"PGA R Mux", "AIN0", "AIN0"},
+	{"PGA R Mux", "AIN1", "AIN1"},
+	{"PGA R Mux", "AIN2", "AIN2"},
+
+	/* DL Supply */
+	{"DL Power Supply", NULL, "CLK_BUF"},
+	{"DL Power Supply", NULL, "AUDGLB"},
+	{"DL Power Supply", NULL, "CLKSQ Audio"},
+
+	{"DL Power Supply", NULL, "AUDNCP_CK"},
+	{"DL Power Supply", NULL, "ZCD13M_CK"},
+	{"DL Power Supply", NULL, "AUD_CK"},
+	{"DL Power Supply", NULL, "AUDIF_CK"},
+
+	/* DL Digital Supply */
+	{"DL Digital Clock", NULL, "AUDIO_TOP_AFE_CTL"},
+	{"DL Digital Clock", NULL, "AUDIO_TOP_DAC_CTL"},
+	{"DL Digital Clock", NULL, "AUDIO_TOP_PWR_CLK"},
+
+	{"DL Digital Clock", NULL, "AFE_ON"},
+
+	{"AIF_RX", NULL, "DL Digital Clock"},
+
+	/* DL Path */
+	{"DAC In Mux", "Normal Path", "AIF_RX"},
+
+	{"DAC In Mux", "Sgen", "SGEN DL"},
+	{"SGEN DL", NULL, "SGEN DL SRC"},
+	{"SGEN DL", NULL, "SGEN MUTE"},
+	{"SGEN DL", NULL, "SGEN DL Enable"},
+	{"SGEN DL", NULL, "DL Digital Clock"},
+	{"SGEN DL", NULL, "AUDIO_TOP_PDN_AFE_TESTMODEL"},
+
+	{"DACL", NULL, "DAC In Mux"},
+	{"DACL", NULL, "DL Power Supply"},
+
+	{"DACR", NULL, "DAC In Mux"},
+	{"DACR", NULL, "DL Power Supply"},
+
+	/* Lineout Path */
+	{"LOL Mux", "Playback", "DACL"},
+
+	{"LOL Buffer", NULL, "LOL Mux"},
+	{"LOL Buffer", NULL, "LO Stability Enh"},
+
+	{"LINEOUT L", NULL, "LOL Buffer"},
+
+	/* Headphone Path */
+	{"HPL Mux", "Audio Playback", "DACL"},
+	{"HPR Mux", "Audio Playback", "DACR"},
+	{"HPL Mux", "HP Impedance", "DACL"},
+	{"HPR Mux", "HP Impedance", "DACR"},
+	{"HPL Mux", "LoudSPK Playback", "DACL"},
+	{"HPR Mux", "LoudSPK Playback", "DACR"},
+
+	{"Headphone L", NULL, "HPL Mux"},
+	{"Headphone R", NULL, "HPR Mux"},
+	{"Headphone L Ext Spk Amp", NULL, "HPL Mux"},
+	{"Headphone R Ext Spk Amp", NULL, "HPR Mux"},
+	{"LINEOUT L HSSPK", NULL, "HPL Mux"},
+
+	/* Receiver Path */
+	{"RCV Mux", "Voice Playback", "DACL"},
+	{"Receiver", NULL, "RCV Mux"},
+};
+
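+/*
+ * hw_params only caches the negotiated rate per direction; the cached
+ * dl_rate/ul_rate are picked up when the DL/UL sample-rate converters
+ * are programmed from the playback/capture power-up paths.
+ */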
+static int mt6358_codec_dai_hw_params(struct snd_pcm_substream *substream,
+				      struct snd_pcm_hw_params *params,
+				      struct snd_soc_dai *dai)
+{
+	struct snd_soc_component *cmpnt = dai->component;
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+	unsigned int rate = params_rate(params);
+
+	dev_info(priv->dev, "%s(), substream->stream %d, rate %d, number %d\n",
+		 __func__,
+		 substream->stream,
+		 rate,
+		 substream->number);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		priv->dl_rate = rate;
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		priv->ul_rate = rate;
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops mt6358_codec_dai_ops = {
+	.hw_params = mt6358_codec_dai_hw_params,
+};
+
+#define MT6358_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |\
+			SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE |\
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE |\
+			SNDRV_PCM_FMTBIT_U24_LE | SNDRV_PCM_FMTBIT_U24_BE |\
+			SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE |\
+			SNDRV_PCM_FMTBIT_U32_LE | SNDRV_PCM_FMTBIT_U32_BE)
+
+static struct snd_soc_dai_driver mt6358_dai_driver[] = {
+	{
+		.name = "mt6358-snd-codec-aif1",
+		.playback = {
+			.stream_name = "AIF1 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000 |
+				 SNDRV_PCM_RATE_96000 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = MT6358_FORMATS,
+		},
+		.capture = {
+			.stream_name = "AIF1 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000 |
+				 SNDRV_PCM_RATE_32000 |
+				 SNDRV_PCM_RATE_48000,
+			.formats = MT6358_FORMATS,
+		},
+		.ops = &mt6358_codec_dai_ops,
+	},
+};
+
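+/*
+ * One-time register defaults applied at component probe: short-circuit
+ * protection is disabled on the headphone, voice and lineout drivers,
+ * accdet is enabled, MISO pad driving is raised, and the audio GPIOs
+ * are put back into their reset state before any stream runs.
+ */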
+static int mt6358_codec_init_reg(struct mt6358_priv *priv)
+{
+	/* Disable HeadphoneL/HeadphoneR short circuit protection */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+			   RG_AUDHPLSCDISABLE_VAUDP15_MASK_SFT,
+			   0x1 << RG_AUDHPLSCDISABLE_VAUDP15_SFT);
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+			   RG_AUDHPRSCDISABLE_VAUDP15_MASK_SFT,
+			   0x1 << RG_AUDHPRSCDISABLE_VAUDP15_SFT);
+	/* Disable voice short circuit protection */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON6,
+			   RG_AUDHSSCDISABLE_VAUDP15_MASK_SFT,
+			   0x1 << RG_AUDHSSCDISABLE_VAUDP15_SFT);
+	/* disable LO buffer left short circuit protection */
+	regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON7,
+			   RG_AUDLOLSCDISABLE_VAUDP15_MASK_SFT,
+			   0x1 << RG_AUDLOLSCDISABLE_VAUDP15_SFT);
+
+	/* accdet s/w enable */
+	regmap_update_bits(priv->regmap, MT6358_ACCDET_CON13,
+			   0xFFFF, 0x700E);
+
+	/* set GPIO MISO driving strength to 4 mA */
+	regmap_write(priv->regmap, MT6358_DRV_CON3, 0x8888);
+
+	/* reset playback/capture GPIO state */
+	playback_gpio_reset(priv);
+	capture_gpio_reset(priv);
+
+	return 0;
+}
+
+static int mt6358_codec_probe(struct snd_soc_component *cmpnt)
+{
+	struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+	int ret;
+
+	snd_soc_component_init_regmap(cmpnt, priv->regmap);
+
+	mt6358_codec_init_reg(priv);
+
+	priv->avdd_reg = devm_regulator_get(priv->dev, "Avdd");
+	if (IS_ERR(priv->avdd_reg)) {
+		dev_err(priv->dev, "%s() has no Avdd supply\n", __func__);
+		return PTR_ERR(priv->avdd_reg);
+	}
+
+	ret = regulator_enable(priv->avdd_reg);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static const struct snd_soc_component_driver mt6358_soc_component_driver = {
+	.probe = mt6358_codec_probe,
+	.controls = mt6358_snd_controls,
+	.num_controls = ARRAY_SIZE(mt6358_snd_controls),
+	.dapm_widgets = mt6358_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(mt6358_dapm_widgets),
+	.dapm_routes = mt6358_dapm_routes,
+	.num_dapm_routes = ARRAY_SIZE(mt6358_dapm_routes),
+};
+
+static int mt6358_platform_driver_probe(struct platform_device *pdev)
+{
+	struct mt6358_priv *priv;
+	struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	priv->dev = &pdev->dev;
+
+	priv->regmap = mt6397->regmap;
+	if (IS_ERR(priv->regmap))
+		return PTR_ERR(priv->regmap);
+
+	dev_info(priv->dev, "%s(), dev name %s\n",
+		 __func__, dev_name(&pdev->dev));
+
+	return devm_snd_soc_register_component(&pdev->dev,
+				      &mt6358_soc_component_driver,
+				      mt6358_dai_driver,
+				      ARRAY_SIZE(mt6358_dai_driver));
+}
+
+static const struct of_device_id mt6358_of_match[] = {
+	{.compatible = "mediatek,mt6358-sound",},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mt6358_of_match);
+
+static struct platform_driver mt6358_platform_driver = {
+	.driver = {
+		.name = "mt6358-sound",
+		.of_match_table = mt6358_of_match,
+	},
+	.probe = mt6358_platform_driver_probe,
+};
+
+module_platform_driver(mt6358_platform_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT6358 ALSA SoC codec driver");
+MODULE_AUTHOR("KaiChieh Chuang <kaichieh.chuang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/mt6358.h b/sound/soc/codecs/mt6358.h
new file mode 100644
index 0000000..a595331
--- /dev/null
+++ b/sound/soc/codecs/mt6358.h
@@ -0,0 +1,2314 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mt6358.h  --  mt6358 ALSA SoC audio codec driver
+ *
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+ */
+
+#ifndef __MT6358_H__
+#define __MT6358_H__
+
+/* Reg bit define */
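+/*
+ * Naming convention for the bitfield macros below:
+ *   <FIELD>_SFT      - bit offset of the field within its register
+ *   <FIELD>_MASK     - field mask, right-aligned (not shifted)
+ *   <FIELD>_MASK_SFT - mask shifted into register position, i.e.
+ *                      (<FIELD>_MASK << <FIELD>_SFT)
+ */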
+/* MT6358_DCXO_CW14 */
+#define RG_XO_AUDIO_EN_M_SFT 13
+
+/* MT6358_DCXO_CW13 */
+#define RG_XO_VOW_EN_SFT 8
+
+/* MT6358_AUD_TOP_CKPDN_CON0 */
+#define RG_VOW13M_CK_PDN_SFT                              13
+#define RG_VOW13M_CK_PDN_MASK                             0x1
+#define RG_VOW13M_CK_PDN_MASK_SFT                         (0x1 << 13)
+#define RG_VOW32K_CK_PDN_SFT                              12
+#define RG_VOW32K_CK_PDN_MASK                             0x1
+#define RG_VOW32K_CK_PDN_MASK_SFT                         (0x1 << 12)
+#define RG_AUD_INTRP_CK_PDN_SFT                           8
+#define RG_AUD_INTRP_CK_PDN_MASK                          0x1
+#define RG_AUD_INTRP_CK_PDN_MASK_SFT                      (0x1 << 8)
+#define RG_PAD_AUD_CLK_MISO_CK_PDN_SFT                    7
+#define RG_PAD_AUD_CLK_MISO_CK_PDN_MASK                   0x1
+#define RG_PAD_AUD_CLK_MISO_CK_PDN_MASK_SFT               (0x1 << 7)
+#define RG_AUDNCP_CK_PDN_SFT                              6
+#define RG_AUDNCP_CK_PDN_MASK                             0x1
+#define RG_AUDNCP_CK_PDN_MASK_SFT                         (0x1 << 6)
+#define RG_ZCD13M_CK_PDN_SFT                              5
+#define RG_ZCD13M_CK_PDN_MASK                             0x1
+#define RG_ZCD13M_CK_PDN_MASK_SFT                         (0x1 << 5)
+#define RG_AUDIF_CK_PDN_SFT                               2
+#define RG_AUDIF_CK_PDN_MASK                              0x1
+#define RG_AUDIF_CK_PDN_MASK_SFT                          (0x1 << 2)
+#define RG_AUD_CK_PDN_SFT                                 1
+#define RG_AUD_CK_PDN_MASK                                0x1
+#define RG_AUD_CK_PDN_MASK_SFT                            (0x1 << 1)
+#define RG_ACCDET_CK_PDN_SFT                              0
+#define RG_ACCDET_CK_PDN_MASK                             0x1
+#define RG_ACCDET_CK_PDN_MASK_SFT                         (0x1 << 0)
+
+/* MT6358_AUD_TOP_CKPDN_CON0_SET */
+#define RG_AUD_TOP_CKPDN_CON0_SET_SFT                     0
+#define RG_AUD_TOP_CKPDN_CON0_SET_MASK                    0x3fff
+#define RG_AUD_TOP_CKPDN_CON0_SET_MASK_SFT                (0x3fff << 0)
+
+/* MT6358_AUD_TOP_CKPDN_CON0_CLR */
+#define RG_AUD_TOP_CKPDN_CON0_CLR_SFT                     0
+#define RG_AUD_TOP_CKPDN_CON0_CLR_MASK                    0x3fff
+#define RG_AUD_TOP_CKPDN_CON0_CLR_MASK_SFT                (0x3fff << 0)
+
+/* MT6358_AUD_TOP_CKSEL_CON0 */
+#define RG_AUDIF_CK_CKSEL_SFT                             3
+#define RG_AUDIF_CK_CKSEL_MASK                            0x1
+#define RG_AUDIF_CK_CKSEL_MASK_SFT                        (0x1 << 3)
+#define RG_AUD_CK_CKSEL_SFT                               2
+#define RG_AUD_CK_CKSEL_MASK                              0x1
+#define RG_AUD_CK_CKSEL_MASK_SFT                          (0x1 << 2)
+
+/* MT6358_AUD_TOP_CKSEL_CON0_SET */
+#define RG_AUD_TOP_CKSEL_CON0_SET_SFT                     0
+#define RG_AUD_TOP_CKSEL_CON0_SET_MASK                    0xf
+#define RG_AUD_TOP_CKSEL_CON0_SET_MASK_SFT                (0xf << 0)
+
+/* MT6358_AUD_TOP_CKSEL_CON0_CLR */
+#define RG_AUD_TOP_CKSEL_CON0_CLR_SFT                     0
+#define RG_AUD_TOP_CKSEL_CON0_CLR_MASK                    0xf
+#define RG_AUD_TOP_CKSEL_CON0_CLR_MASK_SFT                (0xf << 0)
+
+/* MT6358_AUD_TOP_CKTST_CON0 */
+#define RG_VOW13M_CK_TSTSEL_SFT                           9
+#define RG_VOW13M_CK_TSTSEL_MASK                          0x1
+#define RG_VOW13M_CK_TSTSEL_MASK_SFT                      (0x1 << 9)
+#define RG_VOW13M_CK_TST_DIS_SFT                          8
+#define RG_VOW13M_CK_TST_DIS_MASK                         0x1
+#define RG_VOW13M_CK_TST_DIS_MASK_SFT                     (0x1 << 8)
+#define RG_AUD26M_CK_TSTSEL_SFT                           4
+#define RG_AUD26M_CK_TSTSEL_MASK                          0x1
+#define RG_AUD26M_CK_TSTSEL_MASK_SFT                      (0x1 << 4)
+#define RG_AUDIF_CK_TSTSEL_SFT                            3
+#define RG_AUDIF_CK_TSTSEL_MASK                           0x1
+#define RG_AUDIF_CK_TSTSEL_MASK_SFT                       (0x1 << 3)
+#define RG_AUD_CK_TSTSEL_SFT                              2
+#define RG_AUD_CK_TSTSEL_MASK                             0x1
+#define RG_AUD_CK_TSTSEL_MASK_SFT                         (0x1 << 2)
+#define RG_AUD26M_CK_TST_DIS_SFT                          0
+#define RG_AUD26M_CK_TST_DIS_MASK                         0x1
+#define RG_AUD26M_CK_TST_DIS_MASK_SFT                     (0x1 << 0)
+
+/* MT6358_AUD_TOP_CLK_HWEN_CON0 */
+#define RG_AUD_INTRP_CK_PDN_HWEN_SFT                      0
+#define RG_AUD_INTRP_CK_PDN_HWEN_MASK                     0x1
+#define RG_AUD_INTRP_CK_PDN_HWEN_MASK_SFT                 (0x1 << 0)
+
+/* MT6358_AUD_TOP_CLK_HWEN_CON0_SET */
+#define RG_AUD_INTRP_CK_PND_HWEN_CON0_SET_SFT             0
+#define RG_AUD_INTRP_CK_PND_HWEN_CON0_SET_MASK            0xffff
+#define RG_AUD_INTRP_CK_PND_HWEN_CON0_SET_MASK_SFT        (0xffff << 0)
+
+/* MT6358_AUD_TOP_CLK_HWEN_CON0_CLR */
+#define RG_AUD_INTRP_CLK_PDN_HWEN_CON0_CLR_SFT            0
+#define RG_AUD_INTRP_CLK_PDN_HWEN_CON0_CLR_MASK           0xffff
+#define RG_AUD_INTRP_CLK_PDN_HWEN_CON0_CLR_MASK_SFT       (0xffff << 0)
+
+/* MT6358_AUD_TOP_RST_CON0 */
+#define RG_AUDNCP_RST_SFT                                 3
+#define RG_AUDNCP_RST_MASK                                0x1
+#define RG_AUDNCP_RST_MASK_SFT                            (0x1 << 3)
+#define RG_ZCD_RST_SFT                                    2
+#define RG_ZCD_RST_MASK                                   0x1
+#define RG_ZCD_RST_MASK_SFT                               (0x1 << 2)
+#define RG_ACCDET_RST_SFT                                 1
+#define RG_ACCDET_RST_MASK                                0x1
+#define RG_ACCDET_RST_MASK_SFT                            (0x1 << 1)
+#define RG_AUDIO_RST_SFT                                  0
+#define RG_AUDIO_RST_MASK                                 0x1
+#define RG_AUDIO_RST_MASK_SFT                             (0x1 << 0)
+
+/* MT6358_AUD_TOP_RST_CON0_SET */
+#define RG_AUD_TOP_RST_CON0_SET_SFT                       0
+#define RG_AUD_TOP_RST_CON0_SET_MASK                      0xf
+#define RG_AUD_TOP_RST_CON0_SET_MASK_SFT                  (0xf << 0)
+
+/* MT6358_AUD_TOP_RST_CON0_CLR */
+#define RG_AUD_TOP_RST_CON0_CLR_SFT                       0
+#define RG_AUD_TOP_RST_CON0_CLR_MASK                      0xf
+#define RG_AUD_TOP_RST_CON0_CLR_MASK_SFT                  (0xf << 0)
+
+/* MT6358_AUD_TOP_RST_BANK_CON0 */
+#define BANK_AUDZCD_SWRST_SFT                             2
+#define BANK_AUDZCD_SWRST_MASK                            0x1
+#define BANK_AUDZCD_SWRST_MASK_SFT                        (0x1 << 2)
+#define BANK_AUDIO_SWRST_SFT                              1
+#define BANK_AUDIO_SWRST_MASK                             0x1
+#define BANK_AUDIO_SWRST_MASK_SFT                         (0x1 << 1)
+#define BANK_ACCDET_SWRST_SFT                             0
+#define BANK_ACCDET_SWRST_MASK                            0x1
+#define BANK_ACCDET_SWRST_MASK_SFT                        (0x1 << 0)
+
+/* MT6358_AUD_TOP_INT_CON0 */
+#define RG_INT_EN_AUDIO_SFT                               0
+#define RG_INT_EN_AUDIO_MASK                              0x1
+#define RG_INT_EN_AUDIO_MASK_SFT                          (0x1 << 0)
+#define RG_INT_EN_ACCDET_SFT                              5
+#define RG_INT_EN_ACCDET_MASK                             0x1
+#define RG_INT_EN_ACCDET_MASK_SFT                         (0x1 << 5)
+#define RG_INT_EN_ACCDET_EINT0_SFT                        6
+#define RG_INT_EN_ACCDET_EINT0_MASK                       0x1
+#define RG_INT_EN_ACCDET_EINT0_MASK_SFT                   (0x1 << 6)
+#define RG_INT_EN_ACCDET_EINT1_SFT                        7
+#define RG_INT_EN_ACCDET_EINT1_MASK                       0x1
+#define RG_INT_EN_ACCDET_EINT1_MASK_SFT                   (0x1 << 7)
+
+/* MT6358_AUD_TOP_INT_CON0_SET */
+#define RG_AUD_INT_CON0_SET_SFT                           0
+#define RG_AUD_INT_CON0_SET_MASK                          0xffff
+#define RG_AUD_INT_CON0_SET_MASK_SFT                      (0xffff << 0)
+
+/* MT6358_AUD_TOP_INT_CON0_CLR */
+#define RG_AUD_INT_CON0_CLR_SFT                           0
+#define RG_AUD_INT_CON0_CLR_MASK                          0xffff
+#define RG_AUD_INT_CON0_CLR_MASK_SFT                      (0xffff << 0)
+
+/* MT6358_AUD_TOP_INT_MASK_CON0 */
+#define RG_INT_MASK_AUDIO_SFT                             0
+#define RG_INT_MASK_AUDIO_MASK                            0x1
+#define RG_INT_MASK_AUDIO_MASK_SFT                        (0x1 << 0)
+#define RG_INT_MASK_ACCDET_SFT                            5
+#define RG_INT_MASK_ACCDET_MASK                           0x1
+#define RG_INT_MASK_ACCDET_MASK_SFT                       (0x1 << 5)
+#define RG_INT_MASK_ACCDET_EINT0_SFT                      6
+#define RG_INT_MASK_ACCDET_EINT0_MASK                     0x1
+#define RG_INT_MASK_ACCDET_EINT0_MASK_SFT                 (0x1 << 6)
+#define RG_INT_MASK_ACCDET_EINT1_SFT                      7
+#define RG_INT_MASK_ACCDET_EINT1_MASK                     0x1
+#define RG_INT_MASK_ACCDET_EINT1_MASK_SFT                 (0x1 << 7)
+
+/* MT6358_AUD_TOP_INT_MASK_CON0_SET */
+#define RG_AUD_INT_MASK_CON0_SET_SFT                      0
+#define RG_AUD_INT_MASK_CON0_SET_MASK                     0xff
+#define RG_AUD_INT_MASK_CON0_SET_MASK_SFT                 (0xff << 0)
+
+/* MT6358_AUD_TOP_INT_MASK_CON0_CLR */
+#define RG_AUD_INT_MASK_CON0_CLR_SFT                      0
+#define RG_AUD_INT_MASK_CON0_CLR_MASK                     0xff
+#define RG_AUD_INT_MASK_CON0_CLR_MASK_SFT                 (0xff << 0)
+
+/* MT6358_AUD_TOP_INT_STATUS0 */
+#define RG_INT_STATUS_AUDIO_SFT                           0
+#define RG_INT_STATUS_AUDIO_MASK                          0x1
+#define RG_INT_STATUS_AUDIO_MASK_SFT                      (0x1 << 0)
+#define RG_INT_STATUS_ACCDET_SFT                          5
+#define RG_INT_STATUS_ACCDET_MASK                         0x1
+#define RG_INT_STATUS_ACCDET_MASK_SFT                     (0x1 << 5)
+#define RG_INT_STATUS_ACCDET_EINT0_SFT                    6
+#define RG_INT_STATUS_ACCDET_EINT0_MASK                   0x1
+#define RG_INT_STATUS_ACCDET_EINT0_MASK_SFT               (0x1 << 6)
+#define RG_INT_STATUS_ACCDET_EINT1_SFT                    7
+#define RG_INT_STATUS_ACCDET_EINT1_MASK                   0x1
+#define RG_INT_STATUS_ACCDET_EINT1_MASK_SFT               (0x1 << 7)
+
+/* MT6358_AUD_TOP_INT_RAW_STATUS0 */
+#define RG_INT_RAW_STATUS_AUDIO_SFT                       0
+#define RG_INT_RAW_STATUS_AUDIO_MASK                      0x1
+#define RG_INT_RAW_STATUS_AUDIO_MASK_SFT                  (0x1 << 0)
+#define RG_INT_RAW_STATUS_ACCDET_SFT                      5
+#define RG_INT_RAW_STATUS_ACCDET_MASK                     0x1
+#define RG_INT_RAW_STATUS_ACCDET_MASK_SFT                 (0x1 << 5)
+#define RG_INT_RAW_STATUS_ACCDET_EINT0_SFT                6
+#define RG_INT_RAW_STATUS_ACCDET_EINT0_MASK               0x1
+#define RG_INT_RAW_STATUS_ACCDET_EINT0_MASK_SFT           (0x1 << 6)
+#define RG_INT_RAW_STATUS_ACCDET_EINT1_SFT                7
+#define RG_INT_RAW_STATUS_ACCDET_EINT1_MASK               0x1
+#define RG_INT_RAW_STATUS_ACCDET_EINT1_MASK_SFT           (0x1 << 7)
+
+/* MT6358_AUD_TOP_INT_MISC_CON0 */
+#define RG_AUD_TOP_INT_POLARITY_SFT                       0
+#define RG_AUD_TOP_INT_POLARITY_MASK                      0x1
+#define RG_AUD_TOP_INT_POLARITY_MASK_SFT                  (0x1 << 0)
+
+/* MT6358_AUDNCP_CLKDIV_CON0 */
+#define RG_DIVCKS_CHG_SFT                                 0
+#define RG_DIVCKS_CHG_MASK                                0x1
+#define RG_DIVCKS_CHG_MASK_SFT                            (0x1 << 0)
+
+/* MT6358_AUDNCP_CLKDIV_CON1 */
+#define RG_DIVCKS_ON_SFT                                  0
+#define RG_DIVCKS_ON_MASK                                 0x1
+#define RG_DIVCKS_ON_MASK_SFT                             (0x1 << 0)
+
+/* MT6358_AUDNCP_CLKDIV_CON2 */
+#define RG_DIVCKS_PRG_SFT                                 0
+#define RG_DIVCKS_PRG_MASK                                0x1ff
+#define RG_DIVCKS_PRG_MASK_SFT                            (0x1ff << 0)
+
+/* MT6358_AUDNCP_CLKDIV_CON3 */
+#define RG_DIVCKS_PWD_NCP_SFT                             0
+#define RG_DIVCKS_PWD_NCP_MASK                            0x1
+#define RG_DIVCKS_PWD_NCP_MASK_SFT                        (0x1 << 0)
+
+/* MT6358_AUDNCP_CLKDIV_CON4 */
+#define RG_DIVCKS_PWD_NCP_ST_SEL_SFT                      0
+#define RG_DIVCKS_PWD_NCP_ST_SEL_MASK                     0x3
+#define RG_DIVCKS_PWD_NCP_ST_SEL_MASK_SFT                 (0x3 << 0)
+
+/* MT6358_AUD_TOP_MON_CON0 */
+#define RG_AUD_TOP_MON_SEL_SFT                            0
+#define RG_AUD_TOP_MON_SEL_MASK                           0x7
+#define RG_AUD_TOP_MON_SEL_MASK_SFT                       (0x7 << 0)
+#define RG_AUD_CLK_INT_MON_FLAG_SEL_SFT                   3
+#define RG_AUD_CLK_INT_MON_FLAG_SEL_MASK                  0xff
+#define RG_AUD_CLK_INT_MON_FLAG_SEL_MASK_SFT              (0xff << 3)
+#define RG_AUD_CLK_INT_MON_FLAG_EN_SFT                    11
+#define RG_AUD_CLK_INT_MON_FLAG_EN_MASK                   0x1
+#define RG_AUD_CLK_INT_MON_FLAG_EN_MASK_SFT               (0x1 << 11)
+
+/* MT6358_AUDIO_DIG_DSN_ID */
+#define AUDIO_DIG_ANA_ID_SFT                              0
+#define AUDIO_DIG_ANA_ID_MASK                             0xff
+#define AUDIO_DIG_ANA_ID_MASK_SFT                         (0xff << 0)
+#define AUDIO_DIG_DIG_ID_SFT                              8
+#define AUDIO_DIG_DIG_ID_MASK                             0xff
+#define AUDIO_DIG_DIG_ID_MASK_SFT                         (0xff << 8)
+
+/* MT6358_AUDIO_DIG_DSN_REV0 */
+#define AUDIO_DIG_ANA_MINOR_REV_SFT                       0
+#define AUDIO_DIG_ANA_MINOR_REV_MASK                      0xf
+#define AUDIO_DIG_ANA_MINOR_REV_MASK_SFT                  (0xf << 0)
+#define AUDIO_DIG_ANA_MAJOR_REV_SFT                       4
+#define AUDIO_DIG_ANA_MAJOR_REV_MASK                      0xf
+#define AUDIO_DIG_ANA_MAJOR_REV_MASK_SFT                  (0xf << 4)
+#define AUDIO_DIG_DIG_MINOR_REV_SFT                       8
+#define AUDIO_DIG_DIG_MINOR_REV_MASK                      0xf
+#define AUDIO_DIG_DIG_MINOR_REV_MASK_SFT                  (0xf << 8)
+#define AUDIO_DIG_DIG_MAJOR_REV_SFT                       12
+#define AUDIO_DIG_DIG_MAJOR_REV_MASK                      0xf
+#define AUDIO_DIG_DIG_MAJOR_REV_MASK_SFT                  (0xf << 12)
+
+/* MT6358_AUDIO_DIG_DSN_DBI */
+#define AUDIO_DIG_DSN_CBS_SFT                             0
+#define AUDIO_DIG_DSN_CBS_MASK                            0x3
+#define AUDIO_DIG_DSN_CBS_MASK_SFT                        (0x3 << 0)
+#define AUDIO_DIG_DSN_BIX_SFT                             2
+#define AUDIO_DIG_DSN_BIX_MASK                            0x3
+#define AUDIO_DIG_DSN_BIX_MASK_SFT                        (0x3 << 2)
+#define AUDIO_DIG_ESP_SFT                                 8
+#define AUDIO_DIG_ESP_MASK                                0xff
+#define AUDIO_DIG_ESP_MASK_SFT                            (0xff << 8)
+
+/* MT6358_AUDIO_DIG_DSN_DXI */
+#define AUDIO_DIG_DSN_FPI_SFT                             0
+#define AUDIO_DIG_DSN_FPI_MASK                            0xff
+#define AUDIO_DIG_DSN_FPI_MASK_SFT                        (0xff << 0)
+
+/* MT6358_AFE_UL_DL_CON0 */
+#define AFE_UL_LR_SWAP_SFT                                15
+#define AFE_UL_LR_SWAP_MASK                               0x1
+#define AFE_UL_LR_SWAP_MASK_SFT                           (0x1 << 15)
+#define AFE_DL_LR_SWAP_SFT                                14
+#define AFE_DL_LR_SWAP_MASK                               0x1
+#define AFE_DL_LR_SWAP_MASK_SFT                           (0x1 << 14)
+#define AFE_ON_SFT                                        0
+#define AFE_ON_MASK                                       0x1
+#define AFE_ON_MASK_SFT                                   (0x1 << 0)
+
+/* MT6358_AFE_DL_SRC2_CON0_L */
+#define DL_2_SRC_ON_TMP_CTL_PRE_SFT                       0
+#define DL_2_SRC_ON_TMP_CTL_PRE_MASK                      0x1
+#define DL_2_SRC_ON_TMP_CTL_PRE_MASK_SFT                  (0x1 << 0)
+
+/* MT6358_AFE_UL_SRC_CON0_H */
+#define C_DIGMIC_PHASE_SEL_CH1_CTL_SFT                    11
+#define C_DIGMIC_PHASE_SEL_CH1_CTL_MASK                   0x7
+#define C_DIGMIC_PHASE_SEL_CH1_CTL_MASK_SFT               (0x7 << 11)
+#define C_DIGMIC_PHASE_SEL_CH2_CTL_SFT                    8
+#define C_DIGMIC_PHASE_SEL_CH2_CTL_MASK                   0x7
+#define C_DIGMIC_PHASE_SEL_CH2_CTL_MASK_SFT               (0x7 << 8)
+#define C_TWO_DIGITAL_MIC_CTL_SFT                         7
+#define C_TWO_DIGITAL_MIC_CTL_MASK                        0x1
+#define C_TWO_DIGITAL_MIC_CTL_MASK_SFT                    (0x1 << 7)
+
+/* MT6358_AFE_UL_SRC_CON0_L */
+#define DMIC_LOW_POWER_MODE_CTL_SFT                       14
+#define DMIC_LOW_POWER_MODE_CTL_MASK                      0x3
+#define DMIC_LOW_POWER_MODE_CTL_MASK_SFT                  (0x3 << 14)
+#define DIGMIC_3P25M_1P625M_SEL_CTL_SFT                   5
+#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK                  0x1
+#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK_SFT              (0x1 << 5)
+#define UL_LOOP_BACK_MODE_CTL_SFT                         2
+#define UL_LOOP_BACK_MODE_CTL_MASK                        0x1
+#define UL_LOOP_BACK_MODE_CTL_MASK_SFT                    (0x1 << 2)
+#define UL_SDM_3_LEVEL_CTL_SFT                            1
+#define UL_SDM_3_LEVEL_CTL_MASK                           0x1
+#define UL_SDM_3_LEVEL_CTL_MASK_SFT                       (0x1 << 1)
+#define UL_SRC_ON_TMP_CTL_SFT                             0
+#define UL_SRC_ON_TMP_CTL_MASK                            0x1
+#define UL_SRC_ON_TMP_CTL_MASK_SFT                        (0x1 << 0)
+
+/* MT6358_AFE_TOP_CON0 */
+#define MTKAIF_SINE_ON_SFT                                2
+#define MTKAIF_SINE_ON_MASK                               0x1
+#define MTKAIF_SINE_ON_MASK_SFT                           (0x1 << 2)
+#define UL_SINE_ON_SFT                                    1
+#define UL_SINE_ON_MASK                                   0x1
+#define UL_SINE_ON_MASK_SFT                               (0x1 << 1)
+#define DL_SINE_ON_SFT                                    0
+#define DL_SINE_ON_MASK                                   0x1
+#define DL_SINE_ON_MASK_SFT                               (0x1 << 0)
+
+/* MT6358_AUDIO_TOP_CON0 */
+#define PDN_AFE_CTL_SFT                                   7
+#define PDN_AFE_CTL_MASK                                  0x1
+#define PDN_AFE_CTL_MASK_SFT                              (0x1 << 7)
+#define PDN_DAC_CTL_SFT                                   6
+#define PDN_DAC_CTL_MASK                                  0x1
+#define PDN_DAC_CTL_MASK_SFT                              (0x1 << 6)
+#define PDN_ADC_CTL_SFT                                   5
+#define PDN_ADC_CTL_MASK                                  0x1
+#define PDN_ADC_CTL_MASK_SFT                              (0x1 << 5)
+#define PDN_I2S_DL_CTL_SFT                                3
+#define PDN_I2S_DL_CTL_MASK                               0x1
+#define PDN_I2S_DL_CTL_MASK_SFT                           (0x1 << 3)
+#define PWR_CLK_DIS_CTL_SFT                               2
+#define PWR_CLK_DIS_CTL_MASK                              0x1
+#define PWR_CLK_DIS_CTL_MASK_SFT                          (0x1 << 2)
+#define PDN_AFE_TESTMODEL_CTL_SFT                         1
+#define PDN_AFE_TESTMODEL_CTL_MASK                        0x1
+#define PDN_AFE_TESTMODEL_CTL_MASK_SFT                    (0x1 << 1)
+#define PDN_RESERVED_SFT                                  0
+#define PDN_RESERVED_MASK                                 0x1
+#define PDN_RESERVED_MASK_SFT                             (0x1 << 0)
+
+/* MT6358_AFE_MON_DEBUG0 */
+#define AUDIO_SYS_TOP_MON_SWAP_SFT                        14
+#define AUDIO_SYS_TOP_MON_SWAP_MASK                       0x3
+#define AUDIO_SYS_TOP_MON_SWAP_MASK_SFT                   (0x3 << 14)
+#define AUDIO_SYS_TOP_MON_SEL_SFT                         8
+#define AUDIO_SYS_TOP_MON_SEL_MASK                        0x1f
+#define AUDIO_SYS_TOP_MON_SEL_MASK_SFT                    (0x1f << 8)
+#define AFE_MON_SEL_SFT                                   0
+#define AFE_MON_SEL_MASK                                  0xff
+#define AFE_MON_SEL_MASK_SFT                              (0xff << 0)
+
+/* MT6358_AFUNC_AUD_CON0 */
+#define CCI_AUD_ANACK_SEL_SFT                             15
+#define CCI_AUD_ANACK_SEL_MASK                            0x1
+#define CCI_AUD_ANACK_SEL_MASK_SFT                        (0x1 << 15)
+#define CCI_AUDIO_FIFO_WPTR_SFT                           12
+#define CCI_AUDIO_FIFO_WPTR_MASK                          0x7
+#define CCI_AUDIO_FIFO_WPTR_MASK_SFT                      (0x7 << 12)
+#define CCI_SCRAMBLER_CG_EN_SFT                           11
+#define CCI_SCRAMBLER_CG_EN_MASK                          0x1
+#define CCI_SCRAMBLER_CG_EN_MASK_SFT                      (0x1 << 11)
+#define CCI_LCH_INV_SFT                                   10
+#define CCI_LCH_INV_MASK                                  0x1
+#define CCI_LCH_INV_MASK_SFT                              (0x1 << 10)
+#define CCI_RAND_EN_SFT                                   9
+#define CCI_RAND_EN_MASK                                  0x1
+#define CCI_RAND_EN_MASK_SFT                              (0x1 << 9)
+#define CCI_SPLT_SCRMB_CLK_ON_SFT                         8
+#define CCI_SPLT_SCRMB_CLK_ON_MASK                        0x1
+#define CCI_SPLT_SCRMB_CLK_ON_MASK_SFT                    (0x1 << 8)
+#define CCI_SPLT_SCRMB_ON_SFT                             7
+#define CCI_SPLT_SCRMB_ON_MASK                            0x1
+#define CCI_SPLT_SCRMB_ON_MASK_SFT                        (0x1 << 7)
+#define CCI_AUD_IDAC_TEST_EN_SFT                          6
+#define CCI_AUD_IDAC_TEST_EN_MASK                         0x1
+#define CCI_AUD_IDAC_TEST_EN_MASK_SFT                     (0x1 << 6)
+#define CCI_ZERO_PAD_DISABLE_SFT                          5
+#define CCI_ZERO_PAD_DISABLE_MASK                         0x1
+#define CCI_ZERO_PAD_DISABLE_MASK_SFT                     (0x1 << 5)
+#define CCI_AUD_SPLIT_TEST_EN_SFT                         4
+#define CCI_AUD_SPLIT_TEST_EN_MASK                        0x1
+#define CCI_AUD_SPLIT_TEST_EN_MASK_SFT                    (0x1 << 4)
+#define CCI_AUD_SDM_MUTEL_SFT                             3
+#define CCI_AUD_SDM_MUTEL_MASK                            0x1
+#define CCI_AUD_SDM_MUTEL_MASK_SFT                        (0x1 << 3)
+#define CCI_AUD_SDM_MUTER_SFT                             2
+#define CCI_AUD_SDM_MUTER_MASK                            0x1
+#define CCI_AUD_SDM_MUTER_MASK_SFT                        (0x1 << 2)
+#define CCI_AUD_SDM_7BIT_SEL_SFT                          1
+#define CCI_AUD_SDM_7BIT_SEL_MASK                         0x1
+#define CCI_AUD_SDM_7BIT_SEL_MASK_SFT                     (0x1 << 1)
+#define CCI_SCRAMBLER_EN_SFT                              0
+#define CCI_SCRAMBLER_EN_MASK                             0x1
+#define CCI_SCRAMBLER_EN_MASK_SFT                         (0x1 << 0)
+
+/* MT6358_AFUNC_AUD_CON1 */
+#define AUD_SDM_TEST_L_SFT                                8
+#define AUD_SDM_TEST_L_MASK                               0xff
+#define AUD_SDM_TEST_L_MASK_SFT                           (0xff << 8)
+#define AUD_SDM_TEST_R_SFT                                0
+#define AUD_SDM_TEST_R_MASK                               0xff
+#define AUD_SDM_TEST_R_MASK_SFT                           (0xff << 0)
+
+/* MT6358_AFUNC_AUD_CON2 */
+#define CCI_AUD_DAC_ANA_MUTE_SFT                          7
+#define CCI_AUD_DAC_ANA_MUTE_MASK                         0x1
+#define CCI_AUD_DAC_ANA_MUTE_MASK_SFT                     (0x1 << 7)
+#define CCI_AUD_DAC_ANA_RSTB_SEL_SFT                      6
+#define CCI_AUD_DAC_ANA_RSTB_SEL_MASK                     0x1
+#define CCI_AUD_DAC_ANA_RSTB_SEL_MASK_SFT                 (0x1 << 6)
+#define CCI_AUDIO_FIFO_CLKIN_INV_SFT                      4
+#define CCI_AUDIO_FIFO_CLKIN_INV_MASK                     0x1
+#define CCI_AUDIO_FIFO_CLKIN_INV_MASK_SFT                 (0x1 << 4)
+#define CCI_AUDIO_FIFO_ENABLE_SFT                         3
+#define CCI_AUDIO_FIFO_ENABLE_MASK                        0x1
+#define CCI_AUDIO_FIFO_ENABLE_MASK_SFT                    (0x1 << 3)
+#define CCI_ACD_MODE_SFT                                  2
+#define CCI_ACD_MODE_MASK                                 0x1
+#define CCI_ACD_MODE_MASK_SFT                             (0x1 << 2)
+#define CCI_AFIFO_CLK_PWDB_SFT                            1
+#define CCI_AFIFO_CLK_PWDB_MASK                           0x1
+#define CCI_AFIFO_CLK_PWDB_MASK_SFT                       (0x1 << 1)
+#define CCI_ACD_FUNC_RSTB_SFT                             0
+#define CCI_ACD_FUNC_RSTB_MASK                            0x1
+#define CCI_ACD_FUNC_RSTB_MASK_SFT                        (0x1 << 0)
+
+/* MT6358_AFUNC_AUD_CON3 */
+#define SDM_ANA13M_TESTCK_SEL_SFT                         15
+#define SDM_ANA13M_TESTCK_SEL_MASK                        0x1
+#define SDM_ANA13M_TESTCK_SEL_MASK_SFT                    (0x1 << 15)
+#define SDM_ANA13M_TESTCK_SRC_SEL_SFT                     12
+#define SDM_ANA13M_TESTCK_SRC_SEL_MASK                    0x7
+#define SDM_ANA13M_TESTCK_SRC_SEL_MASK_SFT                (0x7 << 12)
+#define SDM_TESTCK_SRC_SEL_SFT                            8
+#define SDM_TESTCK_SRC_SEL_MASK                           0x7
+#define SDM_TESTCK_SRC_SEL_MASK_SFT                       (0x7 << 8)
+#define DIGMIC_TESTCK_SRC_SEL_SFT                         4
+#define DIGMIC_TESTCK_SRC_SEL_MASK                        0x7
+#define DIGMIC_TESTCK_SRC_SEL_MASK_SFT                    (0x7 << 4)
+#define DIGMIC_TESTCK_SEL_SFT                             0
+#define DIGMIC_TESTCK_SEL_MASK                            0x1
+#define DIGMIC_TESTCK_SEL_MASK_SFT                        (0x1 << 0)
+
+/* MT6358_AFUNC_AUD_CON4 */
+#define UL_FIFO_WCLK_INV_SFT                              8
+#define UL_FIFO_WCLK_INV_MASK                             0x1
+#define UL_FIFO_WCLK_INV_MASK_SFT                         (0x1 << 8)
+#define UL_FIFO_DIGMIC_WDATA_TESTSRC_SEL_SFT              6
+#define UL_FIFO_DIGMIC_WDATA_TESTSRC_SEL_MASK             0x1
+#define UL_FIFO_DIGMIC_WDATA_TESTSRC_SEL_MASK_SFT         (0x1 << 6)
+#define UL_FIFO_WDATA_TESTEN_SFT                          5
+#define UL_FIFO_WDATA_TESTEN_MASK                         0x1
+#define UL_FIFO_WDATA_TESTEN_MASK_SFT                     (0x1 << 5)
+#define UL_FIFO_WDATA_TESTSRC_SEL_SFT                     4
+#define UL_FIFO_WDATA_TESTSRC_SEL_MASK                    0x1
+#define UL_FIFO_WDATA_TESTSRC_SEL_MASK_SFT                (0x1 << 4)
+#define UL_FIFO_WCLK_6P5M_TESTCK_SEL_SFT                  3
+#define UL_FIFO_WCLK_6P5M_TESTCK_SEL_MASK                 0x1
+#define UL_FIFO_WCLK_6P5M_TESTCK_SEL_MASK_SFT             (0x1 << 3)
+#define UL_FIFO_WCLK_6P5M_TESTCK_SRC_SEL_SFT              0
+#define UL_FIFO_WCLK_6P5M_TESTCK_SRC_SEL_MASK             0x7
+#define UL_FIFO_WCLK_6P5M_TESTCK_SRC_SEL_MASK_SFT         (0x7 << 0)
+
+/* MT6358_AFUNC_AUD_CON5 */
+#define R_AUD_DAC_POS_LARGE_MONO_SFT                      8
+#define R_AUD_DAC_POS_LARGE_MONO_MASK                     0xff
+#define R_AUD_DAC_POS_LARGE_MONO_MASK_SFT                 (0xff << 8)
+#define R_AUD_DAC_NEG_LARGE_MONO_SFT                      0
+#define R_AUD_DAC_NEG_LARGE_MONO_MASK                     0xff
+#define R_AUD_DAC_NEG_LARGE_MONO_MASK_SFT                 (0xff << 0)
+
+/* MT6358_AFUNC_AUD_CON6 */
+#define R_AUD_DAC_POS_SMALL_MONO_SFT                      12
+#define R_AUD_DAC_POS_SMALL_MONO_MASK                     0xf
+#define R_AUD_DAC_POS_SMALL_MONO_MASK_SFT                 (0xf << 12)
+#define R_AUD_DAC_NEG_SMALL_MONO_SFT                      8
+#define R_AUD_DAC_NEG_SMALL_MONO_MASK                     0xf
+#define R_AUD_DAC_NEG_SMALL_MONO_MASK_SFT                 (0xf << 8)
+#define R_AUD_DAC_POS_TINY_MONO_SFT                       6
+#define R_AUD_DAC_POS_TINY_MONO_MASK                      0x3
+#define R_AUD_DAC_POS_TINY_MONO_MASK_SFT                  (0x3 << 6)
+#define R_AUD_DAC_NEG_TINY_MONO_SFT                       4
+#define R_AUD_DAC_NEG_TINY_MONO_MASK                      0x3
+#define R_AUD_DAC_NEG_TINY_MONO_MASK_SFT                  (0x3 << 4)
+#define R_AUD_DAC_MONO_SEL_SFT                            3
+#define R_AUD_DAC_MONO_SEL_MASK                           0x1
+#define R_AUD_DAC_MONO_SEL_MASK_SFT                       (0x1 << 3)
+#define R_AUD_DAC_SW_RSTB_SFT                             0
+#define R_AUD_DAC_SW_RSTB_MASK                            0x1
+#define R_AUD_DAC_SW_RSTB_MASK_SFT                        (0x1 << 0)
+
+/* MT6358_AFUNC_AUD_MON0 */
+#define AUD_SCR_OUT_L_SFT                                 8
+#define AUD_SCR_OUT_L_MASK                                0xff
+#define AUD_SCR_OUT_L_MASK_SFT                            (0xff << 8)
+#define AUD_SCR_OUT_R_SFT                                 0
+#define AUD_SCR_OUT_R_MASK                                0xff
+#define AUD_SCR_OUT_R_MASK_SFT                            (0xff << 0)
+
+/* MT6358_AUDRC_TUNE_MON0 */
+#define ASYNC_TEST_OUT_BCK_SFT                            15
+#define ASYNC_TEST_OUT_BCK_MASK                           0x1
+#define ASYNC_TEST_OUT_BCK_MASK_SFT                       (0x1 << 15)
+#define RGS_AUDRCTUNE1READ_SFT                            8
+#define RGS_AUDRCTUNE1READ_MASK                           0x1f
+#define RGS_AUDRCTUNE1READ_MASK_SFT                       (0x1f << 8)
+#define RGS_AUDRCTUNE0READ_SFT                            0
+#define RGS_AUDRCTUNE0READ_MASK                           0x1f
+#define RGS_AUDRCTUNE0READ_MASK_SFT                       (0x1f << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_FIFO_CFG0 */
+#define AFE_RESERVED_SFT                                  1
+#define AFE_RESERVED_MASK                                 0x7fff
+#define AFE_RESERVED_MASK_SFT                             (0x7fff << 1)
+#define RG_MTKAIF_RXIF_FIFO_INTEN_SFT                     0
+#define RG_MTKAIF_RXIF_FIFO_INTEN_MASK                    0x1
+#define RG_MTKAIF_RXIF_FIFO_INTEN_MASK_SFT                (0x1 << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_FIFO_LOG_MON1 */
+#define MTKAIF_RXIF_WR_FULL_STATUS_SFT                    1
+#define MTKAIF_RXIF_WR_FULL_STATUS_MASK                   0x1
+#define MTKAIF_RXIF_WR_FULL_STATUS_MASK_SFT               (0x1 << 1)
+#define MTKAIF_RXIF_RD_EMPTY_STATUS_SFT                   0
+#define MTKAIF_RXIF_RD_EMPTY_STATUS_MASK                  0x1
+#define MTKAIF_RXIF_RD_EMPTY_STATUS_MASK_SFT              (0x1 << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_MON0 */
+#define MTKAIFTX_V3_SYNC_OUT_SFT                          14
+#define MTKAIFTX_V3_SYNC_OUT_MASK                         0x1
+#define MTKAIFTX_V3_SYNC_OUT_MASK_SFT                     (0x1 << 14)
+#define MTKAIFTX_V3_SDATA_OUT2_SFT                        13
+#define MTKAIFTX_V3_SDATA_OUT2_MASK                       0x1
+#define MTKAIFTX_V3_SDATA_OUT2_MASK_SFT                   (0x1 << 13)
+#define MTKAIFTX_V3_SDATA_OUT1_SFT                        12
+#define MTKAIFTX_V3_SDATA_OUT1_MASK                       0x1
+#define MTKAIFTX_V3_SDATA_OUT1_MASK_SFT                   (0x1 << 12)
+#define MTKAIF_RXIF_FIFO_STATUS_SFT                       0
+#define MTKAIF_RXIF_FIFO_STATUS_MASK                      0xfff
+#define MTKAIF_RXIF_FIFO_STATUS_MASK_SFT                  (0xfff << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_MON1 */
+#define MTKAIFRX_V3_SYNC_IN_SFT                           14
+#define MTKAIFRX_V3_SYNC_IN_MASK                          0x1
+#define MTKAIFRX_V3_SYNC_IN_MASK_SFT                      (0x1 << 14)
+#define MTKAIFRX_V3_SDATA_IN2_SFT                         13
+#define MTKAIFRX_V3_SDATA_IN2_MASK                        0x1
+#define MTKAIFRX_V3_SDATA_IN2_MASK_SFT                    (0x1 << 13)
+#define MTKAIFRX_V3_SDATA_IN1_SFT                         12
+#define MTKAIFRX_V3_SDATA_IN1_MASK                        0x1
+#define MTKAIFRX_V3_SDATA_IN1_MASK_SFT                    (0x1 << 12)
+#define MTKAIF_RXIF_SEARCH_FAIL_FLAG_SFT                  11
+#define MTKAIF_RXIF_SEARCH_FAIL_FLAG_MASK                 0x1
+#define MTKAIF_RXIF_SEARCH_FAIL_FLAG_MASK_SFT             (0x1 << 11)
+#define MTKAIF_RXIF_INVALID_FLAG_SFT                      8
+#define MTKAIF_RXIF_INVALID_FLAG_MASK                     0x1
+#define MTKAIF_RXIF_INVALID_FLAG_MASK_SFT                 (0x1 << 8)
+#define MTKAIF_RXIF_INVALID_CYCLE_SFT                     0
+#define MTKAIF_RXIF_INVALID_CYCLE_MASK                    0xff
+#define MTKAIF_RXIF_INVALID_CYCLE_MASK_SFT                (0xff << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_MON2 */
+#define MTKAIF_TXIF_IN_CH2_SFT                            8
+#define MTKAIF_TXIF_IN_CH2_MASK                           0xff
+#define MTKAIF_TXIF_IN_CH2_MASK_SFT                       (0xff << 8)
+#define MTKAIF_TXIF_IN_CH1_SFT                            0
+#define MTKAIF_TXIF_IN_CH1_MASK                           0xff
+#define MTKAIF_TXIF_IN_CH1_MASK_SFT                       (0xff << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_MON3 */
+#define MTKAIF_RXIF_OUT_CH2_SFT                           8
+#define MTKAIF_RXIF_OUT_CH2_MASK                          0xff
+#define MTKAIF_RXIF_OUT_CH2_MASK_SFT                      (0xff << 8)
+#define MTKAIF_RXIF_OUT_CH1_SFT                           0
+#define MTKAIF_RXIF_OUT_CH1_MASK                          0xff
+#define MTKAIF_RXIF_OUT_CH1_MASK_SFT                      (0xff << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_CFG0 */
+#define RG_MTKAIF_RXIF_CLKINV_SFT                         15
+#define RG_MTKAIF_RXIF_CLKINV_MASK                        0x1
+#define RG_MTKAIF_RXIF_CLKINV_MASK_SFT                    (0x1 << 15)
+#define RG_MTKAIF_RXIF_PROTOCOL2_SFT                      8
+#define RG_MTKAIF_RXIF_PROTOCOL2_MASK                     0x1
+#define RG_MTKAIF_RXIF_PROTOCOL2_MASK_SFT                 (0x1 << 8)
+#define RG_MTKAIF_BYPASS_SRC_MODE_SFT                     6
+#define RG_MTKAIF_BYPASS_SRC_MODE_MASK                    0x3
+#define RG_MTKAIF_BYPASS_SRC_MODE_MASK_SFT                (0x3 << 6)
+#define RG_MTKAIF_BYPASS_SRC_TEST_SFT                     5
+#define RG_MTKAIF_BYPASS_SRC_TEST_MASK                    0x1
+#define RG_MTKAIF_BYPASS_SRC_TEST_MASK_SFT                (0x1 << 5)
+#define RG_MTKAIF_TXIF_PROTOCOL2_SFT                      4
+#define RG_MTKAIF_TXIF_PROTOCOL2_MASK                     0x1
+#define RG_MTKAIF_TXIF_PROTOCOL2_MASK_SFT                 (0x1 << 4)
+#define RG_MTKAIF_PMIC_TXIF_8TO5_SFT                      2
+#define RG_MTKAIF_PMIC_TXIF_8TO5_MASK                     0x1
+#define RG_MTKAIF_PMIC_TXIF_8TO5_MASK_SFT                 (0x1 << 2)
+#define RG_MTKAIF_LOOPBACK_TEST2_SFT                      1
+#define RG_MTKAIF_LOOPBACK_TEST2_MASK                     0x1
+#define RG_MTKAIF_LOOPBACK_TEST2_MASK_SFT                 (0x1 << 1)
+#define RG_MTKAIF_LOOPBACK_TEST1_SFT                      0
+#define RG_MTKAIF_LOOPBACK_TEST1_MASK                     0x1
+#define RG_MTKAIF_LOOPBACK_TEST1_MASK_SFT                 (0x1 << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_RX_CFG0 */
+#define RG_MTKAIF_RXIF_VOICE_MODE_SFT                     12
+#define RG_MTKAIF_RXIF_VOICE_MODE_MASK                    0xf
+#define RG_MTKAIF_RXIF_VOICE_MODE_MASK_SFT                (0xf << 12)
+#define RG_MTKAIF_RXIF_DATA_BIT_SFT                       8
+#define RG_MTKAIF_RXIF_DATA_BIT_MASK                      0x7
+#define RG_MTKAIF_RXIF_DATA_BIT_MASK_SFT                  (0x7 << 8)
+#define RG_MTKAIF_RXIF_FIFO_RSP_SFT                       4
+#define RG_MTKAIF_RXIF_FIFO_RSP_MASK                      0x7
+#define RG_MTKAIF_RXIF_FIFO_RSP_MASK_SFT                  (0x7 << 4)
+#define RG_MTKAIF_RXIF_DETECT_ON_SFT                      3
+#define RG_MTKAIF_RXIF_DETECT_ON_MASK                     0x1
+#define RG_MTKAIF_RXIF_DETECT_ON_MASK_SFT                 (0x1 << 3)
+#define RG_MTKAIF_RXIF_DATA_MODE_SFT                      0
+#define RG_MTKAIF_RXIF_DATA_MODE_MASK                     0x1
+#define RG_MTKAIF_RXIF_DATA_MODE_MASK_SFT                 (0x1 << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_RX_CFG1 */
+#define RG_MTKAIF_RXIF_SYNC_SEARCH_TABLE_SFT              12
+#define RG_MTKAIF_RXIF_SYNC_SEARCH_TABLE_MASK             0xf
+#define RG_MTKAIF_RXIF_SYNC_SEARCH_TABLE_MASK_SFT         (0xf << 12)
+#define RG_MTKAIF_RXIF_INVALID_SYNC_CHECK_ROUND_SFT       8
+#define RG_MTKAIF_RXIF_INVALID_SYNC_CHECK_ROUND_MASK      0xf
+#define RG_MTKAIF_RXIF_INVALID_SYNC_CHECK_ROUND_MASK_SFT  (0xf << 8)
+#define RG_MTKAIF_RXIF_SYNC_CHECK_ROUND_SFT               4
+#define RG_MTKAIF_RXIF_SYNC_CHECK_ROUND_MASK              0xf
+#define RG_MTKAIF_RXIF_SYNC_CHECK_ROUND_MASK_SFT          (0xf << 4)
+#define RG_MTKAIF_RXIF_VOICE_MODE_PROTOCOL2_SFT           0
+#define RG_MTKAIF_RXIF_VOICE_MODE_PROTOCOL2_MASK          0xf
+#define RG_MTKAIF_RXIF_VOICE_MODE_PROTOCOL2_MASK_SFT      (0xf << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_RX_CFG2 */
+#define RG_MTKAIF_RXIF_CLEAR_SYNC_FAIL_SFT                12
+#define RG_MTKAIF_RXIF_CLEAR_SYNC_FAIL_MASK               0x1
+#define RG_MTKAIF_RXIF_CLEAR_SYNC_FAIL_MASK_SFT           (0x1 << 12)
+#define RG_MTKAIF_RXIF_SYNC_CNT_TABLE_SFT                 0
+#define RG_MTKAIF_RXIF_SYNC_CNT_TABLE_MASK                0xfff
+#define RG_MTKAIF_RXIF_SYNC_CNT_TABLE_MASK_SFT            (0xfff << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_RX_CFG3 */
+#define RG_MTKAIF_RXIF_LOOPBACK_USE_NLE_SFT               7
+#define RG_MTKAIF_RXIF_LOOPBACK_USE_NLE_MASK              0x1
+#define RG_MTKAIF_RXIF_LOOPBACK_USE_NLE_MASK_SFT          (0x1 << 7)
+#define RG_MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_SFT             4
+#define RG_MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_MASK            0x7
+#define RG_MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_MASK_SFT        (0x7 << 4)
+#define RG_MTKAIF_RXIF_DETECT_ON_PROTOCOL2_SFT            3
+#define RG_MTKAIF_RXIF_DETECT_ON_PROTOCOL2_MASK           0x1
+#define RG_MTKAIF_RXIF_DETECT_ON_PROTOCOL2_MASK_SFT       (0x1 << 3)
+
+/* MT6358_AFE_ADDA_MTKAIF_TX_CFG1 */
+#define RG_MTKAIF_SYNC_WORD2_SFT                          4
+#define RG_MTKAIF_SYNC_WORD2_MASK                         0x7
+#define RG_MTKAIF_SYNC_WORD2_MASK_SFT                     (0x7 << 4)
+#define RG_MTKAIF_SYNC_WORD1_SFT                          0
+#define RG_MTKAIF_SYNC_WORD1_MASK                         0x7
+#define RG_MTKAIF_SYNC_WORD1_MASK_SFT                     (0x7 << 0)
+
+/* MT6358_AFE_SGEN_CFG0 */
+#define SGEN_AMP_DIV_CH1_CTL_SFT                          12
+#define SGEN_AMP_DIV_CH1_CTL_MASK                         0xf
+#define SGEN_AMP_DIV_CH1_CTL_MASK_SFT                     (0xf << 12)
+#define SGEN_DAC_EN_CTL_SFT                               7
+#define SGEN_DAC_EN_CTL_MASK                              0x1
+#define SGEN_DAC_EN_CTL_MASK_SFT                          (0x1 << 7)
+#define SGEN_MUTE_SW_CTL_SFT                              6
+#define SGEN_MUTE_SW_CTL_MASK                             0x1
+#define SGEN_MUTE_SW_CTL_MASK_SFT                         (0x1 << 6)
+#define R_AUD_SDM_MUTE_L_SFT                              5
+#define R_AUD_SDM_MUTE_L_MASK                             0x1
+#define R_AUD_SDM_MUTE_L_MASK_SFT                         (0x1 << 5)
+#define R_AUD_SDM_MUTE_R_SFT                              4
+#define R_AUD_SDM_MUTE_R_MASK                             0x1
+#define R_AUD_SDM_MUTE_R_MASK_SFT                         (0x1 << 4)
+
+/* MT6358_AFE_SGEN_CFG1 */
+#define C_SGEN_RCH_INV_5BIT_SFT                           15
+#define C_SGEN_RCH_INV_5BIT_MASK                          0x1
+#define C_SGEN_RCH_INV_5BIT_MASK_SFT                      (0x1 << 15)
+#define C_SGEN_RCH_INV_8BIT_SFT                           14
+#define C_SGEN_RCH_INV_8BIT_MASK                          0x1
+#define C_SGEN_RCH_INV_8BIT_MASK_SFT                      (0x1 << 14)
+#define SGEN_FREQ_DIV_CH1_CTL_SFT                         0
+#define SGEN_FREQ_DIV_CH1_CTL_MASK                        0x1f
+#define SGEN_FREQ_DIV_CH1_CTL_MASK_SFT                    (0x1f << 0)
+
+/* MT6358_AFE_ADC_ASYNC_FIFO_CFG */
+#define RG_UL_ASYNC_FIFO_SOFT_RST_EN_SFT                  5
+#define RG_UL_ASYNC_FIFO_SOFT_RST_EN_MASK                 0x1
+#define RG_UL_ASYNC_FIFO_SOFT_RST_EN_MASK_SFT             (0x1 << 5)
+#define RG_UL_ASYNC_FIFO_SOFT_RST_SFT                     4
+#define RG_UL_ASYNC_FIFO_SOFT_RST_MASK                    0x1
+#define RG_UL_ASYNC_FIFO_SOFT_RST_MASK_SFT                (0x1 << 4)
+#define RG_AMIC_UL_ADC_CLK_SEL_SFT                        1
+#define RG_AMIC_UL_ADC_CLK_SEL_MASK                       0x1
+#define RG_AMIC_UL_ADC_CLK_SEL_MASK_SFT                   (0x1 << 1)
+
+/* MT6358_AFE_DCCLK_CFG0 */
+#define DCCLK_DIV_SFT                                     5
+#define DCCLK_DIV_MASK                                    0x7ff
+#define DCCLK_DIV_MASK_SFT                                (0x7ff << 5)
+#define DCCLK_INV_SFT                                     4
+#define DCCLK_INV_MASK                                    0x1
+#define DCCLK_INV_MASK_SFT                                (0x1 << 4)
+#define DCCLK_PDN_SFT                                     1
+#define DCCLK_PDN_MASK                                    0x1
+#define DCCLK_PDN_MASK_SFT                                (0x1 << 1)
+#define DCCLK_GEN_ON_SFT                                  0
+#define DCCLK_GEN_ON_MASK                                 0x1
+#define DCCLK_GEN_ON_MASK_SFT                             (0x1 << 0)
+
+/* MT6358_AFE_DCCLK_CFG1 */
+#define RESYNC_SRC_SEL_SFT                                10
+#define RESYNC_SRC_SEL_MASK                               0x3
+#define RESYNC_SRC_SEL_MASK_SFT                           (0x3 << 10)
+#define RESYNC_SRC_CK_INV_SFT                             9
+#define RESYNC_SRC_CK_INV_MASK                            0x1
+#define RESYNC_SRC_CK_INV_MASK_SFT                        (0x1 << 9)
+#define DCCLK_RESYNC_BYPASS_SFT                           8
+#define DCCLK_RESYNC_BYPASS_MASK                          0x1
+#define DCCLK_RESYNC_BYPASS_MASK_SFT                      (0x1 << 8)
+#define DCCLK_PHASE_SEL_SFT                               4
+#define DCCLK_PHASE_SEL_MASK                              0xf
+#define DCCLK_PHASE_SEL_MASK_SFT                          (0xf << 4)
+
+/* MT6358_AUDIO_DIG_CFG */
+#define RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_SFT             15
+#define RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_MASK            0x1
+#define RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_MASK_SFT        (0x1 << 15)
+#define RG_AUD_PAD_TOP_PHASE_MODE2_SFT                    8
+#define RG_AUD_PAD_TOP_PHASE_MODE2_MASK                   0x7f
+#define RG_AUD_PAD_TOP_PHASE_MODE2_MASK_SFT               (0x7f << 8)
+#define RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT              7
+#define RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_MASK             0x1
+#define RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_MASK_SFT         (0x1 << 7)
+#define RG_AUD_PAD_TOP_PHASE_MODE_SFT                     0
+#define RG_AUD_PAD_TOP_PHASE_MODE_MASK                    0x7f
+#define RG_AUD_PAD_TOP_PHASE_MODE_MASK_SFT                (0x7f << 0)
+
+/* MT6358_AFE_AUD_PAD_TOP */
+#define RG_AUD_PAD_TOP_TX_FIFO_RSP_SFT                    12
+#define RG_AUD_PAD_TOP_TX_FIFO_RSP_MASK                   0x7
+#define RG_AUD_PAD_TOP_TX_FIFO_RSP_MASK_SFT               (0x7 << 12)
+#define RG_AUD_PAD_TOP_MTKAIF_CLK_PROTOCOL2_SFT           11
+#define RG_AUD_PAD_TOP_MTKAIF_CLK_PROTOCOL2_MASK          0x1
+#define RG_AUD_PAD_TOP_MTKAIF_CLK_PROTOCOL2_MASK_SFT      (0x1 << 11)
+#define RG_AUD_PAD_TOP_TX_FIFO_ON_SFT                     8
+#define RG_AUD_PAD_TOP_TX_FIFO_ON_MASK                    0x1
+#define RG_AUD_PAD_TOP_TX_FIFO_ON_MASK_SFT                (0x1 << 8)
+
+/* MT6358_AFE_AUD_PAD_TOP_MON */
+#define ADDA_AUD_PAD_TOP_MON_SFT                          0
+#define ADDA_AUD_PAD_TOP_MON_MASK                         0xffff
+#define ADDA_AUD_PAD_TOP_MON_MASK_SFT                     (0xffff << 0)
+
+/* MT6358_AFE_AUD_PAD_TOP_MON1 */
+#define ADDA_AUD_PAD_TOP_MON1_SFT                         0
+#define ADDA_AUD_PAD_TOP_MON1_MASK                        0xffff
+#define ADDA_AUD_PAD_TOP_MON1_MASK_SFT                    (0xffff << 0)
+
+/* MT6358_AFE_DL_NLE_CFG */
+#define NLE_RCH_HPGAIN_SEL_SFT                            10
+#define NLE_RCH_HPGAIN_SEL_MASK                           0x1
+#define NLE_RCH_HPGAIN_SEL_MASK_SFT                       (0x1 << 10)
+#define NLE_RCH_CH_SEL_SFT                                9
+#define NLE_RCH_CH_SEL_MASK                               0x1
+#define NLE_RCH_CH_SEL_MASK_SFT                           (0x1 << 9)
+#define NLE_RCH_ON_SFT                                    8
+#define NLE_RCH_ON_MASK                                   0x1
+#define NLE_RCH_ON_MASK_SFT                               (0x1 << 8)
+#define NLE_LCH_HPGAIN_SEL_SFT                            2
+#define NLE_LCH_HPGAIN_SEL_MASK                           0x1
+#define NLE_LCH_HPGAIN_SEL_MASK_SFT                       (0x1 << 2)
+#define NLE_LCH_CH_SEL_SFT                                1
+#define NLE_LCH_CH_SEL_MASK                               0x1
+#define NLE_LCH_CH_SEL_MASK_SFT                           (0x1 << 1)
+#define NLE_LCH_ON_SFT                                    0
+#define NLE_LCH_ON_MASK                                   0x1
+#define NLE_LCH_ON_MASK_SFT                               (0x1 << 0)
+
+/* MT6358_AFE_DL_NLE_MON */
+#define NLE_MONITOR_SFT                                   0
+#define NLE_MONITOR_MASK                                  0x3fff
+#define NLE_MONITOR_MASK_SFT                              (0x3fff << 0)
+
+/* MT6358_AFE_CG_EN_MON */
+#define CK_CG_EN_MON_SFT                                  0
+#define CK_CG_EN_MON_MASK                                 0x3f
+#define CK_CG_EN_MON_MASK_SFT                             (0x3f << 0)
+
+/* MT6358_AFE_VOW_TOP */
+#define PDN_VOW_SFT                                       15
+#define PDN_VOW_MASK                                      0x1
+#define PDN_VOW_MASK_SFT                                  (0x1 << 15)
+#define VOW_1P6M_800K_SEL_SFT                             14
+#define VOW_1P6M_800K_SEL_MASK                            0x1
+#define VOW_1P6M_800K_SEL_MASK_SFT                        (0x1 << 14)
+#define VOW_DIGMIC_ON_SFT                                 13
+#define VOW_DIGMIC_ON_MASK                                0x1
+#define VOW_DIGMIC_ON_MASK_SFT                            (0x1 << 13)
+#define VOW_CK_DIV_RST_SFT                                12
+#define VOW_CK_DIV_RST_MASK                               0x1
+#define VOW_CK_DIV_RST_MASK_SFT                           (0x1 << 12)
+#define VOW_ON_SFT                                        11
+#define VOW_ON_MASK                                       0x1
+#define VOW_ON_MASK_SFT                                   (0x1 << 11)
+#define VOW_DIGMIC_CK_PHASE_SEL_SFT                       8
+#define VOW_DIGMIC_CK_PHASE_SEL_MASK                      0x7
+#define VOW_DIGMIC_CK_PHASE_SEL_MASK_SFT                  (0x7 << 8)
+#define MAIN_DMIC_CK_VOW_SEL_SFT                          7
+#define MAIN_DMIC_CK_VOW_SEL_MASK                         0x1
+#define MAIN_DMIC_CK_VOW_SEL_MASK_SFT                     (0x1 << 7)
+#define VOW_SDM_3_LEVEL_SFT                               6
+#define VOW_SDM_3_LEVEL_MASK                              0x1
+#define VOW_SDM_3_LEVEL_MASK_SFT                          (0x1 << 6)
+#define VOW_LOOP_BACK_MODE_SFT                            5
+#define VOW_LOOP_BACK_MODE_MASK                           0x1
+#define VOW_LOOP_BACK_MODE_MASK_SFT                       (0x1 << 5)
+#define VOW_INTR_SOURCE_SEL_SFT                           4
+#define VOW_INTR_SOURCE_SEL_MASK                          0x1
+#define VOW_INTR_SOURCE_SEL_MASK_SFT                      (0x1 << 4)
+#define VOW_INTR_CLR_SFT                                  3
+#define VOW_INTR_CLR_MASK                                 0x1
+#define VOW_INTR_CLR_MASK_SFT                             (0x1 << 3)
+#define S_N_VALUE_RST_SFT                                 2
+#define S_N_VALUE_RST_MASK                                0x1
+#define S_N_VALUE_RST_MASK_SFT                            (0x1 << 2)
+#define SAMPLE_BASE_MODE_SFT                              1
+#define SAMPLE_BASE_MODE_MASK                             0x1
+#define SAMPLE_BASE_MODE_MASK_SFT                         (0x1 << 1)
+#define VOW_INTR_FLAG_SFT                                 0
+#define VOW_INTR_FLAG_MASK                                0x1
+#define VOW_INTR_FLAG_MASK_SFT                            (0x1 << 0)
+
+/* MT6358_AFE_VOW_CFG0 */
+#define AMPREF_SFT                                        0
+#define AMPREF_MASK                                       0xffff
+#define AMPREF_MASK_SFT                                   (0xffff << 0)
+
+/* MT6358_AFE_VOW_CFG1 */
+#define TIMERINI_SFT                                      0
+#define TIMERINI_MASK                                     0xffff
+#define TIMERINI_MASK_SFT                                 (0xffff << 0)
+
+/* MT6358_AFE_VOW_CFG2 */
+#define B_DEFAULT_SFT                                     12
+#define B_DEFAULT_MASK                                    0x7
+#define B_DEFAULT_MASK_SFT                                (0x7 << 12)
+#define A_DEFAULT_SFT                                     8
+#define A_DEFAULT_MASK                                    0x7
+#define A_DEFAULT_MASK_SFT                                (0x7 << 8)
+#define B_INI_SFT                                         4
+#define B_INI_MASK                                        0x7
+#define B_INI_MASK_SFT                                    (0x7 << 4)
+#define A_INI_SFT                                         0
+#define A_INI_MASK                                        0x7
+#define A_INI_MASK_SFT                                    (0x7 << 0)
+
+/* MT6358_AFE_VOW_CFG3 */
+#define K_BETA_RISE_SFT                                   12
+#define K_BETA_RISE_MASK                                  0xf
+#define K_BETA_RISE_MASK_SFT                              (0xf << 12)
+#define K_BETA_FALL_SFT                                   8
+#define K_BETA_FALL_MASK                                  0xf
+#define K_BETA_FALL_MASK_SFT                              (0xf << 8)
+#define K_ALPHA_RISE_SFT                                  4
+#define K_ALPHA_RISE_MASK                                 0xf
+#define K_ALPHA_RISE_MASK_SFT                             (0xf << 4)
+#define K_ALPHA_FALL_SFT                                  0
+#define K_ALPHA_FALL_MASK                                 0xf
+#define K_ALPHA_FALL_MASK_SFT                             (0xf << 0)
+
+/* MT6358_AFE_VOW_CFG4 */
+#define VOW_TXIF_SCK_INV_SFT                              15
+#define VOW_TXIF_SCK_INV_MASK                             0x1
+#define VOW_TXIF_SCK_INV_MASK_SFT                         (0x1 << 15)
+#define VOW_ADC_TESTCK_SRC_SEL_SFT                        12
+#define VOW_ADC_TESTCK_SRC_SEL_MASK                       0x7
+#define VOW_ADC_TESTCK_SRC_SEL_MASK_SFT                   (0x7 << 12)
+#define VOW_ADC_TESTCK_SEL_SFT                            11
+#define VOW_ADC_TESTCK_SEL_MASK                           0x1
+#define VOW_ADC_TESTCK_SEL_MASK_SFT                       (0x1 << 11)
+#define VOW_ADC_CLK_INV_SFT                               10
+#define VOW_ADC_CLK_INV_MASK                              0x1
+#define VOW_ADC_CLK_INV_MASK_SFT                          (0x1 << 10)
+#define VOW_TXIF_MONO_SFT                                 9
+#define VOW_TXIF_MONO_MASK                                0x1
+#define VOW_TXIF_MONO_MASK_SFT                            (0x1 << 9)
+#define VOW_TXIF_SCK_DIV_SFT                              4
+#define VOW_TXIF_SCK_DIV_MASK                             0x1f
+#define VOW_TXIF_SCK_DIV_MASK_SFT                         (0x1f << 4)
+#define K_GAMMA_SFT                                       0
+#define K_GAMMA_MASK                                      0xf
+#define K_GAMMA_MASK_SFT                                  (0xf << 0)
+
+/* MT6358_AFE_VOW_CFG5 */
+#define N_MIN_SFT                                         0
+#define N_MIN_MASK                                        0xffff
+#define N_MIN_MASK_SFT                                    (0xffff << 0)
+
+/* MT6358_AFE_VOW_CFG6 */
+#define RG_WINDOW_SIZE_SEL_SFT                            12
+#define RG_WINDOW_SIZE_SEL_MASK                           0x1
+#define RG_WINDOW_SIZE_SEL_MASK_SFT                       (0x1 << 12)
+#define RG_FLR_BYPASS_SFT                                 11
+#define RG_FLR_BYPASS_MASK                                0x1
+#define RG_FLR_BYPASS_MASK_SFT                            (0x1 << 11)
+#define RG_FLR_RATIO_SFT                                  8
+#define RG_FLR_RATIO_MASK                                 0x7
+#define RG_FLR_RATIO_MASK_SFT                             (0x7 << 8)
+#define RG_BUCK_DVFS_DONE_SW_CTL_SFT                      7
+#define RG_BUCK_DVFS_DONE_SW_CTL_MASK                     0x1
+#define RG_BUCK_DVFS_DONE_SW_CTL_MASK_SFT                 (0x1 << 7)
+#define RG_BUCK_DVFS_DONE_HW_MODE_SFT                     6
+#define RG_BUCK_DVFS_DONE_HW_MODE_MASK                    0x1
+#define RG_BUCK_DVFS_DONE_HW_MODE_MASK_SFT                (0x1 << 6)
+#define RG_BUCK_DVFS_HW_CNT_THR_SFT                       0
+#define RG_BUCK_DVFS_HW_CNT_THR_MASK                      0x3f
+#define RG_BUCK_DVFS_HW_CNT_THR_MASK_SFT                  (0x3f << 0)
+
+/* MT6358_AFE_VOW_MON0 */
+#define VOW_DOWNCNT_SFT                                   0
+#define VOW_DOWNCNT_MASK                                  0xffff
+#define VOW_DOWNCNT_MASK_SFT                              (0xffff << 0)
+
+/* MT6358_AFE_VOW_MON1 */
+#define K_TMP_MON_SFT                                     10
+#define K_TMP_MON_MASK                                    0xf
+#define K_TMP_MON_MASK_SFT                                (0xf << 10)
+#define SLT_COUNTER_MON_SFT                               7
+#define SLT_COUNTER_MON_MASK                              0x7
+#define SLT_COUNTER_MON_MASK_SFT                          (0x7 << 7)
+#define VOW_B_SFT                                         4
+#define VOW_B_MASK                                        0x7
+#define VOW_B_MASK_SFT                                    (0x7 << 4)
+#define VOW_A_SFT                                         1
+#define VOW_A_MASK                                        0x7
+#define VOW_A_MASK_SFT                                    (0x7 << 1)
+#define SECOND_CNT_START_SFT                              0
+#define SECOND_CNT_START_MASK                             0x1
+#define SECOND_CNT_START_MASK_SFT                         (0x1 << 0)
+
+/* MT6358_AFE_VOW_MON2 */
+#define VOW_S_L_SFT                                       0
+#define VOW_S_L_MASK                                      0xffff
+#define VOW_S_L_MASK_SFT                                  (0xffff << 0)
+
+/* MT6358_AFE_VOW_MON3 */
+#define VOW_S_H_SFT                                       0
+#define VOW_S_H_MASK                                      0xffff
+#define VOW_S_H_MASK_SFT                                  (0xffff << 0)
+
+/* MT6358_AFE_VOW_MON4 */
+#define VOW_N_L_SFT                                       0
+#define VOW_N_L_MASK                                      0xffff
+#define VOW_N_L_MASK_SFT                                  (0xffff << 0)
+
+/* MT6358_AFE_VOW_MON5 */
+#define VOW_N_H_SFT                                       0
+#define VOW_N_H_MASK                                      0xffff
+#define VOW_N_H_MASK_SFT                                  (0xffff << 0)
+
+/* MT6358_AFE_VOW_SN_INI_CFG */
+#define VOW_SN_INI_CFG_EN_SFT                             15
+#define VOW_SN_INI_CFG_EN_MASK                            0x1
+#define VOW_SN_INI_CFG_EN_MASK_SFT                        (0x1 << 15)
+#define VOW_SN_INI_CFG_VAL_SFT                            0
+#define VOW_SN_INI_CFG_VAL_MASK                           0x7fff
+#define VOW_SN_INI_CFG_VAL_MASK_SFT                       (0x7fff << 0)
+
+/* MT6358_AFE_VOW_TGEN_CFG0 */
+#define VOW_TGEN_EN_SFT                                   15
+#define VOW_TGEN_EN_MASK                                  0x1
+#define VOW_TGEN_EN_MASK_SFT                              (0x1 << 15)
+#define VOW_TGEN_MUTE_SW_SFT                              14
+#define VOW_TGEN_MUTE_SW_MASK                             0x1
+#define VOW_TGEN_MUTE_SW_MASK_SFT                         (0x1 << 14)
+#define VOW_TGEN_FREQ_DIV_SFT                             0
+#define VOW_TGEN_FREQ_DIV_MASK                            0x3fff
+#define VOW_TGEN_FREQ_DIV_MASK_SFT                        (0x3fff << 0)
+
+/* MT6358_AFE_VOW_POSDIV_CFG0 */
+#define BUCK_DVFS_DONE_SFT                                15
+#define BUCK_DVFS_DONE_MASK                               0x1
+#define BUCK_DVFS_DONE_MASK_SFT                           (0x1 << 15)
+#define VOW_32K_MODE_SFT                                  13
+#define VOW_32K_MODE_MASK                                 0x1
+#define VOW_32K_MODE_MASK_SFT                             (0x1 << 13)
+#define RG_BUCK_CLK_DIV_SFT                               8
+#define RG_BUCK_CLK_DIV_MASK                              0x1f
+#define RG_BUCK_CLK_DIV_MASK_SFT                          (0x1f << 8)
+#define RG_A1P6M_EN_SEL_SFT                               7
+#define RG_A1P6M_EN_SEL_MASK                              0x1
+#define RG_A1P6M_EN_SEL_MASK_SFT                          (0x1 << 7)
+#define VOW_CLK_SEL_SFT                                   6
+#define VOW_CLK_SEL_MASK                                  0x1
+#define VOW_CLK_SEL_MASK_SFT                              (0x1 << 6)
+#define VOW_INTR_SW_MODE_SFT                              5
+#define VOW_INTR_SW_MODE_MASK                             0x1
+#define VOW_INTR_SW_MODE_MASK_SFT                         (0x1 << 5)
+#define VOW_INTR_SW_VAL_SFT                               4
+#define VOW_INTR_SW_VAL_MASK                              0x1
+#define VOW_INTR_SW_VAL_MASK_SFT                          (0x1 << 4)
+#define VOW_CIC_MODE_SEL_SFT                              2
+#define VOW_CIC_MODE_SEL_MASK                             0x3
+#define VOW_CIC_MODE_SEL_MASK_SFT                         (0x3 << 2)
+#define RG_VOW_POSDIV_SFT                                 0
+#define RG_VOW_POSDIV_MASK                                0x3
+#define RG_VOW_POSDIV_MASK_SFT                            (0x3 << 0)
+
+/* MT6358_AFE_VOW_HPF_CFG0 */
+#define VOW_HPF_DC_TEST_SFT                               12
+#define VOW_HPF_DC_TEST_MASK                              0xf
+#define VOW_HPF_DC_TEST_MASK_SFT                          (0xf << 12)
+#define VOW_IRQ_LATCH_SNR_EN_SFT                          10
+#define VOW_IRQ_LATCH_SNR_EN_MASK                         0x1
+#define VOW_IRQ_LATCH_SNR_EN_MASK_SFT                     (0x1 << 10)
+#define VOW_DMICCLK_PDN_SFT                               9
+#define VOW_DMICCLK_PDN_MASK                              0x1
+#define VOW_DMICCLK_PDN_MASK_SFT                          (0x1 << 9)
+#define VOW_POSDIVCLK_PDN_SFT                             8
+#define VOW_POSDIVCLK_PDN_MASK                            0x1
+#define VOW_POSDIVCLK_PDN_MASK_SFT                        (0x1 << 8)
+#define RG_BASELINE_ALPHA_ORDER_SFT                       4
+#define RG_BASELINE_ALPHA_ORDER_MASK                      0xf
+#define RG_BASELINE_ALPHA_ORDER_MASK_SFT                  (0xf << 4)
+#define RG_MTKAIF_HPF_BYPASS_SFT                          2
+#define RG_MTKAIF_HPF_BYPASS_MASK                         0x1
+#define RG_MTKAIF_HPF_BYPASS_MASK_SFT                     (0x1 << 2)
+#define RG_SNRDET_HPF_BYPASS_SFT                          1
+#define RG_SNRDET_HPF_BYPASS_MASK                         0x1
+#define RG_SNRDET_HPF_BYPASS_MASK_SFT                     (0x1 << 1)
+#define RG_HPF_ON_SFT                                     0
+#define RG_HPF_ON_MASK                                    0x1
+#define RG_HPF_ON_MASK_SFT                                (0x1 << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG0 */
+#define RG_PERIODIC_EN_SFT                                15
+#define RG_PERIODIC_EN_MASK                               0x1
+#define RG_PERIODIC_EN_MASK_SFT                           (0x1 << 15)
+#define RG_PERIODIC_CNT_CLR_SFT                           14
+#define RG_PERIODIC_CNT_CLR_MASK                          0x1
+#define RG_PERIODIC_CNT_CLR_MASK_SFT                      (0x1 << 14)
+#define RG_PERIODIC_CNT_PERIOD_SFT                        0
+#define RG_PERIODIC_CNT_PERIOD_MASK                       0x3fff
+#define RG_PERIODIC_CNT_PERIOD_MASK_SFT                   (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG1 */
+#define RG_PERIODIC_CNT_SET_SFT                           15
+#define RG_PERIODIC_CNT_SET_MASK                          0x1
+#define RG_PERIODIC_CNT_SET_MASK_SFT                      (0x1 << 15)
+#define RG_PERIODIC_CNT_PAUSE_SFT                         14
+#define RG_PERIODIC_CNT_PAUSE_MASK                        0x1
+#define RG_PERIODIC_CNT_PAUSE_MASK_SFT                    (0x1 << 14)
+#define RG_PERIODIC_CNT_SET_VALUE_SFT                     0
+#define RG_PERIODIC_CNT_SET_VALUE_MASK                    0x3fff
+#define RG_PERIODIC_CNT_SET_VALUE_MASK_SFT                (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG2 */
+#define AUDPREAMPLON_PERIODIC_MODE_SFT                    15
+#define AUDPREAMPLON_PERIODIC_MODE_MASK                   0x1
+#define AUDPREAMPLON_PERIODIC_MODE_MASK_SFT               (0x1 << 15)
+#define AUDPREAMPLON_PERIODIC_INVERSE_SFT                 14
+#define AUDPREAMPLON_PERIODIC_INVERSE_MASK                0x1
+#define AUDPREAMPLON_PERIODIC_INVERSE_MASK_SFT            (0x1 << 14)
+#define AUDPREAMPLON_PERIODIC_ON_CYCLE_SFT                0
+#define AUDPREAMPLON_PERIODIC_ON_CYCLE_MASK               0x3fff
+#define AUDPREAMPLON_PERIODIC_ON_CYCLE_MASK_SFT           (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG3 */
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_MODE_SFT           15
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_MODE_MASK          0x1
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_MODE_MASK_SFT      (0x1 << 15)
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_INVERSE_SFT        14
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_INVERSE_MASK       0x1
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_INVERSE_MASK_SFT   (0x1 << 14)
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_ON_CYCLE_SFT       0
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_ON_CYCLE_MASK      0x3fff
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_ON_CYCLE_MASK_SFT  (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG4 */
+#define AUDADCLPWRUP_PERIODIC_MODE_SFT                    15
+#define AUDADCLPWRUP_PERIODIC_MODE_MASK                   0x1
+#define AUDADCLPWRUP_PERIODIC_MODE_MASK_SFT               (0x1 << 15)
+#define AUDADCLPWRUP_PERIODIC_INVERSE_SFT                 14
+#define AUDADCLPWRUP_PERIODIC_INVERSE_MASK                0x1
+#define AUDADCLPWRUP_PERIODIC_INVERSE_MASK_SFT            (0x1 << 14)
+#define AUDADCLPWRUP_PERIODIC_ON_CYCLE_SFT                0
+#define AUDADCLPWRUP_PERIODIC_ON_CYCLE_MASK               0x3fff
+#define AUDADCLPWRUP_PERIODIC_ON_CYCLE_MASK_SFT           (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG5 */
+#define AUDGLBVOWLPWEN_PERIODIC_MODE_SFT                  15
+#define AUDGLBVOWLPWEN_PERIODIC_MODE_MASK                 0x1
+#define AUDGLBVOWLPWEN_PERIODIC_MODE_MASK_SFT             (0x1 << 15)
+#define AUDGLBVOWLPWEN_PERIODIC_INVERSE_SFT               14
+#define AUDGLBVOWLPWEN_PERIODIC_INVERSE_MASK              0x1
+#define AUDGLBVOWLPWEN_PERIODIC_INVERSE_MASK_SFT          (0x1 << 14)
+#define AUDGLBVOWLPWEN_PERIODIC_ON_CYCLE_SFT              0
+#define AUDGLBVOWLPWEN_PERIODIC_ON_CYCLE_MASK             0x3fff
+#define AUDGLBVOWLPWEN_PERIODIC_ON_CYCLE_MASK_SFT         (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG6 */
+#define AUDDIGMICEN_PERIODIC_MODE_SFT                     15
+#define AUDDIGMICEN_PERIODIC_MODE_MASK                    0x1
+#define AUDDIGMICEN_PERIODIC_MODE_MASK_SFT                (0x1 << 15)
+#define AUDDIGMICEN_PERIODIC_INVERSE_SFT                  14
+#define AUDDIGMICEN_PERIODIC_INVERSE_MASK                 0x1
+#define AUDDIGMICEN_PERIODIC_INVERSE_MASK_SFT             (0x1 << 14)
+#define AUDDIGMICEN_PERIODIC_ON_CYCLE_SFT                 0
+#define AUDDIGMICEN_PERIODIC_ON_CYCLE_MASK                0x3fff
+#define AUDDIGMICEN_PERIODIC_ON_CYCLE_MASK_SFT            (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG7 */
+#define AUDPWDBMICBIAS0_PERIODIC_MODE_SFT                 15
+#define AUDPWDBMICBIAS0_PERIODIC_MODE_MASK                0x1
+#define AUDPWDBMICBIAS0_PERIODIC_MODE_MASK_SFT            (0x1 << 15)
+#define AUDPWDBMICBIAS0_PERIODIC_INVERSE_SFT              14
+#define AUDPWDBMICBIAS0_PERIODIC_INVERSE_MASK             0x1
+#define AUDPWDBMICBIAS0_PERIODIC_INVERSE_MASK_SFT         (0x1 << 14)
+#define AUDPWDBMICBIAS0_PERIODIC_ON_CYCLE_SFT             0
+#define AUDPWDBMICBIAS0_PERIODIC_ON_CYCLE_MASK            0x3fff
+#define AUDPWDBMICBIAS0_PERIODIC_ON_CYCLE_MASK_SFT        (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG8 */
+#define AUDPWDBMICBIAS1_PERIODIC_MODE_SFT                 15
+#define AUDPWDBMICBIAS1_PERIODIC_MODE_MASK                0x1
+#define AUDPWDBMICBIAS1_PERIODIC_MODE_MASK_SFT            (0x1 << 15)
+#define AUDPWDBMICBIAS1_PERIODIC_INVERSE_SFT              14
+#define AUDPWDBMICBIAS1_PERIODIC_INVERSE_MASK             0x1
+#define AUDPWDBMICBIAS1_PERIODIC_INVERSE_MASK_SFT         (0x1 << 14)
+#define AUDPWDBMICBIAS1_PERIODIC_ON_CYCLE_SFT             0
+#define AUDPWDBMICBIAS1_PERIODIC_ON_CYCLE_MASK            0x3fff
+#define AUDPWDBMICBIAS1_PERIODIC_ON_CYCLE_MASK_SFT        (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG9 */
+#define XO_VOW_CK_EN_PERIODIC_MODE_SFT                    15
+#define XO_VOW_CK_EN_PERIODIC_MODE_MASK                   0x1
+#define XO_VOW_CK_EN_PERIODIC_MODE_MASK_SFT               (0x1 << 15)
+#define XO_VOW_CK_EN_PERIODIC_INVERSE_SFT                 14
+#define XO_VOW_CK_EN_PERIODIC_INVERSE_MASK                0x1
+#define XO_VOW_CK_EN_PERIODIC_INVERSE_MASK_SFT            (0x1 << 14)
+#define XO_VOW_CK_EN_PERIODIC_ON_CYCLE_SFT                0
+#define XO_VOW_CK_EN_PERIODIC_ON_CYCLE_MASK               0x3fff
+#define XO_VOW_CK_EN_PERIODIC_ON_CYCLE_MASK_SFT           (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG10 */
+#define AUDGLB_PWRDN_PERIODIC_MODE_SFT                    15
+#define AUDGLB_PWRDN_PERIODIC_MODE_MASK                   0x1
+#define AUDGLB_PWRDN_PERIODIC_MODE_MASK_SFT               (0x1 << 15)
+#define AUDGLB_PWRDN_PERIODIC_INVERSE_SFT                 14
+#define AUDGLB_PWRDN_PERIODIC_INVERSE_MASK                0x1
+#define AUDGLB_PWRDN_PERIODIC_INVERSE_MASK_SFT            (0x1 << 14)
+#define AUDGLB_PWRDN_PERIODIC_ON_CYCLE_SFT                0
+#define AUDGLB_PWRDN_PERIODIC_ON_CYCLE_MASK               0x3fff
+#define AUDGLB_PWRDN_PERIODIC_ON_CYCLE_MASK_SFT           (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG11 */
+#define VOW_ON_PERIODIC_MODE_SFT                          15
+#define VOW_ON_PERIODIC_MODE_MASK                         0x1
+#define VOW_ON_PERIODIC_MODE_MASK_SFT                     (0x1 << 15)
+#define VOW_ON_PERIODIC_INVERSE_SFT                       14
+#define VOW_ON_PERIODIC_INVERSE_MASK                      0x1
+#define VOW_ON_PERIODIC_INVERSE_MASK_SFT                  (0x1 << 14)
+#define VOW_ON_PERIODIC_ON_CYCLE_SFT                      0
+#define VOW_ON_PERIODIC_ON_CYCLE_MASK                     0x3fff
+#define VOW_ON_PERIODIC_ON_CYCLE_MASK_SFT                 (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG12 */
+#define DMIC_ON_PERIODIC_MODE_SFT                         15
+#define DMIC_ON_PERIODIC_MODE_MASK                        0x1
+#define DMIC_ON_PERIODIC_MODE_MASK_SFT                    (0x1 << 15)
+#define DMIC_ON_PERIODIC_INVERSE_SFT                      14
+#define DMIC_ON_PERIODIC_INVERSE_MASK                     0x1
+#define DMIC_ON_PERIODIC_INVERSE_MASK_SFT                 (0x1 << 14)
+#define DMIC_ON_PERIODIC_ON_CYCLE_SFT                     0
+#define DMIC_ON_PERIODIC_ON_CYCLE_MASK                    0x3fff
+#define DMIC_ON_PERIODIC_ON_CYCLE_MASK_SFT                (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG13 */
+#define PDN_VOW_F32K_CK_SFT                               15
+#define PDN_VOW_F32K_CK_MASK                              0x1
+#define PDN_VOW_F32K_CK_MASK_SFT                          (0x1 << 15)
+#define AUDPREAMPLON_PERIODIC_OFF_CYCLE_SFT               0
+#define AUDPREAMPLON_PERIODIC_OFF_CYCLE_MASK              0x3fff
+#define AUDPREAMPLON_PERIODIC_OFF_CYCLE_MASK_SFT          (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG14 */
+#define VOW_SNRDET_PERIODIC_CFG_SFT                       15
+#define VOW_SNRDET_PERIODIC_CFG_MASK                      0x1
+#define VOW_SNRDET_PERIODIC_CFG_MASK_SFT                  (0x1 << 15)
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_OFF_CYCLE_SFT      0
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_OFF_CYCLE_MASK     0x3fff
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_OFF_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG15 */
+#define AUDADCLPWRUP_PERIODIC_OFF_CYCLE_SFT               0
+#define AUDADCLPWRUP_PERIODIC_OFF_CYCLE_MASK              0x3fff
+#define AUDADCLPWRUP_PERIODIC_OFF_CYCLE_MASK_SFT          (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG16 */
+#define AUDGLBVOWLPWEN_PERIODIC_OFF_CYCLE_SFT             0
+#define AUDGLBVOWLPWEN_PERIODIC_OFF_CYCLE_MASK            0x3fff
+#define AUDGLBVOWLPWEN_PERIODIC_OFF_CYCLE_MASK_SFT        (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG17 */
+#define AUDDIGMICEN_PERIODIC_OFF_CYCLE_SFT                0
+#define AUDDIGMICEN_PERIODIC_OFF_CYCLE_MASK               0x3fff
+#define AUDDIGMICEN_PERIODIC_OFF_CYCLE_MASK_SFT           (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG18 */
+#define AUDPWDBMICBIAS0_PERIODIC_OFF_CYCLE_SFT            0
+#define AUDPWDBMICBIAS0_PERIODIC_OFF_CYCLE_MASK           0x3fff
+#define AUDPWDBMICBIAS0_PERIODIC_OFF_CYCLE_MASK_SFT       (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG19 */
+#define AUDPWDBMICBIAS1_PERIODIC_OFF_CYCLE_SFT            0
+#define AUDPWDBMICBIAS1_PERIODIC_OFF_CYCLE_MASK           0x3fff
+#define AUDPWDBMICBIAS1_PERIODIC_OFF_CYCLE_MASK_SFT       (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG20 */
+#define CLKSQ_EN_VOW_PERIODIC_MODE_SFT                    15
+#define CLKSQ_EN_VOW_PERIODIC_MODE_MASK                   0x1
+#define CLKSQ_EN_VOW_PERIODIC_MODE_MASK_SFT               (0x1 << 15)
+#define XO_VOW_CK_EN_PERIODIC_OFF_CYCLE_SFT               0
+#define XO_VOW_CK_EN_PERIODIC_OFF_CYCLE_MASK              0x3fff
+#define XO_VOW_CK_EN_PERIODIC_OFF_CYCLE_MASK_SFT          (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG21 */
+#define AUDGLB_PWRDN_PERIODIC_OFF_CYCLE_SFT               0
+#define AUDGLB_PWRDN_PERIODIC_OFF_CYCLE_MASK              0x3fff
+#define AUDGLB_PWRDN_PERIODIC_OFF_CYCLE_MASK_SFT          (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG22 */
+#define VOW_ON_PERIODIC_OFF_CYCLE_SFT                     0
+#define VOW_ON_PERIODIC_OFF_CYCLE_MASK                    0x3fff
+#define VOW_ON_PERIODIC_OFF_CYCLE_MASK_SFT                (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG23 */
+#define DMIC_ON_PERIODIC_OFF_CYCLE_SFT                    0
+#define DMIC_ON_PERIODIC_OFF_CYCLE_MASK                   0x3fff
+#define DMIC_ON_PERIODIC_OFF_CYCLE_MASK_SFT               (0x3fff << 0)
+
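+/*
+ * The VOW periodic registers above pair up: CFG2..CFG12 each carry a
+ * per-signal MODE bit (15), an INVERSE bit (14) and a 14-bit ON_CYCLE
+ * count, while CFG13..CFG23 carry the matching 14-bit OFF_CYCLE counts
+ * (a few of these also host extra control bits in bits 14/15). A sketch
+ * of programming one such on/off window, again assuming regmap access
+ * ('regmap', 'on' and 'off' are illustrative names):
+ *
+ *   regmap_update_bits(regmap, MT6358_AFE_VOW_PERIODIC_CFG2,
+ *                      AUDPREAMPLON_PERIODIC_ON_CYCLE_MASK_SFT,
+ *                      on << AUDPREAMPLON_PERIODIC_ON_CYCLE_SFT);
+ *   regmap_update_bits(regmap, MT6358_AFE_VOW_PERIODIC_CFG13,
+ *                      AUDPREAMPLON_PERIODIC_OFF_CYCLE_MASK_SFT,
+ *                      off << AUDPREAMPLON_PERIODIC_OFF_CYCLE_SFT);
+ */
+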
+/* MT6358_AFE_VOW_PERIODIC_MON0 */
+#define VOW_PERIODIC_MON_SFT                              0
+#define VOW_PERIODIC_MON_MASK                             0xffff
+#define VOW_PERIODIC_MON_MASK_SFT                         (0xffff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_MON1 */
+#define VOW_PERIODIC_COUNT_MON_SFT                        0
+#define VOW_PERIODIC_COUNT_MON_MASK                       0xffff
+#define VOW_PERIODIC_COUNT_MON_MASK_SFT                   (0xffff << 0)
+
+/* MT6358_AUDENC_DSN_ID */
+#define AUDENC_ANA_ID_SFT                                 0
+#define AUDENC_ANA_ID_MASK                                0xff
+#define AUDENC_ANA_ID_MASK_SFT                            (0xff << 0)
+#define AUDENC_DIG_ID_SFT                                 8
+#define AUDENC_DIG_ID_MASK                                0xff
+#define AUDENC_DIG_ID_MASK_SFT                            (0xff << 8)
+
+/* MT6358_AUDENC_DSN_REV0 */
+#define AUDENC_ANA_MINOR_REV_SFT                          0
+#define AUDENC_ANA_MINOR_REV_MASK                         0xf
+#define AUDENC_ANA_MINOR_REV_MASK_SFT                     (0xf << 0)
+#define AUDENC_ANA_MAJOR_REV_SFT                          4
+#define AUDENC_ANA_MAJOR_REV_MASK                         0xf
+#define AUDENC_ANA_MAJOR_REV_MASK_SFT                     (0xf << 4)
+#define AUDENC_DIG_MINOR_REV_SFT                          8
+#define AUDENC_DIG_MINOR_REV_MASK                         0xf
+#define AUDENC_DIG_MINOR_REV_MASK_SFT                     (0xf << 8)
+#define AUDENC_DIG_MAJOR_REV_SFT                          12
+#define AUDENC_DIG_MAJOR_REV_MASK                         0xf
+#define AUDENC_DIG_MAJOR_REV_MASK_SFT                     (0xf << 12)
+
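+/*
+ * Read-side counterpart of the same convention: shift the raw register
+ * value down by _SFT, then apply the unshifted _MASK. A sketch, assuming
+ * regmap access ('regmap', 'val' and 'major' are illustrative names):
+ *
+ *   unsigned int val, major;
+ *
+ *   regmap_read(regmap, MT6358_AUDENC_DSN_REV0, &val);
+ *   major = (val >> AUDENC_ANA_MAJOR_REV_SFT) & AUDENC_ANA_MAJOR_REV_MASK;
+ */
+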
+/* MT6358_AUDENC_DSN_DBI */
+#define AUDENC_DSN_CBS_SFT                                0
+#define AUDENC_DSN_CBS_MASK                               0x3
+#define AUDENC_DSN_CBS_MASK_SFT                           (0x3 << 0)
+#define AUDENC_DSN_BIX_SFT                                2
+#define AUDENC_DSN_BIX_MASK                               0x3
+#define AUDENC_DSN_BIX_MASK_SFT                           (0x3 << 2)
+#define AUDENC_DSN_ESP_SFT                                8
+#define AUDENC_DSN_ESP_MASK                               0xff
+#define AUDENC_DSN_ESP_MASK_SFT                           (0xff << 8)
+
+/* MT6358_AUDENC_DSN_FPI */
+#define AUDENC_DSN_FPI_SFT                                0
+#define AUDENC_DSN_FPI_MASK                               0xff
+#define AUDENC_DSN_FPI_MASK_SFT                           (0xff << 0)
+
+/* MT6358_AUDENC_ANA_CON0 */
+#define RG_AUDPREAMPLON_SFT                               0
+#define RG_AUDPREAMPLON_MASK                              0x1
+#define RG_AUDPREAMPLON_MASK_SFT                          (0x1 << 0)
+#define RG_AUDPREAMPLDCCEN_SFT                            1
+#define RG_AUDPREAMPLDCCEN_MASK                           0x1
+#define RG_AUDPREAMPLDCCEN_MASK_SFT                       (0x1 << 1)
+#define RG_AUDPREAMPLDCPRECHARGE_SFT                      2
+#define RG_AUDPREAMPLDCPRECHARGE_MASK                     0x1
+#define RG_AUDPREAMPLDCPRECHARGE_MASK_SFT                 (0x1 << 2)
+#define RG_AUDPREAMPLPGATEST_SFT                          3
+#define RG_AUDPREAMPLPGATEST_MASK                         0x1
+#define RG_AUDPREAMPLPGATEST_MASK_SFT                     (0x1 << 3)
+#define RG_AUDPREAMPLVSCALE_SFT                           4
+#define RG_AUDPREAMPLVSCALE_MASK                          0x3
+#define RG_AUDPREAMPLVSCALE_MASK_SFT                      (0x3 << 4)
+#define RG_AUDPREAMPLINPUTSEL_SFT                         6
+#define RG_AUDPREAMPLINPUTSEL_MASK                        0x3
+#define RG_AUDPREAMPLINPUTSEL_MASK_SFT                    (0x3 << 6)
+#define RG_AUDPREAMPLGAIN_SFT                             8
+#define RG_AUDPREAMPLGAIN_MASK                            0x7
+#define RG_AUDPREAMPLGAIN_MASK_SFT                        (0x7 << 8)
+#define RG_AUDADCLPWRUP_SFT                               12
+#define RG_AUDADCLPWRUP_MASK                              0x1
+#define RG_AUDADCLPWRUP_MASK_SFT                          (0x1 << 12)
+#define RG_AUDADCLINPUTSEL_SFT                            13
+#define RG_AUDADCLINPUTSEL_MASK                           0x3
+#define RG_AUDADCLINPUTSEL_MASK_SFT                       (0x3 << 13)
+
+/* MT6358_AUDENC_ANA_CON1 */
+#define RG_AUDPREAMPRON_SFT                               0
+#define RG_AUDPREAMPRON_MASK                              0x1
+#define RG_AUDPREAMPRON_MASK_SFT                          (0x1 << 0)
+#define RG_AUDPREAMPRDCCEN_SFT                            1
+#define RG_AUDPREAMPRDCCEN_MASK                           0x1
+#define RG_AUDPREAMPRDCCEN_MASK_SFT                       (0x1 << 1)
+#define RG_AUDPREAMPRDCPRECHARGE_SFT                      2
+#define RG_AUDPREAMPRDCPRECHARGE_MASK                     0x1
+#define RG_AUDPREAMPRDCPRECHARGE_MASK_SFT                 (0x1 << 2)
+#define RG_AUDPREAMPRPGATEST_SFT                          3
+#define RG_AUDPREAMPRPGATEST_MASK                         0x1
+#define RG_AUDPREAMPRPGATEST_MASK_SFT                     (0x1 << 3)
+#define RG_AUDPREAMPRVSCALE_SFT                           4
+#define RG_AUDPREAMPRVSCALE_MASK                          0x3
+#define RG_AUDPREAMPRVSCALE_MASK_SFT                      (0x3 << 4)
+#define RG_AUDPREAMPRINPUTSEL_SFT                         6
+#define RG_AUDPREAMPRINPUTSEL_MASK                        0x3
+#define RG_AUDPREAMPRINPUTSEL_MASK_SFT                    (0x3 << 6)
+#define RG_AUDPREAMPRGAIN_SFT                             8
+#define RG_AUDPREAMPRGAIN_MASK                            0x7
+#define RG_AUDPREAMPRGAIN_MASK_SFT                        (0x7 << 8)
+#define RG_AUDIO_VOW_EN_SFT                               11
+#define RG_AUDIO_VOW_EN_MASK                              0x1
+#define RG_AUDIO_VOW_EN_MASK_SFT                          (0x1 << 11)
+#define RG_AUDADCRPWRUP_SFT                               12
+#define RG_AUDADCRPWRUP_MASK                              0x1
+#define RG_AUDADCRPWRUP_MASK_SFT                          (0x1 << 12)
+#define RG_AUDADCRINPUTSEL_SFT                            13
+#define RG_AUDADCRINPUTSEL_MASK                           0x3
+#define RG_AUDADCRINPUTSEL_MASK_SFT                       (0x3 << 13)
+#define RG_CLKSQ_EN_VOW_SFT                               15
+#define RG_CLKSQ_EN_VOW_MASK                              0x1
+#define RG_CLKSQ_EN_VOW_MASK_SFT                          (0x1 << 15)
+
+/* MT6358_AUDENC_ANA_CON2 */
+#define RG_AUDULHALFBIAS_SFT                              0
+#define RG_AUDULHALFBIAS_MASK                             0x1
+#define RG_AUDULHALFBIAS_MASK_SFT                         (0x1 << 0)
+#define RG_AUDGLBVOWLPWEN_SFT                             1
+#define RG_AUDGLBVOWLPWEN_MASK                            0x1
+#define RG_AUDGLBVOWLPWEN_MASK_SFT                        (0x1 << 1)
+#define RG_AUDPREAMPLPEN_SFT                              2
+#define RG_AUDPREAMPLPEN_MASK                             0x1
+#define RG_AUDPREAMPLPEN_MASK_SFT                         (0x1 << 2)
+#define RG_AUDADC1STSTAGELPEN_SFT                         3
+#define RG_AUDADC1STSTAGELPEN_MASK                        0x1
+#define RG_AUDADC1STSTAGELPEN_MASK_SFT                    (0x1 << 3)
+#define RG_AUDADC2NDSTAGELPEN_SFT                         4
+#define RG_AUDADC2NDSTAGELPEN_MASK                        0x1
+#define RG_AUDADC2NDSTAGELPEN_MASK_SFT                    (0x1 << 4)
+#define RG_AUDADCFLASHLPEN_SFT                            5
+#define RG_AUDADCFLASHLPEN_MASK                           0x1
+#define RG_AUDADCFLASHLPEN_MASK_SFT                       (0x1 << 5)
+#define RG_AUDPREAMPIDDTEST_SFT                           6
+#define RG_AUDPREAMPIDDTEST_MASK                          0x3
+#define RG_AUDPREAMPIDDTEST_MASK_SFT                      (0x3 << 6)
+#define RG_AUDADC1STSTAGEIDDTEST_SFT                      8
+#define RG_AUDADC1STSTAGEIDDTEST_MASK                     0x3
+#define RG_AUDADC1STSTAGEIDDTEST_MASK_SFT                 (0x3 << 8)
+#define RG_AUDADC2NDSTAGEIDDTEST_SFT                      10
+#define RG_AUDADC2NDSTAGEIDDTEST_MASK                     0x3
+#define RG_AUDADC2NDSTAGEIDDTEST_MASK_SFT                 (0x3 << 10)
+#define RG_AUDADCREFBUFIDDTEST_SFT                        12
+#define RG_AUDADCREFBUFIDDTEST_MASK                       0x3
+#define RG_AUDADCREFBUFIDDTEST_MASK_SFT                   (0x3 << 12)
+#define RG_AUDADCFLASHIDDTEST_SFT                         14
+#define RG_AUDADCFLASHIDDTEST_MASK                        0x3
+#define RG_AUDADCFLASHIDDTEST_MASK_SFT                    (0x3 << 14)
+
+/* MT6358_AUDENC_ANA_CON3 */
+#define RG_AUDADCDAC0P25FS_SFT                            0
+#define RG_AUDADCDAC0P25FS_MASK                           0x1
+#define RG_AUDADCDAC0P25FS_MASK_SFT                       (0x1 << 0)
+#define RG_AUDADCCLKSEL_SFT                               1
+#define RG_AUDADCCLKSEL_MASK                              0x1
+#define RG_AUDADCCLKSEL_MASK_SFT                          (0x1 << 1)
+#define RG_AUDADCCLKSOURCE_SFT                            2
+#define RG_AUDADCCLKSOURCE_MASK                           0x3
+#define RG_AUDADCCLKSOURCE_MASK_SFT                       (0x3 << 2)
+#define RG_AUDPREAMPAAFEN_SFT                             8
+#define RG_AUDPREAMPAAFEN_MASK                            0x1
+#define RG_AUDPREAMPAAFEN_MASK_SFT                        (0x1 << 8)
+#define RG_DCCVCMBUFLPMODSEL_SFT                          9
+#define RG_DCCVCMBUFLPMODSEL_MASK                         0x1
+#define RG_DCCVCMBUFLPMODSEL_MASK_SFT                     (0x1 << 9)
+#define RG_DCCVCMBUFLPSWEN_SFT                            10
+#define RG_DCCVCMBUFLPSWEN_MASK                           0x1
+#define RG_DCCVCMBUFLPSWEN_MASK_SFT                       (0x1 << 10)
+#define RG_CMSTBENH_SFT                                   11
+#define RG_CMSTBENH_MASK                                  0x1
+#define RG_CMSTBENH_MASK_SFT                              (0x1 << 11)
+#define RG_PGABODYSW_SFT                                  12
+#define RG_PGABODYSW_MASK                                 0x1
+#define RG_PGABODYSW_MASK_SFT                             (0x1 << 12)
+
+/* MT6358_AUDENC_ANA_CON4 */
+#define RG_AUDADC1STSTAGESDENB_SFT                        0
+#define RG_AUDADC1STSTAGESDENB_MASK                       0x1
+#define RG_AUDADC1STSTAGESDENB_MASK_SFT                   (0x1 << 0)
+#define RG_AUDADC2NDSTAGERESET_SFT                        1
+#define RG_AUDADC2NDSTAGERESET_MASK                       0x1
+#define RG_AUDADC2NDSTAGERESET_MASK_SFT                   (0x1 << 1)
+#define RG_AUDADC3RDSTAGERESET_SFT                        2
+#define RG_AUDADC3RDSTAGERESET_MASK                       0x1
+#define RG_AUDADC3RDSTAGERESET_MASK_SFT                   (0x1 << 2)
+#define RG_AUDADCFSRESET_SFT                              3
+#define RG_AUDADCFSRESET_MASK                             0x1
+#define RG_AUDADCFSRESET_MASK_SFT                         (0x1 << 3)
+#define RG_AUDADCWIDECM_SFT                               4
+#define RG_AUDADCWIDECM_MASK                              0x1
+#define RG_AUDADCWIDECM_MASK_SFT                          (0x1 << 4)
+#define RG_AUDADCNOPATEST_SFT                             5
+#define RG_AUDADCNOPATEST_MASK                            0x1
+#define RG_AUDADCNOPATEST_MASK_SFT                        (0x1 << 5)
+#define RG_AUDADCBYPASS_SFT                               6
+#define RG_AUDADCBYPASS_MASK                              0x1
+#define RG_AUDADCBYPASS_MASK_SFT                          (0x1 << 6)
+#define RG_AUDADCFFBYPASS_SFT                             7
+#define RG_AUDADCFFBYPASS_MASK                            0x1
+#define RG_AUDADCFFBYPASS_MASK_SFT                        (0x1 << 7)
+#define RG_AUDADCDACFBCURRENT_SFT                         8
+#define RG_AUDADCDACFBCURRENT_MASK                        0x1
+#define RG_AUDADCDACFBCURRENT_MASK_SFT                    (0x1 << 8)
+#define RG_AUDADCDACIDDTEST_SFT                           9
+#define RG_AUDADCDACIDDTEST_MASK                          0x3
+#define RG_AUDADCDACIDDTEST_MASK_SFT                      (0x3 << 9)
+#define RG_AUDADCDACNRZ_SFT                               11
+#define RG_AUDADCDACNRZ_MASK                              0x1
+#define RG_AUDADCDACNRZ_MASK_SFT                          (0x1 << 11)
+#define RG_AUDADCNODEM_SFT                                12
+#define RG_AUDADCNODEM_MASK                               0x1
+#define RG_AUDADCNODEM_MASK_SFT                           (0x1 << 12)
+#define RG_AUDADCDACTEST_SFT                              13
+#define RG_AUDADCDACTEST_MASK                             0x1
+#define RG_AUDADCDACTEST_MASK_SFT                         (0x1 << 13)
+
+/* MT6358_AUDENC_ANA_CON5 */
+#define RG_AUDRCTUNEL_SFT                                 0
+#define RG_AUDRCTUNEL_MASK                                0x1f
+#define RG_AUDRCTUNEL_MASK_SFT                            (0x1f << 0)
+#define RG_AUDRCTUNELSEL_SFT                              5
+#define RG_AUDRCTUNELSEL_MASK                             0x1
+#define RG_AUDRCTUNELSEL_MASK_SFT                         (0x1 << 5)
+#define RG_AUDRCTUNER_SFT                                 8
+#define RG_AUDRCTUNER_MASK                                0x1f
+#define RG_AUDRCTUNER_MASK_SFT                            (0x1f << 8)
+#define RG_AUDRCTUNERSEL_SFT                              13
+#define RG_AUDRCTUNERSEL_MASK                             0x1
+#define RG_AUDRCTUNERSEL_MASK_SFT                         (0x1 << 13)
+
+/* MT6358_AUDENC_ANA_CON6 */
+#define RG_CLKSQ_EN_SFT                                   0
+#define RG_CLKSQ_EN_MASK                                  0x1
+#define RG_CLKSQ_EN_MASK_SFT                              (0x1 << 0)
+#define RG_CLKSQ_IN_SEL_TEST_SFT                          1
+#define RG_CLKSQ_IN_SEL_TEST_MASK                         0x1
+#define RG_CLKSQ_IN_SEL_TEST_MASK_SFT                     (0x1 << 1)
+#define RG_CM_REFGENSEL_SFT                               2
+#define RG_CM_REFGENSEL_MASK                              0x1
+#define RG_CM_REFGENSEL_MASK_SFT                          (0x1 << 2)
+#define RG_AUDSPARE_SFT                                   4
+#define RG_AUDSPARE_MASK                                  0xf
+#define RG_AUDSPARE_MASK_SFT                              (0xf << 4)
+#define RG_AUDENCSPARE_SFT                                8
+#define RG_AUDENCSPARE_MASK                               0x3f
+#define RG_AUDENCSPARE_MASK_SFT                           (0x3f << 8)
+
+/* MT6358_AUDENC_ANA_CON7 */
+#define RG_AUDENCSPARE2_SFT                               0
+#define RG_AUDENCSPARE2_MASK                              0xff
+#define RG_AUDENCSPARE2_MASK_SFT                          (0xff << 0)
+
+/* MT6358_AUDENC_ANA_CON8 */
+#define RG_AUDDIGMICEN_SFT                                0
+#define RG_AUDDIGMICEN_MASK                               0x1
+#define RG_AUDDIGMICEN_MASK_SFT                           (0x1 << 0)
+#define RG_AUDDIGMICBIAS_SFT                              1
+#define RG_AUDDIGMICBIAS_MASK                             0x3
+#define RG_AUDDIGMICBIAS_MASK_SFT                         (0x3 << 1)
+#define RG_DMICHPCLKEN_SFT                                3
+#define RG_DMICHPCLKEN_MASK                               0x1
+#define RG_DMICHPCLKEN_MASK_SFT                           (0x1 << 3)
+#define RG_AUDDIGMICPDUTY_SFT                             4
+#define RG_AUDDIGMICPDUTY_MASK                            0x3
+#define RG_AUDDIGMICPDUTY_MASK_SFT                        (0x3 << 4)
+#define RG_AUDDIGMICNDUTY_SFT                             6
+#define RG_AUDDIGMICNDUTY_MASK                            0x3
+#define RG_AUDDIGMICNDUTY_MASK_SFT                        (0x3 << 6)
+#define RG_DMICMONEN_SFT                                  8
+#define RG_DMICMONEN_MASK                                 0x1
+#define RG_DMICMONEN_MASK_SFT                             (0x1 << 8)
+#define RG_DMICMONSEL_SFT                                 9
+#define RG_DMICMONSEL_MASK                                0x7
+#define RG_DMICMONSEL_MASK_SFT                            (0x7 << 9)
+#define RG_AUDSPAREVMIC_SFT                               12
+#define RG_AUDSPAREVMIC_MASK                              0xf
+#define RG_AUDSPAREVMIC_MASK_SFT                          (0xf << 12)
+
+/* MT6358_AUDENC_ANA_CON9 */
+#define RG_AUDPWDBMICBIAS0_SFT                            0
+#define RG_AUDPWDBMICBIAS0_MASK                           0x1
+#define RG_AUDPWDBMICBIAS0_MASK_SFT                       (0x1 << 0)
+#define RG_AUDMICBIAS0BYPASSEN_SFT                        1
+#define RG_AUDMICBIAS0BYPASSEN_MASK                       0x1
+#define RG_AUDMICBIAS0BYPASSEN_MASK_SFT                   (0x1 << 1)
+#define RG_AUDMICBIAS0LOWPEN_SFT                          2
+#define RG_AUDMICBIAS0LOWPEN_MASK                         0x1
+#define RG_AUDMICBIAS0LOWPEN_MASK_SFT                     (0x1 << 2)
+#define RG_AUDMICBIAS0VREF_SFT                            4
+#define RG_AUDMICBIAS0VREF_MASK                           0x7
+#define RG_AUDMICBIAS0VREF_MASK_SFT                       (0x7 << 4)
+#define RG_AUDMICBIAS0DCSW0P1EN_SFT                       8
+#define RG_AUDMICBIAS0DCSW0P1EN_MASK                      0x1
+#define RG_AUDMICBIAS0DCSW0P1EN_MASK_SFT                  (0x1 << 8)
+#define RG_AUDMICBIAS0DCSW0P2EN_SFT                       9
+#define RG_AUDMICBIAS0DCSW0P2EN_MASK                      0x1
+#define RG_AUDMICBIAS0DCSW0P2EN_MASK_SFT                  (0x1 << 9)
+#define RG_AUDMICBIAS0DCSW0NEN_SFT                        10
+#define RG_AUDMICBIAS0DCSW0NEN_MASK                       0x1
+#define RG_AUDMICBIAS0DCSW0NEN_MASK_SFT                   (0x1 << 10)
+#define RG_AUDMICBIAS0DCSW2P1EN_SFT                       12
+#define RG_AUDMICBIAS0DCSW2P1EN_MASK                      0x1
+#define RG_AUDMICBIAS0DCSW2P1EN_MASK_SFT                  (0x1 << 12)
+#define RG_AUDMICBIAS0DCSW2P2EN_SFT                       13
+#define RG_AUDMICBIAS0DCSW2P2EN_MASK                      0x1
+#define RG_AUDMICBIAS0DCSW2P2EN_MASK_SFT                  (0x1 << 13)
+#define RG_AUDMICBIAS0DCSW2NEN_SFT                        14
+#define RG_AUDMICBIAS0DCSW2NEN_MASK                       0x1
+#define RG_AUDMICBIAS0DCSW2NEN_MASK_SFT                   (0x1 << 14)
+
+/* MT6358_AUDENC_ANA_CON10 */
+#define RG_AUDPWDBMICBIAS1_SFT                            0
+#define RG_AUDPWDBMICBIAS1_MASK                           0x1
+#define RG_AUDPWDBMICBIAS1_MASK_SFT                       (0x1 << 0)
+#define RG_AUDMICBIAS1BYPASSEN_SFT                        1
+#define RG_AUDMICBIAS1BYPASSEN_MASK                       0x1
+#define RG_AUDMICBIAS1BYPASSEN_MASK_SFT                   (0x1 << 1)
+#define RG_AUDMICBIAS1LOWPEN_SFT                          2
+#define RG_AUDMICBIAS1LOWPEN_MASK                         0x1
+#define RG_AUDMICBIAS1LOWPEN_MASK_SFT                     (0x1 << 2)
+#define RG_AUDMICBIAS1VREF_SFT                            4
+#define RG_AUDMICBIAS1VREF_MASK                           0x7
+#define RG_AUDMICBIAS1VREF_MASK_SFT                       (0x7 << 4)
+#define RG_AUDMICBIAS1DCSW1PEN_SFT                        8
+#define RG_AUDMICBIAS1DCSW1PEN_MASK                       0x1
+#define RG_AUDMICBIAS1DCSW1PEN_MASK_SFT                   (0x1 << 8)
+#define RG_AUDMICBIAS1DCSW1NEN_SFT                        9
+#define RG_AUDMICBIAS1DCSW1NEN_MASK                       0x1
+#define RG_AUDMICBIAS1DCSW1NEN_MASK_SFT                   (0x1 << 9)
+#define RG_BANDGAPGEN_SFT                                 12
+#define RG_BANDGAPGEN_MASK                                0x1
+#define RG_BANDGAPGEN_MASK_SFT                            (0x1 << 12)
+#define RG_MTEST_EN_SFT                                   13
+#define RG_MTEST_EN_MASK                                  0x1
+#define RG_MTEST_EN_MASK_SFT                              (0x1 << 13)
+#define RG_MTEST_SEL_SFT                                  14
+#define RG_MTEST_SEL_MASK                                 0x1
+#define RG_MTEST_SEL_MASK_SFT                             (0x1 << 14)
+#define RG_MTEST_CURRENT_SFT                              15
+#define RG_MTEST_CURRENT_MASK                             0x1
+#define RG_MTEST_CURRENT_MASK_SFT                         (0x1 << 15)
+
+/* MT6358_AUDENC_ANA_CON11 */
+#define RG_AUDACCDETMICBIAS0PULLLOW_SFT                   0
+#define RG_AUDACCDETMICBIAS0PULLLOW_MASK                  0x1
+#define RG_AUDACCDETMICBIAS0PULLLOW_MASK_SFT              (0x1 << 0)
+#define RG_AUDACCDETMICBIAS1PULLLOW_SFT                   1
+#define RG_AUDACCDETMICBIAS1PULLLOW_MASK                  0x1
+#define RG_AUDACCDETMICBIAS1PULLLOW_MASK_SFT              (0x1 << 1)
+#define RG_AUDACCDETVIN1PULLLOW_SFT                       2
+#define RG_AUDACCDETVIN1PULLLOW_MASK                      0x1
+#define RG_AUDACCDETVIN1PULLLOW_MASK_SFT                  (0x1 << 2)
+#define RG_AUDACCDETVTHACAL_SFT                           4
+#define RG_AUDACCDETVTHACAL_MASK                          0x1
+#define RG_AUDACCDETVTHACAL_MASK_SFT                      (0x1 << 4)
+#define RG_AUDACCDETVTHBCAL_SFT                           5
+#define RG_AUDACCDETVTHBCAL_MASK                          0x1
+#define RG_AUDACCDETVTHBCAL_MASK_SFT                      (0x1 << 5)
+#define RG_AUDACCDETTVDET_SFT                             6
+#define RG_AUDACCDETTVDET_MASK                            0x1
+#define RG_AUDACCDETTVDET_MASK_SFT                        (0x1 << 6)
+#define RG_ACCDETSEL_SFT                                  7
+#define RG_ACCDETSEL_MASK                                 0x1
+#define RG_ACCDETSEL_MASK_SFT                             (0x1 << 7)
+#define RG_SWBUFMODSEL_SFT                                8
+#define RG_SWBUFMODSEL_MASK                               0x1
+#define RG_SWBUFMODSEL_MASK_SFT                           (0x1 << 8)
+#define RG_SWBUFSWEN_SFT                                  9
+#define RG_SWBUFSWEN_MASK                                 0x1
+#define RG_SWBUFSWEN_MASK_SFT                             (0x1 << 9)
+#define RG_EINTCOMPVTH_SFT                                10
+#define RG_EINTCOMPVTH_MASK                               0x1
+#define RG_EINTCOMPVTH_MASK_SFT                           (0x1 << 10)
+#define RG_EINTCONFIGACCDET_SFT                           11
+#define RG_EINTCONFIGACCDET_MASK                          0x1
+#define RG_EINTCONFIGACCDET_MASK_SFT                      (0x1 << 11)
+#define RG_EINTHIRENB_SFT                                 12
+#define RG_EINTHIRENB_MASK                                0x1
+#define RG_EINTHIRENB_MASK_SFT                            (0x1 << 12)
+#define RG_ACCDET2AUXRESBYPASS_SFT                        13
+#define RG_ACCDET2AUXRESBYPASS_MASK                       0x1
+#define RG_ACCDET2AUXRESBYPASS_MASK_SFT                   (0x1 << 13)
+#define RG_ACCDET2AUXBUFFERBYPASS_SFT                     14
+#define RG_ACCDET2AUXBUFFERBYPASS_MASK                    0x1
+#define RG_ACCDET2AUXBUFFERBYPASS_MASK_SFT                (0x1 << 14)
+#define RG_ACCDET2AUXSWEN_SFT                             15
+#define RG_ACCDET2AUXSWEN_MASK                            0x1
+#define RG_ACCDET2AUXSWEN_MASK_SFT                        (0x1 << 15)
+
+/* MT6358_AUDENC_ANA_CON12 */
+#define RGS_AUDRCTUNELREAD_SFT                            0
+#define RGS_AUDRCTUNELREAD_MASK                           0x1f
+#define RGS_AUDRCTUNELREAD_MASK_SFT                       (0x1f << 0)
+#define RGS_AUDRCTUNERREAD_SFT                            8
+#define RGS_AUDRCTUNERREAD_MASK                           0x1f
+#define RGS_AUDRCTUNERREAD_MASK_SFT                       (0x1f << 8)
+
+/* MT6358_AUDDEC_DSN_ID */
+#define AUDDEC_ANA_ID_SFT                                 0
+#define AUDDEC_ANA_ID_MASK                                0xff
+#define AUDDEC_ANA_ID_MASK_SFT                            (0xff << 0)
+#define AUDDEC_DIG_ID_SFT                                 8
+#define AUDDEC_DIG_ID_MASK                                0xff
+#define AUDDEC_DIG_ID_MASK_SFT                            (0xff << 8)
+
+/* MT6358_AUDDEC_DSN_REV0 */
+#define AUDDEC_ANA_MINOR_REV_SFT                          0
+#define AUDDEC_ANA_MINOR_REV_MASK                         0xf
+#define AUDDEC_ANA_MINOR_REV_MASK_SFT                     (0xf << 0)
+#define AUDDEC_ANA_MAJOR_REV_SFT                          4
+#define AUDDEC_ANA_MAJOR_REV_MASK                         0xf
+#define AUDDEC_ANA_MAJOR_REV_MASK_SFT                     (0xf << 4)
+#define AUDDEC_DIG_MINOR_REV_SFT                          8
+#define AUDDEC_DIG_MINOR_REV_MASK                         0xf
+#define AUDDEC_DIG_MINOR_REV_MASK_SFT                     (0xf << 8)
+#define AUDDEC_DIG_MAJOR_REV_SFT                          12
+#define AUDDEC_DIG_MAJOR_REV_MASK                         0xf
+#define AUDDEC_DIG_MAJOR_REV_MASK_SFT                     (0xf << 12)
+
+/* MT6358_AUDDEC_DSN_DBI */
+#define AUDDEC_DSN_CBS_SFT                                0
+#define AUDDEC_DSN_CBS_MASK                               0x3
+#define AUDDEC_DSN_CBS_MASK_SFT                           (0x3 << 0)
+#define AUDDEC_DSN_BIX_SFT                                2
+#define AUDDEC_DSN_BIX_MASK                               0x3
+#define AUDDEC_DSN_BIX_MASK_SFT                           (0x3 << 2)
+#define AUDDEC_DSN_ESP_SFT                                8
+#define AUDDEC_DSN_ESP_MASK                               0xff
+#define AUDDEC_DSN_ESP_MASK_SFT                           (0xff << 8)
+
+/* MT6358_AUDDEC_DSN_FPI */
+#define AUDDEC_DSN_FPI_SFT                                0
+#define AUDDEC_DSN_FPI_MASK                               0xff
+#define AUDDEC_DSN_FPI_MASK_SFT                           (0xff << 0)
+
+/* MT6358_AUDDEC_ANA_CON0 */
+#define RG_AUDDACLPWRUP_VAUDP15_SFT                       0
+#define RG_AUDDACLPWRUP_VAUDP15_MASK                      0x1
+#define RG_AUDDACLPWRUP_VAUDP15_MASK_SFT                  (0x1 << 0)
+#define RG_AUDDACRPWRUP_VAUDP15_SFT                       1
+#define RG_AUDDACRPWRUP_VAUDP15_MASK                      0x1
+#define RG_AUDDACRPWRUP_VAUDP15_MASK_SFT                  (0x1 << 1)
+#define RG_AUD_DAC_PWR_UP_VA28_SFT                        2
+#define RG_AUD_DAC_PWR_UP_VA28_MASK                       0x1
+#define RG_AUD_DAC_PWR_UP_VA28_MASK_SFT                   (0x1 << 2)
+#define RG_AUD_DAC_PWL_UP_VA28_SFT                        3
+#define RG_AUD_DAC_PWL_UP_VA28_MASK                       0x1
+#define RG_AUD_DAC_PWL_UP_VA28_MASK_SFT                   (0x1 << 3)
+#define RG_AUDHPLPWRUP_VAUDP15_SFT                        4
+#define RG_AUDHPLPWRUP_VAUDP15_MASK                       0x1
+#define RG_AUDHPLPWRUP_VAUDP15_MASK_SFT                   (0x1 << 4)
+#define RG_AUDHPRPWRUP_VAUDP15_SFT                        5
+#define RG_AUDHPRPWRUP_VAUDP15_MASK                       0x1
+#define RG_AUDHPRPWRUP_VAUDP15_MASK_SFT                   (0x1 << 5)
+#define RG_AUDHPLPWRUP_IBIAS_VAUDP15_SFT                  6
+#define RG_AUDHPLPWRUP_IBIAS_VAUDP15_MASK                 0x1
+#define RG_AUDHPLPWRUP_IBIAS_VAUDP15_MASK_SFT             (0x1 << 6)
+#define RG_AUDHPRPWRUP_IBIAS_VAUDP15_SFT                  7
+#define RG_AUDHPRPWRUP_IBIAS_VAUDP15_MASK                 0x1
+#define RG_AUDHPRPWRUP_IBIAS_VAUDP15_MASK_SFT             (0x1 << 7)
+#define RG_AUDHPLMUXINPUTSEL_VAUDP15_SFT                  8
+#define RG_AUDHPLMUXINPUTSEL_VAUDP15_MASK                 0x3
+#define RG_AUDHPLMUXINPUTSEL_VAUDP15_MASK_SFT             (0x3 << 8)
+#define RG_AUDHPRMUXINPUTSEL_VAUDP15_SFT                  10
+#define RG_AUDHPRMUXINPUTSEL_VAUDP15_MASK                 0x3
+#define RG_AUDHPRMUXINPUTSEL_VAUDP15_MASK_SFT             (0x3 << 10)
+#define RG_AUDHPLSCDISABLE_VAUDP15_SFT                    12
+#define RG_AUDHPLSCDISABLE_VAUDP15_MASK                   0x1
+#define RG_AUDHPLSCDISABLE_VAUDP15_MASK_SFT               (0x1 << 12)
+#define RG_AUDHPRSCDISABLE_VAUDP15_SFT                    13
+#define RG_AUDHPRSCDISABLE_VAUDP15_MASK                   0x1
+#define RG_AUDHPRSCDISABLE_VAUDP15_MASK_SFT               (0x1 << 13)
+#define RG_AUDHPLBSCCURRENT_VAUDP15_SFT                   14
+#define RG_AUDHPLBSCCURRENT_VAUDP15_MASK                  0x1
+#define RG_AUDHPLBSCCURRENT_VAUDP15_MASK_SFT              (0x1 << 14)
+#define RG_AUDHPRBSCCURRENT_VAUDP15_SFT                   15
+#define RG_AUDHPRBSCCURRENT_VAUDP15_MASK                  0x1
+#define RG_AUDHPRBSCCURRENT_VAUDP15_MASK_SFT              (0x1 << 15)
+
+/* MT6358_AUDDEC_ANA_CON1 */
+#define RG_AUDHPLOUTPWRUP_VAUDP15_SFT                     0
+#define RG_AUDHPLOUTPWRUP_VAUDP15_MASK                    0x1
+#define RG_AUDHPLOUTPWRUP_VAUDP15_MASK_SFT                (0x1 << 0)
+#define RG_AUDHPROUTPWRUP_VAUDP15_SFT                     1
+#define RG_AUDHPROUTPWRUP_VAUDP15_MASK                    0x1
+#define RG_AUDHPROUTPWRUP_VAUDP15_MASK_SFT                (0x1 << 1)
+#define RG_AUDHPLOUTAUXPWRUP_VAUDP15_SFT                  2
+#define RG_AUDHPLOUTAUXPWRUP_VAUDP15_MASK                 0x1
+#define RG_AUDHPLOUTAUXPWRUP_VAUDP15_MASK_SFT             (0x1 << 2)
+#define RG_AUDHPROUTAUXPWRUP_VAUDP15_SFT                  3
+#define RG_AUDHPROUTAUXPWRUP_VAUDP15_MASK                 0x1
+#define RG_AUDHPROUTAUXPWRUP_VAUDP15_MASK_SFT             (0x1 << 3)
+#define RG_HPLAUXFBRSW_EN_VAUDP15_SFT                     4
+#define RG_HPLAUXFBRSW_EN_VAUDP15_MASK                    0x1
+#define RG_HPLAUXFBRSW_EN_VAUDP15_MASK_SFT                (0x1 << 4)
+#define RG_HPRAUXFBRSW_EN_VAUDP15_SFT                     5
+#define RG_HPRAUXFBRSW_EN_VAUDP15_MASK                    0x1
+#define RG_HPRAUXFBRSW_EN_VAUDP15_MASK_SFT                (0x1 << 5)
+#define RG_HPLSHORT2HPLAUX_EN_VAUDP15_SFT                 6
+#define RG_HPLSHORT2HPLAUX_EN_VAUDP15_MASK                0x1
+#define RG_HPLSHORT2HPLAUX_EN_VAUDP15_MASK_SFT            (0x1 << 6)
+#define RG_HPRSHORT2HPRAUX_EN_VAUDP15_SFT                 7
+#define RG_HPRSHORT2HPRAUX_EN_VAUDP15_MASK                0x1
+#define RG_HPRSHORT2HPRAUX_EN_VAUDP15_MASK_SFT            (0x1 << 7)
+#define RG_HPLOUTSTGCTRL_VAUDP15_SFT                      8
+#define RG_HPLOUTSTGCTRL_VAUDP15_MASK                     0x7
+#define RG_HPLOUTSTGCTRL_VAUDP15_MASK_SFT                 (0x7 << 8)
+#define RG_HPROUTSTGCTRL_VAUDP15_SFT                      11
+#define RG_HPROUTSTGCTRL_VAUDP15_MASK                     0x7
+#define RG_HPROUTSTGCTRL_VAUDP15_MASK_SFT                 (0x7 << 11)
+
+/* MT6358_AUDDEC_ANA_CON2 */
+#define RG_HPLOUTPUTSTBENH_VAUDP15_SFT                    0
+#define RG_HPLOUTPUTSTBENH_VAUDP15_MASK                   0x7
+#define RG_HPLOUTPUTSTBENH_VAUDP15_MASK_SFT               (0x7 << 0)
+#define RG_HPROUTPUTSTBENH_VAUDP15_SFT                    4
+#define RG_HPROUTPUTSTBENH_VAUDP15_MASK                   0x7
+#define RG_HPROUTPUTSTBENH_VAUDP15_MASK_SFT               (0x7 << 4)
+#define RG_AUDHPSTARTUP_VAUDP15_SFT                       13
+#define RG_AUDHPSTARTUP_VAUDP15_MASK                      0x1
+#define RG_AUDHPSTARTUP_VAUDP15_MASK_SFT                  (0x1 << 13)
+#define RG_AUDREFN_DERES_EN_VAUDP15_SFT                   14
+#define RG_AUDREFN_DERES_EN_VAUDP15_MASK                  0x1
+#define RG_AUDREFN_DERES_EN_VAUDP15_MASK_SFT              (0x1 << 14)
+#define RG_HPPSHORT2VCM_VAUDP15_SFT                       15
+#define RG_HPPSHORT2VCM_VAUDP15_MASK                      0x1
+#define RG_HPPSHORT2VCM_VAUDP15_MASK_SFT                  (0x1 << 15)
+
+/* MT6358_AUDDEC_ANA_CON3 */
+#define RG_HPINPUTSTBENH_VAUDP15_SFT                      13
+#define RG_HPINPUTSTBENH_VAUDP15_MASK                     0x1
+#define RG_HPINPUTSTBENH_VAUDP15_MASK_SFT                 (0x1 << 13)
+#define RG_HPINPUTRESET0_VAUDP15_SFT                      14
+#define RG_HPINPUTRESET0_VAUDP15_MASK                     0x1
+#define RG_HPINPUTRESET0_VAUDP15_MASK_SFT                 (0x1 << 14)
+#define RG_HPOUTPUTRESET0_VAUDP15_SFT                     15
+#define RG_HPOUTPUTRESET0_VAUDP15_MASK                    0x1
+#define RG_HPOUTPUTRESET0_VAUDP15_MASK_SFT                (0x1 << 15)
+
+/* MT6358_AUDDEC_ANA_CON4 */
+#define RG_ABIDEC_RSVD0_VAUDP28_SFT                       0
+#define RG_ABIDEC_RSVD0_VAUDP28_MASK                      0xff
+#define RG_ABIDEC_RSVD0_VAUDP28_MASK_SFT                  (0xff << 0)
+
+/* MT6358_AUDDEC_ANA_CON5 */
+#define RG_AUDHPDECMGAINADJ_VAUDP15_SFT                   0
+#define RG_AUDHPDECMGAINADJ_VAUDP15_MASK                  0x7
+#define RG_AUDHPDECMGAINADJ_VAUDP15_MASK_SFT              (0x7 << 0)
+#define RG_AUDHPDEDMGAINADJ_VAUDP15_SFT                   4
+#define RG_AUDHPDEDMGAINADJ_VAUDP15_MASK                  0x7
+#define RG_AUDHPDEDMGAINADJ_VAUDP15_MASK_SFT              (0x7 << 4)
+
+/* MT6358_AUDDEC_ANA_CON6 */
+#define RG_AUDHSPWRUP_VAUDP15_SFT                         0
+#define RG_AUDHSPWRUP_VAUDP15_MASK                        0x1
+#define RG_AUDHSPWRUP_VAUDP15_MASK_SFT                    (0x1 << 0)
+#define RG_AUDHSPWRUP_IBIAS_VAUDP15_SFT                   1
+#define RG_AUDHSPWRUP_IBIAS_VAUDP15_MASK                  0x1
+#define RG_AUDHSPWRUP_IBIAS_VAUDP15_MASK_SFT              (0x1 << 1)
+#define RG_AUDHSMUXINPUTSEL_VAUDP15_SFT                   2
+#define RG_AUDHSMUXINPUTSEL_VAUDP15_MASK                  0x3
+#define RG_AUDHSMUXINPUTSEL_VAUDP15_MASK_SFT              (0x3 << 2)
+#define RG_AUDHSSCDISABLE_VAUDP15_SFT                     4
+#define RG_AUDHSSCDISABLE_VAUDP15_MASK                    0x1
+#define RG_AUDHSSCDISABLE_VAUDP15_MASK_SFT                (0x1 << 4)
+#define RG_AUDHSBSCCURRENT_VAUDP15_SFT                    5
+#define RG_AUDHSBSCCURRENT_VAUDP15_MASK                   0x1
+#define RG_AUDHSBSCCURRENT_VAUDP15_MASK_SFT               (0x1 << 5)
+#define RG_AUDHSSTARTUP_VAUDP15_SFT                       6
+#define RG_AUDHSSTARTUP_VAUDP15_MASK                      0x1
+#define RG_AUDHSSTARTUP_VAUDP15_MASK_SFT                  (0x1 << 6)
+#define RG_HSOUTPUTSTBENH_VAUDP15_SFT                     7
+#define RG_HSOUTPUTSTBENH_VAUDP15_MASK                    0x1
+#define RG_HSOUTPUTSTBENH_VAUDP15_MASK_SFT                (0x1 << 7)
+#define RG_HSINPUTSTBENH_VAUDP15_SFT                      8
+#define RG_HSINPUTSTBENH_VAUDP15_MASK                     0x1
+#define RG_HSINPUTSTBENH_VAUDP15_MASK_SFT                 (0x1 << 8)
+#define RG_HSINPUTRESET0_VAUDP15_SFT                      9
+#define RG_HSINPUTRESET0_VAUDP15_MASK                     0x1
+#define RG_HSINPUTRESET0_VAUDP15_MASK_SFT                 (0x1 << 9)
+#define RG_HSOUTPUTRESET0_VAUDP15_SFT                     10
+#define RG_HSOUTPUTRESET0_VAUDP15_MASK                    0x1
+#define RG_HSOUTPUTRESET0_VAUDP15_MASK_SFT                (0x1 << 10)
+#define RG_HSOUT_SHORTVCM_VAUDP15_SFT                     11
+#define RG_HSOUT_SHORTVCM_VAUDP15_MASK                    0x1
+#define RG_HSOUT_SHORTVCM_VAUDP15_MASK_SFT                (0x1 << 11)
+
+/* MT6358_AUDDEC_ANA_CON7 */
+#define RG_AUDLOLPWRUP_VAUDP15_SFT                        0
+#define RG_AUDLOLPWRUP_VAUDP15_MASK                       0x1
+#define RG_AUDLOLPWRUP_VAUDP15_MASK_SFT                   (0x1 << 0)
+#define RG_AUDLOLPWRUP_IBIAS_VAUDP15_SFT                  1
+#define RG_AUDLOLPWRUP_IBIAS_VAUDP15_MASK                 0x1
+#define RG_AUDLOLPWRUP_IBIAS_VAUDP15_MASK_SFT             (0x1 << 1)
+#define RG_AUDLOLMUXINPUTSEL_VAUDP15_SFT                  2
+#define RG_AUDLOLMUXINPUTSEL_VAUDP15_MASK                 0x3
+#define RG_AUDLOLMUXINPUTSEL_VAUDP15_MASK_SFT             (0x3 << 2)
+#define RG_AUDLOLSCDISABLE_VAUDP15_SFT                    4
+#define RG_AUDLOLSCDISABLE_VAUDP15_MASK                   0x1
+#define RG_AUDLOLSCDISABLE_VAUDP15_MASK_SFT               (0x1 << 4)
+#define RG_AUDLOLBSCCURRENT_VAUDP15_SFT                   5
+#define RG_AUDLOLBSCCURRENT_VAUDP15_MASK                  0x1
+#define RG_AUDLOLBSCCURRENT_VAUDP15_MASK_SFT              (0x1 << 5)
+#define RG_AUDLOSTARTUP_VAUDP15_SFT                       6
+#define RG_AUDLOSTARTUP_VAUDP15_MASK                      0x1
+#define RG_AUDLOSTARTUP_VAUDP15_MASK_SFT                  (0x1 << 6)
+#define RG_LOINPUTSTBENH_VAUDP15_SFT                      7
+#define RG_LOINPUTSTBENH_VAUDP15_MASK                     0x1
+#define RG_LOINPUTSTBENH_VAUDP15_MASK_SFT                 (0x1 << 7)
+#define RG_LOOUTPUTSTBENH_VAUDP15_SFT                     8
+#define RG_LOOUTPUTSTBENH_VAUDP15_MASK                    0x1
+#define RG_LOOUTPUTSTBENH_VAUDP15_MASK_SFT                (0x1 << 8)
+#define RG_LOINPUTRESET0_VAUDP15_SFT                      9
+#define RG_LOINPUTRESET0_VAUDP15_MASK                     0x1
+#define RG_LOINPUTRESET0_VAUDP15_MASK_SFT                 (0x1 << 9)
+#define RG_LOOUTPUTRESET0_VAUDP15_SFT                     10
+#define RG_LOOUTPUTRESET0_VAUDP15_MASK                    0x1
+#define RG_LOOUTPUTRESET0_VAUDP15_MASK_SFT                (0x1 << 10)
+#define RG_LOOUT_SHORTVCM_VAUDP15_SFT                     11
+#define RG_LOOUT_SHORTVCM_VAUDP15_MASK                    0x1
+#define RG_LOOUT_SHORTVCM_VAUDP15_MASK_SFT                (0x1 << 11)
+
+/* MT6358_AUDDEC_ANA_CON8 */
+#define RG_AUDTRIMBUF_INPUTMUXSEL_VAUDP15_SFT             0
+#define RG_AUDTRIMBUF_INPUTMUXSEL_VAUDP15_MASK            0xf
+#define RG_AUDTRIMBUF_INPUTMUXSEL_VAUDP15_MASK_SFT        (0xf << 0)
+#define RG_AUDTRIMBUF_GAINSEL_VAUDP15_SFT                 4
+#define RG_AUDTRIMBUF_GAINSEL_VAUDP15_MASK                0x3
+#define RG_AUDTRIMBUF_GAINSEL_VAUDP15_MASK_SFT            (0x3 << 4)
+#define RG_AUDTRIMBUF_EN_VAUDP15_SFT                      6
+#define RG_AUDTRIMBUF_EN_VAUDP15_MASK                     0x1
+#define RG_AUDTRIMBUF_EN_VAUDP15_MASK_SFT                 (0x1 << 6)
+#define RG_AUDHPSPKDET_INPUTMUXSEL_VAUDP15_SFT            8
+#define RG_AUDHPSPKDET_INPUTMUXSEL_VAUDP15_MASK           0x3
+#define RG_AUDHPSPKDET_INPUTMUXSEL_VAUDP15_MASK_SFT       (0x3 << 8)
+#define RG_AUDHPSPKDET_OUTPUTMUXSEL_VAUDP15_SFT           10
+#define RG_AUDHPSPKDET_OUTPUTMUXSEL_VAUDP15_MASK          0x3
+#define RG_AUDHPSPKDET_OUTPUTMUXSEL_VAUDP15_MASK_SFT      (0x3 << 10)
+#define RG_AUDHPSPKDET_EN_VAUDP15_SFT                     12
+#define RG_AUDHPSPKDET_EN_VAUDP15_MASK                    0x1
+#define RG_AUDHPSPKDET_EN_VAUDP15_MASK_SFT                (0x1 << 12)
+
+/* MT6358_AUDDEC_ANA_CON9 */
+#define RG_ABIDEC_RSVD0_VA28_SFT                          0
+#define RG_ABIDEC_RSVD0_VA28_MASK                         0xff
+#define RG_ABIDEC_RSVD0_VA28_MASK_SFT                     (0xff << 0)
+#define RG_ABIDEC_RSVD0_VAUDP15_SFT                       8
+#define RG_ABIDEC_RSVD0_VAUDP15_MASK                      0xff
+#define RG_ABIDEC_RSVD0_VAUDP15_MASK_SFT                  (0xff << 8)
+
+/* MT6358_AUDDEC_ANA_CON10 */
+#define RG_ABIDEC_RSVD1_VAUDP15_SFT                       0
+#define RG_ABIDEC_RSVD1_VAUDP15_MASK                      0xff
+#define RG_ABIDEC_RSVD1_VAUDP15_MASK_SFT                  (0xff << 0)
+#define RG_ABIDEC_RSVD2_VAUDP15_SFT                       8
+#define RG_ABIDEC_RSVD2_VAUDP15_MASK                      0xff
+#define RG_ABIDEC_RSVD2_VAUDP15_MASK_SFT                  (0xff << 8)
+
+/* MT6358_AUDDEC_ANA_CON11 */
+#define RG_AUDZCDMUXSEL_VAUDP15_SFT                       0
+#define RG_AUDZCDMUXSEL_VAUDP15_MASK                      0x7
+#define RG_AUDZCDMUXSEL_VAUDP15_MASK_SFT                  (0x7 << 0)
+#define RG_AUDZCDCLKSEL_VAUDP15_SFT                       3
+#define RG_AUDZCDCLKSEL_VAUDP15_MASK                      0x1
+#define RG_AUDZCDCLKSEL_VAUDP15_MASK_SFT                  (0x1 << 3)
+#define RG_AUDBIASADJ_0_VAUDP15_SFT                       7
+#define RG_AUDBIASADJ_0_VAUDP15_MASK                      0x1ff
+#define RG_AUDBIASADJ_0_VAUDP15_MASK_SFT                  (0x1ff << 7)
+
+/* MT6358_AUDDEC_ANA_CON12 */
+#define RG_AUDBIASADJ_1_VAUDP15_SFT                       0
+#define RG_AUDBIASADJ_1_VAUDP15_MASK                      0xff
+#define RG_AUDBIASADJ_1_VAUDP15_MASK_SFT                  (0xff << 0)
+#define RG_AUDIBIASPWRDN_VAUDP15_SFT                      8
+#define RG_AUDIBIASPWRDN_VAUDP15_MASK                     0x1
+#define RG_AUDIBIASPWRDN_VAUDP15_MASK_SFT                 (0x1 << 8)
+
+/* MT6358_AUDDEC_ANA_CON13 */
+#define RG_RSTB_DECODER_VA28_SFT                          0
+#define RG_RSTB_DECODER_VA28_MASK                         0x1
+#define RG_RSTB_DECODER_VA28_MASK_SFT                     (0x1 << 0)
+#define RG_SEL_DECODER_96K_VA28_SFT                       1
+#define RG_SEL_DECODER_96K_VA28_MASK                      0x1
+#define RG_SEL_DECODER_96K_VA28_MASK_SFT                  (0x1 << 1)
+#define RG_SEL_DELAY_VCORE_SFT                            2
+#define RG_SEL_DELAY_VCORE_MASK                           0x1
+#define RG_SEL_DELAY_VCORE_MASK_SFT                       (0x1 << 2)
+#define RG_AUDGLB_PWRDN_VA28_SFT                          4
+#define RG_AUDGLB_PWRDN_VA28_MASK                         0x1
+#define RG_AUDGLB_PWRDN_VA28_MASK_SFT                     (0x1 << 4)
+#define RG_RSTB_ENCODER_VA28_SFT                          5
+#define RG_RSTB_ENCODER_VA28_MASK                         0x1
+#define RG_RSTB_ENCODER_VA28_MASK_SFT                     (0x1 << 5)
+#define RG_SEL_ENCODER_96K_VA28_SFT                       6
+#define RG_SEL_ENCODER_96K_VA28_MASK                      0x1
+#define RG_SEL_ENCODER_96K_VA28_MASK_SFT                  (0x1 << 6)
+
+/* MT6358_AUDDEC_ANA_CON14 */
+#define RG_HCLDO_EN_VA18_SFT                              0
+#define RG_HCLDO_EN_VA18_MASK                             0x1
+#define RG_HCLDO_EN_VA18_MASK_SFT                         (0x1 << 0)
+#define RG_HCLDO_PDDIS_EN_VA18_SFT                        1
+#define RG_HCLDO_PDDIS_EN_VA18_MASK                       0x1
+#define RG_HCLDO_PDDIS_EN_VA18_MASK_SFT                   (0x1 << 1)
+#define RG_HCLDO_REMOTE_SENSE_VA18_SFT                    2
+#define RG_HCLDO_REMOTE_SENSE_VA18_MASK                   0x1
+#define RG_HCLDO_REMOTE_SENSE_VA18_MASK_SFT               (0x1 << 2)
+#define RG_LCLDO_EN_VA18_SFT                              4
+#define RG_LCLDO_EN_VA18_MASK                             0x1
+#define RG_LCLDO_EN_VA18_MASK_SFT                         (0x1 << 4)
+#define RG_LCLDO_PDDIS_EN_VA18_SFT                        5
+#define RG_LCLDO_PDDIS_EN_VA18_MASK                       0x1
+#define RG_LCLDO_PDDIS_EN_VA18_MASK_SFT                   (0x1 << 5)
+#define RG_LCLDO_REMOTE_SENSE_VA18_SFT                    6
+#define RG_LCLDO_REMOTE_SENSE_VA18_MASK                   0x1
+#define RG_LCLDO_REMOTE_SENSE_VA18_MASK_SFT               (0x1 << 6)
+#define RG_LCLDO_ENC_EN_VA28_SFT                          8
+#define RG_LCLDO_ENC_EN_VA28_MASK                         0x1
+#define RG_LCLDO_ENC_EN_VA28_MASK_SFT                     (0x1 << 8)
+#define RG_LCLDO_ENC_PDDIS_EN_VA28_SFT                    9
+#define RG_LCLDO_ENC_PDDIS_EN_VA28_MASK                   0x1
+#define RG_LCLDO_ENC_PDDIS_EN_VA28_MASK_SFT               (0x1 << 9)
+#define RG_LCLDO_ENC_REMOTE_SENSE_VA28_SFT                10
+#define RG_LCLDO_ENC_REMOTE_SENSE_VA28_MASK               0x1
+#define RG_LCLDO_ENC_REMOTE_SENSE_VA28_MASK_SFT           (0x1 << 10)
+#define RG_VA33REFGEN_EN_VA18_SFT                         12
+#define RG_VA33REFGEN_EN_VA18_MASK                        0x1
+#define RG_VA33REFGEN_EN_VA18_MASK_SFT                    (0x1 << 12)
+#define RG_VA28REFGEN_EN_VA28_SFT                         13
+#define RG_VA28REFGEN_EN_VA28_MASK                        0x1
+#define RG_VA28REFGEN_EN_VA28_MASK_SFT                    (0x1 << 13)
+#define RG_HCLDO_VOSEL_VA18_SFT                           14
+#define RG_HCLDO_VOSEL_VA18_MASK                          0x1
+#define RG_HCLDO_VOSEL_VA18_MASK_SFT                      (0x1 << 14)
+#define RG_LCLDO_VOSEL_VA18_SFT                           15
+#define RG_LCLDO_VOSEL_VA18_MASK                          0x1
+#define RG_LCLDO_VOSEL_VA18_MASK_SFT                      (0x1 << 15)
+
+/* MT6358_AUDDEC_ANA_CON15 */
+#define RG_NVREG_EN_VAUDP15_SFT                           0
+#define RG_NVREG_EN_VAUDP15_MASK                          0x1
+#define RG_NVREG_EN_VAUDP15_MASK_SFT                      (0x1 << 0)
+#define RG_NVREG_PULL0V_VAUDP15_SFT                       1
+#define RG_NVREG_PULL0V_VAUDP15_MASK                      0x1
+#define RG_NVREG_PULL0V_VAUDP15_MASK_SFT                  (0x1 << 1)
+#define RG_AUDPMU_RSD0_VAUDP15_SFT                        4
+#define RG_AUDPMU_RSD0_VAUDP15_MASK                       0xf
+#define RG_AUDPMU_RSD0_VAUDP15_MASK_SFT                   (0xf << 4)
+#define RG_AUDPMU_RSD0_VA18_SFT                           8
+#define RG_AUDPMU_RSD0_VA18_MASK                          0xf
+#define RG_AUDPMU_RSD0_VA18_MASK_SFT                      (0xf << 8)
+#define RG_AUDPMU_RSD0_VA28_SFT                           12
+#define RG_AUDPMU_RSD0_VA28_MASK                          0xf
+#define RG_AUDPMU_RSD0_VA28_MASK_SFT                      (0xf << 12)
+
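+/*
+ * Editor's gloss (assumption, not from the original header): ZCD is the
+ * zero-crossing detector; the gain steps below are applied on signal
+ * zero crossings so that volume changes do not produce audible pops.
+ */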
+/* MT6358_ZCD_CON0 */
+#define RG_AUDZCDENABLE_SFT                               0
+#define RG_AUDZCDENABLE_MASK                              0x1
+#define RG_AUDZCDENABLE_MASK_SFT                          (0x1 << 0)
+#define RG_AUDZCDGAINSTEPTIME_SFT                         1
+#define RG_AUDZCDGAINSTEPTIME_MASK                        0x7
+#define RG_AUDZCDGAINSTEPTIME_MASK_SFT                    (0x7 << 1)
+#define RG_AUDZCDGAINSTEPSIZE_SFT                         4
+#define RG_AUDZCDGAINSTEPSIZE_MASK                        0x3
+#define RG_AUDZCDGAINSTEPSIZE_MASK_SFT                    (0x3 << 4)
+#define RG_AUDZCDTIMEOUTMODESEL_SFT                       6
+#define RG_AUDZCDTIMEOUTMODESEL_MASK                      0x1
+#define RG_AUDZCDTIMEOUTMODESEL_MASK_SFT                  (0x1 << 6)
+
+/* MT6358_ZCD_CON1 */
+#define RG_AUDLOLGAIN_SFT                                 0
+#define RG_AUDLOLGAIN_MASK                                0x1f
+#define RG_AUDLOLGAIN_MASK_SFT                            (0x1f << 0)
+#define RG_AUDLORGAIN_SFT                                 7
+#define RG_AUDLORGAIN_MASK                                0x1f
+#define RG_AUDLORGAIN_MASK_SFT                            (0x1f << 7)
+
+/* MT6358_ZCD_CON2 */
+#define RG_AUDHPLGAIN_SFT                                 0
+#define RG_AUDHPLGAIN_MASK                                0x1f
+#define RG_AUDHPLGAIN_MASK_SFT                            (0x1f << 0)
+#define RG_AUDHPRGAIN_SFT                                 7
+#define RG_AUDHPRGAIN_MASK                                0x1f
+#define RG_AUDHPRGAIN_MASK_SFT                            (0x1f << 7)
+
+/* MT6358_ZCD_CON3 */
+#define RG_AUDHSGAIN_SFT                                  0
+#define RG_AUDHSGAIN_MASK                                 0x1f
+#define RG_AUDHSGAIN_MASK_SFT                             (0x1f << 0)
+
+/* MT6358_ZCD_CON4 */
+#define RG_AUDIVLGAIN_SFT                                 0
+#define RG_AUDIVLGAIN_MASK                                0x7
+#define RG_AUDIVLGAIN_MASK_SFT                            (0x7 << 0)
+#define RG_AUDIVRGAIN_SFT                                 8
+#define RG_AUDIVRGAIN_MASK                                0x7
+#define RG_AUDIVRGAIN_MASK_SFT                            (0x7 << 8)
+
+/* MT6358_ZCD_CON5 */
+#define RG_AUDINTGAIN1_SFT                                0
+#define RG_AUDINTGAIN1_MASK                               0x3f
+#define RG_AUDINTGAIN1_MASK_SFT                           (0x3f << 0)
+#define RG_AUDINTGAIN2_SFT                                8
+#define RG_AUDINTGAIN2_MASK                               0x3f
+#define RG_AUDINTGAIN2_MASK_SFT                           (0x3f << 8)
+
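+/*
+ * Field macro convention (editor's sketch): every bitfield FOO above
+ * comes as FOO_SFT (bit offset), FOO_MASK (unshifted width mask) and
+ * FOO_MASK_SFT (mask shifted into position). A typical regmap update
+ * of a single field therefore looks like:
+ *
+ *	regmap_update_bits(regmap, MT6358_ZCD_CON1,
+ *			   RG_AUDLOLGAIN_MASK_SFT,
+ *			   0x8 << RG_AUDLOLGAIN_SFT);
+ */
+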
+/* PMIC system registers */
+#define MT6358_DRV_CON3            0x3c
+#define MT6358_GPIO_DIR0           0x88
+
+#define MT6358_GPIO_MODE2          0xd8	/* mosi */
+#define MT6358_GPIO_MODE2_SET      0xda
+#define MT6358_GPIO_MODE2_CLR      0xdc
+
+#define MT6358_GPIO_MODE3          0xde	/* miso */
+#define MT6358_GPIO_MODE3_SET      0xe0
+#define MT6358_GPIO_MODE3_CLR      0xe2
+
+#define MT6358_TOP_CKPDN_CON0      0x10c
+#define MT6358_TOP_CKPDN_CON0_SET  0x10e
+#define MT6358_TOP_CKPDN_CON0_CLR  0x110
+
+#define MT6358_TOP_CKHWEN_CON0     0x12a
+#define MT6358_TOP_CKHWEN_CON0_SET 0x12c
+#define MT6358_TOP_CKHWEN_CON0_CLR 0x12e
+
+#define MT6358_OTP_CON0            0x38a
+#define MT6358_OTP_CON8            0x39a
+#define MT6358_OTP_CON11           0x3a0
+#define MT6358_OTP_CON12           0x3a2
+#define MT6358_OTP_CON13           0x3a4
+
+#define MT6358_DCXO_CW13           0x7aa
+#define MT6358_DCXO_CW14           0x7ac
+
+#define MT6358_AUXADC_CON10        0x11a0
+
+/* audio register */
+#define MT6358_AUD_TOP_ID                    0x2200
+#define MT6358_AUD_TOP_REV0                  0x2202
+#define MT6358_AUD_TOP_DBI                   0x2204
+#define MT6358_AUD_TOP_DXI                   0x2206
+#define MT6358_AUD_TOP_CKPDN_TPM0            0x2208
+#define MT6358_AUD_TOP_CKPDN_TPM1            0x220a
+#define MT6358_AUD_TOP_CKPDN_CON0            0x220c
+#define MT6358_AUD_TOP_CKPDN_CON0_SET        0x220e
+#define MT6358_AUD_TOP_CKPDN_CON0_CLR        0x2210
+#define MT6358_AUD_TOP_CKSEL_CON0            0x2212
+#define MT6358_AUD_TOP_CKSEL_CON0_SET        0x2214
+#define MT6358_AUD_TOP_CKSEL_CON0_CLR        0x2216
+#define MT6358_AUD_TOP_CKTST_CON0            0x2218
+#define MT6358_AUD_TOP_CLK_HWEN_CON0         0x221a
+#define MT6358_AUD_TOP_CLK_HWEN_CON0_SET     0x221c
+#define MT6358_AUD_TOP_CLK_HWEN_CON0_CLR     0x221e
+#define MT6358_AUD_TOP_RST_CON0              0x2220
+#define MT6358_AUD_TOP_RST_CON0_SET          0x2222
+#define MT6358_AUD_TOP_RST_CON0_CLR          0x2224
+#define MT6358_AUD_TOP_RST_BANK_CON0         0x2226
+#define MT6358_AUD_TOP_INT_CON0              0x2228
+#define MT6358_AUD_TOP_INT_CON0_SET          0x222a
+#define MT6358_AUD_TOP_INT_CON0_CLR          0x222c
+#define MT6358_AUD_TOP_INT_MASK_CON0         0x222e
+#define MT6358_AUD_TOP_INT_MASK_CON0_SET     0x2230
+#define MT6358_AUD_TOP_INT_MASK_CON0_CLR     0x2232
+#define MT6358_AUD_TOP_INT_STATUS0           0x2234
+#define MT6358_AUD_TOP_INT_RAW_STATUS0       0x2236
+#define MT6358_AUD_TOP_INT_MISC_CON0         0x2238
+#define MT6358_AUDNCP_CLKDIV_CON0            0x223a
+#define MT6358_AUDNCP_CLKDIV_CON1            0x223c
+#define MT6358_AUDNCP_CLKDIV_CON2            0x223e
+#define MT6358_AUDNCP_CLKDIV_CON3            0x2240
+#define MT6358_AUDNCP_CLKDIV_CON4            0x2242
+#define MT6358_AUD_TOP_MON_CON0              0x2244
+#define MT6358_AUDIO_DIG_DSN_ID              0x2280
+#define MT6358_AUDIO_DIG_DSN_REV0            0x2282
+#define MT6358_AUDIO_DIG_DSN_DBI             0x2284
+#define MT6358_AUDIO_DIG_DSN_DXI             0x2286
+#define MT6358_AFE_UL_DL_CON0                0x2288
+#define MT6358_AFE_DL_SRC2_CON0_L            0x228a
+#define MT6358_AFE_UL_SRC_CON0_H             0x228c
+#define MT6358_AFE_UL_SRC_CON0_L             0x228e
+#define MT6358_AFE_TOP_CON0                  0x2290
+#define MT6358_AUDIO_TOP_CON0                0x2292
+#define MT6358_AFE_MON_DEBUG0                0x2294
+#define MT6358_AFUNC_AUD_CON0                0x2296
+#define MT6358_AFUNC_AUD_CON1                0x2298
+#define MT6358_AFUNC_AUD_CON2                0x229a
+#define MT6358_AFUNC_AUD_CON3                0x229c
+#define MT6358_AFUNC_AUD_CON4                0x229e
+#define MT6358_AFUNC_AUD_CON5                0x22a0
+#define MT6358_AFUNC_AUD_CON6                0x22a2
+#define MT6358_AFUNC_AUD_MON0                0x22a4
+#define MT6358_AUDRC_TUNE_MON0               0x22a6
+#define MT6358_AFE_ADDA_MTKAIF_FIFO_CFG0     0x22a8
+#define MT6358_AFE_ADDA_MTKAIF_FIFO_LOG_MON1 0x22aa
+#define MT6358_AFE_ADDA_MTKAIF_MON0          0x22ac
+#define MT6358_AFE_ADDA_MTKAIF_MON1          0x22ae
+#define MT6358_AFE_ADDA_MTKAIF_MON2          0x22b0
+#define MT6358_AFE_ADDA_MTKAIF_MON3          0x22b2
+#define MT6358_AFE_ADDA_MTKAIF_CFG0          0x22b4
+#define MT6358_AFE_ADDA_MTKAIF_RX_CFG0       0x22b6
+#define MT6358_AFE_ADDA_MTKAIF_RX_CFG1       0x22b8
+#define MT6358_AFE_ADDA_MTKAIF_RX_CFG2       0x22ba
+#define MT6358_AFE_ADDA_MTKAIF_RX_CFG3       0x22bc
+#define MT6358_AFE_ADDA_MTKAIF_TX_CFG1       0x22be
+#define MT6358_AFE_SGEN_CFG0                 0x22c0
+#define MT6358_AFE_SGEN_CFG1                 0x22c2
+#define MT6358_AFE_ADC_ASYNC_FIFO_CFG        0x22c4
+#define MT6358_AFE_DCCLK_CFG0                0x22c6
+#define MT6358_AFE_DCCLK_CFG1                0x22c8
+#define MT6358_AUDIO_DIG_CFG                 0x22ca
+#define MT6358_AFE_AUD_PAD_TOP               0x22cc
+#define MT6358_AFE_AUD_PAD_TOP_MON           0x22ce
+#define MT6358_AFE_AUD_PAD_TOP_MON1          0x22d0
+#define MT6358_AFE_DL_NLE_CFG                0x22d2
+#define MT6358_AFE_DL_NLE_MON                0x22d4
+#define MT6358_AFE_CG_EN_MON                 0x22d6
+#define MT6358_AUDIO_DIG_2ND_DSN_ID          0x2300
+#define MT6358_AUDIO_DIG_2ND_DSN_REV0        0x2302
+#define MT6358_AUDIO_DIG_2ND_DSN_DBI         0x2304
+#define MT6358_AUDIO_DIG_2ND_DSN_DXI         0x2306
+#define MT6358_AFE_PMIC_NEWIF_CFG3           0x2308
+#define MT6358_AFE_VOW_TOP                   0x230a
+#define MT6358_AFE_VOW_CFG0                  0x230c
+#define MT6358_AFE_VOW_CFG1                  0x230e
+#define MT6358_AFE_VOW_CFG2                  0x2310
+#define MT6358_AFE_VOW_CFG3                  0x2312
+#define MT6358_AFE_VOW_CFG4                  0x2314
+#define MT6358_AFE_VOW_CFG5                  0x2316
+#define MT6358_AFE_VOW_CFG6                  0x2318
+#define MT6358_AFE_VOW_MON0                  0x231a
+#define MT6358_AFE_VOW_MON1                  0x231c
+#define MT6358_AFE_VOW_MON2                  0x231e
+#define MT6358_AFE_VOW_MON3                  0x2320
+#define MT6358_AFE_VOW_MON4                  0x2322
+#define MT6358_AFE_VOW_MON5                  0x2324
+#define MT6358_AFE_VOW_SN_INI_CFG            0x2326
+#define MT6358_AFE_VOW_TGEN_CFG0             0x2328
+#define MT6358_AFE_VOW_POSDIV_CFG0           0x232a
+#define MT6358_AFE_VOW_HPF_CFG0              0x232c
+#define MT6358_AFE_VOW_PERIODIC_CFG0         0x232e
+#define MT6358_AFE_VOW_PERIODIC_CFG1         0x2330
+#define MT6358_AFE_VOW_PERIODIC_CFG2         0x2332
+#define MT6358_AFE_VOW_PERIODIC_CFG3         0x2334
+#define MT6358_AFE_VOW_PERIODIC_CFG4         0x2336
+#define MT6358_AFE_VOW_PERIODIC_CFG5         0x2338
+#define MT6358_AFE_VOW_PERIODIC_CFG6         0x233a
+#define MT6358_AFE_VOW_PERIODIC_CFG7         0x233c
+#define MT6358_AFE_VOW_PERIODIC_CFG8         0x233e
+#define MT6358_AFE_VOW_PERIODIC_CFG9         0x2340
+#define MT6358_AFE_VOW_PERIODIC_CFG10        0x2342
+#define MT6358_AFE_VOW_PERIODIC_CFG11        0x2344
+#define MT6358_AFE_VOW_PERIODIC_CFG12        0x2346
+#define MT6358_AFE_VOW_PERIODIC_CFG13        0x2348
+#define MT6358_AFE_VOW_PERIODIC_CFG14        0x234a
+#define MT6358_AFE_VOW_PERIODIC_CFG15        0x234c
+#define MT6358_AFE_VOW_PERIODIC_CFG16        0x234e
+#define MT6358_AFE_VOW_PERIODIC_CFG17        0x2350
+#define MT6358_AFE_VOW_PERIODIC_CFG18        0x2352
+#define MT6358_AFE_VOW_PERIODIC_CFG19        0x2354
+#define MT6358_AFE_VOW_PERIODIC_CFG20        0x2356
+#define MT6358_AFE_VOW_PERIODIC_CFG21        0x2358
+#define MT6358_AFE_VOW_PERIODIC_CFG22        0x235a
+#define MT6358_AFE_VOW_PERIODIC_CFG23        0x235c
+#define MT6358_AFE_VOW_PERIODIC_MON0         0x235e
+#define MT6358_AFE_VOW_PERIODIC_MON1         0x2360
+#define MT6358_AUDENC_DSN_ID                 0x2380
+#define MT6358_AUDENC_DSN_REV0               0x2382
+#define MT6358_AUDENC_DSN_DBI                0x2384
+#define MT6358_AUDENC_DSN_FPI                0x2386
+#define MT6358_AUDENC_ANA_CON0               0x2388
+#define MT6358_AUDENC_ANA_CON1               0x238a
+#define MT6358_AUDENC_ANA_CON2               0x238c
+#define MT6358_AUDENC_ANA_CON3               0x238e
+#define MT6358_AUDENC_ANA_CON4               0x2390
+#define MT6358_AUDENC_ANA_CON5               0x2392
+#define MT6358_AUDENC_ANA_CON6               0x2394
+#define MT6358_AUDENC_ANA_CON7               0x2396
+#define MT6358_AUDENC_ANA_CON8               0x2398
+#define MT6358_AUDENC_ANA_CON9               0x239a
+#define MT6358_AUDENC_ANA_CON10              0x239c
+#define MT6358_AUDENC_ANA_CON11              0x239e
+#define MT6358_AUDENC_ANA_CON12              0x23a0
+#define MT6358_AUDDEC_DSN_ID                 0x2400
+#define MT6358_AUDDEC_DSN_REV0               0x2402
+#define MT6358_AUDDEC_DSN_DBI                0x2404
+#define MT6358_AUDDEC_DSN_FPI                0x2406
+#define MT6358_AUDDEC_ANA_CON0               0x2408
+#define MT6358_AUDDEC_ANA_CON1               0x240a
+#define MT6358_AUDDEC_ANA_CON2               0x240c
+#define MT6358_AUDDEC_ANA_CON3               0x240e
+#define MT6358_AUDDEC_ANA_CON4               0x2410
+#define MT6358_AUDDEC_ANA_CON5               0x2412
+#define MT6358_AUDDEC_ANA_CON6               0x2414
+#define MT6358_AUDDEC_ANA_CON7               0x2416
+#define MT6358_AUDDEC_ANA_CON8               0x2418
+#define MT6358_AUDDEC_ANA_CON9               0x241a
+#define MT6358_AUDDEC_ANA_CON10              0x241c
+#define MT6358_AUDDEC_ANA_CON11              0x241e
+#define MT6358_AUDDEC_ANA_CON12              0x2420
+#define MT6358_AUDDEC_ANA_CON13              0x2422
+#define MT6358_AUDDEC_ANA_CON14              0x2424
+#define MT6358_AUDDEC_ANA_CON15              0x2426
+#define MT6358_AUDDEC_ELR_NUM                0x2428
+#define MT6358_AUDDEC_ELR_0                  0x242a
+#define MT6358_AUDZCD_DSN_ID                 0x2480
+#define MT6358_AUDZCD_DSN_REV0               0x2482
+#define MT6358_AUDZCD_DSN_DBI                0x2484
+#define MT6358_AUDZCD_DSN_FPI                0x2486
+#define MT6358_ZCD_CON0                      0x2488
+#define MT6358_ZCD_CON1                      0x248a
+#define MT6358_ZCD_CON2                      0x248c
+#define MT6358_ZCD_CON3                      0x248e
+#define MT6358_ZCD_CON4                      0x2490
+#define MT6358_ZCD_CON5                      0x2492
+#define MT6358_ACCDET_CON13                  0x2522
+
+#define MT6358_MAX_REGISTER MT6358_ZCD_CON5
+
+enum {
+	MT6358_MTKAIF_PROTOCOL_1 = 0,
+	MT6358_MTKAIF_PROTOCOL_2,
+	MT6358_MTKAIF_PROTOCOL_2_CLK_P2,
+};
+
+/* set only during init */
+int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt,
+			       int mtkaif_protocol);
+int mt6358_mtkaif_calibration_enable(struct snd_soc_component *cmpnt);
+int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt);
+int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
+					int phase_1, int phase_2);
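+
+/*
+ * Call-order sketch (editor's assumption, not mandated by this header):
+ * a machine driver typically selects the protocol once at init and then
+ * runs a single calibration cycle:
+ *
+ *	mt6358_set_mtkaif_protocol(cmpnt, MT6358_MTKAIF_PROTOCOL_2);
+ *	mt6358_mtkaif_calibration_enable(cmpnt);
+ *	mt6358_set_mtkaif_calibration_phase(cmpnt, phase_1, phase_2);
+ *	mt6358_mtkaif_calibration_disable(cmpnt);
+ */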
+#endif /* __MT6358_H__ */
diff --git a/sound/soc/codecs/mt6392-codec.c b/sound/soc/codecs/mt6392-codec.c
new file mode 100644
index 0000000..23463f8
--- /dev/null
+++ b/sound/soc/codecs/mt6392-codec.c
@@ -0,0 +1,505 @@
+/*
+ * mt6392-codec.c --  MT6392 ALSA SoC codec driver
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include "mt6392-codec.h"
+
+/*
+ * Class D: supports both a HW trim mode and a SW trim mode.
+ * Class AB: reuses the trim offset derived from the Class D HW trim.
+ *
+ * Define USE_HW_TRIM_CLASS_D to select the HW trim mode for Class D.
+ */
+#define USE_HW_TRIM_CLASS_D
+
+/* Int Spk Amp Playback Volume
+ * {mute, 0, 4, 5, 6, 7, 8, ..., 17} dB
+ */
+static const unsigned int int_spk_amp_gain_tlv[] = {
+	TLV_DB_RANGE_HEAD(3),
+	0, 0, TLV_DB_SCALE_ITEM(0, 0, 1),
+	1, 1, TLV_DB_SCALE_ITEM(0, 0, 0),
+	2, 15, TLV_DB_SCALE_ITEM(400, 100, 0),
+};
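+
+/*
+ * Reading the ranges above (editor's gloss): control value 0 is mute,
+ * value 1 maps to 0 dB, and values 2..15 map linearly from +4 dB to
+ * +17 dB in 1 dB steps (TLV_DB_SCALE_ITEM takes its min and step in
+ * 0.01 dB units).
+ */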
+
+/* Audio_Speaker_PGA_gain
+ * {mute, 0, 4, 5, 6, 7, 8, ..., 17} dB
+ */
+static const char *const int_spk_amp_gain_text[] = {
+	"MUTE", "+0dB", "+4dB", "+5dB",
+	"+6dB", "+7dB", "+8dB", "+9dB",
+	"+10dB", "+11dB", "+12dB", "+13dB",
+	"+14dB", "+15dB", "+16dB", "+17dB",
+};
+
+static const struct soc_enum int_spk_amp_gain_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(int_spk_amp_gain_text),
+		int_spk_amp_gain_text);
+
+static int int_spk_amp_gain_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt6392_codec_priv *codec_data =
+		snd_soc_component_get_drvdata(component);
+	ucontrol->value.integer.value[0] = codec_data->spk_amp_gain;
+
+	return 0;
+}
+
+static int int_spk_amp_gain_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt6392_codec_priv *codec_data =
+		snd_soc_component_get_drvdata(component);
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	uint32_t value = ucontrol->value.integer.value[0];
+
+	if (value >= e->items)
+		return -EINVAL;
+
+	snd_soc_component_update_bits(codec_data->codec, SPK_CON9,
+		GENMASK(11, 8), value << 8);
+
+	codec_data->spk_amp_gain = value;
+
+	dev_dbg(codec_data->codec->dev, "%s value = %u\n",
+		__func__, value);
+
+	return 0;
+}
+
+static int int_spk_amp_gain_put_volsw(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt6392_codec_priv *codec_data =
+		snd_soc_component_get_drvdata(component);
+
+	ret = snd_soc_put_volsw(kcontrol, ucontrol);
+	if (ret < 0)
+		return ret;
+
+	codec_data->spk_amp_gain = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+/* Internal speaker mode (AB/D) */
+static const char * const int_spk_amp_mode_texts[] = {
+	"Class D",
+	"Class AB",
+};
+
+static SOC_ENUM_SINGLE_EXT_DECL(mt6392_speaker_mode_enum,
+		int_spk_amp_mode_texts);
+
+static int mt6392_spk_mode_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt6392_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+
+	ucontrol->value.integer.value[0] = codec_data->speaker_mode;
+	return 0;
+}
+
+static int mt6392_spk_mode_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt6392_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+	int ret = 0;
+	uint32_t mode = ucontrol->value.integer.value[0];
+
+	switch (mode) {
+	case MT6392_CLASS_D:
+	case MT6392_CLASS_AB:
+		codec_data->speaker_mode = ucontrol->value.integer.value[0];
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/* Check OC Flag */
+static const char *const mt6392_speaker_oc_flag_texts[] = {
+	"NoOverCurrent",
+	"OverCurrent"
+};
+
+static SOC_ENUM_SINGLE_EXT_DECL(mt6392_speaker_oc_flag_enum,
+		mt6392_speaker_oc_flag_texts);
+
+static int mt6392_speaker_oc_flag_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt6392_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+	uint32_t reg_value = snd_soc_component_read32(codec_data->codec,
+						      SPK_CON6);
+
+	if (codec_data->speaker_mode == MT6392_CLASS_AB)
+		ucontrol->value.integer.value[0] =
+			(reg_value & BIT(15)) ? 1 : 0;
+	else
+		ucontrol->value.integer.value[0] =
+			(reg_value & BIT(14)) ? 1 : 0;
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new mt6392_codec_controls[] = {
+	/* Internal speaker PGA gain control */
+	SOC_SINGLE_EXT_TLV("Int Spk Amp Playback Volume",
+		SPK_CON9, 8, 15, 0,
+		snd_soc_get_volsw,
+		int_spk_amp_gain_put_volsw,
+		int_spk_amp_gain_tlv),
+	/* Audio_Speaker_PGA_gain */
+	SOC_ENUM_EXT("Audio_Speaker_PGA_gain",
+		int_spk_amp_gain_enum,
+		int_spk_amp_gain_get,
+		int_spk_amp_gain_put),
+	/* Internal speaker mode (AB/D) */
+	SOC_ENUM_EXT("Int Spk Amp Mode", mt6392_speaker_mode_enum,
+		mt6392_spk_mode_get, mt6392_spk_mode_put),
+	/* Check OC Flag */
+	SOC_ENUM_EXT("Speaker_OC_Flag", mt6392_speaker_oc_flag_enum,
+		mt6392_speaker_oc_flag_get, NULL),
+};
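+
+/*
+ * Userspace sketch (editor's assumption, not part of this driver): once
+ * the card registers, the mode control can be switched with e.g.
+ *
+ *	amixer -c <card> cset name='Int Spk Amp Mode' 'Class AB'
+ */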
+
+static void mt6392_codec_get_spk_trim_offset(struct snd_soc_component *codec)
+{
+	struct mt6392_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(codec);
+
+	/* turn on spk (class D) and hw trim */
+	snd_soc_component_update_bits(codec, TOP_CKPDN1_CLR, 0x000E, 0x000E);
+	snd_soc_component_update_bits(codec, SPK_CON7, 0xFFFF, 0x48F4);
+	snd_soc_component_update_bits(codec, SPK_CON11, 0xFFFF, 0x0055);
+	snd_soc_component_update_bits(codec, SPK_CON9, 0xF0FF, 0x2018);
+	snd_soc_component_update_bits(codec, SPK_CON2, 0xFFFF, 0x0414);
+	snd_soc_component_update_bits(codec, SPK_CON0, 0xFFFF, 0x3409);
+	usleep_range(20000, 21000);
+
+	/* save trim offset */
+	codec_data->spk_trim_offset =
+		snd_soc_component_read32(codec, SPK_CON1) & GENMASK(4, 0);
+
+	/* turn off trim */
+	snd_soc_component_update_bits(codec, SPK_CON0, 0xFFFF, 0x3401);
+	snd_soc_component_update_bits(codec, SPK_CON9, 0xF0FF, 0x2000);
+	usleep_range(2000, 3000);
+
+	/* turn off spk */
+	snd_soc_component_update_bits(codec, SPK_CON12, 0xFFFF, 0x0000);
+	snd_soc_component_update_bits(codec, SPK_CON0, 0xFFFF, 0x3400);
+	snd_soc_component_update_bits(codec, TOP_CKPDN1_CLR, 0x000E, 0x0000);
+}
+
+static void mt6392_int_spk_on_with_trim(struct snd_soc_component *codec)
+{
+#if defined(USE_HW_TRIM_CLASS_D)
+	/* turn on spk (class D) and hw trim */
+	snd_soc_component_update_bits(codec, TOP_CKPDN1_CLR, 0x000E, 0x000E);
+	snd_soc_component_update_bits(codec, SPK_CON7, 0xFFFF, 0x48F4);
+	snd_soc_component_update_bits(codec, SPK_CON11, 0xFFFF, 0x0055);
+	snd_soc_component_update_bits(codec, SPK_CON9, 0xF0FF, 0x2018);
+	snd_soc_component_update_bits(codec, SPK_CON2, 0xFFFF, 0x0414);
+	snd_soc_component_update_bits(codec, SPK_CON0, 0xFFFF, 0x3409);
+	usleep_range(20000, 21000);
+
+	/* turn off trim */
+	snd_soc_component_update_bits(codec, SPK_CON0, 0xFFFF, 0x3401);
+	snd_soc_component_update_bits(codec, SPK_CON9, 0xF0FF, 0x2000);
+#else
+	struct mt6392_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(codec);
+
+	/* turn on spk (class D) */
+	snd_soc_component_update_bits(codec, TOP_CKPDN1_CLR, 0x000E, 0x000E);
+	snd_soc_component_update_bits(codec, SPK_CON7, 0xFFFF, 0x48F4);
+	snd_soc_component_update_bits(codec, SPK_CON2, 0xFFFF, 0x0414);
+	snd_soc_component_update_bits(codec, SPK_CON0, 0xFFFF, 0x3001);
+	snd_soc_component_update_bits(codec, SPK_CON9, 0xF0FF, 0x2000);
+
+	/* enable sw trim */
+	snd_soc_component_update_bits(codec, SPK_CON12, 0xFFFF, 0x0009);
+	snd_soc_component_update_bits(codec, SPK_CON12, 0xFFFF, 0x0001);
+	snd_soc_component_update_bits(codec, SPK_CON12, 0xFFFF, 0x0283);
+	snd_soc_component_update_bits(codec, SPK_CON12, 0xFFFF, 0x0281);
+	snd_soc_component_update_bits(codec, SPK_CON12, 0xFFFF, 0x2A81);
+	snd_soc_component_update_bits(codec, SPK_CON1, 0xFFFF, 0x6000);
+	/* class D and class AB use the same trim offset value */
+	snd_soc_component_update_bits(codec, SPK_CON1,
+		GENMASK(12, 8), (codec_data->spk_trim_offset << 8));
+
+	/* trim stop */
+	snd_soc_component_update_bits(codec, SPK_CON12, 0xFFFF, 0xAA81);
+#endif
+	usleep_range(2000, 3000);
+}
+
+int mt6392_int_spk_turn_on(struct snd_soc_component *codec)
+{
+	struct mt6392_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(codec);
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s\n", __func__);
+
+	switch (codec_data->speaker_mode) {
+	case MT6392_CLASS_D:
+		mt6392_int_spk_on_with_trim(codec);
+		break;
+	case MT6392_CLASS_AB:
+		snd_soc_component_update_bits(codec, TOP_CKPDN1_CLR, 0x000E,
+					      0x000E);
+		snd_soc_component_update_bits(codec, SPK_CON7, 0xFFFF, 0x48F4);
+		snd_soc_component_update_bits(codec, SPK_CON2, 0xFFFF, 0x0414);
+		snd_soc_component_update_bits(codec, SPK_CON0, 0xFFFF, 0x3005);
+		snd_soc_component_update_bits(codec, SPK_CON9, 0xF0FF, 0x2000);
+		snd_soc_component_update_bits(codec, SPK_CON1, 0xFFFF, 0x6000);
+		/* class D and class AB use the same trim offset value */
+		snd_soc_component_update_bits(codec, SPK_CON1,
+			GENMASK(12, 8), (codec_data->spk_trim_offset << 8));
+		usleep_range(2000, 3000);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt6392_int_spk_turn_on);
+
+int mt6392_int_spk_turn_off(struct snd_soc_component *codec)
+{
+	struct mt6392_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(codec);
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s\n", __func__);
+
+	switch (codec_data->speaker_mode) {
+	case MT6392_CLASS_D:
+		snd_soc_component_update_bits(codec, SPK_CON12, 0xFFFF, 0x0000);
+		snd_soc_component_update_bits(codec, SPK_CON0, 0xFFFF, 0x3400);
+		snd_soc_component_update_bits(codec, TOP_CKPDN1_CLR, 0x000E,
+					      0x0000);
+		break;
+	case MT6392_CLASS_AB:
+		snd_soc_component_update_bits(codec, SPK_CON0, 0xFFFF, 0x3404);
+		snd_soc_component_update_bits(codec, TOP_CKPDN1_CLR, 0x000E,
+					      0x0000);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt6392_int_spk_turn_off);
+
+static int mt6392_int_spk_amp_wevent(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		mt6392_int_spk_turn_on(codec);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		mt6392_int_spk_turn_off(codec);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget mt6392_codec_dapm_widgets[] = {
+	SND_SOC_DAPM_SPK("Int Spk Amp", mt6392_int_spk_amp_wevent),
+};
+
+#ifdef CONFIG_DEBUG_FS
+struct mt6392_codec_reg_attr {
+	uint32_t offset;
+	const char *name;
+};
+
+#define DUMP_REG_ENTRY(reg) {reg, #reg}
+
+static const struct mt6392_codec_reg_attr mt6392_codec_dump_reg_list[] = {
+	DUMP_REG_ENTRY(SPK_CON0),
+	DUMP_REG_ENTRY(SPK_CON1),
+	DUMP_REG_ENTRY(SPK_CON2),
+	DUMP_REG_ENTRY(SPK_CON3),
+	DUMP_REG_ENTRY(SPK_CON4),
+	DUMP_REG_ENTRY(SPK_CON5),
+	DUMP_REG_ENTRY(SPK_CON6),
+	DUMP_REG_ENTRY(SPK_CON7),
+	DUMP_REG_ENTRY(SPK_CON8),
+	DUMP_REG_ENTRY(SPK_CON9),
+	DUMP_REG_ENTRY(SPK_CON10),
+	DUMP_REG_ENTRY(SPK_CON11),
+	DUMP_REG_ENTRY(SPK_CON12),
+};
+
+static ssize_t mt6392_codec_debug_read(struct file *file,
+			char __user *user_buf,
+			size_t count, loff_t *pos)
+{
+	struct mt6392_codec_priv *codec_data = file->private_data;
+	ssize_t ret, i;
+	char *buf;
+	int n = 0;
+
+	if (*pos < 0 || !count)
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(mt6392_codec_dump_reg_list); i++) {
+		n += scnprintf(buf + n, count - n, "%s = 0x%x\n",
+			mt6392_codec_dump_reg_list[i].name,
+			snd_soc_component_read32(codec_data->codec,
+				mt6392_codec_dump_reg_list[i].offset));
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, pos, buf, n);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static const struct file_operations mt6392_codec_debug_ops = {
+	.open = simple_open,
+	.read = mt6392_codec_debug_read,
+	.llseek = default_llseek,
+};
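+
+/*
+ * Usage sketch (editor's assumption): with debugfs mounted in the usual
+ * place, the dump is readable at /sys/kernel/debug/mt6392_codec_regs
+ * once the codec has probed.
+ */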
+#endif
+
+static void mt6392_codec_init_regs(struct mt6392_codec_priv *codec_data)
+{
+	struct snd_soc_component *codec = codec_data->codec;
+
+	dev_dbg(codec->dev, "%s\n", __func__);
+
+	/* default PGA gain: 12dB */
+	codec_data->spk_amp_gain = 0xA;
+	snd_soc_component_update_bits(codec, SPK_CON9,
+		GENMASK(11, 8), (codec_data->spk_amp_gain) << 8);
+}
+
+static int mt6392_codec_parse_dt(struct snd_soc_component *codec)
+{
+	struct mt6392_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(codec);
+	struct device *dev = codec->dev;
+	int ret = 0;
+
+	ret = of_property_read_u32(dev->of_node, "mediatek,speaker-mode",
+				&codec_data->speaker_mode);
+	if (ret) {
+		dev_warn(dev, "%s failed to read speaker-mode in node %s\n",
+			__func__, dev->of_node->full_name);
+		codec_data->speaker_mode = MT6392_CLASS_D;
+	} else if (codec_data->speaker_mode != MT6392_CLASS_D &&
+		codec_data->speaker_mode != MT6392_CLASS_AB) {
+		codec_data->speaker_mode = MT6392_CLASS_D;
+	}
+
+	return ret;
+}
+
+/* FIXME:
+ * This code is attached to the MT8167 codec for now: there is no
+ * device for the MT6392 itself, so the MT8167 codec's dev is reused.
+ * The DT properties should eventually be parsed by the MT6392 and
+ * this code split out into a standalone codec driver.
+ */
+int mt6392_codec_probe(struct snd_soc_component *codec)
+{
+	struct mt6392_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(codec);
+	int ret = 0;
+
+	ret = snd_soc_add_component_controls(codec, mt6392_codec_controls,
+		ARRAY_SIZE(mt6392_codec_controls));
+	if (ret < 0)
+		goto error_probe;
+
+	ret = snd_soc_dapm_new_controls(dapm, mt6392_codec_dapm_widgets,
+		ARRAY_SIZE(mt6392_codec_dapm_widgets));
+	if (ret < 0)
+		goto error_probe;
+
+	codec_data->codec = codec;
+
+	mt6392_codec_parse_dt(codec);
+
+	mt6392_codec_init_regs(codec_data);
+
+	mt6392_codec_get_spk_trim_offset(codec);
+
+#ifdef CONFIG_DEBUG_FS
+	codec_data->debugfs = debugfs_create_file("mt6392_codec_regs",
+			S_IFREG | S_IRUGO,
+			NULL, codec_data, &mt6392_codec_debug_ops);
+#endif
+error_probe:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt6392_codec_probe);
+
+int mt6392_codec_remove(struct snd_soc_component *codec)
+{
+#ifdef CONFIG_DEBUG_FS
+	struct mt6392_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(codec);
+	debugfs_remove(codec_data->debugfs);
+#endif
+	dev_dbg(codec->dev, "%s\n", __func__);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt6392_codec_remove);
diff --git a/sound/soc/codecs/mt6392-codec.h b/sound/soc/codecs/mt6392-codec.h
new file mode 100644
index 0000000..9ebb375
--- /dev/null
+++ b/sound/soc/codecs/mt6392-codec.h
@@ -0,0 +1,59 @@
+/*
+ * mt6392-codec.h --  MT6392 ALSA SoC codec driver
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT6392_CODEC_H__
+#define __MT6392_CODEC_H__
+
+enum mt6392_speaker_mode {
+	MT6392_CLASS_D = 0,
+	MT6392_CLASS_AB,
+};
+
+struct mt6392_codec_priv {
+	struct snd_soc_component *codec;
+	uint32_t speaker_mode;
+	uint32_t spk_amp_gain;
+	uint16_t spk_trim_offset;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs;
+#endif
+};
+
+#define PMIC_OFFSET              (0x00C00000)
+#define PMIC_REG(reg)            (reg | PMIC_OFFSET)
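+
+/*
+ * Editor's note (assumption): the high offset tags these addresses as
+ * PMIC registers so that the shared MT8167 codec regmap can route them
+ * through the PMIC wrapper rather than the SoC's own register space.
+ */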
+
+/* mt6392 */
+#define SPK_CON0                 PMIC_REG(0x0052)
+#define SPK_CON1                 PMIC_REG(0x0054)
+#define SPK_CON2                 PMIC_REG(0x0056)
+#define SPK_CON3                 PMIC_REG(0x0058)
+#define SPK_CON4                 PMIC_REG(0x005A)
+#define SPK_CON5                 PMIC_REG(0x005C)
+#define SPK_CON6                 PMIC_REG(0x005E)
+#define SPK_CON7                 PMIC_REG(0x0060)
+#define SPK_CON8                 PMIC_REG(0x0062)
+#define SPK_CON9                 PMIC_REG(0x0064)
+#define SPK_CON10                PMIC_REG(0x0066)
+#define SPK_CON11                PMIC_REG(0x0068)
+#define SPK_CON12                PMIC_REG(0x006A)
+
+#define TOP_CKPDN1_CLR           PMIC_REG(0x010C)
+
+int mt6392_codec_probe(struct snd_soc_component *codec);
+int mt6392_codec_remove(struct snd_soc_component *codec);
+int mt6392_int_spk_turn_on(struct snd_soc_component *codec);
+int mt6392_int_spk_turn_off(struct snd_soc_component *codec);
+
+#endif
diff --git a/sound/soc/codecs/mt8167-codec-utils.c b/sound/soc/codecs/mt8167-codec-utils.c
new file mode 100644
index 0000000..2165fbe
--- /dev/null
+++ b/sound/soc/codecs/mt8167-codec-utils.c
@@ -0,0 +1,1256 @@
+/*
+ * mt8167-codec-utils.c  --  MT8167 codec driver utility functions
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <sound/soc.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include "mt8167-codec.h"
+#include "mt8167-codec-utils.h"
+#ifdef CONFIG_MTK_SPEAKER
+#include "mt6392-codec.h"
+#endif
+#ifdef CONFIG_MTK_AUXADC
+#include "mtk_auxadc.h"
+#endif
+
+enum auxadc_channel_id {
+	AUXADC_CH_AU_HPL = 7,
+	AUXADC_CH_AU_HPR,
+};
+
+struct reg_setting {
+	uint32_t reg;
+	uint32_t mask;
+	uint32_t val;
+};
+
+static const struct reg_setting mt8167_codec_cali_setup_regs[] = {
+	{
+		.reg = AUDIO_TOP_CON0,
+		.mask = GENMASK(26, 25),
+		.val = 0x0,
+	},
+	{
+		.reg = AUDIO_TOP_CON0,
+		.mask = BIT(2),
+		.val = 0x0,
+	},
+	{
+		.reg = AFE_MEMIF_PBUF_SIZE,
+		.mask = GENMASK(17, 16),
+		.val = 0x0,
+	},
+	{
+		.reg = AFE_CONN_24BIT,
+		.mask = GENMASK(4, 3),
+		.val = 0x0,
+	},
+	{
+		.reg = AFE_DAC_CON1,
+		.mask = GENMASK(3, 0),
+		.val = 0x9,
+	},
+	{
+		.reg = AFE_CONN1,
+		.mask = BIT(21),
+		.val = BIT(21),
+	},
+	{
+		.reg = AFE_CONN2,
+		.mask = BIT(6),
+		.val = BIT(6),
+	},
+	{
+		.reg = AFE_DAC_CON0,
+		.mask = BIT(1),
+		.val = BIT(1),
+	},
+	{
+		.reg = AFE_ADDA_PREDIS_CON0,
+		.mask = GENMASK(31, 0),
+		.val = 0x0,
+	},
+	{
+		.reg = AFE_ADDA_PREDIS_CON1,
+		.mask = GENMASK(31, 0),
+		.val = 0x0,
+	},
+	{
+		.reg = AFE_ADDA_DL_SRC2_CON0,
+		.mask = GENMASK(31, 0),
+		.val = (0x7 << 28) | (0x03 << 24) | (0x03 << 11) | BIT(1),
+	},
+	{
+		.reg = AFE_ADDA_DL_SRC2_CON1,
+		.mask = GENMASK(31, 0),
+		.val = 0xf74f0000,
+	},
+	{ /* I2S2_OUT_MODE (44.1 kHz) */
+		.reg = AFE_I2S_CON1,
+		.mask = (0x9 << 8),
+		.val = (0x9 << 8),
+	},
+	{
+		.reg = AFE_ADDA_DL_SRC2_CON0,
+		.mask = BIT(0),
+		.val = BIT(0),
+	},
+	{
+		.reg = AFE_I2S_CON1,
+		.mask = BIT(0),
+		.val = BIT(0),
+	},
+	{
+		.reg = AFE_ADDA_UL_DL_CON0,
+		.mask = BIT(0),
+		.val = BIT(0),
+	},
+	{
+		.reg = AFE_DAC_CON0,
+		.mask = BIT(0),
+		.val = BIT(0),
+	},
+	{ /* ADA_HPLO_TO_AUXADC */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(15),
+		.val = BIT(15),
+	},
+	{ /* ADA_HPRO_TO_AUXADC */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(14),
+		.val = BIT(14),
+	},
+};
+
+static const struct reg_setting mt8167_codec_cali_cleanup_regs[] = {
+	{ /* ADA_HPLO_TO_AUXADC */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(15),
+		.val = 0x0,
+	},
+	{ /* ADA_HPRO_TO_AUXADC */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(14),
+		.val = 0x0,
+	},
+	{
+		.reg = AFE_DAC_CON0,
+		.mask = BIT(1),
+		.val = 0x0,
+	},
+	{
+		.reg = AFE_ADDA_DL_SRC2_CON0,
+		.mask = BIT(0),
+		.val = 0x0,
+	},
+	{
+		.reg = AFE_I2S_CON1,
+		.mask = BIT(0),
+		.val = 0x0,
+	},
+	{
+		.reg = AFE_ADDA_UL_DL_CON0,
+		.mask = BIT(0),
+		.val = 0x0,
+	},
+	{
+		.reg = AFE_DAC_CON0,
+		.mask = BIT(0),
+		.val = 0x0,
+	},
+	{
+		.reg = AUDIO_TOP_CON0,
+		.mask = BIT(2),
+		.val = BIT(2),
+	},
+	{
+		.reg = AUDIO_TOP_CON0,
+		.mask = GENMASK(26, 25),
+		.val = (0x3 << 25),
+	},
+};
+
+static const struct reg_setting mt8167_codec_cali_enable_regs[] = {
+	{ /* dl_rate (44.1 kHz) */
+		.reg = ABB_AFE_CON1,
+		.mask = GENMASK(3, 0),
+		.val = 9,
+	},
+	{ /* toggle top_ctrl */
+		.reg = ABB_AFE_CON11,
+		.mask = ABB_AFE_CON11_TOP_CTRL,
+		.val = ABB_AFE_CON11_TOP_CTRL,
+	},
+	{ /* toggle top_ctrl */
+		.reg = ABB_AFE_CON11,
+		.mask = ABB_AFE_CON11_TOP_CTRL,
+		.val = 0x0,
+	},
+	{
+		.reg = ABB_AFE_CON3,
+		.mask = GENMASK(31, 0),
+		.val = 0x0,
+	},
+	{
+		.reg = ABB_AFE_CON4,
+		.mask = GENMASK(31, 0),
+		.val = 0x0,
+	},
+	{
+		.reg = ABB_AFE_CON10,
+		.mask = BIT(0),
+		.val = BIT(0),
+	},
+	{
+		.reg = ABB_AFE_CON0,
+		.mask = BIT(0),
+		.val = BIT(0),
+	},
+	{
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(27) | BIT(17) | BIT(16),
+		.val = BIT(27) | BIT(17) | BIT(16),
+	},
+	{
+		.reg = AUDIO_CODEC_CON00,
+		.mask = BIT(19),
+		.val = BIT(19),
+	},
+	{
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(15) | BIT(14) | BIT(13) | BIT(12) | BIT(11),
+		.val = BIT(15) | BIT(14) | BIT(13) | BIT(12) | BIT(11),
+	},
+	{
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(28) | BIT(25) | BIT(22),
+		.val = BIT(28),
+	},
+};
+
+static const struct reg_setting mt8167_codec_cali_disable_regs[] = {
+	{
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(28) | BIT(25) | BIT(22),
+		.val = BIT(25) | BIT(22),
+	},
+	{
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(15) | BIT(14) | BIT(13) | BIT(12) | BIT(11),
+		.val = 0x0,
+	},
+	{
+		.reg = AUDIO_CODEC_CON00,
+		.mask = BIT(19),
+		.val = 0x0,
+	},
+	{
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(27) | BIT(17) | BIT(16),
+		.val = 0x0,
+	},
+	{
+		.reg = ABB_AFE_CON0,
+		.mask = BIT(0),
+		.val = 0x0,
+	},
+};
+
+static int mt8167_codec_apply_reg_setting(struct snd_soc_component *codec,
+	const struct reg_setting regs[], uint32_t reg_nums)
+{
+	int i;
+
+	for (i = 0; i < reg_nums; i++)
+		snd_soc_component_update_bits(codec,
+			regs[i].reg, regs[i].mask, regs[i].val);
+
+	return 0;
+}
+
+#define AFIFO_SIZE (48 * 1024)
+static int mt8167_codec_setup_cali_path(struct snd_soc_component *codec,
+	struct snd_dma_buffer *buf)
+{
+	/* enable power */
+	snd_soc_component_update_bits(codec, AUDIO_CODEC_CON01, BIT(20), BIT(20));
+	/* enable clock */
+	snd_soc_component_update_bits(codec, AUDIO_CODEC_CON03, BIT(30), BIT(30));
+	snd_soc_component_update_bits(codec, AUDIO_CODEC_CON04, BIT(15), BIT(15));
+	/* allocate buffer */
+	buf->area = dma_alloc_coherent(codec->dev, AFIFO_SIZE,
+		&buf->addr, GFP_KERNEL | __GFP_ZERO);
+	if (!buf->area)
+		return -ENOMEM;
+
+	snd_soc_component_update_bits(codec,
+			AFE_DL1_BASE,
+			GENMASK(31, 0),
+			buf->addr);
+	snd_soc_component_update_bits(codec,
+			AFE_DL1_END,
+			GENMASK(31, 0),
+			buf->addr + (AFIFO_SIZE - 1));
+
+	/* setup */
+	return mt8167_codec_apply_reg_setting(codec,
+			mt8167_codec_cali_setup_regs,
+			ARRAY_SIZE(mt8167_codec_cali_setup_regs));
+}
+
+static int mt8167_codec_cleanup_cali_path(struct snd_soc_component *codec,
+	struct snd_dma_buffer *buf)
+{
+	int ret = 0;
+
+	/* cleanup */
+	ret = mt8167_codec_apply_reg_setting(codec,
+			mt8167_codec_cali_cleanup_regs,
+			ARRAY_SIZE(mt8167_codec_cali_cleanup_regs));
+
+	/* free buffer */
+	dma_free_coherent(codec->dev, AFIFO_SIZE, buf->area,
+		buf->addr);
+	/* disable clock */
+	snd_soc_component_update_bits(codec, AUDIO_CODEC_CON04, BIT(15), 0x0);
+	snd_soc_component_update_bits(codec, AUDIO_CODEC_CON03, BIT(30), 0x0);
+	/* disable power */
+	snd_soc_component_update_bits(codec, AUDIO_CODEC_CON01, BIT(20), 0x0);
+
+	return ret;
+}
+
+static int mt8167_codec_enable_cali_path(struct snd_soc_component *codec)
+{
+	/* enable */
+	return mt8167_codec_apply_reg_setting(codec,
+			mt8167_codec_cali_enable_regs,
+			ARRAY_SIZE(mt8167_codec_cali_enable_regs));
+}
+
+static int mt8167_codec_disable_cali_path(struct snd_soc_component *codec)
+{
+	/* disable */
+	return mt8167_codec_apply_reg_setting(codec,
+			mt8167_codec_cali_disable_regs,
+			ARRAY_SIZE(mt8167_codec_cali_disable_regs));
+}
+
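+/*
+ * Measure the headphone DC offset: sample the AUXADC with the playback
+ * path off, enable the calibration path, average five more samples and
+ * return the difference in mV (0 when CONFIG_MTK_AUXADC is not set).
+ */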
+static int32_t mt8167_codec_get_hp_cali_val(struct snd_soc_component *codec,
+	uint32_t auxadc_channel)
+{
+	int32_t cali_val = 0;
+	struct snd_dma_buffer dma_buf;
+#ifdef CONFIG_MTK_AUXADC
+	int32_t auxadc_on_val = 0;
+	int32_t auxadc_off_val = 0;
+	int32_t auxadc_val_sum = 0;
+	int32_t auxadc_val_avg = 0;
+	int32_t count = 0;
+	int32_t countlimit = 5;
+#endif
+
+	dev_dbg(codec->dev, "%s\n", __func__);
+
+	mt8167_codec_setup_cali_path(codec, &dma_buf);
+
+	usleep_range(10 * 1000, 15 * 1000);
+
+#ifdef CONFIG_MTK_AUXADC
+	IMM_GetOneChannelValue_Cali(auxadc_channel, &auxadc_off_val);
+	dev_dbg(codec->dev, "%s auxadc_off_val: %d\n",
+		__func__, auxadc_off_val);
+#endif
+
+	mt8167_codec_enable_cali_path(codec);
+
+	usleep_range(10 * 1000, 15 * 1000);
+
+#ifdef CONFIG_MTK_AUXADC
+	for (count = 0; count < countlimit; count++) {
+		IMM_GetOneChannelValue_Cali(auxadc_channel, &auxadc_on_val);
+		auxadc_val_sum += auxadc_on_val;
+		dev_dbg(codec->dev, "%s auxadc_on_val: %d, auxadc_val_sum: %d\n",
+			__func__, auxadc_on_val, auxadc_val_sum);
+	}
+
+	auxadc_val_avg = auxadc_val_sum / countlimit;
+
+	cali_val = auxadc_off_val - auxadc_val_avg;
+
+	cali_val = cali_val / 1000; /* mV */
+#endif
+
+	mt8167_codec_disable_cali_path(codec);
+	mt8167_codec_cleanup_cali_path(codec, &dma_buf);
+
+	return cali_val;
+}
+
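+/*
+ * Convert a measured DC offset (in mV) to the compensation value
+ * programmed into ABB_AFE_CON3/CON4. For example, a 50 mV offset maps
+ * to ((50 * 18) / 10) * 18 = 1620 steps; a positive offset is inverted
+ * via two's complement so the compensation opposes it.
+ */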
+uint32_t mt8167_codec_conv_dc_offset_to_comp_val(int32_t dc_offset)
+{
+	uint32_t dccomp_val = 0;
+	bool invert = false;
+
+	if (!dc_offset)
+		return 0;
+
+	if (dc_offset > 0)
+		invert = true;
+	else
+		dc_offset = -dc_offset;
+
+	/* transform to 1.8V scale */
+	dc_offset = (dc_offset * 18) / 10;
+
+	/*
+	 * ABB_AFE_CON3: one step is (1/32768) * 1800 mV = 0.0549 mV,
+	 * so dccomp_val = dc_offset / 0.0549 mV ~= dc_offset * 18
+	 */
+	dccomp_val = dc_offset * 18;
+
+	/* two's complement to change the direction of dc compensation value */
+	if (invert)
+		dccomp_val = 0xFFFFFFFF - dccomp_val + 1;
+
+	return dccomp_val;
+}
+EXPORT_SYMBOL_GPL(mt8167_codec_conv_dc_offset_to_comp_val);
+
+int mt8167_codec_get_hpl_cali_val(struct snd_soc_component *codec,
+	uint32_t *dccomp_val, int32_t *dc_offset)
+{
+	*dc_offset = mt8167_codec_get_hp_cali_val(codec, AUXADC_CH_AU_HPL);
+	*dccomp_val = mt8167_codec_conv_dc_offset_to_comp_val(*dc_offset);
+
+	dev_dbg(codec->dev, "%s dc_offset %d, dccomp_val %u\n",
+		__func__, *dc_offset, *dccomp_val);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt8167_codec_get_hpl_cali_val);
+
+int mt8167_codec_get_hpr_cali_val(struct snd_soc_component *codec,
+	uint32_t *dccomp_val, int32_t *dc_offset)
+{
+	*dc_offset = mt8167_codec_get_hp_cali_val(codec, AUXADC_CH_AU_HPR);
+	*dccomp_val = mt8167_codec_conv_dc_offset_to_comp_val(*dc_offset);
+
+	dev_dbg(codec->dev, "%s dc_offset %d, dccomp_val %u\n",
+		__func__, *dc_offset, *dccomp_val);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt8167_codec_get_hpr_cali_val);
+
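+/*
+ * Helpers classifying a CODEC_LOOPBACK_* type by its uplink source
+ * (built-in AMIC, headset AMIC or DMIC) and its downlink sink
+ * (speaker or headphone).
+ */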
+static bool ul_is_builtin_mic(uint32_t type)
+{
+	bool ret = false;
+
+	switch (type) {
+	case CODEC_LOOPBACK_AMIC_TO_SPK:
+	case CODEC_LOOPBACK_AMIC_TO_HP:
+		ret = true;
+		break;
+	default:
+		ret = false;
+		break;
+	}
+
+	return ret;
+}
+
+static bool ul_is_headset_mic(uint32_t type)
+{
+	bool ret = false;
+
+	switch (type) {
+	case CODEC_LOOPBACK_HEADSET_MIC_TO_SPK:
+	case CODEC_LOOPBACK_HEADSET_MIC_TO_HP:
+		ret = true;
+		break;
+	default:
+		ret = false;
+		break;
+	}
+
+	return ret;
+}
+
+static bool ul_is_amic(uint32_t type)
+{
+	return ul_is_builtin_mic(type) ||
+			ul_is_headset_mic(type);
+}
+
+static bool ul_is_dmic(uint32_t type)
+{
+	bool ret = false;
+
+	switch (type) {
+	case CODEC_LOOPBACK_DMIC_TO_SPK:
+	case CODEC_LOOPBACK_DMIC_TO_HP:
+		ret = true;
+		break;
+	default:
+		ret = false;
+		break;
+	}
+
+	return ret;
+}
+
+static bool dl_is_spk(uint32_t type)
+{
+	bool ret = false;
+
+	switch (type) {
+	case CODEC_LOOPBACK_AMIC_TO_SPK:
+	case CODEC_LOOPBACK_DMIC_TO_SPK:
+	case CODEC_LOOPBACK_HEADSET_MIC_TO_SPK:
+		ret = true;
+		break;
+	default:
+		ret = false;
+		break;
+	}
+
+	return ret;
+}
+
+static bool dl_is_hp(uint32_t type)
+{
+	bool ret = false;
+
+	switch (type) {
+	case CODEC_LOOPBACK_AMIC_TO_HP:
+	case CODEC_LOOPBACK_DMIC_TO_HP:
+	case CODEC_LOOPBACK_HEADSET_MIC_TO_HP:
+		ret = true;
+		break;
+	default:
+		ret = false;
+		break;
+	}
+
+	return ret;
+}
+
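+/*
+ * Sample rate setup sequences: program the UL/DL rate field in
+ * ABB_AFE_CON1, then toggle top_ctrl in ABB_AFE_CON11 so the AFE
+ * latches the new value.
+ */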
+static const struct reg_setting mt8167_codec_ul_rate_32000_setup_regs[] = {
+	{ /* ul_rate (32khz) */
+		.reg = ABB_AFE_CON1,
+		.mask = GENMASK(7, 4),
+		.val = 0x0,
+	},
+	{ /* toggle top_ctrl */
+		.reg = ABB_AFE_CON11,
+		.mask = ABB_AFE_CON11_TOP_CTRL,
+		.val = ABB_AFE_CON11_TOP_CTRL,
+	},
+	{ /* toggle top_ctrl */
+		.reg = ABB_AFE_CON11,
+		.mask = ABB_AFE_CON11_TOP_CTRL,
+		.val = 0x0,
+	},
+};
+
+static const struct reg_setting mt8167_codec_ul_rate_48000_setup_regs[] = {
+	{ /* ul_rate (48khz) */
+		.reg = ABB_AFE_CON1,
+		.mask = GENMASK(7, 4),
+		.val = (0x1 << 4),
+	},
+	{ /* toggle top_ctrl */
+		.reg = ABB_AFE_CON11,
+		.mask = ABB_AFE_CON11_TOP_CTRL,
+		.val = ABB_AFE_CON11_TOP_CTRL,
+	},
+	{ /* toggle top_ctrl */
+		.reg = ABB_AFE_CON11,
+		.mask = ABB_AFE_CON11_TOP_CTRL,
+		.val = 0x0,
+	},
+};
+
+static const struct reg_setting mt8167_codec_dl_rate_32000_setup_regs[] = {
+	{ /* dl_rate (32khz) */
+		.reg = ABB_AFE_CON1,
+		.mask = GENMASK(3, 0),
+		.val = 8,
+	},
+	{ /* toggle top_ctrl */
+		.reg = ABB_AFE_CON11,
+		.mask = ABB_AFE_CON11_TOP_CTRL,
+		.val = ABB_AFE_CON11_TOP_CTRL,
+	},
+	{ /* toggle top_ctrl */
+		.reg = ABB_AFE_CON11,
+		.mask = ABB_AFE_CON11_TOP_CTRL,
+		.val = 0x0,
+	},
+};
+
+static const struct reg_setting mt8167_codec_dl_rate_48000_setup_regs[] = {
+	{ /* dl_rate (48khz) */
+		.reg = ABB_AFE_CON1,
+		.mask = GENMASK(3, 0),
+		.val = 10,
+	},
+	{ /* toggle top_ctrl */
+		.reg = ABB_AFE_CON11,
+		.mask = ABB_AFE_CON11_TOP_CTRL,
+		.val = ABB_AFE_CON11_TOP_CTRL,
+	},
+	{ /* toggle top_ctrl */
+		.reg = ABB_AFE_CON11,
+		.mask = ABB_AFE_CON11_TOP_CTRL,
+		.val = 0x0,
+	},
+};
+
+static const struct reg_setting mt8167_codec_builtin_amic_enable_regs[] = {
+	{ /* micbias0 */
+		.reg = AUDIO_CODEC_CON03,
+		.mask = BIT(17),
+		.val = BIT(17),
+	},
+	{ /* left pga mux <- AU_VIN0 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = GENMASK(31, 28),
+		.val = 0x0,
+	},
+	{ /* right pga mux <- AU_VIN2 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = GENMASK(13, 10),
+		.val = 0x0,
+	},
+};
+
+static const struct reg_setting mt8167_codec_headset_amic_enable_regs[] = {
+	{ /* micbias1 */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(21),
+		.val = BIT(21),
+	},
+	{ /* left pga mux <- AU_VIN1 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = GENMASK(31, 28),
+		.val = BIT(28),
+	},
+	{ /* right pga mux <- AU_VIN1 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = GENMASK(13, 10),
+		.val = BIT(10),
+	},
+};
+
+static const struct reg_setting mt8167_codec_amic_enable_regs[] = {
+	{ /* CLKLDO power on */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(20),
+		.val = BIT(20),
+	},
+	{ /* Audio Codec CLK on */
+		.reg = AUDIO_CODEC_CON03,
+		.mask = BIT(30),
+		.val = BIT(30),
+	},
+	{ /* UL CLK on */
+		.reg = AUDIO_CODEC_CON03,
+		.mask = BIT(21),
+		.val = BIT(21),
+	},
+	{ /* vcm14 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = AUDIO_CODEC_CON00_AUDULL_VCM14_EN,
+		.val = AUDIO_CODEC_CON00_AUDULL_VCM14_EN,
+	},
+	{ /* vcm14 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = AUDIO_CODEC_CON00_AUDULR_VCM14_EN,
+		.val = AUDIO_CODEC_CON00_AUDULR_VCM14_EN,
+	},
+	{ /* vref24 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = AUDIO_CODEC_CON00_AUDULL_VREF24_EN,
+		.val = AUDIO_CODEC_CON00_AUDULL_VREF24_EN,
+	},
+	{ /* vref24 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = AUDIO_CODEC_CON00_AUDULR_VREF24_EN,
+		.val = AUDIO_CODEC_CON00_AUDULR_VREF24_EN,
+	},
+	{ /* ul_en */
+		.reg = ABB_AFE_CON0,
+		.mask = BIT(1),
+		.val = BIT(1),
+	},
+	{ /* left pga */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = BIT(24),
+		.val = BIT(24),
+	},
+	{ /* right pga */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = BIT(6),
+		.val = BIT(6),
+	},
+	{ /* left adc */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = BIT(23),
+		.val = BIT(23),
+	},
+	{ /* right adc */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = BIT(5),
+		.val = BIT(5),
+	},
+};
+
+static const struct reg_setting mt8167_codec_amic_disable_regs[] = {
+	{ /* left adc */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = BIT(23),
+		.val = 0x0,
+	},
+	{ /* right adc */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = BIT(5),
+		.val = 0x0,
+	},
+	{ /* left pga */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = BIT(24),
+		.val = 0x0,
+	},
+	{ /* right pga */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = BIT(6),
+		.val = 0x0,
+	},
+	{ /* micbias0 */
+		.reg = AUDIO_CODEC_CON03,
+		.mask = BIT(17),
+		.val = 0x0,
+	},
+	{ /* micbias1 */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(21),
+		.val = 0x0,
+	},
+	{ /* left pga mux <- OPEN (mask must cover the full mux field) */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = GENMASK(31, 28),
+		.val = (0x2 << 28),
+	},
+	{ /* right pga mux <- OPEN */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = GENMASK(13, 10),
+		.val = (0x2 << 10),
+	},
+	{ /* ul_en */
+		.reg = ABB_AFE_CON0,
+		.mask = BIT(1),
+		.val = 0x0,
+	},
+	{ /* vref24 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = AUDIO_CODEC_CON00_AUDULL_VREF24_EN,
+		.val = 0x0,
+	},
+	{ /* vref24 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = AUDIO_CODEC_CON00_AUDULR_VREF24_EN,
+		.val = 0x0,
+	},
+	{ /* vcm14 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = AUDIO_CODEC_CON00_AUDULL_VCM14_EN,
+		.val = 0x0,
+	},
+	{ /* vcm14 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = AUDIO_CODEC_CON00_AUDULR_VCM14_EN,
+		.val = 0x0,
+	},
+	{ /* UL CLK off */
+		.reg = AUDIO_CODEC_CON03,
+		.mask = BIT(21),
+		.val = 0x0,
+	},
+	{ /* Audio Codec CLK off */
+		.reg = AUDIO_CODEC_CON03,
+		.mask = BIT(30),
+		.val = 0x0,
+	},
+	{ /* CLKLDO power off */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(20),
+		.val = 0x0,
+	},
+};
+
+static const struct reg_setting mt8167_codec_dmic_enable_regs[] = {
+	{ /* vcm14 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = AUDIO_CODEC_CON00_AUDULL_VCM14_EN,
+		.val = AUDIO_CODEC_CON00_AUDULL_VCM14_EN,
+	},
+	{ /* vcm14 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = AUDIO_CODEC_CON00_AUDULR_VCM14_EN,
+		.val = AUDIO_CODEC_CON00_AUDULR_VCM14_EN,
+	},
+	{ /* micbias0 */
+		.reg = AUDIO_CODEC_CON03,
+		.mask = BIT(17),
+		.val = BIT(17),
+	},
+	{ /* ul_en */
+		.reg = ABB_AFE_CON0,
+		.mask = BIT(1),
+		.val = BIT(1),
+	},
+	{ /* dig_mic_en (one-wire) */
+		.reg = ABB_AFE_CON9,
+		.mask = ABB_AFE_CON9_DIG_MIC_EN,
+		.val = ABB_AFE_CON9_DIG_MIC_EN,
+	},
+	{ /* Digital microphone enable */
+		.reg = AUDIO_CODEC_CON03,
+		.mask = AUDIO_CODEC_CON03_DIG_MIC_EN,
+		.val = AUDIO_CODEC_CON03_DIG_MIC_EN,
+	},
+};
+
+static const struct reg_setting mt8167_codec_dmic_disable_regs[] = {
+	{ /* Digital microphone enable */
+		.reg = AUDIO_CODEC_CON03,
+		.mask = AUDIO_CODEC_CON03_DIG_MIC_EN,
+		.val = 0x0,
+	},
+	{ /* dig_mic_en (one-wire) */
+		.reg = ABB_AFE_CON9,
+		.mask = ABB_AFE_CON9_DIG_MIC_EN,
+		.val = 0x0,
+	},
+	{ /* micbias0 */
+		.reg = AUDIO_CODEC_CON03,
+		.mask = BIT(17),
+		.val = 0x0,
+	},
+	{ /* ul_en */
+		.reg = ABB_AFE_CON0,
+		.mask = BIT(1),
+		.val = 0x0,
+	},
+	{ /* vcm14 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = AUDIO_CODEC_CON00_AUDULL_VCM14_EN,
+		.val = 0x0,
+	},
+	{ /* vcm14 */
+		.reg = AUDIO_CODEC_CON00,
+		.mask = AUDIO_CODEC_CON00_AUDULR_VCM14_EN,
+		.val = 0x0,
+	},
+};
+
+static const struct reg_setting mt8167_codec_spk_enable_regs[] = {
+	{ /* DL CLK on */
+		.reg = AUDIO_CODEC_CON04,
+		.mask = BIT(15),
+		.val = BIT(15),
+	},
+	{ /* dl_vef24 */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(16),
+		.val = BIT(16),
+	},
+	{ /* dac_clk */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(27),
+		.val = BIT(27),
+	},
+	{ /* dl_vcm1 */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(13),
+		.val = BIT(13),
+	},
+	{ /* dl_vcm2 */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(17),
+		.val = BIT(17),
+	},
+	{ /* dl_en */
+		.reg = ABB_AFE_CON0,
+		.mask = BIT(0),
+		.val = BIT(0),
+	},
+	{ /* left dac */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(15),
+		.val = BIT(15),
+	},
+	{ /* voice amp */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(8),
+		.val = BIT(8),
+	},
+};
+
+static const struct reg_setting mt8167_codec_spk_disable_regs[] = {
+	{ /* voice amp */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(8),
+		.val = 0x0,
+	},
+	{ /* left dac */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(15),
+		.val = 0x0,
+	},
+	{ /* dl_en */
+		.reg = ABB_AFE_CON0,
+		.mask = BIT(0),
+		.val = 0x0,
+	},
+	{ /* dl_vcm2 */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(17),
+		.val = 0x0,
+	},
+	{ /* dl_vcm1 */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(13),
+		.val = 0x0,
+	},
+	{ /* dac_clk */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(27),
+		.val = 0x0,
+	},
+	{ /* dl_vef24 */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(16),
+		.val = 0x0,
+	},
+	{ /* DL CLK off */
+		.reg = AUDIO_CODEC_CON04,
+		.mask = BIT(15),
+		.val = 0x0,
+	},
+};
+
+static const struct reg_setting mt8167_codec_hp_enable_regs[] = {
+	{ /* DL CLK on */
+		.reg = AUDIO_CODEC_CON04,
+		.mask = BIT(15),
+		.val = BIT(15),
+	},
+	{ /* dl_vef24 */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(16),
+		.val = BIT(16),
+	},
+	{ /* dac_clk */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(27),
+		.val = BIT(27),
+	},
+	{ /* dl_vcm1 */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(13),
+		.val = BIT(13),
+	},
+	{ /* dl_vcm2 */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(17),
+		.val = BIT(17),
+	},
+	{ /* dl_en */
+		.reg = ABB_AFE_CON0,
+		.mask = BIT(0),
+		.val = BIT(0),
+	},
+	{ /* left dac */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(15),
+		.val = BIT(15),
+	},
+	{ /* right dac */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(14),
+		.val = BIT(14),
+	},
+	{ /* left audio amp */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(12),
+		.val = BIT(12),
+	},
+	{ /* right audio amp */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(11),
+		.val = BIT(11),
+	},
+	{ /* HP Pre-charge function release */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(28),
+		.val = BIT(28),
+	},
+	{ /* Disable the depop mux of HP drivers */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(25),
+		.val = 0x0,
+	},
+	{ /* Disable the depop VCM gen */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(22),
+		.val = 0x0,
+	},
+};
+
+static const struct reg_setting mt8167_codec_hp_disable_regs[] = {
+	{ /* Reset HP Pre-charge function */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(28),
+		.val = 0x0,
+	},
+	{ /* Enable the depop mux of HP drivers */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(25),
+		.val = BIT(25),
+	},
+	{ /* Enable depop VCM gen */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(22),
+		.val = BIT(22),
+	},
+	{ /* left audio amp */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(12),
+		.val = 0x0,
+	},
+	{ /* right audio amp */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(11),
+		.val = 0x0,
+	},
+	{ /* left dac */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(15),
+		.val = 0x0,
+	},
+	{ /* right dac */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(14),
+		.val = 0x0,
+	},
+	{ /* dl_en */
+		.reg = ABB_AFE_CON0,
+		.mask = BIT(0),
+		.val = 0x0,
+	},
+	{ /* dl_vcm2 */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(17),
+		.val = 0x0,
+	},
+	{ /* dl_vcm1 */
+		.reg = AUDIO_CODEC_CON01,
+		.mask = BIT(13),
+		.val = 0x0,
+	},
+	{ /* dac_clk */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(27),
+		.val = 0x0,
+	},
+	{ /* dl_vef24 */
+		.reg = AUDIO_CODEC_CON02,
+		.mask = BIT(16),
+		.val = 0x0,
+	},
+	{ /* DL CLK off */
+		.reg = AUDIO_CODEC_CON04,
+		.mask = BIT(15),
+		.val = 0x0,
+	},
+};
+
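+/*
+ * Loopback path control: pick the enable/disable sequence matching the
+ * requested CODEC_LOOPBACK_* type and apply it.
+ */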
+static void mt8167_codec_turn_on_ul_amic_path(struct snd_soc_component *codec,
+	uint32_t lpbk_type)
+{
+	dev_dbg(codec->dev, "%s %s\n", __func__,
+		ul_is_builtin_mic(lpbk_type) ? "built-in mic" : "headset mic");
+
+	if (ul_is_builtin_mic(lpbk_type))
+		mt8167_codec_apply_reg_setting(codec,
+			mt8167_codec_builtin_amic_enable_regs,
+			ARRAY_SIZE(mt8167_codec_builtin_amic_enable_regs));
+	else if (ul_is_headset_mic(lpbk_type))
+		mt8167_codec_apply_reg_setting(codec,
+			mt8167_codec_headset_amic_enable_regs,
+			ARRAY_SIZE(mt8167_codec_headset_amic_enable_regs));
+
+	mt8167_codec_apply_reg_setting(codec,
+		mt8167_codec_amic_enable_regs,
+		ARRAY_SIZE(mt8167_codec_amic_enable_regs));
+}
+
+static void mt8167_codec_turn_on_ul_dmic_path(struct snd_soc_component *codec)
+{
+	dev_dbg(codec->dev, "%s\n", __func__);
+
+	mt8167_codec_apply_reg_setting(codec,
+		mt8167_codec_dmic_enable_regs,
+		ARRAY_SIZE(mt8167_codec_dmic_enable_regs));
+}
+
+static void mt8167_codec_turn_off_ul_amic_path(struct snd_soc_component *codec)
+{
+	dev_dbg(codec->dev, "%s\n", __func__);
+
+	mt8167_codec_apply_reg_setting(codec,
+		mt8167_codec_amic_disable_regs,
+		ARRAY_SIZE(mt8167_codec_amic_disable_regs));
+}
+
+static void mt8167_codec_turn_off_ul_dmic_path(struct snd_soc_component *codec)
+{
+	dev_dbg(codec->dev, "%s\n", __func__);
+
+	mt8167_codec_apply_reg_setting(codec,
+		mt8167_codec_dmic_disable_regs,
+		ARRAY_SIZE(mt8167_codec_dmic_disable_regs));
+}
+
+static void mt8167_codec_turn_on_dl_spk_path(struct snd_soc_component *codec)
+{
+	dev_dbg(codec->dev, "%s\n", __func__);
+
+	mt8167_codec_apply_reg_setting(codec,
+		mt8167_codec_spk_enable_regs,
+		ARRAY_SIZE(mt8167_codec_spk_enable_regs));
+}
+
+static void mt8167_codec_turn_on_dl_hp_path(struct snd_soc_component *codec)
+{
+	dev_dbg(codec->dev, "%s\n", __func__);
+
+	mt8167_codec_apply_reg_setting(codec,
+		mt8167_codec_hp_enable_regs,
+		ARRAY_SIZE(mt8167_codec_hp_enable_regs));
+}
+
+static void mt8167_codec_turn_off_dl_spk_path(struct snd_soc_component *codec)
+{
+	dev_dbg(codec->dev, "%s\n", __func__);
+
+	mt8167_codec_apply_reg_setting(codec,
+		mt8167_codec_spk_disable_regs,
+		ARRAY_SIZE(mt8167_codec_spk_disable_regs));
+}
+
+static void mt8167_codec_turn_off_dl_hp_path(struct snd_soc_component *codec)
+{
+	dev_dbg(codec->dev, "%s\n", __func__);
+
+	mt8167_codec_apply_reg_setting(codec,
+		mt8167_codec_hp_disable_regs,
+		ARRAY_SIZE(mt8167_codec_hp_disable_regs));
+}
+
+static void mt8167_codec_setup_ul_rate(struct snd_soc_component *codec,
+	uint32_t lpbk_type)
+{
+	if (ul_is_amic(lpbk_type))
+		mt8167_codec_apply_reg_setting(codec,
+			mt8167_codec_ul_rate_48000_setup_regs,
+			ARRAY_SIZE(mt8167_codec_ul_rate_48000_setup_regs));
+	else if (ul_is_dmic(lpbk_type))
+		mt8167_codec_apply_reg_setting(codec,
+			mt8167_codec_ul_rate_32000_setup_regs,
+			ARRAY_SIZE(mt8167_codec_ul_rate_32000_setup_regs));
+}
+
+static void mt8167_codec_turn_on_ul_path(struct snd_soc_component *codec,
+	uint32_t lpbk_type)
+{
+	mt8167_codec_setup_ul_rate(codec, lpbk_type);
+	if (ul_is_amic(lpbk_type))
+		mt8167_codec_turn_on_ul_amic_path(codec, lpbk_type);
+	else if (ul_is_dmic(lpbk_type))
+		mt8167_codec_turn_on_ul_dmic_path(codec);
+}
+
+static void mt8167_codec_turn_off_ul_path(struct snd_soc_component *codec,
+	uint32_t lpbk_type)
+{
+	if (ul_is_amic(lpbk_type))
+		mt8167_codec_turn_off_ul_amic_path(codec);
+	else if (ul_is_dmic(lpbk_type))
+		mt8167_codec_turn_off_ul_dmic_path(codec);
+}
+
+static void mt8167_codec_setup_dl_rate(struct snd_soc_component *codec,
+	uint32_t lpbk_type)
+{
+	if (ul_is_amic(lpbk_type))
+		mt8167_codec_apply_reg_setting(codec,
+			mt8167_codec_dl_rate_48000_setup_regs,
+			ARRAY_SIZE(mt8167_codec_dl_rate_48000_setup_regs));
+	else if (ul_is_dmic(lpbk_type))
+		mt8167_codec_apply_reg_setting(codec,
+			mt8167_codec_dl_rate_32000_setup_regs,
+			ARRAY_SIZE(mt8167_codec_dl_rate_32000_setup_regs));
+}
+
+static void mt8167_codec_turn_on_dl_path(struct snd_soc_component *codec,
+	uint32_t lpbk_type)
+{
+	mt8167_codec_setup_dl_rate(codec, lpbk_type);
+	if (dl_is_spk(lpbk_type)) {
+		mt8167_codec_turn_on_dl_spk_path(codec);
+#ifdef CONFIG_MTK_SPEAKER
+		mt6392_int_spk_turn_on(codec);
+#endif
+	} else if (dl_is_hp(lpbk_type))
+		mt8167_codec_turn_on_dl_hp_path(codec);
+}
+
+static void mt8167_codec_turn_off_dl_path(struct snd_soc_component *codec,
+	uint32_t lpbk_type)
+{
+	if (dl_is_spk(lpbk_type)) {
+#ifdef CONFIG_MTK_SPEAKER
+		mt6392_int_spk_turn_off(codec);
+#endif
+		mt8167_codec_turn_off_dl_spk_path(codec);
+	} else if (dl_is_hp(lpbk_type))
+		mt8167_codec_turn_off_dl_hp_path(codec);
+}
+
+void mt8167_codec_turn_on_lpbk_path(struct snd_soc_component *codec,
+	uint32_t lpbk_type)
+{
+	mt8167_codec_turn_on_ul_path(codec, lpbk_type);
+	mt8167_codec_turn_on_dl_path(codec, lpbk_type);
+}
+EXPORT_SYMBOL_GPL(mt8167_codec_turn_on_lpbk_path);
+
+void mt8167_codec_turn_off_lpbk_path(struct snd_soc_component *codec,
+	uint32_t lpbk_type)
+{
+	mt8167_codec_turn_off_dl_path(codec, lpbk_type);
+	mt8167_codec_turn_off_ul_path(codec, lpbk_type);
+}
+EXPORT_SYMBOL_GPL(mt8167_codec_turn_off_lpbk_path);
diff --git a/sound/soc/codecs/mt8167-codec-utils.h b/sound/soc/codecs/mt8167-codec-utils.h
new file mode 100644
index 0000000..0d7562a
--- /dev/null
+++ b/sound/soc/codecs/mt8167-codec-utils.h
@@ -0,0 +1,36 @@
+/*
+ * mt8167-codec-utils.h  --  MT8167 codec driver utility functions
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT8167_CODEC_UTILS_H__
+#define __MT8167_CODEC_UTILS_H__
+
+#include <linux/types.h>
+
+struct snd_soc_component;
+
+uint32_t mt8167_codec_conv_dc_offset_to_comp_val(int32_t dc_offset);
+
+int mt8167_codec_get_hpl_cali_val(struct snd_soc_component *component,
+	uint32_t *dccomp_val, int32_t *dc_offset);
+int mt8167_codec_get_hpr_cali_val(struct snd_soc_component *component,
+	uint32_t *dccomp_val, int32_t *dc_offset);
+
+void mt8167_codec_turn_off_lpbk_path(struct snd_soc_component *component,
+	uint32_t lpbk_type);
+
+void mt8167_codec_turn_on_lpbk_path(struct snd_soc_component *component,
+	uint32_t lpbk_type);
+
+#endif /* __MT8167_CODEC_UTILS_H__ */
diff --git a/sound/soc/codecs/mt8167-codec.c b/sound/soc/codecs/mt8167-codec.c
new file mode 100644
index 0000000..47042dc
--- /dev/null
+++ b/sound/soc/codecs/mt8167-codec.c
@@ -0,0 +1,2150 @@
+/*
+ * mt8167-codec.c  --  MT8167 ALSA SoC codec driver
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include "mt8167-codec.h"
+#include "mt8167-codec-utils.h"
+#include <linux/of_address.h>
+
+#ifdef CONFIG_MTK_SPEAKER
+#include "mt6392-codec.h"
+#endif
+
+#define DMIC_PHASE_NUM		8
+
+enum regmap_module_id {
+	REGMAP_AFE = 0,
+#ifdef CONFIG_MTK_SPEAKER
+	REGMAP_PWRAP,
+#endif
+	REGMAP_NUMS,
+};
+
+enum dmic_wire_mode {
+	DMIC_ONE_WIRE = 0,
+	DMIC_TWO_WIRE,
+};
+
+enum dmic_rate_mode {
+	DMIC_RATE_D1P625M = 0,
+	DMIC_RATE_D3P25M,
+};
+
+enum dmic_ch_mode {
+	DMIC_L_CH = 0,
+	DMIC_R_CH,
+	DMIC_CH_NUM,
+};
+
+enum codec_pga_gain_enum_id {
+	HP_L_PGA_GAIN = 0,
+	HP_R_PGA_GAIN,
+	LOUT_PGA_GAIN,
+	UL_L_PGA_GAIN,
+	UL_R_PGA_GAIN,
+	PGA_GAIN_MAX,
+};
+
+struct mt8167_codec_priv {
+#ifdef CONFIG_MTK_SPEAKER
+	struct mt6392_codec_priv mt6392_data;
+#endif
+	struct snd_soc_component *codec;
+	struct regmap *regmap;
+	struct regmap *regmap_modules[REGMAP_NUMS];
+	void __iomem *apmixedsys_reg_base;
+	uint32_t lch_dccomp_val; /* L-ch DC compensation value */
+	uint32_t rch_dccomp_val; /* R-ch DC compensation value */
+	int32_t lch_dc_offset; /* L-ch DC offset value */
+	int32_t rch_dc_offset; /* R-ch DC offset value */
+	bool is_lch_dc_calibrated;
+	bool is_rch_dc_calibrated;
+	uint32_t pga_gain[PGA_GAIN_MAX];
+	uint32_t dmic_wire_mode;
+	uint32_t dmic_ch_phase[DMIC_CH_NUM];
+	uint32_t dmic_rate_mode; /* 0:1.625MHz 1:3.25MHz */
+	uint32_t loopback_type;
+	bool dl_en;
+	struct clk *clk;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs;
+#endif
+};
+
+#define MT8167_CODEC_NAME "mt8167-codec"
+
+static int mt8167_codec_startup(struct snd_pcm_substream *substream,
+			struct snd_soc_dai *codec_dai)
+{
+	dev_dbg(codec_dai->component->dev, "%s\n", __func__);
+	return 0;
+}
+
+static void mt8167_codec_shutdown(struct snd_pcm_substream *substream,
+			struct snd_soc_dai *codec_dai)
+{
+	dev_dbg(codec_dai->component->dev, "%s\n", __func__);
+}
+
+struct mt8167_codec_rate {
+	unsigned int rate;
+	unsigned int regvalue;
+};
+
+static const struct mt8167_codec_rate mt8167_codec_ul_rates[] = {
+	{ .rate =  8000, .regvalue = 0 },
+	{ .rate = 16000, .regvalue = 0 },
+	{ .rate = 32000, .regvalue = 0 },
+	{ .rate = 48000, .regvalue = 1 },
+};
+
+static const struct mt8167_codec_rate mt8167_codec_dl_rates[] = {
+	{ .rate =   8000, .regvalue = 0 },
+	{ .rate =  11025, .regvalue = 1 },
+	{ .rate =  12000, .regvalue = 2 },
+	{ .rate =  16000, .regvalue = 3 },
+	{ .rate =  22050, .regvalue = 4 },
+	{ .rate =  24000, .regvalue = 5 },
+	{ .rate =  32000, .regvalue = 6 },
+	{ .rate =  44100, .regvalue = 7 },
+	{ .rate =  48000, .regvalue = 8 },
+};
+
+static int mt8167_codec_ul_rate_to_val(struct mt8167_codec_priv *codec_data,
+	int rate)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mt8167_codec_ul_rates); i++)
+		if (mt8167_codec_ul_rates[i].rate == rate)
+			return mt8167_codec_ul_rates[i].regvalue;
+
+	dev_err(codec_data->codec->dev, "%s unsupported ul rate %d\n",
+			__func__, rate);
+
+	return -EINVAL;
+}
+
+static int mt8167_codec_dl_rate_to_val(struct mt8167_codec_priv *codec_data,
+	int rate)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mt8167_codec_dl_rates); i++)
+		if (mt8167_codec_dl_rates[i].rate == rate)
+			return mt8167_codec_dl_rates[i].regvalue;
+
+	dev_err(codec_data->codec->dev, "%s unsupported dl rate %d\n",
+			__func__, rate);
+
+	return -EINVAL;
+}
+
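+/*
+ * New rate settings written to ABB_AFE_CON1 only take effect after the
+ * TOP_CTRL bit in ABB_AFE_CON11 is toggled relative to its current
+ * STATUS value.
+ */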
+static int mt8167_codec_valid_new_rate(struct mt8167_codec_priv *codec_data)
+{
+	/* toggle top_ctrl status */
+	if (snd_soc_component_read32(codec_data->codec, ABB_AFE_CON11) &
+			ABB_AFE_CON11_TOP_CTRL_STATUS)
+		snd_soc_component_update_bits(codec_data->codec,
+			ABB_AFE_CON11,
+			ABB_AFE_CON11_TOP_CTRL, 0x0);
+	else
+		snd_soc_component_update_bits(codec_data->codec,
+			ABB_AFE_CON11,
+			ABB_AFE_CON11_TOP_CTRL, ABB_AFE_CON11_TOP_CTRL);
+
+	return 0;
+}
+
+static int mt8167_codec_setup_ul_rate(struct snd_soc_dai *codec_dai, int rate)
+{
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(codec_dai->component);
+	int val;
+
+	val = mt8167_codec_ul_rate_to_val(codec_data, rate);
+	if (val < 0) {
+		dev_err(codec_dai->component->dev,
+			"%s failed to get ul rate\n", __func__);
+		return val;
+	}
+
+	snd_soc_component_update_bits(codec_data->codec,
+			ABB_AFE_CON1, GENMASK(7, 4), (val << 4));
+	mt8167_codec_valid_new_rate(codec_data);
+
+	return 0;
+}
+
+static int mt8167_codec_setup_dl_rate(struct snd_soc_dai *codec_dai, int rate)
+{
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(codec_dai->component);
+	int val;
+
+	val = mt8167_codec_dl_rate_to_val(codec_data, rate);
+	if (val < 0) {
+		dev_err(codec_dai->component->dev,
+			"%s failed to get dl rate\n", __func__);
+		return val;
+	}
+
+	snd_soc_component_update_bits(codec_data->codec,
+			ABB_AFE_CON1, GENMASK(3, 0), val);
+	mt8167_codec_valid_new_rate(codec_data);
+
+	return 0;
+}
+
+static int mt8167_codec_valid_new_dc_comp(
+			struct mt8167_codec_priv *codec_data)
+{
+	/* toggle DC status */
+	if (snd_soc_component_read32(codec_data->codec, ABB_AFE_CON11) &
+			ABB_AFE_CON11_DC_CTRL_STATUS)
+		snd_soc_component_update_bits(codec_data->codec,
+			ABB_AFE_CON11,
+			ABB_AFE_CON11_DC_CTRL, 0);
+	else
+		snd_soc_component_update_bits(codec_data->codec,
+			ABB_AFE_CON11,
+			ABB_AFE_CON11_DC_CTRL, ABB_AFE_CON11_DC_CTRL);
+
+	return 0;
+}
+
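+/*
+ * Program the per-channel DC compensation values obtained from the HP
+ * calibration controls; they are latched via the DC_CTRL toggle before
+ * playback starts.
+ */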
+static void mt8167_codec_setup_dc_comp(struct snd_soc_dai *codec_dai)
+{
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(codec_dai->component);
+
+	/*  L-ch DC compensation value */
+	snd_soc_component_update_bits(codec_data->codec,
+			ABB_AFE_CON3, GENMASK(15, 0),
+			codec_data->lch_dccomp_val);
+	/*  R-ch DC compensation value */
+	snd_soc_component_update_bits(codec_data->codec,
+			ABB_AFE_CON4, GENMASK(15, 0),
+			codec_data->rch_dccomp_val);
+	/* DC compensation enable */
+	snd_soc_component_update_bits(codec_data->codec, ABB_AFE_CON10, 0x1, 0x1);
+	mt8167_codec_valid_new_dc_comp(codec_data);
+}
+
+static int mt8167_codec_hw_params(struct snd_pcm_substream *substream,
+			struct snd_pcm_hw_params *params,
+			struct snd_soc_dai *codec_dai)
+{
+	int ret = 0;
+	int rate = params_rate(params);
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		dev_dbg(codec_dai->component->dev,
+			"%s capture rate = %d\n", __func__, rate);
+		ret = mt8167_codec_setup_ul_rate(codec_dai, rate);
+	} else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		dev_dbg(codec_dai->component->dev,
+			"%s playback rate = %d\n", __func__, rate);
+		ret = mt8167_codec_setup_dl_rate(codec_dai, rate);
+		mt8167_codec_setup_dc_comp(codec_dai);
+	}
+
+	return ret;
+}
+
+static int mt8167_codec_hw_free(struct snd_pcm_substream *substream,
+			struct snd_soc_dai *codec_dai)
+{
+	dev_dbg(codec_dai->component->dev, "%s\n", __func__);
+	return 0;
+}
+
+static int mt8167_codec_prepare(struct snd_pcm_substream *substream,
+			struct snd_soc_dai *codec_dai)
+{
+	dev_dbg(codec_dai->component->dev, "%s\n", __func__);
+	return 0;
+}
+
+static int mt8167_codec_dl_ul_enable(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *codec_dai)
+{
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		snd_soc_component_update_bits(codec_dai->component,
+			ABB_AFE_CON0, BIT(1), BIT(1));
+	else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		snd_soc_component_update_bits(codec_dai->component,
+			ABB_AFE_CON0, BIT(0), BIT(0));
+
+	return 0;
+}
+
+static int mt8167_codec_dl_ul_disable(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *codec_dai)
+{
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		snd_soc_component_update_bits(codec_dai->component,
+			ABB_AFE_CON0, BIT(1), 0x0);
+	else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		snd_soc_component_update_bits(codec_dai->component,
+			ABB_AFE_CON0, BIT(0), 0x0);
+
+	return 0;
+}
+
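+/*
+ * Start/stop only gates the UL/DL enable bits in ABB_AFE_CON0; the
+ * analog path itself is powered through the DAPM widget events below.
+ */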
+static int mt8167_codec_trigger(struct snd_pcm_substream *substream,
+			int command,
+			struct snd_soc_dai *codec_dai)
+{
+	switch (command) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		mt8167_codec_dl_ul_enable(substream, codec_dai);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		mt8167_codec_dl_ul_disable(substream, codec_dai);
+		break;
+	}
+	dev_dbg(codec_dai->component->dev, "%s command = %d\n",
+			__func__, command);
+	return 0;
+}
+
+static const struct snd_soc_dai_ops mt8167_codec_aif_dai_ops = {
+	.startup = mt8167_codec_startup,
+	.shutdown = mt8167_codec_shutdown,
+	.hw_params = mt8167_codec_hw_params,
+	.hw_free = mt8167_codec_hw_free,
+	.prepare = mt8167_codec_prepare,
+	.trigger = mt8167_codec_trigger,
+};
+
+#define MT8167_CODEC_DL_RATES SNDRV_PCM_RATE_8000_48000
+#define MT8167_CODEC_UL_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | \
+	SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
+
+static struct snd_soc_dai_driver mt8167_codec_dai = {
+	.name = "mt8167-codec-dai",
+	.ops = &mt8167_codec_aif_dai_ops,
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rates = MT8167_CODEC_DL_RATES,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rates = MT8167_CODEC_UL_RATES,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+};
+
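+/*
+ * DMIC widget event: assemble the ABB_AFE_CON9 value (wire mode, clock
+ * rate, per-channel phase) from the cached kcontrol settings and apply
+ * it together with the analog-side DIG_MIC enable on power-up.
+ */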
+static int mt8167_codec_dmic_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+	struct mt8167_codec_priv *codec_data = snd_soc_component_get_drvdata(codec);
+	uint32_t abb_afe_con9_val = 0;
+	uint32_t audio_codec_con03_val = 0;
+
+	if (codec_data->dmic_wire_mode == DMIC_TWO_WIRE)
+		abb_afe_con9_val |=
+			ABB_AFE_CON9_TWO_WIRE_EN;
+
+	if (codec_data->dmic_rate_mode == DMIC_RATE_D3P25M)
+		abb_afe_con9_val |=
+			ABB_AFE_CON9_D3P25M_SEL;
+
+	abb_afe_con9_val |=
+		(codec_data->dmic_ch_phase[DMIC_L_CH] << 13);
+	abb_afe_con9_val |=
+		(codec_data->dmic_ch_phase[DMIC_R_CH] << 10);
+
+	abb_afe_con9_val |=
+		ABB_AFE_CON9_DIG_MIC_EN;
+
+	audio_codec_con03_val =
+		AUDIO_CODEC_CON03_SLEW_RATE_10 |
+		AUDIO_CODEC_CON03_DIG_MIC_EN;
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_component_update_bits(codec, ABB_AFE_CON9,
+			abb_afe_con9_val, abb_afe_con9_val);
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON03,
+			audio_codec_con03_val, audio_codec_con03_val);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON03,
+			AUDIO_CODEC_CON03_DIG_MIC_EN, 0x0);
+		snd_soc_component_update_bits(codec, ABB_AFE_CON9,
+			ABB_AFE_CON9_DIG_MIC_EN, 0x0);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mt8167_codec_left_audio_amp_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+	struct mt8167_codec_priv *codec_data = snd_soc_component_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* store gain */
+		codec_data->pga_gain[HP_L_PGA_GAIN] =
+			snd_soc_component_read32(codec, AUDIO_CODEC_CON01) &
+				GENMASK(2, 0);
+		/* set to small gain for depop sequence */
+		snd_soc_component_update_bits(codec,
+			AUDIO_CODEC_CON01, GENMASK(2, 0), 0x0);
+		/* disable input short (left audio amp only) */
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON02,
+			AUDIO_CODEC_CON02_ABUF_INSHORT, 0x0);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* enable input short to prevent signal leakage from L-DAC */
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON02,
+			AUDIO_CODEC_CON02_ABUF_INSHORT,
+			AUDIO_CODEC_CON02_ABUF_INSHORT);
+		/* restore gain */
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON01,
+				GENMASK(2, 0),
+				codec_data->pga_gain[HP_L_PGA_GAIN]);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mt8167_codec_right_audio_amp_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+	struct mt8167_codec_priv *codec_data = snd_soc_component_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* store gain */
+		codec_data->pga_gain[HP_R_PGA_GAIN] =
+			(snd_soc_component_read32(codec, AUDIO_CODEC_CON01) &
+				GENMASK(5, 3)) >> 3;
+		/* set to small gain for depop sequence */
+		snd_soc_component_update_bits(codec,
+			AUDIO_CODEC_CON01, GENMASK(5, 3), 0x0);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* restore gain */
+		snd_soc_component_update_bits(codec,
+			AUDIO_CODEC_CON01, GENMASK(5, 3),
+			(codec_data->pga_gain[HP_R_PGA_GAIN]) << 3);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mt8167_codec_voice_amp_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+	struct mt8167_codec_priv *codec_data = snd_soc_component_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* store gain */
+		codec_data->pga_gain[LOUT_PGA_GAIN] =
+			(snd_soc_component_read32(codec, AUDIO_CODEC_CON02) &
+				GENMASK(12, 9)) >> 9;
+		/* set to small gain for depop sequence */
+		snd_soc_component_update_bits(codec,
+			AUDIO_CODEC_CON02, GENMASK(12, 9), 0x0);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* restore gain (fade in ?) */
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON02,
+			GENMASK(12, 9),
+			(codec_data->pga_gain[LOUT_PGA_GAIN]) << 9);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		/* store gain */
+		codec_data->pga_gain[LOUT_PGA_GAIN] =
+			(snd_soc_component_read32(codec, AUDIO_CODEC_CON02) &
+				GENMASK(12, 9)) >> 9;
+		/* set to small gain for depop sequence (fade out ?) */
+		snd_soc_component_update_bits(codec,
+				AUDIO_CODEC_CON02, GENMASK(12, 9), 0x0);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* restore gain */
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON02,
+			GENMASK(12, 9),
+			(codec_data->pga_gain[LOUT_PGA_GAIN]) << 9);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* CODEC_CLK */
+static int mt8167_codec_clk_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* CLKLDO power on */
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON01, BIT(20), BIT(20));
+		/* Audio Codec CLK on */
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON03, BIT(30), BIT(30));
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* Audio Codec CLK off */
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON03, BIT(30), 0x0);
+		/* CLKLDO power off */
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON01, BIT(20), 0x0);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* 1.4V common mode voltage */
+static int mt8167_codec_vcm14_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_component_update_bits(codec,
+			AUDIO_CODEC_CON00,
+			AUDIO_CODEC_CON00_AUDULL_VCM14_EN,
+			AUDIO_CODEC_CON00_AUDULL_VCM14_EN);
+		snd_soc_component_update_bits(codec,
+			AUDIO_CODEC_CON00,
+			AUDIO_CODEC_CON00_AUDULR_VCM14_EN,
+			AUDIO_CODEC_CON00_AUDULR_VCM14_EN);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_component_update_bits(codec,
+			AUDIO_CODEC_CON00,
+			AUDIO_CODEC_CON00_AUDULL_VCM14_EN, 0x0);
+		snd_soc_component_update_bits(codec,
+			AUDIO_CODEC_CON00,
+			AUDIO_CODEC_CON00_AUDULR_VCM14_EN, 0x0);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* Uplink 2.4V differential reference */
+static int mt8167_codec_ul_vref24_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON00,
+			AUDIO_CODEC_CON00_AUDULL_VREF24_EN,
+			AUDIO_CODEC_CON00_AUDULL_VREF24_EN);
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON00,
+			AUDIO_CODEC_CON00_AUDULR_VREF24_EN,
+			AUDIO_CODEC_CON00_AUDULR_VREF24_EN);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON00,
+			AUDIO_CODEC_CON00_AUDULL_VREF24_EN, 0x0);
+		snd_soc_component_update_bits(codec, AUDIO_CODEC_CON00,
+			AUDIO_CODEC_CON00_AUDULR_VREF24_EN, 0x0);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
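+/*
+ * Headphone de-pop: a dedicated VCM generator pre-charges the output
+ * through a depop mux before the HP amplifiers are enabled, so the
+ * output ramps up instead of producing an audible pop.
+ */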
+static void mt8167_codec_hp_depop_setup(
+			struct mt8167_codec_priv *codec_data)
+{
+	/* Set audio DAC bias current */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON01, (0x1F << 6), 0x0);
+	/* Set the charge option of depop VCM gen. to "charge type" */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON02, BIT(18), BIT(18));
+	/* Set the charge current step of the depop VCM gen. (0x3: 47 uF cap.) */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON02, GENMASK(20, 19), (0x3 << 19));
+	/* Set the depop VCM voltage of depop VCM gen. to 1.35V. */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON02, BIT(21), 0x0);
+	/* Enable the depop VCM generator. */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON02, BIT(22), BIT(22));
+	/* Set the series resistor of depop mux of HP drivers to 62.5 Ohm. */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON02, (0x3 << 23), (0x3 << 23));
+	/* Disable audio DAC clock. */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON02, BIT(27), 0x0);
+	/* Enable the depop mux of HP drivers. */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON02, BIT(25), BIT(25));
+}
+
+static void mt8167_codec_hp_depop_cleanup(
+			struct mt8167_codec_priv *codec_data)
+{
+	/* Set the charge option of depop VCM gen. to "dis-charge type" */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON02, BIT(18), 0x0);
+	/* Set the 33uF cap current step of depop VCM gen to dis-charge */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON02, (0x2 << 19), (0x2 << 19));
+	/* Disable the depop VCM generator */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON02, BIT(22), 0x0);
+}
+
+/* PRE_PMD */
+static void mt8167_codec_hp_depop_enable(
+			struct mt8167_codec_priv *codec_data)
+{
+	/* store gain */
+	codec_data->pga_gain[HP_L_PGA_GAIN] =
+		snd_soc_component_read32(codec_data->codec, AUDIO_CODEC_CON01) &
+			GENMASK(2, 0);
+	codec_data->pga_gain[HP_R_PGA_GAIN] =
+		(snd_soc_component_read32(codec_data->codec, AUDIO_CODEC_CON01) &
+			GENMASK(5, 3)) >> 3;
+	/* set to small gain for depop sequence */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON01, GENMASK(2, 0), 0x0);
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON01, GENMASK(5, 3), 0x0);
+	/* Reset HP Pre-charge function */
+	snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON02,
+			BIT(28), 0x0);
+	/* Enable the depop mux of HP drivers */
+	snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON02,
+			BIT(25), BIT(25));
+	/* Enable depop VCM gen */
+	snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON02,
+			BIT(22), BIT(22));
+}
+
+/* POST_PMU */
+static void mt8167_codec_hp_depop_disable(
+			struct mt8167_codec_priv *codec_data)
+{
+	/* HP Pre-charge function release */
+	usleep_range(10000, 11000);
+	snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON02,
+			BIT(28), BIT(28));
+	/* Disable the depop mux of HP drivers */
+	snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON02,
+			BIT(25), 0x0);
+	/* Disable the depop VCM gen */
+	snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON02,
+			BIT(22), 0x0);
+	/* restore gain */
+	snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON01,
+			GENMASK(2, 0),
+			codec_data->pga_gain[HP_L_PGA_GAIN]);
+	snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON01,
+			GENMASK(5, 3),
+			(codec_data->pga_gain[HP_R_PGA_GAIN]) << 3);
+}
+
+static int mt8167_codec_depop_vcm_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+	struct mt8167_codec_priv *codec_data = snd_soc_component_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		mt8167_codec_hp_depop_disable(codec_data);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		mt8167_codec_hp_depop_enable(codec_data);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* AIF DL_UL loopback Switch */
+static int mt8167_codec_aif_dl_ul_lpbk_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/* Enable downlink data loopback to uplink */
+		snd_soc_component_update_bits(codec, ABB_AFE_CON2, BIT(3), BIT(3));
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		/* Disable downlink data loopback to uplink */
+		snd_soc_component_update_bits(codec, ABB_AFE_CON2, BIT(3), 0x0);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* DMIC Data Gen Switch */
+static int mt8167_codec_dmic_data_gen_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/* ul_dmic_debug_ch1/2 data 1 */
+		snd_soc_component_update_bits(codec, ABB_AFE_CON1,
+				GENMASK(13, 12), (0x3 << 12));
+		/* ul_dmic_debug_en (enable) */
+		snd_soc_component_update_bits(codec, ABB_AFE_CON1,
+				BIT(8), BIT(8));
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		/* ul_dmic_debug_ch1/2 data 0 */
+		snd_soc_component_update_bits(codec, ABB_AFE_CON1,
+				GENMASK(13, 12), 0x0);
+		/* ul_dmic_debug_en (disable) */
+		snd_soc_component_update_bits(codec, ABB_AFE_CON1,
+				BIT(8), 0x0);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* AMIC Data Gen Switch */
+static int mt8167_codec_amic_data_gen_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/* ul_amic_debug_ch1/2 data 1 */
+		snd_soc_component_update_bits(codec, ABB_AFE_CON1,
+				GENMASK(15, 14), (0x3 << 14));
+		/* ul_amic_debug_en (enable) */
+		snd_soc_component_update_bits(codec, ABB_AFE_CON1,
+				BIT(9), BIT(9));
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		/* ul_amic_debug_ch1/2 data 0 */
+		snd_soc_component_update_bits(codec, ABB_AFE_CON1,
+				GENMASK(15, 14), 0x0);
+		/* ul_amic_debug_en (disable) */
+		snd_soc_component_update_bits(codec, ABB_AFE_CON1,
+				BIT(9), 0x0);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* SDM Tone Gen Switch */
+static int mt8167_codec_sdm_tone_gen_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+
+	dev_dbg(codec->dev, "%s, event %d\n", __func__, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/* tri_amp_div */
+		snd_soc_component_update_bits(codec, ABB_AFE_SDM_TEST,
+				GENMASK(14, 12), (7 << 12));
+		/* tri_freq_div */
+		snd_soc_component_update_bits(codec, ABB_AFE_SDM_TEST,
+				GENMASK(9, 4), (1 << 4));
+		/* abb_sdm_src_sel_ctl (Triangular tone) */
+		snd_soc_component_update_bits(codec, ABB_AFE_SDM_TEST,
+				BIT(2), BIT(2));
+		/* tri_mute_sw (Unmute trigen) */
+		snd_soc_component_update_bits(codec, ABB_AFE_SDM_TEST,
+				BIT(1), 0x0);
+		/* tri_dac_en (Enable trigen) */
+		snd_soc_component_update_bits(codec, ABB_AFE_SDM_TEST,
+				BIT(0), BIT(0));
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		/* tri_mute_sw (Mute trigen) */
+		snd_soc_component_update_bits(codec, ABB_AFE_SDM_TEST,
+				BIT(1), BIT(1));
+		/* tri_dac_en (Disable trigen) */
+		snd_soc_component_update_bits(codec, ABB_AFE_SDM_TEST,
+				BIT(0), 0x0);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* Audio Amp Playback Volume
+ * {-2, 0, +2, +4, +6, +8, +10, +12} dB
+ */
+static const unsigned int dl_audio_amp_gain_tlv[] = {
+	TLV_DB_RANGE_HEAD(1),
+	0, 7, TLV_DB_SCALE_ITEM(-200, 200, 0),
+};
+
+/* Voice Amp Playback Volume
+ * {-18, -16, -14, -12, -10, ..., +12} dB
+ */
+static const unsigned int dl_voice_amp_gain_tlv[] = {
+	TLV_DB_RANGE_HEAD(1),
+	0, 15, TLV_DB_SCALE_ITEM(-1800, 200, 0),
+};
+
+/* PGA Capture Volume
+ * {-6, 0, +6, +12, +18, +24} dB
+ */
+static const unsigned int ul_pga_gain_tlv[] = {
+	TLV_DB_RANGE_HEAD(1),
+	0, 5, TLV_DB_SCALE_ITEM(-600, 600, 0),
+};
+
+/* Headset_PGAL_GAIN
+ * Headset_PGAR_GAIN
+ * {-2, 0, +2, +4, +6, +8, +10, +12} dB
+ */
+static const char *const headset_pga_gain_text[] = {
+	"-2dB", "+0dB", "+2dB", "+4dB",
+	"+6dB", "+8dB", "+10dB", "+12dB",
+};
+
+/* Lineout_PGA_GAIN
+ * {-18, -16, -14, -12, -10, ..., +12} dB
+ */
+static const char *const lineout_pga_gain_text[] = {
+	"-18dB", "-16dB", "-14dB", "-12dB",
+	"-10dB", "-8dB", "-6dB", "-4dB",
+	"-2dB", "+0dB", "+2dB", "+4dB",
+	"+6dB", "+8dB", "+10dB", "+12dB",
+};
+
+/* Audio_PGA1_Setting
+ * Audio_PGA2_Setting
+ * {-6, 0, +6, +12, +18, +24} dB
+ */
+static const char *const ul_pga_gain_text[] = {
+	"-6dB", "+0dB", "+6dB", "+12dB", "+18dB", "+24dB"
+};
+
+static const struct soc_enum mt8167_codec_pga_gain_enums[] = {
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(headset_pga_gain_text),
+		headset_pga_gain_text),
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(headset_pga_gain_text),
+		headset_pga_gain_text),
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lineout_pga_gain_text),
+		lineout_pga_gain_text),
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ul_pga_gain_text),
+		ul_pga_gain_text),
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ul_pga_gain_text),
+		ul_pga_gain_text),
+};
+
+static int mt8167_codec_get_gain_enum_id(const char *name)
+{
+	if (!strcmp(name, "Headset_PGAL_GAIN"))
+		return HP_L_PGA_GAIN;
+	if (!strcmp(name, "Headset_PGAR_GAIN"))
+		return HP_R_PGA_GAIN;
+	if (!strcmp(name, "Lineout_PGA_GAIN"))
+		return LOUT_PGA_GAIN;
+	if (!strcmp(name, "Audio_PGA1_Setting"))
+		return UL_L_PGA_GAIN;
+	if (!strcmp(name, "Audio_PGA2_Setting"))
+		return UL_R_PGA_GAIN;
+	return -EINVAL;
+}
+
+static int mt8167_codec_pga_gain_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+	int id = mt8167_codec_get_gain_enum_id(kcontrol->id.name);
+	uint32_t value = 0;
+
+	switch (id) {
+	case HP_L_PGA_GAIN:
+	case HP_R_PGA_GAIN:
+	case LOUT_PGA_GAIN:
+	case UL_L_PGA_GAIN:
+	case UL_R_PGA_GAIN:
+		value = codec_data->pga_gain[id];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ucontrol->value.integer.value[0] = value;
+
+	return 0;
+}
+
+static int mt8167_codec_pga_gain_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	int id = mt8167_codec_get_gain_enum_id(kcontrol->id.name);
+	uint32_t value = ucontrol->value.integer.value[0];
+
+	if (value >= e->items)
+		return -EINVAL;
+
+	dev_dbg(codec_data->codec->dev,
+		"%s id %d, value %u\n", __func__, id, value);
+
+	switch (id) {
+	case HP_L_PGA_GAIN:
+		snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON01,
+			GENMASK(2, 0), value);
+		break;
+	case HP_R_PGA_GAIN:
+		snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON01,
+			GENMASK(5, 3), value << 3);
+		break;
+	case LOUT_PGA_GAIN:
+		snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON02,
+			GENMASK(12, 9), value << 9);
+		break;
+	case UL_L_PGA_GAIN:
+		snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON00,
+			GENMASK(27, 25), value << 25);
+		break;
+	case UL_R_PGA_GAIN:
+		snd_soc_component_update_bits(codec_data->codec, AUDIO_CODEC_CON00,
+			GENMASK(9, 7), value << 7);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	codec_data->pga_gain[id] = value;
+
+	return 0;
+}
+
+/* HPL Calibration */
+static int mt8167_codec_hpl_dc_comp_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+
+	dev_dbg(codec_data->codec->dev, "%s\n", __func__);
+
+	if (!codec_data->is_lch_dc_calibrated) {
+		mt8167_codec_get_hpl_cali_val(codec_data->codec,
+			&codec_data->lch_dccomp_val,
+			&codec_data->lch_dc_offset);
+		codec_data->is_lch_dc_calibrated = true;
+	}
+	ucontrol->value.integer.value[0] = codec_data->lch_dc_offset;
+	return 0;
+}
+
+static int mt8167_codec_hpl_dc_comp_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+
+	dev_dbg(codec_data->codec->dev, "%s\n", __func__);
+
+	codec_data->lch_dc_offset = ucontrol->value.integer.value[0];
+	codec_data->lch_dccomp_val =
+		mt8167_codec_conv_dc_offset_to_comp_val(
+			codec_data->lch_dc_offset);
+	return 0;
+}
+
+/* HPR Calibration */
+static int mt8167_codec_hpr_dc_comp_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+
+	dev_dbg(codec_data->codec->dev, "%s\n", __func__);
+	if (!codec_data->is_rch_dc_calibrated) {
+		mt8167_codec_get_hpr_cali_val(codec_data->codec,
+			&codec_data->rch_dccomp_val,
+			&codec_data->rch_dc_offset);
+		codec_data->is_rch_dc_calibrated = true;
+	}
+	ucontrol->value.integer.value[0] = codec_data->rch_dc_offset;
+	return 0;
+}
+
+static int mt8167_codec_hpr_dc_comp_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+
+	dev_dbg(codec_data->codec->dev, "%s\n", __func__);
+
+	codec_data->rch_dc_offset = ucontrol->value.integer.value[0];
+	codec_data->rch_dccomp_val =
+		mt8167_codec_conv_dc_offset_to_comp_val(
+			codec_data->rch_dc_offset);
+
+	return 0;
+}
+
+/* UL to DL loopback (Codec_Loopback_Select) */
+#define ENUM_TO_STR(x) #x
+static const char * const mt8167_codec_loopback_text[] = {
+	ENUM_TO_STR(CODEC_LOOPBACK_NONE),
+	ENUM_TO_STR(CODEC_LOOPBACK_AMIC_TO_SPK),
+	ENUM_TO_STR(CODEC_LOOPBACK_AMIC_TO_HP),
+	ENUM_TO_STR(CODEC_LOOPBACK_DMIC_TO_SPK),
+	ENUM_TO_STR(CODEC_LOOPBACK_DMIC_TO_HP),
+	ENUM_TO_STR(CODEC_LOOPBACK_HEADSET_MIC_TO_SPK),
+	ENUM_TO_STR(CODEC_LOOPBACK_HEADSET_MIC_TO_HP),
+};
+
+static const struct soc_enum mt8167_codec_loopback_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mt8167_codec_loopback_text),
+		mt8167_codec_loopback_text);
+
+static int mt8167_codec_loopback_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+
+	dev_dbg(codec_data->codec->dev, "%s\n", __func__);
+	ucontrol->value.integer.value[0] = codec_data->loopback_type;
+	return 0;
+}
+
+static int mt8167_codec_loopback_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+	uint32_t prev_lpbk_type = codec_data->loopback_type;
+	uint32_t next_lpbk_type = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec_data->codec->dev, "%s\n", __func__);
+
+	if (next_lpbk_type == prev_lpbk_type) {
+		dev_dbg(codec_data->codec->dev, "%s no change\n", __func__);
+		return 0;
+	}
+
+	if (prev_lpbk_type != CODEC_LOOPBACK_NONE)
+		mt8167_codec_turn_off_lpbk_path(
+			codec_data->codec, prev_lpbk_type);
+	if (next_lpbk_type != CODEC_LOOPBACK_NONE)
+		mt8167_codec_turn_on_lpbk_path(
+			codec_data->codec, next_lpbk_type);
+
+	codec_data->loopback_type = ucontrol->value.integer.value[0];
+	return 0;
+}
+
+/* Codec_DL_Switch */
+static const char * const mt8167_codec_dl_switch_text[] = { "Off", "On" };
+
+static const struct soc_enum mt8167_codec_dl_switch_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mt8167_codec_dl_switch_text),
+		mt8167_codec_dl_switch_text);
+
+static int mt8167_codec_dl_switch_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+
+	dev_dbg(codec_data->codec->dev, "%s\n", __func__);
+	ucontrol->value.integer.value[0] = codec_data->dl_en;
+	return 0;
+}
+
+static int mt8167_codec_dl_switch_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+	uint32_t prev_dl_en = codec_data->dl_en;
+	uint32_t next_dl_en = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec_data->codec->dev, "%s\n", __func__);
+
+	if (next_dl_en == prev_dl_en) {
+		dev_dbg(codec_data->codec->dev, "%s dummy action\n", __func__);
+		return 0;
+	}
+
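+	/* Bit 0 of ABB_AFE_CON0 gates the downlink path. */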
+	if (prev_dl_en)
+		/* turn off */
+		snd_soc_component_update_bits(codec_data->codec,
+			ABB_AFE_CON0, BIT(0), 0x0);
+	else
+		/* turn on */
+		snd_soc_component_update_bits(codec_data->codec,
+			ABB_AFE_CON0, BIT(0), BIT(0));
+
+	codec_data->dl_en = next_dl_en;
+	return 0;
+}
+
+static int mt8167_codec_dmic_ch_phase_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+
+	ucontrol->value.integer.value[0] = codec_data->dmic_ch_phase[DMIC_L_CH];
+	ucontrol->value.integer.value[1] = codec_data->dmic_ch_phase[DMIC_R_CH];
+	return 0;
+}
+
+static int mt8167_codec_dmic_ch_phase_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+
+	codec_data->dmic_ch_phase[DMIC_L_CH] = ucontrol->value.integer.value[0];
+	codec_data->dmic_ch_phase[DMIC_R_CH] = ucontrol->value.integer.value[1];
+	return 0;
+}
+
+static int mt8167_codec_dmic_rate_mode_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+
+	ucontrol->value.integer.value[0] = codec_data->dmic_rate_mode;
+	return 0;
+}
+
+static int mt8167_codec_dmic_rate_mode_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct mt8167_codec_priv *codec_data =
+			snd_soc_component_get_drvdata(component);
+
+	codec_data->dmic_rate_mode = ucontrol->value.integer.value[0];
+	return 0;
+}
+
+static const struct snd_kcontrol_new mt8167_codec_controls[] = {
+	/* DL Audio amplifier gain adjustment */
+	SOC_DOUBLE_TLV("Audio Amp Playback Volume",
+		AUDIO_CODEC_CON01, 0, 3, 7, 0,
+		dl_audio_amp_gain_tlv),
+	/* DL Voice amplifier gain adjustment */
+	SOC_SINGLE_TLV("Voice Amp Playback Volume",
+		AUDIO_CODEC_CON02, 9, 15, 0,
+		dl_voice_amp_gain_tlv),
+	/* UL PGA gain adjustment */
+	SOC_DOUBLE_TLV("PGA Capture Volume",
+		AUDIO_CODEC_CON00, 25, 7, 5, 0,
+		ul_pga_gain_tlv),
+	/* Headset_PGAL_GAIN */
+	SOC_ENUM_EXT("Headset_PGAL_GAIN",
+		mt8167_codec_pga_gain_enums[HP_L_PGA_GAIN],
+		mt8167_codec_pga_gain_get,
+		mt8167_codec_pga_gain_put),
+	/* Headset_PGAR_GAIN */
+	SOC_ENUM_EXT("Headset_PGAR_GAIN",
+		mt8167_codec_pga_gain_enums[HP_R_PGA_GAIN],
+		mt8167_codec_pga_gain_get,
+		mt8167_codec_pga_gain_put),
+	/* Lineout_PGA_GAIN */
+	SOC_ENUM_EXT("Lineout_PGA_GAIN",
+		mt8167_codec_pga_gain_enums[LOUT_PGA_GAIN],
+		mt8167_codec_pga_gain_get,
+		mt8167_codec_pga_gain_put),
+	/* Audio_PGA1_Setting */
+	SOC_ENUM_EXT("Audio_PGA1_Setting",
+		mt8167_codec_pga_gain_enums[UL_L_PGA_GAIN],
+		mt8167_codec_pga_gain_get,
+		mt8167_codec_pga_gain_put),
+	/* Audio_PGA2_Setting */
+	SOC_ENUM_EXT("Audio_PGA2_Setting",
+		mt8167_codec_pga_gain_enums[UL_R_PGA_GAIN],
+		mt8167_codec_pga_gain_get,
+		mt8167_codec_pga_gain_put),
+	/* HP calibration */
+	SOC_SINGLE_EXT("Audio HPL Offset",
+		SND_SOC_NOPM, 0, 0x8000, 0,
+		mt8167_codec_hpl_dc_comp_get,
+		mt8167_codec_hpl_dc_comp_put),
+	SOC_SINGLE_EXT("Audio HPR Offset",
+		SND_SOC_NOPM, 0, 0x8000, 0,
+		mt8167_codec_hpr_dc_comp_get,
+		mt8167_codec_hpr_dc_comp_put),
+	/* UL to DL loopback */
+	SOC_ENUM_EXT("Codec_Loopback_Select",
+		mt8167_codec_loopback_enum,
+		mt8167_codec_loopback_get,
+		mt8167_codec_loopback_put),
+	/* for factory usage */
+	SOC_ENUM_EXT("Codec_DL_Switch",
+		mt8167_codec_dl_switch_enum,
+		mt8167_codec_dl_switch_get,
+		mt8167_codec_dl_switch_put),
+	/* for dmic phase debug */
+	SOC_DOUBLE_EXT("Dmic Ch Phase",
+		SND_SOC_NOPM, 0, 1, 7, 0,
+		mt8167_codec_dmic_ch_phase_get,
+		mt8167_codec_dmic_ch_phase_put),
+	/* for dmic rate mode debug */
+	SOC_SINGLE_EXT("Dmic Rate Mode",
+		SND_SOC_NOPM, 0, 1, 0,
+		mt8167_codec_dmic_rate_mode_get,
+		mt8167_codec_dmic_rate_mode_put),
+};
+
+/* Left PGA Mux/Right PGA Mux */
+static const char * const pga_mux_text[] = {
+	"CH0", "CH1", "OPEN",
+};
+
+static SOC_ENUM_SINGLE_DECL(mt8167_codec_left_pga_mux_enum,
+	AUDIO_CODEC_CON00, 28, pga_mux_text);
+
+static SOC_ENUM_SINGLE_DECL(mt8167_codec_right_pga_mux_enum,
+	AUDIO_CODEC_CON00, 10, pga_mux_text);
+
+static const struct snd_kcontrol_new mt8167_codec_left_pga_mux =
+	SOC_DAPM_ENUM("Left PGA Mux", mt8167_codec_left_pga_mux_enum);
+
+static const struct snd_kcontrol_new mt8167_codec_right_pga_mux =
+	SOC_DAPM_ENUM("Right PGA Mux", mt8167_codec_right_pga_mux_enum);
+
+/* AIF TX Mux */
+static const char * const aif_tx_mux_text[] = {
+	"Analog MIC", "Digital MIC", "Aif Rx"
+};
+
+static SOC_ENUM_SINGLE_DECL(mt8167_codec_aif_tx_mux_enum,
+	SND_SOC_NOPM, 0, aif_tx_mux_text);
+
+static const struct snd_kcontrol_new mt8167_codec_aif_tx_mux =
+	SOC_DAPM_ENUM("AIF TX Mux", mt8167_codec_aif_tx_mux_enum);
+
+/* HPOUT Mux */
+static const char * const hp_out_mux_text[] = {
+	"OPEN", "AUDIO_AMP",
+};
+
+static SOC_ENUM_SINGLE_DECL(mt8167_codec_hp_out_mux_enum,
+	SND_SOC_NOPM, 0, hp_out_mux_text);
+
+static const struct snd_kcontrol_new mt8167_codec_hp_out_mux =
+	SOC_DAPM_ENUM("HPOUT Mux",
+			mt8167_codec_hp_out_mux_enum);
+
+/* LINEOUT Mux  */
+static const char * const line_out_mux_text[] = {
+	"OPEN", "VOICE_AMP",
+};
+
+static SOC_ENUM_SINGLE_DECL(mt8167_codec_line_out_mux_enum,
+	SND_SOC_NOPM, 0, line_out_mux_text);
+
+static const struct snd_kcontrol_new mt8167_codec_line_out_mux =
+	SOC_DAPM_ENUM("LINEOUT Mux",
+		mt8167_codec_line_out_mux_enum);
+
+/* AIF DL_UL loopback Switch */
+static const struct snd_kcontrol_new mt8167_codec_aif_dl_ul_lpbk_ctrl =
+	SOC_DAPM_SINGLE_VIRT("Switch", 1);
+
+/* DMIC Data Gen Switch */
+static const struct snd_kcontrol_new mt8167_codec_dmic_data_gen_ctrl =
+	SOC_DAPM_SINGLE_VIRT("Switch", 1);
+
+/* AMIC Data Gen Switch */
+static const struct snd_kcontrol_new mt8167_codec_amic_data_gen_ctrl =
+	SOC_DAPM_SINGLE_VIRT("Switch", 1);
+
+/* SDM Tone Gen Switch */
+static const struct snd_kcontrol_new mt8167_codec_sdm_tone_gen_ctrl =
+	SOC_DAPM_SINGLE_VIRT("Switch", 1);
+
+static const struct snd_soc_dapm_widget mt8167_codec_dapm_widgets[] = {
+	/* stream domain */
+	SND_SOC_DAPM_AIF_OUT("AIF TX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AIF RX", "Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_ADC("Left ADC", NULL, AUDIO_CODEC_CON00, 23, 0),
+	SND_SOC_DAPM_ADC("Right ADC", NULL, AUDIO_CODEC_CON00, 5, 0),
+	SND_SOC_DAPM_DAC("Left DAC", NULL, AUDIO_CODEC_CON01, 15, 0),
+	SND_SOC_DAPM_DAC("Right DAC", NULL, AUDIO_CODEC_CON01, 14, 0),
+
+	/* path domain */
+	SND_SOC_DAPM_MUX("Left PGA Mux", SND_SOC_NOPM, 0, 0,
+			&mt8167_codec_left_pga_mux),
+	SND_SOC_DAPM_MUX("Right PGA Mux", SND_SOC_NOPM, 0, 0,
+			&mt8167_codec_right_pga_mux),
+	SND_SOC_DAPM_MUX("AIF TX Mux", SND_SOC_NOPM, 0, 0,
+			&mt8167_codec_aif_tx_mux),
+	SND_SOC_DAPM_PGA("Left PGA", AUDIO_CODEC_CON00, 24, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("Right PGA", AUDIO_CODEC_CON00, 6, 0, NULL, 0),
+	SND_SOC_DAPM_PGA_S("Left DMIC", 1, SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA_S("Right DMIC", 1, SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA_S("DMIC", 2, SND_SOC_NOPM, 0, 0,
+			mt8167_codec_dmic_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_PGA_S("Left Audio Amp", 1,
+			AUDIO_CODEC_CON01, 12, 0,
+			mt8167_codec_left_audio_amp_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_S("Right Audio Amp", 1,
+			AUDIO_CODEC_CON01, 11, 0,
+			mt8167_codec_right_audio_amp_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_S("HP Depop VCM", 2, SND_SOC_NOPM, 0, 0,
+		mt8167_codec_depop_vcm_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_MUX("HPOUT Mux", SND_SOC_NOPM, 0, 0,
+			&mt8167_codec_hp_out_mux),
+	SND_SOC_DAPM_PGA_E("Voice Amp", AUDIO_CODEC_CON02, 8, 0, NULL, 0,
+			mt8167_codec_voice_amp_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX("LINEOUT Mux", SND_SOC_NOPM, 0, 0,
+			&mt8167_codec_line_out_mux),
+	SND_SOC_DAPM_SWITCH_E("AIF DL_UL loopback", SND_SOC_NOPM, 0, 0,
+			&mt8167_codec_aif_dl_ul_lpbk_ctrl,
+			mt8167_codec_aif_dl_ul_lpbk_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SWITCH_E("DMIC Data Gen", SND_SOC_NOPM, 0, 0,
+			&mt8167_codec_dmic_data_gen_ctrl,
+			mt8167_codec_dmic_data_gen_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SWITCH_E("AMIC Data Gen", SND_SOC_NOPM, 0, 0,
+			&mt8167_codec_amic_data_gen_ctrl,
+			mt8167_codec_amic_data_gen_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SWITCH_E("SDM Tone Gen", SND_SOC_NOPM, 0, 0,
+			&mt8167_codec_sdm_tone_gen_ctrl,
+			mt8167_codec_sdm_tone_gen_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	/* generic widgets */
+	SND_SOC_DAPM_SUPPLY_S("CODEC_CLK", 1, SND_SOC_NOPM, 0, 0,
+			mt8167_codec_clk_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S("UL_CLK", 2, AUDIO_CODEC_CON03, 21, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DL_CLK", 2, AUDIO_CODEC_CON04, 15, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("Vcm14", 3, SND_SOC_NOPM, 0, 0,
+			mt8167_codec_vcm14_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S("UL_Vref24", 4, SND_SOC_NOPM, 0, 0,
+			mt8167_codec_ul_vref24_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S("DL_Vref24", 4,
+			AUDIO_CODEC_CON02, 16, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DAC_CLK", 4, AUDIO_CODEC_CON02, 27, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DL_VCM1", 4, AUDIO_CODEC_CON01, 13, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DL_VCM2", 4, AUDIO_CODEC_CON02, 17, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("AU_MICBIAS0", 5,
+			AUDIO_CODEC_CON03, 17, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("AU_MICBIAS1", 5,
+			AUDIO_CODEC_CON01, 21, 0, NULL, 0),
+
+	/* platform domain */
+	SND_SOC_DAPM_INPUT("AU_VIN0"),
+	SND_SOC_DAPM_INPUT("AU_VIN1"),
+	SND_SOC_DAPM_INPUT("AU_VIN2"),
+
+	SND_SOC_DAPM_OUTPUT("AU_HPL"),
+	SND_SOC_DAPM_OUTPUT("AU_HPR"),
+
+	SND_SOC_DAPM_OUTPUT("AU_LOL"),
+
+	SND_SOC_DAPM_SIGGEN("SDM Tone"),
+	SND_SOC_DAPM_SIGGEN("DMIC Data"),
+	SND_SOC_DAPM_SIGGEN("AMIC Data"),
+};
+
+static const struct snd_soc_dapm_route mt8167_codec_dapm_routes[] = {
+	/* Power */
+	{"AIF TX", NULL, "CODEC_CLK"},
+	{"AIF RX", NULL, "CODEC_CLK"},
+
+	{"AIF TX", NULL, "UL_CLK"},
+	{"AIF RX", NULL, "DL_CLK"},
+
+	/* DL to UL loopback */
+	{"AIF DL_UL loopback", "Switch", "AIF RX"},
+	{"AIF TX Mux", "Aif Rx", "AIF DL_UL loopback"},
+
+	/* UL */
+	{"AIF TX", NULL, "AIF TX Mux"},
+
+	{"AU_VIN0", NULL, "AU_MICBIAS0"},
+	{"AU_VIN2", NULL, "AU_MICBIAS0"},
+
+	{"AU_VIN1", NULL, "AU_MICBIAS1"},
+
+	/* UL - Analog MIC Path */
+	{"AIF TX Mux", "Analog MIC", "Left ADC"},
+	{"AIF TX Mux", "Analog MIC", "Right ADC"},
+
+	{"Left ADC", NULL, "Left PGA"},
+	{"Right ADC", NULL, "Right PGA"},
+
+	{"Left PGA", NULL, "Left PGA Mux"},
+	{"Left PGA", NULL, "Left PGA Mux"},
+
+	{"Right PGA", NULL, "Right PGA Mux"},
+	{"Right PGA", NULL, "Right PGA Mux"},
+
+	{"Left PGA Mux", "CH0", "AU_VIN0"},
+	{"Left PGA Mux", "CH1", "AU_VIN1"},
+
+	{"Right PGA Mux", "CH1", "AU_VIN1"},
+	{"Right PGA Mux", "CH0", "AU_VIN2"},
+
+	{"Left PGA", NULL, "Vcm14"},
+	{"Right PGA", NULL, "Vcm14"},
+
+	{"Left ADC", NULL, "UL_Vref24"},
+	{"Right ADC", NULL, "UL_Vref24"},
+
+	/* UL - Digital MIC Path */
+	{"AIF TX Mux", "Digital MIC", "DMIC"},
+
+	{"DMIC", NULL, "Left DMIC"},
+	{"DMIC", NULL, "Right DMIC"},
+
+	{"Left DMIC", NULL, "AU_VIN0"},
+	{"Right DMIC", NULL, "AU_VIN2"},
+
+	{"Left DMIC", NULL, "Vcm14"},
+	{"Right DMIC", NULL, "Vcm14"},
+
+	/* UL - Debug Path (DMIC Data Generator) */
+	{"DMIC Data Gen", "Switch", "DMIC Data"},
+	{"AIF TX Mux", "Digital MIC", "DMIC Data Gen"},
+
+	/* UL - Debug Path (AMIC Data Generator) */
+	{"AMIC Data Gen", "Switch", "AMIC Data"},
+	{"AIF TX Mux", "Analog MIC", "AMIC Data Gen"},
+
+	/* DL */
+	{"AIF RX", NULL, "Vcm14"},
+
+	{"Left DAC", NULL, "AIF RX"},
+	{"Right DAC", NULL, "AIF RX"},
+
+	{"Left DAC", NULL, "DL_Vref24"},
+	{"Right DAC", NULL, "DL_Vref24"},
+
+	{"Left DAC", NULL, "DAC_CLK"},
+	{"Right DAC", NULL, "DAC_CLK"},
+
+	{"Left DAC", NULL, "DL_VCM1"},
+	{"Right DAC", NULL, "DL_VCM1"},
+
+	/* DL - Audio Amp Path */
+	{"Left Audio Amp", NULL, "Left DAC"},
+	{"Right Audio Amp", NULL, "Right DAC"},
+
+	{"Left Audio Amp", NULL, "DL_VCM2"},
+	{"Right Audio Amp", NULL, "DL_VCM2"},
+
+	{"HP Depop VCM", NULL, "Left Audio Amp"},
+	{"HP Depop VCM", NULL, "Right Audio Amp"},
+
+	{"HPOUT Mux", "AUDIO_AMP", "HP Depop VCM"},
+
+	{"AU_HPL", NULL, "HPOUT Mux"},
+	{"AU_HPR", NULL, "HPOUT Mux"},
+
+	/* DL - Voice Amp Path */
+	{"Voice Amp", NULL, "Left DAC"},
+
+	{"Voice Amp", NULL, "DL_VCM2"},
+	{"Voice Amp", NULL, "DL_VCM2"},
+
+	{"LINEOUT Mux", "VOICE_AMP", "Voice Amp"},
+
+	{"AU_LOL", NULL, "LINEOUT Mux"},
+
+	/* DL - Debug Path (Triangular Tone Generator) */
+	{"SDM Tone Gen", "Switch", "SDM Tone"},
+	{"AIF RX", NULL, "SDM Tone Gen"},
+};
+
+static int afe_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+	struct mt8167_codec_priv *codec_data =
+			(struct mt8167_codec_priv *) context;
+	int ret = 0;
+
+	if (!(codec_data && codec_data->regmap_modules[REGMAP_AFE]))
+		return -EINVAL;
+
+	dev_dbg(codec_data->codec->dev, "%s reg 0x%x\n",
+		__func__, reg);
+
+	ret = regmap_read(codec_data->regmap_modules[REGMAP_AFE],
+			(reg & (~AFE_OFFSET)), val);
+	return ret;
+}
+
+static int afe_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+	struct mt8167_codec_priv *codec_data =
+			(struct mt8167_codec_priv *) context;
+	int ret = 0;
+
+	if (!(codec_data && codec_data->regmap_modules[REGMAP_AFE]))
+		return -EINVAL;
+
+	dev_dbg(codec_data->codec->dev, "%s reg 0x%x, val 0x%x\n",
+		__func__, reg, val);
+
+	ret = regmap_write(codec_data->regmap_modules[REGMAP_AFE],
+			(reg & (~AFE_OFFSET)), val);
+	return ret;
+}
+
+static bool reg_is_in_afe(unsigned int reg)
+{
+	if (reg & AFE_OFFSET)
+		return true;
+	else
+		return false;
+}
+
+static int apmixedsys_reg_read(void *context,
+			unsigned int reg, unsigned int *val)
+{
+	struct mt8167_codec_priv *codec_data =
+			(struct mt8167_codec_priv *) context;
+
+	if (!(codec_data && codec_data->apmixedsys_reg_base))
+		return -EINVAL;
+
+	*val = readl(codec_data->apmixedsys_reg_base +
+		     (reg & ~APMIXED_OFFSET));
+	return 0;
+}
+
+static int apmixedsys_reg_write(void *context,
+			unsigned int reg, unsigned int val)
+{
+	struct mt8167_codec_priv *codec_data =
+			(struct mt8167_codec_priv *) context;
+
+	if (!(codec_data && codec_data->apmixedsys_reg_base))
+		return -EINVAL;
+
+	writel(val, codec_data->apmixedsys_reg_base +
+		    (reg & ~APMIXED_OFFSET));
+	return 0;
+}
+
+static bool reg_is_in_apmixedsys(unsigned int reg)
+{
+	if (reg & APMIXED_OFFSET)
+		return true;
+	else
+		return false;
+}
+
+#ifdef CONFIG_MTK_SPEAKER
+static int pwrap_reg_read(void *context,
+			unsigned int reg, unsigned int *val)
+{
+	struct mt8167_codec_priv *codec_data =
+			(struct mt8167_codec_priv *) context;
+	int ret = 0;
+
+	if (!(codec_data && codec_data->regmap_modules[REGMAP_PWRAP]))
+		return -EINVAL;
+
+	dev_dbg(codec_data->codec->dev, "%s reg 0x%x\n",
+		__func__, reg);
+
+	ret = regmap_read(codec_data->regmap_modules[REGMAP_PWRAP],
+			(reg & (~PMIC_OFFSET)), val);
+	return ret;
+}
+
+static int pwrap_reg_write(void *context,
+			unsigned int reg, unsigned int val)
+{
+	struct mt8167_codec_priv *codec_data =
+			(struct mt8167_codec_priv *) context;
+	int ret = 0;
+
+	if (!(codec_data && codec_data->regmap_modules[REGMAP_PWRAP]))
+		return -EINVAL;
+
+	dev_dbg(codec_data->codec->dev, "%s reg 0x%x, val 0x%x\n",
+		__func__, reg, val);
+
+	ret = regmap_write(codec_data->regmap_modules[REGMAP_PWRAP],
+			(reg & (~PMIC_OFFSET)), val);
+	return ret;
+}
+
+static bool reg_is_in_pmic(unsigned int reg)
+{
+	if (reg & PMIC_OFFSET)
+		return true;
+	else
+		return false;
+}
+#endif
+
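+/*
+ * The codec register map is virtual: each register macro carries an
+ * address-space tag in its upper bits (AFE_OFFSET, APMIXED_OFFSET and,
+ * with CONFIG_MTK_SPEAKER, PMIC_OFFSET). The dispatch helpers below strip
+ * the tag and forward the access to the matching backing regmap or MMIO
+ * window, so the rest of the driver can treat the scattered blocks as a
+ * single device.
+ */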
+/* regmap functions */
+static int codec_reg_read(void *context,
+		unsigned int reg, unsigned int *val)
+{
+	int ret = 0;
+
+	if (reg_is_in_afe(reg))
+		ret = afe_reg_read(context, reg, val);
+	else if (reg_is_in_apmixedsys(reg))
+		ret = apmixedsys_reg_read(context, reg, val);
+#ifdef CONFIG_MTK_SPEAKER
+	else if (reg_is_in_pmic(reg))
+		ret = pwrap_reg_read(context, reg, val);
+#endif
+	else
+		ret = -EINVAL;
+	return ret;
+}
+
+static int codec_reg_write(void *context,
+			unsigned int reg, unsigned int val)
+{
+	int ret = 0;
+
+	if (reg_is_in_afe(reg))
+		ret = afe_reg_write(context, reg, val);
+	else if (reg_is_in_apmixedsys(reg))
+		ret = apmixedsys_reg_write(context, reg, val);
+#ifdef CONFIG_MTK_SPEAKER
+	else if (reg_is_in_pmic(reg))
+		ret = pwrap_reg_write(context, reg, val);
+#endif
+	else
+		ret = -EINVAL;
+	return ret;
+}
+
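+/*
+ * No-op lock callbacks: serialization is left to the backing regmaps,
+ * so the top-level regmap does not add a second lock around each
+ * dispatched access.
+ */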
+static void codec_regmap_lock(void *lock_arg)
+{
+}
+
+static void codec_regmap_unlock(void *lock_arg)
+{
+}
+
+static struct regmap_config mt8167_codec_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_read = codec_reg_read,
+	.reg_write = codec_reg_write,
+	.lock = codec_regmap_lock,
+	.unlock = codec_regmap_unlock,
+	.cache_type = REGCACHE_NONE,
+};
+
+#ifdef CONFIG_DEBUG_FS
+struct mt8167_codec_reg_attr {
+	uint32_t offset;
+	char *name;
+};
+
+#define DUMP_REG_ENTRY(reg) {reg, #reg}
+
+static const struct mt8167_codec_reg_attr mt8167_codec_dump_reg_list[] = {
+	/* audio_top_sys */
+	DUMP_REG_ENTRY(ABB_AFE_CON0),
+	DUMP_REG_ENTRY(ABB_AFE_CON1),
+	DUMP_REG_ENTRY(ABB_AFE_CON2),
+	DUMP_REG_ENTRY(ABB_AFE_CON3),
+	DUMP_REG_ENTRY(ABB_AFE_CON4),
+	DUMP_REG_ENTRY(ABB_AFE_CON5),
+	DUMP_REG_ENTRY(ABB_AFE_CON6),
+	DUMP_REG_ENTRY(ABB_AFE_CON7),
+	DUMP_REG_ENTRY(ABB_AFE_CON8),
+	DUMP_REG_ENTRY(ABB_AFE_CON9),
+	DUMP_REG_ENTRY(ABB_AFE_CON10),
+	DUMP_REG_ENTRY(ABB_AFE_CON11),
+	DUMP_REG_ENTRY(ABB_AFE_STA0),
+	DUMP_REG_ENTRY(ABB_AFE_STA1),
+	DUMP_REG_ENTRY(ABB_AFE_STA2),
+	DUMP_REG_ENTRY(AFE_MON_DEBUG0),
+	DUMP_REG_ENTRY(AFE_MON_DEBUG1),
+	DUMP_REG_ENTRY(ABB_AFE_SDM_TEST),
+
+	/* apmixedsys */
+	DUMP_REG_ENTRY(AUDIO_CODEC_CON00),
+	DUMP_REG_ENTRY(AUDIO_CODEC_CON01),
+	DUMP_REG_ENTRY(AUDIO_CODEC_CON02),
+	DUMP_REG_ENTRY(AUDIO_CODEC_CON03),
+	DUMP_REG_ENTRY(AUDIO_CODEC_CON04),
+};
+
+static ssize_t mt8167_codec_debug_read(struct file *file,
+			char __user *user_buf,
+			size_t count, loff_t *pos)
+{
+	struct mt8167_codec_priv *codec_data = file->private_data;
+	ssize_t ret, i;
+	char *buf;
+	int n = 0;
+
+	if (*pos < 0 || !count)
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(mt8167_codec_dump_reg_list); i++) {
+		n += scnprintf(buf + n, count - n, "%s = 0x%x\n",
+			mt8167_codec_dump_reg_list[i].name,
+			snd_soc_component_read32(codec_data->codec,
+				mt8167_codec_dump_reg_list[i].offset));
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, pos, buf, n);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static const struct file_operations mt8167_codec_debug_ops = {
+	.open = simple_open,
+	.read = mt8167_codec_debug_read,
+	.llseek = default_llseek,
+};
+#endif
+
+static void mt8167_codec_init_regs(struct mt8167_codec_priv *codec_data)
+{
+	dev_dbg(codec_data->codec->dev, "%s\n", __func__);
+
+	/* disable chopper of uplink */
+	snd_soc_component_update_bits(codec_data->codec,
+		AUDIO_CODEC_CON00, BIT(17), 0x0);
+	snd_soc_component_update_bits(codec_data->codec,
+		AUDIO_CODEC_CON01, BIT(31), 0x0);
+
+	/* Audio buffer quasi-current  */
+	snd_soc_component_update_bits(codec_data->codec,
+			AUDIO_CODEC_CON02, GENMASK(31, 30), 0x0);
+
+	/* setup default gain */
+	/* +4dB for voice buf gain */
+	codec_data->pga_gain[LOUT_PGA_GAIN] = 0xB;
+	snd_soc_component_update_bits(codec_data->codec,
+		AUDIO_CODEC_CON02, GENMASK(12, 9),
+		(codec_data->pga_gain[LOUT_PGA_GAIN]) << 9);
+
+	mt8167_codec_hp_depop_setup(codec_data);
+}
+
+static struct regmap *mt8167_codec_get_regmap_from_dt(const char *phandle_name,
+		struct mt8167_codec_priv *codec_data)
+{
+	struct device_node *self_node = NULL, *node = NULL;
+	struct platform_device *platdev = NULL;
+	struct device *dev = codec_data->codec->dev;
+	struct regmap *regmap = NULL;
+
+	self_node = of_find_compatible_node(NULL, NULL,
+		"mediatek," MT8167_CODEC_NAME);
+	if (!self_node) {
+		dev_err(dev, "%s failed to find %s node\n",
+			__func__, MT8167_CODEC_NAME);
+		return NULL;
+	}
+	dev_err(dev, "%s found %s node\n", __func__, MT8167_CODEC_NAME);
+
+	node = of_parse_phandle(self_node, phandle_name, 0);
+	if (!node) {
+		dev_err(dev, "%s failed to find %s node\n",
+			__func__, phandle_name);
+		return NULL;
+	}
+	dev_err(dev, "%s found %s\n", __func__, phandle_name);
+
+	platdev = of_find_device_by_node(node);
+	if (!platdev) {
+		dev_err(dev, "%s failed to get platform device of %s\n",
+			__func__, phandle_name);
+		return NULL;
+	}
+	dev_err(dev, "%s found platform device of %s\n",
+		__func__, phandle_name);
+
+	regmap = dev_get_regmap(&platdev->dev, NULL);
+	if (regmap) {
+		dev_err(dev, "%s found regmap of %s\n", __func__, phandle_name);
+		return regmap;
+	}
+
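+	/* Not a plain regmap provider; fall back to treating the phandle
+	 * target as a syscon node.
+	 */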
+	regmap = syscon_regmap_lookup_by_phandle(dev->of_node, phandle_name);
+	if (!IS_ERR(regmap)) {
+		dev_err(dev, "%s found regmap of syscon node %s\n",
+			__func__, phandle_name);
+		return regmap;
+	}
+	dev_err(dev, "%s failed to get regmap of syscon node %s\n",
+		__func__, phandle_name);
+
+	return NULL;
+}
+
+static const char * const modules_dt_regmap_str[REGMAP_NUMS] = {
+	"mediatek,afe-regmap",
+//	"mediatek,apmixedsys-regmap",
+#ifdef CONFIG_MTK_SPEAKER
+	"mediatek,pwrap-regmap",
+#endif
+};
+
+static int mt8167_codec_parse_dt(struct mt8167_codec_priv *codec_data)
+{
+	struct device *dev = codec_data->codec->dev;
+	int ret = 0;
+	int i;
+	struct device_node *np;
+
+	for (i = 0; i < REGMAP_NUMS; i++) {
+		codec_data->regmap_modules[i] = mt8167_codec_get_regmap_from_dt(
+				modules_dt_regmap_str[i],
+				codec_data);
+		if (!codec_data->regmap_modules[i]) {
+			dev_err(dev, "%s failed to get %s\n",
+				__func__, modules_dt_regmap_str[i]);
+			return -EPROBE_DEFER;
+		}
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "mediatek,mt8516-apmixedsys");
+	if (!np)
+	    np = of_find_compatible_node(NULL, NULL, "mediatek,mt8167-apmixedsys");
+	codec_data->apmixedsys_reg_base =  of_iomap(np, 0);
+
+	ret = of_property_read_u32(dev->of_node, "mediatek,dmic-wire-mode",
+				&codec_data->dmic_wire_mode);
+	if (ret) {
+		dev_warn(dev, "%s fail to read dmic-wire-mode in node %s\n",
+			__func__, dev->of_node->full_name);
+		codec_data->dmic_wire_mode = DMIC_ONE_WIRE;
+	} else if ((codec_data->dmic_wire_mode != DMIC_ONE_WIRE) &&
+		codec_data->dmic_wire_mode != DMIC_TWO_WIRE) {
+		codec_data->dmic_wire_mode = DMIC_ONE_WIRE;
+	}
+
+	if (of_property_read_u32_array(dev->of_node, "mediatek,dmic-ch-phase",
+		codec_data->dmic_ch_phase, ARRAY_SIZE(codec_data->dmic_ch_phase))) {
+		for (i = 0; i < ARRAY_SIZE(codec_data->dmic_ch_phase); i++)
+			codec_data->dmic_ch_phase[i] = 0;
+	}
+	for (i = 0; i < ARRAY_SIZE(codec_data->dmic_ch_phase); i++) {
+		if (codec_data->dmic_ch_phase[i] >= DMIC_PHASE_NUM)
+			codec_data->dmic_ch_phase[i] = 0;
+	}
+
+	if (of_property_read_u32(dev->of_node, "mediatek,dmic-rate-mode",
+				&codec_data->dmic_rate_mode))
+		codec_data->dmic_rate_mode = DMIC_RATE_D1P625M;
+	else if ((codec_data->dmic_rate_mode != DMIC_RATE_D1P625M) &&
+		(codec_data->dmic_rate_mode != DMIC_RATE_D3P25M))
+		codec_data->dmic_rate_mode = DMIC_RATE_D1P625M;
+	return ret;
+}
+
+static int mt8167_codec_probe(struct snd_soc_component *codec)
+{
+	struct mt8167_codec_priv *codec_data = snd_soc_component_get_drvdata(codec);
+	int ret = 0;
+
+	dev_dbg(codec->dev, "%s\n", __func__);
+
+	codec_data->codec = codec;
+
+	ret = mt8167_codec_parse_dt(codec_data);
+	if (ret < 0)
+		return ret;
+
+	codec_data->clk = devm_clk_get(codec->dev, "bus");
+	if (IS_ERR(codec_data->clk)) {
+		dev_err(codec->dev, "%s devm_clk_get %s fail\n",
+			__func__, "bus");
+		return PTR_ERR(codec_data->clk);
+	}
+
+	ret = clk_prepare_enable(codec_data->clk);
+	if (ret)
+		return ret;
+
+	mt8167_codec_init_regs(codec_data);
+#ifdef CONFIG_DEBUG_FS
+	codec_data->debugfs = debugfs_create_file("mt8167_codec_regs",
+			S_IFREG | S_IRUGO,
+			NULL, codec_data, &mt8167_codec_debug_ops);
+#endif
+#ifdef CONFIG_MTK_SPEAKER
+	ret = mt6392_codec_probe(codec);
+	if (ret < 0)
+		clk_disable_unprepare(codec_data->clk);
+#endif
+	return ret;
+}
+
+static void mt8167_codec_remove(struct snd_soc_component *codec)
+{
+	struct mt8167_codec_priv *codec_data = snd_soc_component_get_drvdata(codec);
+
+	clk_disable_unprepare(codec_data->clk);
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove(codec_data->debugfs);
+#endif
+#ifdef CONFIG_MTK_SPEAKER
+	mt6392_codec_remove(codec);
+#endif
+}
+
+static int mt8167_codec_suspend(struct snd_soc_component *codec)
+{
+	struct mt8167_codec_priv *codec_data = snd_soc_component_get_drvdata(codec);
+
+	clk_disable_unprepare(codec_data->clk);
+	return 0;
+}
+
+static int mt8167_codec_resume(struct snd_soc_component *codec)
+{
+	struct mt8167_codec_priv *codec_data = snd_soc_component_get_drvdata(codec);
+
+	return clk_prepare_enable(codec_data->clk);
+}
+
+static int mt8167_codec_set_bias_level(struct snd_soc_component *codec,
+			enum snd_soc_bias_level level)
+{
+	dev_dbg(codec->dev, "%s curr bias_level %d, set bias_level: %d\n",
+		__func__, snd_soc_component_get_bias_level(codec), level);
+
+	switch (snd_soc_component_get_bias_level(codec)) {
+	case SND_SOC_BIAS_OFF:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		break;
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_ON:
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static struct snd_soc_component_driver mt8167_codec_driver = {
+	.probe = mt8167_codec_probe,
+	.remove = mt8167_codec_remove,
+	.suspend = mt8167_codec_suspend,
+	.resume = mt8167_codec_resume,
+	.set_bias_level = mt8167_codec_set_bias_level,
+	.controls = mt8167_codec_controls,
+	.num_controls = ARRAY_SIZE(mt8167_codec_controls),
+	.dapm_widgets = mt8167_codec_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(mt8167_codec_dapm_widgets),
+	.dapm_routes = mt8167_codec_dapm_routes,
+	.num_dapm_routes = ARRAY_SIZE(mt8167_codec_dapm_routes),
+};
+
+static int mt8167_codec_dev_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mt8167_codec_priv *codec_data = NULL;
+
+	dev_dbg(dev, "%s dev name %s\n", __func__, dev_name(dev));
+
+	if (dev->of_node) {
+		dev_set_name(dev, "%s", MT8167_CODEC_NAME);
+		dev_dbg(dev, "%s set dev name %s\n", __func__, dev_name(dev));
+	}
+
+	codec_data = devm_kzalloc(dev,
+			sizeof(struct mt8167_codec_priv), GFP_KERNEL);
+	if (!codec_data)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, codec_data);
+
+	/* get regmap of codec */
+	codec_data->regmap = devm_regmap_init(dev, NULL, codec_data,
+		&mt8167_codec_regmap_config);
+	if (IS_ERR(codec_data->regmap)) {
+		dev_err(dev, "%s failed to get regmap of codec\n", __func__);
+		return PTR_ERR(codec_data->regmap);
+	}
+
+	return devm_snd_soc_register_component(dev,
+			&mt8167_codec_driver, &mt8167_codec_dai, 1);
+}
+
+static int mt8167_codec_dev_remove(struct platform_device *pdev)
+{
+	struct mt8167_codec_priv *codec_data = dev_get_drvdata(&pdev->dev);
+
+	mt8167_codec_hp_depop_cleanup(codec_data);
+
+	return 0;
+}
+
+static const struct of_device_id mt8167_codec_dt_match[] = {
+	{.compatible = "mediatek," MT8167_CODEC_NAME,},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, mt8167_codec_dt_match);
+
+static struct platform_driver mt8167_codec_device_driver = {
+	.driver = {
+		   .name = MT8167_CODEC_NAME,
+		   .owner = THIS_MODULE,
+		   .of_match_table = mt8167_codec_dt_match,
+		   },
+	.probe = mt8167_codec_dev_probe,
+	.remove = mt8167_codec_dev_remove,
+};
+
+module_platform_driver(mt8167_codec_device_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("ASoC MT8167 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/mt8167-codec.h b/sound/soc/codecs/mt8167-codec.h
new file mode 100644
index 0000000..8d488a4
--- /dev/null
+++ b/sound/soc/codecs/mt8167-codec.h
@@ -0,0 +1,104 @@
+/*
+ * mt8167-codec.h  --  MT8167 ALSA SoC codec driver
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT8167_CODEC_H__
+#define __MT8167_CODEC_H__
+
+enum mt8167_codec_loopback_type {
+	CODEC_LOOPBACK_NONE = 0,
+	CODEC_LOOPBACK_AMIC_TO_SPK,
+	CODEC_LOOPBACK_AMIC_TO_HP,
+	CODEC_LOOPBACK_DMIC_TO_SPK,
+	CODEC_LOOPBACK_DMIC_TO_HP,
+	CODEC_LOOPBACK_HEADSET_MIC_TO_SPK,
+	CODEC_LOOPBACK_HEADSET_MIC_TO_HP,
+};
+
+#define AFE_OFFSET               (0xA0000000)
+#define APMIXED_OFFSET           (0x0B000000)
+
+#define AFE_REG(reg)             ((reg) | AFE_OFFSET)
+#define APMIXED_REG(reg)         ((reg) | APMIXED_OFFSET)
+
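+/*
+ * AFE_OFFSET and APMIXED_OFFSET are address-space tags consumed by the
+ * regmap dispatch helpers in mt8167-codec.c; they are masked off before
+ * an access reaches the hardware.
+ */
+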
+/* audio_top_sys */
+#define AUDIO_TOP_CON0           AFE_REG(0x0000)
+#define AFE_DAC_CON0             AFE_REG(0x0010)
+#define AFE_DAC_CON1             AFE_REG(0x0014)
+#define AFE_CONN1                AFE_REG(0x0024)
+#define AFE_CONN2                AFE_REG(0x0028)
+#define AFE_I2S_CON1             AFE_REG(0x0034)
+#define AFE_DL1_BASE             AFE_REG(0x0040)
+#define AFE_DL1_END              AFE_REG(0x0048)
+#define AFE_CONN_24BIT           AFE_REG(0x006C)
+#define AFE_ADDA_DL_SRC2_CON0    AFE_REG(0x0108)
+#define AFE_ADDA_DL_SRC2_CON1    AFE_REG(0x010C)
+#define AFE_ADDA_UL_DL_CON0      AFE_REG(0x0124)
+#define AFE_ADDA_PREDIS_CON0     AFE_REG(0x0260)
+#define AFE_ADDA_PREDIS_CON1     AFE_REG(0x0264)
+#define AFE_MEMIF_PBUF_SIZE      AFE_REG(0x03D8)
+#define ABB_AFE_CON0             AFE_REG(0x0F00)
+#define ABB_AFE_CON1             AFE_REG(0x0F04)
+#define ABB_AFE_CON2             AFE_REG(0x0F08)
+#define ABB_AFE_CON3             AFE_REG(0x0F0C)
+#define ABB_AFE_CON4             AFE_REG(0x0F10)
+#define ABB_AFE_CON5             AFE_REG(0x0F14)
+#define ABB_AFE_CON6             AFE_REG(0x0F18)
+#define ABB_AFE_CON7             AFE_REG(0x0F1C)
+#define ABB_AFE_CON8             AFE_REG(0x0F20)
+#define ABB_AFE_CON9             AFE_REG(0x0F24)
+#define ABB_AFE_CON10            AFE_REG(0x0F28)
+#define ABB_AFE_CON11            AFE_REG(0x0F2C)
+#define ABB_AFE_STA0             AFE_REG(0x0F30)
+#define ABB_AFE_STA1             AFE_REG(0x0F34)
+#define ABB_AFE_STA2             AFE_REG(0x0F38)
+#define AFE_MON_DEBUG0           AFE_REG(0x0F44)
+#define AFE_MON_DEBUG1           AFE_REG(0x0F48)
+#define ABB_AFE_SDM_TEST         AFE_REG(0x0F4C)
+
+/* apmixedsys */
+#define AUDIO_CODEC_CON00        APMIXED_REG(0x0700)
+#define AUDIO_CODEC_CON01        APMIXED_REG(0x0704)
+#define AUDIO_CODEC_CON02        APMIXED_REG(0x0708)
+#define AUDIO_CODEC_CON03        APMIXED_REG(0x070C)
+#define AUDIO_CODEC_CON04        APMIXED_REG(0x0710)
+
+/* ABB_AFE_CON9 */
+#define ABB_AFE_CON9_TWO_WIRE_EN BIT(8)
+#define ABB_AFE_CON9_DIG_MIC_EN  BIT(4)
+#define ABB_AFE_CON9_D3P25M_SEL  BIT(0)
+
+/* ABB_AFE_CON11 */
+#define ABB_AFE_CON11_DC_CTRL         BIT(9)
+#define ABB_AFE_CON11_TOP_CTRL        BIT(8)
+#define ABB_AFE_CON11_DC_CTRL_STATUS  BIT(1)
+#define ABB_AFE_CON11_TOP_CTRL_STATUS BIT(0)
+
+/* AUDIO_CODEC_CON00 */
+#define AUDIO_CODEC_CON00_AUDULL_VREF24_EN BIT(20)
+#define AUDIO_CODEC_CON00_AUDULL_VCM14_EN  BIT(19)
+#define AUDIO_CODEC_CON00_AUDULR_VREF24_EN BIT(2)
+#define AUDIO_CODEC_CON00_AUDULR_VCM14_EN  BIT(1)
+
+/* AUDIO_CODEC_CON02 */
+#define AUDIO_CODEC_CON02_ABUF_INSHORT BIT(29)
+
+/* AUDIO_CODEC_CON03 */
+#define AUDIO_CODEC_CON03_DIG_MIC_EN   BIT(28)
+#define AUDIO_CODEC_CON03_SLEW_RATE_11 (0x3 << 22)
+#define AUDIO_CODEC_CON03_SLEW_RATE_10 (0x2 << 22)
+#define AUDIO_CODEC_CON03_SLEW_RATE_01 (0x1 << 22)
+#define AUDIO_CODEC_CON03_SLEW_RATE_00 (0x0 << 22)
+
+#endif
diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
index 3be0e14..ec5f064 100644
--- a/sound/soc/codecs/pcm186x.c
+++ b/sound/soc/codecs/pcm186x.c
@@ -38,8 +38,15 @@
 	struct regulator_bulk_data supplies[PCM186x_NUM_SUPPLIES];
 	unsigned int sysclk;
 	unsigned int tdm_offset;
+	unsigned int tdm_additional_offset;
+	unsigned int tdm_max_channels;
 	bool is_tdm_mode;
 	bool is_master_mode;
+	unsigned int  adc1_left_input_select;
+	unsigned int  adc1_right_input_select;
+	unsigned int  adc2_left_input_select;
+	unsigned int  adc2_right_input_select;
+	unsigned int  apga_gain_control;
 };
 
 static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 50, 0);
@@ -50,6 +57,8 @@
 			   pcm186x_pga_tlv),
 };
 
+static const DECLARE_TLV_DB_SCALE(pcm1865_dpga_tlv, 0, 50, 0);
+
 static const struct snd_kcontrol_new pcm1865_snd_controls[] = {
 	SOC_DOUBLE_R_S_TLV("ADC1 Capture Volume", PCM186X_PGA_VAL_CH1_L,
 			   PCM186X_PGA_VAL_CH1_R, 0, -24, 80, 7, 0,
@@ -57,6 +66,16 @@
 	SOC_DOUBLE_R_S_TLV("ADC2 Capture Volume", PCM186X_PGA_VAL_CH2_L,
 			   PCM186X_PGA_VAL_CH2_R, 0, -24, 80, 7, 0,
 			   pcm186x_pga_tlv),
+	SOC_DOUBLE_R_S_TLV("ADC1 Digital Capture Volume",
+			   PCM186X_DPGA_VAL_CH1_L,
+			   PCM186X_DPGA_VAL_CH1_R,
+			   0, 0x28, 0x37, 7, 0,
+			   pcm1865_dpga_tlv),
+	SOC_DOUBLE_R_S_TLV("ADC2 Digital Capture Volume",
+			   PCM186X_DPGA_VAL_CH2_L,
+			   PCM186X_DPGA_VAL_CH2_R,
+			   0, 0x28, 0x37, 7, 0,
+			   pcm1865_dpga_tlv),
 };
 
 static const unsigned int pcm186x_adc_input_channel_sel_value[] = {
@@ -67,46 +86,46 @@
 
 static const char * const pcm186x_adcl_input_channel_sel_text[] = {
 	"No Select",
-	"VINL1[SE]",					/* Default for ADC1L */
-	"VINL2[SE]",					/* Default for ADC2L */
-	"VINL2[SE] + VINL1[SE]",
-	"VINL3[SE]",
-	"VINL3[SE] + VINL1[SE]",
-	"VINL3[SE] + VINL2[SE]",
-	"VINL3[SE] + VINL2[SE] + VINL1[SE]",
-	"VINL4[SE]",
-	"VINL4[SE] + VINL1[SE]",
-	"VINL4[SE] + VINL2[SE]",
-	"VINL4[SE] + VINL2[SE] + VINL1[SE]",
-	"VINL4[SE] + VINL3[SE]",
-	"VINL4[SE] + VINL3[SE] + VINL1[SE]",
-	"VINL4[SE] + VINL3[SE] + VINL2[SE]",
-	"VINL4[SE] + VINL3[SE] + VINL2[SE] + VINL1[SE]",
-	"{VIN1P, VIN1M}[DIFF]",
-	"{VIN4P, VIN4M}[DIFF]",
-	"{VIN1P, VIN1M}[DIFF] + {VIN4P, VIN4M}[DIFF]"
+	"VINL1_SE",					/* Default for ADC1L */
+	"VINL2_SE",					/* Default for ADC2L */
+	"VINL2_SE + VINL1_SE",
+	"VINL3_SE",
+	"VINL3_SE + VINL1_SE",
+	"VINL3_SE + VINL2_SE",
+	"VINL3_SE + VINL2_SE + VINL1_SE",
+	"VINL4_SE",
+	"VINL4_SE + VINL1_SE",
+	"VINL4_SE + VINL2_SE",
+	"VINL4_SE + VINL2_SE + VINL1_SE",
+	"VINL4_SE + VINL3_SE",
+	"VINL4_SE + VINL3_SE + VINL1_SE",
+	"VINL4_SE + VINL3_SE + VINL2_SE",
+	"VINL4_SE + VINL3_SE + VINL2_SE + VINL1_SE",
+	"VIN1P_DIFF - VIN1M_DIFF",
+	"VIN4P_DIFF - VIN4M_DIFF",
+	"VIN1P_DIFF - VIN1M_DIFF + VIN4P_DIFF - VIN4M_DIFF"
 };
 
 static const char * const pcm186x_adcr_input_channel_sel_text[] = {
 	"No Select",
-	"VINR1[SE]",					/* Default for ADC1R */
-	"VINR2[SE]",					/* Default for ADC2R */
-	"VINR2[SE] + VINR1[SE]",
-	"VINR3[SE]",
-	"VINR3[SE] + VINR1[SE]",
-	"VINR3[SE] + VINR2[SE]",
-	"VINR3[SE] + VINR2[SE] + VINR1[SE]",
-	"VINR4[SE]",
-	"VINR4[SE] + VINR1[SE]",
-	"VINR4[SE] + VINR2[SE]",
-	"VINR4[SE] + VINR2[SE] + VINR1[SE]",
-	"VINR4[SE] + VINR3[SE]",
-	"VINR4[SE] + VINR3[SE] + VINR1[SE]",
-	"VINR4[SE] + VINR3[SE] + VINR2[SE]",
-	"VINR4[SE] + VINR3[SE] + VINR2[SE] + VINR1[SE]",
-	"{VIN2P, VIN2M}[DIFF]",
-	"{VIN3P, VIN3M}[DIFF]",
-	"{VIN2P, VIN2M}[DIFF] + {VIN3P, VIN3M}[DIFF]"
+	"VINR1_SE",					/* Default for ADC1R */
+	"VINR2_SE",					/* Default for ADC2R */
+	"VINR2_SE + VINR1_SE",
+	"VINR3_SE",
+	"VINR3_SE + VINR1_SE",
+	"VINR3_SE + VINR2_SE",
+	"VINR3_SE + VINR2_SE + VINR1_SE",
+	"VINR4_SE",
+	"VINR4_SE + VINR1_SE",
+	"VINR4_SE + VINR2_SE",
+	"VINR4_SE + VINR2_SE + VINR1_SE",
+	"VINR4_SE + VINR3_SE",
+	"VINR4_SE + VINR3_SE + VINR1_SE",
+	"VINR4_SE + VINR3_SE + VINR2_SE",
+	"VINR4_SE + VINR3_SE + VINR2_SE + VINR1_SE",
+	"VIN2P_DIFF - VIN2M_DIFF",
+	"VIN3P_DIFF - VIN3M_DIFF",
+	"VIN2P_DIFF - VIN2M_DIFF + VIN3P_DIFF - VIN3M_DIFF"
 };
 
 static const struct soc_enum pcm186x_adc_input_channel_sel[] = {
@@ -154,11 +173,8 @@
 	SND_SOC_DAPM_MUX("ADC Right Capture Source", SND_SOC_NOPM, 0, 0,
 			 &pcm186x_adc_mux_controls[1]),
 
-	/*
-	 * Put the codec into SLEEP mode when not in use, allowing the
-	 * Energysense mechanism to operate.
-	 */
-	SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1,  1),
+	/* Put the codec into Digital standby mode when not in use. */
+	SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 0,  1),
 };
 
 static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
@@ -180,12 +196,9 @@
 	SND_SOC_DAPM_MUX("ADC2 Right Capture Source", SND_SOC_NOPM, 0, 0,
 			 &pcm186x_adc_mux_controls[3]),
 
-	/*
-	 * Put the codec into SLEEP mode when not in use, allowing the
-	 * Energysense mechanism to operate.
-	 */
-	SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1,  1),
-	SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1,  1),
+	/* Put the codec into Digital standby mode when not in use. */
+	SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 0,  1),
+	SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 0,  1),
 };
 
 static const struct snd_soc_dapm_route pcm1863_dapm_routes[] = {
@@ -258,6 +271,13 @@
 	{ "ADC2", NULL, "ADC2 Right Capture Source" },
 };
 
+static const unsigned int pcm1865_digital_gain_registers[] = {
+	PCM186X_DPGA_VAL_CH1_L,
+	PCM186X_DPGA_VAL_CH1_R,
+	PCM186X_DPGA_VAL_CH2_L,
+	PCM186X_DPGA_VAL_CH2_R,
+};
+
 static int pcm186x_hw_params(struct snd_pcm_substream *substream,
 			     struct snd_pcm_hw_params *params,
 			     struct snd_soc_dai *dai)
@@ -313,6 +333,12 @@
 	div_lrck = width * channels;
 
 	if (priv->is_tdm_mode) {
+		/* Limit the channel count to the number of allocated TDM
+		 * slots.
+		 */
+		if (priv->tdm_max_channels < channels)
+			channels = priv->tdm_max_channels;
+
 		/* Select TDM transmission data */
 		switch (channels) {
 		case 2:
@@ -348,8 +374,10 @@
 			"%s() master_clk=%u div_bck=%u div_lrck=%u\n",
 			__func__, priv->sysclk, div_bck, div_lrck);
 
-		snd_soc_component_write(component, PCM186X_BCK_DIV, div_bck - 1);
-		snd_soc_component_write(component, PCM186X_LRK_DIV, div_lrck - 1);
+		snd_soc_component_write(component,
+					PCM186X_BCK_DIV, div_bck - 1);
+		snd_soc_component_write(component,
+					PCM186X_LRK_DIV, div_lrck - 1);
 	}
 
 	return 0;
@@ -364,6 +392,8 @@
 
 	dev_dbg(component->dev, "%s() format=0x%x\n", __func__, format);
 
+	priv->tdm_additional_offset = 0;
+
 	/* set master/slave audio interface */
 	switch (format & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBM_CFM:
@@ -400,7 +430,7 @@
 		pcm_cfg = PCM186X_PCM_CFG_FMT_LEFTJ;
 		break;
 	case SND_SOC_DAIFMT_DSP_A:
-		priv->tdm_offset += 1;
+		priv->tdm_additional_offset = 1;
 		/* Fall through... DSP_A uses the same basic config as DSP_B
 		 * except we need to shift the TDM output by one BCK cycle
 		 */
@@ -414,12 +444,10 @@
 	}
 
 	snd_soc_component_update_bits(component, PCM186X_CLK_CTRL,
-			    PCM186X_CLK_CTRL_MST_MODE, clk_ctrl);
-
-	snd_soc_component_write(component, PCM186X_TDM_TX_OFFSET, priv->tdm_offset);
+				PCM186X_CLK_CTRL_MST_MODE, clk_ctrl);
 
 	snd_soc_component_update_bits(component, PCM186X_PCM_CFG,
-			    PCM186X_PCM_CFG_FMT_MASK, pcm_cfg);
+				PCM186X_PCM_CFG_FMT_MASK, pcm_cfg);
 
 	return 0;
 }
@@ -449,6 +477,7 @@
 	}
 
 	tdm_offset = first_slot * slot_width;
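+	/* DSP_A framing shifts data one BCK cycle after the frame sync; the
+	 * extra cycle recorded when the DAI format is configured is added on
+	 * top of the slot offset here.
+	 */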
+	tdm_offset += priv->tdm_additional_offset;
 
 	if (tdm_offset > 255) {
 		dev_err(component->dev, "tdm tx slot selection out of bounds\n");
@@ -456,6 +485,10 @@
 	}
 
 	priv->tdm_offset = tdm_offset;
+	priv->tdm_max_channels = last_slot - first_slot + 1;
+
+	snd_soc_component_write(component, PCM186X_TDM_TX_OFFSET,
+				priv->tdm_offset);
 
 	return 0;
 }
@@ -498,7 +531,7 @@
 	.capture = {
 		 .stream_name = "Capture",
 		 .channels_min = 1,
-		 .channels_max = 4,
+		 .channels_max = 8,
 		 .rates = PCM186X_RATES,
 		 .formats = PCM186X_FORMATS,
 	 },
@@ -561,7 +594,8 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
+		if (snd_soc_component_get_bias_level(component) ==
+		    SND_SOC_BIAS_OFF)
 			pcm186x_power_on(component);
 		break;
 	case SND_SOC_BIAS_OFF:
@@ -572,7 +606,7 @@
 	return 0;
 }
 
-static struct snd_soc_component_driver soc_codec_dev_pcm1863 = {
+static const struct snd_soc_component_driver soc_codec_dev_pcm1863 = {
 	.set_bias_level		= pcm186x_set_bias_level,
 	.controls		= pcm1863_snd_controls,
 	.num_controls		= ARRAY_SIZE(pcm1863_snd_controls),
@@ -586,7 +620,7 @@
 	.non_legacy_dai_naming	= 1,
 };
 
-static struct snd_soc_component_driver soc_codec_dev_pcm1865 = {
+static const struct snd_soc_component_driver soc_codec_dev_pcm1865 = {
 	.set_bias_level		= pcm186x_set_bias_level,
 	.controls		= pcm1865_snd_controls,
 	.num_controls		= ARRAY_SIZE(pcm1865_snd_controls),
@@ -653,19 +687,21 @@
 
 	dev_set_drvdata(dev, priv);
 	priv->regmap = regmap;
+	/* the maximum number of channels that can be output on TDM is 6 */
+	priv->tdm_max_channels = 6;
 
 	for (i = 0; i < ARRAY_SIZE(priv->supplies); i++)
 		priv->supplies[i].supply = pcm186x_supply_names[i];
 
 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(priv->supplies),
-				      priv->supplies);
+				priv->supplies);
 	if (ret) {
 		dev_err(dev, "failed to request supplies: %d\n", ret);
 		return ret;
 	}
 
 	ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
-				    priv->supplies);
+				priv->supplies);
 	if (ret) {
 		dev_err(dev, "failed enable supplies: %d\n", ret);
 		return ret;
@@ -678,6 +714,94 @@
 		return ret;
 	}
 
+	ret = of_property_read_u32(dev->of_node, "adc1-left-input-select",
+				&priv->adc1_left_input_select);
+	if (!ret) {
+		ret = regmap_write(regmap, PCM186X_ADC1_INPUT_SEL_L,
+				0x40 | priv->adc1_left_input_select);
+		if (ret) {
+			dev_err(dev, "failed to write device: %d\n", ret);
+			return ret;
+		}
+	} else {
+		dev_err(dev, "adc1 left input not selected\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "adc1-right-input-select",
+				&priv->adc1_right_input_select);
+	if (!ret) {
+		ret = regmap_write(regmap, PCM186X_ADC1_INPUT_SEL_R,
+				0x40 | priv->adc1_right_input_select);
+		if (ret) {
+			dev_err(dev, "failed to write device: %d\n", ret);
+			return ret;
+		}
+	} else {
+		dev_err(dev, "adc1 right input not selected\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "adc2-left-input-select",
+				&priv->adc2_left_input_select);
+	if (!ret) {
+		ret = regmap_write(regmap, PCM186X_ADC2_INPUT_SEL_L,
+				0x40 | priv->adc2_left_input_select);
+		if (ret) {
+			dev_err(dev, "failed to write device: %d\n", ret);
+			return ret;
+		}
+	} else {
+		dev_err(dev, "adc2 left input not selected\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "adc2-right-input-select",
+				&priv->adc2_right_input_select);
+	if (!ret) {
+		ret = regmap_write(regmap, PCM186X_ADC2_INPUT_SEL_R,
+				0x40 | priv->adc2_right_input_select);
+		if (ret) {
+			dev_err(dev, "failed to write device: %d\n", ret);
+			return ret;
+		}
+	} else {
+		dev_err(dev, "adc2 right input not selected\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "apga-gain-control",
+				&priv->apga_gain_control);
+	if (!ret) {
+		ret = regmap_write(regmap, PCM186X_DPGA_GAIN_CTRL,
+						priv->apga_gain_control);
+		if (ret) {
+			dev_err(dev, "failed to write device: %d\n", ret);
+			return ret;
+		}
+	} else {
+		dev_info(dev, "apga-gain-control not found, using default\n");
+	}
+
+	/* Setting the digital gains to 0dB to be in the documentation range */
+	for (i = 0; i < ARRAY_SIZE(pcm1865_digital_gain_registers); i++) {
+		ret = regmap_write(regmap, pcm1865_digital_gain_registers[i],
+				PCM186X_DPGA_0DB);
+		if (ret) {
+			dev_err(dev, "failed to write val at addr 0x%x: %d\n",
+				pcm1865_digital_gain_registers[i], ret);
+			return ret;
+		}
+	}
+
+	/* standby */
+	ret = regmap_write(regmap, PCM186X_POWER_CTRL, 0x70 | 0x01);
+	if (ret) {
+		dev_err(dev, "failed to write device: %d\n", ret);
+		return ret;
+	}
+
 	ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
 				     priv->supplies);
 	if (ret) {
@@ -688,14 +812,14 @@
 	switch (type) {
 	case PCM1865:
 	case PCM1864:
-		ret = devm_snd_soc_register_component(dev, &soc_codec_dev_pcm1865,
-					     &pcm1865_dai, 1);
+		ret = devm_snd_soc_register_component(dev,
+				&soc_codec_dev_pcm1865, &pcm1865_dai, 1);
 		break;
 	case PCM1863:
 	case PCM1862:
 	default:
-		ret = devm_snd_soc_register_component(dev, &soc_codec_dev_pcm1863,
-					     &pcm1863_dai, 1);
+		ret = devm_snd_soc_register_component(dev,
+				&soc_codec_dev_pcm1863, &pcm1863_dai, 1);
 	}
 	if (ret) {
 		dev_err(dev, "failed to register CODEC: %d\n", ret);
diff --git a/sound/soc/codecs/pcm186x.h b/sound/soc/codecs/pcm186x.h
index bb3f0c4..dc315d2 100644
--- a/sound/soc/codecs/pcm186x.h
+++ b/sound/soc/codecs/pcm186x.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Texas Instruments PCM186x Universal Audio ADC
  *
@@ -211,6 +211,9 @@
 #define PCM186X_MMAP_STAT_R_REQ		BIT(1)
 #define PCM186X_MMAP_STAT_W_REQ		BIT(0)
 
+/* PCM186X_DPGA_VAL */
+#define PCM186X_DPGA_0DB			0x28
+
 extern const struct regmap_config pcm186x_regmap;
 
 int pcm186x_probe(struct device *dev, enum pcm186x_type type, int irq,
diff --git a/sound/soc/codecs/tlv320adc3101-i2c.c b/sound/soc/codecs/tlv320adc3101-i2c.c
new file mode 100644
index 0000000..68aa1c2
--- /dev/null
+++ b/sound/soc/codecs/tlv320adc3101-i2c.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/sound/soc/codecs/tlv320adc3101-i2c.c
+ *
+ * Copyright 2019 Baylibre
+ *
+ * Author: Nicolas Belin <nbelin@baylibre.com>
+ *
+ * Based on sound/soc/codecs/tlv320aic32x4.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+
+#include "tlv320adc3101.h"
+
+static int adc3101_i2c_probe(struct i2c_client *i2c,
+			     const struct i2c_device_id *id)
+{
+	struct regmap *regmap;
+	struct regmap_config config;
+
+	config = adc3101_regmap_config;
+	config.reg_bits = 8;
+	config.val_bits = 8;
+
+	regmap = devm_regmap_init_i2c(i2c, &config);
+	return adc3101_probe(&i2c->dev, regmap);
+}
+
+static int adc3101_i2c_remove(struct i2c_client *i2c)
+{
+	return adc3101_remove(&i2c->dev);
+}
+
+static const struct i2c_device_id adc3101_i2c_id[] = {
+	{ "tlv320adc3101", 0 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, adc3101_i2c_id);
+
+static const struct of_device_id adc3101_of_id[] = {
+	{ .compatible = "ti,tlv320adc3101", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, adc3101_of_id);
+
+static struct i2c_driver adc3101_i2c_driver = {
+	.driver = {
+		.name = "tlv320adc3101",
+		.of_match_table = adc3101_of_id,
+	},
+	.probe =    adc3101_i2c_probe,
+	.remove =   adc3101_i2c_remove,
+	.id_table = adc3101_i2c_id,
+};
+
+module_i2c_driver(adc3101_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC TLV320ADC3101 codec driver I2C");
+MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tlv320adc3101.c b/sound/soc/codecs/tlv320adc3101.c
new file mode 100644
index 0000000..69a2b81
--- /dev/null
+++ b/sound/soc/codecs/tlv320adc3101.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/sound/soc/codecs/tlv320adc3101.c
+ *
+ * Copyright 2019 Baylibre
+ *
+ * Author: Nicolas Belin <nbelin@baylibre.com>
+ *
+ * Based on sound/soc/codecs/tlv320aic32x4.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "tlv320adc3101.h"
+
+struct adc3101_rate_divs {
+	u32 mclk;
+	u32 rate;
+	u8 nadc;
+	u8 madc;
+	u8 aosr;
+};
+
+struct adc3101_priv {
+	struct regmap *regmap;
+	u32 sysclk;
+	int rstn_gpio;
+	struct clk *mclk;
+	u32 tdm_offset;
+	u32 tdm_additional_offset;
+	u32 right_pin_select;
+	u32 left_pin_select;
+	u32 fmt;
+	struct regulator *supply_iov;
+	struct regulator *supply_dv;
+	struct regulator *supply_av;
+	unsigned int minus6db_left_input;
+	unsigned int minus6db_right_input;
+	struct device *dev;
+};
+
+static int adc3101_get_adc_left_input_switch(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component =
+				snd_soc_kcontrol_component(kcontrol);
+	struct adc3101_priv *adc3101 = snd_soc_component_get_drvdata(component);
+
+	ucontrol->value.integer.value[0] = adc3101->minus6db_left_input;
+	return 0;
+}
+
+static int adc3101_put_adc_left_input_switch(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component =
+				snd_soc_kcontrol_component(kcontrol);
+	struct adc3101_priv *adc3101 = snd_soc_component_get_drvdata(component);
+	unsigned int minus6db_left_input = ucontrol->value.integer.value[0];
+
+	if (minus6db_left_input > 1)
+		return -EINVAL;
+
+	adc3101->minus6db_left_input = minus6db_left_input;
+	snd_soc_component_update_bits(component, ADC3101_LPGAPIN,
+		ADC3101_PGAPIN_6DB_MASK,
+		minus6db_left_input ? ADC3101_PGAPIN_6DB_MASK : 0);
+	snd_soc_component_update_bits(component, ADC3101_LPGAPIN2,
+		ADC3101_PGAPIN2_6DB_MASK,
+		minus6db_left_input ? ADC3101_PGAPIN2_6DB_MASK : 0);
+
+	return 0;
+}
+
+static int adc3101_get_adc_right_input_switch(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component =
+				snd_soc_kcontrol_component(kcontrol);
+	struct adc3101_priv *adc3101 = snd_soc_component_get_drvdata(component);
+
+	ucontrol->value.integer.value[0] = adc3101->minus6db_right_input;
+	return 0;
+}
+
+static int adc3101_put_adc_right_input_switch(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component =
+				snd_soc_kcontrol_component(kcontrol);
+	struct adc3101_priv *adc3101 = snd_soc_component_get_drvdata(component);
+	unsigned int minus6db_right_input = ucontrol->value.integer.value[0];
+
+	if (minus6db_right_input > 1)
+		return -EINVAL;
+
+	adc3101->minus6db_right_input = minus6db_right_input;
+	snd_soc_component_update_bits(component, ADC3101_RPGAPIN,
+		ADC3101_PGAPIN_6DB_MASK,
+		minus6db_right_input ? ADC3101_PGAPIN_6DB_MASK : 0);
+	snd_soc_component_update_bits(component, ADC3101_RPGAPIN2,
+		ADC3101_PGAPIN2_6DB_MASK,
+		minus6db_right_input ? ADC3101_PGAPIN2_6DB_MASK : 0);
+
+	return 0;
+}
+
+/* 0dB min, 0.5dB steps */
+static DECLARE_TLV_DB_SCALE(tlv_step_0_5, 0, 50, 0);
+/* -12dB min, 0.5dB steps */
+static DECLARE_TLV_DB_SCALE(tlv_adc_vol, -1200, 50, 0);
+
+static const struct snd_kcontrol_new adc3101_snd_controls[] = {
+	SOC_SINGLE("ADC Mute Left Switch", ADC3101_FADCVOL, 7, 1, 0),
+	SOC_SINGLE("ADC Mute Right Switch", ADC3101_FADCVOL, 3, 1, 0),
+	SOC_DOUBLE_R_S_TLV("ADC Level Volume", ADC3101_LADCVOL,
+			ADC3101_RADCVOL, 0, -0x18, 0x28, 6, 0, tlv_adc_vol),
+	SOC_DOUBLE_R_TLV("PGA Level Volume", ADC3101_LAPGAVOL,
+			ADC3101_RAPGAVOL, 0, 0x50, 0, tlv_step_0_5),
+	SOC_SINGLE_BOOL_EXT("Minus 6dB ADC Left input Switch", 0,
+			    adc3101_get_adc_left_input_switch,
+			    adc3101_put_adc_left_input_switch),
+	SOC_SINGLE_BOOL_EXT("Minus 6dB ADC Right input Switch", 0,
+			    adc3101_get_adc_right_input_switch,
+			    adc3101_put_adc_right_input_switch),
+};
+
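+/*
+ * Divider table: the part derives the sample rate as
+ * fs = MCLK / (NADC * MADC * AOSR), so every row below satisfies that
+ * identity (AOSR = 0 encodes an oversampling ratio of 256).
+ */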
+static const struct adc3101_rate_divs adc3101_divs[] = {
+
+	/*
+	 * Constraints:
+	 *  - MCLK is 50 MHz max
+	 *  - 33 MHz max after the NADC divider
+	 *  - 2.8 MHz < AOSR * ADC_fs < 6.2 MHz
+	 */
+
+	/* 8k rate */
+	{2048000, 8000, 1, 1, 0},	/* 256 mclk_fs, AOSR=0 means 256 */
+
+	/* 11.025k rate */
+	{2822400, 11025, 1, 1, 0},	/* 256 mclk_fs, AOSR=0 means 256 */
+
+	/* 16k rate */
+	{4096000, 16000, 1, 1, 0},	/* 256 mclk_fs, AOSR=0 means 256 */
+
+	/* 22.05k rate */
+	{5644800, 22050, 1, 1, 0},	/* 256 mclk_fs, AOSR=0 means 256 */
+
+	/* 32k rate */
+	{4096000, 32000, 1, 1, 128},	/* 128 mclk_fs */
+	{8192000, 32000, 1, 2, 128},	/* 256 mclk_fs */
+	{16384000, 32000, 2, 2, 128},	/* 512 mclk_fs */
+
+	/* 44.1k rate */
+	{2822400, 44100, 1, 1, 64},	/* 64 mclk_fs */
+	{5644800, 44100, 1, 1, 128},	/* 128 mclk_fs */
+	{11289600, 44100, 1, 2, 128},	/* 256 mclk_fs */
+	{22579200, 44100, 2, 2, 128},	/* 512 mclk_fs */
+
+	/* 48k rate */
+	{3072000, 48000, 1, 1, 64},	/* 64 mclk_fs */
+	{6144000, 48000, 1, 1, 128},	/* 128 mclk_fs */
+	{12288000, 48000, 1, 2, 128},	/* 256 mclk_fs */
+	{24576000, 48000, 2, 2, 128},	/* 512 mclk_fs */
+
+	/* 96k rate */
+	{24576000, 96000, 2, 2, 64},	/* 256 mclk_fs */
+
+};
+
+static const struct snd_soc_dapm_widget adc3101_dapm_widgets[] = {
+	SND_SOC_DAPM_ADC("Right ADC", "Right Capture",
+			ADC3101_ADC_DIGITAL, 6, 0),
+	SND_SOC_DAPM_ADC("Left ADC", "Left Capture", ADC3101_ADC_DIGITAL, 7, 0),
+	SND_SOC_DAPM_INPUT("IN1_L"),
+	SND_SOC_DAPM_INPUT("IN1_R"),
+};
+
+static const struct snd_soc_dapm_route adc3101_dapm_routes[] = {
+	/* Right Input */
+	{"Right ADC", NULL, "IN1_R"},
+	/* Left Input */
+	{"Left ADC", NULL, "IN1_L"},
+};
+
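+/*
+ * Registers are paged, with register 0 of every page acting as the
+ * page-select register; the range config below flattens the pages into
+ * a single linear address space for the rest of the driver.
+ */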
+static const struct regmap_range_cfg adc3101_regmap_pages[] = {
+	{
+		.name = "Pages",
+		.selector_reg = 0,
+		.selector_mask  = 0xff,
+		.window_start = 0,
+		.window_len = 128,
+		.range_min = 0,
+		.range_max = ADC3101_APGAFLAGS,
+	},
+};
+
+const struct regmap_config adc3101_regmap_config = {
+	.max_register = ADC3101_APGAFLAGS,
+	.ranges = adc3101_regmap_pages,
+	.num_ranges = ARRAY_SIZE(adc3101_regmap_pages),
+};
+EXPORT_SYMBOL(adc3101_regmap_config);
+
+static inline int adc3101_get_divs(int mclk, int rate)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(adc3101_divs); i++) {
+		if ((adc3101_divs[i].rate == rate)
+			&& (adc3101_divs[i].mclk == mclk)) {
+			return i;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int adc3101_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+				  int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_component *component = codec_dai->component;
+	struct adc3101_priv *adc3101 = snd_soc_component_get_drvdata(component);
+
+	dev_dbg(component->dev, "frequency to set DAI system clock=%d\n", freq);
+	adc3101->sysclk = freq;
+	return 0;
+}
+
+static int adc3101_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+				unsigned int rx_mask, int slots, int slot_width)
+{
+	struct snd_soc_component *component = dai->component;
+	struct adc3101_priv *adc3101 = snd_soc_component_get_drvdata(component);
+	unsigned int first_slot, last_slot, tdm_offset;
+
+	dev_dbg(component->dev,
+		"%s() tx_mask=0x%x rx_mask=0x%x slots=%d slot_width=%d\n",
+		__func__, tx_mask, rx_mask, slots, slot_width);
+
+	if (!tx_mask) {
+		dev_err(component->dev, "tdm tx mask must not be 0\n");
+		return -EINVAL;
+	}
+
+	first_slot = __ffs(tx_mask);
+	last_slot = __fls(tx_mask);
+
+	if (last_slot - first_slot != hweight32(tx_mask) - 1) {
+		dev_err(component->dev, "tdm tx mask must be contiguous\n");
+		return -EINVAL;
+	}
+
+	tdm_offset = first_slot * slot_width;
+	tdm_offset += adc3101->tdm_additional_offset;
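+	/*
+	 * Example: with DSP_A (additional offset 1), 16-bit slots and
+	 * tx_mask 0x4 (slot 2), tdm_offset = 2 * 16 + 1 = 33 bit clocks.
+	 */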
+
+	if (tdm_offset > 255) {
+		dev_err(component->dev,
+			"tdm tx slot selection out of bounds\n");
+		return -EINVAL;
+	}
+
+	adc3101->tdm_offset = tdm_offset;
+	dev_dbg(component->dev, "tdm offset is %d\n", adc3101->tdm_offset);
+
+	/* tdm offset */
+	snd_soc_component_write(component, ADC3101_CH_OFFSET_1, tdm_offset);
+	/* second channel always after the first one */
+	snd_soc_component_write(component, ADC3101_CH_OFFSET_2, 0);
+
+	return 0;
+}
+
+static int adc3101_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+	struct snd_soc_component *component = codec_dai->component;
+	struct adc3101_priv *adc3101 = snd_soc_component_get_drvdata(component);
+
+	dev_dbg(component->dev, "adc3101: setting DAI fmt=0x%x\n", fmt);
+
+	/* set master/slave audio interface */
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		dev_err(component->dev,
+			"adc3101: invalid DAI master mode not supported\n");
+		return -EINVAL;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+	default:
+		dev_err(component->dev,
+			"adc3101: invalid DAI master/slave interface\n");
+		return -EINVAL;
+	}
+
+	adc3101->tdm_additional_offset = 0;
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_DSP_A:
+		/* same as B but add offset 1 */
+		adc3101->tdm_additional_offset = 0x01;
+		/* fall through */
+	case SND_SOC_DAIFMT_I2S:
+	case SND_SOC_DAIFMT_DSP_B:
+	case SND_SOC_DAIFMT_RIGHT_J:
+	case SND_SOC_DAIFMT_LEFT_J:
+		adc3101->fmt = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
+		break;
+	default:
+		dev_err(component->dev,
+			"adc3101: invalid DAI interface format\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int adc3101_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params,
+			     struct snd_soc_dai *dai)
+{
+	struct snd_soc_component *component = dai->component;
+	struct adc3101_priv *adc3101 = snd_soc_component_get_drvdata(component);
+	u8 iface1_reg = 0;
+	u8 iface2_reg = 0;
+	u8 i2s_tdm_reg = 0;
+	int i;
+
+	i = adc3101_get_divs(adc3101->sysclk, params_rate(params));
+	if (i < 0) {
+		dev_err(component->dev,
+			"adc3101: sampling rate not supported\n");
+		return i;
+	}
+
+	/* NADC divider value  and enable */
+	snd_soc_component_write(component, ADC3101_NADC, adc3101_divs[i].nadc |
+							ADC3101_NADCEN);
+
+	/* MADC divider value and enable */
+	snd_soc_component_write(component, ADC3101_MADC, adc3101_divs[i].madc |
+							ADC3101_MADCEN);
+
+	/* AOSR value */
+	snd_soc_component_write(component, ADC3101_AOSR, adc3101_divs[i].aosr);
+
+	/* check the wanted interface configuration */
+	switch (adc3101->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+	case SND_SOC_DAIFMT_DSP_B:
+		iface1_reg |= (ADC3101_DSP_MODE <<
+			       ADC3101_IFACE1_DATATYPE_SHIFT);
+		iface1_reg |= ADC3101_3STATE;
+		iface2_reg |= ADC3101_BCLKINV_MASK; /* invert bit clock */
+		i2s_tdm_reg |= ADC3101_TDM_EN | ADC3101_EARLY_STATE_EN;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		iface1_reg |= (ADC3101_RIGHT_J_MODE <<
+			       ADC3101_IFACE1_DATATYPE_SHIFT);
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		iface1_reg |= (ADC3101_LEFT_J_MODE <<
+			       ADC3101_IFACE1_DATATYPE_SHIFT);
+		break;
+	default:
+		dev_err(component->dev,
+			"adc3101: invalid DAI interface format\n");
+		return -EINVAL;
+	}
+
+	switch (params_width(params)) {
+	case 16:
+		iface1_reg |= (ADC3101_WORD_LEN_16BITS <<
+			       ADC3101_IFACE1_DATALEN_SHIFT);
+		break;
+	case 20:
+		iface1_reg |= (ADC3101_WORD_LEN_20BITS <<
+			       ADC3101_IFACE1_DATALEN_SHIFT);
+		break;
+	case 24:
+		iface1_reg |= (ADC3101_WORD_LEN_24BITS <<
+			       ADC3101_IFACE1_DATALEN_SHIFT);
+		break;
+	case 32:
+		iface1_reg |= (ADC3101_WORD_LEN_32BITS <<
+			       ADC3101_IFACE1_DATALEN_SHIFT);
+		break;
+	}
+
+	/* BDIVCLKIN is always ADC_CLK */
+	iface2_reg |= ADC3101_BDIVCLKIN_ADC_CLK << ADC3101_BDIVCLKIN_SHIFT;
+
+	/* writing the iface 1 & 2 settings */
+	snd_soc_component_write(component, ADC3101_IFACE1, iface1_reg);
+	snd_soc_component_write(component, ADC3101_IFACE2, iface2_reg);
+
+	/* enabling tdm if needed */
+	snd_soc_component_write(component, ADC3101_I2S_TDM, i2s_tdm_reg);
+
+	return 0;
+}
+
+static int adc3101_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_component *component = dai->component;
+
+	snd_soc_component_update_bits(component, ADC3101_FADCVOL,
+			    ADC3101_MUTE_MASK, mute ? ADC3101_MUTE : 0);
+
+	return 0;
+}
+
+static int adc3101_set_bias_level(struct snd_soc_component *component,
+				  enum snd_soc_bias_level level)
+{
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+		break;
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		break;
+	case SND_SOC_BIAS_OFF:
+		break;
+	}
+	return 0;
+}
+
+#define ADC3101_RATES	SNDRV_PCM_RATE_8000_96000
+#define ADC3101_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
+			 | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops adc3101_ops = {
+	.hw_params = adc3101_hw_params,
+	.digital_mute = adc3101_mute,
+	.set_tdm_slot = adc3101_set_tdm_slot,
+	.set_fmt = adc3101_set_dai_fmt,
+	.set_sysclk = adc3101_set_dai_sysclk,
+};
+
+static struct snd_soc_dai_driver adc3101_dai = {
+	.name = "tlv320adc3101-aif",
+	.capture = {
+			.stream_name = "Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = ADC3101_RATES,
+			.formats = ADC3101_FORMATS,
+	},
+	.ops = &adc3101_ops,
+};
+
+static int adc3101_component_probe(struct snd_soc_component *component)
+{
+	struct adc3101_priv *adc3101 = snd_soc_component_get_drvdata(component);
+	struct device *dev = adc3101->dev;
+	u8 clk_mux = 0;
+
+	if (gpio_is_valid(adc3101->rstn_gpio)) {
+		gpio_set_value(adc3101->rstn_gpio, 0);
+		ndelay(10);
+		gpio_set_value(adc3101->rstn_gpio, 1);
+		ndelay(10);
+	} else {
+		dev_err(dev,
+			"invalid reset gpio. Your adc may not work properly\n");
+	}
+
+	/* SW reset */
+	snd_soc_component_write(component, ADC3101_RESET, ADC3101_RESET_VALUE);
+
+	/* MCLK as an input, not supporting anything else */
+	clk_mux |= (ADC3101_PLL_CLKIN_MCLK << ADC3101_PLL_CLKIN_SHIFT) |
+			(ADC3101_CODEC_CLKIN_MCLK << ADC3101_CODEC_CLKIN_SHIFT);
+	snd_soc_component_write(component, ADC3101_CLKMUX, clk_mux);
+
+	/* left pin selection */
+	switch (adc3101->left_pin_select) {
+	case CH_SEL1:
+	case CH_SEL2:
+	case CH_SEL3:
+	case CH_SEL4:
+		snd_soc_component_update_bits(component, ADC3101_LPGAPIN,
+			ADC3101_PGAPIN_SEL_MASK <<
+			(2 * adc3101->left_pin_select),
+			adc3101->minus6db_left_input);
+		break;
+	case CH_SEL1X:
+	case CH_SEL2X:
+	case CH_SEL3X:
+		snd_soc_component_update_bits(component, ADC3101_LPGAPIN2,
+			ADC3101_PGAPIN_SEL_MASK <<
+			(2 * adc3101->left_pin_select - 8),
+			adc3101->minus6db_left_input);
+		break;
+	default:
+		dev_err(component->dev, "wrong left pin selection\n");
+		return -EINVAL;
+	}
+
+	/* right pin selection */
+	switch (adc3101->right_pin_select) {
+	case CH_SEL1:
+	case CH_SEL2:
+	case CH_SEL3:
+	case CH_SEL4:
+		snd_soc_component_update_bits(component, ADC3101_RPGAPIN,
+			ADC3101_PGAPIN_SEL_MASK <<
+			(2 * adc3101->right_pin_select),
+			adc3101->minus6db_right_input);
+		break;
+	case CH_SEL1X:
+	case CH_SEL2X:
+	case CH_SEL3X:
+		snd_soc_component_update_bits(component, ADC3101_RPGAPIN2,
+			ADC3101_PGAPIN_SEL_MASK <<
+			(2 * adc3101->right_pin_select - 8),
+			adc3101->minus6db_right_input);
+		break;
+	default:
+		dev_err(component->dev, "wrong right pin selection\n");
+		return -EINVAL;
+	}
+
+	/* unmute the left analog PGA */
+	snd_soc_component_update_bits(component, ADC3101_LAPGAVOL,
+							ADC3101_APGA_MUTE, 0);
+	/* unmute the right analog PGA */
+	snd_soc_component_update_bits(component, ADC3101_RAPGAVOL,
+							ADC3101_APGA_MUTE, 0);
+
+	/* update the soft stepping only */
+	snd_soc_component_update_bits(component, ADC3101_ADC_DIGITAL,
+			ADC3101_SOFT_STEPPING_MASK,
+			ADC3101_SOFT_STEPPING_DISABLE);
+
+	/* unmute */
+	snd_soc_component_update_bits(component, ADC3101_FADCVOL,
+			ADC3101_MUTE_MASK, 0);
+
+	return 0;
+}
+
+static const struct snd_soc_component_driver soc_component_dev_adc3101 = {
+	.probe			= adc3101_component_probe,
+	.set_bias_level		= adc3101_set_bias_level,
+	.controls		= adc3101_snd_controls,
+	.num_controls		= ARRAY_SIZE(adc3101_snd_controls),
+	.dapm_widgets		= adc3101_dapm_widgets,
+	.num_dapm_widgets	= ARRAY_SIZE(adc3101_dapm_widgets),
+	.dapm_routes		= adc3101_dapm_routes,
+	.num_dapm_routes	= ARRAY_SIZE(adc3101_dapm_routes),
+	.suspend_bias_off	= 1,
+	.idle_bias_on		= 1,
+	.use_pmdown_time	= 1,
+	.endianness		= 1,
+	.non_legacy_dai_naming	= 1,
+};
+
+static int adc3101_parse_dt(struct adc3101_priv *adc3101,
+		struct device_node *np)
+{
+	struct device *dev = adc3101->dev;
+	int ret = 0;
+
+	adc3101->rstn_gpio = of_get_named_gpio(np, "rst-gpio", 0);
+	if (!gpio_is_valid(adc3101->rstn_gpio)) {
+		dev_err(dev, "Invalid reset gpio\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(np, "left-pin-select",
+					&adc3101->left_pin_select);
+	if (!ret) {
+		if (adc3101->left_pin_select > CH_SEL3X) {
+			dev_err(dev, "wrong left pin selection\n");
+			return -EINVAL;
+		}
+	} else {
+		dev_err(dev, "left pin not selected\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(np, "right-pin-select",
+					&adc3101->right_pin_select);
+	if (!ret) {
+		if (adc3101->right_pin_select > CH_SEL3X) {
+			dev_err(dev, "wrong right pin selection\n");
+			return -EINVAL;
+		}
+	} else {
+		dev_err(dev, "right pin not selected\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void adc3101_disable_regulators(struct adc3101_priv *adc3101)
+{
+	if (!IS_ERR(adc3101->supply_iov))
+		regulator_disable(adc3101->supply_iov);
+
+	if (!IS_ERR(adc3101->supply_dv))
+		regulator_disable(adc3101->supply_dv);
+
+	if (!IS_ERR(adc3101->supply_av))
+		regulator_disable(adc3101->supply_av);
+}
+
+static int adc3101_setup_regulators(struct device *dev,
+		struct adc3101_priv *adc3101)
+{
+	int ret = 0;
+
+	adc3101->supply_iov = devm_regulator_get(dev, "iov");
+	adc3101->supply_dv = devm_regulator_get_optional(dev, "dv");
+	adc3101->supply_av = devm_regulator_get_optional(dev, "av");
+
+	/* Check for the regulators */
+	if (IS_ERR(adc3101->supply_iov)) {
+		dev_err(dev, "Missing supply 'iov'\n");
+		return PTR_ERR(adc3101->supply_iov);
+	}
+
+	if (IS_ERR(adc3101->supply_dv)) {
+		dev_err(dev, "Missing supply 'dv'\n");
+		return PTR_ERR(adc3101->supply_dv);
+	}
+
+	if (IS_ERR(adc3101->supply_av)) {
+		dev_err(dev, "Missing supply 'av'\n");
+		return PTR_ERR(adc3101->supply_av);
+	}
+
+	ret = regulator_enable(adc3101->supply_iov);
+	if (ret) {
+		dev_err(dev, "Failed to enable regulator iov\n");
+		return ret;
+	}
+
+	ret = regulator_enable(adc3101->supply_dv);
+	if (ret) {
+		dev_err(dev, "Failed to enable regulator dv\n");
+		goto error_dv;
+	}
+
+	ret = regulator_enable(adc3101->supply_av);
+	if (ret) {
+		dev_err(dev, "Failed to enable regulator av\n");
+		goto error_av;
+	}
+
+	return 0;
+
+error_av:
+	regulator_disable(adc3101->supply_dv);
+
+error_dv:
+	regulator_disable(adc3101->supply_iov);
+	return ret;
+}
+
+int adc3101_probe(struct device *dev, struct regmap *regmap)
+{
+	struct adc3101_priv *adc3101;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	adc3101 = devm_kzalloc(dev, sizeof(struct adc3101_priv),
+			       GFP_KERNEL);
+	if (adc3101 == NULL)
+		return -ENOMEM;
+
+	adc3101->dev = dev;
+	dev_set_drvdata(dev, adc3101);
+	adc3101->regmap = regmap;
+
+	if (np) {
+		ret = adc3101_parse_dt(adc3101, np);
+		if (ret) {
+			dev_err(dev, "Failed to parse DT node\n");
+			return ret;
+		}
+	} else {
+		dev_err(dev, "Could not parse DT node\n");
+		return -EINVAL;
+	}
+
+	/* setting default values */
+	adc3101->fmt = SND_SOC_DAIFMT_DSP_A;
+	adc3101->sysclk = 0;
+	adc3101->tdm_offset = 0;
+	adc3101->tdm_additional_offset = 0;
+	adc3101->minus6db_left_input = 1;
+	adc3101->minus6db_right_input = 1;
+
+	if (gpio_is_valid(adc3101->rstn_gpio)) {
+		ret = devm_gpio_request_one(dev, adc3101->rstn_gpio,
+				GPIOF_OUT_INIT_LOW, "tlv320adc3101 rstn");
+		if (ret != 0)
+			return ret;
+	}
+
+	ret = adc3101_setup_regulators(dev, adc3101);
+	if (ret) {
+		dev_err(dev, "Failed to setup regulators\n");
+		return ret;
+	}
+
+	ret = devm_snd_soc_register_component(dev,
+			&soc_component_dev_adc3101, &adc3101_dai, 1);
+	if (ret) {
+		dev_err(dev, "Failed to register component\n");
+		adc3101_disable_regulators(adc3101);
+		return ret;
+	}
+
+	if (gpio_is_valid(adc3101->rstn_gpio)) {
+		gpio_set_value(adc3101->rstn_gpio, 0);
+		ndelay(10);
+		gpio_set_value(adc3101->rstn_gpio, 1);
+		ndelay(10);
+	} else {
+		dev_err(dev,
+			"invalid reset gpio. Your adc may not work properly\n");
+	}
+
+	/* SW reset */
+	ret = regmap_write(adc3101->regmap, ADC3101_RESET, ADC3101_RESET_VALUE);
+	if (ret) {
+		dev_err(adc3101->dev,
+			"failed to write the reset register: %d\n", ret);
+		adc3101_disable_regulators(adc3101);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(adc3101_probe);
+
+int adc3101_remove(struct device *dev)
+{
+	struct adc3101_priv *adc3101 = dev_get_drvdata(dev);
+
+	adc3101_disable_regulators(adc3101);
+
+	return 0;
+}
+EXPORT_SYMBOL(adc3101_remove);
+
+MODULE_DESCRIPTION("ASoC tlv320adc3101 codec driver");
+MODULE_AUTHOR("Nicolas Belin <nbelin.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/tlv320adc3101.h b/sound/soc/codecs/tlv320adc3101.h
new file mode 100644
index 0000000..2440fbf
--- /dev/null
+++ b/sound/soc/codecs/tlv320adc3101.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * tlv320adc3101.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#ifndef _TLV320ADC3101_H
+#define _TLV320ADC3101_H
+
+struct device;
+struct regmap;
+struct regmap_config;
+
+extern const struct regmap_config adc3101_regmap_config;
+int adc3101_probe(struct device *dev, struct regmap *regmap);
+int adc3101_remove(struct device *dev);
+
+/* tlv320adc3101 register space */
+
+#define ADC3101_REG(page, reg)	(((page) * 128) + (reg))
+
+#define	ADC3101_PSEL			ADC3101_REG(0, 0)
+#define	ADC3101_RESET			ADC3101_REG(0, 1)
+#define	ADC3101_CLKMUX			ADC3101_REG(0, 4)
+#define	ADC3101_PLLPR			ADC3101_REG(0, 5)
+#define	ADC3101_PLLJ			ADC3101_REG(0, 6)
+#define	ADC3101_PLLDMSB			ADC3101_REG(0, 7)
+#define	ADC3101_PLLDLSB			ADC3101_REG(0, 8)
+#define	ADC3101_NADC			ADC3101_REG(0, 18)
+#define	ADC3101_MADC			ADC3101_REG(0, 19)
+#define ADC3101_AOSR			ADC3101_REG(0, 20)
+#define ADC3101_CLKMUX2			ADC3101_REG(0, 25)
+#define ADC3101_CLKOUTM			ADC3101_REG(0, 26)
+#define ADC3101_IFACE1			ADC3101_REG(0, 27)
+#define ADC3101_CH_OFFSET_1		ADC3101_REG(0, 28)
+#define ADC3101_IFACE2			ADC3101_REG(0, 29)
+#define ADC3101_I2S_SYNC		ADC3101_REG(0, 34)
+#define ADC3101_CH_OFFSET_2		ADC3101_REG(0, 37)
+#define ADC3101_I2S_TDM			ADC3101_REG(0, 38)
+#define ADC3101_ADC_DIGITAL		ADC3101_REG(0, 81)
+#define ADC3101_FADCVOL			ADC3101_REG(0, 82)
+#define ADC3101_LADCVOL			ADC3101_REG(0, 83)
+#define ADC3101_RADCVOL			ADC3101_REG(0, 84)
+#define ADC3101_LAGCMAX			ADC3101_REG(0, 88)
+#define ADC3101_RAGCMAX			ADC3101_REG(0, 96)
+#define ADC3101_MICBIAS			ADC3101_REG(1, 51)
+#define ADC3101_LPGAPIN			ADC3101_REG(1, 52)
+#define ADC3101_LPGAPIN2		ADC3101_REG(1, 54)
+#define ADC3101_RPGAPIN			ADC3101_REG(1, 55)
+#define ADC3101_RPGAPIN2		ADC3101_REG(1, 57)
+#define ADC3101_LAPGAVOL		ADC3101_REG(1, 59)
+#define ADC3101_RAPGAVOL		ADC3101_REG(1, 60)
+#define ADC3101_APGAFLAGS		ADC3101_REG(1, 62)
+
+/* Bits, masks, and shifts */
+
+/*  ADC3101_RESET */
+#define	ADC3101_RESET_VALUE				0x01
+
+/* ADC3101_CLKMUX */
+#define ADC3101_PLL_CLKIN_MASK			GENMASK(3, 2)
+#define ADC3101_PLL_CLKIN_SHIFT			(2)
+#define ADC3101_PLL_CLKIN_MCLK			(0x00)
+#define ADC3101_PLL_CLKIN_BCKL			(0x01)
+#define ADC3101_PLL_CLKIN_GPIO1			(0x02)
+#define ADC3101_PLL_CLKIN_DIN			(0x03)
+#define ADC3101_CODEC_CLKIN_MASK		GENMASK(1, 0)
+#define ADC3101_CODEC_CLKIN_SHIFT		(0)
+#define ADC3101_CODEC_CLKIN_MCLK		(0x00)
+#define ADC3101_CODEC_CLKIN_BCLK		(0x01)
+#define ADC3101_CODEC_CLKIN_GPIO1		(0x02)
+#define ADC3101_CODEC_CLKIN_PLL			(0x03)
+
+/* ADC3101_MUTE */
+#define ADC3101_MUTE_MASK				0x88
+#define ADC3101_MUTE					0x88
+
+/* ADC3101_NADC */
+#define ADC3101_NADCEN					BIT(7)
+#define ADC3101_NADC_MASK				GENMASK(6, 0)
+
+/* ADC3101_MADC */
+#define ADC3101_MADCEN					BIT(7)
+#define ADC3101_MADC_MASK				GENMASK(6, 0)
+
+/* ADC3101_IFACE1 */
+#define ADC3101_IFACE1_DATATYPE_MASK	GENMASK(7, 6)
+#define ADC3101_IFACE1_DATATYPE_SHIFT	(6)
+#define ADC3101_I2S_MODE				(0x00)
+#define ADC3101_DSP_MODE				(0x01)
+#define ADC3101_RIGHT_J_MODE			(0x02)
+#define ADC3101_LEFT_J_MODE				(0x03)
+#define ADC3101_IFACE1_DATALEN_MASK		GENMASK(5, 4)
+#define ADC3101_IFACE1_DATALEN_SHIFT	(4)
+#define ADC3101_WORD_LEN_16BITS			(0x00)
+#define ADC3101_WORD_LEN_20BITS			(0x01)
+#define ADC3101_WORD_LEN_24BITS			(0x02)
+#define ADC3101_WORD_LEN_32BITS			(0x03)
+#define ADC3101_IFACE1_MASTER_MASK		GENMASK(3, 2)
+#define ADC3101_BCLKMASTER				BIT(2)
+#define ADC3101_WCLKMASTER				BIT(3)
+#define ADC3101_3STATE					BIT(0)
+
+/* ADC3101_IFACE2 */
+#define ADC3101_BCLKINV_MASK			BIT(3)
+#define ADC3101_BDIVCLKIN_MASK			GENMASK(1, 0)
+#define ADC3101_BDIVCLKIN_SHIFT			(0)
+#define ADC3101_BDIVCLKIN_ADC_CLK		(0x02)
+#define ADC3101_BDIVCLKIN_ADC_MOD_CLK	(0x03)
+
+/* ADC3101_I2S_TDM */
+#define ADC3101_TDM_EN					BIT(0)
+#define ADC3101_EARLY_STATE_EN			BIT(1)
+
+/* ADC3101_PGAPIN */
+#define ADC3101_PGAPIN_SEL_MASK			GENMASK(1, 0)
+#define ADC3101_PGAPIN_6DB_MASK			0x55
+#define ADC3101_PGAPIN2_6DB_MASK		0x15
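+/*
+ * Each pin-select field is 2 bits wide; 0x55/0x15 set the low bit of every
+ * field, which (as used by the Minus 6dB input switches in the driver)
+ * selects the -6dB input path for all pins at once.
+ */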
+
+/* ADC3101_APGAVOL */
+#define ADC3101_APGA_MUTE				BIT(7)
+
+/* ADC3101_ADC_DIGITAL */
+#define ADC3101_LADC_EN					BIT(7)
+#define ADC3101_RADC_EN					BIT(6)
+#define ADC3101_SOFT_STEPPING_MASK		GENMASK(1, 0)
+#define ADC3101_SOFT_STEPPING_SHIFT		(0)
+#define ADC3101_SOFT_STEPPING_DISABLE	(0x02)
+#define ADC3101_SOFT_STEPPING_HALFSTEP	(0x01)
+#define ADC3101_SOFT_STEPPING_1STEP		(0x00)
+
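+/*
+ * Pin selection: CH_SEL1..CH_SEL4 are routed via the PGAPIN registers,
+ * CH_SEL1X..CH_SEL3X via the PGAPIN2 registers (see the pin selection
+ * handling in the component probe).
+ */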
+enum adc_input_selection {
+	CH_SEL1 = 0,
+	CH_SEL2,
+	CH_SEL3,
+	CH_SEL4,
+	CH_SEL1X,
+	CH_SEL2X,
+	CH_SEL3X
+};
+
+#endif				/* _TLV320ADC3101_H */
diff --git a/sound/soc/codecs/tlv320wn.h b/sound/soc/codecs/tlv320wn.h
new file mode 100644
index 0000000..b903f95
--- /dev/null
+++ b/sound/soc/codecs/tlv320wn.h
@@ -0,0 +1,326 @@
+/*
+ * linux/sound/soc/codecs/tlv320wn.h
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TLV320WN_H
+#define _TLV320WN_H
+
+#define AUDIO_NAME "tlv320aic3101"
+#define TLV320_VERSION "1.3"
+
+
+enum {
+	AIC3101_PLL_ADC_FS_CLKIN_MCLK,
+	AIC3101_PLL_ADC_FS_CLKIN_BCLK,
+};
+
+enum aic3x_micbias_voltage {
+	AIC3X_MICBIAS_OFF = 0,
+	AIC3X_MICBIAS_2_0V = 1,
+	AIC3X_MICBIAS_2_5V = 2,
+	AIC3X_MICBIAS_AVDDV = 3,
+};
+
+
+/* AIC31xx supported sample rate are 8k to 48k */
+#define TLV320_RATES   SNDRV_PCM_RATE_8000_48000
+
+/* AIC31xx supports the word formats 16bits,24bits and 32 bits */
+#define TLV320_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+
+#define TLV320_WORD_LEN_16BITS		0x00
+#define TLV320_WORD_LEN_24BITS		0x02
+#define TLV320_WORD_LEN_32BITS		0x03
+
+
+
+#ifdef CONFIG_SND_SOC_4_ADCS
+#define NUM_ADC3101             4
+#else
+#define NUM_ADC3101             1
+#endif
+
+#define ADC3101_CACHEREGNUM     (128 + 128)
+#define TLV320_CACHEREGNUM     (ADC3101_CACHEREGNUM * NUM_ADC3101)
+
+/*
+ * Creates the register using 8 bits for reg, 3 bits for device
+ * and 7 bits for page. The extra 2 high order bits for page are
+ * necessary for large pages, which may be sent in with the 32
+ * bit reg value to the following functions where dev, page, and
+ * reg no are properly masked out:
+ * - aic31xx_write
+ * - aic31xx_read_reg_cache
+ * For ALSA calls (where the register is limited to 16bits), the
+ * 5 bits for page is sufficient, and no high order bits will be
+ * truncated.
+ */
+#define MAKE_REG(device, page, reg) \
+	((u32) (((page) & 0x7f) << 11 | ((device) & 0x7) << 8 | ((reg) & 0x7f)))
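+
+/* e.g. MAKE_REG(2, 1, 52) = (1 << 11) | (2 << 8) | 52 = 0x0a34 */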
+
+/****************************************************************************/
+/*          Page 0 Registers                              */
+/****************************************************************************/
+/* Page select register */
+#define ADC_PAGE_SELECT(cnum)         MAKE_REG((cnum), 0, 0)
+/* Software reset register */
+#define ADC_RESET(cnum)               MAKE_REG((cnum), 0, 1)
+
+/* 2-3 Reserved */
+
+/* PLL programming register B */
+#define ADC_CLKGEN_MUX(cnum)          MAKE_REG((cnum), 0, 4)
+#define TLV320_CODEC_CLKIN_MCLK			0x0
+#define TLV320_CODEC_CLKIN_BCLK			0x1
+/* PLL P and R-Val */
+#define ADC_PLL_PROG_PR(cnum)         MAKE_REG((cnum), 0, 5)
+/* PLL J-Val */
+#define ADC_PLL_PROG_J(cnum)          MAKE_REG((cnum), 0, 6)
+/* PLL D-Val MSB */
+#define ADC_PLL_PROG_D_MSB(cnum)      MAKE_REG((cnum), 0, 7)
+/* PLL D-Val LSB */
+#define ADC_PLL_PROG_D_LSB(cnum)      MAKE_REG((cnum), 0, 8)
+
+/* 9-17 Reserved */
+
+/* ADC NADC */
+#define ADC_ADC_NADC(cnum)            MAKE_REG((cnum), 0, 18)
+/* ADC MADC */
+#define ADC_ADC_MADC(cnum)            MAKE_REG((cnum), 0, 19)
+/* ADC AOSR */
+#define ADC_ADC_AOSR(cnum)            MAKE_REG((cnum), 0, 20)
+/* ADC IADC */
+#define ADC_ADC_IADC(cnum)            MAKE_REG((cnum), 0, 21)
+/* ADC miniDSP engine decimation */
+#define ADC_MINIDSP_DECIMATION(cnum)   MAKE_REG((cnum), 0, 22)
+
+/* 23-24 Reserved */
+
+/* CLKOUT MUX */
+#define ADC_CLKOUT_MUX(cnum)      MAKE_REG((cnum), 0, 25)
+/* CLOCKOUT M divider value */
+#define ADC_CLKOUT_M_DIV(cnum)      MAKE_REG((cnum), 0, 26)
+/*Audio Interface Setting Register 1*/
+#define ADC_INTERFACE_CTRL_1(cnum)     MAKE_REG((cnum), 0, 27)
+/* Data Slot Offset (Ch_Offset_1) */
+#define ADC_CH_OFFSET_1(cnum)      MAKE_REG((cnum), 0, 28)
+/* ADC interface control 2 */
+#define ADC_INTERFACE_CTRL_2(cnum)     MAKE_REG((cnum), 0, 29)
+/* BCLK N Divider */
+#define ADC_BCLK_N_DIV(cnum)      MAKE_REG((cnum), 0, 30)
+/* Secondary audio interface control 1 */
+#define ADC_INTERFACE_CTRL_3(cnum)     MAKE_REG((cnum), 0, 31)
+/* Secondary audio interface control 2 */
+#define ADC_INTERFACE_CTRL_4(cnum)     MAKE_REG((cnum), 0, 32)
+/* Secondary audio interface control 3 */
+#define ADC_INTERFACE_CTRL_5(cnum)     MAKE_REG((cnum), 0, 33)
+/* I2S sync */
+#define ADC_I2S_SYNC(cnum)      MAKE_REG((cnum), 0, 34)
+
+/* 35 Reserved */
+
+/* ADC flag register */
+#define ADC_ADC_FLAG(cnum)      MAKE_REG((cnum), 0, 36)
+/* Data slot offset 2 (Ch_Offset_2) */
+#define ADC_CH_OFFSET_2(cnum)      MAKE_REG((cnum), 0, 37)
+/* I2S TDM control register */
+#define ADC_I2S_TDM_CTRL(cnum)      MAKE_REG((cnum), 0, 38)
+
+/* 39-41 Reserved */
+
+/* Interrupt flags (overflow) */
+#define ADC_INTR_FLAG_1(cnum)      MAKE_REG((cnum), 0, 42)
+/* Interrupt flags (overflow) */
+#define ADC_INTR_FLAG_2(cnum)      MAKE_REG((cnum), 0, 43)
+
+/* 44 Reserved */
+
+/* Interrupt flags ADC */
+#define ADC_INTR_FLAG_ADC1(cnum)      MAKE_REG((cnum), 0, 45)
+
+/* 46 Reserved */
+
+/* Interrupt flags ADC */
+#define ADC_INTR_FLAG_ADC2(cnum)      MAKE_REG((cnum), 0, 47)
+/* INT1 interrupt control */
+#define ADC_INT1_CTRL(cnum)      MAKE_REG((cnum), 0, 48)
+/* INT2 interrupt control */
+#define ADC_INT2_CTRL(cnum)      MAKE_REG((cnum), 0, 49)
+
+/* 50 Reserved */
+
+/* DMCLK/GPIO2 control */
+#define ADC_GPIO2_CTRL(cnum)      MAKE_REG((cnum), 0, 51)
+/* DMDIN/GPIO1 control */
+#define ADC_GPIO1_CTRL(cnum)      MAKE_REG((cnum), 0, 52)
+/* DOUT Control */
+#define ADC_DOUT_CTRL(cnum)      MAKE_REG((cnum), 0, 53)
+
+/* 54-56 Reserved */
+
+/* ADC sync control 1 */
+#define ADC_SYNC_CTRL_1(cnum)      MAKE_REG((cnum), 0, 57)
+/* ADC sync control 2 */
+#define ADC_SYNC_CTRL_2(cnum)      MAKE_REG((cnum), 0, 58)
+/* ADC CIC filter gain control */
+#define ADC_CIC_GAIN_CTRL(cnum)      MAKE_REG((cnum), 0, 59)
+
+/* 60 Reserved */
+
+/* ADC processing block selection  */
+#define ADC_PRB_SELECT(cnum)      MAKE_REG((cnum), 0, 61)
+/* Programmable instruction mode control bits */
+#define ADC_INST_MODE_CTRL(cnum)      MAKE_REG((cnum), 0, 62)
+
+/* 63-79 Reserved */
+
+/* Digital microphone polarity control */
+#define ADC_MIC_POLARITY_CTRL(cnum)    MAKE_REG((cnum), 0, 80)
+/* ADC Digital */
+#define ADC_ADC_DIGITAL(cnum)          MAKE_REG((cnum), 0, 81)
+/* ADC Fine Gain Adjust */
+#define ADC_ADC_FGA(cnum)              MAKE_REG((cnum), 0, 82)
+/* Left ADC Channel Volume Control */
+#define ADC_LADC_VOL(cnum)             MAKE_REG((cnum), 0, 83)
+/* Right ADC Channel Volume Control */
+#define ADC_RADC_VOL(cnum)             MAKE_REG((cnum), 0, 84)
+/* ADC phase compensation */
+#define ADC_ADC_PHASE_COMP(cnum)       MAKE_REG((cnum), 0, 85)
+/* Left Channel AGC Control Register 1 */
+#define ADC_LEFT_CHN_AGC_1(cnum)       MAKE_REG((cnum), 0, 86)
+/* Left Channel AGC Control Register 2 */
+#define ADC_LEFT_CHN_AGC_2(cnum)       MAKE_REG((cnum), 0, 87)
+/* Left Channel AGC Control Register 3 */
+#define ADC_LEFT_CHN_AGC_3(cnum)       MAKE_REG((cnum), 0, 88)
+/* Left Channel AGC Control Register 4 */
+#define ADC_LEFT_CHN_AGC_4(cnum)       MAKE_REG((cnum), 0, 89)
+/* Left Channel AGC Control Register 5 */
+#define ADC_LEFT_CHN_AGC_5(cnum)       MAKE_REG((cnum), 0, 90)
+/* Left Channel AGC Control Register 6 */
+#define ADC_LEFT_CHN_AGC_6(cnum)       MAKE_REG((cnum), 0, 91)
+/* Left Channel AGC Control Register 7 */
+#define ADC_LEFT_CHN_AGC_7(cnum)       MAKE_REG((cnum), 0, 92)
+/* Left AGC gain */
+#define ADC_LEFT_AGC_GAIN(cnum)        MAKE_REG((cnum), 0, 93)
+/* Right Channel AGC Control Register 1 */
+#define ADC_RIGHT_CHN_AGC_1(cnum)      MAKE_REG((cnum), 0, 94)
+/* Right Channel AGC Control Register 2 */
+#define ADC_RIGHT_CHN_AGC_2(cnum)      MAKE_REG((cnum), 0, 95)
+/* Right Channel AGC Control Register 3 */
+#define ADC_RIGHT_CHN_AGC_3(cnum)      MAKE_REG((cnum), 0, 96)
+/* Right Channel AGC Control Register 4 */
+#define ADC_RIGHT_CHN_AGC_4(cnum)      MAKE_REG((cnum), 0, 97)
+/* Right Channel AGC Control Register 5 */
+#define ADC_RIGHT_CHN_AGC_5(cnum)      MAKE_REG((cnum), 0, 98)
+/* Right Channel AGC Control Register 6 */
+#define ADC_RIGHT_CHN_AGC_6(cnum)      MAKE_REG((cnum), 0, 99)
+/* Right Channel AGC Control Register 7 */
+#define ADC_RIGHT_CHN_AGC_7(cnum)      MAKE_REG((cnum), 0, 100)
+/* Right AGC gain */
+#define ADC_RIGHT_AGC_GAIN(cnum)       MAKE_REG((cnum), 0, 101)
+
+/* 102-127 Reserved */
+
+/****************************************************************************/
+/*                           Page 1 Registers                               */
+/****************************************************************************/
+#define ADC_PAGE_1                    128
+
+/* 1-25 Reserved */
+
+/* Dither control */
+#define ADC_DITHER_CTRL(cnum)          MAKE_REG((cnum), 1, 26)
+
+/* 27-50 Reserved */
+
+/* MICBIAS Configuration Register */
+#define ADC_MICBIAS_CTRL(cnum)         MAKE_REG((cnum), 1, 51)
+/* Left ADC input selection for Left PGA */
+#define ADC_LEFT_PGA_SEL_1(cnum)       MAKE_REG((cnum), 1, 52)
+
+/* 53 Reserved */
+
+/* Left ADC input selection for Left PGA */
+#define ADC_LEFT_PGA_SEL_2(cnum)       MAKE_REG((cnum), 1, 54)
+/* Right ADC input selection for right PGA */
+#define ADC_RIGHT_PGA_SEL_1(cnum)      MAKE_REG((cnum), 1, 55)
+
+/* 56 Reserved */
+
+/* Right ADC input selection for right PGA */
+#define ADC_RIGHT_PGA_SEL_2(cnum)      MAKE_REG((cnum), 1, 57)
+
+/* 58 Reserved */
+
+/* Left analog PGA settings */
+#define ADC_LEFT_APGA_CTRL(cnum)       MAKE_REG((cnum), 1, 59)
+/* Right analog PGA settings */
+#define ADC_RIGHT_APGA_CTRL(cnum)      MAKE_REG((cnum), 1, 60)
+/* ADC Low current Modes */
+#define ADC_LOW_CURRENT_MODES(cnum)    MAKE_REG((cnum), 1, 61)
+/* ADC analog PGA flags */
+#define ADC_ANALOG_PGA_FLAGS(cnum)     MAKE_REG((cnum), 1, 62)
+
+/* 63-127 Reserved */
+
+
+#define EARLY_3STATE_ENABLED        0x02
+#define TIME_SLOT_MODE              0x01
+/*
+ *****************************************************************************
+ * Structures Definitions
+ *****************************************************************************
+ */
+/*
+ *----------------------------------------------------------------------------
+ * @struct  aic31xx_setup_data |
+ *          i2c specific data setup for AIC31xx.
+ * @field   unsigned short |i2c_address |
+ *          Unsigned short for i2c address.
+ *----------------------------------------------------------------------------
+ */
+struct aic31xx_setup_data {
+	unsigned short i2c_address;
+};
+
+struct tlv320_priv {
+	u8 adc_page_no[4];
+	struct i2c_client *adc_control_data[5];
+	struct mutex codecMutex;
+	struct gpio_desc *reset_gpiod;
+	int adc_pos;
+	int ref_ch;
+	int adc_num;
+};
+
+/*
+ *----------------------------------------------------------------------------
+ * @struct  snd_soc_codec_dai |
+ *          This is the SoC codec DAI structure, which holds the DAI
+ *          capabilities (playback and capture), the DAI runtime
+ *          information (DAI state and pop wait state), and the DAI
+ *          private data.
+ *----------------------------------------------------------------------------
+ */
+extern struct snd_soc_dai tlv320aic3101_dai;
+
+/*
+ *----------------------------------------------------------------------------
+ * @struct  snd_soc_codec_device |
+ *          This is the SoC audio codec device structure, which points
+ *          to the basic functions aic31xx_probe(), aic31xx_remove(),
+ *          aic31xx_suspend() and aic31xx_resume().
+ *
+ */
+extern struct snd_soc_codec_device soc_codec_dev_aic31xx;
+
+#endif              /* _TLV320WN_H */
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index e731d40..8f239a4 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -105,3 +105,91 @@
 	  with the RT5650 and RT5676 codecs.
 	  Select Y if you have such device.
 	  If unsure select "N".
+
+config SND_SOC_MT8183
+	tristate "ASoC support for Mediatek MT8183 chip"
+	depends on ARCH_MEDIATEK
+	select SND_SOC_MEDIATEK
+	help
+	  This adds ASoC platform driver support for Mediatek MT8183 chip
+	  that can be used with other codecs.
+	  Select Y if you have such device.
+	  If unsure select "N".
+
+config SND_SOC_MT8183_MT6358_TS3A227E_MAX98357A
+	tristate "ASoC Audio driver for MT8183 with MT6358 TS3A227E MAX98357A codec"
+	depends on SND_SOC_MT8183
+	select SND_SOC_MT6358
+	select SND_SOC_MAX98357A
+	select SND_SOC_BT_SCO
+	select SND_SOC_TS3A227E
+	help
+	  This adds ASoC driver for Mediatek MT8183 boards
+	  with the MT6358 TS3A227E MAX98357A audio codec.
+	  Select Y if you have such device.
+	  If unsure select "N".
+
+config SND_SOC_MT8183_DA7219_MAX98357A
+	tristate "ASoC Audio driver for MT8183 with DA7219 MAX98357A codec"
+	depends on SND_SOC_MT8183
+	select SND_SOC_MT6358
+	select SND_SOC_MAX98357A
+	select SND_SOC_DA7219
+	select SND_SOC_BT_SCO
+	help
+	  This adds ASoC driver for Mediatek MT8183 boards
+	  with the DA7219 MAX98357A audio codec.
+	  Select Y if you have such device.
+	  If unsure select "N".
+
+config SND_SOC_MT8183_MT6358
+	tristate "ASoc Audio driver for MT8183 with MT6358 codec"
+	depends on SND_SOC_MT8183 && MTK_PMIC_WRAP
+	help
+	  This adds ASoC driver for Mediatek MT8183 boards
+	  with the MT6358 codecs.
+	  Select Y if you have such device.
+	  If unsure select "N".
+
+config SND_SOC_MT8516
+	tristate "ASoC support for Mediatek MT8516 chip"
+	depends on ARCH_MEDIATEK
+	select SND_SOC_MEDIATEK
+	help
+	  This adds ASoC platform driver support for Mediatek MT8516 chip
+	  that can be used with other codecs.
+	  Select Y if you have such device.
+	  If unsure select "N".
+
+config SND_SOC_MTK_BTCVSD
+	tristate "ALSA BT SCO CVSD/MSBC Driver"
+	help
+	  This is for software BTCVSD. This enables the function for
+	  transferring/receiving BT encoded data to/from the BT firmware.
+	  Select Y if you have such device.
+	  If unsure select "N".
+
+config SND_SOC_MT8167
+	tristate "ASoC support for Mediatek MT8167 chip"
+	depends on ARCH_MEDIATEK
+	select SND_SOC_MEDIATEK
+	help
+	  This adds ASoC platform driver support for Mediatek MT8167 chip
+	  that can be used with other codecs.
+	  Select Y if you have such device.
+	  If unsure select "N".
+
+config SND_SOC_MT8516_PUMPKIN_MACH
+	tristate "ASoC Audio driver for MT8516P1"
+	select SND_SOC_MT8167
+	select SND_SOC_MT8167_CODEC
+	help
+	  This adds support for ASoC machine driver for Mediatek MT8516 Pumpkin
+	  platforms with internal audio codec and speaker codec CS4382A.
+	  Select Y if you have such device.
+	  If unsure select "N".
+
+config SND_SOC_MT8516_VESPER
+	tristate "ASoC Audio driver for MT8516-Vesper"
+	select SND_SOC_MT8167
+	select SND_SOC_MT8167_CODEC
+	help
+	  This adds support for ASoC machine driver for Mediatek MT8516 Vesper.
+	  Select Y if you have such device.
+	  If unsure select "N".
diff --git a/sound/soc/mediatek/Makefile b/sound/soc/mediatek/Makefile
index 3bb2c47..281e027 100644
--- a/sound/soc/mediatek/Makefile
+++ b/sound/soc/mediatek/Makefile
@@ -3,3 +3,6 @@
 obj-$(CONFIG_SND_SOC_MT2701) += mt2701/
 obj-$(CONFIG_SND_SOC_MT6797) += mt6797/
 obj-$(CONFIG_SND_SOC_MT8173) += mt8173/
+obj-$(CONFIG_SND_SOC_MT8183) += mt8183/
+obj-$(CONFIG_SND_SOC_MT8516) += mt8516/
+obj-$(CONFIG_SND_SOC_MT8167) += mt8167/
diff --git a/sound/soc/mediatek/common/Makefile b/sound/soc/mediatek/common/Makefile
index cdadabc..9ab9043 100644
--- a/sound/soc/mediatek/common/Makefile
+++ b/sound/soc/mediatek/common/Makefile
@@ -2,3 +2,5 @@
 # platform driver
 snd-soc-mtk-common-objs := mtk-afe-platform-driver.o mtk-afe-fe-dai.o
 obj-$(CONFIG_SND_SOC_MEDIATEK) += snd-soc-mtk-common.o
+
+obj-$(CONFIG_SND_SOC_MTK_BTCVSD) += mtk-btcvsd.o
diff --git a/sound/soc/mediatek/common/mtk-afe-fe-dai.c b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
index cf4978b..f2c3a77 100644
--- a/sound/soc/mediatek/common/mtk-afe-fe-dai.c
+++ b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
@@ -18,11 +18,11 @@
 
 static int mtk_regmap_update_bits(struct regmap *map, int reg,
 			   unsigned int mask,
-			   unsigned int val)
+			   unsigned int val, int shift)
 {
-	if (reg < 0)
+	if (reg < 0 || WARN_ON_ONCE(shift < 0))
 		return 0;
-	return regmap_update_bits(map, reg, mask, val);
+	return regmap_update_bits(map, reg, mask << shift, val << shift);
 }
 
 static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
@@ -47,10 +47,12 @@
 
 	snd_pcm_hw_constraint_step(substream->runtime, 0,
 				   SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
+
 	/* enable agent */
-	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
-			       1 << memif->data->agent_disable_shift,
-			       0 << memif->data->agent_disable_shift);
+	if (memif->data->agent_disable_shift >= 0)
+		mtk_regmap_update_bits(afe->regmap,
+				       memif->data->agent_disable_reg, 1, 0,
+				       memif->data->agent_disable_shift);
 
 	snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);
 
@@ -105,8 +107,7 @@
 	irq_id = memif->irq_usage;
 
 	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
-			       1 << memif->data->agent_disable_shift,
-			       1 << memif->data->agent_disable_shift);
+			       1, 1, memif->data->agent_disable_shift);
 
 	if (!memif->const_irq) {
 		mtk_dynamic_irq_release(afe, irq_id);
@@ -143,17 +144,16 @@
 			 memif->phys_buf_addr + memif->buffer_size - 1);
 
 	/* set MSB to 33-bit */
-	mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
-			       1 << memif->data->msb_shift,
-			       msb_at_bit33 << memif->data->msb_shift);
+	if (memif->data->msb_shift >= 0)
+		mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
+				       1, msb_at_bit33, memif->data->msb_shift);
 
 	/* set channel */
 	if (memif->data->mono_shift >= 0) {
 		unsigned int mono = (params_channels(params) == 1) ? 1 : 0;
 
 		mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
-				       1 << memif->data->mono_shift,
-				       mono << memif->data->mono_shift);
+				       1, mono, memif->data->mono_shift);
 	}
 
 	/* set rate */
@@ -166,8 +166,8 @@
 		return -EINVAL;
 
 	mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
-			       memif->data->fs_maskbit << memif->data->fs_shift,
-			       fs << memif->data->fs_shift);
+			       memif->data->fs_maskbit, fs,
+			       memif->data->fs_shift);
 
 	return 0;
 }
@@ -197,17 +197,14 @@
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
-		if (memif->data->enable_shift >= 0)
-			mtk_regmap_update_bits(afe->regmap,
-					       memif->data->enable_reg,
-					       1 << memif->data->enable_shift,
-					       1 << memif->data->enable_shift);
+		mtk_regmap_update_bits(afe->regmap,
+				       memif->data->enable_reg,
+				       1, 1, memif->data->enable_shift);
 
 		/* set irq counter */
 		mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
-				       irq_data->irq_cnt_maskbit
-				       << irq_data->irq_cnt_shift,
-				       counter << irq_data->irq_cnt_shift);
+				       irq_data->irq_cnt_maskbit, counter,
+				       irq_data->irq_cnt_shift);
 
 		/* set irq fs */
 		fs = afe->irq_fs(substream, runtime->rate);
@@ -216,24 +213,21 @@
 			return -EINVAL;
 
 		mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
-				       irq_data->irq_fs_maskbit
-				       << irq_data->irq_fs_shift,
-				       fs << irq_data->irq_fs_shift);
+				       irq_data->irq_fs_maskbit, fs,
+				       irq_data->irq_fs_shift);
 
 		/* enable interrupt */
 		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
-				       1 << irq_data->irq_en_shift,
-				       1 << irq_data->irq_en_shift);
+				       1, 1, irq_data->irq_en_shift);
 
 		return 0;
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
 		mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
-				       1 << memif->data->enable_shift, 0);
+				       1, 0, memif->data->enable_shift);
 		/* disable interrupt */
 		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
-				       1 << irq_data->irq_en_shift,
-				       0 << irq_data->irq_en_shift);
+				       1, 0, irq_data->irq_en_shift);
 		/* and clear pending IRQ */
 		mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
 				 1 << irq_data->irq_clr_shift);
@@ -269,9 +263,9 @@
 		break;
 	}
 
-	mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
-			       1 << memif->data->hd_shift,
-			       hd_audio << memif->data->hd_shift);
+	if (memif->data->hd_shift >= 0)
+		mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
+				       1, hd_audio, memif->data->hd_shift);
 
 	return 0;
 }
diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
new file mode 100644
index 0000000..4b613d5
--- /dev/null
+++ b/sound/soc/mediatek/common/mtk-btcvsd.c
@@ -0,0 +1,1364 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Mediatek ALSA BT SCO CVSD/MSBC Driver
+//
+// Copyright (c) 2019 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/sched/clock.h>
+
+#include <sound/soc.h>
+
+#define BTCVSD_SND_NAME "mtk-btcvsd-snd"
+
+#define BT_CVSD_TX_NREADY	BIT(21)
+#define BT_CVSD_RX_READY	BIT(22)
+#define BT_CVSD_TX_UNDERFLOW	BIT(23)
+#define BT_CVSD_RX_OVERFLOW	BIT(24)
+#define BT_CVSD_INTERRUPT	BIT(31)
+
+#define BT_CVSD_CLEAR \
+	(BT_CVSD_TX_NREADY | BT_CVSD_RX_READY | BT_CVSD_TX_UNDERFLOW |\
+	 BT_CVSD_RX_OVERFLOW | BT_CVSD_INTERRUPT)
+
+/* TX */
+#define SCO_TX_ENCODE_SIZE (60)
+/* 18 = 6 * 180 / SCO_TX_ENCODE_SIZE */
+#define SCO_TX_PACKER_BUF_NUM (18)
+
+/* RX */
+#define SCO_RX_PLC_SIZE (30)
+#define SCO_RX_PACKER_BUF_NUM (64)
+#define SCO_RX_PACKET_MASK (0x3F)
+
+#define SCO_CVSD_PACKET_VALID_SIZE 2
+
+#define SCO_PACKET_120 120
+#define SCO_PACKET_180 180
+
+#define BTCVSD_RX_PACKET_SIZE (SCO_RX_PLC_SIZE + SCO_CVSD_PACKET_VALID_SIZE)
+#define BTCVSD_TX_PACKET_SIZE (SCO_TX_ENCODE_SIZE)
+
+#define BTCVSD_RX_BUF_SIZE (BTCVSD_RX_PACKET_SIZE * SCO_RX_PACKER_BUF_NUM)
+#define BTCVSD_TX_BUF_SIZE (BTCVSD_TX_PACKET_SIZE * SCO_TX_PACKER_BUF_NUM)
+
+enum bt_sco_state {
+	BT_SCO_STATE_IDLE,
+	BT_SCO_STATE_RUNNING,
+	BT_SCO_STATE_ENDING,
+};
+
+enum bt_sco_direct {
+	BT_SCO_DIRECT_BT2ARM,
+	BT_SCO_DIRECT_ARM2BT,
+};
+
+enum bt_sco_packet_len {
+	BT_SCO_CVSD_30 = 0,
+	BT_SCO_CVSD_60,
+	BT_SCO_CVSD_90,
+	BT_SCO_CVSD_120,
+	BT_SCO_CVSD_10,
+	BT_SCO_CVSD_20,
+	BT_SCO_CVSD_MAX,
+};
+
+enum BT_SCO_BAND {
+	BT_SCO_NB,
+	BT_SCO_WB,
+};
+
+struct mtk_btcvsd_snd_hw_info {
+	unsigned int num_valid_addr;
+	unsigned long bt_sram_addr[20];
+	unsigned int packet_length;
+	unsigned int packet_num;
+};
+
+struct mtk_btcvsd_snd_stream {
+	struct snd_pcm_substream *substream;
+	int stream;
+
+	enum bt_sco_state state;
+
+	unsigned int packet_size;
+	unsigned int buf_size;
+	u8 temp_packet_buf[SCO_PACKET_180];
+
+	int packet_w;
+	int packet_r;
+	snd_pcm_uframes_t prev_frame;
+	int prev_packet_idx;
+
+	unsigned int xrun:1;
+	unsigned int timeout:1;
+	unsigned int mute:1;
+	unsigned int trigger_start:1;
+	unsigned int wait_flag:1;
+	unsigned int rw_cnt;
+
+	unsigned long long time_stamp;
+	unsigned long long buf_data_equivalent_time;
+
+	struct mtk_btcvsd_snd_hw_info buffer_info;
+};
+
+struct mtk_btcvsd_snd {
+	struct device *dev;
+	int irq_id;
+
+	struct regmap *infra;
+	void __iomem *bt_pkv_base;
+	void __iomem *bt_sram_bank2_base;
+
+	unsigned int infra_misc_offset;
+	unsigned int conn_bt_cvsd_mask;
+	unsigned int cvsd_mcu_read_offset;
+	unsigned int cvsd_mcu_write_offset;
+	unsigned int cvsd_packet_indicator;
+
+	u32 *bt_reg_pkt_r;
+	u32 *bt_reg_pkt_w;
+	u32 *bt_reg_ctl;
+
+	unsigned int irq_disabled:1;
+
+	spinlock_t tx_lock;	/* spinlock for bt tx stream control */
+	spinlock_t rx_lock;	/* spinlock for bt rx stream control */
+	wait_queue_head_t tx_wait;
+	wait_queue_head_t rx_wait;
+
+	struct mtk_btcvsd_snd_stream *tx;
+	struct mtk_btcvsd_snd_stream *rx;
+	u8 tx_packet_buf[BTCVSD_TX_BUF_SIZE];
+	u8 rx_packet_buf[BTCVSD_RX_BUF_SIZE];
+
+	enum BT_SCO_BAND band;
+};
+
+struct mtk_btcvsd_snd_time_buffer_info {
+	unsigned long long data_count_equi_time;
+	unsigned long long time_stamp_us;
+};
+
+static const unsigned int btsco_packet_valid_mask[BT_SCO_CVSD_MAX][6] = {
+	{0x1, 0x1 << 1, 0x1 << 2, 0x1 << 3, 0x1 << 4, 0x1 << 5},
+	{0x1, 0x1, 0x2, 0x2, 0x4, 0x4},
+	{0x1, 0x1, 0x1, 0x2, 0x2, 0x2},
+	{0x1, 0x1, 0x1, 0x1, 0x0, 0x0},
+	{0x7, 0x7 << 3, 0x7 << 6, 0x7 << 9, 0x7 << 12, 0x7 << 15},
+	{0x3, 0x3 << 1, 0x3 << 3, 0x3 << 4, 0x3 << 6, 0x3 << 7},
+};
+
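+/*
+ * {packet length, packets per irq, tx buffer count, rx buffer count}:
+ * each interrupt moves one 180-byte SRAM block (120 bytes for the
+ * 120-byte packet case), e.g. 30-byte packets arrive six at a time.
+ */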
+static const unsigned int btsco_packet_info[BT_SCO_CVSD_MAX][4] = {
+	{30, 6, SCO_PACKET_180 / SCO_TX_ENCODE_SIZE,
+	 SCO_PACKET_180 / SCO_RX_PLC_SIZE},
+	{60, 3, SCO_PACKET_180 / SCO_TX_ENCODE_SIZE,
+	 SCO_PACKET_180 / SCO_RX_PLC_SIZE},
+	{90, 2, SCO_PACKET_180 / SCO_TX_ENCODE_SIZE,
+	 SCO_PACKET_180 / SCO_RX_PLC_SIZE},
+	{120, 1, SCO_PACKET_120 / SCO_TX_ENCODE_SIZE,
+	 SCO_PACKET_120 / SCO_RX_PLC_SIZE},
+	{10, 18, SCO_PACKET_180 / SCO_TX_ENCODE_SIZE,
+	 SCO_PACKET_180 / SCO_RX_PLC_SIZE},
+	{20, 9, SCO_PACKET_180 / SCO_TX_ENCODE_SIZE,
+	 SCO_PACKET_180 / SCO_RX_PLC_SIZE},
+};
+
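+/*
+ * Three 60-byte mSBC frames of encoded silence: each starts with the eSCO
+ * H2 header (0x01 plus a rotating sequence byte) followed by the 0xad mSBC
+ * syncword.
+ */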
+static const u8 table_msbc_silence[SCO_PACKET_180] = {
+	0x01, 0x38, 0xad, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, 0x00,
+	0x77, 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6d,
+	0xdd, 0xb6, 0xdb, 0x77, 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7,
+	0x76, 0xdb, 0x6d, 0xdd, 0xb6, 0xdb, 0x77, 0x6d, 0xb6, 0xdd,
+	0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6d, 0xdd, 0xb6, 0xdb, 0x77,
+	0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6c, 0x00,
+	0x01, 0xc8, 0xad, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, 0x00,
+	0x77, 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6d,
+	0xdd, 0xb6, 0xdb, 0x77, 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7,
+	0x76, 0xdb, 0x6d, 0xdd, 0xb6, 0xdb, 0x77, 0x6d, 0xb6, 0xdd,
+	0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6d, 0xdd, 0xb6, 0xdb, 0x77,
+	0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6c, 0x00,
+	0x01, 0xf8, 0xad, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, 0x00,
+	0x77, 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6d,
+	0xdd, 0xb6, 0xdb, 0x77, 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7,
+	0x76, 0xdb, 0x6d, 0xdd, 0xb6, 0xdb, 0x77, 0x6d, 0xb6, 0xdd,
+	0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6d, 0xdd, 0xb6, 0xdb, 0x77,
+	0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6c, 0x00
+};
+
+static void mtk_btcvsd_snd_irq_enable(struct mtk_btcvsd_snd *bt)
+{
+	regmap_update_bits(bt->infra, bt->infra_misc_offset,
+			   bt->conn_bt_cvsd_mask, bt->conn_bt_cvsd_mask);
+}
+
+static void mtk_btcvsd_snd_irq_disable(struct mtk_btcvsd_snd *bt)
+{
+	regmap_update_bits(bt->infra, bt->infra_misc_offset,
+			   bt->conn_bt_cvsd_mask, 0);
+}
+
+static void mtk_btcvsd_snd_set_state(struct mtk_btcvsd_snd *bt,
+				     struct mtk_btcvsd_snd_stream *bt_stream,
+				     int state)
+{
+	dev_dbg(bt->dev, "%s(), stream %d, state %d, tx->state %d, rx->state %d, irq_disabled %d\n",
+		__func__,
+		bt_stream->stream, state,
+		bt->tx->state, bt->rx->state, bt->irq_disabled);
+
+	bt_stream->state = state;
+
+	if (bt->tx->state == BT_SCO_STATE_IDLE &&
+	    bt->rx->state == BT_SCO_STATE_IDLE) {
+		if (!bt->irq_disabled) {
+			disable_irq(bt->irq_id);
+			mtk_btcvsd_snd_irq_disable(bt);
+			bt->irq_disabled = 1;
+		}
+	} else {
+		if (bt->irq_disabled) {
+			enable_irq(bt->irq_id);
+			mtk_btcvsd_snd_irq_enable(bt);
+			bt->irq_disabled = 0;
+		}
+	}
+}
+
+static int mtk_btcvsd_snd_tx_init(struct mtk_btcvsd_snd *bt)
+{
+	memset(bt->tx, 0, sizeof(*bt->tx));
+	memset(bt->tx_packet_buf, 0, sizeof(bt->tx_packet_buf));
+
+	bt->tx->packet_size = BTCVSD_TX_PACKET_SIZE;
+	bt->tx->buf_size = BTCVSD_TX_BUF_SIZE;
+	bt->tx->timeout = 0;
+	bt->tx->rw_cnt = 0;
+	bt->tx->stream = SNDRV_PCM_STREAM_PLAYBACK;
+	return 0;
+}
+
+static int mtk_btcvsd_snd_rx_init(struct mtk_btcvsd_snd *bt)
+{
+	memset(bt->rx, 0, sizeof(*bt->rx));
+	memset(bt->rx_packet_buf, 0, sizeof(bt->rx_packet_buf));
+
+	bt->rx->packet_size = BTCVSD_RX_PACKET_SIZE;
+	bt->rx->buf_size = BTCVSD_RX_BUF_SIZE;
+	bt->rx->timeout = 0;
+	bt->rx->rw_cnt = 0;
+	bt->rx->stream = SNDRV_PCM_STREAM_CAPTURE;
+	return 0;
+}
+
+static void get_tx_time_stamp(struct mtk_btcvsd_snd *bt,
+			      struct mtk_btcvsd_snd_time_buffer_info *ts)
+{
+	ts->time_stamp_us = bt->tx->time_stamp;
+	ts->data_count_equi_time = bt->tx->buf_data_equivalent_time;
+}
+
+static void get_rx_time_stamp(struct mtk_btcvsd_snd *bt,
+			      struct mtk_btcvsd_snd_time_buffer_info *ts)
+{
+	ts->time_stamp_us = bt->rx->time_stamp;
+	ts->data_count_equi_time = bt->rx->buf_data_equivalent_time;
+}
+
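+/*
+ * Convert a byte count into ALSA frames: 4 bytes per sample for 32-bit
+ * formats, 2 bytes otherwise, divided by the channel count; e.g. 960
+ * bytes of 16-bit stereo is 960 / 2 / 2 = 240 frames.
+ */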
+static int btcvsd_bytes_to_frame(struct snd_pcm_substream *substream,
+				 int bytes)
+{
+	int count = bytes;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	if (runtime->format == SNDRV_PCM_FORMAT_S32_LE ||
+	    runtime->format == SNDRV_PCM_FORMAT_U32_LE)
+		count = count >> 2;
+	else
+		count = count >> 1;
+
+	count = count / runtime->channels;
+	return count;
+}
+
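+/*
+ * Copy packets between ARM and BT SRAM. Block sizes that are multiples
+ * of 4 are copied as a single 32-bit stream; the other sizes are copied
+ * 16 bits at a time with one u16 skipped per block on the SRAM side,
+ * since those packets appear to be padded to a 32-bit boundary there.
+ */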
+static void mtk_btcvsd_snd_data_transfer(enum bt_sco_direct dir,
+					 u8 *src, u8 *dst,
+					 unsigned int blk_size,
+					 unsigned int blk_num)
+{
+	unsigned int i, j;
+
+	if (blk_size == 60 || blk_size == 120 || blk_size == 20) {
+		u32 *src_32 = (u32 *)src;
+		u32 *dst_32 = (u32 *)dst;
+
+		for (i = 0; i < (blk_size * blk_num / 4); i++)
+			*dst_32++ = *src_32++;
+	} else {
+		u16 *src_16 = (u16 *)src;
+		u16 *dst_16 = (u16 *)dst;
+
+		for (j = 0; j < blk_num; j++) {
+			for (i = 0; i < (blk_size / 2); i++)
+				*dst_16++ = *src_16++;
+
+			if (dir == BT_SCO_DIRECT_BT2ARM)
+				src_16++;
+			else
+				dst_16++;
+		}
+	}
+}
+
+/* write encoded mute data to bt sram */
+static int btcvsd_tx_clean_buffer(struct mtk_btcvsd_snd *bt)
+{
+	unsigned int i;
+	unsigned int num_valid_addr;
+	unsigned long flags;
+	enum BT_SCO_BAND band = bt->band;
+
+	/* prepare encoded mute data */
+	if (band == BT_SCO_NB)
+		memset(bt->tx->temp_packet_buf, 170, SCO_PACKET_180);
+	else
+		memcpy(bt->tx->temp_packet_buf,
+		       table_msbc_silence, SCO_PACKET_180);
+
+	/* write mute data to bt tx sram buffer */
+	spin_lock_irqsave(&bt->tx_lock, flags);
+	num_valid_addr = bt->tx->buffer_info.num_valid_addr;
+
+	dev_info(bt->dev, "%s(), band %d, num_valid_addr %u\n",
+		 __func__, band, num_valid_addr);
+
+	for (i = 0; i < num_valid_addr; i++) {
+		void *dst;
+
+		dev_info(bt->dev, "%s(), clean addr 0x%lx\n", __func__,
+			 bt->tx->buffer_info.bt_sram_addr[i]);
+
+		dst = (void *)bt->tx->buffer_info.bt_sram_addr[i];
+
+		mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_ARM2BT,
+					     bt->tx->temp_packet_buf, dst,
+					     bt->tx->buffer_info.packet_length,
+					     bt->tx->buffer_info.packet_num);
+	}
+	spin_unlock_irqrestore(&bt->tx_lock, flags);
+
+	return 0;
+}
+
+static int mtk_btcvsd_read_from_bt(struct mtk_btcvsd_snd *bt,
+				   enum bt_sco_packet_len packet_type,
+				   unsigned int packet_length,
+				   unsigned int packet_num,
+				   unsigned int blk_size,
+				   unsigned int control)
+{
+	unsigned int i;
+	int pv;
+	u8 *src;
+	unsigned int packet_buf_ofs;
+	unsigned long flags;
+	unsigned long connsys_addr_rx, ap_addr_rx;
+
+	connsys_addr_rx = *bt->bt_reg_pkt_r;
+	ap_addr_rx = (unsigned long)bt->bt_sram_bank2_base +
+		     (connsys_addr_rx & 0xFFFF);
+
+	if (connsys_addr_rx == 0xdeadfeed) {
+		/* BT returns 0xdeadfeed if a register is read during BT sleep */
+		dev_warn(bt->dev, "%s(), connsys_addr_rx == 0xdeadfeed\n",
+			 __func__);
+		return -EIO;
+	}
+
+	src = (u8 *)ap_addr_rx;
+
+	mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_BT2ARM, src,
+				     bt->rx->temp_packet_buf, packet_length,
+				     packet_num);
+
+	spin_lock_irqsave(&bt->rx_lock, flags);
+	for (i = 0; i < blk_size; i++) {
+		packet_buf_ofs = (bt->rx->packet_w & SCO_RX_PACKET_MASK) *
+				 bt->rx->packet_size;
+		memcpy(bt->rx_packet_buf + packet_buf_ofs,
+		       bt->rx->temp_packet_buf + (SCO_RX_PLC_SIZE * i),
+		       SCO_RX_PLC_SIZE);
+		if ((control & btsco_packet_valid_mask[packet_type][i]) ==
+		    btsco_packet_valid_mask[packet_type][i])
+			pv = 1;
+		else
+			pv = 0;
+
+		packet_buf_ofs += SCO_RX_PLC_SIZE;
+		memcpy(bt->rx_packet_buf + packet_buf_ofs, (void *)&pv,
+		       SCO_CVSD_PACKET_VALID_SIZE);
+		bt->rx->packet_w++;
+	}
+	spin_unlock_irqrestore(&bt->rx_lock, flags);
+	return 0;
+}
+
+int mtk_btcvsd_write_to_bt(struct mtk_btcvsd_snd *bt,
+			   enum bt_sco_packet_len packet_type,
+			   unsigned int packet_length,
+			   unsigned int packet_num,
+			   unsigned int blk_size)
+{
+	unsigned int i;
+	unsigned long flags;
+	u8 *dst;
+	unsigned long connsys_addr_tx, ap_addr_tx;
+	bool new_ap_addr_tx = true;
+
+	connsys_addr_tx = *bt->bt_reg_pkt_w;
+	ap_addr_tx = (unsigned long)bt->bt_sram_bank2_base +
+		     (connsys_addr_tx & 0xFFFF);
+
+	if (connsys_addr_tx == 0xdeadfeed) {
+		/* BT returns 0xdeadfeed if a register is read during BT sleep */
+		dev_warn(bt->dev, "%s(), connsys_addr_tx == 0xdeadfeed\n",
+			 __func__);
+		return -EIO;
+	}
+
+	spin_lock_irqsave(&bt->tx_lock, flags);
+	for (i = 0; i < blk_size; i++) {
+		memcpy(bt->tx->temp_packet_buf + (bt->tx->packet_size * i),
+		       (bt->tx_packet_buf +
+			(bt->tx->packet_r % SCO_TX_PACKER_BUF_NUM) *
+			bt->tx->packet_size),
+		       bt->tx->packet_size);
+
+		bt->tx->packet_r++;
+	}
+	spin_unlock_irqrestore(&bt->tx_lock, flags);
+
+	dst = (u8 *)ap_addr_tx;
+
+	if (!bt->tx->mute) {
+		mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_ARM2BT,
+					     bt->tx->temp_packet_buf, dst,
+					     packet_length, packet_num);
+	}
+
+	/* store bt tx buffer sram info */
+	bt->tx->buffer_info.packet_length = packet_length;
+	bt->tx->buffer_info.packet_num = packet_num;
+	for (i = 0; i < bt->tx->buffer_info.num_valid_addr; i++) {
+		if (bt->tx->buffer_info.bt_sram_addr[i] == ap_addr_tx) {
+			new_ap_addr_tx = false;
+			break;
+		}
+	}
+	if (new_ap_addr_tx) {
+		unsigned int next_idx;
+
+		spin_lock_irqsave(&bt->tx_lock, flags);
+		bt->tx->buffer_info.num_valid_addr++;
+		next_idx = bt->tx->buffer_info.num_valid_addr - 1;
+		bt->tx->buffer_info.bt_sram_addr[next_idx] = ap_addr_tx;
+		spin_unlock_irqrestore(&bt->tx_lock, flags);
+		dev_info(bt->dev, "%s(), new ap_addr_tx = 0x%lx, num_valid_addr %d\n",
+			 __func__, ap_addr_tx,
+			 bt->tx->buffer_info.num_valid_addr);
+	}
+
+	if (bt->tx->mute)
+		btcvsd_tx_clean_buffer(bt);
+
+	return 0;
+}
+
+static irqreturn_t mtk_btcvsd_snd_irq_handler(int irq_id, void *dev)
+{
+	struct mtk_btcvsd_snd *bt = dev;
+	unsigned int packet_type, packet_num, packet_length;
+	unsigned int buf_cnt_tx, buf_cnt_rx, control;
+
+	if (bt->rx->state != BT_SCO_STATE_RUNNING &&
+	    bt->rx->state != BT_SCO_STATE_ENDING &&
+	    bt->tx->state != BT_SCO_STATE_RUNNING &&
+	    bt->tx->state != BT_SCO_STATE_ENDING) {
+		dev_warn(bt->dev, "%s(), in idle state: rx->state: %d, tx->state: %d\n",
+			 __func__, bt->rx->state, bt->tx->state);
+		goto irq_handler_exit;
+	}
+
+	control = *bt->bt_reg_ctl;
+	packet_type = (control >> 18) & 0x7;
+
+	if (((control >> 31) & 1) == 0) {
+		dev_warn(bt->dev, "%s(), ((control >> 31) & 1) == 0, control 0x%x\n",
+			 __func__, control);
+		goto irq_handler_exit;
+	}
+
+	if (packet_type >= BT_SCO_CVSD_MAX) {
+		dev_warn(bt->dev, "%s(), invalid packet_type %u, exit\n",
+			 __func__, packet_type);
+		goto irq_handler_exit;
+	}
+
+	packet_length = btsco_packet_info[packet_type][0];
+	packet_num = btsco_packet_info[packet_type][1];
+	buf_cnt_tx = btsco_packet_info[packet_type][2];
+	buf_cnt_rx = btsco_packet_info[packet_type][3];
+
+	if (bt->rx->state == BT_SCO_STATE_RUNNING ||
+	    bt->rx->state == BT_SCO_STATE_ENDING) {
+		if (bt->rx->xrun) {
+			if (bt->rx->packet_w - bt->rx->packet_r <=
+			    SCO_RX_PACKER_BUF_NUM - 2 * buf_cnt_rx) {
+				/*
+				 * free space is larger than twice the
+				 * rx data size per interrupt
+				 */
+				bt->rx->xrun = 0;
+				dev_warn(bt->dev, "%s(), rx->xrun 0!\n",
+					 __func__);
+			}
+		}
+
+		if (!bt->rx->xrun &&
+		    (bt->rx->packet_w - bt->rx->packet_r <=
+		     SCO_RX_PACKER_BUF_NUM - buf_cnt_rx)) {
+			mtk_btcvsd_read_from_bt(bt,
+						packet_type,
+						packet_length,
+						packet_num,
+						buf_cnt_rx,
+						control);
+			bt->rx->rw_cnt++;
+		} else {
+			bt->rx->xrun = 1;
+			dev_warn(bt->dev, "%s(), rx->xrun 1\n", __func__);
+		}
+	}
+
+	/* tx */
+	bt->tx->timeout = 0;
+	if ((bt->tx->state == BT_SCO_STATE_RUNNING ||
+	     bt->tx->state == BT_SCO_STATE_ENDING) &&
+	    bt->tx->trigger_start) {
+		if (bt->tx->xrun) {
+			/* prepared data is larger than twice the
+			 * tx data size per interrupt
+			 */
+			if (bt->tx->packet_w - bt->tx->packet_r >=
+			    2 * buf_cnt_tx) {
+				bt->tx->xrun = 0;
+				dev_warn(bt->dev, "%s(), tx->xrun 0\n",
+					 __func__);
+			}
+		}
+
+		if ((!bt->tx->xrun &&
+		     (bt->tx->packet_w - bt->tx->packet_r >= buf_cnt_tx)) ||
+		    bt->tx->state == BT_SCO_STATE_ENDING) {
+			mtk_btcvsd_write_to_bt(bt,
+					       packet_type,
+					       packet_length,
+					       packet_num,
+					       buf_cnt_tx);
+			bt->tx->rw_cnt++;
+		} else {
+			bt->tx->xrun = 1;
+			dev_warn(bt->dev, "%s(), tx->xrun 1\n", __func__);
+		}
+	}
+
+	*bt->bt_reg_ctl &= ~BT_CVSD_CLEAR;
+
+	if (bt->rx->state == BT_SCO_STATE_RUNNING ||
+	    bt->rx->state == BT_SCO_STATE_ENDING) {
+		bt->rx->wait_flag = 1;
+		wake_up_interruptible(&bt->rx_wait);
+		snd_pcm_period_elapsed(bt->rx->substream);
+	}
+	if (bt->tx->state == BT_SCO_STATE_RUNNING ||
+	    bt->tx->state == BT_SCO_STATE_ENDING) {
+		bt->tx->wait_flag = 1;
+		wake_up_interruptible(&bt->tx_wait);
+		snd_pcm_period_elapsed(bt->tx->substream);
+	}
+
+	return IRQ_HANDLED;
+irq_handler_exit:
+	*bt->bt_reg_ctl &= ~BT_CVSD_CLEAR;
+	return IRQ_HANDLED;
+}
+
+static int wait_for_bt_irq(struct mtk_btcvsd_snd *bt,
+			   struct mtk_btcvsd_snd_stream *bt_stream)
+{
+	unsigned long long t1, t2;
+	/* one interrupt period = 22.5ms */
+	unsigned long long timeout_limit = 22500000;
+	int max_timeout_trial = 2;
+	int ret;
+
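+	/*
+	 * wait_event_interruptible_timeout() can return early on a signal
+	 * or a wake-up that beat the flag, so retry for up to two interrupt
+	 * periods before declaring a timeout.
+	 */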
+	bt_stream->wait_flag = 0;
+
+	while (max_timeout_trial && !bt_stream->wait_flag) {
+		t1 = sched_clock();
+		if (bt_stream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			ret = wait_event_interruptible_timeout(bt->tx_wait,
+				bt_stream->wait_flag,
+				nsecs_to_jiffies(timeout_limit));
+		} else {
+			ret = wait_event_interruptible_timeout(bt->rx_wait,
+				bt_stream->wait_flag,
+				nsecs_to_jiffies(timeout_limit));
+		}
+
+		t2 = sched_clock();
+		t2 = t2 - t1; /* in ns (10^9) */
+
+		if (t2 > timeout_limit) {
+			dev_warn(bt->dev, "%s(), stream %d, timeout %llu, limit %llu, ret %d, flag %d\n",
+				 __func__, bt_stream->stream,
+				 t2, timeout_limit, ret,
+				 bt_stream->wait_flag);
+		}
+
+		if (ret < 0) {
+			/*
+			 * error, -ERESTARTSYS if it was interrupted by
+			 * a signal
+			 */
+			dev_warn(bt->dev, "%s(), stream %d, error, trial left %d\n",
+				 __func__,
+				 bt_stream->stream, max_timeout_trial);
+
+			bt_stream->timeout = 1;
+			return ret;
+		} else if (ret == 0) {
+			/* condition is false after timeout */
+			max_timeout_trial--;
+			dev_warn(bt->dev, "%s(), stream %d, error, timeout, condition is false, trial left %d\n",
+				 __func__,
+				 bt_stream->stream, max_timeout_trial);
+
+			if (max_timeout_trial <= 0) {
+				bt_stream->timeout = 1;
+				return -ETIME;
+			}
+		}
+	}
+
+	return 0;
+}
+
+ssize_t mtk_btcvsd_snd_read(struct mtk_btcvsd_snd *bt,
+			    char __user *buf,
+			    size_t count)
+{
+	ssize_t read_size = 0, read_count = 0, cur_read_idx, cont;
+	unsigned int cur_buf_ofs = 0;
+	unsigned long avail;
+	unsigned long flags;
+	unsigned int packet_size = bt->rx->packet_size;
+
+	while (count) {
+		spin_lock_irqsave(&bt->rx_lock, flags);
+		/* available data in RX packet buffer */
+		avail = (bt->rx->packet_w - bt->rx->packet_r) * packet_size;
+
+		cur_read_idx = (bt->rx->packet_r & SCO_RX_PACKET_MASK) *
+			       packet_size;
+		spin_unlock_irqrestore(&bt->rx_lock, flags);
+
+		if (!avail) {
+			int ret = wait_for_bt_irq(bt, bt->rx);
+
+			if (ret)
+				return read_count;
+
+			continue;
+		}
+
+		/* count must be multiple of packet_size */
+		if (count % packet_size != 0 ||
+		    avail % packet_size != 0) {
+			dev_warn(bt->dev, "%s(), count %zu or d %lu is not multiple of packet_size %dd\n",
+				 __func__, count, avail, packet_size);
+
+			count -= count % packet_size;
+			avail -= avail % packet_size;
+		}
+
+		if (count > avail)
+			read_size = avail;
+		else
+			read_size = count;
+
+		/* calculate continue space */
+		cont = bt->rx->buf_size - cur_read_idx;
+		if (read_size > cont)
+			read_size = cont;
+
+		if (copy_to_user(buf + cur_buf_ofs,
+				 bt->rx_packet_buf + cur_read_idx,
+				 read_size)) {
+			dev_warn(bt->dev, "%s(), copy_to_user fail\n",
+				 __func__);
+			return -EFAULT;
+		}
+
+		spin_lock_irqsave(&bt->rx_lock, flags);
+		bt->rx->packet_r += read_size / packet_size;
+		spin_unlock_irqrestore(&bt->rx_lock, flags);
+
+		read_count += read_size;
+		cur_buf_ofs += read_size;
+		count -= read_size;
+	}
+
+	/*
+	 * save current timestamp & buffer time in time_stamp and
+	 * buf_data_equivalent_time
+	 */
+	bt->rx->time_stamp = sched_clock();
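+	/*
+	 * At the 64 kbit/s SCO rate each buffered byte represents 125 us
+	 * of audio; the 16 * 1000 / 2 / 64 factor reduces to exactly that.
+	 */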
+	bt->rx->buf_data_equivalent_time =
+		(unsigned long long)(bt->rx->packet_w - bt->rx->packet_r) *
+		SCO_RX_PLC_SIZE * 16 * 1000 / 2 / 64;
+	bt->rx->buf_data_equivalent_time += read_count * SCO_RX_PLC_SIZE *
+					    16 * 1000 / packet_size / 2 / 64;
+	/* scale the byte-equivalent time from us to ns */
+	bt->rx->buf_data_equivalent_time *= 1000;
+
+	return read_count;
+}
+
+ssize_t mtk_btcvsd_snd_write(struct mtk_btcvsd_snd *bt,
+			     char __user *buf,
+			     size_t count)
+{
+	int written_size = count, avail = 0, cur_write_idx, write_size, cont;
+	unsigned int cur_buf_ofs = 0;
+	unsigned long flags;
+	unsigned int packet_size = bt->tx->packet_size;
+
+	/*
+	 * save current timestamp & buffer time in time_stamp and
+	 * buf_data_equivalent_time
+	 */
+	bt->tx->time_stamp = sched_clock();
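+	/* same 125 us/byte conversion as in mtk_btcvsd_snd_read() */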
+	bt->tx->buf_data_equivalent_time =
+		(unsigned long long)(bt->tx->packet_w - bt->tx->packet_r) *
+		packet_size * 16 * 1000 / 2 / 64;
+
+	/* scale the byte-equivalent time from us to ns */
+	bt->tx->buf_data_equivalent_time *= 1000;
+
+	while (count) {
+		spin_lock_irqsave(&bt->tx_lock, flags);
+		/* free space of TX packet buffer */
+		avail = bt->tx->buf_size -
+			(bt->tx->packet_w - bt->tx->packet_r) * packet_size;
+
+		cur_write_idx = (bt->tx->packet_w % SCO_TX_PACKER_BUF_NUM) *
+				packet_size;
+		spin_unlock_irqrestore(&bt->tx_lock, flags);
+
+		if (!avail) {
+			int ret = wait_for_bt_irq(bt, bt->tx);
+
+			if (ret)
+				return written_size;
+
+			continue;
+		}
+
+		/* count must be multiple of bt->tx->packet_size */
+		if (count % packet_size != 0 ||
+		    avail % packet_size != 0) {
+			dev_warn(bt->dev, "%s(), count %zu or avail %d is not multiple of packet_size %d\n",
+				 __func__, count, avail, packet_size);
+			count -= count % packet_size;
+			avail -= avail % packet_size;
+		}
+
+		if (count > avail)
+			write_size = avail;
+		else
+			write_size = count;
+
+		/* calculate continue space */
+		cont = bt->tx->buf_size - cur_write_idx;
+		if (write_size > cont)
+			write_size = cont;
+
+		if (copy_from_user(bt->tx_packet_buf +
+				   cur_write_idx,
+				   buf + cur_buf_ofs,
+				   write_size)) {
+			dev_warn(bt->dev, "%s(), copy_from_user fail\n",
+				 __func__);
+			return -EFAULT;
+		}
+
+		spin_lock_irqsave(&bt->tx_lock, flags);
+		bt->tx->packet_w += write_size / packet_size;
+		spin_unlock_irqrestore(&bt->tx_lock, flags);
+		cur_buf_ofs += write_size;
+		count -= write_size;
+	}
+
+	return written_size;
+}
+
+static struct mtk_btcvsd_snd_stream *get_bt_stream
+	(struct mtk_btcvsd_snd *bt, struct snd_pcm_substream *substream)
+{
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		return bt->tx;
+	else
+		return bt->rx;
+}
+
+/* pcm ops */
+static const struct snd_pcm_hardware mtk_btcvsd_hardware = {
+	.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
+		 SNDRV_PCM_INFO_RESUME),
+	.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	.buffer_bytes_max = 24 * 1024,
+	.period_bytes_max = 24 * 1024,
+	.periods_min = 2,
+	.periods_max = 16,
+	.fifo_size = 0,
+};
+
+static int mtk_pcm_btcvsd_open(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_component *component =
+		snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+	int ret;
+
+	dev_dbg(bt->dev, "%s(), stream %d, substream %p\n",
+		__func__, substream->stream, substream);
+
+	snd_soc_set_runtime_hwparams(substream, &mtk_btcvsd_hardware);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		ret = mtk_btcvsd_snd_tx_init(bt);
+		bt->tx->substream = substream;
+	} else {
+		ret = mtk_btcvsd_snd_rx_init(bt);
+		bt->rx->substream = substream;
+	}
+
+	return ret;
+}
+
+static int mtk_pcm_btcvsd_close(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_component *component =
+		snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+	struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
+
+	dev_dbg(bt->dev, "%s(), stream %d\n", __func__, substream->stream);
+
+	mtk_btcvsd_snd_set_state(bt, bt_stream, BT_SCO_STATE_IDLE);
+	bt_stream->substream = NULL;
+	return 0;
+}
+
+static int mtk_pcm_btcvsd_hw_params(struct snd_pcm_substream *substream,
+				    struct snd_pcm_hw_params *hw_params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_component *component =
+		snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+	    params_buffer_bytes(hw_params) % bt->tx->packet_size != 0) {
+		dev_warn(bt->dev, "%s(), error, buffer size %d not valid\n",
+			 __func__,
+			 params_buffer_bytes(hw_params));
+		return -EINVAL;
+	}
+
+	substream->runtime->dma_bytes = params_buffer_bytes(hw_params);
+	return 0;
+}
+
+static int mtk_pcm_btcvsd_hw_free(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_component *component =
+		snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		btcvsd_tx_clean_buffer(bt);
+
+	return 0;
+}
+
+static int mtk_pcm_btcvsd_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_component *component =
+		snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+	struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
+
+	dev_dbg(bt->dev, "%s(), stream %d\n", __func__, substream->stream);
+
+	mtk_btcvsd_snd_set_state(bt, bt_stream, BT_SCO_STATE_RUNNING);
+	return 0;
+}
+
+static int mtk_pcm_btcvsd_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_component *component =
+		snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+	struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
+	int stream = substream->stream;
+	int hw_packet_ptr;
+
+	dev_dbg(bt->dev, "%s(), stream %d, cmd %d\n",
+		__func__, substream->stream, cmd);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		hw_packet_ptr = stream == SNDRV_PCM_STREAM_PLAYBACK ?
+				bt_stream->packet_r : bt_stream->packet_w;
+		bt_stream->prev_packet_idx = hw_packet_ptr;
+		bt_stream->prev_frame = 0;
+		bt_stream->trigger_start = 1;
+		return 0;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		bt_stream->trigger_start = 0;
+		mtk_btcvsd_snd_set_state(bt, bt_stream, BT_SCO_STATE_ENDING);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static snd_pcm_uframes_t mtk_pcm_btcvsd_pointer
+	(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_component *component =
+		snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+	struct mtk_btcvsd_snd_stream *bt_stream;
+	snd_pcm_uframes_t frame = 0;
+	int byte = 0;
+	int hw_packet_ptr;
+	int packet_diff;
+	spinlock_t *lock;	/* spinlock for bt stream control */
+	unsigned long flags;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		lock = &bt->tx_lock;
+		bt_stream = bt->tx;
+	} else {
+		lock = &bt->rx_lock;
+		bt_stream = bt->rx;
+	}
+
+	spin_lock_irqsave(lock, flags);
+	hw_packet_ptr = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+			bt->tx->packet_r : bt->rx->packet_w;
+
+	/* get packet diff from last time */
+	if (hw_packet_ptr >= bt_stream->prev_packet_idx) {
+		packet_diff = hw_packet_ptr - bt_stream->prev_packet_idx;
+	} else {
+		/* free-running counter wrapped past INT_MAX; count across the wrap */
+		packet_diff = (INT_MAX - bt_stream->prev_packet_idx) +
+			      (hw_packet_ptr - INT_MIN) + 1;
+	}
+	bt_stream->prev_packet_idx = hw_packet_ptr;
+
+	/* increased bytes */
+	byte = packet_diff * bt_stream->packet_size;
+
+	frame = btcvsd_bytes_to_frame(substream, byte);
+	frame += bt_stream->prev_frame;
+	frame %= substream->runtime->buffer_size;
+
+	bt_stream->prev_frame = frame;
+
+	spin_unlock_irqrestore(lock, flags);
+
+	return frame;
+}
+
+static int mtk_pcm_btcvsd_copy(struct snd_pcm_substream *substream,
+			       int channel, unsigned long pos,
+			       void __user *buf, unsigned long count)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_component *component =
+		snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		mtk_btcvsd_snd_write(bt, buf, count);
+	else
+		mtk_btcvsd_snd_read(bt, buf, count);
+
+	return 0;
+}
+
+static struct snd_pcm_ops mtk_btcvsd_ops = {
+	.open = mtk_pcm_btcvsd_open,
+	.close = mtk_pcm_btcvsd_close,
+	.ioctl = snd_pcm_lib_ioctl,
+	.hw_params = mtk_pcm_btcvsd_hw_params,
+	.hw_free = mtk_pcm_btcvsd_hw_free,
+	.prepare = mtk_pcm_btcvsd_prepare,
+	.trigger = mtk_pcm_btcvsd_trigger,
+	.pointer = mtk_pcm_btcvsd_pointer,
+	.copy_user = mtk_pcm_btcvsd_copy,
+};
+
+/* kcontrol */
+static const char *const btsco_band_str[] = {"NB", "WB"};
+
+static const struct soc_enum btcvsd_enum[] = {
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(btsco_band_str), btsco_band_str),
+};
+
+static int btcvsd_band_get(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+	ucontrol->value.integer.value[0] = bt->band;
+	return 0;
+}
+
+static int btcvsd_band_set(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+	if (ucontrol->value.enumerated.item[0] >= e->items)
+		return -EINVAL;
+
+	bt->band = ucontrol->value.integer.value[0];
+	dev_dbg(bt->dev, "%s(), band %d\n", __func__, bt->band);
+	return 0;
+}
+
+static int btcvsd_tx_mute_get(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+	if (!bt->tx) {
+		ucontrol->value.integer.value[0] = 0;
+		return 0;
+	}
+
+	ucontrol->value.integer.value[0] = bt->tx->mute;
+	return 0;
+}
+
+static int btcvsd_tx_mute_set(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+	if (!bt->tx)
+		return 0;
+
+	bt->tx->mute = ucontrol->value.integer.value[0];
+	return 0;
+}
+
+static int btcvsd_rx_irq_received_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+	if (!bt->rx)
+		return 0;
+
+	ucontrol->value.integer.value[0] = bt->rx->rw_cnt ? 1 : 0;
+	return 0;
+}
+
+static int btcvsd_rx_timeout_get(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+	if (!bt->rx)
+		return 0;
+
+	ucontrol->value.integer.value[0] = bt->rx->timeout;
+	bt->rx->timeout = 0;
+	return 0;
+}
+
+static int btcvsd_rx_timestamp_get(struct snd_kcontrol *kcontrol,
+				   unsigned int __user *data, unsigned int size)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+	int ret = 0;
+	struct mtk_btcvsd_snd_time_buffer_info time_buffer_info_rx;
+
+	if (size > sizeof(struct mtk_btcvsd_snd_time_buffer_info))
+		return -EINVAL;
+
+	get_rx_time_stamp(bt, &time_buffer_info_rx);
+
+	dev_dbg(bt->dev, "%s(), time_stamp_us %llu, data_count_equi_time %llu",
+		__func__,
+		time_buffer_info_rx.time_stamp_us,
+		time_buffer_info_rx.data_count_equi_time);
+
+	if (copy_to_user(data, &time_buffer_info_rx,
+			 sizeof(struct mtk_btcvsd_snd_time_buffer_info))) {
+		dev_warn(bt->dev, "%s(), copy_to_user fail", __func__);
+		ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+static int btcvsd_tx_irq_received_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+	if (!bt->tx)
+		return 0;
+
+	ucontrol->value.integer.value[0] = bt->tx->rw_cnt ? 1 : 0;
+	return 0;
+}
+
+static int btcvsd_tx_timeout_get(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+	ucontrol->value.integer.value[0] = bt->tx->timeout;
+	return 0;
+}
+
+static int btcvsd_tx_timestamp_get(struct snd_kcontrol *kcontrol,
+				   unsigned int __user *data, unsigned int size)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+	int ret = 0;
+	struct mtk_btcvsd_snd_time_buffer_info time_buffer_info_tx;
+
+	if (size > sizeof(struct mtk_btcvsd_snd_time_buffer_info))
+		return -EINVAL;
+
+	get_tx_time_stamp(bt, &time_buffer_info_tx);
+
+	dev_dbg(bt->dev, "%s(), time_stamp_us %llu, data_count_equi_time %llu",
+		__func__,
+		time_buffer_info_tx.time_stamp_us,
+		time_buffer_info_tx.data_count_equi_time);
+
+	if (copy_to_user(data, &time_buffer_info_tx,
+			 sizeof(struct mtk_btcvsd_snd_time_buffer_info))) {
+		dev_warn(bt->dev, "%s(), copy_to_user fail", __func__);
+		ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+static const struct snd_kcontrol_new mtk_btcvsd_snd_controls[] = {
+	SOC_ENUM_EXT("BTCVSD Band", btcvsd_enum[0],
+		     btcvsd_band_get, btcvsd_band_set),
+	SOC_SINGLE_BOOL_EXT("BTCVSD Tx Mute Switch", 0,
+			    btcvsd_tx_mute_get, btcvsd_tx_mute_set),
+	SOC_SINGLE_BOOL_EXT("BTCVSD Tx Irq Received Switch", 0,
+			    btcvsd_tx_irq_received_get, NULL),
+	SOC_SINGLE_BOOL_EXT("BTCVSD Tx Timeout Switch", 0,
+			    btcvsd_tx_timeout_get, NULL),
+	SOC_SINGLE_BOOL_EXT("BTCVSD Rx Irq Received Switch", 0,
+			    btcvsd_rx_irq_received_get, NULL),
+	SOC_SINGLE_BOOL_EXT("BTCVSD Rx Timeout Switch", 0,
+			    btcvsd_rx_timeout_get, NULL),
+	SND_SOC_BYTES_TLV("BTCVSD Rx Timestamp",
+			  sizeof(struct mtk_btcvsd_snd_time_buffer_info),
+			  btcvsd_rx_timestamp_get, NULL),
+	SND_SOC_BYTES_TLV("BTCVSD Tx Timestamp",
+			  sizeof(struct mtk_btcvsd_snd_time_buffer_info),
+			  btcvsd_tx_timestamp_get, NULL),
+};
+
+static int mtk_btcvsd_snd_component_probe(struct snd_soc_component *component)
+{
+	return snd_soc_add_component_controls(component,
+		mtk_btcvsd_snd_controls,
+		ARRAY_SIZE(mtk_btcvsd_snd_controls));
+}
+
+static const struct snd_soc_component_driver mtk_btcvsd_snd_platform = {
+	.name = BTCVSD_SND_NAME,
+	.ops = &mtk_btcvsd_ops,
+	.probe = mtk_btcvsd_snd_component_probe,
+};
+
+static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int irq_id;
+	u32 offset[5] = {0, 0, 0, 0, 0};
+	struct mtk_btcvsd_snd *btcvsd;
+	struct device *dev = &pdev->dev;
+
+	/* init btcvsd private data */
+	btcvsd = devm_kzalloc(dev, sizeof(*btcvsd), GFP_KERNEL);
+	if (!btcvsd)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, btcvsd);
+	btcvsd->dev = dev;
+
+	/* init tx/rx */
+	btcvsd->rx = devm_kzalloc(btcvsd->dev, sizeof(*btcvsd->rx), GFP_KERNEL);
+	if (!btcvsd->rx)
+		return -ENOMEM;
+
+	btcvsd->tx = devm_kzalloc(btcvsd->dev, sizeof(*btcvsd->tx), GFP_KERNEL);
+	if (!btcvsd->tx)
+		return -ENOMEM;
+
+	spin_lock_init(&btcvsd->tx_lock);
+	spin_lock_init(&btcvsd->rx_lock);
+
+	init_waitqueue_head(&btcvsd->tx_wait);
+	init_waitqueue_head(&btcvsd->rx_wait);
+
+	mtk_btcvsd_snd_tx_init(btcvsd);
+	mtk_btcvsd_snd_rx_init(btcvsd);
+
+	/* irq */
+	irq_id = platform_get_irq(pdev, 0);
+	if (irq_id <= 0) {
+		dev_err(dev, "%pOFn no irq found\n", dev->of_node);
+		return irq_id < 0 ? irq_id : -ENXIO;
+	}
+
+	ret = devm_request_irq(dev, irq_id, mtk_btcvsd_snd_irq_handler,
+			       IRQF_TRIGGER_LOW, "BTCVSD_ISR_Handle",
+			       (void *)btcvsd);
+	if (ret) {
+		dev_err(dev, "could not request_irq for BTCVSD_ISR_Handle\n");
+		return ret;
+	}
+
+	btcvsd->irq_id = irq_id;
+
+	/* iomap */
+	btcvsd->bt_pkv_base = of_iomap(dev->of_node, 0);
+	if (!btcvsd->bt_pkv_base) {
+		dev_err(dev, "iomap bt_pkv_base fail\n");
+		return -EIO;
+	}
+
+	btcvsd->bt_sram_bank2_base = of_iomap(dev->of_node, 1);
+	if (!btcvsd->bt_sram_bank2_base) {
+		dev_err(dev, "iomap bt_sram_bank2_base fail\n");
+		return -EIO;
+	}
+
+	btcvsd->infra = syscon_regmap_lookup_by_phandle(dev->of_node,
+							"mediatek,infracfg");
+	if (IS_ERR(btcvsd->infra)) {
+		dev_err(dev, "cannot find infra controller: %ld\n",
+			PTR_ERR(btcvsd->infra));
+		return PTR_ERR(btcvsd->infra);
+	}
+
+	/* get offset */
+	ret = of_property_read_u32_array(dev->of_node, "mediatek,offset",
+					 offset,
+					 ARRAY_SIZE(offset));
+	if (ret) {
+		dev_warn(dev, "%s(), get offset fail, ret %d\n", __func__, ret);
+		return ret;
+	}
+	btcvsd->infra_misc_offset = offset[0];
+	btcvsd->conn_bt_cvsd_mask = offset[1];
+	btcvsd->cvsd_mcu_read_offset = offset[2];
+	btcvsd->cvsd_mcu_write_offset = offset[3];
+	btcvsd->cvsd_packet_indicator = offset[4];
+
+	btcvsd->bt_reg_pkt_r = btcvsd->bt_pkv_base +
+			       btcvsd->cvsd_mcu_read_offset;
+	btcvsd->bt_reg_pkt_w = btcvsd->bt_pkv_base +
+			       btcvsd->cvsd_mcu_write_offset;
+	btcvsd->bt_reg_ctl = btcvsd->bt_pkv_base +
+			     btcvsd->cvsd_packet_indicator;
+
+	/* init state */
+	mtk_btcvsd_snd_set_state(btcvsd, btcvsd->tx, BT_SCO_STATE_IDLE);
+	mtk_btcvsd_snd_set_state(btcvsd, btcvsd->rx, BT_SCO_STATE_IDLE);
+
+	return devm_snd_soc_register_component(dev, &mtk_btcvsd_snd_platform,
+					       NULL, 0);
+}
+
+static int mtk_btcvsd_snd_remove(struct platform_device *pdev)
+{
+	struct mtk_btcvsd_snd *btcvsd = dev_get_drvdata(&pdev->dev);
+
+	iounmap(btcvsd->bt_pkv_base);
+	iounmap(btcvsd->bt_sram_bank2_base);
+	return 0;
+}
+
+static const struct of_device_id mtk_btcvsd_snd_dt_match[] = {
+	{ .compatible = "mediatek,mtk-btcvsd-snd", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mtk_btcvsd_snd_dt_match);
+
+static struct platform_driver mtk_btcvsd_snd_driver = {
+	.driver = {
+		.name = "mtk-btcvsd-snd",
+		.of_match_table = mtk_btcvsd_snd_dt_match,
+	},
+	.probe = mtk_btcvsd_snd_probe,
+	.remove = mtk_btcvsd_snd_remove,
+};
+
+module_platform_driver(mtk_btcvsd_snd_driver);
+
+MODULE_DESCRIPTION("Mediatek ALSA BT SCO CVSD/MSBC Driver");
+MODULE_AUTHOR("KaiChieh Chuang <kaichieh.chuang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
index 968fba4..7064a9f 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
@@ -994,7 +994,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 6,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	{
 		.name = "DL2",
@@ -1013,7 +1012,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 7,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	{
 		.name = "DL3",
@@ -1032,7 +1030,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 8,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	{
 		.name = "DL4",
@@ -1051,7 +1048,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 9,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	{
 		.name = "DL5",
@@ -1070,7 +1066,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 10,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	{
 		.name = "DLM",
@@ -1089,7 +1084,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 12,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	{
 		.name = "UL1",
@@ -1108,7 +1102,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 0,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	{
 		.name = "UL2",
@@ -1127,7 +1120,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 1,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	{
 		.name = "UL3",
@@ -1146,7 +1138,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 2,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	{
 		.name = "UL4",
@@ -1165,7 +1156,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 3,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	{
 		.name = "UL5",
@@ -1184,7 +1174,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 4,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	{
 		.name = "DLBT",
@@ -1203,7 +1192,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 13,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	{
 		.name = "ULBT",
@@ -1222,7 +1210,6 @@
 		.agent_disable_reg = AUDIO_TOP_CON5,
 		.agent_disable_shift = 16,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 };
 
diff --git a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
index 192f4d7..17faa42 100644
--- a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
+++ b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
@@ -401,9 +401,7 @@
 		.hd_reg = AFE_MEMIF_HD_MODE,
 		.hd_shift = DL1_HD_SFT,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	[MT6797_MEMIF_DL2] = {
 		.name = "DL2",
@@ -420,9 +418,7 @@
 		.hd_reg = AFE_MEMIF_HD_MODE,
 		.hd_shift = DL2_HD_SFT,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	[MT6797_MEMIF_DL3] = {
 		.name = "DL3",
@@ -439,9 +435,7 @@
 		.hd_reg = AFE_MEMIF_HD_MODE,
 		.hd_shift = DL3_HD_SFT,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	[MT6797_MEMIF_VUL] = {
 		.name = "VUL",
@@ -458,9 +452,7 @@
 		.hd_reg = AFE_MEMIF_HD_MODE,
 		.hd_shift = VUL_HD_SFT,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	[MT6797_MEMIF_AWB] = {
 		.name = "AWB",
@@ -477,9 +469,7 @@
 		.hd_reg = AFE_MEMIF_HD_MODE,
 		.hd_shift = AWB_HD_SFT,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	[MT6797_MEMIF_VUL12] = {
 		.name = "VUL12",
@@ -496,9 +486,7 @@
 		.hd_reg = AFE_MEMIF_HD_MODE,
 		.hd_shift = VUL_DATA2_HD_SFT,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	[MT6797_MEMIF_DAI] = {
 		.name = "DAI",
@@ -515,9 +503,7 @@
 		.hd_reg = AFE_MEMIF_HD_MODE,
 		.hd_shift = DAI_HD_SFT,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 	[MT6797_MEMIF_MOD_DAI] = {
 		.name = "MOD_DAI",
@@ -534,9 +520,7 @@
 		.hd_reg = AFE_MEMIF_HD_MODE,
 		.hd_shift = MOD_DAI_HD_SFT,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 		.msb_reg = -1,
-		.msb_shift = -1,
 	},
 };
 
diff --git a/sound/soc/mediatek/mt8167/Makefile b/sound/soc/mediatek/mt8167/Makefile
new file mode 100644
index 0000000..2f37af5
--- /dev/null
+++ b/sound/soc/mediatek/mt8167/Makefile
@@ -0,0 +1,9 @@
+MTK_PLATFORM := $(subst ",,$(CONFIG_MTK_PLATFORM))
+subdir-ccflags-y += -Werror -I$(srctree)/drivers/misc/mediatek/base/power/$(MTK_PLATFORM)
+
+snd-soc-mt8167-pcm-objs := \
+    mt8167-afe-pcm.o mt8167-afe-util.o mt8167-afe-controls.o mt8167-afe-debug.o
+
+obj-$(CONFIG_SND_SOC_MT8167) += snd-soc-mt8167-pcm.o
+obj-$(CONFIG_SND_SOC_MT8516_PUMPKIN_MACH) += mt8516-pumpkin.o
+obj-$(CONFIG_SND_SOC_MT8516_VESPER) += mt8516-vesper.o
diff --git a/sound/soc/mediatek/mt8167/mt8167-afe-common.h b/sound/soc/mediatek/mt8167/mt8167-afe-common.h
new file mode 100644
index 0000000..3204050
--- /dev/null
+++ b/sound/soc/mediatek/mt8167/mt8167-afe-common.h
@@ -0,0 +1,239 @@
+/*
+ * mt8167-afe-common.h  --  MediaTek audio driver common definitions
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT8167_AFE_COMMON_H_
+#define _MT8167_AFE_COMMON_H_
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <sound/asound.h>
+
+#define COMMON_CLOCK_FRAMEWORK_API
+/* #define IDLE_TASK_DRIVER_API */
+#define ENABLE_AFE_APLL_TUNER
+#define MT8167_AFE_E1_ONLY
+
+enum {
+	MT8167_AFE_MEMIF_DL1,
+	MT8167_AFE_MEMIF_DL2,
+	MT8167_AFE_MEMIF_VUL,
+	MT8167_AFE_MEMIF_DAI,
+	MT8167_AFE_MEMIF_AWB,
+	MT8167_AFE_MEMIF_MOD_DAI,
+	MT8167_AFE_MEMIF_HDMI,
+	MT8167_AFE_MEMIF_TDM_IN,
+	MT8167_AFE_MEMIF_MULTILINE_IN,
+	MT8167_AFE_MEMIF_NUM,
+	MT8167_AFE_BACKEND_BASE = MT8167_AFE_MEMIF_NUM,
+	MT8167_AFE_IO_MOD_PCM1 = MT8167_AFE_BACKEND_BASE,
+	MT8167_AFE_IO_MOD_PCM2,
+	MT8167_AFE_IO_INT_ADDA,
+	MT8167_AFE_IO_I2S,
+	MT8167_AFE_IO_2ND_I2S,
+	MT8167_AFE_IO_HW_GAIN1,
+	MT8167_AFE_IO_HW_GAIN2,
+	MT8167_AFE_IO_MRG,
+	MT8167_AFE_IO_MRG_BT,
+	MT8167_AFE_IO_PCM_BT,
+	MT8167_AFE_IO_HDMI,
+	MT8167_AFE_IO_TDM_IN,
+	MT8167_AFE_IO_DL_BE,
+	MT8167_AFE_IO_INTDIR_BE,
+	MT8167_AFE_BACKEND_END,
+	MT8167_AFE_BACKEND_NUM = (MT8167_AFE_BACKEND_END - MT8167_AFE_BACKEND_BASE),
+};
+
+enum {
+	MT8167_CLK_TOP_PDN_AUD,
+	MT8167_CLK_APLL12_DIV0,
+	MT8167_CLK_APLL12_DIV1,
+	MT8167_CLK_APLL12_DIV2,
+	MT8167_CLK_APLL12_DIV3,
+	MT8167_CLK_APLL12_DIV4,
+	MT8167_CLK_APLL12_DIV4B,
+	MT8167_CLK_APLL12_DIV5,
+	MT8167_CLK_APLL12_DIV5B,
+	MT8167_CLK_APLL12_DIV6,
+	MT8167_CLK_SPDIF_IN,
+	MT8167_CLK_ENGEN1,
+	MT8167_CLK_ENGEN2,
+	MT8167_CLK_AUD1,
+	MT8167_CLK_AUD2,
+	MT8167_CLK_I2S0_M_SEL,
+	MT8167_CLK_I2S1_M_SEL,
+	MT8167_CLK_I2S2_M_SEL,
+	MT8167_CLK_I2S3_M_SEL,
+	MT8167_CLK_I2S4_M_SEL,
+	MT8167_CLK_I2S5_M_SEL,
+	MT8167_CLK_SPDIF_B_SEL,
+	MT8167_CLK_SPDIFIN_SEL,
+	MT8167_CLK_TOP_UNIVPLL_D2,
+	MT8167_CLK_NUM
+};
+
+enum mt8167_afe_tdm_ch_start {
+	AFE_TDM_CH_START_O28_O29 = 0,
+	AFE_TDM_CH_START_O30_O31,
+	AFE_TDM_CH_START_O32_O33,
+	AFE_TDM_CH_START_O34_O35,
+	AFE_TDM_CH_ZERO,
+};
+
+enum mt8167_afe_irq_mode {
+	MT8167_AFE_IRQ_1 = 0,
+	MT8167_AFE_IRQ_2,
+	MT8167_AFE_IRQ_5, /* dedicated for HDMI */
+	MT8167_AFE_IRQ_6, /* dedicated for SPDIF */
+	MT8167_AFE_IRQ_7,
+	MT8167_AFE_IRQ_10, /* dedicated for TDM IN */
+	MT8167_AFE_IRQ_13, /* dedicated for ULM */
+	MT8167_AFE_IRQ_NUM
+};
+
+enum mt8167_afe_top_clock_gate {
+	MT8167_AFE_CG_AFE,
+	MT8167_AFE_CG_I2S,
+	MT8167_AFE_CG_22M,
+	MT8167_AFE_CG_24M,
+	MT8167_AFE_CG_INTDIR_CK,
+	MT8167_AFE_CG_APLL_TUNER,
+	MT8167_AFE_CG_APLL2_TUNER,
+	MT8167_AFE_CG_HDMI,
+	MT8167_AFE_CG_SPDIF,
+	MT8167_AFE_CG_ADC,
+	MT8167_AFE_CG_DAC,
+	MT8167_AFE_CG_DAC_PREDIS,
+	MT8167_AFE_CG_NUM
+};
+
+enum {
+	MT8167_AFE_DEBUGFS_AFE,
+	MT8167_AFE_DEBUGFS_HDMI,
+	MT8167_AFE_DEBUGFS_TDM_IN,
+	MT8167_AFE_DEBUGFS_NUM,
+};
+
+enum {
+	MT8167_AFE_TDM_OUT_HDMI = 0,
+	MT8167_AFE_TDM_OUT_I2S,
+	MT8167_AFE_TDM_OUT_TDM,
+	MT8167_AFE_TDM_OUT_I2S_32BITS,
+};
+
+enum {
+	MT8167_AFE_1ST_I2S = 0,
+	MT8167_AFE_2ND_I2S,
+	MT8167_AFE_I2S_SETS,
+};
+
+enum {
+	MT8167_AFE_I2S_SEPARATE_CLOCK = 0,
+	MT8167_AFE_I2S_SHARED_CLOCK,
+};
+
+enum {
+	MT8167_AFE_APLL1 = 0,
+	MT8167_AFE_APLL2,
+	MT8167_AFE_APLL_NUM,
+};
+
+struct snd_pcm_substream;
+
+struct mt8167_afe_memif_data {
+	int id;
+	const char *name;
+	int reg_ofs_base;
+	int reg_ofs_end;
+	int reg_ofs_cur;
+	int fs_shift;
+	int mono_shift;
+	int enable_shift;
+	int irq_reg_cnt;
+	int irq_cnt_shift;
+	int irq_mode;
+	int irq_fs_reg;
+	int irq_fs_shift;
+	int irq_clr_shift;
+	int max_sram_size;
+	int sram_offset;
+	int format_reg;
+	int format_shift;
+	int conn_format_mask;
+	int prealloc_size;
+	unsigned long buffer_align_bytes;
+};
+
+struct mt8167_afe_be_dai_data {
+	bool prepared[SNDRV_PCM_STREAM_LAST + 1];
+	unsigned int fmt_mode;
+	unsigned int cached_rate[SNDRV_PCM_STREAM_LAST + 1];
+	snd_pcm_format_t cached_format[SNDRV_PCM_STREAM_LAST + 1];
+	int cached_channels[SNDRV_PCM_STREAM_LAST + 1];
+};
+
+struct mt8167_afe_memif {
+	unsigned int phys_buf_addr;
+	int buffer_size;
+	bool use_sram;
+	bool prepared;
+	struct snd_pcm_substream *substream;
+	const struct mt8167_afe_memif_data *data;
+};
+
+struct mt8167_afe_control_data {
+	unsigned int sinegen_type;
+	unsigned int sinegen_fs;
+	unsigned int loopback_type;
+	bool hdmi_force_clk;
+};
+
+struct mtk_afe {
+	/* address for ioremap audio hardware register */
+	void __iomem *base_addr;
+	void __iomem *sram_address;
+	u32 sram_phy_address;
+	u32 sram_size;
+	struct device *dev;
+	struct regmap *regmap;
+	struct mt8167_afe_memif memif[MT8167_AFE_MEMIF_NUM];
+	struct mt8167_afe_be_dai_data be_data[MT8167_AFE_BACKEND_NUM];
+	struct mt8167_afe_control_data ctrl_data;
+	struct clk *clocks[MT8167_CLK_NUM];
+	unsigned int *backup_regs;
+	bool suspended;
+	int afe_on_ref_cnt;
+	int adda_afe_on_ref_cnt;
+	int i2s_out_on_ref_cnt;
+	int daibt_on_ref_cnt;
+	int irq_mode_ref_cnt[MT8167_AFE_IRQ_NUM];
+	int top_cg_ref_cnt[MT8167_AFE_CG_NUM];
+	int apll_tuner_ref_cnt[MT8167_AFE_APLL_NUM];
+	unsigned int tdm_out_mode;
+	unsigned int i2s_clk_modes[MT8167_AFE_I2S_SETS];
+	unsigned int awb_irq_mode;
+	unsigned int tdm_in_lrck_cycle;
+	/* locks */
+	spinlock_t afe_ctrl_lock;
+	struct mutex afe_clk_mutex;
+#ifdef IDLE_TASK_DRIVER_API
+	int emi_clk_ref_cnt;
+	struct mutex emi_clk_mutex;
+#endif
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs_dentry[MT8167_AFE_DEBUGFS_NUM];
+#endif
+};
+
+#endif
diff --git a/sound/soc/mediatek/mt8167/mt8167-afe-controls.c b/sound/soc/mediatek/mt8167/mt8167-afe-controls.c
new file mode 100644
index 0000000..07eadb5
--- /dev/null
+++ b/sound/soc/mediatek/mt8167/mt8167-afe-controls.c
@@ -0,0 +1,1206 @@
+/*
+ * Mediatek Platform driver ALSA controls
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt8167-afe-controls.h"
+#include "mt8167-afe-common.h"
+#include "mt8167-afe-regs.h"
+#include "mt8167-afe-util.h"
+#include <sound/soc.h>
+
+#define ENUM_TO_STR(enum) #enum
+
+struct snd_soc_component *spdif_component;
+
+enum {
+	CTRL_SGEN_EN = 0,
+	CTRL_SGEN_FS,
+	CTRL_AP_LOOPBACK,
+};
+
+enum {
+	AFE_SGEN_OFF = 0,
+	AFE_SGEN_I0I1,
+	AFE_SGEN_I2,
+	AFE_SGEN_I3I4,
+	AFE_SGEN_I5I6,
+	AFE_SGEN_I7I8,
+	AFE_SGEN_I9,
+	AFE_SGEN_I10I11,
+	AFE_SGEN_I12I13,
+	AFE_SGEN_I14,
+	AFE_SGEN_I15I16,
+	AFE_SGEN_I17I18,
+	AFE_SGEN_I19I20,
+	AFE_SGEN_I21I22,
+
+	AFE_SGEN_O0O1,
+	AFE_SGEN_O2,
+	AFE_SGEN_O3,
+	AFE_SGEN_O4,
+	AFE_SGEN_O3O4,
+	AFE_SGEN_O5O6,
+	AFE_SGEN_O7O8,
+	AFE_SGEN_O9O10,
+	AFE_SGEN_O11,
+	AFE_SGEN_O12,
+	AFE_SGEN_O13O14,
+	AFE_SGEN_O15O16,
+	AFE_SGEN_O17O18,
+	AFE_SGEN_O19O20,
+	AFE_SGEN_O21O22,
+	AFE_SGEN_O23O24,
+	AFE_SGEN_SPDIFIN,
+};
+
+enum {
+	AFE_SGEN_8K = 0,
+	AFE_SGEN_11K,
+	AFE_SGEN_12K,
+	AFE_SGEN_16K,
+	AFE_SGEN_22K,
+	AFE_SGEN_24K,
+	AFE_SGEN_32K,
+	AFE_SGEN_44K,
+	AFE_SGEN_48K,
+};
+
+enum {
+	AP_LOOPBACK_NONE = 0,
+	AP_LOOPBACK_AMIC_TO_SPK,
+	AP_LOOPBACK_AMIC_TO_HP,
+	AP_LOOPBACK_DMIC_TO_SPK,
+	AP_LOOPBACK_DMIC_TO_HP,
+	AP_LOOPBACK_HEADSET_MIC_TO_SPK,
+	AP_LOOPBACK_HEADSET_MIC_TO_HP,
+	AP_LOOPBACK_DUAL_AMIC_TO_SPK,
+	AP_LOOPBACK_DUAL_AMIC_TO_HP,
+	AP_LOOPBACK_DUAL_DMIC_TO_SPK,
+	AP_LOOPBACK_DUAL_DMIC_TO_HP,
+};
+
+static const char *const sgen_func[] = {
+	ENUM_TO_STR(AFE_SGEN_OFF),
+	ENUM_TO_STR(AFE_SGEN_I0I1),
+	ENUM_TO_STR(AFE_SGEN_I2),
+	ENUM_TO_STR(AFE_SGEN_I3I4),
+	ENUM_TO_STR(AFE_SGEN_I5I6),
+	ENUM_TO_STR(AFE_SGEN_I7I8),
+	ENUM_TO_STR(AFE_SGEN_I9),
+	ENUM_TO_STR(AFE_SGEN_I10I11),
+	ENUM_TO_STR(AFE_SGEN_I12I13),
+	ENUM_TO_STR(AFE_SGEN_I14),
+	ENUM_TO_STR(AFE_SGEN_I15I16),
+	ENUM_TO_STR(AFE_SGEN_I17I18),
+	ENUM_TO_STR(AFE_SGEN_I19I20),
+	ENUM_TO_STR(AFE_SGEN_I21I22),
+	ENUM_TO_STR(AFE_SGEN_O0O1),
+	ENUM_TO_STR(AFE_SGEN_O2),
+	ENUM_TO_STR(AFE_SGEN_O3),
+	ENUM_TO_STR(AFE_SGEN_O4),
+	ENUM_TO_STR(AFE_SGEN_O3O4),
+	ENUM_TO_STR(AFE_SGEN_O5O6),
+	ENUM_TO_STR(AFE_SGEN_O7O8),
+	ENUM_TO_STR(AFE_SGEN_O9O10),
+	ENUM_TO_STR(AFE_SGEN_O11),
+	ENUM_TO_STR(AFE_SGEN_O12),
+	ENUM_TO_STR(AFE_SGEN_O13O14),
+	ENUM_TO_STR(AFE_SGEN_O15O16),
+	ENUM_TO_STR(AFE_SGEN_O17O18),
+	ENUM_TO_STR(AFE_SGEN_O19O20),
+	ENUM_TO_STR(AFE_SGEN_O21O22),
+	ENUM_TO_STR(AFE_SGEN_O23O24),
+	ENUM_TO_STR(AFE_SGEN_SPDIFIN),
+};
+
+static const char *const sgen_fs_func[] = {
+	ENUM_TO_STR(AFE_SGEN_8K),
+	ENUM_TO_STR(AFE_SGEN_11K),
+	ENUM_TO_STR(AFE_SGEN_12K),
+	ENUM_TO_STR(AFE_SGEN_16K),
+	ENUM_TO_STR(AFE_SGEN_22K),
+	ENUM_TO_STR(AFE_SGEN_24K),
+	ENUM_TO_STR(AFE_SGEN_32K),
+	ENUM_TO_STR(AFE_SGEN_44K),
+	ENUM_TO_STR(AFE_SGEN_48K),
+};
+
+static const char *const ap_loopback_func[] = {
+	ENUM_TO_STR(AP_LOOPBACK_NONE),
+	ENUM_TO_STR(AP_LOOPBACK_AMIC_TO_SPK),
+	ENUM_TO_STR(AP_LOOPBACK_AMIC_TO_HP),
+	ENUM_TO_STR(AP_LOOPBACK_DMIC_TO_SPK),
+	ENUM_TO_STR(AP_LOOPBACK_DMIC_TO_HP),
+	ENUM_TO_STR(AP_LOOPBACK_HEADSET_MIC_TO_SPK),
+	ENUM_TO_STR(AP_LOOPBACK_HEADSET_MIC_TO_HP),
+	ENUM_TO_STR(AP_LOOPBACK_DUAL_AMIC_TO_SPK),
+	ENUM_TO_STR(AP_LOOPBACK_DUAL_AMIC_TO_HP),
+	ENUM_TO_STR(AP_LOOPBACK_DUAL_DMIC_TO_SPK),
+	ENUM_TO_STR(AP_LOOPBACK_DUAL_DMIC_TO_HP),
+};
+
+static int mt8167_afe_sgen_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	struct mt8167_afe_control_data *data = &afe->ctrl_data;
+
+	ucontrol->value.integer.value[0] = data->sinegen_type;
+	return 0;
+}
+
+static int mt8167_afe_sgen_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	struct mt8167_afe_control_data *data = &afe->ctrl_data;
+
+	if (data->sinegen_type == ucontrol->value.integer.value[0])
+		return 0;
+
+	mt8167_afe_enable_main_clk(afe);
+
+	if (data->sinegen_type != AFE_SGEN_OFF)
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0xf0000000);
+
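+	/*
+	 * The AFE_SGEN_CON0 values below select the interconnect point and
+	 * tone parameters for each case; they are presumably taken from
+	 * vendor reference tables rather than derived from a documented
+	 * formula.
+	 */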
+	switch (ucontrol->value.integer.value[0]) {
+	case AFE_SGEN_I0I1:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x048c2762);
+		break;
+	case AFE_SGEN_I2:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x146c2662);
+		break;
+	case AFE_SGEN_I3I4:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x24862862);
+		break;
+	case AFE_SGEN_I5I6:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x346c2662);
+		break;
+	case AFE_SGEN_I7I8:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x446c2662);
+		break;
+	case AFE_SGEN_I10I11:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x646c2662);
+		break;
+	case AFE_SGEN_I12I13:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x746c2662);
+		break;
+	case AFE_SGEN_I15I16:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x946c2662);
+		break;
+	case AFE_SGEN_O0O1:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x0c7c27c2);
+		break;
+	case AFE_SGEN_O2:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x1c6c26c2);
+		break;
+	case AFE_SGEN_O3:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x2e8c28c2);
+		break;
+	case AFE_SGEN_O4:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x2d8c28c2);
+		break;
+	case AFE_SGEN_O3O4:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x2c8c28c2);
+		break;
+	case AFE_SGEN_O5O6:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x3c6c26c2);
+		break;
+	case AFE_SGEN_O7O8:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x4c6c26c2);
+		break;
+	case AFE_SGEN_O9O10:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x5c6c26c2);
+		break;
+	case AFE_SGEN_O11:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x6c6c26c2);
+		break;
+	case AFE_SGEN_O12:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x7c0e80e8);
+		break;
+	case AFE_SGEN_O13O14:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x8c6c26c2);
+		break;
+	case AFE_SGEN_O15O16:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0x9c6c26c2);
+		break;
+	case AFE_SGEN_I9:
+	case AFE_SGEN_I14:
+	case AFE_SGEN_I17I18:
+	case AFE_SGEN_I19I20:
+	case AFE_SGEN_I21I22:
+	case AFE_SGEN_O17O18:
+	case AFE_SGEN_O19O20:
+	case AFE_SGEN_O21O22:
+	case AFE_SGEN_O23O24:
+		/* not supported */
+		break;
+	case AFE_SGEN_SPDIFIN: /* 2ch 24bit */
+		regmap_update_bits(afe->regmap, AFE_SINEGEN_CON_SPDIFIN,
+				   0xffffffff, 0x110e10e2 /* alt: 0x110c10e2 */);
+		break;
+	default:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xffffffff, 0xf0000000);
+		break;
+	}
+
+	mt8167_afe_disable_main_clk(afe);
+
+	data->sinegen_type = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int mt8167_afe_sgen_fs_get(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	struct mt8167_afe_control_data *data = &afe->ctrl_data;
+
+	ucontrol->value.integer.value[0] = data->sinegen_fs;
+	return 0;
+}
+
+static int mt8167_afe_sgen_fs_put(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	struct mt8167_afe_control_data *data = &afe->ctrl_data;
+
+	mt8167_afe_enable_main_clk(afe);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case AFE_SGEN_8K:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xf00f00, 0x0);
+		break;
+	case AFE_SGEN_11K:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xf00f00, 0x100100);
+		break;
+	case AFE_SGEN_12K:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xf00f00, 0x200200);
+		break;
+	case AFE_SGEN_16K:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xf00f00, 0x400400);
+		break;
+	case AFE_SGEN_22K:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xf00f00, 0x500500);
+		break;
+	case AFE_SGEN_24K:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xf00f00, 0x600600);
+		break;
+	case AFE_SGEN_32K:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xf00f00, 0x800800);
+		break;
+	case AFE_SGEN_44K:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xf00f00, 0x900900);
+		break;
+	case AFE_SGEN_48K:
+		regmap_update_bits(afe->regmap, AFE_SGEN_CON0, 0xf00f00, 0xa00a00);
+		break;
+	default:
+		break;
+	}
+
+	mt8167_afe_disable_main_clk(afe);
+
+	data->sinegen_fs = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int mt8167_afe_ap_loopback_get(struct snd_kcontrol *kcontrol,
+					     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	struct mt8167_afe_control_data *data = &afe->ctrl_data;
+
+	ucontrol->value.integer.value[0] = data->loopback_type;
+
+	return 0;
+}
+
+static int mt8167_afe_ap_loopback_put(struct snd_kcontrol *kcontrol,
+					     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	struct mt8167_afe_control_data *data = &afe->ctrl_data;
+	u32 sample_rate = 48000;
+	long val = ucontrol->value.integer.value[0];
+
+	if (data->loopback_type == val)
+		return 0;
+
+	if (data->loopback_type != AP_LOOPBACK_NONE) {
+		if (val == AP_LOOPBACK_AMIC_TO_SPK ||
+		    val == AP_LOOPBACK_AMIC_TO_HP ||
+		    val == AP_LOOPBACK_DMIC_TO_SPK ||
+		    val == AP_LOOPBACK_DMIC_TO_HP) {
+			/* disconnect I03 <-> O03, I03 <-> O04 */
+			regmap_update_bits(afe->regmap, AFE_CONN1,
+					   AFE_CONN1_I03_O03_S,
+					   0);
+			regmap_update_bits(afe->regmap, AFE_CONN2,
+					   AFE_CONN2_I03_O04_S,
+					   0);
+		} else {
+			/* disconnect I03 <-> O03, I04 <-> O04 */
+			regmap_update_bits(afe->regmap, AFE_CONN1,
+					   AFE_CONN1_I03_O03_S,
+					   0);
+			regmap_update_bits(afe->regmap, AFE_CONN2,
+					   AFE_CONN2_I04_O04_S,
+					   0);
+		}
+
+		regmap_update_bits(afe->regmap, AFE_ADDA_UL_DL_CON0, 0x1, 0x0);
+		regmap_update_bits(afe->regmap, AFE_ADDA_DL_SRC2_CON0, 0x1, 0x0);
+		regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0, 0x1, 0x0);
+		regmap_update_bits(afe->regmap, AFE_I2S_CON1, 0x1, 0x0);
+
+		mt8167_afe_disable_afe_on(afe);
+
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_DAC);
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_DAC_PREDIS);
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_ADC);
+		mt8167_afe_disable_main_clk(afe);
+	}
+
+	if (val != AP_LOOPBACK_NONE) {
+		if (val == AP_LOOPBACK_DMIC_TO_SPK ||
+		    val == AP_LOOPBACK_DMIC_TO_HP ||
+		    val == AP_LOOPBACK_DUAL_DMIC_TO_SPK ||
+		    val == AP_LOOPBACK_DUAL_DMIC_TO_HP) {
+			sample_rate = 32000;
+		}
+
+		mt8167_afe_enable_main_clk(afe);
+
+		mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_DAC);
+		mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_DAC_PREDIS);
+		mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_ADC);
+
+		if (val == AP_LOOPBACK_AMIC_TO_SPK ||
+		    val == AP_LOOPBACK_AMIC_TO_HP ||
+		    val == AP_LOOPBACK_DMIC_TO_SPK ||
+		    val == AP_LOOPBACK_DMIC_TO_HP) {
+			/* connect I03 <-> O03, I03 <-> O04 */
+			regmap_update_bits(afe->regmap, AFE_CONN1,
+					   AFE_CONN1_I03_O03_S,
+					   AFE_CONN1_I03_O03_S);
+			regmap_update_bits(afe->regmap, AFE_CONN2,
+					   AFE_CONN2_I03_O04_S,
+					   AFE_CONN2_I03_O04_S);
+		} else {
+			/* connect I03 <-> O03, I04 <-> O04 */
+			regmap_update_bits(afe->regmap, AFE_CONN1,
+					   AFE_CONN1_I03_O03_S,
+					   AFE_CONN1_I03_O03_S);
+			regmap_update_bits(afe->regmap, AFE_CONN2,
+					   AFE_CONN2_I04_O04_S,
+					   AFE_CONN2_I04_O04_S);
+		}
+
+		/* 16 bit by default */
+		regmap_update_bits(afe->regmap, AFE_CONN_24BIT,
+				AFE_CONN_24BIT_O03 | AFE_CONN_24BIT_O04, 0);
+
+		/* configure uplink */
+		if (sample_rate == 32000) {
+			regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0,
+					   0x001e0000, (2 << 17) | (2 << 19));
+			regmap_update_bits(afe->regmap, AFE_ADDA_NEWIF_CFG1,
+					   0xc00, 1 << 10);
+		} else {
+			regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0,
+					   0x001e0000, (3 << 17) | (3 << 19));
+			regmap_update_bits(afe->regmap, AFE_ADDA_NEWIF_CFG1,
+					   0xc00, 3 << 10);
+		}
+
+		regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0, 0x1, 0x1);
+
+		/* configure downlink */
+		regmap_update_bits(afe->regmap, AFE_ADDA_PREDIS_CON0,
+				   0xffffffff, 0);
+		regmap_update_bits(afe->regmap, AFE_ADDA_PREDIS_CON1,
+				   0xffffffff, 0);
+
+		if (sample_rate == 32000) {
+			regmap_update_bits(afe->regmap, AFE_ADDA_DL_SRC2_CON0,
+					   0xffffffff, 0x63001802);
+			regmap_update_bits(afe->regmap, AFE_I2S_CON1,
+					   0xf << 8, 0x9 << 8);
+		} else {
+			regmap_update_bits(afe->regmap, AFE_ADDA_DL_SRC2_CON0,
+					   0xffffffff, 0x83001802);
+			regmap_update_bits(afe->regmap, AFE_I2S_CON1,
+					   0xf << 8, 0xa << 8);
+		}
+
+		regmap_update_bits(afe->regmap, AFE_ADDA_DL_SRC2_CON1,
+				   0xffffffff, 0xf74f0000);
+		regmap_update_bits(afe->regmap, AFE_ADDA_DL_SRC2_CON0, 0x1, 0x1);
+		regmap_update_bits(afe->regmap, AFE_ADDA_UL_DL_CON0, 0x1, 0x1);
+		regmap_update_bits(afe->regmap, AFE_I2S_CON1, 0x1, 0x1);
+
+		mt8167_afe_enable_afe_on(afe);
+	}
+
+	data->loopback_type = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int mt8167_afe_hdmi_force_clk_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	struct mt8167_afe_control_data *data = &afe->ctrl_data;
+
+	ucontrol->value.integer.value[0] = data->hdmi_force_clk;
+
+	return 0;
+}
+
+static int mt8167_afe_hdmi_force_clk_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	struct mt8167_afe_control_data *data = &afe->ctrl_data;
+
+	data->hdmi_force_clk = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+static int mt8167_afe_tdm_out_sgen_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	unsigned int val = 0;
+
+	mt8167_afe_enable_main_clk(afe);
+
+	regmap_read(afe->regmap, AFE_SINEGEN_CON_TDM, &val);
+
+	mt8167_afe_disable_main_clk(afe);
+
+	ucontrol->value.integer.value[0] = (val & AFE_SINEGEN_CON_TDM_OUT_EN);
+
+	return 0;
+}
+
+static int mt8167_afe_tdm_out_sgen_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+
+	mt8167_afe_enable_main_clk(afe);
+
+	if (ucontrol->value.integer.value[0])
+		regmap_update_bits(afe->regmap, AFE_SINEGEN_CON_TDM,
+				   GENMASK(31, 0), 0x11071071);
+	else
+		regmap_update_bits(afe->regmap, AFE_SINEGEN_CON_TDM,
+				   GENMASK(31, 0), 0x100100);
+
+	mt8167_afe_disable_main_clk(afe);
+
+	return 0;
+}
+
+static int mt8167_afe_tdm_in_sgen_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	unsigned int val = 0;
+
+	mt8167_afe_enable_main_clk(afe);
+
+	regmap_read(afe->regmap, AFE_SINEGEN_CON_TDM_IN, &val);
+
+	mt8167_afe_disable_main_clk(afe);
+
+	ucontrol->value.integer.value[0] = (val & AFE_SINEGEN_CON_TDM_IN_EN);
+
+	return 0;
+}
+
+static int mt8167_afe_tdm_in_sgen_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+
+	mt8167_afe_enable_main_clk(afe);
+
+	if (ucontrol->value.integer.value[0])
+		regmap_update_bits(afe->regmap, AFE_SINEGEN_CON_TDM_IN,
+				   GENMASK(31, 0), 0x11071071);
+	else
+		regmap_update_bits(afe->regmap, AFE_SINEGEN_CON_TDM_IN,
+				   GENMASK(31, 0), 0x100100);
+
+	mt8167_afe_disable_main_clk(afe);
+
+	return 0;
+}
+
+static const struct soc_enum mt8167_afe_soc_enums[] = {
+	[CTRL_SGEN_EN] = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(sgen_func),
+				sgen_func),
+	[CTRL_SGEN_FS] = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(sgen_fs_func),
+				sgen_fs_func),
+	[CTRL_AP_LOOPBACK] = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ap_loopback_func),
+				ap_loopback_func),
+};
+
+static int mt8167_afe_hw_gain1_vol_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	unsigned int val;
+
+	mt8167_afe_enable_main_clk(afe);
+	regmap_read(afe->regmap, AFE_GAIN1_CON1, &val);
+	mt8167_afe_disable_main_clk(afe);
+	ucontrol->value.integer.value[0] = val & AFE_GAIN1_CON1_MASK;
+
+	return 0;
+}
+
+static int mt8167_afe_hw_gain1_vol_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	unsigned int val;
+
+	val = ucontrol->value.integer.value[0];
+	mt8167_afe_enable_main_clk(afe);
+	regmap_update_bits(afe->regmap, AFE_GAIN1_CON1, AFE_GAIN1_CON1_MASK, val);
+	mt8167_afe_disable_main_clk(afe);
+	return 0;
+}
+
+static int mt8167_afe_hw_gain1_sampleperstep_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	unsigned int val;
+
+	mt8167_afe_enable_main_clk(afe);
+	regmap_read(afe->regmap, AFE_GAIN1_CON0, &val);
+	mt8167_afe_disable_main_clk(afe);
+	ucontrol->value.integer.value[0] = (val & AFE_GAIN1_CON0_SAMPLE_PER_STEP_MASK) >> 8;
+
+	return 0;
+}
+
+static int mt8167_afe_hw_gain1_sampleperstep_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(comp);
+	unsigned int val;
+
+	val = ucontrol->value.integer.value[0];
+	mt8167_afe_enable_main_clk(afe);
+	regmap_update_bits(afe->regmap, AFE_GAIN1_CON0, AFE_GAIN1_CON0_SAMPLE_PER_STEP_MASK, val << 8);
+	mt8167_afe_disable_main_clk(afe);
+	return 0;
+}
+
+/* SPDIF IN */
+#define ISPDIF_FS_SUPPORT_RANGE 9
+
+typedef enum {
+	SPDIFIN_OUT_RANGE = 0x00, /* 0x00~0x06: freq out of range */
+	SPDIFIN_32K = 0x07,
+	SPDIFIN_44K = 0x08,
+	SPDIFIN_48K = 0x09,
+	SPDIFIN_64K = 0x0A,
+	SPDIFIN_88K = 0x0B,
+	SPDIFIN_96K = 0x0C,
+	SPDIFIN_128K = 0x0D,
+	SPDIFIN_176K = 0x0E,
+	SPDIFIN_192K = 0x0F
+} SPDIFIN_FS;
+
+struct afe_dir_info {
+	int rate;
+	u32 u_bit[2][6];
+	u32 c_bit[6];
+};
+
+enum afe_spdifrx_port {
+	SPDIFRX_PORT_NONE = 0,
+	SPDIFRX_PORT_OPT = 1,
+	SPDIFRX_PORT_ARC = 2
+};
+
+static volatile struct afe_dir_info spdifrx_state;
+static bool spdifrx_inited;
+
+static u32 spdifrx_fscnt[16][9] = {
+	/*32k       44.1k        48k             64k             88.2k      96k          128k       176k         192k*/
+	{6750, 4898, 4500, 3375, 2455, 2250, 1688, 1227, 1125 }, /* 1 subframe*/
+	{13500, 9796, 9000, 6750, 4909, 4500, 3375, 2455, 2250 }, /* 2 subframe*/
+	{27000, 19592, 18000, 13500, 9818, 9000, 6750, 4909, 4500 }, /* 4 subframe*/
+	{54000, 39184, 36000, 27000, 19636, 18000, 13500, 9818, 9000 }, /* 8 subframe*/
+	{108000, 78367, 72000, 54000, 39273, 36000, 27000, 19636, 18000 }, /* 16 subframe*/
+	{216000, 156735, 144000, 108000, 78546, 72000, 54000, 39273, 36000 }, /* 32 subframe*/
+	{432000, 313469, 288000, 216000, 157091, 144000, 108000, 78546, 72000 }, /* 64 subframe*/
+	{864000, 626939, 576000, 432000, 314182, 288000, 216000, 157091, 144000 }, /* 128 subframe*/
+	{1728027, 1253897, 1152018, 864014, 626949, 576008, 432000, 313469, 288000 }, /*256 subframe*/
+	{3456000, 2507755, 2304000, 1728000, 1256727, 1152000, 864000, 628364, 576000 }, /* 512 subframe*/
+	{6912000, 5015510, 4608000, 3456000, 2513455, 2304000, 1728000, 1256727, 1152000 }, /* 1024 subframe*/
+	{13824000, 10031020, 9216000, 6912000, 5026909, 4608000, 3456000, 2513455, 2304000 }, /* 2048 subframe*/
+	 /* 4096 subframe*/
+	{27648000, 20062041, 18432000, 13824000, 10053818, 9216000, 6912000, 5026909, 4608000 },
+	/* 8192 subframe*/
+	{55296000, 40124082, 36864000, 27648000, 20107636, 18432000, 13824000, 10053818, 9216000 },
+	/* 16384 subframe*/
+	{110592000, 80248163, 73728000, 55296000, 40215272, 36864000, 27648000, 20107636, 18432000},
+	/* 32768 subframe*/
+	{221184000, 160496327, 147456000, 110592000, 80430546, 73728000, 55296000, 40215273, 36864000}
+};
+
+static u32 spdifrx_fsoft[16][9]  = {
+	/*32k       44.1k        48k            64k         88.2k       96k         128k        176k        192k*/
+	{78, 78, 78, 78, 78, 78, 78, 78, 78 }, /* 1 subframe*/
+	{156, 156, 156, 156, 156, 156, 156, 156, 156 }, /* 2 subframe*/
+	{312, 312, 312, 312, 312, 312, 312, 312, 312 }, /* 4 subframe*/
+	{625, 625, 625, 625, 625, 625, 625, 625, 625 }, /* 8 subframe*/
+	{1250, 1250, 1250, 1250, 1250, 1250, 1250, 1250, 1250 }, /* 16 subframe*/
+	{2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500 }, /*32 subframe*/
+	{5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000 }, /* 64 subframe*/
+	{10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000 }, /* 128 subframe*/
+	{200000, 45000, 45000, 27000, 20000, 18000, 14000, 10000, 9000 }, /* 256 subframe*/
+	{60000, 45000, 45000, 20000, 20000, 20000, 20000, 20000, 20000 }, /* 512 subframe*/
+	{80000, 80000, 80000, 80000, 80000, 80000, 80000, 80000, 80000 }, /* 1024 subframe*/
+	{160000, 160000, 160000, 160000, 160000, 160000, 160000, 160000, 160000 }, /* 2048 subframe*/
+	{320000, 320000, 320000, 320000, 320000, 320000, 320000, 320000, 320000 }, /* 4096 subframe*/
+	{640000, 640000, 640000, 640000, 640000, 640000, 640000, 640000, 640000 }, /* 8192 subframe*/
+	{1280000, 1280000, 1280000, 1280000, 1280000, 1280000, 1280000, 1280000, 1280000 }, /* 16384 subframe*/
+	{2560000, 2560000, 2560000, 2560000, 2560000, 2560000, 2560000, 2560000, 2560000 }  /* 32768 subframe*/
+};
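+
+/*
+ * Per-window tolerance applied around spdifrx_fscnt[][]: a measured count
+ * within (cnt - soft, cnt + soft) is accepted as the corresponding rate.
+ */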
+
+u32 _u4LRCKCmp432M[9] = {
+	/*  32k  44.1k   48k   64k  88.2k   96k  128k  176.4k  192k */
+	/* 203,   147,  135,  102,    73,   68,   51,     37,   34     432M*3%/(2*fs) */
+	   176,    98,   90,   68,    49,   46,   34,     20,   23
+	/* 432M*3%/(3*fs); 32k: 136+40, 176.4k: 25-5 */
+	/* 102,    74,   68,   51,    37,   34,   26,     18,   17     432M*3%/(4*fs) */
+};
+
+u32 _u4LRCKCmp594M[9] = {
+	/*  32k  44.1k   48k   64k  88.2k   96k  128k  176.4k  192k */
+	   279,   203,  186,  140,   102,   93,   70,     51,   47
+};
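+
+/*
+ * LRCK compare thresholds, written to the SPDIFIN_LRC_MASK field in the ISR.
+ * The values match clock * 3% / (n * fs): the active 432M row uses n = 3
+ * (with hand-tuned 32k and 176.4k entries, per the comments above), and the
+ * 594M row uses n = 2, e.g. 594000000 * 0.03 / (2 * 48000) ~= 186. This is
+ * inferred from the numbers themselves, not from a datasheet.
+ */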
+
+const volatile struct afe_dir_info *afe_spdifrx_state(void)
+{
+	return &spdifrx_state;
+}
+
+static void spdifrx_select_port(enum afe_spdifrx_port port)
+{
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(spdif_component);
+	unsigned int val;
+
+	if (port == SPDIFRX_PORT_OPT) {
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_INT_EXT, MULTI_INPUT_SEL_MASK, MULTI_INPUT_SEL_OPT);
+		/*
+		 * Caution: write 0x08 to AFE_SPDIFIN_INT_EXT, not 0x108.
+		 * If bit 8 stays set, IRQ9 keeps firing and never stops.
+		 */
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_INT_EXT, 0xf << 8, 0 << 8);
+	} else {
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_INT_EXT, MULTI_INPUT_SEL_MASK, MULTI_INPUT_SEL_ARC);
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_INT_EXT, 0xf << 8, 4 << 8);
+	}
+
+	regmap_read(afe->regmap, AFE_SPDIFIN_CFG1, &val);
+	val &= (AFE_SPDIFIN_REAL_OPTICAL) & (AFE_SPDIFIN_SWITCH_REAL_OPTICAL);
+	regmap_write(afe->regmap, AFE_SPDIFIN_CFG1, val);
+
+	regmap_read(afe->regmap, AFE_SPDIFIN_CFG1, &val);
+	val |= AFE_SPDIFIN_FIFOSTARTPOINT_5;
+	regmap_write(afe->regmap, AFE_SPDIFIN_CFG1, val);
+}
+
+static void spdifrx_clear_vucp(void)
+{
+	memset((void *)spdifrx_state.c_bit, 0xff, sizeof(spdifrx_state.c_bit));
+	memset((void *)spdifrx_state.u_bit, 0xff, sizeof(spdifrx_state.u_bit));
+}
+
+static u32 spdifrx_fs_interpreter(u32 fsval)
+{
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(spdif_component);
+	u8 period, cnt;
+	u32 fs = SPDIFIN_OUT_RANGE;
+	u32 rangeplus, rangeminus;
+	unsigned int val;
+
+	regmap_read(afe->regmap, AFE_SPDIFIN_BR, &val);
+	period = (val & AFE_SPDIFIN_BR_SUBFRAME_MASK) >> 8;
+
+	for (cnt = 0; cnt < ISPDIF_FS_SUPPORT_RANGE; cnt++) {
+		rangeplus = (spdifrx_fscnt[period][cnt] + spdifrx_fsoft[period][cnt]);
+		rangeminus = (spdifrx_fscnt[period][cnt] - spdifrx_fsoft[period][cnt]);
+		rangeplus = (rangeplus * 624) / 432;
+		rangeminus = (rangeminus * 624) / 432;
+		if ((fsval > rangeminus) && (fsval < rangeplus)) {
+			fs = cnt + SPDIFIN_32K; /*from 32k~192k*/
+			break;
+		}
+	}
+
+	if (cnt == ISPDIF_FS_SUPPORT_RANGE) {
+		fs = SPDIFIN_OUT_RANGE;
+		pr_err("%s() FS out of detected range!\n", __func__);
+	}
+
+	return fs;
+}
+
+static void (*spdifrx_callback)(void);
+
+/*
+ * [Programming Guide]
+ * [SPDIF IN] spdif in IRQ9 callback
+ */
+static u32 get_clear_bits(u32 v)
+{
+	u32 bits = 0;
+	/* AFE_SPDIFIN_DEBUG3 */
+	if (v & SPDIFIN_PRE_ERR_NON_STS)
+		bits |= SPDIFIN_PRE_ERR_CLEAR;				/* 0-0 */
+	if (v & SPDIFIN_PRE_ERR_B_STS)
+		bits |= SPDIFIN_PRE_ERR_B_CLEAR;				/* 1-1 */
+	if (v & SPDIFIN_PRE_ERR_M_STS)
+		bits |= SPDIFIN_PRE_ERR_M_CLEAR;				/* 2-2 */
+	if (v & SPDIFIN_PRE_ERR_W_STS)
+		bits |= SPDIFIN_PRE_ERR_W_CLEAR;				/* 3-3 */
+	if (v & SPDIFIN_PRE_ERR_BITCNT_STS)
+		bits |= SPDIFIN_PRE_ERR_BITCNT_CLEAR;			/* 4-4 */
+	if (v & SPDIFIN_PRE_ERR_PARITY_STS)
+		bits |= SPDIFIN_PRE_ERR_PARITY_CLEAR;			/* 5-5 */
+	if (v & SPDIFIN_FIFO_ERR_STS)
+		bits |= SPDIFIN_FIFO_ERR_CLEAR;				/* 30,31 - 6,7 */
+	if (v & SPDIFIN_TIMEOUT_ERR_STS)
+		bits |= SPDIFIN_TIMEOUT_INT_CLEAR;				/* 6-8 */
+	/* AFE_SPDIFIN_INT_EXT2 */
+	if (v & SPDIFIN_LRCK_CHG_INT_STS)
+		bits |= SPDIFIN_DATA_LRCK_CHANGE_CLEAR;			/* 27-16 */
+	/* AFE_SPDIFIN_DEBUG1 */
+	if (v & SPDIFIN_DATA_LATCH_ERR)
+		bits |= SPDIFIN_DATA_LATCH_CLEAR;				/* 10-17 */
+	/* not error AFE_SPDIFIN_DEBUG2*/
+	if (v & SPDIFIN_CHSTS_PREAMPHASIS_STS)
+		bits |= SPDIFIN_CHSTS_PREAMPHASIS_CLEAR;				/* 7-9 */
+	if (v & SPDIFIN_CHSTS_INT_FLAG)
+		bits |= SPDIFIN_CHSTS_INT_CLR_EN;		/* 26-11 */
+	return bits;
+}
+
+void afe_spdifrx_isr(void)
+{
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(spdif_component);
+	u32 regval1, regval2, regval3, fsval, fsvalod, chsintflag;
+	int i, j;
+	unsigned int err, noterr, clear_bits;
+
+	regmap_read(afe->regmap, AFE_SPDIFIN_DEBUG3, &regval1);
+	regmap_read(afe->regmap, AFE_SPDIFIN_INT_EXT2, &regval2);
+	regmap_read(afe->regmap, AFE_SPDIFIN_DEBUG1, &regval3);
+	regmap_read(afe->regmap, AFE_SPDIFIN_DEBUG2, &chsintflag);
+
+	err = (regval1 & SPDIFIN_ALL_ERR_ERR_STS) | (regval2 & SPDIFIN_LRCK_CHG_INT_STS) |
+		(regval3 & SPDIFIN_DATA_LATCH_ERR) | (chsintflag & SPDIFIN_FIFO_ERR_STS);
+	noterr = (regval1 & SPDIFIN_CHSTS_PREAMPHASIS_STS) | (chsintflag & SPDIFIN_CHSTS_INT_FLAG);
+	clear_bits = get_clear_bits(err);
+	if (err != 0) {
+		if (spdifrx_state.rate > 0) {
+			pr_debug("%s Spdif Rx unlock!\n", __func__);
+			if (regval1 & SPDIFIN_ALL_ERR_ERR_STS)
+				pr_debug("%s Error is 0x%x\n", __func__, regval1 & SPDIFIN_ALL_ERR_ERR_STS);
+			if (regval2 & SPDIFIN_LRCK_CHG_INT_STS)
+				pr_debug("%s LRCK Change\n", __func__);
+			if (regval3 & SPDIFIN_DATA_LATCH_ERR)
+				pr_debug("%s Data Latch error!\n", __func__);
+			if (chsintflag & SPDIFIN_FIFO_ERR_STS)
+				pr_debug("%s FIFO error!\n", __func__);
+			spdifrx_state.rate = 0;
+			spdifrx_clear_vucp();
+			if (spdifrx_callback)
+				spdifrx_callback();
+		}
+		/* Disable SPDIF RX and its interrupt */
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_CFG0, SPDIFIN_INT_EN_MASK | SPDIFIN_EN_MASK, SPDIFIN_INT_DIS | SPDIFIN_DIS);
+		/* Clear interrupt bits */
+		regmap_write(afe->regmap, AFE_SPDIFIN_EC, clear_bits);
+		/* Re-enable SPDIF RX and its interrupt */
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_CFG0, SPDIFIN_INT_EN_MASK | SPDIFIN_EN_MASK, SPDIFIN_INT_EN | SPDIFIN_EN);
+	} else {
+		/* No error: read the bit-rate counter and derive the sample rate */
+		regmap_read(afe->regmap, AFE_SPDIFIN_BR_DBG1, &fsval);
+		fsval = spdifrx_fs_interpreter(fsval);
+
+		if (fsval != SPDIFIN_OUT_RANGE) {
+			regmap_update_bits(afe->regmap, AFE_SPDIFIN_INT_EXT2, SPDIFIN_LRC_MASK, _u4LRCKCmp594M[fsval-SPDIFIN_32K]);
+			fsvalod = spdifrx_state.rate;
+			spdifrx_state.rate = fsval;
+			pr_debug("%s spdifrx_state.rate =0x%x.\n", __func__, spdifrx_state.rate);
+			if ((spdifrx_callback) && (fsvalod != fsval))
+				spdifrx_callback();
+
+		}
+		if (((chsintflag & SPDIFIN_CHSTS_INT_FLAG) != 0) && (fsval != SPDIFIN_OUT_RANGE)) {
+			for (i = 0; i < 6; i++) {
+				unsigned int temp;
+				regmap_read(afe->regmap, AFE_SPDIFIN_CHSTS1 + i * 0x4, &temp);
+
+				if (temp != spdifrx_state.c_bit[i]) {
+					spdifrx_state.c_bit[i] =  temp;
+					if (spdifrx_callback)
+						spdifrx_callback();
+				}
+			}
+			for (i = 0; i < 2; i++) {
+				for (j = 0; j < 6; j++) {
+					unsigned int temp;
+					regmap_read(afe->regmap, SPDIFIN_FREQ_USERCODE1 + (i * 6 + j) * 0x4, &temp);
+
+					if (temp != spdifrx_state.u_bit[i][j]) {
+						spdifrx_state.u_bit[i][j] = temp;
+						if (spdifrx_callback)
+							spdifrx_callback();
+					}
+				}
+			}
+		}
+
+		if (fsval == SPDIFIN_OUT_RANGE)
+			/* Disable SPDIF RX and its interrupt */
+			regmap_update_bits(afe->regmap, AFE_SPDIFIN_CFG0, SPDIFIN_INT_EN_MASK | SPDIFIN_EN_MASK, SPDIFIN_INT_DIS | SPDIFIN_DIS);
+		/* Clear interrupt bits */
+		regmap_write(afe->regmap, AFE_SPDIFIN_EC, SPDIFIN_INT_CLEAR_ALL);
+		if (fsval == SPDIFIN_OUT_RANGE)
+			/* Re-enable SPDIF RX and its interrupt */
+			regmap_update_bits(afe->regmap, AFE_SPDIFIN_CFG0, SPDIFIN_INT_EN_MASK | SPDIFIN_EN_MASK, SPDIFIN_INT_EN | SPDIFIN_EN);
+	}
+}
+
+static void spdifrx_irq_enable(int en)
+{
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(spdif_component);
+
+	if (en) {
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON2, 1 << 2, 1 << 2); /* enable IRQ9 */
+		/*
+		 * AFE_SPDIFIN_CFG1:
+		 * 0xb3f00010 is known good; 0xb3f00000 sometimes fails to lock
+		 * the signal. AFE_SPDIFIN_FIFOSTARTPOINT should be between 5
+		 * and 7 (the default is 3), and bit 0 must be 0.
+		 */
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_CFG1,
+			      SPDIFIN_INT_ERR_EN_MASK | SEL_BCK_SPDIFIN | AFE_SPDIFIN_FIFOSTARTPOINT_5,
+			      SPDIFIN_ALL_ERR_INT_EN | SEL_BCK_SPDIFIN | AFE_SPDIFIN_FIFOSTARTPOINT_5);
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_INT_EXT, SPDIFIN_DATALATCH_ERR_EN_MASK, SPDIFIN_DATALATCH_ERR_EN);
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_CFG0,
+			      SPDIFIN_EN_MASK | SPDIFIN_INT_EN_MASK | SPDIFIN_FLIP_EN_MASK |
+			      SPDIFIN_DE_CNT_MASK | SPDIFIN_DE_SEL_MASK | MAX_LEN_NUM_MASK,
+			      SPDIFIN_EN | SPDIFIN_INT_EN | SPDIFIN_FLIP_EN | 4 << 8 |
+			      SPDIFIN_DE_SEL_DECNT | 0xED << 16);
+
+	} else {
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_CFG0,
+			      SPDIFIN_EN | SPDIFIN_INT_EN | AFE_SPDIFIN_SEL_SPDIFIN_EN | SPDIFIN_FLIP_EN,
+			      SPDIFIN_DIS | SPDIFIN_INT_DIS | AFE_SPDIFIN_SEL_SPDIFIN_DIS | SPDIFIN_FLIP_DIS);
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_CFG1,
+			      SPDIFIN_INT_ERR_EN_MASK | SEL_BCK_SPDIFIN,
+			      SPDIFIN_ALL_ERR_INT_DIS);
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_INT_EXT, SPDIFIN_DATALATCH_ERR_EN_MASK, SPDIFIN_DATALATCH_ERR_DIS);
+		regmap_update_bits(afe->regmap, AFE_SPDIFIN_INT_EXT2, SPDIFIN_LRCK_CHG_INT_MASK, SPDIFIN_LRCK_CHG_INT_DIS);
+
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON2, 1 << 2, 0 << 2); /* disable IRQ9 */
+	}
+}
+
+static void spdifrx_init(enum afe_spdifrx_port port)
+{
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(spdif_component);
+
+	if (spdifrx_inited) {
+		pr_debug("%s() Dir has already inited.\n", __func__);
+		return;
+	}
+	spdifrx_clear_vucp();
+	spdifrx_state.rate = 0;
+
+	mt8167_afe_enable_main_clk(afe);
+	/*
+	 * Set spdifin clk cfg
+	 */
+	mt_afe_spdif_dir_clk_on(afe);
+
+	/*
+	 * [Programming Guide]
+	 * [SPDIF IN] spdifin config
+	 * AFE_SPDIFIN_INT_EXT2: 0x00020000
+	 * SPDIFIN_FREQ_INFO_2:  0x006596e8 or 0x6596ED
+	 * SPDIFIN_FREQ_INFO_3:  0x000005a5 or 0x5A4
+	 * AFE_SPDIFIN_BR:            0x00039000
+	 */
+	regmap_write(afe->regmap, SPDIFIN_FREQ_INFO, 0x00877986);
+	regmap_write(afe->regmap, SPDIFIN_FREQ_INFO_2, 0x006596e8);
+	regmap_write(afe->regmap, SPDIFIN_FREQ_INFO_3, 0x000005a5);
+
+	/*Bitclk recovery enable and lowbound*/
+	regmap_write(afe->regmap, AFE_SPDIFIN_BR, 0x00039000);
+	regmap_update_bits(afe->regmap, AFE_SPDIFIN_INT_EXT2, SPDIFIN_594MODE_MASK, SPDIFIN_594MODE_EN);
+
+	mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_INTDIR_CK);
+	mt8167_afe_enable_afe_on(afe);
+	spdifrx_select_port(port);
+
+	spdifrx_irq_enable(1);
+	spdifrx_inited = 1;
+}
+
+static void spdifrx_uninit(void)
+{
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(spdif_component);
+
+	if (!spdifrx_inited) {
+		pr_err("%s() DIR already uninitialized\n", __func__);
+		return;
+	}
+	spdifrx_irq_enable(0);
+
+	mt_afe_spdif_dir_clk_off(afe);
+	mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_INTDIR_CK);
+	mt8167_afe_disable_afe_on(afe);
+	mt8167_afe_disable_main_clk(afe);
+	spdifrx_state.rate = 0;
+	spdifrx_inited = 0;
+}
+
+void afe_spdifrx_start(enum afe_spdifrx_port port, void (*callback)(void))
+{
+	/*
+	 * [Programming Guide]
+	 * [SPDIF IN]GPIO mode setting
+	 */
+	switch (port) {
+	case SPDIFRX_PORT_OPT:
+		break;
+	case SPDIFRX_PORT_ARC:
+		break;
+	default:
+		pr_err("%s() invalid port: %d\n", __func__, port);
+		return;
+	}
+	spdifrx_callback = callback;
+	spdifrx_init(port);
+}
+
+void afe_spdifrx_stop(void)
+{
+	spdifrx_uninit();
+	spdifrx_callback = NULL;
+}
+
+static int spdif_rx_info(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 1;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 2;
+	return 0;
+}
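+
+/*
+ * Note the asymmetric use of the control value: the put path takes a single
+ * integer (0 = stop, 1 = optical, 2 = ARC), while the get path packs 76
+ * bytes into the value union: bytes 0-3 the detected rate, bytes 4-51 the
+ * user bits, bytes 52-75 the channel-status bits. Detected 64 kHz and
+ * 128 kHz streams fall through to rate 0.
+ */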
+
+static int spdif_rx_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	const volatile struct afe_dir_info *state = afe_spdifrx_state();
+	int rate;
+	int i;
+
+	switch (state->rate) {
+	case 0x7:
+		rate = 32000;
+		break;
+	case 0x8:
+		rate = 44100;
+		break;
+	case 0x9:
+		rate = 48000;
+		break;
+	case 0xb:
+		rate = 88200;
+		break;
+	case 0xc:
+		rate = 96000;
+		break;
+	case 0xe:
+		rate = 176400;
+		break;
+	case 0xf:
+		rate = 192000;
+		break;
+	default:
+		rate = 0;
+		break;
+	}
+	memcpy((void *)ucontrol->value.bytes.data, (void *)&rate, sizeof(rate));
+	memcpy((void *)ucontrol->value.bytes.data + sizeof(rate),
+	       (void *)state->u_bit, sizeof(state->u_bit));
+	memcpy((void *)ucontrol->value.bytes.data + sizeof(rate) +
+	       sizeof(state->u_bit), (void *)state->c_bit,
+	       sizeof(state->c_bit));
+	pr_notice("%s() rate=0x%X\n", __func__, rate);
+	for (i = 0; i < 76; i++)
+		pr_debug("%s() ucontrol->value.bytes.data[%d]=0x%02X\n",
+			 __func__, i, ucontrol->value.bytes.data[i]);
+	return 0;
+}
+
+static struct snd_kcontrol *snd_ctl_find_name(struct snd_card *card,
+					      const char *name)
+{
+	struct snd_kcontrol *kctl;
+
+	if (snd_BUG_ON(!card || !name))
+		return NULL;
+	list_for_each_entry(kctl, &card->controls, list) {
+		if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name)))
+			return kctl;
+	}
+	return NULL;
+}
+
+static void spdif_rx_ctl_notify(void)
+{
+	struct snd_kcontrol *kctl;
+	struct snd_card *card = spdif_component->card->snd_card;
+
+	kctl = snd_ctl_find_name(card, "SPDIF In");
+	if (!kctl) {
+		pr_err("%s() can not get name\n", __func__);
+		return;
+	}
+	snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id);
+}
+
+static int spdif_rx_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	/* 0:stop, 1:start opt */
+	enum afe_spdifrx_port port =
+	(enum afe_spdifrx_port)(ucontrol->value.integer.value[0]);
+
+	spdif_component = snd_soc_kcontrol_component(kcontrol);
+
+	if (port != SPDIFRX_PORT_NONE &&
+	    port != SPDIFRX_PORT_OPT &&
+	    port != SPDIFRX_PORT_ARC)
+		return -EINVAL;
+	pr_debug("%s() port=%d\n", __func__, port);
+	if (port == SPDIFRX_PORT_NONE)
+		afe_spdifrx_stop();
+	else
+		afe_spdifrx_start(port, spdif_rx_ctl_notify);
+	return 0;
+}
+
+static const struct snd_kcontrol_new mt8167_afe_controls[] = {
+	SOC_ENUM_EXT("Audio_SideGen_Switch",
+		     mt8167_afe_soc_enums[CTRL_SGEN_EN],
+		     mt8167_afe_sgen_get,
+		     mt8167_afe_sgen_put),
+	SOC_ENUM_EXT("Audio_SideGen_SampleRate",
+		     mt8167_afe_soc_enums[CTRL_SGEN_FS],
+		     mt8167_afe_sgen_fs_get,
+		     mt8167_afe_sgen_fs_put),
+	SOC_ENUM_EXT("AP_Loopback_Select",
+		     mt8167_afe_soc_enums[CTRL_AP_LOOPBACK],
+		     mt8167_afe_ap_loopback_get,
+		     mt8167_afe_ap_loopback_put),
+	SOC_SINGLE_BOOL_EXT("HDMI_Force_Clk_Switch",
+			    0,
+			    mt8167_afe_hdmi_force_clk_get,
+			    mt8167_afe_hdmi_force_clk_put),
+	SOC_SINGLE_BOOL_EXT("TDM_Out_Sgen_Switch",
+			    0,
+			    mt8167_afe_tdm_out_sgen_get,
+			    mt8167_afe_tdm_out_sgen_put),
+	SOC_SINGLE_BOOL_EXT("TDM_In_Sgen_Switch",
+			    0,
+			    mt8167_afe_tdm_in_sgen_get,
+			    mt8167_afe_tdm_in_sgen_put),
+	SOC_SINGLE_EXT("HW Gain1 Volume",
+			    0,
+			    0,
+			    0x80000,
+			    0,
+			    mt8167_afe_hw_gain1_vol_get,
+			    mt8167_afe_hw_gain1_vol_put),
+	SOC_SINGLE_EXT("HW Gain1 SamplePerStep",
+			    0,
+			    0,
+			    255,
+			    0,
+			    mt8167_afe_hw_gain1_sampleperstep_get,
+			    mt8167_afe_hw_gain1_sampleperstep_put),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "SPDIF In",
+		.info = spdif_rx_info,
+		.get = spdif_rx_get,
+		.put = spdif_rx_put
+	},
+};
+
+int mt8167_afe_add_controls(struct snd_soc_component *component)
+{
+	return snd_soc_add_component_controls(component, mt8167_afe_controls,
+					      ARRAY_SIZE(mt8167_afe_controls));
+}
+
diff --git a/sound/soc/mediatek/mt8167/mt8167-afe-controls.h b/sound/soc/mediatek/mt8167/mt8167-afe-controls.h
new file mode 100644
index 0000000..a95faa1
--- /dev/null
+++ b/sound/soc/mediatek/mt8167/mt8167-afe-controls.h
@@ -0,0 +1,24 @@
+/*
+ * mt8167-afe-controls.h  --  MediaTek platform driver ALSA controls
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT8167_AFE_CONTROLS_H_
+#define _MT8167_AFE_CONTROLS_H_
+
+struct snd_soc_component;
+
+void afe_spdifrx_isr(void);
+int mt8167_afe_add_controls(struct snd_soc_component *component);
+
+#endif
diff --git a/sound/soc/mediatek/mt8167/mt8167-afe-debug.c b/sound/soc/mediatek/mt8167/mt8167-afe-debug.c
new file mode 100644
index 0000000..e137ccf
--- /dev/null
+++ b/sound/soc/mediatek/mt8167/mt8167-afe-debug.c
@@ -0,0 +1,336 @@
+/*
+ * MediaTek audio debug functions
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include "mt8167-afe-debug.h"
+#include "mt8167-afe-regs.h"
+#include "mt8167-afe-util.h"
+#include "mt8167-afe-common.h"
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+
+
+#ifdef CONFIG_DEBUG_FS
+
+struct mt8167_afe_debug_fs {
+	char *fs_name;
+	const struct file_operations *fops;
+};
+
+struct afe_dump_reg_attr {
+	uint32_t offset;
+	char *name;
+};
+
+#define DUMP_REG_ENTRY(reg) {reg, #reg}
+
+static const struct afe_dump_reg_attr afe_dump_regs[] = {
+	DUMP_REG_ENTRY(AUDIO_TOP_CON0),
+	DUMP_REG_ENTRY(AUDIO_TOP_CON1),
+	DUMP_REG_ENTRY(AUDIO_TOP_CON3),
+	DUMP_REG_ENTRY(AFE_DAC_CON0),
+	DUMP_REG_ENTRY(AFE_DAC_CON1),
+	DUMP_REG_ENTRY(AFE_I2S_CON),
+	DUMP_REG_ENTRY(AFE_I2S_CON1),
+	DUMP_REG_ENTRY(AFE_I2S_CON2),
+	DUMP_REG_ENTRY(AFE_I2S_CON3),
+	DUMP_REG_ENTRY(AFE_CONN0),
+	DUMP_REG_ENTRY(AFE_CONN1),
+	DUMP_REG_ENTRY(AFE_CONN2),
+	DUMP_REG_ENTRY(AFE_CONN3),
+	DUMP_REG_ENTRY(AFE_CONN4),
+	DUMP_REG_ENTRY(AFE_CONN5),
+	DUMP_REG_ENTRY(AFE_CONN_24BIT),
+	DUMP_REG_ENTRY(AFE_DL1_BASE),
+	DUMP_REG_ENTRY(AFE_DL1_CUR),
+	DUMP_REG_ENTRY(AFE_DL1_END),
+	DUMP_REG_ENTRY(AFE_DL2_BASE),
+	DUMP_REG_ENTRY(AFE_DL2_CUR),
+	DUMP_REG_ENTRY(AFE_DL2_END),
+	DUMP_REG_ENTRY(AFE_AWB_BASE),
+	DUMP_REG_ENTRY(AFE_AWB_CUR),
+	DUMP_REG_ENTRY(AFE_AWB_END),
+	DUMP_REG_ENTRY(AFE_VUL_BASE),
+	DUMP_REG_ENTRY(AFE_VUL_CUR),
+	DUMP_REG_ENTRY(AFE_VUL_END),
+	DUMP_REG_ENTRY(AFE_DAI_BASE),
+	DUMP_REG_ENTRY(AFE_DAI_CUR),
+	DUMP_REG_ENTRY(AFE_DAI_END),
+	DUMP_REG_ENTRY(AFE_MEMIF_MSB),
+	DUMP_REG_ENTRY(AFE_MEMIF_MON0),
+	DUMP_REG_ENTRY(AFE_MEMIF_MON1),
+	DUMP_REG_ENTRY(AFE_MEMIF_MON2),
+	DUMP_REG_ENTRY(AFE_MEMIF_MON3),
+	DUMP_REG_ENTRY(AFE_ADDA_DL_SRC2_CON0),
+	DUMP_REG_ENTRY(AFE_ADDA_DL_SRC2_CON1),
+	DUMP_REG_ENTRY(AFE_ADDA_UL_SRC_CON0),
+	DUMP_REG_ENTRY(AFE_ADDA_UL_SRC_CON1),
+	DUMP_REG_ENTRY(AFE_ADDA_TOP_CON0),
+	DUMP_REG_ENTRY(AFE_ADDA_UL_DL_CON0),
+	DUMP_REG_ENTRY(AFE_ADDA_NEWIF_CFG0),
+	DUMP_REG_ENTRY(AFE_ADDA_NEWIF_CFG1),
+	DUMP_REG_ENTRY(AFE_ADDA_PREDIS_CON0),
+	DUMP_REG_ENTRY(AFE_ADDA_PREDIS_CON1),
+	DUMP_REG_ENTRY(AFE_MRGIF_CON),
+	DUMP_REG_ENTRY(AFE_DAIBT_CON0),
+	DUMP_REG_ENTRY(AFE_IRQ_MCU_CON),
+	DUMP_REG_ENTRY(AFE_IRQ_MCU_EN),
+	DUMP_REG_ENTRY(AFE_IRQ_CNT1),
+	DUMP_REG_ENTRY(AFE_IRQ_CNT2),
+	DUMP_REG_ENTRY(AFE_MEMIF_PBUF_SIZE),
+	DUMP_REG_ENTRY(AFE_SGEN_CON0),
+	DUMP_REG_ENTRY(AFE_APLL1_TUNER_CFG),
+	DUMP_REG_ENTRY(AFE_APLL2_TUNER_CFG),
+};
+
+static const struct afe_dump_reg_attr hdmi_dump_regs[] = {
+	DUMP_REG_ENTRY(AUDIO_TOP_CON0),
+	DUMP_REG_ENTRY(AFE_DAC_CON0),
+	DUMP_REG_ENTRY(AFE_HDMI_OUT_CON0),
+	DUMP_REG_ENTRY(AFE_HDMI_CONN0),
+	DUMP_REG_ENTRY(AFE_HDMI_OUT_BASE),
+	DUMP_REG_ENTRY(AFE_HDMI_OUT_CUR),
+	DUMP_REG_ENTRY(AFE_HDMI_OUT_END),
+	DUMP_REG_ENTRY(AFE_TDM_CON1),
+	DUMP_REG_ENTRY(AFE_TDM_CON2),
+	DUMP_REG_ENTRY(AFE_IRQ_MCU_CON2),
+	DUMP_REG_ENTRY(AFE_IRQ_CNT5),
+	DUMP_REG_ENTRY(AFE_MEMIF_PBUF2_SIZE),
+	DUMP_REG_ENTRY(AFE_APLL1_TUNER_CFG),
+	DUMP_REG_ENTRY(AFE_APLL2_TUNER_CFG),
+};
+
+static const struct afe_dump_reg_attr tdm_in_dump_regs[] = {
+	DUMP_REG_ENTRY(AUDIO_TOP_CON0),
+	DUMP_REG_ENTRY(AFE_DAC_CON0),
+	DUMP_REG_ENTRY(AFE_CONN_TDMIN_CON),
+	DUMP_REG_ENTRY(AFE_HDMI_IN_2CH_BASE),
+	DUMP_REG_ENTRY(AFE_HDMI_IN_2CH_CUR),
+	DUMP_REG_ENTRY(AFE_HDMI_IN_2CH_END),
+	DUMP_REG_ENTRY(AFE_TDM_IN_CON1),
+	DUMP_REG_ENTRY(AFE_HDMI_IN_2CH_CON0),
+	DUMP_REG_ENTRY(AFE_IRQ_MCU_CON2),
+	DUMP_REG_ENTRY(AFE_IRQ_CNT10),
+	DUMP_REG_ENTRY(AFE_MEMIF_PBUF2_SIZE),
+	DUMP_REG_ENTRY(AFE_APLL1_TUNER_CFG),
+	DUMP_REG_ENTRY(AFE_APLL2_TUNER_CFG),
+};
+
+static ssize_t mt8167_afe_read_file(struct file *file, char __user *user_buf,
+	size_t count, loff_t *pos)
+{
+	struct mtk_afe *afe = file->private_data;
+	ssize_t ret, i;
+	char *buf;
+	unsigned int reg_value;
+	int n = 0;
+
+	if (*pos < 0 || !count)
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mt8167_afe_enable_main_clk(afe);
+
+	for (i = 0; i < ARRAY_SIZE(afe_dump_regs); i++) {
+		if (regmap_read(afe->regmap, afe_dump_regs[i].offset, &reg_value))
+			n += scnprintf(buf + n, count - n, "%s = N/A\n",
+				       afe_dump_regs[i].name);
+		else
+			n += scnprintf(buf + n, count - n, "%s = 0x%x\n",
+				       afe_dump_regs[i].name, reg_value);
+	}
+
+	mt8167_afe_disable_main_clk(afe);
+
+	ret = simple_read_from_buffer(user_buf, count, pos, buf, n);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t mt8167_afe_write_file(struct file *file, const char __user *user_buf,
+	size_t count, loff_t *pos)
+{
+	char buf[64];
+	size_t buf_size;
+	char *start = buf;
+	char *reg_str;
+	char *value_str;
+	const char delim[] = " ,";
+	unsigned long reg, value;
+	struct mtk_afe *afe = file->private_data;
+
+	buf_size = min(count, (sizeof(buf) - 1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	reg_str = strsep(&start, delim);
+	if (!reg_str || !strlen(reg_str))
+		return -EINVAL;
+
+	value_str = strsep(&start, delim);
+	if (!value_str || !strlen(value_str))
+		return -EINVAL;
+
+	if (kstrtoul(reg_str, 16, &reg))
+		return -EINVAL;
+
+	if (kstrtoul(value_str, 16, &value))
+		return -EINVAL;
+
+	mt8167_afe_enable_main_clk(afe);
+
+	regmap_write(afe->regmap, reg, value);
+
+	mt8167_afe_disable_main_clk(afe);
+
+	return buf_size;
+}
+
+static ssize_t mt8167_afe_hdmi_read_file(struct file *file, char __user *user_buf,
+				size_t count, loff_t *pos)
+{
+	struct mtk_afe *afe = file->private_data;
+	ssize_t ret, i;
+	char *buf;
+	unsigned int reg_value;
+	int n = 0;
+
+	if (*pos < 0 || !count)
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mt8167_afe_enable_main_clk(afe);
+
+	for (i = 0; i < ARRAY_SIZE(hdmi_dump_regs); i++) {
+		if (regmap_read(afe->regmap, hdmi_dump_regs[i].offset, &reg_value))
+			n += scnprintf(buf + n, count - n, "%s = N/A\n",
+				       hdmi_dump_regs[i].name);
+		else
+			n += scnprintf(buf + n, count - n, "%s = 0x%x\n",
+				       hdmi_dump_regs[i].name, reg_value);
+	}
+
+	mt8167_afe_disable_main_clk(afe);
+
+	ret = simple_read_from_buffer(user_buf, count, pos, buf, n);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t mt8167_afe_tdm_in_read_file(struct file *file, char __user *user_buf,
+				size_t count, loff_t *pos)
+{
+	struct mtk_afe *afe = file->private_data;
+	ssize_t ret, i;
+	char *buf;
+	unsigned int reg_value;
+	int n = 0;
+
+	if (*pos < 0 || !count)
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mt8167_afe_enable_main_clk(afe);
+
+	for (i = 0; i < ARRAY_SIZE(tdm_in_dump_regs); i++) {
+		if (regmap_read(afe->regmap, tdm_in_dump_regs[i].offset, &reg_value))
+			n += scnprintf(buf + n, count - n, "%s = N/A\n",
+				       tdm_in_dump_regs[i].name);
+		else
+			n += scnprintf(buf + n, count - n, "%s = 0x%x\n",
+				       tdm_in_dump_regs[i].name, reg_value);
+	}
+	}
+
+	mt8167_afe_disable_main_clk(afe);
+
+	ret = simple_read_from_buffer(user_buf, count, pos, buf, n);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static const struct file_operations mt8167_afe_fops = {
+	.open = simple_open,
+	.read = mt8167_afe_read_file,
+	.write = mt8167_afe_write_file,
+	.llseek = default_llseek,
+};
+
+static const struct file_operations mt8167_afe_hdmi_fops = {
+	.open = simple_open,
+	.read = mt8167_afe_hdmi_read_file,
+	.llseek = default_llseek,
+};
+
+static const struct file_operations mt8167_afe_tdm_in_fops = {
+	.open = simple_open,
+	.read = mt8167_afe_tdm_in_read_file,
+	.llseek = default_llseek,
+};
+
+static const struct mt8167_afe_debug_fs afe_debug_fs[MT8167_AFE_DEBUGFS_NUM] = {
+	{"mtksocaudio", &mt8167_afe_fops},
+	{"mtksochdmiaudio", &mt8167_afe_hdmi_fops},
+	{"mtksoctdminaudio", &mt8167_afe_tdm_in_fops},
+};
+
+#endif
+
+void mt8167_afe_init_debugfs(struct mtk_afe *afe)
+{
+#ifdef CONFIG_DEBUG_FS
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(afe_debug_fs); i++) {
+		afe->debugfs_dentry[i] = debugfs_create_file(afe_debug_fs[i].fs_name,
+							  0644, NULL, afe,
+							  afe_debug_fs[i].fops);
+		if (!afe->debugfs_dentry[i])
+			dev_warn(afe->dev, "%s failed to create %s debugfs file\n",
+				 __func__, afe_debug_fs[i].fs_name);
+	}
+#endif
+}
+
+void mt8167_afe_cleanup_debugfs(struct mtk_afe *afe)
+{
+#ifdef CONFIG_DEBUG_FS
+	int i;
+
+	if (!afe)
+		return;
+
+	for (i = 0; i < MT8167_AFE_DEBUGFS_NUM; i++)
+		debugfs_remove(afe->debugfs_dentry[i]);
+#endif
+}
diff --git a/sound/soc/mediatek/mt8167/mt8167-afe-debug.h b/sound/soc/mediatek/mt8167/mt8167-afe-debug.h
new file mode 100644
index 0000000..9282185
--- /dev/null
+++ b/sound/soc/mediatek/mt8167/mt8167-afe-debug.h
@@ -0,0 +1,27 @@
+/*
+ * mt8167-afe-debug.h  --  MediaTek audio debug functions
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef __MT_AFE_DEBUG_H__
+#define __MT_AFE_DEBUG_H__
+
+struct mtk_afe;
+
+
+void mt8167_afe_init_debugfs(struct mtk_afe *afe);
+
+void mt8167_afe_cleanup_debugfs(struct mtk_afe *afe);
+
+#endif
diff --git a/sound/soc/mediatek/mt8167/mt8167-afe-pcm.c b/sound/soc/mediatek/mt8167/mt8167-afe-pcm.c
new file mode 100644
index 0000000..232b0b2
--- /dev/null
+++ b/sound/soc/mediatek/mt8167/mt8167-afe-pcm.c
@@ -0,0 +1,3691 @@
+/*
+ * MediaTek ALSA SoC AFE platform driver
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include "mt8167-afe-common.h"
+#include "mt8167-afe-regs.h"
+#include "mt8167-afe-util.h"
+#include "mt8167-afe-controls.h"
+#include "mt8167-afe-debug.h"
+
+#define MT8167_I2S0_MCLK_MULTIPLIER 256
+#define MT8167_I2S1_MCLK_MULTIPLIER 256
+#define MT8167_I2S2_MCLK_MULTIPLIER 256
+#define MT8167_I2S3_MCLK_MULTIPLIER 256
+#define MT8167_HDMI_OUT_MCLK_MULTIPLIER 64
+#define MT8167_TDM_OUT_MCLK_MULTIPLIER 256
+#define MT8167_TDM_IN_MCLK_MULTIPLIER 256
+
+#define LRCK_CYCLE_INVALID   ((unsigned int)-1)
+
+#define AFE_PCM_NAME	"mtk-afe-pcm"
+
+static const unsigned int mt8167_afe_backup_list[] = {
+	AUDIO_TOP_CON0,
+	AUDIO_TOP_CON3,
+	AFE_CONN0,
+	AFE_CONN1,
+	AFE_CONN2,
+	AFE_CONN3,
+	AFE_CONN5,
+	AFE_CONN_24BIT,
+	AFE_APLL1_TUNER_CFG,
+	AFE_APLL2_TUNER_CFG,
+	AFE_I2S_CON,
+	AFE_I2S_CON1,
+	AFE_I2S_CON2,
+	AFE_I2S_CON3,
+	AFE_ADDA_PREDIS_CON0,
+	AFE_ADDA_PREDIS_CON1,
+	AFE_ADDA_DL_SRC2_CON0,
+	AFE_ADDA_DL_SRC2_CON1,
+	AFE_ADDA_UL_SRC_CON0,
+	AFE_ADDA_UL_SRC_CON1,
+	AFE_ADDA_NEWIF_CFG1,
+	AFE_ADDA_TOP_CON0,
+	AFE_ADDA_UL_DL_CON0,
+	AFE_GAIN1_CON0,
+	AFE_GAIN1_CON1,
+	AFE_GAIN1_CUR,
+	AFE_MEMIF_PBUF_SIZE,
+	AFE_MEMIF_PBUF2_SIZE,
+	AFE_DAC_CON0,
+	AFE_DAC_CON1,
+	AFE_DL1_BASE,
+	AFE_DL1_END,
+	AFE_DL2_BASE,
+	AFE_DL2_END,
+	AFE_VUL_BASE,
+	AFE_VUL_END,
+	AFE_AWB_BASE,
+	AFE_AWB_END,
+	AFE_DAI_BASE,
+	AFE_DAI_END,
+	AFE_HDMI_OUT_BASE,
+	AFE_HDMI_OUT_END,
+	AFE_HDMI_IN_2CH_BASE,
+	AFE_HDMI_IN_2CH_END,
+	AFE_TDM_CON1,
+	AFE_TDM_CON2,
+	AFE_HDMI_OUT_CON0,
+	AFE_TDM_IN_CON1,
+};
+
+static const struct snd_pcm_hardware mt8167_afe_hardware = {
+	.info = SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_RESUME |
+		SNDRV_PCM_INFO_MMAP_VALID,
+	.buffer_bytes_max = 1024 * 1024,
+	.period_bytes_min = 256,
+	.period_bytes_max = 512 * 1024,
+	.periods_min = 2,
+	.periods_max = 256,
+	.fifo_size = 0,
+};
+
+static unsigned int channels_2_4_6_8[] = {
+	2, 4, 6, 8
+};
+
+static unsigned int rate_2nd_i2s_slave[] = {
+	8000, 11025, 16000, 22050, 32000, 44100, 48000
+};
+
+static struct snd_pcm_hw_constraint_list constraints_rate_2nd_i2s_slave = {
+	.count = ARRAY_SIZE(rate_2nd_i2s_slave),
+	.list = rate_2nd_i2s_slave,
+	.mask = 0,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels_tdm_in = {
+	.count = ARRAY_SIZE(channels_2_4_6_8),
+	.list = channels_2_4_6_8,
+	.mask = 0,
+};
+
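+/* Round the channel count up to the nearest TDM-supported width: 2, 4 or 8. */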
+static unsigned int mt8167_afe_tdm_ch_fixup(unsigned int channels)
+{
+	if (channels > 4)
+		return 8;
+	else if (channels > 2)
+		return 4;
+	else
+		return 2;
+}
+
+static unsigned int mt8167_afe_tdm_out_ch_per_sdata(unsigned int mode,
+	unsigned int channels)
+{
+	if (mode == MT8167_AFE_TDM_OUT_TDM)
+		return mt8167_afe_tdm_ch_fixup(channels);
+	else
+		return 2;
+}
+
+static int mt8167_afe_tdm_out_bitwidth_fixup(unsigned int mode,
+	int bitwidth)
+{
+	if (mode == MT8167_AFE_TDM_OUT_HDMI ||
+	    mode == MT8167_AFE_TDM_OUT_I2S_32BITS ||
+	    bitwidth == 24)
+		return 32;
+	else
+		return bitwidth;
+}
+
+static snd_pcm_uframes_t
+mt8167_afe_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_component *component =
+		snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(component);
+	struct mt8167_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+	unsigned int hw_ptr;
+	int ret;
+
+	ret = regmap_read(afe->regmap, memif->data->reg_ofs_cur, &hw_ptr);
+	if (ret || hw_ptr == 0) {
+		dev_err(afe->dev, "%s hw_ptr err ret = %d\n", __func__, ret);
+		hw_ptr = memif->phys_buf_addr;
+	} else if (memif->use_sram) {
+		/* enforce natural alignment to 8 bytes */
+		hw_ptr &= ~7;
+	}
+
+	return bytes_to_frames(substream->runtime,
+			       hw_ptr - memif->phys_buf_addr);
+}
+
+
+static const struct snd_pcm_ops mt8167_afe_pcm_ops = {
+	.ioctl = snd_pcm_lib_ioctl,
+	.pointer = mt8167_afe_pcm_pointer,
+};
+
+static int mt8167_afe_pcm_probe(struct snd_soc_component *component)
+{
+	return mt8167_afe_add_controls(component);
+}
+
+static int mt8167_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	struct snd_pcm *pcm = rtd->pcm;
+	struct snd_soc_component *component =
+		snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+	struct mtk_afe *afe = snd_soc_component_get_drvdata(component);
+	size_t size = afe->memif[rtd->cpu_dai->id].data->prealloc_size;
+	struct snd_pcm_substream *substream;
+	int stream;
+
+	for (stream = 0; stream < 2; stream++) {
+		substream = pcm->streams[stream].substream;
+		if (substream) {
+			struct snd_dma_buffer *buf = &substream->dma_buffer;
+
+			buf->dev.type = SNDRV_DMA_TYPE_DEV;
+			buf->dev.dev = card->dev;
+			buf->private_data = NULL;
+		}
+	}
+
+	if (size > 0)
+		snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+						      card->dev, size, size);
+
+	return 0;
+}
+
+static void mt8167_afe_pcm_free(struct snd_pcm *pcm)
+{
+	snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+static const struct snd_soc_component_driver mt8167_afe_pcm_platform = {
+	.name = AFE_PCM_NAME,
+	.probe = mt8167_afe_pcm_probe,
+	.pcm_new = mt8167_afe_pcm_new,
+	.pcm_free = mt8167_afe_pcm_free,
+	.ops = &mt8167_afe_pcm_ops,
+};
+
+struct mt8167_afe_rate {
+	unsigned int rate;
+	unsigned int regvalue;
+};
+
+static const struct mt8167_afe_rate mt8167_afe_i2s_rates[] = {
+	{ .rate = 8000, .regvalue = 0 },
+	{ .rate = 11025, .regvalue = 1 },
+	{ .rate = 12000, .regvalue = 2 },
+	{ .rate = 16000, .regvalue = 4 },
+	{ .rate = 22050, .regvalue = 5 },
+	{ .rate = 24000, .regvalue = 6 },
+	{ .rate = 32000, .regvalue = 8 },
+	{ .rate = 44100, .regvalue = 9 },
+	{ .rate = 48000, .regvalue = 10 },
+	{ .rate = 88200, .regvalue = 11 },
+	{ .rate = 96000, .regvalue = 12 },
+	{ .rate = 176400, .regvalue = 13 },
+	{ .rate = 192000, .regvalue = 14 },
+};
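+
+/*
+ * The regvalue column is the 4-bit fs code programmed into the AFE rate
+ * fields, e.g. via AFE_I2S_CON1_RATE() and bits 11:8 of AFE_DAC_CON1 in
+ * mt8167_afe_set_2nd_i2s_in().
+ */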
+
+struct mt8167_afe_asrc_info {
+	unsigned int rate_in;
+	unsigned int rate_out;
+	unsigned int freq_in;
+	unsigned int freq_out;
+	unsigned int fc_rstth_high;
+	unsigned int fc_rstth_low;
+	unsigned int fc_demtr;
+};
+
+static const struct mt8167_afe_asrc_info mt8167_afe_asrc_settings[] = {
+	/* 8Khz => 8Khz */
+	{ .rate_in = 8000, .rate_out = 8000,
+	  .freq_in = 0x50000, .freq_out = 0x50000,
+	  .fc_rstth_high = 0x37dc0, .fc_rstth_low = 0x2db40,
+	  .fc_demtr = 0x1fbd },
+	/* 11.025Khz => 11.025Khz */
+	{ .rate_in = 11025, .rate_out = 11025,
+	  .freq_in = 0x6e400, .freq_out = 0x6e400,
+	  .fc_rstth_high = 0x28886, .fc_rstth_low = 0x2129c,
+	  .fc_demtr = 0x1fbd },
+	/* 16Khz =>16Khz */
+	{ .rate_in = 16000, .rate_out = 16000,
+	  .freq_in = 0xa0000, .freq_out = 0xa0000,
+	  .fc_rstth_high = 0x1bee0, .fc_rstth_low = 0x16da0,
+	  .fc_demtr = 0x1fbd },
+	/* 22.05Khz =>22.05Khz */
+	{ .rate_in = 22050, .rate_out = 22050,
+	  .freq_in = 0xdc800, .freq_out = 0xdc800,
+	  .fc_rstth_high = 0x14443, .fc_rstth_low = 0x1094e,
+	  .fc_demtr = 0x1fbd },
+	/* 32Khz => 32Khz */
+	{ .rate_in = 32000, .rate_out = 32000,
+	  .freq_in = 0x140000, .freq_out = 0x140000,
+	  .fc_rstth_high = 0xd800, .fc_rstth_low = 0xbd00,
+	  .fc_demtr = 0x1fbd },
+	/* 44.1Khz => 44.1Khz */
+	{ .rate_in = 44100, .rate_out = 44100,
+	  .freq_in = 0x1b9000, .freq_out = 0x1b9000,
+	  .fc_rstth_high = 0x9c00, .fc_rstth_low = 0x8b00,
+	  .fc_demtr = 0x1fbd },
+	/* 48Khz =>48Khz */
+	{ .rate_in = 48000, .rate_out = 48000,
+	  .freq_in = 0x1e0000, .freq_out = 0x1e0000,
+	  .fc_rstth_high = 0x8f00, .fc_rstth_low = 0x7f00,
+	  .fc_demtr = 0x1fbd },
+};
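+
+/*
+ * The freq_in/freq_out words scale linearly with the rate: each equals
+ * rate * 4096 / 100 (e.g. 48000 * 4096 / 100 = 0x1e0000), so they appear to
+ * encode the rate in units of 100/4096 Hz. This is inferred from the table
+ * values, not from a datasheet.
+ */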
+
+static int mt8167_afe_asrc_fs(unsigned int rate_in, unsigned int rate_out)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mt8167_afe_asrc_settings); i++)
+		if ((mt8167_afe_asrc_settings[i].rate_in == rate_in) &&
+			(mt8167_afe_asrc_settings[i].rate_out == rate_out))
+			return i;
+
+	return -EINVAL;
+}
+
+static int mt8167_afe_i2s_fs(unsigned int sample_rate)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mt8167_afe_i2s_rates); i++)
+		if (mt8167_afe_i2s_rates[i].rate == sample_rate)
+			return mt8167_afe_i2s_rates[i].regvalue;
+
+	return -EINVAL;
+}
+
+static int mt8167_afe_set_i2s_out(struct mtk_afe *afe, unsigned int rate,
+	int bit_width)
+{
+	unsigned int val;
+	int fs = mt8167_afe_i2s_fs(rate);
+
+	if (fs < 0)
+		return -EINVAL;
+
+	val = AFE_I2S_CON1_I2S2_TO_PAD |
+	      AFE_I2S_CON1_LOW_JITTER_CLK |
+	      AFE_I2S_CON1_RATE(fs) |
+	      AFE_I2S_CON1_FORMAT_I2S;
+
+	if (bit_width > 16)
+		val |= AFE_I2S_CON1_WLEN_32BIT;
+
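+	/* program every field except the enable bit, which is toggled separately */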
+	regmap_update_bits(afe->regmap, AFE_I2S_CON1, ~(u32)AFE_I2S_CON1_EN, val);
+
+	return 0;
+}
+
+static int mt8167_afe_set_2nd_i2s_out(struct mtk_afe *afe, unsigned int rate,
+	int bit_width)
+{
+	unsigned int val;
+	int fs = mt8167_afe_i2s_fs(rate);
+
+	if (fs < 0)
+		return -EINVAL;
+
+	val = AFE_I2S_CON3_LOW_JITTER_CLK |
+	      AFE_I2S_CON3_RATE(fs) |
+	      AFE_I2S_CON3_FORMAT_I2S;
+
+	if (bit_width > 16)
+		val |= AFE_I2S_CON3_WLEN_32BIT;
+
+	regmap_update_bits(afe->regmap, AFE_I2S_CON3, ~(u32)AFE_I2S_CON3_EN, val);
+
+	return 0;
+}
+
+static int mt8167_afe_set_i2s_in(struct mtk_afe *afe, unsigned int rate,
+	int bit_width)
+{
+	unsigned int val;
+	int fs = mt8167_afe_i2s_fs(rate);
+
+	if (fs < 0)
+		return -EINVAL;
+
+	val = AFE_I2S_CON2_LOW_JITTER_CLK |
+	      AFE_I2S_CON2_RATE(fs) |
+	      AFE_I2S_CON2_FORMAT_I2S;
+
+	if (bit_width > 16)
+		val |= AFE_I2S_CON2_WLEN_32BIT;
+
+	regmap_update_bits(afe->regmap, AFE_I2S_CON2, ~(u32)AFE_I2S_CON2_EN, val);
+
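+	/*
+	 * Bit 0 of AFE_ADDA_TOP_CON0 appears to route the ADDA path to the
+	 * external I2S input here; mt8167_afe_set_adda_in() clears it again
+	 * for the internal ADDA path (inferred from usage, not a datasheet).
+	 */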
+	regmap_update_bits(afe->regmap, AFE_ADDA_TOP_CON0, 0x1, 0x1);
+
+	return 0;
+}
+
+static int mt8167_afe_set_2nd_i2s_asrc(struct mtk_afe *afe, unsigned int rate_in,
+	unsigned int rate_out, unsigned int width, unsigned int mono)
+{
+	int id = 0;
+	unsigned int val = 0;
+	unsigned int mask = 0;
+
+	id = mt8167_afe_asrc_fs(rate_in, rate_out);
+	if (id < 0)
+		return -EINVAL;
+
+	if (width == 16)
+		val |= AFE_ASRC_CON13_16BIT;
+	if (mono)
+		val |= AFE_ASRC_CON13_MONO;
+
+	regmap_update_bits(afe->regmap, AFE_ASRC_CON13,
+		AFE_ASRC_CON13_16BIT | AFE_ASRC_CON13_MONO,
+		val);
+
+	regmap_write(afe->regmap,
+		AFE_ASRC_CON14,
+		mt8167_afe_asrc_settings[id].freq_out);
+	regmap_write(afe->regmap,
+		AFE_ASRC_CON15,
+		mt8167_afe_asrc_settings[id].freq_in);
+
+	val = AFE_ASRC_CON16_FC2_CYCLE(64) |
+		AFE_ASRC_CON16_FC2_AUTO_RST |
+		AFE_ASRC_CON16_TUNE_FREQ5 |
+		AFE_ASRC_CON16_COMP_FREQ_EN |
+		AFE_ASRC_CON16_FC2_I2S_IN |
+		AFE_ASRC_CON16_FC2_DGL_BYPASS |
+		AFE_ASRC_CON16_FC2_AUTO_RESTART |
+		AFE_ASRC_CON16_FC2_FREQ |
+		AFE_ASRC_CON16_FC2_EN;
+	mask = AFE_ASRC_CON16_FC2_CYCLE_MASK |
+		AFE_ASRC_CON16_FC2_AUTO_RST |
+		AFE_ASRC_CON16_TUNE_FREQ5 |
+		AFE_ASRC_CON16_COMP_FREQ_EN |
+		AFE_ASRC_CON16_FC2_SEL |
+		AFE_ASRC_CON16_FC2_DGL_BYPASS |
+		AFE_ASRC_CON16_FC2_AUTO_RESTART |
+		AFE_ASRC_CON16_FC2_FREQ |
+		AFE_ASRC_CON16_FC2_EN;
+	regmap_update_bits(afe->regmap, AFE_ASRC_CON16,
+		mask, val);
+
+	regmap_write(afe->regmap,
+		AFE_ASRC_CON17,
+		mt8167_afe_asrc_settings[id].fc_demtr);
+	regmap_write(afe->regmap,
+		AFE_ASRC_CON20,
+		mt8167_afe_asrc_settings[id].fc_rstth_high);
+	regmap_write(afe->regmap,
+		AFE_ASRC_CON21,
+		mt8167_afe_asrc_settings[id].fc_rstth_low);
+
+	val = AFE_ASRC_CON0_CLR_TX |
+		AFE_ASRC_CON0_CLR_RX |
+		AFE_ASRC_CON0_CLR_I2S;
+	regmap_update_bits(afe->regmap, AFE_ASRC_CON0,
+		AFE_ASRC_CON0_STR_CLR_MASK, val);
+	return 0;
+}
+
+static int mt8167_afe_set_2nd_i2s_asrc_enable(struct mtk_afe *afe, bool enable)
+{
+	if (enable)
+		regmap_update_bits(afe->regmap, AFE_ASRC_CON0,
+			AFE_ASRC_CON0_ASM_ON, AFE_ASRC_CON0_ASM_ON);
+	else
+		regmap_update_bits(afe->regmap, AFE_ASRC_CON0,
+			AFE_ASRC_CON0_ASM_ON, 0);
+	return 0;
+}
+
+static int mt8167_afe_set_2nd_i2s_in(struct mtk_afe *afe, unsigned int rate,
+	int bit_width)
+{
+	unsigned int val;
+	int fs = mt8167_afe_i2s_fs(rate);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[MT8167_AFE_IO_2ND_I2S - MT8167_AFE_BACKEND_BASE];
+
+	if (fs < 0)
+		return -EINVAL;
+
+	regmap_update_bits(afe->regmap, AFE_DAC_CON1, 0xf << 8, fs << 8);
+
+	val = AFE_I2S_CON_PHASE_SHIFT_FIX |
+	      AFE_I2S_CON_FROM_IO_MUX |
+	      AFE_I2S_CON_LOW_JITTER_CLK;
+
+	if ((be->fmt_mode & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S)
+		val |= AFE_I2S_CON_FORMAT_I2S;
+
+	switch (be->fmt_mode & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_IB_IF:
+		val |= AFE_I2S_CON_LRCK_INV;
+		val |= AFE_I2S_CON_BCK_INV;
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		val |= AFE_I2S_CON_LRCK_INV;
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		val |= AFE_I2S_CON_BCK_INV;
+		break;
+	default:
+		break;
+	}
+
+	if ((be->fmt_mode & SND_SOC_DAIFMT_MASTER_MASK) == SND_SOC_DAIFMT_CBM_CFM)
+		val |= AFE_I2S_CON_SRC_SLAVE;
+
+	if (bit_width > 16)
+		val |= AFE_I2S_CON_WLEN_32BIT;
+
+	regmap_update_bits(afe->regmap, AFE_I2S_CON, ~(u32)AFE_I2S_CON_EN, val);
+
+	return 0;
+}
+
+static void mt8167_afe_set_i2s_out_enable(struct mtk_afe *afe, bool enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+
+	if (enable) {
+		afe->i2s_out_on_ref_cnt++;
+		if (afe->i2s_out_on_ref_cnt == 1)
+			regmap_update_bits(afe->regmap, AFE_I2S_CON1, 0x1, enable);
+	} else {
+		afe->i2s_out_on_ref_cnt--;
+		if (afe->i2s_out_on_ref_cnt == 0)
+			regmap_update_bits(afe->regmap, AFE_I2S_CON1, 0x1, enable);
+		else if (afe->i2s_out_on_ref_cnt < 0)
+			afe->i2s_out_on_ref_cnt = 0;
+	}
+
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+}
+
+static void mt8167_afe_set_2nd_i2s_out_enable(struct mtk_afe *afe, bool enable)
+{
+	regmap_update_bits(afe->regmap, AFE_I2S_CON3, 0x1, enable);
+}
+
+static void mt8167_afe_set_i2s_in_enable(struct mtk_afe *afe, bool enable)
+{
+	regmap_update_bits(afe->regmap, AFE_I2S_CON2, 0x1, enable);
+}
+
+static void mt8167_afe_set_2nd_i2s_in_enable(struct mtk_afe *afe, bool enable)
+{
+	regmap_update_bits(afe->regmap, AFE_I2S_CON, 0x1, enable);
+}
+
+static int mt8167_afe_enable_adda_on(struct mtk_afe *afe)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+
+	afe->adda_afe_on_ref_cnt++;
+	if (afe->adda_afe_on_ref_cnt == 1)
+		regmap_update_bits(afe->regmap, AFE_ADDA_UL_DL_CON0, 0x1, 0x1);
+
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+
+	return 0;
+}
+
+static int mt8167_afe_disable_adda_on(struct mtk_afe *afe)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+
+	afe->adda_afe_on_ref_cnt--;
+	if (afe->adda_afe_on_ref_cnt == 0)
+		regmap_update_bits(afe->regmap, AFE_ADDA_UL_DL_CON0, 0x1, 0x0);
+	else if (afe->adda_afe_on_ref_cnt < 0)
+		afe->adda_afe_on_ref_cnt = 0;
+
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+
+	return 0;
+}
+
+static int mt8167_afe_set_adda_out(struct mtk_afe *afe, unsigned int rate)
+{
+	unsigned int val = 0;
+
+	switch (rate) {
+	case 8000:
+		val |= (0 << 28) | AFE_ADDA_DL_VOICE_DATA;
+		break;
+	case 11025:
+		val |= 1 << 28;
+		break;
+	case 12000:
+		val |= 2 << 28;
+		break;
+	case 16000:
+		val |= (3 << 28) | AFE_ADDA_DL_VOICE_DATA;
+		break;
+	case 22050:
+		val |= 4 << 28;
+		break;
+	case 24000:
+		val |= 5 << 28;
+		break;
+	case 32000:
+		val |= 6 << 28;
+		break;
+	case 44100:
+		val |= 7 << 28;
+		break;
+	case 48000:
+		val |= 8 << 28;
+		break;
+	default:
+		return -EINVAL;
+	}
+
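+	/*
+	 * Bits 31:28 above select the DL SRC input rate; the 8 kHz and
+	 * 16 kHz cases additionally flag voice-band data via
+	 * AFE_ADDA_DL_VOICE_DATA.
+	 */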
+	val |= AFE_ADDA_DL_8X_UPSAMPLE |
+	       AFE_ADDA_DL_MUTE_OFF |
+	       AFE_ADDA_DL_DEGRADE_GAIN;
+
+	regmap_update_bits(afe->regmap, AFE_ADDA_PREDIS_CON0, 0xffffffff, 0);
+	regmap_update_bits(afe->regmap, AFE_ADDA_PREDIS_CON1, 0xffffffff, 0);
+	regmap_update_bits(afe->regmap, AFE_ADDA_DL_SRC2_CON0, 0xffffffff, val);
+
+	regmap_update_bits(afe->regmap, AFE_ADDA_DL_SRC2_CON1, 0xffffffff, 0xf74f0000);
+
+	return 0;
+}
+
+static int mt8167_afe_set_adda_in(struct mtk_afe *afe, unsigned int rate)
+{
+	unsigned int val = 0;
+	unsigned int val2 = 0;
+
+	switch (rate) {
+	case 8000:
+		val |= (0 << 17) | (0 << 19);
+		val2 |= 1 << 10;
+		break;
+	case 16000:
+		val |= (1 << 17) | (1 << 19);
+		val2 |= 1 << 10;
+		break;
+	case 32000:
+		val |= (2 << 17) | (2 << 19);
+		val2 |= 1 << 10;
+		break;
+	case 48000:
+		val |= (3 << 17) | (3 << 19);
+		val2 |= 3 << 10;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0, 0x001e0000, val);
+
+	regmap_update_bits(afe->regmap, AFE_ADDA_NEWIF_CFG1, 0xc00, val2);
+
+	regmap_update_bits(afe->regmap, AFE_ADDA_TOP_CON0, 0x1, 0x0);
+
+	return 0;
+}
+
+static void mt8167_afe_set_adda_out_enable(struct mtk_afe *afe, bool enable)
+{
+	regmap_update_bits(afe->regmap, AFE_ADDA_DL_SRC2_CON0, 0x1, enable);
+
+	if (enable)
+		mt8167_afe_enable_adda_on(afe);
+	else
+		mt8167_afe_disable_adda_on(afe);
+}
+
+static void mt8167_afe_set_adda_in_enable(struct mtk_afe *afe, bool enable)
+{
+	regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0, 0x1, enable);
+
+	if (enable)
+		mt8167_afe_enable_adda_on(afe);
+	else
+		mt8167_afe_disable_adda_on(afe);
+}
+
+static int mt8167_afe_set_mrg(struct mtk_afe *afe, unsigned int rate)
+{
+	unsigned int val = 0;
+
+	switch (rate) {
+	case 8000:
+		val |= 0 << 9;
+		break;
+	case 16000:
+		val |= 1 << 9;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	val |= AFE_DAIBT_CON0_USE_MRG_INPUT |
+	       AFE_DAIBT_CON0_DATA_DRY;
+
+	regmap_update_bits(afe->regmap, AFE_MRGIF_CON, 0xf00000, 9 << 20);
+	regmap_update_bits(afe->regmap, AFE_DAIBT_CON0, 0x1208, val);
+
+	return 0;
+}
+
+static void mt8167_afe_enable_mrg(struct mtk_afe *afe)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+	afe->daibt_on_ref_cnt++;
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+
+	if (afe->daibt_on_ref_cnt != 1)
+		return;
+
+	regmap_update_bits(afe->regmap, AFE_MRGIF_CON, 1 << 16, 1 << 16);
+	regmap_update_bits(afe->regmap, AFE_MRGIF_CON, 0x1, 0x1);
+
+	udelay(100);
+
+	regmap_update_bits(afe->regmap, AFE_DAIBT_CON0, 0x3, 0x3);
+}
+
+static void mt8167_afe_disable_mrg(struct mtk_afe *afe)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+	afe->daibt_on_ref_cnt--;
+	if (afe->daibt_on_ref_cnt < 0)
+		afe->daibt_on_ref_cnt = 0;
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+
+	if (afe->daibt_on_ref_cnt != 0)
+		return;
+
+	regmap_update_bits(afe->regmap, AFE_DAIBT_CON0, 0x3, 0x0);
+
+	udelay(100);
+
+	regmap_update_bits(afe->regmap, AFE_MRGIF_CON, 1 << 16, 0x0);
+	regmap_update_bits(afe->regmap, AFE_MRGIF_CON, 0x1, 0x0);
+}
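+
+/*
+ * The MRG and PCM0 back ends share the DAIBT block, so both paths are
+ * refcounted through the same daibt_on_ref_cnt counter.
+ */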
+
+static int mt8167_afe_set_pcm0(struct mtk_afe *afe, unsigned int rate)
+{
+	unsigned int val = 0;
+
+	switch (rate) {
+	case 8000:
+		val |= 0 << 9;
+		break;
+	case 16000:
+		val |= 1 << 9;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	val |= AFE_DAIBT_CON0_DATA_DRY;
+
+	regmap_update_bits(afe->regmap, AFE_DAIBT_CON0, 0x1208, val);
+
+	return 0;
+}
+
+static void mt8167_afe_enable_pcm0(struct mtk_afe *afe)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+	afe->daibt_on_ref_cnt++;
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+
+	if (afe->daibt_on_ref_cnt != 1)
+		return;
+
+	regmap_update_bits(afe->regmap, AFE_DAIBT_CON0, 0x3, 0x3);
+}
+
+static void mt8167_afe_disable_pcm0(struct mtk_afe *afe)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+	afe->daibt_on_ref_cnt--;
+	if (afe->daibt_on_ref_cnt < 0)
+		afe->daibt_on_ref_cnt = 0;
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+
+	if (afe->daibt_on_ref_cnt != 0)
+		return;
+
+	regmap_update_bits(afe->regmap, AFE_DAIBT_CON0, 0x3, 0x0);
+}
+
+static int mt8167_afe_enable_irq(struct mtk_afe *afe, struct mt8167_afe_memif *memif)
+{
+	int irq_mode = memif->data->irq_mode;
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+
+	afe->irq_mode_ref_cnt[irq_mode]++;
+	if (afe->irq_mode_ref_cnt[irq_mode] > 1) {
+		spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+		return 0;
+	}
+
+	switch (irq_mode) {
+	case MT8167_AFE_IRQ_1:
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON, 1 << 0, 1 << 0);
+		break;
+	case MT8167_AFE_IRQ_2:
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON, 1 << 1, 1 << 1);
+		break;
+	case MT8167_AFE_IRQ_5:
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON2, 1 << 3, 1 << 3);
+		break;
+	case MT8167_AFE_IRQ_7:
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON, 1 << 14, 1 << 14);
+		break;
+	case MT8167_AFE_IRQ_10:
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON2, 1 << 4, 1 << 4);
+		break;
+	case MT8167_AFE_IRQ_13:
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON2, 1 << 7, 1 << 7);
+		break;
+	default:
+		break;
+	}
+
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+
+	return 0;
+}
+
+static int mt8167_afe_disable_irq(struct mtk_afe *afe, struct mt8167_afe_memif *memif)
+{
+	int irq_mode = memif->data->irq_mode;
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+
+	afe->irq_mode_ref_cnt[irq_mode]--;
+	if (afe->irq_mode_ref_cnt[irq_mode] > 0) {
+		spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+		return 0;
+	} else if (afe->irq_mode_ref_cnt[irq_mode] < 0) {
+		afe->irq_mode_ref_cnt[irq_mode] = 0;
+		spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+		return 0;
+	}
+
+	switch (irq_mode) {
+	case MT8167_AFE_IRQ_1:
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON, 1 << 0, 0 << 0);
+		regmap_write(afe->regmap, AFE_IRQ_CLR, 1 << 0);
+		break;
+	case MT8167_AFE_IRQ_2:
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON, 1 << 1, 0 << 1);
+		regmap_write(afe->regmap, AFE_IRQ_CLR, 1 << 1);
+		break;
+	case MT8167_AFE_IRQ_5:
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON2, 1 << 3, 0 << 3);
+		regmap_write(afe->regmap, AFE_IRQ_CLR, 1 << 4);
+		break;
+	case MT8167_AFE_IRQ_7:
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON, 1 << 14, 0 << 14);
+		regmap_write(afe->regmap, AFE_IRQ_CLR, 1 << 6);
+		break;
+	case MT8167_AFE_IRQ_10:
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON2, 1 << 4, 0 << 4);
+		regmap_write(afe->regmap, AFE_IRQ_CLR, 1 << 9);
+		break;
+	case MT8167_AFE_IRQ_13:
+		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON2, 1 << 7, 0 << 7);
+		break;
+	default:
+		break;
+	}
+
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+
+	return 0;
+}
+
+static int mt8167_afe_dais_enable_clks(struct mtk_afe *afe,
+				    struct clk *m_ck, struct clk *b_ck)
+{
+#ifdef COMMON_CLOCK_FRAMEWORK_API
+	int ret;
+
+	if (m_ck) {
+		ret = clk_prepare_enable(m_ck);
+		if (ret) {
+			dev_err(afe->dev, "Failed to enable m_ck\n");
+			return ret;
+		}
+	}
+
+	if (b_ck) {
+		ret = clk_prepare_enable(b_ck);
+		if (ret) {
+			dev_err(afe->dev, "Failed to enable b_ck\n");
+			return ret;
+		}
+	}
+#endif
+	return 0;
+}
+
+static int mt8167_afe_dais_set_clks(struct mtk_afe *afe,
+				 struct clk *m_ck, unsigned int mck_rate,
+				 struct clk *b_ck, unsigned int bck_rate)
+{
+#ifdef COMMON_CLOCK_FRAMEWORK_API
+	int ret;
+
+	if (m_ck) {
+		ret = clk_set_rate(m_ck, mck_rate);
+		if (ret) {
+			dev_err(afe->dev, "Failed to set m_ck rate\n");
+			return ret;
+		}
+	}
+
+	if (b_ck) {
+		ret = clk_set_rate(b_ck, bck_rate);
+		if (ret) {
+			dev_err(afe->dev, "Failed to set b_ck rate\n");
+			return ret;
+		}
+	}
+#endif
+	return 0;
+}
+
+static void mt8167_afe_dais_disable_clks(struct mtk_afe *afe,
+				      struct clk *m_ck, struct clk *b_ck)
+{
+#ifdef COMMON_CLOCK_FRAMEWORK_API
+	if (m_ck)
+		clk_disable_unprepare(m_ck);
+	if (b_ck)
+		clk_disable_unprepare(b_ck);
+#endif
+}
+
+static int mt8167_afe_i2s_startup(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	const unsigned int clk_mode = afe->i2s_clk_modes[MT8167_AFE_1ST_I2S];
+
+	dev_dbg(afe->dev, "%s '%s'\n",
+		__func__, snd_pcm_stream_str(substream));
+
+	if (clk_mode == MT8167_AFE_I2S_SHARED_CLOCK && dai->active)
+		return 0;
+
+	mt8167_afe_enable_main_clk(afe);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
+	    clk_mode == MT8167_AFE_I2S_SHARED_CLOCK)
+		mt8167_afe_dais_enable_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV1], NULL);
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE ||
+	    clk_mode == MT8167_AFE_I2S_SHARED_CLOCK)
+		mt8167_afe_dais_enable_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV2], NULL);
+
+	return 0;
+}
+
+static void mt8167_afe_i2s_shutdown(struct snd_pcm_substream *substream,
+				 struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const unsigned int rate = substream->runtime->rate;
+	const unsigned int stream = substream->stream;
+	const unsigned int clk_mode = afe->i2s_clk_modes[MT8167_AFE_1ST_I2S];
+	const bool reset_i2s_out_change = (stream == SNDRV_PCM_STREAM_PLAYBACK) ||
+		(clk_mode == MT8167_AFE_I2S_SHARED_CLOCK);
+	const bool reset_i2s_in_change = (stream == SNDRV_PCM_STREAM_CAPTURE) ||
+		(clk_mode == MT8167_AFE_I2S_SHARED_CLOCK);
+
+	dev_dbg(afe->dev, "%s '%s'\n",
+		__func__, snd_pcm_stream_str(substream));
+
+	if (clk_mode == MT8167_AFE_I2S_SHARED_CLOCK && dai->active)
+		return;
+
+	if (be->prepared[stream]) {
+		if (reset_i2s_out_change)
+			mt8167_afe_set_i2s_out_enable(afe, false);
+
+		if (reset_i2s_in_change)
+			mt8167_afe_set_i2s_in_enable(afe, false);
+
+		if (rate % 8000)
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL1);
+		else
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL2);
+
+		if (reset_i2s_out_change)
+			be->prepared[SNDRV_PCM_STREAM_PLAYBACK] = false;
+
+		if (reset_i2s_in_change)
+			be->prepared[SNDRV_PCM_STREAM_CAPTURE] = false;
+	}
+
+	if (reset_i2s_out_change)
+		mt8167_afe_dais_disable_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV1], NULL);
+
+	if (reset_i2s_in_change)
+		mt8167_afe_dais_disable_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV2], NULL);
+
+	mt8167_afe_disable_main_clk(afe);
+}
+
+static int mt8167_afe_i2s_hw_params(struct snd_pcm_substream *substream,
+			  struct snd_pcm_hw_params *params,
+			  struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	unsigned int width_val = params_width(params) > 16 ?
+		(AFE_CONN_24BIT_O03 | AFE_CONN_24BIT_O04) : 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		regmap_update_bits(afe->regmap, AFE_CONN_24BIT,
+			   AFE_CONN_24BIT_O03 | AFE_CONN_24BIT_O04, width_val);
+
+	return 0;
+}
+
+static int mt8167_afe_i2s_prepare(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const unsigned int rate = substream->runtime->rate;
+	const int bit_width = snd_pcm_format_width(substream->runtime->format);
+	const unsigned int stream = substream->stream;
+	const unsigned int clk_mode = afe->i2s_clk_modes[MT8167_AFE_1ST_I2S];
+	const bool apply_i2s_out_change = (stream == SNDRV_PCM_STREAM_PLAYBACK) ||
+		(clk_mode == MT8167_AFE_I2S_SHARED_CLOCK);
+	const bool apply_i2s_in_change = (stream == SNDRV_PCM_STREAM_CAPTURE) ||
+		(clk_mode == MT8167_AFE_I2S_SHARED_CLOCK);
+	int ret;
+
+	if ((clk_mode == MT8167_AFE_I2S_SHARED_CLOCK) &&
+	    (dai->playback_widget->power || dai->capture_widget->power)) {
+		dev_dbg(afe->dev, "%s '%s' widget powered(%u-%u) already\n",
+			__func__, snd_pcm_stream_str(substream),
+			dai->playback_widget->power,
+			dai->capture_widget->power);
+		return 0;
+	}
+
+	if (be->prepared[stream] &&
+		(be->cached_rate[stream] == rate) &&
+		(be->cached_format[stream] == substream->runtime->format)) {
+		dev_info(afe->dev, "%s '%s' prepared already\n",
+			 __func__, snd_pcm_stream_str(substream));
+		return 0;
+	}
+
+	if (be->prepared[stream]) {
+		if (apply_i2s_out_change)
+			mt8167_afe_set_i2s_out_enable(afe, false);
+
+		if (apply_i2s_in_change)
+			mt8167_afe_set_i2s_in_enable(afe, false);
+
+		if (be->cached_rate[stream] % 8000)
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL1);
+		else
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL2);
+	}
+
+	if (apply_i2s_out_change) {
+		ret = mt8167_afe_set_i2s_out(afe, rate, bit_width);
+		if (ret)
+			return ret;
+	}
+
+	if (apply_i2s_in_change) {
+		ret = mt8167_afe_set_i2s_in(afe, rate, bit_width);
+		if (ret)
+			return ret;
+	}
+
+	if (rate % 8000)
+		mt8167_afe_enable_apll_associated_cfg(afe, MT8167_AFE_APLL1);
+	else
+		mt8167_afe_enable_apll_associated_cfg(afe, MT8167_AFE_APLL2);
+
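+	/* route each MCLK mux to the APLL domain for this rate and set the divider */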
+	if (apply_i2s_out_change) {
+		clk_set_parent(afe->clocks[MT8167_CLK_I2S1_M_SEL], (rate % 8000) ?
+			afe->clocks[MT8167_CLK_AUD1] : afe->clocks[MT8167_CLK_AUD2]);
+
+		mt8167_afe_dais_set_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV1],
+				rate * MT8167_I2S1_MCLK_MULTIPLIER, NULL, 0);
+
+		mt8167_afe_set_i2s_out_enable(afe, true);
+
+		be->prepared[SNDRV_PCM_STREAM_PLAYBACK] = true;
+		be->cached_rate[SNDRV_PCM_STREAM_PLAYBACK] = rate;
+		be->cached_format[SNDRV_PCM_STREAM_PLAYBACK] = substream->runtime->format;
+	}
+
+	if (apply_i2s_in_change) {
+		clk_set_parent(afe->clocks[MT8167_CLK_I2S2_M_SEL], (rate % 8000) ?
+			afe->clocks[MT8167_CLK_AUD1] : afe->clocks[MT8167_CLK_AUD2]);
+
+		mt8167_afe_dais_set_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV2],
+				rate * MT8167_I2S2_MCLK_MULTIPLIER, NULL, 0);
+
+		mt8167_afe_set_i2s_in_enable(afe, true);
+
+		be->prepared[SNDRV_PCM_STREAM_CAPTURE] = true;
+		be->cached_rate[SNDRV_PCM_STREAM_CAPTURE] = rate;
+		be->cached_format[SNDRV_PCM_STREAM_CAPTURE] = substream->runtime->format;
+	}
+
+	return 0;
+}
+
+static int mt8167_afe_2nd_i2s_startup(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	const unsigned int stream = substream->stream;
+	struct snd_pcm_runtime * const runtime = substream->runtime;
+	const unsigned int clk_mode = afe->i2s_clk_modes[MT8167_AFE_2ND_I2S];
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const bool i2s_in_slave = (stream == SNDRV_PCM_STREAM_CAPTURE) &&
+		((be->fmt_mode & SND_SOC_DAIFMT_MASTER_MASK) == SND_SOC_DAIFMT_CBM_CFM);
+
+	dev_dbg(afe->dev, "%s '%s'\n",
+		__func__, snd_pcm_stream_str(substream));
+
+	if (clk_mode == MT8167_AFE_I2S_SHARED_CLOCK && dai->active)
+		return 0;
+
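+	/* in slave mode the codec drives BCK/LRCK, so restrict capture rates */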
+	if (i2s_in_slave)
+		snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_rate_2nd_i2s_slave);
+
+	mt8167_afe_enable_main_clk(afe);
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK ||
+	    clk_mode == MT8167_AFE_I2S_SHARED_CLOCK)
+		mt8167_afe_dais_enable_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV3], NULL);
+
+	if ((stream == SNDRV_PCM_STREAM_CAPTURE ||
+	    clk_mode == MT8167_AFE_I2S_SHARED_CLOCK) && !i2s_in_slave)
+		mt8167_afe_dais_enable_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV0], NULL);
+
+	if (i2s_in_slave)
+		mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_I2S);
+
+	return 0;
+}
+
+static void mt8167_afe_2nd_i2s_shutdown(struct snd_pcm_substream *substream,
+				 struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const unsigned int rate = substream->runtime->rate;
+	const unsigned int stream = substream->stream;
+	const unsigned int clk_mode = afe->i2s_clk_modes[MT8167_AFE_2ND_I2S];
+	const bool reset_i2s_out_change = (stream == SNDRV_PCM_STREAM_PLAYBACK) ||
+		(clk_mode == MT8167_AFE_I2S_SHARED_CLOCK);
+	const bool reset_i2s_in_change = (stream == SNDRV_PCM_STREAM_CAPTURE) ||
+		(clk_mode == MT8167_AFE_I2S_SHARED_CLOCK);
+	const bool i2s_in_slave = (stream == SNDRV_PCM_STREAM_CAPTURE) &&
+		((be->fmt_mode & SND_SOC_DAIFMT_MASTER_MASK) == SND_SOC_DAIFMT_CBM_CFM);
+
+	dev_dbg(afe->dev, "%s '%s'\n",
+		__func__, snd_pcm_stream_str(substream));
+
+	if (clk_mode == MT8167_AFE_I2S_SHARED_CLOCK && dai->active)
+		return;
+
+	if (be->prepared[stream]) {
+		if (reset_i2s_out_change)
+			mt8167_afe_set_2nd_i2s_out_enable(afe, false);
+
+		if (reset_i2s_in_change) {
+			if (i2s_in_slave)
+				mt8167_afe_set_2nd_i2s_asrc_enable(afe, false);
+			mt8167_afe_set_2nd_i2s_in_enable(afe, false);
+		}
+
+		if (rate % 8000)
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL1);
+		else
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL2);
+
+		if (reset_i2s_out_change)
+			be->prepared[SNDRV_PCM_STREAM_PLAYBACK] = false;
+
+		if (reset_i2s_in_change)
+			be->prepared[SNDRV_PCM_STREAM_CAPTURE] = false;
+	}
+
+	if (reset_i2s_out_change)
+		mt8167_afe_dais_disable_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV3], NULL);
+
+	if (reset_i2s_in_change && !i2s_in_slave)
+		mt8167_afe_dais_disable_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV0], NULL);
+
+	if (i2s_in_slave)
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_I2S);
+
+	mt8167_afe_disable_main_clk(afe);
+}
+
+static int mt8167_afe_2nd_i2s_hw_params(struct snd_pcm_substream *substream,
+			  struct snd_pcm_hw_params *params,
+			  struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	unsigned int width_val = params_width(params) > 16 ?
+		(AFE_CONN_24BIT_O00 | AFE_CONN_24BIT_O01) : 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		regmap_update_bits(afe->regmap, AFE_CONN_24BIT,
+			   AFE_CONN_24BIT_O00 | AFE_CONN_24BIT_O01, width_val);
+
+	return 0;
+}
+
+static int mt8167_afe_2nd_i2s_prepare(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const unsigned int rate = substream->runtime->rate;
+	const int bit_width = snd_pcm_format_width(substream->runtime->format);
+	const unsigned int stream = substream->stream;
+	const unsigned int clk_mode = afe->i2s_clk_modes[MT8167_AFE_2ND_I2S];
+	const bool apply_i2s_out_change = (stream == SNDRV_PCM_STREAM_PLAYBACK) ||
+		(clk_mode == MT8167_AFE_I2S_SHARED_CLOCK);
+	const bool apply_i2s_in_change = (stream == SNDRV_PCM_STREAM_CAPTURE) ||
+		(clk_mode == MT8167_AFE_I2S_SHARED_CLOCK);
+	const bool i2s_in_slave = (stream == SNDRV_PCM_STREAM_CAPTURE) &&
+		((be->fmt_mode & SND_SOC_DAIFMT_MASTER_MASK) == SND_SOC_DAIFMT_CBM_CFM);
+	int ret;
+
+	if ((clk_mode == MT8167_AFE_I2S_SHARED_CLOCK) &&
+	    (dai->playback_widget->power || dai->capture_widget->power)) {
+		dev_dbg(afe->dev, "%s '%s' widgets already powered (%u-%u)\n",
+			__func__, snd_pcm_stream_str(substream),
+			dai->playback_widget->power,
+			dai->capture_widget->power);
+		return 0;
+	}
+
+	if (be->prepared[stream] &&
+		(be->cached_rate[stream] == rate) &&
+		(be->cached_format[stream] == substream->runtime->format)) {
+		dev_info(afe->dev, "%s '%s' prepared already\n",
+			 __func__, snd_pcm_stream_str(substream));
+		return 0;
+	}
+
+	if (be->prepared[stream]) {
+		if (apply_i2s_out_change)
+			mt8167_afe_set_2nd_i2s_out_enable(afe, false);
+
+		if (apply_i2s_in_change) {
+			if (i2s_in_slave)
+				mt8167_afe_set_2nd_i2s_asrc_enable(afe, false);
+			mt8167_afe_set_2nd_i2s_in_enable(afe, false);
+		}
+
+		if (be->cached_rate[stream] % 8000)
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL1);
+		else
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL2);
+	}
+
+	if (apply_i2s_out_change) {
+		ret = mt8167_afe_set_2nd_i2s_out(afe, rate, bit_width);
+		if (ret)
+			return ret;
+	}
+
+	if (apply_i2s_in_change) {
+		if ((be->fmt_mode & SND_SOC_DAIFMT_MASTER_MASK) == SND_SOC_DAIFMT_CBM_CFM) {
+			ret = mt8167_afe_set_2nd_i2s_asrc(afe, rate, rate,
+					(unsigned int)bit_width, 0);
+			if (ret < 0)
+				return ret;
+		}
+		ret = mt8167_afe_set_2nd_i2s_in(afe, rate, bit_width);
+		if (ret)
+			return ret;
+	}
+
+	if (rate % 8000)
+		mt8167_afe_enable_apll_associated_cfg(afe, MT8167_AFE_APLL1);
+	else
+		mt8167_afe_enable_apll_associated_cfg(afe, MT8167_AFE_APLL2);
+
+	if (apply_i2s_out_change) {
+		clk_set_parent(afe->clocks[MT8167_CLK_I2S3_M_SEL], (rate % 8000) ?
+			afe->clocks[MT8167_CLK_AUD1] : afe->clocks[MT8167_CLK_AUD2]);
+
+		mt8167_afe_dais_set_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV3],
+				rate * MT8167_I2S3_MCLK_MULTIPLIER, NULL, 0);
+
+		mt8167_afe_set_2nd_i2s_out_enable(afe, true);
+
+		be->prepared[SNDRV_PCM_STREAM_PLAYBACK] = true;
+		be->cached_rate[SNDRV_PCM_STREAM_PLAYBACK] = rate;
+		be->cached_format[SNDRV_PCM_STREAM_PLAYBACK] = substream->runtime->format;
+	}
+
+	if (apply_i2s_in_change) {
+		clk_set_parent(afe->clocks[MT8167_CLK_I2S0_M_SEL], (rate % 8000) ?
+			afe->clocks[MT8167_CLK_AUD1] : afe->clocks[MT8167_CLK_AUD2]);
+
+		mt8167_afe_dais_set_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV0],
+				rate * MT8167_I2S0_MCLK_MULTIPLIER, NULL, 0);
+
+		mt8167_afe_set_2nd_i2s_in_enable(afe, true);
+
+		if ((be->fmt_mode & SND_SOC_DAIFMT_MASTER_MASK) == SND_SOC_DAIFMT_CBM_CFM)
+			mt8167_afe_set_2nd_i2s_asrc_enable(afe, true);
+
+		be->prepared[SNDRV_PCM_STREAM_CAPTURE] = true;
+		be->cached_rate[SNDRV_PCM_STREAM_CAPTURE] = rate;
+		be->cached_format[SNDRV_PCM_STREAM_CAPTURE] = substream->runtime->format;
+	}
+
+	return 0;
+}
+
+static int mt8167_afe_2nd_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+
+	be->fmt_mode = 0;
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		be->fmt_mode |= SND_SOC_DAIFMT_I2S;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		be->fmt_mode |= SND_SOC_DAIFMT_LEFT_J;
+		break;
+	default:
+		dev_err(afe->dev, "invalid audio format for 2nd i2s!\n");
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+	case SND_SOC_DAIFMT_NB_IF:
+	case SND_SOC_DAIFMT_IB_NF:
+	case SND_SOC_DAIFMT_IB_IF:
+		break;
+	default:
+		dev_err(afe->dev, "invalid clock inversion for 2nd i2s!\n");
+		return -EINVAL;
+	}
+
+	be->fmt_mode |= (fmt & SND_SOC_DAIFMT_INV_MASK);
+
+	if (((fmt & SND_SOC_DAIFMT_MASTER_MASK) == SND_SOC_DAIFMT_CBM_CFM) &&
+		(afe->i2s_clk_modes[MT8167_AFE_2ND_I2S] == MT8167_AFE_I2S_SEPARATE_CLOCK))
+		be->fmt_mode |= (fmt & SND_SOC_DAIFMT_MASTER_MASK);
+
+	return 0;
+}
+
+static int mt8167_afe_int_adda_startup(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	mt8167_afe_enable_main_clk(afe);
+
+	mt8167_afe_enable_afe_on(afe);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_DAC);
+		mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_DAC_PREDIS);
+	} else {
+		mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_ADC);
+	}
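+	/* brief settling delay after flipping the ADDA clock gates */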
+	udelay(100);
+
+	return 0;
+}
+
+static void mt8167_afe_int_adda_shutdown(struct snd_pcm_substream *substream,
+				 struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const unsigned int stream = substream->stream;
+
+	dev_dbg(afe->dev, "%s '%s'\n", __func__,
+		snd_pcm_stream_str(substream));
+
+	if (be->prepared[stream]) {
+		if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			mt8167_afe_set_adda_out_enable(afe, false);
+			mt8167_afe_set_i2s_out_enable(afe, false);
+		} else {
+			mt8167_afe_set_adda_in_enable(afe, false);
+		}
+
+		be->prepared[stream] = false;
+	}
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_DAC);
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_DAC_PREDIS);
+	} else {
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_ADC);
+	}
+	mt8167_afe_disable_afe_on(afe);
+
+	mt8167_afe_disable_main_clk(afe);
+}
+
+static int mt8167_afe_int_adda_hw_params(struct snd_pcm_substream *substream,
+			  struct snd_pcm_hw_params *params,
+			  struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	unsigned int width_val = params_width(params) > 16 ?
+		(AFE_CONN_24BIT_O03 | AFE_CONN_24BIT_O04) : 0;
+
+	dev_dbg(afe->dev, "%s '%s'\n", __func__, snd_pcm_stream_str(substream));
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		regmap_update_bits(afe->regmap, AFE_CONN_24BIT,
+			   AFE_CONN_24BIT_O03 | AFE_CONN_24BIT_O04, width_val);
+
+	return 0;
+}
+
+static int mt8167_afe_int_adda_prepare(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const unsigned int rate = substream->runtime->rate;
+	const unsigned int stream = substream->stream;
+	const int bit_width = snd_pcm_format_width(substream->runtime->format);
+	int ret;
+
+	dev_dbg(afe->dev, "%s '%s' rate = %u\n", __func__,
+		snd_pcm_stream_str(substream), rate);
+
+	if (be->prepared[stream] &&
+		(be->cached_rate[stream] == rate) &&
+		(be->cached_format[stream] == substream->runtime->format)) {
+		dev_info(afe->dev, "%s '%s' prepared already\n",
+			 __func__, snd_pcm_stream_str(substream));
+		return 0;
+	}
+
+	if (be->prepared[stream]) {
+		if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			mt8167_afe_set_adda_out_enable(afe, false);
+			mt8167_afe_set_i2s_out_enable(afe, false);
+		} else {
+			mt8167_afe_set_adda_in_enable(afe, false);
+		}
+	}
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		ret = mt8167_afe_set_adda_out(afe, rate);
+		if (ret)
+			return ret;
+
+		ret = mt8167_afe_set_i2s_out(afe, rate, bit_width);
+		if (ret)
+			return ret;
+
+		mt8167_afe_set_adda_out_enable(afe, true);
+		mt8167_afe_set_i2s_out_enable(afe, true);
+	} else {
+		ret = mt8167_afe_set_adda_in(afe, rate);
+		if (ret)
+			return ret;
+
+		mt8167_afe_set_adda_in_enable(afe, true);
+	}
+
+	be->prepared[stream] = true;
+	be->cached_rate[stream] = rate;
+	be->cached_format[stream] = substream->runtime->format;
+
+	return 0;
+}
+
+static int mt8167_afe_mrg_bt_startup(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	mt8167_afe_enable_main_clk(afe);
+
+	return 0;
+}
+
+static void mt8167_afe_mrg_bt_shutdown(struct snd_pcm_substream *substream,
+				 struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	mt8167_afe_disable_main_clk(afe);
+}
+
+static int mt8167_afe_mrg_bt_hw_params(struct snd_pcm_substream *substream,
+			  struct snd_pcm_hw_params *params,
+			  struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	int ret;
+
+	dev_dbg(afe->dev, "%s '%s' rate = %u\n", __func__,
+		snd_pcm_stream_str(substream), params_rate(params));
+
+	ret = mt8167_afe_set_mrg(afe, params_rate(params));
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mt8167_afe_mrg_bt_trigger(struct snd_pcm_substream *substream, int cmd,
+				struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	dev_info(afe->dev, "%s %s '%s' cmd = %d\n", __func__,
+		 dai->name, snd_pcm_stream_str(substream), cmd);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		mt8167_afe_enable_mrg(afe);
+		return 0;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		mt8167_afe_disable_mrg(afe);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int mt8167_afe_pcm0_startup(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	mt8167_afe_enable_main_clk(afe);
+
+	return 0;
+}
+
+static void mt8167_afe_pcm0_shutdown(struct snd_pcm_substream *substream,
+				 struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	mt8167_afe_disable_main_clk(afe);
+}
+
+static int mt8167_afe_pcm0_hw_params(struct snd_pcm_substream *substream,
+			  struct snd_pcm_hw_params *params,
+			  struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	int ret;
+
+	dev_dbg(afe->dev, "%s '%s' rate = %u\n", __func__,
+		snd_pcm_stream_str(substream), params_rate(params));
+
+	ret = mt8167_afe_set_pcm0(afe, params_rate(params));
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mt8167_afe_pcm0_trigger(struct snd_pcm_substream *substream, int cmd,
+				struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	dev_info(afe->dev, "%s %s '%s' cmd = %d\n", __func__,
+		 dai->name, snd_pcm_stream_str(substream), cmd);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		mt8167_afe_enable_pcm0(afe);
+		return 0;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		mt8167_afe_disable_pcm0(afe);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int mt8167_afe_hdmi_startup(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	mt8167_afe_enable_main_clk(afe);
+
+	mt8167_afe_dais_enable_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV4],
+				 afe->clocks[MT8167_CLK_APLL12_DIV4B]);
+	return 0;
+}
+
+static void mt8167_afe_hdmi_shutdown(struct snd_pcm_substream *substream,
+				  struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const unsigned int rate = substream->runtime->rate;
+	const unsigned int stream = substream->stream;
+
+	if (be->prepared[stream]) {
+		/* disable tdm */
+		regmap_update_bits(afe->regmap, AFE_TDM_CON1, 0x1, 0);
+
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_HDMI);
+
+		if (rate % 8000)
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL1);
+		else
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL2);
+
+		be->prepared[stream] = false;
+	}
+
+	mt8167_afe_dais_disable_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV4],
+				  afe->clocks[MT8167_CLK_APLL12_DIV4B]);
+
+	mt8167_afe_disable_main_clk(afe);
+}
+
+static int mt8167_afe_hdmi_prepare(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct snd_pcm_runtime * const runtime = substream->runtime;
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const unsigned int tdm_out_mode = afe->tdm_out_mode;
+	const unsigned int rate = runtime->rate;
+	const unsigned int channels = runtime->channels;
+	const unsigned int out_channels_per_sdata =
+		mt8167_afe_tdm_out_ch_per_sdata(tdm_out_mode, runtime->channels);
+	const int bit_width = snd_pcm_format_width(runtime->format);
+	const int out_bit_width = mt8167_afe_tdm_out_bitwidth_fixup(tdm_out_mode, bit_width);
+	const unsigned int stream = substream->stream;
+	unsigned int val;
+	unsigned int bck_inverse = 0;
+
+	if (be->prepared[stream] &&
+		(be->cached_rate[stream] == rate) &&
+		(be->cached_format[stream] == runtime->format) &&
+		(be->cached_channels[stream] == channels)) {
+		dev_info(afe->dev, "%s prepared already\n", __func__);
+		return 0;
+	}
+
+	if (be->prepared[stream]) {
+		/* disable tdm */
+		regmap_update_bits(afe->regmap, AFE_TDM_CON1, 0x1, 0);
+
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_HDMI);
+
+		if (be->cached_rate[stream] % 8000)
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL1);
+		else
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL2);
+	}
+
+	if (rate % 8000) {
+		mt8167_afe_enable_apll_associated_cfg(afe, MT8167_AFE_APLL1);
+		clk_set_parent(afe->clocks[MT8167_CLK_I2S4_M_SEL], afe->clocks[MT8167_CLK_AUD1]);
+	} else {
+		mt8167_afe_enable_apll_associated_cfg(afe, MT8167_AFE_APLL2);
+		clk_set_parent(afe->clocks[MT8167_CLK_I2S4_M_SEL], afe->clocks[MT8167_CLK_AUD2]);
+	}
+
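+	/* DIV4 supplies the TDM-out MCLK, DIV4B the bit clock (rate x slots x width) */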
+	mt8167_afe_dais_set_clks(afe,
+		      afe->clocks[MT8167_CLK_APLL12_DIV4],
+		      rate * MT8167_TDM_OUT_MCLK_MULTIPLIER,
+		      afe->clocks[MT8167_CLK_APLL12_DIV4B],
+		      rate * out_channels_per_sdata * out_bit_width);
+
+	val = AFE_TDM_CON1_MSB_ALIGNED;
+
+	if ((tdm_out_mode == MT8167_AFE_TDM_OUT_I2S ||
+	     tdm_out_mode == MT8167_AFE_TDM_OUT_I2S_32BITS) &&
+	    (be->fmt_mode & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S) {
+		val |= AFE_TDM_CON1_1_BCK_DELAY |
+		       AFE_TDM_CON1_LRCK_INV;
+		bck_inverse = AUD_TCON3_HDMI_BCK_INV;
+	} else if (tdm_out_mode == MT8167_AFE_TDM_OUT_HDMI) {
+		val |= AFE_TDM_CON1_1_BCK_DELAY |
+		       AFE_TDM_CON1_LRCK_INV;
+	} else if (tdm_out_mode == MT8167_AFE_TDM_OUT_TDM) {
+		val |= AFE_TDM_CON1_1_BCK_DELAY;
+		bck_inverse = AUD_TCON3_HDMI_BCK_INV;
+	}
+
+	/* bit width related */
+	if (out_bit_width > 16) {
+		val |= AFE_TDM_CON1_WLEN_32BIT |
+		       AFE_TDM_CON1_32_BCK_CYCLES |
+		       AFE_TDM_CON1_LRCK_WIDTH(32);
+	} else {
+		val |= AFE_TDM_CON1_WLEN_16BIT |
+		       AFE_TDM_CON1_16_BCK_CYCLES |
+		       AFE_TDM_CON1_LRCK_WIDTH(16);
+	}
+
+	/* channel per sdata */
+	if (out_channels_per_sdata > 4)
+		val |= AFE_TDM_CON1_8CH_PER_SDATA;
+	else if (out_channels_per_sdata > 2)
+		val |= AFE_TDM_CON1_4CH_PER_SDATA;
+	else
+		val |= AFE_TDM_CON1_2CH_PER_SDATA;
+
+	regmap_update_bits(afe->regmap, AFE_TDM_CON1, ~(u32)AFE_TDM_CON1_EN, val);
+
+	/* set tdm2 config */
+	if (out_channels_per_sdata == 2) {
+		switch (channels) {
+		case 1:
+		case 2:
+			val = AFE_TDM_CH_START_O28_O29;
+			val |= (AFE_TDM_CH_ZERO << 4);
+			val |= (AFE_TDM_CH_ZERO << 8);
+			val |= (AFE_TDM_CH_ZERO << 12);
+			break;
+		case 3:
+		case 4:
+			val = AFE_TDM_CH_START_O28_O29;
+			val |= (AFE_TDM_CH_START_O30_O31 << 4);
+			val |= (AFE_TDM_CH_ZERO << 8);
+			val |= (AFE_TDM_CH_ZERO << 12);
+			break;
+		case 5:
+		case 6:
+			val = AFE_TDM_CH_START_O28_O29;
+			val |= (AFE_TDM_CH_START_O30_O31 << 4);
+			val |= (AFE_TDM_CH_START_O32_O33 << 8);
+			val |= (AFE_TDM_CH_ZERO << 12);
+			break;
+		case 7:
+		case 8:
+			val = AFE_TDM_CH_START_O28_O29;
+			val |= (AFE_TDM_CH_START_O30_O31 << 4);
+			val |= (AFE_TDM_CH_START_O32_O33 << 8);
+			val |= (AFE_TDM_CH_START_O34_O35 << 12);
+			break;
+		default:
+			val = 0;
+		}
+	} else {
+		val = AFE_TDM_CH_START_O28_O29;
+		val |= (AFE_TDM_CH_ZERO << 4);
+		val |= (AFE_TDM_CH_ZERO << 8);
+		val |= (AFE_TDM_CH_ZERO << 12);
+	}
+
+	regmap_update_bits(afe->regmap, AFE_TDM_CON2,
+			   AFE_TDM_CON2_SOUT_MASK, val);
+
+	regmap_update_bits(afe->regmap, AUDIO_TOP_CON3,
+			   AUD_TCON3_HDMI_BCK_INV, bck_inverse);
+
+	regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0,
+			   AFE_HDMI_OUT_CON0_CH_MASK, channels << 4);
+
+	if (tdm_out_mode != MT8167_AFE_TDM_OUT_HDMI)
+		regmap_update_bits(afe->regmap, AFE_I2S_CON1,
+			   AFE_I2S_CON1_TDMOUT_MUX_MASK, AFE_I2S_CON1_TDMOUT_TO_PAD);
+
+	mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_HDMI);
+
+	/* enable tdm */
+	regmap_update_bits(afe->regmap, AFE_TDM_CON1, 0x1, 0x1);
+
+	be->prepared[stream] = true;
+	be->cached_rate[stream] = rate;
+	be->cached_format[stream] = runtime->format;
+	be->cached_channels[stream] = channels;
+
+	return 0;
+}
+
+static int mt8167_afe_hdmi_trigger(struct snd_pcm_substream *substream, int cmd,
+				struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	dev_dbg(afe->dev, "%s cmd=%d %s\n", __func__, cmd, dai->name);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		/* align the connection logic with HDMI Tx */
+		/* set connections:  O28~O35: L/R/LFE/C/LS/RS/CH7/CH8 */
+		if (afe->tdm_out_mode == MT8167_AFE_TDM_OUT_HDMI)
+			regmap_write(afe->regmap, AFE_HDMI_CONN0,
+			     AFE_HDMI_CONN0_O28_I28 | AFE_HDMI_CONN0_O29_I29 |
+			     AFE_HDMI_CONN0_O30_I31 | AFE_HDMI_CONN0_O31_I30 |
+			     AFE_HDMI_CONN0_O32_I32 | AFE_HDMI_CONN0_O33_I33 |
+			     AFE_HDMI_CONN0_O34_I34 | AFE_HDMI_CONN0_O35_I35);
+		else
+			regmap_write(afe->regmap, AFE_HDMI_CONN0,
+			     AFE_HDMI_CONN0_O28_I28 | AFE_HDMI_CONN0_O29_I29 |
+			     AFE_HDMI_CONN0_O30_I30 | AFE_HDMI_CONN0_O31_I31 |
+			     AFE_HDMI_CONN0_O32_I32 | AFE_HDMI_CONN0_O33_I33 |
+			     AFE_HDMI_CONN0_O34_I34 | AFE_HDMI_CONN0_O35_I35);
+
+		/* enable Out control */
+		regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, 0x1, 0x1);
+		return 0;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		/* disable Out control */
+		regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, 0x1, 0);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int mt8167_afe_hdmi_set_fmt(struct snd_soc_dai *dai,
+				unsigned int fmt)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+
+	be->fmt_mode = 0;
+	/* set DAI format */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+	case SND_SOC_DAIFMT_LEFT_J:
+		be->fmt_mode |= fmt & SND_SOC_DAIFMT_FORMAT_MASK;
+		break;
+	default:
+		dev_err(afe->dev, "invalid dai format %u\n", fmt);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mt8167_afe_tdm_in_startup(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct snd_pcm_runtime * const runtime = substream->runtime;
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+				   &constraints_channels_tdm_in);
+
+	mt8167_afe_enable_main_clk(afe);
+
+	mt8167_afe_dais_enable_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV5],
+				 afe->clocks[MT8167_CLK_APLL12_DIV5B]);
+	return 0;
+}
+
+static void mt8167_afe_tdm_in_shutdown(struct snd_pcm_substream *substream,
+				  struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const unsigned int rate = substream->runtime->rate;
+	const unsigned int stream = substream->stream;
+
+	if (be->prepared[stream]) {
+		if (rate % 8000)
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL1);
+		else
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL2);
+
+		be->prepared[stream] = false;
+	}
+
+	mt8167_afe_dais_disable_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV5],
+				  afe->clocks[MT8167_CLK_APLL12_DIV5B]);
+
+	mt8167_afe_disable_main_clk(afe);
+}
+
+static int mt8167_afe_tdm_in_prepare(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct snd_pcm_runtime * const runtime = substream->runtime;
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const unsigned int rate = runtime->rate;
+	const unsigned int channels = runtime->channels;
+	int bit_width = snd_pcm_format_width(runtime->format);
+	const unsigned int stream = substream->stream;
+	unsigned int val, val2;
+	unsigned int bck;
+
+	dev_info(afe->dev, "%s bit_width = %d\n", __func__, bit_width);
+
+	/* S24_LE is carried in a 32-bit container */
+	if (bit_width > 16)
+		bit_width = 32;
+
+	if (be->prepared[stream] &&
+		(be->cached_rate[stream] == rate) &&
+		(be->cached_format[stream] == runtime->format) &&
+		(be->cached_channels[stream] == channels)) {
+		dev_info(afe->dev, "%s prepared already\n", __func__);
+		return 0;
+	}
+
+	if (be->prepared[stream]) {
+		if (be->cached_rate[stream] % 8000)
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL1);
+		else
+			mt8167_afe_disable_apll_associated_cfg(afe, MT8167_AFE_APLL2);
+	}
+
+	dev_dbg(afe->dev, "afe->tdm_in_lrck_cycle = %u\n", afe->tdm_in_lrck_cycle);
+
+	if (rate % 8000) {
+		mt8167_afe_enable_apll_associated_cfg(afe, MT8167_AFE_APLL1);
+		clk_set_parent(afe->clocks[MT8167_CLK_I2S5_M_SEL], afe->clocks[MT8167_CLK_AUD1]);
+	} else {
+		mt8167_afe_enable_apll_associated_cfg(afe, MT8167_AFE_APLL2);
+		clk_set_parent(afe->clocks[MT8167_CLK_I2S5_M_SEL], afe->clocks[MT8167_CLK_AUD2]);
+	}
+
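+	/* 4ch/6ch TDM-in data rides in 8-channel frames, so clock BCK for 8 slots */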
+	bck = ((channels == 4 || channels == 6) ? 8 : channels) *
+	      rate * bit_width;
+
+	mt8167_afe_dais_set_clks(afe, afe->clocks[MT8167_CLK_APLL12_DIV5],
+		rate * MT8167_TDM_IN_MCLK_MULTIPLIER,
+		afe->clocks[MT8167_CLK_APLL12_DIV5B], bck);
+
+	val = 0;
+
+	if ((be->fmt_mode & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S)
+		val |= AFE_TDM_IN_CON1_I2S;
+
+	/* bck&lrck phase */
+	switch (be->fmt_mode & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_IB_IF:
+		val |= AFE_TDM_IN_CON1_LRCK_INV |
+		       AFE_TDM_IN_CON1_BCK_INV;
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		val |= AFE_TDM_IN_CON1_LRCK_INV;
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		val |= AFE_TDM_IN_CON1_BCK_INV;
+		break;
+	default:
+		break;
+	}
+
+	/* bit width related */
+	if (bit_width > 16)
+		val |= AFE_TDM_IN_CON1_WLEN_32BIT |
+			AFE_TDM_IN_CON1_FAST_LRCK_CYCLE_32BCK;
+	else
+		val |= AFE_TDM_IN_CON1_WLEN_16BIT |
+			AFE_TDM_IN_CON1_FAST_LRCK_CYCLE_16BCK;
+
+	if (afe->tdm_in_lrck_cycle == LRCK_CYCLE_INVALID) {
+		val2 = bit_width > 16 ? 32 : 16;
+		val |= AFE_TDM_IN_CON1_LRCK_WIDTH(val2);
+	} else {
+		val |= AFE_TDM_IN_CON1_LRCK_WIDTH(afe->tdm_in_lrck_cycle);
+	}
+
+	switch (channels) {
+	case 2:
+		val |= AFE_TDM_IN_CON1_4CH_PER_SDATA;
+		val |= AFE_TDM_IN_CON1_DISABLE_CH23;
+		break;
+	case 4:
+		val |= AFE_TDM_IN_CON1_8CH_PER_SDATA;
+		val |= AFE_TDM_IN_CON1_DISABLE_CH67;
+		val |= AFE_TDM_IN_CON1_DISABLE_CH45;
+		break;
+	case 6:
+		val |= AFE_TDM_IN_CON1_8CH_PER_SDATA;
+		val |= AFE_TDM_IN_CON1_DISABLE_CH67;
+		break;
+	case 8:
+		val |= AFE_TDM_IN_CON1_8CH_PER_SDATA;
+		break;
+	default:
+		break;
+	}
+
+	regmap_update_bits(afe->regmap, AFE_TDM_IN_CON1,
+			   ~(u32)AFE_TDM_IN_CON1_EN, val);
+
+	be->prepared[stream] = true;
+	be->cached_rate[stream] = rate;
+	be->cached_format[stream] = runtime->format;
+	be->cached_channels[stream] = channels;
+
+	return 0;
+}
+
+static int mt8167_afe_tdm_in_set_fmt(struct snd_soc_dai *dai,
+				unsigned int fmt)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+
+	be->fmt_mode = 0;
+	/* set DAI format */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+	case SND_SOC_DAIFMT_LEFT_J:
+		be->fmt_mode |= fmt & SND_SOC_DAIFMT_FORMAT_MASK;
+		break;
+	default:
+		dev_err(afe->dev, "invalid dai format %u\n", fmt);
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+	case SND_SOC_DAIFMT_NB_IF:
+	case SND_SOC_DAIFMT_IB_NF:
+	case SND_SOC_DAIFMT_IB_IF:
+		be->fmt_mode |= fmt & SND_SOC_DAIFMT_INV_MASK;
+		break;
+	default:
+		dev_err(afe->dev, "invalid dai format %u\n", fmt);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mt8167_afe_tdm_in_trigger(struct snd_pcm_substream *substream, int cmd,
+				struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+#ifdef MT8167_AFE_E1_ONLY
+	unsigned int val;
+#endif
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		regmap_update_bits(afe->regmap, AFE_CONN_TDMIN_CON,
+				   AFE_CONN_TDMIN_CON0_MASK,
+				   AFE_CONN_TDMIN_O40_I40 | AFE_CONN_TDMIN_O41_I41);
+
+		regmap_update_bits(afe->regmap, AFE_HDMI_IN_2CH_CON0, 0x1, 0x1);
+
+		/* enable tdm in */
+		regmap_update_bits(afe->regmap, AFE_TDM_IN_CON1, 0x1, 0x1);
+		return 0;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+#ifdef MT8167_AFE_E1_ONLY
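+		/* E1 silicon: poll until the fast-LRCK phase clears before disabling */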
+		do {
+			regmap_read(afe->regmap, AFE_TDM_IN_MON2, &val);
+		} while (val & AFE_TDM_IN_MON2_FAST_LRCK);
+#endif
+		/* disable tdm in */
+		regmap_update_bits(afe->regmap, AFE_TDM_IN_CON1, 0x1, 0);
+
+		regmap_update_bits(afe->regmap, AFE_HDMI_IN_2CH_CON0, 0x1, 0);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int mt8167_afe_dais_startup(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct mt8167_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+	int ret;
+
+	dev_dbg(afe->dev, "%s %s\n", __func__, memif->data->name);
+
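+	/* buffer bytes must step by this memif's DMA alignment */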
+	snd_pcm_hw_constraint_step(substream->runtime, 0,
+		SNDRV_PCM_HW_PARAM_BUFFER_BYTES, memif->data->buffer_align_bytes);
+
+	snd_soc_set_runtime_hwparams(substream, &mt8167_afe_hardware);
+
+	ret = snd_pcm_hw_constraint_integer(runtime,
+					    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0) {
+		dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");
+		return ret;
+	}
+
+	memif->substream = substream;
+
+	mt8167_afe_enable_main_clk(afe);
+
+	return 0;
+}
+
+static void mt8167_afe_dais_shutdown(struct snd_pcm_substream *substream,
+				  struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+
+	dev_dbg(afe->dev, "%s %s\n", __func__, memif->data->name);
+
+	if (memif->prepared) {
+		mt8167_afe_disable_afe_on(afe);
+		memif->prepared = false;
+	}
+
+	memif->substream = NULL;
+
+	mt8167_afe_disable_main_clk(afe);
+}
+
+static int mt8167_afe_dais_hw_params(struct snd_pcm_substream *substream,
+				  struct snd_pcm_hw_params *params,
+				  struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+	const struct mt8167_afe_memif_data *data = memif->data;
+	const size_t request_size = params_buffer_bytes(params);
+	int ret;
+
+	dev_dbg(afe->dev,
+		"%s %s period = %u rate = %u channels = %u size = %zu\n",
+		__func__, data->name, params_period_size(params),
+		params_rate(params), params_channels(params), request_size);
+
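+	/* fall back to DRAM (EMI clock on) when the buffer exceeds the SRAM quota */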
+	if (request_size > data->max_sram_size) {
+		ret = snd_pcm_lib_malloc_pages(substream, request_size);
+		if (ret < 0) {
+			dev_err(afe->dev,
+				"%s %s malloc pages %zu bytes failed %d\n",
+				__func__, data->name, request_size, ret);
+			return ret;
+		}
+
+		memif->use_sram = false;
+
+		mt8167_afe_emi_clk_on(afe);
+	} else {
+		struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+
+		dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+		dma_buf->dev.dev = substream->pcm->card->dev;
+		dma_buf->area = ((unsigned char *)afe->sram_address) +
+				 data->sram_offset;
+		dma_buf->addr = afe->sram_phy_address + data->sram_offset;
+		dma_buf->bytes = request_size;
+		snd_pcm_set_runtime_buffer(substream, dma_buf);
+
+		memif->use_sram = true;
+	}
+
+	memif->phys_buf_addr = substream->runtime->dma_addr;
+	memif->buffer_size = substream->runtime->dma_bytes;
+
+	/* start */
+	regmap_write(afe->regmap, data->reg_ofs_base,
+		     memif->phys_buf_addr);
+
+	/* end */
+	regmap_write(afe->regmap, data->reg_ofs_end,
+		     memif->phys_buf_addr + memif->buffer_size - 1);
+
+	{
+		/* set mono/stereo mode */
+		unsigned int mono = (params_channels(params) == 1) ? 1 : 0;
+
+		if (data->mono_shift >= 0) {
+			regmap_update_bits(afe->regmap, AFE_DAC_CON1,
+					1 << data->mono_shift,
+					mono << data->mono_shift);
+		}
+
+		/* SPDIF-in channel mode, bit[1]: 0 = stereo, 1 = mono */
+		if (data->id == MT8167_AFE_MEMIF_MULTILINE_IN) {
+			regmap_update_bits(afe->regmap, SPDIFIN_MEMIF_CON0,
+					   1 << 1, mono << 1);
+			/* mono source select, bit[2]: 0 = left, 1 = right */
+			if (mono == 1)
+				regmap_update_bits(afe->regmap,
+						   SPDIFIN_MEMIF_CON0,
+						   1 << 2, 1 << 2);
+
+			/* SPDIF-in interrupt period */
+			regmap_update_bits(afe->regmap, SPDIFIN_MEMIF_CON0,
+					   MULTI_INT_PERIOD_MASK,
+					   MULTI_INT_PERIOD_64);
+		}
+	}
+
+	/* set format */
+	if (data->format_shift >= 0) {
+		switch (params_format(params)) {
+		case SNDRV_PCM_FORMAT_S16_LE:
+			regmap_update_bits(afe->regmap, data->format_reg,
+					   3 << data->format_shift,
+					   0 << data->format_shift);
+			break;
+		case SNDRV_PCM_FORMAT_S32_LE:
+			regmap_update_bits(afe->regmap, data->format_reg,
+					   3 << data->format_shift,
+					   3 << data->format_shift);
+			break;
+		case SNDRV_PCM_FORMAT_S24_LE:
+			regmap_update_bits(afe->regmap, data->format_reg,
+					   3 << data->format_shift,
+					   1 << data->format_shift);
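+			/* the multiline (SPDIF-in) memif uses the 32-bit format for S24 */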
+			if (data->id == MT8167_AFE_MEMIF_MULTILINE_IN)
+				regmap_update_bits(afe->regmap,
+						   data->format_reg,
+						   3 << data->format_shift,
+						   3 << data->format_shift);
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	if (data->conn_format_mask > 0) {
+		if (params_width(params) > 16)
+			regmap_update_bits(afe->regmap, AFE_CONN_24BIT,
+					   data->conn_format_mask,
+					   data->conn_format_mask);
+		else
+			regmap_update_bits(afe->regmap, AFE_CONN_24BIT,
+					   data->conn_format_mask,
+					   0);
+	}
+
+	/* set rate */
+	if (data->fs_shift < 0)
+		return 0;
+
+	if (data->id == MT8167_AFE_MEMIF_DAI ||
+	    data->id == MT8167_AFE_MEMIF_MOD_DAI) {
+		unsigned int val;
+
+		switch (params_rate(params)) {
+		case 8000:
+			val = 0;
+			break;
+		case 16000:
+			val = 1;
+			break;
+		case 32000:
+			val = 2;
+			break;
+		default:
+			dev_err(afe->dev, "%s %s rate %u not supported\n",
+				__func__, data->name, params_rate(params));
+			return -EINVAL;
+		}
+
+		if (data->id == MT8167_AFE_MEMIF_DAI)
+			regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+					   0x3 << data->fs_shift,
+					   val << data->fs_shift);
+		else
+			regmap_update_bits(afe->regmap, AFE_DAC_CON1,
+					   0x3 << data->fs_shift,
+					   val << data->fs_shift);
+	} else {
+		int fs = mt8167_afe_i2s_fs(params_rate(params));
+
+		if (fs < 0)
+			return -EINVAL;
+
+		regmap_update_bits(afe->regmap, AFE_DAC_CON1,
+				   0xf << data->fs_shift,
+				   fs << data->fs_shift);
+	}
+
+	return 0;
+}
+
+static int mt8167_afe_dais_hw_free(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+	int ret = 0;
+
+	dev_dbg(afe->dev, "%s %s\n", __func__, memif->data->name);
+
+	if (memif->use_sram) {
+		snd_pcm_set_runtime_buffer(substream, NULL);
+	} else {
+		ret = snd_pcm_lib_free_pages(substream);
+
+		mt8167_afe_emi_clk_off(afe);
+	}
+
+	return ret;
+}
+
+static int mt8167_afe_dais_prepare(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+
+	if (!memif->prepared) {
+		mt8167_afe_enable_afe_on(afe);
+		memif->prepared = true;
+	}
+
+	return 0;
+}
+
+static int mt8167_afe_dais_trigger(struct snd_pcm_substream *substream, int cmd,
+				struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_pcm_runtime * const runtime = substream->runtime;
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+	unsigned int counter = runtime->period_size;
+
+	dev_info(afe->dev, "%s %s cmd = %d\n", __func__,
+		 memif->data->name, cmd);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		if (memif->data->enable_shift >= 0)
+			regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+					   1 << memif->data->enable_shift,
+					   1 << memif->data->enable_shift);
+
+		/*
+		 * Enable the SPDIF-in memif input: sample updates, type
+		 * detection and periodic interrupt generation.
+		 */
+		if (memif->data->id == MT8167_AFE_MEMIF_MULTILINE_IN) {
+			regmap_update_bits(afe->regmap, SPDIFIN_MEMIF_CON0,
+					   SPDIFIN_IN_MEMIF_EN_MASK,
+					   SPDIFIN_IN_MEMIF_EN);
+			regmap_update_bits(afe->regmap, MPHONE_MULTI_CON0,
+					   MULTI_HW_EN_MASK, MULTI_HW_EN);
+			regmap_update_bits(afe->regmap,
+					   memif->data->irq_reg_cnt,
+					   1 << 31, 1 << 31);
+		}
+
+		/* set irq counter */
+		regmap_update_bits(afe->regmap,
+				   memif->data->irq_reg_cnt,
+				   0x3ffff << memif->data->irq_cnt_shift,
+				   counter << memif->data->irq_cnt_shift);
+
+		/* set irq fs */
+		if (memif->data->irq_fs_shift >= 0) {
+			int fs = mt8167_afe_i2s_fs(runtime->rate);
+
+			if (fs < 0)
+				return -EINVAL;
+
+			regmap_update_bits(afe->regmap,
+					   memif->data->irq_fs_reg,
+					   0xf << memif->data->irq_fs_shift,
+					   fs << memif->data->irq_fs_shift);
+		}
+
+		mt8167_afe_enable_irq(afe, memif);
+		return 0;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		if (memif->data->enable_shift >= 0)
+			regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+					   1 << memif->data->enable_shift, 0);
+
+		/* disable the SPDIF-in memif input */
+		if (memif->data->id == MT8167_AFE_MEMIF_MULTILINE_IN) {
+			regmap_update_bits(afe->regmap, SPDIFIN_MEMIF_CON0,
+					   SPDIFIN_IN_MEMIF_EN_MASK,
+					   SPDIFIN_IN_MEMIF_DIS);
+			regmap_update_bits(afe->regmap, MPHONE_MULTI_CON0,
+					   MULTI_HW_EN_MASK, MULTI_HW_DIS);
+		}
+
+		mt8167_afe_disable_irq(afe, memif);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int mt8167_afe_hw_gain1_startup(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	mt8167_afe_enable_main_clk(afe);
+	return 0;
+}
+
+static void mt8167_afe_hw_gain1_shutdown(struct snd_pcm_substream *substream,
+				  struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const unsigned int stream = substream->stream;
+
+	if (be->prepared[stream]) {
+		regmap_update_bits(afe->regmap, AFE_GAIN1_CON0, AFE_GAIN1_CON0_EN_MASK, 0);
+		be->prepared[stream] = false;
+	}
+	mt8167_afe_disable_main_clk(afe);
+}
+
+static int mt8167_afe_hw_gain1_prepare(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct snd_pcm_runtime * const runtime = substream->runtime;
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8167_afe_be_dai_data *be = &afe->be_data[dai->id - MT8167_AFE_BACKEND_BASE];
+	const unsigned int rate = runtime->rate;
+	const unsigned int stream = substream->stream;
+	int fs;
+	unsigned int val1, val2;
+
+	if (be->prepared[stream] &&
+		(be->cached_rate[stream] == rate)) {
+		dev_info(afe->dev, "%s prepared already\n", __func__);
+		return 0;
+	}
+
+	if (be->prepared[stream])
+		regmap_update_bits(afe->regmap, AFE_GAIN1_CON0, AFE_GAIN1_CON0_EN_MASK, 0);
+
+	fs = mt8167_afe_i2s_fs(rate);
+	if (fs < 0)
+		return -EINVAL;
+
+	regmap_update_bits(afe->regmap, AFE_GAIN1_CON0,
+			   AFE_GAIN1_CON0_MODE_MASK, (unsigned int)fs << 4);
+
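+	/* preload the ramp's current gain with the target to avoid a stale level */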
+	regmap_read(afe->regmap, AFE_GAIN1_CON1, &val1);
+	regmap_read(afe->regmap, AFE_GAIN1_CUR, &val2);
+	if ((val1 & AFE_GAIN1_CON1_MASK) != (val2 & AFE_GAIN1_CUR_MASK))
+		regmap_update_bits(afe->regmap, AFE_GAIN1_CUR, AFE_GAIN1_CUR_MASK, val1);
+
+	regmap_update_bits(afe->regmap, AFE_GAIN1_CON0, AFE_GAIN1_CON0_EN_MASK, 1);
+	be->prepared[stream] = true;
+	be->cached_rate[stream] = rate;
+
+	return 0;
+}
+
+/* FE DAIs */
+static const struct snd_soc_dai_ops mt8167_afe_dai_ops = {
+	.startup	= mt8167_afe_dais_startup,
+	.shutdown	= mt8167_afe_dais_shutdown,
+	.hw_params	= mt8167_afe_dais_hw_params,
+	.hw_free	= mt8167_afe_dais_hw_free,
+	.prepare	= mt8167_afe_dais_prepare,
+	.trigger	= mt8167_afe_dais_trigger,
+};
+
+/* BE DAIs */
+static const struct snd_soc_dai_ops mt8167_afe_i2s_ops = {
+	.startup	= mt8167_afe_i2s_startup,
+	.shutdown	= mt8167_afe_i2s_shutdown,
+	.hw_params	= mt8167_afe_i2s_hw_params,
+	.prepare	= mt8167_afe_i2s_prepare,
+};
+
+static const struct snd_soc_dai_ops mt8167_afe_2nd_i2s_ops = {
+	.startup	= mt8167_afe_2nd_i2s_startup,
+	.shutdown	= mt8167_afe_2nd_i2s_shutdown,
+	.hw_params	= mt8167_afe_2nd_i2s_hw_params,
+	.prepare	= mt8167_afe_2nd_i2s_prepare,
+	.set_fmt	= mt8167_afe_2nd_i2s_set_fmt,
+};
+
+static const struct snd_soc_dai_ops mt8167_afe_int_adda_ops = {
+	.startup	= mt8167_afe_int_adda_startup,
+	.shutdown	= mt8167_afe_int_adda_shutdown,
+	.hw_params	= mt8167_afe_int_adda_hw_params,
+	.prepare	= mt8167_afe_int_adda_prepare,
+};
+
+static const struct snd_soc_dai_ops mt8167_afe_mrg_bt_ops = {
+	.startup	= mt8167_afe_mrg_bt_startup,
+	.shutdown	= mt8167_afe_mrg_bt_shutdown,
+	.hw_params	= mt8167_afe_mrg_bt_hw_params,
+	.trigger	= mt8167_afe_mrg_bt_trigger,
+};
+
+static const struct snd_soc_dai_ops mt8167_afe_pcm0_ops = {
+	.startup	= mt8167_afe_pcm0_startup,
+	.shutdown	= mt8167_afe_pcm0_shutdown,
+	.hw_params	= mt8167_afe_pcm0_hw_params,
+	.trigger	= mt8167_afe_pcm0_trigger,
+};
+
+static const struct snd_soc_dai_ops mt8167_afe_hdmi_ops = {
+	.startup	= mt8167_afe_hdmi_startup,
+	.shutdown	= mt8167_afe_hdmi_shutdown,
+	.prepare	= mt8167_afe_hdmi_prepare,
+	.trigger	= mt8167_afe_hdmi_trigger,
+	.set_fmt	= mt8167_afe_hdmi_set_fmt,
+};
+
+static const struct snd_soc_dai_ops mt8167_afe_tdm_in_ops = {
+	.startup	= mt8167_afe_tdm_in_startup,
+	.shutdown	= mt8167_afe_tdm_in_shutdown,
+	.prepare	= mt8167_afe_tdm_in_prepare,
+	.trigger	= mt8167_afe_tdm_in_trigger,
+	.set_fmt	= mt8167_afe_tdm_in_set_fmt,
+};
+
+static const struct snd_soc_dai_ops mt8167_afe_hw_gain1_ops = {
+	.startup	= mt8167_afe_hw_gain1_startup,
+	.shutdown	= mt8167_afe_hw_gain1_shutdown,
+	.prepare	= mt8167_afe_hw_gain1_prepare,
+};
+
+static int mt8167_afe_suspend(struct device *dev);
+static int mt8167_afe_resume(struct device *dev);
+
+static int mt8167_afe_dai_suspend(struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	dev_dbg(afe->dev, "%s id %d suspended %d\n",
+		__func__, dai->id, afe->suspended);
+
+	if (afe->suspended)
+		return 0;
+
+	return mt8167_afe_suspend(afe->dev);
+}
+
+static int mt8167_afe_dai_resume(struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	dev_dbg(afe->dev, "%s id %d suspended %d\n",
+		__func__, dai->id, afe->suspended);
+
+	if (!afe->suspended)
+		return 0;
+
+	mt8167_afe_resume(afe->dev);
+
+	return 0;
+}
+
+static struct snd_soc_dai_driver mt8167_afe_pcm_dais[] = {
+	/* FE DAIs: memory interfaces to CPU */
+	{
+		.name = "DL1",
+		.id = MT8167_AFE_MEMIF_DL1,
+		.suspend = mt8167_afe_dai_suspend,
+		.resume = mt8167_afe_dai_resume,
+		.playback = {
+			.stream_name = "DL1",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mt8167_afe_dai_ops,
+	}, {
+		.name = "VUL",
+		.id = MT8167_AFE_MEMIF_VUL,
+		.suspend = mt8167_afe_dai_suspend,
+		.resume = mt8167_afe_dai_resume,
+		.capture = {
+			.stream_name = "VUL",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mt8167_afe_dai_ops,
+	}, {
+		.name = "DL2",
+		.id = MT8167_AFE_MEMIF_DL2,
+		.suspend = mt8167_afe_dai_suspend,
+		.resume = mt8167_afe_dai_resume,
+		.playback = {
+			.stream_name = "DL2",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mt8167_afe_dai_ops,
+	}, {
+		.name = "AWB",
+		.id = MT8167_AFE_MEMIF_AWB,
+		.suspend = mt8167_afe_dai_suspend,
+		.resume = mt8167_afe_dai_resume,
+		.capture = {
+			.stream_name = "AWB",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mt8167_afe_dai_ops,
+	}, {
+		.name = "DAI",
+		.id = MT8167_AFE_MEMIF_DAI,
+		.suspend = mt8167_afe_dai_suspend,
+		.resume = mt8167_afe_dai_resume,
+		.capture = {
+			.stream_name = "DAI",
+			.channels_min = 1,
+			.channels_max = 1,
+			.rates = SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000 |
+				 SNDRV_PCM_RATE_32000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		.ops = &mt8167_afe_dai_ops,
+	}, {
+		.name = "HDMI",
+		.id = MT8167_AFE_MEMIF_HDMI,
+		.suspend = mt8167_afe_dai_suspend,
+		.resume = mt8167_afe_dai_resume,
+		.playback = {
+			.stream_name = "HDMI",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mt8167_afe_dai_ops,
+	}, {
+		.name = "TDM_IN",
+		.id = MT8167_AFE_MEMIF_TDM_IN,
+		.suspend = mt8167_afe_dai_suspend,
+		.resume = mt8167_afe_dai_resume,
+		.capture = {
+			.stream_name = "TDM_IN",
+			.channels_min = 2,
+			.channels_max = 8,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mt8167_afe_dai_ops,
+	}, {
+		.name = "ULM",
+		.id = MT8167_AFE_MEMIF_MULTILINE_IN,
+		.suspend = mt8167_afe_dai_suspend,
+		.resume = mt8167_afe_dai_resume,
+		.capture = {
+			.stream_name = "MULTILINE_IN",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = SNDRV_PCM_RATE_32000
+				| SNDRV_PCM_RATE_44100
+				| SNDRV_PCM_RATE_48000
+				| SNDRV_PCM_RATE_88200
+				| SNDRV_PCM_RATE_96000
+				| SNDRV_PCM_RATE_176400
+				| SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE
+				| SNDRV_PCM_FMTBIT_S24_LE
+				| SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mt8167_afe_dai_ops,
+	}, {
+	/* BE DAIs */
+		.name = "I2S",
+		.id = MT8167_AFE_IO_I2S,
+		.playback = {
+			.stream_name = "I2S Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.capture = {
+			.stream_name = "I2S Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mt8167_afe_i2s_ops,
+	}, {
+		.name = "2ND I2S",
+		.id = MT8167_AFE_IO_2ND_I2S,
+		.playback = {
+			.stream_name = "2ND I2S Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.capture = {
+			.stream_name = "2ND I2S Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mt8167_afe_2nd_i2s_ops,
+	}, {
+	/* BE DAIs */
+		.name = "INT ADDA",
+		.id = MT8167_AFE_IO_INT_ADDA,
+		.playback = {
+			.stream_name = "INT ADDA Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE,
+		},
+		.capture = {
+			.stream_name = "INT ADDA Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000 |
+				 SNDRV_PCM_RATE_32000 |
+				 SNDRV_PCM_RATE_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		.ops = &mt8167_afe_int_adda_ops,
+	}, {
+	/* BE DAIs */
+		.name = "MRG BT",
+		.id = MT8167_AFE_IO_MRG_BT,
+		.playback = {
+			.stream_name = "MRG BT Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		.capture = {
+			.stream_name = "MRG BT Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		.ops = &mt8167_afe_mrg_bt_ops,
+		.symmetric_rates = 1,
+	}, {
+	/* BE DAIs */
+		.name = "PCM0",
+		.id = MT8167_AFE_IO_PCM_BT,
+		.playback = {
+			.stream_name = "PCM0 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		.capture = {
+			.stream_name = "PCM0 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		.ops = &mt8167_afe_pcm0_ops,
+		.symmetric_rates = 1,
+	}, {
+	/* BE DAIs */
+		.name = "DL Input",
+		.id = MT8167_AFE_IO_DL_BE,
+		.capture = {
+			.stream_name = "DL Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+	}, {
+	/* BE DAIs */
+		.name = "HDMIO",
+		.id = MT8167_AFE_IO_HDMI,
+		.playback = {
+			.stream_name = "HDMIO Playback",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mt8167_afe_hdmi_ops,
+	},  {
+	/* BE DAIs */
+		.name = "TDM_IN_IO",
+		.id = MT8167_AFE_IO_TDM_IN,
+		.capture = {
+			.stream_name = "TDM IN Capture",
+			.channels_min = 2,
+			.channels_max = 8,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mt8167_afe_tdm_in_ops,
+	}, {
+		.name = "INTDIR_IO",
+		.id = MT8167_AFE_IO_INTDIR_BE,
+		.capture = {
+			.stream_name = "INTDIR Capture",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = SNDRV_PCM_RATE_32000
+				| SNDRV_PCM_RATE_44100
+				| SNDRV_PCM_RATE_48000
+				| SNDRV_PCM_RATE_88200
+				| SNDRV_PCM_RATE_96000
+				| SNDRV_PCM_RATE_176400
+				| SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE
+				| SNDRV_PCM_FMTBIT_S24_LE
+				| SNDRV_PCM_FMTBIT_S32_LE,
+		},
+	}, {
+	/* BE DAIs */
+		.name = "HW_GAIN1",
+		.id = MT8167_AFE_IO_HW_GAIN1,
+		.capture = {
+			.stream_name = "HW GAIN1 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mt8167_afe_hw_gain1_ops,
+	},
+};
+
+static struct snd_soc_dai_driver *mt8167_afe_get_dai_drv_by_id(unsigned int id)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(mt8167_afe_pcm_dais); i++) {
+		if (id == mt8167_afe_pcm_dais[i].id)
+			return &mt8167_afe_pcm_dais[i];
+	}
+
+	return NULL;
+}
+
+static int mt8167_afe_set_memif_irq_by_mode(struct mt8167_afe_memif_data *data,
+			unsigned int mode)
+{
+	int ret = 0;
+
+	if (data == NULL)
+		return -EINVAL;
+	switch (mode) {
+	case MT8167_AFE_IRQ_1:
+		data->irq_reg_cnt = AFE_IRQ_CNT1;
+		data->irq_cnt_shift = 0;
+		data->irq_mode = MT8167_AFE_IRQ_1;
+		data->irq_fs_reg = AFE_IRQ_MCU_CON;
+		data->irq_fs_shift = 4;
+		data->irq_clr_shift = 0;
+		break;
+	case MT8167_AFE_IRQ_2:
+		data->irq_reg_cnt = AFE_IRQ_CNT2;
+		data->irq_cnt_shift = 0;
+		data->irq_mode = MT8167_AFE_IRQ_2;
+		data->irq_fs_reg = AFE_IRQ_MCU_CON;
+		data->irq_fs_shift = 8;
+		data->irq_clr_shift = 1;
+		break;
+	case MT8167_AFE_IRQ_7:
+		data->irq_reg_cnt = AFE_IRQ_CNT7;
+		data->irq_cnt_shift = 0;
+		data->irq_mode = MT8167_AFE_IRQ_7;
+		data->irq_fs_reg = AFE_IRQ_MCU_CON;
+		data->irq_fs_shift = 24;
+		data->irq_clr_shift = 6;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
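+/* AFE interconnect mixers: each output O<n> sums the listed inputs I<m> */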
+static const struct snd_kcontrol_new mt8167_afe_o00_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I05 Switch", AFE_CONN0, 5, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I07 Switch", AFE_CONN0, 7, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8167_afe_o01_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I06 Switch", AFE_CONN0, 22, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I08 Switch", AFE_CONN0, 24, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8167_afe_o02_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I05 Switch", AFE_CONN1, 5, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I06 Switch", AFE_CONN1, 6, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8167_afe_o03_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I05 Switch", AFE_CONN1, 21, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I07 Switch", AFE_CONN1, 23, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8167_afe_o04_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I06 Switch", AFE_CONN2, 6, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I08 Switch", AFE_CONN2, 8, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8167_afe_o05_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I00 Switch", AFE_CONN2, 16, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I05 Switch", AFE_CONN2, 19, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I07 Switch", AFE_CONN2, 20, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8167_afe_o06_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I01 Switch", AFE_CONN2, 22, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I06 Switch", AFE_CONN2, 24, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I08 Switch", AFE_CONN2, 25, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8167_afe_o09_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I00 Switch", AFE_CONN5, 8, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I03 Switch", AFE_CONN3, 0, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I10 Switch", AFE_GAIN1_CONN, 26, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8167_afe_o10_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I01 Switch", AFE_CONN5, 13, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I04 Switch", AFE_CONN3, 3, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I11 Switch", AFE_GAIN1_CONN, 29, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8167_afe_o13_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I03 Switch", AFE_GAIN1_CONN2, 5, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8167_afe_o14_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I04 Switch", AFE_GAIN1_CONN2, 19, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8167_afe_o11_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I02 Switch", AFE_CONN3, 6, 1, 0),
+};
+
+static const char * const ain_text[] = {
+	"INT ADC", "EXT ADC"
+};
+
+static SOC_ENUM_SINGLE_DECL(ain_enum, AFE_ADDA_TOP_CON0, 0, ain_text);
+
+static const struct snd_kcontrol_new ain_mux =
+	SOC_DAPM_ENUM("AIN Source", ain_enum);
+
+static const char * const daibt_mux_text[] = {
+	"PCM", "MRG"
+};
+
+static SOC_ENUM_SINGLE_DECL(daibt_mux_enum, AFE_DAIBT_CON0, 12, daibt_mux_text);
+
+static const struct snd_kcontrol_new daibt_mux =
+	SOC_DAPM_ENUM("DAIBT Source", daibt_mux_enum);
+
+static const struct snd_kcontrol_new i2s_o03_o04_enable_ctl =
+	SOC_DAPM_SINGLE_VIRT("Switch", 1);
+
+static const struct snd_kcontrol_new int_adda_o03_o04_enable_ctl =
+	SOC_DAPM_SINGLE_VIRT("Switch", 1);
+
+static const struct snd_kcontrol_new mrg_bt_o02_enable_ctl =
+	SOC_DAPM_SINGLE_VIRT("Switch", 1);
+
+static const struct snd_kcontrol_new pcm0_o02_enable_ctl =
+	SOC_DAPM_SINGLE_VIRT("Switch", 1);
+
+static const struct snd_soc_dapm_widget mt8167_afe_pcm_widgets[] = {
+	/* inter-connections */
+	SND_SOC_DAPM_MIXER("I00", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I01", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I02", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I03", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I04", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I05", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I06", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I07", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I08", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I05L", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I06L", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I07L", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I08L", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I10", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I11", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MIXER("O00", SND_SOC_NOPM, 0, 0,
+			   mt8167_afe_o00_mix, ARRAY_SIZE(mt8167_afe_o00_mix)),
+	SND_SOC_DAPM_MIXER("O01", SND_SOC_NOPM, 0, 0,
+			   mt8167_afe_o01_mix, ARRAY_SIZE(mt8167_afe_o01_mix)),
+	SND_SOC_DAPM_MIXER("O02", SND_SOC_NOPM, 0, 0,
+			   mt8167_afe_o02_mix, ARRAY_SIZE(mt8167_afe_o02_mix)),
+	SND_SOC_DAPM_MIXER("O03", SND_SOC_NOPM, 0, 0,
+			   mt8167_afe_o03_mix, ARRAY_SIZE(mt8167_afe_o03_mix)),
+	SND_SOC_DAPM_MIXER("O04", SND_SOC_NOPM, 0, 0,
+			   mt8167_afe_o04_mix, ARRAY_SIZE(mt8167_afe_o04_mix)),
+	SND_SOC_DAPM_MIXER("O05", SND_SOC_NOPM, 0, 0,
+			   mt8167_afe_o05_mix, ARRAY_SIZE(mt8167_afe_o05_mix)),
+	SND_SOC_DAPM_MIXER("O06", SND_SOC_NOPM, 0, 0,
+			   mt8167_afe_o06_mix, ARRAY_SIZE(mt8167_afe_o06_mix)),
+	SND_SOC_DAPM_MIXER("O09", SND_SOC_NOPM, 0, 0,
+			   mt8167_afe_o09_mix, ARRAY_SIZE(mt8167_afe_o09_mix)),
+	SND_SOC_DAPM_MIXER("O10", SND_SOC_NOPM, 0, 0,
+			   mt8167_afe_o10_mix, ARRAY_SIZE(mt8167_afe_o10_mix)),
+	SND_SOC_DAPM_MIXER("O11", SND_SOC_NOPM, 0, 0,
+			   mt8167_afe_o11_mix, ARRAY_SIZE(mt8167_afe_o11_mix)),
+	SND_SOC_DAPM_MIXER("O13", SND_SOC_NOPM, 0, 0,
+			   mt8167_afe_o13_mix, ARRAY_SIZE(mt8167_afe_o13_mix)),
+	SND_SOC_DAPM_MIXER("O14", SND_SOC_NOPM, 0, 0,
+			   mt8167_afe_o14_mix, ARRAY_SIZE(mt8167_afe_o14_mix)),
+
+	SND_SOC_DAPM_MUX("AIN Mux", SND_SOC_NOPM, 0, 0, &ain_mux),
+	SND_SOC_DAPM_MUX("DAIBT Mux", SND_SOC_NOPM, 0, 0, &daibt_mux),
+
+	SND_SOC_DAPM_SWITCH("I2S O03_O04", SND_SOC_NOPM, 0, 0,
+			    &i2s_o03_o04_enable_ctl),
+	SND_SOC_DAPM_SWITCH("INT ADDA O03_O04", SND_SOC_NOPM, 0, 0,
+			    &int_adda_o03_o04_enable_ctl),
+	SND_SOC_DAPM_SWITCH("MRG BT O02", SND_SOC_NOPM, 0, 0,
+			    &mrg_bt_o02_enable_ctl),
+	SND_SOC_DAPM_SWITCH("PCM0 O02", SND_SOC_NOPM, 0, 0,
+			    &pcm0_o02_enable_ctl),
+
+	SND_SOC_DAPM_OUTPUT("MRG Out"),
+	SND_SOC_DAPM_OUTPUT("PCM0 Out"),
+
+	SND_SOC_DAPM_INPUT("DL Source"),
+	SND_SOC_DAPM_INPUT("MRG In"),
+	SND_SOC_DAPM_INPUT("PCM0 In"),
+
+	SND_SOC_DAPM_INPUT("SPDIF In"),
+};
+
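+/*
+ * Interconnect naming follows the AFE crossbar: Ixx are bus inputs fed by
+ * memifs and DAIs, Oxx are bus outputs; each "Ixx Switch" route below flips
+ * one AFE_CONNx bit declared in the mixer arrays above.
+ */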
+static const struct snd_soc_dapm_route mt8167_afe_pcm_routes[] = {
+	/* downlink */
+	{"I05", NULL, "DL1"},
+	{"I06", NULL, "DL1"},
+	{"I07", NULL, "DL2"},
+	{"I08", NULL, "DL2"},
+	{"O03", "I05 Switch", "I05"},
+	{"O04", "I06 Switch", "I06"},
+	{"O02", "I05 Switch", "I05"},
+	{"O02", "I06 Switch", "I06"},
+	{"O00", "I05 Switch", "I05"},
+	{"O01", "I06 Switch", "I06"},
+	{"O03", "I07 Switch", "I07"},
+	{"O04", "I08 Switch", "I08"},
+	{"O00", "I07 Switch", "I07"},
+	{"O01", "I08 Switch", "I08"},
+	{"I2S O03_O04", "Switch", "O03"},
+	{"I2S O03_O04", "Switch", "O04"},
+	{"I2S Playback", NULL, "I2S O03_O04"},
+	{"INT ADDA O03_O04", "Switch", "O03"},
+	{"INT ADDA O03_O04", "Switch", "O04"},
+	{"INT ADDA Playback", NULL, "INT ADDA O03_O04"},
+	{"2ND I2S Playback", NULL, "O00"},
+	{"2ND I2S Playback", NULL, "O01"},
+
+	{"MRG BT O02", "Switch", "O02"},
+	{"PCM0 O02", "Switch", "O02"},
+	{"MRG BT Playback", NULL, "MRG BT O02"},
+	{"PCM0 Playback", NULL, "PCM0 O02"},
+	{"MRG Out", NULL, "MRG BT Playback"},
+	{"PCM0 Out", NULL, "PCM0 Playback"},
+
+	{"HDMIO Playback", NULL, "HDMI"},
+
+	/* uplink */
+	{"AIN Mux", "EXT ADC", "I2S Capture"},
+	{"AIN Mux", "INT ADC", "INT ADDA Capture"},
+	{"I03", NULL, "AIN Mux"},
+	{"I04", NULL, "AIN Mux"},
+	{"I00", NULL, "2ND I2S Capture"},
+	{"I01", NULL, "2ND I2S Capture"},
+
+	{"O13", "I03 Switch", "I03"},
+	{"O14", "I04 Switch", "I04"},
+	{"HW GAIN1 Capture", NULL, "O13"},
+	{"HW GAIN1 Capture", NULL, "O14"},
+	{"I10", NULL, "HW GAIN1 Capture"},
+	{"I11", NULL, "HW GAIN1 Capture"},
+	{"O09", "I10 Switch", "I10"},
+	{"O10", "I11 Switch", "I11"},
+
+	{"O09", "I03 Switch", "I03"},
+	{"O10", "I04 Switch", "I04"},
+	{"O09", "I00 Switch", "I00"},
+	{"O10", "I01 Switch", "I01"},
+	{"VUL", NULL, "O09"},
+	{"VUL", NULL, "O10"},
+
+	{"DL Capture", NULL, "DL Source"},
+	{"I05L", NULL, "DL Capture"},
+	{"I06L", NULL, "DL Capture"},
+	{"I07L", NULL, "DL Capture"},
+	{"I08L", NULL, "DL Capture"},
+	{"O05", "I05 Switch", "I05L"},
+	{"O06", "I06 Switch", "I06L"},
+	{"O05", "I07 Switch", "I07L"},
+	{"O06", "I08 Switch", "I08L"},
+	{"O05", "I00 Switch", "I00"},
+	{"O06", "I01 Switch", "I01"},
+	{"AWB", NULL, "O05"},
+	{"AWB", NULL, "O06"},
+
+	{"PCM0 Capture", NULL, "PCM0 In"},
+	{"MRG BT Capture", NULL, "MRG In"},
+	{"DAIBT Mux", "PCM", "PCM0 Capture"},
+	{"DAIBT Mux", "MRG", "MRG BT Capture"},
+	{"I02", NULL, "DAIBT Mux"},
+	{"O11", "I02 Switch", "I02"},
+	{"DAI", NULL, "O11"},
+
+	{"TDM_IN", NULL, "TDM IN Capture"},
+
+	{"MULTILINE_IN", NULL, "INTDIR Capture"},
+	{"INTDIR Capture", NULL, "SPDIF In"},
+};
+
+static const struct snd_soc_component_driver mt8167_afe_pcm_dai_component = {
+	.name = "mtk-afe-pcm-dai",
+	.dapm_widgets = mt8167_afe_pcm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(mt8167_afe_pcm_widgets),
+	.dapm_routes = mt8167_afe_pcm_routes,
+	.num_dapm_routes = ARRAY_SIZE(mt8167_afe_pcm_routes),
+};
+
+
+#ifdef COMMON_CLOCK_FRAMEWORK_API
+static const char *aud_clks[MT8167_CLK_NUM] = {
+	[MT8167_CLK_TOP_PDN_AUD] = "top_pdn_audio",
+	[MT8167_CLK_APLL12_DIV0] = "apll12_div0",
+	[MT8167_CLK_APLL12_DIV1] = "apll12_div1",
+	[MT8167_CLK_APLL12_DIV2] = "apll12_div2",
+	[MT8167_CLK_APLL12_DIV3] = "apll12_div3",
+	[MT8167_CLK_APLL12_DIV4] = "apll12_div4",
+	[MT8167_CLK_APLL12_DIV4B] = "apll12_div4b",
+	[MT8167_CLK_APLL12_DIV5] = "apll12_div5",
+	[MT8167_CLK_APLL12_DIV5B] = "apll12_div5b",
+	[MT8167_CLK_APLL12_DIV6] = "apll12_div6",
+	[MT8167_CLK_SPDIF_IN] = "spdif_in",
+	[MT8167_CLK_ENGEN1] = "engen1",
+	[MT8167_CLK_ENGEN2] = "engen2",
+	[MT8167_CLK_AUD1] = "aud1",
+	[MT8167_CLK_AUD2] = "aud2",
+	[MT8167_CLK_I2S0_M_SEL] = "i2s0_m_sel",
+	[MT8167_CLK_I2S1_M_SEL] = "i2s1_m_sel",
+	[MT8167_CLK_I2S2_M_SEL] = "i2s2_m_sel",
+	[MT8167_CLK_I2S3_M_SEL] = "i2s3_m_sel",
+	[MT8167_CLK_I2S4_M_SEL] = "i2s4_m_sel",
+	[MT8167_CLK_I2S5_M_SEL] = "i2s5_m_sel",
+	[MT8167_CLK_SPDIF_B_SEL] = "spdif_b_sel",
+	[MT8167_CLK_SPDIFIN_SEL] = "spdifin_sel",
+	[MT8167_CLK_TOP_UNIVPLL_D2] = "univpll_div2",
+};
+#endif
+
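+/*
+ * Static description of each DMA memory interface: base/end/current pointer
+ * registers, the bit positions used to program rate, mono mode and enable,
+ * and the IRQ line that paces the ring buffer.
+ */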
+static struct mt8167_afe_memif_data memif_data[MT8167_AFE_MEMIF_NUM] = {
+	{
+		.name = "DL1",
+		.id = MT8167_AFE_MEMIF_DL1,
+		.reg_ofs_base = AFE_DL1_BASE,
+		.reg_ofs_end = AFE_DL1_END,
+		.reg_ofs_cur = AFE_DL1_CUR,
+		.fs_shift = 0,
+		.mono_shift = 21,
+		.enable_shift = 1,
+		.irq_reg_cnt = AFE_IRQ_CNT1,
+		.irq_cnt_shift = 0,
+		.irq_mode = MT8167_AFE_IRQ_1,
+		.irq_fs_reg = AFE_IRQ_MCU_CON,
+		.irq_fs_shift = 4,
+		.irq_clr_shift = 0,
+		.max_sram_size = 0, /* was 36 * 1024; SRAM disabled due to kernel panics */
+		.sram_offset = 0,
+		.format_reg = AFE_MEMIF_PBUF_SIZE,
+		.format_shift = 16,
+		.conn_format_mask = -1,
+		.prealloc_size = 128 * 1024,
+		.buffer_align_bytes = 16,
+	}, {
+		.name = "DL2",
+		.id = MT8167_AFE_MEMIF_DL2,
+		.reg_ofs_base = AFE_DL2_BASE,
+		.reg_ofs_end = AFE_DL2_END,
+		.reg_ofs_cur = AFE_DL2_CUR,
+		.fs_shift = 4,
+		.mono_shift = 22,
+		.enable_shift = 2,
+		.irq_reg_cnt = AFE_IRQ_CNT7,
+		.irq_cnt_shift = 0,
+		.irq_mode = MT8167_AFE_IRQ_7,
+		.irq_fs_reg = AFE_IRQ_MCU_CON,
+		.irq_fs_shift = 24,
+		.irq_clr_shift = 6,
+		.max_sram_size = 0,
+		.sram_offset = 0,
+		.format_reg = AFE_MEMIF_PBUF_SIZE,
+		.format_shift = 18,
+		.conn_format_mask = -1,
+		.prealloc_size = 128 * 1024,
+		.buffer_align_bytes = 16,
+	}, {
+		.name = "VUL",
+		.id = MT8167_AFE_MEMIF_VUL,
+		.reg_ofs_base = AFE_VUL_BASE,
+		.reg_ofs_end = AFE_VUL_END,
+		.reg_ofs_cur = AFE_VUL_CUR,
+		.fs_shift = 16,
+		.mono_shift = 27,
+		.enable_shift = 3,
+		.irq_reg_cnt = AFE_IRQ_CNT2,
+		.irq_cnt_shift = 0,
+		.irq_mode = MT8167_AFE_IRQ_2,
+		.irq_fs_reg = AFE_IRQ_MCU_CON,
+		.irq_fs_shift = 8,
+		.irq_clr_shift = 1,
+		.max_sram_size = 0,
+		.sram_offset = 0,
+		.format_reg = AFE_MEMIF_PBUF_SIZE,
+		.format_shift = 22,
+		.conn_format_mask = AFE_CONN_24BIT_O09 | AFE_CONN_24BIT_O10,
+		.prealloc_size = 32 * 1024,
+		.buffer_align_bytes = 8,
+	}, {
+		.name = "DAI",
+		.id = MT8167_AFE_MEMIF_DAI,
+		.reg_ofs_base = AFE_DAI_BASE,
+		.reg_ofs_end = AFE_DAI_END,
+		.reg_ofs_cur = AFE_DAI_CUR,
+		.fs_shift = 24,
+		.mono_shift = -1,
+		.enable_shift = 4,
+		.irq_reg_cnt = AFE_IRQ_CNT2,
+		.irq_cnt_shift = 0,
+		.irq_mode = MT8167_AFE_IRQ_2,
+		.irq_fs_reg = AFE_IRQ_MCU_CON,
+		.irq_fs_shift = 8,
+		.irq_clr_shift = 1,
+		.max_sram_size = 0,
+		.sram_offset = 0,
+		.format_reg = AFE_MEMIF_PBUF_SIZE,
+		.format_shift = 24,
+		.conn_format_mask = -1,
+		.prealloc_size = 16 * 1024,
+		.buffer_align_bytes = 8,
+	}, {
+		.name = "AWB",
+		.id = MT8167_AFE_MEMIF_AWB,
+		.reg_ofs_base = AFE_AWB_BASE,
+		.reg_ofs_end = AFE_AWB_END,
+		.reg_ofs_cur = AFE_AWB_CUR,
+		.fs_shift = 12,
+		.mono_shift = 24,
+		.enable_shift = 6,
+		.irq_reg_cnt = AFE_IRQ_CNT2,
+		.irq_cnt_shift = 0,
+		.irq_mode = MT8167_AFE_IRQ_2,
+		.irq_fs_reg = AFE_IRQ_MCU_CON,
+		.irq_fs_shift = 8,
+		.irq_clr_shift = 1,
+		.max_sram_size = 0,
+		.sram_offset = 0,
+		.format_reg = AFE_MEMIF_PBUF_SIZE,
+		.format_shift = 20,
+		.conn_format_mask = AFE_CONN_24BIT_O05 | AFE_CONN_24BIT_O06,
+		.prealloc_size = 0,
+		.buffer_align_bytes = 8,
+	}, {
+		.name = "MOD_DAI",
+		.id = MT8167_AFE_MEMIF_MOD_DAI,
+		.reg_ofs_base = AFE_MOD_PCM_BASE,
+		.reg_ofs_end = AFE_MOD_PCM_END,
+		.reg_ofs_cur = AFE_MOD_PCM_CUR,
+		.fs_shift = 30,
+		.mono_shift = -1,
+		.enable_shift = 7,
+		.irq_reg_cnt = AFE_IRQ_CNT2,
+		.irq_cnt_shift = 0,
+		.irq_mode = MT8167_AFE_IRQ_2,
+		.irq_fs_reg = AFE_IRQ_MCU_CON,
+		.irq_fs_shift = 8,
+		.irq_clr_shift = 1,
+		.max_sram_size = 0,
+		.sram_offset = 0,
+		.format_reg = AFE_MEMIF_PBUF_SIZE,
+		.format_shift = 26,
+		.conn_format_mask = -1,
+		.prealloc_size = 0,
+		.buffer_align_bytes = 8,
+	}, {
+		.name = "HDMI",
+		.id = MT8167_AFE_MEMIF_HDMI,
+		.reg_ofs_base = AFE_HDMI_OUT_BASE,
+		.reg_ofs_end = AFE_HDMI_OUT_END,
+		.reg_ofs_cur = AFE_HDMI_OUT_CUR,
+		.fs_shift = -1,
+		.mono_shift = -1,
+		.enable_shift = -1,
+		.irq_reg_cnt = AFE_IRQ_CNT5,
+		.irq_cnt_shift = 0,
+		.irq_mode = MT8167_AFE_IRQ_5,
+		.irq_fs_reg = -1,
+		.irq_fs_shift = -1,
+		.irq_clr_shift = 4,
+		.max_sram_size = 0,
+		.sram_offset = 0,
+		.format_reg = AFE_MEMIF_PBUF_SIZE,
+		.format_shift = 28,
+		.conn_format_mask = -1,
+		.prealloc_size = 0,
+		.buffer_align_bytes = 16,
+	}, {
+		.name = "TDM_IN",
+		.id = MT8167_AFE_MEMIF_TDM_IN,
+		.reg_ofs_base = AFE_HDMI_IN_2CH_BASE,
+		.reg_ofs_end = AFE_HDMI_IN_2CH_END,
+		.reg_ofs_cur = AFE_HDMI_IN_2CH_CUR,
+		.fs_shift = -1,
+		.mono_shift = -1,
+		.enable_shift = -1,
+		.irq_reg_cnt = AFE_IRQ_CNT10,
+		.irq_cnt_shift = 0,
+		.irq_mode = MT8167_AFE_IRQ_10,
+		.irq_fs_reg = -1,
+		.irq_fs_shift = -1,
+		.irq_clr_shift = 9,
+		.max_sram_size = 0,
+		.sram_offset = 0,
+		.format_reg = AFE_MEMIF_PBUF2_SIZE,
+		.format_shift = 4,
+		.conn_format_mask = -1,
+		.prealloc_size = 0,
+		.buffer_align_bytes = 8,
+	}, {
+		.name = "ULM",
+		.id = MT8167_AFE_MEMIF_MULTILINE_IN,
+		.reg_ofs_base = SPDIFIN_BASE_ADR,
+		.reg_ofs_end = SPDIFIN_END_ADR,
+		.reg_ofs_cur = SPDIFIN_CUR_ADR,
+		.fs_shift = -1,
+		.mono_shift = -1,
+		.enable_shift = -1,
+		.irq_reg_cnt = AFE_IRQ_CNT13,
+		.irq_cnt_shift = 0,
+		.irq_mode = MT8167_AFE_IRQ_13,
+		.irq_fs_reg = -1,
+		.irq_fs_shift = -1,
+		.irq_clr_shift = 12,
+		.max_sram_size = 0,
+		.sram_offset = 0,
+		.format_reg = AFE_MEMIF_PBUF2_SIZE,
+		.format_shift = 6,
+		.conn_format_mask = -1,
+		.prealloc_size = 0,
+		.buffer_align_bytes = 8,
+	},
+};
+
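+/*
+ * No register cache: memif CUR pointers and IRQ status are advanced by the
+ * hardware itself, so every read must go out to the device.
+ */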
+static const struct regmap_config mt8167_afe_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = ABB_AFE_SDM_TEST,
+	.cache_type = REGCACHE_NONE,
+};
+
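+/*
+ * Shared AFE interrupt: latch the pending bits once, report an elapsed
+ * period for every running memif whose IRQ line fired, hand SPDIF-IN
+ * events (IRQ9) to their dedicated handler, then ack everything via
+ * AFE_IRQ_CLR.
+ */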
+static irqreturn_t mt8167_afe_irq_handler(int irq, void *dev_id)
+{
+	struct mtk_afe *afe = dev_id;
+	unsigned int reg_value;
+	unsigned int memif_status;
+	int i, ret;
+
+	ret = regmap_read(afe->regmap, AFE_IRQ_STATUS, &reg_value);
+	if (ret) {
+		dev_err(afe->dev, "%s irq status err\n", __func__);
+		reg_value = AFE_IRQ_STATUS_BITS;
+		goto err_irq;
+	}
+
+	ret = regmap_read(afe->regmap, AFE_DAC_CON0, &memif_status);
+	if (ret) {
+		dev_err(afe->dev, "%s memif status err\n", __func__);
+		reg_value = AFE_IRQ_STATUS_BITS;
+		goto err_irq;
+	}
+
+	for (i = 0; i < MT8167_AFE_MEMIF_NUM; i++) {
+		struct mt8167_afe_memif *memif = &afe->memif[i];
+		struct snd_pcm_substream *substream = memif->substream;
+
+		if (!substream || !(reg_value & (1 << memif->data->irq_clr_shift)))
+			continue;
+
+		if (memif->data->enable_shift >= 0 &&
+		    !((1 << memif->data->enable_shift) & memif_status))
+			continue;
+
+		snd_pcm_period_elapsed(substream);
+	}
+
+	/* SPDIF-IN is on IRQ9: forward it to the SPDIF RX handler */
+	if (reg_value & 0x100)
+		afe_spdifrx_isr();
+
+err_irq:
+	/* clear irq */
+	regmap_write(afe->regmap, AFE_IRQ_CLR, reg_value & AFE_IRQ_STATUS_BITS);
+
+	return IRQ_HANDLED;
+}
+
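+/*
+ * Suspend/resume snapshot: the registers named in mt8167_afe_backup_list
+ * are read back under the main AFE clock here and rewritten (plus an IRQ
+ * unmask) on resume, on the assumption that the AFE register file does not
+ * survive suspend.
+ */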
+static int mt8167_afe_suspend(struct device *dev)
+{
+	struct mtk_afe *afe = dev_get_drvdata(dev);
+	int i;
+
+	dev_info(dev, "%s >>\n", __func__);
+
+	mt8167_afe_enable_main_clk(afe);
+
+	for (i = 0; i < ARRAY_SIZE(mt8167_afe_backup_list); i++)
+		regmap_read(afe->regmap, mt8167_afe_backup_list[i],
+			    &afe->backup_regs[i]);
+
+	mt8167_afe_disable_main_clk(afe);
+
+	afe->suspended = true;
+
+	dev_info(dev, "%s <<\n", __func__);
+
+	return 0;
+}
+
+static int mt8167_afe_resume(struct device *dev)
+{
+	struct mtk_afe *afe = dev_get_drvdata(dev);
+	int i;
+
+	dev_info(dev, "%s >>\n", __func__);
+
+	mt8167_afe_enable_main_clk(afe);
+
+	/* unmask all IRQs */
+	regmap_update_bits(afe->regmap, AFE_IRQ_MCU_EN, 0xff, 0xff);
+
+	for (i = 0; i < ARRAY_SIZE(mt8167_afe_backup_list); i++)
+		regmap_write(afe->regmap, mt8167_afe_backup_list[i],
+			     afe->backup_regs[i]);
+
+	mt8167_afe_disable_main_clk(afe);
+
+	afe->suspended = false;
+
+	dev_info(dev, "%s <<\n", __func__);
+
+	return 0;
+}
+
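+/*
+ * Acquire every clock named in aud_clks up front; a missing clock fails
+ * probe early instead of being discovered in an enable path later.
+ */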
+static int mt8167_afe_init_audio_clk(struct mtk_afe *afe)
+{
+#ifdef COMMON_CLOCK_FRAMEWORK_API
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(aud_clks); i++) {
+		afe->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]);
+		if (IS_ERR(afe->clocks[i])) {
+			dev_err(afe->dev, "%s devm_clk_get %s fail\n",
+				__func__, aud_clks[i]);
+			return PTR_ERR(afe->clocks[i]);
+		}
+	}
+#endif
+	return 0;
+}
+
+static int mt8167_afe_pcm_dev_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	int irq_id;
+	struct mtk_afe *afe;
+	struct resource *res;
+	struct device_node *np = pdev->dev.of_node;
+
+	afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
+	if (!afe)
+		return -ENOMEM;
+
+	afe->backup_regs = devm_kcalloc(&pdev->dev,
+					ARRAY_SIZE(mt8167_afe_backup_list),
+					sizeof(unsigned int), GFP_KERNEL);
+	if (!afe->backup_regs)
+		return -ENOMEM;
+
+	spin_lock_init(&afe->afe_ctrl_lock);
+	mutex_init(&afe->afe_clk_mutex);
+#ifdef IDLE_TASK_DRIVER_API
+	mutex_init(&afe->emi_clk_mutex);
+#endif
+
+	afe->dev = &pdev->dev;
+
+	irq_id = platform_get_irq(pdev, 0);
+	if (irq_id <= 0) {
+		dev_err(afe->dev, "np %s no irq\n", np->name);
+		return irq_id < 0 ? irq_id : -ENXIO;
+	}
+
+	ret = devm_request_irq(afe->dev, irq_id, mt8167_afe_irq_handler,
+			       0, "Afe_ISR_Handle", (void *)afe);
+	if (ret) {
+		dev_err(afe->dev, "could not request_irq\n");
+		return ret;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	afe->base_addr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(afe->base_addr))
+		return PTR_ERR(afe->base_addr);
+
+	afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr,
+					    &mt8167_afe_regmap_config);
+	if (IS_ERR(afe->regmap))
+		return PTR_ERR(afe->regmap);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	afe->sram_address = devm_ioremap_resource(&pdev->dev, res);
+	if (!IS_ERR(afe->sram_address)) {
+		afe->sram_phy_address = res->start;
+		afe->sram_size = resource_size(res);
+	}
+
+	/* initial audio related clock */
+	ret = mt8167_afe_init_audio_clk(afe);
+	if (ret) {
+		dev_err(afe->dev, "%s mt8167_afe_init_audio_clk fail\n", __func__);
+		return ret;
+	}
+
+	for (i = 0; i < MT8167_AFE_MEMIF_NUM; i++)
+		afe->memif[i].data = &memif_data[i];
+
+	platform_set_drvdata(pdev, afe);
+
+	if (of_property_read_u32(np, "mediatek,tdm-out-mode", &afe->tdm_out_mode))
+		afe->tdm_out_mode = MT8167_AFE_TDM_OUT_HDMI;
+
+	if (of_property_read_u32(np, "mediatek,tdm-in-lrck-cycle", &afe->tdm_in_lrck_cycle))
+		afe->tdm_in_lrck_cycle = LRCK_CYCLE_INVALID;
+
+	if (of_property_read_u32_array(np, "mediatek,i2s-clock-modes",
+				       afe->i2s_clk_modes,
+				       ARRAY_SIZE(afe->i2s_clk_modes))) {
+		for (i = 0; i < ARRAY_SIZE(afe->i2s_clk_modes); i++)
+			afe->i2s_clk_modes[i] = MT8167_AFE_I2S_SEPARATE_CLOCK;
+	}
+
+	for (i = 0; i < MT8167_AFE_I2S_SETS; i++) {
+		static const unsigned int i2s_dai_ids[MT8167_AFE_I2S_SETS] = {
+			MT8167_AFE_IO_I2S, MT8167_AFE_IO_2ND_I2S
+		};
+		struct snd_soc_dai_driver *drv;
+
+		if (afe->i2s_clk_modes[i] == MT8167_AFE_I2S_SHARED_CLOCK) {
+			drv = mt8167_afe_get_dai_drv_by_id(i2s_dai_ids[i]);
+			if (!drv)
+				continue;
+
+			drv->symmetric_rates = 1;
+			drv->symmetric_samplebits = 1;
+		}
+	}
+
+	if (of_property_read_u32(np, "mediatek,awb-irq-mode", &afe->awb_irq_mode))
+		afe->awb_irq_mode = MT8167_AFE_IRQ_2;
+
+	if (afe->awb_irq_mode != memif_data[MT8167_AFE_MEMIF_AWB].irq_mode)
+		mt8167_afe_set_memif_irq_by_mode(&memif_data[MT8167_AFE_MEMIF_AWB],
+						 afe->awb_irq_mode);
+
+	ret = devm_snd_soc_register_component(&pdev->dev,
+					      &mt8167_afe_pcm_platform,
+					      NULL, 0);
+	if (ret)
+		return ret;
+
+	ret = snd_soc_register_component(&pdev->dev,
+					 &mt8167_afe_pcm_dai_component,
+					 mt8167_afe_pcm_dais,
+					 ARRAY_SIZE(mt8167_afe_pcm_dais));
+	if (ret)
+		return ret;
+
+	mt8167_afe_init_debugfs(afe);
+
+	dev_info(&pdev->dev, "MTK AFE driver initialized.\n");
+	return 0;
+}
+
+static int mt8167_afe_pcm_dev_remove(struct platform_device *pdev)
+{
+	struct mtk_afe *afe = platform_get_drvdata(pdev);
+
+	mt8167_afe_cleanup_debugfs(afe);
+
+	snd_soc_unregister_component(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id mt8167_afe_pcm_dt_match[] = {
+	{ .compatible = "mediatek,mt8167-afe-pcm", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mt8167_afe_pcm_dt_match);
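+/*
+ * Illustrative device-tree node (addresses, sizes and the interrupt
+ * specifier are placeholders, not taken from a shipped dts); the property
+ * names match the of_property_read_* calls in probe:
+ *
+ *	afe: audio-controller@11140000 {
+ *		compatible = "mediatek,mt8167-afe-pcm";
+ *		reg = <0 0x11140000 0 0x1000>,
+ *		      <0 0x11141000 0 0x9000>;
+ *		interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+ *		mediatek,tdm-out-mode = <0>;
+ *		mediatek,awb-irq-mode = <1>;
+ *	};
+ */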
+
+static struct platform_driver mt8167_afe_pcm_driver = {
+	.driver = {
+		   .name = "mtk-afe-pcm",
+		   .of_match_table = mt8167_afe_pcm_dt_match,
+	},
+	.probe = mt8167_afe_pcm_dev_probe,
+	.remove = mt8167_afe_pcm_dev_remove,
+};
+
+module_platform_driver(mt8167_afe_pcm_driver);
+
+MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/mediatek/mt8167/mt8167-afe-regs.h b/sound/soc/mediatek/mt8167/mt8167-afe-regs.h
new file mode 100644
index 0000000..4ba239c
--- /dev/null
+++ b/sound/soc/mediatek/mt8167/mt8167-afe-regs.h
@@ -0,0 +1,576 @@
+/*
+ * mt8167-afe-regs.h  --  Mediatek audio register definitions
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT8167_AFE_REGS_H_
+#define _MT8167_AFE_REGS_H_
+
+#include <linux/bitops.h>
+
+/*****************************************************************************
+ *                  R E G I S T E R       D E F I N I T I O N
+ *****************************************************************************/
+#define AUDIO_TOP_CON0		0x0000
+#define AUDIO_TOP_CON1		0x0004
+#define AUDIO_TOP_CON3		0x000c
+#define AFE_DAC_CON0		0x0010
+#define AFE_DAC_CON1		0x0014
+#define AFE_I2S_CON		0x0018
+#define AFE_I2S_CON1		0x0034
+#define AFE_I2S_CON2		0x0038
+#define AFE_I2S_CON3		0x004c
+#define AFE_DAIBT_CON0          0x001c
+#define AFE_MRGIF_CON           0x003c
+#define AFE_CONN_24BIT		0x006c
+
+#define AFE_CONN0		0x0020
+#define AFE_CONN1		0x0024
+#define AFE_CONN2		0x0028
+#define AFE_CONN3		0x002C
+#define AFE_CONN4		0x0030
+#define AFE_CONN5		0x005C
+#define AFE_HDMI_CONN0		0x0390
+#define AFE_CONN_TDMIN_CON	0x039C
+
+#define AFE_GAIN1_CON0		0x0410
+#define AFE_GAIN1_CON1		0x0414
+#define AFE_GAIN1_CON2		0x0418
+#define AFE_GAIN1_CON3		0x041c
+#define AFE_GAIN1_CONN		0x0420
+#define AFE_GAIN1_CUR		0x0424
+#define AFE_GAIN1_CONN2		0x0448
+
+/* Memory interface */
+#define AFE_DL1_BASE		0x0040
+#define AFE_DL1_CUR		0x0044
+#define AFE_DL1_END		0x0048
+#define AFE_DL2_BASE		0x0050
+#define AFE_DL2_CUR		0x0054
+#define AFE_DL2_END             0x0058
+#define AFE_AWB_BASE		0x0070
+#define AFE_AWB_END             0x0078
+#define AFE_AWB_CUR		0x007c
+#define AFE_VUL_BASE		0x0080
+#define AFE_VUL_CUR		0x008c
+#define AFE_VUL_END		0x0088
+#define AFE_DAI_BASE		0x0090
+#define AFE_DAI_END		0x0098
+#define AFE_DAI_CUR		0x009c
+#define AFE_MOD_PCM_BASE	0x0330
+#define AFE_MOD_PCM_END		0x0338
+#define AFE_MOD_PCM_CUR		0x033c
+#define AFE_HDMI_OUT_BASE	0x0374
+#define AFE_HDMI_OUT_CUR	0x0378
+#define AFE_HDMI_OUT_END	0x037c
+
+#define AFE_MEMIF_MSB           0x00cc
+#define AFE_MEMIF_MON0          0x00d0
+#define AFE_MEMIF_MON1          0x00d4
+#define AFE_MEMIF_MON2          0x00d8
+#define AFE_MEMIF_MON3          0x00dc
+
+#define AFE_ADDA_DL_SRC2_CON0   0x0108
+#define AFE_ADDA_DL_SRC2_CON1   0x010c
+#define AFE_ADDA_UL_SRC_CON0    0x0114
+#define AFE_ADDA_UL_SRC_CON1    0x0118
+#define AFE_ADDA_TOP_CON0	0x0120
+#define AFE_ADDA_UL_DL_CON0     0x0124
+#define AFE_ADDA_NEWIF_CFG0     0x0138
+#define AFE_ADDA_NEWIF_CFG1     0x013c
+#define AFE_ADDA_PREDIS_CON0    0x0260
+#define AFE_ADDA_PREDIS_CON1    0x0264
+
+#define AFE_SGEN_CON0		0x01f0
+#define AFE_SINEGEN_CON_TDM	0x01f8
+#define AFE_SINEGEN_CON_TDM_IN	0x01fc
+
+#define AFE_HDMI_OUT_CON0	0x0370
+
+#define AFE_IRQ_MCU_CON		0x03a0
+#define AFE_IRQ_STATUS		0x03a4
+#define AFE_IRQ_CLR		0x03a8
+#define AFE_IRQ_CNT1		0x03ac
+#define AFE_IRQ_CNT2		0x03b0
+#define AFE_IRQ_MCU_EN		0x03b4
+#define AFE_IRQ_CNT5		0x03bc
+#define AFE_IRQ_CNT7		0x03dc
+#define AFE_IRQ_CNT13		0x0408
+#define AFE_IRQ1_MCU_CNT_MON    0x03c0
+#define AFE_IRQ2_MCU_CNT_MON    0x03c4
+#define AFE_IRQ_MCU_CON2	0x03f8
+
+#define AFE_MEMIF_PBUF_SIZE	0x03d8
+#define AFE_MEMIF_PBUF2_SIZE	0x03ec
+
+#define AFE_APLL1_TUNER_CFG	0x03f0
+#define AFE_APLL2_TUNER_CFG	0x03f4
+
+#define AFE_ASRC_CON0		0x0500
+
+#define AFE_ASRC_CON13		0x0550
+#define AFE_ASRC_CON14		0x0554
+#define AFE_ASRC_CON15		0x0558
+#define AFE_ASRC_CON16		0x055c
+#define AFE_ASRC_CON17		0x0560
+#define AFE_ASRC_CON18		0x0564
+#define AFE_ASRC_CON19		0x0568
+#define AFE_ASRC_CON20		0x056c
+#define AFE_ASRC_CON21		0x0570
+
+#define AFE_TDM_CON1		0x0548
+#define AFE_TDM_CON2		0x054c
+
+#define AFE_TDM_IN_CON1		0x0588
+#define AFE_TDM_IN_MON2		0x0594
+#define AFE_IRQ_CNT10		0x08dc
+
+#define AFE_HDMI_IN_2CH_CON0	0x09c0
+#define AFE_HDMI_IN_2CH_BASE	0x09c4
+#define AFE_HDMI_IN_2CH_END	0x09c8
+#define AFE_HDMI_IN_2CH_CUR	0x09cc
+
+#define AFE_MEMIF_MON15		0x0d7c
+#define ABB_AFE_SDM_TEST	0x0f4c
+
+#define AFE_IRQ_STATUS_BITS	0x13ff
+
+/* AUDIO_TOP_CON0 (0x0000) */
+#define AUD_TCON0_PDN_DAC_PREDIS	BIT(26)
+#define AUD_TCON0_PDN_DAC		BIT(25)
+#define AUD_TCON0_PDN_ADC		BIT(24)
+#define AUD_TCON0_PDN_SPDF		BIT(21)
+#define AUD_TCON0_PDN_HDMI		BIT(20)
+#define AUD_TCON0_PDN_APLL_TUNER	BIT(19)
+#define AUD_TCON0_PDN_APLL2_TUNER	BIT(18)
+#define AUD_TCON0_PDN_INTDIR_CK		BIT(15)
+#define AUD_TCON0_PDN_24M		BIT(9)
+#define AUD_TCON0_PDN_22M		BIT(8)
+#define AUD_TCON0_PDN_I2S		BIT(6)
+#define AUD_TCON0_PDN_AFE		BIT(2)
+
+/* AUDIO_TOP_CON3 (0x000C) */
+#define AUD_TCON3_HDMI_BCK_INV		BIT(3)
+
+/* AFE_I2S_CON (0x0018) */
+#define AFE_I2S_CON_PHASE_SHIFT_FIX	BIT(31)
+#define AFE_I2S_CON_BCK_INV			BIT(29)
+#define AFE_I2S_CON_FROM_IO_MUX		BIT(28)
+#define AFE_I2S_CON_LOW_JITTER_CLK	BIT(12)
+#define AFE_I2S_CON_LRCK_INV		BIT(5)
+#define AFE_I2S_CON_FORMAT_I2S		BIT(3)
+#define AFE_I2S_CON_SRC_SLAVE		BIT(2)
+#define AFE_I2S_CON_WLEN_32BIT		BIT(1)
+#define AFE_I2S_CON_EN			BIT(0)
+
+/* AFE_DAIBT_CON0 (0x001c) */
+#define AFE_DAIBT_CON0_USE_MRG_INPUT    BIT(12)
+#define AFE_DAIBT_CON0_DATA_DRY         BIT(3)
+
+/* AFE_CONN1 (0x0024) */
+#define AFE_CONN1_I03_O03_S		BIT(19)
+
+/* AFE_CONN2 (0x0028) */
+#define AFE_CONN2_I04_O04_S		BIT(4)
+#define AFE_CONN2_I03_O04_S		BIT(3)
+
+/* AFE_I2S_CON1 (0x0034) */
+#define AFE_I2S_CON1_I2S2_TO_PAD	(1 << 18)
+#define AFE_I2S_CON1_TDMOUT_TO_PAD	(0 << 18)
+#define AFE_I2S_CON1_TDMOUT_MUX_MASK	GENMASK(18, 18)
+#define AFE_I2S_CON1_LOW_JITTER_CLK	BIT(12)
+#define AFE_I2S_CON1_RATE(x)		(((x) & 0xf) << 8)
+#define AFE_I2S_CON1_FORMAT_I2S		BIT(3)
+#define AFE_I2S_CON1_WLEN_32BIT		BIT(1)
+#define AFE_I2S_CON1_EN			BIT(0)
+
+/* AFE_I2S_CON2 (0x0038) */
+#define AFE_I2S_CON2_LOW_JITTER_CLK	BIT(12)
+#define AFE_I2S_CON2_RATE(x)		(((x) & 0xf) << 8)
+#define AFE_I2S_CON2_FORMAT_I2S		BIT(3)
+#define AFE_I2S_CON2_WLEN_32BIT		BIT(1)
+#define AFE_I2S_CON2_EN			BIT(0)
+
+/* AFE_I2S_CON3 (0x004C) */
+#define AFE_I2S_CON3_LOW_JITTER_CLK	BIT(12)
+#define AFE_I2S_CON3_RATE(x)		(((x) & 0xf) << 8)
+#define AFE_I2S_CON3_FORMAT_I2S		BIT(3)
+#define AFE_I2S_CON3_WLEN_32BIT		BIT(1)
+#define AFE_I2S_CON3_EN			BIT(0)
+
+/* AFE_CONN_24BIT (0x006c) */
+#define AFE_CONN_24BIT_O10		BIT(10)
+#define AFE_CONN_24BIT_O09		BIT(9)
+#define AFE_CONN_24BIT_O06		BIT(6)
+#define AFE_CONN_24BIT_O05		BIT(5)
+#define AFE_CONN_24BIT_O04		BIT(4)
+#define AFE_CONN_24BIT_O03		BIT(3)
+#define AFE_CONN_24BIT_O02		BIT(2)
+#define AFE_CONN_24BIT_O01		BIT(1)
+#define AFE_CONN_24BIT_O00		BIT(0)
+
+/* AFE_ADDA_DL_SRC2_CON0 (0x0108) */
+#define AFE_ADDA_DL_8X_UPSAMPLE		(BIT(25) | BIT(24))
+#define AFE_ADDA_DL_MUTE_OFF		(BIT(12) | BIT(11))
+#define AFE_ADDA_DL_VOICE_DATA		BIT(5)
+#define AFE_ADDA_DL_DEGRADE_GAIN	BIT(1)
+
+/* AFE_SINEGEN_CON_TDM (0x01f8) */
+#define AFE_SINEGEN_CON_TDM_OUT_EN	BIT(28)
+
+/* AFE_SINEGEN_CON_TDM_IN (0x01fc) */
+#define AFE_SINEGEN_CON_TDM_IN_EN	BIT(28)
+
+/* AFE_HDMI_OUT_CON0 (0x0370) */
+#define AFE_HDMI_OUT_CON0_CH_MASK	GENMASK(7, 4)
+
+/* AFE_HDMI_CONN0 (0x0390) */
+#define AFE_HDMI_CONN0_O35_I35		(0x7 << 21)
+#define AFE_HDMI_CONN0_O34_I34		(0x6 << 18)
+#define AFE_HDMI_CONN0_O33_I33		(0x5 << 15)
+#define AFE_HDMI_CONN0_O32_I32		(0x4 << 12)
+#define AFE_HDMI_CONN0_O31_I30		(0x2 << 9)
+#define AFE_HDMI_CONN0_O31_I31		(0x3 << 9)
+#define AFE_HDMI_CONN0_O30_I31		(0x3 << 6)
+#define AFE_HDMI_CONN0_O30_I30		(0x2 << 6)
+#define AFE_HDMI_CONN0_O29_I29		(0x1 << 3)
+#define AFE_HDMI_CONN0_O28_I28		(0x0 << 0)
+
+/* AFE_CONN_TDMIN_CON (0x039c) */
+#define AFE_CONN_TDMIN_O41_I41		(0x1 << 3)
+#define AFE_CONN_TDMIN_O41_I40		(0x0 << 3)
+#define AFE_CONN_TDMIN_O40_I41		(0x1 << 0)
+#define AFE_CONN_TDMIN_O40_I40		(0x0 << 0)
+#define AFE_CONN_TDMIN_CON0_MASK	GENMASK(5, 0)
+
+/* AFE_APLL1_TUNER_CFG (0x03f0) */
+#define AFE_APLL1_TUNER_CFG_MASK	GENMASK(15, 1)
+#define AFE_APLL1_TUNER_CFG_EN_MASK	GENMASK(0, 0)
+
+/* AFE_APLL2_TUNER_CFG (0x03f4) */
+#define AFE_APLL2_TUNER_CFG_MASK	GENMASK(15, 1)
+#define AFE_APLL2_TUNER_CFG_EN_MASK	GENMASK(0, 0)
+
+/* AFE_GAIN1_CON0 (0x0410) */
+#define AFE_GAIN1_CON0_EN_MASK		GENMASK(0, 0)
+#define AFE_GAIN1_CON0_MODE_MASK	GENMASK(7, 4)
+#define AFE_GAIN1_CON0_SAMPLE_PER_STEP_MASK		GENMASK(15, 8)
+
+/* AFE_GAIN1_CON1 (0x0414) */
+#define AFE_GAIN1_CON1_MASK		GENMASK(19, 0)
+
+/* AFE_GAIN1_CUR (0x0424) */
+#define AFE_GAIN1_CUR_MASK		GENMASK(19, 0)
+
+/* AFE_ASRC_CON0 (0x0500) */
+#define AFE_ASRC_CON0_ASM_ON		BIT(0)
+#define AFE_ASRC_CON0_STR_CLR_MASK		GENMASK(6, 4)
+#define AFE_ASRC_CON0_CLR_TX		(0x1 << 4)
+#define AFE_ASRC_CON0_CLR_RX		(0x2 << 4)
+#define AFE_ASRC_CON0_CLR_I2S		(0x4 << 4)
+
+/* AFE_ASRC_CON13 (0x0550) */
+#define AFE_ASRC_CON13_16BIT		BIT(19)
+#define AFE_ASRC_CON13_MONO		BIT(16)
+
+/* AFE_ASRC_CON16 (0x055c) */
+#define AFE_ASRC_CON16_FC2_CYCLE_MASK		GENMASK(31, 16)
+#define AFE_ASRC_CON16_FC2_CYCLE(x)		(((x) - 1) << 16)
+#define AFE_ASRC_CON16_FC2_AUTO_RST		BIT(14)
+#define AFE_ASRC_CON16_TUNE_FREQ5		BIT(12)
+#define AFE_ASRC_CON16_COMP_FREQ_EN		BIT(11)
+#define AFE_ASRC_CON16_FC2_SEL		GENMASK(9, 8)
+#define AFE_ASRC_CON16_FC2_I2S_IN		(0x1 << 8)
+#define AFE_ASRC_CON16_FC2_DGL_BYPASS		BIT(7)
+#define AFE_ASRC_CON16_FC2_AUTO_RESTART		BIT(2)
+#define AFE_ASRC_CON16_FC2_FREQ		BIT(1)
+#define AFE_ASRC_CON16_FC2_EN		BIT(0)
+
+/* AFE_TDM_CON1 (0x0548) */
+#define AFE_TDM_CON1_LRCK_WIDTH(x)	(((x) - 1) << 24)
+#define AFE_TDM_CON1_32_BCK_CYCLES	(0x2 << 12)
+#define AFE_TDM_CON1_16_BCK_CYCLES	(0x0 << 12)
+#define AFE_TDM_CON1_8CH_PER_SDATA	(0x2 << 10)
+#define AFE_TDM_CON1_4CH_PER_SDATA	(0x1 << 10)
+#define AFE_TDM_CON1_2CH_PER_SDATA	(0x0 << 10)
+#define AFE_TDM_CON1_WLEN_32BIT		BIT(9)
+#define AFE_TDM_CON1_WLEN_16BIT		BIT(8)
+#define AFE_TDM_CON1_MSB_ALIGNED	BIT(4)
+#define AFE_TDM_CON1_1_BCK_DELAY	BIT(3)
+#define AFE_TDM_CON1_LRCK_INV		BIT(2)
+#define AFE_TDM_CON1_EN			BIT(0)
+
+/* AFE_TDM_CON2 (0x054c) */
+#define AFE_TDM_CON2_SOUT_MASK		GENMASK(14, 0)
+
+/* AFE_TDM_IN_CON1 (0x0588) */
+#define AFE_TDM_IN_CON1_LRCK_WIDTH(x)		(((x) - 1) << 24)
+#define AFE_TDM_IN_CON1_DISABLE_CH67		BIT(19)
+#define AFE_TDM_IN_CON1_DISABLE_CH01		BIT(18)
+#define AFE_TDM_IN_CON1_DISABLE_CH23		BIT(17)
+#define AFE_TDM_IN_CON1_DISABLE_CH45		BIT(16)
+#define AFE_TDM_IN_CON1_FAST_LRCK_CYCLE_32BCK	(0x2 << 12)
+#define AFE_TDM_IN_CON1_FAST_LRCK_CYCLE_24BCK	(0x1 << 12)
+#define AFE_TDM_IN_CON1_FAST_LRCK_CYCLE_16BCK	(0x0 << 12)
+#define AFE_TDM_IN_CON1_8CH_PER_SDATA		(0x2 << 10)
+#define AFE_TDM_IN_CON1_4CH_PER_SDATA		(0x1 << 10)
+#define AFE_TDM_IN_CON1_2CH_PER_SDATA		(0x0 << 10)
+#define AFE_TDM_IN_CON1_WLEN_32BIT		(0x3 << 8)
+#define AFE_TDM_IN_CON1_WLEN_24BIT		(0x2 << 8)
+#define AFE_TDM_IN_CON1_WLEN_16BIT		(0x1 << 8)
+#define AFE_TDM_IN_CON1_I2S			BIT(3)
+#define AFE_TDM_IN_CON1_LRCK_INV		BIT(2)
+#define AFE_TDM_IN_CON1_BCK_INV			BIT(1)
+#define AFE_TDM_IN_CON1_EN			BIT(0)
+
+/* AFE_TDM_IN_MON2 (0x0594) */
+#define AFE_TDM_IN_MON2_FAST_LRCK		BIT(28)
+
+/* AFE SPDIF IN */
+#define AUDIO_BASE  0
+
+#define AUDIO_HW_PHYSICAL_BASE  AUDIO_BASE
+#define AFE_BASE                (AUDIO_HW_PHYSICAL_BASE)
+
+#define AFE_SPDIFIN_CFG0         (0 + 0x0900)
+    #define SPDIFIN_EN_MASK          0x1 << 0
+    #define SPDIFIN_EN               0x1 << 0
+    #define SPDIFIN_DIS              0x0 << 0
+    #define SPDIFIN_FLIP_EN_MASK     0x1 << 1
+    #define SPDIFIN_FLIP_EN          0x1 << 1
+    #define SPDIFIN_FLIP_DIS         0x0 << 1
+    #define SPDIFIN_INT_EN_MASK      0x1 << 6
+    #define SPDIFIN_INT_EN           0x1 << 6
+    #define SPDIFIN_INT_DIS          0x0 << 6
+    #define SPDIFIN_DE_CNT_MASK      0x1F << 8
+    #define SPDIFIN_DE_SEL_MASK      0x3 << 13
+    #define SPDIFIN_DE_SEL_3SAMPLES  0x0
+    #define SPDIFIN_DE_SEL_14SAMPLES 0x1 << 13
+    #define SPDIFIN_DE_SEL_30SAMPLES 0x2 << 13
+    #define SPDIFIN_DE_SEL_DECNT     0x3 << 13
+    #define MAX_LEN_NUM_MASK         0xFF << 16
+    #define SPDIFIN_INT_CHSTLR_MASK  0x1 << 28
+    #define SPDIFIN_INT_CHSTLR_EN    0x1 << 28
+    #define SPDIFIN_INT_CHSTLR_DIS   0x0 << 28
+
+#define AFE_SPDIFIN_CFG1         (0 + 0x0904)
+    #define SPDIFIN_DATA_FROM_LOOPBACK_EN_MASK  0x1 << 14
+    #define SPDIFIN_DATA_FROM_LOOPBACK_EN       0x1 << 14
+    #define SPDIFIN_DATA_FROM_LOOPBACK_DIS      0x0 << 14
+    #define SPDIFIN_INT_ERR_EN_MASK             0xFFF << 20
+
+    #define AFE_SPDIFIN_REAL_OPTICAL            (0x0 << 14)
+    #define AFE_SPDIFIN_SWITCH_REAL_OPTICAL     (0x0 << 15)
+    #define SEL_BCK_SPDIFIN                     (0x1 << 16)
+    #define AFE_SPDIFIN_SEL_SPDIFIN_EN          (0x1 << 0)
+    #define AFE_SPDIFIN_SEL_SPDIFIN_DIS         (0x0 << 0)
+
+    #define AFE_SPDIFIN_SEL_SPDIFIN_CLK_DIS     (0x0 << 1)
+    #define AFE_SPDIFIN_FIFOSTARTPOINT_5        (0x1 << 4)
+
+    #define SPDIFIN_PRE_ERR_NON_EN              (0x1 << 20)
+    #define SPDIFIN_PRE_ERR_NON_DIS             (0x0 << 20)
+    #define SPDIFIN_PRE_ERR_B_EN                (0x1 << 21)
+    #define SPDIFIN_PRE_ERR_B_DIS               (0x0 << 21)
+    #define SPDIFIN_PRE_ERR_M_EN                (0x1 << 22)
+    #define SPDIFIN_PRE_ERR_M_DIS               (0x0 << 22)
+    #define SPDIFIN_PRE_ERR_W_EN                (0x1 << 23)
+    #define SPDIFIN_PRE_ERR_W_DIS               (0x0 << 23)
+    #define SPDIFIN_PRE_ERR_BITCNT_EN           (0x1 << 24)
+    #define SPDIFIN_PRE_ERR_BITCNT_DIS          (0x0 << 24)
+    #define SPDIFIN_PRE_ERR_PARITY_EN           (0x1 << 25)
+    #define SPDIFIN_PRE_ERR_PARITY_DIS          (0x0 << 25)
+
+    #define SPDIFIN_FIFO_ERR_EN                 (0x3 << 26)
+    #define SPDIFIN_FIFO_ERR_DIS                (0x0 << 26)
+
+    #define SPDIFIN_TIMEOUT_INT_EN              (0x1 << 28)
+    #define SPDIFIN_TIMEOUT_INT_DIS             (0x0 << 28)
+    #define SPDIFIN_CHSTS_PREAMPHASIS_EN        (0x1 << 29) /* channel status and emphasis */
+    #define SPDIFIN_CHSTS_PREAMPHASIS_DIS       (0x0 << 29)
+    #define SPDIFIN_CHSTS_COLLECTION_MASK       (0x1 << 31) /* channel status and emphasis */
+    #define SPDIFIN_CHSTS_COLLECTION_EN         (0x1 << 31) /* channel status and emphasis */
+    #define SPDIFIN_CHSTS_COLLECTION_DIS        (0x0 << 31)
+    #define SPDIFIN_ALL_ERR_INT_EN              (SPDIFIN_PRE_ERR_NON_EN|SPDIFIN_PRE_ERR_B_EN|SPDIFIN_PRE_ERR_M_EN| \
+                                                 SPDIFIN_PRE_ERR_W_EN|SPDIFIN_PRE_ERR_BITCNT_EN|SPDIFIN_PRE_ERR_PARITY_EN|\
+                                                 SPDIFIN_TIMEOUT_INT_EN|SPDIFIN_CHSTS_PREAMPHASIS_EN | SPDIFIN_CHSTS_COLLECTION_EN)
+
+    #define SPDIFIN_ALL_ERR_INT_DIS             (SPDIFIN_PRE_ERR_NON_DIS|SPDIFIN_PRE_ERR_B_DIS|SPDIFIN_PRE_ERR_M_DIS|\
+                                                 SPDIFIN_PRE_ERR_W_DIS|SPDIFIN_PRE_ERR_BITCNT_DIS|SPDIFIN_PRE_ERR_PARITY_DIS|\
+                                                 SPDIFIN_TIMEOUT_INT_DIS|SPDIFIN_CHSTS_PREAMPHASIS_DIS | SPDIFIN_CHSTS_COLLECTION_DIS)
+
+#define AFE_SPDIFIN_CHSTS1       (AFE_BASE + 0x0908)
+#define AFE_SPDIFIN_CHSTS2       (AFE_BASE + 0x090C)
+#define AFE_SPDIFIN_CHSTS3       (AFE_BASE + 0x0910)
+#define AFE_SPDIFIN_CHSTS4       (AFE_BASE + 0x0914)
+#define AFE_SPDIFIN_CHSTS5       (AFE_BASE + 0x0918)
+#define AFE_SPDIFIN_CHSTS6       (AFE_BASE + 0x091C)
+#define AFE_SPDIFIN_DEBUG1       (0 + 0x0920)
+    #define SPDIFIN_DATA_LATCH_ERR    (0x1 << 10)
+
+#define AFE_SPDIFIN_DEBUG2       (0 + 0x0924)
+    #define SPDIFIN_CHSTS_INT_MASK     (0x1 << 26)
+    #define SPDIFIN_CHSTS_INT_EN       (0x1 << 26)
+    #define SPDIFIN_CHSTS_INT_FLAG     (0x1 << 26)
+    #define SPDIFIN_CHSTS_INT_DIS      (0x0 << 26)
+    #define SPDIFIN_FIFO_ERR_STS       (0x3 << 30)
+
+#define AFE_SPDIFIN_DEBUG3       (0 + 0x0928)
+    #define SPDIFIN_PRE_ERR_NON_STS        (0x1 << 0)
+    #define SPDIFIN_PRE_ERR_B_STS          (0x1 << 1)
+    #define SPDIFIN_PRE_ERR_M_STS          (0x1 << 2)
+    #define SPDIFIN_PRE_ERR_W_STS          (0x1 << 3)
+    #define SPDIFIN_PRE_ERR_BITCNT_STS     (0x1 << 4)
+    #define SPDIFIN_PRE_ERR_PARITY_STS     (0x1 << 5)
+    #define SPDIFIN_TIMEOUT_ERR_STS        (0x1 << 6)
+    #define SPDIFIN_CHSTS_PREAMPHASIS_STS  (0x1 << 7)
+    #define SPDIFIN_ALL_ERR_ERR_STS        (SPDIFIN_PRE_ERR_NON_STS|SPDIFIN_PRE_ERR_B_STS|SPDIFIN_PRE_ERR_M_STS|\
+                                            SPDIFIN_PRE_ERR_W_STS|SPDIFIN_PRE_ERR_BITCNT_STS|\
+                                            SPDIFIN_PRE_ERR_PARITY_STS|SPDIFIN_TIMEOUT_ERR_STS)
+
+#define AFE_SPDIFIN_DEBUG4       (AFE_BASE + 0x092C)
+#define AFE_SPDIFIN_EC           (0 + 0x0930)
+    #define SPDIFIN_INT_ERR_CLEAR_MASK 0xFFF
+    #define SPDIFIN_PRE_ERR_CLEAR               0x1 << 0
+    #define SPDIFIN_PRE_ERR_B_CLEAR             0x1 << 1
+    #define SPDIFIN_PRE_ERR_M_CLEAR             0x1 << 2
+    #define SPDIFIN_PRE_ERR_W_CLEAR             0x1 << 3
+    #define SPDIFIN_PRE_ERR_BITCNT_CLEAR        0x1 << 4
+    #define SPDIFIN_PRE_ERR_PARITY_CLEAR        0x1 << 5
+    #define SPDIFIN_FIFO_ERR_CLEAR              0x3 << 6
+    #define SPDIFIN_TIMEOUT_INT_CLEAR           0x1 << 8
+    #define SPDIFIN_CHSTS_PREAMPHASIS_CLEAR     0x1 << 9  /* channel status and emphasis */
+    #define SPDIFIN_CHSTS_INT_CLR_MASK          (0x1 << 11) /* channel status int clear */
+    #define SPDIFIN_CHSTS_COLLECTION_CLEAR      (0x1 << 11)
+    #define SPDIFIN_CHSTS_INT_CLR_EN            (0x1 << 11) /* channel status */
+    #define SPDIFIN_DATA_LRCK_CHANGE_CLEAR      0x1 << 16
+    #define SPDIFIN_DATA_LATCH_CLEAR            0x1 << 17
+    #define SPDIFIN_INT_CLEAR_ALL               (SPDIFIN_PRE_ERR_CLEAR|SPDIFIN_PRE_ERR_B_CLEAR|SPDIFIN_PRE_ERR_M_CLEAR|\
+                                                 SPDIFIN_PRE_ERR_W_CLEAR|SPDIFIN_FIFO_ERR_CLEAR|SPDIFIN_PRE_ERR_BITCNT_CLEAR|\
+                                                 SPDIFIN_PRE_ERR_PARITY_CLEAR|SPDIFIN_TIMEOUT_INT_CLEAR|SPDIFIN_CHSTS_PREAMPHASIS_CLEAR|\
+                                                 SPDIFIN_DATA_LATCH_CLEAR|SPDIFIN_CHSTS_COLLECTION_CLEAR)
+
+#define AFE_SPDIFIN_BR           (0 + 0x093C)
+    #define AFE_SPDIFIN_BRE_MASK           0x1 << 0
+    #define AFE_SPDIFIN_BR_FS_MASK         0x7 << 4
+    #define AFE_SPDIFIN_BR_FS_256          0x3 << 4
+    #define AFE_SPDIFIN_BR_SUBFRAME_MASK   0xF << 8
+    #define AFE_SPDIFIN_BR_SUBFRAME_256    0x8 << 8
+    #define AFE_SPDIFIN_BR_LOWBOUND_MASK   0x1F << 12
+    #define AFE_SPDIFIN_BR_TUNE_MODE_MASK  0x3 << 17
+    #define AFE_SPDIFIN_BR_TUNE_MODE0      0x0 << 17
+    #define AFE_SPDIFIN_BR_TUNE_MODE1      0x1 << 17
+    #define AFE_SPDIFIN_BR_TUNE_MODE2      0x2 << 17
+    #define AFE_SPDIFIN_BR_TUNE_MODE1_D    0x3 << 17
+
+#define AFE_SPDIFIN_BR_DBG1      (AFE_BASE + 0x0940)
+
+#define AFE_SPDIFIN_INT_EXT      (0 + 0x0948)
+    #define MULTI_INPUT_DETECT_SEL_MASK    0xF << 8
+    #define MULTI_INPUT_DETECT_SEL_OPT     0x1 << 8
+    #define MULTI_INPUT_DETECT_SEL_COA     0x2 << 8
+    #define MULTI_INPUT_DETECT_SEL_ARC     0x4 << 8
+    #define MULTI_INPUT_SEL_MASK           0x3 << 14
+    #define MULTI_INPUT_SEL_OPT            0x0 << 14
+    #define MULTI_INPUT_SEL_COA            0x1 << 14
+    #define MULTI_INPUT_SEL_ARC            0x2 << 14
+    #define MULTI_INPUT_SEL_LOW            0x3 << 14
+    #define SPDIFIN_DATALATCH_ERR_EN_MASK  0x1 << 17
+    #define SPDIFIN_DATALATCH_ERR_EN       0x1 << 17
+    #define SPDIFIN_DATALATCH_ERR_DIS      0x0 << 17
+    #define MULTI_INPUT_STATUS_MASK        0xF << 28
+    #define MULTI_INPUT_STATUS_OPT         0x1 << 28
+    #define MULTI_INPUT_STATUS_COA         0x2 << 28
+    #define MULTI_INPUT_STATUS_ARC         0x4 << 28
+
+#define AFE_SPDIFIN_INT_EXT2     (0 + 0x094C)
+    #define SPDIFIN_LRC_MASK             0x7FFF
+    #define SPDIFIN_LRC_COMPARE_594M     0x173
+    #define SPDIFIN_LRCK_CHG_INT_MASK    0x1 << 15
+    #define SPDIFIN_LRCK_CHG_INT_EN      0x1 << 15
+    #define SPDIFIN_LRCK_CHG_INT_DIS     0x0 << 15
+    #define SPDIFIN_432MODE_MASK         0x1 << 16
+    #define SPDIFIN_432MODE_EN           0x1 << 16
+    #define SPDIFIN_432MODE_DIS          0x0 << 16
+
+    #define SPDIFIN_MODE_CLK_MASK        0x3 << 16
+
+    #define SPDIFIN_594MODE_MASK         0x1 << 17
+    #define SPDIFIN_594MODE_EN           0x1 << 17
+    #define SPDIFIN_594MODE_DIS          0x0 << 17
+    #define SPDIFIN_LRCK_CHG_INT_STS     0x1 << 27
+    #define SPDIFIN_ROUGH_FS_MASK        0xF << 28
+    #define SPDIFIN_ROUGH_FS_32K         0x1 << 28
+    #define SPDIFIN_ROUGH_FS_44K         0x2 << 28
+    #define SPDIFIN_ROUGH_FS_48K         0x3 << 28
+    #define SPDIFIN_ROUGH_FS_64K         0x4 << 28
+    #define SPDIFIN_ROUGH_FS_88K         0x5 << 28
+    #define SPDIFIN_ROUGH_FS_96K         0x6 << 28
+    #define SPDIFIN_ROUGH_FS_128K        0x7 << 28
+    #define SPDIFIN_ROUGH_FS_144K        0x8 << 28
+    #define SPDIFIN_ROUGH_FS_176K        0x9 << 28
+    #define SPDIFIN_ROUGH_FS_192K        0xA << 28
+    #define SPDIFIN_ROUGH_FS_216K        0xB << 28
+
+#define SPDIFIN_FREQ_INFO        (0 + 0x0950)
+#define SPDIFIN_FREQ_INFO_2      (0 + 0x0954)
+#define SPDIFIN_FREQ_INFO_3      (0 + 0x0958)
+#define SPDIFIN_FREQ_STATUS      (AFE_BASE + 0x095C)
+#define SPDIFIN_FREQ_USERCODE1   (AFE_BASE + 0x0960)
+#define SPDIFIN_FREQ_USERCODE2   (AFE_BASE + 0x0964)
+#define SPDIFIN_FREQ_USERCODE3   (AFE_BASE + 0x0968)
+#define SPDIFIN_FREQ_USERCODE4   (AFE_BASE + 0x096C)
+#define SPDIFIN_FREQ_USERCODE5   (AFE_BASE + 0x0970)
+#define SPDIFIN_FREQ_USERCODE6   (AFE_BASE + 0x0974)
+#define SPDIFIN_FREQ_USERCODE7   (AFE_BASE + 0x0978)
+#define SPDIFIN_FREQ_USERCODE8   (AFE_BASE + 0x097C)
+#define SPDIFIN_FREQ_USERCODE9   (AFE_BASE + 0x0980)
+#define SPDIFIN_FREQ_USERCODE10  (AFE_BASE + 0x0984)
+#define SPDIFIN_FREQ_USERCODE11  (AFE_BASE + 0x0988)
+#define SPDIFIN_FREQ_USERCODE12  (AFE_BASE + 0x098C)
+#define SPDIFIN_MEMIF_CON0       (0x0990)
+    #define SPDIFIN_IN_MEMIF_EN_MASK    0x1 << 0
+    #define SPDIFIN_IN_MEMIF_EN         0x1 << 0
+    #define SPDIFIN_IN_MEMIF_DIS        0x0 << 0
+
+#define SPDIFIN_BASE_ADR         (0x0994)
+#define SPDIFIN_END_ADR          (0x0998)
+#define SPDIFIN_APLL_TUNER_CFG   (AFE_BASE + 0x09A0)
+#define SPDIFIN_APLL_TUNER_CFG1  (AFE_BASE + 0x09A4)
+#define SPDIFIN_APLL2_TUNER_CFG  (AFE_BASE + 0x09A8)
+#define SPDIFIN_APLL2_TUNER_CFG1 (AFE_BASE + 0x09AC)
+#define SPDIFIN_TYPE_DET         (AFE_BASE + 0x09B0)
+#define MPHONE_MULTI_CON0        (0x09B4)
+    #define MULTI_HW_EN_MASK         0x1
+    #define MULTI_HW_EN              0x1
+    #define MULTI_HW_DIS             0x0
+    #define MULTI_STORE_TYPE_MASK    0x1 << 1
+    #define MULTI_STORE_24BIT        0x1 << 1
+    #define MULTI_STORE_16BIT        0x0
+    #define MULTI_INT_PERIOD_MASK    0x3 << 4
+    #define MULTI_INT_PERIOD_64      0x1 << 4
+    #define MULTI_INT_PERIOD_128     0x2 << 4
+    #define MULTI_INT_PERIOD_256     0x3 << 4
+
+#define SPDIFIN_CUR_ADR          (0x09B8)
+#define AFE_SINEGEN_CON_SPDIFIN  (AFE_BASE + 0x09BC)
+
+#endif
diff --git a/sound/soc/mediatek/mt8167/mt8167-afe-util.c b/sound/soc/mediatek/mt8167/mt8167-afe-util.c
new file mode 100644
index 0000000..94e13e2
--- /dev/null
+++ b/sound/soc/mediatek/mt8167/mt8167-afe-util.c
@@ -0,0 +1,333 @@
+/*
+ * Mediatek audio utility
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt8167-afe-util.h"
+#include "mt8167-afe-regs.h"
+#include "mt8167-afe-common.h"
+#ifdef IDLE_TASK_DRIVER_API
+#include "mtk_idle.h"
+#define MT_CG_AUDIO_AFE (CGID(CG_AUDIO, 2))
+#endif
+#include <linux/device.h>
+
+static int aud_spdif_dir_clk_cntr;
+static DEFINE_MUTEX(afe_clk_mutex);
+
+static unsigned int get_top_cg_mask(unsigned int cg_type)
+{
+	switch (cg_type) {
+	case MT8167_AFE_CG_AFE:
+		return AUD_TCON0_PDN_AFE;
+	case MT8167_AFE_CG_I2S:
+		return AUD_TCON0_PDN_I2S;
+	case MT8167_AFE_CG_22M:
+		return AUD_TCON0_PDN_22M;
+	case MT8167_AFE_CG_24M:
+		return AUD_TCON0_PDN_24M;
+	case MT8167_AFE_CG_INTDIR_CK:
+		return AUD_TCON0_PDN_INTDIR_CK;
+	case MT8167_AFE_CG_APLL_TUNER:
+		return AUD_TCON0_PDN_APLL_TUNER;
+	case MT8167_AFE_CG_APLL2_TUNER:
+		return AUD_TCON0_PDN_APLL2_TUNER;
+	case MT8167_AFE_CG_HDMI:
+		return AUD_TCON0_PDN_HDMI;
+	case MT8167_AFE_CG_SPDIF:
+		return AUD_TCON0_PDN_SPDF;
+	case MT8167_AFE_CG_ADC:
+		return AUD_TCON0_PDN_ADC;
+	case MT8167_AFE_CG_DAC:
+		return AUD_TCON0_PDN_DAC;
+	case MT8167_AFE_CG_DAC_PREDIS:
+		return AUD_TCON0_PDN_DAC_PREDIS;
+	default:
+		return 0;
+	}
+}
+
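+/*
+ * The AUDIO_TOP_CON0 gates are power-down bits: clearing a bit ungates the
+ * clock, setting it gates the clock again. Reference counting under
+ * afe_ctrl_lock keeps concurrent users of a shared gate consistent.
+ */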
+int mt8167_afe_enable_top_cg(struct mtk_afe *afe, unsigned int cg_type)
+{
+	unsigned int mask = get_top_cg_mask(cg_type);
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+
+	afe->top_cg_ref_cnt[cg_type]++;
+	if (afe->top_cg_ref_cnt[cg_type] == 1)
+		regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, mask, 0x0);
+
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+
+	return 0;
+}
+
+int mt8167_afe_disable_top_cg(struct mtk_afe *afe, unsigned int cg_type)
+{
+	unsigned int mask = get_top_cg_mask(cg_type);
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+
+	afe->top_cg_ref_cnt[cg_type]--;
+	if (afe->top_cg_ref_cnt[cg_type] == 0)
+		regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, mask, mask);
+	else if (afe->top_cg_ref_cnt[cg_type] < 0)
+		afe->top_cg_ref_cnt[cg_type] = 0;
+
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+
+	return 0;
+}
+
+int mt8167_afe_enable_main_clk(struct mtk_afe *afe)
+{
+#if defined(COMMON_CLOCK_FRAMEWORK_API)
+	int ret;
+
+	ret = clk_prepare_enable(afe->clocks[MT8167_CLK_TOP_PDN_AUD]);
+	if (ret) {
+		dev_err(afe->dev, "%s failed %d\n", __func__, ret);
+		return ret;
+	}
+#endif
+
+	mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_AFE);
+	return 0;
+}
+
+int mt8167_afe_disable_main_clk(struct mtk_afe *afe)
+{
+	mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_AFE);
+#if defined(COMMON_CLOCK_FRAMEWORK_API)
+	clk_disable_unprepare(afe->clocks[MT8167_CLK_TOP_PDN_AUD]);
+#endif
+	return 0;
+}
+
+int mt8167_afe_emi_clk_on(struct mtk_afe *afe)
+{
+#ifdef IDLE_TASK_DRIVER_API
+	mutex_lock(&afe->emi_clk_mutex);
+	if (afe->emi_clk_ref_cnt == 0) {
+		disable_dpidle_by_bit(MT_CG_AUDIO_AFE);
+		disable_soidle_by_bit(MT_CG_AUDIO_AFE);
+	}
+	afe->emi_clk_ref_cnt++;
+	mutex_unlock(&afe->emi_clk_mutex);
+#endif
+	return 0;
+}
+
+int mt8167_afe_emi_clk_off(struct mtk_afe *afe)
+{
+#ifdef IDLE_TASK_DRIVER_API
+	mutex_lock(&afe->emi_clk_mutex);
+	afe->emi_clk_ref_cnt--;
+	if (afe->emi_clk_ref_cnt == 0) {
+		enable_dpidle_by_bit(MT_CG_AUDIO_AFE);
+		enable_soidle_by_bit(MT_CG_AUDIO_AFE);
+	} else if (afe->emi_clk_ref_cnt < 0) {
+		afe->emi_clk_ref_cnt = 0;
+	}
+	mutex_unlock(&afe->emi_clk_mutex);
+#endif
+	return 0;
+}
+
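+/*
+ * AFE_DAC_CON0 bit 0 is the global AFE-on switch; it is reference counted
+ * so only the last active user actually turns the engine off.
+ */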
+int mt8167_afe_enable_afe_on(struct mtk_afe *afe)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+
+	afe->afe_on_ref_cnt++;
+	if (afe->afe_on_ref_cnt == 1)
+		regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0x1);
+
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+
+	return 0;
+}
+
+int mt8167_afe_disable_afe_on(struct mtk_afe *afe)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&afe->afe_ctrl_lock, flags);
+
+	afe->afe_on_ref_cnt--;
+	if (afe->afe_on_ref_cnt == 0)
+		regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0x0);
+	else if (afe->afe_on_ref_cnt < 0)
+		afe->afe_on_ref_cnt = 0;
+
+	spin_unlock_irqrestore(&afe->afe_ctrl_lock, flags);
+
+	return 0;
+}
+
+int mt8167_afe_enable_apll_tuner_cfg(struct mtk_afe *afe, unsigned int apll)
+{
+	mutex_lock(&afe->afe_clk_mutex);
+
+	afe->apll_tuner_ref_cnt[apll]++;
+	if (afe->apll_tuner_ref_cnt[apll] != 1) {
+		mutex_unlock(&afe->afe_clk_mutex);
+		return 0;
+	}
+
+	if (apll == MT8167_AFE_APLL1) {
+		regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG,
+				   AFE_APLL1_TUNER_CFG_MASK, 0x832);
+		regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG,
+				   AFE_APLL1_TUNER_CFG_EN_MASK, 0x1);
+	} else {
+		regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG,
+				   AFE_APLL2_TUNER_CFG_MASK, 0x634);
+		regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG,
+				   AFE_APLL2_TUNER_CFG_EN_MASK, 0x1);
+	}
+
+	mutex_unlock(&afe->afe_clk_mutex);
+	return 0;
+}
+
+int mt8167_afe_disable_apll_tuner_cfg(struct mtk_afe *afe, unsigned int apll)
+{
+	mutex_lock(&afe->afe_clk_mutex);
+
+	afe->apll_tuner_ref_cnt[apll]--;
+	if (afe->apll_tuner_ref_cnt[apll] == 0) {
+		if (apll == MT8167_AFE_APLL1)
+			regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG,
+					   AFE_APLL1_TUNER_CFG_EN_MASK, 0x0);
+		else
+			regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG,
+					   AFE_APLL2_TUNER_CFG_EN_MASK, 0x0);
+	} else if (afe->apll_tuner_ref_cnt[apll] < 0) {
+		afe->apll_tuner_ref_cnt[apll] = 0;
+	}
+
+	mutex_unlock(&afe->afe_clk_mutex);
+	return 0;
+}
+
+int mt8167_afe_enable_apll_associated_cfg(struct mtk_afe *afe, unsigned int apll)
+{
+	if (apll == MT8167_AFE_APLL1) {
+		clk_prepare_enable(afe->clocks[MT8167_CLK_ENGEN1]);
+		mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_22M);
+#ifdef ENABLE_AFE_APLL_TUNER
+		mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_APLL_TUNER);
+		mt8167_afe_enable_apll_tuner_cfg(afe, MT8167_AFE_APLL1);
+#endif
+	} else {
+		clk_prepare_enable(afe->clocks[MT8167_CLK_ENGEN2]);
+		mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_24M);
+#ifdef ENABLE_AFE_APLL_TUNER
+		mt8167_afe_enable_top_cg(afe, MT8167_AFE_CG_APLL2_TUNER);
+		mt8167_afe_enable_apll_tuner_cfg(afe, MT8167_AFE_APLL2);
+#endif
+	}
+
+	return 0;
+}
+
+int mt8167_afe_disable_apll_associated_cfg(struct mtk_afe *afe, unsigned int apll)
+{
+	if (apll == MT8167_AFE_APLL1) {
+#ifdef ENABLE_AFE_APLL_TUNER
+		mt8167_afe_disable_apll_tuner_cfg(afe, MT8167_AFE_APLL1);
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_APLL_TUNER);
+#endif
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_22M);
+		clk_disable_unprepare(afe->clocks[MT8167_CLK_ENGEN1]);
+	} else {
+#ifdef ENABLE_AFE_APLL_TUNER
+		mt8167_afe_disable_apll_tuner_cfg(afe, MT8167_AFE_APLL2);
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_APLL2_TUNER);
+#endif
+		mt8167_afe_disable_top_cg(afe, MT8167_AFE_CG_24M);
+		clk_disable_unprepare(afe->clocks[MT8167_CLK_ENGEN2]);
+	}
+
+	return 0;
+}
+
+/*SPDIF CLK*/
+void turn_on_spdif_dir_ck(struct mtk_afe *afe)
+{
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+#ifdef COMMON_CLOCK_FRAMEWORK_API
+	ret = clk_prepare_enable(afe->clocks[MT8167_CLK_SPDIFIN_SEL]);
+	if (ret)
+		pr_err("%s clk_prepare_enable %s fail %d\n",
+		       __func__, "spdifin_sel", ret);
+
+	ret = clk_set_parent(afe->clocks[MT8167_CLK_SPDIFIN_SEL],
+			     afe->clocks[MT8167_CLK_TOP_UNIVPLL_D2]);
+	if (ret)
+		pr_err("%s clk_set_parent %s-%s fail %d\n", __func__,
+		       "spdifin_sel", "univpll_div2", ret);
+
+	ret = clk_prepare_enable(afe->clocks[MT8167_CLK_SPDIF_IN]);
+	if (ret)
+		pr_err("%s clk_prepare_enable %s fail %d\n",
+		       __func__, "spdif_in", ret);
+
+#endif
+}
+
+void turn_off_spdif_dir_ck(struct mtk_afe *afe)
+{
+	pr_debug("%s\n", __func__);
+#ifdef COMMON_CLOCK_FRAMEWORK_API
+	clk_disable_unprepare(afe->clocks[MT8167_CLK_SPDIF_IN]);
+	clk_disable_unprepare(afe->clocks[MT8167_CLK_SPDIFIN_SEL]);
+#endif
+}
+
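+/*
+ * The SPDIF DIR clock is shared at file scope, so it carries its own
+ * refcount under a local mutex instead of the per-afe clock mutex.
+ */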
+void mt_afe_spdif_dir_clk_on(struct mtk_afe *afe)
+{
+	mutex_lock(&afe_clk_mutex);
+
+	if (aud_spdif_dir_clk_cntr == 0)
+		turn_on_spdif_dir_ck(afe);
+
+	aud_spdif_dir_clk_cntr++;
+	mutex_unlock(&afe_clk_mutex);
+}
+
+void mt_afe_spdif_dir_clk_off(struct mtk_afe *afe)
+{
+	mutex_lock(&afe_clk_mutex);
+
+	aud_spdif_dir_clk_cntr--;
+	if (aud_spdif_dir_clk_cntr == 0)
+		turn_off_spdif_dir_ck(afe);
+
+	if (aud_spdif_dir_clk_cntr < 0) {
+		pr_err("%s aud_spdif_dir_clk_cntr:%d<0\n",
+		       __func__, aud_spdif_dir_clk_cntr);
+		aud_spdif_dir_clk_cntr = 0;
+	}
+	mutex_unlock(&afe_clk_mutex);
+}
diff --git a/sound/soc/mediatek/mt8167/mt8167-afe-util.h b/sound/soc/mediatek/mt8167/mt8167-afe-util.h
new file mode 100644
index 0000000..ad3600a
--- /dev/null
+++ b/sound/soc/mediatek/mt8167/mt8167-afe-util.h
@@ -0,0 +1,49 @@
+/*
+ * mt8167-afe-util.h  --  Mediatek audio utility
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT8167_AFE_UTILITY_H_
+#define _MT8167_AFE_UTILITY_H_
+
+struct mtk_afe;
+
+int mt8167_afe_enable_top_cg(struct mtk_afe *afe, unsigned int cg_type);
+
+int mt8167_afe_disable_top_cg(struct mtk_afe *afe, unsigned int cg_type);
+
+int mt8167_afe_enable_main_clk(struct mtk_afe *afe);
+
+int mt8167_afe_disable_main_clk(struct mtk_afe *afe);
+
+int mt8167_afe_emi_clk_on(struct mtk_afe *afe);
+
+int mt8167_afe_emi_clk_off(struct mtk_afe *afe);
+
+int mt8167_afe_enable_afe_on(struct mtk_afe *afe);
+
+int mt8167_afe_disable_afe_on(struct mtk_afe *afe);
+
+int mt8167_afe_enable_apll_tuner_cfg(struct mtk_afe *afe, unsigned int apll);
+
+int mt8167_afe_disable_apll_tuner_cfg(struct mtk_afe *afe, unsigned int apll);
+
+int mt8167_afe_enable_apll_associated_cfg(struct mtk_afe *afe, unsigned int apll);
+
+int mt8167_afe_disable_apll_associated_cfg(struct mtk_afe *afe, unsigned int apll);
+
+void turn_on_spdif_dir_ck(struct mtk_afe *afe);
+void turn_off_spdif_dir_ck(struct mtk_afe *afe);
+void mt_afe_spdif_dir_clk_on(struct mtk_afe *afe);
+void mt_afe_spdif_dir_clk_off(struct mtk_afe *afe);
+
+#endif
diff --git a/sound/soc/mediatek/mt8167/mt8516-pumpkin.c b/sound/soc/mediatek/mt8167/mt8516-pumpkin.c
new file mode 100644
index 0000000..1a9c22c
--- /dev/null
+++ b/sound/soc/mediatek/mt8167/mt8516-pumpkin.c
@@ -0,0 +1,914 @@
+/*
+ * mt8516-pumpkin.c  --  MT8516 Pumpkin ALSA SoC machine driver
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+
+#define ENUM_TO_STR(x) #x
+
+enum PINCTRL_PIN_STATE {
+	PIN_STATE_DEFAULT = 0,
+	PIN_STATE_MAX
+};
+
+enum mtkfile_pcm_state {
+	MTKFILE_PCM_STATE_UNKNOWN = 0,
+	MTKFILE_PCM_STATE_OPEN,
+	MTKFILE_PCM_STATE_HW_PARAMS,
+	MTKFILE_PCM_STATE_PREPARE,
+	MTKFILE_PCM_STATE_START,
+	MTKFILE_PCM_STATE_PAUSE,
+	MTKFILE_PCM_STATE_RESUME,
+	MTKFILE_PCM_STATE_DRAIN,
+	MTKFILE_PCM_STATE_STOP,
+	MTKFILE_PCM_STATE_HW_FREE,
+	MTKFILE_PCM_STATE_CLOSE,
+	MTKFILE_PCM_STATE_NUM,
+};
+
+static const char *const pcm_state_func[] = {
+	ENUM_TO_STR(MTKFILE_PCM_STATE_UNKNOWN),
+	ENUM_TO_STR(MTKFILE_PCM_STATE_OPEN),
+	ENUM_TO_STR(MTKFILE_PCM_STATE_HW_PARAMS),
+	ENUM_TO_STR(MTKFILE_PCM_STATE_PREPARE),
+	ENUM_TO_STR(MTKFILE_PCM_STATE_START),
+	ENUM_TO_STR(MTKFILE_PCM_STATE_PAUSE),
+	ENUM_TO_STR(MTKFILE_PCM_STATE_RESUME),
+	ENUM_TO_STR(MTKFILE_PCM_STATE_DRAIN),
+	ENUM_TO_STR(MTKFILE_PCM_STATE_STOP),
+	ENUM_TO_STR(MTKFILE_PCM_STATE_HW_FREE),
+	ENUM_TO_STR(MTKFILE_PCM_STATE_CLOSE),
+};
+
+static const char * const nfy_ctl_names[] = {
+	"Master Volume",
+	"Master Volume X",
+	"Master Switch",
+	"Master Switch X",
+	"PCM State",
+	"PCM State X",
+};
+
+enum {
+	MASTER_VOLUME_ID = 0,
+	MASTER_VOLUMEX_ID,
+	MASTER_SWITCH_ID,
+	MASTER_SWITCHX_ID,
+	PCM_STATE_ID,
+	PCM_STATEX_ID,
+	CTRL_NOTIFY_NUM,
+	CTRL_NOTIFY_INVAL = 0xFFFF,
+};
+
+struct soc_ctlx_res {
+	int master_volume;
+	int master_switch;
+	int pcm_state;
+	struct snd_ctl_elem_id nfy_ids[CTRL_NOTIFY_NUM];
+	struct mutex res_mutex;
+	spinlock_t res_lock;
+};
+
+struct mt8516_pumpkin_priv {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *pin_states[PIN_STATE_MAX];
+	struct regulator *tdmadc_1p8_supply;
+	struct regulator *tdmadc_3p3_supply;
+	struct soc_ctlx_res ctlx_res;
+};
+
+static const char * const mt8516_pumpkin_pinctrl_pin_str[PIN_STATE_MAX] = {
+	"default",
+};
+
+static SOC_ENUM_SINGLE_EXT_DECL(pcm_state_enums, pcm_state_func);
+
+/* ctrl resource manager */
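+/*
+ * Each user-facing control ("Master Volume", "Master Switch", "PCM State")
+ * is mirrored by an "... X" control; soc_ctlx_put() and soc_pcm_state_put()
+ * notify the counterpart control so a second listener stays in sync.
+ */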
+static inline int soc_ctlx_init(struct soc_ctlx_res *ctlx_res, struct snd_soc_card *soc_card)
+{
+	int i;
+	struct snd_card *card = soc_card->snd_card;
+	struct snd_kcontrol *control;
+
+	ctlx_res->master_volume = 100;
+	ctlx_res->master_switch = 1;
+	ctlx_res->pcm_state = MTKFILE_PCM_STATE_UNKNOWN;
+	mutex_init(&ctlx_res->res_mutex);
+	spin_lock_init(&ctlx_res->res_lock);
+
+	for (i = 0; i < CTRL_NOTIFY_NUM; i++) {
+		list_for_each_entry(control, &card->controls, list) {
+			if (strncmp(control->id.name, nfy_ctl_names[i], sizeof(control->id.name)))
+				continue;
+			ctlx_res->nfy_ids[i] = control->id;
+		}
+	}
+
+	return 0;
+}
+
+static int soc_ctlx_get(struct snd_kcontrol *kctl,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kctl);
+	struct mt8516_pumpkin_priv *card_data = snd_soc_card_get_drvdata(card);
+	struct soc_ctlx_res *res_mgr = &card_data->ctlx_res;
+	int type;
+
+	for (type = 0; type < CTRL_NOTIFY_NUM; type++) {
+		if (kctl->id.numid == res_mgr->nfy_ids[type].numid)
+			break;
+	}
+	if (type == CTRL_NOTIFY_NUM) {
+		pr_err("invalid mixer control(numid:%d)\n", kctl->id.numid);
+		return -EINVAL;
+	}
+
+	mutex_lock(&res_mgr->res_mutex);
+	switch (type) {
+	case MASTER_VOLUME_ID:
+	case MASTER_VOLUMEX_ID:
+		ucontrol->value.integer.value[0] = res_mgr->master_volume;
+		break;
+	case MASTER_SWITCH_ID:
+	case MASTER_SWITCHX_ID:
+		ucontrol->value.integer.value[0] = res_mgr->master_switch;
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&res_mgr->res_mutex);
+
+	return 0;
+}
+
+static int soc_ctlx_put(struct snd_kcontrol *kctl,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kctl);
+	struct mt8516_pumpkin_priv *card_data = snd_soc_card_get_drvdata(card);
+	struct soc_ctlx_res *res_mgr = &card_data->ctlx_res;
+	int type;
+	int nfy_type;
+	int need_notify_self = 0;
+	int *value = NULL;
+
+	for (type = 0; type < CTRL_NOTIFY_NUM; type++) {
+		if (kctl->id.numid == res_mgr->nfy_ids[type].numid)
+			break;
+	}
+	if (type == CTRL_NOTIFY_NUM) {
+		pr_err("invalid mixer control(numid:%d)\n", kctl->id.numid);
+		return -EINVAL;
+	}
+
+	mutex_lock(&res_mgr->res_mutex);
+	switch (type) {
+	case MASTER_VOLUME_ID:
+		if ((res_mgr->master_switch == 1) ||
+			(ucontrol->value.integer.value[0] != 0)) {
+			nfy_type = MASTER_VOLUMEX_ID;
+			value = &res_mgr->master_volume;
+			need_notify_self = 1;
+		}
+		break;
+	case MASTER_VOLUMEX_ID:
+		nfy_type = MASTER_VOLUME_ID;
+		value = &res_mgr->master_volume;
+		break;
+	case MASTER_SWITCH_ID:
+		nfy_type = MASTER_SWITCHX_ID;
+		value = &res_mgr->master_switch;
+		need_notify_self = 1;
+		break;
+	case MASTER_SWITCHX_ID:
+		nfy_type = MASTER_SWITCH_ID;
+		value = &res_mgr->master_switch;
+		break;
+	default:
+		break;
+	}
+	if (value != NULL) {
+		*value = ucontrol->value.integer.value[0];
+		snd_ctl_notify(card->snd_card, SNDRV_CTL_EVENT_MASK_VALUE, &(res_mgr->nfy_ids[nfy_type]));
+	} else {
+		nfy_type = CTRL_NOTIFY_INVAL;
+	}
+	if (need_notify_self) {
+		snd_ctl_notify(card->snd_card, SNDRV_CTL_EVENT_MASK_VALUE, &(kctl->id));
+	}
+	mutex_unlock(&res_mgr->res_mutex);
+	pr_notice("set mixer control(%s) value is:%ld, notify id:%x, notify self:%d\n",
+						kctl->id.name,
+						ucontrol->value.integer.value[0],
+						nfy_type,
+						need_notify_self);
+
+	return 0;
+}
+
+static int soc_pcm_state_get(struct snd_kcontrol *kctl,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kctl);
+	struct mt8516_pumpkin_priv *card_data = snd_soc_card_get_drvdata(card);
+	struct soc_ctlx_res *res_mgr = &card_data->ctlx_res;
+	unsigned long flags;
+
+	spin_lock_irqsave(&res_mgr->res_lock, flags);
+	ucontrol->value.integer.value[0] = res_mgr->pcm_state;
+	spin_unlock_irqrestore(&res_mgr->res_lock, flags);
+	pr_notice("get mixer control(%s) value is:%ld\n", kctl->id.name, ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static int soc_pcm_state_put(struct snd_kcontrol *kctl,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kctl);
+	struct mt8516_pumpkin_priv *card_data = snd_soc_card_get_drvdata(card);
+	struct soc_ctlx_res *res_mgr = &card_data->ctlx_res;
+	unsigned long flags;
+
+	spin_lock_irqsave(&res_mgr->res_lock, flags);
+	if (ucontrol->value.integer.value[0] != res_mgr->pcm_state) {
+		res_mgr->pcm_state = ucontrol->value.integer.value[0];
+		snd_ctl_notify(card->snd_card, SNDRV_CTL_EVENT_MASK_VALUE, &(res_mgr->nfy_ids[PCM_STATEX_ID]));
+	}
+	spin_unlock_irqrestore(&res_mgr->res_lock, flags);
+	pr_notice("set mixer control(%s) value is:%ld\n",
+						kctl->id.name,
+						ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new mt8516_pumpkin_soc_controls[] = {
+	/* for third party app use */
+	SOC_SINGLE_EXT("Master Volume",
+			    0,
+			    0,
+			    100,
+			    0,
+			    soc_ctlx_get,
+			    soc_ctlx_put),
+	SOC_SINGLE_EXT("Master Volume X",
+			    0,
+			    0,
+			    100,
+			    0,
+			    soc_ctlx_get,
+			    soc_ctlx_put),
+	SOC_SINGLE_BOOL_EXT("Master Switch",
+			    0,
+			    soc_ctlx_get,
+			    soc_ctlx_put),
+	SOC_SINGLE_BOOL_EXT("Master Switch X",
+			    0,
+			    soc_ctlx_get,
+			    soc_ctlx_put),
+	SOC_ENUM_EXT("PCM State",
+		     pcm_state_enums,
+		     soc_pcm_state_get,
+		     soc_pcm_state_put),
+	SOC_ENUM_EXT("PCM State X",
+		     pcm_state_enums,
+		     soc_pcm_state_get,
+		     NULL),
+};
+
+static int i2s_8ch_playback_state_set(struct snd_pcm_substream *substream, int state)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_card *card = rtd->card;
+	struct mt8516_pumpkin_priv *card_data = snd_soc_card_get_drvdata(card);
+	struct soc_ctlx_res *res_mgr = &card_data->ctlx_res;
+	int nfy_type;
+	unsigned long flags;
+
+	nfy_type = PCM_STATEX_ID;
+	spin_lock_irqsave(&res_mgr->res_lock, flags);
+	if (res_mgr->pcm_state != state) {
+		res_mgr->pcm_state = state;
+		snd_ctl_notify(card->snd_card, SNDRV_CTL_EVENT_MASK_VALUE, &(res_mgr->nfy_ids[nfy_type]));
+	} else {
+		nfy_type = CTRL_NOTIFY_INVAL;
+	}
+	spin_unlock_irqrestore(&res_mgr->res_lock, flags);
+
+	return 0;
+}
+
+static int i2s_8ch_playback_startup(struct snd_pcm_substream *substream)
+{
+	i2s_8ch_playback_state_set(substream, MTKFILE_PCM_STATE_OPEN);
+	return 0;
+}
+
+static void i2s_8ch_playback_shutdown(struct snd_pcm_substream *substream)
+{
+	i2s_8ch_playback_state_set(substream, MTKFILE_PCM_STATE_CLOSE);
+}
+
+static int i2s_8ch_playback_hw_params(struct snd_pcm_substream *substream,
+					struct snd_pcm_hw_params *params)
+{
+	i2s_8ch_playback_state_set(substream, MTKFILE_PCM_STATE_HW_PARAMS);
+	return 0;
+}
+
+static int i2s_8ch_playback_hw_free(struct snd_pcm_substream *substream)
+{
+	i2s_8ch_playback_state_set(substream, MTKFILE_PCM_STATE_HW_FREE);
+	return 0;
+}
+
+static int i2s_8ch_playback_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		i2s_8ch_playback_state_set(substream, MTKFILE_PCM_STATE_START);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static struct snd_soc_ops i2s_8ch_playback_ops = {
+	.startup = i2s_8ch_playback_startup,
+	.shutdown = i2s_8ch_playback_shutdown,
+	.hw_params = i2s_8ch_playback_hw_params,
+	.hw_free = i2s_8ch_playback_hw_free,
+	.trigger = i2s_8ch_playback_trigger,
+};
+
+static int pcm186x_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	pr_notice("%s\n", __func__);
+#if 0
+	struct snd_soc_pcm_runtime *rtd;
+	struct snd_soc_dai *codec_dai;
+
+
+	printk("aic:%s\n", __func__);
+	if (substream == NULL) {
+		pr_err("invalid stream parameter\n");
+		return -EINVAL;
+	}
+
+	rtd = substream->private_data;
+	if (rtd == NULL) {
+		pr_err("invalid runtime parameter\n");
+		return -EINVAL;
+	}
+
+	codec_dai = rtd->codec_dai;
+	if (codec_dai == NULL) {
+		pr_err("invalid dai parameter\n");
+		return -EINVAL;
+	}
+
+#define  TLV320_MCLK_SOURCE 0
+#define  TLV320_BCLK_SOURCE 1
+
+	snd_soc_dai_set_pll(codec_dai, 0, TLV320_MCLK_SOURCE,
+			256 * params_rate(params), params_rate(params));
+
+#endif
+	return 0;
+}
+
+static struct snd_soc_ops pcm186x_machine_ops = {
+	.hw_params = pcm186x_hw_params,
+};
+
+#if 0
+static struct snd_soc_dai_link_component tdm_in_codecs[] = {
+	{.name = "pcm186x.2-004a", .dai_name = "pcm1865-aif" },
+	{.name = "pcm186x.2-004b", .dai_name = "pcm1865-aif" },
+};
+#endif
+
+/* Digital audio interface glue - connects codec <---> CPU */
+static struct snd_soc_dai_link mt8516_pumpkin_dais[] = {
+	/* Front End DAI links */
+	{
+		.name = "I2S 8CH Playback",
+		.stream_name = "I2S8CH Playback",
+		.cpu_dai_name = "HDMI",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {
+			SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST
+		},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.ops = &i2s_8ch_playback_ops,
+	},
+	{
+		.name = "TDM Capture",
+		.stream_name = "TDM_Capture",
+		.cpu_dai_name = "TDM_IN",
+//		.codecs = tdm_in_codecs,
+//		.num_codecs = 2,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+//		.codec_name = "pcm186x.2-004a", //i2c2 addr 0x4a
+//		.codec_dai_name = "pcm1865-aif",
+
+		.dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF |
+				SND_SOC_DAIFMT_CBS_CFS,
+		.trigger = {
+			SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST
+		},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.ops = &pcm186x_machine_ops,
+	},
+	{
+		.name = "DMIC Capture",
+		.stream_name = "DMIC_Capture",
+		.cpu_dai_name = "VUL",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {
+			SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST
+		},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "AWB Capture",
+		.stream_name = "AWB_Record",
+		.cpu_dai_name = "AWB",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {
+			SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST
+		},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+#ifdef CONFIG_MTK_BTCVSD_ALSA
+	{
+		.name = "BTCVSD_RX",
+		.stream_name = "BTCVSD_Capture",
+		.cpu_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "mt-soc-btcvsd-rx-pcm",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+	},
+	{
+		.name = "BTCVSD_TX",
+		.stream_name = "BTCVSD_Playback",
+		.cpu_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "mt-soc-btcvsd-tx-pcm",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+	},
+#endif
+	{
+		.name = "DL1 Playback",
+		.stream_name = "DL1_Playback",
+		.cpu_dai_name = "DL1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {
+			SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST
+		},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+	{
+		.name = "Ref In Capture",
+		.stream_name = "DL1_AWB_Record",
+		.cpu_dai_name = "AWB",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {
+			SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST
+		},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "DAI Capture",
+		.stream_name = "VOIP_Call_BT_Capture",
+		.cpu_dai_name = "DAI",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {
+			SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST
+		},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "DL2 Playback",
+		.stream_name = "DL2_Playback",
+		.cpu_dai_name = "DL2",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {
+			SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST
+		},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+
+	/* Backend End DAI links */
+	{
+		.name = "HDMI BE",
+		.cpu_dai_name = "HDMIO",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+			   SND_SOC_DAIFMT_CBS_CFS,
+		.dpcm_playback = 1,
+	},
+	{
+		.name = "2ND EXT Codec",
+		.cpu_dai_name = "2ND I2S",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+			   SND_SOC_DAIFMT_CBS_CFS,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "MTK Codec",
+		.cpu_dai_name = "INT ADDA",
+		.no_pcm = 1,
+		.codec_name = "mt8167-codec",
+		.codec_dai_name = "mt8167-codec-dai",
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "DMIC BE",
+		.cpu_dai_name = "INT ADDA",
+		.no_pcm = 1,
+		.codec_name = "mt8167-codec",
+		.codec_dai_name = "mt8167-codec-dai",
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "HW Gain1 BE",
+		.cpu_dai_name = "HW_GAIN1",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "TDM IN BE",
+		.cpu_dai_name = "TDM_IN_IO",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+//		.codecs = tdm_in_codecs,
+//		.num_codecs = 2,
+
+		/* alternative: SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF */
+		.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+				SND_SOC_DAIFMT_CBS_CFS,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "I2S BE",
+		.cpu_dai_name = "I2S",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		//.codec_name = "tas5782m",
+		//.codec_dai_name = "tas5782m-i2s",
+		.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+				SND_SOC_DAIFMT_CBS_CFS,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "DL BE",
+		.cpu_dai_name = "DL Input",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "MRG BT BE",
+		.cpu_dai_name = "MRG BT",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "INTDIR BE",
+		.cpu_dai_name = "INTDIR_IO",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.dpcm_capture = 1,
+	},
+};
+
+static const struct snd_soc_dapm_widget mt8516_pumpkin_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("External Line In"),
+	SND_SOC_DAPM_OUTPUT("External I2S out"),
+	SND_SOC_DAPM_INPUT("External Line In2"),
+	SND_SOC_DAPM_OUTPUT("External I2S out2"),
+};
+
+static const struct snd_soc_dapm_route mt8516_pumpkin_audio_map[] = {
+	{"2ND I2S Capture", NULL, "External Line In"},
+	{"I2S Capture", NULL, "External Line In2"},
+	{"External I2S out", NULL, "I2S Playback"},
+	{"External I2S out2", NULL, "2ND I2S Playback"},
+};
+
+static int mt8516_pumpkin_suspend_post(struct snd_soc_card *card)
+{
+	struct mt8516_pumpkin_priv *card_data;
+
+	card_data = snd_soc_card_get_drvdata(card);
+
+	if (!IS_ERR(card_data->tdmadc_1p8_supply))
+		regulator_disable(card_data->tdmadc_1p8_supply);
+	if (!IS_ERR(card_data->tdmadc_3p3_supply))
+		regulator_disable(card_data->tdmadc_3p3_supply);
+	return 0;
+}
+
+static int mt8516_pumpkin_resume_pre(struct snd_soc_card *card)
+{
+	struct mt8516_pumpkin_priv *card_data;
+	int ret;
+
+	card_data = snd_soc_card_get_drvdata(card);
+
+	/* tdm adc power up */
+	if (!IS_ERR(card_data->tdmadc_1p8_supply)) {
+		ret = regulator_enable(card_data->tdmadc_1p8_supply);
+		if (ret != 0)
+			dev_err(card->dev, "%s failed to enable tdm 1p8 supply %d!\n", __func__, ret);
+	}
+	if (!IS_ERR(card_data->tdmadc_3p3_supply)) {
+		ret = regulator_enable(card_data->tdmadc_3p3_supply);
+		if (ret != 0)
+			dev_err(card->dev, "%s failed to enable tdm 3p3 supply %d!\n", __func__, ret);
+	}
+	return 0;
+}
+
+static struct snd_soc_card mt8516_pumpkin_card = {
+	.name = "mt-snd-card",
+	.owner = THIS_MODULE,
+	.dai_link = mt8516_pumpkin_dais,
+	.num_links = ARRAY_SIZE(mt8516_pumpkin_dais),
+	.controls = mt8516_pumpkin_soc_controls,
+	.num_controls = ARRAY_SIZE(mt8516_pumpkin_soc_controls),
+	.dapm_widgets = mt8516_pumpkin_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(mt8516_pumpkin_dapm_widgets),
+	.dapm_routes = mt8516_pumpkin_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(mt8516_pumpkin_audio_map),
+	.suspend_post = mt8516_pumpkin_suspend_post,
+	.resume_pre = mt8516_pumpkin_resume_pre,
+};
+
+static int mt8516_pumpkin_gpio_probe(struct snd_soc_card *card)
+{
+	struct mt8516_pumpkin_priv *card_data;
+	int ret = 0;
+	int i;
+
+	card_data = snd_soc_card_get_drvdata(card);
+
+	card_data->pinctrl = devm_pinctrl_get(card->dev);
+	if (IS_ERR(card_data->pinctrl)) {
+		ret = PTR_ERR(card_data->pinctrl);
+		dev_err(card->dev, "%s pinctrl_get failed %d\n",
+			__func__, ret);
+		goto exit;
+	}
+
+	for (i = 0 ; i < PIN_STATE_MAX ; i++) {
+		card_data->pin_states[i] =
+			pinctrl_lookup_state(card_data->pinctrl,
+				mt8516_pumpkin_pinctrl_pin_str[i]);
+		if (IS_ERR(card_data->pin_states[i])) {
+			ret = PTR_ERR(card_data->pin_states[i]);
+			dev_warn(card->dev, "%s Can't find pinctrl state %s %d\n",
+				__func__, mt8516_pumpkin_pinctrl_pin_str[i], ret);
+		}
+	}
+	/* default state */
+	if (!IS_ERR(card_data->pin_states[PIN_STATE_DEFAULT])) {
+		ret = pinctrl_select_state(card_data->pinctrl,
+				card_data->pin_states[PIN_STATE_DEFAULT]);
+		if (ret) {
+			dev_err(card->dev, "%s failed to select state %d\n",
+				__func__, ret);
+			goto exit;
+		}
+	}
+
+exit:
+
+	return ret;
+}
+
+static int mt8516_pumpkin_regulator_probe(struct snd_soc_card *card)
+{
+	struct mt8516_pumpkin_priv *card_data;
+	int isenable, vol, ret;
+
+	card_data = snd_soc_card_get_drvdata(card);
+
+	card_data->tdmadc_3p3_supply = devm_regulator_get(card->dev, "tdmadc-3p3v");
+	if (IS_ERR(card_data->tdmadc_3p3_supply)) {
+		ret = PTR_ERR(card_data->tdmadc_3p3_supply);
+		dev_err(card->dev, "%s failed to get tdmadc-3p3v regulator %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = regulator_set_voltage(card_data->tdmadc_3p3_supply, 3300000, 3300000);
+	if (ret != 0) {
+		dev_err(card->dev, "%s failed to set tdmadc-3p3v supply to 3.3v %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = regulator_enable(card_data->tdmadc_3p3_supply);
+	if (ret != 0) {
+		dev_err(card->dev, "%s failed to enable tdmadc 3p3 supply %d!\n", __func__, ret);
+		return ret;
+	}
+
+	isenable = regulator_is_enabled(card_data->tdmadc_3p3_supply);
+	if (isenable != 1)
+		dev_err(card->dev, "%s tdmadc 3.3V supply is not enabled\n", __func__);
+
+
+	vol = regulator_get_voltage(card_data->tdmadc_3p3_supply);
+	if (vol != 3300000)
+		dev_err(card->dev, "%s tdmadc 3p3 supply != 3.3V (%d)\n", __func__, vol);
+
+
+	card_data->tdmadc_1p8_supply = devm_regulator_get(card->dev, "tdmadc-1p8v");
+	if (IS_ERR(card_data->tdmadc_1p8_supply)) {
+		ret = PTR_ERR(card_data->tdmadc_1p8_supply);
+		dev_err(card->dev, "%s failed to get tdmadc-1p8v regulator %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = regulator_set_voltage(card_data->tdmadc_1p8_supply, 1800000, 1800000);
+	if (ret != 0) {
+		dev_err(card->dev, "%s failed to set tdmadc-1p8v supply to 1.8v %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = regulator_enable(card_data->tdmadc_1p8_supply);
+	if (ret != 0) {
+		dev_err(card->dev, "%s failed to enable tdmadc 1p8 supply %d!\n", __func__, ret);
+		return ret;
+	}
+
+	isenable = regulator_is_enabled(card_data->tdmadc_1p8_supply);
+	if (isenable != 1)
+		dev_err(card->dev, "%s tdmadc 1.8V supply is not enabled\n", __func__);
+
+	vol = regulator_get_voltage(card_data->tdmadc_1p8_supply);
+	if (vol != 1800000)
+		dev_err(card->dev, "%s tdmadc 1p8 supply != 1.8V (%d)\n", __func__, vol);
+
+	return 0;
+}
+
+static int mt8516_pumpkin_dev_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &mt8516_pumpkin_card;
+	struct device_node *platform_node;
+	struct device_node *codec_node;
+//	struct device_node *tdmin_adc_node;
+
+	int ret, i;
+	struct mt8516_pumpkin_priv *card_data;
+
+	platform_node = of_parse_phandle(pdev->dev.of_node,
+					 "mediatek,platform", 0);
+	if (!platform_node) {
+		dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+		return -EINVAL;
+	}
+	codec_node = of_parse_phandle(pdev->dev.of_node,
+					 "mediatek,audio-codec", 0);
+	if (!codec_node) {
+		dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * tdmin_adc_node = of_parse_phandle(pdev->dev.of_node,
+	 *				   "mediatek,tdmin-adc", 0);
+	 * if (!tdmin_adc_node)
+	 *	dev_err(&pdev->dev, "Property 'tdmin-adc' missing or invalid\n");
+	 */
+	for (i = 0; i < card->num_links; i++) {
+		if (mt8516_pumpkin_dais[i].platform_name)
+			continue;
+		mt8516_pumpkin_dais[i].platform_of_node = platform_node;
+	}
+	/*
+	 * Per-link codec assignment is disabled for now; every link keeps
+	 * the dummy codec set in mt8516_pumpkin_dais[]:
+	 *
+	 *	if (tdmin_adc_node &&
+	 *	    !strcmp(mt8516_pumpkin_dais[i].cpu_dai_name, "TDM_IN")) {
+	 *		mt8516_pumpkin_dais[i].codec_of_node = tdmin_adc_node;
+	 *		mt8516_pumpkin_dais[i].codec_name = NULL;
+	 *		continue;
+	 *	}
+	 *	if (mt8516_pumpkin_dais[i].codec_name)
+	 *		continue;
+	 *	mt8516_pumpkin_dais[i].codec_of_node = codec_node;
+	 */
+	card->dev = &pdev->dev;
+
+	card_data = devm_kzalloc(&pdev->dev,
+		sizeof(struct mt8516_pumpkin_priv), GFP_KERNEL);
+	if (!card_data) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev,
+			"%s allocate card private data fail %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	snd_soc_card_set_drvdata(card, card_data);
+
+	mt8516_pumpkin_regulator_probe(card);
+	mt8516_pumpkin_gpio_probe(card);
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret) {
+		dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
+		__func__, ret);
+		return ret;
+	}
+	soc_ctlx_init(&card_data->ctlx_res, card);
+
+	return ret;
+}
+
+static const struct of_device_id mt8516_pumpkin_dt_match[] = {
+	{ .compatible = "mediatek,mt8516-soc-pumpkin", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mt8516_pumpkin_dt_match);
+
+static struct platform_driver mt8516_pumpkin_mach_driver = {
+	.driver = {
+		   .name = "mt8516-soc-pumpkin",
+		   .of_match_table = mt8516_pumpkin_dt_match,
+#ifdef CONFIG_PM
+		   .pm = &snd_soc_pm_ops,
+#endif
+	},
+	.probe = mt8516_pumpkin_dev_probe,
+};
+
+module_platform_driver(mt8516_pumpkin_mach_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT8516Pumpkin ALSA SoC machine driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mt8516-pumpkin");
diff --git a/sound/soc/mediatek/mt8167/mt8516-vesper.c b/sound/soc/mediatek/mt8167/mt8516-vesper.c
new file mode 100644
index 0000000..a6d9bd6
--- /dev/null
+++ b/sound/soc/mediatek/mt8167/mt8516-vesper.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mt8516-vesper.c  --  MT8516-Vesper ALSA SoC machine driver
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ */
+
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+struct adc_config {
+	struct device_node *component_of_node;
+	unsigned int tdm_mask;
+};
+
+struct vesper_config {
+	const char *dai_link_name;
+	struct adc_config *tdm_cfg;
+	int num_cfg;
+};
+
+static struct vesper_config *vesper_cfg;
+static int num_vesper_cfg;
+
+static int tdmin_capture_startup(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int i, j, k;
+
+	for (i = 0; i < rtd->num_codecs; i++)
+		if (rtd->codec_dais[i]->active)
+			return -EBUSY;
+
+	for (k = 0; k < num_vesper_cfg; k++)
+		if (!strcmp(rtd->dai_link->name, vesper_cfg[k].dai_link_name))
+			break;
+
+	if (k == num_vesper_cfg)
+		return 0;
+
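+	/*
+	 * Program each ADC with the slot mask taken from its DT node:
+	 * 8 slots of 32 bits, with the mask selecting the codec's TX
+	 * slots (capture data toward the SoC).
+	 */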
+	for (i = 0; i < rtd->num_codecs; i++) {
+		for (j = 0; j < vesper_cfg[k].num_cfg; j++) {
+			if (rtd->codec_dais[i]->dev->of_node ==
+			    vesper_cfg[k].tdm_cfg[j].component_of_node) {
+				snd_soc_dai_set_tdm_slot(rtd->codec_dais[i],
+				vesper_cfg[k].tdm_cfg[j].tdm_mask, 0, 8, 32);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int tdmin_hw_params(struct snd_pcm_substream *substream,
+			   struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int rate = params_rate(params);
+	unsigned int mclk_rate = rate * 256;
+	int i;
+
+	/* codec mclk */
+	for (i = 0; i < rtd->num_codecs; i++)
+		snd_soc_dai_set_sysclk(rtd->codec_dais[i], 0, mclk_rate,
+				       SND_SOC_CLOCK_IN);
+
+	return 0;
+}
+
+static struct snd_soc_ops tdmin_capture_ops = {
+	.startup = tdmin_capture_startup,
+	.hw_params = tdmin_hw_params,
+};
+
+/* No codec declared by default in the dai links. They are added
+ * dynamically depending on the dt
+ */
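+
+/*
+ * Hypothetical DT fragment consumed by set_card_codec_info() below;
+ * node labels and the codec phandles are illustrative only:
+ *
+ *	sound {
+ *		compatible = "mediatek,mt8516-soc-vesper";
+ *		mediatek,platform = <&afe>;
+ *		mediatek,audio-codec = <&codec>;
+ *		dai-link-0 {
+ *			dai-link-name = "TDM Capture";
+ *			dai-format = "tdm";
+ *			codec-0 {
+ *				sound-dai = <&adc0>;
+ *				tdm-mask = <0x3>;
+ *			};
+ *		};
+ *	};
+ */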
+
+/* Digital audio interface glue - connects codec <---> CPU */
+static struct snd_soc_dai_link mt8516_vesper_dais[] = {
+	/* Front End DAI links */
+	{
+		.name = "TDM Capture",
+		.stream_name = "TDM_Capture",
+		.cpu_dai_name = "TDM_IN",
+		.dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF |
+				SND_SOC_DAIFMT_CBS_CFS,
+		.trigger = {
+			SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST
+		},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.ops = &tdmin_capture_ops,
+	},
+	{
+		.name = "TDM Capture 6.1",
+		.stream_name = "TDM_Capture_6_1",
+		.cpu_dai_name = "TDM_IN",
+		.dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF |
+				SND_SOC_DAIFMT_CBS_CFS,
+		.trigger = {
+			SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST
+		},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.ops = &tdmin_capture_ops,
+	},
+	{
+		.name = "DL1 Playback",
+		.stream_name = "DL1_Playback",
+		.cpu_dai_name = "DL1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {
+			SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST
+		},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+
+	/* Backend End DAI links */
+	{
+		.name = "TDM IN BE",
+		.cpu_dai_name = "TDM_IN_IO",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+				SND_SOC_DAIFMT_CBS_CFS,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "I2S BE",
+		.cpu_dai_name = "I2S",
+		.no_pcm = 1,
+		.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+				SND_SOC_DAIFMT_CBS_CFS,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+	},
+};
+
+static struct snd_soc_card mt8516_vesper_card = {
+	.name = "mt-snd-card",
+	.owner = THIS_MODULE,
+	.dai_link = mt8516_vesper_dais,
+	.num_links = ARRAY_SIZE(mt8516_vesper_dais),
+};
+
+static int set_card_codec_info(struct snd_soc_card *card)
+{
+	struct snd_soc_dai_link_component *dai_link_codecs, *dlc;
+	struct device_node *dl_node, *c_node;
+	struct device *dev = card->dev;
+	struct of_phandle_args args;
+	struct adc_config *cur_cfg;
+	const char *dai_link_name;
+	const char *dai_format;
+	int cfg_idx, link_idx;
+	bool is_tdm_format;
+	int num_codecs;
+	u32 tdm_mask;
+	int ret, i;
+
+	/* Figure out how many vesper tdm configs are needed */
+	num_vesper_cfg = 0;
+	for_each_child_of_node(dev->of_node, dl_node) {
+		if (!of_property_read_string(dl_node, "dai-format",
+					     &dai_format)) {
+			if (!strcmp(dai_format, "tdm"))
+				num_vesper_cfg++;
+		}
+	}
+	/* Allocate the number of vesper tdm configs that are needed */
+	vesper_cfg = devm_kcalloc(dev, num_vesper_cfg,
+				  sizeof(*vesper_cfg), GFP_KERNEL);
+	if (!vesper_cfg)
+		return -ENOMEM;
+
+	cfg_idx = 0;
+	/* Loop over all the dai link sub-nodes */
+	for_each_child_of_node(dev->of_node, dl_node) {
+		if (of_property_read_string(dl_node, "dai-link-name",
+					    &dai_link_name))
+			return -EINVAL;
+
+		/* Check whether the used format is tdm. If this is the case,
+		 * the tdm mask information is stored to be used when the tdm
+		 * is started and the tdm slots must be set.
+		 */
+		is_tdm_format = false;
+		if (!of_property_read_string(dl_node, "dai-format",
+					     &dai_format)) {
+			if (!strcmp(dai_format, "tdm"))
+				is_tdm_format = true;
+		}
+
+		num_codecs = of_get_child_count(dl_node);
+		/* Allocate the snd_soc_dai_link_component array that will be
+		 * used to dynamically add the list of codecs to the static
+		 * snd_soc_dai_link array.
+		 */
+		dlc = dai_link_codecs = devm_kcalloc(dev, num_codecs,
+					       sizeof(*dai_link_codecs),
+					       GFP_KERNEL);
+		if (!dai_link_codecs)
+			return -ENOMEM;
+
+		if (is_tdm_format) {
+			/* Fill the vesper_cfg structure and allocate a number
+			 * of tdm_cfg corresponding to the number of codecs.
+			 */
+			vesper_cfg[cfg_idx].num_cfg = num_codecs;
+			vesper_cfg[cfg_idx].dai_link_name = dai_link_name;
+			vesper_cfg[cfg_idx].tdm_cfg = devm_kcalloc(dev,
+				num_codecs, sizeof(struct adc_config),
+				GFP_KERNEL);
+			if (!vesper_cfg[cfg_idx].tdm_cfg)
+				return -ENOMEM;
+		}
+
+		link_idx = 0;
+		cur_cfg = vesper_cfg[cfg_idx].tdm_cfg;
+		/* Loop over all the codec sub nodes for this dai link */
+		for_each_child_of_node(dl_node, c_node) {
+			/* Retrieve the node and the dai_name that are used
+			 * by the soundcard.
+			 */
+			ret = of_parse_phandle_with_args(c_node, "sound-dai",
+							 "#sound-dai-cells", 0,
+							 &args);
+			if (ret) {
+				if (ret != -EPROBE_DEFER)
+					dev_err(dev,
+						"can't parse dai %d\n", ret);
+				of_node_put(c_node);
+				return ret;
+			}
+			dlc->of_node = args.np;
+			ret = snd_soc_get_dai_name(&args, &dlc->dai_name);
+			if (ret) {
+				of_node_put(c_node);
+				return ret;
+			}
+
+			if (is_tdm_format) {
+				/* Fill the tdm cfg for this codec */
+				if (of_property_read_u32(c_node, "tdm-mask",
+							 &tdm_mask)) {
+					of_node_put(c_node);
+					return -EINVAL;
+				}
+				cur_cfg->component_of_node = dlc->of_node;
+				cur_cfg->tdm_mask = tdm_mask;
+				cur_cfg++;
+			}
+			dlc++;
+			link_idx++;
+		}
+
+		/* Update the snd_soc_dai_link static array with the codecs
+		 * we have just found.
+		 */
+		for (i = 0; i < card->num_links; i++) {
+			if (!strcmp(dai_link_name, card->dai_link[i].name)) {
+				card->dai_link[i].num_codecs = link_idx;
+				card->dai_link[i].codecs = dai_link_codecs;
+				break;
+			}
+		}
+
+		if (is_tdm_format)
+			cfg_idx++;
+	}
+
+	return 0;
+}
+
+static int mt8516_vesper_dev_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &mt8516_vesper_card;
+	struct device_node *platform_node;
+	struct device_node *codec_node;
+	int ret, i;
+
+	card->dev = &pdev->dev;
+	ret = set_card_codec_info(card);
+	if (ret) {
+		dev_err(&pdev->dev, "%s set_card_codec_info failed %d\n",
+		__func__, ret);
+		return ret;
+	}
+
+	platform_node = of_parse_phandle(pdev->dev.of_node,
+					 "mediatek,platform", 0);
+	if (!platform_node) {
+		dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+		return -EINVAL;
+	}
+	codec_node = of_parse_phandle(pdev->dev.of_node,
+					 "mediatek,audio-codec", 0);
+	if (!codec_node) {
+		dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < card->num_links; i++) {
+		if (mt8516_vesper_dais[i].platform_name)
+			continue;
+		mt8516_vesper_dais[i].platform_of_node = platform_node;
+	}
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret) {
+		dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
+		__func__, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static const struct of_device_id mt8516_vesper_dt_match[] = {
+	{ .compatible = "mediatek,mt8516-soc-vesper", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mt8516_vesper_dt_match);
+
+static struct platform_driver mt8516_vesper_mach_driver = {
+	.driver = {
+		   .name = "mt8516-soc-vesper",
+		   .of_match_table = mt8516_vesper_dt_match,
+#ifdef CONFIG_PM
+		   .pm = &snd_soc_pm_ops,
+#endif
+	},
+	.probe = mt8516_vesper_dev_probe,
+};
+
+module_platform_driver(mt8516_vesper_mach_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT8516-Vesper ALSA SoC machine driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mt8516-vesper");
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index c0b6697..332856b 100644
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -714,13 +714,11 @@
 		.mono_reg = AFE_DAC_CON1,
 		.mono_shift = 21,
 		.hd_reg = -1,
-		.hd_shift = -1,
 		.enable_reg = AFE_DAC_CON0,
 		.enable_shift = 1,
 		.msb_reg = AFE_MEMIF_MSB,
 		.msb_shift = 0,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 	}, {
 		.name = "DL2",
 		.id = MT8173_AFE_MEMIF_DL2,
@@ -732,13 +730,11 @@
 		.mono_reg = AFE_DAC_CON1,
 		.mono_shift = 22,
 		.hd_reg = -1,
-		.hd_shift = -1,
 		.enable_reg = AFE_DAC_CON0,
 		.enable_shift = 2,
 		.msb_reg = AFE_MEMIF_MSB,
 		.msb_shift = 1,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 	}, {
 		.name = "VUL",
 		.id = MT8173_AFE_MEMIF_VUL,
@@ -750,13 +746,11 @@
 		.mono_reg = AFE_DAC_CON1,
 		.mono_shift = 27,
 		.hd_reg = -1,
-		.hd_shift = -1,
 		.enable_reg = AFE_DAC_CON0,
 		.enable_shift = 3,
 		.msb_reg = AFE_MEMIF_MSB,
 		.msb_shift = 6,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 	}, {
 		.name = "DAI",
 		.id = MT8173_AFE_MEMIF_DAI,
@@ -768,13 +762,11 @@
 		.mono_reg = -1,
 		.mono_shift = -1,
 		.hd_reg = -1,
-		.hd_shift = -1,
 		.enable_reg = AFE_DAC_CON0,
 		.enable_shift = 4,
 		.msb_reg = AFE_MEMIF_MSB,
 		.msb_shift = 5,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 	}, {
 		.name = "AWB",
 		.id = MT8173_AFE_MEMIF_AWB,
@@ -786,13 +778,11 @@
 		.mono_reg = AFE_DAC_CON1,
 		.mono_shift = 24,
 		.hd_reg = -1,
-		.hd_shift = -1,
 		.enable_reg = AFE_DAC_CON0,
 		.enable_shift = 6,
 		.msb_reg = AFE_MEMIF_MSB,
 		.msb_shift = 3,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 	}, {
 		.name = "MOD_DAI",
 		.id = MT8173_AFE_MEMIF_MOD_DAI,
@@ -804,13 +794,11 @@
 		.mono_reg = AFE_DAC_CON1,
 		.mono_shift = 30,
 		.hd_reg = -1,
-		.hd_shift = -1,
 		.enable_reg = AFE_DAC_CON0,
 		.enable_shift = 7,
 		.msb_reg = AFE_MEMIF_MSB,
 		.msb_shift = 4,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 	}, {
 		.name = "HDMI",
 		.id = MT8173_AFE_MEMIF_HDMI,
@@ -822,13 +810,10 @@
 		.mono_reg = -1,
 		.mono_shift = -1,
 		.hd_reg = -1,
-		.hd_shift = -1,
 		.enable_reg = -1,
-		.enable_shift = -1,
 		.msb_reg = AFE_MEMIF_MSB,
 		.msb_shift = 8,
 		.agent_disable_reg = -1,
-		.agent_disable_shift = -1,
 	},
 };
 
@@ -914,7 +899,6 @@
 		.irq_en_reg = AFE_IRQ_MCU_CON,
 		.irq_en_shift = 12,
 		.irq_fs_reg = -1,
-		.irq_fs_shift = -1,
 		.irq_fs_maskbit = -1,
 		.irq_clr_reg = AFE_IRQ_CLR,
 		.irq_clr_shift = 4,
diff --git a/sound/soc/mediatek/mt8183/Makefile b/sound/soc/mediatek/mt8183/Makefile
new file mode 100644
index 0000000..407db21
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# platform driver
+snd-soc-mt8183-afe-objs := \
+	mt8183-afe-pcm.o \
+	mt8183-afe-clk.o \
+	mt8183-dai-i2s.o \
+	mt8183-dai-tdm.o \
+	mt8183-dai-pcm.o \
+	mt8183-dai-hostless.o \
+	mt8183-dai-adda.o
+
+obj-$(CONFIG_SND_SOC_MT8183) += snd-soc-mt8183-afe.o
+obj-$(CONFIG_SND_SOC_MT8183_MT6358_TS3A227E_MAX98357A) += mt8183-mt6358-ts3a227-max98357.o
+obj-$(CONFIG_SND_SOC_MT8183_DA7219_MAX98357A) += mt8183-da7219-max98357.o
+obj-$(CONFIG_SND_SOC_MT8183_MT6358) += mt8183-mt6358.o
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-clk.c b/sound/soc/mediatek/mt8183/mt8183-afe-clk.c
new file mode 100644
index 0000000..48e81c5
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-clk.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt8183-afe-clk.c  --  Mediatek 8183 afe clock ctrl
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/clk.h>
+
+#include "mt8183-afe-common.h"
+#include "mt8183-afe-clk.h"
+#include "mt8183-reg.h"
+
+enum {
+	CLK_AFE = 0,
+	CLK_TML,
+	CLK_APLL22M,
+	CLK_APLL24M,
+	CLK_APLL1_TUNER,
+	CLK_APLL2_TUNER,
+	CLK_I2S1_BCLK_SW,
+	CLK_I2S2_BCLK_SW,
+	CLK_I2S3_BCLK_SW,
+	CLK_I2S4_BCLK_SW,
+	CLK_INFRA_SYS_AUDIO,
+	CLK_MUX_AUDIO,
+	CLK_MUX_AUDIOINTBUS,
+	CLK_TOP_SYSPLL_D2_D4,
+	/* apll related mux */
+	CLK_TOP_MUX_AUD_1,
+	CLK_TOP_APLL1_CK,
+	CLK_TOP_MUX_AUD_2,
+	CLK_TOP_APLL2_CK,
+	CLK_TOP_MUX_AUD_ENG1,
+	CLK_TOP_APLL1_D8,
+	CLK_TOP_MUX_AUD_ENG2,
+	CLK_TOP_APLL2_D8,
+	CLK_TOP_I2S0_M_SEL,
+	CLK_TOP_I2S1_M_SEL,
+	CLK_TOP_I2S2_M_SEL,
+	CLK_TOP_I2S3_M_SEL,
+	CLK_TOP_I2S4_M_SEL,
+	CLK_TOP_I2S5_M_SEL,
+	CLK_TOP_APLL12_DIV0,
+	CLK_TOP_APLL12_DIV1,
+	CLK_TOP_APLL12_DIV2,
+	CLK_TOP_APLL12_DIV3,
+	CLK_TOP_APLL12_DIV4,
+	CLK_TOP_APLL12_DIVB,
+	CLK_CLK26M,
+	CLK_NUM
+};
+
+static const char *aud_clks[CLK_NUM] = {
+	[CLK_AFE] = "aud_afe_clk",
+	[CLK_TML] = "aud_tml_clk",
+	[CLK_APLL22M] = "aud_apll22m_clk",
+	[CLK_APLL24M] = "aud_apll24m_clk",
+	[CLK_APLL1_TUNER] = "aud_apll1_tuner_clk",
+	[CLK_APLL2_TUNER] = "aud_apll2_tuner_clk",
+	[CLK_I2S1_BCLK_SW] = "aud_i2s1_bclk_sw",
+	[CLK_I2S2_BCLK_SW] = "aud_i2s2_bclk_sw",
+	[CLK_I2S3_BCLK_SW] = "aud_i2s3_bclk_sw",
+	[CLK_I2S4_BCLK_SW] = "aud_i2s4_bclk_sw",
+	[CLK_INFRA_SYS_AUDIO] = "aud_infra_clk",
+	[CLK_MUX_AUDIO] = "top_mux_audio",
+	[CLK_MUX_AUDIOINTBUS] = "top_mux_aud_intbus",
+	[CLK_TOP_SYSPLL_D2_D4] = "top_syspll_d2_d4",
+	[CLK_TOP_MUX_AUD_1] = "top_mux_aud_1",
+	[CLK_TOP_APLL1_CK] = "top_apll1_ck",
+	[CLK_TOP_MUX_AUD_2] = "top_mux_aud_2",
+	[CLK_TOP_APLL2_CK] = "top_apll2_ck",
+	[CLK_TOP_MUX_AUD_ENG1] = "top_mux_aud_eng1",
+	[CLK_TOP_APLL1_D8] = "top_apll1_d8",
+	[CLK_TOP_MUX_AUD_ENG2] = "top_mux_aud_eng2",
+	[CLK_TOP_APLL2_D8] = "top_apll2_d8",
+	[CLK_TOP_I2S0_M_SEL] = "top_i2s0_m_sel",
+	[CLK_TOP_I2S1_M_SEL] = "top_i2s1_m_sel",
+	[CLK_TOP_I2S2_M_SEL] = "top_i2s2_m_sel",
+	[CLK_TOP_I2S3_M_SEL] = "top_i2s3_m_sel",
+	[CLK_TOP_I2S4_M_SEL] = "top_i2s4_m_sel",
+	[CLK_TOP_I2S5_M_SEL] = "top_i2s5_m_sel",
+	[CLK_TOP_APLL12_DIV0] = "top_apll12_div0",
+	[CLK_TOP_APLL12_DIV1] = "top_apll12_div1",
+	[CLK_TOP_APLL12_DIV2] = "top_apll12_div2",
+	[CLK_TOP_APLL12_DIV3] = "top_apll12_div3",
+	[CLK_TOP_APLL12_DIV4] = "top_apll12_div4",
+	[CLK_TOP_APLL12_DIVB] = "top_apll12_divb",
+	[CLK_CLK26M] = "top_clk26m_clk",
+};
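+
+/*
+ * Each name above is looked up with devm_clk_get(), so it must match a
+ * "clock-names" entry in the AFE device node.
+ */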
+
+int mt8183_init_clock(struct mtk_base_afe *afe)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	int i;
+
+	afe_priv->clk = devm_kcalloc(afe->dev, CLK_NUM, sizeof(*afe_priv->clk),
+				     GFP_KERNEL);
+	if (!afe_priv->clk)
+		return -ENOMEM;
+
+	for (i = 0; i < CLK_NUM; i++) {
+		afe_priv->clk[i] = devm_clk_get(afe->dev, aud_clks[i]);
+		if (IS_ERR(afe_priv->clk[i])) {
+			dev_err(afe->dev, "%s(), devm_clk_get %s fail, ret %ld\n",
+				__func__, aud_clks[i],
+				PTR_ERR(afe_priv->clk[i]));
+			return PTR_ERR(afe_priv->clk[i]);
+		}
+	}
+
+	return 0;
+}
+
+int mt8183_afe_enable_clock(struct mtk_base_afe *afe)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	int ret;
+
+	ret = clk_prepare_enable(afe_priv->clk[CLK_INFRA_SYS_AUDIO]);
+	if (ret) {
+		dev_err(afe->dev, "%s(), clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[CLK_INFRA_SYS_AUDIO], ret);
+		goto CLK_INFRA_SYS_AUDIO_ERR;
+	}
+
+	ret = clk_prepare_enable(afe_priv->clk[CLK_MUX_AUDIO]);
+	if (ret) {
+		dev_err(afe->dev, "%s(), clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[CLK_MUX_AUDIO], ret);
+		goto CLK_MUX_AUDIO_ERR;
+	}
+
+	ret = clk_set_parent(afe_priv->clk[CLK_MUX_AUDIO],
+			     afe_priv->clk[CLK_CLK26M]);
+	if (ret) {
+		dev_err(afe->dev, "%s(), clk_set_parent %s-%s fail %d\n",
+			__func__, aud_clks[CLK_MUX_AUDIO],
+			aud_clks[CLK_CLK26M], ret);
+		goto CLK_MUX_AUDIO_ERR;
+	}
+
+	ret = clk_prepare_enable(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+	if (ret) {
+		dev_err(afe->dev, "%s(), clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[CLK_MUX_AUDIOINTBUS], ret);
+		goto CLK_MUX_AUDIO_INTBUS_ERR;
+	}
+
+	ret = clk_set_parent(afe_priv->clk[CLK_MUX_AUDIOINTBUS],
+			     afe_priv->clk[CLK_TOP_SYSPLL_D2_D4]);
+	if (ret) {
+		dev_err(afe->dev, "%s(), clk_set_parent %s-%s fail %d\n",
+			__func__, aud_clks[CLK_MUX_AUDIOINTBUS],
+			aud_clks[CLK_TOP_SYSPLL_D2_D4], ret);
+		goto CLK_MUX_AUDIO_INTBUS_ERR;
+	}
+
+	ret = clk_prepare_enable(afe_priv->clk[CLK_AFE]);
+	if (ret) {
+		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[CLK_AFE], ret);
+		goto CLK_AFE_ERR;
+	}
+
+	ret = clk_prepare_enable(afe_priv->clk[CLK_I2S1_BCLK_SW]);
+	if (ret) {
+		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[CLK_I2S1_BCLK_SW], ret);
+		goto CLK_I2S1_BCLK_SW_ERR;
+	}
+
+	ret = clk_prepare_enable(afe_priv->clk[CLK_I2S2_BCLK_SW]);
+	if (ret) {
+		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[CLK_I2S2_BCLK_SW], ret);
+		goto CLK_I2S2_BCLK_SW_ERR;
+	}
+
+	ret = clk_prepare_enable(afe_priv->clk[CLK_I2S3_BCLK_SW]);
+	if (ret) {
+		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[CLK_I2S3_BCLK_SW], ret);
+		goto CLK_I2S3_BCLK_SW_ERR;
+	}
+
+	ret = clk_prepare_enable(afe_priv->clk[CLK_I2S4_BCLK_SW]);
+	if (ret) {
+		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[CLK_I2S4_BCLK_SW], ret);
+		goto CLK_I2S4_BCLK_SW_ERR;
+	}
+
+	return 0;
+
+CLK_I2S4_BCLK_SW_ERR:
+	clk_disable_unprepare(afe_priv->clk[CLK_I2S3_BCLK_SW]);
+CLK_I2S3_BCLK_SW_ERR:
+	clk_disable_unprepare(afe_priv->clk[CLK_I2S2_BCLK_SW]);
+CLK_I2S2_BCLK_SW_ERR:
+	clk_disable_unprepare(afe_priv->clk[CLK_I2S1_BCLK_SW]);
+CLK_I2S1_BCLK_SW_ERR:
+	clk_disable_unprepare(afe_priv->clk[CLK_AFE]);
+CLK_AFE_ERR:
+	clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+CLK_MUX_AUDIO_INTBUS_ERR:
+	clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIO]);
+CLK_MUX_AUDIO_ERR:
+	clk_disable_unprepare(afe_priv->clk[CLK_INFRA_SYS_AUDIO]);
+CLK_INFRA_SYS_AUDIO_ERR:
+	return ret;
+}
+
+int mt8183_afe_disable_clock(struct mtk_base_afe *afe)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+
+	clk_disable_unprepare(afe_priv->clk[CLK_I2S4_BCLK_SW]);
+	clk_disable_unprepare(afe_priv->clk[CLK_I2S3_BCLK_SW]);
+	clk_disable_unprepare(afe_priv->clk[CLK_I2S2_BCLK_SW]);
+	clk_disable_unprepare(afe_priv->clk[CLK_I2S1_BCLK_SW]);
+	clk_disable_unprepare(afe_priv->clk[CLK_AFE]);
+	clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+	clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIO]);
+	clk_disable_unprepare(afe_priv->clk[CLK_INFRA_SYS_AUDIO]);
+
+	return 0;
+}
+
+/* apll */
+static int apll1_mux_setting(struct mtk_base_afe *afe, bool enable)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	int ret;
+
+	if (enable) {
+		ret = clk_prepare_enable(afe_priv->clk[CLK_TOP_MUX_AUD_1]);
+		if (ret) {
+			dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+				__func__, aud_clks[CLK_TOP_MUX_AUD_1], ret);
+			goto ERR_ENABLE_CLK_TOP_MUX_AUD_1;
+		}
+		ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_1],
+				     afe_priv->clk[CLK_TOP_APLL1_CK]);
+		if (ret) {
+			dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+				__func__, aud_clks[CLK_TOP_MUX_AUD_1],
+				aud_clks[CLK_TOP_APLL1_CK], ret);
+			goto ERR_SELECT_CLK_TOP_MUX_AUD_1;
+		}
+
+		/* 180.6336 / 8 = 22.5792MHz */
+		ret = clk_prepare_enable(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1]);
+		if (ret) {
+			dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+				__func__, aud_clks[CLK_TOP_MUX_AUD_ENG1], ret);
+			goto ERR_ENABLE_CLK_TOP_MUX_AUD_ENG1;
+		}
+		ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1],
+				     afe_priv->clk[CLK_TOP_APLL1_D8]);
+		if (ret) {
+			dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+				__func__, aud_clks[CLK_TOP_MUX_AUD_ENG1],
+				aud_clks[CLK_TOP_APLL1_D8], ret);
+			goto ERR_SELECT_CLK_TOP_MUX_AUD_ENG1;
+		}
+	} else {
+		ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1],
+				     afe_priv->clk[CLK_CLK26M]);
+		if (ret) {
+			dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+				__func__, aud_clks[CLK_TOP_MUX_AUD_ENG1],
+				aud_clks[CLK_CLK26M], ret);
+			goto EXIT;
+		}
+		clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1]);
+
+		ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_1],
+				     afe_priv->clk[CLK_CLK26M]);
+		if (ret) {
+			dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+				__func__, aud_clks[CLK_TOP_MUX_AUD_1],
+				aud_clks[CLK_CLK26M], ret);
+			goto EXIT;
+		}
+		clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_1]);
+	}
+
+	return 0;
+
+ERR_SELECT_CLK_TOP_MUX_AUD_ENG1:
+	clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1],
+		       afe_priv->clk[CLK_CLK26M]);
+	clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1]);
+ERR_ENABLE_CLK_TOP_MUX_AUD_ENG1:
+ERR_SELECT_CLK_TOP_MUX_AUD_1:
+	clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_1],
+		       afe_priv->clk[CLK_CLK26M]);
+	clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_1]);
+ERR_ENABLE_CLK_TOP_MUX_AUD_1:
+EXIT:
+	return ret;
+}
+
+static int apll2_mux_setting(struct mtk_base_afe *afe, bool enable)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	int ret;
+
+	if (enable) {
+		ret = clk_prepare_enable(afe_priv->clk[CLK_TOP_MUX_AUD_2]);
+		if (ret) {
+			dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+				__func__, aud_clks[CLK_TOP_MUX_AUD_2], ret);
+			goto ERR_ENABLE_CLK_TOP_MUX_AUD_2;
+		}
+		ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_2],
+				     afe_priv->clk[CLK_TOP_APLL2_CK]);
+		if (ret) {
+			dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+				__func__, aud_clks[CLK_TOP_MUX_AUD_2],
+				aud_clks[CLK_TOP_APLL2_CK], ret);
+			goto ERR_SELECT_CLK_TOP_MUX_AUD_2;
+		}
+
+		/* 196.608 / 8 = 24.576MHz */
+		ret = clk_prepare_enable(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2]);
+		if (ret) {
+			dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+				__func__, aud_clks[CLK_TOP_MUX_AUD_ENG2], ret);
+			goto ERR_ENABLE_CLK_TOP_MUX_AUD_ENG2;
+		}
+		ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2],
+				     afe_priv->clk[CLK_TOP_APLL2_D8]);
+		if (ret) {
+			dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+				__func__, aud_clks[CLK_TOP_MUX_AUD_ENG2],
+				aud_clks[CLK_TOP_APLL2_D8], ret);
+			goto ERR_SELECT_CLK_TOP_MUX_AUD_ENG2;
+		}
+	} else {
+		ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2],
+				     afe_priv->clk[CLK_CLK26M]);
+		if (ret) {
+			dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+				__func__, aud_clks[CLK_TOP_MUX_AUD_ENG2],
+				aud_clks[CLK_CLK26M], ret);
+			goto EXIT;
+		}
+		clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2]);
+
+		ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_2],
+				     afe_priv->clk[CLK_CLK26M]);
+		if (ret) {
+			dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+				__func__, aud_clks[CLK_TOP_MUX_AUD_2],
+				aud_clks[CLK_CLK26M], ret);
+			goto EXIT;
+		}
+		clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_2]);
+	}
+
+	return 0;
+
+ERR_SELECT_CLK_TOP_MUX_AUD_ENG2:
+	clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2],
+		       afe_priv->clk[CLK_CLK26M]);
+	clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2]);
+ERR_ENABLE_CLK_TOP_MUX_AUD_ENG2:
+ERR_SELECT_CLK_TOP_MUX_AUD_2:
+	clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_2],
+		       afe_priv->clk[CLK_CLK26M]);
+	clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_2]);
+ERR_ENABLE_CLK_TOP_MUX_AUD_2:
+EXIT:
+	return ret;
+}
+
+int mt8183_apll1_enable(struct mtk_base_afe *afe)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	int ret;
+
+	/* setting for APLL */
+	apll1_mux_setting(afe, true);
+
+	ret = clk_prepare_enable(afe_priv->clk[CLK_APLL22M]);
+	if (ret) {
+		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[CLK_APLL22M], ret);
+		goto ERR_CLK_APLL22M;
+	}
+
+	ret = clk_prepare_enable(afe_priv->clk[CLK_APLL1_TUNER]);
+	if (ret) {
+		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[CLK_APLL1_TUNER], ret);
+		goto ERR_CLK_APLL1_TUNER;
+	}
+
+	regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG,
+			   0x0000FFF7, 0x00000832);
+	regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG, 0x1, 0x1);
+
+	regmap_update_bits(afe->regmap, AFE_HD_ENGEN_ENABLE,
+			   AFE_22M_ON_MASK_SFT,
+			   0x1 << AFE_22M_ON_SFT);
+
+	return 0;
+
+ERR_CLK_APLL1_TUNER:
+	clk_disable_unprepare(afe_priv->clk[CLK_APLL22M]);
+ERR_CLK_APLL22M:
+	return ret;
+}
+
+void mt8183_apll1_disable(struct mtk_base_afe *afe)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+
+	regmap_update_bits(afe->regmap, AFE_HD_ENGEN_ENABLE,
+			   AFE_22M_ON_MASK_SFT,
+			   0x0 << AFE_22M_ON_SFT);
+
+	regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG, 0x1, 0x0);
+
+	clk_disable_unprepare(afe_priv->clk[CLK_APLL1_TUNER]);
+	clk_disable_unprepare(afe_priv->clk[CLK_APLL22M]);
+
+	apll1_mux_setting(afe, false);
+}
+
+int mt8183_apll2_enable(struct mtk_base_afe *afe)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	int ret;
+
+	/* setting for APLL */
+	apll2_mux_setting(afe, true);
+
+	ret = clk_prepare_enable(afe_priv->clk[CLK_APLL24M]);
+	if (ret) {
+		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[CLK_APLL24M], ret);
+		goto ERR_CLK_APLL24M;
+	}
+
+	ret = clk_prepare_enable(afe_priv->clk[CLK_APLL2_TUNER]);
+	if (ret) {
+		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[CLK_APLL2_TUNER], ret);
+		goto ERR_CLK_APLL2_TUNER;
+	}
+
+	regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG,
+			   0x0000FFF7, 0x00000634);
+	regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG, 0x1, 0x1);
+
+	regmap_update_bits(afe->regmap, AFE_HD_ENGEN_ENABLE,
+			   AFE_24M_ON_MASK_SFT,
+			   0x1 << AFE_24M_ON_SFT);
+
+	return 0;
+
+ERR_CLK_APLL2_TUNER:
+	clk_disable_unprepare(afe_priv->clk[CLK_APLL24M]);
+ERR_CLK_APLL24M:
+	return ret;
+}
+
+void mt8183_apll2_disable(struct mtk_base_afe *afe)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+
+	regmap_update_bits(afe->regmap, AFE_HD_ENGEN_ENABLE,
+			   AFE_24M_ON_MASK_SFT,
+			   0x0 << AFE_24M_ON_SFT);
+
+	regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG, 0x1, 0x0);
+
+	clk_disable_unprepare(afe_priv->clk[CLK_APLL2_TUNER]);
+	clk_disable_unprepare(afe_priv->clk[CLK_APLL24M]);
+
+	apll2_mux_setting(afe, false);
+}
+
+int mt8183_get_apll_rate(struct mtk_base_afe *afe, int apll)
+{
+	return (apll == MT8183_APLL1) ? 180633600 : 196608000;
+}
+
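+/*
+ * Rates divisible by 8000 (the 8/16/32/48/96/192kHz family) are served
+ * by APLL2 (196.608MHz = 4096 * 48kHz); the 44.1kHz family maps to
+ * APLL1 (180.6336MHz = 4096 * 44.1kHz).
+ */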
+int mt8183_get_apll_by_rate(struct mtk_base_afe *afe, int rate)
+{
+	return ((rate % 8000) == 0) ? MT8183_APLL2 : MT8183_APLL1;
+}
+
+int mt8183_get_apll_by_name(struct mtk_base_afe *afe, const char *name)
+{
+	if (strcmp(name, APLL1_W_NAME) == 0)
+		return MT8183_APLL1;
+	else
+		return MT8183_APLL2;
+}
+
+/* mck */
+struct mt8183_mck_div {
+	int m_sel_id;
+	int div_clk_id;
+};
+
+static const struct mt8183_mck_div mck_div[MT8183_MCK_NUM] = {
+	[MT8183_I2S0_MCK] = {
+		.m_sel_id = CLK_TOP_I2S0_M_SEL,
+		.div_clk_id = CLK_TOP_APLL12_DIV0,
+	},
+	[MT8183_I2S1_MCK] = {
+		.m_sel_id = CLK_TOP_I2S1_M_SEL,
+		.div_clk_id = CLK_TOP_APLL12_DIV1,
+	},
+	[MT8183_I2S2_MCK] = {
+		.m_sel_id = CLK_TOP_I2S2_M_SEL,
+		.div_clk_id = CLK_TOP_APLL12_DIV2,
+	},
+	[MT8183_I2S3_MCK] = {
+		.m_sel_id = CLK_TOP_I2S3_M_SEL,
+		.div_clk_id = CLK_TOP_APLL12_DIV3,
+	},
+	[MT8183_I2S4_MCK] = {
+		.m_sel_id = CLK_TOP_I2S4_M_SEL,
+		.div_clk_id = CLK_TOP_APLL12_DIV4,
+	},
+	[MT8183_I2S4_BCK] = {
+		.m_sel_id = -1,
+		.div_clk_id = CLK_TOP_APLL12_DIVB,
+	},
+	[MT8183_I2S5_MCK] = {
+		.m_sel_id = -1,
+		.div_clk_id = -1,
+	},
+};
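+
+/*
+ * An id of -1 above means the clock has no dedicated mux (or divider);
+ * mt8183_mck_enable() skips the corresponding stage.
+ */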
+
+int mt8183_mck_enable(struct mtk_base_afe *afe, int mck_id, int rate)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	int apll = mt8183_get_apll_by_rate(afe, rate);
+	int apll_clk_id = apll == MT8183_APLL1 ?
+			  CLK_TOP_MUX_AUD_1 : CLK_TOP_MUX_AUD_2;
+	int m_sel_id = mck_div[mck_id].m_sel_id;
+	int div_clk_id = mck_div[mck_id].div_clk_id;
+	int ret;
+
+	/* i2s5 mck is not supported */
+	if (mck_id == MT8183_I2S5_MCK)
+		return 0;
+
+	/* select apll */
+	if (m_sel_id >= 0) {
+		ret = clk_prepare_enable(afe_priv->clk[m_sel_id]);
+		if (ret) {
+			dev_err(afe->dev, "%s(), clk_prepare_enable %s fail %d\n",
+				__func__, aud_clks[m_sel_id], ret);
+			goto ERR_ENABLE_MCLK;
+		}
+		ret = clk_set_parent(afe_priv->clk[m_sel_id],
+				     afe_priv->clk[apll_clk_id]);
+		if (ret) {
+			dev_err(afe->dev, "%s(), clk_set_parent %s-%s fail %d\n",
+				__func__, aud_clks[m_sel_id],
+				aud_clks[apll_clk_id], ret);
+			goto ERR_SELECT_MCLK;
+		}
+	}
+
+	/* enable div, set rate */
+	ret = clk_prepare_enable(afe_priv->clk[div_clk_id]);
+	if (ret) {
+		dev_err(afe->dev, "%s(), clk_prepare_enable %s fail %d\n",
+			__func__, aud_clks[div_clk_id], ret);
+		goto ERR_ENABLE_MCLK_DIV;
+	}
+	ret = clk_set_rate(afe_priv->clk[div_clk_id], rate);
+	if (ret) {
+		dev_err(afe->dev, "%s(), clk_set_rate %s, rate %d, fail %d\n",
+			__func__, aud_clks[div_clk_id],
+			rate, ret);
+		goto ERR_SET_MCLK_RATE;
+	}
+
+	return 0;
+
+ERR_SET_MCLK_RATE:
+	clk_disable_unprepare(afe_priv->clk[div_clk_id]);
+ERR_ENABLE_MCLK_DIV:
+ERR_SELECT_MCLK:
+	if (m_sel_id >= 0)
+		clk_disable_unprepare(afe_priv->clk[m_sel_id]);
+ERR_ENABLE_MCLK:
+	return ret;
+}
+
+void mt8183_mck_disable(struct mtk_base_afe *afe, int mck_id)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	int m_sel_id = mck_div[mck_id].m_sel_id;
+	int div_clk_id = mck_div[mck_id].div_clk_id;
+
+	/* i2s5 mck is not supported */
+	if (mck_id == MT8183_I2S5_MCK)
+		return;
+
+	clk_disable_unprepare(afe_priv->clk[div_clk_id]);
+	if (m_sel_id >= 0)
+		clk_disable_unprepare(afe_priv->clk[m_sel_id]);
+}
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-clk.h b/sound/soc/mediatek/mt8183/mt8183-afe-clk.h
new file mode 100644
index 0000000..2c510aa
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-clk.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mt8183-afe-clk.h  --  Mediatek 8183 afe clock ctrl definition
+ *
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+ */
+
+#ifndef _MT8183_AFE_CLK_H_
+#define _MT8183_AFE_CLK_H_
+
+/* APLL */
+#define APLL1_W_NAME "APLL1"
+#define APLL2_W_NAME "APLL2"
+enum {
+	MT8183_APLL1 = 0,
+	MT8183_APLL2,
+};
+
+struct mtk_base_afe;
+
+int mt8183_init_clock(struct mtk_base_afe *afe);
+int mt8183_afe_enable_clock(struct mtk_base_afe *afe);
+int mt8183_afe_disable_clock(struct mtk_base_afe *afe);
+
+int mt8183_apll1_enable(struct mtk_base_afe *afe);
+void mt8183_apll1_disable(struct mtk_base_afe *afe);
+
+int mt8183_apll2_enable(struct mtk_base_afe *afe);
+void mt8183_apll2_disable(struct mtk_base_afe *afe);
+
+int mt8183_get_apll_rate(struct mtk_base_afe *afe, int apll);
+int mt8183_get_apll_by_rate(struct mtk_base_afe *afe, int rate);
+int mt8183_get_apll_by_name(struct mtk_base_afe *afe, const char *name);
+
+int mt8183_mck_enable(struct mtk_base_afe *afe, int mck_id, int rate);
+void mt8183_mck_disable(struct mtk_base_afe *afe, int mck_id);
+#endif
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-common.h b/sound/soc/mediatek/mt8183/mt8183-afe-common.h
new file mode 100644
index 0000000..b220e7a
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-common.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mt8183-afe-common.h  --  MediaTek MT8183 audio driver definitions
+ *
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+ */
+
+#ifndef _MT_8183_AFE_COMMON_H_
+#define _MT_8183_AFE_COMMON_H_
+
+#include <sound/soc.h>
+#include <linux/list.h>
+#include <linux/regmap.h>
+#include "../common/mtk-base-afe.h"
+
+enum {
+	MT8183_MEMIF_DL1,
+	MT8183_MEMIF_DL2,
+	MT8183_MEMIF_DL3,
+	MT8183_MEMIF_VUL12,
+	MT8183_MEMIF_VUL2,
+	MT8183_MEMIF_AWB,
+	MT8183_MEMIF_AWB2,
+	MT8183_MEMIF_MOD_DAI,
+	MT8183_MEMIF_HDMI,
+	MT8183_MEMIF_NUM,
+	MT8183_DAI_ADDA = MT8183_MEMIF_NUM,
+	MT8183_DAI_PCM_1,
+	MT8183_DAI_PCM_2,
+	MT8183_DAI_I2S_0,
+	MT8183_DAI_I2S_1,
+	MT8183_DAI_I2S_2,
+	MT8183_DAI_I2S_3,
+	MT8183_DAI_I2S_5,
+	MT8183_DAI_TDM,
+	MT8183_DAI_HOSTLESS_LPBK,
+	MT8183_DAI_HOSTLESS_SPEECH,
+	MT8183_DAI_NUM,
+};
+
+enum {
+	MT8183_IRQ_0,
+	MT8183_IRQ_1,
+	MT8183_IRQ_2,
+	MT8183_IRQ_3,
+	MT8183_IRQ_4,
+	MT8183_IRQ_5,
+	MT8183_IRQ_6,
+	MT8183_IRQ_7,
+	MT8183_IRQ_8,	/* hw bundled to TDM */
+	MT8183_IRQ_11,
+	MT8183_IRQ_12,
+	MT8183_IRQ_NUM,
+};
+
+enum {
+	MT8183_MTKAIF_PROTOCOL_1 = 0,
+	MT8183_MTKAIF_PROTOCOL_2,
+	MT8183_MTKAIF_PROTOCOL_2_CLK_P2,
+};
+
+/* MCLK */
+enum {
+	MT8183_I2S0_MCK = 0,
+	MT8183_I2S1_MCK,
+	MT8183_I2S2_MCK,
+	MT8183_I2S3_MCK,
+	MT8183_I2S4_MCK,
+	MT8183_I2S4_BCK,
+	MT8183_I2S5_MCK,
+	MT8183_MCK_NUM,
+};
+
+struct clk;
+
+struct mt8183_afe_private {
+	struct clk **clk;
+
+	int pm_runtime_bypass_reg_ctl;
+
+	/* dai */
+	void *dai_priv[MT8183_DAI_NUM];
+
+	/* adda */
+	int mtkaif_protocol;
+	int mtkaif_calibration_ok;
+	int mtkaif_chosen_phase[4];
+	int mtkaif_phase_cycle[4];
+	int mtkaif_calibration_num_phase;
+	int mtkaif_dmic;
+
+	/* mck */
+	int mck_rate[MT8183_MCK_NUM];
+};
+
+unsigned int mt8183_general_rate_transform(struct device *dev,
+					   unsigned int rate);
+unsigned int mt8183_rate_transform(struct device *dev,
+				   unsigned int rate, int aud_blk);
+
+/* dai register */
+int mt8183_dai_adda_register(struct mtk_base_afe *afe);
+int mt8183_dai_pcm_register(struct mtk_base_afe *afe);
+int mt8183_dai_i2s_register(struct mtk_base_afe *afe);
+int mt8183_dai_tdm_register(struct mtk_base_afe *afe);
+int mt8183_dai_hostless_register(struct mtk_base_afe *afe);
+#endif
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
new file mode 100644
index 0000000..3a82518
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
@@ -0,0 +1,1235 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC AFE platform driver for MT8183
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+
+#include "mt8183-afe-common.h"
+#include "mt8183-afe-clk.h"
+#include "mt8183-interconnection.h"
+#include "mt8183-reg.h"
+#include "../common/mtk-afe-platform-driver.h"
+#include "../common/mtk-afe-fe-dai.h"
+
+enum {
+	MTK_AFE_RATE_8K = 0,
+	MTK_AFE_RATE_11K = 1,
+	MTK_AFE_RATE_12K = 2,
+	MTK_AFE_RATE_384K = 3,
+	MTK_AFE_RATE_16K = 4,
+	MTK_AFE_RATE_22K = 5,
+	MTK_AFE_RATE_24K = 6,
+	MTK_AFE_RATE_130K = 7,
+	MTK_AFE_RATE_32K = 8,
+	MTK_AFE_RATE_44K = 9,
+	MTK_AFE_RATE_48K = 10,
+	MTK_AFE_RATE_88K = 11,
+	MTK_AFE_RATE_96K = 12,
+	MTK_AFE_RATE_176K = 13,
+	MTK_AFE_RATE_192K = 14,
+	MTK_AFE_RATE_260K = 15,
+};
+
+enum {
+	MTK_AFE_DAI_MEMIF_RATE_8K = 0,
+	MTK_AFE_DAI_MEMIF_RATE_16K = 1,
+	MTK_AFE_DAI_MEMIF_RATE_32K = 2,
+	MTK_AFE_DAI_MEMIF_RATE_48K = 3,
+};
+
+enum {
+	MTK_AFE_PCM_RATE_8K = 0,
+	MTK_AFE_PCM_RATE_16K = 1,
+	MTK_AFE_PCM_RATE_32K = 2,
+	MTK_AFE_PCM_RATE_48K = 3,
+};
+
+unsigned int mt8183_general_rate_transform(struct device *dev,
+					   unsigned int rate)
+{
+	switch (rate) {
+	case 8000:
+		return MTK_AFE_RATE_8K;
+	case 11025:
+		return MTK_AFE_RATE_11K;
+	case 12000:
+		return MTK_AFE_RATE_12K;
+	case 16000:
+		return MTK_AFE_RATE_16K;
+	case 22050:
+		return MTK_AFE_RATE_22K;
+	case 24000:
+		return MTK_AFE_RATE_24K;
+	case 32000:
+		return MTK_AFE_RATE_32K;
+	case 44100:
+		return MTK_AFE_RATE_44K;
+	case 48000:
+		return MTK_AFE_RATE_48K;
+	case 88200:
+		return MTK_AFE_RATE_88K;
+	case 96000:
+		return MTK_AFE_RATE_96K;
+	case 130000:
+		return MTK_AFE_RATE_130K;
+	case 176400:
+		return MTK_AFE_RATE_176K;
+	case 192000:
+		return MTK_AFE_RATE_192K;
+	case 260000:
+		return MTK_AFE_RATE_260K;
+	default:
+		dev_warn(dev, "%s(), rate %u invalid, use %d!!!\n",
+			 __func__, rate, MTK_AFE_RATE_48K);
+		return MTK_AFE_RATE_48K;
+	}
+}
+
+static unsigned int dai_memif_rate_transform(struct device *dev,
+					     unsigned int rate)
+{
+	switch (rate) {
+	case 8000:
+		return MTK_AFE_DAI_MEMIF_RATE_8K;
+	case 16000:
+		return MTK_AFE_DAI_MEMIF_RATE_16K;
+	case 32000:
+		return MTK_AFE_DAI_MEMIF_RATE_32K;
+	case 48000:
+		return MTK_AFE_DAI_MEMIF_RATE_48K;
+	default:
+		dev_warn(dev, "%s(), rate %u invalid, use %d!!!\n",
+			 __func__, rate, MTK_AFE_DAI_MEMIF_RATE_16K);
+		return MTK_AFE_DAI_MEMIF_RATE_16K;
+	}
+}
+
+unsigned int mt8183_rate_transform(struct device *dev,
+				   unsigned int rate, int aud_blk)
+{
+	switch (aud_blk) {
+	case MT8183_MEMIF_MOD_DAI:
+		return dai_memif_rate_transform(dev, rate);
+	default:
+		return mt8183_general_rate_transform(dev, rate);
+	}
+}
+
+static const struct snd_pcm_hardware mt8183_afe_hardware = {
+	.info = SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_MMAP_VALID,
+	.formats = SNDRV_PCM_FMTBIT_S16_LE |
+		   SNDRV_PCM_FMTBIT_S24_LE |
+		   SNDRV_PCM_FMTBIT_S32_LE,
+	.period_bytes_min = 256,
+	.period_bytes_max = 4 * 48 * 1024,
+	.periods_min = 2,
+	.periods_max = 256,
+	.buffer_bytes_max = 8 * 48 * 1024,
+	.fifo_size = 0,
+};
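+
+/*
+ * Sizing note (illustrative arithmetic): buffer_bytes_max above is
+ * 8 * 48 * 1024 = 393216 bytes, i.e. slightly more than one second of
+ * 48kHz stereo S32_LE audio (48000 frames * 2 ch * 4 bytes = 384000
+ * bytes per second).
+ */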
+
+static int mt8183_memif_fs(struct snd_pcm_substream *substream,
+			   unsigned int rate)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_component *component =
+		snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
+	int id = rtd->cpu_dai->id;
+
+	return mt8183_rate_transform(afe->dev, rate, id);
+}
+
+static int mt8183_irq_fs(struct snd_pcm_substream *substream, unsigned int rate)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_component *component =
+		snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
+
+	return mt8183_general_rate_transform(afe->dev, rate);
+}
+
+#define MTK_PCM_RATES (SNDRV_PCM_RATE_8000_48000 |\
+		       SNDRV_PCM_RATE_88200 |\
+		       SNDRV_PCM_RATE_96000 |\
+		       SNDRV_PCM_RATE_176400 |\
+		       SNDRV_PCM_RATE_192000)
+
+#define MTK_PCM_DAI_RATES (SNDRV_PCM_RATE_8000 |\
+			   SNDRV_PCM_RATE_16000 |\
+			   SNDRV_PCM_RATE_32000 |\
+			   SNDRV_PCM_RATE_48000)
+
+#define MTK_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+			 SNDRV_PCM_FMTBIT_S24_LE |\
+			 SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mt8183_memif_dai_driver[] = {
+	/* FE DAIs: memory interfaces to CPU */
+	{
+		.name = "DL1",
+		.id = MT8183_MEMIF_DL1,
+		.playback = {
+			.stream_name = "DL1",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_PCM_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.ops = &mtk_afe_fe_ops,
+	},
+	{
+		.name = "DL2",
+		.id = MT8183_MEMIF_DL2,
+		.playback = {
+			.stream_name = "DL2",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_PCM_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.ops = &mtk_afe_fe_ops,
+	},
+	{
+		.name = "DL3",
+		.id = MT8183_MEMIF_DL3,
+		.playback = {
+			.stream_name = "DL3",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_PCM_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.ops = &mtk_afe_fe_ops,
+	},
+	{
+		.name = "UL1",
+		.id = MT8183_MEMIF_VUL12,
+		.capture = {
+			.stream_name = "UL1",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_PCM_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.ops = &mtk_afe_fe_ops,
+	},
+	{
+		.name = "UL2",
+		.id = MT8183_MEMIF_AWB,
+		.capture = {
+			.stream_name = "UL2",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_PCM_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.ops = &mtk_afe_fe_ops,
+	},
+	{
+		.name = "UL3",
+		.id = MT8183_MEMIF_VUL2,
+		.capture = {
+			.stream_name = "UL3",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_PCM_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.ops = &mtk_afe_fe_ops,
+	},
+	{
+		.name = "UL4",
+		.id = MT8183_MEMIF_AWB2,
+		.capture = {
+			.stream_name = "UL4",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_PCM_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.ops = &mtk_afe_fe_ops,
+	},
+	{
+		.name = "UL_MONO_1",
+		.id = MT8183_MEMIF_MOD_DAI,
+		.capture = {
+			.stream_name = "UL_MONO_1",
+			.channels_min = 1,
+			.channels_max = 1,
+			.rates = MTK_PCM_DAI_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.ops = &mtk_afe_fe_ops,
+	},
+	{
+		.name = "HDMI",
+		.id = MT8183_MEMIF_HDMI,
+		.playback = {
+			.stream_name = "HDMI",
+			.channels_min = 2,
+			.channels_max = 8,
+			.rates = MTK_PCM_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.ops = &mtk_afe_fe_ops,
+	},
+};
+
+/* dma widgets & routes */
+static const struct snd_kcontrol_new memif_ul1_ch1_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN21,
+				    I_ADDA_UL_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH1", AFE_CONN21,
+				    I_I2S0_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul1_ch2_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN22,
+				    I_ADDA_UL_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH2", AFE_CONN21,
+				    I_I2S0_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul2_ch1_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN5,
+				    I_ADDA_UL_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN5,
+				    I_DL1_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN5,
+				    I_DL2_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN5,
+				    I_DL3_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH1", AFE_CONN5,
+				    I_I2S2_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul2_ch2_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN6,
+				    I_ADDA_UL_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN6,
+				    I_DL1_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN6,
+				    I_DL2_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN6,
+				    I_DL3_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH2", AFE_CONN6,
+				    I_I2S2_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul3_ch1_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN32,
+				    I_ADDA_UL_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH1", AFE_CONN32,
+				    I_I2S2_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul3_ch2_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN33,
+				    I_ADDA_UL_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH2", AFE_CONN33,
+				    I_I2S2_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul4_ch1_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN38,
+				    I_ADDA_UL_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul4_ch2_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN39,
+				    I_ADDA_UL_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul_mono_1_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN12,
+				    I_ADDA_UL_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN12,
+				    I_ADDA_UL_CH2, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget mt8183_memif_widgets[] = {
+	/* memif */
+	SND_SOC_DAPM_MIXER("UL1_CH1", SND_SOC_NOPM, 0, 0,
+			   memif_ul1_ch1_mix, ARRAY_SIZE(memif_ul1_ch1_mix)),
+	SND_SOC_DAPM_MIXER("UL1_CH2", SND_SOC_NOPM, 0, 0,
+			   memif_ul1_ch2_mix, ARRAY_SIZE(memif_ul1_ch2_mix)),
+
+	SND_SOC_DAPM_MIXER("UL2_CH1", SND_SOC_NOPM, 0, 0,
+			   memif_ul2_ch1_mix, ARRAY_SIZE(memif_ul2_ch1_mix)),
+	SND_SOC_DAPM_MIXER("UL2_CH2", SND_SOC_NOPM, 0, 0,
+			   memif_ul2_ch2_mix, ARRAY_SIZE(memif_ul2_ch2_mix)),
+
+	SND_SOC_DAPM_MIXER("UL3_CH1", SND_SOC_NOPM, 0, 0,
+			   memif_ul3_ch1_mix, ARRAY_SIZE(memif_ul3_ch1_mix)),
+	SND_SOC_DAPM_MIXER("UL3_CH2", SND_SOC_NOPM, 0, 0,
+			   memif_ul3_ch2_mix, ARRAY_SIZE(memif_ul3_ch2_mix)),
+
+	SND_SOC_DAPM_MIXER("UL4_CH1", SND_SOC_NOPM, 0, 0,
+			   memif_ul4_ch1_mix, ARRAY_SIZE(memif_ul4_ch1_mix)),
+	SND_SOC_DAPM_MIXER("UL4_CH2", SND_SOC_NOPM, 0, 0,
+			   memif_ul4_ch2_mix, ARRAY_SIZE(memif_ul4_ch2_mix)),
+
+	SND_SOC_DAPM_MIXER("UL_MONO_1_CH1", SND_SOC_NOPM, 0, 0,
+			   memif_ul_mono_1_mix,
+			   ARRAY_SIZE(memif_ul_mono_1_mix)),
+};
+
+static const struct snd_soc_dapm_route mt8183_memif_routes[] = {
+	/* capture */
+	{"UL1", NULL, "UL1_CH1"},
+	{"UL1", NULL, "UL1_CH2"},
+	{"UL1_CH1", "ADDA_UL_CH1", "ADDA Capture"},
+	{"UL1_CH2", "ADDA_UL_CH2", "ADDA Capture"},
+	{"UL1_CH1", "I2S0_CH1", "I2S0"},
+	{"UL1_CH2", "I2S0_CH2", "I2S0"},
+
+	{"UL2", NULL, "UL2_CH1"},
+	{"UL2", NULL, "UL2_CH2"},
+	{"UL2_CH1", "ADDA_UL_CH1", "ADDA Capture"},
+	{"UL2_CH2", "ADDA_UL_CH2", "ADDA Capture"},
+	{"UL2_CH1", "I2S2_CH1", "I2S2"},
+	{"UL2_CH2", "I2S2_CH2", "I2S2"},
+
+	{"UL3", NULL, "UL3_CH1"},
+	{"UL3", NULL, "UL3_CH2"},
+	{"UL3_CH1", "ADDA_UL_CH1", "ADDA Capture"},
+	{"UL3_CH2", "ADDA_UL_CH2", "ADDA Capture"},
+	{"UL3_CH1", "I2S2_CH1", "I2S2"},
+	{"UL3_CH2", "I2S2_CH2", "I2S2"},
+
+	{"UL4", NULL, "UL4_CH1"},
+	{"UL4", NULL, "UL4_CH2"},
+	{"UL4_CH1", "ADDA_UL_CH1", "ADDA Capture"},
+	{"UL4_CH2", "ADDA_UL_CH2", "ADDA Capture"},
+
+	{"UL_MONO_1", NULL, "UL_MONO_1_CH1"},
+	{"UL_MONO_1_CH1", "ADDA_UL_CH1", "ADDA Capture"},
+	{"UL_MONO_1_CH1", "ADDA_UL_CH2", "ADDA Capture"},
+};
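+
+/*
+ * Reading the routes above (standard DAPM convention): each entry is
+ * {sink, control, source}.  For example,
+ * {"UL1_CH1", "ADDA_UL_CH1", "ADDA Capture"} connects the "ADDA Capture"
+ * stream to the "UL1_CH1" mixer through the "ADDA_UL_CH1" switch
+ * declared in memif_ul1_ch1_mix[].
+ */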
+
+static const struct snd_soc_component_driver mt8183_afe_pcm_dai_component = {
+	.name = "mt8183-afe-pcm-dai",
+};
+
+static const struct mtk_base_memif_data memif_data[MT8183_MEMIF_NUM] = {
+	[MT8183_MEMIF_DL1] = {
+		.name = "DL1",
+		.id = MT8183_MEMIF_DL1,
+		.reg_ofs_base = AFE_DL1_BASE,
+		.reg_ofs_cur = AFE_DL1_CUR,
+		.fs_reg = AFE_DAC_CON1,
+		.fs_shift = DL1_MODE_SFT,
+		.fs_maskbit = DL1_MODE_MASK,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = DL1_DATA_SFT,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = DL1_ON_SFT,
+		.hd_reg = AFE_MEMIF_HD_MODE,
+		.hd_shift = DL1_HD_SFT,
+		.agent_disable_reg = -1,
+		.msb_reg = -1,
+	},
+	[MT8183_MEMIF_DL2] = {
+		.name = "DL2",
+		.id = MT8183_MEMIF_DL2,
+		.reg_ofs_base = AFE_DL2_BASE,
+		.reg_ofs_cur = AFE_DL2_CUR,
+		.fs_reg = AFE_DAC_CON1,
+		.fs_shift = DL2_MODE_SFT,
+		.fs_maskbit = DL2_MODE_MASK,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = DL2_DATA_SFT,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = DL2_ON_SFT,
+		.hd_reg = AFE_MEMIF_HD_MODE,
+		.hd_shift = DL2_HD_SFT,
+		.agent_disable_reg = -1,
+		.msb_reg = -1,
+	},
+	[MT8183_MEMIF_DL3] = {
+		.name = "DL3",
+		.id = MT8183_MEMIF_DL3,
+		.reg_ofs_base = AFE_DL3_BASE,
+		.reg_ofs_cur = AFE_DL3_CUR,
+		.fs_reg = AFE_DAC_CON2,
+		.fs_shift = DL3_MODE_SFT,
+		.fs_maskbit = DL3_MODE_MASK,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = DL3_DATA_SFT,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = DL3_ON_SFT,
+		.hd_reg = AFE_MEMIF_HD_MODE,
+		.hd_shift = DL3_HD_SFT,
+		.agent_disable_reg = -1,
+		.msb_reg = -1,
+	},
+	[MT8183_MEMIF_VUL2] = {
+		.name = "VUL2",
+		.id = MT8183_MEMIF_VUL2,
+		.reg_ofs_base = AFE_VUL2_BASE,
+		.reg_ofs_cur = AFE_VUL2_CUR,
+		.fs_reg = AFE_DAC_CON2,
+		.fs_shift = VUL2_MODE_SFT,
+		.fs_maskbit = VUL2_MODE_MASK,
+		.mono_reg = AFE_DAC_CON2,
+		.mono_shift = VUL2_DATA_SFT,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = VUL2_ON_SFT,
+		.hd_reg = AFE_MEMIF_HD_MODE,
+		.hd_shift = VUL2_HD_SFT,
+		.agent_disable_reg = -1,
+		.msb_reg = -1,
+	},
+	[MT8183_MEMIF_AWB] = {
+		.name = "AWB",
+		.id = MT8183_MEMIF_AWB,
+		.reg_ofs_base = AFE_AWB_BASE,
+		.reg_ofs_cur = AFE_AWB_CUR,
+		.fs_reg = AFE_DAC_CON1,
+		.fs_shift = AWB_MODE_SFT,
+		.fs_maskbit = AWB_MODE_MASK,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = AWB_DATA_SFT,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = AWB_ON_SFT,
+		.hd_reg = AFE_MEMIF_HD_MODE,
+		.hd_shift = AWB_HD_SFT,
+		.agent_disable_reg = -1,
+		.msb_reg = -1,
+	},
+	[MT8183_MEMIF_AWB2] = {
+		.name = "AWB2",
+		.id = MT8183_MEMIF_AWB2,
+		.reg_ofs_base = AFE_AWB2_BASE,
+		.reg_ofs_cur = AFE_AWB2_CUR,
+		.fs_reg = AFE_DAC_CON2,
+		.fs_shift = AWB2_MODE_SFT,
+		.fs_maskbit = AWB2_MODE_MASK,
+		.mono_reg = AFE_DAC_CON2,
+		.mono_shift = AWB2_DATA_SFT,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = AWB2_ON_SFT,
+		.hd_reg = AFE_MEMIF_HD_MODE,
+		.hd_shift = AWB2_HD_SFT,
+		.agent_disable_reg = -1,
+		.msb_reg = -1,
+	},
+	[MT8183_MEMIF_VUL12] = {
+		.name = "VUL12",
+		.id = MT8183_MEMIF_VUL12,
+		.reg_ofs_base = AFE_VUL_D2_BASE,
+		.reg_ofs_cur = AFE_VUL_D2_CUR,
+		.fs_reg = AFE_DAC_CON0,
+		.fs_shift = VUL12_MODE_SFT,
+		.fs_maskbit = VUL12_MODE_MASK,
+		.mono_reg = AFE_DAC_CON0,
+		.mono_shift = VUL12_MONO_SFT,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = VUL12_ON_SFT,
+		.hd_reg = AFE_MEMIF_HD_MODE,
+		.hd_shift = VUL12_HD_SFT,
+		.agent_disable_reg = -1,
+		.msb_reg = -1,
+	},
+	[MT8183_MEMIF_MOD_DAI] = {
+		.name = "MOD_DAI",
+		.id = MT8183_MEMIF_MOD_DAI,
+		.reg_ofs_base = AFE_MOD_DAI_BASE,
+		.reg_ofs_cur = AFE_MOD_DAI_CUR,
+		.fs_reg = AFE_DAC_CON1,
+		.fs_shift = MOD_DAI_MODE_SFT,
+		.fs_maskbit = MOD_DAI_MODE_MASK,
+		.mono_reg = -1,
+		.mono_shift = 0,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = MOD_DAI_ON_SFT,
+		.hd_reg = AFE_MEMIF_HD_MODE,
+		.hd_shift = MOD_DAI_HD_SFT,
+		.agent_disable_reg = -1,
+		.msb_reg = -1,
+	},
+	[MT8183_MEMIF_HDMI] = {
+		.name = "HDMI",
+		.id = MT8183_MEMIF_HDMI,
+		.reg_ofs_base = AFE_HDMI_OUT_BASE,
+		.reg_ofs_cur = AFE_HDMI_OUT_CUR,
+		.fs_reg = -1,
+		.fs_shift = -1,
+		.fs_maskbit = -1,
+		.mono_reg = -1,
+		.mono_shift = -1,
+		.enable_reg = -1,	/* controlled in tdm for sync start */
+		.hd_reg = AFE_MEMIF_HD_MODE,
+		.hd_shift = HDMI_HD_SFT,
+		.agent_disable_reg = -1,
+		.msb_reg = -1,
+	},
+};
+
+static const struct mtk_base_irq_data irq_data[MT8183_IRQ_NUM] = {
+	[MT8183_IRQ_0] = {
+		.id = MT8183_IRQ_0,
+		.irq_cnt_reg = AFE_IRQ_MCU_CNT0,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON1,
+		.irq_fs_shift = IRQ0_MCU_MODE_SFT,
+		.irq_fs_maskbit = IRQ0_MCU_MODE_MASK,
+		.irq_en_reg = AFE_IRQ_MCU_CON0,
+		.irq_en_shift = IRQ0_MCU_ON_SFT,
+		.irq_clr_reg = AFE_IRQ_MCU_CLR,
+		.irq_clr_shift = IRQ0_MCU_CLR_SFT,
+	},
+	[MT8183_IRQ_1] = {
+		.id = MT8183_IRQ_1,
+		.irq_cnt_reg = AFE_IRQ_MCU_CNT1,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON1,
+		.irq_fs_shift = IRQ1_MCU_MODE_SFT,
+		.irq_fs_maskbit = IRQ1_MCU_MODE_MASK,
+		.irq_en_reg = AFE_IRQ_MCU_CON0,
+		.irq_en_shift = IRQ1_MCU_ON_SFT,
+		.irq_clr_reg = AFE_IRQ_MCU_CLR,
+		.irq_clr_shift = IRQ1_MCU_CLR_SFT,
+	},
+	[MT8183_IRQ_2] = {
+		.id = MT8183_IRQ_2,
+		.irq_cnt_reg = AFE_IRQ_MCU_CNT2,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON1,
+		.irq_fs_shift = IRQ2_MCU_MODE_SFT,
+		.irq_fs_maskbit = IRQ2_MCU_MODE_MASK,
+		.irq_en_reg = AFE_IRQ_MCU_CON0,
+		.irq_en_shift = IRQ2_MCU_ON_SFT,
+		.irq_clr_reg = AFE_IRQ_MCU_CLR,
+		.irq_clr_shift = IRQ2_MCU_CLR_SFT,
+	},
+	[MT8183_IRQ_3] = {
+		.id = MT8183_IRQ_3,
+		.irq_cnt_reg = AFE_IRQ_MCU_CNT3,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON1,
+		.irq_fs_shift = IRQ3_MCU_MODE_SFT,
+		.irq_fs_maskbit = IRQ3_MCU_MODE_MASK,
+		.irq_en_reg = AFE_IRQ_MCU_CON0,
+		.irq_en_shift = IRQ3_MCU_ON_SFT,
+		.irq_clr_reg = AFE_IRQ_MCU_CLR,
+		.irq_clr_shift = IRQ3_MCU_CLR_SFT,
+	},
+	[MT8183_IRQ_4] = {
+		.id = MT8183_IRQ_4,
+		.irq_cnt_reg = AFE_IRQ_MCU_CNT4,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON1,
+		.irq_fs_shift = IRQ4_MCU_MODE_SFT,
+		.irq_fs_maskbit = IRQ4_MCU_MODE_MASK,
+		.irq_en_reg = AFE_IRQ_MCU_CON0,
+		.irq_en_shift = IRQ4_MCU_ON_SFT,
+		.irq_clr_reg = AFE_IRQ_MCU_CLR,
+		.irq_clr_shift = IRQ4_MCU_CLR_SFT,
+	},
+	[MT8183_IRQ_5] = {
+		.id = MT8183_IRQ_5,
+		.irq_cnt_reg = AFE_IRQ_MCU_CNT5,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON1,
+		.irq_fs_shift = IRQ5_MCU_MODE_SFT,
+		.irq_fs_maskbit = IRQ5_MCU_MODE_MASK,
+		.irq_en_reg = AFE_IRQ_MCU_CON0,
+		.irq_en_shift = IRQ5_MCU_ON_SFT,
+		.irq_clr_reg = AFE_IRQ_MCU_CLR,
+		.irq_clr_shift = IRQ5_MCU_CLR_SFT,
+	},
+	[MT8183_IRQ_6] = {
+		.id = MT8183_IRQ_6,
+		.irq_cnt_reg = AFE_IRQ_MCU_CNT6,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON1,
+		.irq_fs_shift = IRQ6_MCU_MODE_SFT,
+		.irq_fs_maskbit = IRQ6_MCU_MODE_MASK,
+		.irq_en_reg = AFE_IRQ_MCU_CON0,
+		.irq_en_shift = IRQ6_MCU_ON_SFT,
+		.irq_clr_reg = AFE_IRQ_MCU_CLR,
+		.irq_clr_shift = IRQ6_MCU_CLR_SFT,
+	},
+	[MT8183_IRQ_7] = {
+		.id = MT8183_IRQ_7,
+		.irq_cnt_reg = AFE_IRQ_MCU_CNT7,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON1,
+		.irq_fs_shift = IRQ7_MCU_MODE_SFT,
+		.irq_fs_maskbit = IRQ7_MCU_MODE_MASK,
+		.irq_en_reg = AFE_IRQ_MCU_CON0,
+		.irq_en_shift = IRQ7_MCU_ON_SFT,
+		.irq_clr_reg = AFE_IRQ_MCU_CLR,
+		.irq_clr_shift = IRQ7_MCU_CLR_SFT,
+	},
+	[MT8183_IRQ_8] = {
+		.id = MT8183_IRQ_8,
+		.irq_cnt_reg = AFE_IRQ_MCU_CNT8,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = -1,
+		.irq_fs_maskbit = -1,
+		.irq_en_reg = AFE_IRQ_MCU_CON0,
+		.irq_en_shift = IRQ8_MCU_ON_SFT,
+		.irq_clr_reg = AFE_IRQ_MCU_CLR,
+		.irq_clr_shift = IRQ8_MCU_CLR_SFT,
+	},
+	[MT8183_IRQ_11] = {
+		.id = MT8183_IRQ_11,
+		.irq_cnt_reg = AFE_IRQ_MCU_CNT11,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON2,
+		.irq_fs_shift = IRQ11_MCU_MODE_SFT,
+		.irq_fs_maskbit = IRQ11_MCU_MODE_MASK,
+		.irq_en_reg = AFE_IRQ_MCU_CON0,
+		.irq_en_shift = IRQ11_MCU_ON_SFT,
+		.irq_clr_reg = AFE_IRQ_MCU_CLR,
+		.irq_clr_shift = IRQ11_MCU_CLR_SFT,
+	},
+	[MT8183_IRQ_12] = {
+		.id = MT8183_IRQ_12,
+		.irq_cnt_reg = AFE_IRQ_MCU_CNT12,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON2,
+		.irq_fs_shift = IRQ12_MCU_MODE_SFT,
+		.irq_fs_maskbit = IRQ12_MCU_MODE_MASK,
+		.irq_en_reg = AFE_IRQ_MCU_CON0,
+		.irq_en_shift = IRQ12_MCU_ON_SFT,
+		.irq_clr_reg = AFE_IRQ_MCU_CLR,
+		.irq_clr_shift = IRQ12_MCU_CLR_SFT,
+	},
+};
+
+static bool mt8183_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+	/* these auto-gen regs have read-only bits, so mark them volatile */
+	/* volatile regs cannot be cached, so cannot be restored after power off */
+	switch (reg) {
+	case AUDIO_TOP_CON0:	/* reg bit controlled by CCF */
+	case AUDIO_TOP_CON1:	/* reg bit controlled by CCF */
+	case AUDIO_TOP_CON3:
+	case AFE_DL1_CUR:
+	case AFE_DL1_END:
+	case AFE_DL2_CUR:
+	case AFE_DL2_END:
+	case AFE_AWB_END:
+	case AFE_AWB_CUR:
+	case AFE_VUL_END:
+	case AFE_VUL_CUR:
+	case AFE_MEMIF_MON0:
+	case AFE_MEMIF_MON1:
+	case AFE_MEMIF_MON2:
+	case AFE_MEMIF_MON3:
+	case AFE_MEMIF_MON4:
+	case AFE_MEMIF_MON5:
+	case AFE_MEMIF_MON6:
+	case AFE_MEMIF_MON7:
+	case AFE_MEMIF_MON8:
+	case AFE_MEMIF_MON9:
+	case AFE_ADDA_SRC_DEBUG_MON0:
+	case AFE_ADDA_SRC_DEBUG_MON1:
+	case AFE_ADDA_UL_SRC_MON0:
+	case AFE_ADDA_UL_SRC_MON1:
+	case AFE_SIDETONE_MON:
+	case AFE_SIDETONE_CON0:
+	case AFE_SIDETONE_COEFF:
+	case AFE_BUS_MON0:
+	case AFE_MRGIF_MON0:
+	case AFE_MRGIF_MON1:
+	case AFE_MRGIF_MON2:
+	case AFE_I2S_MON:
+	case AFE_DAC_MON:
+	case AFE_VUL2_END:
+	case AFE_VUL2_CUR:
+	case AFE_IRQ0_MCU_CNT_MON:
+	case AFE_IRQ6_MCU_CNT_MON:
+	case AFE_MOD_DAI_END:
+	case AFE_MOD_DAI_CUR:
+	case AFE_VUL_D2_END:
+	case AFE_VUL_D2_CUR:
+	case AFE_DL3_CUR:
+	case AFE_DL3_END:
+	case AFE_HDMI_OUT_CON0:
+	case AFE_HDMI_OUT_CUR:
+	case AFE_HDMI_OUT_END:
+	case AFE_IRQ3_MCU_CNT_MON:
+	case AFE_IRQ4_MCU_CNT_MON:
+	case AFE_IRQ_MCU_STATUS:
+	case AFE_IRQ_MCU_CLR:
+	case AFE_IRQ_MCU_MON2:
+	case AFE_IRQ1_MCU_CNT_MON:
+	case AFE_IRQ2_MCU_CNT_MON:
+	case AFE_IRQ1_MCU_EN_CNT_MON:
+	case AFE_IRQ5_MCU_CNT_MON:
+	case AFE_IRQ7_MCU_CNT_MON:
+	case AFE_GAIN1_CUR:
+	case AFE_GAIN2_CUR:
+	case AFE_SRAM_DELSEL_CON0:
+	case AFE_SRAM_DELSEL_CON2:
+	case AFE_SRAM_DELSEL_CON3:
+	case AFE_ASRC_2CH_CON12:
+	case AFE_ASRC_2CH_CON13:
+	case PCM_INTF_CON2:
+	case FPGA_CFG0:
+	case FPGA_CFG1:
+	case FPGA_CFG2:
+	case FPGA_CFG3:
+	case AUDIO_TOP_DBG_MON0:
+	case AUDIO_TOP_DBG_MON1:
+	case AFE_IRQ8_MCU_CNT_MON:
+	case AFE_IRQ11_MCU_CNT_MON:
+	case AFE_IRQ12_MCU_CNT_MON:
+	case AFE_CBIP_MON0:
+	case AFE_CBIP_SLV_MUX_MON0:
+	case AFE_CBIP_SLV_DECODER_MON0:
+	case AFE_ADDA6_SRC_DEBUG_MON0:
+	case AFE_ADD6A_UL_SRC_MON0:
+	case AFE_ADDA6_UL_SRC_MON1:
+	case AFE_DL1_CUR_MSB:
+	case AFE_DL2_CUR_MSB:
+	case AFE_AWB_CUR_MSB:
+	case AFE_VUL_CUR_MSB:
+	case AFE_VUL2_CUR_MSB:
+	case AFE_MOD_DAI_CUR_MSB:
+	case AFE_VUL_D2_CUR_MSB:
+	case AFE_DL3_CUR_MSB:
+	case AFE_HDMI_OUT_CUR_MSB:
+	case AFE_AWB2_END:
+	case AFE_AWB2_CUR:
+	case AFE_AWB2_CUR_MSB:
+	case AFE_ADDA_DL_SDM_FIFO_MON:
+	case AFE_ADDA_DL_SRC_LCH_MON:
+	case AFE_ADDA_DL_SRC_RCH_MON:
+	case AFE_ADDA_DL_SDM_OUT_MON:
+	case AFE_CONNSYS_I2S_MON:
+	case AFE_ASRC_2CH_CON0:
+	case AFE_ASRC_2CH_CON2:
+	case AFE_ASRC_2CH_CON3:
+	case AFE_ASRC_2CH_CON4:
+	case AFE_ASRC_2CH_CON5:
+	case AFE_ASRC_2CH_CON7:
+	case AFE_ASRC_2CH_CON8:
+	case AFE_MEMIF_MON12:
+	case AFE_MEMIF_MON13:
+	case AFE_MEMIF_MON14:
+	case AFE_MEMIF_MON15:
+	case AFE_MEMIF_MON16:
+	case AFE_MEMIF_MON17:
+	case AFE_MEMIF_MON18:
+	case AFE_MEMIF_MON19:
+	case AFE_MEMIF_MON20:
+	case AFE_MEMIF_MON21:
+	case AFE_MEMIF_MON22:
+	case AFE_MEMIF_MON23:
+	case AFE_MEMIF_MON24:
+	case AFE_ADDA_MTKAIF_MON0:
+	case AFE_ADDA_MTKAIF_MON1:
+	case AFE_AUD_PAD_TOP:
+	case AFE_GENERAL1_ASRC_2CH_CON0:
+	case AFE_GENERAL1_ASRC_2CH_CON2:
+	case AFE_GENERAL1_ASRC_2CH_CON3:
+	case AFE_GENERAL1_ASRC_2CH_CON4:
+	case AFE_GENERAL1_ASRC_2CH_CON5:
+	case AFE_GENERAL1_ASRC_2CH_CON7:
+	case AFE_GENERAL1_ASRC_2CH_CON8:
+	case AFE_GENERAL1_ASRC_2CH_CON12:
+	case AFE_GENERAL1_ASRC_2CH_CON13:
+	case AFE_GENERAL2_ASRC_2CH_CON0:
+	case AFE_GENERAL2_ASRC_2CH_CON2:
+	case AFE_GENERAL2_ASRC_2CH_CON3:
+	case AFE_GENERAL2_ASRC_2CH_CON4:
+	case AFE_GENERAL2_ASRC_2CH_CON5:
+	case AFE_GENERAL2_ASRC_2CH_CON7:
+	case AFE_GENERAL2_ASRC_2CH_CON8:
+	case AFE_GENERAL2_ASRC_2CH_CON12:
+	case AFE_GENERAL2_ASRC_2CH_CON13:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config mt8183_afe_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+
+	.volatile_reg = mt8183_is_volatile_reg,
+
+	.max_register = AFE_MAX_REGISTER,
+	.num_reg_defaults_raw = AFE_MAX_REGISTER,
+
+	.cache_type = REGCACHE_FLAT,
+};
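+
+/*
+ * Cache behaviour note (generic regmap semantics): with REGCACHE_FLAT,
+ * registers flagged by mt8183_is_volatile_reg() always go to hardware,
+ * while all others are served from the cache.  This is what allows the
+ * runtime-PM callbacks below to go cache-only on suspend and
+ * regcache_sync() the dirty registers back on resume.
+ */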
+
+static irqreturn_t mt8183_afe_irq_handler(int irq_id, void *dev)
+{
+	struct mtk_base_afe *afe = dev;
+	struct mtk_base_afe_irq *irq;
+	unsigned int status;
+	unsigned int status_mcu;
+	unsigned int mcu_en;
+	int ret;
+	int i;
+	irqreturn_t irq_ret = IRQ_HANDLED;
+
+	/* get irqs that are sent to MCU */
+	regmap_read(afe->regmap, AFE_IRQ_MCU_EN, &mcu_en);
+
+	ret = regmap_read(afe->regmap, AFE_IRQ_MCU_STATUS, &status);
+	/* only care about IRQs which are sent to MCU */
+	status_mcu = status & mcu_en & AFE_IRQ_STATUS_BITS;
+
+	if (ret || status_mcu == 0) {
+		dev_err(afe->dev, "%s(), irq status err, ret %d, status 0x%x, mcu_en 0x%x\n",
+			__func__, ret, status, mcu_en);
+
+		irq_ret = IRQ_NONE;
+		goto err_irq;
+	}
+
+	for (i = 0; i < MT8183_MEMIF_NUM; i++) {
+		struct mtk_base_afe_memif *memif = &afe->memif[i];
+
+		if (!memif->substream)
+			continue;
+
+		if (memif->irq_usage < 0)
+			continue;
+
+		irq = &afe->irqs[memif->irq_usage];
+
+		if (status_mcu & (1 << irq->irq_data->irq_en_shift))
+			snd_pcm_period_elapsed(memif->substream);
+	}
+
+err_irq:
+	/* clear irq */
+	regmap_write(afe->regmap,
+		     AFE_IRQ_MCU_CLR,
+		     status_mcu);
+
+	return irq_ret;
+}
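+
+/*
+ * Note on the status check above: each memif borrows one of the IRQ
+ * lines, and the handler matches pending bits against that line's
+ * enable shift.  For example, a memif whose irq_usage is MT8183_IRQ_1
+ * is serviced when status_mcu has bit IRQ1_MCU_ON_SFT set.
+ */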
+
+static int mt8183_afe_runtime_suspend(struct device *dev)
+{
+	struct mtk_base_afe *afe = dev_get_drvdata(dev);
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	unsigned int value;
+	int ret;
+
+	if (!afe->regmap || afe_priv->pm_runtime_bypass_reg_ctl)
+		goto skip_regmap;
+
+	/* disable AFE */
+	regmap_update_bits(afe->regmap, AFE_DAC_CON0, AFE_ON_MASK_SFT, 0x0);
+
+	ret = regmap_read_poll_timeout(afe->regmap,
+				       AFE_DAC_MON,
+				       value,
+				       (value & AFE_ON_RETM_MASK_SFT) == 0,
+				       20,
+				       1 * 1000 * 1000);
+	if (ret)
+		dev_warn(afe->dev, "%s(), ret %d\n", __func__, ret);
+
+	/* make sure all irq statuses are cleared; writing twice is intended */
+	regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CLR, 0xffff, 0xffff);
+	regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CLR, 0xffff, 0xffff);
+
+	/* cache only */
+	regcache_cache_only(afe->regmap, true);
+	regcache_mark_dirty(afe->regmap);
+
+skip_regmap:
+	return mt8183_afe_disable_clock(afe);
+}
+
+static int mt8183_afe_runtime_resume(struct device *dev)
+{
+	struct mtk_base_afe *afe = dev_get_drvdata(dev);
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	int ret;
+
+	ret = mt8183_afe_enable_clock(afe);
+	if (ret)
+		return ret;
+
+	if (!afe->regmap || afe_priv->pm_runtime_bypass_reg_ctl)
+		goto skip_regmap;
+
+	regcache_cache_only(afe->regmap, false);
+	regcache_sync(afe->regmap);
+
+	/* enable audio sys DCM for power saving */
+	regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, 0x1 << 29, 0x1 << 29);
+
+	/* force cpu to use 8_24 format when writing 32-bit data */
+	regmap_update_bits(afe->regmap, AFE_MEMIF_MSB,
+			   CPU_HD_ALIGN_MASK_SFT, 0 << CPU_HD_ALIGN_SFT);
+
+	/* set all output port to 24bit */
+	regmap_write(afe->regmap, AFE_CONN_24BIT, 0xffffffff);
+	regmap_write(afe->regmap, AFE_CONN_24BIT_1, 0xffffffff);
+
+	/* enable AFE */
+	regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0x1);
+
+skip_regmap:
+	return 0;
+}
+
+static int mt8183_afe_component_probe(struct snd_soc_component *component)
+{
+	return mtk_afe_add_sub_dai_control(component);
+}
+
+static const struct snd_soc_component_driver mt8183_afe_component = {
+	.name = AFE_PCM_NAME,
+	.ops = &mtk_afe_pcm_ops,
+	.pcm_new = mtk_afe_pcm_new,
+	.pcm_free = mtk_afe_pcm_free,
+	.probe = mt8183_afe_component_probe,
+};
+
+static int mt8183_dai_memif_register(struct mtk_base_afe *afe)
+{
+	struct mtk_base_afe_dai *dai;
+
+	dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+	if (!dai)
+		return -ENOMEM;
+
+	list_add(&dai->list, &afe->sub_dais);
+
+	dai->dai_drivers = mt8183_memif_dai_driver;
+	dai->num_dai_drivers = ARRAY_SIZE(mt8183_memif_dai_driver);
+
+	dai->dapm_widgets = mt8183_memif_widgets;
+	dai->num_dapm_widgets = ARRAY_SIZE(mt8183_memif_widgets);
+	dai->dapm_routes = mt8183_memif_routes;
+	dai->num_dapm_routes = ARRAY_SIZE(mt8183_memif_routes);
+	return 0;
+}
+
+typedef int (*dai_register_cb)(struct mtk_base_afe *);
+static const dai_register_cb dai_register_cbs[] = {
+	mt8183_dai_adda_register,
+	mt8183_dai_i2s_register,
+	mt8183_dai_pcm_register,
+	mt8183_dai_tdm_register,
+	mt8183_dai_hostless_register,
+	mt8183_dai_memif_register,
+};
+
+static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
+{
+	struct mtk_base_afe *afe;
+	struct mt8183_afe_private *afe_priv;
+	struct device *dev;
+	int i, irq_id, ret;
+
+	afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
+	if (!afe)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, afe);
+
+	afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv),
+					  GFP_KERNEL);
+	if (!afe->platform_priv)
+		return -ENOMEM;
+
+	afe_priv = afe->platform_priv;
+	afe->dev = &pdev->dev;
+	dev = afe->dev;
+
+	/* initialize audio-related clocks */
+	ret = mt8183_init_clock(afe);
+	if (ret) {
+		dev_err(dev, "init clock error\n");
+		return ret;
+	}
+
+	pm_runtime_enable(dev);
+
+	/* regmap init */
+	afe->regmap = syscon_node_to_regmap(dev->parent->of_node);
+	if (IS_ERR(afe->regmap)) {
+		dev_err(dev, "could not get regmap from parent\n");
+		return PTR_ERR(afe->regmap);
+	}
+	ret = regmap_attach_dev(dev, afe->regmap, &mt8183_afe_regmap_config);
+	if (ret) {
+		dev_warn(dev, "regmap_attach_dev fail, ret %d\n", ret);
+		return ret;
+	}
+
+	/* enable clocks so regcache can get default values from hw */
+	afe_priv->pm_runtime_bypass_reg_ctl = true;
+	pm_runtime_get_sync(&pdev->dev);
+
+	ret = regmap_reinit_cache(afe->regmap, &mt8183_afe_regmap_config);
+	if (ret) {
+		dev_err(dev, "regmap_reinit_cache fail, ret %d\n", ret);
+		return ret;
+	}
+
+	pm_runtime_put_sync(&pdev->dev);
+	afe_priv->pm_runtime_bypass_reg_ctl = false;
+
+	regcache_cache_only(afe->regmap, true);
+	regcache_mark_dirty(afe->regmap);
+
+	pm_runtime_get_sync(&pdev->dev);
+
+	/* init memif */
+	afe->memif_size = MT8183_MEMIF_NUM;
+	afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
+				  GFP_KERNEL);
+	if (!afe->memif)
+		return -ENOMEM;
+
+	for (i = 0; i < afe->memif_size; i++) {
+		afe->memif[i].data = &memif_data[i];
+		afe->memif[i].irq_usage = -1;
+	}
+
+	afe->memif[MT8183_MEMIF_HDMI].irq_usage = MT8183_IRQ_8;
+	afe->memif[MT8183_MEMIF_HDMI].const_irq = 1;
+
+	mutex_init(&afe->irq_alloc_lock);
+
+	/* init irq */
+	afe->irqs_size = MT8183_IRQ_NUM;
+	afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
+				 GFP_KERNEL);
+	if (!afe->irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < afe->irqs_size; i++)
+		afe->irqs[i].irq_data = &irq_data[i];
+
+	/* request irq */
+	irq_id = platform_get_irq(pdev, 0);
+	if (irq_id < 0) {
+		dev_err(dev, "%pOFn no irq found\n", dev->of_node);
+		return irq_id;
+	}
+	ret = devm_request_irq(dev, irq_id, mt8183_afe_irq_handler,
+			       IRQF_TRIGGER_NONE, "asys-isr", (void *)afe);
+	if (ret) {
+		dev_err(dev, "could not request_irq for asys-isr\n");
+		return ret;
+	}
+
+	/* init sub_dais */
+	INIT_LIST_HEAD(&afe->sub_dais);
+
+	for (i = 0; i < ARRAY_SIZE(dai_register_cbs); i++) {
+		ret = dai_register_cbs[i](afe);
+		if (ret) {
+			dev_warn(afe->dev, "dai register i %d fail, ret %d\n",
+				 i, ret);
+			return ret;
+		}
+	}
+
+	/* init dai_driver and component_driver */
+	ret = mtk_afe_combine_sub_dai(afe);
+	if (ret) {
+		dev_warn(afe->dev, "mtk_afe_combine_sub_dai fail, ret %d\n",
+			 ret);
+		return ret;
+	}
+
+	afe->mtk_afe_hardware = &mt8183_afe_hardware;
+	afe->memif_fs = mt8183_memif_fs;
+	afe->irq_fs = mt8183_irq_fs;
+
+	afe->runtime_resume = mt8183_afe_runtime_resume;
+	afe->runtime_suspend = mt8183_afe_runtime_suspend;
+
+	/* register component */
+	ret = devm_snd_soc_register_component(&pdev->dev,
+					      &mt8183_afe_component,
+					      NULL, 0);
+	if (ret) {
+		dev_warn(dev, "err_platform\n");
+		return ret;
+	}
+
+	ret = devm_snd_soc_register_component(afe->dev,
+					      &mt8183_afe_pcm_dai_component,
+					      afe->dai_drivers,
+					      afe->num_dai_drivers);
+	if (ret) {
+		dev_warn(dev, "err_dai_component\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static int mt8183_afe_pcm_dev_remove(struct platform_device *pdev)
+{
+	pm_runtime_put_sync(&pdev->dev);
+
+	pm_runtime_disable(&pdev->dev);
+	if (!pm_runtime_status_suspended(&pdev->dev))
+		mt8183_afe_runtime_suspend(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id mt8183_afe_pcm_dt_match[] = {
+	{ .compatible = "mediatek,mt8183-audio", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mt8183_afe_pcm_dt_match);
+
+static const struct dev_pm_ops mt8183_afe_pm_ops = {
+	SET_RUNTIME_PM_OPS(mt8183_afe_runtime_suspend,
+			   mt8183_afe_runtime_resume, NULL)
+};
+
+static struct platform_driver mt8183_afe_pcm_driver = {
+	.driver = {
+		   .name = "mt8183-audio",
+		   .of_match_table = mt8183_afe_pcm_dt_match,
+#ifdef CONFIG_PM
+		   .pm = &mt8183_afe_pm_ops,
+#endif
+	},
+	.probe = mt8183_afe_pcm_dev_probe,
+	.remove = mt8183_afe_pcm_dev_remove,
+};
+
+module_platform_driver(mt8183_afe_pcm_driver);
+
+MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver for 8183");
+MODULE_AUTHOR("KaiChieh Chuang <kaichieh.chuang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
new file mode 100644
index 0000000..8eecb2c
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt8183-da7219-max98357.c
+//	--  MT8183-DA7219-MAX98357 ALSA SoC machine driver
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Shunli Wang <shunli.wang@mediatek.com>
+
+#include <linux/module.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "mt8183-afe-common.h"
+#include "../../codecs/da7219-aad.h"
+#include "../../codecs/da7219.h"
+
+static struct snd_soc_jack headset_jack;
+
+/* Headset jack detection DAPM pins */
+static struct snd_soc_jack_pin headset_jack_pins[] = {
+	{
+		.pin = "Headphone",
+		.mask = SND_JACK_HEADPHONE,
+	},
+	{
+		.pin = "Headset Mic",
+		.mask = SND_JACK_MICROPHONE,
+	},
+};
+
+static struct snd_soc_dai_link_component
+mt8183_da7219_max98357_external_codecs[] = {
+	{
+		.name = "max98357a",
+		.dai_name = "HiFi",
+	},
+	{
+		.name = "da7219.5-001a",
+		.dai_name = "da7219-hifi",
+	},
+};
+
+static int mt8183_mt6358_i2s_hw_params(struct snd_pcm_substream *substream,
+				       struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int rate = params_rate(params);
+	unsigned int mclk_fs_ratio = 128;
+	unsigned int mclk_fs = rate * mclk_fs_ratio;
+
+	return snd_soc_dai_set_sysclk(rtd->cpu_dai,
+				      0, mclk_fs, SND_SOC_CLOCK_OUT);
+}
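+
+/*
+ * Example (illustrative arithmetic): with the fixed 128x MCLK/FS ratio
+ * above, a 48000Hz stream requests an MCLK of 48000 * 128 = 6144000Hz,
+ * while 44100Hz requests 44100 * 128 = 5644800Hz.
+ */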
+
+static const struct snd_soc_ops mt8183_mt6358_i2s_ops = {
+	.hw_params = mt8183_mt6358_i2s_hw_params,
+};
+
+static int mt8183_da7219_i2s_hw_params(struct snd_pcm_substream *substream,
+				       struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int rate = params_rate(params);
+	unsigned int mclk_fs_ratio = 256;
+	unsigned int mclk_fs = rate * mclk_fs_ratio;
+	unsigned int freq;
+	int ret = 0, j;
+
+	ret = snd_soc_dai_set_sysclk(rtd->cpu_dai, 0,
+				     mclk_fs, SND_SOC_CLOCK_OUT);
+	if (ret < 0)
+		dev_err(rtd->dev, "failed to set cpu dai sysclk\n");
+
+	for (j = 0; j < rtd->num_codecs; j++) {
+		struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
+
+		if (!strcmp(codec_dai->component->name, "da7219.5-001a")) {
+			ret = snd_soc_dai_set_sysclk(codec_dai,
+						     DA7219_CLKSRC_MCLK,
+						     mclk_fs,
+						     SND_SOC_CLOCK_IN);
+			if (ret < 0)
+				dev_err(rtd->dev, "failed to set sysclk\n");
+
+			if ((rate % 8000) == 0)
+				freq = DA7219_PLL_FREQ_OUT_98304;
+			else
+				freq = DA7219_PLL_FREQ_OUT_90316;
+
+			ret = snd_soc_dai_set_pll(codec_dai, 0,
+						  DA7219_SYSCLK_PLL_SRM,
+						  0, freq);
+			if (ret)
+				dev_err(rtd->dev, "failed to start PLL: %d\n",
+					ret);
+		}
+	}
+
+	return ret;
+}
+
+static int mt8183_da7219_hw_free(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int ret = 0, j;
+
+	for (j = 0; j < rtd->num_codecs; j++) {
+		struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
+
+		if (!strcmp(codec_dai->component->name, "da7219.5-001a")) {
+			ret = snd_soc_dai_set_pll(codec_dai,
+						  0, DA7219_SYSCLK_MCLK, 0, 0);
+			if (ret < 0) {
+				dev_err(rtd->dev, "failed to stop PLL: %d\n",
+					ret);
+				break;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static const struct snd_soc_ops mt8183_da7219_i2s_ops = {
+	.hw_params = mt8183_da7219_i2s_hw_params,
+	.hw_free = mt8183_da7219_hw_free,
+};
+
+static int mt8183_i2s_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+				      struct snd_pcm_hw_params *params)
+{
+	/* fix BE i2s format to 32-bit; clear the param mask first */
+	snd_mask_reset_range(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT),
+			     0, SNDRV_PCM_FORMAT_LAST);
+
+	params_set_format(params, SNDRV_PCM_FORMAT_S32_LE);
+
+	return 0;
+}
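+
+/*
+ * Effect of the fixup above (illustrative): even if a front-end PCM is
+ * opened as S16_LE, the back-end I2S link is renegotiated to S32_LE, so
+ * the wire format is always 32-bit regardless of the FE format.
+ */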
+
+static const struct snd_soc_dapm_widget
+mt8183_da7219_max98357_dapm_widgets[] = {
+	SND_SOC_DAPM_OUTPUT("IT6505_8CH"),
+};
+
+static const struct snd_soc_dapm_route mt8183_da7219_max98357_dapm_routes[] = {
+	{"IT6505_8CH", NULL, "TDM"},
+};
+
+static struct snd_soc_dai_link mt8183_da7219_max98357_dai_links[] = {
+	/* FE */
+	{
+		.name = "Playback_1",
+		.stream_name = "Playback_1",
+		.cpu_dai_name = "DL1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+	{
+		.name = "Playback_2",
+		.stream_name = "Playback_2",
+		.cpu_dai_name = "DL2",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+	{
+		.name = "Playback_3",
+		.stream_name = "Playback_3",
+		.cpu_dai_name = "DL3",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+	{
+		.name = "Capture_1",
+		.stream_name = "Capture_1",
+		.cpu_dai_name = "UL1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "Capture_2",
+		.stream_name = "Capture_2",
+		.cpu_dai_name = "UL2",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "Capture_3",
+		.stream_name = "Capture_3",
+		.cpu_dai_name = "UL3",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "Capture_Mono_1",
+		.stream_name = "Capture_Mono_1",
+		.cpu_dai_name = "UL_MONO_1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "Playback_HDMI",
+		.stream_name = "Playback_HDMI",
+		.cpu_dai_name = "HDMI",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+	/* BE */
+	{
+		.name = "Primary Codec",
+		.cpu_dai_name = "ADDA",
+		.codec_dai_name = "mt6358-snd-codec-aif1",
+		.codec_name = "mt6358-sound",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = "PCM 1",
+		.cpu_dai_name = "PCM 1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = "PCM 2",
+		.cpu_dai_name = "PCM 2",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = "I2S0",
+		.cpu_dai_name = "I2S0",
+		.codec_dai_name = "bt-sco-pcm",
+		.codec_name = "bt-sco",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.ignore_suspend = 1,
+		.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+		.ops = &mt8183_mt6358_i2s_ops,
+	},
+	{
+		.name = "I2S1",
+		.cpu_dai_name = "I2S1",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.ignore_suspend = 1,
+		.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+		.ops = &mt8183_mt6358_i2s_ops,
+	},
+	{
+		.name = "I2S2",
+		.cpu_dai_name = "I2S2",
+		.codec_dai_name = "da7219-hifi",
+		.codec_name = "da7219.5-001a",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.ignore_suspend = 1,
+		.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+		.ops = &mt8183_da7219_i2s_ops,
+	},
+	{
+		.name = "I2S3",
+		.cpu_dai_name = "I2S3",
+		.codecs = mt8183_da7219_max98357_external_codecs,
+		.num_codecs =
+			ARRAY_SIZE(mt8183_da7219_max98357_external_codecs),
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.ignore_suspend = 1,
+		.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+		.ops = &mt8183_da7219_i2s_ops,
+	},
+	{
+		.name = "I2S5",
+		.cpu_dai_name = "I2S5",
+		.codec_dai_name = "bt-sco-pcm",
+		.codec_name = "bt-sco",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.ignore_suspend = 1,
+		.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+		.ops = &mt8183_mt6358_i2s_ops,
+	},
+	{
+		.name = "TDM",
+		.cpu_dai_name = "TDM",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.ignore_suspend = 1,
+	},
+};
+
+static int
+mt8183_da7219_max98357_headset_init(struct snd_soc_component *component);
+
+static struct snd_soc_aux_dev mt8183_da7219_max98357_headset_dev = {
+	.name = "Headset Chip",
+	.init = mt8183_da7219_max98357_headset_init,
+};
+
+static struct snd_soc_codec_conf mt6358_codec_conf[] = {
+	{
+		.dev_name = "mt6358-sound",
+		.name_prefix = "Mt6358",
+	},
+};
+
+static struct snd_soc_card mt8183_da7219_max98357_card = {
+	.name = "mt8183_da7219_max98357",
+	.owner = THIS_MODULE,
+	.dai_link = mt8183_da7219_max98357_dai_links,
+	.num_links = ARRAY_SIZE(mt8183_da7219_max98357_dai_links),
+	.aux_dev = &mt8183_da7219_max98357_headset_dev,
+	.num_aux_devs = 1,
+	.codec_conf = mt6358_codec_conf,
+	.num_configs = ARRAY_SIZE(mt6358_codec_conf),
+};
+
+static int
+mt8183_da7219_max98357_headset_init(struct snd_soc_component *component)
+{
+	int ret;
+
+	/* Enable Headset and 4 Buttons Jack detection */
+	ret = snd_soc_card_jack_new(&mt8183_da7219_max98357_card,
+				    "Headset Jack",
+				    SND_JACK_HEADSET |
+				    SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+				    SND_JACK_BTN_2 | SND_JACK_BTN_3,
+				    &headset_jack,
+				    headset_jack_pins,
+				    ARRAY_SIZE(headset_jack_pins));
+	if (ret)
+		return ret;
+
+	da7219_aad_jack_det(component, &headset_jack);
+
+	return ret;
+}
+
+static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &mt8183_da7219_max98357_card;
+	struct device_node *platform_node;
+	struct snd_soc_dai_link *dai_link;
+	struct pinctrl *default_pins;
+	int ret, i;
+
+	card->dev = &pdev->dev;
+
+	platform_node = of_parse_phandle(pdev->dev.of_node,
+					 "mediatek,platform", 0);
+	if (!platform_node) {
+		dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	for_each_card_prelinks(card, i, dai_link) {
+		if (dai_link->platform_name)
+			continue;
+		dai_link->platform_of_node = platform_node;
+	}
+
+	mt8183_da7219_max98357_headset_dev.codec_of_node =
+		of_parse_phandle(pdev->dev.of_node,
+				 "mediatek,headset-codec", 0);
+	if (!mt8183_da7219_max98357_headset_dev.codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'mediatek,headset-codec' missing/invalid\n");
+		return -EINVAL;
+	}
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret) {
+		dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	default_pins =
+		devm_pinctrl_get_select(&pdev->dev, PINCTRL_STATE_DEFAULT);
+	if (IS_ERR(default_pins)) {
+		dev_err(&pdev->dev, "%s set pins failed\n",
+			__func__);
+		return PTR_ERR(default_pins);
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mt8183_da7219_max98357_dt_match[] = {
+	{.compatible = "mediatek,mt8183_da7219_max98357",},
+	{}
+};
+#endif
+
+static struct platform_driver mt8183_da7219_max98357_driver = {
+	.driver = {
+		.name = "mt8183_da7219_max98357",
+#ifdef CONFIG_OF
+		.of_match_table = mt8183_da7219_max98357_dt_match,
+#endif
+	},
+	.probe = mt8183_da7219_max98357_dev_probe,
+};
+
+module_platform_driver(mt8183_da7219_max98357_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT8183-DA7219-MAX98357 ALSA SoC machine driver");
+MODULE_AUTHOR("Shunli Wang <shunli.wang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("mt8183_da7219_max98357 soc card");
diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-adda.c b/sound/soc/mediatek/mt8183/mt8183-dai-adda.c
new file mode 100644
index 0000000..017d7d1
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-dai-adda.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI ADDA Control
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include "mt8183-afe-common.h"
+#include "mt8183-interconnection.h"
+#include "mt8183-reg.h"
+
+enum {
+	AUDIO_SDM_LEVEL_MUTE = 0,
+	AUDIO_SDM_LEVEL_NORMAL = 0x1d,
+	/* if you change the normal level, */
+	/* you need to change the hp impedance and dc trim formulas too */
+};
+
+enum {
+	DELAY_DATA_MISO1 = 0,
+	DELAY_DATA_MISO2,
+};
+
+enum {
+	MTK_AFE_ADDA_DL_RATE_8K = 0,
+	MTK_AFE_ADDA_DL_RATE_11K = 1,
+	MTK_AFE_ADDA_DL_RATE_12K = 2,
+	MTK_AFE_ADDA_DL_RATE_16K = 3,
+	MTK_AFE_ADDA_DL_RATE_22K = 4,
+	MTK_AFE_ADDA_DL_RATE_24K = 5,
+	MTK_AFE_ADDA_DL_RATE_32K = 6,
+	MTK_AFE_ADDA_DL_RATE_44K = 7,
+	MTK_AFE_ADDA_DL_RATE_48K = 8,
+	MTK_AFE_ADDA_DL_RATE_96K = 9,
+	MTK_AFE_ADDA_DL_RATE_192K = 10,
+};
+
+enum {
+	MTK_AFE_ADDA_UL_RATE_8K = 0,
+	MTK_AFE_ADDA_UL_RATE_16K = 1,
+	MTK_AFE_ADDA_UL_RATE_32K = 2,
+	MTK_AFE_ADDA_UL_RATE_48K = 3,
+	MTK_AFE_ADDA_UL_RATE_96K = 4,
+	MTK_AFE_ADDA_UL_RATE_192K = 5,
+	MTK_AFE_ADDA_UL_RATE_48K_HD = 6,
+};
+
+static unsigned int adda_dl_rate_transform(struct mtk_base_afe *afe,
+					   unsigned int rate)
+{
+	switch (rate) {
+	case 8000:
+		return MTK_AFE_ADDA_DL_RATE_8K;
+	case 11025:
+		return MTK_AFE_ADDA_DL_RATE_11K;
+	case 12000:
+		return MTK_AFE_ADDA_DL_RATE_12K;
+	case 16000:
+		return MTK_AFE_ADDA_DL_RATE_16K;
+	case 22050:
+		return MTK_AFE_ADDA_DL_RATE_22K;
+	case 24000:
+		return MTK_AFE_ADDA_DL_RATE_24K;
+	case 32000:
+		return MTK_AFE_ADDA_DL_RATE_32K;
+	case 44100:
+		return MTK_AFE_ADDA_DL_RATE_44K;
+	case 48000:
+		return MTK_AFE_ADDA_DL_RATE_48K;
+	case 96000:
+		return MTK_AFE_ADDA_DL_RATE_96K;
+	case 192000:
+		return MTK_AFE_ADDA_DL_RATE_192K;
+	default:
+		dev_warn(afe->dev, "%s(), rate %d invalid, use 48kHz!!!\n",
+			 __func__, rate);
+		return MTK_AFE_ADDA_DL_RATE_48K;
+	}
+}
+
+static unsigned int adda_ul_rate_transform(struct mtk_base_afe *afe,
+					   unsigned int rate)
+{
+	switch (rate) {
+	case 8000:
+		return MTK_AFE_ADDA_UL_RATE_8K;
+	case 16000:
+		return MTK_AFE_ADDA_UL_RATE_16K;
+	case 32000:
+		return MTK_AFE_ADDA_UL_RATE_32K;
+	case 48000:
+		return MTK_AFE_ADDA_UL_RATE_48K;
+	case 96000:
+		return MTK_AFE_ADDA_UL_RATE_96K;
+	case 192000:
+		return MTK_AFE_ADDA_UL_RATE_192K;
+	default:
+		dev_warn(afe->dev, "%s(), rate %d invalid, use 48kHz!!!\n",
+			 __func__, rate);
+		return MTK_AFE_ADDA_UL_RATE_48K;
+	}
+}
+
+/* dai component */
+static const struct snd_kcontrol_new mtk_adda_dl_ch1_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN3, I_DL1_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN3, I_DL2_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN3, I_DL3_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN3,
+				    I_ADDA_UL_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN3,
+				    I_ADDA_UL_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN3,
+				    I_PCM_1_CAP_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN3,
+				    I_PCM_2_CAP_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_adda_dl_ch2_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN4, I_DL1_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN4, I_DL1_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN4, I_DL2_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN4, I_DL2_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN4, I_DL3_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN4, I_DL3_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN4,
+				    I_ADDA_UL_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN4,
+				    I_ADDA_UL_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN4,
+				    I_PCM_1_CAP_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN4,
+				    I_PCM_2_CAP_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN4,
+				    I_PCM_1_CAP_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH2", AFE_CONN4,
+				    I_PCM_2_CAP_CH2, 1, 0),
+};
+
+static int mtk_adda_ul_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol,
+			     int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+
+	dev_dbg(afe->dev, "%s(), name %s, event 0x%x\n",
+		__func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* update setting to dmic */
+		if (afe_priv->mtkaif_dmic) {
+			/* mtkaif_rxif_data_mode = 1, dmic */
+			regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIF_RX_CFG0,
+					   0x1, 0x1);
+
+			/* dmic mode, 3.25M */
+			regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIF_RX_CFG0,
+					   0x0, 0xf << 20);
+			regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0,
+					   0x0, 0x1 << 5);
+			regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0,
+					   0x0, 0x3 << 14);
+
+			/* turn on dmic, ch1, ch2 */
+			regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0,
+					   0x1 << 1, 0x1 << 1);
+			regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0,
+					   0x3 << 21, 0x3 << 21);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		/* should delay 1/fs (smallest is 8kHz) = 125us before afe off */
+		usleep_range(125, 135);
+
+		/* reset dmic */
+		afe_priv->mtkaif_dmic = 0;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* mtkaif dmic */
+static const char * const mt8183_adda_off_on_str[] = {
+	"Off", "On"
+};
+
+static const struct soc_enum mt8183_adda_enum[] = {
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mt8183_adda_off_on_str),
+			    mt8183_adda_off_on_str),
+};
+
+static int mt8183_adda_dmic_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+
+	ucontrol->value.integer.value[0] = afe_priv->mtkaif_dmic;
+
+	return 0;
+}
+
+static int mt8183_adda_dmic_set(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+	if (ucontrol->value.enumerated.item[0] >= e->items)
+		return -EINVAL;
+
+	afe_priv->mtkaif_dmic = ucontrol->value.integer.value[0];
+
+	dev_info(afe->dev, "%s(), kcontrol name %s, mtkaif_dmic %d\n",
+		 __func__, kcontrol->id.name, afe_priv->mtkaif_dmic);
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new mtk_adda_controls[] = {
+	SOC_ENUM_EXT("MTKAIF_DMIC", mt8183_adda_enum[0],
+		     mt8183_adda_dmic_get, mt8183_adda_dmic_set),
+};
+
+enum {
+	SUPPLY_SEQ_ADDA_AFE_ON,
+	SUPPLY_SEQ_ADDA_DL_ON,
+	SUPPLY_SEQ_ADDA_UL_ON,
+};
+
+static const struct snd_soc_dapm_widget mtk_dai_adda_widgets[] = {
+	/* adda */
+	SND_SOC_DAPM_MIXER("ADDA_DL_CH1", SND_SOC_NOPM, 0, 0,
+			   mtk_adda_dl_ch1_mix,
+			   ARRAY_SIZE(mtk_adda_dl_ch1_mix)),
+	SND_SOC_DAPM_MIXER("ADDA_DL_CH2", SND_SOC_NOPM, 0, 0,
+			   mtk_adda_dl_ch2_mix,
+			   ARRAY_SIZE(mtk_adda_dl_ch2_mix)),
+
+	SND_SOC_DAPM_SUPPLY_S("ADDA Enable", SUPPLY_SEQ_ADDA_AFE_ON,
+			      AFE_ADDA_UL_DL_CON0, ADDA_AFE_ON_SFT, 0,
+			      NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY_S("ADDA Playback Enable", SUPPLY_SEQ_ADDA_DL_ON,
+			      AFE_ADDA_DL_SRC2_CON0,
+			      DL_2_SRC_ON_TMP_CTL_PRE_SFT, 0,
+			      NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY_S("ADDA Capture Enable", SUPPLY_SEQ_ADDA_UL_ON,
+			      AFE_ADDA_UL_SRC_CON0,
+			      UL_SRC_ON_TMP_CTL_SFT, 0,
+			      mtk_adda_ul_event,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_CLOCK_SUPPLY("aud_dac_clk"),
+	SND_SOC_DAPM_CLOCK_SUPPLY("aud_dac_predis_clk"),
+	SND_SOC_DAPM_CLOCK_SUPPLY("aud_adc_clk"),
+	SND_SOC_DAPM_CLOCK_SUPPLY("mtkaif_26m_clk"),
+};
+
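+/*
+ * Each route is {sink, control, source}; a NULL control is an always-on
+ * path, otherwise the named mixer control gates the connection.
+ */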
+static const struct snd_soc_dapm_route mtk_dai_adda_routes[] = {
+	/* playback */
+	{"ADDA_DL_CH1", "DL1_CH1", "DL1"},
+	{"ADDA_DL_CH2", "DL1_CH1", "DL1"},
+	{"ADDA_DL_CH2", "DL1_CH2", "DL1"},
+
+	{"ADDA_DL_CH1", "DL2_CH1", "DL2"},
+	{"ADDA_DL_CH2", "DL2_CH1", "DL2"},
+	{"ADDA_DL_CH2", "DL2_CH2", "DL2"},
+
+	{"ADDA_DL_CH1", "DL3_CH1", "DL3"},
+	{"ADDA_DL_CH2", "DL3_CH1", "DL3"},
+	{"ADDA_DL_CH2", "DL3_CH2", "DL3"},
+
+	{"ADDA Playback", NULL, "ADDA_DL_CH1"},
+	{"ADDA Playback", NULL, "ADDA_DL_CH2"},
+
+	/* adda enable */
+	{"ADDA Playback", NULL, "ADDA Enable"},
+	{"ADDA Playback", NULL, "ADDA Playback Enable"},
+	{"ADDA Capture", NULL, "ADDA Enable"},
+	{"ADDA Capture", NULL, "ADDA Capture Enable"},
+
+	/* clk */
+	{"ADDA Playback", NULL, "mtkaif_26m_clk"},
+	{"ADDA Playback", NULL, "aud_dac_clk"},
+	{"ADDA Playback", NULL, "aud_dac_predis_clk"},
+
+	{"ADDA Capture", NULL, "mtkaif_26m_clk"},
+	{"ADDA Capture", NULL, "aud_adc_clk"},
+};
+
+static int set_mtkaif_rx(struct mtk_base_afe *afe)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	int delay_data;
+	int delay_cycle;
+
+	switch (afe_priv->mtkaif_protocol) {
+	case MT8183_MTKAIF_PROTOCOL_2_CLK_P2:
+		regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x38);
+		regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x39);
+		/* mtkaif_rxif_clkinv_adc inverse for calibration */
+		regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0,
+			     0x80010000);
+
+		if (afe_priv->mtkaif_phase_cycle[0] >=
+		    afe_priv->mtkaif_phase_cycle[1]) {
+			delay_data = DELAY_DATA_MISO1;
+			delay_cycle = afe_priv->mtkaif_phase_cycle[0] -
+				      afe_priv->mtkaif_phase_cycle[1];
+		} else {
+			delay_data = DELAY_DATA_MISO2;
+			delay_cycle = afe_priv->mtkaif_phase_cycle[1] -
+				      afe_priv->mtkaif_phase_cycle[0];
+		}
+
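+		/*
+		 * Compensate the phase difference between the two calibrated
+		 * MISO lines by delaying one of them by the cycle difference
+		 * measured above.
+		 */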
+		regmap_update_bits(afe->regmap,
+				   AFE_ADDA_MTKAIF_RX_CFG2,
+				   MTKAIF_RXIF_DELAY_DATA_MASK_SFT,
+				   delay_data << MTKAIF_RXIF_DELAY_DATA_SFT);
+
+		regmap_update_bits(afe->regmap,
+				   AFE_ADDA_MTKAIF_RX_CFG2,
+				   MTKAIF_RXIF_DELAY_CYCLE_MASK_SFT,
+				   delay_cycle << MTKAIF_RXIF_DELAY_CYCLE_SFT);
+		break;
+	case MT8183_MTKAIF_PROTOCOL_2:
+		regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x31);
+		regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0,
+			     0x00010000);
+		break;
+	case MT8183_MTKAIF_PROTOCOL_1:
+		regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x31);
+		regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0, 0x0);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* dai ops */
+static int mtk_dai_adda_hw_params(struct snd_pcm_substream *substream,
+				  struct snd_pcm_hw_params *params,
+				  struct snd_soc_dai *dai)
+{
+	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+	unsigned int rate = params_rate(params);
+
+	dev_dbg(afe->dev, "%s(), id %d, stream %d, rate %d\n",
+		__func__, dai->id, substream->stream, rate);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		unsigned int dl_src2_con0 = 0;
+		unsigned int dl_src2_con1 = 0;
+
+		/* clean predistortion */
+		regmap_write(afe->regmap, AFE_ADDA_PREDIS_CON0, 0);
+		regmap_write(afe->regmap, AFE_ADDA_PREDIS_CON1, 0);
+
+		/* set sampling rate */
+		dl_src2_con0 = adda_dl_rate_transform(afe, rate) << 28;
+
+		/* set output mode */
+		switch (rate) {
+		case 192000:
+			dl_src2_con0 |= (0x1 << 24); /* UP_SAMPLING_RATE_X2 */
+			dl_src2_con0 |= 1 << 14;
+			break;
+		case 96000:
+			dl_src2_con0 |= (0x2 << 24); /* UP_SAMPLING_RATE_X4 */
+			dl_src2_con0 |= 1 << 14;
+			break;
+		default:
+			dl_src2_con0 |= (0x3 << 24); /* UP_SAMPLING_RATE_X8 */
+			break;
+		}
+
+		/* turn off mute function */
+		dl_src2_con0 |= (0x03 << 11);
+
+		/* set voice input data if input sample rate is 8k or 16k */
+		if (rate == 8000 || rate == 16000)
+			dl_src2_con0 |= 0x01 << 5;
+
+		/* per SA's suggestion, apply -0.3dB to the audio/speech path */
+		dl_src2_con1 = 0xf74f0000;
+
+		/* turn on down-link gain */
+		dl_src2_con0 = dl_src2_con0 | (0x01 << 1);
+
+		regmap_write(afe->regmap, AFE_ADDA_DL_SRC2_CON0, dl_src2_con0);
+		regmap_write(afe->regmap, AFE_ADDA_DL_SRC2_CON1, dl_src2_con1);
+
+		/* set sdm gain */
+		regmap_update_bits(afe->regmap,
+				   AFE_ADDA_DL_SDM_DCCOMP_CON,
+				   ATTGAIN_CTL_MASK_SFT,
+				   AUDIO_SDM_LEVEL_NORMAL << ATTGAIN_CTL_SFT);
+	} else {
+		unsigned int voice_mode = 0;
+		unsigned int ul_src_con0 = 0;	/* default value */
+
+		/* set mtkaif protocol */
+		set_mtkaif_rx(afe);
+
+		/* Using Internal ADC */
+		regmap_update_bits(afe->regmap,
+				   AFE_ADDA_TOP_CON0,
+				   0x1 << 0,
+				   0x0 << 0);
+
+		voice_mode = adda_ul_rate_transform(afe, rate);
+
+		ul_src_con0 |= (voice_mode << 17) & (0x7 << 17);
+
+		regmap_write(afe->regmap, AFE_ADDA_UL_SRC_CON0, ul_src_con0);
+
+		/* mtkaif_rxif_data_mode = 0, amic */
+		regmap_update_bits(afe->regmap,
+				   AFE_ADDA_MTKAIF_RX_CFG0,
+				   0x1 << 0,
+				   0x0 << 0);
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_adda_ops = {
+	.hw_params = mtk_dai_adda_hw_params,
+};
+
+/* dai driver */
+#define MTK_ADDA_PLAYBACK_RATES (SNDRV_PCM_RATE_8000_48000 |\
+				 SNDRV_PCM_RATE_96000 |\
+				 SNDRV_PCM_RATE_192000)
+
+#define MTK_ADDA_CAPTURE_RATES (SNDRV_PCM_RATE_8000 |\
+				SNDRV_PCM_RATE_16000 |\
+				SNDRV_PCM_RATE_32000 |\
+				SNDRV_PCM_RATE_48000)
+
+#define MTK_ADDA_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+			  SNDRV_PCM_FMTBIT_S24_LE |\
+			  SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_adda_driver[] = {
+	{
+		.name = "ADDA",
+		.id = MT8183_DAI_ADDA,
+		.playback = {
+			.stream_name = "ADDA Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_ADDA_PLAYBACK_RATES,
+			.formats = MTK_ADDA_FORMATS,
+		},
+		.capture = {
+			.stream_name = "ADDA Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_ADDA_CAPTURE_RATES,
+			.formats = MTK_ADDA_FORMATS,
+		},
+		.ops = &mtk_dai_adda_ops,
+	},
+};
+
+int mt8183_dai_adda_register(struct mtk_base_afe *afe)
+{
+	struct mtk_base_afe_dai *dai;
+
+	dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+	if (!dai)
+		return -ENOMEM;
+
+	list_add(&dai->list, &afe->sub_dais);
+
+	dai->dai_drivers = mtk_dai_adda_driver;
+	dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_adda_driver);
+
+	dai->controls = mtk_adda_controls;
+	dai->num_controls = ARRAY_SIZE(mtk_adda_controls);
+	dai->dapm_widgets = mtk_dai_adda_widgets;
+	dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_adda_widgets);
+	dai->dapm_routes = mtk_dai_adda_routes;
+	dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_adda_routes);
+	return 0;
+}
diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-hostless.c b/sound/soc/mediatek/mt8183/mt8183-dai-hostless.c
new file mode 100644
index 0000000..1667ad3
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-dai-hostless.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI Hostless Control
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include "mt8183-afe-common.h"
+
+/* dai component */
+static const struct snd_soc_dapm_route mtk_dai_hostless_routes[] = {
+	/* Hostless ADDA Loopback */
+	{"ADDA_DL_CH1", "ADDA_UL_CH1", "Hostless LPBK DL"},
+	{"ADDA_DL_CH1", "ADDA_UL_CH2", "Hostless LPBK DL"},
+	{"ADDA_DL_CH2", "ADDA_UL_CH1", "Hostless LPBK DL"},
+	{"ADDA_DL_CH2", "ADDA_UL_CH2", "Hostless LPBK DL"},
+	{"Hostless LPBK UL", NULL, "ADDA Capture"},
+
+	/* Hostless Speech */
+	{"ADDA_DL_CH1", "PCM_1_CAP_CH1", "Hostless Speech DL"},
+	{"ADDA_DL_CH2", "PCM_1_CAP_CH1", "Hostless Speech DL"},
+	{"ADDA_DL_CH2", "PCM_1_CAP_CH2", "Hostless Speech DL"},
+	{"ADDA_DL_CH1", "PCM_2_CAP_CH1", "Hostless Speech DL"},
+	{"ADDA_DL_CH2", "PCM_2_CAP_CH1", "Hostless Speech DL"},
+	{"ADDA_DL_CH2", "PCM_2_CAP_CH2", "Hostless Speech DL"},
+	{"PCM_1_PB_CH1", "ADDA_UL_CH1", "Hostless Speech DL"},
+	{"PCM_1_PB_CH2", "ADDA_UL_CH2", "Hostless Speech DL"},
+	{"PCM_2_PB_CH1", "ADDA_UL_CH1", "Hostless Speech DL"},
+	{"PCM_2_PB_CH2", "ADDA_UL_CH2", "Hostless Speech DL"},
+
+	{"Hostless Speech UL", NULL, "PCM 1 Capture"},
+	{"Hostless Speech UL", NULL, "PCM 2 Capture"},
+	{"Hostless Speech UL", NULL, "ADDA Capture"},
+};
+
+/* dai ops */
+static int mtk_dai_hostless_startup(struct snd_pcm_substream *substream,
+				    struct snd_soc_dai *dai)
+{
+	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	return snd_soc_set_runtime_hwparams(substream, afe->mtk_afe_hardware);
+}
+
+static const struct snd_soc_dai_ops mtk_dai_hostless_ops = {
+	.startup = mtk_dai_hostless_startup,
+};
+
+/* dai driver */
+#define MTK_HOSTLESS_RATES (SNDRV_PCM_RATE_8000_48000 |\
+			   SNDRV_PCM_RATE_88200 |\
+			   SNDRV_PCM_RATE_96000 |\
+			   SNDRV_PCM_RATE_176400 |\
+			   SNDRV_PCM_RATE_192000)
+
+#define MTK_HOSTLESS_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+			     SNDRV_PCM_FMTBIT_S24_LE |\
+			     SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_hostless_driver[] = {
+	{
+		.name = "Hostless LPBK DAI",
+		.id = MT8183_DAI_HOSTLESS_LPBK,
+		.playback = {
+			.stream_name = "Hostless LPBK DL",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_HOSTLESS_RATES,
+			.formats = MTK_HOSTLESS_FORMATS,
+		},
+		.capture = {
+			.stream_name = "Hostless LPBK UL",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_HOSTLESS_RATES,
+			.formats = MTK_HOSTLESS_FORMATS,
+		},
+		.ops = &mtk_dai_hostless_ops,
+	},
+	{
+		.name = "Hostless Speech DAI",
+		.id = MT8183_DAI_HOSTLESS_SPEECH,
+		.playback = {
+			.stream_name = "Hostless Speech DL",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_HOSTLESS_RATES,
+			.formats = MTK_HOSTLESS_FORMATS,
+		},
+		.capture = {
+			.stream_name = "Hostless Speech UL",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_HOSTLESS_RATES,
+			.formats = MTK_HOSTLESS_FORMATS,
+		},
+		.ops = &mtk_dai_hostless_ops,
+	},
+};
+
+int mt8183_dai_hostless_register(struct mtk_base_afe *afe)
+{
+	struct mtk_base_afe_dai *dai;
+
+	dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+	if (!dai)
+		return -ENOMEM;
+
+	list_add(&dai->list, &afe->sub_dais);
+
+	dai->dai_drivers = mtk_dai_hostless_driver;
+	dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_hostless_driver);
+
+	dai->dapm_routes = mtk_dai_hostless_routes;
+	dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_hostless_routes);
+
+	return 0;
+}
diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-i2s.c b/sound/soc/mediatek/mt8183/mt8183-dai-i2s.c
new file mode 100644
index 0000000..777e93d
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-dai-i2s.c
@@ -0,0 +1,1040 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI I2S Control
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include "mt8183-afe-clk.h"
+#include "mt8183-afe-common.h"
+#include "mt8183-interconnection.h"
+#include "mt8183-reg.h"
+
+enum {
+	I2S_FMT_EIAJ = 0,
+	I2S_FMT_I2S = 1,
+};
+
+enum {
+	I2S_WLEN_16_BIT = 0,
+	I2S_WLEN_32_BIT = 1,
+};
+
+enum {
+	I2S_HD_NORMAL = 0,
+	I2S_HD_LOW_JITTER = 1,
+};
+
+enum {
+	I2S1_SEL_O28_O29 = 0,
+	I2S1_SEL_O03_O04 = 1,
+};
+
+enum {
+	I2S_IN_PAD_CONNSYS = 0,
+	I2S_IN_PAD_IO_MUX = 1,
+};
+
+struct mtk_afe_i2s_priv {
+	int id;
+	int rate; /* used to determine which APLL to use */
+	int low_jitter_en;
+
+	const char *share_property_name;
+	int share_i2s_id;
+
+	int mclk_id;
+	int mclk_rate;
+	int mclk_apll;
+};
+
+static unsigned int get_i2s_wlen(snd_pcm_format_t format)
+{
+	return snd_pcm_format_physical_width(format) <= 16 ?
+	       I2S_WLEN_16_BIT : I2S_WLEN_32_BIT;
+}
+
+#define MTK_AFE_I2S0_KCONTROL_NAME "I2S0_HD_Mux"
+#define MTK_AFE_I2S1_KCONTROL_NAME "I2S1_HD_Mux"
+#define MTK_AFE_I2S2_KCONTROL_NAME "I2S2_HD_Mux"
+#define MTK_AFE_I2S3_KCONTROL_NAME "I2S3_HD_Mux"
+#define MTK_AFE_I2S5_KCONTROL_NAME "I2S5_HD_Mux"
+
+#define I2S0_HD_EN_W_NAME "I2S0_HD_EN"
+#define I2S1_HD_EN_W_NAME "I2S1_HD_EN"
+#define I2S2_HD_EN_W_NAME "I2S2_HD_EN"
+#define I2S3_HD_EN_W_NAME "I2S3_HD_EN"
+#define I2S5_HD_EN_W_NAME "I2S5_HD_EN"
+
+#define I2S0_MCLK_EN_W_NAME "I2S0_MCLK_EN"
+#define I2S1_MCLK_EN_W_NAME "I2S1_MCLK_EN"
+#define I2S2_MCLK_EN_W_NAME "I2S2_MCLK_EN"
+#define I2S3_MCLK_EN_W_NAME "I2S3_MCLK_EN"
+#define I2S5_MCLK_EN_W_NAME "I2S5_MCLK_EN"
+
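+/*
+ * All widget/kcontrol names above begin with "I2Sn", so matching the first
+ * four characters is enough to recover the DAI id.
+ */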
+static int get_i2s_id_by_name(struct mtk_base_afe *afe,
+			      const char *name)
+{
+	if (strncmp(name, "I2S0", 4) == 0)
+		return MT8183_DAI_I2S_0;
+	else if (strncmp(name, "I2S1", 4) == 0)
+		return MT8183_DAI_I2S_1;
+	else if (strncmp(name, "I2S2", 4) == 0)
+		return MT8183_DAI_I2S_2;
+	else if (strncmp(name, "I2S3", 4) == 0)
+		return MT8183_DAI_I2S_3;
+	else if (strncmp(name, "I2S5", 4) == 0)
+		return MT8183_DAI_I2S_5;
+	else
+		return -EINVAL;
+}
+
+static struct mtk_afe_i2s_priv *get_i2s_priv_by_name(struct mtk_base_afe *afe,
+						     const char *name)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	int dai_id = get_i2s_id_by_name(afe, name);
+
+	if (dai_id < 0)
+		return NULL;
+
+	return afe_priv->dai_priv[dai_id];
+}
+
+/* low jitter control */
+static const char * const mt8183_i2s_hd_str[] = {
+	"Normal", "Low_Jitter"
+};
+
+static const struct soc_enum mt8183_i2s_enum[] = {
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mt8183_i2s_hd_str),
+			    mt8183_i2s_hd_str),
+};
+
+static int mt8183_i2s_hd_get(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mtk_afe_i2s_priv *i2s_priv;
+
+	i2s_priv = get_i2s_priv_by_name(afe, kcontrol->id.name);
+
+	if (!i2s_priv) {
+		dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+		return -EINVAL;
+	}
+
+	ucontrol->value.integer.value[0] = i2s_priv->low_jitter_en;
+
+	return 0;
+}
+
+static int mt8183_i2s_hd_set(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mtk_afe_i2s_priv *i2s_priv;
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	int hd_en;
+
+	if (ucontrol->value.enumerated.item[0] >= e->items)
+		return -EINVAL;
+
+	hd_en = ucontrol->value.integer.value[0];
+
+	dev_info(afe->dev, "%s(), kcontrol name %s, hd_en %d\n",
+		 __func__, kcontrol->id.name, hd_en);
+
+	i2s_priv = get_i2s_priv_by_name(afe, kcontrol->id.name);
+
+	if (!i2s_priv) {
+		dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+		return -EINVAL;
+	}
+
+	i2s_priv->low_jitter_en = hd_en;
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new mtk_dai_i2s_controls[] = {
+	SOC_ENUM_EXT(MTK_AFE_I2S0_KCONTROL_NAME, mt8183_i2s_enum[0],
+		     mt8183_i2s_hd_get, mt8183_i2s_hd_set),
+	SOC_ENUM_EXT(MTK_AFE_I2S1_KCONTROL_NAME, mt8183_i2s_enum[0],
+		     mt8183_i2s_hd_get, mt8183_i2s_hd_set),
+	SOC_ENUM_EXT(MTK_AFE_I2S2_KCONTROL_NAME, mt8183_i2s_enum[0],
+		     mt8183_i2s_hd_get, mt8183_i2s_hd_set),
+	SOC_ENUM_EXT(MTK_AFE_I2S3_KCONTROL_NAME, mt8183_i2s_enum[0],
+		     mt8183_i2s_hd_get, mt8183_i2s_hd_set),
+	SOC_ENUM_EXT(MTK_AFE_I2S5_KCONTROL_NAME, mt8183_i2s_enum[0],
+		     mt8183_i2s_hd_get, mt8183_i2s_hd_set),
+};
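+/*
+ * The low_jitter_en flag stored here is read back at path power-up time by
+ * mtk_afe_i2s_hd_connect() below to decide whether the HD (low-jitter)
+ * clock supply is wired into the path.
+ */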
+
+/* dai component */
+/* interconnection */
+static const struct snd_kcontrol_new mtk_i2s3_ch1_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN0, I_DL1_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN0, I_DL2_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN0, I_DL3_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN0,
+				    I_ADDA_UL_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN0,
+				    I_PCM_1_CAP_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN0,
+				    I_PCM_2_CAP_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s3_ch2_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN1, I_DL1_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN1, I_DL2_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN1, I_DL3_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN1,
+				    I_ADDA_UL_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN1,
+				    I_PCM_1_CAP_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN1,
+				    I_PCM_2_CAP_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN1,
+				    I_PCM_1_CAP_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH2", AFE_CONN1,
+				    I_PCM_2_CAP_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s1_ch1_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN28, I_DL1_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN28, I_DL2_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN28, I_DL3_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN28,
+				    I_ADDA_UL_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN28,
+				    I_PCM_1_CAP_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN28,
+				    I_PCM_2_CAP_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s1_ch2_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN29, I_DL1_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN29, I_DL2_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN29, I_DL3_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN29,
+				    I_ADDA_UL_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN29,
+				    I_PCM_1_CAP_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN29,
+				    I_PCM_2_CAP_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN29,
+				    I_PCM_1_CAP_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH2", AFE_CONN29,
+				    I_PCM_2_CAP_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s5_ch1_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN30, I_DL1_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN30, I_DL2_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN30, I_DL3_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN30,
+				    I_ADDA_UL_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN30,
+				    I_PCM_1_CAP_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN30,
+				    I_PCM_2_CAP_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s5_ch2_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN31, I_DL1_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN31, I_DL2_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN31, I_DL3_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN31,
+				    I_ADDA_UL_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN31,
+				    I_PCM_1_CAP_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN31,
+				    I_PCM_2_CAP_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN31,
+				    I_PCM_1_CAP_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH2", AFE_CONN31,
+				    I_PCM_2_CAP_CH2, 1, 0),
+};
+
+enum {
+	SUPPLY_SEQ_APLL,
+	SUPPLY_SEQ_I2S_MCLK_EN,
+	SUPPLY_SEQ_I2S_HD_EN,
+	SUPPLY_SEQ_I2S_EN,
+};
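+/*
+ * Power-up follows ascending SUPPLY_SEQ_* order: the APLL must run before
+ * the MCLK/HD dividers, and the I2S enable itself comes last.
+ */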
+
+static int mtk_apll_event(struct snd_soc_dapm_widget *w,
+			  struct snd_kcontrol *kcontrol,
+			  int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+
+	dev_info(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+		 __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (strcmp(w->name, APLL1_W_NAME) == 0)
+			mt8183_apll1_enable(afe);
+		else
+			mt8183_apll2_enable(afe);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (strcmp(w->name, APLL1_W_NAME) == 0)
+			mt8183_apll1_disable(afe);
+		else
+			mt8183_apll2_disable(afe);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mtk_mclk_en_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol,
+			     int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mtk_afe_i2s_priv *i2s_priv;
+
+	dev_info(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+		 __func__, w->name, event);
+
+	i2s_priv = get_i2s_priv_by_name(afe, w->name);
+
+	if (!i2s_priv) {
+		dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		mt8183_mck_enable(afe, i2s_priv->mclk_id, i2s_priv->mclk_rate);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		i2s_priv->mclk_rate = 0;
+		mt8183_mck_disable(afe, i2s_priv->mclk_id);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget mtk_dai_i2s_widgets[] = {
+	SND_SOC_DAPM_MIXER("I2S1_CH1", SND_SOC_NOPM, 0, 0,
+			   mtk_i2s1_ch1_mix,
+			   ARRAY_SIZE(mtk_i2s1_ch1_mix)),
+	SND_SOC_DAPM_MIXER("I2S1_CH2", SND_SOC_NOPM, 0, 0,
+			   mtk_i2s1_ch2_mix,
+			   ARRAY_SIZE(mtk_i2s1_ch2_mix)),
+
+	SND_SOC_DAPM_MIXER("I2S3_CH1", SND_SOC_NOPM, 0, 0,
+			   mtk_i2s3_ch1_mix,
+			   ARRAY_SIZE(mtk_i2s3_ch1_mix)),
+	SND_SOC_DAPM_MIXER("I2S3_CH2", SND_SOC_NOPM, 0, 0,
+			   mtk_i2s3_ch2_mix,
+			   ARRAY_SIZE(mtk_i2s3_ch2_mix)),
+
+	SND_SOC_DAPM_MIXER("I2S5_CH1", SND_SOC_NOPM, 0, 0,
+			   mtk_i2s5_ch1_mix,
+			   ARRAY_SIZE(mtk_i2s5_ch1_mix)),
+	SND_SOC_DAPM_MIXER("I2S5_CH2", SND_SOC_NOPM, 0, 0,
+			   mtk_i2s5_ch2_mix,
+			   ARRAY_SIZE(mtk_i2s5_ch2_mix)),
+
+	/* i2s en */
+	SND_SOC_DAPM_SUPPLY_S("I2S0_EN", SUPPLY_SEQ_I2S_EN,
+			      AFE_I2S_CON, I2S_EN_SFT, 0,
+			      NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("I2S1_EN", SUPPLY_SEQ_I2S_EN,
+			      AFE_I2S_CON1, I2S_EN_SFT, 0,
+			      NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("I2S2_EN", SUPPLY_SEQ_I2S_EN,
+			      AFE_I2S_CON2, I2S_EN_SFT, 0,
+			      NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("I2S3_EN", SUPPLY_SEQ_I2S_EN,
+			      AFE_I2S_CON3, I2S_EN_SFT, 0,
+			      NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("I2S5_EN", SUPPLY_SEQ_I2S_EN,
+			      AFE_I2S_CON4, I2S5_EN_SFT, 0,
+			      NULL, 0),
+	/* i2s hd en */
+	SND_SOC_DAPM_SUPPLY_S(I2S0_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+			      AFE_I2S_CON, I2S1_HD_EN_SFT, 0,
+			      NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S(I2S1_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+			      AFE_I2S_CON1, I2S2_HD_EN_SFT, 0,
+			      NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S(I2S2_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+			      AFE_I2S_CON2, I2S3_HD_EN_SFT, 0,
+			      NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S(I2S3_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+			      AFE_I2S_CON3, I2S4_HD_EN_SFT, 0,
+			      NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S(I2S5_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+			      AFE_I2S_CON4, I2S5_HD_EN_SFT, 0,
+			      NULL, 0),
+
+	/* i2s mclk en */
+	SND_SOC_DAPM_SUPPLY_S(I2S0_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+			      SND_SOC_NOPM, 0, 0,
+			      mtk_mclk_en_event,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S(I2S1_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+			      SND_SOC_NOPM, 0, 0,
+			      mtk_mclk_en_event,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S(I2S2_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+			      SND_SOC_NOPM, 0, 0,
+			      mtk_mclk_en_event,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S(I2S3_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+			      SND_SOC_NOPM, 0, 0,
+			      mtk_mclk_en_event,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S(I2S5_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+			      SND_SOC_NOPM, 0, 0,
+			      mtk_mclk_en_event,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	/* apll */
+	SND_SOC_DAPM_SUPPLY_S(APLL1_W_NAME, SUPPLY_SEQ_APLL,
+			      SND_SOC_NOPM, 0, 0,
+			      mtk_apll_event,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S(APLL2_W_NAME, SUPPLY_SEQ_APLL,
+			      SND_SOC_NOPM, 0, 0,
+			      mtk_apll_event,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
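+/*
+ * The "connected" callbacks below make these routes conditional: returning
+ * 1 pulls a supply into the active path, e.g. only the enable bit of the
+ * I2S an interface is configured to share.
+ */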
+static int mtk_afe_i2s_share_connect(struct snd_soc_dapm_widget *source,
+				     struct snd_soc_dapm_widget *sink)
+{
+	struct snd_soc_dapm_widget *w = sink;
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mtk_afe_i2s_priv *i2s_priv;
+
+	i2s_priv = get_i2s_priv_by_name(afe, sink->name);
+
+	if (!i2s_priv) {
+		dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+		return 0;
+	}
+
+	if (i2s_priv->share_i2s_id < 0)
+		return 0;
+
+	return i2s_priv->share_i2s_id == get_i2s_id_by_name(afe, source->name);
+}
+
+static int mtk_afe_i2s_hd_connect(struct snd_soc_dapm_widget *source,
+				  struct snd_soc_dapm_widget *sink)
+{
+	struct snd_soc_dapm_widget *w = sink;
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mtk_afe_i2s_priv *i2s_priv;
+
+	i2s_priv = get_i2s_priv_by_name(afe, sink->name);
+
+	if (!i2s_priv) {
+		dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+		return 0;
+	}
+
+	if (get_i2s_id_by_name(afe, sink->name) ==
+	    get_i2s_id_by_name(afe, source->name))
+		return i2s_priv->low_jitter_en;
+
+	/* check if share i2s need hd en */
+	if (i2s_priv->share_i2s_id < 0)
+		return 0;
+
+	if (i2s_priv->share_i2s_id == get_i2s_id_by_name(afe, source->name))
+		return i2s_priv->low_jitter_en;
+
+	return 0;
+}
+
+static int mtk_afe_i2s_apll_connect(struct snd_soc_dapm_widget *source,
+				    struct snd_soc_dapm_widget *sink)
+{
+	struct snd_soc_dapm_widget *w = sink;
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mtk_afe_i2s_priv *i2s_priv;
+	int cur_apll;
+	int i2s_need_apll;
+
+	i2s_priv = get_i2s_priv_by_name(afe, w->name);
+
+	if (!i2s_priv) {
+		dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+		return 0;
+	}
+
+	/* which apll */
+	cur_apll = mt8183_get_apll_by_name(afe, source->name);
+
+	/* choose APLL from i2s rate */
+	i2s_need_apll = mt8183_get_apll_by_rate(afe, i2s_priv->rate);
+
+	return (i2s_need_apll == cur_apll) ? 1 : 0;
+}
+
+static int mtk_afe_i2s_mclk_connect(struct snd_soc_dapm_widget *source,
+				    struct snd_soc_dapm_widget *sink)
+{
+	struct snd_soc_dapm_widget *w = sink;
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mtk_afe_i2s_priv *i2s_priv;
+
+	i2s_priv = get_i2s_priv_by_name(afe, sink->name);
+
+	if (!i2s_priv) {
+		dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+		return 0;
+	}
+
+	if (get_i2s_id_by_name(afe, sink->name) ==
+	    get_i2s_id_by_name(afe, source->name))
+		return (i2s_priv->mclk_rate > 0) ? 1 : 0;
+
+	/* check if share i2s need mclk */
+	if (i2s_priv->share_i2s_id < 0)
+		return 0;
+
+	if (i2s_priv->share_i2s_id == get_i2s_id_by_name(afe, source->name))
+		return (i2s_priv->mclk_rate > 0) ? 1 : 0;
+
+	return 0;
+}
+
+static int mtk_afe_mclk_apll_connect(struct snd_soc_dapm_widget *source,
+				     struct snd_soc_dapm_widget *sink)
+{
+	struct snd_soc_dapm_widget *w = sink;
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mtk_afe_i2s_priv *i2s_priv;
+	int cur_apll;
+
+	i2s_priv = get_i2s_priv_by_name(afe, w->name);
+
+	if (!i2s_priv) {
+		dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+		return 0;
+	}
+
+	/* which apll */
+	cur_apll = mt8183_get_apll_by_name(afe, source->name);
+
+	return (i2s_priv->mclk_apll == cur_apll) ? 1 : 0;
+}
+
+static const struct snd_soc_dapm_route mtk_dai_i2s_routes[] = {
+	/* i2s0 */
+	{"I2S0", NULL, "I2S0_EN"},
+	{"I2S0", NULL, "I2S1_EN", mtk_afe_i2s_share_connect},
+	{"I2S0", NULL, "I2S2_EN", mtk_afe_i2s_share_connect},
+	{"I2S0", NULL, "I2S3_EN", mtk_afe_i2s_share_connect},
+	{"I2S0", NULL, "I2S5_EN", mtk_afe_i2s_share_connect},
+
+	{"I2S0", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S0", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S0", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S0", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S0", NULL, I2S5_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{I2S0_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+	{I2S0_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+	{"I2S0", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S0", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S0", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S0", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S0", NULL, I2S5_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{I2S0_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+	{I2S0_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+
+	/* i2s1 */
+	{"I2S1_CH1", "DL1_CH1", "DL1"},
+	{"I2S1_CH2", "DL1_CH2", "DL1"},
+
+	{"I2S1_CH1", "DL2_CH1", "DL2"},
+	{"I2S1_CH2", "DL2_CH2", "DL2"},
+
+	{"I2S1_CH1", "DL3_CH1", "DL3"},
+	{"I2S1_CH2", "DL3_CH2", "DL3"},
+
+	{"I2S1", NULL, "I2S1_CH1"},
+	{"I2S1", NULL, "I2S1_CH2"},
+
+	{"I2S1", NULL, "I2S0_EN", mtk_afe_i2s_share_connect},
+	{"I2S1", NULL, "I2S1_EN"},
+	{"I2S1", NULL, "I2S2_EN", mtk_afe_i2s_share_connect},
+	{"I2S1", NULL, "I2S3_EN", mtk_afe_i2s_share_connect},
+	{"I2S1", NULL, "I2S5_EN", mtk_afe_i2s_share_connect},
+
+	{"I2S1", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S1", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S1", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S1", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S1", NULL, I2S5_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{I2S1_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+	{I2S1_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+	{"I2S1", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S1", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S1", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S1", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S1", NULL, I2S5_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{I2S1_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+	{I2S1_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+
+	/* i2s2 */
+	{"I2S2", NULL, "I2S0_EN", mtk_afe_i2s_share_connect},
+	{"I2S2", NULL, "I2S1_EN", mtk_afe_i2s_share_connect},
+	{"I2S2", NULL, "I2S2_EN"},
+	{"I2S2", NULL, "I2S3_EN", mtk_afe_i2s_share_connect},
+	{"I2S2", NULL, "I2S5_EN", mtk_afe_i2s_share_connect},
+
+	{"I2S2", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S2", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S2", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S2", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S2", NULL, I2S5_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{I2S2_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+	{I2S2_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+	{"I2S2", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S2", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S2", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S2", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S2", NULL, I2S5_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{I2S2_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+	{I2S2_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+
+	/* i2s3 */
+	{"I2S3_CH1", "DL1_CH1", "DL1"},
+	{"I2S3_CH2", "DL1_CH2", "DL1"},
+
+	{"I2S3_CH1", "DL2_CH1", "DL2"},
+	{"I2S3_CH2", "DL2_CH2", "DL2"},
+
+	{"I2S3_CH1", "DL3_CH1", "DL3"},
+	{"I2S3_CH2", "DL3_CH2", "DL3"},
+
+	{"I2S3", NULL, "I2S3_CH1"},
+	{"I2S3", NULL, "I2S3_CH2"},
+
+	{"I2S3", NULL, "I2S0_EN", mtk_afe_i2s_share_connect},
+	{"I2S3", NULL, "I2S1_EN", mtk_afe_i2s_share_connect},
+	{"I2S3", NULL, "I2S2_EN", mtk_afe_i2s_share_connect},
+	{"I2S3", NULL, "I2S3_EN"},
+	{"I2S3", NULL, "I2S5_EN", mtk_afe_i2s_share_connect},
+
+	{"I2S3", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S3", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S3", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S3", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S3", NULL, I2S5_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{I2S3_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+	{I2S3_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+	{"I2S3", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S3", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S3", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S3", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S3", NULL, I2S5_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{I2S3_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+	{I2S3_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+
+	/* i2s5 */
+	{"I2S5_CH1", "DL1_CH1", "DL1"},
+	{"I2S5_CH2", "DL1_CH2", "DL1"},
+
+	{"I2S5_CH1", "DL2_CH1", "DL2"},
+	{"I2S5_CH2", "DL2_CH2", "DL2"},
+
+	{"I2S5_CH1", "DL3_CH1", "DL3"},
+	{"I2S5_CH2", "DL3_CH2", "DL3"},
+
+	{"I2S5", NULL, "I2S5_CH1"},
+	{"I2S5", NULL, "I2S5_CH2"},
+
+	{"I2S5", NULL, "I2S0_EN", mtk_afe_i2s_share_connect},
+	{"I2S5", NULL, "I2S1_EN", mtk_afe_i2s_share_connect},
+	{"I2S5", NULL, "I2S2_EN", mtk_afe_i2s_share_connect},
+	{"I2S5", NULL, "I2S3_EN", mtk_afe_i2s_share_connect},
+	{"I2S5", NULL, "I2S5_EN"},
+
+	{"I2S5", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S5", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S5", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S5", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{"I2S5", NULL, I2S5_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+	{I2S5_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+	{I2S5_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+	{"I2S5", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S5", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S5", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S5", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{"I2S5", NULL, I2S5_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+	{I2S5_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+	{I2S5_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+};
+
+/* dai ops */
+static int mtk_dai_i2s_config(struct mtk_base_afe *afe,
+			      struct snd_pcm_hw_params *params,
+			      int i2s_id)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	struct mtk_afe_i2s_priv *i2s_priv = afe_priv->dai_priv[i2s_id];
+
+	unsigned int rate = params_rate(params);
+	unsigned int rate_reg = mt8183_rate_transform(afe->dev,
+						      rate, i2s_id);
+	snd_pcm_format_t format = params_format(params);
+	unsigned int i2s_con = 0;
+	int ret = 0;
+
+	dev_info(afe->dev, "%s(), id %d, rate %d, format %d\n",
+		 __func__,
+		 i2s_id,
+		 rate, format);
+
+	if (i2s_priv)
+		i2s_priv->rate = rate;
+	else
+		dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+
+	switch (i2s_id) {
+	case MT8183_DAI_I2S_0:
+		regmap_update_bits(afe->regmap, AFE_DAC_CON1,
+				   I2S_MODE_MASK_SFT, rate_reg << I2S_MODE_SFT);
+		i2s_con = I2S_IN_PAD_IO_MUX << I2SIN_PAD_SEL_SFT;
+		i2s_con |= I2S_FMT_I2S << I2S_FMT_SFT;
+		i2s_con |= get_i2s_wlen(format) << I2S_WLEN_SFT;
+		regmap_update_bits(afe->regmap, AFE_I2S_CON,
+				   0xffffeffe, i2s_con);
+		break;
+	case MT8183_DAI_I2S_1:
+		i2s_con = I2S1_SEL_O28_O29 << I2S2_SEL_O03_O04_SFT;
+		i2s_con |= rate_reg << I2S2_OUT_MODE_SFT;
+		i2s_con |= I2S_FMT_I2S << I2S2_FMT_SFT;
+		i2s_con |= get_i2s_wlen(format) << I2S2_WLEN_SFT;
+		regmap_update_bits(afe->regmap, AFE_I2S_CON1,
+				   0xffffeffe, i2s_con);
+		break;
+	case MT8183_DAI_I2S_2:
+		i2s_con = 8 << I2S3_UPDATE_WORD_SFT;
+		i2s_con |= rate_reg << I2S3_OUT_MODE_SFT;
+		i2s_con |= I2S_FMT_I2S << I2S3_FMT_SFT;
+		i2s_con |= get_i2s_wlen(format) << I2S3_WLEN_SFT;
+		regmap_update_bits(afe->regmap, AFE_I2S_CON2,
+				   0xffffeffe, i2s_con);
+		break;
+	case MT8183_DAI_I2S_3:
+		i2s_con = rate_reg << I2S4_OUT_MODE_SFT;
+		i2s_con |= I2S_FMT_I2S << I2S4_FMT_SFT;
+		i2s_con |= get_i2s_wlen(format) << I2S4_WLEN_SFT;
+		regmap_update_bits(afe->regmap, AFE_I2S_CON3,
+				   0xffffeffe, i2s_con);
+		break;
+	case MT8183_DAI_I2S_5:
+		i2s_con = rate_reg << I2S5_OUT_MODE_SFT;
+		i2s_con |= I2S_FMT_I2S << I2S5_FMT_SFT;
+		i2s_con |= get_i2s_wlen(format) << I2S5_WLEN_SFT;
+		regmap_update_bits(afe->regmap, AFE_I2S_CON4,
+				   0xffffeffe, i2s_con);
+		break;
+	default:
+		dev_warn(afe->dev, "%s(), id %d not support\n",
+			 __func__, i2s_id);
+		return -EINVAL;
+	}
+
+	/* set share i2s */
+	if (i2s_priv && i2s_priv->share_i2s_id >= 0)
+		ret = mtk_dai_i2s_config(afe, params, i2s_priv->share_i2s_id);
+
+	return ret;
+}
+
+static int mtk_dai_i2s_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params,
+				 struct snd_soc_dai *dai)
+{
+	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	return mtk_dai_i2s_config(afe, params, dai->id);
+}
+
+static int mtk_dai_i2s_set_sysclk(struct snd_soc_dai *dai,
+				  int clk_id, unsigned int freq, int dir)
+{
+	struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	struct mtk_afe_i2s_priv *i2s_priv = afe_priv->dai_priv[dai->id];
+	int apll;
+	int apll_rate;
+
+	if (!i2s_priv) {
+		dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+		return -EINVAL;
+	}
+
+	if (dir != SND_SOC_CLOCK_OUT) {
+		dev_warn(afe->dev, "%s(), dir != SND_SOC_CLOCK_OUT", __func__);
+		return -EINVAL;
+	}
+
+	dev_info(afe->dev, "%s(), freq %d\n", __func__, freq);
+
+	apll = mt8183_get_apll_by_rate(afe, freq);
+	apll_rate = mt8183_get_apll_rate(afe, apll);
+
+	if (freq > apll_rate) {
+		dev_warn(afe->dev, "%s(), freq > apll rate", __func__);
+		return -EINVAL;
+	}
+
+	if (apll_rate % freq != 0) {
+		dev_warn(afe->dev, "%s(), APLL cannot generate freq Hz",
+			 __func__);
+		return -EINVAL;
+	}
+
+	i2s_priv->mclk_rate = freq;
+	i2s_priv->mclk_apll = apll;
+
+	if (i2s_priv->share_i2s_id > 0) {
+		struct mtk_afe_i2s_priv *share_i2s_priv;
+
+		share_i2s_priv = afe_priv->dai_priv[i2s_priv->share_i2s_id];
+		if (!share_i2s_priv) {
+			dev_warn(afe->dev, "%s(), share_i2s_priv == NULL",
+				 __func__);
+			return -EINVAL;
+		}
+
+		share_i2s_priv->mclk_rate = i2s_priv->mclk_rate;
+		share_i2s_priv->mclk_apll = i2s_priv->mclk_apll;
+	}
+
+	return 0;
+}
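+/*
+ * A machine driver typically picks the MCLK from its hw_params, e.g.
+ * (sketch only; 128 * fs is an illustrative multiple and clk_id is unused):
+ *
+ *	snd_soc_dai_set_sysclk(rtd->cpu_dai, 0, 128 * params_rate(params),
+ *			       SND_SOC_CLOCK_OUT);
+ */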
+
+static const struct snd_soc_dai_ops mtk_dai_i2s_ops = {
+	.hw_params = mtk_dai_i2s_hw_params,
+	.set_sysclk = mtk_dai_i2s_set_sysclk,
+};
+
+/* dai driver */
+#define MTK_I2S_RATES (SNDRV_PCM_RATE_8000_48000 |\
+		       SNDRV_PCM_RATE_88200 |\
+		       SNDRV_PCM_RATE_96000 |\
+		       SNDRV_PCM_RATE_176400 |\
+		       SNDRV_PCM_RATE_192000)
+
+#define MTK_I2S_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+			 SNDRV_PCM_FMTBIT_S24_LE |\
+			 SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_i2s_driver[] = {
+	{
+		.name = "I2S0",
+		.id = MT8183_DAI_I2S_0,
+		.capture = {
+			.stream_name = "I2S0",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_I2S_RATES,
+			.formats = MTK_I2S_FORMATS,
+		},
+		.ops = &mtk_dai_i2s_ops,
+	},
+	{
+		.name = "I2S1",
+		.id = MT8183_DAI_I2S_1,
+		.playback = {
+			.stream_name = "I2S1",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_I2S_RATES,
+			.formats = MTK_I2S_FORMATS,
+		},
+		.ops = &mtk_dai_i2s_ops,
+	},
+	{
+		.name = "I2S2",
+		.id = MT8183_DAI_I2S_2,
+		.capture = {
+			.stream_name = "I2S2",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_I2S_RATES,
+			.formats = MTK_I2S_FORMATS,
+		},
+		.ops = &mtk_dai_i2s_ops,
+	},
+	{
+		.name = "I2S3",
+		.id = MT8183_DAI_I2S_3,
+		.playback = {
+			.stream_name = "I2S3",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_I2S_RATES,
+			.formats = MTK_I2S_FORMATS,
+		},
+		.ops = &mtk_dai_i2s_ops,
+	},
+	{
+		.name = "I2S5",
+		.id = MT8183_DAI_I2S_5,
+		.playback = {
+			.stream_name = "I2S5",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_I2S_RATES,
+			.formats = MTK_I2S_FORMATS,
+		},
+		.ops = &mtk_dai_i2s_ops,
+	},
+};
+
+/* this enum exists only to declare and size the mt8183_i2s_priv array below */
+enum {
+	DAI_I2S0 = 0,
+	DAI_I2S1,
+	DAI_I2S2,
+	DAI_I2S3,
+	DAI_I2S5,
+	DAI_I2S_NUM,
+};
+
+static const struct mtk_afe_i2s_priv mt8183_i2s_priv[DAI_I2S_NUM] = {
+	[DAI_I2S0] = {
+		.id = MT8183_DAI_I2S_0,
+		.mclk_id = MT8183_I2S0_MCK,
+		.share_property_name = "i2s0-share",
+		.share_i2s_id = -1,
+	},
+	[DAI_I2S1] = {
+		.id = MT8183_DAI_I2S_1,
+		.mclk_id = MT8183_I2S1_MCK,
+		.share_property_name = "i2s1-share",
+		.share_i2s_id = -1,
+	},
+	[DAI_I2S2] = {
+		.id = MT8183_DAI_I2S_2,
+		.mclk_id = MT8183_I2S2_MCK,
+		.share_property_name = "i2s2-share",
+		.share_i2s_id = -1,
+	},
+	[DAI_I2S3] = {
+		.id = MT8183_DAI_I2S_3,
+		.mclk_id = MT8183_I2S3_MCK,
+		.share_property_name = "i2s3-share",
+		.share_i2s_id = -1,
+	},
+	[DAI_I2S5] = {
+		.id = MT8183_DAI_I2S_5,
+		.mclk_id = MT8183_I2S5_MCK,
+		.share_property_name = "i2s5-share",
+		.share_i2s_id = -1,
+	},
+};
+
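+/*
+ * Share relationships come from device tree. A hypothetical snippet that
+ * makes I2S1 reuse I2S0's enable/clock path:
+ *
+ *	&afe {
+ *		i2s1-share = "I2S0";
+ *	};
+ */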
+static int mt8183_dai_i2s_get_share(struct mtk_base_afe *afe)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	const struct device_node *of_node = afe->dev->of_node;
+	const char *of_str;
+	const char *property_name;
+	struct mtk_afe_i2s_priv *i2s_priv;
+	int i;
+
+	for (i = 0; i < DAI_I2S_NUM; i++) {
+		i2s_priv = afe_priv->dai_priv[mt8183_i2s_priv[i].id];
+		property_name = mt8183_i2s_priv[i].share_property_name;
+		if (of_property_read_string(of_node, property_name, &of_str))
+			continue;
+		i2s_priv->share_i2s_id = get_i2s_id_by_name(afe, of_str);
+	}
+
+	return 0;
+}
+
+static int mt8183_dai_i2s_set_priv(struct mtk_base_afe *afe)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	struct mtk_afe_i2s_priv *i2s_priv;
+	int i;
+
+	for (i = 0; i < DAI_I2S_NUM; i++) {
+		i2s_priv = devm_kzalloc(afe->dev,
+					sizeof(struct mtk_afe_i2s_priv),
+					GFP_KERNEL);
+		if (!i2s_priv)
+			return -ENOMEM;
+
+		memcpy(i2s_priv, &mt8183_i2s_priv[i],
+		       sizeof(struct mtk_afe_i2s_priv));
+
+		afe_priv->dai_priv[mt8183_i2s_priv[i].id] = i2s_priv;
+	}
+
+	return 0;
+}
+
+int mt8183_dai_i2s_register(struct mtk_base_afe *afe)
+{
+	struct mtk_base_afe_dai *dai;
+	int ret;
+
+	dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+	if (!dai)
+		return -ENOMEM;
+
+	list_add(&dai->list, &afe->sub_dais);
+
+	dai->dai_drivers = mtk_dai_i2s_driver;
+	dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_i2s_driver);
+
+	dai->controls = mtk_dai_i2s_controls;
+	dai->num_controls = ARRAY_SIZE(mtk_dai_i2s_controls);
+	dai->dapm_widgets = mtk_dai_i2s_widgets;
+	dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_i2s_widgets);
+	dai->dapm_routes = mtk_dai_i2s_routes;
+	dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_i2s_routes);
+
+	/* set all dai i2s private data */
+	ret = mt8183_dai_i2s_set_priv(afe);
+	if (ret)
+		return ret;
+
+	/* parse share i2s */
+	ret = mt8183_dai_i2s_get_share(afe);
+	if (ret)
+		return ret;
+
+	return 0;
+}
diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-pcm.c b/sound/soc/mediatek/mt8183/mt8183-dai-pcm.c
new file mode 100644
index 0000000..bc3ba32
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-dai-pcm.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI PCM Control
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include "mt8183-afe-common.h"
+#include "mt8183-interconnection.h"
+#include "mt8183-reg.h"
+
+enum AUD_TX_LCH_RPT {
+	AUD_TX_LCH_RPT_NO_REPEAT = 0,
+	AUD_TX_LCH_RPT_REPEAT = 1
+};
+
+enum AUD_VBT_16K_MODE {
+	AUD_VBT_16K_MODE_DISABLE = 0,
+	AUD_VBT_16K_MODE_ENABLE = 1
+};
+
+enum AUD_EXT_MODEM {
+	AUD_EXT_MODEM_SELECT_INTERNAL = 0,
+	AUD_EXT_MODEM_SELECT_EXTERNAL = 1
+};
+
+enum AUD_PCM_SYNC_TYPE {
+	/* bck sync length = 1 */
+	AUD_PCM_ONE_BCK_CYCLE_SYNC = 0,
+	/* bck sync length = PCM_INTF_CON1[9:13] */
+	AUD_PCM_EXTENDED_BCK_CYCLE_SYNC = 1
+};
+
+enum AUD_BT_MODE {
+	AUD_BT_MODE_DUAL_MIC_ON_TX = 0,
+	AUD_BT_MODE_SINGLE_MIC_ON_TX = 1
+};
+
+enum AUD_PCM_AFIFO_SRC {
+	/* slave mode & the external modem uses a different crystal */
+	AUD_PCM_AFIFO_ASRC = 0,
+	/* slave mode & the external modem uses the same crystal */
+	AUD_PCM_AFIFO_AFIFO = 1
+};
+
+enum AUD_PCM_CLOCK_SOURCE {
+	AUD_PCM_CLOCK_MASTER_MODE = 0,
+	AUD_PCM_CLOCK_SLAVE_MODE = 1
+};
+
+enum AUD_PCM_WLEN {
+	AUD_PCM_WLEN_PCM_32_BCK_CYCLES = 0,
+	AUD_PCM_WLEN_PCM_64_BCK_CYCLES = 1
+};
+
+enum AUD_PCM_MODE {
+	AUD_PCM_MODE_PCM_MODE_8K = 0,
+	AUD_PCM_MODE_PCM_MODE_16K = 1,
+	AUD_PCM_MODE_PCM_MODE_32K = 2,
+	AUD_PCM_MODE_PCM_MODE_48K = 3,
+};
+
+enum AUD_PCM_FMT {
+	AUD_PCM_FMT_I2S = 0,
+	AUD_PCM_FMT_EIAJ = 1,
+	AUD_PCM_FMT_PCM_MODE_A = 2,
+	AUD_PCM_FMT_PCM_MODE_B = 3
+};
+
+enum AUD_BCLK_OUT_INV {
+	AUD_BCLK_OUT_INV_NO_INVERSE = 0,
+	AUD_BCLK_OUT_INV_INVERSE = 1
+};
+
+enum AUD_PCM_EN {
+	AUD_PCM_EN_DISABLE = 0,
+	AUD_PCM_EN_ENABLE = 1
+};
+
+/* dai component */
+static const struct snd_kcontrol_new mtk_pcm_1_playback_ch1_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN7,
+				    I_ADDA_UL_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN7,
+				    I_DL2_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_pcm_1_playback_ch2_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN8,
+				    I_ADDA_UL_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN8,
+				    I_DL2_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_pcm_1_playback_ch4_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN27,
+				    I_DL1_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_pcm_2_playback_ch1_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN17,
+				    I_ADDA_UL_CH1, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN17,
+				    I_DL2_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_pcm_2_playback_ch2_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN18,
+				    I_ADDA_UL_CH2, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN18,
+				    I_DL2_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_pcm_2_playback_ch4_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN24,
+				    I_DL1_CH1, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget mtk_dai_pcm_widgets[] = {
+	/* inter-connections */
+	SND_SOC_DAPM_MIXER("PCM_1_PB_CH1", SND_SOC_NOPM, 0, 0,
+			   mtk_pcm_1_playback_ch1_mix,
+			   ARRAY_SIZE(mtk_pcm_1_playback_ch1_mix)),
+	SND_SOC_DAPM_MIXER("PCM_1_PB_CH2", SND_SOC_NOPM, 0, 0,
+			   mtk_pcm_1_playback_ch2_mix,
+			   ARRAY_SIZE(mtk_pcm_1_playback_ch2_mix)),
+	SND_SOC_DAPM_MIXER("PCM_1_PB_CH4", SND_SOC_NOPM, 0, 0,
+			   mtk_pcm_1_playback_ch4_mix,
+			   ARRAY_SIZE(mtk_pcm_1_playback_ch4_mix)),
+	SND_SOC_DAPM_MIXER("PCM_2_PB_CH1", SND_SOC_NOPM, 0, 0,
+			   mtk_pcm_2_playback_ch1_mix,
+			   ARRAY_SIZE(mtk_pcm_2_playback_ch1_mix)),
+	SND_SOC_DAPM_MIXER("PCM_2_PB_CH2", SND_SOC_NOPM, 0, 0,
+			   mtk_pcm_2_playback_ch2_mix,
+			   ARRAY_SIZE(mtk_pcm_2_playback_ch2_mix)),
+	SND_SOC_DAPM_MIXER("PCM_2_PB_CH4", SND_SOC_NOPM, 0, 0,
+			   mtk_pcm_2_playback_ch4_mix,
+			   ARRAY_SIZE(mtk_pcm_2_playback_ch4_mix)),
+
+	SND_SOC_DAPM_SUPPLY("PCM_1_EN", PCM_INTF_CON1, PCM_EN_SFT, 0,
+			    NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY("PCM_2_EN", PCM2_INTF_CON, PCM2_EN_SFT, 0,
+			    NULL, 0),
+
+	SND_SOC_DAPM_INPUT("MD1_TO_AFE"),
+	SND_SOC_DAPM_INPUT("MD2_TO_AFE"),
+	SND_SOC_DAPM_OUTPUT("AFE_TO_MD1"),
+	SND_SOC_DAPM_OUTPUT("AFE_TO_MD2"),
+};
+
+static const struct snd_soc_dapm_route mtk_dai_pcm_routes[] = {
+	{"PCM 1 Playback", NULL, "PCM_1_PB_CH1"},
+	{"PCM 1 Playback", NULL, "PCM_1_PB_CH2"},
+	{"PCM 1 Playback", NULL, "PCM_1_PB_CH4"},
+	{"PCM 2 Playback", NULL, "PCM_2_PB_CH1"},
+	{"PCM 2 Playback", NULL, "PCM_2_PB_CH2"},
+	{"PCM 2 Playback", NULL, "PCM_2_PB_CH4"},
+
+	{"PCM 1 Playback", NULL, "PCM_1_EN"},
+	{"PCM 2 Playback", NULL, "PCM_2_EN"},
+	{"PCM 1 Capture", NULL, "PCM_1_EN"},
+	{"PCM 2 Capture", NULL, "PCM_2_EN"},
+
+	{"AFE_TO_MD1", NULL, "PCM 2 Playback"},
+	{"AFE_TO_MD2", NULL, "PCM 1 Playback"},
+	{"PCM 2 Capture", NULL, "MD1_TO_AFE"},
+	{"PCM 1 Capture", NULL, "MD2_TO_AFE"},
+
+	{"PCM_1_PB_CH1", "DL2_CH1", "DL2"},
+	{"PCM_1_PB_CH2", "DL2_CH2", "DL2"},
+	{"PCM_1_PB_CH4", "DL1_CH1", "DL1"},
+	{"PCM_2_PB_CH1", "DL2_CH1", "DL2"},
+	{"PCM_2_PB_CH2", "DL2_CH2", "DL2"},
+	{"PCM_2_PB_CH4", "DL1_CH1", "DL1"},
+};
+
+/* dai ops */
+static int mtk_dai_pcm_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params,
+				 struct snd_soc_dai *dai)
+{
+	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+	unsigned int rate = params_rate(params);
+	unsigned int rate_reg = mt8183_rate_transform(afe->dev, rate, dai->id);
+	unsigned int pcm_con = 0;
+
+	dev_dbg(afe->dev, "%s(), id %d, stream %d, rate %d, rate_reg %d, widget active p %d, c %d\n",
+		__func__,
+		dai->id,
+		substream->stream,
+		rate,
+		rate_reg,
+		dai->playback_widget->active,
+		dai->capture_widget->active);
+
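+	/*
+	 * The PCM interface is shared by playback and capture: if either
+	 * direction is already active, the interface registers are already
+	 * programmed (the DAI enforces symmetric rates/widths), so skip
+	 * reconfiguration.
+	 */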
+	if (dai->playback_widget->active || dai->capture_widget->active)
+		return 0;
+
+	switch (dai->id) {
+	case MT8183_DAI_PCM_1:
+		pcm_con |= AUD_BCLK_OUT_INV_NO_INVERSE << PCM_BCLK_OUT_INV_SFT;
+		pcm_con |= AUD_TX_LCH_RPT_NO_REPEAT << PCM_TX_LCH_RPT_SFT;
+		pcm_con |= AUD_VBT_16K_MODE_DISABLE << PCM_VBT_16K_MODE_SFT;
+		pcm_con |= AUD_EXT_MODEM_SELECT_INTERNAL << PCM_EXT_MODEM_SFT;
+		pcm_con |= 0 << PCM_SYNC_LENGTH_SFT;
+		pcm_con |= AUD_PCM_ONE_BCK_CYCLE_SYNC << PCM_SYNC_TYPE_SFT;
+		pcm_con |= AUD_BT_MODE_DUAL_MIC_ON_TX << PCM_BT_MODE_SFT;
+		pcm_con |= AUD_PCM_AFIFO_AFIFO << PCM_BYP_ASRC_SFT;
+		pcm_con |= AUD_PCM_CLOCK_SLAVE_MODE << PCM_SLAVE_SFT;
+		pcm_con |= rate_reg << PCM_MODE_SFT;
+		pcm_con |= AUD_PCM_FMT_PCM_MODE_B << PCM_FMT_SFT;
+
+		regmap_update_bits(afe->regmap, PCM_INTF_CON1,
+				   0xfffffffe, pcm_con);
+		break;
+	case MT8183_DAI_PCM_2:
+		pcm_con |= AUD_TX_LCH_RPT_NO_REPEAT << PCM2_TX_LCH_RPT_SFT;
+		pcm_con |= AUD_VBT_16K_MODE_DISABLE << PCM2_VBT_16K_MODE_SFT;
+		pcm_con |= AUD_BT_MODE_DUAL_MIC_ON_TX << PCM2_BT_MODE_SFT;
+		pcm_con |= AUD_PCM_AFIFO_AFIFO << PCM2_AFIFO_SFT;
+		pcm_con |= AUD_PCM_WLEN_PCM_32_BCK_CYCLES << PCM2_WLEN_SFT;
+		pcm_con |= rate_reg << PCM2_MODE_SFT;
+		pcm_con |= AUD_PCM_FMT_PCM_MODE_B << PCM2_FMT_SFT;
+
+		regmap_update_bits(afe->regmap, PCM2_INTF_CON,
+				   0xfffffffe, pcm_con);
+		break;
+	default:
+		dev_warn(afe->dev, "%s(), id %d not support\n",
+			 __func__, dai->id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_pcm_ops = {
+	.hw_params = mtk_dai_pcm_hw_params,
+};
+
+/* dai driver */
+#define MTK_PCM_RATES (SNDRV_PCM_RATE_8000 |\
+		       SNDRV_PCM_RATE_16000 |\
+		       SNDRV_PCM_RATE_32000 |\
+		       SNDRV_PCM_RATE_48000)
+
+#define MTK_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+			 SNDRV_PCM_FMTBIT_S24_LE |\
+			 SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_pcm_driver[] = {
+	{
+		.name = "PCM 1",
+		.id = MT8183_DAI_PCM_1,
+		.playback = {
+			.stream_name = "PCM 1 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_PCM_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.capture = {
+			.stream_name = "PCM 1 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_PCM_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.ops = &mtk_dai_pcm_ops,
+		.symmetric_rates = 1,
+		.symmetric_samplebits = 1,
+	},
+	{
+		.name = "PCM 2",
+		.id = MT8183_DAI_PCM_2,
+		.playback = {
+			.stream_name = "PCM 2 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_PCM_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.capture = {
+			.stream_name = "PCM 2 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MTK_PCM_RATES,
+			.formats = MTK_PCM_FORMATS,
+		},
+		.ops = &mtk_dai_pcm_ops,
+		.symmetric_rates = 1,
+		.symmetric_samplebits = 1,
+	},
+};
+
+int mt8183_dai_pcm_register(struct mtk_base_afe *afe)
+{
+	struct mtk_base_afe_dai *dai;
+
+	dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+	if (!dai)
+		return -ENOMEM;
+
+	list_add(&dai->list, &afe->sub_dais);
+
+	dai->dai_drivers = mtk_dai_pcm_driver;
+	dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_pcm_driver);
+
+	dai->dapm_widgets = mtk_dai_pcm_widgets;
+	dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_pcm_widgets);
+	dai->dapm_routes = mtk_dai_pcm_routes;
+	dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_pcm_routes);
+
+	return 0;
+}
diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-tdm.c b/sound/soc/mediatek/mt8183/mt8183-dai-tdm.c
new file mode 100644
index 0000000..8983d54a
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-dai-tdm.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI TDM Control
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include "mt8183-afe-clk.h"
+#include "mt8183-afe-common.h"
+#include "mt8183-interconnection.h"
+#include "mt8183-reg.h"
+
+struct mtk_afe_tdm_priv {
+	int bck_id;
+	int bck_rate;
+
+	int mclk_id;
+	int mclk_multiple; /* according to sample rate */
+	int mclk_rate;
+	int mclk_apll;
+};
+
+enum {
+	TDM_WLEN_16_BIT = 1,
+	TDM_WLEN_32_BIT = 2,
+};
+
+enum {
+	TDM_CHANNEL_BCK_16 = 0,
+	TDM_CHANNEL_BCK_24 = 1,
+	TDM_CHANNEL_BCK_32 = 2,
+};
+
+enum {
+	TDM_CHANNEL_NUM_2 = 0,
+	TDM_CHANNEL_NUM_4 = 1,
+	TDM_CHANNEL_NUM_8 = 2,
+};
+
+enum {
+	TDM_CH_START_O30_O31 = 0,
+	TDM_CH_START_O32_O33,
+	TDM_CH_START_O34_O35,
+	TDM_CH_START_O36_O37,
+	TDM_CH_ZERO,
+};
+
+enum {
+	HDMI_BIT_WIDTH_16_BIT = 0,
+	HDMI_BIT_WIDTH_32_BIT = 1,
+};
+
+static unsigned int get_hdmi_wlen(snd_pcm_format_t format)
+{
+	return snd_pcm_format_physical_width(format) <= 16 ?
+	       HDMI_BIT_WIDTH_16_BIT : HDMI_BIT_WIDTH_32_BIT;
+}
+
+static unsigned int get_tdm_wlen(snd_pcm_format_t format)
+{
+	return snd_pcm_format_physical_width(format) <= 16 ?
+	       TDM_WLEN_16_BIT : TDM_WLEN_32_BIT;
+}
+
+static unsigned int get_tdm_channel_bck(snd_pcm_format_t format)
+{
+	return snd_pcm_format_physical_width(format) <= 16 ?
+	       TDM_CHANNEL_BCK_16 : TDM_CHANNEL_BCK_32;
+}
+
+static unsigned int get_tdm_lrck_width(snd_pcm_format_t format)
+{
+	return snd_pcm_format_physical_width(format) - 1;
+}
+
+static unsigned int get_tdm_ch(unsigned int ch)
+{
+	switch (ch) {
+	case 1:
+	case 2:
+		return TDM_CHANNEL_NUM_2;
+	case 3:
+	case 4:
+		return TDM_CHANNEL_NUM_4;
+	case 5:
+	case 6:
+	case 7:
+	case 8:
+	default:
+		return TDM_CHANNEL_NUM_8;
+	}
+}
+
+/* interconnection */
+enum {
+	HDMI_CONN_CH0 = 0,
+	HDMI_CONN_CH1,
+	HDMI_CONN_CH2,
+	HDMI_CONN_CH3,
+	HDMI_CONN_CH4,
+	HDMI_CONN_CH5,
+	HDMI_CONN_CH6,
+	HDMI_CONN_CH7,
+};
+
+static const char *const hdmi_conn_mux_map[] = {
+	"CH0", "CH1", "CH2", "CH3",
+	"CH4", "CH5", "CH6", "CH7",
+};
+
+static int hdmi_conn_mux_map_value[] = {
+	HDMI_CONN_CH0,
+	HDMI_CONN_CH1,
+	HDMI_CONN_CH2,
+	HDMI_CONN_CH3,
+	HDMI_CONN_CH4,
+	HDMI_CONN_CH5,
+	HDMI_CONN_CH6,
+	HDMI_CONN_CH7,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch0_mux_map_enum,
+				  AFE_HDMI_CONN0,
+				  HDMI_O_0_SFT,
+				  HDMI_O_0_MASK,
+				  hdmi_conn_mux_map,
+				  hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch0_mux_control =
+	SOC_DAPM_ENUM("HDMI_CH0_MUX", hdmi_ch0_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch1_mux_map_enum,
+				  AFE_HDMI_CONN0,
+				  HDMI_O_1_SFT,
+				  HDMI_O_1_MASK,
+				  hdmi_conn_mux_map,
+				  hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch1_mux_control =
+	SOC_DAPM_ENUM("HDMI_CH1_MUX", hdmi_ch1_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch2_mux_map_enum,
+				  AFE_HDMI_CONN0,
+				  HDMI_O_2_SFT,
+				  HDMI_O_2_MASK,
+				  hdmi_conn_mux_map,
+				  hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch2_mux_control =
+	SOC_DAPM_ENUM("HDMI_CH2_MUX", hdmi_ch2_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch3_mux_map_enum,
+				  AFE_HDMI_CONN0,
+				  HDMI_O_3_SFT,
+				  HDMI_O_3_MASK,
+				  hdmi_conn_mux_map,
+				  hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch3_mux_control =
+	SOC_DAPM_ENUM("HDMI_CH3_MUX", hdmi_ch3_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch4_mux_map_enum,
+				  AFE_HDMI_CONN0,
+				  HDMI_O_4_SFT,
+				  HDMI_O_4_MASK,
+				  hdmi_conn_mux_map,
+				  hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch4_mux_control =
+	SOC_DAPM_ENUM("HDMI_CH4_MUX", hdmi_ch4_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch5_mux_map_enum,
+				  AFE_HDMI_CONN0,
+				  HDMI_O_5_SFT,
+				  HDMI_O_5_MASK,
+				  hdmi_conn_mux_map,
+				  hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch5_mux_control =
+	SOC_DAPM_ENUM("HDMI_CH5_MUX", hdmi_ch5_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch6_mux_map_enum,
+				  AFE_HDMI_CONN0,
+				  HDMI_O_6_SFT,
+				  HDMI_O_6_MASK,
+				  hdmi_conn_mux_map,
+				  hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch6_mux_control =
+	SOC_DAPM_ENUM("HDMI_CH6_MUX", hdmi_ch6_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch7_mux_map_enum,
+				  AFE_HDMI_CONN0,
+				  HDMI_O_7_SFT,
+				  HDMI_O_7_MASK,
+				  hdmi_conn_mux_map,
+				  hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch7_mux_control =
+	SOC_DAPM_ENUM("HDMI_CH7_MUX", hdmi_ch7_mux_map_enum);
+
+enum {
+	SUPPLY_SEQ_APLL,
+	SUPPLY_SEQ_TDM_MCK_EN,
+	SUPPLY_SEQ_TDM_BCK_EN,
+};
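+
+/*
+ * DAPM powers supplies with lower sequence numbers up first, so the
+ * clock tree comes up as APLL -> TDM MCK -> TDM BCK and is torn down
+ * in the reverse order.
+ */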
+
+static int mtk_tdm_bck_en_event(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *kcontrol,
+				int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[MT8183_DAI_TDM];
+
+	dev_info(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+		 __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		mt8183_mck_enable(afe, tdm_priv->bck_id, tdm_priv->bck_rate);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		mt8183_mck_disable(afe, tdm_priv->bck_id);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int mtk_tdm_mck_en_event(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *kcontrol,
+				int event)
+{
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[MT8183_DAI_TDM];
+
+	dev_info(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+		 __func__, w->name, event);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		mt8183_mck_enable(afe, tdm_priv->mclk_id, tdm_priv->mclk_rate);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		tdm_priv->mclk_rate = 0;
+		mt8183_mck_disable(afe, tdm_priv->mclk_id);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget mtk_dai_tdm_widgets[] = {
+	SND_SOC_DAPM_MUX("HDMI_CH0_MUX", SND_SOC_NOPM, 0, 0,
+			 &hdmi_ch0_mux_control),
+	SND_SOC_DAPM_MUX("HDMI_CH1_MUX", SND_SOC_NOPM, 0, 0,
+			 &hdmi_ch1_mux_control),
+	SND_SOC_DAPM_MUX("HDMI_CH2_MUX", SND_SOC_NOPM, 0, 0,
+			 &hdmi_ch2_mux_control),
+	SND_SOC_DAPM_MUX("HDMI_CH3_MUX", SND_SOC_NOPM, 0, 0,
+			 &hdmi_ch3_mux_control),
+	SND_SOC_DAPM_MUX("HDMI_CH4_MUX", SND_SOC_NOPM, 0, 0,
+			 &hdmi_ch4_mux_control),
+	SND_SOC_DAPM_MUX("HDMI_CH5_MUX", SND_SOC_NOPM, 0, 0,
+			 &hdmi_ch5_mux_control),
+	SND_SOC_DAPM_MUX("HDMI_CH6_MUX", SND_SOC_NOPM, 0, 0,
+			 &hdmi_ch6_mux_control),
+	SND_SOC_DAPM_MUX("HDMI_CH7_MUX", SND_SOC_NOPM, 0, 0,
+			 &hdmi_ch7_mux_control),
+
+	SND_SOC_DAPM_CLOCK_SUPPLY("aud_tdm_clk"),
+
+	SND_SOC_DAPM_SUPPLY_S("TDM_BCK", SUPPLY_SEQ_TDM_BCK_EN,
+			      SND_SOC_NOPM, 0, 0,
+			      mtk_tdm_bck_en_event,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SUPPLY_S("TDM_MCK", SUPPLY_SEQ_TDM_MCK_EN,
+			      SND_SOC_NOPM, 0, 0,
+			      mtk_tdm_mck_en_event,
+			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static int mtk_afe_tdm_apll_connect(struct snd_soc_dapm_widget *source,
+				    struct snd_soc_dapm_widget *sink)
+{
+	struct snd_soc_dapm_widget *w = sink;
+	struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[MT8183_DAI_TDM];
+	int cur_apll;
+
+	/* which apll */
+	cur_apll = mt8183_get_apll_by_name(afe, source->name);
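+	/* connect only the APLL that sources the current TDM MCLK */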
+
+	return (tdm_priv->mclk_apll == cur_apll) ? 1 : 0;
+}
+
+static const struct snd_soc_dapm_route mtk_dai_tdm_routes[] = {
+	{"HDMI_CH0_MUX", "CH0", "HDMI"},
+	{"HDMI_CH0_MUX", "CH1", "HDMI"},
+	{"HDMI_CH0_MUX", "CH2", "HDMI"},
+	{"HDMI_CH0_MUX", "CH3", "HDMI"},
+	{"HDMI_CH0_MUX", "CH4", "HDMI"},
+	{"HDMI_CH0_MUX", "CH5", "HDMI"},
+	{"HDMI_CH0_MUX", "CH6", "HDMI"},
+	{"HDMI_CH0_MUX", "CH7", "HDMI"},
+
+	{"HDMI_CH1_MUX", "CH0", "HDMI"},
+	{"HDMI_CH1_MUX", "CH1", "HDMI"},
+	{"HDMI_CH1_MUX", "CH2", "HDMI"},
+	{"HDMI_CH1_MUX", "CH3", "HDMI"},
+	{"HDMI_CH1_MUX", "CH4", "HDMI"},
+	{"HDMI_CH1_MUX", "CH5", "HDMI"},
+	{"HDMI_CH1_MUX", "CH6", "HDMI"},
+	{"HDMI_CH1_MUX", "CH7", "HDMI"},
+
+	{"HDMI_CH2_MUX", "CH0", "HDMI"},
+	{"HDMI_CH2_MUX", "CH1", "HDMI"},
+	{"HDMI_CH2_MUX", "CH2", "HDMI"},
+	{"HDMI_CH2_MUX", "CH3", "HDMI"},
+	{"HDMI_CH2_MUX", "CH4", "HDMI"},
+	{"HDMI_CH2_MUX", "CH5", "HDMI"},
+	{"HDMI_CH2_MUX", "CH6", "HDMI"},
+	{"HDMI_CH2_MUX", "CH7", "HDMI"},
+
+	{"HDMI_CH3_MUX", "CH0", "HDMI"},
+	{"HDMI_CH3_MUX", "CH1", "HDMI"},
+	{"HDMI_CH3_MUX", "CH2", "HDMI"},
+	{"HDMI_CH3_MUX", "CH3", "HDMI"},
+	{"HDMI_CH3_MUX", "CH4", "HDMI"},
+	{"HDMI_CH3_MUX", "CH5", "HDMI"},
+	{"HDMI_CH3_MUX", "CH6", "HDMI"},
+	{"HDMI_CH3_MUX", "CH7", "HDMI"},
+
+	{"HDMI_CH4_MUX", "CH0", "HDMI"},
+	{"HDMI_CH4_MUX", "CH1", "HDMI"},
+	{"HDMI_CH4_MUX", "CH2", "HDMI"},
+	{"HDMI_CH4_MUX", "CH3", "HDMI"},
+	{"HDMI_CH4_MUX", "CH4", "HDMI"},
+	{"HDMI_CH4_MUX", "CH5", "HDMI"},
+	{"HDMI_CH4_MUX", "CH6", "HDMI"},
+	{"HDMI_CH4_MUX", "CH7", "HDMI"},
+
+	{"HDMI_CH5_MUX", "CH0", "HDMI"},
+	{"HDMI_CH5_MUX", "CH1", "HDMI"},
+	{"HDMI_CH5_MUX", "CH2", "HDMI"},
+	{"HDMI_CH5_MUX", "CH3", "HDMI"},
+	{"HDMI_CH5_MUX", "CH4", "HDMI"},
+	{"HDMI_CH5_MUX", "CH5", "HDMI"},
+	{"HDMI_CH5_MUX", "CH6", "HDMI"},
+	{"HDMI_CH5_MUX", "CH7", "HDMI"},
+
+	{"HDMI_CH6_MUX", "CH0", "HDMI"},
+	{"HDMI_CH6_MUX", "CH1", "HDMI"},
+	{"HDMI_CH6_MUX", "CH2", "HDMI"},
+	{"HDMI_CH6_MUX", "CH3", "HDMI"},
+	{"HDMI_CH6_MUX", "CH4", "HDMI"},
+	{"HDMI_CH6_MUX", "CH5", "HDMI"},
+	{"HDMI_CH6_MUX", "CH6", "HDMI"},
+	{"HDMI_CH6_MUX", "CH7", "HDMI"},
+
+	{"HDMI_CH7_MUX", "CH0", "HDMI"},
+	{"HDMI_CH7_MUX", "CH1", "HDMI"},
+	{"HDMI_CH7_MUX", "CH2", "HDMI"},
+	{"HDMI_CH7_MUX", "CH3", "HDMI"},
+	{"HDMI_CH7_MUX", "CH4", "HDMI"},
+	{"HDMI_CH7_MUX", "CH5", "HDMI"},
+	{"HDMI_CH7_MUX", "CH6", "HDMI"},
+	{"HDMI_CH7_MUX", "CH7", "HDMI"},
+
+	{"TDM", NULL, "HDMI_CH0_MUX"},
+	{"TDM", NULL, "HDMI_CH1_MUX"},
+	{"TDM", NULL, "HDMI_CH2_MUX"},
+	{"TDM", NULL, "HDMI_CH3_MUX"},
+	{"TDM", NULL, "HDMI_CH4_MUX"},
+	{"TDM", NULL, "HDMI_CH5_MUX"},
+	{"TDM", NULL, "HDMI_CH6_MUX"},
+	{"TDM", NULL, "HDMI_CH7_MUX"},
+
+	{"TDM", NULL, "aud_tdm_clk"},
+	{"TDM", NULL, "TDM_BCK"},
+	{"TDM_BCK", NULL, "TDM_MCK"},
+	{"TDM_MCK", NULL, APLL1_W_NAME, mtk_afe_tdm_apll_connect},
+	{"TDM_MCK", NULL, APLL2_W_NAME, mtk_afe_tdm_apll_connect},
+};
+
+/* dai ops */
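+/*
+ * MCLK must divide the selected APLL rate evenly. For example, assuming
+ * the 48 kHz-family APLL runs at 196.608 MHz, a 6.144 MHz MCLK is valid
+ * (196.608 / 6.144 = 32), while a 6 MHz request would be rejected.
+ */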
+static int mtk_dai_tdm_cal_mclk(struct mtk_base_afe *afe,
+				struct mtk_afe_tdm_priv *tdm_priv,
+				int freq)
+{
+	int apll;
+	int apll_rate;
+
+	apll = mt8183_get_apll_by_rate(afe, freq);
+	apll_rate = mt8183_get_apll_rate(afe, apll);
+
+	if (!freq || freq > apll_rate) {
+		dev_warn(afe->dev,
+			 "%s(), freq(%d Hz) invalid\n", __func__, freq);
+		return -EINVAL;
+	}
+
+	if (apll_rate % freq != 0) {
+		dev_warn(afe->dev,
+			 "%s(), APLL cannot generate %d Hz\n", __func__, freq);
+		return -EINVAL;
+	}
+
+	tdm_priv->mclk_rate = freq;
+	tdm_priv->mclk_apll = apll;
+
+	return 0;
+}
+
+static int mtk_dai_tdm_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params,
+				 struct snd_soc_dai *dai)
+{
+	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	int tdm_id = dai->id;
+	struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[tdm_id];
+	unsigned int rate = params_rate(params);
+	unsigned int channels = params_channels(params);
+	snd_pcm_format_t format = params_format(params);
+	unsigned int tdm_con = 0;
+
+	/* calculate mclk_rate, if not set explicitly */
+	if (!tdm_priv->mclk_rate) {
+		tdm_priv->mclk_rate = rate * tdm_priv->mclk_multiple;
+		mtk_dai_tdm_cal_mclk(afe,
+				     tdm_priv,
+				     tdm_priv->mclk_rate);
+	}
+
+	/* calculate bck */
+	tdm_priv->bck_rate = rate *
+			     channels *
+			     snd_pcm_format_physical_width(format);
+
+	if (tdm_priv->bck_rate > tdm_priv->mclk_rate)
+		dev_warn(afe->dev, "%s(), bck_rate > mclk_rate rate", __func__);
+
+	if (tdm_priv->mclk_rate % tdm_priv->bck_rate != 0)
+		dev_warn(afe->dev, "%s(), bck cannot generate", __func__);
+
+	dev_info(afe->dev, "%s(), id %d, rate %d, channels %d, format %d, mclk_rate %d, bck_rate %d\n",
+		 __func__,
+		 tdm_id, rate, channels, format,
+		 tdm_priv->mclk_rate, tdm_priv->bck_rate);
+
+	/* set tdm */
+	tdm_con = 1 << BCK_INVERSE_SFT;
+	tdm_con |= 1 << LRCK_INVERSE_SFT;
+	tdm_con |= 1 << DELAY_DATA_SFT;
+	tdm_con |= 1 << LEFT_ALIGN_SFT;
+	tdm_con |= get_tdm_wlen(format) << WLEN_SFT;
+	tdm_con |= get_tdm_ch(channels) << CHANNEL_NUM_SFT;
+	tdm_con |= get_tdm_channel_bck(format) << CHANNEL_BCK_CYCLES_SFT;
+	tdm_con |= get_tdm_lrck_width(format) << LRCK_TDM_WIDTH_SFT;
+	regmap_write(afe->regmap, AFE_TDM_CON1, tdm_con);
+
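+	/*
+	 * Route channel pairs to the TDM serial-out slots; pairs beyond
+	 * the stream's channel count output zero (TDM_CH_ZERO).
+	 */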
+	switch (channels) {
+	case 1:
+	case 2:
+		tdm_con = TDM_CH_START_O30_O31 << ST_CH_PAIR_SOUT0_SFT;
+		tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT1_SFT;
+		tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT2_SFT;
+		tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT3_SFT;
+		break;
+	case 3:
+	case 4:
+		tdm_con = TDM_CH_START_O30_O31 << ST_CH_PAIR_SOUT0_SFT;
+		tdm_con |= TDM_CH_START_O32_O33 << ST_CH_PAIR_SOUT1_SFT;
+		tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT2_SFT;
+		tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT3_SFT;
+		break;
+	case 5:
+	case 6:
+		tdm_con = TDM_CH_START_O30_O31 << ST_CH_PAIR_SOUT0_SFT;
+		tdm_con |= TDM_CH_START_O32_O33 << ST_CH_PAIR_SOUT1_SFT;
+		tdm_con |= TDM_CH_START_O34_O35 << ST_CH_PAIR_SOUT2_SFT;
+		tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT3_SFT;
+		break;
+	case 7:
+	case 8:
+		tdm_con = TDM_CH_START_O30_O31 << ST_CH_PAIR_SOUT0_SFT;
+		tdm_con |= TDM_CH_START_O32_O33 << ST_CH_PAIR_SOUT1_SFT;
+		tdm_con |= TDM_CH_START_O34_O35 << ST_CH_PAIR_SOUT2_SFT;
+		tdm_con |= TDM_CH_START_O36_O37 << ST_CH_PAIR_SOUT3_SFT;
+		break;
+	default:
+		tdm_con = 0;
+	}
+	regmap_write(afe->regmap, AFE_TDM_CON2, tdm_con);
+
+	regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0,
+			   AFE_HDMI_OUT_CH_NUM_MASK_SFT,
+			   channels << AFE_HDMI_OUT_CH_NUM_SFT);
+
+	regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0,
+			   AFE_HDMI_OUT_BIT_WIDTH_MASK_SFT,
+			   get_hdmi_wlen(format) << AFE_HDMI_OUT_BIT_WIDTH_SFT);
+	return 0;
+}
+
+static int mtk_dai_tdm_trigger(struct snd_pcm_substream *substream,
+			       int cmd,
+			       struct snd_soc_dai *dai)
+{
+	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		/* enable Out control */
+		regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0,
+				   AFE_HDMI_OUT_ON_MASK_SFT,
+				   0x1 << AFE_HDMI_OUT_ON_SFT);
+		/* enable tdm */
+		regmap_update_bits(afe->regmap, AFE_TDM_CON1,
+				   TDM_EN_MASK_SFT, 0x1 << TDM_EN_SFT);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		/* disable tdm */
+		regmap_update_bits(afe->regmap, AFE_TDM_CON1,
+				   TDM_EN_MASK_SFT, 0);
+		/* disable Out control */
+		regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0,
+				   AFE_HDMI_OUT_ON_MASK_SFT,
+				   0);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mtk_dai_tdm_set_sysclk(struct snd_soc_dai *dai,
+				  int clk_id, unsigned int freq, int dir)
+{
+	struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai->id];
+
+	if (!tdm_priv) {
+		dev_warn(afe->dev, "%s(), tdm_priv == NULL", __func__);
+		return -EINVAL;
+	}
+
+	if (dir != SND_SOC_CLOCK_OUT) {
+		dev_warn(afe->dev, "%s(), dir != SND_SOC_CLOCK_OUT", __func__);
+		return -EINVAL;
+	}
+
+	dev_info(afe->dev, "%s(), freq %d\n", __func__, freq);
+
+	return mtk_dai_tdm_cal_mclk(afe, tdm_priv, freq);
+}
+
+static const struct snd_soc_dai_ops mtk_dai_tdm_ops = {
+	.hw_params = mtk_dai_tdm_hw_params,
+	.trigger = mtk_dai_tdm_trigger,
+	.set_sysclk = mtk_dai_tdm_set_sysclk,
+};
+
+/* dai driver */
+#define MTK_TDM_RATES (SNDRV_PCM_RATE_8000_48000 |\
+		       SNDRV_PCM_RATE_88200 |\
+		       SNDRV_PCM_RATE_96000 |\
+		       SNDRV_PCM_RATE_176400 |\
+		       SNDRV_PCM_RATE_192000)
+
+#define MTK_TDM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+			 SNDRV_PCM_FMTBIT_S24_LE |\
+			 SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_tdm_driver[] = {
+	{
+		.name = "TDM",
+		.id = MT8183_DAI_TDM,
+		.playback = {
+			.stream_name = "TDM",
+			.channels_min = 2,
+			.channels_max = 8,
+			.rates = MTK_TDM_RATES,
+			.formats = MTK_TDM_FORMATS,
+		},
+		.ops = &mtk_dai_tdm_ops,
+	},
+};
+
+int mt8183_dai_tdm_register(struct mtk_base_afe *afe)
+{
+	struct mt8183_afe_private *afe_priv = afe->platform_priv;
+	struct mtk_afe_tdm_priv *tdm_priv;
+	struct mtk_base_afe_dai *dai;
+
+	dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+	if (!dai)
+		return -ENOMEM;
+
+	list_add(&dai->list, &afe->sub_dais);
+
+	dai->dai_drivers = mtk_dai_tdm_driver;
+	dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_tdm_driver);
+
+	dai->dapm_widgets = mtk_dai_tdm_widgets;
+	dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_tdm_widgets);
+	dai->dapm_routes = mtk_dai_tdm_routes;
+	dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_tdm_routes);
+
+	tdm_priv = devm_kzalloc(afe->dev, sizeof(struct mtk_afe_tdm_priv),
+				GFP_KERNEL);
+	if (!tdm_priv)
+		return -ENOMEM;
+
+	tdm_priv->mclk_multiple = 128;
+	tdm_priv->bck_id = MT8183_I2S4_BCK;
+	tdm_priv->mclk_id = MT8183_I2S4_MCK;
+
+	afe_priv->dai_priv[MT8183_DAI_TDM] = tdm_priv;
+	return 0;
+}
diff --git a/sound/soc/mediatek/mt8183/mt8183-interconnection.h b/sound/soc/mediatek/mt8183/mt8183-interconnection.h
new file mode 100644
index 0000000..6332f5f
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-interconnection.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MediaTek MT8183 audio driver interconnection definition
+ *
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+ */
+
+#ifndef _MT8183_INTERCONNECTION_H_
+#define _MT8183_INTERCONNECTION_H_
+
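+/*
+ * Input port indices of the AFE interconnection (AFE_CONNx) matrix;
+ * gaps in the numbering are ports this driver does not use.
+ */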
+#define I_I2S0_CH1 0
+#define I_I2S0_CH2 1
+#define I_ADDA_UL_CH1 3
+#define I_ADDA_UL_CH2 4
+#define I_DL1_CH1 5
+#define I_DL1_CH2 6
+#define I_DL2_CH1 7
+#define I_DL2_CH2 8
+#define I_PCM_1_CAP_CH1 9
+#define I_GAIN1_OUT_CH1 10
+#define I_GAIN1_OUT_CH2 11
+#define I_GAIN2_OUT_CH1 12
+#define I_GAIN2_OUT_CH2 13
+#define I_PCM_2_CAP_CH1 14
+#define I_PCM_2_CAP_CH2 21
+#define I_PCM_1_CAP_CH2 22
+#define I_DL3_CH1 23
+#define I_DL3_CH2 24
+#define I_I2S2_CH1 25
+#define I_I2S2_CH2 26
+
+#endif
diff --git a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
new file mode 100644
index 0000000..730da81
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt8183-mt6358-ts3a227-max98357.c  --
+//	MT8183-MT6358-TS3A227-MAX98357 ALSA SoC machine driver
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Shunli Wang <shunli.wang@mediatek.com>
+
+#include <linux/module.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "mt8183-afe-common.h"
+#include "../../codecs/ts3a227e.h"
+
+static struct snd_soc_jack headset_jack;
+
+/* Headset jack detection DAPM pins */
+static struct snd_soc_jack_pin headset_jack_pins[] = {
+	{
+		.pin = "Headphone",
+		.mask = SND_JACK_HEADPHONE,
+	},
+	{
+		.pin = "Headset Mic",
+		.mask = SND_JACK_MICROPHONE,
+	},
+};
+
+static int mt8183_mt6358_i2s_hw_params(struct snd_pcm_substream *substream,
+				       struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int rate = params_rate(params);
+	unsigned int mclk_fs_ratio = 128;
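+	/* MCLK is fixed at 128 * fs, e.g. 6.144 MHz for a 48 kHz stream */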
+	unsigned int mclk_fs = rate * mclk_fs_ratio;
+
+	return snd_soc_dai_set_sysclk(rtd->cpu_dai,
+				      0, mclk_fs, SND_SOC_CLOCK_OUT);
+}
+
+static const struct snd_soc_ops mt8183_mt6358_i2s_ops = {
+	.hw_params = mt8183_mt6358_i2s_hw_params,
+};
+
+static int mt8183_i2s_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+				      struct snd_pcm_hw_params *params)
+{
+	dev_dbg(rtd->dev, "%s(), fix format to 32bit\n", __func__);
+
+	/* fix BE i2s format to 32bit, clean param mask first */
+	snd_mask_reset_range(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT),
+			     0, SNDRV_PCM_FORMAT_LAST);
+
+	params_set_format(params, SNDRV_PCM_FORMAT_S32_LE);
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget
+mt8183_mt6358_ts3a227_max98357_dapm_widgets[] = {
+	SND_SOC_DAPM_OUTPUT("IT6505_8CH"),
+};
+
+static const struct snd_soc_dapm_route
+mt8183_mt6358_ts3a227_max98357_dapm_routes[] = {
+	{"IT6505_8CH", NULL, "TDM"},
+};
+
+static int
+mt8183_mt6358_ts3a227_max98357_bt_sco_startup(
+	struct snd_pcm_substream *substream)
+{
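+	/*
+	 * BT SCO links only carry 8/16 kHz, mono, 16-bit audio, so
+	 * constrain the FE runtime accordingly before hw_params.
+	 */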
+	static const unsigned int rates[] = {
+		8000, 16000
+	};
+	static const struct snd_pcm_hw_constraint_list constraints_rates = {
+		.count = ARRAY_SIZE(rates),
+		.list  = rates,
+		.mask = 0,
+	};
+	static const unsigned int channels[] = {
+		1,
+	};
+	static const struct snd_pcm_hw_constraint_list constraints_channels = {
+		.count = ARRAY_SIZE(channels),
+		.list = channels,
+		.mask = 0,
+	};
+
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	snd_pcm_hw_constraint_list(runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+	runtime->hw.channels_max = 1;
+	snd_pcm_hw_constraint_list(runtime, 0,
+			SNDRV_PCM_HW_PARAM_CHANNELS,
+			&constraints_channels);
+
+	runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+	snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
+	return 0;
+}
+
+static const struct snd_soc_ops mt8183_mt6358_ts3a227_max98357_bt_sco_ops = {
+	.startup = mt8183_mt6358_ts3a227_max98357_bt_sco_startup,
+};
+
+static struct snd_soc_dai_link
+mt8183_mt6358_ts3a227_max98357_dai_links[] = {
+	/* FE */
+	{
+		.name = "Playback_1",
+		.stream_name = "Playback_1",
+		.cpu_dai_name = "DL1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+	{
+		.name = "Playback_2",
+		.stream_name = "Playback_2",
+		.cpu_dai_name = "DL2",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.ops = &mt8183_mt6358_ts3a227_max98357_bt_sco_ops,
+	},
+	{
+		.name = "Playback_3",
+		.stream_name = "Playback_3",
+		.cpu_dai_name = "DL3",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+	{
+		.name = "Capture_1",
+		.stream_name = "Capture_1",
+		.cpu_dai_name = "UL1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.ops = &mt8183_mt6358_ts3a227_max98357_bt_sco_ops,
+	},
+	{
+		.name = "Capture_2",
+		.stream_name = "Capture_2",
+		.cpu_dai_name = "UL2",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "Capture_3",
+		.stream_name = "Capture_3",
+		.cpu_dai_name = "UL3",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "Capture_Mono_1",
+		.stream_name = "Capture_Mono_1",
+		.cpu_dai_name = "UL_MONO_1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "Playback_HDMI",
+		.stream_name = "Playback_HDMI",
+		.cpu_dai_name = "HDMI",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+	/* BE */
+	{
+		.name = "Primary Codec",
+		.cpu_dai_name = "ADDA",
+		.codec_dai_name = "mt6358-snd-codec-aif1",
+		.codec_name = "mt6358-sound",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = "PCM 1",
+		.cpu_dai_name = "PCM 1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = "PCM 2",
+		.cpu_dai_name = "PCM 2",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = "I2S0",
+		.cpu_dai_name = "I2S0",
+		.codec_dai_name = "bt-sco-pcm",
+		.codec_name = "bt-sco",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.ignore_suspend = 1,
+		.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+		.ops = &mt8183_mt6358_i2s_ops,
+	},
+	{
+		.name = "I2S1",
+		.cpu_dai_name = "I2S1",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.ignore_suspend = 1,
+		.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+		.ops = &mt8183_mt6358_i2s_ops,
+	},
+	{
+		.name = "I2S2",
+		.cpu_dai_name = "I2S2",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.ignore_suspend = 1,
+		.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+		.ops = &mt8183_mt6358_i2s_ops,
+	},
+	{
+		.name = "I2S3",
+		.cpu_dai_name = "I2S3",
+		.codec_dai_name = "HiFi",
+		.codec_name = "max98357a",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.ignore_suspend = 1,
+		.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+		.ops = &mt8183_mt6358_i2s_ops,
+	},
+	{
+		.name = "I2S5",
+		.cpu_dai_name = "I2S5",
+		.codec_dai_name = "bt-sco-pcm",
+		.codec_name = "bt-sco",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.ignore_suspend = 1,
+		.be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+		.ops = &mt8183_mt6358_i2s_ops,
+	},
+	{
+		.name = "TDM",
+		.cpu_dai_name = "TDM",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.ignore_suspend = 1,
+	},
+};
+
+static int
+mt8183_mt6358_ts3a227_max98357_headset_init(struct snd_soc_component *cpnt);
+
+static struct snd_soc_aux_dev mt8183_mt6358_ts3a227_max98357_headset_dev = {
+	.name = "Headset Chip",
+	.init = mt8183_mt6358_ts3a227_max98357_headset_init,
+};
+
+static struct snd_soc_card mt8183_mt6358_ts3a227_max98357_card = {
+	.name = "mt8183_mt6358_ts3a227_max98357",
+	.owner = THIS_MODULE,
+	.dai_link = mt8183_mt6358_ts3a227_max98357_dai_links,
+	.num_links = ARRAY_SIZE(mt8183_mt6358_ts3a227_max98357_dai_links),
+	.aux_dev = &mt8183_mt6358_ts3a227_max98357_headset_dev,
+	.num_aux_devs = 1,
+};
+
+static int
+mt8183_mt6358_ts3a227_max98357_headset_init(struct snd_soc_component *component)
+{
+	int ret;
+
+	/* Enable Headset and 4 Buttons Jack detection */
+	ret = snd_soc_card_jack_new(&mt8183_mt6358_ts3a227_max98357_card,
+				    "Headset Jack",
+				    SND_JACK_HEADSET |
+				    SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+				    SND_JACK_BTN_2 | SND_JACK_BTN_3,
+				    &headset_jack,
+				    headset_jack_pins,
+				    ARRAY_SIZE(headset_jack_pins));
+	if (ret)
+		return ret;
+
+	ret = ts3a227e_enable_jack_detect(component, &headset_jack);
+
+	return ret;
+}
+
+static int
+mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &mt8183_mt6358_ts3a227_max98357_card;
+	struct device_node *platform_node;
+	struct snd_soc_dai_link *dai_link;
+	struct pinctrl *default_pins;
+	int ret, i;
+
+	card->dev = &pdev->dev;
+
+	platform_node = of_parse_phandle(pdev->dev.of_node,
+					 "mediatek,platform", 0);
+	if (!platform_node) {
+		dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+		return -EINVAL;
+	}
+
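+	/* attach the AFE platform to every DAI link lacking an explicit one */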
+	for_each_card_prelinks(card, i, dai_link) {
+		if (dai_link->platform_name)
+			continue;
+		dai_link->platform_of_node = platform_node;
+	}
+
+	mt8183_mt6358_ts3a227_max98357_headset_dev.codec_of_node =
+		of_parse_phandle(pdev->dev.of_node,
+				 "mediatek,headset-codec", 0);
+	if (!mt8183_mt6358_ts3a227_max98357_headset_dev.codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'mediatek,headset-codec' missing/invalid\n");
+		return -EINVAL;
+	}
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret) {
+		dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	default_pins =
+		devm_pinctrl_get_select(&pdev->dev, PINCTRL_STATE_DEFAULT);
+	if (IS_ERR(default_pins)) {
+		dev_err(&pdev->dev, "%s set pins failed\n",
+			__func__);
+		return PTR_ERR(default_pins);
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mt8183_mt6358_ts3a227_max98357_dt_match[] = {
+	{.compatible = "mediatek,mt8183_mt6358_ts3a227_max98357",},
+	{}
+};
+#endif
+
+static struct platform_driver mt8183_mt6358_ts3a227_max98357_driver = {
+	.driver = {
+		.name = "mt8183_mt6358_ts3a227_max98357",
+#ifdef CONFIG_OF
+		.of_match_table = mt8183_mt6358_ts3a227_max98357_dt_match,
+#endif
+	},
+	.probe = mt8183_mt6358_ts3a227_max98357_dev_probe,
+};
+
+module_platform_driver(mt8183_mt6358_ts3a227_max98357_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT8183-MT6358-TS3A227-MAX98357 ALSA SoC machine driver");
+MODULE_AUTHOR("Shunli Wang <shunli.wang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("mt8183_mt6358_ts3a227_max98357 soc card");
diff --git a/sound/soc/mediatek/mt8183/mt8183-mt6358.c b/sound/soc/mediatek/mt8183/mt8183-mt6358.c
new file mode 100644
index 0000000..ad9c789
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-mt6358.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt8183-mt6358.c
+//	--  MT8183-MT6358 ALSA SoC machine driver
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/module.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "mt8183-afe-common.h"
+
+static int mt8183_mt6358_i2s_hw_params(struct snd_pcm_substream *substream,
+				       struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int rate = params_rate(params);
+	unsigned int mclk_fs_ratio = 128;
+	unsigned int mclk_fs = rate * mclk_fs_ratio;
+
+	return snd_soc_dai_set_sysclk(rtd->cpu_dai,
+				      0, mclk_fs, SND_SOC_CLOCK_OUT);
+}
+
+static const struct snd_soc_ops mt8183_mt6358_i2s_ops = {
+	.hw_params = mt8183_mt6358_i2s_hw_params,
+};
+
+static struct snd_soc_dai_link mt8183_mt6358_dai_links[] = {
+	/* FE */
+	{
+		.name = "Playback_1",
+		.stream_name = "Playback_1",
+		.cpu_dai_name = "DL1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+	{
+		.name = "Playback_2",
+		.stream_name = "Playback_2",
+		.cpu_dai_name = "DL2",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+	{
+		.name = "Playback_3",
+		.stream_name = "Playback_3",
+		.cpu_dai_name = "DL3",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
+	{
+		.name = "Capture_1",
+		.stream_name = "Capture_1",
+		.cpu_dai_name = "UL1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "Capture_2",
+		.stream_name = "Capture_2",
+		.cpu_dai_name = "UL2",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "Capture_3",
+		.stream_name = "Capture_3",
+		.cpu_dai_name = "UL3",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "Capture_Mono_1",
+		.stream_name = "Capture_Mono_1",
+		.cpu_dai_name = "UL_MONO_1",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+			    SND_SOC_DPCM_TRIGGER_PRE},
+		.dynamic = 1,
+		.dpcm_capture = 1,
+	},
+	/* BE */
+	{
+		.name = "Primary Codec",
+		.cpu_dai_name = "ADDA",
+		.codec_dai_name = "mt6358-snd-codec-aif1",
+		.codec_name = "mt6358-sound",
+		.no_pcm = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.ignore_suspend = 1,
+	},
+};
+
+static struct snd_soc_card mt8183_mt6358_card = {
+	.name = "mt8183-mt6358",
+	.owner = THIS_MODULE,
+	.dai_link = mt8183_mt6358_dai_links,
+	.num_links = ARRAY_SIZE(mt8183_mt6358_dai_links),
+};
+
+static int mt8183_mt6358_dev_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &mt8183_mt6358_card;
+	struct device_node *platform_node;
+	struct snd_soc_dai_link *dai_link;
+	struct pinctrl *default_pins;
+	int ret, i;
+
+	card->dev = &pdev->dev;
+
+	platform_node = of_parse_phandle(pdev->dev.of_node,
+					 "mediatek,platform", 0);
+	if (!platform_node) {
+		dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	for_each_card_prelinks(card, i, dai_link) {
+		if (dai_link->platform_name)
+			continue;
+		dai_link->platform_of_node = platform_node;
+	}
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret) {
+		dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	default_pins =
+		devm_pinctrl_get_select(&pdev->dev, PINCTRL_STATE_DEFAULT);
+	if (IS_ERR(default_pins)) {
+		dev_err(&pdev->dev, "%s set pins failed\n",
+			__func__);
+		return PTR_ERR(default_pins);
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mt8183_mt6358_dt_match[] = {
+	{.compatible = "mediatek,mt8183-mt6358-sound",},
+	{}
+};
+#endif
+
+static struct platform_driver mt8183_mt6358_driver = {
+	.driver = {
+		.name = "mt8183-mt6358",
+		.owner = THIS_MODULE,
+#ifdef CONFIG_OF
+		.of_match_table = mt8183_mt6358_dt_match,
+#endif
+	},
+	.probe = mt8183_mt6358_dev_probe,
+};
+
+module_platform_driver(mt8183_mt6358_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT8183-MT6358 ALSA SoC machine driver");
+MODULE_AUTHOR("KaiChieh Chuang <kaichieh.chuang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("mt8183_mt6358 sound card");
diff --git a/sound/soc/mediatek/mt8183/mt8183-reg.h b/sound/soc/mediatek/mt8183/mt8183-reg.h
new file mode 100644
index 0000000..e0482f2
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-reg.h
@@ -0,0 +1,1666 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mt8183-reg.h  --  MediaTek MT8183 audio driver reg definition
+ *
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+ */
+
+#ifndef _MT8183_REG_H_
+#define _MT8183_REG_H_
+
+#define AUDIO_TOP_CON0              0x0000
+#define AUDIO_TOP_CON1              0x0004
+#define AUDIO_TOP_CON3              0x000c
+#define AFE_DAC_CON0                0x0010
+#define AFE_DAC_CON1                0x0014
+#define AFE_I2S_CON                 0x0018
+#define AFE_DAIBT_CON0              0x001c
+#define AFE_CONN0                   0x0020
+#define AFE_CONN1                   0x0024
+#define AFE_CONN2                   0x0028
+#define AFE_CONN3                   0x002c
+#define AFE_CONN4                   0x0030
+#define AFE_I2S_CON1                0x0034
+#define AFE_I2S_CON2                0x0038
+#define AFE_MRGIF_CON               0x003c
+#define AFE_DL1_BASE                0x0040
+#define AFE_DL1_CUR                 0x0044
+#define AFE_DL1_END                 0x0048
+#define AFE_I2S_CON3                0x004c
+#define AFE_DL2_BASE                0x0050
+#define AFE_DL2_CUR                 0x0054
+#define AFE_DL2_END                 0x0058
+#define AFE_CONN5                   0x005c
+#define AFE_CONN_24BIT              0x006c
+#define AFE_AWB_BASE                0x0070
+#define AFE_AWB_END                 0x0078
+#define AFE_AWB_CUR                 0x007c
+#define AFE_VUL_BASE                0x0080
+#define AFE_VUL_END                 0x0088
+#define AFE_VUL_CUR                 0x008c
+#define AFE_CONN6                   0x00bc
+#define AFE_MEMIF_MSB               0x00cc
+#define AFE_MEMIF_MON0              0x00d0
+#define AFE_MEMIF_MON1              0x00d4
+#define AFE_MEMIF_MON2              0x00d8
+#define AFE_MEMIF_MON3              0x00dc
+#define AFE_MEMIF_MON4              0x00e0
+#define AFE_MEMIF_MON5              0x00e4
+#define AFE_MEMIF_MON6              0x00e8
+#define AFE_MEMIF_MON7              0x00ec
+#define AFE_MEMIF_MON8              0x00f0
+#define AFE_MEMIF_MON9              0x00f4
+#define AFE_ADDA_DL_SRC2_CON0       0x0108
+#define AFE_ADDA_DL_SRC2_CON1       0x010c
+#define AFE_ADDA_UL_SRC_CON0        0x0114
+#define AFE_ADDA_UL_SRC_CON1        0x0118
+#define AFE_ADDA_TOP_CON0           0x0120
+#define AFE_ADDA_UL_DL_CON0         0x0124
+#define AFE_ADDA_SRC_DEBUG          0x012c
+#define AFE_ADDA_SRC_DEBUG_MON0     0x0130
+#define AFE_ADDA_SRC_DEBUG_MON1     0x0134
+#define AFE_ADDA_UL_SRC_MON0        0x0148
+#define AFE_ADDA_UL_SRC_MON1        0x014c
+#define AFE_SIDETONE_DEBUG          0x01d0
+#define AFE_SIDETONE_MON            0x01d4
+#define AFE_SINEGEN_CON2            0x01dc
+#define AFE_SIDETONE_CON0           0x01e0
+#define AFE_SIDETONE_COEFF          0x01e4
+#define AFE_SIDETONE_CON1           0x01e8
+#define AFE_SIDETONE_GAIN           0x01ec
+#define AFE_SINEGEN_CON0            0x01f0
+#define AFE_TOP_CON0                0x0200
+#define AFE_BUS_CFG                 0x0240
+#define AFE_BUS_MON0                0x0244
+#define AFE_ADDA_PREDIS_CON0        0x0260
+#define AFE_ADDA_PREDIS_CON1        0x0264
+#define AFE_MRGIF_MON0              0x0270
+#define AFE_MRGIF_MON1              0x0274
+#define AFE_MRGIF_MON2              0x0278
+#define AFE_I2S_MON                 0x027c
+#define AFE_ADDA_IIR_COEF_02_01     0x0290
+#define AFE_ADDA_IIR_COEF_04_03     0x0294
+#define AFE_ADDA_IIR_COEF_06_05     0x0298
+#define AFE_ADDA_IIR_COEF_08_07     0x029c
+#define AFE_ADDA_IIR_COEF_10_09     0x02a0
+#define AFE_DAC_CON2                0x02e0
+#define AFE_IRQ_MCU_CON1            0x02e4
+#define AFE_IRQ_MCU_CON2            0x02e8
+#define AFE_DAC_MON                 0x02ec
+#define AFE_VUL2_BASE               0x02f0
+#define AFE_VUL2_END                0x02f8
+#define AFE_VUL2_CUR                0x02fc
+#define AFE_IRQ_MCU_CNT0            0x0300
+#define AFE_IRQ_MCU_CNT6            0x0304
+#define AFE_IRQ_MCU_CNT8            0x0308
+#define AFE_IRQ_MCU_EN1             0x030c
+#define AFE_IRQ0_MCU_CNT_MON        0x0310
+#define AFE_IRQ6_MCU_CNT_MON        0x0314
+#define AFE_MOD_DAI_BASE            0x0330
+#define AFE_MOD_DAI_END             0x0338
+#define AFE_MOD_DAI_CUR             0x033c
+#define AFE_VUL_D2_BASE             0x0350
+#define AFE_VUL_D2_END              0x0358
+#define AFE_VUL_D2_CUR              0x035c
+#define AFE_DL3_BASE                0x0360
+#define AFE_DL3_CUR                 0x0364
+#define AFE_DL3_END                 0x0368
+#define AFE_HDMI_OUT_CON0           0x0370
+#define AFE_HDMI_OUT_BASE           0x0374
+#define AFE_HDMI_OUT_CUR            0x0378
+#define AFE_HDMI_OUT_END            0x037c
+#define AFE_HDMI_CONN0              0x0390
+#define AFE_IRQ3_MCU_CNT_MON        0x0398
+#define AFE_IRQ4_MCU_CNT_MON        0x039c
+#define AFE_IRQ_MCU_CON0            0x03a0
+#define AFE_IRQ_MCU_STATUS          0x03a4
+#define AFE_IRQ_MCU_CLR             0x03a8
+#define AFE_IRQ_MCU_CNT1            0x03ac
+#define AFE_IRQ_MCU_CNT2            0x03b0
+#define AFE_IRQ_MCU_EN              0x03b4
+#define AFE_IRQ_MCU_MON2            0x03b8
+#define AFE_IRQ_MCU_CNT5            0x03bc
+#define AFE_IRQ1_MCU_CNT_MON        0x03c0
+#define AFE_IRQ2_MCU_CNT_MON        0x03c4
+#define AFE_IRQ1_MCU_EN_CNT_MON     0x03c8
+#define AFE_IRQ5_MCU_CNT_MON        0x03cc
+#define AFE_MEMIF_MINLEN            0x03d0
+#define AFE_MEMIF_MAXLEN            0x03d4
+#define AFE_MEMIF_PBUF_SIZE         0x03d8
+#define AFE_IRQ_MCU_CNT7            0x03dc
+#define AFE_IRQ7_MCU_CNT_MON        0x03e0
+#define AFE_IRQ_MCU_CNT3            0x03e4
+#define AFE_IRQ_MCU_CNT4            0x03e8
+#define AFE_IRQ_MCU_CNT11           0x03ec
+#define AFE_APLL1_TUNER_CFG         0x03f0
+#define AFE_APLL2_TUNER_CFG         0x03f4
+#define AFE_MEMIF_HD_MODE           0x03f8
+#define AFE_MEMIF_HDALIGN           0x03fc
+#define AFE_CONN33                  0x0408
+#define AFE_IRQ_MCU_CNT12           0x040c
+#define AFE_GAIN1_CON0              0x0410
+#define AFE_GAIN1_CON1              0x0414
+#define AFE_GAIN1_CON2              0x0418
+#define AFE_GAIN1_CON3              0x041c
+#define AFE_CONN7                   0x0420
+#define AFE_GAIN1_CUR               0x0424
+#define AFE_GAIN2_CON0              0x0428
+#define AFE_GAIN2_CON1              0x042c
+#define AFE_GAIN2_CON2              0x0430
+#define AFE_GAIN2_CON3              0x0434
+#define AFE_CONN8                   0x0438
+#define AFE_GAIN2_CUR               0x043c
+#define AFE_CONN9                   0x0440
+#define AFE_CONN10                  0x0444
+#define AFE_CONN11                  0x0448
+#define AFE_CONN12                  0x044c
+#define AFE_CONN13                  0x0450
+#define AFE_CONN14                  0x0454
+#define AFE_CONN15                  0x0458
+#define AFE_CONN16                  0x045c
+#define AFE_CONN17                  0x0460
+#define AFE_CONN18                  0x0464
+#define AFE_CONN19                  0x0468
+#define AFE_CONN20                  0x046c
+#define AFE_CONN21                  0x0470
+#define AFE_CONN22                  0x0474
+#define AFE_CONN23                  0x0478
+#define AFE_CONN24                  0x047c
+#define AFE_CONN_RS                 0x0494
+#define AFE_CONN_DI                 0x0498
+#define AFE_CONN25                  0x04b0
+#define AFE_CONN26                  0x04b4
+#define AFE_CONN27                  0x04b8
+#define AFE_CONN28                  0x04bc
+#define AFE_CONN29                  0x04c0
+#define AFE_CONN30                  0x04c4
+#define AFE_CONN31                  0x04c8
+#define AFE_CONN32                  0x04cc
+#define AFE_SRAM_DELSEL_CON0        0x04f0
+#define AFE_SRAM_DELSEL_CON2        0x04f8
+#define AFE_SRAM_DELSEL_CON3        0x04fc
+#define AFE_ASRC_2CH_CON12          0x0528
+#define AFE_ASRC_2CH_CON13          0x052c
+#define PCM_INTF_CON1               0x0530
+#define PCM_INTF_CON2               0x0538
+#define PCM2_INTF_CON               0x053c
+#define AFE_TDM_CON1                0x0548
+#define AFE_TDM_CON2                0x054c
+#define AFE_CONN34                  0x0580
+#define FPGA_CFG0                   0x05b0
+#define FPGA_CFG1                   0x05b4
+#define FPGA_CFG2                   0x05c0
+#define FPGA_CFG3                   0x05c4
+#define AUDIO_TOP_DBG_CON           0x05c8
+#define AUDIO_TOP_DBG_MON0          0x05cc
+#define AUDIO_TOP_DBG_MON1          0x05d0
+#define AFE_IRQ8_MCU_CNT_MON        0x05e4
+#define AFE_IRQ11_MCU_CNT_MON       0x05e8
+#define AFE_IRQ12_MCU_CNT_MON       0x05ec
+#define AFE_GENERAL_REG0            0x0800
+#define AFE_GENERAL_REG1            0x0804
+#define AFE_GENERAL_REG2            0x0808
+#define AFE_GENERAL_REG3            0x080c
+#define AFE_GENERAL_REG4            0x0810
+#define AFE_GENERAL_REG5            0x0814
+#define AFE_GENERAL_REG6            0x0818
+#define AFE_GENERAL_REG7            0x081c
+#define AFE_GENERAL_REG8            0x0820
+#define AFE_GENERAL_REG9            0x0824
+#define AFE_GENERAL_REG10           0x0828
+#define AFE_GENERAL_REG11           0x082c
+#define AFE_GENERAL_REG12           0x0830
+#define AFE_GENERAL_REG13           0x0834
+#define AFE_GENERAL_REG14           0x0838
+#define AFE_GENERAL_REG15           0x083c
+#define AFE_CBIP_CFG0               0x0840
+#define AFE_CBIP_MON0               0x0844
+#define AFE_CBIP_SLV_MUX_MON0       0x0848
+#define AFE_CBIP_SLV_DECODER_MON0   0x084c
+#define AFE_CONN0_1                 0x0900
+#define AFE_CONN1_1                 0x0904
+#define AFE_CONN2_1                 0x0908
+#define AFE_CONN3_1                 0x090c
+#define AFE_CONN4_1                 0x0910
+#define AFE_CONN5_1                 0x0914
+#define AFE_CONN6_1                 0x0918
+#define AFE_CONN7_1                 0x091c
+#define AFE_CONN8_1                 0x0920
+#define AFE_CONN9_1                 0x0924
+#define AFE_CONN10_1                0x0928
+#define AFE_CONN11_1                0x092c
+#define AFE_CONN12_1                0x0930
+#define AFE_CONN13_1                0x0934
+#define AFE_CONN14_1                0x0938
+#define AFE_CONN15_1                0x093c
+#define AFE_CONN16_1                0x0940
+#define AFE_CONN17_1                0x0944
+#define AFE_CONN18_1                0x0948
+#define AFE_CONN19_1                0x094c
+#define AFE_CONN20_1                0x0950
+#define AFE_CONN21_1                0x0954
+#define AFE_CONN22_1                0x0958
+#define AFE_CONN23_1                0x095c
+#define AFE_CONN24_1                0x0960
+#define AFE_CONN25_1                0x0964
+#define AFE_CONN26_1                0x0968
+#define AFE_CONN27_1                0x096c
+#define AFE_CONN28_1                0x0970
+#define AFE_CONN29_1                0x0974
+#define AFE_CONN30_1                0x0978
+#define AFE_CONN31_1                0x097c
+#define AFE_CONN32_1                0x0980
+#define AFE_CONN33_1                0x0984
+#define AFE_CONN34_1                0x0988
+#define AFE_CONN_RS_1               0x098c
+#define AFE_CONN_DI_1               0x0990
+#define AFE_CONN_24BIT_1            0x0994
+#define AFE_CONN_REG                0x0998
+#define AFE_CONN35                  0x09a0
+#define AFE_CONN36                  0x09a4
+#define AFE_CONN37                  0x09a8
+#define AFE_CONN38                  0x09ac
+#define AFE_CONN35_1                0x09b0
+#define AFE_CONN36_1                0x09b4
+#define AFE_CONN37_1                0x09b8
+#define AFE_CONN38_1                0x09bc
+#define AFE_CONN39                  0x09c0
+#define AFE_CONN40                  0x09c4
+#define AFE_CONN41                  0x09c8
+#define AFE_CONN42                  0x09cc
+#define AFE_CONN39_1                0x09e0
+#define AFE_CONN40_1                0x09e4
+#define AFE_CONN41_1                0x09e8
+#define AFE_CONN42_1                0x09ec
+#define AFE_I2S_CON4                0x09f8
+#define AFE_ADDA6_TOP_CON0          0x0a80
+#define AFE_ADDA6_UL_SRC_CON0       0x0a84
+#define AFE_ADD6_UL_SRC_CON1        0x0a88
+#define AFE_ADDA6_SRC_DEBUG         0x0a8c
+#define AFE_ADDA6_SRC_DEBUG_MON0    0x0a90
+#define AFE_ADDA6_ULCF_CFG_02_01    0x0aa0
+#define AFE_ADDA6_ULCF_CFG_04_03    0x0aa4
+#define AFE_ADDA6_ULCF_CFG_06_05    0x0aa8
+#define AFE_ADDA6_ULCF_CFG_08_07    0x0aac
+#define AFE_ADDA6_ULCF_CFG_10_09    0x0ab0
+#define AFE_ADDA6_ULCF_CFG_12_11    0x0ab4
+#define AFE_ADDA6_ULCF_CFG_14_13    0x0ab8
+#define AFE_ADDA6_ULCF_CFG_16_15    0x0abc
+#define AFE_ADDA6_ULCF_CFG_18_17    0x0ac0
+#define AFE_ADDA6_ULCF_CFG_20_19    0x0ac4
+#define AFE_ADDA6_ULCF_CFG_22_21    0x0ac8
+#define AFE_ADDA6_ULCF_CFG_24_23    0x0acc
+#define AFE_ADDA6_ULCF_CFG_26_25    0x0ad0
+#define AFE_ADDA6_ULCF_CFG_28_27    0x0ad4
+#define AFE_ADDA6_ULCF_CFG_30_29    0x0ad8
+#define AFE_ADD6A_UL_SRC_MON0       0x0ae4
+#define AFE_ADDA6_UL_SRC_MON1       0x0ae8
+#define AFE_CONN43                  0x0af8
+#define AFE_CONN43_1                0x0afc
+#define AFE_DL1_BASE_MSB            0x0b00
+#define AFE_DL1_CUR_MSB             0x0b04
+#define AFE_DL1_END_MSB             0x0b08
+#define AFE_DL2_BASE_MSB            0x0b10
+#define AFE_DL2_CUR_MSB             0x0b14
+#define AFE_DL2_END_MSB             0x0b18
+#define AFE_AWB_BASE_MSB            0x0b20
+#define AFE_AWB_END_MSB             0x0b28
+#define AFE_AWB_CUR_MSB             0x0b2c
+#define AFE_VUL_BASE_MSB            0x0b30
+#define AFE_VUL_END_MSB             0x0b38
+#define AFE_VUL_CUR_MSB             0x0b3c
+#define AFE_VUL2_BASE_MSB           0x0b50
+#define AFE_VUL2_END_MSB            0x0b58
+#define AFE_VUL2_CUR_MSB            0x0b5c
+#define AFE_MOD_DAI_BASE_MSB        0x0b60
+#define AFE_MOD_DAI_END_MSB         0x0b68
+#define AFE_MOD_DAI_CUR_MSB         0x0b6c
+#define AFE_VUL_D2_BASE_MSB         0x0b80
+#define AFE_VUL_D2_END_MSB          0x0b88
+#define AFE_VUL_D2_CUR_MSB          0x0b8c
+#define AFE_DL3_BASE_MSB            0x0b90
+#define AFE_DL3_CUR_MSB             0x0b94
+#define AFE_DL3_END_MSB             0x0b98
+#define AFE_HDMI_OUT_BASE_MSB       0x0ba4
+#define AFE_HDMI_OUT_CUR_MSB        0x0ba8
+#define AFE_HDMI_OUT_END_MSB        0x0bac
+#define AFE_AWB2_BASE               0x0bd0
+#define AFE_AWB2_END                0x0bd8
+#define AFE_AWB2_CUR                0x0bdc
+#define AFE_AWB2_BASE_MSB           0x0be0
+#define AFE_AWB2_END_MSB            0x0be8
+#define AFE_AWB2_CUR_MSB            0x0bec
+#define AFE_ADDA_DL_SDM_DCCOMP_CON  0x0c50
+#define AFE_ADDA_DL_SDM_TEST        0x0c54
+#define AFE_ADDA_DL_DC_COMP_CFG0    0x0c58
+#define AFE_ADDA_DL_DC_COMP_CFG1    0x0c5c
+#define AFE_ADDA_DL_SDM_FIFO_MON    0x0c60
+#define AFE_ADDA_DL_SRC_LCH_MON     0x0c64
+#define AFE_ADDA_DL_SRC_RCH_MON     0x0c68
+#define AFE_ADDA_DL_SDM_OUT_MON     0x0c6c
+#define AFE_CONNSYS_I2S_CON         0x0c78
+#define AFE_CONNSYS_I2S_MON         0x0c7c
+#define AFE_ASRC_2CH_CON0           0x0c80
+#define AFE_ASRC_2CH_CON1           0x0c84
+#define AFE_ASRC_2CH_CON2           0x0c88
+#define AFE_ASRC_2CH_CON3           0x0c8c
+#define AFE_ASRC_2CH_CON4           0x0c90
+#define AFE_ASRC_2CH_CON5           0x0c94
+#define AFE_ASRC_2CH_CON6           0x0c98
+#define AFE_ASRC_2CH_CON7           0x0c9c
+#define AFE_ASRC_2CH_CON8           0x0ca0
+#define AFE_ASRC_2CH_CON9           0x0ca4
+#define AFE_ASRC_2CH_CON10          0x0ca8
+#define AFE_ADDA6_IIR_COEF_02_01    0x0ce0
+#define AFE_ADDA6_IIR_COEF_04_03    0x0ce4
+#define AFE_ADDA6_IIR_COEF_06_05    0x0ce8
+#define AFE_ADDA6_IIR_COEF_08_07    0x0cec
+#define AFE_ADDA6_IIR_COEF_10_09    0x0cf0
+#define AFE_ADDA_PREDIS_CON2        0x0d40
+#define AFE_ADDA_PREDIS_CON3        0x0d44
+#define AFE_MEMIF_MON12             0x0d70
+#define AFE_MEMIF_MON13             0x0d74
+#define AFE_MEMIF_MON14             0x0d78
+#define AFE_MEMIF_MON15             0x0d7c
+#define AFE_MEMIF_MON16             0x0d80
+#define AFE_MEMIF_MON17             0x0d84
+#define AFE_MEMIF_MON18             0x0d88
+#define AFE_MEMIF_MON19             0x0d8c
+#define AFE_MEMIF_MON20             0x0d90
+#define AFE_MEMIF_MON21             0x0d94
+#define AFE_MEMIF_MON22             0x0d98
+#define AFE_MEMIF_MON23             0x0d9c
+#define AFE_MEMIF_MON24             0x0da0
+#define AFE_HD_ENGEN_ENABLE         0x0dd0
+#define AFE_ADDA_MTKAIF_CFG0        0x0e00
+#define AFE_ADDA_MTKAIF_TX_CFG1     0x0e14
+#define AFE_ADDA_MTKAIF_RX_CFG0     0x0e20
+#define AFE_ADDA_MTKAIF_RX_CFG1     0x0e24
+#define AFE_ADDA_MTKAIF_RX_CFG2     0x0e28
+#define AFE_ADDA_MTKAIF_MON0        0x0e34
+#define AFE_ADDA_MTKAIF_MON1        0x0e38
+#define AFE_AUD_PAD_TOP             0x0e40
+#define AFE_GENERAL1_ASRC_2CH_CON0  0x0e80
+#define AFE_GENERAL1_ASRC_2CH_CON1  0x0e84
+#define AFE_GENERAL1_ASRC_2CH_CON2  0x0e88
+#define AFE_GENERAL1_ASRC_2CH_CON3  0x0e8c
+#define AFE_GENERAL1_ASRC_2CH_CON4  0x0e90
+#define AFE_GENERAL1_ASRC_2CH_CON5  0x0e94
+#define AFE_GENERAL1_ASRC_2CH_CON6  0x0e98
+#define AFE_GENERAL1_ASRC_2CH_CON7  0x0e9c
+#define AFE_GENERAL1_ASRC_2CH_CON8  0x0ea0
+#define AFE_GENERAL1_ASRC_2CH_CON9  0x0ea4
+#define AFE_GENERAL1_ASRC_2CH_CON10 0x0ea8
+#define AFE_GENERAL1_ASRC_2CH_CON12 0x0eb0
+#define AFE_GENERAL1_ASRC_2CH_CON13 0x0eb4
+#define GENERAL_ASRC_MODE           0x0eb8
+#define GENERAL_ASRC_EN_ON          0x0ebc
+#define AFE_GENERAL2_ASRC_2CH_CON0  0x0f00
+#define AFE_GENERAL2_ASRC_2CH_CON1  0x0f04
+#define AFE_GENERAL2_ASRC_2CH_CON2  0x0f08
+#define AFE_GENERAL2_ASRC_2CH_CON3  0x0f0c
+#define AFE_GENERAL2_ASRC_2CH_CON4  0x0f10
+#define AFE_GENERAL2_ASRC_2CH_CON5  0x0f14
+#define AFE_GENERAL2_ASRC_2CH_CON6  0x0f18
+#define AFE_GENERAL2_ASRC_2CH_CON7  0x0f1c
+#define AFE_GENERAL2_ASRC_2CH_CON8  0x0f20
+#define AFE_GENERAL2_ASRC_2CH_CON9  0x0f24
+#define AFE_GENERAL2_ASRC_2CH_CON10 0x0f28
+#define AFE_GENERAL2_ASRC_2CH_CON12 0x0f30
+#define AFE_GENERAL2_ASRC_2CH_CON13 0x0f34
+
+#define AFE_MAX_REGISTER AFE_GENERAL2_ASRC_2CH_CON13
+#define AFE_IRQ_STATUS_BITS 0x1fff
+
+/* AFE_DAC_CON0 */
+#define AWB2_ON_SFT                                   29
+#define AWB2_ON_MASK                                  0x1
+#define AWB2_ON_MASK_SFT                              (0x1 << 29)
+#define VUL2_ON_SFT                                   27
+#define VUL2_ON_MASK                                  0x1
+#define VUL2_ON_MASK_SFT                              (0x1 << 27)
+#define MOD_DAI_DUP_WR_SFT                            26
+#define MOD_DAI_DUP_WR_MASK                           0x1
+#define MOD_DAI_DUP_WR_MASK_SFT                       (0x1 << 26)
+#define VUL12_MODE_SFT                                20
+#define VUL12_MODE_MASK                               0xf
+#define VUL12_MODE_MASK_SFT                           (0xf << 20)
+#define VUL12_R_MONO_SFT                              11
+#define VUL12_R_MONO_MASK                             0x1
+#define VUL12_R_MONO_MASK_SFT                         (0x1 << 11)
+#define VUL12_MONO_SFT                                10
+#define VUL12_MONO_MASK                               0x1
+#define VUL12_MONO_MASK_SFT                           (0x1 << 10)
+#define VUL12_ON_SFT                                  9
+#define VUL12_ON_MASK                                 0x1
+#define VUL12_ON_MASK_SFT                             (0x1 << 9)
+#define MOD_DAI_ON_SFT                                7
+#define MOD_DAI_ON_MASK                               0x1
+#define MOD_DAI_ON_MASK_SFT                           (0x1 << 7)
+#define AWB_ON_SFT                                    6
+#define AWB_ON_MASK                                   0x1
+#define AWB_ON_MASK_SFT                               (0x1 << 6)
+#define DL3_ON_SFT                                    5
+#define DL3_ON_MASK                                   0x1
+#define DL3_ON_MASK_SFT                               (0x1 << 5)
+#define VUL_ON_SFT                                    3
+#define VUL_ON_MASK                                   0x1
+#define VUL_ON_MASK_SFT                               (0x1 << 3)
+#define DL2_ON_SFT                                    2
+#define DL2_ON_MASK                                   0x1
+#define DL2_ON_MASK_SFT                               (0x1 << 2)
+#define DL1_ON_SFT                                    1
+#define DL1_ON_MASK                                   0x1
+#define DL1_ON_MASK_SFT                               (0x1 << 1)
+#define AFE_ON_SFT                                    0
+#define AFE_ON_MASK                                   0x1
+#define AFE_ON_MASK_SFT                               (0x1 << 0)
+
+/* AFE_DAC_CON1 */
+#define MOD_DAI_MODE_SFT                              30
+#define MOD_DAI_MODE_MASK                             0x3
+#define MOD_DAI_MODE_MASK_SFT                         (0x3 << 30)
+#define VUL_R_MONO_SFT                                28
+#define VUL_R_MONO_MASK                               0x1
+#define VUL_R_MONO_MASK_SFT                           (0x1 << 28)
+#define VUL_DATA_SFT                                  27
+#define VUL_DATA_MASK                                 0x1
+#define VUL_DATA_MASK_SFT                             (0x1 << 27)
+#define AWB_R_MONO_SFT                                25
+#define AWB_R_MONO_MASK                               0x1
+#define AWB_R_MONO_MASK_SFT                           (0x1 << 25)
+#define AWB_DATA_SFT                                  24
+#define AWB_DATA_MASK                                 0x1
+#define AWB_DATA_MASK_SFT                             (0x1 << 24)
+#define DL3_DATA_SFT                                  23
+#define DL3_DATA_MASK                                 0x1
+#define DL3_DATA_MASK_SFT                             (0x1 << 23)
+#define DL2_DATA_SFT                                  22
+#define DL2_DATA_MASK                                 0x1
+#define DL2_DATA_MASK_SFT                             (0x1 << 22)
+#define DL1_DATA_SFT                                  21
+#define DL1_DATA_MASK                                 0x1
+#define DL1_DATA_MASK_SFT                             (0x1 << 21)
+#define VUL_MODE_SFT                                  16
+#define VUL_MODE_MASK                                 0xf
+#define VUL_MODE_MASK_SFT                             (0xf << 16)
+#define AWB_MODE_SFT                                  12
+#define AWB_MODE_MASK                                 0xf
+#define AWB_MODE_MASK_SFT                             (0xf << 12)
+#define I2S_MODE_SFT                                  8
+#define I2S_MODE_MASK                                 0xf
+#define I2S_MODE_MASK_SFT                             (0xf << 8)
+#define DL2_MODE_SFT                                  4
+#define DL2_MODE_MASK                                 0xf
+#define DL2_MODE_MASK_SFT                             (0xf << 4)
+#define DL1_MODE_SFT                                  0
+#define DL1_MODE_MASK                                 0xf
+#define DL1_MODE_MASK_SFT                             (0xf << 0)
+
+/* AFE_DAC_CON2 */
+#define AWB2_R_MONO_SFT                               21
+#define AWB2_R_MONO_MASK                              0x1
+#define AWB2_R_MONO_MASK_SFT                          (0x1 << 21)
+#define AWB2_DATA_SFT                                 20
+#define AWB2_DATA_MASK                                0x1
+#define AWB2_DATA_MASK_SFT                            (0x1 << 20)
+#define AWB2_MODE_SFT                                 16
+#define AWB2_MODE_MASK                                0xf
+#define AWB2_MODE_MASK_SFT                            (0xf << 16)
+#define DL3_MODE_SFT                                  8
+#define DL3_MODE_MASK                                 0xf
+#define DL3_MODE_MASK_SFT                             (0xf << 8)
+#define VUL2_MODE_SFT                                 4
+#define VUL2_MODE_MASK                                0xf
+#define VUL2_MODE_MASK_SFT                            (0xf << 4)
+#define VUL2_R_MONO_SFT                               1
+#define VUL2_R_MONO_MASK                              0x1
+#define VUL2_R_MONO_MASK_SFT                          (0x1 << 1)
+#define VUL2_DATA_SFT                                 0
+#define VUL2_DATA_MASK                                0x1
+#define VUL2_DATA_MASK_SFT                            (0x1 << 0)
+
+/* AFE_DAC_MON */
+#define AFE_ON_RETM_SFT                               0
+#define AFE_ON_RETM_MASK                              0x1
+#define AFE_ON_RETM_MASK_SFT                          (0x1 << 0)
+
+/* AFE_I2S_CON */
+#define BCK_NEG_EG_LATCH_SFT                          30
+#define BCK_NEG_EG_LATCH_MASK                         0x1
+#define BCK_NEG_EG_LATCH_MASK_SFT                     (0x1 << 30)
+#define BCK_INV_SFT                                   29
+#define BCK_INV_MASK                                  0x1
+#define BCK_INV_MASK_SFT                              (0x1 << 29)
+#define I2SIN_PAD_SEL_SFT                             28
+#define I2SIN_PAD_SEL_MASK                            0x1
+#define I2SIN_PAD_SEL_MASK_SFT                        (0x1 << 28)
+#define I2S_LOOPBACK_SFT                              20
+#define I2S_LOOPBACK_MASK                             0x1
+#define I2S_LOOPBACK_MASK_SFT                         (0x1 << 20)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT             17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK            0x1
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT        (0x1 << 17)
+#define I2S1_HD_EN_SFT                                12
+#define I2S1_HD_EN_MASK                               0x1
+#define I2S1_HD_EN_MASK_SFT                           (0x1 << 12)
+#define INV_PAD_CTRL_SFT                              7
+#define INV_PAD_CTRL_MASK                             0x1
+#define INV_PAD_CTRL_MASK_SFT                         (0x1 << 7)
+#define I2S_BYPSRC_SFT                                6
+#define I2S_BYPSRC_MASK                               0x1
+#define I2S_BYPSRC_MASK_SFT                           (0x1 << 6)
+#define INV_LRCK_SFT                                  5
+#define INV_LRCK_MASK                                 0x1
+#define INV_LRCK_MASK_SFT                             (0x1 << 5)
+#define I2S_FMT_SFT                                   3
+#define I2S_FMT_MASK                                  0x1
+#define I2S_FMT_MASK_SFT                              (0x1 << 3)
+#define I2S_SRC_SFT                                   2
+#define I2S_SRC_MASK                                  0x1
+#define I2S_SRC_MASK_SFT                              (0x1 << 2)
+#define I2S_WLEN_SFT                                  1
+#define I2S_WLEN_MASK                                 0x1
+#define I2S_WLEN_MASK_SFT                             (0x1 << 1)
+#define I2S_EN_SFT                                    0
+#define I2S_EN_MASK                                   0x1
+#define I2S_EN_MASK_SFT                               (0x1 << 0)
+
+/* AFE_I2S_CON1 */
+#define I2S2_LR_SWAP_SFT                              31
+#define I2S2_LR_SWAP_MASK                             0x1
+#define I2S2_LR_SWAP_MASK_SFT                         (0x1 << 31)
+#define I2S2_SEL_O19_O20_SFT                          18
+#define I2S2_SEL_O19_O20_MASK                         0x1
+#define I2S2_SEL_O19_O20_MASK_SFT                     (0x1 << 18)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT             17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK            0x1
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT        (0x1 << 17)
+#define I2S2_SEL_O03_O04_SFT                          16
+#define I2S2_SEL_O03_O04_MASK                         0x1
+#define I2S2_SEL_O03_O04_MASK_SFT                     (0x1 << 16)
+#define I2S2_32BIT_EN_SFT                             13
+#define I2S2_32BIT_EN_MASK                            0x1
+#define I2S2_32BIT_EN_MASK_SFT                        (0x1 << 13)
+#define I2S2_HD_EN_SFT                                12
+#define I2S2_HD_EN_MASK                               0x1
+#define I2S2_HD_EN_MASK_SFT                           (0x1 << 12)
+#define I2S2_OUT_MODE_SFT                             8
+#define I2S2_OUT_MODE_MASK                            0xf
+#define I2S2_OUT_MODE_MASK_SFT                        (0xf << 8)
+#define INV_LRCK_SFT                                  5
+#define INV_LRCK_MASK                                 0x1
+#define INV_LRCK_MASK_SFT                             (0x1 << 5)
+#define I2S2_FMT_SFT                                  3
+#define I2S2_FMT_MASK                                 0x1
+#define I2S2_FMT_MASK_SFT                             (0x1 << 3)
+#define I2S2_WLEN_SFT                                 1
+#define I2S2_WLEN_MASK                                0x1
+#define I2S2_WLEN_MASK_SFT                            (0x1 << 1)
+#define I2S2_EN_SFT                                   0
+#define I2S2_EN_MASK                                  0x1
+#define I2S2_EN_MASK_SFT                              (0x1 << 0)
+
+/* AFE_I2S_CON2 */
+#define I2S3_LR_SWAP_SFT                              31
+#define I2S3_LR_SWAP_MASK                             0x1
+#define I2S3_LR_SWAP_MASK_SFT                         (0x1 << 31)
+#define I2S3_UPDATE_WORD_SFT                          24
+#define I2S3_UPDATE_WORD_MASK                         0x1f
+#define I2S3_UPDATE_WORD_MASK_SFT                     (0x1f << 24)
+#define I2S3_BCK_INV_SFT                              23
+#define I2S3_BCK_INV_MASK                             0x1
+#define I2S3_BCK_INV_MASK_SFT                         (0x1 << 23)
+#define I2S3_FPGA_BIT_TEST_SFT                        22
+#define I2S3_FPGA_BIT_TEST_MASK                       0x1
+#define I2S3_FPGA_BIT_TEST_MASK_SFT                   (0x1 << 22)
+#define I2S3_FPGA_BIT_SFT                             21
+#define I2S3_FPGA_BIT_MASK                            0x1
+#define I2S3_FPGA_BIT_MASK_SFT                        (0x1 << 21)
+#define I2S3_LOOPBACK_SFT                             20
+#define I2S3_LOOPBACK_MASK                            0x1
+#define I2S3_LOOPBACK_MASK_SFT                        (0x1 << 20)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT             17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK            0x1
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT        (0x1 << 17)
+#define I2S3_HD_EN_SFT                                12
+#define I2S3_HD_EN_MASK                               0x1
+#define I2S3_HD_EN_MASK_SFT                           (0x1 << 12)
+#define I2S3_OUT_MODE_SFT                             8
+#define I2S3_OUT_MODE_MASK                            0xf
+#define I2S3_OUT_MODE_MASK_SFT                        (0xf << 8)
+#define I2S3_FMT_SFT                                  3
+#define I2S3_FMT_MASK                                 0x1
+#define I2S3_FMT_MASK_SFT                             (0x1 << 3)
+#define I2S3_WLEN_SFT                                 1
+#define I2S3_WLEN_MASK                                0x1
+#define I2S3_WLEN_MASK_SFT                            (0x1 << 1)
+#define I2S3_EN_SFT                                   0
+#define I2S3_EN_MASK                                  0x1
+#define I2S3_EN_MASK_SFT                              (0x1 << 0)
+
+/* AFE_I2S_CON3 */
+#define I2S4_LR_SWAP_SFT                              31
+#define I2S4_LR_SWAP_MASK                             0x1
+#define I2S4_LR_SWAP_MASK_SFT                         (0x1 << 31)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT             17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK            0x1
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT        (0x1 << 17)
+#define I2S4_32BIT_EN_SFT                             13
+#define I2S4_32BIT_EN_MASK                            0x1
+#define I2S4_32BIT_EN_MASK_SFT                        (0x1 << 13)
+#define I2S4_HD_EN_SFT                                12
+#define I2S4_HD_EN_MASK                               0x1
+#define I2S4_HD_EN_MASK_SFT                           (0x1 << 12)
+#define I2S4_OUT_MODE_SFT                             8
+#define I2S4_OUT_MODE_MASK                            0xf
+#define I2S4_OUT_MODE_MASK_SFT                        (0xf << 8)
+#define INV_LRCK_SFT                                  5
+#define INV_LRCK_MASK                                 0x1
+#define INV_LRCK_MASK_SFT                             (0x1 << 5)
+#define I2S4_FMT_SFT                                  3
+#define I2S4_FMT_MASK                                 0x1
+#define I2S4_FMT_MASK_SFT                             (0x1 << 3)
+#define I2S4_WLEN_SFT                                 1
+#define I2S4_WLEN_MASK                                0x1
+#define I2S4_WLEN_MASK_SFT                            (0x1 << 1)
+#define I2S4_EN_SFT                                   0
+#define I2S4_EN_MASK                                  0x1
+#define I2S4_EN_MASK_SFT                              (0x1 << 0)
+
+/* AFE_I2S_CON4 */
+#define I2S5_LR_SWAP_SFT                              31
+#define I2S5_LR_SWAP_MASK                             0x1
+#define I2S5_LR_SWAP_MASK_SFT                         (0x1 << 31)
+#define I2S_LOOPBACK_SFT                              20
+#define I2S_LOOPBACK_MASK                             0x1
+#define I2S_LOOPBACK_MASK_SFT                         (0x1 << 20)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT             17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK            0x1
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT        (0x1 << 17)
+#define I2S5_32BIT_EN_SFT                             13
+#define I2S5_32BIT_EN_MASK                            0x1
+#define I2S5_32BIT_EN_MASK_SFT                        (0x1 << 13)
+#define I2S5_HD_EN_SFT                                12
+#define I2S5_HD_EN_MASK                               0x1
+#define I2S5_HD_EN_MASK_SFT                           (0x1 << 12)
+#define I2S5_OUT_MODE_SFT                             8
+#define I2S5_OUT_MODE_MASK                            0xf
+#define I2S5_OUT_MODE_MASK_SFT                        (0xf << 8)
+#define INV_LRCK_SFT                                  5
+#define INV_LRCK_MASK                                 0x1
+#define INV_LRCK_MASK_SFT                             (0x1 << 5)
+#define I2S5_FMT_SFT                                  3
+#define I2S5_FMT_MASK                                 0x1
+#define I2S5_FMT_MASK_SFT                             (0x1 << 3)
+#define I2S5_WLEN_SFT                                 1
+#define I2S5_WLEN_MASK                                0x1
+#define I2S5_WLEN_MASK_SFT                            (0x1 << 1)
+#define I2S5_EN_SFT                                   0
+#define I2S5_EN_MASK                                  0x1
+#define I2S5_EN_MASK_SFT                              (0x1 << 0)
+
+/* AFE_GAIN1_CON0 */
+#define GAIN1_SAMPLE_PER_STEP_SFT                     8
+#define GAIN1_SAMPLE_PER_STEP_MASK                    0xff
+#define GAIN1_SAMPLE_PER_STEP_MASK_SFT                (0xff << 8)
+#define GAIN1_MODE_SFT                                4
+#define GAIN1_MODE_MASK                               0xf
+#define GAIN1_MODE_MASK_SFT                           (0xf << 4)
+#define GAIN1_ON_SFT                                  0
+#define GAIN1_ON_MASK                                 0x1
+#define GAIN1_ON_MASK_SFT                             (0x1 << 0)
+
+/* AFE_GAIN1_CON1 */
+#define GAIN1_TARGET_SFT                              0
+#define GAIN1_TARGET_MASK                             0xfffff
+#define GAIN1_TARGET_MASK_SFT                         (0xfffff << 0)
+
+/* AFE_GAIN2_CON0 */
+#define GAIN2_SAMPLE_PER_STEP_SFT                     8
+#define GAIN2_SAMPLE_PER_STEP_MASK                    0xff
+#define GAIN2_SAMPLE_PER_STEP_MASK_SFT                (0xff << 8)
+#define GAIN2_MODE_SFT                                4
+#define GAIN2_MODE_MASK                               0xf
+#define GAIN2_MODE_MASK_SFT                           (0xf << 4)
+#define GAIN2_ON_SFT                                  0
+#define GAIN2_ON_MASK                                 0x1
+#define GAIN2_ON_MASK_SFT                             (0x1 << 0)
+
+/* AFE_GAIN2_CON1 */
+#define GAIN2_TARGET_SFT                              0
+#define GAIN2_TARGET_MASK                             0xfffff
+#define GAIN2_TARGET_MASK_SFT                         (0xfffff << 0)
+
+/* AFE_GAIN1_CUR */
+#define AFE_GAIN1_CUR_SFT                             0
+#define AFE_GAIN1_CUR_MASK                            0xfffff
+#define AFE_GAIN1_CUR_MASK_SFT                        (0xfffff << 0)
+
+/* AFE_GAIN2_CUR */
+#define AFE_GAIN2_CUR_SFT                             0
+#define AFE_GAIN2_CUR_MASK                            0xfffff
+#define AFE_GAIN2_CUR_MASK_SFT                        (0xfffff << 0)
+
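+/*
+ * Illustrative read-back, assuming a regmap-backed AFE driver: the
+ * *_CUR registers above report the current ramped gain, and a field
+ * is extracted by masking in place and shifting down:
+ *
+ *	unsigned int val, cur;
+ *
+ *	regmap_read(regmap, AFE_GAIN1_CUR, &val);
+ *	cur = (val & AFE_GAIN1_CUR_MASK_SFT) >> AFE_GAIN1_CUR_SFT;
+ *
+ * ("regmap" stands in for the driver-provided handle.)
+ */
+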
+/* AFE_MEMIF_HD_MODE */
+#define AWB2_HD_SFT                                   28
+#define AWB2_HD_MASK                                  0x3
+#define AWB2_HD_MASK_SFT                              (0x3 << 28)
+#define HDMI_HD_SFT                                   20
+#define HDMI_HD_MASK                                  0x3
+#define HDMI_HD_MASK_SFT                              (0x3 << 20)
+#define MOD_DAI_HD_SFT                                18
+#define MOD_DAI_HD_MASK                               0x3
+#define MOD_DAI_HD_MASK_SFT                           (0x3 << 18)
+#define DAI_HD_SFT                                    16
+#define DAI_HD_MASK                                   0x3
+#define DAI_HD_MASK_SFT                               (0x3 << 16)
+#define VUL2_HD_SFT                                   14
+#define VUL2_HD_MASK                                  0x3
+#define VUL2_HD_MASK_SFT                              (0x3 << 14)
+#define VUL12_HD_SFT                                  12
+#define VUL12_HD_MASK                                 0x3
+#define VUL12_HD_MASK_SFT                             (0x3 << 12)
+#define VUL_HD_SFT                                    10
+#define VUL_HD_MASK                                   0x3
+#define VUL_HD_MASK_SFT                               (0x3 << 10)
+#define AWB_HD_SFT                                    8
+#define AWB_HD_MASK                                   0x3
+#define AWB_HD_MASK_SFT                               (0x3 << 8)
+#define DL3_HD_SFT                                    6
+#define DL3_HD_MASK                                   0x3
+#define DL3_HD_MASK_SFT                               (0x3 << 6)
+#define DL2_HD_SFT                                    4
+#define DL2_HD_MASK                                   0x3
+#define DL2_HD_MASK_SFT                               (0x3 << 4)
+#define DL1_HD_SFT                                    0
+#define DL1_HD_MASK                                   0x3
+#define DL1_HD_MASK_SFT                               (0x3 << 0)
+
+/* AFE_MEMIF_HDALIGN */
+#define AWB2_NORMAL_MODE_SFT                          30
+#define AWB2_NORMAL_MODE_MASK                         0x1
+#define AWB2_NORMAL_MODE_MASK_SFT                     (0x1 << 30)
+#define HDMI_NORMAL_MODE_SFT                          26
+#define HDMI_NORMAL_MODE_MASK                         0x1
+#define HDMI_NORMAL_MODE_MASK_SFT                     (0x1 << 26)
+#define MOD_DAI_NORMAL_MODE_SFT                       25
+#define MOD_DAI_NORMAL_MODE_MASK                      0x1
+#define MOD_DAI_NORMAL_MODE_MASK_SFT                  (0x1 << 25)
+#define DAI_NORMAL_MODE_SFT                           24
+#define DAI_NORMAL_MODE_MASK                          0x1
+#define DAI_NORMAL_MODE_MASK_SFT                      (0x1 << 24)
+#define VUL2_NORMAL_MODE_SFT                          23
+#define VUL2_NORMAL_MODE_MASK                         0x1
+#define VUL2_NORMAL_MODE_MASK_SFT                     (0x1 << 23)
+#define VUL12_NORMAL_MODE_SFT                         22
+#define VUL12_NORMAL_MODE_MASK                        0x1
+#define VUL12_NORMAL_MODE_MASK_SFT                    (0x1 << 22)
+#define VUL_NORMAL_MODE_SFT                           21
+#define VUL_NORMAL_MODE_MASK                          0x1
+#define VUL_NORMAL_MODE_MASK_SFT                      (0x1 << 21)
+#define AWB_NORMAL_MODE_SFT                           20
+#define AWB_NORMAL_MODE_MASK                          0x1
+#define AWB_NORMAL_MODE_MASK_SFT                      (0x1 << 20)
+#define DL3_NORMAL_MODE_SFT                           19
+#define DL3_NORMAL_MODE_MASK                          0x1
+#define DL3_NORMAL_MODE_MASK_SFT                      (0x1 << 19)
+#define DL2_NORMAL_MODE_SFT                           18
+#define DL2_NORMAL_MODE_MASK                          0x1
+#define DL2_NORMAL_MODE_MASK_SFT                      (0x1 << 18)
+#define DL1_NORMAL_MODE_SFT                           16
+#define DL1_NORMAL_MODE_MASK                          0x1
+#define DL1_NORMAL_MODE_MASK_SFT                      (0x1 << 16)
+#define RESERVED1_SFT                                 15
+#define RESERVED1_MASK                                0x1
+#define RESERVED1_MASK_SFT                            (0x1 << 15)
+#define AWB2_ALIGN_SFT                                14
+#define AWB2_ALIGN_MASK                               0x1
+#define AWB2_ALIGN_MASK_SFT                           (0x1 << 14)
+#define HDMI_HD_ALIGN_SFT                             10
+#define HDMI_HD_ALIGN_MASK                            0x1
+#define HDMI_HD_ALIGN_MASK_SFT                        (0x1 << 10)
+#define MOD_DAI_HD_ALIGN_SFT                          9
+#define MOD_DAI_HD_ALIGN_MASK                         0x1
+#define MOD_DAI_HD_ALIGN_MASK_SFT                     (0x1 << 9)
+#define VUL2_HD_ALIGN_SFT                             7
+#define VUL2_HD_ALIGN_MASK                            0x1
+#define VUL2_HD_ALIGN_MASK_SFT                        (0x1 << 7)
+#define VUL12_HD_ALIGN_SFT                            6
+#define VUL12_HD_ALIGN_MASK                           0x1
+#define VUL12_HD_ALIGN_MASK_SFT                       (0x1 << 6)
+#define VUL_HD_ALIGN_SFT                              5
+#define VUL_HD_ALIGN_MASK                             0x1
+#define VUL_HD_ALIGN_MASK_SFT                         (0x1 << 5)
+#define AWB_HD_ALIGN_SFT                              4
+#define AWB_HD_ALIGN_MASK                             0x1
+#define AWB_HD_ALIGN_MASK_SFT                         (0x1 << 4)
+#define DL3_HD_ALIGN_SFT                              3
+#define DL3_HD_ALIGN_MASK                             0x1
+#define DL3_HD_ALIGN_MASK_SFT                         (0x1 << 3)
+#define DL2_HD_ALIGN_SFT                              2
+#define DL2_HD_ALIGN_MASK                             0x1
+#define DL2_HD_ALIGN_MASK_SFT                         (0x1 << 2)
+#define DL1_HD_ALIGN_SFT                              0
+#define DL1_HD_ALIGN_MASK                             0x1
+#define DL1_HD_ALIGN_MASK_SFT                         (0x1 << 0)
+
+/* PCM_INTF_CON1 */
+#define PCM_FIX_VALUE_SEL_SFT                         31
+#define PCM_FIX_VALUE_SEL_MASK                        0x1
+#define PCM_FIX_VALUE_SEL_MASK_SFT                    (0x1 << 31)
+#define PCM_BUFFER_LOOPBACK_SFT                       30
+#define PCM_BUFFER_LOOPBACK_MASK                      0x1
+#define PCM_BUFFER_LOOPBACK_MASK_SFT                  (0x1 << 30)
+#define PCM_PARALLEL_LOOPBACK_SFT                     29
+#define PCM_PARALLEL_LOOPBACK_MASK                    0x1
+#define PCM_PARALLEL_LOOPBACK_MASK_SFT                (0x1 << 29)
+#define PCM_SERIAL_LOOPBACK_SFT                       28
+#define PCM_SERIAL_LOOPBACK_MASK                      0x1
+#define PCM_SERIAL_LOOPBACK_MASK_SFT                  (0x1 << 28)
+#define PCM_DAI_PCM_LOOPBACK_SFT                      27
+#define PCM_DAI_PCM_LOOPBACK_MASK                     0x1
+#define PCM_DAI_PCM_LOOPBACK_MASK_SFT                 (0x1 << 27)
+#define PCM_I2S_PCM_LOOPBACK_SFT                      26
+#define PCM_I2S_PCM_LOOPBACK_MASK                     0x1
+#define PCM_I2S_PCM_LOOPBACK_MASK_SFT                 (0x1 << 26)
+#define PCM_SYNC_DELSEL_SFT                           25
+#define PCM_SYNC_DELSEL_MASK                          0x1
+#define PCM_SYNC_DELSEL_MASK_SFT                      (0x1 << 25)
+#define PCM_TX_LR_SWAP_SFT                            24
+#define PCM_TX_LR_SWAP_MASK                           0x1
+#define PCM_TX_LR_SWAP_MASK_SFT                       (0x1 << 24)
+#define PCM_SYNC_OUT_INV_SFT                          23
+#define PCM_SYNC_OUT_INV_MASK                         0x1
+#define PCM_SYNC_OUT_INV_MASK_SFT                     (0x1 << 23)
+#define PCM_BCLK_OUT_INV_SFT                          22
+#define PCM_BCLK_OUT_INV_MASK                         0x1
+#define PCM_BCLK_OUT_INV_MASK_SFT                     (0x1 << 22)
+#define PCM_SYNC_IN_INV_SFT                           21
+#define PCM_SYNC_IN_INV_MASK                          0x1
+#define PCM_SYNC_IN_INV_MASK_SFT                      (0x1 << 21)
+#define PCM_BCLK_IN_INV_SFT                           20
+#define PCM_BCLK_IN_INV_MASK                          0x1
+#define PCM_BCLK_IN_INV_MASK_SFT                      (0x1 << 20)
+#define PCM_TX_LCH_RPT_SFT                            19
+#define PCM_TX_LCH_RPT_MASK                           0x1
+#define PCM_TX_LCH_RPT_MASK_SFT                       (0x1 << 19)
+#define PCM_VBT_16K_MODE_SFT                          18
+#define PCM_VBT_16K_MODE_MASK                         0x1
+#define PCM_VBT_16K_MODE_MASK_SFT                     (0x1 << 18)
+#define PCM_EXT_MODEM_SFT                             17
+#define PCM_EXT_MODEM_MASK                            0x1
+#define PCM_EXT_MODEM_MASK_SFT                        (0x1 << 17)
+#define PCM_24BIT_SFT                                 16
+#define PCM_24BIT_MASK                                0x1
+#define PCM_24BIT_MASK_SFT                            (0x1 << 16)
+#define PCM_WLEN_SFT                                  14
+#define PCM_WLEN_MASK                                 0x3
+#define PCM_WLEN_MASK_SFT                             (0x3 << 14)
+#define PCM_SYNC_LENGTH_SFT                           9
+#define PCM_SYNC_LENGTH_MASK                          0x1f
+#define PCM_SYNC_LENGTH_MASK_SFT                      (0x1f << 9)
+#define PCM_SYNC_TYPE_SFT                             8
+#define PCM_SYNC_TYPE_MASK                            0x1
+#define PCM_SYNC_TYPE_MASK_SFT                        (0x1 << 8)
+#define PCM_BT_MODE_SFT                               7
+#define PCM_BT_MODE_MASK                              0x1
+#define PCM_BT_MODE_MASK_SFT                          (0x1 << 7)
+#define PCM_BYP_ASRC_SFT                              6
+#define PCM_BYP_ASRC_MASK                             0x1
+#define PCM_BYP_ASRC_MASK_SFT                         (0x1 << 6)
+#define PCM_SLAVE_SFT                                 5
+#define PCM_SLAVE_MASK                                0x1
+#define PCM_SLAVE_MASK_SFT                            (0x1 << 5)
+#define PCM_MODE_SFT                                  3
+#define PCM_MODE_MASK                                 0x3
+#define PCM_MODE_MASK_SFT                             (0x3 << 3)
+#define PCM_FMT_SFT                                   1
+#define PCM_FMT_MASK                                  0x3
+#define PCM_FMT_MASK_SFT                              (0x3 << 1)
+#define PCM_EN_SFT                                    0
+#define PCM_EN_MASK                                   0x1
+#define PCM_EN_MASK_SFT                               (0x1 << 0)
+
+/* PCM_INTF_CON2 */
+#define PCM1_TX_FIFO_OV_SFT                           31
+#define PCM1_TX_FIFO_OV_MASK                          0x1
+#define PCM1_TX_FIFO_OV_MASK_SFT                      (0x1 << 31)
+#define PCM1_RX_FIFO_OV_SFT                           30
+#define PCM1_RX_FIFO_OV_MASK                          0x1
+#define PCM1_RX_FIFO_OV_MASK_SFT                      (0x1 << 30)
+#define PCM2_TX_FIFO_OV_SFT                           29
+#define PCM2_TX_FIFO_OV_MASK                          0x1
+#define PCM2_TX_FIFO_OV_MASK_SFT                      (0x1 << 29)
+#define PCM2_RX_FIFO_OV_SFT                           28
+#define PCM2_RX_FIFO_OV_MASK                          0x1
+#define PCM2_RX_FIFO_OV_MASK_SFT                      (0x1 << 28)
+#define PCM1_SYNC_GLITCH_SFT                          27
+#define PCM1_SYNC_GLITCH_MASK                         0x1
+#define PCM1_SYNC_GLITCH_MASK_SFT                     (0x1 << 27)
+#define PCM2_SYNC_GLITCH_SFT                          26
+#define PCM2_SYNC_GLITCH_MASK                         0x1
+#define PCM2_SYNC_GLITCH_MASK_SFT                     (0x1 << 26)
+#define TX3_RCH_DBG_MODE_SFT                          17
+#define TX3_RCH_DBG_MODE_MASK                         0x1
+#define TX3_RCH_DBG_MODE_MASK_SFT                     (0x1 << 17)
+#define PCM1_PCM2_LOOPBACK_SFT                        16
+#define PCM1_PCM2_LOOPBACK_MASK                       0x1
+#define PCM1_PCM2_LOOPBACK_MASK_SFT                   (0x1 << 16)
+#define DAI_PCM_LOOPBACK_CH_SFT                       14
+#define DAI_PCM_LOOPBACK_CH_MASK                      0x3
+#define DAI_PCM_LOOPBACK_CH_MASK_SFT                  (0x3 << 14)
+#define I2S_PCM_LOOPBACK_CH_SFT                       12
+#define I2S_PCM_LOOPBACK_CH_MASK                      0x3
+#define I2S_PCM_LOOPBACK_CH_MASK_SFT                  (0x3 << 12)
+#define TX_FIX_VALUE_SFT                              0
+#define TX_FIX_VALUE_MASK                             0xff
+#define TX_FIX_VALUE_MASK_SFT                         (0xff << 0)
+
+/* PCM2_INTF_CON */
+#define PCM2_TX_FIX_VALUE_SFT                         24
+#define PCM2_TX_FIX_VALUE_MASK                        0xff
+#define PCM2_TX_FIX_VALUE_MASK_SFT                    (0xff << 24)
+#define PCM2_FIX_VALUE_SEL_SFT                        23
+#define PCM2_FIX_VALUE_SEL_MASK                       0x1
+#define PCM2_FIX_VALUE_SEL_MASK_SFT                   (0x1 << 23)
+#define PCM2_BUFFER_LOOPBACK_SFT                      22
+#define PCM2_BUFFER_LOOPBACK_MASK                     0x1
+#define PCM2_BUFFER_LOOPBACK_MASK_SFT                 (0x1 << 22)
+#define PCM2_PARALLEL_LOOPBACK_SFT                    21
+#define PCM2_PARALLEL_LOOPBACK_MASK                   0x1
+#define PCM2_PARALLEL_LOOPBACK_MASK_SFT               (0x1 << 21)
+#define PCM2_SERIAL_LOOPBACK_SFT                      20
+#define PCM2_SERIAL_LOOPBACK_MASK                     0x1
+#define PCM2_SERIAL_LOOPBACK_MASK_SFT                 (0x1 << 20)
+#define PCM2_DAI_PCM_LOOPBACK_SFT                     19
+#define PCM2_DAI_PCM_LOOPBACK_MASK                    0x1
+#define PCM2_DAI_PCM_LOOPBACK_MASK_SFT                (0x1 << 19)
+#define PCM2_I2S_PCM_LOOPBACK_SFT                     18
+#define PCM2_I2S_PCM_LOOPBACK_MASK                    0x1
+#define PCM2_I2S_PCM_LOOPBACK_MASK_SFT                (0x1 << 18)
+#define PCM2_SYNC_DELSEL_SFT                          17
+#define PCM2_SYNC_DELSEL_MASK                         0x1
+#define PCM2_SYNC_DELSEL_MASK_SFT                     (0x1 << 17)
+#define PCM2_TX_LR_SWAP_SFT                           16
+#define PCM2_TX_LR_SWAP_MASK                          0x1
+#define PCM2_TX_LR_SWAP_MASK_SFT                      (0x1 << 16)
+#define PCM2_SYNC_IN_INV_SFT                          15
+#define PCM2_SYNC_IN_INV_MASK                         0x1
+#define PCM2_SYNC_IN_INV_MASK_SFT                     (0x1 << 15)
+#define PCM2_BCLK_IN_INV_SFT                          14
+#define PCM2_BCLK_IN_INV_MASK                         0x1
+#define PCM2_BCLK_IN_INV_MASK_SFT                     (0x1 << 14)
+#define PCM2_TX_LCH_RPT_SFT                           13
+#define PCM2_TX_LCH_RPT_MASK                          0x1
+#define PCM2_TX_LCH_RPT_MASK_SFT                      (0x1 << 13)
+#define PCM2_VBT_16K_MODE_SFT                         12
+#define PCM2_VBT_16K_MODE_MASK                        0x1
+#define PCM2_VBT_16K_MODE_MASK_SFT                    (0x1 << 12)
+#define PCM2_LOOPBACK_CH_SEL_SFT                      10
+#define PCM2_LOOPBACK_CH_SEL_MASK                     0x3
+#define PCM2_LOOPBACK_CH_SEL_MASK_SFT                 (0x3 << 10)
+#define PCM2_TX2_BT_MODE_SFT                          8
+#define PCM2_TX2_BT_MODE_MASK                         0x1
+#define PCM2_TX2_BT_MODE_MASK_SFT                     (0x1 << 8)
+#define PCM2_BT_MODE_SFT                              7
+#define PCM2_BT_MODE_MASK                             0x1
+#define PCM2_BT_MODE_MASK_SFT                         (0x1 << 7)
+#define PCM2_AFIFO_SFT                                6
+#define PCM2_AFIFO_MASK                               0x1
+#define PCM2_AFIFO_MASK_SFT                           (0x1 << 6)
+#define PCM2_WLEN_SFT                                 5
+#define PCM2_WLEN_MASK                                0x1
+#define PCM2_WLEN_MASK_SFT                            (0x1 << 5)
+#define PCM2_MODE_SFT                                 3
+#define PCM2_MODE_MASK                                0x3
+#define PCM2_MODE_MASK_SFT                            (0x3 << 3)
+#define PCM2_FMT_SFT                                  1
+#define PCM2_FMT_MASK                                 0x3
+#define PCM2_FMT_MASK_SFT                             (0x3 << 1)
+#define PCM2_EN_SFT                                   0
+#define PCM2_EN_MASK                                  0x1
+#define PCM2_EN_MASK_SFT                              (0x1 << 0)
+
+/* AFE_ADDA_MTKAIF_CFG0 */
+#define MTKAIF_RXIF_CLKINV_ADC_SFT                    31
+#define MTKAIF_RXIF_CLKINV_ADC_MASK                   0x1
+#define MTKAIF_RXIF_CLKINV_ADC_MASK_SFT               (0x1 << 31)
+#define MTKAIF_RXIF_BYPASS_SRC_SFT                    17
+#define MTKAIF_RXIF_BYPASS_SRC_MASK                   0x1
+#define MTKAIF_RXIF_BYPASS_SRC_MASK_SFT               (0x1 << 17)
+#define MTKAIF_RXIF_PROTOCOL2_SFT                     16
+#define MTKAIF_RXIF_PROTOCOL2_MASK                    0x1
+#define MTKAIF_RXIF_PROTOCOL2_MASK_SFT                (0x1 << 16)
+#define MTKAIF_TXIF_BYPASS_SRC_SFT                    5
+#define MTKAIF_TXIF_BYPASS_SRC_MASK                   0x1
+#define MTKAIF_TXIF_BYPASS_SRC_MASK_SFT               (0x1 << 5)
+#define MTKAIF_TXIF_PROTOCOL2_SFT                     4
+#define MTKAIF_TXIF_PROTOCOL2_MASK                    0x1
+#define MTKAIF_TXIF_PROTOCOL2_MASK_SFT                (0x1 << 4)
+#define MTKAIF_TXIF_8TO5_SFT                          2
+#define MTKAIF_TXIF_8TO5_MASK                         0x1
+#define MTKAIF_TXIF_8TO5_MASK_SFT                     (0x1 << 2)
+#define MTKAIF_RXIF_8TO5_SFT                          1
+#define MTKAIF_RXIF_8TO5_MASK                         0x1
+#define MTKAIF_RXIF_8TO5_MASK_SFT                     (0x1 << 1)
+#define MTKAIF_IF_LOOPBACK1_SFT                       0
+#define MTKAIF_IF_LOOPBACK1_MASK                      0x1
+#define MTKAIF_IF_LOOPBACK1_MASK_SFT                  (0x1 << 0)
+
+/* AFE_ADDA_MTKAIF_RX_CFG2 */
+#define MTKAIF_RXIF_DETECT_ON_PROTOCOL2_SFT           16
+#define MTKAIF_RXIF_DETECT_ON_PROTOCOL2_MASK          0x1
+#define MTKAIF_RXIF_DETECT_ON_PROTOCOL2_MASK_SFT      (0x1 << 16)
+#define MTKAIF_RXIF_DELAY_CYCLE_SFT                   12
+#define MTKAIF_RXIF_DELAY_CYCLE_MASK                  0xf
+#define MTKAIF_RXIF_DELAY_CYCLE_MASK_SFT              (0xf << 12)
+#define MTKAIF_RXIF_DELAY_DATA_SFT                    8
+#define MTKAIF_RXIF_DELAY_DATA_MASK                   0x1
+#define MTKAIF_RXIF_DELAY_DATA_MASK_SFT               (0x1 << 8)
+#define MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_SFT            4
+#define MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_MASK           0x7
+#define MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_MASK_SFT       (0x7 << 4)
+
+/* AFE_ADDA_DL_SRC2_CON0 */
+#define DL_2_INPUT_MODE_CTL_SFT                       28
+#define DL_2_INPUT_MODE_CTL_MASK                      0xf
+#define DL_2_INPUT_MODE_CTL_MASK_SFT                  (0xf << 28)
+#define DL_2_CH1_SATURATION_EN_CTL_SFT                27
+#define DL_2_CH1_SATURATION_EN_CTL_MASK               0x1
+#define DL_2_CH1_SATURATION_EN_CTL_MASK_SFT           (0x1 << 27)
+#define DL_2_CH2_SATURATION_EN_CTL_SFT                26
+#define DL_2_CH2_SATURATION_EN_CTL_MASK               0x1
+#define DL_2_CH2_SATURATION_EN_CTL_MASK_SFT           (0x1 << 26)
+#define DL_2_OUTPUT_SEL_CTL_SFT                       24
+#define DL_2_OUTPUT_SEL_CTL_MASK                      0x3
+#define DL_2_OUTPUT_SEL_CTL_MASK_SFT                  (0x3 << 24)
+#define DL_2_FADEIN_0START_EN_SFT                     16
+#define DL_2_FADEIN_0START_EN_MASK                    0x3
+#define DL_2_FADEIN_0START_EN_MASK_SFT                (0x3 << 16)
+#define DL_DISABLE_HW_CG_CTL_SFT                      15
+#define DL_DISABLE_HW_CG_CTL_MASK                     0x1
+#define DL_DISABLE_HW_CG_CTL_MASK_SFT                 (0x1 << 15)
+#define C_DATA_EN_SEL_CTL_PRE_SFT                     14
+#define C_DATA_EN_SEL_CTL_PRE_MASK                    0x1
+#define C_DATA_EN_SEL_CTL_PRE_MASK_SFT                (0x1 << 14)
+#define DL_2_SIDE_TONE_ON_CTL_PRE_SFT                 13
+#define DL_2_SIDE_TONE_ON_CTL_PRE_MASK                0x1
+#define DL_2_SIDE_TONE_ON_CTL_PRE_MASK_SFT            (0x1 << 13)
+#define DL_2_MUTE_CH1_OFF_CTL_PRE_SFT                 12
+#define DL_2_MUTE_CH1_OFF_CTL_PRE_MASK                0x1
+#define DL_2_MUTE_CH1_OFF_CTL_PRE_MASK_SFT            (0x1 << 12)
+#define DL_2_MUTE_CH2_OFF_CTL_PRE_SFT                 11
+#define DL_2_MUTE_CH2_OFF_CTL_PRE_MASK                0x1
+#define DL_2_MUTE_CH2_OFF_CTL_PRE_MASK_SFT            (0x1 << 11)
+#define DL2_ARAMPSP_CTL_PRE_SFT                       9
+#define DL2_ARAMPSP_CTL_PRE_MASK                      0x3
+#define DL2_ARAMPSP_CTL_PRE_MASK_SFT                  (0x3 << 9)
+#define DL_2_IIRMODE_CTL_PRE_SFT                      6
+#define DL_2_IIRMODE_CTL_PRE_MASK                     0x7
+#define DL_2_IIRMODE_CTL_PRE_MASK_SFT                 (0x7 << 6)
+#define DL_2_VOICE_MODE_CTL_PRE_SFT                   5
+#define DL_2_VOICE_MODE_CTL_PRE_MASK                  0x1
+#define DL_2_VOICE_MODE_CTL_PRE_MASK_SFT              (0x1 << 5)
+#define D2_2_MUTE_CH1_ON_CTL_PRE_SFT                  4
+#define D2_2_MUTE_CH1_ON_CTL_PRE_MASK                 0x1
+#define D2_2_MUTE_CH1_ON_CTL_PRE_MASK_SFT             (0x1 << 4)
+#define D2_2_MUTE_CH2_ON_CTL_PRE_SFT                  3
+#define D2_2_MUTE_CH2_ON_CTL_PRE_MASK                 0x1
+#define D2_2_MUTE_CH2_ON_CTL_PRE_MASK_SFT             (0x1 << 3)
+#define DL_2_IIR_ON_CTL_PRE_SFT                       2
+#define DL_2_IIR_ON_CTL_PRE_MASK                      0x1
+#define DL_2_IIR_ON_CTL_PRE_MASK_SFT                  (0x1 << 2)
+#define DL_2_GAIN_ON_CTL_PRE_SFT                      1
+#define DL_2_GAIN_ON_CTL_PRE_MASK                     0x1
+#define DL_2_GAIN_ON_CTL_PRE_MASK_SFT                 (0x1 << 1)
+#define DL_2_SRC_ON_TMP_CTL_PRE_SFT                   0
+#define DL_2_SRC_ON_TMP_CTL_PRE_MASK                  0x1
+#define DL_2_SRC_ON_TMP_CTL_PRE_MASK_SFT              (0x1 << 0)
+
+/* AFE_ADDA_DL_SRC2_CON1 */
+#define DL_2_GAIN_CTL_PRE_SFT                         16
+#define DL_2_GAIN_CTL_PRE_MASK                        0xffff
+#define DL_2_GAIN_CTL_PRE_MASK_SFT                    (0xffff << 16)
+#define DL_2_GAIN_MODE_CTL_SFT                        0
+#define DL_2_GAIN_MODE_CTL_MASK                       0x1
+#define DL_2_GAIN_MODE_CTL_MASK_SFT                   (0x1 << 0)
+
+/* AFE_ADDA_UL_SRC_CON0 */
+#define ULCF_CFG_EN_CTL_SFT                           31
+#define ULCF_CFG_EN_CTL_MASK                          0x1
+#define ULCF_CFG_EN_CTL_MASK_SFT                      (0x1 << 31)
+#define UL_MODE_3P25M_CH2_CTL_SFT                     22
+#define UL_MODE_3P25M_CH2_CTL_MASK                    0x1
+#define UL_MODE_3P25M_CH2_CTL_MASK_SFT                (0x1 << 22)
+#define UL_MODE_3P25M_CH1_CTL_SFT                     21
+#define UL_MODE_3P25M_CH1_CTL_MASK                    0x1
+#define UL_MODE_3P25M_CH1_CTL_MASK_SFT                (0x1 << 21)
+#define UL_VOICE_MODE_CH1_CH2_CTL_SFT                 17
+#define UL_VOICE_MODE_CH1_CH2_CTL_MASK                0x7
+#define UL_VOICE_MODE_CH1_CH2_CTL_MASK_SFT            (0x7 << 17)
+#define DMIC_LOW_POWER_MODE_CTL_SFT                   14
+#define DMIC_LOW_POWER_MODE_CTL_MASK                  0x3
+#define DMIC_LOW_POWER_MODE_CTL_MASK_SFT              (0x3 << 14)
+#define UL_DISABLE_HW_CG_CTL_SFT                      12
+#define UL_DISABLE_HW_CG_CTL_MASK                     0x1
+#define UL_DISABLE_HW_CG_CTL_MASK_SFT                 (0x1 << 12)
+#define UL_IIR_ON_TMP_CTL_SFT                         10
+#define UL_IIR_ON_TMP_CTL_MASK                        0x1
+#define UL_IIR_ON_TMP_CTL_MASK_SFT                    (0x1 << 10)
+#define UL_IIRMODE_CTL_SFT                            7
+#define UL_IIRMODE_CTL_MASK                           0x7
+#define UL_IIRMODE_CTL_MASK_SFT                       (0x7 << 7)
+#define DIGMIC_3P25M_1P625M_SEL_CTL_SFT               5
+#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK              0x1
+#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK_SFT          (0x1 << 5)
+#define UL_LOOP_BACK_MODE_CTL_SFT                     2
+#define UL_LOOP_BACK_MODE_CTL_MASK                    0x1
+#define UL_LOOP_BACK_MODE_CTL_MASK_SFT                (0x1 << 2)
+#define UL_SDM_3_LEVEL_CTL_SFT                        1
+#define UL_SDM_3_LEVEL_CTL_MASK                       0x1
+#define UL_SDM_3_LEVEL_CTL_MASK_SFT                   (0x1 << 1)
+#define UL_SRC_ON_TMP_CTL_SFT                         0
+#define UL_SRC_ON_TMP_CTL_MASK                        0x1
+#define UL_SRC_ON_TMP_CTL_MASK_SFT                    (0x1 << 0)
+
+/* AFE_ADDA_UL_SRC_CON1 */
+#define C_DAC_EN_CTL_SFT                              27
+#define C_DAC_EN_CTL_MASK                             0x1
+#define C_DAC_EN_CTL_MASK_SFT                         (0x1 << 27)
+#define C_MUTE_SW_CTL_SFT                             26
+#define C_MUTE_SW_CTL_MASK                            0x1
+#define C_MUTE_SW_CTL_MASK_SFT                        (0x1 << 26)
+#define ASDM_SRC_SEL_CTL_SFT                          25
+#define ASDM_SRC_SEL_CTL_MASK                         0x1
+#define ASDM_SRC_SEL_CTL_MASK_SFT                     (0x1 << 25)
+#define C_AMP_DIV_CH2_CTL_SFT                         21
+#define C_AMP_DIV_CH2_CTL_MASK                        0x7
+#define C_AMP_DIV_CH2_CTL_MASK_SFT                    (0x7 << 21)
+#define C_FREQ_DIV_CH2_CTL_SFT                        16
+#define C_FREQ_DIV_CH2_CTL_MASK                       0x1f
+#define C_FREQ_DIV_CH2_CTL_MASK_SFT                   (0x1f << 16)
+#define C_SINE_MODE_CH2_CTL_SFT                       12
+#define C_SINE_MODE_CH2_CTL_MASK                      0xf
+#define C_SINE_MODE_CH2_CTL_MASK_SFT                  (0xf << 12)
+#define C_AMP_DIV_CH1_CTL_SFT                         9
+#define C_AMP_DIV_CH1_CTL_MASK                        0x7
+#define C_AMP_DIV_CH1_CTL_MASK_SFT                    (0x7 << 9)
+#define C_FREQ_DIV_CH1_CTL_SFT                        4
+#define C_FREQ_DIV_CH1_CTL_MASK                       0x1f
+#define C_FREQ_DIV_CH1_CTL_MASK_SFT                   (0x1f << 4)
+#define C_SINE_MODE_CH1_CTL_SFT                       0
+#define C_SINE_MODE_CH1_CTL_MASK                      0xf
+#define C_SINE_MODE_CH1_CTL_MASK_SFT                  (0xf << 0)
+
+/* AFE_ADDA_TOP_CON0 */
+#define C_LOOP_BACK_MODE_CTL_SFT                      12
+#define C_LOOP_BACK_MODE_CTL_MASK                     0xf
+#define C_LOOP_BACK_MODE_CTL_MASK_SFT                 (0xf << 12)
+#define C_EXT_ADC_CTL_SFT                             0
+#define C_EXT_ADC_CTL_MASK                            0x1
+#define C_EXT_ADC_CTL_MASK_SFT                        (0x1 << 0)
+
+/* AFE_ADDA_UL_DL_CON0 */
+#define AFE_ADDA6_UL_LR_SWAP_SFT                      15
+#define AFE_ADDA6_UL_LR_SWAP_MASK                     0x1
+#define AFE_ADDA6_UL_LR_SWAP_MASK_SFT                 (0x1 << 15)
+#define AFE_ADDA6_CKDIV_RST_SFT                       14
+#define AFE_ADDA6_CKDIV_RST_MASK                      0x1
+#define AFE_ADDA6_CKDIV_RST_MASK_SFT                  (0x1 << 14)
+#define AFE_ADDA6_FIFO_AUTO_RST_SFT                   13
+#define AFE_ADDA6_FIFO_AUTO_RST_MASK                  0x1
+#define AFE_ADDA6_FIFO_AUTO_RST_MASK_SFT              (0x1 << 13)
+#define UL_FIFO_DIGMIC_TESTIN_SFT                     5
+#define UL_FIFO_DIGMIC_TESTIN_MASK                    0x3
+#define UL_FIFO_DIGMIC_TESTIN_MASK_SFT                (0x3 << 5)
+#define UL_FIFO_DIGMIC_WDATA_TESTEN_SFT               4
+#define UL_FIFO_DIGMIC_WDATA_TESTEN_MASK              0x1
+#define UL_FIFO_DIGMIC_WDATA_TESTEN_MASK_SFT          (0x1 << 4)
+#define ADDA_AFE_ON_SFT                               0
+#define ADDA_AFE_ON_MASK                              0x1
+#define ADDA_AFE_ON_MASK_SFT                          (0x1 << 0)
+
+/* AFE_SIDETONE_CON0 */
+#define R_RDY_SFT                                     30
+#define R_RDY_MASK                                    0x1
+#define R_RDY_MASK_SFT                                (0x1 << 30)
+#define W_RDY_SFT                                     29
+#define W_RDY_MASK                                    0x1
+#define W_RDY_MASK_SFT                                (0x1 << 29)
+#define R_W_EN_SFT                                    25
+#define R_W_EN_MASK                                   0x1
+#define R_W_EN_MASK_SFT                               (0x1 << 25)
+#define R_W_SEL_SFT                                   24
+#define R_W_SEL_MASK                                  0x1
+#define R_W_SEL_MASK_SFT                              (0x1 << 24)
+#define SEL_CH2_SFT                                   23
+#define SEL_CH2_MASK                                  0x1
+#define SEL_CH2_MASK_SFT                              (0x1 << 23)
+#define SIDE_TONE_COEFFICIENT_ADDR_SFT                16
+#define SIDE_TONE_COEFFICIENT_ADDR_MASK               0x1f
+#define SIDE_TONE_COEFFICIENT_ADDR_MASK_SFT           (0x1f << 16)
+#define SIDE_TONE_COEFFICIENT_SFT                     0
+#define SIDE_TONE_COEFFICIENT_MASK                    0xffff
+#define SIDE_TONE_COEFFICIENT_MASK_SFT                (0xffff << 0)
+
+/* AFE_SIDETONE_COEFF */
+#define SIDE_TONE_COEFF_SFT                           0
+#define SIDE_TONE_COEFF_MASK                          0xffff
+#define SIDE_TONE_COEFF_MASK_SFT                      (0xffff << 0)
+
+/* AFE_SIDETONE_CON1 */
+#define STF_BYPASS_MODE_SFT                           31
+#define STF_BYPASS_MODE_MASK                          0x1
+#define STF_BYPASS_MODE_MASK_SFT                      (0x1 << 31)
+#define STF_BYPASS_MODE_O28_O29_SFT                   30
+#define STF_BYPASS_MODE_O28_O29_MASK                  0x1
+#define STF_BYPASS_MODE_O28_O29_MASK_SFT              (0x1 << 30)
+#define STF_BYPASS_MODE_I2S4_SFT                      29
+#define STF_BYPASS_MODE_I2S4_MASK                     0x1
+#define STF_BYPASS_MODE_I2S4_MASK_SFT                 (0x1 << 29)
+#define STF_BYPASS_MODE_I2S5_SFT                      28
+#define STF_BYPASS_MODE_I2S5_MASK                     0x1
+#define STF_BYPASS_MODE_I2S5_MASK_SFT                 (0x1 << 28)
+#define STF_INPUT_EN_SEL_SFT                          13
+#define STF_INPUT_EN_SEL_MASK                         0x1
+#define STF_INPUT_EN_SEL_MASK_SFT                     (0x1 << 13)
+#define STF_SOURCE_FROM_O19O20_SFT                    12
+#define STF_SOURCE_FROM_O19O20_MASK                   0x1
+#define STF_SOURCE_FROM_O19O20_MASK_SFT               (0x1 << 12)
+#define SIDE_TONE_ON_SFT                              8
+#define SIDE_TONE_ON_MASK                             0x1
+#define SIDE_TONE_ON_MASK_SFT                         (0x1 << 8)
+#define SIDE_TONE_HALF_TAP_NUM_SFT                    0
+#define SIDE_TONE_HALF_TAP_NUM_MASK                   0x3f
+#define SIDE_TONE_HALF_TAP_NUM_MASK_SFT               (0x3f << 0)
+
+/* AFE_SIDETONE_GAIN */
+#define POSITIVE_GAIN_SFT                             16
+#define POSITIVE_GAIN_MASK                            0x7
+#define POSITIVE_GAIN_MASK_SFT                        (0x7 << 16)
+#define SIDE_TONE_GAIN_SFT                            0
+#define SIDE_TONE_GAIN_MASK                           0xffff
+#define SIDE_TONE_GAIN_MASK_SFT                       (0xffff << 0)
+
+/* AFE_ADDA_DL_SDM_DCCOMP_CON */
+#define AUD_DC_COMP_EN_SFT                            8
+#define AUD_DC_COMP_EN_MASK                           0x1
+#define AUD_DC_COMP_EN_MASK_SFT                       (0x1 << 8)
+#define ATTGAIN_CTL_SFT                               0
+#define ATTGAIN_CTL_MASK                              0x3f
+#define ATTGAIN_CTL_MASK_SFT                          (0x3f << 0)
+
+/* AFE_SINEGEN_CON0 */
+#define DAC_EN_SFT                                    26
+#define DAC_EN_MASK                                   0x1
+#define DAC_EN_MASK_SFT                               (0x1 << 26)
+#define MUTE_SW_CH2_SFT                               25
+#define MUTE_SW_CH2_MASK                              0x1
+#define MUTE_SW_CH2_MASK_SFT                          (0x1 << 25)
+#define MUTE_SW_CH1_SFT                               24
+#define MUTE_SW_CH1_MASK                              0x1
+#define MUTE_SW_CH1_MASK_SFT                          (0x1 << 24)
+#define SINE_MODE_CH2_SFT                             20
+#define SINE_MODE_CH2_MASK                            0xf
+#define SINE_MODE_CH2_MASK_SFT                        (0xf << 20)
+#define AMP_DIV_CH2_SFT                               17
+#define AMP_DIV_CH2_MASK                              0x7
+#define AMP_DIV_CH2_MASK_SFT                          (0x7 << 17)
+#define FREQ_DIV_CH2_SFT                              12
+#define FREQ_DIV_CH2_MASK                             0x1f
+#define FREQ_DIV_CH2_MASK_SFT                         (0x1f << 12)
+#define SINE_MODE_CH1_SFT                             8
+#define SINE_MODE_CH1_MASK                            0xf
+#define SINE_MODE_CH1_MASK_SFT                        (0xf << 8)
+#define AMP_DIV_CH1_SFT                               5
+#define AMP_DIV_CH1_MASK                              0x7
+#define AMP_DIV_CH1_MASK_SFT                          (0x7 << 5)
+#define FREQ_DIV_CH1_SFT                              0
+#define FREQ_DIV_CH1_MASK                             0x1f
+#define FREQ_DIV_CH1_MASK_SFT                         (0x1f << 0)
+
+/* AFE_SINEGEN_CON2 */
+#define INNER_LOOP_BACK_MODE_SFT                      0
+#define INNER_LOOP_BACK_MODE_MASK                     0x3f
+#define INNER_LOOP_BACK_MODE_MASK_SFT                 (0x3f << 0)
+
+/* AFE_MEMIF_MINLEN */
+#define HDMI_MINLEN_SFT                               24
+#define HDMI_MINLEN_MASK                              0xf
+#define HDMI_MINLEN_MASK_SFT                          (0xf << 24)
+#define DL3_MINLEN_SFT                                12
+#define DL3_MINLEN_MASK                               0xf
+#define DL3_MINLEN_MASK_SFT                           (0xf << 12)
+#define DL2_MINLEN_SFT                                8
+#define DL2_MINLEN_MASK                               0xf
+#define DL2_MINLEN_MASK_SFT                           (0xf << 8)
+#define DL1_DATA2_MINLEN_SFT                          4
+#define DL1_DATA2_MINLEN_MASK                         0xf
+#define DL1_DATA2_MINLEN_MASK_SFT                     (0xf << 4)
+#define DL1_MINLEN_SFT                                0
+#define DL1_MINLEN_MASK                               0xf
+#define DL1_MINLEN_MASK_SFT                           (0xf << 0)
+
+/* AFE_MEMIF_MAXLEN */
+#define HDMI_MAXLEN_SFT                               24
+#define HDMI_MAXLEN_MASK                              0xf
+#define HDMI_MAXLEN_MASK_SFT                          (0xf << 24)
+#define DL3_MAXLEN_SFT                                8
+#define DL3_MAXLEN_MASK                               0xf
+#define DL3_MAXLEN_MASK_SFT                           (0xf << 8)
+#define DL2_MAXLEN_SFT                                4
+#define DL2_MAXLEN_MASK                               0xf
+#define DL2_MAXLEN_MASK_SFT                           (0xf << 4)
+#define DL1_MAXLEN_SFT                                0
+#define DL1_MAXLEN_MASK                               0x3
+#define DL1_MAXLEN_MASK_SFT                           (0x3 << 0)
+
+/* AFE_MEMIF_PBUF_SIZE */
+#define VUL12_4CH_SFT                                 17
+#define VUL12_4CH_MASK                                0x1
+#define VUL12_4CH_MASK_SFT                            (0x1 << 17)
+#define DL3_PBUF_SIZE_SFT                             10
+#define DL3_PBUF_SIZE_MASK                            0x3
+#define DL3_PBUF_SIZE_MASK_SFT                        (0x3 << 10)
+#define HDMI_PBUF_SIZE_SFT                            4
+#define HDMI_PBUF_SIZE_MASK                           0x3
+#define HDMI_PBUF_SIZE_MASK_SFT                       (0x3 << 4)
+#define DL2_PBUF_SIZE_SFT                             2
+#define DL2_PBUF_SIZE_MASK                            0x3
+#define DL2_PBUF_SIZE_MASK_SFT                        (0x3 << 2)
+#define DL1_PBUF_SIZE_SFT                             0
+#define DL1_PBUF_SIZE_MASK                            0x3
+#define DL1_PBUF_SIZE_MASK_SFT                        (0x3 << 0)
+
+/* AFE_HD_ENGEN_ENABLE */
+#define AFE_24M_ON_SFT                                1
+#define AFE_24M_ON_MASK                               0x1
+#define AFE_24M_ON_MASK_SFT                           (0x1 << 1)
+#define AFE_22M_ON_SFT                                0
+#define AFE_22M_ON_MASK                               0x1
+#define AFE_22M_ON_MASK_SFT                           (0x1 << 0)
+
+/* AFE_IRQ_MCU_CON0 */
+#define IRQ12_MCU_ON_SFT                              12
+#define IRQ12_MCU_ON_MASK                             0x1
+#define IRQ12_MCU_ON_MASK_SFT                         (0x1 << 12)
+#define IRQ11_MCU_ON_SFT                              11
+#define IRQ11_MCU_ON_MASK                             0x1
+#define IRQ11_MCU_ON_MASK_SFT                         (0x1 << 11)
+#define IRQ10_MCU_ON_SFT                              10
+#define IRQ10_MCU_ON_MASK                             0x1
+#define IRQ10_MCU_ON_MASK_SFT                         (0x1 << 10)
+#define IRQ9_MCU_ON_SFT                               9
+#define IRQ9_MCU_ON_MASK                              0x1
+#define IRQ9_MCU_ON_MASK_SFT                          (0x1 << 9)
+#define IRQ8_MCU_ON_SFT                               8
+#define IRQ8_MCU_ON_MASK                              0x1
+#define IRQ8_MCU_ON_MASK_SFT                          (0x1 << 8)
+#define IRQ7_MCU_ON_SFT                               7
+#define IRQ7_MCU_ON_MASK                              0x1
+#define IRQ7_MCU_ON_MASK_SFT                          (0x1 << 7)
+#define IRQ6_MCU_ON_SFT                               6
+#define IRQ6_MCU_ON_MASK                              0x1
+#define IRQ6_MCU_ON_MASK_SFT                          (0x1 << 6)
+#define IRQ5_MCU_ON_SFT                               5
+#define IRQ5_MCU_ON_MASK                              0x1
+#define IRQ5_MCU_ON_MASK_SFT                          (0x1 << 5)
+#define IRQ4_MCU_ON_SFT                               4
+#define IRQ4_MCU_ON_MASK                              0x1
+#define IRQ4_MCU_ON_MASK_SFT                          (0x1 << 4)
+#define IRQ3_MCU_ON_SFT                               3
+#define IRQ3_MCU_ON_MASK                              0x1
+#define IRQ3_MCU_ON_MASK_SFT                          (0x1 << 3)
+#define IRQ2_MCU_ON_SFT                               2
+#define IRQ2_MCU_ON_MASK                              0x1
+#define IRQ2_MCU_ON_MASK_SFT                          (0x1 << 2)
+#define IRQ1_MCU_ON_SFT                               1
+#define IRQ1_MCU_ON_MASK                              0x1
+#define IRQ1_MCU_ON_MASK_SFT                          (0x1 << 1)
+#define IRQ0_MCU_ON_SFT                               0
+#define IRQ0_MCU_ON_MASK                              0x1
+#define IRQ0_MCU_ON_MASK_SFT                          (0x1 << 0)
+
+/* AFE_IRQ_MCU_CON1 */
+#define IRQ7_MCU_MODE_SFT                             28
+#define IRQ7_MCU_MODE_MASK                            0xf
+#define IRQ7_MCU_MODE_MASK_SFT                        (0xf << 28)
+#define IRQ6_MCU_MODE_SFT                             24
+#define IRQ6_MCU_MODE_MASK                            0xf
+#define IRQ6_MCU_MODE_MASK_SFT                        (0xf << 24)
+#define IRQ5_MCU_MODE_SFT                             20
+#define IRQ5_MCU_MODE_MASK                            0xf
+#define IRQ5_MCU_MODE_MASK_SFT                        (0xf << 20)
+#define IRQ4_MCU_MODE_SFT                             16
+#define IRQ4_MCU_MODE_MASK                            0xf
+#define IRQ4_MCU_MODE_MASK_SFT                        (0xf << 16)
+#define IRQ3_MCU_MODE_SFT                             12
+#define IRQ3_MCU_MODE_MASK                            0xf
+#define IRQ3_MCU_MODE_MASK_SFT                        (0xf << 12)
+#define IRQ2_MCU_MODE_SFT                             8
+#define IRQ2_MCU_MODE_MASK                            0xf
+#define IRQ2_MCU_MODE_MASK_SFT                        (0xf << 8)
+#define IRQ1_MCU_MODE_SFT                             4
+#define IRQ1_MCU_MODE_MASK                            0xf
+#define IRQ1_MCU_MODE_MASK_SFT                        (0xf << 4)
+#define IRQ0_MCU_MODE_SFT                             0
+#define IRQ0_MCU_MODE_MASK                            0xf
+#define IRQ0_MCU_MODE_MASK_SFT                        (0xf << 0)
+
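+/*
+ * Illustrative IRQ setup, assuming a regmap-backed AFE driver: an
+ * audio IRQ is first pointed at a sampling-rate mode in the CON1/CON2
+ * mode fields, then enabled via its ON bit in CON0, e.g. for IRQ1
+ * ("regmap" and "fs_mode" stand in for driver-provided values):
+ *
+ *	regmap_update_bits(regmap, AFE_IRQ_MCU_CON1,
+ *			   IRQ1_MCU_MODE_MASK_SFT,
+ *			   fs_mode << IRQ1_MCU_MODE_SFT);
+ *	regmap_update_bits(regmap, AFE_IRQ_MCU_CON0,
+ *			   IRQ1_MCU_ON_MASK_SFT,
+ *			   0x1 << IRQ1_MCU_ON_SFT);
+ */
+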
+/* AFE_IRQ_MCU_CON2 */
+#define IRQ12_MCU_MODE_SFT                            4
+#define IRQ12_MCU_MODE_MASK                           0xf
+#define IRQ12_MCU_MODE_MASK_SFT                       (0xf << 4)
+#define IRQ11_MCU_MODE_SFT                            0
+#define IRQ11_MCU_MODE_MASK                           0xf
+#define IRQ11_MCU_MODE_MASK_SFT                       (0xf << 0)
+
+/* AFE_IRQ_MCU_CLR */
+#define IRQ12_MCU_MISS_CNT_CLR_SFT                    28
+#define IRQ12_MCU_MISS_CNT_CLR_MASK                   0x1
+#define IRQ12_MCU_MISS_CNT_CLR_MASK_SFT               (0x1 << 28)
+#define IRQ11_MCU_MISS_CNT_CLR_SFT                    27
+#define IRQ11_MCU_MISS_CNT_CLR_MASK                   0x1
+#define IRQ11_MCU_MISS_CNT_CLR_MASK_SFT               (0x1 << 27)
+#define IRQ10_MCU_MISS_CLR_SFT                        26
+#define IRQ10_MCU_MISS_CLR_MASK                       0x1
+#define IRQ10_MCU_MISS_CLR_MASK_SFT                   (0x1 << 26)
+#define IRQ9_MCU_MISS_CLR_SFT                         25
+#define IRQ9_MCU_MISS_CLR_MASK                        0x1
+#define IRQ9_MCU_MISS_CLR_MASK_SFT                    (0x1 << 25)
+#define IRQ8_MCU_MISS_CLR_SFT                         24
+#define IRQ8_MCU_MISS_CLR_MASK                        0x1
+#define IRQ8_MCU_MISS_CLR_MASK_SFT                    (0x1 << 24)
+#define IRQ7_MCU_MISS_CLR_SFT                         23
+#define IRQ7_MCU_MISS_CLR_MASK                        0x1
+#define IRQ7_MCU_MISS_CLR_MASK_SFT                    (0x1 << 23)
+#define IRQ6_MCU_MISS_CLR_SFT                         22
+#define IRQ6_MCU_MISS_CLR_MASK                        0x1
+#define IRQ6_MCU_MISS_CLR_MASK_SFT                    (0x1 << 22)
+#define IRQ5_MCU_MISS_CLR_SFT                         21
+#define IRQ5_MCU_MISS_CLR_MASK                        0x1
+#define IRQ5_MCU_MISS_CLR_MASK_SFT                    (0x1 << 21)
+#define IRQ4_MCU_MISS_CLR_SFT                         20
+#define IRQ4_MCU_MISS_CLR_MASK                        0x1
+#define IRQ4_MCU_MISS_CLR_MASK_SFT                    (0x1 << 20)
+#define IRQ3_MCU_MISS_CLR_SFT                         19
+#define IRQ3_MCU_MISS_CLR_MASK                        0x1
+#define IRQ3_MCU_MISS_CLR_MASK_SFT                    (0x1 << 19)
+#define IRQ2_MCU_MISS_CLR_SFT                         18
+#define IRQ2_MCU_MISS_CLR_MASK                        0x1
+#define IRQ2_MCU_MISS_CLR_MASK_SFT                    (0x1 << 18)
+#define IRQ1_MCU_MISS_CLR_SFT                         17
+#define IRQ1_MCU_MISS_CLR_MASK                        0x1
+#define IRQ1_MCU_MISS_CLR_MASK_SFT                    (0x1 << 17)
+#define IRQ0_MCU_MISS_CLR_SFT                         16
+#define IRQ0_MCU_MISS_CLR_MASK                        0x1
+#define IRQ0_MCU_MISS_CLR_MASK_SFT                    (0x1 << 16)
+#define IRQ12_MCU_CLR_SFT                             12
+#define IRQ12_MCU_CLR_MASK                            0x1
+#define IRQ12_MCU_CLR_MASK_SFT                        (0x1 << 12)
+#define IRQ11_MCU_CLR_SFT                             11
+#define IRQ11_MCU_CLR_MASK                            0x1
+#define IRQ11_MCU_CLR_MASK_SFT                        (0x1 << 11)
+#define IRQ10_MCU_CLR_SFT                             10
+#define IRQ10_MCU_CLR_MASK                            0x1
+#define IRQ10_MCU_CLR_MASK_SFT                        (0x1 << 10)
+#define IRQ9_MCU_CLR_SFT                              9
+#define IRQ9_MCU_CLR_MASK                             0x1
+#define IRQ9_MCU_CLR_MASK_SFT                         (0x1 << 9)
+#define IRQ8_MCU_CLR_SFT                              8
+#define IRQ8_MCU_CLR_MASK                             0x1
+#define IRQ8_MCU_CLR_MASK_SFT                         (0x1 << 8)
+#define IRQ7_MCU_CLR_SFT                              7
+#define IRQ7_MCU_CLR_MASK                             0x1
+#define IRQ7_MCU_CLR_MASK_SFT                         (0x1 << 7)
+#define IRQ6_MCU_CLR_SFT                              6
+#define IRQ6_MCU_CLR_MASK                             0x1
+#define IRQ6_MCU_CLR_MASK_SFT                         (0x1 << 6)
+#define IRQ5_MCU_CLR_SFT                              5
+#define IRQ5_MCU_CLR_MASK                             0x1
+#define IRQ5_MCU_CLR_MASK_SFT                         (0x1 << 5)
+#define IRQ4_MCU_CLR_SFT                              4
+#define IRQ4_MCU_CLR_MASK                             0x1
+#define IRQ4_MCU_CLR_MASK_SFT                         (0x1 << 4)
+#define IRQ3_MCU_CLR_SFT                              3
+#define IRQ3_MCU_CLR_MASK                             0x1
+#define IRQ3_MCU_CLR_MASK_SFT                         (0x1 << 3)
+#define IRQ2_MCU_CLR_SFT                              2
+#define IRQ2_MCU_CLR_MASK                             0x1
+#define IRQ2_MCU_CLR_MASK_SFT                         (0x1 << 2)
+#define IRQ1_MCU_CLR_SFT                              1
+#define IRQ1_MCU_CLR_MASK                             0x1
+#define IRQ1_MCU_CLR_MASK_SFT                         (0x1 << 1)
+#define IRQ0_MCU_CLR_SFT                              0
+#define IRQ0_MCU_CLR_MASK                             0x1
+#define IRQ0_MCU_CLR_MASK_SFT                         (0x1 << 0)
+
+/* AFE_MEMIF_MSB */
+#define CPU_COMPACT_MODE_SFT                          29
+#define CPU_COMPACT_MODE_MASK                         0x1
+#define CPU_COMPACT_MODE_MASK_SFT                     (0x1 << 29)
+#define CPU_HD_ALIGN_SFT                              28
+#define CPU_HD_ALIGN_MASK                             0x1
+#define CPU_HD_ALIGN_MASK_SFT                         (0x1 << 28)
+#define AWB2_AXI_WR_SIGN_SFT                          24
+#define AWB2_AXI_WR_SIGN_MASK                         0x1
+#define AWB2_AXI_WR_SIGN_MASK_SFT                     (0x1 << 24)
+#define VUL2_AXI_WR_SIGN_SFT                          22
+#define VUL2_AXI_WR_SIGN_MASK                         0x1
+#define VUL2_AXI_WR_SIGN_MASK_SFT                     (0x1 << 22)
+#define VUL12_AXI_WR_SIGN_SFT                         21
+#define VUL12_AXI_WR_SIGN_MASK                        0x1
+#define VUL12_AXI_WR_SIGN_MASK_SFT                    (0x1 << 21)
+#define VUL_AXI_WR_SIGN_SFT                           20
+#define VUL_AXI_WR_SIGN_MASK                          0x1
+#define VUL_AXI_WR_SIGN_MASK_SFT                      (0x1 << 20)
+#define MOD_DAI_AXI_WR_SIGN_SFT                       18
+#define MOD_DAI_AXI_WR_SIGN_MASK                      0x1
+#define MOD_DAI_AXI_WR_SIGN_MASK_SFT                  (0x1 << 18)
+#define AWB_MSTR_SIGN_SFT                             17
+#define AWB_MSTR_SIGN_MASK                            0x1
+#define AWB_MSTR_SIGN_MASK_SFT                        (0x1 << 17)
+#define SYSRAM_SIGN_SFT                               16
+#define SYSRAM_SIGN_MASK                              0x1
+#define SYSRAM_SIGN_MASK_SFT                          (0x1 << 16)
+
+/* AFE_HDMI_CONN0 */
+#define HDMI_O_7_SFT                                  21
+#define HDMI_O_7_MASK                                 0x7
+#define HDMI_O_7_MASK_SFT                             (0x7 << 21)
+#define HDMI_O_6_SFT                                  18
+#define HDMI_O_6_MASK                                 0x7
+#define HDMI_O_6_MASK_SFT                             (0x7 << 18)
+#define HDMI_O_5_SFT                                  15
+#define HDMI_O_5_MASK                                 0x7
+#define HDMI_O_5_MASK_SFT                             (0x7 << 15)
+#define HDMI_O_4_SFT                                  12
+#define HDMI_O_4_MASK                                 0x7
+#define HDMI_O_4_MASK_SFT                             (0x7 << 12)
+#define HDMI_O_3_SFT                                  9
+#define HDMI_O_3_MASK                                 0x7
+#define HDMI_O_3_MASK_SFT                             (0x7 << 9)
+#define HDMI_O_2_SFT                                  6
+#define HDMI_O_2_MASK                                 0x7
+#define HDMI_O_2_MASK_SFT                             (0x7 << 6)
+#define HDMI_O_1_SFT                                  3
+#define HDMI_O_1_MASK                                 0x7
+#define HDMI_O_1_MASK_SFT                             (0x7 << 3)
+#define HDMI_O_0_SFT                                  0
+#define HDMI_O_0_MASK                                 0x7
+#define HDMI_O_0_MASK_SFT                             (0x7 << 0)
+
+/* AFE_TDM_CON1 */
+#define TDM_EN_SFT                                    0
+#define TDM_EN_MASK                                   0x1
+#define TDM_EN_MASK_SFT                               (0x1 << 0)
+#define BCK_INVERSE_SFT                               1
+#define BCK_INVERSE_MASK                              0x1
+#define BCK_INVERSE_MASK_SFT                          (0x1 << 1)
+#define LRCK_INVERSE_SFT                              2
+#define LRCK_INVERSE_MASK                             0x1
+#define LRCK_INVERSE_MASK_SFT                         (0x1 << 2)
+#define DELAY_DATA_SFT                                3
+#define DELAY_DATA_MASK                               0x1
+#define DELAY_DATA_MASK_SFT                           (0x1 << 3)
+#define LEFT_ALIGN_SFT                                4
+#define LEFT_ALIGN_MASK                               0x1
+#define LEFT_ALIGN_MASK_SFT                           (0x1 << 4)
+#define WLEN_SFT                                      8
+#define WLEN_MASK                                     0x3
+#define WLEN_MASK_SFT                                 (0x3 << 8)
+#define CHANNEL_NUM_SFT                               10
+#define CHANNEL_NUM_MASK                              0x3
+#define CHANNEL_NUM_MASK_SFT                          (0x3 << 10)
+#define CHANNEL_BCK_CYCLES_SFT                        12
+#define CHANNEL_BCK_CYCLES_MASK                       0x3
+#define CHANNEL_BCK_CYCLES_MASK_SFT                   (0x3 << 12)
+#define DAC_BIT_NUM_SFT                               16
+#define DAC_BIT_NUM_MASK                              0x1f
+#define DAC_BIT_NUM_MASK_SFT                          (0x1f << 16)
+#define LRCK_TDM_WIDTH_SFT                            24
+#define LRCK_TDM_WIDTH_MASK                           0xff
+#define LRCK_TDM_WIDTH_MASK_SFT                       (0xff << 24)
+
+/* AFE_TDM_CON2 */
+#define ST_CH_PAIR_SOUT0_SFT                          0
+#define ST_CH_PAIR_SOUT0_MASK                         0x7
+#define ST_CH_PAIR_SOUT0_MASK_SFT                     (0x7 << 0)
+#define ST_CH_PAIR_SOUT1_SFT                          4
+#define ST_CH_PAIR_SOUT1_MASK                         0x7
+#define ST_CH_PAIR_SOUT1_MASK_SFT                     (0x7 << 4)
+#define ST_CH_PAIR_SOUT2_SFT                          8
+#define ST_CH_PAIR_SOUT2_MASK                         0x7
+#define ST_CH_PAIR_SOUT2_MASK_SFT                     (0x7 << 8)
+#define ST_CH_PAIR_SOUT3_SFT                          12
+#define ST_CH_PAIR_SOUT3_MASK                         0x7
+#define ST_CH_PAIR_SOUT3_MASK_SFT                     (0x7 << 12)
+#define TDM_FIX_VALUE_SEL_SFT                         16
+#define TDM_FIX_VALUE_SEL_MASK                        0x1
+#define TDM_FIX_VALUE_SEL_MASK_SFT                    (0x1 << 16)
+#define TDM_I2S_LOOPBACK_SFT                          20
+#define TDM_I2S_LOOPBACK_MASK                         0x1
+#define TDM_I2S_LOOPBACK_MASK_SFT                     (0x1 << 20)
+#define TDM_I2S_LOOPBACK_CH_SFT                       21
+#define TDM_I2S_LOOPBACK_CH_MASK                      0x3
+#define TDM_I2S_LOOPBACK_CH_MASK_SFT                  (0x3 << 21)
+#define TDM_FIX_VALUE_SFT                             24
+#define TDM_FIX_VALUE_MASK                            0xff
+#define TDM_FIX_VALUE_MASK_SFT                        (0xff << 24)
+
+/* AFE_HDMI_OUT_CON0 */
+#define AFE_HDMI_OUT_ON_RETM_SFT                      8
+#define AFE_HDMI_OUT_ON_RETM_MASK                     0x1
+#define AFE_HDMI_OUT_ON_RETM_MASK_SFT                 (0x1 << 8)
+#define AFE_HDMI_OUT_CH_NUM_SFT                       4
+#define AFE_HDMI_OUT_CH_NUM_MASK                      0xf
+#define AFE_HDMI_OUT_CH_NUM_MASK_SFT                  (0xf << 4)
+#define AFE_HDMI_OUT_BIT_WIDTH_SFT                    1
+#define AFE_HDMI_OUT_BIT_WIDTH_MASK                   0x1
+#define AFE_HDMI_OUT_BIT_WIDTH_MASK_SFT               (0x1 << 1)
+#define AFE_HDMI_OUT_ON_SFT                           0
+#define AFE_HDMI_OUT_ON_MASK                          0x1
+#define AFE_HDMI_OUT_ON_MASK_SFT                      (0x1 << 0)
+#endif
diff --git a/sound/soc/mediatek/mt8516/Makefile b/sound/soc/mediatek/mt8516/Makefile
new file mode 100644
index 0000000..dc05ab4
--- /dev/null
+++ b/sound/soc/mediatek/mt8516/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+snd-soc-mt8516-afe-objs := \
+	mt8516-afe-pcm.o \
+	mt8516-dai-adda.o
+
+obj-$(CONFIG_SND_SOC_MT8516) += snd-soc-mt8516-afe.o
diff --git a/sound/soc/mediatek/mt8516/mt8516-afe-common.h b/sound/soc/mediatek/mt8516/mt8516-afe-common.h
new file mode 100644
index 0000000..e70877c
--- /dev/null
+++ b/sound/soc/mediatek/mt8516/mt8516-afe-common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#ifndef _MT8516_AFE_COMMON_H_
+#define _MT8516_AFE_COMMON_H_
+
+#include "../common/mtk-base-afe.h"
+
+enum {
+	MT8516_AFE_BE_ADDA,
+};
+
+int mt8516_dai_adda_register(struct mtk_base_afe *afe);
+
+#endif
diff --git a/sound/soc/mediatek/mt8516/mt8516-afe-pcm.c b/sound/soc/mediatek/mt8516/mt8516-afe-pcm.c
new file mode 100644
index 0000000..33f58f3
--- /dev/null
+++ b/sound/soc/mediatek/mt8516/mt8516-afe-pcm.c
@@ -0,0 +1,804 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Copyright (c) 2019 MediaTek, Inc
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "mt8516-afe-common.h"
+#include "mt8516-afe-regs.h"
+
+#include "../common/mtk-afe-platform-driver.h"
+#include "../common/mtk-afe-fe-dai.h"
+#include "../common/mtk-base-afe.h"
+
+enum {
+	MT8516_AFE_MEMIF_DL1,
+	MT8516_AFE_MEMIF_DL2,
+	MT8516_AFE_MEMIF_VUL,
+	MT8516_AFE_MEMIF_DAI,
+	MT8516_AFE_MEMIF_AWB,
+	MT8516_AFE_MEMIF_MOD_DAI,
+	MT8516_AFE_MEMIF_HDMI,
+	MT8516_AFE_MEMIF_TDM_IN,
+	MT8516_AFE_MEMIF_MULTILINE_IN,
+	MT8516_AFE_MEMIF_NUM,
+};
+
+enum {
+	MT8516_AFE_IRQ_1 = 0,
+	MT8516_AFE_IRQ_2,
+	MT8516_AFE_IRQ_5, /* dedicated for HDMI */
+	MT8516_AFE_IRQ_7,
+	MT8516_AFE_IRQ_10, /* dedicated for TDM IN */
+	MT8516_AFE_IRQ_13, /* dedicated for ULM */
+	MT8516_AFE_IRQ_NUM
+};
+
+struct mt8516_afe_rate {
+	unsigned int rate;
+	unsigned int regvalue;
+};
+
+static const struct mt8516_afe_rate mt8516_afe_i2s_rates[] = {
+	{ .rate = 8000, .regvalue = 0 },
+	{ .rate = 11025, .regvalue = 1 },
+	{ .rate = 12000, .regvalue = 2 },
+	{ .rate = 16000, .regvalue = 4 },
+	{ .rate = 22050, .regvalue = 5 },
+	{ .rate = 24000, .regvalue = 6 },
+	{ .rate = 32000, .regvalue = 8 },
+	{ .rate = 44100, .regvalue = 9 },
+	{ .rate = 48000, .regvalue = 10 },
+	{ .rate = 88200, .regvalue = 11 },
+	{ .rate = 96000, .regvalue = 12 },
+	{ .rate = 176400, .regvalue = 13 },
+	{ .rate = 192000, .regvalue = 14 },
+};
+
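+/*
+ * Map a PCM sample rate to the 4-bit fs value programmed into the AFE
+ * rate fields. Returns -EINVAL for unsupported rates.
+ */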
+static int mt8516_afe_i2s_fs(struct snd_pcm_substream *substream,
+				unsigned int sample_rate)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mt8516_afe_i2s_rates); i++)
+		if (mt8516_afe_i2s_rates[i].rate == sample_rate)
+			return mt8516_afe_i2s_rates[i].regvalue;
+
+	return -EINVAL;
+}
+
+static int mt8516_afe_irq_fs(struct snd_pcm_substream *substream,
+		      unsigned int rate)
+{
+	return mt8516_afe_i2s_fs(substream, rate);
+}
+
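+/*
+ * Registers saved by the common mtk-afe suspend handler and restored
+ * on resume.
+ */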
+static const unsigned int mt8516_afe_backup_list[] = {
+	AUDIO_TOP_CON0,
+	AUDIO_TOP_CON1,
+	AUDIO_TOP_CON3,
+	AFE_CONN0,
+	AFE_CONN1,
+	AFE_CONN2,
+	AFE_CONN3,
+	AFE_CONN5,
+	AFE_CONN_24BIT,
+	AFE_I2S_CON,
+	AFE_I2S_CON1,
+	AFE_I2S_CON2,
+	AFE_I2S_CON3,
+	AFE_ADDA_PREDIS_CON0,
+	AFE_ADDA_PREDIS_CON1,
+	AFE_ADDA_DL_SRC2_CON0,
+	AFE_ADDA_DL_SRC2_CON1,
+	AFE_ADDA_UL_SRC_CON0,
+	AFE_ADDA_NEWIF_CFG1,
+	AFE_ADDA_TOP_CON0,
+	AFE_ADDA_UL_DL_CON0,
+	AFE_MEMIF_PBUF_SIZE,
+	AFE_MEMIF_PBUF2_SIZE,
+	AFE_DAC_CON0,
+	AFE_DAC_CON1,
+	AFE_DL1_BASE,
+	AFE_DL1_END,
+	AFE_DL2_BASE,
+	AFE_DL2_END,
+	AFE_VUL_BASE,
+	AFE_VUL_END,
+	AFE_AWB_BASE,
+	AFE_AWB_END,
+	AFE_DAI_BASE,
+	AFE_DAI_END,
+	AFE_HDMI_OUT_BASE,
+	AFE_HDMI_OUT_END,
+	AFE_HDMI_IN_2CH_BASE,
+	AFE_HDMI_IN_2CH_END,
+};
+
+static const struct snd_pcm_hardware mt8516_afe_hardware = {
+	.info = SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_RESUME |
+		SNDRV_PCM_INFO_MMAP_VALID,
+	.buffer_bytes_max = 1024 * 1024,
+	.period_bytes_min = 256,
+	.period_bytes_max = 512 * 1024,
+	.periods_min = 2,
+	.periods_max = 256,
+	.fifo_size = 0,
+};
+
+static const struct snd_kcontrol_new mt8516_afe_o03_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I05 Switch", AFE_CONN1, 21, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I07 Switch", AFE_CONN1, 23, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8516_afe_o04_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I06 Switch", AFE_CONN2, 6, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I08 Switch", AFE_CONN2, 8, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8516_afe_o09_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I00 Switch", AFE_CONN5, 8, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I03 Switch", AFE_CONN3, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8516_afe_o10_mix[] = {
+	SOC_DAPM_SINGLE_AUTODISABLE("I01 Switch", AFE_CONN5, 13, 1, 0),
+	SOC_DAPM_SINGLE_AUTODISABLE("I04 Switch", AFE_CONN3, 3, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget mt8516_memif_widgets[] = {
+	/* inter-connections */
+	SND_SOC_DAPM_MIXER("I03", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I04", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I05", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I06", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I07", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("I08", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MIXER("O03", SND_SOC_NOPM, 0, 0,
+			   mt8516_afe_o03_mix, ARRAY_SIZE(mt8516_afe_o03_mix)),
+	SND_SOC_DAPM_MIXER("O04", SND_SOC_NOPM, 0, 0,
+			   mt8516_afe_o04_mix, ARRAY_SIZE(mt8516_afe_o04_mix)),
+	SND_SOC_DAPM_MIXER("O09", SND_SOC_NOPM, 0, 0,
+			   mt8516_afe_o09_mix, ARRAY_SIZE(mt8516_afe_o09_mix)),
+	SND_SOC_DAPM_MIXER("O10", SND_SOC_NOPM, 0, 0,
+			   mt8516_afe_o10_mix, ARRAY_SIZE(mt8516_afe_o10_mix)),
+};
+
+static const struct snd_soc_dapm_route mt8516_memif_routes[] = {
+	/* downlink */
+	{"I05", NULL, "DL1"},
+	{"I06", NULL, "DL1"},
+	{"I07", NULL, "DL2"},
+	{"I08", NULL, "DL2"},
+	{"O03", "I05 Switch", "I05"},
+	{"O04", "I06 Switch", "I06"},
+	{"O03", "I07 Switch", "I07"},
+	{"O04", "I08 Switch", "I08"},
+
+	/* uplink */
+	{"I03", NULL, "AIN Mux"},
+	{"I04", NULL, "AIN Mux"},
+
+	{"O09", "I03 Switch", "I03"},
+	{"O10", "I04 Switch", "I04"},
+	{"VUL", NULL, "O09"},
+	{"VUL", NULL, "O10"},
+};
+
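+/*
+ * Per-IRQ register layout consumed by the common AFE code; IRQs 5, 10
+ * and 13 have no sample-rate field, hence irq_fs_reg = -1.
+ */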
+static struct mtk_base_irq_data mt8516_irq_data[MT8516_AFE_IRQ_NUM] = {
+	[MT8516_AFE_IRQ_1] = {
+		.id = MT8516_AFE_IRQ_1,
+		.irq_cnt_reg = AFE_IRQ_CNT1,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON,
+		.irq_fs_shift = 4,
+		.irq_fs_maskbit = 0xf,
+		.irq_en_reg = AFE_IRQ_MCU_CON,
+		.irq_en_shift = 0,
+		.irq_clr_reg = AFE_IRQ_CLR,
+		.irq_clr_shift = 0,
+	},
+	[MT8516_AFE_IRQ_2] = {
+		.id = MT8516_AFE_IRQ_2,
+		.irq_cnt_reg = AFE_IRQ_CNT2,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON,
+		.irq_fs_shift = 8,
+		.irq_fs_maskbit = 0xf,
+		.irq_en_reg = AFE_IRQ_MCU_CON,
+		.irq_en_shift = 1,
+		.irq_clr_reg = AFE_IRQ_CLR,
+		.irq_clr_shift = 1,
+	},
+	[MT8516_AFE_IRQ_5] = {
+		.id = MT8516_AFE_IRQ_5,
+		.irq_cnt_reg = AFE_IRQ_CNT5,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = -1,
+		.irq_fs_shift = -1,
+		.irq_en_reg = AFE_IRQ_MCU_CON2,
+		.irq_en_shift = 3,
+		.irq_clr_reg = AFE_IRQ_CLR,
+		.irq_clr_shift = 4,
+	},
+	[MT8516_AFE_IRQ_7] = {
+		.id = MT8516_AFE_IRQ_7,
+		.irq_cnt_reg = AFE_IRQ_CNT7,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = AFE_IRQ_MCU_CON,
+		.irq_fs_shift = 24,
+		.irq_fs_maskbit = 0xf,
+		.irq_en_reg = AFE_IRQ_MCU_CON,
+		.irq_en_shift = 14,
+		.irq_clr_reg = AFE_IRQ_CLR,
+		.irq_clr_shift = 6,
+	},
+	[MT8516_AFE_IRQ_10] = {
+		.id = MT8516_AFE_IRQ_10,
+		.irq_cnt_reg = AFE_IRQ_CNT10,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = -1,
+		.irq_fs_shift = -1,
+		.irq_en_reg = AFE_IRQ_MCU_CON2,
+		.irq_en_shift = 4,
+		.irq_clr_reg = AFE_IRQ_CLR,
+		.irq_clr_shift = 9,
+	},
+	[MT8516_AFE_IRQ_13] = {
+		.id = MT8516_AFE_IRQ_13,
+		.irq_cnt_reg = AFE_IRQ_CNT13,
+		.irq_cnt_shift = 0,
+		.irq_cnt_maskbit = 0x3ffff,
+		.irq_fs_reg = -1,
+		.irq_fs_shift = -1,
+		.irq_en_reg = AFE_IRQ_MCU_CON2,
+		.irq_en_shift = 7,
+		.irq_clr_reg = AFE_IRQ_CLR,
+		.irq_clr_shift = 12,
+	},
+};
+
+static struct mtk_base_afe_irq mt8516_irqs[MT8516_AFE_IRQ_NUM] = {
+	{ .irq_data = &mt8516_irq_data[MT8516_AFE_IRQ_1] },
+	{ .irq_data = &mt8516_irq_data[MT8516_AFE_IRQ_2] },
+	{ .irq_data = &mt8516_irq_data[MT8516_AFE_IRQ_5] },
+	{ .irq_data = &mt8516_irq_data[MT8516_AFE_IRQ_7] },
+	{ .irq_data = &mt8516_irq_data[MT8516_AFE_IRQ_10] },
+	{ .irq_data = &mt8516_irq_data[MT8516_AFE_IRQ_13] },
+};
+
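+/*
+ * Memory interface descriptions; a shift of -1 marks a control bit
+ * that does not exist for the given interface.
+ */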
+static struct mtk_base_memif_data mt8516_memif_data[MT8516_AFE_MEMIF_NUM] = {
+	[MT8516_AFE_MEMIF_DL1] = {
+		.name = "DL1",
+		.id = MT8516_AFE_MEMIF_DL1,
+		.reg_ofs_base = AFE_DL1_BASE,
+		.reg_ofs_cur = AFE_DL1_CUR,
+		.fs_reg = AFE_DAC_CON1,
+		.fs_shift = 0,
+		.fs_maskbit = 0xf,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = 21,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = 1,
+		.hd_shift = -1,
+		.msb_shift = -1,
+		.agent_disable_shift = -1,
+	},
+	[MT8516_AFE_MEMIF_DL2] = {
+		.name = "DL2",
+		.id = MT8516_AFE_MEMIF_DL2,
+		.reg_ofs_base = AFE_DL2_BASE,
+		.reg_ofs_cur = AFE_DL2_CUR,
+		.fs_reg = AFE_DAC_CON1,
+		.fs_shift = 4,
+		.fs_maskbit = 0xf,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = 22,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = 2,
+		.hd_shift = -1,
+		.msb_shift = -1,
+		.agent_disable_shift = -1,
+	},
+	[MT8516_AFE_MEMIF_VUL] = {
+		.name = "VUL",
+		.id = MT8516_AFE_MEMIF_VUL,
+		.reg_ofs_base = AFE_VUL_BASE,
+		.reg_ofs_cur = AFE_VUL_CUR,
+		.fs_reg = AFE_DAC_CON1,
+		.fs_shift = 16,
+		.fs_maskbit = 0xf,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = 27,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = 3,
+		.hd_shift = -1,
+		.msb_shift = -1,
+		.agent_disable_shift = -1,
+	},
+	[MT8516_AFE_MEMIF_DAI] = {
+		.name = "DAI",
+		.id = MT8516_AFE_MEMIF_DAI,
+		.reg_ofs_base = AFE_DAI_BASE,
+		.reg_ofs_cur = AFE_DAI_CUR,
+		.fs_reg = AFE_DAC_CON0,
+		.fs_shift = 24,
+		.fs_maskbit = 0x3,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = -1,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = 4,
+		.hd_shift = -1,
+		.msb_shift = -1,
+		.agent_disable_shift = -1,
+	},
+	[MT8516_AFE_MEMIF_AWB] = {
+		.name = "AWB",
+		.id = MT8516_AFE_MEMIF_AWB,
+		.reg_ofs_base = AFE_AWB_BASE,
+		.reg_ofs_cur = AFE_AWB_CUR,
+		.fs_reg = AFE_DAC_CON1,
+		.fs_shift = 12,
+		.fs_maskbit = 0xf,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = 24,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = 6,
+		.hd_shift = -1,
+		.msb_shift = -1,
+		.agent_disable_shift = -1,
+	},
+	[MT8516_AFE_MEMIF_MOD_DAI] = {
+		.name = "MOD_DAI",
+		.id = MT8516_AFE_MEMIF_MOD_DAI,
+		.reg_ofs_base = AFE_MOD_PCM_BASE,
+		.reg_ofs_cur = AFE_MOD_PCM_CUR,
+		.fs_reg = AFE_DAC_CON1,
+		.fs_shift = 30,
+		.fs_maskbit = 0x3,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = -1,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = 7,
+		.hd_shift = -1,
+		.msb_shift = -1,
+		.agent_disable_shift = -1,
+	},
+	[MT8516_AFE_MEMIF_HDMI] = {
+		.name = "HDMI",
+		.id = MT8516_AFE_MEMIF_HDMI,
+		.reg_ofs_base = AFE_HDMI_OUT_BASE,
+		.reg_ofs_cur = AFE_HDMI_OUT_CUR,
+		.fs_reg = AFE_DAC_CON1,
+		.fs_shift = -1,
+		.fs_maskbit = 0xf,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = -1,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = -1,
+		.hd_shift = -1,
+		.msb_shift = -1,
+		.agent_disable_shift = -1,
+	},
+	[MT8516_AFE_MEMIF_TDM_IN] = {
+		.name = "TDM_IN",
+		.id = MT8516_AFE_MEMIF_TDM_IN,
+		.reg_ofs_base = AFE_HDMI_IN_2CH_BASE,
+		.reg_ofs_cur = AFE_HDMI_IN_2CH_CUR,
+		.fs_reg = AFE_DAC_CON1,
+		.fs_shift = -1,
+		.fs_maskbit = 0xf,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = -1,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = -1,
+		.hd_shift = -1,
+		.msb_shift = -1,
+		.agent_disable_shift = -1,
+	},
+	[MT8516_AFE_MEMIF_MULTILINE_IN] = {
+		.name = "ULM",
+		.id = MT8516_AFE_MEMIF_MULTILINE_IN,
+		.reg_ofs_base = SPDIFIN_BASE_ADR,
+		.reg_ofs_cur = SPDIFIN_CUR_ADR,
+		.fs_reg = AFE_DAC_CON1,
+		.fs_shift = -1,
+		.fs_maskbit = 0xf,
+		.mono_reg = AFE_DAC_CON1,
+		.mono_shift = -1,
+		.enable_reg = AFE_DAC_CON0,
+		.enable_shift = -1,
+		.hd_shift = -1,
+		.msb_shift = -1,
+		.agent_disable_shift = -1,
+	},
+};
+
+static struct mtk_base_afe_memif mt8516_memif[] = {
+	[MT8516_AFE_MEMIF_DL1] = {
+		.data = &mt8516_memif_data[MT8516_AFE_MEMIF_DL1],
+		.irq_usage = MT8516_AFE_IRQ_1,
+		.const_irq = 1,
+	},
+	[MT8516_AFE_MEMIF_DL2] = {
+		.data = &mt8516_memif_data[MT8516_AFE_MEMIF_DL2],
+		.irq_usage = MT8516_AFE_IRQ_7,
+		.const_irq = 1,
+	},
+	[MT8516_AFE_MEMIF_VUL] = {
+		.data = &mt8516_memif_data[MT8516_AFE_MEMIF_VUL],
+		.irq_usage = MT8516_AFE_IRQ_2,
+		.const_irq = 1,
+	},
+	[MT8516_AFE_MEMIF_DAI] = {
+		.data = &mt8516_memif_data[MT8516_AFE_MEMIF_DAI],
+		.irq_usage = MT8516_AFE_IRQ_2,
+		.const_irq = 1,
+	},
+	[MT8516_AFE_MEMIF_AWB] = {
+		.data = &mt8516_memif_data[MT8516_AFE_MEMIF_AWB],
+		.irq_usage = MT8516_AFE_IRQ_2,
+		.const_irq = 1,
+	},
+	[MT8516_AFE_MEMIF_MOD_DAI] = {
+		.data = &mt8516_memif_data[MT8516_AFE_MEMIF_MOD_DAI],
+		.irq_usage = MT8516_AFE_IRQ_2,
+		.const_irq = 1,
+	},
+	[MT8516_AFE_MEMIF_HDMI] = {
+		.data = &mt8516_memif_data[MT8516_AFE_MEMIF_HDMI],
+		.irq_usage = MT8516_AFE_IRQ_5,
+		.const_irq = 1,
+	},
+	[MT8516_AFE_MEMIF_TDM_IN] = {
+		.data = &mt8516_memif_data[MT8516_AFE_MEMIF_TDM_IN],
+		.irq_usage = MT8516_AFE_IRQ_10,
+		.const_irq = 1,
+	},
+	[MT8516_AFE_MEMIF_MULTILINE_IN] = {
+		.data = &mt8516_memif_data[MT8516_AFE_MEMIF_MULTILINE_IN],
+		.irq_usage = MT8516_AFE_IRQ_13,
+		.const_irq = 1,
+	},
+};
+
+static const struct regmap_config mt8516_afe_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = ABB_AFE_SDM_TEST,
+	.cache_type = REGCACHE_NONE,
+};
+
+static irqreturn_t mt8516_afe_irq_handler(int irq, void *dev_id)
+{
+	struct mtk_base_afe *afe = dev_id;
+	unsigned int reg_value;
+	unsigned int memif_status;
+	int i, ret;
+
+	ret = regmap_read(afe->regmap, AFE_IRQ_STATUS, &reg_value);
+	if (ret)
+		goto exit_irq;
+
+	ret = regmap_read(afe->regmap, AFE_DAC_CON0, &memif_status);
+	if (ret)
+		goto exit_irq;
+
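+	/*
+	 * Dispatch period-elapsed events only for memifs whose IRQ bit
+	 * is pending and whose DMA agent is still enabled.
+	 */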
+	for (i = 0; i < MT8516_AFE_MEMIF_NUM; i++) {
+		struct mtk_base_afe_memif *memif = &afe->memif[i];
+		struct snd_pcm_substream *substream = memif->substream;
+		unsigned int irq_clr_shift =
+			afe->irqs[memif->irq_usage].irq_data->irq_clr_shift;
+		int enable_shift = memif->data->enable_shift;
+
+		if (!substream)
+			continue;
+
+		if (!(reg_value & (1 << irq_clr_shift)))
+			continue;
+
+		if (enable_shift >= 0 && !((1 << enable_shift) & memif_status))
+			continue;
+
+		snd_pcm_period_elapsed(substream);
+	}
+
+	regmap_write(afe->regmap, AFE_IRQ_CLR, reg_value & AFE_IRQ_STATUS_BITS);
+
+	return IRQ_HANDLED;
+
+exit_irq:
+	return IRQ_NONE;
+}
+
+static struct snd_soc_dai_driver mt8516_memif_dai_driver[] = {
+	/* FE DAIs: memory interfaces to CPU */
+	{
+		.name = "DL1",
+		.id = MT8516_AFE_MEMIF_DL1,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
+		.playback = {
+			.stream_name = "DL1",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mtk_afe_fe_ops,
+	}, {
+		.name = "DL2",
+		.id = MT8516_AFE_MEMIF_DL2,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
+		.playback = {
+			.stream_name = "DL2",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mtk_afe_fe_ops,
+	}, {
+		.name = "VUL",
+		.id = MT8516_AFE_MEMIF_VUL,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
+		.capture = {
+			.stream_name = "VUL",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mtk_afe_fe_ops,
+	}, {
+		.name = "DAI",
+		.id = MT8516_AFE_MEMIF_DAI,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
+		.capture = {
+			.stream_name = "DAI",
+			.channels_min = 1,
+			.channels_max = 1,
+			.rates = SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000 |
+				 SNDRV_PCM_RATE_32000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		.ops = &mtk_afe_fe_ops,
+	}, {
+		.name = "HDMI",
+		.id = MT8516_AFE_MEMIF_HDMI,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
+		.playback = {
+			.stream_name = "HDMI",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mtk_afe_fe_ops,
+	}, {
+		.name = "TDM_IN",
+		.id = MT8516_AFE_MEMIF_TDM_IN,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
+		.capture = {
+			.stream_name = "TDM_IN",
+			.channels_min = 2,
+			.channels_max = 8,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mtk_afe_fe_ops,
+	}, {
+		.name = "ULM",
+		.id = MT8516_AFE_MEMIF_MULTILINE_IN,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
+		.capture = {
+			.stream_name = "MULTILINE_IN",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = SNDRV_PCM_RATE_32000
+				| SNDRV_PCM_RATE_44100
+				| SNDRV_PCM_RATE_48000
+				| SNDRV_PCM_RATE_88200
+				| SNDRV_PCM_RATE_96000
+				| SNDRV_PCM_RATE_176400
+				| SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE
+				| SNDRV_PCM_FMTBIT_S24_LE
+				| SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = &mtk_afe_fe_ops,
+	},
+};
+
+static int mt8516_dai_memif_register(struct mtk_base_afe *afe)
+{
+	struct mtk_base_afe_dai *dai;
+
+	dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+	if (!dai)
+		return -ENOMEM;
+
+	list_add(&dai->list, &afe->sub_dais);
+
+	dai->dai_drivers = mt8516_memif_dai_driver;
+	dai->num_dai_drivers = ARRAY_SIZE(mt8516_memif_dai_driver);
+
+	dai->dapm_widgets = mt8516_memif_widgets;
+	dai->num_dapm_widgets = ARRAY_SIZE(mt8516_memif_widgets);
+	dai->dapm_routes = mt8516_memif_routes;
+	dai->num_dapm_routes = ARRAY_SIZE(mt8516_memif_routes);
+
+	return 0;
+}
+
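+/* Sub-DAI registration hooks invoked in order at probe time. */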
+typedef int (*dai_register_cb)(struct mtk_base_afe *);
+static const dai_register_cb dai_register_cbs[] = {
+	mt8516_dai_adda_register,
+	mt8516_dai_memif_register,
+};
+
+static int mt8516_afe_component_probe(struct snd_soc_component *component)
+{
+	return mtk_afe_add_sub_dai_control(component);
+}
+
+static const struct snd_soc_component_driver mt8516_afe_component = {
+	.name = AFE_PCM_NAME,
+	.ops = &mtk_afe_pcm_ops,
+	.pcm_new = mtk_afe_pcm_new,
+	.pcm_free = mtk_afe_pcm_free,
+	.probe = mt8516_afe_component_probe,
+};
+
+static int mt8516_afe_pcm_dev_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	int irq_id;
+	struct mtk_base_afe *afe;
+	struct resource *res;
+	struct device_node *np = pdev->dev.of_node;
+
+	afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
+	if (!afe)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, afe);
+
+	afe->dev = &pdev->dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	afe->base_addr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(afe->base_addr))
+		return PTR_ERR(afe->base_addr);
+
+	afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr,
+		&mt8516_afe_regmap_config);
+	if (IS_ERR(afe->regmap))
+		return PTR_ERR(afe->regmap);
+
+	afe->reg_back_up_list = &mt8516_afe_backup_list[0];
+	afe->reg_back_up_list_num = ARRAY_SIZE(mt8516_afe_backup_list);
+
+	/* init sub_dais */
+	INIT_LIST_HEAD(&afe->sub_dais);
+
+	for (i = 0; i < ARRAY_SIZE(dai_register_cbs); i++) {
+		ret = dai_register_cbs[i](afe);
+		if (ret) {
+			dev_warn(afe->dev,
+				 "Failed to register dai register %d, ret %d\n",
+				 i, ret);
+			return ret;
+		}
+	}
+
+	/* init dai_driver and component_driver */
+	ret = mtk_afe_combine_sub_dai(afe);
+	if (ret) {
+		dev_warn(afe->dev, "Failed to combine sub-dais, ret %d\n", ret);
+		return ret;
+	}
+
+	afe->mtk_afe_hardware = &mt8516_afe_hardware;
+
+	afe->irqs = mt8516_irqs;
+	afe->irq_fs = mt8516_afe_irq_fs;
+
+	afe->memif = &mt8516_memif[0];
+	afe->memif_size = ARRAY_SIZE(mt8516_memif);
+	afe->memif_fs = mt8516_afe_i2s_fs;
+
+	ret = devm_snd_soc_register_component(&pdev->dev,
+					      &mt8516_afe_component,
+					      afe->dai_drivers,
+					      afe->num_dai_drivers);
+	if (ret)
+		return ret;
+
+	irq_id = platform_get_irq(pdev, 0);
+	if (irq_id < 0) {
+		dev_err(afe->dev, "np %s no irq\n", np->name);
+		return irq_id;
+	}
+
+	ret = devm_request_irq(afe->dev, irq_id, mt8516_afe_irq_handler,
+			       0, "Afe_ISR_Handle", (void *)afe);
+	if (ret) {
+		dev_err(afe->dev, "could not request_irq\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id mt8516_afe_pcm_dt_match[] = {
+	{ .compatible = "mediatek,mt8516-audio", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mt8516_afe_pcm_dt_match);
+
+static struct platform_driver mt8516_afe_pcm_driver = {
+	.driver = {
+		   .name = "mtk-afe-pcm",
+		   .of_match_table = mt8516_afe_pcm_dt_match,
+	},
+	.probe = mt8516_afe_pcm_dev_probe,
+};
+
+module_platform_driver(mt8516_afe_pcm_driver);
+
+MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Fabien Parent <fparent@baylibre.com>");
diff --git a/sound/soc/mediatek/mt8516/mt8516-afe-regs.h b/sound/soc/mediatek/mt8516/mt8516-afe-regs.h
new file mode 100644
index 0000000..0edb19c
--- /dev/null
+++ b/sound/soc/mediatek/mt8516/mt8516-afe-regs.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Copyright (c) 2019 MediaTek, Inc
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#ifndef _MT8516_AFE_REGS_H_
+#define _MT8516_AFE_REGS_H_
+
+#include <linux/bitops.h>
+
+#define AUDIO_TOP_CON0		0x0000
+#define AUDIO_TOP_CON1		0x0004
+#define AUDIO_TOP_CON3		0x000c
+#define AFE_DAC_CON0		0x0010
+#define AFE_DAC_CON1		0x0014
+#define AFE_I2S_CON		0x0018
+#define AFE_I2S_CON1		0x0034
+#define AFE_I2S_CON2		0x0038
+#define AFE_I2S_CON3		0x004c
+#define AFE_DAIBT_CON0          0x001c
+#define AFE_MRGIF_CON           0x003c
+#define AFE_CONN_24BIT		0x006c
+
+#define AFE_CONN0		0x0020
+#define AFE_CONN1		0x0024
+#define AFE_CONN2		0x0028
+#define AFE_CONN3		0x002C
+#define AFE_CONN4		0x0030
+#define AFE_CONN5		0x005C
+
+/* Memory interface */
+#define AFE_DL1_BASE		0x0040
+#define AFE_DL1_CUR		0x0044
+#define AFE_DL1_END		0x0048
+#define AFE_DL2_BASE		0x0050
+#define AFE_DL2_CUR		0x0054
+#define AFE_DL2_END             0x0058
+#define AFE_AWB_BASE		0x0070
+#define AFE_AWB_END             0x0078
+#define AFE_AWB_CUR		0x007c
+#define AFE_VUL_BASE		0x0080
+#define AFE_VUL_END		0x0088
+#define AFE_VUL_CUR		0x008c
+#define AFE_DAI_BASE		0x0090
+#define AFE_DAI_END		0x0098
+#define AFE_DAI_CUR		0x009c
+#define AFE_MOD_PCM_BASE	0x0330
+#define AFE_MOD_PCM_END		0x0338
+#define AFE_MOD_PCM_CUR		0x033c
+#define AFE_HDMI_OUT_BASE	0x0374
+#define AFE_HDMI_OUT_CUR	0x0378
+#define AFE_HDMI_OUT_END	0x037c
+
+#define AFE_MEMIF_MSB           0x00cc
+#define AFE_MEMIF_MON0          0x00d0
+#define AFE_MEMIF_MON1          0x00d4
+#define AFE_MEMIF_MON2          0x00d8
+#define AFE_MEMIF_MON3          0x00dc
+
+#define AFE_ADDA_DL_SRC2_CON0   0x0108
+#define AFE_ADDA_DL_SRC2_CON1   0x010c
+#define AFE_ADDA_UL_SRC_CON0    0x0114
+#define AFE_ADDA_UL_SRC_CON1    0x0118
+#define AFE_ADDA_TOP_CON0	0x0120
+#define AFE_ADDA_UL_DL_CON0     0x0124
+#define AFE_ADDA_NEWIF_CFG0     0x0138
+#define AFE_ADDA_NEWIF_CFG1     0x013c
+#define AFE_ADDA_PREDIS_CON0    0x0260
+#define AFE_ADDA_PREDIS_CON1    0x0264
+
+#define AFE_HDMI_OUT_CON0	0x0370
+
+#define AFE_IRQ_MCU_CON		0x03a0
+#define AFE_IRQ_STATUS		0x03a4
+#define AFE_IRQ_CLR		0x03a8
+#define AFE_IRQ_CNT1		0x03ac
+#define AFE_IRQ_CNT2		0x03b0
+#define AFE_IRQ_MCU_EN		0x03b4
+#define AFE_IRQ_CNT5		0x03bc
+#define AFE_IRQ_CNT7		0x03dc
+#define AFE_IRQ_CNT13		0x0408
+#define AFE_IRQ1_MCU_CNT_MON    0x03c0
+#define AFE_IRQ2_MCU_CNT_MON    0x03c4
+#define AFE_IRQ_MCU_CON2	0x03f8
+
+#define AFE_MEMIF_PBUF_SIZE	0x03d8
+#define AFE_MEMIF_PBUF2_SIZE	0x03ec
+
+#define AFE_ASRC_CON0		0x0500
+
+#define AFE_ASRC_CON13		0x0550
+#define AFE_ASRC_CON14		0x0554
+#define AFE_ASRC_CON15		0x0558
+#define AFE_ASRC_CON16		0x055c
+#define AFE_ASRC_CON17		0x0560
+#define AFE_ASRC_CON18		0x0564
+#define AFE_ASRC_CON19		0x0568
+#define AFE_ASRC_CON20		0x056c
+#define AFE_ASRC_CON21		0x0570
+
+#define AFE_TDM_CON1		0x0548
+#define AFE_TDM_CON2		0x054c
+
+#define AFE_TDM_IN_CON1		0x0588
+#define AFE_TDM_IN_MON2		0x0594
+#define AFE_IRQ_CNT10		0x08dc
+
+#define AFE_HDMI_IN_2CH_CON0	0x09c0
+#define AFE_HDMI_IN_2CH_BASE	0x09c4
+#define AFE_HDMI_IN_2CH_END	0x09c8
+#define AFE_HDMI_IN_2CH_CUR	0x09cc
+
+#define AFE_MEMIF_MON15		0x0d7c
+#define ABB_AFE_SDM_TEST	0x0f4c
+
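+/* Mask of the valid bits in AFE_IRQ_STATUS / AFE_IRQ_CLR */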
+#define AFE_IRQ_STATUS_BITS	0x13ff
+
+/* AFE_I2S_CON (0x0018) */
+#define AFE_I2S_CON_PHASE_SHIFT_FIX	BIT(31)
+#define AFE_I2S_CON_BCK_INV			BIT(29)
+#define AFE_I2S_CON_FROM_IO_MUX		BIT(28)
+#define AFE_I2S_CON_LOW_JITTER_CLK	BIT(12)
+#define AFE_I2S_CON_LRCK_INV		BIT(5)
+#define AFE_I2S_CON_FORMAT_I2S		BIT(3)
+#define AFE_I2S_CON_SRC_SLAVE		BIT(2)
+#define AFE_I2S_CON_WLEN_32BIT		BIT(1)
+#define AFE_I2S_CON_EN			BIT(0)
+
+/* AFE_CONN1 (0x0024) */
+#define AFE_CONN1_I03_O03_S		BIT(19)
+
+/* AFE_CONN2 (0x0028) */
+#define AFE_CONN2_I04_O04_S		BIT(4)
+#define AFE_CONN2_I03_O04_S		BIT(3)
+
+/* AFE_I2S_CON1 (0x0034) */
+#define AFE_I2S_CON1_I2S2_TO_PAD	(1 << 18)
+#define AFE_I2S_CON1_TDMOUT_TO_PAD	(0 << 18)
+#define AFE_I2S_CON1_TDMOUT_MUX_MASK	GENMASK(18, 18)
+#define AFE_I2S_CON1_LOW_JITTER_CLK	BIT(12)
+#define AFE_I2S_CON1_RATE(x)		(((x) & 0xf) << 8)
+#define AFE_I2S_CON1_FORMAT_I2S		BIT(3)
+#define AFE_I2S_CON1_WLEN_32BIT		BIT(1)
+#define AFE_I2S_CON1_EN			BIT(0)
+
+/* AFE_I2S_CON2 (0x0038) */
+#define AFE_I2S_CON2_LOW_JITTER_CLK	BIT(12)
+#define AFE_I2S_CON2_RATE(x)		(((x) & 0xf) << 8)
+#define AFE_I2S_CON2_FORMAT_I2S		BIT(3)
+#define AFE_I2S_CON2_WLEN_32BIT		BIT(1)
+#define AFE_I2S_CON2_EN			BIT(0)
+
+/* AFE_I2S_CON3 (0x004C) */
+#define AFE_I2S_CON3_LOW_JITTER_CLK	BIT(12)
+#define AFE_I2S_CON3_RATE(x)		(((x) & 0xf) << 8)
+#define AFE_I2S_CON3_FORMAT_I2S		BIT(3)
+#define AFE_I2S_CON3_WLEN_32BIT		BIT(1)
+#define AFE_I2S_CON3_EN			BIT(0)
+
+/* AFE_CONN_24BIT (0x006c) */
+#define AFE_CONN_24BIT_O10		BIT(10)
+#define AFE_CONN_24BIT_O09		BIT(9)
+#define AFE_CONN_24BIT_O06		BIT(6)
+#define AFE_CONN_24BIT_O05		BIT(5)
+#define AFE_CONN_24BIT_O04		BIT(4)
+#define AFE_CONN_24BIT_O03		BIT(3)
+#define AFE_CONN_24BIT_O02		BIT(2)
+#define AFE_CONN_24BIT_O01		BIT(1)
+#define AFE_CONN_24BIT_O00		BIT(0)
+
+/* AFE_ADDA_UL_SRC_CON0 */
+#define AFE_ADDA_UL_RATE_CH1_SHIFT	17
+#define AFE_ADDA_UL_RATE_CH1_MASK	0x3
+#define AFE_ADDA_UL_RATE_CH2_SHIFT	19
+#define AFE_ADDA_UL_RATE_CH2_MASK	0x3
+
+/* AFE_ADDA_DL_SRC2_CON0 (0x0108) */
+#define AFE_ADDA_DL_8X_UPSAMPLE		(BIT(25) | BIT(24))
+#define AFE_ADDA_DL_MUTE_OFF		(BIT(12) | BIT(11))
+#define AFE_ADDA_DL_VOICE_DATA		BIT(5)
+#define AFE_ADDA_DL_DEGRADE_GAIN	BIT(1)
+#define AFE_ADDA_DL_RATE_SHIFT	28
+
+/* AFE_ASRC_CON0 (0x0500) */
+#define AFE_ASRC_CON0_ASM_ON		BIT(0)
+#define AFE_ASRC_CON0_STR_CLR_MASK	GENMASK(6, 4)
+#define AFE_ASRC_CON0_CLR_TX		(0x1 << 4)
+#define AFE_ASRC_CON0_CLR_RX		(0x2 << 4)
+#define AFE_ASRC_CON0_CLR_I2S		(0x4 << 4)
+
+/* AFE_ASRC_CON13 (0x0550) */
+#define AFE_ASRC_CON13_16BIT		BIT(19)
+#define AFE_ASRC_CON13_MONO		BIT(16)
+
+/* AFE_ASRC_CON16 (0x055c) */
+#define AFE_ASRC_CON16_FC2_CYCLE_MASK		GENMASK(31, 16)
+#define AFE_ASRC_CON16_FC2_CYCLE(x)		(((x) - 1) << 16)
+#define AFE_ASRC_CON16_FC2_AUTO_RST		BIT(14)
+#define AFE_ASRC_CON16_TUNE_FREQ5		BIT(12)
+#define AFE_ASRC_CON16_COMP_FREQ_EN		BIT(11)
+#define AFE_ASRC_CON16_FC2_SEL			GENMASK(9, 8)
+#define AFE_ASRC_CON16_FC2_I2S_IN		(0x1 << 8)
+#define AFE_ASRC_CON16_FC2_DGL_BYPASS		BIT(7)
+#define AFE_ASRC_CON16_FC2_AUTO_RESTART		BIT(2)
+#define AFE_ASRC_CON16_FC2_FREQ			BIT(1)
+#define AFE_ASRC_CON16_FC2_EN			BIT(0)
+
+/* AFE_ADDA_NEWIF_CFG0 (0x0138) */
+#define AFE_ADDA_NEWIF_ADC_VOICE_MODE_SHIFT	10
+#define AFE_ADDA_NEWIF_ADC_VOICE_MODE_CLR	(0x3 << 10)
+
+/* AFE_SPDIF_IN */
+#define SPDIFIN_BASE_ADR			(0x0994)
+#define SPDIFIN_CUR_ADR				(0x09B8)
+
+#endif
diff --git a/sound/soc/mediatek/mt8516/mt8516-dai-adda.c b/sound/soc/mediatek/mt8516/mt8516-dai-adda.c
new file mode 100644
index 0000000..e5a7435
--- /dev/null
+++ b/sound/soc/mediatek/mt8516/mt8516-dai-adda.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Copyright (c) 2019 MediaTek, Inc
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+
+#include "mt8516-afe-common.h"
+#include "mt8516-afe-regs.h"
+
+enum {
+	MTK_AFE_ADDA_DL_RATE_8K = 0,
+	MTK_AFE_ADDA_DL_RATE_11K = 1,
+	MTK_AFE_ADDA_DL_RATE_12K = 2,
+	MTK_AFE_ADDA_DL_RATE_16K = 3,
+	MTK_AFE_ADDA_DL_RATE_22K = 4,
+	MTK_AFE_ADDA_DL_RATE_24K = 5,
+	MTK_AFE_ADDA_DL_RATE_32K = 6,
+	MTK_AFE_ADDA_DL_RATE_44K = 7,
+	MTK_AFE_ADDA_DL_RATE_48K = 8,
+};
+
+enum {
+	MTK_AFE_ADDA_UL_RATE_8K = 0,
+	MTK_AFE_ADDA_UL_RATE_16K = 1,
+	MTK_AFE_ADDA_UL_RATE_32K = 2,
+	MTK_AFE_ADDA_UL_RATE_48K = 3,
+};
+
+static int mt8516_afe_setup_i2s(struct mtk_base_afe *afe,
+				    struct snd_pcm_substream *substream,
+				    unsigned int rate, int bit_width)
+{
+	int fs = afe->memif_fs(substream, rate);
+	unsigned int val;
+
+	if (fs < 0)
+		return -EINVAL;
+
+	val = AFE_I2S_CON1_I2S2_TO_PAD |
+	      AFE_I2S_CON1_LOW_JITTER_CLK |
+	      AFE_I2S_CON1_RATE(fs) |
+	      AFE_I2S_CON1_FORMAT_I2S |
+	      AFE_I2S_CON1_EN;
+
+	if (bit_width > 16)
+		val |= AFE_I2S_CON1_WLEN_32BIT;
+
+	regmap_write(afe->regmap, AFE_I2S_CON1, val);
+
+	return 0;
+}
+
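+/*
+ * Configure the downlink (playback) sample-rate converter; 8 kHz and
+ * 16 kHz streams are tagged as voice data.
+ */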
+static int mt8516_afe_setup_adda_dl(struct mtk_base_afe *afe, unsigned int rate)
+{
+	unsigned int val = AFE_ADDA_DL_8X_UPSAMPLE |
+			   AFE_ADDA_DL_MUTE_OFF |
+			   AFE_ADDA_DL_DEGRADE_GAIN;
+
+	if (rate == 8000 || rate == 16000)
+		val |= AFE_ADDA_DL_VOICE_DATA;
+
+	switch (rate) {
+	case 8000:
+		val |= MTK_AFE_ADDA_DL_RATE_8K << AFE_ADDA_DL_RATE_SHIFT;
+		break;
+	case 11025:
+		val |= MTK_AFE_ADDA_DL_RATE_11K << AFE_ADDA_DL_RATE_SHIFT;
+		break;
+	case 12000:
+		val |= MTK_AFE_ADDA_DL_RATE_12K << AFE_ADDA_DL_RATE_SHIFT;
+		break;
+	case 16000:
+		val |= MTK_AFE_ADDA_DL_RATE_16K << AFE_ADDA_DL_RATE_SHIFT;
+		break;
+	case 22050:
+		val |= MTK_AFE_ADDA_DL_RATE_22K << AFE_ADDA_DL_RATE_SHIFT;
+		break;
+	case 24000:
+		val |= MTK_AFE_ADDA_DL_RATE_24K << AFE_ADDA_DL_RATE_SHIFT;
+		break;
+	case 32000:
+		val |= MTK_AFE_ADDA_DL_RATE_32K << AFE_ADDA_DL_RATE_SHIFT;
+		break;
+	case 44100:
+		val |= MTK_AFE_ADDA_DL_RATE_44K << AFE_ADDA_DL_RATE_SHIFT;
+		break;
+	case 48000:
+		val |= MTK_AFE_ADDA_DL_RATE_48K << AFE_ADDA_DL_RATE_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	regmap_write(afe->regmap, AFE_ADDA_PREDIS_CON0, 0);
+	regmap_write(afe->regmap, AFE_ADDA_PREDIS_CON1, 0);
+	regmap_write(afe->regmap, AFE_ADDA_DL_SRC2_CON0, val);
+	regmap_write(afe->regmap, AFE_ADDA_DL_SRC2_CON1, 0xf74f0000);
+
+	return 0;
+}
+
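+/* Configure the uplink (capture) sample-rate converter. */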
+static int mt8516_afe_setup_adda_ul(struct mtk_base_afe *afe, unsigned int rate)
+{
+	unsigned int val = 0;
+	unsigned int val2 = 0;
+
+	switch (rate) {
+	case 8000:
+		val |= MTK_AFE_ADDA_UL_RATE_8K << AFE_ADDA_UL_RATE_CH1_SHIFT;
+		val |= MTK_AFE_ADDA_UL_RATE_8K << AFE_ADDA_UL_RATE_CH2_SHIFT;
+		val2 |= 1 << AFE_ADDA_NEWIF_ADC_VOICE_MODE_SHIFT;
+		break;
+	case 16000:
+		val |= MTK_AFE_ADDA_UL_RATE_16K << AFE_ADDA_UL_RATE_CH1_SHIFT;
+		val |= MTK_AFE_ADDA_UL_RATE_16K << AFE_ADDA_UL_RATE_CH2_SHIFT;
+		val2 |= 1 << AFE_ADDA_NEWIF_ADC_VOICE_MODE_SHIFT;
+		break;
+	case 32000:
+		val |= MTK_AFE_ADDA_UL_RATE_32K << AFE_ADDA_UL_RATE_CH1_SHIFT;
+		val |= MTK_AFE_ADDA_UL_RATE_32K << AFE_ADDA_UL_RATE_CH2_SHIFT;
+		val2 |= 1 << AFE_ADDA_NEWIF_ADC_VOICE_MODE_SHIFT;
+		break;
+	case 48000:
+		val |= MTK_AFE_ADDA_UL_RATE_48K << AFE_ADDA_UL_RATE_CH1_SHIFT;
+		val |= MTK_AFE_ADDA_UL_RATE_48K << AFE_ADDA_UL_RATE_CH2_SHIFT;
+		val2 |= 3 << AFE_ADDA_NEWIF_ADC_VOICE_MODE_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0,
+		(AFE_ADDA_UL_RATE_CH1_MASK << AFE_ADDA_UL_RATE_CH1_SHIFT) |
+		(AFE_ADDA_UL_RATE_CH2_MASK << AFE_ADDA_UL_RATE_CH2_SHIFT), val);
+	regmap_update_bits(afe->regmap, AFE_ADDA_NEWIF_CFG1,
+		AFE_ADDA_NEWIF_ADC_VOICE_MODE_CLR, val2);
+	regmap_update_bits(afe->regmap, AFE_ADDA_TOP_CON0, 1, 0);
+
+	return 0;
+}
+
+static void mt8516_afe_adda_shutdown(struct snd_pcm_substream *substream,
+				 struct snd_soc_dai *dai)
+{
+	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+	unsigned int stream = substream->stream;
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		regmap_update_bits(afe->regmap, AFE_ADDA_DL_SRC2_CON0, 1, 0);
+		regmap_update_bits(afe->regmap, AFE_I2S_CON1, 1, 0);
+	} else {
+		regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0, 1, 0);
+	}
+
+	regmap_update_bits(afe->regmap, AFE_ADDA_UL_DL_CON0, 1, 0);
+}
+
+static int mt8516_afe_adda_hw_params(struct snd_pcm_substream *substream,
+			  struct snd_pcm_hw_params *params,
+			  struct snd_soc_dai *dai)
+{
+	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+	unsigned int width_val = 0;
+
+	if (params_width(params) > 16)
+		width_val = AFE_CONN_24BIT_O03 | AFE_CONN_24BIT_O04;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		regmap_update_bits(afe->regmap, AFE_CONN_24BIT,
+			   AFE_CONN_24BIT_O03 | AFE_CONN_24BIT_O04, width_val);
+
+	return 0;
+}
+
+static int mt8516_afe_adda_prepare(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+	const unsigned int rate = substream->runtime->rate;
+	unsigned int stream = substream->stream;
+	int bit_width = snd_pcm_format_width(substream->runtime->format);
+	int ret;
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		ret = mt8516_afe_setup_adda_dl(afe, rate);
+		if (ret)
+			return ret;
+
+		ret = mt8516_afe_setup_i2s(afe, substream, rate, bit_width);
+		if (ret)
+			return ret;
+
+		regmap_update_bits(afe->regmap, AFE_ADDA_DL_SRC2_CON0, 1, 1);
+	} else {
+		ret = mt8516_afe_setup_adda_ul(afe, rate);
+		if (ret)
+			return ret;
+
+		regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0, 1, 1);
+	}
+
+	regmap_update_bits(afe->regmap, AFE_ADDA_UL_DL_CON0, 1, 1);
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops mt8516_afe_adda_ops = {
+	.shutdown	= mt8516_afe_adda_shutdown,
+	.hw_params	= mt8516_afe_adda_hw_params,
+	.prepare	= mt8516_afe_adda_prepare,
+};
+
+static const struct snd_kcontrol_new adda_o03_o04_enable_ctl =
+	SOC_DAPM_SINGLE_VIRT("Switch", 1);
+
+static const char * const ain_text[] = {
+	"INT ADC", "EXT ADC"
+};
+
+static SOC_ENUM_SINGLE_DECL(ain_enum, AFE_ADDA_TOP_CON0, 0, ain_text);
+
+static const struct snd_kcontrol_new ain_mux =
+	SOC_DAPM_ENUM("AIN Source", ain_enum);
+
+enum {
+	SUPPLY_SEQ_ADDA_AFE_ON,
+};
+
+static const struct snd_soc_dapm_widget mtk_dai_adda_widgets[] = {
+	SND_SOC_DAPM_MUX("AIN Mux", SND_SOC_NOPM, 0, 0, &ain_mux),
+
+	SND_SOC_DAPM_SWITCH("ADDA O03_O04", SND_SOC_NOPM, 0, 0,
+			    &adda_o03_o04_enable_ctl),
+
+	SND_SOC_DAPM_SUPPLY_S("ADDA Enable", SUPPLY_SEQ_ADDA_AFE_ON,
+			      AFE_DAC_CON0, 0, 0,
+			      NULL, 0),
+
+	/* Clocks */
+	SND_SOC_DAPM_CLOCK_SUPPLY("top_pdn_audio"),
+	SND_SOC_DAPM_CLOCK_SUPPLY("aud_dac_clk"),
+	SND_SOC_DAPM_CLOCK_SUPPLY("aud_dac_predis_clk"),
+	SND_SOC_DAPM_CLOCK_SUPPLY("aud_adc_clk"),
+};
+
+static const struct snd_soc_dapm_route mtk_dai_adda_routes[] = {
+	/* playback */
+	{"ADDA O03_O04", "Switch", "O03"},
+	{"ADDA O03_O04", "Switch", "O04"},
+	{"ADDA Playback", NULL, "ADDA O03_O04"},
+
+	/* capture */
+	{"AIN Mux", "INT ADC", "ADDA Capture"},
+
+	/* enable */
+	{"ADDA Playback", NULL, "ADDA Enable"},
+	{"ADDA Capture", NULL, "ADDA Enable"},
+
+	/* clock */
+	{"ADDA Playback", NULL, "aud_dac_clk"},
+	{"ADDA Playback", NULL, "aud_dac_predis_clk"},
+	{"ADDA Playback", NULL, "top_pdn_audio"},
+
+	{"ADDA Capture", NULL, "top_pdn_audio"},
+	{"ADDA Capture", NULL, "aud_adc_clk"},
+};
+
+static struct snd_soc_dai_driver mtk_dai_adda_driver[] = {
+	{
+		.name = "ADDA",
+		.id = MT8516_AFE_BE_ADDA,
+		.playback = {
+			.stream_name = "ADDA Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S24_LE,
+		},
+		.capture = {
+			.stream_name = "ADDA Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000 |
+				 SNDRV_PCM_RATE_16000 |
+				 SNDRV_PCM_RATE_32000 |
+				 SNDRV_PCM_RATE_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		.ops = &mt8516_afe_adda_ops,
+	},
+};
+
+int mt8516_dai_adda_register(struct mtk_base_afe *afe)
+{
+	struct mtk_base_afe_dai *dai;
+
+	dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+	if (!dai)
+		return -ENOMEM;
+
+	list_add(&dai->list, &afe->sub_dais);
+
+	dai->dai_drivers = mtk_dai_adda_driver;
+	dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_adda_driver);
+
+	dai->dapm_widgets = mtk_dai_adda_widgets;
+	dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_adda_widgets);
+	dai->dapm_routes = mtk_dai_adda_routes;
+	dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_adda_routes);
+
+	return 0;
+}